From 83ba6762cc43d9db581b979bb5e3445669e46cc2 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Mon, 25 Nov 2024 18:33:56 +0100
Subject: Merging upstream version 2.0.3+dfsg (Closes: #923993, #1042533, #1045145).

Signed-off-by: Daniel Baumann
---
 .codacy.yml | 1 -
 .github/ISSUE_TEMPLATE/BUG_REPORT.yml | 2 +
 .github/data/distros.yml | 16 +-
 .github/dockerfiles/Dockerfile.build_test | 5 +-
 .github/dockerfiles/Dockerfile.clang | 2 +-
 .github/labeler.yml | 12 +-
 .github/scripts/check-updater.sh | 1 +
 .github/scripts/prepare-release-base.sh | 1 +
 .github/workflows/build.yml | 313 +-
 .github/workflows/checks.yml | 4 +-
 .github/workflows/dashboard-pr.yml | 2 +-
 .github/workflows/docker.yml | 12 +-
 .github/workflows/generate-integrations.yml | 2 +-
 .github/workflows/go-tests.yml | 2 +-
 .github/workflows/packaging.yml | 2 +-
 .gitignore | 6 +-
 CHANGELOG.md | 329 +-
 CMakeLists.txt | 986 +-
 README.md | 430 +-
 REDISTRIBUTED.md | 216 +-
 .../.page-level/_concept-page-template.md | 7 -
 docs/Demo-Sites.md | 66 +-
 .../notifications/README.md | 2 +
 .../maintenance-operations-on-netdata-agents.md | 11 +-
 docs/category-overview-pages/working-with-logs.md | 2 +-
 docs/dashboards-and-charts/README.md | 2 +-
 docs/dashboards-and-charts/alerts-tab.md | 2 +-
 docs/dashboards-and-charts/anomaly-advisor-tab.md | 7 +-
 docs/dashboards-and-charts/events-feed.md | 4 +-
 .../import-export-print-snapshot.md | 19 +-
 docs/dashboards-and-charts/kubernetes-tab.md | 1 -
 docs/dashboards-and-charts/netdata-charts.md | 71 +-
 docs/dashboards-and-charts/themes.md | 1 -
 docs/dashboards-and-charts/top-tab.md | 2 +-
 docs/deployment-guides/deployment-strategies.md | 27 +-
 docs/developer-and-contributor-corner/README.md | 2 +-
 .../build-the-netdata-agent-yourself.md | 2 +-
 .../collect-apache-nginx-web-logs.md | 15 +-
 .../collect-unbound-metrics.md | 14 +-
 docs/developer-and-contributor-corner/customize.md | 13 +-
 .../kubernetes-k8s-netdata.md | 237 -
 .../kubernetes-k8s-netdata.txt | 234 +
 .../developer-and-contributor-corner/lamp-stack.md | 238 -
 .../lamp-stack.txt | 237 +
 .../monitor-cockroachdb.md | 118 -
 .../monitor-cockroachdb.txt | 118 +
 .../monitor-debug-applications-ebpf.md | 29 +-
 .../monitor-hadoop-cluster.md | 20 +-
 .../pi-hole-raspberry-pi.md | 140 -
 .../pi-hole-raspberry-pi.txt | 120 +
 docs/developer-and-contributor-corner/process.md | 270 -
 docs/developer-and-contributor-corner/process.txt | 270 +
 .../python-collector.md | 626 -
 .../python-collector.txt | 629 +
 .../raspberry-pi-anomaly-detection.md | 96 -
 .../raspberry-pi-anomaly-detection.txt | 96 +
 .../running-through-cf-tunnels.md | 2 +-
 .../style-guide.md | 42 +-
 docs/diagrams/netdata-overview.xml | 2 +-
 docs/exporting-metrics/README.md | 68 +-
 .../enable-an-exporting-connector.md | 4 +-
 docs/glossary.md | 12 +-
 docs/guidelines.md | 2 +-
 docs/netdata-agent/README.md | 10 +-
 docs/netdata-agent/backup-and-restore-an-agent.md | 45 +-
 docs/netdata-agent/configuration/README.md | 20 +-
 .../configuration/anonymous-telemetry-events.md | 60 +-
 docs/netdata-agent/configuration/cheatsheet.md | 144 +-
 .../configuration/common-configuration-changes.md | 16 +-
 .../configuration/dynamic-configuration.md | 12 +-
 .../optimize-the-netdata-agents-performance.md | 80 +-
 .../optimizing-metrics-database/README.md | 2 +-
 .../change-metrics-storage.md | 51 +-
 .../organize-systems-metrics-and-alerts.md | 93 +-
 .../README.md | 8 +-
 .../Running-behind-apache.md | 229 +-
 .../Running-behind-caddy.md | 15 +-
 .../Running-behind-h2o.md | 47 +-
 .../Running-behind-haproxy.md | 54 +-
 .../Running-behind-lighttpd.md | 34 +-
 .../Running-behind-nginx.md | 71 +-
 docs/netdata-agent/securing-netdata-agents.md | 116 +-
 docs/netdata-agent/sizing-netdata-agents/README.md | 106 +-
 .../bandwidth-requirements.md | 12 +-
 .../sizing-netdata-agents/cpu-requirements.md | 80 +-
 .../disk-requirements-and-retention.md | 10 +-
 .../sizing-netdata-agents/ram-requirements.md | 20 +-
 docs/netdata-agent/start-stop-restart.md | 150 +-
 docs/netdata-agent/versions-and-platforms.md | 13 +-
 docs/netdata-assistant.md | 8 +-
 .../authentication-and-authorization/api-tokens.md | 2 +-
 .../enterprise-sso-authentication.md | 31 +-
 .../role-based-access-model.md | 6 +-
 .../netdata-cloud-on-prem/installation.md | 61 +-
 docs/netdata-cloud/versions.md | 2 +-
 .../active-journal-source-without-encryption.md | 2 +-
 ...th-encryption-using-self-signed-certificates.md | 6 +-
 ...ve-journal-centralization-without-encryption.md | 4 +-
 ...ing-and-high-availability-of-netdata-parents.md | 6 +-
 .../metrics-centralization-points/configuration.md | 6 +-
 .../metrics-centralization-points/faq.md | 10 +-
 .../replication-of-past-samples.md | 4 +-
 docs/security-and-privacy-design/README.md | 81 +-
 .../netdata-agent-security.md | 3 +-
 .../netdata-cloud-security.md | 2 +-
 docs/top-monitoring-netdata-functions.md | 2 +-
 .../cloud-authentication/integrations/oidc.md | 2 +-
 .../cloud-authentication/integrations/okta_sso.md | 2 +-
 .../cloud-authentication/integrations/scim.md | 52 +-
 integrations/cloud-authentication/metadata.yaml | 52 +-
 .../cloud-notifications/integrations/amazon_sns.md | 45 +-
 .../cloud-notifications/integrations/discord.md | 41 +-
 .../cloud-notifications/integrations/ilert.md | 51 +
 .../cloud-notifications/integrations/mattermost.md | 45 +-
 .../integrations/microsoft_teams.md | 65 +-
 .../integrations/netdata_mobile_app.md | 35 +-
 .../cloud-notifications/integrations/opsgenie.md | 38 +-
 .../cloud-notifications/integrations/pagerduty.md | 40 +-
 .../cloud-notifications/integrations/rocketchat.md | 52 +-
 .../cloud-notifications/integrations/slack.md | 45 +-
 .../cloud-notifications/integrations/splunk.md | 35 +-
 .../integrations/splunk_victorops.md | 31 +-
 .../cloud-notifications/integrations/telegram.md | 45 +-
 .../cloud-notifications/integrations/webhook.md | 299 +-
 integrations/cloud-notifications/metadata.yaml | 949 +-
 integrations/deploy.yaml | 65 +-
 integrations/gen_docs_integrations.py | 192 +-
 integrations/gen_integrations.py | 324 +-
 integrations/integrations.js | 2829 +-
 integrations/integrations.json | 2825 +-
 .../logs/integrations/systemd_journal_logs.md | 53 +
 .../logs/integrations/windows_event_logs.md | 53 +
 integrations/logs/metadata.yaml | 75 +
 integrations/schemas/agent_notification.json | 87 +
 integrations/schemas/cloud_notification.json | 68 +
 integrations/schemas/logs.json | 97 +
 integrations/schemas/notification.json | 87 -
 integrations/templates/overview.md | 4 +-
 integrations/templates/overview/logs.md | 11 +
 integrations/templates/platform_info.md | 2 +-
 integrations/templates/setup.md | 13 +-
 integrations/templates/troubleshooting.md | 6 +-
 netdata-installer.sh | 36 +-
 netdata.spec.in | 37 +-
 packaging/PLATFORM_SUPPORT.md | 92 +-
 packaging/VERSIONING_AND_PUBLIC_API.md | 10 +-
 packaging/build-package.sh | 5 +-
 packaging/building-native-packages-locally.md | 14 +-
 packaging/check-for-go-toolchain.sh | 262 +-
 packaging/cmake/Modules/NetdataDashboard.cmake | 85 +
 packaging/cmake/Modules/NetdataEBPFCORE.cmake | 4 +-
 packaging/cmake/Modules/NetdataEBPFLegacy.cmake | 8 +-
 packaging/cmake/Modules/NetdataJSONC.cmake | 2 +-
 packaging/cmake/Modules/NetdataLibBPF.cmake | 2 +-
 packaging/cmake/Modules/NetdataUtil.cmake | 75 +
 packaging/cmake/Modules/Packaging.cmake | 45 +-
 packaging/cmake/config.cmake.h.in | 15 +-
 packaging/cmake/pkg-files/deb/dashboard/postinst | 11 +
 packaging/cmake/pkg-files/deb/dashboard/postrm | 17 +
 packaging/cmake/pkg-files/deb/dashboard/preinst | 11 +
 packaging/cmake/pkg-files/deb/netdata/postinst | 4 -
 packaging/cmake/pkg-files/deb/netdata/postrm | 8 -
 packaging/dag/files/child_stream.conf | 4 +-
 packaging/dag/files/parent_stream.conf | 4 +-
 packaging/dag/imageutils.py | 1 -
 packaging/docker/Dockerfile | 11 +-
 packaging/docker/README.md | 16 +-
 packaging/docker/run.sh | 10 -
 packaging/installer/README.md | 145 +-
 packaging/installer/REINSTALL.md | 65 -
 packaging/installer/UNINSTALL.md | 97 +-
 packaging/installer/UPDATE.md | 171 +-
 packaging/installer/functions.sh | 14 +-
 packaging/installer/install-required-packages.sh | 31 +-
 packaging/installer/installer.nsi | 128 -
 packaging/installer/kickstart.sh | 209 +-
 packaging/installer/methods/ansible.md | 32 +-
 packaging/installer/methods/aws.md | 21 +-
 packaging/installer/methods/azure.md | 25 +-
 packaging/installer/methods/freebsd.md | 36 +-
 packaging/installer/methods/gcp.md | 24 +-
 packaging/installer/methods/kickstart.md | 230 +-
 packaging/installer/methods/kubernetes.md | 108 +-
 packaging/installer/methods/macos.md | 50 +-
 packaging/installer/methods/manual.md | 132 +-
 packaging/installer/methods/methods.md | 14 +-
 packaging/installer/methods/no_ipv4.md | 13 +
 packaging/installer/methods/offline.md | 13 +-
 packaging/installer/methods/packages.md | 20 +-
 packaging/installer/methods/pfsense.md | 21 +-
 packaging/installer/methods/source.md | 58 +-
 packaging/installer/methods/synology.md | 32 +-
 packaging/installer/methods/systems.md | 9 -
 packaging/installer/netdata-updater.sh | 4 +-
 packaging/maintainers/README.md | 69 +-
 packaging/makeself/README.md | 10 -
 packaging/makeself/jobs/70-netdata-git.install.sh | 1 -
 packaging/makeself/jobs/99-makeself.install.sh | 6 +-
 packaging/repoconfig/CMakeLists.txt | 4 +-
 packaging/repoconfig/deb.changelog | 12 +
 packaging/repoconfig/netdata.repo.dnf | 8 +-
 packaging/repoconfig/netdata.repo.zypp | 8 +-
 packaging/repoconfig/netdata.sources.in | 8 +-
 packaging/repoconfig/rpm.changelog | 6 +-
 packaging/utils/compile-and-run-windows.sh | 92 +
 packaging/version | 2 +-
 packaging/windows/BackGround.bmp | Bin 0 -> 724270 bytes
 packaging/windows/Top.bmp | Bin 0 -> 128378 bytes
 packaging/windows/WINDOWS_INSTALLER.md | 64 +
 packaging/windows/clion-msys-msys-environment.bat | 4 +-
 packaging/windows/compile-on-windows.sh | 2 +-
 packaging/windows/find-sdk-path.sh | 217 +
 packaging/windows/get-convert-licenses.sh | 29 +
 packaging/windows/get-win-build-path.sh | 8 +
 packaging/windows/gpl-3.0.rtf | 679 +
 packaging/windows/install-dependencies.ps1 | 21 +
 packaging/windows/installer.nsi | 317 +-
 packaging/windows/msi-extension.bat | 2 +
 packaging/windows/msys2-dependencies.sh | 22 +-
 packaging/windows/ncul1.rtf | 47 +
 packaging/windows/netdata.wxs.in | 276 +
 packaging/windows/package-windows.sh | 35 +-
 packaging/windows/package.ps1 | 23 +
 packaging/windows/resources/netdata.manifest.in | 29 +
 packaging/windows/resources/netdata.rc | 3 +
 .../windows/resources/netdata_claim.manifest.in | 16 +
 packaging/windows/resources/netdata_claim.rc | 3 +
 packaging/windows/resources/netdatacli.manifest.in | 16 +
 packaging/windows/resources/netdatacli.rc | 3 +
 packaging/windows/win-build-dir.sh | 6 +-
 src/aclk/README.md | 106 +-
 src/aclk/aclk-schemas/.gitignore | 11 +
 src/aclk/aclk-schemas/.travis.yml | 4 +
 src/aclk/aclk-schemas/LICENSE | 674 +
 src/aclk/aclk-schemas/Makefile | 74 +
 src/aclk/aclk-schemas/README.md | 2 +
 src/aclk/aclk-schemas/buf.yml | 9 +
 src/aclk/aclk-schemas/proto/aclk/v1/lib.proto | 22 +
 src/aclk/aclk-schemas/proto/agent/v1/cmds.proto | 79 +
 .../aclk-schemas/proto/agent/v1/connection.proto | 71 +
 .../aclk-schemas/proto/agent/v1/disconnect.proto | 16 +
 src/aclk/aclk-schemas/proto/alarm/v1/config.proto | 61 +
 src/aclk/aclk-schemas/proto/alarm/v1/stream.proto | 151 +
 src/aclk/aclk-schemas/proto/chart/v1/config.proto | 37 +
 .../aclk-schemas/proto/chart/v1/dimension.proto | 24 +
 .../aclk-schemas/proto/chart/v1/instance.proto | 32 +
 src/aclk/aclk-schemas/proto/chart/v1/stream.proto | 86 +
 .../aclk-schemas/proto/context/v1/context.proto | 57 +
 .../aclk-schemas/proto/context/v1/stream.proto | 34 +
 .../nodeinstance/connection/v1/connection.proto | 37 +
 .../proto/nodeinstance/create/v1/creation.proto | 32 +
 .../proto/nodeinstance/info/v1/info.proto | 148 +
 src/aclk/aclk.c | 345 +-
 src/aclk/aclk.h | 31 +-
 src/aclk/aclk_alarm_api.c | 9 -
 src/aclk/aclk_alarm_api.h | 1 -
 src/aclk/aclk_capas.c | 14 +-
 src/aclk/aclk_capas.h | 1 +
 src/aclk/aclk_otp.c | 67 +-
 src/aclk/aclk_proxy.c | 12 +-
 src/aclk/aclk_query.c | 228 +-
 src/aclk/aclk_query.h | 29 +-
 src/aclk/aclk_query_queue.c | 111 +-
 src/aclk/aclk_query_queue.h | 32 +-
 src/aclk/aclk_rrdhost_state.h | 11 -
 src/aclk/aclk_rx_msgs.c | 43 +-
 src/aclk/aclk_rx_msgs.h | 7 +-
 src/aclk/aclk_stats.c | 483 -
 src/aclk/aclk_stats.h | 77 -
 src/aclk/aclk_tx_msgs.c | 85 +-
 src/aclk/aclk_tx_msgs.h | 4 +-
 src/aclk/aclk_util.c | 66 +-
 src/aclk/aclk_util.h | 14 +-
 src/aclk/helpers/mqtt_wss_pal.h | 13 -
 src/aclk/helpers/ringbuffer_pal.h | 11 -
 src/aclk/https_client.c | 6 +-
 src/aclk/https_client.h | 3 -
 .../.github/workflows/run-tests.yaml | 14 -
 src/aclk/mqtt_websockets/.gitignore | 10 -
 src/aclk/mqtt_websockets/README.md | 2 +-
 src/aclk/mqtt_websockets/c-rbuf/cringbuffer.c | 203 -
 src/aclk/mqtt_websockets/c-rbuf/cringbuffer.h | 47 -
 .../mqtt_websockets/c-rbuf/cringbuffer_internal.h | 37 -
 src/aclk/mqtt_websockets/c-rbuf/ringbuffer_test.c | 485 -
 src/aclk/mqtt_websockets/c_rhash/c_rhash.c | 264 -
 src/aclk/mqtt_websockets/c_rhash/c_rhash.h | 61 -
 .../mqtt_websockets/c_rhash/c_rhash_internal.h | 19 -
 src/aclk/mqtt_websockets/c_rhash/tests.c | 273 -
 src/aclk/mqtt_websockets/common_internal.h | 17 +-
 src/aclk/mqtt_websockets/common_public.h | 2 +
 src/aclk/mqtt_websockets/mqtt_ng.c | 541 +-
 src/aclk/mqtt_websockets/mqtt_ng.h | 10 +-
 src/aclk/mqtt_websockets/mqtt_wss_client.c | 360 +-
 src/aclk/mqtt_websockets/mqtt_wss_client.h | 54 +-
 src/aclk/mqtt_websockets/mqtt_wss_log.c | 130 -
 src/aclk/mqtt_websockets/mqtt_wss_log.h | 39 -
 src/aclk/mqtt_websockets/ws_client.c | 262 +-
 src/aclk/mqtt_websockets/ws_client.h | 17 +-
 src/claim/README.md | 250 +-
 src/claim/claim-with-api.c | 486 +
 src/claim/claim.c | 555 +-
 src/claim/claim.h | 45 +-
 src/claim/claim_id.c | 123 +
 src/claim/claim_id.h | 28 +
 src/claim/cloud-conf.c | 117 +
 src/claim/cloud-status.c | 134 +
 src/claim/cloud-status.h | 26 +
 src/claim/main.c | 305 +
 src/claim/main.h | 19 +
 src/claim/netdata-claim.sh.in | 514 +-
 src/claim/ui.c | 110 +
 src/claim/ui.h | 12 +
 src/cli/README.md | 48 +-
 src/cli/cli.c | 1 -
 src/collectors/COLLECTORS.md | 42 +-
 src/collectors/README.md | 82 +-
 src/collectors/REFERENCE.md | 99 +-
 src/collectors/all.h | 55 +-
 src/collectors/apps.plugin/README.md | 450 +-
 src/collectors/apps.plugin/apps_aggregations.c | 250 +
 src/collectors/apps.plugin/apps_functions.c | 395 +-
 src/collectors/apps.plugin/apps_groups.conf | 521 +-
 .../apps.plugin/apps_incremental_collection.c | 187 +
 src/collectors/apps.plugin/apps_os_freebsd.c | 368 +
 src/collectors/apps.plugin/apps_os_linux.c | 770 +
 src/collectors/apps.plugin/apps_os_macos.c | 334 +
 src/collectors/apps.plugin/apps_os_windows.c | 1011 +
 src/collectors/apps.plugin/apps_os_windows_nt.c | 41 +
 src/collectors/apps.plugin/apps_output.c | 346 +-
 src/collectors/apps.plugin/apps_pid.c | 927 +
 src/collectors/apps.plugin/apps_pid_files.c | 450 +
 src/collectors/apps.plugin/apps_pid_match.c | 90 +
 src/collectors/apps.plugin/apps_plugin.c | 722 +-
 src/collectors/apps.plugin/apps_plugin.h | 771 +-
 src/collectors/apps.plugin/apps_proc_meminfo.c | 68 -
 src/collectors/apps.plugin/apps_proc_pid_cmdline.c | 130 -
 src/collectors/apps.plugin/apps_proc_pid_fd.c | 753 -
 src/collectors/apps.plugin/apps_proc_pid_io.c | 95 -
 src/collectors/apps.plugin/apps_proc_pid_limits.c | 151 -
 src/collectors/apps.plugin/apps_proc_pid_stat.c | 293 -
 src/collectors/apps.plugin/apps_proc_pid_status.c | 192 -
 src/collectors/apps.plugin/apps_proc_pids.c | 720 -
 src/collectors/apps.plugin/apps_proc_stat.c | 154 -
 src/collectors/apps.plugin/apps_targets.c | 464 +-
 src/collectors/apps.plugin/apps_users_and_groups.c | 206 -
 src/collectors/apps.plugin/busy_threads.c | 76 +
 src/collectors/cgroups.plugin/README.md | 9 -
 src/collectors/cgroups.plugin/cgroup-discovery.c | 6 +-
 src/collectors/cgroups.plugin/cgroup-internals.h | 8 +-
 src/collectors/cgroups.plugin/cgroup-name.sh.in | 4 +-
 src/collectors/cgroups.plugin/cgroup-network.c | 257 +-
 src/collectors/cgroups.plugin/cgroup-top.c | 4 +-
 src/collectors/cgroups.plugin/sys_fs_cgroup.c | 37 +-
 src/collectors/charts.d.plugin/README.md | 9 +-
 src/collectors/charts.d.plugin/apcupsd/README.md | 1 -
 .../charts.d.plugin/apcupsd/apcupsd.chart.sh | 306 -
 .../charts.d.plugin/apcupsd/apcupsd.conf | 25 -
 .../apcupsd/integrations/apc_ups.md | 237 -
 .../charts.d.plugin/apcupsd/metadata.yaml | 256 -
 src/collectors/charts.d.plugin/example/README.md | 9 -
 .../libreswan/integrations/libreswan.md | 6 +-
 .../opensips/integrations/opensips.md | 6 +-
 src/collectors/charts.d.plugin/sensors/README.md | 1 -
 .../sensors/integrations/linux_sensors_sysfs.md | 235 -
 .../charts.d.plugin/sensors/metadata.yaml | 182 -
 .../charts.d.plugin/sensors/sensors.chart.sh | 250 -
 .../charts.d.plugin/sensors/sensors.conf | 32 -
 src/collectors/checks.plugin/README.md | 12 -
 src/collectors/common-contexts/common-contexts.h | 26 +-
 src/collectors/common-contexts/disk-avgsz.h | 44 +
 src/collectors/common-contexts/disk-await.h | 44 +
 src/collectors/common-contexts/disk-busy.h | 41 +
 src/collectors/common-contexts/disk-io.h | 44 +
 src/collectors/common-contexts/disk-iotime.h | 44 +
 src/collectors/common-contexts/disk-ops.h | 44 +
 src/collectors/common-contexts/disk-qops.h | 41 +
 src/collectors/common-contexts/disk-svctm.h | 41 +
 src/collectors/common-contexts/disk-util.h | 41 +
 src/collectors/common-contexts/disk.io.h | 44 -
 src/collectors/common-contexts/mem-available.h | 35 +
 src/collectors/common-contexts/mem-pgfaults.h | 38 +
 src/collectors/common-contexts/mem-swap.h | 41 +
 src/collectors/common-contexts/mem.available.h | 35 -
 src/collectors/common-contexts/mem.pgfaults.h | 40 -
 src/collectors/common-contexts/mem.swap.h | 43 -
 src/collectors/common-contexts/system-interrupts.h | 37 +
 src/collectors/common-contexts/system-io.h | 38 +
 src/collectors/common-contexts/system-ipc.h | 34 +
 src/collectors/common-contexts/system-processes.h | 115 +
 src/collectors/common-contexts/system-ram.h | 68 +
 src/collectors/common-contexts/system.interrupts.h | 39 -
 src/collectors/common-contexts/system.io.h | 38 -
 src/collectors/common-contexts/system.ipc.h | 34 -
 src/collectors/common-contexts/system.processes.h | 115 -
 src/collectors/common-contexts/system.ram.h | 68 -
 src/collectors/cups.plugin/cups_plugin.c | 6 +-
 src/collectors/cups.plugin/integrations/cups.md | 4 +-
 src/collectors/debugfs.plugin/debugfs_extfrag.c | 31 +-
 src/collectors/debugfs.plugin/debugfs_plugin.c | 6 +-
 .../debugfs.plugin/integrations/linux_zswap.md | 4 +-
 .../debugfs.plugin/integrations/power_capping.md | 4 +-
 .../integrations/system_memory_fragmentation.md | 4 +-
 .../diskspace.plugin/integrations/disk_space.md | 6 +-
 src/collectors/diskspace.plugin/metadata.yaml | 2 +-
 src/collectors/diskspace.plugin/plugin_diskspace.c | 22 +-
 src/collectors/ebpf.plugin/README.md | 468 +-
 src/collectors/ebpf.plugin/ebpf.c | 59 +-
 src/collectors/ebpf.plugin/ebpf_apps.c | 11 +-
 src/collectors/ebpf.plugin/ebpf_apps.h | 2 +-
 src/collectors/ebpf.plugin/ebpf_cachestat.c | 24 +-
 src/collectors/ebpf.plugin/ebpf_cgroup.c | 5 +-
 src/collectors/ebpf.plugin/ebpf_dcstat.c | 24 +-
 src/collectors/ebpf.plugin/ebpf_disk.c | 10 +-
 src/collectors/ebpf.plugin/ebpf_fd.c | 25 +-
 src/collectors/ebpf.plugin/ebpf_filesystem.c | 12 +-
 src/collectors/ebpf.plugin/ebpf_functions.c | 6 +-
 src/collectors/ebpf.plugin/ebpf_hardirq.c | 12 +-
 src/collectors/ebpf.plugin/ebpf_mdflush.c | 12 +-
 src/collectors/ebpf.plugin/ebpf_mount.c | 10 +-
 src/collectors/ebpf.plugin/ebpf_oomkill.c | 12 +-
 src/collectors/ebpf.plugin/ebpf_process.c | 14 +-
 src/collectors/ebpf.plugin/ebpf_shm.c | 27 +-
 src/collectors/ebpf.plugin/ebpf_socket.c | 37 +-
 src/collectors/ebpf.plugin/ebpf_socket.h | 4 +-
 src/collectors/ebpf.plugin/ebpf_softirq.c | 10 +-
 src/collectors/ebpf.plugin/ebpf_swap.c | 27 +-
 src/collectors/ebpf.plugin/ebpf_sync.c | 12 +-
 src/collectors/ebpf.plugin/ebpf_vfs.c | 20 +-
 .../ebpf.plugin/integrations/ebpf_cachestat.md | 4 +-
 .../ebpf.plugin/integrations/ebpf_dcstat.md | 4 +-
 .../ebpf.plugin/integrations/ebpf_disk.md | 4 +-
 .../integrations/ebpf_filedescriptor.md | 4 +-
 .../ebpf.plugin/integrations/ebpf_filesystem.md | 4 +-
 .../ebpf.plugin/integrations/ebpf_hardirq.md | 4 +-
 .../ebpf.plugin/integrations/ebpf_mdflush.md | 4 +-
 .../ebpf.plugin/integrations/ebpf_mount.md | 4 +-
 .../ebpf.plugin/integrations/ebpf_oomkill.md | 4 +-
 .../ebpf.plugin/integrations/ebpf_processes.md | 4 +-
 .../ebpf.plugin/integrations/ebpf_shm.md | 4 +-
 .../ebpf.plugin/integrations/ebpf_socket.md | 4 +-
 .../ebpf.plugin/integrations/ebpf_softirq.md | 4 +-
 .../ebpf.plugin/integrations/ebpf_swap.md | 4 +-
 .../ebpf.plugin/integrations/ebpf_sync.md | 4 +-
 .../ebpf.plugin/integrations/ebpf_vfs.md | 4 +-
 src/collectors/freebsd.plugin/README.md | 11 -
 src/collectors/freebsd.plugin/freebsd_devstat.c | 14 -
 src/collectors/freebsd.plugin/freebsd_getifaddrs.c | 10 -
 src/collectors/freebsd.plugin/freebsd_ipfw.c | 1 -
 src/collectors/freebsd.plugin/freebsd_sysctl.c | 27 -
 .../freebsd.plugin/integrations/dev.cpu.0.freq.md | 4 +-
 .../integrations/dev.cpu.temperature.md | 4 +-
 .../freebsd.plugin/integrations/devstat.md | 4 +-
 .../freebsd.plugin/integrations/getifaddrs.md | 4 +-
 .../freebsd.plugin/integrations/getmntinfo.md | 4 +-
 .../freebsd.plugin/integrations/hw.intrcnt.md | 4 +-
 src/collectors/freebsd.plugin/integrations/ipfw.md | 4 +-
 .../freebsd.plugin/integrations/kern.cp_time.md | 4 +-
 .../freebsd.plugin/integrations/kern.ipc.msq.md | 4 +-
 .../freebsd.plugin/integrations/kern.ipc.sem.md | 4 +-
 .../freebsd.plugin/integrations/kern.ipc.shm.md | 4 +-
 .../integrations/net.inet.icmp.stats.md | 4 +-
 .../integrations/net.inet.ip.stats.md | 4 +-
 .../integrations/net.inet.tcp.states.md | 4 +-
 .../integrations/net.inet.tcp.stats.md | 4 +-
 .../integrations/net.inet.udp.stats.md | 4 +-
 .../integrations/net.inet6.icmp6.stats.md | 4 +-
 .../integrations/net.inet6.ip6.stats.md | 4 +-
 .../freebsd.plugin/integrations/net.isr.md | 4 +-
 .../freebsd.plugin/integrations/system.ram.md | 4 +-
 .../freebsd.plugin/integrations/uptime.md | 4 +-
 .../freebsd.plugin/integrations/vm.loadavg.md | 4 +-
 .../integrations/vm.stats.sys.v_intr.md | 4 +-
 .../integrations/vm.stats.sys.v_soft.md | 4 +-
 .../integrations/vm.stats.sys.v_swtch.md | 4 +-
 .../integrations/vm.stats.vm.v_pgfaults.md | 4 +-
 .../integrations/vm.stats.vm.v_swappgs.md | 4 +-
 .../freebsd.plugin/integrations/vm.swap_info.md | 4 +-
 .../freebsd.plugin/integrations/vm.vmtotal.md | 4 +-
 src/collectors/freebsd.plugin/integrations/zfs.md | 4 +-
 src/collectors/freebsd.plugin/plugin_freebsd.c | 5 +-
 src/collectors/freeipmi.plugin/freeipmi_plugin.c | 23 +-
 ...telligent_platform_management_interface_ipmi.md | 4 +-
 .../integrations/idle_os_jitter.md | 6 +-
 src/collectors/idlejitter.plugin/metadata.yaml | 4 +-
 .../idlejitter.plugin/plugin_idlejitter.c | 4 +-
 .../ioping.plugin/integrations/ioping.md | 4 +-
 src/collectors/log2journal/README.md | 37 +-
 .../log2journal/log2journal-hashed-key.h | 80 +
 src/collectors/log2journal/log2journal-help.c | 2 +-
 src/collectors/log2journal/log2journal-inject.c | 11 +-
 src/collectors/log2journal/log2journal-params.c | 58 +-
 src/collectors/log2journal/log2journal-pattern.c | 4 +-
 src/collectors/log2journal/log2journal-pcre2.c | 9 +-
 src/collectors/log2journal/log2journal-rename.c | 6 +-
 src/collectors/log2journal/log2journal-replace.c | 12 +-
 src/collectors/log2journal/log2journal-rewrite.c | 7 +-
 src/collectors/log2journal/log2journal-txt.h | 90 +
 src/collectors/log2journal/log2journal-yaml.c | 301 +-
 src/collectors/log2journal/log2journal.c | 64 +-
 src/collectors/log2journal/log2journal.h | 251 +-
 src/collectors/macos.plugin/integrations/macos.md | 4 +-
 src/collectors/macos.plugin/macos_fw.c | 10 -
 src/collectors/macos.plugin/macos_mach_smi.c | 1 -
 src/collectors/macos.plugin/macos_sysctl.c | 11 -
 src/collectors/macos.plugin/plugin_macos.c | 5 +-
 .../network-viewer.plugin/network-viewer.c | 48 +-
 .../nfacct.plugin/integrations/netfilter.md | 4 +-
 src/collectors/nfacct.plugin/plugin_nfacct.c | 6 +-
 .../perf.plugin/integrations/cpu_performance.md | 10 +-
 src/collectors/perf.plugin/metadata.yaml | 2 +-
 src/collectors/perf.plugin/perf_plugin.c | 49 +-
 src/collectors/plugins.d/README.md | 875 -
 src/collectors/plugins.d/functions-table.md | 418 -
 src/collectors/plugins.d/gperf-config.txt | 112 -
 src/collectors/plugins.d/gperf-hashtable.h | 237 -
 src/collectors/plugins.d/local_listeners.c | 316 -
 src/collectors/plugins.d/ndsudo.c | 458 -
 src/collectors/plugins.d/plugins_d.c | 350 -
 src/collectors/plugins.d/plugins_d.h | 54 -
 src/collectors/plugins.d/pluginsd_dyncfg.c | 69 -
 src/collectors/plugins.d/pluginsd_dyncfg.h | 11 -
 src/collectors/plugins.d/pluginsd_functions.c | 412 -
 src/collectors/plugins.d/pluginsd_functions.h | 48 -
 src/collectors/plugins.d/pluginsd_internals.c | 120 -
 src/collectors/plugins.d/pluginsd_internals.h | 355 -
 src/collectors/plugins.d/pluginsd_parser.c | 1402 -
 src/collectors/plugins.d/pluginsd_parser.h | 244 -
 src/collectors/plugins.d/pluginsd_replication.c | 371 -
 src/collectors/plugins.d/pluginsd_replication.h | 14 -
 src/collectors/proc.plugin/README.md | 417 +-
 .../proc.plugin/integrations/system_statistics.md | 4 +-
 src/collectors/proc.plugin/ipc.c | 8 +-
 src/collectors/proc.plugin/plugin_proc.c | 7 +-
 src/collectors/proc.plugin/proc_diskstats.c | 422 +-
 src/collectors/proc.plugin/proc_mdstat.c | 2 +-
 src/collectors/proc.plugin/proc_meminfo.c | 19 -
 src/collectors/proc.plugin/proc_net_dev.c | 34 +-
 src/collectors/proc.plugin/proc_net_netstat.c | 25 -
 src/collectors/proc.plugin/proc_net_rpc_nfs.c | 3 -
 src/collectors/proc.plugin/proc_net_rpc_nfsd.c | 3 -
 src/collectors/proc.plugin/proc_net_sctp_snmp.c | 4 -
 src/collectors/proc.plugin/proc_net_sockstat.c | 2 +-
 .../proc.plugin/proc_net_stat_conntrack.c | 6 +-
 src/collectors/proc.plugin/proc_net_wireless.c | 11 +-
 src/collectors/proc.plugin/proc_pressure.c | 5 +-
 src/collectors/proc.plugin/proc_spl_kstat_zfs.c | 2 +-
 src/collectors/proc.plugin/proc_stat.c | 7 +-
 src/collectors/proc.plugin/proc_uptime.c | 2 +-
 src/collectors/proc.plugin/proc_vmstat.c | 4 -
 src/collectors/proc.plugin/sys_class_drm.c | 2 +-
 src/collectors/proc.plugin/sys_class_infiniband.c | 17 +-
 .../proc.plugin/sys_class_power_supply.c | 2 +-
 src/collectors/proc.plugin/sys_devices_pci_aer.c | 2 +-
 .../proc.plugin/sys_devices_system_edac_mc.c | 2 +-
 .../proc.plugin/sys_devices_system_node.c | 322 +-
 src/collectors/proc.plugin/sys_fs_btrfs.c | 16 +-
 src/collectors/profile.plugin/README.md | 24 +-
 src/collectors/profile.plugin/plugin_profile.cc | 10 +-
 src/collectors/python.d.plugin/README.md | 35 +-
 .../python.d.plugin/am2320/integrations/am2320.md | 4 +-
 src/collectors/python.d.plugin/anomalies/README.md | 248 -
 .../python.d.plugin/anomalies/anomalies.chart.py | 425 -
 .../python.d.plugin/anomalies/anomalies.conf | 184 -
 .../python.d.plugin/anomalies/metadata.yaml | 87 -
 src/collectors/python.d.plugin/boinc/README.md | 1 -
 .../python.d.plugin/boinc/boinc.chart.py | 168 -
 src/collectors/python.d.plugin/boinc/boinc.conf | 66 -
 .../python.d.plugin/boinc/integrations/boinc.md | 238 -
 src/collectors/python.d.plugin/boinc/metadata.yaml | 198 -
 src/collectors/python.d.plugin/ceph/README.md | 1 -
 src/collectors/python.d.plugin/ceph/ceph.chart.py | 374 -
 src/collectors/python.d.plugin/ceph/ceph.conf | 75 -
 .../python.d.plugin/ceph/integrations/ceph.md | 228 -
 src/collectors/python.d.plugin/ceph/metadata.yaml | 223 -
 .../integrations/go_applications_expvar.md | 14 +-
 .../python.d.plugin/go_expvar/metadata.yaml | 6 +-
 src/collectors/python.d.plugin/haproxy/README.md | 9 -
 src/collectors/python.d.plugin/openldap/README.md | 1 -
 .../openldap/integrations/openldap.md | 249 -
 .../python.d.plugin/openldap/metadata.yaml | 225 -
 .../python.d.plugin/openldap/openldap.chart.py | 216 -
 .../python.d.plugin/openldap/openldap.conf | 75 -
 src/collectors/python.d.plugin/oracledb/README.md | 1 -
 .../oracledb/integrations/oracle_db.md | 260 -
 .../python.d.plugin/oracledb/metadata.yaml | 309 -
 .../python.d.plugin/oracledb/oracledb.chart.py | 846 -
 .../python.d.plugin/oracledb/oracledb.conf | 88 -
 .../python.d.plugin/pandas/integrations/pandas.md | 8 +-
 src/collectors/python.d.plugin/python.d.conf | 17 +-
 src/collectors/python.d.plugin/python.d.plugin.in | 3 +-
 .../python_modules/bases/loaders.py | 14 +-
 .../python_modules/pyyaml2/__init__.py | 316 -
 .../python_modules/pyyaml2/composer.py | 140 -
 .../python_modules/pyyaml2/constructor.py | 676 -
 .../python_modules/pyyaml2/cyaml.py | 86 -
 .../python_modules/pyyaml2/dumper.py | 63 -
 .../python_modules/pyyaml2/emitter.py | 1141 -
 .../python_modules/pyyaml2/error.py | 76 -
 .../python_modules/pyyaml2/events.py | 87 -
 .../python_modules/pyyaml2/loader.py | 41 -
 .../python_modules/pyyaml2/nodes.py | 50 -
 .../python_modules/pyyaml2/parser.py | 590 -
 .../python_modules/pyyaml2/reader.py | 191 -
 .../python_modules/pyyaml2/representer.py | 485 -
 .../python_modules/pyyaml2/resolver.py | 225 -
 .../python_modules/pyyaml2/scanner.py | 1458 -
 .../python_modules/pyyaml2/serializer.py | 112 -
 .../python_modules/pyyaml2/tokens.py | 105 -
 .../python_modules/pyyaml3/__init__.py | 313 -
 .../python_modules/pyyaml3/composer.py | 140 -
 .../python_modules/pyyaml3/constructor.py | 687 -
 .../python_modules/pyyaml3/cyaml.py | 86 -
 .../python_modules/pyyaml3/dumper.py | 63 -
 .../python_modules/pyyaml3/emitter.py | 1138 -
 .../python_modules/pyyaml3/error.py | 76 -
 .../python_modules/pyyaml3/events.py | 87 -
 .../python_modules/pyyaml3/loader.py | 41 -
 .../python_modules/pyyaml3/nodes.py | 50 -
 .../python_modules/pyyaml3/parser.py | 590 -
 .../python_modules/pyyaml3/reader.py | 193 -
 .../python_modules/pyyaml3/representer.py | 375 -
 .../python_modules/pyyaml3/resolver.py | 225 -
 .../python_modules/pyyaml3/scanner.py | 1449 -
 .../python_modules/pyyaml3/serializer.py | 112 -
 .../python_modules/pyyaml3/tokens.py | 105 -
 .../python_modules/third_party/__init__.py | 0
 .../python_modules/third_party/boinc_client.py | 515 -
 .../python_modules/third_party/filelock.py | 451 -
 .../python_modules/third_party/mcrcon.py | 74 -
 .../python_modules/third_party/monotonic.py | 201 -
 .../python_modules/third_party/ordereddict.py | 110 -
 .../python_modules/urllib3/__init__.py | 98 -
 .../python_modules/urllib3/_collections.py | 320 -
 .../python_modules/urllib3/connection.py | 374 -
 .../python_modules/urllib3/connectionpool.py | 900 -
 .../python_modules/urllib3/contrib/__init__.py | 0
 .../urllib3/contrib/_securetransport/__init__.py | 0
 .../urllib3/contrib/_securetransport/bindings.py | 591 -
 .../urllib3/contrib/_securetransport/low_level.py | 344 -
 .../python_modules/urllib3/contrib/appengine.py | 297 -
 .../python_modules/urllib3/contrib/ntlmpool.py | 113 -
 .../python_modules/urllib3/contrib/pyopenssl.py | 458 -
 .../urllib3/contrib/securetransport.py | 808 -
 .../python_modules/urllib3/contrib/socks.py | 189 -
 .../python_modules/urllib3/exceptions.py | 247 -
 .../python_modules/urllib3/fields.py | 179 -
 .../python_modules/urllib3/filepost.py | 95 -
 .../python_modules/urllib3/packages/__init__.py | 5 -
 .../urllib3/packages/backports/__init__.py | 0
 .../urllib3/packages/backports/makefile.py | 54 -
 .../urllib3/packages/ordered_dict.py | 260 -
 .../python_modules/urllib3/packages/six.py | 852 -
 .../packages/ssl_match_hostname/__init__.py | 20 -
 .../packages/ssl_match_hostname/_implementation.py | 156 -
 .../python_modules/urllib3/poolmanager.py | 441 -
 .../python_modules/urllib3/request.py | 149 -
 .../python_modules/urllib3/response.py | 623 -
 .../python_modules/urllib3/util/__init__.py | 55 -
 .../python_modules/urllib3/util/connection.py | 131 -
 .../python_modules/urllib3/util/request.py | 119 -
 .../python_modules/urllib3/util/response.py | 82 -
 .../python_modules/urllib3/util/retry.py | 402 -
 .../python_modules/urllib3/util/selectors.py | 588 -
 .../python_modules/urllib3/util/ssl_.py | 338 -
 .../python_modules/urllib3/util/timeout.py | 243 -
 .../python_modules/urllib3/util/url.py | 231 -
 .../python_modules/urllib3/util/wait.py | 41 -
 src/collectors/python.d.plugin/samba/README.md | 1 -
 .../python.d.plugin/samba/integrations/samba.md | 255 -
 src/collectors/python.d.plugin/samba/metadata.yaml | 205 -
 .../python.d.plugin/samba/samba.chart.py | 144 -
 src/collectors/python.d.plugin/samba/samba.conf | 60 -
 src/collectors/python.d.plugin/spigotmc/README.md | 1 -
 .../spigotmc/integrations/spigotmc.md | 250 -
 .../python.d.plugin/spigotmc/metadata.yaml | 176 -
 .../python.d.plugin/spigotmc/spigotmc.chart.py | 184 -
 .../python.d.plugin/spigotmc/spigotmc.conf | 66 -
 src/collectors/python.d.plugin/traefik/README.md | 9 -
 src/collectors/python.d.plugin/varnish/README.md | 1 -
 .../varnish/integrations/varnish.md | 247 -
 .../python.d.plugin/varnish/metadata.yaml | 253 -
 .../python.d.plugin/varnish/varnish.chart.py | 385 -
 .../python.d.plugin/varnish/varnish.conf | 66 -
 src/collectors/python.d.plugin/w1sensor/README.md | 1 -
 .../w1sensor/integrations/1-wire_sensors.md | 201 -
 .../python.d.plugin/w1sensor/metadata.yaml | 119 -
 .../python.d.plugin/w1sensor/w1sensor.chart.py | 97 -
 .../python.d.plugin/w1sensor/w1sensor.conf | 72 -
 src/collectors/python.d.plugin/zscores/README.md | 1 -
 .../zscores/integrations/python.d_zscores.md | 229 -
 .../python.d.plugin/zscores/metadata.yaml | 187 -
 .../python.d.plugin/zscores/zscores.chart.py | 146 -
 .../python.d.plugin/zscores/zscores.conf | 108 -
 .../linux_kernel_slab_allocator_statistics.md | 4 +-
 src/collectors/slabinfo.plugin/slabinfo.c | 9 +-
 src/collectors/statsd.plugin/README.md | 23 +-
 src/collectors/statsd.plugin/asterisk.md | 8 -
 src/collectors/statsd.plugin/k6.md | 8 -
 src/collectors/statsd.plugin/statsd.c | 38 +-
 src/collectors/systemd-journal.plugin/README.md | 7 +-
 ...e_journal_centralization_guide_no_encryption.md | 2 +-
 .../forward_secure_sealing.md | 7 +-
 ...e_journal_centralization_guide_no_encryption.md | 4 +-
 ...urnal_centralization_guide_self_signed_certs.md | 11 +-
 .../systemd-journal.plugin/systemd-internals.h | 2 +
 .../systemd-journal-annotations.c | 30 +-
 .../systemd-journal-dyncfg.c | 4 +
 .../systemd-journal.plugin/systemd-journal-files.c | 88 +-
 .../systemd-journal-sampling.h | 378 +
 .../systemd-journal-watcher.c | 2 +-
 .../systemd-journal.plugin/systemd-journal.c | 1546 +-
 .../systemd-journal.plugin/systemd-main.c | 11 +-
 .../systemd-journal.plugin/systemd-units.c | 17 +-
 .../tc.plugin/integrations/tc_qos_classes.md | 8 +-
 src/collectors/tc.plugin/metadata.yaml | 4 +-
 src/collectors/tc.plugin/plugin_tc.c | 4 +-
 src/collectors/timex.plugin/integrations/timex.md | 4 +-
 src/collectors/timex.plugin/plugin_timex.c | 10 +-
 src/collectors/utils/local_listeners.c | 339 +
 src/collectors/utils/ndsudo.c | 494 +
 src/collectors/windows-events.plugin/README.md | 289 +
 .../windows-events-fields-cache.c | 158 +
 .../windows-events-fields-cache.h | 22 +
 .../windows-events-providers.c | 678 +
 .../windows-events-providers.h | 41 +
 .../windows-events-query-builder.c | 107 +
 .../windows-events-query-builder.h | 10 +
 .../windows-events-query-evt-variant.c | 354 +
 .../windows-events.plugin/windows-events-query.c | 717 +
 .../windows-events.plugin/windows-events-query.h | 296 +
 .../windows-events.plugin/windows-events-sources.c | 644 +
 .../windows-events.plugin/windows-events-sources.h | 78 +
 .../windows-events.plugin/windows-events-unicode.c | 46 +
 .../windows-events.plugin/windows-events-unicode.h | 42 +
 .../windows-events.plugin/windows-events-xml.c | 344 +
 .../windows-events.plugin/windows-events-xml.h | 12 +
 .../windows-events.plugin/windows-events.c | 1402 +
 .../windows-events.plugin/windows-events.h | 262 +
 src/collectors/windows.plugin/GetSystemUptime.c | 68 +-
 .../integrations/memory_statistics.md | 123 +
 .../integrations/system_statistics.md | 123 +
 .../integrations/system_thermal_zone.md | 121 +
 src/collectors/windows.plugin/metadata.yaml | 276 +
 src/collectors/windows.plugin/metdata.yaml | 92 -
 src/collectors/windows.plugin/perflib-dump.c | 529 -
 src/collectors/windows.plugin/perflib-hyperv.c | 1793 +
 src/collectors/windows.plugin/perflib-memory.c | 284 +-
 src/collectors/windows.plugin/perflib-mssql.c | 1413 +
 src/collectors/windows.plugin/perflib-names.c | 242 -
 .../windows.plugin/perflib-netframework.c | 796 +
 src/collectors/windows.plugin/perflib-network.c | 1500 +-
 src/collectors/windows.plugin/perflib-objects.c | 94 +-
 src/collectors/windows.plugin/perflib-processes.c | 116 +-
 src/collectors/windows.plugin/perflib-processor.c | 410 +-
 src/collectors/windows.plugin/perflib-rrd.c | 822 +-
 src/collectors/windows.plugin/perflib-rrd.h | 23 +-
 src/collectors/windows.plugin/perflib-storage.c | 949 +-
 .../windows.plugin/perflib-thermalzone.c | 103 +
 .../windows.plugin/perflib-web-service.c | 669 +
 src/collectors/windows.plugin/perflib.c | 671 -
 src/collectors/windows.plugin/perflib.h | 72 -
 src/collectors/windows.plugin/windows-internals.h | 35 +-
 src/collectors/windows.plugin/windows_plugin.c | 33 +-
 src/collectors/windows.plugin/windows_plugin.h | 77 +-
 .../xenstat.plugin/integrations/xen_xcp-ng.md | 4 +-
 src/collectors/xenstat.plugin/xenstat_plugin.c | 7 +-
 src/daemon/README.md | 79 +-
 src/daemon/analytics.c | 240 +-
 src/daemon/analytics.h | 5 +-
 src/daemon/buildinfo.c | 20 +-
 src/daemon/commands.c | 83 +-
 src/daemon/commands.h | 2 -
 src/daemon/common.c | 197 -
 src/daemon/common.h | 77 +-
 src/daemon/config/README.md | 66 +-
 src/daemon/config/dyncfg-echo.c | 6 +-
 src/daemon/config/dyncfg-intercept.c | 2 +-
 src/daemon/config/dyncfg-tree.c | 12 +-
 src/daemon/config/dyncfg-unittest.c | 6 +-
 src/daemon/config/dyncfg.c | 3 +-
 src/daemon/config/dyncfg.h | 2 +
 src/daemon/daemon.c | 61 +-
 src/daemon/daemon.h | 5 +-
 src/daemon/environment.c | 99 +
 src/daemon/event_loop.c | 66 -
 src/daemon/event_loop.h | 55 -
 src/daemon/global_statistics.c | 23 +-
 src/daemon/h2o-common.c | 60 +
 src/daemon/libuv_workers.c | 66 +
 src/daemon/libuv_workers.h | 55 +
 src/daemon/main.c | 385 +-
 src/daemon/main.h | 3 +-
 src/daemon/service.c | 12 +-
 src/daemon/signals.c | 49 +-
 src/daemon/signals.h | 7 +-
 src/daemon/static_threads.c | 2 -
 src/daemon/unit_test.c | 4 +-
 src/daemon/win_system-info.c | 37 +-
 src/daemon/winsvc.cc | 12 +-
 src/database/README.md | 2 +-
 src/database/contexts/api_v1.c | 439 -
 src/database/contexts/api_v1_contexts.c | 440 +
 src/database/contexts/api_v2.c | 2454 -
 src/database/contexts/api_v2_contexts.c | 1033 +
 src/database/contexts/api_v2_contexts.h | 98 +
 src/database/contexts/api_v2_contexts_agents.c | 162 +
 .../contexts/api_v2_contexts_alert_config.c | 135 +
 .../contexts/api_v2_contexts_alert_transitions.c | 487 +
 src/database/contexts/api_v2_contexts_alerts.c | 604 +
 src/database/contexts/api_v2_contexts_alerts.h | 52 +
 src/database/contexts/instance.c | 18 +-
 src/database/contexts/internal.h | 40 +-
 src/database/contexts/query_scope.c | 4 +-
 src/database/contexts/query_target.c | 14 +-
 src/database/contexts/rrdcontext.c | 27 +-
 src/database/contexts/rrdcontext.h | 4 +-
 src/database/contexts/worker.c | 57 +-
 src/database/engine/cache.c | 16 +-
 src/database/engine/cache.h | 11 +
 src/database/engine/datafile.c | 9 +-
 src/database/engine/datafile.h | 7 +-
 src/database/engine/dbengine-stresstest.c | 14 +-
 src/database/engine/dbengine-unittest.c | 14 +-
 src/database/engine/journalfile.c | 1 -
 src/database/engine/pdc.c | 8 +-
 src/database/engine/rrdengine.c | 6 +-
 src/database/engine/rrdengine.h | 5 +-
 src/database/ram/README.md | 10 -
 src/database/rrd.h | 142 +-
 src/database/rrddim.c | 17 +
 src/database/rrdfunctions-exporters.c | 27 +-
 src/database/rrdfunctions-exporters.h | 4 +-
 src/database/rrdfunctions-inflight.c | 30 +-
 src/database/rrdfunctions-inline.c | 7 +-
 src/database/rrdfunctions-inline.h | 4 +-
 src/database/rrdfunctions-internals.h | 3 +-
 src/database/rrdfunctions-progress.c | 8 -
 src/database/rrdfunctions-progress.h | 10 -
 src/database/rrdfunctions-streaming.c | 627 -
 src/database/rrdfunctions-streaming.h | 12 -
 src/database/rrdfunctions.c | 114 +-
 src/database/rrdfunctions.h | 10 +-
 src/database/rrdhost.c | 404 +-
 src/database/rrdlabels.c | 470 +-
 src/database/rrdlabels.h | 3 +-
 src/database/rrdset.c | 17 +-
 src/database/sqlite/sqlite3.c | 8635 +-
 src/database/sqlite/sqlite3.h | 97 +-
 src/database/sqlite/sqlite_aclk.c | 222 +-
 src/database/sqlite/sqlite_aclk.h | 14 +-
 src/database/sqlite/sqlite_aclk_alert.c | 88 +-
 src/database/sqlite/sqlite_aclk_node.c | 33 +-
 src/database/sqlite/sqlite_functions.c | 13 +-
 src/database/sqlite/sqlite_health.c | 51 +-
 src/database/sqlite/sqlite_metadata.c | 152 +-
 src/database/sqlite/sqlite_metadata.h | 11 +-
 src/exporting/README.md | 102 +-
 src/exporting/TIMESCALE.md | 19 +-
 src/exporting/WALKTHROUGH.md | 14 +-
 .../aws_kinesis/integrations/aws_kinesis.md | 4 +-
 src/exporting/clean_connectors.c | 2 -
 src/exporting/exporting_engine.c | 7 +-
 src/exporting/exporting_engine.h | 2 -
 src/exporting/graphite/graphite.c | 2 -
 src/exporting/graphite/integrations/blueflood.md | 4 +-
 src/exporting/graphite/integrations/graphite.md | 4 +-
 src/exporting/graphite/integrations/influxdb.md | 4 +-
 src/exporting/graphite/integrations/kairosdb.md | 4 +-
 src/exporting/json/integrations/json.md | 4 +-
 src/exporting/json/json.c | 2 -
 src/exporting/mongodb/integrations/mongodb.md | 6 +-
 src/exporting/mongodb/metadata.yaml | 2 +-
 src/exporting/opentsdb/integrations/opentsdb.md | 4 +-
 src/exporting/opentsdb/opentsdb.c | 4 -
 src/exporting/prometheus/README.md | 6 +-
 src/exporting/prometheus/integrations/appoptics.md | 4 +-
 .../prometheus/integrations/azure_data_explorer.md | 4 +-
 .../prometheus/integrations/azure_event_hub.md | 4 +-
 src/exporting/prometheus/integrations/chronix.md | 4 +-
 src/exporting/prometheus/integrations/cortex.md | 4 +-
 src/exporting/prometheus/integrations/cratedb.md | 4 +-
 .../prometheus/integrations/elasticsearch.md | 4 +-
 src/exporting/prometheus/integrations/gnocchi.md | 4 +-
 .../prometheus/integrations/google_bigquery.md | 4 +-
 .../prometheus/integrations/greptimedb.md | 4 +-
 src/exporting/prometheus/integrations/irondb.md | 4 +-
 src/exporting/prometheus/integrations/kafka.md | 4 +-
 src/exporting/prometheus/integrations/m3db.md | 4 +-
 .../prometheus/integrations/metricfire.md | 4 +-
 src/exporting/prometheus/integrations/new_relic.md | 4 +-
 .../prometheus/integrations/opeansearch.md | 4 +-
 .../prometheus/integrations/postgresql.md | 4 +-
 .../integrations/prometheus_remote_write.md | 4 +-
 src/exporting/prometheus/integrations/quasardb.md | 4 +-
 .../prometheus/integrations/splunk_signalfx.md | 4 +-
 src/exporting/prometheus/integrations/thanos.md | 4 +-
 src/exporting/prometheus/integrations/tikv.md | 4 +-
 .../prometheus/integrations/timescaledb.md | 4 +-
 .../prometheus/integrations/victoriametrics.md | 4 +-
 .../prometheus/integrations/vmware_aria.md | 4 +-
 src/exporting/prometheus/integrations/wavefront.md | 4 +-
 src/exporting/prometheus/prometheus.c | 71 +-
 src/exporting/prometheus/prometheus.h | 4 +-
 .../prometheus/remote_write/remote_write.c | 26 +-
 .../pubsub/integrations/google_cloud_pub_sub.md | 4 +-
 src/exporting/read_config.c | 19 +-
 src/exporting/send_data.c | 24 +-
 src/go/cmd/godplugin/main.go | 2 +-
 src/go/go.mod | 74 +-
 src/go/go.sum | 206 +-
 src/go/logger/journal_linux.go | 25 +-
 src/go/pkg/matcher/README.md | 134 +
 src/go/pkg/matcher/cache.go | 56 +
 src/go/pkg/matcher/cache_test.go | 53 +
 src/go/pkg/matcher/doc.go | 40 +
 src/go/pkg/matcher/doc_test.go | 51 +
 src/go/pkg/matcher/expr.go | 62 +
 src/go/pkg/matcher/expr_test.go | 100 +
 src/go/pkg/matcher/glob.go | 265 +
 src/go/pkg/matcher/glob_test.go | 97 +
 src/go/pkg/matcher/logical.go | 101 +
 src/go/pkg/matcher/logical_test.go | 97 +
 src/go/pkg/matcher/matcher.go | 149 +
 src/go/pkg/matcher/matcher_test.go | 122 +
 src/go/pkg/matcher/regexp.go | 60 +
 src/go/pkg/matcher/regexp_test.go | 66 +
 src/go/pkg/matcher/simple_patterns.go | 65 +
 src/go/pkg/matcher/simple_patterns_test.go | 88 +
 src/go/pkg/matcher/string.go | 48 +
 src/go/pkg/matcher/string_test.go | 62 +
 src/go/pkg/multipath/multipath.go | 90 +
 src/go/pkg/multipath/multipath_test.go | 60 +
 .../pkg/multipath/testdata/data1/test-empty.conf | 0
 src/go/pkg/multipath/testdata/data1/test.conf | 1 +
 .../pkg/multipath/testdata/data2/test-empty.conf | 0
 src/go/pkg/multipath/testdata/data2/test.conf | 1 +
 src/go/pkg/netdataapi/api.go | 213 +
 src/go/pkg/netdataapi/api_test.go | 265 +
 src/go/pkg/safewriter/writer.go | 30 +
 src/go/pkg/ticker/ticker.go | 55 +
 src/go/pkg/ticker/ticket_test.go | 50 +
 src/go/plugin/go.d/README.md | 85 +-
 src/go/plugin/go.d/agent/agent.go | 11 +-
 src/go/plugin/go.d/agent/agent_test.go | 2 +-
 src/go/plugin/go.d/agent/confgroup/config_test.go | 12 +-
 src/go/plugin/go.d/agent/config.go | 4 +-
 src/go/plugin/go.d/agent/discovery/file/parse.go | 2 +-
 .../plugin/go.d/agent/discovery/file/sim_test.go | 2 +-
 src/go/plugin/go.d/agent/discovery/sd/conffile.go | 2 +-
 .../discovery/sd/discoverer/dockerd/docker.go | 8 +-
 .../sd/discoverer/kubernetes/kubernetes.go | 2 +-
 .../sd/discoverer/kubernetes/kubernetes_test.go | 2 +-
 .../discovery/sd/discoverer/kubernetes/pod.go | 4 +-
 .../discovery/sd/discoverer/kubernetes/service.go | 5 +-
 .../discovery/sd/discoverer/netlisteners/ll.go | 62 +
 .../sd/discoverer/netlisteners/netlisteners.go | 76 +-
 .../go.d/agent/discovery/sd/pipeline/funcmap.go | 10 +-
 .../agent/discovery/sd/pipeline/pipeline_test.go | 6 +-
 .../go.d/agent/discovery/sd/pipeline/promport.go | 2 -
 src/go/plugin/go.d/agent/discovery/sd/sd.go | 2 +-
 src/go/plugin/go.d/agent/discovery/sim_test.go | 1 +
 src/go/plugin/go.d/agent/functions/manager.go | 4 +-
 src/go/plugin/go.d/agent/jobmgr/manager.go | 6 +-
 src/go/plugin/go.d/agent/jobmgr/sim_test.go | 5 +-
 src/go/plugin/go.d/agent/module/charts.go | 28 +-
 src/go/plugin/go.d/agent/module/job.go | 42 +-
 src/go/plugin/go.d/agent/module/module.go | 9 +-
 src/go/plugin/go.d/agent/netdataapi/api.go | 213 -
 src/go/plugin/go.d/agent/netdataapi/api_test.go | 265 -
 src/go/plugin/go.d/agent/safewriter/writer.go | 30 -
 src/go/plugin/go.d/agent/ticker/ticker.go | 55 -
 src/go/plugin/go.d/agent/ticker/ticket_test.go | 50 -
 src/go/plugin/go.d/agent/vnodes/vnodes.go | 17 +-
 src/go/plugin/go.d/config/go.d.conf | 14 +-
 src/go/plugin/go.d/config/go.d/apcupsd.conf | 6 +
 src/go/plugin/go.d/config/go.d/boinc.conf | 6 +
 src/go/plugin/go.d/config/go.d/ceph.conf | 6 +
 src/go/plugin/go.d/config/go.d/example.conf | 5 -
 src/go/plugin/go.d/config/go.d/maxscale.conf | 6 +
 src/go/plugin/go.d/config/go.d/nginxunit.conf | 6 +
 src/go/plugin/go.d/config/go.d/nvidia_smi.conf | 1 -
 src/go/plugin/go.d/config/go.d/openldap.conf | 8 +
 src/go/plugin/go.d/config/go.d/oracledb.conf | 9 +
 src/go/plugin/go.d/config/go.d/samba.conf | 5 +
 src/go/plugin/go.d/config/go.d/sd/docker.conf | 47 +
 .../plugin/go.d/config/go.d/sd/net_listeners.conf | 68 +
 src/go/plugin/go.d/config/go.d/spigotmc.conf | 6 +
 src/go/plugin/go.d/config/go.d/typesense.conf | 6 +
 src/go/plugin/go.d/config/go.d/varnish.conf | 5 +
 src/go/plugin/go.d/config/go.d/w1sensor.conf | 6 +
 src/go/plugin/go.d/docs/how-to-write-a-module.md | 52 +-
 src/go/plugin/go.d/examples/simple/main.go | 6 +-
 src/go/plugin/go.d/modules/activemq/activemq.go | 43 +-
 .../plugin/go.d/modules/activemq/activemq_test.go | 8 +-
 src/go/plugin/go.d/modules/activemq/apiclient.go | 70 +-
 .../go.d/modules/activemq/config_schema.json | 1 -
 src/go/plugin/go.d/modules/activemq/init.go | 3 +-
 .../go.d/modules/activemq/integrations/activemq.md | 8 +-
 src/go/plugin/go.d/modules/adaptecraid/adaptec.go | 15 +-
 .../go.d/modules/adaptecraid/adaptec_test.go | 2 +
 src/go/plugin/go.d/modules/adaptecraid/charts.go | 2 +
 src/go/plugin/go.d/modules/adaptecraid/collect.go | 2 +
 .../plugin/go.d/modules/adaptecraid/collect_ld.go | 2 +
 .../plugin/go.d/modules/adaptecraid/collect_pd.go | 2 +
 .../go.d/modules/adaptecraid/config_schema.json | 1 -
 src/go/plugin/go.d/modules/adaptecraid/doc.go | 3 +
 src/go/plugin/go.d/modules/adaptecraid/exec.go | 2 +
 src/go/plugin/go.d/modules/adaptecraid/init.go | 2 +
 .../adaptecraid/integrations/adaptec_raid.md | 9 +-
 .../plugin/go.d/modules/adaptecraid/metadata.yaml | 2 +-
 src/go/plugin/go.d/modules/ap/ap.go | 20 +-
 src/go/plugin/go.d/modules/ap/ap_test.go | 26 +-
 src/go/plugin/go.d/modules/ap/charts.go | 2 +
 src/go/plugin/go.d/modules/ap/collect.go | 2 +
 src/go/plugin/go.d/modules/ap/config_schema.json | 1 -
 src/go/plugin/go.d/modules/ap/doc.go | 3 +
 src/go/plugin/go.d/modules/ap/exec.go | 2 +
 src/go/plugin/go.d/modules/ap/init.go | 2 +
 .../go.d/modules/ap/integrations/access_points.md | 4 +-
 src/go/plugin/go.d/modules/apache/apache.go | 21 +-
 src/go/plugin/go.d/modules/apache/apache_test.go | 8 +-
 src/go/plugin/go.d/modules/apache/collect.go | 28 +-
 .../plugin/go.d/modules/apache/config_schema.json | 1 -
 src/go/plugin/go.d/modules/apache/init.go | 2 +-
 .../go.d/modules/apache/integrations/apache.md | 4 +-
 .../go.d/modules/apache/integrations/httpd.md | 4 +-
 src/go/plugin/go.d/modules/apcupsd/README.md | 1 +
 src/go/plugin/go.d/modules/apcupsd/apcupsd.go | 98 +
 src/go/plugin/go.d/modules/apcupsd/apcupsd_test.go | 283 +
 src/go/plugin/go.d/modules/apcupsd/charts.go | 224 +
 src/go/plugin/go.d/modules/apcupsd/client.go | 115 +
 src/go/plugin/go.d/modules/apcupsd/collect.go | 144 +
 .../plugin/go.d/modules/apcupsd/config_schema.json | 43 +
 .../go.d/modules/apcupsd/integrations/apc_ups.md | 240 +
 src/go/plugin/go.d/modules/apcupsd/metadata.yaml | 244 +
 src/go/plugin/go.d/modules/apcupsd/status.go | 137 +
 .../go.d/modules/apcupsd/testdata/config.json | 5 +
 .../go.d/modules/apcupsd/testdata/config.yaml | 3 +
 .../go.d/modules/apcupsd/testdata/status.txt | 56 +
 .../modules/apcupsd/testdata/status_commlost.txt | 18 +
 src/go/plugin/go.d/modules/beanstalk/beanstalk.go | 15 +-
 src/go/plugin/go.d/modules/beanstalk/client.go | 8 +-
 .../go.d/modules/beanstalk/config_schema.json | 1 -
 src/go/plugin/go.d/modules/beanstalk/init.go | 2 +-
 .../modules/beanstalk/integrations/beanstalk.md | 4 +-
 src/go/plugin/go.d/modules/bind/README.md | 10 -
 src/go/plugin/go.d/modules/bind/bind.go | 36 +-
 src/go/plugin/go.d/modules/bind/config_schema.json | 1 -
 src/go/plugin/go.d/modules/bind/init.go | 6 +-
 src/go/plugin/go.d/modules/bind/json_client.go | 44 +-
 src/go/plugin/go.d/modules/bind/xml3_client.go | 34 +-
 src/go/plugin/go.d/modules/boinc/README.md | 1 +
 src/go/plugin/go.d/modules/boinc/boinc.go | 101 +
 src/go/plugin/go.d/modules/boinc/boinc_test.go | 295 +
 src/go/plugin/go.d/modules/boinc/charts.go | 82 +
 src/go/plugin/go.d/modules/boinc/client.go | 180 +
 src/go/plugin/go.d/modules/boinc/client_proto.go | 107 +
 src/go/plugin/go.d/modules/boinc/collect.go | 80 +
 .../plugin/go.d/modules/boinc/config_schema.json | 52 +
 .../go.d/modules/boinc/integrations/boinc.md | 229 +
 src/go/plugin/go.d/modules/boinc/metadata.yaml | 171 +
 .../plugin/go.d/modules/boinc/testdata/config.json | 6 +
 .../plugin/go.d/modules/boinc/testdata/config.yaml | 4 +
 .../go.d/modules/boinc/testdata/get_results.xml | 2090 +
 .../boinc/testdata/get_results_no_tasks.xml | 3 +
 src/go/plugin/go.d/modules/cassandra/cassandra.go | 21 +-
 .../go.d/modules/cassandra/cassandra_test.go | 4 +-
 src/go/plugin/go.d/modules/cassandra/collect.go | 3 +-
 .../go.d/modules/cassandra/config_schema.json | 1 -
 src/go/plugin/go.d/modules/cassandra/init.go | 4 +-
 .../modules/cassandra/integrations/cassandra.md | 4 +-
 src/go/plugin/go.d/modules/ceph/README.md | 1 +
 src/go/plugin/go.d/modules/ceph/api.go | 126 +
 src/go/plugin/go.d/modules/ceph/auth.go | 139 +
 src/go/plugin/go.d/modules/ceph/ceph.go | 131 +
 src/go/plugin/go.d/modules/ceph/ceph_test.go | 331 +
 src/go/plugin/go.d/modules/ceph/charts.go | 576 +
 src/go/plugin/go.d/modules/ceph/collect.go | 109 +
 src/go/plugin/go.d/modules/ceph/collect_health.go | 155 +
 src/go/plugin/go.d/modules/ceph/collect_osd.go | 66 +
 src/go/plugin/go.d/modules/ceph/collect_pools.go | 58 +
 src/go/plugin/go.d/modules/ceph/config_schema.json | 185 +
 src/go/plugin/go.d/modules/ceph/init.go | 17 +
 .../plugin/go.d/modules/ceph/integrations/ceph.md | 307 +
 src/go/plugin/go.d/modules/ceph/metadata.yaml | 391 +
 .../plugin/go.d/modules/ceph/testdata/config.json | 20 +
 .../plugin/go.d/modules/ceph/testdata/config.yaml | 17 +
 .../ceph/testdata/v16.2.15/api_health_minimal.json | 105 +
 .../ceph/testdata/v16.2.15/api_monitor.json | 315 +
 .../modules/ceph/testdata/v16.2.15/api_osd.json | 930 +
 .../ceph/testdata/v16.2.15/api_pool_stats.json | 1923 +
 src/go/plugin/go.d/modules/chrony/charts.go | 143 +-
 src/go/plugin/go.d/modules/chrony/chrony.go | 60 +-
 src/go/plugin/go.d/modules/chrony/chrony_test.go | 66 +-
 src/go/plugin/go.d/modules/chrony/client.go | 152 +-
 src/go/plugin/go.d/modules/chrony/collect.go | 133 +-
 .../plugin/go.d/modules/chrony/config_schema.json | 1 -
 src/go/plugin/go.d/modules/chrony/exec.go | 46 +
 src/go/plugin/go.d/modules/chrony/init.go | 33 +
 .../go.d/modules/chrony/integrations/chrony.md | 9 +-
 src/go/plugin/go.d/modules/chrony/metadata.yaml | 23 +-
 .../plugin/go.d/modules/clickhouse/clickhouse.go | 21 +-
 .../go.d/modules/clickhouse/clickhouse_test.go | 19 +-
 src/go/plugin/go.d/modules/clickhouse/collect.go | 25 +-
 .../clickhouse/collect_system_async_metrics.go | 4 +-
 .../modules/clickhouse/collect_system_disks.go | 4 +-
 .../modules/clickhouse/collect_system_events.go | 4 +-
 .../modules/clickhouse/collect_system_metrics.go | 4 +-
 .../modules/clickhouse/collect_system_parts.go | 4 +-
 .../modules/clickhouse/collect_system_processes.go | 4 +-
 .../go.d/modules/clickhouse/config_schema.json | 1 -
 src/go/plugin/go.d/modules/clickhouse/init.go | 2 +-
 .../modules/clickhouse/integrations/clickhouse.md | 4 +-
 .../plugin/go.d/modules/cockroachdb/cockroachdb.go | 21 +-
 .../go.d/modules/cockroachdb/cockroachdb_test.go | 27 +-
 .../go.d/modules/cockroachdb/config_schema.json | 1 -
 src/go/plugin/go.d/modules/cockroachdb/init.go | 6 +-
 .../cockroachdb/integrations/cockroachdb.md | 4 +-
 src/go/plugin/go.d/modules/consul/collect.go | 44 +-
 .../go.d/modules/consul/collect_autopilot.go | 7 +-
 .../plugin/go.d/modules/consul/collect_checks.go | 7 +-
 .../plugin/go.d/modules/consul/collect_config.go | 7 +-
 .../plugin/go.d/modules/consul/collect_net_rtt.go | 7 +-
 .../plugin/go.d/modules/consul/config_schema.json | 1 -
 src/go/plugin/go.d/modules/consul/consul.go | 26 +-
 src/go/plugin/go.d/modules/consul/consul_test.go | 4 +-
 src/go/plugin/go.d/modules/consul/init.go | 6 +-
 .../go.d/modules/consul/integrations/consul.md | 6 +-
 src/go/plugin/go.d/modules/coredns/collect.go | 15 +-
 .../plugin/go.d/modules/coredns/config_schema.json | 1 -
 src/go/plugin/go.d/modules/coredns/coredns.go | 27 +-
 src/go/plugin/go.d/modules/coredns/init.go | 6 +-
 .../go.d/modules/coredns/integrations/coredns.md | 8 +-
 src/go/plugin/go.d/modules/coredns/metadata.yaml | 4 +-
 src/go/plugin/go.d/modules/couchbase/collect.go | 32 +-
 .../go.d/modules/couchbase/config_schema.json | 1 -
 src/go/plugin/go.d/modules/couchbase/couchbase.go | 24 +-
 .../go.d/modules/couchbase/couchbase_test.go | 30 +-
 src/go/plugin/go.d/modules/couchbase/init.go | 4 +-
 .../modules/couchbase/integrations/couchbase.md | 4 +-
 src/go/plugin/go.d/modules/couchdb/collect.go | 68 +-
 .../plugin/go.d/modules/couchdb/config_schema.json | 1 -
 src/go/plugin/go.d/modules/couchdb/couchdb.go | 29 +-
 src/go/plugin/go.d/modules/couchdb/couchdb_test.go | 32 +-
 src/go/plugin/go.d/modules/couchdb/init.go | 4 +-
 .../go.d/modules/couchdb/integrations/couchdb.md | 4 +-
 src/go/plugin/go.d/modules/couchdb/metrics.go | 2 +-
 src/go/plugin/go.d/modules/dmcache/charts.go | 2 +
 src/go/plugin/go.d/modules/dmcache/collect.go | 2 +
 .../plugin/go.d/modules/dmcache/config_schema.json | 1 -
 src/go/plugin/go.d/modules/dmcache/dmcache.go | 15 +-
 src/go/plugin/go.d/modules/dmcache/dmcache_test.go | 20 +-
 src/go/plugin/go.d/modules/dmcache/doc.go | 3 +
 src/go/plugin/go.d/modules/dmcache/exec.go | 2 +
 src/go/plugin/go.d/modules/dmcache/init.go | 2 +
 .../dmcache/integrations/dmcache_devices.md | 4 +-
 src/go/plugin/go.d/modules/dnsdist/collect.go | 37 +-
 .../plugin/go.d/modules/dnsdist/config_schema.json | 1 -
 src/go/plugin/go.d/modules/dnsdist/dnsdist.go | 24 +-
 src/go/plugin/go.d/modules/dnsdist/dnsdist_test.go | 34 +-
 src/go/plugin/go.d/modules/dnsdist/init.go | 4 +-
 .../go.d/modules/dnsdist/integrations/dnsdist.md | 4 +-
 .../plugin/go.d/modules/dnsmasq/config_schema.json | 1 -
 src/go/plugin/go.d/modules/dnsmasq/dnsmasq.go | 23 +-
 src/go/plugin/go.d/modules/dnsmasq/dnsmasq_test.go | 22 +-
 .../go.d/modules/dnsmasq/integrations/dnsmasq.md | 4 +-
 src/go/plugin/go.d/modules/dnsmasq_dhcp/charts.go | 2 +
 src/go/plugin/go.d/modules/dnsmasq_dhcp/collect.go | 2 +
 .../go.d/modules/dnsmasq_dhcp/config_schema.json | 1 -
 src/go/plugin/go.d/modules/dnsmasq_dhcp/dhcp.go | 10 +-
 .../plugin/go.d/modules/dnsmasq_dhcp/dhcp_test.go | 2 +
 src/go/plugin/go.d/modules/dnsmasq_dhcp/doc.go | 3 +
 src/go/plugin/go.d/modules/dnsmasq_dhcp/init.go | 2 +
 .../dnsmasq_dhcp/integrations/dnsmasq_dhcp.md | 8 +-
 .../plugin/go.d/modules/dnsmasq_dhcp/metadata.yaml | 2 +-
 .../modules/dnsmasq_dhcp/parse_configuration.go | 2 +
 .../go.d/modules/dnsquery/config_schema.json | 1 -
 src/go/plugin/go.d/modules/dnsquery/dnsquery.go | 30 +-
 .../plugin/go.d/modules/dnsquery/dnsquery_test.go | 14 +-
 src/go/plugin/go.d/modules/dnsquery/init.go | 1 +
 .../modules/dnsquery/integrations/dns_query.md | 4 +-
 .../plugin/go.d/modules/docker/config_schema.json | 1 -
 src/go/plugin/go.d/modules/docker/docker.go | 13 +-
 .../go.d/modules/docker/integrations/docker.md | 4 +-
 .../go.d/modules/docker_engine/config_schema.json | 1 -
 .../go.d/modules/docker_engine/docker_engine.go | 21 +-
 .../modules/docker_engine/docker_engine_test.go | 30 +-
 src/go/plugin/go.d/modules/docker_engine/init.go | 6 +-
 .../docker_engine/integrations/docker_engine.md | 4 +-
 src/go/plugin/go.d/modules/dockerhub/apiclient.go | 37 +-
 .../go.d/modules/dockerhub/config_schema.json | 1 -
 src/go/plugin/go.d/modules/dockerhub/dockerhub.go | 23 +-
 src/go/plugin/go.d/modules/dockerhub/init.go | 5 +-
 .../integrations/docker_hub_repository.md | 4 +-
 src/go/plugin/go.d/modules/dovecot/client.go | 6 +-
 .../plugin/go.d/modules/dovecot/config_schema.json | 1 -
 src/go/plugin/go.d/modules/dovecot/dovecot.go | 14 +-
 .../go.d/modules/dovecot/integrations/dovecot.md | 4 +-
 .../plugin/go.d/modules/elasticsearch/collect.go | 61 +-
 .../go.d/modules/elasticsearch/config_schema.json | 1 -
 .../go.d/modules/elasticsearch/elasticsearch.go | 19 +-
 .../modules/elasticsearch/elasticsearch_test.go | 51 +-
 src/go/plugin/go.d/modules/elasticsearch/init.go | 4 +-
 .../elasticsearch/integrations/elasticsearch.md | 4 +-
 .../elasticsearch/integrations/opensearch.md | 4 +-
 .../plugin/go.d/modules/envoy/config_schema.json | 1 -
 src/go/plugin/go.d/modules/envoy/envoy.go | 21 +-
 src/go/plugin/go.d/modules/envoy/envoy_test.go | 21 +-
 src/go/plugin/go.d/modules/envoy/init.go | 4 +-
 .../go.d/modules/envoy/integrations/envoy.md | 4 +-
 src/go/plugin/go.d/modules/example/README.md | 80 -
 src/go/plugin/go.d/modules/example/charts.go | 59 -
 src/go/plugin/go.d/modules/example/collect.go | 47 -
 .../plugin/go.d/modules/example/config_schema.json | 177 -
 src/go/plugin/go.d/modules/example/example.go | 110 -
 src/go/plugin/go.d/modules/example/example_test.go | 351 -
 src/go/plugin/go.d/modules/example/init.go | 63 -
 .../go.d/modules/example/testdata/config.json | 17 -
 .../go.d/modules/example/testdata/config.yaml | 13 -
 src/go/plugin/go.d/modules/exim/config_schema.json | 1 -
 src/go/plugin/go.d/modules/exim/exim.go | 13 +-
 .../plugin/go.d/modules/exim/integrations/exim.md | 4 +-
 src/go/plugin/go.d/modules/fail2ban/charts.go | 2 +
 src/go/plugin/go.d/modules/fail2ban/collect.go | 2 +
 .../go.d/modules/fail2ban/config_schema.json | 1 -
 src/go/plugin/go.d/modules/fail2ban/doc.go | 3 +
 src/go/plugin/go.d/modules/fail2ban/exec.go | 2 +
 src/go/plugin/go.d/modules/fail2ban/fail2ban.go | 15 +-
 .../plugin/go.d/modules/fail2ban/fail2ban_test.go | 24 +-
 src/go/plugin/go.d/modules/fail2ban/init.go | 2 +
 .../go.d/modules/fail2ban/integrations/fail2ban.md | 8 +-
 src/go/plugin/go.d/modules/fail2ban/metadata.yaml | 2 +-
 .../go.d/modules/filecheck/config_schema.json | 1 -
 src/go/plugin/go.d/modules/filecheck/filecheck.go | 24 +-
 .../go.d/modules/filecheck/filecheck_test.go | 16 +-
 src/go/plugin/go.d/modules/filecheck/init.go | 2 +-
 .../integrations/files_and_directories.md | 4 +-
src/go/plugin/go.d/modules/fluentd/apiclient.go | 35 +-
.../plugin/go.d/modules/fluentd/config_schema.json | 1 -
src/go/plugin/go.d/modules/fluentd/fluentd.go | 28 +-
src/go/plugin/go.d/modules/fluentd/init.go | 6 +-
.../go.d/modules/fluentd/integrations/fluentd.md | 4 +-
.../go.d/modules/freeradius/config_schema.json | 1 -
.../plugin/go.d/modules/freeradius/freeradius.go | 19 +-
.../go.d/modules/freeradius/freeradius_test.go | 19 +-
.../modules/freeradius/integrations/freeradius.md | 4 +-
src/go/plugin/go.d/modules/gearman/client.go | 6 +-
.../plugin/go.d/modules/gearman/config_schema.json | 1 -
src/go/plugin/go.d/modules/gearman/gearman.go | 14 +-
.../go.d/modules/gearman/integrations/gearman.md | 4 +-
src/go/plugin/go.d/modules/geth/config_schema.json | 1 -
src/go/plugin/go.d/modules/geth/geth.go | 21 +-
src/go/plugin/go.d/modules/geth/init.go | 4 +-
.../go.d/modules/geth/integrations/go-ethereum.md | 4 +-
.../plugin/go.d/modules/haproxy/config_schema.json | 1 -
src/go/plugin/go.d/modules/haproxy/haproxy.go | 21 +-
src/go/plugin/go.d/modules/haproxy/haproxy_test.go | 30 +-
src/go/plugin/go.d/modules/haproxy/init.go | 6 +-
.../go.d/modules/haproxy/integrations/haproxy.md | 4 +-
src/go/plugin/go.d/modules/hddtemp/client.go | 39 +-
src/go/plugin/go.d/modules/hddtemp/collect.go | 10 +-
.../plugin/go.d/modules/hddtemp/config_schema.json | 1 -
src/go/plugin/go.d/modules/hddtemp/hddtemp.go | 48 +-
src/go/plugin/go.d/modules/hddtemp/hddtemp_test.go | 60 +-
.../hddtemp/integrations/hdd_temperature.md | 4 +-
src/go/plugin/go.d/modules/hdfs/client.go | 69 -
src/go/plugin/go.d/modules/hdfs/collect.go | 40 +-
src/go/plugin/go.d/modules/hdfs/config_schema.json | 1 -
src/go/plugin/go.d/modules/hdfs/hdfs.go | 43 +-
src/go/plugin/go.d/modules/hdfs/hdfs_test.go | 2 +-
src/go/plugin/go.d/modules/hdfs/init.go | 25 -
.../hadoop_distributed_file_system_hdfs.md | 4 +-
.../plugin/go.d/modules/hpssa/config_schema.json | 1 -
src/go/plugin/go.d/modules/hpssa/hpssa.go | 17 +-
src/go/plugin/go.d/modules/hpssa/hpssa_test.go | 22 +-
.../modules/hpssa/integrations/hpe_smart_arrays.md | 4 +-
src/go/plugin/go.d/modules/httpcheck/collect.go | 14 +-
.../go.d/modules/httpcheck/config_schema.json | 1 -
src/go/plugin/go.d/modules/httpcheck/httpcheck.go | 25 +-
.../go.d/modules/httpcheck/httpcheck_test.go | 17 +-
src/go/plugin/go.d/modules/httpcheck/init.go | 4 +-
.../httpcheck/integrations/http_endpoints.md | 8 +-
src/go/plugin/go.d/modules/httpcheck/metadata.yaml | 4 +-
src/go/plugin/go.d/modules/icecast/collect.go | 32 +-
.../plugin/go.d/modules/icecast/config_schema.json | 1 -
src/go/plugin/go.d/modules/icecast/icecast.go | 21 +-
src/go/plugin/go.d/modules/icecast/icecast_test.go | 4 +-
.../go.d/modules/icecast/integrations/icecast.md | 4 +-
src/go/plugin/go.d/modules/init.go | 14 +-
.../go.d/modules/intelgpu/config_schema.json | 1 -
.../modules/intelgpu/integrations/intel_gpu.md | 4 +-
src/go/plugin/go.d/modules/intelgpu/intelgpu.go | 6 +-
src/go/plugin/go.d/modules/ipfs/collect.go | 43 +-
src/go/plugin/go.d/modules/ipfs/config_schema.json | 1 -
.../plugin/go.d/modules/ipfs/integrations/ipfs.md | 4 +-
src/go/plugin/go.d/modules/ipfs/ipfs.go | 25 +-
src/go/plugin/go.d/modules/ipfs/ipfs_test.go | 23 +-
src/go/plugin/go.d/modules/isc_dhcpd/charts.go | 2 +
src/go/plugin/go.d/modules/isc_dhcpd/collect.go | 2 +
.../go.d/modules/isc_dhcpd/config_schema.json | 1 -
src/go/plugin/go.d/modules/isc_dhcpd/doc.go | 3 +
src/go/plugin/go.d/modules/isc_dhcpd/init.go | 2 +
.../modules/isc_dhcpd/integrations/isc_dhcp.md | 9 +-
src/go/plugin/go.d/modules/isc_dhcpd/isc_dhcpd.go | 13 +-
.../go.d/modules/isc_dhcpd/isc_dhcpd_test.go | 26 +-
src/go/plugin/go.d/modules/isc_dhcpd/metadata.yaml | 2 +-
src/go/plugin/go.d/modules/isc_dhcpd/parse.go | 2 +
.../go.d/modules/k8s_kubelet/config_schema.json | 1 -
src/go/plugin/go.d/modules/k8s_kubelet/init.go | 4 +-
.../modules/k8s_kubelet/integrations/kubelet.md | 4 +-
src/go/plugin/go.d/modules/k8s_kubelet/kubelet.go | 25 +-
.../go.d/modules/k8s_kubelet/kubelet_test.go | 4 +-
.../go.d/modules/k8s_kubeproxy/config_schema.json | 1 -
src/go/plugin/go.d/modules/k8s_kubeproxy/init.go | 4 +-
.../k8s_kubeproxy/integrations/kubeproxy.md | 4 +-
.../plugin/go.d/modules/k8s_kubeproxy/kubeproxy.go | 21 +-
src/go/plugin/go.d/modules/k8s_state/charts.go | 155 +-
.../plugin/go.d/modules/k8s_state/cluster_meta.go | 46 +-
src/go/plugin/go.d/modules/k8s_state/collect.go | 100 +-
.../go.d/modules/k8s_state/config_schema.json | 1 -
.../go.d/modules/k8s_state/discover_kubernetes.go | 2 +-
.../plugin/go.d/modules/k8s_state/discover_node.go | 15 +-
.../plugin/go.d/modules/k8s_state/discover_pod.go | 15 +-
.../integrations/kubernetes_cluster_state.md | 11 +-
src/go/plugin/go.d/modules/k8s_state/kube_state.go | 8 +-
.../go.d/modules/k8s_state/kube_state_test.go | 620 +-
src/go/plugin/go.d/modules/k8s_state/metadata.yaml | 35 +-
src/go/plugin/go.d/modules/k8s_state/resource.go | 6 +-
src/go/plugin/go.d/modules/k8s_state/state.go | 64 +-
.../go.d/modules/k8s_state/update_node_state.go | 9 +-
.../go.d/modules/k8s_state/update_pod_state.go | 27 +-
src/go/plugin/go.d/modules/lighttpd/apiclient.go | 170 -
src/go/plugin/go.d/modules/lighttpd/collect.go | 21 +-
.../go.d/modules/lighttpd/config_schema.json | 1 -
src/go/plugin/go.d/modules/lighttpd/init.go | 29 -
.../go.d/modules/lighttpd/integrations/lighttpd.md | 4 +-
src/go/plugin/go.d/modules/lighttpd/lighttpd.go | 49 +-
.../plugin/go.d/modules/lighttpd/lighttpd_test.go | 1 -
src/go/plugin/go.d/modules/lighttpd/metrics.go | 33 -
src/go/plugin/go.d/modules/lighttpd/status.go | 146 +
src/go/plugin/go.d/modules/litespeed/charts.go | 2 +
src/go/plugin/go.d/modules/litespeed/collect.go | 2 +
.../go.d/modules/litespeed/config_schema.json | 1 -
src/go/plugin/go.d/modules/litespeed/doc.go | 3 +
.../modules/litespeed/integrations/litespeed.md | 9 +-
src/go/plugin/go.d/modules/litespeed/litespeed.go | 3 +-
.../go.d/modules/litespeed/litespeed_test.go | 21 +-
src/go/plugin/go.d/modules/litespeed/metadata.yaml | 2 +-
src/go/plugin/go.d/modules/logind/charts.go | 1 -
src/go/plugin/go.d/modules/logind/collect.go | 1 -
.../plugin/go.d/modules/logind/config_schema.json | 1 -
src/go/plugin/go.d/modules/logind/connection.go | 1 -
.../logind/integrations/systemd-logind_users.md | 8 +-
src/go/plugin/go.d/modules/logind/logind.go | 10 +-
src/go/plugin/go.d/modules/logind/logind_test.go | 1 -
src/go/plugin/go.d/modules/logind/metadata.yaml | 2 +-
src/go/plugin/go.d/modules/logstash/collect.go | 40 +-
.../go.d/modules/logstash/config_schema.json | 1 -
.../go.d/modules/logstash/integrations/logstash.md | 4 +-
src/go/plugin/go.d/modules/logstash/logstash.go | 23 +-
.../plugin/go.d/modules/logstash/logstash_test.go | 21 +-
src/go/plugin/go.d/modules/lvm/charts.go | 2 +
src/go/plugin/go.d/modules/lvm/collect.go | 2 +
src/go/plugin/go.d/modules/lvm/config_schema.json | 1 -
src/go/plugin/go.d/modules/lvm/doc.go | 3 +
src/go/plugin/go.d/modules/lvm/exec.go | 6 +
src/go/plugin/go.d/modules/lvm/init.go | 2 +
.../lvm/integrations/lvm_logical_volumes.md | 9 +-
src/go/plugin/go.d/modules/lvm/lvm.go | 34 +-
src/go/plugin/go.d/modules/lvm/lvm_test.go | 2 +
src/go/plugin/go.d/modules/lvm/metadata.yaml | 2 +-
src/go/plugin/go.d/modules/maxscale/README.md | 1 +
src/go/plugin/go.d/modules/maxscale/charts.go | 224 +
src/go/plugin/go.d/modules/maxscale/collect.go | 141 +
.../go.d/modules/maxscale/config_schema.json | 184 +
.../go.d/modules/maxscale/integrations/maxscale.md | 276 +
src/go/plugin/go.d/modules/maxscale/maxscale.go | 115 +
.../plugin/go.d/modules/maxscale/maxscale_test.go | 284 +
src/go/plugin/go.d/modules/maxscale/metadata.yaml | 272 +
src/go/plugin/go.d/modules/maxscale/restapi.go | 84 +
.../go.d/modules/maxscale/testdata/config.json | 20 +
.../go.d/modules/maxscale/testdata/config.yaml | 17 +
.../maxscale/testdata/v24.02.3/maxscale.json | 124 +
.../testdata/v24.02.3/maxscale_threads.json | 51 +
.../maxscale/testdata/v24.02.3/servers.json | 198 +
src/go/plugin/go.d/modules/megacli/charts.go | 2 +
src/go/plugin/go.d/modules/megacli/collect.go | 2 +
src/go/plugin/go.d/modules/megacli/collect_bbu.go | 2 +
.../go.d/modules/megacli/collect_phys_drives.go | 2 +
.../plugin/go.d/modules/megacli/config_schema.json | 1 -
src/go/plugin/go.d/modules/megacli/doc.go | 3 +
src/go/plugin/go.d/modules/megacli/exec.go | 2 +
src/go/plugin/go.d/modules/megacli/init.go | 2 +
.../megacli/integrations/megacli_megaraid.md | 9 +-
src/go/plugin/go.d/modules/megacli/megacli.go | 15 +-
src/go/plugin/go.d/modules/megacli/megacli_test.go | 2 +
src/go/plugin/go.d/modules/megacli/metadata.yaml | 2 +-
src/go/plugin/go.d/modules/memcached/client.go | 6 +-
.../go.d/modules/memcached/config_schema.json | 1 -
.../modules/memcached/integrations/memcached.md | 4 +-
src/go/plugin/go.d/modules/memcached/memcached.go | 14 +-
.../plugin/go.d/modules/mongodb/config_schema.json | 1 -
src/go/plugin/go.d/modules/mongodb/documents.go | 2 +-
.../go.d/modules/mongodb/integrations/mongodb.md | 4 +-
src/go/plugin/go.d/modules/mongodb/metadata.yaml | 2 +-
src/go/plugin/go.d/modules/mongodb/mongodb.go | 16 +-
src/go/plugin/go.d/modules/mongodb/mongodb_test.go | 2 +-
src/go/plugin/go.d/modules/monit/collect.go | 36 +-
.../plugin/go.d/modules/monit/config_schema.json | 1 -
.../go.d/modules/monit/integrations/monit.md | 4 +-
src/go/plugin/go.d/modules/monit/monit.go | 23 +-
src/go/plugin/go.d/modules/monit/monit_test.go | 4 +-
.../plugin/go.d/modules/mysql/config_schema.json | 1 -
.../go.d/modules/mysql/integrations/mariadb.md | 4 +-
.../go.d/modules/mysql/integrations/mysql.md | 4 +-
.../modules/mysql/integrations/percona_mysql.md | 4 +-
src/go/plugin/go.d/modules/mysql/mysql.go | 27 +-
src/go/plugin/go.d/modules/mysql/mysql_test.go | 18 +-
src/go/plugin/go.d/modules/nginx/apiclient.go | 168 -
src/go/plugin/go.d/modules/nginx/collect.go | 19 +-
.../plugin/go.d/modules/nginx/config_schema.json | 1 -
.../go.d/modules/nginx/integrations/nginx.md | 4 +-
src/go/plugin/go.d/modules/nginx/metrics.go | 34 -
src/go/plugin/go.d/modules/nginx/nginx.go | 44 +-
src/go/plugin/go.d/modules/nginx/nginx_test.go | 1 -
src/go/plugin/go.d/modules/nginx/status.go | 147 +
.../go.d/modules/nginxplus/config_schema.json | 1 -
.../modules/nginxplus/integrations/nginx_plus.md | 4 +-
.../go.d/modules/nginxplus/nginx_http_api_query.go | 99 +-
src/go/plugin/go.d/modules/nginxplus/nginxplus.go | 23 +-
.../go.d/modules/nginxplus/nginxplus_test.go | 24 +-
src/go/plugin/go.d/modules/nginxunit/README.md | 1 +
src/go/plugin/go.d/modules/nginxunit/charts.go | 59 +
src/go/plugin/go.d/modules/nginxunit/collect.go | 59 +
.../go.d/modules/nginxunit/config_schema.json | 182 +
.../modules/nginxunit/integrations/nginx_unit.md | 262 +
src/go/plugin/go.d/modules/nginxunit/metadata.yaml | 207 +
src/go/plugin/go.d/modules/nginxunit/nginxunit.go | 110 +
.../go.d/modules/nginxunit/nginxunit_test.go | 254 +
.../go.d/modules/nginxunit/testdata/config.json | 20 +
.../go.d/modules/nginxunit/testdata/config.yaml | 17 +
.../modules/nginxunit/testdata/v1.29.1/status.json | 12 +
src/go/plugin/go.d/modules/nginxvts/collect.go | 43 +-
.../go.d/modules/nginxvts/config_schema.json | 1 -
src/go/plugin/go.d/modules/nginxvts/init.go | 4 +-
.../modules/nginxvts/integrations/nginx_vts.md | 4 +-
src/go/plugin/go.d/modules/nginxvts/nginxvts.go | 23 +-
.../plugin/go.d/modules/nginxvts/nginxvts_test.go | 30 +-
src/go/plugin/go.d/modules/nsd/charts.go | 2 +
src/go/plugin/go.d/modules/nsd/collect.go | 2 +
src/go/plugin/go.d/modules/nsd/config_schema.json | 1 -
src/go/plugin/go.d/modules/nsd/doc.go | 3 +
src/go/plugin/go.d/modules/nsd/exec.go | 2 +
src/go/plugin/go.d/modules/nsd/init.go | 2 +
src/go/plugin/go.d/modules/nsd/integrations/nsd.md | 10 +-
src/go/plugin/go.d/modules/nsd/metadata.yaml | 16 +-
src/go/plugin/go.d/modules/nsd/nsd.go | 15 +-
src/go/plugin/go.d/modules/nsd/nsd_test.go | 2 +
src/go/plugin/go.d/modules/nsd/stats_counters.go | 2 +
src/go/plugin/go.d/modules/ntpd/config_schema.json | 1 -
.../plugin/go.d/modules/ntpd/integrations/ntpd.md | 4 +-
src/go/plugin/go.d/modules/ntpd/ntpd.go | 17 +-
.../go.d/modules/nvidia_smi/config_schema.json | 1 -
.../modules/nvidia_smi/integrations/nvidia_gpu.md | 11 +-
.../plugin/go.d/modules/nvidia_smi/nvidia_smi.go | 22 +-
src/go/plugin/go.d/modules/nvme/charts.go | 11 +-
src/go/plugin/go.d/modules/nvme/collect.go | 81 +-
src/go/plugin/go.d/modules/nvme/config_schema.json | 1 -
src/go/plugin/go.d/modules/nvme/doc.go | 3 +
src/go/plugin/go.d/modules/nvme/exec.go | 10 +-
src/go/plugin/go.d/modules/nvme/init.go | 2 +
.../go.d/modules/nvme/integrations/nvme_devices.md | 10 +-
src/go/plugin/go.d/modules/nvme/metadata.yaml | 4 +-
src/go/plugin/go.d/modules/nvme/nvme.go | 15 +-
src/go/plugin/go.d/modules/nvme/nvme_test.go | 2 +
src/go/plugin/go.d/modules/openldap/README.md | 1 +
src/go/plugin/go.d/modules/openldap/charts.go | 141 +
src/go/plugin/go.d/modules/openldap/client.go | 83 +
src/go/plugin/go.d/modules/openldap/collect.go | 55 +
.../go.d/modules/openldap/collect_mon_counters.go | 63 +
.../go.d/modules/openldap/collect_operations.go | 71 +
.../go.d/modules/openldap/config_schema.json | 109 +
.../go.d/modules/openldap/integrations/openldap.md | 228 +
src/go/plugin/go.d/modules/openldap/metadata.yaml | 192 +
src/go/plugin/go.d/modules/openldap/openldap.go | 114 +
.../plugin/go.d/modules/openldap/openldap_test.go | 363 +
.../go.d/modules/openldap/testdata/config.json | 11 +
.../go.d/modules/openldap/testdata/config.yaml | 8 +
.../go.d/modules/openvpn/client/client_test.go | 1 +
.../plugin/go.d/modules/openvpn/config_schema.json | 1 -
src/go/plugin/go.d/modules/openvpn/init.go | 8 +-
.../go.d/modules/openvpn/integrations/openvpn.md | 4 +-
src/go/plugin/go.d/modules/openvpn/metadata.yaml | 2 +-
src/go/plugin/go.d/modules/openvpn/openvpn.go | 12 +-
src/go/plugin/go.d/modules/openvpn/openvpn_test.go | 2 +-
.../modules/openvpn_status_log/config_schema.json | 1 -
.../plugin/go.d/modules/openvpn_status_log/init.go | 3 +-
.../integrations/openvpn_status_log.md | 4 +-
.../go.d/modules/openvpn_status_log/metadata.yaml | 2 +-
.../go.d/modules/openvpn_status_log/openvpn.go | 10 +-
.../modules/openvpn_status_log/openvpn_test.go | 2 +-
src/go/plugin/go.d/modules/oracledb/README.md | 1 +
src/go/plugin/go.d/modules/oracledb/charts.go | 363 +
src/go/plugin/go.d/modules/oracledb/collect.go | 107 +
.../go.d/modules/oracledb/collect_sysmetric.go | 58 +
.../go.d/modules/oracledb/collect_sysstat.go | 58 +
.../go.d/modules/oracledb/collect_tablespace.go | 106 +
.../go.d/modules/oracledb/collect_wait_class.go | 60 +
.../go.d/modules/oracledb/config_schema.json | 47 +
src/go/plugin/go.d/modules/oracledb/init.go | 41 +
.../modules/oracledb/integrations/oracle_db.md | 299 +
src/go/plugin/go.d/modules/oracledb/metadata.yaml | 271 +
src/go/plugin/go.d/modules/oracledb/oracledb.go | 109 +
.../plugin/go.d/modules/oracledb/oracledb_test.go | 317 +
.../go.d/modules/oracledb/testdata/config.json | 5 +
.../go.d/modules/oracledb/testdata/config.yaml | 3 +
.../oracledb/testdata/v21.3.0-xe/sysmetric.txt | 13 +
.../oracledb/testdata/v21.3.0-xe/sysstat.txt | 17 +
.../oracledb/testdata/v21.3.0-xe/tablespace.txt | 6 +
.../oracledb/testdata/v21.3.0-xe/wait_class.txt | 12 +
.../go.d/modules/pgbouncer/config_schema.json | 1 -
.../modules/pgbouncer/integrations/pgbouncer.md | 4 +-
src/go/plugin/go.d/modules/pgbouncer/pgbouncer.go | 15 +-
src/go/plugin/go.d/modules/phpdaemon/client.go | 77 -
src/go/plugin/go.d/modules/phpdaemon/collect.go | 53 +-
.../go.d/modules/phpdaemon/config_schema.json | 1 -
src/go/plugin/go.d/modules/phpdaemon/init.go | 27 -
.../modules/phpdaemon/integrations/phpdaemon.md | 4 +-
src/go/plugin/go.d/modules/phpdaemon/metrics.go | 33 -
src/go/plugin/go.d/modules/phpdaemon/phpdaemon.go | 42 +-
.../go.d/modules/phpdaemon/phpdaemon_test.go | 1 -
src/go/plugin/go.d/modules/phpfpm/client.go | 26 +-
src/go/plugin/go.d/modules/phpfpm/collect.go | 12 +-
.../plugin/go.d/modules/phpfpm/config_schema.json | 1 -
src/go/plugin/go.d/modules/phpfpm/init.go | 4 +-
.../go.d/modules/phpfpm/integrations/php-fpm.md | 4 +-
src/go/plugin/go.d/modules/phpfpm/phpfpm.go | 24 +-
src/go/plugin/go.d/modules/pihole/collect.go | 62 +-
.../plugin/go.d/modules/pihole/config_schema.json | 1 -
src/go/plugin/go.d/modules/pihole/init.go | 2 +-
.../go.d/modules/pihole/integrations/pi-hole.md | 4 +-
src/go/plugin/go.d/modules/pihole/pihole.go | 23 +-
src/go/plugin/go.d/modules/pihole/pihole_test.go | 4 +-
src/go/plugin/go.d/modules/pika/config_schema.json | 1 -
src/go/plugin/go.d/modules/pika/init.go | 2 +-
.../plugin/go.d/modules/pika/integrations/pika.md | 4 +-
src/go/plugin/go.d/modules/pika/pika.go | 23 +-
src/go/plugin/go.d/modules/pika/pika_test.go | 24 +-
src/go/plugin/go.d/modules/ping/config_schema.json | 3 +-
src/go/plugin/go.d/modules/ping/init.go | 2 +-
.../plugin/go.d/modules/ping/integrations/ping.md | 29 +-
src/go/plugin/go.d/modules/ping/metadata.yaml | 43 +-
src/go/plugin/go.d/modules/ping/ping.go | 25 +-
src/go/plugin/go.d/modules/ping/prober.go | 72 +-
src/go/plugin/go.d/modules/portcheck/README.md | 2 +-
src/go/plugin/go.d/modules/portcheck/charts.go | 125 +-
.../go.d/modules/portcheck/check_tcp_port.go | 56 +
.../go.d/modules/portcheck/check_udp_port.go | 167 +
src/go/plugin/go.d/modules/portcheck/collect.go | 110 +-
.../go.d/modules/portcheck/config_schema.json | 53 +-
src/go/plugin/go.d/modules/portcheck/init.go | 39 +-
.../portcheck/integrations/tcp-udp_endpoints.md | 327 +
.../portcheck/integrations/tcp_endpoints.md | 252 -
src/go/plugin/go.d/modules/portcheck/metadata.yaml | 87 +-
src/go/plugin/go.d/modules/portcheck/portcheck.go | 61 +-
.../go.d/modules/portcheck/portcheck_test.go | 121 +-
.../go.d/modules/portcheck/testdata/config.json | 3 +
.../go.d/modules/portcheck/testdata/config.yaml | 2 +
.../plugin/go.d/modules/postfix/config_schema.json | 1 -
.../go.d/modules/postfix/integrations/postfix.md | 4 +-
src/go/plugin/go.d/modules/postfix/postfix.go | 18 +-
.../go.d/modules/postgres/config_schema.json | 1 -
src/go/plugin/go.d/modules/postgres/init.go | 2 +-
.../modules/postgres/integrations/postgresql.md | 8 +-
src/go/plugin/go.d/modules/postgres/metadata.yaml | 2 +-
src/go/plugin/go.d/modules/postgres/postgres.go | 30 +-
.../plugin/go.d/modules/postgres/postgres_test.go | 2 +-
.../go.d/modules/powerdns/authoritativens.go | 24 +-
.../go.d/modules/powerdns/authoritativens_test.go | 32 +-
src/go/plugin/go.d/modules/powerdns/collect.go | 36 +-
.../go.d/modules/powerdns/config_schema.json | 1 -
src/go/plugin/go.d/modules/powerdns/init.go | 4 +-
.../integrations/powerdns_authoritative_server.md | 4 +-
src/go/plugin/go.d/modules/powerdns/metrics.go | 2 +-
.../go.d/modules/powerdns_recursor/collect.go | 36 +-
.../modules/powerdns_recursor/config_schema.json | 1 -
.../plugin/go.d/modules/powerdns_recursor/init.go | 4 +-
.../integrations/powerdns_recursor.md | 4 +-
.../go.d/modules/powerdns_recursor/metrics.go | 2 +-
.../go.d/modules/powerdns_recursor/recursor.go | 24 +-
.../modules/powerdns_recursor/recursor_test.go | 32 +-
src/go/plugin/go.d/modules/prometheus/charts.go | 15 +-
.../go.d/modules/prometheus/config_schema.json | 6 +-
src/go/plugin/go.d/modules/prometheus/init.go | 6 +-
.../modules/prometheus/integrations/4d_server.md | 7 +-
.../prometheus/integrations/8430ft_modem.md | 7 +-
.../integrations/a10_acos_network_devices.md | 7 +-
.../integrations/airthings_waveplus_air_sensor.md | 7 +-
.../integrations/akamai_edge_dns_traffic.md | 7 +-
.../akamai_global_traffic_management.md | 7 +-
.../prometheus/integrations/akami_cloudmonitor.md | 7 +-
.../prometheus/integrations/alamos_fe2_server.md | 7 +-
.../prometheus/integrations/alibaba_cloud.md | 7 +-
.../prometheus/integrations/altaro_backup.md | 7 +-
.../prometheus/integrations/amd_cpu_&_gpu.md | 7 +-
.../integrations/andrews_&_arnold_line_status.md | 7 +-
.../prometheus/integrations/apache_airflow.md | 7 +-
.../prometheus/integrations/apache_flink.md | 7 +-
.../modules/prometheus/integrations/apicast.md | 7 +-
.../prometheus/integrations/apple_time_machine.md | 7 +-
.../modules/prometheus/integrations/arm_hwcpipe.md | 7 +-
.../prometheus/integrations/aruba_devices.md | 7 +-
.../prometheus/integrations/arvancloud_cdn.md | 7 +-
.../modules/prometheus/integrations/audisto.md | 7 +-
.../modules/prometheus/integrations/authlog.md | 7 +-
.../integrations/aws_ec2_compute_instances.md | 7 +-
.../integrations/aws_ec2_spot_instance.md | 7 +-
.../modules/prometheus/integrations/aws_ecs.md | 7 +-
.../prometheus/integrations/aws_health_events.md | 7 +-
.../prometheus/integrations/aws_instance_health.md | 7 +-
.../modules/prometheus/integrations/aws_quota.md | 7 +-
.../modules/prometheus/integrations/aws_rds.md | 7 +-
.../prometheus/integrations/aws_s3_buckets.md | 7 +-
.../modules/prometheus/integrations/aws_sqs.md | 7 +-
.../integrations/azure_ad_app_passwords.md | 7 +-
.../prometheus/integrations/azure_application.md | 7 +-
.../integrations/azure_elastic_pool_sql.md | 7 +-
.../prometheus/integrations/azure_resources.md | 7 +-
.../prometheus/integrations/azure_service_bus.md | 7 +-
.../modules/prometheus/integrations/azure_sql.md | 7 +-
.../modules/prometheus/integrations/bigquery.md | 7 +-
.../prometheus/integrations/bird_routing_daemon.md | 7 +-
.../modules/prometheus/integrations/blackbox.md | 7 +-
.../prometheus/integrations/bobcat_miner_300.md | 7 +-
.../modules/prometheus/integrations/borg_backup.md | 7 +-
.../go.d/modules/prometheus/integrations/bosh.md | 7 +-
.../prometheus/integrations/bpftrace_variables.md | 7 +-
.../modules/prometheus/integrations/bungeecord.md | 7 +-
.../modules/prometheus/integrations/cadvisor.md | 7 +-
.../go.d/modules/prometheus/integrations/celery.md | 7 +-
.../integrations/certificate_transparency.md | 7 +-
.../prometheus/integrations/checkpoint_device.md | 7 +-
.../go.d/modules/prometheus/integrations/chia.md | 7 +-
.../christ_elektronik_clm5ip_power_panel.md | 7 +-
.../prometheus/integrations/cilium_agent.md | 7 +-
.../prometheus/integrations/cilium_operator.md | 7 +-
.../prometheus/integrations/cilium_proxy.md | 7 +-
.../modules/prometheus/integrations/cisco_aci.md | 7 +-
.../prometheus/integrations/citrix_netscaler.md | 7 +-
.../prometheus/integrations/clamav_daemon.md | 7 +-
.../prometheus/integrations/clamscan_results.md | 7 +-
.../go.d/modules/prometheus/integrations/clash.md | 7 +-
.../prometheus/integrations/cloud_foundry.md | 7 +-
.../integrations/cloud_foundry_firehose.md | 7 +-
.../prometheus/integrations/cloudflare_pcap.md | 7 +-
.../modules/prometheus/integrations/cloudwatch.md | 7 +-
.../prometheus/integrations/clustercontrol_cmon.md | 7 +-
.../modules/prometheus/integrations/collectd.md | 7 +-
.../modules/prometheus/integrations/concourse.md | 7 +-
.../modules/prometheus/integrations/craftbeerpi.md | 7 +-
.../modules/prometheus/integrations/crowdsec.md | 7 +-
.../prometheus/integrations/crypto_exchanges.md | 7 +-
.../modules/prometheus/integrations/cryptowatch.md | 7 +-
.../prometheus/integrations/custom_exporter.md | 7 +-
.../prometheus/integrations/cvmfs_clients.md | 7 +-
.../prometheus/integrations/ddwrt_routers.md | 7 +-
.../integrations/dell_emc_ecs_cluster.md | 7 +-
.../integrations/dell_emc_isilon_cluster.md | 7 +-
.../integrations/dell_emc_xtremio_cluster.md | 7 +-
.../prometheus/integrations/dell_powermax.md | 7 +-
.../prometheus/integrations/dependency-track.md | 7 +-
.../prometheus/integrations/digitalocean.md | 7 +-
.../modules/prometheus/integrations/discourse.md | 7 +-
.../go.d/modules/prometheus/integrations/dmarc.md | 7 +-
.../go.d/modules/prometheus/integrations/dnsbl.md | 7 +-
.../integrations/dutch_electricity_smart_meter.md | 7 +-
.../modules/prometheus/integrations/dynatrace.md | 7 +-
.../modules/prometheus/integrations/eaton_ups.md | 7 +-
.../integrations/elgato_key_light_devices..md | 7 +-
.../integrations/energomera_smart_power_meters.md | 7 +-
.../go.d/modules/prometheus/integrations/eos.md | 7 +-
.../go.d/modules/prometheus/integrations/etcd.md | 7 +-
.../prometheus/integrations/excel_spreadsheet.md | 7 +-
.../go.d/modules/prometheus/integrations/fastd.md | 7 +-
.../prometheus/integrations/fortigate_firewall.md | 7 +-
.../modules/prometheus/integrations/freebsd_nfs.md | 7 +-
.../prometheus/integrations/freebsd_rctl-racct.md | 7 +-
.../prometheus/integrations/freifunk_network.md | 7 +-
.../integrations/fritzbox_network_devices.md | 7 +-
.../modules/prometheus/integrations/frrouting.md | 7 +-
.../modules/prometheus/integrations/gcp_gce.md | 7 +-
.../modules/prometheus/integrations/gcp_quota.md | 7 +-
.../integrations/generic_command_line_output.md | 7 +-
.../integrations/generic_storage_enclosure_tool.md | 7 +-
.../integrations/github_api_rate_limit.md | 7 +-
.../prometheus/integrations/github_repository.md | 7 +-
.../prometheus/integrations/gitlab_runner.md | 7 +-
.../modules/prometheus/integrations/gobetween.md | 7 +-
.../integrations/google_cloud_platform.md | 7 +-
.../prometheus/integrations/google_pagespeed.md | 7 +-
.../prometheus/integrations/google_stackdriver.md | 7 +-
.../go.d/modules/prometheus/integrations/gpsd.md | 7 +-
.../modules/prometheus/integrations/grafana.md | 7 +-
.../prometheus/integrations/graylog_server.md | 7 +-
.../go.d/modules/prometheus/integrations/gtp.md | 7 +-
.../go.d/modules/prometheus/integrations/halon.md | 7 +-
.../go.d/modules/prometheus/integrations/hana.md | 7 +-
.../integrations/hashicorp_vault_secrets.md | 7 +-
.../integrations/hasura_graphql_server.md | 7 +-
.../modules/prometheus/integrations/hdsentinel.md | 7 +-
.../prometheus/integrations/helium_hotspot.md | 7 +-
.../integrations/helium_miner_validator.md | 7 +-
.../go.d/modules/prometheus/integrations/hhvm.md | 7 +-
.../integrations/hitron_cgn_series_cpe.md | 7 +-
.../integrations/hitron_coda_cable_modem.md | 7 +-
.../modules/prometheus/integrations/homebridge.md | 7 +-
.../go.d/modules/prometheus/integrations/homey.md | 7 +-
.../modules/prometheus/integrations/honeypot.md | 7 +-
.../go.d/modules/prometheus/integrations/hp_ilo.md | 7 +-
.../prometheus/integrations/huawei_devices.md | 7 +-
.../go.d/modules/prometheus/integrations/hubble.md | 7 +-
.../integrations/ibm_aix_systems_njmon.md | 7 +-
.../integrations/ibm_cryptoexpress_cex_cards.md | 7 +-
.../go.d/modules/prometheus/integrations/ibm_mq.md | 7 +-
.../prometheus/integrations/ibm_spectrum.md | 7 +-
.../integrations/ibm_spectrum_virtualize.md | 7 +-
.../ibm_z_hardware_management_console.md | 7 +-
.../modules/prometheus/integrations/influxdb.md | 7 +-
.../prometheus/integrations/iota_full_node.md | 7 +-
.../prometheus/integrations/ipmi_by_soundcloud.md | 7 +-
.../iqair_airvisual_air_quality_monitors.md | 7 +-
.../integrations/jarvis_standing_desk.md | 7 +-
.../modules/prometheus/integrations/jenkins.md | 7 +-
.../jetbrains_floating_license_server.md | 7 +-
.../go.d/modules/prometheus/integrations/jmx.md | 7 +-
.../modules/prometheus/integrations/jolokia.md | 7 +-
.../modules/prometheus/integrations/journald.md | 7 +-
.../go.d/modules/prometheus/integrations/kafka.md | 7 +-
.../prometheus/integrations/kafka_connect.md | 7 +-
.../prometheus/integrations/kafka_consumer_lag.md | 7 +-
.../prometheus/integrations/kafka_zookeeper.md | 7 +-
.../go.d/modules/prometheus/integrations/kannel.md | 7 +-
.../modules/prometheus/integrations/keepalived.md | 7 +-
.../integrations/kubernetes_cluster_cloud_cost.md | 7 +-
.../integrations/lagerist_disk_latency.md | 7 +-
.../go.d/modules/prometheus/integrations/ldap.md | 7 +-
.../go.d/modules/prometheus/integrations/linode.md | 7 +-
.../go.d/modules/prometheus/integrations/loki.md | 7 +-
.../prometheus/integrations/lustre_metadata.md | 7 +-
.../prometheus/integrations/lynis_audit_reports.md | 7 +-
.../modules/prometheus/integrations/machbase.md | 7 +-
.../modules/prometheus/integrations/maildir.md | 7 +-
.../modules/prometheus/integrations/meilisearch.md | 7 +-
.../prometheus/integrations/memcached_community.md | 7 +-
.../prometheus/integrations/meraki_dashboard.md | 7 +-
.../go.d/modules/prometheus/integrations/mesos.md | 7 +-
.../prometheus/integrations/mikrotik_devices.md | 7 +-
.../integrations/mikrotik_routeros_devices.md | 7 +-
.../modules/prometheus/integrations/minecraft.md | 7 +-
.../prometheus/integrations/modbus_protocol.md | 7 +-
.../modules/prometheus/integrations/mogilefs.md | 7 +-
.../prometheus/integrations/monnit_sensors_mqtt.md | 7 +-
.../modules/prometheus/integrations/mosquitto.md | 7 +-
.../integrations/mp707_usb_thermometer.md | 7 +-
.../prometheus/integrations/mqtt_blackbox.md | 7 +-
.../go.d/modules/prometheus/integrations/mtail.md | 7 +-
.../go.d/modules/prometheus/integrations/naemon.md | 7 +-
.../go.d/modules/prometheus/integrations/nagios.md | 7 +-
.../integrations/nature_remo_e_lite_devices.md | 7 +-
.../prometheus/integrations/netapp_ontap_api.md | 7 +-
.../prometheus/integrations/netapp_solidfire.md | 7 +-
.../prometheus/integrations/netatmo_sensors.md | 7 +-
.../modules/prometheus/integrations/netflow.md | 7 +-
.../modules/prometheus/integrations/netmeter.md | 7 +-
.../modules/prometheus/integrations/new_relic.md | 7 +-
.../prometheus/integrations/nextcloud_servers.md | 7 +-
.../modules/prometheus/integrations/nextdns.md | 7 +-
.../modules/prometheus/integrations/nftables.md | 7 +-
.../modules/prometheus/integrations/nrpe_daemon.md | 7 +-
.../go.d/modules/prometheus/integrations/nsx-t.md | 7 +-
.../go.d/modules/prometheus/integrations/nvml.md | 7 +-
.../modules/prometheus/integrations/obs_studio.md | 7 +-
.../go.d/modules/prometheus/integrations/odbc.md | 7 +-
.../prometheus/integrations/open_vswitch.md | 7 +-
.../modules/prometheus/integrations/openhab.md | 7 +-
.../prometheus/integrations/openldap_community.md | 7 +-
.../go.d/modules/prometheus/integrations/openrc.md | 7 +-
.../modules/prometheus/integrations/openrct2.md | 7 +-
.../prometheus/integrations/openroadm_devices.md | 7 +-
.../modules/prometheus/integrations/openstack.md | 7 +-
.../modules/prometheus/integrations/openvas.md | 7 +-
.../prometheus/integrations/openweathermap.md | 7 +-
.../prometheus/integrations/oracle_db_community.md | 7 +-
.../go.d/modules/prometheus/integrations/otrs.md | 7 +-
.../modules/prometheus/integrations/patroni.md | 7 +-
.../integrations/personal_weather_station.md | 7 +-
.../modules/prometheus/integrations/pgbackrest.md | 7 +-
.../modules/prometheus/integrations/pgpool-ii.md | 7 +-
.../modules/prometheus/integrations/philips_hue.md | 7 +-
.../prometheus/integrations/pimoroni_enviro+.md | 7 +-
.../modules/prometheus/integrations/pingdom.md | 7 +-
.../go.d/modules/prometheus/integrations/podman.md | 7 +-
.../prometheus/integrations/powerpal_devices.md | 7 +-
.../modules/prometheus/integrations/proftpd.md | 7 +-
.../prometheus/integrations/prometheus_endpoint.md | 7 +-
.../modules/prometheus/integrations/proxmox_ve.md | 7 +-
.../prometheus/integrations/radio_thermostat.md | 7 +-
.../go.d/modules/prometheus/integrations/radius.md | 7 +-
.../modules/prometheus/integrations/rancher.md | 7 +-
.../modules/prometheus/integrations/raritan_pdu.md | 7 +-
.../modules/prometheus/integrations/redis_queue.md | 7 +-
.../modules/prometheus/integrations/ripe_atlas.md | 7 +-
.../modules/prometheus/integrations/sabnzbd.md | 7 +-
.../integrations/salicru_eqx_inverter.md | 7 +-
.../prometheus/integrations/sense_energy.md | 7 +-
.../go.d/modules/prometheus/integrations/sentry.md | 7 +-
.../modules/prometheus/integrations/servertech.md | 7 +-
.../prometheus/integrations/shell_command.md | 7 +-
.../integrations/shelly_humidity_sensor.md | 7 +-
.../go.d/modules/prometheus/integrations/sia.md | 7 +-
.../prometheus/integrations/siemens_s7_plc.md | 7 +-
.../modules/prometheus/integrations/site_24x7.md | 7 +-
.../go.d/modules/prometheus/integrations/slurm.md | 7 +-
.../prometheus/integrations/sma_inverters.md | 7 +-
.../prometheus/integrations/smart_meters_sml.md | 7 +-
.../integrations/smartrg_808ac_cable_modem.md | 7 +-
.../integrations/softether_vpn_server.md | 7 +-
.../prometheus/integrations/solar_logging_stick.md | 7 +-
.../prometheus/integrations/solaredge_inverters.md | 7 +-
.../integrations/solis_ginlong_5g_inverters.md | 7 +-
.../modules/prometheus/integrations/sonic_nos.md | 7 +-
.../modules/prometheus/integrations/spacelift.md | 7 +-
.../prometheus/integrations/speedify_cli.md | 7 +-
.../go.d/modules/prometheus/integrations/sphinx.md | 7 +-
.../integrations/sql_database_agnostic.md | 7 +-
.../go.d/modules/prometheus/integrations/ssh.md | 7 +-
.../prometheus/integrations/ssl_certificate.md | 7 +-
.../prometheus/integrations/starlink_spacex.md | 7 +-
.../integrations/starwind_vsan_vsphere_edition.md | 7 +-
.../modules/prometheus/integrations/statuspage.md | 7 +-
.../go.d/modules/prometheus/integrations/steam.md | 7 +-
.../modules/prometheus/integrations/storidge.md | 7 +-
.../go.d/modules/prometheus/integrations/stream.md | 7 +-
.../modules/prometheus/integrations/strongswan.md | 7 +-
.../integrations/sunspec_solar_energy.md | 7 +-
.../modules/prometheus/integrations/suricata.md | 7 +-
.../integrations/synology_activebackup.md | 7 +-
.../modules/prometheus/integrations/sysload.md | 7 +-
.../integrations/t-rex_nvidia_gpu_miner.md | 7 +-
.../go.d/modules/prometheus/integrations/tacacs.md | 7 +-
.../integrations/tado_smart_heating_solution.md | 7 +-
.../prometheus/integrations/tankerkoenig_api.md | 7 +-
.../prometheus/integrations/tesla_powerwall.md | 7 +-
.../prometheus/integrations/tesla_vehicle.md | 7 +-
.../integrations/tesla_wall_connector.md | 7 +-
.../prometheus/integrations/tp-link_p110.md | 7 +-
.../modules/prometheus/integrations/traceroute.md | 7 +-
.../integrations/twincat_ads_web_service.md | 7 +-
.../go.d/modules/prometheus/integrations/twitch.md | 7 +-
.../prometheus/integrations/ubiquiti_ufiber_olt.md | 7 +-
.../modules/prometheus/integrations/uptimerobot.md | 7 +-
.../modules/prometheus/integrations/vault_pki.md | 7 +-
.../modules/prometheus/integrations/vertica.md | 7 +-
.../go.d/modules/prometheus/integrations/vscode.md | 7 +-
.../go.d/modules/prometheus/integrations/warp10.md | 7 +-
.../prometheus/integrations/xiaomi_mi_flora.md | 7 +-
.../modules/prometheus/integrations/xmpp_server.md | 7 +-
.../integrations/yourls_url_shortener.md | 7 +-
.../go.d/modules/prometheus/integrations/zerto.md | 7 +-
.../go.d/modules/prometheus/integrations/zulip.md | 7 +-
.../prometheus/integrations/zyxel_gs1200-8.md | 7 +-
.../plugin/go.d/modules/prometheus/metadata.yaml | 4 +
.../plugin/go.d/modules/prometheus/prometheus.go | 26 +-
.../go.d/modules/prometheus/prometheus_test.go | 6 +-
.../go.d/modules/prometheus/testdata/config.json | 1 +
.../go.d/modules/prometheus/testdata/config.yaml | 1 +
.../go.d/modules/proxysql/config_schema.json | 1 -
.../go.d/modules/proxysql/integrations/proxysql.md | 4 +-
src/go/plugin/go.d/modules/proxysql/proxysql.go | 15 +-
.../plugin/go.d/modules/pulsar/config_schema.json | 1 -
src/go/plugin/go.d/modules/pulsar/init.go | 6 +-
.../modules/pulsar/integrations/apache_pulsar.md | 4 +-
src/go/plugin/go.d/modules/pulsar/pulsar.go | 28 +-
src/go/plugin/go.d/modules/pulsar/pulsar_test.go | 35 +-
src/go/plugin/go.d/modules/puppet/collect.go | 31 +-
.../plugin/go.d/modules/puppet/config_schema.json | 1 -
.../go.d/modules/puppet/integrations/puppet.md | 4 +-
src/go/plugin/go.d/modules/puppet/puppet.go | 21 +-
src/go/plugin/go.d/modules/puppet/puppet_test.go | 23 +-
src/go/plugin/go.d/modules/rabbitmq/collect.go | 65 +-
.../go.d/modules/rabbitmq/config_schema.json | 1 -
.../go.d/modules/rabbitmq/integrations/rabbitmq.md | 4 +-
src/go/plugin/go.d/modules/rabbitmq/rabbitmq.go | 25 +-
.../plugin/go.d/modules/rabbitmq/rabbitmq_test.go | 4 +-
src/go/plugin/go.d/modules/redis/collect.go | 3 +-
.../plugin/go.d/modules/redis/config_schema.json | 1 -
src/go/plugin/go.d/modules/redis/init.go | 2 +-
.../go.d/modules/redis/integrations/redis.md | 4 +-
src/go/plugin/go.d/modules/redis/redis.go | 27 +-
src/go/plugin/go.d/modules/redis/redis_test.go | 27 +-
.../go.d/modules/rethinkdb/config_schema.json | 1 -
.../modules/rethinkdb/integrations/rethinkdb.md | 4 +-
src/go/plugin/go.d/modules/rethinkdb/rethinkdb.go | 18 +-
.../go.d/modules/rethinkdb/rethinkdb_test.go | 4 +-
src/go/plugin/go.d/modules/riakkv/collect.go | 37 +-
.../plugin/go.d/modules/riakkv/config_schema.json | 1 -
.../go.d/modules/riakkv/integrations/riak_kv.md | 4 +-
src/go/plugin/go.d/modules/riakkv/riakkv.go | 23 +-
src/go/plugin/go.d/modules/riakkv/riakkv_test.go | 4 +-
src/go/plugin/go.d/modules/rspamd/collect.go | 31 +-
.../plugin/go.d/modules/rspamd/config_schema.json | 1 -
.../go.d/modules/rspamd/integrations/rspamd.md | 4 +-
src/go/plugin/go.d/modules/rspamd/rspamd.go | 23 +-
src/go/plugin/go.d/modules/rspamd/rspamd_test.go | 23 +-
src/go/plugin/go.d/modules/samba/README.md | 1 +
src/go/plugin/go.d/modules/samba/charts.go | 124 +
src/go/plugin/go.d/modules/samba/collect.go | 71 +
.../plugin/go.d/modules/samba/config_schema.json | 34 +
src/go/plugin/go.d/modules/samba/exec.go | 47 +
src/go/plugin/go.d/modules/samba/init.go | 23 +
.../go.d/modules/samba/integrations/samba.md | 240 +
src/go/plugin/go.d/modules/samba/metadata.yaml | 153 +
src/go/plugin/go.d/modules/samba/samba.go | 99 +
src/go/plugin/go.d/modules/samba/samba_test.go | 339 +
.../plugin/go.d/modules/samba/testdata/config.json | 4 +
.../plugin/go.d/modules/samba/testdata/config.yaml | 2 +
.../modules/samba/testdata/smbstatus-profile.txt | 451 +
.../plugin/go.d/modules/scaleio/client/client.go | 37 +-
.../go.d/modules/scaleio/client/client_test.go | 4 +-
.../plugin/go.d/modules/scaleio/config_schema.json | 1 -
.../scaleio/integrations/dell_emc_scaleio.md | 4 +-
src/go/plugin/go.d/modules/scaleio/scaleio.go | 24 +-
src/go/plugin/go.d/modules/scaleio/scaleio_test.go | 25 +-
src/go/plugin/go.d/modules/sensors/README.md | 2 +-
src/go/plugin/go.d/modules/sensors/charts.go | 647 +-
src/go/plugin/go.d/modules/sensors/collect.go | 271 +-
.../plugin/go.d/modules/sensors/config_schema.json | 109 +-
src/go/plugin/go.d/modules/sensors/doc.go | 3 +
src/go/plugin/go.d/modules/sensors/exec.go | 41 -
src/go/plugin/go.d/modules/sensors/init.go | 38 -
.../modules/sensors/integrations/linux_sensors.md | 249 +
.../integrations/linux_sensors_lm-sensors.md | 215 -
.../go.d/modules/sensors/lmsensors/LICENSE.md | 10 +
.../go.d/modules/sensors/lmsensors/README.md | 4 +
.../plugin/go.d/modules/sensors/lmsensors/doc.go | 2 +
src/go/plugin/go.d/modules/sensors/lmsensors/fs.go | 44 +
.../plugin/go.d/modules/sensors/lmsensors/parse.go | 364 +
.../go.d/modules/sensors/lmsensors/scanner.go | 228 +
.../go.d/modules/sensors/lmsensors/scanner_test.go | 523 +
.../go.d/modules/sensors/lmsensors/sensor.go | 177 +
src/go/plugin/go.d/modules/sensors/metadata.yaml | 200 +-
src/go/plugin/go.d/modules/sensors/sensors.go | 48 +-
src/go/plugin/go.d/modules/sensors/sensors_test.go | 495 +-
.../go.d/modules/sensors/testdata/config.json | 13 +-
.../go.d/modules/sensors/testdata/config.yaml | 7 +-
.../testdata/sensors-temp-in-curr-power-fan.txt | 72 -
.../go.d/modules/sensors/testdata/sensors-temp.txt | 81 -
src/go/plugin/go.d/modules/smartctl/charts.go | 2 +
src/go/plugin/go.d/modules/smartctl/collect.go | 2 +
.../go.d/modules/smartctl/config_schema.json | 1 -
src/go/plugin/go.d/modules/smartctl/doc.go | 3 +
src/go/plugin/go.d/modules/smartctl/exec.go | 2 +
src/go/plugin/go.d/modules/smartctl/init.go | 4 +-
.../modules/smartctl/integrations/s.m.a.r.t..md | 9 +-
src/go/plugin/go.d/modules/smartctl/metadata.yaml | 2 +-
src/go/plugin/go.d/modules/smartctl/scan.go | 2 +
.../plugin/go.d/modules/smartctl/smart_device.go | 2 +
src/go/plugin/go.d/modules/smartctl/smartctl.go | 29 +-
.../plugin/go.d/modules/smartctl/smartctl_test.go | 30 +-
src/go/plugin/go.d/modules/snmp/charts.go | 6 +-
src/go/plugin/go.d/modules/snmp/collect.go | 347 +-
.../go.d/modules/snmp/collect_custom_oids.go | 45 +
src/go/plugin/go.d/modules/snmp/collect_if_mib.go | 251 +
.../plugin/go.d/modules/snmp/collect_sys_info.go | 93 +
src/go/plugin/go.d/modules/snmp/config.go | 4 +
src/go/plugin/go.d/modules/snmp/config_schema.json | 44 +-
.../modules/snmp/entnum/enterprise-numbers.txt | 249594 ++++++++++++++++++
src/go/plugin/go.d/modules/snmp/entnum/lookup.go | 82 +
src/go/plugin/go.d/modules/snmp/init.go | 10 +-
.../go.d/modules/snmp/integrations/snmp_devices.md | 12 +-
src/go/plugin/go.d/modules/snmp/metadata.yaml | 16 +
src/go/plugin/go.d/modules/snmp/snmp.go | 38 +-
src/go/plugin/go.d/modules/snmp/snmp_test.go | 10 +-
.../plugin/go.d/modules/snmp/testdata/config.json | 8 +
.../plugin/go.d/modules/snmp/testdata/config.yaml | 6 +
src/go/plugin/go.d/modules/spigotmc/README.md | 1 +
src/go/plugin/go.d/modules/spigotmc/charts.go | 59 +
src/go/plugin/go.d/modules/spigotmc/client.go | 75 +
src/go/plugin/go.d/modules/spigotmc/collect.go | 126 +
.../go.d/modules/spigotmc/config_schema.json | 52 +
.../go.d/modules/spigotmc/integrations/spigotmc.md | 221 +
src/go/plugin/go.d/modules/spigotmc/metadata.yaml | 136 +
src/go/plugin/go.d/modules/spigotmc/spigotmc.go | 104 +
.../plugin/go.d/modules/spigotmc/spigotmc_test.go | 351 +
.../go.d/modules/spigotmc/testdata/config.json | 6 +
.../go.d/modules/spigotmc/testdata/config.yaml | 4 +
src/go/plugin/go.d/modules/squid/collect.go | 43 +-
.../plugin/go.d/modules/squid/config_schema.json | 1 -
.../go.d/modules/squid/integrations/squid.md | 4 +-
src/go/plugin/go.d/modules/squid/squid.go | 23 +-
src/go/plugin/go.d/modules/squid/squid_test.go | 8 +-
.../go.d/modules/squidlog/config_schema.json | 1 -
.../squidlog/integrations/squid_log_files.md | 6 +-
src/go/plugin/go.d/modules/squidlog/metadata.yaml | 2 +-
src/go/plugin/go.d/modules/squidlog/squidlog.go | 10 +-
.../plugin/go.d/modules/squidlog/squidlog_test.go | 27 +-
src/go/plugin/go.d/modules/storcli/charts.go | 19 +
src/go/plugin/go.d/modules/storcli/collect.go | 2 +
.../go.d/modules/storcli/collect_controllers.go | 13 +-
.../plugin/go.d/modules/storcli/collect_drives.go | 2 +
.../plugin/go.d/modules/storcli/config_schema.json | 1 -
src/go/plugin/go.d/modules/storcli/doc.go | 3 +
src/go/plugin/go.d/modules/storcli/exec.go | 2 +
src/go/plugin/go.d/modules/storcli/init.go | 2 +
.../modules/storcli/integrations/storecli_raid.md | 10 +-
src/go/plugin/go.d/modules/storcli/metadata.yaml | 8 +-
src/go/plugin/go.d/modules/storcli/storcli.go | 15 +-
src/go/plugin/go.d/modules/storcli/storcli_test.go | 25 +-
src/go/plugin/go.d/modules/supervisord/client.go | 4 +-
.../go.d/modules/supervisord/config_schema.json | 1 -
src/go/plugin/go.d/modules/supervisord/init.go | 2 +-
.../modules/supervisord/integrations/supervisor.md | 4 +-
.../plugin/go.d/modules/supervisord/supervisord.go | 19 +-
.../go.d/modules/supervisord/supervisord_test.go | 31 +-
src/go/plugin/go.d/modules/systemdunits/charts.go | 1 -
src/go/plugin/go.d/modules/systemdunits/client.go | 1 -
src/go/plugin/go.d/modules/systemdunits/collect.go | 1 -
.../modules/systemdunits/collect_unit_files.go | 1 -
.../go.d/modules/systemdunits/collect_units.go | 1 -
.../go.d/modules/systemdunits/config_schema.json | 1 -
src/go/plugin/go.d/modules/systemdunits/init.go | 3 +-
.../systemdunits/integrations/systemd_units.md | 4 +-
.../go.d/modules/systemdunits/systemdunits.go | 31 +-
.../go.d/modules/systemdunits/systemdunits_test.go | 19 +-
src/go/plugin/go.d/modules/tengine/apiclient.go | 247 -
src/go/plugin/go.d/modules/tengine/collect.go | 20 +-
.../plugin/go.d/modules/tengine/config_schema.json | 1 -
.../go.d/modules/tengine/integrations/tengine.md | 4 +-
src/go/plugin/go.d/modules/tengine/metrics.go | 75 -
src/go/plugin/go.d/modules/tengine/status.go | 269 +
src/go/plugin/go.d/modules/tengine/tengine.go | 40 +-
src/go/plugin/go.d/modules/tengine/tengine_test.go | 1 -
src/go/plugin/go.d/modules/testrandom/charts.go | 60 +
src/go/plugin/go.d/modules/testrandom/collect.go | 47 +
.../go.d/modules/testrandom/config_schema.json | 176 +
src/go/plugin/go.d/modules/testrandom/init.go | 64 +
.../go.d/modules/testrandom/testdata/config.json | 17 +
.../go.d/modules/testrandom/testdata/config.yaml | 13 +
.../plugin/go.d/modules/testrandom/testrandom.go | 109 +
.../go.d/modules/testrandom/testrandom_test.go | 306 +
src/go/plugin/go.d/modules/tomcat/charts.go | 2 +-
src/go/plugin/go.d/modules/tomcat/collect.go | 34 +-
.../plugin/go.d/modules/tomcat/config_schema.json | 1 -
src/go/plugin/go.d/modules/tomcat/init.go | 2 +-
.../go.d/modules/tomcat/integrations/tomcat.md | 4 +-
src/go/plugin/go.d/modules/tomcat/tomcat.go | 21 +-
src/go/plugin/go.d/modules/tomcat/tomcat_test.go | 4 +-
src/go/plugin/go.d/modules/tor/client.go | 6 +-
src/go/plugin/go.d/modules/tor/config_schema.json | 1 -
src/go/plugin/go.d/modules/tor/integrations/tor.md | 4 +-
src/go/plugin/go.d/modules/tor/tor.go | 16 +-
.../plugin/go.d/modules/traefik/config_schema.json | 1 -
src/go/plugin/go.d/modules/traefik/init.go | 4 +-
.../go.d/modules/traefik/integrations/traefik.md | 4 +-
src/go/plugin/go.d/modules/traefik/traefik.go | 21 +-
src/go/plugin/go.d/modules/traefik/traefik_test.go | 32 +-
src/go/plugin/go.d/modules/typesense/README.md | 1 +
src/go/plugin/go.d/modules/typesense/charts.go | 105 +
src/go/plugin/go.d/modules/typesense/collect.go | 129 +
.../go.d/modules/typesense/config_schema.json | 192 +
src/go/plugin/go.d/modules/typesense/init.go | 3 +
.../modules/typesense/integrations/typesense.md | 245 +
src/go/plugin/go.d/modules/typesense/metadata.yaml | 222 +
.../go.d/modules/typesense/testdata/config.json | 21 +
.../go.d/modules/typesense/testdata/config.yaml | 18 +
.../typesense/testdata/v27.0/health_nok.json | 4 +
.../typesense/testdata/v27.0/health_ok.json | 3 +
.../modules/typesense/testdata/v27.0/stats.json | 15 +
src/go/plugin/go.d/modules/typesense/typesense.go | 121 +
.../go.d/modules/typesense/typesense_test.go | 318 +
.../plugin/go.d/modules/unbound/config_schema.json | 1 -
src/go/plugin/go.d/modules/unbound/init.go | 8 +-
.../go.d/modules/unbound/integrations/unbound.md | 4 +-
src/go/plugin/go.d/modules/unbound/unbound.go | 21 +-
src/go/plugin/go.d/modules/unbound/unbound_test.go | 22 +-
src/go/plugin/go.d/modules/upsd/client.go | 6 +-
src/go/plugin/go.d/modules/upsd/config_schema.json | 1 -
.../go.d/modules/upsd/integrations/ups_nut.md | 4 +-
src/go/plugin/go.d/modules/upsd/upsd.go | 18 +-
src/go/plugin/go.d/modules/uwsgi/client.go | 6 +-
.../plugin/go.d/modules/uwsgi/config_schema.json | 1 -
.../go.d/modules/uwsgi/integrations/uwsgi.md | 4 +-
src/go/plugin/go.d/modules/uwsgi/uwsgi.go | 14 +-
src/go/plugin/go.d/modules/varnish/README.md | 1 +
src/go/plugin/go.d/modules/varnish/charts.go | 387 +
src/go/plugin/go.d/modules/varnish/collect.go | 182 +
.../plugin/go.d/modules/varnish/config_schema.json | 51 +
src/go/plugin/go.d/modules/varnish/exec.go | 81 +
src/go/plugin/go.d/modules/varnish/init.go | 28 +
.../go.d/modules/varnish/integrations/varnish.md | 231 +
src/go/plugin/go.d/modules/varnish/metadata.yaml | 224 +
.../go.d/modules/varnish/testdata/config.json | 6 +
.../go.d/modules/varnish/testdata/config.yaml | 4 +
.../modules/varnish/testdata/v7.1/varnishstat.txt | 370 +
src/go/plugin/go.d/modules/varnish/varnish.go | 105 +
src/go/plugin/go.d/modules/varnish/varnish_test.go | 255 +
src/go/plugin/go.d/modules/vcsa/client/client.go | 28 +-
src/go/plugin/go.d/modules/vcsa/config_schema.json | 1 -
src/go/plugin/go.d/modules/vcsa/init.go | 2 +-
.../vcsa/integrations/vcenter_server_appliance.md | 4 +-
src/go/plugin/go.d/modules/vcsa/vcsa.go | 20 +-
src/go/plugin/go.d/modules/vcsa/vcsa_test.go | 2 +-
src/go/plugin/go.d/modules/vernemq/charts.go | 1883 +-
src/go/plugin/go.d/modules/vernemq/collect.go | 350 +-
.../plugin/go.d/modules/vernemq/config_schema.json | 1 -
src/go/plugin/go.d/modules/vernemq/init.go | 6 +-
.../go.d/modules/vernemq/integrations/vernemq.md | 207 +-
src/go/plugin/go.d/modules/vernemq/metadata.yaml | 504 +-
src/go/plugin/go.d/modules/vernemq/metrics.go | 271 +-
.../vernemq/testdata/metrics-v1.10.1-mqtt5.txt | 416 -
.../go.d/modules/vernemq/testdata/non_vernemq.txt | 27 -
.../modules/vernemq/testdata/v1.10.1/metrics.txt | 416 +
.../modules/vernemq/testdata/v2.0.1/metrics.txt | 588 +
src/go/plugin/go.d/modules/vernemq/vernemq.go | 51 +-
src/go/plugin/go.d/modules/vernemq/vernemq_test.go | 1147 +-
.../plugin/go.d/modules/vsphere/config_schema.json | 1 -
.../go.d/modules/vsphere/discover/discover.go | 2 +-
src/go/plugin/go.d/modules/vsphere/init.go | 2 +-
.../vsphere/integrations/vmware_vcenter_server.md | 8 +-
src/go/plugin/go.d/modules/vsphere/match/match.go | 2 +-
.../go.d/modules/vsphere/match/match_test.go | 2 +-
.../plugin/go.d/modules/vsphere/scrape/scrape.go | 2 +-
.../vsphere/scrape/throttled_caller_test.go | 8 +-
src/go/plugin/go.d/modules/vsphere/vsphere.go | 26 +-
src/go/plugin/go.d/modules/vsphere/vsphere_test.go | 25 +-
src/go/plugin/go.d/modules/w1sensor/README.md | 1 +
src/go/plugin/go.d/modules/w1sensor/charts.go | 59 +
src/go/plugin/go.d/modules/w1sensor/collect.go | 112 +
.../go.d/modules/w1sensor/config_schema.json | 31 +
src/go/plugin/go.d/modules/w1sensor/doc.go | 3 +
.../w1sensor/integrations/1-wire_sensors.md | 190 +
src/go/plugin/go.d/modules/w1sensor/metadata.yaml | 95 +
.../go.d/modules/w1sensor/testdata/config.json | 4 +
.../go.d/modules/w1sensor/testdata/config.yaml | 2 +
.../testdata/devices/28-01204e9d2fa0/w1_slave | 2 +
.../testdata/devices/28-01204e9d2fa1/w1_slave | 2 +
.../testdata/devices/28-01204e9d2fa2/w1_slave | 2 +
.../testdata/devices/28-01204e9d2fa3/w1_slave | 2 +
src/go/plugin/go.d/modules/w1sensor/w1sensor.go | 96 +
.../plugin/go.d/modules/w1sensor/w1sensor_test.go | 175 +
.../plugin/go.d/modules/weblog/config_schema.json | 26 +-
src/go/plugin/go.d/modules/weblog/init.go | 2 +-
.../weblog/integrations/web_server_log_files.md | 6 +-
src/go/plugin/go.d/modules/weblog/metadata.yaml | 2 +-
src/go/plugin/go.d/modules/weblog/weblog.go | 19 +-
src/go/plugin/go.d/modules/weblog/weblog_test.go | 11 +-
.../go.d/modules/whoisquery/config_schema.json | 1 -
.../integrations/domain_expiration_date.md | 4 +-
.../plugin/go.d/modules/whoisquery/whoisquery.go | 22 +-
.../go.d/modules/whoisquery/whoisquery_test.go | 21 +-
.../plugin/go.d/modules/windows/config_schema.json | 1 -
src/go/plugin/go.d/modules/windows/init.go | 4 +-
.../windows/integrations/active_directory.md | 18 +-
.../go.d/modules/windows/integrations/hyperv.md | 18 +-
.../modules/windows/integrations/ms_exchange.md | 18 +-
.../modules/windows/integrations/ms_sql_server.md | 18 +-
.../modules/windows/integrations/net_framework.md | 18 +-
.../go.d/modules/windows/integrations/windows.md | 18 +-
src/go/plugin/go.d/modules/windows/metadata.yaml | 14 +-
src/go/plugin/go.d/modules/windows/windows.go | 21 +-
src/go/plugin/go.d/modules/windows/windows_test.go | 19 +-
.../go.d/modules/wireguard/config_schema.json | 1 -
.../modules/wireguard/integrations/wireguard.md | 4 +-
src/go/plugin/go.d/modules/wireguard/wireguard.go | 1 -
.../go.d/modules/x509check/config_schema.json | 1 -
.../x509check/integrations/x.509_certificate.md | 4 +-
src/go/plugin/go.d/modules/x509check/x509check.go | 26 +-
.../go.d/modules/x509check/x509check_test.go | 19 +-
src/go/plugin/go.d/modules/zfspool/charts.go | 2 +
src/go/plugin/go.d/modules/zfspool/collect.go | 2 +
.../go.d/modules/zfspool/collect_zpool_list.go | 2 +
.../modules/zfspool/collect_zpool_list_vdev.go | 2 +
.../plugin/go.d/modules/zfspool/config_schema.json | 1 -
src/go/plugin/go.d/modules/zfspool/doc.go | 3 +
src/go/plugin/go.d/modules/zfspool/exec.go | 2 +
src/go/plugin/go.d/modules/zfspool/init.go | 2 +
.../go.d/modules/zfspool/integrations/zfs_pools.md | 9 +-
src/go/plugin/go.d/modules/zfspool/metadata.yaml | 2 +-
src/go/plugin/go.d/modules/zfspool/zfspool.go | 20 +-
src/go/plugin/go.d/modules/zfspool/zfspool_test.go | 4 +-
.../go.d/modules/zookeeper/config_schema.json | 1 -
.../plugin/go.d/modules/zookeeper/fetcher_test.go | 1 +
src/go/plugin/go.d/modules/zookeeper/init.go | 8 +-
.../modules/zookeeper/integrations/zookeeper.md | 4 +-
src/go/plugin/go.d/modules/zookeeper/zookeeper.go | 18 +-
.../go.d/modules/zookeeper/zookeeper_test.go | 22 +-
src/go/plugin/go.d/pkg/README.md | 10 +-
src/go/plugin/go.d/pkg/confopt/duration.go | 72 +
src/go/plugin/go.d/pkg/confopt/duration_test.go | 114 +
src/go/plugin/go.d/pkg/dockerhost/dockerhost.go | 75 +
src/go/plugin/go.d/pkg/iprange/README.md | 8 -
src/go/plugin/go.d/pkg/k8sclient/k8sclient.go | 2 +-
src/go/plugin/go.d/pkg/logs/csv.go | 2 +-
src/go/plugin/go.d/pkg/matcher/README.md | 142 -
src/go/plugin/go.d/pkg/matcher/cache.go | 56 -
src/go/plugin/go.d/pkg/matcher/cache_test.go | 53 -
src/go/plugin/go.d/pkg/matcher/doc.go | 40 -
src/go/plugin/go.d/pkg/matcher/doc_test.go | 49 -
src/go/plugin/go.d/pkg/matcher/expr.go | 62 -
src/go/plugin/go.d/pkg/matcher/expr_test.go | 100 -
src/go/plugin/go.d/pkg/matcher/glob.go | 265 -
src/go/plugin/go.d/pkg/matcher/glob_test.go | 97 -
src/go/plugin/go.d/pkg/matcher/logical.go | 101 -
src/go/plugin/go.d/pkg/matcher/logical_test.go | 97 -
src/go/plugin/go.d/pkg/matcher/matcher.go | 149 -
src/go/plugin/go.d/pkg/matcher/matcher_test.go | 122 -
src/go/plugin/go.d/pkg/matcher/regexp.go | 60 -
src/go/plugin/go.d/pkg/matcher/regexp_test.go | 66 -
src/go/plugin/go.d/pkg/matcher/simple_patterns.go | 65 -
.../go.d/pkg/matcher/simple_patterns_test.go | 88 -
src/go/plugin/go.d/pkg/matcher/string.go | 48 -
src/go/plugin/go.d/pkg/matcher/string_test.go | 62 -
src/go/plugin/go.d/pkg/metrics/unique_counter.go | 3 +-
src/go/plugin/go.d/pkg/multipath/multipath.go | 90 -
src/go/plugin/go.d/pkg/multipath/multipath_test.go | 60 -
.../pkg/multipath/testdata/data1/test-empty.conf | 0
.../go.d/pkg/multipath/testdata/data1/test.conf | 1 -
.../pkg/multipath/testdata/data2/test-empty.conf | 0
.../go.d/pkg/multipath/testdata/data2/test.conf | 1 -
src/go/plugin/go.d/pkg/prometheus/client.go | 19 +-
src/go/plugin/go.d/pkg/prometheus/client_test.go | 19 +-
src/go/plugin/go.d/pkg/prometheus/metric_series.go | 8 +-
.../plugin/go.d/pkg/prometheus/selector/README.md | 8 -
.../plugin/go.d/pkg/prometheus/selector/parse.go | 2 +-
.../go.d/pkg/prometheus/selector/parse_test.go | 2 +-
.../go.d/pkg/prometheus/selector/selector.go | 2 +-
src/go/plugin/go.d/pkg/socket/client.go | 94 +-
src/go/plugin/go.d/pkg/socket/client_test.go | 37 +-
src/go/plugin/go.d/pkg/socket/types.go | 41 -
src/go/plugin/go.d/pkg/stm/stm.go | 2 +-
src/go/plugin/go.d/pkg/stm/stm_test.go | 10 +-
src/go/plugin/go.d/pkg/web/client.go | 106 +-
src/go/plugin/go.d/pkg/web/client_config.go | 81 +
src/go/plugin/go.d/pkg/web/client_config_test.go | 25 +
src/go/plugin/go.d/pkg/web/client_test.go | 23 -
src/go/plugin/go.d/pkg/web/config.go | 11 +
src/go/plugin/go.d/pkg/web/doc.go | 2 +-
src/go/plugin/go.d/pkg/web/doc_test.go | 12 +-
src/go/plugin/go.d/pkg/web/duration.go | 72 -
src/go/plugin/go.d/pkg/web/duration_test.go | 114 -
src/go/plugin/go.d/pkg/web/request.go | 105 -
src/go/plugin/go.d/pkg/web/request_config.go | 105 +
src/go/plugin/go.d/pkg/web/request_config_test.go | 208 +
src/go/plugin/go.d/pkg/web/request_test.go | 208 -
src/go/plugin/go.d/pkg/web/web.go | 11 -
src/health/README.md | 6 +-
src/health/REFERENCE.md | 10 +-
src/health/health.c | 37 +-
src/health/health.d/anomalies.conf | 25 -
src/health/health.d/apcupsd.conf | 48 +-
src/health/health.d/boinc.conf | 6 +-
src/health/health.d/ceph.conf | 18 +-
src/health/health.d/disks.conf | 28 +-
src/health/health.d/net.conf | 24 +-
src/health/health.d/vernemq.conf | 188 +-
src/health/health.h | 10 +-
src/health/health_config.c | 25 +-
src/health/health_dyncfg.c | 25 +-
src/health/health_event_loop.c | 12 +-
src/health/health_internals.h | 4 +-
src/health/health_notifications.c | 35 +-
src/health/notifications/README.md | 16 +-
src/health/notifications/alarm-notify.sh.in | 62 +-
src/health/notifications/alerta/README.md | 6 +-
src/health/notifications/alerta/metadata.yaml | 2 +-
src/health/notifications/awssns/README.md | 8 +-
src/health/notifications/awssns/metadata.yaml | 4 +-
src/health/notifications/custom/README.md | 4 +-
src/health/notifications/discord/README.md | 6 +-
src/health/notifications/discord/metadata.yaml | 2 +-
src/health/notifications/dynatrace/README.md | 4 +-
src/health/notifications/email/README.md | 6 +-
src/health/notifications/email/metadata.yaml | 2 +-
src/health/notifications/flock/README.md | 6 +-
src/health/notifications/flock/metadata.yaml | 2 +-
src/health/notifications/gotify/README.md | 4 +-
src/health/notifications/health_alarm_notify.conf | 21 +
src/health/notifications/ilert/README.md | 96 +
src/health/notifications/ilert/metadata.yaml | 55 +
src/health/notifications/irc/README.md | 6 +-
src/health/notifications/irc/metadata.yaml | 2 +-
src/health/notifications/kavenegar/README.md | 6 +-
src/health/notifications/kavenegar/metadata.yaml | 2 +-
src/health/notifications/matrix/README.md | 6 +-
src/health/notifications/matrix/metadata.yaml | 2 +-
src/health/notifications/messagebird/README.md | 6 +-
src/health/notifications/messagebird/metadata.yaml | 2 +-
src/health/notifications/msteams/README.md | 6 +-
src/health/notifications/msteams/metadata.yaml | 2 +-
src/health/notifications/ntfy/README.md | 6 +-
src/health/notifications/ntfy/metadata.yaml | 2 +-
src/health/notifications/opsgenie/README.md | 4 +-
src/health/notifications/pagerduty/README.md | 6 +-
src/health/notifications/pagerduty/metadata.yaml | 2 +-
src/health/notifications/prowl/README.md | 6 +-
src/health/notifications/prowl/metadata.yaml | 2 +-
src/health/notifications/pushbullet/README.md | 6 +-
src/health/notifications/pushbullet/metadata.yaml | 2 +-
src/health/notifications/pushover/README.md | 6 +-
src/health/notifications/pushover/metadata.yaml | 2 +-
src/health/notifications/rocketchat/README.md | 6 +-
src/health/notifications/rocketchat/metadata.yaml | 2 +-
src/health/notifications/slack/README.md | 4 +-
src/health/notifications/smstools3/README.md | 6 +-
src/health/notifications/smstools3/metadata.yaml | 2 +-
src/health/notifications/syslog/README.md | 6 +-
src/health/notifications/syslog/metadata.yaml | 2 +-
src/health/notifications/telegram/README.md | 6 +-
src/health/notifications/telegram/metadata.yaml | 2 +-
src/health/notifications/twilio/README.md | 6 +-
src/health/notifications/twilio/metadata.yaml | 2 +-
src/health/notifications/web/README.md | 10 -
src/health/rrdcalc.c | 2 +-
src/health/rrdvar.c | 14 -
src/health/rrdvar.h | 2 -
.../schema.d/health%3Aalert%3Aprototype.json | 2 +-
src/libnetdata/README.md | 11 -
src/libnetdata/adaptive_resortable_list/README.md | 9 -
src/libnetdata/aral/README.md | 9 -
src/libnetdata/aral/aral.c | 9 +-
src/libnetdata/aral/aral.h | 4 +-
src/libnetdata/avl/README.md | 9 -
src/libnetdata/bitmap/bitmap64.h | 35 +
src/libnetdata/bitmap64.h | 35 -
src/libnetdata/buffer/README.md | 9 -
src/libnetdata/buffer/buffer.c | 4 +-
src/libnetdata/buffer/buffer.h | 138 +-
src/libnetdata/buffered_reader/buffered_reader.h | 2 -
src/libnetdata/c_rhash/c_rhash.c | 219 +
src/libnetdata/c_rhash/c_rhash.h | 63 +
src/libnetdata/c_rhash/c_rhash_internal.h | 19 +
src/libnetdata/c_rhash/tests.c | 273 +
src/libnetdata/circular_buffer/README.md | 9 -
src/libnetdata/clocks/clocks.c | 123 +-
src/libnetdata/clocks/clocks.h | 18 +-
src/libnetdata/common.h | 419 +
src/libnetdata/config/README.md | 9 -
src/libnetdata/config/appconfig.c | 973 +-
src/libnetdata/config/appconfig.h | 128 +-
src/libnetdata/config/appconfig_api_boolean.c | 68 +
src/libnetdata/config/appconfig_api_boolean.h | 24 +
src/libnetdata/config/appconfig_api_durations.c | 134 +
src/libnetdata/config/appconfig_api_durations.h | 21 +
src/libnetdata/config/appconfig_api_numbers.c | 43 +
src/libnetdata/config/appconfig_api_numbers.h | 16 +
src/libnetdata/config/appconfig_api_sizes.c | 86 +
src/libnetdata/config/appconfig_api_sizes.h | 16 +
src/libnetdata/config/appconfig_api_text.c | 17 +
src/libnetdata/config/appconfig_api_text.h | 12 +
src/libnetdata/config/appconfig_cleanup.c | 60 +
src/libnetdata/config/appconfig_conf_file.c | 320 +
src/libnetdata/config/appconfig_exporters.c | 95 +
src/libnetdata/config/appconfig_internals.h | 119 +
src/libnetdata/config/appconfig_migrate.c | 95 +
src/libnetdata/config/appconfig_options.c | 183 +
src/libnetdata/config/appconfig_sections.c | 82 + src/libnetdata/config/appconfig_traversal.c | 21 + src/libnetdata/config/dyncfg.c | 8 +- src/libnetdata/datetime/README.md | 8 - src/libnetdata/dictionary/dictionary-hashtable.h | 242 +- src/libnetdata/dictionary/dictionary.c | 11 +- src/libnetdata/dictionary/dictionary.h | 6 +- src/libnetdata/ebpf/README.md | 9 - src/libnetdata/ebpf/ebpf.c | 18 +- src/libnetdata/ebpf/ebpf.h | 15 +- src/libnetdata/eval/eval.c | 114 +- src/libnetdata/facets/facets.c | 277 +- src/libnetdata/facets/facets.h | 35 +- src/libnetdata/facets/logs_query_status.h | 868 + src/libnetdata/functions_evloop/functions_evloop.c | 12 +- src/libnetdata/functions_evloop/functions_evloop.h | 16 +- src/libnetdata/http/content_type.c | 20 +- src/libnetdata/http/http_access.c | 32 +- src/libnetdata/http/http_access.h | 52 +- src/libnetdata/inlined.h | 48 +- src/libnetdata/json/README.md | 9 - src/libnetdata/json/json-c-parser-inline.c | 52 + src/libnetdata/json/json-c-parser-inline.h | 69 +- src/libnetdata/libjudy/judy-malloc.c | 78 + src/libnetdata/libjudy/judy-malloc.h | 11 + src/libnetdata/libnetdata.c | 1066 +- src/libnetdata/libnetdata.h | 423 +- src/libnetdata/line_splitter/README.md | 9 - src/libnetdata/line_splitter/line_splitter.c | 37 +- src/libnetdata/line_splitter/line_splitter.h | 16 +- src/libnetdata/linked-lists.h | 133 - src/libnetdata/linked_lists/linked_lists.h | 133 + src/libnetdata/local-sockets/local-sockets.h | 1821 + src/libnetdata/locks/README.md | 23 +- src/libnetdata/log/README.md | 261 +- src/libnetdata/log/journal.c | 142 - src/libnetdata/log/journal.h | 18 - src/libnetdata/log/log.c | 2545 - src/libnetdata/log/log.h | 313 - src/libnetdata/log/nd_log-annotators.c | 84 + src/libnetdata/log/nd_log-common.h | 147 + src/libnetdata/log/nd_log-config.c | 207 + src/libnetdata/log/nd_log-field-formatters.c | 127 + src/libnetdata/log/nd_log-format-json.c | 78 + src/libnetdata/log/nd_log-format-logfmt.c | 151 + src/libnetdata/log/nd_log-init.c | 313 + src/libnetdata/log/nd_log-internals.c | 823 + src/libnetdata/log/nd_log-internals.h | 249 + src/libnetdata/log/nd_log-to-file.c | 41 + src/libnetdata/log/nd_log-to-syslog.c | 20 + src/libnetdata/log/nd_log-to-systemd-journal.c | 296 + src/libnetdata/log/nd_log-to-windows-common.h | 188 + src/libnetdata/log/nd_log-to-windows-events.c | 554 + src/libnetdata/log/nd_log.c | 465 + src/libnetdata/log/nd_log.h | 179 + src/libnetdata/log/nd_log_limit.c | 100 + src/libnetdata/log/nd_log_limit.h | 26 + src/libnetdata/log/nd_wevents_manifest.xml | 295 + src/libnetdata/log/systemd-cat-native.c | 73 +- src/libnetdata/log/systemd-journal-helpers.c | 142 + src/libnetdata/log/systemd-journal-helpers.h | 18 + src/libnetdata/log/wevt_netdata_compile.bat | 121 + src/libnetdata/log/wevt_netdata_compile.sh | 48 + src/libnetdata/log/wevt_netdata_install.bat | 52 + src/libnetdata/log/wevt_netdata_mc_generate.c | 518 + src/libnetdata/maps/local-sockets.h | 1419 - src/libnetdata/maps/system-groups.h | 67 - src/libnetdata/maps/system-services.h | 92 - src/libnetdata/maps/system-users.h | 67 - src/libnetdata/onewayalloc/README.md | 9 - src/libnetdata/os/close_range.c | 33 +- src/libnetdata/os/close_range.h | 12 +- src/libnetdata/os/get_system_cpus.c | 13 +- src/libnetdata/os/gettid.c | 11 +- src/libnetdata/os/gettid.h | 1 + src/libnetdata/os/os-windows-wrappers.c | 40 +- src/libnetdata/os/os-windows-wrappers.h | 2 + src/libnetdata/os/os.c | 3 +- src/libnetdata/os/os.h | 13 +- src/libnetdata/os/random.c | 200 + src/libnetdata/os/random.h | 19 
+ src/libnetdata/os/setenv.c | 25 +- src/libnetdata/os/setenv.h | 2 + src/libnetdata/os/sleep.c | 36 + src/libnetdata/os/sleep.h | 9 + .../os/system-maps/cache-host-users-and-groups.c | 101 + .../os/system-maps/cache-host-users-and-groups.h | 9 + .../os/system-maps/cached-gid-groupname.c | 149 + .../os/system-maps/cached-gid-groupname.h | 24 + .../os/system-maps/cached-sid-username.c | 200 + .../os/system-maps/cached-sid-username.h | 17 + .../os/system-maps/cached-uid-username.c | 149 + .../os/system-maps/cached-uid-username.h | 24 + src/libnetdata/os/system-maps/system-services.h | 96 + src/libnetdata/os/timestamps.c | 4 + src/libnetdata/os/timestamps.h | 42 + src/libnetdata/os/tinysleep.c | 23 - src/libnetdata/os/tinysleep.h | 8 - src/libnetdata/os/uuid_generate.c | 2 - src/libnetdata/os/windows-perflib/perflib-dump.c | 531 + src/libnetdata/os/windows-perflib/perflib-names.c | 245 + src/libnetdata/os/windows-perflib/perflib.c | 687 + src/libnetdata/os/windows-perflib/perflib.h | 92 + .../os/windows-wmi/windows-wmi-GetDiskDriveInfo.c | 144 + .../os/windows-wmi/windows-wmi-GetDiskDriveInfo.h | 30 + src/libnetdata/os/windows-wmi/windows-wmi.c | 110 + src/libnetdata/os/windows-wmi/windows-wmi.h | 23 + src/libnetdata/parsers/duration.c | 252 + src/libnetdata/parsers/duration.h | 32 + src/libnetdata/parsers/duration.html | 205 + src/libnetdata/parsers/durations.md | 94 + src/libnetdata/parsers/entries.c | 183 + src/libnetdata/parsers/entries.h | 19 + src/libnetdata/parsers/parsers.h | 12 + src/libnetdata/parsers/size.c | 212 + src/libnetdata/parsers/size.h | 20 + src/libnetdata/parsers/sizes.md | 52 + src/libnetdata/parsers/timeframe.c | 128 + src/libnetdata/parsers/timeframe.h | 28 + src/libnetdata/paths/paths.c | 327 + src/libnetdata/paths/paths.h | 26 + src/libnetdata/procfile/README.md | 9 - src/libnetdata/procfile/procfile.c | 67 +- src/libnetdata/procfile/procfile.h | 27 +- src/libnetdata/query_progress/progress.c | 11 +- src/libnetdata/required_dummies.h | 7 +- src/libnetdata/ringbuffer/ringbuffer.c | 195 + src/libnetdata/ringbuffer/ringbuffer.h | 46 + src/libnetdata/ringbuffer/ringbuffer_internal.h | 26 + src/libnetdata/sanitizers/chart_id_and_name.c | 145 + src/libnetdata/sanitizers/chart_id_and_name.h | 22 + src/libnetdata/sanitizers/sanitizers-functions.c | 68 + src/libnetdata/sanitizers/sanitizers-functions.h | 10 + src/libnetdata/sanitizers/sanitizers-labels.c | 157 + src/libnetdata/sanitizers/sanitizers-labels.h | 13 + src/libnetdata/sanitizers/sanitizers-pluginsd.c | 79 + src/libnetdata/sanitizers/sanitizers-pluginsd.h | 10 + src/libnetdata/sanitizers/sanitizers.h | 12 + src/libnetdata/sanitizers/utf8-sanitizer.c | 116 + src/libnetdata/sanitizers/utf8-sanitizer.h | 10 + src/libnetdata/simple_hashtable.h | 544 - src/libnetdata/simple_hashtable/simple_hashtable.h | 544 + .../simple_hashtable/simple_hashtable_undef.h | 35 + src/libnetdata/simple_hashtable_undef.h | 35 - src/libnetdata/simple_pattern/README.md | 10 - src/libnetdata/simple_pattern/simple_pattern.c | 23 +- src/libnetdata/simple_pattern/simple_pattern.h | 2 + src/libnetdata/socket/security.c | 5 +- src/libnetdata/socket/security.h | 26 +- src/libnetdata/socket/socket.c | 104 +- src/libnetdata/socket/socket.h | 10 +- src/libnetdata/spawn_server/log-forwarder.c | 322 + src/libnetdata/spawn_server/log-forwarder.h | 17 + src/libnetdata/spawn_server/spawn-tester.c | 493 + src/libnetdata/spawn_server/spawn_library.c | 51 + src/libnetdata/spawn_server/spawn_library.h | 10 + src/libnetdata/spawn_server/spawn_popen.c | 115 +- 
src/libnetdata/spawn_server/spawn_popen.h | 12 +- src/libnetdata/spawn_server/spawn_server.c | 1533 - src/libnetdata/spawn_server/spawn_server.h | 15 +- .../spawn_server/spawn_server_internals.h | 90 + src/libnetdata/spawn_server/spawn_server_libuv.c | 395 + src/libnetdata/spawn_server/spawn_server_nofork.c | 1308 + src/libnetdata/spawn_server/spawn_server_posix.c | 299 + src/libnetdata/spawn_server/spawn_server_windows.c | 456 + src/libnetdata/statistical/README.md | 9 - src/libnetdata/storage_number/README.md | 9 - src/libnetdata/string/README.md | 9 - src/libnetdata/string/string.c | 22 +- src/libnetdata/string/string.h | 17 +- src/libnetdata/string/utf8.c | 408 + src/libnetdata/string/utf8.h | 78 +- src/libnetdata/template-enum.h | 43 + src/libnetdata/threads/README.md | 9 - src/libnetdata/url/README.md | 9 - src/libnetdata/uuid/README.md | 8 - src/libnetdata/uuid/uuid.h | 2 + src/libnetdata/worker_utilization/README.md | 9 - src/libnetdata/xxHash/xxhash.h | 6773 + src/libnetdata/xxhash.h | 6773 - src/ml/Config.cc | 27 +- src/ml/ml-configuration.md | 26 +- src/ml/ml-private.h | 2 +- src/ml/ml.cc | 4 +- src/plugins.d/README.md | 868 + src/plugins.d/functions-table.md | 418 + src/plugins.d/gperf-config.txt | 118 + src/plugins.d/gperf-hashtable.h | 241 + src/plugins.d/plugins_d.c | 375 + src/plugins.d/plugins_d.h | 55 + src/plugins.d/pluginsd_dyncfg.c | 69 + src/plugins.d/pluginsd_dyncfg.h | 11 + src/plugins.d/pluginsd_functions.c | 419 + src/plugins.d/pluginsd_functions.h | 48 + src/plugins.d/pluginsd_internals.c | 99 + src/plugins.d/pluginsd_internals.h | 355 + src/plugins.d/pluginsd_parser.c | 1372 + src/plugins.d/pluginsd_parser.h | 245 + src/plugins.d/pluginsd_replication.c | 371 + src/plugins.d/pluginsd_replication.h | 14 + src/registry/README.md | 82 +- src/registry/registry.c | 32 +- src/registry/registry.h | 2 +- src/registry/registry_init.c | 6 +- src/registry/registry_internals.c | 4 +- src/registry/registry_internals.h | 16 +- src/streaming/README.md | 138 +- src/streaming/common.h | 9 - src/streaming/compression.c | 707 - src/streaming/compression.h | 175 - src/streaming/compression_brotli.c | 142 - src/streaming/compression_brotli.h | 15 - src/streaming/compression_gzip.c | 164 - src/streaming/compression_gzip.h | 15 - src/streaming/compression_lz4.c | 143 - src/streaming/compression_lz4.h | 19 - src/streaming/compression_zstd.c | 163 - src/streaming/compression_zstd.h | 19 - src/streaming/h2o-common.h | 9 + src/streaming/protocol/command-begin-set-end.c | 126 + src/streaming/protocol/command-chart-definition.c | 206 + src/streaming/protocol/command-claimed_id.c | 78 + src/streaming/protocol/command-function.c | 20 + src/streaming/protocol/command-host-labels.c | 25 + src/streaming/protocol/command-host-variables.c | 52 + src/streaming/protocol/command-nodeid.c | 128 + src/streaming/protocol/commands.c | 58 + src/streaming/protocol/commands.h | 41 + src/streaming/receiver.c | 673 +- src/streaming/receiver.h | 93 + src/streaming/replication.c | 9 +- src/streaming/replication.h | 6 +- src/streaming/rrdhost-status.c | 355 + src/streaming/rrdhost-status.h | 161 + src/streaming/rrdpush.c | 1418 - src/streaming/rrdpush.h | 761 +- src/streaming/sender-commit.c | 168 + src/streaming/sender-connect.c | 741 + src/streaming/sender-destinations.c | 143 + src/streaming/sender-destinations.h | 38 + src/streaming/sender-execute.c | 294 + src/streaming/sender-internals.h | 48 + src/streaming/sender.c | 1412 +- src/streaming/sender.h | 169 + src/streaming/stream-capabilities.c | 169 + 
src/streaming/stream-capabilities.h | 100 +
src/streaming/stream-compression/brotli.c | 142 +
src/streaming/stream-compression/brotli.h | 15 +
src/streaming/stream-compression/compression.c | 703 +
src/streaming/stream-compression/compression.h | 183 +
src/streaming/stream-compression/gzip.c | 164 +
src/streaming/stream-compression/gzip.h | 15 +
src/streaming/stream-compression/lz4.c | 143 +
src/streaming/stream-compression/lz4.h | 19 +
src/streaming/stream-compression/zstd.c | 163 +
src/streaming/stream-compression/zstd.h | 19 +
src/streaming/stream-conf.c | 137 +
src/streaming/stream-conf.h | 28 +
src/streaming/stream-handshake.c | 53 +
src/streaming/stream-handshake.h | 82 +
src/streaming/stream-path.c | 353 +
src/streaming/stream-path.h | 54 +
src/streaming/stream.conf | 73 +-
src/web/api/badges/README.md | 369 -
src/web/api/badges/web_buffer_svg.c | 1159 -
src/web/api/badges/web_buffer_svg.h | 18 -
src/web/api/exporters/README.md | 9 -
src/web/api/exporters/allmetrics.c | 132 -
src/web/api/exporters/allmetrics.h | 12 -
src/web/api/exporters/prometheus/README.md | 9 -
src/web/api/exporters/shell/README.md | 15 +-
src/web/api/exporters/shell/allmetrics_shell.c | 170 -
src/web/api/exporters/shell/allmetrics_shell.h | 21 -
src/web/api/formatters/README.md | 9 -
src/web/api/formatters/charts2json.c | 2 +-
src/web/api/formatters/csv/README.md | 9 -
src/web/api/formatters/json/README.md | 9 -
src/web/api/formatters/rrd2json.c | 40 -
src/web/api/formatters/rrd2json.h | 35 +-
src/web/api/formatters/ssv/README.md | 9 -
src/web/api/formatters/value/README.md | 9 -
src/web/api/functions/function-bearer_get_token.c | 81 +
src/web/api/functions/function-bearer_get_token.h | 14 +
src/web/api/functions/function-progress.c | 8 +
src/web/api/functions/function-progress.h | 10 +
src/web/api/functions/function-streaming.c | 627 +
src/web/api/functions/function-streaming.h | 12 +
src/web/api/functions/functions.c | 43 +
src/web/api/functions/functions.h | 14 +
src/web/api/health/README.md | 12 +-
src/web/api/http_auth.c | 338 +-
src/web/api/http_auth.h | 2 +-
src/web/api/ilove/README.md | 0
src/web/api/ilove/ilove.c | 306 -
src/web/api/ilove/ilove.h | 13 -
src/web/api/ilove/measure-text.js | 73 -
src/web/api/maps/contexts_alert_statuses.c | 60 +
src/web/api/maps/contexts_alert_statuses.h | 26 +
src/web/api/maps/contexts_options.c | 58 +
src/web/api/maps/contexts_options.h | 22 +
src/web/api/maps/datasource_formats.c | 89 +
src/web/api/maps/datasource_formats.h | 32 +
src/web/api/maps/maps.h | 12 +
src/web/api/maps/rrdr_options.c | 139 +
src/web/api/maps/rrdr_options.h | 52 +
src/web/api/queries/average/README.md | 9 -
src/web/api/queries/countif/README.md | 9 -
src/web/api/queries/des/README.md | 9 -
src/web/api/queries/incremental_sum/README.md | 9 -
src/web/api/queries/max/README.md | 9 -
src/web/api/queries/median/README.md | 10 -
src/web/api/queries/min/README.md | 9 -
src/web/api/queries/percentile/README.md | 10 -
src/web/api/queries/rrdr.h | 56 -
src/web/api/queries/ses/README.md | 9 -
src/web/api/queries/stddev/README.md | 9 -
src/web/api/queries/sum/README.md | 9 -
src/web/api/queries/trimmed_mean/README.md | 10 -
src/web/api/queries/weights.c | 16 +-
src/web/api/queries/weights.h | 2 -
src/web/api/v1/api_v1_aclk.c | 20 +
src/web/api/v1/api_v1_alarms.c | 153 +
src/web/api/v1/api_v1_allmetrics.c | 308 +
src/web/api/v1/api_v1_badge/README.md | 360 +
src/web/api/v1/api_v1_badge/web_buffer_svg.c | 1160 +
src/web/api/v1/api_v1_calls.h | 47 +
src/web/api/v1/api_v1_charts.c | 64 +
src/web/api/v1/api_v1_config.c | 92 +
src/web/api/v1/api_v1_context.c | 68 +
src/web/api/v1/api_v1_contexts.c | 61 +
src/web/api/v1/api_v1_data.c | 246 +
src/web/api/v1/api_v1_dbengine.c | 97 +
src/web/api/v1/api_v1_function.c | 44 +
src/web/api/v1/api_v1_functions.c | 19 +
src/web/api/v1/api_v1_info.c | 207 +
src/web/api/v1/api_v1_manage.c | 86 +
src/web/api/v1/api_v1_ml_info.c | 28 +
src/web/api/v1/api_v1_registry.c | 198 +
src/web/api/v1/api_v1_weights.c | 11 +
src/web/api/v2/api_v2_alert_config.c | 32 +
src/web/api/v2/api_v2_alert_transitions.c | 7 +
src/web/api/v2/api_v2_alerts.c | 7 +
src/web/api/v2/api_v2_bearer.c | 139 +
src/web/api/v2/api_v2_calls.h | 38 +
src/web/api/v2/api_v2_claim.c | 236 +
src/web/api/v2/api_v2_contexts.c | 78 +
src/web/api/v2/api_v2_data.c | 302 +
src/web/api/v2/api_v2_functions.c | 8 +
src/web/api/v2/api_v2_ilove/README.md | 0
src/web/api/v2/api_v2_ilove/ilove.c | 306 +
src/web/api/v2/api_v2_ilove/measure-text.js | 73 +
src/web/api/v2/api_v2_info.c | 7 +
src/web/api/v2/api_v2_node_instances.c | 10 +
src/web/api/v2/api_v2_nodes.c | 7 +
src/web/api/v2/api_v2_progress.c | 28 +
src/web/api/v2/api_v2_q.c | 9 +
src/web/api/v2/api_v2_versions.c | 7 +
src/web/api/v2/api_v2_webrtc.c | 8 +
src/web/api/v2/api_v2_weights.c | 152 +
src/web/api/v3/api_v3_calls.h | 11 +
src/web/api/v3/api_v3_me.c | 37 +
src/web/api/v3/api_v3_settings.c | 285 +
src/web/api/web_api.c | 221 +-
src/web/api/web_api.h | 22 +-
src/web/api/web_api_v1.c | 1874 +-
src/web/api/web_api_v1.h | 36 -
src/web/api/web_api_v2.c | 648 +-
src/web/api/web_api_v3.c | 263 +
src/web/api/web_api_v3.h | 12 +
src/web/gui/.dashboard-v2-notice.md | 8 -
src/web/gui/README.md | 163 +-
src/web/gui/bundle_dashboard_v1.py | 18 +-
src/web/gui/bundle_dashboard_v2.py | 97 -
src/web/gui/index.html | 245 -
src/web/gui/registry-access.html | 73 -
src/web/gui/registry-alert-redirect.html | 152 -
src/web/gui/registry-hello.html | 94 -
src/web/gui/static/splash.css | 171 -
src/web/gui/v1/dashboard_v1.cmake | 18 +-
src/web/gui/v2/.well-known/assetlinks.json | 11 -
src/web/gui/v2/1220.01d6bbaab869c74f4437.chunk.js | 1 -
src/web/gui/v2/1396.56f70d7c659ac0b694cd.chunk.js | 2 -
.../1396.56f70d7c659ac0b694cd.chunk.js.LICENSE.txt | 16 -
src/web/gui/v2/1418.16d53ba5cce2c6a8143a.chunk.js | 1 -
src/web/gui/v2/1782.d82eb301aa81b380dd0c.chunk.js | 1 -
src/web/gui/v2/1839.a4196d2a87ac0fdd9f34.chunk.js | 1 -
src/web/gui/v2/185.42bab351ba68de7ca4aa.chunk.js | 1 -
src/web/gui/v2/1876.e610906417b961290730.chunk.js | 1 -
src/web/gui/v2/195.4cdbea6af54d14a95949.chunk.js | 1 -
src/web/gui/v2/2007.b33ce2b4b736228fd681.chunk.js | 1 -
src/web/gui/v2/252.40edc9b0f6da1422f40b.chunk.js | 1 -
src/web/gui/v2/3104.3b70865e21a81a616af3.chunk.js | 1 -
src/web/gui/v2/3350.ae7151980981854dc3d1.chunk.js | 1 -
src/web/gui/v2/3455.f9ca876de57244386773.chunk.js | 2 -
.../3455.f9ca876de57244386773.chunk.js.LICENSE.txt | 13 -
src/web/gui/v2/3621.01ee70ee9c311ac163d9.chunk.js | 1 -
src/web/gui/v2/3624.bfeb1fdc3057ba82ddac.chunk.js | 1 -
src/web/gui/v2/3736.e572adfdf7951f74a741.chunk.js | 1 -
src/web/gui/v2/3750.4ad02f036f2a7c520b1c.chunk.js | 1 -
src/web/gui/v2/3843.89070793921be1288bb5.css | 2 -
src/web/gui/v2/3843.ffbb6f614ba4f7b77570.chunk.js | 1 -
src/web/gui/v2/3968.483ca2ad3b300293e655.chunk.js | 1 -
src/web/gui/v2/3D_PARTY_LICENSES.txt | 7457 -
src/web/gui/v2/4034.35199d2809d318eed690.chunk.js | 1 -
src/web/gui/v2/4140.46221d08bcda08826c78.chunk.js | 1 -
src/web/gui/v2/4140.89070793921be1288bb5.css | 2 -
src/web/gui/v2/4414.590ba07d470ba2ce7dd0.chunk.js | 1 -
src/web/gui/v2/4631.158982e127e11bdc6a45.chunk.js | 1 -
src/web/gui/v2/4680.7d8122d91e9d4582836a.chunk.js | 1 -
src/web/gui/v2/4958.5969fedc1ff7dc82775e.chunk.js | 1 -
src/web/gui/v2/5246.07c5a1649f0805c140fe.chunk.js | 1 -
src/web/gui/v2/5304.cc797fdd343c7e873b2f.chunk.js | 1 -
src/web/gui/v2/5426.254557ad3e1f2d14ad29.chunk.js | 1 -
src/web/gui/v2/5596.2036706750ff4028cff2.chunk.js | 1 -
src/web/gui/v2/5598.07ff43a6b96bd41e8637.chunk.js | 1 -
src/web/gui/v2/5700.b7c9908dc7f30a5a57e7.chunk.js | 1 -
src/web/gui/v2/5709.c494eb62187917e2f2f6.chunk.js | 2 -
.../5709.c494eb62187917e2f2f6.chunk.js.LICENSE.txt | 12 -
src/web/gui/v2/5794.252ff787d58d64eb4988.chunk.js | 1 -
src/web/gui/v2/6008.3d0636fe17f4f6274485.chunk.js | 1 -
src/web/gui/v2/6121.f7286809e53e1c6d655a.chunk.js | 2 -
.../6121.f7286809e53e1c6d655a.chunk.js.LICENSE.txt | 6 -
src/web/gui/v2/6323.26d4d949c9b6f8674c2e.chunk.js | 1 -
src/web/gui/v2/6331.89070793921be1288bb5.css | 2 -
src/web/gui/v2/6331.c91b5d104cdff1be3b80.chunk.js | 1 -
src/web/gui/v2/6384.0fad56b0bc902f186c98.chunk.js | 1 -
src/web/gui/v2/6469.47926fa38028dc7d0d41.chunk.js | 1 -
src/web/gui/v2/6469.89070793921be1288bb5.css | 2 -
src/web/gui/v2/6661.72f782bd78fea8c2d836.chunk.js | 1 -
src/web/gui/v2/6760.370b9780120c145da28f.chunk.js | 1 -
src/web/gui/v2/683.02c173493ef257c210fa.chunk.js | 1 -
src/web/gui/v2/683.cc9fa5f3bdc0bf3ab2fc.css | 10 -
src/web/gui/v2/6944.ab3e70c9ac0f05013b5f.chunk.js | 1 -
src/web/gui/v2/7144.382c341e09540fdebaa6.chunk.js | 2 -
.../7144.382c341e09540fdebaa6.chunk.js.LICENSE.txt | 1 -
src/web/gui/v2/7146.79304e386ac9238b7cf1.chunk.js | 1 -
src/web/gui/v2/7170.5d6047bb6ce9d77d53db.chunk.js | 1 -
src/web/gui/v2/7208.1d75cf5d007de32e403b.chunk.js | 1 -
src/web/gui/v2/7304.ed4690ec296b59fbe7fd.chunk.js | 1 -
src/web/gui/v2/7332.3acf93dcfa52c7f1bc18.chunk.js | 1 -
src/web/gui/v2/7340.25dce1c5cc66b613700f.chunk.js | 1 -
src/web/gui/v2/7436.1ebd371d70e6a87c5499.chunk.js | 1 -
src/web/gui/v2/7471.f96c4d04a73fb7551c03.chunk.js | 1 -
src/web/gui/v2/7487.89070793921be1288bb5.css | 2 -
src/web/gui/v2/7487.db63c95c27d973a07d9b.chunk.js | 1 -
src/web/gui/v2/749.e44087ac3a2e3a994318.chunk.js | 8 -
.../749.e44087ac3a2e3a994318.chunk.js.LICENSE.txt | 11 -
src/web/gui/v2/7519.7982a2e0fcdf82ba78dd.chunk.js | 1 -
src/web/gui/v2/7529.658d363e12e73df83b60.chunk.js | 1 -
src/web/gui/v2/7840.2f2023f2eb1dcc943d94.chunk.js | 1 -
src/web/gui/v2/785.d016913841bcc0209d5b.chunk.js | 1 -
src/web/gui/v2/7857.813ae058cca579e05462.chunk.js | 1 -
src/web/gui/v2/7959.4f20f4b203e2bad8af39.chunk.js | 1 -
src/web/gui/v2/8059.4fdc76bb2cac1f74b41b.chunk.js | 1 -
src/web/gui/v2/8239.c85fc9f3599f198a9efb.chunk.js | 1 -
src/web/gui/v2/8323.437406936b642e8f6cb3.chunk.js | 2 -
.../8323.437406936b642e8f6cb3.chunk.js.LICENSE.txt | 5 -
src/web/gui/v2/8323.e22de33686bb2f34063c.css | 2 -
src/web/gui/v2/8505.c330f2104fefd71717da.chunk.js | 1 -
src/web/gui/v2/86.2c88d4d37b88e2620051.chunk.js | 1 -
src/web/gui/v2/8637.0958494526e838a60d2b.js | 2 -
.../v2/8637.0958494526e838a60d2b.js.LICENSE.txt | 140 -
src/web/gui/v2/8784.a04e9c07186e1f057f56.chunk.js | 1 -
src/web/gui/v2/8842.406028f523a00acb97bd.chunk.js | 1 -
src/web/gui/v2/8910.019974f8675d8834dd07.chunk.js | 1 -
src/web/gui/v2/8938.5116982f737a2ef85330.chunk.js | 1 -
src/web/gui/v2/9292.cc5055091db9a0826933.chunk.js | 1 -
src/web/gui/v2/934.24d6fdc5f60aa6493962.chunk.js | 1 -
src/web/gui/v2/9400.6250bbf86c4fd3173de2.chunk.js | 1 -
src/web/gui/v2/9473.4fd4742ffb6b5348bea8.chunk.js | 1 -
src/web/gui/v2/963.35da4a3c4e49aac29dae.chunk.js | 1 -
src/web/gui/v2/979.3e5fddf93c977e6c71c3.chunk.js | 1 -
src/web/gui/v2/9818.3ce64e0b472412bfbc97.chunk.js | 1 -
src/web/gui/v2/9843.93f8c71c64ef97b9905e.chunk.js | 1 -
src/web/gui/v2/9912.702300c2dd9616289606.chunk.js | 1 -
src/web/gui/v2/LICENSE.md | 42 -
src/web/gui/v2/README.md | 1 -
src/web/gui/v2/agent.html | 245 -
src/web/gui/v2/allFiles.6.138.3.json | 386 -
src/web/gui/v2/allFiles.6.json | 386 -
src/web/gui/v2/app.08c9fe3ead1d43ff769b.js | 1 -
src/web/gui/v2/app.cb2e9f9a81cf9533384e.css | 2 -
src/web/gui/v2/apple-app-site-association | 11 -
src/web/gui/v2/bundlesManifest.6.json | 9 -
src/web/gui/v2/dashboard_v2.cmake | 303 -
src/web/gui/v2/favicon.ico | Bin 101252 -> 0 bytes
src/web/gui/v2/index.html | 245 -
src/web/gui/v2/local-agent.html | 245 -
.../gui/v2/netdata.charts.fdfd27674ac5533bbcc2.js | 1 -
src/web/gui/v2/netdata.ui.647a4c3303ee8ec0da64.js | 2 -
.../netdata.ui.647a4c3303ee8ec0da64.js.LICENSE.txt | 1 -
.../gui/v2/npm.react.dom.2994f1b4604bd8ce80f6.js | 2 -
...m.react.dom.2994f1b4604bd8ce80f6.js.LICENSE.txt | 9 -
src/web/gui/v2/registry-access.html | 73 -
src/web/gui/v2/registry-alert-redirect.html | 152 -
src/web/gui/v2/registry-hello.html | 94 -
src/web/gui/v2/runtime.ceccffb089cc539b1c1f.js | 1 -
src/web/gui/v2/static/.well-known/assetlinks.json | 11 -
src/web/gui/v2/static/apple-app-site-association | 11 -
src/web/gui/v2/static/email/img/clea_badge.png | Bin 1147 -> 0 bytes
src/web/gui/v2/static/email/img/clea_siren.png | Bin 2427 -> 0 bytes
src/web/gui/v2/static/email/img/community_icon.png | Bin 1271 -> 0 bytes
src/web/gui/v2/static/email/img/configure_icon.png | Bin 1256 -> 0 bytes
src/web/gui/v2/static/email/img/crit_badge.png | Bin 2479 -> 0 bytes
src/web/gui/v2/static/email/img/crit_siren.png | Bin 3432 -> 0 bytes
src/web/gui/v2/static/email/img/flood_siren.png | Bin 2059 -> 0 bytes
src/web/gui/v2/static/email/img/full_logo.png | Bin 1298 -> 0 bytes
src/web/gui/v2/static/email/img/header.png | Bin 5386 -> 0 bytes
src/web/gui/v2/static/email/img/isotype_600.png | Bin 9114 -> 0 bytes
src/web/gui/v2/static/email/img/label_critical.png | Bin 1595 -> 0 bytes
.../gui/v2/static/email/img/label_recovered.png | Bin 2027 -> 0 bytes
src/web/gui/v2/static/email/img/label_warning.png | Bin 1752 -> 0 bytes
.../gui/v2/static/email/img/reachability_siren.png | Bin 3951 -> 0 bytes
src/web/gui/v2/static/email/img/warn_badge.png | Bin 2204 -> 0 bytes
src/web/gui/v2/static/email/img/warn_siren.png | Bin 3005 -> 0 bytes
src/web/gui/v2/static/img/list-style-image.svg | 9 -
src/web/gui/v2/static/img/logos/os/alpine.svg | 1 -
src/web/gui/v2/static/img/logos/os/arch.svg | 72 -
src/web/gui/v2/static/img/logos/os/centos.svg | 13 -
src/web/gui/v2/static/img/logos/os/coreos.svg | 1 -
src/web/gui/v2/static/img/logos/os/debian.svg | 1 -
src/web/gui/v2/static/img/logos/os/docker.svg | 3 -
src/web/gui/v2/static/img/logos/os/fedora.svg | 13 -
src/web/gui/v2/static/img/logos/os/freebsd.svg | 1 -
src/web/gui/v2/static/img/logos/os/freenas.svg | 38 -
src/web/gui/v2/static/img/logos/os/gentoo.svg | 419 -
src/web/gui/v2/static/img/logos/os/kubernetes.svg | 84 -
src/web/gui/v2/static/img/logos/os/linux-small.svg | 3 -
src/web/gui/v2/static/img/logos/os/linux.svg | 1 -
src/web/gui/v2/static/img/logos/os/macos.svg | 13 -
src/web/gui/v2/static/img/logos/os/manjaro.svg | 91 -
src/web/gui/v2/static/img/logos/os/openstack.svg | 1 -
src/web/gui/v2/static/img/logos/os/opensuse.svg | 114 -
src/web/gui/v2/static/img/logos/os/openwrt.svg | 8 -
src/web/gui/v2/static/img/logos/os/oracle.svg | 28 -
src/web/gui/v2/static/img/logos/os/pfsense.svg | 7 -
src/web/gui/v2/static/img/logos/os/placeholder.svg | 3 -
.../gui/v2/static/img/logos/os/raspberry-pi.svg | 1 -
src/web/gui/v2/static/img/logos/os/redhat.svg | 1 -
src/web/gui/v2/static/img/logos/os/rocky.svg | 3 -
src/web/gui/v2/static/img/logos/os/suse.svg | 1 -
src/web/gui/v2/static/img/logos/os/ubuntu.svg | 1 -
.../v2/static/img/logos/services/access-point.svg | 7 -
.../gui/v2/static/img/logos/services/activemq.svg | 253 -
.../gui/v2/static/img/logos/services/adaptec.svg | 143 -
.../gui/v2/static/img/logos/services/alerta.svg | 20 -
.../gui/v2/static/img/logos/services/apache.svg | 138 -
src/web/gui/v2/static/img/logos/services/apc.svg | 103 -
.../gui/v2/static/img/logos/services/aws-sns.svg | 1 -
src/web/gui/v2/static/img/logos/services/aws.svg | 3 -
.../v2/static/img/logos/services/beanstalkd.svg | 112 -
src/web/gui/v2/static/img/logos/services/boinc.svg | 134 -
src/web/gui/v2/static/img/logos/services/btrfs.svg | 93 -
src/web/gui/v2/static/img/logos/services/ceph.svg | 1 -
.../gui/v2/static/img/logos/services/chrony.svg | 68 -
src/web/gui/v2/static/img/logos/services/cloud.svg | 13 -
.../gui/v2/static/img/logos/services/concul.svg | 8 -
.../gui/v2/static/img/logos/services/consul.svg | 1 -
.../gui/v2/static/img/logos/services/container.svg | 1 -
.../gui/v2/static/img/logos/services/couchdb.svg | 62 -
src/web/gui/v2/static/img/logos/services/cups.svg | 91 -
.../static/img/logos/services/data-encryption.svg | 4 -
src/web/gui/v2/static/img/logos/services/ddos.svg | 7 -
.../gui/v2/static/img/logos/services/discord.svg | 1 -
src/web/gui/v2/static/img/logos/services/dns.svg | 4 -
.../gui/v2/static/img/logos/services/docker.svg | 100 -
.../gui/v2/static/img/logos/services/dovecot.svg | 67 -
.../v2/static/img/logos/services/elasticsearch.svg | 74 -
src/web/gui/v2/static/img/logos/services/email.svg | 43 -
src/web/gui/v2/static/img/logos/services/exim.svg | 1 -
.../gui/v2/static/img/logos/services/fail2ban.svg | 1 -
src/web/gui/v2/static/img/logos/services/flock.svg | 3 -
.../gui/v2/static/img/logos/services/fluentd.svg | 91 -
src/web/gui/v2/static/img/logos/services/fping.svg | 82 -
.../v2/static/img/logos/services/freeradius.svg | 260 -
.../gui/v2/static/img/logos/services/fronius.svg | 1 -
.../v2/static/img/logos/services/gnu-freeipmi.svg | 1 -
.../gui/v2/static/img/logos/services/golang.svg | 3 -
.../gui/v2/static/img/logos/services/grafana.svg | 57 -
.../gui/v2/static/img/logos/services/graphite.svg | 1 -
.../gui/v2/static/img/logos/services/haproxy.svg | 1 -
src/web/gui/v2/static/img/logos/services/hub.svg | 1 -
.../gui/v2/static/img/logos/services/icecast.svg | 114 -
.../gui/v2/static/img/logos/services/influxdb.svg | 3 -
src/web/gui/v2/static/img/logos/services/ipfs.svg | 28 -
src/web/gui/v2/static/img/logos/services/irc.svg | 99 -
src/web/gui/v2/static/img/logos/services/isc.svg | 19 -
src/web/gui/v2/static/img/logos/services/kafka.svg | 1 -
.../gui/v2/static/img/logos/services/kairosdb.svg | 3 -
.../gui/v2/static/img/logos/services/kavenegar.svg | 10 -
.../gui/v2/static/img/logos/services/key-file.svg | 1 -
.../v2/static/img/logos/services/kubernetes.svg | 84 -
.../gui/v2/static/img/logos/services/libreswan.svg | 27 -
.../gui/v2/static/img/logos/services/libvirt.svg | 713 -
.../v2/static/img/logos/services/lighthttpd.svg | 1 -
src/web/gui/v2/static/img/logos/services/linux.svg | 1 -
.../gui/v2/static/img/logos/services/litespeed.svg | 91 -
.../v2/static/img/logos/services/lm-sensors.svg | 1 -
.../v2/static/img/logos/services/load-balancer.svg | 8 -
.../gui/v2/static/img/logos/services/log-file.svg | 55 -
.../gui/v2/static/img/logos/services/logstash.svg | 1 -
src/web/gui/v2/static/img/logos/services/lxd.svg | 200 -
.../gui/v2/static/img/logos/services/mariadb.svg | 49 -
.../gui/v2/static/img/logos/services/memcached.svg | 31 -
.../v2/static/img/logos/services/messagebird.svg | 1 -
.../gui/v2/static/img/logos/services/mongodb.svg | 1 -
src/web/gui/v2/static/img/logos/services/monit.svg | 12 -
.../v2/static/img/logos/services/monitoring.svg | 55 -
src/web/gui/v2/static/img/logos/services/mysql.svg | 17 -
.../gui/v2/static/img/logos/services/netfilter.svg | 98 -
.../static/img/logos/services/network-protocol.svg | 44 -
.../gui/v2/static/img/logos/services/network.svg | 14 -
src/web/gui/v2/static/img/logos/services/nfs.svg | 18 -
.../v2/static/img/logos/services/nginx-plus.svg | 28 -
src/web/gui/v2/static/img/logos/services/nginx.svg | 20 -
.../img/logos/services/notification-bell.svg | 55 -
src/web/gui/v2/static/img/logos/services/nsd.svg | 117 -
src/web/gui/v2/static/img/logos/services/ntpd.svg | 56 -
src/web/gui/v2/static/img/logos/services/nut.svg | 1 -
.../gui/v2/static/img/logos/services/nvidia.svg | 1 -
.../gui/v2/static/img/logos/services/openldap.svg | 1 -
.../gui/v2/static/img/logos/services/opensips.svg | 1 -
.../gui/v2/static/img/logos/services/opentsdb.svg | 27 -
.../gui/v2/static/img/logos/services/openvpn.svg | 62 -
.../gui/v2/static/img/logos/services/openzfs.svg | 72 -
.../gui/v2/static/img/logos/services/oracle.svg | 28 -
.../gui/v2/static/img/logos/services/pagerduty.svg | 12 -
.../gui/v2/static/img/logos/services/php-fpm.svg | 90 -
.../v2/static/img/logos/services/placeholder.svg | 8 -
.../gui/v2/static/img/logos/services/postfix.svg | 293 -
.../v2/static/img/logos/services/postgresql.svg | 22 -
.../gui/v2/static/img/logos/services/powerdns.svg | 1 -
.../gui/v2/static/img/logos/services/processor.svg | 1 -
.../v2/static/img/logos/services/prometheus.svg | 50 -
src/web/gui/v2/static/img/logos/services/prowl.svg | 35 -
.../gui/v2/static/img/logos/services/proxysql.svg | 1 -
.../gui/v2/static/img/logos/services/puppet.svg | 31 -
.../v2/static/img/logos/services/pushbullet.svg | 1 -
.../gui/v2/static/img/logos/services/pushover.svg | 7 -
src/web/gui/v2/static/img/logos/services/qos.svg | 7 -
.../gui/v2/static/img/logos/services/rabbitmq.svg | 75 -
.../v2/static/img/logos/services/raspberry-pi.svg | 1 -
src/web/gui/v2/static/img/logos/services/redis.svg | 1 -
.../gui/v2/static/img/logos/services/rethinkdb.svg | 805 -
.../v2/static/img/logos/services/retroshare.svg | 263 -
.../v2/static/img/logos/services/rocketchat.svg | 7 -
src/web/gui/v2/static/img/logos/services/samba.svg | 3 -
.../img/logos/services/server-connection.svg | 68 -
src/web/gui/v2/static/img/logos/services/slack.svg | 3 -
src/web/gui/v2/static/img/logos/services/sma.svg | 27 -
.../gui/v2/static/img/logos/services/smstools3.svg | 74 -
src/web/gui/v2/static/img/logos/services/solr.svg | 1 -
.../gui/v2/static/img/logos/services/spigot.svg | 1 -
.../v2/static/img/logos/services/springboot.svg | 1 -
src/web/gui/v2/static/img/logos/services/squid.svg | 398 -
.../gui/v2/static/img/logos/services/statsd.svg | 29 -
.../gui/v2/static/img/logos/services/stiebel.svg | 49 -
.../gui/v2/static/img/logos/services/systemd.svg | 152 -
.../gui/v2/static/img/logos/services/telegram.svg | 17 -
.../v2/static/img/logos/services/temperature.svg | 4 -
.../gui/v2/static/img/logos/services/tomcat.svg | 107 -
src/web/gui/v2/static/img/logos/services/tor.svg | 140 -
.../gui/v2/static/img/logos/services/traefik.svg | 90 -
.../gui/v2/static/img/logos/services/twilio.svg | 1 -
.../gui/v2/static/img/logos/services/unbound.svg | 105 -
src/web/gui/v2/static/img/logos/services/uwsgi.svg | 109 -
.../gui/v2/static/img/logos/services/varnish.svg | 1 -
.../gui/v2/static/img/logos/services/veritas.svg | 197 -
src/web/gui/v2/static/img/logos/services/xen.svg | 104 -
src/web/gui/v2/static/img/mail/isotype.png | Bin 2611 -> 0 bytes
src/web/gui/v2/static/img/mail/isotype.svg | 3 -
src/web/gui/v2/static/img/mail/logotype.png | Bin 1301 -> 0 bytes
src/web/gui/v2/static/img/mail/logotype.svg | 11 -
src/web/gui/v2/static/img/new-dashboard.svg | 52 -
src/web/gui/v2/static/img/no-filter-results.png | Bin 79381 -> 0 bytes
src/web/gui/v2/static/img/no-nodes-room.svg | 226 -
src/web/gui/v2/static/img/rack.png | Bin 9261 -> 0 bytes
.../pages/holding-page-503/holding-page-503.css | 101 -
.../pages/holding-page-503/holding-page-503.svg | 534 -
.../static/site/pages/holding-page-503/index.html | 39 -
.../holding-page-503/multiple-logos-group.svg | 32 -
.../pages/holding-page-503/netdata-logo-white.svg | 3 -
.../static/site/pages/holding-page-503/reset.svg | 3 -
src/web/gui/v2/static/splash.css | 171 -
src/web/gui/v2/sw.js | 1 -
src/web/rtc/webrtc.c | 10 +-
src/web/server/README.md | 203 +-
src/web/server/h2o/http_server.c | 27 +-
src/web/server/h2o/rrdpush.c | 384 +
src/web/server/h2o/streaming.c | 384 -
src/web/server/h2o/streaming.h | 2 -
src/web/server/static/README.md | 10 -
src/web/server/static/static-threaded.c | 9 -
src/web/server/web_client.c | 166 +-
src/web/server/web_client.h | 30 +-
src/web/server/web_client_cache.c | 3 +-
src/web/server/web_server.c | 4 -
system/freebsd/rc.d/netdata.in | 5 +
tests/health_mgmtapi/README.md | 9 -
3064 files changed, 391521 insertions(+), 139221 deletions(-)
delete mode 100644 docs/developer-and-contributor-corner/kubernetes-k8s-netdata.md
create mode 100644 docs/developer-and-contributor-corner/kubernetes-k8s-netdata.txt
delete mode 100644 docs/developer-and-contributor-corner/lamp-stack.md
create mode 100644 docs/developer-and-contributor-corner/lamp-stack.txt
delete mode 100644 docs/developer-and-contributor-corner/monitor-cockroachdb.md
create mode 100644 docs/developer-and-contributor-corner/monitor-cockroachdb.txt
delete mode 100644 docs/developer-and-contributor-corner/pi-hole-raspberry-pi.md
create mode 100644 docs/developer-and-contributor-corner/pi-hole-raspberry-pi.txt
delete mode 100644 docs/developer-and-contributor-corner/process.md
create mode 100644 docs/developer-and-contributor-corner/process.txt
delete mode 100644 docs/developer-and-contributor-corner/python-collector.md
create mode 100644 docs/developer-and-contributor-corner/python-collector.txt
delete mode 100644 docs/developer-and-contributor-corner/raspberry-pi-anomaly-detection.md
create mode 100644 docs/developer-and-contributor-corner/raspberry-pi-anomaly-detection.txt
create mode 100644 integrations/cloud-notifications/integrations/ilert.md
create mode 100644 integrations/logs/integrations/systemd_journal_logs.md
create mode 100644 integrations/logs/integrations/windows_event_logs.md
create mode 100644 integrations/logs/metadata.yaml
create mode 100644 integrations/schemas/agent_notification.json
create mode 100644 integrations/schemas/cloud_notification.json
create mode 100644 integrations/schemas/logs.json
delete mode 100644 integrations/schemas/notification.json
create mode 100644 integrations/templates/overview/logs.md
create mode 100644 packaging/cmake/Modules/NetdataDashboard.cmake
create mode 100755 packaging/cmake/pkg-files/deb/dashboard/postinst
create mode 100755 packaging/cmake/pkg-files/deb/dashboard/postrm
create mode 100755 packaging/cmake/pkg-files/deb/dashboard/preinst
delete mode 100644 packaging/installer/REINSTALL.md
delete mode 100644 packaging/installer/installer.nsi
create mode 100644 packaging/installer/methods/no_ipv4.md
create mode 100644 packaging/utils/compile-and-run-windows.sh
create mode 100644 packaging/windows/BackGround.bmp
create mode 100644 packaging/windows/Top.bmp
create mode 100644 packaging/windows/WINDOWS_INSTALLER.md
create mode 100644 packaging/windows/find-sdk-path.sh
create mode 100755 packaging/windows/get-convert-licenses.sh
create mode 100755 packaging/windows/get-win-build-path.sh
create mode 100644 packaging/windows/gpl-3.0.rtf
create mode 100644 packaging/windows/msi-extension.bat
create mode 100644 packaging/windows/ncul1.rtf
create mode 100644 packaging/windows/netdata.wxs.in
create mode 100644 packaging/windows/resources/netdata.manifest.in
create mode 100644 packaging/windows/resources/netdata.rc
create mode 100644 packaging/windows/resources/netdata_claim.manifest.in
create mode 100644 packaging/windows/resources/netdata_claim.rc
create mode 100644 packaging/windows/resources/netdatacli.manifest.in
create mode 100644 packaging/windows/resources/netdatacli.rc
create mode 100644 src/aclk/aclk-schemas/.gitignore
create mode 100644 src/aclk/aclk-schemas/.travis.yml
create mode 100644 src/aclk/aclk-schemas/LICENSE
create mode 100644 src/aclk/aclk-schemas/Makefile
create mode 100644 src/aclk/aclk-schemas/README.md
create mode 100644 src/aclk/aclk-schemas/buf.yml
create mode 100644 src/aclk/aclk-schemas/proto/aclk/v1/lib.proto
create mode 100644 src/aclk/aclk-schemas/proto/agent/v1/cmds.proto
create mode 100644 src/aclk/aclk-schemas/proto/agent/v1/connection.proto
create mode 100644 src/aclk/aclk-schemas/proto/agent/v1/disconnect.proto
create mode 100644 src/aclk/aclk-schemas/proto/alarm/v1/config.proto
create mode 100644 src/aclk/aclk-schemas/proto/alarm/v1/stream.proto
create mode 100644 src/aclk/aclk-schemas/proto/chart/v1/config.proto
create mode 100644 src/aclk/aclk-schemas/proto/chart/v1/dimension.proto
create mode 100644 src/aclk/aclk-schemas/proto/chart/v1/instance.proto
create mode 100644 src/aclk/aclk-schemas/proto/chart/v1/stream.proto
create mode 100644 src/aclk/aclk-schemas/proto/context/v1/context.proto
create mode 100644 src/aclk/aclk-schemas/proto/context/v1/stream.proto
create mode 100644 src/aclk/aclk-schemas/proto/nodeinstance/connection/v1/connection.proto
create mode 100644 src/aclk/aclk-schemas/proto/nodeinstance/create/v1/creation.proto
create mode 100644 src/aclk/aclk-schemas/proto/nodeinstance/info/v1/info.proto
delete mode 100644 src/aclk/aclk_rrdhost_state.h
delete mode 100644 src/aclk/aclk_stats.c
delete mode 100644 src/aclk/aclk_stats.h
delete mode 100644 src/aclk/helpers/mqtt_wss_pal.h
delete mode 100644 src/aclk/helpers/ringbuffer_pal.h
delete mode 100644 src/aclk/mqtt_websockets/.github/workflows/run-tests.yaml
delete mode 100644 src/aclk/mqtt_websockets/.gitignore
delete mode 100644 src/aclk/mqtt_websockets/c-rbuf/cringbuffer.c
delete mode 100644 src/aclk/mqtt_websockets/c-rbuf/cringbuffer.h
delete mode 100644 src/aclk/mqtt_websockets/c-rbuf/cringbuffer_internal.h
delete mode 100644 src/aclk/mqtt_websockets/c-rbuf/ringbuffer_test.c
delete mode 100644 src/aclk/mqtt_websockets/c_rhash/c_rhash.c
delete mode 100644 src/aclk/mqtt_websockets/c_rhash/c_rhash.h
delete mode 100644 src/aclk/mqtt_websockets/c_rhash/c_rhash_internal.h
delete mode 100644 src/aclk/mqtt_websockets/c_rhash/tests.c
delete mode 100644 src/aclk/mqtt_websockets/mqtt_wss_log.c
delete mode 100644 src/aclk/mqtt_websockets/mqtt_wss_log.h
create mode 100644 src/claim/claim-with-api.c
create mode 100644 src/claim/claim_id.c
create mode 100644 src/claim/claim_id.h
create mode 100644 src/claim/cloud-conf.c
create mode 100644 src/claim/cloud-status.c
create mode 100644 src/claim/cloud-status.h
create mode 100644 src/claim/main.c
create mode 100644 src/claim/main.h
create mode 100644 src/claim/ui.c
create mode 100644 src/claim/ui.h
create mode 100644 src/collectors/apps.plugin/apps_aggregations.c
create mode 100644 src/collectors/apps.plugin/apps_incremental_collection.c
create mode 100644 src/collectors/apps.plugin/apps_os_freebsd.c
create mode 100644 src/collectors/apps.plugin/apps_os_linux.c
create mode 100644 src/collectors/apps.plugin/apps_os_macos.c
create mode 100644 src/collectors/apps.plugin/apps_os_windows.c
create mode 100644 src/collectors/apps.plugin/apps_os_windows_nt.c
create mode 100644 src/collectors/apps.plugin/apps_pid.c
create mode 100644 src/collectors/apps.plugin/apps_pid_files.c
create mode 100644 src/collectors/apps.plugin/apps_pid_match.c
delete mode 100644 src/collectors/apps.plugin/apps_proc_meminfo.c
delete mode 100644 src/collectors/apps.plugin/apps_proc_pid_cmdline.c
delete mode 100644 src/collectors/apps.plugin/apps_proc_pid_fd.c
delete mode 100644 src/collectors/apps.plugin/apps_proc_pid_io.c
delete mode 100644 src/collectors/apps.plugin/apps_proc_pid_limits.c
delete mode 100644 src/collectors/apps.plugin/apps_proc_pid_stat.c
delete mode 100644 src/collectors/apps.plugin/apps_proc_pid_status.c
delete mode 100644 src/collectors/apps.plugin/apps_proc_pids.c
delete mode 100644 src/collectors/apps.plugin/apps_proc_stat.c
delete mode 100644 src/collectors/apps.plugin/apps_users_and_groups.c
create mode 100644 src/collectors/apps.plugin/busy_threads.c
delete mode 120000 src/collectors/charts.d.plugin/apcupsd/README.md
delete mode 100644 src/collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh
delete mode 100644 src/collectors/charts.d.plugin/apcupsd/apcupsd.conf
delete mode 100644 src/collectors/charts.d.plugin/apcupsd/integrations/apc_ups.md
delete mode 100644 src/collectors/charts.d.plugin/apcupsd/metadata.yaml
delete mode 120000 src/collectors/charts.d.plugin/sensors/README.md
delete mode 100644 src/collectors/charts.d.plugin/sensors/integrations/linux_sensors_sysfs.md
delete mode 100644 src/collectors/charts.d.plugin/sensors/metadata.yaml
delete mode 100644 src/collectors/charts.d.plugin/sensors/sensors.chart.sh
delete mode 100644 src/collectors/charts.d.plugin/sensors/sensors.conf
delete mode 100644 src/collectors/checks.plugin/README.md
create mode 100644 src/collectors/common-contexts/disk-avgsz.h
create mode 100644 src/collectors/common-contexts/disk-await.h
create mode 100644 src/collectors/common-contexts/disk-busy.h
create mode 100644 src/collectors/common-contexts/disk-io.h
create mode 100644 src/collectors/common-contexts/disk-iotime.h
create mode 100644 src/collectors/common-contexts/disk-ops.h
create mode 100644 src/collectors/common-contexts/disk-qops.h
create mode 100644 src/collectors/common-contexts/disk-svctm.h
create mode 100644 src/collectors/common-contexts/disk-util.h
delete mode 100644 src/collectors/common-contexts/disk.io.h
create mode 100644 src/collectors/common-contexts/mem-available.h
create mode 100644 src/collectors/common-contexts/mem-pgfaults.h
create mode 100644 src/collectors/common-contexts/mem-swap.h
delete mode 100644 src/collectors/common-contexts/mem.available.h
delete mode 100644 src/collectors/common-contexts/mem.pgfaults.h
delete mode 100644 src/collectors/common-contexts/mem.swap.h
create mode 100644 src/collectors/common-contexts/system-interrupts.h
create mode 100644 src/collectors/common-contexts/system-io.h
create mode 100644 src/collectors/common-contexts/system-ipc.h
create mode 100644 src/collectors/common-contexts/system-processes.h
create mode 100644 src/collectors/common-contexts/system-ram.h
delete mode 100644 src/collectors/common-contexts/system.interrupts.h
delete mode 100644 src/collectors/common-contexts/system.io.h
delete mode 100644 src/collectors/common-contexts/system.ipc.h
delete mode 100644 src/collectors/common-contexts/system.processes.h
delete mode 100644 src/collectors/common-contexts/system.ram.h
create mode 100644 src/collectors/log2journal/log2journal-hashed-key.h
create mode 100644 src/collectors/log2journal/log2journal-txt.h
delete mode 100644 src/collectors/plugins.d/README.md
delete mode 100644 src/collectors/plugins.d/functions-table.md
delete mode 100644 src/collectors/plugins.d/gperf-config.txt
delete mode 100644 src/collectors/plugins.d/gperf-hashtable.h
delete mode 100644 src/collectors/plugins.d/local_listeners.c
delete mode 100644 src/collectors/plugins.d/ndsudo.c
delete mode 100644 src/collectors/plugins.d/plugins_d.c
delete mode 100644 src/collectors/plugins.d/plugins_d.h
delete mode 100644 src/collectors/plugins.d/pluginsd_dyncfg.c
delete mode 100644 src/collectors/plugins.d/pluginsd_dyncfg.h
delete mode 100644 src/collectors/plugins.d/pluginsd_functions.c
delete mode 100644 src/collectors/plugins.d/pluginsd_functions.h
delete mode 100644 src/collectors/plugins.d/pluginsd_internals.c
delete mode 100644 src/collectors/plugins.d/pluginsd_internals.h
delete mode 100644 src/collectors/plugins.d/pluginsd_parser.c
delete mode 100644 src/collectors/plugins.d/pluginsd_parser.h
delete mode 100644 src/collectors/plugins.d/pluginsd_replication.c
delete mode 100644 src/collectors/plugins.d/pluginsd_replication.h
delete mode 100644 src/collectors/python.d.plugin/anomalies/README.md
delete mode 100644 src/collectors/python.d.plugin/anomalies/anomalies.chart.py
delete mode 100644 src/collectors/python.d.plugin/anomalies/anomalies.conf
delete mode 100644 src/collectors/python.d.plugin/anomalies/metadata.yaml
delete mode 120000 src/collectors/python.d.plugin/boinc/README.md
delete mode 100644 src/collectors/python.d.plugin/boinc/boinc.chart.py
delete mode 100644 src/collectors/python.d.plugin/boinc/boinc.conf
delete mode 100644 src/collectors/python.d.plugin/boinc/integrations/boinc.md
delete mode 100644 src/collectors/python.d.plugin/boinc/metadata.yaml
delete mode 120000 src/collectors/python.d.plugin/ceph/README.md
delete mode 100644 src/collectors/python.d.plugin/ceph/ceph.chart.py
delete mode 100644 src/collectors/python.d.plugin/ceph/ceph.conf
delete mode 100644 src/collectors/python.d.plugin/ceph/integrations/ceph.md
delete mode 100644 src/collectors/python.d.plugin/ceph/metadata.yaml
delete mode 120000 src/collectors/python.d.plugin/openldap/README.md
delete mode 100644 src/collectors/python.d.plugin/openldap/integrations/openldap.md
delete mode 100644 src/collectors/python.d.plugin/openldap/metadata.yaml
delete mode 100644 src/collectors/python.d.plugin/openldap/openldap.chart.py
delete mode 100644 src/collectors/python.d.plugin/openldap/openldap.conf
delete mode 120000 src/collectors/python.d.plugin/oracledb/README.md
delete mode 100644 src/collectors/python.d.plugin/oracledb/integrations/oracle_db.md
delete mode 100644 src/collectors/python.d.plugin/oracledb/metadata.yaml
delete mode 100644 src/collectors/python.d.plugin/oracledb/oracledb.chart.py
delete mode 100644 src/collectors/python.d.plugin/oracledb/oracledb.conf
delete mode 100644 src/collectors/python.d.plugin/python_modules/pyyaml2/__init__.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/pyyaml2/composer.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/pyyaml2/constructor.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/pyyaml2/cyaml.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/pyyaml2/dumper.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/pyyaml2/emitter.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/pyyaml2/error.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/pyyaml2/events.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/pyyaml2/loader.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/pyyaml2/nodes.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/pyyaml2/parser.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/pyyaml2/reader.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/pyyaml2/representer.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/pyyaml2/resolver.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/pyyaml2/scanner.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/pyyaml2/serializer.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/pyyaml2/tokens.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/pyyaml3/__init__.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/pyyaml3/composer.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/pyyaml3/constructor.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/pyyaml3/cyaml.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/pyyaml3/dumper.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/pyyaml3/emitter.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/pyyaml3/error.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/pyyaml3/events.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/pyyaml3/loader.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/pyyaml3/nodes.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/pyyaml3/parser.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/pyyaml3/reader.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/pyyaml3/representer.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/pyyaml3/resolver.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/pyyaml3/scanner.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/pyyaml3/serializer.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/pyyaml3/tokens.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/third_party/__init__.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/third_party/boinc_client.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/third_party/filelock.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/third_party/mcrcon.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/third_party/monotonic.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/third_party/ordereddict.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/urllib3/__init__.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/urllib3/_collections.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/urllib3/connection.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/urllib3/connectionpool.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/urllib3/contrib/__init__.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/__init__.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/bindings.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/low_level.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/urllib3/contrib/appengine.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/urllib3/contrib/ntlmpool.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/urllib3/contrib/pyopenssl.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/urllib3/contrib/securetransport.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/urllib3/contrib/socks.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/urllib3/exceptions.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/urllib3/fields.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/urllib3/filepost.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/urllib3/packages/__init__.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/urllib3/packages/backports/__init__.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/urllib3/packages/backports/makefile.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/urllib3/packages/ordered_dict.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/urllib3/packages/six.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/__init__.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/urllib3/poolmanager.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/urllib3/request.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/urllib3/response.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/urllib3/util/__init__.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/urllib3/util/connection.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/urllib3/util/request.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/urllib3/util/response.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/urllib3/util/retry.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/urllib3/util/selectors.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/urllib3/util/ssl_.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/urllib3/util/timeout.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/urllib3/util/url.py
delete mode 100644 src/collectors/python.d.plugin/python_modules/urllib3/util/wait.py
delete mode 120000 src/collectors/python.d.plugin/samba/README.md
delete mode 100644 src/collectors/python.d.plugin/samba/integrations/samba.md
delete mode 100644 src/collectors/python.d.plugin/samba/metadata.yaml
delete mode 100644 src/collectors/python.d.plugin/samba/samba.chart.py
delete mode 100644 src/collectors/python.d.plugin/samba/samba.conf
delete mode 120000 src/collectors/python.d.plugin/spigotmc/README.md
delete mode 100644 src/collectors/python.d.plugin/spigotmc/integrations/spigotmc.md
delete mode 100644 src/collectors/python.d.plugin/spigotmc/metadata.yaml
delete mode 100644 src/collectors/python.d.plugin/spigotmc/spigotmc.chart.py
delete mode 100644 src/collectors/python.d.plugin/spigotmc/spigotmc.conf
delete mode 120000 src/collectors/python.d.plugin/varnish/README.md
delete mode 100644 src/collectors/python.d.plugin/varnish/integrations/varnish.md
delete mode 100644 src/collectors/python.d.plugin/varnish/metadata.yaml
delete mode 100644 src/collectors/python.d.plugin/varnish/varnish.chart.py
delete mode 100644 src/collectors/python.d.plugin/varnish/varnish.conf
delete mode 120000 src/collectors/python.d.plugin/w1sensor/README.md
delete mode 100644 src/collectors/python.d.plugin/w1sensor/integrations/1-wire_sensors.md
delete mode 100644 src/collectors/python.d.plugin/w1sensor/metadata.yaml
delete mode 100644 src/collectors/python.d.plugin/w1sensor/w1sensor.chart.py
delete mode 100644 src/collectors/python.d.plugin/w1sensor/w1sensor.conf
delete mode 120000 src/collectors/python.d.plugin/zscores/README.md
delete mode 100644 src/collectors/python.d.plugin/zscores/integrations/python.d_zscores.md
delete mode 100644 src/collectors/python.d.plugin/zscores/metadata.yaml
delete mode 100644 src/collectors/python.d.plugin/zscores/zscores.chart.py
delete mode 100644 src/collectors/python.d.plugin/zscores/zscores.conf
create mode 100644 src/collectors/systemd-journal.plugin/systemd-journal-sampling.h
create mode 100644 src/collectors/utils/local_listeners.c
create mode 100644 src/collectors/utils/ndsudo.c
create mode 100644 src/collectors/windows-events.plugin/README.md
create mode 100644 src/collectors/windows-events.plugin/windows-events-fields-cache.c
create mode 100644 src/collectors/windows-events.plugin/windows-events-fields-cache.h
create mode 100644 src/collectors/windows-events.plugin/windows-events-providers.c
create mode 100644 src/collectors/windows-events.plugin/windows-events-providers.h
create mode 100644 src/collectors/windows-events.plugin/windows-events-query-builder.c
create mode 100644 src/collectors/windows-events.plugin/windows-events-query-builder.h
create mode 100644 src/collectors/windows-events.plugin/windows-events-query-evt-variant.c
create mode 100644 src/collectors/windows-events.plugin/windows-events-query.c
create mode 100644 src/collectors/windows-events.plugin/windows-events-query.h
create mode 100644 src/collectors/windows-events.plugin/windows-events-sources.c
create mode 100644 src/collectors/windows-events.plugin/windows-events-sources.h
create mode 100644 src/collectors/windows-events.plugin/windows-events-unicode.c
create mode 100644 src/collectors/windows-events.plugin/windows-events-unicode.h
create mode 100644 src/collectors/windows-events.plugin/windows-events-xml.c
create mode 100644 src/collectors/windows-events.plugin/windows-events-xml.h
create mode 100644 src/collectors/windows-events.plugin/windows-events.c
create mode 100644 src/collectors/windows-events.plugin/windows-events.h
create mode 100644 src/collectors/windows.plugin/integrations/memory_statistics.md
create mode 100644 src/collectors/windows.plugin/integrations/system_statistics.md
create mode 100644 src/collectors/windows.plugin/integrations/system_thermal_zone.md
create mode 100644 src/collectors/windows.plugin/metadata.yaml
delete mode 100644 src/collectors/windows.plugin/metdata.yaml
delete mode 100644 src/collectors/windows.plugin/perflib-dump.c
create mode 100644 src/collectors/windows.plugin/perflib-hyperv.c
create mode 100644 src/collectors/windows.plugin/perflib-mssql.c
delete mode 100644 src/collectors/windows.plugin/perflib-names.c
create mode 100644 src/collectors/windows.plugin/perflib-netframework.c
create mode 100644 src/collectors/windows.plugin/perflib-thermalzone.c
create mode 100644 src/collectors/windows.plugin/perflib-web-service.c
delete mode 100644 src/collectors/windows.plugin/perflib.c
delete mode 100644 src/collectors/windows.plugin/perflib.h
delete mode 100644 src/daemon/common.c
create mode 100644 src/daemon/environment.c
delete mode 100644 src/daemon/event_loop.c
delete mode 100644 src/daemon/event_loop.h
create mode 100644 src/daemon/h2o-common.c
create mode 100644 src/daemon/libuv_workers.c
create mode 100644 src/daemon/libuv_workers.h
delete mode 100644 src/database/contexts/api_v1.c
create mode 100644 src/database/contexts/api_v1_contexts.c
delete mode 100644 src/database/contexts/api_v2.c
create mode 100644 src/database/contexts/api_v2_contexts.c
create mode 100644 src/database/contexts/api_v2_contexts.h
create mode 100644 src/database/contexts/api_v2_contexts_agents.c
create mode 100644 src/database/contexts/api_v2_contexts_alert_config.c
create mode 100644 src/database/contexts/api_v2_contexts_alert_transitions.c
create mode 100644 src/database/contexts/api_v2_contexts_alerts.c
create mode 100644 src/database/contexts/api_v2_contexts_alerts.h
delete mode 100644 src/database/rrdfunctions-progress.c
delete mode 100644 src/database/rrdfunctions-progress.h
delete mode 100644 src/database/rrdfunctions-streaming.c
delete mode 100644 src/database/rrdfunctions-streaming.h
create mode 100644 src/go/pkg/matcher/README.md
create mode 100644 src/go/pkg/matcher/cache.go
create mode 100644 src/go/pkg/matcher/cache_test.go
create mode 100644 src/go/pkg/matcher/doc.go
create mode 100644 src/go/pkg/matcher/doc_test.go
create mode 100644 src/go/pkg/matcher/expr.go
create mode 100644 src/go/pkg/matcher/expr_test.go
create mode 100644 src/go/pkg/matcher/glob.go
create mode 100644 src/go/pkg/matcher/glob_test.go
create mode 100644 src/go/pkg/matcher/logical.go
create mode 100644 src/go/pkg/matcher/logical_test.go
create mode 100644 src/go/pkg/matcher/matcher.go
create mode 100644 src/go/pkg/matcher/matcher_test.go
create mode 100644 src/go/pkg/matcher/regexp.go
create mode 100644 src/go/pkg/matcher/regexp_test.go
create mode 100644 src/go/pkg/matcher/simple_patterns.go
create mode 100644 src/go/pkg/matcher/simple_patterns_test.go
create mode 100644 src/go/pkg/matcher/string.go
create mode 100644 src/go/pkg/matcher/string_test.go
create mode 100644 src/go/pkg/multipath/multipath.go
create mode 100644 src/go/pkg/multipath/multipath_test.go
create mode 100644 src/go/pkg/multipath/testdata/data1/test-empty.conf
create mode 100644 src/go/pkg/multipath/testdata/data1/test.conf
create mode 100644 src/go/pkg/multipath/testdata/data2/test-empty.conf
create mode 100644 src/go/pkg/multipath/testdata/data2/test.conf
create mode 100644 src/go/pkg/netdataapi/api.go
create mode 100644 src/go/pkg/netdataapi/api_test.go
create mode 100644 src/go/pkg/safewriter/writer.go
create mode 100644 src/go/pkg/ticker/ticker.go
create mode 100644 src/go/pkg/ticker/ticket_test.go
create mode 100644 src/go/plugin/go.d/agent/discovery/sd/discoverer/netlisteners/ll.go
delete mode 100644 src/go/plugin/go.d/agent/netdataapi/api.go
delete mode 100644 src/go/plugin/go.d/agent/netdataapi/api_test.go
delete mode 100644 src/go/plugin/go.d/agent/safewriter/writer.go
delete mode 100644 src/go/plugin/go.d/agent/ticker/ticker.go
delete mode 100644 src/go/plugin/go.d/agent/ticker/ticket_test.go
create mode 100644 src/go/plugin/go.d/config/go.d/apcupsd.conf
create mode 100644 src/go/plugin/go.d/config/go.d/boinc.conf
create mode 100644 src/go/plugin/go.d/config/go.d/ceph.conf
delete mode 100644 src/go/plugin/go.d/config/go.d/example.conf
create mode 100644 src/go/plugin/go.d/config/go.d/maxscale.conf
create mode 100644 src/go/plugin/go.d/config/go.d/nginxunit.conf
create mode 100644 src/go/plugin/go.d/config/go.d/openldap.conf
create mode 100644 src/go/plugin/go.d/config/go.d/oracledb.conf
create mode 100644 src/go/plugin/go.d/config/go.d/samba.conf
create mode 100644 src/go/plugin/go.d/config/go.d/spigotmc.conf
create mode 100644 src/go/plugin/go.d/config/go.d/typesense.conf
create mode 100644 src/go/plugin/go.d/config/go.d/varnish.conf
create mode 100644 src/go/plugin/go.d/config/go.d/w1sensor.conf
create mode 100644 src/go/plugin/go.d/modules/adaptecraid/doc.go
create mode 100644 src/go/plugin/go.d/modules/ap/doc.go
create mode 120000 src/go/plugin/go.d/modules/apcupsd/README.md
create mode 100644 src/go/plugin/go.d/modules/apcupsd/apcupsd.go
create mode 100644 src/go/plugin/go.d/modules/apcupsd/apcupsd_test.go
create mode 100644 src/go/plugin/go.d/modules/apcupsd/charts.go
create mode 100644 src/go/plugin/go.d/modules/apcupsd/client.go
create mode 100644 src/go/plugin/go.d/modules/apcupsd/collect.go
create mode 100644 src/go/plugin/go.d/modules/apcupsd/config_schema.json
create mode 100644 src/go/plugin/go.d/modules/apcupsd/integrations/apc_ups.md
create mode 100644 src/go/plugin/go.d/modules/apcupsd/metadata.yaml
create mode 100644 src/go/plugin/go.d/modules/apcupsd/status.go
create mode 100644 src/go/plugin/go.d/modules/apcupsd/testdata/config.json
create mode 100644 src/go/plugin/go.d/modules/apcupsd/testdata/config.yaml
create mode 100644 src/go/plugin/go.d/modules/apcupsd/testdata/status.txt
create mode 100644 src/go/plugin/go.d/modules/apcupsd/testdata/status_commlost.txt
create mode 120000 src/go/plugin/go.d/modules/boinc/README.md
create mode 100644 src/go/plugin/go.d/modules/boinc/boinc.go
create mode 100644 src/go/plugin/go.d/modules/boinc/boinc_test.go
create mode 100644 src/go/plugin/go.d/modules/boinc/charts.go
create mode 100644 src/go/plugin/go.d/modules/boinc/client.go
create mode 100644 src/go/plugin/go.d/modules/boinc/client_proto.go
create mode 100644 src/go/plugin/go.d/modules/boinc/collect.go
create mode 100644 src/go/plugin/go.d/modules/boinc/config_schema.json
create mode 100644 src/go/plugin/go.d/modules/boinc/integrations/boinc.md
create mode 100644 src/go/plugin/go.d/modules/boinc/metadata.yaml
create mode 100644 src/go/plugin/go.d/modules/boinc/testdata/config.json
create mode 100644 src/go/plugin/go.d/modules/boinc/testdata/config.yaml
create mode 100644 src/go/plugin/go.d/modules/boinc/testdata/get_results.xml
create mode 100644 src/go/plugin/go.d/modules/boinc/testdata/get_results_no_tasks.xml
create mode 120000 src/go/plugin/go.d/modules/ceph/README.md
create mode 100644 src/go/plugin/go.d/modules/ceph/api.go
create mode 100644 src/go/plugin/go.d/modules/ceph/auth.go
create mode 100644 src/go/plugin/go.d/modules/ceph/ceph.go
create mode 100644 src/go/plugin/go.d/modules/ceph/ceph_test.go
create mode 100644 src/go/plugin/go.d/modules/ceph/charts.go
create mode 100644 src/go/plugin/go.d/modules/ceph/collect.go
create mode 100644 src/go/plugin/go.d/modules/ceph/collect_health.go
create mode 100644 src/go/plugin/go.d/modules/ceph/collect_osd.go
create mode 100644 src/go/plugin/go.d/modules/ceph/collect_pools.go
create mode 100644 src/go/plugin/go.d/modules/ceph/config_schema.json
create mode 100644 src/go/plugin/go.d/modules/ceph/init.go
create mode 100644 src/go/plugin/go.d/modules/ceph/integrations/ceph.md
create mode 100644 src/go/plugin/go.d/modules/ceph/metadata.yaml
create mode 100644 src/go/plugin/go.d/modules/ceph/testdata/config.json
create mode 100644 src/go/plugin/go.d/modules/ceph/testdata/config.yaml
create mode 100644 src/go/plugin/go.d/modules/ceph/testdata/v16.2.15/api_health_minimal.json
create mode 100644 src/go/plugin/go.d/modules/ceph/testdata/v16.2.15/api_monitor.json
create mode 100644 src/go/plugin/go.d/modules/ceph/testdata/v16.2.15/api_osd.json
create mode 100644 src/go/plugin/go.d/modules/ceph/testdata/v16.2.15/api_pool_stats.json
create mode 100644 src/go/plugin/go.d/modules/chrony/exec.go
create mode 100644 src/go/plugin/go.d/modules/dmcache/doc.go
create mode 100644 src/go/plugin/go.d/modules/dnsmasq_dhcp/doc.go
delete mode 100644 src/go/plugin/go.d/modules/example/README.md
delete mode 100644 src/go/plugin/go.d/modules/example/charts.go
delete mode 100644 src/go/plugin/go.d/modules/example/collect.go
delete mode 100644 src/go/plugin/go.d/modules/example/config_schema.json
delete mode 100644 src/go/plugin/go.d/modules/example/example.go
delete mode 100644 src/go/plugin/go.d/modules/example/example_test.go
delete mode 100644 src/go/plugin/go.d/modules/example/init.go
delete mode 100644 src/go/plugin/go.d/modules/example/testdata/config.json
delete mode 100644 src/go/plugin/go.d/modules/example/testdata/config.yaml
create mode 100644 src/go/plugin/go.d/modules/fail2ban/doc.go
delete mode 100644 src/go/plugin/go.d/modules/hdfs/client.go
delete mode 100644 src/go/plugin/go.d/modules/hdfs/init.go
create mode 100644 src/go/plugin/go.d/modules/isc_dhcpd/doc.go
delete mode 100644 src/go/plugin/go.d/modules/lighttpd/apiclient.go
delete mode 100644 src/go/plugin/go.d/modules/lighttpd/init.go
delete mode 100644 src/go/plugin/go.d/modules/lighttpd/metrics.go
create mode 100644 src/go/plugin/go.d/modules/lighttpd/status.go
create mode 100644 src/go/plugin/go.d/modules/litespeed/doc.go
create mode 100644 src/go/plugin/go.d/modules/lvm/doc.go
create mode 120000 src/go/plugin/go.d/modules/maxscale/README.md
create mode 100644 src/go/plugin/go.d/modules/maxscale/charts.go
create mode 100644 src/go/plugin/go.d/modules/maxscale/collect.go
create mode 100644 src/go/plugin/go.d/modules/maxscale/config_schema.json
create mode 100644 src/go/plugin/go.d/modules/maxscale/integrations/maxscale.md
create mode 100644 src/go/plugin/go.d/modules/maxscale/maxscale.go
create mode 100644 src/go/plugin/go.d/modules/maxscale/maxscale_test.go
create mode 100644 src/go/plugin/go.d/modules/maxscale/metadata.yaml
create mode 100644 src/go/plugin/go.d/modules/maxscale/restapi.go
create mode 100644
src/go/plugin/go.d/modules/maxscale/testdata/config.json create mode 100644 src/go/plugin/go.d/modules/maxscale/testdata/config.yaml create mode 100644 src/go/plugin/go.d/modules/maxscale/testdata/v24.02.3/maxscale.json create mode 100644 src/go/plugin/go.d/modules/maxscale/testdata/v24.02.3/maxscale_threads.json create mode 100644 src/go/plugin/go.d/modules/maxscale/testdata/v24.02.3/servers.json create mode 100644 src/go/plugin/go.d/modules/megacli/doc.go delete mode 100644 src/go/plugin/go.d/modules/nginx/apiclient.go delete mode 100644 src/go/plugin/go.d/modules/nginx/metrics.go create mode 100644 src/go/plugin/go.d/modules/nginx/status.go create mode 120000 src/go/plugin/go.d/modules/nginxunit/README.md create mode 100644 src/go/plugin/go.d/modules/nginxunit/charts.go create mode 100644 src/go/plugin/go.d/modules/nginxunit/collect.go create mode 100644 src/go/plugin/go.d/modules/nginxunit/config_schema.json create mode 100644 src/go/plugin/go.d/modules/nginxunit/integrations/nginx_unit.md create mode 100644 src/go/plugin/go.d/modules/nginxunit/metadata.yaml create mode 100644 src/go/plugin/go.d/modules/nginxunit/nginxunit.go create mode 100644 src/go/plugin/go.d/modules/nginxunit/nginxunit_test.go create mode 100644 src/go/plugin/go.d/modules/nginxunit/testdata/config.json create mode 100644 src/go/plugin/go.d/modules/nginxunit/testdata/config.yaml create mode 100644 src/go/plugin/go.d/modules/nginxunit/testdata/v1.29.1/status.json create mode 100644 src/go/plugin/go.d/modules/nsd/doc.go create mode 100644 src/go/plugin/go.d/modules/nvme/doc.go create mode 120000 src/go/plugin/go.d/modules/openldap/README.md create mode 100644 src/go/plugin/go.d/modules/openldap/charts.go create mode 100644 src/go/plugin/go.d/modules/openldap/client.go create mode 100644 src/go/plugin/go.d/modules/openldap/collect.go create mode 100644 src/go/plugin/go.d/modules/openldap/collect_mon_counters.go create mode 100644 src/go/plugin/go.d/modules/openldap/collect_operations.go create mode 100644 src/go/plugin/go.d/modules/openldap/config_schema.json create mode 100644 src/go/plugin/go.d/modules/openldap/integrations/openldap.md create mode 100644 src/go/plugin/go.d/modules/openldap/metadata.yaml create mode 100644 src/go/plugin/go.d/modules/openldap/openldap.go create mode 100644 src/go/plugin/go.d/modules/openldap/openldap_test.go create mode 100644 src/go/plugin/go.d/modules/openldap/testdata/config.json create mode 100644 src/go/plugin/go.d/modules/openldap/testdata/config.yaml create mode 120000 src/go/plugin/go.d/modules/oracledb/README.md create mode 100644 src/go/plugin/go.d/modules/oracledb/charts.go create mode 100644 src/go/plugin/go.d/modules/oracledb/collect.go create mode 100644 src/go/plugin/go.d/modules/oracledb/collect_sysmetric.go create mode 100644 src/go/plugin/go.d/modules/oracledb/collect_sysstat.go create mode 100644 src/go/plugin/go.d/modules/oracledb/collect_tablespace.go create mode 100644 src/go/plugin/go.d/modules/oracledb/collect_wait_class.go create mode 100644 src/go/plugin/go.d/modules/oracledb/config_schema.json create mode 100644 src/go/plugin/go.d/modules/oracledb/init.go create mode 100644 src/go/plugin/go.d/modules/oracledb/integrations/oracle_db.md create mode 100644 src/go/plugin/go.d/modules/oracledb/metadata.yaml create mode 100644 src/go/plugin/go.d/modules/oracledb/oracledb.go create mode 100644 src/go/plugin/go.d/modules/oracledb/oracledb_test.go create mode 100644 src/go/plugin/go.d/modules/oracledb/testdata/config.json create mode 100644 
src/go/plugin/go.d/modules/oracledb/testdata/config.yaml create mode 100644 src/go/plugin/go.d/modules/oracledb/testdata/v21.3.0-xe/sysmetric.txt create mode 100644 src/go/plugin/go.d/modules/oracledb/testdata/v21.3.0-xe/sysstat.txt create mode 100644 src/go/plugin/go.d/modules/oracledb/testdata/v21.3.0-xe/tablespace.txt create mode 100644 src/go/plugin/go.d/modules/oracledb/testdata/v21.3.0-xe/wait_class.txt delete mode 100644 src/go/plugin/go.d/modules/phpdaemon/client.go delete mode 100644 src/go/plugin/go.d/modules/phpdaemon/init.go delete mode 100644 src/go/plugin/go.d/modules/phpdaemon/metrics.go create mode 100644 src/go/plugin/go.d/modules/portcheck/check_tcp_port.go create mode 100644 src/go/plugin/go.d/modules/portcheck/check_udp_port.go create mode 100644 src/go/plugin/go.d/modules/portcheck/integrations/tcp-udp_endpoints.md delete mode 100644 src/go/plugin/go.d/modules/portcheck/integrations/tcp_endpoints.md create mode 120000 src/go/plugin/go.d/modules/samba/README.md create mode 100644 src/go/plugin/go.d/modules/samba/charts.go create mode 100644 src/go/plugin/go.d/modules/samba/collect.go create mode 100644 src/go/plugin/go.d/modules/samba/config_schema.json create mode 100644 src/go/plugin/go.d/modules/samba/exec.go create mode 100644 src/go/plugin/go.d/modules/samba/init.go create mode 100644 src/go/plugin/go.d/modules/samba/integrations/samba.md create mode 100644 src/go/plugin/go.d/modules/samba/metadata.yaml create mode 100644 src/go/plugin/go.d/modules/samba/samba.go create mode 100644 src/go/plugin/go.d/modules/samba/samba_test.go create mode 100644 src/go/plugin/go.d/modules/samba/testdata/config.json create mode 100644 src/go/plugin/go.d/modules/samba/testdata/config.yaml create mode 100644 src/go/plugin/go.d/modules/samba/testdata/smbstatus-profile.txt create mode 100644 src/go/plugin/go.d/modules/sensors/doc.go delete mode 100644 src/go/plugin/go.d/modules/sensors/exec.go delete mode 100644 src/go/plugin/go.d/modules/sensors/init.go create mode 100644 src/go/plugin/go.d/modules/sensors/integrations/linux_sensors.md delete mode 100644 src/go/plugin/go.d/modules/sensors/integrations/linux_sensors_lm-sensors.md create mode 100644 src/go/plugin/go.d/modules/sensors/lmsensors/LICENSE.md create mode 100644 src/go/plugin/go.d/modules/sensors/lmsensors/README.md create mode 100644 src/go/plugin/go.d/modules/sensors/lmsensors/doc.go create mode 100644 src/go/plugin/go.d/modules/sensors/lmsensors/fs.go create mode 100644 src/go/plugin/go.d/modules/sensors/lmsensors/parse.go create mode 100644 src/go/plugin/go.d/modules/sensors/lmsensors/scanner.go create mode 100644 src/go/plugin/go.d/modules/sensors/lmsensors/scanner_test.go create mode 100644 src/go/plugin/go.d/modules/sensors/lmsensors/sensor.go delete mode 100644 src/go/plugin/go.d/modules/sensors/testdata/sensors-temp-in-curr-power-fan.txt delete mode 100644 src/go/plugin/go.d/modules/sensors/testdata/sensors-temp.txt create mode 100644 src/go/plugin/go.d/modules/smartctl/doc.go create mode 100644 src/go/plugin/go.d/modules/snmp/collect_custom_oids.go create mode 100644 src/go/plugin/go.d/modules/snmp/collect_if_mib.go create mode 100644 src/go/plugin/go.d/modules/snmp/collect_sys_info.go create mode 100644 src/go/plugin/go.d/modules/snmp/entnum/enterprise-numbers.txt create mode 100644 src/go/plugin/go.d/modules/snmp/entnum/lookup.go create mode 120000 src/go/plugin/go.d/modules/spigotmc/README.md create mode 100644 src/go/plugin/go.d/modules/spigotmc/charts.go create mode 100644 
src/go/plugin/go.d/modules/spigotmc/client.go create mode 100644 src/go/plugin/go.d/modules/spigotmc/collect.go create mode 100644 src/go/plugin/go.d/modules/spigotmc/config_schema.json create mode 100644 src/go/plugin/go.d/modules/spigotmc/integrations/spigotmc.md create mode 100644 src/go/plugin/go.d/modules/spigotmc/metadata.yaml create mode 100644 src/go/plugin/go.d/modules/spigotmc/spigotmc.go create mode 100644 src/go/plugin/go.d/modules/spigotmc/spigotmc_test.go create mode 100644 src/go/plugin/go.d/modules/spigotmc/testdata/config.json create mode 100644 src/go/plugin/go.d/modules/spigotmc/testdata/config.yaml create mode 100644 src/go/plugin/go.d/modules/storcli/doc.go delete mode 100644 src/go/plugin/go.d/modules/tengine/apiclient.go delete mode 100644 src/go/plugin/go.d/modules/tengine/metrics.go create mode 100644 src/go/plugin/go.d/modules/tengine/status.go create mode 100644 src/go/plugin/go.d/modules/testrandom/charts.go create mode 100644 src/go/plugin/go.d/modules/testrandom/collect.go create mode 100644 src/go/plugin/go.d/modules/testrandom/config_schema.json create mode 100644 src/go/plugin/go.d/modules/testrandom/init.go create mode 100644 src/go/plugin/go.d/modules/testrandom/testdata/config.json create mode 100644 src/go/plugin/go.d/modules/testrandom/testdata/config.yaml create mode 100644 src/go/plugin/go.d/modules/testrandom/testrandom.go create mode 100644 src/go/plugin/go.d/modules/testrandom/testrandom_test.go create mode 120000 src/go/plugin/go.d/modules/typesense/README.md create mode 100644 src/go/plugin/go.d/modules/typesense/charts.go create mode 100644 src/go/plugin/go.d/modules/typesense/collect.go create mode 100644 src/go/plugin/go.d/modules/typesense/config_schema.json create mode 100644 src/go/plugin/go.d/modules/typesense/init.go create mode 100644 src/go/plugin/go.d/modules/typesense/integrations/typesense.md create mode 100644 src/go/plugin/go.d/modules/typesense/metadata.yaml create mode 100644 src/go/plugin/go.d/modules/typesense/testdata/config.json create mode 100644 src/go/plugin/go.d/modules/typesense/testdata/config.yaml create mode 100644 src/go/plugin/go.d/modules/typesense/testdata/v27.0/health_nok.json create mode 100644 src/go/plugin/go.d/modules/typesense/testdata/v27.0/health_ok.json create mode 100644 src/go/plugin/go.d/modules/typesense/testdata/v27.0/stats.json create mode 100644 src/go/plugin/go.d/modules/typesense/typesense.go create mode 100644 src/go/plugin/go.d/modules/typesense/typesense_test.go create mode 120000 src/go/plugin/go.d/modules/varnish/README.md create mode 100644 src/go/plugin/go.d/modules/varnish/charts.go create mode 100644 src/go/plugin/go.d/modules/varnish/collect.go create mode 100644 src/go/plugin/go.d/modules/varnish/config_schema.json create mode 100644 src/go/plugin/go.d/modules/varnish/exec.go create mode 100644 src/go/plugin/go.d/modules/varnish/init.go create mode 100644 src/go/plugin/go.d/modules/varnish/integrations/varnish.md create mode 100644 src/go/plugin/go.d/modules/varnish/metadata.yaml create mode 100644 src/go/plugin/go.d/modules/varnish/testdata/config.json create mode 100644 src/go/plugin/go.d/modules/varnish/testdata/config.yaml create mode 100644 src/go/plugin/go.d/modules/varnish/testdata/v7.1/varnishstat.txt create mode 100644 src/go/plugin/go.d/modules/varnish/varnish.go create mode 100644 src/go/plugin/go.d/modules/varnish/varnish_test.go delete mode 100644 src/go/plugin/go.d/modules/vernemq/testdata/metrics-v1.10.1-mqtt5.txt delete mode 100644 
src/go/plugin/go.d/modules/vernemq/testdata/non_vernemq.txt create mode 100644 src/go/plugin/go.d/modules/vernemq/testdata/v1.10.1/metrics.txt create mode 100644 src/go/plugin/go.d/modules/vernemq/testdata/v2.0.1/metrics.txt create mode 120000 src/go/plugin/go.d/modules/w1sensor/README.md create mode 100644 src/go/plugin/go.d/modules/w1sensor/charts.go create mode 100644 src/go/plugin/go.d/modules/w1sensor/collect.go create mode 100644 src/go/plugin/go.d/modules/w1sensor/config_schema.json create mode 100644 src/go/plugin/go.d/modules/w1sensor/doc.go create mode 100644 src/go/plugin/go.d/modules/w1sensor/integrations/1-wire_sensors.md create mode 100644 src/go/plugin/go.d/modules/w1sensor/metadata.yaml create mode 100644 src/go/plugin/go.d/modules/w1sensor/testdata/config.json create mode 100644 src/go/plugin/go.d/modules/w1sensor/testdata/config.yaml create mode 100644 src/go/plugin/go.d/modules/w1sensor/testdata/devices/28-01204e9d2fa0/w1_slave create mode 100644 src/go/plugin/go.d/modules/w1sensor/testdata/devices/28-01204e9d2fa1/w1_slave create mode 100644 src/go/plugin/go.d/modules/w1sensor/testdata/devices/28-01204e9d2fa2/w1_slave create mode 100644 src/go/plugin/go.d/modules/w1sensor/testdata/devices/28-01204e9d2fa3/w1_slave create mode 100644 src/go/plugin/go.d/modules/w1sensor/w1sensor.go create mode 100644 src/go/plugin/go.d/modules/w1sensor/w1sensor_test.go create mode 100644 src/go/plugin/go.d/modules/zfspool/doc.go create mode 100644 src/go/plugin/go.d/pkg/confopt/duration.go create mode 100644 src/go/plugin/go.d/pkg/confopt/duration_test.go delete mode 100644 src/go/plugin/go.d/pkg/matcher/README.md delete mode 100644 src/go/plugin/go.d/pkg/matcher/cache.go delete mode 100644 src/go/plugin/go.d/pkg/matcher/cache_test.go delete mode 100644 src/go/plugin/go.d/pkg/matcher/doc.go delete mode 100644 src/go/plugin/go.d/pkg/matcher/doc_test.go delete mode 100644 src/go/plugin/go.d/pkg/matcher/expr.go delete mode 100644 src/go/plugin/go.d/pkg/matcher/expr_test.go delete mode 100644 src/go/plugin/go.d/pkg/matcher/glob.go delete mode 100644 src/go/plugin/go.d/pkg/matcher/glob_test.go delete mode 100644 src/go/plugin/go.d/pkg/matcher/logical.go delete mode 100644 src/go/plugin/go.d/pkg/matcher/logical_test.go delete mode 100644 src/go/plugin/go.d/pkg/matcher/matcher.go delete mode 100644 src/go/plugin/go.d/pkg/matcher/matcher_test.go delete mode 100644 src/go/plugin/go.d/pkg/matcher/regexp.go delete mode 100644 src/go/plugin/go.d/pkg/matcher/regexp_test.go delete mode 100644 src/go/plugin/go.d/pkg/matcher/simple_patterns.go delete mode 100644 src/go/plugin/go.d/pkg/matcher/simple_patterns_test.go delete mode 100644 src/go/plugin/go.d/pkg/matcher/string.go delete mode 100644 src/go/plugin/go.d/pkg/matcher/string_test.go delete mode 100644 src/go/plugin/go.d/pkg/multipath/multipath.go delete mode 100644 src/go/plugin/go.d/pkg/multipath/multipath_test.go delete mode 100644 src/go/plugin/go.d/pkg/multipath/testdata/data1/test-empty.conf delete mode 100644 src/go/plugin/go.d/pkg/multipath/testdata/data1/test.conf delete mode 100644 src/go/plugin/go.d/pkg/multipath/testdata/data2/test-empty.conf delete mode 100644 src/go/plugin/go.d/pkg/multipath/testdata/data2/test.conf delete mode 100644 src/go/plugin/go.d/pkg/socket/types.go create mode 100644 src/go/plugin/go.d/pkg/web/client_config.go create mode 100644 src/go/plugin/go.d/pkg/web/client_config_test.go delete mode 100644 src/go/plugin/go.d/pkg/web/client_test.go create mode 100644 src/go/plugin/go.d/pkg/web/config.go delete mode 100644 
src/go/plugin/go.d/pkg/web/duration.go delete mode 100644 src/go/plugin/go.d/pkg/web/duration_test.go delete mode 100644 src/go/plugin/go.d/pkg/web/request.go create mode 100644 src/go/plugin/go.d/pkg/web/request_config.go create mode 100644 src/go/plugin/go.d/pkg/web/request_config_test.go delete mode 100644 src/go/plugin/go.d/pkg/web/request_test.go delete mode 100644 src/go/plugin/go.d/pkg/web/web.go delete mode 100644 src/health/health.d/anomalies.conf create mode 100644 src/health/notifications/ilert/README.md create mode 100644 src/health/notifications/ilert/metadata.yaml create mode 100644 src/libnetdata/bitmap/bitmap64.h delete mode 100644 src/libnetdata/bitmap64.h create mode 100644 src/libnetdata/c_rhash/c_rhash.c create mode 100644 src/libnetdata/c_rhash/c_rhash.h create mode 100644 src/libnetdata/c_rhash/c_rhash_internal.h create mode 100644 src/libnetdata/c_rhash/tests.c create mode 100644 src/libnetdata/common.h create mode 100644 src/libnetdata/config/appconfig_api_boolean.c create mode 100644 src/libnetdata/config/appconfig_api_boolean.h create mode 100644 src/libnetdata/config/appconfig_api_durations.c create mode 100644 src/libnetdata/config/appconfig_api_durations.h create mode 100644 src/libnetdata/config/appconfig_api_numbers.c create mode 100644 src/libnetdata/config/appconfig_api_numbers.h create mode 100644 src/libnetdata/config/appconfig_api_sizes.c create mode 100644 src/libnetdata/config/appconfig_api_sizes.h create mode 100644 src/libnetdata/config/appconfig_api_text.c create mode 100644 src/libnetdata/config/appconfig_api_text.h create mode 100644 src/libnetdata/config/appconfig_cleanup.c create mode 100644 src/libnetdata/config/appconfig_conf_file.c create mode 100644 src/libnetdata/config/appconfig_exporters.c create mode 100644 src/libnetdata/config/appconfig_internals.h create mode 100644 src/libnetdata/config/appconfig_migrate.c create mode 100644 src/libnetdata/config/appconfig_options.c create mode 100644 src/libnetdata/config/appconfig_sections.c create mode 100644 src/libnetdata/config/appconfig_traversal.c create mode 100644 src/libnetdata/facets/logs_query_status.h create mode 100644 src/libnetdata/json/json-c-parser-inline.c create mode 100644 src/libnetdata/libjudy/judy-malloc.c create mode 100644 src/libnetdata/libjudy/judy-malloc.h delete mode 100644 src/libnetdata/linked-lists.h create mode 100644 src/libnetdata/linked_lists/linked_lists.h create mode 100644 src/libnetdata/local-sockets/local-sockets.h delete mode 100644 src/libnetdata/log/journal.c delete mode 100644 src/libnetdata/log/journal.h delete mode 100644 src/libnetdata/log/log.c delete mode 100644 src/libnetdata/log/log.h create mode 100644 src/libnetdata/log/nd_log-annotators.c create mode 100644 src/libnetdata/log/nd_log-common.h create mode 100644 src/libnetdata/log/nd_log-config.c create mode 100644 src/libnetdata/log/nd_log-field-formatters.c create mode 100644 src/libnetdata/log/nd_log-format-json.c create mode 100644 src/libnetdata/log/nd_log-format-logfmt.c create mode 100644 src/libnetdata/log/nd_log-init.c create mode 100644 src/libnetdata/log/nd_log-internals.c create mode 100644 src/libnetdata/log/nd_log-internals.h create mode 100644 src/libnetdata/log/nd_log-to-file.c create mode 100644 src/libnetdata/log/nd_log-to-syslog.c create mode 100644 src/libnetdata/log/nd_log-to-systemd-journal.c create mode 100644 src/libnetdata/log/nd_log-to-windows-common.h create mode 100644 src/libnetdata/log/nd_log-to-windows-events.c create mode 100644 src/libnetdata/log/nd_log.c create 
mode 100644 src/libnetdata/log/nd_log.h create mode 100644 src/libnetdata/log/nd_log_limit.c create mode 100644 src/libnetdata/log/nd_log_limit.h create mode 100644 src/libnetdata/log/nd_wevents_manifest.xml create mode 100644 src/libnetdata/log/systemd-journal-helpers.c create mode 100644 src/libnetdata/log/systemd-journal-helpers.h create mode 100644 src/libnetdata/log/wevt_netdata_compile.bat create mode 100644 src/libnetdata/log/wevt_netdata_compile.sh create mode 100644 src/libnetdata/log/wevt_netdata_install.bat create mode 100644 src/libnetdata/log/wevt_netdata_mc_generate.c delete mode 100644 src/libnetdata/maps/local-sockets.h delete mode 100644 src/libnetdata/maps/system-groups.h delete mode 100644 src/libnetdata/maps/system-services.h delete mode 100644 src/libnetdata/maps/system-users.h create mode 100644 src/libnetdata/os/random.c create mode 100644 src/libnetdata/os/random.h create mode 100644 src/libnetdata/os/sleep.c create mode 100644 src/libnetdata/os/sleep.h create mode 100644 src/libnetdata/os/system-maps/cache-host-users-and-groups.c create mode 100644 src/libnetdata/os/system-maps/cache-host-users-and-groups.h create mode 100644 src/libnetdata/os/system-maps/cached-gid-groupname.c create mode 100644 src/libnetdata/os/system-maps/cached-gid-groupname.h create mode 100644 src/libnetdata/os/system-maps/cached-sid-username.c create mode 100644 src/libnetdata/os/system-maps/cached-sid-username.h create mode 100644 src/libnetdata/os/system-maps/cached-uid-username.c create mode 100644 src/libnetdata/os/system-maps/cached-uid-username.h create mode 100644 src/libnetdata/os/system-maps/system-services.h create mode 100644 src/libnetdata/os/timestamps.c create mode 100644 src/libnetdata/os/timestamps.h delete mode 100644 src/libnetdata/os/tinysleep.c delete mode 100644 src/libnetdata/os/tinysleep.h create mode 100644 src/libnetdata/os/windows-perflib/perflib-dump.c create mode 100644 src/libnetdata/os/windows-perflib/perflib-names.c create mode 100644 src/libnetdata/os/windows-perflib/perflib.c create mode 100644 src/libnetdata/os/windows-perflib/perflib.h create mode 100644 src/libnetdata/os/windows-wmi/windows-wmi-GetDiskDriveInfo.c create mode 100644 src/libnetdata/os/windows-wmi/windows-wmi-GetDiskDriveInfo.h create mode 100644 src/libnetdata/os/windows-wmi/windows-wmi.c create mode 100644 src/libnetdata/os/windows-wmi/windows-wmi.h create mode 100644 src/libnetdata/parsers/duration.c create mode 100644 src/libnetdata/parsers/duration.h create mode 100644 src/libnetdata/parsers/duration.html create mode 100644 src/libnetdata/parsers/durations.md create mode 100644 src/libnetdata/parsers/entries.c create mode 100644 src/libnetdata/parsers/entries.h create mode 100644 src/libnetdata/parsers/parsers.h create mode 100644 src/libnetdata/parsers/size.c create mode 100644 src/libnetdata/parsers/size.h create mode 100644 src/libnetdata/parsers/sizes.md create mode 100644 src/libnetdata/parsers/timeframe.c create mode 100644 src/libnetdata/parsers/timeframe.h create mode 100644 src/libnetdata/paths/paths.c create mode 100644 src/libnetdata/paths/paths.h create mode 100644 src/libnetdata/ringbuffer/ringbuffer.c create mode 100644 src/libnetdata/ringbuffer/ringbuffer.h create mode 100644 src/libnetdata/ringbuffer/ringbuffer_internal.h create mode 100644 src/libnetdata/sanitizers/chart_id_and_name.c create mode 100644 src/libnetdata/sanitizers/chart_id_and_name.h create mode 100644 src/libnetdata/sanitizers/sanitizers-functions.c create mode 100644 
src/libnetdata/sanitizers/sanitizers-functions.h create mode 100644 src/libnetdata/sanitizers/sanitizers-labels.c create mode 100644 src/libnetdata/sanitizers/sanitizers-labels.h create mode 100644 src/libnetdata/sanitizers/sanitizers-pluginsd.c create mode 100644 src/libnetdata/sanitizers/sanitizers-pluginsd.h create mode 100644 src/libnetdata/sanitizers/sanitizers.h create mode 100644 src/libnetdata/sanitizers/utf8-sanitizer.c create mode 100644 src/libnetdata/sanitizers/utf8-sanitizer.h delete mode 100644 src/libnetdata/simple_hashtable.h create mode 100644 src/libnetdata/simple_hashtable/simple_hashtable.h create mode 100644 src/libnetdata/simple_hashtable/simple_hashtable_undef.h delete mode 100644 src/libnetdata/simple_hashtable_undef.h create mode 100644 src/libnetdata/spawn_server/log-forwarder.c create mode 100644 src/libnetdata/spawn_server/log-forwarder.h create mode 100644 src/libnetdata/spawn_server/spawn-tester.c create mode 100644 src/libnetdata/spawn_server/spawn_library.c create mode 100644 src/libnetdata/spawn_server/spawn_library.h delete mode 100644 src/libnetdata/spawn_server/spawn_server.c create mode 100644 src/libnetdata/spawn_server/spawn_server_internals.h create mode 100644 src/libnetdata/spawn_server/spawn_server_libuv.c create mode 100644 src/libnetdata/spawn_server/spawn_server_nofork.c create mode 100644 src/libnetdata/spawn_server/spawn_server_posix.c create mode 100644 src/libnetdata/spawn_server/spawn_server_windows.c create mode 100644 src/libnetdata/string/utf8.c create mode 100644 src/libnetdata/xxHash/xxhash.h delete mode 100644 src/libnetdata/xxhash.h create mode 100644 src/plugins.d/README.md create mode 100644 src/plugins.d/functions-table.md create mode 100644 src/plugins.d/gperf-config.txt create mode 100644 src/plugins.d/gperf-hashtable.h create mode 100644 src/plugins.d/plugins_d.c create mode 100644 src/plugins.d/plugins_d.h create mode 100644 src/plugins.d/pluginsd_dyncfg.c create mode 100644 src/plugins.d/pluginsd_dyncfg.h create mode 100644 src/plugins.d/pluginsd_functions.c create mode 100644 src/plugins.d/pluginsd_functions.h create mode 100644 src/plugins.d/pluginsd_internals.c create mode 100644 src/plugins.d/pluginsd_internals.h create mode 100644 src/plugins.d/pluginsd_parser.c create mode 100644 src/plugins.d/pluginsd_parser.h create mode 100644 src/plugins.d/pluginsd_replication.c create mode 100644 src/plugins.d/pluginsd_replication.h delete mode 100644 src/streaming/common.h delete mode 100644 src/streaming/compression.c delete mode 100644 src/streaming/compression.h delete mode 100644 src/streaming/compression_brotli.c delete mode 100644 src/streaming/compression_brotli.h delete mode 100644 src/streaming/compression_gzip.c delete mode 100644 src/streaming/compression_gzip.h delete mode 100644 src/streaming/compression_lz4.c delete mode 100644 src/streaming/compression_lz4.h delete mode 100644 src/streaming/compression_zstd.c delete mode 100644 src/streaming/compression_zstd.h create mode 100644 src/streaming/h2o-common.h create mode 100644 src/streaming/protocol/command-begin-set-end.c create mode 100644 src/streaming/protocol/command-chart-definition.c create mode 100644 src/streaming/protocol/command-claimed_id.c create mode 100644 src/streaming/protocol/command-function.c create mode 100644 src/streaming/protocol/command-host-labels.c create mode 100644 src/streaming/protocol/command-host-variables.c create mode 100644 src/streaming/protocol/command-nodeid.c create mode 100644 src/streaming/protocol/commands.c create mode 
100644 src/streaming/protocol/commands.h create mode 100644 src/streaming/receiver.h create mode 100644 src/streaming/rrdhost-status.c create mode 100644 src/streaming/rrdhost-status.h delete mode 100644 src/streaming/rrdpush.c create mode 100644 src/streaming/sender-commit.c create mode 100644 src/streaming/sender-connect.c create mode 100644 src/streaming/sender-destinations.c create mode 100644 src/streaming/sender-destinations.h create mode 100644 src/streaming/sender-execute.c create mode 100644 src/streaming/sender-internals.h create mode 100644 src/streaming/sender.h create mode 100644 src/streaming/stream-capabilities.c create mode 100644 src/streaming/stream-capabilities.h create mode 100644 src/streaming/stream-compression/brotli.c create mode 100644 src/streaming/stream-compression/brotli.h create mode 100644 src/streaming/stream-compression/compression.c create mode 100644 src/streaming/stream-compression/compression.h create mode 100644 src/streaming/stream-compression/gzip.c create mode 100644 src/streaming/stream-compression/gzip.h create mode 100644 src/streaming/stream-compression/lz4.c create mode 100644 src/streaming/stream-compression/lz4.h create mode 100644 src/streaming/stream-compression/zstd.c create mode 100644 src/streaming/stream-compression/zstd.h create mode 100644 src/streaming/stream-conf.c create mode 100644 src/streaming/stream-conf.h create mode 100644 src/streaming/stream-handshake.c create mode 100644 src/streaming/stream-handshake.h create mode 100644 src/streaming/stream-path.c create mode 100644 src/streaming/stream-path.h delete mode 100644 src/web/api/badges/README.md delete mode 100644 src/web/api/badges/web_buffer_svg.c delete mode 100644 src/web/api/badges/web_buffer_svg.h delete mode 100644 src/web/api/exporters/allmetrics.c delete mode 100644 src/web/api/exporters/allmetrics.h delete mode 100644 src/web/api/exporters/shell/allmetrics_shell.c delete mode 100644 src/web/api/exporters/shell/allmetrics_shell.h create mode 100644 src/web/api/functions/function-bearer_get_token.c create mode 100644 src/web/api/functions/function-bearer_get_token.h create mode 100644 src/web/api/functions/function-progress.c create mode 100644 src/web/api/functions/function-progress.h create mode 100644 src/web/api/functions/function-streaming.c create mode 100644 src/web/api/functions/function-streaming.h create mode 100644 src/web/api/functions/functions.c create mode 100644 src/web/api/functions/functions.h delete mode 100644 src/web/api/ilove/README.md delete mode 100644 src/web/api/ilove/ilove.c delete mode 100644 src/web/api/ilove/ilove.h delete mode 100644 src/web/api/ilove/measure-text.js create mode 100644 src/web/api/maps/contexts_alert_statuses.c create mode 100644 src/web/api/maps/contexts_alert_statuses.h create mode 100644 src/web/api/maps/contexts_options.c create mode 100644 src/web/api/maps/contexts_options.h create mode 100644 src/web/api/maps/datasource_formats.c create mode 100644 src/web/api/maps/datasource_formats.h create mode 100644 src/web/api/maps/maps.h create mode 100644 src/web/api/maps/rrdr_options.c create mode 100644 src/web/api/maps/rrdr_options.h create mode 100644 src/web/api/v1/api_v1_aclk.c create mode 100644 src/web/api/v1/api_v1_alarms.c create mode 100644 src/web/api/v1/api_v1_allmetrics.c create mode 100644 src/web/api/v1/api_v1_badge/README.md create mode 100644 src/web/api/v1/api_v1_badge/web_buffer_svg.c create mode 100644 src/web/api/v1/api_v1_calls.h create mode 100644 src/web/api/v1/api_v1_charts.c create mode 100644 
src/web/api/v1/api_v1_config.c create mode 100644 src/web/api/v1/api_v1_context.c create mode 100644 src/web/api/v1/api_v1_contexts.c create mode 100644 src/web/api/v1/api_v1_data.c create mode 100644 src/web/api/v1/api_v1_dbengine.c create mode 100644 src/web/api/v1/api_v1_function.c create mode 100644 src/web/api/v1/api_v1_functions.c create mode 100644 src/web/api/v1/api_v1_info.c create mode 100644 src/web/api/v1/api_v1_manage.c create mode 100644 src/web/api/v1/api_v1_ml_info.c create mode 100644 src/web/api/v1/api_v1_registry.c create mode 100644 src/web/api/v1/api_v1_weights.c create mode 100644 src/web/api/v2/api_v2_alert_config.c create mode 100644 src/web/api/v2/api_v2_alert_transitions.c create mode 100644 src/web/api/v2/api_v2_alerts.c create mode 100644 src/web/api/v2/api_v2_bearer.c create mode 100644 src/web/api/v2/api_v2_calls.h create mode 100644 src/web/api/v2/api_v2_claim.c create mode 100644 src/web/api/v2/api_v2_contexts.c create mode 100644 src/web/api/v2/api_v2_data.c create mode 100644 src/web/api/v2/api_v2_functions.c create mode 100644 src/web/api/v2/api_v2_ilove/README.md create mode 100644 src/web/api/v2/api_v2_ilove/ilove.c create mode 100644 src/web/api/v2/api_v2_ilove/measure-text.js create mode 100644 src/web/api/v2/api_v2_info.c create mode 100644 src/web/api/v2/api_v2_node_instances.c create mode 100644 src/web/api/v2/api_v2_nodes.c create mode 100644 src/web/api/v2/api_v2_progress.c create mode 100644 src/web/api/v2/api_v2_q.c create mode 100644 src/web/api/v2/api_v2_versions.c create mode 100644 src/web/api/v2/api_v2_webrtc.c create mode 100644 src/web/api/v2/api_v2_weights.c create mode 100644 src/web/api/v3/api_v3_calls.h create mode 100644 src/web/api/v3/api_v3_me.c create mode 100644 src/web/api/v3/api_v3_settings.c create mode 100644 src/web/api/web_api_v3.c create mode 100644 src/web/api/web_api_v3.h delete mode 100644 src/web/gui/.dashboard-v2-notice.md delete mode 100755 src/web/gui/bundle_dashboard_v2.py delete mode 100644 src/web/gui/index.html delete mode 100644 src/web/gui/registry-access.html delete mode 100644 src/web/gui/registry-alert-redirect.html delete mode 100644 src/web/gui/registry-hello.html delete mode 100644 src/web/gui/static/splash.css delete mode 100644 src/web/gui/v2/.well-known/assetlinks.json delete mode 100644 src/web/gui/v2/1220.01d6bbaab869c74f4437.chunk.js delete mode 100644 src/web/gui/v2/1396.56f70d7c659ac0b694cd.chunk.js delete mode 100644 src/web/gui/v2/1396.56f70d7c659ac0b694cd.chunk.js.LICENSE.txt delete mode 100644 src/web/gui/v2/1418.16d53ba5cce2c6a8143a.chunk.js delete mode 100644 src/web/gui/v2/1782.d82eb301aa81b380dd0c.chunk.js delete mode 100644 src/web/gui/v2/1839.a4196d2a87ac0fdd9f34.chunk.js delete mode 100644 src/web/gui/v2/185.42bab351ba68de7ca4aa.chunk.js delete mode 100644 src/web/gui/v2/1876.e610906417b961290730.chunk.js delete mode 100644 src/web/gui/v2/195.4cdbea6af54d14a95949.chunk.js delete mode 100644 src/web/gui/v2/2007.b33ce2b4b736228fd681.chunk.js delete mode 100644 src/web/gui/v2/252.40edc9b0f6da1422f40b.chunk.js delete mode 100644 src/web/gui/v2/3104.3b70865e21a81a616af3.chunk.js delete mode 100644 src/web/gui/v2/3350.ae7151980981854dc3d1.chunk.js delete mode 100644 src/web/gui/v2/3455.f9ca876de57244386773.chunk.js delete mode 100644 src/web/gui/v2/3455.f9ca876de57244386773.chunk.js.LICENSE.txt delete mode 100644 src/web/gui/v2/3621.01ee70ee9c311ac163d9.chunk.js delete mode 100644 src/web/gui/v2/3624.bfeb1fdc3057ba82ddac.chunk.js delete mode 100644 
src/web/gui/v2/3736.e572adfdf7951f74a741.chunk.js delete mode 100644 src/web/gui/v2/3750.4ad02f036f2a7c520b1c.chunk.js delete mode 100644 src/web/gui/v2/3843.89070793921be1288bb5.css delete mode 100644 src/web/gui/v2/3843.ffbb6f614ba4f7b77570.chunk.js delete mode 100644 src/web/gui/v2/3968.483ca2ad3b300293e655.chunk.js delete mode 100644 src/web/gui/v2/3D_PARTY_LICENSES.txt delete mode 100644 src/web/gui/v2/4034.35199d2809d318eed690.chunk.js delete mode 100644 src/web/gui/v2/4140.46221d08bcda08826c78.chunk.js delete mode 100644 src/web/gui/v2/4140.89070793921be1288bb5.css delete mode 100644 src/web/gui/v2/4414.590ba07d470ba2ce7dd0.chunk.js delete mode 100644 src/web/gui/v2/4631.158982e127e11bdc6a45.chunk.js delete mode 100644 src/web/gui/v2/4680.7d8122d91e9d4582836a.chunk.js delete mode 100644 src/web/gui/v2/4958.5969fedc1ff7dc82775e.chunk.js delete mode 100644 src/web/gui/v2/5246.07c5a1649f0805c140fe.chunk.js delete mode 100644 src/web/gui/v2/5304.cc797fdd343c7e873b2f.chunk.js delete mode 100644 src/web/gui/v2/5426.254557ad3e1f2d14ad29.chunk.js delete mode 100644 src/web/gui/v2/5596.2036706750ff4028cff2.chunk.js delete mode 100644 src/web/gui/v2/5598.07ff43a6b96bd41e8637.chunk.js delete mode 100644 src/web/gui/v2/5700.b7c9908dc7f30a5a57e7.chunk.js delete mode 100644 src/web/gui/v2/5709.c494eb62187917e2f2f6.chunk.js delete mode 100644 src/web/gui/v2/5709.c494eb62187917e2f2f6.chunk.js.LICENSE.txt delete mode 100644 src/web/gui/v2/5794.252ff787d58d64eb4988.chunk.js delete mode 100644 src/web/gui/v2/6008.3d0636fe17f4f6274485.chunk.js delete mode 100644 src/web/gui/v2/6121.f7286809e53e1c6d655a.chunk.js delete mode 100644 src/web/gui/v2/6121.f7286809e53e1c6d655a.chunk.js.LICENSE.txt delete mode 100644 src/web/gui/v2/6323.26d4d949c9b6f8674c2e.chunk.js delete mode 100644 src/web/gui/v2/6331.89070793921be1288bb5.css delete mode 100644 src/web/gui/v2/6331.c91b5d104cdff1be3b80.chunk.js delete mode 100644 src/web/gui/v2/6384.0fad56b0bc902f186c98.chunk.js delete mode 100644 src/web/gui/v2/6469.47926fa38028dc7d0d41.chunk.js delete mode 100644 src/web/gui/v2/6469.89070793921be1288bb5.css delete mode 100644 src/web/gui/v2/6661.72f782bd78fea8c2d836.chunk.js delete mode 100644 src/web/gui/v2/6760.370b9780120c145da28f.chunk.js delete mode 100644 src/web/gui/v2/683.02c173493ef257c210fa.chunk.js delete mode 100644 src/web/gui/v2/683.cc9fa5f3bdc0bf3ab2fc.css delete mode 100644 src/web/gui/v2/6944.ab3e70c9ac0f05013b5f.chunk.js delete mode 100644 src/web/gui/v2/7144.382c341e09540fdebaa6.chunk.js delete mode 100644 src/web/gui/v2/7144.382c341e09540fdebaa6.chunk.js.LICENSE.txt delete mode 100644 src/web/gui/v2/7146.79304e386ac9238b7cf1.chunk.js delete mode 100644 src/web/gui/v2/7170.5d6047bb6ce9d77d53db.chunk.js delete mode 100644 src/web/gui/v2/7208.1d75cf5d007de32e403b.chunk.js delete mode 100644 src/web/gui/v2/7304.ed4690ec296b59fbe7fd.chunk.js delete mode 100644 src/web/gui/v2/7332.3acf93dcfa52c7f1bc18.chunk.js delete mode 100644 src/web/gui/v2/7340.25dce1c5cc66b613700f.chunk.js delete mode 100644 src/web/gui/v2/7436.1ebd371d70e6a87c5499.chunk.js delete mode 100644 src/web/gui/v2/7471.f96c4d04a73fb7551c03.chunk.js delete mode 100644 src/web/gui/v2/7487.89070793921be1288bb5.css delete mode 100644 src/web/gui/v2/7487.db63c95c27d973a07d9b.chunk.js delete mode 100644 src/web/gui/v2/749.e44087ac3a2e3a994318.chunk.js delete mode 100644 src/web/gui/v2/749.e44087ac3a2e3a994318.chunk.js.LICENSE.txt delete mode 100644 src/web/gui/v2/7519.7982a2e0fcdf82ba78dd.chunk.js delete mode 100644 
src/web/gui/v2/7529.658d363e12e73df83b60.chunk.js delete mode 100644 src/web/gui/v2/7840.2f2023f2eb1dcc943d94.chunk.js delete mode 100644 src/web/gui/v2/785.d016913841bcc0209d5b.chunk.js delete mode 100644 src/web/gui/v2/7857.813ae058cca579e05462.chunk.js delete mode 100644 src/web/gui/v2/7959.4f20f4b203e2bad8af39.chunk.js delete mode 100644 src/web/gui/v2/8059.4fdc76bb2cac1f74b41b.chunk.js delete mode 100644 src/web/gui/v2/8239.c85fc9f3599f198a9efb.chunk.js delete mode 100644 src/web/gui/v2/8323.437406936b642e8f6cb3.chunk.js delete mode 100644 src/web/gui/v2/8323.437406936b642e8f6cb3.chunk.js.LICENSE.txt delete mode 100644 src/web/gui/v2/8323.e22de33686bb2f34063c.css delete mode 100644 src/web/gui/v2/8505.c330f2104fefd71717da.chunk.js delete mode 100644 src/web/gui/v2/86.2c88d4d37b88e2620051.chunk.js delete mode 100644 src/web/gui/v2/8637.0958494526e838a60d2b.js delete mode 100644 src/web/gui/v2/8637.0958494526e838a60d2b.js.LICENSE.txt delete mode 100644 src/web/gui/v2/8784.a04e9c07186e1f057f56.chunk.js delete mode 100644 src/web/gui/v2/8842.406028f523a00acb97bd.chunk.js delete mode 100644 src/web/gui/v2/8910.019974f8675d8834dd07.chunk.js delete mode 100644 src/web/gui/v2/8938.5116982f737a2ef85330.chunk.js delete mode 100644 src/web/gui/v2/9292.cc5055091db9a0826933.chunk.js delete mode 100644 src/web/gui/v2/934.24d6fdc5f60aa6493962.chunk.js delete mode 100644 src/web/gui/v2/9400.6250bbf86c4fd3173de2.chunk.js delete mode 100644 src/web/gui/v2/9473.4fd4742ffb6b5348bea8.chunk.js delete mode 100644 src/web/gui/v2/963.35da4a3c4e49aac29dae.chunk.js delete mode 100644 src/web/gui/v2/979.3e5fddf93c977e6c71c3.chunk.js delete mode 100644 src/web/gui/v2/9818.3ce64e0b472412bfbc97.chunk.js delete mode 100644 src/web/gui/v2/9843.93f8c71c64ef97b9905e.chunk.js delete mode 100644 src/web/gui/v2/9912.702300c2dd9616289606.chunk.js delete mode 100644 src/web/gui/v2/LICENSE.md delete mode 120000 src/web/gui/v2/README.md delete mode 100644 src/web/gui/v2/agent.html delete mode 100644 src/web/gui/v2/allFiles.6.138.3.json delete mode 100644 src/web/gui/v2/allFiles.6.json delete mode 100644 src/web/gui/v2/app.08c9fe3ead1d43ff769b.js delete mode 100644 src/web/gui/v2/app.cb2e9f9a81cf9533384e.css delete mode 100644 src/web/gui/v2/apple-app-site-association delete mode 100644 src/web/gui/v2/bundlesManifest.6.json delete mode 100644 src/web/gui/v2/dashboard_v2.cmake delete mode 100644 src/web/gui/v2/favicon.ico delete mode 100644 src/web/gui/v2/index.html delete mode 100644 src/web/gui/v2/local-agent.html delete mode 100644 src/web/gui/v2/netdata.charts.fdfd27674ac5533bbcc2.js delete mode 100644 src/web/gui/v2/netdata.ui.647a4c3303ee8ec0da64.js delete mode 100644 src/web/gui/v2/netdata.ui.647a4c3303ee8ec0da64.js.LICENSE.txt delete mode 100644 src/web/gui/v2/npm.react.dom.2994f1b4604bd8ce80f6.js delete mode 100644 src/web/gui/v2/npm.react.dom.2994f1b4604bd8ce80f6.js.LICENSE.txt delete mode 100644 src/web/gui/v2/registry-access.html delete mode 100644 src/web/gui/v2/registry-alert-redirect.html delete mode 100644 src/web/gui/v2/registry-hello.html delete mode 100644 src/web/gui/v2/runtime.ceccffb089cc539b1c1f.js delete mode 100644 src/web/gui/v2/static/.well-known/assetlinks.json delete mode 100644 src/web/gui/v2/static/apple-app-site-association delete mode 100644 src/web/gui/v2/static/email/img/clea_badge.png delete mode 100644 src/web/gui/v2/static/email/img/clea_siren.png delete mode 100644 src/web/gui/v2/static/email/img/community_icon.png delete mode 100644 src/web/gui/v2/static/email/img/configure_icon.png 
delete mode 100644 src/web/gui/v2/static/email/img/crit_badge.png delete mode 100644 src/web/gui/v2/static/email/img/crit_siren.png delete mode 100644 src/web/gui/v2/static/email/img/flood_siren.png delete mode 100644 src/web/gui/v2/static/email/img/full_logo.png delete mode 100644 src/web/gui/v2/static/email/img/header.png delete mode 100644 src/web/gui/v2/static/email/img/isotype_600.png delete mode 100644 src/web/gui/v2/static/email/img/label_critical.png delete mode 100644 src/web/gui/v2/static/email/img/label_recovered.png delete mode 100644 src/web/gui/v2/static/email/img/label_warning.png delete mode 100644 src/web/gui/v2/static/email/img/reachability_siren.png delete mode 100644 src/web/gui/v2/static/email/img/warn_badge.png delete mode 100644 src/web/gui/v2/static/email/img/warn_siren.png delete mode 100644 src/web/gui/v2/static/img/list-style-image.svg delete mode 100644 src/web/gui/v2/static/img/logos/os/alpine.svg delete mode 100644 src/web/gui/v2/static/img/logos/os/arch.svg delete mode 100644 src/web/gui/v2/static/img/logos/os/centos.svg delete mode 100644 src/web/gui/v2/static/img/logos/os/coreos.svg delete mode 100644 src/web/gui/v2/static/img/logos/os/debian.svg delete mode 100644 src/web/gui/v2/static/img/logos/os/docker.svg delete mode 100644 src/web/gui/v2/static/img/logos/os/fedora.svg delete mode 100644 src/web/gui/v2/static/img/logos/os/freebsd.svg delete mode 100644 src/web/gui/v2/static/img/logos/os/freenas.svg delete mode 100644 src/web/gui/v2/static/img/logos/os/gentoo.svg delete mode 100644 src/web/gui/v2/static/img/logos/os/kubernetes.svg delete mode 100644 src/web/gui/v2/static/img/logos/os/linux-small.svg delete mode 100644 src/web/gui/v2/static/img/logos/os/linux.svg delete mode 100644 src/web/gui/v2/static/img/logos/os/macos.svg delete mode 100644 src/web/gui/v2/static/img/logos/os/manjaro.svg delete mode 100644 src/web/gui/v2/static/img/logos/os/openstack.svg delete mode 100644 src/web/gui/v2/static/img/logos/os/opensuse.svg delete mode 100644 src/web/gui/v2/static/img/logos/os/openwrt.svg delete mode 100644 src/web/gui/v2/static/img/logos/os/oracle.svg delete mode 100644 src/web/gui/v2/static/img/logos/os/pfsense.svg delete mode 100644 src/web/gui/v2/static/img/logos/os/placeholder.svg delete mode 100644 src/web/gui/v2/static/img/logos/os/raspberry-pi.svg delete mode 100644 src/web/gui/v2/static/img/logos/os/redhat.svg delete mode 100644 src/web/gui/v2/static/img/logos/os/rocky.svg delete mode 100644 src/web/gui/v2/static/img/logos/os/suse.svg delete mode 100644 src/web/gui/v2/static/img/logos/os/ubuntu.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/access-point.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/activemq.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/adaptec.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/alerta.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/apache.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/apc.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/aws-sns.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/aws.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/beanstalkd.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/boinc.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/btrfs.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/ceph.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/chrony.svg delete mode 100644 
src/web/gui/v2/static/img/logos/services/cloud.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/concul.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/consul.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/container.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/couchdb.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/cups.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/data-encryption.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/ddos.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/discord.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/dns.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/docker.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/dovecot.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/elasticsearch.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/email.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/exim.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/fail2ban.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/flock.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/fluentd.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/fping.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/freeradius.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/fronius.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/gnu-freeipmi.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/golang.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/grafana.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/graphite.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/haproxy.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/hub.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/icecast.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/influxdb.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/ipfs.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/irc.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/isc.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/kafka.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/kairosdb.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/kavenegar.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/key-file.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/kubernetes.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/libreswan.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/libvirt.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/lighthttpd.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/linux.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/litespeed.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/lm-sensors.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/load-balancer.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/log-file.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/logstash.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/lxd.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/mariadb.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/memcached.svg delete mode 100644 
src/web/gui/v2/static/img/logos/services/messagebird.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/mongodb.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/monit.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/monitoring.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/mysql.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/netfilter.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/network-protocol.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/network.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/nfs.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/nginx-plus.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/nginx.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/notification-bell.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/nsd.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/ntpd.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/nut.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/nvidia.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/openldap.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/opensips.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/opentsdb.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/openvpn.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/openzfs.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/oracle.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/pagerduty.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/php-fpm.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/placeholder.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/postfix.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/postgresql.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/powerdns.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/processor.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/prometheus.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/prowl.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/proxysql.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/puppet.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/pushbullet.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/pushover.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/qos.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/rabbitmq.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/raspberry-pi.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/redis.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/rethinkdb.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/retroshare.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/rocketchat.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/samba.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/server-connection.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/slack.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/sma.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/smstools3.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/solr.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/spigot.svg delete 
mode 100644 src/web/gui/v2/static/img/logos/services/springboot.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/squid.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/statsd.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/stiebel.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/systemd.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/telegram.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/temperature.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/tomcat.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/tor.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/traefik.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/twilio.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/unbound.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/uwsgi.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/varnish.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/veritas.svg delete mode 100644 src/web/gui/v2/static/img/logos/services/xen.svg delete mode 100644 src/web/gui/v2/static/img/mail/isotype.png delete mode 100644 src/web/gui/v2/static/img/mail/isotype.svg delete mode 100644 src/web/gui/v2/static/img/mail/logotype.png delete mode 100644 src/web/gui/v2/static/img/mail/logotype.svg delete mode 100644 src/web/gui/v2/static/img/new-dashboard.svg delete mode 100644 src/web/gui/v2/static/img/no-filter-results.png delete mode 100644 src/web/gui/v2/static/img/no-nodes-room.svg delete mode 100644 src/web/gui/v2/static/img/rack.png delete mode 100644 src/web/gui/v2/static/site/pages/holding-page-503/holding-page-503.css delete mode 100644 src/web/gui/v2/static/site/pages/holding-page-503/holding-page-503.svg delete mode 100644 src/web/gui/v2/static/site/pages/holding-page-503/index.html delete mode 100644 src/web/gui/v2/static/site/pages/holding-page-503/multiple-logos-group.svg delete mode 100644 src/web/gui/v2/static/site/pages/holding-page-503/netdata-logo-white.svg delete mode 100644 src/web/gui/v2/static/site/pages/holding-page-503/reset.svg delete mode 100644 src/web/gui/v2/static/splash.css delete mode 100644 src/web/gui/v2/sw.js create mode 100644 src/web/server/h2o/rrdpush.c delete mode 100644 src/web/server/h2o/streaming.c
diff --git a/.codacy.yml b/.codacy.yml
index 170853c7f..ee9c34669 100644
--- a/.codacy.yml
+++ b/.codacy.yml
@@ -1,6 +1,5 @@
 ---
 exclude_paths:
-  - src/collectors/python.d.plugin/python_modules/pyyaml2/**
   - src/collectors/python.d.plugin/python_modules/pyyaml3/**
   - src/collectors/python.d.plugin/python_modules/urllib3/**
   - src/collectors/python.d.plugin/python_modules/third_party/**
diff --git a/.github/ISSUE_TEMPLATE/BUG_REPORT.yml b/.github/ISSUE_TEMPLATE/BUG_REPORT.yml
index b63daba8a..a3ade2c1c 100644
--- a/.github/ISSUE_TEMPLATE/BUG_REPORT.yml
+++ b/.github/ISSUE_TEMPLATE/BUG_REPORT.yml
@@ -72,6 +72,8 @@ body:
         uname -a; uname -K
         # macOS
         uname -a; sw_vers
+        # Windows (prompt)
+        ver
         ```
        > NOTE: This will be automatically formatted into code, so no need for backticks.
diff --git a/.github/data/distros.yml b/.github/data/distros.yml
index 2c5569bff..73cb488ab 100644
--- a/.github/data/distros.yml
+++ b/.github/data/distros.yml
@@ -174,7 +174,7 @@ include:
   - &fedora
     distro: fedora
-    version: "40"
+    version: "41"
     support_type: Core
     notes: ''
     eol_check: true
@@ -183,13 +183,20 @@ include:
       dnf remove -y json-c-devel
     packages: &fedora_packages
       type: rpm
-      repo_distro: fedora/40
+      repo_distro: fedora/41
       builder_rev: *def_builder_rev
       arches:
         - x86_64
         - aarch64
     test:
       ebpf-core: true
+  - <<: *fedora
+    version: "40"
+    packages:
+      <<: *fedora_packages
+      repo_distro: fedora/40
+    test:
+      ebpf-core: true
   - <<: *fedora
     version: "39"
     packages:
@@ -303,6 +310,11 @@ include:
         - arm64
     test:
       ebpf-core: true
+  - <<: *ubuntu
+    version: "24.10"
+    packages:
+      <<: *ubuntu_packages
+      repo_distro: ubuntu/oracular
   - <<: *ubuntu
     version: "22.04"
     packages:
diff --git a/.github/dockerfiles/Dockerfile.build_test b/.github/dockerfiles/Dockerfile.build_test
index c275d61d6..80d97319d 100644
--- a/.github/dockerfiles/Dockerfile.build_test
+++ b/.github/dockerfiles/Dockerfile.build_test
@@ -1,4 +1,7 @@
-ARG BASE
+# The default value is overridden in every Dockerfile usage, but adding it here helps avoid issues with
+# CI checks that require a non-empty or valid base image name. See more details here:
+# https://docs.docker.com/go/dockerfile/rule/invalid-default-arg-in-from/
+ARG BASE="netdata"
 
 FROM ${BASE}
 
diff --git a/.github/dockerfiles/Dockerfile.clang b/.github/dockerfiles/Dockerfile.clang
index 869254198..4864fa0d4 100644
--- a/.github/dockerfiles/Dockerfile.clang
+++ b/.github/dockerfiles/Dockerfile.clang
@@ -16,4 +16,4 @@ WORKDIR /netdata
 COPY . .
 
 # Build Netdata
-RUN ./netdata-installer.sh --dont-wait --dont-start-it --disable-go --require-cloud
+RUN ./netdata-installer.sh --dont-wait --dont-start-it --disable-go
diff --git a/.github/labeler.yml b/.github/labeler.yml
index 36d18e74e..fe3e3b39f 100644
--- a/.github/labeler.yml
+++ b/.github/labeler.yml
@@ -77,12 +77,6 @@ area/collectors:
           - src/collectors/**
           - src/go/**
 
-collectors/plugins.d:
-  - any:
-      - changed-files:
-          - any-glob-to-any-file:
-              - src/collectors/plugins.d/**
-
 collectors/apps:
   - any:
       - changed-files:
@@ -289,3 +283,9 @@ area/web:
       - changed-files:
           - any-glob-to-any-file:
               - src/web/**
+
+area/plugins.d:
+  - any:
+      - changed-files:
+          - any-glob-to-any-file:
+              - src/plugins.d/**
diff --git a/.github/scripts/check-updater.sh b/.github/scripts/check-updater.sh
index 3df0c9de4..73de6d587 100755
--- a/.github/scripts/check-updater.sh
+++ b/.github/scripts/check-updater.sh
@@ -9,6 +9,7 @@ check_successful_update() {
   (
     netdata_version=$(netdata -v | awk '{print $2}')
     updater_version=$(cat packaging/version)
+    echo "Version: netdata '$netdata_version', updater '$updater_version'"
     if [ "$netdata_version" = "$updater_version" ]; then
       echo "Update successful!"
     else
diff --git a/.github/scripts/prepare-release-base.sh b/.github/scripts/prepare-release-base.sh
index 85bcb7a31..c89cc72b6 100755
--- a/.github/scripts/prepare-release-base.sh
+++ b/.github/scripts/prepare-release-base.sh
@@ -178,6 +178,7 @@ elif [ "${EVENT_TYPE}" = 'major' ] && [ "${EVENT_VERSION}" != "nightly" ]; then
     echo "ref=${EVENT_VERSION}" >> "${GITHUB_OUTPUT}"
     echo "type=release" >> "${GITHUB_OUTPUT}"
     echo "branch=master" >> "${GITHUB_OUTPUT}"
+    echo "new-branch=${branch_name}" >> "${GITHUB_OUTPUT}"
     echo "version=$(tr -d 'v' < packaging/version)" >> "${GITHUB_OUTPUT}"
 else
     echo '::error::Unrecognized release type or invalid version.'
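The echo added to check-updater.sh prints both version strings before they are compared, so a failed CI run shows exactly which side disagreed. Reduced to a standalone sketch (the non-zero exit on mismatch is an assumption here, not taken from the script):

```
#!/bin/sh
# Compare the version reported by the installed netdata binary against the
# version file shipped in the source tree, logging both values first.
netdata_version=$(netdata -v | awk '{print $2}')
updater_version=$(cat packaging/version)
echo "Version: netdata '$netdata_version', updater '$updater_version'"
if [ "$netdata_version" = "$updater_version" ]; then
  echo "Update successful!"
else
  echo "Update failed!" >&2
  exit 1
fi
```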
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 3a253c0cc..6add58697 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -35,7 +35,7 @@ jobs:
           submodules: recursive
       - name: Check source files
         id: check-source-files
-        uses: tj-actions/changed-files@v44
+        uses: tj-actions/changed-files@v45
         with:
           since_last_remote_commit: ${{ github.event_name != 'pull_request' }}
           files: |
@@ -54,7 +54,7 @@ jobs:
             **/*.md
       - name: Check build files
         id: check-build-files
-        uses: tj-actions/changed-files@v44
+        uses: tj-actions/changed-files@v45
         with:
           since_last_remote_commit: ${{ github.event_name != 'pull_request' }}
           files: |
@@ -275,12 +275,119 @@ jobs:
           && needs.file-check.outputs.run == 'true'
         }}
 
+  windows-build: # Test building on Windows
+    name: Test building on Windows
+    runs-on: windows-latest
+    needs:
+      - file-check
+    steps:
+      - name: Skip Check
+        id: skip
+        if: needs.file-check.outputs.run != 'true'
+        run: Write-Output "SKIPPED"
+      - name: Checkout
+        uses: actions/checkout@v4
+        id: checkout
+        if: needs.file-check.outputs.run == 'true'
+        with:
+          submodules: recursive
+          lfs: true
+      - name: Set Up Go
+        id: golang
+        if: needs.file-check.outputs.run == 'true'
+        uses: actions/setup-go@v5
+        with:
+          go-version: "^1.23"
+      - name: Set Up Dependencies
+        id: deps
+        if: needs.file-check.outputs.run == 'true'
+        run: ./packaging/windows/install-dependencies.ps1
+      - name: Build Netdata
+        id: build
+        if: needs.file-check.outputs.run == 'true'
+        env:
+          BUILD_DIR: ${{ github.workspace }}\build
+        run: ./packaging/windows/build.ps1
+      - name: Sign Agent Code
+        id: sign-agent
+        if: needs.file-check.outputs.run == 'true' && github.event_name != 'pull_request'
+        uses: azure/trusted-signing-action@v0.5.0
+        with:
+          azure-tenant-id: ${{ secrets.CODE_SIGNING_TENNANT_ID }}
+          azure-client-id: ${{ secrets.CODE_SIGNING_CLIENT_ID }}
+          azure-client-secret: ${{ secrets.CODE_SIGNING_CLIENT_SECRET }}
+          endpoint: "https://eus.codesigning.azure.net/"
+          trusted-signing-account-name: Netdata
+          certificate-profile-name: Netdata
+          files-folder: ${{ github.workspace }}\build
+          files-folder-filter: exe,dll
+          files-folder-recurse: true
+          file-digest: SHA256
+          timestamp-rfc3161: "http://timestamp.acs.microsoft.com"
+          timestamp-digest: SHA256
+      - name: Package Netdata
+        id: package
+        if: needs.file-check.outputs.run == 'true'
+        env:
+          BUILD_DIR: ${{ github.workspace }}\build
+        run: ./packaging/windows/package.ps1
+      - name: Sign Installer
+        id: sign-installer
+        if: needs.file-check.outputs.run == 'true' && github.event_name != 'pull_request'
+        uses: azure/trusted-signing-action@v0.5.0
+        with:
+          azure-tenant-id: ${{ secrets.CODE_SIGNING_TENNANT_ID }}
+          azure-client-id: ${{ secrets.CODE_SIGNING_CLIENT_ID }}
+          azure-client-secret: ${{ secrets.CODE_SIGNING_CLIENT_SECRET }}
+          endpoint: "https://eus.codesigning.azure.net/"
+          trusted-signing-account-name: Netdata
+          certificate-profile-name: Netdata
+          files-folder: ${{ github.workspace }}\packaging\windows
+          files-folder-filter: msi
+          file-digest: SHA256
+          timestamp-rfc3161: "http://timestamp.acs.microsoft.com"
+          timestamp-digest: SHA256
+      - name: Upload Installer
+        id: upload
+        uses: actions/upload-artifact@v4.4.2
+        with:
+          name: windows-x86_64-installer
+          path: packaging\windows\netdata*.msi
+          retention-days: 30
+      - name: Failure Notification
+        uses: rtCamp/action-slack-notify@v2
+        env:
+          SLACK_COLOR: 'danger'
+          SLACK_FOOTER: ''
+          SLACK_ICON_EMOJI: ':github-actions:'
+          SLACK_TITLE: 'Windows build failed:'
+          SLACK_USERNAME: 'GitHub Actions'
+          SLACK_MESSAGE: |-
+            ${{ github.repository }}: Updater checks for ${{ matrix.distro }} failed.
+            Checkout: ${{ steps.checkout.outcome }}
+            Set Up Dependencies: ${{ steps.deps.outcome }}
+            Build Netdata: ${{ steps.build.outcome }}
+            Sign Agent Code: ${{ steps.sign-agent.outcome }}
+            Package Netdata: ${{ steps.package.outcome }}
+            Sign Installer: ${{ steps.sign-installer.outcome }}
+            Upload Installer: ${{ steps.upload.outcome }}
+          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK_URL }}
+        if: >-
+          ${{
+            failure()
+            && startsWith(github.ref, 'refs/heads/master')
+            && github.event_name != 'pull_request'
+            && github.repository == 'netdata/netdata'
+            && needs.file-check.outputs.run == 'true'
+          }}
+
   prepare-upload: # Consolidate the artifacts for uploading or releasing.
     name: Prepare Artifacts
     runs-on: ubuntu-latest
     needs:
       - build-dist
       - build-static
+      - windows-build
       - file-check
     steps:
       - name: Skip Check
@@ -307,6 +414,18 @@ jobs:
           merge-multiple: true
           attempt_limit: 3
           attempt_delay: 2000
+      - name: Retrieve Windows Artifacts
+        id: fetch-windows
+        if: needs.file-check.outputs.run == 'true'
+        uses: Wandalen/wretry.action@v3
+        with:
+          action: actions/download-artifact@v4
+          with: |
+            pattern: windows-*-installer
+            path: dist-artifacts
+            merge-multiple: true
+          attempt_limit: 3
+          attempt_delay: 2000
       - name: Prepare Artifacts
         id: consolidate
         if: needs.file-check.outputs.run == 'true'
@@ -338,7 +457,8 @@ jobs:
             ${{ github.repository }}: Failed to prepare release artifacts for upload.
             Checkout: ${{ steps.checkout.outcome }}
             Prepare environment: ${{ steps.prepare.outcome }}
-            Fetch dist tarball: ${{ steps.fetch-dist.outcome }}
+            Fetch dist artifacts: ${{ steps.fetch-dist.outcome }}
+            Fetch Windows installers: ${{ steps.fetch-windows.outcome }}
             Consolidate artifacts: ${{ steps.consolidate.outcome }}
             Store: ${{ steps.store.outcome }}
           SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK_URL }}
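The new fetch-windows step mirrors fetch-dist: it downloads every artifact matching `windows-*-installer` into dist-artifacts before consolidation. A rough local equivalent, assuming the GitHub CLI is installed and authenticated (the run id and flag spelling are illustrative, not taken from this patch):

```
# Download the Windows installer artifacts from a given workflow run into
# dist-artifacts, using the same name pattern the workflow uploads.
gh run download <run-id> -R netdata/netdata \
  --pattern 'windows-*-installer' --dir dist-artifacts
```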
@@ -489,68 +609,12 @@ jobs:
-  upload-nightly: # Upload the nightly build artifacts to GCS.
-    name: Upload Nightly Artifacts
-    runs-on: ubuntu-latest
-    if: github.event_name == 'workflow_dispatch' && github.event.inputs.type == 'nightly' && github.repository == 'netdata/netdata'
-    needs:
-      - artifact-verification-dist
-      - artifact-verification-static
-    steps:
-      - name: Retrieve Artifacts
-        id: fetch
-        uses: Wandalen/wretry.action@v3
-        with:
-          action: actions/download-artifact@v4
-          with: |
-            name: final-artifacts
-            path: final-artifacts
-          attempt_limit: 3
-          attempt_delay: 2000
-      - name: Authenticate to GCS
-        id: gcs-auth
-        uses: google-github-actions/auth@v2
-        with:
-          project_id: ${{ secrets.GCP_NIGHTLY_STORAGE_PROJECT }}
-          credentials_json: ${{ secrets.GCS_STORAGE_SERVICE_KEY_JSON }}
-      - name: Setup GCS
-        id: gcs-setup
-        uses: google-github-actions/setup-gcloud@v2.1.1
-      - name: Upload Artifacts
-        id: upload
-        uses: google-github-actions/upload-cloud-storage@v2.1.2
-        with:
-          destination: ${{ secrets.GCP_NIGHTLY_STORAGE_BUCKET }}
-          gzip: false
-          path: ./final-artifacts/latest-version.txt
-          parent: false
-      - name: Failure Notification
-        uses: rtCamp/action-slack-notify@v2
-        env:
-          SLACK_COLOR: 'danger'
-          SLACK_FOOTER: ''
-          SLACK_ICON_EMOJI: ':github-actions:'
-          SLACK_TITLE: 'Failed to upload nightly release artifacts:'
-          SLACK_USERNAME: 'GitHub Actions'
-          SLACK_MESSAGE: |-
-            ${{ github.repository }}: Failed to upload nightly release artifacts.
-            Fetch artifacts: ${{ steps.fetch.outcome }}
-            Authenticatie GCS: ${{ steps.gcs-auth.outcome }}
-            Setup GCS: ${{ steps.gcs-setup.outcome }}
-            Upload artifacts: ${{ steps.upload.outcome }}
-          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK_URL }}
-        if: >-
-          ${{
-            failure()
-            && startsWith(github.ref, 'refs/heads/master')
-            && github.event_name != 'pull_request'
-          }}
-
   create-nightly: # Create a nightly build release in netdata/netdata-nightlies
     name: Create Nightly Release
     runs-on: ubuntu-latest
     if: github.event_name == 'workflow_dispatch' && github.event.inputs.type == 'nightly' && github.repository == 'netdata/netdata'
     needs:
+      - prepare-upload
       - artifact-verification-dist
       - artifact-verification-static
     steps:
@@ -589,7 +653,7 @@ jobs:
         with:
           allowUpdates: false
           artifactErrorsFailBuild: true
-          artifacts: 'final-artifacts/sha256sums.txt,final-artifacts/netdata-*.tar.gz,final-artifacts/netdata-*.gz.run,final-artifacts/integrations.js'
+          artifacts: 'final-artifacts/sha256sums.txt,final-artifacts/netdata-*.tar.gz,final-artifacts/netdata-*.gz.run,final-artifacts/netdata-*.msi,final-artifacts/integrations.js'
           owner: netdata
           repo: netdata-nightlies
           body: Netdata nightly build for ${{ steps.version.outputs.date }}.
@@ -707,7 +771,7 @@ jobs:
         with:
           allowUpdates: false
           artifactErrorsFailBuild: true
-          artifacts: 'final-artifacts/sha256sums.txt,final-artifacts/netdata-*.tar.gz,final-artifacts/netdata-*.gz.run,final-artifacts/integrations.js'
+          artifacts: 'final-artifacts/sha256sums.txt,final-artifacts/netdata-*.tar.gz,final-artifacts/netdata-*.gz.run,final-artifacts/netdata-*.msi,final-artifacts/integrations.js'
           draft: true
           tag: ${{ needs.normalize-tag.outputs.tag }}
           token: ${{ secrets.NETDATABOT_GITHUB_TOKEN }}
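Since both release jobs now publish the MSI next to the tarballs and the sha256sums.txt checksum list, a downloaded set of artifacts can be verified in one step. A minimal sketch, assuming GNU coreutils and that the files were saved under final-artifacts/:

```
# Verify downloaded release artifacts against the published checksum list;
# --ignore-missing skips checksums for files that were not fetched.
cd final-artifacts
sha256sum -c --ignore-missing sha256sums.txt
```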
@@ -930,24 +994,18 @@ jobs:
           id: load
           if: needs.file-check.outputs.run == 'true'
           run: docker load --input image.tar
-      - name: netdata-installer on ${{ matrix.distro }}, disable cloud
-        id: build-no-cloud
-        if: needs.file-check.outputs.run == 'true'
-        run: |
-          docker run --security-opt seccomp=unconfined -w /netdata test:${{ matrix.artifact_key }} \
-              /bin/sh -c './netdata-installer.sh --dont-wait --dont-start-it --disable-cloud --one-time-build ${{ needs.file-check.outputs.skip-go }}'
       - name: netdata-installer on ${{ matrix.distro }}, require cloud
         id: build-cloud
         if: needs.file-check.outputs.run == 'true'
         run: |
           docker run --security-opt seccomp=unconfined -w /netdata test:${{ matrix.artifact_key }} \
-              /bin/sh -c './netdata-installer.sh --dont-wait --dont-start-it --require-cloud --one-time-build ${{ needs.file-check.outputs.skip-go }}'
+              /bin/sh -c './netdata-installer.sh --dont-wait --dont-start-it --one-time-build ${{ needs.file-check.outputs.skip-go }}'
       - name: netdata-installer on ${{ matrix.distro }}, require cloud, no JSON-C
         id: build-no-jsonc
         if: matrix.jsonc_removal != '' && needs.file-check.outputs.run == 'true'
         run: |
           docker run --security-opt seccomp=unconfined -w /netdata test:${{ matrix.artifact_key }} \
-              /bin/sh -c '/rmjsonc.sh && ./netdata-installer.sh --dont-wait --dont-start-it --require-cloud --one-time-build ${{ needs.file-check.outputs.skip-go }}'
+              /bin/sh -c '/rmjsonc.sh && ./netdata-installer.sh --dont-wait --dont-start-it --one-time-build ${{ needs.file-check.outputs.skip-go }}'
       - name: Failure Notification
         uses: rtCamp/action-slack-notify@v2
         env:
@@ -961,8 +1019,7 @@ jobs:
             Checkout: ${{ steps.checkout.outcome }}
             Fetch test environment: ${{ steps.fetch.outcome }}
             Load test environment: ${{ steps.load.outcome }}
-            netdata-installer, disable cloud: ${{ steps.build-no-cloud.outcome }}
-            netdata-installer, require cloud: ${{ steps.build-cloud.outcome }}
+            netdata-installer: ${{ steps.build-cloud.outcome }}
             netdata-installer, no JSON-C: ${{ steps.build-no-jsonc.outcome }}
           SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK_URL }}
         if: >-
@@ -985,12 +1042,12 @@ jobs:
       max-parallel: 8
       matrix:
         include:
-          - name: macos-12
-            runner: macos-12
           - name: macos-13
            runner: macos-13
           - name: macos-14-M1
             runner: macos-14
+          - name: macos-15-M1
+            runner: macos-15
     steps:
       - name: Skip Check
         id: skip
@@ -1015,7 +1072,7 @@ jobs:
         id: build-source
         if: needs.file-check.outputs.run == 'true'
         run: |
-          sudo bash ./netdata-installer.sh --install-no-prefix /usr/local/netdata --dont-wait --dont-start-it --require-cloud --one-time-build
+          sudo bash ./netdata-installer.sh --install-no-prefix /usr/local/netdata --dont-wait --dont-start-it --one-time-build
       - name: Test Agent start up
         id: test-agent
         if: needs.file-check.outputs.run == 'true'
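With --disable-cloud and --require-cloud dropped, every source-build check now runs the same installer invocation. A rough way to reproduce it outside CI, assuming a local checkout and a distro image with the build dependencies installed (the image name and the volume mount are hypothetical; CI bakes the sources into its own per-distro test image):

```
# Run the one-shot source build inside a container, mounting the checkout
# at /netdata; build-env:latest stands in for the per-distro CI image.
docker run --security-opt seccomp=unconfined -w /netdata \
  -v "$PWD:/netdata" build-env:latest \
  /bin/sh -c './netdata-installer.sh --dont-wait --dont-start-it --one-time-build'
```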
@@ -1046,112 +1103,6 @@ jobs:
           && github.repository == 'netdata/netdata'
         }}
 
-  windows-build: # Test building on Windows
-    name: Test building on Windows
-    runs-on: windows-latest
-    if: github.event_name != 'workflow_dispatch'
-    needs:
-      - file-check
-    steps:
-      - name: Skip Check
-        id: skip
-        if: needs.file-check.outputs.run != 'true'
-        run: Write-Output "SKIPPED"
-      - name: Checkout
-        uses: actions/checkout@v4
-        id: checkout
-        if: needs.file-check.outputs.run == 'true'
-        with:
-          submodules: recursive
-          lfs: true
-      - name: Set Up Go
-        id: golang
-        if: needs.file-check.outputs.run == 'true'
-        uses: actions/setup-go@v5
-        with:
-          go-version: "^1.22"
-      - name: Set Up Dependencies
-        id: deps
-        if: needs.file-check.outputs.run == 'true'
-        run: ./packaging/windows/install-dependencies.ps1
-      - name: Build Netdata
-        id: build
-        if: needs.file-check.outputs.run == 'true'
-        env:
-          BUILD_DIR: ${{ github.workspace }}\build
-        run: ./packaging/windows/build.ps1
-      - name: Sign Agent Code
-        id: sign-agent
-        if: needs.file-check.outputs.run == 'true' && github.event_name != 'pull_request'
-        uses: azure/trusted-signing-action@v0.4.0
-        with:
-          azure-tenant-id: ${{ secrets.CODE_SIGNING_TENNANT_ID }}
-          azure-client-id: ${{ secrets.CODE_SIGNING_CLIENT_ID }}
-          azure-client-secret: ${{ secrets.CODE_SIGNING_CLIENT_SECRET }}
-          endpoint: "https://eus.codesigning.azure.net/"
-          trusted-signing-account-name: Netdata
-          certificate-profile-name: Netdata
-          files-folder: ${{ github.workspace }}\build
-          files-folder-filter: exe,dll
-          files-folder-recurse: true
-          file-digest: SHA256
-          timestamp-rfc3161: "http://timestamp.acs.microsoft.com"
-          timestamp-digest: SHA256
-      - name: Package Netdata
-        id: package
-        if: needs.file-check.outputs.run == 'true'
-        env:
-          BUILD_DIR: ${{ github.workspace }}\build
-        run: ./packaging/windows/package.ps1
-      - name: Sign Installer
-        id: sign-installer
-        if: needs.file-check.outputs.run == 'true' && github.event_name != 'pull_request'
-        uses: azure/trusted-signing-action@v0.4.0
-        with:
-          azure-tenant-id: ${{ secrets.CODE_SIGNING_TENNANT_ID }}
-          azure-client-id: ${{ secrets.CODE_SIGNING_CLIENT_ID }}
-          azure-client-secret: ${{ secrets.CODE_SIGNING_CLIENT_SECRET }}
-          endpoint: "https://eus.codesigning.azure.net/"
-          trusted-signing-account-name: Netdata
-          certificate-profile-name: Netdata
-          files: ${{ github.workspace }}\packaging\windows\netdata-installer.exe
-          file-digest: SHA256
-          timestamp-rfc3161: "http://timestamp.acs.microsoft.com"
-          timestamp-digest: SHA256
-      - name: Upload Installer
-        id: upload
-        uses: actions/upload-artifact@v4.4.2
-        with:
-          name: windows-x86_64-installer
-          path: packaging\windows\netdata-installer.exe
-          retention-days: 30
-      - name: Failure Notification
-        uses: rtCamp/action-slack-notify@v2
-        env:
-          SLACK_COLOR: 'danger'
-          SLACK_FOOTER: ''
-          SLACK_ICON_EMOJI: ':github-actions:'
-          SLACK_TITLE: 'Windows build failed:'
-          SLACK_USERNAME: 'GitHub Actions'
-          SLACK_MESSAGE: |-
-            ${{ github.repository }}: Updater checks for ${{ matrix.distro }} failed.
-            Checkout: ${{ steps.checkout.outcome }}
-            Set Up Dependencies: ${{ steps.deps.outcome }}
-            Build Netdata: ${{ steps.build.outcome }}
-            Sign Agent Code: ${{ steps.sign-agent.outcome }}
-            Package Netdata: ${{ steps.package.outcome }}
-            Sign Installer: ${{ steps.sign-installer.outcome }}
-            Upload Installer: ${{ steps.upload.outcome }}
-          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK_URL }}
-        if: >-
-          ${{
-            failure()
-            && startsWith(github.ref, 'refs/heads/master')
-            && github.event_name != 'pull_request'
-            && github.repository == 'netdata/netdata'
-            && needs.file-check.outputs.run == 'true'
-          }}
-
   updater-check: # Test the generated dist archive using the updater code.
     name: Test Generated Distfile and Updater Code
     runs-on: ubuntu-latest
diff --git a/.github/workflows/checks.yml b/.github/workflows/checks.yml
index b33aa6ff1..82567a54e 100644
--- a/.github/workflows/checks.yml
+++ b/.github/workflows/checks.yml
@@ -25,7 +25,7 @@ jobs:
           submodules: recursive
       - name: Check source files
         id: check-source-files
-        uses: tj-actions/changed-files@v44
+        uses: tj-actions/changed-files@v45
         with:
           since_last_remote_commit: ${{ github.event_name != 'pull_request' }}
           files: |
@@ -44,7 +44,7 @@ jobs:
           **/*.md
       - name: Check build files
         id: check-build-files
-        uses: tj-actions/changed-files@v44
+        uses: tj-actions/changed-files@v45
         with:
           since_last_remote_commit: ${{ github.event_name != 'pull_request' }}
           files: |
diff --git a/.github/workflows/dashboard-pr.yml b/.github/workflows/dashboard-pr.yml
index 418a8b8e6..b72c0fc6c 100644
--- a/.github/workflows/dashboard-pr.yml
+++ b/.github/workflows/dashboard-pr.yml
@@ -28,7 +28,7 @@ jobs:
           src/web/gui/bundle_dashboard_v1.py ${{ github.event.inputs.dashboard_version }}
       - name: Create Pull Request
         id: pr
-        uses: peter-evans/create-pull-request@v6
+        uses: peter-evans/create-pull-request@v7
         with:
           title: 'Update dashboard to version ${{ github.event.inputs.dashboard_version }}.'
           body: 'See https://github.com/netdata/dashboard/releases/tag/${{ github.event.inputs.dashboard_version }} for changes.'
diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
index b1448a0de..d98903073 100644
--- a/.github/workflows/docker.yml
+++ b/.github/workflows/docker.yml
@@ -43,7 +43,7 @@ jobs:
       - name: Check source files
         id: check-source-files
         if: github.event_name != 'workflow_dispatch'
-        uses: tj-actions/changed-files@v44
+        uses: tj-actions/changed-files@v45
         with:
           since_last_remote_commit: ${{ github.event_name != 'pull_request' }}
           files: |
@@ -63,7 +63,7 @@ jobs:
       - name: Check build system files
         id: check-build-files
         if: github.event_name != 'workflow_dispatch'
-        uses: tj-actions/changed-files@v44
+        uses: tj-actions/changed-files@v45
         with:
           since_last_remote_commit: ${{ github.event_name != 'pull_request' }}
           files: |
@@ -178,7 +178,7 @@ jobs:
       - name: Upload Cache
         id: upload-cache
         if: github.repository == 'netdata/netdata' && needs.file-check.outputs.run == 'true' && github.event_name == 'workflow_dispatch'
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v4.4.2
         with:
           name: cache-${{ steps.artifact-name.outputs.platform }}
           path: /tmp/build-cache/*
@@ -296,7 +296,7 @@ jobs:
       - name: Upload digest
         id: upload-digest
         if: github.repository == 'netdata/netdata'
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v4.4.2
         with:
           name: docker-digests-${{ steps.artifact-name.outputs.platform }}
           path: /tmp/digests/*
@@ -451,7 +451,7 @@ jobs:
       - name: Upload digest
         id: upload-digest
         if: github.repository == 'netdata/netdata'
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v4.4.2
         with:
           name: quay-digests-${{ steps.artifact-name.outputs.platform }}
           path: /tmp/digests/*
@@ -607,7 +607,7 @@ jobs:
       - name: Upload digest
         id: upload-digest
         if: github.repository == 'netdata/netdata'
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v4.4.2
         with:
           name: ghcr-digests-${{ steps.artifact-name.outputs.platform }}
           path: /tmp/digests/*
diff --git a/.github/workflows/generate-integrations.yml b/.github/workflows/generate-integrations.yml
index f6e930899..4acca5887 100644
--- a/.github/workflows/generate-integrations.yml
+++ b/.github/workflows/generate-integrations.yml
@@ -57,7 +57,7 @@ jobs:
         run: rm -rf go.d.plugin virtualenv
       - name: Create PR
         id: create-pr
-        uses: peter-evans/create-pull-request@v6
+        uses: peter-evans/create-pull-request@v7
         with:
           token: ${{ secrets.NETDATABOT_GITHUB_TOKEN }}
           commit-message: Regenerate integrations.js
diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml
index 93596454b..7e94dc012 100644
--- a/.github/workflows/go-tests.yml
+++ b/.github/workflows/go-tests.yml
@@ -24,7 +24,7 @@ jobs:
           submodules: recursive
       - name: Check files
         id: check-files
-        uses: tj-actions/changed-files@v44
+        uses: tj-actions/changed-files@v45
         with:
           since_last_remote_commit: ${{ github.event_name != 'pull_request' }}
           files: |
diff --git a/.github/workflows/packaging.yml b/.github/workflows/packaging.yml
index 7cf472707..3bcde17f6 100644
--- a/.github/workflows/packaging.yml
+++ b/.github/workflows/packaging.yml
@@ -41,7 +41,7 @@ jobs:
           submodules: recursive
       - name: Check files
         id: check-files
-        uses: tj-actions/changed-files@v44
+        uses: tj-actions/changed-files@v45
         with:
           since_last_remote_commit: ${{ github.event_name != 'pull_request' }}
           files: |
diff --git a/.gitignore b/.gitignore
index 05b503bb3..d4b1c020c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -88,7 +88,6 @@ system/systemd/netdata-updater.service
 !system/systemd/netdata.service.*.in
 
 src/health/notifications/alarm-notify.sh
-claim/netdata-claim.sh
 src/collectors/cgroups.plugin/cgroup-name.sh
 src/collectors/cgroups.plugin/cgroup-network-helper.sh
 src/collectors/tc.plugin/tc-qos-helper.sh
@@ -180,6 +179,7 @@ Session.*.vim
 
 # Special exceptions
 !packaging/repoconfig/Makefile
+packaging/windows/resources/*.manifest
 
 # Jupyter notebook checkpoints
 .ipynb_checkpoints
@@ -196,5 +196,5 @@ build/
 src/go/plugin/go.d/bin/
 src/go/plugin/go.d/vendor
 
-# ignore nsis installer
-packaging/utils/netdata-installer.exe
+# ignore files used with msi installer
+packaging/windows/*.msi
diff --git a/CHANGELOG.md b/CHANGELOG.md
index ad2783f5d..a764f6896 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,11 +1,181 @@
 # Changelog
 
-## [v1.47.5](https://github.com/netdata/netdata/tree/v1.47.5) (2024-10-24)
+## [v2.0.3](https://github.com/netdata/netdata/tree/v2.0.3) (2024-11-22)
 
-[Full Changelog](https://github.com/netdata/netdata/compare/v1.47.4...v1.47.5)
+[Full Changelog](https://github.com/netdata/netdata/compare/v2.0.2...v2.0.3)
+
+**Merged pull requests:**
+
+- Register service to delay start [\#19063](https://github.com/netdata/netdata/pull/19063) ([stelfrag](https://github.com/stelfrag))
+- add links to mssql perflib object docs [\#19062](https://github.com/netdata/netdata/pull/19062) ([ilyam8](https://github.com/ilyam8))
+- build\(deps\): bump k8s.io/client-go from 0.31.2 to 0.31.3 in /src/go [\#19059](https://github.com/netdata/netdata/pull/19059) ([dependabot[bot]](https://github.com/apps/dependabot))
+- build\(deps\): bump github.com/vmware/govmomi from 0.46.1 to 0.46.2 in /src/go [\#19058](https://github.com/netdata/netdata/pull/19058) ([dependabot[bot]](https://github.com/apps/dependabot))
+- Windows doc updates [\#19054](https://github.com/netdata/netdata/pull/19054) ([Ancairon](https://github.com/Ancairon))
+- Securing Agents section docs cleanup [\#19053](https://github.com/netdata/netdata/pull/19053) ([Ancairon](https://github.com/Ancairon))
+- fix\(go.d/pkg/web\): correct close idle connections [\#19052](https://github.com/netdata/netdata/pull/19052) ([ilyam8](https://github.com/ilyam8))
+- Update documentation about our native package repos. [\#19049](https://github.com/netdata/netdata/pull/19049) ([Ferroin](https://github.com/Ferroin))
+- Regenerate integrations.js [\#19048](https://github.com/netdata/netdata/pull/19048) ([netdatabot](https://github.com/netdatabot))
+- feat\(go.d/pkg/web\): add "force\_http2" option [\#19047](https://github.com/netdata/netdata/pull/19047) ([ilyam8](https://github.com/ilyam8))
+- Regenerate integrations.js [\#19045](https://github.com/netdata/netdata/pull/19045) ([netdatabot](https://github.com/netdatabot))
+- Capitalize the word "Agent" [\#19044](https://github.com/netdata/netdata/pull/19044) ([Ancairon](https://github.com/Ancairon))
+- Capitalize the word "cloud" [\#19043](https://github.com/netdata/netdata/pull/19043) ([Ancairon](https://github.com/Ancairon))
+- Add a special version number to bypass alert snapshots [\#19042](https://github.com/netdata/netdata/pull/19042) ([stelfrag](https://github.com/stelfrag))
+- Add Custom Actions \(Installer\) [\#19041](https://github.com/netdata/netdata/pull/19041) ([thiagoftsm](https://github.com/thiagoftsm))
+- fix\(go.d/nvidia\_smi\): disable loop mode on Win [\#19040](https://github.com/netdata/netdata/pull/19040) ([ilyam8](https://github.com/ilyam8))
+- fix\(go.d/nvidia\_smi\): disable loop mode by default on Win [\#19039](https://github.com/netdata/netdata/pull/19039) ([ilyam8](https://github.com/ilyam8))
+- improvement\(go.d.plugin\): terminate on QUIT command [\#19038](https://github.com/netdata/netdata/pull/19038) ([ilyam8](https://github.com/ilyam8))
+- fix\(windows/netframework\): dont sanitize proc name for labels [\#19036](https://github.com/netdata/netdata/pull/19036) ([ilyam8](https://github.com/ilyam8))
+- Fix MSSQL algorithm \(Windows.plugin\) [\#19035](https://github.com/netdata/netdata/pull/19035) ([thiagoftsm](https://github.com/thiagoftsm))
+- --dev option to installer [\#19034](https://github.com/netdata/netdata/pull/19034) ([ktsaou](https://github.com/ktsaou))
+- add `shutdown` keyword to ensure graceful service termination on FreeBSD [\#19033](https://github.com/netdata/netdata/pull/19033) ([ilyam8](https://github.com/ilyam8))
+- fix: ensure correct startup order for Netdata service on FreeBSD [\#19032](https://github.com/netdata/netdata/pull/19032) ([ilyam8](https://github.com/ilyam8))
+- build\(deps\): bump github.com/gorcon/rcon from 1.3.5 to 1.4.0 in /src/go [\#19031](https://github.com/netdata/netdata/pull/19031) ([dependabot[bot]](https://github.com/apps/dependabot))
+- build\(deps\): bump github.com/vmware/govmomi from 0.46.0 to 0.46.1 in /src/go [\#19030](https://github.com/netdata/netdata/pull/19030) ([dependabot[bot]](https://github.com/apps/dependabot))
+- Regenerate integrations.js [\#19029](https://github.com/netdata/netdata/pull/19029) ([netdatabot](https://github.com/netdatabot))
+- improvement\(windows/iis\): add requests by type chart [\#19028](https://github.com/netdata/netdata/pull/19028) ([ilyam8](https://github.com/ilyam8))
+- fix\(windows/iis\): dont sanitize site name for labels [\#19027](https://github.com/netdata/netdata/pull/19027) ([ilyam8](https://github.com/ilyam8))
+- chore\(go.d.plugin\): set nooplogger for automaxprocs [\#19026](https://github.com/netdata/netdata/pull/19026) ([ilyam8](https://github.com/ilyam8))
+- Regenerate integrations.js [\#19025](https://github.com/netdata/netdata/pull/19025) ([netdatabot](https://github.com/netdatabot))
+- docs\(go.d/windows\): remove references to old MSI [\#19024](https://github.com/netdata/netdata/pull/19024) ([ilyam8](https://github.com/ilyam8))
+- improvement\(go.d.plugin\): automatically set GOMAXPROCS [\#19023](https://github.com/netdata/netdata/pull/19023) ([ilyam8](https://github.com/ilyam8))
+- Regenerate integrations.js [\#19022](https://github.com/netdata/netdata/pull/19022) ([netdatabot](https://github.com/netdatabot))
+- docs: just iis [\#19021](https://github.com/netdata/netdata/pull/19021) ([ilyam8](https://github.com/ilyam8))
+- chore\(windows.plugin\): format win collectors code [\#19019](https://github.com/netdata/netdata/pull/19019) ([ilyam8](https://github.com/ilyam8))
+- Regenerate integrations.js [\#19018](https://github.com/netdata/netdata/pull/19018) ([netdatabot](https://github.com/netdatabot))
+- fix\(go.d/ping\): fix "interface" option [\#19016](https://github.com/netdata/netdata/pull/19016) ([ilyam8](https://github.com/ilyam8))
+- Remove MSI test [\#19015](https://github.com/netdata/netdata/pull/19015) ([thiagoftsm](https://github.com/thiagoftsm))
+- fix has\_receiver condition in rrdhost\_status\(\) [\#19014](https://github.com/netdata/netdata/pull/19014) ([ktsaou](https://github.com/ktsaou))
+- backport of fixes from balance-parents [\#19012](https://github.com/netdata/netdata/pull/19012) ([ktsaou](https://github.com/ktsaou))
+- add missing spinlock unlocks on containers [\#19011](https://github.com/netdata/netdata/pull/19011) ([ktsaou](https://github.com/ktsaou))
+- Regenerate integrations.js [\#19010](https://github.com/netdata/netdata/pull/19010) ([netdatabot](https://github.com/netdatabot))
+- docs\(go.d/windows\): add deprecation notice [\#19009](https://github.com/netdata/netdata/pull/19009) ([ilyam8](https://github.com/ilyam8))
+- fix\(go.d/dyncfg\): remove additionalProperties [\#19006](https://github.com/netdata/netdata/pull/19006) ([ilyam8](https://github.com/ilyam8))
+- Set expires header when serving files [\#19005](https://github.com/netdata/netdata/pull/19005) ([stelfrag](https://github.com/stelfrag))
+- fix\(go.d/x509check\): correct check revocation code [\#19004](https://github.com/netdata/netdata/pull/19004) ([ilyam8](https://github.com/ilyam8))
+- fix\(go.d/dyncfg\): remove additionalProperties check [\#19003](https://github.com/netdata/netdata/pull/19003) ([ilyam8](https://github.com/ilyam8))
+- Regenerate integrations.js [\#19002](https://github.com/netdata/netdata/pull/19002) ([netdatabot](https://github.com/netdatabot))
+- improvement\(go.d/x509check\): support checking full chain expiry time [\#19001](https://github.com/netdata/netdata/pull/19001) ([ilyam8](https://github.com/ilyam8))
+- fix: exclude volumes w/o drive letter from disk\_space\_usage\_alert [\#19000](https://github.com/netdata/netdata/pull/19000) ([ilyam8](https://github.com/ilyam8))
+- Regenerate integrations.js [\#18997](https://github.com/netdata/netdata/pull/18997) ([netdatabot](https://github.com/netdatabot))
+- docs: win deploy remove `./` [\#18996](https://github.com/netdata/netdata/pull/18996) ([ilyam8](https://github.com/ilyam8))
+- docs: single line win deploy [\#18994](https://github.com/netdata/netdata/pull/18994) ([ilyam8](https://github.com/ilyam8))
+- Add SQL Express Metrics [\#18992](https://github.com/netdata/netdata/pull/18992) ([thiagoftsm](https://github.com/thiagoftsm))
+- Do not intentionally abort on non-0 exit code. [\#18991](https://github.com/netdata/netdata/pull/18991) ([vkalintiris](https://github.com/vkalintiris))
+- update plugin\_data\_collection\_status alert summary/info [\#18990](https://github.com/netdata/netdata/pull/18990) ([ilyam8](https://github.com/ilyam8))
+- health: enable go.d data collection job status alert [\#18989](https://github.com/netdata/netdata/pull/18989) ([ilyam8](https://github.com/ilyam8))
+- update GH bug report [\#18988](https://github.com/netdata/netdata/pull/18988) ([ilyam8](https://github.com/ilyam8))
+- chore\(go.d.plugin\): fix duplicate boolToInt [\#18987](https://github.com/netdata/netdata/pull/18987) ([ilyam8](https://github.com/ilyam8))
+- build\(deps\): bump golang.org/x/net from 0.30.0 to 0.31.0 in /src/go [\#18986](https://github.com/netdata/netdata/pull/18986) ([dependabot[bot]](https://github.com/apps/dependabot))
+- Improve Installer \(Part II\) [\#18983](https://github.com/netdata/netdata/pull/18983) ([thiagoftsm](https://github.com/thiagoftsm))
+- improvement\(go.d.plugin\): add data collection status chart [\#18981](https://github.com/netdata/netdata/pull/18981) ([ilyam8](https://github.com/ilyam8))
+- ci: fix win jobs [\#18979](https://github.com/netdata/netdata/pull/18979) ([ilyam8](https://github.com/ilyam8))
+- Regenerate integrations.js [\#18977](https://github.com/netdata/netdata/pull/18977) ([netdatabot](https://github.com/netdatabot))
+- improvement\(go.d/rabbitmq\): add queue status and net partitions [\#18976](https://github.com/netdata/netdata/pull/18976) ([ilyam8](https://github.com/ilyam8))
+- Regenerate integrations.js [\#18973](https://github.com/netdata/netdata/pull/18973) ([netdatabot](https://github.com/netdatabot))
+- add rabbitmq alerts [\#18972](https://github.com/netdata/netdata/pull/18972) ([ilyam8](https://github.com/ilyam8))
+- Regenerate integrations.js [\#18971](https://github.com/netdata/netdata/pull/18971) ([netdatabot](https://github.com/netdatabot))
+- fix\(go.d/snmp\): don't return error if no sysName [\#18970](https://github.com/netdata/netdata/pull/18970) ([ilyam8](https://github.com/ilyam8))
+- build\(deps\): bump golang.org/x/text from 0.19.0 to 0.20.0 in /src/go [\#18968](https://github.com/netdata/netdata/pull/18968) ([dependabot[bot]](https://github.com/apps/dependabot))
+- go mod tidy [\#18967](https://github.com/netdata/netdata/pull/18967) ([ilyam8](https://github.com/ilyam8))
+- Regenerate integrations.js [\#18966](https://github.com/netdata/netdata/pull/18966) ([netdatabot](https://github.com/netdatabot))
+- feat\(go.d/rabbitmq\): add cluster support [\#18965](https://github.com/netdata/netdata/pull/18965) ([ilyam8](https://github.com/ilyam8))
+- Tidy up CI to improve overall run times. [\#18957](https://github.com/netdata/netdata/pull/18957) ([Ferroin](https://github.com/Ferroin))
+- added /api/v3/stream\_path [\#18943](https://github.com/netdata/netdata/pull/18943) ([ktsaou](https://github.com/ktsaou))
+- Update Windows Documentation [\#18928](https://github.com/netdata/netdata/pull/18928) ([thiagoftsm](https://github.com/thiagoftsm))
+- IIS Metadata [\#18765](https://github.com/netdata/netdata/pull/18765) ([thiagoftsm](https://github.com/thiagoftsm))
+- Bump github.com/Wing924/ltsv from 0.3.1 to 0.4.0 in /src/go [\#18636](https://github.com/netdata/netdata/pull/18636) ([dependabot[bot]](https://github.com/apps/dependabot))
+
+## [v2.0.2](https://github.com/netdata/netdata/tree/v2.0.2) (2024-11-21)
+
+[Full Changelog](https://github.com/netdata/netdata/compare/v2.0.1...v2.0.2)
+
+## [v2.0.1](https://github.com/netdata/netdata/tree/v2.0.1) (2024-11-14)
+
+[Full Changelog](https://github.com/netdata/netdata/compare/v2.0.0...v2.0.1)
+
+## [v2.0.0](https://github.com/netdata/netdata/tree/v2.0.0) (2024-11-07)
+
+[Full Changelog](https://github.com/netdata/netdata/compare/v1.47.5...v2.0.0)
 
 **Merged pull requests:**
 
+- build\(deps\): update go toolchain to v1.23.3 [\#18961](https://github.com/netdata/netdata/pull/18961) ([ilyam8](https://github.com/ilyam8))
+- Adjust max possible extent size [\#18960](https://github.com/netdata/netdata/pull/18960) ([stelfrag](https://github.com/stelfrag))
+- build\(deps\): bump github.com/vmware/govmomi from 0.45.1 to 0.46.0 in /src/go [\#18959](https://github.com/netdata/netdata/pull/18959) ([dependabot[bot]](https://github.com/apps/dependabot))
+- chore\(go.d.plugin\): remove duplicate logging in init/check [\#18955](https://github.com/netdata/netdata/pull/18955) ([ilyam8](https://github.com/ilyam8))
+- Update README.md [\#18954](https://github.com/netdata/netdata/pull/18954) ([Ancairon](https://github.com/Ancairon))
+- Fix br elements [\#18952](https://github.com/netdata/netdata/pull/18952) ([Ancairon](https://github.com/Ancairon))
+- Precompile Python code on Windows. [\#18951](https://github.com/netdata/netdata/pull/18951) ([Ferroin](https://github.com/Ferroin))
+- docs: simplify go.d.plugin readme [\#18949](https://github.com/netdata/netdata/pull/18949) ([ilyam8](https://github.com/ilyam8))
+- fix memory leak when using libcurl [\#18947](https://github.com/netdata/netdata/pull/18947) ([ktsaou](https://github.com/ktsaou))
+- docs: add "Plugin Privileges" section [\#18946](https://github.com/netdata/netdata/pull/18946) ([ilyam8](https://github.com/ilyam8))
+- docs: fix Caddy docker compose example [\#18944](https://github.com/netdata/netdata/pull/18944) ([ilyam8](https://github.com/ilyam8))
+- docs: grammar/format fixes to `docs/netdata-agent/` [\#18942](https://github.com/netdata/netdata/pull/18942) ([ilyam8](https://github.com/ilyam8))
+- Streaming re-organization [\#18941](https://github.com/netdata/netdata/pull/18941) ([ktsaou](https://github.com/ktsaou))
+- random numbers No 3 [\#18940](https://github.com/netdata/netdata/pull/18940) ([ktsaou](https://github.com/ktsaou))
+- Random numbers improvements [\#18939](https://github.com/netdata/netdata/pull/18939) ([ktsaou](https://github.com/ktsaou))
+- fix\(go.d/prometheus\): correct unsupported protocol scheme "file" error [\#18938](https://github.com/netdata/netdata/pull/18938) ([ilyam8](https://github.com/ilyam8))
+- Improve ACLK sync CPU usage [\#18935](https://github.com/netdata/netdata/pull/18935) ([stelfrag](https://github.com/stelfrag))
+- Hyper collector fixes [\#18934](https://github.com/netdata/netdata/pull/18934) ([stelfrag](https://github.com/stelfrag))
+- Regenerate integrations.js [\#18932](https://github.com/netdata/netdata/pull/18932) ([netdatabot](https://github.com/netdatabot))
+- better randomness for heartbeat [\#18930](https://github.com/netdata/netdata/pull/18930) ([ktsaou](https://github.com/ktsaou))
+- add randomness per thread to heartbeat [\#18929](https://github.com/netdata/netdata/pull/18929) ([ktsaou](https://github.com/ktsaou))
+- Improve the documentation on removing stale nodes [\#18927](https://github.com/netdata/netdata/pull/18927) ([ralphm](https://github.com/ralphm))
+- Docs: Changes to title and CPU requirements [\#18925](https://github.com/netdata/netdata/pull/18925) ([Ancairon](https://github.com/Ancairon))
+- chore\(go.d/nvidia\_smi\): remove use\_csv\_format \(deprecated\) from config [\#18924](https://github.com/netdata/netdata/pull/18924) ([ilyam8](https://github.com/ilyam8))
+- Docs: small fixes and pass on sizing Agents [\#18923](https://github.com/netdata/netdata/pull/18923) ([Ancairon](https://github.com/Ancairon))
+- go.d/portcheck: separate tabs for tcp/upd ports [\#18922](https://github.com/netdata/netdata/pull/18922) ([ilyam8](https://github.com/ilyam8))
+- Update Libbpf [\#18921](https://github.com/netdata/netdata/pull/18921) ([thiagoftsm](https://github.com/thiagoftsm))
+- build\(deps\): bump github.com/fsnotify/fsnotify from 1.7.0 to 1.8.0 in /src/go [\#18920](https://github.com/netdata/netdata/pull/18920) ([dependabot[bot]](https://github.com/apps/dependabot))
+- log2journal now uses libnetdata [\#18919](https://github.com/netdata/netdata/pull/18919) ([ktsaou](https://github.com/ktsaou))
+- docs: fix ui license link [\#18918](https://github.com/netdata/netdata/pull/18918) ([ilyam8](https://github.com/ilyam8))
+- Regenerate integrations.js [\#18917](https://github.com/netdata/netdata/pull/18917) ([netdatabot](https://github.com/netdatabot))
+- Switch DEB/RPM repositories to new subdomain. [\#18916](https://github.com/netdata/netdata/pull/18916) ([Ferroin](https://github.com/Ferroin))
+- docs: fix broken links in metadata [\#18915](https://github.com/netdata/netdata/pull/18915) ([ilyam8](https://github.com/ilyam8))
+- Update CI to generate MSI installer for Windows using WiX. [\#18914](https://github.com/netdata/netdata/pull/18914) ([Ferroin](https://github.com/Ferroin))
+- Fix potential wait forever in mqtt loop [\#18913](https://github.com/netdata/netdata/pull/18913) ([stelfrag](https://github.com/stelfrag))
+- add `dagster` to apps\_groups.conf [\#18912](https://github.com/netdata/netdata/pull/18912) ([andrewm4894](https://github.com/andrewm4894))
+- Installation section simplification [\#18911](https://github.com/netdata/netdata/pull/18911) ([Ancairon](https://github.com/Ancairon))
+- fix\(debugfs/extfrag\): add zone label [\#18910](https://github.com/netdata/netdata/pull/18910) ([ilyam8](https://github.com/ilyam8))
+- proc.plugin: log as info if a dir not exists [\#18909](https://github.com/netdata/netdata/pull/18909) ([ilyam8](https://github.com/ilyam8))
+- uninstall docs edits [\#18908](https://github.com/netdata/netdata/pull/18908) ([Ancairon](https://github.com/Ancairon))
+- Update uninstallation docs and remove reinstallation page [\#18907](https://github.com/netdata/netdata/pull/18907) ([Ancairon](https://github.com/Ancairon))
+- Adjust API version [\#18906](https://github.com/netdata/netdata/pull/18906) ([stelfrag](https://github.com/stelfrag))
+- Fix a potential invalid double free memory [\#18905](https://github.com/netdata/netdata/pull/18905) ([stelfrag](https://github.com/stelfrag))
+- MSI Improvements [\#18903](https://github.com/netdata/netdata/pull/18903) ([thiagoftsm](https://github.com/thiagoftsm))
+- versioning for functions [\#18902](https://github.com/netdata/netdata/pull/18902) ([ktsaou](https://github.com/ktsaou))
+- Regenerate integrations.js [\#18901](https://github.com/netdata/netdata/pull/18901) ([netdatabot](https://github.com/netdatabot))
+- chore\(go.d.plugin\): add build tags to modules [\#18900](https://github.com/netdata/netdata/pull/18900) ([ilyam8](https://github.com/ilyam8))
+- Regenerate integrations.js [\#18899](https://github.com/netdata/netdata/pull/18899) ([netdatabot](https://github.com/netdatabot))
+- Updating Netdata docs [\#18898](https://github.com/netdata/netdata/pull/18898) ([Ancairon](https://github.com/Ancairon))
+- remove python.d/zscores [\#18897](https://github.com/netdata/netdata/pull/18897) ([ilyam8](https://github.com/ilyam8))
+- Coverity fixes [\#18896](https://github.com/netdata/netdata/pull/18896) ([stelfrag](https://github.com/stelfrag))
+- docs edit [\#18895](https://github.com/netdata/netdata/pull/18895) ([Ancairon](https://github.com/Ancairon))
+- Start-stop-restart for windows, plus move info to its own file [\#18894](https://github.com/netdata/netdata/pull/18894) ([Ancairon](https://github.com/Ancairon))
+- log2journal: fix config parsing memory leaks [\#18893](https://github.com/netdata/netdata/pull/18893) ([ktsaou](https://github.com/ktsaou))
+- Fix coverity issues [\#18892](https://github.com/netdata/netdata/pull/18892) ([stelfrag](https://github.com/stelfrag))
+- Regenerate integrations.js [\#18891](https://github.com/netdata/netdata/pull/18891) ([netdatabot](https://github.com/netdatabot))
+- feat\(go.d.plugin\): add spigotmc collector [\#18890](https://github.com/netdata/netdata/pull/18890) ([ilyam8](https://github.com/ilyam8))
+- remove python.d/spigotmc [\#18889](https://github.com/netdata/netdata/pull/18889) ([ilyam8](https://github.com/ilyam8))
+- improvement\(go.d/k8sstate\): collect pod status reason [\#18887](https://github.com/netdata/netdata/pull/18887) ([ilyam8](https://github.com/ilyam8))
+- Regenerate integrations.js [\#18886](https://github.com/netdata/netdata/pull/18886) ([netdatabot](https://github.com/netdatabot))
+- fix\(go.d/k8sstate\): use static list of warning/terminated reasons [\#18885](https://github.com/netdata/netdata/pull/18885) ([ilyam8](https://github.com/ilyam8))
+- properly sanitize prometheus names and values [\#18884](https://github.com/netdata/netdata/pull/18884) ([ktsaou](https://github.com/ktsaou))
+- Windows storage fixes [\#18880](https://github.com/netdata/netdata/pull/18880) ([ktsaou](https://github.com/ktsaou))
+- include windows.h globally in libnetdata [\#18878](https://github.com/netdata/netdata/pull/18878) ([ktsaou](https://github.com/ktsaou))
+- fix: correct go.d.plugin permission for source builds [\#18876](https://github.com/netdata/netdata/pull/18876) ([ilyam8](https://github.com/ilyam8))
+- build\(deps\): bump github.com/prometheus/common from 0.60.0 to 0.60.1 in /src/go [\#18874](https://github.com/netdata/netdata/pull/18874) ([dependabot[bot]](https://github.com/apps/dependabot))
+- build\(deps\): bump k8s.io/client-go from 0.31.1 to 0.31.2 in /src/go [\#18873](https://github.com/netdata/netdata/pull/18873) ([dependabot[bot]](https://github.com/apps/dependabot))
+- build\(deps\): bump github.com/vmware/govmomi from 0.45.0 to 0.45.1 in /src/go [\#18872](https://github.com/netdata/netdata/pull/18872) ([dependabot[bot]](https://github.com/apps/dependabot))
+- fix: correct health schema typo preventing Action alert rendering. [\#18871](https://github.com/netdata/netdata/pull/18871) ([ilyam8](https://github.com/ilyam8))
+- Adjust text\_sanitizer to accept the default value [\#18870](https://github.com/netdata/netdata/pull/18870) ([stelfrag](https://github.com/stelfrag))
+- Regenerate integrations.js [\#18869](https://github.com/netdata/netdata/pull/18869) ([netdatabot](https://github.com/netdatabot))
+- docs\(go.d/ping\): clarify permissions [\#18868](https://github.com/netdata/netdata/pull/18868) ([ilyam8](https://github.com/ilyam8))
+- Fix corruption in expression value replacement [\#18865](https://github.com/netdata/netdata/pull/18865) ([stelfrag](https://github.com/stelfrag))
+- Prevent memory corruption during ACLK OTP decode [\#18863](https://github.com/netdata/netdata/pull/18863) ([stelfrag](https://github.com/stelfrag))
+- Do not build H2O by default. [\#18861](https://github.com/netdata/netdata/pull/18861) ([vkalintiris](https://github.com/vkalintiris))
+- Regenerate integrations.js [\#18860](https://github.com/netdata/netdata/pull/18860) ([netdatabot](https://github.com/netdatabot))
+- feat\(go.d.plugin\): add MaxScale collector [\#18859](https://github.com/netdata/netdata/pull/18859) ([ilyam8](https://github.com/ilyam8))
 - fix\(apps.plugin\): add tini to Linux managers [\#18856](https://github.com/netdata/netdata/pull/18856) ([ilyam8](https://github.com/ilyam8))
 - feat\(proc/numa\): add numa node mem activity [\#18855](https://github.com/netdata/netdata/pull/18855) ([ilyam8](https://github.com/ilyam8))
 - build\(deps\): bump github.com/vmware/govmomi from 0.44.1 to 0.45.0 in /src/go [\#18854](https://github.com/netdata/netdata/pull/18854) ([dependabot[bot]](https://github.com/apps/dependabot))
@@ -61,6 +231,7 @@
 - Do not load/save context data in RAM mode [\#18790](https://github.com/netdata/netdata/pull/18790) ([stelfrag](https://github.com/stelfrag))
 - Fix broken claiming via kickstart on some systems. [\#18789](https://github.com/netdata/netdata/pull/18789) ([Ferroin](https://github.com/Ferroin))
 - Fix atomic builtins test that currently fails for llvm+compiler\_rt when gcc is not present [\#18788](https://github.com/netdata/netdata/pull/18788) ([StormBytePP](https://github.com/StormBytePP))
+- Add basis for MSI installer. [\#18787](https://github.com/netdata/netdata/pull/18787) ([vkalintiris](https://github.com/vkalintiris))
 - fix\(netdata-updater.sh\): ensure `--non-interactive` flag is passed during self-update [\#18786](https://github.com/netdata/netdata/pull/18786) ([ilyam8](https://github.com/ilyam8))
 - Windows Network Interfaces Charts and Alerts [\#18785](https://github.com/netdata/netdata/pull/18785) ([ktsaou](https://github.com/ktsaou))
 - Document ML enabled `auto` [\#18784](https://github.com/netdata/netdata/pull/18784) ([stelfrag](https://github.com/stelfrag))
@@ -127,6 +298,7 @@
 - Detect when swap is disabled when agent is running [\#18702](https://github.com/netdata/netdata/pull/18702) ([stelfrag](https://github.com/stelfrag))
 - Bump golang.org/x/net from 0.29.0 to 0.30.0 in /src/go [\#18701](https://github.com/netdata/netdata/pull/18701) ([dependabot[bot]](https://github.com/apps/dependabot))
 - Load chart labels on demand [\#18699](https://github.com/netdata/netdata/pull/18699) ([stelfrag](https://github.com/stelfrag))
+- Add hyper-v metrics [\#18697](https://github.com/netdata/netdata/pull/18697) ([stelfrag](https://github.com/stelfrag))
 - fix system-info disk space in LXC [\#18696](https://github.com/netdata/netdata/pull/18696) ([ilyam8](https://github.com/ilyam8))
 - fix ram usage calculation in LXC [\#18695](https://github.com/netdata/netdata/pull/18695) ([ilyam8](https://github.com/ilyam8))
 - cgroups.plugin: call `setresuid` before spawn server init [\#18694](https://github.com/netdata/netdata/pull/18694) ([ilyam8](https://github.com/ilyam8))
@@ -268,92 +440,10 @@
 - Bump github.com/jackc/pgx/v5 from 5.7.0 to 5.7.1 in /src/go [\#18515](https://github.com/netdata/netdata/pull/18515) ([dependabot[bot]](https://github.com/apps/dependabot))
 - go.d update redis lib to v9 [\#18513](https://github.com/netdata/netdata/pull/18513) ([ilyam8](https://github.com/ilyam8))
 - go.d/varnish: add docker support [\#18512](https://github.com/netdata/netdata/pull/18512) ([ilyam8](https://github.com/ilyam8))
-- go.d add function to execute a command inside a Docker container [\#18509](https://github.com/netdata/netdata/pull/18509) ([ilyam8](https://github.com/ilyam8))
-- Regenerate integrations.js [\#18508](https://github.com/netdata/netdata/pull/18508) ([netdatabot](https://github.com/netdatabot))
-- server dashboard v3 static files, when available [\#18507](https://github.com/netdata/netdata/pull/18507) ([ktsaou](https://github.com/ktsaou))
-- add varnishstat and varnishadm to ndsudo [\#18503](https://github.com/netdata/netdata/pull/18503) ([ilyam8](https://github.com/ilyam8))
-- Bump github.com/docker/docker from 27.2.0+incompatible to 27.2.1+incompatible in /src/go [\#18502](https://github.com/netdata/netdata/pull/18502) ([dependabot[bot]](https://github.com/apps/dependabot))
-- Assorted build cleanup for external data collection plugins. [\#18501](https://github.com/netdata/netdata/pull/18501) ([Ferroin](https://github.com/Ferroin))
-- remove python.d/varnish [\#18499](https://github.com/netdata/netdata/pull/18499) ([ilyam8](https://github.com/ilyam8))
-- Bump github.com/jackc/pgx/v5 from 5.6.0 to 5.7.0 in /src/go [\#18498](https://github.com/netdata/netdata/pull/18498) ([dependabot[bot]](https://github.com/apps/dependabot))
-- Bump github.com/prometheus/common from 0.58.0 to 0.59.1 in /src/go [\#18497](https://github.com/netdata/netdata/pull/18497) ([dependabot[bot]](https://github.com/apps/dependabot))
-- Bump golang.org/x/net from 0.28.0 to 0.29.0 in /src/go [\#18496](https://github.com/netdata/netdata/pull/18496) ([dependabot[bot]](https://github.com/apps/dependabot))
-- Windows Plugin Metrics \(Thermal and Memory\) [\#18494](https://github.com/netdata/netdata/pull/18494) ([thiagoftsm](https://github.com/thiagoftsm))
-- Regenerate integrations.js [\#18493](https://github.com/netdata/netdata/pull/18493) ([netdatabot](https://github.com/netdatabot))
-- varnish collector Go implementation [\#18491](https://github.com/netdata/netdata/pull/18491) ([Ancairon](https://github.com/Ancairon))
-- add go.d/apcupsd [\#18489](https://github.com/netdata/netdata/pull/18489) ([ilyam8](https://github.com/ilyam8))
-- Improve processing on removed alerts after agent restart [\#18488](https://github.com/netdata/netdata/pull/18488) ([stelfrag](https://github.com/stelfrag))
-- Bump github.com/prometheus/common from 0.57.0 to 0.58.0 in /src/go [\#18487](https://github.com/netdata/netdata/pull/18487) ([dependabot[bot]](https://github.com/apps/dependabot))
-- Bump golang.org/x/text from 0.17.0 to 0.18.0 in /src/go [\#18486](https://github.com/netdata/netdata/pull/18486) ([dependabot[bot]](https://github.com/apps/dependabot))
-- Remove Warnings \(ebpf\) [\#18484](https://github.com/netdata/netdata/pull/18484) ([thiagoftsm](https://github.com/thiagoftsm))
-- \[WIP\] Windows-Events Logs Explorer [\#18483](https://github.com/netdata/netdata/pull/18483) ([ktsaou](https://github.com/ktsaou))
-- fix win sysinfo installed ram calculation [\#18482](https://github.com/netdata/netdata/pull/18482) ([ilyam8](https://github.com/ilyam8))
-- remove charts.d/apcupsd [\#18481](https://github.com/netdata/netdata/pull/18481) ([ilyam8](https://github.com/ilyam8))
-- Update LIbbpf [\#18480](https://github.com/netdata/netdata/pull/18480) ([thiagoftsm](https://github.com/thiagoftsm))
-- added missing comma in Access-Control-Allow-Headers [\#18479](https://github.com/netdata/netdata/pull/18479) ([ktsaou](https://github.com/ktsaou))
-- add Access-Control-Allow-Headers: x-transaction-id [\#18478](https://github.com/netdata/netdata/pull/18478) ([ktsaou](https://github.com/ktsaou))
-- add Access-Control-Allow-Headers: x-netdata-auth [\#18477](https://github.com/netdata/netdata/pull/18477) ([ktsaou](https://github.com/ktsaou))
-- prevent sigsegv in config-parsers [\#18476](https://github.com/netdata/netdata/pull/18476) ([ktsaou](https://github.com/ktsaou))
-- Regenerate integrations.js [\#18475](https://github.com/netdata/netdata/pull/18475) ([netdatabot](https://github.com/netdatabot))
-- added version to systemd-journal info response [\#18474](https://github.com/netdata/netdata/pull/18474) ([ktsaou](https://github.com/ktsaou))
-- Regenerate integrations.js [\#18473](https://github.com/netdata/netdata/pull/18473) ([netdatabot](https://github.com/netdatabot))
-- Remove w1sensor in favor of Go implementation [\#18471](https://github.com/netdata/netdata/pull/18471) ([Ancairon](https://github.com/Ancairon))
-- Improve processing of pending alerts [\#18470](https://github.com/netdata/netdata/pull/18470) ([stelfrag](https://github.com/stelfrag))
-- Fix node index in alerts [\#18469](https://github.com/netdata/netdata/pull/18469) ([stelfrag](https://github.com/stelfrag))
-- go.d storcli: fix unmarshal driveInfo [\#18466](https://github.com/netdata/netdata/pull/18466) ([ilyam8](https://github.com/ilyam8))
-- w1sensor collector Go implementation [\#18464](https://github.com/netdata/netdata/pull/18464) ([Ancairon](https://github.com/Ancairon))
-- Check correct number of bits for LZC of XOR value. [\#18463](https://github.com/netdata/netdata/pull/18463) ([vkalintiris](https://github.com/vkalintiris))
-- netdata-claim.sh: fix parsing url arg [\#18460](https://github.com/netdata/netdata/pull/18460) ([ilyam8](https://github.com/ilyam8))
-- Bump github.com/likexian/whois from 1.15.4 to 1.15.5 in /src/go [\#18457](https://github.com/netdata/netdata/pull/18457) ([dependabot[bot]](https://github.com/apps/dependabot))
-- Bump github.com/likexian/whois-parser from 1.24.19 to 1.24.20 in /src/go [\#18456](https://github.com/netdata/netdata/pull/18456) ([dependabot[bot]](https://github.com/apps/dependabot))
-- Cleanup, rename and packaging fix \(Windows Codes\) [\#18455](https://github.com/netdata/netdata/pull/18455) ([thiagoftsm](https://github.com/thiagoftsm))
-- Regenerate integrations.js [\#18454](https://github.com/netdata/netdata/pull/18454) ([netdatabot](https://github.com/netdatabot))
-- Bump github.com/Masterminds/sprig/v3 from 3.2.3 to 3.3.0 in /src/go [\#18453](https://github.com/netdata/netdata/pull/18453) ([dependabot[bot]](https://github.com/apps/dependabot))
-- Bump github.com/prometheus/common from 0.55.0 to 0.57.0 in /src/go [\#18452](https://github.com/netdata/netdata/pull/18452) ([dependabot[bot]](https://github.com/apps/dependabot))
-- Bump github.com/docker/docker from 27.1.2+incompatible to 27.2.0+incompatible in /src/go [\#18451](https://github.com/netdata/netdata/pull/18451) ([dependabot[bot]](https://github.com/apps/dependabot))
-- Regenerate integrations.js [\#18450](https://github.com/netdata/netdata/pull/18450) ([netdatabot](https://github.com/netdatabot))
-- go.d sensors add parsing intrusion to exec method [\#18449](https://github.com/netdata/netdata/pull/18449) ([ilyam8](https://github.com/ilyam8))
-- Exit slabinfo.plugin on EPIPE [\#18448](https://github.com/netdata/netdata/pull/18448) ([teqwve](https://github.com/teqwve))
-- ilert Integration [\#18447](https://github.com/netdata/netdata/pull/18447) ([DaTiMy](https://github.com/DaTiMy))
-- go.d remove vnode disable [\#18446](https://github.com/netdata/netdata/pull/18446) ([ilyam8](https://github.com/ilyam8))
-- go.d add support for symlinked vnode config files [\#18445](https://github.com/netdata/netdata/pull/18445) ([ilyam8](https://github.com/ilyam8))
-- Proper precedence when calculating time\_to\_evict [\#18444](https://github.com/netdata/netdata/pull/18444) ([stelfrag](https://github.com/stelfrag))
-- Windows Permissions [\#18443](https://github.com/netdata/netdata/pull/18443) ([thiagoftsm](https://github.com/thiagoftsm))
-- do not free the sender when the sender thread exits [\#18441](https://github.com/netdata/netdata/pull/18441) ([ktsaou](https://github.com/ktsaou))
-- fix receiver deadlock [\#18440](https://github.com/netdata/netdata/pull/18440) ([ktsaou](https://github.com/ktsaou))
-- fix charts.d/sensors leftovers [\#18439](https://github.com/netdata/netdata/pull/18439) ([ilyam8](https://github.com/ilyam8))
-- remove deadlock from sender [\#18438](https://github.com/netdata/netdata/pull/18438) ([ktsaou](https://github.com/ktsaou))
-- Un-vendor proprietary dashboard code. [\#18437](https://github.com/netdata/netdata/pull/18437) ([Ferroin](https://github.com/Ferroin))
-- go.d remove duplicates in testing [\#18435](https://github.com/netdata/netdata/pull/18435) ([ilyam8](https://github.com/ilyam8))
-- Improve agent shutdown time [\#18434](https://github.com/netdata/netdata/pull/18434) ([stelfrag](https://github.com/stelfrag))
-- Regenerate integrations.js [\#18432](https://github.com/netdata/netdata/pull/18432) ([netdatabot](https://github.com/netdatabot))
-- go.d/sensors: add sysfs scan method to collect metrics [\#18431](https://github.com/netdata/netdata/pull/18431) ([ilyam8](https://github.com/ilyam8))
-- stream paths propagated to children and parents [\#18430](https://github.com/netdata/netdata/pull/18430) ([ktsaou](https://github.com/ktsaou))
-- go.d lmsensors improve performance [\#18429](https://github.com/netdata/netdata/pull/18429) ([ilyam8](https://github.com/ilyam8))
-- ci fix InvalidDefaultArgInFrom warn [\#18428](https://github.com/netdata/netdata/pull/18428) ([ilyam8](https://github.com/ilyam8))
-- vendor https://github.com/mdlayher/lmsensors [\#18427](https://github.com/netdata/netdata/pull/18427) ([ilyam8](https://github.com/ilyam8))
-- remove charts.d/sensors [\#18426](https://github.com/netdata/netdata/pull/18426) ([ilyam8](https://github.com/ilyam8))
-- Reset last connected when removing stale nodes with netdatacli [\#18425](https://github.com/netdata/netdata/pull/18425) ([stelfrag](https://github.com/stelfrag))
-- remove checks.plugin dir [\#18424](https://github.com/netdata/netdata/pull/18424) ([ilyam8](https://github.com/ilyam8))
-- Regenerate integrations.js [\#18421](https://github.com/netdata/netdata/pull/18421) ([netdatabot](https://github.com/netdatabot))
-- fix hyperlink in go.d samba meta [\#18420](https://github.com/netdata/netdata/pull/18420) ([ilyam8](https://github.com/ilyam8))
-- add go.d samba [\#18418](https://github.com/netdata/netdata/pull/18418) ([ilyam8](https://github.com/ilyam8))
-- ACLK code cleanup [\#18417](https://github.com/netdata/netdata/pull/18417) ([stelfrag](https://github.com/stelfrag))
-- restore /api/v1/badge.svg [\#18416](https://github.com/netdata/netdata/pull/18416) ([ktsaou](https://github.com/ktsaou))
-- add "smbstatus -P" to ndsudo [\#18414](https://github.com/netdata/netdata/pull/18414) ([ilyam8](https://github.com/ilyam8))
-- remove python.d/sambsa [\#18413](https://github.com/netdata/netdata/pull/18413) ([ilyam8](https://github.com/ilyam8))
-- SPAWN-SERVER: re-evaluate signals even 500ms [\#18411](https://github.com/netdata/netdata/pull/18411) ([ktsaou](https://github.com/ktsaou))
-- Claim on Windows [\#18410](https://github.com/netdata/netdata/pull/18410) ([thiagoftsm](https://github.com/thiagoftsm))
-- kickstart: fix write\_claim\_config when executed as a regular user [\#18406](https://github.com/netdata/netdata/pull/18406) ([ilyam8](https://github.com/ilyam8))
-- Fix coverity issues [\#18405](https://github.com/netdata/netdata/pull/18405) ([stelfrag](https://github.com/stelfrag))
-- remove pyyaml2 [\#18404](https://github.com/netdata/netdata/pull/18404) ([ilyam8](https://github.com/ilyam8))
-- imporve netdatacli help usage readability [\#18403](https://github.com/netdata/netdata/pull/18403) ([ilyam8](https://github.com/ilyam8))
-- remove python.d/anomalies [\#18402](https://github.com/netdata/netdata/pull/18402) ([ilyam8](https://github.com/ilyam8))
-- go.d dnsmasqdhcp: fix potential panic in parseDHCPRangeValue [\#18401](https://github.com/netdata/netdata/pull/18401) ([ilyam8](https://github.com/ilyam8))
-- Regenerate integrations.js [\#18400](https://github.com/netdata/netdata/pull/18400) ([netdatabot](https://github.com/netdatabot))
-- go.d boinc [\#18398](https://github.com/netdata/netdata/pull/18398) ([ilyam8](https://github.com/ilyam8))
-- remove python.d/boinc [\#18397](https://github.com/netdata/netdata/pull/18397) ([ilyam8](https://github.com/ilyam8))
-- fix warnings in Dockerfile [\#18395](https://github.com/netdata/netdata/pull/18395) ([NicolasCARPi](https://github.com/NicolasCARPi))
+
+## [v1.47.5](https://github.com/netdata/netdata/tree/v1.47.5) (2024-10-24)
+
+[Full Changelog](https://github.com/netdata/netdata/compare/v1.47.4...v1.47.5)
 
 ## [v1.47.4](https://github.com/netdata/netdata/tree/v1.47.4) (2024-10-09)
@@ -379,69 +469,6 @@
 [Full Changelog](https://github.com/netdata/netdata/compare/v1.46.3...v1.47.0)
 
-**Merged pull requests:**
-
-- go.d dnsmsasq\_dhcp: improve parsing of dhcp ranges [\#18394](https://github.com/netdata/netdata/pull/18394) ([ilyam8](https://github.com/ilyam8))
-- Regenerate integrations.js [\#18391](https://github.com/netdata/netdata/pull/18391) ([netdatabot](https://github.com/netdatabot))
-- remove proc zfspools [\#18389](https://github.com/netdata/netdata/pull/18389) ([ilyam8](https://github.com/ilyam8))
-- Regenerate integrations.js [\#18387](https://github.com/netdata/netdata/pull/18387) ([netdatabot](https://github.com/netdatabot))
-- Modify CLI command remove-stale-node to accept hostname [\#18386](https://github.com/netdata/netdata/pull/18386) ([stelfrag](https://github.com/stelfrag))
-- Update windows installer [\#18385](https://github.com/netdata/netdata/pull/18385) ([thiagoftsm](https://github.com/thiagoftsm))
-- go.d zfspool: collect vdev health state [\#18383](https://github.com/netdata/netdata/pull/18383) ([ilyam8](https://github.com/ilyam8))
-- Remove debug message [\#18382](https://github.com/netdata/netdata/pull/18382) ([stelfrag](https://github.com/stelfrag))
-- Remove host immediately on stale node removal [\#18381](https://github.com/netdata/netdata/pull/18381) ([stelfrag](https://github.com/stelfrag))
-- Regenerate integrations.js [\#18380](https://github.com/netdata/netdata/pull/18380) ([netdatabot](https://github.com/netdatabot))
-- go.d docs: add a note that debug mode not supported for Dyncfg jobs [\#18379](https://github.com/netdata/netdata/pull/18379) ([ilyam8](https://github.com/ilyam8))
-- ci gen integrations: add cloud-authentication dir [\#18378](https://github.com/netdata/netdata/pull/18378) ([ilyam8](https://github.com/ilyam8))
-- Regenerate
-- Regenerate integrations.js [\#18377](https://github.com/netdata/netdata/pull/18377) ([netdatabot](https://github.com/netdatabot))
-- go.d dnsmasq: query metrics individually to handle v2.90+ SERVFAIL [\#18376](https://github.com/netdata/netdata/pull/18376) ([ilyam8](https://github.com/ilyam8))
-- Switch to DEB822 format for APT repository configuration. [\#18374](https://github.com/netdata/netdata/pull/18374) ([Ferroin](https://github.com/Ferroin))
-- Regenerate integrations.js [\#18373](https://github.com/netdata/netdata/pull/18373) ([netdatabot](https://github.com/netdatabot))
-- Origin-sign all DEB packages regardless of upload target. [\#18372](https://github.com/netdata/netdata/pull/18372) ([Ferroin](https://github.com/Ferroin))
-- remove python.d/changefinder [\#18370](https://github.com/netdata/netdata/pull/18370) ([ilyam8](https://github.com/ilyam8))
-- remove python.d/example [\#18369](https://github.com/netdata/netdata/pull/18369) ([ilyam8](https://github.com/ilyam8))
-- go.d squidlog: improve parser init and parsing [\#18368](https://github.com/netdata/netdata/pull/18368) ([ilyam8](https://github.com/ilyam8))
-- Bump github.com/axiomhq/hyperloglog from 0.0.0-20240507144631-af9851f82b27 to 0.1.0 in /src/go [\#18367](https://github.com/netdata/netdata/pull/18367) ([dependabot[bot]](https://github.com/apps/dependabot))
-- Bump github.com/vmware/govmomi from 0.40.0 to 0.42.0 in /src/go [\#18366](https://github.com/netdata/netdata/pull/18366) ([dependabot[bot]](https://github.com/apps/dependabot))
-- eBPF \(reduce CPU and memory usage\) [\#18365](https://github.com/netdata/netdata/pull/18365) ([thiagoftsm](https://github.com/thiagoftsm))
-- Regenerate integrations.js [\#18363](https://github.com/netdata/netdata/pull/18363) ([netdatabot](https://github.com/netdatabot))
-- add go.d/tor [\#18361](https://github.com/netdata/netdata/pull/18361) ([ilyam8](https://github.com/ilyam8))
-- remove python.d/tor [\#18358](https://github.com/netdata/netdata/pull/18358) ([ilyam8](https://github.com/ilyam8))
-- Regenerate integrations.js [\#18357](https://github.com/netdata/netdata/pull/18357) ([netdatabot](https://github.com/netdatabot))
-- remove python.d lm\_sensors.py [\#18356](https://github.com/netdata/netdata/pull/18356) ([ilyam8](https://github.com/ilyam8))
-- remove python.d/retroshare [\#18355](https://github.com/netdata/netdata/pull/18355) ([ilyam8](https://github.com/ilyam8))
-- Regenerate integrations.js [\#18353](https://github.com/netdata/netdata/pull/18353) ([netdatabot](https://github.com/netdatabot))
-- go.d httpcheck: add status description to docs [\#18351](https://github.com/netdata/netdata/pull/18351) ([ilyam8](https://github.com/ilyam8))
-- Regenerate integrations.js [\#18350](https://github.com/netdata/netdata/pull/18350) ([netdatabot](https://github.com/netdatabot))
-- Add missing initial slashes for internal documation links [\#18348](https://github.com/netdata/netdata/pull/18348) ([ralphm](https://github.com/ralphm))
-- fix sending CLEAR notifications with critical severity modifier [\#18347](https://github.com/netdata/netdata/pull/18347) ([ilyam8](https://github.com/ilyam8))
-- add license to readmes menu [\#18345](https://github.com/netdata/netdata/pull/18345) ([ilyam8](https://github.com/ilyam8))
-- add go.d/monit [\#18344](https://github.com/netdata/netdata/pull/18344) ([ilyam8](https://github.com/ilyam8))
-- remove python.d/monit [\#18343](https://github.com/netdata/netdata/pull/18343) ([ilyam8](https://github.com/ilyam8))
-- Bump github.com/docker/docker from 27.1.1+incompatible to 27.1.2+incompatible in /src/go [\#18340](https://github.com/netdata/netdata/pull/18340) ([dependabot[bot]](https://github.com/apps/dependabot))
-- Bump github.com/vmware/govmomi from 0.39.0 to 0.40.0 in /src/go [\#18338](https://github.com/netdata/netdata/pull/18338) ([dependabot[bot]](https://github.com/apps/dependabot))
-- Bump github.com/miekg/dns from 1.1.61 to 1.1.62 in /src/go [\#18337](https://github.com/netdata/netdata/pull/18337) ([dependabot[bot]](https://github.com/apps/dependabot))
-- Bump k8s.io/client-go from 0.30.3 to 0.31.0 in /src/go [\#18336](https://github.com/netdata/netdata/pull/18336) ([dependabot[bot]](https://github.com/apps/dependabot))
-- add i2pd to apps\_groups.conf [\#18335](https://github.com/netdata/netdata/pull/18335) ([ilyam8](https://github.com/ilyam8))
-- add dashboard v2 license to readme [\#18334](https://github.com/netdata/netdata/pull/18334) ([ilyam8](https://github.com/ilyam8))
-- Regenerate integrations.js [\#18333](https://github.com/netdata/netdata/pull/18333) ([netdatabot](https://github.com/netdatabot))
-- go.d riakkv [\#18330](https://github.com/netdata/netdata/pull/18330) ([ilyam8](https://github.com/ilyam8))
-- remove python.d/riakkv [\#18329](https://github.com/netdata/netdata/pull/18329) ([ilyam8](https://github.com/ilyam8))
-- Regenerate integrations.js [\#18328](https://github.com/netdata/netdata/pull/18328) ([netdatabot](https://github.com/netdatabot))
-- add go.d/uwsgi [\#18326](https://github.com/netdata/netdata/pull/18326) ([ilyam8](https://github.com/ilyam8))
-- remove python.d/uwsgi [\#18325](https://github.com/netdata/netdata/pull/18325) ([ilyam8](https://github.com/ilyam8))
-- Regenerate integrations.js [\#18324](https://github.com/netdata/netdata/pull/18324) ([netdatabot](https://github.com/netdatabot))
-- remove python.d/dovecot [\#18322](https://github.com/netdata/netdata/pull/18322) ([ilyam8](https://github.com/ilyam8))
-- add go.d dovecot [\#18321](https://github.com/netdata/netdata/pull/18321) ([ilyam8](https://github.com/ilyam8))
-- go.d redis: fix default "address" in config\_schema.json [\#18320](https://github.com/netdata/netdata/pull/18320) ([ilyam8](https://github.com/ilyam8))
-- Ensure files in /usr/lib/netdata/system are not executable. [\#18318](https://github.com/netdata/netdata/pull/18318) ([Ferroin](https://github.com/Ferroin))
-- Regenerate integrations.js [\#18317](https://github.com/netdata/netdata/pull/18317) ([netdatabot](https://github.com/netdatabot))
-- remove python.d/nvidia\_smi [\#18316](https://github.com/netdata/netdata/pull/18316) ([ilyam8](https://github.com/ilyam8))
-- go.d nvidia\_smi: enable by default [\#18315](https://github.com/netdata/netdata/pull/18315) ([ilyam8](https://github.com/ilyam8))
-- go.d nvidia\_smi: add loop mode [\#18313](https://github.com/netdata/netdata/pull/18313) ([ilyam8](https://github.com/ilyam8))
-- Regenerate integrations.js [\#18312](https://github.com/netdata/netdata/pull/18312) ([netdatabot](https://github.com/netdatabot))
-- go.d nvidia\_smi remove "csv" mode [\#18311](https://github.com/netdata/netdata/pull/18311) ([ilyam8](https://github.com/ilyam8))
-
 ## [v1.46.3](https://github.com/netdata/netdata/tree/v1.46.3) (2024-07-23)
 
 [Full Changelog](https://github.com/netdata/netdata/compare/v1.46.2...v1.46.3)
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 54d9a161e..a5d8d959b 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -30,6 +30,8 @@ if(STATIC_BUILD)
   list(APPEND PKG_CONFIG_EXECUTABLE "--static")
 endif()
 
+set(CMAKE_INSTALL_MESSAGE LAZY)
+
 set(CMAKE_C_STANDARD 11)
 set(CMAKE_CXX_STANDARD 14)
 set(CMAKE_INSTALL_DEFAULT_COMPONENT_NAME "netdata")
@@ -118,9 +120,15 @@ elseif("${CMAKE_SYSTEM_NAME}" STREQUAL "CYGWIN" OR "${CMAKE_SYSTEM_NAME}" STREQU
 endif()
 
 set(BINDIR usr/bin)
 
+set(CMAKE_RC_COMPILER_INIT windres)
+ENABLE_LANGUAGE(RC)
+
+SET(CMAKE_RC_COMPILE_OBJECT "<CMAKE_RC_COMPILER> -O coff <DEFINES> -i <SOURCE> -o <OBJECT>")
+
 add_definitions(-D_GNU_SOURCE)
 
 if($ENV{CLION_IDE})
+  set(RUN_UNDER_CLION True)
+
   # clion needs these to find the includes
   if("${CMAKE_SYSTEM_NAME}" STREQUAL "MSYS" OR "${CMAKE_SYSTEM_NAME}" STREQUAL "Windows")
     if("$ENV{MSYSTEM}" STREQUAL "MSYS")
@@ -147,15 +155,17 @@ option(DEFAULT_FEATURE_STATE "Specify the default state for most optional featur
 mark_as_advanced(DEFAULT_FEATURE_STATE)
 
 # High-level features
-option(ENABLE_ACLK "Enable Netdata Cloud support (ACLK)" ${DEFAULT_FEATURE_STATE})
-option(ENABLE_CLOUD "Enable Netdata Cloud by default at runtime" ${DEFAULT_FEATURE_STATE})
 option(ENABLE_ML "Enable machine learning features" ${DEFAULT_FEATURE_STATE})
 option(ENABLE_DBENGINE "Enable dbengine metrics storage" True)
+option(ENABLE_DASHBOARD "Enable local dashboard" True)
+mark_as_advanced(ENABLE_DASHBOARD)
 
 # Data collection plugins
 option(ENABLE_PLUGIN_GO "Enable metric collectors written in Go" ${DEFAULT_FEATURE_STATE})
+option(ENABLE_PLUGIN_PYTHON "Enable metric collectors written in Python" ${DEFAULT_FEATURE_STATE})
 
-cmake_dependent_option(ENABLE_PLUGIN_APPS "Enable per-process resource usage monitoring" ${DEFAULT_FEATURE_STATE} "NOT OS_WINDOWS" False)
+cmake_dependent_option(ENABLE_PLUGIN_APPS "Enable per-process resource usage monitoring" ${DEFAULT_FEATURE_STATE} "OS_LINUX OR OS_FREEBSD OR OS_MACOS OR OS_WINDOWS" False)
+cmake_dependent_option(ENABLE_PLUGIN_CHARTS "Enable metric collectors written in Bash" ${DEFAULT_FEATURE_STATE} "NOT OS_WINDOWS" False)
 cmake_dependent_option(ENABLE_PLUGIN_CUPS "Enable CUPS monitoring" ${DEFAULT_FEATURE_STATE} "NOT OS_WINDOWS" False)
 cmake_dependent_option(ENABLE_PLUGIN_FREEIPMI "Enable IPMI monitoring" ${DEFAULT_FEATURE_STATE} "OS_LINUX OR OS_FREEBSD" False)
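The options hunk above leans on cmake_dependent_option(), which only exposes an option while its condition holds and otherwise pins it to a forced value. A minimal sketch of the semantics, using an illustrative option name that is not part of this build:

    include(CMakeDependentOption)

    # ENABLE_PLUGIN_FOO is user-settable (defaulting to ${DEFAULT_FEATURE_STATE})
    # only while the OS_LINUX condition is true; on other platforms it is hidden
    # from the cache and forced to False - the mechanism the hunk uses to gate
    # plugins per platform.
    cmake_dependent_option(ENABLE_PLUGIN_FOO "Enable a hypothetical foo collector"
                           ${DEFAULT_FEATURE_STATE} "OS_LINUX" False)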
@@ -185,7 +195,7 @@ option(ENABLE_BUNDLED_PROTOBUF "Use a vendored copy of protobuf" False)
 
 # Experimental features
 option(ENABLE_WEBRTC "Enable WebRTC dashboard communications (experimental)" False)
 mark_as_advanced(ENABLE_WEBRTC)
 
-option(ENABLE_H2O "Enable H2O web server (experimental)" True)
+option(ENABLE_H2O "Enable H2O web server (experimental)" False)
 mark_as_advanced(ENABLE_H2O)
 
@@ -198,11 +208,7 @@ mark_as_advanced(BUILD_FOR_PACKAGING)
 cmake_dependent_option(FORCE_LEGACY_LIBBPF "Force usage of libbpf 0.0.9 instead of the latest version." False "ENABLE_PLUGIN_EBPF" False)
 mark_as_advanced(FORCE_LEGACY_LIBBPF)
 
-if(ENABLE_ACLK OR ENABLE_EXPORTER_PROMETHEUS_REMOTE_WRITE)
-  set(NEED_PROTOBUF True)
-else()
-  set(NEED_PROTOBUF False)
-endif()
+set(NEED_PROTOBUF True)
 
 if(ENABLE_PLUGIN_GO)
   include(NetdataGoTools)
@@ -261,6 +267,9 @@ if(ENABLE_PLUGIN_EBPF)
   netdata_fetch_ebpf_co_re()
 endif()
 
+pkg_check_modules(CURL libcurl>=7.21 REQUIRED IMPORTED_TARGET)
+set(HAVE_LIBCURL TRUE)
+
 #
 # Libm
 #
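The new libcurl check above uses pkg_check_modules() with IMPORTED_TARGET, so the dependency arrives as a PkgConfig::CURL target carrying include directories and link flags; later hunks link that target into netdata and systemd-cat-native. The same pattern in isolation (the demo target is illustrative):

    find_package(PkgConfig REQUIRED)

    # REQUIRED aborts configuration when no libcurl >= 7.21 .pc file is found;
    # IMPORTED_TARGET creates PkgConfig::CURL with usage requirements attached.
    pkg_check_modules(CURL libcurl>=7.21 REQUIRED IMPORTED_TARGET)

    add_executable(demo main.c)
    target_link_libraries(demo PRIVATE PkgConfig::CURL)  # pulls in -I and -l flags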
@@ -379,6 +388,10 @@ check_function_exists(sched_get_priority_max HAVE_SCHED_GET_PRIORITY_MAX)
 check_function_exists(close_range HAVE_CLOSE_RANGE)
 check_function_exists(backtrace HAVE_BACKTRACE)
 
+check_function_exists(arc4random_buf HAVE_ARC4RANDOM_BUF)
+check_function_exists(arc4random_uniform HAVE_ARC4RANDOM_UNIFORM)
+check_function_exists(getrandom HAVE_GETRANDOM)
+
 #
 # check source compilation
 #
@@ -530,12 +543,22 @@ int my_function() {
 }
 " HAVE_FUNC_ATTRIBUTE_WARN_UNUSED_RESULT)
 
+# Windows MSVCRT random number generator
+# used only when compiling natively (not MSYS/CYGWIN)
+check_c_source_compiles("
+#define _CRT_RAND_S
+#include <stdlib.h>
+int main() {
+  unsigned int x;
+  return rand_s(&x);
+}
+" HAVE_RAND_S)
+
 if(OS_FREEBSD OR OS_MACOS)
   set(HAVE_BUILTIN_ATOMICS True)
 endif()
 
 # openssl/crypto
-set(ENABLE_OPENSSL True)
 pkg_check_modules(TLS IMPORTED_TARGET openssl)
 
 if(NOT TARGET PkgConfig::TLS)
@@ -628,8 +651,11 @@ set(LIBNETDATA_FILES
     src/libnetdata/aral/aral.h
     src/libnetdata/avl/avl.c
     src/libnetdata/avl/avl.h
+    src/libnetdata/bitmap/bitmap64.h
     src/libnetdata/buffer/buffer.c
     src/libnetdata/buffer/buffer.h
+    src/libnetdata/ringbuffer/ringbuffer.c
+    src/libnetdata/ringbuffer/ringbuffer.h
     src/libnetdata/circular_buffer/circular_buffer.c
     src/libnetdata/circular_buffer/circular_buffer.h
     src/libnetdata/clocks/clocks.c
@@ -664,15 +690,15 @@ set(LIBNETDATA_FILES
     src/libnetdata/line_splitter/line_splitter.c
     src/libnetdata/line_splitter/line_splitter.h
     src/libnetdata/libnetdata.h
+    src/libnetdata/linked_lists/linked_lists.h
     src/libnetdata/locks/locks.c
     src/libnetdata/locks/locks.h
-    src/libnetdata/log/journal.c
-    src/libnetdata/log/journal.h
-    src/libnetdata/log/log.c
-    src/libnetdata/log/log.h
+    src/libnetdata/log/systemd-journal-helpers.c
+    src/libnetdata/log/systemd-journal-helpers.h
+    src/libnetdata/log/nd_log.c
+    src/libnetdata/log/nd_log.h
     src/libnetdata/os/os.c
     src/libnetdata/os/os.h
-    src/libnetdata/simple_hashtable.h
     src/libnetdata/os/byteorder.h
     src/libnetdata/onewayalloc/onewayalloc.c
     src/libnetdata/onewayalloc/onewayalloc.h
@@ -683,6 +709,8 @@ set(LIBNETDATA_FILES
     src/libnetdata/required_dummies.h
     src/libnetdata/socket/security.c
     src/libnetdata/socket/security.h
+    src/libnetdata/simple_hashtable/simple_hashtable.h
+    src/libnetdata/simple_hashtable/simple_hashtable_undef.h
     src/libnetdata/simple_pattern/simple_pattern.c
     src/libnetdata/simple_pattern/simple_pattern.h
     src/libnetdata/socket/socket.c
@@ -723,9 +751,9 @@ set(LIBNETDATA_FILES
     src/libnetdata/dictionary/dictionary-hashtable.h
     src/libnetdata/dictionary/dictionary-item.h
     src/libnetdata/dictionary/dictionary-callbacks.h
-    src/libnetdata/linked-lists.h
     src/libnetdata/storage-point.h
-    src/libnetdata/bitmap64.h
+    src/libnetdata/parsers/parsers.h
+    src/libnetdata/parsers/duration.c
     src/libnetdata/os/gettid.c
     src/libnetdata/os/gettid.h
     src/libnetdata/os/adjtimex.c
@@ -746,22 +774,111 @@ set(LIBNETDATA_FILES
     src/libnetdata/os/os-windows-wrappers.h
     src/libnetdata/os/get_system_cpus.c
     src/libnetdata/os/get_system_cpus.h
-    src/libnetdata/os/tinysleep.c
-    src/libnetdata/os/tinysleep.h
+    src/libnetdata/os/sleep.c
+    src/libnetdata/os/sleep.h
     src/libnetdata/os/uuid_generate.c
     src/libnetdata/os/uuid_generate.h
     src/libnetdata/os/setenv.c
     src/libnetdata/os/setenv.h
     src/libnetdata/os/strndup.c
     src/libnetdata/os/strndup.h
-    src/libnetdata/spawn_server/spawn_server.c
+    src/libnetdata/os/windows-wmi/windows-wmi.c
+    src/libnetdata/os/windows-wmi/windows-wmi.h
+    src/libnetdata/os/windows-wmi/windows-wmi-GetDiskDriveInfo.c
+    src/libnetdata/os/windows-wmi/windows-wmi-GetDiskDriveInfo.h
+    src/libnetdata/os/windows-perflib/perflib.c
+    src/libnetdata/os/windows-perflib/perflib.h
+    src/libnetdata/os/windows-perflib/perflib-names.c
+    src/libnetdata/os/windows-perflib/perflib-dump.c
+    src/libnetdata/os/system-maps/cached-uid-username.c
+    src/libnetdata/os/system-maps/cached-uid-username.h
+    src/libnetdata/os/system-maps/cached-sid-username.c
+    src/libnetdata/os/system-maps/cached-sid-username.h
+    src/libnetdata/os/system-maps/cached-gid-groupname.c
+    src/libnetdata/os/system-maps/cached-gid-groupname.h
+    src/libnetdata/os/system-maps/cache-host-users-and-groups.c
+    src/libnetdata/os/system-maps/cache-host-users-and-groups.h
+    src/libnetdata/spawn_server/spawn_server_nofork.c
     src/libnetdata/spawn_server/spawn_server.h
     src/libnetdata/spawn_server/spawn_popen.c
     src/libnetdata/spawn_server/spawn_popen.h
+    src/libnetdata/spawn_server/spawn_server_windows.c
+    src/libnetdata/spawn_server/spawn_server_internals.h
+    src/libnetdata/spawn_server/spawn_server_libuv.c
+    src/libnetdata/spawn_server/spawn_server_posix.c
+    src/libnetdata/spawn_server/spawn_library.c
+    src/libnetdata/spawn_server/spawn_library.h
     src/libnetdata/os/close_range.c
     src/libnetdata/os/close_range.h
     src/libnetdata/os/setproctitle.c
     src/libnetdata/os/setproctitle.h
+    src/libnetdata/paths/paths.c
+    src/libnetdata/paths/paths.h
+    src/libnetdata/json/json-c-parser-inline.c
+    src/libnetdata/parsers/duration.h
+    src/libnetdata/parsers/timeframe.c
+    src/libnetdata/parsers/timeframe.h
+    src/libnetdata/parsers/size.c
+    src/libnetdata/parsers/size.h
+    src/libnetdata/libjudy/judy-malloc.c
+    src/libnetdata/libjudy/judy-malloc.h
+    src/libnetdata/config/appconfig_internals.h
+    src/libnetdata/config/appconfig_exporters.c
+    src/libnetdata/config/appconfig_conf_file.c
+    src/libnetdata/config/appconfig_cleanup.c
+    src/libnetdata/config/appconfig_sections.c
+    src/libnetdata/config/appconfig_options.c
+    src/libnetdata/config/appconfig_migrate.c
+    src/libnetdata/config/appconfig_traversal.c
+    src/libnetdata/config/appconfig_api_sizes.c
+    src/libnetdata/config/appconfig_api_sizes.h
+    src/libnetdata/config/appconfig_api_durations.c
+    src/libnetdata/config/appconfig_api_durations.h
+    src/libnetdata/config/appconfig_api_numbers.c
+    src/libnetdata/config/appconfig_api_numbers.h
+    src/libnetdata/config/appconfig_api_text.c
+    src/libnetdata/config/appconfig_api_text.h
+    src/libnetdata/config/appconfig_api_boolean.c
+    src/libnetdata/config/appconfig_api_boolean.h
+    src/libnetdata/facets/logs_query_status.h
+    src/libnetdata/os/timestamps.c
+    src/libnetdata/os/timestamps.h
+    src/libnetdata/parsers/entries.c
+    src/libnetdata/parsers/entries.h
+    src/libnetdata/sanitizers/chart_id_and_name.c
+    src/libnetdata/sanitizers/chart_id_and_name.h
+    src/libnetdata/sanitizers/utf8-sanitizer.c
+    src/libnetdata/sanitizers/utf8-sanitizer.h
+    src/libnetdata/sanitizers/sanitizers.h
+    src/libnetdata/sanitizers/sanitizers-labels.c
+    src/libnetdata/sanitizers/sanitizers-labels.h
+    src/libnetdata/sanitizers/sanitizers-functions.c
+    src/libnetdata/sanitizers/sanitizers-functions.h
+    src/libnetdata/sanitizers/sanitizers-pluginsd.c
+    src/libnetdata/sanitizers/sanitizers-pluginsd.h
+    src/libnetdata/log/nd_log-internals.c
+    src/libnetdata/log/nd_log-internals.h
+    src/libnetdata/log/nd_log_limit.c
+    src/libnetdata/log/nd_log_limit.h
+    src/libnetdata/log/nd_log-config.c
+    src/libnetdata/log/nd_log-init.c
+    src/libnetdata/log/nd_log-to-syslog.c
+    src/libnetdata/log/nd_log-to-systemd-journal.c
+    src/libnetdata/log/nd_log-annotators.c
+    src/libnetdata/log/nd_log-field-formatters.c
+    src/libnetdata/log/nd_log-format-logfmt.c
+    src/libnetdata/log/nd_log-format-json.c
+    src/libnetdata/log/nd_log-to-file.c
+    src/libnetdata/log/nd_log-to-windows-events.c
+    src/libnetdata/string/utf8.c
+    src/libnetdata/spawn_server/log-forwarder.c
+    src/libnetdata/spawn_server/log-forwarder.h
+    src/libnetdata/log/nd_log-common.h
+    src/libnetdata/log/nd_log-to-windows-common.h
+    src/libnetdata/common.h
+    src/libnetdata/xxHash/xxhash.h
+    src/libnetdata/os/random.c
+    src/libnetdata/os/random.h
 )
 
 if(ENABLE_PLUGIN_EBPF)
@@ -856,18 +973,19 @@ set(LIBH2O_FILES
 
 set(DAEMON_FILES
     src/daemon/buildinfo.c
     src/daemon/buildinfo.h
-    src/daemon/common.c
+    src/daemon/h2o-common.c
     src/daemon/common.h
     src/daemon/daemon.c
     src/daemon/daemon.h
-    src/daemon/event_loop.c
-    src/daemon/event_loop.h
+    src/daemon/libuv_workers.c
+    src/daemon/libuv_workers.h
     src/daemon/global_statistics.c
     src/daemon/global_statistics.h
     src/daemon/analytics.c
     src/daemon/analytics.h
     src/daemon/main.c
     src/daemon/main.h
+    src/daemon/environment.c
     src/daemon/win_system-info.c
     src/daemon/win_system-info.h
     src/daemon/signals.c
@@ -899,7 +1017,7 @@ set(H2O_FILES
     src/web/server/h2o/http_server.h
     src/web/server/h2o/h2o_utils.c
     src/web/server/h2o/h2o_utils.h
-    src/web/server/h2o/streaming.c
+    src/web/server/h2o/rrdpush.c
     src/web/server/h2o/streaming.h
     src/web/server/h2o/connlist.c
     src/web/server/h2o/connlist.h
@@ -916,16 +1034,65 @@ set(API_PLUGIN_FILES
     src/web/api/web_api_v1.h
     src/web/api/web_api_v2.c
     src/web/api/web_api_v2.h
+    src/web/api/web_api_v3.c
+    src/web/api/web_api_v3.h
     src/web/api/http_auth.c
     src/web/api/http_auth.h
     src/web/api/http_header.c
     src/web/api/http_header.h
-    src/web/api/badges/web_buffer_svg.c
-    src/web/api/badges/web_buffer_svg.h
-    src/web/api/exporters/allmetrics.c
-    src/web/api/exporters/allmetrics.h
-    src/web/api/exporters/shell/allmetrics_shell.c
-    src/web/api/exporters/shell/allmetrics_shell.h
+    src/web/api/maps/rrdr_options.c
+    src/web/api/maps/rrdr_options.h
+    src/web/api/maps/contexts_options.c
+    src/web/api/maps/contexts_options.h
+    src/web/api/maps/datasource_formats.c
+    src/web/api/maps/datasource_formats.h
+    src/web/api/maps/maps.h
+    src/web/api/maps/contexts_alert_statuses.c
+    src/web/api/maps/contexts_alert_statuses.h
+    src/web/api/v1/api_v1_allmetrics.c
+    src/web/api/v1/api_v1_badge/web_buffer_svg.c
+    src/web/api/v1/api_v1_function.c
+    src/web/api/v1/api_v1_manage.c
+    src/web/api/v1/api_v1_calls.h
+    src/web/api/v1/api_v1_dbengine.c
+    src/web/api/v1/api_v1_config.c
+    src/web/api/v1/api_v1_functions.c
+    src/web/api/v1/api_v1_weights.c
+    src/web/api/v1/api_v1_info.c
+    src/web/api/v1/api_v1_registry.c
+    src/web/api/v1/api_v1_data.c
+    src/web/api/v1/api_v1_contexts.c
+    src/web/api/v1/api_v1_ml_info.c
+    src/web/api/v1/api_v1_aclk.c
+    src/web/api/v1/api_v1_context.c
+    src/web/api/v1/api_v1_alarms.c
+    src/web/api/v1/api_v1_charts.c
+    src/web/api/v2/api_v2_info.c
+    src/web/api/v2/api_v2_nodes.c
+    src/web/api/v2/api_v2_node_instances.c
+    src/web/api/v2/api_v2_q.c
+    src/web/api/v2/api_v2_versions.c
+    src/web/api/v2/api_v2_functions.c
+    src/web/api/v2/api_v2_alerts.c
+    src/web/api/v2/api_v2_alert_transitions.c
+    src/web/api/v2/api_v2_ilove/ilove.c
+    src/web/api/v2/api_v2_bearer.c
+    src/web/api/v2/api_v2_calls.h
+    src/web/api/v2/api_v2_data.c
+    src/web/api/v2/api_v2_progress.c
+    src/web/api/v2/api_v2_weights.c
+    src/web/api/v2/api_v2_alert_config.c
+    src/web/api/v2/api_v2_contexts.c
+    src/web/api/v2/api_v2_claim.c
+    src/web/api/v2/api_v2_webrtc.c
+    src/web/api/v3/api_v3_calls.h
+    src/web/api/v3/api_v3_settings.c
+    src/web/api/functions/functions.c
+    src/web/api/functions/functions.h
+    src/web/api/functions/function-progress.c
+    src/web/api/functions/function-progress.h
+    src/web/api/functions/function-streaming.c
+    src/web/api/functions/function-streaming.h
     src/web/api/queries/rrdr.c
     src/web/api/queries/rrdr.h
     src/web/api/queries/query.c
@@ -972,10 +1139,11 @@ set(API_PLUGIN_FILES
     src/web/api/formatters/charts2json.h
     src/web/api/formatters/rrdset2json.c
     src/web/api/formatters/rrdset2json.h
-    src/web/api/ilove/ilove.c
-    src/web/api/ilove/ilove.h
     src/web/rtc/webrtc.c
     src/web/rtc/webrtc.h
+    src/web/api/functions/function-bearer_get_token.c
+    src/web/api/functions/function-bearer_get_token.h
+    src/web/api/v3/api_v3_me.c
 )
 
 set(EXPORTING_ENGINE_FILES
@@ -1040,34 +1208,48 @@ endif()
 
 set(INTERNAL_COLLECTORS_FILES
     src/collectors/common-contexts/common-contexts.h
-    src/collectors/common-contexts/disk.io.h
-    src/collectors/common-contexts/system.io.h
-    src/collectors/common-contexts/system.interrupts.h
-    src/collectors/common-contexts/system.processes.h
-    src/collectors/common-contexts/system.ram.h
-    src/collectors/common-contexts/mem.swap.h
-    src/collectors/common-contexts/mem.pgfaults.h
-    src/collectors/common-contexts/mem.available.h
+    src/collectors/common-contexts/disk-await.h
+    src/collectors/common-contexts/disk-avgsz.h
+    src/collectors/common-contexts/disk-busy.h
+    src/collectors/common-contexts/disk-io.h
+    src/collectors/common-contexts/disk-iotime.h
+    src/collectors/common-contexts/disk-ops.h
+    src/collectors/common-contexts/disk-qops.h
+    src/collectors/common-contexts/disk-svctm.h
+    src/collectors/common-contexts/disk-util.h
+    src/collectors/common-contexts/system-io.h
+    src/collectors/common-contexts/system-interrupts.h
+    src/collectors/common-contexts/system-processes.h
+    src/collectors/common-contexts/system-ram.h
+    src/collectors/common-contexts/mem-swap.h
+    src/collectors/common-contexts/mem-pgfaults.h
+    src/collectors/common-contexts/mem-available.h
 )
 
 set(PLUGINSD_PLUGIN_FILES
-    src/collectors/plugins.d/plugins_d.c
-    src/collectors/plugins.d/plugins_d.h
-    src/collectors/plugins.d/pluginsd_dyncfg.c
-    src/collectors/plugins.d/pluginsd_dyncfg.h
-    src/collectors/plugins.d/pluginsd_functions.c
-    src/collectors/plugins.d/pluginsd_functions.h
-    src/collectors/plugins.d/pluginsd_internals.c
-    src/collectors/plugins.d/pluginsd_internals.h
-    src/collectors/plugins.d/pluginsd_parser.c
-    src/collectors/plugins.d/pluginsd_parser.h
-    src/collectors/plugins.d/pluginsd_replication.c
-    src/collectors/plugins.d/pluginsd_replication.h
+    src/plugins.d/plugins_d.c
+    src/plugins.d/plugins_d.h
+    src/plugins.d/pluginsd_dyncfg.c
+    src/plugins.d/pluginsd_dyncfg.h
+    src/plugins.d/pluginsd_functions.c
+    src/plugins.d/pluginsd_functions.h
+    src/plugins.d/pluginsd_internals.c
+    src/plugins.d/pluginsd_internals.h
+    src/plugins.d/pluginsd_parser.c
+    src/plugins.d/pluginsd_parser.h
+    src/plugins.d/pluginsd_replication.c
+    src/plugins.d/pluginsd_replication.h
 )
 
 set(RRD_PLUGIN_FILES
-    src/database/contexts/api_v1.c
-    src/database/contexts/api_v2.c
+    src/database/contexts/api_v1_contexts.c
+    src/database/contexts/api_v2_contexts.c
+    src/database/contexts/api_v2_contexts.h
+    src/database/contexts/api_v2_contexts_agents.c
+    src/database/contexts/api_v2_contexts_alerts.c
+    src/database/contexts/api_v2_contexts_alerts.h
+    src/database/contexts/api_v2_contexts_alert_transitions.c
+    src/database/contexts/api_v2_contexts_alert_config.c
     src/database/contexts/context.c
     src/database/contexts/instance.c
     src/database/contexts/internal.h
@@ -1084,10 +1266,6 @@ set(RRD_PLUGIN_FILES
     src/database/rrdfunctions.h
     src/database/rrdfunctions-inline.c
     src/database/rrdfunctions-inline.h
-    src/database/rrdfunctions-progress.c
-    src/database/rrdfunctions-progress.h
-    src/database/rrdfunctions-streaming.c
-    src/database/rrdfunctions-streaming.h
     src/database/rrdhost.c
     src/database/rrdlabels.c
     src/database/rrd.c
@@ -1188,29 +1366,54 @@ set(SYSTEMD_JOURNAL_PLUGIN_FILES
     src/collectors/systemd-journal.plugin/systemd-journal-fstat.c
     src/collectors/systemd-journal.plugin/systemd-journal-watcher.c
     src/collectors/systemd-journal.plugin/systemd-journal-dyncfg.c
-    src/libnetdata/maps/system-users.h
-    src/libnetdata/maps/system-groups.h
-    src/libnetdata/maps/system-services.h
+    src/libnetdata/os/system-maps/system-services.h
+    src/collectors/systemd-journal.plugin/systemd-journal-sampling.h
 )
 
 set(STREAMING_PLUGIN_FILES
-    src/streaming/rrdpush.c
     src/streaming/rrdpush.h
-    src/streaming/compression.c
-    src/streaming/compression.h
-    src/streaming/compression_brotli.c
-    src/streaming/compression_brotli.h
-    src/streaming/compression_gzip.c
-    src/streaming/compression_gzip.h
-    src/streaming/compression_lz4.c
-    src/streaming/compression_lz4.h
-    src/streaming/compression_zstd.c
-    src/streaming/compression_zstd.h
+    src/streaming/stream-compression/compression.c
+    src/streaming/stream-compression/compression.h
+    src/streaming/stream-compression/brotli.c
+    src/streaming/stream-compression/brotli.h
+    src/streaming/stream-compression/gzip.c
+    src/streaming/stream-compression/gzip.h
+    src/streaming/stream-compression/lz4.c
+    src/streaming/stream-compression/lz4.h
+    src/streaming/stream-compression/zstd.c
+    src/streaming/stream-compression/zstd.h
     src/streaming/receiver.c
     src/streaming/sender.c
     src/streaming/replication.c
     src/streaming/replication.h
-    src/streaming/common.h
+    src/streaming/h2o-common.h
+    src/streaming/protocol/command-nodeid.c
+    src/streaming/protocol/commands.c
+    src/streaming/protocol/commands.h
+    src/streaming/protocol/command-claimed_id.c
+    src/streaming/stream-path.c
+    src/streaming/stream-path.h
+    src/streaming/stream-capabilities.c
+    src/streaming/stream-capabilities.h
+    src/streaming/sender-connect.c
+    src/streaming/sender-internals.h
+    src/streaming/sender-execute.c
+    src/streaming/sender-commit.c
+    src/streaming/sender-destinations.c
+    src/streaming/stream-handshake.c
+    src/streaming/protocol/command-function.c
+    src/streaming/protocol/command-host-labels.c
+    src/streaming/protocol/command-chart-definition.c
+    src/streaming/protocol/command-begin-set-end.c
+    src/streaming/protocol/command-host-variables.c
+    src/streaming/stream-conf.c
+    src/streaming/stream-conf.h
+    src/streaming/stream-handshake.h
+    src/streaming/sender.h
+    src/streaming/sender-destinations.h
+    src/streaming/rrdhost-status.c
+    src/streaming/rrdhost-status.h
+    src/streaming/receiver.h
 )
 
 set(WEB_PLUGIN_FILES
@@ -1227,10 +1430,22 @@ set(WEB_PLUGIN_FILES
 
 set(CLAIM_PLUGIN_FILES
     src/claim/claim.c
     src/claim/claim.h
+    src/claim/claim_id.c
+    src/claim/claim_id.h
+    src/claim/cloud-conf.c
+    src/claim/claim-with-api.c
+    src/claim/cloud-status.c
+    src/claim/cloud-status.h
+)
+
+set(CLAIM_WINDOWS_FILES
+    src/claim/main.c
+    src/claim/main.h
+    src/claim/ui.c
+    src/claim/ui.h
 )
 
 set(ACLK_ALWAYS_BUILD
-    src/aclk/aclk_rrdhost_state.h
     src/aclk/aclk_proxy.c
     src/aclk/aclk_proxy.h
     src/aclk/aclk.c
@@ -1241,12 +1456,9 @@ set(ACLK_ALWAYS_BUILD
     src/aclk/aclk_util.h
     src/aclk/https_client.c
    src/aclk/https_client.h
-    src/aclk/mqtt_websockets/c-rbuf/cringbuffer.c
-    src/aclk/mqtt_websockets/c-rbuf/cringbuffer.h
-    src/aclk/mqtt_websockets/c-rbuf/cringbuffer_internal.h
-    src/aclk/mqtt_websockets/c_rhash/c_rhash.c
-    src/aclk/mqtt_websockets/c_rhash/c_rhash.h
-    src/aclk/mqtt_websockets/c_rhash/c_rhash_internal.h
+    src/libnetdata/c_rhash/c_rhash.c
+    src/libnetdata/c_rhash/c_rhash.h
+    src/libnetdata/c_rhash/c_rhash_internal.h
 )
 
 set(TIMEX_PLUGIN_FILES
@@ -1291,24 +1503,45 @@ set(FREEBSD_PLUGIN_FILES
     src/collectors/proc.plugin/zfs_common.h
 )
 
+set(WINDOWS_EVENTS_PLUGIN_FILES
+    src/collectors/windows-events.plugin/windows-events.c
+    src/collectors/windows-events.plugin/windows-events.h
+    src/collectors/windows-events.plugin/windows-events-query.h
+    src/collectors/windows-events.plugin/windows-events-query.c
+    src/collectors/windows-events.plugin/windows-events-sources.c
+    src/collectors/windows-events.plugin/windows-events-sources.h
+    src/collectors/windows-events.plugin/windows-events-unicode.c
+    src/collectors/windows-events.plugin/windows-events-unicode.h
+    src/collectors/windows-events.plugin/windows-events-xml.c
+    src/collectors/windows-events.plugin/windows-events-xml.h
+    src/collectors/windows-events.plugin/windows-events-providers.c
+    src/collectors/windows-events.plugin/windows-events-providers.h
+    src/collectors/windows-events.plugin/windows-events-fields-cache.c
+    src/collectors/windows-events.plugin/windows-events-fields-cache.h
+    src/collectors/windows-events.plugin/windows-events-query-builder.c
+    src/collectors/windows-events.plugin/windows-events-query-builder.h
+    src/collectors/windows-events.plugin/windows-events-query-evt-variant.c
+)
+
 set(WINDOWS_PLUGIN_FILES
     src/collectors/windows.plugin/windows_plugin.c
     src/collectors/windows.plugin/windows_plugin.h
     src/collectors/windows.plugin/GetSystemUptime.c
     src/collectors/windows.plugin/GetSystemRAM.c
     src/collectors/windows.plugin/GetSystemCPU.c
-    src/collectors/windows.plugin/perflib.c
-    src/collectors/windows.plugin/perflib.h
     src/collectors/windows.plugin/perflib-rrd.c
     src/collectors/windows.plugin/perflib-rrd.h
-    src/collectors/windows.plugin/perflib-names.c
-    src/collectors/windows.plugin/perflib-dump.c
+    src/collectors/windows.plugin/perflib-mssql.c
     src/collectors/windows.plugin/perflib-storage.c
     src/collectors/windows.plugin/perflib-processor.c
+    src/collectors/windows.plugin/perflib-thermalzone.c
     src/collectors/windows.plugin/perflib-objects.c
     src/collectors/windows.plugin/perflib-network.c
+    src/collectors/windows.plugin/perflib-netframework.c
     src/collectors/windows.plugin/perflib-memory.c
     src/collectors/windows.plugin/perflib-processes.c
+    src/collectors/windows.plugin/perflib-web-service.c
+    src/collectors/windows.plugin/perflib-hyperv.c
 )
 
 set(PROC_PLUGIN_FILES
@@ -1424,8 +1657,6 @@ endif()
 
 set(MQTT_WEBSOCKETS_FILES
     src/aclk/mqtt_websockets/mqtt_wss_client.c
     src/aclk/mqtt_websockets/mqtt_wss_client.h
-    src/aclk/mqtt_websockets/mqtt_wss_log.c
-    src/aclk/mqtt_websockets/mqtt_wss_log.h
     src/aclk/mqtt_websockets/ws_client.c
     src/aclk/mqtt_websockets/ws_client.h
     src/aclk/mqtt_websockets/mqtt_ng.c
@@ -1450,8 +1681,6 @@ set(ACLK_PROTO_DEFS
 )
 
 set(ACLK_FILES
-    src/aclk/aclk_stats.c
-    src/aclk/aclk_stats.h
     src/aclk/aclk_query.c
     src/aclk/aclk_query.h
     src/aclk/aclk_query_queue.c
@@ -1491,8 +1720,6 @@ set(ACLK_FILES
     src/aclk/schema-wrappers/schema_wrapper_utils.h
     src/aclk/schema-wrappers/agent_cmds.cc
     src/aclk/schema-wrappers/agent_cmds.h
-    src/aclk/helpers/mqtt_wss_pal.h
-    src/aclk/helpers/ringbuffer_pal.h
 )
 
@@ -1596,10 +1823,90 @@ target_include_directories(libnetdata BEFORE PUBLIC ${CONFIG_H_DIR} ${CMAKE_SOUR
 target_link_libraries(libnetdata PUBLIC
     "$<$>:atomic>"
     "$<$,$>:pthread;rt>"
-    "$<$:kernel32;advapi32;winmm;rpcrt4>"
+    "$<$:kernel32;advapi32;winmm;rpcrt4;wevtapi;ole32;oleaut32;wbemuuid>"
     "$<$:m>"
     "${SYSTEMD_LDFLAGS}")
 
+if(OS_WINDOWS)
+  set(HAVE_ETW True)
+  set(HAVE_WEL True)
+
+  # Output the results for debugging purposes
+  message(STATUS "Have Event Tracing for Windows (ETW): ${HAVE_ETW}")
+  message(STATUS "Have Windows Event Log (WEL): ${HAVE_WEL}")
+
+  if(HAVE_WEL OR HAVE_ETW)
+    # Define the source and generated file paths
+    set(WEVT_GEN_SRC_H_FILE "${CMAKE_SOURCE_DIR}/src/libnetdata/log/nd_log-to-windows-common.h")
+    set(WEVT_GEN_SRC_C_FILE "${CMAKE_SOURCE_DIR}/src/libnetdata/log/wevt_netdata_mc_generate.c")
+    set(WEVT_GEN_BIN_FILE "${CMAKE_BINARY_DIR}/wevt_netdata_mc_generate")
+
+    set(WEVT_BUILD_SCRIPT "${CMAKE_SOURCE_DIR}/src/libnetdata/log/wevt_netdata_compile.sh")
+
+    set(WEVT_MC_FILE "${CMAKE_BINARY_DIR}/wevt_netdata.mc")
+    set(WEVT_MAN_FILE "${CMAKE_BINARY_DIR}/wevt_netdata_manifest.xml")
+    set(WEVT_RC_FILE "${CMAKE_BINARY_DIR}/wevt_netdata.rc")
+    set(WEVT_MC_H_FILE "${CMAKE_BINARY_DIR}/wevt_netdata.h")
+    set(WEVT_MAN_H_FILE "${CMAKE_BINARY_DIR}/wevt_netdata_manifest.h")
+    set(WEVT_RES_OBJECT "${CMAKE_BINARY_DIR}/wevt_netdata_res.o")
+
+    set(WEVT_DLL_FILE "${CMAKE_BINARY_DIR}/wevt_netdata.dll")
+    set(WEVT_ETW_INSTALL_SCRIPT "${CMAKE_SOURCE_DIR}/src/libnetdata/log/wevt_netdata_install.bat")
+
+    # we compile ${WEVT_GEN_BIN_FILE}, which generates the manifest, the .mc,
+    # and the headers required for compiling libnetdata/logs
+
+    if(HAVE_ETW)
+      # ETW method also supports WEL
+      # but it requires Microsoft tools mc, rc, and link
+      add_custom_command(
+              OUTPUT "${WEVT_MC_H_FILE}" "${WEVT_MAN_H_FILE}" "${WEVT_DLL_FILE}"
+              COMMAND "${CMAKE_C_COMPILER}" -o "${WEVT_GEN_BIN_FILE}" "${WEVT_GEN_SRC_C_FILE}"
+              COMMAND "${WEVT_GEN_BIN_FILE}" >"${WEVT_MC_FILE}"
+              COMMAND "${WEVT_GEN_BIN_FILE}" --manifest >"${WEVT_MAN_FILE}"
+              COMMAND "${WEVT_BUILD_SCRIPT}" "${CMAKE_SOURCE_DIR}/src/libnetdata/log" "${CMAKE_BINARY_DIR}"
+              DEPENDS "${WEVT_GEN_SRC_C_FILE}" "${WEVT_GEN_SRC_H_FILE}"
+              COMMENT "Compiling ${WEVT_MC_FILE} to generate ${WEVT_MC_H_FILE} and ${WEVT_DLL_FILE}"
+      )
+    else()
+      # WEL method can be built with windmc, windres and the normal linker
+      add_custom_command(
+              OUTPUT "${WEVT_MC_H_FILE}" "${WEVT_DLL_FILE}"
+              COMMAND "${CMAKE_C_COMPILER}" -o "${WEVT_GEN_BIN_FILE}" "${WEVT_GEN_SRC_C_FILE}"
+              COMMAND "${WEVT_GEN_BIN_FILE}" >"${WEVT_MC_FILE}"
+              COMMAND "${WEVT_GEN_BIN_FILE}" --manifest >"${WEVT_MAN_FILE}"
+              COMMAND windmc -r "${CMAKE_BINARY_DIR}" -h "${CMAKE_BINARY_DIR}" ${WEVT_MC_FILE}
+              COMMAND echo "1 2004" "wevt_netdata_manifest.xml" >> "${WEVT_RC_FILE}"
+              COMMAND windres ${WEVT_RC_FILE} -o ${WEVT_RES_OBJECT}
+              COMMAND ${CMAKE_LINKER} -dll --entry 0 -nostdlib -o ${WEVT_DLL_FILE} ${WEVT_RES_OBJECT}
+              DEPENDS "${WEVT_GEN_SRC_C_FILE}" "${WEVT_GEN_SRC_H_FILE}"
+              COMMENT "Compiling ${WEVT_MC_FILE} to generate ${WEVT_MC_H_FILE} and ${WEVT_DLL_FILE}"
+      )
+    endif()
+
+    # Create a custom target for the DLL
+    add_custom_target(wevt_netdata ALL DEPENDS ${WEVT_DLL_FILE})
+
+    set_source_files_properties(src/libnetdata/log/nd_log-to-windows-events.c PROPERTIES
+            OBJECT_DEPENDS "${WEVT_MC_H_FILE}")
+
+    if(HAVE_ETW)
+      set_source_files_properties(src/libnetdata/log/nd_log-to-windows-events.c PROPERTIES
+              OBJECT_DEPENDS "${WEVT_MAN_H_FILE}")
+
+      install(FILES "${WEVT_DLL_FILE}" "${WEVT_MAN_FILE}" "${WEVT_ETW_INSTALL_SCRIPT}"
+              COMPONENT wevt_netdata_dll
+              DESTINATION "${BINDIR}")
+    else()
+      # do not install the manifest in this case
+      # the nsi installer will skip registering the ETW publisher
+      install(FILES "${WEVT_DLL_FILE}"
+              COMPONENT wevt_netdata_dll
+              DESTINATION "${BINDIR}")
+    endif()
+  endif()
+endif()
+
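The event-log block above follows a standard three-part CMake pattern: add_custom_command() declares how the generated files are produced, add_custom_target(... ALL ...) hooks that rule into the default build, and OBJECT_DEPENDS forces a consumer source to be recompiled when a generated header changes. A stripped-down sketch of the same wiring, with illustrative file names:

    # Rule: produce gen.h by compiling and running a small generator tool.
    add_custom_command(
        OUTPUT "${CMAKE_BINARY_DIR}/gen.h"
        COMMAND "${CMAKE_C_COMPILER}" -o gen_tool "${CMAKE_SOURCE_DIR}/gen_tool.c"
        COMMAND ./gen_tool > "${CMAKE_BINARY_DIR}/gen.h"
        DEPENDS "${CMAKE_SOURCE_DIR}/gen_tool.c"
        COMMENT "Generating gen.h")

    # Anchor: ALL attaches the rule to the default build, like wevt_netdata above.
    add_custom_target(generate_headers ALL DEPENDS "${CMAKE_BINARY_DIR}/gen.h")

    # Objects compiled from consumer.c are rebuilt whenever gen.h changes.
    set_source_files_properties(consumer.c PROPERTIES
        OBJECT_DEPENDS "${CMAKE_BINARY_DIR}/gen.h")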
+
 # ebpf
 if(ENABLE_PLUGIN_EBPF)
   netdata_add_libbpf_to_target(libnetdata)
@@ -1690,10 +1997,7 @@ endif()
 
 #
 # mqtt library
 #
-if (ENABLE_H2O OR ENABLE_ACLK)
-  set(ENABLE_MQTTWEBSOCKETS True)
-endif()
-
+set(ENABLE_MQTTWEBSOCKETS True)
 if(ENABLE_MQTTWEBSOCKETS)
   add_library(mqttwebsockets STATIC ${MQTT_WEBSOCKETS_FILES})
@@ -1708,20 +2012,17 @@ endif()
 
-if(ENABLE_ACLK)
-  #
-  # proto definitions
-  #
-  netdata_protoc_generate_cpp("${CMAKE_SOURCE_DIR}/src/aclk/aclk-schemas"
-          "${CMAKE_SOURCE_DIR}/src/aclk/aclk-schemas"
-          ACLK_PROTO_BUILT_SRCS
-          ACLK_PROTO_BUILT_HDRS
-          ${ACLK_PROTO_DEFS})
-
-  list(APPEND ACLK_FILES ${ACLK_PROTO_BUILT_SRCS}
-          ${ACLK_PROTO_BUILT_HDRS})
+#
+# proto definitions
+#
+netdata_protoc_generate_cpp("${CMAKE_SOURCE_DIR}/src/aclk/aclk-schemas"
+        "${CMAKE_SOURCE_DIR}/src/aclk/aclk-schemas"
+        ACLK_PROTO_BUILT_SRCS
+        ACLK_PROTO_BUILT_HDRS
+        ${ACLK_PROTO_DEFS})
-endif()
+list(APPEND ACLK_FILES ${ACLK_PROTO_BUILT_SRCS}
+        ${ACLK_PROTO_BUILT_HDRS})
 
 #
 # build plugins
@@ -1753,6 +2054,9 @@ if(ENABLE_PLUGIN_DEBUGFS)
   endif()
 endif()
 
+add_executable(spawn-tester src/libnetdata/spawn_server/spawn-tester.c)
+target_link_libraries(spawn-tester libnetdata)
+
 if(ENABLE_PLUGIN_APPS)
   pkg_check_modules(CAP QUIET libcap)
 
@@ -1761,21 +2065,24 @@ if(ENABLE_PLUGIN_APPS)
     src/collectors/apps.plugin/apps_plugin.h
     src/collectors/apps.plugin/apps_functions.c
     src/collectors/apps.plugin/apps_targets.c
-    src/collectors/apps.plugin/apps_users_and_groups.c
     src/collectors/apps.plugin/apps_output.c
-    src/collectors/apps.plugin/apps_proc_pid_status.c
-    src/collectors/apps.plugin/apps_proc_pid_limits.c
-    src/collectors/apps.plugin/apps_proc_pid_stat.c
-    src/collectors/apps.plugin/apps_proc_pid_cmdline.c
-    src/collectors/apps.plugin/apps_proc_pid_io.c
-    src/collectors/apps.plugin/apps_proc_stat.c
-    src/collectors/apps.plugin/apps_proc_pid_fd.c
-    src/collectors/apps.plugin/apps_proc_pids.c
-    src/collectors/apps.plugin/apps_proc_meminfo.c
+    src/collectors/apps.plugin/apps_pid_files.c
+    src/collectors/apps.plugin/apps_pid.c
+    src/collectors/apps.plugin/apps_aggregations.c
+    src/collectors/apps.plugin/apps_os_linux.c
+    src/collectors/apps.plugin/apps_os_freebsd.c
+    src/collectors/apps.plugin/apps_os_macos.c
+    src/collectors/apps.plugin/apps_os_windows.c
+    src/collectors/apps.plugin/apps_incremental_collection.c
+    src/collectors/apps.plugin/apps_os_windows_nt.c
+    src/collectors/apps.plugin/apps_pid_match.c
   )
 
   add_executable(apps.plugin ${APPS_PLUGIN_FILES})
-  target_link_libraries(apps.plugin libnetdata ${CAP_LIBRARIES})
+
+  target_link_libraries(apps.plugin libnetdata ${CAP_LIBRARIES}
+          "$<$:Version;ntdll>")
+
   target_include_directories(apps.plugin PRIVATE ${CAP_INCLUDE_DIRS})
   target_compile_options(apps.plugin PRIVATE ${CAP_CFLAGS_OTHER})
@@ -1948,7 +2255,7 @@ if(ENABLE_PLUGIN_CUPS)
 endif()
 
 if(NEED_NDSUDO)
-  set(NDSUDO_FILES src/collectors/plugins.d/ndsudo.c)
+  set(NDSUDO_FILES src/collectors/utils/ndsudo.c)
 
   add_executable(ndsudo ${NDSUDO_FILES})
@@ -1988,6 +2295,15 @@ if(ENABLE_PLUGIN_SYSTEMD_JOURNAL)
   endif()
 endif()
 
+if(OS_WINDOWS)
+  add_executable(windows-events.plugin ${WINDOWS_EVENTS_PLUGIN_FILES})
+  target_link_libraries(windows-events.plugin libnetdata wevtapi)
+
+  install(TARGETS windows-events.plugin
+          COMPONENT plugin-windows-events
+          DESTINATION usr/libexec/netdata/plugins.d)
+endif()
+
 if(ENABLE_PLUGIN_EBPF)
   set(EBPF_PLUGIN_FILES
     src/collectors/ebpf.plugin/ebpf.c
@@ -2083,8 +2399,8 @@ endif()
 
 if(ENABLE_PLUGIN_LOCAL_LISTENERS)
   set(LOCAL_LISTENERS_FILES
-    src/collectors/plugins.d/local_listeners.c
-    src/libnetdata/maps/local-sockets.h
+    src/collectors/utils/local_listeners.c
+    src/libnetdata/local-sockets/local-sockets.h
   )
 
   add_executable(local-listeners ${LOCAL_LISTENERS_FILES})
@@ -2103,8 +2419,7 @@ endif()
 
 if(ENABLE_PLUGIN_NETWORK_VIEWER)
   set(NETWORK_VIEWER_FILES
-    src/libnetdata/maps/local-sockets.h
-    src/libnetdata/maps/system-users.h
+    src/libnetdata/local-sockets/local-sockets.h
     src/collectors/network-viewer.plugin/network-viewer.c
   )
@@ -2175,14 +2490,39 @@ endif()
 
 #
 # build netdata (only Linux ATM)
 #
 
+if(OS_WINDOWS)
+  set(NETDATA_CLAIM_RES_FILES "packaging/windows/resources/netdata_claim.rc")
+  configure_file(packaging/windows/resources/netdata_claim.manifest.in ${CMAKE_SOURCE_DIR}/packaging/windows/resources/netdata_claim.manifest @ONLY)
+
+  set(NETDATACLI_RES_FILES "packaging/windows/resources/netdatacli.rc")
+  configure_file(packaging/windows/resources/netdatacli.manifest.in ${CMAKE_SOURCE_DIR}/packaging/windows/resources/netdatacli.manifest @ONLY)
+
+  set(NETDATA_RES_FILES "packaging/windows/resources/netdata.rc")
+  configure_file(packaging/windows/resources/netdata.manifest.in ${CMAKE_SOURCE_DIR}/packaging/windows/resources/netdata.manifest @ONLY)
+
+  configure_file(packaging/windows/netdata.wxs.in netdata.wxs @ONLY)
+  configure_file(packaging/windows/NetdataWhite.ico NetdataWhite.ico COPYONLY)
+  configure_file(packaging/windows/gpl-3.0.rtf gpl-3.0.rtf COPYONLY)
+  configure_file(packaging/windows/ncul1.rtf ncul1.rtf COPYONLY)
+  configure_file(packaging/windows/Top.bmp Top.bmp COPYONLY)
+  configure_file(packaging/windows/BackGround.bmp BackGround.bmp COPYONLY)
+endif()
+
 add_executable(netdata ${NETDATA_FILES}
-    "$<$:${ACLK_FILES}>"
+    "${ACLK_FILES}"
     "$<$:${H2O_FILES}>"
     "$<$:${MONGODB_EXPORTING_FILES}>"
     "$<$:${PROMETHEUS_REMOTE_WRITE_EXPORTING_FILES}>"
+    "$<$:${NETDATA_RES_FILES}>"
 )
 
+if(OS_WINDOWS)
+  add_executable(NetdataClaim ${CLAIM_WINDOWS_FILES} ${NETDATA_CLAIM_RES_FILES})
+  target_link_libraries(NetdataClaim shell32 gdi32 msftedit)
+  target_compile_options(NetdataClaim PUBLIC -mwindows)
+endif()
+
 target_compile_definitions(netdata PRIVATE
     "$<$:DLIB_NO_GUI_SUPPORT>"
 )
@@ -2193,7 +2533,7 @@ target_compile_options(netdata PRIVATE
 )
 
 target_include_directories(netdata PRIVATE
-    "$<$:${CMAKE_SOURCE_DIR}/src/aclk/aclk-schemas>"
+    "${CMAKE_SOURCE_DIR}/src/aclk/aclk-schemas"
     "$<$:${MONGOC_INCLUDE_DIRS}>"
     "$<$:${SNAPPY_INCLUDE_DIRS}>"
 )
@@ -2209,6 +2549,7 @@ target_link_libraries(netdata PRIVATE
     "$<$:sentry>"
     "$<$:LibDataChannel::LibDataChannelStatic>"
     "$<$:h2o>"
+    "$<$:PkgConfig::CURL>"
 )
 
 if(NEED_PROTOBUF)
@@ -2222,7 +2563,10 @@ set(SYSTEMD_CAT_NATIVE_FILES
     src/libnetdata/log/systemd-cat-native.c
     src/libnetdata/log/systemd-cat-native.h)
 
 add_executable(systemd-cat-native ${SYSTEMD_CAT_NATIVE_FILES})
-target_link_libraries(systemd-cat-native libnetdata)
+target_link_libraries(systemd-cat-native
+        libnetdata
+        "$<$:PkgConfig::CURL>"
+)
 
 install(TARGETS systemd-cat-native
         COMPONENT netdata
@@ -2250,13 +2594,16 @@ if(PCRE2_FOUND)
     src/collectors/log2journal/log2journal-replace.c
     src/collectors/log2journal/log2journal-rename.c
     src/collectors/log2journal/log2journal-rewrite.c
+    src/collectors/log2journal/log2journal-txt.h
+    src/collectors/log2journal/log2journal-hashed-key.h
   )
 
   add_executable(log2journal ${LOG2JOURNAL_FILES})
   target_include_directories(log2journal BEFORE PUBLIC ${CONFIG_H_DIR} ${CMAKE_SOURCE_DIR}/src ${PCRE2_INCLUDE_DIRS})
   target_compile_options(log2journal PUBLIC ${PCRE2_CFLAGS_OTHER})
-  target_link_libraries(log2journal PUBLIC "${PCRE2_LDFLAGS}")
+  target_link_libraries(log2journal PUBLIC libnetdata)
+  target_link_libraries(log2journal PUBLIC "${PCRE2_LDFLAGS}")
   netdata_add_libyaml_to_target(log2journal)
 
   install(TARGETS log2journal
@@ -2279,7 +2626,7 @@ set(NETDATACLI_FILES
     src/cli/cli.c
 )
 
-add_executable(netdatacli ${NETDATACLI_FILES})
+add_executable(netdatacli ${NETDATACLI_FILES} "$<$:${NETDATACLI_RES_FILES}>")
 target_link_libraries(netdatacli libnetdata)
 
 install(TARGETS netdatacli
@@ -2342,9 +2689,7 @@ install(DIRECTORY COMPONENT netdata DESTINATION var/lib/netdata/cloud.d)
 install(DIRECTORY COMPONENT netdata DESTINATION var/run/netdata)
 install(DIRECTORY COMPONENT netdata DESTINATION etc/netdata)
 install(DIRECTORY COMPONENT netdata DESTINATION etc/netdata/custom-plugins.d)
-install(DIRECTORY COMPONENT netdata DESTINATION etc/netdata/go.d)
 install(DIRECTORY COMPONENT netdata DESTINATION etc/netdata/health.d)
-install(DIRECTORY COMPONENT netdata DESTINATION etc/netdata/python.d)
 install(DIRECTORY COMPONENT netdata DESTINATION etc/netdata/ssl)
 install(DIRECTORY COMPONENT netdata DESTINATION etc/netdata/statsd.d)
 install(DIRECTORY COMPONENT netdata DESTINATION usr/lib/netdata/conf.d)
@@ -2362,26 +2707,21 @@ set(cachedir_POST "${NETDATA_RUNTIME_PREFIX}/var/cache/netdata")
 set(registrydir_POST "${NETDATA_RUNTIME_PREFIX}/var/lib/netdata/registry")
 set(varlibdir_POST "${NETDATA_RUNTIME_PREFIX}/var/lib/netdata")
 set(netdata_user_POST "${NETDATA_USER}")
+set(netdata_group_POST "${NETDATA_USER}")
 
-# netdata-claim.sh
-if(ENABLE_CLOUD)
-  set(enable_cloud_POST "yes")
-else()
-  set(enable_cloud_POST "no")
-endif()
-
-if(ENABLE_ACLK)
-  set(enable_aclk_POST "yes")
+if(NOT OS_WINDOWS)
+  configure_file(src/claim/netdata-claim.sh.in src/claim/netdata-claim.sh @ONLY)
+  install(PROGRAMS
+          ${CMAKE_BINARY_DIR}/src/claim/netdata-claim.sh
+          COMPONENT netdata
+          DESTINATION "${BINDIR}")
 else()
-  set(enable_aclk_POST "no")
+  install(PROGRAMS
+          ${CMAKE_BINARY_DIR}/NetdataClaim.exe
+          COMPONENT netdata
+          DESTINATION "${BINDIR}")
 endif()
 
-configure_file(src/claim/netdata-claim.sh.in src/claim/netdata-claim.sh @ONLY)
-install(PROGRAMS
-        ${CMAKE_BINARY_DIR}/src/claim/netdata-claim.sh
-        COMPONENT netdata
-        DESTINATION "${BINDIR}")
-
 #
 # We don't check ENABLE_PLUGIN_CGROUP_NETWORK because rpm builds assume
 # the files exists unconditionally.
@@ -2704,64 +3044,66 @@ endif()
 
 #
 # charts.d plugin
 #
 
-install(DIRECTORY COMPONENT plugin-chartsd DESTINATION etc/netdata/charts.d)
+if(ENABLE_PLUGIN_CHARTS)
+  install(DIRECTORY COMPONENT plugin-chartsd DESTINATION etc/netdata/charts.d)
 
-configure_file(src/collectors/charts.d.plugin/charts.d.plugin.in src/collectors/charts.d.plugin/charts.d.plugin @ONLY)
-install(PROGRAMS
-        ${CMAKE_BINARY_DIR}/src/collectors/charts.d.plugin/charts.d.plugin
-        COMPONENT plugin-chartsd
-        DESTINATION usr/libexec/netdata/plugins.d)
+  configure_file(src/collectors/charts.d.plugin/charts.d.plugin.in src/collectors/charts.d.plugin/charts.d.plugin @ONLY)
+  install(PROGRAMS
+          ${CMAKE_BINARY_DIR}/src/collectors/charts.d.plugin/charts.d.plugin
+          COMPONENT plugin-chartsd
+          DESTINATION usr/libexec/netdata/plugins.d)
 
-install(PROGRAMS
-        src/collectors/charts.d.plugin/charts.d.dryrun-helper.sh
-        COMPONENT plugin-chartsd
-        DESTINATION usr/libexec/netdata/plugins.d)
+  install(PROGRAMS
+          src/collectors/charts.d.plugin/charts.d.dryrun-helper.sh
+          COMPONENT plugin-chartsd
+          DESTINATION usr/libexec/netdata/plugins.d)
 
-# loopsleepms is used by the tc.plugin -> ship it in the netdata component
-install(FILES
-        src/collectors/charts.d.plugin/loopsleepms.sh.inc
-        COMPONENT netdata
-        DESTINATION usr/libexec/netdata/plugins.d)
+  install(FILES
+          src/collectors/charts.d.plugin/charts.d.conf
+          COMPONENT plugin-chartsd
+          DESTINATION usr/lib/netdata/conf.d)
 
-install(FILES
-        src/collectors/charts.d.plugin/charts.d.conf
-        COMPONENT plugin-chartsd
-        DESTINATION usr/lib/netdata/conf.d)
+  install(PROGRAMS
+          src/collectors/charts.d.plugin/example/example.chart.sh
+          src/collectors/charts.d.plugin/libreswan/libreswan.chart.sh
+          src/collectors/charts.d.plugin/opensips/opensips.chart.sh
+          COMPONENT plugin-chartsd
+          DESTINATION usr/libexec/netdata/charts.d)
 
-install(PROGRAMS
-        src/collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh
-        src/collectors/charts.d.plugin/example/example.chart.sh
-        src/collectors/charts.d.plugin/libreswan/libreswan.chart.sh
-        src/collectors/charts.d.plugin/opensips/opensips.chart.sh
-        src/collectors/charts.d.plugin/sensors/sensors.chart.sh
-        COMPONENT plugin-chartsd
-        DESTINATION usr/libexec/netdata/charts.d)
+  install(FILES
+          src/collectors/charts.d.plugin/example/example.conf
+          src/collectors/charts.d.plugin/libreswan/libreswan.conf
+          src/collectors/charts.d.plugin/opensips/opensips.conf
+          COMPONENT plugin-chartsd
+          DESTINATION usr/lib/netdata/conf.d/charts.d)
 
-install(FILES
-        src/collectors/charts.d.plugin/apcupsd/apcupsd.conf
-        src/collectors/charts.d.plugin/example/example.conf
-        src/collectors/charts.d.plugin/libreswan/libreswan.conf
-        src/collectors/charts.d.plugin/opensips/opensips.conf
-        src/collectors/charts.d.plugin/sensors/sensors.conf
-        COMPONENT plugin-chartsd
-        DESTINATION usr/lib/netdata/conf.d/charts.d)
+  if(BUILD_FOR_PACKAGING)
+    install(FILES
+            ${PKG_FILES_PATH}/copyright
+            COMPONENT plugin-chartsd
+            DESTINATION usr/share/doc/netdata-plugin-chartsd)
+  endif()
+endif()
 
-if(BUILD_FOR_PACKAGING)
-  install(FILES
-          ${PKG_FILES_PATH}/copyright
-          COMPONENT plugin-chartsd
-          DESTINATION usr/share/doc/netdata-plugin-chartsd)
+# This is needed both by the TC plugin (which only gets built on Linux) and the charts plugin.
+if(OS_LINUX OR ENABLE_PLUGIN_CHARTS)
+  install(FILES
+          src/collectors/charts.d.plugin/loopsleepms.sh.inc
+          COMPONENT netdata
+          DESTINATION usr/libexec/netdata/plugins.d)
 endif()
 
 #
 # tc-qos-helper
 #
 
-configure_file(src/collectors/tc.plugin/tc-qos-helper.sh.in src/collectors/tc.plugin/tc-qos-helper.sh @ONLY)
-install(PROGRAMS
-        ${CMAKE_BINARY_DIR}/src/collectors/tc.plugin/tc-qos-helper.sh
-        COMPONENT netdata
-        DESTINATION usr/libexec/netdata/plugins.d)
+if(OS_LINUX)
+  configure_file(src/collectors/tc.plugin/tc-qos-helper.sh.in src/collectors/tc.plugin/tc-qos-helper.sh @ONLY)
+  install(PROGRAMS
+          ${CMAKE_BINARY_DIR}/src/collectors/tc.plugin/tc-qos-helper.sh
+          COMPONENT netdata
+          DESTINATION usr/libexec/netdata/plugins.d)
+endif()
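The helper-script hunks above (charts.d.plugin.in, tc-qos-helper.sh.in, and netdata-claim.sh.in earlier) all rely on configure_file(... @ONLY), which expands @VAR@ placeholders from CMake variables while leaving ${VAR} untouched — important for shell scripts, whose own ${...} syntax must survive substitution. A small sketch with an illustrative script name:

    set(netdata_user_POST "netdata")

    # helper.sh.in might contain:
    #   NETDATA_USER="@netdata_user_POST@"   <- substituted at configure time
    #   echo "running as ${NETDATA_USER}"    <- untouched because of @ONLY
    configure_file(helper.sh.in helper.sh @ONLY)

    install(PROGRAMS "${CMAKE_BINARY_DIR}/helper.sh"
            COMPONENT netdata
            DESTINATION usr/libexec/netdata/plugins.d)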
src/collectors/python.d.plugin/traefik/traefik.chart.py - src/collectors/python.d.plugin/varnish/varnish.chart.py - src/collectors/python.d.plugin/w1sensor/w1sensor.chart.py - src/collectors/python.d.plugin/zscores/zscores.chart.py - COMPONENT plugin-pythond - DESTINATION usr/libexec/netdata/python.d) + install(FILES src/collectors/python.d.plugin/python.d.conf + COMPONENT plugin-pythond + DESTINATION usr/lib/netdata/conf.d) -if(BUILD_FOR_PACKAGING) - install(FILES - ${PKG_FILES_PATH}/copyright - COMPONENT plugin-pythond - DESTINATION usr/share/doc/netdata-plugin-pythond) + install(FILES + src/collectors/python.d.plugin/am2320/am2320.conf + src/collectors/python.d.plugin/go_expvar/go_expvar.conf + src/collectors/python.d.plugin/haproxy/haproxy.conf + src/collectors/python.d.plugin/pandas/pandas.conf + src/collectors/python.d.plugin/traefik/traefik.conf + COMPONENT plugin-pythond + DESTINATION usr/lib/netdata/conf.d/python.d) + + install(FILES + src/collectors/python.d.plugin/am2320/am2320.chart.py + src/collectors/python.d.plugin/go_expvar/go_expvar.chart.py + src/collectors/python.d.plugin/haproxy/haproxy.chart.py + src/collectors/python.d.plugin/pandas/pandas.chart.py + src/collectors/python.d.plugin/traefik/traefik.chart.py + COMPONENT plugin-pythond + DESTINATION usr/libexec/netdata/python.d) + + if(BUILD_FOR_PACKAGING) + install(FILES + ${PKG_FILES_PATH}/copyright + COMPONENT plugin-pythond + DESTINATION usr/share/doc/netdata-plugin-pythond) + endif() endif() # @@ -2845,6 +3176,8 @@ install(PROGRAMS ${CMAKE_BINARY_DIR}/src/collectors/ioping.plugin/ioping.plugin # go.d.plugin # if(ENABLE_PLUGIN_GO) + install(DIRECTORY COMPONENT plugin-go DESTINATION etc/netdata/go.d) + install(FILES src/go/plugin/go.d/config/go.d.conf COMPONENT plugin-go DESTINATION usr/lib/netdata/conf.d) @@ -2877,129 +3210,76 @@ endif() # dashboard # -include(src/web/gui/v1/dashboard_v1.cmake) -include(src/web/gui/v2/dashboard_v2.cmake) -include(src/web/gui/gui.cmake) +if(ENABLE_DASHBOARD) + include(NetdataDashboard) + bundle_dashboard() -function(cat IN_FILE OUT_FILE) - file(READ ${IN_FILE} CONTENTS) - file(APPEND ${OUT_FILE} "${CONTENTS}") -endfunction() + include(src/web/gui/v1/dashboard_v1.cmake) + include(src/web/gui/gui.cmake) -file(WRITE ${CMAKE_BINARY_DIR}/src/web/gui/dashboard.js.in "") -foreach(JS_FILE ${DASHBOARD_JS_FILES}) - cat(${JS_FILE} ${CMAKE_BINARY_DIR}/dashboard.js.in) -endforeach() -configure_file(${CMAKE_BINARY_DIR}/dashboard.js.in - ${CMAKE_BINARY_DIR}/dashboard.js COPYONLY) + function(cat IN_FILE OUT_FILE) + file(READ ${IN_FILE} CONTENTS) + file(APPEND ${OUT_FILE} "${CONTENTS}") + endfunction() -install(FILES - ${CMAKE_BINARY_DIR}/dashboard.js - COMPONENT netdata - DESTINATION ${WEB_DEST}) + file(WRITE ${CMAKE_BINARY_DIR}/src/web/gui/dashboard.js.in "") -install(FILES - src/web/gui/dashboard_info_custom_example.js - src/web/gui/dashboard_info.js - src/web/gui/index.html - src/web/gui/main.css - src/web/gui/main.js - src/web/gui/registry-access.html - src/web/gui/registry-alert-redirect.html - src/web/gui/registry-hello.html - src/web/gui/switch.html - src/web/gui/ilove.html - COMPONENT netdata - DESTINATION ${WEB_DEST}) + foreach(JS_FILE ${DASHBOARD_JS_FILES}) + cat(${JS_FILE} ${CMAKE_BINARY_DIR}/dashboard.js.in) + endforeach() -install(FILES - src/web/gui/old/index.html - COMPONENT netdata - DESTINATION ${WEB_DEST}/old) + configure_file(${CMAKE_BINARY_DIR}/dashboard.js.in + ${CMAKE_BINARY_DIR}/dashboard.js COPYONLY) -install(FILES - src/web/gui/static/img/netdata-logomark.svg - COMPONENT 
netdata - DESTINATION ${WEB_DEST}/static/img) + install(FILES + ${CMAKE_BINARY_DIR}/dashboard.js + COMPONENT dashboard + DESTINATION ${WEB_DEST}) -install(FILES - src/web/gui/css/morris-0.5.1.css - src/web/gui/css/c3-0.4.18.min.css - COMPONENT netdata - DESTINATION ${WEB_DEST}/css) + install(FILES + src/web/gui/dashboard_info_custom_example.js + src/web/gui/dashboard_info.js + src/web/gui/main.css + src/web/gui/main.js + src/web/gui/switch.html + src/web/gui/ilove.html + COMPONENT dashboard + DESTINATION ${WEB_DEST}) -install(FILES - src/web/gui/.well-known/dnt/cookies - COMPONENT netdata - DESTINATION ${WEB_DEST}/.well-known/dnt) + install(FILES + src/web/gui/old/index.html + COMPONENT dashboard + DESTINATION ${WEB_DEST}/old) -if(NOT OS_WINDOWS) - # v0 dashboard - install(FILES - src/web/gui/v0/index.html - COMPONENT netdata - DESTINATION ${WEB_DEST}/v0) + install(FILES + src/web/gui/static/img/netdata-logomark.svg + COMPONENT dashboard + DESTINATION ${WEB_DEST}/static/img) + + install(FILES + src/web/gui/css/morris-0.5.1.css + src/web/gui/css/c3-0.4.18.min.css + COMPONENT dashboard + DESTINATION ${WEB_DEST}/css) + + install(FILES + src/web/gui/.well-known/dnt/cookies + COMPONENT dashboard + DESTINATION ${WEB_DEST}/.well-known/dnt) + + if(NOT OS_WINDOWS) + # v0 dashboard + install(FILES + src/web/gui/v0/index.html + COMPONENT dashboard + DESTINATION ${WEB_DEST}/v0) + endif() endif() if(OS_WINDOWS) - install(FILES /usr/bin/awk.exe - /usr/bin/bash.exe - /usr/bin/cat.exe - /usr/bin/chown.exe - /usr/bin/curl.exe - /usr/bin/env.exe - /usr/bin/grep.exe - /usr/bin/mkdir.exe - /usr/bin/openssl.exe - /usr/bin/rm.exe - /usr/bin/sed.exe - /usr/bin/sh.exe - /usr/bin/tail.exe - /usr/bin/tr.exe - /usr/bin/uuidgen.exe - /usr/bin/whoami.exe - DESTINATION "${BINDIR}") - - install(FILES /usr/bin/msys-2.0.dll - /usr/bin/msys-asn1-8.dll - /usr/bin/msys-brotlicommon-1.dll - /usr/bin/msys-brotlidec-1.dll - /usr/bin/msys-brotlienc-1.dll - /usr/bin/msys-com_err-1.dll - /usr/bin/msys-crypt-2.dll - /usr/bin/msys-crypto-3.dll - /usr/bin/msys-curl-4.dll - /usr/bin/msys-gcc_s-seh-1.dll - /usr/bin/msys-gmp-10.dll - /usr/bin/msys-gssapi-3.dll - /usr/bin/msys-hcrypto-4.dll - /usr/bin/msys-heimbase-1.dll - /usr/bin/msys-heimntlm-0.dll - /usr/bin/msys-hx509-5.dll - /usr/bin/msys-iconv-2.dll - /usr/bin/msys-idn2-0.dll - /usr/bin/msys-intl-8.dll - /usr/bin/msys-krb5-26.dll - /usr/bin/msys-lz4-1.dll - /usr/bin/msys-mpfr-6.dll - /usr/bin/msys-ncursesw6.dll - /usr/bin/msys-nghttp2-14.dll - /usr/bin/msys-pcre-1.dll - /usr/bin/msys-protobuf-32.dll - /usr/bin/msys-psl-5.dll - /usr/bin/msys-readline8.dll - /usr/bin/msys-roken-18.dll - /usr/bin/msys-sqlite3-0.dll - /usr/bin/msys-ssh2-1.dll - /usr/bin/msys-ssl-3.dll - /usr/bin/msys-stdc++-6.dll - /usr/bin/msys-unistring-5.dll - /usr/bin/msys-uuid-1.dll + install(FILES /usr/bin/msys-protobuf-32.dll /usr/bin/msys-uv-1.dll - /usr/bin/msys-wind-0.dll - /usr/bin/msys-z.dll - /usr/bin/msys-zstd-1.dll - DESTINATION "${BINDIR}") + DESTINATION "${BINDIR}") # Make bash & netdata happy install(DIRECTORY DESTINATION tmp) diff --git a/README.md b/README.md index 3cfdee14c..9fe095724 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,3 @@ -

Netdata @@ -7,7 +6,7 @@ Netdata

-

Monitor your servers, containers, and applications,
in high-resolution and in real-time.

+

Monitor your servers, containers, and applications
in high-resolution and in real-time.


@@ -30,51 +29,51 @@ MENU: **[GETTING STARTED](#getting-started)** | **[HOW IT WORKS](#how-it-works)* > **Important** :bulb:
> People get addicted to Netdata. Once you use it on your systems, **there's no going back!**
-**Netdata** is a high-performance, cloud-native, and on-premises observability platform designed to monitor metrics and logs with unparalleled efficiency. It delivers a simpler, faster, and significantly easier approach to real-time, low-latency monitoring for systems, containers, and applications. +**Netdata** is a high-performance, cloud-native, and on-premises observability platform designed to monitor metrics and logs with unparalleled efficiency. It delivers a simpler, faster, and significantly easier approach to real-time, low-latency monitoring for systems, containers, and applications. Netdata requires **zero-configuration** to get started, offering a powerful and comprehensive monitoring experience, out of the box. -What sets Netdata apart is its **cost-efficient, distributed design**. Unlike traditional monitoring solutions that centralize data, **Netdata distributes the code**. Instead of funneling all data into a few central databases, Netdata processes data at the edge, keeping it close to the source. The smart open-source Netdata Agent acts as a distributed database, enabling the construction of complex observability pipelines with modular, Lego-like simplicity. +Netdata is also known for its **cost-efficient, distributed design**. Unlike traditional monitoring solutions that centralize data, **Netdata distributes the code**. Instead of funneling all data into a few central databases, Netdata processes data at the edge, keeping it close to the source. The smart open-source Netdata Agent acts as a distributed database, enabling the construction of complex observability pipelines with modular, Lego-like simplicity. -Netdata also incorporates **A.I. insights** for all monitored data, training machine learning models directly at the edge. This allows for fully automated and unsupervised anomaly detection, and with the provided APIs and UIs, users can quickly spot correlations and gain deeper insights. +Netdata provides **A.I. insights** for all monitored data, training machine learning models directly at the edge. This allows for fully automated and unsupervised anomaly detection, and with its intuitive APIs and UIs, users can quickly perform root cause analysis and troubleshoot issues, identifying correlations and gaining deeper insights into their infrastructure. ### The Netdata Ecosystem -Netdata is built on three core components: - -1. **Netdata Agent** (usually called just "Netdata"): This open-source component is the heart of the Netdata ecosystem, handling data collection, storage (embedded database), querying, machine learning, exporting, and alerting of observability data. All observability data and features a Netdata ecosystem offers, are managed by the Netdata Agent. It runs in physical and virtual servers, cloud environments, Kubernetes clusters, and edge/IoT devices and is carefully optimized to be a _**polite citizen**_ for production systems and applications. - - Netdata Agent License: GPL v3+ CII Best Practices Coverity Scan - -2. **Netdata Cloud**: Enhancing the Netdata Agent, Netdata Cloud offers enterprise features such as user management, role-based access control, horizontal scalability, alert and notification management, access from anywhere, and more. Netdata Cloud does **not** centralize or store observability data. - - _Netdata Cloud is a commercial product, available as an on-premises installation, or a SaaS solution, with a free community tier._ - -3. **Netdata UI**: The user interface that powers all dashboards, data visualization, and configuration. 
- - _While closed-source, it is free to use with both Netdata Agents and Netdata Cloud, via their public APIs. It is included in the binary packages offered by Netdata and its latest version is publicly available via a CDN._ - - Netdata UI License: NCUL1 - +Netdata is built on three core parts: + +1. **Netdata Agent** (usually called just "Netdata"): This open-source component is the heart of the Netdata ecosystem, handling data collection, storage (embedded database), querying, machine learning, exporting, and alerting of observability data. All the observability data and features the Netdata ecosystem offers are managed by the Netdata Agent. It runs in physical and virtual servers, cloud environments, Kubernetes clusters, and edge/IoT devices and is carefully optimized to have _**zero impact**_ on production systems and applications. + + Netdata Agent License: GPL v3+ CII Best Practices Coverity Scan + +2. **Netdata Cloud**: Enhancing the Netdata Agent, Netdata Cloud offers enterprise features such as user management, role-based access control, horizontal scalability, alert and notification management, access from anywhere, and more. Netdata Cloud does **not** centralize or store observability data. + + _Netdata Cloud is a commercial product, available as an on-premises installation, or a SaaS solution, with a free community tier._ + +3. **Netdata UI**: The user interface that powers all dashboards, data visualization, and configuration. + + _While closed-source, it is free to use with both Netdata Agents and Netdata Cloud, via their public APIs. It is included in the binary packages offered by Netdata, and its latest version is publicly available via CDN._ + + Netdata UI License: NCUL1 + Netdata scales effortlessly from a single server to thousands, even in complex, multi-cloud or hybrid environments, with the ability to retain data for years. ### Key characteristics of the Netdata Agent - :boom: **Collects data from 800+ integrations**
Operating system metrics, container metrics, virtual machines, hardware sensors, application metrics, OpenMetrics exporters, StatsD, and logs. OpenTelemetry is on its way (currently being developed)... - + - :muscle: **Real-Time, Low-Latency, High-Resolution**
All data are collected per second and are made available on the APIs for visualization immediately after data collection (1-second latency, data collection to visualization). - :face_in_clouds: **AI across the board**
Trains multiple Machine-Learning (ML) models at the edge for each metric collected, and uses AI to detect anomalies based on the past behavior of each metric. -- :scroll: **systemd-journald Logs**
+- :scroll: **systemd-journald Logs**
Includes tools to efficiently convert plain-text log files (text, csv, logfmt, json) to structured systemd-journald entries (`log2journal`, `systemd-cat-native`), and queries systemd-journal files directly, enabling powerful logs visualization dashboards. The Netdata Agents eliminate the need to centralize logs and provide all the functions to work with logs directly at the edge. - :star: **Lego-like Observability Pipelines**
Netdata Agents can be linked together (in parent-child relationships) to build observability centralization points within your infrastructure, allowing you to control data replication and retention at multiple levels. - :fire: **Fully Automated Powerful Visualization**
- Using the NIDL (Nodes, Instances, Dimensions & Labels) data model, the Netdata Agent enables the creation of fully automated dashboards, providing corellated visualization of all metrics, allowing you to understand any dataset at first sight, but also to filter, slice and dice the data directly on the dashboards, without the need to learn a query language. + Using the NIDL (Nodes, Instances, Dimensions & Labels) data model, the Netdata Agent enables the creation of fully automated dashboards, providing correlated visualization of all metrics, allowing you to understand any dataset at first sight, but also to filter, slice and dice the data directly on the dashboards, without the need to learn a query language. Note: the Netdata UI is closed-source, but free to use with Netdata Agents and Netdata Cloud. @@ -87,33 +86,28 @@ Netdata scales effortlessly from a single server to thousands, even in complex, - :star: **Open and Extensible**
Netdata is a modular platform that can be extended in all possible ways, and it also integrates nicely with other monitoring solutions. - ### What can be monitored with the Netdata Agent Netdata monitors all the following: -| Component | Linux | FreeBSD | macOS | Windows* | +| Component | Linux | FreeBSD | macOS | Windows | |------------------------------------------------------------------------------------------------------------:|:--------------------------------:|:-------:|:-----:|:--------------------------------:| | **System Resources**
CPU, Memory and system shared resources
| Full | Yes | Yes | Yes | -| **Storage**
Disks, Mount points, Filesystems, RAID arrays
| Full | Basic | Basic | Basic | -| **Network**
Network Interfaces, Protocols, Firewall, etc
| Full | Basic | Basic | Basic | -| **Hardware & Sensors**
Fans, Temperatures, Controllers, GPUs, etc
| Full | Some | Some | Some | -| **O/S Services**
Resources, Performance and Status
| Yes
`systemd`
| - | - | Basic | -| **Logs** | Yes
`systemd`-journal | - | - | - | +| **Storage**
Disks, Mount points, Filesystems, RAID arrays
| Full | Yes | Yes | Yes | +| **Network**
Network Interfaces, Protocols, Firewall, etc
| Full | Yes | Yes | Yes | +| **Hardware & Sensors**
Fans, Temperatures, Controllers, GPUs, etc
| Full | Some | Some | Some | +| **O/S Services**
Resources, Performance and Status
| Yes
`systemd`
| - | - | - | | **Processes**
Resources, Performance, OOM, and more
| Yes | Yes | Yes | Yes | +| System and Application **Logs** | Yes
`systemd`-journal | - | - | Yes
`Windows Event Log`, and
`Event Tracing for Windows`
| | **Network Connections**
Live TCP and UDP sockets per PID
| Yes | - | - | - | | **Containers**
Docker/containerd, LXC/LXD, Kubernetes, etc
| Yes | - | - | - | | **VMs** (from the host)
KVM, qemu, libvirt, Proxmox, etc
| Yes
`cgroups`
| - | - | Yes
`Hyper-V`
| | **Synthetic Checks**
Test APIs, TCP ports, Ping, Certificates, etc
| Yes | Yes | Yes | Yes | | **Packaged Applications**
nginx, apache, postgres, redis, mongodb,
and hundreds more
| Yes | Yes | Yes | Yes | -| **Cloud Provider Infrastructure**
AWS, GCP, Azure, and more
| Yes | Yes | Yes | Yes | -| **Custom Applications**
OpenMetrics, StatsD and soon OpenTelemetry
| Yes | Yes | Yes | Yes | - -When the Netdata Agent runs on Linux, it monitors every kernel feature available, providing full coverage of all kernel technologies that can be monitored. - -The Netdata Agent also provides full **enterprise hardware** coverage, monitoring all components that provide hardware error reporting, like PCI AER, RAM EDAC, IPMI, S.M.A.R.T., NVMe, Fans, Power, Voltages, and more. +| **Cloud Provider Infrastructure**
AWS, GCP, Azure, and more
| Yes | Yes | Yes | Yes | +| **Custom Applications**
OpenMetrics, StatsD and soon OpenTelemetry
| Yes | Yes | Yes | Yes | - - * The Netdata Agent runs on Linux, FreeBSD and macOS. For Windows, we currently rely on Windows Exporter (so a Netdata running on Linux, FreeBSD or macOS is required, next to the monitored Windows servers). However, a Windows version of the Netdata Agent is at its final state for release. +When the Netdata Agent runs on Linux, it monitors every kernel feature available, providing full coverage of all kernel technologies, and offers full **enterprise hardware** coverage, monitoring all components that provide hardware error reporting, like PCI AER, RAM EDAC, IPMI, S.M.A.R.T., NVMe, Fans, Power, Voltages, and more. --- @@ -133,10 +127,10 @@ Dec 11, 2023: [University of Amsterdam published a study](https://twitter.com/IM 1. **The impact of monitoring on the energy efficiency of Docker-based systems** 2. **The impact of monitoring on Docker-based systems?** -- 🚀 Netdata excels in energy efficiency: **"... Netdata being the most energy-efficient tool ..."**, as the study says. -- 🚀 Netdata excels in CPU Usage, RAM Usage and Execution Time, and has a similar impact in Network Traffic as Prometheus. +- 🚀 Netdata excels in energy efficiency: **"... Netdata is the most energy-efficient tool ..."**, as the study says. +- 🚀 Netdata excels in CPU Usage, RAM Usage and Execution Time, and has a similar impact on Network Traffic as Prometheus. -The study did not normalize the results based on the number of metrics collected. Given that Netdata usually collects significantly more metrics than the other tools, Netdata managed to outperform the other tools, while ingesting a much higher number of metrics. [Read the full study here](https://www.ivanomalavolta.com/files/papers/ICSOC_2023.pdf). +The study didn’t normalize the results based on the number of metrics collected. Given that Netdata usually collects significantly more metrics than the other tools, Netdata managed to outperform the other tools, while ingesting a much higher number of metrics. [Read the full study here](https://www.ivanomalavolta.com/files/papers/ICSOC_2023.pdf). --- @@ -191,98 +185,98 @@ ## Getting Started

- User base - Servers monitored - Sessions served - Docker Hub pulls + User base + Servers monitored + Sessions served + Docker Hub pulls
- New users today - New machines today - Sessions today - Docker Hub pulls today + New users today + New machines today + Sessions today + Docker Hub pulls today

### 1. **Install Netdata everywhere** :v: - - Netdata can be installed on all Linux, macOS, FreeBSD (and soon on Windows) systems. We provide binary packages for the most popular operating systems and package managers. - - Install on [Ubuntu, Debian CentOS, Fedora, Suse, Red Hat, Arch, Alpine, Gentoo, even BusyBox](https://learn.netdata.cloud/docs/installing/one-line-installer-for-all-linux-systems). - - Install with [Docker](/packaging/docker/README.md).
- Netdata is a [Verified Publisher on DockerHub](https://hub.docker.com/r/netdata/netdata) and our users enjoy free unlimited DockerHub pulls :heart_eyes:. - - Install on [macOS](https://learn.netdata.cloud/docs/installing/macos) :metal:. - - Install on [FreeBSD](https://learn.netdata.cloud/docs/installing/freebsd) and [pfSense](https://learn.netdata.cloud/docs/installing/pfsense). - - Install [from source](https://learn.netdata.cloud/docs/installing/build-the-netdata-agent-yourself/compile-from-source-code) ![github downloads](https://img.shields.io/github/downloads/netdata/netdata/total?color=success&logo=github) - - For Kubernetes deployments [check here](https://learn.netdata.cloud/docs/installation/install-on-specific-environments/kubernetes/). +Netdata can be installed on all Linux, macOS, FreeBSD (and soon on Windows) systems. We provide binary packages for the most popular operating systems and package managers. - Check also the [Netdata Deployment Guides](https://learn.netdata.cloud/docs/deployment-guides/) to decide how to deploy it in your infrastructure. +- Install on [Ubuntu, Debian, CentOS, Fedora, Suse, Red Hat, Arch, Alpine, Gentoo, even BusyBox](https://learn.netdata.cloud/docs/installing/one-line-installer-for-all-linux-systems). +- Install with [Docker](/packaging/docker/README.md).
+ Netdata is a [Verified Publisher on DockerHub](https://hub.docker.com/r/netdata/netdata) and our users enjoy free unlimited DockerHub pulls :heart_eyes:. +- Install on [macOS](https://learn.netdata.cloud/docs/installing/macos) :metal:. +- Install on [FreeBSD](https://learn.netdata.cloud/docs/installing/freebsd) and [pfSense](https://learn.netdata.cloud/docs/installing/pfsense). +- Install [from source](https://learn.netdata.cloud/docs/installing/build-the-netdata-agent-yourself/compile-from-source-code) ![github downloads](https://img.shields.io/github/downloads/netdata/netdata/total?color=success&logo=github) +- For Kubernetes deployments [check here](https://learn.netdata.cloud/docs/installation/install-on-specific-environments/kubernetes/). - By default, you will have immediately available a local dashboard. Netdata starts a web server for its dashboard at port `19999`. Open up your web browser of choice and -navigate to `http://NODE:19999`, replacing `NODE` with the IP address or hostname of your Agent. If installed on localhost, you can access it through `http://localhost:19999`. +Check also the [Netdata Deployment Guides](https://learn.netdata.cloud/docs/deployment-guides/) to decide how to deploy it in your infrastructure. - _Note: the binary packages we provide, install Netdata UI automatically. Netdata UI is closed-source, but free to use with Netdata Agents and Netdata Cloud._ +By default, a local dashboard is immediately available. Netdata starts a web server for its dashboard at port `19999`. Open up your web browser of choice and +navigate to `http://NODE:19999`, replacing `NODE` with the IP address or hostname of your Agent. If installed on localhost, you can access it through `http://localhost:19999`. + +_Note: the binary packages we provide install the Netdata UI automatically. Netdata UI is closed-source, but free to use with Netdata Agents and Netdata Cloud._ ### 2. **Configure Collectors** :boom: - Netdata auto-detects and auto-discovers most operating system data sources and applications. However, many data sources require some manual configuration, usually to allow Netdata to get access to the metrics. - - - For a detailed list of the 800+ collectors available, check [this guide](https://learn.netdata.cloud/docs/data-collection/). - - To monitor Windows servers and applications use [this guide](https://learn.netdata.cloud/docs/data-collection/monitor-anything/system-metrics/windows-machines).
Note that Netdata on Windows is at its final release stage, so at the next Netdata release Netdata will natively support Windows. - - To monitor SNMP devices check [this guide](https://learn.netdata.cloud/docs/data-collection/monitor-anything/networking/snmp). +Netdata auto-detects and auto-discovers most operating system data sources and applications. However, many data sources require some manual configuration, usually to allow Netdata to get access to the metrics. + +- For a detailed list of the 800+ collectors available, check [this guide](https://learn.netdata.cloud/docs/data-collection/). +- To monitor Windows servers and applications, use [this guide](https://learn.netdata.cloud/docs/data-collection/monitor-anything/system-metrics/windows-machines).
Note that Netdata on Windows is at its final release stage, so at the next Netdata release Netdata will natively support Windows. +- To monitor SNMP devices, check [this guide](https://learn.netdata.cloud/docs/data-collection/monitor-anything/networking/snmp). ### 3. **Configure Alert Notifications** :bell: - Netdata comes with hundreds of pre-configured alerts, that automatically check your metrics, immediately after they start getting collected. +Netdata comes with hundreds of pre-configured alerts that automatically check your metrics immediately after they start getting collected. - Netdata can dispatch alert notifications to multiple third party systems, including: `email`, `Alerta`, `AWS SNS`, `Discord`, `Dynatrace`, `flock`, `gotify`, `IRC`, `Matrix`, `MessageBird`, `Microsoft Teams`, `ntfy`, `OPSgenie`, `PagerDuty`, `Prowl`, `PushBullet`, `PushOver`, `RocketChat`, `Slack`, `SMS tools`, `Syslog`, `Telegram`, `Twilio`. +Netdata can dispatch alert notifications to multiple third party systems, including: `email`, `Alerta`, `AWS SNS`, `Discord`, `Dynatrace`, `flock`, `gotify`, `IRC`, `Matrix`, `MessageBird`, `Microsoft Teams`, `ntfy`, `OPSgenie`, `PagerDuty`, `Prowl`, `PushBullet`, `PushOver`, `RocketChat`, `Slack`, `SMS tools`, `Syslog`, `Telegram`, `Twilio`. - By default, Netdata will send e-mail notifications, if there is a configured MTA on the system. +By default, Netdata will send e-mail notifications if there is a configured MTA on the system. ### 4. **Configure Netdata Parents** :family: - Optionally, configure one or more Netdata Parents. A Netdata Parent is a Netdata Agent that has been configured to accept [streaming connections](https://learn.netdata.cloud/docs/streaming/streaming-configuration-reference) from other Netdata agents. - - Netdata Parents provide: - - - **Infrastructure level dashboards, at `http://parent.server.ip:19999/`.**
- - Each Netdata Agent has an API listening at the TCP port 19999 of each server. - When you hit that port with a web browser (e.g. `http://server.ip:19999/`), the Netdata Agent UI is presented. - When the Netdata Agent is also a Parent, the UI of the Parent includes data for all nodes that stream metrics to that Parent. - - - **Increased retention for all metrics of all your nodes.**
- - Each Netdata Agent maintains each own database of metrics. But Parents can be given additional resources to maintain a much longer database than - individual Netdata Agents. - - - **Central configuration of alerts and dispatch of notifications.**
- - Using Netdata Parents, all the alert notifications integrations can be configured only once, at the Parent and they can be disabled at the Netdata Agents. - - You can also use Netdata Parents to: - - - Offload your production systems (the parents run ML, alerts, queries, etc. for all their children) - - Secure your production systems (the parents accept user connections, for all their children) +Optionally, configure one or more Netdata Parents. A Netdata Parent is a Netdata Agent that has been configured to accept [streaming connections](https://learn.netdata.cloud/docs/streaming/streaming-configuration-reference) from other Netdata agents. + +Netdata Parents provide: + +- **Infrastructure level dashboards, at `http://parent.server.ip:19999/`.**
+ + Each Netdata Agent has an API listening on TCP port 19999 on each server. + When you hit that port with a web browser (e.g. `http://server.ip:19999/`), the Netdata Agent UI is presented. + When the Netdata Agent is also a Parent, the UI of the Parent includes data for all nodes that stream metrics to that Parent. + +- **Increased retention for all metrics of all your nodes.**
+ + Each Netdata Agent maintains its own database of metrics. But Parents can be given additional resources to maintain a much longer database than + individual Netdata Agents. + +- **Central configuration of alerts and dispatch of notifications.**
+ + Using Netdata Parents, all the alert notification integrations can be configured only once, at the Parent, and they can be disabled at the Netdata Agents. + +You can also use Netdata Parents to: + +- Offload your production systems (the parents run ML, alerts, queries, etc. for all their children) +- Secure your production systems (the parents accept user connections for all their children) ### 5. **Connect to Netdata Cloud** :cloud: - [Sign-in](https://app.netdata.cloud/sign-in) to [Netdata Cloud](https://www.netdata.cloud/) and claim your Netdata Agents and Parents. - If you connect your Netdata Parents, there is no need to connect your Netdata Agents. They will be connected via the Parents. - - When your Netdata nodes are connected to Netdata Cloud, you can (on top of the above): - - - Access your Netdata agents from anywhere - - Access sensitive Netdata agent features (like "Netdata Functions": processes, systemd-journal) - - Organize your infra in spaces and Rooms - - Create, manage, and share **custom dashboards** - - Invite your team and assign roles to them (Role Based Access Control - RBAC) - - Get infinite horizontal scalability (multiple independent Netdata Agents are viewed as one infra) - - Configure alerts from the UI - - Configure data collection from the UI - - Netdata Mobile App notifications - - :love_you_gesture: Netdata Cloud does not prevent you from using your Netdata Agents and Parents directly, and vice versa.
- - :ok_hand: Your metrics are still stored in your network when you connect your Netdata Agents and Parents to Netdata Cloud. +[Sign-in](https://app.netdata.cloud/sign-in) to [Netdata Cloud](https://www.netdata.cloud/) and claim your Netdata Agents and Parents. +If you connect your Netdata Parents, there is no need to connect your Netdata Agents. They will be connected via the Parents. + +When your Netdata nodes are connected to Netdata Cloud, you can (on top of the above): + +- Access your Netdata agents from anywhere +- Access sensitive Netdata agent features (like "Netdata Functions": processes, systemd-journal) +- Organize your infra in spaces and Rooms +- Create, manage, and share **custom dashboards** +- Invite your team and assign roles to them (Role-Based Access Control) +- Get infinite horizontal scalability (multiple independent Netdata Agents are viewed as one infra) +- Configure alerts from the UI +- Configure data collection from the UI +- Netdata Mobile App notifications + +:love_you_gesture: Netdata Cloud doesn’t prevent you from using your Netdata Agents and Parents directly, and vice versa.
+ +:ok_hand: Your metrics are still stored in your network when you connect your Netdata Agents and Parents to Netdata Cloud.
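Whether you work through the local dashboard, a Parent, or Netdata Cloud, every node keeps answering on its own REST API, so you can also script against it directly. Below is a minimal sketch (not an official client): it assumes an agent reachable on the default `localhost:19999` and the common `system.cpu` chart, so adjust both for your setup.

```python
# Minimal sketch: query a Netdata Agent (or Parent) over its REST API.
# Assumes an agent reachable at NODE; stdlib only, no extra dependencies.
import json
import urllib.request

NODE = "http://localhost:19999"  # e.g. a Parent: http://parent.server.ip:19999

def get(path: str) -> dict:
    """Fetch one JSON document from the agent's v1 API."""
    with urllib.request.urlopen(f"{NODE}{path}", timeout=5) as resp:
        return json.load(resp)

# Identity / health of the node answering on this port.
info = get("/api/v1/info")
print("netdata version:", info.get("version"))

# Last 60 seconds of system CPU, reduced to 10 points.
res = get("/api/v1/data?chart=system.cpu&after=-60&points=10&format=json")
# With format=json the samples sit under result -> data,
# one row per point: [timestamp, dim1, dim2, ...].
for row in res.get("result", {}).get("data", []):
    print(row)
```

Pointing the same two calls at a Parent instead of a leaf node returns data for every child streaming to it, which is exactly the "infrastructure level dashboard" behavior described in step 4.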
@@ -304,26 +298,26 @@ Each Netdata Agent can perform the following functions: Netdata also collects your custom application metrics by scraping OpenMetrics exporters, or via StatsD. - It can convert web server log files to metrics and apply ML and alerts to them, in real-time. + It can convert web server log files to metrics and apply ML and alerts to them in real-time. And it also supports synthetic tests / white box tests, so you can ping servers, check API responses, or even check filesystem files and directories to generate metrics, train ML and run alerts and notifications on their status. - + 2. **`STORE` metrics to a database**
- Uses database engine plugins to store the collected data, either in memory and/or on disk. We have developed our own [`dbengine`](https://github.com/netdata/netdata/tree/master/src/database/engine#readme) for storing the data in a very efficient manner, allowing Netdata to have less than 1 byte per sample on disk and amazingly fast queries. - + Uses database engine plugins to store the collected data, either in memory and/or on disk. We have developed our own [`dbengine`](https://github.com/netdata/netdata/tree/master/src/database/engine#readme) for storing the data in a very efficient manner, allowing Netdata to have less than one byte per sample on disk and amazingly fast queries. + 3. **`LEARN` the behavior of metrics** (ML)
Trains multiple Machine-Learning (ML) models per metric to learn the behavior of each metric individually. Netdata uses the `kmeans` algorithm and creates by default a model per metric per hour, based on the values collected for that metric over the last 6 hours. The trained models are persisted to disk. - + 4. **`DETECT` anomalies in metrics** (ML)
Uses the trained machine learning (ML) models to detect outliers and mark collected samples as **anomalies**. Netdata stores anomaly information together with each sample and also streams it to Netdata Parents so that the anomaly is also available at query time for the whole retention of each metric. 5. **`CHECK` metrics and trigger alert notifications**
- Uses its configured alerts (you can configure your own) to check the metrics for common issues and uses notifications plugins to send alert notifications. + Uses its configured alerts (you can configure your own) to check the metrics for common issues and uses notification plugins to send alert notifications. 6. **`STREAM` metrics to other Netdata Agents**
Push metrics in real-time to Netdata Parents. -7. **`ARCHIVE` metrics to 3rd party databases**
+7. **`ARCHIVE` metrics to third party databases**
Export metrics to industry standard time-series databases, like `Prometheus`, `InfluxDB`, `OpenTSDB`, `Graphite`, etc. 8. **`QUERY` metrics and present dashboards**
@@ -337,28 +331,28 @@ When using Netdata Parents, all the functions of a Netdata Agent (except data co The core of Netdata is developed in C. We have our own `libnetdata`, which provides: - **`DICTIONARY`**
- A high-performance algorithm to maintain both indexed and ordered pools of structures Netdata needs. It uses JudyHS arrays for indexing, although it is modular: any hashtable or tree can be integrated into it. Despite being in C, dictionaries follow object-oriented programming principles, so there are constructors, destructors, automatic memory management, garbage collection, and more. For more see [here](https://github.com/netdata/netdata/tree/master/src/libnetdata/dictionary). - + A high-performance algorithm to maintain both indexed and ordered pools of structures Netdata needs. It uses JudyHS arrays for indexing, although it is modular: any hashtable or tree can be integrated into it. Despite being in C, dictionaries follow object-oriented programming principles, so there are constructors, destructors, automatic memory management, garbage collection, and more. For more, see [here](https://github.com/netdata/netdata/tree/master/src/libnetdata/dictionary). + - **`ARAL`**
- ARray ALlocator (ARAL) is used to minimize the system allocations made by Netdata. ARAL is optimized for maximum multithreaded performance. It also allows all structures that use it to be allocated in memory-mapped files (shared memory) instead of RAM. For more see [here](https://github.com/netdata/netdata/tree/master/src/libnetdata/aral). + ARray ALlocator (ARAL) is used to minimize the system allocations made by Netdata. ARAL is optimized for maximum multithreaded performance. It also allows all structures that use it to be allocated in memory-mapped files (shared memory) instead of RAM. For more, see [here](https://github.com/netdata/netdata/tree/master/src/libnetdata/aral). - **`PROCFILE`**
- A high-performance `/proc` (but also any) file parser and text tokenizer. It achieves its performance by keeping files open and adjusting its buffers to read the entire file in one call (which is also required by the Linux kernel). For more see [here](https://github.com/netdata/netdata/tree/master/src/libnetdata/procfile). + A high-performance `/proc` (but also any) file parser and text tokenizer. It achieves its performance by keeping files open and adjusting its buffers to read the entire file in one call (which is also required by the Linux kernel). For more, see [here](https://github.com/netdata/netdata/tree/master/src/libnetdata/procfile). - **`STRING`**
- A string internet mechanism, for string deduplication and indexing (using JudyHS arrays), optimized for multithreaded usage. For more see [here](https://github.com/netdata/netdata/tree/master/src/libnetdata/string). + A string interning mechanism, for string deduplication and indexing (using JudyHS arrays), optimized for multithreaded usage. For more, see [here](https://github.com/netdata/netdata/tree/master/src/libnetdata/string). - **`ARL`**
- Adaptive Resortable List (ARL), is a very fast list iterator, that keeps the expected items on the list in the same order they are found in input list. So, the first iteration is somewhat slower, but all the following iterations are perfectly aligned for best performance. For more see [here](https://github.com/netdata/netdata/tree/master/src/libnetdata/adaptive_resortable_list). + Adaptive Resortable List (ARL) is a very fast list iterator that keeps the expected items on the list in the same order they are found in the input list. So, the first iteration is somewhat slower, but all the following iterations are perfectly aligned for the best performance. For more, see [here](https://github.com/netdata/netdata/tree/master/src/libnetdata/adaptive_resortable_list). - **`BUFFER`**
- A flexible text buffer management system that allows Netdata to automatically handle dynamically sized text buffer allocations. The same mechanism is used for generating consistent JSON output by the Netdata APIs. For more see [here](https://github.com/netdata/netdata/tree/master/src/libnetdata/buffer). + A flexible text buffer management system that allows Netdata to automatically handle dynamically sized text buffer allocations. The same mechanism is used for generating consistent JSON output by the Netdata APIs. For more, see [here](https://github.com/netdata/netdata/tree/master/src/libnetdata/buffer). - **`SPINLOCK`**
Like POSIX `MUTEX` and `RWLOCK` but a lot faster, based on atomic operations, with significantly smaller memory impact, while being portable. - **`PGC`**
- A caching layer that can be used to cache any kind of time-related data, with automatic indexing (based on a tree of JudyL arrays), memory management, evictions, flushing, pressure management. This is extensively used in `dbengine`. For more see [here](/src/database/engine/README.md). + A caching layer that can be used to cache any kind of time-related data, with automatic indexing (based on a tree of JudyL arrays), memory management, evictions, flushing, and pressure management. This is extensively used in `dbengine`. For more, see [here](/src/database/engine/README.md). The above, and many more, allow Netdata developers to work on the application fast and with confidence. Most of the business logic in Netdata is a matter of combining the above. @@ -375,19 +369,19 @@ Of course, it is! We do our best to ensure it is!
Click to see detailed answer ...  
 
-We understand that Netdata is a software piece that is installed on millions of production systems across the world. So, it is important for us, Netdata to be as secure as possible: +We understand that Netdata is a piece of software installed on millions of production systems across the world. So, it is important to us that Netdata is as secure as possible: - - We follow the [Open Source Security Foundation](https://bestpractices.coreinfrastructure.org/en/projects/2231) best practices. - - We have given great attention to detail when it comes to security design. Check out our [security design](/docs/security-and-privacy-design/README.md). - - Netdata is a popular open-source project and is frequently tested by many security analysts. - - Check also our [security policies and advisories published so far](https://github.com/netdata/netdata/security). +- We follow the [Open Source Security Foundation](https://bestpractices.coreinfrastructure.org/en/projects/2231) best practices. +- We have given great attention to detail when it comes to security design. Check out our [security design](/docs/security-and-privacy-design/README.md). +- Netdata is a popular open-source project and is frequently tested by many security analysts. +- Check also our [security policies and advisories published so far](https://github.com/netdata/netdata/security).
 
### :cyclone: Will Netdata consume significant resources on my servers? -No. It will not! We promise this will be fast! +No, it will not! We promise this will be fast!
Click to see detailed answer ...  
 
@@ -396,11 +390,11 @@ Although each Netdata Agent is a complete monitoring solution packed into a sing This is what you should expect: - - For production systems, each Netdata Agent with default settings (everything enabled, ML, Health, DB) should consume about 5% CPU utilization of one core and about 150 MiB or RAM. +- For production systems, each Netdata Agent with default settings (everything enabled, ML, Health, DB) should consume about 5% CPU utilization of one core and about 150 MiB of RAM. + + By using a Netdata parent and streaming all metrics to that parent, you can disable ML & health and use an ephemeral DB (like `alloc`) on the children, leading to utilization of about 1% CPU of a single core and 100 MiB of RAM. Of course, these depend on how many metrics are collected. - By using a Netdata parent and streaming all metrics to that parent, you can disable ML & health and use an ephemeral DB mode (like `alloc`) on the children, leading to utilization of about 1% CPU of a single core and 100 MiB of RAM. Of course, these depend on how many metrics are collected. - - - For Netdata Parents, for about 1 to 2 million metrics, all collected every second, we suggest a server with 16 cores and 32GB RAM. Less than half of it will be used for data collection and ML. The rest will be available for queries. +- For Netdata Parents, for about 1 to 2 million metrics, all collected every second, we suggest a server with 16 cores and 32GB RAM. Less than half of it will be used for data collection and ML. The rest will be available for queries. Netdata has extensive internal instrumentation to help us reveal how the resources consumed are used. All these are available in the "Netdata Monitoring" section of the dashboard. Depending on your use case, there are many options to optimize resource consumption. @@ -416,18 +410,18 @@ As much as you need!
Click to see detailed answer ...  
 
-Netdata supports **tiering**, to downsample past data and save disk space. With default settings, it has 3 tiers: +Netdata supports **tiering**, to downsample past data and save disk space. With default settings, it has three tiers: - 1. `tier 0`, with high resolution, per-second, data. - 2. `tier 1`, mid-resolution, per minute, data. - 3. `tier 2`, low-resolution, per hour, data. +1. `tier 0`, high-resolution, per-second data. +2. `tier 1`, mid-resolution, per-minute data. +3. `tier 2`, low-resolution, per-hour data. -All tiers are updated in parallel during data collection. Just increase the disk space you give to Netdata to get a longer history for your metrics. Tiers are automatically chosen at query time depending on the time frame and the resolution requested. +All tiers are updated in parallel during data collection. Increase the disk space you give to Netdata to get a longer history for your metrics. Tiers are automatically chosen at query time depending on the time frame and the resolution requested.
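To get a feel for that disk-to-history trade-off, here is a hedged back-of-the-envelope sketch. The per-second, per-minute, and per-hour granularities are the defaults listed above; the bytes-per-sample figures and the disk split are purely illustrative assumptions (the `dbengine` is quoted earlier as needing less than one byte per sample on disk for tier 0), not official sizing guidance.

```python
# Back-of-the-envelope sketch: how much history fits in a given disk budget.
# Tier granularities follow the defaults above; bytes-per-sample and the
# disk split are illustrative assumptions, not official sizing guidance.
SECONDS_PER_DAY = 86_400
CONCURRENT_METRICS = 2_000          # assumed number of collected time series

# name: (sample interval in seconds, assumed bytes per sample, GiB budget)
TIERS = {
    "tier 0": (1,     1.0, 8),      # per-second data; docs: <1 byte/sample
    "tier 1": (60,    4.0, 2),      # per-minute aggregates
    "tier 2": (3_600, 4.0, 1),      # per-hour aggregates
}

for name, (interval, bytes_per_sample, gib) in TIERS.items():
    samples = gib * 1024**3 / bytes_per_sample       # samples that fit
    seconds = samples / CONCURRENT_METRICS * interval
    print(f"{name}: ~{seconds / SECONDS_PER_DAY:,.0f} days of history")
```

The shape of the result is the point: because higher tiers store orders of magnitude fewer samples per unit of time, a small slice of disk buys months or years of downsampled history.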
 
-### :rocket: Does it scale? I have really a lot of servers! +### :rocket: Does it scale? I really have a lot of servers! Netdata is designed to scale and can handle large volumes of data. @@ -437,11 +431,11 @@ Netdata is a distributed monitoring solution. You can scale it to infinity by sp With the streaming feature of the Agent, we can support monitoring ephemeral servers but also allow the creation of "monitoring islands" where metrics are aggregated to a few servers (Netdata Parents) for increased retention, or for offloading production systems. - - :airplane: Netdata Parents provide great vertical scalability, so you can have as big parents as the CPU, RAM and Disk resources you can dedicate to them. In our lab we constantly stress test Netdata Parents with several million metrics collected per second, to ensure it is reliable, stable, and robust at scale. - - - :rocket: In addition, Netdata Cloud provides virtually unlimited horizontal scalability. It "merges" all the Netdata parents you have into one unified infrastructure at query time. Netdata Cloud itself is probably the biggest single installation monitoring platform ever created, currently monitoring about 100k online servers with about 10k servers changing state (added/removed) per day! +- :airplane: Netdata Parents provide great vertical scalability, so your Parents can be as big as the CPU, RAM, and disk resources you can dedicate to them. In our lab, we constantly stress test Netdata Parents with several million metrics collected per second, to ensure it is reliable, stable, and robust at scale. -Example: the following chart comes from a single Netdata Parent. As you can see on it, 244 nodes stream to it metrics of about 20k running containers. On this specific chart there are 3 dimensions per container, so a total of about 60k time-series queries are executed to present it. +- :rocket: In addition, Netdata Cloud provides virtually unlimited horizontal scalability. It "merges" all the Netdata parents you have into one unified infrastructure at query time. Netdata Cloud itself is probably the biggest single installation monitoring platform ever created, currently monitoring about 100k online servers with about 10k servers changing state (added/removed) per day! + +Example: the following chart comes from a single Netdata Parent. As you can see, 244 nodes stream metrics for about 20k running containers to it. On this specific chart, there are three dimensions per container, so a total of about 60k time-series queries are executed to present it. ![image](https://github.com/netdata/netdata/assets/2662304/33db4aed-86af-4018-a547-e70643308f25) @@ -465,10 +459,10 @@ Health Alerts and Machine-Learning run queries to evaluate their expressions and To make Netdata not use the disks at all, we suggest the following: - 1. Use database mode `alloc` or `ram` to disable writing metric data to disk. - 2. Configure streaming to push in real-time all metrics to a Netdata Parent. The Netdata Parent will maintain metrics on disk for this node. - 3. Disable ML and health on this node. The Netdata Parent will do them for this node. - 4. Use the Netdata Parent to access the dashboard. +1. Use database mode `alloc` or `ram` to disable writing metric data to disk. +2. Configure streaming to push in real-time all metrics to a Netdata Parent. The Netdata Parent will maintain metrics on disk for this node. +3. Disable ML and health on this node. The Netdata Parent will do them for this node. +4. Use the Netdata Parent to access the dashboard. 
Using the above, the Netdata Agent on your production system will not use a disk. @@ -479,30 +473,30 @@ Netdata is a "ready to use" monitoring solution. Prometheus and Grafana are tools to build your own monitoring solution. -Netdata is also a lot faster, requires significantly less resources and puts almost no stress on the server it runs. For a performance comparison check [this blog](https://blog.netdata.cloud/netdata-vs-prometheus-performance-analysis/). +Netdata is also a lot faster, requires significantly fewer resources and puts almost no stress on the server it runs on. For a performance comparison, check [this blog](https://blog.netdata.cloud/netdata-vs-prometheus-performance-analysis/).
Click to see detailed answer ...  
 
First, we have to say that Prometheus as a time-series database and Grafana as a visualizer are excellent tools for what they do. -However, we believe that such a setup is missing a key element: A Prometheus and Grafana setup assumes that you know everything about the metrics you collect and you understand deeply how they are structured, they should be queried and visualized. +However, we believe that such a setup is missing a key element: A Prometheus and Grafana setup assumes that you know everything about the metrics you collect, and that you understand deeply how they’re structured and how they should be queried and visualized. -In reality, this setup has a lot of problems. The vast number of technologies, operating systems, and applications we use in our modern stacks, makes it impossible for any single person to know and understand everything about anything. We get testimonials regularly from Netdata users across the biggest enterprises, that Netdata manages to reveal issues, anomalies and problems they were not aware of and they didn't even have the means to find or troubleshoot. +In reality, this setup has a lot of problems. The vast number of technologies, operating systems, and applications we use in our modern stacks makes it impossible for any single person to know and understand everything about anything. We regularly get testimonials from Netdata users across the biggest enterprises that Netdata manages to reveal issues, anomalies and problems they weren’t aware of, and they didn't even have the means to find or troubleshoot. So, the biggest difference between Netdata and Prometheus or Grafana is that we decided that the tool needs to have a much better understanding of the components, the applications, and the metrics it monitors. - - When compared to Prometheus, Netdata needs for each metric much more than just a name, some labels, and a value over time. A metric in Netdata is a structured entity that correlates with other metrics in a certain way and has specific attributes that depict how it should be organized, treated, queried, and visualized. We call this the NIDL (Nodes, Instances, Dimensions, Labels) framework. +- When compared to Prometheus, Netdata needs for each metric much more than just a name, some labels, and a value over time. A metric in Netdata is a structured entity that correlates with other metrics in a certain way and has specific attributes that depict how it should be organized, treated, queried, and visualized. We call this the NIDL (Nodes, Instances, Dimensions, Labels) framework. - Maintaining such an index is a challenge: first, because the raw metrics collected do not provide this information, so we have to add it, and second because we need to maintain this index for the lifetime of each metric, which with our current database retention, it is usually more than a year. + Maintaining such an index is a challenge: first, because the raw metrics collected do not provide this information, so we have to add it, and second because we need to maintain this index for the lifetime of each metric, which, with our current database retention, is usually more than a year. - At the same time, Netdata provides better retention than Prometheus due to database tiering, scales easier than Prometheus due to streaming, supports anomaly detection and it has a metrics scoring engine to find the needle in the haystack when needed. 
+ At the same time, Netdata provides better retention than Prometheus due to database tiering, scales more easily than Prometheus due to streaming, supports anomaly detection, and has a metrics scoring engine to find the needle in the haystack when needed. - - When compared to Grafana, Netdata is fully automated. Grafana has more customization capabilities than Netdata, but Netdata presents fully functional dashboards by itself and most importantly it gives you the means to understand, analyze, filter, slice and dice the data without the need for you to edit queries or be aware of any peculiarities the underlying metrics may have. +- When compared to Grafana, Netdata is fully automated. Grafana has more customization capabilities than Netdata, but Netdata presents fully functional dashboards by itself, and most importantly, it gives you the means to understand, analyze, filter, slice and dice the data without the need for you to edit queries or be aware of any peculiarities the underlying metrics may have. - Furthermore, to help you when you need to find the needle in the haystack, Netdata has advanced troubleshooting tools provided by the Netdata metrics scoring engine, that allows it to score metrics based on their anomaly rate, their differences or similarities for any given time frame. + Furthermore, to help you when you need to find the needle in the haystack, Netdata has advanced troubleshooting tools provided by the Netdata metrics scoring engine, which allows it to score metrics based on their anomaly rate and their differences or similarities for any given time frame. -Still, if you are already familiar with Prometheus and Grafana, Netdata integrates nicely with them, and we have reports from users who use Netdata with Prometheus and Grafana in production. +Still, if you’re already familiar with Prometheus and Grafana, Netdata integrates nicely with them, and we have reports from users who use Netdata with Prometheus and Grafana in production.
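One concrete form of that integration: every Netdata Agent exposes its currently collected metrics in the Prometheus text exposition format through its `allmetrics` API endpoint, which is what a Prometheus server would be pointed at to scrape a node. A minimal sketch of reading that endpoint, assuming a local agent on the default port:

```python
# Minimal sketch: read a Netdata Agent's metrics in Prometheus exposition
# format, from the same endpoint a Prometheus server would scrape.
import urllib.request

URL = "http://localhost:19999/api/v1/allmetrics?format=prometheus"

with urllib.request.urlopen(URL, timeout=5) as resp:
    text = resp.read().decode("utf-8")

# Each non-comment line is `metric{labels} value timestamp`.
samples = [l for l in text.splitlines() if l and not l.startswith("#")]
print(f"{len(samples)} samples; first few:")
for line in samples[:5]:
    print(line)
```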
 
@@ -514,11 +508,11 @@ With Netdata your data are always on-prem and your metrics are always high-resol
Click to see detailed answer ...  
 
-Most commercial monitoring providers face a significant challenge: they centralize all metrics to their infrastructure and this is, inevitably, expensive. It leads them to one or more of the following: +Most commercial monitoring providers face a significant challenge: they centralize all metrics to their infrastructure, and this is, inevitably, expensive. It leads them to one or more of the following: - 1. be unrealistically expensive - 2. limit the number of metrics they collect - 3. limit the resolution of the metrics they collect +1. be unrealistically expensive +2. limit the number of metrics they collect +3. limit the resolution of the metrics they collect As a result, they try to find a balance: collect the least possible data, but collect enough to have something useful out of it. @@ -528,33 +522,33 @@ This is why Netdata trains multiple machine-learning models per metric, based ex This is also why Netdata alerts are attached to components (instances) and are configured with dynamic thresholds and rolling windows, instead of static values. -The distributed nature of Netdata helps scale this approach: your data is spread inside your infrastructure, as close to the edge as possible. Netdata is not one data lane. Each Netdata Agent is a data lane and all of them together build a massive distributed metrics processing pipeline that ensures all your infrastructure components and applications are monitored and operating as they should. +The distributed nature of Netdata helps scale this approach: your data is spread inside your infrastructure, as close to the edge as possible. Netdata is not one data lane. Each Netdata Agent is a data lane, and all of them together build a massive distributed metrics processing pipeline that ensures all your infrastructure components and applications are monitored and operating as they should.  
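For intuition only, here is a conceptual sketch in Python of that train-on-each-metric's-own-data idea (Netdata's real models are trained in C inside the agent, per metric, using `kmeans` as described earlier; the window width, cluster count, and threshold below are illustrative assumptions, not Netdata's actual parameters). It trains k-means on sliding windows of one metric's recent samples and flags new windows that land unusually far from every learned cluster center:

```python
# Conceptual sketch of per-metric anomaly detection (illustrative only;
# Netdata's real models are trained in C inside the agent). Train k-means
# on sliding windows of one metric's recent history, then flag new windows
# that are unusually far from every learned cluster center.
import numpy as np
from sklearn.cluster import KMeans

def windows(series: np.ndarray, width: int = 6) -> np.ndarray:
    """Overlapping sliding-window feature vectors from a 1-D series."""
    return np.lib.stride_tricks.sliding_window_view(series, width)

rng = np.random.default_rng(0)
train = rng.normal(50.0, 2.0, 6 * 3600)   # "6 hours" of per-second samples

model = KMeans(n_clusters=2, n_init=10, random_state=0).fit(windows(train))

# Anomaly score = distance to the nearest centroid; the 99th-percentile
# cut-off is an arbitrary illustrative choice, not Netdata's rule.
threshold = np.percentile(model.transform(windows(train)).min(axis=1), 99)

new = np.concatenate([rng.normal(50.0, 2.0, 60),
                      rng.normal(80.0, 2.0, 10)])  # an injected spike
scores = model.transform(windows(new)).min(axis=1)
print(f"{int((scores > threshold).sum())} of {len(scores)} windows anomalous")
```

Because the model is trained only on that one metric's history, nothing about the fleet has to be centralized: each data lane learns and scores its own behavior, which is exactly why this design stays cheap at the edge.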
 
### :raised_eyebrow: How is Netdata different from Nagios, Icinga, Zabbix, etc.? -Netdata offers real-time, comprehensive monitoring and the ability to monitor everything, without any custom configuration required. +Netdata offers real-time, comprehensive monitoring and the ability to monitor everything without any custom configuration required.
Click to see detailed answer ...  
 
-While Nagios, Icinga, Zabbix, and other similar tools are powerful and highly customizable, they can be complex to set up and manage. Their flexibility often comes at the cost of ease-of-use, especially for users who are not systems administrators or do not have extensive experience with these tools. Additionally, these tools generally require you to know what you want to monitor in advance and configure it explicitly. +While Nagios, Icinga, Zabbix, and other similar tools are powerful and highly customizable, they can be complex to set up and manage. Their flexibility often comes at the cost of ease of use, especially for users who aren’t systems administrators or don’t have extensive experience with these tools. Additionally, these tools generally require you to know what you want to monitor in advance and configure it explicitly. -Netdata, on the other hand, takes a different approach. It provides a "ready to use" monitoring solution with a focus on simplicity and comprehensiveness. It automatically detects and starts monitoring many different system metrics and applications out-of-the-box, without any need for custom configuration. +Netdata, on the other hand, takes a different approach. It provides a "ready to use" monitoring solution with a focus on simplicity and comprehensiveness. It automatically detects and starts monitoring many different system metrics and applications out-of-the-box, without any need for custom configuration. In comparison to these traditional monitoring tools, Netdata: - - Provides real-time, high-resolution metrics, as opposed to the often minute-level granularity that tools like Nagios, Icinga, and Zabbix provide. +- Provides real-time, high-resolution metrics, as opposed to the often minute-level granularity that tools like Nagios, Icinga, and Zabbix provide. - - Automatically generates meaningful, organized, and interactive visualizations of the collected data. Unlike other tools, where you have to manually create and organize graphs and dashboards, Netdata takes care of this for you. +- Automatically generates meaningful, organized, and interactive visualizations of the collected data. Unlike other tools, where you have to manually create and organize graphs and dashboards, Netdata takes care of this for you. - - Applies machine learning to each individual metric to detect anomalies, providing more insightful and relevant alerts than static thresholds. +- Applies machine learning to each individual metric to detect anomalies, providing more insightful and relevant alerts than static thresholds. - - Is designed to be distributed, so your data is spread inside your infrastructure, as close to the edge as possible. This approach is more scalable and avoids the potential bottleneck of a single centralized server. +- Is designed to be distributed, so your data is spread inside your infrastructure, as close to the edge as possible. This approach is more scalable and avoids the potential bottleneck of a single centralized server. - - Has a more modern and user-friendly interface, making it easy for anyone, not just experienced administrators, to understand the health and performance of their systems. +- Has a more modern and user-friendly interface, allowing anyone, not just experienced administrators, to easily assess the health and performance of their systems. 
Even if you're already using Nagios, Icinga, Zabbix, or similar tools, you can use Netdata alongside them to augment your existing monitoring capabilities with real-time insights and user-friendly dashboards. @@ -563,7 +557,7 @@ Even if you're already using Nagios, Icinga, Zabbix, or similar tools, you can u ### :flushed: I feel overwhelmed by the amount of information in Netdata. What should I do? -Netdata is designed to provide comprehensive insights, but we understand that the richness of information might sometimes feel overwhelming. Here are some tips on how to navigate and utilize Netdata effectively... +Netdata is designed to provide comprehensive insights, but we understand that the richness of information might sometimes feel overwhelming. Here are some tips on how to navigate and use Netdata effectively...
Click to see detailed answer ...  
 
@@ -572,20 +566,20 @@ Netdata is indeed a very comprehensive monitoring tool. It's designed to provide Here are some suggestions on how to manage and navigate this wealth of information: - 1. **Start with the Metrics Dashboard**
- Netdata's Metrics Dashboard provides a high-level summary of your system's status. We have added summary tiles on almost every section, you reveal the information that is more important. This is a great place to start, as it can help you identify any major issues or trends at a glance. +1. **Start with the Metrics Dashboard**
+ Netdata's Metrics Dashboard provides a high-level summary of your system's status. We have added summary tiles on almost every section, so you can reveal the information that is most important to you. This is a great place to start, as it can help you identify any major issues or trends at a glance. - 2. **Use the Search Feature**
- If you're looking for specific information, you can use the search feature to find the relevant metrics or charts. This can help you avoid scrolling through all the data. +2. **Use the Search Feature**
+ If you're looking for specific information, you can use the search feature to find the relevant metrics or charts. This can help you avoid scrolling through all the data. - 3. **Customize your Dashboards**
- Netdata allows you to create custom dashboards, which can help you focus on the metrics that are most important to you. Sign-in to Netdata and there you can have your custom dashboards. (coming soon to the agent dashboard too) +3. **Customize your Dashboards**
+ Netdata allows you to create custom dashboards, which can help you focus on the metrics that are most important to you. Sign in to Netdata to create and access your custom dashboards (coming soon to the Agent dashboard too). - 4. **Leverage Netdata's Anomaly Detection**
- Netdata uses machine learning to detect anomalies in your metrics. This can help you identify potential issues before they become major problems. We have added an `AR` button above the dashboard table of contents to reveal the anomaly rate per section so that you can easily spot what could need your attention. +4. **Leverage Netdata's Anomaly Detection**
+ Netdata uses machine learning to detect anomalies in your metrics. This can help you identify potential issues before they become major problems. We have added an `AR` button above the dashboard table of contents to reveal the anomaly rate per section so that you can spot what might need your attention. - 5. **Take Advantage of Netdata's Documentation and Blogs**
- Netdata has extensive documentation that can help you understand the different metrics and how to interpret them. You can also find tutorials, guides, and best practices there. +5. **Take Advantage of Netdata's Documentation and Blogs**
+ Netdata has extensive documentation that can help you understand the different metrics and how to interpret them. You can also find tutorials, guides, and best practices there. Remember, it's not necessary to understand every single metric or chart right away. Netdata is a powerful tool, and it can take some time to fully explore and understand all of its features. Start with the basics and gradually delve into more complex metrics as you become more comfortable with the tool. @@ -596,23 +590,23 @@ Remember, it's not necessary to understand every single metric or chart right aw Netdata Cloud delivers the full suite of features and functionality that Netdata offers, including a free community tier. -While our default onboarding process encourages users to take advantage of Netdata Cloud, including a complimentary one-month trial of our full business product, it is not mandatory. Users have the option to bypass this process entirely and still utilize the Netdata Agents along with the Netdata UI, without the need to sign up for Netdata Cloud. +While our default onboarding process encourages users to take advantage of Netdata Cloud, including a complimentary one-month trial of our full business product, it is not mandatory. Users can bypass this process entirely and still use the Netdata Agents along with the Netdata UI, without the need to sign up for Netdata Cloud.
Click to see detailed answer ...  
 
-The Netdata Agent dashboard and the Netdata Cloud dashboard are the same. Still, Netdata Cloud provides additional features, that the Netdata Agent is not capable of. These include: +The Netdata Agent dashboard and the Netdata Cloud dashboard are the same. Still, Netdata Cloud provides additional features that the Netdata Agent doesn’t offer. These include: - 1. Access your infrastructure from anywhere. - 2. Have SSO to protect sensitive features. - 3. Customizable (custom dashboards and other settings are persisted when you are signed in to Netdata Cloud) - 4. Configuration of Alerts and Data Collection from the UI - 5. Security (role-based access control - RBAC). - 6. Horizontal Scalability ("blend" multiple independent parents in one uniform infrastructure) - 7. Central Dispatch of Alert Notifications (even when multiple independent parents are involved) - 8. Mobile App for Alert Notifications +1. Access your infrastructure from anywhere. +2. Have SSO to protect sensitive features. +3. Customizable (custom dashboards and other settings are persisted when you’re signed in to Netdata Cloud) +4. Configuration of Alerts and Data Collection from the UI +5. Security (Role-Based Access Control) +6. Horizontal Scalability ("blend" multiple independent parents in one uniform infrastructure) +7. Central Dispatch of Alert Notifications (even when multiple independent parents are involved) +8. Mobile App for Alert Notifications -We encourage you to support Netdata by buying a Netdata Cloud subscription. A successful Netdata is a Netdata that evolves and gets improved to provide a simpler, faster and easier monitoring for all of us. +We encourage you to support Netdata by buying a Netdata Cloud subscription. A successful Netdata is a Netdata that evolves and gets improved to provide simpler, faster and easier monitoring for all of us. For organizations that need a fully on-prem solution, we provide Netdata Cloud for on-prem installation. [Contact us for more information](mailto:info@netdata.cloud). @@ -628,11 +622,11 @@ Should you wish to disable telemetry, instructions for doing so are provided in
Click to see detailed answer ...  
 
-Netdata is in a constant state of growth and evolution. The decisions that guide this development are ideally rooted in data. By analyzing anonymous telemetry data, we can answer questions such as: "What features are being used frequently?", "How do we prioritize between potential new features?" and "What elements of Netdata are most important to our users?" +Netdata is in a constant state of growth and evolution. The decisions that guide this development are ideally rooted in data. By analyzing anonymous telemetry data, we can answer questions such as "What features are being used frequently?", "How do we prioritize between potential new features?" and "What elements of Netdata are most important to our users?" By leaving anonymous telemetry enabled, users indirectly contribute to shaping Netdata's roadmap, providing invaluable information that helps us prioritize our efforts for the project and the community. -We are aware that for privacy or regulatory reasons, not all environments can allow telemetry. To cater to this, we have simplified the process of disabling telemetry: +We are aware that for privacy or regulatory reasons, not all environments can allow telemetry. To cater to this, we’ve simplified the process of disabling telemetry: - During installation, you can append `--disable-telemetry` to our `kickstart.sh` script, or - Create the file `/etc/netdata/.opt-out-from-anonymous-statistics` and then restart Netdata. @@ -641,7 +635,7 @@ These steps will disable the anonymous telemetry for your Netdata installation. Please note, even with telemetry disabled, Netdata still requires a [Netdata Registry](https://learn.netdata.cloud/docs/configuring/securing-netdata-agents/registry) for alert notifications' Call To Action (CTA) functionality. When you click an alert notification, it redirects you to the Netdata Registry, which then directs your web browser to the specific Netdata Agent that issued the alert for further troubleshooting. The Netdata Registry learns the URLs of your agents when you visit their dashboards. -Any Netdata Agent can act as a Netdata Registry. Simply designate one Netdata Agent as your registry, and our global Netdata Registry will no longer be in use. For further information on this, please refer to [this guide](https://learn.netdata.cloud/docs/configuring/securing-netdata-agents/registry). +Any Netdata Agent can act as a Netdata Registry. Designate one Netdata Agent as your registry, and our global Netdata Registry will no longer be in use. For further information on this, please refer to [this guide](https://learn.netdata.cloud/docs/configuring/securing-netdata-agents/registry).  
 
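As a concrete illustration of the two opt-out paths described above, here is a sketch. It assumes a systemd-managed service and the default `/etc/netdata` configuration directory; the kickstart script is the one referenced in the installation docs, assumed to be downloaded already:

```sh
# Option 1: disable telemetry at install time by passing the flag to kickstart.sh
# (assumes you have already downloaded the script as described in the install docs).
sh /tmp/netdata-kickstart.sh --disable-telemetry

# Option 2: opt out on an existing installation, then restart the Agent
# (restart command assumes a systemd-based host).
sudo touch /etc/netdata/.opt-out-from-anonymous-statistics
sudo systemctl restart netdata
```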
@@ -657,13 +651,13 @@ Browse the [Netdata stargazers on GitHub](https://github.com/netdata/netdata/sta Netdata also enjoys significant usage in academia, with notable institutions including New York University, Columbia University, New Jersey University, Seoul National University, University College London, among several others. -And, Netdata is also used by numerous governmental organizations worldwide. +Netdata is also used by many governmental organizations worldwide. In a nutshell, Netdata proves invaluable for: - **Infrastructure intensive organizations**
Such as hosting/cloud providers and companies with hundreds or thousands of nodes, who require a high-resolution, real-time monitoring solution for a comprehensive view of all their components and applications. - + - **Technology operators**
Those in need of a standardized, comprehensive solution for round-the-clock operations. Netdata not only facilitates operational automation and provides controlled access for their operations engineers, but also enhances skill development over time. @@ -699,14 +693,14 @@ However, as a privately funded company, we also need to monetize our open-source Traditionally, open-source projects have often used the open-core model, where a basic version of the software is open-source, and additional features are reserved for a commercial, closed-source version. This approach can limit access to advanced innovations, as most of these remain closed-source. -At Netdata, we take a slightly different path. We don't create a separate enterprise version of our product. Instead, all users—whether commercial or not—utilize the same Netdata Agent, ensuring that all our observability innovations are always open-source. +At Netdata, we take a slightly different path. We don't create a separate enterprise version of our product. Instead, all users - both commercial and non-commercial - use the same Netdata Agent, ensuring that all of our observability innovations are always open source. To experience the full capabilities of the Netdata ecosystem, users need to combine the open-source components with our closed-source offerings. The complete product still remains free to use. The closed-source components include: -- **Netdata UI**: This is closed-source but free to use with the Netdata Agents and Netdata Cloud. It’s also publicly available via a CDN. -- **Netdata Cloud**: A commercial product available both as an on-premises installation and as a SaaS solution, with a free community tier. +- **Netdata UI**: This is closed-source but free to use with the Netdata Agents and Netdata Cloud. It’s also publicly available via a CDN. +- **Netdata Cloud**: A commercial product available both as an on-premises installation and as a SaaS solution, with a free community tier. By balancing open-source and closed-source components, we ensure that all users have access to our innovations while sustaining our ability to grow and innovate as a company. @@ -724,7 +718,7 @@ Netdata generates revenue from these activities: 1. **Netdata Cloud Subscriptions**
Direct funding for our project's vision comes from users subscribing to Netdata Cloud's advanced features. - + 2. **Netdata Cloud On-Prem or Private**
Purchasing the on-premises or private versions of Netdata Cloud supports our financial growth. @@ -737,11 +731,11 @@ Our Open-Source Community and the free access to Netdata Cloud, contribute to Ne Feedback, especially issues and bug reports, is invaluable. It steers us towards a more resilient and efficient product. This, too, isn't a revenue source but is pivotal for our project's evolution. - **Anonymous Telemetry Insights**
Users who keep anonymous telemetry enabled, help us make data informed decisions in refining and enhancing Netdata. This isn't a revenue stream, but knowing which features are used and how, contributes in building a better product for everyone. + Users who keep anonymous telemetry enabled help us make data-informed decisions on refining and enhancing Netdata. This isn't a revenue stream, but knowing which features are used and how contributes to building a better product for everyone. -We don't monetize, directly or indirectly, users' or "device heuristics" data. Any data collected from community members are exclusively used for the purposes stated above. +We don't monetize, directly or indirectly, users' or "device heuristics" data. Any data collected from community members is exclusively used for the purposes stated above. -Netdata grows financially when technology intensive organizations and operators, need - due to regulatory or business requirements - the entire Netdata suite on-prem or private, bundled with top-tier support. It is a win-win case for all parties involved: these companies get a battle tested, robust and reliable solution, while the broader community that helps us build this product, enjoys it at no cost. +Netdata grows financially when technology-intensive organizations and operators need - due to regulatory or business requirements - the entire Netdata suite on-prem or private, bundled with top-tier support. It is a win-win case for all parties involved: these companies get a battle-tested, robust and reliable solution, while the broader community that helps us build this product enjoys it at no cost.
 
@@ -787,7 +781,7 @@ Contributions are essential to the success of open-source projects. In other wor What is a contribution? All the following are highly valuable to Netdata: -1. **Let us know of the best-practices you believe should be standardized**
+1. **Let us know of the best practices you believe should be standardized**
Netdata should out-of-the-box detect as many infrastructure issues as possible. By sharing your knowledge and experiences, you help us build a monitoring solution that has baked into it all the best-practices about infrastructure monitoring. 2. **Let us know if Netdata is not perfect for your use case**
@@ -796,10 +790,10 @@ What is a contribution? All the following are highly valuable to Netdata: Although we can't implement everything imaginable, we try to prioritize development on use-cases that are common to our community, are in the same direction we want Netdata to evolve and are aligned with our roadmap. 3. **Support other community members**
- Join our community on GitHub, Discord and Reddit. Generally, Netdata is relatively easy to set up and configure, but still people may need a little push in the right direction to use it effectively. Supporting other members is a great contribution by itself! + Join our community on GitHub, Discord, and Reddit. Generally, Netdata is relatively easy to set up and configure, but some people may still need a little push in the right direction to use it effectively. Supporting other members is a great contribution by itself! 4. **Add or improve integrations you need**
- Integrations tend to be easier and simpler to develop. If you would like to contribute your code to Netdata, we suggest that you start with the integrations you need, which Netdata does not currently support. + Integrations tend to be easier and simpler to develop. If you would like to contribute your code to Netdata, we suggest that you start with the integrations you need, which Netdata doesn’t currently support. General information about contributions: @@ -812,14 +806,14 @@ instructions on building each Netdata component from the source and preparing a ## License -The Netdata ecosystem is comprised of three key components: +The Netdata ecosystem consists of three key parts: + +- **Netdata Agent**: The heart of the Netdata ecosystem, the Netdata Agent is an open-source tool that must be installed on all systems monitored by Netdata. It offers a wide range of essential features, including data collection via various plugins, an embedded high-performance time-series database (dbengine), unsupervised anomaly detection powered by edge-trained machine learning, alerting and notifications, as well as query and scoring engines with associated APIs. Additionally, it supports exporting data to third-party monitoring systems, among other capabilities. + + The Netdata Agent is released under the [GPLv3+ license](https://github.com/netdata/netdata/blob/master/LICENSE) and redistributes several other open-source tools and libraries, which are listed in the [Netdata Agent third-party licenses](https://github.com/netdata/netdata/blob/master/REDISTRIBUTED.md). + +- **Netdata Cloud**: A commercial, closed-source component, Netdata Cloud enhances the capabilities of the open-source Netdata Agent by providing horizontal scalability, centralized alert notification dispatch (including a mobile app), user management, role-based access control, and other enterprise-grade features. It is available both as a SaaS solution and for on-premises deployment, with a free-to-use community tier also offered. -- **Netdata Agent**: The heart of the Netdata ecosystem, the Netdata Agent is an open-source tool that must be installed on all systems monitored by Netdata. It offers a wide range of essential features, including data collection via various plugins, an embedded high-performance time-series database (dbengine), unsupervised anomaly detection powered by edge-trained machine learning, alerting and notifications, as well as query and scoring engines with associated APIs. Additionally, it supports exporting data to third-party monitoring systems, among other capabilities. - - The Netdata Agent is released under the [GPLv3+ license](https://github.com/netdata/netdata/blob/master/LICENSE) and redistributes several other open-source tools and libraries, which are listed in the [Netdata Agent third-party licenses](https://github.com/netdata/netdata/blob/master/REDISTRIBUTED.md). - -- **Netdata Cloud**: A commercial, closed-source component, Netdata Cloud enhances the capabilities of the open-source Netdata Agent by providing horizontal scalability, centralized alert notification dispatch (including a mobile app), user management, role-based access control, and other enterprise-grade features. It is available both as a SaaS solution and for on-premises deployment, with a free-to-use community tier also offered. 
- -- **Netdata UI**: The Netdata UI is closed-source, and handles all visualization and dashboard functionalities related to metrics, logs and other collected data, as well as the central configuration and management of the Netdata ecosystem. It serves both the Netdata Agent and Netdata Cloud. The Netdata UI is distributed in binary form with the Netdata Agent and is publicly accessible via a CDN, licensed under the [Netdata Cloud UI License 1 (NCUL1)](https://github.com/netdata/netdata/blob/master/src/web/gui/v2/LICENSE.md). It integrates third-party open-source components, detailed in the [Netdata UI third-party licenses](https://github.com/netdata/netdata/blob/master/src/web/gui/v2/3D_PARTY_LICENSES.txt). +- **Netdata UI**: The Netdata UI is closed-source and handles all visualization and dashboard functionalities related to metrics, logs and other collected data, as well as the central configuration and management of the Netdata ecosystem. It serves both the Netdata Agent and Netdata Cloud. The Netdata UI is distributed in binary form with the Netdata Agent and is publicly accessible via a CDN, licensed under the [Netdata Cloud UI License 1 (NCUL1)](https://app.netdata.cloud/LICENSE.txt). It integrates third-party open-source components, detailed in the [Netdata UI third-party licenses](https://github.com/netdata/netdata/blob/master/src/web/gui/v2/3D_PARTY_LICENSES.txt). -The binary installation packages provided by Netdata include the Netdata Agent and the Netdata UI. Since the Netdata Agent is open-source, it is frequently packaged by third parties (e.g. Linux Distributions) excluding the closed-source components (Netdata UI is not included). While their packages can still be useful in providing the necessary back-ends and the APIs of a fully functional monitoring solution, we recommend using the installation packages we provide to experience the full feature set of Netdata. +The binary installation packages provided by Netdata include the Netdata Agent and the Netdata UI. Since the Netdata Agent is open-source, it is frequently packaged by third parties (e.g., Linux distributions), excluding the closed-source components (Netdata UI is not included). While their packages can still be useful in providing the necessary back-ends and the APIs of a fully functional monitoring solution, we recommend using the installation packages we provide to experience the full feature set of Netdata. diff --git a/REDISTRIBUTED.md b/REDISTRIBUTED.md index 5149127f6..f01b19f00 100644 --- a/REDISTRIBUTED.md +++ b/REDISTRIBUTED.md @@ -1,12 +1,3 @@ - - # Redistributed software Netdata copyright info:
@@ -16,183 +7,44 @@ Released under [GPL v3 or later](https://raw.githubusercontent.com/netdata/netda Netdata uses SPDX license tags to identify the license for its files. Individual licenses referenced in the tags are available on the [SPDX project site](http://spdx.org/licenses/). -Netdata redistributes the Netdata Cloud UI, licensed under [Netdata Cloud UI License v1.0 (NCUL1)](https://raw.githubusercontent.com/netdata/netdata/master/src/web/gui/v2/LICENSE.md). Netdata Cloud UI includes [third party open-source software](https://raw.githubusercontent.com/netdata/netdata/master/src/web/gui/v2/3D_PARTY_LICENSES.txt). +Netdata redistributes the Netdata Cloud UI, licensed under [Netdata Cloud UI License v1.0 (NCUL1)](https://app.netdata.cloud/LICENSE.txt). Netdata Cloud UI includes [third party open-source software](https://raw.githubusercontent.com/netdata/netdata/master/src/web/gui/v2/3D_PARTY_LICENSES.txt). Netdata redistributes the following third-party software. We have decided to redistribute all these, instead of using them through a CDN, to allow Netdata to work in cases where Internet connectivity is not available. -- [Dygraphs](http://dygraphs.com/) - - Copyright 2009, Dan Vanderkam - [MIT License](http://dygraphs.com/legal.html) - -- [Easy Pie Chart](https://rendro.github.io/easy-pie-chart/) - - Copyright 2013, Robert Fleischmann - [MIT License](https://github.com/rendro/easy-pie-chart/blob/master/LICENSE) - -- [Gauge.js](http://bernii.github.io/gauge.js/) - - Copyright, Bernard Kobos - [MIT License](https://github.com/getgauge/gauge-js/blob/master/LICENSE) - -- [d3pie](https://github.com/benkeen/d3pie) - - Copyright (c) 2014-2015 Benjamin Keen - [MIT License](https://github.com/benkeen/d3pie/blob/master/LICENSE) - -- [jQuery Sparklines](http://omnipotent.net/jquery.sparkline/) - - Copyright 2009-2012, Splunk Inc. 
- [New BSD License](http://opensource.org/licenses/BSD-3-Clause) - -- [Peity](http://benpickles.github.io/peity/) - - Copyright 2009-2015, Ben Pickles - [MIT License](https://github.com/benpickles/peity/blob/master/LICENCE) - -- [morris.js](http://morrisjs.github.io/morris.js/) - - Copyright 2013, Olly Smith - [Simplified BSD License](http://morrisjs.github.io/morris.js/) - -- [Raphaël](http://dmitrybaranovskiy.github.io/raphael/) - - Copyright 2008, Dmitry Baranovskiy - [MIT License](http://dmitrybaranovskiy.github.io/raphael/license.html) - -- [C3](http://c3js.org/) - - Copyright 2013, Masayuki Tanaka - [MIT License](https://github.com/masayuki0812/c3/blob/master/LICENSE) - -- [D3](http://d3js.org/) - - Copyright 2015, Mike Bostock - [BSD License](http://opensource.org/licenses/BSD-3-Clause) - -- [jQuery](https://jquery.org/) - - Copyright 2015, jQuery Foundation - [MIT License](https://jquery.org/license/) - -- [Bootstrap](http://getbootstrap.com/getting-started/) - - Copyright 2015, Twitter - [MIT License](https://github.com/twbs/bootstrap/blob/v4-dev/LICENSE) - -- [Bootstrap Toggle](http://www.bootstraptoggle.com/) - - Copyright (c) 2011-2014 Min Hur, The New York Times Company - [MIT License](https://github.com/minhur/bootstrap-toggle/blob/master/LICENSE) - -- [Bootstrap-slider](http://seiyria.com/bootstrap-slider/) - - Copyright 2017 Kyle Kemp, Rohit Kalkur, and contributors - [MIT License](https://github.com/seiyria/bootstrap-slider/blob/master/LICENSE.md) - -- [bootstrap-table](http://bootstrap-table.wenzhixin.net.cn/) - - Copyright (c) 2012-2016 Zhixin Wen [wenzhixin2010@gmail.com](mailto:wenzhixin2010@gmail.com) - [MIT License](https://github.com/wenzhixin/bootstrap-table/blob/master/LICENSE) - -- [tableExport.jquery.plugin](https://github.com/hhurz/tableExport.jquery.plugin) - - Copyright (c) 2015,2016 hhurz - [MIT License](https://github.com/hhurz/tableExport.jquery.plugin/blob/master/LICENSE) - -- [perfect-scrollbar](https://jamesflorentino.github.io/nanoScrollerJS/) - - Copyright 2016, Hyunje Alex Jun and other contributors - [MIT License](https://github.com/noraesae/perfect-scrollbar/blob/master/LICENSE) - -- [FontAwesome](https://github.com/FortAwesome/Font-Awesome) - - Created by Dave Gandy - Font license: [SIL OFL 1.1](http://scripts.sil.org/OFL) - Icon license [Creative Commons Attribution 4.0 (CC-BY 4.0)](https://creativecommons.org/licenses/by/4.0/) - Code license: [MIT License](http://opensource.org/licenses/mit-license.html) - -- [node-extend](https://github.com/justmoon/node-extend) - - Copyright 2014, Stefan Thomas - [MIT License](https://github.com/justmoon/node-extend/blob/master/LICENSE) - -- [node-net-snmp](https://github.com/stephenwvickers/node-net-snmp) - - Copyright 2013, Stephen Vickers - [MIT License](https://github.com/nospaceships/node-net-snmp#license) - -- [node-asn1-ber](https://github.com/stephenwvickers/node-asn1-ber) - - Copyright 2017, Stephen Vickers - Copyright 2011, Mark Cavage - [MIT License](https://github.com/nospaceships/node-asn1-ber#license) - -- [pixl-xml](https://github.com/jhuckaby/pixl-xml) - - Copyright 2015, Joseph Huckaby - [MIT License](https://github.com/jhuckaby/pixl-xml#license) - -- [sensors](https://github.com/paroj/sensors.py) - - Copyright 2014, Pavel Rojtberg - [LGPL 2.1 License](http://opensource.org/licenses/LGPL-2.1) - -- [PyYAML](https://pypi.org/project/PyYAML/) - - Copyright 2006, Kirill Simonov - [MIT License](https://github.com/yaml/pyyaml/blob/master/LICENSE) - -- [urllib3](https://github.com/shazow/urllib3) - - 
Copyright 2008-2016 Andrey Petrov and [contributors](https://github.com/shazow/urllib3/blob/master/CONTRIBUTORS.txt) - [MIT License](https://github.com/shazow/urllib3/blob/master/LICENSE.txt) - -- [lz-string](http://pieroxy.net/blog/pages/lz-string/index.html) - - Copyright 2013 Pieroxy - [WTFPL License](http://pieroxy.net/blog/pages/lz-string/index.html#inline_menu_10) - -- [pako](http://nodeca.github.io/pako/) - - Copyright 2014-2017 Vitaly Puzrin and Andrei Tuputcyn - [MIT License](https://github.com/nodeca/pako/blob/master/LICENSE) - -- [clipboard-polyfill](https://github.com/lgarron/clipboard-polyfill) - - Copyright (c) 2014 Lucas Garron - [MIT License](https://github.com/lgarron/clipboard-polyfill/blob/master/LICENSE.md) - -- [Utilities for writing code that runs on Python 2 and 3](https://raw.githubusercontent.com/netdata/netdata/master/src/collectors/python.d.plugin/python_modules/urllib3/packages/six.py) - - Copyright (c) 2010-2015 Benjamin Peterson - [MIT License](https://github.com/benjaminp/six/blob/master/LICENSE) - -- [mcrcon](https://github.com/barneygale/MCRcon) - - Copyright (C) 2015 Barnaby Gale - [MIT License](https://raw.githubusercontent.com/barneygale/MCRcon/master/COPYING.txt) - -- [monotonic](https://github.com/atdt/monotonic) - - Copyright 2014, 2015, 2016 Ori Livneh [ori@wikimedia.org](mailto:ori@wikimedia.org) - [Apache-2.0](http://www.apache.org/licenses/LICENSE-2.0) - -- [filelock](https://github.com/benediktschmitt/py-filelock) - - Copyright 2015, Benedikt Schmitt [Unlicense License](https://unlicense.org/) - -- [Kolmogorov-Smirnov distribution](http://simul.iro.umontreal.ca/ksdir/) - - Copyright March 2010 by Université de Montréal, Richard Simard and Pierre L'Ecuyer - [GPL 3.0](https://www.gnu.org/licenses/gpl-3.0.en.html) - -- [xxHash](https://github.com/Cyan4973/xxHash) - - Copyright (c) 2012-2021 Yann Collet - [BSD](https://github.com/Cyan4973/xxHash/blob/dev/LICENSE) - +| Name | Copyright | License | +|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| [Dygraphs](http://dygraphs.com/) | Copyright 2009, Dan Vanderkam | [MIT](http://dygraphs.com/legal.html) | +| [Easy Pie Chart](https://rendro.github.io/easy-pie-chart/) | Copyright 2013, Robert Fleischmann | [MIT](https://github.com/rendro/easy-pie-chart/blob/master/LICENSE) | +| [Gauge.js](http://bernii.github.io/gauge.js/) | Copyright, Bernard Kobos | [MIT](https://github.com/getgauge/gauge-js/blob/master/LICENSE) | +| [d3pie](https://github.com/benkeen/d3pie) | Copyright (c) 2014-2015 Benjamin Keen | [MIT](https://github.com/benkeen/d3pie/blob/master/LICENSE) | +| [jQuery Sparklines](http://omnipotent.net/jquery.sparkline/) | Copyright 2009-2012, Splunk Inc. 
| [New BSD](http://opensource.org/licenses/BSD-3-Clause) | +| [Peity](http://benpickles.github.io/peity/) | Copyright 2009-2015, Ben Pickles | [MIT](https://github.com/benpickles/peity/blob/master/LICENCE) | +| [morris.js](http://morrisjs.github.io/morris.js/) | Copyright 2013, Olly Smith | [Simplified BSD](http://morrisjs.github.io/morris.js/) | +| [Raphaël](http://dmitrybaranovskiy.github.io/raphael/) | Copyright 2008, Dmitry Baranovskiy | [MIT](http://dmitrybaranovskiy.github.io/raphael/license.html) | +| [C3](http://c3js.org/) | Copyright 2013, Masayuki Tanaka | [MIT](https://github.com/masayuki0812/c3/blob/master/LICENSE) | +| [D3](http://d3js.org/) | Copyright 2015, Mike Bostock | [BSD](http://opensource.org/licenses/BSD-3-Clause) | +| [jQuery](https://jquery.org/) | Copyright 2015, jQuery Foundation | [MIT](https://jquery.org/license/) | +| [Bootstrap](http://getbootstrap.com/getting-started/) | Copyright 2015, Twitter | [MIT](https://github.com/twbs/bootstrap/blob/v4-dev/LICENSE) | +| [Bootstrap Toggle](http://www.bootstraptoggle.com/) | Copyright (c) 2011-2014 Min Hur, The New York Times Company | [MIT](https://github.com/minhur/bootstrap-toggle/blob/master/LICENSE) | +| [Bootstrap-slider](http://seiyria.com/bootstrap-slider/) | Copyright 2017 Kyle Kemp, Rohit Kalkur, and contributors | [MIT](https://github.com/seiyria/bootstrap-slider/blob/master/LICENSE.md) | +| [bootstrap-table](http://bootstrap-table.wenzhixin.net.cn/) | Copyright (c) 2012-2016 Zhixin Wen | [MIT](https://github.com/wenzhixin/bootstrap-table/blob/master/LICENSE) | +| [tableExport.jquery.plugin](https://github.com/hhurz/tableExport.jquery.plugin) | Copyright (c) 2015,2016 hhurz | [MIT](https://github.com/hhurz/tableExport.jquery.plugin/blob/master/LICENSE) | +| [perfect-scrollbar](https://jamesflorentino.github.io/nanoScrollerJS/) | Copyright 2016, Hyunje Alex Jun and other contributors | [MIT](https://github.com/noraesae/perfect-scrollbar/blob/master/LICENSE) | +| [FontAwesome](https://github.com/FortAwesome/Font-Awesome) | Created by Dave Gandy | Font: [SIL OFL 1.1](http://scripts.sil.org/OFL), Icon: [Creative Commons Attribution 4.0 (CC-BY 4.0)](https://creativecommons.org/licenses/by/4.0/), Code: [MIT](http://opensource.org/licenses/mit-license.html) | +| [node-extend](https://github.com/justmoon/node-extend) | Copyright 2014, Stefan Thomas | [MIT](https://github.com/justmoon/node-extend/blob/master/LICENSE) | +| [pixl-xml](https://github.com/jhuckaby/pixl-xml) | Copyright 2015, Joseph Huckaby | [MIT](https://github.com/jhuckaby/pixl-xml#license) | +| [PyYAML](https://pypi.org/project/PyYAML/) | Copyright 2006, Kirill Simonov | [MIT](https://github.com/yaml/pyyaml/blob/master/LICENSE) | +| [urllib3](https://github.com/shazow/urllib3) | Copyright 2008-2016 Andrey Petrov and contributors | [MIT](https://github.com/shazow/urllib3/blob/master/LICENSE.txt) | +| [lz-string](http://pieroxy.net/blog/pages/lz-string/index.html) | Copyright 2013 Pieroxy | [WTFPL](http://pieroxy.net/blog/pages/lz-string/index.html#inline_menu_10) | +| [pako](http://nodeca.github.io/pako/) | Copyright 2014-2017 Vitaly Puzrin and Andrei Tuputcyn | [MIT](https://github.com/nodeca/pako/blob/master/LICENSE) | +| [clipboard-polyfill](https://github.com/lgarron/clipboard-polyfill) | Copyright (c) 2014 Lucas Garron | [MIT](https://github.com/lgarron/clipboard-polyfill/blob/master/LICENSE.md) | +| [Utilities for writing code that runs on Python 2 and 
3](https://raw.githubusercontent.com/netdata/netdata/master/src/collectors/python.d.plugin/python_modules/urllib3/packages/six.py) | Copyright (c) 2010-2015 Benjamin Peterson | [MIT](https://github.com/benjaminp/six/blob/master/LICENSE) | +| [monotonic](https://github.com/atdt/monotonic) | Copyright 2014, 2015, 2016 Ori Livneh | [Apache-2.0](http://www.apache.org/licenses/LICENSE-2.0) | +| [filelock](https://github.com/benediktschmitt/py-filelock) | Copyright 2015, Benedikt Schmitt | [Unlicense](https://unlicense.org/) | +| [Kolmogorov-Smirnov distribution](http://simul.iro.umontreal.ca/ksdir/) | Copyright March 2010 by Université de Montréal, Richard Simard and Pierre L'Ecuyer | [GPL 3.0](https://www.gnu.org/licenses/gpl-3.0.en.html) | +| [xxHash](https://github.com/Cyan4973/xxHash) | Copyright (c) 2012-2021 Yann Collet | [BSD](https://github.com/Cyan4973/xxHash/blob/dev/LICENSE) | +| [lmsensors](https://github.com/mdlayher/lmsensors) | Copyright 2016, Matt Layher | [MIT](https://github.com/mdlayher/lmsensors/blob/master/LICENSE.md) | diff --git a/docs/.templates/.page-level/_concept-page-template.md b/docs/.templates/.page-level/_concept-page-template.md index 685dd2ff3..d6c4babba 100644 --- a/docs/.templates/.page-level/_concept-page-template.md +++ b/docs/.templates/.page-level/_concept-page-template.md @@ -1,10 +1,3 @@ - - # Title Why should the reader care: “What’s in it for me?” diff --git a/docs/Demo-Sites.md b/docs/Demo-Sites.md index 291e3a5e3..91b9c514f 100644 --- a/docs/Demo-Sites.md +++ b/docs/Demo-Sites.md @@ -1,44 +1,34 @@ - # Live demos -See the live Netdata Cloud demo with Rooms (listed below) for specific use cases at **https://app.netdata.cloud/spaces/netdata-demo** +See the live Netdata Cloud demo with Rooms (listed below) for specific use cases at `https://app.netdata.cloud/spaces/netdata-demo` -| Location | Netdata Demo URL | 60 mins reqs | VM donated by | -| :------------------ | :-------------------------------------------------------------------------------------------------------------------------------------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| :------------------------------------------------- | -| Netdata Cloud | **[Netdata Demo - All nodes](https://app.netdata.cloud/spaces/netdata-demo/rooms/all-nodes/overview)** ||| -| Netdata Cloud | **[Netdata Demo - Active Directory](https://app.netdata.cloud/spaces/netdata-demo/rooms/active-directory/overview)** ||| -| Netdata Cloud | **[Netdata Demo - Apache](https://app.netdata.cloud/spaces/netdata-demo/rooms/apache/overview)** ||| -| Netdata Cloud | **[Netdata Demo - Cassandra](https://app.netdata.cloud/spaces/netdata-demo/rooms/cassandra/overview)** ||| -| Netdata Cloud | **[Netdata Demo - CoreDNS](https://app.netdata.cloud/spaces/netdata-demo/rooms/coredns/overview)** ||| -| Netdata Cloud | **[Netdata Demo - DNS Query](https://app.netdata.cloud/spaces/netdata-demo/rooms/dns-query/overview)** ||| -| Netdata Cloud | **[Netdata Demo - Docker](https://app.netdata.cloud/spaces/netdata-demo/rooms/docker/overview)** ||| -| Netdata Cloud | **[Netdata Demo - Host Reachability](https://app.netdata.cloud/spaces/netdata-demo/rooms/host-reachability/overview)** ||| -| Netdata Cloud | **[Netdata Demo - HTTP Endpoints](https://app.netdata.cloud/spaces/netdata-demo/rooms/http-endpoints/overview)** ||| -| 
Netdata Cloud | **[Netdata Demo - IIS](https://app.netdata.cloud/spaces/netdata-demo/rooms/iis/overview)** ||| -| Netdata Cloud | **[Netdata Demo - Kubernetes](https://app.netdata.cloud/spaces/netdata-demo/rooms/kubernetes/kubernetes)** ||| -| Netdata Cloud | **[Netdata Demo - Machine Learning](https://app.netdata.cloud/spaces/netdata-demo/rooms/machine-learning/overview)** ||| -| Netdata Cloud | **[Netdata Demo - MS Exchange](https://app.netdata.cloud/spaces/netdata-demo/rooms/ms-exchange/overview)** ||| -| Netdata Cloud | **[Netdata Demo - Nginx](https://app.netdata.cloud/spaces/netdata-demo/rooms/nginx/overview)** ||| -| Netdata Cloud | **[Netdata Demo - PostgreSQL](https://app.netdata.cloud/spaces/netdata-demo/rooms/postgresql/overview)** ||| -| Netdata Cloud | **[Netdata Demo - Redis](https://app.netdata.cloud/spaces/netdata-demo/rooms/redis/overview)** ||| -| Netdata Cloud | **[Netdata Demo - Windows](https://app.netdata.cloud/spaces/netdata-demo/rooms/windows/overview)** ||| -| London (UK) | **[london3.my-netdata.io](https://london3.my-netdata.io)**
(this is the global Netdata **registry** and has **named** and **mysql** charts) | [![Requests Per Second](https://london3.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://london3.my-netdata.io) | [DigitalOcean.com](https://m.do.co/c/83dc9f941745) | -| Atlanta (USA) | **[cdn77.my-netdata.io](https://cdn77.my-netdata.io)**
(with **named** and **mysql** charts) | [![Requests Per Second](https://cdn77.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://cdn77.my-netdata.io) | [CDN77.com](https://www.cdn77.com/) | -| Bangalore (India) | **[bangalore.my-netdata.io](https://bangalore.my-netdata.io)** | [![Requests Per Second](https://bangalore.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://bangalore.my-netdata.io) | [DigitalOcean.com](https://m.do.co/c/83dc9f941745) | -| Frankfurt (Germany) | **[frankfurt.my-netdata.io](https://frankfurt.my-netdata.io)** | [![Requests Per Second](https://frankfurt.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://frankfurt.my-netdata.io) | [DigitalOcean.com](https://m.do.co/c/83dc9f941745) | -| New York (USA) | **[newyork.my-netdata.io](https://newyork.my-netdata.io)** | [![Requests Per Second](https://newyork.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://newyork.my-netdata.io) | [DigitalOcean.com](https://m.do.co/c/83dc9f941745) | -| San Francisco (USA) | **[sanfrancisco.my-netdata.io](https://sanfrancisco.my-netdata.io)** | [![Requests Per Second](https://sanfrancisco.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://sanfrancisco.my-netdata.io) | [DigitalOcean.com](https://m.do.co/c/83dc9f941745) | -| Singapore | **[singapore.my-netdata.io](https://singapore.my-netdata.io)** | [![Requests Per Second](https://singapore.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://singapore.my-netdata.io) | [DigitalOcean.com](https://m.do.co/c/83dc9f941745) | -| Toronto (Canada) | **[toronto.my-netdata.io](https://toronto.my-netdata.io)** | [![Requests Per Second](https://toronto.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://toronto.my-netdata.io) | [DigitalOcean.com](https://m.do.co/c/83dc9f941745) | +| Location | Netdata Demo URL | 60 mins reqs | VM donated by | +|:--------------------|:------------------------------------------------------------------------------------------------------------------------------------------------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:---------------------------------------------------| +| Netdata Cloud | **[Netdata Demo - All nodes](https://app.netdata.cloud/spaces/netdata-demo/rooms/all-nodes/overview)** | | | +| Netdata Cloud | **[Netdata Demo - Active Directory](https://app.netdata.cloud/spaces/netdata-demo/rooms/active-directory/overview)** | | | +| Netdata Cloud | **[Netdata Demo - 
Apache](https://app.netdata.cloud/spaces/netdata-demo/rooms/apache/overview)** | | | +| Netdata Cloud | **[Netdata Demo - Cassandra](https://app.netdata.cloud/spaces/netdata-demo/rooms/cassandra/overview)** | | | +| Netdata Cloud | **[Netdata Demo - CoreDNS](https://app.netdata.cloud/spaces/netdata-demo/rooms/coredns/overview)** | | | +| Netdata Cloud | **[Netdata Demo - DNS Query](https://app.netdata.cloud/spaces/netdata-demo/rooms/dns-query/overview)** | | | +| Netdata Cloud | **[Netdata Demo - Docker](https://app.netdata.cloud/spaces/netdata-demo/rooms/docker/overview)** | | | +| Netdata Cloud | **[Netdata Demo - Host Reachability](https://app.netdata.cloud/spaces/netdata-demo/rooms/host-reachability/overview)** | | | +| Netdata Cloud | **[Netdata Demo - HTTP Endpoints](https://app.netdata.cloud/spaces/netdata-demo/rooms/http-endpoints/overview)** | | | +| Netdata Cloud | **[Netdata Demo - IIS](https://app.netdata.cloud/spaces/netdata-demo/rooms/iis/overview)** | | | +| Netdata Cloud | **[Netdata Demo - Kubernetes](https://app.netdata.cloud/spaces/netdata-demo/rooms/kubernetes/kubernetes)** | | | +| Netdata Cloud | **[Netdata Demo - Machine Learning](https://app.netdata.cloud/spaces/netdata-demo/rooms/machine-learning/overview)** | | | +| Netdata Cloud | **[Netdata Demo - MS Exchange](https://app.netdata.cloud/spaces/netdata-demo/rooms/ms-exchange/overview)** | | | +| Netdata Cloud | **[Netdata Demo - Nginx](https://app.netdata.cloud/spaces/netdata-demo/rooms/nginx/overview)** | | | +| Netdata Cloud | **[Netdata Demo - PostgreSQL](https://app.netdata.cloud/spaces/netdata-demo/rooms/postgresql/overview)** | | | +| Netdata Cloud | **[Netdata Demo - Redis](https://app.netdata.cloud/spaces/netdata-demo/rooms/redis/overview)** | | | +| Netdata Cloud | **[Netdata Demo - Windows](https://app.netdata.cloud/spaces/netdata-demo/rooms/windows/overview)** | | | +| London (UK) | **[london3.my-netdata.io](https://london3.my-netdata.io)**
(this is the global Netdata **registry** and has **named** and **mysql** charts) | [![Requests Per Second](https://london3.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://london3.my-netdata.io) | [DigitalOcean.com](https://m.do.co/c/83dc9f941745) | +| Atlanta (USA) | **[cdn77.my-netdata.io](https://cdn77.my-netdata.io)**
(with **named** and **mysql** charts) | [![Requests Per Second](https://cdn77.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://cdn77.my-netdata.io) | [CDN77.com](https://www.cdn77.com/) | +| Bangalore (India) | **[bangalore.my-netdata.io](https://bangalore.my-netdata.io)** | [![Requests Per Second](https://bangalore.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://bangalore.my-netdata.io) | [DigitalOcean.com](https://m.do.co/c/83dc9f941745) | +| Frankfurt (Germany) | **[frankfurt.my-netdata.io](https://frankfurt.my-netdata.io)** | [![Requests Per Second](https://frankfurt.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://frankfurt.my-netdata.io) | [DigitalOcean.com](https://m.do.co/c/83dc9f941745) | +| New York (USA) | **[newyork.my-netdata.io](https://newyork.my-netdata.io)** | [![Requests Per Second](https://newyork.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://newyork.my-netdata.io) | [DigitalOcean.com](https://m.do.co/c/83dc9f941745) | +| San Francisco (USA) | **[sanfrancisco.my-netdata.io](https://sanfrancisco.my-netdata.io)** | [![Requests Per Second](https://sanfrancisco.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://sanfrancisco.my-netdata.io) | [DigitalOcean.com](https://m.do.co/c/83dc9f941745) | +| Singapore | **[singapore.my-netdata.io](https://singapore.my-netdata.io)** | [![Requests Per Second](https://singapore.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://singapore.my-netdata.io) | [DigitalOcean.com](https://m.do.co/c/83dc9f941745) | +| Toronto (Canada) | **[toronto.my-netdata.io](https://toronto.my-netdata.io)** | [![Requests Per Second](https://toronto.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://toronto.my-netdata.io) | [DigitalOcean.com](https://m.do.co/c/83dc9f941745) | Netdata dashboards are mobile- and touch-friendly. diff --git a/docs/alerts-and-notifications/notifications/README.md b/docs/alerts-and-notifications/notifications/README.md index 3368b4e14..870076b97 100644 --- a/docs/alerts-and-notifications/notifications/README.md +++ b/docs/alerts-and-notifications/notifications/README.md @@ -2,6 +2,8 @@ This section includes the documentation of the integrations for both of Netdata's notification methods. + + - Netdata Cloud provides centralized alert notifications, utilizing the health status data already sent to Netdata Cloud from connected nodes to send alerts to configured integrations. [Supported integrations](/docs/alerts-&-notifications/notifications/centralized-cloud-notifications) include Amazon SNS, Discord, Slack, Splunk, and others. 
- The Netdata Agent offers a [wider range of notification options](/docs/alerts-&-notifications/notifications/agent-dispatched-notifications) directly from the agent itself. You can choose from over a dozen services, including email, Slack, PagerDuty, Twilio, and others, for more granular control over notifications on each node. diff --git a/docs/category-overview-pages/maintenance-operations-on-netdata-agents.md b/docs/category-overview-pages/maintenance-operations-on-netdata-agents.md index 1867d863f..e989abc85 100644 --- a/docs/category-overview-pages/maintenance-operations-on-netdata-agents.md +++ b/docs/category-overview-pages/maintenance-operations-on-netdata-agents.md @@ -1,8 +1,7 @@ -# Maintenance operations on Netdata Agents Overview +# Netdata Agent Maintenance Operations Overview -This section provides information on various actions you can take while maintaining a Netdata Agent. +This section provides information on various actions to maintain a Netdata Agent: -- [Starting and Stopping Netdata Agents](/docs/netdata-agent/start-stop-restart.md) -- [Update Netdata Agents](/packaging/installer/UPDATE.md) -- [Reinstall Netdata Agents](/packaging/installer/REINSTALL.md) -- [Uninstall Netdata Agents](/packaging/installer/UNINSTALL.md) +- [Service Control](/docs/netdata-agent/start-stop-restart.md) +- [Update](/packaging/installer/UPDATE.md) +- [Uninstall](/packaging/installer/UNINSTALL.md) diff --git a/docs/category-overview-pages/working-with-logs.md b/docs/category-overview-pages/working-with-logs.md index e1f027529..d28074d2e 100644 --- a/docs/category-overview-pages/working-with-logs.md +++ b/docs/category-overview-pages/working-with-logs.md @@ -6,4 +6,4 @@ The [systemd journal plugin](/src/collectors/systemd-journal.plugin/) is the cor For structured logs, Netdata provides tools like [log2journal](/src/collectors/log2journal/README.md) and [systemd-cat-native](/src/libnetdata/log/systemd-cat-native.md) to convert them into compatible systemd journal entries. -You can also find useful guides on how to set up log centralization points in the [Observability Cetralization Points](/docs/observability-centralization-points/README.md) section of our docs. +You can also find useful guides on how to set up log centralization points in the [Observability Centralization Points](/docs/observability-centralization-points/README.md) section of our docs. diff --git a/docs/dashboards-and-charts/README.md b/docs/dashboards-and-charts/README.md index 372f2030b..f94d776a3 100644 --- a/docs/dashboards-and-charts/README.md +++ b/docs/dashboards-and-charts/README.md @@ -35,6 +35,6 @@ You can access the dashboard at and [sign-in with a ### Netdata Agent -To view your Netdata dashboard, open a web browser and enter the address `http://NODE:19999` - replace `NODE` with your Agent's IP address or hostname. If the Agent is on the same machine, use http://localhost:19999. +To view your Netdata dashboard, open a web browser and enter the address `http://NODE:19999` - replace `NODE` with your Agent's IP address or hostname. If the Agent is on the same machine, use `http://localhost:19999`. Documentation for previous Agent dashboard can still be found [here](/src/web/gui/README.md). 
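Before opening the dashboard URL mentioned above, you can verify that an Agent is reachable by probing its API. A sketch, assuming a local Agent on the default port and `curl` available:

```sh
# A JSON response from the v1 info endpoint means the dashboard at
# http://localhost:19999 should be reachable too.
curl -sf http://localhost:19999/api/v1/info | head -c 300; echo
```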
diff --git a/docs/dashboards-and-charts/alerts-tab.md b/docs/dashboards-and-charts/alerts-tab.md index 00d3efcb7..66c019ec0 100644 --- a/docs/dashboards-and-charts/alerts-tab.md +++ b/docs/dashboards-and-charts/alerts-tab.md @@ -45,7 +45,7 @@ At the bottom of the panel you can click the green button "View alert page" to o ### Silence an alert -From this tab, the "Silencing" column shows if there is any rule present for each alert, and from the "Actions" column you can create a new [silencing rule](/docs/alerts-and-notifications/notifications/centralized-cloud-notifications/centralized-cloud-notifications-reference.md#alert-notifications-silencing-rules) for this alert, or get help and information about this alert from the [Netdata Assistant](/docs/netdata-assistant.md). +From this tab, the "Silencing" column shows if there is any rule present for each alert, and from the "Actions" column you can create a new [silencing rule](/docs/alerts-and-notifications/notifications/centralized-cloud-notifications/centralized-cloud-notifications-reference.md#alert-notification-silencing-rules) for this alert, or get help and information about this alert from the [Netdata Assistant](/docs/netdata-assistant.md). ## Alert Configurations tab diff --git a/docs/dashboards-and-charts/anomaly-advisor-tab.md b/docs/dashboards-and-charts/anomaly-advisor-tab.md index 51b58b23a..bf3243ef1 100644 --- a/docs/dashboards-and-charts/anomaly-advisor-tab.md +++ b/docs/dashboards-and-charts/anomaly-advisor-tab.md @@ -1,11 +1,10 @@ # Anomaly Advisor tab -The Anomaly Advisor tab lets you focus on potentially anomalous metrics and charts related to a particular highlighted window of interest. In addition to this tab, each chart in the [Metrics tab](/docs/dashboards-and-charts/metrics-tab-and-single-node-tabs.md) also has an [Anomaly Rate ribbon](/docs/dashboards-and-charts/netdata-charts.md#anomaly-rate-ribbon). +The Anomaly Advisor tab lets you focus on potentially anomalous metrics and charts related to a particular highlighted window of interest. In addition to this tab, each chart in the [Metrics tab](/docs/dashboards-and-charts/metrics-tab-and-single-node-tabs.md) also has an [Anomaly Rate ribbon](/docs/dashboards-and-charts/netdata-charts.md#anomaly-rate-ribbon). +More details about configuration can be found in the [ML documentation](/src/ml/README.md). -More details about configuration can be found in the [ML documentation](/src/ml/README.md#configuration). - -This tab uses our [Anomaly Rate ML feature](/src/ml/README.md#anomaly-rate---averageanomaly-bit) to score metrics in terms of anomalous behavior. +This tab uses our [Anomaly Rate ML feature](/src/ml/README.md#anomaly-bit) to score metrics in terms of anomalous behavior. - The "Anomaly Rate" chart shows the percentage of anomalous metrics over time per node. diff --git a/docs/dashboards-and-charts/events-feed.md b/docs/dashboards-and-charts/events-feed.md index a5386e80e..34d6ee0e6 100644 --- a/docs/dashboards-and-charts/events-feed.md +++ b/docs/dashboards-and-charts/events-feed.md @@ -66,8 +66,8 @@ All users will be able to see events from the Topology and Alerts domain but Aud ## How to use the events feed 1. Click on the **Events** tab (located near the top of your screen) -1. You will be presented with a table listing the events that occurred from the timeframe defined on the [date time picker](/docs/dashboards-and-charts/visualization-date-and-time-controls.md#date-and-time-selector) -1. 
You can use the filtering capabilities available on right-hand bar to slice through the results provided. See more details on [event types and filters](#event-types-and-filters) +2. You will be presented with a table listing the events that occurred within the timeframe defined on the [date time picker](/docs/dashboards-and-charts/visualization-date-and-time-controls.md#date-and-time-selector) +3. You can use the filtering capabilities available on the right-hand bar to slice through the results provided > **Note** > diff --git a/docs/dashboards-and-charts/import-export-print-snapshot.md b/docs/dashboards-and-charts/import-export-print-snapshot.md index 80bf514ae..f2df15dab 100644 --- a/docs/dashboards-and-charts/import-export-print-snapshot.md +++ b/docs/dashboards-and-charts/import-export-print-snapshot.md @@ -1,22 +1,7 @@ - - # Import, export, and print a snapshot >❗This feature is only available on v1 dashboards, it hasn't been port-forwarded to v2. -> For more information on accessing dashboards check [this documentation](/docs/dashboards-and-charts/README.md). - +> For more information on accessing dashboards check [this documentation](/docs/dashboards-and-charts/README.md). Netdata can export snapshots of the contents of your dashboard at a given time, which you can then import into any other node running Netdata. Or, you can create a print-ready version of your dashboard to save to PDF or actually print to @@ -44,7 +29,7 @@ Select the Netdata snapshot file to import. Once the file is loaded, the modal u snapshot and the system from which it was taken. Click **Import** to begin the process. Netdata takes the data embedded inside the snapshot and re-creates a static replica on your dashboard. When the import -finishes, you're free to move around and examine the charts. +finishes, you're free to move around and examine the charts. Some caveats and tips to keep in mind: diff --git a/docs/dashboards-and-charts/kubernetes-tab.md b/docs/dashboards-and-charts/kubernetes-tab.md index 9b5df87d8..3289615f0 100644 --- a/docs/dashboards-and-charts/kubernetes-tab.md +++ b/docs/dashboards-and-charts/kubernetes-tab.md @@ -27,7 +27,6 @@ Netdata Cloud organizes and visualizes the following metrics from your Kubernete | `k8s.cgroup.net_net` | Sum of `received` and `sent` bandwidth per second. | | `k8s.cgroup.net_packets` | Sum of `multicast`, `received`, and `sent` packets. | - When viewing the [overview of this dashboard](#kubernetes-containers-overview), Netdata presents the above metrics per container, or aggregated based on their associated pods. diff --git a/docs/dashboards-and-charts/netdata-charts.md b/docs/dashboards-and-charts/netdata-charts.md index 5536f83b2..c7563aa29 100644 --- a/docs/dashboards-and-charts/netdata-charts.md +++ b/docs/dashboards-and-charts/netdata-charts.md @@ -19,14 +19,14 @@ These charts provide a lot of useful information, so that you can: - View individual metric collection status about a chart These charts are available on Netdata Cloud's -[Metrics tab](/docs/dashboards-and-charts/metrics-tab-and-single-node-tabs.md), [single sode tabs](/docs/dashboards-and-charts/metrics-tab-and-single-node-tabs.md) and +[Metrics tab](/docs/dashboards-and-charts/metrics-tab-and-single-node-tabs.md), [single node tabs](/docs/dashboards-and-charts/metrics-tab-and-single-node-tabs.md) and on your [Custom Dashboards](/docs/dashboards-and-charts/dashboards-tab.md).
## Overview A Netdata chart looks like this: - +A Netdata Chart With a quick glance you have immediate information available at your disposal: @@ -37,7 +37,7 @@ With a quick glance you have immediate information available at your disposal: - [Chart area](#hover-over-the-chart) - [Legend with dimensions](#dimensions-bar) -## Fundemental elements +## Fundamental elements While Netdata's charts require no configuration and are easy to interact with, they have a lot of underlying complexity. To meaningfully organize charts out of the box based on what's happening in your nodes, Netdata uses the concepts of [dimensions](#dimensions), [contexts](#contexts), and [families](#families). @@ -100,7 +100,7 @@ names: When you start interacting with a chart, you'll notice valuable information on the Title bar: - +Netdata Chart Title bar Title bar elements: @@ -110,8 +110,7 @@ Title bar elements: Along with viewing chart type, context and units, on this bar you have access to immediate actions over the chart: - - +Netdata Chart Title bar immediate actions - **Manage Alerts**: manage [Alert configurations](/docs/dashboards-and-charts/alerts-tab.md#alert-configurations-tab) for this chart. - **Chart info**: get more information relevant to the chart you are interacting with. @@ -119,14 +118,14 @@ Along with viewing chart type, context and units, on this bar you have access to - **Enter fullscreen mode**: expand the current chart to the full size of your screen. - **User settings**: save your settings for the chart at hand, so it persists across dashboard reloads. - Personal has the top priority. - - Room and Space settings for a chart are shared across all users who don't have personal settings for it. + - Room and Space settings for a chart are shared across all users who don't have personal settings for it. - **Drag and Drop the chart to a Dashboard**: add the chart to an existing custom [Dashboard](/docs/dashboards-and-charts/dashboards-tab.md) or directly create a new one that includes the chart. ## Definition bar Each composite chart has a definition bar to provide information and options about the following: - +Netdata Chart Definition bar - Group by option - Aggregate function to be applied in case multiple data sources exist @@ -145,14 +144,14 @@ To help users instantly understand and validate the data they see on charts, we > allowing you to zoom in to the different parts of it. > > -> +> Netdata NIDL Framework > You can rapidly access condensed information for collected metrics, grouped by node, monitored instances, dimension, or any key/value label pair. At the Definition bar of each chart, there are a few dropdown menus: - +Netdata Chart NIDL Dropdown menus These dropdown menus have 2 functions: @@ -171,7 +170,7 @@ All of these dropdown menus can be used for instantly filtering the information The "Group by" dropdown menu allows selecting 1 or more groupings to be applied at once on the same dataset. - +Netdata Chart Group by dropdown It supports: @@ -188,7 +187,7 @@ Using this menu, you can slice and dice the data in any possible way, to quickly > You have the means to change the default group by or apply filtering to get a better view into what data your are trying to analyze. > For example, if you change the group by to _instance_ you get a view with the data of all the instances (cgroups) that contribute to that chart. > Then you can use further filtering tools to focus the data that is important to you and even save the result to your own dashboards. 
- +> > ### Tip > > Group by instance, dimension to see the time series of every individual collected metric participating in the chart. @@ -197,7 +196,7 @@ Using this menu, you can slice and dice the data in any possible way, to quickly Each chart uses an opinionated-but-valuable default aggregate function over the data sources. - +Netdata Chart Aggregate functions over data For example, the `system.cpu` chart shows the average for each dimension from every contributing chart, while the `net.net` chart shows the sum for each dimension from every contributing chart, which can also come from multiple networking interfaces. @@ -218,7 +217,7 @@ The following aggregate functions are available for each selected dimension: In this dropdown, you can view or filter the nodes contributing time-series metrics to the chart. This menu also provides the contribution of each node to the volume of the chart, and a breakdown of the anomaly rate of the queried data per node. - +Netdata Chart Nodes dropdown If one or more nodes can't contribute to a given chart, the definition bar shows a warning symbol plus the number of affected nodes, then lists them in the dropdown along with the associated error. Nodes might return errors because of @@ -229,38 +228,38 @@ networking issues, a stopped `netdata` service, or because that node does not ha In this dropdown, you can view or filter the instances contributing time-series metrics to the chart. This menu also provides the contribution of each instance to the volume of the chart, and a breakdown of the anomaly rate of the queried data per instance. - +Netdata Chart Instances dropdown ### Dimensions dropdown In this dropdown, you can view or filter the original dimensions contributing time-series metrics to the chart. This menu also presents the contribution of each original dimension on the chart, and a breakdown of the anomaly rate of the data per dimension. - +Netdata Chart Dimensions Dropdown ### Labels dropdown In this dropdown, you can view or filter the contributing time-series labels of the chart. This menu also presents the contribution of each label on the chart, and a breakdown of the anomaly rate of the data per label. - +Netdata Chart Labels Dropdown ### Aggregate functions over time When the granularity of the data collected is higher than the plotted points on the chart, an aggregation function over time is applied. - +Netdata Chart Aggregate functions over time By default, the aggregation applied is _average_, but the user can choose different options from the following: - Min, Max, Average or Sum - Percentile - you can specify the percentile you want to focus on: 25th, 50th, 75th, 80th, 90th, 95th, 97th, 98th and 99th. - + Netdata Chart Aggregate functions over time Percentile selection - Trimmed Mean or Trimmed Median - you can choose the percentage of data that you want to focus on: 1%, 2%, 3%, 5%, 10%, 15%, 20% and 25%. - + Netdata Chart Aggregate functions over time Trimmed Mean or Median selection - Median - Standard deviation - Coefficient of variation @@ -280,7 +279,7 @@ It then uses these unique models during data collection to predict the value tha If the value collected is an outlier, it is marked as anomalous. - +Netdata Chart Anomaly Rate Ribbon This unmatched capability of real-time predictions as data is collected allows you to **detect anomalies for potentially millions of metrics across your entire infrastructure within a second of occurrence**.
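The per-sample anomaly information behind this ribbon can also be queried directly from an Agent. A minimal sketch, assuming the default port and the documented `anomaly-bit` query option:

```bash
# Ask for the anomaly rate (0-100 per returned point) of system.cpu
# over the last 60 seconds, instead of the collected values.
curl -s 'http://localhost:19999/api/v1/data?chart=system.cpu&after=-60&options=anomaly-bit'
```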
@@ -297,29 +296,29 @@ It includes a bar indicating the volume percentage of each time series compared This overlay sorts all dimensions by value, makes bold the closest dimension to the mouse and presents a histogram based on the values of the dimensions. - +Netdata Chart Hover over Chart When hovering the anomaly ribbon, the overlay sorts all dimensions by anomaly rate, and presents a histogram of these anomaly rates. -#### Info column +### Info column Additionally, when hovering over the chart, the overlay may display an indication in the "Info" column. Currently, this column is used to inform users of any data collection issues that might affect the chart. Below each chart, there is an information ribbon. This ribbon currently shows 3 states related to the points presented in the chart: -1. **[P]: Partial Data** +1. **Partial Data** At least one of the dimensions in the chart has partial data, meaning that not all instances available contributed data to this point. This can happen when a container is stopped, or when a node is restarted. This indicator helps to gain confidence of the dataset, in situations when unusual spikes or dives appear due to infrastructure maintenance, or due to failures to part of the infrastructure. -2. **[O]: Overflown** +2. **Overflown** At least one of the data sources included in the chart has a counter that has overflowed at this point. -3. **[E]: Empty Data** +3. **Empty Data** At least one of the dimensions included in the chart has no data at all for the given points. All these indicators are also visualized per dimension, in the pop-over that appears when hovering the chart. - +Netdata Chart Hover over the chart Info Column ## Play, Pause and Reset @@ -346,7 +345,7 @@ Note: These interactions are available when the default "Pan" action is used fro While exploring the chart, a tool bar will appear. This tool bar is there to support you on this task. The available manipulation tools you can select are: - +Netdata Chart Tool bar - Pan - Highlight @@ -382,10 +381,10 @@ Selecting timeframes is useful when you see an interesting spike or change in a You can zoom to a specific timeframe, either horizontally of vertically, by selecting a timeframe. -| Interaction | Keyboard/mouse | Touchpad/touchscreen | -|:-------------------------------------------|:-------------------------------------|:-----------------------------------------------------| -| **Zoom** to a specific timeframe | `Shift + mouse vertical selection` | `n/a` | -| **Horizontal Zoom** a specific Y-axis area | `Shift + mouse horizontal selection` | `n/a` | +| Interaction | Keyboard/mouse | Touchpad/touchscreen | +|:-------------------------------------------|:-------------------------------------|:---------------------| +| **Zoom** to a specific timeframe | `Shift + mouse vertical selection` | `n/a` | +| **Horizontal Zoom** a specific Y-axis area | `Shift + mouse horizontal selection` | `n/a` | ### Chart zoom @@ -394,9 +393,9 @@ of an anomaly or outage. Zooming out lets you see metrics within the larger context, such as the last hour, day, or week, which is useful in understanding what "normal" looks like, or to identify long-term trends, like a slow creep in memory usage. -| Interaction | Keyboard/mouse | Touchpad/touchscreen | -|:-------------------------------------------|:-------------------------------------|:-----------------------------------------------------| -| **Zoom** in or out | `Shift + mouse scrollwheel` | `two-finger pinch`
`Shift + two-finger scroll` | +| Interaction | Keyboard/mouse | Touchpad/touchscreen | +|:-------------------|:----------------------------|:-----------------------------------------------------| +| **Zoom** in or out | `Shift + mouse scrollwheel` | `two-finger pinch`
`Shift + two-finger scroll` | ## Dimensions bar @@ -404,7 +403,7 @@ Zooming out lets you see metrics within the larger context, such as the last hou The bottom legend where you can see the dimensions of the chart can be ordered by: - +Netdata Chart order dimensions legend - Dimension name (Ascending or Descending) - Dimension value (Ascending or Descending) diff --git a/docs/dashboards-and-charts/themes.md b/docs/dashboards-and-charts/themes.md index 0ca7425ae..bdce5db6f 100644 --- a/docs/dashboards-and-charts/themes.md +++ b/docs/dashboards-and-charts/themes.md @@ -12,4 +12,3 @@ tab, and then choose your preferred theme: **Light** or **Dark**. **Light**: ![Light theme](https://github.com/netdata/netdata/assets/70198089/eb0fb8c1-5695-450a-8ba8-a185874e8496) - diff --git a/docs/dashboards-and-charts/top-tab.md b/docs/dashboards-and-charts/top-tab.md index 4edaf32f9..6b96010a7 100644 --- a/docs/dashboards-and-charts/top-tab.md +++ b/docs/dashboards-and-charts/top-tab.md @@ -6,7 +6,7 @@ They can be used to retrieve additional information to help you troubleshoot or > **Tip** > > You can also execute a Function from the [Nodes tab](/docs/dashboards-and-charts/nodes-tab.md), by pressing the `f(x)` button. - +> > **Note** > > If you get an error saying that your node can't execute Functions please check the [prerequisites](/docs/top-monitoring-netdata-functions.md#prerequisites). diff --git a/docs/deployment-guides/deployment-strategies.md b/docs/deployment-guides/deployment-strategies.md index 1a3c67164..5c7afda20 100644 --- a/docs/deployment-guides/deployment-strategies.md +++ b/docs/deployment-guides/deployment-strategies.md @@ -32,7 +32,7 @@ In this example, Machine Learning and Alerting are disabled for the Child, so th ##### netdata.conf -On the child node, edit `netdata.conf` by using the [edit-config](/docs/netdata-agent/configuration/README.md#edit-netdataconf) script and set the following parameters: +On the child node, edit `netdata.conf` by using the [edit-config](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script and set the following parameters: ```yaml [db] @@ -63,7 +63,7 @@ On the child node, edit `netdata.conf` by using the [edit-config](/docs/netdata- ##### stream.conf -To edit `stream.conf`, use again the [edit-config](/docs/netdata-agent/configuration/README.md#edit-netdataconf) script and set the following parameters: +To edit `stream.conf`, use again the [edit-config](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script and set the following parameters: ```yaml [stream] @@ -77,7 +77,7 @@ To edit `stream.conf`, use again the [edit-config](/docs/netdata-agent/configura #### Parent config -For the Parent, besides setting up streaming, this example also provides configuration for multiple [tiers of metrics storage](/docs/netdata-agent/configuration/optimizing-metrics-database/change-metrics-storage.md#calculate-the-system-resources-ram-disk-space-needed-to-store-metrics), for 10 Children, with about 2k metrics each. This allows for: +For the Parent, besides setting up streaming, this example also provides configuration for multiple [tiers of metrics storage](/docs/netdata-agent/configuration/optimizing-metrics-database/change-metrics-storage.md), for 10 Children, with about 2k metrics each. 
This allows for: - 1s granularity at tier 0 for 1 week - 1m granularity at tier 1 for 1 month @@ -90,28 +90,23 @@ Requiring: ##### netdata.conf -On the Parent, edit `netdata.conf` by using the [edit-config](/docs/netdata-agent/configuration/README.md#edit-netdataconf) script and set the following parameters: +On the Parent, edit `netdata.conf` by using the [edit-config](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script and set the following parameters: ```yaml [db] mode = dbengine + dbengine tier backfill = new storage tiers = 3 - # To allow memory pressure to offload index from ram - dbengine page descriptors in file mapped memory = yes + dbengine page cache size = 1.4GiB # storage tier 0 update every = 1 - dbengine multihost disk space MB = 12000 - dbengine page cache size MB = 1400 + dbengine tier 0 retention space = 12GiB # storage tier 1 - dbengine tier 1 page cache size MB = 512 - dbengine tier 1 multihost disk space MB = 4096 dbengine tier 1 update every iterations = 60 - dbengine tier 1 backfill = new + dbengine tier 1 retention space = 4GiB # storage tier 2 - dbengine tier 2 page cache size MB = 128 - dbengine tier 2 multihost disk space MB = 2048 dbengine tier 2 update every iterations = 60 - dbengine tier 2 backfill = new + dbengine tier 2 retention space = 2GiB [ml] # Enabled by default # enabled = yes @@ -125,7 +120,7 @@ On the Parent, edit `netdata.conf` by using the [edit-config](/docs/netdata-agen ##### stream.conf -On the Parent node, edit `stream.conf` by using the [edit-config](/docs/netdata-agent/configuration/README.md#edit-netdataconf) script and set the following parameters: +On the Parent node, edit `stream.conf` by using the [edit-config](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script and set the following parameters: ```yaml [API_KEY] @@ -137,7 +132,7 @@ On the Parent node, edit `stream.conf` by using the [edit-config](/docs/netdata- In order to setup active–active streaming between Parent 1 and Parent 2, Parent 1 needs to be instructed to stream data to Parent 2 and Parent 2 to stream data to Parent 1. The Child Agents need to be configured with the addresses of both Parent Agents. An Agent will only connect to one Parent at a time, falling back to the next upon failure. These examples use the same API key between Parent Agents and for connections for Child Agents. -On both Netdata Parent and all Child Agents, edit `stream.conf` by using the [edit-config](/docs/netdata-agent/configuration/README.md#edit-netdataconf) script: +On both Netdata Parent and all Child Agents, edit `stream.conf` by using the [edit-config](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script: #### stream.conf on Parent 1 diff --git a/docs/developer-and-contributor-corner/README.md b/docs/developer-and-contributor-corner/README.md index d4d86382a..817938126 100644 --- a/docs/developer-and-contributor-corner/README.md +++ b/docs/developer-and-contributor-corner/README.md @@ -1,3 +1,3 @@ # Developer and Contributor Corner -In this section of our Documentation you will find more advanced information, suited for developers and contributors alike. \ No newline at end of file +In this section of our Documentation you will find more advanced information, suited for developers and contributors alike. 
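Returning to the Parent sizing example above: you can sanity-check the tier 0 `retention space` value with some back-of-the-envelope arithmetic. The roughly one byte per sample on disk used below is the commonly cited dbengine approximation for compressed tier 0 data, so treat the result as a ballpark only:

```text
metrics = 10 children x ~2,000 metrics         = ~20,000 concurrent metrics
samples = 20,000 x 7 days x 86,400 s / 1 s     = ~12.1 billion samples
disk    = 12.1e9 samples x ~1 byte per sample  = ~11.3 GiB (close to the 12GiB configured)
```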
diff --git a/docs/developer-and-contributor-corner/build-the-netdata-agent-yourself.md b/docs/developer-and-contributor-corner/build-the-netdata-agent-yourself.md index 99166ad95..d98784ccd 100644 --- a/docs/developer-and-contributor-corner/build-the-netdata-agent-yourself.md +++ b/docs/developer-and-contributor-corner/build-the-netdata-agent-yourself.md @@ -1,3 +1,3 @@ # Build the Netdata Agent yourself -This section contains documentation on all the ways that you can build the Netdata Agent. \ No newline at end of file +This section contains documentation on all the ways that you can build the Netdata Agent. diff --git a/docs/developer-and-contributor-corner/collect-apache-nginx-web-logs.md b/docs/developer-and-contributor-corner/collect-apache-nginx-web-logs.md index 55af82fb7..9a307b0b3 100644 --- a/docs/developer-and-contributor-corner/collect-apache-nginx-web-logs.md +++ b/docs/developer-and-contributor-corner/collect-apache-nginx-web-logs.md @@ -81,18 +81,13 @@ jobs: log_type: auto ``` -Restart Netdata with `sudo systemctl restart netdata`, or the [appropriate -method](/packaging/installer/README.md#maintaining-a-netdata-agent-installation) for your system. Netdata should pick up your web server's access log and -begin showing real-time charts! +Restart Netdata with `sudo systemctl restart netdata`, or the [appropriate method](/docs/netdata-agent/start-stop-restart.md) for your system. Netdata should pick up your web server's access log and begin showing real-time charts! ### Custom log formats and fields -The web log collector is capable of parsing custom Nginx and Apache log formats and presenting them as charts, but we'll -leave that topic for a separate guide. +The web log collector is capable of parsing custom Nginx and Apache log formats and presenting them as charts, but we'll leave that topic for a separate guide. -We do have [extensive -documentation](/src/go/plugin/go.d/modules/weblog/README.md#custom-log-format) on how -to build custom parsing for Nginx and Apache logs. +We do have [extensive documentation](/src/go/plugin/go.d/modules/weblog/README.md) on how to build custom parsing for Nginx and Apache logs. ## Tweak web log collector alerts @@ -100,7 +95,7 @@ Over time, we've created some default alerts for web log monitoring. These alert web server is receiving more than 120 requests per minute. Otherwise, there's simply not enough data to make conclusions about what is "too few" or "too many." -- [web log alerts](https://raw.githubusercontent.com/netdata/netdata/master/src/health/health.d/web_log.conf). +- [web log alerts](https://raw.githubusercontent.com/netdata/netdata/master/src/health/health.d/web_log.conf). You can also edit this file directly with `edit-config`: @@ -108,5 +103,5 @@ You can also edit this file directly with `edit-config`: ./edit-config health.d/weblog.conf ``` -For more information about editing the defaults or writing new alert entities, see our +For more information about editing the defaults or writing new alert entities, see our [health monitoring documentation](/src/health/README.md). 
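If you'd rather raise the request-rate threshold than disable the alert, a minimal sketch of an override looks like the following. The template name, context, and threshold here are illustrative rather than the stock definitions, so adapt them to what you find in `web_log.conf`:

```text
 template: web_log_requests_surge
       on: web_log.requests
   lookup: average -1m unaligned
    units: requests/s
    every: 10s
     warn: $this > 200
     info: sustained request rate above 200 requests/s over the last minute
```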
diff --git a/docs/developer-and-contributor-corner/collect-unbound-metrics.md b/docs/developer-and-contributor-corner/collect-unbound-metrics.md index ac997b7f9..abfaca723 100644 --- a/docs/developer-and-contributor-corner/collect-unbound-metrics.md +++ b/docs/developer-and-contributor-corner/collect-unbound-metrics.md @@ -1,13 +1,3 @@ - - # Monitor Unbound DNS servers with Netdata [Unbound](https://nlnetlabs.nl/projects/unbound/about/) is a "validating, recursive, caching DNS resolver" from NLNet @@ -35,7 +25,7 @@ the TLS key files that will encrypt connections to the remote interface. Then ad documentation](https://nlnetlabs.nl/documentation/unbound/howto-setup/#setup-remote-control) for more details on using `unbound-control`, such as how to handle situations when Unbound is run under a unique user. -```conf +```text # enable remote-control remote-control: control-enable: yes @@ -137,5 +127,3 @@ Now that you're collecting metrics from your Unbound servers, let us know how it for improvement or refinement based on real-world use cases. Feel free to [file an issue](https://github.com/netdata/netdata/issues/new?assignees=&labels=bug%2Cneeds+triage&template=BUG_REPORT.yml) with your thoughts. - - diff --git a/docs/developer-and-contributor-corner/customize.md b/docs/developer-and-contributor-corner/customize.md index 03a6a842a..7d9895dc0 100644 --- a/docs/developer-and-contributor-corner/customize.md +++ b/docs/developer-and-contributor-corner/customize.md @@ -1,15 +1,15 @@ # Customize the standard dashboard -> ### Disclaimer +> **Disclaimer** > > This document is only applicable to the v1 version of the dashboard and doesn't affect the [Netdata Dashboard](/docs/dashboards-and-charts/README.md). -While the [Netdata dashboard](/src/web/gui/README.md) comes preconfigured with hundreds of charts and +While the [Netdata dashboard](/src/web/gui/README.md) comes pre-configured with hundreds of charts and thousands of metrics, you may want to alter your experience based on a particular use case or preferences. ## Dashboard settings -To change dashboard settings, click the on the **settings** icon +To change dashboard settings, click on the **settings** icon ![Import icon](https://raw.githubusercontent.com/netdata/netdata-ui/98e31799c1ec0983f433537ff16d2ac2b0d994aa/src/components/icon/assets/gear.svg) in the top panel. @@ -21,10 +21,9 @@ Here are a few popular settings: ### Change chart legend position -Find this setting under the **Visual** tab. By default, Netdata places the legend of dimensions _below_ charts. +Find this setting under the **Visual** tab. By default, Netdata places the legend of dimensions _below_ charts. Click this toggle to move the legend to the _right_ of charts. - ### Change theme Find this setting under the **Visual** tab. Choose between Dark (the default) and White. @@ -67,9 +66,9 @@ dashboard. Save the file, then navigate to your [Netdata config directory](/docs/netdata-agent/configuration/README.md) to edit `netdata.conf`. Add the following line to the `[web]` section to tell Netdata where to find your custom configuration. -```conf +```text [web] custom dashboard_info.js = your_dashboard_info_file.js ``` -Reload your browser tab to see your custom configuration. \ No newline at end of file +Reload your browser tab to see your custom configuration.
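Note that Netdata serves this file from its web directory, so the custom file needs to sit next to the stock dashboard assets. A sketch, assuming a typical package install where the web files live under `/usr/share/netdata/web` (check your install's `[web]` files path if it differs):

```bash
# Copy the custom file into Netdata's web directory and make sure
# the netdata user can read it (paths assume a standard package install).
sudo cp your_dashboard_info_file.js /usr/share/netdata/web/
sudo chown netdata:netdata /usr/share/netdata/web/your_dashboard_info_file.js
```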
diff --git a/docs/developer-and-contributor-corner/kubernetes-k8s-netdata.md b/docs/developer-and-contributor-corner/kubernetes-k8s-netdata.md deleted file mode 100644 index 011aac8da..000000000 --- a/docs/developer-and-contributor-corner/kubernetes-k8s-netdata.md +++ /dev/null @@ -1,237 +0,0 @@ -# Kubernetes monitoring with Netdata - -This document gives an overview of what visualizations Netdata provides on Kubernetes deployments. - -At Netdata, we've built Kubernetes monitoring tools that add visibility without complexity while also helping you -actively troubleshoot anomalies or outages. This guide walks you through each of the visualizations and offers best -practices on how to use them to start Kubernetes monitoring in a matter of minutes, not hours or days. - -Netdata's Kubernetes monitoring solution uses a handful of [complementary tools and -collectors](#related-reference-documentation) for peeling back the many complex layers of a Kubernetes cluster, -_entirely for free_. These methods work together to give you every metric you need to troubleshoot performance or -availability issues across your Kubernetes infrastructure. - -## Challenge - -While Kubernetes (k8s) might simplify the way you deploy, scale, and load-balance your applications, not all clusters -come with "batteries included" when it comes to monitoring. Doubly so for a monitoring stack that helps you actively -troubleshoot issues with your cluster. - -Some k8s providers, like GKE (Google Kubernetes Engine), do deploy clusters bundled with monitoring capabilities, such -as Google Stackdriver Monitoring. However, these pre-configured solutions might not offer the depth of metrics, -customization, or integration with your preferred alerting methods. - -Without this visibility, it's like you built an entire house and _then_ smashed your way through the finished walls to -add windows. - -## Solution - -In this tutorial, you'll learn how to navigate Netdata's Kubernetes monitoring features, using -[robot-shop](https://github.com/instana/robot-shop) as an example deployment. Deploying robot-shop is purely optional. -You can also follow along with your own Kubernetes deployment if you choose. While the metrics might be different, the -navigation and best practices are the same for every cluster. - -## What you need to get started - -To follow this tutorial, you need: - -- A free Netdata Cloud account. [Sign up](https://app.netdata.cloud/sign-up?cloudRoute=/spaces) if you don't have one - already. -- A working cluster running Kubernetes v1.9 or newer, with a Netdata deployment and connected parent/child nodes. See - our [Kubernetes deployment process](/packaging/installer/methods/kubernetes.md) for details on deployment and - conneting to Cloud. -- The [`kubectl`](https://kubernetes.io/docs/reference/kubectl/overview/) command line tool, within [one minor version - difference](https://kubernetes.io/docs/tasks/tools/install-kubectl/#before-you-begin) of your cluster, on an - administrative system. -- The [Helm package manager](https://helm.sh/) v3.0.0 or newer on the same administrative system. - -### Install the `robot-shop` demo (optional) - -Begin by downloading the robot-shop code and using `helm` to create a new deployment. - -```bash -git clone git@github.com:instana/robot-shop.git -cd robot-shop/K8s/helm -kubectl create ns robot-shop -helm install robot-shop --namespace robot-shop . -``` - -Running `kubectl get pods` shows both the Netdata and robot-shop deployments. 
- -```bash -kubectl get pods --all-namespaces -NAMESPACE NAME READY STATUS RESTARTS AGE -default netdata-child-29f9c 2/2 Running 0 10m -default netdata-child-8xphf 2/2 Running 0 10m -default netdata-child-jdvds 2/2 Running 0 11m -default netdata-parent-554c755b7d-qzrx4 1/1 Running 0 11m -kube-system aws-node-jnjv8 1/1 Running 0 17m -kube-system aws-node-svzdb 1/1 Running 0 17m -kube-system aws-node-ts6n2 1/1 Running 0 17m -kube-system coredns-559b5db75d-f58hp 1/1 Running 0 22h -kube-system coredns-559b5db75d-tkzj2 1/1 Running 0 22h -kube-system kube-proxy-9p9cd 1/1 Running 0 17m -kube-system kube-proxy-lt9ss 1/1 Running 0 17m -kube-system kube-proxy-n75t9 1/1 Running 0 17m -robot-shop cart-b4bbc8fff-t57js 1/1 Running 0 14m -robot-shop catalogue-8b5f66c98-mr85z 1/1 Running 0 14m -robot-shop dispatch-67d955c7d8-lnr44 1/1 Running 0 14m -robot-shop mongodb-7f65d86c-dsslc 1/1 Running 0 14m -robot-shop mysql-764c4c5fc7-kkbnf 1/1 Running 0 14m -robot-shop payment-67c87cb7d-5krxv 1/1 Running 0 14m -robot-shop rabbitmq-5bb66bb6c9-6xr5b 1/1 Running 0 14m -robot-shop ratings-94fd9c75b-42wvh 1/1 Running 0 14m -robot-shop redis-0 0/1 Pending 0 14m -robot-shop shipping-7d69cb88b-w7hpj 1/1 Running 0 14m -robot-shop user-79c445b44b-hwnm9 1/1 Running 0 14m -robot-shop web-8bb887476-lkcjx 1/1 Running 0 14m -``` - -## Explore Netdata's Kubernetes monitoring charts - -The Netdata Helm chart deploys and enables everything you need for monitoring Kubernetes on every layer. Once you deploy -Netdata and connect your cluster's nodes, you're ready to check out the visualizations **with zero configuration**. - -To get started, [sign in](https://app.netdata.cloud/sign-in?cloudRoute=/spaces) to your Netdata Cloud account. Head over -to the Room you connected your cluster to, if not **General**. - -Let's walk through monitoring each layer of a Kubernetes cluster using the Overview as our framework. - -## Cluster and node metrics - -The gauges and time-series charts you see right away in the Overview show aggregated metrics from every node in your -cluster. - -For example, the `apps.cpu` chart (in the **Applications** menu item), visualizes the CPU utilization of various -applications/services running on each of the nodes in your cluster. The **X Nodes** dropdown shows which nodes -contribute to the chart and links to jump a single-node dashboard for further investigation. - -![Per-application monitoring in a Kubernetes -cluster](https://user-images.githubusercontent.com/1153921/109042169-19c8fa00-768d-11eb-91a7-1a7afc41fea2.png) - -For example, the chart above shows a spike in the CPU utilization from `rabbitmq` every minute or so, along with a -baseline CPU utilization of 10-15% across the cluster. - - -## Pod and container metrics - -Click on the **Kubernetes xxxxxxx...** section to jump down to Netdata Cloud's unique Kubernetes visualizations for view -real-time resource utilization metrics from your Kubernetes pods and containers. - -![Navigating to the Kubernetes monitoring -visualizations](https://user-images.githubusercontent.com/1153921/109049195-349f6c80-7695-11eb-8902-52a029dca77f.png) - -### Health map - -The first visualization is the [health map](/docs/dashboards-and-charts/kubernetes-tab.md#health-map), -which places each container into its own box, then varies the intensity of their color to visualize the resource -utilization. By default, the health map shows the **average CPU utilization as a percentage of the configured limit** -for every container in your cluster. 
- -![The Kubernetes health map in Netdata -Cloud](https://user-images.githubusercontent.com/1153921/109050085-3f0e3600-7696-11eb-988f-52cb187f53ea.png) - -Let's explore the most colorful box by hovering over it. - -![Hovering over a -container](https://user-images.githubusercontent.com/1153921/109049544-a8417980-7695-11eb-80a7-109b4a645a27.png) - -The **Context** tab shows `rabbitmq-5bb66bb6c9-6xr5b` as the container's image name, which means this container is -running a [RabbitMQ](/src/go/plugin/go.d/modules/rabbitmq/README.md) workload. - -Click the **Metrics** tab to see real-time metrics from that container. Unsurprisingly, it shows a spike in CPU -utilization at regular intervals. - -![Viewing real-time container -metrics](https://user-images.githubusercontent.com/1153921/109050482-aa580800-7696-11eb-9e3e-d3bdf0f3eff7.png) - -### Time-series charts - -Beneath the health map is a variety of time-series charts that help you visualize resource utilization over time, which -is useful for targeted troubleshooting. - -The default is to display metrics grouped by the `k8s_namespace` label, which shows resource utilization based on your -different namespaces. - -![Time-series Kubernetes monitoring in Netdata -Cloud](https://user-images.githubusercontent.com/1153921/109075210-126a1680-76b6-11eb-918d-5acdcdac152d.png) - -Each composite chart has a [definition bar](/docs/dashboards-and-charts/netdata-charts.md#definition-bar) -for complete customization. For example, grouping the top chart by `k8s_container_name` reveals new information. - -![Changing time-series charts](https://user-images.githubusercontent.com/1153921/109075212-139b4380-76b6-11eb-836f-939482ae55fc.png) - -## Service metrics - -Netdata has a [service discovery plugin](https://github.com/netdata/agent-service-discovery), which discovers and -creates configuration files for [compatible -services](https://github.com/netdata/helmchart#service-discovery-and-supported-services) and any endpoints covered by -our [generic Prometheus collector](/src/go/plugin/go.d/modules/prometheus/README.md). -Netdata uses these files to collect metrics from any compatible application as they run _inside_ of a pod. Service -discovery happens without manual intervention as pods are created, destroyed, or moved between nodes. - -Service metrics show up on the Overview as well, beneath the **Kubernetes** section, and are labeled according to the -service in question. For example, the **RabbitMQ** section has numerous charts from the [`rabbitmq` -collector](/src/go/plugin/go.d/modules/rabbitmq/README.md): - -![Finding service discovery -metrics](https://user-images.githubusercontent.com/1153921/109054511-2eac8a00-769b-11eb-97f1-da93acb4b5fe.png) - -> The robot-shop cluster has more supported services, such as MySQL, which are not visible with zero configuration. This -> is usually because of services running on non-default ports, using non-default names, or required passwords. Read up -> on [configuring service discovery](/packaging/installer/methods/kubernetes.md#configure-service-discovery) to collect -> more service metrics. - -Service metrics are essential to infrastructure monitoring, as they're the best indicator of the end-user experience, -and key signals for troubleshooting anomalies or issues. - -## Kubernetes components - -Netdata also automatically collects metrics from two essential Kubernetes processes. 
- -### kubelet - -The **k8s kubelet** section visualizes metrics from the Kubernetes agent responsible for managing every pod on a given -node. This also happens without any configuration thanks to the [kubelet -collector](/src/go/plugin/go.d/modules/k8s_kubelet/README.md). - -Monitoring each node's kubelet can be invaluable when diagnosing issues with your Kubernetes cluster. For example, you -can see if the number of running containers/pods has dropped, which could signal a fault or crash in a particular -Kubernetes service or deployment (see `kubectl get services` or `kubectl get deployments` for more details). If the -number of pods increases, it may be because of something more benign, like another team member scaling up a -service with `kubectl scale`. - -You can also view charts for the Kubelet API server, the volume of runtime/Docker operations by type, -configuration-related errors, and the actual vs. desired numbers of volumes, plus a lot more. - -### kube-proxy - -The **k8s kube-proxy** section displays metrics about the network proxy that runs on each node in your Kubernetes -cluster. kube-proxy lets pods communicate with each other and accept sessions from outside your cluster. Its metrics are -collected by the [kube-proxy -collector](/src/go/plugin/go.d/modules/k8s_kubeproxy/README.md). - -With Netdata, you can monitor how often your k8s proxies are syncing proxy rules between nodes. Dramatic changes in -these figures could indicate an anomaly in your cluster that's worthy of further investigation. - -## What's next? - -After reading this guide, you should now be able to monitor any Kubernetes cluster with Netdata, including nodes, pods, -containers, services, and more. - -With the health map, time-series charts, and the ability to drill down into individual nodes, you can see hundreds of -per-second metrics with zero configuration and less time remembering all the `kubectl` options. Netdata moves with your -cluster, automatically picking up new nodes or services as your infrastructure scales. And it's entirely free for -clusters of all sizes. - -### Related reference documentation - -- [Netdata Helm chart](https://github.com/netdata/helmchart) -- [Netdata service discovery](https://github.com/netdata/agent-service-discovery) -- [Netdata Agent · `kubelet` - collector](/src/go/plugin/go.d/modules/k8s_kubelet/README.md) -- [Netdata Agent · `kube-proxy` - collector](/src/go/plugin/go.d/modules/k8s_kubeproxy/README.md) -- [Netdata Agent · `cgroups.plugin`](/src/collectors/cgroups.plugin/README.md) - - diff --git a/docs/developer-and-contributor-corner/kubernetes-k8s-netdata.txt b/docs/developer-and-contributor-corner/kubernetes-k8s-netdata.txt new file mode 100644 index 000000000..5ebb963c3 --- /dev/null +++ b/docs/developer-and-contributor-corner/kubernetes-k8s-netdata.txt @@ -0,0 +1,234 @@ +# Kubernetes monitoring with Netdata + +This document gives an overview of what visualizations Netdata provides on Kubernetes deployments. + +At Netdata, we've built Kubernetes monitoring tools that add visibility without complexity while also helping you +actively troubleshoot anomalies or outages. This guide walks you through each of the visualizations and offers best +practices on how to use them to start Kubernetes monitoring in a matter of minutes, not hours or days. + +Netdata's Kubernetes monitoring solution uses a handful of [complementary tools and +collectors](#related-reference-documentation) for peeling back the many complex layers of a Kubernetes cluster, +_entirely for free_. 
These methods work together to give you every metric you need to troubleshoot performance or +availability issues across your Kubernetes infrastructure. + +## Challenge + +While Kubernetes (k8s) might simplify the way you deploy, scale, and load-balance your applications, not all clusters +come with "batteries included" when it comes to monitoring. Doubly so for a monitoring stack that helps you actively +troubleshoot issues with your cluster. + +Some k8s providers, like GKE (Google Kubernetes Engine), do deploy clusters bundled with monitoring capabilities, such +as Google Stackdriver Monitoring. However, these pre-configured solutions might not offer the depth of metrics, +customization, or integration with your preferred alerting methods. + +Without this visibility, it's like you built an entire house and _then_ smashed your way through the finished walls to +add windows. + +## Solution + +In this tutorial, you'll learn how to navigate Netdata's Kubernetes monitoring features, using +[robot-shop](https://github.com/instana/robot-shop) as an example deployment. Deploying robot-shop is purely optional. +You can also follow along with your own Kubernetes deployment if you choose. While the metrics might be different, the +navigation and best practices are the same for every cluster. + +## What you need to get started + +To follow this tutorial, you need: + +- A free Netdata Cloud account. [Sign up](https://app.netdata.cloud/sign-up?cloudRoute=/spaces) if you don't have one + already. +- A working cluster running Kubernetes v1.9 or newer, with a Netdata deployment and connected parent/child nodes. See + our [Kubernetes deployment process](/packaging/installer/methods/kubernetes.md) for details on deployment and + connecting to Cloud. +- The [`kubectl`](https://kubernetes.io/docs/reference/kubectl/overview/) command line tool, within [one minor version + difference](https://kubernetes.io/docs/tasks/tools/install-kubectl/#before-you-begin) of your cluster, on an + administrative system. +- The [Helm package manager](https://helm.sh/) v3.0.0 or newer on the same administrative system. + +### Install the `robot-shop` demo (optional) + +Begin by downloading the robot-shop code and using `helm` to create a new deployment. + +```bash +git clone git@github.com:instana/robot-shop.git +cd robot-shop/K8s/helm +kubectl create ns robot-shop +helm install robot-shop --namespace robot-shop . +``` + +Running `kubectl get pods` shows both the Netdata and robot-shop deployments. 
+ +```bash +kubectl get pods --all-namespaces +NAMESPACE NAME READY STATUS RESTARTS AGE +default netdata-child-29f9c 2/2 Running 0 10m +default netdata-child-8xphf 2/2 Running 0 10m +default netdata-child-jdvds 2/2 Running 0 11m +default netdata-parent-554c755b7d-qzrx4 1/1 Running 0 11m +kube-system aws-node-jnjv8 1/1 Running 0 17m +kube-system aws-node-svzdb 1/1 Running 0 17m +kube-system aws-node-ts6n2 1/1 Running 0 17m +kube-system coredns-559b5db75d-f58hp 1/1 Running 0 22h +kube-system coredns-559b5db75d-tkzj2 1/1 Running 0 22h +kube-system kube-proxy-9p9cd 1/1 Running 0 17m +kube-system kube-proxy-lt9ss 1/1 Running 0 17m +kube-system kube-proxy-n75t9 1/1 Running 0 17m +robot-shop cart-b4bbc8fff-t57js 1/1 Running 0 14m +robot-shop catalogue-8b5f66c98-mr85z 1/1 Running 0 14m +robot-shop dispatch-67d955c7d8-lnr44 1/1 Running 0 14m +robot-shop mongodb-7f65d86c-dsslc 1/1 Running 0 14m +robot-shop mysql-764c4c5fc7-kkbnf 1/1 Running 0 14m +robot-shop payment-67c87cb7d-5krxv 1/1 Running 0 14m +robot-shop rabbitmq-5bb66bb6c9-6xr5b 1/1 Running 0 14m +robot-shop ratings-94fd9c75b-42wvh 1/1 Running 0 14m +robot-shop redis-0 0/1 Pending 0 14m +robot-shop shipping-7d69cb88b-w7hpj 1/1 Running 0 14m +robot-shop user-79c445b44b-hwnm9 1/1 Running 0 14m +robot-shop web-8bb887476-lkcjx 1/1 Running 0 14m +``` + +## Explore Netdata's Kubernetes monitoring charts + +The Netdata Helm chart deploys and enables everything you need for monitoring Kubernetes on every layer. Once you deploy +Netdata and connect your cluster's nodes, you're ready to check out the visualizations **with zero configuration**. + +To get started, [sign in](https://app.netdata.cloud/sign-in?cloudRoute=/spaces) to your Netdata Cloud account. Head over +to the Room you connected your cluster to (the **General** Room by default). + +Let's walk through monitoring each layer of a Kubernetes cluster using the Overview as our framework. + +## Cluster and node metrics + +The gauges and time-series charts you see right away in the Overview show aggregated metrics from every node in your +cluster. + +For example, the `apps.cpu` chart (in the **Applications** menu item) visualizes the CPU utilization of various +applications/services running on each of the nodes in your cluster. The **X Nodes** dropdown shows which nodes +contribute to the chart and links to jump to a single-node dashboard for further investigation. + +![Per-application monitoring in a Kubernetes +cluster](https://user-images.githubusercontent.com/1153921/109042169-19c8fa00-768d-11eb-91a7-1a7afc41fea2.png) + +For example, the chart above shows a spike in the CPU utilization from `rabbitmq` every minute or so, along with a +baseline CPU utilization of 10-15% across the cluster. + +## Pod and container metrics + +Click on the **Kubernetes xxxxxxx...** section to jump down to Netdata Cloud's unique Kubernetes visualizations for viewing +real-time resource utilization metrics from your Kubernetes pods and containers. + +![Navigating to the Kubernetes monitoring +visualizations](https://user-images.githubusercontent.com/1153921/109049195-349f6c80-7695-11eb-8902-52a029dca77f.png) + +### Health map + +The first visualization is the [health map](/docs/dashboards-and-charts/kubernetes-tab.md#health-map), +which places each container into its own box, then varies the intensity of its color to visualize the resource +utilization. By default, the health map shows the **average CPU utilization as a percentage of the configured limit** +for every container in your cluster.
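Everything the health map shows can be cross-checked against the cluster itself from the command line. A couple of illustrative commands, assuming the robot-shop namespace from this tutorial (`kubectl top` additionally requires metrics-server in the cluster):

```bash
# List the robot-shop pods that populate the health map boxes.
kubectl get pods --namespace robot-shop

# Compare live CPU/memory usage per pod (requires metrics-server).
kubectl top pods --namespace robot-shop
```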
+ +![The Kubernetes health map in Netdata +Cloud](https://user-images.githubusercontent.com/1153921/109050085-3f0e3600-7696-11eb-988f-52cb187f53ea.png) + +Let's explore the most colorful box by hovering over it. + +![Hovering over a +container](https://user-images.githubusercontent.com/1153921/109049544-a8417980-7695-11eb-80a7-109b4a645a27.png) + +The **Context** tab shows `rabbitmq-5bb66bb6c9-6xr5b` as the container's image name, which means this container is +running a [RabbitMQ](/src/go/plugin/go.d/modules/rabbitmq/README.md) workload. + +Click the **Metrics** tab to see real-time metrics from that container. Unsurprisingly, it shows a spike in CPU +utilization at regular intervals. + +![Viewing real-time container +metrics](https://user-images.githubusercontent.com/1153921/109050482-aa580800-7696-11eb-9e3e-d3bdf0f3eff7.png) + +### Time-series charts + +Beneath the health map is a variety of time-series charts that help you visualize resource utilization over time, which +is useful for targeted troubleshooting. + +The default is to display metrics grouped by the `k8s_namespace` label, which shows resource utilization based on your +different namespaces. + +![Time-series Kubernetes monitoring in Netdata +Cloud](https://user-images.githubusercontent.com/1153921/109075210-126a1680-76b6-11eb-918d-5acdcdac152d.png) + +Each composite chart has a [definition bar](/docs/dashboards-and-charts/netdata-charts.md#definition-bar) +for complete customization. For example, grouping the top chart by `k8s_container_name` reveals new information. + +![Changing time-series charts](https://user-images.githubusercontent.com/1153921/109075212-139b4380-76b6-11eb-836f-939482ae55fc.png) + +## Service metrics + +Netdata has a [service discovery plugin](https://github.com/netdata/agent-service-discovery), which discovers and +creates configuration files for [compatible +services](https://github.com/netdata/helmchart#service-discovery-and-supported-services) and any endpoints covered by +our [generic Prometheus collector](/src/go/plugin/go.d/modules/prometheus/README.md). +Netdata uses these files to collect metrics from any compatible application as they run _inside_ of a pod. Service +discovery happens without manual intervention as pods are created, destroyed, or moved between nodes. + +Service metrics show up on the Overview as well, beneath the **Kubernetes** section, and are labeled according to the +service in question. For example, the **RabbitMQ** section has numerous charts from the [`rabbitmq` +collector](/src/go/plugin/go.d/modules/rabbitmq/README.md): + +![Finding service discovery +metrics](https://user-images.githubusercontent.com/1153921/109054511-2eac8a00-769b-11eb-97f1-da93acb4b5fe.png) + +> The robot-shop cluster has more supported services, such as MySQL, which are not visible with zero configuration. This +> is usually because of services running on non-default ports, using non-default names, or required passwords. Read up +> on [configuring service discovery](/packaging/installer/methods/kubernetes.md#configure-service-discovery) to collect +> more service metrics. + +Service metrics are essential to infrastructure monitoring, as they're the best indicator of the end-user experience, +and key signals for troubleshooting anomalies or issues. + +## Kubernetes components + +Netdata also automatically collects metrics from two essential Kubernetes processes. 
+ +### kubelet + +The **k8s kubelet** section visualizes metrics from the Kubernetes agent responsible for managing every pod on a given +node. This also happens without any configuration thanks to the [kubelet +collector](/src/go/plugin/go.d/modules/k8s_kubelet/README.md). + +Monitoring each node's kubelet can be invaluable when diagnosing issues with your Kubernetes cluster. For example, you +can see if the number of running containers/pods has dropped, which could signal a fault or crash in a particular +Kubernetes service or deployment (see `kubectl get services` or `kubectl get deployments` for more details). If the +number of pods increases, it may be because of something more benign, like another team member scaling up a +service with `kubectl scale`. + +You can also view charts for the Kubelet API server, the volume of runtime/Docker operations by type, +configuration-related errors, and the actual vs. desired numbers of volumes, plus a lot more. + +### kube-proxy + +The **k8s kube-proxy** section displays metrics about the network proxy that runs on each node in your Kubernetes +cluster. kube-proxy lets pods communicate with each other and accept sessions from outside your cluster. Its metrics are +collected by the [kube-proxy +collector](/src/go/plugin/go.d/modules/k8s_kubeproxy/README.md). + +With Netdata, you can monitor how often your k8s proxies are syncing proxy rules between nodes. Dramatic changes in +these figures could indicate an anomaly in your cluster that's worthy of further investigation. + +## What's next? + +After reading this guide, you should now be able to monitor any Kubernetes cluster with Netdata, including nodes, pods, +containers, services, and more. + +With the health map, time-series charts, and the ability to drill down into individual nodes, you can see hundreds of +per-second metrics with zero configuration and less time remembering all the `kubectl` options. Netdata moves with your +cluster, automatically picking up new nodes or services as your infrastructure scales. And it's entirely free for +clusters of all sizes. + +### Related reference documentation + +- [Netdata Helm chart](https://github.com/netdata/helmchart) +- [Netdata service discovery](https://github.com/netdata/agent-service-discovery) +- [Netdata Agent · `kubelet` + collector](/src/go/plugin/go.d/modules/k8s_kubelet/README.md) +- [Netdata Agent · `kube-proxy` + collector](/src/go/plugin/go.d/modules/k8s_kubeproxy/README.md) +- [Netdata Agent · `cgroups.plugin`](/src/collectors/cgroups.plugin/README.md) diff --git a/docs/developer-and-contributor-corner/lamp-stack.md b/docs/developer-and-contributor-corner/lamp-stack.md deleted file mode 100644 index 2df5a7167..000000000 --- a/docs/developer-and-contributor-corner/lamp-stack.md +++ /dev/null @@ -1,238 +0,0 @@ -import { OneLineInstallWget } from '@site/src/components/OneLineInstall/' - -# LAMP stack monitoring with Netdata - -Set up robust LAMP stack monitoring (Linux, Apache, MySQL, PHP) in a few minutes using Netdata. - -The LAMP stack is the "hello world" for deploying dynamic web applications. It's fast, flexible, and reliable, which -means a developer or sysadmin won't go far in their career without interacting with the stack and its services. - -_LAMP_ is an acronym of the core services that make up the web application: **L**inux, **A**pache, **M**ySQL, and -**P**HP. - -- [Linux](https://en.wikipedia.org/wiki/Linux) is the operating system running the whole stack. 
-- [Apache](https://httpd.apache.org/) is a web server that responds to HTTP requests from users and returns web pages. -- [MySQL](https://www.mysql.com/) is a database that stores and returns information based on queries from the web - application. -- [PHP](https://www.php.net/) is a scripting language used to query the MySQL database and build new pages. - -LAMP stacks are the foundation for tons of end-user applications, with [Wordpress](https://wordpress.org/) being the -most popular. - -## Challenge - -You've already deployed a LAMP stack, either in testing or production. You want to monitor every service's performance -and availability to ensure the best possible experience for your end-users. You might also be particularly interested in -using a free, open-source monitoring tool. - -Depending on your monitoring experience, you may not even know what metrics you're looking for, much less how to build -dashboards using a query language. You need a robust monitoring experience that has the metrics you need without a ton -of required setup. - -## Solution - -In this tutorial, you'll set up robust LAMP stack monitoring with Netdata in just a few minutes. When you're done, -you'll have one dashboard to monitor every part of your web application, including each essential LAMP stack service. - -This dashboard updates every second with new metrics, and pairs those metrics up with preconfigured alerts to keep you -informed of any errors or odd behavior. - -## What you need to get started - -To follow this tutorial, you need: - -- A physical or virtual Linux system, which we'll call a _node_. -- A functional LAMP stack. There's plenty of tutorials for installing a LAMP stack, like [this - one](https://www.digitalocean.com/community/tutorials/how-to-install-linux-apache-mysql-php-lamp-stack-ubuntu-18-04) - from Digital Ocean. -- Optionally, a [Netdata Cloud](https://app.netdata.cloud/sign-up?cloudRoute=/spaces) account, which you can use to view - metrics from multiple nodes in one dashboard, and a whole lot more, for free. - -## Install the Netdata Agent - -If you don't have the free, open-source Netdata monitoring agent installed on your node yet, get started with a [single -kickstart command](/packaging/installer/README.md): - - - -The Netdata Agent is now collecting metrics from your node every second. You don't need to jump into the dashboard yet, -but if you're curious, open your favorite browser and navigate to `http://localhost:19999` or `http://NODE:19999`, -replacing `NODE` with the hostname or IP address of your system. - -## Enable hardware and Linux system monitoring - -There's nothing you need to do to enable system monitoring and Linux monitoring with -the Netdata Agent, which autodetects metrics from CPUs, memory, disks, networking devices, and Linux processes like -systemd without any configuration. If you're using containers, Netdata automatically collects resource utilization -metrics from each using the [cgroups data collector](/src/collectors/cgroups.plugin/README.md). - -## Enable Apache monitoring - -Let's begin by configuring Apache to work with Netdata's [Apache data -collector](/src/go/plugin/go.d/modules/apache/README.md). - -Actually, there's nothing for you to do to enable Apache monitoring with Netdata. - -Apache comes with `mod_status` enabled by default these days, and Netdata is smart enough to look for metrics at that -endpoint without you configuring it. 
Netdata is already collecting [`mod_status` -metrics](https://httpd.apache.org/docs/2.4/mod/mod_status.html), which is just _part_ of your web server monitoring. - -## Enable web log monitoring - -The Netdata Agent also comes with a [web log -collector](/src/go/plugin/go.d/modules/weblog/README.md), which reads Apache's access -log file, processes each line, and converts them into per-second metrics. On Debian systems, it reads the file at -`/var/log/apache2/access.log`. - -At installation, the Netdata Agent adds itself to the [`adm` -group](https://wiki.debian.org/SystemGroups#Groups_without_an_associated_user), which gives the `netdata` process the -right privileges to read Apache's log files. In other words, you don't need to do anything to enable Apache web log -monitoring. - -## Enable MySQL monitoring - -Because your MySQL database is password-protected, you do need to tell MySQL to allow the `netdata` user to connect to -without a password. Netdata's [MySQL data -collector](/src/go/plugin/go.d/modules/mysql/README.md) collects metrics in _read-only_ -mode, without being able to alter or affect operations in any way. - -First, log into the MySQL shell. Then, run the following three commands, one at a time: - -```mysql -CREATE USER 'netdata'@'localhost'; -GRANT USAGE, REPLICATION CLIENT, PROCESS ON *.* TO 'netdata'@'localhost'; -FLUSH PRIVILEGES; -``` - -Run `sudo systemctl restart netdata`, or the [appropriate alternative for your -system](/packaging/installer/README.md#maintaining-a-netdata-agent-installation), to collect dozens of metrics every second for robust MySQL monitoring. - -## Enable PHP monitoring - -Unlike Apache or MySQL, PHP isn't a service that you can monitor directly, unless you instrument a PHP-based application -with [StatsD](/src/collectors/statsd.plugin/README.md). - -However, if you use [PHP-FPM](https://php-fpm.org/) in your LAMP stack, you can monitor that process with our [PHP-FPM -data collector](/src/go/plugin/go.d/modules/phpfpm/README.md). - -Open your PHP-FPM configuration for editing, replacing `7.4` with your version of PHP: - -```bash -sudo nano /etc/php/7.4/fpm/pool.d/www.conf -``` - -> Not sure what version of PHP you're using? Run `php -v`. - -Find the line that reads `;pm.status_path = /status` and remove the `;` so it looks like this: - -```conf -pm.status_path = /status -``` - -Next, add a new `/status` endpoint to Apache. Open the Apache configuration file you're using for your LAMP stack. - -```bash -sudo nano /etc/apache2/sites-available/your_lamp_stack.conf -``` - -Add the following to the end of the file, again replacing `7.4` with your version of PHP: - -```apache -ProxyPass "/status" "unix:/run/php/php7.4-fpm.sock|fcgi://localhost" -``` - -Save and close the file. Finally, restart the PHP-FPM, Apache, and Netdata processes. - -```bash -sudo systemctl restart php7.4-fpm.service -sudo systemctl restart apache2 -sudo systemctl restart netdata -``` - -As the Netdata Agent starts up again, it automatically connects to the new `127.0.0.1/status` page and collects -per-second PHP-FPM metrics to get you started with PHP monitoring. - -## View LAMP stack metrics - -If the Netdata Agent isn't already open in your browser, open a new tab and navigate to `http://localhost:19999` or -`http://NODE:19999`, replacing `NODE` with the hostname or IP address of your system. 
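Prefer the terminal? You can confirm the Agent is up and serving data before you open the dashboard — a quick sanity check, assuming the default port of `19999`:

```bash
# Returns build, version, and host info as JSON if the Agent is running
curl -s http://localhost:19999/api/v1/info
```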
- -> If you [signed up](https://app.netdata.cloud/sign-up?cloudRoute=/spaces) for Netdata Cloud earlier, you can also view -> the exact same LAMP stack metrics there, plus additional features, like drag-and-drop custom dashboards. Be sure to -> [connecting your node](/src/claim/README.md) to start streaming metrics to your browser through Netdata Cloud. - -Netdata automatically organizes all metrics and charts onto a single page for easy navigation. Peek at gauges to see -overall system performance, then scroll down to see more. Click-and-drag with your mouse to pan _all_ charts back and -forth through different time intervals, or hold `SHIFT` and use the scrollwheel (or two-finger scroll) to zoom in and -out. Check out our doc on [interacting with charts](/docs/dashboards-and-charts/netdata-charts.md) for all the details. - -![The Netdata dashboard](https://user-images.githubusercontent.com/1153921/109520555-98e17800-7a69-11eb-86ec-16f689da4527.png) - -The **System Overview** section, which you can also see in the right-hand menu, contains key hardware monitoring charts, -including CPU utilization, memory page faults, network monitoring, and much more. The **Applications** section shows you -exactly which Linux processes are using the most system resources. - -Next, let's check out LAMP-specific metrics. You should see four relevant sections: **Apache local**, **MySQL local**, -**PHP-FPM local**, and **web log apache**. Click on any of these to see metrics from each service in your LAMP stack. - -![LAMP stack monitoring in -Netdata](https://user-images.githubusercontent.com/1153921/109516332-49994880-7a65-11eb-807c-3cba045582e6.png) - -### Key LAMP stack monitoring charts - -Here's a quick reference for what charts you might want to focus on after setting up Netdata. - -| Chart name / context | Type | Why? | -|-------------------------------------------------------|---------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| System Load Average (`system.load`) | Hardware monitoring | A good baseline load average is `0.7`, while `1` (on a 1-core system, `2` on a 2-core system, and so on) means resources are "perfectly" utilized. Higher load indicates a bottleneck somewhere in your system. | -| System RAM (`system.ram`) | Hardware monitoring | Look at the `free` dimension. If that drops to `0`, your system will use swap memory and slow down. | -| Uptime (`apache_local.uptime`) | Apache monitoring | This chart should always be "climbing," indicating a continuous uptime. Investigate any drops back to `0`. | -| Requests By Type (`web_log_apache.requests_by_type`) | Apache monitoring | Check for increases in the `error` or `bad` dimensions, which could indicate users arriving at broken pages or PHP returning errors. | -| Queries (`mysql_local.queries`) | MySQL monitoring | Queries is the total number of queries (queries per second, QPS). Check this chart for sudden spikes or drops, which indicate either increases in traffic/demand or bottlenecks in hardware performance. | -| Active Connections (`mysql_local.connections_active`) | MySQL monitoring | If the `active` dimension nears the `limit`, your MySQL database will bottleneck responses. | -| Performance (phpfpm_local.performance) | PHP monitoring | The `slow requests` dimension lets you know if any requests exceed the configured `request_slowlog_timeout`. 
If so, users might be having a less-than-ideal experience. | - -## Get alerts for LAMP stack errors - -The Netdata Agent comes with hundreds of pre-configured alerts to help you keep tabs on your system, including 19 alerts -designed for smarter LAMP stack monitoring. - -Click the 🔔 icon in the top navigation to [see active alerts](/docs/dashboards-and-charts/alerts-tab.md). The **Active** tabs -shows any alerts currently triggered, while the **All** tab displays a list of _every_ pre-configured alert. The - -![An example of LAMP stack -alerts](https://user-images.githubusercontent.com/1153921/109524120-5883f900-7a6d-11eb-830e-0e7baaa28163.png) - -[Tweak alerts](/src/health/REFERENCE.md) based on your infrastructure monitoring needs, and to see these alerts -in other places, like your inbox or a Slack channel, [enable a notification -method](/docs/alerts-and-notifications/notifications/README.md). - -## What's next? - -You've now set up robust monitoring for your entire LAMP stack: Linux, Apache, MySQL, and PHP (-FPM, to be exact). These -metrics will help you keep tabs on the performance and availability of your web application and all its essential -services. The per-second metrics granularity means you have the most accurate information possible for troubleshooting -any LAMP-related issues. - -Another powerful way to monitor the availability of a LAMP stack is the [`httpcheck` -collector](/src/go/plugin/go.d/modules/httpcheck/README.md), which pings a web server at -a regular interval and tells you whether if and how quickly it's responding. The `response_match` option also lets you -monitor when the web server's response isn't what you expect it to be, which might happen if PHP-FPM crashes, for -example. - -The best way to use the `httpcheck` collector is from a separate node from the one running your LAMP stack, which is why -we're not covering it here, but it _does_ work in a single-node setup. Just don't expect it to tell you if your whole -node crashed. - -If you're planning on managing more than one node, or want to take advantage of advanced features, like finding the -source of issues faster with [Metric Correlations](/docs/metric-correlations.md), -[sign up](https://app.netdata.cloud/sign-up?cloudRoute=/spaces) for a free Netdata Cloud account. - -### Related reference documentation - -- [Netdata Agent · Get started](/packaging/installer/README.md) -- [Netdata Agent · Apache data collector](/src/go/plugin/go.d/modules/apache/README.md) -- [Netdata Agent · Web log collector](/src/go/plugin/go.d/modules/weblog/README.md) -- [Netdata Agent · MySQL data collector](/src/go/plugin/go.d/modules/mysql/README.md) -- [Netdata Agent · PHP-FPM data collector](/src/go/plugin/go.d/modules/phpfpm/README.md) - diff --git a/docs/developer-and-contributor-corner/lamp-stack.txt b/docs/developer-and-contributor-corner/lamp-stack.txt new file mode 100644 index 000000000..bc4611ac1 --- /dev/null +++ b/docs/developer-and-contributor-corner/lamp-stack.txt @@ -0,0 +1,237 @@ +import { OneLineInstallWget } from '@site/src/components/OneLineInstall/' + +# LAMP stack monitoring with Netdata + +Set up robust LAMP stack monitoring (Linux, Apache, MySQL, PHP) in a few minutes using Netdata. + +The LAMP stack is the "hello world" for deploying dynamic web applications. It's fast, flexible, and reliable, which +means a developer or sysadmin won't go far in their career without interacting with the stack and its services. 
+ +_LAMP_ is an acronym of the core services that make up the web application: **L**inux, **A**pache, **M**ySQL, and +**P**HP. + +- [Linux](https://en.wikipedia.org/wiki/Linux) is the operating system running the whole stack. +- [Apache](https://httpd.apache.org/) is a web server that responds to HTTP requests from users and returns web pages. +- [MySQL](https://www.mysql.com/) is a database that stores and returns information based on queries from the web + application. +- [PHP](https://www.php.net/) is a scripting language used to query the MySQL database and build new pages. + +LAMP stacks are the foundation for tons of end-user applications, with [Wordpress](https://wordpress.org/) being the +most popular. + +## Challenge + +You've already deployed a LAMP stack, either in testing or production. You want to monitor every service's performance +and availability to ensure the best possible experience for your end-users. You might also be particularly interested in +using a free, open-source monitoring tool. + +Depending on your monitoring experience, you may not even know what metrics you're looking for, much less how to build +dashboards using a query language. You need a robust monitoring experience that has the metrics you need without a ton +of required setup. + +## Solution + +In this tutorial, you'll set up robust LAMP stack monitoring with Netdata in just a few minutes. When you're done, +you'll have one dashboard to monitor every part of your web application, including each essential LAMP stack service. + +This dashboard updates every second with new metrics, and pairs those metrics up with preconfigured alerts to keep you +informed of any errors or odd behavior. + +## What you need to get started + +To follow this tutorial, you need: + +- A physical or virtual Linux system, which we'll call a _node_. +- A functional LAMP stack. There's plenty of tutorials for installing a LAMP stack, like [this + one](https://www.digitalocean.com/community/tutorials/how-to-install-linux-apache-mysql-php-lamp-stack-ubuntu-18-04) + from Digital Ocean. +- Optionally, a [Netdata Cloud](https://app.netdata.cloud/sign-up?cloudRoute=/spaces) account, which you can use to view + metrics from multiple nodes in one dashboard, and a whole lot more, for free. + +## Install the Netdata Agent + +If you don't have the free, open-source Netdata monitoring agent installed on your node yet, get started with a [single +kickstart command](/packaging/installer/README.md): + + + +The Netdata Agent is now collecting metrics from your node every second. You don't need to jump into the dashboard yet, +but if you're curious, open your favorite browser and navigate to `http://localhost:19999` or `http://NODE:19999`, +replacing `NODE` with the hostname or IP address of your system. + +## Enable hardware and Linux system monitoring + +There's nothing you need to do to enable system monitoring and Linux monitoring with +the Netdata Agent, which autodetects metrics from CPUs, memory, disks, networking devices, and Linux processes like +systemd without any configuration. If you're using containers, Netdata automatically collects resource utilization +metrics from each using the [cgroups data collector](/src/collectors/cgroups.plugin/README.md). + +## Enable Apache monitoring + +Let's begin by configuring Apache to work with Netdata's [Apache data +collector](/src/go/plugin/go.d/modules/apache/README.md). + +Actually, there's nothing for you to do to enable Apache monitoring with Netdata. 
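If you'd like to see exactly what Netdata will scrape, you can query Apache's machine-readable status output yourself — a quick check, assuming `mod_status` serves the default `/server-status` location:

```bash
# Machine-readable mod_status output — the same fields Netdata parses every second
curl -s "http://localhost/server-status?auto"
```

If this returns counters like `Total Accesses` and `BusyWorkers`, the collector has everything it needs.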
+ +Apache comes with `mod_status` enabled by default these days, and Netdata is smart enough to look for metrics at that +endpoint without you configuring it. Netdata is already collecting [`mod_status` +metrics](https://httpd.apache.org/docs/2.4/mod/mod_status.html), which is just _part_ of your web server monitoring. + +## Enable web log monitoring + +The Netdata Agent also comes with a [web log +collector](/src/go/plugin/go.d/modules/weblog/README.md), which reads Apache's access +log file, processes each line, and converts them into per-second metrics. On Debian systems, it reads the file at +`/var/log/apache2/access.log`. + +At installation, the Netdata Agent adds itself to the [`adm` +group](https://wiki.debian.org/SystemGroups#Groups_without_an_associated_user), which gives the `netdata` process the +right privileges to read Apache's log files. In other words, you don't need to do anything to enable Apache web log +monitoring. + +## Enable MySQL monitoring + +Because your MySQL database is password-protected, you do need to tell MySQL to allow the `netdata` user to connect to +without a password. Netdata's [MySQL data +collector](/src/go/plugin/go.d/modules/mysql/README.md) collects metrics in _read-only_ +mode, without being able to alter or affect operations in any way. + +First, log into the MySQL shell. Then, run the following three commands, one at a time: + +```mysql +CREATE USER 'netdata'@'localhost'; +GRANT USAGE, REPLICATION CLIENT, PROCESS ON *.* TO 'netdata'@'localhost'; +FLUSH PRIVILEGES; +``` + +Run `sudo systemctl restart netdata`, or the [appropriate alternative for your system](/docs/netdata-agent/start-stop-restart.md), to collect dozens of metrics every second for robust MySQL monitoring. + +## Enable PHP monitoring + +Unlike Apache or MySQL, PHP isn't a service that you can monitor directly, unless you instrument a PHP-based application +with [StatsD](/src/collectors/statsd.plugin/README.md). + +However, if you use [PHP-FPM](https://php-fpm.org/) in your LAMP stack, you can monitor that process with our [PHP-FPM +data collector](/src/go/plugin/go.d/modules/phpfpm/README.md). + +Open your PHP-FPM configuration for editing, replacing `7.4` with your version of PHP: + +```bash +sudo nano /etc/php/7.4/fpm/pool.d/www.conf +``` + +> Not sure what version of PHP you're using? Run `php -v`. + +Find the line that reads `;pm.status_path = /status` and remove the `;` so it looks like this: + +```text +pm.status_path = /status +``` + +Next, add a new `/status` endpoint to Apache. Open the Apache configuration file you're using for your LAMP stack. + +```bash +sudo nano /etc/apache2/sites-available/your_lamp_stack.conf +``` + +Add the following to the end of the file, again replacing `7.4` with your version of PHP: + +```apache +ProxyPass "/status" "unix:/run/php/php7.4-fpm.sock|fcgi://localhost" +``` + +Save and close the file. Finally, restart the PHP-FPM, Apache, and Netdata processes. + +```bash +sudo systemctl restart php7.4-fpm.service +sudo systemctl restart apache2 +sudo systemctl restart netdata +``` + +As the Netdata Agent starts up again, it automatically connects to the new `127.0.0.1/status` page and collects +per-second PHP-FPM metrics to get you started with PHP monitoring. + +## View LAMP stack metrics + +If the Netdata Agent isn't already open in your browser, open a new tab and navigate to `http://localhost:19999` or +`http://NODE:19999`, replacing `NODE` with the hostname or IP address of your system. 
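You can also verify the LAMP collectors from the command line with the Agent's API — a sketch that assumes the default job names used throughout this tutorial:

```bash
# List collected charts and filter for the LAMP services set up above
curl -s http://localhost:19999/api/v1/charts \
  | grep -oE '"(apache_local|web_log_apache|mysql_local|phpfpm_local)\.[^"]*"' \
  | sort -u
```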
+ +> If you [signed up](https://app.netdata.cloud/sign-up?cloudRoute=/spaces) for Netdata Cloud earlier, you can also view +> the exact same LAMP stack metrics there, plus additional features, like drag-and-drop custom dashboards. Be sure to +> [connecting your node](/src/claim/README.md) to start streaming metrics to your browser through Netdata Cloud. + +Netdata automatically organizes all metrics and charts onto a single page for easy navigation. Peek at gauges to see +overall system performance, then scroll down to see more. Click-and-drag with your mouse to pan _all_ charts back and +forth through different time intervals, or hold `SHIFT` and use the scrollwheel (or two-finger scroll) to zoom in and +out. Check out our doc on [interacting with charts](/docs/dashboards-and-charts/netdata-charts.md) for all the details. + +![The Netdata dashboard](https://user-images.githubusercontent.com/1153921/109520555-98e17800-7a69-11eb-86ec-16f689da4527.png) + +The **System Overview** section, which you can also see in the right-hand menu, contains key hardware monitoring charts, +including CPU utilization, memory page faults, network monitoring, and much more. The **Applications** section shows you +exactly which Linux processes are using the most system resources. + +Next, let's check out LAMP-specific metrics. You should see four relevant sections: **Apache local**, **MySQL local**, +**PHP-FPM local**, and **web log apache**. Click on any of these to see metrics from each service in your LAMP stack. + +![LAMP stack monitoring in +Netdata](https://user-images.githubusercontent.com/1153921/109516332-49994880-7a65-11eb-807c-3cba045582e6.png) + +### Key LAMP stack monitoring charts + +Here's a quick reference for what charts you might want to focus on after setting up Netdata. + +| Chart name / context | Type | Why? | +|-------------------------------------------------------|---------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| System Load Average (`system.load`) | Hardware monitoring | A good baseline load average is `0.7`, while `1` (on a 1-core system, `2` on a 2-core system, and so on) means resources are "perfectly" utilized. Higher load indicates a bottleneck somewhere in your system. | +| System RAM (`system.ram`) | Hardware monitoring | Look at the `free` dimension. If that drops to `0`, your system will use swap memory and slow down. | +| Uptime (`apache_local.uptime`) | Apache monitoring | This chart should always be "climbing," indicating a continuous uptime. Investigate any drops back to `0`. | +| Requests By Type (`web_log_apache.requests_by_type`) | Apache monitoring | Check for increases in the `error` or `bad` dimensions, which could indicate users arriving at broken pages or PHP returning errors. | +| Queries (`mysql_local.queries`) | MySQL monitoring | Queries is the total number of queries (queries per second, QPS). Check this chart for sudden spikes or drops, which indicate either increases in traffic/demand or bottlenecks in hardware performance. | +| Active Connections (`mysql_local.connections_active`) | MySQL monitoring | If the `active` dimension nears the `limit`, your MySQL database will bottleneck responses. | +| Performance (phpfpm_local.performance) | PHP monitoring | The `slow requests` dimension lets you know if any requests exceed the configured `request_slowlog_timeout`. 
If so, users might be having a less-than-ideal experience. | + +## Get alerts for LAMP stack errors + +The Netdata Agent comes with hundreds of pre-configured alerts to help you keep tabs on your system, including 19 alerts +designed for smarter LAMP stack monitoring. + +Click the 🔔 icon in the top navigation to [see active alerts](/docs/dashboards-and-charts/alerts-tab.md). The **Active** tabs +shows any alerts currently triggered, while the **All** tab displays a list of _every_ pre-configured alert. The + +![An example of LAMP stack +alerts](https://user-images.githubusercontent.com/1153921/109524120-5883f900-7a6d-11eb-830e-0e7baaa28163.png) + +[Tweak alerts](/src/health/REFERENCE.md) based on your infrastructure monitoring needs, and to see these alerts +in other places, like your inbox or a Slack channel, [enable a notification +method](/docs/alerts-and-notifications/notifications/README.md). + +## What's next? + +You've now set up robust monitoring for your entire LAMP stack: Linux, Apache, MySQL, and PHP (-FPM, to be exact). These +metrics will help you keep tabs on the performance and availability of your web application and all its essential +services. The per-second metrics granularity means you have the most accurate information possible for troubleshooting +any LAMP-related issues. + +Another powerful way to monitor the availability of a LAMP stack is the [`httpcheck` +collector](/src/go/plugin/go.d/modules/httpcheck/README.md), which pings a web server at +a regular interval and tells you whether if and how quickly it's responding. The `response_match` option also lets you +monitor when the web server's response isn't what you expect it to be, which might happen if PHP-FPM crashes, for +example. + +The best way to use the `httpcheck` collector is from a separate node from the one running your LAMP stack, which is why +we're not covering it here, but it _does_ work in a single-node setup. Just don't expect it to tell you if your whole +node crashed. + +If you're planning on managing more than one node, or want to take advantage of advanced features, like finding the +source of issues faster with [Metric Correlations](/docs/metric-correlations.md), +[sign up](https://app.netdata.cloud/sign-up?cloudRoute=/spaces) for a free Netdata Cloud account. + +### Related reference documentation + +- [Netdata Agent · Get started](/packaging/installer/README.md) +- [Netdata Agent · Apache data collector](/src/go/plugin/go.d/modules/apache/README.md) +- [Netdata Agent · Web log collector](/src/go/plugin/go.d/modules/weblog/README.md) +- [Netdata Agent · MySQL data collector](/src/go/plugin/go.d/modules/mysql/README.md) +- [Netdata Agent · PHP-FPM data collector](/src/go/plugin/go.d/modules/phpfpm/README.md) + diff --git a/docs/developer-and-contributor-corner/monitor-cockroachdb.md b/docs/developer-and-contributor-corner/monitor-cockroachdb.md deleted file mode 100644 index f0db12cc4..000000000 --- a/docs/developer-and-contributor-corner/monitor-cockroachdb.md +++ /dev/null @@ -1,118 +0,0 @@ - - -# Monitor CockroachDB metrics with Netdata - -[CockroachDB](https://github.com/cockroachdb/cockroach) is an open-source project that brings SQL databases into -scalable, disaster-resilient cloud deployments. Thanks to -a [new CockroachDB collector](/src/go/plugin/go.d/modules/cockroachdb/README.md) -released in -[v1.20](https://blog.netdata.cloud/posts/release-1.20/), you can now monitor any number of CockroachDB databases with -maximum granularity using Netdata. 
Collect more than 50 unique metrics and put them on interactive visualizations -designed for better visual anomaly detection. - -Netdata itself uses CockroachDB as part of its Netdata Cloud infrastructure, so we're happy to introduce this new -collector and help others get started with it straight away. - -Let's dive in and walk through the process of monitoring CockroachDB metrics with Netdata. - -## What's in this guide - -- [Monitor CockroachDB metrics with Netdata](#monitor-cockroachdb-metrics-with-netdata) - - [What's in this guide](#whats-in-this-guide) - - [Configure the CockroachDB collector](#configure-the-cockroachdb-collector) - - [Manual setup for a local CockroachDB database](#manual-setup-for-a-local-cockroachdb-database) - - [Tweak CockroachDB alerts](#tweak-cockroachdb-alerts) - -## Configure the CockroachDB collector - -Because _all_ of Netdata's collectors can auto-detect the services they monitor, you _shouldn't_ need to worry about -configuring CockroachDB. Netdata only needs to regularly query the database's `_status/vars` page to gather metrics and -display them on the dashboard. - -If your CockroachDB instance is accessible through `http://localhost:8080/` or `http://127.0.0.1:8080`, your setup is -complete. Restart Netdata with `sudo systemctl restart netdata`, or the [appropriate -method](/packaging/installer/README.md#maintaining-a-netdata-agent-installation) for your system, and refresh your browser. You should see CockroachDB -metrics in your Netdata dashboard! - -
-_Figure: CPU utilization charts from a CockroachDB database monitored by Netdata._
- -> Note: Netdata collects metrics from CockroachDB every 10 seconds, instead of our usual 1 second, because CockroachDB -> only updates `_status/vars` every 10 seconds. You can't change this setting in CockroachDB. - -If you don't see CockroachDB charts, you may need to configure the collector manually. - -### Manual setup for a local CockroachDB database - -To configure Netdata's CockroachDB collector, navigate to your Netdata configuration directory (typically at -`/etc/netdata/`) and use `edit-config` to initialize and edit your CockroachDB configuration file. - -```bash -cd /etc/netdata/ # Replace with your Netdata configuration directory, if not /etc/netdata/ -./edit-config go.d/cockroachdb.conf -``` - -Scroll down to the `[JOBS]` section at the bottom of the file. You will see the two default jobs there, which you can -edit, or create a new job with any of the parameters listed above in the file. Both the `name` and `url` values are -required, and everything else is optional. - -For a production cluster, you'll use either an IP address or the system's hostname. Be sure that your remote system -allows TCP communication on port 8080, or whichever port you have configured CockroachDB's -[Admin UI](https://www.cockroachlabs.com/docs/stable/monitoring-and-alerting.html#prometheus-endpoint) to listen on. - -```yaml -# [ JOBS ] -jobs: - - name: remote - url: http://203.0.113.0:8080/_status/vars - - - name: remote_hostname - url: http://cockroachdb.example.com:8080/_status/vars -``` - -For a secure cluster, use `https` in the `url` field instead. - -```yaml -# [ JOBS ] -jobs: - - name: remote - url: https://203.0.113.0:8080/_status/vars - tls_skip_verify: yes # If your certificate is self-signed - - - name: remote_hostname - url: https://cockroachdb.example.com:8080/_status/vars - tls_skip_verify: yes # If your certificate is self-signed -``` - -You can add as many jobs as you'd like based on how many CockroachDB databases you have—Netdata will create separate -charts for each job. Once you've edited `cockroachdb.conf` according to the needs of your infrastructure, restart -Netdata to see your new charts. - -
-_Figure: Charts showing a node failure during a simulated test._
- -## Tweak CockroachDB alerts - -This release also includes eight pre-configured alerts for live nodes, such as whether the node is live, storage -capacity, issues with replication, and the number of SQL connections/statements. See [health.d/cockroachdb.conf on -GitHub](https://raw.githubusercontent.com/netdata/netdata/master/src/health/health.d/cockroachdb.conf) for details. - -You can also edit these files directly with `edit-config`: - -```bash -cd /etc/netdata/ # Replace with your Netdata configuration directory, if not /etc/netdata/ -./edit-config health.d/cockroachdb.conf # You may need to use `sudo` for write privileges -``` - -For more information about editing the defaults or writing new alert entities, see our documentation on [configuring health alerts](/src/health/REFERENCE.md). diff --git a/docs/developer-and-contributor-corner/monitor-cockroachdb.txt b/docs/developer-and-contributor-corner/monitor-cockroachdb.txt new file mode 100644 index 000000000..d677c376c --- /dev/null +++ b/docs/developer-and-contributor-corner/monitor-cockroachdb.txt @@ -0,0 +1,118 @@ + + +# Monitor CockroachDB metrics with Netdata + +[CockroachDB](https://github.com/cockroachdb/cockroach) is an open-source project that brings SQL databases into +scalable, disaster-resilient cloud deployments. Thanks to +a [new CockroachDB collector](/src/go/plugin/go.d/modules/cockroachdb/README.md) +released in +[v1.20](https://blog.netdata.cloud/posts/release-1.20/), you can now monitor any number of CockroachDB databases with +maximum granularity using Netdata. Collect more than 50 unique metrics and put them on interactive visualizations +designed for better visual anomaly detection. + +Netdata itself uses CockroachDB as part of its Netdata Cloud infrastructure, so we're happy to introduce this new +collector and help others get started with it straight away. + +Let's dive in and walk through the process of monitoring CockroachDB metrics with Netdata. + +## What's in this guide + +- [Monitor CockroachDB metrics with Netdata](#monitor-cockroachdb-metrics-with-netdata) + - [What's in this guide](#whats-in-this-guide) + - [Configure the CockroachDB collector](#configure-the-cockroachdb-collector) + - [Manual setup for a local CockroachDB database](#manual-setup-for-a-local-cockroachdb-database) + - [Tweak CockroachDB alerts](#tweak-cockroachdb-alerts) + +## Configure the CockroachDB collector + +Because _all_ of Netdata's collectors can auto-detect the services they monitor, you _shouldn't_ need to worry about +configuring CockroachDB. Netdata only needs to regularly query the database's `_status/vars` page to gather metrics and +display them on the dashboard. + +If your CockroachDB instance is accessible through `http://localhost:8080/` or `http://127.0.0.1:8080`, your setup is +complete. Restart Netdata with `sudo systemctl restart netdata`, or the appropriate +method for your system, and refresh your browser. You should see CockroachDB +metrics in your Netdata dashboard! + +
+_Figure: CPU utilization charts from a CockroachDB database monitored by Netdata._
+ +> Note: Netdata collects metrics from CockroachDB every 10 seconds, instead of our usual 1 second, because CockroachDB +> only updates `_status/vars` every 10 seconds. You can't change this setting in CockroachDB. + +If you don't see CockroachDB charts, you may need to configure the collector manually. + +### Manual setup for a local CockroachDB database + +To configure Netdata's CockroachDB collector, navigate to your Netdata configuration directory (typically at +`/etc/netdata/`) and use `edit-config` to initialize and edit your CockroachDB configuration file. + +```bash +cd /etc/netdata/ # Replace with your Netdata configuration directory, if not /etc/netdata/ +./edit-config go.d/cockroachdb.conf +``` + +Scroll down to the `[JOBS]` section at the bottom of the file. You will see the two default jobs there, which you can +edit, or create a new job with any of the parameters listed above in the file. Both the `name` and `url` values are +required, and everything else is optional. + +For a production cluster, you'll use either an IP address or the system's hostname. Be sure that your remote system +allows TCP communication on port 8080, or whichever port you have configured CockroachDB's +[Admin UI](https://www.cockroachlabs.com/docs/stable/monitoring-and-alerting.html#prometheus-endpoint) to listen on. + +```yaml +# [ JOBS ] +jobs: + - name: remote + url: http://203.0.113.0:8080/_status/vars + + - name: remote_hostname + url: http://cockroachdb.example.com:8080/_status/vars +``` + +For a secure cluster, use `https` in the `url` field instead. + +```yaml +# [ JOBS ] +jobs: + - name: remote + url: https://203.0.113.0:8080/_status/vars + tls_skip_verify: yes # If your certificate is self-signed + + - name: remote_hostname + url: https://cockroachdb.example.com:8080/_status/vars + tls_skip_verify: yes # If your certificate is self-signed +``` + +You can add as many jobs as you'd like based on how many CockroachDB databases you have—Netdata will create separate +charts for each job. Once you've edited `cockroachdb.conf` according to the needs of your infrastructure, restart +Netdata to see your new charts. + +
+_Figure: Charts showing a node failure during a simulated test._
+ +## Tweak CockroachDB alerts + +This release also includes eight pre-configured alerts for live nodes, such as whether the node is live, storage +capacity, issues with replication, and the number of SQL connections/statements. See [health.d/cockroachdb.conf on +GitHub](https://raw.githubusercontent.com/netdata/netdata/master/src/health/health.d/cockroachdb.conf) for details. + +You can also edit these files directly with `edit-config`: + +```bash +cd /etc/netdata/ # Replace with your Netdata configuration directory, if not /etc/netdata/ +./edit-config health.d/cockroachdb.conf # You may need to use `sudo` for write privileges +``` + +For more information about editing the defaults or writing new alert entities, see our documentation on [configuring health alerts](/src/health/REFERENCE.md). diff --git a/docs/developer-and-contributor-corner/monitor-debug-applications-ebpf.md b/docs/developer-and-contributor-corner/monitor-debug-applications-ebpf.md index 91d2a2ef2..56f0276bb 100644 --- a/docs/developer-and-contributor-corner/monitor-debug-applications-ebpf.md +++ b/docs/developer-and-contributor-corner/monitor-debug-applications-ebpf.md @@ -1,13 +1,3 @@ - - # Monitor, troubleshoot, and debug applications with eBPF metrics When trying to troubleshoot or debug a finicky application, there's no such thing as too much information. At Netdata, @@ -48,7 +38,7 @@ your application's process name. Your file should now look like this: -```conf +```text ... # ----------------------------------------------------------------------------- # Custom applications to monitor with apps.plugin and ebpf.plugin @@ -60,15 +50,14 @@ dev: custom-app ... ``` -Restart Netdata with `sudo systemctl restart netdata`, or the [appropriate -method](/packaging/installer/README.md#maintaining-a-netdata-agent-installation) for your system, to begin seeing metrics for this particular +Restart Netdata with `sudo systemctl restart netdata`, or the [appropriate method](/docs/netdata-agent/start-stop-restart.md) for your system, to begin seeing metrics for this particular group+process. You can also add additional processes to the same group. You can set up `apps_groups.conf` to more show more precise eBPF metrics for any application or service running on your system, even if it's a standard package like Redis, Apache, or any other [application/service Netdata collects from](/src/collectors/COLLECTORS.md). -```conf +```text # ----------------------------------------------------------------------------- # Custom applications to monitor with apps.plugin and ebpf.plugin @@ -99,7 +88,7 @@ sudo ./edit-config ebpf.d.conf Replace `entry` with `return`: -```conf +```text [global] ebpf load mode = return disable apps = no @@ -109,8 +98,7 @@ Replace `entry` with `return`: network viewer = yes ``` -Restart Netdata with `sudo systemctl restart netdata`, or the [appropriate -method](/packaging/installer/README.md#maintaining-a-netdata-agent-installation) for your system. +Restart Netdata with `sudo systemctl restart netdata`, or the [appropriate method](/docs/netdata-agent/start-stop-restart.md) for your system. ## Get familiar with per-application eBPF metrics and charts @@ -139,7 +127,7 @@ In these charts, you can see first a spike in syscalls to open and close files f followed by a similar spike from the Apache benchmark. > 👋 Don't forget that you can view chart data directly via Netdata's API! 
-> +> > For example, open your browser and navigate to `http://NODE:19999/api/v1/data?chart=apps.file_open`, replacing `NODE` > with the IP address or hostname of your Agent. The API returns JSON of that chart's dimensions and metrics, which you > can use in other operations. @@ -245,10 +233,7 @@ Once you've added one or more nodes to a Space in Netdata Cloud, you can see agg dashboard under the same **Applications** or **eBPF** sections that you find on the local Agent dashboard. Or, [create new dashboards](/docs/dashboards-and-charts/dashboards-tab.md) using eBPF metrics from any number of distributed nodes to see how your application interacts with multiple Linux kernels on multiple Linux -systems. +systems. Now that you can see eBPF metrics in Netdata Cloud, you can [invite your team](/docs/netdata-cloud/organize-your-infrastructure-invite-your-team.md#invite-your-team) and share your findings with others. - - - diff --git a/docs/developer-and-contributor-corner/monitor-hadoop-cluster.md b/docs/developer-and-contributor-corner/monitor-hadoop-cluster.md index 98bf3d21f..8638f6d66 100644 --- a/docs/developer-and-contributor-corner/monitor-hadoop-cluster.md +++ b/docs/developer-and-contributor-corner/monitor-hadoop-cluster.md @@ -1,12 +1,3 @@ - - # Monitor a Hadoop cluster with Netdata Hadoop is an [Apache project](https://hadoop.apache.org/) is a framework for processing large sets of data across a @@ -27,8 +18,8 @@ alternative, like the guide available from For more specifics on the collection modules used in this guide, read the respective pages in our documentation: -- [HDFS](/src/go/plugin/go.d/modules/hdfs/README.md) -- [Zookeeper](/src/go/plugin/go.d/modules/zookeeper/README.md) +- [HDFS](/src/go/plugin/go.d/modules/hdfs/README.md) +- [Zookeeper](/src/go/plugin/go.d/modules/zookeeper/README.md) ## Set up your HDFS and Zookeeper installations @@ -164,7 +155,7 @@ jobs: address : 203.0.113.10:2182 ``` -Finally, [restart Netdata](/packaging/installer/README.md#maintaining-a-netdata-agent-installation). +Finally, [restart Netdata](/docs/netdata-agent/start-stop-restart.md). ```sh sudo systemctl restart netdata @@ -178,7 +169,7 @@ showing real-time metrics for both in your Netdata dashboard. 🎉 The Netdata community helped us create sane defaults for alerts related to both HDFS and Zookeeper. You may want to investigate these to ensure they work well with your Hadoop implementation. -- [HDFS alerts](https://raw.githubusercontent.com/netdata/netdata/master/src/health/health.d/hdfs.conf) +- [HDFS alerts](https://raw.githubusercontent.com/netdata/netdata/master/src/health/health.d/hdfs.conf) You can also access/edit these files directly with `edit-config`: @@ -187,5 +178,4 @@ sudo /etc/netdata/edit-config health.d/hdfs.conf sudo /etc/netdata/edit-config health.d/zookeeper.conf ``` -For more information about editing the defaults or writing new alert entities, see our -[health monitoring documentation](/src/health/README.md). +For more information about editing the defaults or writing new alert entities, see our [health monitoring documentation](/src/health/README.md). 
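For example, a small tweak to the stock HDFS capacity alert might look like the following — a sketch only, since the threshold values shown here are illustrative; check the shipped `health.d/hdfs.conf` for the exact definitions in your version:

```text
# health.d/hdfs.conf — raise the warning threshold, keeping the hysteresis pattern
 template: hdfs_capacity_usage
     warn: $this > (($status >= $WARNING)  ? (75) : (85))
     crit: $this > (($status == $CRITICAL) ? (85) : (95))
```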
diff --git a/docs/developer-and-contributor-corner/pi-hole-raspberry-pi.md b/docs/developer-and-contributor-corner/pi-hole-raspberry-pi.md deleted file mode 100644 index df6bb0809..000000000 --- a/docs/developer-and-contributor-corner/pi-hole-raspberry-pi.md +++ /dev/null @@ -1,140 +0,0 @@ - - -# Monitor Pi-hole (and a Raspberry Pi) with Netdata - -import { OneLineInstallWget } from '@site/src/components/OneLineInstall/' - -Between intrusive ads, invasive trackers, and vicious malware, many techies and homelab enthusiasts are advancing their -networks' security and speed with a tiny computer and a powerful piece of software: [Pi-hole](https://pi-hole.net/). - -Pi-hole is a DNS sinkhole that prevents unwanted content from even reaching devices on your home network. It blocks ads -and malware at the network, instead of using extensions/add-ons for individual browsers, so you'll stop seeing ads in -some of the most intrusive places, like your smart TV. Pi-hole can even [improve your network's speed and reduce -bandwidth](https://discourse.pi-hole.net/t/will-pi-hole-slow-down-my-network/2048). - -Most Pi-hole users run it on a [Raspberry Pi](https://www.raspberrypi.org/products/raspberry-pi-4-model-b/) (hence the -name), a credit card-sized, super-capable computer that costs about $35. - -And to keep tabs on how both Pi-hole and the Raspberry Pi are working to protect your network, you can use the -open-source [Netdata monitoring agent](https://github.com/netdata/netdata). - -To get started, all you need is a [Raspberry Pi](https://www.raspberrypi.org/products/raspberry-pi-4-model-b/) with -Raspbian installed. This guide uses a Raspberry Pi 4 Model B and Raspbian GNU/Linux 10 (buster). This guide assumes -you're connecting to a Raspberry Pi remotely over SSH, but you could also complete all these steps on the system -directly using a keyboard, mouse, and monitor. - -## Why monitor Pi-hole and a Raspberry Pi with Netdata? - -Netdata helps you monitor and troubleshoot all kinds of devices and the applications they run, including IoT devices -like the Raspberry Pi and applications like Pi-hole. - -After a two-minute installation and with zero configuration, you'll be able to see all of Pi-hole's metrics, including -the volume of queries, connected clients, DNS queries per type, top clients, top blocked domains, and more. - -With Netdata installed, you can also monitor system metrics and any other applications you might be running. By default, -Netdata collects metrics on CPU usage, disk IO, bandwidth, per-application resource usage, and a ton more. With the -Raspberry Pi used for this guide, Netdata automatically collects about 1,500 metrics every second! - -![Real-time Pi-hole monitoring with -Netdata](https://user-images.githubusercontent.com/1153921/90447745-c8fe9600-e098-11ea-8a57-4f07339f002b.png) - -## Install Netdata - -Let's start by installing Netdata first so that it can start collecting system metrics as soon as possible for the most -possible historic data. - -> ⚠️ Don't install Netdata using `apt` and the default package available in Raspbian. The Netdata team does not maintain -> this package, and can't guarantee it works properly. - -On Raspberry Pis running Raspbian, the best way to install Netdata is our one-line kickstart script. This script asks -you to install dependencies, then compiles Netdata from source via [GitHub](https://github.com/netdata/netdata). 
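The install command itself is rendered by the `OneLineInstallWget` component on the live docs site; for reference, the usual kickstart invocation looks like this (assuming the current `get.netdata.cloud` download URL):

```bash
# Download and run the kickstart script; inspect it first if you prefer
wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh
sh /tmp/netdata-kickstart.sh
```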
- - - -Once installed on a Raspberry Pi 4 with no accessories, Netdata starts collecting roughly 1,500 metrics every second and -populates its dashboard with more than 250 charts. - -Open your browser of choice and navigate to `http://NODE:19999/`, replacing `NODE` with the IP address of your Raspberry -Pi. Not sure what that IP is? Try running `hostname -I | awk '{print $1}'` from the Pi itself. - -You'll see Netdata's dashboard and a few hundred real-time, interactive charts. Feel free to explore, but let's turn our attention to installing Pi-hole. - -## Install Pi-Hole - -Like Netdata, Pi-hole has a one-line script for simple installation. From your Raspberry Pi, run the following: - -```bash -curl -sSL https://install.pi-hole.net | bash -``` - -The installer will help you set up Pi-hole based on the topology of your network. Once finished, you should set up your -devices—or your router for system-wide sinkhole protection—to [use Pi-hole as their DNS -service](https://discourse.pi-hole.net/t/how-do-i-configure-my-devices-to-use-pi-hole-as-their-dns-server/245). You've -finished setting up Pi-hole at this point. - -As far as configuring Netdata to monitor Pi-hole metrics, there's nothing you actually need to do. Netdata's [Pi-hole -collector](/src/go/plugin/go.d/modules/pihole/README.md) will autodetect the new service -running on your Raspberry Pi and immediately start collecting metrics every second. - -Restart Netdata with `sudo systemctl restart netdata`, which will then recognize that Pi-hole is running and start a -per-second collection job. When you refresh your Netdata dashboard or load it up again in a new tab, you'll see a new -entry in the menu for **Pi-hole** metrics. - -## Use Netdata to explore and monitor your Raspberry Pi and Pi-hole - -By the time you've reached this point in the guide, Netdata has already collected a ton of valuable data about your -Raspberry Pi, Pi-hole, and any other apps/services you might be running. Even a few minutes of collecting 1,500 metrics -per second adds up quickly. - -You can now use Netdata's synchronized charts to zoom, highlight, scrub through time, and discern how an anomaly in one -part of your system might affect another. - -![The Netdata dashboard in -action](https://user-images.githubusercontent.com/1153921/80827388-b9fee100-8b98-11ea-8f60-0d7824667cd3.gif) - -### Enable temperature sensor monitoring - -You need to manually enable Netdata's built-in [temperature sensor -collector](/src/collectors/charts.d.plugin/sensors/README.md) to start collecting metrics. - -> Netdata uses a few plugins to manage its [collectors](/src/collectors/REFERENCE.md), each using a different language: Go, -> Python, Node.js, and Bash. While our Go collectors are undergoing the most active development, we still support the -> other languages. In this case, you need to enable a temperature sensor collector that's written in Bash. - -First, open the `charts.d.conf` file for editing. You should always use the `edit-config` script to edit Netdata's -configuration files, as it ensures your settings persist across updates to the Netdata Agent. - -```bash -cd /etc/netdata -sudo ./edit-config charts.d.conf -``` - -Uncomment the `sensors=force` line and save the file. Restart Netdata with `sudo systemctl restart netdata` to enable -Raspberry Pi temperature sensor monitoring. 
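For reference, the change in `charts.d.conf` is just removing the comment marker — assuming the stock file layout:

```text
# Before:
#sensors=force
# After — force-enable the Bash sensors collector:
sensors=force
```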
- -### Storing historical metrics on your Raspberry Pi - -By default, Netdata allocates 256 MiB in disk space to store historical metrics inside the [database -engine](/src/database/engine/README.md). On the Raspberry Pi used for this guide, Netdata collects 1,500 metrics every -second, which equates to storing 3.5 days worth of historical metrics. - -You can increase this allocation by editing `netdata.conf` and increasing the `dbengine multihost disk space` setting to -more than 256. - -```yaml -[global] - dbengine multihost disk space = 512 -``` - -Use our [database sizing -calculator](/docs/netdata-agent/configuration/optimizing-metrics-database/change-metrics-storage.md#calculate-the-system-resources-ram-disk-space-needed-to-store-metrics) -and the [Database configuration documentation](/src/database/README.md) to help you determine the right -setting for your Raspberry Pi. diff --git a/docs/developer-and-contributor-corner/pi-hole-raspberry-pi.txt b/docs/developer-and-contributor-corner/pi-hole-raspberry-pi.txt new file mode 100644 index 000000000..e150cebdc --- /dev/null +++ b/docs/developer-and-contributor-corner/pi-hole-raspberry-pi.txt @@ -0,0 +1,120 @@ + + +# Monitor Pi-hole (and a Raspberry Pi) with Netdata + +import { OneLineInstallWget } from '@site/src/components/OneLineInstall/' + +Between intrusive ads, invasive trackers, and vicious malware, many techies and homelab enthusiasts are advancing their +networks' security and speed with a tiny computer and a powerful piece of software: [Pi-hole](https://pi-hole.net/). + +Pi-hole is a DNS sinkhole that prevents unwanted content from even reaching devices on your home network. It blocks ads +and malware at the network, instead of using extensions/add-ons for individual browsers, so you'll stop seeing ads in +some of the most intrusive places, like your smart TV. Pi-hole can even [improve your network's speed and reduce +bandwidth](https://discourse.pi-hole.net/t/will-pi-hole-slow-down-my-network/2048). + +Most Pi-hole users run it on a [Raspberry Pi](https://www.raspberrypi.org/products/raspberry-pi-4-model-b/) (hence the +name), a credit card-sized, super-capable computer that costs about $35. + +And to keep tabs on how both Pi-hole and the Raspberry Pi are working to protect your network, you can use the +open-source [Netdata monitoring agent](https://github.com/netdata/netdata). + +To get started, all you need is a [Raspberry Pi](https://www.raspberrypi.org/products/raspberry-pi-4-model-b/) with +Raspbian installed. This guide uses a Raspberry Pi 4 Model B and Raspbian GNU/Linux 10 (buster). This guide assumes +you're connecting to a Raspberry Pi remotely over SSH, but you could also complete all these steps on the system +directly using a keyboard, mouse, and monitor. + +## Why monitor Pi-hole and a Raspberry Pi with Netdata? + +Netdata helps you monitor and troubleshoot all kinds of devices and the applications they run, including IoT devices +like the Raspberry Pi and applications like Pi-hole. + +After a two-minute installation and with zero configuration, you'll be able to see all of Pi-hole's metrics, including +the volume of queries, connected clients, DNS queries per type, top clients, top blocked domains, and more. + +With Netdata installed, you can also monitor system metrics and any other applications you might be running. By default, +Netdata collects metrics on CPU usage, disk IO, bandwidth, per-application resource usage, and a ton more. 
With the +Raspberry Pi used for this guide, Netdata automatically collects about 1,500 metrics every second! + +![Real-time Pi-hole monitoring with +Netdata](https://user-images.githubusercontent.com/1153921/90447745-c8fe9600-e098-11ea-8a57-4f07339f002b.png) + +## Install Netdata + +Let's start by installing Netdata first so that it can start collecting system metrics as soon as possible for the most +possible historic data. + +> ⚠️ Don't install Netdata using `apt` and the default package available in Raspbian. The Netdata team does not maintain +> this package, and can't guarantee it works properly. + +On Raspberry Pis running Raspbian, the best way to install Netdata is our one-line kickstart script. This script asks +you to install dependencies, then compiles Netdata from source via [GitHub](https://github.com/netdata/netdata). + + + +Once installed on a Raspberry Pi 4 with no accessories, Netdata starts collecting roughly 1,500 metrics every second and +populates its dashboard with more than 250 charts. + +Open your browser of choice and navigate to `http://NODE:19999/`, replacing `NODE` with the IP address of your Raspberry +Pi. Not sure what that IP is? Try running `hostname -I | awk '{print $1}'` from the Pi itself. + +You'll see Netdata's dashboard and a few hundred real-time, interactive charts. Feel free to explore, but let's turn our attention to installing Pi-hole. + +## Install Pi-Hole + +Like Netdata, Pi-hole has a one-line script for simple installation. From your Raspberry Pi, run the following: + +```bash +curl -sSL https://install.pi-hole.net | bash +``` + +The installer will help you set up Pi-hole based on the topology of your network. Once finished, you should set up your +devices—or your router for system-wide sinkhole protection—to [use Pi-hole as their DNS +service](https://discourse.pi-hole.net/t/how-do-i-configure-my-devices-to-use-pi-hole-as-their-dns-server/245). You've +finished setting up Pi-hole at this point. + +As far as configuring Netdata to monitor Pi-hole metrics, there's nothing you actually need to do. Netdata's [Pi-hole +collector](/src/go/plugin/go.d/modules/pihole/README.md) will autodetect the new service +running on your Raspberry Pi and immediately start collecting metrics every second. + +Restart Netdata with `sudo systemctl restart netdata`, which will then recognize that Pi-hole is running and start a +per-second collection job. When you refresh your Netdata dashboard or load it up again in a new tab, you'll see a new +entry in the menu for **Pi-hole** metrics. + +## Use Netdata to explore and monitor your Raspberry Pi and Pi-hole + +By the time you've reached this point in the guide, Netdata has already collected a ton of valuable data about your +Raspberry Pi, Pi-hole, and any other apps/services you might be running. Even a few minutes of collecting 1,500 metrics +per second adds up quickly. + +You can now use Netdata's synchronized charts to zoom, highlight, scrub through time, and discern how an anomaly in one +part of your system might affect another. + +![The Netdata dashboard in +action](https://user-images.githubusercontent.com/1153921/80827388-b9fee100-8b98-11ea-8f60-0d7824667cd3.gif) + +### Storing historical metrics on your Raspberry Pi + +By default, Netdata allocates 256 MiB in disk space to store historical metrics inside the [database +engine](/src/database/engine/README.md). On the Raspberry Pi used for this guide, Netdata collects 1,500 metrics every +second, which equates to storing 3.5 days worth of historical metrics. 
+ +You can increase this allocation by editing `netdata.conf` and increasing the `dbengine multihost disk space` setting to +more than 256. + +```yaml +[global] + dbengine multihost disk space = 512 +``` + +Use our [database sizing +calculator](/docs/netdata-agent/configuration/optimizing-metrics-database/change-metrics-storage.md#calculate-the-system-resources-ram-disk-space-needed-to-store-metrics) +and the [Database configuration documentation](/src/database/README.md) to help you determine the right +setting for your Raspberry Pi. diff --git a/docs/developer-and-contributor-corner/process.md b/docs/developer-and-contributor-corner/process.md deleted file mode 100644 index 2902a24f6..000000000 --- a/docs/developer-and-contributor-corner/process.md +++ /dev/null @@ -1,270 +0,0 @@ - - -# Monitor any process in real-time with Netdata - -Netdata is more than a multitude of generic system-level metrics and visualizations. Instead of providing only a bird's -eye view of your system, leaving you to wonder exactly _what_ is taking up 99% CPU, Netdata also gives you visibility -into _every layer_ of your node. These additional layers give you context, and meaningful insights, into the true health -and performance of your infrastructure. - -One of these layers is the _process_. Every time a Linux system runs a program, it creates an independent process that -executes the program's instructions in parallel with anything else happening on the system. Linux systems track the -state and resource utilization of processes using the [`/proc` filesystem](https://en.wikipedia.org/wiki/Procfs), and -Netdata is designed to hook into those metrics to create meaningful visualizations out of the box. - -While there are a lot of existing command-line tools for tracking processes on Linux systems, such as `ps` or `top`, -only Netdata provides dozens of real-time charts, at both per-second and event frequency, without you having to write -SQL queries or know a bunch of arbitrary command-line flags. - -With Netdata's process monitoring, you can: - -- Benchmark/optimize performance of standard applications, like web servers or databases -- Benchmark/optimize performance of custom applications -- Troubleshoot CPU/memory/disk utilization issues (why is my system's CPU spiking right now?) -- Perform granular capacity planning based on the specific needs of your infrastructure -- Search for leaking file descriptors -- Investigate zombie processes - -... and much more. Let's get started. - -## Prerequisites - -- One or more Linux nodes running [Netdata](/packaging/installer/README.md) -- A general understanding of how - to [configure the Netdata Agent](/docs/netdata-agent/configuration/README.md) - using `edit-config`. -- A Netdata Cloud account. [Sign up](https://app.netdata.cloud) if you don't have one already. - -## How does Netdata do process monitoring? - -The Netdata Agent already knows to look for hundreds -of [standard applications that we support via collectors](/src/collectors/COLLECTORS.md), -and groups them based on their -purpose. Let's say you want to monitor a MySQL -database using its process. The Netdata Agent already knows to look for processes with the string `mysqld` in their -name, along with a few others, and puts them into the `sql` group. This `sql` group then becomes a dimension in all -process-specific charts. - -The process and groups settings are used by two unique and powerful collectors. 
- -[**`apps.plugin`**](/src/collectors/apps.plugin/README.md) looks at the Linux -process tree every second, much like `top` or -`ps fax`, and collects resource utilization information on every running process. It then automatically adds a layer of -meaningful visualization on top of these metrics, and creates per-process/application charts. - -[**`ebpf.plugin`**](/src/collectors/ebpf.plugin/README.md): Netdata's extended -Berkeley Packet Filter (eBPF) collector -monitors Linux kernel-level metrics for file descriptors, virtual filesystem IO, and process management, and then hands -process-specific metrics over to `apps.plugin` for visualization. The eBPF collector also collects and visualizes -metrics on an _event frequency_, which means it captures every kernel interaction, and not just the volume of -interaction at every second in time. That's even more precise than Netdata's standard per-second granularity. - -### Per-process metrics and charts in Netdata - -With these collectors working in parallel, Netdata visualizes the following per-second metrics for _any_ process on your -Linux systems: - -- CPU utilization (`apps.cpu`) - - Total CPU usage - - User/system CPU usage (`apps.cpu_user`/`apps.cpu_system`) -- Disk I/O - - Physical reads/writes (`apps.preads`/`apps.pwrites`) - - Logical reads/writes (`apps.lreads`/`apps.lwrites`) - - Open unique files (if a file is found open multiple times, it is counted just once, `apps.files`) -- Memory - - Real Memory Used (non-shared, `apps.mem`) - - Virtual Memory Allocated (`apps.vmem`) - - Minor page faults (i.e. memory activity, `apps.minor_faults`) -- Processes - - Threads running (`apps.threads`) - - Processes running (`apps.processes`) - - Carried over uptime (since the last Netdata Agent restart, `apps.uptime`) - - Minimum uptime (`apps.uptime_min`) - - Average uptime (`apps.uptime_average`) - - Maximum uptime (`apps.uptime_max`) - - Pipes open (`apps.pipes`) -- Swap memory - - Swap memory used (`apps.swap`) - - Major page faults (i.e. swap activity, `apps.major_faults`) -- Network - - Sockets open (`apps.sockets`) -- eBPF file - - Number of calls to open files. (`apps.file_open`) - - Number of files closed. (`apps.file_closed`) - - Number of calls to open files that returned errors. - - Number of calls to close files that returned errors. -- eBPF syscall - - Number of calls to delete files. (`apps.file_deleted`) - - Number of calls to `vfs_write`. (`apps.vfs_write_call`) - - Number of calls to `vfs_read`. (`apps.vfs_read_call`) - - Number of bytes written with `vfs_write`. (`apps.vfs_write_bytes`) - - Number of bytes read with `vfs_read`. (`apps.vfs_read_bytes`) - - Number of calls to write a file that returned errors. - - Number of calls to read a file that returned errors. -- eBPF process - - Number of process created with `do_fork`. (`apps.process_create`) - - Number of threads created with `do_fork` or `__x86_64_sys_clone`, depending on your system's kernel - version. (`apps.thread_create`) - - Number of times that a process called `do_exit`. (`apps.task_close`) -- eBPF net - - Number of bytes sent. (`apps.bandwidth_sent`) - - Number of bytes received. (`apps.bandwidth_recv`) - -As an example, here's the per-process CPU utilization chart, including a `sql` group/dimension. 
- -![A per-process CPU utilization chart in Netdata Cloud](https://user-images.githubusercontent.com/1153921/101217226-3a5d5700-363e-11eb-8610-aa1640aefb5d.png) - -## Configure the Netdata Agent to recognize a specific process - -To monitor any process, you need to make sure the Netdata Agent is aware of it. As mentioned above, the Agent is already -aware of hundreds of processes, and collects metrics from them automatically. - -But, if you want to change the grouping behavior, add an application that isn't yet supported in the Netdata Agent, or -monitor a custom application, you need to edit the `apps_groups.conf` configuration file. - -Navigate to your [Netdata config directory](/docs/netdata-agent/configuration/README.md) and -use `edit-config` to edit the file. - -```bash -cd /etc/netdata # Replace this with your Netdata config directory if not at /etc/netdata. -sudo ./edit-config apps_groups.conf -``` - -Inside the file are lists of process names, oftentimes using wildcards (`*`), that the Netdata Agent looks for and -groups together. For example, the Netdata Agent looks for processes starting with `mysqld`, `mariad`, `postgres`, and -others, and groups them into `sql`. That makes sense, since all these processes are for SQL databases. - -```conf -sql: mysqld* mariad* postgres* postmaster* oracle_* ora_* sqlservr -``` - -These groups are then reflected as [dimensions](/src/web/README.md#dimensions) -within Netdata's charts. - -![An example per-process CPU utilization chart in Netdata -Cloud](https://user-images.githubusercontent.com/1153921/101369156-352e2100-3865-11eb-9f0d-b8fac162e034.png) - -See the following two sections for details based on your needs. If you don't need to configure `apps_groups.conf`, jump -down to [visualizing process metrics](#visualize-process-metrics). - -### Standard applications (web servers, databases, containers, and more) - -As explained above, the Netdata Agent is already aware of most standard applications you run on Linux nodes, and you -shouldn't need to configure it to discover them. - -However, if you're using multiple applications that the Netdata Agent groups together you may want to separate them for -more precise monitoring. If you're not running any other types of SQL databases on that node, you don't need to change -the grouping, since you know that any MySQL is the only process contributing to the `sql` group. - -Let's say you're using both MySQL and PostgreSQL databases on a single node, and want to monitor their processes -independently. Open the `apps_groups.conf` file as explained in -the [section above](#configure-the-netdata-agent-to-recognize-a-specific-process) and scroll down until you find -the `database servers` section. Create new groups for MySQL and PostgreSQL, and move their process queries into the -unique groups. - -```conf -# ----------------------------------------------------------------------------- -# database servers - -mysql: mysqld* -postgres: postgres* -sql: mariad* postmaster* oracle_* ora_* sqlservr -``` - -Restart Netdata with `sudo systemctl restart netdata`, or -the [appropriate method](/packaging/installer/README.md#maintaining-a-netdata-agent-installation) for your system, to start collecting utilization metrics -from your application. Time to [visualize your process metrics](#visualize-process-metrics). - -### Custom applications - -Let's assume you have an application that runs on the process `custom-app`. 
To monitor eBPF metrics for that application -separate from any others, you need to create a new group in `apps_groups.conf` and associate that process name with it. - -Open the `apps_groups.conf` file as explained in -the [section above](#configure-the-netdata-agent-to-recognize-a-specific-process). Scroll down -to `# NETDATA processes accounting`. -Above that, paste in the following text, which creates a new `custom-app` group with the `custom-app` process. Replace -`custom-app` with the name of your application's Linux process. `apps_groups.conf` should now look like this: - -```conf -... -# ----------------------------------------------------------------------------- -# Custom applications to monitor with apps.plugin and ebpf.plugin - -custom-app: custom-app - -# ----------------------------------------------------------------------------- -# NETDATA processes accounting -... -``` - -Restart Netdata with `sudo systemctl restart netdata`, or -the [appropriate method](/packaging/installer/README.md#maintaining-a-netdata-agent-installation) for your system, to start collecting utilization metrics -from your application. - -## Visualize process metrics - -Now that you're collecting metrics for your process, you'll want to visualize them using Netdata's real-time, -interactive charts. Find these visualizations in the same section regardless of whether you -use [Netdata Cloud](https://app.netdata.cloud) for infrastructure monitoring, or single-node monitoring with the local -Agent's dashboard at `http://localhost:19999`. - -If you need a refresher on all the available per-process charts, see -the [above list](#per-process-metrics-and-charts-in-netdata). - -### Using Netdata's application collector (`apps.plugin`) - -`apps.plugin` puts all of its charts under the **Applications** section of any Netdata dashboard. - -![Screenshot of the Applications section on a Netdata dashboard](https://user-images.githubusercontent.com/1153921/101401172-2ceadb80-388f-11eb-9e9a-88443894c272.png) - -Let's continue with the MySQL example. We can create a [test -database](https://www.digitalocean.com/community/tutorials/how-to-measure-mysql-query-performance-with-mysqlslap) in -MySQL to generate load on the `mysql` process. - -`apps.plugin` immediately collects and visualizes this activity `apps.cpu` chart, which shows an increase in CPU -utilization from the `sql` group. There is a parallel increase in `apps.pwrites`, which visualizes writes to disk. - -![Per-application CPU utilization metrics](https://user-images.githubusercontent.com/1153921/101409725-8527da80-389b-11eb-96e9-9f401535aafc.png) - -![Per-application disk writing metrics](https://user-images.githubusercontent.com/1153921/101409728-85c07100-389b-11eb-83fd-d79dd1545b5a.png) - -Next, the `mysqlslap` utility queries the database to provide some benchmarking load on the MySQL database. It won't -look exactly like a production database executing lots of user queries, but it gives you an idea into the possibility of -these visualizations. - -```bash -sudo mysqlslap --user=sysadmin --password --host=localhost --concurrency=50 --iterations=10 --create-schema=employees --query="SELECT * FROM dept_emp;" --verbose -``` - -The following per-process disk utilization charts show spikes under the `sql` group at the same time `mysqlslap` was run -numerous times, with slightly different concurrency and query options. 
- -![Per-application disk metrics](https://user-images.githubusercontent.com/1153921/101411810-d08fb800-389e-11eb-85b3-f3fa41f1f887.png) - -> 💡 Click on any dimension below a chart in Netdata Cloud (or to the right of a chart on a local Agent dashboard), to -> visualize only that dimension. This can be particularly useful in process monitoring to separate one process' -> utilization from the rest of the system. - -### Using Netdata's eBPF collector (`ebpf.plugin`) - -Netdata's eBPF collector puts its charts in two places. Of most importance to process monitoring are the **ebpf file**, -**ebpf syscall**, **ebpf process**, and **ebpf net** sub-sections under **Applications**, shown in the above screenshot. - -For example, running the above workload shows the entire "story" how MySQL interacts with the Linux kernel to open -processes/threads to handle a large number of SQL queries, then subsequently close the tasks as each query returns the -relevant data. - -![Per-process eBPF charts](https://user-images.githubusercontent.com/1153921/101412395-c8844800-389f-11eb-86d2-20c8a0f7b3c0.png) - -`ebpf.plugin` visualizes additional eBPF metrics, which are system-wide and not per-process, under the **eBPF** section. - - diff --git a/docs/developer-and-contributor-corner/process.txt b/docs/developer-and-contributor-corner/process.txt new file mode 100644 index 000000000..dbb36c550 --- /dev/null +++ b/docs/developer-and-contributor-corner/process.txt @@ -0,0 +1,270 @@ + + +# Monitor any process in real-time with Netdata + +Netdata is more than a multitude of generic system-level metrics and visualizations. Instead of providing only a bird's +eye view of your system, leaving you to wonder exactly _what_ is taking up 99% CPU, Netdata also gives you visibility +into _every layer_ of your node. These additional layers give you context, and meaningful insights, into the true health +and performance of your infrastructure. + +One of these layers is the _process_. Every time a Linux system runs a program, it creates an independent process that +executes the program's instructions in parallel with anything else happening on the system. Linux systems track the +state and resource utilization of processes using the [`/proc` filesystem](https://en.wikipedia.org/wiki/Procfs), and +Netdata is designed to hook into those metrics to create meaningful visualizations out of the box. + +While there are a lot of existing command-line tools for tracking processes on Linux systems, such as `ps` or `top`, +only Netdata provides dozens of real-time charts, at both per-second and event frequency, without you having to write +SQL queries or know a bunch of arbitrary command-line flags. + +With Netdata's process monitoring, you can: + +- Benchmark/optimize performance of standard applications, like web servers or databases +- Benchmark/optimize performance of custom applications +- Troubleshoot CPU/memory/disk utilization issues (why is my system's CPU spiking right now?) +- Perform granular capacity planning based on the specific needs of your infrastructure +- Search for leaking file descriptors +- Investigate zombie processes + +... and much more. Let's get started. + +## Prerequisites + +- One or more Linux nodes running [Netdata](/packaging/installer/README.md) +- A general understanding of how + to [configure the Netdata Agent](/docs/netdata-agent/configuration/README.md) + using `edit-config`. +- A Netdata Cloud account. [Sign up](https://app.netdata.cloud) if you don't have one already. 
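+Before moving on, it's worth confirming the Agent on your node is up and reachable. A quick sketch, assuming the default
+dashboard port (`19999`) on the local node; the `/api/v1/info` endpoint returns basic Agent metadata as JSON:
+
+```python
+# Minimal reachability check for a local Netdata Agent.
+import json
+import urllib.request
+
+with urllib.request.urlopen("http://localhost:19999/api/v1/info", timeout=5) as resp:
+    info = json.load(resp)
+
+print("Netdata Agent version:", info.get("version"))
+```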
+ +## How does Netdata do process monitoring? + +The Netdata Agent already knows to look for hundreds +of [standard applications that we support via collectors](/src/collectors/COLLECTORS.md), +and groups them based on their +purpose. Let's say you want to monitor a MySQL +database using its process. The Netdata Agent already knows to look for processes with the string `mysqld` in their +name, along with a few others, and puts them into the `sql` group. This `sql` group then becomes a dimension in all +process-specific charts. + +The process and groups settings are used by two unique and powerful collectors. + +[**`apps.plugin`**](/src/collectors/apps.plugin/README.md) looks at the Linux +process tree every second, much like `top` or +`ps fax`, and collects resource utilization information on every running process. It then automatically adds a layer of +meaningful visualization on top of these metrics, and creates per-process/application charts. + +[**`ebpf.plugin`**](/src/collectors/ebpf.plugin/README.md): Netdata's extended +Berkeley Packet Filter (eBPF) collector +monitors Linux kernel-level metrics for file descriptors, virtual filesystem IO, and process management, and then hands +process-specific metrics over to `apps.plugin` for visualization. The eBPF collector also collects and visualizes +metrics on an _event frequency_, which means it captures every kernel interaction, and not just the volume of +interaction at every second in time. That's even more precise than Netdata's standard per-second granularity. + +### Per-process metrics and charts in Netdata + +With these collectors working in parallel, Netdata visualizes the following per-second metrics for _any_ process on your +Linux systems: + +- CPU utilization (`apps.cpu`) + - Total CPU usage + - User/system CPU usage (`apps.cpu_user`/`apps.cpu_system`) +- Disk I/O + - Physical reads/writes (`apps.preads`/`apps.pwrites`) + - Logical reads/writes (`apps.lreads`/`apps.lwrites`) + - Open unique files (if a file is found open multiple times, it is counted just once, `apps.files`) +- Memory + - Real Memory Used (non-shared, `apps.mem`) + - Virtual Memory Allocated (`apps.vmem`) + - Minor page faults (i.e. memory activity, `apps.minor_faults`) +- Processes + - Threads running (`apps.threads`) + - Processes running (`apps.processes`) + - Carried over uptime (since the last Netdata Agent restart, `apps.uptime`) + - Minimum uptime (`apps.uptime_min`) + - Average uptime (`apps.uptime_average`) + - Maximum uptime (`apps.uptime_max`) + - Pipes open (`apps.pipes`) +- Swap memory + - Swap memory used (`apps.swap`) + - Major page faults (i.e. swap activity, `apps.major_faults`) +- Network + - Sockets open (`apps.sockets`) +- eBPF file + - Number of calls to open files. (`apps.file_open`) + - Number of files closed. (`apps.file_closed`) + - Number of calls to open files that returned errors. + - Number of calls to close files that returned errors. +- eBPF syscall + - Number of calls to delete files. (`apps.file_deleted`) + - Number of calls to `vfs_write`. (`apps.vfs_write_call`) + - Number of calls to `vfs_read`. (`apps.vfs_read_call`) + - Number of bytes written with `vfs_write`. (`apps.vfs_write_bytes`) + - Number of bytes read with `vfs_read`. (`apps.vfs_read_bytes`) + - Number of calls to write a file that returned errors. + - Number of calls to read a file that returned errors. +- eBPF process + - Number of process created with `do_fork`. 
(`apps.process_create`)
+  - Number of threads created with `do_fork` or `__x86_64_sys_clone`, depending on your system's kernel
+    version. (`apps.thread_create`)
+  - Number of times that a process called `do_exit`. (`apps.task_close`)
+- eBPF net
+  - Number of bytes sent. (`apps.bandwidth_sent`)
+  - Number of bytes received. (`apps.bandwidth_recv`)
+
+As an example, here's the per-process CPU utilization chart, including a `sql` group/dimension.
+
+![A per-process CPU utilization chart in Netdata Cloud](https://user-images.githubusercontent.com/1153921/101217226-3a5d5700-363e-11eb-8610-aa1640aefb5d.png)
+
+## Configure the Netdata Agent to recognize a specific process
+
+To monitor any process, you need to make sure the Netdata Agent is aware of it. As mentioned above, the Agent is already
+aware of hundreds of processes, and collects metrics from them automatically.
+
+But, if you want to change the grouping behavior, add an application that isn't yet supported in the Netdata Agent, or
+monitor a custom application, you need to edit the `apps_groups.conf` configuration file.
+
+Navigate to your [Netdata config directory](/docs/netdata-agent/configuration/README.md) and
+use `edit-config` to edit the file.
+
+```bash
+cd /etc/netdata # Replace this with your Netdata config directory if not at /etc/netdata.
+sudo ./edit-config apps_groups.conf
+```
+
+Inside the file are lists of process names, oftentimes using wildcards (`*`), that the Netdata Agent looks for and
+groups together. For example, the Netdata Agent looks for processes starting with `mysqld`, `mariad`, `postgres`, and
+others, and groups them into `sql`. That makes sense, since all these processes are for SQL databases.
+
+```text
+sql: mysqld* mariad* postgres* postmaster* oracle_* ora_* sqlservr
+```
+
+These groups are then reflected as [dimensions](/src/web/README.md#dimensions)
+within Netdata's charts.
+
+![An example per-process CPU utilization chart in Netdata
+Cloud](https://user-images.githubusercontent.com/1153921/101369156-352e2100-3865-11eb-9f0d-b8fac162e034.png)
+
+See the following two sections for details based on your needs. If you don't need to configure `apps_groups.conf`, jump
+down to [visualizing process metrics](#visualize-process-metrics).
+
+### Standard applications (web servers, databases, containers, and more)
+
+As explained above, the Netdata Agent is already aware of most standard applications you run on Linux nodes, and you
+shouldn't need to configure it to discover them.
+
+However, if you're using multiple applications that the Netdata Agent groups together, you may want to separate them for
+more precise monitoring. If you're not running any other types of SQL databases on that node, you don't need to change
+the grouping, since you know that MySQL is the only process contributing to the `sql` group.
+
+Let's say you're using both MySQL and PostgreSQL databases on a single node, and want to monitor their processes
+independently. Open the `apps_groups.conf` file as explained in
+the [section above](#configure-the-netdata-agent-to-recognize-a-specific-process) and scroll down until you find
+the `database servers` section. Create new groups for MySQL and PostgreSQL, and move their process queries into the
+unique groups.
+
+```text
+# -----------------------------------------------------------------------------
+# database servers
+
+mysql: mysqld*
+postgres: postgres*
+sql: mariad* postmaster* oracle_* ora_* sqlservr
+```
+
+Restart Netdata with `sudo systemctl restart netdata`, or
+the appropriate method for your system, to start collecting utilization metrics
+from your application. Time to [visualize your process metrics](#visualize-process-metrics).
+
+### Custom applications
+
+Let's assume you have an application that runs on the process `custom-app`. To monitor eBPF metrics for that application
+separate from any others, you need to create a new group in `apps_groups.conf` and associate that process name with it.
+
+Open the `apps_groups.conf` file as explained in
+the [section above](#configure-the-netdata-agent-to-recognize-a-specific-process). Scroll down
+to `# NETDATA processes accounting`.
+Above that, paste in the following text, which creates a new `custom-app` group with the `custom-app` process. Replace
+`custom-app` with the name of your application's Linux process. `apps_groups.conf` should now look like this:
+
+```text
+...
+# -----------------------------------------------------------------------------
+# Custom applications to monitor with apps.plugin and ebpf.plugin
+
+custom-app: custom-app
+
+# -----------------------------------------------------------------------------
+# NETDATA processes accounting
+...
+```
+
+Restart Netdata with `sudo systemctl restart netdata`, or
+the appropriate method for your system, to start collecting utilization metrics
+from your application.
+
+## Visualize process metrics
+
+Now that you're collecting metrics for your process, you'll want to visualize them using Netdata's real-time,
+interactive charts. Find these visualizations in the same section regardless of whether you
+use [Netdata Cloud](https://app.netdata.cloud) for infrastructure monitoring, or single-node monitoring with the local
+Agent's dashboard at `http://localhost:19999`.
+
+If you need a refresher on all the available per-process charts, see
+the [above list](#per-process-metrics-and-charts-in-netdata).
+
+### Using Netdata's application collector (`apps.plugin`)
+
+`apps.plugin` puts all of its charts under the **Applications** section of any Netdata dashboard.
+
+![Screenshot of the Applications section on a Netdata dashboard](https://user-images.githubusercontent.com/1153921/101401172-2ceadb80-388f-11eb-9e9a-88443894c272.png)
+
+Let's continue with the MySQL example. We can create a [test
+database](https://www.digitalocean.com/community/tutorials/how-to-measure-mysql-query-performance-with-mysqlslap) in
+MySQL to generate load on the `mysql` process.
+
+`apps.plugin` immediately collects and visualizes this activity in the `apps.cpu` chart, which shows an increase in CPU
+utilization from the `sql` group. There is a parallel increase in `apps.pwrites`, which visualizes writes to disk.
+
+![Per-application CPU utilization metrics](https://user-images.githubusercontent.com/1153921/101409725-8527da80-389b-11eb-96e9-9f401535aafc.png)
+
+![Per-application disk writing metrics](https://user-images.githubusercontent.com/1153921/101409728-85c07100-389b-11eb-83fd-d79dd1545b5a.png)
+
+Next, the `mysqlslap` utility queries the database to provide some benchmarking load on the MySQL database. It won't
+look exactly like a production database executing lots of user queries, but it gives you an idea of what's possible with
+these visualizations.
+
+```bash
+sudo mysqlslap --user=sysadmin --password --host=localhost --concurrency=50 --iterations=10 --create-schema=employees --query="SELECT * FROM dept_emp;" --verbose
+```
+
+The following per-process disk utilization charts show spikes under the `sql` group at the same time `mysqlslap` was run
+numerous times, with slightly different concurrency and query options.
+
+![Per-application disk metrics](https://user-images.githubusercontent.com/1153921/101411810-d08fb800-389e-11eb-85b3-f3fa41f1f887.png)
+
+> 💡 Click on any dimension below a chart in Netdata Cloud (or to the right of a chart on a local Agent dashboard) to
+> visualize only that dimension. This can be particularly useful in process monitoring to separate one process'
+> utilization from the rest of the system.
+
+### Using Netdata's eBPF collector (`ebpf.plugin`)
+
+Netdata's eBPF collector puts its charts in two places. Of most importance to process monitoring are the **ebpf file**,
+**ebpf syscall**, **ebpf process**, and **ebpf net** sub-sections under **Applications**, shown in the above screenshot.
+
+For example, running the above workload shows the entire "story" of how MySQL interacts with the Linux kernel to open
+processes/threads to handle a large number of SQL queries, then subsequently close the tasks as each query returns the
+relevant data.
+
+![Per-process eBPF charts](https://user-images.githubusercontent.com/1153921/101412395-c8844800-389f-11eb-86d2-20c8a0f7b3c0.png)
+
+`ebpf.plugin` visualizes additional eBPF metrics, which are system-wide and not per-process, under the **eBPF** section.
+
+
diff --git a/docs/developer-and-contributor-corner/python-collector.md b/docs/developer-and-contributor-corner/python-collector.md
deleted file mode 100644
index 0b7aa96a6..000000000
--- a/docs/developer-and-contributor-corner/python-collector.md
+++ /dev/null
@@ -1,626 +0,0 @@
-# Develop a custom data collector in Python
-
-The Netdata Agent uses [data collectors](/src/collectors/README.md) to
-fetch metrics from hundreds of system, container, and service endpoints. While the Netdata team and community has built
-[powerful collectors](/src/collectors/COLLECTORS.md) for most system, container,
-and service/application endpoints, some custom applications can't be monitored by default.
-
-In this tutorial, you'll learn how to leverage the [Python programming language](https://www.python.org/) to build a
-custom data collector for the Netdata Agent. Follow along with your own dataset, using the techniques and best practices
-covered here, or use the included examples for collecting and organizing either random or weather data.
-
-## Disclaimer
-
-If you're comfortable with Golang, consider instead writing a module for the [go.d.plugin](https://github.com/netdata/go.d.plugin).
-Golang is more performant, easier to maintain, and simpler for users since it doesn't require a particular runtime on the node to
-execute. Python plugins require Python on the machine to be executed. Netdata uses Go as the platform of choice for
-production-grade collectors.
-
-We generally do not accept contributions of Python modules to the GitHub project netdata/netdata. If you write a Python collector and
-want to make it available for other users, you should create the pull request in https://github.com/netdata/community.
-
-## What you need to get started
-
- - A physical or virtual Linux system, which we'll call a _node_.
- - A working [installation of Netdata](/packaging/installer/README.md) monitoring agent.
- -### Quick start - -For a quick start, you can look at the -[example plugin](https://raw.githubusercontent.com/netdata/netdata/master/src/collectors/python.d.plugin/example/example.chart.py). - -**Note**: If you are working 'locally' on a new collector and would like to run it in an already installed and running -Netdata (as opposed to having to install Netdata from source again with your new changes) you can copy over the relevant -file to where Netdata expects it and then either `sudo systemctl restart netdata` to have it be picked up and used by -Netdata or you can just run the updated collector in debug mode by following a process like below (this assumes you have -[installed Netdata from a GitHub fork](/packaging/installer/methods/manual.md) you -have made to do your development on). - -```bash -# clone your fork (done once at the start but shown here for clarity) -#git clone --branch my-example-collector https://github.com/mygithubusername/netdata.git --depth=100 --recursive -# go into your netdata source folder -cd netdata -# git pull your latest changes (assuming you built from a fork you are using to develop on) -git pull -# instead of running the installer we can just copy over the updated collector files -#sudo ./netdata-installer.sh --dont-wait -# copy over the file you have updated locally (pretending we are working on the 'example' collector) -sudo cp collectors/python.d.plugin/example/example.chart.py /usr/libexec/netdata/python.d/ -# become user netdata -sudo su -s /bin/bash netdata -# run your updated collector in debug mode to see if it works without having to reinstall netdata -/usr/libexec/netdata/plugins.d/python.d.plugin example debug trace nolock -``` - -## Jobs and elements of a Python collector - -A Python collector for Netdata is a Python script that gathers data from an external source and transforms these data -into charts to be displayed by Netdata dashboard. The basic jobs of the plugin are: - -- Gather the data from the service/application. -- Create the required charts. -- Parse the data to extract or create the actual data to be represented. -- Assign the correct values to the charts -- Set the order for the charts to be displayed. -- Give the charts data to Netdata for visualization. - -The basic elements of a Netdata collector are: - -- `ORDER[]`: A list containing the charts to be displayed. -- `CHARTS{}`: A dictionary containing the details for the charts to be displayed. -- `data{}`: A dictionary containing the values to be displayed. -- `get_data()`: The basic function of the plugin which will return to Netdata the correct values. - -**Note**: All names are better explained in the -[External Plugins Documentation](/src/collectors/plugins.d/README.md). -Parameters like `priority` and `update_every` mentioned in that documentation are handled by the `python.d.plugin`, -not by each collection module. - -Let's walk through these jobs and elements as independent elements first, then apply them to example Python code. - -### Determine how to gather metrics data - -Netdata can collect data from any program that can print to stdout. Common input sources for collectors can be logfiles, -HTTP requests, executables, and more. While this tutorial will offer some example inputs, your custom application will -have different inputs and metrics. 
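For instance, if your application exposes a status endpoint over HTTP, a good first step is simply to fetch it and
inspect what it returns before writing any collector logic. A minimal sketch, where the URL and response shape are
hypothetical placeholders:

```python
# Probe a hypothetical status endpoint to see which metrics it exposes.
import urllib.request

with urllib.request.urlopen("http://localhost:8080/status", timeout=5) as resp:
    raw = resp.read().decode("utf-8")

print(raw)  # inspect the raw payload before deciding how to parse it
```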
-
-A great deal of the work in developing a Netdata collector is investigating the target application and understanding
-which metrics it exposes and how to retrieve them.
-
-### Create charts
-
-For the data to be represented in the Netdata dashboard, you need to create charts. Charts (in general) are defined by
-several characteristics: title, legend, units, type, and presented values. Each chart is represented as a dictionary
-entry:
-
-```python
-chart= {
-    "chart_name":
-        {
-            "options": [option_list],
-            "lines": [
-                [dimension_list]
-            ]
-        }
-    }
-```
-
-Use the `options` field to set the chart's options, which is a list in the form `options: [name, title, units, family,
-context, charttype]`, where:
-
-- `name`: The name of the chart.
-- `title` : The title to be displayed in the chart.
-- `units` : The units for this chart.
-- `family`: An identifier used to group charts together (can be null).
-- `context`: An identifier used to group contextually similar charts together. The best practice is to provide a context
-  that is `A.B`, with `A` being the name of the collector, and `B` being the name of the specific metric.
-- `charttype`: Either `line`, `area`, or `stacked`. If null, `line` is the default value.
-
-You can read more about `family` and `context` in the [web dashboard](/src/web/README.md#families) doc.
-
-Once the chart has been defined, you should define the dimensions of the chart. Dimensions are basically the metrics to
-be represented in this chart and each chart can have more than one dimension. In order to define the dimensions, the
-"lines" list should be filled in with the required dimensions. Each dimension is a list:
-
-`dimension: [id, name, algorithm, multiplier, divisor]`
-- `id` : The id of the dimension. Mandatory unique field (string) required in order to set a value.
-- `name`: The name to be presented in the chart. If null, the id will be used.
-- `algorithm`: Can be absolute or incremental. If null, absolute is used. Incremental shows the difference from the
-  previous value.
-- `multiplier`: an integer value by which to multiply the collected value; if null, 1 is used.
-- `divisor`: an integer value by which to divide the collected value; if null, 1 is used.
-
-The multiplier/divisor fields are used in cases where the value to be displayed should be decimal since Netdata only
-gathers integer values.
-
-### Parse the data to extract or create the actual data to be represented
-
-Once the data is received, your collector should process it in order to get the values required. If, for example, the
-received data is a JSON string, you should parse the data to get the required data to be used for the charts.
-
-### Assign the correct values to the charts
-
-Once you have processed your data and obtained the required values, you need to assign those values to the charts you
-created. This is done using the `data` dictionary, which is in the form:
-
-`"data": {dimension_id: value }`, where:
-- `dimension_id`: The id of a defined dimension in a created chart.
-- `value`: The numerical value to associate with this dimension.
-
-### Set the order for the charts to be displayed
-
-Next, set the order of chart appearance with the `ORDER` list, which is in the form:
-
-`"ORDER": [chart_name_1,chart_name_2, …., chart_name_X]`, where:
-- `chart_name_x`: is the chart name to be shown in position X.
-
-### Give the charts data to Netdata for visualization
-
-Our plugin should just return the `data` dictionary. If everything is set correctly, the charts should be updated with
-the correct values.
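Putting these pieces together, here's a minimal sketch of how `ORDER`, `CHARTS`, and `get_data()` line up in a single
module. It assumes one chart with one dimension, uses the `SimpleService` base class covered in the next section (so it
only runs under the `python.d.plugin` runtime), and all names are illustrative:

```python
# A minimal sketch tying ORDER, CHARTS, and get_data() together.
from bases.FrameworkServices.SimpleService import SimpleService

ORDER = ["requests"]

CHARTS = {
    "requests": {
        "options": [None, "Requests", "requests/s", "requests", "myapp.requests", "line"],
        "lines": [
            ["requests_total", "total", "incremental"],
        ],
    }
}


class Service(SimpleService):
    def __init__(self, configuration=None, name=None):
        SimpleService.__init__(self, configuration=configuration, name=name)
        self.order = ORDER
        self.definitions = CHARTS

    def get_data(self):
        # In a real module this value would come from the monitored application.
        return {"requests_total": 42}
```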
- -## Framework classes - -Every module needs to implement its own `Service` class. This class should inherit from one of the framework classes: - -- `SimpleService` -- `UrlService` -- `SocketService` -- `LogService` -- `ExecutableService` - -Also it needs to invoke the parent class constructor in a specific way as well as assign global variables to class variables. - -For example, the snippet below is from the -[RabbitMQ collector](https://github.com/netdata/netdata/blob/91f3268e9615edd393bd43de4ad8068111024cc9/collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py#L273). -This collector uses an HTTP endpoint and uses the `UrlService` framework class, which only needs to define an HTTP -endpoint for data collection. - -```python -class Service(UrlService): - def __init__(self, configuration=None, name=None): - UrlService.__init__(self, configuration=configuration, name=name) - self.order = ORDER - self.definitions = CHARTS - self.url = '{0}://{1}:{2}'.format( - configuration.get('scheme', 'http'), - configuration.get('host', '127.0.0.1'), - configuration.get('port', 15672), - ) - self.node_name = str() - self.vhost = VhostStatsBuilder() - self.collected_vhosts = set() - self.collect_queues_metrics = configuration.get('collect_queues_metrics', False) - self.debug("collect_queues_metrics is {0}".format("enabled" if self.collect_queues_metrics else "disabled")) - if self.collect_queues_metrics: - self.queue = QueueStatsBuilder() - self.collected_queues = set() -``` - -In our use-case, we use the `SimpleService` framework, since there is no framework class that suits our needs. - -You can find below the [framework class reference](#framework-class-reference). - -## An example collector using weather station data - -Let's build a custom Python collector for visualizing data from a weather monitoring station. - -### Determine how to gather metrics data - -This example assumes you can gather metrics data through HTTP requests to a web server, and that the data provided are -numeric values for temperature, humidity and pressure. It also assumes you can get the `min`, `max`, and `average` -values for these metrics. - -### Chart creation - -First, create a single chart that shows the latest temperature metric: - -```python -CHARTS = { - "temp_current": { - "options": ["my_temp", "Temperature", "Celsius", "TEMP", "weather_station.temperature", "line"], - "lines": [ - ["current_temp_id","current_temperature"] - ] - } -} -``` - -## Parse the data to extract or create the actual data to be represented - -Every collector must implement `_get_data`. This method should grab raw data from `_get_raw_data`, -parse it, and return a dictionary where keys are unique dimension names, or `None` if no data is collected. - -For example: -```py -def _get_data(self): - try: - raw = self._get_raw_data().split(" ") - return {'active': int(raw[2])} - except (ValueError, AttributeError): - return None -``` - -In our weather data collector we declare `_get_data` as follows: - -```python - def get_data(self): - #The data dict is basically all the values to be represented - # The entries are in the format: { "dimension": value} - #And each "dimension" should belong to a chart. - data = dict() - - self.populate_data() - - data['current_temperature'] = self.weather_data["temp"] - - return data -``` - -A standard practice would be to either get the data on JSON format or transform them to JSON format. We use a dictionary -to give this format and issue random values to simulate received data. 
-
-The following code iterates through the names of the expected values and creates a dictionary with the name of the value
-as `key`, and a random value as `value`.
-
-```python
-    weather_data=dict()
-    weather_metrics=[
-        "temp","av_temp","min_temp","max_temp",
-        "humid","av_humid","min_humid","max_humid",
-        "pressure","av_pressure","min_pressure","max_pressure",
-    ]
-
-    def populate_data(self):
-        for metric in self.weather_metrics:
-            self.weather_data[metric]=random.randint(0,100)
-```
-
-### Assign the correct values to the charts
-
-Our chart has a dimension called `current_temp_id`, which should have the temperature value received.
-
-```python
-data['current_temp_id'] = self.weather_data["temp"]
-```
-
-### Set the order for the charts to be displayed
-
-```python
-ORDER = [
-    "temp_current"
-]
-```
-
-### Give the charts data to Netdata for visualization
-
-```python
-return data
-```
-
-A snapshot of the chart created by this plugin:
-
-![A snapshot of the chart created by this plugin](https://i.imgur.com/2tR9KvF.png)
-
-Here's the current source code for the data collector:
-
-```python
-# -*- coding: utf-8 -*-
-# Description: howto weather station netdata python.d module
-# Author: Panagiotis Papaioannou (papajohn-uop)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from bases.FrameworkServices.SimpleService import SimpleService
-
-import random
-
-NETDATA_UPDATE_EVERY=1
-priority = 90000
-
-ORDER = [
-    "temp_current"
-]
-
-CHARTS = {
-    "temp_current": {
-        "options": ["my_temp", "Temperature", "Celsius", "TEMP", "weather_station.temperature", "line"],
-        "lines": [
-            ["current_temperature"]
-        ]
-    }
-}
-
-class Service(SimpleService):
-    def __init__(self, configuration=None, name=None):
-        SimpleService.__init__(self, configuration=configuration, name=name)
-        self.order = ORDER
-        self.definitions = CHARTS
-        #values to show at graphs
-        self.values=dict()
-
-    @staticmethod
-    def check():
-        return True
-
-    weather_data=dict()
-    weather_metrics=[
-        "temp","av_temp","min_temp","max_temp",
-        "humid","av_humid","min_humid","max_humid",
-        "pressure","av_pressure","min_pressure","max_pressure",
-    ]
-
-    def logMe(self,msg):
-        self.debug(msg)
-
-    def populate_data(self):
-        for metric in self.weather_metrics:
-            self.weather_data[metric]=random.randint(0,100)
-
-    def get_data(self):
-        #The data dict is basically all the values to be represented
-        # The entries are in the format: { "dimension": value}
-        #And each "dimension" should belong to a chart.
-        data = dict()
-
-        self.populate_data()
-
-        data['current_temperature'] = self.weather_data["temp"]
-
-        return data
-```
-
-## Add more charts to the existing weather station collector
-
-To enrich the example, add another chart to the collector to present the humidity metric.
-
-Add a new entry in the `CHARTS` dictionary with the definition for the new chart.
-
-```python
-CHARTS = {
-    'temp_current': {
-        'options': ['my_temp', 'Temperature', 'Celsius', 'TEMP', 'weather_station.temperature', 'line'],
-        'lines': [
-            ['current_temperature']
-        ]
-    },
-    'humid_current': {
-        'options': ['my_humid', 'Humidity', '%', 'HUMIDITY', 'weather_station.humidity', 'line'],
-        'lines': [
-            ['current_humidity']
-        ]
-    }
-}
-```
-
-The data has already been created and parsed by the `populate_data()` method, so you only need to populate the
-`current_humidity` dimension with `self.weather_data["humid"]`.
-
-```python
-    data['current_temperature'] = self.weather_data["temp"]
-    data['current_humidity'] = self.weather_data["humid"]
-```
-
-Next, put the new `humid_current` chart into the `ORDER` list:
-
-```python
-ORDER = [
-    'temp_current',
-    'humid_current'
-]
-```
-
-[Restart Netdata](/packaging/installer/README.md#maintaining-a-netdata-agent-installation) with `sudo systemctl restart netdata` to see the new humidity
-chart:
-
-![A snapshot of the modified chart](https://i.imgur.com/XOeCBmg.png)
-
-Next, it's time to add one more chart that visualizes the average, minimum, and maximum temperature values.
-
-Add a new entry in the `CHARTS` dictionary with the definition for the new chart. Since you want three values
-represented in this chart, add three dimensions. You should also use the same `FAMILY` value in the charts (`TEMP`)
-so that those two charts are grouped together.
-
-```python
-CHARTS = {
-    'temp_current': {
-        'options': ['my_temp', 'Temperature', 'Celsius', 'TEMP', 'weather_station.temperature', 'line'],
-        'lines': [
-            ['current_temperature']
-        ]
-    },
-    'temp_stats': {
-        'options': ['stats_temp', 'Temperature', 'Celsius', 'TEMP', 'weather_station.temperature_stats', 'line'],
-        'lines': [
-            ['min_temperature'],
-            ['max_temperature'],
-            ['avg_temperature']
-        ]
-    },
-    'humid_current': {
-        'options': ['my_humid', 'Humidity', '%', 'HUMIDITY', 'weather_station.humidity', 'line'],
-        'lines': [
-            ['current_humidity']
-        ]
-    }
-
-}
-```
-
-As before, initiate new dimensions and add data to them:
-
-```python
-    data['current_temperature'] = self.weather_data["temp"]
-    data['min_temperature'] = self.weather_data["min_temp"]
-    data['max_temperature'] = self.weather_data["max_temp"]
-    data['avg_temperature'] = self.weather_data["av_temp"]
-    data['current_humidity'] = self.weather_data["humid"]
-```
-
-Finally, set the order for the `temp_stats` chart:
-
-```python
-ORDER = [
-    'temp_current',
-    'temp_stats',
-    'humid_current'
-]
-```
-
-[Restart Netdata](/packaging/installer/README.md#maintaining-a-netdata-agent-installation) with `sudo systemctl restart netdata` to see the new
-min/max/average temperature chart with multiple dimensions:
-
-![A snapshot of the modified chart](https://i.imgur.com/g7E8lnG.png)
-
-## Add a configuration file
-
-The last piece of the puzzle to create a fully robust Python collector is the configuration file. python.d collectors
-use configuration files in [YAML](https://www.tutorialspoint.com/yaml/yaml_basics.htm) format, as follows:
-
-- Create a configuration file in the same directory as the `.chart.py`. Name it `.conf`.
-- Define a `job`, which is an instance of the collector. It is useful when you want to collect data from different
-  sources with different attributes. For example, we could gather data from 2 different weather stations, which use
-  different temperature measures: Fahrenheit and Celsius.
-- You can define many different jobs with the same name, but with different attributes. Netdata will try each job
-  serially and will stop at the first job that returns data. If multiple jobs have the same name, only one of them can
-  run. This enables you to define different "ways" to fetch data from a particular data source so that the collector has
-  more chances to work out-of-the-box. For example, if the data source supports both `HTTP` and `linux socket`, you can
-  define 2 jobs named `local`, with each using a different method.
-- Check the `example` collector configuration file on - [GitHub](https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/example/example.conf) to get a - sense of the structure. - -```yaml -weather_station_1: - name: 'Greece' - endpoint: 'https://endpoint_1.com' - port: 67 - type: 'celsius' -weather_station_2: - name: 'Florida USA' - endpoint: 'https://endpoint_2.com' - port: 67 - type: 'fahrenheit' -``` - -Next, access the above configuration variables in the `__init__` function: - -```python -def __init__(self, configuration=None, name=None): - SimpleService.__init__(self, configuration=configuration, name=name) - self.endpoint = self.configuration.get('endpoint', ) -``` - -Because you initiate the `framework class` (e.g `SimpleService.__init__`), the configuration will be available -throughout the whole `Service` class of your module, as `self.configuration`. Finally, note that the `configuration.get` -function takes 2 arguments, one with the name of the configuration field and one with a default value in case it doesn't -find the configuration field. This allows you to define sane defaults for your collector. - -Moreover, when creating the configuration file, create a large comment section that describes the configuration -variables and inform the user about the defaults. For example, take a look at the `example` collector on -[GitHub](https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/example/example.conf). - -You can read more about the configuration file on the [`python.d.plugin` -documentation](/src/collectors/python.d.plugin/README.md). - -You can find the source code for the above examples on [GitHub](https://github.com/papajohn-uop/netdata). - -## Pull Request Checklist for Python Plugins - -Pull requests should be created in https://github.com/netdata/community. - -This is a generic checklist for submitting a new Python plugin for Netdata. It is by no means comprehensive. - -At minimum, to be buildable and testable, the PR needs to include: - -- The module itself, following proper naming conventions: `collectors/python.d.plugin//.chart.py` -- A README.md file for the plugin under `collectors/python.d.plugin/`. -- The configuration file for the module: `collectors/python.d.plugin//.conf`. Python config files are in YAML format, and should include comments describing what options are present. The instructions are also needed in the configuration section of the README.md -- A basic configuration for the plugin in the appropriate global config file: `collectors/python.d.plugin/python.d.conf`, which is also in YAML format. Either add a line that reads `# : yes` if the module is to be enabled by default, or one that reads `: no` if it is to be disabled by default. -- A makefile for the plugin at `collectors/python.d.plugin//Makefile.inc`. Check an existing plugin for what this should look like. -- A line in `collectors/python.d.plugin/Makefile.am` including the above-mentioned makefile. Place it with the other plugin includes (please keep the includes sorted alphabetically). -- Optionally, chart information in `src/web/gui/dashboard_info.js`. This generally involves specifying a name and icon for the section, and may include descriptions for the section or individual charts. -- Optionally, some default alert configurations for your collector in `health/health.d/.conf` and a line adding `.conf` in `health/Makefile.am`. 
-
-## Framework class reference
-
-Every framework class has some user-configurable variables which are specific to this particular class. Those variables
-should have default values initialized in the child class constructor.
-
-If a module needs additional user-configurable variables, they can be accessed from the `self.configuration` list and
-assigned in the constructor or a custom `check` method. Example:
-
-```py
-def __init__(self, configuration=None, name=None):
-    UrlService.__init__(self, configuration=configuration, name=name)
-    try:
-        self.baseurl = str(self.configuration['baseurl'])
-    except (KeyError, TypeError):
-        self.baseurl = "http://localhost:5001"
-```
-
-Classes implement `_get_raw_data` which should be used to grab raw data. This method usually returns a list of strings.
-
-### `SimpleService`
-
-This is the last-resort class: if a new module cannot be written using any other framework class, this one can be used.
-
-Examples: `ceph`, `sensors`
-
-It is the lowest-level class which implements most of the module logic, like:
-
-- threading
-- handling run times
-- chart formatting
-- logging
-- chart creation and updating
-
-### `LogService`
-
-Examples: `apache_cache`, `nginx_log`
-
-Variable from config file: `log_path`.
-
-An object created from this class reads new lines from the file specified in the `log_path` variable. It will check
-that the file exists and is readable. `_get_raw_data` returns a list of strings, where each string is one line from the
-file specified in `log_path`.
-
-### `ExecutableService`
-
-Examples: `exim`, `postfix`
-
-Variable from config file: `command`.
-
-This class allows you to execute a shell command in a secure way. It checks for invalid characters in the `command`
-variable and won't proceed if the command contains any of:
-
-- '&'
-- '|'
-- ';'
-- '>'
-- '\<'
-
-For additional security, it uses Python's `subprocess.Popen` (without the `shell=True` option) to execute the command.
-The command can be specified with an absolute or relative name. When using a relative name, it will try to find
-`command` in the `PATH` environment variable, as well as in `/sbin` and `/usr/sbin`.
-
-`_get_raw_data` returns a list of decoded lines returned by `command`.
-
-### `UrlService`
-
-Examples: `apache`, `nginx`, `tomcat`
-
-Variables from config file: `url`, `user`, `pass`.
-
-If data is grabbed by accessing a service via the HTTP protocol, this class can be used. It can handle HTTP Basic Auth
-when specified with the `user` and `pass` credentials.
-
-Please note that the config file can use different variables according to the specification of each module.
-
-`_get_raw_data` returns a list of UTF-8-decoded strings (lines).
-
-### `SocketService`
-
-Examples: `dovecot`, `redis`
-
-Variables from config file: `unix_socket`, `host`, `port`, `request`.
-
-The object will try to execute `request` using either `unix_socket` or a TCP/IP socket with the combination of `host`
-and `port`. It can access Unix sockets with the SOCK_STREAM or SOCK_DGRAM protocols, and TCP/IP sockets over IPv4 and
-IPv6 with the SOCK_STREAM setting.
-
-Sockets are accessed in non-blocking mode with a 15-second timeout.
-
-After every execution of `_get_raw_data`, the socket is closed. To prevent this, the module needs to set the
-`_keep_alive` variable to `True` and implement a custom `_check_raw_data` method.
-
-`_check_raw_data` should take the raw data and return `True` if all data has been received; otherwise it should return
-`False`. It should also do this in a fast and efficient way.
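To make that contract concrete, here's a minimal sketch of a custom `_check_raw_data`, assuming a hypothetical
line-oriented protocol where the server terminates its response with a blank line (the framing of your own service will
differ):

```python
def _check_raw_data(self, data):
    # Hypothetical framing: the response is complete once the server has
    # sent a trailing blank line. Keep checks cheap; this runs on every read.
    return data.endswith('\n\n')
```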
diff --git a/docs/developer-and-contributor-corner/python-collector.txt b/docs/developer-and-contributor-corner/python-collector.txt
new file mode 100644
index 000000000..f846b347b
--- /dev/null
+++ b/docs/developer-and-contributor-corner/python-collector.txt
@@ -0,0 +1,629 @@
+# Develop a custom data collector in Python
+
+The Netdata Agent uses [data collectors](/src/collectors/README.md) to
+fetch metrics from hundreds of system, container, and service endpoints. While the Netdata team and community have built
+[powerful collectors](/src/collectors/COLLECTORS.md) for most system, container,
+and service/application endpoints, some custom applications can't be monitored by default.
+
+In this tutorial, you'll learn how to leverage the [Python programming language](https://www.python.org/) to build a
+custom data collector for the Netdata Agent. Follow along with your own dataset, using the techniques and best practices
+covered here, or use the included examples for collecting and organizing either random or weather data.
+
+## Disclaimer
+
+If you're comfortable with Golang, consider instead writing a module for the [go.d.plugin](https://github.com/netdata/go.d.plugin).
+Golang is more performant, easier to maintain, and simpler for users since it doesn't require a particular runtime on the node to
+execute. Python plugins require Python on the machine to be executed. Netdata uses Go as the platform of choice for
+production-grade collectors.
+
+We generally do not accept contributions of Python modules to the GitHub project netdata/netdata. If you write a Python collector and
+want to make it available for other users, you should create the pull request in https://github.com/netdata/community.
+
+## What you need to get started
+
+- A physical or virtual Linux system, which we'll call a _node_.
+- A working [installation of Netdata](/packaging/installer/README.md) monitoring agent.
+
+### Quick start
+
+For a quick start, you can look at the
+[example plugin](https://raw.githubusercontent.com/netdata/netdata/master/src/collectors/python.d.plugin/example/example.chart.py).
+
+**Note**: If you are working 'locally' on a new collector and would like to run it in an already installed and running
+Netdata (as opposed to having to install Netdata from source again with your new changes), you can copy the relevant
+file to where Netdata expects it, and then either run `sudo systemctl restart netdata` to have it picked up and used by
+Netdata, or run the updated collector in debug mode by following a process like the one below (this assumes you have
+[installed Netdata from a GitHub fork](/packaging/installer/methods/manual.md) that you
+made to do your development on).
+
+```bash
+# clone your fork (done once at the start but shown here for clarity)
+#git clone --branch my-example-collector https://github.com/mygithubusername/netdata.git --depth=100 --recursive
+# go into your netdata source folder
+cd netdata
+# git pull your latest changes (assuming you built from a fork you are using to develop on)
+git pull
+# instead of running the installer we can just copy over the updated collector files
+#sudo ./netdata-installer.sh --dont-wait
+# copy over the file you have updated locally (pretending we are working on the 'example' collector)
+sudo cp collectors/python.d.plugin/example/example.chart.py /usr/libexec/netdata/python.d/
+# become user netdata
+sudo su -s /bin/bash netdata
+# run your updated collector in debug mode to see if it works without having to reinstall netdata
+/usr/libexec/netdata/plugins.d/python.d.plugin example debug trace nolock
+```
+
+## Jobs and elements of a Python collector
+
+A Python collector for Netdata is a Python script that gathers data from an external source and transforms it into
+charts to be displayed on the Netdata dashboard. The basic jobs of the plugin are:
+
+- Gather the data from the service/application.
+- Create the required charts.
+- Parse the data to extract or create the actual data to be represented.
+- Assign the correct values to the charts.
+- Set the order for the charts to be displayed.
+- Give the charts data to Netdata for visualization.
+
+The basic elements of a Netdata collector are:
+
+- `ORDER[]`: A list containing the charts to be displayed.
+- `CHARTS{}`: A dictionary containing the details for the charts to be displayed.
+- `data{}`: A dictionary containing the values to be displayed.
+- `get_data()`: The basic function of the plugin which will return to Netdata the correct values.
+
+**Note**: All names are better explained in the
+[External Plugins Documentation](/src/plugins.d/README.md).
+Parameters like `priority` and `update_every` mentioned in that documentation are handled by the `python.d.plugin`,
+not by each collection module.
+
+Let's walk through these jobs and elements as independent elements first, then apply them to example Python code.
+
+### Determine how to gather metrics data
+
+Netdata can collect data from any program that can print to stdout. Common input sources for collectors can be log files,
+HTTP requests, executables, and more. While this tutorial will offer some example inputs, your custom application will
+have different inputs and metrics.
+
+A great deal of the work in developing a Netdata collector is investigating the target application and understanding
+which metrics it exposes and how to retrieve them.
+
+### Create charts
+
+For the data to be represented in the Netdata dashboard, you need to create charts. Charts (in general) are defined by
+several characteristics: title, legend, units, type, and presented values. Each chart is represented as a dictionary
+entry:
+
+```python
+chart= {
+    "chart_name":
+        {
+            "options": [option_list],
+            "lines": [
+                [dimension_list]
+            ]
+        }
+    }
+```
+
+Use the `options` field to set the chart's options, which is a list in the form `options: [name, title, units, family,
+context, charttype]`, where:
+
+- `name`: The name of the chart.
+- `title` : The title to be displayed in the chart.
+- `units` : The units for this chart.
+- `family`: An identifier used to group charts together (can be null).
+- `context`: An identifier used to group contextually similar charts together.
+
+### Parse the data to extract or create the actual data to be represented
+
+Once the data is received, your collector should process it in order to get the values required. If, for example, the
+received data is a JSON string, you should parse it to get the values required for the charts.
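+
+As a minimal sketch, assuming a hypothetical service that answers with a JSON object such as
+`{"temperature": 25}`, the parsing step inside `_get_data` could look like this:
+
+```python
+import json
+
+def _get_data(self):
+    try:
+        # _get_raw_data is provided by the framework class and usually returns a list of strings
+        raw = self._get_raw_data()
+        parsed = json.loads(''.join(raw))
+        # map the parsed value to a dimension id defined in CHARTS
+        return {'temperature': int(parsed['temperature'])}
+    except (TypeError, KeyError, ValueError):
+        return None
+```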
+
+### Assign the correct values to the charts
+
+Once you have processed your data and obtained the required values, you need to assign those values to the charts you
+created. This is done using the `data` dictionary, which is in the form:
+
+`"data": {dimension_id: value}`, where:
+
+- `dimension_id`: The id of a defined dimension in a created chart.
+- `value`: The numerical value to associate with this dimension.
+
+### Set the order for the charts to be displayed
+
+Next, set the order of chart appearance with the `ORDER` list, which is in the form:
+
+`"ORDER": [chart_name_1, chart_name_2, …., chart_name_X]`, where:
+
+- `chart_name_x`: The name of the chart to be displayed in position X.
+
+### Give the charts data to Netdata for visualization
+
+Our plugin should just return the `data` dictionary. If everything is set correctly, the charts should be updated with the
+correct values.
+
+## Framework classes
+
+Every module needs to implement its own `Service` class. This class should inherit from one of the framework classes:
+
+- `SimpleService`
+- `UrlService`
+- `SocketService`
+- `LogService`
+- `ExecutableService`
+
+It also needs to invoke the parent class constructor in a specific way, as well as assign global variables to class variables.
+
+For example, the snippet below is from the
+[RabbitMQ collector](https://github.com/netdata/netdata/blob/91f3268e9615edd393bd43de4ad8068111024cc9/collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py#L273).
+This collector uses an HTTP endpoint and uses the `UrlService` framework class, which only needs to define an HTTP
+endpoint for data collection.
+
+```python
+class Service(UrlService):
+    def __init__(self, configuration=None, name=None):
+        UrlService.__init__(self, configuration=configuration, name=name)
+        self.order = ORDER
+        self.definitions = CHARTS
+        self.url = '{0}://{1}:{2}'.format(
+            configuration.get('scheme', 'http'),
+            configuration.get('host', '127.0.0.1'),
+            configuration.get('port', 15672),
+        )
+        self.node_name = str()
+        self.vhost = VhostStatsBuilder()
+        self.collected_vhosts = set()
+        self.collect_queues_metrics = configuration.get('collect_queues_metrics', False)
+        self.debug("collect_queues_metrics is {0}".format("enabled" if self.collect_queues_metrics else "disabled"))
+        if self.collect_queues_metrics:
+            self.queue = QueueStatsBuilder()
+            self.collected_queues = set()
+```
+
+In our use case, we use the `SimpleService` framework, since no other framework class suits our needs.
+
+You can find the [framework class reference](#framework-class-reference) below.
+
+## An example collector using weather station data
+
+Let's build a custom Python collector for visualizing data from a weather monitoring station.
+
+### Determine how to gather metrics data
+
+This example assumes you can gather metrics data through HTTP requests to a web server, and that the data provided are
+numeric values for temperature, humidity, and pressure. It also assumes you can get the `min`, `max`, and `average`
+values for these metrics.
+
+### Chart creation
+
+First, create a single chart that shows the latest temperature metric:
+
+```python
+CHARTS = {
+    "temp_current": {
+        "options": ["my_temp", "Temperature", "Celsius", "TEMP", "weather_station.temperature", "line"],
+        "lines": [
+            ["current_temp_id", "current_temperature"]
+        ]
+    }
+}
+```
+
+### Parse the data to extract or create the actual data to be represented
+
+Every collector must implement `_get_data`. This method should grab raw data from `_get_raw_data`,
+parse it, and return a dictionary where keys are unique dimension names, or `None` if no data is collected.
+
+For example:
+
+```py
+def _get_data(self):
+    try:
+        raw = self._get_raw_data().split(" ")
+        return {'active': int(raw[2])}
+    except (ValueError, AttributeError):
+        return None
+```
+
+In our weather data collector we declare `get_data` as follows:
+
+```python
+    def get_data(self):
+        # The data dict is basically all the values to be represented
+        # The entries are in the format: { "dimension": value}
+        # And each "dimension" should belong to a chart.
+        data = dict()
+
+        self.populate_data()
+
+        data['current_temperature'] = self.weather_data["temp"]
+
+        return data
+```
+
+A standard practice would be to either get the data in JSON format or transform it to JSON format. We use a dictionary
+to give this format and issue random values to simulate received data.
+
+The following code iterates through the names of the expected values and creates a dictionary with the name of the value
+as `key` and a random value as `value`.
+
+```python
+    weather_data = dict()
+    weather_metrics = [
+        "temp", "av_temp", "min_temp", "max_temp",
+        "humid", "av_humid", "min_humid", "max_humid",
+        "pressure", "av_pressure", "min_pressure", "max_pressure",
+    ]
+
+    def populate_data(self):
+        for metric in self.weather_metrics:
+            self.weather_data[metric] = random.randint(0, 100)
+```
+
+### Assign the correct values to the charts
+
+Our chart has a dimension called `current_temp_id`, which should have the temperature value received.
+
+```python
+data['current_temp_id'] = self.weather_data["temp"]
+```
+
+### Set the order for the charts to be displayed
+
+```python
+ORDER = [
+    "temp_current"
+]
+```
+
+### Give the charts data to Netdata for visualization
+
+```python
+return data
+```
+
+A snapshot of the chart created by this plugin:
+
+![A snapshot of the chart created by this plugin](https://i.imgur.com/2tR9KvF.png)
+
+Here's the current source code for the data collector:
+
+```python
+# -*- coding: utf-8 -*-
+# Description: howto weather station netdata python.d module
+# Author: Panagiotis Papaioannou (papajohn-uop)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+import random
+
+NETDATA_UPDATE_EVERY = 1
+priority = 90000
+
+ORDER = [
+    "temp_current"
+]
+
+CHARTS = {
+    "temp_current": {
+        "options": ["my_temp", "Temperature", "Celsius", "TEMP", "weather_station.temperature", "line"],
+        "lines": [
+            ["current_temperature"]
+        ]
+    }
+}
+
+class Service(SimpleService):
+    def __init__(self, configuration=None, name=None):
+        SimpleService.__init__(self, configuration=configuration, name=name)
+        self.order = ORDER
+        self.definitions = CHARTS
+        # values to show at graphs
+        self.values = dict()
+
+    @staticmethod
+    def check():
+        return True
+
+    weather_data = dict()
+    weather_metrics = [
+        "temp", "av_temp", "min_temp", "max_temp",
+        "humid", "av_humid", "min_humid", "max_humid",
+        "pressure", "av_pressure", "min_pressure", "max_pressure",
+    ]
+
+    def logMe(self, msg):
+        self.debug(msg)
+
+    def populate_data(self):
+        for metric in self.weather_metrics:
+            self.weather_data[metric] = random.randint(0, 100)
+
+    def get_data(self):
+        # The data dict is basically all the values to be represented
+        # The entries are in the format: { "dimension": value}
+        # And each "dimension" should belong to a chart.
+        data = dict()
+
+        self.populate_data()
+
+        data['current_temperature'] = self.weather_data["temp"]
+
+        return data
+```
+
+## Add more charts to the existing weather station collector
+
+To enrich the example, add another chart to the collector to present the humidity metric.
+
+Add a new entry in the `CHARTS` dictionary with the definition for the new chart.
+
+```python
+CHARTS = {
+    'temp_current': {
+        'options': ['my_temp', 'Temperature', 'Celsius', 'TEMP', 'weather_station.temperature', 'line'],
+        'lines': [
+            ['current_temperature']
+        ]
+    },
+    'humid_current': {
+        'options': ['my_humid', 'Humidity', '%', 'HUMIDITY', 'weather_station.humidity', 'line'],
+        'lines': [
+            ['current_humidity']
+        ]
+    }
+}
+```
+
+The data has already been created by the `populate_data()` method, so you only need to assign the value of
+`self.weather_data["humid"]` to the `current_humidity` dimension.
+
+```python
+    data['current_temperature'] = self.weather_data["temp"]
+    data['current_humidity'] = self.weather_data["humid"]
+```
+
+Next, put the new `humid_current` chart into the `ORDER` list:
+
+```python
+ORDER = [
+    'temp_current',
+    'humid_current'
+]
+```
+
+[Restart Netdata](/docs/netdata-agent/start-stop-restart.md) to see the new humidity
+chart:
+
+![A snapshot of the modified chart](https://i.imgur.com/XOeCBmg.png)
+
+Next, it's time to add one more chart that visualizes the average, minimum, and maximum temperature values.
+
+Add a new entry in the `CHARTS` dictionary with the definition for the new chart. Since you want three values
+represented in this chart, add three dimensions. You should also use the same `FAMILY` value in the charts (`TEMP`)
+so that those two charts are grouped together.
+
+```python
+CHARTS = {
+    'temp_current': {
+        'options': ['my_temp', 'Temperature', 'Celsius', 'TEMP', 'weather_station.temperature', 'line'],
+        'lines': [
+            ['current_temperature']
+        ]
+    },
+    'temp_stats': {
+        'options': ['stats_temp', 'Temperature', 'Celsius', 'TEMP', 'weather_station.temperature_stats', 'line'],
+        'lines': [
+            ['min_temperature'],
+            ['max_temperature'],
+            ['avg_temperature']
+        ]
+    },
+    'humid_current': {
+        'options': ['my_humid', 'Humidity', '%', 'HUMIDITY', 'weather_station.humidity', 'line'],
+        'lines': [
+            ['current_humidity']
+        ]
+    }
+
+}
+```
+
+As before, initialize the new dimensions and add data to them:
+
+```python
+    data['current_temperature'] = self.weather_data["temp"]
+    data['min_temperature'] = self.weather_data["min_temp"]
+    data['max_temperature'] = self.weather_data["max_temp"]
+    data['avg_temperature'] = self.weather_data["av_temp"]
+    data['current_humidity'] = self.weather_data["humid"]
+```
+
+Finally, set the order for the `temp_stats` chart:
+
+```python
+ORDER = [
+    'temp_current',
+    'temp_stats',
+    'humid_current'
+]
+```
+
+[Restart Netdata](/docs/netdata-agent/start-stop-restart.md) to see the new min/max/average temperature chart with multiple dimensions:
+
+![A snapshot of the modified chart](https://i.imgur.com/g7E8lnG.png)
+
+## Add a configuration file
+
+The last piece of the puzzle to create a fully robust Python collector is the configuration file. The python.d plugin
+uses configuration files in [YAML](https://www.tutorialspoint.com/yaml/yaml_basics.htm) format, which are used as follows:
+
+- Create a configuration file in the same directory as the `<module_name>.chart.py`. Name it `<module_name>.conf`.
+- Define a `job`, which is an instance of the collector. It is useful when you want to collect data from different
+  sources with different attributes. For example, we could gather data from 2 different weather stations, which use
+  different temperature measures: Fahrenheit and Celsius.
+- You can define many different jobs with the same name, but with different attributes. Netdata will try each job
+  serially and will stop at the first job that returns data. If multiple jobs have the same name, only one of them can
+  run. This enables you to define different "ways" to fetch data from a particular data source so that the collector has
+  more chances to work out-of-the-box. For example, if the data source supports both `HTTP` and `linux socket`, you can
+  define 2 jobs named `local`, with each using a different method.
+- Check the `example` collector configuration file on
+  [GitHub](https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/example/example.conf) to get a
+  sense of the structure.
+
+```yaml
+weather_station_1:
+  name: 'Greece'
+  endpoint: 'https://endpoint_1.com'
+  port: 67
+  type: 'celsius'
+weather_station_2:
+  name: 'Florida USA'
+  endpoint: 'https://endpoint_2.com'
+  port: 67
+  type: 'fahrenheit'
+```
+
+Next, access the above configuration variables in the `__init__` function:
+
+```python
+def __init__(self, configuration=None, name=None):
+    SimpleService.__init__(self, configuration=configuration, name=name)
+    self.endpoint = self.configuration.get('endpoint', <default_endpoint>)
+```
+
+Because you initiate the framework class (e.g., `SimpleService.__init__`), the configuration will be available
+throughout the whole `Service` class of your module, as `self.configuration`. Finally, note that the `configuration.get`
+function takes 2 arguments: one with the name of the configuration field, and one with a default value in case it doesn't
+find the configuration field. This allows you to define sane defaults for your collector.
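+
+As an illustrative sketch (not part of the example's final source code), the `type` option from the jobs above could
+be used inside `get_data` to normalize the hypothetical Fahrenheit readings to Celsius:
+
+```python
+    def get_data(self):
+        data = dict()
+        self.populate_data()
+        temp = self.weather_data["temp"]
+        # 'type' comes from the job definition in the configuration file; default to celsius
+        if self.configuration.get('type', 'celsius') == 'fahrenheit':
+            temp = (temp - 32) * 5 / 9
+        data['current_temperature'] = temp
+        return data
+```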
+
+Moreover, when creating the configuration file, create a large comment section that describes the configuration
+variables and informs the user about the defaults. For example, take a look at the `example` collector on
+[GitHub](https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/example/example.conf).
+
+You can read more about the configuration file on the [`python.d.plugin`
+documentation](/src/collectors/python.d.plugin/README.md).
+
+You can find the source code for the above examples on [GitHub](https://github.com/papajohn-uop/netdata).
+
+## Pull Request Checklist for Python Plugins
+
+Pull requests should be created in .
+
+This is a generic checklist for submitting a new Python plugin for Netdata. It is by no means comprehensive.
+
+At minimum, to be buildable and testable, the PR needs to include:
+
+- The module itself, following proper naming conventions: `collectors/python.d.plugin/<module_dir>/<module_name>.chart.py`
+- A README.md file for the plugin under `collectors/python.d.plugin/<module_dir>`.
+- The configuration file for the module: `collectors/python.d.plugin/<module_dir>/<module_name>.conf`. Python config files are in YAML format, and should include comments describing what options are present. The instructions are also needed in the configuration section of the README.md
+- A basic configuration for the plugin in the appropriate global config file: `collectors/python.d.plugin/python.d.conf`, which is also in YAML format. Either add a line that reads `# <module_name>: yes` if the module is to be enabled by default, or one that reads `<module_name>: no` if it is to be disabled by default.
+- A makefile for the plugin at `collectors/python.d.plugin/<module_dir>/Makefile.inc`. Check an existing plugin for what this should look like.
+- A line in `collectors/python.d.plugin/Makefile.am` including the above-mentioned makefile. Place it with the other plugin includes (please keep the includes sorted alphabetically).
+- Optionally, chart information in `src/web/gui/dashboard_info.js`. This generally involves specifying a name and icon for the section, and may include descriptions for the section or individual charts.
+- Optionally, some default alert configurations for your collector in `health/health.d/<module_name>.conf` and a line adding `<module_name>.conf` in `health/Makefile.am`.
+
+## Framework class reference
+
+Every framework class has some user-configurable variables which are specific to this particular class. Those variables should have default values initialized in the child class constructor.
+
+If a module needs an additional user-configurable variable, it can be accessed from the `self.configuration` list and assigned in the constructor or in a custom `check` method. Example:
+
+```py
+def __init__(self, configuration=None, name=None):
+    UrlService.__init__(self, configuration=configuration, name=name)
+    try:
+        self.baseurl = str(self.configuration['baseurl'])
+    except (KeyError, TypeError):
+        self.baseurl = "http://localhost:5001"
+```
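+
+A custom `check` method can validate such variables in the same way. The sketch below is illustrative and assumes the
+`baseurl` option from the snippet above:
+
+```py
+def check(self):
+    # refuse to start the job when no baseurl can be determined
+    if not self.configuration.get('baseurl'):
+        self.error('baseurl is not set in the configuration file')
+        return False
+    # fall back to the parent class check, which verifies that data can actually be collected
+    return UrlService.check(self)
+```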
+
+Classes implement `_get_raw_data` which should be used to grab raw data. This method usually returns a list of strings.
+
+### `SimpleService`
+
+This is a last-resort class: if a new module cannot be written by using any other framework class, this one can be used.
+
+Examples: `ceph`, `sensors`
+
+It is the lowest-level class which implements most of the module logic, like:
+
+- threading
+- handling run times
+- chart formatting
+- logging
+- chart creation and updating
+
+### `LogService`
+
+Examples: `apache_cache`, `nginx_log`
+
+Variable from config file: `log_path`.
+
+An object created from this class reads new lines from the file specified in the `log_path` variable. It will check that the file exists and is readable. `_get_raw_data` returns a list of strings, where each string is one line from the file specified in `log_path`.
+
+### `ExecutableService`
+
+Examples: `exim`, `postfix`
+
+Variable from config file: `command`.
+
+This allows the execution of a shell command in a secure way. It will check for invalid characters in the `command` variable and won't proceed if there is one of:
+
+- '&'
+- '|'
+- ';'
+- '>'
+- '\<'
+
+For additional security it uses the Python `subprocess.Popen` (without the `shell=True` option) to execute the command. The command can be specified with an absolute or relative name. When using a relative name, it will try to find `command` in the `PATH` environment variable, as well as in `/sbin` and `/usr/sbin`.
+
+`_get_raw_data` returns a list of decoded lines returned by `command`.
+
+### `UrlService`
+
+Examples: `apache`, `nginx`, `tomcat`
+
+Variables from config file: `url`, `user`, `pass`.
+
+If data is grabbed by accessing a service via the HTTP protocol, this class can be used. It can handle HTTP Basic Auth when specified with the `user` and `pass` credentials.
+
+Please note that the config file can use different variables according to the specification of each module.
+
+`_get_raw_data` returns a list of utf-8 decoded strings (lines).
+
+### `SocketService`
+
+Examples: `dovecot`, `redis`
+
+Variables from config file: `unix_socket`, `host`, `port`, `request`.
+
+An object of this class will try to execute `request` using either a `unix_socket` or a TCP/IP socket with a combination of `host` and `port`. It can access unix sockets with the SOCK_STREAM or SOCK_DGRAM protocols, and IPv4 and IPv6 TCP/IP sockets with the SOCK_STREAM setting.
+
+Sockets are accessed in non-blocking mode with a 15-second timeout.
+
+After every execution of `_get_raw_data` the socket is closed. To prevent this, the module needs to set the `_keep_alive` variable to `True` and implement a custom `_check_raw_data` method.
+
+`_check_raw_data` should take raw data and return `True` if all data is received, otherwise it should return `False`. It should also do this in a fast and efficient way.
diff --git a/docs/developer-and-contributor-corner/raspberry-pi-anomaly-detection.md b/docs/developer-and-contributor-corner/raspberry-pi-anomaly-detection.md
deleted file mode 100644
index 41cf007eb..000000000
--- a/docs/developer-and-contributor-corner/raspberry-pi-anomaly-detection.md
+++ /dev/null
@@ -1,96 +0,0 @@
-# Anomaly detection for RPi monitoring
-
-Learn how to use a low-overhead machine learning algorithm alongside Netdata to detect anomalous metrics on a Raspberry Pi.
-
-We love IoT and edge at Netdata, we also love machine learning. Even better if we can combine the two to ease the pain
-of monitoring increasingly complex systems.
-
-We recently explored what might be involved in enabling our Python-based [anomalies
-collector](/src/collectors/python.d.plugin/anomalies/README.md) on a Raspberry Pi. To our delight, it's actually quite
-straightforward!
-
-Read on to learn all the steps and enable unsupervised anomaly detection on your on Raspberry Pi(s).
-
-> Spoiler: It's just a couple of extra commands that will make you feel like a pro.
- -## What you need to get started - -- A Raspberry Pi running Raspbian, which we'll call a _node_. -- The [open-source Netdata](https://github.com/netdata/netdata) monitoring agent. If you don't have it installed on your - node yet, [get started now](/packaging/installer/README.md). - -## Install dependencies - -First make sure Netdata is using Python 3 when it runs Python-based data collectors. - -Next, open `netdata.conf` using [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-netdataconf) -from within the [Netdata config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). Scroll down to the -`[plugin:python.d]` section to pass in the `-ppython3` command option. - -```conf -[plugin:python.d] - # update every = 1 - command options = -ppython3 -``` - -Next, install some of the underlying libraries used by the Python packages the collector depends upon. - -```bash -sudo apt install llvm-9 libatlas3-base libgfortran5 libatlas-base-dev -``` - -Now you're ready to install the Python packages used by the collector itself. First, become the `netdata` user. - -```bash -sudo su -s /bin/bash netdata -``` - -Then pass in the location to find `llvm` as an environment variable for `pip3`. - -```bash -LLVM_CONFIG=llvm-config-9 pip3 install --user llvmlite numpy==1.20.1 netdata-pandas==0.0.38 numba==0.50.1 scikit-learn==0.23.2 pyod==0.8.3 -``` - -## Enable the anomalies collector - -Now you're ready to enable the collector and [restart Netdata](/packaging/installer/README.md#maintaining-a-netdata-agent-installation). - -```bash -sudo ./edit-config python.d.conf - -# restart netdata -sudo systemctl restart netdata -``` - -And that should be it! Wait a minute or two, refresh your Netdata dashboard, you should see the default anomalies -charts under the **Anomalies** section in the dashboard's menu. - -![Anomaly detection on the Raspberry -Pi](https://user-images.githubusercontent.com/1153921/110149717-9d749c00-7d9b-11eb-853c-e041a36f0a41.png) - -## Overhead on system - -Of course one of the most important considerations when trying to do anomaly detection at the edge (as opposed to in a -centralized cloud somewhere) is the resource utilization impact of running a monitoring tool. - -With the default configuration, the anomalies collector uses about 6.5% of CPU at each run. During the retraining step, -CPU utilization jumps to between 20-30% for a few seconds, but you can [configure -retraining](/src/collectors/python.d.plugin/anomalies/README.md#configuration) to happen less often if you wish. - -![CPU utilization of anomaly detection on the Raspberry -Pi](https://user-images.githubusercontent.com/1153921/110149718-9d749c00-7d9b-11eb-9af8-46e2032cd1d0.png) - -In terms of the runtime of the collector, it was averaging around 250ms during each prediction step, jumping to about -8-10 seconds during a retraining step. This jump equates only to a small gap in the anomaly charts for a few seconds. - -![Execution time of anomaly detection on the Raspberry -Pi](https://user-images.githubusercontent.com/1153921/110149715-9cdc0580-7d9b-11eb-826d-faf6f620621a.png) - -The last consideration then is the amount of RAM the collector needs to store both the models and some of the data -during training. By default, the anomalies collector, along with all other running Python-based collectors, uses about -100MB of system memory. 
-
-![RAM utilization of anomaly detection on the Raspberry
-Pi](https://user-images.githubusercontent.com/1153921/110149720-9e0d3280-7d9b-11eb-883d-b1d4d9b9b5e1.png)
-
-
diff --git a/docs/developer-and-contributor-corner/raspberry-pi-anomaly-detection.txt b/docs/developer-and-contributor-corner/raspberry-pi-anomaly-detection.txt
new file mode 100644
index 000000000..9bdacf274
--- /dev/null
+++ b/docs/developer-and-contributor-corner/raspberry-pi-anomaly-detection.txt
@@ -0,0 +1,96 @@
+# Anomaly detection for RPi monitoring
+
+Learn how to use a low-overhead machine learning algorithm alongside Netdata to detect anomalous metrics on a Raspberry Pi.
+
+We love IoT and edge at Netdata, and we also love machine learning. Even better if we can combine the two to ease the pain
+of monitoring increasingly complex systems.
+
+We recently explored what might be involved in enabling our Python-based [anomalies
+collector](/src/collectors/python.d.plugin/anomalies/README.md) on a Raspberry Pi. To our delight, it's actually quite
+straightforward!
+
+Read on to learn all the steps and enable unsupervised anomaly detection on your own Raspberry Pi(s).
+
+> Spoiler: It's just a couple of extra commands that will make you feel like a pro.
+
+## What you need to get started
+
+- A Raspberry Pi running Raspbian, which we'll call a _node_.
+- The [open-source Netdata](https://github.com/netdata/netdata) monitoring agent. If you don't have it installed on your
+  node yet, [get started now](/packaging/installer/README.md).
+
+## Install dependencies
+
+First make sure Netdata is using Python 3 when it runs Python-based data collectors.
+
+Next, open `netdata.conf` using [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config)
+from within the [Netdata config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). Scroll down to the
+`[plugin:python.d]` section to pass in the `-ppython3` command option.
+
+```text
+[plugin:python.d]
+  # update every = 1
+  command options = -ppython3
+```
+
+Next, install some of the underlying libraries used by the Python packages the collector depends upon.
+
+```bash
+sudo apt install llvm-9 libatlas3-base libgfortran5 libatlas-base-dev
+```
+
+Now you're ready to install the Python packages used by the collector itself. First, become the `netdata` user.
+
+```bash
+sudo su -s /bin/bash netdata
+```
+
+Then pass in the location to find `llvm` as an environment variable for `pip3`.
+
+```bash
+LLVM_CONFIG=llvm-config-9 pip3 install --user llvmlite numpy==1.20.1 netdata-pandas==0.0.38 numba==0.50.1 scikit-learn==0.23.2 pyod==0.8.3
+```
+
+## Enable the anomalies collector
+
+Now you're ready to enable the collector and restart Netdata.
+
+```bash
+sudo ./edit-config python.d.conf
+
+# restart netdata
+sudo systemctl restart netdata
+```
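+
+Inside `python.d.conf`, enabling the collector means setting its entry to `yes`. Assuming the entry is named after
+the collector, as with the other modules listed in that file, the line would look like this:
+
+```text
+anomalies: yes
+```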
+
+And that should be it! Wait a minute or two, refresh your Netdata dashboard, and you should see the default anomalies
+charts under the **Anomalies** section in the dashboard's menu.
+
+![Anomaly detection on the Raspberry
+Pi](https://user-images.githubusercontent.com/1153921/110149717-9d749c00-7d9b-11eb-853c-e041a36f0a41.png)
+
+## Overhead on system
+
+Of course, one of the most important considerations when trying to do anomaly detection at the edge (as opposed to in a
+centralized cloud somewhere) is the resource utilization impact of running a monitoring tool.
+
+With the default configuration, the anomalies collector uses about 6.5% of CPU at each run. During the retraining step,
+CPU utilization jumps to between 20% and 30% for a few seconds, but you can [configure
+retraining](/src/collectors/python.d.plugin/anomalies/README.md#configuration) to happen less often if you wish.
+
+![CPU utilization of anomaly detection on the Raspberry
+Pi](https://user-images.githubusercontent.com/1153921/110149718-9d749c00-7d9b-11eb-9af8-46e2032cd1d0.png)
+
+In terms of the runtime of the collector, it was averaging around 250ms during each prediction step, jumping to about
+8-10 seconds during a retraining step. This jump equates only to a small gap in the anomaly charts for a few seconds.
+
+![Execution time of anomaly detection on the Raspberry
+Pi](https://user-images.githubusercontent.com/1153921/110149715-9cdc0580-7d9b-11eb-826d-faf6f620621a.png)
+
+The last consideration then is the amount of RAM the collector needs to store both the models and some of the data
+during training. By default, the anomalies collector, along with all other running Python-based collectors, uses about
+100MB of system memory.
+
+![RAM utilization of anomaly detection on the Raspberry
+Pi](https://user-images.githubusercontent.com/1153921/110149720-9e0d3280-7d9b-11eb-883d-b1d4d9b9b5e1.png)
+
+
diff --git a/docs/developer-and-contributor-corner/running-through-cf-tunnels.md b/docs/developer-and-contributor-corner/running-through-cf-tunnels.md
index 3179d5805..588740bc9 100644
--- a/docs/developer-and-contributor-corner/running-through-cf-tunnels.md
+++ b/docs/developer-and-contributor-corner/running-through-cf-tunnels.md
@@ -102,7 +102,7 @@ You can edit the configuration file using the `edit-config` script from the Netd
     destination = tcp:127.0.0.1:19999
     ```
 
-[Restart the Agents](/packaging/installer/README.md#maintaining-a-netdata-agent-installation), and you are done!
+[Restart the Agents](/docs/netdata-agent/start-stop-restart.md), and you are done!
 You should now be able to have a Local Dashboard that gets its metrics from Child instances, running through Cloudflare tunnels.
 
diff --git a/docs/developer-and-contributor-corner/style-guide.md b/docs/developer-and-contributor-corner/style-guide.md
index 94656bd76..b64a9df0b 100644
--- a/docs/developer-and-contributor-corner/style-guide.md
+++ b/docs/developer-and-contributor-corner/style-guide.md
@@ -2,7 +2,7 @@
 The _Netdata style guide_ establishes editorial guidelines for any writing produced by the Netdata team or the Netdata
 community, including documentation, articles, in-product UX copy, and more.
 
-> ### Note
+> **Note**
 > This document is meant to be accompanied by the [Documentation Guidelines](/docs/guidelines.md). If you want to contribute to Netdata's documentation, please read it too.
 
 Both internal Netdata teams and external contributors to any of Netdata's open-source projects should reference and adhere to this style guide as much as possible.
@@ -30,7 +30,6 @@ you're around. In writing, you reflect tone in your word choice, punctuation, se
 The same idea about voice and tone applies to organizations, too. Our voice shouldn't change much between two pieces of
 content, no matter who wrote each, but the tone might be quite different based on who we think is reading.
 
-
 ### Voice
 
 Netdata's voice is authentic, passionate, playful, and respectful.
@@ -63,7 +62,7 @@ the [language, grammar, and mechanics](#language-grammar-and-mechanics) section
 
 - Would this language make sense to someone who doesn't work here?
 - Could someone quickly scan this document and understand the material?
-- Create an information hierarchy with key information presented first and clearly called out to improve scannability. +- Create an information hierarchy with key information presented first and clearly called out to improve clarity and readability. - Avoid directional language like "sidebar on the right of the page" or "header at the top of the page" since presentation elements may adapt for devices. - Use descriptive links rather than "click here" or "learn more". @@ -236,8 +235,8 @@ must reflect the _current state of [production](https://app.netdata.cloud). Every link should clearly state its destination. Don't use words like "here" to describe where a link will take your reader. -| | | -|-----------------|-----------------------------------------------------------------------------------------------------------------------------------------| +| | | +|-----------------|-------------------------------------------------------------------------------------------| | Not recommended | To install Netdata, click [here](/packaging/installer/README.md). | | **Recommended** | To install Netdata, read the [installation instructions](/packaging/installer/README.md). | @@ -300,9 +299,9 @@ universal. Don't include full paths, beginning from the system's root (`/`), as these might not work on certain systems. -| | | -|-----------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Not recommended | Use `edit-config` to edit Netdata's configuration: `sudo /etc/netdata/edit-config netdata.conf`. | +| | | +|-----------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Not recommended | Use `edit-config` to edit Netdata's configuration: `sudo /etc/netdata/edit-config netdata.conf`. | | **Recommended** | Use `edit-config` to edit Netdata's configuration by first navigating to your [Netdata config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory), which is typically at `/etc/netdata`, then running `sudo edit-config netdata.conf`. | ### `sudo` @@ -394,27 +393,26 @@ the [Docusaurus documentation](https://v2.docusaurus.io/docs/markdown-features#c Notes inside files should render properly both in GitHub and in Learn, to do that, it is best to use the format listed below: -``` -> ### Note +```md +> **Note** > This is an info or a note block. -> ### Tip, Best Practice +> **Tip, Best Practice** > This is a tip or a best practice block. -> ### Warning, Caution +> **Warning, Caution** > This is a warning or a caution block. ``` Which renders into: - -> ### Note +> **Note** > This is an info or a note block. -> ### Tip, Best Practice +> **Tip, Best Practice** > This is a tip or a best practice block. -> ### Warning, Caution +> **Warning, Caution** > This is a warning or a caution block. 
### Tabs @@ -450,21 +448,21 @@ The following tables describe the standard spelling, capitalization, and usage o | Term | Definition | |-----------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| **claimed node** | A node that you've proved ownership of by completing the [connecting to Cloud process](/src/claim/README.md). The claimed node will then appear in your Space and any Rooms you added it to. | +| **Connected Node** | A node that you've proved ownership of by completing the [connecting to Cloud process](/src/claim/README.md). The claimed node will then appear in your Space and any Rooms you added it to. | | **Netdata** | The company behind the open-source Netdata Agent and the Netdata Cloud web application. Never use _netdata_ or _NetData_.

In general, focus on the user's goals, actions, and solutions rather than what the company provides. For example, write _Learn more about enabling alert notifications on your preferred platforms_ instead of _Netdata sends alert notifications to your preferred platforms_. | | **Netdata Agent** | The free and open source [monitoring agent](https://github.com/netdata/netdata) that you can install on all of your distributed systems, whether they're physical, virtual, containerized, ephemeral, and more. The Agent monitors systems running Linux, Docker, Kubernetes, macOS, FreeBSD, and more, and collects metrics from hundreds of popular services and applications. | | **Netdata Cloud** | The web application hosted at [https://app.netdata.cloud](https://app.netdata.cloud) that helps you monitor an entire infrastructure of distributed systems in real time.

Never use _Cloud_ without the preceding _Netdata_ to avoid ambiguity. | | **Netdata community forum** | The Discourse-powered forum for feature requests, Netdata Cloud technical support, and conversations about Netdata's monitoring and troubleshooting products. | -| **node** | A system on which the Netdata Agent is installed. The system can be physical, virtual, in a Docker container, and more. Depending on your infrastructure, you may have one, dozens, or hundreds of nodes. Some nodes are _ephemeral_, in that they're created/destroyed automatically by an orchestrator service. | +| **Node** | A system on which the Netdata Agent is installed. The system can be physical, virtual, in a Docker container, and more. Depending on your infrastructure, you may have one, dozens, or hundreds of nodes. Some nodes are _ephemeral_, in that they're created/destroyed automatically by an orchestrator service. | | **Space** | The highest level container within Netdata Cloud for a user to organize their team members and nodes within their infrastructure. A Space likely represents an entire organization or a large team.

_Space_ is always capitalized. | -| **unreachable node** | A connected node with a disrupted [Agent-Cloud link](/src/aclk/README.md). Unreachable could mean the node no longer exists or is experiencing network connectivity issues with Cloud. | -| **visited node** | A node which has had its Agent dashboard directly visited by a user. A list of these is maintained on a per-user basis. | -| **Room** | A smaller grouping of nodes where users can view key metrics in real-time and monitor the health of many nodes with their alert status. Rooms can be used to organize nodes in any way that makes sense for your infrastructure, such as by a service, purpose, physical location, and more.

_Room_ is always capitalized. | +| **Unreachable node** | A connected node with a disrupted [Agent-Cloud link](/src/aclk/README.md). Unreachable could mean the node no longer exists or is experiencing network connectivity issues with Cloud. | +| **Visited Node** | A node which has had its Agent dashboard directly visited by a user. A list of these is maintained on a per-user basis. | +| **Room** | A smaller grouping of nodes where users can view key metrics in real-time and monitor the health of many nodes with their alert status. Rooms can be used to organize nodes in any way that makes sense for your infrastructure, such as by a service, purpose, physical location, and more.

_Room_ is always capitalized. | ### Other technical terms | Term | Definition | |-----------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | **filesystem** | Use instead of _file system_. | -| **preconfigured** | The concept that many of Netdata's features come with sane defaults that users don't need to configure to find immediate value. | +| **pre-configured** | The concept that many of Netdata's features come with sane defaults that users don't need to configure to find immediate value. | | **real time**/**real-time** | Use _real time_ as a noun phrase, most often with _in_: _Netdata collects metrics in real time_. Use _real-time_ as an adjective: _Netdata collects real-time metrics from hundreds of supported applications and services. | diff --git a/docs/diagrams/netdata-overview.xml b/docs/diagrams/netdata-overview.xml index 16c967e6e..2967f915c 100644 --- a/docs/diagrams/netdata-overview.xml +++ b/docs/diagrams/netdata-overview.xml @@ -78,7 +78,7 @@ - + diff --git a/docs/exporting-metrics/README.md b/docs/exporting-metrics/README.md index d667cea15..24e33ad46 100644 --- a/docs/exporting-metrics/README.md +++ b/docs/exporting-metrics/README.md @@ -3,7 +3,7 @@ Netdata allows you to export metrics to external time-series databases with the [exporting engine](/src/exporting/README.md). This system uses a number of **connectors** to initiate connections to [more than thirty](#supported-databases) supported databases, including InfluxDB, Prometheus, Graphite, ElasticSearch, and much -more. +more. The exporting engine resamples Netdata's thousands of per-second metrics at a user-configurable interval, and can export metrics to multiple time-series databases simultaneously. @@ -22,45 +22,45 @@ Netdata supports exporting metrics to the following databases through several [connectors](/src/exporting/README.md#features). Once you find the connector that works for your database, open its documentation and the [enabling a connector](/docs/exporting-metrics/enable-an-exporting-connector.md) doc for details on enabling it. 
-- **AppOptics**: [Prometheus remote write](/src/exporting/prometheus/remote_write/README.md) -- **AWS Kinesis**: [AWS Kinesis Data Streams](/src/exporting/aws_kinesis/README.md) -- **Azure Data Explorer**: [Prometheus remote write](/src/exporting/prometheus/remote_write/README.md) -- **Azure Event Hubs**: [Prometheus remote write](/src/exporting/prometheus/remote_write/README.md) -- **Blueflood**: [Graphite](/src/exporting/graphite/README.md) -- **Chronix**: [Prometheus remote write](/src/exporting/prometheus/remote_write/README.md) -- **Cortex**: [Prometheus remote write](/src/exporting/prometheus/remote_write/README.md) -- **CrateDB**: [Prometheus remote write](/src/exporting/prometheus/remote_write/README.md) -- **ElasticSearch**: [Graphite](/src/exporting/graphite/README.md), [Prometheus remote +- **AppOptics**: [Prometheus remote write](/src/exporting/prometheus/remote_write/README.md) +- **AWS Kinesis**: [AWS Kinesis Data Streams](/src/exporting/aws_kinesis/README.md) +- **Azure Data Explorer**: [Prometheus remote write](/src/exporting/prometheus/remote_write/README.md) +- **Azure Event Hubs**: [Prometheus remote write](/src/exporting/prometheus/remote_write/README.md) +- **Blueflood**: [Graphite](/src/exporting/graphite/README.md) +- **Chronix**: [Prometheus remote write](/src/exporting/prometheus/remote_write/README.md) +- **Cortex**: [Prometheus remote write](/src/exporting/prometheus/remote_write/README.md) +- **CrateDB**: [Prometheus remote write](/src/exporting/prometheus/remote_write/README.md) +- **ElasticSearch**: [Graphite](/src/exporting/graphite/README.md), [Prometheus remote write](/src/exporting/prometheus/remote_write/README.md) -- **Gnocchi**: [Prometheus remote write](/src/exporting/prometheus/remote_write/README.md) -- **Google BigQuery**: [Prometheus remote write](/src/exporting/prometheus/remote_write/README.md) -- **Google Cloud Pub/Sub**: [Google Cloud Pub/Sub Service](/src/exporting/pubsub/README.md) -- **Graphite**: [Graphite](/src/exporting/graphite/README.md), [Prometheus remote +- **Gnocchi**: [Prometheus remote write](/src/exporting/prometheus/remote_write/README.md) +- **Google BigQuery**: [Prometheus remote write](/src/exporting/prometheus/remote_write/README.md) +- **Google Cloud Pub/Sub**: [Google Cloud Pub/Sub Service](/src/exporting/pubsub/README.md) +- **Graphite**: [Graphite](/src/exporting/graphite/README.md), [Prometheus remote write](/src/exporting/prometheus/remote_write/README.md) -- **InfluxDB**: [Graphite](/src/exporting/graphite/README.md), [Prometheus remote +- **InfluxDB**: [Graphite](/src/exporting/graphite/README.md), [Prometheus remote write](/src/exporting/prometheus/remote_write/README.md) -- **IRONdb**: [Prometheus remote write](/src/exporting/prometheus/remote_write/README.md) -- **JSON**: [JSON document databases](/src/exporting/json/README.md) -- **Kafka**: [Prometheus remote write](/src/exporting/prometheus/remote_write/README.md) -- **KairosDB**: [Graphite](/src/exporting/graphite/README.md), [OpenTSDB](/src/exporting/opentsdb/README.md) -- **M3DB**: [Prometheus remote write](/src/exporting/prometheus/remote_write/README.md) -- **MetricFire**: [Prometheus remote write](/src/exporting/prometheus/remote_write/README.md) -- **MongoDB**: [MongoDB](/src/exporting/mongodb/README.md) -- **New Relic**: [Prometheus remote write](/src/exporting/prometheus/remote_write/README.md) -- **OpenTSDB**: [OpenTSDB](/src/exporting/opentsdb/README.md), [Prometheus remote +- **IRONdb**: [Prometheus remote 
write](/src/exporting/prometheus/remote_write/README.md) +- **JSON**: [JSON document databases](/src/exporting/json/README.md) +- **Kafka**: [Prometheus remote write](/src/exporting/prometheus/remote_write/README.md) +- **KairosDB**: [Graphite](/src/exporting/graphite/README.md), [OpenTSDB](/src/exporting/opentsdb/README.md) +- **M3DB**: [Prometheus remote write](/src/exporting/prometheus/remote_write/README.md) +- **MetricFire**: [Prometheus remote write](/src/exporting/prometheus/remote_write/README.md) +- **MongoDB**: [MongoDB](/src/exporting/mongodb/README.md) +- **New Relic**: [Prometheus remote write](/src/exporting/prometheus/remote_write/README.md) +- **OpenTSDB**: [OpenTSDB](/src/exporting/opentsdb/README.md), [Prometheus remote write](/src/exporting/prometheus/remote_write/README.md) -- **PostgreSQL**: [Prometheus remote write](/src/exporting/prometheus/remote_write/README.md) +- **PostgreSQL**: [Prometheus remote write](/src/exporting/prometheus/remote_write/README.md) via [PostgreSQL Prometheus Adapter](https://github.com/CrunchyData/postgresql-prometheus-adapter) -- **Prometheus**: [Prometheus scraper](/src/exporting/prometheus/README.md) -- **TimescaleDB**: [Prometheus remote write](/src/exporting/prometheus/remote_write/README.md), +- **Prometheus**: [Prometheus scraper](/src/exporting/prometheus/README.md) +- **TimescaleDB**: [Prometheus remote write](/src/exporting/prometheus/remote_write/README.md), [netdata-timescale-relay](/src/exporting/TIMESCALE.md) -- **QuasarDB**: [Prometheus remote write](/src/exporting/prometheus/remote_write/README.md) -- **SignalFx**: [Prometheus remote write](/src/exporting/prometheus/remote_write/README.md) -- **Splunk**: [Prometheus remote write](/src/exporting/prometheus/remote_write/README.md) -- **TiKV**: [Prometheus remote write](/src/exporting/prometheus/remote_write/README.md) -- **Thanos**: [Prometheus remote write](/src/exporting/prometheus/remote_write/README.md) -- **VictoriaMetrics**: [Prometheus remote write](/src/exporting/prometheus/remote_write/README.md) -- **Wavefront**: [Prometheus remote write](/src/exporting/prometheus/remote_write/README.md) +- **QuasarDB**: [Prometheus remote write](/src/exporting/prometheus/remote_write/README.md) +- **SignalFx**: [Prometheus remote write](/src/exporting/prometheus/remote_write/README.md) +- **Splunk**: [Prometheus remote write](/src/exporting/prometheus/remote_write/README.md) +- **TiKV**: [Prometheus remote write](/src/exporting/prometheus/remote_write/README.md) +- **Thanos**: [Prometheus remote write](/src/exporting/prometheus/remote_write/README.md) +- **VictoriaMetrics**: [Prometheus remote write](/src/exporting/prometheus/remote_write/README.md) +- **Wavefront**: [Prometheus remote write](/src/exporting/prometheus/remote_write/README.md) Can't find your preferred external time-series database? 
Ask our [community](https://community.netdata.cloud/) for solutions, or file an [issue on diff --git a/docs/exporting-metrics/enable-an-exporting-connector.md b/docs/exporting-metrics/enable-an-exporting-connector.md index 6a5542fdb..16fbe0b9b 100644 --- a/docs/exporting-metrics/enable-an-exporting-connector.md +++ b/docs/exporting-metrics/enable-an-exporting-connector.md @@ -19,7 +19,7 @@ Use `edit-config` from your [Netdata config directory](/docs/netdata-agent/confi Enable the exporting engine itself by setting `enabled` to `yes`: -```conf +```text [exporting:global] enabled = yes ``` @@ -30,7 +30,7 @@ Save the file but keep it open, as you will edit it again to enable specific con Use the following configuration as a starting point. Copy and paste it into `exporting.conf`. -```conf +```text [opentsdb:http:my_opentsdb_http_instance] enabled = yes destination = localhost:4242 diff --git a/docs/glossary.md b/docs/glossary.md index bcada6030..78ba18072 100644 --- a/docs/glossary.md +++ b/docs/glossary.md @@ -6,7 +6,7 @@ As such, we want to provide a little Glossary as a reference starting point for If you're here looking for the definition of a term you heard elsewhere in our community or products, or if you just want to learn Netdata from the ground up, you've come to the right page. -Use the alphabatized list below to find the answer to your single-term questions, and click the bolded list items to explore more on the topics! We'll be sure to keep constantly updating this list, so if you hear a word that you would like for us to cover, just let us know or submit a request! +Use the alphabetized list below to find the answer to your single-term questions, and click the bolded list items to explore more on the topics! We'll be sure to keep constantly updating this list, so if you hear a word that you would like for us to cover, just let us know or submit a request! [A](#a) | [B](#b) | [C](#c) | [D](#d)| [E](#e) | [F](#f) | [G](#g) | [H](#h) | [I](#i) | [J](#j) | [K](#k) | [L](#l) | [M](#m) | [N](#n) | [O](#o) | [P](#p) | [Q](#q) | [R](#r) | [S](#s) | [T](#t) | [U](#u) | [V](#v) | [W](#w) | [X](#x) | [Y](#y) | [Z](#z) @@ -53,7 +53,7 @@ Use the alphabatized list below to find the answer to your single-term questions ## E -- [**External Plugins**](/src/collectors/plugins.d/README.md): These gather metrics from external processes, such as a webserver or database, and run as independent processes that communicate with the Netdata daemon via pipes. +- [**External Plugins**](/src/plugins.d/README.md): These gather metrics from external processes, such as a webserver or database, and run as independent processes that communicate with the Netdata daemon via pipes. ## F @@ -65,7 +65,7 @@ Use the alphabatized list below to find the answer to your single-term questions ## G -- [**Group by**](/docs/dashboards-and-charts/netdata-charts.md#group-by-dimension-node-or-chart): The drop-down on the dimension bar of a composite chart that allows you to group metrics by dimension, node, or chart. +- [**Group by**](/docs/dashboards-and-charts/netdata-charts.md#group-by-dropdown): The drop-down on the dimension bar of a composite chart that allows you to group metrics by dimension, node, or chart. - [**Health Configuration Files**](/src/health/REFERENCE.md#edit-health-configuration-files): Files that you can edit to configure your Agent's health watchdog service. 
@@ -110,7 +110,7 @@ metrics, troubleshoot complex performance problems, and make data interoperable
 
 ## O
 
-- [**Obsoletion**(of nodes)](/docs/netdata-cloud/organize-your-infrastructure-invite-your-team.md#obsoleting-offline-nodes-from-a-space): Removing nodes from a space.
+- [**Obsoletion**(of nodes)](/docs/dashboards-and-charts/nodes-tab.md): Removing nodes from a space.
 
 - [**Orchestrators**](/src/collectors/README.md#collector-architecture-and-terminology): External plugins that run and manage one or more modules. They run as independent processes.
 
@@ -145,8 +145,8 @@ even thousands of nodes. There are no actual bottlenecks especially if you retai
 
 ## V
 
-- [**Visualizations**](/docs/category-overview-pages/visualizations-overview.md): Netdata uses dimensions, contexts, and families to sort your metric data into graphs, charts, and alerts that maximize your understand of your infrastructure and your ability to troubleshoot it, along or on a team.
+- [**Visualizations**](/docs/dashboards-and-charts/README.md): Netdata uses dimensions, contexts, and families to sort your metric data into graphs, charts, and alerts that maximize your understanding of your infrastructure and your ability to troubleshoot it, alone or on a team.
 
 ## Z
 
-- **Zero Configuration**: Netdata is preconfigured and capable to autodetect and monitor any well known application that runs on your system. You just deploy and claim Netdata Agents in your Netdata space, and monitor them in seconds.
+- **Zero Configuration**: Netdata is pre-configured and capable of auto-detecting and monitoring any well-known application that runs on your system. You just deploy and claim Netdata Agents in your Netdata Space, and monitor them in seconds.
diff --git a/docs/guidelines.md b/docs/guidelines.md
index b0e6759cc..02e7a386f 100644
--- a/docs/guidelines.md
+++ b/docs/guidelines.md
@@ -49,7 +49,7 @@ Please ensure that any links to a different documentation resource are fully exp
 
 e.g.
 
-```
+```text
 [Correct link to this document](/docs/guidelines.md)
 vs
 [Incorrect link to this document](https://learn.netdata.cloud/XYZ)
diff --git a/docs/netdata-agent/README.md b/docs/netdata-agent/README.md
index 75bd4898e..8096e911a 100644
--- a/docs/netdata-agent/README.md
+++ b/docs/netdata-agent/README.md
@@ -1,6 +1,6 @@
 # Netdata Agent
 
-The Netdata Agent is the main building block in a Netdata ecosystem. It is installed on all monitored systems to monitor system components, containers and applications.
+The Netdata Agent is the main building block in the Netdata ecosystem. It is installed on all monitored systems to monitor system components, containers and applications.
 
 The Netdata Agent is an **observability pipeline in a box** that can either operate standalone, or blend into a bigger pipeline made by more Netdata Agents (Children and Parents).
 
@@ -53,7 +53,7 @@ stateDiagram-v2
 1. **Discover**: auto-detect metric sources on localhost, auto-discover metric sources on Kubernetes.
 2. **Collect**: query data sources to collect metric samples, using the optimal protocol for each data source. 800+ integrations supported, including dozens of native application protocols, OpenMetrics and StatsD.
-3. **Detect Anomalies**: use the trained machine learning models for each metric, to detect in real-time if each sample collected is an outlier (an anomaly), or not.
+3. **Detect Anomalies**: use the trained machine learning models for each metric to detect in real-time if each sample collected is an outlier (an anomaly), or not.
 4. 
**Store**: keep collected samples and their anomaly status, in the time-series database (database mode `dbengine`) or a ring buffer (database modes `ram` and `alloc`). 5. **Learn**: train multiple machine learning models for each metric collected, learning behaviors and patterns for detecting anomalies. 6. **Check**: a health engine, triggering alerts and sending notifications. Netdata comes with hundreds of alert configurations that are automatically attached to metrics when they get collected, detecting errors, common configuration errors and performance issues. @@ -69,7 +69,7 @@ stateDiagram-v2 2. **Automation**: Netdata is designed to automate most of the process of setting up and running an observability solution. It is designed to instantly provide comprehensive dashboards and fully automated alerts, with zero configuration. -3. **High Fidelity Monitoring**: Netdata was born from our need to kill the console for observability. So, it provides metrics and logs in the same granularity and fidelity console tools do, but also comes with tools that go beyond metrics and logs, to provide a holistic view of the monitored infrastructure (e.g. check [Top Monitoring](/docs/top-monitoring-netdata-functions.md)). +3. **High Fidelity Monitoring**: Netdata was born from our need to kill the console for observability. So, it provides metrics and logs in the same granularity and fidelity console tools do, but also comes with tools that go beyond metrics and logs, to provide a holistic view of the monitored infrastructure (e.g., check [Top Monitoring](/docs/top-monitoring-netdata-functions.md)). 4. **Minimal impact on monitored systems and applications**: Netdata has been designed to have a minimal impact on the monitored systems and their applications. There are [independent studies](https://www.ivanomalavolta.com/files/papers/ICSOC_2023.pdf) reporting that Netdata excels in CPU usage, RAM utilization, Execution Time and the impact Netdata has on monitored applications and containers. @@ -77,8 +77,8 @@ stateDiagram-v2 ## Dashboard Versions -The Netdata agents (Standalone, Children and Parents) **share the dashboard** of Netdata Cloud. However, when the user is logged-in and the Netdata agent is connected to Netdata Cloud, the following are enabled (which are otherwise disabled): +The Netdata agents (Standalone, Children and Parents) **share the dashboard** of Netdata Cloud. However, when the user is logged in and the Netdata agent is connected to Netdata Cloud, the following are enabled (which are otherwise disabled): 1. **Access to Sensitive Data**: Some data, like systemd-journal logs and several [Top Monitoring](/docs/top-monitoring-netdata-functions.md) features expose sensitive data, like IPs, ports, process command lines and more. To access all these when the dashboard is served directly from a Netdata agent, Netdata Cloud is required to verify that the user accessing the dashboard has the required permissions. -2. **Dynamic Configuration**: Netdata agents are configured via configuration files, manually or through some provisioning system. The latest Netdata includes a feature to allow users change some of the configuration (collectors, alerts) via the dashboard. This feature is only available to users of paid Netdata Cloud plan. +2. **Dynamic Configuration**: Netdata agents are configured via configuration files, manually or through some provisioning system. The latest Netdata includes a feature to allow users to change some configurations (collectors, alerts) via the dashboard. 
This feature is only available to users on a paid Netdata Cloud plan.

diff --git a/docs/netdata-agent/backup-and-restore-an-agent.md b/docs/netdata-agent/backup-and-restore-an-agent.md
index d17cad604..db9398b27 100644
--- a/docs/netdata-agent/backup-and-restore-an-agent.md
+++ b/docs/netdata-agent/backup-and-restore-an-agent.md
@@ -1,44 +1,43 @@
# Backing up a Netdata Agent

> **Note**
->
+>
> Users are responsible for backing up, recovering, and ensuring their data's availability because Netdata stores data locally on each system due to its decentralized architecture.

## Introduction

-When preparing to backup a Netdata Agent it is worth considering that there are different kinds of data that you may wish to backup independently or all together:
+When planning a Netdata Agent backup, it's essential to recognize the types of data that can be backed up, either individually or collectively:

-| Data type | Description | Location |
-|---------------------|------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------|
+| Data type | Description | Location |
+|---------------------|------------------------------------------------------|-----------------------------------------------------------------|
| Agent configuration | Files controlling configuration of the Netdata Agent | [config directory](/docs/netdata-agent/configuration/README.md) |
-| Metrics | Database files | /var/cache/netdata |
-| Identity | Claim token, API key and some other files | /var/lib/netdata |
-
+| Metrics | Database files | /var/cache/netdata |
+| Identity | Claim token, API key and some other files | /var/lib/netdata |

## Scenarios

### Backing up to restore data in case of a node failure

-In this standard scenario, you are backing up your Netdata Agent in case of a node failure or data corruption so that the metrics and the configuration can be recovered. The purpose is not to backup/restore the application itself.
+In this standard scenario, you’re backing up your Netdata Agent in case of a node failure or data corruption so that the metrics and the configuration can be recovered. The purpose is not to back up/restore the application itself.

-1. Verify that the directory paths in the table above contain the information you expect.
+1. Verify that the directory paths in the table above contain the information you expect.

   > **Note**
   > The specific paths may vary depending on installation method, Operating System, and whether it is a Docker/Kubernetes deployment.

2. It is recommended that you [stop the Netdata Agent](/docs/netdata-agent/start-stop-restart.md) when backing up the Metrics/database files.
-   Backing up the Agent configuration and Identity folders is straightforward as they should not be changing very frequently.
+   Backing up the Agent configuration and Identity folders is straightforward, as they shouldn’t be changing very frequently.

3. Using a backup tool such as `tar`, you will need to run the backup as _root_ or as the _netdata_ user to access all the files in the directories.
-
-   ```
+
+   ```bash
   sudo tar -cvpzf netdata_backup.tar.gz /etc/netdata/ /var/cache/netdata /var/lib/netdata
   ```
-
+
Stopping the Netdata agent is typically necessary to back up the database files of the Netdata Agent.
If you want to minimize the gap in metrics caused by stopping the Netdata Agent, consider implementing a backup job or script that follows this sequence:
-
+
 - Back up the Agent configuration and Identity directories
 - Stop the Netdata service
 - Back up the database files
@@ -46,25 +45,25 @@

### Restoring Netdata

-1. Ensure that the Netdata agent is installed and is [stopped](/packaging/installer/README.md#maintaining-a-netdata-agent-installation)
+1. Ensure that the Netdata agent is installed and is [stopped](/docs/netdata-agent/start-stop-restart.md)

   If you plan to deploy the Agent and restore a backup on top of it, then you might find it helpful to use the [`--dont-start-it`](/packaging/installer/methods/kickstart.md#other-options) option upon installation.

-   ```
+   ```bash
   wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --dont-start-it
   ```
-
-   > **Note**
-   > If you are going to restore the database files then you should first ensure that the Metrics directory is empty.
-   >
-   > ```
+
+   > **Note**
+   > If you are going to restore the database files, then you should first ensure that the Metrics directory is empty.
+   >
+   > ```bash
   > sudo rm -Rf /var/cache/netdata
   > ```

2. Restore the backup from the archive

-   ```
+   ```bash
   sudo tar -xvpzf /path/to/netdata_backup.tar.gz -C /
   ```

-3. [Start the Netdata agent](/packaging/installer/README.md#maintaining-a-netdata-agent-installation)
+3. [Start the Netdata agent](/docs/netdata-agent/start-stop-restart.md)

diff --git a/docs/netdata-agent/configuration/README.md b/docs/netdata-agent/configuration/README.md
index 097fb9310..abe511313 100644
--- a/docs/netdata-agent/configuration/README.md
+++ b/docs/netdata-agent/configuration/README.md
@@ -1,21 +1,28 @@
# Netdata Agent Configuration

-The main Netdata agent configuration is `netdata.conf`.
+> **Info**
+>
+> Netdata Cloud lets you configure Agents on the fly. Check the [Dynamic Configuration Manager](/docs/netdata-agent/configuration/dynamic-configuration.md) documentation for details.
+
+The main Netdata Agent configuration is `netdata.conf`.

## The Netdata config directory

-On most Linux systems, by using our [recommended one-line installation](/packaging/installer/README.md#install-on-linux-with-one-line-installer), the **Netdata config
+On most Linux systems, the **Netdata config
directory** will be `/etc/netdata/`. The config directory contains several configuration files with the `.conf` extension, a few directories, and a shell script named `edit-config`.

> Some operating systems will use `/opt/netdata/etc/netdata/` as the config directory. If you're not sure where yours
> is, navigate to `http://NODE:19999/netdata.conf` in your browser, replacing `NODE` with the IP address or hostname of
-> your node, and find the `# config directory = ` setting. The value listed is the config directory for your system.
+> your node, and find the `# config directory =` setting. The value listed is the config directory for your system.

All of Netdata's documentation assumes that your config directory is at `/etc/netdata`, and that you're running any scripts from inside that directory.

+## Edit a configuration file using `edit-config`
+
+We recommend the use of the `edit-config` script for configuration changes.

-## edit `netdata.conf`

+It exists inside your config directory (see above) and helps manage and safely edit configuration files.
To edit `netdata.conf`, run this on your terminal:

@@ -28,9 +35,9 @@

Your editor will open.

## downloading `netdata.conf`

-The running version of `netdata.conf` can be downloaded from a running Netdata agent, at this URL:
+The running version of `netdata.conf` can be downloaded from a running Netdata Agent, at this URL:

-```
+```url
http://agent-ip:19999/netdata.conf
```

@@ -40,4 +47,3 @@

You can save and use this version, using these commands:

```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
curl -ksSLo /tmp/netdata.conf.new http://localhost:19999/netdata.conf && sudo mv -i /tmp/netdata.conf.new netdata.conf
```
-

diff --git a/docs/netdata-agent/configuration/anonymous-telemetry-events.md b/docs/netdata-agent/configuration/anonymous-telemetry-events.md
index b943ea9a3..4d48de4a2 100644
--- a/docs/netdata-agent/configuration/anonymous-telemetry-events.md
+++ b/docs/netdata-agent/configuration/anonymous-telemetry-events.md
@@ -1,30 +1,22 @@
-
-
# Anonymous telemetry events

-By default, Netdata collects anonymous usage information from the open-source monitoring agent. For agent events like start,stop,crash etc we use our own cloud function in GCP. For frontend telemetry (pageviews etc.) on the agent dashboard itself we use the open-source
+By default, Netdata collects anonymous usage information from the open-source monitoring agent. For agent events like start, stop, crash, etc., we use our own cloud function in GCP. For frontend telemetry (page views, etc.) on the agent dashboard itself, we use the open-source
product analytics platform [PostHog](https://github.com/PostHog/posthog).

We are strongly committed to your [data privacy](https://netdata.cloud/privacy/).

We use the statistics gathered from this information for two purposes:

-1. **Quality assurance**, to help us understand if Netdata behaves as expected, and to help us classify repeated
-   issues with certain distributions or environments.
+1. **Quality assurance**, to help us understand if Netdata behaves as expected, and to help us classify repeated
+   issues with certain distributions or environments.

-2. **Usage statistics**, to help us interpret how people use the Netdata agent in real-world environments, and to help
-   us identify how our development/design decisions influence the community.
+2. **Usage statistics**, to help us interpret how people use the Netdata agent in real-world environments, and to help
+   us identify how our development/design decisions influence the community.

Netdata collects usage information via two different channels:

-- **Agent dashboard**: We use the [PostHog JavaScript integration](https://posthog.com/docs/integrations/js-integration) (with sensitive event attributes overwritten to be anonymized) to send product usage events when you access an [Agent's dashboard](/docs/dashboards-and-charts/README.md).
-- **Agent backend**: The `netdata` daemon executes the [`anonymous-statistics.sh`](https://github.com/netdata/netdata/blob/6469cf92724644f5facf343e4bdd76ac0551a418/daemon/anonymous-statistics.sh.in) script when Netdata starts, stops cleanly, or fails.
+- **Agent dashboard**: We use the [PostHog JavaScript integration](https://posthog.com/docs/integrations/js-integration) (with sensitive event attributes overwritten to be anonymized) to send product usage events when you access an [Agent's dashboard](/docs/dashboards-and-charts/README.md).
+- **Agent backend**: The `netdata` daemon executes the [`anonymous-statistics.sh`](https://github.com/netdata/netdata/blob/6469cf92724644f5facf343e4bdd76ac0551a418/daemon/anonymous-statistics.sh.in) script when Netdata starts, stops cleanly, or fails.

You can opt-out from sending anonymous statistics to Netdata through three different [opt-out mechanisms](#opt-out).

@@ -32,7 +24,7 @@ You can opt-out from sending anonymous statistics to Netdata through three diffe

When you kick off an Agent dashboard session by visiting `http://NODE:19999`, Netdata initializes a PostHog session and masks various event attributes.

-_Note_: You can see the relevant code in the [dashboard repository](https://github.com/netdata/dashboard/blob/master/src/domains/global/sagas.ts#L107) where the `window.posthog.register()` call is made.
+_Note_: You can see the relevant code in the [dashboard repository](https://github.com/netdata/dashboard/blob/master/src/domains/global/sagas.ts#L107) where the `window.posthog.register()` call is made.

```JavaScript
window.posthog.register({
@@ -52,28 +44,28 @@ variable is controlled via the [opt-out mechanism](#opt-out).

## Agent Backend - Anonymous Statistics Script

Every time the daemon is started or stopped and every time a fatal condition is encountered, Netdata uses the anonymous
-statistics script to collect system information and send it to the Netdata telemetry cloud function via an http call. The information collected for all
+statistics script to collect system information and send it to the Netdata telemetry cloud function via an HTTP call. The information collected for all
events is:

-- Netdata version
-- OS name, version, id, id_like
-- Kernel name, version, architecture
-- Virtualization technology
-- Containerization technology
+- Netdata version
+- OS name, version, id, id_like
+- Kernel name, version, architecture
+- Virtualization technology
+- Containerization technology

-Furthermore, the FATAL event sends the Netdata process & thread name, along with the source code function, source code
+Furthermore, the FATAL event sends the Netdata process and thread name, along with the source code function, source code
filename and source code line number of the fatal error.

Starting with v1.21, we additionally collect information about:

-- Failures to build the dependencies required to use Cloud features.
-- Unavailability of Cloud features in an agent.
-- Failures to connect to the Cloud in case the [connection process](/src/claim/README.md) has been completed. This includes error codes
-  to inform the Netdata team about the reason why the connection failed.
+- Failures to build the dependencies required to use Cloud features.
+- Unavailability of Cloud features in an agent.
+- Failures to connect to the Cloud in case the [connection process](/src/claim/README.md) has been completed. This includes error codes
+  to inform the Netdata team about the reason why the connection failed.

To see exactly what is collected and how, you can review the script template `daemon/anonymous-statistics.sh.in`. The
template is converted to a bash script called `anonymous-statistics.sh`, installed under the Netdata `plugins
-directory`, which is usually `/usr/libexec/netdata/plugins.d`.
+directory`, which is usually `/usr/libexec/netdata/plugins.d`.

## Opt-out

@@ -87,17 +79,15 @@ installation, including manual, offline, and macOS installations.
Create the fil

**Pass the option `--disable-telemetry` to any of the installer scripts in the [installation docs](/packaging/installer/README.md).**
You can append this option during the initial installation or a manual update. You can also export the environment variable `DISABLE_TELEMETRY` with a non-zero or non-empty value
-(e.g: `export DISABLE_TELEMETRY=1`).
+(e.g., `export DISABLE_TELEMETRY=1`).

When using Docker, **set your `DISABLE_TELEMETRY` environment variable to `1`.** You can set this variable with the following command: `export DISABLE_TELEMETRY=1`. When creating a container using Netdata's [Docker image](/packaging/docker/README.md#create-a-new-netdata-agent-container) for the first time, this variable will disable
-the anonymous statistics script inside of the container.
+the anonymous statistics script inside the container.

Each of these opt-out processes does the following:

-- Prevents the daemon from executing the anonymous statistics script.
-- Forces the anonymous statistics script to exit immediately.
-- Stops the PostHog JavaScript snippet, which remains on the dashboard, from firing and sending any data to the Netdata PostHog.
-
-
+- Prevents the daemon from executing the anonymous statistics script.
+- Forces the anonymous statistics script to exit immediately.
+- Stops the PostHog JavaScript snippet, which remains on the dashboard, from firing and sending any data to the Netdata PostHog.

diff --git a/docs/netdata-agent/configuration/cheatsheet.md b/docs/netdata-agent/configuration/cheatsheet.md
index 3e1428694..ecd8e8a84 100644
--- a/docs/netdata-agent/configuration/cheatsheet.md
+++ b/docs/netdata-agent/configuration/cheatsheet.md
@@ -1,8 +1,8 @@
# Useful management and configuration actions

-Below you will find some of the most common actions that one can take while using Netdata. You can use this page as a quick reference for installing Netdata, connecting a node to the Cloud, properly editing the configuration, accessing Netdata's API, and more!
+Below are some of the most common actions one can take while using Netdata. You can use this page as a quick reference for installing Netdata, connecting a node to the Cloud, properly editing the configuration, accessing Netdata's API, and more!

-### Install Netdata
+## Install Netdata

```bash
wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh
@@ -11,12 +11,12 @@ wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /
curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh
```

-#### Connect a node to Netdata Cloud
+### Connect a node to Netdata Cloud

To do so, sign in to Netdata Cloud. On your Space, under the Nodes tab, click `Add Nodes`, then paste the provided command into your node’s terminal and run it.
You can also copy the Claim token and pass it to the installation script with `--claim-token` and re-run it.

-### Configuration
+## Configuration

**Netdata's config directory** is `/etc/netdata/`, but in some operating systems it might be `/opt/netdata/etc/netdata/`. Look for the `# config directory =` line over at `http://NODE_IP:19999/netdata.conf` to find your config directory.

From within that directory you can run `sudo ./edit-config netdata.conf` **to edit `netdata.conf`**.

You can edit other config files too, by specifying their filename after `./edit-config`. You are expected to use this method in all following configuration changes.
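For example, to open a collector's configuration file instead of `netdata.conf` (the `go.d/nginx.conf` path here is only an illustration; substitute whichever file you need):

```bash
# Move into the config directory first; the fallback covers static installs
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
sudo ./edit-config go.d/nginx.conf
```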
- - ---- - -#### Enable/disable plugins (groups of collectors) +### Enable/disable plugins (groups of collectors) ```bash sudo ./edit-config netdata.conf ``` -```conf +```text [plugins] go.d = yes # enabled node.d = no # disabled ``` -#### Enable/disable specific collectors +### Enable/disable specific collectors ```bash sudo ./edit-config go.d.conf # edit a plugin's config @@ -89,24 +45,18 @@ sudo ./edit-config go.d.conf # edit a plugin's config ```yaml modules: - activemq: no # disabled - cockroachdb: yes # enabled + activemq: no # disabled + cockroachdb: yes # enabled ``` -#### Edit a collector's config +### Edit a collector's config ```bash sudo ./edit-config go.d/mysql.conf ``` -### Alerts & notifications - - After any change, reload the Netdata health configuration: ```bash @@ -115,32 +65,23 @@ netdatacli reload-health killall -USR2 netdata ``` -#### Configure a specific alert +### Configure a specific alert ```bash sudo ./edit-config health.d/example-alert.conf ``` -#### Silence a specific alert +### Silence a specific alert ```bash sudo ./edit-config health.d/example-alert.conf ``` -``` +```text to: silent ``` - - ---- - -### Manage the daemon +## Manage the daemon | Intent | Action | |:----------------------------|------------------------------------------------------------:| @@ -151,65 +92,22 @@ sudo ./edit-config health.d/example-alert.conf | View error logs | `less /var/log/netdata/error.log` | | View collectors logs | `less /var/log/netdata/collector.log` | -#### Change the port Netdata listens to (example, set it to port 39999) +### Change the port Netdata listens to (example, set it to port 39999) -```conf +```text [web] default port = 39999 ``` -### See metrics and dashboards +## See metrics and dashboards -#### Netdata Cloud: `https://app.netdata.cloud` +### Netdata Cloud: `https://app.netdata.cloud` -#### Local dashboard: `https://NODE:19999` +### Local dashboard: `https://NODE:19999` > Replace `NODE` with the IP address or hostname of your node. Often `localhost`. -### Access the Netdata API +## Access the Netdata API You can access the API like this: `http://NODE:19999/api/VERSION/REQUEST`. If you want to take a look at all the API requests, check our API page at - - - - - - - diff --git a/docs/netdata-agent/configuration/common-configuration-changes.md b/docs/netdata-agent/configuration/common-configuration-changes.md index e9d8abadc..0eda7dd86 100644 --- a/docs/netdata-agent/configuration/common-configuration-changes.md +++ b/docs/netdata-agent/configuration/common-configuration-changes.md @@ -19,11 +19,7 @@ changes reflected in those visualizations due to the way Netdata Cloud proxies m ### Increase the long-term metrics retention period -Read our doc -on [increasing long-term metrics storage](/docs/netdata-agent/configuration/optimizing-metrics-database/change-metrics-storage.md) -for details, including a -[calculator](/docs/netdata-agent/configuration/optimizing-metrics-database/change-metrics-storage.md#calculate-the-system-resources-ram-disk-space-needed-to-store-metrics) -to help you determine the exact settings for your desired retention period. +Read our doc on [increasing long-term metrics storage](/docs/netdata-agent/configuration/optimizing-metrics-database/change-metrics-storage.md) for details. ### Reduce the data collection frequency @@ -33,7 +29,7 @@ of `netdata.conf` so that it is greater than `1`. An `update every` of `5` means the Netdata Agent enforces a _minimum_ collection frequency of 5 seconds. 
-```conf +```text [global] update every = 5 ``` @@ -56,7 +52,7 @@ for that specific module. Uncomment the line and change its value to `no`. ## Modify alerts and notifications -Netdata's health monitoring watchdog uses hundreds of preconfigured health entities, with intelligent thresholds, to +Netdata's health monitoring watchdog uses hundreds of pre-configured health entities, with intelligent thresholds, to generate warning and critical alerts for most production systems and their applications without configuration. However, each alert and notification method is completely customizable. @@ -94,7 +90,7 @@ Because the source path contains `health.d/cpu.conf`, run `sudo edit-config heal Open the configuration file for that alert and set the `to` line to `silent`. -```conf +```text template: disk_fill_rate on: disk.space lookup: max -1s at -30m unaligned of avail @@ -111,7 +107,7 @@ section of `netdata.conf`. ### Enable alert notifications -Open `health_alarm_notify.conf` for editing. First, read the [enabling notifications](/docs/alerts-and-notifications/notifications/README.md#netdata-agent) doc +Open `health_alarm_notify.conf` for editing. First, read the [enabling notifications](/src/health/notifications/README.md) doc for an example of the process using Slack, then click on the link to your preferred notification method to find documentation for that specific endpoint. @@ -143,6 +139,6 @@ The following restrictions apply to host label names: - Names cannot start with `_`, but it can be present in other parts of the name. - Names only accept alphabet letters, numbers, dots, and dashes. -The policy for values is more flexible, but you can not use exclamation marks (`!`), whitespaces (` `), single quotes +The policy for values is more flexible, but you cannot use exclamation marks (`!`), whitespaces (` `), single quotes (`'`), double quotes (`"`), or asterisks (`*`), because they are used to compare label values in health alerts and templates. diff --git a/docs/netdata-agent/configuration/dynamic-configuration.md b/docs/netdata-agent/configuration/dynamic-configuration.md index 7064abf9a..c419a82d9 100644 --- a/docs/netdata-agent/configuration/dynamic-configuration.md +++ b/docs/netdata-agent/configuration/dynamic-configuration.md @@ -1,6 +1,8 @@ # Dynamic Configuration Manager -**Netdata Cloud paid subscription required.** +> **Info** +> +> Netdata Cloud paid subscription is required. The Dynamic Configuration Manager allows direct configuration of collectors and alerts through the Netdata UI. This feature allows users to: @@ -11,7 +13,7 @@ The Dynamic Configuration Manager allows direct configuration of collectors and > **Info** > -> To understand what actions users can perform based on their role, refer to the [Role Based Access documentation](/docs/netdata-cloud/authentication-and-authorization/role-based-access-model.md#dynamic-configuration-manager). +> To understand what actions users can perform based on their role, refer to the [Role-Based Access documentation](/docs/netdata-cloud/authentication-and-authorization/role-based-access-model.md#dynamic-configuration-manager). ## Collectors @@ -35,9 +37,9 @@ A job represents a running instance of a module with a specific configuration. T Every job has a designated "source type" indicating its origin: - **Stock**: Pre-installed with Netdata and provides basic data collection for common services. -- **User**: Originates from user-created files on the node. +- **User**: Created from user-defined configuration files on the node. 
- **Discovered**: Automatically generated by Netdata upon discovering a service running on the node.
-- **Dynamic Configuration**: Created and managed using the Dynamic Configuration Manager.
+- **Dynamic Configuration**: Created and managed through the Dynamic Configuration Manager.

You can manage individual jobs using the following actions:

@@ -51,7 +53,7 @@ You can manage individual jobs using the following actions:

## Health

-Each entry in the Health tab contains an Alert template, that then is used to create Alerts.
+Each entry in the Health tab contains an Alert template that is then used to create Alerts.

The functionality in the main view is the same as with the [Collectors tab](#collectors).

diff --git a/docs/netdata-agent/configuration/optimize-the-netdata-agents-performance.md b/docs/netdata-agent/configuration/optimize-the-netdata-agents-performance.md
index 6acbd4977..ff51fbf78 100644
--- a/docs/netdata-agent/configuration/optimize-the-netdata-agents-performance.md
+++ b/docs/netdata-agent/configuration/optimize-the-netdata-agents-performance.md
@@ -1,9 +1,9 @@
# How to optimize the Netdata Agent's performance

We designed the Netdata Agent to be incredibly lightweight, even when it's collecting a few thousand dimensions every
-second and visualizing that data into hundreds of charts. However, the default settings of the Netdata Agent are not
-optimized for performance, but for a simple, standalone setup. We want the first install to give you something you can
-run without any configuration. Most of the settings and options are enabled, since we want you to experience the full
+second and visualizing that data into hundreds of charts. However, the default settings of the Netdata Agent aren’t
+optimized for performance, but for a simple, standalone setup. We want the first installation to give you something you can
+run without any configuration. Most of the settings and options are enabled since we want you to experience the full
thing.

By default, Netdata will automatically detect applications running on the node it is installed to start collecting
@@ -17,16 +17,16 @@ Netdata for production use.

The following table summarizes the effect of each optimization on the CPU, RAM and Disk IO utilization in production.
-| Optimization | CPU | RAM | Disk IO | -|-------------------------------------------------------------------------------------------------------------------------------|--------------------|--------------------|--------------------| -| [Use streaming and replication](#use-streaming-and-replication) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | -| [Disable unneeded plugins or collectors](#disable-unneeded-plugins-or-collectors) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | -| [Reduce data collection frequency](#reduce-collection-frequency) | :heavy_check_mark: | | :heavy_check_mark: | +| Optimization | CPU | RAM | Disk IO | +|-----------------------------------------------------------------------------------------------------------------------------------|--------------------|--------------------|--------------------| +| [Use streaming and replication](#use-streaming-and-replication) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | +| [Disable unneeded plugins or collectors](#disable-unneeded-plugins-or-collectors) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | +| [Reduce data collection frequency](#reduce-collection-frequency) | :heavy_check_mark: | | :heavy_check_mark: | | [Change how long Netdata stores metrics](/docs/netdata-agent/configuration/optimizing-metrics-database/change-metrics-storage.md) | | :heavy_check_mark: | :heavy_check_mark: | -| [Use a different metric storage database](/src/database/README.md) | | :heavy_check_mark: | :heavy_check_mark: | -| [Disable machine learning](#disable-machine-learning) | :heavy_check_mark: | | | -| [Use a reverse proxy](#run-netdata-behind-a-proxy) | :heavy_check_mark: | | | -| [Disable/lower gzip compression for the agent dashboard](#disablelower-gzip-compression-for-the-dashboard) | :heavy_check_mark: | | | +| [Use a different metric storage database](/src/database/README.md) | | :heavy_check_mark: | :heavy_check_mark: | +| [Disable machine learning](#disable-machine-learning) | :heavy_check_mark: | | | +| [Use a reverse proxy](#run-netdata-behind-a-proxy) | :heavy_check_mark: | | | +| [Disable/lower gzip compression for the agent dashboard](#disablelower-gzip-compression-for-the-dashboard) | :heavy_check_mark: | | | ## Resources required by a default Netdata installation @@ -39,15 +39,15 @@ You can configure almost all aspects of data collection/retention, and certain a Expect about: - 1-3% of a single core for the netdata core -- 1-3% of a single core for the various collectors (e.g. go.d.plugin, apps.plugin) +- 1-3% of a single core for the various collectors (e.g., go.d.plugin, apps.plugin) - 5-10% of a single core, when ML training runs Your experience may vary depending on the number of metrics collected, the collectors enabled and the specific -environment they run on, i.e. the work they have to do to collect these metrics. +environment they run on, i.e., the work they have to do to collect these metrics. As a general rule, for modern hardware and VMs, the total CPU consumption of a standalone Netdata installation, including all its components, should be below 5 - 15% of a single core. For example, on 8 core server it will use only -0.6% - 1.8% of a total CPU capacity, depending on the CPU characteristics. +0.6% - 1.8% of the total CPU capacity, depending on the CPU characteristics. 
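If you want to sanity-check these figures on your own node, one quick way (a minimal sketch, assuming a standard Linux install where the daemon and its plugins run as the `netdata` user) is to take a snapshot of the Netdata processes and their CPU usage:

```bash
# One-shot batch-mode snapshot of all processes owned by the netdata user
top -bn1 -u netdata | head -20
```

The dashboard also charts the Agent's own resource usage over time, so you can cross-check there as well.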
The Netdata Agent runs with the lowest possible [process scheduling policy](/src/daemon/README.md#netdata-process-scheduling-policy),
@@ -55,7 +55,7 @@ which is `nice 19`, and uses the `idle` process scheduler. Together, these setti
resources when the node has CPU resources to spare. If the node reaches 100% CPU utilization, the Agent is stopped first
to ensure your applications get any available resources.

-To reduce CPU usage you can (either one or a combination of the following actions):
+To reduce CPU usage, you can apply one or a combination of the following actions:

1. [Disable machine learning](#disable-machine-learning),
2. [Use streaming and replication](#use-streaming-and-replication),
@@ -77,19 +77,18 @@ To estimate and control memory consumption, you can (either one or a combination

### Disk footprint and I/O

-By default, Netdata should not use more than 1GB of disk space, most of which is dedicated for storing metric data and
-metadata. For typical installations collecting 2000 - 3000 metrics, this storage should provide a few days of
+By default, Netdata shouldn’t use more than 1GB of disk space, most of which is dedicated to storing metric data and
+metadata. For typical installations collecting 2000–3000 metrics, this storage should provide a few days of
high-resolution retention (per second), about a month of mid-resolution retention (per minute) and more than a year of
low-resolution retention (per hour).

-Netdata spreads I/O operations across time. For typical standalone installations there should be a few write operations
-every 5-10 seconds of a few kilobytes each, occasionally up to 1MB. In addition, under heavy load, collectors that
+Netdata spreads I/O operations across time. For typical standalone installations, there should be a few write operations
+every 5–10 seconds of a few kilobytes each, occasionally up to 1MB. In addition, under heavy load, collectors that
require disk I/O may stop and show gaps in charts.

-To optimize your disk footprint in any aspect described below you can:
+To optimize your disk footprint in any aspect described below, you can:

-
-To configure retention, you can:
+To configure retention, you can:

1. [Change how long Netdata stores metrics](/docs/netdata-agent/configuration/optimizing-metrics-database/change-metrics-storage.md).

@@ -97,7 +96,6 @@ To control disk I/O:

1. [Use a different metric storage database](/src/database/README.md),

-
Minimize deployment impact on the production system by optimizing disk footprint:

1. [Using streaming and replication](#use-streaming-and-replication)
@@ -118,7 +116,7 @@ and makes it easier to configure or disable alerts and agent notifications. The
parents by default run health checks for each child, as long as the child is connected (the details are in
`stream.conf`). On the child nodes you should add to `netdata.conf` the following:

-```conf
+```text
[health]
    enabled = no
```

@@ -131,19 +129,18 @@ See [using a different metric storage database](/src/database/README.md).

If you know that you don't need an [entire plugin or a specific collector](/src/collectors/README.md#collector-architecture-and-terminology),
-you can disable any of them. Keep in mind that if a plugin/collector has nothing to do, it simply shuts down and does
-not consume system resources. You will only improve the Agent's performance by disabling plugins/collectors that are
+you can disable any of them. Keep in mind that if a plugin/collector has nothing to do, it simply shuts down and doesn’t consume system resources.
You will only improve the Agent's performance by disabling plugins/collectors that are
actively collecting metrics.

Open `netdata.conf` and scroll down to the `[plugins]` section. To disable any plugin, uncomment it and set the value
to `no`. For example, to explicitly keep the `proc` and `go.d` plugins enabled while disabling `python.d` and `charts.d`:

-```conf
+```text
[plugins]
    proc = yes
-    python.d = no
-    charts.d = no
-    go.d = yes
+    python.d = no
+    charts.d = no
+    go.d = yes
```

Disable specific collectors by opening their respective plugin configuration files, uncommenting the line for the
@@ -157,11 +154,11 @@ sudo ./edit-config charts.d.conf

For example, to disable a few Python collectors:

-```conf
+```text
modules:
-  apache: no
-  dockerd: no
-  fail2ban: no
+  apache: no
+  dockerd: no
+  fail2ban: no
```

## Reduce collection frequency

@@ -181,7 +178,7 @@ If you change this to `2`, Netdata enforces a minimum `update every` setting of
other second, which will effectively halve CPU utilization. Set this to `5` or `10` to collect metrics every 5 or 10
seconds, respectively.

-```conf
+```text
[global]
    update every = 5
```

@@ -199,7 +196,7 @@ an [internal_plugin/collector](/src/collectors/README.md#collector-architecture-
open `netdata.conf` and find the appropriate section. For example, to reduce the frequency of the `apps` plugin, which
collects and visualizes metrics on application resource utilization:

-```conf
+```text
[plugin:apps]
    update every = 5
```

@@ -208,7 +205,7 @@ To [configure an individual collector](/src/collectors/REFERENCE.md#configure-a-
open its specific configuration file with `edit-config` and look for the `update_every` setting. For example, to reduce
the frequency of the `nginx` collector, run `sudo ./edit-config go.d/nginx.conf`:

-```conf
+```text
# [ GLOBAL ]
update_every: 10
```

@@ -229,7 +226,7 @@ on [streaming and replication](/docs/observability-centralization-points/README.

Automated anomaly detection may be a powerful tool, but we recommend enabling it only on Netdata parents that sit
outside your production infrastructure, or if you have CPU and memory to spare. You can disable ML with the following:

-```conf
+```text
[ml]
    enabled = no
```

@@ -251,16 +248,15 @@ looking at the local Agent dashboard.

To disable gzip compression, open `netdata.conf` and find the `[web]` section:

-```conf
+```text
[web]
    enable gzip compression = no
```

Or to lower the default compression level:

-```conf
+```text
[web]
    enable gzip compression = yes
    gzip compression level = 1
```
-

diff --git a/docs/netdata-agent/configuration/optimizing-metrics-database/README.md b/docs/netdata-agent/configuration/optimizing-metrics-database/README.md
index fdbd3b690..c5769ccd4 100644
--- a/docs/netdata-agent/configuration/optimizing-metrics-database/README.md
+++ b/docs/netdata-agent/configuration/optimizing-metrics-database/README.md
@@ -1,3 +1,3 @@
# Optimizing Metrics Database Overview

-This section contains documentation to help you understand how the metrics DB works, understand the key features and configure them to suit your needs. \ No newline at end of file
+This section contains documentation to help you understand how the metrics DB works and how to configure its key features to suit your needs.
diff --git a/docs/netdata-agent/configuration/optimizing-metrics-database/change-metrics-storage.md b/docs/netdata-agent/configuration/optimizing-metrics-database/change-metrics-storage.md
index 8a8659eff..2282cbc44 100644
--- a/docs/netdata-agent/configuration/optimizing-metrics-database/change-metrics-storage.md
+++ b/docs/netdata-agent/configuration/optimizing-metrics-database/change-metrics-storage.md
@@ -7,9 +7,9 @@ space**. This provides greater control and helps you optimize storage usage for

| Tier | Resolution | Time Limit | Size Limit (min 256 MB) |
|:----:|:-------------------:|:----------:|:-----------------------:|
-| 0 | high (per second) | 14 days | 1 GiB |
-| 1 | middle (per minute) | 3 months | 1 GiB |
-| 2 | low (per hour) | 2 years | 1 GiB |
+| 0 | high (per second) | 14d | 1 GiB |
+| 1 | middle (per minute) | 3mo | 1 GiB |
+| 2 | low (per hour) | 2y | 1 GiB |

> **Note**: If a user sets a disk space size less than 256 MB for a tier, Netdata will automatically adjust it to 256 MB.

@@ -17,7 +17,7 @@ With these defaults, Netdata requires approximately 4 GiB of storage space (incl

## Retention Settings

-> **In a parent-child setup**, these settings manage the shared storage space utilized by the Netdata parent agent for
+> **In a parent-child setup**, these settings manage the shared storage space used by the Netdata parent agent for
> storing metrics collected by both the parent and its child nodes.

You can fine-tune retention for each tier by setting a time limit or size limit. Setting a limit to 0 disables it,
@@ -32,22 +32,22 @@ retention strategies as shown in the table below:

You can change these limits in `netdata.conf`:

-```
+```text
[db]
-    mode = dbengine
+    mode = dbengine
    storage tiers = 3

    # Tier 0, per second data. Set to 0 for no limit.
-    dbengine tier 0 disk space MB = 1024
-    dbengine tier 0 retention days = 14
+    dbengine tier 0 retention size = 1GiB
+    dbengine tier 0 retention time = 14d

    # Tier 1, per minute data. Set to 0 for no limit.
-    dbengine tier 1 disk space MB = 1024
-    dbengine tier 1 retention days = 90
+    dbengine tier 1 retention size = 1GiB
+    dbengine tier 1 retention time = 3mo

    # Tier 2, per hour data. Set to 0 for no limit.
-    dbengine tier 2 disk space MB = 1024
-    dbengine tier 2 retention days = 730
+    dbengine tier 2 retention size = 1GiB
+    dbengine tier 2 retention time = 2y
```

## Monitoring Retention Utilization

@@ -58,6 +58,24 @@ your storage space (disk space limits) and time (time limits) are used for metri

## Legacy configuration

+### v1.99.0 and prior
+
+Netdata prior to v2 supports the following configuration options in `netdata.conf`.
+They have the same defaults as the latest v2, but the unit of each value is given in the option name, not in the value.
+
+```text
+storage tiers = 3
+# Tier 0, per second data. Set to 0 for no limit.
+dbengine tier 0 disk space MB = 1024
+dbengine tier 0 retention days = 14
+# Tier 1, per minute data. Set to 0 for no limit.
+dbengine tier 1 disk space MB = 1024
+dbengine tier 1 retention days = 90
+# Tier 2, per hour data. Set to 0 for no limit.
+dbengine tier 2 disk space MB = 1024
+dbengine tier 2 retention days = 730
+```
+
### v1.45.6 and prior

Netdata versions prior to v1.46.0 relied on a disk space-based retention.
@@ -72,17 +90,14 @@ Netdata versions prior to v1.46.0 relied on a disk space-based retention.
You can change these limits in `netdata.conf`:

-```
+```text
[db]
-    mode = dbengine
+    mode = dbengine
    storage tiers = 3
-
    # Tier 0, per second data
    dbengine multihost disk space MB = 256
-
    # Tier 1, per minute data
    dbengine tier 1 multihost disk space MB = 1024
-
    # Tier 2, per hour data
    dbengine tier 2 multihost disk space MB = 1024
```

@@ -96,7 +111,7 @@ for the parent node and all of its children.

To configure the database engine, look for the `page cache size MB` and `dbengine multihost disk space MB` settings in
the `[db]` section of your `netdata.conf`.

-```conf
+```text
[db]
    dbengine page cache size MB = 32
    dbengine multihost disk space MB = 256

diff --git a/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts.md b/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts.md
index b0094a60f..f7f56279b 100644
--- a/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts.md
+++ b/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts.md
@@ -1,49 +1,51 @@
# Organize systems, metrics, and alerts

When you use Netdata to monitor and troubleshoot an entire infrastructure, you need sophisticated ways of keeping everything organized.
-Netdata allows to organize your observability infrastructure with Spaces, Rooms, virtual nodes, host labels, and metric labels.
+Netdata allows organizing your observability infrastructure with Spaces, Rooms, virtual nodes, host labels, and metric labels.

## Spaces and Rooms

-[Spaces](/docs/netdata-cloud/organize-your-infrastructure-invite-your-team.md#netdata-cloud-spaces) are used for organization-level or infrastructure-level
+[Spaces](/docs/netdata-cloud/organize-your-infrastructure-invite-your-team.md#netdata-cloud-spaces) are used for organization-level or infrastructure-level
grouping of nodes and people. A node can only appear in a single space, while people can have access to multiple spaces.

-The [Rooms](/docs/netdata-cloud/organize-your-infrastructure-invite-your-team.md#netdata-cloud-rooms) in a space bring together nodes and people in
-collaboration areas. Rooms can also be used for fine-tuned
-[role based access control](/docs/netdata-cloud/authentication-and-authorization/role-based-access-model.md).
+The [Rooms](/docs/netdata-cloud/organize-your-infrastructure-invite-your-team.md#netdata-cloud-rooms) in a space bring together nodes and people in
+collaboration areas. Rooms can also be used for fine-tuned
+[role-based access control](/docs/netdata-cloud/authentication-and-authorization/role-based-access-model.md).

## Virtual nodes

-Netdata’s virtual nodes functionality allows you to define nodes in configuration files and have them be treated as regular nodes
-in all of the UI, dashboards, tabs, filters etc. For example, you can create a virtual node each for all your Windows machines
-and monitor them as discrete entities. Virtual nodes can help you simplify your infrastructure monitoring and focus on the
+Netdata’s virtual nodes functionality allows you to define nodes in configuration files and have them be treated as regular nodes
+in all the UI, dashboards, tabs, filters, etc. For example, you can create a virtual node for each of your Windows machines
+and monitor them as discrete entities. Virtual nodes can help you simplify your infrastructure monitoring and focus on the
individual node that matters.
To define your Windows server as a virtual node, you need to:

- * Define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`
+* Define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`

  ```yaml
  - hostname: win_server1
    guid:
  ```
-   Just remember to use a valid guid (On Linux you can use `uuidgen` command to generate one, on Windows just use the `[guid]::NewGuid()` command in PowerShell)
-
- * Add the vnode config to the data collection job. e.g. in `go.d/windows.conf`:
+
+  Just remember to use a valid GUID (on Linux, you can use the `uuidgen` command to generate one; on Windows, use the `[guid]::NewGuid()` command in PowerShell)
+
+* Add the vnode config to the data collection job, e.g., in `go.d/windows.conf`:
+
  ```yaml
  jobs:
    - name: win_server1
      vnode: win_server1
      url: http://203.0.113.10:9182/metrics
  ```
-
+
## Host labels

Host labels can be extremely useful when:

-- You need alerts that adapt to the system's purpose
-- You need properly-labeled metrics archiving so you can sort, correlate, and mash-up your data to your heart's content.
-- You need to keep tabs on ephemeral Docker containers in a Kubernetes cluster.
+* You need alerts that adapt to the system's purpose
+* You need properly labeled metrics archiving so you can sort, correlate, and mash-up your data to your heart's content.
+* You need to keep tabs on ephemeral Docker containers in a Kubernetes cluster.

Let's take a peek into how to create host labels and apply them across a few of Netdata's features to give you more
organization power over your infrastructure.

@@ -56,16 +58,17 @@ parent-child status, and more.

They capture the following:

-- Kernel version
-- Operating system name and version
-- CPU architecture, system cores, CPU frequency, RAM, and disk space
-- Whether Netdata is running inside of a container, and if so, the OS and hardware details about the container's host
-- Whether Netdata is running inside K8s node
-- What virtualization layer the system runs on top of, if any
-- Whether the system is a streaming parent or child
+* Kernel version
+* Operating system name and version
+* CPU architecture, system cores, CPU frequency, RAM, and disk space
+* Whether Netdata is running inside a container, and if so, the OS and hardware details about the container's host
+* Whether Netdata is running inside a K8s node
+* What virtualization layer the system runs on top of, if any
+* Whether the system is a streaming parent or child

If you want to organize your systems without manually creating host labels, try the automatic labels in some of the
features below. You can see them under `http://HOST-IP:19999/api/v1/info`, beginning with an underscore `_`.
+
```json
{
  ...
@@ -87,7 +90,7 @@ sudo ./edit-config netdata.conf

Create a new `[host labels]` section defining a new host label and its value for the system in question. Make sure not
to violate any of the [host label naming rules](/docs/netdata-agent/configuration/common-configuration-changes.md#organize-nodes-with-host-labels).

-```conf
+```text
[host labels]
    type = webserver
    location = us-seattle
@@ -126,7 +129,6 @@ read the status of your agent. For example, from a VPS system running Debian 10:
}
```
-

### Host labels in streaming

You may have noticed the `_is_parent` and `_is_child` automatic labels from above. Host labels are also now
@@ -134,12 +136,11 @@ streamed from a child to its parent node, which concentrates an entire infrastru
and virtualization information in one place: the parent.
Now, if you'd like to remind yourself of how much RAM a certain child node has, you can access
-`http://localhost:19999/host/CHILD_HOSTNAME/api/v1/info` and reference the automatically-generated host labels from the
+`http://localhost:19999/host/CHILD_HOSTNAME/api/v1/info` and reference the automatically generated host labels from the
child system. It's a vastly simplified way of accessing critical information about your infrastructure.

> ⚠️ Because automatic labels for child nodes are accessible via API calls, and contain sensitive information like
-> kernel and operating system versions, you should secure streaming connections with SSL. See the [streaming
-> documentation](/src/streaming/README.md#securing-streaming-communications) for details. You may also want to use
+> kernel and operating system versions, you should secure streaming connections with SSL. See the [streaming documentation](/src/streaming/README.md#securing-streaming-with-tlsssl) for details. You may also want to use
> [access lists](/src/web/server/README.md#access-lists) or [expose the API only to LAN/localhost
> connections](/docs/netdata-agent/securing-netdata-agents.md#expose-netdata-only-in-a-private-lan).

@@ -153,23 +154,23 @@ alerts to them.

For example, let's use the configuration example from earlier:

-```conf
+```text
[host labels]
    type = webserver
    location = us-seattle
    installed = 20200218
```

-You could now create a new health entity (checking if disk space will run out soon) that applies only to any host
+You could now create a new health entity (checking if disk space will run out soon) that applies only to any host
labeled `webserver`:

```yaml
template: disk_fill_rate
-      on: disk.space
-  lookup: max -1s at -30m unaligned of avail
-    calc: ($this - $avail) / (30 * 60)
-   every: 15s
- host labels: type = webserver
+    on: disk.space
+    lookup: max -1s at -30m unaligned of avail
+    calc: ($this - $avail) / (30 * 60)
+    every: 15s
+    host labels: type = webserver
```

Or, by using one of the automatic labels, for only webserver systems running a specific OS:

@@ -198,9 +199,9 @@ documentation](/src/health/REFERENCE.md#alert-line-host-labels) for more details

If you have enabled any metrics exporting via our experimental [exporters](/src/exporting/README.md), any new host
labels you created manually are sent to the destination database alongside metrics. You can change this behavior by
-editing `exporting.conf`, and you can even send automatically-generated labels on with exported metrics.
+editing `exporting.conf`, and you can even send automatically generated labels on with exported metrics.

-```conf
+```text
[exporting:global]
    enabled = yes
    send configured labels = yes
@@ -209,7 +210,7 @@ send automatic labels = no

You can also change this behavior per exporting connection:

-```conf
+```text
[opentsdb:my_instance3]
    enabled = yes
    destination = localhost:4242
@@ -227,27 +228,27 @@ more about exporting, read the [documentation](/src/exporting/README.md).

The Netdata aggregate charts allow you to filter and group metrics based on label name-value pairs.

-All go.d plugin collectors support the specification of labels at the "collection job" level. Some collectors come with out of the box
-labels (e.g. generic Prometheus collector, Kubernetes, Docker and more). But you can also add your own custom labels, by configuring
-the data collection jobs.
+All go.d plugin collectors support the specification of labels at the "collection job" level. Some collectors come with out-of-the-box
+labels (e.g.
generic Prometheus collector, Kubernetes, Docker and more). But you can also add your own custom labels by configuring
+the data collection jobs.

-For example, suppose we have a single Netdata agent, collecting data from two remote Apache web servers, located in different data centers.
+For example, suppose we have a single Netdata agent, collecting data from two remote Apache web servers, located in different data centers.
The web servers are load balanced and provide access to the service "Payments".

You can define the following in `go.d.conf`, to be able to group the web requests by service or location:

-```
+```yaml
jobs:
-  - name: mywebserver1
+  - name: my_webserver1
    url: http://host1/server-status?auto
    labels:
      service: "Payments"
      location: "Atlanta"
-  - name: mywebserver2
+  - name: my_webserver2
    url: http://host2/server-status?auto
    labels:
      service: "Payments"
      location: "New York"
```

-Of course you may define as many custom label/value pairs as you like, in as many data collection jobs you need.
+Of course, you may define as many custom label/value pairs as you like, in as many data collection jobs as you need.

diff --git a/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/README.md b/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/README.md
index 00fe63af1..a0810bb51 100644
--- a/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/README.md
+++ b/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/README.md
@@ -1,7 +1,7 @@
# Running the Netdata Agent behind a reverse proxy

If you need to access a Netdata agent's user interface or API in a production environment we recommend you put Netdata behind
-another web server and secure access to the dashboard via SSL, user authentication and firewall rules.
+another web server and secure access to the dashboard via SSL, user authentication and firewall rules.

A dedicated web server also provides more robustness and capabilities than the Agent's [internal web server](/src/web/README.md).

@@ -12,7 +12,7 @@ We have documented running behind
[Lighttpd](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-lighttpd.md),
[Caddy](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-caddy.md), and
[H2O](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-h2o.md).

-If you prefer a different web server, we suggest you follow the documentation for nginx and tell us how you did it
+If you prefer a different web server, we suggest you follow the documentation for nginx and tell us how you did it
by adding your own "Running behind webserverX" document.

When you run Netdata behind a reverse proxy, we recommend you firewall protect all your Netdata servers, so that only the web server IP will be allowed to directly access Netdata. To do this, run this on each of your servers (or use your firewall manager):

@@ -26,9 +26,9 @@ The above will prevent anyone except your web server to access a Netdata dashboa

You can also use `netdata.conf`:

-```
+```text
[web]
-    allow connections from = localhost 1.2.3.4
+    allow connections from = localhost 1.2.3.4
```

Of course, you can add more IPs.
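The value is a space-separated list of Netdata simple patterns, so (as a sketch, with placeholder addresses) you can also allow a whole private range with a wildcard:

```text
[web]
    allow connections from = localhost 10.0.0.* 192.168.1.1
```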
diff --git a/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-apache.md b/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-apache.md index 1f7274d5c..23e4ae233 100644 --- a/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-apache.md +++ b/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-apache.md @@ -1,4 +1,4 @@ -# Netdata via Apache's mod_proxy +# Running Netdata behind Apache's mod_proxy Below you can find instructions for configuring an apache server to: @@ -29,6 +29,7 @@ Also, enable the rewrite module: ```sh sudo a2enmod rewrite ``` + ## Netdata on an existing virtual host On any **existing** and already **working** apache virtual host, you can redirect requests for URL `/netdata/` to one or more Netdata servers. @@ -37,29 +38,29 @@ On any **existing** and already **working** apache virtual host, you can redirec Add the following on top of any existing virtual host. It will allow you to access Netdata as `http://virtual.host/netdata/`. -```conf +```text - RewriteEngine On - ProxyRequests Off - ProxyPreserveHost On + RewriteEngine On + ProxyRequests Off + ProxyPreserveHost On + + + Require all granted + - - Require all granted - + # Local Netdata server accessed with '/netdata/', at localhost:19999 + ProxyPass "/netdata/" "http://localhost:19999/" connectiontimeout=5 timeout=30 keepalive=on + ProxyPassReverse "/netdata/" "http://localhost:19999/" - # Local Netdata server accessed with '/netdata/', at localhost:19999 - ProxyPass "/netdata/" "http://localhost:19999/" connectiontimeout=5 timeout=30 keepalive=on - ProxyPassReverse "/netdata/" "http://localhost:19999/" + # if the user did not give the trailing /, add it + # for HTTP (if the virtualhost is HTTP, use this) + RewriteRule ^/netdata$ http://%{HTTP_HOST}/netdata/ [L,R=301] + # for HTTPS (if the virtualhost is HTTPS, use this) + #RewriteRule ^/netdata$ https://%{HTTP_HOST}/netdata/ [L,R=301] - # if the user did not give the trailing /, add it - # for HTTP (if the virtualhost is HTTP, use this) - RewriteRule ^/netdata$ http://%{HTTP_HOST}/netdata/ [L,R=301] - # for HTTPS (if the virtualhost is HTTPS, use this) - #RewriteRule ^/netdata$ https://%{HTTP_HOST}/netdata/ [L,R=301] + # rest of virtual host config here - # rest of virtual host config here - ``` @@ -67,16 +68,16 @@ Add the following on top of any existing virtual host. It will allow you to acce Add the following on top of any existing virtual host. It will allow you to access multiple Netdata as `http://virtual.host/netdata/HOSTNAME/`, where `HOSTNAME` is the hostname of any other Netdata server you have (to access the `localhost` Netdata, use `http://virtual.host/netdata/localhost/`). -```conf +```text - RewriteEngine On - ProxyRequests Off - ProxyPreserveHost On + RewriteEngine On + ProxyRequests Off + ProxyPreserveHost On - - Require all granted - + + Require all granted + # proxy any host, on port 19999 ProxyPassMatch "^/netdata/([A-Za-z0-9\._-]+)/(.*)" "http://$1:19999/$2" connectiontimeout=5 timeout=30 keepalive=on @@ -87,8 +88,8 @@ Add the following on top of any existing virtual host. 
It will allow you to acce # for HTTPS (if the virtualhost is HTTPS, use this) RewriteRule "^/netdata/([A-Za-z0-9\._-]+)$" https://%{HTTP_HOST}/netdata/$1/ [L,R=301] - # rest of virtual host config here - + # rest of virtual host config here + ``` @@ -97,7 +98,7 @@ Add the following on top of any existing virtual host. It will allow you to acce If you want to control the servers your users can connect to, replace the `ProxyPassMatch` line with the following. This allows only `server1`, `server2`, `server3` and `server4`. -``` +```text ProxyPassMatch "^/netdata/(server1|server2|server3|server4)/(.*)" "http://$1:19999/$2" connectiontimeout=5 timeout=30 keepalive=on ``` @@ -113,26 +114,28 @@ nano /etc/apache2/sites-available/netdata.conf with this content: -```conf +```text - ProxyRequests Off - ProxyPreserveHost On - - ServerName netdata.domain.tld - - Require all granted - + ProxyRequests Off + ProxyPreserveHost On + + ServerName netdata.domain.tld + + + Require all granted + - ProxyPass "/" "http://localhost:19999/" connectiontimeout=5 timeout=30 keepalive=on - ProxyPassReverse "/" "http://localhost:19999/" + ProxyPass "/" "http://localhost:19999/" connectiontimeout=5 timeout=30 keepalive=on + ProxyPassReverse "/" "http://localhost:19999/" + + ErrorLog ${APACHE_LOG_DIR}/netdata-error.log + CustomLog ${APACHE_LOG_DIR}/netdata-access.log combined - ErrorLog ${APACHE_LOG_DIR}/netdata-error.log - CustomLog ${APACHE_LOG_DIR}/netdata-access.log combined ``` -Enable the VirtualHost: +Enable the VirtualHost: ```sh sudo a2ensite netdata.conf && service apache2 reload @@ -142,15 +145,15 @@ sudo a2ensite netdata.conf && service apache2 reload _Assuming the main goal is to make Netdata running in HTTPS._ -1. Make a subdomain for Netdata on which you enable and force HTTPS - You can use a free Let's Encrypt certificate -2. Go to "Apache & nginx Settings", and in the following section, add: - -```conf -RewriteEngine on -RewriteRule (.*) http://localhost:19999/$1 [P,L] -``` +1. Make a subdomain for Netdata on which you enable and force HTTPS - You can use a free Let's Encrypt certificate +2. Go to "Apache & nginx Settings", and in the following section, add: -3. Optional: If your server is remote, then just replace "localhost" with your actual hostname or IP, it just works. + ```text + RewriteEngine on + RewriteRule (.*) http://localhost:19999/$1 [P,L] + ``` + +3. Optional: If your server is remote, then just replace "localhost" with your actual hostname or IP, it just works. Repeat the operation for as many servers as you need. @@ -165,49 +168,49 @@ Then, generate password for user `netdata`, using `htpasswd -c /etc/apache2/.htp **Apache 2.2 Example:**\ Modify the virtual host with these: -```conf - # replace the section - - Order deny,allow - Allow from all - - - # add a section - - AuthType Basic - AuthName "Protected site" - AuthUserFile /etc/apache2/.htpasswd - Require valid-user - Order deny,allow - Allow from all - +```text + # replace the section + + Order deny,allow + Allow from all + + + # add a section + + AuthType Basic + AuthName "Protected site" + AuthUserFile /etc/apache2/.htpasswd + Require valid-user + Order deny,allow + Allow from all + ``` Specify `Location /` if Netdata is running on dedicated virtual host. 
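As a minimal sketch of the password file setup referenced above, assuming the Debian/Ubuntu paths used throughout this guide:

```sh
# Create (-c) the htpasswd file with a user named netdata (you will be
# prompted for the password), then reload Apache so the AuthUserFile
# directives above take effect.
sudo htpasswd -c /etc/apache2/.htpasswd netdata
sudo systemctl reload apache2
```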
**Apache 2.4 (dedicated virtual host) Example:** -```conf +```text - RewriteEngine On - ProxyRequests Off - ProxyPreserveHost On - - ServerName netdata.domain.tld - - - AllowOverride None - AuthType Basic - AuthName "Protected site" - AuthUserFile /etc/apache2/.htpasswd - Require valid-user - - - ProxyPass "/" "http://localhost:19999/" connectiontimeout=5 timeout=30 keepalive=on - ProxyPassReverse "/" "http://localhost:19999/" - - ErrorLog ${APACHE_LOG_DIR}/netdata-error.log - CustomLog ${APACHE_LOG_DIR}/netdata-access.log combined + RewriteEngine On + ProxyRequests Off + ProxyPreserveHost On + + ServerName netdata.domain.tld + + + AllowOverride None + AuthType Basic + AuthName "Protected site" + AuthUserFile /etc/apache2/.htpasswd + Require valid-user + + + ProxyPass "/" "http://localhost:19999/" connectiontimeout=5 timeout=30 keepalive=on + ProxyPassReverse "/" "http://localhost:19999/" + + ErrorLog ${APACHE_LOG_DIR}/netdata-error.log + CustomLog ${APACHE_LOG_DIR}/netdata-access.log combined ``` @@ -217,8 +220,8 @@ Note: Changes are applied by reloading or restarting Apache. If you want to enable CSP within your Apache, you should consider some special requirements of the headers. Modify your configuration like that: -``` - Header always set Content-Security-Policy "default-src http: 'unsafe-inline' 'self' 'unsafe-eval'; script-src http: 'unsafe-inline' 'self' 'unsafe-eval'; style-src http: 'self' 'unsafe-inline'" +```text + Header always set Content-Security-Policy "default-src http: 'unsafe-inline' 'self' 'unsafe-eval'; script-src http: 'unsafe-inline' 'self' 'unsafe-eval'; style-src http: 'self' 'unsafe-inline'" ``` Note: Changes are applied by reloading or restarting Apache. @@ -242,7 +245,7 @@ exceed that threshold, and `mod_evasive` will add your IP address to a blocklist Our users have found success by setting `DOSPageCount` to `30`. Try this, and raise the value if you continue to see 403 errors while accessing the dashboard. -```conf +```text DOSPageCount 30 ``` @@ -255,100 +258,92 @@ To adjust the `DOSPageCount` for a specific virtual host, open your virtual host `/etc/httpd/conf/sites-available/my-domain.conf` or `/etc/apache2/sites-available/my-domain.conf` and add the following: -```conf +```text - ... - # Increase the DOSPageCount to prevent 403 errors and IP addresses being blocked. - - DOSPageCount 30 - + ... + # Increase the DOSPageCount to prevent 403 errors and IP addresses being blocked. + + DOSPageCount 30 + ``` See issues [#2011](https://github.com/netdata/netdata/issues/2011) and [#7658](https://github.com/netdata/netdata/issues/7568) for more information. -# Netdata configuration +## Netdata configuration You might edit `/etc/netdata/netdata.conf` to optimize your setup a bit. For applying these changes you need to restart Netdata. -## Response compression +### Response compression If you plan to use Netdata exclusively via apache, you can gain some performance by preventing double compression of its output (Netdata compresses its response, apache re-compresses it) by editing `/etc/netdata/netdata.conf` and setting: -``` +```text [web] enable gzip compression = no ``` Once you disable compression at Netdata (and restart it), please verify you receive compressed responses from apache (it is important to receive compressed responses - the charts will be more snappy). -## Limit direct access to Netdata +### Limit direct access to Netdata You would also need to instruct Netdata to listen only on `localhost`, `127.0.0.1` or `::1`. 
-``` +```text [web] bind to = localhost ``` or -``` +```text [web] bind to = 127.0.0.1 ``` or -``` +```text [web] bind to = ::1 ``` - - You can also use a unix domain socket. This will also provide a faster route between apache and Netdata: -``` +```text [web] bind to = unix:/tmp/netdata.sock ``` Apache 2.4.24+ can not read from `/tmp` so create your socket in `/var/run/netdata` -``` +```text [web] bind to = unix:/var/run/netdata/netdata.sock ``` -_note: Netdata v1.8+ support unix domain sockets_ - At the apache side, prepend the 2nd argument to `ProxyPass` with `unix:/tmp/netdata.sock|`, like this: -``` +```text ProxyPass "/netdata/" "unix:/tmp/netdata.sock|http://localhost:19999/" connectiontimeout=5 timeout=30 keepalive=on ``` - - If your apache server is not on localhost, you can set: -``` +```text [web] bind to = * allow connections from = IP_OF_APACHE_SERVER ``` -*note: Netdata v1.9+ support `allow connections from`* - `allow connections from` accepts [Netdata simple patterns](/src/libnetdata/simple_pattern/README.md) to match against the connection IP address. ## Prevent the double access.log apache logs accesses and Netdata logs them too. You can prevent Netdata from generating its access log, by setting this in `/etc/netdata/netdata.conf`: -``` +```text [logs] access = off ``` @@ -357,7 +352,5 @@ apache logs accesses and Netdata logs them too. You can prevent Netdata from gen Make sure the requests reach Netdata, by examining `/var/log/netdata/access.log`. -1. if the requests do not reach Netdata, your apache does not forward them. -2. if the requests reach Netdata but the URLs are wrong, you have not re-written them properly. - - +1. if the requests do not reach Netdata, your apache does not forward them. +2. if the requests reach Netdata but the URLs are wrong, you have not re-written them properly. diff --git a/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-caddy.md b/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-caddy.md index b7608b309..f43a7a278 100644 --- a/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-caddy.md +++ b/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-caddy.md @@ -1,15 +1,6 @@ - +# Running Netdata behind Caddy -# Netdata via Caddy - -To run Netdata via [Caddy v2 proxying,](https://caddyserver.com/docs/caddyfile/directives/reverse_proxy) set your Caddyfile up like this: +To run Netdata via [Caddy v2 reverse proxy,](https://caddyserver.com/docs/caddyfile/directives/reverse_proxy) set your Caddyfile up like this: ```caddyfile netdata.domain.tld { @@ -34,5 +25,3 @@ netdata.domain.tld { You would also need to instruct Netdata to listen only to `127.0.0.1` or `::1`. To limit access to Netdata only from localhost, set `bind socket to IP = 127.0.0.1` or `bind socket to IP = ::1` in `/etc/netdata/netdata.conf`. 
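After editing the Caddyfile, you can check and apply it. A hedged sketch, assuming a systemd-managed Caddy and the default `/etc/caddy/Caddyfile` path:

```sh
# Validate the Caddyfile for syntax errors, then reload Caddy without downtime.
caddy validate --config /etc/caddy/Caddyfile
sudo systemctl reload caddy
```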
- - diff --git a/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-h2o.md b/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-h2o.md index 276b72e8b..f2dc45b82 100644 --- a/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-h2o.md +++ b/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-h2o.md @@ -1,12 +1,3 @@ - - # Running Netdata behind H2O [H2O](https://h2o.examp1e.net/) is a new generation HTTP server that provides quicker response to users with less CPU utilization when compared to older generation of web servers. @@ -15,23 +6,23 @@ It is notable for having much simpler configuration than many popular HTTP serve ## Why H2O -- Sane configuration defaults mean that typical configurations are very minimalistic and easy to work with. +- Sane configuration defaults mean that typical configurations are very minimalistic and easy to work with. -- Native support for HTTP/2 provides improved performance when accessing the Netdata dashboard remotely. +- Native support for HTTP/2 provides improved performance when accessing the Netdata dashboard remotely. -- Password protect access to the Netdata dashboard without requiring Netdata Cloud. +- Password-protect access to the Netdata dashboard without requiring Netdata Cloud. -## H2O configuration file. +## H2O configuration file -On most systems, the H2O configuration is found under `/etc/h2o`. H2O uses [YAML 1.1](https://yaml.org/spec/1.1/), with a few special extensions, for it’s configuration files, with the main configuration file being `/etc/h2o/h2o.conf`. +On most systems, the H2O configuration is found under `/etc/h2o`. H2O uses [YAML 1.1](https://yaml.org/spec/1.1/), with a few special extensions, for its configuration files, with the main configuration file being `/etc/h2o/h2o.conf`. You can edit the H2O configuration file with Nano, Vim or any other text editors with which you are comfortable. After making changes to the configuration files, perform the following: -- Test the configuration with `h2o -m test -c /etc/h2o/h2o.conf` +- Test the configuration with `h2o -m test -c /etc/h2o/h2o.conf` -- Restart H2O to apply tha changes with `/etc/init.d/h2o restart` or `service h2o restart` +- Restart H2O to apply the changes with `/etc/init.d/h2o restart` or `service h2o restart` ## Ways to access Netdata via H2O @@ -52,7 +43,7 @@ hosts: ### As a subfolder of an existing virtual host -This method is recommended when Netdata is to be served from a subfolder (or directory). +This method is recommended when Netdata is to be served from a subfolder (or directory). In this case, the virtual host `netdata.example.com` already exists and Netdata has to be accessed via `netdata.example.com/netdata/`. ```yaml @@ -72,7 +63,7 @@ hosts: ### As a subfolder for multiple Netdata servers, via one H2O instance -This is the recommended configuration when one H2O instance will be used to manage multiple Netdata servers via subfolders. +This is the recommended configuration when one H2O instance will be used to manage multiple Netdata servers via sub-folders. ```yaml hosts: @@ -100,12 +91,12 @@ Of course you can add as many backend servers as you like.
Using the above, you access Netdata on the backend servers, like this: -- `http://netdata.example.com/netdata/server1/` to reach Netdata on `198.51.100.1:19999` -- `http://netdata.example.com/netdata/server2/` to reach Netdata on `198.51.100.2:19999` +- `http://netdata.example.com/netdata/server1/` to reach Netdata on `198.51.100.1:19999` +- `http://netdata.example.com/netdata/server2/` to reach Netdata on `198.51.100.2:19999` ### Encrypt the communication between H2O and Netdata -In case Netdata's web server has been [configured to use TLS](/src/web/server/README.md#enabling-tls-support), it is +In case Netdata's web server has been [configured to use TLS](/src/web/server/README.md#enable-httpstls-support), it is necessary to specify inside the H2O configuration that the final destination is using TLS. To do this, change the `http://` on the `proxy.reverse.url` line in your H2O configuration with `https://` @@ -141,31 +132,27 @@ For more information on using basic authentication with H2O, see [their official If your H2O server is on `localhost`, you can use this to ensure external access is only possible through H2O: -``` +```text [web] bind to = 127.0.0.1 ::1 ``` - - You can also use a unix domain socket. This will provide faster communication between H2O and Netdata as well: -``` +```text [web] bind to = unix:/run/netdata/netdata.sock ``` In the H2O configuration, use a line like the following to connect to Netdata via the unix socket: -```yaml +```text proxy.reverse.url http://[unix:/run/netdata/netdata.sock] ``` - - If your H2O server is not on localhost, you can set: -``` +```text [web] bind to = * allow connections from = IP_OF_H2O_SERVER @@ -181,7 +168,7 @@ the connection IP address. H2O logs accesses and Netdata logs them too. You can prevent Netdata from generating its access log, by setting this in `/etc/netdata/netdata.conf`: -``` +```text [logs] access = off ``` diff --git a/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-haproxy.md b/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-haproxy.md index 9d2aff670..04bd32838 100644 --- a/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-haproxy.md +++ b/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-haproxy.md @@ -1,16 +1,6 @@ - - -# Netdata via HAProxy - -> HAProxy is a free, very fast and reliable solution offering high availability, load balancing, -> and proxying for TCP and HTTP-based applications. It is particularly suited for very high traffic websites +# Running Netdata behind HAProxy + +> HAProxy is a free, very fast and reliable solution offering high availability, load balancing, and proxying for TCP and HTTP-based applications. It is particularly suited for very high traffic websites > and powers quite a number of the world's most visited ones. If Netdata is running on a host running HAProxy, rather than connecting to Netdata from a port number, a domain name can @@ -18,14 +8,14 @@ be pointed at HAProxy, and HAProxy can redirect connections to the Netdata port. Netdata at `https://example.com` or `https://example.com/netdata/`, which is a much nicer experience than `http://example.com:19999`.
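Whenever you apply one of the HAProxy snippets below, it is worth validating the configuration before reloading. A minimal sketch, assuming the typical package-install path:

```sh
# -c checks the configuration file for errors without starting the service.
sudo haproxy -c -f /etc/haproxy/haproxy.cfg
sudo systemctl reload haproxy
```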
-To proxy requests from [HAProxy](https://github.com/haproxy/haproxy) to Netdata, +To proxy requests from [HAProxy](https://github.com/haproxy/haproxy) to Netdata, the following configuration can be used: ## Default Configuration For all examples, set the mode to `http` -```conf +```text defaults mode http ``` @@ -38,7 +28,7 @@ A simple example where the base URL, say `http://example.com`, is used with no s Create a frontend to receive the request. -```conf +```text frontend http_frontend ## HTTP ipv4 and ipv6 on all ips ## bind :::80 v4v6 @@ -50,7 +40,7 @@ frontend http_frontend Create the Netdata backend which will send requests to port `19999`. -```conf +```text backend netdata_backend option forwardfor server netdata_local 127.0.0.1:19999 @@ -69,7 +59,7 @@ An example where the base URL is used with a subpath `/netdata/`: To use a subpath, create an ACL, which will set a variable based on the subpath. -```conf +```text frontend http_frontend ## HTTP ipv4 and ipv6 on all ips ## bind :::80 v4v6 @@ -92,7 +82,7 @@ frontend http_frontend Same as simple example, except remove `/netdata/` with regex. -```conf +```text backend netdata_backend option forwardfor server netdata_local 127.0.0.1:19999 @@ -107,14 +97,14 @@ backend netdata_backend ## Using TLS communication -TLS can be used by adding port `443` and a cert to the frontend. +TLS can be used by adding port `443` and a cert to the frontend. This example will only use Netdata if host matches example.com (replace with your domain). ### Frontend This frontend uses a certificate list. -```conf +```text frontend https_frontend ## HTTP ## bind :::80 v4v6 @@ -139,11 +129,11 @@ In the cert list file place a mapping from a certificate file to the domain used `/etc/letsencrypt/certslist.txt`: -```txt +```text example.com /etc/letsencrypt/live/example.com/example.com.pem ``` -The file `/etc/letsencrypt/live/example.com/example.com.pem` should contain the key and +The file `/etc/letsencrypt/live/example.com/example.com.pem` should contain the key and certificate (in that order) concatenated into a `.pem` file: ```sh cat /etc/letsencrypt/live/example.com/fullchain.pem \ @@ -156,7 +146,7 @@ Same as simple, except set protocol `https`. -```conf +```text backend netdata_backend option forwardfor server netdata_local 127.0.0.1:19999 @@ -172,30 +162,30 @@ backend netdata_backend To use basic HTTP Authentication, create an authentication list: -```conf +```text # HTTP Auth userlist basic-auth-list group is-admin # Plaintext password - user admin password passwordhere groups is-admin + user admin password YOUR_PASSWORD groups is-admin ``` You can create a hashed password using the `mkpasswd` utility.
```sh - printf "passwordhere" | mkpasswd --stdin --method=sha-256 + printf "YOUR_PASSWORD" | mkpasswd --stdin --method=sha-256 $5$l7Gk0VPIpKO$f5iEcxvjfdF11khw.utzSKqP7W.0oq8wX9nJwPLwzy1 ``` -Replace `passwordhere` with hash: +Replace `YOUR_PASSWORD` with hash: -```conf +```text user admin password $5$l7Gk0VPIpKO$f5iEcxvjfdF11khw.utzSKqP7W.0oq8wX9nJwPLwzy1 groups is-admin ``` Now add at the top of the backend: -```conf +```text acl devops-auth http_auth_group(basic-auth-list) is-admin http-request auth realm netdata_local unless devops-auth ``` @@ -204,7 +194,7 @@ http-request auth realm netdata_local unless devops-auth Full example configuration with HTTP auth over TLS with subpath: -```conf +```text global maxconn 20000 @@ -293,5 +283,3 @@ backend netdata_backend http-request set-header X-Forwarded-Port %[dst_port] http-request set-header Connection "keep-alive" ``` - - diff --git a/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-lighttpd.md b/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-lighttpd.md index 637bc0642..48b9b2c93 100644 --- a/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-lighttpd.md +++ b/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-lighttpd.md @@ -1,26 +1,17 @@ - - -# Netdata via lighttpd v1.4.x +# Running Netdata behind lighttpd v1.4.x Here is a config for accessing Netdata in a suburl via lighttpd 1.4.46 and newer: -```txt +```text $HTTP["url"] =~ "^/netdata/" { proxy.server = ( "" => ("netdata" => ( "host" => "127.0.0.1", "port" => 19999 ))) proxy.header = ( "map-urlpath" => ( "/netdata/" => "/") ) } ``` -If you have older lighttpd you have to use a chain (such as below), as explained [at this stackoverflow answer](http://stackoverflow.com/questions/14536554/lighttpd-configuration-to-proxy-rewrite-from-one-domain-to-another). +If you have older lighttpd you have to use a chain (such as below), as explained [at this Stack Overflow answer](http://stackoverflow.com/questions/14536554/lighttpd-configuration-to-proxy-rewrite-from-one-domain-to-another). -```txt +```text $HTTP["url"] =~ "^/netdata/" { proxy.server = ( "" => ("" => ( "host" => "127.0.0.1", "port" => 19998 ))) } @@ -31,19 +22,16 @@ $SERVER["socket"] == ":19998" { } ``` - - If the only thing the server is exposing via the web is Netdata (and thus no suburl rewriting required), then you can get away with just -``` +```text proxy.server = ( "" => ( ( "host" => "127.0.0.1", "port" => 19999 ))) ``` -Though if it's public facing you might then want to put some authentication on it. htdigest support -looks like: +Though if it's public facing you might then want to put some authentication on it. `htdigest` support looks like: -``` +```text auth.backend = "htdigest" auth.backend.htdigest.userfile = "/etc/lighttpd/lighttpd.htdigest" auth.require = ( "" => ( "method" => "digest", @@ -55,14 +43,12 @@ auth.require = ( "" => ( "method" => "digest", other auth methods, and more info on htdigest, can be found in lighttpd's [mod_auth docs](http://redmine.lighttpd.net/projects/lighttpd/wiki/Docs_ModAuth). - - It seems that lighttpd (or some versions of it), fail to proxy compressed web responses. To solve this issue, disable web response compression in Netdata. 
-Open `/etc/netdata/netdata.conf` and set in [global]\: +Open `/etc/netdata/netdata.conf` and set in `[global]`: -``` +```text enable web responses gzip compression = no ``` @@ -71,5 +57,3 @@ enable web responses gzip compression = no You would also need to instruct Netdata to listen only to `127.0.0.1` or `::1`. To limit access to Netdata only from localhost, set `bind socket to IP = 127.0.0.1` or `bind socket to IP = ::1` in `/etc/netdata/netdata.conf`. - - diff --git a/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-nginx.md b/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-nginx.md index f2dd137dd..c0364633a 100644 --- a/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-nginx.md +++ b/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-nginx.md @@ -2,19 +2,19 @@ ## Intro -[Nginx](https://nginx.org/en/) is an HTTP and reverse proxy server, a mail proxy server, and a generic TCP/UDP proxy server used to host websites and applications of all sizes. +[Nginx](https://nginx.org/en/) is an HTTP and reverse proxy server, a mail proxy server, and a generic TCP/UDP proxy server used to host websites and applications of all sizes. The software is known for its low impact on memory resources, high scalability, and its modular, event-driven architecture which can offer secure, predictable performance. ## Why Nginx -- By default, Nginx is fast and lightweight out of the box. +- By default, Nginx is fast and lightweight out of the box. -- Nginx is used and useful in cases when you want to access different instances of Netdata from a single server. +- Nginx is used and useful in cases when you want to access different instances of Netdata from a single server. -- Password-protect access to Netdata, until distributed authentication is implemented via the Netdata cloud Sign In mechanism. +- Password-protect access to Netdata, until distributed authentication is implemented via the Netdata cloud Sign In mechanism. -- A proxy was necessary to encrypt the communication to Netdata, until v1.16.0, which provided TLS (HTTPS) support. +- A proxy was necessary to encrypt the communication to Netdata, until v1.16.0, which provided TLS (HTTPS) support. ## Nginx configuration file @@ -22,23 +22,23 @@ All Nginx configurations can be found in the `/etc/nginx/` directory. The main c Configuration options in Nginx are known as directives. Directives are organized into groups known as blocks or contexts. The two terms can be used interchangeably. -Depending on your installation source, you’ll find an example configuration file at `/etc/nginx/conf.d/default.conf` or `etc/nginx/sites-enabled/default`, in some cases you may have to manually create the `sites-available` and `sites-enabled` directories. +Depending on your installation source, you’ll find an example configuration file at `/etc/nginx/conf.d/default.conf` or `/etc/nginx/sites-enabled/default`; in some cases you may have to manually create the `sites-available` and `sites-enabled` directories. You can edit the Nginx configuration file with Nano, Vim or any other text editors you are comfortable with. After making changes to the configuration files: -- Test Nginx configuration with `nginx -t`. +- Test Nginx configuration with `nginx -t`. -- Restart Nginx to effect the change with `/etc/init.d/nginx restart` or `service nginx restart`.
+- Restart Nginx to effect the change with `/etc/init.d/nginx restart` or `service nginx restart`. ## Ways to access Netdata via Nginx ### As a virtual host -With this method instead of `SERVER_IP_ADDRESS:19999`, the Netdata dashboard can be accessed via a human-readable URL such as `netdata.example.com` used in the configuration below. +With this method instead of `SERVER_IP_ADDRESS:19999`, the Netdata dashboard can be accessed via a human-readable URL such as `netdata.example.com` used in the configuration below. -```conf +```text upstream backend { # the Netdata server server 127.0.0.1:19999; @@ -69,10 +69,10 @@ server { ### As a subfolder to an existing virtual host -This method is recommended when Netdata is to be served from a subfolder (or directory). +This method is recommended when Netdata is to be served from a subfolder (or directory). In this case, the virtual host `netdata.example.com` already exists and Netdata has to be accessed via `netdata.example.com/netdata/`. -```conf +```text upstream netdata { server 127.0.0.1:19999; keepalive 64; @@ -112,9 +112,9 @@ server { ### As a subfolder for multiple Netdata servers, via one Nginx -This is the recommended configuration when one Nginx will be used to manage multiple Netdata servers via subfolders. +This is the recommended configuration when one Nginx will be used to manage multiple Netdata servers via sub-folders. -```conf +```text upstream backend-server1 { server 10.1.1.103:19999; keepalive 64; @@ -159,16 +159,16 @@ Of course you can add as many backend servers as you like. Using the above, you access Netdata on the backend servers, like this: -- `http://netdata.example.com/netdata/server1/` to reach `backend-server1` -- `http://netdata.example.com/netdata/server2/` to reach `backend-server2` +- `http://netdata.example.com/netdata/server1/` to reach `backend-server1` +- `http://netdata.example.com/netdata/server2/` to reach `backend-server2` ### Encrypt the communication between Nginx and Netdata -In case Netdata's web server has been [configured to use TLS](/src/web/server/README.md#enabling-tls-support), it is +In case Netdata's web server has been [configured to use TLS](/src/web/server/README.md#enable-httpstls-support), it is necessary to specify inside the Nginx configuration that the final destination is using TLS. To do this, please, append the following parameters in your `nginx.conf` -```conf +```text proxy_set_header X-Forwarded-Proto https; proxy_pass https://localhost:19999; ``` @@ -189,7 +189,7 @@ printf "yourusername:$(openssl passwd -apr1)" > /etc/nginx/passwords And then enable the authentication inside your server directive: -```conf +```text server { # ... auth_basic "Protected"; @@ -202,40 +202,35 @@ server { If your Nginx is on `localhost`, you can use this to protect your Netdata: -``` +```text [web] bind to = 127.0.0.1 ::1 ``` You can also use a unix domain socket. 
This will also provide a faster route between Nginx and Netdata: -``` +```text [web] bind to = unix:/var/run/netdata/netdata.sock ``` -*note: Netdata v1.8+ support unix domain sockets* - At the Nginx side, use something like this to use the same unix domain socket: -```conf +```text upstream backend { server unix:/var/run/netdata/netdata.sock; keepalive 64; } ``` - If your Nginx server is not on localhost, you can set: -``` +```text [web] bind to = * allow connections from = IP_OF_NGINX_SERVER ``` -*note: Netdata v1.9+ support `allow connections from`* - `allow connections from` accepts [Netdata simple patterns](/src/libnetdata/simple_pattern/README.md) to match against the connection IP address. @@ -243,7 +238,7 @@ connection IP address. Nginx logs accesses and Netdata logs them too. You can prevent Netdata from generating its access log, by setting this in `/etc/netdata/netdata.conf`: -``` +```text [logs] access = off ``` @@ -252,18 +247,18 @@ Nginx logs accesses and Netdata logs them too. You can prevent Netdata from gene By default, netdata compresses its responses. You can have nginx do that instead, with the following options in the `location /` block: -```conf - location / { - ... - gzip on; - gzip_proxied any; - gzip_types *; - } +```text +location / { + ... + gzip on; + gzip_proxied any; + gzip_types *; +} ``` To disable Netdata's gzip compression, open `netdata.conf` and in the `[web]` section put: -```conf +```text [web] enable gzip compression = no ``` @@ -278,5 +273,3 @@ If you get an 502 Bad Gateway error you might check your Nginx error log: ``` If you see something like the above, chances are high that SELinux prevents nginx from connecting to the backend server. To fix that, just use this policy: `setsebool -P httpd_can_network_connect true`. - - diff --git a/docs/netdata-agent/securing-netdata-agents.md b/docs/netdata-agent/securing-netdata-agents.md index 5232173fb..91a82c1ae 100644 --- a/docs/netdata-agent/securing-netdata-agents.md +++ b/docs/netdata-agent/securing-netdata-agents.md @@ -1,26 +1,25 @@ # Securing Netdata Agents -Netdata is a monitoring system. It should be protected, the same way you protect all your admin apps. We assume Netdata +Netdata is a monitoring system. It should be protected, the same way you protect all your admin apps. We assume Netdata will be installed privately, for your eyes only. Upon installation, the Netdata Agent serves the **local dashboard** at port `19999`. If the node is accessible to the internet at large, anyone can access the dashboard and your node's metrics at `http://NODE:19999`. We made this decision so that the local dashboard was immediately accessible to users, and so that we don't dictate how professionals set up -and secure their infrastructures. +and secure their infrastructures. -Viewers will be able to get some information about the system Netdata is running. This information is everything the dashboard -provides. The dashboard includes a list of the services each system runs (the legends of the charts under the `Systemd Services` -section), the applications running (the legends of the charts under the `Applications` section), the disks of the system and -their names, the user accounts of the system that are running processes (the `Users` and `User Groups` section of the dashboard), +Viewers will be able to get some information about the system Netdata is running. This information is everything the dashboard +provides. 
The dashboard includes a list of the services each system runs (the legends of the charts under the `Systemd Services` +section), the applications running (the legends of the charts under the `Applications` section), the disks of the system and +their names, the user accounts of the system that are running processes (the `Users` and `User Groups` section of the dashboard), the network interfaces and their names (not the IPs) and detailed information about the performance of the system and its applications. -This information is not sensitive (meaning that it is not your business data), but **it is important for possible attackers**. -It will give them clues on what to check, what to try and in the case of DDoS against your applications, they will know if they -are doing it right or not. +This information is not sensitive (meaning that it is not your business data), but **it is important for possible attackers**. +It will give them clues on what to check, what to try and in the case of DDoS against your applications, they will know if they’re doing it right or not. -Also, viewers could use Netdata itself to stress your servers. Although the Netdata daemon runs unprivileged, with the minimum -process priority (scheduling priority `idle` - lower than nice 19) and adjusts its OutOfMemory (OOM) score to 1000 (so that it -will be first to be killed by the kernel if the system starves for memory), some pressure can be applied on your systems if +Also, viewers could use Netdata itself to stress your servers. Although the Netdata daemon runs unprivileged, with the minimum +process priority (scheduling priority `idle` - lower than nice 19) and adjusts its OutOfMemory (OOM) score to 1000 (so that it +will be first to be killed by the kernel if the system starves for memory), some pressure can be applied on your systems if someone attempts a DDoS against Netdata. Instead of dictating how to secure your infrastructure, we give you many options to establish security best practices @@ -29,12 +28,12 @@ that align with your goals and your organization's standards. - [Disable the local dashboard](#disable-the-local-dashboard): **Simplest and recommended method** for those who have added nodes to Netdata Cloud and view dashboards and metrics there. -- [Expose Netdata only in a private LAN](#expose-netdata-only-in-a-private-lan). Simplest and recommended method for those who do not use Netdata Cloud. +- [Expose Netdata only in a private LAN](#expose-netdata-only-in-a-private-lan). Simplest and recommended method for those who don’t use Netdata Cloud. - [Fine-grained access control](#fine-grained-access-control): Allow local dashboard access from only certain IP addresses, such as a trusted static IP or connections from behind a management LAN. Full support for Netdata Cloud. -- [Use a reverse proxy (authenticating web server in proxy mode)](#use-an-authenticating-web-server-in-proxy-mode): Password-protect +- [Use a reverse proxy (authenticating web server in proxy mode)](#use-an-authenticating-web-server-in-proxy-mode): Password-protect a local dashboard and enable TLS to secure it. Full support for Netdata Cloud. - [Use Netdata parents as Web Application Firewalls](#use-netdata-parents-as-web-application-firewalls) @@ -46,7 +45,7 @@ that align with your goals and your organization's standards. This is the _recommended method for those who have connected their nodes to Netdata Cloud_ and prefer viewing real-time metrics using the Room Overview, Nodes tab, and Cloud dashboards. 
-You can disable the local dashboard (and API) but retain the encrypted Agent-Cloud link +You can disable the local dashboard (and API) but retain the encrypted Agent-Cloud link ([ACLK](/src/aclk/README.md)) that allows you to stream metrics on demand from your nodes via the Netdata Cloud interface. This change mitigates all concerns about revealing metrics and system design to the internet at large, while keeping all the functionality you @@ -55,64 +54,61 @@ need to view metrics and troubleshoot issues with Netdata Cloud. Open `netdata.conf` with `./edit-config netdata.conf`. Scroll down to the `[web]` section, and find the `mode = static-threaded` setting, and change it to `none`. -```conf +```text [web] mode = none ``` -Save and close the editor, then [restart your Agent](/packaging/installer/README.md#maintaining-a-netdata-agent-installation) -using `sudo systemctl -restart netdata`. If you try to visit the local dashboard to `http://NODE:19999` again, the connection will fail because +Save and close the editor, then [restart your Agent](/docs/netdata-agent/start-stop-restart.md). If you try to visit the local dashboard to `http://NODE:19999` again, the connection will fail because that node no longer serves its local dashboard. -> See the [configuration basics doc](/docs/netdata-agent/configuration/README.md) for details on how to find +> See the [configuration basics doc](/docs/netdata-agent/configuration/README.md) for details on how to find `netdata.conf` and use > `edit-config`. -If you are using Netdata with Docker, make sure to set the `NETDATA_HEALTHCHECK_TARGET` environment variable to `cli`. - +If you’re using Netdata with Docker, make sure to set the `NETDATA_HEALTHCHECK_TARGET` environment variable to `cli`. ## Expose Netdata only in a private LAN -If your organisation has a private administration and management LAN, you can bind Netdata on this network interface on all your servers. +If your organization has a private administration and management LAN, you can bind Netdata on this network interface on all your servers. This is done in `Netdata.conf` with these settings: -``` +```text [web] - bind to = 10.1.1.1:19999 localhost:19999 + bind to = 10.1.1.1:19999 localhost:19999 ``` -You can bind Netdata to multiple IPs and ports. If you use hostnames, Netdata will resolve them and use all the IPs +You can bind Netdata to multiple IPs and ports. If you use hostnames, Netdata will resolve them and use all the IPs (in the above example `localhost` usually resolves to both `127.0.0.1` and `::1`). -**This is the best and the suggested way to protect Netdata**. Your systems **should** have a private administration and management +**This is the best and the suggested way to protect Netdata**. Your systems **should** have a private administration and management LAN, so that all management tasks are performed without any possibility of them being exposed on the internet. -For cloud based installations, if your cloud provider does not provide such a private LAN (or if you use multiple providers), -you can create a virtual management and administration LAN with tools like `tincd` or `gvpe`. These tools create a mesh VPN -allowing all servers to communicate securely and privately. Your administration stations join this mesh VPN to get access to +For Cloud-based installations, if your cloud provider doesn’t provide such a private LAN (or if you use multiple providers), +you can create a virtual management and administration LAN with tools like `tincd` or `gvpe`. 
These tools create a mesh VPN +allowing all servers to communicate securely and privately. Your administration stations join this mesh VPN to get access to management and administration tasks on all your cloud servers. -For `gvpe` we have developed a [simple provisioning tool](https://github.com/netdata/netdata-demo-site/tree/master/gvpe) you -may find handy (it includes statically compiled `gvpe` binaries for Linux and FreeBSD, and also a script to compile `gvpe` -on your macOS system). We use this to create a management and administration LAN for all Netdata demo sites (spread all over +For `gvpe` we have developed a [simple provisioning tool](https://github.com/netdata/netdata-demo-site/tree/master/gvpe) you +may find handy (it includes statically compiled `gvpe` binaries for Linux and FreeBSD, and also a script to compile `gvpe` +on your macOS system). We use this to create a management and administration LAN for all Netdata demo sites (spread all over the internet using multiple hosting providers). ## Fine-grained access control If you want to keep using the local dashboard, but don't want it exposed to the internet, you can restrict access with -[access lists](/src/web/server/README.md#access-lists). This method also fully +[access lists](/src/web/server/README.md#access-lists). This method also fully retains the ability to stream metrics on-demand through Netdata Cloud. The `allow connections from` setting helps you allow only certain IP addresses or FQDN/hostnames, such as a trusted -static IP, only `localhost`, or connections from behind a management LAN. +static IP, only `localhost`, or connections from behind a management LAN. By default, this setting is `localhost *`. This setting allows connections from `localhost` in addition to _all_ connections, using the `*` wildcard. You can change this setting using Netdata's [simple patterns](/src/libnetdata/simple_pattern/README.md). -```conf +```text [web] # Allow only localhost connections allow connections from = localhost @@ -125,9 +121,9 @@ patterns](/src/libnetdata/simple_pattern/README.md). ``` The `allow connections from` setting is global and restricts access to the dashboard, badges, streaming, API, and -`netdata.conf`, but you can also set each of those access lists more granularly if you choose: +`netdata.conf`, but you can also set each of those access lists in more detail if you want: -```conf +```text [web] allow connections from = localhost * allow dashboard from = localhost * @@ -137,44 +133,42 @@ The `allow connections from` setting is global and restricts access to the dashb allow management from = localhost ``` -See the [web server](/src/web/server/README.md#access-lists) docs for additional details -about access lists. You can take -access lists one step further by [enabling SSL](/src/web/server/README.md#enabling-tls-support) to encrypt data from local +See the [web server](/src/web/server/README.md#access-lists) docs for additional details about access lists. You can take access lists one step further by [enabling SSL](/src/web/server/README.md#enable-httpstls-support) to encrypt data from local dashboard in transit. The connection to Netdata Cloud is always secured with TLS. ## Use an authenticating web server in proxy mode -Use one web server to provide authentication in front of **all your Netdata servers**. So, you will be accessing all your Netdata with -URLs like `http://{HOST}/netdata/{NETDATA_HOSTNAME}/` and authentication will be shared among all of them (you will sign-in once for all your servers). 
-Instructions are provided on how to set the proxy configuration to have Netdata run behind -[nginx](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-nginx.md), -[HAproxy](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-haproxy.md), -[Apache](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-apache.md), -[lighthttpd](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-lighttpd.md), +Use one web server to provide authentication in front of **all your Netdata servers**. So, you will be accessing all your Netdata with +URLs like `http://{HOST}/netdata/{NETDATA_HOSTNAME}/` and authentication will be shared among all of them (you will sign in once for all your servers). +Instructions are provided on how to set the proxy configuration to have Netdata run behind +[nginx](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-nginx.md), +[HAproxy](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-haproxy.md), +[Apache](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-apache.md), +[lighthttpd](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-lighttpd.md), [caddy](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-caddy.md), and [H2O](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-h2o.md). ## Use Netdata parents as Web Application Firewalls -The Netdata Agents you install on your production systems do not need direct access to the Internet. Even when you use -Netdata Cloud, you can appoint one or more Netdata Parents to act as border gateways or application firewalls, isolating -your production systems from the rest of the world. Netdata -Parents receive metric data from Netdata Agents or other Netdata Parents on one side, and serve most queries using their own +The Netdata Agents you install on your production systems don’t need direct access to the Internet. Even when you use +Netdata Cloud, you can appoint one or more Netdata Parents to act as border gateways or application firewalls, isolating +your production systems from the rest of the world. Netdata +Parents receive metric data from Netdata Agents or other Netdata Parents on one side, and serve most queries using their own copy of the data to satisfy dashboard requests on the other side. -For more information see [Streaming and replication](/docs/observability-centralization-points/README.md). +For more information, see [Streaming and replication](/docs/observability-centralization-points/README.md). ## Other methods Of course, there are many more methods you could use to protect Netdata: -- Bind Netdata to localhost and use `ssh -L 19998:127.0.0.1:19999 remote.netdata.ip` to forward connections of local port 19998 to remote port 19999. -This way you can ssh to a Netdata server and then use `http://127.0.0.1:19998/` on your computer to access the remote Netdata dashboard. +- Bind Netdata to localhost and use `ssh -L 19998:127.0.0.1:19999 remote.netdata.ip` to forward connections of local port 19998 to remote port 19999. + This way you can ssh to a Netdata server and then use `http://127.0.0.1:19998/` on your computer to access the remote Netdata dashboard. 
-- If you are always under a static IP, you can use the script given above to allow direct access to your Netdata servers without authentication, -from all your static IPs. +- If you’re always under a static IP, you can use the script given above to allow direct access to your Netdata servers without authentication, + from all your static IPs. -- Install all your Netdata in **headless data collector** mode, forwarding all metrics in real-time to a parent - Netdata server, which will be protected with authentication using an nginx server running locally at the parent - Netdata server. This requires more resources (you will need a bigger parent Netdata server), but does not require - any firewall changes, since all the child Netdata servers will not be listening for incoming connections. +- Install all your Netdata in **headless data collector** mode, forwarding all metrics in real-time to a parent + Netdata server, which will be protected with authentication using a nginx server running locally at the parent + Netdata server. This requires more resources (you will need a bigger parent Netdata server), but doesn’t require + any firewall changes, since all the child Netdata servers will not be listening for incoming connections. diff --git a/docs/netdata-agent/sizing-netdata-agents/README.md b/docs/netdata-agent/sizing-netdata-agents/README.md index 3ba346f7a..3880e214c 100644 --- a/docs/netdata-agent/sizing-netdata-agents/README.md +++ b/docs/netdata-agent/sizing-netdata-agents/README.md @@ -1,89 +1,85 @@ -# Sizing Netdata Agents +# Resource utilization -Netdata automatically adjusts its resources utilization based on the workload offered to it. +Netdata is designed to automatically adjust its resource consumption based on the specific workload. -This is a map of how Netdata **features impact resources utilization**: +This table shows the specific system resources affected by different Netdata features: -| Feature | CPU | RAM | Disk I/O | Disk Space | Retention | Bandwidth | -|-----------------------------:|:---:|:---:|:--------:|:----------:|:---------:|:---------:| -| Metrics collected | X | X | X | X | X | - | -| Samples collection frequency | X | - | X | X | X | - | -| Database mode and tiers | - | X | X | X | X | - | -| Machine learning | X | X | - | - | - | - | -| Streaming | X | X | - | - | - | X | +| Feature | CPU | RAM | Disk I/O | Disk Space | Network Traffic | +|------------------------:|:---:|:---:|:--------:|:----------:|:---------------:| +| Collected metrics | ✓ | ✓ | ✓ | ✓ | - | +| Sample frequency | ✓ | - | ✓ | ✓ | - | +| Database mode and tiers | - | ✓ | ✓ | ✓ | - | +| Machine learning | ✓ | ✓ | - | - | - | +| Streaming | ✓ | ✓ | - | - | ✓ | -1. **Metrics collected**: The number of metrics collected affects almost every aspect of resources utilization. +1. **Collected metrics** - When you need to lower the resources used by Netdata, this is an obvious first step. + - **Impact**: More metrics mean higher CPU, RAM, disk I/O, and disk space usage. + - **Optimization**: To reduce resource consumption, consider lowering the number of collected metrics by disabling unnecessary data collectors. -2. **Samples collection frequency**: By default Netdata collects metrics with 1-second granularity, unless the metrics collected are not updated that frequently, in which case Netdata collects them at the frequency they are updated. This is controlled per data collection job. +2. 
**Sample frequency** - Lowering the data collection frequency from every-second to every-2-seconds, will make Netdata use half the CPU utilization. So, CPU utilization is proportional to the data collection frequency. + - **Impact**: Netdata collects most metrics with 1-second granularity. This high frequency impacts CPU usage. + - **Optimization**: Lowering the sampling frequency (e.g., 1-second to 2-second intervals) can halve CPU usage. Balance the need for detailed data with resource efficiency. -3. **Database Mode and Tiers**: By default Netdata stores metrics in 3 database tiers: high-resolution, mid-resolution, low-resolution. All database tiers are updated in parallel during data collection, and depending on the query duration Netdata may consult one or more tiers to optimize the resources required to satisfy it. +3. **Database Mode** - The number of database tiers affects the memory requirements of Netdata. Going from 3-tiers to 1-tier, will make Netdata use half the memory. Of course metrics retention will also be limited to 1 tier. + - **Impact**: The default database mode, `dbengine`, compresses data and writes it to disk. + - **Optimization**: In a Parent-Child setup, switch the Child's database mode to `ram`. This eliminates disk I/O for the Child. -4. **Machine Learning**: Byt default Netdata trains multiple machine learning models for every metric collected, to learn its behavior and detect anomalies. Machine Learning is a CPU intensive process and affects the overall CPU utilization of Netdata. +4. **Database Tiers** -5. **Streaming Compression**: When using Netdata in Parent-Child configurations to create Metrics Centralization Points, the compression algorithm used greatly affects CPU utilization and bandwidth consumption. + - **Impact**: The number of database tiers directly affects memory consumption. More tiers mean higher memory usage. + - **Optimization**: The default number of tiers is 3. Choose the appropriate number of tiers based on data retention requirements. - Netdata supports multiple streaming compressions algorithms, allowing the optimization of either CPU utilization or Network Bandwidth. The default algorithm `zstd` provides the best balance among them. +5. **Machine Learning** -## Minimizing the resources used by Netdata Agents - -To minimize the resources used by Netdata Agents, we suggest to configure Netdata Parents for centralizing metric samples, and disabling most of the features on Netdata Children. This will provide minimal resources utilization at the edge, while all the features of Netdata are available at the Netdata Parents. - -The following guides provide instructions on how to do this. + - **Impact**: Machine learning model training is CPU-intensive, affecting overall CPU usage. + - **Optimization**: Consider disabling machine learning for less critical metrics or adjusting model training frequency. -## Maximizing the scale of Netdata Parents - -Netdata Parents automatically size resource utilization based on the workload they receive. The only possible option for improving query performance is to dedicate more RAM to them, by increasing their caches efficiency. - -Check [RAM Requirements](/docs/netdata-agent/sizing-netdata-agents/ram-requirements.md) for more information. +6. **Streaming Compression** -## Innovations Netdata has for optimal performance and scalability + - **Impact**: Compression algorithm choice affects CPU usage and network traffic. 
+ - **Optimization**: Select an algorithm that balances CPU efficiency with network bandwidth requirements (e.g., zstd for a good balance). -The following are some of the innovations the open-source Netdata agent has, that contribute to its excellent performance, and scalability. - -1. **Minimal disk I/O** - - When Netdata saves data on-disk, it stores them at their final place, eliminating the need to reorganize this data. - - Netdata is organizing its data structures in such a way that samples are committed to disk as evenly as possible across time, without affecting its memory requirements. +## Minimizing the resources used by Netdata Agents - Furthermore, Netdata Agents use direct-I/O for saving and loading metric samples. This prevents Netdata from polluting system caches with metric data. Netdata maintains its own caches for this data. +To optimize resource utilization, consider using a **Parent-Child** setup. - All these features make Netdata an nice partner and a polite citizen for production applications running on the same systems Netdata runs. +This approach involves centralizing the collection and processing of metrics on Parent nodes while running lightweight Children Agents on edge devices. -2. **4 bytes per sample uncompressed** +## Maximizing the scale of Parent Agents - To achieve optimal memory and disk footprint, Netdata uses a custom 32-bit floating point number. This floating point number is used to store the samples collected, together with their anomaly bit. The database of Netdata is fixed-step, so it has predefined slots for every sample, allowing Netdata to store timestamps once every several hundreds samples, minimizing both its memory requirements and the disk footprint. +Parents dynamically adjust their resource usage based on the volume of metrics received. However, for optimal query performance, you may need to dedicate more RAM. - The final disk footprint of Netdata varies due to compression efficiency. It is usually about 0.6 bytes per sample for the high-resolution tier (per-second), 6 bytes per sample for the mid-resolution tier (per-minute) and 18 bytes per sample for the low-resolution tier (per-hour). +Check [RAM Requirements](/docs/netdata-agent/sizing-netdata-agents/ram-requirements.md) for more information. -3. **Query priorities** +## Netdata's performance and scalability optimization techniques - Alerting, Machine Learning, Streaming and Replication, rely on metric queries. When multiple queries are running in parallel, Netdata assigns priorities to all of them, favoring interactive queries over background tasks. This means that queries do not compete equally for resources. Machine learning or replication may slow down when interactive queries are running and the system starves for resources. +1. **Minimal Disk I/O** -4. **A pointer per label** + Netdata directly writes metric data to disk, bypassing system caches and reducing I/O overhead. Additionally, its optimized data structures minimize disk space and memory usage through efficient compression and timestamping. - Apart from metric samples, metric labels and their cardinality is the biggest memory consumer, especially in highly ephemeral environments, like kubernetes. Netdata uses a single pointer for any label key-value pair that is reused. Keys and values are also deduplicated, providing the best possible memory footprint for metric labels. +2. **Compact Storage Engine** -5. 
**Streaming Protocol** + Netdata uses a custom 32-bit floating-point format tailored for efficient storage of time-series data, along with an anomaly bit. This, combined with a fixed-step database design, enables efficient storage and retrieval of data. - The streaming protocol of Netdata allows minimizing the resources consumed on production systems by delegating features of to other Netdata agents (Parents), without compromising monitoring fidelity or responsiveness, enabling the creation of a highly distributed observability platform. + | Tier | Approximate Sample Size (bytes) | + |-----------------------------------|---------------------------------| + | High-resolution tier (per-second) | 0.6 | + | Mid-resolution tier (per-minute) | 6 | + | Low-resolution tier (per-hour) | 18 | -## Netdata vs Prometheus + Timestamp optimization further reduces storage overhead by storing timestamps at regular intervals. -Netdata outperforms Prometheus in every aspect. -35% CPU Utilization, -49% RAM usage, -12% network bandwidth, -98% disk I/O, -75% in disk footprint for high resolution data, while providing more than a year of retention. +3. **Intelligent Query Engine** -Read the [full comparison here](https://blog.netdata.cloud/netdata-vs-prometheus-performance-analysis/). + Netdata prioritizes interactive queries over background tasks like machine learning and replication, ensuring optimal user experience, especially under heavy load. -## Energy Efficiency +4. **Efficient Label Storage** -University of Amsterdam contacted a research on the impact monitoring systems have on docker based systems. + Netdata uses pointers to reference shared label key-value pairs, minimizing memory usage, especially in highly dynamic environments. -The study found that Netdata excels in CPU utilization, RAM usage, Execution Time and concluded that **Netdata is the most energy efficient tool**. +5. **Scalable Streaming Protocol** -Read the [full study here](https://www.ivanomalavolta.com/files/papers/ICSOC_2023.pdf). + Netdata's streaming protocol enables the creation of distributed monitoring setups, where Children offload data processing to Parents, optimizing resource utilization. diff --git a/docs/netdata-agent/sizing-netdata-agents/bandwidth-requirements.md b/docs/netdata-agent/sizing-netdata-agents/bandwidth-requirements.md index 092c8da16..fbbc279d5 100644 --- a/docs/netdata-agent/sizing-netdata-agents/bandwidth-requirements.md +++ b/docs/netdata-agent/sizing-netdata-agents/bandwidth-requirements.md @@ -1,16 +1,16 @@ # Bandwidth Requirements -## On Production Systems, Standalone Netdata +## Production Systems: Standalone Netdata Standalone Netdata may use network bandwidth under the following conditions: -1. You configured data collection jobs that are fetching data from remote systems. There is no such jobs enabled by default. +1. You configured data collection jobs that are fetching data from remote systems. There are no such jobs enabled by default. 2. You use the dashboard of the Netdata. 3. [Netdata Cloud communication](#netdata-cloud-communication) (see below). -## On Metrics Centralization Points, between Netdata Children & Parents +## Metrics Centralization Points: Between Netdata Children & Parents -Netdata supports multiple compression algorithms for streaming communication. Netdata Children offer all their compression algorithms when connecting to a Netdata Parent, and the Netdata Parent decides which one to use based on algorithms availability and user configuration. 
+Netdata supports multiple compression algorithms for streaming communication. Netdata Children offer all their compression algorithms when connecting to a Netdata Parent, and the Netdata Parent decides which one to use based on algorithm availability and user configuration.
 
 | Algorithm | Best for                                                                                                                              |
 |:---------:|:-----------------------------------------------------------------------------------------------------------------------------------:|
 
@@ -23,7 +23,7 @@ The expected bandwidth consumption using `zstd` for 1 million samples per second
 
 The order compression algorithms is selected is configured in `stream.conf`, per `[API KEY]`, like this:
 
-```
+```text
 compression algorithms order = zstd lz4 brotli gzip
 ```
 
@@ -42,6 +42,6 @@ The information transferred to Netdata Cloud is:
 3. Information about the **metrics available and their retention**.
 4. Information about the **configured alerts and their transitions**.
 
-This is not a constant stream of information. Netdata Agents update Netdata Cloud only about status changes on all the above (e.g. an alert being triggered, or a metric stopped being collected). So, there is an initial handshake and exchange of information when Netdata starts, and then there only updates when required.
+This is not a constant stream of information. Netdata Agents update Netdata Cloud only about status changes on all the above (e.g., an alert being triggered, or a metric that stopped being collected). So, there is an initial handshake and exchange of information when Netdata starts, followed by updates only when something changes.
 
 Of course, when you view Netdata Cloud dashboards that need to query the database a Netdata agent maintains, this query is forwarded to an agent that can satisfy it. This means that Netdata Cloud receives metric samples only when a user is accessing a dashboard and the samples transferred are usually aggregations to allow rendering the dashboards.
 
diff --git a/docs/netdata-agent/sizing-netdata-agents/cpu-requirements.md b/docs/netdata-agent/sizing-netdata-agents/cpu-requirements.md
index 021a35fb2..76580b1c3 100644
--- a/docs/netdata-agent/sizing-netdata-agents/cpu-requirements.md
+++ b/docs/netdata-agent/sizing-netdata-agents/cpu-requirements.md
@@ -1,65 +1,43 @@
-# CPU Requirements
+# CPU
 
-Netdata's CPU consumption is affected by the following factors:
+Netdata's CPU usage depends on the features you enable. For details, see [resource utilization](/docs/netdata-agent/sizing-netdata-agents/README.md).
 
-1. The number of metrics collected
-2. The frequency metrics are collected
-3. Machine Learning
-4. Streaming compression (streaming of metrics to Netdata Parents)
-5. Database Mode
+## Children
 
-## On Production Systems, Netdata Children
+With default settings on Children, CPU utilization typically falls within the range of 1% to 5% of a single core. This includes the combined resource usage of:
 
-On production systems, where Netdata is running with default settings, monitoring the system it is installed at and its containers and applications, CPU utilization should usually be about 1% to 5% of a single CPU core.
+- Three database tiers for data storage.
+- Machine learning for anomaly detection.
+- Per-second data collection.
+- Alerts.
+- Streaming to a [Parent Agent](/docs/observability-centralization-points/metrics-centralization-points/README.md).
 
-This includes 3 database tiers, machine learning, per-second data collection, alerts, and streaming to a Netdata Parent.
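+To see what the Agent actually consumes on a given Child, standard OS tooling is enough. A quick check (assuming a Linux system with `procps` installed):
+
+```bash
+# list the netdata daemon processes with their CPU and memory usage
+ps -C netdata -o pid,pcpu,pmem,args
+```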
+## Parents -## On Metrics Centralization Points, Netdata Parents +For Netdata Parents (Metrics Centralization Points), we estimate the following CPU utilization: -On Metrics Centralization Points, Netdata Parents running on modern server hardware, we **estimate CPU utilization per million of samples collected per second**: +| Feature | Depends On | Expected Utilization (CPU cores per million) | Key Reasons | +|:--------------------:|:---------------------------------------------------:|:--------------------------------------------:|:------------------------------------------------------------------------:| +| Metrics Ingest | Number of samples received per second | 2 | Decompress and decode received messages, update database | +| Metrics re-streaming | Number of samples resent per second | 2 | Encode and compress messages towards another Parent | +| Machine Learning | Number of unique time-series concurrently collected | 2 | Train machine learning models, query existing models to detect anomalies | -| Feature | Depends On | Expected Utilization | Key Reasons | -|:-----------------:|:---------------------------------------------------:|:----------------------------------------------------------------:|:-------------------------------------------------------------------------:| -| Metrics Ingestion | Number of samples received per second | 2 CPU cores per million of samples per second | Decompress and decode received messages, update database. | -| Metrics re-streaming| Number of samples resent per second | 2 CPU cores per million of samples per second | Encode and compress messages towards Netdata Parent. | -| Machine Learning | Number of unique time-series concurrently collected | 2 CPU cores per million of unique metrics concurrently collected | Train machine learning models, query existing models to detect anomalies. | +To ensure optimal performance, keep total CPU utilization below 60% when the Parent is actively processing metrics, training models, and running health checks. -We recommend keeping the total CPU utilization below 60% when a Netdata Parent is steadily ingesting metrics, training machine learning models and running health checks. This will leave enough CPU resources available for queries. +## Increased CPU consumption on Parent startup -## I want to minimize CPU utilization. What should I do? +When a Netdata Parent starts up, it undergoes a series of initialization tasks that can temporarily increase CPU, network, and disk I/O usage: -You can control Netdata's CPU utilization with these parameters: +1. **Backfilling Higher Tiers**: The Parent calculates aggregated metrics for missing data points, ensuring consistency across different time resolutions. +2. **Metadata Synchronization**: The Parent and Children exchange metadata information about collected metrics. +3. **Data Replication**: Missing data is transferred from Children to the Parent. +4. **Normal Streaming**: Regular streaming of new metrics begins. +5. **Machine Learning Initialization**: Machine learning models are loaded and prepared for anomaly detection. +6. **Health Check Initialization**: The health engine starts monitoring metrics and triggering alerts. -1. **Data collection frequency**: Going from per-second metrics to every-2-seconds metrics will half the CPU utilization of Netdata. -2. **Number of metrics collected**: Netdata by default collects every metric available on the systems it runs. Review the metrics collected and disable data collection plugins and modules not needed. -3. 
**Machine Learning**: Disable machine learning to save CPU cycles. -4. **Number of database tiers**: Netdata updates database tiers in parallel, during data collection. This affects both CPU utilization and memory requirements. -5. **Database Mode**: The default database mode is `dbengine`, which compresses and commits data to disk. If you have a Netdata Parent where metrics are aggregated and saved to disk and there is a reliable connection between the Netdata you want to optimize and its Parent, switch to database mode `ram` or `alloc`. This disables saving to disk, so your Netdata will also not use any disk I/O. +Additional considerations: -## I see increased CPU consumption when a busy Netdata Parent starts, why? +- **Compression Optimization**: The compression algorithm learns data patterns to optimize compression ratios. +- **Database Optimization**: The database engine adjusts page sizes for efficient disk I/O. -When a Netdata Parent starts and Netdata children get connected to it, there are several operations that temporarily affect CPU utilization, network bandwidth and disk I/O. - -The general flow looks like this: - -1. **Back-filling of higher tiers**: Usually this means calculating the aggregates of the last hour of `tier2` and of the last minute of `tier1`, ensuring that higher tiers reflect all the information `tier0` has. If Netdata was stopped abnormally (e.g. due to a system failure or crash), higher tiers may have to be back-filled for longer durations. -2. **Metadata synchronization**: The metadata of all metrics each Netdata Child maintains are negotiated between the Child and the Parent and are synchronized. -3. **Replication**: If the Parent is missing samples the Child has, these samples are transferred to the Parent before transferring new samples. -4. Once all these finish, the normal **streaming of new metric samples** starts. -5. At the same time, **machine learning** initializes, loads saved trained models and prepares anomaly detection. -6. After a few moments the **health engine starts checking metrics** for triggering alerts. - -The above process is per metric. So, while one metric back-fills, another replicates and a third one streams. - -At the same time: - -- the compression algorithm learns the patterns of the data exchanged and optimizes its dictionaries for optimal compression and CPU utilization, -- the database engine adjusts the page size of each metric, so that samples are committed to disk as evenly as possible across time. - -So, when looking for the "steady CPU consumption during ingestion" of a busy Netdata Parent, we recommend to let it stabilize for a few hours before checking. - -Keep in mind that Netdata has been designed so that even if during the initialization phase and the connection of hundreds of Netdata Children the system lacks CPU resources, the Netdata Parent will complete all the operations and eventually enter a steady CPU consumption during ingestion, without affecting the quality of the metrics stored. So, it is ok if during initialization of a busy Netdata Parent, CPU consumption spikes to 100%. - -Important: the above initialization process is not such intense when new nodes get connected to a Netdata Parent for the first time (e.g. ephemeral nodes), since several of the steps involved are not required. - -Especially for the cases where children disconnect and reconnect to the Parent due to network related issues (i.e. 
both the Netdata Child and the Netdata Parent have not been restarted and less than 1 hour has passed since the last disconnection), the re-negotiation phase is minimal and metrics are instantly entering the normal streaming phase.
+These initial tasks can temporarily increase resource usage, but the impact typically diminishes as the Parent stabilizes and enters steady-state operation.
diff --git a/docs/netdata-agent/sizing-netdata-agents/disk-requirements-and-retention.md b/docs/netdata-agent/sizing-netdata-agents/disk-requirements-and-retention.md
index 7cd9a527d..68da44000 100644
--- a/docs/netdata-agent/sizing-netdata-agents/disk-requirements-and-retention.md
+++ b/docs/netdata-agent/sizing-netdata-agents/disk-requirements-and-retention.md
@@ -12,7 +12,7 @@ Netdata offers two database modes to suit your needs for performance and data pe
 ## `dbengine`
 
 Netdata's `dbengine` mode efficiently stores data on disk using compression. The actual disk space used depends on how well the data compresses.
-This mode utilizes a tiered storage approach: data is saved in multiple tiers on disk. Each tier retains data at a different resolution (detail level). Higher tiers store a down-sampled (less detailed) version of the data found in lower tiers.
+This mode uses a tiered storage approach: data is saved in multiple tiers on disk. Each tier retains data at a different resolution (detail level). Higher tiers store a down-sampled (less detailed) version of the data found in lower tiers.
 
 ```mermaid
 gantt
 
   tier0, 14d     :a1, 2023-12-24, 7d
   tier1, 60d     :a2, 2023-12-01, 30d
   tier2, 365d    :a3, 2023-11-02, 59d
 ```
 
-`dbengine` supports up to 5 tiers. By default, 3 tiers are used:
+`dbengine` supports up to five tiers. By default, three tiers are used:
 
 | Tier | Resolution | Uncompressed Sample Size | Usually On Disk |
 |:-------:|:--------------------------------------------------------------------------------------------:|:------------------------:|:---------------:|
 
@@ -40,11 +40,11 @@ gantt
 
 ## `ram`
 
-`ram` mode can help when Netdata should not introduce any disk I/O at all. In both of these modes, metric samples exist only in memory, and only while they are collected.
+`ram` mode can help when Netdata shouldn’t introduce any disk I/O at all. In both of these modes, metric samples exist only in memory, and only while they’re collected.
 
-When Netdata is configured to stream its metrics to a Metrics Observability Centralization Point (a Netdata Parent), metric samples are forwarded in real-time to that Netdata Parent. The ring buffers available in these modes is used to cache the collected samples for some time, in case there are network issues, or the Netdata Parent is restarted for maintenance.
+When Netdata is configured to stream its metrics to a Metrics Observability Centralization Point (a Netdata Parent), metric samples are forwarded in real-time to that Netdata Parent. The ring buffers available in these modes are used to cache the collected samples for some time, in case there are network issues, or the Netdata Parent is restarted for maintenance.
 
-The memory required per sample in these modes, is 4 bytes: `ram` mode uses `mmap()` behind the scene, and can be incremented in steps of 1024 samples (4KiB). Mode `ram` allows the use of the Linux kernel memory dedupper (Kernel-Same-Page or KSM) to deduplicate Netdata ring buffers and save memory.
+The memory required per sample in these modes is four bytes: `ram` mode uses `mmap()` behind the scenes, and can be incremented in steps of 1024 samples (4KiB).
Mode `ram` allows the use of the Linux kernel memory dedupper (Kernel-Same-Page or KSM) to deduplicate Netdata ring buffers and save memory.
 
 **Configuring ram mode and retention**:
 
diff --git a/docs/netdata-agent/sizing-netdata-agents/ram-requirements.md b/docs/netdata-agent/sizing-netdata-agents/ram-requirements.md
index 8d8522517..a4ccf5507 100644
--- a/docs/netdata-agent/sizing-netdata-agents/ram-requirements.md
+++ b/docs/netdata-agent/sizing-netdata-agents/ram-requirements.md
@@ -8,21 +8,21 @@ Netdata supports memory ballooning and automatically sizes and limits the memory
 
 With default settings, Netdata should run with 100MB to 200MB of RAM, depending on the number of metrics being collected.
 
-This number can be lowered by limiting the number of database tier or switching database modes. For more information check [Disk Requirements and Retention](/docs/netdata-agent/sizing-netdata-agents/disk-requirements-and-retention.md).
+This number can be lowered by limiting the number of database tiers or switching database modes. For more information, check [Disk Requirements and Retention](/docs/netdata-agent/sizing-netdata-agents/disk-requirements-and-retention.md).
 
 ## On Metrics Centralization Points, Netdata Parents
 
 The general formula, with the default configuration of database tiers, is:
 
-```
+```text
 memory = UNIQUE_METRICS x 16KiB + CONFIGURED_CACHES
 ```
 
 The default `CONFIGURED_CACHES` is 32MiB.
 
-For 1 million concurrently collected time-series (independently of their data collection frequency), the memory required is:
+For one million concurrently collected time-series (independently of their data collection frequency), the memory required is:
 
-```
+```text
 UNIQUE_METRICS = 1000000
 CONFIGURED_CACHES = 32MiB
 
@@ -32,16 +32,16 @@ CONFIGURED_CACHES = 32MiB
 
 about 16 GiB
 ```
 
-There are 2 cache sizes that can be configured in `netdata.conf`:
+There are two cache sizes that can be configured in `netdata.conf`:
 
-1. `[db].dbengine page cache size MB`: this is the main cache that keeps metrics data into memory. When data are not found in it, the extent cache is consulted, and if not found in that either, they are loaded from disk.
-2. `[db].dbengine extent cache size MB`: this is the compressed extent cache. It keeps in memory compressed data blocks, as they appear on disk, to avoid reading them again. Data found in the extend cache but not in the main cache have to be uncompressed to be queried.
+1. `[db].dbengine page cache size`: this is the main cache that keeps metric data in memory. When data is not found in it, the extent cache is consulted, and if not found there either, it is loaded from disk.
+2. `[db].dbengine extent cache size`: this is the compressed extent cache. It keeps in memory compressed data blocks, as they appear on disk, to avoid reading them again. Data found in the extent cache but not in the main cache has to be uncompressed to be queried.
 
 Both of them are dynamically adjusted to use some of the total memory computed above. The configuration in `netdata.conf` allows providing additional memory to them, increasing their caching efficiency.
 
 ## I have a Netdata Parent that is also a systemd-journal logs centralization point, what should I know?
 
-Logs usually require significantly more disk space and I/O bandwidth than metrics. For optimal performance we recommend to store metrics and logs on separate, independent disks.
+Logs usually require significantly more disk space and I/O bandwidth than metrics.
For optimal performance, we recommend storing metrics and logs on separate, independent disks.
 
 Netdata uses direct-I/O for its database, so that it does not pollute the system caches with its own data. We want Netdata to be a nice citizen when it runs side-by-side with production applications, so this was required to guarantee that Netdata does not affect the operation of databases or other sensitive applications running on the same servers.
 
@@ -49,9 +49,9 @@ To optimize disk I/O, Netdata maintains its own private caches. The default sett
 
 `systemd-journal` on the other hand, relies on operating system caches for improving the query performance of logs. When the system lacks free memory, querying logs leads to increased disk I/O.
 
-If you are experiencing slow responses and increased disk reads when metrics queries run, we suggest to dedicate some more RAM to Netdata.
+If you are experiencing slow responses and increased disk reads when metrics queries run, we suggest dedicating some more RAM to Netdata.
 
-We frequently see that the following strategy gives best results:
+We frequently see that the following strategy gives the best results:
 
 1. Start the Netdata Parent, send all the load you expect it to have and let it stabilize for a few hours. Netdata will now use the minimum memory it believes is required for smooth operation.
 2. Check the available system memory.
 
diff --git a/docs/netdata-agent/start-stop-restart.md b/docs/netdata-agent/start-stop-restart.md
index 6fbe18d31..21bf443a0 100644
--- a/docs/netdata-agent/start-stop-restart.md
+++ b/docs/netdata-agent/start-stop-restart.md
@@ -1,30 +1,24 @@
-# Start, stop, or restart the Netdata Agent
+# Service Control
 
-When you install the Netdata Agent, the [daemon](/src/daemon/README.md) is
-configured to start at boot and stop and restart/shutdown.
+The Netdata Agent automatically starts at boot after installation.
 
-You will most often need to _restart_ the Agent to load new or editing configuration files.
-[Health configuration](#reload-health-configuration) files are the only exception, as they can be reloaded without restarting
-the entire Agent.
+> In most cases, you need to **restart the Netdata service** to apply changes to configuration files. Health configuration files, which define alerts, are an exception. They can be [reloaded](#reload-health) **without restarting**.
+>
+> Restarting the Netdata Agent will cause temporary gaps in your collected metrics. This occurs while the netdata process reinitializes its data collectors and database engine.
 
-Stopping or restarting the Netdata Agent will cause gaps in stored metrics until the `netdata` process initiates
-collectors and the database engine.
+## UNIX
 
-## Using `systemctl`, `service`, or `init.d`
+### Using `systemctl`, `service`, or `init.d`
 
-This is the recommended way to start, stop, or restart the Netdata daemon.
+| Action  | Systemd                          | Non-systemd                    |
+|---------|----------------------------------|--------------------------------|
+| start   | `sudo systemctl start netdata`   | `sudo service netdata start`   |
+| stop    | `sudo systemctl stop netdata`    | `sudo service netdata stop`    |
+| restart | `sudo systemctl restart netdata` | `sudo service netdata restart` |
 
-- To **start** Netdata, run `sudo systemctl start netdata`.
-- To **stop** Netdata, run `sudo systemctl stop netdata`.
-- To **restart** Netdata, run `sudo systemctl restart netdata`.
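+To verify the result of any of these actions, you can query the state of the service; shown here for systemd (`sudo service netdata status` works similarly on non-systemd setups):
+
+```bash
+# show whether the netdata service is active, and its most recent log lines
+sudo systemctl status netdata
+```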
+### Using `netdata` -If the above commands fail, or you know that you're using a non-systemd system, try using the `service` command: - -- **service**: `sudo service netdata start`, `sudo service netdata stop`, `sudo service netdata restart` - -## Using `netdata` - -Use the `netdata` command, typically located at `/usr/sbin/netdata`, to start the Netdata daemon. +Use the `netdata` command, typically located at `/usr/sbin/netdata`, to start the Netdata daemon. ```bash sudo netdata @@ -32,122 +26,30 @@ sudo netdata If you start the daemon this way, close it with `sudo killall netdata`. -## Using `netdatacli` +### Using `netdatacli` -The Netdata Agent also comes with a [CLI tool](/src/cli/README.md) capable of performing shutdowns. Start the Agent back up -using your preferred method listed above. +The Netdata Agent also comes with a [CLI tool](/src/cli/README.md) capable of performing shutdowns. Start the Agent back up using your preferred method listed above. ```bash sudo netdatacli shutdown-agent ``` -## Netdata MSI installations - -Netdata provides an installer for Windows using WSL, on those installations by using a Windows terminal (e.g. the Command prompt or Windows Powershell) you can: - -- Start Netdata, by running `start-netdata` -- Stop Netdata, by running `stop-netdata` -- Restart Netdata, by running `restart-netdata` +### Reload health -## Reload health configuration - -You do not need to restart the Netdata Agent between changes to health configuration files, such as specific health -entities. Instead, use [`netdatacli`](#using-netdatacli) and the `reload-health` option to prevent gaps in metrics -collection. +No need to restart the Netdata Agent after modifying health configuration files (alerts). Use `netdatacli` to avoid metric collection gaps. ```bash sudo netdatacli reload-health ``` -If `netdatacli` doesn't work on your system, send a `SIGUSR2` signal to the daemon, which reloads health configuration -without restarting the entire process. - -```bash -killall -USR2 netdata -``` - -## Force stop stalled or unresponsive `netdata` processes - -In rare cases, the Netdata Agent may stall or not properly close sockets, preventing a new process from starting. In -these cases, try the following three commands: - -```bash -sudo systemctl stop netdata -sudo killall netdata -ps aux| grep netdata -``` - -The output of `ps aux` should show no `netdata` or associated processes running. You can now start the Netdata Agent -again with `service netdata start`, or the appropriate method for your system. - -## Starting Netdata at boot - -In the `system` directory you can find scripts and configurations for the -various distros. - -### systemd - -The installer already installs `netdata.service` if it detects a systemd system. - -To install `netdata.service` by hand, run: - -```sh -# stop Netdata -killall netdata - -# copy netdata.service to systemd -cp system/netdata.service /etc/systemd/system/ - -# let systemd know there is a new service -systemctl daemon-reload - -# enable Netdata at boot -systemctl enable netdata - -# start Netdata -systemctl start netdata -``` - -### init.d - -In the system directory you can find `netdata-lsb`. Copy it to the proper place according to your distribution -documentation. For Ubuntu, this can be done via running the following commands as root. 
-
-```sh
-# copy the Netdata startup file to /etc/init.d
-cp system/netdata-lsb /etc/init.d/netdata
-
-# make sure it is executable
-chmod +x /etc/init.d/netdata
-
-# enable it
-update-rc.d netdata defaults
-```
-
-### openrc (gentoo)
-
-In the `system` directory you can find `netdata-openrc`. Copy it to the proper
-place according to your distribution documentation.
-
-### CentOS / Red Hat Enterprise Linux
-
-For older versions of RHEL/CentOS that don't have systemd, an init script is included in the system directory. This can
-be installed by running the following commands as root.
-
-```sh
-# copy the Netdata startup file to /etc/init.d
-cp system/netdata-init-d /etc/init.d/netdata
-
-# make sure it is executable
-chmod +x /etc/init.d/netdata
-
-# enable it
-chkconfig --add netdata
-```
+## Windows
 
-_There have been some recent work on the init script, see PR
-_
+> **Note**
+>
+> You will need to run PowerShell as administrator.
 
-### other systems
+- To **start** Netdata, run `Start-Service Netdata`.
+- To **stop** Netdata, run `Stop-Service Netdata`.
+- To **restart** Netdata, run `Restart-Service Netdata`.
 
-You can start Netdata by running it from `/etc/rc.local` or equivalent.
+If you prefer to manage the Agent through the GUI, you can start, stop, and restart the `Netdata` service from the "Services" tab of Task Manager.
diff --git a/docs/netdata-agent/versions-and-platforms.md b/docs/netdata-agent/versions-and-platforms.md
index 14dc393b5..1f5bf6a97 100644
--- a/docs/netdata-agent/versions-and-platforms.md
+++ b/docs/netdata-agent/versions-and-platforms.md
@@ -1,6 +1,6 @@
 # Netdata Agent Versions & Platforms
 
-Netdata is evolving rapidly and new features are added at a constant pace. Therefore we have a frequent release cadence to deliver all these features to use as soon as possible.
+Netdata is evolving rapidly and new features are added at a constant pace. Therefore, we have a frequent release cadence to deliver all these features to users as soon as possible.
 
 Netdata Agents are available in 2 versions:
 
@@ -9,11 +9,11 @@ Netdata Agents are available in 2 versions:
 | Stable | At most once per month, usually every 45 days | Receiving bug fixes and security updates between releases | Up to the 2nd stable release after them | Previous configuration semantics and data are supported by newer releases |
 | Nightly | Every night at 00:00 UTC | Latest pre-released features | Up to the 2nd nightly release after them | Configuration and data of unreleased features may change between nightly releases |
 
-> "Support Duration" defines the time we consider the release as actively used by users in production systems, so that all features of Netdata should be working like the day they were released. However, after the latest release, previous releases stop receiving bug fixes and security updates. All users are advised to update to the latest release to get the latest bug fixes.
+> "Support Duration" defines the time we consider the release as actively used by users in production systems, so that all features of Netdata should be working like the day they were released. However, after the latest release, previous releases stop receiving bug fixes and security updates. All users are advised to update to the latest release to get the latest bug fixes.
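+To check which release an installation is currently running, ask the daemon itself (assuming the binary is in your `PATH`):
+
+```bash
+# print the installed Netdata version and exit
+netdata -v
+```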
## Binary Distribution Packages -Binary distribution packages are provided by Netdata, via CI integration, for the following platforms and architectures: +Binary distribution packages are provided by Netdata, via CI integration, for the following platforms and architectures: | Platform | Platform Versions | Released Packages Architecture | Format | |:-----------------------:|:--------------------------------:|:------------------------------------------------:|:------------:| @@ -30,7 +30,7 @@ Binary distribution packages are provided by Netdata, via CI integration, for th | Redhat Enterprise Linux | 8.x, 9.x | `x86_64`, `AArch64` | RPM | | Ubuntu | 20.04, 22.04, 23.10 | `x86_64`, `i386`, `ARMv7`, `AArch64` | DEB | -> IMPORTANT: Linux distributions frequently provide binary packages of Netdata. However, the packages you will find in the distributions' repositories may be outdated, incomplete, missing significant features or completely broken. We recommend using the packages we provide. +> IMPORTANT: Linux distributions frequently provide binary packages of Netdata. However, the packages you will find in the distributions' repositories may be outdated, incomplete, missing significant features or completely broken. We recommend using the packages we provide. ## Third-party Supported Binary Packages @@ -41,7 +41,6 @@ The following distributions always provide the latest stable version of Netdata: | Arch Linux | Latest | All the Arch supported architectures | | MacOS Brew | Latest | All the Brew supported architectures | - ## Builds from Source We guarantee Netdata builds from source for the platforms we provide automated binary packages. These platforms are automatically checked via our CI, and fixes are always applied to allow merging new code into the nightly versions. @@ -59,9 +58,9 @@ The following builds from source should usually work, although we don't regularl ## Static Builds and Unsupported Linux Versions -The static builds of Netdata can be used on any Linux platform of the supported architectures. The only requirement these static builds have is a working Linux kernel, any version. Everything else required for Netdata to run, is inside the package itself. +The static builds of Netdata can be used on any Linux platform of the supported architectures. The only requirement these static builds have is a working Linux kernel, any version. Everything else required for Netdata to run is inside the package itself. -Static builds usually miss certain features that require operating-system support and cannot be provided in a generic way. These features include: +Static builds usually miss certain features that require operating-system support and can’t be provided generically. 
These features include:
 
 - IPMI hardware sensors support
 - systemd-journal features
diff --git a/docs/netdata-assistant.md b/docs/netdata-assistant.md
index afa13f6e9..e01aa2774 100644
--- a/docs/netdata-assistant.md
+++ b/docs/netdata-assistant.md
@@ -7,14 +7,14 @@ The Netdata Assistant is a feature that uses large language models and the Netda
 
 - Navigate to the alerts tab
 - If there are active alerts, the `Actions` column will have an Assistant button
 
-  ![](https://github-production-user-asset-6210df.s3.amazonaws.com/24860547/253559075-815ca123-e2b6-4d44-a780-eeee64cca420.png)
+  ![actions column](https://github-production-user-asset-6210df.s3.amazonaws.com/24860547/253559075-815ca123-e2b6-4d44-a780-eeee64cca420.png)
 
 - Clicking on the Assistant button opens up as a floating window with customized information and troubleshooting tips for this alert (note that the window can follow you through your troubleshooting journey on Netdata dashboards)
 
-  ![](https://github-production-user-asset-6210df.s3.amazonaws.com/24860547/253559645-62850c7b-cd1d-45f2-b2dd-474ecbf2b713.png)
+  ![Netdata Assistant popup](https://github-production-user-asset-6210df.s3.amazonaws.com/24860547/253559645-62850c7b-cd1d-45f2-b2dd-474ecbf2b713.png)
 
-- In case you need more information, or want to understand deeper, Netdata Assistant also provides useful web links to resources that can help.
+- If you need more information or want to dig deeper, Netdata Assistant also provides useful web links to resources that can help.
 
-  ![](https://github-production-user-asset-6210df.s3.amazonaws.com/24860547/253560071-e768fa6d-6c9a-4504-bb1f-17d5f4707627.png)
+  ![useful resources](https://github-production-user-asset-6210df.s3.amazonaws.com/24860547/253560071-e768fa6d-6c9a-4504-bb1f-17d5f4707627.png)
 
 - If there are no active alerts, you can still use Netdata Assistant by clicking the Assistant button on the Alert Configuration view.
 
diff --git a/docs/netdata-cloud/authentication-and-authorization/api-tokens.md b/docs/netdata-cloud/authentication-and-authorization/api-tokens.md
index 88b73ee68..a8f304ffb 100644
--- a/docs/netdata-cloud/authentication-and-authorization/api-tokens.md
+++ b/docs/netdata-cloud/authentication-and-authorization/api-tokens.md
@@ -30,5 +30,5 @@ Currently, the Netdata Cloud is not exposing stable API.
 
 * get the cloud space list
 
 ```console
-$ curl -H 'Accept: application/json' -H "Authorization: Bearer " https://app.netdata.cloud/api/v2/spaces
+curl -H 'Accept: application/json' -H "Authorization: Bearer " https://app.netdata.cloud/api/v2/spaces
 ```
diff --git a/docs/netdata-cloud/authentication-and-authorization/enterprise-sso-authentication.md b/docs/netdata-cloud/authentication-and-authorization/enterprise-sso-authentication.md
index 7657e8bcf..184ff5db9 100644
--- a/docs/netdata-cloud/authentication-and-authorization/enterprise-sso-authentication.md
+++ b/docs/netdata-cloud/authentication-and-authorization/enterprise-sso-authentication.md
@@ -1,36 +1,47 @@
 # Enterprise SSO Authentication
 
 Netdata provides you with means to streamline and control how your team connects and authenticates to Netdata Cloud. We provide
-  diferent Single Sign-On (SSO) integrations that allow you to connect with the tool that your organization is using to manage your
+  different Single Sign-On (SSO) integrations that allow you to connect with the tool that your organization is using to manage your
 user accounts.
- > ❗ This feature focus is on the Authentication flow, it doesn't support the Authorization with managing Users and Roles.
-
+ > **Note** This feature focuses on the Authentication flow; it doesn't cover Authorization for managing Users and Roles.
 
 ## How to set it up?
 
 If you want to setup your Netdata Space to allow user Authentication through an Enterprise SSO tool you need to:
-* Confirm the integration to the tool you want is available ([Authentication integations](https://learn.netdata.cloud/docs/netdata-cloud/authentication-&-authorization/cloud-authentication-&-authorization-integrations))
+
+* Confirm that the integration for the tool you want is available ([Authentication integrations](https://learn.netdata.cloud/docs/netdata-cloud/authentication-&-authorization/cloud-authentication-&-authorization-integrations))
 * Have a Netdata Cloud account
 * Have Access to the Space as an administrator
 * Your Space needs to be on the Business plan or higher
 
 Once you ensure the above prerequisites you need to:
+
 1. Click on the Space settings cog (located above your profile icon)
 2. Click on the Authentication tab
 3. Select the card for the integration you are looking for, click on Configure
 4. Fill the required attributes need to establish the integration with the tool
-
 ## How to authenticate to Netdata?
 
 ### From Netdata Sign-up page
 
-If you're starting your flow from Netdata sign-in page you need to:
-1. Click on the link `Sign-in with an Enterprise Signle Sign-On (SSO)`
-2. Enter your email address
-3. Go to your mailbox and check the `Sign In to Nedata` email that you have received
-4. Click on the **Sign In** button
+#### Requirements
+
+You have to update your DNS settings by adding a TXT record with the Netdata verification code as its **Value**.
+The **Value** can be found by clicking the **DNS TXT record** button in your space settings under **User Management**, in the **Authentication & Authorization** tab.
+
+Log into your domain provider’s website, and navigate to the DNS records section.
+Create a new TXT record with the following specifications:
+
+- Value/Answer/Description: `"netdata-verification=[VERIFICATION CODE]"`
+- Name/Host/Alias: Leave this blank or type @ to include a subdomain.
+- Time to live (TTL): "86400" (this can also be inherited from the default configuration).
+
+#### Starting the flow from Netdata sign-in page
+
+1. Click on the link `Sign-in with an Enterprise Single Sign-On (SSO)`
+2. Enter your email address
+3. Complete the SSO flow
 
 Note: If you're not authenticated on the Enterprise SSO tool you'll be prompted to authenticate there first before being allowed to proceed to Netdata Cloud.
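+Before starting the sign-in flow, you can confirm that the TXT record described above has propagated. A quick check, assuming your organization's domain is `example.com`:
+
+```bash
+# query the TXT records of the domain; the output should include the "netdata-verification=..." value
+dig +short TXT example.com
+```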
diff --git a/docs/netdata-cloud/authentication-and-authorization/role-based-access-model.md b/docs/netdata-cloud/authentication-and-authorization/role-based-access-model.md
index d2a3ea4f2..2226a1a0d 100644
--- a/docs/netdata-cloud/authentication-and-authorization/role-based-access-model.md
+++ b/docs/netdata-cloud/authentication-and-authorization/role-based-access-model.md
@@ -108,9 +108,9 @@ In more detail, you can find on the following tables which functionalities are a
 
 | **Functionality**              | **Admin**          | **Manager**        | **Troubleshooter** | **Observer**       | **Billing** | **Member**         | Notes                                                                 |
 |:-------------------------------|:------------------:|:------------------:|:------------------:|:------------------:|:-----------:|:------------------:|:---------------------------------------------------------------------|
-| See all functions in Room      | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | -           | :heavy_check_mark: |
-| Run any function in Room       | :heavy_check_mark: | :heavy_check_mark: | -                  | -                  | -           | -                  |
-| Run read-only function in Room | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | -           | :heavy_check_mark: |                                                                       |
+| See all functions in Room      | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | -           | :heavy_check_mark: | ::                                                                    |
+| Run any function in Room       | :heavy_check_mark: | :heavy_check_mark: | -                  | -                  | -           | -                  | ::                                                                    |
+| Run read-only function in Room | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | -           | :heavy_check_mark: | ::                                                                    |
 | Run sensitive function in Room | :heavy_check_mark: | :heavy_check_mark: | -                  | -                  | -           | -                  | There isn't any function on this category yet, so subject to change.  |
 
 ### Events feed
diff --git a/docs/netdata-cloud/netdata-cloud-on-prem/installation.md b/docs/netdata-cloud/netdata-cloud-on-prem/installation.md
index 259ddb5ce..a23baa99c 100644
--- a/docs/netdata-cloud/netdata-cloud-on-prem/installation.md
+++ b/docs/netdata-cloud/netdata-cloud-on-prem/installation.md
@@ -10,6 +10,20 @@ The following components are required to install Netdata Cloud On-Prem:
 - **Helm** version 3.12+ with OCI Configuration (explained in the installation section)
 - **Kubectl**
 
+The minimum requirements for Netdata Cloud are:
+
+- 4 CPU cores
+- 15GiB of memory
+- Cloud services are ephemeral (they don't require persistent storage)
+
+The requirements for the non-production Dependencies Helm chart are:
+
+- 8 CPU cores
+- 14GiB of memory
+- 160GiB for PVCs (SSD)
+
+> **_NOTE:_** Values for each component may vary depending on the type of load. The most compute-intensive task that the On-Prem needs to perform is the initial sync of directly connected Agents. The testing for these requirements was conducted with 1,000 nodes directly connected to the On-Prem. If you plan on spawning hundreds of new nodes within a few minutes, Postgres will be the first bottleneck. For example, a 2 vCPU / 8 GiB memory / 1k IOPS database can handle 1,000 nodes without any problems if your environment is fairly steady, adding nodes in batches of 10-30 (directly connected).
+
 ## Preparations for Installation
 
 ### Configure AWS CLI
 
@@ -103,39 +117,40 @@ helm upgrade --wait --install netdata-cloud-onprem -n netdata-cloud --create-nam
 
 ## Short description of Netdata Cloud microservices
 
-#### cloud-accounts-service
+### cloud-accounts-service
 
 Responsible for user registration & authentication.
 Manages user account information.
-#### cloud-agent-data-ctrl-service +### cloud-agent-data-ctrl-service Forwards request from the cloud to the relevant agents. The requests include: + - Fetching chart metadata from the agent - Fetching chart data from the agent - Fetching function data from the agent -#### cloud-agent-mqtt-input-service +### cloud-agent-mqtt-input-service Forwards MQTT messages emitted by the agent related to the agent entities to the internal Pulsar broker. These include agent connection state updates. -#### cloud-agent-mqtt-output-service +### cloud-agent-mqtt-output-service Forwards Pulsar messages emitted in the cloud related to the agent entities to the MQTT broker. From there, the messages reach the relevant agent. -#### cloud-alarm-config-mqtt-input-service +### cloud-alarm-config-mqtt-input-service Forwards MQTT messages emitted by the agent related to the alarm-config entities to the internal Pulsar broker. These include the data for the alarm configuration as seen by the agent. -#### cloud-alarm-log-mqtt-input-service +### cloud-alarm-log-mqtt-input-service Forwards MQTT messages emitted by the agent related to the alarm-log entities to the internal Pulsar broker. These contain data about the alarm transitions that occurred in an agent. -#### cloud-alarm-mqtt-output-service +### cloud-alarm-mqtt-output-service Forwards Pulsar messages emitted in the cloud related to the alarm entities to the MQTT broker. From there, the messages reach the relevant agent. -#### cloud-alarm-processor-service +### cloud-alarm-processor-service Persists latest alert statuses received from the agent in the cloud. Aggregates alert statuses from relevant node instances. @@ -143,69 +158,69 @@ Exposes API endpoints to fetch alert data for visualization on the cloud. Determines if notifications need to be sent when alert statuses change and emits relevant messages to Pulsar. Exposes API endpoints to store and return notification-silencing data. -#### cloud-alarm-streaming-service +### cloud-alarm-streaming-service Responsible for starting the alert stream between the agent and the cloud. Ensures that messages are processed in the correct order, and starts a reconciliation process between the cloud and the agent if out-of-order processing occurs. -#### cloud-charts-mqtt-input-service +### cloud-charts-mqtt-input-service Forwards MQTT messages emitted by the agent related to the chart entities to the internal Pulsar broker. These include the chart metadata that is used to display relevant charts on the cloud. -#### cloud-charts-mqtt-output-service +### cloud-charts-mqtt-output-service Forwards Pulsar messages emitted in the cloud related to the charts entities to the MQTT broker. From there, the messages reach the relevant agent. -#### cloud-charts-service +### cloud-charts-service Exposes API endpoints to fetch the chart metadata. Forwards data requests via the `cloud-agent-data-ctrl-service` to the relevant agents to fetch chart data points. Exposes API endpoints to call various other endpoints on the agent, for instance, functions. -#### cloud-custom-dashboard-service +### cloud-custom-dashboard-service Exposes API endpoints to fetch and store custom dashboard data. -#### cloud-environment-service +### cloud-environment-service Serves as the first contact point between the agent and the cloud. Returns authentication and MQTT endpoints to connecting agents. -#### cloud-feed-service +### cloud-feed-service Processes incoming feed events and stores them in Elasticsearch. 
Exposes API endpoints to fetch feed events from Elasticsearch.
 
-#### cloud-frontend
+### cloud-frontend
 
 Contains the on-prem cloud website. Serves static content.
 
-#### cloud-iam-user-service
+### cloud-iam-user-service
 
 Acts as a middleware for authentication on most of the API endpoints. Validates incoming token headers, injects the relevant ones, and forwards the requests.
 
-#### cloud-metrics-exporter
+### cloud-metrics-exporter
 
 Exports various metrics from an On-Prem Cloud installation. Uses the Prometheus metric exposition format.
 
-#### cloud-netdata-assistant
+### cloud-netdata-assistant
 
 Exposes API endpoints to fetch a human-friendly explanation of various netdata configuration options, namely the alerts.
 
-#### cloud-node-mqtt-input-service
+### cloud-node-mqtt-input-service
 
 Forwards MQTT messages emitted by the agent related to the node entities to the internal Pulsar broker. These include the node metadata as well as their connectivity state, either direct or via parents.
 
-#### cloud-node-mqtt-output-service
+### cloud-node-mqtt-output-service
 
 Forwards Pulsar messages emitted in the cloud related to the charts entities to the MQTT broker. From there, the messages reach the relevant agent.
 
-#### cloud-notifications-dispatcher-service
+### cloud-notifications-dispatcher-service
 
 Exposes API endpoints to handle integrations.
 Handles incoming notification messages and uses the relevant channels(email, slack...) to notify relevant users.
 
-#### cloud-spaceroom-service
+### cloud-spaceroom-service
 
 Exposes API endpoints to fetch and store relations between agents, nodes, spaces, users, and rooms.
 Acts as a provider of authorization for other cloud endpoints.
diff --git a/docs/netdata-cloud/versions.md b/docs/netdata-cloud/versions.md
index 06a8f706a..1bfd363d6 100644
--- a/docs/netdata-cloud/versions.md
+++ b/docs/netdata-cloud/versions.md
@@ -4,7 +4,7 @@ Netdata Cloud is provided in two versions:
 
 - **SaaS**, we run and maintain Netdata Cloud and users use it to complement their observability with the additional features it provides.
 
-- **On Prem**, we provide a licensed copy of the Netdata Cloud software, that users can install and run at their premises.
+- **On Prem**, we provide a licensed copy of the Netdata Cloud software that users can install and run on their premises.
 
 The pricing of both versions is similar, with the On-Prem version introducing a monthly fixed-fee for the extra support and packaging required when users are running Netdata Cloud by themselves.
diff --git a/docs/observability-centralization-points/logs-centralization-points-with-systemd-journald/active-journal-source-without-encryption.md b/docs/observability-centralization-points/logs-centralization-points-with-systemd-journald/active-journal-source-without-encryption.md index cbed1e81e..8abccad01 100644 --- a/docs/observability-centralization-points/logs-centralization-points-with-systemd-journald/active-journal-source-without-encryption.md +++ b/docs/observability-centralization-points/logs-centralization-points-with-systemd-journald/active-journal-source-without-encryption.md @@ -47,7 +47,7 @@ sudo systemctl enable --now systemd-journal-gatewayd.socket To use it, open your web browser and navigate to: -``` +```url http://server.ip:19531/browse ``` diff --git a/docs/observability-centralization-points/logs-centralization-points-with-systemd-journald/passive-journal-centralization-with-encryption-using-self-signed-certificates.md b/docs/observability-centralization-points/logs-centralization-points-with-systemd-journald/passive-journal-centralization-with-encryption-using-self-signed-certificates.md index 7f0b7152e..8509a33da 100644 --- a/docs/observability-centralization-points/logs-centralization-points-with-systemd-journald/passive-journal-centralization-with-encryption-using-self-signed-certificates.md +++ b/docs/observability-centralization-points/logs-centralization-points-with-systemd-journald/passive-journal-centralization-with-encryption-using-self-signed-certificates.md @@ -26,7 +26,7 @@ This helps to also automate the distribution of the certificates to your servers We suggest to keep this script and all the involved certificates at the journals centralization server, in the directory `/etc/ssl/systemd-journal`, so that you can make future changes as required. If you prefer to keep the certificate authority and all the certificates at a more secure location, just use the script on that location. 
-On the server that will issue the certificates (usually the centralizaton server), do the following: +On the server that will issue the certificates (usually the centralization server), do the following: ```bash # install systemd-journal-remote to add the users and groups required and openssl for the certs @@ -150,7 +150,7 @@ sudo apt-get install systemd-journal-remote Edit `/etc/systemd/journal-upload.conf` and set the IP address and the port of the server, like so: -```conf +```text [Upload] URL=https://centralization.server.ip:19532 ``` @@ -165,7 +165,7 @@ sudo systemctl edit systemd-journal-upload.service At the top, add: -```conf +```text [Service] Restart=always ``` diff --git a/docs/observability-centralization-points/logs-centralization-points-with-systemd-journald/passive-journal-centralization-without-encryption.md b/docs/observability-centralization-points/logs-centralization-points-with-systemd-journald/passive-journal-centralization-without-encryption.md index b70c22033..a89379e4b 100644 --- a/docs/observability-centralization-points/logs-centralization-points-with-systemd-journald/passive-journal-centralization-without-encryption.md +++ b/docs/observability-centralization-points/logs-centralization-points-with-systemd-journald/passive-journal-centralization-without-encryption.md @@ -74,7 +74,7 @@ sudo apt-get install systemd-journal-remote Edit `/etc/systemd/journal-upload.conf` and set the IP address and the port of the server, like so: -```conf +```text [Upload] URL=http://centralization.server.ip:19532 ``` @@ -87,7 +87,7 @@ sudo systemctl edit systemd-journal-upload At the top, add: -```conf +```text [Service] Restart=always ``` diff --git a/docs/observability-centralization-points/metrics-centralization-points/clustering-and-high-availability-of-netdata-parents.md b/docs/observability-centralization-points/metrics-centralization-points/clustering-and-high-availability-of-netdata-parents.md index 17a10b02e..412263beb 100644 --- a/docs/observability-centralization-points/metrics-centralization-points/clustering-and-high-availability-of-netdata-parents.md +++ b/docs/observability-centralization-points/metrics-centralization-points/clustering-and-high-availability-of-netdata-parents.md @@ -45,6 +45,6 @@ The easiest way is to `rsync` the directory `/var/cache/netdata` from the existi To configure retention at the new Netdata Parent, set in `netdata.conf` the following to at least the values the old Netdata Parent has: -- `[db].dbengine multihost disk space MB`, this is the max disk size for `tier0`. The default is 256MiB. -- `[db].dbengine tier 1 multihost disk space MB`, this is the max disk space for `tier1`. The default is 50% of `tier0`. -- `[db].dbengine tier 2 multihost disk space MB`, this is the max disk space for `tier2`. The default is 50% of `tier1`. +- `[db].dbengine tier 0 retention size`, this is the max disk size for `tier0`. The default is 1GiB. +- `[db].dbengine tier 1 retention size`, this is the max disk space for `tier1`. The default is 1GiB. +- `[db].dbengine tier 2 retention size`, this is the max disk space for `tier2`. The default is 1GiB. 
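+For reference, these settings live in the `[db]` section of `netdata.conf`; a minimal fragment using the default values mentioned above looks like this:
+
+```text
+[db]
+    dbengine tier 0 retention size = 1GiB
+    dbengine tier 1 retention size = 1GiB
+    dbengine tier 2 retention size = 1GiB
+```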
diff --git a/docs/observability-centralization-points/metrics-centralization-points/configuration.md b/docs/observability-centralization-points/metrics-centralization-points/configuration.md
index bf2aa98db..d1f13f050 100644
--- a/docs/observability-centralization-points/metrics-centralization-points/configuration.md
+++ b/docs/observability-centralization-points/metrics-centralization-points/configuration.md
@@ -58,7 +58,7 @@ Save the file and restart Netdata.
 
 While encrypting the connection between your parent and child nodes is recommended for security, it's not required to get started.
 
-This example uses self-signed certificates.
+This example uses self-signed certificates.
 
 > **Note**
 > This section assumes you have read the documentation on [how to edit the Netdata configuration files](/docs/netdata-agent/configuration/README.md).
 
@@ -70,7 +70,7 @@ This example uses self-signed certificates.
 2. **Child node**
    Update `stream.conf` to enable SSL/TLS and allow self-signed certificates. Append ':SSL' to the destination and uncomment 'ssl skip certificate verification'.
 
-   ```conf
+   ```text
    [stream]
      enabled = yes
      destination = 203.0.113.0:SSL
 
@@ -80,8 +80,6 @@ This example uses self-signed certificates.
 
 3. Restart the Netdata Agent on both the parent and child nodes, to stream encrypted metrics using TLS/SSL.
 
-
-
 ## Troubleshooting Streaming Connections
 
 You can find any issues related to streaming at Netdata logs.
diff --git a/docs/observability-centralization-points/metrics-centralization-points/faq.md b/docs/observability-centralization-points/metrics-centralization-points/faq.md
index 027dfc748..1ce0d8534 100644
--- a/docs/observability-centralization-points/metrics-centralization-points/faq.md
+++ b/docs/observability-centralization-points/metrics-centralization-points/faq.md
@@ -65,6 +65,14 @@ It depends on the ephemerality setting of each Netdata Child.
 
 2. **Ephemeral nodes**: These are nodes that are ephemeral by nature and they may shutdown at any point in time without any impact on the services you run.
 
-To set the ephemeral flag on a node, edit its netdata.conf and in the `[health]` section set `is ephemeral = yes`. This setting is propagated to parent nodes and Netdata Cloud.
+To set the ephemeral flag on a node, edit its netdata.conf and in the `[global]` section set `is ephemeral node = yes`. This setting is propagated to parent nodes and Netdata Cloud.
+
+A parent node tracks connections and disconnections. When a node is marked as ephemeral and stops connecting for more than 24 hours, the parent will delete it from its memory and local administration, and tell Cloud that it is no longer live nor stale. Data for the node can no longer be accessed, but if the node connects again later, the node will be "revived", and previous data becomes available again.
+
+A node can be forced into this "forgotten" state with the Netdata CLI tool on the parent the node is connected to (if still connected) or one of the parent agents it was previously connected to. The state will be propagated _upwards_ and _sideways_ in case of an HA setup.
+
+```bash
+netdatacli remove-stale-node
+```
 
 When using Netdata Cloud (via a parent or directly) and a permanent node gets disconnected, Netdata Cloud sends node disconnection notifications.
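+For reference, the ephemeral flag discussed above is a one-line change in `netdata.conf` on the child node:
+
+```text
+[global]
+    is ephemeral node = yes
+```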
diff --git a/docs/observability-centralization-points/metrics-centralization-points/replication-of-past-samples.md b/docs/observability-centralization-points/metrics-centralization-points/replication-of-past-samples.md
index 5c776b860..e0c60e89f 100644
--- a/docs/observability-centralization-points/metrics-centralization-points/replication-of-past-samples.md
+++ b/docs/observability-centralization-points/metrics-centralization-points/replication-of-past-samples.md
@@ -45,13 +45,13 @@ The following `netdata.conf` configuration parameters affect replication.
 
 On the receiving side (Netdata Parent):
 
-- `[db].seconds to replicate` limits the maximum time to be replicated. The default is 1 day (86400 seconds). Keep in mind that replication is also limited by the `tier0` retention the sending side has.
+- `[db].replication period` limits the maximum time to be replicated. The default is 1 day. Keep in mind that replication is also limited by the `tier0` retention the sending side has.
 
 On the sending side (Netdata Children, or Netdata Parent when parents are clustered):
 
 - `[db].replication threads` controls how many concurrent threads will be replicating metrics. The default is 1. Usually the performance is about 2 million samples per second per thread, so increasing this number may allow replication to progress faster between Netdata Parents.
 
-- `[db].cleanup obsolete charts after secs` controls for how much time after metrics stop being collected will not be available for replication. The default is 1 hour (3600 seconds). If you plan to have scheduled maintenance on Netdata Parents of more than 1 hour, we recommend increasing this setting. Keep in mind however, that increasing this duration in highly ephemeral environments can have an impact on RAM utilization, since metrics will be considered as collected for longer durations.
+- `[db].cleanup obsolete charts after` controls how long after metrics stop being collected they remain available for replication. The default is 1 hour (3600 seconds). If you plan to have scheduled maintenance on Netdata Parents of more than 1 hour, we recommend increasing this setting. Keep in mind, however, that increasing this duration in highly ephemeral environments can have an impact on RAM utilization, since metrics will be considered as collected for longer durations.
 
 ## Monitoring Replication Progress
 
diff --git a/docs/security-and-privacy-design/README.md b/docs/security-and-privacy-design/README.md
index c6bfd699e..da484bc0e 100644
--- a/docs/security-and-privacy-design/README.md
+++ b/docs/security-and-privacy-design/README.md
@@ -1,9 +1,8 @@
 # Security and Privacy Design
 
 This document serves as the relevant Annex to the [Terms of Service](https://www.netdata.cloud/service-terms/),
-the [Privacy Policy](https://www.netdata.cloud/privacy/) and
-the Data Processing Addendum, when applicable. It provides more information regarding Netdata’s technical and
-organizational security and privacy measures.
+the [Privacy Policy](https://www.netdata.cloud/privacy/) and the Data Processing Addendum, when applicable.
+It provides more information regarding Netdata’s technical and organizational security and privacy measures.
 
 We have given special attention to all aspects of Netdata, ensuring that everything throughout its operation is as
 secure as possible. Netdata has been designed with security in mind.
 
@@ -16,6 +15,13 @@ Netdata, an open-source software widely installed across the globe, prioritizes
 commitment to safeguarding user data.
The entire structure and internal architecture of the software is built to ensure maximum security. We aim to provide a secure environment from the ground up, rather than as an afterthought. +Netdata Cloud ensures a secure, user-centric environment for monitoring and troubleshooting, treating +observability data and observability metadata distinctly to maintain user control over system insights and +personal information. **Observability data**, which includes metric values (time series) and log events, remains +fully under user control, stored locally on the user's premises. **Observability metadata**, including hostnames, +metric names, alert names, and alert transitions, is minimally required by Netdata Cloud and securely managed +for routing and platform usage purposes. + ### Compliance with Open Source Security Foundation Best Practices Netdata is committed to adhering to the best practices laid out by the Open Source Security Foundation (OSSF). @@ -23,7 +29,7 @@ Currently, the Netdata Agent follows the OSSF best practices at the passing leve the [OSSF guidelines](https://bestpractices.coreinfrastructure.org/en/projects/2231) Netdata Cloud boasts of comprehensive end-to-end automated testing, encompassing the UI, back-end, and agents, where -involved. In addition, the Netdata Agent uses an array of third-party services for static code analysis, static code +involved. In addition, the Netdata Agent uses an array of third-party services for static code analysis, security analysis, and CI/CD integrations to ensure code quality on a per pull request basis. Tools like Github's CodeQL, Github's Dependabot, our own unit tests, various types of linters, and [Coverity](https://scan.coverity.com/projects/netdata-netdata?tab=overview) are utilized to this end. @@ -75,16 +81,20 @@ protection laws, including the GDPR and CCPA. ### Data Transfers -While Netdata Agent itself does not engage in any cross-border data transfers, certain personal and infrastructure data -is transferred to Netdata Cloud for the purpose of providing its services. The metric data collected and processed by -Netdata Agents, however, stays strictly within the user's infrastructure, eliminating any concerns about cross-border -data transfer issues. +While Netdata Agent itself does not engage in any cross-border data transfers, certain **observability metadata** (e.g. +hostnames, metric names, alert names, and alert transitions) is transferred to Netdata Cloud solely to provide routing +and alert notifications. **Observability data**, consisting of metric values (time series) and log events, stays +strictly within the user's infrastructure, mitigating cross-border data transfer concerns. + +For users leveraging Netdata Cloud, **observability data** is securely tunneled through Netdata Cloud for real-time +viewing, similar to a VPN, without being stored on Netdata Cloud servers. This approach ensures that Netdata Cloud +maintains only necessary metadata, while full control of observability data remains with the user. -When users utilize Netdata Cloud, the metric data is streamed directly from the Netdata Agent to the users’ web browsers -via Netdata Cloud, without being stored on Netdata Cloud's servers. However, user identification data (such as email -addresses) and infrastructure metadata necessary for Netdata Cloud's operation are stored in data centers in the United -States, using compliant infrastructure providers such as Google Cloud and Amazon Web Services. 
These transfers and
-storage are carried out in full compliance with applicable data protection laws, including GDPR and CCPA.
+Netdata Cloud stores only Netdata Cloud user identification data (such as observability users' email addresses) and
+infrastructure metadata (such as infrastructure hostnames) necessary for Netdata Cloud's operation. All this metadata
+is stored in data centers in the United States, using compliant infrastructure providers such as Google Cloud and
+Amazon Web Services. These transfers and storage are carried out in full compliance with applicable data protection
+laws, including GDPR and CCPA.
 
 ### Privacy Rights
 
@@ -104,9 +114,11 @@ and reach out with any questions or concerns they may have about data protection
 
 ## Anonymous Statistics
 
-The anonymous statistics collected by the Netdata Agent are related to the installations and not to individual users.
-This data includes community size, types of plugins used, possible crashes, operating systems installed, and the use of
-the registry feature. No IP addresses are collected, but each Netdata installation has a unique ID.
+The anonymous statistics collected by the Netdata Agent pertain to installations rather than individual users,
+capturing general information such as community size, plugin types, crashes, operating systems, and feature usage.
+Importantly, **observability data** — metric values and log events — remains local to the user's infrastructure and
+is not collected in this process. **Observability metadata**, including unique IDs for installations, is anonymized
+and stored solely to support product development and community understanding.
 
 Netdata also collects anonymous telemetry events, which provide information on the usage of various features, errors,
 and performance metrics. This data is used to understand how the software is being used and to identify areas for
@@ -130,41 +142,45 @@ improvement, while respecting user privacy and maintaining transparency.
 
 Internal Security Measures at Netdata are designed with an emphasis on data privacy and protection. The measures
 include:
 
-1. **Infrastructure as Code (IaC)** :
+1. **Observability data and metadata distinction**
+   Netdata Cloud securely handles observability metadata in isolated environments, while observability data remains
+   exclusively within user premises, stored locally and managed by the user. This distinction ensures that only
+   minimal metadata is required for routing and system identification.
+2. **Infrastructure as Code (IaC)** :
    Netdata Cloud follows the IaC model, which means it is a microservices environment that is completely isolated. All
    changes are managed through Terraform, an open-source IaC software tool that provides a consistent CLI workflow for
    managing cloud services.
-2. **TLS Termination and IAM Service** :
+3. **TLS Termination and IAM Service** :
    At the edge of Netdata Cloud, there is a TLS termination, which provides the decryption point for incoming TLS
    connections. Additionally, an Identity Access Management (IAM) service validates JWT tokens included in request
    cookies or denies access to them.
-3. **Session Identification** :
+4. **Session Identification** :
    Once inside the microservices environment, all requests are associated with session IDs that identify the user making
   the request. This approach provides additional layers of security and traceability.
-4. **Data Storage** :
+5. **Data Storage** :
   Data is stored in various NoSQL and SQL databases and message brokers. The entire environment is fully
   isolated, providing a secure space for data management.
-5. **Authentication** :
+6. **Authentication** :
   Netdata Cloud does not store credentials. It offers three types of authentication: GitHub Single Sign-On (SSO),
   Google SSO, and email validation.
-6. **DDoS Protection** :
+7. **DDoS Protection** :
   Netdata Cloud has multiple protection mechanisms against Distributed Denial of Service (DDoS) attacks, including
   rate-limiting and automated blacklisting.
-7. **Security-Focused Development Process** :
+8. **Security-Focused Development Process** :
   To ensure a secure environment, Netdata employs a security-focused development process. This includes the use of
-   static code analysers to identify potential security vulnerabilities in the codebase.
-8. **High Security Standards** :
+   static code analyzers to identify potential security vulnerabilities in the codebase.
+9. **High Security Standards** :
    Netdata Cloud maintains high security standards and can provide additional customization on a per contract basis.
-9. **Employee Security Practices** :
+10. **Employee Security Practices** :
    Netdata ensures its employees follow security best practices, including role-based access, periodic access review,
    and multi-factor authentication. This helps to minimize the risk of unauthorized access to sensitive data.
-10. **Experienced Developers** :
+11. **Experienced Developers** :
    Netdata hires senior developers with vast experience in security-related matters. It enforces two code reviews for
    every Pull Request (PR), ensuring that any potential issues are identified and addressed promptly.
-11. **DevOps Methodologies** :
+12. **DevOps Methodologies** :
    Netdata's DevOps methodologies use the highest standards in access control in all places, utilizing the best
    practices available.
-12. **Risk-Based Security Program** :
+13. **Risk-Based Security Program** :
    Netdata has a risk-based security program that continually assesses and mitigates risks associated with data
   security. This program helps maintain a secure environment for user data.
@@ -243,7 +259,12 @@ Netdata is committed to continuous improvement in security and privacy. While we
 
 ## Conclusion
 
-In conclusion, Netdata Cloud's commitment to data security and user privacy is paramount. From the careful design of the
+Netdata Cloud is designed to secure observability insights for users, maintaining a clear separation between
+observability data and observability metadata. All observability data — metric values and log events — is stored locally,
+entirely under user control, while only essential metadata (hostnames, metric names, alert details) is managed by Netdata
+Cloud for system routing and alerting.
+
+Netdata Cloud's commitment to data security and user privacy is paramount. From the careful design of the
 infrastructure and stringent internal security measures to compliance with international regulations and standards like
 GDPR and CCPA, Netdata Cloud ensures a secure environment for users to monitor and troubleshoot their systems.
diff --git a/docs/security-and-privacy-design/netdata-agent-security.md b/docs/security-and-privacy-design/netdata-agent-security.md index f441fe850..d2e2e1429 100644 --- a/docs/security-and-privacy-design/netdata-agent-security.md +++ b/docs/security-and-privacy-design/netdata-agent-security.md @@ -14,7 +14,6 @@ databases, sent to upstream Netdata servers, or archived to external time-series > > Users are responsible for backing up, recovering, and ensuring their data's availability because Netdata stores data locally on each system due to its decentralized architecture. - The Netdata Agent is programmed to safeguard user data. When collecting data, the raw data does not leave the host. All plugins, even those running with escalated capabilities or privileges, perform a hard-coded data collection job. They do not accept commands from Netdata, and the original application data collected do not leave the process they are @@ -60,7 +59,7 @@ information can be found [here](https://github.com/netdata/netdata/security/poli The Netdata agent is resilient against common security threats such as DDoS attacks and SQL injections. For DDoS, Netdata agent uses a fixed number of threads for processing requests, providing a cap on the resources that can be -consumed. It also automatically manages its memory to prevent overutilization. SQL injections are prevented as nothing +consumed. It also automatically manages its memory to prevent over-utilization. SQL injections are prevented as nothing from the UI is passed back to the data collection plugins accessing databases. Additionally, the Netdata agent is running as a normal, unprivileged, operating system user (a few data collections diff --git a/docs/security-and-privacy-design/netdata-cloud-security.md b/docs/security-and-privacy-design/netdata-cloud-security.md index 1f1bb67d2..1df022860 100644 --- a/docs/security-and-privacy-design/netdata-cloud-security.md +++ b/docs/security-and-privacy-design/netdata-cloud-security.md @@ -44,7 +44,7 @@ Netdata Cloud does not store user credentials. Netdata Cloud offers a variety of security features, including infrastructure-level dashboards, centralized alerts notifications, auditing logs, and role-based access to different segments of the infrastructure. The cloud service employs several protection mechanisms against DDoS attacks, such as rate-limiting and automated blacklisting. It also -uses static code analysers to prevent other types of attacks. +uses static code analyzers to prevent other types of attacks. In the event of potential security vulnerabilities or incidents, Netdata Cloud follows the same process as the Netdata agent. Every report is acknowledged and analyzed by the Netdata team within three working days, and the team keeps the diff --git a/docs/top-monitoring-netdata-functions.md b/docs/top-monitoring-netdata-functions.md index ee76d40ff..a9caea781 100644 --- a/docs/top-monitoring-netdata-functions.md +++ b/docs/top-monitoring-netdata-functions.md @@ -7,7 +7,7 @@ executed on the node/host where the function is made available. Collectors besides the metric collection, storing, and/or streaming work are capable of executing specific routines on request. These routines will bring additional information to help you troubleshoot or even trigger some action to happen on the node itself. -For more details please check out documentation on how we use our internal collector to get this from the first collector that exposes functions - [plugins.d](/src/collectors/plugins.d/README.md#function). 
+For more details, please check out the documentation on how we use our internal collector to get this from the first collector that exposes functions - [plugins.d](/src/plugins.d/README.md#function).
 
 ## Prerequisites
 
diff --git a/integrations/cloud-authentication/integrations/oidc.md b/integrations/cloud-authentication/integrations/oidc.md
index 22731da26..1111770ff 100644
--- a/integrations/cloud-authentication/integrations/oidc.md
+++ b/integrations/cloud-authentication/integrations/oidc.md
@@ -55,7 +55,7 @@ The access settings for your client are the following:
 
 ### SP-initiated SSO
 
-If you start your authentication flow from Netdata sign-in page please check [these steps](/docs/netdata-cloud/authentication-and-authorization/enterprise-sso-authentication.md#from-netdata-sign-up-page).
+If you start your authentication flow from the Netdata sign-in page, please check [these steps](https://github.com/netdata/netdata/blob/master/docs/netdata-cloud/authentication-and-authorization/enterprise-sso-authentication.md#from-netdata-sign-up-page).
 
 ### Reference
 
diff --git a/integrations/cloud-authentication/integrations/okta_sso.md b/integrations/cloud-authentication/integrations/okta_sso.md
index 2e9593f4f..f346b0443 100644
--- a/integrations/cloud-authentication/integrations/okta_sso.md
+++ b/integrations/cloud-authentication/integrations/okta_sso.md
@@ -50,6 +50,6 @@ Steps needed to be done on Okta Admin Portal:
 
 ### SP-initiated SSO
 
-If you start your authentication flow from Netdata sign-in page please check [these steps](/docs/netdata-cloud/authentication-and-authorization/enterprise-sso-authentication.md#from-netdata-sign-up-page).
+If you start your authentication flow from the Netdata sign-in page, please check [these steps](https://github.com/netdata/netdata/blob/master/docs/netdata-cloud/authentication-and-authorization/enterprise-sso-authentication.md#from-netdata-sign-up-page).
 
diff --git a/integrations/cloud-authentication/integrations/scim.md b/integrations/cloud-authentication/integrations/scim.md
index d759a8a9a..4443aacdc 100644
--- a/integrations/cloud-authentication/integrations/scim.md
+++ b/integrations/cloud-authentication/integrations/scim.md
@@ -26,6 +26,20 @@ The System for Cross-domain Identity Management (SCIM) specification is designed
 
 - The Space must be on a paid plan
 - OIDC/SSO integration must already be enabled in one of your Spaces
 
+### Supported Features
+This integration adheres to SCIM v2 specifications. Supported features include:
+
+- User Resource Management (urn:ietf:params:scim:schemas:core:2.0:User)
+- Create users
+- Update user attributes
+- Deactivate users
+- Patch operations: Supported
+- Bulk operations: Not supported
+- Filtering: Supported (max results: 200)
+- Password synchronization: Not supported, as we rely on SSO/OIDC authentication
+- eTag: Not supported
+- Authentication schemes: OAuth Bearer Token
+
 ### Netdata Configuration Steps
 1. Click on the Space settings cog (located above your profile icon).
 2. Click on the **User Management** section and access **Authentication and Authorization** tab.
@@ -37,6 +51,19 @@ The System for Cross-domain Identity Management (SCIM) specification is designed
 
 - **Base URL**: Use this URL as the base URL for your SCIM client.
 - **Token**: Use this token for Bearer Authentication with your SCIM client.
 
+## Client Configuration Steps
+
+### Okta
+If you're configuring SCIM in Okta and already have the Token from the previous section, follow these steps:
+
+1. 
Go to the **Applications** menu on the left-hand panel and select the **Netdata** application. +2. In the **Netdata** application, navigate to the **Provisioning** tab. +3. Click on **Configure API Integration** and check the box for **Enable API Integration**. +4. Enter the Token (obtained in the *Netdata Configuration Steps* section) into the **API Token** field, then click **Test API Credentials** to ensure the connection is successful. +5. If the test is successful, click **Save** to apply the configuration. + +## Troubleshoot + ### Rotating the SCIM Token You can rotate the token provided during SCIM integration setup if needed. @@ -47,17 +74,6 @@ Steps to rotate the token: 4. Click **Regenerate Token**. 5. If successful, you will receive a new token for Bearer Authentication with your SCIM client. -### Supported Features -This integration adheres to SCIM v2 specifications. Supported features include: - -- User Resource Management (urn:ietf:params:scim:schemas:core:2.0:User) -- Patch operations: Supported -- Bulk operations: Not supported -- Filtering: Supported (max results: 200) -- Password synchronization: Not supported, as we rely on SSO/OIDC authentication -- eTag: Not supported -- Authentication schemes: OAuth Bearer Token - ### User Keying Between SCIM and OIDC Our SCIM (System for Cross-domain Identity Management) integration utilizes OIDC (OpenID Connect) to authenticate users. To ensure users are correctly identified and authenticated between SCIM and OIDC, we use the following mapping: @@ -70,6 +86,20 @@ This mapping ensures that the identity of users remains consistent and secure ac The externalID in SCIM must correspond to the subfield in OIDC. Any deviation from this mapping may result in incorrect user identification and authentication failures. +## FAQ + +### Why aren’t users automatically added to Netdata spaces when they’re created through SCIM? + +Currently, our SCIM server supports only the User resource. We plan to add support for the Group resource in the future. + +In a Netdata space, users can belong to multiple rooms and have different roles (e.g., admin, manager). Additionally, the same organization may have multiple spaces. + +As we don't yet support groups, when a user is created through SCIM, we don’t have a way to determine which spaces, rooms, and roles the user should be assigned to. + +Once we implement support for the Group resource, admins will be able to map SCIM groups to Netdata memberships, so this assignment will be done automatically. + +Until then, SCIM can only be used to grant or block access to Netdata for users in your organization. After a user is created, it is up to the Netdata administrator to manually invite them to spaces, rooms and assign roles. + ### Reference [SCIM Specification](https://scim.org) diff --git a/integrations/cloud-authentication/metadata.yaml b/integrations/cloud-authentication/metadata.yaml index 72f5a5fe1..a0bf5654d 100644 --- a/integrations/cloud-authentication/metadata.yaml +++ b/integrations/cloud-authentication/metadata.yaml @@ -125,6 +125,20 @@ - The Space must be on a paid plan - OIDC/SSO integration must already be enabled in one of your Spaces + ### Supported Features + This integration adheres to SCIM v2 specifications. 
Supported features include: + + - User Resource Management (urn:ietf:params:scim:schemas:core:2.0:User) + - Create users + - Update user attributes + - Deactivate users + - Patch operations: Supported + - Bulk operations: Not supported + - Filtering: Supported (max results: 200) + - Password synchronization: Not supported, as we rely on SSO/OIDC authentication + - eTag: Not supported + - Authentication schemes: OAuth Bearer Token + ### Netdata Configuration Steps 1. Click on the Space settings cog (located above your profile icon). 2. Click on the **User Management** section and access **Authentication and Authorization** tab. @@ -136,6 +150,19 @@ - **Base URL**: Use this URL as the base URL for your SCIM client. - **Token**: Use this token for Bearer Authentication with your SCIM client. + ## Client Configuration Steps + + ### Okta + If you're configuring SCIM in Okta, and you already have the Token from the previous section, follow these steps: + + 1. Go to the **Applications** menu on the left-hand panel and select the **Netdata** application. + 2. In the **Netdata** application, navigate to the **Provisioning** tab. + 3. Click on **Configure API Integration** and check the box for **Enable API Integration**. + 4. Enter the Token (obtained in the *Netdata Configuration Steps* section) into the **API Token** field, then click **Test API Credentials** to ensure the connection is successful. + 5. If the test is successful, click **Save** to apply the configuration. + + ## Troubleshoot + ### Rotating the SCIM Token You can rotate the token provided during SCIM integration setup if needed. @@ -146,17 +173,6 @@ 4. Click **Regenerate Token**. 5. If successful, you will receive a new token for Bearer Authentication with your SCIM client. - ### Supported Features - This integration adheres to SCIM v2 specifications. Supported features include: - - - User Resource Management (urn:ietf:params:scim:schemas:core:2.0:User) - - Patch operations: Supported - - Bulk operations: Not supported - - Filtering: Supported (max results: 200) - - Password synchronization: Not supported, as we rely on SSO/OIDC authentication - - eTag: Not supported - - Authentication schemes: OAuth Bearer Token - ### User Keying Between SCIM and OIDC Our SCIM (System for Cross-domain Identity Management) integration utilizes OIDC (OpenID Connect) to authenticate users. To ensure users are correctly identified and authenticated between SCIM and OIDC, we use the following mapping: @@ -169,5 +185,19 @@ The externalID in SCIM must correspond to the subfield in OIDC. Any deviation from this mapping may result in incorrect user identification and authentication failures. + ## FAQ + + ### Why aren’t users automatically added to Netdata spaces when they’re created through SCIM? + + Currently, our SCIM server supports only the User resource. We plan to add support for the Group resource in the future. + + In a Netdata space, users can belong to multiple rooms and have different roles (e.g., admin, manager). Additionally, the same organization may have multiple spaces. + + As we don't yet support groups, when a user is created through SCIM, we don’t have a way to determine which spaces, rooms, and roles the user should be assigned to. + + Once we implement support for the Group resource, admins will be able to map SCIM groups to Netdata memberships, so this assignment will be done automatically. + + Until then, SCIM can only be used to grant or block access to Netdata for users in your organization. 
After a user is created, it is up to the Netdata administrator to manually invite them to spaces, rooms and assign roles. + ### Reference [SCIM Specification](https://scim.org) diff --git a/integrations/cloud-notifications/integrations/amazon_sns.md b/integrations/cloud-notifications/integrations/amazon_sns.md index 45bf0f6da..94feda0d8 100644 --- a/integrations/cloud-notifications/integrations/amazon_sns.md +++ b/integrations/cloud-notifications/integrations/amazon_sns.md @@ -9,49 +9,40 @@ endmeta--> # Amazon SNS - - -From the Cloud interface, you can manage your space's notification settings and from these you can add a specific configuration to get notifications delivered on AWS SNS. - - ## Setup ### Prerequisites -To add AWS SNS notification you need: - - A Netdata Cloud account - Access to the space as an **Admin** - The Space needs to be on a paid plan -- Have an AWS account with AWS SNS access, for more details check [how to configure this on AWS SNS](#settings-on-aws-sns) +- An AWS account with AWS SNS access -### Steps +### AWS SNS Configuration -1. Click on the **Space settings** cog (located above your profile icon) -2. Click on the **Notification** tab -3. Click on the **+ Add configuration** button (near the top-right corner of your screen) -4. On the **AwsSns** card click on **+ Add** -5. A modal will be presented to you to enter the required details to enable the configuration: - * **Notification settings** are Netdata specific settings - - Configuration name - you can optionally provide a name for your configuration you can easily refer to it - - Rooms - by specifying a list of Rooms you are select to which nodes or areas of your infrastructure you want to be notified using this configuration - - Notification - you specify which notifications you want to be notified using this configuration: All Alerts and unreachable, All Alerts, Critical only - * **Integration configuration** are the specific notification integration required settings, which vary by notification method. For AWS SNS: - - Topic ARN - topic provided on AWS SNS (with region) for where to publish your notifications. For more details check [how to configure this on AWS SNS](#settings-on-aws-sns) - -### Settings on AWS SNS - -To enable the webhook integration on AWS SNS you need: 1. [Setting up access for Amazon SNS](https://docs.aws.amazon.com/sns/latest/dg/sns-setting-up.html) 2. Create a topic - On AWS SNS management console click on **Create topic** - - On the **Details** section, the standard type and provide the topic name + - On the **Details** section, select the standard type and provide the topic name - On the **Access policy** section, change the **Publishers** option to **Only the specified AWS accounts** and provide the Netdata AWS account **(123269920060)** that will be used to publish notifications to the topic being created - - Finally, click on **Create topic** on the bottom of the page -3. Now, use the new **Topic ARN** while adding AWS SNS integration on your space. +3. Copy the **Topic ARN** in order to add it to your integration configuration in the Netdata Cloud UI + +### Netdata Configuration + +1. Click on the **Space settings** cog (located above your profile icon) +2. Click on the **Alerts & Notifications** tab +3. Click on the **+ Add configuration** button +4. Add the AWS SNS Integration +5. 
A modal will be presented to you to enter the required details to enable the integration:
+   - **Notification settings**
+     - Configuration name (optional): A name for your configuration in order to easily refer to it
+     - Rooms: A list of Rooms for which you want to be notified
+     - Notifications: The notifications that you want to receive
+   - **Integration configuration**
+     - Topic ARN: The AWS SNS topic (with region) to which your notifications will be published.
diff --git a/integrations/cloud-notifications/integrations/discord.md b/integrations/cloud-notifications/integrations/discord.md
index 3553d17be..03d446278 100644
--- a/integrations/cloud-notifications/integrations/discord.md
+++ b/integrations/cloud-notifications/integrations/discord.md
@@ -9,42 +9,37 @@ endmeta-->
 
 # Discord
 
 
-
-From the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on Discord.
-
 
 ## Setup
 
 ### Prerequisites
+
 - A Netdata Cloud account
-- Access to the Netdata Space as an **Admin**
-- You need to have a Discord server able to receive webhooks integrations.
+- Access to the Space as an **Admin**
 
 ### Discord Server Configuration
-Steps to configure your Discord server to receive [webhook notifications](https://support.discord.com/hc/en-us/articles/228383668) from Netdata:
-1. Go to `Server Settings` --> `Integrations`
+
+1. Go to **Server Settings** --> **Integrations**
 2. **Create Webhook** or **View Webhooks** if you already have some defined
 3. Specify the **Name** and **Channel** on your new webhook
-4. Use Webhook URL to add your notification configuration on Netdata UI
+4. Make a note of the **Webhook URL**, as you will need it when configuring the integration in the Netdata Cloud UI
+
+### Netdata Configuration
 
-### Netdata Configuration Steps
 1. Click on the **Space settings** cog (located above your profile icon)
-2. Click on the **Notification** tab
-3. Click on the **+ Add configuration** button (near the top-right corner of your screen)
-4. On the **Discord** card click on **+ Add**
-5. A modal will be presented to you to enter the required details to enable the configuration:
-   * **Notification settings** are Netdata specific settings
-     - Configuration name - you can optionally provide a name for your configuration you can easily refer to it
-     - Rooms - by specifying a list of Rooms you are select to which nodes or areas of your infrastructure you want to be notified using this configuration
-     - Notification - you specify which notifications you want to be notified using this configuration: All Alerts and unreachable, All Alerts, Critical only
-   * **Integration configuration** are the specific notification integration required settings, which vary by notification method. For Discord:
-     - Define the type channel you want to send notifications to: **Text channel** or **Forum channel**
-     - Webhook URL - URL provided on Discord for the channel you want to receive your notifications.
-     - Thread name - if the Discord channel is a **Forum channel** you will need to provide the thread name as well
+2. Click on the **Alerts & Notifications** tab
+3. Click on the **+ Add configuration** button
+4. Add the Discord Integration
+5. 
A modal will be presented to you to enter the required details to enable the integration:
+   - **Notification settings**
+     - Configuration name (optional): A name for your configuration in order to easily refer to it
+     - Rooms: A list of Rooms for which you want to be notified
+     - Notifications: The notifications that you want to receive
+   - **Integration configuration**
+     - Webhook URL: The URL you copied from the previous section
+     - Channel Parameters: Select the channel type to which the notifications will be sent; if it is a Forum channel, you also need to specify a thread name
diff --git a/integrations/cloud-notifications/integrations/ilert.md b/integrations/cloud-notifications/integrations/ilert.md
new file mode 100644
index 000000000..31243d6cc
--- /dev/null
+++ b/integrations/cloud-notifications/integrations/ilert.md
@@ -0,0 +1,51 @@
+
+
+# ilert
+
+
+
+
+
+## Setup
+
+### Prerequisites
+
+- A Netdata Cloud account
+- Access to the Space as an **Admin**
+- The Space needs to be on a paid plan
+- You need to have permissions on ilert to add new Alert sources.
+
+### ilert Configuration
+
+1. From the navigation bar, open the Alert sources dropdown and click "Alert sources"
+2. Click on the "+ Create a new alert source" button
+3. Configure an Alert source:
+   - Select "API integration" and click Next
+   - Provide a name that suits the source's purpose, for example "Netdata"
+   - Select Escalation policy
+   - Select Alert grouping (optional)
+4. Obtain the API Key:
+   - Once the Alert source is created, you will be provided with an API key. Copy it in order to add it to your integration configuration in the Netdata Cloud UI
+
+### Netdata Configuration
+
+1. Click on the **Space settings** cog (located above your profile icon)
+2. Click on the **Alerts & Notifications** tab
+3. Click on the **+ Add configuration** button
+4. Add the ilert Integration
+5. A modal will be presented to you to enter the required details to enable the integration:
+   - **Notification settings**
+     - Configuration name (optional): A name for your configuration in order to easily refer to it
+     - Rooms: A list of Rooms for which you want to be notified
+     - Notifications: The notifications that you want to receive
+   - **Integration configuration**
+     - Alert Source API key: The key you copied in the ilert configuration step.
+
+
diff --git a/integrations/cloud-notifications/integrations/mattermost.md b/integrations/cloud-notifications/integrations/mattermost.md
index 4302a2e84..5af1058d3 100644
--- a/integrations/cloud-notifications/integrations/mattermost.md
+++ b/integrations/cloud-notifications/integrations/mattermost.md
@@ -9,13 +9,8 @@ endmeta-->
 
 # Mattermost
 
 
-
-From the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on Mattermost.
-
 
 ## Setup
 
@@ -23,38 +18,32 @@ From the Netdata Cloud UI, you can manage your space's notification settings and
 
 ### Prerequisites
 
 - A Netdata Cloud account
-- Access to the Netdata Space as an **Admin**
-- The Netdata Space needs to be on a paid plan
+- Access to the Space as an **Admin**
+- The Space needs to be on a paid plan
 - You need to have permissions on Mattermost to add new integrations.
-- You need to have a Mattermost app on your workspace to receive the webhooks.
 
 ### Mattermost Server Configuration
 
-Steps to configure your Mattermost to receive notifications from Netdata:
-
 1. 
In Mattermost, go to Product menu > Integrations > Incoming Webhook
-   - If you don’t have the Integrations option, incoming webhooks may not be enabled on your Mattermost server or may be disabled for non-admins. They can be enabled by a System Admin from System Console > Integrations > Integration Management. Once incoming webhooks are enabled, continue with the steps below.
-2. Select Add Incoming Webhook and add a name and description for the webhook. The description can be up to 500 characters
+   - If you don't have the Integrations option, incoming webhooks may not be enabled on your Mattermost server or may be disabled for non-admins. They can be enabled by a System Admin from System Console > Integrations > Integration Management. Once incoming webhooks are enabled, continue with the steps below.
+2. Select Add Incoming Webhook and add a name and description for the webhook.
 3. Select the channel to receive webhook payloads, then select Add to create the webhook
-4. You will end up with a webhook endpoint that looks like below:
-   `https://your-mattermost-server.com/hooks/xxx-generatedkey-xxx`
-
-   - Treat this endpoint as a secret. Anyone who has it will be able to post messages to your Mattermost instance.
+4. You will end up with a webhook URL that looks like `https://your-mattermost-server.com/hooks/xxx-generatedkey-xxx`; copy it in order to add it to your integration configuration in the Netdata Cloud UI
 
-For more details please check Mattermost's article [Incoming webhooks for Mattermost](https://developers.mattermost.com/integrate/webhooks/incoming/).
+For more details, please check [Incoming webhooks for Mattermost](https://developers.mattermost.com/integrate/webhooks/incoming/).
 
-### Netdata Configuration Steps
+### Netdata Configuration
 
 1. Click on the **Space settings** cog (located above your profile icon)
-2. Click on the **Notification** tab
-3. Click on the **+ Add configuration** button (near the top-right corner of your screen)
-4. On the **Mattermost** card click on **+ Add**
-5. A modal will be presented to you to enter the required details to enable the configuration:
-   * **Notification settings** are Netdata specific settings
-     - Configuration name - you can optionally provide a name for your configuration you can easily refer to it
-     - Rooms - by specifying a list of Rooms you are select to which nodes or areas of your infrastructure you want to be notified using this configuration
-     - Notification - you specify which notifications you want to be notified using this configuration: All Alerts and unreachable, All Alerts, Critical only
-   * **Integration configuration** are the specific notification integration required settings, which vary by notification method. For Mattermost:
-     - Webhook URL - URL provided on Mattermost for the channel you want to receive your notifications
+2. Click on the **Alerts & Notifications** tab
+3. Click on the **+ Add configuration** button
+4. Add the Mattermost Integration
+5. 
A modal will be presented to you to enter the required details to enable the integration:
+   - **Notification settings**
+     - Configuration name (optional): A name for your configuration in order to easily refer to it
+     - Rooms: A list of Rooms for which you want to be notified
+     - Notifications: The notifications that you want to receive
+   - **Integration configuration**
+     - Webhook URL: The URL provided on Mattermost for the channel where you want to receive your notifications
diff --git a/integrations/cloud-notifications/integrations/microsoft_teams.md b/integrations/cloud-notifications/integrations/microsoft_teams.md
index ab6780966..169a00ab3 100644
--- a/integrations/cloud-notifications/integrations/microsoft_teams.md
+++ b/integrations/cloud-notifications/integrations/microsoft_teams.md
@@ -9,50 +9,41 @@ endmeta-->
 
 # Microsoft Teams
 
 
-
-From the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications to a Microsoft Teams channel.
-
 
 ## Setup
 
 ### Prerequisites
 
-To add Microsoft Teams notifications integration to your Netdata Cloud space you will need the following:
-
-- A Netdata Cloud account.
-- Access to the Netdata Cloud space as an **Admin**.
-- The Space to be on a paid plan.
-- A [Microsoft Teams Essentials subscription](https://www.microsoft.com/en-sg/microsoft-teams/essentials) or higher. Note that this is a **paid** feature.
-
-### Settings on Microsoft Teams
-
-1. **Access the Channel Settings**: Navigate to the desired Microsoft Teams channel and hover over the channel name. Click the three dots (ellipsis) icon that appears.
-2. **Create a New Workflow**: Select "Workflows" from the options, then choose "Post to a channel when a webhook request is received."
-3. **Configure Workflow Details**:
-   - Give your workflow a descriptive name, such as "Netdata Alerts."
-   - Select the target team and channel to receive notifications.
-   - Click "Add workflow."
-4. **Obtain the Webhook URL**:
-   - Once the workflow is created, you will receive a unique Workflow Webhook URL.
-   - Copy this URL, as it will be required to configure Netdata Cloud.
-
-### Settings on Netdata Cloud
-
-1. Click on the **Space settings** cog (located above your profile icon).
-2. Click on the **Notification** tab.
-3. Click on the **+ Add configuration** button (near the top-right corner of your screen).
-4. On the **Microsoft Teams** card click on **+ Add**.
-5. A modal will be presented to you to enter the required details to enable the configuration:
-   * **Notification settings** are Netdata specific settings:
-     - Configuration name - you can optionally provide a name for your configuration you can easily refer to it.
-     - Rooms - by specifying a list of Rooms you are select to which nodes or areas of your infrastructure you want to be notified using this configuration.
-     - Notification - you specify which notifications you want to be notified using this configuration: All Alerts and unreachable, All Alerts, Critical only.
-   * **Integration configuration** are the specific notification integration required settings, which vary by notification method. For Microsoft Teams:
-     - Microsoft Teams Incoming Webhook URL - the _Incoming Webhook URL_ that was generated earlier.
+- A Netdata Cloud account
+- Access to the Space as an **Admin**
+- The Space needs to be on a paid plan
+- A [Microsoft Teams Essentials subscription](https://www.microsoft.com/en-sg/microsoft-teams/essentials) or higher. 
Note that this is a **paid** feature
+
+### Microsoft Teams Configuration
+
+1. Navigate to the desired Microsoft Teams channel and hover over the channel name. Click the three dots icon that appears
+2. Select "Workflows" from the options, then choose "Post to a channel when a webhook request is received"
+3. **Configure Workflow Details**
+   - Give your workflow a name, such as "Netdata Alerts"
+   - Select the target team and channel where you will receive notifications
+   - Click "Add workflow"
+4. Once the workflow is created, you will receive a unique Workflow Webhook URL; copy it in order to add it to your integration configuration in the Netdata Cloud UI
+
+### Netdata Configuration
+
+1. Click on the **Space settings** cog (located above your profile icon)
+2. Click on the **Alerts & Notifications** tab
+3. Click on the **+ Add configuration** button
+4. Add the Microsoft Teams Integration
+5. A modal will be presented to you to enter the required details to enable the integration:
+   - **Notification settings**
+     - Configuration name (optional): A name for your configuration in order to easily refer to it
+     - Rooms: A list of Rooms for which you want to be notified
+     - Notifications: The notifications that you want to receive
+   - **Integration configuration**
+     - Microsoft Teams Incoming Webhook URL: The Incoming Webhook URL that you copied earlier.
diff --git a/integrations/cloud-notifications/integrations/netdata_mobile_app.md b/integrations/cloud-notifications/integrations/netdata_mobile_app.md
index 93de1316b..e8285bf09 100644
--- a/integrations/cloud-notifications/integrations/netdata_mobile_app.md
+++ b/integrations/cloud-notifications/integrations/netdata_mobile_app.md
@@ -9,33 +9,28 @@ endmeta-->
 
 # Netdata Mobile App
 
 
-
-From the Netdata Cloud UI, you can manage your user notification settings and enable the configuration to deliver notifications on the Netdata Mobile Application.
-
 
 ## Setup
 
 ### Prerequisites
+
 - A Netdata Cloud account
-- You need to have the Netdata Mobile Application installed on your [Android](https://play.google.com/store/apps/details?id=cloud.netdata.android&pli=1) or [iOS](https://apps.apple.com/in/app/netdata-mobile/id6474659622) phone.
-
-### Netdata Mobile App Configuration
-Steps to login to the Netdata Mobile Application to receive alert and reachability and alert notifications:
-1. Download the Netdata Mobile Application from [Google Play Store](https://play.google.com/store/apps/details?id=cloud.netdata.android&pli=1) or the [iOS App Store](https://apps.apple.com/in/app/netdata-mobile/id6474659622)
-2. Open the App and Choose the Sign In Option
-   - Sign In with Email Address: Enter the Email Address of your registered Netdata Cloud Account and Click on the Verification link received by Email on your mobile device.
-   - Sign In with QR Code: Scan the QR Code from your `Netdata Cloud` UI under **User Settings** --> **Notifications** --> **Mobile App Notifications** --> **Show QR Code**
-3. Start receiving alert and reachability notifications for your **Space(s)** on a **Paid Subscription plan**
-
-### Netdata Configuration Steps
-1. Click on the **User settings** on the bottom left of your screen (your profile icon)
-2. Click on the **Notifications** tab
-3. Enable **Mobile App Notifications** if disabled (Enabled by default)
-4. 
Use the **Show QR Code** Option to login to your mobile device by scanning the **QR Code**
+- You need to have the Netdata Mobile App installed on your [Android](https://play.google.com/store/apps/details?id=cloud.netdata.android&pli=1) or [iOS](https://apps.apple.com/in/app/netdata-mobile/id6474659622) phone.
+
+### Netdata Mobile App Configuration and Device Linking
+
+In order to log in to the Netdata Mobile App:
+
+1. Download the Netdata Mobile App from [Google Play Store](https://play.google.com/store/apps/details?id=cloud.netdata.android&pli=1) or the [iOS App Store](https://apps.apple.com/in/app/netdata-mobile/id6474659622)
+2. Open the App and choose your Sign-in option
+   - Email Address: Enter the email address of your registered Netdata Cloud account and click on the verification link received by email on your mobile device.
+   - Sign-in with QR Code: Scan the QR code from the Netdata Cloud UI under **Profile Picture** --> **Settings** --> **Notifications** --> **Mobile App Notifications** --> **Show QR Code**
+
+### Netdata Configuration
+
+After linking your device, enable the toggle for **Mobile App Notifications** under the same settings panel.
diff --git a/integrations/cloud-notifications/integrations/opsgenie.md b/integrations/cloud-notifications/integrations/opsgenie.md
index 594945f4c..1587fd0b6 100644
--- a/integrations/cloud-notifications/integrations/opsgenie.md
+++ b/integrations/cloud-notifications/integrations/opsgenie.md
@@ -9,13 +9,8 @@ endmeta-->
 
 # Opsgenie
 
 
-
-From the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on Opsgenie.
-
 
 ## Setup
 
@@ -23,30 +18,27 @@ From the Netdata Cloud UI, you can manage your space's notification settings and
 
 ### Prerequisites
 
 - A Netdata Cloud account
-- Access to the Netdata Space as an **Admin**
-- The Netdata Space needs to be on a paid plan
+- Access to the Space as an **Admin**
+- The Space needs to be on a paid plan
 - You need to have permissions on Opsgenie to add new integrations.
 
 ### Opsgenie Server Configuration
 
-Steps to configure your Opsgenie to receive notifications from Netdata:
-
-1. Go to integrations tab of your team, click **Add integration**
-2. Pick **API** from available integrations. Copy your API Key and press **Save Integration**.
-3. Paste copied API key into the corresponding field in **Integration configuration** section of Opsgenie modal window in Netdata.
+1. Go to the integrations tab of your team and click **Add integration**
+2. Pick **API** from the available integrations and copy the API Key in order to add it to your integration configuration in the Netdata Cloud UI
 
-### Netdata Configuration Steps
+### Netdata Configuration
 
 1. Click on the **Space settings** cog (located above your profile icon)
-2. Click on the **Notification** tab
-3. Click on the **+ Add configuration** button (near the top-right corner of your screen)
-4. On the **Opsgenie** card click on **+ Add**
-5. 
A modal will be presented to you to enter the required details to enable the configuration:
-   * **Notification settings** are Netdata specific settings
-     - Configuration name - you can optionally provide a name for your configuration you can easily refer to it
-     - Rooms - by specifying a list of Rooms you are select to which nodes or areas of your infrastructure you want to be notified using this configuration
-     - Notification - you specify which notifications you want to be notified using this configuration: All Alerts and unreachable, All Alerts, Critical only
-   * **Integration configuration** are the specific notification integration required settings, which vary by notification method. For Opsgenie:
-     - API Key - a key provided on Opsgenie for the channel you want to receive your notifications.
+2. Click on the **Alerts & Notifications** tab
+3. Click on the **+ Add configuration** button
+4. Add the Opsgenie Integration
+5. A modal will be presented to you to enter the required details to enable the integration:
+   - **Notification settings**
+     - Configuration name (optional): A name for your configuration in order to easily refer to it
+     - Rooms: A list of Rooms for which you want to be notified
+     - Notifications: The notifications that you want to receive
+   - **Integration configuration**
+     - API Key: The key provided on Opsgenie for the channel where you want to receive your notifications
diff --git a/integrations/cloud-notifications/integrations/pagerduty.md b/integrations/cloud-notifications/integrations/pagerduty.md
index 274949730..b39a97c93 100644
--- a/integrations/cloud-notifications/integrations/pagerduty.md
+++ b/integrations/cloud-notifications/integrations/pagerduty.md
@@ -9,44 +9,38 @@ endmeta-->
 
 # PagerDuty
 
 
-
-From the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on PagerDuty.
-
 
 ## Setup
 
 ### Prerequisites
+
 - A Netdata Cloud account
-- Access to the Netdata Space as an **Admin**
-- The Netdata Space needs to be on a paid plan
+- Access to the Space as an **Admin**
+- The Space needs to be on a paid plan
 - You need to have a PagerDuty service to receive events using webhooks.
 
-
 ### PagerDuty Server Configuration
 
-Steps to configure your PagerDuty to receive notifications from Netdata:
 1. Create a service to receive events from your services directory page on PagerDuty
-2. At step 3, select `Events API V2` Integration or **View Webhooks** if you already have some defined
-3. Once the service is created, you will be redirected to its configuration page, where you can copy the **Integration Key** and **Integration URL (Alert Events)** fields to add them to your notification configuration in the Netdata UI.
+2. On the third step of the service creation, select `Events API V2` Integration
+3. Once the service is created, you will be redirected to its configuration page, where you can copy the **Integration Key** and **Integration URL (Alert Events)** in order to add them to your integration configuration in the Netdata Cloud UI
 
-### Netdata Configuration Steps
+### Netdata Configuration
 
 1. Click on the **Space settings** cog (located above your profile icon)
-2. Click on the **Notification** tab
-3. Click on the **+ Add configuration** button (near the top-right corner of your screen)
-4. On the **PagerDuty** card click on **+ Add**
-5. 
A modal will be presented to you to enter the required details to enable the configuration:
-   * **Notification settings** are Netdata specific settings
-     - Configuration name - you can optionally provide a name for your configuration you can easily refer to it
-     - Rooms - by specifying a list of Rooms you are select to which nodes or areas of your infrastructure you want to be notified using this configuration
-     - Notification - you specify which notifications you want to be notified using this configuration: All Alerts and unreachable, All Alerts, Critical only
-   * **Integration configuration** are the specific notification integration required settings, which vary by notification method. For PagerDuty:
-     - Integration Key - is a 32 character key provided by PagerDuty to receive events on your service.
-     - Integration URL (Alert Events) - is the URL provided by PagerDuty where we will send notifications.
+2. Click on the **Alerts & Notifications** tab
+3. Click on the **+ Add configuration** button
+4. Add the PagerDuty Integration
+5. A modal will be presented to you to enter the required details to enable the integration:
+   - **Notification settings**
+     - Configuration name (optional): A name for your configuration in order to easily refer to it
+     - Rooms: A list of Rooms for which you want to be notified
+     - Notifications: The notifications that you want to receive
+   - **Integration configuration**
+     - Integration Key: A 32-character key provided by PagerDuty to receive events on your service.
+     - Integration URL (Alert Events): The URL provided by PagerDuty where Netdata Cloud will send notifications.
diff --git a/integrations/cloud-notifications/integrations/rocketchat.md b/integrations/cloud-notifications/integrations/rocketchat.md
index 300848232..710cd16fa 100644
--- a/integrations/cloud-notifications/integrations/rocketchat.md
+++ b/integrations/cloud-notifications/integrations/rocketchat.md
@@ -9,13 +9,8 @@ endmeta-->
 
 # RocketChat
 
 
-
-From the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on RocketChat.
-
 
 ## Setup
 
@@ -23,38 +18,33 @@ From the Netdata Cloud UI, you can manage your space's notification settings and
 
 ### Prerequisites
 
 - A Netdata Cloud account
-- Access to the Netdata Space as an **Admin**
-- The Netdata Space needs to be on a paid plan
-- You need to have permissions on Mattermost to add new integrations.
-- You need to have a RocketChat app on your workspace to receive the webhooks.
-
-### Mattermost Server Configuration
+- Access to the Space as an **Admin**
+- The Space needs to be on a paid plan
+- You need to have permissions on RocketChat to add new integrations.
 
-Steps to configure your RocketChat to receive notifications from Netdata:
+### RocketChat Server Configuration
 
-1. In RocketChat, Navigate to Administration > Workspace > Integrations.
-2. Click **+New** at the top right corner.
-3. For more details about each parameter, check [create-a-new-incoming-webhook](https://docs.rocket.chat/use-rocket.chat/workspace-administration/integrations#create-a-new-incoming-webhook).
-4. After configuring integration, click Save.
-5. You will end up with a webhook endpoint that looks like below:
-   `https://your-server.rocket.chat/hooks/YYYYYYYYYYYYYYYYYYYYYYYY/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX`
-
-   - Treat this endpoint as a secret. Anyone who has it will be able to post messages to your RocketChat instance.
+Steps to configure your RocketChat server to receive notifications from Netdata Cloud:
+1. In RocketChat, navigate to Administration > Workspace > Integrations
+2. Click **+New** at the top right corner
+3. For more details about each parameter, check [Create a new incoming webhook](https://docs.rocket.chat/use-rocket.chat/workspace-administration/integrations#create-a-new-incoming-webhook)
+4. You will end up with a webhook endpoint that looks like `https://your-server.rocket.chat/hooks/YYYYYYYYYYYYYYYYYYYYYYYY/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX`; copy it in order to add it to your integration configuration in the Netdata Cloud UI
 
-For more details please check RocketChat's article Incoming webhooks for [RocketChat](https://docs.rocket.chat/use-rocket.chat/workspace-administration/integrations/).
+For more details, please check [Incoming webhooks for RocketChat](https://docs.rocket.chat/use-rocket.chat/workspace-administration/integrations/).
 
-### Netdata Configuration Steps
+### Netdata Configuration
 
 1. Click on the **Space settings** cog (located above your profile icon)
-2. Click on the **Notification** tab
-3. Click on the **+ Add configuration** button (near the top-right corner of your screen)
-4. On the **RocketChat** card click on **+ Add**
-5. A modal will be presented to you to enter the required details to enable the configuration:
-   * **Notification settings** are Netdata specific settings
-     - Configuration name - you can optionally provide a name for your configuration you can easily refer to it
-     - Rooms - by specifying a list of Rooms you are select to which nodes or areas of your infrastructure you want to be notified using this configuration
-     - Notification - you specify which notifications you want to be notified using this configuration: All Alerts and unreachable, All Alerts, Critical only
-   * **Integration configuration** are the specific notification integration required settings, which vary by notification method. For RocketChat:
-     - Webhook URL - URL provided on RocketChat for the channel you want to receive your notifications.
+2. Click on the **Alerts & Notifications** tab
+3. Click on the **+ Add configuration** button
+4. Add the RocketChat Integration
+5. A modal will be presented to you to enter the required details to enable the integration:
+   - **Notification settings**
+     - Configuration name (optional): A name for your configuration in order to easily refer to it
+     - Rooms: A list of Rooms for which you want to be notified
+     - Notifications: The notifications that you want to receive
+   - **Integration configuration**
+     - Webhook URL: The URL provided on RocketChat for the channel where you want to receive your notifications
diff --git a/integrations/cloud-notifications/integrations/slack.md b/integrations/cloud-notifications/integrations/slack.md
index bed51b5ad..e86f5b216 100644
--- a/integrations/cloud-notifications/integrations/slack.md
+++ b/integrations/cloud-notifications/integrations/slack.md
@@ -9,13 +9,8 @@ endmeta-->
 
 # Slack
 
 
-
-From the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on Slack. 
- - ## Setup @@ -23,36 +18,34 @@ From the Netdata Cloud UI, you can manage your space's notification settings and ### Prerequisites - A Netdata Cloud account -- Access to the Netdata Space as an **Admin** -- The Netdata Space needs to be on a paid plan +- Access to the Space as an **Admin** +- The Space needs to be on a paid plan - You need to have a Slack app on your workspace to receive the Webhooks. ### Slack Server Configuration -Steps to configure your Slack to receive notifications from Netdata: - -1. Create an app to receive webhook integrations. Check [Create an app](https://api.slack.com/apps?new_app=1) from Slack documentation for further details +1. Create an app to receive webhook integrations. Check the [Slack documentation](https://api.slack.com/apps?new_app=1) for further details 2. Install the app on your workspace 3. Configure Webhook URLs for your workspace - - On your app go to **Incoming Webhooks** and click on **activate incoming webhooks** - - At the bottom of **Webhook URLs for Your Workspace** section you have **Add New Webhook to Workspace** - - After pressing that specify the channel where you want your notifications to be delivered - - Once completed copy the Webhook URL that you will need to add to your notification configuration on Netdata UI + - On your app go to **Incoming Webhooks** and click on **activate incoming webhooks** + - At the bottom of **Webhook URLs for Your Workspace** section you have **Add New Webhook to Workspace** + - Specify the channel where you want your notifications to be delivered + - Once completed, copy the Webhook URL in order to add it to your integration configuration in the Netdata Cloud UI -For more details please check Slacks's article [Incoming webhooks for Slack](https://slack.com/help/articles/115005265063-Incoming-webhooks-for-Slack). +For more details please check [Incoming webhooks for Slack](https://slack.com/help/articles/115005265063-Incoming-webhooks-for-Slack). -### Netdata Configuration Steps +### Netdata Configuration 1. Click on the **Space settings** cog (located above your profile icon) -2. Click on the **Notification** tab -3. Click on the **+ Add configuration** button (near the top-right corner of your screen) -4. On the **Slack** card click on **+ Add** -5. A modal will be presented to you to enter the required details to enable the configuration: - * **Notification settings** are Netdata specific settings - - Configuration name - you can optionally provide a name for your configuration you can easily refer to it - - Rooms - by specifying a list of Rooms you are select to which nodes or areas of your infrastructure you want to be notified using this configuration - - Notification - you specify which notifications you want to be notified using this configuration: All Alerts and unreachable, All Alerts, Critical only - * **Integration configuration** are the specific notification integration required settings, which vary by notification method. For Slack: - - Webhook URL - URL provided on Slack for the channel you want to receive your notifications. +2. Click on the **Alerts & Notifications** tab +3. Click on the **+ Add configuration** button +4. Add the Slack Integration +5. 
A modal will be presented to you to enter the required details to enable the integration:
+   - **Notification settings**
+     - Configuration name (optional): A name for your configuration in order to easily refer to it
+     - Rooms: A list of Rooms for which you want to be notified
+     - Notifications: The notifications that you want to receive
+   - **Integration configuration**
+     - Webhook URL: URL provided on Slack for the channel you want to receive your notifications
 
 
diff --git a/integrations/cloud-notifications/integrations/splunk.md b/integrations/cloud-notifications/integrations/splunk.md
index b69032ba6..d7f8b17d6 100644
--- a/integrations/cloud-notifications/integrations/splunk.md
+++ b/integrations/cloud-notifications/integrations/splunk.md
@@ -9,39 +9,32 @@ endmeta-->
 
 # Splunk
 
-
-
-From the Cloud interface, you can manage your space's notification settings and from these you can add a specific configuration to get notifications delivered on Splunk.
-
-
 
 ## Setup
 
 ### Prerequisites
 
-To add Splunk notification you need:
-
 - A Netdata Cloud account
-- Access to the space as an **Admin**
+- Access to the Space as an **Admin**
 - The Space needs to be on a paid plan
-- URI and token for your Splunk HTTP Event Collector. Refer to the [Splunk documentation](https://docs.splunk.com/Documentation/Splunk/latest/Data/UsetheHTTPEventCollector) for detailed instructions.
+- The URI and token for your Splunk HTTP Event Collector. Refer to the [Splunk documentation](https://docs.splunk.com/Documentation/Splunk/latest/Data/UsetheHTTPEventCollector) for detailed instructions on how to set it up.
 
-### Steps
+### Netdata Configuration
 
 1. Click on the **Space settings** cog (located above your profile icon)
-2. Click on the **Notification** tab
-3. Click on the **+ Add configuration** button (near the top-right corner of your screen)
-4. On the **Splunk** card click on **+ Add**
-5. A modal will be presented to you to enter the required details to enable the configuration:
-   - **Notification settings** are Netdata specific settings
-     - Configuration name - provide a descriptive name for your configuration to easily identify it.
-     - Rooms - select the nodes or areas of your infrastructure you want to receive notifications about.
-     - Notification - choose the type of notifications you want to receive: All Alerts and unreachable, All Alerts, Critical only.
-   - **Integration configuration** are the specific notification integration required settings, which vary by notification method. For Splunk:
-     - HTTP Event Collector URI - The URI of your HTTP event collector in Splunk
-     - HTTP Event Collector Token - the token that Splunk provided to you when you created the HTTP Event Collector
+2. Click on the **Alerts & Notifications** tab
+3. Click on the **+ Add configuration** button
+4. Add the Splunk Integration
+5. 
A modal will be presented to you to enter the required details to enable the integration:
+   - **Notification settings**
+     - Configuration name (optional): A name for your configuration in order to easily refer to it
+     - Rooms: A list of Rooms for which you want to be notified
+     - Notifications: The notifications that you want to receive
+   - **Integration configuration**
+     - HTTP Event Collector URI: The URI of your HTTP event collector in Splunk
+     - HTTP Event Collector Token: The token that Splunk provided to you when you created the HTTP Event Collector
 
 
diff --git a/integrations/cloud-notifications/integrations/splunk_victorops.md b/integrations/cloud-notifications/integrations/splunk_victorops.md
index f5d7932a0..7bf881f4e 100644
--- a/integrations/cloud-notifications/integrations/splunk_victorops.md
+++ b/integrations/cloud-notifications/integrations/splunk_victorops.md
@@ -9,38 +9,31 @@ endmeta-->
 
 # Splunk VictorOps
 
-
-
-From the Cloud interface, you can manage your space's notification settings and from there you can add a specific configuration to get notifications delivered on Splunk On-Call/VictorOps.
-
-
 
 ## Setup
 
 ### Prerequisites
 
-To add Splunk VictorOps notification (also known as Splunk On-Call) you need:
-
 - A Netdata Cloud account
-- Access to the space as an **Admin**
+- Access to the Space as an **Admin**
 - The Space needs to be on a paid plan
-- Destination URL for your Splunk VictorOps REST Endpoint Integration. Refer to the [VictorOps documentation](https://help.victorops.com/knowledge-base/rest-endpoint-integration-guide) for detailed instructions.
+- The Destination URL for your Splunk VictorOps REST Endpoint Integration. Refer to the [VictorOps documentation](https://help.victorops.com/knowledge-base/rest-endpoint-integration-guide) for detailed instructions.
 
-### Steps
+### Netdata Configuration
 
 1. Click on the **Space settings** cog (located above your profile icon)
-2. Click on the **Notification** tab
-3. Click on the **+ Add configuration** button (near the top-right corner of your screen)
-4. On the **Splunk VictorOps** card click on **+ Add**
-5. A modal will be presented to you to enter the required details to enable the configuration:
-   - **Notification settings** are Netdata specific settings
-     - Configuration name - provide a descriptive name for your configuration to easily identify it.
-     - Rooms - select the nodes or areas of your infrastructure you want to receive notifications about.
-     - Notification - choose the type of notifications you want to receive: All Alerts and unreachable, All Alerts, Critical only.
-   - **Integration configuration** are the specific notification integration required settings, which vary by notification method. For Splunk VictorOps:
-     - Destination URL - The URL provided by VictorOps of your REST endpoint.
+2. Click on the **Alerts & Notifications** tab
+3. Click on the **+ Add configuration** button
+4. Add the Splunk VictorOps Integration
+5. A modal will be presented to you to enter the required details to enable the integration:
+   - **Notification settings**
+     - Configuration name (optional): A name for your configuration in order to easily refer to it
+     - Rooms: A list of Rooms for which you want to be notified
+     - Notifications: The notifications that you want to receive
+   - **Integration configuration**
+     - Destination URL: The URL provided by VictorOps for your REST endpoint
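+
+Before adding the Destination URL to Netdata Cloud, you can optionally verify that the endpoint accepts events by posting a test alert to it manually. The sketch below is illustrative only: the `message_type`, `entity_id`, and `state_message` fields are taken from the VictorOps REST Endpoint Integration guide, and `$DESTINATION_URL` is a placeholder for your own endpoint.
+
+```bash
+# Hypothetical smoke test: send a test INFO event to your REST endpoint.
+# Replace $DESTINATION_URL with the Destination URL from your integration.
+curl -X POST "$DESTINATION_URL" \
+  -H "Content-Type: application/json" \
+  -d '{
+        "message_type": "INFO",
+        "entity_id": "netdata/integration-test",
+        "state_message": "Test event sent while setting up the Netdata Cloud integration"
+      }'
+```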
diff --git a/integrations/cloud-notifications/integrations/telegram.md b/integrations/cloud-notifications/integrations/telegram.md index 68e34bacc..93867e474 100644 --- a/integrations/cloud-notifications/integrations/telegram.md +++ b/integrations/cloud-notifications/integrations/telegram.md @@ -9,43 +9,20 @@ endmeta--> # Telegram - - -From the Cloud interface, you can manage your space's notification settings and from these you can add a specific configuration to get notifications delivered on Telegram. - - ## Setup ### Prerequisites -To add Telegram notification you need: - - A Netdata Cloud account -- Access to the space as an **Admin** +- Access to the Space as an **Admin** - The Space needs to be on a paid plan -- The Telegram bot token, chat ID and _optionally_ the topic ID +- The Telegram bot token, chat ID and optionally the topic ID -### Steps - -1. Click on the **Space settings** cog (located above your profile icon) -2. Click on the **Notification** tab -3. Click on the **+ Add configuration** button (near the top-right corner of your screen) -4. On the **Telegram** card click on **+ Add** -5. A modal will be presented to you to enter the required details to enable the configuration: - - **Notification settings** are Netdata specific settings - - Configuration name - you can optionally provide a name for your configuration you can easily refer to it - - Rooms - by specifying a list of Rooms you are select to which nodes or areas of your infrastructure you want to be notified using this configuration - - Notification - you specify which notifications you want to be notified using this configuration: All Alerts and unreachable, All Alerts, Critical only - - **Integration configuration** are the specific notification integration required settings, which vary by notification method. For Telegram: - - Bot Token - the token of your bot - - Chat ID - the chat id where your bot will deliver messages to - - Topic ID - the identifier of the chat topic to which your bot will send messages. If omitted or 0, messages will be sent to the General topic. If topics are not supported, messages will be sent to the chat. - -### Getting the Telegram bot token, chat ID and topic ID +### Telegram Configuration - Bot token: To create one bot, contact the [@BotFather](https://t.me/BotFather) bot and send the command `/newbot` and follow the instructions. **Start a conversation with your bot or invite it into the group where you want it to send notifications**. - To get the chat ID you have two options: @@ -53,4 +30,20 @@ To add Telegram notification you need: - Alternatively, you can get the chat ID directly from the bot API. Send your bot a command in the chat you want to use, then check `https://api.telegram.org/bot{YourBotToken}/getUpdates`, eg. `https://api.telegram.org/bot111122223:7OpFlFFRzRBbrUUmIjj5HF9Ox2pYJZy5/getUpdates` - To get the topic ID, the easiest way is this: Post a message to that topic, then right-click on it and select `Copy Message Link`. Paste it on a scratchpad and notice that it has the following structure `https://t.me/c/XXXXXXXXXX/YY/ZZ`. The topic ID is `YY` (integer). +### Netdata Configuration + +1. Click on the **Space settings** cog (located above your profile icon) +2. Click on the **Alerts & Notifications** tab +3. Click on the **+ Add configuration** button +4. Add the Telegram Integration +5. 
A modal will be presented to you to enter the required details to enable the integration:
+   - **Notification settings**
+     - Configuration name (optional): A name for your configuration in order to easily refer to it
+     - Rooms: A list of Rooms for which you want to be notified
+     - Notifications: The notifications that you want to receive
+   - **Integration configuration**
+     - Bot Token: The token of your bot
+     - Chat ID: The chat ID where your bot will deliver messages to
+     - Topic ID: The identifier of the chat topic to which your bot will send messages. If omitted or 0, messages will be sent to the General topic. If topics are not supported, messages will be sent to the chat.
+
 
diff --git a/integrations/cloud-notifications/integrations/webhook.md b/integrations/cloud-notifications/integrations/webhook.md
index 24ab78232..4d94c2c4e 100644
--- a/integrations/cloud-notifications/integrations/webhook.md
+++ b/integrations/cloud-notifications/integrations/webhook.md
@@ -9,13 +9,8 @@ endmeta-->
 
 # Webhook
 
-
-
-From the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on a webhook using a predefined schema.
-
-
 
 ## Setup
 
@@ -23,142 +18,142 @@ From the Netdata Cloud UI, you can manage your space's notification settings and
 
 ### Prerequisites
 
 - A Netdata Cloud account
-- Access to the Netdata Space as an **Admin**
-- The Netdata Space needs to be on a paid plan
+- Access to the Space as an **Admin**
+- The Space needs to be on a paid plan
 - You need to have an app that allows you to receive webhooks following a predefined schema.
 
-### Netdata Configuration Steps
+### Netdata Configuration
 
 1. Click on the **Space settings** cog (located above your profile icon)
-2. Click on the **Notification** tab
-3. Click on the **+ Add configuration** button (near the top-right corner of your screen)
-4. On the **Webhook** card click on **+ Add**
+2. Click on the **Alerts & Notifications** tab
+3. Click on the **+ Add configuration** button
+4. Add the Webhook integration
 5. A modal will be presented to you to enter the required details to enable the configuration:
-   * **Notification settings** are Netdata specific settings
-     - Configuration name - you can optionally provide a name for your configuration you can easily refer to it
-     - Rooms - by specifying a list of Rooms you are select to which nodes or areas of your infrastructure you want to be notified using this configuration
-     - Notification - you specify which notifications you want to be notified using this configuration: All Alerts and unreachable, All Alerts, Critical only
-   * **Integration configuration** are the specific notification integration required settings, which vary by notification method. For Webhook:
-     - Webhook URL - webhook URL is the url of the service that Netdata will send notifications to. In order to keep the communication secured, we only accept HTTPS urls.
-     - Extra headers - these are optional key-value pairs that you can set to be included in the HTTP requests sent to the webhook URL.
-     - Authentication Mechanism - Netdata webhook integration supports 3 different authentication mechanisms.
-       * Mutual TLS (recommended) - default authentication mechanism used if no other method is selected.
-       * Basic - the client sends a request with an Authorization header that includes a base64-encoded string in the format **username:password**. These will settings will be required inputs.
-       * Bearer - the client sends a request with an Authorization header that includes a **bearer token**. This setting will be a required input.
+   - **Notification settings**
+     - Configuration name (optional): A name for your configuration in order to easily refer to it
+     - Rooms: A list of Rooms for which you want to be notified
+     - Notifications: The notifications that you want to receive
+   - **Integration configuration**
+     - Webhook URL: The URL of the service that Netdata will send notifications to. In order to keep the communication secured, Netdata only accepts HTTPS URLs.
+     - Extra headers: Optional key-value pairs that you can set to be included in the HTTP requests sent to the webhook URL.
+     - Authentication Mechanism: Netdata webhook integration supports 3 different authentication mechanisms:
+       - Mutual TLS (recommended): Default authentication mechanism used if no other method is selected
+       - Basic: The client sends a request with an Authorization header that includes a base64-encoded string in the format **username:password**.
+       - Bearer: The client sends a request with an Authorization header that includes a **bearer token**.
 
+### Webhook service
 
-  ### Webhook service
+A webhook service allows your application to receive real-time alerts from Netdata by sending HTTP requests to a specified URL.
 
-  A webhook integration allows your application to receive real-time alerts from Netdata by sending HTTP requests to a specified URL. In this document, we'll go over the steps to set up a generic webhook integration, including adding headers, and implementing different types of authorization mechanisms.
+In this section, we'll go over the steps to set up a generic webhook service, including adding headers, and implementing different types of authorization mechanisms.
 
-  #### Netdata webhook integration
+#### Netdata webhook integration
 
-  A webhook integration is a way for one service to notify another service about events that occur within it. This is done by sending an HTTP POST request to a specified URL (known as the "webhook URL") when an event occurs.
+Netdata webhook integration service will send alert and reachability notifications to the destination service as soon as they are detected.
 
-  Netdata webhook integration service will send alert and reachability notifications to the destination service as soon as they are detected.
+For alert notifications, the content sent to the destination service contains a JSON object with the following properties:
 
-  For alert notifications, the content sent to the destination service contains a JSON object with the following properties:
 
+| field | type | description |
+|:----------------------------------|:--------------------------------|:--------------------------------------------------------------------------|
+| message | string | A summary message of the alert. |
+| alert | string | The alert the notification is related to. |
+| info | string | Additional info related with the alert. |
+| chart | string | The chart associated with the alert. |
+| context | string | The chart context. |
+| space | string | The space where the node that raised the alert is assigned. |
+| Rooms | object\[object(string,string)\] | Object with a list of Room names and URLs where the node belongs to. |
+| family | string | Context family. |
+| class | string | Classification of the alert, e.g. `Error`. |
+| severity | string | Alert severity, can be one of `warning`, `critical` or `clear`. |
+| date | string | Date of the alert in ISO8601 format. 
| +| duration | string | Duration the alert has been raised. | +| additional_active_critical_alerts | integer | Number of additional critical alerts currently existing on the same node. | +| additional_active_warning_alerts | integer | Number of additional warning alerts currently existing on the same node. | +| alert_url | string | Netdata Cloud URL for this alert. | - | field | type | description | - | :-- | :-- | :-- | - | message | string | A summary message of the alert. | - | alarm | string | The alarm the notification is about. | - | info | string | Additional info related with the alert. | - | chart | string | The chart associated with the alert. | - | context | string | The chart context. | - | space | string | The space where the node that raised the alert is assigned. | - | Rooms | object[object(string,string)] | Object with list of Rooms names and urls where the node belongs to. | - | family | string | Context family. | - | class | string | Classification of the alert, e.g. "Error". | - | severity | string | Alert severity, can be one of "warning", "critical" or "clear". | - | date | string | Date of the alert in ISO8601 format. | - | duration | string | Duration the alert has been raised. | - | additional_active_critical_alerts | integer | Number of additional critical alerts currently existing on the same node. | - | additional_active_warning_alerts | integer | Number of additional warning alerts currently existing on the same node. | - | alarm_url | string | Netdata Cloud URL for this alarm. | +For reachability notifications, the JSON object will contain the following properties: - For reachability notifications, the JSON object will contain the following properties: +| field | type | description | +|:-----------------|:--------|:------------------------------------------------------------------------------------------------------------------------------| +| message | string | A summary message of the reachability alert. | +| url | string | Netdata Cloud URL for the host experiencing the reachability alert. | +| host | string | The hostname experiencing the reachability alert. | +| severity | string | Severity for this notification. If host is reachable, severity will be `info`, if host is unreachable, it will be `critical`. | +| status | object | An object with the status information. | +| status.reachable | boolean | `true` if host is reachable, `false` otherwise | +| status.text | string | Can be `reachable` or `unreachable` | - | field | type | description | - | :-- | :-- | :-- | - | message | string | A summary message of the reachability alert. | - | url | string | Netdata Cloud URL for the host experiencing the reachability alert. | - | host | string | the host experiencing the reachability alert. | - | severity | string | severity for this notification. If host is reachable, severity will be 'info', if host is unreachable, it will be 'critical'. | - | status | object | an object with the status information. | - | status.reachable | boolean | true if host is reachable, false otherwise | - | status.text | string | can be 'reachable' or 'unreachable' | +#### Extra headers - #### Extra headers +When setting up a webhook service, the user can specify a set of headers to be included in the HTTP requests sent to the webhook URL. - When setting up a webhook integration, the user can specify a set of headers to be included in the HTTP requests sent to the webhook URL. 
+By default, the following headers will be sent in the HTTP request - By default, the following headers will be sent in the HTTP request + | **Header** | **Value** | + |:------------:|------------------| + | Content-Type | application/json | - | **Header** | **Value** | - |:-------------------------------:|-----------------------------| - | Content-Type | application/json | +#### Authentication mechanisms - #### Authentication mechanisms +Netdata webhook integration supports 3 different authentication mechanisms: - Netdata webhook integration supports 3 different authentication mechanisms: +##### Mutual TLS authentication (recommended) - ##### Mutual TLS authentication (recommended) +In mutual Transport Layer Security (mTLS) authentication, the client and the server authenticate each other using X.509 certificates. This ensures that the client is connecting to the intended server, and that the server is only accepting connections from authorized clients. - In mutual Transport Layer Security (mTLS) authentication, the client and the server authenticate each other using X.509 certificates. This ensures that the client is connecting to the intended server, and that the server is only accepting connections from authorized clients. +This is the default authentication mechanism used if no other method is selected. - This is the default authentication mechanism used if no other method is selected. +To take advantage of mutual TLS, you can configure your server to verify Netdata's client certificate. In order to achieve this, the Netdata client sending the notification supports mutual TLS (mTLS) to identify itself with a client certificate that your server can validate. - To take advantage of mutual TLS, you can configure your server to verify Netdata's client certificate. In order to achieve this, the Netdata client sending the notification supports mutual TLS (mTLS) to identify itself with a client certificate that your server can validate. +The steps to perform this validation are as follows: - The steps to perform this validation are as follows: - - - Store Netdata CA certificate on a file in your disk. The content of this file should be: +- Store Netdata CA certificate on a file in your disk. The content of this file should be:
Netdata CA certificate - ``` - -----BEGIN CERTIFICATE----- - MIIF0jCCA7qgAwIBAgIUDV0rS5jXsyNX33evHEQOwn9fPo0wDQYJKoZIhvcNAQEN - BQAwgYAxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH - Ew1TYW4gRnJhbmNpc2NvMRYwFAYDVQQKEw1OZXRkYXRhLCBJbmMuMRIwEAYDVQQL - EwlDbG91ZCBTUkUxGDAWBgNVBAMTD05ldGRhdGEgUm9vdCBDQTAeFw0yMzAyMjIx - MjQzMDBaFw0zMzAyMTkxMjQzMDBaMIGAMQswCQYDVQQGEwJVUzETMBEGA1UECBMK - Q2FsaWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEWMBQGA1UEChMNTmV0 - ZGF0YSwgSW5jLjESMBAGA1UECxMJQ2xvdWQgU1JFMRgwFgYDVQQDEw9OZXRkYXRh - IFJvb3QgQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCwIg7z3R++ - ppQYYVVoMIDlhWO3qVTMsAQoJYEvVa6fqaImUBLW/k19LUaXgUJPohB7gBp1pkjs - QfY5dBo8iFr7MDHtyiAFjcQV181sITTMBEJwp77R4slOXCvrreizhTt1gvf4S1zL - qeHBYWEgH0RLrOAqD0jkOHwewVouO0k3Wf2lEbCq3qRk2HeDvkv0LR7sFC+dDms8 - fDHqb/htqhk+FAJELGRqLeaFq1Z5Eq1/9dk4SIeHgK5pdYqsjpBzOTmocgriw6he - s7F3dOec1ZZdcBEAxOjbYt4e58JwuR81cWAVMmyot5JNCzYVL9e5Vc5n22qt2dmc - Tzw2rLOPt9pT5bzbmyhcDuNg2Qj/5DySAQ+VQysx91BJRXyUimqE7DwQyLhpQU72 - jw29lf2RHdCPNmk8J1TNropmpz/aI7rkperPugdOmxzP55i48ECbvDF4Wtazi+l+ - 4kx7ieeLfEQgixy4lRUUkrgJlIDOGbw+d2Ag6LtOgwBiBYnDgYpvLucnx5cFupPY - Cy3VlJ4EKUeQQSsz5kVmvotk9MED4sLx1As8V4e5ViwI5dCsRfKny7BeJ6XNPLnw - PtMh1hbiqCcDmB1urCqXcMle4sRhKccReYOwkLjLLZ80A+MuJuIEAUUuEPCwywzU - R7pagYsmvNgmwIIuJtB6mIJBShC7TpJG+wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMC - AQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU9IbvOsPSUrpr8H2zSafYVQ9e - Ft8wDQYJKoZIhvcNAQENBQADggIBABQ08aI31VKZs8jzg+y/QM5cvzXlVhcpkZsY - 1VVBr0roSBw9Pld9SERrEHto8PVXbadRxeEs4sKivJBKubWAooQ6NTvEB9MHuGnZ - VCU+N035Gq/mhBZgtIs/Zz33jTB2ju3G4Gm9VTZbVqd0OUxFs41Iqvi0HStC3/Io - rKi7crubmp5f2cNW1HrS++ScbTM+VaKVgQ2Tg5jOjou8wtA+204iYXlFpw9Q0qnP - qq6ix7TfLLeRVp6mauwPsAJUgHZluz7yuv3r7TBdukU4ZKUmfAGIPSebtB3EzXfH - 7Y326xzv0hEpjvDHLy6+yFfTdBSrKPsMHgc9bsf88dnypNYL8TUiEHlcTgCGU8ts - ud8sWN2M5FEWbHPNYRVfH3xgY2iOYZzn0i+PVyGryOPuzkRHTxDLPIGEWE5susM4 - X4bnNJyKH1AMkBCErR34CLXtAe2ngJlV/V3D4I8CQFJdQkn9tuznohUU/j80xvPH - FOcDGQYmh4m2aIJtlNVP6+/92Siugb5y7HfslyRK94+bZBg2D86TcCJWaaZOFUrR - Y3WniYXsqM5/JI4OOzu7dpjtkJUYvwtg7Qb5jmm8Ilf5rQZJhuvsygzX6+WM079y - nsjoQAm6OwpTN5362vE9SYu1twz7KdzBlUkDhePEOgQkWfLHBJWwB+PvB1j/cUA3 - 5zrbwvQf - -----END CERTIFICATE----- - ``` + ```text + -----BEGIN CERTIFICATE----- + MIIF0jCCA7qgAwIBAgIUDV0rS5jXsyNX33evHEQOwn9fPo0wDQYJKoZIhvcNAQEN + BQAwgYAxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH + Ew1TYW4gRnJhbmNpc2NvMRYwFAYDVQQKEw1OZXRkYXRhLCBJbmMuMRIwEAYDVQQL + EwlDbG91ZCBTUkUxGDAWBgNVBAMTD05ldGRhdGEgUm9vdCBDQTAeFw0yMzAyMjIx + MjQzMDBaFw0zMzAyMTkxMjQzMDBaMIGAMQswCQYDVQQGEwJVUzETMBEGA1UECBMK + Q2FsaWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEWMBQGA1UEChMNTmV0 + ZGF0YSwgSW5jLjESMBAGA1UECxMJQ2xvdWQgU1JFMRgwFgYDVQQDEw9OZXRkYXRh + IFJvb3QgQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCwIg7z3R++ + ppQYYVVoMIDlhWO3qVTMsAQoJYEvVa6fqaImUBLW/k19LUaXgUJPohB7gBp1pkjs + QfY5dBo8iFr7MDHtyiAFjcQV181sITTMBEJwp77R4slOXCvrreizhTt1gvf4S1zL + qeHBYWEgH0RLrOAqD0jkOHwewVouO0k3Wf2lEbCq3qRk2HeDvkv0LR7sFC+dDms8 + fDHqb/htqhk+FAJELGRqLeaFq1Z5Eq1/9dk4SIeHgK5pdYqsjpBzOTmocgriw6he + s7F3dOec1ZZdcBEAxOjbYt4e58JwuR81cWAVMmyot5JNCzYVL9e5Vc5n22qt2dmc + Tzw2rLOPt9pT5bzbmyhcDuNg2Qj/5DySAQ+VQysx91BJRXyUimqE7DwQyLhpQU72 + jw29lf2RHdCPNmk8J1TNropmpz/aI7rkperPugdOmxzP55i48ECbvDF4Wtazi+l+ + 4kx7ieeLfEQgixy4lRUUkrgJlIDOGbw+d2Ag6LtOgwBiBYnDgYpvLucnx5cFupPY + Cy3VlJ4EKUeQQSsz5kVmvotk9MED4sLx1As8V4e5ViwI5dCsRfKny7BeJ6XNPLnw + PtMh1hbiqCcDmB1urCqXcMle4sRhKccReYOwkLjLLZ80A+MuJuIEAUUuEPCwywzU + R7pagYsmvNgmwIIuJtB6mIJBShC7TpJG+wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMC + 
AQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU9IbvOsPSUrpr8H2zSafYVQ9e + Ft8wDQYJKoZIhvcNAQENBQADggIBABQ08aI31VKZs8jzg+y/QM5cvzXlVhcpkZsY + 1VVBr0roSBw9Pld9SERrEHto8PVXbadRxeEs4sKivJBKubWAooQ6NTvEB9MHuGnZ + VCU+N035Gq/mhBZgtIs/Zz33jTB2ju3G4Gm9VTZbVqd0OUxFs41Iqvi0HStC3/Io + rKi7crubmp5f2cNW1HrS++ScbTM+VaKVgQ2Tg5jOjou8wtA+204iYXlFpw9Q0qnP + qq6ix7TfLLeRVp6mauwPsAJUgHZluz7yuv3r7TBdukU4ZKUmfAGIPSebtB3EzXfH + 7Y326xzv0hEpjvDHLy6+yFfTdBSrKPsMHgc9bsf88dnypNYL8TUiEHlcTgCGU8ts + ud8sWN2M5FEWbHPNYRVfH3xgY2iOYZzn0i+PVyGryOPuzkRHTxDLPIGEWE5susM4 + X4bnNJyKH1AMkBCErR34CLXtAe2ngJlV/V3D4I8CQFJdQkn9tuznohUU/j80xvPH + FOcDGQYmh4m2aIJtlNVP6+/92Siugb5y7HfslyRK94+bZBg2D86TcCJWaaZOFUrR + Y3WniYXsqM5/JI4OOzu7dpjtkJUYvwtg7Qb5jmm8Ilf5rQZJhuvsygzX6+WM079y + nsjoQAm6OwpTN5362vE9SYu1twz7KdzBlUkDhePEOgQkWfLHBJWwB+PvB1j/cUA3 + 5zrbwvQf + -----END CERTIFICATE----- + ``` +
- - Enable client certificate validation on the web server that is doing the TLS termination. Below we show you how to perform this configuration in `NGINX` and `Apache` +- Enable client certificate validation on the web server that is doing the TLS termination. Below there are examples on how to perform this configuration in `NGINX` and `Apache`. - **NGINX** + **NGINX** ```bash server { @@ -172,7 +167,7 @@ From the Netdata Cloud UI, you can manage your space's notification settings and if ($ssl_client_s_dn !~ "CN=app.netdata.cloud") { return 403; } - # ... existing location configuration ... + # ... existing location configuration ... } } ``` @@ -192,68 +187,68 @@ From the Netdata Cloud UI, you can manage your space's notification settings and ``` - ##### Basic authentication +##### Basic authentication - In basic authorization, the client sends a request with an Authorization header that includes a base64-encoded string in the format username:password. The server then uses this information to authenticate the client. If this authentication method is selected, the user can set the user and password that will be used when connecting to the destination service. +In basic authorization, the client sends a request with an Authorization header that includes a base64-encoded string in the format username:password. The server then uses this information to authenticate the client. If this authentication method is selected, the user can set the user and password that will be used when connecting to the destination service. - ##### Bearer token authentication +##### Bearer token authentication - In bearer token authentication, the client sends a request with an Authorization header that includes a bearer token. The server then uses this token to authenticate the client. Bearer tokens are typically generated by an authentication service, and are passed to the client after a successful authentication. If this method is selected, the user can set the token to be used for connecting to the destination service. +In bearer token authentication, the client sends a request with an Authorization header that includes a bearer token. The server then uses this token to authenticate the client. Bearer tokens are typically generated by an authentication service, and are passed to the client after a successful authentication. If this method is selected, the user can set the token to be used for connecting to the destination service. - ##### Challenge secret +##### Challenge secret - To validate that you have ownership of the web application that will receive the webhook events, we are using a challenge response check mechanism. +To validate that you have ownership of the web application that will receive the webhook events, Netdata is using a challenge response check mechanism. - This mechanism works as follows: +This mechanism works as follows: - - The challenge secret parameter that you provide is a shared secret between you and Netdata only. - - On your request for creating a new Webhook integration, we will make a GET request to the url of the webhook, adding a query parameter `crc_token`, consisting of a random string. - - You will receive this request on your application and it must construct an encrypted response, consisting of a base64-encoded HMAC SHA-256 hash created from the crc_token and the shared secret. The response will be in the format: +- The challenge secret parameter that you provide is a shared secret between only you and Netdata. 
+- On your request for creating a new Webhook integration, Netdata will make a GET request to the URL of the webhook, adding a query parameter `crc_token`, consisting of a random string.
+- You will receive this request on your application and it must construct a response, consisting of a base64-encoded HMAC SHA-256 hash created from the crc_token and the shared secret. The response will be in the format:
 
-    ```json
-    {
+  ```json
+  {
     "response_token": "sha256=9GKoHJYmcHIkhD+C182QWN79YBd+D+Vkj4snmZrfNi4="
-    }
-    ```
+  }
+  ```
 
-  - We will compare your application's response with the hash that we will generate using the challenge secret, and if they are the same, the integration creation will succeed.
+- Netdata will compare your application's response with the hash that it will generate using the challenge secret, and if they are the same, the integration creation will succeed.
 
-  We will do this validation everytime you update your integration configuration.
+Netdata does this validation every time you update your integration configuration.
 
-  - Response requirements:
-    - A base64 encoded HMAC SHA-256 hash created from the crc_token and the shared secret.
-    - Valid response_token and JSON format.
-    - Latency less than 5 seconds.
-    - 200 HTTP response code.
+- Response requirements:
+  - A base64 encoded HMAC SHA-256 hash created from the crc_token and the shared secret.
+  - Valid response_token and JSON format.
+  - Latency less than 5 seconds.
+  - 200 HTTP response code.
 
-  **Example response token generation in Python:**
+**Example response token generation in Python:**
 
-  Here you can see how to define a handler for a Flask application in python 3:
+Here you can see how to define a handler for a Flask application in Python 3:
 
-  ```python
-  import base64
-  import hashlib
-  import hmac
-  import json
+```python
+import base64
+import hashlib
+import hmac
+import json
 
-  key ='YOUR_CHALLENGE_SECRET'
+from flask import Flask, request
+
+app = Flask(__name__)
+
+key = 'YOUR_CHALLENGE_SECRET'
 
-  @app.route('/webhooks/netdata')
-  def webhook_challenge():
-  token = request.args.get('crc_token').encode('ascii')
+@app.route('/webhooks/netdata')
+def webhook_challenge():
+    token = request.args.get('crc_token').encode('ascii')
 
-  # creates HMAC SHA-256 hash from incomming token and your consumer secret
-  sha256_hash_digest = hmac.new(key.encode(),
-  msg=token,
-  digestmod=hashlib.sha256).digest()
+    # creates HMAC SHA-256 hash from the incoming token and your consumer secret
+    sha256_hash_digest = hmac.new(key.encode(),
+                                  msg=token,
+                                  digestmod=hashlib.sha256).digest()
 
-  # construct response data with base64 encoded hash
-  response = {
-  'response_token': 'sha256=' + base64.b64encode(sha256_hash_digest).decode('ascii')
-  }
+    # construct response data with base64 encoded hash
+    response = {
+        'response_token': 'sha256=' + base64.b64encode(sha256_hash_digest).decode('ascii')
+    }
 
-  # returns properly formatted json response
-  return json.dumps(response)
-  ```
+    # returns properly formatted json response
+    return json.dumps(response)
+```
 
diff --git a/integrations/cloud-notifications/metadata.yaml b/integrations/cloud-notifications/metadata.yaml
index 6141034f1..586a16998 100644
--- a/integrations/cloud-notifications/metadata.yaml
+++ b/integrations/cloud-notifications/metadata.yaml
@@ -1,694 +1,625 @@
 # yamllint disable rule:line-length
 ---
-- id: 'notify-cloud-mobile-app'
+- id: "notify-cloud-mobile-app"
   meta:
-    name: 'Netdata Mobile App'
-    link: 'https://netdata.cloud'
+    name: "Netdata Mobile App"
+    link: "https://netdata.cloud"
   categories:
     - notify.cloud
-    icon_filename: 
'netdata.png'
+    icon_filename: "netdata.png"
   keywords:
     - mobile-app
     - phone
     - personal-notifications
-  overview:
-    notification_description: "From the Netdata Cloud UI, you can manage your user notification settings and enable the configuration to deliver notifications on the Netdata Mobile Application."
-    notification_limitations: ''
   setup:
     description: |
       ### Prerequisites
+
       - A Netdata Cloud account
-      - You need to have the Netdata Mobile Application installed on your [Android](https://play.google.com/store/apps/details?id=cloud.netdata.android&pli=1) or [iOS](https://apps.apple.com/in/app/netdata-mobile/id6474659622) phone.
-
-      ### Netdata Mobile App Configuration
-      Steps to login to the Netdata Mobile Application to receive alert and reachability and alert notifications:
-      1. Download the Netdata Mobile Application from [Google Play Store](https://play.google.com/store/apps/details?id=cloud.netdata.android&pli=1) or the [iOS App Store](https://apps.apple.com/in/app/netdata-mobile/id6474659622)
-      2. Open the App and Choose the Sign In Option
-        - Sign In with Email Address: Enter the Email Address of your registered Netdata Cloud Account and Click on the Verification link received by Email on your mobile device.
-        - Sign In with QR Code: Scan the QR Code from your `Netdata Cloud` UI under **User Settings** --> **Notifications** --> **Mobile App Notifications** --> **Show QR Code**
-      3. Start receiving alert and reachability notifications for your **Space(s)** on a **Paid Subscription plan**
-
-      ### Netdata Configuration Steps
-      1. Click on the **User settings** on the bottom left of your screen (your profile icon)
-      2. Click on the **Notifications** tab
-      3. Enable **Mobile App Notifications** if disabled (Enabled by default)
-      4. Use the **Show QR Code** Option to login to your mobile device by scanning the **QR Code**
-- id: 'notify-cloud-discord'
+      - You need to have the Netdata Mobile App installed on your [Android](https://play.google.com/store/apps/details?id=cloud.netdata.android&pli=1) or [iOS](https://apps.apple.com/in/app/netdata-mobile/id6474659622) phone.
+
+      ### Netdata Mobile App Configuration and device linking
+
+      In order to log in to the Netdata Mobile App:
+
+      1. Download the Netdata Mobile App from [Google Play Store](https://play.google.com/store/apps/details?id=cloud.netdata.android&pli=1) or the [iOS App Store](https://apps.apple.com/in/app/netdata-mobile/id6474659622)
+      2. Open the app and choose your sign-in option
+        - Email Address: Enter the email address of your registered Netdata Cloud account and click on the verification link received by email on your mobile device.
+        - Sign-in with QR Code: Scan the QR code from the Netdata Cloud UI under **Profile Picture** --> **Settings** --> **Notifications** --> **Mobile App Notifications** --> **Show QR Code**
+
+      ### Netdata Configuration
+
+      After linking your device, enable the toggle for **Mobile App Notifications** under the same settings panel.
+
+- id: "notify-cloud-discord"
   meta:
-    name: 'Discord'
-    link: 'https://discord.com/'
+    name: "Discord"
+    link: "https://discord.com/"
   categories:
     - notify.cloud
-    icon_filename: 'discord.png'
+    icon_filename: "discord.png"
   keywords:
     - discord
     - community
-  overview:
-    notification_description: "From the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on Discord."
-    notification_limitations: ''
   setup:
     description: |
       ### Prerequisites
+
       - A Netdata Cloud account
-      - Access to the Netdata Space as an **Admin**
-      - You need to have a Discord server able to receive webhooks integrations.
+      - Access to the Space as an **Admin**
 
       ### Discord Server Configuration
-      Steps to configure your Discord server to receive [webhook notifications](https://support.discord.com/hc/en-us/articles/228383668) from Netdata:
-      1. Go to `Server Settings` --> `Integrations`
+
+      1. Go to **Server Settings** --> **Integrations**
       2. **Create Webhook** or **View Webhooks** if you already have some defined
       3. Specify the **Name** and **Channel** on your new webhook
-      4. Use Webhook URL to add your notification configuration on Netdata UI
+      4. Keep note of the **Webhook URL** as you will need it for the configuration of the integration on the Netdata Cloud UI
+
+      ### Netdata Configuration
 
-      ### Netdata Configuration Steps
       1. Click on the **Space settings** cog (located above your profile icon)
-      2. Click on the **Notification** tab
-      3. Click on the **+ Add configuration** button (near the top-right corner of your screen)
-      4. On the **Discord** card click on **+ Add**
-      5. A modal will be presented to you to enter the required details to enable the configuration:
-        * **Notification settings** are Netdata specific settings
-          - Configuration name - you can optionally provide a name for your configuration you can easily refer to it
-          - Rooms - by specifying a list of Rooms you are select to which nodes or areas of your infrastructure you want to be notified using this configuration
-          - Notification - you specify which notifications you want to be notified using this configuration: All Alerts and unreachable, All Alerts, Critical only
-        * **Integration configuration** are the specific notification integration required settings, which vary by notification method. For Discord:
-          - Define the type channel you want to send notifications to: **Text channel** or **Forum channel**
-          - Webhook URL - URL provided on Discord for the channel you want to receive your notifications.
-          - Thread name - if the Discord channel is a **Forum channel** you will need to provide the thread name as well
-
-- id: 'notify-cloud-pagerduty'
+      2. Click on the **Alerts & Notifications** tab
+      3. Click on the **+ Add configuration** button
+      4. Add the Discord Integration
+      5. A modal will be presented to you to enter the required details to enable the integration:
+          - **Notification settings**
+              - Configuration name (optional): A name for your configuration in order to easily refer to it
+              - Rooms: A list of Rooms for which you want to be notified
+              - Notifications: The notifications that you want to receive
+          - **Integration configuration**
+              - Webhook URL: The URL you copied from the previous section
+              - Channel Parameters: Select the channel type to which the notifications will be sent. If it is a Forum channel, you will also need to specify a thread name
+
+- id: "notify-cloud-pagerduty"
   meta:
-    name: 'PagerDuty'
-    link: 'https://www.pagerduty.com/'
+    name: "PagerDuty"
+    link: "https://www.pagerduty.com/"
   categories:
     - notify.cloud
-    icon_filename: 'pagerduty.png'
+    icon_filename: "pagerduty.png"
   keywords:
     - pagerduty
-  overview:
-    notification_description: "From the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on PagerDuty."
-    notification_limitations: ''
   setup:
     description: |
       ### Prerequisites
+
       - A Netdata Cloud account
-      - Access to the Netdata Space as an **Admin**
-      - The Netdata Space needs to be on a paid plan
+      - Access to the Space as an **Admin**
+      - The Space needs to be on a paid plan
       - You need to have a PagerDuty service to receive events using webhooks.
 
-      ### PagerDuty Server Configuration
-      Steps to configure your PagerDuty to receive notifications from Netdata:
 
       1. Create a service to receive events from your services directory page on PagerDuty
-      2. At step 3, select `Events API V2` Integration or **View Webhooks** if you already have some defined
-      3. Once the service is created, you will be redirected to its configuration page, where you can copy the **Integration Key** and **Integration URL (Alert Events)** fields to add them to your notification configuration in the Netdata UI.
+      2. On the third step of the service creation, select `Events API V2` Integration
+      3. Once the service is created, you will be redirected to its configuration page, where you can copy the **Integration Key** and **Integration URL (Alert Events)** in order to add them to your integration configuration in the Netdata Cloud UI
 
-      ### Netdata Configuration Steps
+      ### Netdata Configuration
 
       1. Click on the **Space settings** cog (located above your profile icon)
-      2. Click on the **Notification** tab
-      3. Click on the **+ Add configuration** button (near the top-right corner of your screen)
-      4. On the **PagerDuty** card click on **+ Add**
-      5. A modal will be presented to you to enter the required details to enable the configuration:
-        * **Notification settings** are Netdata specific settings
-          - Configuration name - you can optionally provide a name for your configuration you can easily refer to it
-          - Rooms - by specifying a list of Rooms you are select to which nodes or areas of your infrastructure you want to be notified using this configuration
-          - Notification - you specify which notifications you want to be notified using this configuration: All Alerts and unreachable, All Alerts, Critical only
-        * **Integration configuration** are the specific notification integration required settings, which vary by notification method. For PagerDuty:
-          - Integration Key - is a 32 character key provided by PagerDuty to receive events on your service.
-          - Integration URL (Alert Events) - is the URL provided by PagerDuty where we will send notifications.
+      2. Click on the **Alerts & Notifications** tab
+      3. Click on the **+ Add configuration** button
+      4. Add the PagerDuty Integration
+      5. A modal will be presented to you to enter the required details to enable the integration:
+          - **Notification settings**
+              - Configuration name (optional): A name for your configuration in order to easily refer to it
+              - Rooms: A list of Rooms for which you want to be notified
+              - Notifications: The notifications that you want to receive
+          - **Integration configuration**
+              - Integration Key: A 32-character key provided by PagerDuty to receive events on your service.
+              - Integration URL (Alert Events): The URL provided by PagerDuty where Netdata Cloud will send notifications.
+ +- id: "notify-cloud-slack" meta: - name: 'Slack' - link: 'https://slack.com/' + name: "Slack" + link: "https://slack.com/" categories: - notify.cloud - icon_filename: 'slack.png' + icon_filename: "slack.png" keywords: - slack - overview: - notification_description: "From the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on Slack." - notification_limitations: '' setup: description: | ### Prerequisites - A Netdata Cloud account - - Access to the Netdata Space as an **Admin** - - The Netdata Space needs to be on a paid plan + - Access to the Space as an **Admin** + - The Space needs to be on a paid plan - You need to have a Slack app on your workspace to receive the Webhooks. ### Slack Server Configuration - Steps to configure your Slack to receive notifications from Netdata: - - 1. Create an app to receive webhook integrations. Check [Create an app](https://api.slack.com/apps?new_app=1) from Slack documentation for further details + 1. Create an app to receive webhook integrations. Check the [Slack documentation](https://api.slack.com/apps?new_app=1) for further details 2. Install the app on your workspace 3. Configure Webhook URLs for your workspace - - On your app go to **Incoming Webhooks** and click on **activate incoming webhooks** - - At the bottom of **Webhook URLs for Your Workspace** section you have **Add New Webhook to Workspace** - - After pressing that specify the channel where you want your notifications to be delivered - - Once completed copy the Webhook URL that you will need to add to your notification configuration on Netdata UI + - On your app go to **Incoming Webhooks** and click on **activate incoming webhooks** + - At the bottom of **Webhook URLs for Your Workspace** section you have **Add New Webhook to Workspace** + - Specify the channel where you want your notifications to be delivered + - Once completed, copy the Webhook URL in order to add it to your integration configuration in the Netdata Cloud UI - For more details please check Slacks's article [Incoming webhooks for Slack](https://slack.com/help/articles/115005265063-Incoming-webhooks-for-Slack). + For more details please check [Incoming webhooks for Slack](https://slack.com/help/articles/115005265063-Incoming-webhooks-for-Slack). - ### Netdata Configuration Steps + ### Netdata Configuration 1. Click on the **Space settings** cog (located above your profile icon) - 2. Click on the **Notification** tab - 3. Click on the **+ Add configuration** button (near the top-right corner of your screen) - 4. On the **Slack** card click on **+ Add** - 5. A modal will be presented to you to enter the required details to enable the configuration: - * **Notification settings** are Netdata specific settings - - Configuration name - you can optionally provide a name for your configuration you can easily refer to it - - Rooms - by specifying a list of Rooms you are select to which nodes or areas of your infrastructure you want to be notified using this configuration - - Notification - you specify which notifications you want to be notified using this configuration: All Alerts and unreachable, All Alerts, Critical only - * **Integration configuration** are the specific notification integration required settings, which vary by notification method. For Slack: - - Webhook URL - URL provided on Slack for the channel you want to receive your notifications. - -- id: 'notify-cloud-opsgenie' + 2. Click on the **Alerts & Notifications** tab + 3. 
Click on the **+ Add configuration** button
+      4. Add the Slack Integration
+      5. A modal will be presented to you to enter the required details to enable the integration:
+          - **Notification settings**
+              - Configuration name (optional): A name for your configuration in order to easily refer to it
+              - Rooms: A list of Rooms for which you want to be notified
+              - Notifications: The notifications that you want to receive
+          - **Integration configuration**
+              - Webhook URL: URL provided on Slack for the channel you want to receive your notifications
+
+- id: "notify-cloud-opsgenie"
   meta:
-    name: 'Opsgenie'
-    link: 'https://www.atlassian.com/software/opsgenie'
+    name: "Opsgenie"
+    link: "https://www.atlassian.com/software/opsgenie"
   categories:
     - notify.cloud
-    icon_filename: 'opsgenie.png'
+    icon_filename: "opsgenie.png"
   keywords:
     - opsgenie
     - atlassian
-  overview:
-    notification_description: "From the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on Opsgenie."
-    notification_limitations: ''
   setup:
     description: |
       ### Prerequisites
 
       - A Netdata Cloud account
-      - Access to the Netdata Space as an **Admin**
-      - The Netdata Space needs to be on a paid plan
+      - Access to the Space as an **Admin**
+      - The Space needs to be on a paid plan
       - You need to have permissions on Opsgenie to add new integrations.
 
       ### Opsgenie Server Configuration
-      Steps to configure your Opsgenie to receive notifications from Netdata:
-
-      1. Go to integrations tab of your team, click **Add integration**
-      2. Pick **API** from available integrations. Copy your API Key and press **Save Integration**.
-      3. Paste copied API key into the corresponding field in **Integration configuration** section of Opsgenie modal window in Netdata.
+      1. Go to the integrations tab of your team and click **Add integration**
+      2. Pick **API** from the available integrations and copy the API Key in order to add it to your integration configuration in the Netdata Cloud UI
 
-      ### Netdata Configuration Steps
+      ### Netdata Configuration
 
       1. Click on the **Space settings** cog (located above your profile icon)
-      2. Click on the **Notification** tab
-      3. Click on the **+ Add configuration** button (near the top-right corner of your screen)
-      4. On the **Opsgenie** card click on **+ Add**
-      5. A modal will be presented to you to enter the required details to enable the configuration:
-        * **Notification settings** are Netdata specific settings
-          - Configuration name - you can optionally provide a name for your configuration you can easily refer to it
-          - Rooms - by specifying a list of Rooms you are select to which nodes or areas of your infrastructure you want to be notified using this configuration
-          - Notification - you specify which notifications you want to be notified using this configuration: All Alerts and unreachable, All Alerts, Critical only
-        * **Integration configuration** are the specific notification integration required settings, which vary by notification method. For Opsgenie:
-          - API Key - a key provided on Opsgenie for the channel you want to receive your notifications.
+      2. Click on the **Alerts & Notifications** tab
+      3. Click on the **+ Add configuration** button
+      4. Add the Opsgenie Integration
+      5. 
A modal will be presented to you to enter the required details to enable the integration:
+          - **Notification settings**
+              - Configuration name (optional): A name for your configuration in order to easily refer to it
+              - Rooms: A list of Rooms for which you want to be notified
+              - Notifications: The notifications that you want to receive
+          - **Integration configuration**
+              - API Key: The key provided on Opsgenie for the channel you want to receive your notifications
+
+- id: "notify-cloud-mattermost"
   meta:
-    name: 'Mattermost'
-    link: 'https://mattermost.com/'
+    name: "Mattermost"
+    link: "https://mattermost.com/"
   categories:
     - notify.cloud
-    icon_filename: 'mattermost.png'
+    icon_filename: "mattermost.png"
   keywords:
     - mattermost
-  overview:
-    notification_description: "From the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on Mattermost."
-    notification_limitations: ''
   setup:
     description: |
       ### Prerequisites
 
       - A Netdata Cloud account
-      - Access to the Netdata Space as an **Admin**
-      - The Netdata Space needs to be on a paid plan
+      - Access to the Space as an **Admin**
+      - The Space needs to be on a paid plan
       - You need to have permissions on Mattermost to add new integrations.
-      - You need to have a Mattermost app on your workspace to receive the webhooks.
 
       ### Mattermost Server Configuration
-      Steps to configure your Mattermost to receive notifications from Netdata:
 
       1. In Mattermost, go to Product menu > Integrations > Incoming Webhook
-        - If you don’t have the Integrations option, incoming webhooks may not be enabled on your Mattermost server or may be disabled for non-admins. They can be enabled by a System Admin from System Console > Integrations > Integration Management. Once incoming webhooks are enabled, continue with the steps below.
-      2. Select Add Incoming Webhook and add a name and description for the webhook. The description can be up to 500 characters
+        - If you don't have the Integrations option, incoming webhooks may not be enabled on your Mattermost server or may be disabled for non-admins. They can be enabled by a System Admin from System Console > Integrations > Integration Management. Once incoming webhooks are enabled, continue with the steps below.
+      2. Select Add Incoming Webhook and add a name and description for the webhook.
       3. Select the channel to receive webhook payloads, then select Add to create the webhook
-      4. You will end up with a webhook endpoint that looks like below:
-        `https://your-mattermost-server.com/hooks/xxx-generatedkey-xxx`
-
-        - Treat this endpoint as a secret. Anyone who has it will be able to post messages to your Mattermost instance.
+      4. You will end up with a webhook URL that looks like `https://your-mattermost-server.com/hooks/xxx-generatedkey-xxx`, copy it in order to add it to your integration configuration in the Netdata Cloud UI
 
-      For more details please check Mattermost's article [Incoming webhooks for Mattermost](https://developers.mattermost.com/integrate/webhooks/incoming/).
+      For more details please check [Incoming webhooks for Mattermost](https://developers.mattermost.com/integrate/webhooks/incoming/).
 
-      ### Netdata Configuration Steps
+      ### Netdata Configuration
 
       1. Click on the **Space settings** cog (located above your profile icon)
-      2. Click on the **Notification** tab
-      3. Click on the **+ Add configuration** button (near the top-right corner of your screen)
-      4. On the **Mattermost** card click on **+ Add**
-      5. 
A modal will be presented to you to enter the required details to enable the configuration:
-        * **Notification settings** are Netdata specific settings
-          - Configuration name - you can optionally provide a name for your configuration you can easily refer to it
-          - Rooms - by specifying a list of Rooms you are select to which nodes or areas of your infrastructure you want to be notified using this configuration
-          - Notification - you specify which notifications you want to be notified using this configuration: All Alerts and unreachable, All Alerts, Critical only
-        * **Integration configuration** are the specific notification integration required settings, which vary by notification method. For Mattermost:
-          - Webhook URL - URL provided on Mattermost for the channel you want to receive your notifications
+      2. Click on the **Alerts & Notifications** tab
+      3. Click on the **+ Add configuration** button
+      4. Add the Mattermost Integration
+      5. A modal will be presented to you to enter the required details to enable the integration:
+          - **Notification settings**
+              - Configuration name (optional): A name for your configuration in order to easily refer to it
+              - Rooms: A list of Rooms for which you want to be notified
+              - Notifications: The notifications that you want to receive
+          - **Integration configuration**
+              - Webhook URL: URL provided on Mattermost for the channel you want to receive your notifications
+
+- id: "notify-cloud-rocketchat"
   meta:
-    name: 'RocketChat'
-    link: 'https://www.rocket.chat/'
+    name: "RocketChat"
+    link: "https://www.rocket.chat/"
   categories:
     - notify.cloud
-    icon_filename: 'rocketchat.png'
+    icon_filename: "rocketchat.png"
   keywords:
     - rocketchat
-  overview:
-    notification_description: "From the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on RocketChat."
-    notification_limitations: ''
   setup:
     description: |
       ### Prerequisites
 
      - A Netdata Cloud account
-      - Access to the Netdata Space as an **Admin**
-      - The Netdata Space needs to be on a paid plan
-      - You need to have permissions on Mattermost to add new integrations.
-      - You need to have a RocketChat app on your workspace to receive the webhooks.
-
-      ### Mattermost Server Configuration
+      - Access to the Space as an **Admin**
+      - The Space needs to be on a paid plan
+      - You need to have permissions on RocketChat to add new integrations.
 
-      Steps to configure your RocketChat to receive notifications from Netdata:
+      ### RocketChat Server Configuration
 
-      1. In RocketChat, Navigate to Administration > Workspace > Integrations.
-      2. Click **+New** at the top right corner.
-      3. For more details about each parameter, check [create-a-new-incoming-webhook](https://docs.rocket.chat/use-rocket.chat/workspace-administration/integrations#create-a-new-incoming-webhook).
-      4. After configuring integration, click Save.
-      5. You will end up with a webhook endpoint that looks like below:
-        `https://your-server.rocket.chat/hooks/YYYYYYYYYYYYYYYYYYYYYYYY/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX`
-        - Treat this endpoint as a secret. Anyone who has it will be able to post messages to your RocketChat instance.
+      Steps to configure your RocketChat server to receive notifications from Netdata Cloud:
+      1. In RocketChat, navigate to Administration > Workspace > Integrations
+      2. Click **+New** at the top right corner
+      3. 
For more details about each parameter, check [Create a new incoming webhook](https://docs.rocket.chat/use-rocket.chat/workspace-administration/integrations#create-a-new-incoming-webhook) + 4. You will end up with a webhook endpoint that looks like `https://your-server.rocket.chat/hooks/YYYYYYYYYYYYYYYYYYYYYYYY/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX`, copy it in order to add it to your integration configuration in the Netdata Cloud UI - For more details please check RocketChat's article Incoming webhooks for [RocketChat](https://docs.rocket.chat/use-rocket.chat/workspace-administration/integrations/). + For more details please check [Incoming webhooks for RocketChat](https://docs.rocket.chat/use-rocket.chat/workspace-administration/integrations/). - ### Netdata Configuration Steps + ### Netdata Configuration 1. Click on the **Space settings** cog (located above your profile icon) - 2. Click on the **Notification** tab - 3. Click on the **+ Add configuration** button (near the top-right corner of your screen) - 4. On the **RocketChat** card click on **+ Add** - 5. A modal will be presented to you to enter the required details to enable the configuration: - * **Notification settings** are Netdata specific settings - - Configuration name - you can optionally provide a name for your configuration you can easily refer to it - - Rooms - by specifying a list of Rooms you are select to which nodes or areas of your infrastructure you want to be notified using this configuration - - Notification - you specify which notifications you want to be notified using this configuration: All Alerts and unreachable, All Alerts, Critical only - * **Integration configuration** are the specific notification integration required settings, which vary by notification method. For RocketChat: - - Webhook URL - URL provided on RocketChat for the channel you want to receive your notifications. - -- id: 'notify-cloud-awssns' + 2. Click on the **Alerts & Notifications** tab + 3. Click on the **+ Add configuration** button + 4. Add the PagerDuty Integration + 5. A modal will be presented to you to enter the required details to enable the integration: + - **Notification settings** + - Configuration name (optional): A name for your configuration in order to easily refer to it + - Rooms: A list of Rooms for which you want to be notified + - Notifications: The notifications which you want to be notified + - **Integration configuration** + - Webhook URL: URL provided on RocketChat for the channel you want to receive your notifications + +- id: "notify-cloud-awssns" meta: - name: 'Amazon SNS' - link: 'https://aws.amazon.com/sns/' + name: "Amazon SNS" + link: "https://aws.amazon.com/sns/" categories: - notify.cloud - icon_filename: 'awssns.png' + icon_filename: "awssns.png" keywords: - awssns - overview: - notification_description: "From the Cloud interface, you can manage your space's notification settings and from these you can add a specific configuration to get notifications delivered on AWS SNS." - notification_limitations: '' setup: description: | ### Prerequisites - To add AWS SNS notification you need: - - A Netdata Cloud account - Access to the space as an **Admin** - The Space needs to be on a paid plan - - Have an AWS account with AWS SNS access, for more details check [how to configure this on AWS SNS](#settings-on-aws-sns) + - An AWS account with AWS SNS access - ### Steps + ### AWS SNS Configuration - 1. Click on the **Space settings** cog (located above your profile icon) - 2. 
Click on the **Notification** tab - 3. Click on the **+ Add configuration** button (near the top-right corner of your screen) - 4. On the **AwsSns** card click on **+ Add** - 5. A modal will be presented to you to enter the required details to enable the configuration: - * **Notification settings** are Netdata specific settings - - Configuration name - you can optionally provide a name for your configuration you can easily refer to it - - Rooms - by specifying a list of Rooms you are select to which nodes or areas of your infrastructure you want to be notified using this configuration - - Notification - you specify which notifications you want to be notified using this configuration: All Alerts and unreachable, All Alerts, Critical only - * **Integration configuration** are the specific notification integration required settings, which vary by notification method. For AWS SNS: - - Topic ARN - topic provided on AWS SNS (with region) for where to publish your notifications. For more details check [how to configure this on AWS SNS](#settings-on-aws-sns) - - ### Settings on AWS SNS - - To enable the webhook integration on AWS SNS you need: 1. [Setting up access for Amazon SNS](https://docs.aws.amazon.com/sns/latest/dg/sns-setting-up.html) 2. Create a topic - On AWS SNS management console click on **Create topic** - - On the **Details** section, the standard type and provide the topic name + - On the **Details** section, select the standard type and provide the topic name - On the **Access policy** section, change the **Publishers** option to **Only the specified AWS accounts** and provide the Netdata AWS account **(123269920060)** that will be used to publish notifications to the topic being created - - Finally, click on **Create topic** on the bottom of the page - 3. Now, use the new **Topic ARN** while adding AWS SNS integration on your space. + 3. Copy the **Topic ARN** in order to add it to your integration configuration in the Netdata Cloud UI + + ### Netdata Configuration -- id: 'notify-cloud-microsoftteams' + 1. Click on the **Space settings** cog (located above your profile icon) + 2. Click on the **Alerts & Notifications** tab + 3. Click on the **+ Add configuration** button + 4. Add the AWS SNS Integration + 5. A modal will be presented to you to enter the required details to enable the integration: + - **Notification settings** + - Configuration name (optional): A name for your configuration in order to easily refer to it + - Rooms: A list of Rooms for which you want to be notified + - Notifications: The notifications which you want to be notified + - **Integration configuration** + - Topic ARN: The topic provided on AWS SNS (with region) for where to publish your notifications. + +- id: "notify-cloud-microsoftteams" meta: - name: 'Microsoft Teams' - link: 'https://www.microsoft.com/en-us/microsoft-teams' + name: "Microsoft Teams" + link: "https://www.microsoft.com/en-us/microsoft-teams" categories: - notify.cloud - icon_filename: 'teams.svg' + icon_filename: "teams.svg" keywords: - microsoft - teams - overview: - notification_description: "From the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications to a Microsoft Teams channel." - notification_limitations: '' setup: description: | ### Prerequisites - To add Microsoft Teams notifications integration to your Netdata Cloud space you will need the following: - - - A Netdata Cloud account. - - Access to the Netdata Cloud space as an **Admin**. 
- - The Space to be on a paid plan. - - A [Microsoft Teams Essentials subscription](https://www.microsoft.com/en-sg/microsoft-teams/essentials) or higher. Note that this is a **paid** feature. - - ### Settings on Microsoft Teams - - 1. **Access the Channel Settings**: Navigate to the desired Microsoft Teams channel and hover over the channel name. Click the three dots (ellipsis) icon that appears. - 2. **Create a New Workflow**: Select "Workflows" from the options, then choose "Post to a channel when a webhook request is received." - 3. **Configure Workflow Details**: - - Give your workflow a descriptive name, such as "Netdata Alerts." - - Select the target team and channel to receive notifications. - - Click "Add workflow." - 4. **Obtain the Webhook URL**: - - Once the workflow is created, you will receive a unique Workflow Webhook URL. - - Copy this URL, as it will be required to configure Netdata Cloud. + - A Netdata Cloud account + - Access to the Space as an **Admin** + - The Space needs to be on a paid plan + - A [Microsoft Teams Essentials subscription](https://www.microsoft.com/en-sg/microsoft-teams/essentials) or higher. Note that this is a **paid** feature - ### Settings on Netdata Cloud + ### Microsoft Teams Configuration - 1. Click on the **Space settings** cog (located above your profile icon). - 2. Click on the **Notification** tab. - 3. Click on the **+ Add configuration** button (near the top-right corner of your screen). - 4. On the **Microsoft Teams** card click on **+ Add**. - 5. A modal will be presented to you to enter the required details to enable the configuration: - * **Notification settings** are Netdata specific settings: - - Configuration name - you can optionally provide a name for your configuration you can easily refer to it. - - Rooms - by specifying a list of Rooms you are select to which nodes or areas of your infrastructure you want to be notified using this configuration. - - Notification - you specify which notifications you want to be notified using this configuration: All Alerts and unreachable, All Alerts, Critical only. - * **Integration configuration** are the specific notification integration required settings, which vary by notification method. For Microsoft Teams: - - Microsoft Teams Incoming Webhook URL - the _Incoming Webhook URL_ that was generated earlier. + 1. Navigate to the desired Microsoft Teams channel and hover over the channel name. Click the three dots icon that appears + 2. Select "Workflows" from the options, then choose "Post to a channel when a webhook request is received" + 3. **Configure Workflow Details** + - Give your workflow a name, such as "Netdata Alerts" + - Select the target team and channel where you will receive notifications + - Click "Add workflow" + 4. Once the workflow is created, you will receive a unique Workflow Webhook URL, copy it, in order to add it to your integration configuration in the Netdata Cloud UI + ### Netdata Configuration -- id: 'notify-cloud-telegram' + 1. Click on the **Space settings** cog (located above your profile icon) + 2. Click on the **Alerts & Notifications** tab + 3. Click on the **+ Add configuration** button + 4. Add the Microsoft Teams Integration + 5. 
A modal will be presented to you to enter the required details to enable the integration: + - **Notification settings** + - Configuration name (optional): A name for your configuration in order to easily refer to it + - Rooms: A list of Rooms for which you want to be notified + - Notifications: The notifications which you want to be notified + - **Integration configuration** + - Microsoft Teams Incoming Webhook URL: The Incoming Webhook URL that you copied earlier. + +- id: "notify-cloud-telegram" meta: - name: 'Telegram' - link: 'https://telegram.org/' + name: "Telegram" + link: "https://telegram.org/" categories: - notify.cloud - icon_filename: 'telegram.svg' + icon_filename: "telegram.svg" keywords: - Telegram - overview: - notification_description: "From the Cloud interface, you can manage your space's notification settings and from these you can add a specific configuration to get notifications delivered on Telegram." - notification_limitations: '' setup: description: | ### Prerequisites - To add Telegram notification you need: - - A Netdata Cloud account - - Access to the space as an **Admin** + - Access to the Space as an **Admin** - The Space needs to be on a paid plan - - The Telegram bot token, chat ID and _optionally_ the topic ID + - The Telegram bot token, chat ID and optionally the topic ID - ### Steps - - 1. Click on the **Space settings** cog (located above your profile icon) - 2. Click on the **Notification** tab - 3. Click on the **+ Add configuration** button (near the top-right corner of your screen) - 4. On the **Telegram** card click on **+ Add** - 5. A modal will be presented to you to enter the required details to enable the configuration: - - **Notification settings** are Netdata specific settings - - Configuration name - you can optionally provide a name for your configuration you can easily refer to it - - Rooms - by specifying a list of Rooms you are select to which nodes or areas of your infrastructure you want to be notified using this configuration - - Notification - you specify which notifications you want to be notified using this configuration: All Alerts and unreachable, All Alerts, Critical only - - **Integration configuration** are the specific notification integration required settings, which vary by notification method. For Telegram: - - Bot Token - the token of your bot - - Chat ID - the chat id where your bot will deliver messages to - - Topic ID - the identifier of the chat topic to which your bot will send messages. If omitted or 0, messages will be sent to the General topic. If topics are not supported, messages will be sent to the chat. - - ### Getting the Telegram bot token, chat ID and topic ID + ### Telegram Configuration - Bot token: To create one bot, contact the [@BotFather](https://t.me/BotFather) bot and send the command `/newbot` and follow the instructions. **Start a conversation with your bot or invite it into the group where you want it to send notifications**. - To get the chat ID you have two options: - Contact the [@myidbot](https://t.me/myidbot) bot and send the `/getid` command to get your personal chat ID, or invite it into a group and use the `/getgroupid` command to get the group chat ID. - Alternatively, you can get the chat ID directly from the bot API. Send your bot a command in the chat you want to use, then check `https://api.telegram.org/bot{YourBotToken}/getUpdates`, eg. 
`https://api.telegram.org/bot111122223:7OpFlFFRzRBbrUUmIjj5HF9Ox2pYJZy5/getUpdates` - To get the topic ID, the easiest way is this: Post a message to that topic, then right-click on it and select `Copy Message Link`. Paste it on a scratchpad and notice that it has the following structure `https://t.me/c/XXXXXXXXXX/YY/ZZ`. The topic ID is `YY` (integer). -- id: 'notify-cloud-splunk' + + ### Netdata Configuration + + 1. Click on the **Space settings** cog (located above your profile icon) + 2. Click on the **Alerts & Notifications** tab + 3. Click on the **+ Add configuration** button + 4. Add the Telegram Integration + 5. A modal will be presented to you to enter the required details to enable the integration: + - **Notification settings** + - Configuration name (optional): A name for your configuration in order to easily refer to it + - Rooms: A list of Rooms for which you want to be notified + - Notifications: The notifications which you want to be notified + - **Integration configuration** + - Bot Token: The token of your bot + - Chat ID: The chat id where your bot will deliver messages to + - Topic ID: The identifier of the chat topic to which your bot will send messages. If omitted or 0, messages will be sent to the General topic. If topics are not supported, messages will be sent to the chat. + +- id: "notify-cloud-splunk" meta: - name: 'Splunk' - link: 'https://splunk.com/' + name: "Splunk" + link: "https://splunk.com/" categories: - notify.cloud - icon_filename: 'splunk-black.svg' + icon_filename: "splunk-black.svg" keywords: - Splunk - overview: - notification_description: "From the Cloud interface, you can manage your space's notification settings and from these you can add a specific configuration to get notifications delivered on Splunk." - notification_limitations: '' setup: description: | ### Prerequisites - To add Splunk notification you need: - - A Netdata Cloud account - - Access to the space as an **Admin** + - Access to the Space as an **Admin** - The Space needs to be on a paid plan - - URI and token for your Splunk HTTP Event Collector. Refer to the [Splunk documentation](https://docs.splunk.com/Documentation/Splunk/latest/Data/UsetheHTTPEventCollector) for detailed instructions. + - The URI and token for your Splunk HTTP Event Collector. Refer to the [Splunk documentation](https://docs.splunk.com/Documentation/Splunk/latest/Data/UsetheHTTPEventCollector) for detailed instructions on how to set it up. - ### Steps + ### Netdata Configuration 1. Click on the **Space settings** cog (located above your profile icon) - 2. Click on the **Notification** tab - 3. Click on the **+ Add configuration** button (near the top-right corner of your screen) - 4. On the **Splunk** card click on **+ Add** - 5. A modal will be presented to you to enter the required details to enable the configuration: - - **Notification settings** are Netdata specific settings - - Configuration name - provide a descriptive name for your configuration to easily identify it. - - Rooms - select the nodes or areas of your infrastructure you want to receive notifications about. - - Notification - choose the type of notifications you want to receive: All Alerts and unreachable, All Alerts, Critical only. - - **Integration configuration** are the specific notification integration required settings, which vary by notification method. 
For Splunk: - - HTTP Event Collector URI - The URI of your HTTP event collector in Splunk - - HTTP Event Collector Token - the token that Splunk provided to you when you created the HTTP Event Collector - -- id: 'notify-cloud-victorops' + 2. Click on the **Alerts & Notifications** tab + 3. Click on the **+ Add configuration** button + 4. Add the Splunk Integration + 5. A modal will be presented to you to enter the required details to enable the integration: + - **Notification settings** + - Configuration name (optional): A name for your configuration in order to easily refer to it + - Rooms: A list of Rooms for which you want to be notified + - Notifications: The notifications which you want to be notified + - **Integration configuration** + - HTTP Event Collector URI: The URI of your HTTP event collector in Splunk + - HTTP Event Collector Token: The token that Splunk provided to you when you created the HTTP Event Collector + +- id: "notify-cloud-victorops" meta: - name: 'Splunk VictorOps' - link: 'https://www.splunk.com/en_us/about-splunk/acquisitions/splunk-on-call.html' + name: "Splunk VictorOps" + link: "https://www.splunk.com/en_us/about-splunk/acquisitions/splunk-on-call.html" categories: - notify.cloud - icon_filename: 'victorops.svg' + icon_filename: "victorops.svg" keywords: - VictorOps - Splunk - On-Call - overview: - notification_description: "From the Cloud interface, you can manage your space's notification settings and from there you can add a specific configuration to get notifications delivered on Splunk On-Call/VictorOps." - notification_limitations: '' setup: description: | ### Prerequisites - To add Splunk VictorOps notification (also known as Splunk On-Call) you need: - - A Netdata Cloud account - - Access to the space as an **Admin** + - Access to the Space as an **Admin** - The Space needs to be on a paid plan - - Destination URL for your Splunk VictorOps REST Endpoint Integration. Refer to the [VictorOps documentation](https://help.victorops.com/knowledge-base/rest-endpoint-integration-guide) for detailed instructions. + - The Destination URL for your Splunk VictorOps REST Endpoint Integration. Refer to the [VictorOps documentation](https://help.victorops.com/knowledge-base/rest-endpoint-integration-guide) for detailed instructions. - ### Steps + ### Netdata Configuration 1. Click on the **Space settings** cog (located above your profile icon) - 2. Click on the **Notification** tab - 3. Click on the **+ Add configuration** button (near the top-right corner of your screen) - 4. On the **Splunk VictorOps** card click on **+ Add** - 5. A modal will be presented to you to enter the required details to enable the configuration: - - **Notification settings** are Netdata specific settings - - Configuration name - provide a descriptive name for your configuration to easily identify it. - - Rooms - select the nodes or areas of your infrastructure you want to receive notifications about. - - Notification - choose the type of notifications you want to receive: All Alerts and unreachable, All Alerts, Critical only. - - **Integration configuration** are the specific notification integration required settings, which vary by notification method. For Splunk VictorOps: + 2. Click on the **Alerts & Notifications** tab + 3. Click on the **+ Add configuration** button + 4. Add the Splunk VictorOps Integration + 5. 
A modal will be presented to you to enter the required details to enable the integration: + - **Notification settings** + - Configuration name (optional): A name for your configuration in order to easily refer to it + - Rooms: A list of Rooms for which you want to be notified + - Notifications: The notifications which you want to be notified + - **Integration configuration** - Destination URL - The URL provided by VictorOps of your REST endpoint. -- id: 'notify-cloud-webhook' +- id: "notify-cloud-webhook" meta: - name: 'Webhook' - link: 'https://en.wikipedia.org/wiki/Webhook' + name: "Webhook" + link: "https://en.wikipedia.org/wiki/Webhook" categories: - notify.cloud - icon_filename: 'webhook.svg' + icon_filename: "webhook.svg" keywords: - generic webhooks - webhooks - overview: - notification_description: "From the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on a webhook using a predefined schema." - notification_limitations: '' setup: description: | ### Prerequisites - A Netdata Cloud account - - Access to the Netdata Space as an **Admin** - - The Netdata Space needs to be on a paid plan + - Access to the Space as an **Admin** + - The Space needs to be on a paid plan - You need to have an app that allows you to receive webhooks following a predefined schema. - ### Netdata Configuration Steps + ### Netdata Configuration 1. Click on the **Space settings** cog (located above your profile icon) - 2. Click on the **Notification** tab - 3. Click on the **+ Add configuration** button (near the top-right corner of your screen) - 4. On the **Webhook** card click on **+ Add** + 2. Click on the **Alerts & Notifications** tab + 3. Click on the **+ Add configuration** button + 4. Add the Webhook integration 5. A modal will be presented to you to enter the required details to enable the configuration: - * **Notification settings** are Netdata specific settings - - Configuration name - you can optionally provide a name for your configuration you can easily refer to it - - Rooms - by specifying a list of Rooms you are select to which nodes or areas of your infrastructure you want to be notified using this configuration - - Notification - you specify which notifications you want to be notified using this configuration: All Alerts and unreachable, All Alerts, Critical only - * **Integration configuration** are the specific notification integration required settings, which vary by notification method. For Webhook: - - Webhook URL - webhook URL is the url of the service that Netdata will send notifications to. In order to keep the communication secured, we only accept HTTPS urls. - - Extra headers - these are optional key-value pairs that you can set to be included in the HTTP requests sent to the webhook URL. - - Authentication Mechanism - Netdata webhook integration supports 3 different authentication mechanisms. - * Mutual TLS (recommended) - default authentication mechanism used if no other method is selected. - * Basic - the client sends a request with an Authorization header that includes a base64-encoded string in the format **username:password**. These will settings will be required inputs. - * Bearer - the client sends a request with an Authorization header that includes a **bearer token**. This setting will be a required input. 
- + - **Notification settings** + - Configuration name (optional): A name for your configuration in order to easily refer to it + - Rooms: A list of Rooms for which you want to be notified + - Notifications: The notifications which you want to be notified + - **Integration configuration** + - Webhook URL: The url of the service that Netdata will send notifications to. In order to keep the communication secured, Netdata only accepts HTTPS urls. + - Extra headers: Optional key-value pairs that you can set to be included in the HTTP requests sent to the webhook URL. + - Authentication Mechanism, Netdata webhook integration supports 3 different authentication mechanisms. + - Mutual TLS (recommended): Default authentication mechanism used if no other method is selected + - Basic: The client sends a request with an Authorization header that includes a base64-encoded string in the format **username:password**. + - Bearer: The client sends a request with an Authorization header that includes a **bearer token**. - ### Webhook service + ### Webhook service - A webhook integration allows your application to receive real-time alerts from Netdata by sending HTTP requests to a specified URL. In this document, we'll go over the steps to set up a generic webhook integration, including adding headers, and implementing different types of authorization mechanisms. + A webhook service allows your application to receive real-time alerts from Netdata by sending HTTP requests to a specified URL. - #### Netdata webhook integration + In this section, we'll go over the steps to set up a generic webhook service, including adding headers, and implementing different types of authorization mechanisms. - A webhook integration is a way for one service to notify another service about events that occur within it. This is done by sending an HTTP POST request to a specified URL (known as the "webhook URL") when an event occurs. + #### Netdata webhook integration - Netdata webhook integration service will send alert and reachability notifications to the destination service as soon as they are detected. + Netdata webhook integration service will send alert and reachability notifications to the destination service as soon as they are detected. - For alert notifications, the content sent to the destination service contains a JSON object with the following properties: + For alert notifications, the content sent to the destination service contains a JSON object with the following properties: - | field | type | description | - | :-- | :-- | :-- | - | message | string | A summary message of the alert. | - | alarm | string | The alarm the notification is about. | - | info | string | Additional info related with the alert. | - | chart | string | The chart associated with the alert. | - | context | string | The chart context. | - | space | string | The space where the node that raised the alert is assigned. | - | Rooms | object[object(string,string)] | Object with list of Rooms names and urls where the node belongs to. | - | family | string | Context family. | - | class | string | Classification of the alert, e.g. "Error". | - | severity | string | Alert severity, can be one of "warning", "critical" or "clear". | - | date | string | Date of the alert in ISO8601 format. | - | duration | string | Duration the alert has been raised. | - | additional_active_critical_alerts | integer | Number of additional critical alerts currently existing on the same node. 
-      | additional_active_warning_alerts | integer | Number of additional warning alerts currently existing on the same node. |
-      | alarm_url | string | Netdata Cloud URL for this alarm. |
+      | field                             | type                            | description                                                                |
+      |:----------------------------------|:--------------------------------|:---------------------------------------------------------------------------|
+      | message                           | string                          | A summary message of the alert.                                            |
+      | alert                             | string                          | The alert the notification is related to.                                  |
+      | info                              | string                          | Additional info related with the alert.                                    |
+      | chart                             | string                          | The chart associated with the alert.                                       |
+      | context                           | string                          | The chart context.                                                         |
+      | space                             | string                          | The space where the node that raised the alert is assigned.                |
+      | Rooms                             | object\[object(string,string)\] | Object with list of Rooms names and urls where the node belongs to.        |
+      | family                            | string                          | Context family.                                                            |
+      | class                             | string                          | Classification of the alert, e.g. `Error`.                                 |
+      | severity                          | string                          | Alert severity, can be one of `warning`, `critical` or `clear`.            |
+      | date                              | string                          | Date of the alert in ISO8601 format.                                       |
+      | duration                          | string                          | Duration the alert has been raised.                                        |
+      | additional_active_critical_alerts | integer                         | Number of additional critical alerts currently existing on the same node.  |
+      | additional_active_warning_alerts  | integer                         | Number of additional warning alerts currently existing on the same node.   |
+      | alert_url                         | string                          | Netdata Cloud URL for this alert.                                          |

-      For reachability notifications, the JSON object will contain the following properties:
+      For reachability notifications, the JSON object will contain the following properties:

-      | field | type | description |
-      | :-- | :-- | :-- |
-      | message | string | A summary message of the reachability alert. |
-      | url | string | Netdata Cloud URL for the host experiencing the reachability alert. |
-      | host | string | the host experiencing the reachability alert. |
-      | severity | string | severity for this notification. If host is reachable, severity will be 'info', if host is unreachable, it will be 'critical'. |
-      | status | object | an object with the status information. |
-      | status.reachable | boolean | true if host is reachable, false otherwise |
-      | status.text | string | can be 'reachable' or 'unreachable' |
+      | field            | type    | description                                                                                                                    |
+      |:-----------------|:--------|:--------------------------------------------------------------------------------------------------------------------------------|
+      | message          | string  | A summary message of the reachability alert.                                                                                   |
+      | url              | string  | Netdata Cloud URL for the host experiencing the reachability alert.                                                            |
+      | host             | string  | The hostname experiencing the reachability alert.                                                                              |
+      | severity         | string  | Severity for this notification. If host is reachable, severity will be `info`, if host is unreachable, it will be `critical`.  |
+      | status           | object  | An object with the status information.                                                                                         |
+      | status.reachable | boolean | `true` if host is reachable, `false` otherwise                                                                                  |
+      | status.text      | string  | Can be `reachable` or `unreachable`                                                                                             |

-      #### Extra headers
+      #### Extra headers

-      When setting up a webhook integration, the user can specify a set of headers to be included in the HTTP requests sent to the webhook URL.
+      When setting up a webhook service, the user can specify a set of headers to be included in the HTTP requests sent to the webhook URL.
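+
+      For illustration, a delivery carrying one custom extra header could look like the following sketch. This is only a hand-made reproduction of such a request, not something Netdata documents verbatim: the endpoint URL, header name, and header value below are placeholders.
+
+      ```bash
+      # Hypothetical example: reproducing what a webhook delivery with one extra
+      # header could look like. 'X-Auth-Token: example-value' stands in for
+      # whatever key-value pair you configure as an extra header.
+      curl -X POST 'https://example.com/webhooks/netdata' \
+        -H 'Content-Type: application/json' \
+        -H 'X-Auth-Token: example-value' \
+        -d '{"message": "example alert summary"}'
+      ```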
-      By default, the following headers will be sent in the HTTP request
+      By default, the following headers will be sent in the HTTP request

-      | **Header** | **Value** |
-      |:-------------------------------:|-----------------------------|
-      | Content-Type | application/json |
+      |  **Header**  |    **Value**     |
+      |:------------:|------------------|
+      | Content-Type | application/json |

-      #### Authentication mechanisms
+      #### Authentication mechanisms

-      Netdata webhook integration supports 3 different authentication mechanisms:
+      Netdata webhook integration supports 3 different authentication mechanisms:

-      ##### Mutual TLS authentication (recommended)
+      ##### Mutual TLS authentication (recommended)

-      In mutual Transport Layer Security (mTLS) authentication, the client and the server authenticate each other using X.509 certificates. This ensures that the client is connecting to the intended server, and that the server is only accepting connections from authorized clients.
+      In mutual Transport Layer Security (mTLS) authentication, the client and the server authenticate each other using X.509 certificates. This ensures that the client is connecting to the intended server, and that the server is only accepting connections from authorized clients.

-      This is the default authentication mechanism used if no other method is selected.
+      This is the default authentication mechanism used if no other method is selected.

-      To take advantage of mutual TLS, you can configure your server to verify Netdata's client certificate. In order to achieve this, the Netdata client sending the notification supports mutual TLS (mTLS) to identify itself with a client certificate that your server can validate.
+      To take advantage of mutual TLS, you can configure your server to verify Netdata's client certificate. In order to achieve this, the Netdata client sending the notification supports mutual TLS (mTLS) to identify itself with a client certificate that your server can validate.

-      The steps to perform this validation are as follows:
+      The steps to perform this validation are as follows:

-      - Store Netdata CA certificate on a file in your disk. The content of this file should be:
+      - Store the Netdata CA certificate in a file on your disk. The content of this file should be:
       Netdata CA certificate
-      ```
-      -----BEGIN CERTIFICATE-----
-      MIIF0jCCA7qgAwIBAgIUDV0rS5jXsyNX33evHEQOwn9fPo0wDQYJKoZIhvcNAQEN
-      BQAwgYAxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH
-      Ew1TYW4gRnJhbmNpc2NvMRYwFAYDVQQKEw1OZXRkYXRhLCBJbmMuMRIwEAYDVQQL
-      EwlDbG91ZCBTUkUxGDAWBgNVBAMTD05ldGRhdGEgUm9vdCBDQTAeFw0yMzAyMjIx
-      MjQzMDBaFw0zMzAyMTkxMjQzMDBaMIGAMQswCQYDVQQGEwJVUzETMBEGA1UECBMK
-      Q2FsaWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEWMBQGA1UEChMNTmV0
-      ZGF0YSwgSW5jLjESMBAGA1UECxMJQ2xvdWQgU1JFMRgwFgYDVQQDEw9OZXRkYXRh
-      IFJvb3QgQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCwIg7z3R++
-      ppQYYVVoMIDlhWO3qVTMsAQoJYEvVa6fqaImUBLW/k19LUaXgUJPohB7gBp1pkjs
-      QfY5dBo8iFr7MDHtyiAFjcQV181sITTMBEJwp77R4slOXCvrreizhTt1gvf4S1zL
-      qeHBYWEgH0RLrOAqD0jkOHwewVouO0k3Wf2lEbCq3qRk2HeDvkv0LR7sFC+dDms8
-      fDHqb/htqhk+FAJELGRqLeaFq1Z5Eq1/9dk4SIeHgK5pdYqsjpBzOTmocgriw6he
-      s7F3dOec1ZZdcBEAxOjbYt4e58JwuR81cWAVMmyot5JNCzYVL9e5Vc5n22qt2dmc
-      Tzw2rLOPt9pT5bzbmyhcDuNg2Qj/5DySAQ+VQysx91BJRXyUimqE7DwQyLhpQU72
-      jw29lf2RHdCPNmk8J1TNropmpz/aI7rkperPugdOmxzP55i48ECbvDF4Wtazi+l+
-      4kx7ieeLfEQgixy4lRUUkrgJlIDOGbw+d2Ag6LtOgwBiBYnDgYpvLucnx5cFupPY
-      Cy3VlJ4EKUeQQSsz5kVmvotk9MED4sLx1As8V4e5ViwI5dCsRfKny7BeJ6XNPLnw
-      PtMh1hbiqCcDmB1urCqXcMle4sRhKccReYOwkLjLLZ80A+MuJuIEAUUuEPCwywzU
-      R7pagYsmvNgmwIIuJtB6mIJBShC7TpJG+wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMC
-      AQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU9IbvOsPSUrpr8H2zSafYVQ9e
-      Ft8wDQYJKoZIhvcNAQENBQADggIBABQ08aI31VKZs8jzg+y/QM5cvzXlVhcpkZsY
-      1VVBr0roSBw9Pld9SERrEHto8PVXbadRxeEs4sKivJBKubWAooQ6NTvEB9MHuGnZ
-      VCU+N035Gq/mhBZgtIs/Zz33jTB2ju3G4Gm9VTZbVqd0OUxFs41Iqvi0HStC3/Io
-      rKi7crubmp5f2cNW1HrS++ScbTM+VaKVgQ2Tg5jOjou8wtA+204iYXlFpw9Q0qnP
-      qq6ix7TfLLeRVp6mauwPsAJUgHZluz7yuv3r7TBdukU4ZKUmfAGIPSebtB3EzXfH
-      7Y326xzv0hEpjvDHLy6+yFfTdBSrKPsMHgc9bsf88dnypNYL8TUiEHlcTgCGU8ts
-      ud8sWN2M5FEWbHPNYRVfH3xgY2iOYZzn0i+PVyGryOPuzkRHTxDLPIGEWE5susM4
-      X4bnNJyKH1AMkBCErR34CLXtAe2ngJlV/V3D4I8CQFJdQkn9tuznohUU/j80xvPH
-      FOcDGQYmh4m2aIJtlNVP6+/92Siugb5y7HfslyRK94+bZBg2D86TcCJWaaZOFUrR
-      Y3WniYXsqM5/JI4OOzu7dpjtkJUYvwtg7Qb5jmm8Ilf5rQZJhuvsygzX6+WM079y
-      nsjoQAm6OwpTN5362vE9SYu1twz7KdzBlUkDhePEOgQkWfLHBJWwB+PvB1j/cUA3
-      5zrbwvQf
-      -----END CERTIFICATE-----
-      ```
+        ```text
+        -----BEGIN CERTIFICATE-----
+        MIIF0jCCA7qgAwIBAgIUDV0rS5jXsyNX33evHEQOwn9fPo0wDQYJKoZIhvcNAQEN
+        BQAwgYAxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH
+        Ew1TYW4gRnJhbmNpc2NvMRYwFAYDVQQKEw1OZXRkYXRhLCBJbmMuMRIwEAYDVQQL
+        EwlDbG91ZCBTUkUxGDAWBgNVBAMTD05ldGRhdGEgUm9vdCBDQTAeFw0yMzAyMjIx
+        MjQzMDBaFw0zMzAyMTkxMjQzMDBaMIGAMQswCQYDVQQGEwJVUzETMBEGA1UECBMK
+        Q2FsaWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEWMBQGA1UEChMNTmV0
+        ZGF0YSwgSW5jLjESMBAGA1UECxMJQ2xvdWQgU1JFMRgwFgYDVQQDEw9OZXRkYXRh
+        IFJvb3QgQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCwIg7z3R++
+        ppQYYVVoMIDlhWO3qVTMsAQoJYEvVa6fqaImUBLW/k19LUaXgUJPohB7gBp1pkjs
+        QfY5dBo8iFr7MDHtyiAFjcQV181sITTMBEJwp77R4slOXCvrreizhTt1gvf4S1zL
+        qeHBYWEgH0RLrOAqD0jkOHwewVouO0k3Wf2lEbCq3qRk2HeDvkv0LR7sFC+dDms8
+        fDHqb/htqhk+FAJELGRqLeaFq1Z5Eq1/9dk4SIeHgK5pdYqsjpBzOTmocgriw6he
+        s7F3dOec1ZZdcBEAxOjbYt4e58JwuR81cWAVMmyot5JNCzYVL9e5Vc5n22qt2dmc
+        Tzw2rLOPt9pT5bzbmyhcDuNg2Qj/5DySAQ+VQysx91BJRXyUimqE7DwQyLhpQU72
+        jw29lf2RHdCPNmk8J1TNropmpz/aI7rkperPugdOmxzP55i48ECbvDF4Wtazi+l+
+        4kx7ieeLfEQgixy4lRUUkrgJlIDOGbw+d2Ag6LtOgwBiBYnDgYpvLucnx5cFupPY
+        Cy3VlJ4EKUeQQSsz5kVmvotk9MED4sLx1As8V4e5ViwI5dCsRfKny7BeJ6XNPLnw
+        PtMh1hbiqCcDmB1urCqXcMle4sRhKccReYOwkLjLLZ80A+MuJuIEAUUuEPCwywzU
+        R7pagYsmvNgmwIIuJtB6mIJBShC7TpJG+wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMC
+        AQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU9IbvOsPSUrpr8H2zSafYVQ9e
+        Ft8wDQYJKoZIhvcNAQENBQADggIBABQ08aI31VKZs8jzg+y/QM5cvzXlVhcpkZsY
+        1VVBr0roSBw9Pld9SERrEHto8PVXbadRxeEs4sKivJBKubWAooQ6NTvEB9MHuGnZ
+        VCU+N035Gq/mhBZgtIs/Zz33jTB2ju3G4Gm9VTZbVqd0OUxFs41Iqvi0HStC3/Io
+        rKi7crubmp5f2cNW1HrS++ScbTM+VaKVgQ2Tg5jOjou8wtA+204iYXlFpw9Q0qnP
+        qq6ix7TfLLeRVp6mauwPsAJUgHZluz7yuv3r7TBdukU4ZKUmfAGIPSebtB3EzXfH
+        7Y326xzv0hEpjvDHLy6+yFfTdBSrKPsMHgc9bsf88dnypNYL8TUiEHlcTgCGU8ts
+        ud8sWN2M5FEWbHPNYRVfH3xgY2iOYZzn0i+PVyGryOPuzkRHTxDLPIGEWE5susM4
+        X4bnNJyKH1AMkBCErR34CLXtAe2ngJlV/V3D4I8CQFJdQkn9tuznohUU/j80xvPH
+        FOcDGQYmh4m2aIJtlNVP6+/92Siugb5y7HfslyRK94+bZBg2D86TcCJWaaZOFUrR
+        Y3WniYXsqM5/JI4OOzu7dpjtkJUYvwtg7Qb5jmm8Ilf5rQZJhuvsygzX6+WM079y
+        nsjoQAm6OwpTN5362vE9SYu1twz7KdzBlUkDhePEOgQkWfLHBJWwB+PvB1j/cUA3
+        5zrbwvQf
+        -----END CERTIFICATE-----
+        ```
+
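+      As a quick sanity check, you can inspect the stored certificate with `openssl` before wiring it into your web server. This is only a suggested verification step; the file name `netdata-ca.pem` below is an example, not a name Netdata prescribes:
+
+      ```bash
+      # Print the subject and expiry date of the saved Netdata CA certificate
+      # to confirm the file was copied intact.
+      openssl x509 -in netdata-ca.pem -noout -subject -enddate
+      ```
+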
-      - Enable client certificate validation on the web server that is doing the TLS termination. Below we show you how to perform this configuration in `NGINX` and `Apache`
+      - Enable client certificate validation on the web server that is doing the TLS termination. Below are examples of how to perform this configuration in `NGINX` and `Apache`.

-      **NGINX**
+        **NGINX**

       ```bash
       server {
@@ -702,7 +633,7 @@
           if ($ssl_client_s_dn !~ "CN=app.netdata.cloud") {
               return 403;
           }
-          # ... existing location configuration ...
+        # ... existing location configuration ...
       }
       }
       ```
@@ -722,66 +653,110 @@
       ```

-      ##### Basic authentication
+      ##### Basic authentication

-      In basic authorization, the client sends a request with an Authorization header that includes a base64-encoded string in the format username:password. The server then uses this information to authenticate the client. If this authentication method is selected, the user can set the user and password that will be used when connecting to the destination service.
+      In basic authorization, the client sends a request with an Authorization header that includes a base64-encoded string in the format username:password. The server then uses this information to authenticate the client. If this authentication method is selected, the user can set the user and password that will be used when connecting to the destination service.

-      ##### Bearer token authentication
+      ##### Bearer token authentication

-      In bearer token authentication, the client sends a request with an Authorization header that includes a bearer token. The server then uses this token to authenticate the client. Bearer tokens are typically generated by an authentication service, and are passed to the client after a successful authentication. If this method is selected, the user can set the token to be used for connecting to the destination service.
+      In bearer token authentication, the client sends a request with an Authorization header that includes a bearer token. The server then uses this token to authenticate the client. Bearer tokens are typically generated by an authentication service, and are passed to the client after a successful authentication. If this method is selected, the user can set the token to be used for connecting to the destination service.

-      ##### Challenge secret
+      ##### Challenge secret

-      To validate that you have ownership of the web application that will receive the webhook events, we are using a challenge response check mechanism.
+      To validate that you have ownership of the web application that will receive the webhook events, Netdata is using a challenge response check mechanism.

-      This mechanism works as follows:
+      This mechanism works as follows:

-      - The challenge secret parameter that you provide is a shared secret between you and Netdata only.
-      - On your request for creating a new Webhook integration, we will make a GET request to the url of the webhook, adding a query parameter `crc_token`, consisting of a random string.
-      - You will receive this request on your application and it must construct an encrypted response, consisting of a base64-encoded HMAC SHA-256 hash created from the crc_token and the shared secret. The response will be in the format:
+      - The challenge secret parameter that you provide is a shared secret between only you and Netdata.
+      - On your request for creating a new Webhook integration, Netdata will make a GET request to the URL of the webhook, adding a query parameter `crc_token`, consisting of a random string.
+      - You will receive this request on your application and it must construct an encrypted response, consisting of a base64-encoded HMAC SHA-256 hash created from the crc_token and the shared secret. The response will be in the format:

-      ```json
-      {
+        ```json
+        {
           "response_token": "sha256=9GKoHJYmcHIkhD+C182QWN79YBd+D+Vkj4snmZrfNi4="
-      }
-      ```
+        }
+        ```

-      - We will compare your application's response with the hash that we will generate using the challenge secret, and if they are the same, the integration creation will succeed.
+      - Netdata will compare your application's response with the hash that it will generate using the challenge secret, and if they are the same, the integration creation will succeed.

-      We will do this validation everytime you update your integration configuration.
+        Netdata does this validation every time you update your integration configuration.

-      - Response requirements:
-        - A base64 encoded HMAC SHA-256 hash created from the crc_token and the shared secret.
-        - Valid response_token and JSON format.
-        - Latency less than 5 seconds.
-        - 200 HTTP response code.
+      - Response requirements:
+        - A base64 encoded HMAC SHA-256 hash created from the crc_token and the shared secret.
+        - Valid response_token and JSON format.
+        - Latency less than 5 seconds.
+        - 200 HTTP response code.

-      **Example response token generation in Python:**
+      **Example response token generation in Python:**

-      Here you can see how to define a handler for a Flask application in python 3:
+      Here you can see how to define a handler for a Flask application in Python 3:

-      ```python
-      import base64
-      import hashlib
-      import hmac
-      import json
+        ```python
+        import base64
+        import hashlib
+        import hmac
+        import json
+
+        # Flask objects used below; 'request' exposes the incoming query parameters.
+        from flask import Flask, request
+
+        app = Flask(__name__)

-      key ='YOUR_CHALLENGE_SECRET'
+        key = 'YOUR_CHALLENGE_SECRET'

-      @app.route('/webhooks/netdata')
-      def webhook_challenge():
-        token = request.args.get('crc_token').encode('ascii')
+        @app.route('/webhooks/netdata')
+        def webhook_challenge():
+          token = request.args.get('crc_token').encode('ascii')

-        # creates HMAC SHA-256 hash from incomming token and your consumer secret
-        sha256_hash_digest = hmac.new(key.encode(),
-                                      msg=token,
-                                      digestmod=hashlib.sha256).digest()
+          # creates HMAC SHA-256 hash from incoming token and your consumer secret
+          sha256_hash_digest = hmac.new(key.encode(),
+                                        msg=token,
+                                        digestmod=hashlib.sha256).digest()

-        # construct response data with base64 encoded hash
-        response = {
-          'response_token': 'sha256=' + base64.b64encode(sha256_hash_digest).decode('ascii')
-        }
+          # construct response data with base64 encoded hash
+          response = {
+            'response_token': 'sha256=' + base64.b64encode(sha256_hash_digest).decode('ascii')
+          }

-        # returns properly formatted json response
-        return json.dumps(response)
-      ```
+          # returns properly formatted json response
+          return json.dumps(response)
+        ```
+
+- id: "notify-cloud-ilert"
   meta:
+    name: "ilert"
+    link: "https://www.ilert.com/"
+  categories:
+    - notify.cloud
+  icon_filename: "ilert.svg"
+  keywords:
+    - ilert
+  setup:
+    description: |
+      ### Prerequisites
+
+      - A Netdata Cloud account
+      - Access to the Space as an **Admin**
+      - The Space needs to be on a paid plan
+      - You need to have permissions on ilert to add new Alert sources.
+
+      ### ilert Configuration
+
+      1. From the navigation bar, open the Alert sources drop down and click "Alert sources"
+      2. Click on the "+ Create a new alert source" button
+      3. Configure an Alert source:
+         - Select "API integration" and click Next
+         - Provide a name that suits the source's purpose, for example "Netdata"
+         - Select Escalation policy
+         - Select Alert grouping (optional)
+      4. Obtain the API Key:
+         - Once the Alert source is created, you will be provided with an API key. Copy it in order to add it to your integration configuration in the Netdata Cloud UI
+
+      ### Netdata Configuration
+
+      1. Click on the **Space settings** cog (located above your profile icon)
+      2. Click on the **Alerts & Notifications** tab
+      3. Click on the **+ Add configuration** button
+      4. Add the ilert Integration
+      5. A modal will be presented to you to enter the required details to enable the integration:
+      - **Notification settings**
+        - Configuration name (optional): A name for your configuration in order to easily refer to it
+        - Rooms: A list of Rooms for which you want to be notified
+        - Notifications: The notifications which you want to be notified about
+      - **Integration configuration**
+        - Alert Source API key: The key you copied in the ilert configuration step.
diff --git a/integrations/deploy.yaml b/integrations/deploy.yaml
index 52fbdd3f1..c2557477d 100644
--- a/integrations/deploy.yaml
+++ b/integrations/deploy.yaml
@@ -16,23 +16,19 @@
     commands:
       - channel: nightly
         command: >
-          wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh
-          --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}
+          wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}
       - channel: stable
         command: >
-          wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh
-          --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}
+          wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}
   - &ks_curl
     method: curl
     commands:
       - channel: nightly
         command: >
-          curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh
-          --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}
+          curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}
       - channel: stable
         command: >
-          curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh
-          --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}
+          curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}
     additional_info: &ref_containers >
      Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId="deploy.docker-kubernetes" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId="deploy.docker-kubernetes" %}Docker{% /goToCategory %}?
     clean_additional_info: &ref_clean_containers >
@@ -562,14 +558,47 @@
     keywords:
       - windows
     install_description: |
-      1. Install [Windows Exporter](https://github.com/prometheus-community/windows_exporter) on every Windows host you want to monitor.
-      2. Install Netdata agent on Linux, FreeBSD or Mac.
-      3. Configure Netdata to collect data remotely from your Windows hosts by adding one job per host to windows.conf file. See the [configuration section](https://learn.netdata.cloud/docs/data-collection/monitor-anything/System%20Metrics/Windows-machines#configuration) for details.
-      4. Enable [virtual nodes](https://learn.netdata.cloud/docs/data-collection/windows-systems#virtual-nodes) configuration so the windows nodes are displayed as separate nodes.
+      Netdata offers a convenient Windows installer for easy setup. This executable provides two distinct installation modes, outlined below.
+
+      The Windows installer is currently in beta, and thus it is only available in the nightly release channel. A stable version will be released soon.
+
+      ## Graphical User Interface (GUI)
+
+      1. Download the Netdata [Windows installer](https://github.com/netdata/netdata-nightlies/releases) from the latest nightly release.
+      2. Run the `.exe` file and proceed with the installation process.
+      3. At a minimum, you will need your Netdata Cloud Space's claim token to connect your Agent to your Space.
+
+      ## Silent Mode (Command line)
+
+      If you prefer to install Netdata through the command line, you can do so by running the following command on Windows PowerShell with administrator rights.
     methods:
-      - *ks_wget
-      - *ks_curl
-    additional_info: ""
+      - method: Silent Mode (Command line)
+        commands:
+          - channel: stable
+            command: |
+              $ProgressPreference = 'SilentlyContinue';
+              Invoke-WebRequest https://github.com/netdata/netdata-nightlies/releases/latest/download/netdata-installer-x64.exe -OutFile "netdata-installer-x64.exe";
+              .\netdata-installer-x64.exe /S /A `
+              {% if $showClaimingOptions %}/TOKEN={% claim_token %} /ROOMS={% $claim_rooms %}{% /if %}
+          - channel: nightly
+            command: |
+              $ProgressPreference = 'SilentlyContinue';
+              Invoke-WebRequest https://github.com/netdata/netdata-nightlies/releases/latest/download/netdata-installer-x64.exe -OutFile "netdata-installer-x64.exe";
+              .\netdata-installer-x64.exe /S /A `
+              {% if $showClaimingOptions %}/TOKEN={% claim_token %} /ROOMS={% $claim_rooms %}{% /if %}
+    additional_info: |
+      ### Available Options
+
+      | Option    | Description                                                                                        |
+      |-----------|----------------------------------------------------------------------------------------------------|
+      | `/S`      | Enables silent mode installation.                                                                  |
+      | `/A`      | Accepts all Netdata licenses. This option is mandatory for silent installations.                   |
+      | `/D`      | Specifies the desired installation directory (defaults to `C:\Program Files\Netdata`).             |
+      | `/T`      | Opens the `MSYS2` terminal after installation.                                                     |
+      | `/I`      | Forces insecure connections, bypassing hostname verification (use only if absolutely necessary).   |
+      | `/TOKEN=` | Sets the Claim Token for your Netdata Cloud Space.                                                 |
+      | `/ROOMS=` | Comma-separated list of Room IDs where you want your node to appear.                               |
+      | `/PROXY=` | Sets the proxy server address if your network requires one.
| related_resources: {} most_popular: true platform_info: @@ -600,12 +629,10 @@ commands: - channel: nightly command: > - fetch -o /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh - --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %} + fetch -o /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %} - channel: stable command: > - fetch -o /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh - --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %} + fetch -o /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %} additional_info: | Netdata can also be installed via [FreeBSD ports](https://www.freshports.org/net-mgmt/netdata). related_resources: {} diff --git a/integrations/gen_docs_integrations.py b/integrations/gen_docs_integrations.py index 51a59ed48..2dcbc7a1d 100644 --- a/integrations/gen_docs_integrations.py +++ b/integrations/gen_docs_integrations.py @@ -1,7 +1,7 @@ import json +import re import shutil from pathlib import Path -import re # Dictionary responsible for making the symbolic links at the end of the script's run. symlink_dict = {} @@ -25,10 +25,14 @@ def cleanup(): for element in Path("integrations/cloud-notifications").glob('**/*/'): if "integrations" in str(element) and not "metadata.yaml" in str(element): shutil.rmtree(element) + for element in Path("integrations/logs").glob('**/*/'): + if "integrations" in str(element) and "metadata.yaml" not in str(element): + shutil.rmtree(element) for element in Path("integrations/cloud-authentication").glob('**/*/'): if "integrations" in str(element) and not "metadata.yaml" in str(element): shutil.rmtree(element) + def generate_category_from_name(category_fragment, category_array): """ Takes a category ID in splitted form ("." as delimiter) and the array of the categories, and returns the proper category name that Learn expects. @@ -46,7 +50,7 @@ def generate_category_from_name(category_fragment, category_array): try: # print("equals") # print(fragment, category_fragment[i+1]) - dummy_id = dummy_id + "." + category_fragment[i+1] + dummy_id = dummy_id + "." 
+ category_fragment[i + 1] # print(dummy_id) except IndexError: return category_name.split("/", 1)[1] @@ -75,19 +79,23 @@ def add_custom_edit_url(markdown_string, meta_yaml_link, sidebar_label_string, m """ output = "" + path_to_md_file = "" if mode == 'default': path_to_md_file = f'{meta_yaml_link.replace("/metadata.yaml", "")}/integrations/{clean_string(sidebar_label_string)}' - elif mode == 'cloud-notifications': + elif mode == 'cloud-notification': path_to_md_file = meta_yaml_link.replace("metadata.yaml", f'integrations/{clean_string(sidebar_label_string)}') - elif mode == 'agent-notifications': + elif mode == 'agent-notification': path_to_md_file = meta_yaml_link.replace("metadata.yaml", "README") elif mode == 'cloud-authentication': path_to_md_file = meta_yaml_link.replace("metadata.yaml", f'integrations/{clean_string(sidebar_label_string)}') + elif mode == 'logs': + path_to_md_file = meta_yaml_link.replace("metadata.yaml", f'integrations/{clean_string(sidebar_label_string)}') + output = markdown_string.replace( " print("Exception in exporter md construction", e, integration['id']) # NOTIFICATIONS - elif mode == 'notification': + elif mode == 'agent-notification': try: # initiate the variables for the notification method meta_yaml = integration['edit_link'].replace("blob", "edit") @@ -238,7 +253,7 @@ learn_rel_path: "{learn_rel_path.replace("notifications", "Alerts & Notification message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE NOTIFICATION'S metadata.yaml FILE" endmeta--> -{create_overview(integration, integration['meta']['icon_filename'])}""" +{create_overview(integration, integration['meta']['icon_filename'], "overview")}""" if integration['setup']: md += f""" @@ -252,7 +267,67 @@ endmeta--> except Exception as e: print("Exception in notification md construction", e, integration['id']) - + + elif mode == 'cloud-notification': + try: + # initiate the variables for the notification method + meta_yaml = integration['edit_link'].replace("blob", "edit") + sidebar_label = integration['meta']['name'] + learn_rel_path = generate_category_from_name(integration['meta']['categories'][0].split("."), categories) + + # build the markdown string + md = \ + f""" + +{create_overview(integration, integration['meta']['icon_filename'], "")}""" + + if integration['setup']: + md += f""" +{integration['setup']} +""" + + if integration['troubleshooting']: + md += f""" +{integration['troubleshooting']} +""" + + except Exception as e: + print("Exception in notification md construction", e, integration['id']) + + elif mode == 'logs': + try: + # initiate the variables for the logs integration + meta_yaml = integration['edit_link'].replace("blob", "edit") + sidebar_label = integration['meta']['name'] + learn_rel_path = generate_category_from_name(integration['meta']['categories'][0].split("."), categories) + + # build the markdown string + md = \ + f""" + +{create_overview(integration, integration['meta']['icon_filename'])}""" + + if integration['setup']: + md += f""" +{integration['setup']} +""" + + except Exception as e: + print("Exception in logs md construction", e, integration['id']) + + # AUTHENTICATIONS elif mode == 'authentication': if True: @@ -339,27 +414,35 @@ def write_to_file(path, md, meta_yaml, sidebar_label, community, mode='default') except KeyError: # We don't need to print something here. 
pass - elif mode == 'notification': + elif mode == 'cloud-notification': - if "cloud-notifications" in path: - # for cloud notifications we generate them near their metadata.yaml - name = clean_string(integration['meta']['name']) + # for cloud notifications we generate them near their metadata.yaml + name = clean_string(integration['meta']['name']) - if not Path(f'{path}/integrations').exists(): - Path(f'{path}/integrations').mkdir() + if not Path(f'{path}/integrations').exists(): + Path(f'{path}/integrations').mkdir() - # proper_edit_name = meta_yaml.replace( - # "metadata.yaml", f'integrations/{clean_string(sidebar_label)}.md\"') + # proper_edit_name = meta_yaml.replace( + # "metadata.yaml", f'integrations/{clean_string(sidebar_label)}.md\"') - md = add_custom_edit_url(md, meta_yaml, sidebar_label, mode='cloud-notifications') + md = add_custom_edit_url(md, meta_yaml, sidebar_label, mode='cloud-notification') - finalpath = f'{path}/integrations/{name}.md' - else: - # add custom_edit_url as the md file, so we can have uniqueness in the ingest script - # afterwards the ingest will replace this metadata with meta_yaml - md = add_custom_edit_url(md, meta_yaml, sidebar_label, mode='agent-notifications') + finalpath = f'{path}/integrations/{name}.md' - finalpath = f'{path}/README.md' + try: + clean_and_write( + md, + Path(finalpath) + ) + except FileNotFoundError as e: + print("Exception in writing to file", e) + elif mode == 'agent-notification': + # add custom_edit_url as the md file, so we can have uniqueness in the ingest script + # afterwards the ingest will replace this metadata with meta_yaml + + md = add_custom_edit_url(md, meta_yaml, sidebar_label, mode='agent-notification') + + finalpath = f'{path}/README.md' try: clean_and_write( @@ -369,7 +452,29 @@ def write_to_file(path, md, meta_yaml, sidebar_label, community, mode='default') except FileNotFoundError as e: print("Exception in writing to file", e) + elif mode == 'logs': + + # for logs we generate them near their metadata.yaml + name = clean_string(integration['meta']['name']) + + if not Path(f'{path}/integrations').exists(): + Path(f'{path}/integrations').mkdir() + + # proper_edit_name = meta_yaml.replace( + # "metadata.yaml", f'integrations/{clean_string(sidebar_label)}.md\"') + + md = add_custom_edit_url(md, meta_yaml, sidebar_label, mode='logs') + + finalpath = f'{path}/integrations/{name}.md' + + try: + clean_and_write( + md, + Path(finalpath) + ) + except FileNotFoundError as e: + print("Exception in writing to file", e) elif mode == 'authentication': name = clean_string(integration['meta']['name']) @@ -383,7 +488,7 @@ def write_to_file(path, md, meta_yaml, sidebar_label, community, mode='default') md = add_custom_edit_url(md, meta_yaml, sidebar_label, mode='cloud-authentication') finalpath = f'{path}/integrations/{name}.md' - + try: clean_and_write( md, @@ -422,7 +527,6 @@ cleanup() categories, integrations = read_integrations_js('integrations/integrations.js') - # Iterate through every integration for integration in integrations: @@ -442,20 +546,32 @@ for integration in integrations: path = build_path(meta_yaml) write_to_file(path, md, meta_yaml, sidebar_label, community) - # kind of specific if clause, so we can avoid running excessive code in the go repo - elif integration['integration_type'] == "notification": + elif integration['integration_type'] == "agent_notification": + + meta_yaml, sidebar_label, learn_rel_path, md, community = build_readme_from_integration( + integration, mode='agent-notification') + path = 
build_path(meta_yaml) + write_to_file(path, md, meta_yaml, sidebar_label, community, mode='agent-notification') + + elif integration['integration_type'] == "cloud_notification": meta_yaml, sidebar_label, learn_rel_path, md, community = build_readme_from_integration( - integration, mode='notification') + integration, mode='cloud-notification') path = build_path(meta_yaml) - write_to_file(path, md, meta_yaml, sidebar_label, community, mode='notification') + write_to_file(path, md, meta_yaml, sidebar_label, community, mode='cloud-notification') + + elif integration['integration_type'] == "logs": + + meta_yaml, sidebar_label, learn_rel_path, md, community = build_readme_from_integration( + integration, mode='logs') + path = build_path(meta_yaml) + write_to_file(path, md, meta_yaml, sidebar_label, community, mode='logs') elif integration['integration_type'] == "authentication": meta_yaml, sidebar_label, learn_rel_path, md, community = build_readme_from_integration( integration, mode='authentication') path = build_path(meta_yaml) - write_to_file(path, md, meta_yaml, sidebar_label, community, mode='authentication') - + write_to_file(path, md, meta_yaml, sidebar_label, community, mode='authentication') make_symlinks(symlink_dict) diff --git a/integrations/gen_integrations.py b/integrations/gen_integrations.py index b4516eebe..d0bd9ab84 100755 --- a/integrations/gen_integrations.py +++ b/integrations/gen_integrations.py @@ -4,7 +4,6 @@ import json import os import re import sys - from copy import deepcopy from pathlib import Path @@ -40,11 +39,18 @@ EXPORTER_SOURCES = [ (AGENT_REPO, REPO_PATH / 'src' / 'exporting', True), ] -NOTIFICATION_SOURCES = [ +AGENT_NOTIFICATION_SOURCES = [ (AGENT_REPO, REPO_PATH / 'src' / 'health' / 'notifications', True), +] + +CLOUD_NOTIFICATION_SOURCES = [ (AGENT_REPO, INTEGRATIONS_PATH / 'cloud-notifications' / 'metadata.yaml', False), ] +LOGS_SOURCES = [ + (AGENT_REPO, INTEGRATIONS_PATH / 'logs' / 'metadata.yaml', False), +] + AUTHENTICATION_SOURCES = [ (AGENT_REPO, INTEGRATIONS_PATH / 'cloud-authentication' / 'metadata.yaml', False), ] @@ -64,12 +70,22 @@ EXPORTER_RENDER_KEYS = [ 'troubleshooting', ] -NOTIFICATION_RENDER_KEYS = [ +AGENT_NOTIFICATION_RENDER_KEYS = [ 'overview', 'setup', 'troubleshooting', ] +CLOUD_NOTIFICATION_RENDER_KEYS = [ + 'setup', + 'troubleshooting', +] + +LOGS_RENDER_KEYS = [ + 'overview', + 'setup', +] + AUTHENTICATION_RENDER_KEYS = [ 'overview', 'setup', @@ -85,18 +101,18 @@ DEBUG = os.environ.get('DEBUG', False) def debug(msg): if GITHUB_ACTIONS: - print(f':debug:{ msg }') + print(f':debug:{msg}') elif DEBUG: - print(f'>>> { msg }') + print(f'>>> {msg}') else: pass def warn(msg, path): if GITHUB_ACTIONS: - print(f':warning file={ path }:{ msg }') + print(f':warning file={path}:{msg}') else: - print(f'!!! WARNING:{ path }:{ msg }') + print(f'!!! 
WARNING:{path}:{msg}') def retrieve_from_filesystem(uri): @@ -122,8 +138,18 @@ EXPORTER_VALIDATOR = Draft7Validator( registry=registry, ) -NOTIFICATION_VALIDATOR = Draft7Validator( - {'$ref': './notification.json#'}, +AGENT_NOTIFICATION_VALIDATOR = Draft7Validator( + {'$ref': './agent_notification.json#'}, + registry=registry, +) + +CLOUD_NOTIFICATION_VALIDATOR = Draft7Validator( + {'$ref': './cloud_notification.json#'}, + registry=registry, +) + +LOGS_VALIDATOR = Draft7Validator( + {'$ref': './logs.json#'}, registry=registry, ) @@ -209,19 +235,19 @@ def load_yaml(src): yaml = YAML(typ='safe') if not src.is_file(): - warn(f'{ src } is not a file.', src) + warn(f'{src} is not a file.', src) return False try: contents = src.read_text() except (IOError, OSError): - warn(f'Failed to read { src }.', src) + warn(f'Failed to read {src}.', src) return False try: data = yaml.load(contents) except YAMLError: - warn(f'Failed to parse { src } as YAML.', src) + warn(f'Failed to parse {src} as YAML.', src) return False return data @@ -236,7 +262,7 @@ def load_categories(): try: CATEGORY_VALIDATOR.validate(categories) except ValidationError: - warn(f'Failed to validate { CATEGORIES_FILE } against the schema.', CATEGORIES_FILE) + warn(f'Failed to validate {CATEGORIES_FILE} against the schema.', CATEGORIES_FILE) sys.exit(1) return categories @@ -248,7 +274,7 @@ def load_collectors(): entries = get_collector_metadata_entries() for repo, path in entries: - debug(f'Loading { path }.') + debug(f'Loading {path}.') data = load_yaml(path) if not data: @@ -257,7 +283,7 @@ def load_collectors(): try: COLLECTOR_VALIDATOR.validate(data) except ValidationError: - warn(f'Failed to validate { path } against the schema.', path) + warn(f'Failed to validate {path} against the schema.', path) continue for idx, item in enumerate(data['modules']): @@ -273,7 +299,7 @@ def load_collectors(): def _load_deploy_file(file, repo): ret = [] - debug(f'Loading { file }.') + debug(f'Loading {file}.') data = load_yaml(file) if not data: @@ -282,7 +308,7 @@ def _load_deploy_file(file, repo): try: DEPLOY_VALIDATOR.validate(data) except ValidationError: - warn(f'Failed to validate { file } against the schema.', file) + warn(f'Failed to validate {file} against the schema.', file) return [] for idx, item in enumerate(data): @@ -309,7 +335,7 @@ def load_deploy(): def _load_exporter_file(file, repo): - debug(f'Loading { file }.') + debug(f'Loading {file}.') data = load_yaml(file) if not data: @@ -318,7 +344,7 @@ def _load_exporter_file(file, repo): try: EXPORTER_VALIDATOR.validate(data) except ValidationError: - warn(f'Failed to validate { file } against the schema.', file) + warn(f'Failed to validate {file} against the schema.', file) return [] if 'id' in data: @@ -354,21 +380,113 @@ def load_exporters(): return ret -def _load_notification_file(file, repo): - debug(f'Loading { file }.') +def _load_agent_notification_file(file, repo): + debug(f'Loading {file}.') + data = load_yaml(file) + + if not data: + return [] + + try: + AGENT_NOTIFICATION_VALIDATOR.validate(data) + except ValidationError: + warn(f'Failed to validate {file} against the schema.', file) + return [] + + if 'id' in data: + data['integration_type'] = 'agent_notification' + data['_src_path'] = file + data['_repo'] = repo + data['_index'] = 0 + + return [data] + else: + ret = [] + + for idx, item in enumerate(data): + item['integration_type'] = 'agent_notification' + item['_src_path'] = file + item['_repo'] = repo + item['_index'] = idx + ret.append(item) + + return ret + + +def 
load_agent_notifications(): + ret = [] + + for repo, path, match in AGENT_NOTIFICATION_SOURCES: + if match and path.exists() and path.is_dir(): + for file in path.glob(METADATA_PATTERN): + ret.extend(_load_agent_notification_file(file, repo)) + elif not match and path.exists() and path.is_file(): + ret.extend(_load_agent_notification_file(path, repo)) + + return ret + + +def _load_cloud_notification_file(file, repo): + debug(f'Loading {file}.') + data = load_yaml(file) + + if not data: + return [] + + try: + CLOUD_NOTIFICATION_VALIDATOR.validate(data) + except ValidationError: + warn(f'Failed to validate {file} against the schema.', file) + return [] + + if 'id' in data: + data['integration_type'] = 'cloud_notification' + data['_src_path'] = file + data['_repo'] = repo + data['_index'] = 0 + + return [data] + else: + ret = [] + + for idx, item in enumerate(data): + item['integration_type'] = 'cloud_notification' + item['_src_path'] = file + item['_repo'] = repo + item['_index'] = idx + ret.append(item) + + return ret + + +def load_cloud_notifications(): + ret = [] + + for repo, path, match in CLOUD_NOTIFICATION_SOURCES: + if match and path.exists() and path.is_dir(): + for file in path.glob(METADATA_PATTERN): + ret.extend(_load_cloud_notification_file(file, repo)) + elif not match and path.exists() and path.is_file(): + ret.extend(_load_cloud_notification_file(path, repo)) + + return ret + + +def _load_logs_file(file, repo): + debug(f'Loading {file}.') data = load_yaml(file) if not data: return [] try: - NOTIFICATION_VALIDATOR.validate(data) + LOGS_VALIDATOR.validate(data) except ValidationError: - warn(f'Failed to validate { file } against the schema.', file) + warn(f'Failed to validate {file} against the schema.', file) return [] if 'id' in data: - data['integration_type'] = 'notification' + data['integration_type'] = 'logs' data['_src_path'] = file data['_repo'] = repo data['_index'] = 0 @@ -378,7 +496,7 @@ def _load_notification_file(file, repo): ret = [] for idx, item in enumerate(data): - item['integration_type'] = 'notification' + item['integration_type'] = 'logs' item['_src_path'] = file item['_repo'] = repo item['_index'] = idx @@ -387,20 +505,21 @@ def _load_notification_file(file, repo): return ret -def load_notifications(): +def load_logs(): ret = [] - for repo, path, match in NOTIFICATION_SOURCES: + for repo, path, match in LOGS_SOURCES: if match and path.exists() and path.is_dir(): for file in path.glob(METADATA_PATTERN): - ret.extend(_load_notification_file(file, repo)) + ret.extend(_load_logs_file(file, repo)) elif not match and path.exists() and path.is_file(): - ret.extend(_load_notification_file(path, repo)) + ret.extend(_load_logs_file(path, repo)) return ret + def _load_authentication_file(file, repo): - debug(f'Loading { file }.') + debug(f'Loading {file}.') data = load_yaml(file) if not data: @@ -409,7 +528,7 @@ def _load_authentication_file(file, repo): try: AUTHENTICATION_VALIDATOR.validate(data) except ValidationError: - warn(f'Failed to validate { file } against the schema.', file) + warn(f'Failed to validate {file} against the schema.', file) return [] if 'id' in data: @@ -453,13 +572,13 @@ def make_id(meta): else: instance_name = '000_unknown' - return f'{ meta["plugin_name"] }-{ meta["module_name"] }-{ instance_name }' + return f'{meta["plugin_name"]}-{meta["module_name"]}-{instance_name}' def make_edit_link(item): item_path = item['_src_path'].relative_to(REPO_PATH) - return f'https://github.com/{ item["_repo"] }/blob/master/{ item_path }' + return 
f'https://github.com/{item["_repo"]}/blob/master/{item_path}' def sort_integrations(integrations): @@ -474,7 +593,9 @@ def dedupe_integrations(integrations, ids): for i in integrations: if ids.get(i['id'], False): first_path, first_index = ids[i['id']] - warn(f'Duplicate integration ID found at { i["_src_path"] } index { i["_index"] } (original definition at { first_path } index { first_index }), ignoring that integration.', i['_src_path']) + warn( + f'Duplicate integration ID found at {i["_src_path"]} index {i["_index"]} (original definition at {first_path} index {first_index}), ignoring that integration.', + i['_src_path']) else: tmp_integrations.append(i) ids[i['id']] = (i['_src_path'], i['_index']) @@ -504,7 +625,7 @@ def render_collectors(categories, collectors, ids): idmap = {i['id']: i for i in collectors} for item in collectors: - debug(f'Processing { item["id"] }.') + debug(f'Processing {item["id"]}.') item['edit_link'] = make_edit_link(item) @@ -516,7 +637,7 @@ def render_collectors(categories, collectors, ids): res_id = make_id(res) if res_id not in idmap.keys(): - warn(f'Could not find related integration { res_id }, ignoring it.', item['_src_path']) + warn(f'Could not find related integration {res_id}, ignoring it.', item['_src_path']) continue related.append({ @@ -532,17 +653,19 @@ def render_collectors(categories, collectors, ids): actual_cats = item_cats & valid_cats if bogus_cats: - warn(f'Ignoring invalid categories: { ", ".join(bogus_cats) }', item["_src_path"]) + warn(f'Ignoring invalid categories: {", ".join(bogus_cats)}', item["_src_path"]) if not item_cats: item['meta']['monitored_instance']['categories'] = list(default_cats) - warn(f'{ item["id"] } does not list any caregories, adding it to: { default_cats }', item["_src_path"]) + warn(f'{item["id"]} does not list any caregories, adding it to: {default_cats}', item["_src_path"]) else: - item['meta']['monitored_instance']['categories'] = [x for x in item['meta']['monitored_instance']['categories'] if x in list(actual_cats)] + item['meta']['monitored_instance']['categories'] = [x for x in + item['meta']['monitored_instance']['categories'] if + x in list(actual_cats)] for scope in item['metrics']['scopes']: if scope['name'] == 'global': - scope['name'] = f'{ item["meta"]["monitored_instance"]["name"] } instance' + scope['name'] = f'{item["meta"]["monitored_instance"]["name"]} instance' for cfg_example in item['setup']['configuration']['examples']['list']: if 'folding' not in cfg_example: @@ -552,7 +675,7 @@ def render_collectors(categories, collectors, ids): for key in COLLECTOR_RENDER_KEYS: if key in item.keys(): - template = get_jinja_env().get_template(f'{ key }.md') + template = get_jinja_env().get_template(f'{key}.md') data = template.render(entry=item, related=related, clean=False) clean_data = template.render(entry=item, related=related, clean=True) @@ -589,7 +712,7 @@ def render_deploy(distros, categories, deploy, ids): template = get_jinja_env().get_template('platform_info.md') for item in deploy: - debug(f'Processing { item["id"] }.') + debug(f'Processing {item["id"]}.') item['edit_link'] = make_edit_link(item) clean_item = deepcopy(item) @@ -646,7 +769,7 @@ def render_exporters(categories, exporters, ids): for key in EXPORTER_RENDER_KEYS: if key in item.keys(): - template = get_jinja_env().get_template(f'{ key }.md') + template = get_jinja_env().get_template(f'{key}.md') data = template.render(entry=item, clean=False) clean_data = template.render(entry=item, clean=True) @@ -670,7 +793,50 @@ def 
render_exporters(categories, exporters, ids): return exporters, clean_exporters, ids -def render_notifications(categories, notifications, ids): +def render_agent_notifications(categories, notifications, ids): + debug('Sorting notifications.') + + sort_integrations(notifications) + + debug('Checking notification ids.') + + notifications, ids = dedupe_integrations(notifications, ids) + + clean_notifications = [] + + for item in notifications: + item['edit_link'] = make_edit_link(item) + + clean_item = deepcopy(item) + + for key in AGENT_NOTIFICATION_RENDER_KEYS: + if key in item.keys(): + template = get_jinja_env().get_template(f'{key}.md') + data = template.render(entry=item, clean=False) + + clean_data = template.render(entry=item, clean=True) + + if 'variables' in item['meta']: + template = get_jinja_env().from_string(data) + data = template.render(variables=item['meta']['variables'], clean=False) + template = get_jinja_env().from_string(clean_data) + clean_data = template.render(variables=item['meta']['variables'], clean=True) + else: + data = '' + clean_data = '' + + item[key] = data + clean_item[key] = clean_data + + for k in ['_src_path', '_repo', '_index']: + del item[k], clean_item[k] + + clean_notifications.append(clean_item) + + return notifications, clean_notifications, ids + + +def render_cloud_notifications(categories, notifications, ids): debug('Sorting notifications.') sort_integrations(notifications) @@ -686,9 +852,9 @@ def render_notifications(categories, notifications, ids): clean_item = deepcopy(item) - for key in NOTIFICATION_RENDER_KEYS: + for key in CLOUD_NOTIFICATION_RENDER_KEYS: if key in item.keys(): - template = get_jinja_env().get_template(f'{ key }.md') + template = get_jinja_env().get_template(f'{key}.md') data = template.render(entry=item, clean=False) clean_data = template.render(entry=item, clean=True) @@ -712,6 +878,48 @@ def render_notifications(categories, notifications, ids): return notifications, clean_notifications, ids +def render_logs(categories, logs, ids): + debug('Sorting logs.') + + sort_integrations(logs) + + debug('Checking log ids.') + + logs, ids = dedupe_integrations(logs, ids) + + clean_logs = [] + + for item in logs: + item['edit_link'] = make_edit_link(item) + + clean_item = deepcopy(item) + + for key in LOGS_RENDER_KEYS: + if key in item.keys(): + template = get_jinja_env().get_template(f'{key}.md') + data = template.render(entry=item, clean=False) + clean_data = template.render(entry=item, clean=True) + + if 'variables' in item['meta']: + template = get_jinja_env().from_string(data) + data = template.render(variables=item['meta']['variables'], clean=False) + template = get_jinja_env().from_string(clean_data) + clean_data = template.render(variables=item['meta']['variables'], clean=True) + else: + data = '' + clean_data = '' + + item[key] = data + clean_item[key] = clean_data + + for k in ['_src_path', '_repo', '_index']: + del item[k], clean_item[k] + + clean_logs.append(clean_item) + + return logs, clean_logs, ids + + def render_authentications(categories, authentications, ids): debug('Sorting authentications.') @@ -729,9 +937,9 @@ def render_authentications(categories, authentications, ids): clean_item = deepcopy(item) for key in AUTHENTICATION_RENDER_KEYS: - + if key in item.keys(): - template = get_jinja_env().get_template(f'{ key }.md') + template = get_jinja_env().get_template(f'{key}.md') data = template.render(entry=item, clean=False) clean_data = template.render(entry=item, clean=True) @@ -746,7 +954,7 @@ def 
render_authentications(categories, authentications, ids): item[key] = data clean_item[key] = clean_data - + for k in ['_src_path', '_repo', '_index']: del item[k], clean_item[k] @@ -755,12 +963,17 @@ def render_authentications(categories, authentications, ids): return authentications, clean_authentications, ids +def convert_local_links(text, prefix): + return text.replace("](/", f"]({prefix}/") + + def render_integrations(categories, integrations): template = get_jinja_env().get_template('integrations.js') data = template.render( categories=json.dumps(categories, indent=4), integrations=json.dumps(integrations, indent=4), ) + data = convert_local_links(data, "https://github.com/netdata/netdata/blob/master") OUTPUT_PATH.write_text(data) @@ -777,20 +990,23 @@ def main(): collectors = load_collectors() deploy = load_deploy() exporters = load_exporters() - notifications = load_notifications() + agent_notifications = load_agent_notifications() + cloud_notifications = load_cloud_notifications() + logs = load_logs() authentications = load_authentications() collectors, clean_collectors, ids = render_collectors(categories, collectors, dict()) deploy, clean_deploy, ids = render_deploy(distros, categories, deploy, ids) exporters, clean_exporters, ids = render_exporters(categories, exporters, ids) - notifications, clean_notifications, ids = render_notifications(categories, notifications, ids) + agent_notifications, clean_agent_notifications, ids = render_agent_notifications(categories, agent_notifications,ids) + cloud_notifications, clean_cloud_notifications, ids = render_cloud_notifications(categories, cloud_notifications,ids) + logs, clean_logs, ids = render_logs(categories, logs,ids) authentications, clean_authentications, ids = render_authentications(categories, authentications, ids) - - integrations = collectors + deploy + exporters + notifications + authentications + integrations = collectors + deploy + exporters + agent_notifications + cloud_notifications + logs + authentications render_integrations(categories, integrations) - clean_integrations = clean_collectors + clean_deploy + clean_exporters + clean_notifications + clean_authentications + clean_integrations = clean_collectors + clean_deploy + clean_exporters + clean_agent_notifications + clean_cloud_notifications + clean_logs + clean_authentications render_json(categories, clean_integrations) diff --git a/integrations/integrations.js b/integrations/integrations.js index 29a7e9919..e53ab7c8c 100644 --- a/integrations/integrations.js +++ b/integrations/integrations.js @@ -1013,46 +1013,6 @@ export const integrations = [ "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml", "related_resources": "" }, - { - "meta": { - "plugin_name": "charts.d.plugin", - "module_name": "apcupsd", - "monitored_instance": { - "name": "APC UPS", - "link": "https://www.apc.com", - "categories": [ - "data-collection.ups" - ], - "icon_filename": "apc.svg" - }, - "related_resources": { - "integrations": { - "list": [] - } - }, - "info_provided_to_referring_integrations": { - "description": "" - }, - "keywords": [ - "ups", - "apc", - "power", - "supply", - "battery", - "apcupsd" - ], - "most_popular": false - }, - "overview": "# APC UPS\n\nPlugin: charts.d.plugin\nModule: apcupsd\n\n## Overview\n\nMonitor APC UPS performance with Netdata for optimal uninterruptible power supply operations. 
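Worth pausing on `convert_local_links()`, added near the end of `gen_integrations.py` above: it is a single string replacement, but it is what turns every repo-relative markdown link in the generated `integrations.js` into an absolute GitHub URL (the effect is visible in the libreswan, opensips, and cups hunks below). A minimal, self-contained demo of that behavior:

```python
# Self-contained demo of the rewrite convert_local_links() performs in
# gen_integrations.py: repo-relative markdown links are prefixed so they
# resolve outside the netdata repository (e.g. on GitHub or Learn).
PREFIX = "https://github.com/netdata/netdata/blob/master"

def convert_local_links(text, prefix):
    return text.replace("](/", f"]({prefix}/")

sample = "Use the [config directory](/docs/netdata-agent/configuration/README.md)."
print(convert_local_links(sample, PREFIX))
# Use the [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md).
```

Because the replacement is purely textual, it assumes the generated content never contains `](/` outside a markdown link target.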
Enhance your power supply reliability with real-time APC UPS metrics.\n\nThe collector uses the `apcaccess` tool to contact the `apcupsd` daemon and get the APC UPS statistics.\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, with no configuration provided, the collector will try to contact 127.0.0.1:3551 with using the `apcaccess` utility.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install charts.d plugin\n\nIf [using our official native DEB/RPM packages](/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.\n\n\n#### Required software\n\nMake sure the `apcaccess` and `apcupsd` are installed and running.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `charts.d/apcupsd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config charts.d/apcupsd.conf\n```\n#### Options\n\nThe config file is sourced by the charts.d plugin. It's a standard bash file.\n\nThe following collapsed table contains all the options that can be configured for the apcupsd collector.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| apcupsd_sources | This is an array of apcupsd sources. You can have multiple entries there. Please refer to the example below on how to set it. | 127.0.0.1:3551 | no |\n| apcupsd_timeout | How long to wait for apcupsd to respond. | 3 | no |\n| apcupsd_update_every | The data collection frequency. If unset, will inherit the netdata update frequency. | 1 | no |\n| apcupsd_priority | The charts priority on the dashboard. | 90000 | no |\n| apcupsd_retries | The number of retries to do in case of failure before disabling the collector. | 10 | no |\n\n{% /details %}\n#### Examples\n\n##### Multiple apcupsd sources\n\nSpecify a multiple apcupsd sources along with a custom update interval\n\n```yaml\n# add all your APC UPSes in this array - uncomment it too\ndeclare -A apcupsd_sources=(\n [\"local\"]=\"127.0.0.1:3551\",\n [\"remote\"]=\"1.2.3.4:3551\"\n)\n\n# how long to wait for apcupsd to respond\n#apcupsd_timeout=3\n\n# the data collection frequency\n# if unset, will inherit the netdata update frequency\napcupsd_update_every=5\n\n# the charts priority on the dashboard\n#apcupsd_priority=90000\n\n# the number of retries to do in case of failure\n# before disabling the module\n#apcupsd_retries=10\n\n```\n", - "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n\nTo troubleshoot issues with the `apcupsd` collector, run the `charts.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `charts.d.plugin` to debug the collector:\n\n ```bash\n ./charts.d.plugin debug 1 apcupsd\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `apcupsd` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep apcupsd\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep apcupsd /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep apcupsd\n```\n\n", - "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ apcupsd_ups_charge ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.charge | average UPS charge over the last minute |\n| [ apcupsd_10min_ups_load ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.load | average UPS load over the last 10 minutes |\n| [ apcupsd_last_collected_secs ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.load | number of seconds since the last successful data collection |\n| [ apcupsd_selftest_warning ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.selftest | self-test failed due to insufficient battery capacity or due to overload. 
|\n| [ apcupsd_status_onbatt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS has switched to battery power because the input power has failed |\n| [ apcupsd_status_overload ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS is overloaded and cannot supply enough power to the load |\n| [ apcupsd_status_lowbatt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS battery is low and needs to be recharged |\n| [ apcupsd_status_replacebatt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS battery has reached the end of its lifespan and needs to be replaced |\n| [ apcupsd_status_nobatt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS has no battery |\n| [ apcupsd_status_commlost ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS communication link is lost |\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per ups\n\nMetrics related to UPS. Each UPS provides its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| apcupsd.charge | charge | percentage |\n| apcupsd.battery.voltage | voltage, nominal | Volts |\n| apcupsd.input.voltage | voltage, min, max | Volts |\n| apcupsd.output.voltage | absolute, nominal | Volts |\n| apcupsd.input.frequency | frequency | Hz |\n| apcupsd.load | load | percentage |\n| apcupsd.load_usage | load | Watts |\n| apcupsd.temperature | temp | Celsius |\n| apcupsd.time | time | Minutes |\n| apcupsd.online | online | boolean |\n| apcupsd.selftest | OK, NO, BT, NG | status |\n| apcupsd.status | ONLINE, ONBATT, OVERLOAD, LOWBATT, REPLACEBATT, NOBATT, SLAVE, SLAVEDOWN, COMMLOST, CAL, TRIM, BOOST, SHUTTING_DOWN | status |\n\n", - "integration_type": "collector", - "id": "charts.d.plugin-apcupsd-APC_UPS", - "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/charts.d.plugin/apcupsd/metadata.yaml", - "related_resources": "" - }, { "meta": { "plugin_name": "charts.d.plugin", @@ -1082,7 +1042,7 @@ export const integrations = [ "most_popular": false }, "overview": "# Libreswan\n\nPlugin: charts.d.plugin\nModule: libreswan\n\n## Overview\n\nMonitor Libreswan performance for optimal IPsec VPN operations. 
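The schema split in `gen_integrations.py` above (separate `agent_notification.json`, `cloud_notification.json`, and `logs.json` validators instead of the old shared `notification.json`) is easy to exercise in isolation. Below is a hedged sketch using a deliberately tiny stand-in schema, since the real schema files are resolved through the script's filesystem-backed `referencing` registry for cross-file `$ref`s:

```python
# Sketch of per-type validation as done by LOGS_VALIDATOR and friends.
# The schema here is a minimal stand-in, not the repository's logs.json.
from jsonschema import Draft7Validator, ValidationError

schema = {
    'type': 'object',
    'required': ['id', 'meta'],
    'properties': {'id': {'type': 'string'}, 'meta': {'type': 'object'}},
}

validator = Draft7Validator(schema)

def validate_logs_entry(data):
    try:
        validator.validate(data)
        return True
    except ValidationError as e:
        print(f'!!! WARNING: {e.message}')
        return False

print(validate_logs_entry({'id': 'systemd-journal', 'meta': {}}))  # True
print(validate_logs_entry({'meta': {}}))                           # False: 'id' is required
```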
Improve your VPN operations with Netdata''s real-time metrics and built-in alerts.\n\nThe collector uses the `ipsec` command to collect the information it needs.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install charts.d plugin\n\nIf [using our official native DEB/RPM packages](/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.\n\n\n#### Permissions to execute `ipsec`\n\nThe plugin executes 2 commands to collect all the information it needs:\n\n```sh\nipsec whack --status\nipsec whack --trafficstatus\n```\n\nThe first command is used to extract the currently established tunnels, their IDs and their names.\nThe second command is used to extract the current uptime and traffic.\n\nMost probably user `netdata` will not be able to query libreswan, so the `ipsec` commands will be denied.\nThe plugin attempts to run `ipsec` as `sudo ipsec ...`, to get access to libreswan statistics.\n\nTo allow user `netdata` execute `sudo ipsec ...`, create the file `/etc/sudoers.d/netdata` with this content:\n\n```\nnetdata ALL = (root) NOPASSWD: /sbin/ipsec whack --status\nnetdata ALL = (root) NOPASSWD: /sbin/ipsec whack --trafficstatus\n```\n\nMake sure the path `/sbin/ipsec` matches your setup (execute `which ipsec` to find the right path).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `charts.d/libreswan.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config charts.d/libreswan.conf\n```\n#### Options\n\nThe config file is sourced by the charts.d plugin. It's a standard bash file.\n\nThe following collapsed table contains all the options that can be configured for the libreswan collector.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| libreswan_update_every | The data collection frequency. If unset, will inherit the netdata update frequency. | 1 | no |\n| libreswan_priority | The charts priority on the dashboard | 90000 | no |\n| libreswan_retries | The number of retries to do in case of failure before disabling the collector. | 10 | no |\n| libreswan_sudo | Whether to run `ipsec` with `sudo` or not. 
| 1 | no |\n\n{% /details %}\n#### Examples\n\n##### Run `ipsec` without sudo\n\nRun the `ipsec` utility without sudo\n\n```yaml\n# the data collection frequency\n# if unset, will inherit the netdata update frequency\n#libreswan_update_every=1\n\n# the charts priority on the dashboard\n#libreswan_priority=90000\n\n# the number of retries to do in case of failure\n# before disabling the module\n#libreswan_retries=10\n\n# set to 1, to run ipsec with sudo (the default)\n# set to 0, to run ipsec without sudo\nlibreswan_sudo=0\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install charts.d plugin\n\nIf [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.\n\n\n#### Permissions to execute `ipsec`\n\nThe plugin executes 2 commands to collect all the information it needs:\n\n```sh\nipsec whack --status\nipsec whack --trafficstatus\n```\n\nThe first command is used to extract the currently established tunnels, their IDs and their names.\nThe second command is used to extract the current uptime and traffic.\n\nMost probably user `netdata` will not be able to query libreswan, so the `ipsec` commands will be denied.\nThe plugin attempts to run `ipsec` as `sudo ipsec ...`, to get access to libreswan statistics.\n\nTo allow user `netdata` execute `sudo ipsec ...`, create the file `/etc/sudoers.d/netdata` with this content:\n\n```\nnetdata ALL = (root) NOPASSWD: /sbin/ipsec whack --status\nnetdata ALL = (root) NOPASSWD: /sbin/ipsec whack --trafficstatus\n```\n\nMake sure the path `/sbin/ipsec` matches your setup (execute `which ipsec` to find the right path).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `charts.d/libreswan.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config charts.d/libreswan.conf\n```\n#### Options\n\nThe config file is sourced by the charts.d plugin. It's a standard bash file.\n\nThe following collapsed table contains all the options that can be configured for the libreswan collector.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| libreswan_update_every | The data collection frequency. If unset, will inherit the netdata update frequency. | 1 | no |\n| libreswan_priority | The charts priority on the dashboard | 90000 | no |\n| libreswan_retries | The number of retries to do in case of failure before disabling the collector. | 10 | no |\n| libreswan_sudo | Whether to run `ipsec` with `sudo` or not. 
| 1 | no |\n\n{% /details %}\n#### Examples\n\n##### Run `ipsec` without sudo\n\nRun the `ipsec` utility without sudo\n\n```yaml\n# the data collection frequency\n# if unset, will inherit the netdata update frequency\n#libreswan_update_every=1\n\n# the charts priority on the dashboard\n#libreswan_priority=90000\n\n# the number of retries to do in case of failure\n# before disabling the module\n#libreswan_retries=10\n\n# set to 1, to run ipsec with sudo (the default)\n# set to 0, to run ipsec without sudo\nlibreswan_sudo=0\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n\nTo troubleshoot issues with the `libreswan` collector, run the `charts.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `charts.d.plugin` to debug the collector:\n\n ```bash\n ./charts.d.plugin debug 1 libreswan\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `libreswan` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep libreswan\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep libreswan /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep libreswan\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per IPSEC tunnel\n\nMetrics related to IPSEC tunnels. Each tunnel provides its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| libreswan.net | in, out | kilobits/s |\n| libreswan.uptime | uptime | seconds |\n\n", @@ -1121,7 +1081,7 @@ export const integrations = [ "most_popular": false }, "overview": "# OpenSIPS\n\nPlugin: charts.d.plugin\nModule: opensips\n\n## Overview\n\nExamine OpenSIPS metrics for insights into SIP server operations. 
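`dedupe_integrations()`, reformatted above, is also what keeps the newly split integration types from colliding: IDs are tracked in one dict shared across collectors, deploy entries, exporters, agent and cloud notifications, logs, and authentications, and the first definition of an ID wins. A runnable condensation, with the script's `warn()` reduced to a plain `print`:

```python
# First definition of an integration ID wins; later duplicates are
# dropped with a warning pointing back at the original definition.
def dedupe_integrations(integrations, ids):
    deduped = []
    for i in integrations:
        if ids.get(i['id'], False):
            first_path, first_index = ids[i['id']]
            print(f'Duplicate integration ID found at {i["_src_path"]} index {i["_index"]} '
                  f'(original definition at {first_path} index {first_index}), ignoring it.')
        else:
            deduped.append(i)
            ids[i['id']] = (i['_src_path'], i['_index'])
    return deduped, ids

a = {'id': 'x', '_src_path': 'a/metadata.yaml', '_index': 0}
b = {'id': 'x', '_src_path': 'b/metadata.yaml', '_index': 3}
kept, ids = dedupe_integrations([a, b], {})
print([i['_src_path'] for i in kept])  # ['a/metadata.yaml']
```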
Study call rates, error rates, and response times for reliable voice over IP services.\n\nThe collector uses the `opensipsctl` command line utility to gather OpenSIPS metrics.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector will attempt to call `opensipsctl` along with a default number of parameters, even without any configuration.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install charts.d plugin\n\nIf [using our official native DEB/RPM packages](/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.\n\n\n#### Required software\n\nThe collector requires the `opensipsctl` to be installed.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `charts.d/opensips.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config charts.d/opensips.conf\n```\n#### Options\n\nThe config file is sourced by the charts.d plugin. It's a standard bash file.\n\nThe following collapsed table contains all the options that can be configured for the opensips collector.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| opensips_opts | Specify parameters to the `opensipsctl` command. If the default value fails to get global status, set here whatever options are needed to connect to the opensips server. | fifo get_statistics all | no |\n| opensips_cmd | If `opensipsctl` is not in $PATH, specify it's full path here. | | no |\n| opensips_timeout | How long to wait for `opensipsctl` to respond. | 2 | no |\n| opensips_update_every | The data collection frequency. If unset, will inherit the netdata update frequency. | 5 | no |\n| opensips_priority | The charts priority on the dashboard. | 80000 | no |\n| opensips_retries | The number of retries to do in case of failure before disabling the collector. 
| 10 | no |\n\n{% /details %}\n#### Examples\n\n##### Custom `opensipsctl` command\n\nSet a custom path to the `opensipsctl` command\n\n```yaml\n#opensips_opts=\"fifo get_statistics all\"\nopensips_cmd=/opt/opensips/bin/opensipsctl\n#opensips_timeout=2\n\n# the data collection frequency\n# if unset, will inherit the netdata update frequency\n#opensips_update_every=5\n\n# the charts priority on the dashboard\n#opensips_priority=80000\n\n# the number of retries to do in case of failure\n# before disabling the module\n#opensips_retries=10\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install charts.d plugin\n\nIf [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.\n\n\n#### Required software\n\nThe collector requires the `opensipsctl` to be installed.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `charts.d/opensips.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config charts.d/opensips.conf\n```\n#### Options\n\nThe config file is sourced by the charts.d plugin. It's a standard bash file.\n\nThe following collapsed table contains all the options that can be configured for the opensips collector.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| opensips_opts | Specify parameters to the `opensipsctl` command. If the default value fails to get global status, set here whatever options are needed to connect to the opensips server. | fifo get_statistics all | no |\n| opensips_cmd | If `opensipsctl` is not in $PATH, specify it's full path here. | | no |\n| opensips_timeout | How long to wait for `opensipsctl` to respond. | 2 | no |\n| opensips_update_every | The data collection frequency. If unset, will inherit the netdata update frequency. | 5 | no |\n| opensips_priority | The charts priority on the dashboard. | 80000 | no |\n| opensips_retries | The number of retries to do in case of failure before disabling the collector. | 10 | no |\n\n{% /details %}\n#### Examples\n\n##### Custom `opensipsctl` command\n\nSet a custom path to the `opensipsctl` command\n\n```yaml\n#opensips_opts=\"fifo get_statistics all\"\nopensips_cmd=/opt/opensips/bin/opensipsctl\n#opensips_timeout=2\n\n# the data collection frequency\n# if unset, will inherit the netdata update frequency\n#opensips_update_every=5\n\n# the charts priority on the dashboard\n#opensips_priority=80000\n\n# the number of retries to do in case of failure\n# before disabling the module\n#opensips_retries=10\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n\nTo troubleshoot issues with the `opensips` collector, run the `charts.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `charts.d.plugin` to debug the collector:\n\n ```bash\n ./charts.d.plugin debug 1 opensips\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `opensips` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep opensips\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep opensips /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep opensips\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
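All of the new `render_*` helpers (`render_agent_notifications`, `render_cloud_notifications`, `render_logs`) share the same two-pass Jinja pattern: the section template is first rendered with the integration entry, and the resulting markdown is rendered again so placeholders coming from `meta.variables` get substituted. A minimal sketch of that pattern, assuming only `jinja2`; the template text here is illustrative, not one of the repository's templates:

```python
from jinja2 import Environment

env = Environment()
entry = {'meta': {'name': 'Example', 'variables': {'port': 3551}}}

# Pass 1: fill the integration entry into the section template.
# {% raw %} protects placeholders that are meant for pass 2.
tpl = env.from_string(
    '# {{ entry.meta.name }}\n\nConnects on port {% raw %}{{ variables.port }}{% endraw %}.'
)
data = tpl.render(entry=entry)

# Pass 2: re-render the produced markdown with meta.variables, mirroring
# template.render(variables=item['meta']['variables'], ...) in the patch.
data = env.from_string(data).render(variables=entry['meta']['variables'])
print(data)  # "# Example" ... "Connects on port 3551."
```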
An instance is uniquely identified by a set of labels.\n\n\n\n### Per OpenSIPS instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| opensips.dialogs_active | active, early | dialogs |\n| opensips.users | registered, location, contacts, expires | users |\n| opensips.registrar | accepted, rejected | registrations/s |\n| opensips.transactions | UAS, UAC | transactions/s |\n| opensips.core_rcv | requests, replies | queries/s |\n| opensips.core_fwd | requests, replies | queries/s |\n| opensips.core_drop | requests, replies | queries/s |\n| opensips.core_err | requests, replies | queries/s |\n| opensips.core_bad | bad_URIs_rcvd, unsupported_methods, bad_msg_hdr | queries/s |\n| opensips.tm_replies | received, relayed, local | replies/s |\n| opensips.transactions_status | 2xx, 3xx, 4xx, 5xx, 6xx | transactions/s |\n| opensips.transactions_inuse | inuse | transactions |\n| opensips.sl_replies | 1xx, 2xx, 3xx, 4xx, 5xx, 6xx, sent, error, ACKed | replies/s |\n| opensips.dialogs | processed, expire, failed | dialogs/s |\n| opensips.net_waiting | UDP, TCP | kilobytes |\n| opensips.uri_checks | positive, negative | checks / sec |\n| opensips.traces | requests, replies | traces / sec |\n| opensips.shmem | total, used, real_used, max_used, free | kilobytes |\n| opensips.shmem_fragment | fragments | fragments |\n\n", @@ -1130,45 +1090,6 @@ export const integrations = [ "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/charts.d.plugin/opensips/metadata.yaml", "related_resources": "" }, - { - "meta": { - "plugin_name": "charts.d.plugin", - "module_name": "sensors", - "monitored_instance": { - "name": "Linux Sensors (sysfs)", - "link": "https://www.kernel.org/doc/Documentation/hwmon/sysfs-interface", - "categories": [ - "data-collection.hardware-devices-and-sensors" - ], - "icon_filename": "microchip.svg" - }, - "related_resources": { - "integrations": { - "list": [] - } - }, - "info_provided_to_referring_integrations": { - "description": "" - }, - "keywords": [ - "sensors", - "sysfs", - "hwmon", - "rpi", - "raspberry pi" - ], - "most_popular": false - }, - "overview": "# Linux Sensors (sysfs)\n\nPlugin: charts.d.plugin\nModule: sensors\n\n## Overview\n\nUse this collector when `lm-sensors` doesn't work on your device (e.g. 
for RPi temperatures).\nFor all other cases use the [Go collector](/src/go/plugin/go.d/modules/sensors/README.md), which supports multiple jobs, is more efficient and performs calculations on top of the kernel provided values.\"\n\n\nIt will provide charts for all configured system sensors, by reading sensors directly from the kernel.\nThe values graphed are the raw hardware values of the sensors.\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, the collector will try to read entries under `/sys/devices`\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install charts.d plugin\n\nIf [using our official native DEB/RPM packages](/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.\n\n\n#### Enable the sensors collector\n\nThe `sensors` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config directory](/docs/netdata-agent/configuration/README.md), which is typically at `/etc/netdata`, to edit the `charts.d.conf` file.\n\n```bash\ncd /etc/netdata # Replace this path with your Netdata config directory, if different\nsudo ./edit-config charts.d.conf\n```\n\nChange the value of the `sensors` setting to `force` and uncomment the line. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](/packaging/installer/README.md#maintaining-a-netdata-agent-installation) for your system.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `charts.d/sensors.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config charts.d/sensors.conf\n```\n#### Options\n\nThe config file is sourced by the charts.d plugin. It's a standard bash file.\n\nThe following collapsed table contains all the options that can be configured for the sensors collector.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| sensors_sys_dir | The directory the kernel exposes sensor data. | /sys/devices | no |\n| sensors_sys_depth | How deep in the tree to check for sensor data. | 10 | no |\n| sensors_source_update | If set to 1, the script will overwrite internal script functions with code generated ones. | 1 | no |\n| sensors_update_every | The data collection frequency. If unset, will inherit the netdata update frequency. | 1 | no |\n| sensors_priority | The charts priority on the dashboard. | 90000 | no |\n| sensors_retries | The number of retries to do in case of failure before disabling the collector. 
| 10 | no |\n\n{% /details %}\n#### Examples\n\n##### Set sensors path depth\n\nSet a different sensors path depth\n\n```yaml\n# the directory the kernel keeps sensor data\n#sensors_sys_dir=\"/sys/devices\"\n\n# how deep in the tree to check for sensor data\nsensors_sys_depth=5\n\n# if set to 1, the script will overwrite internal\n# script functions with code generated ones\n# leave to 1, is faster\n#sensors_source_update=1\n\n# the data collection frequency\n# if unset, will inherit the netdata update frequency\n#sensors_update_every=\n\n# the charts priority on the dashboard\n#sensors_priority=90000\n\n# the number of retries to do in case of failure\n# before disabling the module\n#sensors_retries=10\n\n```\n", - "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n\nTo troubleshoot issues with the `sensors` collector, run the `charts.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `charts.d.plugin` to debug the collector:\n\n ```bash\n ./charts.d.plugin debug 1 sensors\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `sensors` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep sensors\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep sensors /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep sensors\n```\n\n", - "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per sensor chip\n\nMetrics related to sensor chips. 
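For context on `generate_category_from_name()` in `gen_docs_integrations.py` (the function whose line wrapping is touched at the top of this patch): it walks a dotted category ID fragment by fragment through the nested category tree to recover the display path that Learn expects. The following is a hedged reconstruction under a simplified data shape, not the script's exact code:

```python
# Hedged reconstruction: map a dotted category id such as
# "logs.systemd" to a "/"-joined display path by descending the tree.
def category_name_from_id(fragments, categories):
    name = ''
    node = categories
    for fragment in fragments:
        for cat in node:
            if cat['id'].split('.')[-1] == fragment:
                name = f'{name}/{cat["name"]}'
                node = cat.get('children', [])
                break
    return name.lstrip('/')

cats = [{'id': 'logs', 'name': 'Logs', 'children': [
    {'id': 'logs.systemd', 'name': 'Systemd', 'children': []}]}]
print(category_name_from_id('logs.systemd'.split('.'), cats))  # Logs/Systemd
```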
Each chip provides its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| sensors.temp | {filename} | Celsius |\n| sensors.volt | {filename} | Volts |\n| sensors.curr | {filename} | Ampere |\n| sensors.power | {filename} | Watt |\n| sensors.fans | {filename} | Rotations / Minute |\n| sensors.energy | {filename} | Joule |\n| sensors.humidity | {filename} | Percent |\n\n", - "integration_type": "collector", - "id": "charts.d.plugin-sensors-Linux_Sensors_(sysfs)", - "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/charts.d.plugin/sensors/metadata.yaml", - "related_resources": "" - }, { "meta": { "plugin_name": "cups.plugin", @@ -1193,7 +1114,7 @@ export const integrations = [ "most_popular": false }, "overview": "# CUPS\n\nPlugin: cups.plugin\nModule: cups.plugin\n\n## Overview\n\nMonitor CUPS performance for achieving optimal printing system operations. Monitor job statuses, queue lengths, and error rates to ensure smooth printing tasks.\n\nThe plugin uses CUPS shared library to connect and monitor the server.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs to access the server. Netdata sets permissions during installation time to reach the server through its library.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin detects when CUPS server is running and tries to connect to it.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Minimum setup\n\nThe CUPS server must be installed and running. If you installed `netdata` using a package manager, it is also necessary to install the package `netdata-plugin-cups`.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:cups]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Additional parameters for the collector | | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Minimum setup\n\nThe CUPS server must be installed and running. 
If you installed `netdata` using a package manager, it is also necessary to install the package `netdata-plugin-cups`.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:cups]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Additional parameters for the collector | | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per CUPS instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cups.dests_state | idle, printing, stopped | dests |\n| cups.dests_option | total, acceptingjobs, shared | dests |\n| cups.job_num | pending, held, processing | jobs |\n| cups.job_size | pending, held, processing | KB |\n\n### Per destination\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cups.destination_job_num | pending, held, processing | jobs |\n| cups.destination_job_size | pending, held, processing | KB |\n\n", @@ -1230,7 +1151,7 @@ export const integrations = [ "most_popular": false }, "overview": "# System Memory Fragmentation\n\nPlugin: debugfs.plugin\nModule: /sys/kernel/debug/extfrag\n\n## Overview\n\nCollects memory fragmentation statistics from the Linux kernel\n\nParse data from `debugfs` file\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\nThis integration requires read access to files under `/sys/kernel/debug/extfrag`, which are accessible only to the root user by default. Netdata uses Linux Capabilities to give the plugin access to debugfs. `CAP_DAC_READ_SEARCH` is added automatically during installation. This capability allows bypassing file read permission checks and directory read and execute permission checks. 
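A hypothetical `netdata.conf` fragment combining the options above (the value shown is illustrative, not a shipped default):\n\n```ini\n[plugin:cups]\n update every = 5\n```\n\n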
"troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per CUPS instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cups.dests_state | idle, printing, stopped | dests |\n| cups.dests_option | total, acceptingjobs, shared | dests |\n| cups.job_num | pending, held, processing | jobs |\n| cups.job_size | pending, held, processing | KB |\n\n### Per destination\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cups.destination_job_num | pending, held, processing | jobs |\n| cups.destination_job_size | pending, held, processing | KB |\n\n", @@ -1230,7 +1151,7 @@ export const integrations = [ "most_popular": false }, "overview": "# System Memory Fragmentation\n\nPlugin: debugfs.plugin\nModule: /sys/kernel/debug/extfrag\n\n## Overview\n\nCollects memory fragmentation statistics from the Linux kernel.\n\nParses data from a `debugfs` file.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\nThis integration requires read access to files under `/sys/kernel/debug/extfrag`, which are accessible only to the root user by default. Netdata uses Linux Capabilities to give the plugin access to debugfs. `CAP_DAC_READ_SEARCH` is added automatically during installation. This capability allows bypassing file read permission checks and directory read and execute permission checks. If file capabilities are not usable, then the plugin is instead installed with the SUID bit set in permissions so that it runs as root.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAssuming that debugfs is mounted and the required permissions are available, this integration will automatically run by default.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### filesystem\n\nThe debugfs filesystem must be mounted on your host for the plugin to collect data. You can run the command (`sudo mount -t debugfs none /sys/kernel/debug/`) to mount it locally. It is also recommended to modify your fstab(5) to avoid having to mount the filesystem manually before starting netdata.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:debugfs]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Additional parameters for the collector | | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### filesystem\n\nThe debugfs filesystem must be mounted on your host for the plugin to collect data. You can run the command (`sudo mount -t debugfs none /sys/kernel/debug/`) to mount it locally. It is also recommended to modify your fstab(5) to avoid having to mount the filesystem manually before starting netdata.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:debugfs]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Additional parameters for the collector | | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n",
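To verify that the kernel actually exposes the fragmentation statistics, you can read the underlying files directly; a quick check, assuming the default debugfs mount point:\n\n```bash\nsudo cat /sys/kernel/debug/extfrag/extfrag_index\nsudo cat /sys/kernel/debug/extfrag/unusable_index\n```\n\n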
"troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nMonitor the overall memory fragmentation of the system.\n\n### Per node\n\nMemory fragmentation statistics for each NUMA node in the system.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| numa_node | The NUMA node the metrics are associated with. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.fragmentation_index_dma | order0, order1, order2, order3, order4, order5, order6, order7, order8, order9, order10 | index |\n| mem.fragmentation_index_dma32 | order0, order1, order2, order3, order4, order5, order6, order7, order8, order9, order10 | index |\n| mem.fragmentation_index_normal | order0, order1, order2, order3, order4, order5, order6, order7, order8, order9, order10 | index |\n\n", @@ -1268,7 +1189,7 @@ export const integrations = [ "most_popular": false }, "overview": "# Linux ZSwap\n\nPlugin: debugfs.plugin\nModule: /sys/kernel/debug/zswap\n\n## Overview\n\nCollects zswap performance metrics on Linux systems.\n\n\nParses data from a `debugfs` file.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\nThis integration requires read access to files under `/sys/kernel/debug/zswap`, which are accessible only to the root user by default. Netdata uses Linux Capabilities to give the plugin access to debugfs. `CAP_DAC_READ_SEARCH` is added automatically during installation. This capability allows bypassing file read permission checks and directory read and execute permission checks. If file capabilities are not usable, then the plugin is instead installed with the SUID bit set in permissions so that it runs as root.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAssuming that debugfs is mounted and the required permissions are available, this integration will automatically detect whether or not the system is using zswap.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### filesystem\n\nThe debugfs filesystem must be mounted on your host for the plugin to collect data. You can run the command (`sudo mount -t debugfs none /sys/kernel/debug/`) to mount it locally. It is also recommended to modify your fstab(5) to avoid having to mount the filesystem manually before starting netdata.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:debugfs]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Additional parameters for the collector | | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### filesystem\n\nThe debugfs filesystem must be mounted on your host for the plugin to collect data. You can run the command (`sudo mount -t debugfs none /sys/kernel/debug/`) to mount it locally. It is also recommended to modify your fstab(5) to avoid having to mount the filesystem manually before starting netdata; a sketch of such an entry follows.\n\n
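A minimal sketch of that fstab approach (the entry shown is the standard one for debugfs; adapt it to your system before use):\n\n```bash\n# add a debugfs entry to /etc/fstab so it is mounted automatically at boot\necho 'debugfs /sys/kernel/debug debugfs defaults 0 0' | sudo tee -a /etc/fstab\nsudo mount -a\n```\n\n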
\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:debugfs]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Additional parameters for the collector | | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nMonitor the performance statistics of zswap.\n\n### Per Linux ZSwap instance\n\nGlobal zswap performance metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.zswap_pool_compression_ratio | compression_ratio | ratio |\n| system.zswap_pool_compressed_size | compressed_size | bytes |\n| system.zswap_pool_raw_size | uncompressed_size | bytes |\n| system.zswap_rejections | compress_poor, kmemcache_fail, alloc_fail, reclaim_fail | rejections/s |\n| system.zswap_pool_limit_hit | limit | events/s |\n| system.zswap_written_back_raw_bytes | written_back | bytes/s |\n| system.zswap_same_filled_raw_size | same_filled | bytes |\n| system.zswap_duplicate_entry | duplicate | entries/s |\n\n", @@ -1304,7 +1225,7 @@ export const integrations = [ "most_popular": false }, "overview": "# Power Capping\n\nPlugin: debugfs.plugin\nModule: intel_rapl\n\n## Overview\n\nCollects power capping performance metrics on Linux systems.\n\n\nParses data from a `debugfs` file.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\nThis integration requires read access to files under `/sys/devices/virtual/powercap`, which are accessible only to the root user by default. Netdata uses Linux Capabilities to give the plugin access to debugfs. `CAP_DAC_READ_SEARCH` is added automatically during installation. This capability allows bypassing file read permission checks and directory read and execute permission checks. If file capabilities are not usable, then the plugin is instead installed with the SUID bit set in permissions so that it runs as root.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAssuming that the required permissions are available, this integration will automatically detect whether the system exposes Intel RAPL power capping zones.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### filesystem\n\nThe debugfs filesystem must be mounted on your host for the plugin to collect data. You can run the command (`sudo mount -t debugfs none /sys/kernel/debug/`) to mount it locally. It is also recommended to modify your fstab(5) to avoid having to mount the filesystem manually before starting netdata.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:debugfs]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Additional parameters for the collector | | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### filesystem\n\nThe debugfs filesystem must be mounted on your host for the plugin to collect data. You can run the command (`sudo mount -t debugfs none /sys/kernel/debug/`) to mount it locally. It is also recommended to modify your fstab(5) to avoid having to mount the filesystem manually before starting netdata.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:debugfs]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Additional parameters for the collector | | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n",
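Before enabling the integration, you can confirm that the kernel exposes RAPL zones at the path mentioned above; an illustrative check (these paths exist only on RAPL-capable hardware):\n\n```bash\nls /sys/devices/virtual/powercap/\ncat /sys/devices/virtual/powercap/intel-rapl:0/name 2>/dev/null\n```\n\n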
"troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nMonitor the Intel RAPL zones' consumption.\n\n### Per Power Capping instance\n\nGlobal Intel RAPL zones.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.powercap_intel_rapl_zone | Power | Watts |\n| cpu.powercap_intel_rapl_subzones | dram, core, uncore | Watts |\n\n", @@ -1347,7 +1268,7 @@ export const integrations = [ "most_popular": false }, "overview": "# Disk space\n\nPlugin: diskspace.plugin\nModule: diskspace.plugin\n\n## Overview\n\nMonitor disk space metrics for proficient storage management. Keep track of usage, free space, and error rates to prevent disk space issues.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin reads data from `/proc/self/mountinfo` and `/proc/diskstats`.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:proc:diskspace]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nYou can also specify options per mount point, in a `[plugin:proc:diskspace:mountpoint]` section.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| remove charts of unmounted disks | Remove the chart when a device is unmounted on the host. | yes | no |\n| check for new mount points every | How often to parse proc files and check for new mount points. | 15 | no |\n| exclude space metrics on paths | Do not show metrics (charts) for listed paths. This option accepts a netdata simple pattern. | /proc/* /sys/* /var/run/user/* /run/user/* /snap/* /var/lib/docker/* | no |\n| exclude space metrics on filesystems | Do not show metrics (charts) for listed filesystems. This option accepts a netdata simple pattern. | *gvfs *gluster* *s3fs *ipfs *davfs2 *httpfs *sshfs *gdfs *moosefs fusectl autofs | no |\n| exclude inode metrics on filesystems | Do not show metrics (charts) for listed filesystems. This option accepts a netdata simple pattern. | msdosfs msdos vfat overlayfs aufs* *unionfs | no |\n| space usage for all disks | Define if the plugin will show metrics for space usage. When the value is set to `auto`, the plugin will show the metrics if the filesystem or path was not discarded by the previous options. | auto | no |\n| inodes usage for all disks | Define if the plugin will show metrics for inode usage. When the value is set to `auto`, the plugin will show the metrics if the filesystem or path was not discarded by the previous options. | auto | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:proc:diskspace]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nYou can also specify options per mount point, in a `[plugin:proc:diskspace:mountpoint]` section.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| remove charts of unmounted disks | Remove the chart when a device is unmounted on the host. | yes | no |\n| check for new mount points every | How often to parse proc files and check for new mount points. | 15s | no |\n| exclude space metrics on paths | Do not show metrics (charts) for listed paths. This option accepts a netdata simple pattern. | /proc/* /sys/* /var/run/user/* /run/user/* /snap/* /var/lib/docker/* | no |\n| exclude space metrics on filesystems | Do not show metrics (charts) for listed filesystems. This option accepts a netdata simple pattern. | *gvfs *gluster* *s3fs *ipfs *davfs2 *httpfs *sshfs *gdfs *moosefs fusectl autofs | no |\n| exclude inode metrics on filesystems | Do not show metrics (charts) for listed filesystems. This option accepts a netdata simple pattern. | msdosfs msdos vfat overlayfs aufs* *unionfs | no |\n| space usage for all disks | Define if the plugin will show metrics for space usage. When the value is set to `auto`, the plugin will show the metrics if the filesystem or path was not discarded by the previous options. | auto | no |\n| inodes usage for all disks | Define if the plugin will show metrics for inode usage. When the value is set to `auto`, the plugin will show the metrics if the filesystem or path was not discarded by the previous options. | auto | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n",
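A hypothetical `netdata.conf` fragment using some of the options above (the values are illustrative; the exclude list follows the netdata simple pattern syntax):\n\n```ini\n[plugin:proc:diskspace]\n update every = 5\n check for new mount points every = 30s\n exclude space metrics on paths = /proc/* /sys/* /var/run/user/*\n```\n\n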
"troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ disk_space_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.space | disk ${label:mount_point} space utilization |\n| [ disk_inode_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.inodes | disk ${label:mount_point} inode utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per mount point\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mount_point | Path used to mount a filesystem |\n| filesystem | The filesystem used to format a partition. |\n| mount_root | Root directory where mount points are present. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.space | avail, used, reserved_for_root | GiB |\n| disk.inodes | avail, used, reserved_for_root | inodes |\n\n", @@ -1393,7 +1314,7 @@ export const integrations = [ "most_popular": false }, "overview": "# eBPF Cachestat\n\nPlugin: ebpf.plugin\nModule: cachestat\n\n## Overview\n\nMonitor Linux page cache events, giving users a general view of how the kernel manages files.\n\nAttaches tracing (kprobe, trampoline) to internal kernel functions, according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside your /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen the options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution; the latter is preferred. The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/cachestat.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/cachestat.conf\n```\n#### Options\n\nAll options are defined inside section `[global]`.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only the function call (`entry`) or also its return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead) and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`) the plugin will load one hash table per core, instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by Cloud. | 300 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside your /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen the options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution; the latter is preferred. The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/cachestat.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/cachestat.conf\n```\n#### Options\n\nAll options are defined inside section `[global]`.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only the function call (`entry`) or also its return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead) and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`) the plugin will load one hash table per core, instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by Cloud. | 300 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n",
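A hypothetical `ebpf.d/cachestat.conf` fragment enabling the apps and cgroups integrations described above (the values are illustrative, not recommendations):\n\n```ini\n[global]\n update every = 5\n apps = yes\n cgroups = yes\n ebpf type format = auto\n```\n\n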
"troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF Cachestat instance\n\nThese metrics show the total number of calls to functions inside the kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.cachestat_ratio | ratio | % |\n| mem.cachestat_dirties | dirty | page/s |\n| mem.cachestat_hits | hit | hits/s |\n| mem.cachestat_misses | miss | misses/s |\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_cachestat_hit_ratio | ratio | % |\n| app.ebpf_cachestat_dirty_pages | pages | page/s |\n| app.ebpf_cachestat_access | hits | hits/s |\n| app.ebpf_cachestat_misses | misses | misses/s |\n\n### Per cgroup\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.cachestat_ratio | ratio | % |\n| cgroup.cachestat_dirties | dirty | page/s |\n| cgroup.cachestat_hits | hit | hits/s |\n| cgroup.cachestat_misses | miss | misses/s |\n| services.cachestat_ratio | a dimension per systemd service | % |\n| services.cachestat_dirties | a dimension per systemd service | page/s |\n| services.cachestat_hits | a dimension per systemd service | hits/s |\n| services.cachestat_misses | a dimension per systemd service | misses/s |\n\n", @@ -1439,7 +1360,7 @@ export const integrations = [ "most_popular": false }, "overview": "# eBPF DCstat\n\nPlugin: ebpf.plugin\nModule: dcstat\n\n## Overview\n\nMonitor directory cache events per application, giving an overall view of files served from memory or from the storage device.\n\nAttaches tracing (kprobe, trampoline) to internal kernel functions, according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside your /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen the options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution; the latter is preferred. The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/dcstat.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/dcstat.conf\n```\n#### Options\n\nAll options are defined inside section `[global]`.\n\n\n{% details open=true summary=\"Config option\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only the function call (`entry`) or also its return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead) and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`) the plugin will load one hash table per core, instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by Cloud. | 300 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside your /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen the options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution; the latter is preferred. The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/dcstat.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/dcstat.conf\n```\n#### Options\n\nAll options are defined inside section `[global]`.\n\n\n{% details open=true summary=\"Config option\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only the function call (`entry`) or also its return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead) and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`) the plugin will load one hash table per core, instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by Cloud. | 300 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n",
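A quick way to run the kernel-option check described in the prerequisites (one of the two sources is usually present, depending on the distribution):\n\n```bash\nzcat /proc/config.gz 2>/dev/null | grep -E 'CONFIG_(KPROBES|BPF|BPF_SYSCALL|BPF_JIT)='\ngrep -E 'CONFIG_(KPROBES|BPF|BPF_SYSCALL|BPF_JIT)=' \"/boot/config-$(uname -r)\" 2>/dev/null\n```\n\n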
"troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_dc_ratio | ratio | % |\n| app.ebpf_dc_reference | files | files |\n| app.ebpf_dc_not_cache | files | files |\n| app.ebpf_dc_not_found | files | files |\n\n### Per filesystem\n\nThese metrics show the total number of calls to functions inside the kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filesystem.dc_reference | reference, slow, miss | files |\n| filesystem.dc_hit_ratio | ratio | % |\n\n### Per cgroup\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.dc_ratio | ratio | % |\n| cgroup.dc_reference | reference | files |\n| cgroup.dc_not_cache | slow | files |\n| cgroup.dc_not_found | miss | files |\n| services.dc_ratio | a dimension per systemd service | % |\n| services.dc_reference | a dimension per systemd service | files |\n| services.dc_not_cache | a dimension per systemd service | files |\n| services.dc_not_found | a dimension per systemd service | files |\n\n", @@ -1477,7 +1398,7 @@ export const integrations = [ "most_popular": false }, "overview": "# eBPF Disk\n\nPlugin: ebpf.plugin\nModule: disk\n\n## Overview\n\nMeasure latency for I/O events on disk.\n\nAttaches tracepoints to internal kernel functions.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside your /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen the options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution; the latter is preferred. The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/disk.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/disk.conf\n```\n#### Options\n\nAll options are defined inside section `[global]`.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only the function call (`entry`) or also its return (`return`). | entry | no |\n| lifetime | Set the default lifetime for the thread when enabled by Cloud. | 300 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside your /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen the options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution; the latter is preferred. The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/disk.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/disk.conf\n```\n#### Options\n\nAll options are defined inside section `[global]`.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only the function call (`entry`) or also its return (`return`). | entry | no |\n| lifetime | Set the default lifetime for the thread when enabled by Cloud. | 300 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per disk\n\nThese metrics measure latency for I/O events on every hard disk present on the host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.latency_io | latency | calls/s |\n\n", @@ -1525,7 +1446,7 @@ export const integrations = [ "most_popular": false }, "overview": "# eBPF Filedescriptor\n\nPlugin: ebpf.plugin\nModule: filedescriptor\n\n## Overview\n\nMonitor calls to the functions responsible for opening or closing file descriptors, and possible errors.\n\nAttaches tracing (kprobe and trampoline) to internal kernel functions, according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nDepending on the kernel version and on how frequently files are opened and closed, this thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n",
Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/fd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/fd.conf\n```\n#### Options\n\nAll options are defined inside section `[global]`.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether plugin will monitor the call (`entry`) for the functions or it will also monitor the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (Attach only `kprobe`), `co-re` (Plugin tries to use `trampoline` when available), and `auto` (plugin check OS configuration before to load). | auto | no |\n| ebpf co-re tracing | Select the attach method used by plugin when `co-re` is defined in previous option. Two options are available: `trampoline` (Option with lowest overhead), and `probe` (the same of legacy code). | trampoline | no |\n| maps per core | Define how plugin will load their hash maps. When enabled (`yes`) plugin will load one hash table per core, instead to have centralized information. | yes | no |\n| lifetime | Set default lifetime for thread when enabled by cloud. | 300 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside /boot/config file. Some cited names can be different accoring preferences of Linux distributions.\nWhen you do not have options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, this last is preferred. The kernel compilation has a well definedd pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image for boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if it is necessary.\n8. 
Then follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/fd.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/fd.conf\n```\n#### Options\n\nAll options are defined inside the `[global]` section.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only function calls (`entry`) or also monitor returns (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead), and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`) the plugin loads one hash table per core instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n{% /details %}\n#### Examples\nThe integration ships no configuration examples; an illustrative sketch follows.\n\n
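A minimal, hypothetical `[global]` section, mirroring the defaults from the table above (not a shipped example); the heredoc simply prints it so the snippet stays runnable:\n\n```bash\n# Print an illustrative [global] section for ebpf.d/fd.conf\ncat <<'EOF'\n[global]\n    update every = 5\n    ebpf load mode = entry\n    apps = no\n    cgroups = no\n    pid table size = 32768\n    lifetime = 300\nEOF\n```\n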
\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\nThese metrics show grouped information per cgroup/service.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.fd_open | open | calls/s |\n| cgroup.fd_open_error | open | calls/s |\n| cgroup.fd_closed | close | calls/s |\n| cgroup.fd_close_error | close | calls/s |\n| services.file_open | a dimension per systemd service | calls/s |\n| services.file_open_error | a dimension per systemd service | calls/s |\n| services.file_closed | a dimension per systemd service | calls/s |\n| services.file_close_error | a dimension per systemd service | calls/s |\n\n### Per eBPF Filedescriptor instance\n\nThese metrics show the total number of calls to functions inside the kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filesystem.file_descriptor | open, close | calls/s |\n| filesystem.file_error | open, close | calls/s |\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_file_open | calls | calls/s |\n| app.ebpf_file_open_error | calls | calls/s |\n| app.ebpf_file_closed | calls | calls/s |\n| app.ebpf_file_close_error | calls | calls/s |\n\n", @@ -1568,7 +1489,7 @@ export const integrations = [ "most_popular": false }, "overview": "# eBPF Filesystem\n\nPlugin: ebpf.plugin\nModule: filesystem\n\n## Overview\n\nMonitor latency for the main filesystem actions, such as I/O events.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck whether your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nThen follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. 
Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/filesystem.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/filesystem.conf\n```\n#### Options\n\nThis configuration file has two different sections. The `[global]` section overwrites default options, while `[filesystem]` allows the user to select the filesystems to monitor.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only function calls (`entry`) or also monitor returns (`return`). | entry | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n| btrfsdist | Enable or disable latency monitoring for functions associated with the btrfs filesystem. | yes | no |\n| ext4dist | Enable or disable latency monitoring for functions associated with the ext4 filesystem. | yes | no |\n| nfsdist | Enable or disable latency monitoring for functions associated with the nfs filesystem. | yes | no |\n| xfsdist | Enable or disable latency monitoring for functions associated with the xfs filesystem. | yes | no |\n| zfsdist | Enable or disable latency monitoring for functions associated with the zfs filesystem. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck whether your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nThen follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. 
Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/filesystem.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/filesystem.conf\n```\n
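After saving your changes, restart the Agent so the plugin reloads its configuration; the command below assumes a systemd-based installation (adjust for other init systems):\n\n```bash\n# Apply the new eBPF configuration by restarting Netdata\nsudo systemctl restart netdata\n```\n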
#### Options\n\nThis configuration file has two different sections. The `[global]` section overwrites default options, while `[filesystem]` allows the user to select the filesystems to monitor.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only function calls (`entry`) or also monitor returns (`return`). | entry | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n| btrfsdist | Enable or disable latency monitoring for functions associated with the btrfs filesystem. | yes | no |\n| ext4dist | Enable or disable latency monitoring for functions associated with the ext4 filesystem. | yes | no |\n| nfsdist | Enable or disable latency monitoring for functions associated with the nfs filesystem. | yes | no |\n| xfsdist | Enable or disable latency monitoring for functions associated with the xfs filesystem. | yes | no |\n| zfsdist | Enable or disable latency monitoring for functions associated with the zfs filesystem. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per filesystem\n\nLatency charts associated with filesystem actions.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filesystem.read_latency | latency period | calls/s |\n| filesystem.open_latency | latency period | calls/s |\n| filesystem.sync_latency | latency period | calls/s |\n\n### Per filesystem\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filesystem.write_latency | latency period | calls/s |\n\n### Per eBPF Filesystem instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filesystem.attributte_latency | latency period | calls/s |\n\n", @@ -1604,7 +1525,7 @@ export const integrations = [ "most_popular": false }, "overview": "# eBPF Hardirq\n\nPlugin: ebpf.plugin\nModule: hardirq\n\n## Overview\n\nMonitor latency for each HardIRQ available.\n\nAttach tracepoints to internal kernel functions.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck whether your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nThen follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/hardirq.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/hardirq.conf\n```\n#### Options\n\nAll options are defined inside the `[global]` section.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only function calls (`entry`) or also monitor returns (`return`). | entry | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck whether your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nThen follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. 
Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/hardirq.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/hardirq.conf\n```\n#### Options\n\nAll options are defined inside the `[global]` section.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only function calls (`entry`) or also monitor returns (`return`). | entry | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF Hardirq instance\n\nThese metrics show the latest timestamp for each hardIRQ available on the host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.hardirq_latency | hardirq names | milliseconds |\n
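\nAs an illustration, you can read this chart through the Agent's data API on a default local install (the query parameters shown are assumptions you may tune):\n\n```bash\n# Fetch the five most recent hardirq latency points as JSON from a local Agent\ncurl -s \"http://localhost:19999/api/v1/data?chart=system.hardirq_latency&points=5\"\n```\n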
\n", @@ -1641,7 +1562,7 @@ export const integrations = [ "most_popular": false }, "overview": "# eBPF MDflush\n\nPlugin: ebpf.plugin\nModule: mdflush\n\n## Overview\n\nMonitor when flush events happen between disks.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that `md_flush_request` is called. The estimated additional period of time is between 90 and 200 ms per call on kernels that do not have BTF technology.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck whether your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nThen follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/mdflush.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/mdflush.conf\n```\n#### Options\n\nAll options are defined inside the `[global]` section.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only function calls (`entry`) or also monitor returns (`return`). | entry | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck whether your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nThen follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. 
Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/mdflush.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/mdflush.conf\n```\n#### Options\n\nAll options are defined inside the `[global]` section.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only function calls (`entry`) or also monitor returns (`return`). | entry | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF MDflush instance\n\nNumber of times `md_flush_request` was called since the last collection.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mdstat.mdstat_flush | disk | flushes |\n\n", @@ -1679,7 +1600,7 @@ export const integrations = [ "most_popular": false }, "overview": "# eBPF Mount\n\nPlugin: ebpf.plugin\nModule: mount\n\n## Overview\n\nMonitor calls for the mount and umount syscalls.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT, CONFIG_HAVE_SYSCALL_TRACEPOINTS), files inside debugfs, and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90 and 200 ms per call on kernels that do not have BTF technology.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck whether your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. 
The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nThen follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/mount.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/mount.conf\n```\n#### Options\n\nAll options are defined inside the `[global]` section.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only function calls (`entry`) or also monitor returns (`return`). | entry | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead), and `probe` (the same as the legacy code). | trampoline | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck whether your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nThen follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. 
To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n
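You can verify the mount with a quick, illustrative check (not required by the plugin):\n\n```bash\n# Confirm that debugfs is mounted at the expected path\nmountpoint -q /sys/kernel/debug && echo \"debugfs is mounted\" || echo \"debugfs is not mounted\"\n```\n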
\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/mount.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/mount.conf\n```\n#### Options\n\nAll options are defined inside the `[global]` section.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only function calls (`entry`) or also monitor returns (`return`). | entry | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead), and `probe` (the same as the legacy code). | trampoline | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF Mount instance\n\nCalls for the mount and umount syscalls.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mount_points.call | mount, umount | calls/s |\n| mount_points.error | mount, umount | calls/s |\n\n", @@ -1724,7 +1645,7 @@ export const integrations = [ "most_popular": false }, "overview": "# eBPF OOMkill\n\nPlugin: ebpf.plugin\nModule: oomkill\n\n## Overview\n\nMonitor applications that reach an out-of-memory condition.\n\nAttach tracepoint to internal kernel functions.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck whether your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nThen follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/oomkill.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/oomkill.conf\n```\n#### Options\n\nOverwrite the default configuration to reduce the number of I/O events.\n\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck whether your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nThen follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. 
Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/oomkill.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/oomkill.conf\n```\n#### Options\n\nOverwrite the default configuration to reduce the number of I/O events.\n\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\nThese metrics show the cgroups/services that reached OOM.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.oomkills | cgroup name | kills |\n| services.oomkills | a dimension per systemd service | kills |\n\n### Per apps\n\nThese metrics show the applications that reached OOM.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.oomkill | kills | kills |\n\n", @@ -1808,7 +1729,7 @@ export const integrations = [ "most_popular": false }, "overview": "# eBPF Processes\n\nPlugin: ebpf.plugin\nModule: processes\n\n## Overview\n\nMonitor calls to functions that create tasks (threads and processes) inside the Linux kernel.\n\nAttach tracing (kprobe or tracepoint, and trampoline) to internal kernel functions.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck whether your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. 
Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nThen follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/process.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/process.conf\n```\n#### Options\n\nAll options are defined inside the `[global]` section.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only function calls (`entry`) or also monitor returns (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead), and `probe` (the same as the legacy code). This plugin will always try to attach a tracepoint, so the option here impacts only the function used to monitor task (thread and process) creation. | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`) the plugin loads one hash table per core instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck whether your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. 
Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nThen follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/process.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/process.conf\n```\n#### Options\n\nAll options are defined inside the `[global]` section.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only function calls (`entry`) or also monitor returns (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead), and `probe` (the same as the legacy code). This plugin will always try to attach a tracepoint, so the option here impacts only the function used to monitor task (thread and process) creation. | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`) the plugin loads one hash table per core instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF Processes instance\n\nThese metrics show the total number of calls to functions inside the kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.process_thread | process | calls/s |\n| system.process_status | process, zombie | difference |\n| system.exit | process | calls/s |\n| system.task_error | task | calls/s |\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.process_create | calls | calls/s |\n| app.thread_create | call | calls/s |\n| app.task_exit | call | calls/s |\n| app.task_close | call | calls/s |\n| app.task_error | app | calls/s |\n\n### Per cgroup\n\nThese metrics show grouped information per cgroup/service.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.process_create | process | calls/s |\n| cgroup.thread_create | thread | calls/s |\n| cgroup.task_exit | exit | calls/s |\n| cgroup.task_close | process | calls/s |\n| cgroup.task_error | process | calls/s |\n| services.process_create | a dimension per systemd service | calls/s |\n| services.thread_create | a dimension per systemd service | calls/s |\n| services.task_close | a dimension per systemd service | calls/s |\n| services.task_exit | a dimension per systemd service | calls/s |\n| services.task_error | a dimension per systemd service | calls/s |\n\n", @@ -1854,7 +1775,7 @@ export const integrations = [ "most_popular": false }, "overview": "# eBPF SHM\n\nPlugin: ebpf.plugin\nModule: shm\n\n## Overview\n\nMonitor syscalls responsible for manipulating shared memory.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90 and 200 ms per call on kernels that do not have BTF technology.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck whether your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. 
The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nThen follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/shm.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/shm.conf\n```\n#### Options\n\nThis configuration file has two different sections. The `[global]` section overwrites all default options, while `[syscalls]` allows the user to select the syscalls to monitor.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only function calls (`entry`) or also monitor returns (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead), and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`) the plugin loads one hash table per core instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n| shmget | Enable or disable monitoring for the syscall `shmget` | yes | no |\n| shmat | Enable or disable monitoring for the syscall `shmat` | yes | no |\n| shmdt | Enable or disable monitoring for the syscall `shmdt` | yes | no |\n| shmctl | Enable or disable monitoring for the syscall `shmctl` | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck whether your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. 
Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nThen follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/shm.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/shm.conf\n```\n#### Options\n\nThis configuration file has two different sections. The `[global]` section overwrites all default options, while `[syscalls]` allows the user to select the syscalls to monitor.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only function calls (`entry`) or also monitor returns (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead), and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`) the plugin loads one hash table per core instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. 
| 300 | no |\n| shmget | Enable or disable monitoring for the syscall `shmget` | yes | no |\n| shmat | Enable or disable monitoring for the syscall `shmat` | yes | no |\n| shmdt | Enable or disable monitoring for the syscall `shmdt` | yes | no |\n| shmctl | Enable or disable monitoring for the syscall `shmctl` | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\nThese metrics show grouped information per cgroup/service.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.shmget | get | calls/s |\n| cgroup.shmat | at | calls/s |\n| cgroup.shmdt | dt | calls/s |\n| cgroup.shmctl | ctl | calls/s |\n| services.shmget | a dimension per systemd service | calls/s |\n| services.shmat | a dimension per systemd service | calls/s |\n| services.shmdt | a dimension per systemd service | calls/s |\n| services.shmctl | a dimension per systemd service | calls/s |\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_shmget_call | calls | calls/s |\n| app.ebpf_shmat_call | calls | calls/s |\n| app.ebpf_shmdt_call | calls | calls/s |\n| app.ebpf_shmctl_call | calls | calls/s |\n\n### Per eBPF SHM instance\n\nThese metrics show the number of calls for the specified syscalls.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.shared_memory_calls | get, at, dt, ctl | calls/s |\n\n", @@ -1903,7 +1824,7 @@ export const integrations = [ "most_popular": false }, "overview": "# eBPF Socket\n\nPlugin: ebpf.plugin\nModule: socket\n\n## Overview\n\nMonitor bandwidth consumption per application for the TCP and UDP protocols.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90 and 200 ms per call on kernels that do not have BTF technology.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck whether your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. 
Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/network.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/network.conf\n```\n#### Options\n\nAll options are defined inside the `[global]` section. Options inside `network connections` are ignored for now.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor the function call (`entry`) or also monitor the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| bandwidth table size | Number of elements stored inside hash tables used to monitor calls per PID. | 16384 | no |\n| ipv4 connection table size | Number of elements stored inside hash tables used to monitor calls per IPv4 connection. | 16384 | no |\n| ipv6 connection table size | Number of elements stored inside hash tables used to monitor calls per IPv6 connection. | 16384 | no |\n| udp connection table size | Number of temporary elements stored inside hash tables used to monitor UDP connections. | 4096 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead), and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`) the plugin will load one hash table per core, instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file.
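For example, a quick check could look like the following (a minimal sketch; the exact boot config file name, such as /boot/config-$(uname -r), varies between distributions):\n\n```bash\n# Check the compressed in-kernel config first, then fall back to the boot config\nzgrep -E 'CONFIG_(KPROBES|BPF|BPF_SYSCALL|BPF_JIT)=' /proc/config.gz 2>/dev/null || grep -E 'CONFIG_(KPROBES|BPF|BPF_SYSCALL|BPF_JIT)=' /boot/config-$(uname -r)\n```\n\n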
Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/network.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/network.conf\n```\n#### Options\n\nAll options are defined inside the `[global]` section. Options inside `network connections` are ignored for now.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor the function call (`entry`) or also monitor the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| bandwidth table size | Number of elements stored inside hash tables used to monitor calls per PID. | 16384 | no |\n| ipv4 connection table size | Number of elements stored inside hash tables used to monitor calls per IPv4 connection. | 16384 | no |\n| ipv6 connection table size | Number of elements stored inside hash tables used to monitor calls per IPv6 connection. | 16384 | no |\n| udp connection table size | Number of temporary elements stored inside hash tables used to monitor UDP connections. | 4096 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead), and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`) the plugin will load one hash table per core, instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud.
| 300 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF Socket instance\n\nThese metrics show the total number of calls to functions inside the kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ip.inbound_conn | connection_tcp | connections/s |\n| ip.tcp_outbound_conn | received | connections/s |\n| ip.tcp_functions | received, send, closed | calls/s |\n| ip.total_tcp_bandwidth | received, send | kilobits/s |\n| ip.tcp_error | received, send | calls/s |\n| ip.tcp_retransmit | retransmited | calls/s |\n| ip.udp_functions | received, send | calls/s |\n| ip.total_udp_bandwidth | received, send | kilobits/s |\n| ip.udp_error | received, send | calls/s |\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_call_tcp_v4_connection | connections | connections/s |\n| app.ebpf_call_tcp_v6_connection | connections | connections/s |\n| app.ebpf_sock_total_bandwidth | received, sent | kilobits/s |\n| app.ebpf_call_tcp_sendmsg | calls | calls/s |\n| app.ebpf_call_tcp_cleanup_rbuf | calls | calls/s |\n| app.ebpf_call_tcp_retransmit | calls | calls/s |\n| app.ebpf_call_udp_sendmsg | calls | calls/s |\n| app.ebpf_call_udp_recvmsg | calls | calls/s |\n\n### Per cgroup\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.net_conn_ipv4 | connections | connections/s |\n| cgroup.net_conn_ipv6 | connections | connections/s |\n| cgroup.net_total_bandwidth | received, sent | kilobits/s |\n| cgroup.net_tcp_recv | calls | calls/s |\n| cgroup.net_tcp_send | calls | calls/s |\n| cgroup.net_retransmit | calls | calls/s |\n| cgroup.net_udp_send | calls | calls/s |\n| cgroup.net_udp_recv | calls | calls/s |\n| services.net_conn_ipv4 | connections | connections/s |\n| services.net_conn_ipv6 | connections | connections/s |\n| services.net_total_bandwidth | received, sent | kilobits/s |\n| services.net_tcp_recv | calls | calls/s |\n| services.net_tcp_send | calls | calls/s |\n| services.net_tcp_retransmit | calls | calls/s |\n| services.net_udp_send | calls | calls/s |\n| services.net_udp_recv | calls | calls/s |\n\n", @@ -1939,7 +1860,7 @@ export const integrations = [ "most_popular": false }, "overview": "# eBPF SoftIRQ\n\nPlugin: ebpf.plugin\nModule: softirq\n\n## Overview\n\nMonitor latency for each SoftIRQ available.\n\nAttach kprobe to internal kernel functions.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel.
Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time an internal kernel function monitored by this thread is called.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/softirq.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/softirq.conf\n```\n#### Options\n\nAll options are defined inside the `[global]` section.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor the function call (`entry`) or also monitor the return (`return`). | entry | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2.
Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/softirq.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/softirq.conf\n```\n#### Options\n\nAll options are defined inside the `[global]` section.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor the function call (`entry`) or also monitor the return (`return`). | entry | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF SoftIRQ instance\n\nThese metrics show the latency for each softIRQ available on the host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.softirq_latency | soft IRQs | milliseconds |\n\n", @@ -1986,7 +1907,7 @@ export const integrations = [ "most_popular": false }, "overview": "# eBPF SWAP\n\nPlugin: ebpf.plugin\nModule: swap\n\n## Overview\n\nMonitors swap I/O events and the applications executing them.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called.
The estimated additional time is between 90-200ms per call on kernels that do not have BTF technology.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/swap.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/swap.conf\n```\n#### Options\n\nAll options are defined inside the `[global]` section.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor the function call (`entry`) or also monitor the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead), and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`) the plugin will load one hash table per core, instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file.
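You can also check whether the kernel exposes BTF type information, which the `co-re` load mode relies on; a minimal check could be:\n\n```bash\n# If this file exists, the kernel ships BTF data and CO-RE loading is possible\nls /sys/kernel/btf/vmlinux\n```\n\n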
Note that some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/swap.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/swap.conf\n```\n#### Options\n\nAll options are defined inside the `[global]` section.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor the function call (`entry`) or also monitor the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead), and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`) the plugin will load one hash table per core, instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to.
An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\nThese metrics show grouped information per cgroup/service.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.swap_read | read | calls/s |\n| cgroup.swap_write | write | calls/s |\n| services.swap_read | a dimension per systemd service | calls/s |\n| services.swap_write | a dimension per systemd service | calls/s |\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_call_swap_readpage | a dimension per app group | calls/s |\n| app.ebpf_call_swap_writepage | a dimension per app group | calls/s |\n\n### Per eBPF SWAP instance\n\nThese metrics show the total number of calls to functions inside the kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.swapcalls | write, read | calls/s |\n\n", @@ -2024,7 +1945,7 @@ export const integrations = [ "most_popular": false }, "overview": "# eBPF Sync\n\nPlugin: ebpf.plugin\nModule: sync\n\n## Overview\n\nMonitor the syscalls responsible for moving data from memory to storage devices.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT, CONFIG_HAVE_SYSCALL_TRACEPOINTS), files inside debugfs, and presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time an internal kernel function monitored by this thread is called. The estimated additional time is between 90-200ms per call on kernels that do not have BTF technology.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8.
Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/sync.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/sync.conf\n```\n#### Options\n\nThis configuration file has two different sections. The `[global]` section overwrites all default options, while `[syscalls]` allows the user to select the syscalls to monitor.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor the function call (`entry`) or also monitor the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead), and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`) the plugin will load one hash table per core, instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n| sync | Enable or disable monitoring for syscall `sync` | yes | no |\n| msync | Enable or disable monitoring for syscall `msync` | yes | no |\n| fsync | Enable or disable monitoring for syscall `fsync` | yes | no |\n| fdatasync | Enable or disable monitoring for syscall `fdatasync` | yes | no |\n| syncfs | Enable or disable monitoring for syscall `syncfs` | yes | no |\n| sync_file_range | Enable or disable monitoring for syscall `sync_file_range` | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3.
Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/sync.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/sync.conf\n```\n#### Options\n\nThis configuration file has two different sections. The `[global]` section overwrites all default options, while `[syscalls]` allows the user to select the syscalls to monitor.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor the function call (`entry`) or also monitor the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead), and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`) the plugin will load one hash table per core, instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n| sync | Enable or disable monitoring for syscall `sync` | yes | no |\n| msync | Enable or disable monitoring for syscall `msync` | yes | no |\n| fsync | Enable or disable monitoring for syscall `fsync` | yes | no |\n| fdatasync | Enable or disable monitoring for syscall `fdatasync` | yes | no |\n| syncfs | Enable or disable monitoring for syscall `syncfs` | yes | no |\n| sync_file_range | Enable or disable monitoring for syscall `sync_file_range` | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ sync_freq ](https://github.com/netdata/netdata/blob/master/src/health/health.d/synchronization.conf) | mem.sync | number of sync() system calls.
Every call causes all pending modifications to filesystem metadata and cached file data to be written to the underlying filesystems. |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF Sync instance\n\nThese metrics show the total number of calls to functions inside the kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.file_sync | fsync, fdatasync | calls/s |\n| mem.memory_map | msync | calls/s |\n| mem.sync | sync, syncfs | calls/s |\n| mem.file_segment | sync_file_range | calls/s |\n\n", @@ -2072,7 +1993,7 @@ export const integrations = [ "most_popular": false }, "overview": "# eBPF VFS\n\nPlugin: ebpf.plugin\nModule: vfs\n\n## Overview\n\nMonitor I/O events on the Linux Virtual Filesystem.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time an internal kernel function monitored by this thread is called. The estimated additional time is between 90-200ms per call on kernels that do not have BTF technology.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8.
Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/vfs.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/vfs.conf\n```\n#### Options\n\nAll options are defined inside the `[global]` section.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor the function call (`entry`) or also monitor the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead), and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`) the plugin will load one hash table per core, instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8.
Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/vfs.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/vfs.conf\n```\n#### Options\n\nAll options are defined inside the `[global]` section.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor the function call (`entry`) or also monitor the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead), and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`) the plugin will load one hash table per core, instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to.
An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\nThese metrics show grouped information per cgroup/service.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.vfs_unlink | delete | calls/s |\n| cgroup.vfs_write | write | calls/s |\n| cgroup.vfs_write_error | write | calls/s |\n| cgroup.vfs_read | read | calls/s |\n| cgroup.vfs_read_error | read | calls/s |\n| cgroup.vfs_write_bytes | write | bytes/s |\n| cgroup.vfs_read_bytes | read | bytes/s |\n| cgroup.vfs_fsync | fsync | calls/s |\n| cgroup.vfs_fsync_error | fsync | calls/s |\n| cgroup.vfs_open | open | calls/s |\n| cgroup.vfs_open_error | open | calls/s |\n| cgroup.vfs_create | create | calls/s |\n| cgroup.vfs_create_error | create | calls/s |\n| services.vfs_unlink | a dimension per systemd service | calls/s |\n| services.vfs_write | a dimension per systemd service | calls/s |\n| services.vfs_write_error | a dimension per systemd service | calls/s |\n| services.vfs_read | a dimension per systemd service | calls/s |\n| services.vfs_read_error | a dimension per systemd service | calls/s |\n| services.vfs_write_bytes | a dimension per systemd service | bytes/s |\n| services.vfs_read_bytes | a dimension per systemd service | bytes/s |\n| services.vfs_fsync | a dimension per systemd service | calls/s |\n| services.vfs_fsync_error | a dimension per systemd service | calls/s |\n| services.vfs_open | a dimension per systemd service | calls/s |\n| services.vfs_open_error | a dimension per systemd service | calls/s |\n| services.vfs_create | a dimension per systemd service | calls/s |\n| services.vfs_create_error | a dimension per systemd service | calls/s |\n\n### Per eBPF VFS instance\n\nThese metrics show the total number of calls to functions inside the kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filesystem.vfs_deleted_objects | delete | calls/s |\n| filesystem.vfs_io | read, write | calls/s |\n| filesystem.vfs_io_bytes | read, write | bytes/s |\n| filesystem.vfs_io_error | read, write | calls/s |\n| filesystem.vfs_fsync | fsync | calls/s |\n| filesystem.vfs_fsync_error | fsync | calls/s |\n| filesystem.vfs_open | open | calls/s |\n| filesystem.vfs_open_error | open | calls/s |\n| filesystem.vfs_create | create | calls/s |\n| filesystem.vfs_create_error | create | calls/s |\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration.
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_call_vfs_unlink | calls | calls/s |\n| app.ebpf_call_vfs_write | calls | calls/s |\n| app.ebpf_call_vfs_write_error | calls | calls/s |\n| app.ebpf_call_vfs_read | calls | calls/s |\n| app.ebpf_call_vfs_read_error | calls | calls/s |\n| app.ebpf_call_vfs_write_bytes | writes | bytes/s |\n| app.ebpf_call_vfs_read_bytes | reads | bytes/s |\n| app.ebpf_call_vfs_fsync | calls | calls/s |\n| app.ebpf_call_vfs_fsync_error | calls | calls/s |\n| app.ebpf_call_vfs_open | calls | calls/s |\n| app.ebpf_call_vfs_open_error | calls | calls/s |\n| app.ebpf_call_vfs_create | calls | calls/s |\n| app.ebpf_call_vfs_create_error | calls | calls/s |\n\n", @@ -2105,7 +2026,7 @@ export const integrations = [ "most_popular": false }, "overview": "# dev.cpu.0.freq\n\nPlugin: freebsd.plugin\nModule: dev.cpu.0.freq\n\n## Overview\n\nRead current CPU Scaling frequency.\n\nCurrent CPU Scaling Frequency\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `Config options`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config Config options\n```\n#### Options\n\n\n\n{% details open=true summary=\"\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| dev.cpu.0.freq | Enable or disable CPU Scaling frequency metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `Config options`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config Config options\n```\n#### Options\n\n\n\n{% details open=true summary=\"\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| dev.cpu.0.freq | Enable or disable CPU Scaling frequency metric. 
| yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per dev.cpu.0.freq instance\n\nThe metric shows the current status of the CPU frequency; it is directly affected by system load.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.scaling_cur_freq | frequency | MHz |\n\n", @@ -2138,7 +2059,7 @@ export const integrations = [ "most_popular": false }, "overview": "# dev.cpu.temperature\n\nPlugin: freebsd.plugin\nModule: dev.cpu.temperature\n\n## Overview\n\nGet current CPU temperature\n\nThe plugin calls the `sysctl` function to collect the necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| dev.cpu.temperature | Enable or disable CPU temperature metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax.
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| dev.cpu.temperature | Enable or disable CPU temperature metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per dev.cpu.temperature instance\n\nThis metric shows the latest CPU temperature.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.temperature | a dimension per core | Celsius |\n\n", @@ -2171,7 +2092,7 @@ export const integrations = [ "most_popular": false }, "overview": "# devstat\n\nPlugin: freebsd.plugin\nModule: devstat\n\n## Overview\n\nCollect information for each hard disk available on the host.\n\nThe plugin calls the `sysctl` function to collect the necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:kern.devstat]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enable new disks detected at runtime | Enable or disable the ability to detect new disks. | auto | no |\n| performance metrics for pass devices | Enable or disable metrics for disks with type `PASS`. | auto | no |\n| total bandwidth for all disks | Enable or disable total bandwidth metric for all disks.
| yes | no |\n| bandwidth for all disks | Enable or disable bandwidth for all disks metric. | auto | no |\n| operations for all disks | Enable or disable operations for all disks metric. | auto | no |\n| queued operations for all disks | Enable or disable queued operations for all disks metric. | auto | no |\n| utilization percentage for all disks | Enable or disable utilization percentage for all disks metric. | auto | no |\n| i/o time for all disks | Enable or disable I/O time for all disks metric. | auto | no |\n| average completed i/o time for all disks | Enable or disable average completed I/O time for all disks metric. | auto | no |\n| average completed i/o bandwidth for all disks | Enable or disable average completed I/O bandwidth for all disks metric. | auto | no |\n| average service time for all disks | Enable or disable average service time for all disks metric. | auto | no |\n| disable by default disks matching | Do not create charts for the disks listed. | | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:kern.devstat]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enable new disks detected at runtime | Enable or disable the ability to detect new disks. | auto | no |\n| performance metrics for pass devices | Enable or disable metrics for disks with type `PASS`. | auto | no |\n| total bandwidth for all disks | Enable or disable total bandwidth metric for all disks. | yes | no |\n| bandwidth for all disks | Enable or disable bandwidth for all disks metric. | auto | no |\n| operations for all disks | Enable or disable operations for all disks metric. | auto | no |\n| queued operations for all disks | Enable or disable queued operations for all disks metric. | auto | no |\n| utilization percentage for all disks | Enable or disable utilization percentage for all disks metric. | auto | no |\n| i/o time for all disks | Enable or disable I/O time for all disks metric. | auto | no |\n| average completed i/o time for all disks | Enable or disable average completed I/O time for all disks metric. | auto | no |\n| average completed i/o bandwidth for all disks | Enable or disable average completed I/O bandwidth for all disks metric. | auto | no |\n| average service time for all disks | Enable or disable average service time for all disks metric. | auto | no |\n| disable by default disks matching | Do not create charts for the disks listed.
| | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 10min_disk_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.util | average percentage of time ${label:device} disk was busy over the last 10 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per devstat instance\n\nThese metrics give a general vision about I/O events on disks.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.io | io, out | KiB/s |\n\n### Per disk\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.io | reads, writes, frees | KiB/s |\n| disk.ops | reads, writes, other, frees | operations/s |\n| disk.qops | operations | operations |\n| disk.util | utilization | % of time working |\n| disk.iotime | reads, writes, other, frees | milliseconds/s |\n| disk.await | reads, writes, other, frees | milliseconds/operation |\n| disk.avgsz | reads, writes, frees | KiB/operation |\n| disk.svctm | svctm | milliseconds/operation |\n\n", @@ -2204,7 +2125,7 @@ export const integrations = [ "most_popular": false }, "overview": "# getifaddrs\n\nPlugin: freebsd.plugin\nModule: getifaddrs\n\n## Overview\n\nCollect traffic per network interface.\n\nThe plugin calls `getifaddrs` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:getifaddrs]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enable new interfaces detected at runtime | Enable or disable possibility to discover new interface after plugin starts. | auto | no |\n| total bandwidth for physical interfaces | Enable or disable total bandwidth for physical interfaces metric. | auto | no |\n| total packets for physical interfaces | Enable or disable total packets for physical interfaces metric. 
| auto | no |\n| total bandwidth for ipv4 interface | Enable or disable total bandwidth for IPv4 interface metric. | auto | no |\n| total bandwidth for ipv6 interfaces | Enable or disable total bandwidth for ipv6 interfaces metric. | auto | no |\n| bandwidth for all interfaces | Enable or disable bandwidth for all interfaces metric. | auto | no |\n| packets for all interfaces | Enable or disable packets for all interfaces metric. | auto | no |\n| errors for all interfaces | Enable or disable errors for all interfaces metric. | auto | no |\n| drops for all interfaces | Enable or disable drops for all interfaces metric. | auto | no |\n| collisions for all interface | Enable or disable collisions for all interface metric. | auto | no |\n| disable by default interfaces matching | Do not display data for intterfaces listed. | lo* | no |\n| set physical interfaces for system.net | Do not show network traffic for listed interfaces. | igb* ix* cxl* em* ixl* ixlv* bge* ixgbe* vtnet* vmx* re* igc* dwc* | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:getifaddrs]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enable new interfaces detected at runtime | Enable or disable possibility to discover new interface after plugin starts. | auto | no |\n| total bandwidth for physical interfaces | Enable or disable total bandwidth for physical interfaces metric. | auto | no |\n| total packets for physical interfaces | Enable or disable total packets for physical interfaces metric. | auto | no |\n| total bandwidth for ipv4 interface | Enable or disable total bandwidth for IPv4 interface metric. | auto | no |\n| total bandwidth for ipv6 interfaces | Enable or disable total bandwidth for ipv6 interfaces metric. | auto | no |\n| bandwidth for all interfaces | Enable or disable bandwidth for all interfaces metric. | auto | no |\n| packets for all interfaces | Enable or disable packets for all interfaces metric. | auto | no |\n| errors for all interfaces | Enable or disable errors for all interfaces metric. | auto | no |\n| drops for all interfaces | Enable or disable drops for all interfaces metric. | auto | no |\n| collisions for all interface | Enable or disable collisions for all interface metric. | auto | no |\n| disable by default interfaces matching | Do not display data for interfaces listed. | lo* | no |\n| set physical interfaces for system.net | Do not show network traffic for listed interfaces. 
| igb* ix* cxl* em* ixl* ixlv* bge* ixgbe* vtnet* vmx* re* igc* dwc* | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ interface_speed ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.net | network interface ${label:device} current speed |\n| [ inbound_packets_dropped_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.drops | ratio of inbound dropped packets for the network interface ${label:device} over the last 10 minutes |\n| [ outbound_packets_dropped_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.drops | ratio of outbound dropped packets for the network interface ${label:device} over the last 10 minutes |\n| [ 1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ 10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n| [ interface_inbound_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.errors | number of inbound errors for the network interface ${label:device} in the last 10 minutes |\n| [ interface_outbound_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.errors | number of outbound errors for the network interface ${label:device} in the last 10 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per getifaddrs instance\n\nGeneral overview about network traffic.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.net | received, sent | kilobits/s |\n| system.packets | received, sent, multicast_received, multicast_sent | packets/s |\n| system.ipv4 | received, sent | kilobits/s |\n| system.ipv6 | received, sent | kilobits/s |\n\n### Per network device\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| net.net | received, sent | kilobits/s |\n| net.packets | received, sent, multicast_received, multicast_sent | packets/s |\n| net.errors | inbound, outbound | errors/s |\n| net.drops | inbound, outbound | drops/s |\n| net.events | collisions | events/s |\n\n", @@ -2237,7 +2158,7 @@ export const integrations = [ "most_popular": false }, "overview": "# getmntinfo\n\nPlugin: freebsd.plugin\nModule: getmntinfo\n\n## Overview\n\nCollect information per mount point.\n\nThe plugin calls `getmntinfo` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:getmntinfo]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enable new mount points detected at runtime | Cheeck new mount points during runtime. | auto | no |\n| space usage for all disks | Enable or disable space usage for all disks metric. | auto | no |\n| inodes usage for all disks | Enable or disable inodes usage for all disks metric. | auto | no |\n| exclude space metrics on paths | Do not show metrics for listed paths. | /proc/* | no |\n| exclude space metrics on filesystems | Do not monitor listed filesystems. | autofs procfs subfs devfs none | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:getmntinfo]` section within that file.\n\nThe file format is a modified INI syntax. 
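Before the general structure shown next, here is a hedged, illustrative sketch of what this syntax looks like for the `[plugin:freebsd:getmntinfo]` section named above (the option names are taken from the table further below, and the values shown are their documented defaults):\n\n```ini\n[plugin:freebsd:getmntinfo]\n # illustrative sketch only; the values below are the documented defaults\n enable new mount points detected at runtime = auto\n exclude space metrics on paths = /proc/*\n exclude space metrics on filesystems = autofs procfs subfs devfs none\n```\n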
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enable new mount points detected at runtime | Check new mount points during runtime. | auto | no |\n| space usage for all disks | Enable or disable space usage for all disks metric. | auto | no |\n| inodes usage for all disks | Enable or disable inodes usage for all disks metric. | auto | no |\n| exclude space metrics on paths | Do not show metrics for listed paths. | /proc/* | no |\n| exclude space metrics on filesystems | Do not monitor listed filesystems. | autofs procfs subfs devfs none | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ disk_space_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.space | disk ${label:mount_point} space utilization |\n| [ disk_inode_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.inodes | disk ${label:mount_point} inode utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per mount point\n\nThese metrics show details about mount point usage.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.space | avail, used, reserved_for_root | GiB |\n| disk.inodes | avail, used, reserved_for_root | inodes |\n\n", @@ -2270,7 +2191,7 @@ export const integrations = [ "most_popular": false }, "overview": "# hw.intrcnt\n\nPlugin: freebsd.plugin\nModule: hw.intrcnt\n\n## Overview\n\nGet the total number of interrupts.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config option\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| hw.intrcnt | Enable or disable Interrupts metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config option\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| hw.intrcnt | Enable or disable Interrupts metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per hw.intrcnt instance\n\nThese metrics show system interrupts frequency.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.intr | interrupts | interrupts/s |\n| system.interrupts | a dimension per interrupt | interrupts/s |\n\n", @@ -2303,7 +2224,7 @@ export const integrations = [ "most_popular": false }, "overview": "# ipfw\n\nPlugin: freebsd.plugin\nModule: ipfw\n\n## Overview\n\nCollect information about FreeBSD firewall.\n\nThe plugin uses RAW socket to communicate with kernel and collect data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:ipfw]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| counters for static rules | Enable or disable counters for static rules metric. | yes | no |\n| number of dynamic rules | Enable or disable number of dynamic rules metric. | yes | no |\n| allocated memory | Enable or disable allocated memory metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:ipfw]` section within that file.\n\nThe file format is a modified INI syntax. 
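Before the general structure shown next, a hedged, illustrative sketch of this syntax for the `[plugin:freebsd:ipfw]` section (the option names come from the table further below; the `no` value is hypothetical, only to show how one chart could be switched off):\n\n```ini\n[plugin:freebsd:ipfw]\n # illustrative only: keep static-rule counters, drop the memory chart\n counters for static rules = yes\n allocated memory = no\n```\n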
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| counters for static rules | Enable or disable counters for static rules metric. | yes | no |\n| number of dynamic rules | Enable or disable number of dynamic rules metric. | yes | no |\n| allocated memory | Enable or disable allocated memory metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per ipfw instance\n\nThese metrics show FreeBSD firewall statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipfw.mem | dynamic, static | bytes |\n| ipfw.packets | a dimension per static rule | packets/s |\n| ipfw.bytes | a dimension per static rule | bytes/s |\n| ipfw.active | a dimension per dynamic rule | rules |\n| ipfw.expired | a dimension per dynamic rule | rules |\n\n", @@ -2336,7 +2257,7 @@ export const integrations = [ "most_popular": false }, "overview": "# kern.cp_time\n\nPlugin: freebsd.plugin\nModule: kern.cp_time\n\n## Overview\n\nTotal CPU utilization\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nThe netdata main configuration file.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| kern.cp_time | Enable or disable Total CPU usage. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nThe netdata main configuration file.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| kern.cp_time | Enable or disable Total CPU usage. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU utilization over the last 10 minutes (excluding iowait, nice and steal) |\n| [ 10min_cpu_iowait ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU iowait time over the last 10 minutes |\n| [ 20min_steal_cpu ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU steal time over the last 20 minutes |\n| [ 10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU utilization over the last 10 minutes (excluding nice) |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per kern.cp_time instance\n\nThese metrics show CPU usage statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.cpu | nice, system, user, interrupt, idle | percentage |\n\n### Per core\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.cpu | nice, system, user, interrupt, idle | percentage |\n\n", @@ -2369,7 +2290,7 @@ export const integrations = [ "most_popular": false }, "overview": "# kern.ipc.msq\n\nPlugin: freebsd.plugin\nModule: kern.ipc.msq\n\n## Overview\n\nCollect number of IPC message Queues\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| kern.ipc.msq | Enable or disable IPC message queue metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| kern.ipc.msq | Enable or disable IPC message queue metric. 
| yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per kern.ipc.msq instance\n\nThese metrics show IPC message statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ipc_msq_queues | queues | queues |\n| system.ipc_msq_messages | messages | messages |\n| system.ipc_msq_size | allocated, used | bytes |\n\n", @@ -2402,7 +2323,7 @@ export const integrations = [ "most_popular": false }, "overview": "# kern.ipc.sem\n\nPlugin: freebsd.plugin\nModule: kern.ipc.sem\n\n## Overview\n\nCollect information about semaphores.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| kern.ipc.sem | Enable or disable semaphore metrics. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
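Before the general structure shown next, a hedged sketch of this syntax for the shared `[plugin:freebsd]` section (the option name comes from the table further below; `yes` is its documented default):\n\n```ini\n[plugin:freebsd]\n # documented default; setting this to 'no' would drop the semaphore charts\n kern.ipc.sem = yes\n```\n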
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| kern.ipc.sem | Enable or disable semaphore metrics. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ semaphores_used ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ipc.conf) | system.ipc_semaphores | IPC semaphore utilization |\n| [ semaphore_arrays_used ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ipc.conf) | system.ipc_semaphore_arrays | IPC semaphore arrays utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per kern.ipc.sem instance\n\nThese metrics show semaphore counters on the host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ipc_semaphores | semaphores | semaphores |\n| system.ipc_semaphore_arrays | arrays | arrays |\n\n", @@ -2435,7 +2356,7 @@ export const integrations = [ "most_popular": false }, "overview": "# kern.ipc.shm\n\nPlugin: freebsd.plugin\nModule: kern.ipc.shm\n\n## Overview\n\nCollect shared memory information.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| kern.ipc.shm | Enable or disable shared memory metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| kern.ipc.shm | Enable or disable shared memory metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per kern.ipc.shm instance\n\nThese metrics give status about current shared memory segments.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ipc_shared_mem_segs | segments | segments |\n| system.ipc_shared_mem_size | allocated | KiB |\n\n", @@ -2468,7 +2389,7 @@ export const integrations = [ "most_popular": false }, "overview": "# net.inet.icmp.stats\n\nPlugin: freebsd.plugin\nModule: net.inet.icmp.stats\n\n## Overview\n\nCollect information about ICMP traffic.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet.icmp.stats]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| IPv4 ICMP packets | Enable or disable IPv4 ICMP packets metric. | yes | no |\n| IPv4 ICMP error | Enable or disable IPv4 ICMP error metric. | yes | no |\n| IPv4 ICMP messages | Enable or disable IPv4 ICMP messages metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet.icmp.stats]` section within that file.\n\nThe file format is a modified INI syntax. 
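Before the general structure shown next, a hedged, illustrative sketch of this syntax for the `[plugin:freebsd:net.inet.icmp.stats]` section (option names and the `yes` defaults are taken from the table further below):\n\n```ini\n[plugin:freebsd:net.inet.icmp.stats]\n # illustrative sketch using the documented defaults\n IPv4 ICMP packets = yes\n IPv4 ICMP error = yes\n IPv4 ICMP messages = yes\n```\n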
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| IPv4 ICMP packets | Enable or disable IPv4 ICMP packets metric. | yes | no |\n| IPv4 ICMP error | Enable or disable IPv4 ICMP error metric. | yes | no |\n| IPv4 ICMP messages | Enable or disable IPv4 ICMP messages metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet.icmp.stats instance\n\nThese metrics show ICMP connections statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv4.icmp | received, sent | packets/s |\n| ipv4.icmp_errors | InErrors, OutErrors, InCsumErrors | packets/s |\n| ipv4.icmpmsg | InEchoReps, OutEchoReps, InEchos, OutEchos | packets/s |\n\n", @@ -2501,7 +2422,7 @@ export const integrations = [ "most_popular": false }, "overview": "# net.inet.ip.stats\n\nPlugin: freebsd.plugin\nModule: net.inet.ip.stats\n\n## Overview\n\nCollect IP stats\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet.ip.stats]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| ipv4 packets | Enable or disable IPv4 packets metric. | yes | no |\n| ipv4 fragments sent | Enable or disable IPv4 fragments sent metric. | yes | no |\n| ipv4 fragments assembly | Enable or disable IPv4 fragments assembly metric. | yes | no |\n| ipv4 errors | Enable or disable IPv4 errors metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet.ip.stats]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| ipv4 packets | Enable or disable IPv4 packets metric. | yes | no |\n| ipv4 fragments sent | Enable or disable IPv4 fragments sent metric. | yes | no |\n| ipv4 fragments assembly | Enable or disable IPv4 fragments assembly metric. | yes | no |\n| ipv4 errors | Enable or disable IPv4 errors metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet.ip.stats instance\n\nThese metrics show IPv4 connections statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv4.packets | received, sent, forwarded, delivered | packets/s |\n| ipv4.fragsout | ok, failed, created | packets/s |\n| ipv4.fragsin | ok, failed, all | packets/s |\n| ipv4.errors | InDiscards, OutDiscards, InHdrErrors, OutNoRoutes, InAddrErrors, InUnknownProtos | packets/s |\n\n", @@ -2534,7 +2455,7 @@ export const integrations = [ "most_popular": false }, "overview": "# net.inet.tcp.states\n\nPlugin: freebsd.plugin\nModule: net.inet.tcp.states\n\n## Overview\n\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| net.inet.tcp.states | Enable or disable TCP state metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| net.inet.tcp.states | Enable or disable TCP state metric. 
| yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ tcp_connections ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_conn.conf) | ipv4.tcpsock | IPv4 TCP connections utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet.tcp.states instance\n\nA counter for TCP connections.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv4.tcpsock | connections | active connections |\n\n", @@ -2567,7 +2488,7 @@ export const integrations = [ "most_popular": false }, "overview": "# net.inet.tcp.stats\n\nPlugin: freebsd.plugin\nModule: net.inet.tcp.stats\n\n## Overview\n\nCollect overall information about TCP connections.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet.tcp.stats]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| ipv4 TCP packets | Enable or disable ipv4 TCP packets metric. | yes | no |\n| ipv4 TCP errors | Enable or disable pv4 TCP errors metric. | yes | no |\n| ipv4 TCP handshake issues | Enable or disable ipv4 TCP handshake issue metric. | yes | no |\n| TCP connection aborts | Enable or disable TCP connection aborts metric. | auto | no |\n| TCP out-of-order queue | Enable or disable TCP out-of-order queue metric. | auto | no |\n| TCP SYN cookies | Enable or disable TCP SYN cookies metric. | auto | no |\n| TCP listen issues | Enable or disable TCP listen issues metric. | auto | no |\n| ECN packets | Enable or disable ECN packets metric. 
| auto | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet.tcp.stats]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| ipv4 TCP packets | Enable or disable ipv4 TCP packets metric. | yes | no |\n| ipv4 TCP errors | Enable or disable ipv4 TCP errors metric. | yes | no |\n| ipv4 TCP handshake issues | Enable or disable ipv4 TCP handshake issues metric. | yes | no |\n| TCP connection aborts | Enable or disable TCP connection aborts metric. | auto | no |\n| TCP out-of-order queue | Enable or disable TCP out-of-order queue metric. | auto | no |\n| TCP SYN cookies | Enable or disable TCP SYN cookies metric. | auto | no |\n| TCP listen issues | Enable or disable TCP listen issues metric. | auto | no |\n| ECN packets | Enable or disable ECN packets metric. | auto | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 1m_ipv4_tcp_resets_sent ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ipv4.tcphandshake | average number of sent TCP RESETS over the last minute |\n| [ 10s_ipv4_tcp_resets_sent ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ipv4.tcphandshake | average number of sent TCP RESETS over the last 10 seconds. This can indicate a port scan, or that a service running on this host has crashed. Netdata will not send a clear notification for this alarm. |\n| [ 1m_ipv4_tcp_resets_received ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ipv4.tcphandshake | average number of received TCP RESETS over the last minute |\n| [ 10s_ipv4_tcp_resets_received ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ipv4.tcphandshake | average number of received TCP RESETS over the last 10 seconds. This can be an indication that a service this host needs has crashed. Netdata will not send a clear notification for this alarm. |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet.tcp.stats instance\n\nThese metrics show TCP connections statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv4.tcppackets | received, sent | packets/s |\n| ipv4.tcperrors | InErrs, InCsumErrors, RetransSegs | packets/s |\n| ipv4.tcphandshake | EstabResets, ActiveOpens, PassiveOpens, AttemptFails | events/s |\n| ipv4.tcpconnaborts | baddata, userclosed, nomemory, timeout, linger | connections/s |\n| ipv4.tcpofo | inqueue | packets/s |\n| ipv4.tcpsyncookies | received, sent, failed | packets/s |\n| ipv4.tcplistenissues | overflows | packets/s |\n| ipv4.ecnpkts | InCEPkts, InECT0Pkts, InECT1Pkts, OutECT0Pkts, OutECT1Pkts | packets/s |\n\n", @@ -2600,7 +2521,7 @@ export const integrations = [ "most_popular": false }, "overview": "# net.inet.udp.stats\n\nPlugin: freebsd.plugin\nModule: net.inet.udp.stats\n\n## Overview\n\nCollect information about UDP connections.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet.udp.stats]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| ipv4 UDP packets | Enable or disable ipv4 UDP packets metric. | yes | no |\n| ipv4 UDP errors | Enable or disable ipv4 UDP errors metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet.udp.stats]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| ipv4 UDP packets | Enable or disable ipv4 UDP packets metric. | yes | no |\n| ipv4 UDP errors | Enable or disable ipv4 UDP errors metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 1m_ipv4_udp_receive_buffer_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/udp_errors.conf) | ipv4.udperrors | average number of UDP receive buffer errors over the last minute |\n| [ 1m_ipv4_udp_send_buffer_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/udp_errors.conf) | ipv4.udperrors | average number of UDP send buffer errors over the last minute |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet.udp.stats instance\n\nThese metrics show UDP connections statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv4.udppackets | received, sent | packets/s |\n| ipv4.udperrors | InErrors, NoPorts, RcvbufErrors, InCsumErrors, IgnoredMulti | events/s |\n\n", @@ -2633,7 +2554,7 @@ export const integrations = [ "most_popular": false }, "overview": "# net.inet6.icmp6.stats\n\nPlugin: freebsd.plugin\nModule: net.inet6.icmp6.stats\n\n## Overview\n\nCollect information about IPv6 ICMP.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet6.icmp6.stats]` section within that file.\n\nThe file format is a modified INI syntax. 
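The `net.inet.udp.stats` metadata likewise ships no example; a minimal hypothetical fragment built from its two documented options:

```ini
[plugin:freebsd:net.inet.udp.stats]
    ipv4 UDP packets = yes
    # drop the UDP error chart if it is not needed
    ipv4 UDP errors = no
```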
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| icmp | Enable or disable ICMP metric. | auto | no |\n| icmp redirects | Enable or disable ICMP redirects metric. | auto | no |\n| icmp errors | Enable or disable ICMP errors metric. | auto | no |\n| icmp echos | Enable or disable ICMP echos metric. | auto | no |\n| icmp router | Enable or disable ICMP router metric. | auto | no |\n| icmp neighbor | Enable or disable ICMP neighbor metric. | auto | no |\n| icmp types | Enable or disable ICMP types metric. | auto | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet6.icmp6.stats]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| icmp | Enable or disable ICMP metric. | auto | no |\n| icmp redirects | Enable or disable ICMP redirects metric. | auto | no |\n| icmp errors | Enable or disable ICMP errors metric. | auto | no |\n| icmp echos | Enable or disable ICMP echos metric. | auto | no |\n| icmp router | Enable or disable ICMP router metric. | auto | no |\n| icmp neighbor | Enable or disable ICMP neighbor metric. | auto | no |\n| icmp types | Enable or disable ICMP types metric. | auto | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
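For `net.inet6.icmp6.stats`, the same pattern applies; again only a sketch, using option names from the table above:

```ini
[plugin:freebsd:net.inet6.icmp6.stats]
    # keep auto-detection for ICMP, hide the redirects chart
    icmp = auto
    icmp redirects = no
```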
An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet6.icmp6.stats instance\n\nCollect IPv6 ICMP traffic statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv6.icmp | received, sent | messages/s |\n| ipv6.icmpredir | received, sent | redirects/s |\n| ipv6.icmperrors | InErrors, OutErrors, InCsumErrors, InDestUnreachs, InPktTooBigs, InTimeExcds, InParmProblems, OutDestUnreachs, OutTimeExcds, OutParmProblems | errors/s |\n| ipv6.icmpechos | InEchos, OutEchos, InEchoReplies, OutEchoReplies | messages/s |\n| ipv6.icmprouter | InSolicits, OutSolicits, InAdvertisements, OutAdvertisements | messages/s |\n| ipv6.icmpneighbor | InSolicits, OutSolicits, InAdvertisements, OutAdvertisements | messages/s |\n| ipv6.icmptypes | InType1, InType128, InType129, InType136, OutType1, OutType128, OutType129, OutType133, OutType135, OutType143 | messages/s |\n\n", @@ -2666,7 +2587,7 @@ export const integrations = [ "most_popular": false }, "overview": "# net.inet6.ip6.stats\n\nPlugin: freebsd.plugin\nModule: net.inet6.ip6.stats\n\n## Overview\n\nCollect information about IPv6 stats.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet6.ip6.stats]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| ipv6 packets | Enable or disable ipv6 packet metric. | auto | no |\n| ipv6 fragments sent | Enable or disable ipv6 fragments sent metric. | auto | no |\n| ipv6 fragments assembly | Enable or disable ipv6 fragments assembly metric. | auto | no |\n| ipv6 errors | Enable or disable ipv6 errors metric. | auto | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet6.ip6.stats]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| ipv6 packets | Enable or disable ipv6 packet metric. | auto | no |\n| ipv6 fragments sent | Enable or disable ipv6 fragments sent metric. | auto | no |\n| ipv6 fragments assembly | Enable or disable ipv6 fragments assembly metric. | auto | no |\n| ipv6 errors | Enable or disable ipv6 errors metric. | auto | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet6.ip6.stats instance\n\nThese metrics show general information about IPv6 connections.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv6.packets | received, sent, forwarded, delivers | packets/s |\n| ipv6.fragsout | ok, failed, all | packets/s |\n| ipv6.fragsin | ok, failed, timeout, all | packets/s |\n| ipv6.errors | InDiscards, OutDiscards, InHdrErrors, InAddrErrors, InTruncatedPkts, InNoRoutes, OutNoRoutes | packets/s |\n\n", @@ -2699,7 +2620,7 @@ export const integrations = [ "most_popular": false }, "overview": "# net.isr\n\nPlugin: freebsd.plugin\nModule: net.isr\n\n## Overview\n\nCollect information about system softnet stat.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.isr]` section within that file.\n\nThe file format is a modified INI syntax. 
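An illustrative fragment for `net.inet6.ip6.stats` (assumed values only; the upstream metadata provides no example):

```ini
[plugin:freebsd:net.inet6.ip6.stats]
    ipv6 packets = auto
    ipv6 errors = no
```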
The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| netisr | Enable or disable general vision about softnet stat metrics. | yes | no |\n| netisr per core | Enable or disable softnet stat metric per core. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.isr]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| netisr | Enable or disable the overall softnet stat metric. | yes | no |\n| netisr per core | Enable or disable softnet stat metric per core. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 1min_netdev_backlog_exceeded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/softnet.conf) | system.softnet_stat | average number of dropped packets in the last minute due to exceeded net.core.netdev_max_backlog |\n| [ 1min_netdev_budget_ran_outs ](https://github.com/netdata/netdata/blob/master/src/health/health.d/softnet.conf) | system.softnet_stat | average number of times ksoftirq ran out of sysctl net.core.netdev_budget or net.core.netdev_budget_usecs with work remaining over the last minute (this can be a cause for dropped packets) |\n| [ 10min_netisr_backlog_exceeded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/softnet.conf) | system.softnet_stat | average number of drops in the last minute due to exceeded sysctl net.route.netisr_maxqlen (this can be a cause for dropped packets) |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
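A hypothetical `net.isr` fragment, keeping the aggregate softnet chart while dropping the per-core breakdown:

```ini
[plugin:freebsd:net.isr]
    netisr = yes
    # per-core charts can be noisy on machines with many cores
    netisr per core = no
```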
An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.isr instance\n\nThese metrics show statistics about softnet stats.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.softnet_stat | dispatched, hybrid_dispatched, qdrops, queued | events/s |\n\n### Per core\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.softnet_stat | dispatched, hybrid_dispatched, qdrops, queued | events/s |\n\n", @@ -2732,7 +2653,7 @@ export const integrations = [ "most_popular": false }, "overview": "# system.ram\n\nPlugin: freebsd.plugin\nModule: system.ram\n\n## Overview\n\nShow information about system memory usage.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| system.ram | Enable or disable system RAM metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| system.ram | Enable or disable system RAM metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ram.conf) | system.ram | system memory utilization |\n| [ ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ram.conf) | system.ram | system memory utilization |\n| [ ram_available ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ram.conf) | mem.available | percentage of estimated amount of RAM available for userspace processes, without causing swapping |\n| [ ram_available ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ram.conf) | mem.available | percentage of estimated amount of RAM available for userspace processes, without causing swapping |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per system.ram instance\n\nThis metric shows RAM usage statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ram | free, active, inactive, wired, cache, laundry, buffers | MiB |\n| mem.available | avail | MiB |\n\n", @@ -2765,7 +2686,7 @@ export const integrations = [ "most_popular": false }, "overview": "# uptime\n\nPlugin: freebsd.plugin\nModule: uptime\n\n## Overview\n\nShow period of time server is up.\n\nThe plugin calls `clock_gettime` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
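The modules configured directly under `[plugin:freebsd]` are single-option toggles. As a sketch (option name taken from the table above), disabling the RAM chart would look like the following; the other one-option modules below follow the same pattern:

```ini
[plugin:freebsd]
    system.ram = no
```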
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.loadavg | Enable or disable load average metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.loadavg | Enable or disable load average metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per uptime instance\n\nHow long the system is running.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.uptime | uptime | seconds |\n\n", @@ -2798,7 +2719,7 @@ export const integrations = [ "most_popular": false }, "overview": "# vm.loadavg\n\nPlugin: freebsd.plugin\nModule: vm.loadavg\n\n## Overview\n\nSystem Load Average\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.loadavg | Enable or disable load average metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.loadavg | Enable or disable load average metric. 
| yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ load_cpu_number ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | number of active CPU cores in the system |\n| [ load_average_15 ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | system fifteen-minute load average |\n| [ load_average_5 ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | system five-minute load average |\n| [ load_average_1 ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | system one-minute load average |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.loadavg instance\n\nMonitoring for number of threads running or waiting.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.load | load1, load5, load15 | load |\n\n", @@ -2831,7 +2752,7 @@ export const integrations = [ "most_popular": false }, "overview": "# vm.stats.sys.v_intr\n\nPlugin: freebsd.plugin\nModule: vm.stats.sys.v_intr\n\n## Overview\n\nDevice interrupts\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config option\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.stats.sys.v_intr | Enable or disable device interrupts metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
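For illustration, the equivalent one-line toggle for the load average module:

```ini
[plugin:freebsd]
    vm.loadavg = no
```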
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config option\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.stats.sys.v_intr | Enable or disable device interrupts metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.stats.sys.v_intr instance\n\nThe metric show device interrupt frequency.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.dev_intr | interrupts | interrupts/s |\n\n", @@ -2864,7 +2785,7 @@ export const integrations = [ "most_popular": false }, "overview": "# vm.stats.sys.v_soft\n\nPlugin: freebsd.plugin\nModule: vm.stats.sys.v_soft\n\n## Overview\n\nSoftware Interrupt\n\nvm.stats.sys.v_soft\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config option\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.stats.sys.v_soft | Enable or disable software inerrupts metric. 
| yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config option\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.stats.sys.v_soft | Enable or disable software interrupts metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.stats.sys.v_soft instance\n\nThis metric shows software interrupt frequency.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.soft_intr | interrupts | interrupts/s |\n\n", @@ -2897,7 +2818,7 @@ export const integrations = [ "most_popular": false }, "overview": "# vm.stats.sys.v_swtch\n\nPlugin: freebsd.plugin\nModule: vm.stats.sys.v_swtch\n\n## Overview\n\nCPU context switch\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.stats.sys.v_swtch | Enable or disable CPU context switch metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.stats.sys.v_swtch | Enable or disable CPU context switch metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
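Likewise, a hypothetical toggle for the context-switch module documented above:

```ini
[plugin:freebsd]
    vm.stats.sys.v_swtch = no
```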
An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.stats.sys.v_swtch instance\n\nThe metric count the number of context switches happening on host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ctxt | switches | context switches/s |\n| system.forks | started | processes/s |\n\n", @@ -2930,7 +2851,7 @@ export const integrations = [ "most_popular": false }, "overview": "# vm.stats.vm.v_pgfaults\n\nPlugin: freebsd.plugin\nModule: vm.stats.vm.v_pgfaults\n\n## Overview\n\nCollect memory page faults events.\n\nThe plugin calls `sysctl` function to collect necessary data\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.stats.vm.v_pgfaults | Enable or disable Memory page fault metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.stats.vm.v_pgfaults | Enable or disable Memory page fault metric. 
| yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.stats.vm.v_pgfaults instance\n\nThe number of page faults happened on host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.pgfaults | memory, io_requiring, cow, cow_optimized, in_transit | page faults/s |\n\n", @@ -2963,7 +2884,7 @@ export const integrations = [ "most_popular": false }, "overview": "# vm.stats.vm.v_swappgs\n\nPlugin: freebsd.plugin\nModule: vm.stats.vm.v_swappgs\n\n## Overview\n\nThe metric swap amount of data read from and written to SWAP.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.stats.vm.v_swappgs | Enable or disable infoormation about SWAP I/O metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.stats.vm.v_swappgs | Enable or disable information about SWAP I/O metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 30min_ram_swapped_out ](https://github.com/netdata/netdata/blob/master/src/health/health.d/swap.conf) | mem.swapio | percentage of the system RAM swapped in the last 30 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.stats.vm.v_swappgs instance\n\nThis metric shows events happening on SWAP.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.swapio | io, out | KiB/s |\n\n", @@ -2996,7 +2917,7 @@ export const integrations = [ "most_popular": false }, "overview": "# vm.swap_info\n\nPlugin: freebsd.plugin\nModule: vm.swap_info\n\n## Overview\n\nCollect information about SWAP memory.\n\nThe plugin calls `sysctlnametomib` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.swap_info | Enable or disable SWAP metrics. 
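An illustrative toggle for the SWAP I/O module above:

```ini
[plugin:freebsd]
    vm.stats.vm.v_swappgs = no
```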
| yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.swap_info | Enable or disable SWAP metrics. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ used_swap ](https://github.com/netdata/netdata/blob/master/src/health/health.d/swap.conf) | mem.swap | swap memory utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.swap_info instance\n\nThis metric shows the SWAP usage.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.swap | free, used | MiB |\n\n", @@ -3029,7 +2950,7 @@ export const integrations = [ "most_popular": false }, "overview": "# vm.vmtotal\n\nPlugin: freebsd.plugin\nModule: vm.vmtotal\n\n## Overview\n\nCollect Virtual Memory information from host.\n\nThe plugin calls function `sysctl` to collect data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:vm.vmtotal]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enable total processes | Number of active processes. | yes | no |\n| processes running | Show number of processes running or blocked. | yes | no |\n| real memory | Memeory used on host. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:vm.vmtotal]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enable total processes | Number of active processes. | yes | no |\n| processes running | Show number of processes running or blocked. | yes | no |\n| real memory | Memory used on host. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ active_processes ](https://github.com/netdata/netdata/blob/master/src/health/health.d/processes.conf) | system.active_processes | system process IDs (PID) space utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
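`vm.vmtotal` uses its own section; a sketch exercising its three documented options:

```ini
[plugin:freebsd:vm.vmtotal]
    enable total processes = yes
    processes running = yes
    # hide only the real-memory chart
    real memory = no
```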
An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.vmtotal instance\n\nThese metrics show an overall view of the processes running.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.active_processes | active | processes |\n| system.processes | running, blocked | processes |\n| mem.real | used | MiB |\n\n", @@ -3062,7 +2983,7 @@ export const integrations = [ "most_popular": false }, "overview": "# zfs\n\nPlugin: freebsd.plugin\nModule: zfs\n\n## Overview\n\nCollect metrics for the ZFS filesystem.\n\nThe plugin uses the `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:zfs_arcstats]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| show zero charts | Do not show charts with zero metrics. | no | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:zfs_arcstats]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| show zero charts | Enable or disable showing charts with zero metrics. 
| no | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ zfs_memory_throttle ](https://github.com/netdata/netdata/blob/master/src/health/health.d/zfs.conf) | zfs.memory_ops | number of times ZFS had to limit the ARC growth in the last 10 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per zfs instance\n\nThese metrics show detailed information about the ZFS filesystem.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| zfs.arc_size | arcsz, target, min, max | MiB |\n| zfs.l2_size | actual, size | MiB |\n| zfs.reads | arc, demand, prefetch, metadata, l2 | reads/s |\n| zfs.bytes | read, write | KiB/s |\n| zfs.hits | hits, misses | percentage |\n| zfs.hits_rate | hits, misses | events/s |\n| zfs.dhits | hits, misses | percentage |\n| zfs.dhits_rate | hits, misses | events/s |\n| zfs.phits | hits, misses | percentage |\n| zfs.phits_rate | hits, misses | events/s |\n| zfs.mhits | hits, misses | percentage |\n| zfs.mhits_rate | hits, misses | events/s |\n| zfs.l2hits | hits, misses | percentage |\n| zfs.l2hits_rate | hits, misses | events/s |\n| zfs.list_hits | mfu, mfu_ghost, mru, mru_ghost | hits/s |\n| zfs.arc_size_breakdown | recent, frequent | percentage |\n| zfs.memory_ops | throttled | operations/s |\n| zfs.important_ops | evict_skip, deleted, mutex_miss, hash_collisions | operations/s |\n| zfs.actual_hits | hits, misses | percentage |\n| zfs.actual_hits_rate | hits, misses | events/s |\n| zfs.demand_data_hits | hits, misses | percentage |\n| zfs.demand_data_hits_rate | hits, misses | events/s |\n| zfs.prefetch_data_hits | hits, misses | percentage |\n| zfs.prefetch_data_hits_rate | hits, misses | events/s |\n| zfs.hash_elements | current, max | elements |\n| zfs.hash_chains | current, max | chains |\n| zfs.trim_bytes | TRIMmed | bytes |\n| zfs.trim_requests | successful, failed, unsupported | requests |\n\n", @@ -3100,7 +3021,7 @@ export const integrations = [ "most_popular": true }, "overview": "# Intelligent Platform Management Interface (IPMI)\n\nPlugin: freeipmi.plugin\nModule: freeipmi\n\n## Overview\n\nMonitor enterprise server sensor readings, event log entries, and hardware statuses to ensure reliable server operations.\n\n\nThe plugin uses the open-source IPMImonitoring library to communicate with sensors.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe Linux kernel module for IPMI can create significant overhead.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install freeipmi.plugin\n\nWhen using our official DEB/RPM packages, the FreeIPMI plugin is included in a separate package named `netdata-plugin-freeipmi` which needs to be manually installed using your system package manager. 
It is not installed automatically due to the large number of dependencies it requires.\n\nWhen using a static build of Netdata, the FreeIPMI plugin will be included and installed automatically, though you will still need to have FreeIPMI installed on your system to be able to use the plugin.\n\nWhen using a local build of Netdata, you need to ensure that the FreeIPMI development packages (typically called `libipmimonitoring-dev`, `libipmimonitoring-devel`, or `freeipmi-devel`) are installed when building Netdata.\n\n\n#### Preliminary actions\n\nIf you have not previously used IPMI on your system, you will probably need to run the `ipmimonitoring` command as root\nto initialize IPMI settings so that the Netdata plugin works correctly. It should return information about available sensors on the system.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freeipmi]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nThe configuration is set using command line options:\n\n```\n# netdata.conf\n[plugin:freeipmi]\n command options = opt1 opt2 ... optN\n```\n\nTo display a help message listing the available command line options:\n\n```bash\n./usr/libexec/netdata/plugins.d/freeipmi.plugin --help\n```\n\n\n{% details open=true summary=\"Command options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SECONDS | Data collection frequency. | | no |\n| debug | Enable verbose output. | disabled | no |\n| no-sel | Disable System Event Log (SEL) collection. | disabled | no |\n| reread-sdr-cache | Re-read SDR cache on every iteration. | disabled | no |\n| interpret-oem-data | Attempt to parse OEM data. | disabled | no |\n| assume-system-event-record | treat illegal SEL events records as normal. | disabled | no |\n| ignore-non-interpretable-sensors | Do not read sensors that cannot be interpreted. | disabled | no |\n| bridge-sensors | Bridge sensors not owned by the BMC. | disabled | no |\n| shared-sensors | Enable shared sensors if found. | disabled | no |\n| no-discrete-reading | Do not read sensors if their event/reading type code is invalid. | enabled | no |\n| ignore-scanning-disabled | Ignore the scanning bit and read sensors no matter what. | disabled | no |\n| assume-bmc-owner | Assume the BMC is the sensor owner no matter what (usually bridging is required too). | disabled | no |\n| hostname HOST | Remote IPMI hostname or IP address. | local | no |\n| username USER | Username that will be used when connecting to the remote host. | | no |\n| password PASS | Password that will be used when connecting to the remote host. | | no |\n| noauthcodecheck / no-auth-code-check | Don't check the authentication codes returned. | | no |\n| driver-type IPMIDRIVER | Specify the driver type to use instead of doing an auto selection. The currently available outofband drivers are LAN and LAN_2_0, which perform IPMI 1.5 and IPMI 2.0 respectively. 
The currently available inband drivers are KCS, SSIF, OPENIPMI and SUNBMC. | | no |\n| sdr-cache-dir PATH | SDR cache files directory. | /tmp | no |\n| sensor-config-file FILE | Sensors configuration filename. | system default | no |\n| sel-config-file FILE | SEL configuration filename. | system default | no |\n| ignore N1,N2,N3,... | Sensor IDs to ignore. | | no |\n| ignore-status N1,N2,N3,... | Sensor IDs to ignore status (nominal/warning/critical). | | no |\n| -v | Print version and exit. | | no |\n| --help | Print usage message and exit. | | no |\n\n{% /details %}\n#### Examples\n\n##### Decrease data collection frequency\n\nBasic example decreasing data collection frequency. The minimum `update every` is 5 (enforced internally by the plugin). IPMI is slow and CPU hungry. So, once every 5 seconds is pretty acceptable.\n\n```yaml\n[plugin:freeipmi]\n update every = 10\n\n```\n##### Disable SEL collection\n\nAppend to `command options =` the options you need.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n[plugin:freeipmi]\n command options = no-sel\n\n```\n{% /details %}\n##### Ignore specific sensors\n\nSpecific sensor IDs can be excluded from freeipmi tools by editing `/etc/freeipmi/freeipmi.conf` and setting the IDs to be ignored at `ipmi-sensors-exclude-record-ids`.\n\n**However this file is not used by `libipmimonitoring`** (the library used by Netdata's `freeipmi.plugin`).\n\nTo find the IDs to ignore, run the command `ipmimonitoring`. The first column is the wanted ID:\n\nID | Name | Type | State | Reading | Units | Event\n1 | Ambient Temp | Temperature | Nominal | 26.00 | C | 'OK'\n2 | Altitude | Other Units Based Sensor | Nominal | 480.00 | ft | 'OK'\n3 | Avg Power | Current | Nominal | 100.00 | W | 'OK'\n4 | Planar 3.3V | Voltage | Nominal | 3.29 | V | 'OK'\n5 | Planar 5V | Voltage | Nominal | 4.90 | V | 'OK'\n6 | Planar 12V | Voltage | Nominal | 11.99 | V | 'OK'\n7 | Planar VBAT | Voltage | Nominal | 2.95 | V | 'OK'\n8 | Fan 1A Tach | Fan | Nominal | 3132.00 | RPM | 'OK'\n9 | Fan 1B Tach | Fan | Nominal | 2150.00 | RPM | 'OK'\n10 | Fan 2A Tach | Fan | Nominal | 2494.00 | RPM | 'OK'\n11 | Fan 2B Tach | Fan | Nominal | 1825.00 | RPM | 'OK'\n12 | Fan 3A Tach | Fan | Nominal | 3538.00 | RPM | 'OK'\n13 | Fan 3B Tach | Fan | Nominal | 2625.00 | RPM | 'OK'\n14 | Fan 1 | Entity Presence | Nominal | N/A | N/A | 'Entity Present'\n15 | Fan 2 | Entity Presence | Nominal | N/A | N/A | 'Entity Present'\n...\n\n`freeipmi.plugin` supports the option `ignore` that accepts a comma separated list of sensor IDs to ignore. To configure it set on `netdata.conf`:\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\n[plugin:freeipmi]\n command options = ignore 1,2,3,4,...\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install freeipmi.plugin\n\nWhen using our official DEB/RPM packages, the FreeIPMI plugin is included in a separate package named `netdata-plugin-freeipmi` which needs to be manually installed using your system package manager. 
It is not installed automatically due to the large number of dependencies it requires.\n\nWhen using a static build of Netdata, the FreeIPMI plugin will be included and installed automatically, though you will still need to have FreeIPMI installed on your system to be able to use the plugin.\n\nWhen using a local build of Netdata, you need to ensure that the FreeIPMI development packages (typically called `libipmimonitoring-dev`, `libipmimonitoring-devel`, or `freeipmi-devel`) are installed when building Netdata.\n\n\n#### Preliminary actions\n\nIf you have not previously used IPMI on your system, you will probably need to run the `ipmimonitoring` command as root\nto initialize IPMI settings so that the Netdata plugin works correctly. It should return information about available sensors on the system.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freeipmi]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nThe configuration is set using command-line options:\n\n```\n# netdata.conf\n[plugin:freeipmi]\n command options = opt1 opt2 ... optN\n```\n\nTo display a help message listing the available command-line options:\n\n```bash\n/usr/libexec/netdata/plugins.d/freeipmi.plugin --help\n```\n\n\n{% details open=true summary=\"Command options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SECONDS | Data collection frequency. | | no |\n| debug | Enable verbose output. | disabled | no |\n| no-sel | Disable System Event Log (SEL) collection. | disabled | no |\n| reread-sdr-cache | Re-read SDR cache on every iteration. | disabled | no |\n| interpret-oem-data | Attempt to parse OEM data. | disabled | no |\n| assume-system-event-record | Treat illegal SEL event records as normal. | disabled | no |\n| ignore-non-interpretable-sensors | Do not read sensors that cannot be interpreted. | disabled | no |\n| bridge-sensors | Bridge sensors not owned by the BMC. | disabled | no |\n| shared-sensors | Enable shared sensors if found. | disabled | no |\n| no-discrete-reading | Do not read sensors if their event/reading type code is invalid. | enabled | no |\n| ignore-scanning-disabled | Ignore the scanning bit and read sensors no matter what. | disabled | no |\n| assume-bmc-owner | Assume the BMC is the sensor owner no matter what (usually bridging is required too). | disabled | no |\n| hostname HOST | Remote IPMI hostname or IP address. | local | no |\n| username USER | Username that will be used when connecting to the remote host. | | no |\n| password PASS | Password that will be used when connecting to the remote host. | | no |\n| noauthcodecheck / no-auth-code-check | Don't check the authentication codes returned. 
| | no |\n| driver-type IPMIDRIVER | Specify the driver type to use instead of doing an auto selection. The currently available outofband drivers are LAN and LAN_2_0, which perform IPMI 1.5 and IPMI 2.0 respectively. The currently available inband drivers are KCS, SSIF, OPENIPMI and SUNBMC. | | no |\n| sdr-cache-dir PATH | SDR cache files directory. | /tmp | no |\n| sensor-config-file FILE | Sensors configuration filename. | system default | no |\n| sel-config-file FILE | SEL configuration filename. | system default | no |\n| ignore N1,N2,N3,... | Sensor IDs to ignore. | | no |\n| ignore-status N1,N2,N3,... | Sensor IDs to ignore status (nominal/warning/critical). | | no |\n| -v | Print version and exit. | | no |\n| --help | Print usage message and exit. | | no |\n\n{% /details %}\n#### Examples\n\n##### Decrease data collection frequency\n\nBasic example decreasing data collection frequency. The minimum `update every` is 5 (enforced internally by the plugin). IPMI is slow and CPU hungry, so collecting once every 5 seconds is acceptable.\n\n```yaml\n[plugin:freeipmi]\n update every = 10\n\n```\n##### Disable SEL collection\n\nAppend to `command options =` the options you need.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n[plugin:freeipmi]\n command options = no-sel\n\n```\n{% /details %}\n##### Ignore specific sensors\n\nSpecific sensor IDs can be excluded from freeipmi tools by editing `/etc/freeipmi/freeipmi.conf` and setting the IDs to be ignored at `ipmi-sensors-exclude-record-ids`.\n\n**However, this file is not used by `libipmimonitoring`** (the library used by Netdata's `freeipmi.plugin`).\n\nTo find the IDs to ignore, run the command `ipmimonitoring`. The first column is the ID you need:\n\nID | Name | Type | State | Reading | Units | Event\n1 | Ambient Temp | Temperature | Nominal | 26.00 | C | 'OK'\n2 | Altitude | Other Units Based Sensor | Nominal | 480.00 | ft | 'OK'\n3 | Avg Power | Current | Nominal | 100.00 | W | 'OK'\n4 | Planar 3.3V | Voltage | Nominal | 3.29 | V | 'OK'\n5 | Planar 5V | Voltage | Nominal | 4.90 | V | 'OK'\n6 | Planar 12V | Voltage | Nominal | 11.99 | V | 'OK'\n7 | Planar VBAT | Voltage | Nominal | 2.95 | V | 'OK'\n8 | Fan 1A Tach | Fan | Nominal | 3132.00 | RPM | 'OK'\n9 | Fan 1B Tach | Fan | Nominal | 2150.00 | RPM | 'OK'\n10 | Fan 2A Tach | Fan | Nominal | 2494.00 | RPM | 'OK'\n11 | Fan 2B Tach | Fan | Nominal | 1825.00 | RPM | 'OK'\n12 | Fan 3A Tach | Fan | Nominal | 3538.00 | RPM | 'OK'\n13 | Fan 3B Tach | Fan | Nominal | 2625.00 | RPM | 'OK'\n14 | Fan 1 | Entity Presence | Nominal | N/A | N/A | 'Entity Present'\n15 | Fan 2 | Entity Presence | Nominal | N/A | N/A | 'Entity Present'\n...\n\n`freeipmi.plugin` supports the option `ignore` that accepts a comma-separated list of sensor IDs to ignore. To configure it, set it in `netdata.conf`:\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\n[plugin:freeipmi]\n command options = ignore 1,2,3,4,...\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n\n\n### kipmi0 CPU usage\n\n\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ ipmi_sensor_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ipmi.conf) | ipmi.sensor_state | IPMI sensor ${label:sensor} (${label:component}) state |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThe plugin does a speed test when it starts, to find out the duration needed by the IPMI processor to respond. Depending on the speed of your IPMI processor, charts may need several seconds to show up on the dashboard.\n\n\n### Per Intelligent Platform Management Interface (IPMI) instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipmi.sel | events | events |\n\n### Per sensor\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| sensor | The sensor name |\n| type | One of 45 recognized sensor types (Battery, Voltage...) |\n| component | One of 25 recognized components (Processor, Peripheral). |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipmi.sensor_state | nominal, critical, warning, unknown | state |\n| ipmi.sensor_temperature_c | temperature | Celsius |\n| ipmi.sensor_temperature_f | temperature | Fahrenheit |\n| ipmi.sensor_voltage | voltage | Volts |\n| ipmi.sensor_ampere | ampere | Amps |\n| ipmi.sensor_fan_speed | rotations | RPM |\n| ipmi.sensor_power | power | Watts |\n| ipmi.sensor_reading_percent | percentage | % |\n\n", @@ -3146,7 +3067,7 @@ export const integrations = [ } }, "overview": "# ActiveMQ\n\nPlugin: go.d.plugin\nModule: activemq\n\n## Overview\n\nThis collector monitors ActiveMQ queues and topics.\n\nIt collects metrics by sending HTTP requests to the Web Console API.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis collector discovers instances running on the local host that provide metrics on port 8161.\nOn startup, it tries to collect metrics from:\n\n- http://localhost:8161\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/activemq.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/activemq.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://localhost:8161 | yes |\n| webadmin | Webadmin root path. | admin | yes |\n| max_queues | Maximum number of concurrently collected queues. | 50 | no |\n| max_topics | Maximum number of concurrently collected topics. | 50 | no |\n| queues_filter | Queues filter. Syntax is [simple patterns](/src/libnetdata/simple_pattern/README.md#simple-patterns). | | no |\n| topics_filter | Topics filter. 
Syntax is [simple patterns](/src/libnetdata/simple_pattern/README.md#simple-patterns). | | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| timeout | HTTP request timeout. | 1 | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8161\n webadmin: admin\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8161\n webadmin: admin\n username: foo\n password: bar\n\n```\n{% /details %}\n##### Filters and limits\n\nUsing filters and limits for queues and topics.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8161\n webadmin: admin\n max_queues: 100\n max_topics: 100\n queues_filter: '!sandr* *'\n topics_filter: '!sandr* *'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8161\n webadmin: admin\n\n - name: remote\n url: http://192.0.2.1:8161\n webadmin: admin\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/activemq.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/activemq.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://localhost:8161 | yes |\n| webadmin | Webadmin root path. | admin | yes |\n| max_queues | Maximum number of concurrently collected queues. | 50 | no |\n| max_topics | Maximum number of concurrently collected topics. | 50 | no |\n| queues_filter | Queues filter. 
Syntax is [simple patterns](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md#simple-patterns). | | no |\n| topics_filter | Topics filter. Syntax is [simple patterns](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md#simple-patterns). | | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| timeout | HTTP request timeout. | 1 | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8161\n webadmin: admin\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8161\n webadmin: admin\n username: foo\n password: bar\n\n```\n{% /details %}\n##### Filters and limits\n\nUsing filters and limits for queues and topics.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8161\n webadmin: admin\n max_queues: 100\n max_topics: 100\n queues_filter: '!sandr* *'\n topics_filter: '!sandr* *'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8161\n webadmin: admin\n\n - name: remote\n url: http://192.0.2.1:8161\n webadmin: admin\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `activemq` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m activemq\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `activemq` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep activemq\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep activemq /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep activemq\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per ActiveMQ instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| activemq.messages | enqueued, dequeued | messages/s |\n| activemq.unprocessed_messages | unprocessed | messages |\n| activemq.consumers | consumers | consumers |\n\n", @@ -3183,8 +3104,8 @@ export const integrations = [ }, "most_popular": false }, - "overview": "# Adaptec RAID\n\nPlugin: go.d.plugin\nModule: adaptec_raid\n\n## Overview\n\nMonitors the health of Adaptec Hardware RAID by tracking the status of logical and physical devices in your storage system.\nIt relies on the `arcconf` CLI tool but avoids directly executing the binary.\nInstead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment.\nThis approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\n\nExecuted commands:\n- `arcconf GETCONFIG 1 LD`\n- `arcconf GETCONFIG 1 PD`\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/adaptec_raid.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/adaptec_raid.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | arcconf binary execution timeout. 
| 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: adaptec_raid\n update_every: 5 # Collect Adaptec Hardware RAID statistics every 5 seconds\n\n```\n{% /details %}\n", + "overview": "# Adaptec RAID\n\nPlugin: go.d.plugin\nModule: adaptec_raid\n\n## Overview\n\nMonitors the health of Adaptec Hardware RAID by tracking the status of logical and physical devices in your storage system.\nIt relies on the `arcconf` CLI tool but avoids directly executing the binary.\nInstead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment.\nThis approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\n\nExecuted commands:\n- `arcconf GETCONFIG 1 LD`\n- `arcconf GETCONFIG 1 PD`\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n- BSD\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/adaptec_raid.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/adaptec_raid.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | arcconf binary execution timeout. | 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: adaptec_raid\n update_every: 5 # Collect Adaptec Hardware RAID statistics every 5 seconds\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `adaptec_raid` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m adaptec_raid\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `adaptec_raid` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep adaptec_raid\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep adaptec_raid /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep adaptec_raid\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ adaptec_raid_ld_health_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/adaptec_raid.conf) | adaptecraid.logical_device_status | Adaptec RAID logical device (number ${label:ld_number} name ${label:ld_name}) health status is critical |\n| [ adaptec_raid_pd_health_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/adaptec_raid.conf) | adaptecraid.physical_device_state | Adaptec RAID physical device (number ${label:pd_number} location ${label:location}) health state is critical |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per logical device\n\nThese metrics refer to the Logical Device (LD).\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| ld_number | Logical device index number |\n| ld_name | Logical device name |\n| raid_level | RAID level |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adaptecraid.logical_device_status | ok, critical | status |\n\n### Per physical device\n\nThese metrics refer to the Physical Device (PD).\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| pd_number | Physical device index number |\n| location | Physical device location (e.g. 
Connector 0, Device 1) |\n| vendor | Physical device vendor |\n| model | Physical device model |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adaptecraid.physical_device_state | ok, critical | status |\n| adaptecraid.physical_device_smart_warnings | smart | warnings |\n| adaptecraid.physical_device_temperature | temperature | Celsius |\n\n", @@ -3223,7 +3144,7 @@ export const integrations = [ "most_popular": false }, "overview": "# Access Points\n\nPlugin: go.d.plugin\nModule: ap\n\n## Overview\n\nThis collector monitors various wireless access point metrics like connected clients, bandwidth, packets, transmit issues, signal strength, and bitrate for each device and its associated SSID.\n\n\nThis tool uses the `iw` command-line utility to discover nearby access points. It starts by running `iw dev`, which provides information about all wireless interfaces. Then, for each interface identified as an access point (type AP), the `iw INTERFACE station dump` command is executed to gather relevant metrics.\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin is able to auto-detect any access points on your Linux machine.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### `iw` utility.\n\nMake sure the `iw` utility is installed.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/ap.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/ap.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| binary_path | Path to the `iw` binary. If an absolute path is provided, the collector will use it directly; otherwise, it will search for the binary in directories specified in the PATH environment variable. | /usr/sbin/iw | yes |\n| timeout | Timeout for executing the binary, specified in seconds. 
| 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Custom binary path\n\nThe executable is not in the directories specified in the PATH environment variable.\n\n```yaml\njobs:\n - name: custom_iw\n binary_path: /usr/local/sbin/iw\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### `iw` utility.\n\nMake sure the `iw` utility is installed.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/ap.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/ap.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| binary_path | Path to the `iw` binary. If an absolute path is provided, the collector will use it directly; otherwise, it will search for the binary in directories specified in the PATH environment variable. | /usr/sbin/iw | yes |\n| timeout | Timeout for executing the binary, specified in seconds. | 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Custom binary path\n\nThe executable is not in the directories specified in the PATH environment variable.\n\n```yaml\njobs:\n - name: custom_iw\n binary_path: /usr/local/sbin/iw\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `ap` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m ap\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `ap` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep ap\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep ap /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep ap\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per wireless device\n\nThese metrics refer to the entire monitored application.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | Wireless interface name |\n| ssid | SSID |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ap.clients | clients | clients |\n| ap.net | received, sent | kilobits/s |\n| ap.packets | received, sent | packets/s |\n| ap.issues | retries, failures | issues/s |\n| ap.signal | average signal | dBm |\n| ap.bitrate | receive, transmit | Mbps |\n\n", @@ -3272,7 +3193,7 @@ export const integrations = [ "most_popular": true }, "overview": "# Apache\n\nPlugin: go.d.plugin\nModule: apache\n\n## Overview\n\nThis collector monitors the activity and performance of Apache servers, and collects metrics such as the number of connections, workers, requests and more.\n\n\nIt sends HTTP requests to the Apache location [server-status](https://httpd.apache.org/docs/2.4/mod/mod_status.html), \nwhich is a built-in location that provides metrics about the Apache server.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Apache instances running on localhost that are listening on port 80.\nOn startup, it tries to collect metrics from:\n\n- http://localhost/server-status?auto\n- http://127.0.0.1/server-status?auto\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Enable Apache status support\n\n- Enable and configure [status_module](https://httpd.apache.org/docs/2.4/mod/mod_status.html).\n- Ensure that you have [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/mod_status.html#troubleshoot) set on (enabled by default since Apache v2.3.6).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/apache.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/apache.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/server-status?auto | yes |\n| timeout | HTTP request timeout. 
| 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nApache with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1/server-status?auto\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n\n - name: remote\n url: http://192.0.2.1/server-status?auto\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Enable Apache status support\n\n- Enable and configure [status_module](https://httpd.apache.org/docs/2.4/mod/mod_status.html).\n- Ensure that you have [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/mod_status.html#troubleshoot) set on (enabled by default since Apache v2.3.6).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/apache.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/apache.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/server-status?auto | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. 
| | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nApache with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1/server-status?auto\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n\n - name: remote\n url: http://192.0.2.1/server-status?auto\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `apache` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m apache\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `apache` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep apache\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep apache /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep apache\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nAll metrics available only if [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/core.html#extendedstatus) is on.\n\n\n### Per Apache instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | Basic | Extended |\n|:------|:----------|:----|:---:|:---:|\n| apache.connections | connections | connections | \u2022 | \u2022 |\n| apache.conns_async | keepalive, closing, writing | connections | \u2022 | \u2022 |\n| apache.workers | idle, busy | workers | \u2022 | \u2022 |\n| apache.scoreboard | waiting, starting, reading, sending, keepalive, dns_lookup, closing, logging, finishing, idle_cleanup, open | connections | \u2022 | \u2022 |\n| apache.requests | requests | requests/s | | \u2022 |\n| apache.net | sent | kilobit/s | | \u2022 |\n| apache.reqpersec | requests | requests/s | | \u2022 |\n| apache.bytespersec | served | KiB/s | | \u2022 |\n| apache.bytesperreq | size | KiB | | \u2022 |\n| apache.uptime | uptime | seconds | | \u2022 |\n\n", @@ -3321,7 +3242,7 @@ export const integrations = [ "most_popular": true }, "overview": "# HTTPD\n\nPlugin: go.d.plugin\nModule: apache\n\n## Overview\n\nThis collector monitors the activity and performance of Apache servers, and collects metrics such as the number of connections, workers, requests and more.\n\n\nIt sends HTTP requests to the Apache location [server-status](https://httpd.apache.org/docs/2.4/mod/mod_status.html), \nwhich is a built-in location that provides metrics about the Apache server.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Apache instances running on localhost that are listening on port 80.\nOn startup, it tries to collect metrics from:\n\n- http://localhost/server-status?auto\n- http://127.0.0.1/server-status?auto\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Enable Apache status support\n\n- Enable and configure [status_module](https://httpd.apache.org/docs/2.4/mod/mod_status.html).\n- Ensure that you have [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/mod_status.html#troubleshoot) set on (enabled by default since Apache v2.3.6).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/apache.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/apache.conf\n```\n#### Options\n\nThe 
following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/server-status?auto | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nApache with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1/server-status?auto\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n\n - name: remote\n url: http://192.0.2.1/server-status?auto\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Enable Apache status support\n\n- Enable and configure [status_module](https://httpd.apache.org/docs/2.4/mod/mod_status.html).\n- Ensure that you have [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/mod_status.html#troubleshoot) set on (enabled by default since Apache v2.3.6).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/apache.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/apache.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required 
|\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/server-status?auto | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nApache with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1/server-status?auto\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n\n - name: remote\n url: http://192.0.2.1/server-status?auto\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `apache` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m apache\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `apache` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep apache\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep apache /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep apache\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nAll metrics available only if [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/core.html#extendedstatus) is on.\n\n\n### Per Apache instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | Basic | Extended |\n|:------|:----------|:----|:---:|:---:|\n| apache.connections | connections | connections | \u2022 | \u2022 |\n| apache.conns_async | keepalive, closing, writing | connections | \u2022 | \u2022 |\n| apache.workers | idle, busy | workers | \u2022 | \u2022 |\n| apache.scoreboard | waiting, starting, reading, sending, keepalive, dns_lookup, closing, logging, finishing, idle_cleanup, open | connections | \u2022 | \u2022 |\n| apache.requests | requests | requests/s | | \u2022 |\n| apache.net | sent | kilobit/s | | \u2022 |\n| apache.reqpersec | requests | requests/s | | \u2022 |\n| apache.bytespersec | served | KiB/s | | \u2022 |\n| apache.bytesperreq | size | KiB | | \u2022 |\n| apache.uptime | uptime | seconds | | \u2022 |\n\n", @@ -3330,6 +3251,44 @@ export const integrations = [ "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/apache/metadata.yaml", "related_resources": "" }, + { + "meta": { + "id": "collector-go.d.plugin-apcupsd", + "plugin_name": "go.d.plugin", + "module_name": "apcupsd", + "monitored_instance": { + "name": "APC UPS", + "link": "https://www.apc.com", + "icon_filename": "apc.svg", + "categories": [ + "data-collection.ups" + ] + }, + "keywords": [ + "ups", + "apcupsd", + "apc" + ], + "related_resources": { + "integrations": { + "list": [] + } + }, + "info_provided_to_referring_integrations": { + "description": "" + }, + "most_popular": false + }, + "overview": "# APC UPS\n\nPlugin: go.d.plugin\nModule: apcupsd\n\n## Overview\n\nThis collector monitors Uninterruptible Power Supplies by polling the Apcupsd daemon.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Apcupsd instances running on localhost that are listening on port 3551.\nOn startup, it tries to collect metrics from:\n\n- 127.0.0.1:3551\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### 
Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/apcupsd.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/apcupsd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Apcupsd daemon address in IP:PORT format. | 127.0.0.1:3551 | yes |\n| timeout | Connection/read/write timeout in seconds. The timeout includes name resolution, if required. | 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:3551\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:3551\n\n - name: remote\n address: 203.0.113.0:3551\n\n```\n{% /details %}\n", + "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `apcupsd` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m apcupsd\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `apcupsd` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep apcupsd\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep apcupsd /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep apcupsd\n```\n\n", + "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ apcupsd_ups_load_capacity ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.ups_load_capacity_utilization | APC UPS average load over the last 10 minutes |\n| [ apcupsd_ups_battery_charge ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.ups_battery_charge | APC UPS average battery charge over the last minute |\n| [ apcupsd_last_collected_secs ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.ups_status | APC UPS number of seconds since the last successful data collection |\n| [ apcupsd_ups_selftest_warning ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.ups_selftest | APC UPS self-test failed due to insufficient battery capacity or due to overload |\n| [ apcupsd_ups_status_onbatt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.ups_status | APC UPS has switched to battery power because the input power has failed |\n| [ apcupsd_ups_status_overload ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.ups_status | APC UPS is overloaded and cannot supply enough power to the load |\n| [ apcupsd_ups_status_lowbatt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.ups_status | APC UPS battery is low and needs to be recharged |\n| [ apcupsd_ups_status_replacebatt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.ups_status | APC UPS battery has reached the end of its lifespan and needs to be replaced |\n| [ apcupsd_ups_status_nobatt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.ups_status | APC UPS has no battery |\n| [ apcupsd_ups_status_commlost ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.ups_status | APC UPS communication link is lost |\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
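Before reading the metric tables below, it can also help to rule out a plain connectivity problem, since the collector only needs to reach the apcupsd Network Information Server socket. A minimal reachability sketch, assuming the default `127.0.0.1:3551` address and a netcat build that supports `-z`:

```bash
# Check that something is listening on the apcupsd NIS port (default 127.0.0.1:3551).
# A zero exit status only proves the socket accepts connections; it does not
# validate that the daemon speaks the NIS protocol correctly.
nc -z -w 2 127.0.0.1 3551 && echo "apcupsd port reachable" || echo "no listener on 3551"
```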
An instance is uniquely identified by a set of labels.\n\n\n\n### Per ups\n\nThese metrics refer to the UPS unit.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| apcupsd.ups_status | TRIM, BOOST, CAL, ONLINE, ONBATT, OVERLOAD, LOWBATT, REPLACEBATT, NOBATT, SLAVE, SLAVEDOWN, COMMLOST, SHUTTING_DOWN | status |\n| apcupsd.ups_selftest | NO, NG, WN, IP, OK, BT, UNK | status |\n| apcupsd.ups_battery_charge | charge | percent |\n| apcupsd.ups_battery_time_remaining | timeleft | seconds |\n| apcupsd.ups_battery_time_since_replacement | since_replacement | seconds |\n| apcupsd.ups_battery_voltage | voltage, nominal_voltage | Volts |\n| apcupsd.ups_load_capacity_utilization | load | percent |\n| apcupsd.ups_load | load | Watts |\n| apcupsd.ups_temperature | temperature | Celsius |\n| apcupsd.ups_input_voltage | voltage, min_voltage, max_voltage | Volts |\n| apcupsd.ups_input_frequency | frequency | Hz |\n| apcupsd.ups_output_voltage | voltage | Volts |\n\n", + "integration_type": "collector", + "id": "go.d.plugin-apcupsd-APC_UPS", + "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/apcupsd/metadata.yaml", + "related_resources": "" + }, { "meta": { "id": "collector-go.d.plugin-beanstalk", @@ -3359,7 +3318,7 @@ export const integrations = [ "most_popular": false }, "overview": "# Beanstalk\n\nPlugin: go.d.plugin\nModule: beanstalk\n\n## Overview\n\nThis collector monitors Beanstalk server performance and provides detailed statistics for each tube.\n\n\nUsing the [beanstalkd protocol](https://github.com/beanstalkd/beanstalkd/blob/master/doc/protocol.txt), it communicates with the Beanstalk daemon to gather essential metrics that help understand the server's performance and activity.\nExecuted commands:\n\n- [stats](https://github.com/beanstalkd/beanstalkd/blob/91c54fc05dc759ef27459ce4383934e1a4f2fb4b/doc/protocol.txt#L553).\n- [list-tubes](https://github.com/beanstalkd/beanstalkd/blob/91c54fc05dc759ef27459ce4383934e1a4f2fb4b/doc/protocol.txt#L688).\n- [stats-tube](https://github.com/beanstalkd/beanstalkd/blob/91c54fc05dc759ef27459ce4383934e1a4f2fb4b/doc/protocol.txt#L497).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Beanstalk instances running on localhost that are listening on port 11300.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/beanstalk.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/beanstalk.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. 
| 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | The IP address and port where the Beanstalk service listens for connections. | 127.0.0.1:11300 | yes |\n| timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. | 1 | no |\n| tube_selector | Specifies a [pattern](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#readme) for which Beanstalk tubes Netdata will collect statistics. | * | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:11300\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:11300\n\n - name: remote\n address: 203.0.113.0:11300\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/beanstalk.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/beanstalk.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | The IP address and port where the Beanstalk service listens for connections. | 127.0.0.1:11300 | yes |\n| timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. | 1 | no |\n| tube_selector | Specifies a [pattern](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#readme) for which Beanstalk tubes Netdata will collect statistics. | * | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:11300\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:11300\n\n - name: remote\n address: 203.0.113.0:11300\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `beanstalk` collector, run the `go.d.plugin` with the debug option enabled. 
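Because the collector speaks the same plain-text beanstalkd protocol referenced above, you can also exercise the `stats` command by hand before involving Netdata at all. A minimal sketch, assuming the default `127.0.0.1:11300` address and a netcat build that keeps the connection open long enough to read the reply:

```bash
# Send the beanstalkd "stats" command over the raw text protocol.
# A healthy daemon answers "OK <bytes>" followed by a YAML body of server
# statistics, the same data the collector parses.
printf 'stats\r\n' | nc -w 2 127.0.0.1 11300 | head -n 20
```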
The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m beanstalk\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `beanstalk` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep beanstalk\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep beanstalk /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep beanstalk\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ beanstalk_server_buried_jobs ](https://github.com/netdata/netdata/blob/master/src/health/health.d/beanstalkd.conf) | beanstalk.current_jobs | number of buried jobs across all tubes. You need to manually kick them so they can be processed. Presence of buried jobs in a tube does not affect new jobs. |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Beanstalk instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| beanstalk.current_jobs | ready, buried, urgent, delayed, reserved | jobs |\n| beanstalk.jobs_rate | created | jobs/s |\n| beanstalk.jobs_timeouts | timeouts | jobs/s |\n| beanstalk.current_tubes | tubes | tubes |\n| beanstalk.commands_rate | put, peek, peek-ready, peek-delayed, peek-buried, reserve, reserve-with-timeout, touch, use, watch, ignore, delete, bury, kick, stats, stats-job, stats-tube, list-tubes, list-tube-used, list-tubes-watched, pause-tube | commands/s |\n| beanstalk.current_connections | open, producers, workers, waiting | connections |\n| beanstalk.connections_rate | created | connections/s |\n| beanstalk.binlog_records | written, migrated | records/s |\n| beanstalk.cpu_usage | user, system | percent |\n| beanstalk.uptime | uptime | seconds |\n\n### Per tube\n\nMetrics related to Beanstalk tubes. This set of metrics is provided for each tube.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| tube_name | Tube name. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| beanstalk.tube_current_jobs | ready, buried, urgent, delayed, reserved | jobs |\n| beanstalk.tube_jobs_rate | created | jobs/s |\n| beanstalk.tube_commands_rate | delete, pause-tube | commands/s |\n| beanstalk.tube_current_connections | using, waiting, watching | connections |\n| beanstalk.tube_pause_time | since, left | seconds |\n\n", @@ -3368,6 +3327,43 @@ export const integrations = [ "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/beanstalk/metadata.yaml", "related_resources": "" }, + { + "meta": { + "id": "collector-go.d.plugin-boinc", + "plugin_name": "go.d.plugin", + "module_name": "boinc", + "monitored_instance": { + "name": "BOINC", + "link": "https://boinc.berkeley.edu/", + "categories": [ + "data-collection.database-servers" + ], + "icon_filename": "bolt.svg" + }, + "related_resources": { + "integrations": { + "list": [] + } + }, + "info_provided_to_referring_integrations": { + "description": "" + }, + "keywords": [ + "boinc", + "distributed" + ], + "most_popular": false + }, + "overview": "# BOINC\n\nPlugin: go.d.plugin\nModule: boinc\n\n## Overview\n\nThis collector monitors task counts for the Berkeley Open Infrastructure for Network Computing (BOINC) distributed computing client.\n\n\nIt communicates with the BOINC client using the [GUI RPC Protocol](https://boinc.berkeley.edu/trac/wiki/GuiRpcProtocol).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects BOINC client instances running on localhost that are listening on port 31416.\nOn startup, it tries to collect metrics from:\n\n- http://127.0.0.1:31416\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/boinc.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/boinc.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | The IP address and port where the BOINC client listens for connections. | 127.0.0.1:31416 | yes |\n| timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. | 1 | no |\n| password | The GUI RPC password for authentication. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:31416\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:31416\n\n - name: remote\n address: 203.0.113.0:31416\n password: somePassword\n\n```\n{% /details %}\n", + "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `boinc` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m boinc\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `boinc` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep boinc\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep boinc /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
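Independently of Netdata, the GUI RPC endpoint can usually be exercised with the `boinccmd` tool that ships with the BOINC client; a hypothetical quick check, assuming the default `127.0.0.1:31416` endpoint and that `boinccmd` is installed:

```bash
# Ask the local BOINC client for its task list over GUI RPC.
# Add --passwd <password> if a GUI RPC password is configured; remote access
# normally also has to be allowed on the BOINC client's side.
boinccmd --host 127.0.0.1 --get_tasks | head -n 20
```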
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep boinc\n```\n\n", + "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ boinc_total_tasks ](https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf) | boinc.tasks | average number of total tasks over the last 10 minutes |\n| [ boinc_active_tasks ](https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf) | boinc.tasks | average number of active tasks over the last 10 minutes |\n| [ boinc_compute_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf) | boinc.tasks_state | average number of compute errors over the last 10 minutes |\n| [ boinc_upload_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf) | boinc.tasks_state | average number of failed uploads over the last 10 minutes |\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per BOINC instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| boinc.tasks | total, active | tasks |\n| boinc.tasks_per_state | new, downloading, downloaded, compute_error, uploading, uploaded, aborted, upload_failed | tasks |\n| boinc.active_tasks_per_state | uninitialized, executing, abort_pending, quit_pending, suspended, copy_pending | tasks |\n| boinc.active_tasks_per_scheduler_state | uninitialized, preempted, scheduled | tasks |\n\n", + "integration_type": "collector", + "id": "go.d.plugin-boinc-BOINC", + "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/boinc/metadata.yaml", + "related_resources": "" + }, { "meta": { "id": "collector-go.d.plugin-cassandra", @@ -3399,7 +3395,7 @@ export const integrations = [ "most_popular": false }, "overview": "# Cassandra\n\nPlugin: go.d.plugin\nModule: cassandra\n\n## Overview\n\nThis collector gathers metrics about client requests, cache hits, and many more, while also providing metrics per each thread pool.\n\n\nThe [JMX Exporter](https://github.com/prometheus/jmx_exporter) is used to fetch metrics from a Cassandra instance and make them available at an endpoint like `http://127.0.0.1:7072/metrics`.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis collector discovers instances running on the local host that provide metrics on port 7072.\n\nOn startup, it tries to collect metrics from:\n\n- http://127.0.0.1:7072/metrics\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Configure Cassandra with Prometheus JMX Exporter\n\nTo configure Cassandra with the [JMX Exporter](https://github.com/prometheus/jmx_exporter):\n\n> **Note**: paths can differ 
depends on your setup.\n\n- Download latest [jmx_exporter](https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/) jar file\n and install it in a directory where Cassandra can access it.\n- Add\n the [jmx_exporter.yaml](https://raw.githubusercontent.com/netdata/go.d.plugin/master/modules/cassandra/jmx_exporter.yaml)\n file to `/etc/cassandra`.\n- Add the following line to `/etc/cassandra/cassandra-env.sh`\n ```\n JVM_OPTS=\"$JVM_OPTS $JVM_EXTRA_OPTS -javaagent:/opt/jmx_exporter/jmx_exporter.jar=7072:/etc/cassandra/jmx_exporter.yaml\n ```\n- Restart cassandra service.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/cassandra.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/cassandra.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:7072/metrics | yes |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 2 | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:7072/metrics\n\n```\n##### HTTP authentication\n\nLocal server with basic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:7072/metrics\n username: foo\n password: bar\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nLocal server with HTTPS enabled and a self-signed certificate.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:7072/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:7072/metrics\n\n - name: remote\n url: http://192.0.2.1:7072/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Configure Cassandra with Prometheus JMX Exporter\n\nTo configure Cassandra with the [JMX Exporter](https://github.com/prometheus/jmx_exporter):\n\n> **Note**: paths can differ depending on your setup.\n\n- Download the latest [jmx_exporter](https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/) jar file\n and install it in a directory where Cassandra can access it.\n- Add\n the [jmx_exporter.yaml](https://raw.githubusercontent.com/netdata/go.d.plugin/master/modules/cassandra/jmx_exporter.yaml)\n file to `/etc/cassandra`.\n- Add the following line to `/etc/cassandra/cassandra-env.sh`\n ```\n JVM_OPTS=\"$JVM_OPTS $JVM_EXTRA_OPTS -javaagent:/opt/jmx_exporter/jmx_exporter.jar=7072:/etc/cassandra/jmx_exporter.yaml\"\n ```\n- Restart the Cassandra service.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/cassandra.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/cassandra.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:7072/metrics | yes |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 2 | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. 
| no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:7072/metrics\n\n```\n##### HTTP authentication\n\nLocal server with basic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:7072/metrics\n username: foo\n password: bar\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nLocal server with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:7072/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:7072/metrics\n\n - name: remote\n url: http://192.0.2.1:7072/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `cassandra` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m cassandra\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `cassandra` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep cassandra\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep cassandra /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep cassandra\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
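Since the collector only consumes the JMX Exporter's HTTP endpoint, querying that endpoint directly is a quick way to tell exporter problems apart from collector problems. A minimal sketch, assuming the javaagent was attached on port 7072 as in the setup steps above:

```bash
# Confirm the Prometheus JMX exporter javaagent is up and serving Cassandra metrics
# (assumes the default http://127.0.0.1:7072/metrics endpoint).
curl -fsS 'http://127.0.0.1:7072/metrics' | head -n 20
# Prometheus-format output here means the exporter side is healthy and the
# collector's `url` option should point at this address.
```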
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Cassandra instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cassandra.client_requests_rate | read, write | requests/s |\n| cassandra.client_request_read_latency_histogram | p50, p75, p95, p98, p99, p999 | seconds |\n| cassandra.client_request_write_latency_histogram | p50, p75, p95, p98, p99, p999 | seconds |\n| cassandra.client_requests_latency | read, write | seconds |\n| cassandra.row_cache_hit_ratio | hit_ratio | percentage |\n| cassandra.row_cache_hit_rate | hits, misses | events/s |\n| cassandra.row_cache_utilization | used | percentage |\n| cassandra.row_cache_size | size | bytes |\n| cassandra.key_cache_hit_ratio | hit_ratio | percentage |\n| cassandra.key_cache_hit_rate | hits, misses | events/s |\n| cassandra.key_cache_utilization | used | percentage |\n| cassandra.key_cache_size | size | bytes |\n| cassandra.storage_live_disk_space_used | used | bytes |\n| cassandra.compaction_completed_tasks_rate | completed | tasks/s |\n| cassandra.compaction_pending_tasks_count | pending | tasks |\n| cassandra.compaction_compacted_rate | compacted | bytes/s |\n| cassandra.jvm_memory_used | heap, nonheap | bytes |\n| cassandra.jvm_gc_rate | parnew, cms | gc/s |\n| cassandra.jvm_gc_time | parnew, cms | seconds |\n| cassandra.dropped_messages_rate | dropped | messages/s |\n| cassandra.client_requests_timeouts_rate | read, write | timeout/s |\n| cassandra.client_requests_unavailables_rate | read, write | exceptions/s |\n| cassandra.client_requests_failures_rate | read, write | failures/s |\n| cassandra.storage_exceptions_rate | storage | exceptions/s |\n\n### Per thread pool\n\nMetrics related to Cassandra's thread pools. 
Each thread pool provides its own set of the following metrics.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thread_pool | thread pool name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cassandra.thread_pool_active_tasks_count | active | tasks |\n| cassandra.thread_pool_pending_tasks_count | pending | tasks |\n| cassandra.thread_pool_blocked_tasks_count | blocked | tasks |\n| cassandra.thread_pool_blocked_tasks_rate | blocked | tasks/s |\n\n", @@ -3408,6 +3404,42 @@ export const integrations = [ "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/cassandra/metadata.yaml", "related_resources": "" }, + { + "meta": { + "plugin_name": "go.d.plugin", + "module_name": "ceph", + "monitored_instance": { + "name": "Ceph", + "link": "https://ceph.io/", + "categories": [ + "data-collection.storage-mount-points-and-filesystems" + ], + "icon_filename": "ceph.svg" + }, + "related_resources": { + "integrations": { + "list": [] + } + }, + "info_provided_to_referring_integrations": { + "description": "" + }, + "keywords": [ + "ceph", + "storage" + ], + "most_popular": false + }, + "overview": "# Ceph\n\nPlugin: go.d.plugin\nModule: ceph\n\n## Overview\n\nThis collector monitors the overall health status and performance of your Ceph clusters.\nIt gathers key metrics for the entire cluster, individual Pools, and OSDs.\n\n\nIt collects metrics by periodically issuing HTTP GET requests to the Ceph Manager [REST API](https://docs.ceph.com/en/reef/mgr/ceph_api/#):\n\n- [/api/monitor](https://docs.ceph.com/en/reef/mgr/ceph_api/#get--api-monitor) (only once to get the Ceph cluster id (fsid)) \n- [/api/health/minimal](https://docs.ceph.com/en/reef/mgr/ceph_api/#get--api-health-minimal)\n- [/api/osd](https://docs.ceph.com/en/reef/mgr/ceph_api/#get--api-osd)\n- [/api/pool?stats=true](https://docs.ceph.com/en/reef/mgr/ceph_api/#get--api-pool)\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector can automatically detect Ceph Manager instances running on:\n\n- localhost that are listening on port 8443\n- within Docker containers\n\n> **Note that the Ceph REST API requires a username and password**. 
\n> While Netdata can automatically detect Ceph Manager instances and create data collection jobs, these jobs will fail unless you provide the necessary credentials.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/ceph.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/ceph.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | The URL of the [Ceph Manager API](https://docs.ceph.com/en/reef/mgr/ceph_api/). | https://127.0.0.1:8443 | yes |\n| timeout | HTTP request timeout. | 2 | no |\n| username | Username for basic HTTP authentication. | | yes |\n| password | Password for basic HTTP authentication. | | yes |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | yes | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8443\n username: user\n password: pass\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8443\n username: user\n password: pass\n\n - name: remote\n url: https://192.0.2.1:8443\n username: user\n password: pass\n\n```\n", + "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `ceph` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m ceph\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `ceph` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep ceph\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep ceph /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep ceph\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ ceph_cluster_physical_capacity_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ceph.conf) | ceph.cluster_physical_capacity_utilization | Ceph cluster ${label:fsid} disk space utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per cluster\n\nThese metrics refer to the entire Ceph cluster.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| fsid | A unique identifier of the cluster. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ceph.cluster_status | ok, err, warn | status |\n| ceph.cluster_hosts_count | hosts | hosts |\n| ceph.cluster_monitors_count | monitors | monitors |\n| ceph.cluster_osds_count | osds | osds |\n| ceph.cluster_osds_by_status_count | up, down, in, out | status |\n| ceph.cluster_managers_count | active, standby | managers |\n| ceph.cluster_object_gateways_count | object | gateways |\n| ceph.cluster_iscsi_gateways_count | iscsi | gateways |\n| ceph.cluster_iscsi_gateways_by_status_count | up, down | gateways |\n| ceph.cluster_physical_capacity_utilization | utilization | percent |\n| ceph.cluster_physical_capacity_usage | avail, used | bytes |\n| ceph.cluster_objects_count | objects | objects |\n| ceph.cluster_objects_by_status_distribution | healthy, misplaced, degraded, unfound | percent |\n| ceph.cluster_pools_count | pools | pools |\n| ceph.cluster_pgs_count | pgs | pgs |\n| ceph.cluster_pgs_by_status_count | clean, working, warning, unknown | pgs |\n| ceph.cluster_pgs_per_osd_count | per_osd | pgs |\n\n### Per osd\n\nThese metrics refer to the Object Storage Daemon (OSD).\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| fsid | A unique identifier of the cluster. |\n| osd_uuid | OSD UUID. |\n| osd_name | OSD name. |\n| device_class | OSD device class. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ceph.osd_status | up, down, in, out | status |\n| ceph.osd_space_usage | avail, used | bytes |\n| ceph.osd_io | read, written | bytes/s |\n| ceph.osd_iops | read, write | ops/s |\n| ceph.osd_latency | commit, apply | milliseconds |\n\n### Per pool\n\nThese metrics refer to the Pool.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| fsid | A unique identifier of the cluster. |\n| pool_name | Pool name. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ceph.pool_space_utilization | utilization | percent |\n| ceph.pool_space_usage | avail, used | bytes |\n| ceph.pool_objects_count | object | objects |\n| ceph.pool_io | read, written | bytes/s |\n| ceph.pool_iops | read, write | ops/s |\n\n", + "integration_type": "collector", + "id": "go.d.plugin-ceph-Ceph", + "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/ceph/metadata.yaml", + "related_resources": "" + }, { "meta": { "id": "collector-go.d.plugin-chrony", @@ -3433,11 +3465,11 @@ export const integrations = [ }, "most_popular": false }, - "overview": "# Chrony\n\nPlugin: go.d.plugin\nModule: chrony\n\n## Overview\n\nThis collector monitors the system's clock performance and peers activity status\n\nIt collects metrics by sending UDP packets to chronyd using the Chrony communication protocol v6.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis collector discovers Chrony instance running on the local host and listening on port 323.\nOn startup, it tries to collect metrics from:\n\n- 127.0.0.1:323\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/chrony.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/chrony.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address. The format is IP:PORT. | 127.0.0.1:323 | yes |\n| timeout | Connection timeout. Zero means no timeout. 
| 1 | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:323\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:323\n\n - name: remote\n address: 192.0.2.1:323\n\n```\n{% /details %}\n", + "overview": "# Chrony\n\nPlugin: go.d.plugin\nModule: chrony\n\n## Overview\n\nThis collector monitors the system's clock performance and peers' activity status.\n\n\nIt collects metrics by sending UDP packets to chronyd using the Chrony communication protocol v6.\nAdditionally, for data collection jobs that connect to localhost Chrony instances, it collects serverstats metrics (NTP packets, command packets received/dropped) by executing the 'chronyc serverstats' command.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis collector discovers Chrony instances running on the local host and listening on port 323.\nOn startup, it tries to collect metrics from:\n\n- 127.0.0.1:323\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/chrony.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/chrony.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address. The format is IP:PORT. | 127.0.0.1:323 | yes |\n| timeout | Connection timeout. Zero means no timeout. 
| 1 | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:323\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:323\n\n - name: remote\n address: 192.0.2.1:323\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `chrony` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m chrony\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `chrony` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep chrony\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep chrony /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep chrony\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Chrony instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| chrony.stratum | stratum | level |\n| chrony.current_correction | current_correction | seconds |\n| chrony.root_delay | root_delay | seconds |\n| chrony.root_dispersion | root_delay | seconds |\n| chrony.last_offset | offset | seconds |\n| chrony.rms_offset | offset | seconds |\n| chrony.frequency | frequency | ppm |\n| chrony.residual_frequency | residual_frequency | ppm |\n| chrony.skew | skew | ppm |\n| chrony.update_interval | update_interval | seconds |\n| chrony.ref_measurement_time | ref_measurement_time | seconds |\n| chrony.leap_status | normal, insert_second, delete_second, unsynchronised | status |\n| chrony.activity | online, offline, burst_online, burst_offline, unresolved | sources |\n\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Chrony instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| chrony.stratum | stratum | level |\n| chrony.current_correction | current_correction | seconds |\n| chrony.root_delay | root_delay | seconds |\n| chrony.root_dispersion | root_dispersion | seconds |\n| chrony.last_offset | offset | seconds |\n| chrony.rms_offset | offset | seconds |\n| chrony.frequency | frequency | ppm |\n| chrony.residual_frequency | residual_frequency | ppm |\n| chrony.skew | skew | ppm |\n| chrony.update_interval | update_interval | seconds |\n| chrony.ref_measurement_time | ref_measurement_time | seconds |\n| chrony.leap_status | normal, insert_second, delete_second, unsynchronised | status |\n| chrony.activity | online, offline, burst_online, burst_offline, unresolved | sources |\n| chrony.ntp_packets | received, dropped | packets/s |\n| chrony.command_packets | received, dropped | packets/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-chrony-Chrony", "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/chrony/metadata.yaml", @@ -3470,7 +3502,7 @@ export const integrations = [ }, "most_popular": false }, "overview": "# ClickHouse\n\nPlugin: go.d.plugin\nModule: clickhouse\n\n## Overview\n\nThis collector retrieves performance data from ClickHouse for connections, queries, resources, replication, IO, and data operations (inserts, selects, merges) using HTTP requests and ClickHouse system tables. 
It monitors your ClickHouse server's health and activity.\n\n\nIt sends HTTP requests to the ClickHouse [HTTP interface](https://clickhouse.com/docs/en/interfaces/http), executing SELECT queries to retrieve data from various system tables.\nSpecifically, it collects metrics from the following tables:\n\n- system.metrics\n- system.async_metrics\n- system.events\n- system.disks\n- system.parts\n- system.processes\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects ClickHouse instances running on localhost that are listening on port 8123.\nOn startup, it tries to collect metrics from:\n\n- http://127.0.0.1:8123\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/clickhouse.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/clickhouse.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8123 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8123\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8123\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nClickHouse with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8123\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8123\n\n - name: remote\n url: http://192.0.2.1:8123\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/clickhouse.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/clickhouse.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8123 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8123\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8123\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nClickHouse with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8123\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8123\n\n - name: remote\n url: http://192.0.2.1:8123\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `clickhouse` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m clickhouse\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `clickhouse` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep clickhouse\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep clickhouse /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep clickhouse\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ clickhouse_restarted ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.uptime | ClickHouse has recently been restarted |\n| [ clickhouse_queries_preempted ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.queries_preempted | ClickHouse has queries that are stopped and waiting due to priority setting |\n| [ clickhouse_long_running_query ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.longest_running_query_time | ClickHouse has a long-running query exceeding the threshold |\n| [ clickhouse_rejected_inserts ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.rejected_inserts | ClickHouse has INSERT queries that are rejected due to high number of active data parts for partition in a MergeTree |\n| [ clickhouse_delayed_inserts ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.delayed_inserts | ClickHouse has INSERT queries that are throttled due to high number of active data parts for partition in a MergeTree |\n| [ clickhouse_replication_lag ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.replicas_max_absolute_delay | ClickHouse is experiencing replication lag greater than 5 minutes |\n| [ clickhouse_replicated_readonly_tables ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.replicated_readonly_tables | ClickHouse has replicated tables in readonly state due to ZooKeeper session loss/startup without ZooKeeper configured |\n| [ clickhouse_max_part_count_for_partition ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.max_part_count_for_partition | ClickHouse high number of parts per partition |\n| [ clickhouse_distributed_connections_failures ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.distributed_connections_fail_exhausted_retries | ClickHouse has failed distributed connections after exhausting all retry attempts |\n| [ clickhouse_distributed_files_to_insert ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.distributed_files_to_insert | ClickHouse high number of pending files to process for asynchronous insertion into Distributed tables |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per ClickHouse instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| clickhouse.connections | tcp, http, mysql, postgresql, interserver | connections |\n| clickhouse.slow_reads | slow | reads/s |\n| clickhouse.read_backoff | read_backoff | events/s |\n| clickhouse.memory_usage | used | bytes |\n| clickhouse.running_queries | running | queries |\n| clickhouse.queries_preempted | preempted | queries |\n| clickhouse.queries | successful, failed | queries/s |\n| clickhouse.select_queries | successful, failed | selects/s |\n| clickhouse.insert_queries | successful, failed | inserts/s |\n| clickhouse.queries_memory_limit_exceeded | mem_limit_exceeded | queries/s |\n| clickhouse.longest_running_query_time | longest_query_time | seconds |\n| clickhouse.queries_latency | queries_time | microseconds |\n| clickhouse.select_queries_latency | selects_time | microseconds |\n| clickhouse.insert_queries_latency | inserts_time | microseconds |\n| clickhouse.io | reads, writes | bytes/s |\n| clickhouse.iops | reads, writes | ops/s |\n| clickhouse.io_errors | read, write | errors/s |\n| clickhouse.io_seeks | lseek | ops/s |\n| clickhouse.io_file_opens | file_open | ops/s |\n| clickhouse.replicated_parts_current_activity | fetch, send, check | parts |\n| clickhouse.replicas_max_absolute_delay | replication_delay | seconds |\n| clickhouse.replicated_readonly_tables | read_only | tables |\n| clickhouse.replicated_data_loss | data_loss | events |\n| clickhouse.replicated_part_fetches | successful, failed | fetches/s |\n| clickhouse.inserted_rows | inserted | rows/s |\n| clickhouse.inserted_bytes | inserted | bytes/s |\n| clickhouse.rejected_inserts | rejected | inserts/s |\n| clickhouse.delayed_inserts | delayed | inserts/s |\n| clickhouse.delayed_inserts_throttle_time | delayed_inserts_throttle_time | milliseconds |\n| clickhouse.selected_bytes | selected | bytes/s |\n| clickhouse.selected_rows | selected | rows/s |\n| clickhouse.selected_parts | selected | parts/s |\n| clickhouse.selected_ranges | selected | ranges/s |\n| clickhouse.selected_marks | selected | marks/s |\n| clickhouse.merges | merge | ops/s |\n| clickhouse.merges_latency | merges_time | milliseconds |\n| clickhouse.merged_uncompressed_bytes | merged_uncompressed | bytes/s |\n| clickhouse.merged_rows | merged | rows/s |\n| clickhouse.merge_tree_data_writer_inserted_rows | inserted | rows/s |\n| clickhouse.merge_tree_data_writer_uncompressed_bytes | inserted | bytes/s |\n| clickhouse.merge_tree_data_writer_compressed_bytes | written | bytes/s |\n| clickhouse.uncompressed_cache_requests | hits, misses | requests/s |\n| clickhouse.mark_cache_requests | hits, misses | requests/s |\n| clickhouse.max_part_count_for_partition | max_parts_partition | parts |\n| clickhouse.parts_count | temporary, pre_active, active, deleting, delete_on_destroy, outdated, wide, compact | parts |\n| clickhouse.distributed_connections | active | connections |\n| clickhouse.distributed_connections_attempts | connection | attempts/s |\n| clickhouse.distributed_connections_fail_retries | connection_retry | fails/s |\n| clickhouse.distributed_connections_fail_exhausted_retries | connection_retry_exhausted | fails/s |\n| clickhouse.distributed_files_to_insert | pending_insertions | files |\n| clickhouse.distributed_rejected_inserts | rejected | inserts/s |\n| clickhouse.distributed_delayed_inserts | delayed | inserts/s |\n| clickhouse.distributed_delayed_inserts_latency | 
delayed_time | milliseconds |\n| clickhouse.distributed_sync_insertion_timeout_exceeded | sync_insertion | timeouts/s |\n| clickhouse.distributed_async_insertions_failures | async_insertions | failures/s |\n| clickhouse.uptime | uptime | seconds |\n\n### Per disk\n\nThese metrics refer to the Disk.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk_name | Name of the disk as defined in the [server configuration](https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/mergetree#table_engine-mergetree-multiple-volumes_configure). |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| clickhouse.disk_space_usage | free, used | bytes |\n\n### Per table\n\nThese metrics refer to the Database Table.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| database | Name of the database. |\n| table | Name of the table. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| clickhouse.database_table_size | size | bytes |\n| clickhouse.database_table_parts | parts | parts |\n| clickhouse.database_table_rows | rows | rows |\n\n", @@ -3507,7 +3539,7 @@ export const integrations = [ "most_popular": false }, "overview": "# CockroachDB\n\nPlugin: go.d.plugin\nModule: cockroachdb\n\n## Overview\n\nThis collector monitors CockroachDB servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/cockroachdb.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/cockroachdb.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8080/_status/vars | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. 
| no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/_status/vars\n\n```\n{% /details %}\n##### HTTP authentication\n\nLocal server with basic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/_status/vars\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nCockroachDB with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8080/_status/vars\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/_status/vars\n\n - name: remote\n url: http://203.0.113.10:8080/_status/vars\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/cockroachdb.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/cockroachdb.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8080/_status/vars | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/_status/vars\n\n```\n{% /details %}\n##### HTTP authentication\n\nLocal server with basic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/_status/vars\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nCockroachDB with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8080/_status/vars\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/_status/vars\n\n - name: remote\n url: http://203.0.113.10:8080/_status/vars\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `cockroachdb` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m cockroachdb\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `cockroachdb` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep cockroachdb\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep cockroachdb /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep cockroachdb\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ cockroachdb_used_storage_capacity ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf) | cockroachdb.storage_used_capacity_percentage | storage capacity utilization |\n| [ cockroachdb_used_usable_storage_capacity ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf) | cockroachdb.storage_used_capacity_percentage | storage usable space utilization |\n| [ cockroachdb_unavailable_ranges ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf) | cockroachdb.ranges_replication_problem | number of ranges with fewer live replicas than needed for quorum |\n| [ cockroachdb_underreplicated_ranges ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf) | cockroachdb.ranges_replication_problem | number of ranges with fewer live replicas than the replication target |\n| [ cockroachdb_open_file_descriptors_limit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf) | cockroachdb.process_file_descriptors | open file descriptors utilization (against softlimit) |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per CockroachDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cockroachdb.process_cpu_time_combined_percentage | used | percentage |\n| cockroachdb.process_cpu_time_percentage | user, sys | percentage |\n| cockroachdb.process_cpu_time | user, sys | ms |\n| cockroachdb.process_memory | rss | KiB |\n| cockroachdb.process_file_descriptors | open | fd |\n| cockroachdb.process_uptime | uptime | seconds |\n| cockroachdb.host_disk_bandwidth | read, write | KiB |\n| cockroachdb.host_disk_operations | reads, writes | operations |\n| cockroachdb.host_disk_iops_in_progress | in_progress | iops |\n| cockroachdb.host_network_bandwidth | received, sent | kilobits |\n| cockroachdb.host_network_packets | received, sent | packets |\n| cockroachdb.live_nodes | live_nodes | nodes |\n| cockroachdb.node_liveness_heartbeats | successful, failed | heartbeats |\n| cockroachdb.total_storage_capacity | total | KiB |\n| cockroachdb.storage_capacity_usability | usable, unusable | KiB |\n| cockroachdb.storage_usable_capacity | available, used | KiB |\n| cockroachdb.storage_used_capacity_percentage | total, usable | percentage |\n| cockroachdb.sql_connections | active | connections |\n| cockroachdb.sql_bandwidth | received, sent | KiB |\n| cockroachdb.sql_statements_total | started, executed | statements |\n| cockroachdb.sql_errors | statement, transaction | errors |\n| cockroachdb.sql_started_ddl_statements | ddl | statements |\n| cockroachdb.sql_executed_ddl_statements | ddl | statements |\n| cockroachdb.sql_started_dml_statements | select, update, delete, insert | statements |\n| cockroachdb.sql_executed_dml_statements | select, update, delete, insert | statements |\n| 
cockroachdb.sql_started_tcl_statements | begin, commit, rollback, savepoint, savepoint_cockroach_restart, release_savepoint_cockroach_restart, rollback_to_savepoint_cockroach_restart | statements |\n| cockroachdb.sql_executed_tcl_statements | begin, commit, rollback, savepoint, savepoint_cockroach_restart, release_savepoint_cockroach_restart, rollback_to_savepoint_cockroach_restart | statements |\n| cockroachdb.sql_active_distributed_queries | active | queries |\n| cockroachdb.sql_distributed_flows | active, queued | flows |\n| cockroachdb.live_bytes | applications, system | KiB |\n| cockroachdb.logical_data | keys, values | KiB |\n| cockroachdb.logical_data_count | keys, values | num |\n| cockroachdb.kv_transactions | committed, fast-path_committed, aborted | transactions |\n| cockroachdb.kv_transaction_restarts | write_too_old, write_too_old_multiple, forwarded_timestamp, possible_replay, async_consensus_failure, read_within_uncertainty_interval, aborted, push_failure, unknown | restarts |\n| cockroachdb.ranges | ranges | ranges |\n| cockroachdb.ranges_replication_problem | unavailable, under_replicated, over_replicated | ranges |\n| cockroachdb.range_events | split, add, remove, merge | events |\n| cockroachdb.range_snapshot_events | generated, applied_raft_initiated, applied_learner, applied_preemptive | events |\n| cockroachdb.rocksdb_read_amplification | reads | reads/query |\n| cockroachdb.rocksdb_table_operations | compactions, flushes | operations |\n| cockroachdb.rocksdb_cache_usage | used | KiB |\n| cockroachdb.rocksdb_cache_operations | hits, misses | operations |\n| cockroachdb.rocksdb_cache_hit_rate | hit_rate | percentage |\n| cockroachdb.rocksdb_sstables | sstables | sstables |\n| cockroachdb.replicas | replicas | replicas |\n| cockroachdb.replicas_quiescence | quiescent, active | replicas |\n| cockroachdb.replicas_leaders | leaders, not_leaseholders | replicas |\n| cockroachdb.replicas_leaseholders | leaseholders | leaseholders |\n| cockroachdb.queue_processing_failures | gc, replica_gc, replication, split, consistency, raft_log, raft_snapshot, time_series_maintenance | failures |\n| cockroachdb.rebalancing_queries | avg | queries/s |\n| cockroachdb.rebalancing_writes | avg | writes/s |\n| cockroachdb.timeseries_samples | written | samples |\n| cockroachdb.timeseries_write_errors | write | errors |\n| cockroachdb.timeseries_write_bytes | written | KiB |\n| cockroachdb.slow_requests | acquiring_latches, acquiring_lease, in_raft | requests |\n| cockroachdb.code_heap_memory_usage | go, cgo | KiB |\n| cockroachdb.goroutines | goroutines | goroutines |\n| cockroachdb.gc_count | gc | invokes |\n| cockroachdb.gc_pause | pause | us |\n| cockroachdb.cgo_calls | cgo | calls |\n\n", @@ -3545,7 +3577,7 @@ export const integrations = [ "most_popular": true }, "overview": "# Consul\n\nPlugin: go.d.plugin\nModule: consul\n\n## Overview\n\nThis collector monitors [key metrics](https://developer.hashicorp.com/consul/docs/agent/telemetry#key-metrics) of Consul Agents: transaction timings, leadership changes, memory usage and more.\n\n\nIt periodically sends HTTP requests to [Consul REST API](https://developer.hashicorp.com/consul/api-docs).\n\nUsed endpoints:\n\n- [/operator/autopilot/health](https://developer.hashicorp.com/consul/api-docs/operator/autopilot#read-health)\n- [/agent/checks](https://developer.hashicorp.com/consul/api-docs/agent/check#list-checks)\n- [/agent/self](https://developer.hashicorp.com/consul/api-docs/agent#read-configuration)\n- 
[/agent/metrics](https://developer.hashicorp.com/consul/api-docs/agent#view-metrics)\n- [/coordinate/nodes](https://developer.hashicorp.com/consul/api-docs/coordinate#read-lan-coordinates-for-all-nodes)\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis collector discovers instances running on the local host, that provide metrics on port 8500.\n\nOn startup, it tries to collect metrics from:\n\n- http://localhost:8500\n- http://127.0.0.1:8500\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Enable Prometheus telemetry\n\n[Enable](https://developer.hashicorp.com/consul/docs/agent/config/config-files#telemetry-prometheus_retention_time) telemetry on your Consul agent, by increasing the value of `prometheus_retention_time` from `0`.\n\n\n#### Add required ACLs to Token\n\nRequired **only if authentication is enabled**.\n\n| ACL | Endpoint |\n|:---------------:|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|\n| `operator:read` | [autopilot health status](https://developer.hashicorp.com/consul/api-docs/operator/autopilot#read-health) |\n| `node:read` | [checks](https://developer.hashicorp.com/consul/api-docs/agent/check#list-checks) |\n| `agent:read` | [configuration](https://developer.hashicorp.com/consul/api-docs/agent#read-configuration), [metrics](https://developer.hashicorp.com/consul/api-docs/agent#view-metrics), and [lan coordinates](https://developer.hashicorp.com/consul/api-docs/coordinate#read-lan-coordinates-for-all-nodes) |\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/consul.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/consul.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"All options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://localhost:8500 | yes |\n| acl_token | ACL token used in every request. | | no |\n| max_checks | Checks processing/charting limit. | | no |\n| max_filter | Checks processing/charting filter. Uses [simple patterns](/src/libnetdata/simple_pattern/README.md). | | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. 
| | no |\n| timeout | HTTP request timeout. | 1 | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client tls certificate. | | no |\n| tls_key | Client tls key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8500\n acl_token: \"ec15675e-2999-d789-832e-8c4794daa8d7\"\n\n```\n##### Basic HTTP auth\n\nLocal server with basic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8500\n acl_token: \"ec15675e-2999-d789-832e-8c4794daa8d7\"\n username: foo\n password: bar\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8500\n acl_token: \"ec15675e-2999-d789-832e-8c4794daa8d7\"\n\n - name: remote\n url: http://203.0.113.10:8500\n acl_token: \"ada7f751-f654-8872-7f93-498e799158b6\"\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Enable Prometheus telemetry\n\n[Enable](https://developer.hashicorp.com/consul/docs/agent/config/config-files#telemetry-prometheus_retention_time) telemetry on your Consul agent, by increasing the value of `prometheus_retention_time` from `0`.\n\n\n#### Add required ACLs to Token\n\nRequired **only if authentication is enabled**.\n\n| ACL | Endpoint |\n|:---------------:|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|\n| `operator:read` | [autopilot health status](https://developer.hashicorp.com/consul/api-docs/operator/autopilot#read-health) |\n| `node:read` | [checks](https://developer.hashicorp.com/consul/api-docs/agent/check#list-checks) |\n| `agent:read` | [configuration](https://developer.hashicorp.com/consul/api-docs/agent#read-configuration), [metrics](https://developer.hashicorp.com/consul/api-docs/agent#view-metrics), and [lan coordinates](https://developer.hashicorp.com/consul/api-docs/coordinate#read-lan-coordinates-for-all-nodes) |\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/consul.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/consul.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"All options\" 
%}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://localhost:8500 | yes |\n| acl_token | ACL token used in every request. | | no |\n| max_checks | Checks processing/charting limit. | | no |\n| max_filter | Checks processing/charting filter. Uses [simple patterns](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md). | | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 1 | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8500\n acl_token: \"ec15675e-2999-d789-832e-8c4794daa8d7\"\n\n```\n##### Basic HTTP auth\n\nLocal server with basic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8500\n acl_token: \"ec15675e-2999-d789-832e-8c4794daa8d7\"\n username: foo\n password: bar\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8500\n acl_token: \"ec15675e-2999-d789-832e-8c4794daa8d7\"\n\n - name: remote\n url: http://203.0.113.10:8500\n acl_token: \"ada7f751-f654-8872-7f93-498e799158b6\"\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `consul` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m consul\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `consul` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep consul\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep consul /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep consul\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ consul_node_health_check_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.node_health_check_status | node health check ${label:check_name} has failed on server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_service_health_check_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.service_health_check_status | service health check ${label:check_name} for service ${label:service_name} has failed on server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_client_rpc_requests_exceeded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.client_rpc_requests_exceeded_rate | number of rate-limited RPC requests made by server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_client_rpc_requests_failed ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.client_rpc_requests_failed_rate | number of failed RPC requests made by server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_gc_pause_time ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.gc_pause_time | time spent in stop-the-world garbage collection pauses on server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_autopilot_health_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.autopilot_health_status | datacenter ${label:datacenter} cluster is unhealthy as reported by server ${label:node_name} |\n| [ consul_autopilot_server_health_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.autopilot_server_health_status | server ${label:node_name} from datacenter ${label:datacenter} is unhealthy 
|\n| [ consul_raft_leader_last_contact_time ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.raft_leader_last_contact_time | median time elapsed since leader server ${label:node_name} datacenter ${label:datacenter} was last able to contact the follower nodes |\n| [ consul_raft_leadership_transitions ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.raft_leadership_transitions_rate | there has been a leadership change and server ${label:node_name} datacenter ${label:datacenter} has become the leader |\n| [ consul_raft_thread_main_saturation ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.raft_thread_main_saturation_perc | average saturation of the main Raft goroutine on server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_raft_thread_fsm_saturation ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.raft_thread_fsm_saturation_perc | average saturation of the FSM Raft goroutine on server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_license_expiration_time ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.license_expiration_time | Consul Enterprise license expiration time on node ${label:node_name} datacenter ${label:datacenter} |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nThe set of metrics depends on the [Consul Agent mode](https://developer.hashicorp.com/consul/docs/install/glossary#agent).\n\n\n### Per Consul instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | Leader | Follower | Client |\n|:------|:----------|:----|:---:|:---:|:---:|\n| consul.client_rpc_requests_rate | rpc | requests/s | \u2022 | \u2022 | \u2022 |\n| consul.client_rpc_requests_exceeded_rate | exceeded | requests/s | \u2022 | \u2022 | \u2022 |\n| consul.client_rpc_requests_failed_rate | failed | requests/s | \u2022 | \u2022 | \u2022 |\n| consul.memory_allocated | allocated | bytes | \u2022 | \u2022 | \u2022 |\n| consul.memory_sys | sys | bytes | \u2022 | \u2022 | \u2022 |\n| consul.gc_pause_time | gc_pause | seconds | \u2022 | \u2022 | \u2022 |\n| consul.kvs_apply_time | quantile_0.5, quantile_0.9, quantile_0.99 | ms | \u2022 | \u2022 | |\n| consul.kvs_apply_operations_rate | kvs_apply | ops/s | \u2022 | \u2022 | |\n| consul.txn_apply_time | quantile_0.5, quantile_0.9, quantile_0.99 | ms | \u2022 | \u2022 | |\n| consul.txn_apply_operations_rate | txn_apply | ops/s | \u2022 | \u2022 | |\n| consul.autopilot_health_status | healthy, unhealthy | status | \u2022 | \u2022 | |\n| consul.autopilot_failure_tolerance | failure_tolerance | servers | \u2022 | \u2022 | |\n| consul.autopilot_server_health_status | healthy, unhealthy | status | \u2022 | \u2022 | |\n| consul.autopilot_server_stable_time | stable | seconds | \u2022 | \u2022 | |\n| consul.autopilot_server_serf_status | active, failed, left, none | status | \u2022 | \u2022 | |\n| consul.autopilot_server_voter_status | voter, not_voter | status | \u2022 | \u2022 | |\n| consul.network_lan_rtt | min, max, avg | ms | \u2022 | \u2022 | |\n| consul.raft_commit_time | quantile_0.5, quantile_0.9, quantile_0.99 | ms | \u2022 | | |\n| consul.raft_commits_rate | commits | commits/s | \u2022 | | |\n| 
consul.raft_leader_last_contact_time | quantile_0.5, quantile_0.9, quantile_0.99 | ms | \u2022 | | |\n| consul.raft_leader_oldest_log_age | oldest_log_age | seconds | \u2022 | | |\n| consul.raft_follower_last_contact_leader_time | leader_last_contact | ms | | \u2022 | |\n| consul.raft_rpc_install_snapshot_time | quantile_0.5, quantile_0.9, quantile_0.99 | ms | | \u2022 | |\n| consul.raft_leader_elections_rate | leader | elections/s | \u2022 | \u2022 | |\n| consul.raft_leadership_transitions_rate | leadership | transitions/s | \u2022 | \u2022 | |\n| consul.server_leadership_status | leader, not_leader | status | \u2022 | \u2022 | |\n| consul.raft_thread_main_saturation_perc | quantile_0.5, quantile_0.9, quantile_0.99 | percentage | \u2022 | \u2022 | |\n| consul.raft_thread_fsm_saturation_perc | quantile_0.5, quantile_0.9, quantile_0.99 | percentage | \u2022 | \u2022 | |\n| consul.raft_fsm_last_restore_duration | last_restore_duration | ms | \u2022 | \u2022 | |\n| consul.raft_boltdb_freelist_bytes | freelist | bytes | \u2022 | \u2022 | |\n| consul.raft_boltdb_logs_per_batch_rate | written | logs/s | \u2022 | \u2022 | |\n| consul.raft_boltdb_store_logs_time | quantile_0.5, quantile_0.9, quantile_0.99 | ms | \u2022 | \u2022 | |\n| consul.license_expiration_time | license_expiration | seconds | \u2022 | \u2022 | \u2022 |\n\n### Per node check\n\nMetrics about checks on Node level.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| datacenter | Datacenter Identifier |\n| node_name | The node's name |\n| check_name | The check's name |\n\nMetrics:\n\n| Metric | Dimensions | Unit | Leader | Follower | Client |\n|:------|:----------|:----|:---:|:---:|:---:|\n| consul.node_health_check_status | passing, maintenance, warning, critical | status | \u2022 | \u2022 | \u2022 |\n\n### Per service check\n\nMetrics about checks at a Service level.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| datacenter | Datacenter Identifier |\n| node_name | The node's name |\n| check_name | The check's name |\n| service_name | The service's name |\n\nMetrics:\n\n| Metric | Dimensions | Unit | Leader | Follower | Client |\n|:------|:----------|:----|:---:|:---:|:---:|\n| consul.service_health_check_status | passing, maintenance, warning, critical | status | \u2022 | \u2022 | \u2022 |\n\n", @@ -3583,7 +3615,7 @@ export const integrations = [ "most_popular": false }, "overview": "# CoreDNS\n\nPlugin: go.d.plugin\nModule: coredns\n\n## Overview\n\nThis collector monitors CoreDNS instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/coredns.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config 
go.d/coredns.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"All options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9153/metrics | yes |\n| per_server_stats | Server filter. | | no |\n| per_zone_stats | Zone filter. | | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 2 | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client tls certificate. | | no |\n| tls_key | Client tls key. | | no |\n\n##### per_server_stats\n\nMetrics of servers matching the selector will be collected.\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [matcher](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/matcher#supported-format).\n- Syntax:\n\n```yaml\nper_server_stats:\n includes:\n - pattern1\n - pattern2\n excludes:\n - pattern3\n - pattern4\n```\n\n\n##### per_zone_stats\n\nMetrics of zones matching the selector will be collected.\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [matcher](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/matcher#supported-format).\n- Syntax:\n\n```yaml\nper_zone_stats:\n includes:\n - pattern1\n - pattern2\n excludes:\n - pattern3\n - pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9153/metrics\n\n```\n{% /details %}\n##### Basic HTTP auth\n\nLocal server with basic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9153/metrics\n username: foo\n password: bar\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9153/metrics\n\n - name: remote\n url: http://203.0.113.10:9153/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/coredns.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config 
directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/coredns.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"All options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9153/metrics | yes |\n| per_server_stats | Server filter. | | no |\n| per_zone_stats | Zone filter. | | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 2 | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client tls certificate. | | no |\n| tls_key | Client tls key. | | no |\n\n##### per_server_stats\n\nMetrics of servers matching the selector will be collected.\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [matcher](https://github.com/netdata/netdata/tree/master/src/go/pkg/matcher#supported-format).\n- Syntax:\n\n```yaml\nper_server_stats:\n includes:\n - pattern1\n - pattern2\n excludes:\n - pattern3\n - pattern4\n```\n\n\n##### per_zone_stats\n\nMetrics of zones matching the selector will be collected.\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [matcher](https://github.com/netdata/netdata/tree/master/src/go/pkg/matcher#supported-format).\n- Syntax:\n\n```yaml\nper_zone_stats:\n includes:\n - pattern1\n - pattern2\n excludes:\n - pattern3\n - pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9153/metrics\n\n```\n{% /details %}\n##### Basic HTTP auth\n\nLocal server with basic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9153/metrics\n username: foo\n password: bar\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9153/metrics\n\n - name: remote\n url: http://203.0.113.10:9153/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `coredns` collector, run the `go.d.plugin` 
with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m coredns\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `coredns` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep coredns\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep coredns /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep coredns\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per CoreDNS instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| coredns.dns_request_count_total | requests | requests/s |\n| coredns.dns_responses_count_total | responses | responses/s |\n| coredns.dns_request_count_total_per_status | processed, dropped | requests/s |\n| coredns.dns_no_matching_zone_dropped_total | dropped | requests/s |\n| coredns.dns_panic_count_total | panics | panics/s |\n| coredns.dns_requests_count_total_per_proto | udp, tcp | requests/s |\n| coredns.dns_requests_count_total_per_ip_family | v4, v6 | requests/s |\n| coredns.dns_requests_count_total_per_per_type | a, aaaa, mx, soa, cname, ptr, txt, ns, ds, dnskey, rrsig, nsec, nsec3, ixfr, any, other | requests/s |\n| coredns.dns_responses_count_total_per_rcode | noerror, formerr, servfail, nxdomain, notimp, refused, yxdomain, yxrrset, nxrrset, notauth, notzone, badsig, badkey, badtime, badmode, badname, badalg, badtrunc, badcookie, other | responses/s |\n\n### Per server\n\nThese metrics refer to the DNS server.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| server_name | Server name. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| coredns.server_dns_request_count_total | requests | requests/s |\n| coredns.server_dns_responses_count_total | responses | responses/s |\n| coredns.server_request_count_total_per_status | processed, dropped | requests/s |\n| coredns.server_requests_count_total_per_proto | udp, tcp | requests/s |\n| coredns.server_requests_count_total_per_ip_family | v4, v6 | requests/s |\n| coredns.server_requests_count_total_per_per_type | a, aaaa, mx, soa, cname, ptr, txt, ns, ds, dnskey, rrsig, nsec, nsec3, ixfr, any, other | requests/s |\n| coredns.server_responses_count_total_per_rcode | noerror, formerr, servfail, nxdomain, notimp, refused, yxdomain, yxrrset, nxrrset, notauth, notzone, badsig, badkey, badtime, badmode, badname, badalg, badtrunc, badcookie, other | responses/s |\n\n### Per zone\n\nThese metrics refer to the DNS zone.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| zone_name | Zone name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| coredns.zone_dns_request_count_total | requests | requests/s |\n| coredns.zone_dns_responses_count_total | responses | responses/s |\n| coredns.zone_requests_count_total_per_proto | udp, tcp | requests/s |\n| coredns.zone_requests_count_total_per_ip_family | v4, v6 | requests/s |\n| coredns.zone_requests_count_total_per_per_type | a, aaaa, mx, soa, cname, ptr, txt, ns, ds, dnskey, rrsig, nsec, nsec3, ixfr, any, other | requests/s |\n| coredns.zone_responses_count_total_per_rcode | noerror, formerr, servfail, nxdomain, notimp, refused, yxdomain, yxrrset, nxrrset, notauth, notzone, badsig, badkey, badtime, badmode, badname, badalg, badtrunc, badcookie, other | responses/s |\n\n", @@ -3620,7 +3652,7 @@ export const integrations = [ "most_popular": false }, "overview": "# Couchbase\n\nPlugin: go.d.plugin\nModule: couchbase\n\n## Overview\n\nThis collector monitors Couchbase servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/couchbase.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/couchbase.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"All options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8091 | yes |\n| username | Username for basic HTTP authentication. 
| | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 2 | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client tls certificate. | | no |\n| tls_key | Client tls key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8091\n\n```\n{% /details %}\n##### Basic HTTP auth\n\nLocal server with basic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8091\n username: foo\n password: bar\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8091\n\n - name: remote\n url: http://203.0.113.0:8091\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/couchbase.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/couchbase.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"All options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8091 | yes |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 2 | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. 
| no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client tls certificate. | | no |\n| tls_key | Client tls key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8091\n\n```\n{% /details %}\n##### Basic HTTP auth\n\nLocal server with basic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8091\n username: foo\n password: bar\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8091\n\n - name: remote\n url: http://203.0.113.0:8091\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `couchbase` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m couchbase\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `couchbase` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep couchbase\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep couchbase /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep couchbase\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Couchbase instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| couchbase.bucket_quota_percent_used | a dimension per bucket | percentage |\n| couchbase.bucket_ops_per_sec | a dimension per bucket | ops/s |\n| couchbase.bucket_disk_fetches | a dimension per bucket | fetches |\n| couchbase.bucket_item_count | a dimension per bucket | items |\n| couchbase.bucket_disk_used_stats | a dimension per bucket | bytes |\n| couchbase.bucket_data_used | a dimension per bucket | bytes |\n| couchbase.bucket_mem_used | a dimension per bucket | bytes |\n| couchbase.bucket_vb_active_num_non_resident | a dimension per bucket | items |\n\n", @@ -3657,7 +3689,7 @@ export const integrations = [ "most_popular": false }, "overview": "# CouchDB\n\nPlugin: go.d.plugin\nModule: couchdb\n\n## Overview\n\nThis collector monitors CouchDB servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/couchdb.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/couchdb.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:5984 | yes |\n| node | CouchDB node name. Same as -name vm.args argument. | _local | no |\n| databases | List of database names for which db-specific stats should be displayed, space separated. | | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 2 | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. 
| | no |\n| tls_cert | Client tls certificate. | | no |\n| tls_key | Client tls key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:5984\n\n```\n{% /details %}\n##### Basic HTTP auth\n\nLocal server with basic HTTP authentication, node name and multiple databases defined. Make sure to match the node name with the `NODENAME` value in your CouchDB's `etc/vm.args` file. Typically, this is of the form `couchdb@fully.qualified.domain.name` in a cluster, or `couchdb@127.0.0.1` for a single-node server.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:5984\n node: couchdb@127.0.0.1\n databases: my-db other-db\n username: foo\n password: bar\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:5984\n\n - name: remote\n url: http://203.0.113.0:5984\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/couchdb.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/couchdb.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:5984 | yes |\n| node | CouchDB node name. Same as -name vm.args argument. | _local | no |\n| databases | List of database names for which db-specific stats should be displayed, space separated. | | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 2 | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client tls certificate. | | no |\n| tls_key | Client tls key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:5984\n\n```\n{% /details %}\n##### Basic HTTP auth\n\nLocal server with basic HTTP authentication, node name and multiple databases defined. Make sure to match the node name with the `NODENAME` value in your CouchDB's `etc/vm.args` file. Typically, this is of the form `couchdb@fully.qualified.domain.name` in a cluster, or `couchdb@127.0.0.1` for a single-node server.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:5984\n node: couchdb@127.0.0.1\n databases: my-db other-db\n username: foo\n password: bar\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:5984\n\n - name: remote\n url: http://203.0.113.0:5984\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `couchdb` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m couchdb\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `couchdb` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep couchdb\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep couchdb /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep couchdb\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per CouchDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| couchdb.activity | db_reads, db_writes, view_reads | requests/s |\n| couchdb.request_methods | copy, delete, get, head, options, post, put | requests/s |\n| couchdb.response_codes | 200, 201, 202, 204, 206, 301, 302, 304, 400, 401, 403, 404, 406, 409, 412, 413, 414, 415, 416, 417, 500, 501, 503 | responses/s |\n| couchdb.response_code_classes | 2xx, 3xx, 4xx, 5xx | responses/s |\n| couchdb.active_tasks | indexer, db_compaction, replication, view_compaction | tasks |\n| couchdb.replicator_jobs | running, pending, crashed, internal_replication_jobs | jobs |\n| couchdb.open_files | files | files |\n| couchdb.erlang_vm_memory | atom, binaries, code, ets, procs, other | B |\n| couchdb.proccounts | os_procs, erl_procs | processes |\n| couchdb.peakmsgqueue | peak_size | messages |\n| couchdb.reductions | reductions | reductions |\n| couchdb.db_sizes_file | a dimension per database | KiB |\n| couchdb.db_sizes_external | a dimension per database | KiB |\n| couchdb.db_sizes_active | a dimension per database | KiB |\n| couchdb.db_doc_count | a dimension per database | docs |\n| couchdb.db_doc_del_count | a dimension per database | docs |\n\n", @@ -3693,7 +3725,7 @@ export const integrations = [ "most_popular": false }, "overview": "# DMCache devices\n\nPlugin: go.d.plugin\nModule: dmcache\n\n## Overview\n\nThis collector monitors DMCache, providing insights into capacity usage, efficiency, and activity. It relies on the [`dmsetup`](https://man7.org/linux/man-pages/man8/dmsetup.8.html) CLI tool but avoids directly executing the binary. Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment. This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/dmcache.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dmcache.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | dmsetup binary execution timeout. 
| 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: dmcache\n update_every: 5 # Collect DMCache statistics every 5 seconds\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/dmcache.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dmcache.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | dmsetup binary execution timeout. | 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: dmcache\n update_every: 5 # Collect DMCache statistics every 5 seconds\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `dmcache` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m dmcache\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `dmcache` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep dmcache\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep dmcache /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep dmcache\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per dmcache device\n\nThese metrics refer to the DMCache device.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | Device name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dmcache.device_cache_space_usage | free, used | bytes |\n| dmcache.device_metadata_space_usage | free, used | bytes |\n| dmcache.device_cache_read_efficiency | hits, misses | requests/s |\n| dmcache.device_cache_write_efficiency | hits, misses | requests/s |\n| dmcache.device_cache_activity | promotions, demotions | bytes/s |\n| dmcache.device_cache_dirty_size | dirty | bytes |\n\n", @@ -3729,7 +3761,7 @@ export const integrations = [ "most_popular": false }, "overview": "# DNS query\n\nPlugin: go.d.plugin\nModule: dns_query\n\n## Overview\n\nThis module monitors DNS query round-trip time (RTT).\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/dns_query.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dns_query.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"All options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| domains | Domain or subdomains to query. The collector will choose a random domain from the list on every iteration. | | yes |\n| servers | Servers to query. | | yes |\n| port | DNS server port. | 53 | no |\n| network | Network protocol name. Available options: udp, tcp, tcp-tls. | udp | no |\n| record_types | Query record type. Available options: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, TXT, SRV. | A | no |\n| timeout | Query read timeout. 
| 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: job1\n record_types:\n - A\n - AAAA\n domains:\n - google.com\n - github.com\n - reddit.com\n servers:\n - 8.8.8.8\n - 8.8.4.4\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/dns_query.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dns_query.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"All options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| domains | Domain or subdomains to query. The collector will choose a random domain from the list on every iteration. | | yes |\n| servers | Servers to query. | | yes |\n| port | DNS server port. | 53 | no |\n| network | Network protocol name. Available options: udp, tcp, tcp-tls. | udp | no |\n| record_types | Query record type. Available options: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV. | A | no |\n| timeout | Query read timeout. | 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: job1\n record_types:\n - A\n - AAAA\n domains:\n - google.com\n - github.com\n - reddit.com\n servers:\n - 8.8.8.8\n - 8.8.4.4\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `dns_query` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m dns_query\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `dns_query` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep dns_query\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep dns_query /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep dns_query\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ dns_query_query_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/dns_query.conf) | dns_query.query_status | DNS request type ${label:record_type} to server ${label:server} is unsuccessful |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per server\n\nThese metrics refer to the DNS server.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| server | DNS server address. |\n| network | Network protocol name (tcp, udp, tcp-tls). |\n| record_type | DNS record type (e.g. A, AAAA, CNAME). |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dns_query.query_status | success, network_error, dns_error | status |\n| dns_query.query_time | query_time | seconds |\n\n", @@ -3766,7 +3798,7 @@ export const integrations = [ "most_popular": false }, "overview": "# DNSdist\n\nPlugin: go.d.plugin\nModule: dnsdist\n\n## Overview\n\nThis collector monitors DNSDist servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Enable DNSdist built-in Webserver\n\nFor collecting metrics via HTTP, you need to [enable the built-in webserver](https://dnsdist.org/guides/webserver.html).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/dnsdist.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dnsdist.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data 
collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8083 | yes |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 1 | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client tls certificate. | | no |\n| tls_key | Client tls key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8083\n headers:\n X-API-Key: your-api-key # static pre-shared authentication key for access to the REST API (api-key).\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8083\n headers:\n X-API-Key: 'your-api-key' # static pre-shared authentication key for access to the REST API (api-key).\n\n - name: remote\n url: http://203.0.113.0:8083\n headers:\n X-API-Key: 'your-api-key'\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Enable DNSdist built-in Webserver\n\nFor collecting metrics via HTTP, you need to [enable the built-in webserver](https://dnsdist.org/guides/webserver.html).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/dnsdist.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dnsdist.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8083 | yes |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. 
| 1 | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client tls certificate. | | no |\n| tls_key | Client tls key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8083\n headers:\n X-API-Key: your-api-key # static pre-shared authentication key for access to the REST API (api-key).\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8083\n headers:\n X-API-Key: 'your-api-key' # static pre-shared authentication key for access to the REST API (api-key).\n\n - name: remote\n url: http://203.0.113.0:8083\n headers:\n X-API-Key: 'your-api-key'\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `dnsdist` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m dnsdist\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `dnsdist` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep dnsdist\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep dnsdist /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep dnsdist\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per DNSdist instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dnsdist.queries | all, recursive, empty | queries/s |\n| dnsdist.queries_dropped | rule_drop, dynamic_blocked, no_policy, non_queries | queries/s |\n| dnsdist.packets_dropped | acl | packets/s |\n| dnsdist.answers | self_answered, nxdomain, refused, trunc_failures | answers/s |\n| dnsdist.backend_responses | responses | responses/s |\n| dnsdist.backend_commerrors | send_errors | errors/s |\n| dnsdist.backend_errors | timeouts, servfail, non_compliant | responses/s |\n| dnsdist.cache | hits, misses | answers/s |\n| dnsdist.servercpu | system_state, user_state | ms/s |\n| dnsdist.servermem | memory_usage | MiB |\n| dnsdist.query_latency | 1ms, 10ms, 50ms, 100ms, 1sec, slow | queries/s |\n| dnsdist.query_latency_avg | 100, 1k, 10k, 1000k | microseconds |\n\n", @@ -3803,7 +3835,7 @@ export const integrations = [ "most_popular": false }, "overview": "# Dnsmasq\n\nPlugin: go.d.plugin\nModule: dnsmasq\n\n## Overview\n\nThis collector monitors Dnsmasq servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/dnsmasq.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dnsmasq.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address in `ip:port` format. | 127.0.0.1:53 | yes |\n| protocol | DNS query transport protocol. Supported protocols: udp, tcp, tcp-tls. | udp | no |\n| timeout | DNS query timeout (dial, write and read) in seconds. 
| 1 | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:53\n\n```\n{% /details %}\n##### Using TCP protocol\n\nLocal server with specific DNS query transport protocol.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:53\n protocol: tcp\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:53\n\n - name: remote\n address: 203.0.113.0:53\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/dnsmasq.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dnsmasq.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address in `ip:port` format. | 127.0.0.1:53 | yes |\n| protocol | DNS query transport protocol. Supported protocols: udp, tcp, tcp-tls. | udp | no |\n| timeout | DNS query timeout (dial, write and read) in seconds. | 1 | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:53\n\n```\n{% /details %}\n##### Using TCP protocol\n\nLocal server with specific DNS query transport protocol.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:53\n protocol: tcp\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:53\n\n - name: remote\n address: 203.0.113.0:53\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `dnsmasq` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m dnsmasq\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `dnsmasq` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep dnsmasq\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep dnsmasq /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep dnsmasq\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Dnsmasq instance\n\nThe metrics apply to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dnsmasq.servers_queries | success, failed | queries/s |\n| dnsmasq.cache_performance | hits, misses | events/s |\n| dnsmasq.cache_operations | insertions, evictions | operations/s |\n| dnsmasq.cache_size | size | entries |\n\n", @@ -3839,8 +3871,8 @@ export const integrations = [ "most_popular": false }, - "overview": "# Dnsmasq DHCP\n\nPlugin: go.d.plugin\nModule: dnsmasq_dhcp\n\n## Overview\n\nThis collector monitors Dnsmasq DHCP leases databases, depending on your configuration.\n\nBy default, it uses:\n\n- `/var/lib/misc/dnsmasq.leases` to read leases.\n- `/etc/dnsmasq.conf` to detect dhcp-ranges.\n- `/etc/dnsmasq.d` to find additional configurations.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAll configured dhcp-ranges are detected automatically\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/dnsmasq_dhcp.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config 
directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dnsmasq_dhcp.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| leases_path | Path to dnsmasq DHCP leases file. | /var/lib/misc/dnsmasq.leases | no |\n| conf_path | Path to dnsmasq configuration file. | /etc/dnsmasq.conf | no |\n| conf_dir | Path to dnsmasq configuration directory. | /etc/dnsmasq.d,.dpkg-dist,.dpkg-old,.dpkg-new | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: dnsmasq_dhcp\n leases_path: /var/lib/misc/dnsmasq.leases\n conf_path: /etc/dnsmasq.conf\n conf_dir: /etc/dnsmasq.d\n\n```\n{% /details %}\n##### Pi-hole\n\nDnsmasq DHCP on Pi-hole.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: dnsmasq_dhcp\n leases_path: /etc/pihole/dhcp.leases\n conf_path: /etc/dnsmasq.conf\n conf_dir: /etc/dnsmasq.d\n\n```\n{% /details %}\n", + "overview": "# Dnsmasq DHCP\n\nPlugin: go.d.plugin\nModule: dnsmasq_dhcp\n\n## Overview\n\nThis collector monitors Dnsmasq DHCP leases databases, depending on your configuration.\n\nBy default, it uses:\n\n- `/var/lib/misc/dnsmasq.leases` to read leases.\n- `/etc/dnsmasq.conf` to detect dhcp-ranges.\n- `/etc/dnsmasq.d` to find additional configurations.\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAll configured dhcp-ranges are detected automatically\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/dnsmasq_dhcp.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dnsmasq_dhcp.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| leases_path | Path to dnsmasq DHCP leases file. | /var/lib/misc/dnsmasq.leases | no |\n| conf_path | Path to dnsmasq configuration file. 
| /etc/dnsmasq.conf | no |\n| conf_dir | Path to dnsmasq configuration directory. | /etc/dnsmasq.d,.dpkg-dist,.dpkg-old,.dpkg-new | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: dnsmasq_dhcp\n leases_path: /var/lib/misc/dnsmasq.leases\n conf_path: /etc/dnsmasq.conf\n conf_dir: /etc/dnsmasq.d\n\n```\n{% /details %}\n##### Pi-hole\n\nDnsmasq DHCP on Pi-hole.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: dnsmasq_dhcp\n leases_path: /etc/pihole/dhcp.leases\n conf_path: /etc/dnsmasq.conf\n conf_dir: /etc/dnsmasq.d\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `dnsmasq_dhcp` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m dnsmasq_dhcp\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `dnsmasq_dhcp` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep dnsmasq_dhcp\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep dnsmasq_dhcp /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep dnsmasq_dhcp\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ dnsmasq_dhcp_dhcp_range_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/dnsmasq_dhcp.conf) | dnsmasq_dhcp.dhcp_range_utilization | DHCP range utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Dnsmasq DHCP instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dnsmasq_dhcp.dhcp_ranges | ipv4, ipv6 | ranges |\n| dnsmasq_dhcp.dhcp_hosts | ipv4, ipv6 | hosts |\n\n### Per dhcp range\n\nThese metrics refer to the DHCP range.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| dhcp_range | DHCP range in `START_IP:END_IP` format |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dnsmasq_dhcp.dhcp_range_utilization | used | percentage |\n| dnsmasq_dhcp.dhcp_range_allocated_leases | allocated | leases |\n\n", @@ -3877,7 +3909,7 @@ export const integrations = [ "most_popular": true }, "overview": "# Docker\n\nPlugin: go.d.plugin\nModule: docker\n\n## Overview\n\nThis collector monitors Docker containers state, health status and more.\n\n\nIt connects to the Docker instance via a TCP or UNIX socket and executes the following commands:\n\n- [System info](https://docs.docker.com/engine/api/v1.43/#tag/System/operation/SystemInfo).\n- [List images](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageList).\n- [List containers](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerList).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nRequires netdata user to be in the docker group.\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt discovers instances running on localhost by attempting to connect to a known Docker UNIX socket: `/var/run/docker.sock`.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nEnabling `collect_container_size` may result in high CPU usage depending on the version of Docker Engine.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/docker.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/docker.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Docker daemon's listening address. When using a TCP socket, the format is: tcp://[ip]:[port] | unix:///var/run/docker.sock | yes |\n| timeout | Request timeout in seconds. | 2 | no |\n| collect_container_size | Whether to collect container writable layer size. 
| no | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n address: 'unix:///var/run/docker.sock'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 'unix:///var/run/docker.sock'\n\n - name: remote\n address: 'tcp://203.0.113.10:2375'\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/docker.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/docker.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Docker daemon's listening address. When using a TCP socket, the format is: tcp://[ip]:[port] | unix:///var/run/docker.sock | yes |\n| timeout | Request timeout in seconds. | 2 | no |\n| collect_container_size | Whether to collect container writable layer size. | no | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n address: 'unix:///var/run/docker.sock'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 'unix:///var/run/docker.sock'\n\n - name: remote\n address: 'tcp://203.0.113.10:2375'\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `docker` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m docker\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `docker` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep docker\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep docker /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep docker\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ docker_container_unhealthy ](https://github.com/netdata/netdata/blob/master/src/health/health.d/docker.conf) | docker.container_health_status | ${label:container_name} docker container health status is unhealthy |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Docker instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| docker.containers_state | running, paused, stopped | containers |\n| docker.containers_health_status | healthy, unhealthy, not_running_unhealthy, starting, no_healthcheck | containers |\n| docker.images | active, dangling | images |\n| docker.images_size | size | bytes |\n\n### Per container\n\nMetrics related to containers. 
Each container provides its own set of the following metrics.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container's name |\n| image | The image name the container uses |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| docker.container_state | running, paused, exited, created, restarting, removing, dead | state |\n| docker.container_health_status | healthy, unhealthy, not_running_unhealthy, starting, no_healthcheck | status |\n| docker.container_writeable_layer_size | writeable_layer | size |\n\n", @@ -3915,7 +3947,7 @@ export const integrations = [ "most_popular": false }, "overview": "# Docker Engine\n\nPlugin: go.d.plugin\nModule: docker_engine\n\n## Overview\n\nThis collector monitors the activity and health of Docker Engine and Docker Swarm.\n\n\nThe [built-in](https://docs.docker.com/config/daemon/prometheus/) Prometheus exporter is used to get the metrics.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt discovers instances running on localhost by attempting to connect to a known Docker TCP socket: `http://127.0.0.1:9323/metrics`.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Enable built-in Prometheus exporter\n\nTo enable built-in Prometheus exporter, follow the [official documentation](https://docs.docker.com/config/daemon/prometheus/#configure-docker).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/docker_engine.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/docker_engine.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9323/metrics | yes |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 1 | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. 
| | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9323/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9323/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nConfiguration with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9323/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9323/metrics\n\n - name: remote\n url: http://192.0.2.1:9323/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Enable built-in Prometheus exporter\n\nTo enable built-in Prometheus exporter, follow the [official documentation](https://docs.docker.com/config/daemon/prometheus/#configure-docker).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/docker_engine.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/docker_engine.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9323/metrics | yes |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 1 | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9323/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9323/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nConfiguration with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9323/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9323/metrics\n\n - name: remote\n url: http://192.0.2.1:9323/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `docker_engine` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m docker_engine\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `docker_engine` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep docker_engine\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep docker_engine /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep docker_engine\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Docker Engine instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| docker_engine.engine_daemon_container_actions | changes, commit, create, delete, start | actions/s |\n| docker_engine.engine_daemon_container_states_containers | running, paused, stopped | containers |\n| docker_engine.builder_builds_failed_total | build_canceled, build_target_not_reachable_error, command_not_supported_error, dockerfile_empty_error, dockerfile_syntax_error, error_processing_commands_error, missing_onbuild_arguments_error, unknown_instruction_error | fails/s |\n| docker_engine.engine_daemon_health_checks_failed_total | fails | events/s |\n| docker_engine.swarm_manager_leader | is_leader | bool |\n| docker_engine.swarm_manager_object_store | nodes, services, tasks, networks, secrets, configs | objects |\n| docker_engine.swarm_manager_nodes_per_state | ready, down, unknown, disconnected | nodes |\n| docker_engine.swarm_manager_tasks_per_state | running, failed, ready, rejected, starting, shutdown, new, orphaned, preparing, pending, complete, remove, accepted, assigned | tasks |\n\n", @@ -3951,7 +3983,7 @@ export const integrations = [ "most_popular": false }, "overview": "# Docker Hub repository\n\nPlugin: go.d.plugin\nModule: dockerhub\n\n## Overview\n\nThis collector keeps track of DockerHub repositories statistics such as the number of stars, pulls, current status, and more.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/dockerhub.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dockerhub.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | DockerHub URL. | https://hub.docker.com/v2/repositories | yes |\n| repositories | List of repositories to monitor. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. 
| GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: dockerhub\n repositories:\n - 'user1/name1'\n - 'user2/name2'\n - 'user3/name3'\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/dockerhub.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dockerhub.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | DockerHub URL. | https://hub.docker.com/v2/repositories | yes |\n| repositories | List of repositories to monitor. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: dockerhub\n repositories:\n - 'user1/name1'\n - 'user2/name2'\n - 'user3/name3'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `dockerhub` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m dockerhub\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `dockerhub` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep dockerhub\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep dockerhub /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep dockerhub\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Docker Hub repository instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dockerhub.pulls_sum | sum | pulls |\n| dockerhub.pulls | a dimension per repository | pulls |\n| dockerhub.pulls_rate | a dimension per repository | pulls/s |\n| dockerhub.stars | a dimension per repository | stars |\n| dockerhub.status | a dimension per repository | status |\n| dockerhub.last_updated | a dimension per repository | seconds |\n\n", @@ -3989,7 +4021,7 @@ export const integrations = [ "most_popular": false }, "overview": "# Dovecot\n\nPlugin: go.d.plugin\nModule: dovecot\n\n## Overview\n\nThis collector monitors Dovecot metrics about sessions, logins, commands, page faults and more.\n\n\nIt reads the server's response to the `EXPORT\\tglobal\\n` command.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAutomatically discovers and collects Dovecot statistics from the following default locations:\n\n- localhost:24242\n- unix:///var/run/dovecot/old-stats\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Enable old_stats plugin\n\nTo enable `old_stats` plugin, see [Old 
Statistics](https://doc.dovecot.org/configuration_manual/stats/old_statistics/#old-statistics).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/dovecot.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dovecot.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | The Unix or TCP socket address where the Dovecot [old_stats](https://doc.dovecot.org/configuration_manual/stats/old_statistics/#old-statistics) plugin listens for connections. | 127.0.0.1:24242 | yes |\n| timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. | 1 | no |\n\n{% /details %}\n#### Examples\n\n##### Basic (TCP)\n\nA basic example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:24242\n\n```\n{% /details %}\n##### Basic (UNIX)\n\nA basic example configuration using a UNIX socket.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: unix:///var/run/dovecot/old-stats\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:24242\n\n - name: remote\n address: 203.0.113.0:24242\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Enable old_stats plugin\n\nTo enable `old_stats` plugin, see [Old Statistics](https://doc.dovecot.org/configuration_manual/stats/old_statistics/#old-statistics).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/dovecot.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dovecot.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | The Unix or TCP socket address where the Dovecot [old_stats](https://doc.dovecot.org/configuration_manual/stats/old_statistics/#old-statistics) plugin listens for connections. | 127.0.0.1:24242 | yes |\n| timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. 
| 1 | no |\n\n{% /details %}\n#### Examples\n\n##### Basic (TCP)\n\nA basic example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:24242\n\n```\n{% /details %}\n##### Basic (UNIX)\n\nA basic example configuration using a UNIX socket.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: unix:///var/run/dovecot/old-stats\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:24242\n\n - name: remote\n address: 203.0.113.0:24242\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `dovecot` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m dovecot\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `dovecot` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep dovecot\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep dovecot /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep dovecot\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Dovecot instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dovecot.session | active | sessions |\n| dovecot.logins | logins | logins |\n| dovecot.auth | ok, failed | attempts/s |\n| dovecot.commands | commands | commands |\n| dovecot.context_switches | voluntary, involuntary | switches/s |\n| dovecot.io | read, write | KiB/s |\n| dovecot.net | read, write | kilobits/s |\n| dovecot.syscalls | read, write | syscalls/s |\n| dovecot.lookup | path, attr | lookups/s |\n| dovecot.cache | hits | hits/s |\n| dovecot.auth_cache | hits, misses | requests/s |\n\n", @@ -4037,7 +4069,7 @@ export const integrations = [ "most_popular": true }, "overview": "# Elasticsearch\n\nPlugin: go.d.plugin\nModule: elasticsearch\n\n## Overview\n\nThis collector monitors the performance and health of the Elasticsearch cluster.\n\n\nIt uses [Cluster APIs](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html) to collect metrics.\n\nUsed endpoints:\n\n| Endpoint | Description | API |\n|------------------------|----------------------|-------------------------------------------------------------------------------------------------------------|\n| `/` | Node info | |\n| `/_nodes/stats` | Nodes metrics | [Nodes stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html) |\n| `/_nodes/_local/stats` | Local node metrics | [Nodes stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html) |\n| `/_cluster/health` | Cluster health stats | [Cluster health API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html) |\n| `/_cluster/stats` | Cluster metrics | [Cluster stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html) |\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by attempting to connect to port 9200:\n\n- http://127.0.0.1:9200\n- https://127.0.0.1:9200\n\n\n#### Limits\n\nBy default, this collector monitors only the node it is connected to. To monitor all cluster nodes, set the `cluster_mode` configuration option to `yes`.\n\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/elasticsearch.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/elasticsearch.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | http://127.0.0.1:9200 | yes |\n| cluster_mode | Controls whether to collect metrics for all nodes in the cluster or only for the local node. | false | no |\n| collect_node_stats | Controls whether to collect nodes metrics. | true | no |\n| collect_cluster_health | Controls whether to collect cluster health metrics. | true | no |\n| collect_cluster_stats | Controls whether to collect cluster stats metrics. | true | no |\n| collect_indices_stats | Controls whether to collect indices metrics. | false | no |\n| timeout | HTTP request timeout. | 2 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic single node mode\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n\n```\n##### Cluster mode\n\nCluster mode example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n cluster_mode: yes\n\n```\n{% /details %}\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nElasticsearch with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9200\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n\n - name: remote\n url: http://192.0.2.1:9200\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/elasticsearch.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/elasticsearch.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required 
|\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9200 | yes |\n| cluster_mode | Controls whether to collect metrics for all nodes in the cluster or only for the local node. | false | no |\n| collect_node_stats | Controls whether to collect nodes metrics. | true | no |\n| collect_cluster_health | Controls whether to collect cluster health metrics. | true | no |\n| collect_cluster_stats | Controls whether to collect cluster stats metrics. | true | no |\n| collect_indices_stats | Controls whether to collect indices metrics. | false | no |\n| timeout | HTTP request timeout. | 2 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic single node mode\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n\n```\n##### Cluster mode\n\nCluster mode example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n cluster_mode: yes\n\n```\n{% /details %}\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nElasticsearch with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9200\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n\n - name: remote\n url: http://192.0.2.1:9200\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `elasticsearch` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m elasticsearch\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `elasticsearch` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep elasticsearch\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep elasticsearch /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep elasticsearch\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ elasticsearch_node_indices_search_time_query ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_indices_search_time | search performance is degraded, queries run slowly. |\n| [ elasticsearch_node_indices_search_time_fetch ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_indices_search_time | search performance is degraded, fetches run slowly. |\n| [ elasticsearch_cluster_health_status_red ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.cluster_health_status | cluster health status is red. |\n| [ elasticsearch_cluster_health_status_yellow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.cluster_health_status | cluster health status is yellow. |\n| [ elasticsearch_node_index_health_red ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_index_health | node index $label:index health status is red. |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per node\n\nThese metrics refer to the cluster node.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cluster_name | Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). |\n| node_name | Human-readable identifier for the node. Based on the [Node name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#node-name). 
|\n| host | Network host for the node, based on the [Network host setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#network.host). |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| elasticsearch.node_indices_indexing | index | operations/s |\n| elasticsearch.node_indices_indexing_current | index | operations |\n| elasticsearch.node_indices_indexing_time | index | milliseconds |\n| elasticsearch.node_indices_search | queries, fetches | operations/s |\n| elasticsearch.node_indices_search_current | queries, fetches | operations |\n| elasticsearch.node_indices_search_time | queries, fetches | milliseconds |\n| elasticsearch.node_indices_refresh | refresh | operations/s |\n| elasticsearch.node_indices_refresh_time | refresh | milliseconds |\n| elasticsearch.node_indices_flush | flush | operations/s |\n| elasticsearch.node_indices_flush_time | flush | milliseconds |\n| elasticsearch.node_indices_fielddata_memory_usage | used | bytes |\n| elasticsearch.node_indices_fielddata_evictions | evictions | operations/s |\n| elasticsearch.node_indices_segments_count | segments | segments |\n| elasticsearch.node_indices_segments_memory_usage_total | used | bytes |\n| elasticsearch.node_indices_segments_memory_usage | terms, stored_fields, term_vectors, norms, points, doc_values, index_writer, version_map, fixed_bit_set | bytes |\n| elasticsearch.node_indices_translog_operations | total, uncommitted | operations |\n| elasticsearch.node_indices_translog_size | total, uncommitted | bytes |\n| elasticsearch.node_file_descriptors | open | fd |\n| elasticsearch.node_jvm_heap | inuse | percentage |\n| elasticsearch.node_jvm_heap_bytes | committed, used | bytes |\n| elasticsearch.node_jvm_buffer_pools_count | direct, mapped | pools |\n| elasticsearch.node_jvm_buffer_pool_direct_memory | total, used | bytes |\n| elasticsearch.node_jvm_buffer_pool_mapped_memory | total, used | bytes |\n| elasticsearch.node_jvm_gc_count | young, old | gc/s |\n| elasticsearch.node_jvm_gc_time | young, old | milliseconds |\n| elasticsearch.node_thread_pool_queued | generic, search, search_throttled, get, analyze, write, snapshot, warmer, refresh, listener, fetch_shard_started, fetch_shard_store, flush, force_merge, management | threads |\n| elasticsearch.node_thread_pool_rejected | generic, search, search_throttled, get, analyze, write, snapshot, warmer, refresh, listener, fetch_shard_started, fetch_shard_store, flush, force_merge, management | threads |\n| elasticsearch.node_cluster_communication_packets | received, sent | pps |\n| elasticsearch.node_cluster_communication_traffic | received, sent | bytes/s |\n| elasticsearch.node_http_connections | open | connections |\n| elasticsearch.node_breakers_trips | requests, fielddata, in_flight_requests, model_inference, accounting, parent | trips/s |\n\n### Per cluster\n\nThese metrics refer to the cluster.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cluster_name | Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| elasticsearch.cluster_health_status | green, yellow, red | status |\n| elasticsearch.cluster_number_of_nodes | nodes, data_nodes | nodes |\n| elasticsearch.cluster_shards_count | active_primary, active, relocating, initializing, unassigned, delayed_unaasigned | shards |\n| elasticsearch.cluster_pending_tasks | pending | tasks |\n| elasticsearch.cluster_number_of_in_flight_fetch | in_flight_fetch | fetches |\n| elasticsearch.cluster_indices_count | indices | indices |\n| elasticsearch.cluster_indices_shards_count | total, primaries, replication | shards |\n| elasticsearch.cluster_indices_docs_count | docs | docs |\n| elasticsearch.cluster_indices_store_size | size | bytes |\n| elasticsearch.cluster_indices_query_cache | hit, miss | events/s |\n| elasticsearch.cluster_nodes_by_role_count | coordinating_only, data, data_cold, data_content, data_frozen, data_hot, data_warm, ingest, master, ml, remote_cluster_client, voting_only | nodes |\n\n### Per index\n\nThese metrics refer to the index.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cluster_name | Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). |\n| index | Name of the index. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| elasticsearch.node_index_health | green, yellow, red | status |\n| elasticsearch.node_index_shards_count | shards | shards |\n| elasticsearch.node_index_docs_count | docs | docs |\n| elasticsearch.node_index_store_size | store_size | bytes |\n\n", @@ -4085,7 +4117,7 @@ export const integrations = [ "most_popular": true }, "overview": "# OpenSearch\n\nPlugin: go.d.plugin\nModule: elasticsearch\n\n## Overview\n\nThis collector monitors the performance and health of the Elasticsearch cluster.\n\n\nIt uses [Cluster APIs](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html) to collect metrics.\n\nUsed endpoints:\n\n| Endpoint | Description | API |\n|------------------------|----------------------|-------------------------------------------------------------------------------------------------------------|\n| `/` | Node info | |\n| `/_nodes/stats` | Nodes metrics | [Nodes stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html) |\n| `/_nodes/_local/stats` | Local node metrics | [Nodes stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html) |\n| `/_cluster/health` | Cluster health stats | [Cluster health API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html) |\n| `/_cluster/stats` | Cluster metrics | [Cluster stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html) |\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by attempting to connect to port 9200:\n\n- http://127.0.0.1:9200\n- https://127.0.0.1:9200\n\n\n#### Limits\n\nBy default, this collector monitors only the node it is connected to. 
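Before enabling the collector, you can confirm from the Netdata host that the endpoints listed above are reachable (a minimal sketch assuming a local node on the default port 9200; add credentials or switch to https as needed):\n\n```bash\ncurl -s http://127.0.0.1:9200/_cluster/health\n```\n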
To monitor all cluster nodes, set the `cluster_mode` configuration option to `yes`.\n\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/elasticsearch.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/elasticsearch.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9200 | yes |\n| cluster_mode | Controls whether to collect metrics for all nodes in the cluster or only for the local node. | false | no |\n| collect_node_stats | Controls whether to collect nodes metrics. | true | no |\n| collect_cluster_health | Controls whether to collect cluster health metrics. | true | no |\n| collect_cluster_stats | Controls whether to collect cluster stats metrics. | true | no |\n| collect_indices_stats | Controls whether to collect indices metrics. | false | no |\n| timeout | HTTP request timeout. | 2 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic single node mode\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n\n```\n##### Cluster mode\n\nCluster mode example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n cluster_mode: yes\n\n```\n{% /details %}\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nElasticsearch with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9200\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n\n - name: remote\n url: http://192.0.2.1:9200\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/elasticsearch.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/elasticsearch.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9200 | yes |\n| cluster_mode | Controls whether to collect metrics for all nodes in the cluster or only for the local node. | false | no |\n| collect_node_stats | Controls whether to collect nodes metrics. | true | no |\n| collect_cluster_health | Controls whether to collect cluster health metrics. | true | no |\n| collect_cluster_stats | Controls whether to collect cluster stats metrics. | true | no |\n| collect_indices_stats | Controls whether to collect indices metrics. | false | no |\n| timeout | HTTP request timeout. | 2 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. 
| no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic single node mode\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n\n```\n##### Cluster mode\n\nCluster mode example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n cluster_mode: yes\n\n```\n{% /details %}\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nElasticsearch with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9200\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n\n - name: remote\n url: http://192.0.2.1:9200\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `elasticsearch` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m elasticsearch\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `elasticsearch` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep elasticsearch\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep elasticsearch /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep elasticsearch\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ elasticsearch_node_indices_search_time_query ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_indices_search_time | search performance is degraded, queries run slowly. |\n| [ elasticsearch_node_indices_search_time_fetch ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_indices_search_time | search performance is degraded, fetches run slowly. |\n| [ elasticsearch_cluster_health_status_red ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.cluster_health_status | cluster health status is red. |\n| [ elasticsearch_cluster_health_status_yellow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.cluster_health_status | cluster health status is yellow. |\n| [ elasticsearch_node_index_health_red ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_index_health | node index $label:index health status is red. |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per node\n\nThese metrics refer to the cluster node.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cluster_name | Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). |\n| node_name | Human-readable identifier for the node. Based on the [Node name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#node-name). |\n| host | Network host for the node, based on the [Network host setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#network.host). 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| elasticsearch.node_indices_indexing | index | operations/s |\n| elasticsearch.node_indices_indexing_current | index | operations |\n| elasticsearch.node_indices_indexing_time | index | milliseconds |\n| elasticsearch.node_indices_search | queries, fetches | operations/s |\n| elasticsearch.node_indices_search_current | queries, fetches | operations |\n| elasticsearch.node_indices_search_time | queries, fetches | milliseconds |\n| elasticsearch.node_indices_refresh | refresh | operations/s |\n| elasticsearch.node_indices_refresh_time | refresh | milliseconds |\n| elasticsearch.node_indices_flush | flush | operations/s |\n| elasticsearch.node_indices_flush_time | flush | milliseconds |\n| elasticsearch.node_indices_fielddata_memory_usage | used | bytes |\n| elasticsearch.node_indices_fielddata_evictions | evictions | operations/s |\n| elasticsearch.node_indices_segments_count | segments | segments |\n| elasticsearch.node_indices_segments_memory_usage_total | used | bytes |\n| elasticsearch.node_indices_segments_memory_usage | terms, stored_fields, term_vectors, norms, points, doc_values, index_writer, version_map, fixed_bit_set | bytes |\n| elasticsearch.node_indices_translog_operations | total, uncommitted | operations |\n| elasticsearch.node_indices_translog_size | total, uncommitted | bytes |\n| elasticsearch.node_file_descriptors | open | fd |\n| elasticsearch.node_jvm_heap | inuse | percentage |\n| elasticsearch.node_jvm_heap_bytes | committed, used | bytes |\n| elasticsearch.node_jvm_buffer_pools_count | direct, mapped | pools |\n| elasticsearch.node_jvm_buffer_pool_direct_memory | total, used | bytes |\n| elasticsearch.node_jvm_buffer_pool_mapped_memory | total, used | bytes |\n| elasticsearch.node_jvm_gc_count | young, old | gc/s |\n| elasticsearch.node_jvm_gc_time | young, old | milliseconds |\n| elasticsearch.node_thread_pool_queued | generic, search, search_throttled, get, analyze, write, snapshot, warmer, refresh, listener, fetch_shard_started, fetch_shard_store, flush, force_merge, management | threads |\n| elasticsearch.node_thread_pool_rejected | generic, search, search_throttled, get, analyze, write, snapshot, warmer, refresh, listener, fetch_shard_started, fetch_shard_store, flush, force_merge, management | threads |\n| elasticsearch.node_cluster_communication_packets | received, sent | pps |\n| elasticsearch.node_cluster_communication_traffic | received, sent | bytes/s |\n| elasticsearch.node_http_connections | open | connections |\n| elasticsearch.node_breakers_trips | requests, fielddata, in_flight_requests, model_inference, accounting, parent | trips/s |\n\n### Per cluster\n\nThese metrics refer to the cluster.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cluster_name | Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| elasticsearch.cluster_health_status | green, yellow, red | status |\n| elasticsearch.cluster_number_of_nodes | nodes, data_nodes | nodes |\n| elasticsearch.cluster_shards_count | active_primary, active, relocating, initializing, unassigned, delayed_unaasigned | shards |\n| elasticsearch.cluster_pending_tasks | pending | tasks |\n| elasticsearch.cluster_number_of_in_flight_fetch | in_flight_fetch | fetches |\n| elasticsearch.cluster_indices_count | indices | indices |\n| elasticsearch.cluster_indices_shards_count | total, primaries, replication | shards |\n| elasticsearch.cluster_indices_docs_count | docs | docs |\n| elasticsearch.cluster_indices_store_size | size | bytes |\n| elasticsearch.cluster_indices_query_cache | hit, miss | events/s |\n| elasticsearch.cluster_nodes_by_role_count | coordinating_only, data, data_cold, data_content, data_frozen, data_hot, data_warm, ingest, master, ml, remote_cluster_client, voting_only | nodes |\n\n### Per index\n\nThese metrics refer to the index.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cluster_name | Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). |\n| index | Name of the index. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| elasticsearch.node_index_health | green, yellow, red | status |\n| elasticsearch.node_index_shards_count | shards | shards |\n| elasticsearch.node_index_docs_count | docs | docs |\n| elasticsearch.node_index_store_size | store_size | bytes |\n\n", @@ -4127,7 +4159,7 @@ export const integrations = [ "most_popular": true }, "overview": "# Envoy\n\nPlugin: go.d.plugin\nModule: envoy\n\n## Overview\n\nThis collector monitors Envoy proxies. It collects server, cluster, and listener metrics.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Envoy instances running on localhost.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/envoy.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/envoy.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9091/stats/prometheus | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. 
| | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9901/stats/prometheus\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9901/stats/prometheus\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9901/stats/prometheus\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9901/stats/prometheus\n\n - name: remote\n url: http://192.0.2.1:9901/stats/prometheus\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/envoy.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/envoy.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9091/stats/prometheus | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. 
Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9901/stats/prometheus\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9901/stats/prometheus\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9901/stats/prometheus\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9901/stats/prometheus\n\n - name: remote\n url: http://192.0.2.1:9901/stats/prometheus\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `envoy` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m envoy\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `envoy` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep envoy\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep envoy /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
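If the logs point to connection problems, it can also help to check that the Envoy admin endpoint is serving Prometheus metrics at the configured URL (a sketch reusing the admin port from the examples above; adjust it to your deployment):\n\n```bash\ncurl -s http://127.0.0.1:9901/stats/prometheus | head\n```\n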
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep envoy\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Envoy instance\n\nEnvoy exposes metrics in Prometheus format. All metric labels are added to charts.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| envoy.server_state | live, draining, pre_initializing, initializing | state |\n| envoy.server_connections_count | connections | connections |\n| envoy.server_parent_connections_count | connections | connections |\n| envoy.server_memory_allocated_size | allocated | bytes |\n| envoy.server_memory_heap_size | heap | bytes |\n| envoy.server_memory_physical_size | physical | bytes |\n| envoy.server_uptime | uptime | seconds |\n| envoy.cluster_manager_cluster_count | active, not_active | clusters |\n| envoy.cluster_manager_cluster_changes_rate | added, modified, removed | clusters/s |\n| envoy.cluster_manager_cluster_updates_rate | cluster | updates/s |\n| envoy.cluster_manager_cluster_updated_via_merge_rate | via_merge | updates/s |\n| envoy.cluster_manager_update_merge_cancelled_rate | merge_cancelled | updates/s |\n| envoy.cluster_manager_update_out_of_merge_window_rate | out_of_merge_window | updates/s |\n| envoy.cluster_membership_endpoints_count | healthy, degraded, excluded | endpoints |\n| envoy.cluster_membership_changes_rate | membership | changes/s |\n| envoy.cluster_membership_updates_rate | success, failure, empty, no_rebuild | updates/s |\n| envoy.cluster_upstream_cx_active_count | active | connections |\n| envoy.cluster_upstream_cx_rate | created | connections/s |\n| envoy.cluster_upstream_cx_http_rate | http1, http2, http3 | connections/s |\n| envoy.cluster_upstream_cx_destroy_rate | local, remote | connections/s |\n| envoy.cluster_upstream_cx_connect_fail_rate | failed | connections/s |\n| envoy.cluster_upstream_cx_connect_timeout_rate | timeout | connections/s |\n| envoy.cluster_upstream_cx_bytes_rate | received, sent | bytes/s |\n| envoy.cluster_upstream_cx_bytes_buffered_size | received, send | bytes |\n| envoy.cluster_upstream_rq_active_count | active | requests |\n| envoy.cluster_upstream_rq_rate | requests | requests/s |\n| envoy.cluster_upstream_rq_failed_rate | cancelled, maintenance_mode, timeout, max_duration_reached, per_try_timeout, reset_local, reset_remote | requests/s |\n| envoy.cluster_upstream_rq_pending_active_count | active_pending | requests |\n| envoy.cluster_upstream_rq_pending_rate | pending | requests/s |\n| envoy.cluster_upstream_rq_pending_failed_rate | overflow, failure_eject | requests/s |\n| envoy.cluster_upstream_rq_retry_rate | request | retries/s |\n| envoy.cluster_upstream_rq_retry_success_rate | success | retries/s |\n| envoy.cluster_upstream_rq_retry_backoff_rate | exponential, ratelimited | retries/s |\n| envoy.listener_manager_listeners_count | active, warming, draining | listeners |\n| envoy.listener_manager_listener_changes_rate | added, modified, removed, stopped | listeners/s |\n| envoy.listener_manager_listener_object_events_rate | create_success, create_failure, in_place_updated | objects/s |\n| 
envoy.listener_admin_downstream_cx_active_count | active | connections |\n| envoy.listener_admin_downstream_cx_rate | created | connections/s |\n| envoy.listener_admin_downstream_cx_destroy_rate | destroyed | connections/s |\n| envoy.listener_admin_downstream_cx_transport_socket_connect_timeout_rate | timeout | connections/s |\n| envoy.listener_admin_downstream_cx_rejected_rate | overflow, overload, global_overflow | connections/s |\n| envoy.listener_admin_downstream_listener_filter_remote_close_rate | closed | connections/s |\n| envoy.listener_admin_downstream_listener_filter_error_rate | read | errors/s |\n| envoy.listener_admin_downstream_pre_cx_active_count | active | sockets |\n| envoy.listener_admin_downstream_pre_cx_timeout_rate | timeout | sockets/s |\n| envoy.listener_downstream_cx_active_count | active | connections |\n| envoy.listener_downstream_cx_rate | created | connections/s |\n| envoy.listener_downstream_cx_destroy_rate | destroyed | connections/s |\n| envoy.listener_downstream_cx_transport_socket_connect_timeout_rate | timeout | connections/s |\n| envoy.listener_downstream_cx_rejected_rate | overflow, overload, global_overflow | connections/s |\n| envoy.listener_downstream_listener_filter_remote_close_rate | closed | connections/s |\n| envoy.listener_downstream_listener_filter_error_rate | read | errors/s |\n| envoy.listener_downstream_pre_cx_active_count | active | sockets |\n| envoy.listener_downstream_pre_cx_timeout_rate | timeout | sockets/s |\n\n", @@ -4165,7 +4197,7 @@ export const integrations = [ "most_popular": false }, "overview": "# Exim\n\nPlugin: go.d.plugin\nModule: exim\n\n## Overview\n\nThis collector monitors Exim mail queue. It relies on the [`exim`](https://www.exim.org/exim-html-3.20/doc/html/spec_5.html) CLI tool but avoids directly executing the binary. Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment. This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\nExecuted commands:\n- `exim -bpc`\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/exim.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/exim.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | exim binary execution timeout. 
| 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: exim\n update_every: 5 # Collect Exim queue statistics every 5 seconds\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/exim.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/exim.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | exim binary execution timeout. | 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: exim\n update_every: 5 # Collect Exim queue statistics every 5 seconds\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `exim` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m exim\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `exim` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep exim\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep exim /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
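Because the collector only ever runs `exim -bpc` (via `ndsudo`), another quick check is to run that command yourself and confirm it prints the queue count as a single number (a sketch; reading the queue typically requires root privileges):\n\n```bash\nsudo exim -bpc\n```\n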
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep exim\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Exim instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exim.qemails | emails | emails |\n\n", @@ -4203,8 +4235,8 @@ export const integrations = [ }, "most_popular": false }, - "overview": "# Fail2ban\n\nPlugin: go.d.plugin\nModule: fail2ban\n\n## Overview\n\nThis collector tracks two main metrics for each jail: currently banned IPs and active failure incidents. It relies on the [`fail2ban-client`](https://linux.die.net/man/1/fail2ban-client) CLI tool but avoids directly executing the binary. Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment. This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### For Netdata running in a Docker container\n\n1. **Install Fail2ban client**.\n\n Ensure `fail2ban-client` is available in the container by setting the environment variable `NETDATA_EXTRA_DEB_PACKAGES=fail2ban` when starting the container.\n\n2. **Mount host's `/var/run` directory**.\n\n Mount the host machine's `/var/run` directory to `/host/var/run` inside your Netdata container. This grants Netdata access to the Fail2ban socket file, typically located at `/var/run/fail2ban/fail2ban.sock`.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/fail2ban.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/fail2ban.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | fail2ban-client binary execution timeout. 
| 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: fail2ban\n update_every: 5 # Collect Fail2Ban jails statistics every 5 seconds\n\n```\n{% /details %}\n", + "overview": "# Fail2ban\n\nPlugin: go.d.plugin\nModule: fail2ban\n\n## Overview\n\nThis collector tracks two main metrics for each jail: currently banned IPs and active failure incidents. It relies on the [`fail2ban-client`](https://linux.die.net/man/1/fail2ban-client) CLI tool but avoids directly executing the binary. Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment. This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### For Netdata running in a Docker container\n\n1. **Install Fail2ban client**.\n\n Ensure `fail2ban-client` is available in the container by setting the environment variable `NETDATA_EXTRA_DEB_PACKAGES=fail2ban` when starting the container.\n\n2. **Mount host's `/var/run` directory**.\n\n Mount the host machine's `/var/run` directory to `/host/var/run` inside your Netdata container. This grants Netdata access to the Fail2ban socket file, typically located at `/var/run/fail2ban/fail2ban.sock`.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/fail2ban.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/fail2ban.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | fail2ban-client binary execution timeout. | 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: fail2ban\n update_every: 5 # Collect Fail2Ban jails statistics every 5 seconds\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `fail2ban` collector, run the `go.d.plugin` with the debug option enabled. 
The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m fail2ban\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `fail2ban` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep fail2ban\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep fail2ban /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep fail2ban\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per jail\n\nThese metrics refer to the Jail.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| jail | Jail's name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| fail2ban.jail_banned_ips | banned | addresses |\n| fail2ban.jail_active_failures | active_failures | failures |\n\n", @@ -4241,7 +4273,7 @@ export const integrations = [ "most_popular": false }, "overview": "# Files and directories\n\nPlugin: go.d.plugin\nModule: filecheck\n\n## Overview\n\nThis collector monitors the existence, last modification time, and size of arbitrary files and directories on the system.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThis collector requires the DAC_READ_SEARCH capability when monitoring files not normally accessible to the Netdata user, but it is set automatically during installation, so no manual configuration is needed.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/filecheck.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/filecheck.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| files | List of files to monitor. | | yes |\n| dirs | List of directories to monitor. | | yes |\n| discovery_every | Files and directories discovery interval. 
| 60 | no |\n\n##### files\n\nFiles matching the selector will be monitored.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match)\n- Syntax:\n\n```yaml\nfiles:\n includes:\n - pattern1\n - pattern2\n excludes:\n - pattern3\n - pattern4\n```\n\n\n##### dirs\n\nDirectories matching the selector will be monitored.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match)\n- Syntax:\n\n```yaml\ndirs:\n includes:\n - pattern1\n - pattern2\n excludes:\n - pattern3\n - pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Files\n\nFiles monitoring example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: files_example\n files:\n include:\n - '/path/to/file1'\n - '/path/to/file2'\n - '/path/to/*.log'\n\n```\n{% /details %}\n##### Directories\n\nDirectories monitoring example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: files_example\n dirs:\n collect_dir_size: no\n include:\n - '/path/to/dir1'\n - '/path/to/dir2'\n - '/path/to/dir3*'\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/filecheck.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/filecheck.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| files | List of files to monitor. | | yes |\n| dirs | List of directories to monitor. | | yes |\n| discovery_every | Files and directories discovery interval. 
| 60 | no |\n\n##### files\n\nFiles matching the selector will be monitored.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match)\n- Syntax:\n\n```yaml\nfiles:\n include:\n - pattern1\n - pattern2\n exclude:\n - pattern3\n - pattern4\n```\n\n\n##### dirs\n\nDirectories matching the selector will be monitored.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match)\n- Syntax:\n\n```yaml\ndirs:\n include:\n - pattern1\n - pattern2\n exclude:\n - pattern3\n - pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Files\n\nFiles monitoring example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: files_example\n files:\n include:\n - '/path/to/file1'\n - '/path/to/file2'\n - '/path/to/*.log'\n\n```\n{% /details %}\n##### Directories\n\nDirectories monitoring example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: files_example\n dirs:\n collect_dir_size: no\n include:\n - '/path/to/dir1'\n - '/path/to/dir2'\n - '/path/to/dir3*'\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `filecheck` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m filecheck\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `filecheck` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep filecheck\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep filecheck /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep filecheck\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per file\n\nThese metrics refer to the File.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| file_path | File absolute path |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filecheck.file_existence_status | exist, not_exist | status |\n| filecheck.file_modification_time_ago | mtime_ago | seconds |\n| filecheck.file_size_bytes | size | bytes |\n\n### Per directory\n\nThese metrics refer to the Directory.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| dir_path | Directory absolute path |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filecheck.dir_existence_status | exist, not_exist | status |\n| filecheck.dir_modification_time_ago | mtime_ago | seconds |\n| filecheck.dir_size_bytes | size | bytes |\n| filecheck.dir_files_count | files | files |\n\n", @@ -4278,7 +4310,7 @@ "most_popular": false }, "overview": "# Fluentd\n\nPlugin: go.d.plugin\nModule: fluentd\n\n## Overview\n\nThis collector monitors Fluentd servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Enable monitor agent\n\nTo enable monitor agent, follow the [official documentation](https://docs.fluentd.org/v1.0/articles/monitoring-rest-api).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/fluentd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/fluentd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:24220 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. 
| | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:24220\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:24220\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nFluentd with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:24220\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:24220\n\n - name: remote\n url: http://192.0.2.1:24220\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Enable monitor agent\n\nTo enable monitor agent, follow the [official documentation](https://docs.fluentd.org/v1.0/articles/monitoring-rest-api).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/fluentd.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/fluentd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:24220 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:24220\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:24220\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nFluentd with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:24220\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:24220\n\n - name: remote\n url: http://192.0.2.1:24220\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `fluentd` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m fluentd\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `fluentd` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep fluentd\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep fluentd /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep fluentd\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Fluentd instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| fluentd.retry_count | a dimension per plugin | count |\n| fluentd.buffer_queue_length | a dimension per plugin | queue_length |\n| fluentd.buffer_total_queued_size | a dimension per plugin | queued_size |\n\n", @@ -4315,7 +4347,7 @@ } }, "overview": "# FreeRADIUS\n\nPlugin: go.d.plugin\nModule: freeradius\n\n## Overview\n\nThis collector monitors FreeRADIUS servers.\n\nIt collects metrics by sending [status-server](https://wiki.freeradius.org/config/Status) messages to the server.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt automatically detects FreeRADIUS instances running on localhost.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Enable status server\n\nTo enable status server, follow the [official documentation](https://wiki.freeradius.org/config/Status).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/freeradius.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/freeradius.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address. | 127.0.0.1 | yes |\n| port | Server port. | 18121 | no |\n| secret | FreeRADIUS secret. 
| adminsecret | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1\n port: 18121\n secert: adminsecret\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1\n port: 18121\n secert: adminsecret\n\n - name: remote\n address: 192.0.2.1\n port: 18121\n secert: adminsecret\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Enable status server\n\nTo enable status server, follow the [official documentation](https://wiki.freeradius.org/config/Status).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/freeradius.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/freeradius.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address. | 127.0.0.1 | yes |\n| port | Server port. | 18121 | no |\n| secret | FreeRADIUS secret. | adminsecret | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1\n port: 18121\n secret: adminsecret\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1\n port: 18121\n secret: adminsecret\n\n - name: remote\n address: 192.0.2.1\n port: 18121\n secret: adminsecret\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `freeradius` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m freeradius\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `freeradius` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep freeradius\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep freeradius /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep freeradius\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per FreeRADIUS instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| freeradius.authentication | requests, responses | packets/s |\n| freeradius.authentication_access_responses | accepts, rejects, challenges | packets/s |\n| freeradius.bad_authentication | dropped, duplicate, invalid, malformed, unknown-types | packets/s |\n| freeradius.proxy_authentication | requests, responses | packets/s |\n| freeradius.proxy_authentication_access_responses | accepts, rejects, challenges | packets/s |\n| freeradius.proxy_bad_authentication | dropped, duplicate, invalid, malformed, unknown-types | packets/s |\n| freeradius.accounting | requests, responses | packets/s |\n| freeradius.bad_accounting | dropped, duplicate, invalid, malformed, unknown-types | packets/s |\n| freeradius.proxy_accounting | requests, responses | packets/s |\n| freeradius.proxy_bad_accounting | dropped, duplicate, invalid, malformed, unknown-types | packets/s |\n\n", @@ -4351,7 +4383,7 @@ export const integrations = [ "most_popular": false }, "overview": "# Gearman\n\nPlugin: go.d.plugin\nModule: gearman\n\n## Overview\n\nMonitors jobs activity, priority and available workers. 
It collects summary and function-specific statistics.\n\n\nThis collector connects to a Gearman instance via TCP socket and executes the following commands:\n\n- status\n- priority-status\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Gearman instances running on localhost that are listening on port 4730.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/gearman.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/gearman.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | The IP address and port where the Gearman service listens for connections. | 127.0.0.1:11211 | yes |\n| timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. | 1 | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:4730\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:4730\n\n - name: remote\n address: 203.0.113.0:4730\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/gearman.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/gearman.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| address | The IP address and port where the Gearman service listens for connections. | 127.0.0.1:4730 | yes |\n| timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. | 1 | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:4730\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:4730\n\n - name: remote\n address: 203.0.113.0:4730\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `gearman` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m gearman\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `gearman` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep gearman\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep gearman /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep gearman\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Gearman instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| gearman.queued_jobs_activity | running, waiting | jobs |\n| gearman.queued_jobs_priority | high, normal, low | jobs |\n\n### Per function\n\nThese metrics refer to the Function (task).\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| function_name | Function name. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| gearman.function_queued_jobs_activity | running, waiting | jobs |\n| gearman.function_queued_jobs_priority | high, normal, low | jobs |\n| gearman.function_workers | available | workers |\n\n", @@ -4394,7 +4426,7 @@ export const integrations = [ "most_popular": true }, "overview": "# Go-ethereum\n\nPlugin: go.d.plugin\nModule: geth\n\n## Overview\n\nThis collector monitors Go-ethereum instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Go-ethereum instances running on localhost.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/geth.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/geth.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:6060/debug/metrics/prometheus | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:6060/debug/metrics/prometheus\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:6060/debug/metrics/prometheus\n username: username\n password: password\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:6060/debug/metrics/prometheus\n\n - name: remote\n url: http://192.0.2.1:6060/debug/metrics/prometheus\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/geth.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/geth.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:6060/debug/metrics/prometheus | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:6060/debug/metrics/prometheus\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:6060/debug/metrics/prometheus\n username: username\n password: password\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:6060/debug/metrics/prometheus\n\n - name: remote\n url: http://192.0.2.1:6060/debug/metrics/prometheus\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `geth` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m geth\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `geth` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep geth\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep geth /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep geth\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Go-ethereum instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| geth.eth_db_chaindata_ancient_io_rate | reads, writes | bytes/s |\n| geth.eth_db_chaindata_ancient_io | reads, writes | bytes |\n| geth.eth_db_chaindata_disk_io | reads, writes | bytes |\n| geth.goroutines | goroutines | goroutines |\n| geth.eth_db_chaindata_disk_io_rate | reads, writes | bytes/s |\n| geth.chaindata_db_size | level_db, ancient_db | bytes |\n| geth.chainhead | block, receipt, header | block |\n| geth.tx_pool_pending | invalid, pending, local, discard, no_funds, ratelimit, replace | transactions |\n| geth.tx_pool_current | invalid, pending, local, pool | transactions |\n| geth.tx_pool_queued | discard, eviction, no_funds, ratelimit | transactions |\n| geth.p2p_bandwidth | ingress, egress | bytes/s |\n| geth.reorgs | executed | reorgs |\n| geth.reorgs_blocks | added, dropped | blocks |\n| geth.p2p_peers | peers | peers |\n| geth.p2p_peers_calls | dials, serves | calls/s |\n| geth.rpc_calls | failed, successful | calls/s |\n\n", @@ -4434,7 +4466,7 @@ export const integrations = [ "most_popular": false }, "overview": "# HAProxy\n\nPlugin: go.d.plugin\nModule: haproxy\n\n## Overview\n\nThis collector monitors HAProxy servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Enable PROMEX addon.\n\nTo enable PROMEX addon, follow the [official documentation](https://github.com/haproxy/haproxy/tree/master/addons/promex).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/haproxy.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/haproxy.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. 
Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8404/metrics\n\n```\n{% /details %}\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8404/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nNGINX Plus with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8404/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8404/metrics\n\n - name: remote\n url: http://192.0.2.1:8404/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Enable PROMEX addon.\n\nTo enable PROMEX addon, follow the [official documentation](https://github.com/haproxy/haproxy/tree/master/addons/promex).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/haproxy.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/haproxy.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. 
| | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8404/metrics\n\n```\n{% /details %}\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8404/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nHAProxy with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8404/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8404/metrics\n\n - name: remote\n url: http://192.0.2.1:8404/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `haproxy` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m haproxy\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `haproxy` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep haproxy\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep haproxy /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep haproxy\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per HAProxy instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| haproxy.backend_current_sessions | a dimension per proxy | sessions |\n| haproxy.backend_sessions | a dimension per proxy | sessions/s |\n| haproxy.backend_response_time_average | a dimension per proxy | milliseconds |\n| haproxy.backend_queue_time_average | a dimension per proxy | milliseconds |\n| haproxy.backend_current_queue | a dimension per proxy | requests |\n\n### Per proxy\n\nThese metrics refer to the Proxy.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| haproxy.backend_http_responses | 1xx, 2xx, 3xx, 4xx, 5xx, other | responses/s |\n| haproxy.backend_network_io | in, out | bytes/s |\n\n", @@ -4473,7 +4505,7 @@ export const integrations = [ "most_popular": false }, "overview": "# HDD temperature\n\nPlugin: go.d.plugin\nModule: hddtemp\n\n## Overview\n\nThis collector monitors disk temperatures.\n\n\nIt retrieves temperature data for attached disks by querying the hddtemp daemon at regular intervals.\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, this collector will attempt to connect to the `hddtemp` daemon on `127.0.0.1:7634`\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install hddtemp\n\nInstall `hddtemp` using your distribution's package manager.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/hddtemp.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/hddtemp.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | The IP address and port where the hddtemp daemon listens for connections. | 127.0.0.1:7634 | yes |\n| timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. 
| 1 | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:7634\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:7634\n\n - name: remote\n address: 203.0.113.0:7634\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install hddtemp\n\nInstall `hddtemp` using your distribution's package manager.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/hddtemp.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/hddtemp.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | The IP address and port where the hddtemp daemon listens for connections. | 127.0.0.1:7634 | yes |\n| timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. | 1 | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:7634\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:7634\n\n - name: remote\n address: 203.0.113.0:7634\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `hddtemp` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m hddtemp\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `hddtemp` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep hddtemp\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep hddtemp /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep hddtemp\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per disk\n\nThese metrics refer to the Disk.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk_id | Disk identifier. It is derived from the device path (e.g. 
sda or ata-HUP722020APA330_BFJ0WS3F) |\n| model | Disk model |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hddtemp.disk_temperature | temperature | Celsius |\n| hddtemp.disk_temperature_sensor_status | ok, err, na, unk, nos, slp | status |\n\n", @@ -4510,7 +4542,7 @@ export const integrations = [ "most_popular": true }, "overview": "# Hadoop Distributed File System (HDFS)\n\nPlugin: go.d.plugin\nModule: hfs\n\n## Overview\n\nThis collector monitors HDFS nodes.\n\nNetdata accesses HDFS metrics over `Java Management Extensions` (JMX) through the web interface of an HDFS daemon.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/hdfs.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/hdfs.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9870/jmx | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9870/jmx\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9870/jmx\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9870/jmx\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9870/jmx\n\n - name: remote\n url: http://192.0.2.1:9870/jmx\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/hdfs.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/hdfs.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9870/jmx | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9870/jmx\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9870/jmx\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9870/jmx\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9870/jmx\n\n - name: remote\n url: http://192.0.2.1:9870/jmx\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `hfs` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m hfs\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `hfs` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep hfs\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep hfs /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep hfs\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ hdfs_capacity_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf) | hdfs.capacity | summary datanodes space capacity utilization |\n| [ hdfs_missing_blocks ](https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf) | hdfs.blocks | number of missing blocks |\n| [ hdfs_stale_nodes ](https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf) | hdfs.data_nodes | number of datanodes marked stale due to delayed heartbeat |\n| [ hdfs_dead_nodes ](https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf) | hdfs.data_nodes | number of datanodes which are currently dead |\n| [ hdfs_num_failed_volumes ](https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf) | hdfs.num_failed_volumes | number of failed volumes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Hadoop Distributed File System (HDFS) instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | DataNode | NameNode |\n|:------|:----------|:----|:---:|:---:|\n| hdfs.heap_memory | committed, used | MiB | \u2022 | \u2022 |\n| hdfs.gc_count_total | gc | events/s | \u2022 | \u2022 |\n| hdfs.gc_time_total | ms | ms | \u2022 | \u2022 |\n| hdfs.gc_threshold | info, warn | events/s | \u2022 | \u2022 |\n| hdfs.threads | new, runnable, blocked, waiting, timed_waiting, terminated | num | \u2022 | \u2022 |\n| hdfs.logs_total | info, error, warn, fatal | logs/s | \u2022 | \u2022 |\n| hdfs.rpc_bandwidth | received, sent | kilobits/s | \u2022 | \u2022 |\n| hdfs.rpc_calls | calls | calls/s | \u2022 | \u2022 |\n| hdfs.open_connections | open | connections | \u2022 | \u2022 |\n| hdfs.call_queue_length | length | num | \u2022 | \u2022 |\n| hdfs.avg_queue_time | time | ms | \u2022 | \u2022 |\n| hdfs.avg_processing_time | time | ms | \u2022 | \u2022 |\n| hdfs.capacity | remaining, used | KiB | | \u2022 |\n| hdfs.used_capacity | dfs, non_dfs | KiB | | \u2022 |\n| hdfs.load | load | load | | \u2022 |\n| hdfs.volume_failures_total | failures | events/s | | \u2022 |\n| hdfs.files_total | files | num | | \u2022 |\n| hdfs.blocks_total | blocks | num | | \u2022 |\n| hdfs.blocks | corrupt, missing, under_replicated | num | | \u2022 |\n| hdfs.data_nodes | live, dead, stale | num | | \u2022 |\n| hdfs.datanode_capacity | remaining, used | KiB | \u2022 | |\n| hdfs.datanode_used_capacity | dfs, non_dfs | KiB | \u2022 | |\n| hdfs.datanode_failed_volumes | failed volumes | num | \u2022 | |\n| hdfs.datanode_bandwidth | reads, writes | KiB/s | \u2022 | |\n\n", @@ -4550,7 +4582,7 @@ export const integrations = [ "most_popular": false }, "overview": "# HPE Smart Arrays\n\nPlugin: go.d.plugin\nModule: hpssa\n\n## Overview\n\nMonitors the health of HPE Smart Arrays by tracking the status of controllers, arrays, logical and physical drives in your storage system.\nIt relies on the `ssacli` CLI tool but avoids directly executing the 
binary.\nInstead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment.\nThis approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\n\nExecuted commands:\n- `ssacli ctrl all show config detail`\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install ssacli\n\nSee [official installation instructions](https://support.hpe.com/connect/s/softwaredetails?language=en_US&collectionId=MTX-0cb3f808e2514d3d).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/ssacli.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/ssacli.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | ssacli binary execution timeout. | 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: hpssa\n update_every: 5 # Collect HPE Smart Array statistics every 5 seconds\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install ssacli\n\nSee [official installation instructions](https://support.hpe.com/connect/s/softwaredetails?language=en_US&collectionId=MTX-0cb3f808e2514d3d).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/ssacli.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/ssacli.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | ssacli binary execution timeout. 
| 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: hpssa\n update_every: 5 # Collect HPE Smart Array statistics every 5 seconds\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `hpssa` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m hpssa\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `hpssa` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep hpssa\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep hpssa /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep hpssa\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per controller\n\nThese metrics refer to the Controller.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| slot | Slot number |\n| model | Controller model |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hpssa.controller_status | ok, nok | status |\n| hpssa.controller_temperature | temperature | Celsius |\n| hpssa.controller_cache_module_presence_status | present, not_present | status |\n| hpssa.controller_cache_module_status | ok, nok | status |\n| hpssa.controller_cache_module_temperature | temperature | Celsius |\n| hpssa.controller_cache_module_battery_status | ok, nok | status |\n\n### Per array\n\nThese metrics refer to the Array.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| slot | Slot number |\n| array_id | Array id |\n| interface_type | Array interface type (e.g. SATA) |\n| array_type | Array type (e.g. 
Data) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hpssa.array_status | ok, nok | status |\n\n### Per logical drive\n\nThese metrics refer to the Logical Drive.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| slot | Slot number |\n| array_id | Array id |\n| logical_drive_id | Logical Drive id (number) |\n| disk_name | Disk name (e.g. /dev/sda) |\n| drive_type | Drive type (e.g. Data) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hpssa.logical_drive_status | ok, nok | status |\n\n### Per physical drive\n\nThese metrics refer to the Physical Drive.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| slot | Slot number |\n| array_id | Array id or \"na\" if unassigned |\n| logical_drive_id | Logical Drive id or \"na\" if unassigned |\n| location | Drive location in port:box:bay format (e.g. 1I:1:1) |\n| interface_type | Drive interface type (e.g. SATA) |\n| drive_type | Drive type (e.g. Data Drive, Unassigned Drive) |\n| model | Drive model |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hpssa.physical_drive_status | ok, nok | status |\n| hpssa.physical_drive_temperature | temperature | status |\n\n", @@ -4586,7 +4618,7 @@ export const integrations = [ "most_popular": true }, "overview": "# HTTP Endpoints\n\nPlugin: go.d.plugin\nModule: httpcheck\n\n## Overview\n\nThis collector monitors HTTP servers availability status and response time.\n\nPossible statuses:\n\n| Status | Description |\n|---------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|\n| success | HTTP request completed successfully with a status code matching the configured `status_accepted` range (default: 200), and the response body and headers (if configured) match expectations. |\n| timeout | HTTP request timed out before receiving a response (default: 1 second). |\n| no_connection | Failed to establish a connection to the target. |\n| redirect | Received a redirect response (3xx status code) while `not_follow_redirects` is configured. |\n| bad_status | HTTP request completed with a status code outside the configured `status_accepted` range (default: non-200). |\n| bad_content | HTTP request completed successfully but the response body does not match the expected content (when using `response_match`). |\n| bad_header | HTTP request completed successfully but response headers do not match the expected values (when using `headers_match`). 
|\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/httpcheck.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/httpcheck.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| status_accepted | HTTP accepted response statuses. Anything else will result in 'bad status' in the status chart. | [200] | no |\n| response_match | If the status code is accepted, the content of the response will be matched against this regular expression. | | no |\n| headers_match | This option defines a set of rules that check for specific key-value pairs in the HTTP headers of the response. | [] | no |\n| headers_match.exclude | This option determines whether the rule should check for the presence of the specified key-value pair or the absence of it. | no | no |\n| headers_match.key | The exact name of the HTTP header to check for. | | yes |\n| headers_match.value | The [pattern](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/matcher#supported-format) to match against the value of the specified header. | | no |\n| cookie_file | Path to cookie file. See [cookie file format](https://everything.curl.dev/http/cookies/fileformat). | | no |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080\n\n```\n{% /details %}\n##### With HTTP request headers\n\nConfiguration with HTTP request headers that will be sent by the client.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080\n headers:\n Host: localhost:8080\n User-Agent: netdata/go.d.plugin\n Accept: */*\n\n```\n{% /details %}\n##### With `status_accepted`\n\nA basic example configuration with non-default status_accepted.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080\n status_accepted:\n - 200\n - 204\n\n```\n{% /details %}\n##### With `header_match`\n\nExample configurations with `header_match`. See the value [pattern](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/matcher#supported-format) syntax.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n # The \"X-Robots-Tag\" header must be present in the HTTP response header,\n # but the value of the header does not matter.\n # This config checks for the presence of the header regardless of its value.\n - name: local\n url: http://127.0.0.1:8080\n header_match:\n - key: X-Robots-Tag\n\n # The \"X-Robots-Tag\" header must be present in the HTTP response header\n # only if its value is equal to \"noindex, nofollow\".\n # This config checks both the presence of the header and its value.\n - name: local\n url: http://127.0.0.1:8080\n header_match:\n - key: X-Robots-Tag\n value: '= noindex,nofollow'\n\n # The \"X-Robots-Tag\" header must not be present in the HTTP response header\n # but the value of the header does not matter.\n # This config checks for the presence of the header regardless of its value.\n - name: local\n url: http://127.0.0.1:8080\n header_match:\n - key: X-Robots-Tag\n exclude: yes\n\n # The \"X-Robots-Tag\" header must not be present in the HTTP response header\n # only if its value is equal to \"noindex, nofollow\".\n # This config checks both the presence of the header and its value.\n - name: local\n url: http://127.0.0.1:8080\n header_match:\n - key: X-Robots-Tag\n exclude: yes\n value: '= noindex,nofollow'\n\n```\n{% /details %}\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8080\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080\n\n - name: remote\n url: http://192.0.2.1:8080\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/httpcheck.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from 
the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/httpcheck.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| status_accepted | HTTP accepted response statuses. Anything else will result in 'bad status' in the status chart. | [200] | no |\n| response_match | If the status code is accepted, the content of the response will be matched against this regular expression. | | no |\n| headers_match | This option defines a set of rules that check for specific key-value pairs in the HTTP headers of the response. | [] | no |\n| headers_match.exclude | This option determines whether the rule should check for the presence of the specified key-value pair or the absence of it. | no | no |\n| headers_match.key | The exact name of the HTTP header to check for. | | yes |\n| headers_match.value | The [pattern](https://github.com/netdata/netdata/tree/master/src/go/pkg/matcher#supported-format) to match against the value of the specified header. | | no |\n| cookie_file | Path to cookie file. See [cookie file format](https://everything.curl.dev/http/cookies/fileformat). | | no |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080\n\n```\n{% /details %}\n##### With HTTP request headers\n\nConfiguration with HTTP request headers that will be sent by the client.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080\n headers:\n Host: localhost:8080\n User-Agent: netdata/go.d.plugin\n Accept: */*\n\n```\n{% /details %}\n##### With `status_accepted`\n\nA basic example configuration with non-default status_accepted.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080\n status_accepted:\n - 200\n - 204\n\n```\n{% /details %}\n##### With `header_match`\n\nExample configurations with `header_match`. 
See the value [pattern](https://github.com/netdata/netdata/tree/master/src/go/pkg/matcher#supported-format) syntax.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n # The \"X-Robots-Tag\" header must be present in the HTTP response header,\n # but the value of the header does not matter.\n # This config checks for the presence of the header regardless of its value.\n - name: local\n url: http://127.0.0.1:8080\n header_match:\n - key: X-Robots-Tag\n\n # The \"X-Robots-Tag\" header must be present in the HTTP response header\n # only if its value is equal to \"noindex, nofollow\".\n # This config checks both the presence of the header and its value.\n - name: local\n url: http://127.0.0.1:8080\n header_match:\n - key: X-Robots-Tag\n value: '= noindex,nofollow'\n\n # The \"X-Robots-Tag\" header must not be present in the HTTP response header\n # and the value of the header does not matter.\n # This config checks for the absence of the header regardless of its value.\n - name: local\n url: http://127.0.0.1:8080\n header_match:\n - key: X-Robots-Tag\n exclude: yes\n\n # The \"X-Robots-Tag\" header must not be present in the HTTP response header\n # only if its value is equal to \"noindex, nofollow\".\n # This config checks both the presence of the header and its value.\n - name: local\n url: http://127.0.0.1:8080\n header_match:\n - key: X-Robots-Tag\n exclude: yes\n value: '= noindex,nofollow'\n\n```\n{% /details %}\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8080\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080\n\n - name: remote\n url: http://192.0.2.1:8080\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `httpcheck` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m httpcheck\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `httpcheck` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep httpcheck\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep httpcheck /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep httpcheck\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per target\n\nThe metrics refer to the monitored target.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| url | url value that is set in the configuration file. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| httpcheck.response_time | time | ms |\n| httpcheck.response_length | length | characters |\n| httpcheck.status | success, timeout, redirect, no_connection, bad_content, bad_header, bad_status | boolean |\n| httpcheck.in_state | time | boolean |\n\n", @@ -4623,7 +4655,7 @@ export const integrations = [ "most_popular": false }, "overview": "# Icecast\n\nPlugin: go.d.plugin\nModule: icecast\n\n## Overview\n\nThis collector monitors Icecast listener counts.\n\nIt uses the Icecast server statistics `status-json.xsl` endpoint to retrieve the metrics.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Icecast instances running on localhost that are listening on port 8000.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Icecast minimum version\n\nNeeds at least Icecast version >= 2.4.0\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/icecast.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/icecast.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. 
| http://127.0.0.1:8000 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | POST | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8000\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8000\n\n - name: remote\n url: http://192.0.2.1:8000\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Icecast minimum version\n\nRequires Icecast version 2.4.0 or newer.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/icecast.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/icecast.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8000 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | POST | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8000\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8000\n\n - name: remote\n url: http://192.0.2.1:8000\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `icecast` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m icecast\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `icecast` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep icecast\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep icecast /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep icecast\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Icecast source\n\nThese metrics refer to an icecast source.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| source | Source name. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| icecast.listeners | listeners | listeners |\n\n", @@ -4661,7 +4693,7 @@ export const integrations = [ "most_popular": false }, "overview": "# Intel GPU\n\nPlugin: go.d.plugin\nModule: intelgpu\n\n## Overview\n\nThis collector gathers performance metrics for Intel integrated GPUs.\nIt relies on the [`intel_gpu_top`](https://manpages.debian.org/testing/intel-gpu-tools/intel_gpu_top.1.en.html) CLI tool but avoids directly executing the binary.\nInstead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment.\nThis approach eliminates the need to grant the CAP_PERFMON capability to `intel_gpu_top`, improving security and potentially simplifying permission management.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install intel-gpu-tools\n\nInstall `intel-gpu-tools` using your distribution's package manager.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/intelgpu.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/intelgpu.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| device | Select a specific GPU using [supported filter](https://manpages.debian.org/testing/intel-gpu-tools/intel_gpu_top.1.en.html#DESCRIPTION). 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: intelgpu\n update_every: 5 # Collect Intel iGPU metrics every 5 seconds\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install intel-gpu-tools\n\nInstall `intel-gpu-tools` using your distribution's package manager.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/intelgpu.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/intelgpu.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| device | Select a specific GPU using [supported filter](https://manpages.debian.org/testing/intel-gpu-tools/intel_gpu_top.1.en.html#DESCRIPTION). | | no |\n\n{% /details %}\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: intelgpu\n update_every: 5 # Collect Intel iGPU metrics every 5 seconds\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `intelgpu` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m intelgpu\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `intelgpu` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep intelgpu\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep intelgpu /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep intelgpu\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Intel GPU instance\n\nThese metrics refer to the Intel GPU.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| intelgpu.frequency | frequency | MHz |\n| intelgpu.power | gpu, package | Watts |\n\n### Per engine\n\nThese metrics refer to the GPU hardware engine.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| engine_class | Engine class (Render/3D, Blitter, VideoEnhance, Video, Compute). |\n| engine_instance | Engine instance (e.g. Render/3D/0, Video/0, Video/1). |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| intelgpu.engine_busy_perc | busy | percentage |\n\n", @@ -4698,7 +4730,7 @@ export const integrations = [ "most_popular": false }, "overview": "# IPFS\n\nPlugin: go.d.plugin\nModule: ipfs\n\n## Overview\n\nThis collector monitors IPFS daemon health and network activity.\n\nIt uses [RPC API](https://docs.ipfs.tech/reference/kubo/rpc/) to collect metrics.\n\nUsed endpoints:\n\n- [/api/v0/stats/bw](https://docs.ipfs.tech/reference/kubo/rpc/#api-v0-stats-bw)\n- [/api/v0/swarm/peers](https://docs.ipfs.tech/reference/kubo/rpc/#api-v0-swarm-peers)\n- [/api/v0/stats/repo](https://docs.ipfs.tech/reference/kubo/rpc/#api-v0-stats-repo)\n- [/api/v0/pin/ls](https://docs.ipfs.tech/reference/kubo/rpc/#api-v0-pin-ls)\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects IPFS instances running on localhost that are listening on port 5001.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nCalls to the following endpoints are disabled by default due to IPFS bugs:\n\n- /api/v0/stats/repo ([#7528](https://github.com/ipfs/go-ipfs/issues/7528)).\n- /api/v0/pin/ls ([#3874](https://github.com/ipfs/go-ipfs/issues/3874)).\n\n**Disabled by default** due to potential high CPU usage. Consider enabling only if necessary.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/ipfs.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/ipfs.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| repoapi | Enables querying the [/api/v0/stats/repo](https://docs.ipfs.tech/reference/kubo/rpc/#api-v0-repo-stat) endpoint for repository statistics. | no | no |\n| pinapi | Enables querying the [/api/v0/pin/ls](https://docs.ipfs.tech/reference/kubo/rpc/#api-v0-pin-ls) endpoint to retrieve a list of all pinned objects. | no | no |\n| url | Server URL. | http://127.0.0.1:5001 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | POST | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:5001\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:5001\n\n - name: remote\n url: http://192.0.2.1:5001\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/ipfs.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/ipfs.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| repoapi | Enables querying the [/api/v0/stats/repo](https://docs.ipfs.tech/reference/kubo/rpc/#api-v0-repo-stat) endpoint for repository statistics. | no | no |\n| pinapi | Enables querying the [/api/v0/pin/ls](https://docs.ipfs.tech/reference/kubo/rpc/#api-v0-pin-ls) endpoint to retrieve a list of all pinned objects. | no | no |\n| url | Server URL. | http://127.0.0.1:5001 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. 
| | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | POST | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:5001\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:5001\n\n - name: remote\n url: http://192.0.2.1:5001\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `ipfs` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m ipfs\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `ipfs` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep ipfs\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep ipfs /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
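Building on the IPFS options above, a sketch of a job that opts into the repository and pin endpoints; both default to `no` because of the upstream issues noted in the overview, so enable them only if you need the extra charts:

```yaml
jobs:
  - name: local
    url: http://127.0.0.1:5001
    repoapi: yes  # queries /api/v0/stats/repo; may increase CPU usage
    pinapi: yes   # queries /api/v0/pin/ls; may increase CPU usage
```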
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep ipfs\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ ipfs_datastore_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ipfs.conf) | ipfs.datastore_space_utilization | IPFS datastore utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per IPFS instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipfs.bandwidth | in, out | bytes/s |\n| ipfs.peers | peers | peers |\n| ipfs.datastore_space_utilization | used | percent |\n| ipfs.repo_size | size | bytes |\n| ipfs.repo_objects | objects | objects |\n| ipfs.repo_pinned_objects | pinned, recursive_pins | objects |\n\n", @@ -4734,8 +4766,8 @@ export const integrations = [ } } }, - "overview": "# ISC DHCP\n\nPlugin: go.d.plugin\nModule: isc_dhcpd\n\n## Overview\n\nThis collector monitors ISC DHCP lease usage by reading the DHCP client lease database (dhcpd.leases).\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/isc_dhcpd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/isc_dhcpd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| leases_path | Path to DHCP client lease database. | /var/lib/dhcp/dhcpd.leases | no |\n| pools | List of IP pools to monitor. 
| | yes |\n\n##### pools\n\nList of IP pools to monitor.\n\n- IP range syntax: see [supported formats](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/iprange#supported-formats).\n- Syntax:\n\n```yaml\npools:\n - name: \"POOL_NAME1\"\n networks: \"SPACE SEPARATED LIST OF IP RANGES\"\n - name: \"POOL_NAME2\"\n networks: \"SPACE SEPARATED LIST OF IP RANGES\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n pools:\n - name: lan\n networks: \"192.168.0.0/24 192.168.1.0/24 192.168.2.0/24\"\n - name: wifi\n networks: \"10.0.0.0/24\"\n\n```\n{% /details %}\n", + "overview": "# ISC DHCP\n\nPlugin: go.d.plugin\nModule: isc_dhcpd\n\n## Overview\n\nThis collector monitors ISC DHCP lease usage by reading the DHCP client lease database (dhcpd.leases).\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n- BSD\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/isc_dhcpd.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/isc_dhcpd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| leases_path | Path to DHCP client lease database. | /var/lib/dhcp/dhcpd.leases | no |\n| pools | List of IP pools to monitor. 
| | yes |\n\n##### pools\n\nList of IP pools to monitor.\n\n- IP range syntax: see [supported formats](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/iprange#supported-formats).\n- Syntax:\n\n```yaml\npools:\n - name: \"POOL_NAME1\"\n networks: \"SPACE SEPARATED LIST OF IP RANGES\"\n - name: \"POOL_NAME2\"\n networks: \"SPACE SEPARATED LIST OF IP RANGES\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n pools:\n - name: lan\n networks: \"192.168.0.0/24 192.168.1.0/24 192.168.2.0/24\"\n - name: wifi\n networks: \"10.0.0.0/24\"\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `isc_dhcpd` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m isc_dhcpd\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `isc_dhcpd` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep isc_dhcpd\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep isc_dhcpd /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep isc_dhcpd\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per ISC DHCP instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| isc_dhcpd.active_leases_total | active | leases |\n\n### Per ISC DHCP instance\n\nThese metrics refer to the DHCP pool.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| dhcp_pool_name | The DHCP pool name defined in the collector configuration. 
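Since this collector only reads the lease database, a quick sanity check (assuming the documented default path) is to confirm the `netdata` user can open it before digging into collector debugging:

```bash
# should print the first lease entries without a permission error
sudo -u netdata head -n 5 /var/lib/dhcp/dhcpd.leases
```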
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| isc_dhcpd.dhcp_pool_utilization | utilization | percent |\n| isc_dhcpd.dhcp_pool_active_leases | active | leases |\n\n", @@ -4778,7 +4810,7 @@ export const integrations = [ "most_popular": true }, "overview": "# Kubelet\n\nPlugin: go.d.plugin\nModule: k8s_kubelet\n\n## Overview\n\nThis collector monitors Kubelet instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/k8s_kubelet.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/k8s_kubelet.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:10255/metrics | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:10255/metrics\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:10250/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/k8s_kubelet.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/k8s_kubelet.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:10255/metrics | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:10255/metrics\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:10250/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `k8s_kubelet` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
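Before debugging the collector itself, it can help to confirm the Kubelet metrics endpoint responds. The URL below is the documented default; for the secure port 10250, combine HTTPS with `tls_skip_verify` as in the example above:

```bash
# expect Prometheus-format metrics from the read-only kubelet port
curl -s http://127.0.0.1:10255/metrics | head
```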
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m k8s_kubelet\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `k8s_kubelet` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep k8s_kubelet\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep k8s_kubelet /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep k8s_kubelet\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ kubelet_node_config_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/kubelet.conf) | k8s_kubelet.kubelet_node_config_error | the node is experiencing a configuration-related error (0: false, 1: true) |\n| [ kubelet_token_requests ](https://github.com/netdata/netdata/blob/master/src/health/health.d/kubelet.conf) | k8s_kubelet.kubelet_token_requests | number of failed Token() requests to the alternate token source |\n| [ kubelet_token_requests ](https://github.com/netdata/netdata/blob/master/src/health/health.d/kubelet.conf) | k8s_kubelet.kubelet_token_requests | number of failed Token() requests to the alternate token source |\n| [ kubelet_operations_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/kubelet.conf) | k8s_kubelet.kubelet_operations_errors | number of Docker or runtime operation errors |\n| [ kubelet_operations_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/kubelet.conf) | k8s_kubelet.kubelet_operations_errors | number of Docker or runtime operation errors |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
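The Kubelet alerts listed above are defined in `health.d/kubelet.conf`. Assuming the standard `edit-config` workflow shown throughout this section, which Netdata also documents for health files, thresholds can be overridden locally:

```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
sudo ./edit-config health.d/kubelet.conf
```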
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Kubelet instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_kubelet.apiserver_audit_requests_rejected | rejected | requests/s |\n| k8s_kubelet.apiserver_storage_data_key_generation_failures | failures | events/s |\n| k8s_kubelet.apiserver_storage_data_key_generation_latencies | 5_\u00b5s, 10_\u00b5s, 20_\u00b5s, 40_\u00b5s, 80_\u00b5s, 160_\u00b5s, 320_\u00b5s, 640_\u00b5s, 1280_\u00b5s, 2560_\u00b5s, 5120_\u00b5s, 10240_\u00b5s, 20480_\u00b5s, 40960_\u00b5s, +Inf | observes/s |\n| k8s_kubelet.apiserver_storage_data_key_generation_latencies_percent | 5_\u00b5s, 10_\u00b5s, 20_\u00b5s, 40_\u00b5s, 80_\u00b5s, 160_\u00b5s, 320_\u00b5s, 640_\u00b5s, 1280_\u00b5s, 2560_\u00b5s, 5120_\u00b5s, 10240_\u00b5s, 20480_\u00b5s, 40960_\u00b5s, +Inf | percentage |\n| k8s_kubelet.apiserver_storage_envelope_transformation_cache_misses | cache misses | events/s |\n| k8s_kubelet.kubelet_containers_running | total | running_containers |\n| k8s_kubelet.kubelet_pods_running | total | running_pods |\n| k8s_kubelet.kubelet_pods_log_filesystem_used_bytes | a dimension per namespace and pod | B |\n| k8s_kubelet.kubelet_runtime_operations | a dimension per operation type | operations/s |\n| k8s_kubelet.kubelet_runtime_operations_errors | a dimension per operation type | errors/s |\n| k8s_kubelet.kubelet_docker_operations | a dimension per operation type | operations/s |\n| k8s_kubelet.kubelet_docker_operations_errors | a dimension per operation type | errors/s |\n| k8s_kubelet.kubelet_node_config_error | experiencing_error | bool |\n| k8s_kubelet.kubelet_pleg_relist_interval_microseconds | 0.5, 0.9, 0.99 | microseconds |\n| k8s_kubelet.kubelet_pleg_relist_latency_microseconds | 0.5, 0.9, 0.99 | microseconds |\n| k8s_kubelet.kubelet_token_requests | total, failed | token_requests/s |\n| k8s_kubelet.rest_client_requests_by_code | a dimension per HTTP status code | requests/s |\n| k8s_kubelet.rest_client_requests_by_method | a dimension per HTTP method | requests/s |\n\n### Per volume manager\n\nThese metrics refer to the Volume Manager.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_kubelet.volume_manager_total_volumes | actual, desired | state |\n\n", @@ -4821,7 +4853,7 @@ export const integrations = [ "most_popular": true }, "overview": "# Kubeproxy\n\nPlugin: go.d.plugin\nModule: k8s_kubeproxy\n\n## Overview\n\nThis collector monitors Kubeproxy instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/k8s_kubeproxy.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config 
directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/k8s_kubeproxy.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:10249/metrics | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:10249/metrics\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:10249/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/k8s_kubeproxy.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/k8s_kubeproxy.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:10249/metrics | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. 
| GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:10249/metrics\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:10249/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `k8s_kubeproxy` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m k8s_kubeproxy\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `k8s_kubeproxy` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep k8s_kubeproxy\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep k8s_kubeproxy /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep k8s_kubeproxy\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
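As with the Kubelet, a quick reachability check of the kube-proxy metrics endpoint (default URL from the options above) can separate network problems from collector problems:

```bash
curl -s http://127.0.0.1:10249/metrics | head
```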
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Kubeproxy instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_kubeproxy.kubeproxy_sync_proxy_rules | sync_proxy_rules | events/s |\n| k8s_kubeproxy.kubeproxy_sync_proxy_rules_latency_microsecond | 0.001, 0.002, 0.004, 0.008, 0.016, 0.032, 0.064, 0.128, 0.256, 0.512, 1.024, 2.048, 4.096, 8.192, 16.384, +Inf | observes/s |\n| k8s_kubeproxy.kubeproxy_sync_proxy_rules_latency | 0.001, 0.002, 0.004, 0.008, 0.016, 0.032, 0.064, 0.128, 0.256, 0.512, 1.024, 2.048, 4.096, 8.192, 16.384, +Inf | percentage |\n| k8s_kubeproxy.rest_client_requests_by_code | a dimension per HTTP status code | requests/s |\n| k8s_kubeproxy.rest_client_requests_by_method | a dimension per HTTP method | requests/s |\n| k8s_kubeproxy.http_request_duration | 0.5, 0.9, 0.99 | microseconds |\n\n", @@ -4858,10 +4890,10 @@ export const integrations = [ "most_popular": true }, "overview": "# Kubernetes Cluster State\n\nPlugin: go.d.plugin\nModule: k8s_state\n\n## Overview\n\nThis collector monitors Kubernetes Nodes, Pods and Containers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/k8s_state.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/k8s_state.conf\n```\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/k8s_state.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/k8s_state.conf\n```\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `k8s_state` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m k8s_state\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `k8s_state` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep k8s_state\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep k8s_state /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep k8s_state\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per node\n\nThese metrics refer to the Node.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| k8s_cluster_id | Cluster ID. This is equal to the kube-system namespace UID. |\n| k8s_cluster_name | Cluster name. Cluster name discovery only works in GKE. |\n| k8s_node_name | Node name. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_state.node_allocatable_cpu_requests_utilization | requests | % |\n| k8s_state.node_allocatable_cpu_requests_used | requests | millicpu |\n| k8s_state.node_allocatable_cpu_limits_utilization | limits | % |\n| k8s_state.node_allocatable_cpu_limits_used | limits | millicpu |\n| k8s_state.node_allocatable_mem_requests_utilization | requests | % |\n| k8s_state.node_allocatable_mem_requests_used | requests | bytes |\n| k8s_state.node_allocatable_mem_limits_utilization | limits | % |\n| k8s_state.node_allocatable_mem_limits_used | limits | bytes |\n| k8s_state.node_allocatable_pods_utilization | allocated | % |\n| k8s_state.node_allocatable_pods_usage | available, allocated | pods |\n| k8s_state.node_condition | a dimension per condition | status |\n| k8s_state.node_schedulability | schedulable, unschedulable | state |\n| k8s_state.node_pods_readiness | ready | % |\n| k8s_state.node_pods_readiness_state | ready, unready | pods |\n| k8s_state.node_pods_condition | pod_ready, pod_scheduled, pod_initialized, containers_ready | pods |\n| k8s_state.node_pods_phase | running, failed, succeeded, pending | pods |\n| k8s_state.node_containers | containers, init_containers | containers |\n| k8s_state.node_containers_state | running, waiting, terminated | containers |\n| k8s_state.node_init_containers_state | running, waiting, terminated | containers |\n| k8s_state.node_age | age | seconds |\n\n### Per pod\n\nThese metrics refer to the Pod.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| k8s_cluster_id | Cluster ID. This is equal to the kube-system namespace UID. |\n| k8s_cluster_name | Cluster name. Cluster name discovery only works in GKE. |\n| k8s_node_name | Node name. |\n| k8s_namespace | Namespace. |\n| k8s_controller_kind | Controller kind (ReplicaSet, DaemonSet, StatefulSet, Job, etc.). |\n| k8s_controller_name | Controller name. |\n| k8s_pod_name | Pod name. |\n| k8s_qos_class | Pod QOS class (burstable, guaranteed, besteffort). |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_state.pod_cpu_requests_used | requests | millicpu |\n| k8s_state.pod_cpu_limits_used | limits | millicpu |\n| k8s_state.pod_mem_requests_used | requests | bytes |\n| k8s_state.pod_mem_limits_used | limits | bytes |\n| k8s_state.pod_condition | pod_ready, pod_scheduled, pod_initialized, containers_ready | state |\n| k8s_state.pod_phase | running, failed, succeeded, pending | state |\n| k8s_state.pod_age | age | seconds |\n| k8s_state.pod_containers | containers, init_containers | containers |\n| k8s_state.pod_containers_state | running, waiting, terminated | containers |\n| k8s_state.pod_init_containers_state | running, waiting, terminated | containers |\n\n### Per container\n\nThese metrics refer to the Pod container.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| k8s_cluster_id | Cluster ID. This is equal to the kube-system namespace UID. |\n| k8s_cluster_name | Cluster name. Cluster name discovery only works in GKE. |\n| k8s_node_name | Node name. |\n| k8s_namespace | Namespace. |\n| k8s_controller_kind | Controller kind (ReplicaSet, DaemonSet, StatefulSet, Job, etc.). |\n| k8s_controller_name | Controller name. |\n| k8s_pod_name | Pod name. |\n| k8s_qos_class | Pod QOS class (burstable, guaranteed, besteffort). |\n| k8s_container_name | Container name. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_state.pod_container_readiness_state | ready | state |\n| k8s_state.pod_container_restarts | restarts | restarts |\n| k8s_state.pod_container_state | running, waiting, terminated | state |\n| k8s_state.pod_container_waiting_state_reason | a dimension per reason | state |\n| k8s_state.pod_container_terminated_state_reason | a dimension per reason | state |\n\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per node\n\nThese metrics refer to the Node.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| k8s_cluster_id | Cluster ID. This is equal to the kube-system namespace UID. |\n| k8s_cluster_name | Cluster name. Cluster name discovery only works in GKE. |\n| k8s_node_name | Node name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_state.node_allocatable_cpu_requests_utilization | requests | % |\n| k8s_state.node_allocatable_cpu_requests_used | requests | millicpu |\n| k8s_state.node_allocatable_cpu_limits_utilization | limits | % |\n| k8s_state.node_allocatable_cpu_limits_used | limits | millicpu |\n| k8s_state.node_allocatable_mem_requests_utilization | requests | % |\n| k8s_state.node_allocatable_mem_requests_used | requests | bytes |\n| k8s_state.node_allocatable_mem_limits_utilization | limits | % |\n| k8s_state.node_allocatable_mem_limits_used | limits | bytes |\n| k8s_state.node_allocatable_pods_utilization | allocated | % |\n| k8s_state.node_allocatable_pods_usage | available, allocated | pods |\n| k8s_state.node_condition | Ready, DiskPressure, MemoryPressure, NetworkUnavailable, PIDPressure | status |\n| k8s_state.node_schedulability | schedulable, unschedulable | state |\n| k8s_state.node_pods_readiness | ready | % |\n| k8s_state.node_pods_readiness_state | ready, unready | pods |\n| k8s_state.node_pods_condition | pod_ready, pod_scheduled, pod_initialized, containers_ready | pods |\n| k8s_state.node_pods_phase | running, failed, succeeded, pending | pods |\n| k8s_state.node_containers | containers, init_containers | containers |\n| k8s_state.node_containers_state | running, waiting, terminated | containers |\n| k8s_state.node_init_containers_state | running, waiting, terminated | containers |\n| k8s_state.node_age | age | seconds |\n\n### Per pod\n\nThese metrics refer to the Pod.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| k8s_cluster_id | Cluster ID. This is equal to the kube-system namespace UID. |\n| k8s_cluster_name | Cluster name. Cluster name discovery only works in GKE. |\n| k8s_node_name | Node name. |\n| k8s_namespace | Namespace. |\n| k8s_controller_kind | Controller kind (ReplicaSet, DaemonSet, StatefulSet, Job, etc.). |\n| k8s_controller_name | Controller name. |\n| k8s_pod_name | Pod name. |\n| k8s_qos_class | Pod QOS class (burstable, guaranteed, besteffort). 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_state.pod_cpu_requests_used | requests | millicpu |\n| k8s_state.pod_cpu_limits_used | limits | millicpu |\n| k8s_state.pod_mem_requests_used | requests | bytes |\n| k8s_state.pod_mem_limits_used | limits | bytes |\n| k8s_state.pod_condition | pod_ready, pod_scheduled, pod_initialized, containers_ready | state |\n| k8s_state.pod_phase | running, failed, succeeded, pending | state |\n| k8s_state.pod_status_reason | Evicted, NodeAffinity, NodeLost, Shutdown, UnexpectedAdmissionError, Other | status |\n| k8s_state.pod_age | age | seconds |\n| k8s_state.pod_containers | containers, init_containers | containers |\n| k8s_state.pod_containers_state | running, waiting, terminated | containers |\n| k8s_state.pod_init_containers_state | running, waiting, terminated | containers |\n\n### Per container\n\nThese metrics refer to the Pod container.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| k8s_cluster_id | Cluster ID. This is equal to the kube-system namespace UID. |\n| k8s_cluster_name | Cluster name. Cluster name discovery only works in GKE. |\n| k8s_node_name | Node name. |\n| k8s_namespace | Namespace. |\n| k8s_controller_kind | Controller kind (ReplicaSet, DaemonSet, StatefulSet, Job, etc.). |\n| k8s_controller_name | Controller name. |\n| k8s_pod_name | Pod name. |\n| k8s_qos_class | Pod QOS class (burstable, guaranteed, besteffort). |\n| k8s_container_name | Container name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_state.pod_container_readiness_state | ready | state |\n| k8s_state.pod_container_restarts | restarts | restarts |\n| k8s_state.pod_container_state | running, waiting, terminated | state |\n| k8s_state.pod_container_waiting_state_reason | ContainerCreating, CrashLoopBackOff, CreateContainerConfigError, CreateContainerError, ErrImagePull, ImagePullBackOff, InvalidImageName, PodInitializing, Other | state |\n| k8s_state.pod_container_terminated_state_reason | Completed, ContainerCannotRun, DeadlineExceeded, Error, Evicted, OOMKilled, Other | state |\n\n", "integration_type": "collector", "id": "go.d.plugin-k8s_state-Kubernetes_Cluster_State", "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/k8s_state/metadata.yaml", @@ -4907,7 +4939,7 @@ export const integrations = [ "most_popular": true }, "overview": "# Lighttpd\n\nPlugin: go.d.plugin\nModule: lighttpd\n\n## Overview\n\nThis collector monitors the activity and performance of Lighttpd servers, and collects metrics such as the number of connections, workers, requests and more.\n\n\nIt sends HTTP requests to the Lighttpd location [server-status](https://redmine.lighttpd.net/projects/lighttpd/wiki/Mod_status), \nwhich is a built-in location that provides metrics about the Lighttpd server.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Lighttpd instances running on localhost that are listening on port 80.\nOn startup, it tries to collect metrics from:\n\n- http://localhost/server-status?auto\n- http://127.0.0.1/server-status?auto\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a 
significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Enable Lighttpd status support\n\nTo enable status support, see the [official documentation](https://redmine.lighttpd.net/projects/lighttpd/wiki/Mod_status).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/lighttpd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/lighttpd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/server-status?auto | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nLighttpd with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1/server-status?auto\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n\n - name: remote\n url: http://192.0.2.1/server-status?auto\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Enable Lighttpd status support\n\nTo enable status support, see the [official documentation](https://redmine.lighttpd.net/projects/lighttpd/wiki/Mod_status).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/lighttpd.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/lighttpd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/server-status?auto | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nLighttpd with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1/server-status?auto\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n\n - name: remote\n url: http://192.0.2.1/server-status?auto\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `lighttpd` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m lighttpd\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `lighttpd` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep lighttpd\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep lighttpd /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep lighttpd\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
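Before digging into the metrics below, it can be worth confirming that lighttpd's `mod_status` actually serves the machine-readable report this collector parses. A minimal check, assuming the default status URL used in the examples above (your `status.status-url` setting in lighttpd may map it elsewhere):

```bash
# Fetch the machine-readable status report; expect fields such as
# "Total Accesses", "BusyServers", "IdleServers" and a "Scoreboard" line.
curl -s "http://127.0.0.1/server-status?auto"
```

A 404 here usually means `mod_status` is not loaded or is mapped to a different URL; adjust the job's `url` option to match.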
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Lighttpd instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| lighttpd.requests | requests | requests/s |\n| lighttpd.net | sent | kilobits/s |\n| lighttpd.workers | idle, busy | servers |\n| lighttpd.scoreboard | waiting, open, close, hard_error, keepalive, read, read_post, write, handle_request, request_start, request_end | connections |\n| lighttpd.uptime | uptime | seconds |\n\n", @@ -4944,8 +4976,8 @@ export const integrations = [ ], "most_popular": false }, - "overview": "# Litespeed\n\nPlugin: go.d.plugin\nModule: litespeed\n\n## Overview\n\nExamine Litespeed metrics for insights into web server operations. Analyze request rates, response times, and error rates for efficient web service delivery.\n\nThe collector uses the statistics under /tmp/lshttpd to gather the metrics.\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf no configuration is present, the collector will attempt to read files under /tmp/lshttpd/.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/litespeed.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/litespeed.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| reports_dir | Directory containing Litespeed's real-time statistics files. | /tmp/lshttpd/ | no |\n\n{% /details %}\n#### Examples\n\n##### Set the path to statistics\n\nChange the path for the litespeed stats files\n\n```yaml\nlocal:\n name: 'local'\n path: '/tmp/lshttpd'\n\n```\n", + "overview": "# Litespeed\n\nPlugin: go.d.plugin\nModule: litespeed\n\n## Overview\n\nExamine Litespeed metrics for insights into web server operations. 
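Because this collector reads files rather than querying an endpoint, a useful early check is that the statistics directory exists and is readable by the `netdata` user. A sketch, assuming the default `/tmp/lshttpd/` location (LiteSpeed typically names its real-time reports `.rtreport`, `.rtreport.2`, and so on; verify the names on your installation):

```bash
# List the real-time statistics files the collector parses.
ls -la /tmp/lshttpd/

# Confirm the netdata user can read them (adjust the file name to what you see).
sudo -u netdata head /tmp/lshttpd/.rtreport
```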
Analyze request rates, response times, and error rates for efficient web service delivery.\n\nThe collector uses the statistics under /tmp/lshttpd to gather the metrics.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n- BSD\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf no configuration is present, the collector will attempt to read files under /tmp/lshttpd/.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",
+    "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/litespeed.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/litespeed.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| reports_dir | Directory containing Litespeed's real-time statistics files. | /tmp/lshttpd/ | no |\n\n{% /details %}\n#### Examples\n\n##### Set the path to statistics\n\nChange the directory that contains the LiteSpeed statistics files (the `reports_dir` option documented above).\n\n```yaml\njobs:\n - name: local\n reports_dir: '/tmp/lshttpd'\n\n```\n",
    "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `litespeed` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m litespeed\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `litespeed` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep litespeed\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep litespeed /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep litespeed\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Litespeed instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| litespeed.requests | requests | requests/s |\n| litespeed.requests_processing | processing | requests |\n| litespeed.net_throughput | in, out | kilobits/s |\n| litespeed.net_ssl_throughput | in, out | kilobits/s |\n| litespeed.connections | free, used | conns |\n| litespeed.ssl_connections | free, used | conns |\n| litespeed.public_cache | hits | hits/s |\n| litespeed.private_cache | hits | hits/s |\n| litespeed.static | hits | hits/s |\n\n", @@ -4981,8 +5013,8 @@ export const integrations = [ }, "most_popular": false }, - "overview": "# systemd-logind users\n\nPlugin: go.d.plugin\nModule: logind\n\n## Overview\n\nThis collector monitors number of sessions and users as reported by the `org.freedesktop.login1` DBus API.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/logind.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/logind.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n#### Examples\nThere are no configuration examples.\n\n", + "overview": "# systemd-logind users\n\nPlugin: go.d.plugin\nModule: logind\n\n## Overview\n\nThis collector monitors number of sessions and users as reported by the `org.freedesktop.login1` DBus API.\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector 
supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/logind.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/logind.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `logind` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m logind\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `logind` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep logind\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep logind /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep logind\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
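Since this collector ships no configuration examples, a quick way to sanity-check its data source is to query systemd-logind directly. A sketch, assuming `loginctl` (the CLI frontend to the same `org.freedesktop.login1` API) is available on the host:

```bash
# List the sessions and users logind currently tracks; the collector's
# session and user counts are derived from the same DBus-provided data.
loginctl list-sessions
loginctl list-users
```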
An instance is uniquely identified by a set of labels.\n\n\n\n### Per systemd-logind users instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| logind.sessions | remote, local | sessions |\n| logind.sessions_type | console, graphical, other | sessions |\n| logind.sessions_state | online, closing, active | sessions |\n| logind.users_state | offline, closing, online, lingering, active | users |\n\n", @@ -5018,7 +5050,7 @@ export const integrations = [ "most_popular": false }, "overview": "# Logstash\n\nPlugin: go.d.plugin\nModule: logstash\n\n## Overview\n\nThis collector monitors Logstash instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/logstatsh.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/logstatsh.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://localhost:9600 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://localhost:9600\n\n```\n{% /details %}\n##### HTTP authentication\n\nHTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://localhost:9600\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nHTTPS and self-signed certificate.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://localhost:9600\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://localhost:9600\n\n - name: remote\n url: http://192.0.2.1:9600\n\n```\n{% /details %}\n",
+    "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/logstash.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/logstash.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://localhost:9600 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key.
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://localhost:9600\n\n```\n{% /details %}\n##### HTTP authentication\n\nHTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://localhost:9600\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nHTTPS and self-signed certificate.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://localhost:9600\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://localhost:9600\n\n - name: remote\n url: http://192.0.2.1:9600\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `logstash` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m logstash\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `logstash` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep logstash\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep logstash /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep logstash\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
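To verify that the URL you configured is reachable before debugging the collector itself, you can query Logstash's monitoring API directly. A sketch, assuming the default API port `9600` and the documented `_node/stats` endpoint:

```bash
# Fetch the node stats JSON the collector scrapes; look for the "jvm",
# "process" and "events" sections in the response.
curl -s http://localhost:9600/_node/stats
```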
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Logstash instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| logstash.jvm_threads | threads | count |\n| logstash.jvm_mem_heap_used | in_use | percentage |\n| logstash.jvm_mem_heap | committed, used | KiB |\n| logstash.jvm_mem_pools_eden | committed, used | KiB |\n| logstash.jvm_mem_pools_survivor | committed, used | KiB |\n| logstash.jvm_mem_pools_old | committed, used | KiB |\n| logstash.jvm_gc_collector_count | eden, old | counts/s |\n| logstash.jvm_gc_collector_time | eden, old | ms |\n| logstash.open_file_descriptors | open | fd |\n| logstash.event | in, filtered, out | events/s |\n| logstash.event_duration | event, queue | seconds |\n| logstash.uptime | uptime | seconds |\n\n### Per pipeline\n\nThese metrics refer to the pipeline.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| pipeline | pipeline name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| logstash.pipeline_event | in, filtered, out | events/s |\n| logstash.pipeline_event_duration | event, queue | seconds |\n\n", @@ -5054,8 +5086,8 @@ export const integrations = [ }, "most_popular": false }, - "overview": "# LVM logical volumes\n\nPlugin: go.d.plugin\nModule: lvm\n\n## Overview\n\nThis collector monitors the health of LVM logical volumes. It relies on the [`lvs`](https://man7.org/linux/man-pages/man8/lvs.8.html) CLI tool but avoids directly executing the binary. Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment. This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/lvm.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/lvm.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | lvs binary execution timeout. 
| 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: lvm\n update_every: 5 # Collect logical volume statistics every 5 seconds\n\n```\n{% /details %}\n", + "overview": "# LVM logical volumes\n\nPlugin: go.d.plugin\nModule: lvm\n\n## Overview\n\nThis collector monitors the health of LVM logical volumes. It relies on the [`lvs`](https://man7.org/linux/man-pages/man8/lvs.8.html) CLI tool but avoids directly executing the binary. Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment. This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n- NetBSD\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/lvm.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/lvm.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | lvs binary execution timeout. | 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: lvm\n update_every: 5 # Collect logical volume statistics every 5 seconds\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `lvm` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m lvm\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `lvm` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep lvm\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep lvm /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep lvm\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ lvm_lv_data_space_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/lvm.conf) | lvm.lv_data_space_utilization | LVM logical volume high data space usage (LV ${label:lv_name} VG ${label:vg_name} Type ${label:volume_type}) |\n| [ lvm_lv_metadata_space_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/lvm.conf) | lvm.lv_metadata_space_utilization | LVM logical volume high metadata space usage (LV ${label:lv_name} VG ${label:vg_name} Type ${label:volume_type}) |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
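To preview the data this collector works from, you can run `lvs` by hand with the relevant reporting fields. A sketch, assuming a reasonably recent LVM; note that `data_percent` and `metadata_percent` are only populated for thin pools and similar targets:

```bash
# Show per-volume data and metadata usage, the fields behind the
# lvm.lv_data_space_utilization and lvm.lv_metadata_space_utilization metrics.
sudo lvs -o lv_name,vg_name,lv_attr,data_percent,metadata_percent
```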
An instance is uniquely identified by a set of labels.\n\n\n\n### Per logical volume\n\nThese metrics refer to the LVM logical volume.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| lv_name | Logical volume name |\n| vg_name | Volume group name |\n| volume_type | Type of the volume |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| lvm.lv_data_space_utilization | utilization | % |\n| lvm.lv_metadata_space_utilization | utilization | % |\n\n", @@ -5064,6 +5096,47 @@ export const integrations = [ "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/lvm/metadata.yaml", "related_resources": "" }, + { + "meta": { + "id": "collector-go.d.plugin-maxscale", + "plugin_name": "go.d.plugin", + "module_name": "maxscale", + "monitored_instance": { + "name": "MaxScale", + "link": "https://mariadb.com/kb/en/maxscale/", + "categories": [ + "data-collection.database-servers" + ], + "icon_filename": "maxscale.svg" + }, + "related_resources": { + "integrations": { + "list": [] + } + }, + "alternative_monitored_instances": [], + "info_provided_to_referring_integrations": { + "description": "" + }, + "keywords": [ + "maria", + "mariadb", + "maxscale", + "database", + "db" + ], + "most_popular": false + }, + "overview": "# MaxScale\n\nPlugin: go.d.plugin\nModule: maxscale\n\n## Overview\n\nThis collector monitors the activity and performance of MaxScale servers.\n\n\nIt sends HTTP requests to the MaxScale [REST API](https://mariadb.com/kb/en/maxscale-24-02rest-api/).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector can automatically detect MaxScale instances running on:\n\n- localhost that are listening on port 8989\n- within Docker containers\n\n> **Note that the MaxScale REST API requires a username and password**. \n> While Netdata can automatically detect MaxScale instances and create data collection jobs, these jobs will fail unless you provide the necessary credentials.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/maxscale.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/maxscale.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | The URL of the MaxScale HTTP API endpoint. 
| http://127.0.0.1:8989 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | admin | no |\n| password | Password for basic HTTP authentication. | mariadb | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8989\n username: admin\n password: mariadb\n\n```\n##### HTTPS with self-signed certificate\n\nMaxScale with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8989\n username: admin\n password: mariadb\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8989\n username: admin\n password: mariadb\n\n - name: remote\n url: http://192.0.2.1:8989\n username: admin\n password: mariadb\n\n```\n{% /details %}\n", + "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `maxscale` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m maxscale\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `maxscale` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep maxscale\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep maxscale /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep maxscale\n```\n\n", + "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per MaxScale instance\n\nThese metrics refer to the monitored MaxScale instance.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| maxscale.poll_events | reads, writes, accepts, errors, hangups | events/s |\n| maxscale.current_sessions | sessions | sessions |\n| maxscale.current_zombie_connections | zombie | connections |\n| maxscale.threads_by_state | active, draining, dormant | threads |\n| maxscale.current_fds | managed | fds |\n| maxscale.qc_cache_efficiency | hits, misses | requests/s |\n| maxscale.qc_cache_operations | inserts, evictions | operations/s |\n| maxscale.uptime | uptime | seconds |\n\n### Per server\n\nThese metrics refer to the MariaDB server.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| server | Server ID. |\n| address | Server address. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| maxscale.server_state | master, slave, running, down, maintenance, draining, drained, relay_master, binlog_relay, synced | state |\n| maxscale.server_current_connections | connections | connections |\n\n", + "integration_type": "collector", + "id": "go.d.plugin-maxscale-MaxScale", + "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/maxscale/metadata.yaml", + "related_resources": "" + }, { "meta": { "id": "collector-go.d.plugin-megacli", @@ -5092,8 +5165,8 @@ export const integrations = [ }, "most_popular": false }, - "overview": "# MegaCLI MegaRAID\n\nPlugin: go.d.plugin\nModule: megacli\n\n## Overview\n\nMonitors the health of MegaCLI Hardware RAID by tracking the status of RAID adapters, physical drives, and backup batteries in your storage system.\nIt relies on the `megacli` CLI tool but avoids directly executing the binary.\nInstead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment.\nThis approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\n\nExecuted commands:\n- `megacli -LDPDInfo -aAll -NoLog`\n- `megacli -AdpBbuCmd -aAll -NoLog`\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/megacli.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/megacli.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | megacli binary execution timeout. 
| 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: megacli\n update_every: 5 # Collect MegaCli Hardware RAID statistics every 5 seconds\n\n```\n{% /details %}\n", + "overview": "# MegaCLI MegaRAID\n\nPlugin: go.d.plugin\nModule: megacli\n\n## Overview\n\nMonitors the health of MegaCLI Hardware RAID by tracking the status of RAID adapters, physical drives, and backup batteries in your storage system.\nIt relies on the `megacli` CLI tool but avoids directly executing the binary.\nInstead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment.\nThis approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\n\nExecuted commands:\n- `megacli -LDPDInfo -aAll -NoLog`\n- `megacli -AdpBbuCmd -aAll -NoLog`\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n- BSD\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/megacli.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/megacli.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | megacli binary execution timeout. | 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: megacli\n update_every: 5 # Collect MegaCli Hardware RAID statistics every 5 seconds\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `megacli` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m megacli\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `megacli` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep megacli\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep megacli /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep megacli\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ megacli_adapter_health_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/megacli.conf) | megacli.adapter_health_state | MegaCLI adapter ${label:adapter_number} is in the degraded state |\n| [ megacli_phys_drive_media_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/megacli.conf) | megacli.phys_drive_media_errors | MegaCLI physical drive adapter ${label:adapter_number} slot ${label:slot_number} media errors |\n| [ megacli_phys_drive_predictive_failures ](https://github.com/netdata/netdata/blob/master/src/health/health.d/megacli.conf) | megacli.phys_drive_predictive_failures | MegaCLI physical drive (adapter ${label:adapter_number} slot ${label:slot_number}) predictive failures |\n| [ megacli_bbu_charge ](https://github.com/netdata/netdata/blob/master/src/health/health.d/megacli.conf) | megacli.bbu_charge | MegaCLI Backup Battery Unit (adapter ${label:adapter_number}) average charge over the last minute |\n| [ megacli_bbu_recharge_cycles ](https://github.com/netdata/netdata/blob/master/src/health/health.d/megacli.conf) | megacli.bbu_recharge_cycles | MegaCLI Backup Battery Unit (adapter ${label:adapter_number}) average charge over the last minute |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
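If the collector reports no data, a useful first step is to run the two documented commands by hand (with `sudo` here, since outside of Netdata you are not going through `ndsudo`) and confirm they return adapter, drive, and battery information:

```bash
# The same queries the collector issues via ndsudo.
sudo megacli -LDPDInfo -aAll -NoLog | head
sudo megacli -AdpBbuCmd -aAll -NoLog | head
```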
An instance is uniquely identified by a set of labels.\n\n\n\n### Per adapter\n\nThese metrics refer to the MegaCLI Adapter.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| adapter_number | Adapter number |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| megacli.adapter_health_state | optimal, degraded, partially_degraded, failed | state |\n\n### Per physical drive\n\nThese metrics refer to the MegaCLI Physical Drive.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| adapter_number | Adapter number |\n| wwn | World Wide Name |\n| slot_number | Slot number |\n| drive_position | Position (e.g. DiskGroup: 0, Span: 0, Arm: 2) |\n| drive_type | Type (e.g. SATA) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| megacli.phys_drive_media_errors_rate | media_errors | errors/s |\n| megacli.phys_drive_predictive_failures_rate | predictive_failures | failures/s |\n\n### Per backup battery unit\n\nThese metrics refer to the MegaCLI Backup Battery Unit.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| adapter_number | Adapter number |\n| battery_type | Battery type (e.g. BBU) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| megacli.bbu_charge | charge | percentage |\n| megacli.bbu_recharge_cycles | recharge | cycles |\n| megacli.bbu_capacity_degradation | cap_degradation | percent |\n| megacli.bbu_temperature | temperature | Celsius |\n\n", @@ -5132,7 +5205,7 @@ export const integrations = [ "most_popular": false }, "overview": "# Memcached\n\nPlugin: go.d.plugin\nModule: memcached\n\n## Overview\n\nMonitor Memcached metrics for proficient in-memory key-value store operations. Track cache hits, misses, and memory usage for efficient data caching.\n\nIt reads the server's response to the `stats` command.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf no configuration is given, collector will attempt to connect to memcached instance on `127.0.0.1:11211` address.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/memcached.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/memcached.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | The IP address and port where the memcached service listens for connections. 
| 127.0.0.1:11211 | yes |\n| timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. | 1 | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:11211\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:11211\n\n - name: remote\n address: 203.0.113.0:11211\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/memcached.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/memcached.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | The IP address and port where the memcached service listens for connections. | 127.0.0.1:11211 | yes |\n| timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. | 1 | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:11211\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:11211\n\n - name: remote\n address: 203.0.113.0:11211\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `memcached` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m memcached\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `memcached` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep memcached\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep memcached /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep memcached\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ memcached_cache_memory_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/memcached.conf) | memcached.cache | cache memory utilization |\n| [ memcached_cache_fill_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/memcached.conf) | memcached.cache | average rate the cache fills up (positive), or frees up (negative) space over the last hour |\n| [ memcached_out_of_cache_space_time ](https://github.com/netdata/netdata/blob/master/src/health/health.d/memcached.conf) | memcached.cache | estimated time the cache will run out of space if the system continues to add data at the same rate as the past hour |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Memcached instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| memcached.cache | available, used | MiB |\n| memcached.net | in, out | kilobits/s |\n| memcached.connections | current, rejected, total | connections/s |\n| memcached.items | current, total | items |\n| memcached.evicted_reclaimed | reclaimed, evicted | items |\n| memcached.get | hits, misses | requests |\n| memcached.get_rate | rate | requests/s |\n| memcached.set_rate | rate | requests/s |\n| memcached.delete | hits, misses | requests |\n| memcached.cas | hits, misses, bad value | requests |\n| memcached.increment | hits, misses | requests |\n| memcached.decrement | hits, misses | requests |\n| memcached.touch | hits, misses | requests |\n| memcached.touch_rate | rate | requests/s |\n\n", @@ -5169,7 +5242,7 @@ export const integrations = [ "most_popular": false }, "overview": "# MongoDB\n\nPlugin: go.d.plugin\nModule: mongodb\n\n## Overview\n\nThis collector monitors MongoDB servers.\n\nExecuted queries:\n\n- [serverStatus](https://docs.mongodb.com/manual/reference/command/serverStatus/)\n- [dbStats](https://docs.mongodb.com/manual/reference/command/dbStats/)\n- [replSetGetStatus](https://www.mongodb.com/docs/manual/reference/command/replSetGetStatus/)\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Create a read-only user\n\nCreate a read-only user for Netdata in the admin database.\n\n- Authenticate as the admin user:\n\n ```bash\n use admin\n db.auth(\"admin\", \"\")\n ```\n\n- Create a user:\n\n ```bash\n db.createUser({\n \"user\":\"netdata\",\n \"pwd\": \"\",\n \"roles\" : [\n {role: 'read', db: 'admin' },\n {role: 'clusterMonitor', db: 'admin'},\n {role: 'read', db: 'local' }\n ]\n })\n ```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/mongodb.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/mongodb.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| uri | MongoDB connection string. See [URI syntax](https://www.mongodb.com/docs/manual/reference/connection-string/). | mongodb://localhost:27017 | yes |\n| timeout | Query timeout in seconds. | 1 | no |\n| databases | Databases selector. Determines which database metrics will be collected. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n uri: mongodb://netdata:password@localhost:27017\n\n```\n{% /details %}\n##### With databases metrics\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n uri: mongodb://netdata:password@localhost:27017\n databases:\n includes:\n - \"* *\"\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n uri: mongodb://netdata:password@localhost:27017\n\n - name: remote\n uri: mongodb://netdata:password@203.0.113.0:27017\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Create a read-only user\n\nCreate a read-only user for Netdata in the admin database.\n\n- Authenticate as the admin user:\n\n ```bash\n use admin\n db.auth(\"admin\", \"\")\n ```\n\n- Create a user:\n\n ```bash\n db.createUser({\n \"user\":\"netdata\",\n \"pwd\": \"\",\n \"roles\" : [\n {role: 'read', db: 'admin' },\n {role: 'clusterMonitor', db: 'admin'},\n {role: 'read', db: 'local' }\n ]\n })\n ```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/mongodb.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/mongodb.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| uri | MongoDB connection string. See [URI syntax](https://www.mongodb.com/docs/manual/reference/connection-string/). | mongodb://localhost:27017 | yes |\n| timeout | Query timeout in seconds. | 1 | no |\n| databases | Databases selector. Determines which database metrics will be collected. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n uri: mongodb://netdata:password@localhost:27017\n\n```\n{% /details %}\n##### With databases metrics\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n uri: mongodb://netdata:password@localhost:27017\n databases:\n includes:\n - \"* *\"\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n uri: mongodb://netdata:password@localhost:27017\n\n - name: remote\n uri: mongodb://netdata:password@203.0.113.0:27017\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `mongodb` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m mongodb\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `mongodb` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep mongodb\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep mongodb /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep mongodb\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n- WiredTiger metrics are available only if [WiredTiger](https://docs.mongodb.com/v6.0/core/wiredtiger/) is used as the\n storage engine.\n- Sharding metrics are available on shards only\n for [mongos](https://www.mongodb.com/docs/manual/reference/program/mongos/).\n\n\n### Per MongoDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mongodb.operations_rate | reads, writes, commands | operations/s |\n| mongodb.operations_latency_time | reads, writes, commands | milliseconds |\n| mongodb.operations_by_type_rate | insert, query, update, delete, getmore, command | operations/s |\n| mongodb.document_operations_rate | inserted, deleted, returned, updated | operations/s |\n| mongodb.scanned_indexes_rate | scanned | indexes/s |\n| mongodb.scanned_documents_rate | scanned | documents/s |\n| mongodb.active_clients_count | readers, writers | clients |\n| mongodb.queued_operations_count | reads, writes | operations |\n| mongodb.cursors_open_count | open | cursors |\n| mongodb.cursors_open_no_timeout_count | open_no_timeout | cursors |\n| mongodb.cursors_opened_rate | opened | cursors/s |\n| mongodb.cursors_timed_out_rate | timed_out | cursors/s |\n| mongodb.cursors_by_lifespan_count | le_1s, 1s_5s, 5s_15s, 15s_30s, 30s_1m, 1m_10m, ge_10m | cursors |\n| mongodb.transactions_count | active, inactive, open, prepared | transactions |\n| mongodb.transactions_rate | started, aborted, committed, prepared | transactions/s |\n| mongodb.connections_usage | available, used | connections |\n| mongodb.connections_by_state_count | active, threaded, exhaust_is_master, exhaust_hello, awaiting_topology_changes | connections |\n| mongodb.connections_rate | created | connections/s |\n| mongodb.asserts_rate | regular, warning, msg, user, tripwire, rollovers | asserts/s |\n| mongodb.network_traffic_rate | in, out | bytes/s |\n| mongodb.network_requests_rate | requests | requests/s |\n| mongodb.network_slow_dns_resolutions_rate | slow_dns | resolutions/s |\n| mongodb.network_slow_ssl_handshakes_rate | slow_ssl | handshakes/s |\n| mongodb.memory_resident_size | used | bytes |\n| mongodb.memory_virtual_size | used | bytes |\n| mongodb.memory_page_faults_rate | pgfaults | pgfaults/s |\n| mongodb.memory_tcmalloc_stats | allocated, central_cache_freelist, transfer_cache_freelist, thread_cache_freelists, pageheap_freelist, pageheap_unmapped | bytes |\n| mongodb.wiredtiger_concurrent_read_transactions_usage | available, used | transactions |\n| mongodb.wiredtiger_concurrent_write_transactions_usage | available, used | transactions |\n| mongodb.wiredtiger_cache_usage | used | bytes |\n| mongodb.wiredtiger_cache_dirty_space_size | dirty | bytes |\n| mongodb.wiredtiger_cache_io_rate | read, written | pages/s |\n| mongodb.wiredtiger_cache_evictions_rate | unmodified, modified | pages/s |\n| mongodb.sharding_nodes_count | shard_aware, shard_unaware | nodes |\n| mongodb.sharding_sharded_databases_count | partitioned, unpartitioned | databases |\n| mongodb.sharding_sharded_collections_count | partitioned, unpartitioned | collections |\n\n### Per lock type\n\nThese metrics refer to the lock type.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| lock_type | lock type (e.g. 
global, database, collection, mutex) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mongodb.lock_acquisitions_rate | shared, exclusive, intent_shared, intent_exclusive | acquisitions/s |\n\n### Per commit type\n\nThese metrics refer to the commit type.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| commit_type | commit type (e.g. noShards, singleShard, singleWriteShard) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mongodb.transactions_commits_rate | success, fail | commits/s |\n| mongodb.transactions_commits_duration_time | commits | milliseconds |\n\n### Per database\n\nThese metrics refer to the database.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| database | database name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mongodb.database_collection_count | collections | collections |\n| mongodb.database_indexes_count | indexes | indexes |\n| mongodb.database_views_count | views | views |\n| mongodb.database_documents_count | documents | documents |\n| mongodb.database_data_size | data_size | bytes |\n| mongodb.database_storage_size | storage_size | bytes |\n| mongodb.database_index_size | index_size | bytes |\n\n### Per replica set member\n\nThese metrics refer to the replica set member.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| repl_set_member | replica set member name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mongodb.repl_set_member_state | primary, startup, secondary, recovering, startup2, unknown, arbiter, down, rollback, removed | state |\n| mongodb.repl_set_member_health_status | up, down | status |\n| mongodb.repl_set_member_replication_lag_time | replication_lag | milliseconds |\n| mongodb.repl_set_member_heartbeat_latency_time | heartbeat_latency | milliseconds |\n| mongodb.repl_set_member_ping_rtt_time | ping_rtt | milliseconds |\n| mongodb.repl_set_member_uptime | uptime | seconds |\n\n### Per shard\n\nThese metrics refer to the shard.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| shard_id | shard id |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mongodb.sharding_shard_chunks_count | chunks | chunks |\n\n", @@ -5209,7 +5282,7 @@ export const integrations = [ "most_popular": false }, "overview": "# Monit\n\nPlugin: go.d.plugin\nModule: monit\n\n## Overview\n\nThis collector monitors the status of Monit's service checks.\n\n\nIt sends HTTP requests to the Monit `/_status?format=xml&level=full` endpoint.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Monit instances running on localhost that are listening on port 2812.\nOn startup, it tries to collect metrics from:\n\n- http://127.0.0.1:2812\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Enable TCP PORT\n\nSee [Syntax for TCP port](https://mmonit.com/monit/documentation/monit.html#TCP-PORT) for details.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this 
integration is `go.d/monit.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/monit.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:2812 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | admin | no |\n| password | Password for basic HTTP authentication. | monit | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:2812\n username: admin\n password: monit\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nWith enabled HTTPS and self-signed certificate.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:2812\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:2812\n\n - name: remote\n url: http://192.0.2.1:2812\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Enable TCP PORT\n\nSee [Syntax for TCP port](https://mmonit.com/monit/documentation/monit.html#TCP-PORT) for details.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/monit.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/monit.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required 
|\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:2812 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | admin | no |\n| password | Password for basic HTTP authentication. | monit | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:2812\n username: admin\n password: monit\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nWith enabled HTTPS and self-signed certificate.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:2812\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:2812\n\n - name: remote\n url: http://192.0.2.1:2812\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `monit` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m monit\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `monit` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep monit\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep monit /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep monit\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per service\n\nThese metrics refer to the monitored Service.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| server_hostname | Hostname of the Monit server. |\n| service_check_name | Service check name. |\n| service_check_type | Service check type. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| monit.service_check_status | ok, error, initializing, not_monitored | status |\n\n", @@ -5259,7 +5332,7 @@ export const integrations = [ "most_popular": true }, "overview": "# MariaDB\n\nPlugin: go.d.plugin\nModule: mysql\n\n## Overview\n\nThis collector monitors the health and performance of MySQL servers and collects general statistics, replication and user metrics.\n\n\nIt connects to the MySQL instance via a TCP or UNIX socket and executes the following commands:\n\nExecuted queries:\n\n- `SELECT VERSION();`\n- `SHOW GLOBAL STATUS;`\n- `SHOW GLOBAL VARIABLES;`\n- `SHOW SLAVE STATUS;` or `SHOW ALL SLAVES STATUS;` (MariaDBv10.2+) or `SHOW REPLICA STATUS;` (MySQL 8.0.22+)\n- `SHOW USER_STATISTICS;` (MariaDBv10.1.1+)\n- `SELECT TIME,USER FROM INFORMATION_SCHEMA.PROCESSLIST;`\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by trying to connect as root and netdata using known MySQL TCP sockets:\n\n- 127.0.0.1:3306\n- \"[::1]:3306\"\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Create netdata user\n\nA user account should have the\nfollowing [permissions](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html):\n\n- [`USAGE`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_usage)\n- [`REPLICATION CLIENT`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_replication-client)\n- [`PROCESS`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_process)\n\nTo create the `netdata` user with these permissions, execute the following in the MySQL 
shell:\n\n```mysql\nCREATE USER 'netdata'@'localhost';\nGRANT USAGE, REPLICATION CLIENT, PROCESS ON *.* TO 'netdata'@'localhost';\nFLUSH PRIVILEGES;\n```\n\nThe `netdata` user will have the ability to connect to the MySQL server on localhost without a password. It will only\nbe able to gather statistics without being able to alter or affect operations in any way.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/mysql.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/mysql.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | MySQL server DSN (Data Source Name). See [DSN syntax](https://github.com/go-sql-driver/mysql#dsn-data-source-name). | root@tcp(localhost:3306)/ | yes |\n| my.cnf | Specifies the my.cnf file to read the connection settings from the [client] section. | | no |\n| timeout | Query timeout in seconds. | 1 | no |\n\n{% /details %}\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: netdata@tcp(127.0.0.1:3306)/\n\n```\n{% /details %}\n##### Unix socket\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: netdata@unix(/var/lib/mysql/mysql.sock)/\n\n```\n{% /details %}\n##### Connection with password\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: netconfig:password@tcp(127.0.0.1:3306)/\n\n```\n{% /details %}\n##### my.cnf\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n my.cnf: '/etc/my.cnf'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: netdata@tcp(127.0.0.1:3306)/\n\n - name: remote\n dsn: netconfig:password@tcp(203.0.113.0:3306)/\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Create netdata user\n\nA user account should have the\nfollowing [permissions](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html):\n\n- [`USAGE`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_usage)\n- [`REPLICATION CLIENT`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_replication-client)\n- [`PROCESS`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_process)\n\nTo create the `netdata` user with these permissions, execute the following in the MySQL shell:\n\n```mysql\nCREATE USER 'netdata'@'localhost';\nGRANT USAGE, REPLICATION CLIENT, PROCESS ON *.* TO 'netdata'@'localhost';\nFLUSH PRIVILEGES;\n```\n\nThe `netdata` user will have the ability to connect to the MySQL server on localhost without a password. 
It will only\nbe able to gather statistics without being able to alter or affect operations in any way.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/mysql.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/mysql.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | MySQL server DSN (Data Source Name). See [DSN syntax](https://github.com/go-sql-driver/mysql#dsn-data-source-name). | root@tcp(localhost:3306)/ | yes |\n| my.cnf | Specifies the my.cnf file to read the connection settings from the [client] section. | | no |\n| timeout | Query timeout in seconds. | 1 | no |\n\n{% /details %}\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: netdata@tcp(127.0.0.1:3306)/\n\n```\n{% /details %}\n##### Unix socket\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: netdata@unix(/var/lib/mysql/mysql.sock)/\n\n```\n{% /details %}\n##### Connection with password\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: netconfig:password@tcp(127.0.0.1:3306)/\n\n```\n{% /details %}\n##### my.cnf\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n my.cnf: '/etc/my.cnf'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: netdata@tcp(127.0.0.1:3306)/\n\n - name: remote\n dsn: netconfig:password@tcp(203.0.113.0:3306)/\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `mysql` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
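Before wiring a DSN into a job, it can help to sanity-check the `netdata` account from a shell. A minimal sketch, assuming the `mysql` command-line client is installed on the same host (run via the local socket, which matches the 'netdata'@'localhost' account created above):\n\n```bash\n# List the effective grants of the passwordless netdata account.\nmysql -u netdata -e 'SHOW GRANTS FOR CURRENT_USER()'\n\n# PROCESS and REPLICATION CLIENT are the privileges the collector relies on\n# for the status and replication queries listed in the overview.\nmysql -u netdata -e 'SHOW GLOBAL STATUS' | head\n```\n\nIf both commands succeed without a password prompt, the DSNs used in the examples above should work as well.\n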
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m mysql\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `mysql` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep mysql\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep mysql /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep mysql\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ mysql_10s_slow_queries ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.queries | number of slow queries in the last 10 seconds |\n| [ mysql_10s_table_locks_immediate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table immediate locks in the last 10 seconds |\n| [ mysql_10s_table_locks_waited ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table waited locks in the last 10 seconds |\n| [ mysql_10s_waited_locks_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | ratio of waited table locks over the last 10 seconds |\n| [ mysql_connections ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.connections_active | client connections utilization |\n| [ mysql_replication ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_status | replication status (0: stopped, 1: working) |\n| [ mysql_replication_lag ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_behind | difference between the timestamp of the latest transaction processed by the SQL thread and the timestamp of the same transaction when it was processed on the master |\n| [ mysql_galera_cluster_size_max_2m ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | maximum galera cluster size in the last 2 minutes starting one minute ago |\n| [ mysql_galera_cluster_size ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | current galera cluster size, compared to the maximum size in the last 2 minutes |\n| [ 
mysql_galera_cluster_state_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Donor/Desynced or Joined |\n| [ mysql_galera_cluster_state_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Undefined or Joining or Error |\n| [ mysql_galera_cluster_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_status | galera node is part of a nonoperational component. This occurs in cases of multiple membership changes that result in a loss of Quorum or in cases of split-brain situations. |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per MariaDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.net | in, out | kilobits/s | \u2022 | \u2022 | \u2022 |\n| mysql.queries | queries, questions, slow_queries | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.queries_type | select, delete, update, insert, replace | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.handlers | commit, delete, prepare, read_first, read_key, read_next, read_prev, read_rnd, read_rnd_next, rollback, savepoint, savepointrollback, update, write | handlers/s | \u2022 | \u2022 | \u2022 |\n| mysql.table_open_cache_overflows | open_cache | overflows/s | \u2022 | \u2022 | \u2022 |\n| mysql.table_locks | immediate, waited | locks/s | \u2022 | \u2022 | \u2022 |\n| mysql.join_issues | full_join, full_range_join, range, range_check, scan | joins/s | \u2022 | \u2022 | \u2022 |\n| mysql.sort_issues | merge_passes, range, scan | issues/s | \u2022 | \u2022 | \u2022 |\n| mysql.tmp | disk_tables, files, tables | events/s | \u2022 | \u2022 | \u2022 |\n| mysql.connections | all, aborted | connections/s | \u2022 | \u2022 | \u2022 |\n| mysql.connections_active | active, limit, max_active | connections | \u2022 | \u2022 | \u2022 |\n| mysql.threads | connected, cached, running | threads | \u2022 | \u2022 | \u2022 |\n| mysql.threads_created | created | threads/s | \u2022 | \u2022 | \u2022 |\n| mysql.thread_cache_misses | misses | misses | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io | read, write | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io_ops | reads, writes, fsyncs | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io_pending_ops | reads, writes, fsyncs | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_log | waits, write_requests, writes | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_cur_row_lock | current waits | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_rows | inserted, read, updated, deleted | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_pages | data, dirty, free, misc, total | pages | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_pages_flushed | flush_pages | requests/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_bytes | data, dirty | MiB | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_read_ahead | all, evicted | pages/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_read_ahead_rnd | read-ahead | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_ops | 
disk_reads, wait_free | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log | fsyncs, writes | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log_fsync_writes | fsyncs | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log_io | write | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_deadlocks | deadlocks | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.files | files | files | \u2022 | \u2022 | \u2022 |\n| mysql.files_rate | files | files/s | \u2022 | \u2022 | \u2022 |\n| mysql.connection_errors | accept, internal, max, peer_addr, select, tcpwrap | errors/s | \u2022 | \u2022 | \u2022 |\n| mysql.opened_tables | tables | tables/s | \u2022 | \u2022 | \u2022 |\n| mysql.open_tables | cache, tables | tables | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_fetch_query_duration | duration | milliseconds | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_queries_count | system, user | queries | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_longest_query_duration | duration | seconds | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_ops | hits, lowmem_prunes, inserts, not_cached | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.qcache | queries | queries | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_freemem | free | MiB | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_memblocks | free, total | blocks | \u2022 | \u2022 | \u2022 |\n| mysql.galera_writesets | rx, tx | writesets/s | \u2022 | \u2022 | \u2022 |\n| mysql.galera_bytes | rx, tx | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.galera_queue | rx, tx | writesets | \u2022 | \u2022 | \u2022 |\n| mysql.galera_conflicts | bf_aborts, cert_fails | transactions | \u2022 | \u2022 | \u2022 |\n| mysql.galera_flow_control | paused | ms | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_status | primary, non_primary, disconnected | status | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_state | undefined, joining, donor, joined, synced, error | state | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_size | nodes | nodes | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_weight | weight | weight | \u2022 | \u2022 | \u2022 |\n| mysql.galera_connected | connected | boolean | \u2022 | \u2022 | \u2022 |\n| mysql.galera_ready | ready | boolean | \u2022 | \u2022 | \u2022 |\n| mysql.galera_open_transactions | open | transactions | \u2022 | \u2022 | \u2022 |\n| mysql.galera_thread_count | threads | threads | \u2022 | \u2022 | \u2022 |\n| mysql.key_blocks | unused, used, not_flushed | blocks | \u2022 | \u2022 | \u2022 |\n| mysql.key_requests | reads, writes | requests/s | \u2022 | \u2022 | \u2022 |\n| mysql.key_disk_ops | reads, writes | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.binlog_cache | disk, all | transactions/s | \u2022 | \u2022 | \u2022 |\n| mysql.binlog_stmt_cache | disk, all | statements/s | \u2022 | \u2022 | \u2022 |\n\n### Per connection\n\nThese metrics refer to the replication connection.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.slave_behind | seconds | seconds | \u2022 | \u2022 | \u2022 |\n| mysql.slave_status | sql_running, io_running | boolean | \u2022 | \u2022 | \u2022 |\n\n### Per user\n\nThese metrics refer to the MySQL user.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| user | username |\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.userstats_cpu | used | 
percentage | | \u2022 | \u2022 |\n| mysql.userstats_rows | read, sent, updated, inserted, deleted | operations/s | | \u2022 | \u2022 |\n| mysql.userstats_commands | select, update, other | commands/s | | \u2022 | \u2022 |\n| mysql.userstats_denied_commands | denied | commands/s | | \u2022 | \u2022 |\n| mysql.userstats_created_transactions | commit, rollback | transactions/s | | \u2022 | \u2022 |\n| mysql.userstats_binlog_written | written | B/s | | \u2022 | \u2022 |\n| mysql.userstats_empty_queries | empty | queries/s | | \u2022 | \u2022 |\n| mysql.userstats_connections | created | connections/s | | \u2022 | \u2022 |\n| mysql.userstats_lost_connections | lost | connections/s | | \u2022 | \u2022 |\n| mysql.userstats_denied_connections | denied | connections/s | | \u2022 | \u2022 |\n\n", @@ -5309,7 +5382,7 @@ export const integrations = [ "most_popular": true }, "overview": "# MySQL\n\nPlugin: go.d.plugin\nModule: mysql\n\n## Overview\n\nThis collector monitors the health and performance of MySQL servers and collects general statistics, replication and user metrics.\n\n\nIt connects to the MySQL instance via a TCP or UNIX socket and executes the following commands:\n\nExecuted queries:\n\n- `SELECT VERSION();`\n- `SHOW GLOBAL STATUS;`\n- `SHOW GLOBAL VARIABLES;`\n- `SHOW SLAVE STATUS;` or `SHOW ALL SLAVES STATUS;` (MariaDBv10.2+) or `SHOW REPLICA STATUS;` (MySQL 8.0.22+)\n- `SHOW USER_STATISTICS;` (MariaDBv10.1.1+)\n- `SELECT TIME,USER FROM INFORMATION_SCHEMA.PROCESSLIST;`\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by trying to connect as root and netdata using known MySQL TCP sockets:\n\n- 127.0.0.1:3306\n- \"[::1]:3306\"\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Create netdata user\n\nA user account should have the\nfollowing [permissions](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html):\n\n- [`USAGE`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_usage)\n- [`REPLICATION CLIENT`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_replication-client)\n- [`PROCESS`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_process)\n\nTo create the `netdata` user with these permissions, execute the following in the MySQL shell:\n\n```mysql\nCREATE USER 'netdata'@'localhost';\nGRANT USAGE, REPLICATION CLIENT, PROCESS ON *.* TO 'netdata'@'localhost';\nFLUSH PRIVILEGES;\n```\n\nThe `netdata` user will have the ability to connect to the MySQL server on localhost without a password. 
It will only\nbe able to gather statistics without being able to alter or affect operations in any way.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/mysql.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/mysql.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | MySQL server DSN (Data Source Name). See [DSN syntax](https://github.com/go-sql-driver/mysql#dsn-data-source-name). | root@tcp(localhost:3306)/ | yes |\n| my.cnf | Specifies the my.cnf file to read the connection settings from the [client] section. | | no |\n| timeout | Query timeout in seconds. | 1 | no |\n\n{% /details %}\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: netdata@tcp(127.0.0.1:3306)/\n\n```\n{% /details %}\n##### Unix socket\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: netdata@unix(/var/lib/mysql/mysql.sock)/\n\n```\n{% /details %}\n##### Connection with password\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: netconfig:password@tcp(127.0.0.1:3306)/\n\n```\n{% /details %}\n##### my.cnf\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n my.cnf: '/etc/my.cnf'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: netdata@tcp(127.0.0.1:3306)/\n\n - name: remote\n dsn: netconfig:password@tcp(203.0.113.0:3306)/\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Create netdata user\n\nA user account should have the\nfollowing [permissions](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html):\n\n- [`USAGE`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_usage)\n- [`REPLICATION CLIENT`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_replication-client)\n- [`PROCESS`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_process)\n\nTo create the `netdata` user with these permissions, execute the following in the MySQL shell:\n\n```mysql\nCREATE USER 'netdata'@'localhost';\nGRANT USAGE, REPLICATION CLIENT, PROCESS ON *.* TO 'netdata'@'localhost';\nFLUSH PRIVILEGES;\n```\n\nThe `netdata` user will have the ability to connect to the MySQL server on localhost without a password. 
It will only\nbe able to gather statistics without being able to alter or affect operations in any way.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/mysql.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/mysql.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | MySQL server DSN (Data Source Name). See [DSN syntax](https://github.com/go-sql-driver/mysql#dsn-data-source-name). | root@tcp(localhost:3306)/ | yes |\n| my.cnf | Specifies the my.cnf file to read the connection settings from the [client] section. | | no |\n| timeout | Query timeout in seconds. | 1 | no |\n\n{% /details %}\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: netdata@tcp(127.0.0.1:3306)/\n\n```\n{% /details %}\n##### Unix socket\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: netdata@unix(/var/lib/mysql/mysql.sock)/\n\n```\n{% /details %}\n##### Connection with password\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: netconfig:password@tcp(127.0.0.1:3306)/\n\n```\n{% /details %}\n##### my.cnf\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n my.cnf: '/etc/my.cnf'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: netdata@tcp(127.0.0.1:3306)/\n\n - name: remote\n dsn: netconfig:password@tcp(203.0.113.0:3306)/\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `mysql` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
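As a quick pre-flight check for the `dsn` option, you can confirm that the endpoint you plan to monitor actually answers before the collector tries it. A small sketch, assuming the `mysqladmin` client tool is available and the passwordless `netdata` account from the prerequisites exists:\n\n```bash\n# Prints 'mysqld is alive' when the TCP endpoint from the DSN is reachable\n# and the account is allowed to connect.\nmysqladmin -u netdata -h 127.0.0.1 -P 3306 ping\n```\n\nA failure here points at connectivity or account problems rather than at the collector configuration itself.\n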
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m mysql\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `mysql` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep mysql\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep mysql /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep mysql\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ mysql_10s_slow_queries ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.queries | number of slow queries in the last 10 seconds |\n| [ mysql_10s_table_locks_immediate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table immediate locks in the last 10 seconds |\n| [ mysql_10s_table_locks_waited ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table waited locks in the last 10 seconds |\n| [ mysql_10s_waited_locks_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | ratio of waited table locks over the last 10 seconds |\n| [ mysql_connections ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.connections_active | client connections utilization |\n| [ mysql_replication ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_status | replication status (0: stopped, 1: working) |\n| [ mysql_replication_lag ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_behind | difference between the timestamp of the latest transaction processed by the SQL thread and the timestamp of the same transaction when it was processed on the master |\n| [ mysql_galera_cluster_size_max_2m ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | maximum galera cluster size in the last 2 minutes starting one minute ago |\n| [ mysql_galera_cluster_size ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | current galera cluster size, compared to the maximum size in the last 2 minutes |\n| [ 
mysql_galera_cluster_state_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Donor/Desynced or Joined |\n| [ mysql_galera_cluster_state_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Undefined or Joining or Error |\n| [ mysql_galera_cluster_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_status | galera node is part of a nonoperational component. This occurs in cases of multiple membership changes that result in a loss of Quorum or in cases of split-brain situations. |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per MariaDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.net | in, out | kilobits/s | \u2022 | \u2022 | \u2022 |\n| mysql.queries | queries, questions, slow_queries | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.queries_type | select, delete, update, insert, replace | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.handlers | commit, delete, prepare, read_first, read_key, read_next, read_prev, read_rnd, read_rnd_next, rollback, savepoint, savepointrollback, update, write | handlers/s | \u2022 | \u2022 | \u2022 |\n| mysql.table_open_cache_overflows | open_cache | overflows/s | \u2022 | \u2022 | \u2022 |\n| mysql.table_locks | immediate, waited | locks/s | \u2022 | \u2022 | \u2022 |\n| mysql.join_issues | full_join, full_range_join, range, range_check, scan | joins/s | \u2022 | \u2022 | \u2022 |\n| mysql.sort_issues | merge_passes, range, scan | issues/s | \u2022 | \u2022 | \u2022 |\n| mysql.tmp | disk_tables, files, tables | events/s | \u2022 | \u2022 | \u2022 |\n| mysql.connections | all, aborted | connections/s | \u2022 | \u2022 | \u2022 |\n| mysql.connections_active | active, limit, max_active | connections | \u2022 | \u2022 | \u2022 |\n| mysql.threads | connected, cached, running | threads | \u2022 | \u2022 | \u2022 |\n| mysql.threads_created | created | threads/s | \u2022 | \u2022 | \u2022 |\n| mysql.thread_cache_misses | misses | misses | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io | read, write | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io_ops | reads, writes, fsyncs | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io_pending_ops | reads, writes, fsyncs | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_log | waits, write_requests, writes | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_cur_row_lock | current waits | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_rows | inserted, read, updated, deleted | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_pages | data, dirty, free, misc, total | pages | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_pages_flushed | flush_pages | requests/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_bytes | data, dirty | MiB | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_read_ahead | all, evicted | pages/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_read_ahead_rnd | read-ahead | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_ops | 
disk_reads, wait_free | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log | fsyncs, writes | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log_fsync_writes | fsyncs | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log_io | write | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_deadlocks | deadlocks | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.files | files | files | \u2022 | \u2022 | \u2022 |\n| mysql.files_rate | files | files/s | \u2022 | \u2022 | \u2022 |\n| mysql.connection_errors | accept, internal, max, peer_addr, select, tcpwrap | errors/s | \u2022 | \u2022 | \u2022 |\n| mysql.opened_tables | tables | tables/s | \u2022 | \u2022 | \u2022 |\n| mysql.open_tables | cache, tables | tables | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_fetch_query_duration | duration | milliseconds | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_queries_count | system, user | queries | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_longest_query_duration | duration | seconds | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_ops | hits, lowmem_prunes, inserts, not_cached | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.qcache | queries | queries | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_freemem | free | MiB | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_memblocks | free, total | blocks | \u2022 | \u2022 | \u2022 |\n| mysql.galera_writesets | rx, tx | writesets/s | \u2022 | \u2022 | \u2022 |\n| mysql.galera_bytes | rx, tx | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.galera_queue | rx, tx | writesets | \u2022 | \u2022 | \u2022 |\n| mysql.galera_conflicts | bf_aborts, cert_fails | transactions | \u2022 | \u2022 | \u2022 |\n| mysql.galera_flow_control | paused | ms | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_status | primary, non_primary, disconnected | status | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_state | undefined, joining, donor, joined, synced, error | state | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_size | nodes | nodes | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_weight | weight | weight | \u2022 | \u2022 | \u2022 |\n| mysql.galera_connected | connected | boolean | \u2022 | \u2022 | \u2022 |\n| mysql.galera_ready | ready | boolean | \u2022 | \u2022 | \u2022 |\n| mysql.galera_open_transactions | open | transactions | \u2022 | \u2022 | \u2022 |\n| mysql.galera_thread_count | threads | threads | \u2022 | \u2022 | \u2022 |\n| mysql.key_blocks | unused, used, not_flushed | blocks | \u2022 | \u2022 | \u2022 |\n| mysql.key_requests | reads, writes | requests/s | \u2022 | \u2022 | \u2022 |\n| mysql.key_disk_ops | reads, writes | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.binlog_cache | disk, all | transactions/s | \u2022 | \u2022 | \u2022 |\n| mysql.binlog_stmt_cache | disk, all | statements/s | \u2022 | \u2022 | \u2022 |\n\n### Per connection\n\nThese metrics refer to the replication connection.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.slave_behind | seconds | seconds | \u2022 | \u2022 | \u2022 |\n| mysql.slave_status | sql_running, io_running | boolean | \u2022 | \u2022 | \u2022 |\n\n### Per user\n\nThese metrics refer to the MySQL user.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| user | username |\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.userstats_cpu | used | 
percentage | | \u2022 | \u2022 |\n| mysql.userstats_rows | read, sent, updated, inserted, deleted | operations/s | | \u2022 | \u2022 |\n| mysql.userstats_commands | select, update, other | commands/s | | \u2022 | \u2022 |\n| mysql.userstats_denied_commands | denied | commands/s | | \u2022 | \u2022 |\n| mysql.userstats_created_transactions | commit, rollback | transactions/s | | \u2022 | \u2022 |\n| mysql.userstats_binlog_written | written | B/s | | \u2022 | \u2022 |\n| mysql.userstats_empty_queries | empty | queries/s | | \u2022 | \u2022 |\n| mysql.userstats_connections | created | connections/s | | \u2022 | \u2022 |\n| mysql.userstats_lost_connections | lost | connections/s | | \u2022 | \u2022 |\n| mysql.userstats_denied_connections | denied | connections/s | | \u2022 | \u2022 |\n\n", @@ -5359,7 +5432,7 @@ export const integrations = [ "most_popular": false }, "overview": "# Percona MySQL\n\nPlugin: go.d.plugin\nModule: mysql\n\n## Overview\n\nThis collector monitors the health and performance of MySQL servers and collects general statistics, replication and user metrics.\n\n\nIt connects to the MySQL instance via a TCP or UNIX socket and executes the following commands:\n\nExecuted queries:\n\n- `SELECT VERSION();`\n- `SHOW GLOBAL STATUS;`\n- `SHOW GLOBAL VARIABLES;`\n- `SHOW SLAVE STATUS;` or `SHOW ALL SLAVES STATUS;` (MariaDBv10.2+) or `SHOW REPLICA STATUS;` (MySQL 8.0.22+)\n- `SHOW USER_STATISTICS;` (MariaDBv10.1.1+)\n- `SELECT TIME,USER FROM INFORMATION_SCHEMA.PROCESSLIST;`\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by trying to connect as root and netdata using known MySQL TCP sockets:\n\n- 127.0.0.1:3306\n- \"[::1]:3306\"\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Create netdata user\n\nA user account should have the\nfollowing [permissions](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html):\n\n- [`USAGE`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_usage)\n- [`REPLICATION CLIENT`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_replication-client)\n- [`PROCESS`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_process)\n\nTo create the `netdata` user with these permissions, execute the following in the MySQL shell:\n\n```mysql\nCREATE USER 'netdata'@'localhost';\nGRANT USAGE, REPLICATION CLIENT, PROCESS ON *.* TO 'netdata'@'localhost';\nFLUSH PRIVILEGES;\n```\n\nThe `netdata` user will have the ability to connect to the MySQL server on localhost without a password. 
It will only\nbe able to gather statistics without being able to alter or affect operations in any way.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/mysql.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/mysql.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | MySQL server DSN (Data Source Name). See [DSN syntax](https://github.com/go-sql-driver/mysql#dsn-data-source-name). | root@tcp(localhost:3306)/ | yes |\n| my.cnf | Specifies the my.cnf file to read the connection settings from the [client] section. | | no |\n| timeout | Query timeout in seconds. | 1 | no |\n\n{% /details %}\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: netdata@tcp(127.0.0.1:3306)/\n\n```\n{% /details %}\n##### Unix socket\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: netdata@unix(/var/lib/mysql/mysql.sock)/\n\n```\n{% /details %}\n##### Connection with password\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: netconfig:password@tcp(127.0.0.1:3306)/\n\n```\n{% /details %}\n##### my.cnf\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n my.cnf: '/etc/my.cnf'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: netdata@tcp(127.0.0.1:3306)/\n\n - name: remote\n dsn: netconfig:password@tcp(203.0.113.0:3306)/\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Create netdata user\n\nA user account should have the\nfollowing [permissions](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html):\n\n- [`USAGE`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_usage)\n- [`REPLICATION CLIENT`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_replication-client)\n- [`PROCESS`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_process)\n\nTo create the `netdata` user with these permissions, execute the following in the MySQL shell:\n\n```mysql\nCREATE USER 'netdata'@'localhost';\nGRANT USAGE, REPLICATION CLIENT, PROCESS ON *.* TO 'netdata'@'localhost';\nFLUSH PRIVILEGES;\n```\n\nThe `netdata` user will have the ability to connect to the MySQL server on localhost without a password. 
It will only\nbe able to gather statistics without being able to alter or affect operations in any way.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/mysql.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/mysql.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | MySQL server DSN (Data Source Name). See [DSN syntax](https://github.com/go-sql-driver/mysql#dsn-data-source-name). | root@tcp(localhost:3306)/ | yes |\n| my.cnf | Specifies the my.cnf file to read the connection settings from the [client] section. | | no |\n| timeout | Query timeout in seconds. | 1 | no |\n\n{% /details %}\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: netdata@tcp(127.0.0.1:3306)/\n\n```\n{% /details %}\n##### Unix socket\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: netdata@unix(/var/lib/mysql/mysql.sock)/\n\n```\n{% /details %}\n##### Connection with password\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: netconfig:password@tcp(127.0.0.1:3306)/\n\n```\n{% /details %}\n##### my.cnf\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n my.cnf: '/etc/my.cnf'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: netdata@tcp(127.0.0.1:3306)/\n\n - name: remote\n dsn: netconfig:password@tcp(203.0.113.0:3306)/\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `mysql` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m mysql\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `mysql` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep mysql\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep mysql /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep mysql\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ mysql_10s_slow_queries ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.queries | number of slow queries in the last 10 seconds |\n| [ mysql_10s_table_locks_immediate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table immediate locks in the last 10 seconds |\n| [ mysql_10s_table_locks_waited ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table waited locks in the last 10 seconds |\n| [ mysql_10s_waited_locks_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | ratio of waited table locks over the last 10 seconds |\n| [ mysql_connections ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.connections_active | client connections utilization |\n| [ mysql_replication ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_status | replication status (0: stopped, 1: working) |\n| [ mysql_replication_lag ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_behind | difference between the timestamp of the latest transaction processed by the SQL thread and the timestamp of the same transaction when it was processed on the master |\n| [ mysql_galera_cluster_size_max_2m ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | maximum galera cluster size in the last 2 minutes starting one minute ago |\n| [ mysql_galera_cluster_size ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | current galera cluster size, compared to the maximum size in the last 2 minutes |\n| [ 
mysql_galera_cluster_state_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Donor/Desynced or Joined |\n| [ mysql_galera_cluster_state_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Undefined or Joining or Error |\n| [ mysql_galera_cluster_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_status | galera node is part of a nonoperational component. This occurs in cases of multiple membership changes that result in a loss of Quorum or in cases of split-brain situations. |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per MariaDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.net | in, out | kilobits/s | \u2022 | \u2022 | \u2022 |\n| mysql.queries | queries, questions, slow_queries | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.queries_type | select, delete, update, insert, replace | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.handlers | commit, delete, prepare, read_first, read_key, read_next, read_prev, read_rnd, read_rnd_next, rollback, savepoint, savepointrollback, update, write | handlers/s | \u2022 | \u2022 | \u2022 |\n| mysql.table_open_cache_overflows | open_cache | overflows/s | \u2022 | \u2022 | \u2022 |\n| mysql.table_locks | immediate, waited | locks/s | \u2022 | \u2022 | \u2022 |\n| mysql.join_issues | full_join, full_range_join, range, range_check, scan | joins/s | \u2022 | \u2022 | \u2022 |\n| mysql.sort_issues | merge_passes, range, scan | issues/s | \u2022 | \u2022 | \u2022 |\n| mysql.tmp | disk_tables, files, tables | events/s | \u2022 | \u2022 | \u2022 |\n| mysql.connections | all, aborted | connections/s | \u2022 | \u2022 | \u2022 |\n| mysql.connections_active | active, limit, max_active | connections | \u2022 | \u2022 | \u2022 |\n| mysql.threads | connected, cached, running | threads | \u2022 | \u2022 | \u2022 |\n| mysql.threads_created | created | threads/s | \u2022 | \u2022 | \u2022 |\n| mysql.thread_cache_misses | misses | misses | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io | read, write | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io_ops | reads, writes, fsyncs | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io_pending_ops | reads, writes, fsyncs | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_log | waits, write_requests, writes | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_cur_row_lock | current waits | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_rows | inserted, read, updated, deleted | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_pages | data, dirty, free, misc, total | pages | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_pages_flushed | flush_pages | requests/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_bytes | data, dirty | MiB | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_read_ahead | all, evicted | pages/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_read_ahead_rnd | read-ahead | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_ops | 
disk_reads, wait_free | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log | fsyncs, writes | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log_fsync_writes | fsyncs | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log_io | write | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_deadlocks | deadlocks | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.files | files | files | \u2022 | \u2022 | \u2022 |\n| mysql.files_rate | files | files/s | \u2022 | \u2022 | \u2022 |\n| mysql.connection_errors | accept, internal, max, peer_addr, select, tcpwrap | errors/s | \u2022 | \u2022 | \u2022 |\n| mysql.opened_tables | tables | tables/s | \u2022 | \u2022 | \u2022 |\n| mysql.open_tables | cache, tables | tables | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_fetch_query_duration | duration | milliseconds | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_queries_count | system, user | queries | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_longest_query_duration | duration | seconds | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_ops | hits, lowmem_prunes, inserts, not_cached | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.qcache | queries | queries | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_freemem | free | MiB | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_memblocks | free, total | blocks | \u2022 | \u2022 | \u2022 |\n| mysql.galera_writesets | rx, tx | writesets/s | \u2022 | \u2022 | \u2022 |\n| mysql.galera_bytes | rx, tx | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.galera_queue | rx, tx | writesets | \u2022 | \u2022 | \u2022 |\n| mysql.galera_conflicts | bf_aborts, cert_fails | transactions | \u2022 | \u2022 | \u2022 |\n| mysql.galera_flow_control | paused | ms | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_status | primary, non_primary, disconnected | status | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_state | undefined, joining, donor, joined, synced, error | state | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_size | nodes | nodes | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_weight | weight | weight | \u2022 | \u2022 | \u2022 |\n| mysql.galera_connected | connected | boolean | \u2022 | \u2022 | \u2022 |\n| mysql.galera_ready | ready | boolean | \u2022 | \u2022 | \u2022 |\n| mysql.galera_open_transactions | open | transactions | \u2022 | \u2022 | \u2022 |\n| mysql.galera_thread_count | threads | threads | \u2022 | \u2022 | \u2022 |\n| mysql.key_blocks | unused, used, not_flushed | blocks | \u2022 | \u2022 | \u2022 |\n| mysql.key_requests | reads, writes | requests/s | \u2022 | \u2022 | \u2022 |\n| mysql.key_disk_ops | reads, writes | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.binlog_cache | disk, all | transactions/s | \u2022 | \u2022 | \u2022 |\n| mysql.binlog_stmt_cache | disk, all | statements/s | \u2022 | \u2022 | \u2022 |\n\n### Per connection\n\nThese metrics refer to the replication connection.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.slave_behind | seconds | seconds | \u2022 | \u2022 | \u2022 |\n| mysql.slave_status | sql_running, io_running | boolean | \u2022 | \u2022 | \u2022 |\n\n### Per user\n\nThese metrics refer to the MySQL user.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| user | username |\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.userstats_cpu | used | 
percentage | | \u2022 | \u2022 |\n| mysql.userstats_rows | read, sent, updated, inserted, deleted | operations/s | | \u2022 | \u2022 |\n| mysql.userstats_commands | select, update, other | commands/s | | \u2022 | \u2022 |\n| mysql.userstats_denied_commands | denied | commands/s | | \u2022 | \u2022 |\n| mysql.userstats_created_transactions | commit, rollback | transactions/s | | \u2022 | \u2022 |\n| mysql.userstats_binlog_written | written | B/s | | \u2022 | \u2022 |\n| mysql.userstats_empty_queries | empty | queries/s | | \u2022 | \u2022 |\n| mysql.userstats_connections | created | connections/s | | \u2022 | \u2022 |\n| mysql.userstats_lost_connections | lost | connections/s | | \u2022 | \u2022 |\n| mysql.userstats_denied_connections | denied | connections/s | | \u2022 | \u2022 |\n\n", @@ -5417,7 +5490,7 @@ export const integrations = [ "most_popular": true }, "overview": "# NGINX\n\nPlugin: go.d.plugin\nModule: nginx\n\n## Overview\n\nThis collector monitors the activity and performance of NGINX servers, and collects metrics such as the number of connections, their status, and client requests.\n\n\nIt sends HTTP requests to the NGINX location [stub-status](https://nginx.org/en/docs/http/ngx_http_stub_status_module.html), which is a built-in location that provides metrics about the NGINX server.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects NGINX instances running on localhost that are listening on port 80.\nOn startup, it tries to collect metrics from:\n\n- http://127.0.0.1/basic_status\n- http://localhost/stub_status\n- http://127.0.0.1/stub_status\n- http://127.0.0.1/nginx_status\n- http://127.0.0.1/status\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Enable status support\n\nConfigure [ngx_http_stub_status_module](https://nginx.org/en/docs/http/ngx_http_stub_status_module.html).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/nginx.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nginx.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/stub_status | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. 
| GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/stub_status\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/stub_status\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nNGINX with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/stub_status\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/stub_status\n\n - name: remote\n url: http://192.0.2.1/stub_status\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Enable status support\n\nConfigure [ngx_http_stub_status_module](https://nginx.org/en/docs/http/ngx_http_stub_status_module.html).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/nginx.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nginx.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/stub_status | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. 
| no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/stub_status\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/stub_status\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nNGINX with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1/stub_status\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/stub_status\n\n - name: remote\n url: http://192.0.2.1/stub_status\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `nginx` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m nginx\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `nginx` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep nginx\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep nginx /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep nginx\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per NGINX instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginx.connections | active | connections |\n| nginx.connections_status | reading, writing, idle | connections |\n| nginx.connections_accepted_handled | accepted, handled | connections/s |\n| nginx.requests | requests | requests/s |\n\n", @@ -5458,7 +5531,7 @@ export const integrations = [ "most_popular": false }, "overview": "# NGINX Plus\n\nPlugin: go.d.plugin\nModule: nginxplus\n\n## Overview\n\nThis collector monitors NGINX Plus servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Config API\n\nTo configure API, see the [official documentation](https://docs.nginx.com/nginx/admin-guide/monitoring/live-activity-monitoring/#configuring-the-api).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/nginxplus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nginxplus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1\n\n```\n{% /details %}\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nNGINX Plus with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1\n\n - name: remote\n url: http://192.0.2.1\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Config API\n\nTo configure API, see the [official documentation](https://docs.nginx.com/nginx/admin-guide/monitoring/live-activity-monitoring/#configuring-the-api).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/nginxplus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nginxplus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1\n\n```\n{% /details %}\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nNGINX Plus with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1\n\n - name: remote\n url: http://192.0.2.1\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `nginxplus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m nginxplus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `nginxplus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep nginxplus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep nginxplus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep nginxplus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per NGINX Plus instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.client_connections_rate | accepted, dropped | connections/s |\n| nginxplus.client_connections_count | active, idle | connections |\n| nginxplus.ssl_handshakes_rate | successful, failed | handshakes/s |\n| nginxplus.ssl_handshakes_failures_rate | no_common_protocol, no_common_cipher, timeout, peer_rejected_cert | failures/s |\n| nginxplus.ssl_verification_errors_rate | no_cert, expired_cert, revoked_cert, hostname_mismatch, other | errors/s |\n| nginxplus.ssl_session_reuses_rate | ssl_session | reuses/s |\n| nginxplus.http_requests_rate | requests | requests/s |\n| nginxplus.http_requests_count | requests | requests |\n| nginxplus.uptime | uptime | seconds |\n\n### Per http server zone\n\nThese metrics refer to the HTTP server zone.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| http_server_zone | HTTP server zone name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.http_server_zone_requests_rate | requests | requests/s |\n| nginxplus.http_server_zone_responses_per_code_class_rate | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s |\n| nginxplus.http_server_zone_traffic_rate | received, sent | bytes/s |\n| nginxplus.http_server_zone_requests_processing_count | processing | requests |\n| nginxplus.http_server_zone_requests_discarded_rate | discarded | requests/s |\n\n### Per http location zone\n\nThese metrics refer to the HTTP location zone.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| http_location_zone | HTTP location zone name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.http_location_zone_requests_rate | requests | requests/s |\n| nginxplus.http_location_zone_responses_per_code_class_rate | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s |\n| nginxplus.http_location_zone_traffic_rate | received, sent | bytes/s |\n| nginxplus.http_location_zone_requests_discarded_rate | discarded | requests/s |\n\n### Per http upstream\n\nThese metrics refer to the HTTP upstream.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| http_upstream_name | HTTP upstream name |\n| http_upstream_zone | HTTP upstream zone name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.http_upstream_peers_count | peers | peers |\n| nginxplus.http_upstream_zombies_count | zombie | servers |\n| nginxplus.http_upstream_keepalive_count | keepalive | connections |\n\n### Per http upstream server\n\nThese metrics refer to the HTTP upstream server.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| http_upstream_name | HTTP upstream name |\n| http_upstream_zone | HTTP upstream zone name |\n| http_upstream_server_address | HTTP upstream server address (e.g. 
127.0.0.1:81) |\n| http_upstream_server_name | HTTP upstream server name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.http_upstream_server_requests_rate | requests | requests/s |\n| nginxplus.http_upstream_server_responses_per_code_class_rate | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s |\n| nginxplus.http_upstream_server_response_time | response | milliseconds |\n| nginxplus.http_upstream_server_response_header_time | header | milliseconds |\n| nginxplus.http_upstream_server_traffic_rate | received, sent | bytes/s |\n| nginxplus.http_upstream_server_state | up, down, draining, unavail, checking, unhealthy | state |\n| nginxplus.http_upstream_server_connections_count | active | connections |\n| nginxplus.http_upstream_server_downtime | downtime | seconds |\n\n### Per http cache\n\nThese metrics refer to the HTTP cache.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| http_cache | HTTP cache name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.http_cache_state | warm, cold | state |\n| nginxplus.http_cache_iops | served, written, bypass | responses/s |\n| nginxplus.http_cache_io | served, written, bypass | bytes/s |\n| nginxplus.http_cache_size | size | bytes |\n\n### Per stream server zone\n\nThese metrics refer to the Stream server zone.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| stream_server_zone | Stream server zone name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.stream_server_zone_connections_rate | accepted | connections/s |\n| nginxplus.stream_server_zone_sessions_per_code_class_rate | 2xx, 4xx, 5xx | sessions/s |\n| nginxplus.stream_server_zone_traffic_rate | received, sent | bytes/s |\n| nginxplus.stream_server_zone_connections_processing_count | processing | connections |\n| nginxplus.stream_server_zone_connections_discarded_rate | discarded | connections/s |\n\n### Per stream upstream\n\nThese metrics refer to the Stream upstream.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| stream_upstream_name | Stream upstream name |\n| stream_upstream_zone | Stream upstream zone name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.stream_upstream_peers_count | peers | peers |\n| nginxplus.stream_upstream_zombies_count | zombie | servers |\n\n### Per stream upstream server\n\nThese metrics refer to the Stream upstream server.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| stream_upstream_name | Stream upstream name |\n| stream_upstream_zone | Stream upstream zone name |\n| stream_upstream_server_address | Stream upstream server address (e.g. 
127.0.0.1:12346) |\n| stream_upstream_server_name | Stream upstream server name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.stream_upstream_server_connections_rate | forwarded | connections/s |\n| nginxplus.stream_upstream_server_traffic_rate | received, sent | bytes/s |\n| nginxplus.stream_upstream_server_state | up, down, unavail, checking, unhealthy | state |\n| nginxplus.stream_upstream_server_downtime | downtime | seconds |\n| nginxplus.stream_upstream_server_connections_count | active | connections |\n\n### Per resolver zone\n\nThese metrics refer to the resolver zone.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| resolver_zone | resolver zone name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.resolver_zone_requests_rate | name, srv, addr | requests/s |\n| nginxplus.resolver_zone_responses_rate | noerror, formerr, servfail, nxdomain, notimp, refused, timedout, unknown | responses/s |\n\n", @@ -5467,6 +5540,47 @@ export const integrations = [ "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/nginxplus/metadata.yaml", "related_resources": "" }, + { + "meta": { + "id": "collector-go.d.plugin-nginxunit", + "plugin_name": "go.d.plugin", + "module_name": "nginxunit", + "monitored_instance": { + "name": "NGINX Unit", + "link": "https://unit.nginx.org/", + "categories": [ + "data-collection.web-servers-and-web-proxies" + ], + "icon_filename": "nginx.svg" + }, + "related_resources": { + "integrations": { + "list": [] + } + }, + "alternative_monitored_instances": [], + "info_provided_to_referring_integrations": { + "description": "" + }, + "keywords": [ + "nginx", + "unit", + "web", + "appserver", + "http" + ], + "most_popular": false + }, + "overview": "# NGINX Unit\n\nPlugin: go.d.plugin\nModule: nginxunit\n\n## Overview\n\nThis collector monitors the activity and performance of NGINX Unit servers, and collects metrics such as the number of connections, their status, and client requests.\n\n\nIt sends HTTP requests to the NGINX Unit [Status API](https://unit.nginx.org/statusapi/).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector can automatically detect NGINX Unit instances running on:\n\n- localhost that are listening on port 8000\n- within Docker containers\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Enable HTTP Control API\n\nSee [Control API](https://unit.nginx.org/controlapi/#configuration-api) documentation.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/nginxunit.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config 
go.d/nginxunit.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | The URL of the NGINX Unit HTTP Control API. | http://127.0.0.1:8000 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8000\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8000\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nNGINX Unit with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8000\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8000\n\n - name: remote\n url: http://192.0.2.1:8000\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `nginxunit` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m nginxunit\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `nginxunit` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep nginxunit\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep nginxunit /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep nginxunit\n```\n\n", + "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per NGINX Unit instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxunit.requests_rate | requests | requests/s |\n| nginxunit.connections_rate | accepted, closed | connections/s |\n| nginxunit.connections_current | active, idle | connections |\n\n", + "integration_type": "collector", + "id": "go.d.plugin-nginxunit-NGINX_Unit", + "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/nginxunit/metadata.yaml", + "related_resources": "" + }, { "meta": { "id": "collector-go.d.plugin-nginxvts", @@ -5507,7 +5621,7 @@ export const integrations = [ "most_popular": true }, "overview": "# NGINX VTS\n\nPlugin: go.d.plugin\nModule: nginxvts\n\n## Overview\n\nThis collector monitors NGINX servers with [virtual host traffic status module](https://github.com/vozlt/nginx-module-vts).\n\n\nIt sends HTTP requests to the NGINX VTS location [status](https://github.com/vozlt/nginx-module-vts#synopsis), \nwhich is a built-in location that provides metrics about the NGINX VTS server.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects NGINX instances running on localhost.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Configure nginx-vts module\n\nTo configure nginx-vts, see the [https://github.com/vozlt/nginx-module-vts#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/nginxvts.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nginxvts.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details 
open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/status/format/json | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/status/format/json\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1/status/format/json\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/status/format/json\n\n - name: remote\n url: http://192.0.2.1/status/format/json\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Configure nginx-vts module\n\nTo configure nginx-vts, see the [installation instructions](https://github.com/vozlt/nginx-module-vts#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/nginxvts.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nginxvts.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. 
| http://127.0.0.1/status/format/json | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/status/format/json\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/status/format/json\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1/status/format/json\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/status/format/json\n\n - name: remote\n url: http://192.0.2.1/status/format/json\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `nginxvts` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m nginxvts\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `nginxvts` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep nginxvts\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep nginxvts /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep nginxvts\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per NGINX VTS instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxvts.requests_total | requests | requests/s |\n| nginxvts.active_connections | active | connections |\n| nginxvts.connections_total | reading, writing, waiting, accepted, handled | connections/s |\n| nginxvts.uptime | uptime | seconds |\n| nginxvts.shm_usage | max, used | bytes |\n| nginxvts.shm_used_node | used | nodes |\n| nginxvts.server_requests_total | requests | requests/s |\n| nginxvts.server_responses_total | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s |\n| nginxvts.server_traffic_total | in, out | bytes/s |\n| nginxvts.server_cache_total | miss, bypass, expired, stale, updating, revalidated, hit, scarce | events/s |\n\n", @@ -5543,8 +5657,8 @@ export const integrations = [ }, "most_popular": false }, - "overview": "# NSD\n\nPlugin: go.d.plugin\nModule: nsd\n\n## Overview\n\nThis collector monitors NSD statistics like queries, zones, protocols, query types and more. It relies on the [`nsd-control`](https://nsd.docs.nlnetlabs.nl/en/latest/manpages/nsd-control.html) CLI tool but avoids directly executing the binary. Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment. 
This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\nExecuted commands:\n- `nsd-control stats_noreset`\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/nsd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nsd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | nsd-control binary execution timeout. | 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: nsd\n update_every: 5 # Collect logical volume statistics every 5 seconds\n\n```\n{% /details %}\n", + "overview": "# NSD\n\nPlugin: go.d.plugin\nModule: nsd\n\n## Overview\n\nThis collector monitors NSD statistics like queries, zones, protocols, query types and more. It relies on the [`nsd-control`](https://nsd.docs.nlnetlabs.nl/en/latest/manpages/nsd-control.html) CLI tool but avoids directly executing the binary. Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment. 
This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\nExecuted commands:\n- `nsd-control stats_noreset`\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n- BSD\n- macOS\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/nsd.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nsd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | nsd-control binary execution timeout. | 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: nsd\n update_every: 5 # Collect NSD statistics every 5 seconds\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `nsd` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m nsd\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `nsd` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep nsd\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep nsd /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep nsd\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per NSD instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nsd.queries | queries | queries/s |\n| nsd.queries_by_type | A, NS, MD, MF, CNAME, SOA, MB, MG, MR, NULL, WKS, PTR, HINFO, MINFO, MX, TXT, RP, AFSDB, X25, ISDN, RT, NSAP, SIG, KEY, PX, AAAA, LOC, NXT, SRV, NAPTR, KX, CERT, DNAME, OPT, APL, DS, SSHFP, IPSECKEY, RRSIG, NSEC, DNSKEY, DHCID, NSEC3, NSEC3PARAM, TLSA, SMIMEA, CDS, CDNSKEY, OPENPGPKEY, CSYNC, ZONEMD, SVCB, HTTPS, SPF, NID, L32, L64, LP, EUI48, EUI64, URI, CAA, AVC, DLV, IXFR, AXFR, MAILB, MAILA, ANY | queries/s |\n| nsd.queries_by_opcode | QUERY, IQUERY, STATUS, NOTIFY, UPDATE, OTHER | queries/s |\n| nsd.queries_by_class | IN, CS, CH, HS | queries/s |\n| nsd.queries_by_protocol | udp, udp6, tcp, tcp6, tls, tls6 | queries/s |\n| nsd.answers_by_rcode | NOERROR, FORMERR, SERVFAIL, NXDOMAIN, NOTIMP, REFUSED, YXDOMAIN, YXRRSET, NXRRSET, NOTAUTH, NOTZONE, RCODE11, RCODE12, RCODE13, RCODE14, RCODE15, BADVERS | answers/s |\n| nsd.errors | query, answer | errors/s |\n| nsd.drops | query | drops/s |\n| nsd.zones | master, slave | zones |\n| nsd.zone_transfers_requests | AXFR, IXFR | requests/s |\n| nsd.zone_transfer_memory | used | bytes |\n| nsd.database_size | disk, mem | bytes |\n| nsd.uptime | uptime | seconds |\n\n", @@ -5582,7 +5696,7 @@ export const integrations = [ "most_popular": false }, "overview": "# NTPd\n\nPlugin: go.d.plugin\nModule: ntpd\n\n## Overview\n\nThis collector monitors the system variables of the local `ntpd` daemon (optional incl. 
variables of the polled peers) using the NTP Control Message Protocol via UDP socket, similar to `ntpq`, the [standard NTP query program](https://doc.ntp.org/current-stable/ntpq.html).\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/ntpd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/ntpd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address in IP:PORT format. | 127.0.0.1:123 | yes |\n| timeout | Connection/read/write timeout. | 1 | no |\n| collect_peers | Determines whether peer metrics will be collected. | no | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:123\n\n```\n{% /details %}\n##### With peers metrics\n\nCollect peers metrics.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:123\n collect_peers: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:123\n\n - name: remote\n address: 203.0.113.0:123\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/ntpd.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/ntpd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address in IP:PORT format. | 127.0.0.1:123 | yes |\n| timeout | Connection/read/write timeout. | 1 | no |\n| collect_peers | Determines whether peer metrics will be collected. | no | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:123\n\n```\n{% /details %}\n##### With peers metrics\n\nCollect peers metrics.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:123\n collect_peers: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:123\n\n - name: remote\n address: 203.0.113.0:123\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `ntpd` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m ntpd\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `ntpd` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep ntpd\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep ntpd /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep ntpd\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per NTPd instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ntpd.sys_offset | offset | milliseconds |\n| ntpd.sys_jitter | system, clock | milliseconds |\n| ntpd.sys_frequency | frequency | ppm |\n| ntpd.sys_wander | clock | ppm |\n| ntpd.sys_rootdelay | delay | milliseconds |\n| ntpd.sys_rootdisp | dispersion | milliseconds |\n| ntpd.sys_stratum | stratum | stratum |\n| ntpd.sys_tc | current, minimum | log2 |\n| ntpd.sys_precision | precision | log2 |\n\n### Per peer\n\nThese metrics refer to the NTPd peer.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| peer_address | peer's source IP address |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ntpd.peer_offset | offset | milliseconds |\n| ntpd.peer_delay | delay | milliseconds |\n| ntpd.peer_dispersion | dispersion | milliseconds |\n| ntpd.peer_jitter | jitter | milliseconds |\n| ntpd.peer_xleave | xleave | milliseconds |\n| ntpd.peer_rootdelay | rootdelay | milliseconds |\n| ntpd.peer_rootdisp | dispersion | milliseconds |\n| ntpd.peer_stratum | stratum | stratum |\n| ntpd.peer_hmode | hmode | hmode |\n| ntpd.peer_pmode | pmode | pmode |\n| ntpd.peer_hpoll | hpoll | log2 |\n| ntpd.peer_ppoll | ppoll | log2 |\n| ntpd.peer_precision | precision | log2 |\n\n", @@ -5620,10 +5734,10 @@ export const integrations = [ "most_popular": false }, "overview": "# Nvidia GPU\n\nPlugin: go.d.plugin\nModule: nvidia_smi\n\n## Overview\n\nThis collector monitors GPUs performance metrics using\nthe [nvidia-smi](https://developer.nvidia.com/nvidia-system-management-interface) CLI tool.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/nvidia_smi.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nvidia_smi.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| binary_path | Path to nvidia_smi binary. The default is \"nvidia_smi\" and the executable is looked for in the directories specified in the PATH environment variable. | nvidia_smi | no |\n| timeout | nvidia_smi binary execution timeout. 
| 2 | no |\n| loop_mode | When enabled, `nvidia-smi` is executed continuously in a separate thread using the `-l` option. | yes | no |\n\n{% /details %}\n#### Examples\n\n##### Custom binary path\n\nThe executable is not in the directories specified in the PATH environment variable.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: nvidia_smi\n binary_path: /usr/local/sbin/nvidia_smi\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/nvidia_smi.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nvidia_smi.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| binary_path | Path to nvidia_smi binary. The default is \"nvidia_smi\" and the executable is looked for in the directories specified in the PATH environment variable. | nvidia_smi | no |\n| timeout | The maximum duration, in seconds, to wait for an `nvidia-smi` command to complete. This setting applies differently based on the collector's mode. **Loop Mode:** In loop mode, the timeout primarily determines how long to wait for the initial `nvidia-smi` execution. If the initial query takes longer than the timeout, the collector may report an error. For systems with multiple GPUs, the initial load time can sometimes be significant (e.g., 5-10 seconds). **Regular Mode:** If the collector is in regular mode, the timeout specifies how long to wait for each individual `nvidia-smi` execution. | 10 | no |\n| loop_mode | When enabled, `nvidia-smi` is executed continuously in a separate thread using the `-l` option. | yes | no |\n\n{% /details %}\n#### Examples\n\n##### Custom binary path\n\nThe executable is not in the directories specified in the PATH environment variable.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: nvidia_smi\n binary_path: /usr/local/sbin/nvidia_smi\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `nvidia_smi` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m nvidia_smi\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `nvidia_smi` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep nvidia_smi\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep nvidia_smi /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep nvidia_smi\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per gpu\n\nThese metrics refer to the GPU.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| uuid | GPU id (e.g. 00000000:00:04.0) |\n| product_name | GPU product name (e.g. NVIDIA A100-SXM4-40GB) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nvidia_smi.gpu_pcie_bandwidth_usage | rx, tx | B/s |\n| nvidia_smi.gpu_pcie_bandwidth_utilization | rx, tx | % |\n| nvidia_smi.gpu_fan_speed_perc | fan_speed | % |\n| nvidia_smi.gpu_utilization | gpu | % |\n| nvidia_smi.gpu_memory_utilization | memory | % |\n| nvidia_smi.gpu_decoder_utilization | decoder | % |\n| nvidia_smi.gpu_encoder_utilization | encoder | % |\n| nvidia_smi.gpu_frame_buffer_memory_usage | free, used, reserved | B |\n| nvidia_smi.gpu_bar1_memory_usage | free, used | B |\n| nvidia_smi.gpu_temperature | temperature | Celsius |\n| nvidia_smi.gpu_voltage | voltage | V |\n| nvidia_smi.gpu_clock_freq | graphics, video, sm, mem | MHz |\n| nvidia_smi.gpu_power_draw | power_draw | Watts |\n| nvidia_smi.gpu_performance_state | P0-P15 | state |\n| nvidia_smi.gpu_mig_mode_current_status | enabled, disabled | status |\n| nvidia_smi.gpu_mig_devices_count | mig | devices |\n\n### Per mig\n\nThese metrics refer to the Multi-Instance GPU (MIG).\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| uuid | GPU id (e.g. 00000000:00:04.0) |\n| product_name | GPU product name (e.g. NVIDIA A100-SXM4-40GB) |\n| gpu_instance_id | GPU instance id (e.g. 
1) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nvidia_smi.gpu_mig_frame_buffer_memory_usage | free, used, reserved | B |\n| nvidia_smi.gpu_mig_bar1_memory_usage | free, used | B |\n\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per gpu\n\nThese metrics refer to the GPU.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| uuid | GPU uuid (e.g. GPU-27b94a00-ed54-5c24-b1fd-1054085de32a) |\n| index | GPU index (nvidia_smi typically orders GPUs by PCI bus ID) |\n| product_name | GPU product name (e.g. NVIDIA A100-SXM4-40GB) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nvidia_smi.gpu_pcie_bandwidth_usage | rx, tx | B/s |\n| nvidia_smi.gpu_pcie_bandwidth_utilization | rx, tx | % |\n| nvidia_smi.gpu_fan_speed_perc | fan_speed | % |\n| nvidia_smi.gpu_utilization | gpu | % |\n| nvidia_smi.gpu_memory_utilization | memory | % |\n| nvidia_smi.gpu_decoder_utilization | decoder | % |\n| nvidia_smi.gpu_encoder_utilization | encoder | % |\n| nvidia_smi.gpu_frame_buffer_memory_usage | free, used, reserved | B |\n| nvidia_smi.gpu_bar1_memory_usage | free, used | B |\n| nvidia_smi.gpu_temperature | temperature | Celsius |\n| nvidia_smi.gpu_voltage | voltage | V |\n| nvidia_smi.gpu_clock_freq | graphics, video, sm, mem | MHz |\n| nvidia_smi.gpu_power_draw | power_draw | Watts |\n| nvidia_smi.gpu_performance_state | P0-P15 | state |\n| nvidia_smi.gpu_mig_mode_current_status | enabled, disabled | status |\n| nvidia_smi.gpu_mig_devices_count | mig | devices |\n\n### Per mig\n\nThese metrics refer to the Multi-Instance GPU (MIG).\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| uuid | GPU uuid (e.g. GPU-27b94a00-ed54-5c24-b1fd-1054085de32a) |\n| product_name | GPU product name (e.g. NVIDIA A100-SXM4-40GB) |\n| gpu_instance_id | GPU instance id (e.g. 1) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nvidia_smi.gpu_mig_frame_buffer_memory_usage | free, used, reserved | B |\n| nvidia_smi.gpu_mig_bar1_memory_usage | free, used | B |\n\n", "integration_type": "collector", "id": "go.d.plugin-nvidia_smi-Nvidia_GPU", "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/nvidia_smi/metadata.yaml", @@ -5655,16 +5769,53 @@ export const integrations = [ }, "most_popular": false }, - "overview": "# NVMe devices\n\nPlugin: go.d.plugin\nModule: nvme\n\n## Overview\n\nThis collector monitors the health of NVMe devices. It relies on the [`nvme`](https://github.com/linux-nvme/nvme-cli#nvme-cli) CLI tool but avoids directly executing the binary. Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment. 
This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install nvme-cli\n\nSee [Distro Support](https://github.com/linux-nvme/nvme-cli#distro-support). Install `nvme-cli` using your distribution's package manager.\n\n\n#### For Netdata running in a Docker container: grant NVMe device access\n\nYour NVMe devices need to be accessible within the Docker container for Netdata to monitor them.\n\nInclude the following option in your `docker run` command or add the device mapping in your `docker-compose.yml` file:\n\n- `docker run`\n\n ```bash\n --device '/dev/nvme0n1:/dev/nvme0n1'\n ```\n\n- `docker-compose.yml`\n\n ```yaml\n services:\n netdata:\n devices:\n - \"/dev/nvme0n1:/dev/nvme0n1\"\n ```\n\n**Note**: Replace `/dev/nvme0n1` with your actual NVMe device name.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/nvme.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nvme.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| timeout | nvme binary execution timeout. | 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: nvme\n update_every: 5 # Collect NVMe metrics every 5 seconds\n\n```\n{% /details %}\n", + "overview": "# NVMe devices\n\nPlugin: go.d.plugin\nModule: nvme\n\n## Overview\n\nThis collector monitors the health of NVMe devices. It relies on the [`nvme`](https://github.com/linux-nvme/nvme-cli#nvme-cli) CLI tool but avoids directly executing the binary. Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment. 
This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n- BSD\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install nvme-cli\n\nSee [Distro Support](https://github.com/linux-nvme/nvme-cli#distro-support). Install `nvme-cli` using your distribution's package manager.\n\n\n#### For Netdata running in a Docker container: grant NVMe device access\n\nYour NVMe devices need to be accessible within the Docker container for Netdata to monitor them.\n\nInclude the following option in your `docker run` command or add the device mapping in your `docker-compose.yml` file:\n\n- `docker run`\n\n ```bash\n --device '/dev/nvme0n1:/dev/nvme0n1'\n ```\n\n- `docker-compose.yml`\n\n ```yaml\n services:\n netdata:\n devices:\n - \"/dev/nvme0n1:/dev/nvme0n1\"\n ```\n\n**Note**: Replace `/dev/nvme0n1` with your actual NVMe device name.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/nvme.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nvme.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| timeout | nvme binary execution timeout. | 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: nvme\n update_every: 5 # Collect NVMe metrics every 5 seconds\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `nvme` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m nvme\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `nvme` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep nvme\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep nvme /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep nvme\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ nvme_device_critical_warnings_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/nvme.conf) | nvme.device_critical_warnings_state | NVMe device ${label:device} has critical warnings |\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per device\n\nThese metrics refer to the NVME device.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | NVMe device name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nvme.device_estimated_endurance_perc | used | % |\n| nvme.device_available_spare_perc | spare | % |\n| nvme.device_composite_temperature | temperature | celsius |\n| nvme.device_io_transferred_count | read, written | bytes |\n| nvme.device_power_cycles_count | power | cycles |\n| nvme.device_power_on_time | power-on | seconds |\n| nvme.device_critical_warnings_state | available_spare, temp_threshold, nvm_subsystem_reliability, read_only, volatile_mem_backup_failed, persistent_memory_read_only | state |\n| nvme.device_unsafe_shutdowns_count | unsafe | shutdowns |\n| nvme.device_media_errors_rate | media | errors/s |\n| nvme.device_error_log_entries_rate | error_log | entries/s |\n| nvme.device_warning_composite_temperature_time | wctemp | seconds |\n| nvme.device_critical_composite_temperature_time | cctemp | seconds |\n| nvme.device_thermal_mgmt_temp1_transitions_rate | temp1 | transitions/s |\n| nvme.device_thermal_mgmt_temp2_transitions_rate | temp2 | transitions/s |\n| nvme.device_thermal_mgmt_temp1_time | temp1 | seconds |\n| nvme.device_thermal_mgmt_temp2_time | temp2 | seconds |\n\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per device\n\nThese metrics refer to the NVME device.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | NVMe device name |\n| model_number | NVMe device model |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nvme.device_estimated_endurance_perc | used | % |\n| nvme.device_available_spare_perc | spare | % |\n| nvme.device_composite_temperature | temperature | celsius |\n| nvme.device_io_transferred_count | read, written | bytes |\n| nvme.device_power_cycles_count | power | cycles |\n| nvme.device_power_on_time | power-on | seconds |\n| nvme.device_critical_warnings_state | available_spare, temp_threshold, nvm_subsystem_reliability, read_only, volatile_mem_backup_failed, persistent_memory_read_only | state |\n| nvme.device_unsafe_shutdowns_count | unsafe | shutdowns |\n| nvme.device_media_errors_rate | media | errors/s |\n| nvme.device_error_log_entries_rate | error_log | entries/s |\n| nvme.device_warning_composite_temperature_time | wctemp | seconds |\n| nvme.device_critical_composite_temperature_time | cctemp | seconds |\n| nvme.device_thermal_mgmt_temp1_transitions_rate | temp1 | transitions/s |\n| nvme.device_thermal_mgmt_temp2_transitions_rate | temp2 | transitions/s |\n| nvme.device_thermal_mgmt_temp1_time | temp1 | seconds |\n| nvme.device_thermal_mgmt_temp2_time | temp2 | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-nvme-NVMe_devices", "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/nvme/metadata.yaml", "related_resources": "" }, + { + "meta": { + "plugin_name": "go.d.plugin", + "module_name": "openldap", + "monitored_instance": { + "name": "OpenLDAP", + "link": "https://www.openldap.org/", + "categories": [ + "data-collection.authentication-and-authorization" + ], + "icon_filename": "openldap.svg" + }, + "related_resources": { + "integrations": { + "list": [] + } + }, + 
"info_provided_to_referring_integrations": { + "description": "" + }, + "keywords": [ + "openldap", + "RBAC", + "Directory access" + ], + "most_popular": false + }, + "overview": "# OpenLDAP\n\nPlugin: go.d.plugin\nModule: openldap\n\n## Overview\n\nThis collector monitors OpenLDAP metrics about connections, operations, referrals and more.\n\n\nIt gathers the metrics using the [go-ldap](https://github.com/go-ldap/ldap) module and the [Monitor backend](https://www.openldap.org/doc/admin24/monitoringslapd.html) of OpenLDAP.\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector cannot auto-detect OpenLDAP instances, because credential configuration is required.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Enable the openLDAP Monitor Backend.\n\nFollow instructions from https://www.openldap.org/doc/admin24/monitoringslapd.html to activate monitoring interface.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/openldap.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/openldap.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| timeout | Timeout for establishing a connection and communication (reading and writing) in seconds. | 2 | no |\n| url | LDAP server URL. | ldap://127.0.0.1:389 | yes |\n| username | The distinguished name (DN) of the user authorized to view the monitor database. | | yes |\n| password | The password associated with the user identified by the DN. | | yes |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: ldap://localhost:389\n username: cn=netdata,dc=example,dc=com \n password: secret\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: ldap://localhost:389\n username: cn=netdata,dc=example,dc=com \n password: secret\n\n - name: remote\n url: ldap://192.0.2.1:389\n username: cn=netdata,dc=example,dc=com \n password: secret\n\n```\n", + "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `openldap` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m openldap\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `openldap` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep openldap\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep openldap /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep openldap\n```\n\n", + "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per OpenLDAP instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| openldap.current_connections | active | connections |\n| openldap.connections | connections | connections/s |\n| openldap.traffic | sent | bytes/s |\n| openldap.entries | sent | entries/s |\n| openldap.referrals | sent | referrals/s |\n| openldap.operations | completed, initiated | operations/s |\n| openldap.operations_by_type | bind, search, unbind, add, delete, modify, compare | operations/s |\n| openldap.waiters | write, read | waiters/s |\n\n", + "integration_type": "collector", + "id": "go.d.plugin-openldap-OpenLDAP", + "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/openldap/metadata.yaml", + "related_resources": "" + }, { "meta": { "id": "collector-go.d.plugin-openvpn", @@ -5693,7 +5844,7 @@ export const integrations = [ "most_popular": false }, "overview": "# OpenVPN\n\nPlugin: go.d.plugin\nModule: openvpn\n\n## Overview\n\nThis collector monitors OpenVPN servers.\n\nIt uses OpenVPN [Management Interface](https://openvpn.net/community-resources/management-interface/) to collect metrics.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Enable in go.d.conf.\n\nThis collector is disabled by default. You need to explicitly enable it in [go.d.conf](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/config/go.d.conf).\n\nFrom the documentation for the OpenVPN Management Interface:\n> Currently, the OpenVPN daemon can at most support a single management client any one time.\n\nIt is disabled to not break other tools which use `Management Interface`.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/openvpn.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/openvpn.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address in IP:PORT format. | 127.0.0.1:7505 | yes |\n| timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. | 1 | no |\n| per_user_stats | User selector. Determines which user metrics will be collected. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:7505\n\n```\n{% /details %}\n##### With user metrics\n\nCollect metrics of all users.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:7505\n per_user_stats:\n includes:\n - \"* *\"\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:7505\n\n - name: remote\n address: 203.0.113.0:7505\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Enable in go.d.conf.\n\nThis collector is disabled by default. You need to explicitly enable it in [go.d.conf](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/config/go.d.conf).\n\nFrom the documentation for the OpenVPN Management Interface:\n> Currently, the OpenVPN daemon can at most support a single management client any one time.\n\nIt is disabled so as not to break other tools that use the `Management Interface`.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/openvpn.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/openvpn.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address in IP:PORT format. | 127.0.0.1:7505 | yes |\n| timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. | 1 | no |\n| per_user_stats | User selector. Determines which user metrics will be collected. 
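| | no |\n\n{% /details %}\nBefore enabling the job, you can check that the Management Interface is reachable at the configured address. A minimal check, assuming `nc` (netcat) is installed and the default address from the options above (run it before enabling the collector, since the daemon allows only one management client at a time, as noted above):\n\n```bash\n# Ask the management interface for its status, then close the session\nprintf 'status\\nexit\\n' | nc 127.0.0.1 7505\n```\n\nA reachable interface prints an `>INFO:` banner followed by the status output; a refused connection usually means the management interface is not enabled in the OpenVPN server configuration.\n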
#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:7505\n\n```\n{% /details %}\n##### With user metrics\n\nCollect metrics of all users.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:7505\n per_user_stats:\n includes:\n - \"* *\"\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:7505\n\n - name: remote\n address: 203.0.113.0:7505\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `openvpn` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m openvpn\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `openvpn` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep openvpn\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep openvpn /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep openvpn\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per OpenVPN instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| openvpn.active_clients | clients | clients |\n| openvpn.total_traffic | in, out | kilobits/s |\n\n### Per user\n\nThese metrics refer to the VPN user.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| username | VPN username |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| openvpn.user_traffic | in, out | kilobits/s |\n| openvpn.user_connection_time | time | seconds |\n\n", @@ -5730,7 +5881,7 @@ export const integrations = [ "most_popular": false }, "overview": "# OpenVPN status log\n\nPlugin: go.d.plugin\nModule: openvpn_status_log\n\n## Overview\n\nThis collector monitors OpenVPN server.\n\nIt parses server log files and provides summary and per user metrics.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/openvpn_status_log.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/openvpn_status_log.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| log_path | Path to status log. | /var/log/openvpn/status.log | yes |\n| per_user_stats | User selector. Determines which user metrics will be collected. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### With user metrics\n\nCollect metrics of all users.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n per_user_stats:\n includes:\n - \"* *\"\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/openvpn_status_log.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/openvpn_status_log.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| log_path | Path to status log. | /var/log/openvpn/status.log | yes |\n| per_user_stats | User selector. Determines which user metrics will be collected. | | no |\n\n{% /details %}\n#### Examples\n\n##### With user metrics\n\nCollect metrics of all users.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n per_user_stats:\n includes:\n - \"* *\"\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `openvpn_status_log` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m openvpn_status_log\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `openvpn_status_log` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep openvpn_status_log\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep openvpn_status_log /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep openvpn_status_log\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per OpenVPN status log instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| openvpn.active_clients | clients | clients |\n| openvpn.total_traffic | in, out | kilobits/s |\n\n### Per user\n\nThese metrics refer to the VPN user.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| username | VPN username |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| openvpn.user_traffic | in, out | kilobits/s |\n| openvpn.user_connection_time | time | seconds |\n\n", @@ -5741,20 +5892,17 @@ export const integrations = [ }, { "meta": { - "id": "collector-go.d.plugin-pgbouncer", + "id": "collector-go.d.plugin-oracledb", "plugin_name": "go.d.plugin", - "module_name": "pgbouncer", + "module_name": "oracledb", "monitored_instance": { - "name": "PgBouncer", - "link": "https://www.pgbouncer.org/", - "icon_filename": "postgres.svg", + "name": "Oracle DB", + "link": "https://www.oracle.com/database/", "categories": [ "data-collection.database-servers" - ] + ], + "icon_filename": "oracle.svg" }, - "keywords": [ - "pgbouncer" - ], "related_resources": { "integrations": { "list": [] @@ -5763,26 +5911,67 @@ export const integrations = [ "info_provided_to_referring_integrations": { "description": "" }, + "keywords": [ + "database", + "oracle", + "sql" + ], "most_popular": false }, - "overview": "# PgBouncer\n\nPlugin: go.d.plugin\nModule: pgbouncer\n\n## Overview\n\nThis collector monitors PgBouncer servers.\n\nExecuted queries:\n\n- `SHOW VERSION;`\n- `SHOW CONFIG;`\n- `SHOW DATABASES;`\n- `SHOW STATS;`\n- `SHOW POOLS;`\n\nInformation about the queries can be found in the [PgBouncer Documentation](https://www.pgbouncer.org/usage.html).\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Create netdata user\n\nCreate a user with `stats_users` permissions to query your PgBouncer instance.\n\nTo create the `netdata` user:\n\n- Add `netdata` user to the `pgbouncer.ini` file:\n\n ```text\n stats_users = netdata\n ```\n\n- Add a password for the `netdata` user to the `userlist.txt` file:\n\n ```text\n \"netdata\" \"\"\n ```\n\n- To verify the credentials, run the following command\n\n ```bash\n psql -h localhost -U netdata -p 6432 pgbouncer -c \"SHOW VERSION;\" >/dev/null 2>&1 && echo OK || echo FAIL\n ```\n\n When it prompts for a password, enter the password you added to 
`userlist.txt`.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/pgbouncer.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/pgbouncer.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | PgBouncer server DSN (Data Source Name). See [DSN syntax](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING). | postgres://postgres:postgres@127.0.0.1:6432/pgbouncer | yes |\n| timeout | Query timeout in seconds. | 1 | no |\n\n{% /details %}\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: 'postgres://postgres:postgres@127.0.0.1:6432/pgbouncer'\n\n```\n{% /details %}\n##### Unix socket\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: 'host=/tmp dbname=pgbouncer user=postgres port=6432'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: 'postgres://postgres:postgres@127.0.0.1:6432/pgbouncer'\n\n - name: remote\n dsn: 'postgres://postgres:postgres@203.0.113.10:6432/pgbouncer'\n\n```\n{% /details %}\n", - "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `pgbouncer` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m pgbouncer\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `pgbouncer` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep pgbouncer\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep pgbouncer /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep pgbouncer\n```\n\n", + "overview": "# Oracle DB\n\nPlugin: go.d.plugin\nModule: oracledb\n\n## Overview\n\nThis collector monitors the health and performance of Oracle DB servers and collects general statistics, replication and user metrics.\n\n\nIt establishes a connection to the Oracle DB instance via a TCP or UNIX socket and extracts metrics from the following database views:\n\n- `v$sysmetric`\n- `v$sysstat`\n- `v$waitclassmetric`\n- `v$system_wait_class`\n- `dba_data_files`\n- `dba_free_space`\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector can automatically detect Oracle DB instances running on:\n\n- Localhost, listening on port 1521\n- Within Docker containers\n\n> **Note**: Oracle DB requires a username and password. While Netdata can automatically discover Oracle DB instances and create data collection jobs, these jobs will fail unless you provide the correct credentials.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Create a read-only user for netdata\n\nFollow the official instructions for your Oracle RDBMS to create a read-only user for netdata. The operation may follow this approach:\n\nConnect to your Oracle database with an administrative user and execute:\n\n```sql\nCREATE USER netdata IDENTIFIED BY <password>;\n\nGRANT CONNECT TO netdata;\nGRANT SELECT_CATALOG_ROLE TO netdata;\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/oracledb.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/oracledb.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. 
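| 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | Oracle server DSN (Data Source Name). Format is `oracle://username:password@host:port/service?param1=value1&...&paramN=valueN`. | | yes |\n| timeout | Query timeout in seconds. | 1 | no |\n\n{% /details %}\nBefore starting the collector, you can sanity-check that the DSN coordinates and the `netdata` user's credentials work. A minimal check, assuming the `sqlplus` client is installed (replace `<password>` and the service name to match your setup):\n\n```bash\n# Connect with EZConnect syntax using the same host, port, and service as the dsn option\necho 'SELECT 1 FROM dual;' | sqlplus -S netdata/<password>@//127.0.0.1:1521/XE\n```\n\nIf this prints a result row rather than an ORA- error, the same coordinates should work in the `dsn` option.\n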
#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: oracle://netdata:secret@127.0.0.1:1521/XE\n\n```\n{% /details %}\n##### TLS connection (TCPS)\n\nAn example configuration for TLS connection.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: 'oracle://netdata:secret@127.0.0.1:1521/XE?ssl=true&ssl verify=true'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: oracle://netdata:secret@127.0.0.1:1521/XE\n\n - name: remote\n dsn: oracle://netdata:secret@203.0.113.0:1521/XE\n\n```\n{% /details %}\n", + "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `oracledb` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m oracledb\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `oracledb` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep oracledb\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep oracledb /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep oracledb\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Oracle DB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| oracledb.sessions | session | sessions |\n| oracledb.average_active_sessions | active | sessions |\n| oracledb.sessions_utilization | session_limit | percent |\n| oracledb.current_logons | logons | logons |\n| oracledb.logons | logons | logons/s |\n| oracledb.database_wait_time_ratio | db_wait_time | percent |\n| oracledb.sql_service_response_time | sql_resp_time | seconds |\n| oracledb.enqueue_timeouts | enqueue | timeouts/s |\n| oracledb.disk_io | read, written | bytes/s |\n| oracledb.disk_iops | read, write | operations/s |\n| oracledb.sorts | memory, disk | sorts/s |\n| oracledb.table_scans | short_table, long_table | scans/s |\n| oracledb.cache_hit_ratio | buffer, cursor, library, row | percent |\n| oracledb.global_cache_blocks | corrupted, lost | blocks/s |\n| oracledb.activity | parse, execute, user_commits, user_rollbacks | events/s |\n\n### Per tablespace\n\nThese metrics refer to the Tablespace.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| tablespace | Tablespace name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| oracledb.tablespace_utilization | utilization | percent |\n| oracledb.tablespace_usage | avail, used | bytes |\n\n### Per wait class\n\nThese metrics refer to the [Wait Class](https://docs.oracle.com/en/database/oracle/oracle-database/19/refrn/classes-of-wait-events.html).\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| wait_class | [Wait Class name](https://docs.oracle.com/en/database/oracle/oracle-database/19/refrn/classes-of-wait-events.html). |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| oracledb.wait_class_wait_time | wait_time | milliseconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-oracledb-Oracle_DB", "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/oracledb/metadata.yaml", "related_resources": "" }, { "meta": { "id": "collector-go.d.plugin-pgbouncer", "plugin_name": "go.d.plugin", "module_name": "pgbouncer", "monitored_instance": { "name": "PgBouncer", "link": "https://www.pgbouncer.org/", "icon_filename": "postgres.svg", "categories": [ "data-collection.database-servers" ] }, "keywords": [ "pgbouncer" ], "related_resources": { "integrations": { "list": [] } }, "info_provided_to_referring_integrations": { "description": "" }, "most_popular": false }, "overview": "# PgBouncer\n\nPlugin: go.d.plugin\nModule: pgbouncer\n\n## Overview\n\nThis collector monitors PgBouncer servers.\n\nExecuted queries:\n\n- `SHOW VERSION;`\n- `SHOW CONFIG;`\n- `SHOW DATABASES;`\n- `SHOW STATS;`\n- `SHOW POOLS;`\n\nInformation about the queries can be found in the [PgBouncer Documentation](https://www.pgbouncer.org/usage.html).\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Create netdata user\n\nCreate a user with `stats_users` permissions to query your PgBouncer instance.\n\nTo create the `netdata` user:\n\n- Add `netdata` user to the `pgbouncer.ini` file:\n\n ```text\n stats_users = netdata\n ```\n\n- Add a password for the `netdata` user to the `userlist.txt` file:\n\n ```text\n \"netdata\" \"<password>\"\n ```\n\n- To verify the credentials, run the following command:\n\n ```bash\n psql -h localhost -U netdata -p 6432 pgbouncer -c \"SHOW VERSION;\" >/dev/null 2>&1 && echo OK || echo FAIL\n ```\n\n When it prompts for a password, enter the password you added to `userlist.txt`.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/pgbouncer.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/pgbouncer.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required 
|\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | PgBouncer server DSN (Data Source Name). See [DSN syntax](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING). | postgres://postgres:postgres@127.0.0.1:6432/pgbouncer | yes |\n| timeout | Query timeout in seconds. | 1 | no |\n\n{% /details %}\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: 'postgres://postgres:postgres@127.0.0.1:6432/pgbouncer'\n\n```\n{% /details %}\n##### Unix socket\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: 'host=/tmp dbname=pgbouncer user=postgres port=6432'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: 'postgres://postgres:postgres@127.0.0.1:6432/pgbouncer'\n\n - name: remote\n dsn: 'postgres://postgres:postgres@203.0.113.10:6432/pgbouncer'\n\n```\n{% /details %}\n", + "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `pgbouncer` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m pgbouncer\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `pgbouncer` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep pgbouncer\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep pgbouncer /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep pgbouncer\n```\n\n", + "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per PgBouncer instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| pgbouncer.client_connections_utilization | used | percentage |\n\n### Per database\n\nThese metrics refer to the database.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| database | database name |\n| postgres_database | Postgres database name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| pgbouncer.db_client_connections | active, waiting, cancel_req | connections |\n| pgbouncer.db_server_connections | active, idle, used, tested, login | connections |\n| pgbouncer.db_server_connections_utilization | used | percentage |\n| pgbouncer.db_clients_wait_time | time | seconds |\n| pgbouncer.db_client_max_wait_time | time | seconds |\n| pgbouncer.db_transactions | transactions | transactions/s |\n| pgbouncer.db_transactions_time | time | seconds |\n| pgbouncer.db_transaction_avg_time | time | seconds |\n| pgbouncer.db_queries | queries | queries/s |\n| pgbouncer.db_queries_time | time | seconds |\n| pgbouncer.db_query_avg_time | time | seconds |\n| pgbouncer.db_network_io | received, sent | B/s |\n\n", + "integration_type": "collector", + "id": "go.d.plugin-pgbouncer-PgBouncer", + "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/pgbouncer/metadata.yaml", + "related_resources": "" + }, + { + "meta": { + "id": "collector-go.d.plugin-phpdaemon", + "plugin_name": "go.d.plugin", + "module_name": "phpdaemon", + "monitored_instance": { + "name": "phpDaemon", + "link": "https://github.com/kakserpom/phpdaemon", "icon_filename": "php.svg", "categories": [ "data-collection.apm" @@ -5803,7 +5992,7 @@ export const integrations = [ "most_popular": false }, "overview": "# phpDaemon\n\nPlugin: go.d.plugin\nModule: phpdaemon\n\n## Overview\n\nThis collector monitors phpDaemon instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Enable phpDaemon's HTTP server\n\nStatistics expected to be in JSON format.\n\n
\nphpDaemon configuration\n\nInstruction from [@METAJIJI](https://github.com/METAJIJI).\n\nTo enable `phpd` statistics on http, you must enable the http server and write an application.\nApplication is important, because standalone application [ServerStatus.php](https://github.com/kakserpom/phpdaemon/blob/master/PHPDaemon/Applications/ServerStatus.php) provides statistics in html format and unusable for `netdata`.\n\n```php\n// /opt/phpdaemon/conf/phpd.conf\n\npath /opt/phpdaemon/conf/AppResolver.php;\nPool:HTTPServer {\n privileged;\n listen '127.0.0.1';\n port 8509;\n}\n```\n\n```php\n// /opt/phpdaemon/conf/AppResolver.php\n\nattrs->server['DOCUMENT_URI'], $m)) {\n return $m[1];\n }\n }\n}\n\nreturn new MyAppResolver;\n```\n\n```php\n/opt/phpdaemon/conf/PHPDaemon/Applications/FullStatus.php\n\nheader('Content-Type: application/javascript; charset=utf-8');\n\n $stat = Daemon::getStateOfWorkers();\n $stat['uptime'] = time() - Daemon::$startTime;\n echo json_encode($stat);\n }\n}\n```\n\n
\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/phpdaemon.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/phpdaemon.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8509/FullStatus | yes |\n| timeout | HTTP request timeout. | 2 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8509/FullStatus\n\n```\n{% /details %}\n##### HTTP authentication\n\nHTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8509/FullStatus\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nHTTPS with self-signed certificate.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8509/FullStatus\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8509/FullStatus\n\n - name: remote\n url: http://192.0.2.1:8509/FullStatus\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Enable phpDaemon's HTTP server\n\nStatistics expected to be in JSON format.\n\n
\nphpDaemon configuration\n\nInstructions from [@METAJIJI](https://github.com/METAJIJI).\n\nTo enable `phpd` statistics over HTTP, you must enable the HTTP server and write an application.\nThe application matters because the standalone application [ServerStatus.php](https://github.com/kakserpom/phpdaemon/blob/master/PHPDaemon/Applications/ServerStatus.php) provides statistics in HTML format, which is unusable for `netdata`.\n\n```php\n// /opt/phpdaemon/conf/phpd.conf\n\npath /opt/phpdaemon/conf/AppResolver.php;\nPool:HTTPServer {\n privileged;\n listen '127.0.0.1';\n port 8509;\n}\n```\n\n```php\n// /opt/phpdaemon/conf/AppResolver.php\n\n<?php\n\n// The class and method declarations below are an assumed reconstruction;\n// only the DOCUMENT_URI match and the return survive from the original.\nclass MyAppResolver extends \\PHPDaemon\\Core\\AppResolver {\n public function getRequestRoute($req, $upstream) {\n if (preg_match('~^/(FullStatus)/?$~', $req->attrs->server['DOCUMENT_URI'], $m)) {\n return $m[1];\n }\n }\n}\n\nreturn new MyAppResolver;\n```\n\n```php\n/opt/phpdaemon/conf/PHPDaemon/Applications/FullStatus.php\n\n<?php\nnamespace PHPDaemon\\Applications;\n\nuse PHPDaemon\\Core\\Daemon;\n\n// The class wrapper below is an assumed reconstruction; the body of run(),\n// which emits the worker state as JSON, survives from the original.\nclass FullStatus extends \\PHPDaemon\\HTTPRequest\\Generic {\n public function run() {\n header('Content-Type: application/javascript; charset=utf-8');\n\n $stat = Daemon::getStateOfWorkers();\n $stat['uptime'] = time() - Daemon::$startTime;\n echo json_encode($stat);\n }\n}\n```\n\n
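After restarting phpDaemon, you can verify that the `FullStatus` application serves JSON before pointing Netdata at it. A quick check, assuming the listen address and port from the `phpd.conf` snippet above:\n\n```bash\n# The collector's default endpoint\ncurl http://127.0.0.1:8509/FullStatus\n```\n\nThe response should be a JSON object with the worker-state counters and an `uptime` field.\n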
\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/phpdaemon.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/phpdaemon.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8509/FullStatus | yes |\n| timeout | HTTP request timeout. | 2 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8509/FullStatus\n\n```\n{% /details %}\n##### HTTP authentication\n\nHTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8509/FullStatus\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nHTTPS with self-signed certificate.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8509/FullStatus\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8509/FullStatus\n\n - name: remote\n url: http://192.0.2.1:8509/FullStatus\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `phpdaemon` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m phpdaemon\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `phpdaemon` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep phpdaemon\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep phpdaemon /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep phpdaemon\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per phpDaemon instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| phpdaemon.workers | alive, shutdown | workers |\n| phpdaemon.alive_workers | idle, busy, reloading | workers |\n| phpdaemon.idle_workers | preinit, init, initialized | workers |\n| phpdaemon.uptime | time | seconds |\n\n", @@ -5840,7 +6029,7 @@ export const integrations = [ "most_popular": false }, "overview": "# PHP-FPM\n\nPlugin: go.d.plugin\nModule: phpfpm\n\n## Overview\n\nThis collector monitors PHP-FPM instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Enable status page\n\nUncomment the `pm.status_path = /status` variable in the `php-fpm` config file.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/phpfpm.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config 
go.d/phpfpm.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/status?full&json | yes |\n| socket | Server Unix socket. | | no |\n| address | Server address in IP:PORT format. | | no |\n| fcgi_path | Status path. | /status | no |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### HTTP\n\nCollecting data from a local instance over HTTP.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://localhost/status?full&json\n\n```\n{% /details %}\n##### Unix socket\n\nCollecting data from a local instance over Unix socket.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n socket: '/tmp/php-fpm.sock'\n\n```\n{% /details %}\n##### TCP socket\n\nCollecting data from a local instance over TCP socket.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:9000\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://localhost/status?full&json\n\n - name: remote\n url: http://203.0.113.10/status?full&json\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Enable status page\n\nUncomment the `pm.status_path = /status` variable in the `php-fpm` config file.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/phpfpm.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/phpfpm.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required 
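|\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/status?full&json | yes |\n| socket | Server Unix socket. | | no |\n| address | Server address in IP:PORT format. | | no |\n| fcgi_path | Status path. | /status | no |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\nOnce the status page is enabled and PHP-FPM and your web server are reloaded, you can confirm that the endpoint returns the JSON the collector expects. A quick check, assuming the default URL from the options above:\n\n```bash\n# Quote the URL so the shell does not treat & as a background operator\ncurl \"http://127.0.0.1/status?full&json\"\n```\n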
|\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/status?full&json | yes |\n| socket | Server Unix socket. | | no |\n| address | Server address in IP:PORT format. | | no |\n| fcgi_path | Status path. | /status | no |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### HTTP\n\nCollecting data from a local instance over HTTP.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://localhost/status?full&json\n\n```\n{% /details %}\n##### Unix socket\n\nCollecting data from a local instance over Unix socket.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n socket: '/tmp/php-fpm.sock'\n\n```\n{% /details %}\n##### TCP socket\n\nCollecting data from a local instance over TCP socket.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:9000\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://localhost/status?full&json\n\n - name: remote\n url: http://203.0.113.10/status?full&json\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `phpfpm` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m phpfpm\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `phpfpm` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep phpfpm\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep phpfpm /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep phpfpm\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per PHP-FPM instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| phpfpm.connections | active, max_active, idle | connections |\n| phpfpm.requests | requests | requests/s |\n| phpfpm.performance | max_children_reached, slow_requests | status |\n| phpfpm.request_duration | min, max, avg | milliseconds |\n| phpfpm.request_cpu | min, max, avg | percentage |\n| phpfpm.request_mem | min, max, avg | KB |\n\n", @@ -5876,7 +6065,7 @@ export const integrations = [ "most_popular": false }, "overview": "# Pi-hole\n\nPlugin: go.d.plugin\nModule: pihole\n\n## Overview\n\nThis collector monitors Pi-hole instances using [PHP API](https://github.com/pi-hole/AdminLTE).\n\nThe data provided by the API is for the last 24 hours. All collected values refer to this time period and not to the\nmodule's collection interval.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/pihole.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/pihole.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. 
| http://127.0.0.1 | yes |\n| setup_vars_path | Path to setupVars.conf. This file is used to get the web password. | /etc/pihole/setupVars.conf | no |\n| timeout | HTTP request timeout. | 5 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nRemote instance with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://203.0.113.11\n tls_skip_verify: yes\n password: 1ebd33f882f9aa5fac26a7cb74704742f91100228eb322e41b7bd6e6aeb8f74b\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1\n\n - name: remote\n url: http://203.0.113.10\n password: 1ebd33f882f9aa5fac26a7cb74704742f91100228eb322e41b7bd6e6aeb8f74b\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/pihole.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/pihole.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1 | yes |\n| setup_vars_path | Path to setupVars.conf. This file is used to get the web password. | /etc/pihole/setupVars.conf | no |\n| timeout | HTTP request timeout. | 5 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. 
| | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nRemote instance with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://203.0.113.11\n tls_skip_verify: yes\n password: 1ebd33f882f9aa5fac26a7cb74704742f91100228eb322e41b7bd6e6aeb8f74b\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1\n\n - name: remote\n url: http://203.0.113.10\n password: 1ebd33f882f9aa5fac26a7cb74704742f91100228eb322e41b7bd6e6aeb8f74b\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `pihole` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m pihole\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `pihole` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep pihole\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep pihole /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
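If the file spans many restarts, you can narrow the output to just the most recent matches (a sketch, assuming GNU coreutils `tail` is available):\n\n```bash\ngrep pihole /var/log/netdata/collector.log | tail -n 20\n```\n\n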
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep pihole\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ pihole_blocklist_last_update ](https://github.com/netdata/netdata/blob/master/src/health/health.d/pihole.conf) | pihole.blocklist_last_update | gravity.list (blocklist) file last update time |\n| [ pihole_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/pihole.conf) | pihole.unwanted_domains_blocking_status | unwanted domains blocking is disabled |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Pi-hole instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| pihole.dns_queries_total | queries | queries |\n| pihole.dns_queries | cached, blocked, forwarded | queries |\n| pihole.dns_queries_percentage | cached, blocked, forwarded | percentage |\n| pihole.unique_clients | unique | clients |\n| pihole.domains_on_blocklist | blocklist | domains |\n| pihole.blocklist_last_update | ago | seconds |\n| pihole.unwanted_domains_blocking_status | enabled, disabled | status |\n| pihole.dns_queries_types | a, aaaa, any, ptr, soa, srv, txt | percentage |\n| pihole.dns_queries_forwarded_destination | cached, blocked, other | percentage |\n\n", @@ -5913,7 +6102,7 @@ export const integrations = [ "most_popular": false }, "overview": "# Pika\n\nPlugin: go.d.plugin\nModule: pika\n\n## Overview\n\nThis collector monitors Pika servers.\n\nIt collects information and statistics about the server executing the following commands:\n\n- [`INFO ALL`](https://github.com/OpenAtomFoundation/pika/wiki/pika-info%E4%BF%A1%E6%81%AF%E8%AF%B4%E6%98%8E)\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/pika.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/pika.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| address | Pika server address. | redis://@localhost:9221 | yes |\n| timeout | Dial (establishing new connections), read (socket reads) and write (socket writes) timeout in seconds. | 1 | no |\n| username | Username used for authentication. | | no |\n| password | Password used for authentication. | | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certificate authority that client use when verifying server certificates. | | no |\n| tls_cert | Client tls certificate. | | no |\n| tls_key | Client tls key. | | no |\n\n{% /details %}\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 'redis://@localhost:9221'\n\n```\n{% /details %}\n##### TCP socket with password\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 'redis://:password@127.0.0.1:9221'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 'redis://:password@127.0.0.1:9221'\n\n - name: remote\n address: 'redis://user:password@203.0.113.0:9221'\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/pika.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/pika.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Pika server address. | redis://@localhost:9221 | yes |\n| timeout | Dial (establishing new connections), read (socket reads) and write (socket writes) timeout in seconds. | 1 | no |\n| username | Username used for authentication. | | no |\n| password | Password used for authentication. | | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certificate authority that client use when verifying server certificates. | | no |\n| tls_cert | Client tls certificate. | | no |\n| tls_key | Client tls key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 'redis://@localhost:9221'\n\n```\n{% /details %}\n##### TCP socket with password\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 'redis://:password@127.0.0.1:9221'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 'redis://:password@127.0.0.1:9221'\n\n - name: remote\n address: 'redis://user:password@203.0.113.0:9221'\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `pika` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m pika\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `pika` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep pika\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep pika /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep pika\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
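For Pika, every metric below is parsed from the output of the `INFO ALL` command mentioned in the Overview, so you can inspect the raw values directly (a sketch, assuming `redis-cli` is installed and Pika listens on its default port):\n\n```bash\nredis-cli -h 127.0.0.1 -p 9221 INFO ALL\n```\n\n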
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Pika instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| pika.connections | accepted | connections |\n| pika.clients | connected | clients |\n| pika.memory | used | bytes |\n| pika.connected_replicas | connected | replicas |\n| pika.commands | processed | commands/s |\n| pika.commands_calls | a dimension per command | calls/s |\n| pika.database_strings_keys | a dimension per database | keys |\n| pika.database_strings_expires_keys | a dimension per database | keys |\n| pika.database_strings_invalid_keys | a dimension per database | keys |\n| pika.database_hashes_keys | a dimension per database | keys |\n| pika.database_hashes_expires_keys | a dimension per database | keys |\n| pika.database_hashes_invalid_keys | a dimension per database | keys |\n| pika.database_lists_keys | a dimension per database | keys |\n| pika.database_lists_expires_keys | a dimension per database | keys |\n| pika.database_lists_invalid_keys | a dimension per database | keys |\n| pika.database_zsets_keys | a dimension per database | keys |\n| pika.database_zsets_expires_keys | a dimension per database | keys |\n| pika.database_zsets_invalid_keys | a dimension per database | keys |\n| pika.database_sets_keys | a dimension per database | keys |\n| pika.database_sets_expires_keys | a dimension per database | keys |\n| pika.database_sets_invalid_keys | a dimension per database | keys |\n| pika.uptime | uptime | seconds |\n\n", @@ -5948,8 +6137,8 @@ export const integrations = [ }, "most_popular": false }, - "overview": "# Ping\n\nPlugin: go.d.plugin\nModule: ping\n\n## Overview\n\nThis module measures round-trip time and packet loss by sending ping messages to network hosts.\n\nThere are two operational modes:\n\n- privileged (send raw ICMP ping, default). 
Requires\n CAP_NET_RAW [capability](https://man7.org/linux/man-pages/man7/capabilities.7.html) or root privileges:\n > **Note**: set automatically during Netdata installation.\n\n ```bash\n sudo setcap CAP_NET_RAW=eip /usr/libexec/netdata/plugins.d/go.d.plugin\n ```\n\n- unprivileged (send UDP ping, Linux only).\n Requires configuring [ping_group_range](https://www.man7.org/linux/man-pages/man7/icmp.7.html):\n\n ```bash\n sudo sysctl -w net.ipv4.ping_group_range=\"0 2147483647\"\n ```\n To persist the change add `net.ipv4.ping_group_range=0 2147483647` to `/etc/sysctl.conf` and\n execute `sudo sysctl -p`.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/ping.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/ping.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| hosts | Network hosts. | | yes |\n| network | Allows configuration of DNS resolution. Supported options: ip (select IPv4 or IPv6), ip4 (select IPv4), ip6 (select IPv6). | ip | no |\n| privileged | Ping packets type. \"no\" means send an \"unprivileged\" UDP ping, \"yes\" - raw ICMP ping. | yes | no |\n| packets | Number of ping packets to send. | 5 | no |\n| interval | Timeout between sending ping packets. | 100ms | no |\n\n{% /details %}\n#### Examples\n\n##### IPv4 hosts\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: example\n hosts:\n - 192.0.2.0\n - 192.0.2.1\n\n```\n{% /details %}\n##### Unprivileged mode\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: example\n privileged: no\n hosts:\n - 192.0.2.0\n - 192.0.2.1\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nMultiple instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: example1\n hosts:\n - 192.0.2.0\n - 192.0.2.1\n\n - name: example2\n packets: 10\n hosts:\n - 192.0.2.3\n - 192.0.2.4\n\n```\n{% /details %}\n", + "overview": "# Ping\n\nPlugin: go.d.plugin\nModule: ping\n\n## Overview\n\nThis module measures round-trip time and packet loss by sending ping messages to network hosts.\n\nThere are two operational modes:\n\n- **Privileged** (send raw ICMP ping, default). 
Requires the necessary permissions ([CAP_NET_RAW](https://man7.org/linux/man-pages/man7/capabilities.7.html) on Linux, `setuid` bit on other systems).\n\n These permissions are **automatically** set during Netdata installation. However, if you need to set them manually:\n - set `CAP_NET_RAW` (Linux only).\n ```bash\n sudo setcap CAP_NET_RAW=eip /usr/libexec/netdata/plugins.d/go.d.plugin\n ```\n - set `setuid` bit (Other OS).\n ```bash\n sudo chmod 4750 /usr/libexec/netdata/plugins.d/go.d.plugin\n ```\n\n- **Unprivileged** (send UDP ping, Linux only). Requires configuring [ping_group_range](https://www.man7.org/linux/man-pages/man7/icmp.7.html):\n\n This configuration is **not set automatically** and requires manual configuration.\n\n ```bash\n sudo sysctl -w net.ipv4.ping_group_range=\"0 2147483647\"\n ```\n\n To persist the change add `net.ipv4.ping_group_range=0 2147483647` to `/etc/sysctl.conf` and execute `sudo sysctl -p`.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/ping.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/ping.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| hosts | Network hosts. | | yes |\n| network | Allows configuration of DNS resolution. Supported options: ip (select IPv4 or IPv6), ip4 (select IPv4), ip6 (select IPv6). | ip | no |\n| privileged | Ping packets type. \"no\" means send an \"unprivileged\" UDP ping, \"yes\" - raw ICMP ping. | yes | no |\n| packets | Number of ping packets to send. | 5 | no |\n| interval | Timeout between sending ping packets. 
| 100ms | no |\n\n{% /details %}\n#### Examples\n\n##### IPv4 hosts\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: example\n hosts:\n - 192.0.2.0\n - 192.0.2.1\n\n```\n{% /details %}\n##### Unprivileged mode\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: example\n privileged: no\n hosts:\n - 192.0.2.0\n - 192.0.2.1\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nMultiple instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: example1\n hosts:\n - 192.0.2.0\n - 192.0.2.1\n\n - name: example2\n packets: 10\n hosts:\n - 192.0.2.3\n - 192.0.2.4\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `ping` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m ping\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `ping` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep ping\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep ping /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
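If the log lines point to permission problems in unprivileged mode, also verify that the `ping_group_range` setting described in the Overview is actually in effect (a quick check, Linux only):\n\n```bash\nsysctl net.ipv4.ping_group_range\n```\n\n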
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep ping\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ ping_host_reachable ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ping.conf) | ping.host_packet_loss | network host ${label:host} reachability status |\n| [ ping_packet_loss ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ping.conf) | ping.host_packet_loss | packet loss percentage to the network host ${label:host} over the last 10 minutes |\n| [ ping_host_latency ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ping.conf) | ping.host_rtt | average latency to the network host ${label:host} over the last 10 seconds |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per host\n\nThese metrics refer to the remote host.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| host | remote host |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ping.host_rtt | min, max, avg | milliseconds |\n| ping.host_std_dev_rtt | std_dev | milliseconds |\n| ping.host_packet_loss | loss | percentage |\n| ping.host_packets | received, sent | packets |\n\n", @@ -5964,7 +6153,7 @@ export const integrations = [ "plugin_name": "go.d.plugin", "module_name": "portcheck", "monitored_instance": { - "name": "TCP Endpoints", + "name": "TCP/UDP Endpoints", "link": "", "icon_filename": "globe.svg", "categories": [ @@ -5982,13 +6171,13 @@ export const integrations = [ }, "most_popular": false }, - "overview": "# TCP Endpoints\n\nPlugin: go.d.plugin\nModule: portcheck\n\n## Overview\n\nThis collector monitors TCP services availability and response time.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/portcheck.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/portcheck.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| host | Remote host address in IPv4, IPv6 format, or DNS name. | | yes |\n| ports | Remote host ports. Must be specified in numeric format. | | yes |\n| timeout | HTTP request timeout. | 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Check SSH and telnet\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: server1\n host: 127.0.0.1\n ports:\n - 22\n - 23\n\n```\n{% /details %}\n##### Check webserver with IPv6 address\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: server2\n host: \"[2001:DB8::1]\"\n ports:\n - 80\n - 8080\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nMultiple instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: server1\n host: 127.0.0.1\n ports:\n - 22\n - 23\n\n - name: server2\n host: 203.0.113.10\n ports:\n - 22\n - 23\n\n```\n{% /details %}\n", + "overview": "# TCP/UDP Endpoints\n\nPlugin: go.d.plugin\nModule: portcheck\n\n## Overview\n\nCollector for monitoring service availability and response time. It can be used to check if specific ports are open or reachable on a target system.\n\nIt supports both TCP and UDP protocols over IPv4 and IPv6 networks.\n\n| Protocol | Check Description |\n|----------|-----------------------------------------------------------------------------------------------------------------------------|\n| TCP | Attempts to establish a TCP connection to the specified ports on the target system. |\n| UDP | Sends a 0-byte UDP packet to the specified ports on the target system and analyzes ICMP responses to determine port status. |\n\nPossible TCP statuses:\n\n| TCP Status | Description |\n|------------|-------------------------------------------------------------|\n| success | Connection established successfully. |\n| timeout | Connection timed out after waiting for configured duration. |\n| failed | An error occurred during the connection attempt. |\n\nPossible UDP statuses:\n\n| UDP Status | Description |\n|---------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------|\n| open/filtered | No response received within the configured timeout. This status indicates the port is either open or filtered, but the exact state cannot be determined definitively. |\n| closed | Received an ICMP Destination Unreachable message, indicating the port is closed. 
|\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/portcheck.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/portcheck.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| host | Remote host address in IPv4, IPv6 format, or DNS name. | | yes |\n| ports | Target TCP ports. Must be specified in numeric format. | | no |\n| udp_ports | Target UDP ports. Must be specified in numeric format. | | no |\n| timeout | HTTP request timeout. | 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Check TCP ports (IPv4)\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n host: 127.0.0.1\n ports:\n - 22\n - 23\n\n```\n{% /details %}\n##### Check TCP ports (IPv6)\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n host: \"[2001:DB8::1]\"\n ports:\n - 80\n - 8080\n\n```\n{% /details %}\n##### Check UDP ports (IPv4)\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n host: 127.0.0.1\n udp_ports:\n - 3120\n - 3121\n\n```\n{% /details %}\n##### Check UDP ports (IPv6)\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n host: [::1]\n udp_ports:\n - 3120\n - 3121\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nMultiple instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: server1\n host: 127.0.0.1\n ports:\n - 22\n - 23\n\n - name: server2\n host: 203.0.113.10\n ports:\n - 22\n - 23\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `portcheck` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m portcheck\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `portcheck` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep portcheck\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep portcheck /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep portcheck\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ portcheck_service_reachable ](https://github.com/netdata/netdata/blob/master/src/health/health.d/portcheck.conf) | portcheck.status | TCP host ${label:host} port ${label:port} liveness status |\n| [ portcheck_connection_timeouts ](https://github.com/netdata/netdata/blob/master/src/health/health.d/portcheck.conf) | portcheck.status | percentage of timed-out TCP connections to host ${label:host} port ${label:port} in the last 5 minutes |\n| [ portcheck_connection_fails ](https://github.com/netdata/netdata/blob/master/src/health/health.d/portcheck.conf) | portcheck.status | percentage of failed TCP connections to host ${label:host} port ${label:port} in the last 5 minutes |\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per tcp endpoint\n\nThese metrics refer to the TCP endpoint.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| host | host |\n| port | port |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| portcheck.status | success, failed, timeout | boolean |\n| portcheck.state_duration | time | seconds |\n| portcheck.latency | time | ms |\n\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per TCP endpoint\n\nThese metrics refer to the TCP endpoint.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| host | The hostname or IP address of the target system, as specified in the configuration. |\n| port | The TCP port being monitored, as defined in the 'ports' configuration parameter. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| portcheck.status | success, failed, timeout | boolean |\n| portcheck.state_duration | time | seconds |\n| portcheck.latency | time | ms |\n\n### Per UDP endpoint\n\nThese metrics refer to the UDP endpoint.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| host | The hostname or IP address of the target system, as specified in the configuration. |\n| port | The UDP port being monitored, as defined in the 'udp_ports' configuration parameter. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| portcheck.udp_port_status | open/filtered, closed | status |\n| portcheck.udp_port_status_duration | time | seconds |\n\n", "integration_type": "collector", - "id": "go.d.plugin-portcheck-TCP_Endpoints", + "id": "go.d.plugin-portcheck-TCP/UDP_Endpoints", "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/portcheck/metadata.yaml", "related_resources": "" }, @@ -6020,7 +6209,7 @@ export const integrations = [ "most_popular": false }, "overview": "# Postfix\n\nPlugin: go.d.plugin\nModule: postfix\n\n## Overview\n\nThis collector retrieves statistics about the Postfix mail queue using the [postqueue](https://www.postfix.org/postqueue.1.html) command-line tool.\n\n\nIt periodically executes the `postqueue -p` command. The collection interval is set to 10 seconds by default, but this can be configurable.\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\nPostfix has internal access controls for the mail queue. By default, all users can view the queue. If your system has stricter controls, grant the `netdata` user access by adding it to `authorized_mailq_users` in the `/etc/postfix/main.cf `file. For more details, refer to the `authorized_mailq_users` setting in the [Postfix documentation](https://www.postfix.org/postconf.5.html).\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector executes `postqueue -p` to get Postfix queue statistics.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/postfix.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/postfix.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| binary_path | Path to the `postqueue` binary. If an absolute path is provided, the collector will use it directly; otherwise, it will search for the binary in directories specified in the PATH environment variable. | /usr/sbin/postqueue | yes |\n| timeout | Timeout for executing the binary, specified in seconds. 
| 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Custom binary path\n\nThe executable is not in the directories specified in the PATH environment variable.\n\n{% details open=true summary=\"\" %}\n```yaml\njobs:\n - name: custom_path\n binary_path: /usr/local/sbin/postqueue\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/postfix.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/postfix.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| binary_path | Path to the `postqueue` binary. If an absolute path is provided, the collector will use it directly; otherwise, it will search for the binary in directories specified in the PATH environment variable. | /usr/sbin/postqueue | yes |\n| timeout | Timeout for executing the binary, specified in seconds. | 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Custom binary path\n\nThe executable is not in the directories specified in the PATH environment variable.\n\n{% details open=true summary=\"\" %}\n```yaml\njobs:\n - name: custom_path\n binary_path: /usr/local/sbin/postqueue\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `postfix` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m postfix\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `postfix` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep postfix\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep postfix /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep postfix\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Postfix instance\n\nThese metrics refer to the entire monitored application.\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postfix.qemails | emails | emails |\n| postfix.qsize | size | KiB |\n\n", @@ -6070,7 +6259,7 @@ export const integrations = [ "most_popular": true }, "overview": "# PostgreSQL\n\nPlugin: go.d.plugin\nModule: postgres\n\n## Overview\n\nThis collector monitors the activity and performance of Postgres servers, collects replication statistics, metrics for each database, table and index, and more.\n\n\nIt establishes a connection to the Postgres instance via a TCP or UNIX socket.\nTo collect metrics for database tables and indexes, it establishes an additional connection for each discovered database.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by trying to connect as root and netdata using known PostgreSQL TCP and UNIX sockets:\n\n- 127.0.0.1:5432\n- /var/run/postgresql/\n\n\n#### Limits\n\nTable and index metrics are not collected for databases with more than 50 tables or 250 indexes.\nThese limits can be changed in the configuration file.\n\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Create netdata user\n\nCreate a user with granted `pg_monitor`\nor `pg_read_all_stat` [built-in role](https://www.postgresql.org/docs/current/predefined-roles.html).\n\nTo create the `netdata` user with these permissions, execute the following in the psql session, as a user with CREATEROLE privileges:\n\n```postgresql\nCREATE USER netdata;\nGRANT pg_monitor TO netdata;\n```\n\nAfter creating the new user, restart the Netdata agent with `sudo systemctl restart netdata`, or\nthe [appropriate method](/docs/netdata-agent/start-stop-restart.md) for your\nsystem.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/postgres.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config 
directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/postgres.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | Postgres server DSN (Data Source Name). See [DSN syntax](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING). | postgres://postgres:postgres@127.0.0.1:5432/postgres | yes |\n| timeout | Query timeout in seconds. | 2 | no |\n| collect_databases_matching | Databases selector. Determines which database metrics will be collected. Syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/matcher#simple-patterns-matcher). | | no |\n| max_db_tables | Maximum number of tables in the database. Table metrics will not be collected for databases that have more tables than max_db_tables. 0 means no limit. | 50 | no |\n| max_db_indexes | Maximum number of indexes in the database. Index metrics will not be collected for databases that have more indexes than max_db_indexes. 0 means no limit. | 250 | no |\n\n{% /details %}\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n dsn: 'postgresql://netdata@127.0.0.1:5432/postgres'\n\n```\n##### Unix socket\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: 'host=/var/run/postgresql dbname=postgres user=netdata'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: 'postgresql://netdata@127.0.0.1:5432/postgres'\n\n - name: remote\n dsn: 'postgresql://netdata@203.0.113.0:5432/postgres'\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Create netdata user\n\nCreate a user with granted `pg_monitor`\nor `pg_read_all_stats` [built-in role](https://www.postgresql.org/docs/current/predefined-roles.html).\n\nTo create the `netdata` user with these permissions, execute the following in a psql session, as a user with CREATEROLE privileges:\n\n```postgresql\nCREATE USER netdata;\nGRANT pg_monitor TO netdata;\n```\n\n
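Optionally, verify the grant before moving on. A minimal sanity check, assuming a local server and shell access to the `postgres` superuser (adjust the role name and connection details to your environment):\n\n```bash\n# list the roles granted to the netdata user (pg_monitor should be among them)\nsudo -u postgres psql -c \"SELECT rolname FROM pg_roles WHERE pg_has_role('netdata', oid, 'member');\"\n```\n\n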
After creating the new user, restart the Netdata agent with `sudo systemctl restart netdata`, or\nthe [appropriate method](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/start-stop-restart.md) for your\nsystem.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/postgres.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/postgres.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | Postgres server DSN (Data Source Name). See [DSN syntax](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING). | postgres://postgres:postgres@127.0.0.1:5432/postgres | yes |\n| timeout | Query timeout in seconds. | 2 | no |\n| collect_databases_matching | Databases selector. Determines which database metrics will be collected. Syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/go/pkg/matcher#simple-patterns-matcher). | | no |\n| max_db_tables | Maximum number of tables in the database. Table metrics will not be collected for databases that have more tables than max_db_tables. 0 means no limit. | 50 | no |\n| max_db_indexes | Maximum number of indexes in the database. Index metrics will not be collected for databases that have more indexes than max_db_indexes. 0 means no limit. | 250 | no |\n\n{% /details %}\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n dsn: 'postgresql://netdata@127.0.0.1:5432/postgres'\n\n```\n##### Unix socket\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: 'host=/var/run/postgresql dbname=postgres user=netdata'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: 'postgresql://netdata@127.0.0.1:5432/postgres'\n\n - name: remote\n dsn: 'postgresql://netdata@203.0.113.0:5432/postgres'\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `postgres` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m postgres\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `postgres` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep postgres\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep postgres /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep postgres\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ postgres_total_connection_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.connections_utilization | average total connection utilization over the last minute |\n| [ postgres_acquired_locks_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.locks_utilization | average acquired locks utilization over the last minute |\n| [ postgres_txid_exhaustion_perc ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.txid_exhaustion_perc | percent towards TXID wraparound |\n| [ postgres_db_cache_io_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.db_cache_io_ratio | average cache hit ratio in db ${label:database} over the last minute |\n| [ postgres_db_transactions_rollback_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.db_cache_io_ratio | average aborted transactions percentage in db ${label:database} over the last five minutes |\n| [ postgres_db_deadlocks_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.db_deadlocks_rate | number of deadlocks detected in db ${label:database} in the last minute |\n| [ postgres_table_cache_io_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_cache_io_ratio | average cache hit ratio in db ${label:database} table ${label:table} over the last minute |\n| [ postgres_table_index_cache_io_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_index_cache_io_ratio | average index cache hit ratio in db ${label:database} table ${label:table} over the last minute |\n| [ postgres_table_toast_cache_io_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_toast_cache_io_ratio | average TOAST hit ratio in db ${label:database} table ${label:table} over the last minute |\n| [ postgres_table_toast_index_cache_io_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_toast_index_cache_io_ratio | average index TOAST hit ratio in db ${label:database} table ${label:table} over the last minute |\n| [ postgres_table_bloat_size_perc ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_bloat_size_perc | bloat size 
percentage in db ${label:database} table ${label:table} |\n| [ postgres_table_last_autovacuum_time ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_autovacuum_since_time | time elapsed since db ${label:database} table ${label:table} was vacuumed by the autovacuum daemon |\n| [ postgres_table_last_autoanalyze_time ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_autoanalyze_since_time | time elapsed since db ${label:database} table ${label:table} was analyzed by the autovacuum daemon |\n| [ postgres_index_bloat_size_perc ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.index_bloat_size_perc | bloat size percentage in db ${label:database} table ${label:table} index ${label:index} |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per PostgreSQL instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postgres.connections_utilization | used | percentage |\n| postgres.connections_usage | available, used | connections |\n| postgres.connections_state_count | active, idle, idle_in_transaction, idle_in_transaction_aborted, disabled | connections |\n| postgres.transactions_duration | a dimension per bucket | transactions/s |\n| postgres.queries_duration | a dimension per bucket | queries/s |\n| postgres.locks_utilization | used | percentage |\n| postgres.checkpoints_rate | scheduled, requested | checkpoints/s |\n| postgres.checkpoints_time | write, sync | milliseconds |\n| postgres.bgwriter_halts_rate | maxwritten | events/s |\n| postgres.buffers_io_rate | checkpoint, backend, bgwriter | B/s |\n| postgres.buffers_backend_fsync_rate | fsync | calls/s |\n| postgres.buffers_allocated_rate | allocated | B/s |\n| postgres.wal_io_rate | write | B/s |\n| postgres.wal_files_count | written, recycled | files |\n| postgres.wal_archiving_files_count | ready, done | files/s |\n| postgres.autovacuum_workers_count | analyze, vacuum_analyze, vacuum, vacuum_freeze, brin_summarize | workers |\n| postgres.txid_exhaustion_towards_autovacuum_perc | emergency_autovacuum | percentage |\n| postgres.txid_exhaustion_perc | txid_exhaustion | percentage |\n| postgres.txid_exhaustion_oldest_txid_num | xid | xid |\n| postgres.catalog_relations_count | ordinary_table, index, sequence, toast_table, view, materialized_view, composite_type, foreign_table, partitioned_table, partitioned_index | relations |\n| postgres.catalog_relations_size | ordinary_table, index, sequence, toast_table, view, materialized_view, composite_type, foreign_table, partitioned_table, partitioned_index | B |\n| postgres.uptime | uptime | seconds |\n| postgres.databases_count | databases | databases |\n\n### Per repl application\n\nThese metrics refer to the replication application.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| application | application name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postgres.replication_app_wal_lag_size | sent_lag, write_lag, flush_lag, replay_lag | B |\n| postgres.replication_app_wal_lag_time | write_lag, flush_lag, replay_lag | seconds |\n\n### Per repl slot\n\nThese metrics refer to the replication slot.\n\nLabels:\n\n| Label | Description 
|\n|:-----------|:----------------|\n| slot | replication slot name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postgres.replication_slot_files_count | wal_keep, pg_replslot_files | files |\n\n### Per database\n\nThese metrics refer to the database.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| database | database name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postgres.db_transactions_ratio | committed, rollback | percentage |\n| postgres.db_transactions_rate | committed, rollback | transactions/s |\n| postgres.db_connections_utilization | used | percentage |\n| postgres.db_connections_count | connections | connections |\n| postgres.db_cache_io_ratio | miss | percentage |\n| postgres.db_io_rate | memory, disk | B/s |\n| postgres.db_ops_fetched_rows_ratio | fetched | percentage |\n| postgres.db_ops_read_rows_rate | returned, fetched | rows/s |\n| postgres.db_ops_write_rows_rate | inserted, deleted, updated | rows/s |\n| postgres.db_conflicts_rate | conflicts | queries/s |\n| postgres.db_conflicts_reason_rate | tablespace, lock, snapshot, bufferpin, deadlock | queries/s |\n| postgres.db_deadlocks_rate | deadlocks | deadlocks/s |\n| postgres.db_locks_held_count | access_share, row_share, row_exclusive, share_update, share, share_row_exclusive, exclusive, access_exclusive | locks |\n| postgres.db_locks_awaited_count | access_share, row_share, row_exclusive, share_update, share, share_row_exclusive, exclusive, access_exclusive | locks |\n| postgres.db_temp_files_created_rate | created | files/s |\n| postgres.db_temp_files_io_rate | written | B/s |\n| postgres.db_size | size | B |\n\n### Per table\n\nThese metrics refer to the database table.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| database | database name |\n| schema | schema name |\n| table | table name |\n| parent_table | parent table name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postgres.table_rows_dead_ratio | dead | percentage |\n| postgres.table_rows_count | live, dead | rows |\n| postgres.table_ops_rows_rate | inserted, deleted, updated | rows/s |\n| postgres.table_ops_rows_hot_ratio | hot | percentage |\n| postgres.table_ops_rows_hot_rate | hot | rows/s |\n| postgres.table_cache_io_ratio | miss | percentage |\n| postgres.table_io_rate | memory, disk | B/s |\n| postgres.table_index_cache_io_ratio | miss | percentage |\n| postgres.table_index_io_rate | memory, disk | B/s |\n| postgres.table_toast_cache_io_ratio | miss | percentage |\n| postgres.table_toast_io_rate | memory, disk | B/s |\n| postgres.table_toast_index_cache_io_ratio | miss | percentage |\n| postgres.table_toast_index_io_rate | memory, disk | B/s |\n| postgres.table_scans_rate | index, sequential | scans/s |\n| postgres.table_scans_rows_rate | index, sequential | rows/s |\n| postgres.table_autovacuum_since_time | time | seconds |\n| postgres.table_vacuum_since_time | time | seconds |\n| postgres.table_autoanalyze_since_time | time | seconds |\n| postgres.table_analyze_since_time | time | seconds |\n| postgres.table_null_columns | null | columns |\n| postgres.table_size | size | B |\n| postgres.table_bloat_size_perc | bloat | percentage |\n| postgres.table_bloat_size | bloat | B |\n\n### Per index\n\nThese metrics refer to the table index.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| database | database name |\n| schema | schema name |\n| table | table name |\n| 
parent_table | parent table name |\n| index | index name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postgres.index_size | size | B |\n| postgres.index_bloat_size_perc | bloat | percentage |\n| postgres.index_bloat_size | bloat | B |\n| postgres.index_usage_status | used, unused | status |\n\n", @@ -6107,7 +6296,7 @@ export const integrations = [ "most_popular": false }, "overview": "# PowerDNS Authoritative Server\n\nPlugin: go.d.plugin\nModule: powerdns\n\n## Overview\n\nThis collector monitors PowerDNS Authoritative Server instances.\nIt collects metrics from [the internal webserver](https://doc.powerdns.com/authoritative/http-api/index.html#webserver).\n\nUsed endpoints:\n\n- [`/api/v1/servers/localhost/statistics`](https://doc.powerdns.com/authoritative/http-api/statistics.html)\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Enable webserver\n\nFollow [webserver](https://doc.powerdns.com/authoritative/http-api/index.html#webserver) documentation.\n\n\n#### Enable HTTP API\n\nFollow [HTTP API](https://doc.powerdns.com/authoritative/http-api/index.html#enabling-the-api) documentation.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/powerdns.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/powerdns.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8081 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8081\n\n```\n{% /details %}\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8081\n username: admin\n password: password\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8081\n\n - name: remote\n url: http://203.0.113.0:8081\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Enable webserver\n\nFollow [webserver](https://doc.powerdns.com/authoritative/http-api/index.html#webserver) documentation.\n\n\n#### Enable HTTP API\n\nFollow [HTTP API](https://doc.powerdns.com/authoritative/http-api/index.html#enabling-the-api) documentation.\n\n
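For example, on many Authoritative Server installations both features can be switched on with settings along these lines in `pdns.conf` (a sketch only; available option names and defaults depend on your PowerDNS version, so follow the linked documentation):\n\n```ini\n# illustrative values - adjust the address, port, and key for your deployment\nwebserver=yes\nwebserver-address=127.0.0.1\nwebserver-port=8081\napi=yes\napi-key=change-me\n```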
\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/powerdns.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/powerdns.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8081 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8081\n\n```\n{% /details %}\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8081\n username: admin\n password: password\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8081\n\n - name: remote\n url: http://203.0.113.0:8081\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `powerdns` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m powerdns\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `powerdns` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep powerdns\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep powerdns /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep powerdns\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per PowerDNS Authoritative Server instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| powerdns.questions_in | udp, tcp | questions/s |\n| powerdns.questions_out | udp, tcp | questions/s |\n| powerdns.cache_usage | query-cache-hit, query-cache-miss, packetcache-hit, packetcache-miss | events/s |\n| powerdns.cache_size | query-cache, packet-cache, key-cache, meta-cache | entries |\n| powerdns.latency | latency | microseconds |\n\n", @@ -6144,7 +6333,7 @@ export const integrations = [ "most_popular": false }, "overview": "# PowerDNS Recursor\n\nPlugin: go.d.plugin\nModule: powerdns_recursor\n\n## Overview\n\nThis collector monitors PowerDNS Recursor instances.\n\nIt collects metrics from [the internal webserver](https://doc.powerdns.com/recursor/http-api/index.html#built-in-webserver-and-http-api).\n\nUsed endpoints:\n\n- [`/api/v1/servers/localhost/statistics`](https://doc.powerdns.com/recursor/common/api/endpoint-statistics.html)\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Enable webserver\n\nFollow [webserver](https://doc.powerdns.com/recursor/http-api/index.html#webserver) documentation.\n\n\n#### Enable HTTP API\n\nFollow [HTTP API](https://doc.powerdns.com/recursor/http-api/index.html#enabling-the-api) documentation.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/powerdns_recursor.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/powerdns_recursor.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8081 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. 
Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8081\n\n```\n{% /details %}\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8081\n username: admin\n password: password\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8081\n\n - name: remote\n url: http://203.0.113.0:8081\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Enable webserver\n\nFollow [webserver](https://doc.powerdns.com/recursor/http-api/index.html#webserver) documentation.\n\n\n#### Enable HTTP API\n\nFollow [HTTP API](https://doc.powerdns.com/recursor/http-api/index.html#enabling-the-api) documentation.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/powerdns_recursor.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/powerdns_recursor.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8081 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8081\n\n```\n{% /details %}\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8081\n username: admin\n password: password\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8081\n\n - name: remote\n url: http://203.0.113.0:8081\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `powerdns_recursor` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m powerdns_recursor\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `powerdns_recursor` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep powerdns_recursor\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep powerdns_recursor /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep powerdns_recursor\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per PowerDNS Recursor instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| powerdns_recursor.questions_in | total, tcp, ipv6 | questions/s |\n| powerdns_recursor.questions_out | udp, tcp, ipv6, throttled | questions/s |\n| powerdns_recursor.answer_time | 0-1ms, 1-10ms, 10-100ms, 100-1000ms, slow | queries/s |\n| powerdns_recursor.timeouts | total, ipv4, ipv6 | timeouts/s |\n| powerdns_recursor.drops | over-capacity-drops, query-pipe-full-drops, too-old-drops, truncated-drops, empty-queries | drops/s |\n| powerdns_recursor.cache_usage | cache-hits, cache-misses, packet-cache-hits, packet-cache-misses | events/s |\n| powerdns_recursor.cache_size | cache, packet-cache, negative-cache | entries |\n\n", @@ -6179,7 +6368,7 @@ export const integrations = [ "community": true }, "overview": "# 4D Server\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor 4D Server performance metrics for efficient application management and optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [4D Server exporter](https://github.com/ThomasMaul/Prometheus_4D_Exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [4D Server exporter](https://github.com/ThomasMaul/Prometheus_4D_Exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. 
| 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [4D Server exporter](https://github.com/ThomasMaul/Prometheus_4D_Exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### 
Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see `fallback_type` below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n
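##### Untyped metrics fallback\n\n> **Note**: This example is an illustrative sketch; the job name, URL, and `myapp_*` pattern are placeholders to adapt.\n\nProcess Untyped time series whose names match a pattern as gauges instead of ignoring them, using the `fallback_type` option described above.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: myapp\n url: http://127.0.0.1:9090/metrics\n fallback_type:\n gauge:\n - myapp_*\n\n```\n{% /details %}\n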
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -6214,7 +6403,7 @@ export const integrations = [ "community": true }, "overview": "# 8430FT modem\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep track of vital metrics from the MTS 8430FT modem for streamlined network performance and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [8430FT Exporter](https://github.com/dernasherbrezon/8430ft_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [8430FT Exporter](https://github.com/dernasherbrezon/8430ft_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using 
the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [8430FT Exporter](https://github.com/dernasherbrezon/8430ft_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts.
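\n\nOne way to narrow the output to recent entries is to inspect only the tail of the file before filtering (a sketch using standard tools; the line count is arbitrary):\n\n```bash\n# look at only the most recent lines, then filter for the collector\ntail -n 500 /var/log/netdata/collector.log | grep prometheus\n```\n\n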
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -6253,7 +6442,7 @@ export const integrations = [ "community": true }, "overview": "# A10 ACOS network devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor A10 Networks device metrics for comprehensive management and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [A10-Networks Prometheus Exporter](https://github.com/a10networks/PrometheusExporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [A10-Networks Prometheus Exporter](https://github.com/a10networks/PrometheusExporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [A10-Networks Prometheus Exporter](https://github.com/a10networks/PrometheusExporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
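\n\nSeparately from filtering, the `max_time_series` limits shown in the table above can be raised if an endpoint legitimately exposes more series than the defaults allow; a sketch with purely illustrative values:\n\n```yaml\njobs:\n  - name: a10\n    url: http://127.0.0.1:9090/metrics\n    max_time_series: 5000            # default is 2000\n    max_time_series_per_metric: 500  # default is 200\n```\n\n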
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -6288,7 +6477,7 @@ export const integrations = [ "community": true }, "overview": "# AMD CPU & GPU\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor AMD System Management Interface performance for optimized hardware management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AMD SMI Exporter](https://github.com/amd/amd_smi_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AMD SMI Exporter](https://github.com/amd/amd_smi_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from 
the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
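\n\nThe proxy options from the table above have no dedicated example in this document; a hedged sketch (proxy endpoint and credentials are hypothetical):\n\n```yaml\njobs:\n  - name: behind_proxy\n    url: http://192.0.2.10:9090/metrics\n    proxy_url: http://proxy.example.com:3128\n    proxy_username: proxyuser\n    proxy_password: proxypass\n```\n\n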
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AMD SMI Exporter](https://github.com/amd/amd_smi_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: 
local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts.
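\n\nOn systemd systems, an alternative that avoids entries from older restarts entirely is to scope the journal query to the current boot (a sketch; `--namespace` and `--grep` are used exactly as in the systemd command above):\n\n```bash\n# current boot only\njournalctl -b --namespace=netdata --grep prometheus\n```\n\n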
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -6323,7 +6512,7 @@ export const integrations = [ "community": true }, "overview": "# APIcast\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor APIcast performance metrics to optimize API gateway operations and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [APIcast](https://github.com/3scale/apicast).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [APIcast](https://github.com/3scale/apicast) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. 
| | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and 
remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [APIcast](https://github.com/3scale/apicast) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
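\n\nThe `label_prefix` option listed in the table above can help disambiguate chart label names; a minimal sketch (the prefix, port, and URL are illustrative):\n\n```yaml\njobs:\n  - name: apicast\n    url: http://127.0.0.1:9421/metrics\n    label_prefix: apicast   # a label named e.g. service_id becomes apicast_service_id\n```\n\n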
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -6358,7 +6547,7 @@ export const integrations = [ "community": true }, "overview": "# ARM HWCPipe\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep track of ARM running Android devices and get metrics for efficient performance optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ARM HWCPipe Exporter](https://github.com/ylz-at/arm-hwcpipe-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ARM HWCPipe Exporter](https://github.com/ylz-at/arm-hwcpipe-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the 
`edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
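\n\nIn the same vein, here is a job-level sketch of the `fallback_type` option documented further below (the metric-name patterns are hypothetical):\n\n```yaml\njobs:\n  - name: hwcpipe\n    url: http://127.0.0.1:9090/metrics\n    fallback_type:\n      counter:\n        - hwcpipe_*_total   # treat untyped *_total series as counters\n      gauge:\n        - hwcpipe_*_usage   # treat untyped usage series as gauges\n```\n\n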
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ARM HWCPipe Exporter](https://github.com/ylz-at/arm-hwcpipe-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -6397,7 +6586,7 @@ export const integrations = [ "community": true }, "overview": "# AWS EC2 Compute instances\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack AWS EC2 instances key metrics for optimized performance and cost management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AWS EC2 Exporter](https://github.com/O1ahmad/aws_ec2_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AWS EC2 Exporter](https://github.com/O1ahmad/aws_ec2_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AWS EC2 Exporter](https://github.com/O1ahmad/aws_ec2_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series.
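As a concrete sketch, the selector can be combined with the new `label_prefix` option in a single job like this (the metric name patterns are hypothetical and use the glob style described in the selector README linked below):\n\n```yaml\njobs:\n - name: ec2\n url: http://127.0.0.1:9090/metrics\n # hypothetical: prepend ec2_ to every chart label name\n label_prefix: ec2\n selector:\n allow:\n # hypothetical metric name patterns\n - aws_ec2_instance_*\n deny:\n - aws_ec2_instance_uptime_*\n```\n\n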
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -6436,7 +6625,7 @@ export const integrations = [ "community": true }, "overview": "# AWS EC2 Spot Instance\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor AWS EC2 Spot instances' performance metrics for efficient resource allocation and cost optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AWS EC2 Spot Exporter](https://github.com/patcadelina/ec2-spot-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AWS EC2 Spot Exporter](https://github.com/patcadelina/ec2-spot-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the
configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AWS EC2 Spot Exporter](https://github.com/patcadelina/ec2-spot-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -6475,7 +6664,7 @@ export const integrations = [ "community": true }, "overview": "# AWS ECS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on AWS ECS services and resources for optimized container management and orchestration.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AWS ECS exporter](https://github.com/bevers222/ecs-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AWS ECS exporter](https://github.com/bevers222/ecs-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides 
metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AWS ECS exporter](https://github.com/bevers222/ecs-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series.
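For instance, a minimal sketch that only drops one noisy family and keeps everything else (the metric name is hypothetical, and this assumes an empty allow list means allow-all; see the Logic line below):\n\n```yaml\njobs:\n - name: ecs\n url: http://127.0.0.1:9090/metrics\n selector:\n deny:\n # hypothetical high-cardinality histogram buckets\n - ecs_task_duration_seconds_bucket\n```\n\n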
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -6514,7 +6703,7 @@ export const integrations = [ "community": true }, "overview": "# AWS Health events\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack AWS service health metrics for proactive incident management and resolution.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AWS Health Exporter](https://github.com/vladvasiliu/aws-health-exporter-rs).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AWS Health Exporter](https://github.com/vladvasiliu/aws-health-exporter-rs) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the 
`edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AWS Health Exporter](https://github.com/vladvasiliu/aws-health-exporter-rs) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
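If you only need the most recent entries, one option (a hypothetical refinement, not part of the standard instructions) is to pipe the output through `tail`:\n\n```bash\n# keep only the last 50 matching lines; the count is arbitrary\ngrep prometheus /var/log/netdata/collector.log | tail -n 50\n```\n\n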
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -6553,7 +6742,7 @@ export const integrations = [ "community": true }, "overview": "# AWS Quota\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor AWS service quotas for effective resource usage and cost management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [aws_quota_exporter](https://github.com/emylincon/aws_quota_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [aws_quota_exporter](https://github.com/emylincon/aws_quota_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides 
metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [aws_quota_exporter](https://github.com/emylincon/aws_quota_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
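As a minimal sketch (the `aws_quota_*` pattern is hypothetical; match it to the metric names your exporter actually exposes), a job could keep only the quota series:\n\n```yaml\nselector:\n allow:\n - aws_quota_*\n```\n\n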
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -6592,7 +6781,7 @@ export const integrations = [ "community": true }, "overview": "# AWS RDS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Amazon RDS (Relational Database Service) metrics for efficient cloud database management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [rds_exporter](https://github.com/percona/rds_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [rds_exporter](https://github.com/percona/rds_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script 
from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [rds_exporter](https://github.com/percona/rds_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: 
local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -6631,7 +6820,7 @@ export const integrations = [ "community": true }, "overview": "# AWS S3 buckets\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor AWS S3 storage metrics for optimized performance, data management, and cost efficiency.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AWS S3 Exporter](https://github.com/ribbybibby/s3_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AWS S3 Exporter](https://github.com/ribbybibby/s3_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides 
metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AWS S3 Exporter](https://github.com/ribbybibby/s3_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
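For example, a sketch that drops the exporter's own Go runtime series while keeping everything else (the `go_*` pattern is an assumption about the exposed names):\n\n```yaml\nselector:\n deny:\n - go_*\n```\n\n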
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -6670,7 +6859,7 @@ export const integrations = [ "community": true }, "overview": "# AWS SQS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack AWS SQS messaging metrics for efficient message processing and queue management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AWS SQS Exporter](https://github.com/jmal98/sqs-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AWS SQS Exporter](https://github.com/jmal98/sqs-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata 
[config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AWS SQS Exporter](https://github.com/jmal98/sqs-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: 
local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
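It can also help to narrow the output to recent lines only, for example by piping through `tail` first (the line count below is only an illustration, adjust it as needed):\n\n```bash\n# inspect only the last 200 collector log lines that mention prometheus\ntail -n 200 /var/log/netdata/collector.log | grep prometheus\n```\n\n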
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -6709,7 +6898,7 @@ export const integrations = [ "community": true }, "overview": "# AWS instance health\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor the health of AWS instances for improved performance and availability.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AWS instance health exporter](https://github.com/bobtfish/aws-instance-health-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AWS instance health exporter](https://github.com/bobtfish/aws-instance-health-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. 
| 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names 
must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AWS instance health exporter](https://github.com/bobtfish/aws-instance-health-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -6744,7 +6933,7 @@ export const integrations = [ "community": true }, "overview": "# Airthings Waveplus air sensor\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Waveplus radon sensor metrics for efficient indoor air quality monitoring and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Waveplus Radon Sensor Exporter](https://github.com/jeremybz/waveplus_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Waveplus Radon Sensor Exporter](https://github.com/jeremybz/waveplus_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the 
configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Waveplus Radon Sensor Exporter](https://github.com/jeremybz/waveplus_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -6779,7 +6968,7 @@ export const integrations = [ "community": true }, "overview": "# Akamai Edge DNS Traffic\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack and analyze Akamai Edge DNS traffic for enhanced performance and security.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Akamai Edge DNS Traffic Exporter](https://github.com/akamai/akamai-edgedns-traffic-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Akamai Edge DNS Traffic Exporter](https://github.com/akamai/akamai-edgedns-traffic-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. 
| 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names 
must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Akamai Edge DNS Traffic Exporter](https://github.com/akamai/akamai-edgedns-traffic-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -6814,7 +7003,7 @@ export const integrations = [ "community": true }, "overview": "# Akamai Global Traffic Management\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor vital metrics of Akamai Global Traffic Management (GTM) for optimized load balancing and failover.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Akamai Global Traffic Management Metrics Exporter](https://github.com/akamai/akamai-gtm-metrics-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Akamai Global Traffic Management Metrics Exporter](https://github.com/akamai/akamai-gtm-metrics-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name 
for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Akamai Global Traffic Management Metrics Exporter](https://github.com/akamai/akamai-gtm-metrics-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. 
| 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
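As a sketch, a job can pair the selector with the `label_prefix` option from the table above (the prefix value and pattern are illustrative assumptions):\n\n```yaml\njobs:\n  - name: gtm\n    url: http://127.0.0.1:9090/metrics\n    # hypothetical prefix: every chart label becomes gtm_<original name>\n    label_prefix: gtm\n    selector:\n      allow:\n        - gtm_*\n```\n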
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -6853,7 +7042,7 @@ export const integrations = [ "community": true }, "overview": "# Akami Cloudmonitor\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Akamai cloudmonitor provider metrics for comprehensive cloud performance management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cloudmonitor exporter](https://github.com/ExpressenAB/cloudmonitor_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cloudmonitor exporter](https://github.com/ExpressenAB/cloudmonitor_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration 
file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
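For instance, a deny list can drop the exporter's own Go runtime and process self-metrics while keeping everything else (a sketch; `go_*` and `process_*` are the conventional exporter self-metric prefixes, and the port is illustrative):\n\n```yaml\njobs:\n  - name: cloudmonitor\n    url: http://127.0.0.1:9090/metrics\n    selector:\n      deny:\n        - go_*\n        - process_*\n```\n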
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cloudmonitor exporter](https://github.com/ExpressenAB/cloudmonitor_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -6888,7 +7077,7 @@ export const integrations = [ "community": true }, "overview": "# Alamos FE2 server\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Alamos FE2 systems for improved performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Alamos FE2 Exporter](https://github.com/codemonauts/prometheus-fe2-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Alamos FE2 Exporter](https://github.com/codemonauts/prometheus-fe2-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Alamos FE2 Exporter](https://github.com/codemonauts/prometheus-fe2-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
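A sketch that tightens the global limits from the table above while selecting a subset of series (all values and the `fe2_*` pattern are illustrative, not recommended settings):\n\n```yaml\njobs:\n  - name: fe2\n    url: http://127.0.0.1:9090/metrics\n    # stop processing if the endpoint returns more than 500 series (hypothetical value)\n    max_time_series: 500\n    # skip any single metric that expands to more than 50 series (hypothetical value)\n    max_time_series_per_metric: 50\n    selector:\n      allow:\n        - fe2_*\n```\n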
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -6927,7 +7116,7 @@ export const integrations = [ "community": true }, "overview": "# Alibaba Cloud\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Alibaba Cloud services and resources for efficient management and cost optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Alibaba Cloud Exporter](https://github.com/aylei/aliyun-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Alibaba Cloud Exporter](https://github.com/aylei/aliyun-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` 
script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
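For instance, selector and fallback_type can be combined in the same job — a sketch only, with hypothetical `aliyun_*` metric names:\n\n```yaml\njobs:\n  - name: aliyun\n    url: http://127.0.0.1:9090/metrics\n    selector:\n      allow:\n        - aliyun_*\n    # treat matching Untyped series as gauges instead of ignoring them\n    fallback_type:\n      gauge:\n        - aliyun_*\n```\n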
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Alibaba Cloud Exporter](https://github.com/aylei/aliyun-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -6962,7 +7151,7 @@ export const integrations = [ "community": true }, "overview": "# Altaro Backup\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Altaro Backup performance metrics to ensure smooth data protection and recovery operations.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Altaro Backup Exporter](https://github.com/raph2i/altaro_backup_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Altaro Backup Exporter](https://github.com/raph2i/altaro_backup_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Altaro Backup Exporter](https://github.com/raph2i/altaro_backup_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
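To make the documented logic concrete — (pattern1 OR pattern2) AND !(pattern3 or pattern4) — a job might allow a family of series while denying a noisy subset of it (the `altaro_*` names are hypothetical):\n\n```yaml\njobs:\n  - name: altaro\n    url: http://127.0.0.1:9090/metrics\n    selector:\n      allow:\n        - altaro_backup_*\n      deny:\n        # drop a hypothetical high-cardinality subset of the allowed family\n        - altaro_backup_duration_*\n```\n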
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -6997,7 +7186,7 @@ export const integrations = [ "community": true }, "overview": "# Andrews & Arnold line status\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Andrews & Arnold Ltd (AAISP) metrics for improved network performance and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Andrews & Arnold line status exporter](https://github.com/daveio/aaisp-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Andrews & Arnold line status exporter](https://github.com/daveio/aaisp-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the 
configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Andrews & Arnold line status exporter](https://github.com/daveio/aaisp-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -7032,7 +7221,7 @@ export const integrations = [ "community": true }, "overview": "# Apache Airflow\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Apache Airflow metrics to optimize task scheduling and workflow management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Airflow exporter](https://github.com/shalb/airflow-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Airflow exporter](https://github.com/shalb/airflow-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides 
metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Airflow exporter](https://github.com/shalb/airflow-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -7071,7 +7260,7 @@ export const integrations = [ "community": true }, "overview": "# Apache Flink\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Apache Flink metrics for efficient stream processing and application management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Apache Flink Metrics Reporter](https://github.com/matsumana/flink_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Apache Flink Metrics Reporter](https://github.com/matsumana/flink_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file 
using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Apache Flink Metrics Reporter](https://github.com/matsumana/flink_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -7106,7 +7295,7 @@ export const integrations = [ "community": true }, "overview": "# Apple Time Machine\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Apple Time Machine backup metrics for efficient data protection and recovery.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Apple Time Machine Exporter](https://github.com/znerol/prometheus-timemachine-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Apple Time Machine Exporter](https://github.com/znerol/prometheus-timemachine-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. 
| 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names 
must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Apple Time Machine Exporter](https://github.com/znerol/prometheus-timemachine-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", @@ -7145,7 +7334,7 @@ export const integrations = [ "community": true }, "overview": "# Aruba devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Aruba Networks device performance metrics for comprehensive network management and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Aruba Exporter](https://github.com/slashdoom/aruba_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Aruba Exporter](https://github.com/slashdoom/aruba_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config`
script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Aruba Exporter](https://github.com/slashdoom/aruba_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback type (counter or gauge) for Untyped metrics. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name:
local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -7184,7 +7373,7 @@ export const integrations = [ "community": true }, "overview": "# ArvanCloud CDN\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack and analyze ArvanCloud CDN and cloud services performance metrics for optimized delivery and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ArvanCloud exporter](https://github.com/arvancloud/ar-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ArvanCloud exporter](https://github.com/arvancloud/ar-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. 
| 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names 
must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ArvanCloud exporter](https://github.com/arvancloud/ar-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -7219,7 +7408,7 @@ export const integrations = [ "community": true }, "overview": "# Audisto\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Audisto SEO and website metrics for improved search performance and optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Audisto exporter](https://github.com/ZeitOnline/audisto_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Audisto exporter](https://github.com/ZeitOnline/audisto_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script 
from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Audisto exporter](https://github.com/ZeitOnline/audisto_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback type (counter or gauge) for Untyped metrics. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -7254,7 +7443,7 @@ export const integrations = [ "community": true }, "overview": "# AuthLog\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor authentication logs for security insights and efficient access management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AuthLog Exporter](https://github.com/woblerr/authlog_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AuthLog Exporter](https://github.com/woblerr/authlog_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides 
metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AuthLog Exporter](https://github.com/woblerr/authlog_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -7293,7 +7482,7 @@ export const integrations = [ "community": true }, "overview": "# Azure AD App passwords\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nSafeguard and track Azure App secrets for enhanced security and access management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Azure App Secrets monitor](https://github.com/vladvasiliu/azure-app-secrets-monitor).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Azure App Secrets monitor](https://github.com/vladvasiliu/azure-app-secrets-monitor) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the 
configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Azure App Secrets monitor](https://github.com/vladvasiliu/azure-app-secrets-monitor) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -7332,7 +7521,7 @@ export const integrations = [ "community": true }, "overview": "# Azure Elastic Pool SQL\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Azure Elastic SQL performance metrics for efficient database management and query optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Azure Elastic SQL Exporter](https://github.com/benclapp/azure_elastic_sql_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Azure Elastic SQL Exporter](https://github.com/benclapp/azure_elastic_sql_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. 
| 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names 
must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", +    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Azure Elastic SQL Exporter](https://github.com/benclapp/azure_elastic_sql_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
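As an illustrative sketch, a deny-only selector can drop just the series you do not want; this assumes that omitting `allow` keeps the default allow-everything behavior:\n\n```yaml\nselector:\n deny:\n # hypothetical pattern - drops all histogram bucket series\n - \"*_bucket\"\n```\n\n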
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -7371,7 +7560,7 @@ export const integrations = [ "community": true }, "overview": "# Azure Resources\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Azure resources vital metrics for efficient cloud management and cost optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Azure Resources Exporter](https://github.com/FXinnovation/azure_metrics_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Azure Resources Exporter](https://github.com/FXinnovation/azure_metrics_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the 
configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Azure Resources Exporter](https://github.com/FXinnovation/azure_metrics_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -7410,7 +7599,7 @@ export const integrations = [ "community": true }, "overview": "# Azure SQL\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Azure SQL performance metrics for efficient database management and query performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Azure SQL exporter](https://github.com/iamseth/azure_sql_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Azure SQL exporter](https://github.com/iamseth/azure_sql_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", +    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Azure SQL exporter](https://github.com/iamseth/azure_sql_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
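To make the OR semantics of the Logic line below concrete, a sketch with two allow patterns (both metric name prefixes are hypothetical) collects a series when it matches either one:\n\n```yaml\nselector:\n allow:\n # hypothetical prefixes - check the exporter's actual metric names\n - azure_sql_*\n - mssql_*\n```\n\n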
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -7449,7 +7638,7 @@ export const integrations = [ "community": true }, "overview": "# Azure Service Bus\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Azure Service Bus messaging metrics for optimized communication and integration.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Azure Service Bus Exporter](https://github.com/marcinbudny/servicebus_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Azure Service Bus Exporter](https://github.com/marcinbudny/servicebus_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration 
file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Azure Service Bus Exporter](https://github.com/marcinbudny/servicebus_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n
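As a concrete sketch with hypothetical metric names: if the exporter exposed `myapp_requests` and `myapp_temperature` without `# TYPE` lines, they could be collected as a counter and a gauge respectively:\n\n```yaml\nfallback_type:\n  counter:\n    - myapp_requests    # hypothetical untyped metric, processed as Counter\n  gauge:\n    - myapp_temp*       # shell-style name pattern, processed as Gauge\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 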
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -7488,7 +7677,7 @@ export const integrations = [ "community": true }, "overview": "# Azure application\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Azure Monitor metrics for comprehensive resource management and performance optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Azure Monitor exporter](https://github.com/RobustPerception/azure_metrics_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Azure Monitor exporter](https://github.com/RobustPerception/azure_metrics_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. 
| 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names 
must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", +    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Azure Monitor exporter](https://github.com/RobustPerception/azure_metrics_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n
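For instance, requests can be routed through a proxy; the sketch below assumes a hypothetical proxy listening on 127.0.0.1:3128 (the credential options are only needed if the proxy enforces basic authentication):\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    proxy_url: http://127.0.0.1:3128    # hypothetical proxy address\n    proxy_username: proxyuser           # only if the proxy requires auth\n    proxy_password: proxypass\n```\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 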
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -7523,7 +7712,7 @@ export const integrations = [ "community": true }, "overview": "# BOSH\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on BOSH deployment metrics for improved cloud orchestration and resource management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [BOSH exporter](https://github.com/bosh-prometheus/bosh_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [BOSH exporter](https://github.com/bosh-prometheus/bosh_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script 
from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [BOSH exporter](https://github.com/bosh-prometheus/bosh_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n
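A concrete sketch with hypothetical patterns: collect every time series whose metric name starts with `bosh_`, except the `bosh_deployment_` family:\n\n```yaml\nselector:\n  allow:\n    - bosh_*              # hypothetical: keep all bosh_ series\n  deny:\n    - bosh_deployment_*   # hypothetical: drop this sub-family\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 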
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -7558,7 +7747,7 @@ export const integrations = [ "community": true }, "overview": "# BigQuery\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Google BigQuery metrics for optimized data processing and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [BigQuery Exporter](https://github.com/m-lab/prometheus-bigquery-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [BigQuery Exporter](https://github.com/m-lab/prometheus-bigquery-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides 
metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", +    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [BigQuery Exporter](https://github.com/m-lab/prometheus-bigquery-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n
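Since `update_every` and `autodetection_retry` can also be set globally, here is a sketch that raises both defaults for every job (the values are illustrative):\n\n```yaml\nupdate_every: 30           # collect every 30 seconds for all jobs\nautodetection_retry: 60    # retry auto-detection every 60 seconds\n\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n```\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 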
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -7593,7 +7782,7 @@ export const integrations = [ "community": true }, "overview": "# Bird Routing Daemon\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Bird Routing Daemon metrics for optimized network routing and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Bird Routing Daemon Exporter](https://github.com/czerwonk/bird_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Bird Routing Daemon Exporter](https://github.com/czerwonk/bird_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the 
`edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Bird Routing Daemon Exporter](https://github.com/czerwonk/bird_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -7630,7 +7819,7 @@ export const integrations = [ "community": true }, "overview": "# Blackbox\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack external service availability and response times with Blackbox monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Blackbox exporter](https://github.com/prometheus/blackbox_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Blackbox exporter](https://github.com/prometheus/blackbox_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides 
metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Blackbox exporter](https://github.com/prometheus/blackbox_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series.
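As a sketch of how the selector composes with the new `label_prefix` option from the table above (the `bb` prefix and the `probe_*` pattern are illustrative assumptions, not defaults):

```yaml
jobs:
  - name: local
    url: http://127.0.0.1:9090/metrics
    label_prefix: bb    # each label is exposed as bb_<original label name>
    selector:
      allow:
        - probe_*       # keep only probe_* series
```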
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -7665,7 +7854,7 @@ export const integrations = [ "community": true }, "overview": "# Bobcat Miner 300\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Bobcat equipment metrics for optimized performance and maintenance management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Bobcat Exporter](https://github.com/pperzyna/bobcat_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Bobcat Exporter](https://github.com/pperzyna/bobcat_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from 
the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Bobcat Exporter](https://github.com/pperzyna/bobcat_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name:
local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -7700,7 +7889,7 @@ export const integrations = [ "community": true }, "overview": "# Borg backup\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Borg backup performance metrics for efficient data protection and recovery.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Borg backup exporter](https://github.com/k0ral/borg-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Borg backup exporter](https://github.com/k0ral/borg-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides 
metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Borg backup exporter](https://github.com/k0ral/borg-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series.
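A narrow selector is also a practical way to stay within the `max_time_series` limits described above; a minimal sketch under that assumption (the `borg_*` pattern and the raised limit are illustrative):

```yaml
jobs:
  - name: local
    url: http://127.0.0.1:9090/metrics
    max_time_series: 5000   # raise the global cap for a large endpoint
    selector:
      allow:
        - borg_*            # or collect only the series you need
```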
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
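The `label_prefix` option documented in the table above is newly added and ships without a usage example. A minimal sketch, assuming a hypothetical job name and endpoint (adjust the URL and port to your exporter):

```yaml
jobs:
  - name: borg
    url: http://127.0.0.1:9099/metrics  # hypothetical endpoint; check your exporter's port
    # with this prefix, a label such as "repository" is exposed as "borg_repository"
    label_prefix: borg
```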
"troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",
 "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n",
 "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",
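The untyped-metric rules above combine with the `fallback_type` option from the setup section. A minimal sketch, assuming a hypothetical metric `myapp_temperature` exposed without a `# TYPE` line: it has no `_total` suffix and no `quantile` or `le` label, so it would be ignored unless a fallback type matches it:

```yaml
fallback_type:
  gauge:
    # hypothetical untyped metric, processed as a Gauge instead of being ignored
    - myapp_temperature
```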
 @@ -7735,7 +7924,7 @@ export const integrations = [ "community": true }, "overview": "# BungeeCord\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack BungeeCord proxy server metrics for efficient load balancing and performance management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [BungeeCord Prometheus Exporter](https://github.com/weihao/bungeecord-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",
 - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [BungeeCord Prometheus Exporter](https://github.com/weihao/bungeecord-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [BungeeCord Prometheus Exporter](https://github.com/weihao/bungeecord-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n",
 "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",
 "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n",
 "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",
 @@ -7770,7 +7959,7 @@ export const integrations = [ "community": true }, "overview": "# CVMFS clients\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack CernVM File System metrics for optimized distributed file system performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [CVMFS exporter](https://github.com/guilbaults/cvmfs-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",
 - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [CVMFS exporter](https://github.com/guilbaults/cvmfs-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n",
 + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [CVMFS exporter](https://github.com/guilbaults/cvmfs-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n",
 "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -7805,7 +7994,7 @@ export const integrations = [ "community": true }, "overview": "# Celery\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Celery task queue metrics for optimized task processing and resource management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Celery Exporter](https://github.com/ZeitOnline/celery_redis_prometheus).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Celery Exporter](https://github.com/ZeitOnline/celery_redis_prometheus) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the 
`edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Celery Exporter](https://github.com/ZeitOnline/celery_redis_prometheus) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n",
 "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -7840,7 +8029,7 @@ export const integrations = [ "community": true }, "overview": "# Certificate Transparency\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack certificate transparency log metrics for enhanced\nSSL/TLS certificate management and security.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ct-exporter](https://github.com/Hsn723/ct-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ct-exporter](https://github.com/Hsn723/ct-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides 
metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n",
 + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ct-exporter](https://github.com/Hsn723/ct-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n",
 "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -7875,7 +8064,7 @@ export const integrations = [ "community": true }, "overview": "# Checkpoint device\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Check Point firewall and security metrics for enhanced network protection and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Checkpoint exporter](https://github.com/RespiroConsulting/CheckPointExporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Checkpoint exporter](https://github.com/RespiroConsulting/CheckPointExporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration 
file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Checkpoint exporter](https://github.com/RespiroConsulting/CheckPointExporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see `fallback_type` below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -7910,7 +8099,7 @@ export const integrations = [ "community": true }, "overview": "# Chia\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Chia blockchain metrics for optimized farming and resource allocation.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Chia Exporter](https://github.com/chia-network/chia-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Chia Exporter](https://github.com/chia-network/chia-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides 
metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Chia Exporter](https://github.com/chia-network/chia-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see `fallback_type` below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
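A minimal deny-only sketch, assuming that leaving the `allow` list empty permits all series (an assumption consistent with the selector logic described below; the `process_*` pattern is illustrative):

```yaml
jobs:
  - name: local
    url: http://127.0.0.1:9090/metrics
    selector:
      deny:
        - process_*   # illustrative: drop process_* series, keep everything else
```
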
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -7945,7 +8134,7 @@ export const integrations = [ "community": true }, "overview": "# Christ Elektronik CLM5IP power panel\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Christ Elektronik CLM5IP device metrics for efficient performance and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Christ Elektronik CLM5IP Exporter](https://github.com/christmann/clm5ip_exporter/).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Christ Elektronik CLM5IP Exporter](https://github.com/christmann/clm5ip_exporter/) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou 
can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Christ Elektronik CLM5IP Exporter](https://github.com/christmann/clm5ip_exporter/) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see `fallback_type` below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -7980,7 +8169,7 @@ export const integrations = [ "community": true }, "overview": "# Cilium Agent\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Cilium Agent metrics for optimized network security and connectivity.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cilium Agent](https://github.com/cilium/cilium).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cilium Agent](https://github.com/cilium/cilium) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. 
| | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and 
remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cilium Agent](https://github.com/cilium/cilium) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see `fallback_type` below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
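A combined sketch showing the selector together with the newly documented `label_prefix` option; the port and patterns are illustrative assumptions, not taken from the Cilium docs:

```yaml
jobs:
  - name: cilium_agent
    url: http://127.0.0.1:9962/metrics   # assumed cilium-agent metrics port; adjust to your deployment
    label_prefix: cilium                 # per the option description, a label named "scope" becomes "cilium_scope"
    selector:
      allow:
        - cilium_*   # illustrative: keep only series whose name starts with "cilium_"
```
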
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8015,7 +8204,7 @@ export const integrations = [ "community": true }, "overview": "# Cilium Operator\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Cilium Operator metrics for efficient Kubernetes network security management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cilium Operator](https://github.com/cilium/cilium).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cilium Operator](https://github.com/cilium/cilium) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config 
directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cilium Operator](https://github.com/cilium/cilium) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: 
local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8050,7 +8239,7 @@ export const integrations = [ "community": true }, "overview": "# Cilium Proxy\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Cilium Proxy metrics for enhanced network security and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cilium Proxy](https://github.com/cilium/proxy).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cilium Proxy](https://github.com/cilium/proxy) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. 
| | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and 
remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cilium Proxy](https://github.com/cilium/proxy) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8089,7 +8278,7 @@ export const integrations = [ "community": true }, "overview": "# Cisco ACI\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Cisco ACI infrastructure metrics for optimized network performance and resource management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cisco ACI Exporter](https://github.com/RavuAlHemio/prometheus_aci_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cisco ACI Exporter](https://github.com/RavuAlHemio/prometheus_aci_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file 
using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cisco ACI Exporter](https://github.com/RavuAlHemio/prometheus_aci_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8128,7 +8317,7 @@ export const integrations = [ "community": true }, "overview": "# Citrix NetScaler\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on NetScaler performance metrics for efficient application delivery and load balancing.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Citrix NetScaler Exporter](https://github.com/rokett/Citrix-NetScaler-Exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Citrix NetScaler Exporter](https://github.com/rokett/Citrix-NetScaler-Exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. 
| 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names 
must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Citrix NetScaler Exporter](https://github.com/rokett/Citrix-NetScaler-Exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8163,7 +8352,7 @@ export const integrations = [ "community": true }, "overview": "# ClamAV daemon\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack ClamAV antivirus metrics for enhanced threat detection and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ClamAV daemon stats exporter](https://github.com/sergeymakinen/clamav_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ClamAV daemon stats exporter](https://github.com/sergeymakinen/clamav_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the 
`edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ClamAV daemon stats exporter](https://github.com/sergeymakinen/clamav_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see `fallback_type` below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n
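##### Custom HTTP headers\n\nA minimal sketch of sending extra request headers through the `headers` option from the table above (the header name and value are illustrative, not something this exporter requires).\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n # assumption: illustrative header only; use whatever your endpoint expects\n headers:\n X-Api-Key: my-api-key\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 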
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8198,7 +8387,7 @@ export const integrations = [ "community": true }, "overview": "# Clamscan results\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor ClamAV scanning performance metrics for efficient malware detection and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [clamscan-exporter](https://github.com/FortnoxAB/clamscan-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [clamscan-exporter](https://github.com/FortnoxAB/clamscan-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [clamscan-exporter](https://github.com/FortnoxAB/clamscan-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8233,7 +8422,7 @@ export const integrations = [ "community": true }, "overview": "# Clash\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Clash proxy server metrics for optimized network performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Clash exporter](https://github.com/elonzh/clash_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Clash exporter](https://github.com/elonzh/clash_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata 
[config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Clash exporter](https://github.com/elonzh/clash_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see `fallback_type` below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n
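##### Scraping through a proxy\n\nA sketch of collecting through an HTTP proxy using the `proxy_url` and related options from the table above (the proxy address and credentials are illustrative).\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n # assumption: a local proxy on port 3128; replace with your proxy\n proxy_url: http://127.0.0.1:3128\n proxy_username: proxyuser\n proxy_password: proxypass\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: 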
local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8272,7 +8461,7 @@ export const integrations = [ "community": true }, "overview": "# CloudWatch\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor AWS CloudWatch metrics for comprehensive AWS resource management and performance optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [CloudWatch exporter](https://github.com/prometheus/cloudwatch_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [CloudWatch exporter](https://github.com/prometheus/cloudwatch_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [CloudWatch exporter](https://github.com/prometheus/cloudwatch_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see `fallback_type` below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n
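\n##### Time series limits\n\nA short sketch of the `max_time_series` options for endpoints that expose many series (the values are illustrative; the defaults are 2000 and 200):\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n # assumption: raising both caps for a large endpoint\n max_time_series: 5000\n max_time_series_per_metric: 500\n```\n\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 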
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8311,7 +8500,7 @@ export const integrations = [ "community": true }, "overview": "# Cloud Foundry\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Cloud Foundry platform metrics for optimized application deployment and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cloud Foundry exporter](https://github.com/bosh-prometheus/cf_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", -    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cloud Foundry exporter](https://github.com/bosh-prometheus/cf_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the
`edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cloud Foundry exporter](https://github.com/bosh-prometheus/cf_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8350,7 +8539,7 @@ export const integrations = [ "community": true }, "overview": "# Cloud Foundry Firehose\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Cloud Foundry Firehose metrics for comprehensive platform diagnostics and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cloud Foundry Firehose exporter](https://github.com/bosh-prometheus/firehose_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cloud Foundry Firehose exporter](https://github.com/bosh-prometheus/firehose_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. 
| 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names 
must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cloud Foundry Firehose exporter](https://github.com/bosh-prometheus/firehose_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8389,7 +8578,7 @@ export const integrations = [ "community": true }, "overview": "# Cloudflare PCAP\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Cloudflare CDN and security metrics for optimized content delivery and protection.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cloudflare exporter](https://github.com/wehkamp/docker-prometheus-cloudflare-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cloudflare exporter](https://github.com/wehkamp/docker-prometheus-cloudflare-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit 
the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cloudflare exporter](https://github.com/wehkamp/docker-prometheus-cloudflare-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8424,7 +8613,7 @@ export const integrations = [ "community": true }, "overview": "# ClusterControl CMON\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack CMON metrics for Severalnines Cluster Control for efficient monitoring and management of database operations.\n\n\nMetrics are gathered by periodically sending HTTP requests to [CMON Exporter](https://github.com/severalnines/cmon_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [CMON Exporter](https://github.com/severalnines/cmon_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [CMON Exporter](https://github.com/severalnines/cmon_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8459,7 +8648,7 @@ export const integrations = [ "community": true }, "overview": "# Collectd\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor system and application metrics with Collectd for comprehensive performance analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Collectd exporter](https://github.com/prometheus/collectd_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Collectd exporter](https://github.com/prometheus/collectd_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` 
script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Collectd exporter](https://github.com/prometheus/collectd_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8494,7 +8683,7 @@ export const integrations = [ "community": true }, "overview": "# Concourse\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Concourse CI/CD pipeline metrics for optimized workflow management and deployment.\n\n\nMetrics are gathered by periodically sending HTTP requests to the Concourse built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Configure built-in Prometheus exporter\n\nTo configure the built-in Prometheus exporter, follow the [official documentation](https://concourse-ci.org/metrics.html#configuring-metrics).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", +    "setup": "## Setup\n\n### Prerequisites\n\n#### Configure built-in Prometheus exporter\n\nTo configure the built-in Prometheus exporter, follow the [official documentation](https://concourse-ci.org/metrics.html#configuring-metrics).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8529,7 +8718,7 @@ export const integrations = [ "community": true }, "overview": "# CraftBeerPi\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on CraftBeerPi homebrewing metrics for optimized brewing process management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [CraftBeerPi exporter](https://github.com/jo-hannes/craftbeerpi_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [CraftBeerPi exporter](https://github.com/jo-hannes/craftbeerpi_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the 
`edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [CraftBeerPi exporter](https://github.com/jo-hannes/craftbeerpi_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8564,7 +8753,7 @@ export const integrations = [ "community": true }, "overview": "# Crowdsec\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Crowdsec security metrics for efficient threat detection and response.\n\n\nMetrics are gathered by periodically sending HTTP requests to the Crowdsec build-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Configure built-in Prometheus exporter\n\nTo configure the built-in Prometheus exporter, follow the [official documentation](https://docs.crowdsec.net/docs/observability/prometheus/).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides 
metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", +    "setup": "## Setup\n\n### Prerequisites\n\n#### Configure built-in Prometheus exporter\n\nTo configure the built-in Prometheus exporter, follow the [official documentation](https://docs.crowdsec.net/docs/observability/prometheus/).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8599,7 +8788,7 @@ export const integrations = [ "community": true }, "overview": "# Crypto exchanges\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack cryptocurrency market metrics for informed investment and trading decisions.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Crypto exporter](https://github.com/ix-ai/crypto-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Crypto exporter](https://github.com/ix-ai/crypto-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from 
the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Crypto exporter](https://github.com/ix-ai/crypto-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics (process as Counter or Gauge instead of ignoring them). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, its data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout, in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name:
local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8634,7 +8823,7 @@ export const integrations = [ "community": true }, "overview": "# Cryptowatch\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Cryptowatch market data metrics for comprehensive cryptocurrency market analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cryptowat Exporter](https://github.com/nbarrientos/cryptowat_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cryptowat Exporter](https://github.com/nbarrientos/cryptowat_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", +    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cryptowat Exporter](https://github.com/nbarrientos/cryptowat_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics (process as Counter or Gauge instead of ignoring them). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, its data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout, in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series.
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8669,7 +8858,7 @@ export const integrations = [ "community": true }, "overview": "# Custom Exporter\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nCreate and monitor custom metrics tailored to your specific use case and requirements.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Custom Exporter](https://github.com/orange-cloudfoundry/custom_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Custom Exporter](https://github.com/orange-cloudfoundry/custom_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the 
`edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Custom Exporter](https://github.com/orange-cloudfoundry/custom_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics (process as Counter or Gauge instead of ignoring them). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, its data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout, in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8704,7 +8893,7 @@ export const integrations = [ "community": true }, "overview": "# DDWRT Routers\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on DD-WRT router metrics for efficient network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ddwrt-collector](https://github.com/camelusferus/ddwrt_collector).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ddwrt-collector](https://github.com/camelusferus/ddwrt_collector) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides 
metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", +    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ddwrt-collector](https://github.com/camelusferus/ddwrt_collector) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics (process as Counter or Gauge instead of ignoring them). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, its data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout, in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series.
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8743,7 +8932,7 @@ export const integrations = [ "community": true }, "overview": "# DMARC\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack DMARC email authentication metrics for improved email security and deliverability.\n\n\nMetrics are gathered by periodically sending HTTP requests to [dmarc-metrics-exporter](https://github.com/jgosmann/dmarc-metrics-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [dmarc-metrics-exporter](https://github.com/jgosmann/dmarc-metrics-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the 
`edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [dmarc-metrics-exporter](https://github.com/jgosmann/dmarc-metrics-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8778,7 +8967,7 @@ export const integrations = [ "community": true }, "overview": "# DNSBL\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor DNSBL metrics for efficient domain reputation and security management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [dnsbl-exporter](https://github.com/Luzilla/dnsbl_exporter/).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [dnsbl-exporter](https://github.com/Luzilla/dnsbl_exporter/) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides 
metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", +    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [dnsbl-exporter](https://github.com/Luzilla/dnsbl_exporter/) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series.
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8813,7 +9002,7 @@ export const integrations = [ "community": true }, "overview": "# Dell EMC ECS cluster\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Dell EMC ECS object storage metrics for optimized storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Dell EMC ECS Exporter](https://github.com/paychex/prometheus-emcecs-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Dell EMC ECS Exporter](https://github.com/paychex/prometheus-emcecs-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the 
configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Dell EMC ECS Exporter](https://github.com/paychex/prometheus-emcecs-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8848,7 +9037,7 @@ export const integrations = [ "community": true }, "overview": "# Dell EMC Isilon cluster\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Dell EMC Isilon scale-out NAS metrics for efficient storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Dell EMC Isilon Exporter](https://github.com/paychex/prometheus-isilon-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Dell EMC Isilon Exporter](https://github.com/paychex/prometheus-isilon-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. 
| 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names 
must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", +    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Dell EMC Isilon Exporter](https://github.com/paychex/prometheus-isilon-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series.
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8883,7 +9072,7 @@ export const integrations = [ "community": true }, "overview": "# Dell EMC XtremIO cluster\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Dell/EMC XtremIO storage metrics for optimized data management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Dell/EMC XtremIO Exporter](https://github.com/cthiel42/prometheus-xtremio-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Dell/EMC XtremIO Exporter](https://github.com/cthiel42/prometheus-xtremio-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit 
the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Dell/EMC XtremIO Exporter](https://github.com/cthiel42/prometheus-xtremio-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type: process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8918,7 +9107,7 @@ export const integrations = [ "community": true }, "overview": "# Dell PowerMax\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Dell EMC PowerMax storage array metrics for efficient storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [PowerMax Exporter](https://github.com/kckecheng/powermax_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [PowerMax Exporter](https://github.com/kckecheng/powermax_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [PowerMax Exporter](https://github.com/kckecheng/powermax_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type: process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8953,7 +9142,7 @@ export const integrations = [ "community": true }, "overview": "# Dependency-Track\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Dependency-Track metrics for efficient vulnerability management and software supply chain analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Dependency-Track Exporter](https://github.com/jetstack/dependency-track-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Dependency-Track Exporter](https://github.com/jetstack/dependency-track-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can 
edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Dependency-Track Exporter](https://github.com/jetstack/dependency-track-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type: process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8988,7 +9177,7 @@ export const integrations = [ "community": true }, "overview": "# DigitalOcean\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack DigitalOcean cloud provider metrics for optimized resource management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [DigitalOcean Exporter](https://github.com/metalmatze/digitalocean_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [DigitalOcean Exporter](https://github.com/metalmatze/digitalocean_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [DigitalOcean Exporter](https://github.com/metalmatze/digitalocean_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type: process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9023,7 +9212,7 @@ export const integrations = [ "community": true }, "overview": "# Discourse\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Discourse forum metrics for efficient community management and engagement.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Discourse Exporter](https://github.com/discourse/discourse-prometheus).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Discourse Exporter](https://github.com/discourse/discourse-prometheus) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` 
script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
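As an aside on the global options mentioned under "#### Options" above: a minimal sketch of how `update_every` and `autodetection_retry` might sit at the top level of `go.d/prometheus.conf` (layout assumed from the usual go.d conventions; the value 30 is an arbitrary example):

```yaml
# Global defaults, inherited by every job unless overridden per job.
update_every: 10         # collect every 10 seconds
autodetection_retry: 30  # retry failed auto-detection every 30 seconds

jobs:
  - name: local
    url: http://127.0.0.1:9090/metrics
```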
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Discourse Exporter](https://github.com/discourse/discourse-prometheus) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type: process Untyped metrics as Counter or Gauge instead of ignoring them (see fallback_type below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9058,7 +9247,7 @@ export const integrations = [ "community": true }, "overview": "# Dutch Electricity Smart Meter\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Dutch smart meter P1 port metrics for efficient energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [P1Exporter - Dutch Electricity Smart Meter Exporter](https://github.com/TobiasDeBruijn/prometheus-p1-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [P1Exporter - Dutch Electricity Smart Meter Exporter](https://github.com/TobiasDeBruijn/prometheus-p1-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection 
frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, 
their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [P1Exporter - Dutch Electricity Smart Meter Exporter](https://github.com/TobiasDeBruijn/prometheus-p1-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type: process Untyped metrics as Counter or Gauge instead of ignoring them (see fallback_type below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series.
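As a side note on the `label_prefix` option documented in the table above, a minimal sketch (the prefix `p1` is a hypothetical choice):

```yaml
jobs:
  - name: local
    url: http://127.0.0.1:9090/metrics
    label_prefix: p1   # per the table above, a label named 'phase' would appear as 'p1_phase'
```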
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9093,7 +9282,7 @@ export const integrations = [ "community": true }, "overview": "# Dynatrace\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Dynatrace APM metrics for comprehensive application performance management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Dynatrace Exporter](https://github.com/Apside-TOP/dynatrace_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Dynatrace Exporter](https://github.com/Apside-TOP/dynatrace_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script 
from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Dynatrace Exporter](https://github.com/Apside-TOP/dynatrace_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type: process Untyped metrics as Counter or Gauge instead of ignoring them (see fallback_type below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9128,7 +9317,7 @@ export const integrations = [ "community": true }, "overview": "# EOS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor CERN EOS metrics for efficient storage management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [EOS exporter](https://github.com/cern-eos/eos_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [EOS exporter](https://github.com/cern-eos/eos_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. 
| | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and 
remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [EOS exporter](https://github.com/cern-eos/eos_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type: process Untyped metrics as Counter or Gauge instead of ignoring them (see fallback_type below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series.
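Jumping ahead briefly to the `fallback_type` option described below: a minimal sketch, assuming hypothetical untyped `eos_space_*` metrics that should be treated as gauges:

```yaml
jobs:
  - name: local
    url: http://127.0.0.1:9090/metrics
    fallback_type:
      gauge:
        - eos_space_*   # untyped metrics matching this pattern are processed as Gauge
```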
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use the \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n
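##### Limits and label prefix\n\nA minimal illustrative sketch (not one of the stock examples) that combines the `max_time_series`, `max_time_series_per_metric`, and `label_prefix` options documented in the table above; the URL and values are placeholders:\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    max_time_series: 5000            # raise the global per-endpoint limit\n    max_time_series_per_metric: 500  # allow more series per metric name\n    label_prefix: myapp              # labels are renamed to myapp_<original name>\n```\n{% /details %}\n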
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9163,7 +9352,7 @@ export const integrations = [ "community": true }, "overview": "# Eaton UPS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Eaton uninterruptible power supply (UPS) metrics for efficient power management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Prometheus Eaton UPS Exporter](https://github.com/psyinfra/prometheus-eaton-ups-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", -    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Prometheus Eaton UPS Exporter](https://github.com/psyinfra/prometheus-eaton-ups-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou
can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Prometheus Eaton UPS Exporter](https://github.com/psyinfra/prometheus-eaton-ups-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9198,7 +9387,7 @@ export const integrations = [ "community": true }, "overview": "# Elgato Key Light devices.\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Elgato Key Light metrics for optimized lighting control and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Elgato Key Light exporter](https://github.com/mdlayher/keylight_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Elgato Key Light exporter](https://github.com/mdlayher/keylight_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Elgato Key Light exporter](https://github.com/mdlayher/keylight_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9233,7 +9422,7 @@ export const integrations = [ "community": true }, "overview": "# Energomera smart power meters\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Energomera electricity meter metrics for efficient energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Energomera electricity meter exporter](https://github.com/peak-load/energomera_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [energomera-exporter Energomera electricity meter exporter](https://github.com/peak-load/energomera_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is 
`go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [energomera-exporter Energomera electricity meter exporter](https://github.com/peak-load/energomera_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. 
| 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9268,7 +9457,7 @@ export const integrations = [ "community": true }, "overview": "# Excel spreadsheet\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nExport Prometheus metrics to Excel for versatile data analysis and reporting.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Excel Exporter](https://github.com/MarcusCalidus/excel-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Excel Exporter](https://github.com/MarcusCalidus/excel-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from 
the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Excel Exporter](https://github.com/MarcusCalidus/excel-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9303,7 +9492,7 @@ export const integrations = [ "community": true }, "overview": "# FRRouting\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Free Range Routing (FRR) metrics for optimized network routing and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [FRRouting Exporter](https://github.com/tynany/frr_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [FRRouting Exporter](https://github.com/tynany/frr_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides 
metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [FRRouting Exporter](https://github.com/tynany/frr_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
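For instance, a sketch (hypothetical FRR metric names, glob-style patterns as described in the selector README linked below) that keeps BGP series but drops per-peer ones could look like:\n\n```yaml\nselector:\n allow:\n - frr_bgp_*\n deny:\n - frr_bgp_peer_*\n```\n\n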
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9338,7 +9527,7 @@ export const integrations = [ "community": true }, "overview": "# Fastd\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Fastd VPN metrics for efficient virtual private network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Fastd Exporter](https://github.com/freifunk-darmstadt/fastd-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Fastd Exporter](https://github.com/freifunk-darmstadt/fastd-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` 
script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Fastd Exporter](https://github.com/freifunk-darmstadt/fastd-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9373,7 +9562,7 @@ export const integrations = [ "community": true }, "overview": "# Fortigate firewall\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Fortigate firewall metrics for enhanced network protection and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [fortigate_exporter](https://github.com/bluecmd/fortigate_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [fortigate_exporter](https://github.com/bluecmd/fortigate_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [fortigate_exporter](https://github.com/bluecmd/fortigate_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
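As a concrete sketch (hypothetical fortigate_exporter metric names, glob-style patterns per the selector README linked below), keeping only VPN and CPU series could look like:\n\n```yaml\nselector:\n allow:\n - fortigate_vpn_*\n - fortigate_cpu_*\n```\n\n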
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9408,7 +9597,7 @@ export const integrations = [ "community": true }, "overview": "# FreeBSD NFS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor FreeBSD Network File System metrics for efficient file sharing management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [FreeBSD NFS Exporter](https://github.com/Axcient/freebsd-nfs-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [FreeBSD NFS Exporter](https://github.com/Axcient/freebsd-nfs-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the 
`edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [FreeBSD NFS Exporter](https://github.com/Axcient/freebsd-nfs-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9443,7 +9632,7 @@ export const integrations = [ "community": true }, "overview": "# FreeBSD RCTL-RACCT\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on FreeBSD Resource Container metrics for optimized resource management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [FreeBSD RCTL Exporter](https://github.com/yo000/rctl_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [FreeBSD RCTL Exporter](https://github.com/yo000/rctl_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [FreeBSD RCTL Exporter](https://github.com/yo000/rctl_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series.
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9478,7 +9667,7 @@ export const integrations = [ "community": true }, "overview": "# Freifunk network\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Freifunk community network metrics for optimized network performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Freifunk Exporter](https://github.com/xperimental/freifunk-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Freifunk Exporter](https://github.com/xperimental/freifunk-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the 
`edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Freifunk Exporter](https://github.com/xperimental/freifunk-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9513,7 +9702,7 @@ export const integrations = [ "community": true }, "overview": "# Fritzbox network devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack AVM Fritzbox router metrics for efficient home network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Fritzbox exporter](https://github.com/pdreker/fritz_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Fritzbox exporter](https://github.com/pdreker/fritz_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Fritzbox exporter](https://github.com/pdreker/fritz_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series.
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9548,7 +9737,7 @@ export const integrations = [ "community": true }, "overview": "# GCP GCE\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Google Cloud Platform Compute Engine metrics for efficient cloud resource management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [GCP GCE Exporter](https://github.com/O1ahmad/gcp-gce-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [GCP GCE Exporter](https://github.com/O1ahmad/gcp-gce-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the 
`edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [GCP GCE Exporter](https://github.com/O1ahmad/gcp-gce-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: 
local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9583,7 +9772,7 @@ export const integrations = [ "community": true }, "overview": "# GCP Quota\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Google Cloud Platform quota metrics for optimized resource usage and cost management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [GCP Quota Exporter](https://github.com/mintel/gcp-quota-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [GCP Quota Exporter](https://github.com/mintel/gcp-quota-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [GCP Quota Exporter](https://github.com/mintel/gcp-quota-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
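As a concrete illustration of the option syntax shown below, a minimal sketch (both patterns are hypothetical, assuming the exporter exposes `gcp_quota_*` series):\n\n```yaml\nselector:\n allow:\n - gcp_quota_*\n deny:\n - gcp_quota_scrape_*\n```\n\n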
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9618,7 +9807,7 @@ export const integrations = [ "community": true }, "overview": "# GTP\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on GTP (GPRS Tunneling Protocol) metrics for optimized mobile data communication and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [GTP Exporter](https://github.com/wmnsk/gtp_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [GTP Exporter](https://github.com/wmnsk/gtp_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from 
the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [GTP Exporter](https://github.com/wmnsk/gtp_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: 
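local_mtls\n # A hedged sketch: additionally present a client certificate (tls_cert\n # and tls_key are documented in the options table above; both file paths\n # below are hypothetical).\n url: https://127.0.0.1:9090/metrics\n tls_cert: /etc/ssl/client_cert.pem\n tls_key: /etc/ssl/client_key.pem\n tls_skip_verify: yes\n\n - name: 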
local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9653,7 +9842,7 @@ export const integrations = [ "community": true }, "overview": "# Generic Command Line Output\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack custom command line output metrics for tailored monitoring and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Generic Command Line Output Exporter](https://github.com/MarioMartReq/generic-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Generic Command Line Output Exporter](https://github.com/MarioMartReq/generic-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. 
| 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names 
must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Generic Command Line Output Exporter](https://github.com/MarioMartReq/generic-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
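As a concrete illustration of the option syntax shown below, a minimal sketch that keeps everything except Go runtime series (the `go_*` pattern is hypothetical, and this assumes an empty allow list permits all series):\n\n```yaml\nselector:\n deny:\n - go_*\n```\n\n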
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9688,7 +9877,7 @@ export const integrations = [ "community": true }, "overview": "# Generic storage enclosure tool\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor storage enclosure metrics for efficient storage device management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [jbod - Generic storage enclosure tool](https://github.com/Gandi/jbod-rs).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [jbod - Generic storage enclosure tool](https://github.com/Gandi/jbod-rs) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration 
file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [jbod - Generic storage enclosure tool](https://github.com/Gandi/jbod-rs) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9723,7 +9912,7 @@ export const integrations = [ "community": true }, "overview": "# GitHub API rate limit\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor GitHub API rate limit metrics for efficient\nAPI usage and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [GitHub API rate limit Exporter](https://github.com/lunarway/github-ratelimit-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [GitHub API rate limit Exporter](https://github.com/lunarway/github-ratelimit-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. 
| 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names 
must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [GitHub API rate limit Exporter](https://github.com/lunarway/github-ratelimit-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9758,7 +9947,7 @@ export const integrations = [ "community": true }, "overview": "# GitHub repository\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack GitHub repository metrics for optimized project and user analytics monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [GitHub Exporter](https://github.com/githubexporter/github-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [GitHub Exporter](https://github.com/githubexporter/github-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` 
script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [GitHub Exporter](https://github.com/githubexporter/github-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9793,7 +9982,7 @@ export const integrations = [ "community": true }, "overview": "# GitLab Runner\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on GitLab CI/CD job metrics for efficient development and deployment management.\n\n\nMetrics are gathered by periodically sending HTTP requests to GitLab built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Configure built-in Prometheus exporter\n\nTo configure the built-in Prometheus exporter, follow the [official documentation](https://docs.gitlab.com/runner/monitoring/#configuration-of-the-metrics-http-server).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Configure built-in Prometheus exporter\n\nTo configure the built-in Prometheus exporter, follow the [official documentation](https://docs.gitlab.com/runner/monitoring/#configuration-of-the-metrics-http-server).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9828,7 +10017,7 @@ export const integrations = [ "community": true }, "overview": "# Gobetween\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Gobetween load balancer metrics for optimized network traffic management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to Gobetween built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd 
/opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. 
| | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple 
jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9867,7 +10056,7 @@ export const integrations = [ "community": true }, "overview": "# Google Cloud Platform\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Google Cloud Platform metrics for comprehensive cloud resource management and performance optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Google Cloud Platform Exporter](https://github.com/DazWilkin/gcp-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Google Cloud Platform Exporter](https://github.com/DazWilkin/gcp-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. 
| 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names 
must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n",
+        "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Google Cloud Platform Exporter](https://github.com/DazWilkin/gcp-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9906,7 +10095,7 @@ export const integrations = [ "community": true }, "overview": "# Google Pagespeed\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Google PageSpeed Insights performance metrics for efficient web page optimization and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Pagespeed exporter](https://github.com/foomo/pagespeed_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Pagespeed exporter](https://github.com/foomo/pagespeed_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file 
using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Pagespeed exporter](https://github.com/foomo/pagespeed_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9945,7 +10134,7 @@ export const integrations = [ "community": true }, "overview": "# Google Stackdriver\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Google Stackdriver monitoring metrics for optimized cloud performance and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Google Stackdriver exporter](https://github.com/prometheus-community/stackdriver_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Google Stackdriver exporter](https://github.com/prometheus-community/stackdriver_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. 
| 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names 
must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n",
+        "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Google Stackdriver exporter](https://github.com/prometheus-community/stackdriver_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9980,7 +10169,7 @@ export const integrations = [ "community": true }, "overview": "# Grafana\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Grafana dashboard and visualization metrics for optimized monitoring and data analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to Grafana built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd 
/opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n",
+        "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. 
| | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple 
jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10015,7 +10204,7 @@ export const integrations = [ "community": true }, "overview": "# Graylog Server\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Graylog server metrics for efficient log management and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to Graylog built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Configure built-in Prometheus exporter\n\nTo configure the built-in Prometheus exporter, follow the [official documentation](https://go2docs.graylog.org/5-0/interacting_with_your_log_data/metrics.html#PrometheusMetricExporting).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Configure built-in Prometheus exporter\n\nTo configure the built-in Prometheus exporter, follow the [official documentation](https://go2docs.graylog.org/5-0/interacting_with_your_log_data/metrics.html#PrometheusMetricExporting).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see the `fallback_type` section below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series.
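\n\nFor instance, a minimal per-job sketch (the `gl_input_*` and `gl_buffer_*` patterns are hypothetical, chosen only to illustrate the allow/deny logic):\n\n```yaml\n# hedged sketch: keep input-related series, drop buffer internals\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n selector:\n allow:\n - gl_input_*\n deny:\n - gl_buffer_*\n```\n\n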
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10050,7 +10239,7 @@ export const integrations = [ "community": true }, "overview": "# HANA\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack SAP HANA database metrics for efficient data storage and query performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [HANA Exporter](https://github.com/jenningsloy318/hana_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [HANA Exporter](https://github.com/jenningsloy318/hana_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from 
the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [HANA Exporter](https://github.com/jenningsloy318/hana_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see the `fallback_type` section below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name:
local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10085,7 +10274,7 @@ export const integrations = [ "community": true }, "overview": "# HDSentinel\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Hard Disk Sentinel metrics for efficient storage device health management and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [HDSentinel Exporter](https://github.com/qusielle/hdsentinel-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [HDSentinel Exporter](https://github.com/qusielle/hdsentinel-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [HDSentinel Exporter](https://github.com/qusielle/hdsentinel-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see the `fallback_type` section below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series.
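\n\nAs another hedged sketch (the `hds_*` metric names are hypothetical), a deny-only filter collects everything except the matching series and can be combined with the `fallback_type` option described below:\n\n```yaml\n# hedged sketch: drop raw-value series, read untyped temperature series as gauges\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n selector:\n deny:\n - hds_*_raw\n fallback_type:\n gauge:\n - hds_temperature_*\n```\n\n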
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10120,7 +10309,7 @@ export const integrations = [ "community": true }, "overview": "# HHVM\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor HipHop Virtual Machine metrics for efficient\nPHP execution and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [HHVM Exporter](https://github.com/wikimedia/operations-software-hhvm_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [HHVM Exporter](https://github.com/wikimedia/operations-software-hhvm_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the 
`edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [HHVM Exporter](https://github.com/wikimedia/operations-software-hhvm_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see the `fallback_type` section below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10155,7 +10344,7 @@ export const integrations = [ "community": true }, "overview": "# HP iLO\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor HP Integrated Lights Out (iLO) metrics for efficient server management and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [HP iLO Metrics Exporter](https://github.com/infinityworks/hpilo-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [HP iLO Metrics Exporter](https://github.com/infinityworks/hpilo-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [HP iLO Metrics Exporter](https://github.com/infinityworks/hpilo-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10190,7 +10379,7 @@ export const integrations = [ "community": true }, "overview": "# Halon\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Halon email security and delivery metrics for optimized email management and protection.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Halon exporter](https://github.com/tobiasbp/halon_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Halon exporter](https://github.com/tobiasbp/halon_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from 
the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Halon exporter](https://github.com/tobiasbp/halon_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: 
local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10225,7 +10414,7 @@ export const integrations = [ "community": true }, "overview": "# HashiCorp Vault secrets\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack HashiCorp Vault security assessment metrics for efficient secrets management and security.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Vault Assessment Prometheus Exporter](https://github.com/tomtom-international/vault-assessment-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Vault Assessment Prometheus Exporter](https://github.com/tomtom-international/vault-assessment-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data 
collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple 
jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Vault Assessment Prometheus Exporter](https://github.com/tomtom-international/vault-assessment-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10260,7 +10449,7 @@ export const integrations = [ "community": true }, "overview": "# Hasura GraphQL Server\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Hasura GraphQL engine metrics for optimized\nAPI performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Hasura Exporter](https://github.com/zolamk/hasura-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Hasura Exporter](https://github.com/zolamk/hasura-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script 
from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Hasura Exporter](https://github.com/zolamk/hasura-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: 
{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10295,7 +10484,7 @@ export const integrations = [ "community": true }, "overview": "# Helium hotspot\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Helium hotspot metrics for optimized LoRaWAN network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Helium hotspot exporter](https://github.com/tedder/helium_hotspot_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Helium hotspot exporter](https://github.com/tedder/helium_hotspot_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Helium hotspot exporter](https://github.com/tedder/helium_hotspot_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n
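As a sketch of the `update_every` and `autodetection_retry` global options mentioned above (the values shown are illustrative), they can be set once at the top level of `go.d/prometheus.conf` so that every job inherits them:\n\n```yaml\nupdate_every: 5\nautodetection_retry: 60\n\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n```\n\n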
##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10330,7 +10519,7 @@ export const integrations = [ "community": true }, "overview": "# Helium miner (validator)\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Helium miner and validator metrics for efficient blockchain performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Helium miner (validator) exporter](https://github.com/tedder/miner_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Helium miner (validator) exporter](https://github.com/tedder/miner_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the 
configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Helium miner (validator) exporter](https://github.com/tedder/miner_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n
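##### HTTPS with a custom certification authority\n\nA sketch based on the `tls_ca` option from the table above; the certificate path is illustrative. It verifies the server's certificate against a private CA instead of disabling validation:\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_ca: /etc/ssl/certs/my-private-ca.pem\n\n```\n{% /details %}\n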
##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10365,7 +10554,7 @@ export const integrations = [ "community": true }, "overview": "# Hitron CGN series CPE\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Hitron CGNV4 gateway metrics for efficient network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Hitron CGNV4 exporter](https://github.com/yrro/hitron-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Hitron CGNV4 exporter](https://github.com/yrro/hitron-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Hitron CGNV4 exporter](https://github.com/yrro/hitron-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n
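As an illustrative sketch of the `headers` option from the table above (assuming a simple header-name-to-value mapping; the token is a placeholder, not something this exporter requires), extra HTTP headers can be attached to every scrape request:\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n headers:\n Authorization: \"Bearer my-secret-token\"\n```\n\n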
##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10400,7 +10589,7 @@ export const integrations = [ "community": true }, "overview": "# Hitron CODA Cable Modem\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Hitron CODA cable modem metrics for optimized internet connectivity and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Hitron CODA Cable Modem Exporter](https://github.com/hairyhenderson/hitron_coda_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Hitron CODA Cable Modem Exporter](https://github.com/hairyhenderson/hitron_coda_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou 
can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Hitron CODA Cable Modem Exporter](https://github.com/hairyhenderson/hitron_coda_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10435,7 +10624,7 @@ export const integrations = [ "community": true }, "overview": "# Homebridge\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Homebridge smart home metrics for efficient home automation management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Homebridge Prometheus Exporter](https://github.com/lstrojny/homebridge-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Homebridge Prometheus Exporter](https://github.com/lstrojny/homebridge-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. 
| 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names 
must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Homebridge Prometheus Exporter](https://github.com/lstrojny/homebridge-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10470,7 +10659,7 @@ export const integrations = [ "community": true }, "overview": "# Homey\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Homey smart home controller metrics for efficient home automation and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Homey Exporter](https://github.com/rickardp/homey-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Homey Exporter](https://github.com/rickardp/homey-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` 
script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Homey Exporter](https://github.com/rickardp/homey-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10505,7 +10694,7 @@ export const integrations = [ "community": true }, "overview": "# Honeypot\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor honeypot metrics for efficient threat detection and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Intrinsec honeypot_exporter](https://github.com/Intrinsec/honeypot_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Intrinsec honeypot_exporter](https://github.com/Intrinsec/honeypot_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Intrinsec honeypot_exporter](https://github.com/Intrinsec/honeypot_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10540,7 +10729,7 @@ export const integrations = [ "community": true }, "overview": "# Huawei devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Huawei HiLink device metrics for optimized connectivity and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Huawei Hilink exporter](https://github.com/eliecharra/hilink-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Huawei Hilink exporter](https://github.com/eliecharra/hilink-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the 
`edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Huawei Hilink exporter](https://github.com/eliecharra/hilink-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10575,7 +10764,7 @@ export const integrations = [ "community": true }, "overview": "# Hubble\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Hubble network observability metrics for efficient network visibility and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to Hubble built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Configure built-in Prometheus exporter\n\nTo configure the built-in Prometheus exporter, follow the [official documentation](https://docs.cilium.io/en/stable/observability/metrics/#hubble-metrics).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n",
+ "setup": "## Setup\n\n### Prerequisites\n\n#### Configure built-in Prometheus exporter\n\nTo configure the built-in Prometheus exporter, follow the [official documentation](https://docs.cilium.io/en/stable/observability/metrics/#hubble-metrics).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10610,7 +10799,7 @@ export const integrations = [ "community": true }, "overview": "# IBM AIX systems Njmon\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on NJmon system performance monitoring metrics for efficient IT infrastructure management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [NJmon](https://github.com/crooks/njmon_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [NJmon](https://github.com/crooks/njmon_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` 
script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [NJmon](https://github.com/crooks/njmon_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: 
local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
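For example, to keep only the most recent matches, you could pipe the output through `tail` (a generic shell sketch; the `-n 50` line count is an arbitrary example value, not a Netdata default):\n\n```bash\n# Show only the last 50 matching lines of the collector log.\ngrep prometheus /var/log/netdata/collector.log | tail -n 50\n```\n\n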
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10645,7 +10834,7 @@ export const integrations = [ "community": true }, "overview": "# IBM CryptoExpress (CEX) cards\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack IBM Z Crypto Express device metrics for optimized cryptographic performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [IBM Z CEX Device Plugin Prometheus Exporter](https://github.com/ibm-s390-cloud/k8s-cex-dev-plugin).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [IBM Z CEX Device Plugin Prometheus Exporter](https://github.com/ibm-s390-cloud/k8s-cex-dev-plugin) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. 
| 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names 
must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n",
+ "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [IBM Z CEX Device Plugin Prometheus Exporter](https://github.com/ibm-s390-cloud/k8s-cex-dev-plugin) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10680,7 +10869,7 @@ export const integrations = [ "community": true }, "overview": "# IBM MQ\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on IBM MQ message queue metrics for efficient message transport and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [MQ Exporter](https://github.com/agebhar1/mq_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [MQ Exporter](https://github.com/agebhar1/mq_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config 
directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [MQ Exporter](https://github.com/agebhar1/mq_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: 
local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10715,7 +10904,7 @@ export const integrations = [ "community": true }, "overview": "# IBM Spectrum\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor IBM Spectrum storage metrics for efficient data management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [IBM Spectrum Exporter](https://github.com/topine/ibm-spectrum-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [IBM Spectrum Exporter](https://github.com/topine/ibm-spectrum-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [IBM Spectrum Exporter](https://github.com/topine/ibm-spectrum-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
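To make the `selector` option concrete, here is a minimal sketch of a job that keeps only a subset of the exporter's time series. It assumes the glob-style patterns described in the linked selector README; the metric name patterns (`ibm_spectrum_*`, `*_bucket`) are hypothetical, not names published by this exporter.

```yaml
jobs:
  - name: local
    url: http://127.0.0.1:9090/metrics
    selector:
      allow:
        - ibm_spectrum_*   # hypothetical prefix: keep only these series
      deny:
        - "*_bucket"       # hypothetical: drop histogram buckets to cut cardinality
```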
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10750,7 +10939,7 @@ export const integrations = [ "community": true }, "overview": "# IBM Spectrum Virtualize\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor IBM Spectrum Virtualize metrics for efficient storage virtualization and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [spectrum_virtualize_exporter](https://github.com/bluecmd/spectrum_virtualize_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [spectrum_virtualize_exporter](https://github.com/bluecmd/spectrum_virtualize_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou 
can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [spectrum_virtualize_exporter](https://github.com/bluecmd/spectrum_virtualize_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10785,7 +10974,7 @@ export const integrations = [ "community": true }, "overview": "# IBM Z Hardware Management Console\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor IBM Z Hardware Management Console metrics for efficient mainframe management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [IBM Z HMC Exporter](https://github.com/zhmcclient/zhmc-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [IBM Z HMC Exporter](https://github.com/zhmcclient/zhmc-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. 
| 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names 
must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [IBM Z HMC Exporter](https://github.com/zhmcclient/zhmc-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
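As a worked instance of `fallback_type`, the sketch below maps untyped series onto the two supported types; the patterns (`*_events`, `*_usage`) are illustrative assumptions, not metric names documented for this exporter.

```yaml
jobs:
  - name: local
    url: http://127.0.0.1:9090/metrics
    fallback_type:
      counter:
        - "*_events"   # assumed: treat untyped event totals as counters
      gauge:
        - "*_usage"    # assumed: treat untyped usage readings as gauges
```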
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10820,7 +11009,7 @@ export const integrations = [ "community": true }, "overview": "# IOTA full node\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on IOTA cryptocurrency network metrics for efficient blockchain performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [IOTA Exporter](https://github.com/crholliday/iota-prom-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [IOTA Exporter](https://github.com/crholliday/iota-prom-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the 
`edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [IOTA Exporter](https://github.com/crholliday/iota-prom-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10859,7 +11048,7 @@ export const integrations = [ "community": true }, "overview": "# IPMI (By SoundCloud)\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor IPMI metrics externally for efficient server hardware management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SoundCloud IPMI Exporter (querying IPMI externally, blackbox-exporter style)](https://github.com/prometheus-community/ipmi_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SoundCloud IPMI Exporter (querying IPMI externally, blackbox-exporter style)](https://github.com/prometheus-community/ipmi_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| 
update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When 
you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SoundCloud IPMI Exporter (querying IPMI externally, blackbox-exporter style)](https://github.com/prometheus-community/ipmi_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10898,7 +11087,7 @@ export const integrations = [ "community": true }, "overview": "# InfluxDB\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor InfluxDB time-series database metrics for efficient data storage and query performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [InfluxDB exporter](https://github.com/prometheus/influxdb_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [InfluxDB exporter](https://github.com/prometheus/influxdb_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the 
`edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [InfluxDB exporter](https://github.com/prometheus/influxdb_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10933,7 +11122,7 @@ export const integrations = [ "community": true }, "overview": "# JMX\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Java Management Extensions (JMX) metrics for efficient Java application management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [JMX Exporter](https://github.com/prometheus/jmx_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [JMX Exporter](https://github.com/prometheus/jmx_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides 
metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [JMX Exporter](https://github.com/prometheus/jmx_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10968,7 +11157,7 @@ export const integrations = [ "community": true }, "overview": "# Jarvis Standing Desk\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Jarvis standing desk usage metrics for efficient workspace ergonomics and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Jarvis Standing Desk Exporter](https://github.com/hairyhenderson/jarvis_exporter/).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Jarvis Standing Desk Exporter](https://github.com/hairyhenderson/jarvis_exporter/) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the 
configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Jarvis Standing Desk Exporter](https://github.com/hairyhenderson/jarvis_exporter/) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -11003,7 +11192,7 @@ export const integrations = [ "community": true }, "overview": "# Jenkins\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Jenkins continuous integration server metrics for efficient development and build management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Jenkins exporter](https://github.com/simplesurance/jenkins-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Jenkins exporter](https://github.com/simplesurance/jenkins-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Jenkins exporter](https://github.com/simplesurance/jenkins-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -11038,7 +11227,7 @@ export const integrations = [ "community": true }, "overview": "# JetBrains Floating License Server\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor JetBrains floating license server metrics for efficient software licensing management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [JetBrains Floating License Server Export](https://github.com/mkreu/jetbrains-fls-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [JetBrains Floating License Server Export](https://github.com/mkreu/jetbrains-fls-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is 
`go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [JetBrains Floating License Server Export](https://github.com/mkreu/jetbrains-fls-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -11077,7 +11266,7 @@ export const integrations = [ "community": true }, "overview": "# Kafka\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Kafka message queue metrics for optimized data streaming and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Kafka Exporter](https://github.com/danielqsj/kafka_exporter/).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Kafka Exporter](https://github.com/danielqsj/kafka_exporter/) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides 
metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Kafka Exporter](https://github.com/danielqsj/kafka_exporter/) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -11116,7 +11305,7 @@ export const integrations = [ "community": true }, "overview": "# Kafka Connect\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Kafka Connect metrics for efficient data streaming and integration.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Kafka Connect exporter](https://github.com/findelabs/kafka-connect-exporter-rs).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Kafka Connect exporter](https://github.com/findelabs/kafka-connect-exporter-rs) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using 
the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Kafka Connect exporter](https://github.com/findelabs/kafka-connect-exporter-rs) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -11155,7 +11344,7 @@ export const integrations = [ "community": true }, "overview": "# Kafka Consumer Lag\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Kafka consumer lag metrics for efficient message queue management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Kafka Consumer Lag Monitoring](https://github.com/omarsmak/kafka-consumer-lag-monitoring).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Kafka Consumer Lag Monitoring](https://github.com/omarsmak/kafka-consumer-lag-monitoring) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. 
| 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names 
must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Kafka Consumer Lag Monitoring](https://github.com/omarsmak/kafka-consumer-lag-monitoring) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series.
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -11194,7 +11383,7 @@ export const integrations = [ "community": true }, "overview": "# Kafka ZooKeeper\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Kafka ZooKeeper metrics for optimized distributed coordination and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Kafka ZooKeeper Exporter](https://github.com/cloudflare/kafka_zookeeper_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Kafka ZooKeeper Exporter](https://github.com/cloudflare/kafka_zookeeper_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration 
file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Kafka ZooKeeper Exporter](https://github.com/cloudflare/kafka_zookeeper_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -11229,7 +11418,7 @@ export const integrations = [ "community": true }, "overview": "# Kannel\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Kannel SMS gateway and WAP gateway metrics for efficient mobile communication and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Kannel Exporter](https://github.com/apostvav/kannel_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Kannel Exporter](https://github.com/apostvav/kannel_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Kannel Exporter](https://github.com/apostvav/kannel_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series.
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -11264,7 +11453,7 @@ export const integrations = [ "community": true }, "overview": "# Keepalived\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Keepalived metrics for efficient high-availability and load balancing management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Keepalived Exporter](https://github.com/gen2brain/keepalived_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Keepalived Exporter](https://github.com/gen2brain/keepalived_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the 
`edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Keepalived Exporter](https://github.com/gen2brain/keepalived_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -11303,7 +11492,7 @@ export const integrations = [ "community": true }, "overview": "# Kubernetes Cluster Cloud Cost\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Kubernetes cloud cost metrics for efficient cloud resource management and budgeting.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Kubernetes Cloud Cost Exporter](https://github.com/agilestacks/korral).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Kubernetes Cloud Cost Exporter](https://github.com/agilestacks/korral) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Kubernetes Cloud Cost Exporter](https://github.com/agilestacks/korral) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -11338,7 +11527,7 @@ export const integrations = [ "community": true }, "overview": "# LDAP\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Lightweight Directory Access Protocol (LDAP) metrics for efficient directory service management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [LDAP Exporter](https://github.com/titisan/ldap_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [LDAP Exporter](https://github.com/titisan/ldap_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the 
`edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [LDAP Exporter](https://github.com/titisan/ldap_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: 
local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -11373,7 +11562,7 @@ export const integrations = [ "community": true }, "overview": "# Lagerist Disk latency\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack disk latency metrics for efficient storage performance and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Lagerist Disk latency exporter](https://github.com/Svedrin/lagerist).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Lagerist Disk latency exporter](https://github.com/Svedrin/lagerist) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Lagerist Disk latency exporter](https://github.com/Svedrin/lagerist) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -11408,7 +11597,7 @@ export const integrations = [ "community": true }, "overview": "# Linode\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Linode cloud hosting metrics for efficient virtual server management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Linode Exporter](https://github.com/DazWilkin/linode-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Linode Exporter](https://github.com/DazWilkin/linode-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from 
the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Linode Exporter](https://github.com/DazWilkin/linode-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: 
local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
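A convenient way to narrow that down is to pipe the matches through `tail` (a minimal convenience sketch, assuming the default log location shown above):\n\n```bash\n# keep only the 50 most recent matching lines\ngrep prometheus /var/log/netdata/collector.log | tail -n 50\n```\n\n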
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -11443,7 +11632,7 @@ export const integrations = [ "community": true }, "overview": "# Lustre metadata\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Lustre clustered file system for efficient management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cluster Exporter](https://github.com/GSI-HPC/prometheus-cluster-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cluster Exporter](https://github.com/GSI-HPC/prometheus-cluster-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cluster Exporter](https://github.com/GSI-HPC/prometheus-cluster-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
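For example, a job scraping this exporter could keep only Lustre-related series and drop build-info noise (the patterns are illustrative, not taken from the exporter's docs; check the metric names your endpoint actually serves):\n\n```yaml\n# illustrative patterns; verify against your endpoint\nselector:\n allow:\n - lustre_*\n deny:\n - '*_build_info'\n```\n\n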
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -11478,7 +11667,7 @@ export const integrations = [ "community": true }, "overview": "# Lynis audit reports\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Lynis security auditing tool metrics for efficient system security and compliance management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [lynis_exporter](https://github.com/MauveSoftware/lynis_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [lynis_exporter](https://github.com/MauveSoftware/lynis_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the 
`edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
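If you are unsure which series an endpoint exposes, you can list its unique metric names first (a quick sketch; the URL matches the examples below, adjust it to your exporter):\n\n```bash\n# print the unique metric names served by the endpoint\ncurl -s http://127.0.0.1:9090/metrics | grep -v '^#' | cut -d'{' -f1 | cut -d' ' -f1 | sort -u\n```\n\n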
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [lynis_exporter](https://github.com/MauveSoftware/lynis_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -11513,7 +11702,7 @@ export const integrations = [ "community": true }, "overview": "# MP707 USB thermometer\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack MP707 USB thermometer metrics for efficient temperature monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [MP707 exporter](https://github.com/nradchenko/mp707_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [MP707 exporter](https://github.com/nradchenko/mp707_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides 
metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [MP707 exporter](https://github.com/nradchenko/mp707_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
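For instance, you could keep only this exporter's own series by allowing a single prefix (the `mp707_` prefix is an assumption, verify it against your endpoint):\n\n```yaml\n# the mp707_ prefix is an assumption; check your endpoint's metric names\nselector:\n allow:\n - mp707_*\n```\n\n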
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -11548,7 +11737,7 @@ export const integrations = [ "community": true }, "overview": "# MQTT Blackbox\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack MQTT message transport performance using blackbox testing methods.\n\n\nMetrics are gathered by periodically sending HTTP requests to [MQTT Blackbox Exporter](https://github.com/inovex/mqtt_blackbox_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [MQTT Blackbox Exporter](https://github.com/inovex/mqtt_blackbox_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` 
script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [MQTT Blackbox Exporter](https://github.com/inovex/mqtt_blackbox_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -11583,7 +11772,7 @@ export const integrations = [ "community": true }, "overview": "# Machbase\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Machbase time-series database metrics for efficient data storage and query performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Machbase Exporter](https://github.com/MACHBASE/prometheus-machbase-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Machbase Exporter](https://github.com/MACHBASE/prometheus-machbase-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Machbase Exporter](https://github.com/MACHBASE/prometheus-machbase-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series.
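As a quick illustration only — a minimal hypothetical sketch, assuming the exporter exposes series named `machbase_*` (illustrative names, not taken from the exporter's documentation):\n\n```yaml\n# keep all machbase_* series except hypothetical debug ones\nselector:\n allow:\n - machbase_*\n deny:\n - machbase_debug_*\n```\n\n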
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -11618,7 +11807,7 @@ export const integrations = [ "community": true }, "overview": "# Maildir\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack mail server metrics for optimized email management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [mailexporter](https://github.com/cherti/mailexporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [mailexporter](https://github.com/cherti/mailexporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config 
directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [mailexporter](https://github.com/cherti/mailexporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: 
local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts.
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -11653,7 +11842,7 @@ export const integrations = [ "community": true }, "overview": "# Meilisearch\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Meilisearch search engine metrics for efficient search performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Meilisearch Exporter](https://github.com/scottaglia/meilisearch_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Meilisearch Exporter](https://github.com/scottaglia/meilisearch_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Meilisearch Exporter](https://github.com/scottaglia/meilisearch_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
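To ground the option in a job context — a hypothetical sketch in which the port, the `meilisearch_*` pattern, and the `meili` prefix are assumptions for illustration, not Meilisearch-specific guidance:\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n label_prefix: meili # hypothetical; labels become meili_<original name>\n selector:\n allow:\n - meilisearch_*\n```\n\n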
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -11688,7 +11877,7 @@ export const integrations = [ "community": true }, "overview": "# Memcached (community)\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Memcached in-memory key-value store metrics for efficient caching performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Memcached exporter](https://github.com/prometheus/memcached_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Memcached exporter](https://github.com/prometheus/memcached_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration 
file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Memcached exporter](https://github.com/prometheus/memcached_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -11723,7 +11912,7 @@ export const integrations = [ "community": true }, "overview": "# Meraki dashboard\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Cisco Meraki cloud-managed networking device metrics for efficient network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Meraki dashboard data exporter using API](https://github.com/TheHolm/meraki-dashboard-promethus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Meraki dashboard data exporter using API](https://github.com/TheHolm/meraki-dashboard-promethus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection 
frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, 
their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Meraki dashboard data exporter using API](https://github.com/TheHolm/meraki-dashboard-promethus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -11758,7 +11947,7 @@ export const integrations = [ "community": true }, "overview": "# Mesos\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Apache Mesos cluster manager metrics for efficient resource management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Mesos exporter](http://github.com/mesosphere/mesos_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Mesos exporter](http://github.com/mesosphere/mesos_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from 
the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Mesos exporter](http://github.com/mesosphere/mesos_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: 
local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -11793,7 +11982,7 @@ export const integrations = [ "community": true }, "overview": "# MikroTik devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on MikroTik RouterOS metrics for efficient network device management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [mikrotik-exporter](https://github.com/swoga/mikrotik-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [nshttpd/mikrotik-exporter, swoga/m](https://github.com/swoga/mikrotik-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [nshttpd/mikrotik-exporter, swoga/m](https://github.com/swoga/mikrotik-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -11828,7 +12017,7 @@ export const integrations = [ "community": true }, "overview": "# Mikrotik RouterOS devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack MikroTik RouterOS metrics for efficient network device management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [RouterOS exporter](https://github.com/welbymcroberts/routeros_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [RouterOS exporter](https://github.com/welbymcroberts/routeros_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file 
using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [RouterOS exporter](https://github.com/welbymcroberts/routeros_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -11863,7 +12052,7 @@ export const integrations = [ "community": true }, "overview": "# Minecraft\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Minecraft server metrics for efficient game server management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Minecraft Exporter](https://github.com/sladkoff/minecraft-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Minecraft Exporter](https://github.com/sladkoff/minecraft-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Minecraft Exporter](https://github.com/sladkoff/minecraft-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series.
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -11902,7 +12091,7 @@ export const integrations = [ "community": true }, "overview": "# Modbus protocol\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Modbus RTU protocol metrics for efficient industrial automation and control performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [modbusrtu_exporter](https://github.com/dernasherbrezon/modbusrtu_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [modbusrtu_exporter](https://github.com/dernasherbrezon/modbusrtu_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file 
using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [modbusrtu_exporter](https://github.com/dernasherbrezon/modbusrtu_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -11937,7 +12126,7 @@ export const integrations = [ "community": true }, "overview": "# MogileFS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor MogileFS distributed file system metrics for efficient storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [MogileFS Exporter](https://github.com/KKBOX/mogilefs-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [MogileFS Exporter](https://github.com/KKBOX/mogilefs-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [MogileFS Exporter](https://github.com/KKBOX/mogilefs-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series.
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -11972,7 +12161,7 @@ export const integrations = [ "community": true }, "overview": "# Monnit Sensors MQTT\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Monnit sensor data via MQTT for efficient IoT device monitoring and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Monnit Sensors MQTT Exporter WIP](https://github.com/braxton9460/monnit-mqtt-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Monnit Sensors MQTT Exporter WIP](https://github.com/braxton9460/monnit-mqtt-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the 
configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Monnit Sensors MQTT Exporter WIP](https://github.com/braxton9460/monnit-mqtt-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12007,7 +12196,7 @@ export const integrations = [ "community": true }, "overview": "# NRPE daemon\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Nagios Remote Plugin Executor (NRPE) metrics for efficient system and network monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [NRPE exporter](https://github.com/canonical/nrpe_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [NRPE exporter](https://github.com/canonical/nrpe_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides 
metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [NRPE exporter](https://github.com/canonical/nrpe_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
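As a purely illustrative sketch (the `nrpe_*` and `go_*` patterns are hypothetical, not taken from this exporter's metric list), a job that keeps the NRPE check series while dropping the exporter's own Go runtime series might use:\n\n```yaml\nselector:\n allow:\n - nrpe_*\n deny:\n - go_*\n```\n\n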
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12042,7 +12231,7 @@ export const integrations = [ "community": true }, "overview": "# NSX-T\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack VMware NSX-T software-defined networking metrics for efficient network virtualization and security management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [NSX-T Exporter](https://github.com/jk8s/nsxt_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [NSX-T Exporter](https://github.com/jk8s/nsxt_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` 
script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [NSX-T Exporter](https://github.com/jk8s/nsxt_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: 
local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12077,7 +12266,7 @@ export const integrations = [ "community": true }, "overview": "# NVML\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on NVIDIA Management Library (NVML) GPU metrics for efficient GPU performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [NVML exporter](https://github.com/oko/nvml-exporter-rs).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [NVML exporter](https://github.com/oko/nvml-exporter-rs) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides 
metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [NVML exporter](https://github.com/oko/nvml-exporter-rs) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
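To make the allow/deny logic concrete (a hypothetical sketch, since the exact NVML metric names are not listed here), allowing `nvml_*` while denying `nvml_*_errors` would collect every `nvml_*` series except the error counters:\n\n```yaml\nselector:\n allow:\n - nvml_*\n deny:\n - nvml_*_errors\n```\n\n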
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12112,7 +12301,7 @@ export const integrations = [ "community": true }, "overview": "# Naemon\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Naemon or Nagios network monitoring metrics for efficient IT infrastructure management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Naemon / Nagios Exporter](https://github.com/Griesbacher/Iapetos).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Naemon / Nagios Exporter](https://github.com/Griesbacher/Iapetos) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the 
`edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Naemon / Nagios Exporter](https://github.com/Griesbacher/Iapetos) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12147,7 +12336,7 @@ export const integrations = [ "community": true }, "overview": "# Nagios\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Nagios network monitoring metrics for efficient\nIT infrastructure management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Nagios exporter](https://github.com/wbollock/nagios_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Nagios exporter](https://github.com/wbollock/nagios_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Nagios exporter](https://github.com/wbollock/nagios_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12182,7 +12371,7 @@ export const integrations = [ "community": true }, "overview": "# Nature Remo E lite devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Nature Remo E series smart home device metrics for efficient home automation and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Nature Remo E series Exporter](https://github.com/kenfdev/remo-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Nature Remo E series Exporter](https://github.com/kenfdev/remo-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the 
configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Nature Remo E series Exporter](https://github.com/kenfdev/remo-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12221,7 +12410,7 @@ export const integrations = [ "community": true }, "overview": "# NetApp Solidfire\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack NetApp Solidfire storage system metrics for efficient data storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [NetApp Solidfire Exporter](https://github.com/mjavier2k/solidfire-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [NetApp Solidfire Exporter](https://github.com/mjavier2k/solidfire-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [NetApp Solidfire Exporter](https://github.com/mjavier2k/solidfire-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12260,7 +12449,7 @@ export const integrations = [ "community": true }, "overview": "# NetFlow\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack NetFlow network traffic metrics for efficient network monitoring and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [netflow exporter](https://github.com/paihu/netflow_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [netflow exporter](https://github.com/paihu/netflow_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from 
the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [netflow exporter](https://github.com/paihu/netflow_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: 
local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12299,7 +12488,7 @@ export const integrations = [ "community": true }, "overview": "# NetMeter\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor NetMeter network traffic metrics for efficient network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [NetMeter Exporter](https://github.com/ssbostan/netmeter-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [NetMeter Exporter](https://github.com/ssbostan/netmeter-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [NetMeter Exporter](https://github.com/ssbostan/netmeter-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12338,7 +12527,7 @@ export const integrations = [ "community": true }, "overview": "# Netapp ONTAP API\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on NetApp ONTAP storage system metrics for efficient data storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Netapp ONTAP API Exporter](https://github.com/sapcc/netapp-api-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Netapp ONTAP API Exporter](https://github.com/sapcc/netapp-api-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration 
file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Netapp ONTAP API Exporter](https://github.com/sapcc/netapp-api-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12377,7 +12566,7 @@ export const integrations = [ "community": true }, "overview": "# Netatmo sensors\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Netatmo smart home device metrics for efficient home automation and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Netatmo exporter](https://github.com/xperimental/netatmo-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Netatmo exporter](https://github.com/xperimental/netatmo-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Netatmo exporter](https://github.com/xperimental/netatmo-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12412,7 +12601,7 @@ export const integrations = [ "community": true }, "overview": "# New Relic\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor New Relic application performance management metrics for efficient application monitoring and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [New Relic exporter](https://github.com/jfindley/newrelic_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [New Relic exporter](https://github.com/jfindley/newrelic_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file 
using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [New Relic exporter](https://github.com/jfindley/newrelic_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12447,7 +12636,7 @@ export const integrations = [ "community": true }, "overview": "# NextDNS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack NextDNS DNS resolver and security platform metrics for efficient DNS management and security.\n\n\nMetrics are gathered by periodically sending HTTP requests to [nextdns-exporter](https://github.com/raylas/nextdns-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [nextdns-exporter](https://github.com/raylas/nextdns-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [nextdns-exporter](https://github.com/raylas/nextdns-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n
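##### label_prefix\n\nA minimal sketch of this option (the `k8s` value is only an illustration): with the setting below, a label collected as `pod` is exposed on charts as `k8s_pod`, following the `prefix_name` format described in the table above.\n\n- Option syntax:\n\n```yaml\nlabel_prefix: k8s\n```\n\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 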
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12486,7 +12675,7 @@ export const integrations = [ "community": true }, "overview": "# Nextcloud servers\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Nextcloud cloud storage metrics for efficient file hosting and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Nextcloud exporter](https://github.com/xperimental/nextcloud-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Nextcloud exporter](https://github.com/xperimental/nextcloud-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the 
`edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Nextcloud exporter](https://github.com/xperimental/nextcloud-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n
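If you would rather keep certificate verification enabled, a hypothetical alternative is to point `tls_ca` at the self-signed certificate itself instead of disabling the check (the file path here is only an example):\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_ca: /path/to/selfsigned.pem\n\n```\n\nThe configuration below disables the check entirely.\n\n\n{% details open=true 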
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12521,7 +12710,7 @@ export const integrations = [ "community": true }, "overview": "# OBS Studio\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack OBS Studio live streaming and recording software metrics for efficient video production and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OBS Studio Exporter](https://github.com/lukegb/obs_studio_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OBS Studio Exporter](https://github.com/lukegb/obs_studio_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OBS Studio Exporter](https://github.com/lukegb/obs_studio_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n
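##### label_prefix\n\nA minimal sketch of this option (the `k8s` value is only an illustration): with the setting below, a label collected as `pod` is exposed on charts as `k8s_pod`, following the `prefix_name` format described in the table above.\n\n- Option syntax:\n\n```yaml\nlabel_prefix: k8s\n```\n\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 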
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12560,7 +12749,7 @@ export const integrations = [ "community": true }, "overview": "# ODBC\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Open Database Connectivity (ODBC) metrics for efficient database connection and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ODBC Exporter](https://github.com/MACHBASE/prometheus-odbc-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ODBC Exporter](https://github.com/MACHBASE/prometheus-odbc-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the 
`edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ODBC Exporter](https://github.com/MACHBASE/prometheus-odbc-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n
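If you would rather keep certificate verification enabled, a hypothetical alternative is to point `tls_ca` at the self-signed certificate itself instead of disabling the check (the file path here is only an example):\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_ca: /path/to/selfsigned.pem\n\n```\n\nThe configuration below disables the check entirely.\n\n\n{% details open=true 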
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12595,7 +12784,7 @@ export const integrations = [ "community": true }, "overview": "# OTRS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor OTRS (Open-Source Ticket Request System) metrics for efficient helpdesk management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OTRS Exporter](https://github.com/JulianDroste/otrs_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OTRS Exporter](https://github.com/JulianDroste/otrs_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OTRS Exporter](https://github.com/JulianDroste/otrs_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12630,7 +12819,7 @@ export const integrations = [ "community": true }, "overview": "# OpenHAB\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack openHAB smart home automation system metrics for efficient home automation and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OpenHAB exporter](https://github.com/pdreker/openhab_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenHAB exporter](https://github.com/pdreker/openhab_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` 
script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenHAB exporter](https://github.com/pdreker/openhab_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name:
local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12665,7 +12854,7 @@ export const integrations = [ "community": true }, "overview": "# OpenLDAP (community)\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor OpenLDAP directory service metrics for efficient directory management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OpenLDAP Metrics Exporter](https://github.com/tomcz/openldap_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenLDAP Metrics Exporter](https://github.com/tomcz/openldap_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenLDAP Metrics Exporter](https://github.com/tomcz/openldap_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12700,7 +12889,7 @@ export const integrations = [ "community": true }, "overview": "# OpenRC\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on OpenRC init system metrics for efficient system startup and service management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [openrc-exporter](https://git.sr.ht/~tomleb/openrc-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [openrc-exporter](https://git.sr.ht/~tomleb/openrc-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from 
the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [openrc-exporter](https://git.sr.ht/~tomleb/openrc-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name:
local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12735,7 +12924,7 @@ export const integrations = [ "community": true }, "overview": "# OpenRCT2\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack OpenRCT2 game metrics for efficient game server management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OpenRCT2 Prometheus Exporter](https://github.com/terinjokes/openrct2-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenRCT2 Prometheus Exporter](https://github.com/terinjokes/openrct2-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n",
+ "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenRCT2 Prometheus Exporter](https://github.com/terinjokes/openrct2-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12774,7 +12963,7 @@ export const integrations = [ "community": true }, "overview": "# OpenROADM devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor OpenROADM optical transport network metrics using the NETCONF protocol for efficient network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OpenROADM NETCONF Exporter WIP](https://github.com/utdal/openroadm_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenROADM NETCONF Exporter WIP](https://github.com/utdal/openroadm_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is 
`go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenROADM NETCONF Exporter WIP](https://github.com/utdal/openroadm_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12809,7 +12998,7 @@ export const integrations = [ "community": true }, "overview": "# OpenStack\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack OpenStack cloud computing platform metrics for efficient infrastructure management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Openstack exporter](https://github.com/CanonicalLtd/prometheus-openstack-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Openstack exporter](https://github.com/CanonicalLtd/prometheus-openstack-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. 
| 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names 
must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n",
+ "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Openstack exporter](https://github.com/CanonicalLtd/prometheus-openstack-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12844,7 +13033,7 @@ export const integrations = [ "community": true }, "overview": "# OpenVAS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor OpenVAS vulnerability scanner metrics for efficient security assessment and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OpenVAS exporter](https://github.com/ModeClearCode/openvas_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenVAS exporter](https://github.com/ModeClearCode/openvas_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the 
`edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenVAS exporter](https://github.com/ModeClearCode/openvas_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12879,7 +13068,7 @@ export const integrations = [ "community": true }, "overview": "# OpenWeatherMap\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack OpenWeatherMap weather data and air pollution metrics for efficient environmental monitoring and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OpenWeatherMap Exporter](https://github.com/Tenzer/openweathermap-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenWeatherMap Exporter](https://github.com/Tenzer/openweathermap-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. 
| 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names 
must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenWeatherMap Exporter](https://github.com/Tenzer/openweathermap-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12914,7 +13103,7 @@ export const integrations = [ "community": true }, "overview": "# Open vSwitch\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Open vSwitch software-defined networking metrics for efficient network virtualization and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Open vSwitch Exporter](https://github.com/digitalocean/openvswitch_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Open vSwitch Exporter](https://github.com/digitalocean/openvswitch_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit 
the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Open vSwitch Exporter](https://github.com/digitalocean/openvswitch_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12954,7 +13143,7 @@ export const integrations = [ "community": true }, "overview": "# Oracle DB (community)\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Oracle Database metrics for efficient database management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Oracle DB Exporter](https://github.com/iamseth/oracledb_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Oracle DB Exporter](https://github.com/iamseth/oracledb_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Oracle DB Exporter](https://github.com/iamseth/oracledb_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12989,7 +13178,7 @@ export const integrations = [ "community": true }, "overview": "# Patroni\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Patroni PostgreSQL high-availability metrics for efficient database management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Patroni Exporter](https://github.com/gopaytech/patroni_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Patroni Exporter](https://github.com/gopaytech/patroni_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the 
`edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Patroni Exporter](https://github.com/gopaytech/patroni_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -13024,7 +13213,7 @@ export const integrations = [ "community": true }, "overview": "# Personal Weather Station\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack personal weather station metrics for efficient weather monitoring and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Personal Weather Station Exporter](https://github.com/JohnOrthoefer/pws-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Personal Weather Station Exporter](https://github.com/JohnOrthoefer/pws-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. 
| 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names 
must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Personal Weather Station Exporter](https://github.com/JohnOrthoefer/pws-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -13059,7 +13248,7 @@ export const integrations = [ "community": true }, "overview": "# Pgpool-II\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Pgpool-II PostgreSQL middleware metrics for efficient database connection management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Pgpool-II Exporter](https://github.com/pgpool/pgpool2_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Pgpool-II Exporter](https://github.com/pgpool/pgpool2_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the 
`edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Pgpool-II Exporter](https://github.com/pgpool/pgpool2_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: 
local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -13094,7 +13283,7 @@ export const integrations = [ "community": true }, "overview": "# Philips Hue\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Philips Hue smart lighting metrics for efficient home automation and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Philips Hue Exporter](https://github.com/aexel90/hue_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Philips Hue Exporter](https://github.com/aexel90/hue_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Philips Hue Exporter](https://github.com/aexel90/hue_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -13129,7 +13318,7 @@ export const integrations = [ "community": true }, "overview": "# Pimoroni Enviro+\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Pimoroni Enviro+ air quality and environmental metrics for efficient environmental monitoring and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Pimoroni Enviro+ Exporter](https://github.com/terradolor/prometheus-enviro-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Pimoroni Enviro+ Exporter](https://github.com/terradolor/prometheus-enviro-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is 
`go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Pimoroni Enviro+ Exporter](https://github.com/terradolor/prometheus-enviro-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -13164,7 +13353,7 @@ export const integrations = [ "community": true }, "overview": "# Pingdom\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Pingdom website monitoring service metrics for efficient website performance management and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Pingdom Exporter](https://github.com/veepee-oss/pingdom_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Pingdom Exporter](https://github.com/veepee-oss/pingdom_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Pingdom Exporter](https://github.com/veepee-oss/pingdom_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -13199,7 +13388,7 @@ export const integrations = [ "community": true }, "overview": "# Podman\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Podman container runtime metrics for efficient container management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [PODMAN exporter](https://github.com/containers/prometheus-podman-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [PODMAN exporter](https://github.com/containers/prometheus-podman-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using 
the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [PODMAN exporter](https://github.com/containers/prometheus-podman-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -13234,7 +13423,7 @@ export const integrations = [ "community": true }, "overview": "# Powerpal devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Powerpal smart meter metrics for efficient energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Powerpal Exporter](https://github.com/aashley/powerpal_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Powerpal Exporter](https://github.com/aashley/powerpal_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Powerpal Exporter](https://github.com/aashley/powerpal_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -13269,7 +13458,7 @@ export const integrations = [ "community": true }, "overview": "# ProFTPD\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor ProFTPD FTP server metrics for efficient file transfer and server performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ProFTPD Exporter](https://github.com/transnano/proftpd_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ProFTPD Exporter](https://github.com/transnano/proftpd_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from 
the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ProFTPD Exporter](https://github.com/transnano/proftpd_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -13306,7 +13495,7 @@ export const integrations = [ "most_popular": true }, "overview": "# Prometheus endpoint\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nThis generic Prometheus collector gathers metrics from any [`Prometheus`](https://prometheus.io/) endpoints.\n\n\nIt collects metrics by periodically sending HTTP requests to the target instance.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). 
| | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: 
http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -13341,7 +13530,7 @@ export const integrations = [ "community": true }, "overview": "# Proxmox VE\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Proxmox Virtual Environment metrics for efficient virtualization and container management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Proxmox VE Exporter](https://github.com/prometheus-pve/prometheus-pve-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Proxmox VE Exporter](https://github.com/prometheus-pve/prometheus-pve-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the 
configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Proxmox VE Exporter](https://github.com/prometheus-pve/prometheus-pve-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -13376,7 +13565,7 @@ export const integrations = [ "community": true }, "overview": "# RADIUS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on RADIUS (Remote Authentication Dial-In User Service) protocol metrics for efficient authentication and access management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [RADIUS exporter](https://github.com/devon-mar/radius-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [RADIUS exporter](https://github.com/devon-mar/radius-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [RADIUS exporter](https://github.com/devon-mar/radius-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -13411,7 +13600,7 @@ export const integrations = [ "community": true }, "overview": "# RIPE Atlas\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on RIPE Atlas Internet measurement platform metrics for efficient network monitoring and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [RIPE Atlas Exporter](https://github.com/czerwonk/atlas_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [RIPE Atlas Exporter](https://github.com/czerwonk/atlas_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the 
`edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [RIPE Atlas Exporter](https://github.com/czerwonk/atlas_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -13446,7 +13635,7 @@ export const integrations = [ "community": true }, "overview": "# Radio Thermostat\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Radio Thermostat smart thermostat metrics for efficient home automation and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Radio Thermostat Exporter](https://github.com/andrewlow/radio-thermostat-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Radio Thermostat Exporter](https://github.com/andrewlow/radio-thermostat-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. 
| 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names 
must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Radio Thermostat Exporter](https://github.com/andrewlow/radio-thermostat-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -13481,7 +13670,7 @@ export const integrations = [ "community": true }, "overview": "# Rancher\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Rancher container orchestration platform metrics for efficient container management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Rancher Exporter](https://github.com/infinityworksltd/prometheus-rancher-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Rancher Exporter](https://github.com/infinityworksltd/prometheus-rancher-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the 
configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Rancher Exporter](https://github.com/infinityworksltd/prometheus-rancher-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -13516,7 +13705,7 @@ export const integrations = [ "community": true }, "overview": "# Raritan PDU\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Raritan Power Distribution Unit (PDU) metrics for efficient power management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Raritan PDU Exporter](https://github.com/psyinfra/prometheus-raritan-pdu-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Raritan PDU Exporter](https://github.com/psyinfra/prometheus-raritan-pdu-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. 
| 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names 
must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Raritan PDU Exporter](https://github.com/psyinfra/prometheus-raritan-pdu-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -13551,7 +13740,7 @@ export const integrations = [ "community": true }, "overview": "# Redis Queue\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Python RQ (Redis Queue) job queue metrics for efficient task management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Python RQ Exporter](https://github.com/mdawar/rq-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Python RQ Exporter](https://github.com/mdawar/rq-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from 
the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Python RQ Exporter](https://github.com/mdawar/rq-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: 
local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -13586,7 +13775,7 @@ export const integrations = [ "community": true }, "overview": "# SABnzbd\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor SABnzbd Usenet client metrics for efficient file downloads and resource management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SABnzbd Exporter](https://github.com/msroest/sabnzbd_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SABnzbd Exporter](https://github.com/msroest/sabnzbd_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides 
metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SABnzbd Exporter](https://github.com/msroest/sabnzbd_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -13621,7 +13810,7 @@ export const integrations = [ "community": true }, "overview": "# SMA Inverters\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor SMA solar inverter metrics for efficient solar energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [sma-exporter](https://github.com/dr0ps/sma_inverter_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [sma-exporter](https://github.com/dr0ps/sma_inverter_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from 
the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [sma-exporter](https://github.com/dr0ps/sma_inverter_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: 
local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -13656,7 +13845,7 @@ export const integrations = [ "community": true }, "overview": "# SONiC NOS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Software for Open Networking in the Cloud (SONiC) metrics for efficient network switch management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SONiC Exporter](https://github.com/kamelnetworks/sonic_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SONiC Exporter](https://github.com/kamelnetworks/sonic_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SONiC Exporter](https://github.com/kamelnetworks/sonic_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -13695,7 +13884,7 @@ export const integrations = [ "community": true }, "overview": "# SQL Database agnostic\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nQuery SQL databases for efficient database performance monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SQL Exporter](https://github.com/free/sql_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SQL Exporter](https://github.com/free/sql_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config 
directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SQL Exporter](https://github.com/free/sql_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: 
local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -13730,7 +13919,7 @@ export const integrations = [ "community": true }, "overview": "# SSH\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor SSH server metrics for efficient secure shell server management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SSH Exporter](https://github.com/Nordstrom/ssh_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SSH Exporter](https://github.com/Nordstrom/ssh_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides 
metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SSH Exporter](https://github.com/Nordstrom/ssh_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type: process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -13765,7 +13954,7 @@ export const integrations = [ "community": true }, "overview": "# SSL Certificate\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack SSL/TLS certificate metrics for efficient web security and certificate management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SSL Certificate exporter](https://github.com/ribbybibby/ssl_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SSL Certificate exporter](https://github.com/ribbybibby/ssl_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the 
`edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SSL Certificate exporter](https://github.com/ribbybibby/ssl_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type: process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -13800,7 +13989,7 @@ export const integrations = [ "community": true }, "overview": "# Salicru EQX inverter\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Salicru EQX solar inverter metrics for efficient solar energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Salicru EQX inverter](https://github.com/alejandroscf/prometheus_salicru_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Salicru EQX inverter](https://github.com/alejandroscf/prometheus_salicru_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. 
| 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names 
must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Salicru EQX inverter](https://github.com/alejandroscf/prometheus_salicru_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type: process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -13835,7 +14024,7 @@ export const integrations = [ "community": true }, "overview": "# Sense Energy\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Sense Energy smart meter metrics for efficient energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Sense Energy exporter](https://github.com/ejsuncy/sense_energy_prometheus_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sense Energy exporter](https://github.com/ejsuncy/sense_energy_prometheus_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the 
configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sense Energy exporter](https://github.com/ejsuncy/sense_energy_prometheus_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type: process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -13870,7 +14059,7 @@ export const integrations = [ "community": true }, "overview": "# Sentry\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Sentry error tracking and monitoring platform metrics for efficient application performance and error management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Sentry Exporter](https://github.com/snakecharmer/sentry_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sentry Exporter](https://github.com/snakecharmer/sentry_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n",
+ "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sentry Exporter](https://github.com/snakecharmer/sentry_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -13905,7 +14094,7 @@ export const integrations = [ "community": true }, "overview": "# ServerTech\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Server Technology power distribution unit (PDU) metrics for efficient power management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ServerTech Exporter](https://github.com/tynany/servertech_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ServerTech Exporter](https://github.com/tynany/servertech_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration 
file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ServerTech Exporter](https://github.com/tynany/servertech_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -13940,7 +14129,7 @@ export const integrations = [ "community": true }, "overview": "# Shell command\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack custom command output metrics for tailored monitoring and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Command runner exporter](https://github.com/tomwilkie/prom-run).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Command runner exporter](https://github.com/tomwilkie/prom-run) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides 
metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n",
+ "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Command runner exporter](https://github.com/tomwilkie/prom-run) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -13975,7 +14164,7 @@ export const integrations = [ "community": true }, "overview": "# Shelly humidity sensor\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Shelly smart home device metrics for efficient home automation and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Shelly Exporter](https://github.com/aexel90/shelly_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Shelly Exporter](https://github.com/aexel90/shelly_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` 
script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Shelly Exporter](https://github.com/aexel90/shelly_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: 
local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14010,7 +14199,7 @@ export const integrations = [ "community": true }, "overview": "# Sia\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Sia decentralized storage platform metrics for efficient storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Sia Exporter](https://github.com/tbenz9/sia_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sia Exporter](https://github.com/tbenz9/sia_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides 
metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sia Exporter](https://github.com/tbenz9/sia_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14045,7 +14234,7 @@ export const integrations = [ "community": true }, "overview": "# Siemens S7 PLC\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Siemens S7 Programmable Logic Controller (PLC) metrics for efficient industrial automation and control.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Siemens S7 PLC exporter](https://github.com/MarcusCalidus/s7-plc-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Siemens S7 PLC exporter](https://github.com/MarcusCalidus/s7-plc-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the 
configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Siemens S7 PLC exporter](https://github.com/MarcusCalidus/s7-plc-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14080,7 +14269,7 @@ export const integrations = [ "community": true }, "overview": "# Site 24x7\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Site24x7 website and infrastructure monitoring metrics for efficient performance tracking and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [site24x7 Exporter](https://github.com/svenstaro/site24x7_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [site24x7 Exporter](https://github.com/svenstaro/site24x7_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [site24x7 Exporter](https://github.com/svenstaro/site24x7_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14115,7 +14304,7 @@ export const integrations = [ "community": true }, "overview": "# Slurm\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Slurm workload manager metrics for efficient high-performance computing (HPC) and cluster management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [slurm exporter](https://github.com/vpenso/prometheus-slurm-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [slurm exporter](https://github.com/vpenso/prometheus-slurm-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the 
`edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [slurm exporter](https://github.com/vpenso/prometheus-slurm-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14150,7 +14339,7 @@ export const integrations = [ "community": true }, "overview": "# SmartRG 808AC Cable Modem\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor SmartRG SR808ac router metrics for efficient network device management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [smartrg808ac_exporter](https://github.com/AdamIsrael/smartrg808ac_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [smartrg808ac_exporter](https://github.com/AdamIsrael/smartrg808ac_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. 
| 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names 
must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [smartrg808ac_exporter](https://github.com/AdamIsrael/smartrg808ac_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14185,7 +14374,7 @@ export const integrations = [ "community": true }, "overview": "# Smart meters SML\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Smart Message Language (SML) metrics for efficient smart metering and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SML Exporter](https://github.com/mweinelt/sml-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SML Exporter](https://github.com/mweinelt/sml-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from 
the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SML Exporter](https://github.com/mweinelt/sml-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: 
local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14220,7 +14409,7 @@ export const integrations = [ "community": true }, "overview": "# SoftEther VPN Server\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor SoftEther VPN Server metrics for efficient virtual private network (VPN) management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SoftEther Exporter](https://github.com/dalance/softether_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SoftEther Exporter](https://github.com/dalance/softether_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SoftEther Exporter](https://github.com/dalance/softether_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14255,7 +14444,7 @@ export const integrations = [ "community": true }, "overview": "# SolarEdge inverters\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack SolarEdge solar inverter metrics for efficient solar energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SolarEdge Exporter](https://github.com/dave92082/SolarEdge-Exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SolarEdge Exporter](https://github.com/dave92082/SolarEdge-Exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the 
`edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SolarEdge Exporter](https://github.com/dave92082/SolarEdge-Exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14290,7 +14479,7 @@ export const integrations = [ "community": true }, "overview": "# Solar logging stick\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor solar energy metrics using a solar logging stick for efficient solar energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Solar logging stick exporter](https://gitlab.com/bhavin192/lsx-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Solar logging stick exporter](https://gitlab.com/bhavin192/lsx-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. 
| 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names 
must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Solar logging stick exporter](https://gitlab.com/bhavin192/lsx-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Untyped metrics fallback type: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
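\n\nAs a quick sketch (the lsx_* names below are placeholders for illustration, not metrics this exporter is guaranteed to expose), keeping every lsx_* series except debug ones could look like:\n\n```yaml\nselector:\n allow:\n - lsx_*\n deny:\n - lsx_debug_*\n```\n\n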
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14325,7 +14514,7 @@ export const integrations = [ "community": true }, "overview": "# Solis Ginlong 5G inverters\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Solis solar inverter metrics for efficient solar energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Solis Exporter](https://github.com/candlerb/solis_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Solis Exporter](https://github.com/candlerb/solis_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` 
script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
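\n\nFor example (a hypothetical, illustration-only pattern, not a verified Solis metric name), collecting only one family of series could look like:\n\n```yaml\nselector:\n allow:\n - solis_*\n```\n\n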
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Solis Exporter](https://github.com/candlerb/solis_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Untyped metrics fallback type: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: 
local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14360,7 +14549,7 @@ export const integrations = [ "community": true }, "overview": "# Spacelift\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Spacelift infrastructure-as-code (IaC) platform metrics for efficient infrastructure automation and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Spacelift Exporter](https://github.com/spacelift-io/prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Spacelift Exporter](https://github.com/spacelift-io/prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. 
| 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names 
must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Spacelift Exporter](https://github.com/spacelift-io/prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Untyped metrics fallback type: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
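\n\nAs an illustrative sketch (assuming the glob-style patterns described in the selector README; go_* and process_* are the generic Go client runtime series, not Spacelift-specific names), dropping runtime noise while keeping everything else could look like:\n\n```yaml\nselector:\n deny:\n - go_*\n - process_*\n```\n\n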
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14395,7 +14584,7 @@ export const integrations = [ "community": true }, "overview": "# Speedify CLI\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Speedify VPN metrics for efficient virtual private network (VPN) management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Speedify Exporter](https://github.com/willshen/speedify_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Speedify Exporter](https://github.com/willshen/speedify_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the 
`edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
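\n\nA minimal sketch (placeholder patterns, not verified Speedify metric names) that combines an allow list with a deny list:\n\n```yaml\nselector:\n allow:\n - speedify_*\n deny:\n - speedify_debug_*\n```\n\n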
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Speedify Exporter](https://github.com/willshen/speedify_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Untyped metrics fallback type: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14430,7 +14619,7 @@ export const integrations = [ "community": true }, "overview": "# Sphinx\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Sphinx search engine metrics for efficient search and indexing performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Sphinx Exporter](https://github.com/foxdalas/sphinx_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sphinx Exporter](https://github.com/foxdalas/sphinx_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides 
metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sphinx Exporter](https://github.com/foxdalas/sphinx_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
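For example, a minimal sketch that keeps a couple of series and drops a noisier group (the metric names below are illustrative placeholders, not names guaranteed to be exposed by the Sphinx Exporter) could look like:\n\n```yaml\n# hypothetical patterns, for illustration only\nselector:\n allow:\n - sphinx_uptime\n - sphinx_connections\n deny:\n - sphinx_agent_*\n```\n\n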
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14465,7 +14654,7 @@ export const integrations = [ "community": true }, "overview": "# Starlink (SpaceX)\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor SpaceX Starlink satellite internet metrics for efficient internet service management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Starlink Exporter (SpaceX)](https://github.com/danopstech/starlink_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Starlink Exporter (SpaceX)](https://github.com/danopstech/starlink_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the 
configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
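As a hedged illustration, a job could allow a broad glob and deny a noisier subset (these metric names are placeholders, not actual Starlink Exporter series):\n\n```yaml\n# hypothetical patterns, for illustration only\nselector:\n allow:\n - starlink_*\n deny:\n - starlink_dish_snr_*\n```\n\n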
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Starlink Exporter (SpaceX)](https://github.com/danopstech/starlink_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14500,7 +14689,7 @@ export const integrations = [ "community": true }, "overview": "# Starwind VSAN VSphere Edition\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on StarWind Virtual SAN metrics for efficient storage virtualization and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Starwind vSAN Exporter](https://github.com/evoicefire/starwind-vsan-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Starwind vSAN Exporter](https://github.com/evoicefire/starwind-vsan-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. 
| 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names 
must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Starwind vSAN Exporter](https://github.com/evoicefire/starwind-vsan-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
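For instance, a sketch that collects everything under one prefix except debug series (the metric names below are assumed placeholders, not documented Starwind vSAN Exporter names) might be:\n\n```yaml\n# hypothetical patterns, for illustration only\nselector:\n allow:\n - starwind_vsan_*\n deny:\n - starwind_vsan_debug_*\n```\n\n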
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14535,7 +14724,7 @@ export const integrations = [ "community": true }, "overview": "# StatusPage\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor StatusPage.io incident and status metrics for efficient incident management and communication.\n\n\nMetrics are gathered by periodically sending HTTP requests to [StatusPage Exporter](https://github.com/vladvasiliu/statuspage-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [StatusPage Exporter](https://github.com/vladvasiliu/statuspage-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file 
using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
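For example, assuming placeholder metric names (not taken from the StatusPage Exporter), a filter might keep overall status series while dropping per-component ones:\n\n```yaml\n# hypothetical patterns, for illustration only\nselector:\n allow:\n - statuspage_*\n deny:\n - statuspage_component_*\n```\n\n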
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [StatusPage Exporter](https://github.com/vladvasiliu/statuspage-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14570,7 +14759,7 @@ export const integrations = [ "community": true }, "overview": "# Steam\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nGain insights into Steam A2S-supported game servers for performance and availability through real-time metric monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [A2S Exporter](https://github.com/armsnyder/a2s-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [A2S Exporter](https://github.com/armsnyder/a2s-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [A2S Exporter](https://github.com/armsnyder/a2s-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
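For instance, a minimal sketch that keeps one exporter's series and drops a noisy subset (the `a2s_*` names are hypothetical; the exact pattern syntax and option layout are described just below):

```yaml
selector:
  allow:
    - a2s_*           # hypothetical: keep only this exporter's series
  deny:
    - a2s_*_duration  # hypothetical: drop a noisy subset
```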
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14605,7 +14794,7 @@ export const integrations = [ "community": true }, "overview": "# Storidge\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Storidge storage metrics for efficient storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Storidge exporter](https://github.com/Storidge/cio-user-docs/blob/master/integrations/prometheus.md).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Storidge exporter](https://github.com/Storidge/cio-user-docs/blob/master/integrations/prometheus.md) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is 
`go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Storidge exporter](https://github.com/Storidge/cio-user-docs/blob/master/integrations/prometheus.md) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
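To keep only the most recent entries, pipe the output through `tail` (a convenience sketch; the line count is arbitrary):

```bash
grep prometheus /var/log/netdata/collector.log | tail -n 50
```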
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14640,7 +14829,7 @@ export const integrations = [ "community": true }, "overview": "# Stream\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor streaming metrics for efficient media streaming and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Stream exporter](https://github.com/carlpett/stream_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Stream exporter](https://github.com/carlpett/stream_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides 
metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Stream exporter](https://github.com/carlpett/stream_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
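A deny-only selector is often enough. As a sketch, the following drops the runtime series that Go-based exporters commonly expose alongside their own metrics (a common convention, not a guarantee for this exporter):

```yaml
selector:
  deny:
    - go_*       # Go runtime internals
    - process_*  # process-level series from the Prometheus client library
```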
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14675,7 +14864,7 @@ export const integrations = [ "community": true }, "overview": "# Sunspec Solar Energy\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor SunSpec Alliance solar energy metrics for efficient solar energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Sunspec Solar Energy Exporter](https://github.com/inosion/prometheus-sunspec-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sunspec Solar Energy Exporter](https://github.com/inosion/prometheus-sunspec-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou 
can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sunspec Solar Energy Exporter](https://github.com/inosion/prometheus-sunspec-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14710,7 +14899,7 @@ export const integrations = [ "community": true }, "overview": "# Suricata\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Suricata network intrusion detection and prevention system (IDS/IPS) metrics for efficient network security and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Suricata Exporter](https://github.com/corelight/suricata_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Suricata Exporter](https://github.com/corelight/suricata_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. 
| 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names 
must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Suricata Exporter](https://github.com/corelight/suricata_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics (counter or gauge). | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14745,7 +14934,7 @@ export const integrations = [ "community": true }, "overview": "# Synology ActiveBackup\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Synology Active Backup metrics for efficient backup and data protection management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Synology ActiveBackup Exporter](https://github.com/codemonauts/activebackup-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Synology ActiveBackup Exporter](https://github.com/codemonauts/activebackup-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is 
`go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Synology ActiveBackup Exporter](https://github.com/codemonauts/activebackup-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics (counter or gauge). | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14780,7 +14969,7 @@ export const integrations = [ "community": true }, "overview": "# Sysload\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor system load metrics for efficient system performance and resource management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Sysload Exporter](https://github.com/egmc/sysload_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sysload Exporter](https://github.com/egmc/sysload_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides 
metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sysload Exporter](https://github.com/egmc/sysload_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics (counter or gauge). | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14815,7 +15004,7 @@ export const integrations = [ "community": true }, "overview": "# T-Rex NVIDIA GPU Miner\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor T-Rex NVIDIA GPU miner metrics for efficient cryptocurrency mining and GPU performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [T-Rex NVIDIA GPU Miner Exporter](https://github.com/dennisstritzke/trex_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [T-Rex NVIDIA GPU Miner Exporter](https://github.com/dennisstritzke/trex_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit 
the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [T-Rex NVIDIA GPU Miner Exporter](https://github.com/dennisstritzke/trex_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics (counter or gauge). | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14850,7 +15039,7 @@ export const integrations = [ "community": true }, "overview": "# TACACS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Terminal Access Controller Access-Control System (TACACS) protocol metrics for efficient network authentication and authorization management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [TACACS Exporter](https://github.com/devon-mar/tacacs-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [TACACS Exporter](https://github.com/devon-mar/tacacs-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. 
| 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names 
must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [TACACS Exporter](https://github.com/devon-mar/tacacs-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series.
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14885,7 +15074,7 @@ export const integrations = [ "community": true }, "overview": "# TP-Link P110\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack TP-Link P110 smart plug metrics for efficient energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [TP-Link P110 Exporter](https://github.com/ijohanne/prometheus-tplink-p110-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [TP-Link P110 Exporter](https://github.com/ijohanne/prometheus-tplink-p110-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration 
file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [TP-Link P110 Exporter](https://github.com/ijohanne/prometheus-tplink-p110-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14920,7 +15109,7 @@ export const integrations = [ "community": true }, "overview": "# Tado smart heating solution\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Tado smart thermostat metrics for efficient home heating and cooling management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Tado\\xB0 Exporter](https://github.com/eko/tado-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Tado Exporter](https://github.com/eko/tado-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides 
metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Tado Exporter](https://github.com/eko/tado-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series.
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14955,7 +15144,7 @@ export const integrations = [ "community": true }, "overview": "# Tankerkoenig API\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Tankerkoenig API fuel price metrics for efficient fuel price monitoring and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Tankerkoenig API Exporter](https://github.com/lukasmalkmus/tankerkoenig_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Tankerkoenig API Exporter](https://github.com/lukasmalkmus/tankerkoenig_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the
configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Tankerkoenig API Exporter](https://github.com/lukasmalkmus/tankerkoenig_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds.
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14990,7 +15179,7 @@ export const integrations = [ "community": true }, "overview": "# Tesla Powerwall\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Tesla Powerwall metrics for efficient home energy storage and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Tesla Powerwall Exporter](https://github.com/foogod/powerwall_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Tesla Powerwall Exporter](https://github.com/foogod/powerwall_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Tesla Powerwall Exporter](https://github.com/foogod/powerwall_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -15025,7 +15214,7 @@ export const integrations = [ "community": true }, "overview": "# Tesla Wall Connector\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Tesla Wall Connector charging station metrics for efficient electric vehicle charging management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Tesla Wall Connector Exporter](https://github.com/benclapp/tesla_wall_connector_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Tesla Wall Connector Exporter](https://github.com/benclapp/tesla_wall_connector_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is 
`go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Tesla Wall Connector Exporter](https://github.com/benclapp/tesla_wall_connector_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -15060,7 +15249,7 @@ export const integrations = [ "community": true }, "overview": "# Tesla vehicle\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Tesla vehicle metrics for efficient electric vehicle management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Tesla exporter](https://github.com/wywywywy/tesla-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Tesla exporter](https://github.com/wywywywy/tesla-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Tesla exporter](https://github.com/wywywywy/tesla-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -15095,7 +15284,7 @@ export const integrations = [ "community": true }, "overview": "# Traceroute\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nExport traceroute metrics for efficient network path analysis and performance monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [traceroute exporter](https://github.com/jeanfabrice/prometheus-tcptraceroute-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [traceroute exporter](https://github.com/jeanfabrice/prometheus-tcptraceroute-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the 
configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [traceroute exporter](https://github.com/jeanfabrice/prometheus-tcptraceroute-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -15130,7 +15319,7 @@ export const integrations = [ "community": true }, "overview": "# TwinCAT ADS Web Service\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor TwinCAT ADS (Automation Device Specification) Web Service metrics for efficient industrial automation and control.\n\n\nMetrics are gathered by periodically sending HTTP requests to [TwinCAT ADS Web Service exporter](https://github.com/MarcusCalidus/twincat-ads-webservice-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [TwinCAT ADS Web Service exporter](https://github.com/MarcusCalidus/twincat-ads-webservice-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection 
frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, 
their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", +    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [TwinCAT ADS Web Service exporter](https://github.com/MarcusCalidus/twincat-ads-webservice-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -15165,7 +15354,7 @@ export const integrations = [ "community": true }, "overview": "# Twitch\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Twitch streaming platform metrics for efficient live streaming management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Twitch exporter](https://github.com/damoun/twitch_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Twitch exporter](https://github.com/damoun/twitch_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from 
the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Twitch exporter](https://github.com/damoun/twitch_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: 
local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -15200,7 +15389,7 @@ export const integrations = [ "community": true }, "overview": "# Ubiquiti UFiber OLT\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Ubiquiti UFiber GPON (Gigabit Passive Optical Network) device metrics for efficient fiber-optic network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ufiber-exporter](https://github.com/swoga/ufiber-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ufiber-exporter](https://github.com/swoga/ufiber-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. 
| 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names 
must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", +    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ufiber-exporter](https://github.com/swoga/ufiber-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -15235,7 +15424,7 @@ export const integrations = [ "community": true }, "overview": "# Uptimerobot\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor UptimeRobot website uptime monitoring metrics for efficient website availability tracking and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Uptimerobot Exporter](https://github.com/wosc/prometheus-uptimerobot).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Uptimerobot Exporter](https://github.com/wosc/prometheus-uptimerobot) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration 
file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Uptimerobot Exporter](https://github.com/wosc/prometheus-uptimerobot) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -15270,7 +15459,7 @@ export const integrations = [ "community": true }, "overview": "# VSCode\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Visual Studio Code editor metrics for efficient development environment management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [VSCode Exporter](https://github.com/guicaulada/vscode-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [VSCode Exporter](https://github.com/guicaulada/vscode-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [VSCode Exporter](https://github.com/guicaulada/vscode-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
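For instance, a job can keep only the editor's own series and drop everything else. A minimal sketch, assuming a hypothetical `vscode_*` metric name prefix (not taken from the exporter's docs); the pattern syntax and matching logic are described just below:

```yaml
jobs:
  - name: vscode_local
    url: http://127.0.0.1:9090/metrics
    selector:
      allow:
        - vscode_*  # hypothetical prefix: collect only series whose names match it
```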
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -15305,7 +15494,7 @@ export const integrations = [ "community": true }, "overview": "# Vault PKI\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor HashiCorp Vault Public Key Infrastructure (PKI) metrics for efficient certificate management and security.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Vault PKI Exporter](https://github.com/aarnaud/vault-pki-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Vault PKI Exporter](https://github.com/aarnaud/vault-pki-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file 
using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
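As an illustration, a deny-only selector can strip exporter runtime noise while keeping everything else. A sketch assuming the exporter also exposes the usual Go client series (`go_*`, `process_*`), which is typical for Go-based exporters but not verified here; the pattern syntax is described just below:

```yaml
jobs:
  - name: vault_pki
    url: http://127.0.0.1:9090/metrics
    selector:
      deny:
        - go_*       # assumed Go runtime series
        - process_*  # assumed process-level series
```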
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Vault PKI Exporter](https://github.com/aarnaud/vault-pki-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -15340,7 +15529,7 @@ export const integrations = [ "community": true }, "overview": "# Vertica\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Vertica analytics database platform metrics for efficient database performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [vertica-prometheus-exporter](https://github.com/vertica/vertica-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [vertica-prometheus-exporter](https://github.com/vertica/vertica-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. 
| 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names 
must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [vertica-prometheus-exporter](https://github.com/vertica/vertica-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
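When both lists are set, they combine with the AND-of-OR logic described just below: a series must match at least one allow pattern and no deny pattern. A sketch with hypothetical patterns:

```yaml
jobs:
  - name: vertica
    url: http://127.0.0.1:9090/metrics
    selector:
      allow:
        - vertica_*         # hypothetical: keep the database's own series
      deny:
        - vertica_*_bucket  # hypothetical: drop histogram bucket series
```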
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -15375,7 +15564,7 @@ export const integrations = [ "community": true }, "overview": "# Warp10\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Warp 10 time-series database metrics for efficient time-series data management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Warp10 Exporter](https://github.com/centreon/warp10-sensision-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Warp10 Exporter](https://github.com/centreon/warp10-sensision-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using 
the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
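Note that `selector` is a per-job option, so different jobs can filter differently. A minimal sketch with hypothetical patterns, filtering the local instance while collecting everything from the remote one; the pattern syntax is described just below:

```yaml
jobs:
  - name: warp10_local
    url: http://127.0.0.1:9090/metrics
    selector:
      allow:
        - warp10_*  # hypothetical prefix

  - name: warp10_remote
    url: http://192.0.2.1:9090/metrics
    # no selector: collect all time series
```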
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Warp10 Exporter](https://github.com/centreon/warp10-sensision-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -15410,7 +15599,7 @@ export const integrations = [ "community": true }, "overview": "# XMPP Server\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor XMPP (Extensible Messaging and Presence Protocol) server metrics for efficient messaging and communication management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [XMPP Server Exporter](https://github.com/horazont/xmpp-blackbox-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [XMPP Server Exporter](https://github.com/horazont/xmpp-blackbox-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. 
| 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names 
must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [XMPP Server Exporter](https://github.com/horazont/xmpp-blackbox-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
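As an illustration, a selector such as the following (the metric names are hypothetical, not taken from this exporter) would keep every `probe_*` series except `probe_duration_seconds`:\n\n```yaml\nselector:\n allow:\n - probe_*\n deny:\n - probe_duration_seconds\n```\n\n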
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -15445,7 +15634,7 @@ export const integrations = [ "community": true }, "overview": "# Xiaomi Mi Flora\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on MiFlora plant monitor metrics for efficient plant care and growth management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [MiFlora / Flower Care Exporter](https://github.com/xperimental/flowercare-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [MiFlora / Flower Care Exporter](https://github.com/xperimental/flowercare-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the 
configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [MiFlora / Flower Care Exporter](https://github.com/xperimental/flowercare-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -15480,7 +15669,7 @@ export const integrations = [ "community": true }, "overview": "# YOURLS URL Shortener\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor YOURLS (Your Own URL Shortener) metrics for efficient URL shortening service management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [YOURLS exporter](https://github.com/just1not2/prometheus-exporter-yourls).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [YOURLS exporter](https://github.com/just1not2/prometheus-exporter-yourls) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. 
| 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names 
must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n",
+ "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall the [YOURLS exporter](https://github.com/just1not2/prometheus-exporter-yourls) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
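For instance, with hypothetical metric names, the following would keep every `yourls_*` series except those matching `yourls_debug_*`:\n\n```yaml\nselector:\n allow:\n - yourls_*\n deny:\n - yourls_debug_*\n```\n\n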
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration that reads metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n",
"troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace the name if yours differs), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",
"alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n",
"metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",
@@ -15515,7 +15704,7 @@ export const integrations = [
      "community": true
    },
    "overview": "# Zerto\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Zerto disaster recovery and data protection metrics for efficient backup and recovery management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Zerto Exporter](https://github.com/claranet/zerto-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",
-    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Zerto Exporter](https://github.com/claranet/zerto-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script 
from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Zerto Exporter](https://github.com/claranet/zerto-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: 
local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -15550,7 +15739,7 @@ export const integrations = [ "community": true }, "overview": "# Zulip\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Zulip open-source group chat application metrics for efficient team communication management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Zulip Exporter](https://github.com/brokenpip3/zulip-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Zulip Exporter](https://github.com/brokenpip3/zulip-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Zulip Exporter](https://github.com/brokenpip3/zulip-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -15585,7 +15774,7 @@ export const integrations = [ "community": true }, "overview": "# Zyxel GS1200-8\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Zyxel GS1200 network switch metrics for efficient network device management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Zyxel GS1200 Exporter](https://github.com/robinelfrink/gs1200-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Zyxel GS1200 Exporter](https://github.com/robinelfrink/gs1200-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file 
using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Zyxel GS1200 Exporter](https://github.com/robinelfrink/gs1200-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -15620,7 +15809,7 @@ export const integrations = [ "community": true }, "overview": "# bpftrace variables\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack bpftrace metrics for advanced performance analysis and troubleshooting.\n\n\nMetrics are gathered by periodically sending HTTP requests to [bpftrace exporter](https://github.com/andreasgerstmayr/bpftrace_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [bpftrace exporter](https://github.com/andreasgerstmayr/bpftrace_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [bpftrace exporter](https://github.com/andreasgerstmayr/bpftrace_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -15655,7 +15844,7 @@ export const integrations = [ "community": true }, "overview": "# cAdvisor\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor container resource usage and performance metrics with cAdvisor for efficient container management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [cAdvisor](https://github.com/google/cadvisor).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [cAdvisor](https://github.com/google/cadvisor) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata 
[config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [cAdvisor](https://github.com/google/cadvisor) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: 
local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -15690,7 +15879,7 @@ export const integrations = [ "community": true }, "overview": "# etcd\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack etcd database metrics for optimized distributed key-value store management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to etcd built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). 
| | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: 
http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n",
+    "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -15725,7 +15914,7 @@ export const integrations = [ "community": true }, "overview": "# gpsd\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor GPSD (GPS daemon) metrics for efficient GPS data management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [gpsd exporter](https://github.com/natesales/gpsd-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [gpsd exporter](https://github.com/natesales/gpsd-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata 
[config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [gpsd exporter](https://github.com/natesales/gpsd-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: 
local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -15760,7 +15949,7 @@ export const integrations = [ "community": true }, "overview": "# iqAir AirVisual air quality monitors\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor air quality data from IQAir devices for efficient environmental monitoring and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [IQair Exporter](https://github.com/Packetslave/iqair_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [IQair Exporter](https://github.com/Packetslave/iqair_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [IQair Exporter](https://github.com/Packetslave/iqair_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -15795,7 +15984,7 @@ export const integrations = [ "community": true }, "overview": "# jolokia\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Jolokia JVM metrics for optimized Java application performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [jolokia_exporter](https://github.com/aklinkert/jolokia_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [jolokia_exporter](https://github.com/aklinkert/jolokia_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from 
the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [jolokia_exporter](https://github.com/aklinkert/jolokia_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -15830,7 +16019,7 @@ export const integrations = [ "community": true }, "overview": "# journald\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on systemd-journald metrics for efficient log management and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [journald-exporter](https://github.com/dead-claudia/journald-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [journald-exporter](https://github.com/dead-claudia/journald-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [journald-exporter](https://github.com/dead-claudia/journald-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
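For instance, a hypothetical journald-exporter job that keeps only unit-related series could use an allow list (a sketch only; the pattern is an assumption, adjust it to the metric names your endpoint actually serves):\n\n```yaml\n# illustrative pattern, not a metric name guaranteed by journald-exporter\nselector:\n allow:\n - journald_*\n```\n\n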
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -15865,7 +16054,7 @@ export const integrations = [ "community": true }, "overview": "# loki\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Loki metrics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [loki](https://github.com/grafana/loki).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Loki\n\nInstall [loki](https://github.com/grafana/loki) according to its documentation.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd 
/opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Loki\n\nInstall [loki](https://github.com/grafana/loki) according to its documentation.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. 
| | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url:
https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
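If the file is large, you can narrow the output to the most recent matches, for example:\n\n```bash\n# show only the last 50 matching lines\ngrep prometheus /var/log/netdata/collector.log | tail -n 50\n```\n\n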
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -15900,7 +16089,7 @@ export const integrations = [ "community": true }, "overview": "# mosquitto\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Mosquitto MQTT broker metrics for efficient IoT message transport and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [mosquitto exporter](https://github.com/sapcc/mosquitto-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [mosquitto exporter](https://github.com/sapcc/mosquitto-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored 
application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [mosquitto exporter](https://github.com/sapcc/mosquitto-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
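As an illustration, you might keep only the broker series and drop everything else (the pattern below is an assumption about what mosquitto-exporter publishes, not a guaranteed metric name):\n\n```yaml\n# sketch: allow broker_* series only\nselector:\n allow:\n - broker_*\n```\n\n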
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -15935,7 +16124,7 @@ export const integrations = [ "community": true }, "overview": "# mtail\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor log data metrics using mtail log data extractor and parser.\n\n\nMetrics are gathered by periodically sending HTTP requests to [mtail](https://github.com/google/mtail).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [mtail](https://github.com/google/mtail) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config 
directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [mtail](https://github.com/google/mtail) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name:
local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
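If the file is long, one simple way to surface only the most recent matches is to pipe through `tail` (a sketch; the line count of 50 is arbitrary):\n\n```bash\n# show only the last 50 matching collector log lines\ngrep prometheus /var/log/netdata/collector.log | tail -n 50\n```\n\n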
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -15970,7 +16159,7 @@ export const integrations = [ "community": true }, "overview": "# nftables\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor nftables firewall metrics for efficient network security and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [nftables_exporter](https://github.com/Sheridan/nftables_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [nftables_exporter](https://github.com/Sheridan/nftables_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides 
metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [nftables_exporter](https://github.com/Sheridan/nftables_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
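For instance, a minimal sketch of an allow/deny pair (the `nft_*` patterns below are illustrative, not actual nftables_exporter metric names):\n\n```yaml\n# collect all nft_* series except byte counters\nselector:\n allow:\n - nft_*\n deny:\n - nft_*_bytes\n```\n\n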
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -16005,7 +16194,7 @@ export const integrations = [ "community": true }, "overview": "# pgBackRest\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor pgBackRest PostgreSQL backup metrics for efficient database backup and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [pgBackRest Exporter](https://github.com/woblerr/pgbackrest_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [pgBackRest Exporter](https://github.com/woblerr/pgbackrest_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the 
`edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
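As a hedged sketch, a deny-only selector drops a handful of unwanted series while keeping everything else (the metric name below is illustrative, and this assumes the usual convention that an empty allow list permits all series):\n\n```yaml\n# keep everything except one noisy series\nselector:\n deny:\n - pgbackrest_exporter_status\n```\n\n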
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [pgBackRest Exporter](https://github.com/woblerr/pgbackrest_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true 
summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -16040,7 +16229,7 @@ export const integrations = [ "community": true }, "overview": "# strongSwan\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack strongSwan VPN and IPSec metrics using the vici interface for efficient virtual private network (VPN) management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [strongSwan/IPSec/vici Exporter](https://github.com/jlti-dev/ipsec_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [strongSwan/IPSec/vici Exporter](https://github.com/jlti-dev/ipsec_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. 
| 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names 
must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [strongSwan/IPSec/vici Exporter](https://github.com/jlti-dev/ipsec_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
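For example, an allow-only sketch that limits collection to a single family of series (the `ipsec_*` pattern is hypothetical, not a confirmed exporter metric name):\n\n```yaml\n# collect only series whose names start with ipsec_\nselector:\n allow:\n - ipsec_*\n```\n\n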
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -16078,7 +16267,7 @@ export const integrations = [ "most_popular": false }, "overview": "# ProxySQL\n\nPlugin: go.d.plugin\nModule: proxysql\n\n## Overview\n\nThis collector monitors ProxySQL servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/proxysql.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/proxysql.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| 
update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | Data Source Name. See [DSN syntax](https://github.com/go-sql-driver/mysql#dsn-data-source-name). | stats:stats@tcp(127.0.0.1:6032)/ | yes |\n| timeout | Query timeout in seconds. | 1 | no |\n\n{% /details %}\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: stats:stats@tcp(127.0.0.1:6032)/\n\n```\n{% /details %}\n##### my.cnf\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n my.cnf: '/etc/my.cnf'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: stats:stats@tcp(127.0.0.1:6032)/\n\n - name: remote\n dsn: stats:stats@tcp(203.0.113.0:6032)/\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/proxysql.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/proxysql.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | Data Source Name. See [DSN syntax](https://github.com/go-sql-driver/mysql#dsn-data-source-name). | stats:stats@tcp(127.0.0.1:6032)/ | yes |\n| timeout | Query timeout in seconds. | 1 | no |\n\n{% /details %}\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: stats:stats@tcp(127.0.0.1:6032)/\n\n```\n{% /details %}\n##### my.cnf\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n my.cnf: '/etc/my.cnf'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: stats:stats@tcp(127.0.0.1:6032)/\n\n - name: remote\n dsn: stats:stats@tcp(203.0.113.0:6032)/\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `proxysql` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m proxysql\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `proxysql` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep proxysql\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep proxysql /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep proxysql\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per ProxySQL instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| proxysql.client_connections_count | connected, non_idle, hostgroup_locked | connections |\n| proxysql.client_connections_rate | created, aborted | connections/s |\n| proxysql.server_connections_count | connected | connections |\n| proxysql.server_connections_rate | created, aborted, delayed | connections/s |\n| proxysql.backends_traffic | recv, sent | B/s |\n| proxysql.clients_traffic | recv, sent | B/s |\n| proxysql.active_transactions_count | client | connections |\n| proxysql.questions_rate | questions | questions/s |\n| proxysql.slow_queries_rate | slow | queries/s |\n| proxysql.queries_rate | autocommit, autocommit_filtered, commit_filtered, rollback, rollback_filtered, backend_change_user, backend_init_db, backend_set_names, frontend_init_db, frontend_set_names, frontend_use_db | queries/s |\n| proxysql.backend_statements_count | total, unique | statements |\n| proxysql.backend_statements_rate | prepare, execute, close | statements/s |\n| proxysql.client_statements_count | total, unique | statements |\n| proxysql.client_statements_rate | prepare, execute, close | statements/s |\n| proxysql.cached_statements_count | cached | statements |\n| proxysql.query_cache_entries_count | entries | entries |\n| proxysql.query_cache_memory_used | used | B |\n| proxysql.query_cache_io | in, out | B/s |\n| proxysql.query_cache_requests_rate | read, write, read_success | requests/s |\n| proxysql.mysql_monitor_workers_count | workers, auxiliary | threads |\n| proxysql.mysql_monitor_workers_rate | started | 
workers/s |\n| proxysql.mysql_monitor_connect_checks_rate | succeed, failed | checks/s |\n| proxysql.mysql_monitor_ping_checks_rate | succeed, failed | checks/s |\n| proxysql.mysql_monitor_read_only_checks_rate | succeed, failed | checks/s |\n| proxysql.mysql_monitor_replication_lag_checks_rate | succeed, failed | checks/s |\n| proxysql.jemalloc_memory_used | active, allocated, mapped, metadata, resident, retained | B |\n| proxysql.memory_used | auth, sqlite3, query_digest, query_rules, firewall_users_table, firewall_users_config, firewall_rules_table, firewall_rules_config, mysql_threads, admin_threads, cluster_threads | B |\n| proxysql.uptime | uptime | seconds |\n\n### Per command\n\nThese metrics refer to the SQL command.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| command | SQL command. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| proxysql.mysql_command_execution_rate | uptime | seconds |\n| proxysql.mysql_command_execution_time | time | microseconds |\n| proxysql.mysql_command_execution_duration | 100us, 500us, 1ms, 5ms, 10ms, 50ms, 100ms, 500ms, 1s, 5s, 10s, +Inf | microseconds |\n\n### Per user\n\nThese metrics refer to the user.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| user | username from the mysql_users table |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| proxysql.mysql_user_connections_utilization | used | percentage |\n| proxysql.mysql_user_connections_count | used | connections |\n\n### Per backend\n\nThese metrics refer to the backend server.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| host | backend server host |\n| port | backend server port |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| proxysql.backend_status | online, shunned, offline_soft, offline_hard | status |\n| proxysql.backend_connections_usage | free, used | connections |\n| proxysql.backend_connections_rate | succeed, failed | connections/s |\n| proxysql.backend_queries_rate | queries | queries/s |\n| proxysql.backend_traffic | recv, send | B/s |\n| proxysql.backend_latency | latency | microseconds |\n\n", @@ -16119,7 +16308,7 @@ export const integrations = [ "most_popular": true }, "overview": "# Apache Pulsar\n\nPlugin: go.d.plugin\nModule: pulsar\n\n## Overview\n\nThis collector monitors Pulsar servers.\n\n\nIt collects broker statistics using Pulsar's [Prometheus endpoint](https://pulsar.apache.org/docs/en/deploy-monitoring/#broker-stats).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Pulsar instances running on localhost.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/pulsar.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo 
./edit-config go.d/pulsar.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8080/metrics | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8080/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/metrics\n\n - name: remote\n url: http://192.0.2.1:8080/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/pulsar.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/pulsar.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. 
| http://127.0.0.1:8080/metrics | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8080/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/metrics\n\n - name: remote\n url: http://192.0.2.1:8080/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `pulsar` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m pulsar\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `pulsar` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep pulsar\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep pulsar /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep pulsar\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n- topic_* metrics are available when `exposeTopicLevelMetricsInPrometheus` is set to true.\n- subscription_* and namespace_subscription metrics are available when `exposeTopicLevelMetricsInPrometheus` is set to true.\n- replication_* and namespace_replication_* metrics are available when replication is configured and `replicationMetricsEnabled` is set to true.\n\n\n### Per Apache Pulsar instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| pulsar.broker_components | namespaces, topics, subscriptions, producers, consumers | components |\n| pulsar.messages_rate | publish, dispatch | messages/s |\n| pulsar.throughput_rate | publish, dispatch | KiB/s |\n| pulsar.storage_size | used | KiB |\n| pulsar.storage_operations_rate | read, write | message batches/s |\n| pulsar.msg_backlog | backlog | messages |\n| pulsar.storage_write_latency | <=0.5ms, <=1ms, <=5ms, <=10ms, <=20ms, <=50ms, <=100ms, <=200ms, <=1s, >1s | entries/s |\n| pulsar.entry_size | <=128B, <=512B, <=1KB, <=2KB, <=4KB, <=16KB, <=100KB, <=1MB, >1MB | entries/s |\n| pulsar.subscription_delayed | delayed | message batches |\n| pulsar.subscription_msg_rate_redeliver | redelivered | messages/s |\n| pulsar.subscription_blocked_on_unacked_messages | blocked | subscriptions |\n| pulsar.replication_rate | in, out | messages/s |\n| pulsar.replication_throughput_rate | in, out | KiB/s |\n| pulsar.replication_backlog | backlog | messages |\n\n### Per namespace\n\nTBD\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| pulsar.namespace_broker_components | topics, subscriptions, producers, consumers | components |\n| pulsar.namespace_messages_rate | publish, dispatch | messages/s |\n| pulsar.namespace_throughput_rate | publish, dispatch | KiB/s |\n| pulsar.namespace_storage_size | used | KiB |\n| pulsar.namespace_storage_operations_rate | read, write | message batches/s |\n| pulsar.namespace_msg_backlog | backlog | messages |\n| pulsar.namespace_storage_write_latency | <=0.5ms, <=1ms, <=5ms, <=10ms, <=20ms, <=50ms, <=100ms, <=200ms, <=1s, >1s | entries/s |\n| pulsar.namespace_entry_size | <=128B, <=512B, <=1KB, <=2KB, <=4KB, <=16KB, <=100KB, <=1MB, >1MB | entries/s |\n| pulsar.namespace_subscription_delayed | delayed | message batches |\n| 
pulsar.namespace_subscription_msg_rate_redeliver | redelivered | messages/s |\n| pulsar.namespace_subscription_blocked_on_unacked_messages | blocked | subscriptions |\n| pulsar.namespace_replication_rate | in, out | messages/s |\n| pulsar.namespace_replication_throughput_rate | in, out | KiB/s |\n| pulsar.namespace_replication_backlog | backlog | messages |\n| pulsar.topic_producers | a dimension per topic | producers |\n| pulsar.topic_subscriptions | a dimension per topic | subscriptions |\n| pulsar.topic_consumers | a dimension per topic | consumers |\n| pulsar.topic_messages_rate_in | a dimension per topic | publishes/s |\n| pulsar.topic_messages_rate_out | a dimension per topic | dispatches/s |\n| pulsar.topic_throughput_rate_in | a dimension per topic | KiB/s |\n| pulsar.topic_throughput_rate_out | a dimension per topic | KiB/s |\n| pulsar.topic_storage_size | a dimension per topic | KiB |\n| pulsar.topic_storage_read_rate | a dimension per topic | message batches/s |\n| pulsar.topic_storage_write_rate | a dimension per topic | message batches/s |\n| pulsar.topic_msg_backlog | a dimension per topic | messages |\n| pulsar.topic_subscription_delayed | a dimension per topic | message batches |\n| pulsar.topic_subscription_msg_rate_redeliver | a dimension per topic | messages/s |\n| pulsar.topic_subscription_blocked_on_unacked_messages | a dimension per topic | blocked subscriptions |\n| pulsar.topic_replication_rate_in | a dimension per topic | messages/s |\n| pulsar.topic_replication_rate_out | a dimension per topic | messages/s |\n| pulsar.topic_replication_throughput_rate_in | a dimension per topic | messages/s |\n| pulsar.topic_replication_throughput_rate_out | a dimension per topic | messages/s |\n| pulsar.topic_replication_backlog | a dimension per topic | messages |\n\n", @@ -16155,7 +16344,7 @@ export const integrations = [ "most_popular": false }, "overview": "# Puppet\n\nPlugin: go.d.plugin\nModule: puppet\n\n## Overview\n\nThis collector monitors Puppet metrics, including JVM heap and non-heap memory, CPU usage, and file descriptors.\n\n\nIt uses Puppet's metrics API endpoint [/status/v1/services](https://www.puppet.com/docs/puppetserver/5.3/status-api/v1/services.html) to gather the metrics.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Puppet instances running on localhost that are listening on port 8140.\nOn startup, it tries to collect metrics from:\n\n- https://127.0.0.1:8140\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/puppet.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/puppet.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"\" %}\n| Name | Description | Default | 
Required |\n|:----|:-----------|:-------|:--------:|\n| url | The base URL where the Puppet instance can be accessed. | https://127.0.0.1:8140 | yes |\n| timeout | HTTPS request timeout. | 1 | no |\n| username | Username for basic HTTPS authentication. | | no |\n| password | Password for basic HTTPS authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTPS authentication. | | no |\n| proxy_password | Password for proxy basic HTTPS authentication. | | no |\n| method | HTTPS request method. | POST | no |\n| body | HTTPS request body. | | no |\n| headers | HTTPS request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic with self-signed certificate\n\nPuppet with self-signed TLS certificate.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8140\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8140\n tls_skip_verify: yes\n\n - name: remote\n url: https://192.0.2.1:8140\n tls_skip_verify: yes\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/puppet.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/puppet.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| url | The base URL where the Puppet instance can be accessed. | https://127.0.0.1:8140 | yes |\n| timeout | HTTPS request timeout. | 1 | no |\n| username | Username for basic HTTPS authentication. | | no |\n| password | Password for basic HTTPS authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTPS authentication. | | no |\n| proxy_password | Password for proxy basic HTTPS authentication. | | no |\n| method | HTTPS request method. | POST | no |\n| body | HTTPS request body. | | no |\n| headers | HTTPS request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. 
| no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic with self-signed certificate\n\nPuppet with self-signed TLS certificate.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:8140\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:8140\n    tls_skip_verify: yes\n\n  - name: remote\n    url: https://192.0.2.1:8140\n    tls_skip_verify: yes\n\n```\n{% /details %}\n",
 "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `puppet` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m puppet\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `puppet` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep puppet\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep puppet /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep puppet\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
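To see the raw data these metrics are parsed from, you can query Puppet's status API directly (a minimal sketch, assuming the default `https://127.0.0.1:8140` base URL and a self-signed certificate, hence curl's `-k`):\n\n```bash\n# Print the beginning of the /status/v1/services JSON the collector reads\ncurl -sk https://127.0.0.1:8140/status/v1/services | head -c 300; echo\n```\n\n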
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Puppet instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| puppet.jvm_heap | committed, used | MiB |\n| puppet.jvm_nonheap | committed, used | MiB |\n| puppet.cpu | execution, GC | percentage |\n| puppet.fdopen | used | descriptors |\n\n", @@ -16192,7 +16381,7 @@ export const integrations = [ "most_popular": false }, "overview": "# RabbitMQ\n\nPlugin: go.d.plugin\nModule: rabbitmq\n\n## Overview\n\nThis collector monitors RabbitMQ instances.\n\nIt collects data using an HTTP-based API provided by the [management plugin](https://www.rabbitmq.com/management.html).\nThe following endpoints are used:\n\n- `/api/overview`\n- `/api/node/{node_name}`\n- `/api/vhosts`\n- `/api/queues` (disabled by default)\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Enable management plugin.\n\nThe management plugin is included in the RabbitMQ distribution, but disabled.\nTo enable see [Management Plugin](https://www.rabbitmq.com/management.html#getting-started) documentation.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/rabbitmq.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/rabbitmq.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://localhost:15672 | yes |\n| collect_queues_metrics | Collect stats per vhost per queues. Enabling this can introduce serious overhead on both Netdata and RabbitMQ if many queues are configured and used. | no | no |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. 
| no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:15672\n\n```\n{% /details %}\n##### Basic HTTP auth\n\nLocal server with basic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:15672\n    username: admin\n    password: password\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:15672\n\n  - name: remote\n    url: http://192.0.2.0:15672\n\n```\n{% /details %}\n",
+ "setup": "## Setup\n\n### Prerequisites\n\n#### Enable management plugin.\n\nThe management plugin is included in the RabbitMQ distribution, but it is disabled by default.\nTo enable it, see the [Management Plugin](https://www.rabbitmq.com/management.html#getting-started) documentation.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/rabbitmq.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/rabbitmq.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://localhost:15672 | yes |\n| collect_queues_metrics | Collect stats per vhost per queue. Enabling this can introduce serious overhead on both Netdata and RabbitMQ if many queues are configured and used. | no | no |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:15672\n\n```\n{% /details %}\n##### Basic HTTP auth\n\nLocal server with basic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:15672\n    username: admin\n    password: password\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:15672\n\n  - name: remote\n    url: http://192.0.2.0:15672\n\n```\n{% /details %}\n",
 "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `rabbitmq` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m rabbitmq\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `rabbitmq` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep rabbitmq\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep rabbitmq /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep rabbitmq\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
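To see the raw data these metrics are derived from, you can call the management API's `/api/overview` endpoint yourself (a minimal sketch, assuming the default port and the default `guest:guest` credentials, which RabbitMQ accepts only from localhost):\n\n```bash\n# Print the beginning of the JSON overview the collector reads\ncurl -s -u guest:guest http://127.0.0.1:15672/api/overview | head -c 300; echo\n```\n\n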
An instance is uniquely identified by a set of labels.\n\n\n\n### Per RabbitMQ instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| rabbitmq.messages_count | ready, unacknowledged | messages |\n| rabbitmq.messages_rate | ack, publish, publish_in, publish_out, confirm, deliver, deliver_no_ack, get, get_no_ack, deliver_get, redeliver, return_unroutable | messages/s |\n| rabbitmq.objects_count | channels, consumers, connections, queues, exchanges | messages |\n| rabbitmq.connection_churn_rate | created, closed | operations/s |\n| rabbitmq.channel_churn_rate | created, closed | operations/s |\n| rabbitmq.queue_churn_rate | created, deleted, declared | operations/s |\n| rabbitmq.file_descriptors_count | available, used | fd |\n| rabbitmq.sockets_count | available, used | sockets |\n| rabbitmq.erlang_processes_count | available, used | processes |\n| rabbitmq.erlang_run_queue_processes_count | length | processes |\n| rabbitmq.memory_usage | used | bytes |\n| rabbitmq.disk_space_free_size | free | bytes |\n\n### Per vhost\n\nThese metrics refer to the virtual host.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vhost | virtual host name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| rabbitmq.vhost_messages_count | ready, unacknowledged | messages |\n| rabbitmq.vhost_messages_rate | ack, publish, publish_in, publish_out, confirm, deliver, deliver_no_ack, get, get_no_ack, deliver_get, redeliver, return_unroutable | messages/s |\n\n### Per queue\n\nThese metrics refer to the virtual host queue.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vhost | virtual host name |\n| queue | queue name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| rabbitmq.queue_messages_count | ready, unacknowledged, paged_out, persistent | messages |\n| rabbitmq.queue_messages_rate | ack, publish, publish_in, publish_out, confirm, deliver, deliver_no_ack, get, get_no_ack, deliver_get, redeliver, return_unroutable | messages/s |\n\n", @@ -16239,7 +16428,7 @@ export const integrations = [ "most_popular": true }, "overview": "# Redis\n\nPlugin: go.d.plugin\nModule: redis\n\n## Overview\n\nThis collector monitors the health and performance of Redis servers and collects general statistics, CPU and memory consumption, replication information, command statistics, and more.\n\n\nIt connects to the Redis instance via a TCP or UNIX socket and executes the following commands:\n\n- [INFO ALL](https://redis.io/commands/info)\n- [PING](https://redis.io/commands/ping/)\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by attempting to connect using known Redis TCP and UNIX sockets:\n\n- 127.0.0.1:6379\n- /tmp/redis.sock\n- /var/run/redis/redis.sock\n- /var/lib/redis/redis.sock\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this 
integration is `go.d/redis.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/redis.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Redis server address. | redis://@localhost:6379 | yes |\n| timeout | Dial (establishing new connections), read (socket reads) and write (socket writes) timeout in seconds. | 1 | no |\n| username | Username used for authentication. | | no |\n| password | Password used for authentication. | | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certificate authority that client use when verifying server certificates. | | no |\n| tls_cert | Client tls certificate. | | no |\n| tls_key | Client tls key. | | no |\n\n{% /details %}\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 'redis://@127.0.0.1:6379'\n\n```\n{% /details %}\n##### Unix socket\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 'unix://@/tmp/redis.sock'\n\n```\n{% /details %}\n##### TCP socket with password\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 'redis://:password@127.0.0.1:6379'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 'redis://:password@127.0.0.1:6379'\n\n - name: remote\n address: 'redis://user:password@203.0.113.0:6379'\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/redis.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/redis.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Redis server address. 
| redis://@localhost:6379 | yes |\n| timeout | Dial (establishing new connections), read (socket reads) and write (socket writes) timeout in seconds. | 1 | no |\n| username | Username used for authentication. | | no |\n| password | Password used for authentication. | | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certificate authority that the client uses when verifying server certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    address: 'redis://@127.0.0.1:6379'\n\n```\n{% /details %}\n##### Unix socket\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    address: 'unix://@/tmp/redis.sock'\n\n```\n{% /details %}\n##### TCP socket with password\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    address: 'redis://:password@127.0.0.1:6379'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    address: 'redis://:password@127.0.0.1:6379'\n\n  - name: remote\n    address: 'redis://user:password@203.0.113.0:6379'\n\n```\n{% /details %}\n",
 "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `redis` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m redis\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `redis` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep redis\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep redis /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
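Independently of the logs, you can rule out basic connectivity problems by sending the same `PING` the collector uses (a minimal sketch, assuming the default `127.0.0.1:6379` address and that `redis-cli` is installed):\n\n```bash\n# A healthy server replies with PONG\nredis-cli -h 127.0.0.1 -p 6379 ping\n```\n\n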
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep redis\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ redis_connections_rejected ](https://github.com/netdata/netdata/blob/master/src/health/health.d/redis.conf) | redis.connections | connections rejected because of maxclients limit in the last minute |\n| [ redis_bgsave_slow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/redis.conf) | redis.bgsave_now | duration of the on-going RDB save operation |\n| [ redis_bgsave_broken ](https://github.com/netdata/netdata/blob/master/src/health/health.d/redis.conf) | redis.bgsave_health | status of the last RDB save operation (0: ok, 1: error) |\n| [ redis_master_link_down ](https://github.com/netdata/netdata/blob/master/src/health/health.d/redis.conf) | redis.master_link_down_since_time | time elapsed since the link between master and slave is down |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Redis instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| redis.connections | accepted, rejected | connections/s |\n| redis.clients | connected, blocked, tracking, in_timeout_table | clients |\n| redis.ping_latency | min, max, avg | seconds |\n| redis.commands | processes | commands/s |\n| redis.keyspace_lookup_hit_rate | lookup_hit_rate | percentage |\n| redis.memory | max, used, rss, peak, dataset, lua, scripts | bytes |\n| redis.mem_fragmentation_ratio | mem_fragmentation | ratio |\n| redis.key_eviction_events | evicted | keys/s |\n| redis.net | received, sent | kilobits/s |\n| redis.rdb_changes | changes | operations |\n| redis.bgsave_now | current_bgsave_time | seconds |\n| redis.bgsave_health | last_bgsave | status |\n| redis.bgsave_last_rdb_save_since_time | last_bgsave_time | seconds |\n| redis.aof_file_size | current, base | bytes |\n| redis.commands_calls | a dimension per command | calls |\n| redis.commands_usec | a dimension per command | microseconds |\n| redis.commands_usec_per_sec | a dimension per command | microseconds/s |\n| redis.key_expiration_events | expired | keys/s |\n| redis.database_keys | a dimension per database | keys |\n| redis.database_expires_keys | a dimension per database | keys |\n| redis.connected_replicas | connected | replicas |\n| redis.master_link_status | up, down | status |\n| redis.master_last_io_since_time | time | seconds |\n| redis.master_link_down_since_time | time | seconds |\n| redis.uptime | uptime | seconds |\n\n", @@ -16277,7 +16466,7 @@ export const integrations = [ "most_popular": false }, "overview": "# RethinkDB\n\nPlugin: go.d.plugin\nModule: rethinkdb\n\n## Overview\n\nIt collects cluster-wide metrics such as server status, client connections, active clients, query rate, and document read/write rates.\nFor each server, it offers similar metrics.\n\n\nThe data is gathered by querying the stats table in RethinkDB, which stores real-time statistics related to the cluster and its individual servers.\n\n\nThis collector is supported on all platforms.\n\nThis collector 
supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf no configuration is given, the collector will attempt to connect to a RethinkDB instance at `127.0.0.1:28015`.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",
- "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/rethinkdb.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/rethinkdb.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | The IP address and port where the RethinkDB service listens for connections. | 127.0.0.1:28015 | yes |\n| timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. | 1 | no |\n| username | Username used for authentication. | | no |\n| password | Password used for authentication. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    address: 127.0.0.1:28015\n\n```\n{% /details %}\n##### With authentication\n\nAn example configuration with authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    address: 127.0.0.1:28015\n    username: name\n    password: pass\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    address: 127.0.0.1:28015\n\n  - name: remote\n    address: 203.0.113.0:28015\n\n```\n{% /details %}\n",
+ "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/rethinkdb.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/rethinkdb.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. 
| 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | The IP address and port where the RethinkDB service listens for connections. | 127.0.0.1:28015 | yes |\n| timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. | 1 | no |\n| username | Username used for authentication. | | no |\n| password | Password used for authentication. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    address: 127.0.0.1:28015\n\n```\n{% /details %}\n##### With authentication\n\nAn example configuration with authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    address: 127.0.0.1:28015\n    username: name\n    password: pass\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    address: 127.0.0.1:28015\n\n  - name: remote\n    address: 203.0.113.0:28015\n\n```\n{% /details %}\n",
 "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `rethinkdb` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m rethinkdb\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `rethinkdb` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep rethinkdb\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep rethinkdb /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep rethinkdb\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
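Before cross-checking individual metrics, you can confirm that the driver port the collector connects to is reachable at all (a minimal sketch, assuming the default `127.0.0.1:28015` address and that `nc` (netcat) is available):\n\n```bash\n# Exit status 0 means the RethinkDB driver port accepts TCP connections\nnc -z -w 2 127.0.0.1 28015 && echo OK\n```\n\n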
An instance is uniquely identified by a set of labels.\n\n\n\n### Per RethinkDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| rethinkdb.cluster_servers_stats_request | success, timeout | servers |\n| rethinkdb.cluster_client_connections | connections | connections |\n| rethinkdb.cluster_active_clients | active | clients |\n| rethinkdb.cluster_queries | queries | queries/s |\n| rethinkdb.cluster_documents | read, written | documents/s |\n\n### Per server\n\nThese metrics refer to the server (cluster member).\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| server_uuid | Server UUID. |\n| server_name | Server name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| rethinkdb.server_stats_request_status | success, timeout | status |\n| rethinkdb.server_client_connections | connections | connections |\n| rethinkdb.server_active_clients | active | clients |\n| rethinkdb.server_queries | queries | queries/s |\n| rethinkdb.server_documents | read, written | documents/s |\n\n", @@ -16316,7 +16505,7 @@ export const integrations = [ "most_popular": false }, "overview": "# Riak KV\n\nPlugin: go.d.plugin\nModule: riakkv\n\n## Overview\n\nThis collector monitors RiakKV metrics about throughput, latency, resources and more.\n\n\nIt sends HTTP requests to the Riak [/stats](https://docs.riak.com/riak/kv/2.2.3/developing/api/http/status.1.html) endpoint.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Riak instances running on localhost that are listening on port 8098.\nOn startup, it tries to collect metrics from:\n\n- http://127.0.0.1:8098/stats\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Enable /stats endpoint\n\nSee the RiakKV [configuration reference](https://docs.riak.com/riak/kv/2.2.3/developing/api/http/status.1.html).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/riakkv.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/riakkv.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8098/stats | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. 
| | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8098/stats\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8098/stats\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nWith enabled HTTPS and self-signed certificate.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8098/stats\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8098/stats\n\n - name: remote\n url: http://192.0.2.1:8098/stats\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Enable /stats endpoint\n\nSee the RiakKV [configuration reference](https://docs.riak.com/riak/kv/2.2.3/developing/api/http/status.1.html).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/riakkv.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/riakkv.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8098/stats | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. 
| no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8098/stats\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8098/stats\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nWith enabled HTTPS and self-signed certificate.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8098/stats\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8098/stats\n\n - name: remote\n url: http://192.0.2.1:8098/stats\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `riakkv` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m riakkv\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `riakkv` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep riakkv\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep riakkv /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep riakkv\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Riak KV instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| riak.kv.throughput | gets, puts | operations/s |\n| riak.dt.vnode_updates | counters, sets, maps | operations/s |\n| riak.search | queries | queries/s |\n| riak.search.documents | indexed | documents/s |\n| riak.consistent.operations | gets, puts | operations/s |\n| riak.kv.latency.get | mean, median, 95, 99, 100 | ms |\n| riak.kv.latency.put | mean, median, 95, 99, 100 | ms |\n| riak.dt.latency.counter_merge | mean, median, 95, 99, 100 | ms |\n| riak.dt.latency.set_merge | mean, median, 95, 99, 100 | ms |\n| riak.dt.latency.map_merge | mean, median, 95, 99, 100 | ms |\n| riak.search.latency.query | median, min, 95, 99, 999, max | ms |\n| riak.search.latency.index | median, min, 95, 99, 999, max | ms |\n| riak.consistent.latency.get | mean, median, 95, 99, 100 | ms |\n| riak.consistent.latency.put | mean, median, 95, 99, 100 | ms |\n| riak.vm | processes | total |\n| riak.vm.memory.processes | allocated, used | MB |\n| riak.kv.siblings_encountered.get | mean, median, 95, 99, 100 | siblings |\n| riak.kv.objsize.get | mean, median, 95, 99, 100 | KB |\n| riak.search.vnodeq_size | mean, median, 95, 99, 100 | messages |\n| riak.search.index | index_fail, bad_entry, extract_fail | errors |\n| riak.core.protobuf_connections | active | connections |\n| riak.core.repairs | read | repairs |\n| riak.core.fsm_active | get, put, secondary index, list keys | fsms |\n| riak.core.fsm_rejected | get, put | fsms |\n\n", @@ -16364,7 +16553,7 @@ export const integrations = [ "most_popular": false }, "overview": "# Rspamd\n\nPlugin: go.d.plugin\nModule: rspamd\n\n## Overview\n\nThis collector monitors the activity and performance of Rspamd servers. 
It gathers various metrics including scanned emails, learned messages, spam/ham counts, and actions taken on emails (reject, rewrite, etc.).\n\n\nIt retrieves statistics from Rspamd's [built-in web server](https://rspamd.com/doc/workers/controller.html) by making HTTP requests to the `/stat` endpoint.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Rspamd instances running on localhost that are listening on port 11334.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/rspamd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/rspamd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:11334 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:11334\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:11334\n username: username\n password: password\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:11334\n\n - name: remote\n url: http://192.0.2.1:11334\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/rspamd.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/rspamd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:11334 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:11334\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:11334\n username: username\n password: password\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:11334\n\n - name: remote\n url: http://192.0.2.1:11334\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `rspamd` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m rspamd\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `rspamd` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep rspamd\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep rspamd /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep rspamd\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Rspamd instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| rspamd.classifications | ham, spam | messages/s |\n| rspamd.actions | reject, soft_reject, rewrite_subject, add_header, greylist, custom, discard, quarantine, no_action | messages/s |\n| rspamd.scans | scanned | messages/s |\n| rspamd.learns | learned | messages/s |\n| rspamd.connections | connections | connections/s |\n| rspamd.control_connections | control_connections | connections/s |\n\n", @@ -16373,6 +16562,44 @@ export const integrations = [ "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/rspamd/metadata.yaml", "related_resources": "" }, + { + "meta": { + "id": "collector-go.d.plugin-samba", + "plugin_name": "go.d.plugin", + "module_name": "samba", + "monitored_instance": { + "name": "Samba", + "link": "https://www.samba.org/samba/", + "icon_filename": "samba.svg", + "categories": [ + "data-collection.storage-mount-points-and-filesystems" + ] + }, + "keywords": [ + "samba", + "smb", + "file sharing" + ], + "related_resources": { + "integrations": { + "list": [] + } + }, + "info_provided_to_referring_integrations": { + "description": "" + }, + "most_popular": false + }, + "overview": "# Samba\n\nPlugin: go.d.plugin\nModule: samba\n\n## Overview\n\nThis collector monitors Samba syscalls and SMB2 calls. It relies on the [`smbstatus`](https://www.samba.org/samba/docs/current/man-html/smbstatus.1.html) CLI tool but avoids directly executing the binary. Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment. This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\nExecuted commands:\n- `smbstatus -P`\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Verifying and Enabling Profiling for SMBd\n\n1. **Check for Profiling Support**\n\n Before enabling profiling, it's important to verify if `smbd` was compiled with profiling capabilities. Run the following command as root user (using `sudo`) to check:\n\n ```bash\n $ sudo smbd --build-options | grep WITH_PROFILE\n WITH_PROFILE\n ```\n\n If the command outputs `WITH_PROFILE`, profiling is supported. If not, you'll need to recompile `smbd` with profiling enabled (refer to Samba documentation for specific instructions).\n\n2. **Enable Profiling**\n\n Once you've confirmed profiling support, you can enable it using one of the following methods:\n\n - **Command-Line Option**\n Start smbd with the `-P 1` option when invoking it directly from the command line.\n - **Configuration File**\n Modify the `smb.conf` configuration file located at `/etc/samba/smb.conf` (the path might vary slightly depending on your system). 
Add the following line to the `[global]` section:\n\n ```bash\n smbd profiling level = count\n ```\n3. **Restart the Samba service**\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/samba.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/samba.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | smbstatus binary execution timeout. | 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: samba\n update_every: 5 # Collect statistics every 5 seconds\n\n```\n{% /details %}\n", + "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `samba` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m samba\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `samba` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep samba\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep samba /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep samba\n```\n\n", + "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per syscall\n\nThese metrics refer to the Syscall.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| syscall | Syscall name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| samba.syscall_calls | syscalls | calls/s |\n| samba.syscall_transferred_data | transferred | bytes/s |\n\n### Per smb2call\n\nThese metrics refer to the SMB2 Call.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| smb2call | SMB2 call name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| samba.smb2_call_calls | smb2 | calls/s |\n| samba.smb2_call_transferred_data | in, out | bytes/s |\n\n", + "integration_type": "collector", + "id": "go.d.plugin-samba-Samba", + "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/samba/metadata.yaml", + "related_resources": "" + }, { "meta": { "id": "collector-go.d.plugin-scaleio", "plugin_name": "go.d.plugin", "module_name": "scaleio", "monitored_instance": { "name": "Dell EMC ScaleIO", "link": "https://www.dell.com/en-ca/dt/storage/scaleio/scaleioreadynode.htm", "icon_filename": "dell.svg", "categories": [ "data-collection.storage-mount-points-and-filesystems" ] }, "keywords": [ "scaleio" ], "related_resources": { "integrations": { "list": [] } }, "info_provided_to_referring_integrations": { "description": "" }, "most_popular": false }, "overview": "# Dell EMC ScaleIO\n\nPlugin: go.d.plugin\nModule: scaleio\n\n## Overview\n\nThis collector monitors ScaleIO (VxFlex OS) instances via VxFlex OS Gateway API.\n\nIt collects metrics for the following ScaleIO components:\n\n- System\n- Storage Pool\n- Sdc\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/scaleio.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/scaleio.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | https://127.0.0.1:80 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | yes |\n| password | Password for basic HTTP authentication. | | yes |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy.
Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1\n username: admin\n password: password\n tls_skip_verify: yes # self-signed certificate\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instance.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1\n username: admin\n password: password\n tls_skip_verify: yes # self-signed certificate\n\n - name: remote\n url: https://203.0.113.10\n username: admin\n password: password\n tls_skip_verify: yes\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/scaleio.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/scaleio.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | https://127.0.0.1:80 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | yes |\n| password | Password for basic HTTP authentication. | | yes |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1\n username: admin\n password: password\n tls_skip_verify: yes # self-signed certificate\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instance.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1\n username: admin\n password: password\n tls_skip_verify: yes # self-signed certificate\n\n - name: remote\n url: https://203.0.113.10\n username: admin\n password: password\n tls_skip_verify: yes\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `scaleio` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m scaleio\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `scaleio` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep scaleio\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep scaleio /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep scaleio\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Dell EMC ScaleIO instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| scaleio.system_capacity_total | total | KiB |\n| scaleio.system_capacity_in_use | in_use | KiB |\n| scaleio.system_capacity_usage | thick, decreased, thin, snapshot, spare, unused | KiB |\n| scaleio.system_capacity_available_volume_allocation | available | KiB |\n| scaleio.system_capacity_health_state | protected, degraded, in_maintenance, failed, unavailable | KiB |\n| scaleio.system_workload_primary_bandwidth_total | total | KiB/s |\n| scaleio.system_workload_primary_bandwidth | read, write | KiB/s |\n| scaleio.system_workload_primary_iops_total | total | iops/s |\n| scaleio.system_workload_primary_iops | read, write | iops/s |\n| scaleio.system_workload_primary_io_size_total | io_size | KiB |\n| scaleio.system_rebalance | read, write | KiB/s |\n| scaleio.system_rebalance_left | left | KiB |\n| scaleio.system_rebalance_time_until_finish | time | seconds |\n| scaleio.system_rebuild | read, write | KiB/s |\n| scaleio.system_rebuild_left | left | KiB |\n| scaleio.system_defined_components | devices, fault_sets, protection_domains, rfcache_devices, sdc, sds, snapshots, storage_pools, volumes, vtrees | components |\n| scaleio.system_components_volumes_by_type | thick, thin | volumes |\n| scaleio.system_components_volumes_by_mapping | mapped, unmapped | volumes |\n\n### Per storage pool\n\nThese metrics refer to the storage pool.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| scaleio.storage_pool_capacity_total | total | KiB |\n| scaleio.storage_pool_capacity_in_use | in_use | KiB |\n| scaleio.storage_pool_capacity_usage | thick, decreased, thin, snapshot, spare, unused | KiB |\n| scaleio.storage_pool_capacity_utilization | used | percentage |\n| scaleio.storage_pool_capacity_available_volume_allocation | available | KiB |\n| scaleio.storage_pool_capacity_health_state | protected, degraded, in_maintenance, failed, unavailable | KiB |\n| scaleio.storage_pool_components | devices, snapshots, volumes, vtrees | components |\n\n### Per sdc\n\nThese metrics refer to the SDC (ScaleIO Data Client).\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| scaleio.sdc_mdm_connection_state | connected | boolean |\n| scaleio.sdc_bandwidth | read, write | KiB/s |\n| scaleio.sdc_iops | read, write | iops/s |\n| scaleio.sdc_io_size | read, write | KiB |\n| scaleio.sdc_num_of_mapped_volumed | mapped | volumes |\n\n", @@ -16415,7 +16642,7 @@ export const integrations = [ "plugin_name": "go.d.plugin", "module_name": "sensors", "monitored_instance": { - "name": "Linux Sensors (lm-sensors)", + "name": "Linux Sensors", "link": "https://hwmon.wiki.kernel.org/lm_sensors", "icon_filename": "microchip.svg", "categories": [ @@ -16430,7 +16657,8 @@ export const integrations = [ "power", "fan", "energy", - "humidity" + "humidity", + "intrusion" ], "related_resources": { "integrations": { @@ -16442,13 +16670,13 @@ export const integrations = [ }, "most_popular": false }, - "overview": "# Linux Sensors (lm-sensors)\n\nPlugin: go.d.plugin\nModule: sensors\n\n## Overview\n\nThis collector gathers real-time system sensor statistics, including temperature, voltage, current, power, fan speed, energy consumption, and humidity, utilizing the 
[sensors](https://linux.die.net/man/1/sensors) binary.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe following type of sensors are auto-detected:\n\n- temperature\n- fan\n- voltage\n- current\n- power\n- energy\n- humidity\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install lm-sensors\n\n- Install `lm-sensors` using your distribution's package manager.\n- Run `sensors-detect` to detect hardware monitoring chips.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/sensors.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/sensors.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| binary_path | Path to the `sensors` binary. If an absolute path is provided, the collector will use it directly; otherwise, it will search for the binary in directories specified in the PATH environment variable. | /usr/bin/sensors | yes |\n| timeout | Timeout for executing the binary, specified in seconds. 
| 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Custom binary path\n\nThe executable is not in the directories specified in the PATH environment variable.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: sensors\n binary_path: /usr/local/sbin/sensors\n\n```\n{% /details %}\n", + "overview": "# Linux Sensors\n\nPlugin: go.d.plugin\nModule: sensors\n\n## Overview\n\nThis collector gathers real-time system sensor statistics using the [sysfs](https://www.kernel.org/doc/Documentation/hwmon/sysfs-interface) interface.\n\nSupported sensors:\n\n- Temperature\n- Voltage\n- Fan\n- Current\n- Power\n- Energy\n- Humidity\n- Intrusion\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAutomatically discovers and exposes all available sensors on the system through the [sysfs](https://www.kernel.org/doc/Documentation/hwmon/sysfs-interface) interface.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/sensors.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/sensors.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| relabel | A list used to update existing sensor labels or add labels to sensors that don't have them. | [] | no |\n| relabel[].chip | [Pattern](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md#simple-patterns) to match the `chip_id` label value. | | no |\n| relabel[].sensors | A list of sensors to be relabeled for the specified chip. | [] | no |\n| relabel[].sensors[].name | The exact sensor name (e.g., `'temp1'`, `'in1'`, `'voltage1'`). | | no |\n| relabel[].sensors[].label | The new label value for the sensor. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: sensors\n update_every: 5 # Collect sensors statistics every 5 seconds\n\n```\n{% /details %}\n##### Renaming labels\n\nAllows you to override/add labels.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: sensors\n relabel:\n - chip: as99127f-*\n sensors:\n - name: temp1\n label: Mobo Temp\n - name: temp2\n label: CPU0 Temp\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `sensors` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m sensors\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `sensors` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep sensors\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep sensors /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep sensors\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per sensor\n\nThese metrics refer to the sensor.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| chip | The hardware component responsible for the sensor monitoring. |\n| feature | The specific sensor or monitoring point provided by the chip. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| sensors.sensor_temperature | temperature | Celsius |\n| sensors.sensor_voltage | voltage | Volts |\n| sensors.sensor_current | current | Amperes |\n| sensors.sensor_power | power | Watts |\n| sensors.sensor_fan_speed | fan | RPM |\n| sensors.sensor_energy | energy | Joules |\n| sensors.sensor_humidity | humidity | percent |\n\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per sensor\n\nThese metrics refer to the system sensor.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| chip | The path to the sensor's chip device, excluding the /sys/devices prefix. This provides a unique identifier for the physical hardware component. |\n| chip_id | A unique identifier for the sensor's chip, formatted as `chipName-busType-hash`. |\n| sensor | The name of the specific sensor within the chip device. This provides a direct identifier for the individual measurement point. |\n| label | A label provided by the kernel driver to indicate the intended use or purpose of the sensor. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| sensors.chip_sensor_temperature | input | Celsius |\n| sensors.chip_sensor_temperature_alarm | clear, triggered | status |\n| sensors.chip_sensor_voltage | input | Volts |\n| sensors.chip_sensor_voltage_average | average | Volts |\n| sensors.chip_sensor_voltage_alarm | clear, triggered | status |\n| sensors.chip_sensor_fan | input | RPM |\n| sensors.chip_sensor_fan_alarm | clear, triggered | status |\n| sensors.chip_sensor_current | input | Amperes |\n| sensors.chip_sensor_current_average | average | Amperes |\n| sensors.chip_sensor_current_alarm | clear, triggered | status |\n| sensors.chip_sensor_power | input | Watts |\n| sensors.chip_sensor_power_average | average | Watts |\n| sensors.chip_sensor_power_alarm | clear, triggered | status |\n| sensors.chip_sensor_energy | input | Joules |\n| sensors.chip_sensor_humidity | input | percent |\n| sensors.chip_sensor_intrusion_alarm | clear, triggered | status |\n\n", "integration_type": "collector", - "id": "go.d.plugin-sensors-Linux_Sensors_(lm-sensors)", + "id": "go.d.plugin-sensors-Linux_Sensors", "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/sensors/metadata.yaml", "related_resources": "" }, @@ -16481,8 +16709,8 @@ export const integrations = [ }, "most_popular": false }, - "overview": "# S.M.A.R.T.\n\nPlugin: go.d.plugin\nModule: smartctl\n\n## Overview\n\nThis collector monitors the health status of storage devices by analyzing S.M.A.R.T. 
(Self-Monitoring, Analysis, and Reporting Technology) counters.\nIt relies on the [`smartctl`](https://linux.die.net/man/8/smartctl) CLI tool but avoids directly executing the binary.\nInstead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment.\nThis approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\n\nExecuted commands:\n- `smartctl --json --scan`\n- `smartctl --json --all {deviceName} --device {deviceType} --nocheck {powerMode}`\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install smartmontools (v7.0+)\n\nInstall `smartmontools` version 7.0 or later using your distribution's package manager. Version 7.0 introduced the `--json` output mode, which is required for this collector to function properly.\n\n\n#### For Netdata running in a Docker container\n\n1. **Install smartmontools**.\n\n Ensure `smartctl` is available in the container by setting the environment variable `NETDATA_EXTRA_DEB_PACKAGES=smartmontools` when starting the container.\n\n2. **Provide access to storage devices**.\n\n Netdata requires the `SYS_RAWIO` capability and access to the storage devices to run the `smartctl` collector inside a Docker container. Here's how you can achieve this:\n\n - `docker run`\n\n ```bash\n docker run --cap-add SYS_RAWIO --device /dev/sda:/dev/sda ...\n ```\n\n - `docker-compose.yml`\n\n ```yaml\n services:\n netdata:\n cap_add:\n - SYS_PTRACE\n - SYS_ADMIN\n - SYS_RAWIO # smartctl\n devices:\n - \"/dev/sda:/dev/sda\"\n ```\n\n > **Multiple Devices**: These examples only show mapping of one device (/dev/sda). You'll need to add additional `--device` options (in docker run) or entries in the `devices` list (in docker-compose.yml) for each storage device you want Netdata's smartctl collector to monitor.\n\n > **NVMe Devices**: Do not map NVMe devices using this method. Netdata uses a [dedicated collector](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/nvme#readme) to monitor NVMe devices.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/smartctl.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/smartctl.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | interval for updating Netdata charts, measured in seconds. Collector might use cached data if less than **Devices poll interval**. | 10 | no |\n| timeout | smartctl binary execution timeout. 
| 5 | no |\n| scan_every | interval for discovering new devices using `smartctl --scan`, measured in seconds. Set to 0 to scan devices only once on startup. | 900 | no |\n| poll_devices_every | interval for gathering data for every device, measured in seconds. Data is cached for this interval. | 300 | no |\n| device_selector | Specifies a pattern to match the 'info name' of devices as reported by `smartctl --scan --json`. | * | no |\n| extra_devices | Allows manual specification of devices not automatically detected by `smartctl --scan`. Each device entry must include both a name and a type. See \"Configuration Examples\" for details. | [] | no |\n| no_check_power_mode | Skip data collection when the device is in a low-power mode. Prevents unnecessary disk spin-up. | standby | no |\n\n##### no_check_power_mode\n\nThe valid arguments to this option are:\n\n| Mode | Description |\n|---------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|\n| never | Check the device always. |\n| sleep | Check the device unless it is in SLEEP mode. |\n| standby | Check the device unless it is in SLEEP or STANDBY mode. In these modes most disks are not spinning, so if you want to prevent a disk from spinning up, this is probably what you want. |\n| idle | Check the device unless it is in SLEEP, STANDBY or IDLE mode. In the IDLE state, most disks are still spinning, so this is probably not what you want. |\n\n\n{% /details %}\n#### Examples\n\n##### Custom devices poll interval\n\nAllows you to override the default devices poll interval (data collection).\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: smartctl\n devices_poll_interval: 60 # Collect S.M.A.R.T statistics every 60 seconds\n\n```\n{% /details %}\n##### Extra devices\n\nThis example demonstrates using `extra_devices` to manually add a storage device (`/dev/sdc`) not automatically detected by `smartctl --scan`.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: smartctl\n extra_devices:\n - name: /dev/sdc\n type: jmb39x-q,3\n\n```\n{% /details %}\n", + "overview": "# S.M.A.R.T.\n\nPlugin: go.d.plugin\nModule: smartctl\n\n## Overview\n\nThis collector monitors the health status of storage devices by analyzing S.M.A.R.T. 
(Self-Monitoring, Analysis, and Reporting Technology) counters.\nIt relies on the [`smartctl`](https://linux.die.net/man/8/smartctl) CLI tool but avoids directly executing the binary.\nInstead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment.\nThis approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\n\nExecuted commands:\n- `smartctl --json --scan`\n- `smartctl --json --all {deviceName} --device {deviceType} --nocheck {powerMode}`\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n- BSD\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install smartmontools (v7.0+)\n\nInstall `smartmontools` version 7.0 or later using your distribution's package manager. Version 7.0 introduced the `--json` output mode, which is required for this collector to function properly.\n\n\n#### For Netdata running in a Docker container\n\n1. **Install smartmontools**.\n\n Ensure `smartctl` is available in the container by setting the environment variable `NETDATA_EXTRA_DEB_PACKAGES=smartmontools` when starting the container.\n\n2. **Provide access to storage devices**.\n\n Netdata requires the `SYS_RAWIO` capability and access to the storage devices to run the `smartctl` collector inside a Docker container. Here's how you can achieve this:\n\n - `docker run`\n\n ```bash\n docker run --cap-add SYS_RAWIO --device /dev/sda:/dev/sda ...\n ```\n\n - `docker-compose.yml`\n\n ```yaml\n services:\n netdata:\n cap_add:\n - SYS_PTRACE\n - SYS_ADMIN\n - SYS_RAWIO # smartctl\n devices:\n - \"/dev/sda:/dev/sda\"\n ```\n\n > **Multiple Devices**: These examples only show mapping of one device (/dev/sda). You'll need to add additional `--device` options (in docker run) or entries in the `devices` list (in docker-compose.yml) for each storage device you want Netdata's smartctl collector to monitor.\n\n > **NVMe Devices**: Do not map NVMe devices using this method. Netdata uses a [dedicated collector](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/nvme#readme) to monitor NVMe devices.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/smartctl.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/smartctl.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | interval for updating Netdata charts, measured in seconds. 
Collector might use cached data if less than **Devices poll interval**. | 10 | no |\n| timeout | smartctl binary execution timeout. | 5 | no |\n| scan_every | interval for discovering new devices using `smartctl --scan`, measured in seconds. Set to 0 to scan devices only once on startup. | 900 | no |\n| poll_devices_every | interval for gathering data for every device, measured in seconds. Data is cached for this interval. | 300 | no |\n| device_selector | Specifies a pattern to match the 'info name' of devices as reported by `smartctl --scan --json`. | * | no |\n| extra_devices | Allows manual specification of devices not automatically detected by `smartctl --scan`. Each device entry must include both a name and a type. See \"Configuration Examples\" for details. | [] | no |\n| no_check_power_mode | Skip data collection when the device is in a low-power mode. Prevents unnecessary disk spin-up. | standby | no |\n\n##### no_check_power_mode\n\nThe valid arguments to this option are:\n\n| Mode | Description |\n|---------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|\n| never | Check the device always. |\n| sleep | Check the device unless it is in SLEEP mode. |\n| standby | Check the device unless it is in SLEEP or STANDBY mode. In these modes most disks are not spinning, so if you want to prevent a disk from spinning up, this is probably what you want. |\n| idle | Check the device unless it is in SLEEP, STANDBY or IDLE mode. In the IDLE state, most disks are still spinning, so this is probably not what you want. |\n\n\n{% /details %}\n#### Examples\n\n##### Custom devices poll interval\n\nAllows you to override the default devices poll interval (data collection).\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: smartctl\n devices_poll_interval: 60 # Collect S.M.A.R.T statistics every 60 seconds\n\n```\n{% /details %}\n##### Extra devices\n\nThis example demonstrates using `extra_devices` to manually add a storage device (`/dev/sdc`) not automatically detected by `smartctl --scan`.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: smartctl\n extra_devices:\n - name: /dev/sdc\n type: jmb39x-q,3\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `smartctl` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m smartctl\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `smartctl` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep smartctl\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep smartctl /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep smartctl\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per controller\n\nThese metrics refer to the Storage Device.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device_name | Device name |\n| device_type | Device type |\n| model_name | Model name |\n| serial_number | Serial number |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| smartctl.device_smart_status | passed, failed | status |\n| smartctl.device_ata_smart_error_log_count | error_log | logs |\n| smartctl.device_power_on_time | power_on_time | seconds |\n| smartctl.device_temperature | temperature | Celsius |\n| smartctl.device_power_cycles_count | power | cycles |\n| smartctl.device_read_errors_rate | corrected, uncorrected | errors/s |\n| smartctl.device_write_errors_rate | corrected, uncorrected | errors/s |\n| smartctl.device_verify_errors_rate | corrected, uncorrected | errors/s |\n| smartctl.device_smart_attr_{attribute_name} | {attribute_name} | {attribute_unit} |\n| smartctl.device_smart_attr_{attribute_name}_normalized | {attribute_name} | value |\n\n", @@ -16518,7 +16746,7 @@ export const integrations = [ "most_popular": true }, "overview": "# SNMP devices\n\nPlugin: go.d.plugin\nModule: snmp\n\n## Overview\n\nThis SNMP collector discovers and gathers statistics for network interfaces on SNMP-enabled devices:\n\n- Traffic\n- Packets (unicast, multicast, broadcast)\n- Errors\n- Discards\n- Administrative and operational status\n\nAdditionally, it collects overall device uptime.\n\nIt is compatible with all SNMP versions (v1, v2c, and v3) and uses the [gosnmp](https://github.com/gosnmp/gosnmp) package.\n\n**For advanced users**:\n\n- You can manually specify custom OIDs (Object Identifiers) to retrieve specific data points beyond the default metrics.\n- However, defining custom charts with dimensions for these OIDs requires manual configuration.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\n**Device limitations**: Many SNMP switches and routers have limited processing power. 
They might not be able to report data as frequently as desired. You can monitor response times using go.d.plugin in debug mode to identify potential bottlenecks.\n\n**Concurrent access**: If multiple collectors or tools access the same SNMP device simultaneously, data points might be skipped. This is a limitation of the device itself, not this collector. To mitigate this, consider increasing the collection interval (update_every) to reduce the frequency of requests.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/snmp.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/snmp.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| hostname | Target ipv4 address. | | yes |\n| community | SNMPv1/2 community string. | public | no |\n| options.version | SNMP version. Available versions: 1, 2, 3. | 2 | no |\n| options.port | Target port. | 161 | no |\n| options.retries | Retries to attempt. | 1 | no |\n| options.timeout | SNMP request/response timeout. | 5 | no |\n| options.max_repetitions | Controls how many SNMP variables to retrieve in a single GETBULK request. | 25 | no |\n| options.max_request_size | Maximum number of OIDs allowed in a single GET request. | 60 | no |\n| network_interface_filter.by_name | Filter interfaces by their names using [simple patterns](/src/libnetdata/simple_pattern/README.md#simple-patterns). | | no |\n| network_interface_filter.by_type | Filter interfaces by their types using [simple patterns](/src/libnetdata/simple_pattern/README.md#simple-patterns). | | no |\n| user.name | SNMPv3 user name. | | no |\n| user.name | Security level of SNMPv3 messages. | | no |\n| user.auth_proto | Security level of SNMPv3 messages. | | no |\n| user.name | Authentication protocol for SNMPv3 messages. | | no |\n| user.auth_key | Authentication protocol pass phrase. | | no |\n| user.priv_proto | Privacy protocol for SNMPv3 messages. | | no |\n| user.priv_key | Privacy protocol pass phrase. | | no |\n| charts | List of charts. | [] | yes |\n| charts.id | Chart ID. Used to uniquely identify the chart. | | yes |\n| charts.title | Chart title. | Untitled chart | no |\n| charts.units | Chart units. | num | no |\n| charts.family | Chart family. | charts.id | no |\n| charts.type | Chart type (line, area, stacked). | line | no |\n| charts.priority | Chart priority. | 70000 | no |\n| charts.multiply_range | Used when you need to define many charts using incremental OIDs. | [] | no |\n| charts.dimensions | List of chart dimensions. | [] | yes |\n| charts.dimensions.oid | Collected metric OID. | | yes |\n| charts.dimensions.name | Dimension name. | | yes |\n| charts.dimensions.algorithm | Dimension algorithm (absolute, incremental). | absolute | no |\n| charts.dimensions.multiplier | Collected value multiplier, applied to convert it properly to units. 
| 1 | no |\n| charts.dimensions.divisor | Collected value divisor, applied to convert it properly to units. | 1 | no |\n\n##### user.auth_proto\n\nThe security of an SNMPv3 message as per RFC 3414 (`user.level`):\n\n| String value | Int value | Description |\n|:------------:|:---------:|------------------------------------------|\n| none | 1 | no message authentication or encryption |\n| authNoPriv | 2 | message authentication and no encryption |\n| authPriv | 3 | message authentication and encryption |\n\n\n##### user.name\n\nThe digest algorithm for SNMPv3 messages that require authentication (`user.auth_proto`):\n\n| String value | Int value | Description |\n|:------------:|:---------:|-------------------------------------------|\n| none | 1 | no message authentication |\n| md5 | 2 | MD5 message authentication (HMAC-MD5-96) |\n| sha | 3 | SHA message authentication (HMAC-SHA-96) |\n| sha224 | 4 | SHA message authentication (HMAC-SHA-224) |\n| sha256 | 5 | SHA message authentication (HMAC-SHA-256) |\n| sha384 | 6 | SHA message authentication (HMAC-SHA-384) |\n| sha512 | 7 | SHA message authentication (HMAC-SHA-512) |\n\n\n##### user.priv_proto\n\nThe encryption algorithm for SNMPv3 messages that require privacy (`user.priv_proto`):\n\n| String value | Int value | Description |\n|:------------:|:---------:|-------------------------------------------------------------------------|\n| none | 1 | no message encryption |\n| des | 2 | ES encryption (CBC-DES) |\n| aes | 3 | 128-bit AES encryption (CFB-AES-128) |\n| aes192 | 4 | 192-bit AES encryption (CFB-AES-192) with \"Blumenthal\" key localization |\n| aes256 | 5 | 256-bit AES encryption (CFB-AES-256) with \"Blumenthal\" key localization |\n| aes192c | 6 | 192-bit AES encryption (CFB-AES-192) with \"Reeder\" key localization |\n| aes256c | 7 | 256-bit AES encryption (CFB-AES-256) with \"Reeder\" key localization |\n\n\n{% /details %}\n#### Examples\n\n##### SNMPv1/2\n\nIn this example:\n\n- the SNMP device is `192.0.2.1`.\n- the SNMP version is `2`.\n- the SNMP community is `public`.\n- we will update the values every 10 seconds.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: switch\n update_every: 10\n hostname: 192.0.2.1\n community: public\n options:\n version: 2\n\n```\n{% /details %}\n##### SNMPv3\n\nTo use SNMPv3:\n\n- use `user` instead of `community`.\n- set `options.version` to 3.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: switch\n update_every: 10\n hostname: 192.0.2.1\n options:\n version: 3\n user:\n name: username\n level: authPriv\n auth_proto: sha256\n auth_key: auth_protocol_passphrase\n priv_proto: aes256\n priv_key: priv_protocol_passphrase\n\n```\n{% /details %}\n##### Custom OIDs\n\nIn this example:\n\n- the SNMP device is `192.0.2.1`.\n- the SNMP version is `2`.\n- the SNMP community is `public`.\n- we will update the values every 10 seconds.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: switch\n update_every: 10\n hostname: 192.0.2.1\n community: public\n options:\n version: 2\n charts:\n - id: \"bandwidth_port1\"\n title: \"Switch Bandwidth for port 1\"\n units: \"kilobits/s\"\n type: \"area\"\n family: \"ports\"\n dimensions:\n - name: \"in\"\n oid: \"1.3.6.1.2.1.2.2.1.10.1\"\n algorithm: \"incremental\"\n multiplier: 8\n divisor: 1000\n - name: \"out\"\n oid: \"1.3.6.1.2.1.2.2.1.16.1\"\n multiplier: -8\n divisor: 1000\n - id: \"bandwidth_port2\"\n title: \"Switch Bandwidth for port 2\"\n units: \"kilobits/s\"\n type: \"area\"\n 
family: \"ports\"\n dimensions:\n - name: \"in\"\n oid: \"1.3.6.1.2.1.2.2.1.10.2\"\n algorithm: \"incremental\"\n multiplier: 8\n divisor: 1000\n - name: \"out\"\n oid: \"1.3.6.1.2.1.2.2.1.16.2\"\n multiplier: -8\n divisor: 1000\n\n```\n{% /details %}\n##### Custom OIDs with multiply range\n\nIf you need to define many charts using incremental OIDs, you can use the `charts.multiply_range` option.\n\nThis is like the SNMPv1/2 example, but the option will multiply the current chart from 1 to 24 inclusive, producing 24 charts in total for the 24 ports of the switch `192.0.2.1`.\n\nEach of the 24 new charts will have its id (1-24) appended at:\n\n- its chart unique `id`, i.e. `bandwidth_port_1` to `bandwidth_port_24`.\n- its title, i.e. `Switch Bandwidth for port 1` to `Switch Bandwidth for port 24`.\n- its `oid` (for all dimensions), i.e. dimension in will be `1.3.6.1.2.1.2.2.1.10.1` to `1.3.6.1.2.1.2.2.1.10.24`.\n- its `priority` will be incremented for each chart so that the charts will appear on the dashboard in this order.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: switch\n update_every: 10\n hostname: \"192.0.2.1\"\n community: public\n options:\n version: 2\n charts:\n - id: \"bandwidth_port\"\n title: \"Switch Bandwidth for port\"\n units: \"kilobits/s\"\n type: \"area\"\n family: \"ports\"\n multiply_range: [1, 24]\n dimensions:\n - name: \"in\"\n oid: \"1.3.6.1.2.1.2.2.1.10\"\n algorithm: \"incremental\"\n multiplier: 8\n divisor: 1000\n - name: \"out\"\n oid: \"1.3.6.1.2.1.2.2.1.16\"\n multiplier: -8\n divisor: 1000\n\n```\n{% /details %}\n##### Multiple devices with a common configuration\n\nYAML supports [anchors](https://yaml.org/spec/1.2.2/#3222-anchors-and-aliases). \nThe `&` defines and names an anchor, and the `*` uses it. `<<: *anchor` means, inject the anchor, then extend. 
We can use anchors to share the common configuration for multiple devices.\n\nThe following example:\n\n- adds an `anchor` to the first job.\n- injects (copies) the first job configuration to the second and updates `name` and `hostname` parameters.\n- injects (copies) the first job configuration to the third and updates `name` and `hostname` parameters.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - &anchor\n name: switch\n update_every: 10\n hostname: \"192.0.2.1\"\n community: public\n options:\n version: 2\n charts:\n - id: \"bandwidth_port1\"\n title: \"Switch Bandwidth for port 1\"\n units: \"kilobits/s\"\n type: \"area\"\n family: \"ports\"\n dimensions:\n - name: \"in\"\n oid: \"1.3.6.1.2.1.2.2.1.10.1\"\n algorithm: \"incremental\"\n multiplier: 8\n divisor: 1000\n - name: \"out\"\n oid: \"1.3.6.1.2.1.2.2.1.16.1\"\n multiplier: -8\n divisor: 1000\n - <<: *anchor\n name: switch2\n hostname: \"192.0.2.2\"\n - <<: *anchor\n name: switch3\n hostname: \"192.0.2.3\"\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/snmp.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/snmp.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| hostname | Target ipv4 address. | | yes |\n| create_vnode | If set, the collector will create a Netdata Virtual Node for this SNMP device, which will appear as a separate Node in Netdata. | false | no |\n| vnode.guid | A unique identifier for the Virtual Node. If not set, a GUID will be automatically generated from the device's IP address. | | no |\n| vnode.hostname | The hostname that will be used for the Virtual Node. If not set, the device's hostname will be used. | | no |\n| vnode.labels | Additional key-value pairs to associate with the Virtual Node. | | no |\n| community | SNMPv1/2 community string. | public | no |\n| options.version | SNMP version. Available versions: 1, 2, 3. | 2 | no |\n| options.port | Target port. | 161 | no |\n| options.retries | Retries to attempt. | 1 | no |\n| options.timeout | SNMP request/response timeout. | 5 | no |\n| options.max_repetitions | Controls how many SNMP variables to retrieve in a single GETBULK request. | 25 | no |\n| options.max_request_size | Maximum number of OIDs allowed in a single GET request. | 60 | no |\n| network_interface_filter.by_name | Filter interfaces by their names using [simple patterns](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md#simple-patterns). 
| | no |\n| network_interface_filter.by_type | Filter interfaces by their types using [simple patterns](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md#simple-patterns). | | no |\n| user.name | SNMPv3 user name. | | no |\n| user.level | Security level of SNMPv3 messages. | | no |\n| user.auth_proto | Authentication protocol for SNMPv3 messages. | | no |\n| user.auth_key | Authentication protocol pass phrase. | | no |\n| user.priv_proto | Privacy protocol for SNMPv3 messages. | | no |\n| user.priv_key | Privacy protocol pass phrase. | | no |\n| charts | List of charts. | [] | yes |\n| charts.id | Chart ID. Used to uniquely identify the chart. | | yes |\n| charts.title | Chart title. | Untitled chart | no |\n| charts.units | Chart units. | num | no |\n| charts.family | Chart family. | charts.id | no |\n| charts.type | Chart type (line, area, stacked). | line | no |\n| charts.priority | Chart priority. | 70000 | no |\n| charts.multiply_range | Used when you need to define many charts using incremental OIDs. | [] | no |\n| charts.dimensions | List of chart dimensions. | [] | yes |\n| charts.dimensions.oid | Collected metric OID. | | yes |\n| charts.dimensions.name | Dimension name. | | yes |\n| charts.dimensions.algorithm | Dimension algorithm (absolute, incremental). | absolute | no |\n| charts.dimensions.multiplier | Collected value multiplier, applied to convert it properly to units. | 1 | no |\n| charts.dimensions.divisor | Collected value divisor, applied to convert it properly to units. | 1 | no |\n\n##### user.level\n\nThe security of an SNMPv3 message as per RFC 3414 (`user.level`):\n\n| String value | Int value | Description |\n|:------------:|:---------:|------------------------------------------|\n| none | 1 | no message authentication or encryption |\n| authNoPriv | 2 | message authentication and no encryption |\n| authPriv | 3 | message authentication and encryption |\n\n\n##### user.auth_proto\n\nThe digest algorithm for SNMPv3 messages that require authentication (`user.auth_proto`):\n\n| String value | Int value | Description |\n|:------------:|:---------:|-------------------------------------------|\n| none | 1 | no message authentication |\n| md5 | 2 | MD5 message authentication (HMAC-MD5-96) |\n| sha | 3 | SHA message authentication (HMAC-SHA-96) |\n| sha224 | 4 | SHA message authentication (HMAC-SHA-224) |\n| sha256 | 5 | SHA message authentication (HMAC-SHA-256) |\n| sha384 | 6 | SHA message authentication (HMAC-SHA-384) |\n| sha512 | 7 | SHA message authentication (HMAC-SHA-512) |\n\n\n##### user.priv_proto\n\nThe encryption algorithm for SNMPv3 messages that require privacy (`user.priv_proto`):\n\n| String value | Int value | Description |\n|:------------:|:---------:|-------------------------------------------------------------------------|\n| none | 1 | no message encryption |\n| des | 2 | DES encryption (CBC-DES) |\n| aes | 3 | 128-bit AES encryption (CFB-AES-128) |\n| aes192 | 4 | 192-bit AES encryption (CFB-AES-192) with \"Blumenthal\" key localization |\n| aes256 | 5 | 256-bit AES encryption (CFB-AES-256) with \"Blumenthal\" key localization |\n| aes192c | 6 | 192-bit AES encryption (CFB-AES-192) with \"Reeder\" key localization |\n| aes256c | 7 | 256-bit AES encryption (CFB-AES-256) with \"Reeder\" key localization |\n\n\n{% /details %}\n#### Examples\n\n##### SNMPv1/2\n\nIn this example:\n\n- the SNMP device is `192.0.2.1`.\n- the SNMP version is `2`.\n- the SNMP 
community is `public`.\n- we will update the values every 10 seconds.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: switch\n update_every: 10\n hostname: 192.0.2.1\n community: public\n options:\n version: 2\n\n```\n{% /details %}\n##### SNMPv3\n\nTo use SNMPv3:\n\n- use `user` instead of `community`.\n- set `options.version` to 3.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: switch\n update_every: 10\n hostname: 192.0.2.1\n options:\n version: 3\n user:\n name: username\n level: authPriv\n auth_proto: sha256\n auth_key: auth_protocol_passphrase\n priv_proto: aes256\n priv_key: priv_protocol_passphrase\n\n```\n{% /details %}\n##### Custom OIDs\n\nIn this example:\n\n- the SNMP device is `192.0.2.1`.\n- the SNMP version is `2`.\n- the SNMP community is `public`.\n- we will update the values every 10 seconds.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: switch\n update_every: 10\n hostname: 192.0.2.1\n community: public\n options:\n version: 2\n charts:\n - id: \"bandwidth_port1\"\n title: \"Switch Bandwidth for port 1\"\n units: \"kilobits/s\"\n type: \"area\"\n family: \"ports\"\n dimensions:\n - name: \"in\"\n oid: \"1.3.6.1.2.1.2.2.1.10.1\"\n algorithm: \"incremental\"\n multiplier: 8\n divisor: 1000\n - name: \"out\"\n oid: \"1.3.6.1.2.1.2.2.1.16.1\"\n multiplier: -8\n divisor: 1000\n - id: \"bandwidth_port2\"\n title: \"Switch Bandwidth for port 2\"\n units: \"kilobits/s\"\n type: \"area\"\n family: \"ports\"\n dimensions:\n - name: \"in\"\n oid: \"1.3.6.1.2.1.2.2.1.10.2\"\n algorithm: \"incremental\"\n multiplier: 8\n divisor: 1000\n - name: \"out\"\n oid: \"1.3.6.1.2.1.2.2.1.16.2\"\n multiplier: -8\n divisor: 1000\n\n```\n{% /details %}\n##### Custom OIDs with multiply range\n\nIf you need to define many charts using incremental OIDs, you can use the `charts.multiply_range` option.\n\nThis is like the SNMPv1/2 example, but the option will multiply the current chart from 1 to 24 inclusive, producing 24 charts in total for the 24 ports of the switch `192.0.2.1`.\n\nEach of the 24 new charts will have its id (1-24) appended at:\n\n- its chart unique `id`, i.e. `bandwidth_port_1` to `bandwidth_port_24`.\n- its title, i.e. `Switch Bandwidth for port 1` to `Switch Bandwidth for port 24`.\n- its `oid` (for all dimensions), i.e. dimension in will be `1.3.6.1.2.1.2.2.1.10.1` to `1.3.6.1.2.1.2.2.1.10.24`.\n- its `priority` will be incremented for each chart so that the charts will appear on the dashboard in this order.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: switch\n update_every: 10\n hostname: \"192.0.2.1\"\n community: public\n options:\n version: 2\n charts:\n - id: \"bandwidth_port\"\n title: \"Switch Bandwidth for port\"\n units: \"kilobits/s\"\n type: \"area\"\n family: \"ports\"\n multiply_range: [1, 24]\n dimensions:\n - name: \"in\"\n oid: \"1.3.6.1.2.1.2.2.1.10\"\n algorithm: \"incremental\"\n multiplier: 8\n divisor: 1000\n - name: \"out\"\n oid: \"1.3.6.1.2.1.2.2.1.16\"\n multiplier: -8\n divisor: 1000\n\n```\n{% /details %}\n##### Multiple devices with a common configuration\n\nYAML supports [anchors](https://yaml.org/spec/1.2.2/#3222-anchors-and-aliases). \nThe `&` defines and names an anchor, and the `*` uses it. `<<: *anchor` means, inject the anchor, then extend. 
We can use anchors to share the common configuration for multiple devices.\n\nThe following example:\n\n- adds an `anchor` to the first job.\n- injects (copies) the first job configuration to the second and updates `name` and `hostname` parameters.\n- injects (copies) the first job configuration to the third and updates `name` and `hostname` parameters.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - &anchor\n name: switch\n update_every: 10\n hostname: \"192.0.2.1\"\n community: public\n options:\n version: 2\n charts:\n - id: \"bandwidth_port1\"\n title: \"Switch Bandwidth for port 1\"\n units: \"kilobits/s\"\n type: \"area\"\n family: \"ports\"\n dimensions:\n - name: \"in\"\n oid: \"1.3.6.1.2.1.2.2.1.10.1\"\n algorithm: \"incremental\"\n multiplier: 8\n divisor: 1000\n - name: \"out\"\n oid: \"1.3.6.1.2.1.2.2.1.16.1\"\n multiplier: -8\n divisor: 1000\n - <<: *anchor\n name: switch2\n hostname: \"192.0.2.2\"\n - <<: *anchor\n name: switch3\n hostname: \"192.0.2.3\"\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `snmp` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m snmp\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `snmp` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep snmp\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep snmp /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep snmp\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThe metrics that will be collected are defined in the configuration file.\n\n### Per snmp device\n\nThese metrics refer to the SNMP device.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| sysName | SNMP device's system name (OID: [1.3.6.1.2.1.1.5](https://oidref.com/1.3.6.1.2.1.1.5)). |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| snmp.device_uptime | uptime | seconds |\n\n### Per network interface\n\nNetwork interfaces of the SNMP device being monitored. These metrics refer to each interface.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| sysName | SNMP device's system name (OID: [1.3.6.1.2.1.1.5](https://oidref.com/1.3.6.1.2.1.1.5)). |\n| ifDescr | Network interface description (OID: [1.3.6.1.2.1.2.2.1.2](https://cric.grenoble.cnrs.fr/Administrateurs/Outils/MIBS/?oid=1.3.6.1.2.1.2.2.1.2)). |\n| ifName | Network interface name (OID: [1.3.6.1.2.1.31.1.1.1.1](https://cric.grenoble.cnrs.fr/Administrateurs/Outils/MIBS/?oid=1.3.6.1.2.1.31.1.1.1.1)). |\n| ifType | Network interface type (OID: [1.3.6.1.2.1.2.2.1.3](https://cric.grenoble.cnrs.fr/Administrateurs/Outils/MIBS/?oid=1.3.6.1.2.1.2.2.1.3)). |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| snmp.device_net_interface_traffic | received, sent | kilobits/s |\n| snmp.device_net_interface_unicast | received, sent | packets/s |\n| snmp.device_net_interface_multicast | received, sent | packets/s |\n| snmp.device_net_interface_broadcast | received, sent | packets/s |\n| snmp.device_net_interface_errors | inbound, outbound | errors/s |\n| snmp.device_net_interface_discards | inbound, outbound | discards/s |\n| snmp.device_net_interface_admin_status | up, down, testing | status |\n| snmp.device_net_interface_oper_status | up, down, testing, unknown, dormant, not_present, lower_layer_down | status |\n\n", @@ -16527,6 +16755,44 @@ export const integrations = [ "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/snmp/metadata.yaml", "related_resources": "" }, + { + "meta": { + "id": "collector-go.d.plugin-spigotmc", + "plugin_name": "go.d.plugin", + "module_name": "spigotmc", + "monitored_instance": { + "name": "SpigotMC", + "link": "https://www.spigotmc.org/", + "categories": [ + "data-collection.gaming" + ], + "icon_filename": "spigot.jfif" + }, + "related_resources": { + "integrations": { + "list": [] + } + }, + "info_provided_to_referring_integrations": { + "description": "" + }, + "keywords": [ + "minecraft", + "spigotmc", + "spigot" + ], + "most_popular": false + }, + "overview": "# SpigotMC\n\nPlugin: go.d.plugin\nModule: spigotmc\n\n## Overview\n\nThis collector monitors SpigotMC server performance, in the form of average ticks per second, memory utilization, and active users.\n\n\nIt sends the `tps` and `list` commands to the server, and gathers the metrics from the responses.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects SpigotMC instances running on localhost that are listening on port 25575.\n\n> **Note that the SpigotMC RCON API requires a password**. 
\n> While Netdata can automatically detect SpigotMC instances and create data collection jobs, these jobs will fail unless you provide the necessary credentials.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/spigotmc.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/spigotmc.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | The IP address and port where the SpigotMC server listens for RCON connections. | 127.0.0.1:25575 | yes |\n| password | The password for the RCON connection. The SpigotMC RCON API requires one (see the examples below). | | no |\n| timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. | 1 | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:25575\n password: somePassword\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:25575\n password: somePassword\n\n - name: remote\n address: 203.0.113.0:25575\n password: somePassword\n\n```\n{% /details %}\n", + "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `spigotmc` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m spigotmc\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `spigotmc` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep spigotmc\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep spigotmc /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep spigotmc\n```\n\n", + "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per SpigotMC instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| spigotmc.players | players | players |\n| spigotmc.avg_tps | 1min, 5min, 15min | ticks |\n| spigotmc.memory | used, alloc | bytes |\n\n", + "integration_type": "collector", + "id": "go.d.plugin-spigotmc-SpigotMC", + "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/spigotmc/metadata.yaml", + "related_resources": "" + }, { "meta": { "id": "collector-go.d.plugin-squid", @@ -16556,7 +16822,7 @@ export const integrations = [ "most_popular": false }, "overview": "# Squid\n\nPlugin: go.d.plugin\nModule: squid\n\n## Overview\n\nThis collector monitors statistics about the Squid Clients and Servers, like bandwidth and requests.\n\n\nIt collects metrics from the `squid-internal-mgr/counters` endpoint.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Squid instances running on localhost that are listening on port 3128.\nOn startup, it tries to collect metrics from:\n\n- https://127.0.0.1:3128\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/squid.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/squid.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. 
| 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:3128 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | POST | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:3128\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:3128\n\n - name: remote\n url: http://192.0.2.1:3128\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/squid.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/squid.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:3128 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | POST | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. 
| | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:3128\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:3128\n\n - name: remote\n url: http://192.0.2.1:3128\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `squid` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m squid\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `squid` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep squid\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep squid /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep squid\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Squid instance\n\nThese metrics refer to each monitored Squid instance.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| squid.clients_net | in, out, hits | kilobits/s |\n| squid.clients_requests | requests, hits, errors | requests/s |\n| squid.servers_net | in, out | kilobits/s |\n| squid.servers_requests | requests, errors | requests/s |\n\n", @@ -16593,7 +16859,7 @@ export const integrations = [ "most_popular": true }, "overview": "# Squid log files\n\nPlugin: go.d.plugin\nModule: squidlog\n\n## Overview\n\nThis collector monitors Squid servers by parsing their access log files.\n\n\nIt automatically detects log files of Squid servers running on localhost.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/squidlog.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/squidlog.conf\n```\n#### Options\n\nSquid [log format codes](http://www.squid-cache.org/Doc/config/logformat/).\n\nSquidlog is aware how to parse and interpret the following codes:\n\n| field | squid format code | description |\n|----------------|-------------------|---------------------------------------------------------------|\n| resp_time | %tr | Response time (milliseconds). |\n| client_address | %>a | Client source IP address. |\n| client_address | %>A | Client FQDN. |\n| cache_code | %Ss | Squid request status (TCP_MISS etc). |\n| http_code | %>Hs | The HTTP response status code from Content Gateway to client. |\n| resp_size | %<st | Total size of reply sent to client. |\n| result_code | %Ss/%>Hs | Cache code and http code. |\n| hierarchy | %Sh/%<a | Hierarchy code and server address. |\n\n{% details open=true summary=\"Config options\" %}\n##### parser.ltsv_config.mapping\n\nThe mapping between LTSV fields and **known fields**.\n\n> **Note**: don't use `$` and `%` prefixes for mapped field names.\n\n```yaml\nparser:\n log_type: ltsv\n ltsv_config:\n mapping:\n label1: field1\n label2: field2\n```\n\n\n##### parser.regexp_config.pattern\n\nUse a pattern with named subexpressions. 
These names should be **known fields**.\n\n> **Note**: don't use `$` and `%` prefixes for mapped field names.\n\nSyntax:\n\n```yaml\nparser:\n log_type: regexp\n regexp_config:\n pattern: PATTERN\n```\n\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/squidlog.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/squidlog.conf\n```\n#### Options\n\nSquid [log format codes](https://www.squid-cache.org/Doc/config/logformat/).\n\nSquidlog is aware how to parse and interpret the following codes:\n\n| field | squid format code | description |\n|----------------|-------------------|---------------------------------------------------------------|\n| resp_time | %tr | Response time (milliseconds). |\n| client_address | %>a | Client source IP address. |\n| client_address | %>A | Client FQDN. |\n| cache_code | %Ss | Squid request status (TCP_MISS etc). |\n| http_code | %>Hs | The HTTP response status code from Content Gateway to client. |\n| resp_size | %<st | Total size of reply sent to client. |\n| result_code | %Ss/%>Hs | Cache code and http code. |\n| hierarchy | %Sh/%<a | Hierarchy code and server address. |\n\n{% details open=true summary=\"Config options\" %}\n##### parser.ltsv_config.mapping\n\nThe mapping between LTSV fields and **known fields**.\n\n> **Note**: don't use `$` and `%` prefixes for mapped field names.\n\n```yaml\nparser:\n log_type: ltsv\n ltsv_config:\n mapping:\n label1: field1\n label2: field2\n```\n\n\n##### parser.regexp_config.pattern\n\nUse a pattern with named subexpressions. These names should be **known fields**.\n\n> **Note**: don't use `$` and `%` prefixes for mapped field names.\n\nSyntax:\n\n```yaml\nparser:\n log_type: regexp\n regexp_config:\n pattern: PATTERN\n```\n\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n",
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep squidlog\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep squidlog /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep squidlog\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Squid log files instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| squidlog.requests | requests | requests/s |\n| squidlog.excluded_requests | unmatched | requests/s |\n| squidlog.type_requests | success, bad, redirect, error | requests/s |\n| squidlog.http_status_code_class_responses | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s |\n| squidlog.http_status_code_responses | a dimension per HTTP response code | responses/s |\n| squidlog.bandwidth | sent | kilobits/s |\n| squidlog.response_time | min, max, avg | milliseconds |\n| squidlog.uniq_clients | clients | clients |\n| squidlog.cache_result_code_requests | a dimension per cache result code | requests/s |\n| squidlog.cache_result_code_transport_tag_requests | a dimension per cache result delivery transport tag | requests/s |\n| squidlog.cache_result_code_handling_tag_requests | a dimension per cache result handling tag | requests/s |\n| squidlog.cache_code_object_tag_requests | a dimension per cache result produced object tag | requests/s |\n| squidlog.cache_code_load_source_tag_requests | a dimension per cache result load source tag | requests/s |\n| squidlog.cache_code_error_tag_requests | a dimension per cache result error tag | requests/s |\n| squidlog.http_method_requests | a dimension per HTTP method | requests/s |\n| squidlog.mime_type_requests | a dimension per MIME type | requests/s |\n| squidlog.hier_code_requests | a dimension per hierarchy code | requests/s |\n| squidlog.server_address_forwarded_requests | a dimension per server address | requests/s |\n\n", @@ -16630,11 +16896,11 @@ export const integrations = [ }, "most_popular": false }, - "overview": "# StoreCLI RAID\n\nPlugin: go.d.plugin\nModule: storcli\n\n## Overview\n\nMonitors the health of StoreCLI Hardware RAID by tracking the status of RAID adapters, physical drives, and backup batteries in your storage system.\nIt relies on the [`storcli`](https://docs.broadcom.com/doc/12352476) CLI tool but avoids directly executing the binary.\nInstead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment.\nThis approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\n\nExecuted commands:\n- 
`storcli /cALL show all J nolog`\n- `storcli /cALL/eALL/sALL show all J nolog`\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/storcli.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/storcli.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | storcli binary execution timeout. | 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: storcli\n update_every: 5 # Collect StorCLI RAID statistics every 5 seconds\n\n```\n{% /details %}\n", + "overview": "# StoreCLI RAID\n\nPlugin: go.d.plugin\nModule: storcli\n\n## Overview\n\nMonitors the health of StoreCLI Hardware RAID by tracking the status of RAID adapters, physical drives, and backup batteries in your storage system.\nIt relies on the [`storcli`](https://docs.broadcom.com/doc/12352476) CLI tool but avoids directly executing the binary.\nInstead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment.\nThis approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\n\nExecuted commands:\n- `storcli /cALL show all J nolog`\n- `storcli /cALL/eALL/sALL show all J nolog`\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n- BSD\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/storcli.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 
2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/storcli.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | storcli binary execution timeout. | 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: storcli\n update_every: 5 # Collect StorCLI RAID statistics every 5 seconds\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `storcli` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m storcli\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `storcli` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep storcli\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep storcli /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep storcli\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ storcli_controller_health_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/storcli.conf) | storcli.controller_health_status | RAID controller ${label:controller_number} is unhealthy |\n| [ storcli_controller_bbu_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/storcli.conf) | storcli.controller_bbu_status | RAID controller ${label:controller_number} BBU is unhealthy |\n| [ storcli_phys_drive_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/storcli.conf) | storcli.phys_drive_errors | RAID physical drive c${label:controller_number}/e${label:enclosure_number}/s${label:slot_number} errors |\n| [ storcli_phys_drive_predictive_failures ](https://github.com/netdata/netdata/blob/master/src/health/health.d/storcli.conf) | storcli.phys_drive_predictive_failures | RAID physical drive c${label:controller_number}/e${label:enclosure_number}/s${label:slot_number} predictive failures |\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per controller\n\nThese metrics refer to the Controller.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| controller_number | Controller number (index) |\n| model | Controller model |\n| driver_name | Controller driver (megaraid_sas or mpt3sas) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| storcli.controller_health_status | healthy, unhealthy | status |\n| storcli.controller_status | optimal, degraded, partially_degraded, failed | status |\n| storcli.controller_bbu_status | healthy, unhealthy, na | status |\n\n### Per physical drive\n\nThese metrics refer to the Physical Drive.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| controller_number | Controller number (index) |\n| enclosure_number | Enclosure number (index) |\n| slot_number | Slot number (index) |\n| media type | Media type (e.g. HDD) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| storcli.phys_drive_errors | media, other | errors/s |\n| storcli.phys_drive_predictive_failures | predictive_failures | failures/s |\n| storcli.phys_drive_smart_alert_status | active, inactive | status |\n| storcli.phys_drive_temperature | temperature | Celsius |\n\n### Per bbu\n\nThese metrics refer to the Backup Battery Unit.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| controller_number | Controller number (index) |\n| bbu_number | BBU number (index) |\n| model | BBU model |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| storcli.bbu_temperature | temperature | Celsius |\n\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per controller\n\nThese metrics refer to the Controller.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| controller_number | Controller number (index) |\n| model | Controller model |\n| driver_name | Controller driver (megaraid_sas or mpt3sas) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| storcli.controller_health_status | healthy, unhealthy | status |\n| storcli.controller_status | optimal, degraded, partially_degraded, failed | status |\n| storcli.controller_bbu_status | healthy, unhealthy, na | status |\n| storcli.controller_roc_temperature | temperature | Celsius |\n\n### Per physical drive\n\nThese metrics refer to the Physical Drive.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| controller_number | Controller number (index) |\n| enclosure_number | Enclosure number (index) |\n| slot_number | Slot number (index) |\n| media type | Media type (e.g. HDD) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| storcli.phys_drive_errors | media, other | errors/s |\n| storcli.phys_drive_predictive_failures | predictive_failures | failures/s |\n| storcli.phys_drive_smart_alert_status | active, inactive | status |\n| storcli.phys_drive_temperature | temperature | Celsius |\n\n### Per bbu\n\nThese metrics refer to the Backup Battery Unit.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| controller_number | Controller number (index) |\n| bbu_number | BBU number (index) |\n| model | BBU model |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| storcli.bbu_temperature | temperature | Celsius |\n\n", "integration_type": "collector", "id": "go.d.plugin-storcli-StoreCLI_RAID", "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/storcli/metadata.yaml", @@ -16667,7 +16933,7 @@ export const integrations = [ "most_popular": false }, "overview": "# Supervisor\n\nPlugin: go.d.plugin\nModule: supervisord\n\n## Overview\n\nThis collector monitors Supervisor instances.\n\nIt can collect metrics from:\n\n- [unix socket](http://supervisord.org/configuration.html?highlight=unix_http_server#unix-http-server-section-values)\n- [internal http server](http://supervisord.org/configuration.html?highlight=unix_http_server#inet-http-server-section-settings)\n\nUsed methods:\n\n- [`supervisor.getAllProcessInfo`](http://supervisord.org/api.html#supervisor.rpcinterface.SupervisorNamespaceRPCInterface.getAllProcessInfo)\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/supervisord.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd 
/opt/netdata/etc/netdata\nsudo ./edit-config go.d/supervisord.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9001/RPC2 | yes |\n| timeout | System bus requests timeout. | 1 | no |\n\n{% /details %}\n#### Examples\n\n##### HTTP\n\nCollect metrics via HTTP.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: 'http://127.0.0.1:9001/RPC2'\n\n```\n{% /details %}\n##### Socket\n\nCollect metrics via Unix socket.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n- name: local\n url: 'unix:///run/supervisor.sock'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollect metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: 'http://127.0.0.1:9001/RPC2'\n\n - name: remote\n url: 'http://192.0.2.1:9001/RPC2'\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/supervisord.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/supervisord.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9001/RPC2 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n\n{% /details %}\n#### Examples\n\n##### HTTP\n\nCollect metrics via HTTP.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: 'http://127.0.0.1:9001/RPC2'\n\n```\n{% /details %}\n##### Socket\n\nCollect metrics via Unix socket.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: 'unix:///run/supervisor.sock'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollect metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: 'http://127.0.0.1:9001/RPC2'\n\n - name: remote\n url: 'http://192.0.2.1:9001/RPC2'\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `supervisord` collector, run the `go.d.plugin` with the debug option enabled. 
The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m supervisord\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `supervisord` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep supervisord\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep supervisord /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep supervisord\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Supervisor instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| supervisord.summary_processes | running, non-running | processes |\n\n### Per process group\n\nThese metrics refer to the process group.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| supervisord.processes | running, non-running | processes |\n| supervisord.process_state_code | a dimension per process | code |\n| supervisord.process_exit_status | a dimension per process | exit status |\n| supervisord.process_uptime | a dimension per process | seconds |\n| supervisord.process_downtime | a dimension per process | seconds |\n\n", @@ -16703,7 +16969,7 @@ export const integrations = [ "most_popular": false }, "overview": "# Systemd Units\n\nPlugin: go.d.plugin\nModule: systemdunits\n\n## Overview\n\nThis collector monitors the state of Systemd units and unit files.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/systemdunits.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/systemdunits.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| timeout | System bus requests timeout. | 1 | no |\n| include | Systemd units selector. | *.service | no |\n| skip_transient | If set, skip data collection for systemd transient units. | false | no |\n| collect_unit_files | If set to true, collect the state of installed unit files. Enabling this may increase system overhead. | false | no |\n| collect_unit_files_every | Interval for querying systemd about unit files and their enablement state, measured in seconds. Data is cached for this interval to reduce system overhead. | 300 | no |\n| include_unit_files | Systemd unit files selector. 
| *.service | no |\n\n##### include\n\nSystemd units matching the selector will be monitored.\n\n- Logic: (pattern1 OR pattern2)\n- Pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match)\n- Syntax:\n\n```yaml\nincludes:\n - pattern1\n - pattern2\n```\n\n\n##### include_unit_files\n\nSystemd unit files matching the selector will be monitored.\n\n- Logic: (pattern1 OR pattern2)\n- Pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match)\n- Syntax:\n\n```yaml\nincludes:\n - pattern1\n - pattern2\n```\n\n\n{% /details %}\n#### Examples\n\n##### Service units\n\nCollect state of all service type units.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: service\n include:\n - '*.service'\n\n```\n{% /details %}\n##### One specific unit\n\nCollect state of one specific unit.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: my-specific-service\n include:\n - 'my-specific.service'\n\n```\n{% /details %}\n##### All unit types\n\nCollect state of all units.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: my-specific-service-unit\n include:\n - '*'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollect state of all service and socket type units.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: service\n include:\n - '*.service'\n\n - name: socket\n include:\n - '*.socket'\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/systemdunits.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/systemdunits.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| timeout | System bus requests timeout. | 1 | no |\n| include | Systemd units selector. | *.service | no |\n| skip_transient | If set, skip data collection for systemd transient units. | false | no |\n| collect_unit_files | If set to true, collect the state of installed unit files. Enabling this may increase system overhead. | false | no |\n| collect_unit_files_every | Interval for querying systemd about unit files and their enablement state, measured in seconds. Data is cached for this interval to reduce system overhead. | 300 | no |\n| include_unit_files | Systemd unit files selector. 
| *.service | no |\n\n##### include\n\nSystemd units matching the selector will be monitored.\n\n- Logic: (pattern1 OR pattern2)\n- Pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match)\n- Syntax:\n\n```yaml\ninclude:\n - pattern1\n - pattern2\n```\n\n\n##### include_unit_files\n\nSystemd unit files matching the selector will be monitored.\n\n- Logic: (pattern1 OR pattern2)\n- Pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match)\n- Syntax:\n\n```yaml\ninclude_unit_files:\n - pattern1\n - pattern2\n```\n\n\n{% /details %}\n#### Examples\n\n##### Service units\n\nCollect state of all service type units.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: service\n include:\n - '*.service'\n\n```\n{% /details %}\n##### One specific unit\n\nCollect state of one specific unit.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: my-specific-service\n include:\n - 'my-specific.service'\n\n```\n{% /details %}\n##### All unit types\n\nCollect state of all units.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: my-specific-service-unit\n include:\n - '*'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollect state of all service and socket type units.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: service\n include:\n - '*.service'\n\n - name: socket\n include:\n - '*.socket'\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `systemdunits` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m systemdunits\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `systemdunits` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep systemdunits\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep systemdunits /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep systemdunits\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ systemd_service_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.service_unit_state | systemd service unit in the failed state |\n| [ systemd_socket_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.socket_unit_state | systemd socket unit in the failed state |\n| [ systemd_target_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.target_unit_state | systemd target unit in the failed state |\n| [ systemd_path_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.path_unit_state | systemd path unit in the failed state |\n| [ systemd_device_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.device_unit_state | systemd device unit in the failed state |\n| [ systemd_mount_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.mount_unit_state | systemd mount unit in the failed state |\n| [ systemd_automount_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.automount_unit_state | systemd automount unit in the failed state |\n| [ systemd_swap_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.swap_unit_state | systemd swap unit in the failed state |\n| [ systemd_scope_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.scope_unit_state | systemd scope unit in the failed state |\n| [ systemd_slice_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.slice_unit_state | systemd slice unit in the failed state |\n| [ systemd_timer_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.timer_unit_state | systemd timer unit in the failed state |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per unit\n\nThese metrics refer to the systemd unit.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| unit_name | systemd unit name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| systemd.service_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.socket_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.target_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.path_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.device_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.mount_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.automount_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.swap_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.timer_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.scope_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.slice_unit_state | active, inactive, activating, deactivating, failed | state |\n\n### Per unit file\n\nThese metrics refer to the systemd unit file.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| unit_file_name | systemd unit file name |\n| unit_file_type | systemd unit file type |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| systemd.unit_file_state | enabled, enabled-runtime, linked, linked-runtime, alias, masked, masked-runtime, static, disabled, indirect, generated, transient, bad | state |\n\n", @@ -16741,7 +17007,7 @@ export const integrations = [ "most_popular": false }, "overview": "# Tengine\n\nPlugin: go.d.plugin\nModule: tengine\n\n## Overview\n\nThis collector monitors Tengine servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Enable ngx_http_reqstat_module module.\n\nTo enable the module, see the [official documentation](ngx_http_reqstat_module](https://tengine.taobao.org/document/http_reqstat.html).\nThe default line format is the only supported format.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/tengine.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/tengine.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. 
| 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/us | yes |\n| timeout | HTTP request timeout. | 2 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/us\n\n```\n{% /details %}\n##### HTTP authentication\n\nLocal server with basic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/us\n username: foo\n password: bar\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nTengine with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1/us\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/us\n\n - name: remote\n url: http://203.0.113.10/us\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Enable ngx_http_reqstat_module.\n\nTo enable the module, see the [official documentation](https://tengine.taobao.org/document/http_reqstat.html).\nThe default line format is the only supported format.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/tengine.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/tengine.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/us | yes |\n| timeout | HTTP request timeout. | 2 | no |\n| username | Username for basic HTTP authentication. 
| | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/us\n\n```\n{% /details %}\n##### HTTP authentication\n\nLocal server with basic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/us\n username: foo\n password: bar\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nTengine with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1/us\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/us\n\n - name: remote\n url: http://203.0.113.10/us\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `tengine` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m tengine\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `tengine` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep tengine\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep tengine /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep tengine\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Tengine instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| tengine.bandwidth_total | in, out | B/s |\n| tengine.connections_total | accepted | connections/s |\n| tengine.requests_total | processed | requests/s |\n| tengine.requests_per_response_code_family_total | 2xx, 3xx, 4xx, 5xx, other | requests/s |\n| tengine.requests_per_response_code_detailed_total | 200, 206, 302, 304, 403, 404, 419, 499, 500, 502, 503, 504, 508, other | requests/s |\n| tengine.requests_upstream_total | requests | requests/s |\n| tengine.tries_upstream_total | calls | calls/s |\n| tengine.requests_upstream_per_response_code_family_total | 4xx, 5xx | requests/s |\n\n", @@ -16781,7 +17047,7 @@ export const integrations = [ "most_popular": false }, "overview": "# Tomcat\n\nPlugin: go.d.plugin\nModule: tomcat\n\n## Overview\n\nThis collector monitors Tomcat metrics about bandwidth, processing time, threads and more.\n\n\nIt parses the information provided by the [Server Status](https://tomcat.apache.org/tomcat-10.0-doc/manager-howto.html#Server_Status) HTTP endpoint.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nBy default, this Tomcat collector cannot access the server's status page. 
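Access is usually granted through Tomcat's `conf/tomcat-users.xml` (a sketch only; the `manager-status` role comes from Tomcat's manager documentation, while the `netdata`/`changeme` credentials are placeholder assumptions):\n\n```xml\n<!-- define a read-only role and a dedicated user for the Server Status page -->\n<role rolename=\"manager-status\"/>\n<user username=\"netdata\" password=\"changeme\" roles=\"manager-status\"/>\n```\n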
To enable data collection, you will need to configure access credentials with appropriate permissions.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf the Netdata agent and Tomcat are on the same host, the collector will attempt to connect to the Tomcat server's status page at `http://localhost:8080/manager/status?XML=true`.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Access to Tomcat Status Endpoint\n\nThe Netdata agent needs read-only access to its status endpoint to collect data from the Tomcat server.\n\nYou can achieve this by creating a dedicated user named `netdata` with read-only permissions specifically for accessing the [Server Status](https://tomcat.apache.org/tomcat-10.0-doc/manager-howto.html#Server_Status) endpoint.\n\nOnce you've created the `netdata` user, you'll need to configure the username and password in the collector configuration file.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/tomcat.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/tomcat.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8080 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | POST | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080\n username: John\n password: Doe\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080\n username: admin1\n password: hackme1\n\n - name: remote\n url: http://192.0.2.1:8080\n username: admin2\n password: hackme2\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Access to Tomcat Status Endpoint\n\nThe Netdata agent needs read-only access to its status endpoint to collect data from the Tomcat server.\n\nYou can achieve this by creating a dedicated user named `netdata` with read-only permissions specifically for accessing the [Server Status](https://tomcat.apache.org/tomcat-10.0-doc/manager-howto.html#Server_Status) endpoint.\n\nOnce you've created the `netdata` user, you'll need to configure the username and password in the collector configuration file.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/tomcat.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/tomcat.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8080 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | POST | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080\n username: John\n password: Doe\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080\n username: admin1\n password: hackme1\n\n - name: remote\n url: http://192.0.2.1:8080\n username: admin2\n password: hackme2\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `tomcat` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m tomcat\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `tomcat` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep tomcat\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep tomcat /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep tomcat\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Tomcat instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| tomcat.jvm_memory_usage | free, used | bytes |\n\n### Per jvm memory pool\n\nThese metrics refer to the JVM memory pool.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mempool_name | Memory Pool name. |\n| mempool_type | Memory Pool type. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| tomcat.jvm_mem_pool_memory_usage | commited, used, max | bytes |\n\n### Per connector\n\nThese metrics refer to the connector.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| connector_name | Connector name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| tomcat.connector_requests | requests | requests/s |\n| tomcat.connector_bandwidth | received, sent | bytes/s |\n| tomcat.connector_requests_processing_time | processing_time | milliseconds |\n| tomcat.connector_errors | errors | errors/s |\n| tomcat.connector_request_threads | idle, busy | threads |\n\n", @@ -16819,7 +17085,7 @@ export const integrations = [ "most_popular": false }, "overview": "# Tor\n\nPlugin: go.d.plugin\nModule: tor\n\n## Overview\n\nTracks Tor's download and upload traffic, as well as its uptime.\n\n\nIt reads the server's response to the [GETINFO](https://spec.torproject.org/control-spec/commands.html#getinfo) command.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Tor instances running on localhost that are listening on port 9051.\nOn startup, it tries to collect metrics from:\n\n- 127.0.0.1:9051\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Enable Control Port\n\nEnable `ControlPort` in `/etc/tor/torrc`.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/tor.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/tor.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | The IP address and port where the Tor's Control Port listens for connections. | 127.0.0.1:9051 | yes |\n| timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. | 1 | no |\n| password | Password for authentication. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:9051\n password: somePassword\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:9051\n password: somePassword\n\n - name: remote\n address: 203.0.113.0:9051\n password: somePassword\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Enable Control Port\n\nEnable `ControlPort` in `/etc/tor/torrc`.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/tor.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/tor.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | The IP address and port where the Tor's Control Port listens for connections. | 127.0.0.1:9051 | yes |\n| timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. | 1 | no |\n| password | Password for authentication. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:9051\n password: somePassword\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:9051\n password: somePassword\n\n - name: remote\n address: 203.0.113.0:9051\n password: somePassword\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `tor` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m tor\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `tor` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep tor\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep tor /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep tor\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Tor instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| tor.traffic | read, write | KiB/s |\n| tor.uptime | uptime | seconds |\n\n", @@ -16857,7 +17123,7 @@ export const integrations = [ "most_popular": false }, "overview": "# Traefik\n\nPlugin: go.d.plugin\nModule: traefik\n\n## Overview\n\nThis collector monitors Traefik servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Enable built-in Prometheus exporter\n\nTo enable see [Prometheus exporter](https://doc.traefik.io/traefik/observability/metrics/prometheus/) documentation.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/traefik.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/traefik.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details 
open=true summary=\"All options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8082/metrics | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8082/metrics\n\n```\n{% /details %}\n##### Basic HTTP auth\n\nLocal server with basic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8082/metrics\n username: foo\n password: bar\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n http://127.0.0.1:8082/metrics\n\n - name: remote\n http://192.0.2.0:8082/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Enable built-in Prometheus exporter\n\nTo enable it, see the [Prometheus exporter](https://doc.traefik.io/traefik/observability/metrics/prometheus/) documentation.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/traefik.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/traefik.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"All options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8082/metrics | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. 
| | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8082/metrics\n\n```\n{% /details %}\n##### Basic HTTP auth\n\nLocal server with basic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8082/metrics\n    username: foo\n    password: bar\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8082/metrics\n\n  - name: remote\n    url: http://192.0.2.0:8082/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `traefik` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m traefik\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `traefik` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep traefik\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep traefik /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
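Before digging further into logs, it can be worth confirming that Traefik's Prometheus exporter is actually serving metrics at the endpoint the collector's `url` option points to. A minimal check, assuming the default `127.0.0.1:8082` metrics entrypoint used in the examples above:

```bash
# Fetch the first few exposition lines from Traefik's metrics endpoint
# (assumes the default 127.0.0.1:8082 entrypoint from the examples above).
curl -s http://127.0.0.1:8082/metrics | head -n 5
```

If this prints Prometheus-format lines beginning with `traefik_`, the exporter is enabled and the `url` option is correct.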
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep traefik\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per entrypoint, protocol\n\nThese metrics refer to the endpoint.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| traefik.entrypoint_requests | 1xx, 2xx, 3xx, 4xx, 5xx | requests/s |\n| traefik.entrypoint_request_duration_average | 1xx, 2xx, 3xx, 4xx, 5xx | milliseconds |\n| traefik.entrypoint_open_connections | a dimension per HTTP method | connections |\n\n", @@ -16866,6 +17132,44 @@ export const integrations = [ "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/traefik/metadata.yaml", "related_resources": "" }, + { + "meta": { + "id": "collector-go.d.plugin-typesense", + "plugin_name": "go.d.plugin", + "module_name": "typesense", + "monitored_instance": { + "name": "Typesense", + "link": "https://typesense.org/", + "categories": [ + "data-collection.search-engines" + ], + "icon_filename": "typesense.svg" + }, + "related_resources": { + "integrations": { + "list": [] + } + }, + "alternative_monitored_instances": [], + "info_provided_to_referring_integrations": { + "description": "" + }, + "keywords": [ + "typesense", + "search engine" + ], + "most_popular": false + }, + "overview": "# Typesense\n\nPlugin: go.d.plugin\nModule: typesense\n\n## Overview\n\nThis collector monitors the overall health status and performance of your Typesense servers.\nIt gathers detailed metrics, including the total number of requests processed, the breakdown of different request types, and the average latency experienced by each request.\n\n\nIt gathers metrics by periodically issuing HTTP GET requests to the Typesense server:\n\n- [/health](https://typesense.org/docs/27.0/api/cluster-operations.html#health) endpoint to check server health.\n- [/stats.json](https://typesense.org/docs/27.0/api/cluster-operations.html#api-stats) endpoint to collect data on requests and latency.\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector can automatically detect Typesense instances running on:\n\n- localhost that are listening on port 8108\n- within Docker containers\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### API Key Configuration\n\nWhile optional, configuring an [API key](https://typesense.org/docs/0.20.0/api/api-keys.html#api-keys) is highly recommended to enable the collector to gather [stats metrics](https://typesense.org/docs/27.0/api/cluster-operations.html#api-stats), including request counts and latency.\nWithout an API key, the collector will only collect health status information.\n\n> If you're running Typesense with the API key provided as a command-line 
parameter (e.g., `--api-key=XYZ`), Netdata can automatically detect and use this key for queries.\n> In this case, no additional configuration is required.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/typesense.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/typesense.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8108 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| api_key | The Typesense [API Key](https://typesense.org/docs/0.20.0/api/api-keys.html#api-keys) (`X-TYPESENSE-API-KEY`). | | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8108\n api_key: XYZ\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8108\n api_key: XYZ\n\n - name: remote\n url: http://192.0.2.1:8108\n api_key: XYZ\n\n```\n{% /details %}\n", + "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `typesense` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m typesense\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `typesense` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep typesense\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep typesense /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep typesense\n```\n\n", + "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
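As a quick cross-check of these charts, you can query the same endpoints the collector polls. A sketch, assuming the default `127.0.0.1:8108` address and a placeholder `XYZ` API key (sent in the `X-TYPESENSE-API-KEY` header, matching the `api_key` option):

```bash
# The health endpoint needs no authentication.
curl -s http://127.0.0.1:8108/health

# The stats endpoint requires an API key (XYZ is a placeholder).
curl -s -H 'X-TYPESENSE-API-KEY: XYZ' http://127.0.0.1:8108/stats.json
```

A healthy server typically answers the first request with `{"ok":true}`; the second returns the request and latency figures behind the charts below.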
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Typesense instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| typesense.health_status | ok, out_of_disk, out_of_memory | status |\n| typesense.total_requests | requests | requests/s |\n| typesense.requests_by_operation | search, write, import, delete | requests/s |\n| typesense.latency_by_operation | search, write, import, delete | milliseconds |\n| typesense.overloaded_requests | overloaded | requests/s |\n\n", + "integration_type": "collector", + "id": "go.d.plugin-typesense-Typesense", + "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/typesense/metadata.yaml", + "related_resources": "" + }, { "meta": { "id": "collector-go.d.plugin-unbound", @@ -16894,7 +17198,7 @@ export const integrations = [ "most_popular": false }, "overview": "# Unbound\n\nPlugin: go.d.plugin\nModule: unbound\n\n## Overview\n\nThis collector monitors Unbound servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Enable remote control interface\n\nSet `control-enable` to yes in [unbound.conf](https://nlnetlabs.nl/documentation/unbound/unbound.conf).\n\n\n#### Check permissions and adjust if necessary\n\nIf using unix socket:\n\n- socket should be readable and writeable by `netdata` user\n\nIf using ip socket and TLS is disabled:\n\n- socket should be accessible via network\n\nIf TLS is enabled, in addition:\n\n- `control-key-file` should be readable by `netdata` user\n- `control-cert-file` should be readable by `netdata` user\n\nFor auto-detection parameters from `unbound.conf`:\n\n- `unbound.conf` should be readable by `netdata` user\n- if you have several configuration files (include feature) all of them should be readable by `netdata` user\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/unbound.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/unbound.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address in IP:PORT format. | 127.0.0.1:8953 | yes |\n| timeout | Connection/read/write/ssl handshake timeout. | 1 | no |\n| conf_path | Absolute path to the unbound configuration file. | /etc/unbound/unbound.conf | no |\n| cumulative_stats | Statistics collection mode. 
Should have the same value as the `statistics-cumulative` parameter in the unbound configuration file. | no | no |\n| use_tls | Whether to use TLS or not. | yes | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | yes | no |\n| tls_ca | Certificate authority that client use when verifying server certificates. | | no |\n| tls_cert | Client tls certificate. | /etc/unbound/unbound_control.pem | no |\n| tls_key | Client tls key. | /etc/unbound/unbound_control.key | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:8953\n\n```\n{% /details %}\n##### Unix socket\n\nConnecting through Unix socket.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: socket\n address: /var/run/unbound.sock\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:8953\n\n - name: remote\n address: 203.0.113.11:8953\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Enable remote control interface\n\nSet `control-enable` to yes in [unbound.conf](https://nlnetlabs.nl/documentation/unbound/unbound.conf).\n\n\n#### Check permissions and adjust if necessary\n\nIf using unix socket:\n\n- socket should be readable and writeable by `netdata` user\n\nIf using ip socket and TLS is disabled:\n\n- socket should be accessible via network\n\nIf TLS is enabled, in addition:\n\n- `control-key-file` should be readable by `netdata` user\n- `control-cert-file` should be readable by `netdata` user\n\nFor auto-detection parameters from `unbound.conf`:\n\n- `unbound.conf` should be readable by `netdata` user\n- if you have several configuration files (include feature) all of them should be readable by `netdata` user\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/unbound.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/unbound.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address in IP:PORT format. | 127.0.0.1:8953 | yes |\n| timeout | Connection/read/write/ssl handshake timeout. | 1 | no |\n| conf_path | Absolute path to the unbound configuration file. | /etc/unbound/unbound.conf | no |\n| cumulative_stats | Statistics collection mode. Should have the same value as the `statistics-cumulative` parameter in the unbound configuration file. | no | no |\n| use_tls | Whether to use TLS or not. 
| yes | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | yes | no |\n| tls_ca | Certificate authority that the client uses when verifying server certificates. | | no |\n| tls_cert | Client TLS certificate. | /etc/unbound/unbound_control.pem | no |\n| tls_key | Client TLS key. | /etc/unbound/unbound_control.key | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    address: 127.0.0.1:8953\n\n```\n{% /details %}\n##### Unix socket\n\nConnecting through Unix socket.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: socket\n    address: /var/run/unbound.sock\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    address: 127.0.0.1:8953\n\n  - name: remote\n    address: 203.0.113.11:8953\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `unbound` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m unbound\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `unbound` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep unbound\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep unbound /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep unbound\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
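The collector talks to the same remote control interface as the stock `unbound-control` utility, so running that tool by hand is a quick way to validate the prerequisites above. A sketch, assuming `unbound-control` is installed and `/etc/unbound/unbound.conf` is the configuration in use:

```bash
# Query the remote control interface directly (assumes the stock
# unbound-control tool and the default configuration path).
unbound-control -c /etc/unbound/unbound.conf status
```

If this prints the server status, `control-enable` and the certificate and permission requirements are satisfied for the current user; repeat it as the `netdata` user to verify its access.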
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Unbound instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| unbound.queries | queries | queries |\n| unbound.queries_ip_ratelimited | ratelimited | queries |\n| unbound.dnscrypt_queries | crypted, cert, cleartext, malformed | queries |\n| unbound.cache | hits, miss | events |\n| unbound.cache_percentage | hits, miss | percentage |\n| unbound.prefetch | prefetches | prefetches |\n| unbound.expired | expired | replies |\n| unbound.zero_ttl_replies | zero_ttl | replies |\n| unbound.recursive_replies | recursive | replies |\n| unbound.recursion_time | avg, median | milliseconds |\n| unbound.request_list_usage | avg, max | queries |\n| unbound.current_request_list_usage | all, users | queries |\n| unbound.request_list_jostle_list | overwritten, dropped | queries |\n| unbound.tcpusage | usage | buffers |\n| unbound.uptime | time | seconds |\n| unbound.cache_memory | message, rrset, dnscrypt_nonce, dnscrypt_shared_secret | KB |\n| unbound.mod_memory | iterator, respip, validator, subnet, ipsec | KB |\n| unbound.mem_streamwait | streamwait | KB |\n| unbound.cache_count | infra, key, msg, rrset, dnscrypt_nonce, shared_secret | items |\n| unbound.type_queries | a dimension per query type | queries |\n| unbound.class_queries | a dimension per query class | queries |\n| unbound.opcode_queries | a dimension per query opcode | queries |\n| unbound.flag_queries | qr, aa, tc, rd, ra, z, ad, cd | queries |\n| unbound.rcode_answers | a dimension per reply rcode | replies |\n\n### Per thread\n\nThese metrics refer to threads.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| unbound.thread_queries | queries | queries |\n| unbound.thread_queries_ip_ratelimited | ratelimited | queries |\n| unbound.thread_dnscrypt_queries | crypted, cert, cleartext, malformed | queries |\n| unbound.thread_cache | hits, miss | events |\n| unbound.thread_cache_percentage | hits, miss | percentage |\n| unbound.thread_prefetch | prefetches | prefetches |\n| unbound.thread_expired | expired | replies |\n| unbound.thread_zero_ttl_replies | zero_ttl | replies |\n| unbound.thread_recursive_replies | recursive | replies |\n| unbound.thread_recursion_time | avg, median | milliseconds |\n| unbound.thread_request_list_usage | avg, max | queries |\n| unbound.thread_current_request_list_usage | all, users | queries |\n| unbound.thread_request_list_jostle_list | overwritten, dropped | queries |\n| unbound.thread_tcpusage | usage | buffers |\n\n", @@ -16931,7 +17235,7 @@ export const integrations = [ "most_popular": false }, "overview": "# UPS (NUT)\n\nPlugin: go.d.plugin\nModule: upsd\n\n## Overview\n\nThis collector monitors Uninterruptible Power Supplies by polling the UPS daemon using the NUT network protocol.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo 
action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/upsd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/upsd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | UPS daemon address in IP:PORT format. | 127.0.0.1:3493 | yes |\n| timeout | Connection/read/write timeout in seconds. The timeout includes name resolution, if required. | 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:3493\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:3493\n\n - name: remote\n address: 203.0.113.0:3493\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/upsd.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/upsd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | UPS daemon address in IP:PORT format. | 127.0.0.1:3493 | yes |\n| timeout | Connection/read/write timeout in seconds. The timeout includes name resolution, if required. 
| 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:3493\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:3493\n\n - name: remote\n address: 203.0.113.0:3493\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `upsd` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m upsd\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `upsd` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep upsd\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep upsd /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep upsd\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ upsd_10min_ups_load ](https://github.com/netdata/netdata/blob/master/src/health/health.d/upsd.conf) | upsd.ups_load | UPS ${label:ups_name} average load over the last 10 minutes |\n| [ upsd_ups_battery_charge ](https://github.com/netdata/netdata/blob/master/src/health/health.d/upsd.conf) | upsd.ups_battery_charge | UPS ${label:ups_name} average battery charge over the last minute |\n| [ upsd_ups_last_collected_secs ](https://github.com/netdata/netdata/blob/master/src/health/health.d/upsd.conf) | upsd.ups_load | UPS ${label:ups_name} number of seconds since the last successful data collection |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
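To rule out connectivity problems before reading collector logs, you can query the UPS daemon with a NUT client. A sketch, assuming NUT's `upsc` utility is installed and the default `127.0.0.1:3493` address from the examples above:

```bash
# List the UPS units the daemon exposes (assumes NUT's upsc client;
# 127.0.0.1:3493 is the default address from the examples above).
upsc -l 127.0.0.1:3493
```

Each unit name printed here should appear as a `ups_name` label on the charts below.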
An instance is uniquely identified by a set of labels.\n\n\n\n### Per ups\n\nThese metrics refer to the UPS unit.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| ups_name | UPS name. |\n| battery_type | Battery type (chemistry). \"battery.type\" variable value. |\n| device_model | Device model. \"device.mode\" variable value. |\n| device_serial | Device serial number. \"device.serial\" variable value. |\n| device_manufacturer | Device manufacturer. \"device.mfr\" variable value. |\n| device_type | Device type (ups, pdu, scd, psu, ats). \"device.type\" variable value. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| upsd.ups_load | load | percentage |\n| upsd.ups_load_usage | load_usage | Watts |\n| upsd.ups_status | on_line, on_battery, low_battery, high_battery, replace_battery, charging, discharging, bypass, calibration, offline, overloaded, trim_input_voltage, boost_input_voltage, forced_shutdown, other | status |\n| upsd.ups_temperature | temperature | Celsius |\n| upsd.ups_battery_charge | charge | percentage |\n| upsd.ups_battery_estimated_runtime | runtime | seconds |\n| upsd.ups_battery_voltage | voltage | Volts |\n| upsd.ups_battery_voltage_nominal | nominal_voltage | Volts |\n| upsd.ups_input_voltage | voltage | Volts |\n| upsd.ups_input_voltage_nominal | nominal_voltage | Volts |\n| upsd.ups_input_current | current | Ampere |\n| upsd.ups_input_current_nominal | nominal_current | Ampere |\n| upsd.ups_input_frequency | frequency | Hz |\n| upsd.ups_input_frequency_nominal | nominal_frequency | Hz |\n| upsd.ups_output_voltage | voltage | Volts |\n| upsd.ups_output_voltage_nominal | nominal_voltage | Volts |\n| upsd.ups_output_current | current | Ampere |\n| upsd.ups_output_current_nominal | nominal_current | Ampere |\n| upsd.ups_output_frequency | frequency | Hz |\n| upsd.ups_output_frequency_nominal | nominal_frequency | Hz |\n\n", @@ -16969,7 +17273,7 @@ export const integrations = [ "most_popular": false }, "overview": "# uWSGI\n\nPlugin: go.d.plugin\nModule: uwsgi\n\n## Overview\n\nMonitors UWSGI worker health and performance by collecting metrics like requests, transmitted data, exceptions, and harakiris.\n\n\nIt fetches [Stats Server](https://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html) statistics over TCP.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAutomatically discovers and collects UWSGI statistics from the following default locations:\n\n- localhost:1717\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Enable the uWSGI Stats Server\n\nSee [Stats Server](https://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html) for details.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/uwsgi.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/uwsgi.conf\n```\n#### Options\n\nThe following options 
can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | The IP address and port where the UWSGI [Stats Server](https://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html) listens for connections. | 127.0.0.1:1717 | yes |\n| timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. | 1 | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:1717\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:1717\n\n - name: remote\n address: 203.0.113.0:1717\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Enable the uWSGI Stats Server\n\nSee [Stats Server](https://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html) for details.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/uwsgi.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/uwsgi.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | The IP address and port where the UWSGI [Stats Server](https://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html) listens for connections. | 127.0.0.1:1717 | yes |\n| timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. | 1 | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:1717\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:1717\n\n - name: remote\n address: 203.0.113.0:1717\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `uwsgi` collector, run the `go.d.plugin` with the debug option enabled. 
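The Stats Server writes its JSON document to any client that connects, so a raw TCP read is enough to confirm it is enabled before debugging the collector itself. A sketch, assuming `nc` (netcat) is available and the default `127.0.0.1:1717` address:

```bash
# The uWSGI Stats Server dumps JSON stats on connect, so netcat
# alone can confirm it is listening (assumes nc is installed).
nc 127.0.0.1 1717 | head -c 300
```

If nothing comes back, the stats server is most likely not enabled in your uWSGI configuration (see the Stats Server link above).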
The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m uwsgi\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `uwsgi` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep uwsgi\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep uwsgi /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep uwsgi\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per uWSGI instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| uwsgi.transmitted_data | tx | bytes/s |\n| uwsgi.requests | requests | requests/s |\n| uwsgi.harakiris | harakiris | harakiris/s |\n| uwsgi.respawns | respawns | respawns/s |\n\n### Per worker\n\nThese metrics refer to the Worker process.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| worker_id | Worker ID. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| uwsgi.worker_transmitted_data | tx | bytes/s |\n| uwsgi.worker_requests | requests | requests/s |\n| uwsgi.worker_delta_requests | delta_requests | requests/s |\n| uwsgi.worker_average_request_time | avg | milliseconds |\n| uwsgi.worker_harakiris | harakiris | harakiris/s |\n| uwsgi.worker_exceptions | exceptions | exceptions/s |\n| uwsgi.worker_status | idle, busy, cheap, pause, sig | status |\n| uwsgi.worker_request_handling_status | accepting, not_accepting | status |\n| uwsgi.worker_respawns | respawns | respawns/s |\n| uwsgi.worker_memory_rss | rss | bytes |\n| uwsgi.worker_memory_vsz | vsz | bytes |\n\n", @@ -16978,6 +17282,46 @@ export const integrations = [ "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/uwsgi/metadata.yaml", "related_resources": "" }, + { + "meta": { + "plugin_name": "go.d.plugin", + "module_name": "varnish", + "monitored_instance": { + "name": "Varnish", + "link": "https://varnish-cache.org/", + "categories": [ + "data-collection.web-servers-and-web-proxies" + ], + "icon_filename": "varnish.svg" + }, + "related_resources": { + "integrations": { + "list": [] + } + }, + "info_provided_to_referring_integrations": { + "description": "" + }, + "keywords": [ + "varnish", + "varnishstat", + "varnishd", + "cache", + "web server", + "web cache" + ], + "most_popular": false + }, + "overview": "# Varnish\n\nPlugin: go.d.plugin\nModule: varnish\n\n## Overview\n\nThis collector monitors Varnish instances, supporting both the open-source Varnish-Cache and the commercial Varnish-Plus.\n\nIt tracks key performance metrics, along with detailed statistics for Backends (VBE) and Storages (SMF, SMA, MSE).\n\nIt relies on the [`varnishstat`](https://varnish-cache.org/docs/trunk/reference/varnishstat.html) CLI tool but avoids directly executing the binary.\nInstead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment.\nThis approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAutomatically detects and monitors Varnish instances running on the host or inside Docker containers.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/varnish.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/varnish.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary=\"Config options\" 
%}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | Timeout for executing the binary, specified in seconds. | 2 | no |\n| instance_name | Specifies the name of the Varnish instance to collect metrics from. This corresponds to the `-n` argument used with the [varnishstat](https://varnish-cache.org/docs/trunk/reference/varnishstat.html) command. | | no |\n| docker_container | Specifies the name of the Docker container where the Varnish instance is running. If set, the `varnishstat` command will be executed within this container. | | no |\n\n{% /details %}\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n```yaml\njobs:\n - name: varnish\n update_every: 5\n\n```\n", + "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `varnish` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m varnish\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `varnish` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep varnish\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep varnish /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep varnish\n```\n\n", + "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
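Because the collector parses `varnishstat` output, running the tool once by hand shows roughly what Netdata will see and confirms the instance is reachable. A sketch, assuming `varnishstat` is on the `PATH`:

```bash
# One-shot dump of all counters (-1 prints once and exits); add
# -n <instance_name> to target a named instance, mirroring the
# instance_name option above.
varnishstat -1 | head -n 20
```

For an instance inside Docker, the equivalent check mirrors the `docker_container` option: `docker exec <container> varnishstat -1`.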
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Varnish instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| varnish.client_session_connections | accepted, dropped | connections/s |\n| varnish.client_requests | received | requests/s |\n| varnish.cache_hit_ratio_total | hit, miss, hitpass, hitmiss | percent |\n| varnish.cache_hit_ratio_delta | hit, miss, hitpass, hitmiss | percent |\n| varnish.cache_expired_objects | expired | objects/s |\n| varnish.cache_lru_activity | nuked, moved | objects/s |\n| varnish.threads | threads | threads |\n| varnish.thread_management_activity | created, failed, destroyed, limited | threads/s |\n| varnish.thread_queue_len | queue_length | threads |\n| varnish.backends_requests | sent | requests/s |\n| varnish.esi_parsing_issues | errors, warnings | issues/s |\n| varnish.mgmt_process_uptime | uptime | seconds |\n| varnish.child_process_uptime | uptime | seconds |\n\n### Per Backend\n\nThese metrics refer to the Backend (VBE).\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| varnish.backend_data_transfer | req_header, req_body, resp_header, resp_body | bytes/s |\n\n### Per Storage\n\nThese metrics refer to the Storage (SMA, SMF, MSE).\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| varnish.storage_space_usage | free, used | bytes |\n| varnish.storage_allocated_objects | allocated | objects |\n\n", + "integration_type": "collector", + "id": "go.d.plugin-varnish-Varnish", + "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/varnish/metadata.yaml", + "related_resources": "" + }, { "meta": { "id": "collector-go.d.plugin-vcsa", @@ -17005,7 +17349,7 @@ export const integrations = [ "most_popular": false }, "overview": "# vCenter Server Appliance\n\nPlugin: go.d.plugin\nModule: vcsa\n\n## Overview\n\nThis collector monitors [health statistics](https://developer.vmware.com/apis/vsphere-automation/latest/appliance/health/) of vCenter Server Appliance servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/vcsa.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/vcsa.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. 
| 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | yes |\n| password | Password for basic HTTP authentication. | | yes |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | false | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | false | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: vcsa1\n url: https://203.0.113.1\n username: admin@vsphere.local\n password: password\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nTwo instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: vcsa1\n url: https://203.0.113.1\n username: admin@vsphere.local\n password: password\n\n - name: vcsa2\n url: https://203.0.113.10\n username: admin@vsphere.local\n password: password\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/vcsa.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/vcsa.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | yes |\n| password | Password for basic HTTP authentication. | | yes |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | false | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. 
| false | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: vcsa1\n url: https://203.0.113.1\n username: admin@vsphere.local\n password: password\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nTwo instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: vcsa1\n url: https://203.0.113.1\n username: admin@vsphere.local\n password: password\n\n - name: vcsa2\n url: https://203.0.113.10\n username: admin@vsphere.local\n password: password\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `vcsa` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m vcsa\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `vcsa` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep vcsa\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep vcsa /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep vcsa\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ vcsa_system_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.system_health_status | VCSA overall system status is orange. One or more components are degraded. |\n| [ vcsa_system_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.system_health_status | VCSA overall system status is red. One or more components are unavailable or will stop functioning soon. 
|\n| [ vcsa_applmgmt_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.applmgmt_health_status | VCSA ApplMgmt component status is orange. It is degraded, and may have serious problems. |\n| [ vcsa_applmgmt_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.applmgmt_health_status | VCSA ApplMgmt component status is red. It is unavailable, or will stop functioning soon. |\n| [ vcsa_load_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.load_health_status | VCSA Load component status is orange. It is degraded, and may have serious problems. |\n| [ vcsa_load_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.load_health_status | VCSA Load component status is red. It is unavailable, or will stop functioning soon. |\n| [ vcsa_mem_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.mem_health_status | VCSA Memory component status is orange. It is degraded, and may have serious problems. |\n| [ vcsa_mem_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.mem_health_status | VCSA Memory component status is red. It is unavailable, or will stop functioning soon. |\n| [ vcsa_swap_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.swap_health_status | VCSA Swap component status is orange. It is degraded, and may have serious problems. |\n| [ vcsa_swap_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.swap_health_status | VCSA Swap component status is red. It is unavailable, or will stop functioning soon. |\n| [ vcsa_database_storage_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.database_storage_health_status | VCSA Database Storage component status is orange. It is degraded, and may have serious problems. |\n| [ vcsa_database_storage_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.database_storage_health_status | VCSA Database Storage component status is red. It is unavailable, or will stop functioning soon. |\n| [ vcsa_storage_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.storage_health_status | VCSA Storage component status is orange. It is degraded, and may have serious problems. |\n| [ vcsa_storage_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.storage_health_status | VCSA Storage component status is red. It is unavailable, or will stop functioning soon. |\n| [ vcsa_software_packages_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.software_packages_health_status | VCSA software packages security updates are available. |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per vCenter Server Appliance instance\n\nThese metrics refer to the entire monitored application.\n
\nThe tables below explain the possible health status values.\n\nOverall System Health:\n\n| Status | Description |\n|:-------:|:-------------------------------------------------------------------------------------------------------------------------|\n| green | All components in the appliance are healthy. |\n| yellow | One or more components in the appliance might become overloaded soon. |\n| orange | One or more components in the appliance might be degraded. |\n| red | One or more components in the appliance might be in an unusable status and the appliance might become unresponsive soon. |\n| gray | No health data is available. |\n| unknown | Collector failed to decode status. |\n\nComponents Health:\n\n| Status | Description |\n|:-------:|:-------------------------------------------------------------|\n| green | The component is healthy. |\n| yellow | The component is healthy, but may have some problems. |\n| orange | The component is degraded, and may have serious problems. |\n| red | The component is unavailable, or will stop functioning soon. |\n| gray | No health data is available. |\n| unknown | Collector failed to decode status. |\n\nSoftware Updates Health:\n\n| Status | Description |\n|:-------:|:-----------------------------------------------------|\n| green | No updates available. |\n| orange | Non-security patches might be available. |\n| red | Security patches might be available. |\n| gray | An error occurred while retrieving information on software updates. |\n| unknown | Collector failed to decode status. |\n\n
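For a quick check, you can read the most recent status values through the Agent's `data` API endpoint. This is a minimal sketch, assuming the Agent listens on the default port 19999; the dimension matching the current status should report 1 and all others 0:\n\n```bash\n# Fetch the latest sample of the overall system health chart\ncurl -s 'http://127.0.0.1:19999/api/v1/data?chart=vcsa.system_health_status&after=-1&format=json'\n```\n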
\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| vcsa.system_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.applmgmt_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.load_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.mem_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.swap_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.database_storage_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.storage_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.software_packages_health_status | green, red, orange, gray, unknown | status |\n\n", @@ -17042,10 +17386,10 @@ export const integrations = [ "most_popular": false }, "overview": "# VerneMQ\n\nPlugin: go.d.plugin\nModule: vernemq\n\n## Overview\n\nThis collector monitors VerneMQ instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/vernemq.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/vernemq.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8888/metrics | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8888/metrics\n\n```\n{% /details %}\n##### HTTP authentication\n\nLocal instance with basic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8888/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8888/metrics\n\n - name: remote\n url: http://203.0.113.10:8888/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/vernemq.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/vernemq.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8888/metrics | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8888/metrics\n\n```\n{% /details %}\n##### HTTP authentication\n\nLocal instance with basic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8888/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8888/metrics\n\n - name: remote\n url: http://203.0.113.10:8888/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `vernemq` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m vernemq\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `vernemq` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep vernemq\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep vernemq /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep vernemq\n```\n\n", - "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ vernemq_socket_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.socket_errors | number of socket errors in the last minute |\n| [ vernemq_queue_message_drop ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.queue_undelivered_messages | number of dropped messaged due to full queues in the last minute |\n| [ vernemq_queue_message_expired ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.queue_undelivered_messages | number of messages which expired before delivery in the last minute |\n| [ vernemq_queue_message_unhandled ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.queue_undelivered_messages | number of unhandled messages (connections with clean session=true) in the last minute |\n| [ vernemq_average_scheduler_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.average_scheduler_utilization | average scheduler utilization over the last 10 minutes |\n| [ vernemq_cluster_dropped ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.cluster_dropped | amount of traffic dropped during communication with the cluster nodes in the last minute |\n| [ vernemq_netsplits ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vvernemq.netsplits | number of detected netsplits (split brain situation) in the last minute |\n| [ vernemq_mqtt_connack_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_connack_sent_reason | number of sent unsuccessful v3/v5 CONNACK packets in the last minute |\n| [ vernemq_mqtt_disconnect_received_reason_not_normal ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_disconnect_received_reason | number of received not normal v5 DISCONNECT packets in the last minute |\n| [ vernemq_mqtt_disconnect_sent_reason_not_normal ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_disconnect_sent_reason | number of sent not normal v5 DISCONNECT packets in the last minute |\n| [ vernemq_mqtt_subscribe_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_subscribe_error | number of failed v3/v5 SUBSCRIBE operations in the last minute |\n| [ vernemq_mqtt_subscribe_auth_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_subscribe_auth_error | number of unauthorized v3/v5 SUBSCRIBE attempts in the last minute |\n| [ vernemq_mqtt_unsubscribe_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_unsubscribe_error | number of failed v3/v5 UNSUBSCRIBE operations in the last minute |\n| [ vernemq_mqtt_publish_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_publish_errors | number of failed v3/v5 PUBLISH operations in the 
last minute |\n| [ vernemq_mqtt_publish_auth_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_publish_auth_errors | number of unauthorized v3/v5 PUBLISH attempts in the last minute |\n| [ vernemq_mqtt_puback_received_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_puback_received_reason | number of received unsuccessful v5 PUBACK packets in the last minute |\n| [ vernemq_mqtt_puback_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_puback_sent_reason | number of sent unsuccessful v5 PUBACK packets in the last minute |\n| [ vernemq_mqtt_puback_unexpected ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_puback_invalid_error | number of received unexpected v3/v5 PUBACK packets in the last minute |\n| [ vernemq_mqtt_pubrec_received_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubrec_received_reason | number of received unsuccessful v5 PUBREC packets in the last minute |\n| [ vernemq_mqtt_pubrec_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubrec_sent_reason | number of sent unsuccessful v5 PUBREC packets in the last minute |\n| [ vernemq_mqtt_pubrec_invalid_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubrec_invalid_error | number of received unexpected v3 PUBREC packets in the last minute |\n| [ vernemq_mqtt_pubrel_received_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubrel_received_reason | number of received unsuccessful v5 PUBREL packets in the last minute |\n| [ vernemq_mqtt_pubrel_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubrel_sent_reason | number of sent unsuccessful v5 PUBREL packets in the last minute |\n| [ vernemq_mqtt_pubcomp_received_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubcomp_received_reason | number of received unsuccessful v5 PUBCOMP packets in the last minute |\n| [ vernemq_mqtt_pubcomp_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubcomp_sent_reason | number of sent unsuccessful v5 PUBCOMP packets in the last minute |\n| [ vernemq_mqtt_pubcomp_unexpected ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubcomp_invalid_error | number of received unexpected v3/v5 PUBCOMP packets in the last minute |\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per VerneMQ instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| vernemq.sockets | open | sockets |\n| vernemq.socket_operations | open, close | sockets/s |\n| vernemq.client_keepalive_expired | closed | sockets/s |\n| vernemq.socket_close_timeout | closed | sockets/s |\n| vernemq.socket_errors | errors | errors/s |\n| vernemq.queue_processes | queue_processes | queue processes |\n| vernemq.queue_processes_operations | setup, teardown | events/s |\n| vernemq.queue_process_init_from_storage | queue_processes | queue processes/s |\n| vernemq.queue_messages | received, sent | messages/s |\n| vernemq.queue_undelivered_messages | dropped, expired, unhandled | messages/s |\n| vernemq.router_subscriptions | subscriptions | subscriptions |\n| vernemq.router_matched_subscriptions | local, remote | subscriptions/s |\n| vernemq.router_memory | used | KiB |\n| vernemq.average_scheduler_utilization | utilization | percentage |\n| vernemq.system_utilization_scheduler | a dimension per scheduler | percentage |\n| vernemq.system_processes | processes | processes |\n| vernemq.system_reductions | reductions | ops/s |\n| vernemq.system_context_switches | context_switches | ops/s |\n| vernemq.system_io | received, sent | kilobits/s |\n| vernemq.system_run_queue | ready | processes |\n| vernemq.system_gc_count | gc | ops/s |\n| vernemq.system_gc_words_reclaimed | words_reclaimed | ops/s |\n| vernemq.system_allocated_memory | processes, system | KiB |\n| vernemq.bandwidth | received, sent | kilobits/s |\n| vernemq.retain_messages | messages | messages |\n| vernemq.retain_memory | used | KiB |\n| vernemq.cluster_bandwidth | received, sent | kilobits/s |\n| vernemq.cluster_dropped | dropped | kilobits/s |\n| vernemq.netsplit_unresolved | unresolved | netsplits |\n| vernemq.netsplits | resolved, detected | netsplits/s |\n| vernemq.mqtt_auth | received, sent | packets/s |\n| vernemq.mqtt_auth_received_reason | a dimensions per reason | packets/s |\n| vernemq.mqtt_auth_sent_reason | a dimensions per reason | packets/s |\n| vernemq.mqtt_connect | connect, connack | packets/s |\n| vernemq.mqtt_connack_sent_reason | a dimensions per reason | packets/s |\n| vernemq.mqtt_disconnect | received, sent | packets/s |\n| vernemq.mqtt_disconnect_received_reason | a dimensions per reason | packets/s |\n| vernemq.mqtt_disconnect_sent_reason | a dimensions per reason | packets/s |\n| vernemq.mqtt_subscribe | subscribe, suback | packets/s |\n| vernemq.mqtt_subscribe_error | failed | ops/s |\n| vernemq.mqtt_subscribe_auth_error | unauth | attempts/s |\n| vernemq.mqtt_unsubscribe | unsubscribe, unsuback | packets/s |\n| vernemq.mqtt_unsubscribe_error | mqtt_unsubscribe_error | ops/s |\n| vernemq.mqtt_publish | received, sent | packets/s |\n| vernemq.mqtt_publish_errors | failed | ops/s |\n| vernemq.mqtt_publish_auth_errors | unauth | attempts/s |\n| vernemq.mqtt_puback | received, sent | packets/s |\n| vernemq.mqtt_puback_received_reason | a dimensions per reason | packets/s |\n| vernemq.mqtt_puback_sent_reason | a dimensions per reason | packets/s |\n| vernemq.mqtt_puback_invalid_error | unexpected | messages/s |\n| vernemq.mqtt_pubrec | received, sent | packets/s |\n| vernemq.mqtt_pubrec_received_reason | a dimensions per reason | packets/s |\n| vernemq.mqtt_pubrec_sent_reason | a dimensions per reason | packets/s |\n| 
vernemq.mqtt_pubrec_invalid_error | unexpected | messages/s |\n| vernemq.mqtt_pubrel | received, sent | packets/s |\n| vernemq.mqtt_pubrel_received_reason | a dimensions per reason | packets/s |\n| vernemq.mqtt_pubrel_sent_reason | a dimensions per reason | packets/s |\n| vernemq.mqtt_pubcom | received, sent | packets/s |\n| vernemq.mqtt_pubcomp_received_reason | a dimensions per reason | packets/s |\n| vernemq.mqtt_pubcomp_sent_reason | a dimensions per reason | packets/s |\n| vernemq.mqtt_pubcomp_invalid_error | unexpected | messages/s |\n| vernemq.mqtt_ping | pingreq, pingresp | packets/s |\n| vernemq.node_uptime | time | seconds |\n\n", + "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ vernemq_socket_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_socket_errors | Node ${label:node} socket errors in the last minute |\n| [ vernemq_queue_message_drop ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_queue_undelivered_messages | Node ${label:node} dropped messages due to full queues in the last minute |\n| [ vernemq_queue_message_expired ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_queue_undelivered_messages | Node ${label:node} messages expired before delivery in the last minute |\n| [ vernemq_queue_message_unhandled ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_queue_undelivered_messages | Node ${label:node} unhandled messages in the last minute |\n| [ vernemq_average_scheduler_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_average_scheduler_utilization | Node ${label:node} scheduler utilization over the last 10 minutes |\n| [ vernemq_cluster_dropped ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_cluster_dropped | Node ${label:node} traffic dropped during communication with the cluster nodes in the last minute |\n| [ vernemq_netsplits ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_netsplits | Node ${label:node} detected netsplits (split brain) in the last minute |\n| [ vernemq_mqtt_connack_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_connack_sent_by_reason_code | Node ${label:node} sent unsuccessful v5 CONNACK packets in the last minute |\n| [ vernemq_mqtt_disconnect_received_reason_not_normal ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_disconnect_received_by_reason_code | Node ${label:node} received non-normal v5 DISCONNECT packets in the last minute |\n| [ vernemq_mqtt_disconnect_sent_reason_not_normal ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_disconnect_sent_by_reason_code | Node ${label:node} sent non-normal v5 DISCONNECT packets in the last minute |\n| [ vernemq_mqtt_subscribe_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_subscribe_error | Node ${label:node} mqtt v${label:mqtt_version} failed SUBSCRIBE operations in the last minute |\n| [ vernemq_mqtt_subscribe_auth_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_subscribe_auth_error | Node ${label:node} mqtt v${label:mqtt_version} unauthorized SUBSCRIBE attempts in the last minute |\n| [ vernemq_mqtt_unsubscribe_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_unsubscribe_error | Node ${label:node} mqtt v${label:mqtt_version} failed UNSUBSCRIBE operations in the last minute |\n| [ vernemq_mqtt_publish_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_publish_errors | Node ${label:node} mqtt v${label:mqtt_version} failed PUBLISH operations in the last minute |\n| [ vernemq_mqtt_publish_auth_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_publish_auth_errors | Node ${label:node} mqtt v${label:mqtt_version} unauthorized PUBLISH attempts in the last minute |\n| [ vernemq_mqtt_puback_received_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_puback_received_by_reason_code | Node ${label:node} mqtt v5 received unsuccessful PUBACK packets in the last minute |\n| [ vernemq_mqtt_puback_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_puback_sent_by_reason_code | Node ${label:node} mqtt v5 sent unsuccessful PUBACK packets in the last minute |\n| [ vernemq_mqtt_puback_unexpected ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_puback_invalid_error | Node ${label:node} mqtt v${label:mqtt_version} received unexpected PUBACK messages in the last minute |\n| [ vernemq_mqtt_pubrec_received_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_pubrec_received_by_reason_code | Node ${label:node} mqtt v5 received unsuccessful PUBREC packets in the last minute |\n| [ vernemq_mqtt_pubrec_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_pubrec_sent_by_reason_code | Node ${label:node} mqtt v5 sent unsuccessful PUBREC packets in the last minute |\n| [ vernemq_mqtt_pubrec_invalid_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_pubrec_invalid_error | Node ${label:node} mqtt v${label:mqtt_version} received invalid PUBREC packets in the last minute |\n| [ vernemq_mqtt_pubrel_received_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_pubrel_received_by_reason_code | Node ${label:node} mqtt v5 received unsuccessful PUBREL packets in the last minute |\n| [ vernemq_mqtt_pubrel_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_pubrel_sent_by_reason_code | Node ${label:node} mqtt v5 sent unsuccessful PUBREL packets in the last minute |\n| [ vernemq_mqtt_pubcomp_received_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_pubcomp_received_by_reason_code | Node ${label:node} mqtt v5 received unsuccessful PUBCOMP packets in the last minute |\n| [ vernemq_mqtt_pubcomp_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_pubcomp_sent_by_reason_code | Node ${label:node} mqtt v5 sent unsuccessful PUBCOMP packets in the last minute |\n| [ vernemq_mqtt_pubcomp_unexpected ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_pubcomp_invalid_error | Node ${label:node} mqtt v${label:mqtt_version} received unexpected PUBCOMP packets in the last minute |\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per node\n\nThese metrics refer to the VerneMQ node.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| node | The value of this label is identical to the value of the \"node\" label exposed by VerneMQ. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| vernemq.node_socket | open | sockets |\n| vernemq.node_socket_operations | open, close | sockets/s |\n| vernemq.node_client_keepalive_expired | closed | sockets/s |\n| vernemq.node_socket_close_timeout | closed | sockets/s |\n| vernemq.node_socket_errors | errors | errors/s |\n| vernemq.node_queue_processes | queue_processes | queue processes |\n| vernemq.node_queue_processes_operations | setup, teardown | events/s |\n| vernemq.node_queue_process_init_from_storage | queue_processes | queue processes/s |\n| vernemq.node_queue_messages | received, sent | messages/s |\n| vernemq.node_queued_messages | queued | messages |\n| vernemq.node_queue_undelivered_messages | dropped, expired, unhandled | messages/s |\n| vernemq.node_router_subscriptions | subscriptions | subscriptions |\n| vernemq.node_router_matched_subscriptions | local, remote | subscriptions/s |\n| vernemq.node_router_memory | used | bytes |\n| vernemq.node_average_scheduler_utilization | utilization | percentage |\n| vernemq.node_system_processes | processes | processes |\n| vernemq.node_system_reductions | reductions | ops/s |\n| vernemq.node_system_context_switches | context_switches | ops/s |\n| vernemq.node_system_io | received, sent | bytes/s |\n| vernemq.node_system_run_queue | ready | processes |\n| vernemq.node_system_gc_count | gc | ops/s |\n| vernemq.node_system_gc_words_reclaimed | words_reclaimed | ops/s |\n| vernemq.node_system_allocated_memory | processes, system | bytes |\n| vernemq.node_traffic | received, sent | bytes/s |\n| vernemq.node_retain_messages | messages | messages |\n| vernemq.node_retain_memory | used | bytes |\n| vernemq.node_cluster_traffic | received, sent | bytes/s |\n| vernemq.node_cluster_dropped | dropped | bytes/s |\n| vernemq.node_netsplit_unresolved | unresolved | netsplits |\n| vernemq.node_netsplits | resolved, detected | netsplits/s |\n| vernemq.node_uptime | time | seconds |\n\n### Per mqtt\n\nThese metrics are specific to the MQTT protocol version in use.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| node | The value of this label is identical to the value of the \"node\" label exposed by VerneMQ. |\n| mqtt_version | MQTT version. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| vernemq.node_mqtt_auth | received, sent | packets/s |\n| vernemq.node_mqtt_auth_received_by_reason_code | success, continue_authentication, reauthenticate | packets/s |\n| vernemq.node_mqtt_auth_sent_by_reason_code | success, continue_authentication, reauthenticate | packets/s |\n| vernemq.node_mqtt_connect | connect, connack | packets/s |\n| vernemq.node_mqtt_connack_sent_by_return_code | success, unsupported_protocol_version, client_identifier_not_valid, server_unavailable, bad_username_or_password, not_authorized | packets/s |\n| vernemq.node_mqtt_connack_sent_by_reason_code | success, unspecified_error, malformed_packet, protocol_error, impl_specific_error, unsupported_protocol_version, client_identifier_not_valid, bad_username_or_password, not_authorized, server_unavailable, server_busy, banned, bad_authentication_method, topic_name_invalid, packet_too_large, quota_exceeded, payload_format_invalid, retain_not_supported, qos_not_supported, use_another_server, server_moved, connection_rate_exceeded | packets/s |\n| vernemq.node_mqtt_disconnect | received, sent | packets/s |\n| vernemq.node_mqtt_disconnect_received_by_reason_code | normal_disconnect, disconnect_with_will_msg, unspecified_error, malformed_packet, protocol_error, impl_specific_error, topic_name_invalid, receive_max_exceeded, topic_alias_invalid, packet_too_large, message_rate_too_high, quota_exceeded, administrative_action, payload_format_invalid | packets/s |\n| vernemq.node_mqtt_disconnect_sent_by_reason_code | normal_disconnect, unspecified_error, malformed_packet, protocol_error, impl_specific_error, not_authorized, server_busy, server_shutting_down, keep_alive_timeout, session_taken_over, topic_filter_invalid, topic_name_invalid, receive_max_exceeded, topic_alias_invalid, packet_too_large, message_rate_too_high, quota_exceeded, administrative_action, payload_format_invalid, retain_not_supported, qos_not_supported, use_another_server, server_moved, shared_subs_not_supported, connection_rate_exceeded, max_connect_time, subscription_ids_not_supported, wildcard_subs_not_supported | packets/s |\n| vernemq.node_mqtt_subscribe | subscribe, suback | packets/s |\n| vernemq.node_mqtt_subscribe_error | subscribe | errors/s |\n| vernemq.node_mqtt_subscribe_auth_error | subscribe_auth | errors/s |\n| vernemq.node_mqtt_unsubscribe | unsubscribe, unsuback | packets/s |\n| vernemq.node_mqtt_unsubscribe_error | unsubscribe | errors/s |\n| vernemq.node_mqtt_publish | received, sent | packets/s |\n| vernemq.node_mqtt_publish_errors | publish | errors/s |\n| vernemq.node_mqtt_publish_auth_errors | publish_auth | errors/s |\n| vernemq.node_mqtt_puback | received, sent | packets/s |\n| vernemq.node_mqtt_puback_received_by_reason_code | success, no_matching_subscribers, unspecified_error, impl_specific_error, not_authorized, topic_name_invalid, packet_id_in_use, quota_exceeded, payload_format_invalid | packets/s |\n| vernemq.node_mqtt_puback_sent_by_reason_code | success, no_matching_subscribers, unspecified_error, impl_specific_error, not_authorized, topic_name_invalid, packet_id_in_use, quota_exceeded, payload_format_invalid | packets/s |\n| vernemq.node_mqtt_puback_invalid_error | unexpected | messages/s |\n| vernemq.node_mqtt_pubrec | received, sent | packets/s |\n| vernemq.node_mqtt_pubrec_received_by_reason_code | success, no_matching_subscribers, unspecified_error, impl_specific_error, not_authorized, topic_name_invalid, packet_id_in_use, quota_exceeded, payload_format_invalid | packets/s |\n| vernemq.node_mqtt_pubrec_sent_by_reason_code | success, no_matching_subscribers, unspecified_error, impl_specific_error, not_authorized, topic_name_invalid, packet_id_in_use, quota_exceeded, payload_format_invalid | packets/s |\n| vernemq.node_mqtt_pubrec_invalid_error | unexpected | messages/s |\n| vernemq.node_mqtt_pubrel | received, sent | packets/s |\n| vernemq.node_mqtt_pubrel_received_by_reason_code | success, packet_id_not_found | packets/s |\n| vernemq.node_mqtt_pubrel_sent_by_reason_code | success, packet_id_not_found | packets/s |\n| vernemq.node_mqtt_pubcomp | received, sent | packets/s |\n| vernemq.node_mqtt_pubcomp_received_by_reason_code | success, packet_id_not_found | packets/s |\n| vernemq.node_mqtt_pubcomp_sent_by_reason_code | success, packet_id_not_found | packets/s |\n| vernemq.node_mqtt_pubcomp_invalid_error | unexpected | messages/s |\n| vernemq.node_mqtt_ping | pingreq, pingresp | packets/s |\n\n",
"integration_type": "collector",
"id": "go.d.plugin-vernemq-VerneMQ",
"edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/vernemq/metadata.yaml",
"related_resources": ""
},
@@ -17080,7 +17424,7 @@ export const integrations = [
"most_popular": true
},
"overview": "# VMware vCenter Server\n\nPlugin: go.d.plugin\nModule: vsphere\n\n## Overview\n\nThis collector monitors host and VM performance statistics from `vCenter` servers.\n\n> **Warning**: The `vsphere` collector cannot re-login and continue collecting metrics after a vCenter reboot.\n> go.d.plugin needs to be restarted.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default `update_every` is 20 seconds, and it doesn't make sense to decrease the value.\n**VMware real-time statistics are generated at a 20-second granularity**.\n\nFor big installations, 20 seconds is likely not enough, and the value should be tuned.\n\nTo get a better view, we recommend running the collector in debug mode and seeing how much time it takes to collect metrics.\n
\nExample (all unrelated debug lines have been removed)\n\n```\n[ilyam@pc]$ ./go.d.plugin -d -m vsphere\n[ DEBUG ] vsphere[vsphere] discover.go:94 discovering : starting resource discovering process\n[ DEBUG ] vsphere[vsphere] discover.go:102 discovering : found 3 dcs, process took 49.329656ms\n[ DEBUG ] vsphere[vsphere] discover.go:109 discovering : found 12 folders, process took 49.538688ms\n[ DEBUG ] vsphere[vsphere] discover.go:116 discovering : found 3 clusters, process took 47.722692ms\n[ DEBUG ] vsphere[vsphere] discover.go:123 discovering : found 2 hosts, process took 52.966995ms\n[ DEBUG ] vsphere[vsphere] discover.go:130 discovering : found 2 vms, process took 49.832979ms\n[ INFO ] vsphere[vsphere] discover.go:140 discovering : found 3 dcs, 12 folders, 3 clusters (2 dummy), 2 hosts, 3 vms, process took 249.655993ms\n[ DEBUG ] vsphere[vsphere] build.go:12 discovering : building : starting building resources process\n[ INFO ] vsphere[vsphere] build.go:23 discovering : building : built 3/3 dcs, 12/12 folders, 3/3 clusters, 2/2 hosts, 3/3 vms, process took 63.3\u00b5s\n[ DEBUG ] vsphere[vsphere] hierarchy.go:10 discovering : hierarchy : start setting resources hierarchy process\n[ INFO ] vsphere[vsphere] hierarchy.go:18 discovering : hierarchy : set 3/3 clusters, 2/2 hosts, 3/3 vms, process took 6.522\u00b5s\n[ DEBUG ] vsphere[vsphere] filter.go:24 discovering : filtering : starting filtering resources process\n[ DEBUG ] vsphere[vsphere] filter.go:45 discovering : filtering : removed 0 unmatched hosts\n[ DEBUG ] vsphere[vsphere] filter.go:56 discovering : filtering : removed 0 unmatched vms\n[ INFO ] vsphere[vsphere] filter.go:29 discovering : filtering : filtered 0/2 hosts, 0/3 vms, process took 42.973\u00b5s\n[ DEBUG ] vsphere[vsphere] metric_lists.go:14 discovering : metric lists : starting resources metric lists collection process\n[ INFO ] vsphere[vsphere] metric_lists.go:30 discovering : metric lists : collected metric lists for 2/2 hosts, 3/3 vms, process took 275.60764ms\n[ INFO ] vsphere[vsphere] discover.go:74 discovering : discovered 2/2 hosts, 3/3 vms, the whole process took 525.614041ms\n[ INFO ] vsphere[vsphere] discover.go:11 starting discovery process, will do discovery every 5m0s\n[ DEBUG ] vsphere[vsphere] collect.go:11 starting collection process\n[ DEBUG ] vsphere[vsphere] scrape.go:48 scraping : scraped metrics for 2/2 hosts, process took 96.257374ms\n[ DEBUG ] vsphere[vsphere] scrape.go:60 scraping : scraped metrics for 3/3 vms, process took 57.879697ms\n[ DEBUG ] vsphere[vsphere] collect.go:23 metrics collected, process took 154.77997ms\n```\n\n
\n\nIn the example above, discovering took `525.614041ms`, and collecting metrics took `154.77997ms`. Discovery runs in a separate thread, so it doesn't affect collecting.\nThe `update_every` and `timeout` parameters should be adjusted based on these numbers.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/vsphere.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/vsphere.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 20 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | vCenter server URL. | | yes |\n| host_include | Hosts selector (filter). | | no |\n| vm_include | Virtual machines selector (filter). | | no |\n| discovery_interval | Hosts and VMs discovery interval. | 300 | no |\n| timeout | HTTP request timeout. | 20 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
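To gather the same timings on your own installation, run the collector in debug mode and filter for the summary lines. A minimal sketch, assuming the stock plugin directory (stop it with Ctrl+C once a collection cycle has completed):\n\n```bash\n# Keep only the lines that report how long each step took\ncd /usr/libexec/netdata/plugins.d/\nsudo -u netdata ./go.d.plugin -d -m vsphere 2>&1 | grep 'process took'\n```\n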
| | no |\n\n##### host_include\n\nMetrics of hosts matching the selector will be collected.\n\n- Include pattern syntax: \"/Datacenter pattern/Cluster pattern/Host pattern\".\n- Match pattern syntax: [simple patterns](/src/libnetdata/simple_pattern/README.md#simple-patterns).\n- Syntax:\n\n ```yaml\n host_include:\n - '/DC1/*' # select all hosts from datacenter DC1\n - '/DC2/*/!Host2 *' # select all hosts from datacenter DC2 except HOST2\n - '/DC3/Cluster3/*' # select all hosts from datacenter DC3 cluster Cluster3\n ```\n\n\n##### vm_include\n\nMetrics of VMs matching the selector will be collected.\n\n- Include pattern syntax: \"/Datacenter pattern/Cluster pattern/Host pattern/VM pattern\".\n- Match pattern syntax: [simple patterns](/src/libnetdata/simple_pattern/README.md#simple-patterns).\n- Syntax:\n\n ```yaml\n vm_include:\n - '/DC1/*' # select all VMs from datacenter DC\n - '/DC2/*/*/!VM2 *' # select all VMs from datacenter DC2 except VM2\n - '/DC3/Cluster3/*' # select all VMs from datacenter DC3 cluster Cluster3\n ```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name : vcenter1\n url : https://203.0.113.1\n username : admin@vsphere.local\n password : somepassword\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name : vcenter1\n url : https://203.0.113.1\n username : admin@vsphere.local\n password : somepassword\n\n - name : vcenter2\n url : https://203.0.113.10\n username : admin@vsphere.local\n password : somepassword\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/vsphere.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/vsphere.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 20 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | vCenter server URL. | | yes |\n| host_include | Hosts selector (filter). | | no |\n| vm_include | Virtual machines selector (filter). | | no |\n| discovery_interval | Hosts and VMs discovery interval. | 300 | no |\n| timeout | HTTP request timeout. | 20 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. 
Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### host_include\n\nMetrics of hosts matching the selector will be collected.\n\n- Include pattern syntax: \"/Datacenter pattern/Cluster pattern/Host pattern\".\n- Match pattern syntax: [simple patterns](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md#simple-patterns).\n- Syntax:\n\n ```yaml\n host_include:\n - '/DC1/*' # select all hosts from datacenter DC1\n - '/DC2/*/!Host2 *' # select all hosts from datacenter DC2 except Host2\n - '/DC3/Cluster3/*' # select all hosts from datacenter DC3 cluster Cluster3\n ```\n\n\n##### vm_include\n\nMetrics of VMs matching the selector will be collected.\n\n- Include pattern syntax: \"/Datacenter pattern/Cluster pattern/Host pattern/VM pattern\".\n- Match pattern syntax: [simple patterns](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md#simple-patterns).\n- Syntax:\n\n ```yaml\n vm_include:\n - '/DC1/*' # select all VMs from datacenter DC1\n - '/DC2/*/*/!VM2 *' # select all VMs from datacenter DC2 except VM2\n - '/DC3/Cluster3/*' # select all VMs from datacenter DC3 cluster Cluster3\n ```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name : vcenter1\n url : https://203.0.113.1\n username : admin@vsphere.local\n password : somepassword\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name : vcenter1\n url : https://203.0.113.1\n username : admin@vsphere.local\n password : somepassword\n\n - name : vcenter2\n url : https://203.0.113.10\n username : admin@vsphere.local\n password : somepassword\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `vsphere` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m vsphere\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `vsphere` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep vsphere\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep vsphere /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep vsphere\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ vsphere_vm_cpu_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vsphere.conf) | vsphere.vm_cpu_utilization | Virtual Machine CPU utilization |\n| [ vsphere_vm_mem_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vsphere.conf) | vsphere.vm_mem_utilization | Virtual Machine memory utilization |\n| [ vsphere_host_cpu_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vsphere.conf) | vsphere.host_cpu_utilization | ESXi Host CPU utilization |\n| [ vsphere_host_mem_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vsphere.conf) | vsphere.host_mem_utilization | ESXi Host memory utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per virtual machine\n\nThese metrics refer to the Virtual Machine.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| datacenter | Datacenter name |\n| cluster | Cluster name |\n| host | Host name |\n| vm | Virtual Machine name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| vsphere.vm_cpu_utilization | used | percentage |\n| vsphere.vm_mem_utilization | used | percentage |\n| vsphere.vm_mem_usage | granted, consumed, active, shared | KiB |\n| vsphere.vm_mem_swap_usage | swapped | KiB |\n| vsphere.vm_mem_swap_io | in, out | KiB/s |\n| vsphere.vm_disk_io | read, write | KiB/s |\n| vsphere.vm_disk_max_latency | latency | milliseconds |\n| vsphere.vm_net_traffic | received, sent | KiB/s |\n| vsphere.vm_net_packets | received, sent | packets |\n| vsphere.vm_net_drops | received, sent | packets |\n| vsphere.vm_overall_status | green, red, yellow, gray | status |\n| vsphere.vm_system_uptime | uptime | seconds |\n\n### Per host\n\nThese metrics refer to the ESXi host.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| datacenter | Datacenter name |\n| cluster | Cluster name |\n| host | Host name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| vsphere.host_cpu_utilization | used | percentage |\n| vsphere.host_mem_utilization | used | percentage |\n| vsphere.host_mem_usage | granted, consumed, active, shared, sharedcommon | KiB |\n| vsphere.host_mem_swap_io | in, out | KiB/s |\n| vsphere.host_disk_io | read, write | KiB/s |\n| vsphere.host_disk_max_latency | latency | milliseconds |\n| vsphere.host_net_traffic | received, sent | KiB/s |\n| vsphere.host_net_packets | received, sent | packets |\n| vsphere.host_net_drops | received, sent | packets |\n| vsphere.host_net_errors | received, sent | errors |\n| vsphere.host_overall_status | green, red, yellow, gray | status |\n| vsphere.host_system_uptime | uptime | seconds |\n\n", @@ -17089,6 +17433,43 @@ export const integrations = [ "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/vsphere/metadata.yaml", "related_resources": "" }, + { + "meta": { + "plugin_name": "go.d.plugin", + "module_name": "w1sensor", + "monitored_instance": { + "name": "1-Wire Sensors", + "link": "https://www.analog.com/en/product-category/1wire-temperature-sensors.html", + "categories": [ + "data-collection.hardware-devices-and-sensors" + ], + "icon_filename": "1-wire.png" + }, + "related_resources": { + "integrations": { + "list": [] + } + }, + "info_provided_to_referring_integrations": { + "description": "" + }, + "keywords": [ + "temperature", + "sensor", + "1-wire" + ], + "most_popular": false + }, + "overview": "# 1-Wire Sensors\n\nPlugin: go.d.plugin\nModule: w1sensor\n\n## Overview\n\nMonitor 1-Wire Sensors metrics with Netdata for optimal environmental conditions monitoring. Enhance your environmental monitoring with real-time insights and alerts.\n\n\nThe collector uses the wire, w1_gpio, and w1_therm kernel modules. 
Currently temperature sensors are supported and automatically detected.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector will try to auto detect available 1-Wire devices.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Required Linux kernel modules\n\nMake sure `wire`, `w1_gpio`, and `w1_therm` kernel modules are loaded.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/w1sensor.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/w1sensor.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| sensors_path | Directory path containing sensor folders with w1_slave files. | /sys/bus/w1/devices | no |\n\n{% /details %}\n#### Examples\n\n##### Custom sensor device path\n\nMonitors a virtual sensor when the w1_slave file is located in a custom directory instead of the default location.\n\n```yaml\njobs:\n - name: custom_sensors_path\n sensors_path: /custom/path/devices\n\n```\n", + "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `w1sensor` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m w1sensor\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `w1sensor` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep w1sensor\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep w1sensor /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep w1sensor\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per sensor\n\nThese metrics refer to the 1-Wire Sensor.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| w1sensor.temperature | temperature | Celsius |\n\n", "integration_type": "collector", "id": "go.d.plugin-w1sensor-1-Wire_Sensors", "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/w1sensor/metadata.yaml", "related_resources": "" }, { "meta": { "id": "collector-go.d.plugin-web_log", @@ -17121,7 +17502,7 @@ export const integrations = [ } }, "overview": "# Web server log files\nPlugin: go.d.plugin\nModule: web_log\n\n## Overview\n\nThis collector monitors web servers by parsing their log files.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt automatically detects log files of web servers running on localhost.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/web_log.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/web_log.conf\n```\n#### Options\n\nWeblog is aware of how to parse and interpret the following fields (**known fields**):\n\n> [nginx](https://nginx.org/en/docs/varindex.html)\n>\n> [apache](https://httpd.apache.org/docs/current/mod/mod_log_config.html)\n\n| nginx | apache | description |\n|-------------------------|----------|------------------------------------------------------------------------------------------|\n| $host ($http_host) | %v | Name of the server which accepted a request. |\n| $server_port | %p | Port of the server which accepted a request. |\n| $scheme | - | Request scheme. \"http\" or \"https\". 
|\n| $remote_addr | %a (%h) | Client address. |\n| $request | %r | Full original request line. The line is \"$request_method $request_uri $server_protocol\". |\n| $request_method | %m | Request method. Usually \"GET\" or \"POST\". |\n| $request_uri | %U | Full original request URI. |\n| $server_protocol | %H | Request protocol. Usually \"HTTP/1.0\", \"HTTP/1.1\", or \"HTTP/2.0\". |\n| $status | %s (%>s) | Response status code. |\n| $request_length | %I | Bytes received from a client, including request and headers. |\n| $bytes_sent | %O | Bytes sent to a client, including request and headers. |\n| $body_bytes_sent | %B (%b) | Bytes sent to a client, not counting the response header. |\n| $request_time | %D | Request processing time. |\n| $upstream_response_time | - | Time spent on receiving the response from the upstream server. |\n| $ssl_protocol | - | Protocol of an established SSL connection. |\n| $ssl_cipher | - | String of ciphers used for an established SSL connection. |\n\nNotes:\n\n- Apache `%h` logs the IP address if [HostnameLookups](https://httpd.apache.org/docs/2.4/mod/core.html#hostnamelookups) is Off. The web log collector counts hostnames as IPv4 addresses. We recommend either to disable HostnameLookups or use `%a` instead of `%h`.\n- Since httpd 2.0, unlike 1.3, the `%b` and `%B` format strings do not represent the number of bytes sent to the client, but simply the size in bytes of the HTTP response. It will differ, for instance, if the connection is aborted, or if SSL is used. The `%O` format provided by [`mod_logio`](https://httpd.apache.org/docs/2.4/mod/mod_logio.html) will log the actual number of bytes sent over the network.\n- To get `%I` and `%O` working you need to enable `mod_logio` on Apache.\n- NGINX logs URI with query parameters, Apache doesnt.\n- `$request` is parsed into `$request_method`, `$request_uri` and `$server_protocol`. If you have `$request` in your log format, there is no sense to have others.\n- Don't use both `$bytes_sent` and `$body_bytes_sent` (`%O` and `%B` or `%b`). The module does not distinguish between these parameters.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| path | Path to the web server log file. | | yes |\n| exclude_path | Path to exclude. | *.gz | no |\n| url_patterns | List of URL patterns. | [] | no |\n| url_patterns.name | Used as a dimension name. | | yes |\n| url_patterns.pattern | Used to match against full original request URI. Pattern syntax in [matcher](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/matcher#supported-format). | | yes |\n| log_type | Log parser type. | auto | no |\n| csv_config | CSV log parser config. | | no |\n| csv_config.delimiter | CSV field delimiter. | , | no |\n| csv_config.format | CSV log format. | | no |\n| ltsv_config | LTSV log parser config. | | no |\n| ltsv_config.field_delimiter | LTSV field delimiter. | \\t | no |\n| ltsv_config.value_delimiter | LTSV value delimiter. | : | no |\n| ltsv_config.mapping | LTSV fields mapping to **known fields**. | | yes |\n| json_config | JSON log parser config. | | no |\n| json_config.mapping | JSON fields mapping to **known fields**. | | yes |\n| regexp_config | RegExp log parser config. | | no |\n| regexp_config.pattern | RegExp pattern with named groups. 
| | yes |\n\n##### url_patterns\n\n\"URL pattern\" scope metrics will be collected for each URL pattern. \n\nOption syntax:\n\n```yaml\nurl_patterns:\n - name: name1\n pattern: pattern1\n - name: name2\n pattern: pattern2\n```\n\n\n##### log_type\n\nWeblog supports 5 different log parsers:\n\n| Parser type | Description |\n|-------------|-------------------------------------------|\n| auto | Use CSV and auto-detect format |\n| csv | A comma-separated values |\n| json | [JSON](https://www.json.org/json-en.html) |\n| ltsv | [LTSV](http://ltsv.org/) |\n| regexp | Regular expression with named groups |\n\nSyntax:\n\n```yaml\nlog_type: auto\n```\n\nIf `log_type` parameter set to `auto` (which is default), weblog will try to auto-detect appropriate log parser and log format using the last line of the log file.\n\n- checks if format is `CSV` (using regexp).\n- checks if format is `JSON` (using regexp).\n- assumes format is `CSV` and tries to find appropriate `CSV` log format using predefined list of formats. It tries to parse the line using each of them in the following order (the first one matches is used later):\n\n ```sh\n $host:$server_port $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent - - $request_length $request_time $upstream_response_time\n $host:$server_port $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent - - $request_length $request_time\n $host:$server_port $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent $request_length $request_time $upstream_response_time\n $host:$server_port $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent $request_length $request_time\n $host:$server_port $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent\n $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent - - $request_length $request_time $upstream_response_time\n $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent - - $request_length $request_time\n $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent $request_length $request_time $upstream_response_time\n $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent $request_length $request_time\n $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent\n ```\n\n If you're using the default Apache/NGINX log format, auto-detect will work for you. If it doesn't work you need to set the format manually.\n\n\n##### csv_config.format\n\n\n\n##### ltsv_config.mapping\n\nThe mapping is a dictionary where the key is a field, as in logs, and the value is the corresponding **known field**.\n\n> **Note**: don't use `$` and `%` prefixes for mapped field names.\n\n```yaml\nlog_type: ltsv\nltsv_config:\n mapping:\n label1: field1\n label2: field2\n```\n\n\n##### json_config.mapping\n\nThe mapping is a dictionary where the key is a field, as in logs, and the value is the corresponding **known field**.\n\n> **Note**: don't use `$` and `%` prefixes for mapped field names.\n\n```yaml\nlog_type: json\njson_config:\n mapping:\n label1: field1\n label2: field2\n```\n\n\n##### regexp_config.pattern\n\nUse pattern with subexpressions names. 
These names should be **known fields**.\n\n> **Note**: don't use `$` and `%` prefixes for mapped field names.\n\nSyntax:\n\n```yaml\nlog_type: regexp\nregexp_config:\n pattern: PATTERN\n```\n\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/web_log.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/web_log.conf\n```\n#### Options\n\nWeblog is aware of how to parse and interpret the following fields (**known fields**):\n\n> [nginx](https://nginx.org/en/docs/varindex.html)\n>\n> [apache](https://httpd.apache.org/docs/current/mod/mod_log_config.html)\n\n| nginx | apache | description |\n|-------------------------|----------|------------------------------------------------------------------------------------------|\n| $host ($http_host) | %v | Name of the server which accepted a request. |\n| $server_port | %p | Port of the server which accepted a request. |\n| $scheme | - | Request scheme. \"http\" or \"https\". |\n| $remote_addr | %a (%h) | Client address. |\n| $request | %r | Full original request line. The line is \"$request_method $request_uri $server_protocol\". |\n| $request_method | %m | Request method. Usually \"GET\" or \"POST\". |\n| $request_uri | %U | Full original request URI. |\n| $server_protocol | %H | Request protocol. Usually \"HTTP/1.0\", \"HTTP/1.1\", or \"HTTP/2.0\". |\n| $status | %s (%>s) | Response status code. |\n| $request_length | %I | Bytes received from a client, including request and headers. |\n| $bytes_sent | %O | Bytes sent to a client, including request and headers. |\n| $body_bytes_sent | %B (%b) | Bytes sent to a client, not counting the response header. |\n| $request_time | %D | Request processing time. |\n| $upstream_response_time | - | Time spent on receiving the response from the upstream server. |\n| $ssl_protocol | - | Protocol of an established SSL connection. |\n| $ssl_cipher | - | String of ciphers used for an established SSL connection. |\n\nNotes:\n\n- Apache `%h` logs the IP address if [HostnameLookups](https://httpd.apache.org/docs/2.4/mod/core.html#hostnamelookups) is Off. The web log collector counts hostnames as IPv4 addresses. We recommend either disabling HostnameLookups or using `%a` instead of `%h`.\n- Since httpd 2.0, unlike 1.3, the `%b` and `%B` format strings do not represent the number of bytes sent to the client, but simply the size in bytes of the HTTP response. It will differ, for instance, if the connection is aborted, or if SSL is used. The `%O` format provided by [`mod_logio`](https://httpd.apache.org/docs/2.4/mod/mod_logio.html) will log the actual number of bytes sent over the network.\n- To get `%I` and `%O` working you need to enable `mod_logio` on Apache.\n- NGINX logs URI with query parameters, Apache doesn't.\n- `$request` is parsed into `$request_method`, `$request_uri` and `$server_protocol`. 
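For example, the request line `GET /index.html?tab=1 HTTP/1.1` yields `$request_method` = `GET`, `$request_uri` = `/index.html?tab=1`, and `$server_protocol` = `HTTP/1.1`. 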
If you have `$request` in your log format, there is no need to include the others.\n- Don't use both `$bytes_sent` and `$body_bytes_sent` (`%O` and `%B` or `%b`). The module does not distinguish between these parameters.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| path | Path to the web server log file. | | yes |\n| exclude_path | Path to exclude. | *.gz | no |\n| url_patterns | List of URL patterns. | [] | no |\n| url_patterns.name | Used as a dimension name. | | yes |\n| url_patterns.pattern | Used to match against full original request URI. Pattern syntax in [matcher](https://github.com/netdata/netdata/tree/master/src/go/pkg/matcher#supported-format). | | yes |\n| log_type | Log parser type. | auto | no |\n| csv_config | CSV log parser config. | | no |\n| csv_config.delimiter | CSV field delimiter. | , | no |\n| csv_config.format | CSV log format. | | no |\n| ltsv_config | LTSV log parser config. | | no |\n| ltsv_config.field_delimiter | LTSV field delimiter. | \t | no |\n| ltsv_config.value_delimiter | LTSV value delimiter. | : | no |\n| ltsv_config.mapping | LTSV fields mapping to **known fields**. | | yes |\n| json_config | JSON log parser config. | | no |\n| json_config.mapping | JSON fields mapping to **known fields**. | | yes |\n| regexp_config | RegExp log parser config. | | no |\n| regexp_config.pattern | RegExp pattern with named groups. | | yes |\n\n##### url_patterns\n\n\"URL pattern\" scope metrics will be collected for each URL pattern. \n\nOption syntax:\n\n```yaml\nurl_patterns:\n - name: name1\n pattern: pattern1\n - name: name2\n pattern: pattern2\n```\n\n\n##### log_type\n\nWeblog supports 5 different log parsers:\n\n| Parser type | Description |\n|-------------|-------------------------------------------|\n| auto | Use CSV and auto-detect format |\n| csv | Comma-separated values |\n| json | [JSON](https://www.json.org/json-en.html) |\n| ltsv | [LTSV](http://ltsv.org/) |\n| regexp | Regular expression with named groups |\n\nSyntax:\n\n```yaml\nlog_type: auto\n```\n\nIf the `log_type` parameter is set to `auto` (the default), weblog tries to auto-detect the appropriate log parser and log format using the last line of the log file.\n\n- checks if the format is `CSV` (using regexp).\n- checks if the format is `JSON` (using regexp).\n- assumes the format is `CSV` and tries to find the appropriate `CSV` log format using a predefined list of formats. 
It tries to parse the line using each of them in the following order (the first one that matches is used):\n\n ```sh\n $host:$server_port $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent - - $request_length $request_time $upstream_response_time\n $host:$server_port $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent - - $request_length $request_time\n $host:$server_port $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent $request_length $request_time $upstream_response_time\n $host:$server_port $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent $request_length $request_time\n $host:$server_port $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent\n $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent - - $request_length $request_time $upstream_response_time\n $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent - - $request_length $request_time\n $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent $request_length $request_time $upstream_response_time\n $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent $request_length $request_time\n $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent\n ```\n\n If you're using the default Apache/NGINX log format, auto-detect will work for you. If it doesn't work, you need to set the format manually.\n\n\n##### csv_config.format\n\nThe CSV log format is defined using the **known fields** listed above, for example `$remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent`.\n\n##### ltsv_config.mapping\n\nThe mapping is a dictionary where the key is a field, as in logs, and the value is the corresponding **known field**.\n\n> **Note**: don't use `$` and `%` prefixes for mapped field names.\n\n```yaml\nlog_type: ltsv\nltsv_config:\n mapping:\n label1: field1\n label2: field2\n```\n\n\n##### json_config.mapping\n\nThe mapping is a dictionary where the key is a field, as in logs, and the value is the corresponding **known field**.\n\n> **Note**: don't use `$` and `%` prefixes for mapped field names.\n\n```yaml\nlog_type: json\njson_config:\n mapping:\n label1: field1\n label2: field2\n```\n\n\n##### regexp_config.pattern\n\nUse a pattern with named subexpressions. These names should be **known fields**.\n\n> **Note**: don't use `$` and `%` prefixes for mapped field names.\n\nSyntax:\n\n```yaml\nlog_type: regexp\nregexp_config:\n pattern: PATTERN\n```\n\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `web_log` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m web_log\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `web_log` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
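For this collector, common examples would be a `path` that matches no existing log file, or a log format that auto-detection cannot recognize (in that case, set `log_type` and the corresponding format options manually, as described above). 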
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep web_log\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep web_log /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep web_log\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ web_log_1m_unmatched ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.excluded_requests | percentage of unparsed log lines over the last minute |\n| [ web_log_1m_requests ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.type_requests | ratio of successful HTTP requests over the last minute (1xx, 2xx, 304, 401) |\n| [ web_log_1m_redirects ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.type_requests | ratio of redirection HTTP requests over the last minute (3xx except 304) |\n| [ web_log_1m_bad_requests ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.type_requests | ratio of client error HTTP requests over the last minute (4xx except 401) |\n| [ web_log_1m_internal_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.type_requests | ratio of server error HTTP requests over the last minute (5xx) |\n| [ web_log_web_slow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.request_processing_time | average HTTP response time over the last 1 minute |\n| [ web_log_5m_requests_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.type_requests | ratio of successful HTTP requests over the last 5 minutes, compared with the previous 5 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Web server log files instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| web_log.requests | requests | requests/s |\n| web_log.excluded_requests | unmatched | requests/s |\n| web_log.type_requests | success, bad, redirect, error | requests/s |\n| web_log.status_code_class_responses | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s |\n| web_log.status_code_class_1xx_responses | a dimension per 1xx code | responses/s |\n| web_log.status_code_class_2xx_responses | a dimension per 2xx code | responses/s |\n| web_log.status_code_class_3xx_responses | a dimension per 3xx code | responses/s |\n| web_log.status_code_class_4xx_responses | a dimension per 4xx code | responses/s |\n| web_log.status_code_class_5xx_responses | a dimension per 5xx code | responses/s |\n| web_log.bandwidth | received, sent | kilobits/s |\n| web_log.request_processing_time | min, max, avg | milliseconds |\n| web_log.requests_processing_time_histogram | a dimension per bucket | requests/s |\n| web_log.upstream_response_time | min, max, avg | milliseconds |\n| web_log.upstream_responses_time_histogram | a dimension per bucket | requests/s |\n| web_log.current_poll_uniq_clients | ipv4, ipv6 | clients |\n| web_log.vhost_requests | a dimension per vhost | requests/s |\n| web_log.port_requests | a dimension per port | requests/s |\n| web_log.scheme_requests | http, https | requests/s |\n| web_log.http_method_requests | a dimension per HTTP method | requests/s |\n| web_log.http_version_requests | a dimension per HTTP version | requests/s |\n| web_log.ip_proto_requests | ipv4, ipv6 | requests/s |\n| web_log.ssl_proto_requests | a dimension per SSL protocol | requests/s |\n| web_log.ssl_cipher_suite_requests | a dimension per SSL cipher suite | requests/s |\n| web_log.url_pattern_requests | a dimension per URL pattern | requests/s |\n| web_log.custom_field_pattern_requests | a dimension per custom field pattern | requests/s |\n\n### Per custom time field\n\nTBD\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| web_log.custom_time_field_summary | min, max, avg | milliseconds |\n| web_log.custom_time_field_histogram | a dimension per bucket | observations |\n\n### Per custom numeric field\n\nTBD\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| web_log.custom_numeric_field_{{field_name}}_summary | min, max, avg | {{units}} |\n\n### Per URL pattern\n\nTBD\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| web_log.url_pattern_status_code_responses | a dimension per pattern | responses/s |\n| web_log.url_pattern_http_method_requests | a dimension per HTTP method | requests/s |\n| web_log.url_pattern_bandwidth | received, sent | kilobits/s |\n| web_log.url_pattern_request_processing_time | min, max, avg | milliseconds |\n\n", @@ -17157,7 +17538,7 @@ export const integrations = [ "most_popular": false }, "overview": "# Domain expiration date\n\nPlugin: go.d.plugin\nModule: whoisquery\n\n## Overview\n\nThis collector monitors the remaining time before the domain expires.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### 
Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/whoisquery.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/whoisquery.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 60 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| source | Domain address. | | yes |\n| days_until_expiration_warning | Number of days before the alarm status is warning. | 30 | no |\n| days_until_expiration_critical | Number of days before the alarm status is critical. | 15 | no |\n| timeout | The query timeout in seconds. | 5 | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nBasic configuration example\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: my_site\n source: my_site.com\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define more than one job, their names must be unique.\n\nCheck the expiration status of the multiple domains.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: my_site1\n source: my_site1.com\n\n - name: my_site2\n source: my_site2.com\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/whoisquery.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/whoisquery.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 60 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| source | Domain address. | | yes |\n| days_until_expiration_warning | Number of days before the alarm status is warning. | 30 | no |\n| days_until_expiration_critical | Number of days before the alarm status is critical. | 15 | no |\n| timeout | The query timeout in seconds. 
| 5 | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nBasic configuration example\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: my_site\n source: my_site.com\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define more than one job, their names must be unique.\n\nCheck the expiration status of multiple domains.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: my_site1\n source: my_site1.com\n\n - name: my_site2\n source: my_site2.com\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `whoisquery` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m whoisquery\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `whoisquery` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep whoisquery\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep whoisquery /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep whoisquery\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ whoisquery_days_until_expiration ](https://github.com/netdata/netdata/blob/master/src/health/health.d/whoisquery.conf) | whoisquery.time_until_expiration | time until the domain name registration expires |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per domain\n\nThese metrics refer to the configured source.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| domain | Configured source |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| whoisquery.time_until_expiration | expiry | seconds |\n\n", @@ -17198,10 +17579,10 @@ export const integrations = [ } }, "overview": "# Active Directory\n\nPlugin: go.d.plugin\nModule: windows\n\n## Overview\n\nThis collector monitors the performance of Windows machines, collects both host metrics and metrics from various Windows applications (e.g. Active Directory, MSSQL).\n\n\nIt collects metrics by periodically sending HTTP requests to [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt detects Windows exporter instances running on localhost (requires using [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)).\n\nUsing the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nData collection affects the CPU usage of the Windows host. CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Windows exporter\n\nTo install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/windows.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/windows.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. 
| no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n url: https://192.0.2.1:9182/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Virtual Node\n\nThe Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.\nYou can create a virtual node for all your Windows machines and control them as separate entities.\n\nTo make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:\n\n> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.\n\n```yaml\n# /etc/netdata/vnodes/vnodes.conf\n- hostname: win_server\n guid: \n```\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n vnode: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from multiple remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server1\n url: http://192.0.2.1:9182/metrics\n\n - name: win_server2\n url: http://192.0.2.2:9182/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Windows exporter\n\nTo install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/windows.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/windows.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. 
| | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n url: https://192.0.2.1:9182/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Virtual Node\n\nThe Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.\nYou can create a virtual node for all your Windows machines and control them as separate entities.\n\nTo make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:\n\n> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.\n\n```yaml\n# /etc/netdata/vnodes/vnodes.conf\n- hostname: win_server\n guid: \n```\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n vnode: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from multiple remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server1\n url: http://192.0.2.1:9182/metrics\n\n - name: win_server2\n url: http://192.0.2.2:9182/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `windows` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
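For static installs, it is typically under `/opt/netdata/usr/libexec/netdata/plugins.d/`. 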
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m windows\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `windows` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep windows\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep windows /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep windows\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ windows_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.cpu_utilization_total | average CPU utilization over the last 10 minutes |\n| [ windows_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.memory_utilization | memory utilization |\n| [ windows_inbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of inbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of outbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_inbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of inbound errors for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of outbound errors for the network interface in the last 10 minutes |\n| [ windows_disk_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.logical_disk_space_usage | disk space utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThe collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\nSupported collectors:\n\n- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)\n- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)\n- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)\n- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)\n- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)\n- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)\n- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)\n- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)\n- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)\n- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)\n- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)\n- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)\n- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)\n- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)\n- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)\n- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)\n- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md)\n- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md)\n- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md)\n- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md)\n- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md)\n- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md)\n- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md)\n- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)\n- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)\n\n\n### Per Active Directory instance\n\nThese metrics refer to the entire monitored host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |\n| windows.memory_utilization | available, used | bytes |\n| windows.memory_page_faults | page_faults | events/s |\n| windows.memory_swap_utilization 
| available, used | bytes |\n| windows.memory_swap_operations | read, write | operations/s |\n| windows.memory_swap_pages | read, written | pages/s |\n| windows.memory_cached | cached | KiB |\n| windows.memory_cache_faults | cache_faults | events/s |\n| windows.memory_system_pool | paged, non-paged | bytes |\n| windows.tcp_conns_established | ipv4, ipv6 | connections |\n| windows.tcp_conns_active | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |\n| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |\n| windows.tcp_segments_received | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |\n| windows.os_processes | processes | number |\n| windows.os_users | users | users |\n| windows.os_visible_memory_usage | free, used | bytes |\n| windows.os_paging_files_usage | free, used | bytes |\n| windows.system_threads | threads | number |\n| windows.system_uptime | time | seconds |\n| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |\n| windows.processes_cpu_utilization | a dimension per process | percentage |\n| windows.processes_handles | a dimension per process | handles |\n| windows.processes_io_bytes | a dimension per process | bytes/s |\n| windows.processes_io_operations | a dimension per process | operations/s |\n| windows.processes_page_faults | a dimension per process | pgfaults/s |\n| windows.processes_page_file_bytes | a dimension per process | bytes |\n| windows.processes_pool_bytes | a dimension per process | bytes |\n| windows.processes_threads | a dimension per process | threads |\n| ad.database_operations | add, delete, modify, recycle | operations/s |\n| ad.directory_operations | read, write, search | operations/s |\n| ad.name_cache_lookups | lookups | lookups/s |\n| ad.name_cache_hits | hits | hits/s |\n| ad.atq_average_request_latency | time | seconds |\n| ad.atq_outstanding_requests | outstanding | requests |\n| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |\n| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |\n| ad.dra_replication_properties_updated | inbound, outbound | properties/s |\n| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |\n| ad.dra_replication_pending_syncs | pending | syncs |\n| ad.dra_replication_sync_requests | requests | requests/s |\n| ad.ds_threads | in_use | threads |\n| ad.ldap_last_bind_time | last_bind | seconds |\n| ad.binds | binds | binds/s |\n| ad.ldap_searches | searches | searches/s |\n| adfs.ad_login_connection_failures | connection | failures/s |\n| adfs.certificate_authentications | authentications | authentications/s |\n| adfs.db_artifact_failures | connection | failures/s |\n| adfs.db_artifact_query_time_seconds | query_time | seconds/s |\n| adfs.db_config_failures | connection | failures/s |\n| adfs.db_config_query_time_seconds | query_time | seconds/s |\n| adfs.device_authentications | authentications | authentications/s |\n| adfs.external_authentications | success, failure | authentications/s |\n| adfs.federated_authentications | authentications | 
authentications/s |\n| adfs.federation_metadata_requests | requests | requests/s |\n| adfs.oauth_authorization_requests | requests | requests/s |\n| adfs.oauth_client_authentications | success, failure | authentications/s |\n| adfs.oauth_client_credentials_requests | success, failure | requests/s |\n| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |\n| adfs.oauth_client_windows_authentications | success, failure | authentications/s |\n| adfs.oauth_logon_certificate_requests | success, failure | requests/s |\n| adfs.oauth_password_grant_requests | success, failure | requests/s |\n| adfs.oauth_token_requests_success | success | requests/s |\n| adfs.passive_requests | passive | requests/s |\n| adfs.passport_authentications | passport | authentications/s |\n| adfs.password_change_requests | success, failure | requests/s |\n| adfs.samlp_token_requests_success | success | requests/s |\n| adfs.sso_authentications | success, failure | authentications/s |\n| adfs.token_requests | requests | requests/s |\n| adfs.userpassword_authentications | success, failure | authentications/s |\n| adfs.windows_integrated_authentications | authentications | authentications/s |\n| adfs.wsfed_token_requests_success | success | requests/s |\n| adfs.wstrust_token_requests_success | success | requests/s |\n| exchange.activesync_ping_cmds_pending | pending | commands |\n| exchange.activesync_requests | received | requests/s |\n| exchange.activesync_sync_cmds | processed | commands/s |\n| exchange.autodiscover_requests | processed | requests/s |\n| exchange.avail_service_requests | serviced | requests/s |\n| exchange.owa_current_unique_users | logged-in | users |\n| exchange.owa_requests_total | handled | requests/s |\n| exchange.rpc_active_user_count | active | users |\n| exchange.rpc_avg_latency | latency | seconds |\n| exchange.rpc_connection_count | connections | connections |\n| exchange.rpc_operations | operations | operations/s |\n| exchange.rpc_requests | processed | requests |\n| exchange.rpc_user_count | users | users |\n| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_poison | low, high, none, normal | messages/s |\n| hyperv.vms_health | ok, critical | vms |\n| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |\n| hyperv.root_partition_attached_devices | attached | devices |\n| hyperv.root_partition_deposited_pages | deposited | pages |\n| hyperv.root_partition_skipped_interrupts | skipped | interrupts |\n| hyperv.root_partition_device_dma_errors | illegal_dma | requests |\n| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | requests |\n| 
hyperv.root_partition_device_interrupt_throttle_events | throttling | events |\n| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |\n| hyperv.root_partition_address_space | address_spaces | address spaces |\n| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |\n| hyperv.root_partition_virtual_tlb_pages | used | pages |\n\n### Per cpu core\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| core | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |\n| windows.cpu_core_interrupts | interrupts | interrupts/s |\n| windows.cpu_core_dpcs | dpcs | dpcs/s |\n| windows.cpu_core_cstate | c1, c2, c3 | percentage |\n\n### Per logical disk\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.logical_disk_utilization | free, used | bytes |\n| windows.logical_disk_bandwidth | read, write | bytes/s |\n| windows.logical_disk_operations | reads, writes | operations/s |\n| windows.logical_disk_latency | read, write | seconds |\n\n### Per network device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| nic | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.net_nic_bandwidth | received, sent | kilobits/s |\n| windows.net_nic_packets | received, sent | packets/s |\n| windows.net_nic_errors | inbound, outbound | errors/s |\n| windows.net_nic_discarded | inbound, outbound | discards/s |\n\n### Per thermalzone\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thermalzone | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.thermalzone_temperature | temperature | celsius |\n\n### Per service\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |\n| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |\n\n### Per website\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| website | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.website_traffic | received, sent | bytes/s |\n| iis.website_requests_rate | requests | requests/s |\n| iis.website_active_connections_count | active | connections |\n| iis.website_users_count | anonymous, non_anonymous | users |\n| iis.website_connection_attempts_rate | connection | attempts/s |\n| iis.website_isapi_extension_requests_count | isapi | requests |\n| iis.website_isapi_extension_requests_rate | isapi | requests/s |\n| iis.website_ftp_file_transfer_rate | received, sent | files/s |\n| iis.website_logon_attempts_rate | logon | attempts/s |\n| iis.website_errors_rate | document_locked, document_not_found | errors/s |\n| iis.website_uptime | document_locked, document_not_found | seconds |\n\n### Per mssql instance\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.instance_accessmethods_page_splits | 
page | splits/s |\n| mssql.instance_cache_hit_ratio | hit_ratio | percentage |\n| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |\n| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |\n| mssql.instance_bufman_iops | read, written | iops |\n| mssql.instance_blocked_processes | blocked | processes |\n| mssql.instance_user_connection | user | connections |\n| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |\n| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |\n| mssql.instance_memmgr_pending_memory_grants | pending | processes |\n| mssql.instance_memmgr_server_memory | memory | bytes |\n| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |\n| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |\n| mssql.instance_sqlstats_batch_requests | batch | requests/s |\n| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |\n| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |\n| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |\n\n### Per database\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n| database | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.database_active_transactions | active | transactions |\n| mssql.database_backup_restore_operations | backup | operations/s |\n| mssql.database_data_files_size | size | bytes |\n| mssql.database_log_flushed | flushed | bytes/s |\n| mssql.database_log_flushes | log | flushes/s |\n| mssql.database_transactions | transactions | transactions/s |\n| mssql.database_write_transactions | write | transactions/s |\n\n### Per certificate template\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cert_template | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adcs.cert_template_requests | requests | requests/s |\n| adcs.cert_template_failed_requests | failed | requests/s |\n| adcs.cert_template_issued_requests | issued | requests/s |\n| adcs.cert_template_pending_requests | pending | requests/s |\n| adcs.cert_template_request_processing_time | processing_time | seconds |\n| adcs.cert_template_retrievals | retrievals | retrievals/s |\n| adcs.cert_template_retrieval_processing_time | processing_time | seconds |\n| adcs.cert_template_request_cryptographic_signing_time | singing_time | seconds |\n| adcs.cert_template_request_policy_module_processing | processing_time | seconds |\n| adcs.cert_template_challenge_responses | challenge | responses/s |\n| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |\n| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |\n| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |\n\n### Per process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| process | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netframework.clrexception_thrown | exceptions | exceptions/s |\n| 
netframework.clrexception_filters | filters | filters/s |\n| netframework.clrexception_finallys | finallys | finallys/s |\n| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |\n| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |\n| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |\n| netframework.clrinterop_interop_stubs_created | created | stubs/s |\n| netframework.clrjit_methods | jit-compiled | methods/s |\n| netframework.clrjit_time | time | percentage |\n| netframework.clrjit_standard_failures | failures | failures/s |\n| netframework.clrjit_il_bytes | compiled_msil | bytes/s |\n| netframework.clrloading_loader_heap_size | committed | bytes |\n| netframework.clrloading_appdomains_loaded | loaded | domain/s |\n| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |\n| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |\n| netframework.clrloading_classes_loaded | loaded | classes/s |\n| netframework.clrloading_class_load_failures | class_load | failures/s |\n| netframework.clrlocksandthreads_queue_length | threads | threads/s |\n| netframework.clrlocksandthreads_current_logical_threads | logical | threads |\n| netframework.clrlocksandthreads_current_physical_threads | physical | threads |\n| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |\n| netframework.clrlocksandthreads_contentions | contentions | contentions/s |\n| netframework.clrmemory_allocated_bytes | allocated | bytes/s |\n| netframework.clrmemory_finalization_survivors | survived | objects |\n| netframework.clrmemory_heap_size | heap | bytes |\n| netframework.clrmemory_promoted | promoted | bytes |\n| netframework.clrmemory_number_gc_handles | used | handles |\n| netframework.clrmemory_collections | gc | gc/s |\n| netframework.clrmemory_induced_gc | gc | gc/s |\n| netframework.clrmemory_number_pinned_objects | pinned | objects |\n| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |\n| netframework.clrmemory_committed | committed | bytes |\n| netframework.clrmemory_reserved | reserved | bytes |\n| netframework.clrmemory_gc_time | time | percentage |\n| netframework.clrremoting_channels | registered | channels/s |\n| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |\n| netframework.clrremoting_context_bound_objects | allocated | objects/s |\n| netframework.clrremoting_context_proxies | objects | objects/s |\n| netframework.clrremoting_contexts | contexts | contexts |\n| netframework.clrremoting_remote_calls | rpc | calls/s |\n| netframework.clrsecurity_link_time_checks | linktime | checks/s |\n| netframework.clrsecurity_checks_time | time | percentage |\n| netframework.clrsecurity_stack_walk_depth | stack | depth |\n| netframework.clrsecurity_runtime_checks | runtime | checks/s |\n\n### Per exchange workload\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.workload_active_tasks | active | tasks |\n| exchange.workload_completed_tasks | completed | tasks/s |\n| exchange.workload_queued_tasks | queued | tasks/s |\n| exchange.workload_yielded_tasks | yielded | tasks/s |\n| exchange.workload_activity_status | active, paused | status |\n\n### Per ldap process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit 
|\n|:------|:----------|:----|\n| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |\n| exchange.ldap_read_time | read | seconds |\n| exchange.ldap_search_time | search | seconds |\n| exchange.ldap_write_time | write | seconds |\n| exchange.ldap_timeout_errors | timeout | errors/s |\n\n### Per http proxy\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.http_proxy_avg_auth_latency | latency | seconds |\n| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |\n| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |\n| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |\n| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |\n| exchange.http_proxy_requests | processed | requests/s |\n\n### Per vm\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_name | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_cpu_usage | gues, hypervisor, remote | percentage |\n| hyperv.vm_memory_physical | assigned_memory | MiB |\n| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |\n| hyperv.vm_memory_pressure_current | pressure | percentage |\n| hyperv.vm_vid_physical_pages_allocated | allocated | pages |\n| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |\n\n### Per vm device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_device_bytes | read, written | bytes/s |\n| hyperv.vm_device_operations | read, write | operations/s |\n| hyperv.vm_device_errors | errors | errors/s |\n\n### Per vm interface\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_interface | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_interface_bytes | received, sent | bytes/s |\n| hyperv.vm_interface_packets | received, sent | packets/s |\n| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |\n\n### Per vswitch\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vswitch | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vswitch_bytes | received, sent | bytes/s |\n| hyperv.vswitch_packets | received, sent | packets/s |\n| hyperv.vswitch_directed_packets | received, sent | packets/s |\n| hyperv.vswitch_broadcast_packets | received, sent | packets/s |\n| hyperv.vswitch_multicast_packets | received, sent | packets/s |\n| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_packets_flooded | flooded | packets/s |\n| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |\n| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |\n\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThe collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\nSupported collectors:\n\n- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)\n- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)\n- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)\n- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)\n- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)\n- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)\n- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)\n- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)\n- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)\n- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)\n- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)\n- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)\n- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)\n- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)\n- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)\n- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)\n- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-exceptions)\n- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-interop)\n- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-jit)\n- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-loading)\n- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-locks-and-threads)\n- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-memory)\n- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-remoting)\n- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)\n- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)\n\n\n### Per Active Directory instance\n\nThese metrics refer to the entire monitored host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |\n| windows.memory_utilization | available, used | bytes |\n| windows.memory_page_faults | page_faults | events/s |\n| 
windows.memory_swap_utilization | available, used | bytes |\n| windows.memory_swap_operations | read, write | operations/s |\n| windows.memory_swap_pages | read, written | pages/s |\n| windows.memory_cached | cached | KiB |\n| windows.memory_cache_faults | cache_faults | events/s |\n| windows.memory_system_pool | paged, non-paged | bytes |\n| windows.tcp_conns_established | ipv4, ipv6 | connections |\n| windows.tcp_conns_active | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |\n| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |\n| windows.tcp_segments_received | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |\n| windows.os_processes | processes | number |\n| windows.os_users | users | users |\n| windows.os_visible_memory_usage | free, used | bytes |\n| windows.os_paging_files_usage | free, used | bytes |\n| windows.system_threads | threads | number |\n| windows.system_uptime | time | seconds |\n| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |\n| windows.processes_cpu_utilization | a dimension per process | percentage |\n| windows.processes_handles | a dimension per process | handles |\n| windows.processes_io_bytes | a dimension per process | bytes/s |\n| windows.processes_io_operations | a dimension per process | operations/s |\n| windows.processes_page_faults | a dimension per process | pgfaults/s |\n| windows.processes_page_file_bytes | a dimension per process | bytes |\n| windows.processes_pool_bytes | a dimension per process | bytes |\n| windows.processes_threads | a dimension per process | threads |\n| ad.database_operations | add, delete, modify, recycle | operations/s |\n| ad.directory_operations | read, write, search | operations/s |\n| ad.name_cache_lookups | lookups | lookups/s |\n| ad.name_cache_hits | hits | hits/s |\n| ad.atq_average_request_latency | time | seconds |\n| ad.atq_outstanding_requests | outstanding | requests |\n| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |\n| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |\n| ad.dra_replication_properties_updated | inbound, outbound | properties/s |\n| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |\n| ad.dra_replication_pending_syncs | pending | syncs |\n| ad.dra_replication_sync_requests | requests | requests/s |\n| ad.ds_threads | in_use | threads |\n| ad.ldap_last_bind_time | last_bind | seconds |\n| ad.binds | binds | binds/s |\n| ad.ldap_searches | searches | searches/s |\n| adfs.ad_login_connection_failures | connection | failures/s |\n| adfs.certificate_authentications | authentications | authentications/s |\n| adfs.db_artifact_failures | connection | failures/s |\n| adfs.db_artifact_query_time_seconds | query_time | seconds/s |\n| adfs.db_config_failures | connection | failures/s |\n| adfs.db_config_query_time_seconds | query_time | seconds/s |\n| adfs.device_authentications | authentications | authentications/s |\n| adfs.external_authentications | success, failure | authentications/s |\n| 
adfs.federated_authentications | authentications | authentications/s |\n| adfs.federation_metadata_requests | requests | requests/s |\n| adfs.oauth_authorization_requests | requests | requests/s |\n| adfs.oauth_client_authentications | success, failure | authentications/s |\n| adfs.oauth_client_credentials_requests | success, failure | requests/s |\n| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |\n| adfs.oauth_client_windows_authentications | success, failure | authentications/s |\n| adfs.oauth_logon_certificate_requests | success, failure | requests/s |\n| adfs.oauth_password_grant_requests | success, failure | requests/s |\n| adfs.oauth_token_requests_success | success | requests/s |\n| adfs.passive_requests | passive | requests/s |\n| adfs.passport_authentications | passport | authentications/s |\n| adfs.password_change_requests | success, failure | requests/s |\n| adfs.samlp_token_requests_success | success | requests/s |\n| adfs.sso_authentications | success, failure | authentications/s |\n| adfs.token_requests | requests | requests/s |\n| adfs.userpassword_authentications | success, failure | authentications/s |\n| adfs.windows_integrated_authentications | authentications | authentications/s |\n| adfs.wsfed_token_requests_success | success | requests/s |\n| adfs.wstrust_token_requests_success | success | requests/s |\n| exchange.activesync_ping_cmds_pending | pending | commands |\n| exchange.activesync_requests | received | requests/s |\n| exchange.activesync_sync_cmds | processed | commands/s |\n| exchange.autodiscover_requests | processed | requests/s |\n| exchange.avail_service_requests | serviced | requests/s |\n| exchange.owa_current_unique_users | logged-in | users |\n| exchange.owa_requests_total | handled | requests/s |\n| exchange.rpc_active_user_count | active | users |\n| exchange.rpc_avg_latency | latency | seconds |\n| exchange.rpc_connection_count | connections | connections |\n| exchange.rpc_operations | operations | operations/s |\n| exchange.rpc_requests | processed | requests |\n| exchange.rpc_user_count | users | users |\n| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_poison | low, high, none, normal | messages/s |\n| hyperv.vms_health | ok, critical | vms |\n| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |\n| hyperv.root_partition_attached_devices | attached | devices |\n| hyperv.root_partition_deposited_pages | deposited | pages |\n| hyperv.root_partition_skipped_interrupts | skipped | interrupts |\n| hyperv.root_partition_device_dma_errors | illegal_dma | requests |\n| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | 
requests |\n| hyperv.root_partition_device_interrupt_throttle_events | throttling | events |\n| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |\n| hyperv.root_partition_address_space | address_spaces | address spaces |\n| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |\n| hyperv.root_partition_virtual_tlb_pages | used | pages |\n\n### Per cpu core\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| core | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |\n| windows.cpu_core_interrupts | interrupts | interrupts/s |\n| windows.cpu_core_dpcs | dpcs | dpcs/s |\n| windows.cpu_core_cstate | c1, c2, c3 | percentage |\n\n### Per logical disk\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.logical_disk_utilization | free, used | bytes |\n| windows.logical_disk_bandwidth | read, write | bytes/s |\n| windows.logical_disk_operations | reads, writes | operations/s |\n| windows.logical_disk_latency | read, write | seconds |\n\n### Per network device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| nic | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.net_nic_bandwidth | received, sent | kilobits/s |\n| windows.net_nic_packets | received, sent | packets/s |\n| windows.net_nic_errors | inbound, outbound | errors/s |\n| windows.net_nic_discarded | inbound, outbound | discards/s |\n\n### Per thermalzone\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thermalzone | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.thermalzone_temperature | temperature | celsius |\n\n### Per service\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |\n| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |\n\n### Per website\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| website | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.website_traffic | received, sent | bytes/s |\n| iis.website_requests_rate | requests | requests/s |\n| iis.website_active_connections_count | active | connections |\n| iis.website_users_count | anonymous, non_anonymous | users |\n| iis.website_connection_attempts_rate | connection | attempts/s |\n| iis.website_isapi_extension_requests_count | isapi | requests |\n| iis.website_isapi_extension_requests_rate | isapi | requests/s |\n| iis.website_ftp_file_transfer_rate | received, sent | files/s |\n| iis.website_logon_attempts_rate | logon | attempts/s |\n| iis.website_errors_rate | document_locked, document_not_found | errors/s |\n| iis.website_uptime | document_locked, document_not_found | seconds |\n\n### Per mssql instance\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| 
mssql.instance_accessmethods_page_splits | page | splits/s |\n| mssql.instance_cache_hit_ratio | hit_ratio | percentage |\n| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |\n| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |\n| mssql.instance_bufman_iops | read, written | iops |\n| mssql.instance_blocked_processes | blocked | processes |\n| mssql.instance_user_connection | user | connections |\n| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |\n| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |\n| mssql.instance_memmgr_pending_memory_grants | pending | processes |\n| mssql.instance_memmgr_server_memory | memory | bytes |\n| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |\n| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |\n| mssql.instance_sqlstats_batch_requests | batch | requests/s |\n| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |\n| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |\n| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |\n\n### Per database\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n| database | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.database_active_transactions | active | transactions |\n| mssql.database_backup_restore_operations | backup | operations/s |\n| mssql.database_data_files_size | size | bytes |\n| mssql.database_log_flushed | flushed | bytes/s |\n| mssql.database_log_flushes | log | flushes/s |\n| mssql.database_transactions | transactions | transactions/s |\n| mssql.database_write_transactions | write | transactions/s |\n\n### Per certificate template\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cert_template | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adcs.cert_template_requests | requests | requests/s |\n| adcs.cert_template_failed_requests | failed | requests/s |\n| adcs.cert_template_issued_requests | issued | requests/s |\n| adcs.cert_template_pending_requests | pending | requests/s |\n| adcs.cert_template_request_processing_time | processing_time | seconds |\n| adcs.cert_template_retrievals | retrievals | retrievals/s |\n| adcs.cert_template_retrieval_processing_time | processing_time | seconds |\n| adcs.cert_template_request_cryptographic_signing_time | singing_time | seconds |\n| adcs.cert_template_request_policy_module_processing | processing_time | seconds |\n| adcs.cert_template_challenge_responses | challenge | responses/s |\n| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |\n| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |\n| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |\n\n### Per process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| process | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netframework.clrexception_thrown | exceptions | 
exceptions/s |\n| netframework.clrexception_filters | filters | filters/s |\n| netframework.clrexception_finallys | finallys | finallys/s |\n| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |\n| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |\n| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |\n| netframework.clrinterop_interop_stubs_created | created | stubs/s |\n| netframework.clrjit_methods | jit-compiled | methods/s |\n| netframework.clrjit_time | time | percentage |\n| netframework.clrjit_standard_failures | failures | failures/s |\n| netframework.clrjit_il_bytes | compiled_msil | bytes/s |\n| netframework.clrloading_loader_heap_size | committed | bytes |\n| netframework.clrloading_appdomains_loaded | loaded | domain/s |\n| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |\n| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |\n| netframework.clrloading_classes_loaded | loaded | classes/s |\n| netframework.clrloading_class_load_failures | class_load | failures/s |\n| netframework.clrlocksandthreads_queue_length | threads | threads/s |\n| netframework.clrlocksandthreads_current_logical_threads | logical | threads |\n| netframework.clrlocksandthreads_current_physical_threads | physical | threads |\n| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |\n| netframework.clrlocksandthreads_contentions | contentions | contentions/s |\n| netframework.clrmemory_allocated_bytes | allocated | bytes/s |\n| netframework.clrmemory_finalization_survivors | survived | objects |\n| netframework.clrmemory_heap_size | heap | bytes |\n| netframework.clrmemory_promoted | promoted | bytes |\n| netframework.clrmemory_number_gc_handles | used | handles |\n| netframework.clrmemory_collections | gc | gc/s |\n| netframework.clrmemory_induced_gc | gc | gc/s |\n| netframework.clrmemory_number_pinned_objects | pinned | objects |\n| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |\n| netframework.clrmemory_committed | committed | bytes |\n| netframework.clrmemory_reserved | reserved | bytes |\n| netframework.clrmemory_gc_time | time | percentage |\n| netframework.clrremoting_channels | registered | channels/s |\n| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |\n| netframework.clrremoting_context_bound_objects | allocated | objects/s |\n| netframework.clrremoting_context_proxies | objects | objects/s |\n| netframework.clrremoting_contexts | contexts | contexts |\n| netframework.clrremoting_remote_calls | rpc | calls/s |\n| netframework.clrsecurity_link_time_checks | linktime | checks/s |\n| netframework.clrsecurity_checks_time | time | percentage |\n| netframework.clrsecurity_stack_walk_depth | stack | depth |\n| netframework.clrsecurity_runtime_checks | runtime | checks/s |\n\n### Per exchange workload\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.workload_active_tasks | active | tasks |\n| exchange.workload_completed_tasks | completed | tasks/s |\n| exchange.workload_queued_tasks | queued | tasks/s |\n| exchange.workload_yielded_tasks | yielded | tasks/s |\n| exchange.workload_activity_status | active, paused | status |\n\n### Per ldap process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit 
|\n|:------|:----------|:----|\n| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |\n| exchange.ldap_read_time | read | seconds |\n| exchange.ldap_search_time | search | seconds |\n| exchange.ldap_write_time | write | seconds |\n| exchange.ldap_timeout_errors | timeout | errors/s |\n\n### Per http proxy\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.http_proxy_avg_auth_latency | latency | seconds |\n| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |\n| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |\n| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |\n| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |\n| exchange.http_proxy_requests | processed | requests/s |\n\n### Per vm\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_name | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_cpu_usage | gues, hypervisor, remote | percentage |\n| hyperv.vm_memory_physical | assigned_memory | MiB |\n| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |\n| hyperv.vm_memory_pressure_current | pressure | percentage |\n| hyperv.vm_vid_physical_pages_allocated | allocated | pages |\n| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |\n\n### Per vm device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_device_bytes | read, written | bytes/s |\n| hyperv.vm_device_operations | read, write | operations/s |\n| hyperv.vm_device_errors | errors | errors/s |\n\n### Per vm interface\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_interface | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_interface_bytes | received, sent | bytes/s |\n| hyperv.vm_interface_packets | received, sent | packets/s |\n| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |\n\n### Per vswitch\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vswitch | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vswitch_bytes | received, sent | bytes/s |\n| hyperv.vswitch_packets | received, sent | packets/s |\n| hyperv.vswitch_directed_packets | received, sent | packets/s |\n| hyperv.vswitch_broadcast_packets | received, sent | packets/s |\n| hyperv.vswitch_multicast_packets | received, sent | packets/s |\n| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_packets_flooded | flooded | packets/s |\n| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |\n| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-windows-Active_Directory", "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/windows/metadata.yaml", @@ -17238,10 +17619,10 @@ export const integrations = [ } }, "overview": "# HyperV\n\nPlugin: go.d.plugin\nModule: windows\n\n## Overview\n\nThis collector monitors the performance of Windows machines, collects both host metrics and metrics from various 
Windows applications (e.g. Active Directory, MSSQL).\n\n\nIt collects metrics by periodically sending HTTP requests to the [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt detects Windows exporter instances running on localhost (requires using the [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)).\n\nUsing the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nData collection affects the CPU usage of the Windows host. CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Windows exporter\n\nTo install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/windows.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/windows.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n url: https://192.0.2.1:9182/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Virtual Node\n\nThe Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.\nYou can create a virtual node for all your Windows machines and control them as separate entities.\n\nTo make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:\n\n> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.\n\n```yaml\n# /etc/netdata/vnodes/vnodes.conf\n- hostname: win_server\n guid: \n```\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n vnode: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from multiple remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server1\n url: http://192.0.2.1:9182/metrics\n\n - name: win_server2\n url: http://192.0.2.2:9182/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Windows exporter\n\nTo install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/windows.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/windows.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. 
| | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n url: https://192.0.2.1:9182/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Virtual Node\n\nThe Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.\nYou can create a virtual node for all your Windows machines and control them as separate entities.\n\nTo make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:\n\n> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.\n\n```yaml\n# /etc/netdata/vnodes/vnodes.conf\n- hostname: win_server\n guid: \n```\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n vnode: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from multiple remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server1\n url: http://192.0.2.1:9182/metrics\n\n - name: win_server2\n url: http://192.0.2.2:9182/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `windows` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m windows\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `windows` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep windows\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep windows /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named "netdata" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep windows\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ windows_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.cpu_utilization_total | average CPU utilization over the last 10 minutes |\n| [ windows_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.memory_utilization | memory utilization |\n| [ windows_inbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of inbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of outbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_inbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of inbound errors for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of outbound errors for the network interface in the last 10 minutes |\n| [ windows_disk_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.logical_disk_space_usage | disk space utilization |\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThe collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\nSupported collectors:\n\n- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)\n- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)\n- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)\n- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)\n- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)\n- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)\n- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)\n- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)\n- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)\n- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)\n- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)\n- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)\n- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)\n- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)\n- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)\n- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)\n- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md)\n- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md)\n- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md)\n- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md)\n- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md)\n- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md)\n- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md)\n- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)\n- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)\n\n\n### Per Active Directory instance\n\nThese metrics refer to the entire monitored host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |\n| windows.memory_utilization | available, used | bytes |\n| windows.memory_page_faults | page_faults | events/s |\n| windows.memory_swap_utilization 
| available, used | bytes |\n| windows.memory_swap_operations | read, write | operations/s |\n| windows.memory_swap_pages | read, written | pages/s |\n| windows.memory_cached | cached | KiB |\n| windows.memory_cache_faults | cache_faults | events/s |\n| windows.memory_system_pool | paged, non-paged | bytes |\n| windows.tcp_conns_established | ipv4, ipv6 | connections |\n| windows.tcp_conns_active | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |\n| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |\n| windows.tcp_segments_received | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |\n| windows.os_processes | processes | number |\n| windows.os_users | users | users |\n| windows.os_visible_memory_usage | free, used | bytes |\n| windows.os_paging_files_usage | free, used | bytes |\n| windows.system_threads | threads | number |\n| windows.system_uptime | time | seconds |\n| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |\n| windows.processes_cpu_utilization | a dimension per process | percentage |\n| windows.processes_handles | a dimension per process | handles |\n| windows.processes_io_bytes | a dimension per process | bytes/s |\n| windows.processes_io_operations | a dimension per process | operations/s |\n| windows.processes_page_faults | a dimension per process | pgfaults/s |\n| windows.processes_page_file_bytes | a dimension per process | bytes |\n| windows.processes_pool_bytes | a dimension per process | bytes |\n| windows.processes_threads | a dimension per process | threads |\n| ad.database_operations | add, delete, modify, recycle | operations/s |\n| ad.directory_operations | read, write, search | operations/s |\n| ad.name_cache_lookups | lookups | lookups/s |\n| ad.name_cache_hits | hits | hits/s |\n| ad.atq_average_request_latency | time | seconds |\n| ad.atq_outstanding_requests | outstanding | requests |\n| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |\n| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |\n| ad.dra_replication_properties_updated | inbound, outbound | properties/s |\n| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |\n| ad.dra_replication_pending_syncs | pending | syncs |\n| ad.dra_replication_sync_requests | requests | requests/s |\n| ad.ds_threads | in_use | threads |\n| ad.ldap_last_bind_time | last_bind | seconds |\n| ad.binds | binds | binds/s |\n| ad.ldap_searches | searches | searches/s |\n| adfs.ad_login_connection_failures | connection | failures/s |\n| adfs.certificate_authentications | authentications | authentications/s |\n| adfs.db_artifact_failures | connection | failures/s |\n| adfs.db_artifact_query_time_seconds | query_time | seconds/s |\n| adfs.db_config_failures | connection | failures/s |\n| adfs.db_config_query_time_seconds | query_time | seconds/s |\n| adfs.device_authentications | authentications | authentications/s |\n| adfs.external_authentications | success, failure | authentications/s |\n| adfs.federated_authentications | authentications | 
authentications/s |\n| adfs.federation_metadata_requests | requests | requests/s |\n| adfs.oauth_authorization_requests | requests | requests/s |\n| adfs.oauth_client_authentications | success, failure | authentications/s |\n| adfs.oauth_client_credentials_requests | success, failure | requests/s |\n| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |\n| adfs.oauth_client_windows_authentications | success, failure | authentications/s |\n| adfs.oauth_logon_certificate_requests | success, failure | requests/s |\n| adfs.oauth_password_grant_requests | success, failure | requests/s |\n| adfs.oauth_token_requests_success | success | requests/s |\n| adfs.passive_requests | passive | requests/s |\n| adfs.passport_authentications | passport | authentications/s |\n| adfs.password_change_requests | success, failure | requests/s |\n| adfs.samlp_token_requests_success | success | requests/s |\n| adfs.sso_authentications | success, failure | authentications/s |\n| adfs.token_requests | requests | requests/s |\n| adfs.userpassword_authentications | success, failure | authentications/s |\n| adfs.windows_integrated_authentications | authentications | authentications/s |\n| adfs.wsfed_token_requests_success | success | requests/s |\n| adfs.wstrust_token_requests_success | success | requests/s |\n| exchange.activesync_ping_cmds_pending | pending | commands |\n| exchange.activesync_requests | received | requests/s |\n| exchange.activesync_sync_cmds | processed | commands/s |\n| exchange.autodiscover_requests | processed | requests/s |\n| exchange.avail_service_requests | serviced | requests/s |\n| exchange.owa_current_unique_users | logged-in | users |\n| exchange.owa_requests_total | handled | requests/s |\n| exchange.rpc_active_user_count | active | users |\n| exchange.rpc_avg_latency | latency | seconds |\n| exchange.rpc_connection_count | connections | connections |\n| exchange.rpc_operations | operations | operations/s |\n| exchange.rpc_requests | processed | requests |\n| exchange.rpc_user_count | users | users |\n| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_poison | low, high, none, normal | messages/s |\n| hyperv.vms_health | ok, critical | vms |\n| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |\n| hyperv.root_partition_attached_devices | attached | devices |\n| hyperv.root_partition_deposited_pages | deposited | pages |\n| hyperv.root_partition_skipped_interrupts | skipped | interrupts |\n| hyperv.root_partition_device_dma_errors | illegal_dma | requests |\n| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | requests |\n| 
hyperv.root_partition_device_interrupt_throttle_events | throttling | events |\n| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |\n| hyperv.root_partition_address_space | address_spaces | address spaces |\n| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |\n| hyperv.root_partition_virtual_tlb_pages | used | pages |\n\n### Per cpu core\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| core | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |\n| windows.cpu_core_interrupts | interrupts | interrupts/s |\n| windows.cpu_core_dpcs | dpcs | dpcs/s |\n| windows.cpu_core_cstate | c1, c2, c3 | percentage |\n\n### Per logical disk\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.logical_disk_utilization | free, used | bytes |\n| windows.logical_disk_bandwidth | read, write | bytes/s |\n| windows.logical_disk_operations | reads, writes | operations/s |\n| windows.logical_disk_latency | read, write | seconds |\n\n### Per network device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| nic | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.net_nic_bandwidth | received, sent | kilobits/s |\n| windows.net_nic_packets | received, sent | packets/s |\n| windows.net_nic_errors | inbound, outbound | errors/s |\n| windows.net_nic_discarded | inbound, outbound | discards/s |\n\n### Per thermalzone\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thermalzone | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.thermalzone_temperature | temperature | celsius |\n\n### Per service\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |\n| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |\n\n### Per website\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| website | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.website_traffic | received, sent | bytes/s |\n| iis.website_requests_rate | requests | requests/s |\n| iis.website_active_connections_count | active | connections |\n| iis.website_users_count | anonymous, non_anonymous | users |\n| iis.website_connection_attempts_rate | connection | attempts/s |\n| iis.website_isapi_extension_requests_count | isapi | requests |\n| iis.website_isapi_extension_requests_rate | isapi | requests/s |\n| iis.website_ftp_file_transfer_rate | received, sent | files/s |\n| iis.website_logon_attempts_rate | logon | attempts/s |\n| iis.website_errors_rate | document_locked, document_not_found | errors/s |\n| iis.website_uptime | document_locked, document_not_found | seconds |\n\n### Per mssql instance\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.instance_accessmethods_page_splits | 
page | splits/s |\n| mssql.instance_cache_hit_ratio | hit_ratio | percentage |\n| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |\n| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |\n| mssql.instance_bufman_iops | read, written | iops |\n| mssql.instance_blocked_processes | blocked | processes |\n| mssql.instance_user_connection | user | connections |\n| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |\n| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |\n| mssql.instance_memmgr_pending_memory_grants | pending | processes |\n| mssql.instance_memmgr_server_memory | memory | bytes |\n| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |\n| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |\n| mssql.instance_sqlstats_batch_requests | batch | requests/s |\n| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |\n| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |\n| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |\n\n### Per database\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n| database | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.database_active_transactions | active | transactions |\n| mssql.database_backup_restore_operations | backup | operations/s |\n| mssql.database_data_files_size | size | bytes |\n| mssql.database_log_flushed | flushed | bytes/s |\n| mssql.database_log_flushes | log | flushes/s |\n| mssql.database_transactions | transactions | transactions/s |\n| mssql.database_write_transactions | write | transactions/s |\n\n### Per certificate template\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cert_template | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adcs.cert_template_requests | requests | requests/s |\n| adcs.cert_template_failed_requests | failed | requests/s |\n| adcs.cert_template_issued_requests | issued | requests/s |\n| adcs.cert_template_pending_requests | pending | requests/s |\n| adcs.cert_template_request_processing_time | processing_time | seconds |\n| adcs.cert_template_retrievals | retrievals | retrievals/s |\n| adcs.cert_template_retrieval_processing_time | processing_time | seconds |\n| adcs.cert_template_request_cryptographic_signing_time | singing_time | seconds |\n| adcs.cert_template_request_policy_module_processing | processing_time | seconds |\n| adcs.cert_template_challenge_responses | challenge | responses/s |\n| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |\n| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |\n| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |\n\n### Per process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| process | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netframework.clrexception_thrown | exceptions | exceptions/s |\n| 
netframework.clrexception_filters | filters | filters/s |\n| netframework.clrexception_finallys | finallys | finallys/s |\n| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |\n| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |\n| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |\n| netframework.clrinterop_interop_stubs_created | created | stubs/s |\n| netframework.clrjit_methods | jit-compiled | methods/s |\n| netframework.clrjit_time | time | percentage |\n| netframework.clrjit_standard_failures | failures | failures/s |\n| netframework.clrjit_il_bytes | compiled_msil | bytes/s |\n| netframework.clrloading_loader_heap_size | committed | bytes |\n| netframework.clrloading_appdomains_loaded | loaded | domain/s |\n| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |\n| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |\n| netframework.clrloading_classes_loaded | loaded | classes/s |\n| netframework.clrloading_class_load_failures | class_load | failures/s |\n| netframework.clrlocksandthreads_queue_length | threads | threads/s |\n| netframework.clrlocksandthreads_current_logical_threads | logical | threads |\n| netframework.clrlocksandthreads_current_physical_threads | physical | threads |\n| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |\n| netframework.clrlocksandthreads_contentions | contentions | contentions/s |\n| netframework.clrmemory_allocated_bytes | allocated | bytes/s |\n| netframework.clrmemory_finalization_survivors | survived | objects |\n| netframework.clrmemory_heap_size | heap | bytes |\n| netframework.clrmemory_promoted | promoted | bytes |\n| netframework.clrmemory_number_gc_handles | used | handles |\n| netframework.clrmemory_collections | gc | gc/s |\n| netframework.clrmemory_induced_gc | gc | gc/s |\n| netframework.clrmemory_number_pinned_objects | pinned | objects |\n| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |\n| netframework.clrmemory_committed | committed | bytes |\n| netframework.clrmemory_reserved | reserved | bytes |\n| netframework.clrmemory_gc_time | time | percentage |\n| netframework.clrremoting_channels | registered | channels/s |\n| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |\n| netframework.clrremoting_context_bound_objects | allocated | objects/s |\n| netframework.clrremoting_context_proxies | objects | objects/s |\n| netframework.clrremoting_contexts | contexts | contexts |\n| netframework.clrremoting_remote_calls | rpc | calls/s |\n| netframework.clrsecurity_link_time_checks | linktime | checks/s |\n| netframework.clrsecurity_checks_time | time | percentage |\n| netframework.clrsecurity_stack_walk_depth | stack | depth |\n| netframework.clrsecurity_runtime_checks | runtime | checks/s |\n\n### Per exchange workload\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.workload_active_tasks | active | tasks |\n| exchange.workload_completed_tasks | completed | tasks/s |\n| exchange.workload_queued_tasks | queued | tasks/s |\n| exchange.workload_yielded_tasks | yielded | tasks/s |\n| exchange.workload_activity_status | active, paused | status |\n\n### Per ldap process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit 
|\n|:------|:----------|:----|\n| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |\n| exchange.ldap_read_time | read | seconds |\n| exchange.ldap_search_time | search | seconds |\n| exchange.ldap_write_time | write | seconds |\n| exchange.ldap_timeout_errors | timeout | errors/s |\n\n### Per http proxy\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.http_proxy_avg_auth_latency | latency | seconds |\n| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |\n| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |\n| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |\n| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |\n| exchange.http_proxy_requests | processed | requests/s |\n\n### Per vm\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_name | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_cpu_usage | gues, hypervisor, remote | percentage |\n| hyperv.vm_memory_physical | assigned_memory | MiB |\n| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |\n| hyperv.vm_memory_pressure_current | pressure | percentage |\n| hyperv.vm_vid_physical_pages_allocated | allocated | pages |\n| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |\n\n### Per vm device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_device_bytes | read, written | bytes/s |\n| hyperv.vm_device_operations | read, write | operations/s |\n| hyperv.vm_device_errors | errors | errors/s |\n\n### Per vm interface\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_interface | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_interface_bytes | received, sent | bytes/s |\n| hyperv.vm_interface_packets | received, sent | packets/s |\n| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |\n\n### Per vswitch\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vswitch | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vswitch_bytes | received, sent | bytes/s |\n| hyperv.vswitch_packets | received, sent | packets/s |\n| hyperv.vswitch_directed_packets | received, sent | packets/s |\n| hyperv.vswitch_broadcast_packets | received, sent | packets/s |\n| hyperv.vswitch_multicast_packets | received, sent | packets/s |\n| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_packets_flooded | flooded | packets/s |\n| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |\n| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |\n\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThe collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\nSupported collectors:\n\n- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)\n- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)\n- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)\n- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)\n- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)\n- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)\n- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)\n- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)\n- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)\n- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)\n- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)\n- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)\n- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)\n- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)\n- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)\n- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)\n- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-exceptions)\n- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-interop)\n- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-jit)\n- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-loading)\n- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-locks-and-threads)\n- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-memory)\n- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-remoting)\n- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)\n- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)\n\n\n### Per Active Directory instance\n\nThese metrics refer to the entire monitored host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |\n| windows.memory_utilization | available, used | bytes |\n| windows.memory_page_faults | page_faults | events/s |\n| 
windows.memory_swap_utilization | available, used | bytes |\n| windows.memory_swap_operations | read, write | operations/s |\n| windows.memory_swap_pages | read, written | pages/s |\n| windows.memory_cached | cached | KiB |\n| windows.memory_cache_faults | cache_faults | events/s |\n| windows.memory_system_pool | paged, non-paged | bytes |\n| windows.tcp_conns_established | ipv4, ipv6 | connections |\n| windows.tcp_conns_active | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |\n| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |\n| windows.tcp_segments_received | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |\n| windows.os_processes | processes | number |\n| windows.os_users | users | users |\n| windows.os_visible_memory_usage | free, used | bytes |\n| windows.os_paging_files_usage | free, used | bytes |\n| windows.system_threads | threads | number |\n| windows.system_uptime | time | seconds |\n| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |\n| windows.processes_cpu_utilization | a dimension per process | percentage |\n| windows.processes_handles | a dimension per process | handles |\n| windows.processes_io_bytes | a dimension per process | bytes/s |\n| windows.processes_io_operations | a dimension per process | operations/s |\n| windows.processes_page_faults | a dimension per process | pgfaults/s |\n| windows.processes_page_file_bytes | a dimension per process | bytes |\n| windows.processes_pool_bytes | a dimension per process | bytes |\n| windows.processes_threads | a dimension per process | threads |\n| ad.database_operations | add, delete, modify, recycle | operations/s |\n| ad.directory_operations | read, write, search | operations/s |\n| ad.name_cache_lookups | lookups | lookups/s |\n| ad.name_cache_hits | hits | hits/s |\n| ad.atq_average_request_latency | time | seconds |\n| ad.atq_outstanding_requests | outstanding | requests |\n| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |\n| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |\n| ad.dra_replication_properties_updated | inbound, outbound | properties/s |\n| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |\n| ad.dra_replication_pending_syncs | pending | syncs |\n| ad.dra_replication_sync_requests | requests | requests/s |\n| ad.ds_threads | in_use | threads |\n| ad.ldap_last_bind_time | last_bind | seconds |\n| ad.binds | binds | binds/s |\n| ad.ldap_searches | searches | searches/s |\n| adfs.ad_login_connection_failures | connection | failures/s |\n| adfs.certificate_authentications | authentications | authentications/s |\n| adfs.db_artifact_failures | connection | failures/s |\n| adfs.db_artifact_query_time_seconds | query_time | seconds/s |\n| adfs.db_config_failures | connection | failures/s |\n| adfs.db_config_query_time_seconds | query_time | seconds/s |\n| adfs.device_authentications | authentications | authentications/s |\n| adfs.external_authentications | success, failure | authentications/s |\n| 
adfs.federated_authentications | authentications | authentications/s |\n| adfs.federation_metadata_requests | requests | requests/s |\n| adfs.oauth_authorization_requests | requests | requests/s |\n| adfs.oauth_client_authentications | success, failure | authentications/s |\n| adfs.oauth_client_credentials_requests | success, failure | requests/s |\n| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |\n| adfs.oauth_client_windows_authentications | success, failure | authentications/s |\n| adfs.oauth_logon_certificate_requests | success, failure | requests/s |\n| adfs.oauth_password_grant_requests | success, failure | requests/s |\n| adfs.oauth_token_requests_success | success | requests/s |\n| adfs.passive_requests | passive | requests/s |\n| adfs.passport_authentications | passport | authentications/s |\n| adfs.password_change_requests | success, failure | requests/s |\n| adfs.samlp_token_requests_success | success | requests/s |\n| adfs.sso_authentications | success, failure | authentications/s |\n| adfs.token_requests | requests | requests/s |\n| adfs.userpassword_authentications | success, failure | authentications/s |\n| adfs.windows_integrated_authentications | authentications | authentications/s |\n| adfs.wsfed_token_requests_success | success | requests/s |\n| adfs.wstrust_token_requests_success | success | requests/s |\n| exchange.activesync_ping_cmds_pending | pending | commands |\n| exchange.activesync_requests | received | requests/s |\n| exchange.activesync_sync_cmds | processed | commands/s |\n| exchange.autodiscover_requests | processed | requests/s |\n| exchange.avail_service_requests | serviced | requests/s |\n| exchange.owa_current_unique_users | logged-in | users |\n| exchange.owa_requests_total | handled | requests/s |\n| exchange.rpc_active_user_count | active | users |\n| exchange.rpc_avg_latency | latency | seconds |\n| exchange.rpc_connection_count | connections | connections |\n| exchange.rpc_operations | operations | operations/s |\n| exchange.rpc_requests | processed | requests |\n| exchange.rpc_user_count | users | users |\n| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_poison | low, high, none, normal | messages/s |\n| hyperv.vms_health | ok, critical | vms |\n| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |\n| hyperv.root_partition_attached_devices | attached | devices |\n| hyperv.root_partition_deposited_pages | deposited | pages |\n| hyperv.root_partition_skipped_interrupts | skipped | interrupts |\n| hyperv.root_partition_device_dma_errors | illegal_dma | requests |\n| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | 
requests |\n| hyperv.root_partition_device_interrupt_throttle_events | throttling | events |\n| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |\n| hyperv.root_partition_address_space | address_spaces | address spaces |\n| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |\n| hyperv.root_partition_virtual_tlb_pages | used | pages |\n\n### Per cpu core\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| core | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |\n| windows.cpu_core_interrupts | interrupts | interrupts/s |\n| windows.cpu_core_dpcs | dpcs | dpcs/s |\n| windows.cpu_core_cstate | c1, c2, c3 | percentage |\n\n### Per logical disk\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.logical_disk_utilization | free, used | bytes |\n| windows.logical_disk_bandwidth | read, write | bytes/s |\n| windows.logical_disk_operations | reads, writes | operations/s |\n| windows.logical_disk_latency | read, write | seconds |\n\n### Per network device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| nic | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.net_nic_bandwidth | received, sent | kilobits/s |\n| windows.net_nic_packets | received, sent | packets/s |\n| windows.net_nic_errors | inbound, outbound | errors/s |\n| windows.net_nic_discarded | inbound, outbound | discards/s |\n\n### Per thermalzone\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thermalzone | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.thermalzone_temperature | temperature | celsius |\n\n### Per service\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |\n| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |\n\n### Per website\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| website | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.website_traffic | received, sent | bytes/s |\n| iis.website_requests_rate | requests | requests/s |\n| iis.website_active_connections_count | active | connections |\n| iis.website_users_count | anonymous, non_anonymous | users |\n| iis.website_connection_attempts_rate | connection | attempts/s |\n| iis.website_isapi_extension_requests_count | isapi | requests |\n| iis.website_isapi_extension_requests_rate | isapi | requests/s |\n| iis.website_ftp_file_transfer_rate | received, sent | files/s |\n| iis.website_logon_attempts_rate | logon | attempts/s |\n| iis.website_errors_rate | document_locked, document_not_found | errors/s |\n| iis.website_uptime | document_locked, document_not_found | seconds |\n\n### Per mssql instance\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| 
mssql.instance_accessmethods_page_splits | page | splits/s |\n| mssql.instance_cache_hit_ratio | hit_ratio | percentage |\n| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |\n| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |\n| mssql.instance_bufman_iops | read, written | iops |\n| mssql.instance_blocked_processes | blocked | processes |\n| mssql.instance_user_connection | user | connections |\n| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |\n| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |\n| mssql.instance_memmgr_pending_memory_grants | pending | processes |\n| mssql.instance_memmgr_server_memory | memory | bytes |\n| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |\n| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |\n| mssql.instance_sqlstats_batch_requests | batch | requests/s |\n| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |\n| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |\n| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |\n\n### Per database\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n| database | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.database_active_transactions | active | transactions |\n| mssql.database_backup_restore_operations | backup | operations/s |\n| mssql.database_data_files_size | size | bytes |\n| mssql.database_log_flushed | flushed | bytes/s |\n| mssql.database_log_flushes | log | flushes/s |\n| mssql.database_transactions | transactions | transactions/s |\n| mssql.database_write_transactions | write | transactions/s |\n\n### Per certificate template\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cert_template | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adcs.cert_template_requests | requests | requests/s |\n| adcs.cert_template_failed_requests | failed | requests/s |\n| adcs.cert_template_issued_requests | issued | requests/s |\n| adcs.cert_template_pending_requests | pending | requests/s |\n| adcs.cert_template_request_processing_time | processing_time | seconds |\n| adcs.cert_template_retrievals | retrievals | retrievals/s |\n| adcs.cert_template_retrieval_processing_time | processing_time | seconds |\n| adcs.cert_template_request_cryptographic_signing_time | singing_time | seconds |\n| adcs.cert_template_request_policy_module_processing | processing_time | seconds |\n| adcs.cert_template_challenge_responses | challenge | responses/s |\n| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |\n| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |\n| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |\n\n### Per process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| process | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netframework.clrexception_thrown | exceptions | 
exceptions/s |\n| netframework.clrexception_filters | filters | filters/s |\n| netframework.clrexception_finallys | finallys | finallys/s |\n| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |\n| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |\n| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |\n| netframework.clrinterop_interop_stubs_created | created | stubs/s |\n| netframework.clrjit_methods | jit-compiled | methods/s |\n| netframework.clrjit_time | time | percentage |\n| netframework.clrjit_standard_failures | failures | failures/s |\n| netframework.clrjit_il_bytes | compiled_msil | bytes/s |\n| netframework.clrloading_loader_heap_size | committed | bytes |\n| netframework.clrloading_appdomains_loaded | loaded | domain/s |\n| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |\n| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |\n| netframework.clrloading_classes_loaded | loaded | classes/s |\n| netframework.clrloading_class_load_failures | class_load | failures/s |\n| netframework.clrlocksandthreads_queue_length | threads | threads/s |\n| netframework.clrlocksandthreads_current_logical_threads | logical | threads |\n| netframework.clrlocksandthreads_current_physical_threads | physical | threads |\n| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |\n| netframework.clrlocksandthreads_contentions | contentions | contentions/s |\n| netframework.clrmemory_allocated_bytes | allocated | bytes/s |\n| netframework.clrmemory_finalization_survivors | survived | objects |\n| netframework.clrmemory_heap_size | heap | bytes |\n| netframework.clrmemory_promoted | promoted | bytes |\n| netframework.clrmemory_number_gc_handles | used | handles |\n| netframework.clrmemory_collections | gc | gc/s |\n| netframework.clrmemory_induced_gc | gc | gc/s |\n| netframework.clrmemory_number_pinned_objects | pinned | objects |\n| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |\n| netframework.clrmemory_committed | committed | bytes |\n| netframework.clrmemory_reserved | reserved | bytes |\n| netframework.clrmemory_gc_time | time | percentage |\n| netframework.clrremoting_channels | registered | channels/s |\n| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |\n| netframework.clrremoting_context_bound_objects | allocated | objects/s |\n| netframework.clrremoting_context_proxies | objects | objects/s |\n| netframework.clrremoting_contexts | contexts | contexts |\n| netframework.clrremoting_remote_calls | rpc | calls/s |\n| netframework.clrsecurity_link_time_checks | linktime | checks/s |\n| netframework.clrsecurity_checks_time | time | percentage |\n| netframework.clrsecurity_stack_walk_depth | stack | depth |\n| netframework.clrsecurity_runtime_checks | runtime | checks/s |\n\n### Per exchange workload\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.workload_active_tasks | active | tasks |\n| exchange.workload_completed_tasks | completed | tasks/s |\n| exchange.workload_queued_tasks | queued | tasks/s |\n| exchange.workload_yielded_tasks | yielded | tasks/s |\n| exchange.workload_activity_status | active, paused | status |\n\n### Per ldap process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit 
|\n|:------|:----------|:----|\n| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |\n| exchange.ldap_read_time | read | seconds |\n| exchange.ldap_search_time | search | seconds |\n| exchange.ldap_write_time | write | seconds |\n| exchange.ldap_timeout_errors | timeout | errors/s |\n\n### Per http proxy\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.http_proxy_avg_auth_latency | latency | seconds |\n| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |\n| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |\n| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |\n| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |\n| exchange.http_proxy_requests | processed | requests/s |\n\n### Per vm\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_name | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_cpu_usage | gues, hypervisor, remote | percentage |\n| hyperv.vm_memory_physical | assigned_memory | MiB |\n| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |\n| hyperv.vm_memory_pressure_current | pressure | percentage |\n| hyperv.vm_vid_physical_pages_allocated | allocated | pages |\n| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |\n\n### Per vm device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_device_bytes | read, written | bytes/s |\n| hyperv.vm_device_operations | read, write | operations/s |\n| hyperv.vm_device_errors | errors | errors/s |\n\n### Per vm interface\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_interface | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_interface_bytes | received, sent | bytes/s |\n| hyperv.vm_interface_packets | received, sent | packets/s |\n| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |\n\n### Per vswitch\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vswitch | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vswitch_bytes | received, sent | bytes/s |\n| hyperv.vswitch_packets | received, sent | packets/s |\n| hyperv.vswitch_directed_packets | received, sent | packets/s |\n| hyperv.vswitch_broadcast_packets | received, sent | packets/s |\n| hyperv.vswitch_multicast_packets | received, sent | packets/s |\n| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_packets_flooded | flooded | packets/s |\n| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |\n| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-windows-HyperV", "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/windows/metadata.yaml", @@ -17276,10 +17657,10 @@ export const integrations = [ } }, "overview": "# MS Exchange\n\nPlugin: go.d.plugin\nModule: windows\n\n## Overview\n\nThis collector monitors the performance of Windows machines, collects both host metrics and metrics from various Windows 
applications (e.g. Active Directory, MSSQL).\n\n\nIt collects metrics by periodically sending HTTP requests to [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt detects Windows exporter instances running on localhost (requires using [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)).\n\nUsing the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nData collection affects the CPU usage of the Windows host. CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\n",
-    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Windows exporter\n\nTo install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/windows.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/windows.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n url: https://192.0.2.1:9182/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Virtual Node\n\nThe Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.\nYou can create a virtual node for all your Windows machines and control them as separate entities.\n\nTo make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:\n\n> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.\n\n```yaml\n# /etc/netdata/vnodes/vnodes.conf\n- hostname: win_server\n guid: \n```\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n vnode: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from multiple remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server1\n url: http://192.0.2.1:9182/metrics\n\n - name: win_server2\n url: http://192.0.2.2:9182/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Windows exporter\n\nTo install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/windows.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/windows.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. 
| | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n url: https://192.0.2.1:9182/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Virtual Node\n\nThe Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.\nYou can create a virtual node for all your Windows machines and control them as separate entities.\n\nTo make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:\n\n> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.\n\n```yaml\n# /etc/netdata/vnodes/vnodes.conf\n- hostname: win_server\n guid: \n```\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n vnode: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from multiple remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server1\n url: http://192.0.2.1:9182/metrics\n\n - name: win_server2\n url: http://192.0.2.2:9182/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `windows` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m windows\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `windows` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep windows\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep windows /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep windows\n```\n\n",
    "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ windows_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.cpu_utilization_total | average CPU utilization over the last 10 minutes |\n| [ windows_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.memory_utilization | memory utilization |\n| [ windows_inbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of inbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of outbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_inbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of inbound errors for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of outbound errors for the network interface in the last 10 minutes |\n| [ windows_disk_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.logical_disk_space_usage | disk space utilization |\n",
-    "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThe collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\nSupported collectors:\n\n- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)\n- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)\n- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)\n- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)\n- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)\n- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)\n- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)\n- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)\n- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)\n- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)\n- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)\n- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)\n- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)\n- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)\n- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)\n- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)\n- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md)\n- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md)\n- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md)\n- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md)\n- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md)\n- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md)\n- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md)\n- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)\n- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)\n\n\n### Per Active Directory instance\n\nThese metrics refer to the entire monitored host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |\n| windows.memory_utilization | available, used | bytes |\n| windows.memory_page_faults | page_faults | events/s |\n| windows.memory_swap_utilization 
| available, used | bytes |\n| windows.memory_swap_operations | read, write | operations/s |\n| windows.memory_swap_pages | read, written | pages/s |\n| windows.memory_cached | cached | KiB |\n| windows.memory_cache_faults | cache_faults | events/s |\n| windows.memory_system_pool | paged, non-paged | bytes |\n| windows.tcp_conns_established | ipv4, ipv6 | connections |\n| windows.tcp_conns_active | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |\n| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |\n| windows.tcp_segments_received | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |\n| windows.os_processes | processes | number |\n| windows.os_users | users | users |\n| windows.os_visible_memory_usage | free, used | bytes |\n| windows.os_paging_files_usage | free, used | bytes |\n| windows.system_threads | threads | number |\n| windows.system_uptime | time | seconds |\n| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |\n| windows.processes_cpu_utilization | a dimension per process | percentage |\n| windows.processes_handles | a dimension per process | handles |\n| windows.processes_io_bytes | a dimension per process | bytes/s |\n| windows.processes_io_operations | a dimension per process | operations/s |\n| windows.processes_page_faults | a dimension per process | pgfaults/s |\n| windows.processes_page_file_bytes | a dimension per process | bytes |\n| windows.processes_pool_bytes | a dimension per process | bytes |\n| windows.processes_threads | a dimension per process | threads |\n| ad.database_operations | add, delete, modify, recycle | operations/s |\n| ad.directory_operations | read, write, search | operations/s |\n| ad.name_cache_lookups | lookups | lookups/s |\n| ad.name_cache_hits | hits | hits/s |\n| ad.atq_average_request_latency | time | seconds |\n| ad.atq_outstanding_requests | outstanding | requests |\n| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |\n| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |\n| ad.dra_replication_properties_updated | inbound, outbound | properties/s |\n| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |\n| ad.dra_replication_pending_syncs | pending | syncs |\n| ad.dra_replication_sync_requests | requests | requests/s |\n| ad.ds_threads | in_use | threads |\n| ad.ldap_last_bind_time | last_bind | seconds |\n| ad.binds | binds | binds/s |\n| ad.ldap_searches | searches | searches/s |\n| adfs.ad_login_connection_failures | connection | failures/s |\n| adfs.certificate_authentications | authentications | authentications/s |\n| adfs.db_artifact_failures | connection | failures/s |\n| adfs.db_artifact_query_time_seconds | query_time | seconds/s |\n| adfs.db_config_failures | connection | failures/s |\n| adfs.db_config_query_time_seconds | query_time | seconds/s |\n| adfs.device_authentications | authentications | authentications/s |\n| adfs.external_authentications | success, failure | authentications/s |\n| adfs.federated_authentications | authentications | 
authentications/s |\n| adfs.federation_metadata_requests | requests | requests/s |\n| adfs.oauth_authorization_requests | requests | requests/s |\n| adfs.oauth_client_authentications | success, failure | authentications/s |\n| adfs.oauth_client_credentials_requests | success, failure | requests/s |\n| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |\n| adfs.oauth_client_windows_authentications | success, failure | authentications/s |\n| adfs.oauth_logon_certificate_requests | success, failure | requests/s |\n| adfs.oauth_password_grant_requests | success, failure | requests/s |\n| adfs.oauth_token_requests_success | success | requests/s |\n| adfs.passive_requests | passive | requests/s |\n| adfs.passport_authentications | passport | authentications/s |\n| adfs.password_change_requests | success, failure | requests/s |\n| adfs.samlp_token_requests_success | success | requests/s |\n| adfs.sso_authentications | success, failure | authentications/s |\n| adfs.token_requests | requests | requests/s |\n| adfs.userpassword_authentications | success, failure | authentications/s |\n| adfs.windows_integrated_authentications | authentications | authentications/s |\n| adfs.wsfed_token_requests_success | success | requests/s |\n| adfs.wstrust_token_requests_success | success | requests/s |\n| exchange.activesync_ping_cmds_pending | pending | commands |\n| exchange.activesync_requests | received | requests/s |\n| exchange.activesync_sync_cmds | processed | commands/s |\n| exchange.autodiscover_requests | processed | requests/s |\n| exchange.avail_service_requests | serviced | requests/s |\n| exchange.owa_current_unique_users | logged-in | users |\n| exchange.owa_requests_total | handled | requests/s |\n| exchange.rpc_active_user_count | active | users |\n| exchange.rpc_avg_latency | latency | seconds |\n| exchange.rpc_connection_count | connections | connections |\n| exchange.rpc_operations | operations | operations/s |\n| exchange.rpc_requests | processed | requests |\n| exchange.rpc_user_count | users | users |\n| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_poison | low, high, none, normal | messages/s |\n| hyperv.vms_health | ok, critical | vms |\n| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |\n| hyperv.root_partition_attached_devices | attached | devices |\n| hyperv.root_partition_deposited_pages | deposited | pages |\n| hyperv.root_partition_skipped_interrupts | skipped | interrupts |\n| hyperv.root_partition_device_dma_errors | illegal_dma | requests |\n| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | requests |\n| 
hyperv.root_partition_device_interrupt_throttle_events | throttling | events |\n| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |\n| hyperv.root_partition_address_space | address_spaces | address spaces |\n| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |\n| hyperv.root_partition_virtual_tlb_pages | used | pages |\n\n### Per cpu core\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| core | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |\n| windows.cpu_core_interrupts | interrupts | interrupts/s |\n| windows.cpu_core_dpcs | dpcs | dpcs/s |\n| windows.cpu_core_cstate | c1, c2, c3 | percentage |\n\n### Per logical disk\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.logical_disk_utilization | free, used | bytes |\n| windows.logical_disk_bandwidth | read, write | bytes/s |\n| windows.logical_disk_operations | reads, writes | operations/s |\n| windows.logical_disk_latency | read, write | seconds |\n\n### Per network device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| nic | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.net_nic_bandwidth | received, sent | kilobits/s |\n| windows.net_nic_packets | received, sent | packets/s |\n| windows.net_nic_errors | inbound, outbound | errors/s |\n| windows.net_nic_discarded | inbound, outbound | discards/s |\n\n### Per thermalzone\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thermalzone | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.thermalzone_temperature | temperature | celsius |\n\n### Per service\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |\n| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |\n\n### Per website\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| website | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.website_traffic | received, sent | bytes/s |\n| iis.website_requests_rate | requests | requests/s |\n| iis.website_active_connections_count | active | connections |\n| iis.website_users_count | anonymous, non_anonymous | users |\n| iis.website_connection_attempts_rate | connection | attempts/s |\n| iis.website_isapi_extension_requests_count | isapi | requests |\n| iis.website_isapi_extension_requests_rate | isapi | requests/s |\n| iis.website_ftp_file_transfer_rate | received, sent | files/s |\n| iis.website_logon_attempts_rate | logon | attempts/s |\n| iis.website_errors_rate | document_locked, document_not_found | errors/s |\n| iis.website_uptime | document_locked, document_not_found | seconds |\n\n### Per mssql instance\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.instance_accessmethods_page_splits | 
page | splits/s |\n| mssql.instance_cache_hit_ratio | hit_ratio | percentage |\n| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |\n| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |\n| mssql.instance_bufman_iops | read, written | iops |\n| mssql.instance_blocked_processes | blocked | processes |\n| mssql.instance_user_connection | user | connections |\n| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |\n| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |\n| mssql.instance_memmgr_pending_memory_grants | pending | processes |\n| mssql.instance_memmgr_server_memory | memory | bytes |\n| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |\n| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |\n| mssql.instance_sqlstats_batch_requests | batch | requests/s |\n| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |\n| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |\n| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |\n\n### Per database\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n| database | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.database_active_transactions | active | transactions |\n| mssql.database_backup_restore_operations | backup | operations/s |\n| mssql.database_data_files_size | size | bytes |\n| mssql.database_log_flushed | flushed | bytes/s |\n| mssql.database_log_flushes | log | flushes/s |\n| mssql.database_transactions | transactions | transactions/s |\n| mssql.database_write_transactions | write | transactions/s |\n\n### Per certificate template\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cert_template | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adcs.cert_template_requests | requests | requests/s |\n| adcs.cert_template_failed_requests | failed | requests/s |\n| adcs.cert_template_issued_requests | issued | requests/s |\n| adcs.cert_template_pending_requests | pending | requests/s |\n| adcs.cert_template_request_processing_time | processing_time | seconds |\n| adcs.cert_template_retrievals | retrievals | retrievals/s |\n| adcs.cert_template_retrieval_processing_time | processing_time | seconds |\n| adcs.cert_template_request_cryptographic_signing_time | singing_time | seconds |\n| adcs.cert_template_request_policy_module_processing | processing_time | seconds |\n| adcs.cert_template_challenge_responses | challenge | responses/s |\n| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |\n| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |\n| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |\n\n### Per process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| process | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netframework.clrexception_thrown | exceptions | exceptions/s |\n| 
netframework.clrexception_filters | filters | filters/s |\n| netframework.clrexception_finallys | finallys | finallys/s |\n| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |\n| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |\n| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |\n| netframework.clrinterop_interop_stubs_created | created | stubs/s |\n| netframework.clrjit_methods | jit-compiled | methods/s |\n| netframework.clrjit_time | time | percentage |\n| netframework.clrjit_standard_failures | failures | failures/s |\n| netframework.clrjit_il_bytes | compiled_msil | bytes/s |\n| netframework.clrloading_loader_heap_size | committed | bytes |\n| netframework.clrloading_appdomains_loaded | loaded | domain/s |\n| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |\n| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |\n| netframework.clrloading_classes_loaded | loaded | classes/s |\n| netframework.clrloading_class_load_failures | class_load | failures/s |\n| netframework.clrlocksandthreads_queue_length | threads | threads/s |\n| netframework.clrlocksandthreads_current_logical_threads | logical | threads |\n| netframework.clrlocksandthreads_current_physical_threads | physical | threads |\n| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |\n| netframework.clrlocksandthreads_contentions | contentions | contentions/s |\n| netframework.clrmemory_allocated_bytes | allocated | bytes/s |\n| netframework.clrmemory_finalization_survivors | survived | objects |\n| netframework.clrmemory_heap_size | heap | bytes |\n| netframework.clrmemory_promoted | promoted | bytes |\n| netframework.clrmemory_number_gc_handles | used | handles |\n| netframework.clrmemory_collections | gc | gc/s |\n| netframework.clrmemory_induced_gc | gc | gc/s |\n| netframework.clrmemory_number_pinned_objects | pinned | objects |\n| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |\n| netframework.clrmemory_committed | committed | bytes |\n| netframework.clrmemory_reserved | reserved | bytes |\n| netframework.clrmemory_gc_time | time | percentage |\n| netframework.clrremoting_channels | registered | channels/s |\n| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |\n| netframework.clrremoting_context_bound_objects | allocated | objects/s |\n| netframework.clrremoting_context_proxies | objects | objects/s |\n| netframework.clrremoting_contexts | contexts | contexts |\n| netframework.clrremoting_remote_calls | rpc | calls/s |\n| netframework.clrsecurity_link_time_checks | linktime | checks/s |\n| netframework.clrsecurity_checks_time | time | percentage |\n| netframework.clrsecurity_stack_walk_depth | stack | depth |\n| netframework.clrsecurity_runtime_checks | runtime | checks/s |\n\n### Per exchange workload\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.workload_active_tasks | active | tasks |\n| exchange.workload_completed_tasks | completed | tasks/s |\n| exchange.workload_queued_tasks | queued | tasks/s |\n| exchange.workload_yielded_tasks | yielded | tasks/s |\n| exchange.workload_activity_status | active, paused | status |\n\n### Per ldap process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit 
|\n|:------|:----------|:----|\n| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |\n| exchange.ldap_read_time | read | seconds |\n| exchange.ldap_search_time | search | seconds |\n| exchange.ldap_write_time | write | seconds |\n| exchange.ldap_timeout_errors | timeout | errors/s |\n\n### Per http proxy\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.http_proxy_avg_auth_latency | latency | seconds |\n| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |\n| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |\n| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |\n| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |\n| exchange.http_proxy_requests | processed | requests/s |\n\n### Per vm\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_name | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_cpu_usage | gues, hypervisor, remote | percentage |\n| hyperv.vm_memory_physical | assigned_memory | MiB |\n| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |\n| hyperv.vm_memory_pressure_current | pressure | percentage |\n| hyperv.vm_vid_physical_pages_allocated | allocated | pages |\n| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |\n\n### Per vm device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_device_bytes | read, written | bytes/s |\n| hyperv.vm_device_operations | read, write | operations/s |\n| hyperv.vm_device_errors | errors | errors/s |\n\n### Per vm interface\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_interface | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_interface_bytes | received, sent | bytes/s |\n| hyperv.vm_interface_packets | received, sent | packets/s |\n| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |\n\n### Per vswitch\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vswitch | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vswitch_bytes | received, sent | bytes/s |\n| hyperv.vswitch_packets | received, sent | packets/s |\n| hyperv.vswitch_directed_packets | received, sent | packets/s |\n| hyperv.vswitch_broadcast_packets | received, sent | packets/s |\n| hyperv.vswitch_multicast_packets | received, sent | packets/s |\n| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_packets_flooded | flooded | packets/s |\n| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |\n| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |\n\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThe collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\nSupported collectors:\n\n- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)\n- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)\n- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)\n- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)\n- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)\n- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)\n- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)\n- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)\n- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)\n- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)\n- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)\n- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)\n- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)\n- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)\n- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)\n- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)\n- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-exceptions)\n- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-interop)\n- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-jit)\n- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-loading)\n- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-locks-and-threads)\n- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-memory)\n- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-remoting)\n- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)\n- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)\n\n\n### Per Active Directory instance\n\nThese metrics refer to the entire monitored host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |\n| windows.memory_utilization | available, used | bytes |\n| windows.memory_page_faults | page_faults | events/s |\n| 
windows.memory_swap_utilization | available, used | bytes |\n| windows.memory_swap_operations | read, write | operations/s |\n| windows.memory_swap_pages | read, written | pages/s |\n| windows.memory_cached | cached | KiB |\n| windows.memory_cache_faults | cache_faults | events/s |\n| windows.memory_system_pool | paged, non-paged | bytes |\n| windows.tcp_conns_established | ipv4, ipv6 | connections |\n| windows.tcp_conns_active | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |\n| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |\n| windows.tcp_segments_received | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |\n| windows.os_processes | processes | number |\n| windows.os_users | users | users |\n| windows.os_visible_memory_usage | free, used | bytes |\n| windows.os_paging_files_usage | free, used | bytes |\n| windows.system_threads | threads | number |\n| windows.system_uptime | time | seconds |\n| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |\n| windows.processes_cpu_utilization | a dimension per process | percentage |\n| windows.processes_handles | a dimension per process | handles |\n| windows.processes_io_bytes | a dimension per process | bytes/s |\n| windows.processes_io_operations | a dimension per process | operations/s |\n| windows.processes_page_faults | a dimension per process | pgfaults/s |\n| windows.processes_page_file_bytes | a dimension per process | bytes |\n| windows.processes_pool_bytes | a dimension per process | bytes |\n| windows.processes_threads | a dimension per process | threads |\n| ad.database_operations | add, delete, modify, recycle | operations/s |\n| ad.directory_operations | read, write, search | operations/s |\n| ad.name_cache_lookups | lookups | lookups/s |\n| ad.name_cache_hits | hits | hits/s |\n| ad.atq_average_request_latency | time | seconds |\n| ad.atq_outstanding_requests | outstanding | requests |\n| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |\n| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |\n| ad.dra_replication_properties_updated | inbound, outbound | properties/s |\n| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |\n| ad.dra_replication_pending_syncs | pending | syncs |\n| ad.dra_replication_sync_requests | requests | requests/s |\n| ad.ds_threads | in_use | threads |\n| ad.ldap_last_bind_time | last_bind | seconds |\n| ad.binds | binds | binds/s |\n| ad.ldap_searches | searches | searches/s |\n| adfs.ad_login_connection_failures | connection | failures/s |\n| adfs.certificate_authentications | authentications | authentications/s |\n| adfs.db_artifact_failures | connection | failures/s |\n| adfs.db_artifact_query_time_seconds | query_time | seconds/s |\n| adfs.db_config_failures | connection | failures/s |\n| adfs.db_config_query_time_seconds | query_time | seconds/s |\n| adfs.device_authentications | authentications | authentications/s |\n| adfs.external_authentications | success, failure | authentications/s |\n| 
adfs.federated_authentications | authentications | authentications/s |\n| adfs.federation_metadata_requests | requests | requests/s |\n| adfs.oauth_authorization_requests | requests | requests/s |\n| adfs.oauth_client_authentications | success, failure | authentications/s |\n| adfs.oauth_client_credentials_requests | success, failure | requests/s |\n| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |\n| adfs.oauth_client_windows_authentications | success, failure | authentications/s |\n| adfs.oauth_logon_certificate_requests | success, failure | requests/s |\n| adfs.oauth_password_grant_requests | success, failure | requests/s |\n| adfs.oauth_token_requests_success | success | requests/s |\n| adfs.passive_requests | passive | requests/s |\n| adfs.passport_authentications | passport | authentications/s |\n| adfs.password_change_requests | success, failure | requests/s |\n| adfs.samlp_token_requests_success | success | requests/s |\n| adfs.sso_authentications | success, failure | authentications/s |\n| adfs.token_requests | requests | requests/s |\n| adfs.userpassword_authentications | success, failure | authentications/s |\n| adfs.windows_integrated_authentications | authentications | authentications/s |\n| adfs.wsfed_token_requests_success | success | requests/s |\n| adfs.wstrust_token_requests_success | success | requests/s |\n| exchange.activesync_ping_cmds_pending | pending | commands |\n| exchange.activesync_requests | received | requests/s |\n| exchange.activesync_sync_cmds | processed | commands/s |\n| exchange.autodiscover_requests | processed | requests/s |\n| exchange.avail_service_requests | serviced | requests/s |\n| exchange.owa_current_unique_users | logged-in | users |\n| exchange.owa_requests_total | handled | requests/s |\n| exchange.rpc_active_user_count | active | users |\n| exchange.rpc_avg_latency | latency | seconds |\n| exchange.rpc_connection_count | connections | connections |\n| exchange.rpc_operations | operations | operations/s |\n| exchange.rpc_requests | processed | requests |\n| exchange.rpc_user_count | users | users |\n| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_poison | low, high, none, normal | messages/s |\n| hyperv.vms_health | ok, critical | vms |\n| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |\n| hyperv.root_partition_attached_devices | attached | devices |\n| hyperv.root_partition_deposited_pages | deposited | pages |\n| hyperv.root_partition_skipped_interrupts | skipped | interrupts |\n| hyperv.root_partition_device_dma_errors | illegal_dma | requests |\n| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | 
requests |\n| hyperv.root_partition_device_interrupt_throttle_events | throttling | events |\n| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |\n| hyperv.root_partition_address_space | address_spaces | address spaces |\n| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |\n| hyperv.root_partition_virtual_tlb_pages | used | pages |\n\n### Per cpu core\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| core | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |\n| windows.cpu_core_interrupts | interrupts | interrupts/s |\n| windows.cpu_core_dpcs | dpcs | dpcs/s |\n| windows.cpu_core_cstate | c1, c2, c3 | percentage |\n\n### Per logical disk\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.logical_disk_utilization | free, used | bytes |\n| windows.logical_disk_bandwidth | read, write | bytes/s |\n| windows.logical_disk_operations | reads, writes | operations/s |\n| windows.logical_disk_latency | read, write | seconds |\n\n### Per network device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| nic | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.net_nic_bandwidth | received, sent | kilobits/s |\n| windows.net_nic_packets | received, sent | packets/s |\n| windows.net_nic_errors | inbound, outbound | errors/s |\n| windows.net_nic_discarded | inbound, outbound | discards/s |\n\n### Per thermalzone\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thermalzone | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.thermalzone_temperature | temperature | celsius |\n\n### Per service\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |\n| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |\n\n### Per website\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| website | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.website_traffic | received, sent | bytes/s |\n| iis.website_requests_rate | requests | requests/s |\n| iis.website_active_connections_count | active | connections |\n| iis.website_users_count | anonymous, non_anonymous | users |\n| iis.website_connection_attempts_rate | connection | attempts/s |\n| iis.website_isapi_extension_requests_count | isapi | requests |\n| iis.website_isapi_extension_requests_rate | isapi | requests/s |\n| iis.website_ftp_file_transfer_rate | received, sent | files/s |\n| iis.website_logon_attempts_rate | logon | attempts/s |\n| iis.website_errors_rate | document_locked, document_not_found | errors/s |\n| iis.website_uptime | document_locked, document_not_found | seconds |\n\n### Per mssql instance\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| 
mssql.instance_accessmethods_page_splits | page | splits/s |\n| mssql.instance_cache_hit_ratio | hit_ratio | percentage |\n| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |\n| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |\n| mssql.instance_bufman_iops | read, written | iops |\n| mssql.instance_blocked_processes | blocked | processes |\n| mssql.instance_user_connection | user | connections |\n| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |\n| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |\n| mssql.instance_memmgr_pending_memory_grants | pending | processes |\n| mssql.instance_memmgr_server_memory | memory | bytes |\n| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |\n| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |\n| mssql.instance_sqlstats_batch_requests | batch | requests/s |\n| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |\n| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |\n| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |\n\n### Per database\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n| database | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.database_active_transactions | active | transactions |\n| mssql.database_backup_restore_operations | backup | operations/s |\n| mssql.database_data_files_size | size | bytes |\n| mssql.database_log_flushed | flushed | bytes/s |\n| mssql.database_log_flushes | log | flushes/s |\n| mssql.database_transactions | transactions | transactions/s |\n| mssql.database_write_transactions | write | transactions/s |\n\n### Per certificate template\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cert_template | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adcs.cert_template_requests | requests | requests/s |\n| adcs.cert_template_failed_requests | failed | requests/s |\n| adcs.cert_template_issued_requests | issued | requests/s |\n| adcs.cert_template_pending_requests | pending | requests/s |\n| adcs.cert_template_request_processing_time | processing_time | seconds |\n| adcs.cert_template_retrievals | retrievals | retrievals/s |\n| adcs.cert_template_retrieval_processing_time | processing_time | seconds |\n| adcs.cert_template_request_cryptographic_signing_time | singing_time | seconds |\n| adcs.cert_template_request_policy_module_processing | processing_time | seconds |\n| adcs.cert_template_challenge_responses | challenge | responses/s |\n| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |\n| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |\n| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |\n\n### Per process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| process | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netframework.clrexception_thrown | exceptions | 
exceptions/s |\n| netframework.clrexception_filters | filters | filters/s |\n| netframework.clrexception_finallys | finallys | finallys/s |\n| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |\n| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |\n| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |\n| netframework.clrinterop_interop_stubs_created | created | stubs/s |\n| netframework.clrjit_methods | jit-compiled | methods/s |\n| netframework.clrjit_time | time | percentage |\n| netframework.clrjit_standard_failures | failures | failures/s |\n| netframework.clrjit_il_bytes | compiled_msil | bytes/s |\n| netframework.clrloading_loader_heap_size | committed | bytes |\n| netframework.clrloading_appdomains_loaded | loaded | domain/s |\n| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |\n| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |\n| netframework.clrloading_classes_loaded | loaded | classes/s |\n| netframework.clrloading_class_load_failures | class_load | failures/s |\n| netframework.clrlocksandthreads_queue_length | threads | threads/s |\n| netframework.clrlocksandthreads_current_logical_threads | logical | threads |\n| netframework.clrlocksandthreads_current_physical_threads | physical | threads |\n| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |\n| netframework.clrlocksandthreads_contentions | contentions | contentions/s |\n| netframework.clrmemory_allocated_bytes | allocated | bytes/s |\n| netframework.clrmemory_finalization_survivors | survived | objects |\n| netframework.clrmemory_heap_size | heap | bytes |\n| netframework.clrmemory_promoted | promoted | bytes |\n| netframework.clrmemory_number_gc_handles | used | handles |\n| netframework.clrmemory_collections | gc | gc/s |\n| netframework.clrmemory_induced_gc | gc | gc/s |\n| netframework.clrmemory_number_pinned_objects | pinned | objects |\n| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |\n| netframework.clrmemory_committed | committed | bytes |\n| netframework.clrmemory_reserved | reserved | bytes |\n| netframework.clrmemory_gc_time | time | percentage |\n| netframework.clrremoting_channels | registered | channels/s |\n| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |\n| netframework.clrremoting_context_bound_objects | allocated | objects/s |\n| netframework.clrremoting_context_proxies | objects | objects/s |\n| netframework.clrremoting_contexts | contexts | contexts |\n| netframework.clrremoting_remote_calls | rpc | calls/s |\n| netframework.clrsecurity_link_time_checks | linktime | checks/s |\n| netframework.clrsecurity_checks_time | time | percentage |\n| netframework.clrsecurity_stack_walk_depth | stack | depth |\n| netframework.clrsecurity_runtime_checks | runtime | checks/s |\n\n### Per exchange workload\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.workload_active_tasks | active | tasks |\n| exchange.workload_completed_tasks | completed | tasks/s |\n| exchange.workload_queued_tasks | queued | tasks/s |\n| exchange.workload_yielded_tasks | yielded | tasks/s |\n| exchange.workload_activity_status | active, paused | status |\n\n### Per ldap process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit 
|\n|:------|:----------|:----|\n| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |\n| exchange.ldap_read_time | read | seconds |\n| exchange.ldap_search_time | search | seconds |\n| exchange.ldap_write_time | write | seconds |\n| exchange.ldap_timeout_errors | timeout | errors/s |\n\n### Per http proxy\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.http_proxy_avg_auth_latency | latency | seconds |\n| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |\n| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |\n| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |\n| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |\n| exchange.http_proxy_requests | processed | requests/s |\n\n### Per vm\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_name | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_cpu_usage | gues, hypervisor, remote | percentage |\n| hyperv.vm_memory_physical | assigned_memory | MiB |\n| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |\n| hyperv.vm_memory_pressure_current | pressure | percentage |\n| hyperv.vm_vid_physical_pages_allocated | allocated | pages |\n| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |\n\n### Per vm device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_device_bytes | read, written | bytes/s |\n| hyperv.vm_device_operations | read, write | operations/s |\n| hyperv.vm_device_errors | errors | errors/s |\n\n### Per vm interface\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_interface | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_interface_bytes | received, sent | bytes/s |\n| hyperv.vm_interface_packets | received, sent | packets/s |\n| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |\n\n### Per vswitch\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vswitch | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vswitch_bytes | received, sent | bytes/s |\n| hyperv.vswitch_packets | received, sent | packets/s |\n| hyperv.vswitch_directed_packets | received, sent | packets/s |\n| hyperv.vswitch_broadcast_packets | received, sent | packets/s |\n| hyperv.vswitch_multicast_packets | received, sent | packets/s |\n| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_packets_flooded | flooded | packets/s |\n| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |\n| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-windows-MS_Exchange", "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/windows/metadata.yaml", @@ -17316,10 +17697,10 @@ export const integrations = [ } }, "overview": "# MS SQL Server\n\nPlugin: go.d.plugin\nModule: windows\n\n## Overview\n\nThis collector monitors the performance of Windows machines, collecting both host metrics and metrics from various 
Windows applications (e.g. Active Directory, MSSQL).\n\n\nIt collects metrics by periodically sending HTTP requests to [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt detects Windows exporter instances running on localhost (requires using the [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)).\n\nUsing the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nData collection affects the CPU usage of the Windows host. CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Windows exporter\n\nTo install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/windows.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/windows.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n url: https://192.0.2.1:9182/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Virtual Node\n\nThe Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.\nYou can create a virtual node for all your Windows machines and control them as separate entities.\n\nTo make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:\n\n> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.\n\n```yaml\n# /etc/netdata/vnodes/vnodes.conf\n- hostname: win_server\n guid: \n```\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n vnode: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from multiple remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server1\n url: http://192.0.2.1:9182/metrics\n\n - name: win_server2\n url: http://192.0.2.2:9182/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Windows exporter\n\nTo install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/windows.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/windows.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. 
| | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n url: https://192.0.2.1:9182/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Virtual Node\n\nThe Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.\nYou can create a virtual node for all your Windows machines and control them as separate entities.\n\nTo make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:\n\n> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.\n\n```yaml\n# /etc/netdata/vnodes/vnodes.conf\n- hostname: win_server\n guid: \n```\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n vnode: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from multiple remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server1\n url: http://192.0.2.1:9182/metrics\n\n - name: win_server2\n url: http://192.0.2.2:9182/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `windows` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m windows\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `windows` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep windows\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep windows /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep windows\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ windows_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.cpu_utilization_total | average CPU utilization over the last 10 minutes |\n| [ windows_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.memory_utilization | memory utilization |\n| [ windows_inbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of inbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of outbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_inbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of inbound errors for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of outbound errors for the network interface in the last 10 minutes |\n| [ windows_disk_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.logical_disk_space_usage | disk space utilization |\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThe collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\nSupported collectors:\n\n- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)\n- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)\n- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)\n- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)\n- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)\n- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)\n- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)\n- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)\n- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)\n- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)\n- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)\n- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)\n- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)\n- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)\n- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)\n- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)\n- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md)\n- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md)\n- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md)\n- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md)\n- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md)\n- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md)\n- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md)\n- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)\n- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)\n\n\n### Per Active Directory instance\n\nThese metrics refer to the entire monitored host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |\n| windows.memory_utilization | available, used | bytes |\n| windows.memory_page_faults | page_faults | events/s |\n| windows.memory_swap_utilization 
| available, used | bytes |\n| windows.memory_swap_operations | read, write | operations/s |\n| windows.memory_swap_pages | read, written | pages/s |\n| windows.memory_cached | cached | KiB |\n| windows.memory_cache_faults | cache_faults | events/s |\n| windows.memory_system_pool | paged, non-paged | bytes |\n| windows.tcp_conns_established | ipv4, ipv6 | connections |\n| windows.tcp_conns_active | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |\n| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |\n| windows.tcp_segments_received | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |\n| windows.os_processes | processes | number |\n| windows.os_users | users | users |\n| windows.os_visible_memory_usage | free, used | bytes |\n| windows.os_paging_files_usage | free, used | bytes |\n| windows.system_threads | threads | number |\n| windows.system_uptime | time | seconds |\n| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |\n| windows.processes_cpu_utilization | a dimension per process | percentage |\n| windows.processes_handles | a dimension per process | handles |\n| windows.processes_io_bytes | a dimension per process | bytes/s |\n| windows.processes_io_operations | a dimension per process | operations/s |\n| windows.processes_page_faults | a dimension per process | pgfaults/s |\n| windows.processes_page_file_bytes | a dimension per process | bytes |\n| windows.processes_pool_bytes | a dimension per process | bytes |\n| windows.processes_threads | a dimension per process | threads |\n| ad.database_operations | add, delete, modify, recycle | operations/s |\n| ad.directory_operations | read, write, search | operations/s |\n| ad.name_cache_lookups | lookups | lookups/s |\n| ad.name_cache_hits | hits | hits/s |\n| ad.atq_average_request_latency | time | seconds |\n| ad.atq_outstanding_requests | outstanding | requests |\n| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |\n| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |\n| ad.dra_replication_properties_updated | inbound, outbound | properties/s |\n| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |\n| ad.dra_replication_pending_syncs | pending | syncs |\n| ad.dra_replication_sync_requests | requests | requests/s |\n| ad.ds_threads | in_use | threads |\n| ad.ldap_last_bind_time | last_bind | seconds |\n| ad.binds | binds | binds/s |\n| ad.ldap_searches | searches | searches/s |\n| adfs.ad_login_connection_failures | connection | failures/s |\n| adfs.certificate_authentications | authentications | authentications/s |\n| adfs.db_artifact_failures | connection | failures/s |\n| adfs.db_artifact_query_time_seconds | query_time | seconds/s |\n| adfs.db_config_failures | connection | failures/s |\n| adfs.db_config_query_time_seconds | query_time | seconds/s |\n| adfs.device_authentications | authentications | authentications/s |\n| adfs.external_authentications | success, failure | authentications/s |\n| adfs.federated_authentications | authentications | 
authentications/s |\n| adfs.federation_metadata_requests | requests | requests/s |\n| adfs.oauth_authorization_requests | requests | requests/s |\n| adfs.oauth_client_authentications | success, failure | authentications/s |\n| adfs.oauth_client_credentials_requests | success, failure | requests/s |\n| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |\n| adfs.oauth_client_windows_authentications | success, failure | authentications/s |\n| adfs.oauth_logon_certificate_requests | success, failure | requests/s |\n| adfs.oauth_password_grant_requests | success, failure | requests/s |\n| adfs.oauth_token_requests_success | success | requests/s |\n| adfs.passive_requests | passive | requests/s |\n| adfs.passport_authentications | passport | authentications/s |\n| adfs.password_change_requests | success, failure | requests/s |\n| adfs.samlp_token_requests_success | success | requests/s |\n| adfs.sso_authentications | success, failure | authentications/s |\n| adfs.token_requests | requests | requests/s |\n| adfs.userpassword_authentications | success, failure | authentications/s |\n| adfs.windows_integrated_authentications | authentications | authentications/s |\n| adfs.wsfed_token_requests_success | success | requests/s |\n| adfs.wstrust_token_requests_success | success | requests/s |\n| exchange.activesync_ping_cmds_pending | pending | commands |\n| exchange.activesync_requests | received | requests/s |\n| exchange.activesync_sync_cmds | processed | commands/s |\n| exchange.autodiscover_requests | processed | requests/s |\n| exchange.avail_service_requests | serviced | requests/s |\n| exchange.owa_current_unique_users | logged-in | users |\n| exchange.owa_requests_total | handled | requests/s |\n| exchange.rpc_active_user_count | active | users |\n| exchange.rpc_avg_latency | latency | seconds |\n| exchange.rpc_connection_count | connections | connections |\n| exchange.rpc_operations | operations | operations/s |\n| exchange.rpc_requests | processed | requests |\n| exchange.rpc_user_count | users | users |\n| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_poison | low, high, none, normal | messages/s |\n| hyperv.vms_health | ok, critical | vms |\n| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |\n| hyperv.root_partition_attached_devices | attached | devices |\n| hyperv.root_partition_deposited_pages | deposited | pages |\n| hyperv.root_partition_skipped_interrupts | skipped | interrupts |\n| hyperv.root_partition_device_dma_errors | illegal_dma | requests |\n| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | requests |\n| 
hyperv.root_partition_device_interrupt_throttle_events | throttling | events |\n| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |\n| hyperv.root_partition_address_space | address_spaces | address spaces |\n| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |\n| hyperv.root_partition_virtual_tlb_pages | used | pages |\n\n### Per cpu core\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| core | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |\n| windows.cpu_core_interrupts | interrupts | interrupts/s |\n| windows.cpu_core_dpcs | dpcs | dpcs/s |\n| windows.cpu_core_cstate | c1, c2, c3 | percentage |\n\n### Per logical disk\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.logical_disk_utilization | free, used | bytes |\n| windows.logical_disk_bandwidth | read, write | bytes/s |\n| windows.logical_disk_operations | reads, writes | operations/s |\n| windows.logical_disk_latency | read, write | seconds |\n\n### Per network device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| nic | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.net_nic_bandwidth | received, sent | kilobits/s |\n| windows.net_nic_packets | received, sent | packets/s |\n| windows.net_nic_errors | inbound, outbound | errors/s |\n| windows.net_nic_discarded | inbound, outbound | discards/s |\n\n### Per thermalzone\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thermalzone | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.thermalzone_temperature | temperature | celsius |\n\n### Per service\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |\n| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |\n\n### Per website\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| website | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.website_traffic | received, sent | bytes/s |\n| iis.website_requests_rate | requests | requests/s |\n| iis.website_active_connections_count | active | connections |\n| iis.website_users_count | anonymous, non_anonymous | users |\n| iis.website_connection_attempts_rate | connection | attempts/s |\n| iis.website_isapi_extension_requests_count | isapi | requests |\n| iis.website_isapi_extension_requests_rate | isapi | requests/s |\n| iis.website_ftp_file_transfer_rate | received, sent | files/s |\n| iis.website_logon_attempts_rate | logon | attempts/s |\n| iis.website_errors_rate | document_locked, document_not_found | errors/s |\n| iis.website_uptime | document_locked, document_not_found | seconds |\n\n### Per mssql instance\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.instance_accessmethods_page_splits | 
page | splits/s |\n| mssql.instance_cache_hit_ratio | hit_ratio | percentage |\n| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |\n| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |\n| mssql.instance_bufman_iops | read, written | iops |\n| mssql.instance_blocked_processes | blocked | processes |\n| mssql.instance_user_connection | user | connections |\n| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |\n| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |\n| mssql.instance_memmgr_pending_memory_grants | pending | processes |\n| mssql.instance_memmgr_server_memory | memory | bytes |\n| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |\n| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |\n| mssql.instance_sqlstats_batch_requests | batch | requests/s |\n| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |\n| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |\n| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |\n\n### Per database\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n| database | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.database_active_transactions | active | transactions |\n| mssql.database_backup_restore_operations | backup | operations/s |\n| mssql.database_data_files_size | size | bytes |\n| mssql.database_log_flushed | flushed | bytes/s |\n| mssql.database_log_flushes | log | flushes/s |\n| mssql.database_transactions | transactions | transactions/s |\n| mssql.database_write_transactions | write | transactions/s |\n\n### Per certificate template\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cert_template | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adcs.cert_template_requests | requests | requests/s |\n| adcs.cert_template_failed_requests | failed | requests/s |\n| adcs.cert_template_issued_requests | issued | requests/s |\n| adcs.cert_template_pending_requests | pending | requests/s |\n| adcs.cert_template_request_processing_time | processing_time | seconds |\n| adcs.cert_template_retrievals | retrievals | retrievals/s |\n| adcs.cert_template_retrieval_processing_time | processing_time | seconds |\n| adcs.cert_template_request_cryptographic_signing_time | singing_time | seconds |\n| adcs.cert_template_request_policy_module_processing | processing_time | seconds |\n| adcs.cert_template_challenge_responses | challenge | responses/s |\n| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |\n| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |\n| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |\n\n### Per process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| process | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netframework.clrexception_thrown | exceptions | exceptions/s |\n| 
netframework.clrexception_filters | filters | filters/s |\n| netframework.clrexception_finallys | finallys | finallys/s |\n| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |\n| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |\n| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |\n| netframework.clrinterop_interop_stubs_created | created | stubs/s |\n| netframework.clrjit_methods | jit-compiled | methods/s |\n| netframework.clrjit_time | time | percentage |\n| netframework.clrjit_standard_failures | failures | failures/s |\n| netframework.clrjit_il_bytes | compiled_msil | bytes/s |\n| netframework.clrloading_loader_heap_size | committed | bytes |\n| netframework.clrloading_appdomains_loaded | loaded | domain/s |\n| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |\n| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |\n| netframework.clrloading_classes_loaded | loaded | classes/s |\n| netframework.clrloading_class_load_failures | class_load | failures/s |\n| netframework.clrlocksandthreads_queue_length | threads | threads/s |\n| netframework.clrlocksandthreads_current_logical_threads | logical | threads |\n| netframework.clrlocksandthreads_current_physical_threads | physical | threads |\n| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |\n| netframework.clrlocksandthreads_contentions | contentions | contentions/s |\n| netframework.clrmemory_allocated_bytes | allocated | bytes/s |\n| netframework.clrmemory_finalization_survivors | survived | objects |\n| netframework.clrmemory_heap_size | heap | bytes |\n| netframework.clrmemory_promoted | promoted | bytes |\n| netframework.clrmemory_number_gc_handles | used | handles |\n| netframework.clrmemory_collections | gc | gc/s |\n| netframework.clrmemory_induced_gc | gc | gc/s |\n| netframework.clrmemory_number_pinned_objects | pinned | objects |\n| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |\n| netframework.clrmemory_committed | committed | bytes |\n| netframework.clrmemory_reserved | reserved | bytes |\n| netframework.clrmemory_gc_time | time | percentage |\n| netframework.clrremoting_channels | registered | channels/s |\n| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |\n| netframework.clrremoting_context_bound_objects | allocated | objects/s |\n| netframework.clrremoting_context_proxies | objects | objects/s |\n| netframework.clrremoting_contexts | contexts | contexts |\n| netframework.clrremoting_remote_calls | rpc | calls/s |\n| netframework.clrsecurity_link_time_checks | linktime | checks/s |\n| netframework.clrsecurity_checks_time | time | percentage |\n| netframework.clrsecurity_stack_walk_depth | stack | depth |\n| netframework.clrsecurity_runtime_checks | runtime | checks/s |\n\n### Per exchange workload\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.workload_active_tasks | active | tasks |\n| exchange.workload_completed_tasks | completed | tasks/s |\n| exchange.workload_queued_tasks | queued | tasks/s |\n| exchange.workload_yielded_tasks | yielded | tasks/s |\n| exchange.workload_activity_status | active, paused | status |\n\n### Per ldap process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit 
|\n|:------|:----------|:----|\n| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |\n| exchange.ldap_read_time | read | seconds |\n| exchange.ldap_search_time | search | seconds |\n| exchange.ldap_write_time | write | seconds |\n| exchange.ldap_timeout_errors | timeout | errors/s |\n\n### Per http proxy\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.http_proxy_avg_auth_latency | latency | seconds |\n| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |\n| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |\n| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |\n| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |\n| exchange.http_proxy_requests | processed | requests/s |\n\n### Per vm\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_name | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_cpu_usage | gues, hypervisor, remote | percentage |\n| hyperv.vm_memory_physical | assigned_memory | MiB |\n| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |\n| hyperv.vm_memory_pressure_current | pressure | percentage |\n| hyperv.vm_vid_physical_pages_allocated | allocated | pages |\n| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |\n\n### Per vm device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_device_bytes | read, written | bytes/s |\n| hyperv.vm_device_operations | read, write | operations/s |\n| hyperv.vm_device_errors | errors | errors/s |\n\n### Per vm interface\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_interface | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_interface_bytes | received, sent | bytes/s |\n| hyperv.vm_interface_packets | received, sent | packets/s |\n| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |\n\n### Per vswitch\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vswitch | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vswitch_bytes | received, sent | bytes/s |\n| hyperv.vswitch_packets | received, sent | packets/s |\n| hyperv.vswitch_directed_packets | received, sent | packets/s |\n| hyperv.vswitch_broadcast_packets | received, sent | packets/s |\n| hyperv.vswitch_multicast_packets | received, sent | packets/s |\n| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_packets_flooded | flooded | packets/s |\n| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |\n| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |\n\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThe collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\nSupported collectors:\n\n- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)\n- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)\n- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)\n- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)\n- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)\n- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)\n- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)\n- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)\n- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)\n- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)\n- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)\n- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)\n- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)\n- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)\n- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)\n- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)\n- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-exceptions)\n- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-interop)\n- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-jit)\n- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-loading)\n- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-locks-and-threads)\n- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-memory)\n- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-remoting)\n- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)\n- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)\n\n\n### Per Active Directory instance\n\nThese metrics refer to the entire monitored host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |\n| windows.memory_utilization | available, used | bytes |\n| windows.memory_page_faults | page_faults | events/s |\n| 
windows.memory_swap_utilization | available, used | bytes |\n| windows.memory_swap_operations | read, write | operations/s |\n| windows.memory_swap_pages | read, written | pages/s |\n| windows.memory_cached | cached | KiB |\n| windows.memory_cache_faults | cache_faults | events/s |\n| windows.memory_system_pool | paged, non-paged | bytes |\n| windows.tcp_conns_established | ipv4, ipv6 | connections |\n| windows.tcp_conns_active | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |\n| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |\n| windows.tcp_segments_received | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |\n| windows.os_processes | processes | number |\n| windows.os_users | users | users |\n| windows.os_visible_memory_usage | free, used | bytes |\n| windows.os_paging_files_usage | free, used | bytes |\n| windows.system_threads | threads | number |\n| windows.system_uptime | time | seconds |\n| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |\n| windows.processes_cpu_utilization | a dimension per process | percentage |\n| windows.processes_handles | a dimension per process | handles |\n| windows.processes_io_bytes | a dimension per process | bytes/s |\n| windows.processes_io_operations | a dimension per process | operations/s |\n| windows.processes_page_faults | a dimension per process | pgfaults/s |\n| windows.processes_page_file_bytes | a dimension per process | bytes |\n| windows.processes_pool_bytes | a dimension per process | bytes |\n| windows.processes_threads | a dimension per process | threads |\n| ad.database_operations | add, delete, modify, recycle | operations/s |\n| ad.directory_operations | read, write, search | operations/s |\n| ad.name_cache_lookups | lookups | lookups/s |\n| ad.name_cache_hits | hits | hits/s |\n| ad.atq_average_request_latency | time | seconds |\n| ad.atq_outstanding_requests | outstanding | requests |\n| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |\n| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |\n| ad.dra_replication_properties_updated | inbound, outbound | properties/s |\n| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |\n| ad.dra_replication_pending_syncs | pending | syncs |\n| ad.dra_replication_sync_requests | requests | requests/s |\n| ad.ds_threads | in_use | threads |\n| ad.ldap_last_bind_time | last_bind | seconds |\n| ad.binds | binds | binds/s |\n| ad.ldap_searches | searches | searches/s |\n| adfs.ad_login_connection_failures | connection | failures/s |\n| adfs.certificate_authentications | authentications | authentications/s |\n| adfs.db_artifact_failures | connection | failures/s |\n| adfs.db_artifact_query_time_seconds | query_time | seconds/s |\n| adfs.db_config_failures | connection | failures/s |\n| adfs.db_config_query_time_seconds | query_time | seconds/s |\n| adfs.device_authentications | authentications | authentications/s |\n| adfs.external_authentications | success, failure | authentications/s |\n| 
adfs.federated_authentications | authentications | authentications/s |\n| adfs.federation_metadata_requests | requests | requests/s |\n| adfs.oauth_authorization_requests | requests | requests/s |\n| adfs.oauth_client_authentications | success, failure | authentications/s |\n| adfs.oauth_client_credentials_requests | success, failure | requests/s |\n| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |\n| adfs.oauth_client_windows_authentications | success, failure | authentications/s |\n| adfs.oauth_logon_certificate_requests | success, failure | requests/s |\n| adfs.oauth_password_grant_requests | success, failure | requests/s |\n| adfs.oauth_token_requests_success | success | requests/s |\n| adfs.passive_requests | passive | requests/s |\n| adfs.passport_authentications | passport | authentications/s |\n| adfs.password_change_requests | success, failure | requests/s |\n| adfs.samlp_token_requests_success | success | requests/s |\n| adfs.sso_authentications | success, failure | authentications/s |\n| adfs.token_requests | requests | requests/s |\n| adfs.userpassword_authentications | success, failure | authentications/s |\n| adfs.windows_integrated_authentications | authentications | authentications/s |\n| adfs.wsfed_token_requests_success | success | requests/s |\n| adfs.wstrust_token_requests_success | success | requests/s |\n| exchange.activesync_ping_cmds_pending | pending | commands |\n| exchange.activesync_requests | received | requests/s |\n| exchange.activesync_sync_cmds | processed | commands/s |\n| exchange.autodiscover_requests | processed | requests/s |\n| exchange.avail_service_requests | serviced | requests/s |\n| exchange.owa_current_unique_users | logged-in | users |\n| exchange.owa_requests_total | handled | requests/s |\n| exchange.rpc_active_user_count | active | users |\n| exchange.rpc_avg_latency | latency | seconds |\n| exchange.rpc_connection_count | connections | connections |\n| exchange.rpc_operations | operations | operations/s |\n| exchange.rpc_requests | processed | requests |\n| exchange.rpc_user_count | users | users |\n| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_poison | low, high, none, normal | messages/s |\n| hyperv.vms_health | ok, critical | vms |\n| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |\n| hyperv.root_partition_attached_devices | attached | devices |\n| hyperv.root_partition_deposited_pages | deposited | pages |\n| hyperv.root_partition_skipped_interrupts | skipped | interrupts |\n| hyperv.root_partition_device_dma_errors | illegal_dma | requests |\n| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | 
requests |\n| hyperv.root_partition_device_interrupt_throttle_events | throttling | events |\n| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |\n| hyperv.root_partition_address_space | address_spaces | address spaces |\n| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |\n| hyperv.root_partition_virtual_tlb_pages | used | pages |\n\n### Per cpu core\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| core | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |\n| windows.cpu_core_interrupts | interrupts | interrupts/s |\n| windows.cpu_core_dpcs | dpcs | dpcs/s |\n| windows.cpu_core_cstate | c1, c2, c3 | percentage |\n\n### Per logical disk\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.logical_disk_utilization | free, used | bytes |\n| windows.logical_disk_bandwidth | read, write | bytes/s |\n| windows.logical_disk_operations | reads, writes | operations/s |\n| windows.logical_disk_latency | read, write | seconds |\n\n### Per network device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| nic | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.net_nic_bandwidth | received, sent | kilobits/s |\n| windows.net_nic_packets | received, sent | packets/s |\n| windows.net_nic_errors | inbound, outbound | errors/s |\n| windows.net_nic_discarded | inbound, outbound | discards/s |\n\n### Per thermalzone\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thermalzone | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.thermalzone_temperature | temperature | celsius |\n\n### Per service\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |\n| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |\n\n### Per website\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| website | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.website_traffic | received, sent | bytes/s |\n| iis.website_requests_rate | requests | requests/s |\n| iis.website_active_connections_count | active | connections |\n| iis.website_users_count | anonymous, non_anonymous | users |\n| iis.website_connection_attempts_rate | connection | attempts/s |\n| iis.website_isapi_extension_requests_count | isapi | requests |\n| iis.website_isapi_extension_requests_rate | isapi | requests/s |\n| iis.website_ftp_file_transfer_rate | received, sent | files/s |\n| iis.website_logon_attempts_rate | logon | attempts/s |\n| iis.website_errors_rate | document_locked, document_not_found | errors/s |\n| iis.website_uptime | document_locked, document_not_found | seconds |\n\n### Per mssql instance\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| 
mssql.instance_accessmethods_page_splits | page | splits/s |\n| mssql.instance_cache_hit_ratio | hit_ratio | percentage |\n| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |\n| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |\n| mssql.instance_bufman_iops | read, written | iops |\n| mssql.instance_blocked_processes | blocked | processes |\n| mssql.instance_user_connection | user | connections |\n| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |\n| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |\n| mssql.instance_memmgr_pending_memory_grants | pending | processes |\n| mssql.instance_memmgr_server_memory | memory | bytes |\n| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |\n| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |\n| mssql.instance_sqlstats_batch_requests | batch | requests/s |\n| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |\n| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |\n| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |\n\n### Per database\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n| database | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.database_active_transactions | active | transactions |\n| mssql.database_backup_restore_operations | backup | operations/s |\n| mssql.database_data_files_size | size | bytes |\n| mssql.database_log_flushed | flushed | bytes/s |\n| mssql.database_log_flushes | log | flushes/s |\n| mssql.database_transactions | transactions | transactions/s |\n| mssql.database_write_transactions | write | transactions/s |\n\n### Per certificate template\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cert_template | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adcs.cert_template_requests | requests | requests/s |\n| adcs.cert_template_failed_requests | failed | requests/s |\n| adcs.cert_template_issued_requests | issued | requests/s |\n| adcs.cert_template_pending_requests | pending | requests/s |\n| adcs.cert_template_request_processing_time | processing_time | seconds |\n| adcs.cert_template_retrievals | retrievals | retrievals/s |\n| adcs.cert_template_retrieval_processing_time | processing_time | seconds |\n| adcs.cert_template_request_cryptographic_signing_time | singing_time | seconds |\n| adcs.cert_template_request_policy_module_processing | processing_time | seconds |\n| adcs.cert_template_challenge_responses | challenge | responses/s |\n| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |\n| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |\n| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |\n\n### Per process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| process | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netframework.clrexception_thrown | exceptions | 
exceptions/s |\n| netframework.clrexception_filters | filters | filters/s |\n| netframework.clrexception_finallys | finallys | finallys/s |\n| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |\n| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |\n| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |\n| netframework.clrinterop_interop_stubs_created | created | stubs/s |\n| netframework.clrjit_methods | jit-compiled | methods/s |\n| netframework.clrjit_time | time | percentage |\n| netframework.clrjit_standard_failures | failures | failures/s |\n| netframework.clrjit_il_bytes | compiled_msil | bytes/s |\n| netframework.clrloading_loader_heap_size | committed | bytes |\n| netframework.clrloading_appdomains_loaded | loaded | domain/s |\n| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |\n| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |\n| netframework.clrloading_classes_loaded | loaded | classes/s |\n| netframework.clrloading_class_load_failures | class_load | failures/s |\n| netframework.clrlocksandthreads_queue_length | threads | threads/s |\n| netframework.clrlocksandthreads_current_logical_threads | logical | threads |\n| netframework.clrlocksandthreads_current_physical_threads | physical | threads |\n| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |\n| netframework.clrlocksandthreads_contentions | contentions | contentions/s |\n| netframework.clrmemory_allocated_bytes | allocated | bytes/s |\n| netframework.clrmemory_finalization_survivors | survived | objects |\n| netframework.clrmemory_heap_size | heap | bytes |\n| netframework.clrmemory_promoted | promoted | bytes |\n| netframework.clrmemory_number_gc_handles | used | handles |\n| netframework.clrmemory_collections | gc | gc/s |\n| netframework.clrmemory_induced_gc | gc | gc/s |\n| netframework.clrmemory_number_pinned_objects | pinned | objects |\n| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |\n| netframework.clrmemory_committed | committed | bytes |\n| netframework.clrmemory_reserved | reserved | bytes |\n| netframework.clrmemory_gc_time | time | percentage |\n| netframework.clrremoting_channels | registered | channels/s |\n| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |\n| netframework.clrremoting_context_bound_objects | allocated | objects/s |\n| netframework.clrremoting_context_proxies | objects | objects/s |\n| netframework.clrremoting_contexts | contexts | contexts |\n| netframework.clrremoting_remote_calls | rpc | calls/s |\n| netframework.clrsecurity_link_time_checks | linktime | checks/s |\n| netframework.clrsecurity_checks_time | time | percentage |\n| netframework.clrsecurity_stack_walk_depth | stack | depth |\n| netframework.clrsecurity_runtime_checks | runtime | checks/s |\n\n### Per exchange workload\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.workload_active_tasks | active | tasks |\n| exchange.workload_completed_tasks | completed | tasks/s |\n| exchange.workload_queued_tasks | queued | tasks/s |\n| exchange.workload_yielded_tasks | yielded | tasks/s |\n| exchange.workload_activity_status | active, paused | status |\n\n### Per ldap process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit 
|\n|:------|:----------|:----|\n| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |\n| exchange.ldap_read_time | read | seconds |\n| exchange.ldap_search_time | search | seconds |\n| exchange.ldap_write_time | write | seconds |\n| exchange.ldap_timeout_errors | timeout | errors/s |\n\n### Per http proxy\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.http_proxy_avg_auth_latency | latency | seconds |\n| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |\n| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |\n| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |\n| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |\n| exchange.http_proxy_requests | processed | requests/s |\n\n### Per vm\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_name | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_cpu_usage | gues, hypervisor, remote | percentage |\n| hyperv.vm_memory_physical | assigned_memory | MiB |\n| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |\n| hyperv.vm_memory_pressure_current | pressure | percentage |\n| hyperv.vm_vid_physical_pages_allocated | allocated | pages |\n| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |\n\n### Per vm device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_device_bytes | read, written | bytes/s |\n| hyperv.vm_device_operations | read, write | operations/s |\n| hyperv.vm_device_errors | errors | errors/s |\n\n### Per vm interface\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_interface | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_interface_bytes | received, sent | bytes/s |\n| hyperv.vm_interface_packets | received, sent | packets/s |\n| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |\n\n### Per vswitch\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vswitch | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vswitch_bytes | received, sent | bytes/s |\n| hyperv.vswitch_packets | received, sent | packets/s |\n| hyperv.vswitch_directed_packets | received, sent | packets/s |\n| hyperv.vswitch_broadcast_packets | received, sent | packets/s |\n| hyperv.vswitch_multicast_packets | received, sent | packets/s |\n| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_packets_flooded | flooded | packets/s |\n| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |\n| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-windows-MS_SQL_Server", "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/windows/metadata.yaml", @@ -17354,10 +17735,10 @@ export const integrations = [ } }, "overview": "# NET Framework\n\nPlugin: go.d.plugin\nModule: windows\n\n## Overview\n\nThis collector monitors the performance of Windows machines, collects both host metrics and metrics from 
various Windows applications (e.g. Active Directory, MSSQL).\n\nIt collects metrics by periodically sending HTTP requests to [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt detects Windows exporter instances running on localhost (requires using [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)).\n\nUsing the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nData collection affects the CPU usage of the Windows host. CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Windows exporter\n\nTo install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/windows.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/windows.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n url: https://192.0.2.1:9182/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Virtual Node\n\nThe Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.\nYou can create a virtual node for all your Windows machines and control them as separate entities.\n\nTo make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:\n\n> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.\n\n```yaml\n# /etc/netdata/vnodes/vnodes.conf\n- hostname: win_server\n guid: \n```\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n vnode: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from multiple remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server1\n url: http://192.0.2.1:9182/metrics\n\n - name: win_server2\n url: http://192.0.2.2:9182/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Windows exporter\n\nTo install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/windows.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/windows.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. 
| | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n url: https://192.0.2.1:9182/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Virtual Node\n\nThe Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.\nYou can create a virtual node for all your Windows machines and control them as separate entities.\n\nTo make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:\n\n> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.\n\n```yaml\n# /etc/netdata/vnodes/vnodes.conf\n- hostname: win_server\n guid: \n```\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n vnode: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from multiple remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server1\n url: http://192.0.2.1:9182/metrics\n\n - name: win_server2\n url: http://192.0.2.2:9182/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `windows` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m windows\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `windows` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep windows\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep windows /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep windows\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ windows_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.cpu_utilization_total | average CPU utilization over the last 10 minutes |\n| [ windows_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.memory_utilization | memory utilization |\n| [ windows_inbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of inbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of outbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_inbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of inbound errors for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of outbound errors for the network interface in the last 10 minutes |\n| [ windows_disk_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.logical_disk_space_usage | disk space utilization |\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThe collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\nSupported collectors:\n\n- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)\n- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)\n- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)\n- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)\n- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)\n- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)\n- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)\n- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)\n- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)\n- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)\n- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)\n- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)\n- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)\n- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)\n- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)\n- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)\n- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md)\n- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md)\n- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md)\n- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md)\n- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md)\n- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md)\n- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md)\n- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)\n- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)\n\n\n### Per Active Directory instance\n\nThese metrics refer to the entire monitored host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |\n| windows.memory_utilization | available, used | bytes |\n| windows.memory_page_faults | page_faults | events/s |\n| windows.memory_swap_utilization 
| available, used | bytes |\n| windows.memory_swap_operations | read, write | operations/s |\n| windows.memory_swap_pages | read, written | pages/s |\n| windows.memory_cached | cached | KiB |\n| windows.memory_cache_faults | cache_faults | events/s |\n| windows.memory_system_pool | paged, non-paged | bytes |\n| windows.tcp_conns_established | ipv4, ipv6 | connections |\n| windows.tcp_conns_active | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |\n| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |\n| windows.tcp_segments_received | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |\n| windows.os_processes | processes | number |\n| windows.os_users | users | users |\n| windows.os_visible_memory_usage | free, used | bytes |\n| windows.os_paging_files_usage | free, used | bytes |\n| windows.system_threads | threads | number |\n| windows.system_uptime | time | seconds |\n| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |\n| windows.processes_cpu_utilization | a dimension per process | percentage |\n| windows.processes_handles | a dimension per process | handles |\n| windows.processes_io_bytes | a dimension per process | bytes/s |\n| windows.processes_io_operations | a dimension per process | operations/s |\n| windows.processes_page_faults | a dimension per process | pgfaults/s |\n| windows.processes_page_file_bytes | a dimension per process | bytes |\n| windows.processes_pool_bytes | a dimension per process | bytes |\n| windows.processes_threads | a dimension per process | threads |\n| ad.database_operations | add, delete, modify, recycle | operations/s |\n| ad.directory_operations | read, write, search | operations/s |\n| ad.name_cache_lookups | lookups | lookups/s |\n| ad.name_cache_hits | hits | hits/s |\n| ad.atq_average_request_latency | time | seconds |\n| ad.atq_outstanding_requests | outstanding | requests |\n| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |\n| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |\n| ad.dra_replication_properties_updated | inbound, outbound | properties/s |\n| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |\n| ad.dra_replication_pending_syncs | pending | syncs |\n| ad.dra_replication_sync_requests | requests | requests/s |\n| ad.ds_threads | in_use | threads |\n| ad.ldap_last_bind_time | last_bind | seconds |\n| ad.binds | binds | binds/s |\n| ad.ldap_searches | searches | searches/s |\n| adfs.ad_login_connection_failures | connection | failures/s |\n| adfs.certificate_authentications | authentications | authentications/s |\n| adfs.db_artifact_failures | connection | failures/s |\n| adfs.db_artifact_query_time_seconds | query_time | seconds/s |\n| adfs.db_config_failures | connection | failures/s |\n| adfs.db_config_query_time_seconds | query_time | seconds/s |\n| adfs.device_authentications | authentications | authentications/s |\n| adfs.external_authentications | success, failure | authentications/s |\n| adfs.federated_authentications | authentications | 
authentications/s |\n| adfs.federation_metadata_requests | requests | requests/s |\n| adfs.oauth_authorization_requests | requests | requests/s |\n| adfs.oauth_client_authentications | success, failure | authentications/s |\n| adfs.oauth_client_credentials_requests | success, failure | requests/s |\n| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |\n| adfs.oauth_client_windows_authentications | success, failure | authentications/s |\n| adfs.oauth_logon_certificate_requests | success, failure | requests/s |\n| adfs.oauth_password_grant_requests | success, failure | requests/s |\n| adfs.oauth_token_requests_success | success | requests/s |\n| adfs.passive_requests | passive | requests/s |\n| adfs.passport_authentications | passport | authentications/s |\n| adfs.password_change_requests | success, failure | requests/s |\n| adfs.samlp_token_requests_success | success | requests/s |\n| adfs.sso_authentications | success, failure | authentications/s |\n| adfs.token_requests | requests | requests/s |\n| adfs.userpassword_authentications | success, failure | authentications/s |\n| adfs.windows_integrated_authentications | authentications | authentications/s |\n| adfs.wsfed_token_requests_success | success | requests/s |\n| adfs.wstrust_token_requests_success | success | requests/s |\n| exchange.activesync_ping_cmds_pending | pending | commands |\n| exchange.activesync_requests | received | requests/s |\n| exchange.activesync_sync_cmds | processed | commands/s |\n| exchange.autodiscover_requests | processed | requests/s |\n| exchange.avail_service_requests | serviced | requests/s |\n| exchange.owa_current_unique_users | logged-in | users |\n| exchange.owa_requests_total | handled | requests/s |\n| exchange.rpc_active_user_count | active | users |\n| exchange.rpc_avg_latency | latency | seconds |\n| exchange.rpc_connection_count | connections | connections |\n| exchange.rpc_operations | operations | operations/s |\n| exchange.rpc_requests | processed | requests |\n| exchange.rpc_user_count | users | users |\n| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_poison | low, high, none, normal | messages/s |\n| hyperv.vms_health | ok, critical | vms |\n| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |\n| hyperv.root_partition_attached_devices | attached | devices |\n| hyperv.root_partition_deposited_pages | deposited | pages |\n| hyperv.root_partition_skipped_interrupts | skipped | interrupts |\n| hyperv.root_partition_device_dma_errors | illegal_dma | requests |\n| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | requests |\n| 
hyperv.root_partition_device_interrupt_throttle_events | throttling | events |\n| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |\n| hyperv.root_partition_address_space | address_spaces | address spaces |\n| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |\n| hyperv.root_partition_virtual_tlb_pages | used | pages |\n\n### Per cpu core\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| core | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |\n| windows.cpu_core_interrupts | interrupts | interrupts/s |\n| windows.cpu_core_dpcs | dpcs | dpcs/s |\n| windows.cpu_core_cstate | c1, c2, c3 | percentage |\n\n### Per logical disk\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.logical_disk_utilization | free, used | bytes |\n| windows.logical_disk_bandwidth | read, write | bytes/s |\n| windows.logical_disk_operations | reads, writes | operations/s |\n| windows.logical_disk_latency | read, write | seconds |\n\n### Per network device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| nic | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.net_nic_bandwidth | received, sent | kilobits/s |\n| windows.net_nic_packets | received, sent | packets/s |\n| windows.net_nic_errors | inbound, outbound | errors/s |\n| windows.net_nic_discarded | inbound, outbound | discards/s |\n\n### Per thermalzone\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thermalzone | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.thermalzone_temperature | temperature | celsius |\n\n### Per service\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |\n| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |\n\n### Per website\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| website | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.website_traffic | received, sent | bytes/s |\n| iis.website_requests_rate | requests | requests/s |\n| iis.website_active_connections_count | active | connections |\n| iis.website_users_count | anonymous, non_anonymous | users |\n| iis.website_connection_attempts_rate | connection | attempts/s |\n| iis.website_isapi_extension_requests_count | isapi | requests |\n| iis.website_isapi_extension_requests_rate | isapi | requests/s |\n| iis.website_ftp_file_transfer_rate | received, sent | files/s |\n| iis.website_logon_attempts_rate | logon | attempts/s |\n| iis.website_errors_rate | document_locked, document_not_found | errors/s |\n| iis.website_uptime | document_locked, document_not_found | seconds |\n\n### Per mssql instance\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.instance_accessmethods_page_splits | 
page | splits/s |\n| mssql.instance_cache_hit_ratio | hit_ratio | percentage |\n| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |\n| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |\n| mssql.instance_bufman_iops | read, written | iops |\n| mssql.instance_blocked_processes | blocked | processes |\n| mssql.instance_user_connection | user | connections |\n| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |\n| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |\n| mssql.instance_memmgr_pending_memory_grants | pending | processes |\n| mssql.instance_memmgr_server_memory | memory | bytes |\n| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |\n| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |\n| mssql.instance_sqlstats_batch_requests | batch | requests/s |\n| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |\n| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |\n| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |\n\n### Per database\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n| database | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.database_active_transactions | active | transactions |\n| mssql.database_backup_restore_operations | backup | operations/s |\n| mssql.database_data_files_size | size | bytes |\n| mssql.database_log_flushed | flushed | bytes/s |\n| mssql.database_log_flushes | log | flushes/s |\n| mssql.database_transactions | transactions | transactions/s |\n| mssql.database_write_transactions | write | transactions/s |\n\n### Per certificate template\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cert_template | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adcs.cert_template_requests | requests | requests/s |\n| adcs.cert_template_failed_requests | failed | requests/s |\n| adcs.cert_template_issued_requests | issued | requests/s |\n| adcs.cert_template_pending_requests | pending | requests/s |\n| adcs.cert_template_request_processing_time | processing_time | seconds |\n| adcs.cert_template_retrievals | retrievals | retrievals/s |\n| adcs.cert_template_retrieval_processing_time | processing_time | seconds |\n| adcs.cert_template_request_cryptographic_signing_time | singing_time | seconds |\n| adcs.cert_template_request_policy_module_processing | processing_time | seconds |\n| adcs.cert_template_challenge_responses | challenge | responses/s |\n| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |\n| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |\n| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |\n\n### Per process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| process | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netframework.clrexception_thrown | exceptions | exceptions/s |\n| 
netframework.clrexception_filters | filters | filters/s |\n| netframework.clrexception_finallys | finallys | finallys/s |\n| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |\n| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |\n| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |\n| netframework.clrinterop_interop_stubs_created | created | stubs/s |\n| netframework.clrjit_methods | jit-compiled | methods/s |\n| netframework.clrjit_time | time | percentage |\n| netframework.clrjit_standard_failures | failures | failures/s |\n| netframework.clrjit_il_bytes | compiled_msil | bytes/s |\n| netframework.clrloading_loader_heap_size | committed | bytes |\n| netframework.clrloading_appdomains_loaded | loaded | domain/s |\n| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |\n| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |\n| netframework.clrloading_classes_loaded | loaded | classes/s |\n| netframework.clrloading_class_load_failures | class_load | failures/s |\n| netframework.clrlocksandthreads_queue_length | threads | threads/s |\n| netframework.clrlocksandthreads_current_logical_threads | logical | threads |\n| netframework.clrlocksandthreads_current_physical_threads | physical | threads |\n| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |\n| netframework.clrlocksandthreads_contentions | contentions | contentions/s |\n| netframework.clrmemory_allocated_bytes | allocated | bytes/s |\n| netframework.clrmemory_finalization_survivors | survived | objects |\n| netframework.clrmemory_heap_size | heap | bytes |\n| netframework.clrmemory_promoted | promoted | bytes |\n| netframework.clrmemory_number_gc_handles | used | handles |\n| netframework.clrmemory_collections | gc | gc/s |\n| netframework.clrmemory_induced_gc | gc | gc/s |\n| netframework.clrmemory_number_pinned_objects | pinned | objects |\n| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |\n| netframework.clrmemory_committed | committed | bytes |\n| netframework.clrmemory_reserved | reserved | bytes |\n| netframework.clrmemory_gc_time | time | percentage |\n| netframework.clrremoting_channels | registered | channels/s |\n| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |\n| netframework.clrremoting_context_bound_objects | allocated | objects/s |\n| netframework.clrremoting_context_proxies | objects | objects/s |\n| netframework.clrremoting_contexts | contexts | contexts |\n| netframework.clrremoting_remote_calls | rpc | calls/s |\n| netframework.clrsecurity_link_time_checks | linktime | checks/s |\n| netframework.clrsecurity_checks_time | time | percentage |\n| netframework.clrsecurity_stack_walk_depth | stack | depth |\n| netframework.clrsecurity_runtime_checks | runtime | checks/s |\n\n### Per exchange workload\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.workload_active_tasks | active | tasks |\n| exchange.workload_completed_tasks | completed | tasks/s |\n| exchange.workload_queued_tasks | queued | tasks/s |\n| exchange.workload_yielded_tasks | yielded | tasks/s |\n| exchange.workload_activity_status | active, paused | status |\n\n### Per ldap process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit 
|\n|:------|:----------|:----|\n| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |\n| exchange.ldap_read_time | read | seconds |\n| exchange.ldap_search_time | search | seconds |\n| exchange.ldap_write_time | write | seconds |\n| exchange.ldap_timeout_errors | timeout | errors/s |\n\n### Per http proxy\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.http_proxy_avg_auth_latency | latency | seconds |\n| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |\n| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |\n| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |\n| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |\n| exchange.http_proxy_requests | processed | requests/s |\n\n### Per vm\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_name | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_cpu_usage | gues, hypervisor, remote | percentage |\n| hyperv.vm_memory_physical | assigned_memory | MiB |\n| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |\n| hyperv.vm_memory_pressure_current | pressure | percentage |\n| hyperv.vm_vid_physical_pages_allocated | allocated | pages |\n| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |\n\n### Per vm device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_device_bytes | read, written | bytes/s |\n| hyperv.vm_device_operations | read, write | operations/s |\n| hyperv.vm_device_errors | errors | errors/s |\n\n### Per vm interface\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_interface | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_interface_bytes | received, sent | bytes/s |\n| hyperv.vm_interface_packets | received, sent | packets/s |\n| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |\n\n### Per vswitch\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vswitch | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vswitch_bytes | received, sent | bytes/s |\n| hyperv.vswitch_packets | received, sent | packets/s |\n| hyperv.vswitch_directed_packets | received, sent | packets/s |\n| hyperv.vswitch_broadcast_packets | received, sent | packets/s |\n| hyperv.vswitch_multicast_packets | received, sent | packets/s |\n| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_packets_flooded | flooded | packets/s |\n| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |\n| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |\n\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThe collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\nSupported collectors:\n\n- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)\n- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)\n- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)\n- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)\n- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)\n- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)\n- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)\n- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)\n- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)\n- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)\n- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)\n- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)\n- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)\n- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)\n- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)\n- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)\n- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-exceptions)\n- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-interop)\n- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-jit)\n- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-loading)\n- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-locks-and-threads)\n- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-memory)\n- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-remoting)\n- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)\n- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)\n\n\n### Per Active Directory instance\n\nThese metrics refer to the entire monitored host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |\n| windows.memory_utilization | available, used | bytes |\n| windows.memory_page_faults | page_faults | events/s |\n| 
windows.memory_swap_utilization | available, used | bytes |\n| windows.memory_swap_operations | read, write | operations/s |\n| windows.memory_swap_pages | read, written | pages/s |\n| windows.memory_cached | cached | KiB |\n| windows.memory_cache_faults | cache_faults | events/s |\n| windows.memory_system_pool | paged, non-paged | bytes |\n| windows.tcp_conns_established | ipv4, ipv6 | connections |\n| windows.tcp_conns_active | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |\n| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |\n| windows.tcp_segments_received | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |\n| windows.os_processes | processes | number |\n| windows.os_users | users | users |\n| windows.os_visible_memory_usage | free, used | bytes |\n| windows.os_paging_files_usage | free, used | bytes |\n| windows.system_threads | threads | number |\n| windows.system_uptime | time | seconds |\n| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |\n| windows.processes_cpu_utilization | a dimension per process | percentage |\n| windows.processes_handles | a dimension per process | handles |\n| windows.processes_io_bytes | a dimension per process | bytes/s |\n| windows.processes_io_operations | a dimension per process | operations/s |\n| windows.processes_page_faults | a dimension per process | pgfaults/s |\n| windows.processes_page_file_bytes | a dimension per process | bytes |\n| windows.processes_pool_bytes | a dimension per process | bytes |\n| windows.processes_threads | a dimension per process | threads |\n| ad.database_operations | add, delete, modify, recycle | operations/s |\n| ad.directory_operations | read, write, search | operations/s |\n| ad.name_cache_lookups | lookups | lookups/s |\n| ad.name_cache_hits | hits | hits/s |\n| ad.atq_average_request_latency | time | seconds |\n| ad.atq_outstanding_requests | outstanding | requests |\n| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |\n| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |\n| ad.dra_replication_properties_updated | inbound, outbound | properties/s |\n| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |\n| ad.dra_replication_pending_syncs | pending | syncs |\n| ad.dra_replication_sync_requests | requests | requests/s |\n| ad.ds_threads | in_use | threads |\n| ad.ldap_last_bind_time | last_bind | seconds |\n| ad.binds | binds | binds/s |\n| ad.ldap_searches | searches | searches/s |\n| adfs.ad_login_connection_failures | connection | failures/s |\n| adfs.certificate_authentications | authentications | authentications/s |\n| adfs.db_artifact_failures | connection | failures/s |\n| adfs.db_artifact_query_time_seconds | query_time | seconds/s |\n| adfs.db_config_failures | connection | failures/s |\n| adfs.db_config_query_time_seconds | query_time | seconds/s |\n| adfs.device_authentications | authentications | authentications/s |\n| adfs.external_authentications | success, failure | authentications/s |\n| 
adfs.federated_authentications | authentications | authentications/s |\n| adfs.federation_metadata_requests | requests | requests/s |\n| adfs.oauth_authorization_requests | requests | requests/s |\n| adfs.oauth_client_authentications | success, failure | authentications/s |\n| adfs.oauth_client_credentials_requests | success, failure | requests/s |\n| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |\n| adfs.oauth_client_windows_authentications | success, failure | authentications/s |\n| adfs.oauth_logon_certificate_requests | success, failure | requests/s |\n| adfs.oauth_password_grant_requests | success, failure | requests/s |\n| adfs.oauth_token_requests_success | success | requests/s |\n| adfs.passive_requests | passive | requests/s |\n| adfs.passport_authentications | passport | authentications/s |\n| adfs.password_change_requests | success, failure | requests/s |\n| adfs.samlp_token_requests_success | success | requests/s |\n| adfs.sso_authentications | success, failure | authentications/s |\n| adfs.token_requests | requests | requests/s |\n| adfs.userpassword_authentications | success, failure | authentications/s |\n| adfs.windows_integrated_authentications | authentications | authentications/s |\n| adfs.wsfed_token_requests_success | success | requests/s |\n| adfs.wstrust_token_requests_success | success | requests/s |\n| exchange.activesync_ping_cmds_pending | pending | commands |\n| exchange.activesync_requests | received | requests/s |\n| exchange.activesync_sync_cmds | processed | commands/s |\n| exchange.autodiscover_requests | processed | requests/s |\n| exchange.avail_service_requests | serviced | requests/s |\n| exchange.owa_current_unique_users | logged-in | users |\n| exchange.owa_requests_total | handled | requests/s |\n| exchange.rpc_active_user_count | active | users |\n| exchange.rpc_avg_latency | latency | seconds |\n| exchange.rpc_connection_count | connections | connections |\n| exchange.rpc_operations | operations | operations/s |\n| exchange.rpc_requests | processed | requests |\n| exchange.rpc_user_count | users | users |\n| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_poison | low, high, none, normal | messages/s |\n| hyperv.vms_health | ok, critical | vms |\n| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |\n| hyperv.root_partition_attached_devices | attached | devices |\n| hyperv.root_partition_deposited_pages | deposited | pages |\n| hyperv.root_partition_skipped_interrupts | skipped | interrupts |\n| hyperv.root_partition_device_dma_errors | illegal_dma | requests |\n| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | 
requests |\n| hyperv.root_partition_device_interrupt_throttle_events | throttling | events |\n| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |\n| hyperv.root_partition_address_space | address_spaces | address spaces |\n| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |\n| hyperv.root_partition_virtual_tlb_pages | used | pages |\n\n### Per cpu core\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| core | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |\n| windows.cpu_core_interrupts | interrupts | interrupts/s |\n| windows.cpu_core_dpcs | dpcs | dpcs/s |\n| windows.cpu_core_cstate | c1, c2, c3 | percentage |\n\n### Per logical disk\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.logical_disk_utilization | free, used | bytes |\n| windows.logical_disk_bandwidth | read, write | bytes/s |\n| windows.logical_disk_operations | reads, writes | operations/s |\n| windows.logical_disk_latency | read, write | seconds |\n\n### Per network device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| nic | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.net_nic_bandwidth | received, sent | kilobits/s |\n| windows.net_nic_packets | received, sent | packets/s |\n| windows.net_nic_errors | inbound, outbound | errors/s |\n| windows.net_nic_discarded | inbound, outbound | discards/s |\n\n### Per thermalzone\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thermalzone | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.thermalzone_temperature | temperature | celsius |\n\n### Per service\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |\n| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |\n\n### Per website\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| website | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.website_traffic | received, sent | bytes/s |\n| iis.website_requests_rate | requests | requests/s |\n| iis.website_active_connections_count | active | connections |\n| iis.website_users_count | anonymous, non_anonymous | users |\n| iis.website_connection_attempts_rate | connection | attempts/s |\n| iis.website_isapi_extension_requests_count | isapi | requests |\n| iis.website_isapi_extension_requests_rate | isapi | requests/s |\n| iis.website_ftp_file_transfer_rate | received, sent | files/s |\n| iis.website_logon_attempts_rate | logon | attempts/s |\n| iis.website_errors_rate | document_locked, document_not_found | errors/s |\n| iis.website_uptime | document_locked, document_not_found | seconds |\n\n### Per mssql instance\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| 
mssql.instance_accessmethods_page_splits | page | splits/s |\n| mssql.instance_cache_hit_ratio | hit_ratio | percentage |\n| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |\n| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |\n| mssql.instance_bufman_iops | read, written | iops |\n| mssql.instance_blocked_processes | blocked | processes |\n| mssql.instance_user_connection | user | connections |\n| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |\n| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |\n| mssql.instance_memmgr_pending_memory_grants | pending | processes |\n| mssql.instance_memmgr_server_memory | memory | bytes |\n| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |\n| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |\n| mssql.instance_sqlstats_batch_requests | batch | requests/s |\n| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |\n| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |\n| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |\n\n### Per database\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n| database | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.database_active_transactions | active | transactions |\n| mssql.database_backup_restore_operations | backup | operations/s |\n| mssql.database_data_files_size | size | bytes |\n| mssql.database_log_flushed | flushed | bytes/s |\n| mssql.database_log_flushes | log | flushes/s |\n| mssql.database_transactions | transactions | transactions/s |\n| mssql.database_write_transactions | write | transactions/s |\n\n### Per certificate template\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cert_template | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adcs.cert_template_requests | requests | requests/s |\n| adcs.cert_template_failed_requests | failed | requests/s |\n| adcs.cert_template_issued_requests | issued | requests/s |\n| adcs.cert_template_pending_requests | pending | requests/s |\n| adcs.cert_template_request_processing_time | processing_time | seconds |\n| adcs.cert_template_retrievals | retrievals | retrievals/s |\n| adcs.cert_template_retrieval_processing_time | processing_time | seconds |\n| adcs.cert_template_request_cryptographic_signing_time | singing_time | seconds |\n| adcs.cert_template_request_policy_module_processing | processing_time | seconds |\n| adcs.cert_template_challenge_responses | challenge | responses/s |\n| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |\n| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |\n| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |\n\n### Per process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| process | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netframework.clrexception_thrown | exceptions | 
exceptions/s |\n| netframework.clrexception_filters | filters | filters/s |\n| netframework.clrexception_finallys | finallys | finallys/s |\n| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |\n| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |\n| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |\n| netframework.clrinterop_interop_stubs_created | created | stubs/s |\n| netframework.clrjit_methods | jit-compiled | methods/s |\n| netframework.clrjit_time | time | percentage |\n| netframework.clrjit_standard_failures | failures | failures/s |\n| netframework.clrjit_il_bytes | compiled_msil | bytes/s |\n| netframework.clrloading_loader_heap_size | committed | bytes |\n| netframework.clrloading_appdomains_loaded | loaded | domain/s |\n| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |\n| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |\n| netframework.clrloading_classes_loaded | loaded | classes/s |\n| netframework.clrloading_class_load_failures | class_load | failures/s |\n| netframework.clrlocksandthreads_queue_length | threads | threads/s |\n| netframework.clrlocksandthreads_current_logical_threads | logical | threads |\n| netframework.clrlocksandthreads_current_physical_threads | physical | threads |\n| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |\n| netframework.clrlocksandthreads_contentions | contentions | contentions/s |\n| netframework.clrmemory_allocated_bytes | allocated | bytes/s |\n| netframework.clrmemory_finalization_survivors | survived | objects |\n| netframework.clrmemory_heap_size | heap | bytes |\n| netframework.clrmemory_promoted | promoted | bytes |\n| netframework.clrmemory_number_gc_handles | used | handles |\n| netframework.clrmemory_collections | gc | gc/s |\n| netframework.clrmemory_induced_gc | gc | gc/s |\n| netframework.clrmemory_number_pinned_objects | pinned | objects |\n| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |\n| netframework.clrmemory_committed | committed | bytes |\n| netframework.clrmemory_reserved | reserved | bytes |\n| netframework.clrmemory_gc_time | time | percentage |\n| netframework.clrremoting_channels | registered | channels/s |\n| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |\n| netframework.clrremoting_context_bound_objects | allocated | objects/s |\n| netframework.clrremoting_context_proxies | objects | objects/s |\n| netframework.clrremoting_contexts | contexts | contexts |\n| netframework.clrremoting_remote_calls | rpc | calls/s |\n| netframework.clrsecurity_link_time_checks | linktime | checks/s |\n| netframework.clrsecurity_checks_time | time | percentage |\n| netframework.clrsecurity_stack_walk_depth | stack | depth |\n| netframework.clrsecurity_runtime_checks | runtime | checks/s |\n\n### Per exchange workload\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.workload_active_tasks | active | tasks |\n| exchange.workload_completed_tasks | completed | tasks/s |\n| exchange.workload_queued_tasks | queued | tasks/s |\n| exchange.workload_yielded_tasks | yielded | tasks/s |\n| exchange.workload_activity_status | active, paused | status |\n\n### Per ldap process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit 
|\n|:------|:----------|:----|\n| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |\n| exchange.ldap_read_time | read | seconds |\n| exchange.ldap_search_time | search | seconds |\n| exchange.ldap_write_time | write | seconds |\n| exchange.ldap_timeout_errors | timeout | errors/s |\n\n### Per http proxy\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.http_proxy_avg_auth_latency | latency | seconds |\n| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |\n| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |\n| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |\n| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |\n| exchange.http_proxy_requests | processed | requests/s |\n\n### Per vm\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_name | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_cpu_usage | gues, hypervisor, remote | percentage |\n| hyperv.vm_memory_physical | assigned_memory | MiB |\n| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |\n| hyperv.vm_memory_pressure_current | pressure | percentage |\n| hyperv.vm_vid_physical_pages_allocated | allocated | pages |\n| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |\n\n### Per vm device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_device_bytes | read, written | bytes/s |\n| hyperv.vm_device_operations | read, write | operations/s |\n| hyperv.vm_device_errors | errors | errors/s |\n\n### Per vm interface\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_interface | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_interface_bytes | received, sent | bytes/s |\n| hyperv.vm_interface_packets | received, sent | packets/s |\n| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |\n\n### Per vswitch\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vswitch | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vswitch_bytes | received, sent | bytes/s |\n| hyperv.vswitch_packets | received, sent | packets/s |\n| hyperv.vswitch_directed_packets | received, sent | packets/s |\n| hyperv.vswitch_broadcast_packets | received, sent | packets/s |\n| hyperv.vswitch_multicast_packets | received, sent | packets/s |\n| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_packets_flooded | flooded | packets/s |\n| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |\n| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-windows-NET_Framework", "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/windows/metadata.yaml", @@ -17391,10 +17772,10 @@ export const integrations = [ } }, "overview": "# Windows\n\nPlugin: go.d.plugin\nModule: windows\n\n## Overview\n\nThis collector monitors the performance of Windows machines, collects both host metrics and metrics from various 
Windows applications (e.g. Active Directory, MSSQL).\n\n\nIt collects metrics by periodically sending HTTP requests to the [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt detects Windows exporter instances running on localhost (requires using the [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)).\n\nUsing the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nData collection affects the CPU usage of the Windows host. CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Windows exporter\n\nTo install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/windows.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/windows.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n url: https://192.0.2.1:9182/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Virtual Node\n\nThe Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.\nYou can create a virtual node for all your Windows machines and control them as separate entities.\n\nTo make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:\n\n> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.\n\n```yaml\n# /etc/netdata/vnodes/vnodes.conf\n- hostname: win_server\n guid: \n```\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n vnode: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from multiple remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server1\n url: http://192.0.2.1:9182/metrics\n\n - name: win_server2\n url: http://192.0.2.2:9182/metrics\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Windows exporter\n\nTo install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/windows.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/windows.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. 
| | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n url: https://192.0.2.1:9182/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Virtual Node\n\nThe Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.\nYou can create a virtual node for all your Windows machines and control them as separate entities.\n\nTo make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:\n\n> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.\n\n```yaml\n# /etc/netdata/vnodes/vnodes.conf\n- hostname: win_server\n guid: \n```\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n vnode: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from multiple remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server1\n url: http://192.0.2.1:9182/metrics\n\n - name: win_server2\n url: http://192.0.2.2:9182/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `windows` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m windows\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `windows` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep windows\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep windows /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep windows\n```\n\n",
 "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ windows_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.cpu_utilization_total | average CPU utilization over the last 10 minutes |\n| [ windows_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.memory_utilization | memory utilization |\n| [ windows_inbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of inbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of outbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_inbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of inbound errors for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of outbound errors for the network interface in the last 10 minutes |\n| [ windows_disk_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.logical_disk_space_usage | disk space utilization |\n",
- "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThe collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\nSupported collectors:\n\n- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)\n- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)\n- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)\n- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)\n- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)\n- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)\n- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)\n- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)\n- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)\n- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)\n- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)\n- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)\n- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)\n- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)\n- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)\n- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)\n- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md)\n- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md)\n- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md)\n- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md)\n- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md)\n- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md)\n- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md)\n- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)\n- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)\n\n\n### Per Active Directory instance\n\nThese metrics refer to the entire monitored host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |\n| windows.memory_utilization | available, used | bytes |\n| windows.memory_page_faults | page_faults | events/s |\n| windows.memory_swap_utilization 
| available, used | bytes |\n| windows.memory_swap_operations | read, write | operations/s |\n| windows.memory_swap_pages | read, written | pages/s |\n| windows.memory_cached | cached | KiB |\n| windows.memory_cache_faults | cache_faults | events/s |\n| windows.memory_system_pool | paged, non-paged | bytes |\n| windows.tcp_conns_established | ipv4, ipv6 | connections |\n| windows.tcp_conns_active | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |\n| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |\n| windows.tcp_segments_received | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |\n| windows.os_processes | processes | number |\n| windows.os_users | users | users |\n| windows.os_visible_memory_usage | free, used | bytes |\n| windows.os_paging_files_usage | free, used | bytes |\n| windows.system_threads | threads | number |\n| windows.system_uptime | time | seconds |\n| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |\n| windows.processes_cpu_utilization | a dimension per process | percentage |\n| windows.processes_handles | a dimension per process | handles |\n| windows.processes_io_bytes | a dimension per process | bytes/s |\n| windows.processes_io_operations | a dimension per process | operations/s |\n| windows.processes_page_faults | a dimension per process | pgfaults/s |\n| windows.processes_page_file_bytes | a dimension per process | bytes |\n| windows.processes_pool_bytes | a dimension per process | bytes |\n| windows.processes_threads | a dimension per process | threads |\n| ad.database_operations | add, delete, modify, recycle | operations/s |\n| ad.directory_operations | read, write, search | operations/s |\n| ad.name_cache_lookups | lookups | lookups/s |\n| ad.name_cache_hits | hits | hits/s |\n| ad.atq_average_request_latency | time | seconds |\n| ad.atq_outstanding_requests | outstanding | requests |\n| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |\n| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |\n| ad.dra_replication_properties_updated | inbound, outbound | properties/s |\n| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |\n| ad.dra_replication_pending_syncs | pending | syncs |\n| ad.dra_replication_sync_requests | requests | requests/s |\n| ad.ds_threads | in_use | threads |\n| ad.ldap_last_bind_time | last_bind | seconds |\n| ad.binds | binds | binds/s |\n| ad.ldap_searches | searches | searches/s |\n| adfs.ad_login_connection_failures | connection | failures/s |\n| adfs.certificate_authentications | authentications | authentications/s |\n| adfs.db_artifact_failures | connection | failures/s |\n| adfs.db_artifact_query_time_seconds | query_time | seconds/s |\n| adfs.db_config_failures | connection | failures/s |\n| adfs.db_config_query_time_seconds | query_time | seconds/s |\n| adfs.device_authentications | authentications | authentications/s |\n| adfs.external_authentications | success, failure | authentications/s |\n| adfs.federated_authentications | authentications | 
authentications/s |\n| adfs.federation_metadata_requests | requests | requests/s |\n| adfs.oauth_authorization_requests | requests | requests/s |\n| adfs.oauth_client_authentications | success, failure | authentications/s |\n| adfs.oauth_client_credentials_requests | success, failure | requests/s |\n| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |\n| adfs.oauth_client_windows_authentications | success, failure | authentications/s |\n| adfs.oauth_logon_certificate_requests | success, failure | requests/s |\n| adfs.oauth_password_grant_requests | success, failure | requests/s |\n| adfs.oauth_token_requests_success | success | requests/s |\n| adfs.passive_requests | passive | requests/s |\n| adfs.passport_authentications | passport | authentications/s |\n| adfs.password_change_requests | success, failure | requests/s |\n| adfs.samlp_token_requests_success | success | requests/s |\n| adfs.sso_authentications | success, failure | authentications/s |\n| adfs.token_requests | requests | requests/s |\n| adfs.userpassword_authentications | success, failure | authentications/s |\n| adfs.windows_integrated_authentications | authentications | authentications/s |\n| adfs.wsfed_token_requests_success | success | requests/s |\n| adfs.wstrust_token_requests_success | success | requests/s |\n| exchange.activesync_ping_cmds_pending | pending | commands |\n| exchange.activesync_requests | received | requests/s |\n| exchange.activesync_sync_cmds | processed | commands/s |\n| exchange.autodiscover_requests | processed | requests/s |\n| exchange.avail_service_requests | serviced | requests/s |\n| exchange.owa_current_unique_users | logged-in | users |\n| exchange.owa_requests_total | handled | requests/s |\n| exchange.rpc_active_user_count | active | users |\n| exchange.rpc_avg_latency | latency | seconds |\n| exchange.rpc_connection_count | connections | connections |\n| exchange.rpc_operations | operations | operations/s |\n| exchange.rpc_requests | processed | requests |\n| exchange.rpc_user_count | users | users |\n| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_poison | low, high, none, normal | messages/s |\n| hyperv.vms_health | ok, critical | vms |\n| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |\n| hyperv.root_partition_attached_devices | attached | devices |\n| hyperv.root_partition_deposited_pages | deposited | pages |\n| hyperv.root_partition_skipped_interrupts | skipped | interrupts |\n| hyperv.root_partition_device_dma_errors | illegal_dma | requests |\n| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | requests |\n| 
hyperv.root_partition_device_interrupt_throttle_events | throttling | events |\n| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |\n| hyperv.root_partition_address_space | address_spaces | address spaces |\n| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |\n| hyperv.root_partition_virtual_tlb_pages | used | pages |\n\n### Per cpu core\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| core | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |\n| windows.cpu_core_interrupts | interrupts | interrupts/s |\n| windows.cpu_core_dpcs | dpcs | dpcs/s |\n| windows.cpu_core_cstate | c1, c2, c3 | percentage |\n\n### Per logical disk\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.logical_disk_utilization | free, used | bytes |\n| windows.logical_disk_bandwidth | read, write | bytes/s |\n| windows.logical_disk_operations | reads, writes | operations/s |\n| windows.logical_disk_latency | read, write | seconds |\n\n### Per network device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| nic | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.net_nic_bandwidth | received, sent | kilobits/s |\n| windows.net_nic_packets | received, sent | packets/s |\n| windows.net_nic_errors | inbound, outbound | errors/s |\n| windows.net_nic_discarded | inbound, outbound | discards/s |\n\n### Per thermalzone\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thermalzone | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.thermalzone_temperature | temperature | celsius |\n\n### Per service\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |\n| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |\n\n### Per website\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| website | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.website_traffic | received, sent | bytes/s |\n| iis.website_requests_rate | requests | requests/s |\n| iis.website_active_connections_count | active | connections |\n| iis.website_users_count | anonymous, non_anonymous | users |\n| iis.website_connection_attempts_rate | connection | attempts/s |\n| iis.website_isapi_extension_requests_count | isapi | requests |\n| iis.website_isapi_extension_requests_rate | isapi | requests/s |\n| iis.website_ftp_file_transfer_rate | received, sent | files/s |\n| iis.website_logon_attempts_rate | logon | attempts/s |\n| iis.website_errors_rate | document_locked, document_not_found | errors/s |\n| iis.website_uptime | document_locked, document_not_found | seconds |\n\n### Per mssql instance\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.instance_accessmethods_page_splits | 
page | splits/s |\n| mssql.instance_cache_hit_ratio | hit_ratio | percentage |\n| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |\n| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |\n| mssql.instance_bufman_iops | read, written | iops |\n| mssql.instance_blocked_processes | blocked | processes |\n| mssql.instance_user_connection | user | connections |\n| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |\n| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |\n| mssql.instance_memmgr_pending_memory_grants | pending | processes |\n| mssql.instance_memmgr_server_memory | memory | bytes |\n| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |\n| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |\n| mssql.instance_sqlstats_batch_requests | batch | requests/s |\n| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |\n| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |\n| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |\n\n### Per database\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n| database | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.database_active_transactions | active | transactions |\n| mssql.database_backup_restore_operations | backup | operations/s |\n| mssql.database_data_files_size | size | bytes |\n| mssql.database_log_flushed | flushed | bytes/s |\n| mssql.database_log_flushes | log | flushes/s |\n| mssql.database_transactions | transactions | transactions/s |\n| mssql.database_write_transactions | write | transactions/s |\n\n### Per certificate template\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cert_template | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adcs.cert_template_requests | requests | requests/s |\n| adcs.cert_template_failed_requests | failed | requests/s |\n| adcs.cert_template_issued_requests | issued | requests/s |\n| adcs.cert_template_pending_requests | pending | requests/s |\n| adcs.cert_template_request_processing_time | processing_time | seconds |\n| adcs.cert_template_retrievals | retrievals | retrievals/s |\n| adcs.cert_template_retrieval_processing_time | processing_time | seconds |\n| adcs.cert_template_request_cryptographic_signing_time | singing_time | seconds |\n| adcs.cert_template_request_policy_module_processing | processing_time | seconds |\n| adcs.cert_template_challenge_responses | challenge | responses/s |\n| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |\n| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |\n| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |\n\n### Per process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| process | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netframework.clrexception_thrown | exceptions | exceptions/s |\n| 
netframework.clrexception_filters | filters | filters/s |\n| netframework.clrexception_finallys | finallys | finallys/s |\n| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |\n| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |\n| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |\n| netframework.clrinterop_interop_stubs_created | created | stubs/s |\n| netframework.clrjit_methods | jit-compiled | methods/s |\n| netframework.clrjit_time | time | percentage |\n| netframework.clrjit_standard_failures | failures | failures/s |\n| netframework.clrjit_il_bytes | compiled_msil | bytes/s |\n| netframework.clrloading_loader_heap_size | committed | bytes |\n| netframework.clrloading_appdomains_loaded | loaded | domain/s |\n| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |\n| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |\n| netframework.clrloading_classes_loaded | loaded | classes/s |\n| netframework.clrloading_class_load_failures | class_load | failures/s |\n| netframework.clrlocksandthreads_queue_length | threads | threads/s |\n| netframework.clrlocksandthreads_current_logical_threads | logical | threads |\n| netframework.clrlocksandthreads_current_physical_threads | physical | threads |\n| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |\n| netframework.clrlocksandthreads_contentions | contentions | contentions/s |\n| netframework.clrmemory_allocated_bytes | allocated | bytes/s |\n| netframework.clrmemory_finalization_survivors | survived | objects |\n| netframework.clrmemory_heap_size | heap | bytes |\n| netframework.clrmemory_promoted | promoted | bytes |\n| netframework.clrmemory_number_gc_handles | used | handles |\n| netframework.clrmemory_collections | gc | gc/s |\n| netframework.clrmemory_induced_gc | gc | gc/s |\n| netframework.clrmemory_number_pinned_objects | pinned | objects |\n| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |\n| netframework.clrmemory_committed | committed | bytes |\n| netframework.clrmemory_reserved | reserved | bytes |\n| netframework.clrmemory_gc_time | time | percentage |\n| netframework.clrremoting_channels | registered | channels/s |\n| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |\n| netframework.clrremoting_context_bound_objects | allocated | objects/s |\n| netframework.clrremoting_context_proxies | objects | objects/s |\n| netframework.clrremoting_contexts | contexts | contexts |\n| netframework.clrremoting_remote_calls | rpc | calls/s |\n| netframework.clrsecurity_link_time_checks | linktime | checks/s |\n| netframework.clrsecurity_checks_time | time | percentage |\n| netframework.clrsecurity_stack_walk_depth | stack | depth |\n| netframework.clrsecurity_runtime_checks | runtime | checks/s |\n\n### Per exchange workload\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.workload_active_tasks | active | tasks |\n| exchange.workload_completed_tasks | completed | tasks/s |\n| exchange.workload_queued_tasks | queued | tasks/s |\n| exchange.workload_yielded_tasks | yielded | tasks/s |\n| exchange.workload_activity_status | active, paused | status |\n\n### Per ldap process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit 
|\n|:------|:----------|:----|\n| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |\n| exchange.ldap_read_time | read | seconds |\n| exchange.ldap_search_time | search | seconds |\n| exchange.ldap_write_time | write | seconds |\n| exchange.ldap_timeout_errors | timeout | errors/s |\n\n### Per http proxy\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.http_proxy_avg_auth_latency | latency | seconds |\n| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |\n| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |\n| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |\n| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |\n| exchange.http_proxy_requests | processed | requests/s |\n\n### Per vm\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_name | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_cpu_usage | gues, hypervisor, remote | percentage |\n| hyperv.vm_memory_physical | assigned_memory | MiB |\n| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |\n| hyperv.vm_memory_pressure_current | pressure | percentage |\n| hyperv.vm_vid_physical_pages_allocated | allocated | pages |\n| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |\n\n### Per vm device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_device_bytes | read, written | bytes/s |\n| hyperv.vm_device_operations | read, write | operations/s |\n| hyperv.vm_device_errors | errors | errors/s |\n\n### Per vm interface\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_interface | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_interface_bytes | received, sent | bytes/s |\n| hyperv.vm_interface_packets | received, sent | packets/s |\n| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |\n\n### Per vswitch\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vswitch | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vswitch_bytes | received, sent | bytes/s |\n| hyperv.vswitch_packets | received, sent | packets/s |\n| hyperv.vswitch_directed_packets | received, sent | packets/s |\n| hyperv.vswitch_broadcast_packets | received, sent | packets/s |\n| hyperv.vswitch_multicast_packets | received, sent | packets/s |\n| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_packets_flooded | flooded | packets/s |\n| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |\n| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |\n\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThe collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\nSupported collectors:\n\n- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)\n- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)\n- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)\n- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)\n- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)\n- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)\n- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)\n- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)\n- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)\n- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)\n- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)\n- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)\n- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)\n- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)\n- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)\n- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)\n- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-exceptions)\n- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-interop)\n- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-jit)\n- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-loading)\n- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-locks-and-threads)\n- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-memory)\n- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-remoting)\n- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)\n- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)\n\n\n### Per Active Directory instance\n\nThese metrics refer to the entire monitored host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |\n| windows.memory_utilization | available, used | bytes |\n| windows.memory_page_faults | page_faults | events/s |\n| 
windows.memory_swap_utilization | available, used | bytes |\n| windows.memory_swap_operations | read, write | operations/s |\n| windows.memory_swap_pages | read, written | pages/s |\n| windows.memory_cached | cached | KiB |\n| windows.memory_cache_faults | cache_faults | events/s |\n| windows.memory_system_pool | paged, non-paged | bytes |\n| windows.tcp_conns_established | ipv4, ipv6 | connections |\n| windows.tcp_conns_active | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |\n| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |\n| windows.tcp_segments_received | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |\n| windows.os_processes | processes | number |\n| windows.os_users | users | users |\n| windows.os_visible_memory_usage | free, used | bytes |\n| windows.os_paging_files_usage | free, used | bytes |\n| windows.system_threads | threads | number |\n| windows.system_uptime | time | seconds |\n| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |\n| windows.processes_cpu_utilization | a dimension per process | percentage |\n| windows.processes_handles | a dimension per process | handles |\n| windows.processes_io_bytes | a dimension per process | bytes/s |\n| windows.processes_io_operations | a dimension per process | operations/s |\n| windows.processes_page_faults | a dimension per process | pgfaults/s |\n| windows.processes_page_file_bytes | a dimension per process | bytes |\n| windows.processes_pool_bytes | a dimension per process | bytes |\n| windows.processes_threads | a dimension per process | threads |\n| ad.database_operations | add, delete, modify, recycle | operations/s |\n| ad.directory_operations | read, write, search | operations/s |\n| ad.name_cache_lookups | lookups | lookups/s |\n| ad.name_cache_hits | hits | hits/s |\n| ad.atq_average_request_latency | time | seconds |\n| ad.atq_outstanding_requests | outstanding | requests |\n| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |\n| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |\n| ad.dra_replication_properties_updated | inbound, outbound | properties/s |\n| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |\n| ad.dra_replication_pending_syncs | pending | syncs |\n| ad.dra_replication_sync_requests | requests | requests/s |\n| ad.ds_threads | in_use | threads |\n| ad.ldap_last_bind_time | last_bind | seconds |\n| ad.binds | binds | binds/s |\n| ad.ldap_searches | searches | searches/s |\n| adfs.ad_login_connection_failures | connection | failures/s |\n| adfs.certificate_authentications | authentications | authentications/s |\n| adfs.db_artifact_failures | connection | failures/s |\n| adfs.db_artifact_query_time_seconds | query_time | seconds/s |\n| adfs.db_config_failures | connection | failures/s |\n| adfs.db_config_query_time_seconds | query_time | seconds/s |\n| adfs.device_authentications | authentications | authentications/s |\n| adfs.external_authentications | success, failure | authentications/s |\n| 
adfs.federated_authentications | authentications | authentications/s |\n| adfs.federation_metadata_requests | requests | requests/s |\n| adfs.oauth_authorization_requests | requests | requests/s |\n| adfs.oauth_client_authentications | success, failure | authentications/s |\n| adfs.oauth_client_credentials_requests | success, failure | requests/s |\n| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |\n| adfs.oauth_client_windows_authentications | success, failure | authentications/s |\n| adfs.oauth_logon_certificate_requests | success, failure | requests/s |\n| adfs.oauth_password_grant_requests | success, failure | requests/s |\n| adfs.oauth_token_requests_success | success | requests/s |\n| adfs.passive_requests | passive | requests/s |\n| adfs.passport_authentications | passport | authentications/s |\n| adfs.password_change_requests | success, failure | requests/s |\n| adfs.samlp_token_requests_success | success | requests/s |\n| adfs.sso_authentications | success, failure | authentications/s |\n| adfs.token_requests | requests | requests/s |\n| adfs.userpassword_authentications | success, failure | authentications/s |\n| adfs.windows_integrated_authentications | authentications | authentications/s |\n| adfs.wsfed_token_requests_success | success | requests/s |\n| adfs.wstrust_token_requests_success | success | requests/s |\n| exchange.activesync_ping_cmds_pending | pending | commands |\n| exchange.activesync_requests | received | requests/s |\n| exchange.activesync_sync_cmds | processed | commands/s |\n| exchange.autodiscover_requests | processed | requests/s |\n| exchange.avail_service_requests | serviced | requests/s |\n| exchange.owa_current_unique_users | logged-in | users |\n| exchange.owa_requests_total | handled | requests/s |\n| exchange.rpc_active_user_count | active | users |\n| exchange.rpc_avg_latency | latency | seconds |\n| exchange.rpc_connection_count | connections | connections |\n| exchange.rpc_operations | operations | operations/s |\n| exchange.rpc_requests | processed | requests |\n| exchange.rpc_user_count | users | users |\n| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_poison | low, high, none, normal | messages/s |\n| hyperv.vms_health | ok, critical | vms |\n| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |\n| hyperv.root_partition_attached_devices | attached | devices |\n| hyperv.root_partition_deposited_pages | deposited | pages |\n| hyperv.root_partition_skipped_interrupts | skipped | interrupts |\n| hyperv.root_partition_device_dma_errors | illegal_dma | requests |\n| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | 
requests |\n| hyperv.root_partition_device_interrupt_throttle_events | throttling | events |\n| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |\n| hyperv.root_partition_address_space | address_spaces | address spaces |\n| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |\n| hyperv.root_partition_virtual_tlb_pages | used | pages |\n\n### Per cpu core\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| core | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |\n| windows.cpu_core_interrupts | interrupts | interrupts/s |\n| windows.cpu_core_dpcs | dpcs | dpcs/s |\n| windows.cpu_core_cstate | c1, c2, c3 | percentage |\n\n### Per logical disk\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.logical_disk_utilization | free, used | bytes |\n| windows.logical_disk_bandwidth | read, write | bytes/s |\n| windows.logical_disk_operations | reads, writes | operations/s |\n| windows.logical_disk_latency | read, write | seconds |\n\n### Per network device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| nic | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.net_nic_bandwidth | received, sent | kilobits/s |\n| windows.net_nic_packets | received, sent | packets/s |\n| windows.net_nic_errors | inbound, outbound | errors/s |\n| windows.net_nic_discarded | inbound, outbound | discards/s |\n\n### Per thermalzone\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thermalzone | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.thermalzone_temperature | temperature | celsius |\n\n### Per service\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |\n| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |\n\n### Per website\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| website | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.website_traffic | received, sent | bytes/s |\n| iis.website_requests_rate | requests | requests/s |\n| iis.website_active_connections_count | active | connections |\n| iis.website_users_count | anonymous, non_anonymous | users |\n| iis.website_connection_attempts_rate | connection | attempts/s |\n| iis.website_isapi_extension_requests_count | isapi | requests |\n| iis.website_isapi_extension_requests_rate | isapi | requests/s |\n| iis.website_ftp_file_transfer_rate | received, sent | files/s |\n| iis.website_logon_attempts_rate | logon | attempts/s |\n| iis.website_errors_rate | document_locked, document_not_found | errors/s |\n| iis.website_uptime | document_locked, document_not_found | seconds |\n\n### Per mssql instance\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| 
mssql.instance_accessmethods_page_splits | page | splits/s |\n| mssql.instance_cache_hit_ratio | hit_ratio | percentage |\n| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |\n| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |\n| mssql.instance_bufman_iops | read, written | iops |\n| mssql.instance_blocked_processes | blocked | processes |\n| mssql.instance_user_connection | user | connections |\n| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |\n| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |\n| mssql.instance_memmgr_pending_memory_grants | pending | processes |\n| mssql.instance_memmgr_server_memory | memory | bytes |\n| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |\n| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |\n| mssql.instance_sqlstats_batch_requests | batch | requests/s |\n| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |\n| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |\n| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |\n\n### Per database\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n| database | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.database_active_transactions | active | transactions |\n| mssql.database_backup_restore_operations | backup | operations/s |\n| mssql.database_data_files_size | size | bytes |\n| mssql.database_log_flushed | flushed | bytes/s |\n| mssql.database_log_flushes | log | flushes/s |\n| mssql.database_transactions | transactions | transactions/s |\n| mssql.database_write_transactions | write | transactions/s |\n\n### Per certificate template\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cert_template | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adcs.cert_template_requests | requests | requests/s |\n| adcs.cert_template_failed_requests | failed | requests/s |\n| adcs.cert_template_issued_requests | issued | requests/s |\n| adcs.cert_template_pending_requests | pending | requests/s |\n| adcs.cert_template_request_processing_time | processing_time | seconds |\n| adcs.cert_template_retrievals | retrievals | retrievals/s |\n| adcs.cert_template_retrieval_processing_time | processing_time | seconds |\n| adcs.cert_template_request_cryptographic_signing_time | singing_time | seconds |\n| adcs.cert_template_request_policy_module_processing | processing_time | seconds |\n| adcs.cert_template_challenge_responses | challenge | responses/s |\n| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |\n| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |\n| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |\n\n### Per process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| process | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netframework.clrexception_thrown | exceptions | 
exceptions/s |\n| netframework.clrexception_filters | filters | filters/s |\n| netframework.clrexception_finallys | finallys | finallys/s |\n| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |\n| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |\n| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |\n| netframework.clrinterop_interop_stubs_created | created | stubs/s |\n| netframework.clrjit_methods | jit-compiled | methods/s |\n| netframework.clrjit_time | time | percentage |\n| netframework.clrjit_standard_failures | failures | failures/s |\n| netframework.clrjit_il_bytes | compiled_msil | bytes/s |\n| netframework.clrloading_loader_heap_size | committed | bytes |\n| netframework.clrloading_appdomains_loaded | loaded | domain/s |\n| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |\n| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |\n| netframework.clrloading_classes_loaded | loaded | classes/s |\n| netframework.clrloading_class_load_failures | class_load | failures/s |\n| netframework.clrlocksandthreads_queue_length | threads | threads/s |\n| netframework.clrlocksandthreads_current_logical_threads | logical | threads |\n| netframework.clrlocksandthreads_current_physical_threads | physical | threads |\n| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |\n| netframework.clrlocksandthreads_contentions | contentions | contentions/s |\n| netframework.clrmemory_allocated_bytes | allocated | bytes/s |\n| netframework.clrmemory_finalization_survivors | survived | objects |\n| netframework.clrmemory_heap_size | heap | bytes |\n| netframework.clrmemory_promoted | promoted | bytes |\n| netframework.clrmemory_number_gc_handles | used | handles |\n| netframework.clrmemory_collections | gc | gc/s |\n| netframework.clrmemory_induced_gc | gc | gc/s |\n| netframework.clrmemory_number_pinned_objects | pinned | objects |\n| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |\n| netframework.clrmemory_committed | committed | bytes |\n| netframework.clrmemory_reserved | reserved | bytes |\n| netframework.clrmemory_gc_time | time | percentage |\n| netframework.clrremoting_channels | registered | channels/s |\n| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |\n| netframework.clrremoting_context_bound_objects | allocated | objects/s |\n| netframework.clrremoting_context_proxies | objects | objects/s |\n| netframework.clrremoting_contexts | contexts | contexts |\n| netframework.clrremoting_remote_calls | rpc | calls/s |\n| netframework.clrsecurity_link_time_checks | linktime | checks/s |\n| netframework.clrsecurity_checks_time | time | percentage |\n| netframework.clrsecurity_stack_walk_depth | stack | depth |\n| netframework.clrsecurity_runtime_checks | runtime | checks/s |\n\n### Per exchange workload\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.workload_active_tasks | active | tasks |\n| exchange.workload_completed_tasks | completed | tasks/s |\n| exchange.workload_queued_tasks | queued | tasks/s |\n| exchange.workload_yielded_tasks | yielded | tasks/s |\n| exchange.workload_activity_status | active, paused | status |\n\n### Per ldap process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit 
|\n|:------|:----------|:----|\n| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |\n| exchange.ldap_read_time | read | seconds |\n| exchange.ldap_search_time | search | seconds |\n| exchange.ldap_write_time | write | seconds |\n| exchange.ldap_timeout_errors | timeout | errors/s |\n\n### Per http proxy\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.http_proxy_avg_auth_latency | latency | seconds |\n| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |\n| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |\n| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |\n| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |\n| exchange.http_proxy_requests | processed | requests/s |\n\n### Per vm\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_name | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_cpu_usage | gues, hypervisor, remote | percentage |\n| hyperv.vm_memory_physical | assigned_memory | MiB |\n| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |\n| hyperv.vm_memory_pressure_current | pressure | percentage |\n| hyperv.vm_vid_physical_pages_allocated | allocated | pages |\n| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |\n\n### Per vm device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_device_bytes | read, written | bytes/s |\n| hyperv.vm_device_operations | read, write | operations/s |\n| hyperv.vm_device_errors | errors | errors/s |\n\n### Per vm interface\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_interface | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_interface_bytes | received, sent | bytes/s |\n| hyperv.vm_interface_packets | received, sent | packets/s |\n| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |\n\n### Per vswitch\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vswitch | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vswitch_bytes | received, sent | bytes/s |\n| hyperv.vswitch_packets | received, sent | packets/s |\n| hyperv.vswitch_directed_packets | received, sent | packets/s |\n| hyperv.vswitch_broadcast_packets | received, sent | packets/s |\n| hyperv.vswitch_multicast_packets | received, sent | packets/s |\n| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_packets_flooded | flooded | packets/s |\n| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |\n| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-windows-Windows", "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/windows/metadata.yaml", @@ -17429,7 +17810,7 @@ export const integrations = [ } }, "overview": "# WireGuard\n\nPlugin: go.d.plugin\nModule: wireguard\n\n## Overview\n\nThis collector monitors WireGuard VPN devices and peers traffic.\n\n\nIt connects to the local WireGuard instance using 
[wireguard-go client](https://github.com/WireGuard/wireguard-go).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThis collector requires the CAP_NET_ADMIN capability, but it is set automatically during installation, so no manual configuration is needed.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt automatically detects instances running on localhost.\n\n\n#### Limits\n\nDoesn't work if Netdata or WireGuard is installed in the container.\n\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/wireguard.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/wireguard.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/wireguard.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/wireguard.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `wireguard` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m wireguard\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `wireguard` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep wireguard\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep wireguard /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep wireguard\n```\n\n",
 "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n",
 "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per device\n\nThese metrics refer to the VPN network interface.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | VPN network interface |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| wireguard.device_network_io | receive, transmit | B/s |\n| wireguard.device_peers | peers | peers |\n\n### Per peer\n\nThese metrics refer to the VPN peer.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | VPN network interface |\n| public_key | Public key of a peer |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| wireguard.peer_network_io | receive, transmit | B/s |\n| wireguard.peer_latest_handshake_ago | time | seconds |\n\n",
@@ -17466,7 +17847,7 @@ export const integrations = [
 }
 },
 "overview": "# X.509 certificate\n\nPlugin: go.d.plugin\nModule: x509check\n\n## Overview\n\n\n\nThis collector monitors X.509 certificates' expiration time and revocation status.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",
- "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/x509check.conf`.\n\n\nYou 
can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/x509check.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| source | Certificate source. Allowed schemes: https, tcp, tcp4, tcp6, udp, udp4, udp6, file, smtp. | | no |\n| days_until_expiration_warning | Number of days before the alarm status is warning. | 30 | no |\n| days_until_expiration_critical | Number of days before the alarm status is critical. | 15 | no |\n| check_revocation_status | Whether to check the revocation status of the certificate. | no | no |\n| timeout | SSL connection timeout. | 2 | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Website certificate\n\nWebsite certificate.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: my_site_cert\n source: https://my_site.org:443\n\n```\n{% /details %}\n##### Local file certificate\n\nLocal file certificate.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: my_file_cert\n source: file:///home/me/cert.pem\n\n```\n{% /details %}\n##### SMTP certificate\n\nSMTP certificate.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: my_smtp_cert\n source: smtp://smtp.my_mail.org:587\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define more than one job, their names must be unique.\n\nCheck the expiration status of the multiple websites' certificates.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: my_site_cert1\n source: https://my_site1.org:443\n\n - name: my_site_cert2\n source: https://my_site1.org:443\n\n - name: my_site_cert3\n source: https://my_site3.org:443\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/x509check.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/x509check.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. 
| 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| source | Certificate source. Allowed schemes: https, tcp, tcp4, tcp6, udp, udp4, udp6, file, smtp. | | no |\n| days_until_expiration_warning | Number of days before the alarm status is warning. | 30 | no |\n| days_until_expiration_critical | Number of days before the alarm status is critical. | 15 | no |\n| check_revocation_status | Whether to check the revocation status of the certificate. | no | no |\n| timeout | SSL connection timeout. | 2 | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Website certificate\n\nWebsite certificate.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: my_site_cert\n source: https://my_site.org:443\n\n```\n{% /details %}\n##### Local file certificate\n\nLocal file certificate.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: my_file_cert\n source: file:///home/me/cert.pem\n\n```\n{% /details %}\n##### SMTP certificate\n\nSMTP certificate.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: my_smtp_cert\n source: smtp://smtp.my_mail.org:587\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define more than one job, their names must be unique.\n\nCheck the expiration status of multiple websites' certificates.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: my_site_cert1\n source: https://my_site1.org:443\n\n - name: my_site_cert2\n source: https://my_site2.org:443\n\n - name: my_site_cert3\n source: https://my_site3.org:443\n\n```\n{% /details %}\n",
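As a quick cross-check of what the collector will report, you can query a certificate's expiry directly with `openssl`. This is a minimal sketch that reuses the placeholder host `my_site.org` from the examples above and mirrors the default 30-day warning threshold:

```bash
# Print the certificate's notAfter date and test whether it expires within 30 days
# (the default days_until_expiration_warning). The host is the placeholder from the examples.
openssl s_client -connect my_site.org:443 -servername my_site.org </dev/null 2>/dev/null \
  | openssl x509 -noout -enddate -checkend $((30*24*3600))
```

If `-checkend` reports that the certificate will expire, the collector's warning alarm should trigger for that source as well.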
"troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `x509check` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m x509check\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `x509check` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep x509check\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep x509check /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep x509check\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ x509check_days_until_expiration ](https://github.com/netdata/netdata/blob/master/src/health/health.d/x509check.conf) | x509check.time_until_expiration | Time until x509 certificate expires for ${label:source} |\n| [ x509check_revocation_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/x509check.conf) | x509check.revocation_status | x509 certificate revocation status for ${label:source} |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per source\n\nThese metrics refer to the configured source.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| source | Configured source. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| x509check.time_until_expiration | expiry | seconds |\n| x509check.revocation_status | not_revoked, revoked | boolean |\n\n", @@ -17504,8 +17885,8 @@ export const integrations = [ }, "most_popular": false }, - "overview": "# ZFS Pools\n\nPlugin: go.d.plugin\nModule: zfspool\n\n## Overview\n\nThis collector monitors the health and space usage of ZFS pools using the command line tool [zpool](https://openzfs.github.io/openzfs-docs/man/master/8/zpool-list.8.html).\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/zfspool.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/zfspool.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| binary_path | Path to the `zpool` binary. If an absolute path is provided, the collector will use it directly; otherwise, it will search for the binary in directories specified in the PATH environment variable. | /usr/bin/zpool | yes |\n| timeout | Timeout for executing the binary, specified in seconds. 
| 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Custom binary path\n\nThe executable is not in the directories specified in the PATH environment variable.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: zfspool\n binary_path: /usr/local/sbin/zpool\n\n```\n{% /details %}\n", + "overview": "# ZFS Pools\n\nPlugin: go.d.plugin\nModule: zfspool\n\n## Overview\n\nThis collector monitors the health and space usage of ZFS pools using the command line tool [zpool](https://openzfs.github.io/openzfs-docs/man/master/8/zpool-list.8.html).\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n- BSD\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/zfspool.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/zfspool.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| binary_path | Path to the `zpool` binary. If an absolute path is provided, the collector will use it directly; otherwise, it will search for the binary in directories specified in the PATH environment variable. | /usr/bin/zpool | yes |\n| timeout | Timeout for executing the binary, specified in seconds. | 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Custom binary path\n\nThe executable is not in the directories specified in the PATH environment variable.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: zfspool\n binary_path: /usr/local/sbin/zpool\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `zfspool` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m zfspool\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `zfspool` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep zfspool\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep zfspool /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep zfspool\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ zfs_pool_space_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/zfs.conf) | zfspool.pool_space_utilization | ZFS pool ${label:pool} is nearing capacity. Current space usage is above the threshold. |\n| [ zfs_pool_health_state_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/zfs.conf) | zfspool.pool_health_state | ZFS pool ${label:pool} state is degraded |\n| [ zfs_pool_health_state_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/zfs.conf) | zfspool.pool_health_state | ZFS pool ${label:pool} state is faulted or unavail |\n| [ zfs_vdev_health_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/zfs.conf) | zfspool.vdev_health_state | ZFS vdev ${label:vdev} state is faulted or degraded |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per zfs pool\n\nThese metrics refer to the ZFS pool.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| pool | Zpool name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| zfspool.pool_space_utilization | utilization | % |\n| zfspool.pool_space_usage | free, used | bytes |\n| zfspool.pool_fragmentation | fragmentation | % |\n| zfspool.pool_health_state | online, degraded, faulted, offline, unavail, removed, suspended | state |\n\n### Per zfs pool vdev\n\nThese metrics refer to the ZFS pool virtual device.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| pool | Zpool name |\n| vdev | Unique identifier for a virtual device (vdev) within a ZFS pool. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| zfspool.vdev_health_state | online, degraded, faulted, offline, unavail, removed, suspended | state |\n\n", @@ -17546,7 +17927,7 @@ export const integrations = [ } }, "overview": "# ZooKeeper\n\nPlugin: go.d.plugin\nModule: zookeeper\n\n## Overview\n\n\n\nIt connects to the Zookeeper instance via a TCP and executes the following commands:\n\n- [mntr](https://zookeeper.apache.org/doc/r3.4.8/zookeeperAdmin.html#sc_zkCommands).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by attempting to connect using known ZooKeeper TCP sockets:\n\n- 127.0.0.1:2181\n- 127.0.0.1:2182\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Whitelist `mntr` command\n\nAdd `mntr` to Zookeeper's [4lw.commands.whitelist](https://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_4lw).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/zookeeper.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/zookeeper.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address. The format is IP:PORT. | 127.0.0.1:2181 | yes |\n| timeout | Connection/read/write/ssl handshake timeout. | 1 | no |\n| use_tls | Whether to use TLS or not. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nLocal server.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:2181\n\n```\n{% /details %}\n##### TLS with self-signed certificate\n\nZookeeper with TLS and self-signed certificate.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:2181\n use_tls: yes\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:2181\n\n - name: remote\n address: 192.0.2.1:2181\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Whitelist `mntr` command\n\nAdd `mntr` to Zookeeper's [4lw.commands.whitelist](https://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_4lw).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/zookeeper.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/zookeeper.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address. The format is IP:PORT. | 127.0.0.1:2181 | yes |\n| timeout | Connection/read/write/ssl handshake timeout. | 1 | no |\n| use_tls | Whether to use TLS or not. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nLocal server.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:2181\n\n```\n{% /details %}\n##### TLS with self-signed certificate\n\nZookeeper with TLS and self-signed certificate.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:2181\n use_tls: yes\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:2181\n\n - name: remote\n address: 192.0.2.1:2181\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `zookeeper` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m zookeeper\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `zookeeper` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep zookeeper\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep zookeeper /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep zookeeper\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
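Before relying on the collector, you can verify that the `mntr` whitelist change from the Setup section took effect. A minimal sketch using `nc` against a local server on the default port:

```bash
# A whitelisted server answers with tab-separated key/value pairs (zk_version, zk_avg_latency, ...).
# If the whitelist is missing, ZooKeeper returns an error message instead.
echo mntr | nc 127.0.0.1 2181
```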
An instance is uniquely identified by a set of labels.\n\n\n\n### Per ZooKeeper instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| zookeeper.requests | outstanding | requests |\n| zookeeper.requests_latency | min, avg, max | ms |\n| zookeeper.connections | alive | connections |\n| zookeeper.packets | received, sent | pps |\n| zookeeper.file_descriptor | open | file descriptors |\n| zookeeper.nodes | znode, ephemerals | nodes |\n| zookeeper.watches | watches | watches |\n| zookeeper.approximate_data_size | size | KiB |\n| zookeeper.server_state | state | state |\n\n", @@ -17582,7 +17963,7 @@ export const integrations = [ "most_popular": false }, "overview": "# Idle OS Jitter\n\nPlugin: idlejitter.plugin\nModule: idlejitter.plugin\n\n## Overview\n\nMonitor delays in timing for user processes caused by scheduling limitations to optimize the system to run latency sensitive applications with minimal jitter, improving consistency and quality of service.\n\n\nA thread is spawned that requests to sleep for fixed amount of time. When the system wakes it up, it measures how many microseconds have passed. The difference between the requested and the actual duration of the sleep, is the idle jitter. This is done dozens of times per second to ensure we have a representative sample.\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration will run by default on all supported systems.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nThis integration only supports a single configuration option, and most users will not need to change it.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| loop time in ms | Specifies the target time for the data collection thread to sleep, measured in miliseconds. | 20 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\n\nThe file format is a modified INI syntax. 
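Before editing anything, you can ask the running Agent for its effective configuration and check the plugin's current value. A quick sketch, assuming the default API port `19999`; the `[plugin:idlejitter]` section name is an assumption for illustration:

```bash
# Dump the effective netdata.conf served by the Agent and show the idlejitter section, if present.
curl -s "http://127.0.0.1:19999/netdata.conf" | grep -A 3 "plugin:idlejitter"
```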
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nThis integration only supports a single configuration option, and most users will not need to change it.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| loop time | Specifies the target time for the data collection thread to sleep, measured in milliseconds. | 20ms | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Idle OS Jitter instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.idlejitter | min, max, average | microseconds lost/s |\n\n", @@ -17615,7 +17996,7 @@ export const integrations = [ "most_popular": false }, "overview": "# IOPing\n\nPlugin: ioping.plugin\nModule: ioping.plugin\n\n## Overview\n\nMonitor IOPing metrics for efficient disk I/O latency tracking. Keep track of read/write speeds, latency, and error rates for optimized disk operations.\n\nThe plugin uses the `ioping` command.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install ioping\n\nYou can install the command by passing the argument `install` to the plugin (`/usr/libexec/netdata/plugins.d/ioping.plugin install`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ioping.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ioping.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1s | no |\n| destination | The directory/file/device to ioping. | | yes |\n| request_size | The request size in bytes to ioping the destination (symbolic modifiers are supported) | 4k | no |\n| ioping_opts | Options passed to `ioping` commands. 
| -T 1000000 | no |\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\nThis example has the minimum configuration necessary to have the plugin running.\n\n{% details open=true summary=\"Config\" %}\n```yaml\ndestination=\"/dev/sda\"\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install ioping\n\nYou can install the command by passing the argument `install` to the plugin (`/usr/libexec/netdata/plugins.d/ioping.plugin install`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ioping.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ioping.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1s | no |\n| destination | The directory/file/device to ioping. | | yes |\n| request_size | The request size in bytes to ioping the destination (symbolic modifiers are supported) | 4k | no |\n| ioping_opts | Options passed to `ioping` commands. | -T 1000000 | no |\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\nThis example has the minimum configuration necessary to have the plugin running.\n\n{% details open=true summary=\"Config\" %}\n```yaml\ndestination=\"/dev/sda\"\n\n```\n{% /details %}\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ ioping_disk_latency ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ioping.conf) | ioping.latency | average I/O latency over the last 10 seconds |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
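To preview the latency that will appear on the chart, you can run `ioping` by hand with the same request size the collector uses. A sketch assuming the `/dev/sda` destination from the example above:

```bash
# Five 4 KiB read requests against the example destination; raw devices usually need root.
sudo ioping -c 5 -s 4k /dev/sda
```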
An instance is uniquely identified by a set of labels.\n\n\n\n### Per disk\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ioping.latency | latency | microseconds |\n\n", @@ -17652,7 +18033,7 @@ export const integrations = [ "most_popular": false }, "overview": "# macOS\n\nPlugin: macos.plugin\nModule: mach_smi\n\n## Overview\n\nMonitor macOS metrics for efficient operating system performance.\n\nThe plugin uses three different methods to collect data:\n - The function `sysctlbyname` is called to collect network, swap, loadavg, and boot time.\n - The functtion `host_statistic` is called to collect CPU and Virtual memory data;\n - The function `IOServiceGetMatchingServices` to collect storage information.\n\n\nThis collector is only supported on the following platforms:\n\n- macOS\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nThere are three sections in the file which you can configure:\n\n- `[plugin:macos:sysctl]` - Enable or disable monitoring for network, swap, loadavg, and boot time.\n- `[plugin:macos:mach_smi]` - Enable or disable monitoring for CPU and Virtual memory.\n- `[plugin:macos:iokit]` - Enable or disable monitoring for storage device.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enable load average | Enable or disable monitoring of load average metrics (load1, load5, load15). | yes | no |\n| system swap | Enable or disable monitoring of system swap metrics (free, used). | yes | no |\n| bandwidth | Enable or disable monitoring of network bandwidth metrics (received, sent). | yes | no |\n| ipv4 TCP packets | Enable or disable monitoring of IPv4 TCP total packets metrics (received, sent). | yes | no |\n| ipv4 TCP errors | Enable or disable monitoring of IPv4 TCP packets metrics (Input Errors, Checksum, Retransmission segments). | yes | no |\n| ipv4 TCP handshake issues | Enable or disable monitoring of IPv4 TCP handshake metrics (Established Resets, Active Opens, Passive Opens, Attempt Fails). | yes | no |\n| ECN packets | Enable or disable monitoring of ECN statistics metrics (InCEPkts, InNoECTPkts). | auto | no |\n| TCP SYN cookies | Enable or disable monitoring of TCP SYN cookies metrics (received, sent, failed). | auto | no |\n| TCP out-of-order queue | Enable or disable monitoring of TCP out-of-order queue metrics (inqueue). 
| auto | no |\n| TCP connection aborts | Enable or disable monitoring of TCP connection aborts metrics (Bad Data, User closed, No memory, Timeout). | auto | no |\n| ipv4 UDP packets | Enable or disable monitoring of ipv4 UDP packets metrics (sent, received.). | yes | no |\n| ipv4 UDP errors | Enable or disable monitoring of ipv4 UDP errors metrics (Recieved Buffer error, Input Errors, No Ports, IN Checksum Errors, Ignore Multi). | yes | no |\n| ipv4 icmp packets | Enable or disable monitoring of IPv4 ICMP packets metrics (sent, received, in error, OUT error, IN Checksum error). | yes | no |\n| ipv4 icmp messages | Enable or disable monitoring of ipv4 ICMP messages metrics (I/O messages, I/O Errors, In Checksum). | yes | no |\n| ipv4 packets | Enable or disable monitoring of ipv4 packets metrics (received, sent, forwarded, delivered). | yes | no |\n| ipv4 fragments sent | Enable or disable monitoring of IPv4 fragments sent metrics (ok, fails, creates). | yes | no |\n| ipv4 fragments assembly | Enable or disable monitoring of IPv4 fragments assembly metrics (ok, failed, all). | yes | no |\n| ipv4 errors | Enable or disable monitoring of IPv4 errors metrics (I/O discard, I/O HDR errors, In Addr errors, In Unknown protos, OUT No Routes). | yes | no |\n| ipv6 packets | Enable or disable monitoring of IPv6 packets metrics (received, sent, forwarded, delivered). | auto | no |\n| ipv6 fragments sent | Enable or disable monitoring of IPv6 fragments sent metrics (ok, failed, all). | auto | no |\n| ipv6 fragments assembly | Enable or disable monitoring of IPv6 fragments assembly metrics (ok, failed, timeout, all). | auto | no |\n| ipv6 errors | Enable or disable monitoring of IPv6 errors metrics (I/O Discards, In Hdr Errors, In Addr Errors, In Truncaedd Packets, I/O No Routes). | auto | no |\n| icmp | Enable or disable monitoring of ICMP metrics (sent, received). | auto | no |\n| icmp redirects | Enable or disable monitoring of ICMP redirects metrics (received, sent). | auto | no |\n| icmp errors | Enable or disable monitoring of ICMP metrics (I/O Errors, In Checksums, In Destination Unreachable, In Packet too big, In Time Exceeds, In Parm Problem, Out Dest Unreachable, Out Timee Exceeds, Out Parm Problems.). | auto | no |\n| icmp echos | Enable or disable monitoring of ICMP echos metrics (I/O Echos, I/O Echo Reply). | auto | no |\n| icmp router | Enable or disable monitoring of ICMP router metrics (I/O Solicits, I/O Advertisements). | auto | no |\n| icmp neighbor | Enable or disable monitoring of ICMP neighbor metrics (I/O Solicits, I/O Advertisements). | auto | no |\n| icmp types | Enable or disable monitoring of ICMP types metrics (I/O Type1, I/O Type128, I/O Type129, Out Type133, Out Type135, In Type136, Out Type145). | auto | no |\n| space usage for all disks | Enable or disable monitoring of space usage for all disks metrics (available, used, reserved for root). | yes | no |\n| inodes usage for all disks | Enable or disable monitoring of inodes usage for all disks metrics (available, used, reserved for root). | yes | no |\n| bandwidth | Enable or disable monitoring of bandwidth metrics (received, sent). | yes | no |\n| system uptime | Enable or disable monitoring of system uptime metrics (uptime). | yes | no |\n| cpu utilization | Enable or disable monitoring of CPU utilization metrics (user, nice, system, idel). | yes | no |\n| system ram | Enable or disable monitoring of system RAM metrics (Active, Wired, throttled, compressor, inactive, purgeable, speculative, free). 
| yes | no |\n| swap i/o | Enable or disable monitoring of SWAP I/O metrics (I/O Swap). | yes | no |\n| memory page faults | Enable or disable monitoring of memory page faults metrics (memory, cow, I/O page, compress, decompress, zero fill, reactivate, purge). | yes | no |\n| disk i/o | Enable or disable monitoring of disk I/O metrics (In, Out). | yes | no |\n\n{% /details %}\n#### Examples\n\n##### Disable swap monitoring.\n\nA basic example that discards swap monitoring\n\n{% details open=true summary=\"Config\" %}\n```yaml\n[plugin:macos:sysctl]\n system swap = no\n[plugin:macos:mach_smi]\n swap i/o = no\n\n```\n{% /details %}\n##### Disable complete Machine SMI section.\n\nA basic example that discards swap monitoring\n\n{% details open=true summary=\"Config\" %}\n```yaml\n[plugin:macos:mach_smi]\n cpu utilization = no\n system ram = no\n swap i/o = no\n memory page faults = no\n disk i/o = no\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nThere are three sections in the file which you can configure:\n\n- `[plugin:macos:sysctl]` - Enable or disable monitoring for network, swap, loadavg, and boot time.\n- `[plugin:macos:mach_smi]` - Enable or disable monitoring for CPU and Virtual memory.\n- `[plugin:macos:iokit]` - Enable or disable monitoring for storage device.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enable load average | Enable or disable monitoring of load average metrics (load1, load5, load15). | yes | no |\n| system swap | Enable or disable monitoring of system swap metrics (free, used). | yes | no |\n| bandwidth | Enable or disable monitoring of network bandwidth metrics (received, sent). | yes | no |\n| ipv4 TCP packets | Enable or disable monitoring of IPv4 TCP total packets metrics (received, sent). | yes | no |\n| ipv4 TCP errors | Enable or disable monitoring of IPv4 TCP packets metrics (Input Errors, Checksum, Retransmission segments). | yes | no |\n| ipv4 TCP handshake issues | Enable or disable monitoring of IPv4 TCP handshake metrics (Established Resets, Active Opens, Passive Opens, Attempt Fails). | yes | no |\n| ECN packets | Enable or disable monitoring of ECN statistics metrics (InCEPkts, InNoECTPkts). | auto | no |\n| TCP SYN cookies | Enable or disable monitoring of TCP SYN cookies metrics (received, sent, failed). | auto | no |\n| TCP out-of-order queue | Enable or disable monitoring of TCP out-of-order queue metrics (inqueue). | auto | no |\n| TCP connection aborts | Enable or disable monitoring of TCP connection aborts metrics (Bad Data, User closed, No memory, Timeout). 
| auto | no |\n| ipv4 UDP packets | Enable or disable monitoring of ipv4 UDP packets metrics (sent, received.). | yes | no |\n| ipv4 UDP errors | Enable or disable monitoring of ipv4 UDP errors metrics (Recieved Buffer error, Input Errors, No Ports, IN Checksum Errors, Ignore Multi). | yes | no |\n| ipv4 icmp packets | Enable or disable monitoring of IPv4 ICMP packets metrics (sent, received, in error, OUT error, IN Checksum error). | yes | no |\n| ipv4 icmp messages | Enable or disable monitoring of ipv4 ICMP messages metrics (I/O messages, I/O Errors, In Checksum). | yes | no |\n| ipv4 packets | Enable or disable monitoring of ipv4 packets metrics (received, sent, forwarded, delivered). | yes | no |\n| ipv4 fragments sent | Enable or disable monitoring of IPv4 fragments sent metrics (ok, fails, creates). | yes | no |\n| ipv4 fragments assembly | Enable or disable monitoring of IPv4 fragments assembly metrics (ok, failed, all). | yes | no |\n| ipv4 errors | Enable or disable monitoring of IPv4 errors metrics (I/O discard, I/O HDR errors, In Addr errors, In Unknown protos, OUT No Routes). | yes | no |\n| ipv6 packets | Enable or disable monitoring of IPv6 packets metrics (received, sent, forwarded, delivered). | auto | no |\n| ipv6 fragments sent | Enable or disable monitoring of IPv6 fragments sent metrics (ok, failed, all). | auto | no |\n| ipv6 fragments assembly | Enable or disable monitoring of IPv6 fragments assembly metrics (ok, failed, timeout, all). | auto | no |\n| ipv6 errors | Enable or disable monitoring of IPv6 errors metrics (I/O Discards, In Hdr Errors, In Addr Errors, In Truncaedd Packets, I/O No Routes). | auto | no |\n| icmp | Enable or disable monitoring of ICMP metrics (sent, received). | auto | no |\n| icmp redirects | Enable or disable monitoring of ICMP redirects metrics (received, sent). | auto | no |\n| icmp errors | Enable or disable monitoring of ICMP metrics (I/O Errors, In Checksums, In Destination Unreachable, In Packet too big, In Time Exceeds, In Parm Problem, Out Dest Unreachable, Out Timee Exceeds, Out Parm Problems.). | auto | no |\n| icmp echos | Enable or disable monitoring of ICMP echos metrics (I/O Echos, I/O Echo Reply). | auto | no |\n| icmp router | Enable or disable monitoring of ICMP router metrics (I/O Solicits, I/O Advertisements). | auto | no |\n| icmp neighbor | Enable or disable monitoring of ICMP neighbor metrics (I/O Solicits, I/O Advertisements). | auto | no |\n| icmp types | Enable or disable monitoring of ICMP types metrics (I/O Type1, I/O Type128, I/O Type129, Out Type133, Out Type135, In Type136, Out Type145). | auto | no |\n| space usage for all disks | Enable or disable monitoring of space usage for all disks metrics (available, used, reserved for root). | yes | no |\n| inodes usage for all disks | Enable or disable monitoring of inodes usage for all disks metrics (available, used, reserved for root). | yes | no |\n| bandwidth | Enable or disable monitoring of bandwidth metrics (received, sent). | yes | no |\n| system uptime | Enable or disable monitoring of system uptime metrics (uptime). | yes | no |\n| cpu utilization | Enable or disable monitoring of CPU utilization metrics (user, nice, system, idel). | yes | no |\n| system ram | Enable or disable monitoring of system RAM metrics (Active, Wired, throttled, compressor, inactive, purgeable, speculative, free). | yes | no |\n| swap i/o | Enable or disable monitoring of SWAP I/O metrics (I/O Swap). 
| yes | no |\n| memory page faults | Enable or disable monitoring of memory page faults metrics (memory, cow, I/O page, compress, decompress, zero fill, reactivate, purge). | yes | no |\n| disk i/o | Enable or disable monitoring of disk I/O metrics (In, Out). | yes | no |\n\n{% /details %}\n#### Examples\n\n##### Disable swap monitoring.\n\nA basic example that discards swap monitoring\n\n{% details open=true summary=\"Config\" %}\n```yaml\n[plugin:macos:sysctl]\n system swap = no\n[plugin:macos:mach_smi]\n swap i/o = no\n\n```\n{% /details %}\n##### Disable complete Machine SMI section.\n\nA basic example that discards swap monitoring\n\n{% details open=true summary=\"Config\" %}\n```yaml\n[plugin:macos:mach_smi]\n cpu utilization = no\n system ram = no\n swap i/o = no\n memory page faults = no\n disk i/o = no\n\n```\n{% /details %}\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ interface_speed ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.net | network interface ${label:device} current speed |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per macOS instance\n\nThese metrics refer to hardware and network monitoring.\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.cpu | user, nice, system, idle | percentage |\n| system.ram | active, wired, throttled, compressor, inactive, purgeable, speculative, free | MiB |\n| mem.swapio | io, out | KiB/s |\n| mem.pgfaults | memory, cow, pagein, pageout, compress, decompress, zero_fill, reactivate, purge | faults/s |\n| system.load | load1, load5, load15 | load |\n| mem.swap | free, used | MiB |\n| system.ipv4 | received, sent | kilobits/s |\n| ipv4.tcppackets | received, sent | packets/s |\n| ipv4.tcperrors | InErrs, InCsumErrors, RetransSegs | packets/s |\n| ipv4.tcphandshake | EstabResets, ActiveOpens, PassiveOpens, AttemptFails | events/s |\n| ipv4.tcpconnaborts | baddata, userclosed, nomemory, timeout | connections/s |\n| ipv4.tcpofo | inqueue | packets/s |\n| ipv4.tcpsyncookies | received, sent, failed | packets/s |\n| ipv4.ecnpkts | CEP, NoECTP | packets/s |\n| ipv4.udppackets | received, sent | packets/s |\n| ipv4.udperrors | RcvbufErrors, InErrors, NoPorts, InCsumErrors, IgnoredMulti | events/s |\n| ipv4.icmp | received, sent | packets/s |\n| ipv4.icmp_errors | InErrors, OutErrors, InCsumErrors | packets/s |\n| ipv4.icmpmsg | InEchoReps, OutEchoReps, InEchos, OutEchos | packets/s |\n| ipv4.packets | received, sent, forwarded, delivered | packets/s |\n| ipv4.fragsout | ok, failed, created | packets/s |\n| ipv4.fragsin | ok, failed, all | packets/s |\n| ipv4.errors | InDiscards, OutDiscards, InHdrErrors, OutNoRoutes, InAddrErrors, InUnknownProtos | packets/s |\n| ipv6.packets | received, sent, forwarded, delivers | packets/s |\n| ipv6.fragsout | ok, failed, all | packets/s |\n| ipv6.fragsin | ok, failed, timeout, all | packets/s |\n| ipv6.errors | InDiscards, OutDiscards, InHdrErrors, InAddrErrors, InTruncatedPkts, InNoRoutes, OutNoRoutes | packets/s |\n| ipv6.icmp | received, sent | messages/s |\n| ipv6.icmpredir | received, sent | redirects/s |\n| ipv6.icmperrors | InErrors, OutErrors, InCsumErrors, InDestUnreachs, InPktTooBigs, InTimeExcds, InParmProblems, 
OutDestUnreachs, OutTimeExcds, OutParmProblems | errors/s |\n| ipv6.icmpechos | InEchos, OutEchos, InEchoReplies, OutEchoReplies | messages/s |\n| ipv6.icmprouter | InSolicits, OutSolicits, InAdvertisements, OutAdvertisements | messages/s |\n| ipv6.icmpneighbor | InSolicits, OutSolicits, InAdvertisements, OutAdvertisements | messages/s |\n| ipv6.icmptypes | InType1, InType128, InType129, InType136, OutType1, OutType128, OutType129, OutType133, OutType135, OutType143 | messages/s |\n| system.uptime | uptime | seconds |\n| system.io | in, out | KiB/s |\n\n### Per disk\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.io | read, writes | KiB/s |\n| disk.ops | read, writes | operations/s |\n| disk.util | utilization | % of time working |\n| disk.iotime | reads, writes | milliseconds/s |\n| disk.await | reads, writes | milliseconds/operation |\n| disk.avgsz | reads, writes | KiB/operation |\n| disk.svctm | svctm | milliseconds/operation |\n\n### Per mount point\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.space | avail, used, reserved_for_root | GiB |\n| disk.inodes | avail, used, reserved_for_root | inodes |\n\n### Per network device\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| net.net | received, sent | kilobits/s |\n| net.packets | received, sent, multicast_received, multicast_sent | packets/s |\n| net.errors | inbound, outbound | errors/s |\n| net.drops | inbound | drops/s |\n| net.events | frames, collisions, carrier | events/s |\n\n", @@ -17685,7 +18066,7 @@ export const integrations = [ "most_popular": false }, "overview": "# Netfilter\n\nPlugin: nfacct.plugin\nModule: nfacct.plugin\n\n## Overview\n\nMonitor Netfilter metrics for optimal packet filtering and manipulation. Keep tabs on packet counts, dropped packets, and error rates to secure network operations.\n\nNetdata uses libmnl (https://www.netfilter.org/projects/libmnl/index.html) to collect information.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThis plugin needs setuid.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis plugin uses socket to connect with netfilter to collect data\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install required packages\n\nInstall `libmnl-dev` and `libnetfilter-acct-dev` using the package manager of your system.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:nfacct]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Additinal parameters for collector | | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install required packages\n\nInstall `libmnl-dev` and `libnetfilter-acct-dev` using the package manager of your system.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:nfacct]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Additional parameters for the collector | | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
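The nfacct charts expose one dimension per accounting object, so the kernel needs at least one object attached to a netfilter rule before anything appears. A minimal sketch; the object name `ssh-traffic` and the rule itself are illustrative, not something Netdata creates for you:

```bash
# Create an accounting object and count inbound SSH traffic with it; the collector
# will then report it on the netfilter.nfacct_packets and netfilter.nfacct_bytes charts.
sudo nfacct add ssh-traffic
sudo iptables -I INPUT -p tcp --dport 22 -m nfacct --nfacct-name ssh-traffic
```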
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Netfilter instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netfilter.netlink_new | new, ignore, invalid | connections/s |\n| netfilter.netlink_changes | insert, delete, delete_list | changes/s |\n| netfilter.netlink_search | searched, search_restart, found | searches/s |\n| netfilter.netlink_errors | icmp_error, insert_failed, drop, early_drop | events/s |\n| netfilter.netlink_expect | created, deleted, new | expectations/s |\n| netfilter.nfacct_packets | a dimension per nfacct object | packets/s |\n| netfilter.nfacct_bytes | a dimension per nfacct object | kilobytes/s |\n\n", @@ -17723,7 +18104,7 @@ export const integrations = [ "most_popular": false }, "overview": "# CPU performance\n\nPlugin: perf.plugin\nModule: perf.plugin\n\n## Overview\n\nThis collector monitors CPU performance metrics about cycles, instructions, migrations, cache operations and more.\n\nIt uses syscall (2) to open a file descriptor to monitor the perf events.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nIt needs setuid to use the necessary syscall to collect perf events. Netdata sets the permission during installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install perf plugin\n\nIf you are [using our official native DEB/RPM packages](/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure the `netdata-plugin-perf` package is installed.\n\n\n#### Enable the perf plugin\n\nThe plugin is disabled by default because the number of PMUs is usually quite limited and it is not desired to allow Netdata to struggle silently for PMUs, interfering with other performance monitoring software.\n\nTo enable it, use `edit-config` from the Netdata [config directory](/docs/netdata-agent/configuration/README.md), which is typically at `/etc/netdata`, to edit the `netdata.conf` file.\n\n```bash\ncd /etc/netdata # Replace this path with your Netdata config directory, if different\nsudo ./edit-config netdata.conf\n```\n\nChange the value of the `perf` setting to `yes` in the `[plugins]` section. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](/packaging/installer/README.md#maintaining-a-netdata-agent-installation) for your system.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:perf]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nYou can get the available options running:\n\n```bash\n/usr/libexec/netdata/plugins.d/perf.plugin --help\n````\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Command options that specify charts shown by the plugin. `cycles`, `instructions`, `branch`, `cache`, `bus`, `stalled`, `migrations`, `alignment`, `emulation`, `L1D`, `L1D-prefetch`, `L1I`, `LL`, `DTLB`, `ITLB`, `PBU`. | 1 | yes |\n\n{% /details %}\n#### Examples\n\n##### All metrics\n\nMonitor all metrics available.\n\n```yaml\n[plugin:perf]\n command options = all\n\n```\n##### CPU cycles\n\nMonitor CPU cycles.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n[plugin:perf]\n command options = cycles\n\n```\n{% /details %}\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install perf plugin\n\nIf you are [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure the `netdata-plugin-perf` package is installed.\n\n\n#### Enable the perf plugin\n\nThe plugin is disabled by default because the number of PMUs is usually quite limited and it is not desired to allow Netdata to struggle silently for PMUs, interfering with other performance monitoring software.\n\nTo enable it, use `edit-config` from the Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md), which is typically at `/etc/netdata`, to edit the `netdata.conf` file.\n\n```bash\ncd /etc/netdata # Replace this path with your Netdata config directory, if different\nsudo ./edit-config netdata.conf\n```\n\nChange the value of the `perf` setting to `yes` in the `[plugins]` section. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/start-stop-restart.md) for your system.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:perf]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nYou can get the available options by running:\n\n```bash\n/usr/libexec/netdata/plugins.d/perf.plugin --help\n```\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Command options that specify charts shown by the plugin. `cycles`, `instructions`, `branch`, `cache`, `bus`, `stalled`, `migrations`, `alignment`, `emulation`, `L1D`, `L1D-prefetch`, `L1I`, `LL`, `DTLB`, `ITLB`, `PBU`. | 1 | yes |\n\n{% /details %}\n#### Examples\n\n##### All metrics\n\nMonitor all metrics available.\n\n```yaml\n[plugin:perf]\n command options = all\n\n```\n##### CPU cycles\n\nMonitor CPU cycles.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n[plugin:perf]\n command options = cycles\n\n```\n{% /details %}\n",
    "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n\n\n",
    "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n",
    "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per CPU performance instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| perf.cpu_cycles | cpu, ref_cpu | cycles/s |\n| perf.instructions | instructions | instructions/s |\n| perf.instructions_per_cycle | ipc | instructions/cycle |\n| perf.branch_instructions | instructions, misses | instructions/s |\n| perf.cache | references, misses | operations/s |\n| perf.bus_cycles | bus | cycles/s |\n| perf.stalled_cycles | frontend, backend | cycles/s |\n| perf.migrations | migrations | migrations |\n| perf.alignment_faults | faults | faults |\n| perf.emulation_faults | faults | faults |\n| perf.l1d_cache | read_access, read_misses, write_access, write_misses | events/s |\n| perf.l1d_cache_prefetch | prefetches | prefetches/s |\n| perf.l1i_cache | read_access, read_misses | events/s |\n| perf.ll_cache | read_access, read_misses, write_access, write_misses | events/s |\n| perf.dtlb_cache | read_access, read_misses, write_access, write_misses | events/s |\n| perf.itlb_cache | read_access, read_misses | events/s |\n| perf.pbu_cache | read_access | events/s |\n\n", @@ -18519,7 +18900,7 @@ export const integrations = [ "most_popular": false }, "overview": "# System statistics\n\nPlugin: proc.plugin\nModule: /proc/stat\n\n## Overview\n\nCPU utilization, states and frequencies and key Linux system performance metrics.\n\nThe `/proc/stat` file provides various types of system statistics:\n\n- The overall system CPU usage statistics\n- Per CPU core statistics\n- The total context switching of the system\n- The total number of processes running\n- The total CPU interrupts\n- The total CPU softirqs\n\nThe collector also reads:\n\n- `/proc/schedstat` for statistics about the process scheduler in the Linux kernel.\n- `/sys/devices/system/cpu/[X]/thermal_throttle/core_throttle_count` to get the count of thermal throttling events for a specific CPU core on Linux systems.\n- `/sys/devices/system/cpu/[X]/thermal_throttle/package_throttle_count` to get the count of thermal throttling events for a specific CPU package on a Linux system.\n- `/sys/devices/system/cpu/[X]/cpufreq/scaling_cur_freq` to get the current operating frequency of a specific CPU core.\n- `/sys/devices/system/cpu/[X]/cpufreq/stats/time_in_state` to get the amount of time the CPU has spent in each of its available frequency states.\n- `/sys/devices/system/cpu/[X]/cpuidle/state[X]/name` to get the names of the idle states for each CPU core in a Linux system.\n- `/sys/devices/system/cpu/[X]/cpuidle/state[X]/time` to get the total time each specific CPU core has spent in each idle state since the system was started.\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector auto-detects all metrics. 
No configuration is needed.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe collector disables cpu frequency and idle state monitoring when there are more than 128 CPU cores available.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `plugin:proc:/proc/stat` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `plugin:proc:/proc/stat` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU utilization over the last 10 minutes (excluding iowait, nice and steal) |\n| [ 10min_cpu_iowait ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU iowait time over the last 10 minutes |\n| [ 20min_steal_cpu ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU steal time over the last 20 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per System statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.cpu | guest_nice, guest, steal, softirq, irq, user, system, nice, iowait, idle | percentage |\n| system.intr | interrupts | interrupts/s |\n| system.ctxt | switches | context switches/s |\n| system.forks | started | processes/s |\n| system.processes | running, blocked | processes |\n| cpu.core_throttling | a dimension per cpu core | events/s |\n| cpu.package_throttling | a dimension per package | events/s |\n| cpu.cpufreq | a dimension per cpu core | MHz |\n\n### Per cpu core\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cpu | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.cpu | guest_nice, guest, steal, softirq, irq, user, system, nice, iowait, idle | percentage |\n| cpuidle.cpu_cstate_residency_time | a dimension per c-state | percentage |\n\n", @@ -18993,7 +19374,7 @@ export const integrations = [ "most_popular": false }, "overview": "# AM2320\n\nPlugin: python.d.plugin\nModule: am2320\n\n## Overview\n\nThis collector monitors AM2320 sensor metrics about temperature and humidity.\n\nIt retrieves temperature and humidity values by contacting an AM2320 sensor over i2c.\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAssuming prerequisites are met, the collector will try to connect to the sensor via i2c\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Sensor connection to a Raspberry Pi\n\nConnect the am2320 to the Raspberry Pi I2C pins\n\nRaspberry Pi 3B/4 Pins:\n\n- Board 3.3V (pin 1) to sensor VIN (pin 1)\n- Board SDA (pin 3) to sensor SDA (pin 2)\n- Board GND (pin 6) to sensor GND (pin 3)\n- Board SCL (pin 5) to sensor SCL (pin 4)\n\nYou may also need to add two I2C pullup resistors if your board does not already have them. The Raspberry Pi does have internal pullup resistors but it doesn't hurt to add them anyway. You can use 2.2K - 10K but we will just use 10K. 
The resistors go from VDD to SCL and SDA each.\n\n\n#### Software requirements\n\nInstall the Adafruit Circuit Python AM2320 library:\n\n`sudo pip3 install adafruit-circuitpython-am2320`\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/am2320.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/am2320.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n\n{% /details %}\n#### Examples\n\n##### Local sensor\n\nA basic JOB configuration\n\n```yaml\nlocal_sensor:\n name: 'Local AM2320'\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Sensor connection to a Raspberry Pi\n\nConnect the am2320 to the Raspberry Pi I2C pins\n\nRaspberry Pi 3B/4 Pins:\n\n- Board 3.3V (pin 1) to sensor VIN (pin 1)\n- Board SDA (pin 3) to sensor SDA (pin 2)\n- Board GND (pin 6) to sensor GND (pin 3)\n- Board SCL (pin 5) to sensor SCL (pin 4)\n\nYou may also need to add two I2C pullup resistors if your board does not already have them. The Raspberry Pi does have internal pullup resistors but it doesn't hurt to add them anyway. You can use 2.2K - 10K but we will just use 10K. 
The resistors go from VDD to SCL and SDA each.\n\n\n#### Software requirements\n\nInstall the Adafruit Circuit Python AM2320 library:\n\n`sudo pip3 install adafruit-circuitpython-am2320`\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/am2320.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/am2320.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n\n{% /details %}\n#### Examples\n\n##### Local sensor\n\nA basic JOB configuration\n\n```yaml\nlocal_sensor:\n name: 'Local AM2320'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n\nTo troubleshoot issues with the `am2320` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin am2320 debug trace\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `am2320` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep am2320\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep am2320 /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep am2320\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per AM2320 instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| am2320.temperature | temperature | celsius |\n| am2320.humidity | humidity | percentage |\n\n", @@ -19005,14 +19386,14 @@ export const integrations = [ { "meta": { "plugin_name": "python.d.plugin", - "module_name": "boinc", + "module_name": "go_expvar", "monitored_instance": { - "name": "BOINC", - "link": "https://boinc.berkeley.edu/", + "name": "Go applications (EXPVAR)", + "link": "https://pkg.go.dev/expvar", "categories": [ - "data-collection.distributed-computing-systems" + "data-collection.apm" ], - "icon_filename": "bolt.svg" + "icon_filename": "go.png" }, "related_resources": { "integrations": { @@ -19023,32 +19404,33 @@ export const integrations = [ "description": "" }, "keywords": [ - "boinc", - "distributed" + "go", + "expvar", + "application" ], "most_popular": false }, - "overview": "# BOINC\n\nPlugin: python.d.plugin\nModule: boinc\n\n## Overview\n\nThis collector monitors task counts for the Berkeley Open Infrastructure Networking Computing (BOINC) distributed computing client.\n\nIt uses the same RPC interface that the BOINC monitoring GUI does.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, the module will try to auto-detect the password to the RPC interface by looking in `/var/lib/boinc` for this file (this is the location most Linux distributions use for a system-wide BOINC installation), so things may just work without needing configuration for a local system.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Boinc RPC interface\n\nBOINC requires use of a password to access it's RPC interface. 
You can find this password in the `gui_rpc_auth.cfg` file in your BOINC directory.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/boinc.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/boinc.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| hostname | Define a hostname where boinc is running. | localhost | no |\n| port | The port of boinc RPC interface. | | no |\n| password | Provide a password to connect to a boinc RPC interface. | | no |\n\n{% /details %}\n#### Examples\n\n##### Configuration of a remote boinc instance\n\nA basic JOB configuration for a remote boinc instance\n\n```yaml\nremote:\n hostname: '1.2.3.4'\n port: 1234\n password: 'some-password'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\nlocalhost:\n name: 'local'\n host: '127.0.0.1'\n port: 1234\n password: 'some-password'\n\nremote_job:\n name: 'remote'\n host: '192.0.2.1'\n port: 1234\n password: some-other-password\n\n```\n{% /details %}\n", - "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n\nTo troubleshoot issues with the `boinc` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin boinc debug trace\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `boinc` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep boinc\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep boinc /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep boinc\n```\n\n", - "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ boinc_total_tasks ](https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf) | boinc.tasks | average number of total tasks over the last 10 minutes |\n| [ boinc_active_tasks ](https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf) | boinc.tasks | average number of active tasks over the last 10 minutes |\n| [ boinc_compute_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf) | boinc.states | average number of compute errors over the last 10 minutes |\n| [ boinc_upload_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf) | boinc.states | average number of failed uploads over the last 10 minutes |\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per BOINC instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| boinc.tasks | Total, Active | tasks |\n| boinc.states | New, Downloading, Ready to Run, Compute Errors, Uploading, Uploaded, Aborted, Failed Uploads | tasks |\n| boinc.sched | Uninitialized, Preempted, Scheduled | tasks |\n| boinc.process | Uninitialized, Executing, Suspended, Aborted, Quit, Copy Pending | tasks |\n\n", + "overview": "# Go applications (EXPVAR)\n\nPlugin: python.d.plugin\nModule: go_expvar\n\n## Overview\n\nThis collector monitors Go applications that expose their metrics with the use of the `expvar` package from the Go standard library. 
It produces charts for Go runtime memory statistics and optionally any number of custom charts.\n\nIt connects via HTTP to gather the metrics exposed via the `expvar` package.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",
    "setup": "## Setup\n\n### Prerequisites\n\n#### Enable the go_expvar collector\n\nThe `go_expvar` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file.\n\n```bash\ncd /etc/netdata # Replace this path with your Netdata config directory, if different\nsudo ./edit-config python.d.conf\n```\n\nChange the value of the `go_expvar` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/start-stop-restart.md) for your system.\n\n\n#### Sample `expvar` usage in a Go application\n\nThe `expvar` package exposes metrics over HTTP and is very easy to use.\nConsider this minimal sample below:\n\n```go\npackage main\n\nimport (\n _ \"expvar\"\n \"net/http\"\n)\n\nfunc main() {\n http.ListenAndServe(\"127.0.0.1:8080\", nil)\n}\n```\n\nWhen imported this way, the `expvar` package registers an HTTP handler at `/debug/vars` that\nexposes Go runtime's memory statistics in JSON format. 
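For example, with the minimal sample above running, you can fetch that JSON document directly (a quick check, assuming the `127.0.0.1:8080` address hard-coded in the sample; adjust it if your application listens elsewhere):\n\n```bash\ncurl http://127.0.0.1:8080/debug/vars\n```\n\n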
You can inspect the output by opening\nthe URL in your browser (or by using `wget` or `curl`).\n\nSample output:\n\n```json\n{\n\"cmdline\": [\"./expvar-demo-binary\"],\n\"memstats\": {\"Alloc\":630856,\"TotalAlloc\":630856,\"Sys\":3346432,\"Lookups\":27, }\n}\n```\n\nYou can of course expose and monitor your own variables as well.\nHere is a sample Go application that exposes a few custom variables:\n\n```go\npackage main\n\nimport (\n \"expvar\"\n \"net/http\"\n \"runtime\"\n \"time\"\n)\n\nfunc main() {\n\n tick := time.NewTicker(1 * time.Second)\n num_go := expvar.NewInt(\"runtime.goroutines\")\n counters := expvar.NewMap(\"counters\")\n counters.Set(\"cnt1\", new(expvar.Int))\n counters.Set(\"cnt2\", new(expvar.Float))\n\n go http.ListenAndServe(\":8080\", nil)\n\n for {\n select {\n case <- tick.C:\n num_go.Set(int64(runtime.NumGoroutine()))\n counters.Add(\"cnt1\", 1)\n counters.AddFloat(\"cnt2\", 1.452)\n }\n }\n}\n```\n\nApart from the runtime memory stats, this application publishes two counters and the\nnumber of currently running Goroutines and updates these stats every second.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/go_expvar.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/go_expvar.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. Each JOB can be used to monitor a different Go application.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| url | the URL and port of the expvar endpoint. Please include the whole path of the endpoint, as the expvar handler can be installed in a non-standard location. | | yes |\n| user | If the URL is password protected, this is the username to use. | | no |\n| pass | If the URL is password protected, this is the password to use. | | no |\n| collect_memstats | Enables charts for Go runtime's memory statistics. 
| | no |\n| extra_charts | Defines extra data/charts to monitor, please see the example below. | | no |\n\n{% /details %}\n#### Examples\n\n##### Monitor a Go app1 application\n\nThe example below sets up a configuration for a Go application called `app1`. Besides the `memstats`, the application also exposes two counters and the number of currently running Goroutines and updates these stats every second.\n\nThe `go_expvar` collector can monitor these as well with the use of the `extra_charts` configuration variable.\n\nThe `extra_charts` variable is a YAML list of Netdata chart definitions.\nEach chart definition has the following keys:\n\n```\nid: Netdata chart ID\noptions: a key-value mapping of chart options\nlines: a list of line definitions\n```\n\n**Note: please do not use dots in the chart or line ID field.\nSee [this issue](https://github.com/netdata/netdata/pull/1902#issuecomment-284494195) for an explanation.**\n\nPlease see these two links to the official Netdata documentation for more information about the values:\n\n- [External plugins - charts](https://github.com/netdata/netdata/blob/master/src/plugins.d/README.md#chart)\n- [Chart variables](https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/README.md#global-variables-order-and-chart)\n\n**Line definitions**\n\nEach chart can define multiple lines (dimensions).\nA line definition is a key-value mapping of line options.\nEach line can have the following options:\n\n```\n# mandatory\nexpvar_key: the name of the expvar as present in the JSON output of /debug/vars endpoint\nexpvar_type: value type; supported are \"float\" or \"int\"\nid: the id of this line/dimension in Netdata\n\n# optional - Netdata defaults are used if these options are not defined\nname: ''\nalgorithm: absolute\nmultiplier: 1\ndivisor: 100 if expvar_type == float, 1 if expvar_type == int\nhidden: False\n```\n\nPlease see the following link for more information about the options and their default values:\n[External plugins - dimensions](https://github.com/netdata/netdata/blob/master/src/plugins.d/README.md#dimension)\n\nApart from top-level expvars, this plugin can also parse expvars stored in a multi-level map;\nall dicts in the resulting JSON document are then flattened to one level.\nExpvar names are joined together with '.' when flattening.\n\nExample:\n\n```\n{\n \"counters\": {\"cnt1\": 1042, \"cnt2\": 1512.9839999999983},\n \"runtime.goroutines\": 5\n}\n```\n\nIn the above case, the exported variables will be available under `runtime.goroutines`,\n`counters.cnt1` and `counters.cnt2` expvar_keys. 
If the flattening results in a key collision,\nthe first defined key wins and all subsequent keys with the same name are ignored.\n\n\n```yaml\napp1:\n name : 'app1'\n url : 'http://127.0.0.1:8080/debug/vars'\n collect_memstats: true\n extra_charts:\n - id: \"runtime_goroutines\"\n options:\n name: num_goroutines\n title: \"runtime: number of goroutines\"\n units: goroutines\n family: runtime\n context: expvar.runtime.goroutines\n chart_type: line\n lines:\n - {expvar_key: 'runtime.goroutines', expvar_type: int, id: runtime_goroutines}\n - id: \"foo_counters\"\n options:\n name: counters\n title: \"some random counters\"\n units: awesomeness\n family: counters\n context: expvar.foo.counters\n chart_type: line\n lines:\n - {expvar_key: 'counters.cnt1', expvar_type: int, id: counters_cnt1}\n - {expvar_key: 'counters.cnt2', expvar_type: float, id: counters_cnt2}\n\n```\n", + "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n\nTo troubleshoot issues with the `go_expvar` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin go_expvar debug trace\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `go_expvar` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep go_expvar\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep go_expvar /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep go_expvar\n```\n\n", + "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Go applications (EXPVAR) instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| expvar.memstats.heap | alloc, inuse | KiB |\n| expvar.memstats.stack | inuse | KiB |\n| expvar.memstats.mspan | inuse | KiB |\n| expvar.memstats.mcache | inuse | KiB |\n| expvar.memstats.live_objects | live | objects |\n| expvar.memstats.sys | sys | KiB |\n| expvar.memstats.gc_pauses | avg | ns |\n\n", "integration_type": "collector", - "id": "python.d.plugin-boinc-BOINC", - "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/boinc/metadata.yaml", + "id": "python.d.plugin-go_expvar-Go_applications_(EXPVAR)", + "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/go_expvar/metadata.yaml", "related_resources": "" }, { "meta": { "plugin_name": "python.d.plugin", - "module_name": "ceph", + "module_name": "pandas", "monitored_instance": { - "name": "Ceph", - "link": "https://ceph.io/", + "name": "Pandas", + "link": "https://pandas.pydata.org/", "categories": [ - "data-collection.storage-mount-points-and-filesystems" + "data-collection.generic-data-collection" ], - "icon_filename": "ceph.svg" + "icon_filename": "pandas.png" }, "related_resources": { "integrations": { @@ -19059,32 +19441,32 @@ export const integrations = [ "description": "" }, "keywords": [ - "ceph", - "storage" + "pandas", + "python" ], "most_popular": false }, - "overview": "# Ceph\n\nPlugin: python.d.plugin\nModule: ceph\n\n## Overview\n\nThis collector monitors Ceph metrics about Cluster statistics, OSD usage, latency and Pool statistics.\n\nUses the `rados` python module to connect to a Ceph cluster.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### `rados` python module\n\nMake sure the `rados` python module is installed\n\n#### Granting read permissions to ceph group from keyring file\n\nExecute: `chmod 640 /etc/ceph/ceph.client.admin.keyring`\n\n#### Create a specific rados_id\n\nYou can optionally create a rados_id to use instead of admin\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/ceph.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/ceph.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB 
definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| config_file | Ceph config file | | yes |\n| keyring_file | Ceph keyring file. netdata user must be added into ceph group and keyring file must be read group permission. | | yes |\n| rados_id | A rados user id to use for connecting to the Ceph cluster. | admin | no |\n\n{% /details %}\n#### Examples\n\n##### Basic local Ceph cluster\n\nA basic configuration to connect to a local Ceph cluster.\n\n```yaml\nlocal:\n config_file: '/etc/ceph/ceph.conf'\n keyring_file: '/etc/ceph/ceph.client.admin.keyring'\n\n```\n", - "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n\nTo troubleshoot issues with the `ceph` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin ceph debug trace\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `ceph` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep ceph\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep ceph /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep ceph\n```\n\n", - "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ ceph_cluster_space_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ceph.conf) | ceph.general_usage | cluster disk space utilization |\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Ceph instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ceph.general_usage | avail, used | KiB |\n| ceph.general_objects | cluster | objects |\n| ceph.general_bytes | read, write | KiB/s |\n| ceph.general_operations | read, write | operations |\n| ceph.general_latency | apply, commit | milliseconds |\n| ceph.pool_usage | a dimension per Ceph Pool | KiB |\n| ceph.pool_objects | a dimension per Ceph Pool | objects |\n| ceph.pool_read_bytes | a dimension per Ceph Pool | KiB/s |\n| ceph.pool_write_bytes | a dimension per Ceph Pool | KiB/s |\n| ceph.pool_read_operations | a dimension per Ceph Pool | operations |\n| ceph.pool_write_operations | a dimension per Ceph Pool | operations |\n| ceph.osd_usage | a dimension per Ceph OSD | KiB |\n| ceph.osd_size | a dimension per Ceph OSD | KiB |\n| ceph.apply_latency | a dimension per Ceph OSD | milliseconds |\n| ceph.commit_latency | a dimension per Ceph OSD | milliseconds |\n\n", + "overview": "# Pandas\n\nPlugin: python.d.plugin\nModule: pandas\n\n## Overview\n\n[Pandas](https://pandas.pydata.org/) is a de-facto standard in reading and processing most types of structured data in Python.\nIf you have metrics appearing in a CSV, JSON, XML, HTML, or [other supported format](https://pandas.pydata.org/docs/user_guide/io.html),\neither locally or via some HTTP endpoint, you can easily ingest and present those metrics in Netdata, by leveraging the Pandas collector.\n\nThis collector can be used to collect pretty much anything that can be read by Pandas, and then processed by Pandas.\n\n\nThe collector uses [pandas](https://pandas.pydata.org/) to pull data and do pandas-based preprocessing, before feeding to Netdata.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Python Requirements\n\nThis collector depends on some Python (Python 3 only) packages that can usually be installed via `pip` or `pip3`.\n\n```bash\nsudo pip install pandas requests\n```\n\nNote: If you would like to use [`pandas.read_sql`](https://pandas.pydata.org/docs/reference/api/pandas.read_sql.html) to query a database, you will need to install the below packages as 
well.\n\n```bash\nsudo pip install 'sqlalchemy<2.0' psycopg2-binary\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/pandas.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/pandas.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| chart_configs | an array of chart configuration dictionaries | [] | yes |\n| chart_configs.name | name of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.title | title of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.family | [family](https://github.com/netdata/netdata/blob/master/docs/dashboards-and-charts/netdata-charts.md#families) of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.context | [context](https://github.com/netdata/netdata/blob/master/docs/dashboards-and-charts/netdata-charts.md#contexts) of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.type | the type of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.units | the units of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.df_steps | a series of pandas operations (one per line) that each returns a dataframe. | None | yes |\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Temperature API Example\n\nAn example pulling some hourly temperature data: one chart for today's forecast (mean, min, max) and another for the current temperature.\n\n{% details open=true summary=\"Config\" %}\n```yaml\ntemperature:\n name: \"temperature\"\n update_every: 5\n chart_configs:\n - name: \"temperature_forecast_by_city\"\n title: \"Temperature By City - Today Forecast\"\n family: \"temperature.today\"\n context: \"pandas.temperature\"\n type: \"line\"\n units: \"Celsius\"\n df_steps: >\n pd.DataFrame.from_dict(\n {city: requests.get(f'https://api.open-meteo.com/v1/forecast?latitude={lat}&longitude={lng}&hourly=temperature_2m').json()['hourly']['temperature_2m']\n for (city,lat,lng)\n in [\n ('dublin', 53.3441, -6.2675),\n ('athens', 37.9792, 23.7166),\n ('london', 51.5002, -0.1262),\n ('berlin', 52.5235, 13.4115),\n ('paris', 48.8567, 2.3510),\n ('madrid', 40.4167, -3.7033),\n ('new_york', 40.71, -74.01),\n ('los_angeles', 34.05, -118.24),\n ]\n }\n );\n df.describe(); # get aggregate stats for each city;\n df.transpose()[['mean', 'max', 'min']].reset_index(); # just take mean, min, max;\n df.rename(columns={'index':'city'}); # some column renaming;\n df.pivot(columns='city').mean().to_frame().reset_index(); # force to be one row per city;\n df.rename(columns={0:'degrees'}); # some column renaming;\n pd.concat([df, df['city']+'_'+df['level_0']], axis=1); # add new column combining city and summary measurement label;\n df.rename(columns={0:'measurement'}); # some column renaming;\n df[['measurement', 'degrees']].set_index('measurement'); # just take two columns we want;\n df.sort_index(); # sort by city name;\n df.transpose(); # transpose so it's just one wide row;\n - name: \"temperature_current_by_city\"\n title: \"Temperature By City - Current\"\n family: \"temperature.current\"\n context: \"pandas.temperature\"\n type: \"line\"\n units: \"Celsius\"\n df_steps: >\n pd.DataFrame.from_dict(\n {city: requests.get(f'https://api.open-meteo.com/v1/forecast?latitude={lat}&longitude={lng}&current_weather=true').json()['current_weather']\n for (city,lat,lng)\n in [\n ('dublin', 53.3441, -6.2675),\n ('athens', 37.9792, 23.7166),\n ('london', 51.5002, -0.1262),\n ('berlin', 52.5235, 13.4115),\n ('paris', 48.8567, 2.3510),\n ('madrid', 40.4167, -3.7033),\n ('new_york', 40.71, -74.01),\n ('los_angeles', 34.05, -118.24),\n ]\n }\n );\n df.transpose();\n df[['temperature']];\n df.transpose();\n\n```\n{% /details %}\n##### API CSV Example\n\nAn example showing a `read_csv` from a URL and some light pandas data wrangling.\n\n{% details open=true summary=\"Config\" %}\n```yaml\nexample_csv:\n name: \"example_csv\"\n update_every: 2\n chart_configs:\n - name: \"london_system_cpu\"\n title: \"London System CPU - Ratios\"\n family: \"london_system_cpu\"\n context: \"pandas\"\n type: \"line\"\n units: \"n\"\n df_steps: >\n pd.read_csv('https://london.my-netdata.io/api/v1/data?chart=system.cpu&format=csv&after=-60', storage_options={'User-Agent': 'netdata'});\n df.drop('time', axis=1);\n df.mean().to_frame().transpose();\n df.apply(lambda row: (row.user / row.system), axis = 1).to_frame();\n df.rename(columns={0:'average_user_system_ratio'});\n df*100;\n\n```\n{% /details %}\n##### API JSON Example\n\nAn example showing a `read_json` from a URL and some light pandas data wrangling.\n\n{% details open=true summary=\"Config\" %}\n```yaml\nexample_json:\n name: \"example_json\"\n update_every: 2\n chart_configs:\n - name: \"london_system_net\"\n title: \"London System Net - Total 
Bandwidth\"\n family: \"london_system_net\"\n context: \"pandas\"\n type: \"area\"\n units: \"kilobits/s\"\n df_steps: >\n pd.DataFrame(requests.get('https://london.my-netdata.io/api/v1/data?chart=system.net&format=json&after=-1').json()['data'], columns=requests.get('https://london.my-netdata.io/api/v1/data?chart=system.net&format=json&after=-1').json()['labels']);\n df.drop('time', axis=1);\n abs(df);\n df.sum(axis=1).to_frame();\n df.rename(columns={0:'total_bandwidth'});\n\n```\n{% /details %}\n##### XML Example\n\nexample showing a read_xml from a url and some light pandas data wrangling.\n\n{% details open=true summary=\"Config\" %}\n```yaml\nexample_xml:\n name: \"example_xml\"\n update_every: 2\n line_sep: \"|\"\n chart_configs:\n - name: \"temperature_forcast\"\n title: \"Temperature Forecast\"\n family: \"temp\"\n context: \"pandas.temp\"\n type: \"line\"\n units: \"celsius\"\n df_steps: >\n pd.read_xml('http://metwdb-openaccess.ichec.ie/metno-wdb2ts/locationforecast?lat=54.7210798611;long=-8.7237392806', xpath='./product/time[1]/location/temperature', parser='etree')|\n df.rename(columns={'value': 'dublin'})|\n df[['dublin']]|\n\n```\n{% /details %}\n##### SQL Example\n\nexample showing a read_sql from a postgres database using sqlalchemy.\n\n{% details open=true summary=\"Config\" %}\n```yaml\nsql:\n name: \"sql\"\n update_every: 5\n chart_configs:\n - name: \"sql\"\n title: \"SQL Example\"\n family: \"sql.example\"\n context: \"example\"\n type: \"line\"\n units: \"percent\"\n df_steps: >\n pd.read_sql_query(\n sql='\\\n select \\\n random()*100 as metric_1, \\\n random()*100 as metric_2 \\\n ',\n con=create_engine('postgresql://localhost/postgres?user=netdata&password=netdata')\n );\n\n```\n{% /details %}\n", + "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n\nTo troubleshoot issues with the `pandas` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin pandas debug trace\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `pandas` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep pandas\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep pandas /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep pandas\n```\n\n", + "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nThis collector is expecting one row in the final pandas DataFrame. It is that first row that will be taken\nas the most recent values for each dimension on each chart using (`df.to_dict(orient='records')[0]`).\nSee [pd.to_dict()](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_dict.html).\"\n\n\n### Per Pandas instance\n\nThese metrics refer to the entire monitored application.\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n\n", "integration_type": "collector", - "id": "python.d.plugin-ceph-Ceph", - "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/ceph/metadata.yaml", + "id": "python.d.plugin-pandas-Pandas", + "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/pandas/metadata.yaml", "related_resources": "" }, { "meta": { - "plugin_name": "python.d.plugin", - "module_name": "go_expvar", + "plugin_name": "slabinfo.plugin", + "module_name": "slabinfo.plugin", "monitored_instance": { - "name": "Go applications (EXPVAR)", - "link": "https://pkg.go.dev/expvar", + "name": "Linux kernel SLAB allocator statistics", + "link": "https://kernel.org/", "categories": [ - "data-collection.apm" + "data-collection.linux-systems.kernel-metrics" ], - "icon_filename": "go.png" + "icon_filename": "linuxserver.svg" }, "related_resources": { "integrations": { @@ -19095,144 +19477,35 @@ export const integrations = [ "description": "" }, "keywords": [ - "go", - "expvar", - "application" + "linux kernel", + "slab", + "slub", + "slob", + "slabinfo" ], "most_popular": false }, - "overview": "# Go applications (EXPVAR)\n\nPlugin: python.d.plugin\nModule: go_expvar\n\n## Overview\n\nThis collector monitors Go applications that expose their metrics with the use of the `expvar` package from the Go standard library. It produces charts for Go runtime memory statistics and optionally any number of custom charts.\n\nIt connects via http to gather the metrics exposed via the `expvar` package.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Enable the go_expvar collector\n\nThe `go_expvar` collector is disabled by default. 
To enable it, use `edit-config` from the Netdata [config directory](/docs/netdata-agent/configuration/README.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file.\n\n```bash\ncd /etc/netdata # Replace this path with your Netdata config directory, if different\nsudo ./edit-config python.d.conf\n```\n\nChange the value of the `go_expvar` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](/packaging/installer/README.md#maintaining-a-netdata-agent-installation) for your system.\n\n\n#### Sample `expvar` usage in a Go application\n\nThe `expvar` package exposes metrics over HTTP and is very easy to use.\nConsider this minimal sample below:\n\n```go\npackage main\n\nimport (\n _ \"expvar\"\n \"net/http\"\n)\n\nfunc main() {\n http.ListenAndServe(\"127.0.0.1:8080\", nil)\n}\n```\n\nWhen imported this way, the `expvar` package registers a HTTP handler at `/debug/vars` that\nexposes Go runtime's memory statistics in JSON format. You can inspect the output by opening\nthe URL in your browser (or by using `wget` or `curl`).\n\nSample output:\n\n```json\n{\n\"cmdline\": [\"./expvar-demo-binary\"],\n\"memstats\": {\"Alloc\":630856,\"TotalAlloc\":630856,\"Sys\":3346432,\"Lookups\":27, }\n}\n```\n\nYou can of course expose and monitor your own variables as well.\nHere is a sample Go application that exposes a few custom variables:\n\n```go\npackage main\n\nimport (\n \"expvar\"\n \"net/http\"\n \"runtime\"\n \"time\"\n)\n\nfunc main() {\n\n tick := time.NewTicker(1 * time.Second)\n num_go := expvar.NewInt(\"runtime.goroutines\")\n counters := expvar.NewMap(\"counters\")\n counters.Set(\"cnt1\", new(expvar.Int))\n counters.Set(\"cnt2\", new(expvar.Float))\n\n go http.ListenAndServe(\":8080\", nil)\n\n for {\n select {\n case <- tick.C:\n num_go.Set(int64(runtime.NumGoroutine()))\n counters.Add(\"cnt1\", 1)\n counters.AddFloat(\"cnt2\", 1.452)\n }\n }\n}\n```\n\nApart from the runtime memory stats, this application publishes two counters and the\nnumber of currently running Goroutines and updates these stats every second.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/go_expvar.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/go_expvar.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. Each JOB can be used to monitor a different Go application.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. 
| 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| url | the URL and port of the expvar endpoint. Please include the whole path of the endpoint, as the expvar handler can be installed in a non-standard location. | | yes |\n| user | If the URL is password protected, this is the username to use. | | no |\n| pass | If the URL is password protected, this is the password to use. | | no |\n| collect_memstats | Enables charts for Go runtime's memory statistics. | | no |\n| extra_charts | Defines extra data/charts to monitor, please see the example below. | | no |\n\n{% /details %}\n#### Examples\n\n##### Monitor a Go app1 application\n\nThe example below sets a configuration for a Go application, called `app1`. Besides the `memstats`, the application also exposes two counters and the number of currently running Goroutines and updates these stats every second.\n\nThe `go_expvar` collector can monitor these as well with the use of the `extra_charts` configuration variable.\n\nThe `extra_charts` variable is a YaML list of Netdata chart definitions.\nEach chart definition has the following keys:\n\n```\nid: Netdata chart ID\noptions: a key-value mapping of chart options\nlines: a list of line definitions\n```\n\n**Note: please do not use dots in the chart or line ID field.\nSee [this issue](https://github.com/netdata/netdata/pull/1902#issuecomment-284494195) for explanation.**\n\nPlease see these two links to the official Netdata documentation for more information about the values:\n\n- [External plugins - charts](/src/collectors/plugins.d/README.md#chart)\n- [Chart variables](/src/collectors/python.d.plugin/README.md#global-variables-order-and-chart)\n\n**Line definitions**\n\nEach chart can define multiple lines (dimensions).\nA line definition is a key-value mapping of line options.\nEach line can have the following options:\n\n```\n# mandatory\nexpvar_key: the name of the expvar as present in the JSON output of /debug/vars endpoint\nexpvar_type: value type; supported are \"float\" or \"int\"\nid: the id of this line/dimension in Netdata\n\n# optional - Netdata defaults are used if these options are not defined\nname: ''\nalgorithm: absolute\nmultiplier: 1\ndivisor: 100 if expvar_type == float, 1 if expvar_type == int\nhidden: False\n```\n\nPlease see the following link for more information about the options and their default values:\n[External plugins - dimensions](/src/collectors/plugins.d/README.md#dimension)\n\nApart from top-level expvars, this plugin can also parse expvars stored in a multi-level map;\nAll dicts in the resulting JSON document are then flattened to one level.\nExpvar names are joined together with '.' when flattening.\n\nExample:\n\n```\n{\n \"counters\": {\"cnt1\": 1042, \"cnt2\": 1512.9839999999983},\n \"runtime.goroutines\": 5\n}\n```\n\nIn the above case, the exported variables will be available under `runtime.goroutines`,\n`counters.cnt1` and `counters.cnt2` expvar_keys. 
If the flattening results in a key collision,\nthe first defined key wins and all subsequent keys with the same name are ignored.\n\n\n```yaml\napp1:\n name : 'app1'\n url : 'http://127.0.0.1:8080/debug/vars'\n collect_memstats: true\n extra_charts:\n - id: \"runtime_goroutines\"\n options:\n name: num_goroutines\n title: \"runtime: number of goroutines\"\n units: goroutines\n family: runtime\n context: expvar.runtime.goroutines\n chart_type: line\n lines:\n - {expvar_key: 'runtime.goroutines', expvar_type: int, id: runtime_goroutines}\n - id: \"foo_counters\"\n options:\n name: counters\n title: \"some random counters\"\n units: awesomeness\n family: counters\n context: expvar.foo.counters\n chart_type: line\n lines:\n - {expvar_key: 'counters.cnt1', expvar_type: int, id: counters_cnt1}\n - {expvar_key: 'counters.cnt2', expvar_type: float, id: counters_cnt2}\n\n```\n", - "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n\nTo troubleshoot issues with the `go_expvar` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin go_expvar debug trace\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `go_expvar` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep go_expvar\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep go_expvar /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep go_expvar\n```\n\n", + "overview": "# Linux kernel SLAB allocator statistics\n\nPlugin: slabinfo.plugin\nModule: slabinfo.plugin\n\n## Overview\n\nCollects metrics on kernel SLAB cache utilization to monitor the low-level performance impact of workloads in the kernel.\n\n\nThe plugin parses `/proc/slabinfo`\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\nThis integration requires read access to `/proc/slabinfo`, which is accessible only to the root user by default. Netdata uses Linux Capabilities to give the plugin access to this file. `CAP_DAC_READ_SEARCH` is added automatically during installation. 
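You can verify that the capability is attached to the plugin binary with `getcap` (a quick check, assuming the default plugin path):\n\n```bash\n# list the file capabilities set on the slabinfo plugin binary\ngetcap /usr/libexec/netdata/plugins.d/slabinfo.plugin\n```\n\n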
This capability allows bypassing file read permission checks and directory read and execute permission checks. If file capabilities are not usable, then the plugin is instead installed with the SUID bit set in permissions so that it runs as root.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nDue to the large number of metrics generated by this integration, it is disabled by default and must be manually enabled inside `/etc/netdata/netdata.conf`.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Minimum setup\n\nIf you installed `netdata` using a package manager, it is also necessary to install the package `netdata-plugin-slabinfo`.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugins]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"The main configuration file.\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| Enable plugin | As described above, the plugin is disabled by default; this option is used to enable it. | no | yes |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Go applications (EXPVAR) instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| expvar.memstats.heap | alloc, inuse | KiB |\n| expvar.memstats.stack | inuse | KiB |\n| expvar.memstats.mspan | inuse | KiB |\n| expvar.memstats.mcache | inuse | KiB |\n| expvar.memstats.live_objects | live | objects |\n| expvar.memstats.sys | sys | KiB |\n| expvar.memstats.gc_pauses | avg | ns |\n\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nSLAB cache utilization metrics for the whole system.\n\n### Per Linux kernel SLAB allocator statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.slabmemory | a dimension per cache | B |\n| mem.slabfilling | a dimension per cache | % |\n| mem.slabwaste | a dimension per cache | B |\n\n", "integration_type": "collector", "id": "python.d.plugin-go_expvar-Go_applications_(EXPVAR)", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/go_expvar/metadata.yaml", "related_resources": "" }, { "meta": { "plugin_name": "python.d.plugin", "module_name": "openldap", "monitored_instance": { "name": "OpenLDAP", "link": "https://www.openldap.org/", "categories": [ "data-collection.authentication-and-authorization" ], "icon_filename": "statsd.png" }, "related_resources": { "integrations": { "list": [] } }, "info_provided_to_referring_integrations": { "description": "" }, "keywords": [ "openldap", "RBAC", "Directory access" ], "most_popular": false }, "overview": "# OpenLDAP\n\nPlugin: python.d.plugin\nModule: openldap\n\n## Overview\n\nThis collector monitors OpenLDAP metrics about connections, operations, referrals and more.\n\nStatistics are taken from the monitoring interface of an OpenLDAP (slapd) server.\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis collector doesn't work until all of its prerequisites are met.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Configure the openLDAP server to expose metrics for monitoring\n\nFollow the instructions from https://www.openldap.org/doc/admin24/monitoringslapd.html to activate the monitoring interface.\n\n\n#### Install python-ldap module\n\nInstall the `python-ldap` module:\n\n1. With the pip package manager\n\n```bash\npip install python-ldap\n```\n\n2. With the apt package manager (on most deb-based distros)\n\n\n```bash\napt-get install python-ldap\n```\n\n\n3. With the yum package manager (on most rpm-based distros)\n\n\n```bash\nyum install python-ldap\n```\n\n\n#### Insert credentials for Netdata to access openLDAP server\n\nUse the `ldappasswd` utility to set a password for the username you will use.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/openldap.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/openldap.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| username | The bind user with the right to access the monitor statistics | | yes |\n| password | The password for the bound user | | yes |\n| server | The listening address of the LDAP server. In case of TLS, use the hostname which the certificate is published for. | | yes |\n| port | The listening port of the LDAP server. Change to port 636 in case of a TLS connection. | 389 | yes |\n| use_tls | Set to True if a TLS connection is used over ldaps:// | no | no |\n| use_start_tls | Set to True if a TLS connection is used over ldap:// | no | no |\n| cert_check | Set to False to skip the certificate check | True | yes |\n| timeout | Seconds before timing out if no connection exists | | yes |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\nusername: \"cn=admin\"\npassword: \"pass\"\nserver: \"localhost\"\nport: \"389\"\ncert_check: True\ntimeout: 1\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n\nTo troubleshoot issues with the `openldap` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin openldap debug trace\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `openldap` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep openldap\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep openldap /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep openldap\n```\n\n", - "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per OpenLDAP instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| openldap.total_connections | connections | connections/s |\n| openldap.traffic_stats | sent | KiB/s |\n| openldap.operations_status | completed, initiated | ops/s |\n| openldap.referrals | sent | referrals/s |\n| openldap.entries | sent | entries/s |\n| openldap.ldap_operations | bind, search, unbind, add, delete, modify, compare | ops/s |\n| openldap.waiters | write, read | waiters/s |\n\n", "integration_type": "collector", "id": "python.d.plugin-openldap-OpenLDAP", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/openldap/metadata.yaml", "related_resources": "" }, { "meta": { "plugin_name": "python.d.plugin", "module_name": "oracledb", "monitored_instance": { "name": "Oracle DB", "link": "https://docs.oracle.com/en/database/oracle/oracle-database/", "categories": [ "data-collection.database-servers" ], "icon_filename": "oracle.svg" }, "related_resources": { "integrations": { "list": [] } }, "info_provided_to_referring_integrations": { "description": "" }, "keywords": [ "database", "oracle", "data warehouse", "SQL" ], "most_popular": false }, "overview": "# Oracle DB\n\nPlugin: python.d.plugin\nModule: oracledb\n\n## Overview\n\nThis collector monitors OracleDB database metrics about sessions, tables, memory and more.\n\nIt collects the metrics via the supported database client library.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nIn order for this collector to work, it needs a read-only user `netdata` in the RDBMS.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nWhen the requirements are met, databases on the local host on port 1521 will be auto-detected.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install the python-oracledb package\n\nYou can follow the official guide below to install the required package:\n\nSource: https://python-oracledb.readthedocs.io/en/latest/user_guide/installation.html\n\n\n#### Create a read only user for netdata\n\nFollow the official instructions for your Oracle RDBMS to create a read-only user for netdata. The operation may follow this approach:\n\nConnect to your Oracle database with an administrative user and execute:\n\n```sql\nCREATE USER netdata IDENTIFIED BY <password>;\n\nGRANT CONNECT TO netdata;\nGRANT SELECT_CATALOG_ROLE TO netdata;\n```\n\n\n#### Edit the configuration\n\nEdit the configuration:\n\n1. Provide a valid user for the netdata collector to access the database\n2. 
Specify the network target this database is listening.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/oracledb.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/oracledb.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| user | The username for the user account. | no | yes |\n| password | The password for the user account. | no | yes |\n| server | The IP address or hostname (and port) of the Oracle Database Server. | no | yes |\n| service | The Oracle Database service name. To view the services available on your server run this query, `select SERVICE_NAME from gv$session where sid in (select sid from V$MYSTAT)`. | no | yes |\n| protocol | one of the strings \"tcp\" or \"tcps\" indicating whether to use unencrypted network traffic or encrypted network traffic | no | yes |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration, two jobs described for two databases.\n\n```yaml\nlocal:\n user: 'netdata'\n password: 'secret'\n server: 'localhost:1521'\n service: 'XE'\n protocol: 'tcps'\n\nremote:\n user: 'netdata'\n password: 'secret'\n server: '10.0.0.1:1521'\n service: 'XE'\n protocol: 'tcps'\n\n```\n", - "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n\nTo troubleshoot issues with the `oracledb` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin oracledb debug trace\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `oracledb` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep oracledb\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep oracledb /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep oracledb\n```\n\n", - "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nThese metrics refer to the entire monitored application.\n\n### Per Oracle DB instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| oracledb.session_count | total, active | sessions |\n| oracledb.session_limit_usage | usage | % |\n| oracledb.logons | logons | events/s |\n| oracledb.physical_disk_read_writes | reads, writes | events/s |\n| oracledb.sorts_on_disks | sorts | events/s |\n| oracledb.full_table_scans | full table scans | events/s |\n| oracledb.database_wait_time_ratio | wait time ratio | % |\n| oracledb.shared_pool_free_memory | free memory | % |\n| oracledb.in_memory_sorts_ratio | in-memory sorts | % |\n| oracledb.sql_service_response_time | time | seconds |\n| oracledb.user_rollbacks | rollbacks | events/s |\n| oracledb.enqueue_timeouts | enqueue timeouts | events/s |\n| oracledb.cache_hit_ration | buffer, cursor, library, row | % |\n| oracledb.global_cache_blocks | corrupted, lost | events/s |\n| oracledb.activity | parse count, execute count, user commits, user rollbacks | events/s |\n| oracledb.wait_time | application, configuration, administrative, concurrency, commit, network, user I/O, system I/O, scheduler, other | ms |\n| oracledb.tablespace_size | a dimension per active tablespace | KiB |\n| oracledb.tablespace_usage | a dimension per active tablespace | KiB |\n| oracledb.tablespace_usage_in_percent | a dimension per active tablespace | % |\n| oracledb.allocated_size | a dimension per active tablespace | B |\n| oracledb.allocated_usage | a dimension per active tablespace | B |\n| oracledb.allocated_usage_in_percent | a dimension per active tablespace | % |\n\n", - "integration_type": "collector", - "id": "python.d.plugin-oracledb-Oracle_DB", - "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/oracledb/metadata.yaml", - "related_resources": "" - }, - { - "meta": { - "plugin_name": "python.d.plugin", - "module_name": "pandas", - "monitored_instance": { - "name": "Pandas", - "link": "https://pandas.pydata.org/", - "categories": [ - "data-collection.generic-data-collection" - ], - "icon_filename": "pandas.png" - }, - "related_resources": { - "integrations": { - "list": [] - } - }, - "info_provided_to_referring_integrations": { - "description": "" - }, - "keywords": [ - "pandas", - "python" - ], - "most_popular": false - }, - "overview": 
"# Pandas\n\nPlugin: python.d.plugin\nModule: pandas\n\n## Overview\n\n[Pandas](https://pandas.pydata.org/) is a de-facto standard in reading and processing most types of structured data in Python.\nIf you have metrics appearing in a CSV, JSON, XML, HTML, or [other supported format](https://pandas.pydata.org/docs/user_guide/io.html),\neither locally or via some HTTP endpoint, you can easily ingest and present those metrics in Netdata, by leveraging the Pandas collector.\n\nThis collector can be used to collect pretty much anything that can be read by Pandas, and then processed by Pandas.\n\n\nThe collector uses [pandas](https://pandas.pydata.org/) to pull data and do pandas-based preprocessing, before feeding to Netdata.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Python Requirements\n\nThis collector depends on some Python (Python 3 only) packages that can usually be installed via `pip` or `pip3`.\n\n```bash\nsudo pip install pandas requests\n```\n\nNote: If you would like to use [`pandas.read_sql`](https://pandas.pydata.org/docs/reference/api/pandas.read_sql.html) to query a database, you will need to install the below packages as well.\n\n```bash\nsudo pip install 'sqlalchemy<2.0' psycopg2-binary\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/pandas.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/pandas.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| chart_configs | an array of chart configuration dictionaries | [] | yes |\n| chart_configs.name | name of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.title | title of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.family | [family](/docs/dashboards-and-charts/netdata-charts.md#families) of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.context | [context](/docs/dashboards-and-charts/netdata-charts.md#contexts) of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.type | the type of the chart to be displayed in the dashboard. 
| None | yes |\n| chart_configs.units | the units of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.df_steps | a series of pandas operations (one per line) that each returns a dataframe. | None | yes |\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n\n{% /details %}\n#### Examples\n\n##### Temperature API Example\n\nexample pulling some hourly temperature data, a chart for today forecast (mean,min,max) and another chart for current.\n\n{% details open=true summary=\"Config\" %}\n```yaml\ntemperature:\n name: \"temperature\"\n update_every: 5\n chart_configs:\n - name: \"temperature_forecast_by_city\"\n title: \"Temperature By City - Today Forecast\"\n family: \"temperature.today\"\n context: \"pandas.temperature\"\n type: \"line\"\n units: \"Celsius\"\n df_steps: >\n pd.DataFrame.from_dict(\n {city: requests.get(f'https://api.open-meteo.com/v1/forecast?latitude={lat}&longitude={lng}&hourly=temperature_2m').json()['hourly']['temperature_2m']\n for (city,lat,lng)\n in [\n ('dublin', 53.3441, -6.2675),\n ('athens', 37.9792, 23.7166),\n ('london', 51.5002, -0.1262),\n ('berlin', 52.5235, 13.4115),\n ('paris', 48.8567, 2.3510),\n ('madrid', 40.4167, -3.7033),\n ('new_york', 40.71, -74.01),\n ('los_angeles', 34.05, -118.24),\n ]\n }\n );\n df.describe(); # get aggregate stats for each city;\n df.transpose()[['mean', 'max', 'min']].reset_index(); # just take mean, min, max;\n df.rename(columns={'index':'city'}); # some column renaming;\n df.pivot(columns='city').mean().to_frame().reset_index(); # force to be one row per city;\n df.rename(columns={0:'degrees'}); # some column renaming;\n pd.concat([df, df['city']+'_'+df['level_0']], axis=1); # add new column combining city and summary measurement label;\n df.rename(columns={0:'measurement'}); # some column renaming;\n df[['measurement', 'degrees']].set_index('measurement'); # just take two columns we want;\n df.sort_index(); # sort by city name;\n df.transpose(); # transpose so its just one wide row;\n - name: \"temperature_current_by_city\"\n title: \"Temperature By City - Current\"\n family: \"temperature.current\"\n context: \"pandas.temperature\"\n type: \"line\"\n units: \"Celsius\"\n df_steps: >\n pd.DataFrame.from_dict(\n {city: requests.get(f'https://api.open-meteo.com/v1/forecast?latitude={lat}&longitude={lng}¤t_weather=true').json()['current_weather']\n for (city,lat,lng)\n in [\n ('dublin', 53.3441, -6.2675),\n ('athens', 37.9792, 23.7166),\n ('london', 51.5002, -0.1262),\n ('berlin', 52.5235, 13.4115),\n ('paris', 48.8567, 2.3510),\n ('madrid', 40.4167, -3.7033),\n ('new_york', 40.71, -74.01),\n ('los_angeles', 34.05, -118.24),\n ]\n }\n );\n df.transpose();\n df[['temperature']];\n df.transpose();\n\n```\n{% /details %}\n##### API CSV Example\n\nexample showing a read_csv from a url and some light pandas data wrangling.\n\n{% details open=true summary=\"Config\" %}\n```yaml\nexample_csv:\n name: \"example_csv\"\n update_every: 2\n chart_configs:\n - name: 
\"london_system_cpu\"\n title: \"London System CPU - Ratios\"\n family: \"london_system_cpu\"\n context: \"pandas\"\n type: \"line\"\n units: \"n\"\n df_steps: >\n pd.read_csv('https://london.my-netdata.io/api/v1/data?chart=system.cpu&format=csv&after=-60', storage_options={'User-Agent': 'netdata'});\n df.drop('time', axis=1);\n df.mean().to_frame().transpose();\n df.apply(lambda row: (row.user / row.system), axis = 1).to_frame();\n df.rename(columns={0:'average_user_system_ratio'});\n df*100;\n\n```\n{% /details %}\n##### API JSON Example\n\nexample showing a read_json from a url and some light pandas data wrangling.\n\n{% details open=true summary=\"Config\" %}\n```yaml\nexample_json:\n name: \"example_json\"\n update_every: 2\n chart_configs:\n - name: \"london_system_net\"\n title: \"London System Net - Total Bandwidth\"\n family: \"london_system_net\"\n context: \"pandas\"\n type: \"area\"\n units: \"kilobits/s\"\n df_steps: >\n pd.DataFrame(requests.get('https://london.my-netdata.io/api/v1/data?chart=system.net&format=json&after=-1').json()['data'], columns=requests.get('https://london.my-netdata.io/api/v1/data?chart=system.net&format=json&after=-1').json()['labels']);\n df.drop('time', axis=1);\n abs(df);\n df.sum(axis=1).to_frame();\n df.rename(columns={0:'total_bandwidth'});\n\n```\n{% /details %}\n##### XML Example\n\nexample showing a read_xml from a url and some light pandas data wrangling.\n\n{% details open=true summary=\"Config\" %}\n```yaml\nexample_xml:\n name: \"example_xml\"\n update_every: 2\n line_sep: \"|\"\n chart_configs:\n - name: \"temperature_forcast\"\n title: \"Temperature Forecast\"\n family: \"temp\"\n context: \"pandas.temp\"\n type: \"line\"\n units: \"celsius\"\n df_steps: >\n pd.read_xml('http://metwdb-openaccess.ichec.ie/metno-wdb2ts/locationforecast?lat=54.7210798611;long=-8.7237392806', xpath='./product/time[1]/location/temperature', parser='etree')|\n df.rename(columns={'value': 'dublin'})|\n df[['dublin']]|\n\n```\n{% /details %}\n##### SQL Example\n\nexample showing a read_sql from a postgres database using sqlalchemy.\n\n{% details open=true summary=\"Config\" %}\n```yaml\nsql:\n name: \"sql\"\n update_every: 5\n chart_configs:\n - name: \"sql\"\n title: \"SQL Example\"\n family: \"sql.example\"\n context: \"example\"\n type: \"line\"\n units: \"percent\"\n df_steps: >\n pd.read_sql_query(\n sql='\\\n select \\\n random()*100 as metric_1, \\\n random()*100 as metric_2 \\\n ',\n con=create_engine('postgresql://localhost/postgres?user=netdata&password=netdata')\n );\n\n```\n{% /details %}\n", - "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n\nTo troubleshoot issues with the `pandas` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin pandas debug trace\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `pandas` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep pandas\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep pandas /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep pandas\n```\n\n", - "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nThis collector is expecting one row in the final pandas DataFrame. 
It is that first row that will be taken\nas the most recent values for each dimension on each chart using (`df.to_dict(orient='records')[0]`).\nSee [pd.to_dict()](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_dict.html).\"\n\n\n### Per Pandas instance\n\nThese metrics refer to the entire monitored application.\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n\n", - "integration_type": "collector", - "id": "python.d.plugin-pandas-Pandas", - "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/pandas/metadata.yaml", + "id": "slabinfo.plugin-slabinfo.plugin-Linux_kernel_SLAB_allocator_statistics", + "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/slabinfo.plugin/metadata.yaml", "related_resources": "" }, { "meta": { - "plugin_name": "python.d.plugin", - "module_name": "samba", + "plugin_name": "tc.plugin", + "module_name": "tc.plugin", "monitored_instance": { - "name": "Samba", - "link": "https://www.samba.org/samba/", + "name": "tc QoS classes", + "link": "https://wiki.linuxfoundation.org/networking/iproute2", "categories": [ - "data-collection.storage-mount-points-and-filesystems" + "data-collection.linux-systems.network-metrics" ], - "icon_filename": "samba.svg" + "icon_filename": "netdata.png" }, "related_resources": { "integrations": { @@ -19242,70 +19515,30 @@ export const integrations = [ "info_provided_to_referring_integrations": { "description": "" }, - "keywords": [ - "samba", - "file sharing" - ], + "keywords": [], "most_popular": false }, - "overview": "# Samba\n\nPlugin: python.d.plugin\nModule: samba\n\n## Overview\n\nThis collector monitors the performance metrics of Samba file sharing.\n\nIt is using the `smbstatus` command-line tool.\n\nExecuted commands:\n\n- `sudo -n smbstatus -P`\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n`smbstatus` is used, which can only be executed by `root`. It uses `sudo` and assumes that it is configured such that the `netdata` user can execute `smbstatus` as root without a password.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAfter all the permissions are satisfied, the `smbstatus -P` binary is executed.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Enable the samba collector\n\nThe `samba` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config directory](/docs/netdata-agent/configuration/README.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file.\n\n```bash\ncd /etc/netdata # Replace this path with your Netdata config directory, if different\nsudo ./edit-config python.d.conf\n```\nChange the value of the `samba` setting to `yes`. 
Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](/packaging/installer/README.md#maintaining-a-netdata-agent-installation) for your system.\n\n\n#### Permissions and programs\n\nTo run the collector you need:\n\n- `smbstatus` program\n- `sudo` program\n- `smbd` must be compiled with profiling enabled\n- `smbd` must be started either with the `-P 1` option or inside `smb.conf` using `smbd profiling level`\n\nThe module uses `smbstatus`, which can only be executed by `root`. It uses `sudo` and assumes that it is configured such that the `netdata` user can execute `smbstatus` as root without a password.\n\n- add to your `/etc/sudoers` file:\n\n `which smbstatus` shows the full path to the binary.\n\n ```bash\n netdata ALL=(root) NOPASSWD: /path/to/smbstatus\n ```\n\n- Reset Netdata's systemd unit [CapabilityBoundingSet](https://www.freedesktop.org/software/systemd/man/systemd.exec.html#Capabilities) (Linux distributions with systemd)\n\n The default CapabilityBoundingSet doesn't allow using `sudo`, and is quite strict in general. Resetting is not optimal, but a next-best solution given the inability to execute `smbstatus` using `sudo`.\n\n\n As the `root` user, do the following:\n\n ```cmd\n mkdir /etc/systemd/system/netdata.service.d\n echo -e '[Service]\\nCapabilityBoundingSet=~' | tee /etc/systemd/system/netdata.service.d/unset-capability-bounding-set.conf\n systemctl daemon-reload\n systemctl restart netdata.service\n ```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/samba.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/samba.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\nmy_job_name:\n name: my_name\n update_every: 1\n\n```\n{% /details %}\n", - "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n\nTo troubleshoot issues with the `samba` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin samba debug trace\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `samba` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep samba\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep samba /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep samba\n```\n\n", + "overview": "# tc QoS classes\n\nPlugin: tc.plugin\nModule: tc.plugin\n\n## Overview\n\nExamine tc metrics to gain insights into Linux traffic control operations. Study packet flow rates, queue lengths, and drop rates to optimize network traffic flow.\n\nThe plugin uses `tc` command to collect information about Traffic control.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs to access command `tc` to get the necessary metrics. To achieve this netdata modifies permission of file `/usr/libexec/netdata/plugins.d/tc-qos-helper.sh`.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Create `tc-qos-helper.conf`\n\nIn order to view tc classes, you need to create the file `/etc/netdata/tc-qos-helper.conf` with content:\n\n```text\ntc_show=\"class\"\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:tc]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| script to run to get tc values | Path to script `tc-qos-helper.sh` | /usr/libexec/netdata/plugins.d/tc-qos-helper.sh | no |\n| enable show all classes and qdiscs for all interfaces | yes/no flag to control what data is presented. | yes | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration using classes defined in `/etc/iproute2/tc_cls`.\n\nAn example of class IDs mapped to names in that file can be:\n\n```text\n2:1 Standard\n2:8 LowPriorityData\n2:10 HighThroughputData\n2:16 OAM\n2:18 LowLatencyData\n2:24 BroadcastVideo\n2:26 MultimediaStreaming\n2:32 RealTimeInteractive\n2:34 MultimediaConferencing\n2:40 Signalling\n2:46 Telephony\n2:48 NetworkControl\n```\n\nYou can read more about setting up the tc rules in rc.local in this [GitHub issue](https://github.com/netdata/netdata/issues/4563#issuecomment-455711973).\n\n\n```yaml\n[plugin:tc]\n script to run to get tc values = /usr/libexec/netdata/plugins.d/tc-qos-helper.sh\n enable show all classes and qdiscs for all interfaces = yes\n\n```\n",
+    "troubleshooting": "",
     "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n",
-    "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Samba instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| syscall.rw | sendfile, recvfile | KiB/s |\n| smb2.rw | readout, writein, readin, writeout | KiB/s |\n| smb2.create_close | create, close | operations/s |\n| smb2.get_set_info | getinfo, setinfo | operations/s |\n| smb2.find | find | operations/s |\n| smb2.notify | notify | operations/s |\n| smb2.sm_counters | tcon, negprot, tdis, cancel, logoff, flush, lock, keepalive, break, sessetup | count |\n\n",
+    "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per network device direction\n\nMetrics related to QoS network device directions. Each direction (in/out) produces its own set of the following metrics.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | The network interface. 
|\n| device_name | The network interface name |\n| group | The device family |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| tc.qos | a dimension per class | kilobits/s |\n| tc.qos_packets | a dimension per class | packets/s |\n| tc.qos_dropped | a dimension per class | packets/s |\n| tc.qos_tokens | a dimension per class | tokens |\n| tc.qos_ctokens | a dimension per class | ctokens |\n\n", "integration_type": "collector", - "id": "python.d.plugin-samba-Samba", - "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/samba/metadata.yaml", + "id": "tc.plugin-tc.plugin-tc_QoS_classes", + "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/tc.plugin/metadata.yaml", "related_resources": "" }, { "meta": { - "plugin_name": "python.d.plugin", - "module_name": "spigotmc", + "plugin_name": "timex.plugin", + "module_name": "timex.plugin", "monitored_instance": { - "name": "SpigotMC", + "name": "Timex", "link": "", "categories": [ - "data-collection.gaming" - ], - "icon_filename": "spigot.jfif" - }, - "related_resources": { - "integrations": { - "list": [] - } - }, - "info_provided_to_referring_integrations": { - "description": "" - }, - "keywords": [ - "minecraft server", - "spigotmc server", - "spigot" - ], - "most_popular": false - }, - "overview": "# SpigotMC\n\nPlugin: python.d.plugin\nModule: spigotmc\n\n## Overview\n\nThis collector monitors SpigotMC server performance, in the form of ticks per second average, memory utilization, and active users.\n\n\nIt sends the `tps`, `list` and `online` commands to the Server, and gathers the metrics from the responses.\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, this collector will attempt to connect to a Spigot server running on the local host on port `25575`.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Enable the Remote Console Protocol\n\nUnder your SpigotMC server's `server.properties` configuration file, you should set `enable-rcon` to `true`.\n\nThis will allow the Server to listen and respond to queries over the rcon protocol.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/spigotmc.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/spigotmc.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter 
is specified.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 1 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| host | The host's IP to connect to. | localhost | yes |\n| port | The port the remote console is listening on. | 25575 | yes |\n| password | Remote console password if any. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic configuration example.\n\n```yaml\nlocal:\n name: local_server\n url: 127.0.0.1\n port: 25575\n\n```\n##### Basic Authentication\n\nAn example using basic password for authentication with the remote console.\n\n{% details open=true summary=\"Config\" %}\n```yaml\nlocal:\n name: local_server_pass\n url: 127.0.0.1\n port: 25575\n password: 'foobar'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\nlocal_server:\n name : my_local_server\n url : 127.0.0.1\n port: 25575\n\nremote_server:\n name : another_remote_server\n url : 192.0.2.1\n port: 25575\n\n```\n{% /details %}\n", - "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n\nTo troubleshoot issues with the `spigotmc` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin spigotmc debug trace\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `spigotmc` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep spigotmc\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep spigotmc /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep spigotmc\n```\n\n", - "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per SpigotMC instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| spigotmc.tps | 1 Minute Average, 5 Minute Average, 15 Minute Average | ticks |\n| spigotmc.users | Users | users |\n| spigotmc.mem | used, allocated, max | MiB |\n\n", - "integration_type": "collector", - "id": "python.d.plugin-spigotmc-SpigotMC", - "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/spigotmc/metadata.yaml", - "related_resources": "" - }, - { - "meta": { - "plugin_name": "python.d.plugin", - "module_name": "varnish", - "monitored_instance": { - "name": "Varnish", - "link": "https://varnish-cache.org/", - "categories": [ - "data-collection.web-servers-and-web-proxies" + "data-collection.system-clock-and-ntp" ], - "icon_filename": "varnish.svg" + "icon_filename": "syslog.png" }, "related_resources": { "integrations": { @@ -19315,37 +19548,30 @@ export const integrations = [ "info_provided_to_referring_integrations": { "description": "" }, - "keywords": [ - "varnish", - "varnishstat", - "varnishd", - "cache", - "web server", - "web cache" - ], + "keywords": [], "most_popular": false }, - "overview": "# Varnish\n\nPlugin: python.d.plugin\nModule: varnish\n\n## Overview\n\nThis collector monitors Varnish metrics about HTTP accelerator global, Backends (VBE) and Storages (SMF, SMA, MSE) statistics.\n\nNote that both, Varnish-Cache (free and open source) and Varnish-Plus (Commercial/Enterprise version), are supported.\n\n\nIt uses the `varnishstat` tool in order to collect the metrics.\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n`netdata` user must be a member of the `varnish` group.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, if the permissions are satisfied, the `varnishstat` tool will be executed on the host.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Provide the necessary permissions\n\nIn order for the collector to work, you need to add the `netdata` user to the `varnish` user group, so that it can execute the `varnishstat` tool:\n\n```\nusermod -aG varnish netdata\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/varnish.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/varnish.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* 
Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| instance_name | the name of the varnishd instance to get logs from. If not specified, the local host name is used. | | yes |\n| update_every | Sets the default data collection frequency. | 10 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njob_name:\n instance_name: ''\n\n```\n", - "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n\nTo troubleshoot issues with the `varnish` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin varnish debug trace\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `varnish` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep varnish\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep varnish /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep varnish\n```\n\n",
-    "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n",
-    "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Varnish instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| varnish.session_connection | accepted, dropped | connections/s |\n| varnish.client_requests | received | requests/s |\n| varnish.all_time_hit_rate | hit, miss, hitpass | percentage |\n| varnish.current_poll_hit_rate | hit, miss, hitpass | percentage |\n| varnish.cached_objects_expired | objects | expired/s |\n| varnish.cached_objects_nuked | objects | nuked/s |\n| varnish.threads_total | None | number |\n| varnish.threads_statistics | created, failed, limited | threads/s |\n| varnish.threads_queue_len | in queue | requests |\n| varnish.backend_connections | successful, unhealthy, reused, closed, recycled, failed | connections/s |\n| varnish.backend_requests | sent | requests/s |\n| varnish.esi_statistics | errors, warnings | problems/s |\n| varnish.memory_usage | free, allocated | MiB |\n| varnish.uptime | uptime | seconds |\n\n### Per Backend\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| varnish.backend | header, body | kilobits/s |\n\n### Per Storage\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| varnish.storage_usage | free, allocated | KiB |\n| varnish.storage_alloc_objs | allocated | objects |\n\n",
+    "overview": "# Timex\n\nPlugin: timex.plugin\nModule: timex.plugin\n\n## Overview\n\nExamine Timex metrics to gain insights into system clock operations. Study time sync status, clock drift, and adjustments to ensure accurate system timekeeping.\n\nIt uses the adjtimex system call on Linux and ntp_adjtime on FreeBSD or macOS to monitor the system kernel clock synchronization state.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",
+    "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:timex]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nAt least one option ('clock synchronization state', 'time offset') needs to be enabled for this collector to run.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| clock synchronization state | Make a chart showing the system clock synchronization state. | yes | yes |\n| time offset | Make a chart showing the computed time offset between the local system and the reference clock. | yes | yes |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic configuration example.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n[plugin:timex]\n update every = 1\n clock synchronization state = yes\n time offset = yes\n\n```\n{% /details %}\n",
+    "troubleshooting": "",
+    "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ system_clock_sync_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/timex.conf) | system.clock_sync_state | when set to 0, the system kernel believes the system clock is not properly synchronized to a reliable server |\n",
+    "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Timex instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.clock_sync_state | state | state |\n| system.clock_status | unsync, clockerr | status |\n| system.clock_sync_offset | offset | milliseconds |\n\n", "integration_type": "collector", - "id": "python.d.plugin-varnish-Varnish", - "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/varnish/metadata.yaml", + "id": "timex.plugin-timex.plugin-Timex", + "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/timex.plugin/metadata.yaml", "related_resources": "" }, { "meta": { - "plugin_name": "python.d.plugin", - "module_name": "w1sensor", + "plugin_name": "windows.plugin", + "module_name": "PerflibMemory", "monitored_instance": { - "name": "1-Wire Sensors", - "link": "https://www.analog.com/en/product-category/1wire-temperature-sensors.html", + "name": "Memory statistics", + "link": "https://learn.microsoft.com/en-us/windows/win32/Memory/memory-management", "categories": [ - "data-collection.hardware-devices-and-sensors" + "data-collection.windows-systems" ], - "icon_filename": "1-wire.png" + "icon_filename": "windows.svg" }, "related_resources": { "integrations": { @@ -19356,33 +19582,32 @@ export const integrations = [ "description": "" }, "keywords": [ - "temperature", - "sensor", - "1-wire" + "memory", + "swap" ], "most_popular": false }, - "overview": "# 1-Wire Sensors\n\nPlugin: python.d.plugin\nModule: w1sensor\n\n## Overview\n\nMonitor 1-Wire Sensors metrics with Netdata for optimal environmental conditions monitoring. Enhance your environmental monitoring with real-time insights and alerts.\n\nThe collector uses the wire, w1_gpio, and w1_therm kernel modules. 
Currently temperature sensors are supported and automatically detected.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector will try to auto detect available 1-Wire devices.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Required Linux kernel modules\n\nMake sure `wire`, `w1_gpio`, and `w1_therm` kernel modules are loaded.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/w1sensor.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/w1sensor.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| name_<1-Wire id> | This allows associating a human readable name with a sensor's 1-Wire identifier. | | no |\n\n{% /details %}\n#### Examples\n\n##### Provide human readable names\n\nAssociate two 1-Wire identifiers with human readable names.\n\n```yaml\nsensors:\n name_00000022276e: 'Machine room'\n name_00000022298f: 'Rack 12'\n\n```\n", - "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n\nTo troubleshoot issues with the `w1sensor` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin w1sensor debug trace\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `w1sensor` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep w1sensor\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep w1sensor /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep w1sensor\n```\n\n", + "overview": "# Memory statistics\n\nPlugin: windows.plugin\nModule: PerflibMemory\n\n## Overview\n\nThis collector monitors swap and memory pool statistics on Windows systems.\n\n\nIt queries for the 'Memory' object from Perflib in order to gather the metrics.\n\n\nThis collector is only supported on the following platforms:\n\n- windows\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector automatically detects all of the metrics, no further configuration is required.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:windows]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| PerflibMemory | An option to enable or disable the data collection. 
| yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", + "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per 1-Wire Sensors instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| w1sensor.temp | a dimension per sensor | Celsius |\n\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Memory statistics instance\n\nThese metrics refer to the entire monitored instance\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.swap_iops | read, write | operations/s |\n| mem.swap_pages_io | read, write | pages/s |\n| mem.system_pool_size | paged, pool-paged | bytes |\n\n", "integration_type": "collector", - "id": "python.d.plugin-w1sensor-1-Wire_Sensors", - "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/w1sensor/metadata.yaml", + "id": "windows.plugin-PerflibMemory-Memory_statistics", + "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/windows.plugin/metadata.yaml", "related_resources": "" }, { "meta": { - "plugin_name": "python.d.plugin", - "module_name": "zscores", + "plugin_name": "windows.plugin", + "module_name": "PerflibProcesses", "monitored_instance": { - "name": "python.d zscores", - "link": "https://en.wikipedia.org/wiki/Standard_score", + "name": "System statistics", + "link": "https://learn.microsoft.com/en-us/windows/win32/procthread/processes-and-threads", "categories": [ - "data-collection.other" + "data-collection.windows-systems" ], - "icon_filename": "" + "icon_filename": "windows.svg" }, "related_resources": { "integrations": { @@ -19393,36 +19618,33 @@ export const integrations = [ "description": "" }, "keywords": [ - "zscore", - "z-score", - "standard score", - "standard deviation", - "anomaly detection", - "statistical anomaly detection" + "process counts", + "threads", + "context switch" ], "most_popular": false }, - "overview": "# python.d zscores\n\nPlugin: python.d.plugin\nModule: zscores\n\n## Overview\n\nBy using smoothed, rolling [Z-Scores](https://en.wikipedia.org/wiki/Standard_score) for selected metrics or charts you can narrow down your focus and shorten root cause analysis.\n\n\nThis collector uses the [Netdata rest api](/src/web/api/README.md) to get the `mean` and `stddev`\nfor each dimension on specified charts over a time range (defined by `train_secs` and `offset_secs`).\n\nFor each dimension it will calculate a Z-Score as `z = (x - mean) / stddev` (clipped at `z_clip`). 
Scores are then smoothed over\ntime (`z_smooth_n`) and, if `mode: 'per_chart'`, aggregated across dimensions to a smoothed, rolling chart level Z-Score at each time step.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Python Requirements\n\nThis collector will only work with Python 3 and requires the below packages be installed.\n\n```bash\n# become netdata user\nsudo su -s /bin/bash netdata\n# install required packages\npip3 install numpy pandas requests netdata-pandas==0.0.38\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/zscores.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/zscores.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| charts_regex | what charts to pull data for - A regex like `system\\..*/` or `system\\..*/apps.cpu/apps.mem` etc. | system\\..* | yes |\n| train_secs | length of time (in seconds) to base calculations off for mean and stddev. | 14400 | yes |\n| offset_secs | offset (in seconds) preceding latest data to ignore when calculating mean and stddev. | 300 | yes |\n| train_every_n | recalculate the mean and stddev every n steps of the collector. | 900 | yes |\n| z_smooth_n | smooth the z score (to reduce sensitivity to spikes) by averaging it over last n values. | 15 | yes |\n| z_clip | cap absolute value of zscore (before smoothing) for better stability. | 10 | yes |\n| z_abs | set z_abs: 'true' to make all zscores be absolute values only. | true | yes |\n| burn_in | burn in period in which to initially calculate mean and stddev on every step. | 2 | yes |\n| mode | mode can be to get a zscore 'per_dim' or 'per_chart'. | per_chart | yes |\n| per_chart_agg | per_chart_agg is how you aggregate from dimension to chart when mode='per_chart'. | mean | yes |\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. 
| yes | no |\n\n{% /details %}\n#### Examples\n\n##### Default\n\nDefault configuration.\n\n```yaml\nlocal:\n name: 'local'\n host: '127.0.0.1:19999'\n charts_regex: 'system\\..*'\n charts_to_exclude: 'system.uptime'\n train_secs: 14400\n offset_secs: 300\n train_every_n: 900\n z_smooth_n: 15\n z_clip: 10\n z_abs: 'true'\n burn_in: 2\n mode: 'per_chart'\n per_chart_agg: 'mean'\n\n```\n", - "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n\nTo troubleshoot issues with the `zscores` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin zscores debug trace\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `zscores` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep zscores\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep zscores /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep zscores\n```\n\n", + "overview": "# System statistics\n\nPlugin: windows.plugin\nModule: PerflibProcesses\n\n## Overview\n\nThis collector monitors the current number of processes, threads, and context switches on Windows systems.\n\n\nIt queries the 'System' object from Perflib in order to gather the metrics.\n\n\nThis collector is only supported on the following platforms:\n\n- windows\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector automatically detects all of the metrics, no further configuration is required.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:windows]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| PerflibProcesses | An option to enable or disable the data collection. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", + "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per python.d zscores instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| zscores.z | a dimension per chart or dimension | z |\n| zscores.3stddev | a dimension per chart or dimension | count |\n\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per System statistics instance\n\nThese metrics refer to the entire monitored instance.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.processes | running | processes |\n| system.threads | threads | threads |\n| system.ctxt | switches | context switches/s |\n\n", "integration_type": "collector", - "id": "python.d.plugin-zscores-python.d_zscores", - "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/zscores/metadata.yaml", + "id": "windows.plugin-PerflibProcesses-System_statistics", + "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/windows.plugin/metadata.yaml", "related_resources": "" }, { "meta": { - "plugin_name": "slabinfo.plugin", - "module_name": "slabinfo.plugin", + "plugin_name": "windows.plugin", + "module_name": "PerflibThermalZone", "monitored_instance": { - "name": "Linux kernel SLAB allocator statistics", - "link": "https://kernel.org/", + "name": "System thermal zone", + "link": "https://learn.microsoft.com/en-us/windows-hardware/design/device-experiences/design-guide", "categories": [ - "data-collection.linux-systems.kernel-metrics" + "data-collection.windows-systems" ], - "icon_filename": "linuxserver.svg" + "icon_filename": "windows.svg" }, "related_resources": { "integrations": { @@ -19433,88 +19655,19 @@ export const integrations = [ "description": "" }, "keywords": [ - "linux kernel", - "slab", - "slub", - "slob", - "slabinfo" + "thermal", + "temperature" ], "most_popular": false }, - "overview": "# Linux kernel SLAB allocator statistics\n\nPlugin: slabinfo.plugin\nModule: slabinfo.plugin\n\n## Overview\n\nCollects metrics on kernel SLAB cache utilization to monitor the low-level performance impact of workloads in the kernel.\n\n\nThe plugin parses `/proc/slabinfo`\n\nThis collector is 
only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\nThis integration requires read access to `/proc/slabinfo`, which is accessible only to the root user by default. Netdata uses Linux Capabilities to give the plugin access to this file. `CAP_DAC_READ_SEARCH` is added automatically during installation. This capability allows bypassing file read permission checks and directory read and execute permission checks. If file capabilities are not usable, then the plugin is instead installed with the SUID bit set in permissions so that it runs as root.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nDue to the large number of metrics generated by this integration, it is disabled by default and must be manually enabled inside `/etc/netdata/netdata.conf`\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",
-    "setup": "## Setup\n\n### Prerequisites\n\n#### Minimum setup\n\nIf you installed `netdata` using a package manager, it is also necessary to install the package `netdata-plugin-slabinfo`.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugins]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"The main configuration file.\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| Enable plugin | As described above plugin is disabled by default, this option is used to enable plugin. 
| no | yes |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "overview": "# System thermal zone\n\nPlugin: windows.plugin\nModule: PerflibThermalZone\n\n## Overview\n\nThis collector monitors thermal zone statistics on Windows systems.\n\n\nIt queries for the 'Thermal Zone Information' object from Perflib in order to gather the metrics.\n\n\nThis collector is only supported on the following platforms:\n\n- windows\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector automatically detects all of the metrics, no further configuration is required.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:windows]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| PerflibThermalZone | An option to enable or disable the data collection. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nSLAB cache utilization metrics for the whole system.\n\n### Per Linux kernel SLAB allocator statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.slabmemory | a dimension per cache | B |\n| mem.slabfilling | a dimension per cache | % |\n| mem.slabwaste | a dimension per cache | B |\n\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Thermal zone\n\nThese metrics refer to a Thermal zone\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.thermalzone_temperature | temperature | celsius |\n\n", "integration_type": "collector", - "id": "slabinfo.plugin-slabinfo.plugin-Linux_kernel_SLAB_allocator_statistics", - "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/slabinfo.plugin/metadata.yaml", - "related_resources": "" - }, - { - "meta": { - "plugin_name": "tc.plugin", - "module_name": "tc.plugin", - "monitored_instance": { - "name": "tc QoS classes", - "link": "https://wiki.linuxfoundation.org/networking/iproute2", - "categories": [ - "data-collection.linux-systems.network-metrics" - ], - "icon_filename": "netdata.png" - }, - "related_resources": { - "integrations": { - "list": [] - } - }, - "info_provided_to_referring_integrations": { - "description": "" - }, - "keywords": [], - "most_popular": false - }, - "overview": "# tc QoS classes\n\nPlugin: tc.plugin\nModule: tc.plugin\n\n## Overview\n\nExamine tc metrics to gain insights into Linux traffic control operations. Study packet flow rates, queue lengths, and drop rates to optimize network traffic flow.\n\nThe plugin uses `tc` command to collect information about Traffic control.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs to access command `tc` to get the necessary metrics. To achieve this netdata modifies permission of file `/usr/libexec/netdata/plugins.d/tc-qos-helper.sh`.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Create `tc-qos-helper.conf`\n\nIn order to view tc classes, you need to create the file `/etc/netdata/tc-qos-helper.conf` with content:\n\n```conf\ntc_show=\"class\"\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:tc]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config option\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| script to run to get tc values | Path to script `tc-qos-helper.sh` | usr/libexec/netdata/plugins.d/tc-qos-helper.s | no |\n| enable show all classes and qdiscs for all interfaces | yes/no flag to control what data is presented. 
| yes | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration using classes defined in `/etc/iproute2/tc_cls`.\n\nAn example of class IDs mapped to names in that file can be:\n\n```conf\n2:1 Standard\n2:8 LowPriorityData\n2:10 HighThroughputData\n2:16 OAM\n2:18 LowLatencyData\n2:24 BroadcastVideo\n2:26 MultimediaStreaming\n2:32 RealTimeInteractive\n2:34 MultimediaConferencing\n2:40 Signalling\n2:46 Telephony\n2:48 NetworkControl\n```\n\nYou can read more about setting up the tc rules in rc.local in this [GitHub issue](https://github.com/netdata/netdata/issues/4563#issuecomment-455711973).\n\n\n```yaml\n[plugin:tc]\n script to run to get tc values = /usr/libexec/netdata/plugins.d/tc-qos-helper.sh\n enable show all classes and qdiscs for all interfaces = yes\n\n```\n", - "troubleshooting": "", - "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per network device direction\n\nMetrics related to QoS network device directions. Each direction (in/out) produces its own set of the following metrics.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | The network interface. |\n| device_name | The network interface name |\n| group | The device family |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| tc.qos | a dimension per class | kilobits/s |\n| tc.qos_packets | a dimension per class | packets/s |\n| tc.qos_dropped | a dimension per class | packets/s |\n| tc.qos_tokens | a dimension per class | tokens |\n| tc.qos_ctokens | a dimension per class | ctokens |\n\n", - "integration_type": "collector", - "id": "tc.plugin-tc.plugin-tc_QoS_classes", - "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/tc.plugin/metadata.yaml", - "related_resources": "" - }, - { - "meta": { - "plugin_name": "timex.plugin", - "module_name": "timex.plugin", - "monitored_instance": { - "name": "Timex", - "link": "", - "categories": [ - "data-collection.system-clock-and-ntp" - ], - "icon_filename": "syslog.png" - }, - "related_resources": { - "integrations": { - "list": [] - } - }, - "info_provided_to_referring_integrations": { - "description": "" - }, - "keywords": [], - "most_popular": false - }, - "overview": "# Timex\n\nPlugin: timex.plugin\nModule: timex.plugin\n\n## Overview\n\nExamine Timex metrics to gain insights into system clock operations. 
Study time sync status, clock drift, and adjustments to ensure accurate system timekeeping.\n\nIt uses system call adjtimex on Linux and ntp_adjtime on FreeBSD or Mac to monitor the system kernel clock synchronization state.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:timex]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nAt least one option ('clock synchronization state', 'time offset') needs to be enabled for this collector to run.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| clock synchronization state | Make chart showing system clock synchronization state. | yes | yes |\n| time offset | Make chart showing computed time offset between local system and reference clock | yes | yes |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic configuration example.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n[plugin:timex]\n update every = 1\n clock synchronization state = yes\n time offset = yes\n\n```\n{% /details %}\n", - "troubleshooting": "", - "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ system_clock_sync_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/timex.conf) | system.clock_sync_state | when set to 0, the system kernel believes the system clock is not properly synchronized to a reliable server |\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Timex instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.clock_sync_state | state | state |\n| system.clock_status | unsync, clockerr | status |\n| system.clock_sync_offset | offset | milliseconds |\n\n", - "integration_type": "collector", - "id": "timex.plugin-timex.plugin-Timex", - "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/timex.plugin/metadata.yaml", + "id": "windows.plugin-PerflibThermalZone-System_thermal_zone", + "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/windows.plugin/metadata.yaml", "related_resources": "" }, { @@ -19541,7 +19694,7 @@ export const integrations = [ "most_popular": false }, "overview": "# Xen XCP-ng\n\nPlugin: xenstat.plugin\nModule: xenstat.plugin\n\n## Overview\n\nThis collector monitors XenServer and XCP-ng host and domains statistics.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis plugin requires the `xen-dom0-libs-devel` and `yajl-devel` libraries to be installed.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Libraries\n\n1. Install `xen-dom0-libs-devel` and `yajl-devel` using the package manager of your system.\n\n Note: On Cent-OS systems you will need `centos-release-xen` repository and the required package for xen is `xen-devel`\n\n2. Re-install Netdata from source. The installer will detect that the required libraries are now available and will also build xenstat.plugin.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:xenstat]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Libraries\n\n1. Install `xen-dom0-libs-devel` and `yajl-devel` using the package manager of your system.\n\n Note: On CentOS systems you will need the `centos-release-xen` repository, and the required Xen package is `xen-devel`.\n\n2. Re-install Netdata from source. 
The installer will detect that the required libraries are now available and will also build xenstat.plugin.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:xenstat]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Xen XCP-ng instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| xenstat.mem | free, used | MiB |\n| xenstat.domains | domains | domains |\n| xenstat.cpus | cpus | cpus |\n| xenstat.cpu_freq | frequency | MHz |\n\n### Per xendomain\n\nMetrics related to Xen domains. Each domain provides its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| xendomain.states | running, blocked, paused, shutdown, crashed, dying | boolean |\n| xendomain.cpu | used | percentage |\n| xendomain.mem | maximum, current | MiB |\n| xendomain.vcpu | a dimension per vcpu | percentage |\n\n### Per xendomain vbd\n\nMetrics related to Xen domain Virtual Block Device. Each VBD provides its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| xendomain.oo_req_vbd | requests | requests/s |\n| xendomain.requests_vbd | read, write | requests/s |\n| xendomain.sectors_vbd | read, write | sectors/s |\n\n### Per xendomain network\n\nMetrics related to Xen domain network interfaces. 
Each network interface provides its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| xendomain.bytes_network | received, sent | kilobits/s |\n| xendomain.packets_network | received, sent | packets/s |\n| xendomain.errors_network | received, sent | errors/s |\n| xendomain.drops_network | received, sent | drops/s |\n\n", @@ -19595,7 +19748,7 @@ export const integrations = [ ], "additional_info": "Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId=\"deploy.docker-kubernetes\" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId=\"deploy.docker-kubernetes\" %}Docker{% /goToCategory %}?\n", "related_resources": {}, - "platform_info": "\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", + "platform_info": "", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml" @@ -19645,7 +19798,7 @@ export const integrations = [ ], "additional_info": "Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId=\"deploy.docker-kubernetes\" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId=\"deploy.docker-kubernetes\" %}Docker{% /goToCategory %}?\n", "related_resources": {}, - "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 2 | Core | x86_64, aarch64 | |\n| 2023 | Core | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", + "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 2 | Core | x86_64, aarch64 | |\n| 2023 | Core | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.\n", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml" @@ -19695,7 +19848,7 @@ export const integrations = [ ], "additional_info": "Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId=\"deploy.docker-kubernetes\" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId=\"deploy.docker-kubernetes\" %}Docker{% /goToCategory %}?\n", "related_resources": {}, - "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| latest | Intermediate | | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", + "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| latest | Intermediate | | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.\n", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml" @@ -19745,7 +19898,7 
@@ export const integrations = [ ], "additional_info": "Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId=\"deploy.docker-kubernetes\" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId=\"deploy.docker-kubernetes\" %}Docker{% /goToCategory %}?\n", "related_resources": {}, - "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 7 | Core | x86_64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", + "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 7 | Core | x86_64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.\n", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml" @@ -19795,7 +19948,7 @@ export const integrations = [ ], "additional_info": "Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId=\"deploy.docker-kubernetes\" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId=\"deploy.docker-kubernetes\" %}Docker{% /goToCategory %}?\n", "related_resources": {}, - "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 9 | Community | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", + "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 9 | Community | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.\n", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml" @@ -19845,7 +19998,7 @@ export const integrations = [ ], "additional_info": "Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId=\"deploy.docker-kubernetes\" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId=\"deploy.docker-kubernetes\" %}Docker{% /goToCategory %}?\n", "related_resources": {}, - "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 12 | Core | i386, amd64, armhf, arm64 | |\n| 11 | Core | i386, amd64, armhf, arm64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", + "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 12 | Core | i386, amd64, armhf, arm64 | |\n| 11 | Core | i386, amd64, armhf, arm64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.\n", 
"quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml" @@ -19910,7 +20063,7 @@ export const integrations = [ ], "additional_info": "", "related_resources": {}, - "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 19.03 or newer | Core | linux/i386, linux/amd64, linux/arm/v7, linux/arm64, linux/ppc64le | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", + "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 19.03 or newer | Core | linux/i386, linux/amd64, linux/arm/v7, linux/arm64, linux/ppc64le | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.\n", "quick_start": 3, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml" @@ -19960,7 +20113,7 @@ export const integrations = [ ], "additional_info": "Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId=\"deploy.docker-kubernetes\" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId=\"deploy.docker-kubernetes\" %}Docker{% /goToCategory %}?\n", "related_resources": {}, - "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 40 | Core | x86_64, aarch64 | |\n| 39 | Core | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", + "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 41 | Core | x86_64, aarch64 | |\n| 40 | Core | x86_64, aarch64 | |\n| 39 | Core | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.\n", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml" @@ -20010,7 +20163,7 @@ export const integrations = [ ], "additional_info": "Netdata can also be installed via [FreeBSD ports](https://www.freshports.org/net-mgmt/netdata).\n", "related_resources": {}, - "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 13-STABLE | Community | | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", + "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 13-STABLE | Community | | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.\n", "quick_start": 6, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml" @@ -20062,7 +20215,7 @@ export const 
integrations = [ "additional_info": "", "related_resources": {}, "most_popular": true, - "platform_info": "\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", + "platform_info": "", "quick_start": 4, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml" @@ -20112,7 +20265,7 @@ export const integrations = [ ], "additional_info": "Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId=\"deploy.docker-kubernetes\" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId=\"deploy.docker-kubernetes\" %}Docker{% /goToCategory %}?\n", "related_resources": {}, - "platform_info": "\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", + "platform_info": "", "quick_start": 1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml" @@ -20151,7 +20304,7 @@ export const integrations = [ ], "additional_info": "Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId=\"deploy.docker-kubernetes\" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId=\"deploy.docker-kubernetes\" %}Docker{% /goToCategory %}?\n", "related_resources": {}, - "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 13 | Community | | |\n| 12 | Community | | |\n| 11 | Community | | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", + "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 13 | Community | | |\n| 12 | Community | | |\n| 11 | Community | | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.\n", "quick_start": 5, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml" @@ -20201,7 +20354,7 @@ export const integrations = [ ], "additional_info": "Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId=\"deploy.docker-kubernetes\" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId=\"deploy.docker-kubernetes\" %}Docker{% /goToCategory %}?\n", "related_resources": {}, - "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| latest | Intermediate | | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", + "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| latest | Intermediate | | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.\n", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml" @@ -20251,7 +20404,7 @@ export const integrations = [ ], 
"additional_info": "Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId=\"deploy.docker-kubernetes\" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId=\"deploy.docker-kubernetes\" %}Docker{% /goToCategory %}?\n", "related_resources": {}, - "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 15.6 | Core | x86_64, aarch64 | |\n| 15.5 | Core | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", + "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 15.6 | Core | x86_64, aarch64 | |\n| 15.5 | Core | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.\n", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml" @@ -20301,7 +20454,7 @@ export const integrations = [ ], "additional_info": "Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId=\"deploy.docker-kubernetes\" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId=\"deploy.docker-kubernetes\" %}Docker{% /goToCategory %}?\n", "related_resources": {}, - "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 8 | Core | x86_64, aarch64 | |\n| 9 | Core | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", + "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 8 | Core | x86_64, aarch64 | |\n| 9 | Core | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.\n", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml" @@ -20351,7 +20504,7 @@ export const integrations = [ ], "additional_info": "Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId=\"deploy.docker-kubernetes\" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId=\"deploy.docker-kubernetes\" %}Docker{% /goToCategory %}?\n", "related_resources": {}, - "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 9.x | Core | x86_64, aarch64 | |\n| 8.x | Core | x86_64, aarch64 | |\n| 7.x | Core | x86_64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", + "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 9.x | Core | x86_64, aarch64 | |\n| 8.x | Core | x86_64, aarch64 | 
|\n| 7.x | Core | x86_64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.\n", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml" @@ -20401,7 +20554,7 @@ export const integrations = [ ], "additional_info": "Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId=\"deploy.docker-kubernetes\" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId=\"deploy.docker-kubernetes\" %}Docker{% /goToCategory %}?\n", "related_resources": {}, - "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 9 | Core | x86_64, aarch64 | |\n| 8 | Core | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", + "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 9 | Core | x86_64, aarch64 | |\n| 8 | Core | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.\n", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml" @@ -20451,7 +20604,7 @@ export const integrations = [ ], "additional_info": "Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId=\"deploy.docker-kubernetes\" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId=\"deploy.docker-kubernetes\" %}Docker{% /goToCategory %}?\n", "related_resources": {}, - "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 24.04 | Core | amd64, armhf, arm64 | |\n| 22.04 | Core | amd64, armhf, arm64 | |\n| 20.04 | Core | amd64, armhf, arm64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", + "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 24.04 | Core | amd64, armhf, arm64 | |\n| 24.10 | Core | amd64, armhf, arm64 | |\n| 22.04 | Core | amd64, armhf, arm64 | |\n| 20.04 | Core | amd64, armhf, arm64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.\n", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml" @@ -20469,39 +20622,26 @@ export const integrations = [ "keywords": [ "windows" ], - "install_description": "1. Install [Windows Exporter](https://github.com/prometheus-community/windows_exporter) on every Windows host you want to monitor.\n2. Install Netdata agent on Linux, FreeBSD or Mac.\n3. Configure Netdata to collect data remotely from your Windows hosts by adding one job per host to windows.conf file. See the [configuration section](https://learn.netdata.cloud/docs/data-collection/monitor-anything/System%20Metrics/Windows-machines#configuration) for details.\n4. 
Enable [virtual nodes](https://learn.netdata.cloud/docs/data-collection/windows-systems#virtual-nodes) configuration so the windows nodes are displayed as separate nodes.\n", + "install_description": "Netdata offers a convenient Windows installer for easy setup. This executable provides two distinct installation modes, outlined below.\n\nThe Windows installer is currently in beta, so it is only available in the nightly release channel. A stable version will be released soon.\n\n## Graphical User Interface (GUI)\n\n1. Download the Netdata [Windows installer](https://github.com/netdata/netdata-nightlies/releases) from the latest nightly release.\n2. Run the `.exe` file and proceed with the installation process.\n3. At a minimum, you will need your Netdata Cloud Space's claim token to connect your Agent to your Space.\n\n## Silent Mode (Command line)\n\nIf you prefer to install Netdata through the command line, you can do so by running the following command in Windows PowerShell with administrator rights.\n", "methods": [ { - "method": "wget", + "method": "Silent Mode (Command line)", "commands": [ - { - "channel": "nightly", - "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n" - }, { "channel": "stable", - "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n" - } - ] - }, - { - "method": "curl", - "commands": [ - { - "channel": "nightly", - "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n" + "command": "$ProgressPreference = 'SilentlyContinue';\nInvoke-WebRequest https://github.com/netdata/netdata-nightlies/releases/latest/download/netdata-installer-x64.exe -OutFile \"netdata-installer-x64.exe\";\n.\\netdata-installer-x64.exe /S /A `\n{% if $showClaimingOptions %}/TOKEN={% claim_token %} /ROOMS={% $claim_rooms %}{% /if %}\n" }, { - "channel": "stable", - "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n" + "channel": "nightly", + "command": "$ProgressPreference = 'SilentlyContinue';\nInvoke-WebRequest https://github.com/netdata/netdata-nightlies/releases/latest/download/netdata-installer-x64.exe -OutFile \"netdata-installer-x64.exe\";\n.\\netdata-installer-x64.exe /S /A `\n{% if $showClaimingOptions %}/TOKEN={% claim_token %} /ROOMS={% $claim_rooms %}{% /if %}\n" } ] } ], - "additional_info": "", + "additional_info": "### Available Options\n\n| Option | Description |\n|-----------|--------------------------------------------------------------------------------------------------|\n| `/S` | Enables silent mode installation. |\n| `/A` | Accepts all Netdata licenses. This option is mandatory for silent installations. |\n| `/D` | Specifies the desired installation directory (defaults to `C:\\Program Files\\Netdata`). 
|\n| `/T` | Opens the `MSYS2` terminal after installation. |\n| `/I` | Forces insecure connections, bypassing hostname verification (use only if absolutely necessary). |\n| `/TOKEN=` | Sets the Claim Token for your Netdata Cloud Space. |\n| `/ROOMS=` | Comma-separated list of Room IDs where you want your node to appear. |\n| `/PROXY=` | Sets the proxy server address if your network requires one. |\n", "related_resources": {}, "most_popular": true, - "platform_info": "\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", + "platform_info": "", "quick_start": 2, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml" @@ -20528,7 +20668,7 @@ export const integrations = [ "time series" ], "overview": "# AppOptics\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). 
| | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). 
For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a port number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
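To make the first-match semantics above concrete, here is a minimal sketch in JavaScript (chosen to match the `integrations.js` file this hunk patches). The helper name and the regex-based matching are illustrative assumptions, not Netdata's actual simple-pattern implementation:

```js
// Illustrative sketch (not Netdata's code) of simple-pattern matching with
// first-match-wins semantics: "*" matches any run of characters and a
// leading "!" makes the pattern a negative match.
function matchesSimplePatterns(value, patterns) {
  for (let pattern of patterns.trim().split(/\s+/)) {
    const negative = pattern.startsWith("!");
    if (negative) pattern = pattern.slice(1);
    const escaped = pattern
      .split("*")
      .map((p) => p.replace(/[.+?^${}()|[\]\\]/g, "\\$&"))
      .join(".*");
    if (new RegExp(`^${escaped}$`).test(value)) return !negative; // first match decides
  }
  return false; // assumption: a value matching no pattern is not sent
}

// "!*child* *db*": send all hosts named *db* except those containing "child".
console.log(matchesSimplePatterns("proddb1", "!*child* *db*"));  // true
console.log(matchesSimplePatterns("dbchild2", "!*child* *db*")); // false
```

Note the order-dependence the docs describe: `dbchild2` is rejected because the negative pattern matches first, even though `*db*` would also have matched.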
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": "" @@ -20549,7 +20689,7 @@ export const integrations = [ "Kinesis" ], "overview": "# AWS Kinesis\n\nExport metrics to AWS Kinesis Data Streams\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- First [install](https://docs.aws.amazon.com/en_us/sdk-for-cpp/v1/developer-guide/setup.html) AWS SDK for C++\n- Here are the instructions when building from source, to ensure 3rd party dependencies are installed:\n ```bash\n git clone --recursive https://github.com/aws/aws-sdk-cpp.git\n cd aws-sdk-cpp/\n git submodule update --init --recursive\n mkdir BUILT\n cd BUILT\n cmake -DCMAKE_INSTALL_PREFIX=/usr -DBUILD_ONLY=kinesis ..\n make\n make install\n ```\n- `libcrypto`, `libssl`, and `libcurl` are also required to compile Netdata with Kinesis support enabled.\n- Next, Netdata should be re-installed from the source. The installer will detect that the required libraries are now available.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nNetdata automatically computes a partition key for every record with the purpose to distribute records across available shards evenly.\nThe following options can be defined for this exporter.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | Netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. 
| * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:4242 10.11.14.3:4242 10.11.14.4:4242\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic configuration\n\n```yaml\n[kinesis:my_instance]\n enabled = yes\n destination = us-east-1\n\n```\n##### Configuration with AWS credentials\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[kinesis:my_instance]\n enabled = yes\n destination = us-east-1\n # AWS credentials\n aws_access_key_id = your_access_key_id\n aws_secret_access_key = your_secret_access_key\n # destination stream\n stream name = your_stream_name\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- First [install](https://docs.aws.amazon.com/en_us/sdk-for-cpp/v1/developer-guide/setup.html) AWS SDK for C++\n- Here are the instructions when building from source, to ensure 3rd party dependencies are installed:\n ```bash\n git clone --recursive https://github.com/aws/aws-sdk-cpp.git\n cd aws-sdk-cpp/\n git submodule update --init --recursive\n mkdir BUILT\n cd BUILT\n cmake -DCMAKE_INSTALL_PREFIX=/usr -DBUILD_ONLY=kinesis ..\n make\n make install\n ```\n- `libcrypto`, `libssl`, and `libcurl` are also required to compile Netdata with Kinesis support enabled.\n- Next, Netdata should be re-installed from the source. The installer will detect that the required libraries are now available.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nNetdata automatically computes a partition key for every record in order to distribute records evenly across available shards.\nThe following options can be defined for this exporter.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | Netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. 
| * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a port number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:4242 10.11.14.3:4242 10.11.14.4:4242\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic configuration\n\n```yaml\n[kinesis:my_instance]\n enabled = yes\n destination = us-east-1\n\n```\n##### Configuration with AWS credentials\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
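The destination syntax and failover behavior described above can be sketched as follows, again in JavaScript; `parseDestinations`, `sendWithFailover`, and the fallback-port argument are hypothetical illustrations, not the exporting engine's code:

```js
// Illustrative sketch of the destination list: parse space-separated
// [PROTOCOL:]IP[:PORT] items (IPv6 addresses enclosed in []) and try the
// servers in order, moving on when the previous one fails.
function parseDestinations(list, defaultPort) {
  return list.trim().split(/\s+/).map((item) => {
    let protocol = "tcp"; // tcp is the default
    if (item.startsWith("udp:") || item.startsWith("tcp:")) {
      protocol = item.slice(0, 3);
      item = item.slice(4);
    }
    const m = item.match(/^(\[[^\]]+\]|[^:]+)(?::(\S+))?$/);
    return { protocol, host: m[1], port: m[2] ?? defaultPort };
  });
}

function sendWithFailover(destinations, trySend) {
  for (const d of destinations) {
    if (trySend(d)) return d; // Netdata uses the first available server
  }
  return null; // all failed; see "buffer on failures" for what happens next
}

const dests = parseDestinations("[ffff:...:0001]:2003 10.11.12.1:2003", "2003");
console.log(dests.map((d) => `${d.protocol}://${d.host}:${d.port}`));
```

The try-in-order loop mirrors the documented behavior: the first available server is used, and the next one is tried only when the previous one fails.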
#### Examples\n\n##### Example configuration\n\nBasic configuration\n\n```yaml\n[kinesis:my_instance]\n enabled = yes\n destination = us-east-1\n\n```\n##### Configuration with AWS credentials\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `kinesis:https:my_instance`.\n\n```yaml\n[kinesis:my_instance]\n enabled = yes\n destination = us-east-1\n # AWS credentials\n aws_access_key_id = your_access_key_id\n aws_secret_access_key = your_secret_access_key\n # destination stream\n stream name = your_stream_name\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/aws_kinesis/metadata.yaml", "troubleshooting": "" @@ -20575,7 +20715,7 @@ export const integrations = [ "time series" ], "overview": "# Azure Data Explorer\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). 
| | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). 
For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data for more than that many iterations, the buffered data will be lost (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n
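As an illustrative sketch of the failover behaviour described under `destination` above (both endpoints are placeholders):\n\n```yaml\n[prometheus_remote_write:failover_instance]\n enabled = yes\n # Netdata sends to the first available endpoint and tries the next one when it fails\n destination = 10.11.14.2:9090 10.11.14.3:9090\n remote write URL path = /receive\n```\n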
#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": "" @@ -20601,7 +20741,7 @@ export const integrations = [ "time series" ], "overview": "# Azure Event Hub\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). 
| | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). 
For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data for more than that many iterations, the buffered data will be lost (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n
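For illustration, a sketch applying the `send hosts matching` option described above to a parent that aggregates many nodes; the patterns are placeholders:\n\n```yaml\n[prometheus_remote_write:db_hosts_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n # first matching pattern wins: exclude hosts containing "child", then include hosts containing "db"\n send hosts matching = !*child* *db*\n```\n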
#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": "" @@ -20629,7 +20769,7 @@ export const integrations = [ "time series" ], "overview": "# Google BigQuery\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). 
| | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). 
For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data for more than that many iterations, the buffered data will be lost (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n
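For illustration, a sketch enabling the naming and label options documented above (the instance name is a placeholder):\n\n```yaml\n[prometheus_remote_write:labeled_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n # prefer human-friendly chart and dimension names over system ids\n send names instead of ids = yes\n # also forward host labels defined in netdata.conf and automatic labels like _os_name\n send configured labels = yes\n send automatic labels = yes\n```\n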
#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": "" @@ -20656,7 +20796,7 @@ export const integrations = [ "time series" ], "overview": "# Blueflood\n\nUse the Graphite connector for the exporting engine to archive your Netdata metrics to Graphite providers for long-term storage,\nfurther analysis, or correlation with data from other sources.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- You have already installed Netdata and Graphite.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. 
tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic configuration\n\n```yaml\n[graphite:netdata]\n enabled = yes\n destination = localhost:2003\n\n```\n##### Configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[graphite:netdata]\n enabled = yes\n destination = localhost:2003\n username = my_username\n password = my_password\n\n```\n##### Detailed Configuration for a remote, secure host\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[graphite:https:netdata]\n enabled = yes\n username = my_username\n password = my_password\n destination = 10.10.1.114:2003\n # data source = average\n # prefix = netdata\n # hostname = my_hostname\n # update every = 10\n # buffer on failures = 10\n # timeout ms = 20000\n # send names instead of ids = yes\n # send charts matching = *\n # send hosts matching = localhost *\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- You have already installed Netdata and Graphite.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). 
For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data for more than that many iterations, the buffered data will be lost (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n
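For illustration, a sketch tuning the timing options documented above (the values are placeholders, not recommendations):\n\n```yaml\n[graphite:tuned_instance]\n enabled = yes\n destination = localhost:2003\n # send data every 30 seconds instead of the default 10\n update every = 30\n # keep up to 20 iterations (10 minutes at this rate) buffered while Graphite is unreachable\n buffer on failures = 20\n```\n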
#### Examples\n\n##### Example configuration\n\nBasic configuration\n\n```yaml\n[graphite:netdata]\n enabled = yes\n destination = localhost:2003\n\n```\n##### Configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `graphite:https:netdata`.\n\n```yaml\n[graphite:netdata]\n enabled = yes\n destination = localhost:2003\n username = my_username\n password = my_password\n\n```\n##### Detailed Configuration for a remote, secure host\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `graphite:https:netdata`.\n\n```yaml\n[graphite:https:netdata]\n enabled = yes\n username = my_username\n password = my_password\n destination = 10.10.1.114:2003\n # data source = average\n # prefix = netdata\n # hostname = my_hostname\n # update every = 10\n # buffer on failures = 10\n # timeout ms = 20000\n # send names instead of ids = yes\n # send charts matching = *\n # send hosts matching = localhost *\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/graphite/metadata.yaml", "troubleshooting": "" @@ -20684,7 +20824,7 @@ export const integrations = [ "time series" ], "overview": "# Chronix\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). 
| | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). 
For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
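The first-match semantics of `send hosts matching` and `send charts matching` described above can be approximated in a few lines of Python. This is an illustrative sketch only: Netdata's real matcher is implemented in C and supports only `*` as a wildcard, whereas `fnmatch` also honors `?` and `[...]`.

```python
import fnmatch

def simple_pattern_match(patterns: str, value: str) -> bool:
    """Rough approximation of Netdata simple patterns: the first pattern
    that matches wins, and a leading '!' makes it a negative match."""
    for pattern in patterns.split():
        negative = pattern.startswith("!")
        if negative:
            pattern = pattern[1:]
        if fnmatch.fnmatch(value, pattern):
            return not negative
    return False  # no pattern matched: the value is not selected

# Order matters: '!*child*' is evaluated before '*db*'.
print(simple_pattern_match("!*child* *db*", "prod-db-1"))   # True
print(simple_pattern_match("!*child* *db*", "db-child-2"))  # False
```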
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n",
"integration_type": "exporter",
"edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",
"troubleshooting": ""
@@ -20712,7 +20852,7 @@ export const integrations = [
"time series"
],
"overview": "# Cortex\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",
- "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice installed, configured, and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space-separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space-separated patterns (use * as a wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n```yaml\ndestination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space-separated patterns, using * as a wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as `localhost`), allowing you to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system, and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n",
+ "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice installed, configured, and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space-separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space-separated patterns (use * as a wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used. (See the parsing sketch below.)\n\nExample IPv4:\n```yaml\ndestination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space-separated patterns, using * as a wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as `localhost`), allowing you to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system, and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol.
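To make the `[PROTOCOL:]IP[:PORT]` grammar above concrete, here is a small parsing sketch in Python. The helper name is made up for illustration; it only mirrors the rules stated in the docs (optional `tcp:`/`udp:` prefix, bracketed IPv6 literals, optional port) and is not Netdata's actual parser.

```python
def parse_destination(item: str):
    """Hypothetical parser for one '[PROTOCOL:]IP[:PORT]' destination item."""
    protocol = "tcp"  # tcp is the default (and the only protocol the engine supports)
    if item.startswith(("tcp:", "udp:")):
        protocol, item = item.split(":", 1)
    if item.startswith("["):  # IPv6 literals are bracketed to separate them from the port
        host, _, rest = item[1:].partition("]")
        port = rest.lstrip(":") or None
    else:
        host, _, port = item.partition(":")
        port = port or None  # None means: use the connector's default port
    return protocol, host, port

for item in "[ffff::0001]:2003 10.11.12.1:2003".split():
    print(parse_destination(item))
# -> ('tcp', 'ffff::0001', '2003')
# -> ('tcp', '10.11.12.1', '2003')
```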
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n",
"integration_type": "exporter",
"edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",
"troubleshooting": ""
@@ -20740,7 +20880,7 @@ export const integrations = [
"time series"
],
"overview": "# CrateDB\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",
- "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice installed, configured, and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space-separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space-separated patterns (use * as a wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n```yaml\ndestination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space-separated patterns, using * as a wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as `localhost`), allowing you to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system, and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n",
+ "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice installed, configured, and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space-separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space-separated patterns (use * as a wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n```yaml\ndestination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent. (A small sketch of this jitter follows below.)\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space-separated patterns, using * as a wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as `localhost`), allowing you to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system, and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol.
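The `update every` note above (Netdata adds some randomness so that many agents do not hit the same server simultaneously) is essentially send-time jitter. A minimal sketch of the idea; the constants are made up and the actual offset logic lives in Netdata's C exporting engine:

```python
import random

UPDATE_EVERY = 10  # seconds, the configured 'update every'

# A fixed per-agent offset spreads senders across the interval. The data
# itself still covers whole 'update every' windows; only the wall-clock
# moment of the send shifts, so data quality is unaffected.
SEND_OFFSET = random.uniform(0, UPDATE_EVERY)

def next_send_time(now: float) -> float:
    """Next wall-clock time this agent should flush metrics to the server."""
    return (now // UPDATE_EVERY + 1) * UPDATE_EVERY + SEND_OFFSET
```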
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n",
"integration_type": "exporter",
"edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",
"troubleshooting": ""
@@ -20768,7 +20908,7 @@ export const integrations = [
"time series"
],
"overview": "# ElasticSearch\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",
- "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice installed, configured, and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space-separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space-separated patterns (use * as a wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n```yaml\ndestination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space-separated patterns, using * as a wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as `localhost`), allowing you to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system, and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n",
+ "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice installed, configured, and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space-separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space-separated patterns (use * as a wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n```yaml\ndestination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it). (A sketch of this bounded buffering follows below.)\n\n\n##### send hosts matching\n\nIncludes one or more space-separated patterns, using * as a wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as `localhost`), allowing you to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system, and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol.
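The `buffer on failures` behavior above (buffer up to N iterations of data, then accept loss) acts like a bounded queue. A sketch under that assumption; the real connector is C code inside the exporting engine, and this only models the bookkeeping:

```python
from collections import deque

BUFFER_ON_FAILURES = 10  # iterations of 'update every' seconds

# Once the deque is full, appending silently drops the oldest iteration:
# this is the point where data loss on the connector instance begins.
pending = deque(maxlen=BUFFER_ON_FAILURES)

def on_iteration(metrics, send):
    """Called once per 'update every' interval with the freshly collected data."""
    pending.append(metrics)
    try:
        while pending:
            send(pending[0])   # may raise if the server is unreachable
            pending.popleft()  # only discard after a successful send
    except ConnectionError:
        pass  # keep what is buffered and retry on the next iteration
```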
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n",
"integration_type": "exporter",
"edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",
"troubleshooting": ""
@@ -20796,7 +20936,7 @@ export const integrations = [
"time series"
],
"overview": "# Gnocchi\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",
- "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice installed, configured, and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space-separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space-separated patterns (use * as a wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n```yaml\ndestination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space-separated patterns, using * as a wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as `localhost`), allowing you to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system, and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n",
+ "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice installed, configured, and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space-separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space-separated patterns (use * as a wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n```yaml\ndestination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails. (A sketch of this ordered failover follows below.)\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space-separated patterns, using * as a wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as `localhost`), allowing you to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system, and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol.
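The destination semantics above ("Netdata will use the first available to send the metrics" and "will try the next one when the previous one fails") describe plain ordered failover. A sketch of that loop; the `send` callable is a placeholder for the transport, not a Netdata API:

```python
def send_with_failover(destinations: str, payload: bytes, send) -> str:
    """Try each space-separated destination in order; the first success wins."""
    last_error = None
    for dest in destinations.split():
        try:
            send(dest, payload)  # placeholder transport call
            return dest          # this server becomes the active destination
        except ConnectionError as err:
            last_error = err     # fall through to the next configured server
    raise ConnectionError(f"no destination reachable: {last_error}")

# Usage sketch:
# send_with_failover("10.11.14.2:2003 10.11.14.3:4242", b"...", my_send)
```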
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": "" @@ -20817,7 +20957,7 @@ export const integrations = [ "Pub Sub" ], "overview": "# Google Cloud Pub Sub\n\nExport metrics to Google Cloud Pub/Sub Service\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- First [install](https://github.com/googleapis/google-cloud-cpp/) install Google Cloud Platform C++ Client Libraries\n- Pub/Sub support is also dependent on the dependencies of those libraries, like `protobuf`, `protoc`, and `grpc`\n- Next, Netdata should be re-installed from the source. The installer will detect that the required libraries are now available.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | pubsub.googleapis.com | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | Netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). 
| | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = pubsub.googleapis.com\n ```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Basic configuration\n\n- Set the destination option to a Pub/Sub service endpoint. 
pubsub.googleapis.com is the default one.\n- Create the credentials JSON file by following Google Cloud's authentication guide.\n- The user running the Agent (typically netdata) needs read access to google_cloud_credentials.json, which you can set\n `chmod 400 google_cloud_credentials.json; chown netdata google_cloud_credentials.json`\n- Set the credentials file option to the full path of the file.\n\n\n```yaml\n[pubsub:my_instance]\n enabled = yes\n destination = pubsub.googleapis.com\n credentials file = /etc/netdata/google_cloud_credentials.json\n project id = my_project\n topic id = my_topic\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- First, [install](https://github.com/googleapis/google-cloud-cpp/) the Google Cloud Platform C++ Client Libraries\n- Pub/Sub support is also dependent on the dependencies of those libraries, like `protobuf`, `protoc`, and `grpc`\n- Next, Netdata should be reinstalled from source. The installer will detect that the required libraries are now available.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | pubsub.googleapis.com | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | Netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). 
| | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = pubsub.googleapis.com\n ```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Basic configuration\n\n- Set the destination option to a Pub/Sub service endpoint. 
pubsub.googleapis.com is the default one.\n- Create the credentials JSON file by following Google Cloud's authentication guide.\n- The user running the Agent (typically netdata) needs read access to google_cloud_credentials.json, which you can set with\n `chmod 400 google_cloud_credentials.json; chown netdata google_cloud_credentials.json`\n- Set the credentials file option to the full path of the file.\n\n\n```yaml\n[pubsub:my_instance]\n enabled = yes\n destination = pubsub.googleapis.com\n credentials file = /etc/netdata/google_cloud_credentials.json\n project id = my_project\n topic id = my_topic\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/pubsub/metadata.yaml", "troubleshooting": "" @@ -20839,7 +20979,7 @@ export const integrations = [ "time series" ], "overview": "# Graphite\n\nUse the Graphite connector for the exporting engine to archive your Netdata metrics to Graphite providers for long-term storage,\nfurther analysis, or correlation with data from other sources.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- You have already installed Netdata and Graphite.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). 
| | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic configuration\n\n```yaml\n[graphite:netdata]\n enabled = yes\n destination = localhost:2003\n\n```\n##### Configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[graphite:netdata]\n enabled = yes\n destination = localhost:2003\n username = my_username\n password = my_password\n\n```\n##### Detailed Configuration for a remote, secure host\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[graphite:https:netdata]\n enabled = yes\n username = my_username\n password = my_password\n destination = 10.10.1.114:2003\n # data source = average\n # prefix = netdata\n # hostname = my_hostname\n # update every = 10\n # buffer on failures = 10\n # timeout ms = 20000\n # send names instead of ids = yes\n # send charts matching = *\n # send hosts matching = localhost *\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- You have already installed Netdata and Graphite.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). 
For IPv6 you can enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic configuration\n\n```yaml\n[graphite:netdata]\n enabled = yes\n destination = localhost:2003\n\n```\n##### Configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `graphite:https:netdata`.\n\n```yaml\n[graphite:netdata]\n enabled = yes\n destination = localhost:2003\n username = my_username\n password = my_password\n\n```\n##### Detailed Configuration for a remote, secure host\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `graphite:https:netdata`.\n\n```yaml\n[graphite:https:netdata]\n enabled = yes\n username = my_username\n password = my_password\n destination = 10.10.1.114:2003\n # data source = average\n # prefix = netdata\n # hostname = my_hostname\n # update every = 10\n # buffer on failures = 10\n # timeout ms = 20000\n # send names instead of ids = yes\n # send charts matching = *\n # send hosts matching = localhost *\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/graphite/metadata.yaml", "troubleshooting": "" @@ -20867,7 +21007,7 @@ export const integrations = [ "time series" ], "overview": "# GreptimeDB\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). 
| | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). 
For IPv6 you can enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": "" @@ -20895,7 +21035,7 @@ export const integrations = [ "time series" ], "overview": "# InfluxDB\n\nUse the Graphite connector for the exporting engine to archive your Netdata metrics to Graphite providers for long-term storage,\nfurther analysis, or correlation with data from other sources.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- You have already installed Netdata and Graphite.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. 
tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic configuration\n\n```yaml\n[graphite:netdata]\n enabled = yes\n destination = localhost:2003\n\n```\n##### Configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[graphite:netdata]\n enabled = yes\n destination = localhost:2003\n username = my_username\n password = my_password\n\n```\n##### Detailed Configuration for a remote, secure host\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[graphite:https:netdata]\n enabled = yes\n username = my_username\n password = my_password\n destination = 10.10.1.114:2003\n # data source = average\n # prefix = netdata\n # hostname = my_hostname\n # update every = 10\n # buffer on failures = 10\n # timeout ms = 20000\n # send names instead of ids = yes\n # send charts matching = *\n # send hosts matching = localhost *\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- You have already installed Netdata and Graphite.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). 
For IPv6 you can enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic configuration\n\n```yaml\n[graphite:netdata]\n enabled = yes\n destination = localhost:2003\n\n```\n##### Configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `graphite:https:netdata`.\n\n```yaml\n[graphite:netdata]\n enabled = yes\n destination = localhost:2003\n username = my_username\n password = my_password\n\n```\n##### Detailed Configuration for a remote, secure host\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `graphite:https:netdata`.\n\n```yaml\n[graphite:https:netdata]\n enabled = yes\n username = my_username\n password = my_password\n destination = 10.10.1.114:2003\n # data source = average\n # prefix = netdata\n # hostname = my_hostname\n # update every = 10\n # buffer on failures = 10\n # timeout ms = 20000\n # send names instead of ids = yes\n # send charts matching = *\n # send hosts matching = localhost *\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/graphite/metadata.yaml", "troubleshooting": "" @@ -20923,7 +21063,7 @@ export const integrations = [ "time series" ], "overview": "# IRONdb\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). 
| | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n",
+ "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries are installed.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). 
For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": "" @@ -20943,7 +21083,7 @@ export const integrations = [ "json" ], "overview": "# JSON\n\nUse the JSON connector for the exporting engine to archive your agent's metrics to JSON document databases for long-term storage,\nfurther analysis, or correlation with data from other sources\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | pubsub.googleapis.com | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | Netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. 
tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = localhost:5448\n ```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Basic configuration\n\n\n\n```yaml\n[json:my_json_instance]\n enabled = yes\n destination = localhost:5448\n\n```\n##### Configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `json:https:my_json_instance`.\n\n```yaml\n[json:my_json_instance]\n enabled = yes\n destination = localhost:5448\n username = my_username\n password = my_password\n\n```\n",
+ "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | pubsub.googleapis.com | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | Netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. 
If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = localhost:5448\n ```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Basic configuration\n\n\n\n```yaml\n[json:my_json_instance]\n enabled = yes\n destination = localhost:5448\n\n```\n##### Configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `json:https:my_json_instance`.\n\n```yaml\n[json:https:my_json_instance]\n enabled = yes\n destination = localhost:5448\n username = my_username\n password = my_password\n\n```\n",
 "integration_type": "exporter",
 "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/json/metadata.yaml",
 "troubleshooting": ""
@@ -20971,7 +21111,7 @@ export const integrations = [
 "time series"
 ],
 "overview": "# Kafka\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",
- "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). 
| | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n",
+ "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries are installed.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). 
For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": "" @@ -20999,7 +21139,7 @@ export const integrations = [ "time series" ], "overview": "# KairosDB\n\nUse the Graphite connector for the exporting engine to archive your Netdata metrics to Graphite providers for long-term storage,\nfurther analysis, or correlation with data from other sources.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- You have already installed Netdata and Graphite.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. 
tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic configuration\n\n```yaml\n[graphite:netdata]\n enabled = yes\n destination = localhost:2003\n\n```\n##### Configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[graphite:netdata]\n enabled = yes\n destination = localhost:2003\n username = my_username\n password = my_password\n\n```\n##### Detailed Configuration for a remote, secure host\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[graphite:https:netdata]\n enabled = yes\n username = my_username\n password = my_password\n destination = 10.10.1.114:2003\n # data source = average\n # prefix = netdata\n # hostname = my_hostname\n # update every = 10\n # buffer on failures = 10\n # timeout ms = 20000\n # send names instead of ids = yes\n # send charts matching = *\n # send hosts matching = localhost *\n\n```\n",
+ "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- You have already installed Netdata and Graphite.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). 
For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic configuration\n\n```yaml\n[graphite:netdata]\n enabled = yes\n destination = localhost:2003\n\n```\n##### Configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `graphite:https:netdata`.\n\n```yaml\n[graphite:https:netdata]\n enabled = yes\n destination = localhost:2003\n username = my_username\n password = my_password\n\n```\n##### Detailed Configuration for a remote, secure host\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `graphite:https:netdata`.\n\n```yaml\n[graphite:https:netdata]\n enabled = yes\n username = my_username\n password = my_password\n destination = 10.10.1.114:2003\n # data source = average\n # prefix = netdata\n # hostname = my_hostname\n # update every = 10\n # buffer on failures = 10\n # timeout ms = 20000\n # send names instead of ids = yes\n # send charts matching = *\n # send hosts matching = localhost *\n\n```\n",
 "integration_type": "exporter",
 "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/graphite/metadata.yaml",
 "troubleshooting": ""
@@ -21027,7 +21167,7 @@ export const integrations = [
 "time series"
 ],
 "overview": "# M3DB\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",
- "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). 
| | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n",
+ "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries are installed.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). 
For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": "" @@ -21055,7 +21195,7 @@ export const integrations = [ "time series" ], "overview": "# MetricFire\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). 
| | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication. | my_username | no |\n| password | Password for HTTP authentication. | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n\n```yaml\ndestination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n```\n\nExample IPv6 and IPv4 together:\n\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\n\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n##### buffer on failures\n\nIf the server still fails to receive the data after that many iterations, data loss on the connector instance is expected (Netdata will also log it).\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing you to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (the order is important: the first pattern matching the hostname will be used, positive or negative).\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a `filter` URL parameter that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system, and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n```\n
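As a hedged illustration (all values are hypothetical), a sketch of the failover behavior described under `destination`: the servers are tried in order and the first available one receives the metrics.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n # tried left to right; Netdata moves to the next destination on failure\n destination = 10.11.14.2:2003 10.11.14.3:4242 [ffff:...:0001]:2003\n remote write URL path = /receive\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol.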
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": "" @@ -21075,7 +21215,7 @@ export const integrations = [ "MongoDB" ], "overview": "# MongoDB\n\nUse the MongoDB connector for the exporting engine to archive your agent's metrics to a MongoDB database\nfor long-term storage, further analysis, or correlation with data from other sources.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- To use MongoDB as an external storage for long-term archiving, you should first [install](http://mongoc.org/libmongoc/current/installing.html) libmongoc 1.7.0 or higher.\n- Next, re-install Netdata from the source, which detects that the required library is now available.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | localhost | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | Netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). 
| | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:27017 10.11.14.3:4242 10.11.14.4:27017\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Basic configuration\n\nThe default socket timeout depends on the exporting connector update interval.\nThe timeout is 500 ms shorter than the interval (but not less than 1000 ms). 
You can alter the timeout using the sockettimeoutms MongoDB URI option.\n\n\n```yaml\n[mongodb:my_instance]\n enabled = yes\n destination = mongodb://\n database = your_database_name\n collection = your_collection_name\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n- To use MongoDB as external storage for long-term archiving, you should first [install](https://www.mongodb.com/docs/languages/c/c-driver/current/libmongoc/tutorials/obtaining-libraries/installing/#std-label-installing) libmongoc 1.7.0 or higher.\n- Next, re-install Netdata from source, which detects that the required library is now available.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | localhost | yes |\n| username | Username for HTTP authentication. | my_username | no |\n| password | Password for HTTP authentication. | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | Netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n\n```yaml\ndestination = 10.11.14.2:27017 10.11.14.3:4242 10.11.14.4:27017\n```\n\nExample IPv6 and IPv4 together:\n\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\n\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n##### buffer on failures\n\nIf the server still fails to receive the data after that many iterations, data loss on the connector instance is expected (Netdata will also log it).\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing you to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (the order is important: the first pattern matching the hostname will be used, positive or negative).\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a `filter` URL parameter that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system, and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n{% /details %}\n#### Examples\n\n##### Basic configuration\n\nThe default socket timeout depends on the exporting connector update interval.\nThe timeout is 500 ms shorter than the interval (but not less than 1000 ms).
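As a sketch (the host, database and collection names are hypothetical), the timeout can be overridden directly in the connection URI, as the note that follows explains:\n\n```yaml\n[mongodb:my_instance]\n enabled = yes\n # sockettimeoutms is a standard MongoDB URI option; 5000 ms is an arbitrary example value\n destination = mongodb://10.11.14.2:27017/?sockettimeoutms=5000\n database = your_database_name\n collection = your_collection_name\n```\n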
You can alter the timeout using the sockettimeoutms MongoDB URI option.\n\n\n```yaml\n[mongodb:my_instance]\n enabled = yes\n destination = mongodb://\n database = your_database_name\n collection = your_collection_name\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/mongodb/metadata.yaml", "troubleshooting": "" @@ -21103,7 +21243,7 @@ export const integrations = [ "time series" ], "overview": "# New Relic\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). 
| | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication. | my_username | no |\n| password | Password for HTTP authentication. | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n\n```yaml\ndestination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n```\n\nExample IPv6 and IPv4 together:\n\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\n\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n##### buffer on failures\n\nIf the server still fails to receive the data after that many iterations, data loss on the connector instance is expected (Netdata will also log it).\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing you to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (the order is important: the first pattern matching the hostname will be used, positive or negative).\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a `filter` URL parameter that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system, and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n```\n
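Another hedged sketch (values are hypothetical) showing the `data source` and `update every` options from the table above, sending averaged samples once per minute instead of every collected value:\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n # average the samples and push them every 60 seconds\n data source = average\n update every = 60\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol.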
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n",
    "integration_type": "exporter",
    "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",
    "troubleshooting": ""
@@ -21131,7 +21271,7 @@ export const integrations = [
        "time series"
    ],
    "overview": "# OpenSearch\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",
-    "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no).
| | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication. | my_username | no |\n| password | Password for HTTP authentication. | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n\n```yaml\ndestination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n```\n\nExample IPv6 and IPv4 together:\n\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\n\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n##### buffer on failures\n\nIf the server still fails to receive the data after that many iterations, data loss on the connector instance is expected (Netdata will also log it).\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing you to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (the order is important: the first pattern matching the hostname will be used, positive or negative).\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a `filter` URL parameter that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system, and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n```\n
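A hedged sketch (values are hypothetical) of the label-related options described above, exporting human-friendly names and configured host labels while suppressing the automatic ones:\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n send names instead of ids = yes\n send configured labels = yes\n send automatic labels = no\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol.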
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": "" @@ -21152,7 +21292,7 @@ export const integrations = [ "scalable time series" ], "overview": "# OpenTSDB\n\nUse the OpenTSDB connector for the exporting engine to archive your Netdata metrics to OpenTSDB databases for long-term storage,\nfurther analysis, or correlation with data from other sources.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- OpenTSDB and Netdata, installed, configured and operational.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | Netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to OpenTSDB. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. 
tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used (opentsdb = 4242).\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:4242 10.11.14.3:4242 10.11.14.4:4242\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Minimal configuration\n\nAdd `:http` or `:https` modifiers to the connector type if you need to use other than a plaintext protocol.\nFor example: `opentsdb:http:my_opentsdb_instance`, `opentsdb:https:my_opentsdb_instance`.\n\n\n```yaml\n[opentsdb:my_opentsdb_instance]\n enabled = yes\n destination = localhost:4242\n\n```\n##### HTTP authentication\n\n\n\n```yaml\n[opentsdb:my_opentsdb_instance]\n enabled = yes\n destination = localhost:4242\n username = my_username\n password = my_password\n\n```\n##### Using `send hosts matching`\n\n\n\n```yaml\n[opentsdb:my_opentsdb_instance]\n enabled = yes\n destination = localhost:4242\n send hosts matching = localhost *\n\n```\n##### Using `send charts matching`\n\n\n\n```yaml\n[opentsdb:my_opentsdb_instance]\n enabled = yes\n destination = localhost:4242\n send charts matching = *\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- OpenTSDB and Netdata, installed, configured and operational.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | Netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to OpenTSDB. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. 
| * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used (opentsdb = 4242).\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:4242 10.11.14.3:4242 10.11.14.4:4242\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Minimal configuration\n\nAdd `:http` or `:https` modifiers to the connector type if you need to use a protocol other than plaintext.\nFor example: `opentsdb:http:my_opentsdb_instance`, `opentsdb:https:my_opentsdb_instance`.\n\n\n```yaml\n[opentsdb:my_opentsdb_instance]\n enabled = yes\n destination = localhost:4242\n\n```\n##### HTTP authentication\n\n\n\n```yaml\n[opentsdb:my_opentsdb_instance]\n enabled = yes\n destination = localhost:4242\n username = my_username\n password = my_password\n\n```\n##### Using `send hosts matching`\n\n\n\n```yaml\n[opentsdb:my_opentsdb_instance]\n enabled = yes\n destination = localhost:4242\n send hosts matching = localhost *\n\n```\n##### Using `send charts matching`\n\n\n\n```yaml\n[opentsdb:my_opentsdb_instance]\n enabled = yes\n destination = localhost:4242\n send charts matching = *\n\n```\n",
    "integration_type": "exporter",
    "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/opentsdb/metadata.yaml",
    "troubleshooting": ""
@@ -21180,7 +21320,7 @@ export const integrations = [
    "time series"
  ],
    "overview": "# PostgreSQL\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",
-    "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. 
| 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). 
| | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": "" @@ -21202,7 +21342,7 @@ export const integrations = [ "time series" ], "overview": "# Prometheus Remote Write\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). 
| | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). 
For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": "" @@ -21231,7 +21371,7 @@ export const integrations = [ "time series" ], "overview": "# QuasarDB\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). 
| | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). 
For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": "" @@ -21260,7 +21400,7 @@ export const integrations = [ "time series" ], "overview": "# Splunk SignalFx\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). 
| | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). 
For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": "" @@ -21288,7 +21428,7 @@ export const integrations = [ "time series" ], "overview": "# Thanos\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). 
| | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp; tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6, you can enclose the IP in brackets ([]) to separate it from the port.\n- PORT can be a port number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n```yaml\ndestination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n##### buffer on failures\n\nIf the server keeps failing to receive the data after that many iterations, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the local host is always checked as `localhost`), allowing you to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.
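\n\nAs a sketch (reusing the pattern described above), the corresponding line in the connector's section of `exporting.conf` might look like this:\n\n```yaml\n# sketch: export apps.* charts, excluding those ending in *reads\nsend charts matching = !*reads apps.*\n```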
\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol.
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": "" @@ -21316,7 +21456,7 @@ export const integrations = [ "time series" ], "overview": "# TiKV\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). 
| | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp; tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6, you can enclose the IP in brackets ([]) to separate it from the port.\n- PORT can be a port number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n```yaml\ndestination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n##### buffer on failures\n\nIf the server keeps failing to receive the data after that many iterations, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the local host is always checked as `localhost`), allowing you to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.
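\n\nA one-line sketch (the value `yes` is illustrative) for the connector's section of `exporting.conf`:\n\n```yaml\n# sketch: send human-friendly names instead of system IDs\nsend names instead of ids = yes\n```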
\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol.
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": "" @@ -21344,7 +21484,7 @@ export const integrations = [ "time series" ], "overview": "# TimescaleDB\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). 
| | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp; tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6, you can enclose the IP in brackets ([]) to separate it from the port.\n- PORT can be a port number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n```yaml\ndestination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n##### buffer on failures\n\nIf the server keeps failing to receive the data after that many iterations, data loss on the connector instance is expected (Netdata will also log it).
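\n\nAs an illustrative sketch of the arithmetic: with the default `update every = 10`, buffering for 30 iterations would cover roughly a 300-second outage (30 is an example value, not a recommendation):\n\n```yaml\n# sketch: 30 iterations x 10 seconds = about 300 seconds of buffering\nbuffer on failures = 30\n```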
\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the local host is always checked as `localhost`), allowing you to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol.
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": "" @@ -21372,7 +21512,7 @@ export const integrations = [ "time series" ], "overview": "# VictoriaMetrics\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). 
| | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp; tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6, you can enclose the IP in brackets ([]) to separate it from the port.\n- PORT can be a port number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n```yaml\ndestination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.
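\n\nFor example (60 is an illustrative value, not a recommendation), to send batches once per minute instead of the default 10 seconds:\n\n```yaml\n# sketch: send metrics to the external database every 60 seconds\nupdate every = 60\n```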
\n\n\n##### buffer on failures\n\nIf the server keeps failing to receive the data after that many iterations, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the local host is always checked as `localhost`), allowing you to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol.
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": "" @@ -21402,7 +21542,7 @@ export const integrations = [ "time series" ], "overview": "# VMware Aria\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). 
| | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol.
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": "" @@ -21430,7 +21570,7 @@ export const integrations = [ "time series" ], "overview": "# Wavefront\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). 
| | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option; a quick way to preview a pattern this way is sketched below.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": "" @@ -21449,9 +21589,9 @@ export const integrations = [ "Alerta" ], "overview": "# Alerta\n\nThe [Alerta](https://alerta.io/) monitoring system is a tool used to consolidate and de-duplicate alerts from multiple sources for quick \u2018at-a-glance\u2019 visualization.
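The allmetrics `filter` URL parameter referenced in the exporter options above can be tried before committing a pattern to `exporting.conf`. A minimal sketch, assuming a local Agent on the default port 19999; the host, port, and the sample pattern `!*reads apps.*` (taken from the example in the docs above) are placeholders to adjust:

```bash
# Preview which metrics a simple pattern would select, using the allmetrics
# `filter` URL parameter described above (it overrides "send charts matching"
# for this one query). Host, port, and pattern are placeholder assumptions.
curl -s "http://localhost:19999/api/v1/allmetrics?format=prometheus&filter=!*reads%20apps.*" | head -n 20
```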
With just one system you can monitor alerts from many other monitoring tools on a single screen.\nYou can send Netdata alerts to Alerta to see alerts coming from many Netdata hosts or also from a multi-host Netdata configuration.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A working Alerta instance\n- An Alerta API key (if authentication in Alerta is enabled)\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_ALERTA | Set `SEND_ALERTA` to YES | | yes |\n| ALERTA_WEBHOOK_URL | set `ALERTA_WEBHOOK_URL` to the API url you defined when you installed the Alerta server. | | yes |\n| ALERTA_API_KEY | Set `ALERTA_API_KEY` to your API key. | | yes |\n| DEFAULT_RECIPIENT_ALERTA | Set `DEFAULT_RECIPIENT_ALERTA` to the default recipient environment you want the alert notifications to be sent to. All roles will default to this variable if left unconfigured. | | yes |\n| DEFAULT_RECIPIENT_CUSTOM | Set different recipient environments per role, by editing `DEFAULT_RECIPIENT_CUSTOM` with the environment name of your choice | | no |\n\n##### ALERTA_API_KEY\n\nYou will need an API key to send messages from any source, if Alerta is configured to use authentication (recommended). To create a new API key:\n1. Go to Configuration > API Keys.\n2. 
Create a new API key called \"netdata\" with `write:alerts` permission.\n\n\n##### DEFAULT_RECIPIENT_CUSTOM\n\nThe `DEFAULT_RECIPIENT_CUSTOM` can be edited in the following entries at the bottom of the same file:\n\n```conf\nrole_recipients_alerta[sysadmin]=\"Systems\"\nrole_recipients_alerta[domainadmin]=\"Domains\"\nrole_recipients_alerta[dba]=\"Databases Systems\"\nrole_recipients_alerta[webmaster]=\"Marketing Development\"\nrole_recipients_alerta[proxyadmin]=\"Proxy\"\nrole_recipients_alerta[sitemgr]=\"Sites\"\n```\n\nThe values you provide should be defined as environments in `/etc/alertad.conf` with `ALLOWED_ENVIRONMENTS` option.\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# alerta (alerta.io) global notification options\n\nSEND_ALERTA=\"YES\"\nALERTA_WEBHOOK_URL=\"http://yourserver/alerta/api\"\nALERTA_API_KEY=\"INSERT_YOUR_API_KEY_HERE\"\nDEFAULT_RECIPIENT_ALERTA=\"Production\"\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A working Alerta instance\n- An Alerta API key (if authentication in Alerta is enabled)\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification:\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_ALERTA | Set `SEND_ALERTA` to YES | | yes |\n| ALERTA_WEBHOOK_URL | Set `ALERTA_WEBHOOK_URL` to the API URL you defined when you installed the Alerta server. | | yes |\n| ALERTA_API_KEY | Set `ALERTA_API_KEY` to your API key. | | yes |\n| DEFAULT_RECIPIENT_ALERTA | Set `DEFAULT_RECIPIENT_ALERTA` to the default recipient environment you want the alert notifications to be sent to. All roles will default to this variable if left unconfigured. | | yes |\n| DEFAULT_RECIPIENT_CUSTOM | Set different recipient environments per role, by editing `DEFAULT_RECIPIENT_CUSTOM` with the environment name of your choice. | | no |\n\n##### ALERTA_API_KEY\n\nYou will need an API key to send messages from any source if Alerta is configured to use authentication (recommended). To create a new API key:\n1. Go to Configuration > API Keys.\n2. Create a new API key called \"netdata\" with `write:alerts` permission.\n\n\n##### DEFAULT_RECIPIENT_CUSTOM\n\nThe `DEFAULT_RECIPIENT_CUSTOM` can be edited in the following entries at the bottom of the same file:\n\n```text\nrole_recipients_alerta[sysadmin]=\"Systems\"\nrole_recipients_alerta[domainadmin]=\"Domains\"\nrole_recipients_alerta[dba]=\"Databases Systems\"\nrole_recipients_alerta[webmaster]=\"Marketing Development\"\nrole_recipients_alerta[proxyadmin]=\"Proxy\"\nrole_recipients_alerta[sitemgr]=\"Sites\"\n```\n\nThe values you provide should be defined as environments in `/etc/alertad.conf` with the `ALLOWED_ENVIRONMENTS` option.\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# alerta (alerta.io) global notification options\n\nSEND_ALERTA=\"YES\"\nALERTA_WEBHOOK_URL=\"http://yourserver/alerta/api\"\nALERTA_API_KEY=\"INSERT_YOUR_API_KEY_HERE\"\nDEFAULT_RECIPIENT_ALERTA=\"Production\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand to test the alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/alerta/metadata.yaml" }, { @@ -21468,266 +21608,11 @@ export const integrations = [ "AWS SNS" ], "overview": "# AWS SNS\n\nAs part of its AWS suite, Amazon provides a notification broker service called 'Simple Notification Service' (SNS). Amazon SNS works similarly to Netdata's own notification system, allowing you to dispatch a single notification to multiple subscribers of different types. Among other things, SNS supports sending notifications to:\n- Email addresses\n- Mobile Phones via SMS\n- HTTP or HTTPS web hooks\n- AWS Lambda functions\n- AWS SQS queues\n- Mobile applications via push notifications\nYou can send notifications through Amazon SNS using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n\n## Limitations\n\n- While Amazon SNS supports sending differently formatted messages for different delivery methods, Netdata does not currently support this functionality.\n- For email notification support, we recommend using Netdata's email notifications, as it has the following benefits:\n - In most cases, it requires less configuration.\n - Netdata's emails are nicely pre-formatted and support features like threading, which requires a lot of manual effort in SNS.\n - It is less resource intensive and more cost-efficient than SNS.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The [Amazon Web Services CLI tools](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html) (awscli).\n- An actual home directory for the user you run Netdata as, instead of just using `/` as a home directory. The setup depends on the distribution, but `/var/lib/netdata` is the recommended directory.
If you are using Netdata as a dedicated user, the permissions will already be correct.\n- An Amazon SNS topic to send notifications to with one or more subscribers. The Getting Started section of the Amazon SNS documentation covers the basics of how to set this up. Make note of the Topic ARN when you create the topic.\n- While not mandatory, it is highly recommended to create a dedicated IAM user on your account for Netdata to send notifications. This user needs to have programmatic access, and should only allow access to SNS. For an additional layer of security, you can create one for each system or group of systems.\n- Terminal access to the Agent you wish to configure.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| aws path | The full path of the aws command. If empty, the system `$PATH` will be searched for it. If not found, Amazon SNS notifications will be silently disabled. | | yes |\n| SEND_AWSNS | Set `SEND_AWSNS` to YES | YES | yes |\n| AWSSNS_MESSAGE_FORMAT | Set `AWSSNS_MESSAGE_FORMAT` to to the string that you want the alert to be sent into. | ${status} on ${host} at ${date}: ${chart} ${value_string} | yes |\n| DEFAULT_RECIPIENT_AWSSNS | Set `DEFAULT_RECIPIENT_AWSSNS` to the Topic ARN you noted down upon creating the Topic. 
| | yes |\n\n##### AWSSNS_MESSAGE_FORMAT\n\nThe supported variables are:\n\n| Variable name | Description |\n|:---------------------------:|:---------------------------------------------------------------------------------|\n| `${alarm}` | Like \"name = value units\" |\n| `${status_message}` | Like \"needs attention\", \"recovered\", \"is critical\" |\n| `${severity}` | Like \"Escalated to CRITICAL\", \"Recovered from WARNING\" |\n| `${raised_for}` | Like \"(alarm was raised for 10 minutes)\" |\n| `${host}` | The host generated this event |\n| `${url_host}` | Same as ${host} but URL encoded |\n| `${unique_id}` | The unique id of this event |\n| `${alarm_id}` | The unique id of the alarm that generated this event |\n| `${event_id}` | The incremental id of the event, for this alarm id |\n| `${when}` | The timestamp this event occurred |\n| `${name}` | The name of the alarm, as given in netdata health.d entries |\n| `${url_name}` | Same as ${name} but URL encoded |\n| `${chart}` | The name of the chart (type.id) |\n| `${url_chart}` | Same as ${chart} but URL encoded |\n| `${status}` | The current status : REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL |\n| `${old_status}` | The previous status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL |\n| `${value}` | The current value of the alarm |\n| `${old_value}` | The previous value of the alarm |\n| `${src}` | The line number and file the alarm has been configured |\n| `${duration}` | The duration in seconds of the previous alarm state |\n| `${duration_txt}` | Same as ${duration} for humans |\n| `${non_clear_duration}` | The total duration in seconds this is/was non-clear |\n| `${non_clear_duration_txt}` | Same as ${non_clear_duration} for humans |\n| `${units}` | The units of the value |\n| `${info}` | A short description of the alarm |\n| `${value_string}` | Friendly value (with units) |\n| `${old_value_string}` | Friendly old value (with units) |\n| `${image}` | The URL of an image to represent the status of the alarm |\n| `${color}` | A color in AABBCC format for the alarm |\n| `${goto_url}` | The URL the user can click to see the netdata dashboard |\n| `${calc_expression}` | The expression evaluated to provide the value for the alarm |\n| `${calc_param_values}` | The value of the variables in the evaluated expression |\n| `${total_warnings}` | The total number of alarms in WARNING state on the host |\n| `${total_critical}` | The total number of alarms in CRITICAL state on the host |\n\n\n##### DEFAULT_RECIPIENT_AWSSNS\n\nAll roles will default to this variable if left unconfigured.\n\nYou can have different recipient Topics per **role**, by editing `DEFAULT_RECIPIENT_AWSSNS` with the Topic ARN you want, in the following entries at the bottom of the same file:\n\n```conf\nrole_recipients_awssns[sysadmin]=\"arn:aws:sns:us-east-2:123456789012:Systems\"\nrole_recipients_awssns[domainadmin]=\"arn:aws:sns:us-east-2:123456789012:Domains\"\nrole_recipients_awssns[dba]=\"arn:aws:sns:us-east-2:123456789012:Databases\"\nrole_recipients_awssns[webmaster]=\"arn:aws:sns:us-east-2:123456789012:Development\"\nrole_recipients_awssns[proxyadmin]=\"arn:aws:sns:us-east-2:123456789012:Proxy\"\nrole_recipients_awssns[sitemgr]=\"arn:aws:sns:us-east-2:123456789012:Sites\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\nAn example working configuration would be:\n\n```yaml\n```conf\n#------------------------------------------------------------------------------\n# Amazon SNS 
notifications\n\nSEND_AWSSNS=\"YES\"\nAWSSNS_MESSAGE_FORMAT=\"${status} on ${host} at ${date}: ${chart} ${value_string}\"\nDEFAULT_RECIPIENT_AWSSNS=\"arn:aws:sns:us-east-2:123456789012:MyTopic\"\n```\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The [Amazon Web Services CLI tools](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html) (awscli).\n- An actual home directory for the user you run Netdata as, instead of just using `/` as a home directory. The setup depends on the distribution, but `/var/lib/netdata` is the recommended directory. If you are using Netdata as a dedicated user, the permissions will already be correct.\n- An Amazon SNS topic to send notifications to with one or more subscribers. The Getting Started section of the Amazon SNS documentation covers the basics of how to set this up. Make note of the Topic ARN when you create the topic.\n- While not mandatory, it is highly recommended to create a dedicated IAM user on your account for Netdata to send notifications. This user needs to have programmatic access, and should only allow access to SNS. For an additional layer of security, you can create one for each system or group of systems.\n- Terminal access to the Agent you wish to configure.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification:\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| aws path | The full path of the aws command. If empty, the system `$PATH` will be searched for it. If not found, Amazon SNS notifications will be silently disabled. | | yes |\n| SEND_AWSSNS | Set `SEND_AWSSNS` to YES | YES | yes |\n| AWSSNS_MESSAGE_FORMAT | Set `AWSSNS_MESSAGE_FORMAT` to the string that you want the alert to be sent in. | ${status} on ${host} at ${date}: ${chart} ${value_string} | yes |\n| DEFAULT_RECIPIENT_AWSSNS | Set `DEFAULT_RECIPIENT_AWSSNS` to the Topic ARN you noted down upon creating the Topic. | | yes |\n\n##### AWSSNS_MESSAGE_FORMAT\n\nThe supported variables are:\n\n| Variable name | Description |\n|:---------------------------:|:---------------------------------------------------------------------------------|\n| `${alarm}` | Like \"name = value units\" |\n| `${status_message}` | Like \"needs attention\", \"recovered\", \"is critical\" |\n| `${severity}` | Like \"Escalated to CRITICAL\", \"Recovered from WARNING\" |\n| `${raised_for}` | Like \"(alarm was raised for 10 minutes)\" |\n| `${host}` | The host that generated this event |\n| `${url_host}` | Same as ${host} but URL encoded |\n| `${unique_id}` | The unique id of this event |\n| `${alarm_id}` | The unique id of the alarm that generated this event |\n| `${event_id}` | The incremental id of the event, for this alarm id |\n| `${when}` | The timestamp when this event occurred |\n| `${name}` | The name of the alarm, as given in netdata health.d entries |\n| `${url_name}` | Same as ${name} but URL encoded |\n| `${chart}` | The name of the chart (type.id) |\n| `${url_chart}` | Same as ${chart} but URL encoded |\n| `${status}` | The current status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL |\n| `${old_status}` | The previous status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL |\n| `${value}` | The current value of the alarm |\n| `${old_value}` | The previous value of the alarm |\n| `${src}` | The line number and file where the alarm has been configured |\n| `${duration}` | The duration in seconds of the previous alarm state |\n| `${duration_txt}` | Same as ${duration} for humans |\n| `${non_clear_duration}` | The total duration in seconds this is/was non-clear |\n| `${non_clear_duration_txt}` | Same as ${non_clear_duration} for humans |\n| `${units}` | The units of the value |\n| `${info}` | A short description of the alarm |\n| `${value_string}` | Friendly value (with units) |\n| `${old_value_string}` | Friendly old value (with units) |\n| `${image}` | The URL of an image to represent the status of the alarm |\n| `${color}` | A color in AABBCC format for the alarm |\n| `${goto_url}` | The URL the user can click to see the netdata dashboard |\n| `${calc_expression}` | The expression evaluated to provide the value for the alarm |\n| `${calc_param_values}` | The value of the variables in the evaluated expression |\n| `${total_warnings}` | The total number of alarms in WARNING state on the host |\n| `${total_critical}` | The total number of alarms in CRITICAL state on the host |\n\n\n##### DEFAULT_RECIPIENT_AWSSNS\n\nAll roles will default to this variable if left unconfigured.\n\nYou can have different recipient Topics per **role**, by editing `DEFAULT_RECIPIENT_AWSSNS` with the Topic ARN you want, in the following entries at the bottom of the same file:\n\n```text\nrole_recipients_awssns[sysadmin]=\"arn:aws:sns:us-east-2:123456789012:Systems\"\nrole_recipients_awssns[domainadmin]=\"arn:aws:sns:us-east-2:123456789012:Domains\"\nrole_recipients_awssns[dba]=\"arn:aws:sns:us-east-2:123456789012:Databases\"\nrole_recipients_awssns[webmaster]=\"arn:aws:sns:us-east-2:123456789012:Development\"\nrole_recipients_awssns[proxyadmin]=\"arn:aws:sns:us-east-2:123456789012:Proxy\"\nrole_recipients_awssns[sitemgr]=\"arn:aws:sns:us-east-2:123456789012:Sites\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\nAn example working configuration would be:\n\n```text\n#------------------------------------------------------------------------------\n# Amazon SNS notifications\n\nSEND_AWSSNS=\"YES\"\nAWSSNS_MESSAGE_FORMAT=\"${status} on ${host} at ${date}: ${chart} ${value_string}\"\nDEFAULT_RECIPIENT_AWSSNS=\"arn:aws:sns:us-east-2:123456789012:MyTopic\"\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand to test the alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/awssns/metadata.yaml" }, { "id": "notify-cloud-awssns", "meta": { "name": "Amazon SNS", "link": "https://aws.amazon.com/sns/", "categories": [ "notify.cloud" ], "icon_filename": "awssns.png" }, "keywords": [ "awssns" ], "overview": "# Amazon SNS\n\nFrom the Cloud interface, you can manage your space's notification settings and from these you can add a specific configuration to get notifications delivered on AWS SNS.\n", "setup": "## Setup\n\n### Prerequisites\n\nTo add AWS SNS notification you need:\n\n- A Netdata Cloud account\n- Access to the space as an **Admin**\n- The Space needs to be on a paid plan\n- Have an AWS account with AWS SNS access, for more details check [how to configure this on AWS SNS](#settings-on-aws-sns)\n\n### Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **AwsSns** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n * **Notification settings** are Netdata specific settings\n - Configuration name - you can optionally provide a name for your configuration you can easily refer to it\n - Rooms - by specifying a list of Rooms you are select to which nodes or areas of your infrastructure you want to be notified using this configuration\n - Notification - you specify which notifications you want to be notified using this configuration: All Alerts and unreachable, All Alerts, Critical only\n * **Integration configuration** are the specific notification integration required settings, which vary by notification method. For AWS SNS:\n - Topic ARN - topic provided on AWS SNS (with region) for where to publish your notifications. For more details check [how to configure this on AWS SNS](#settings-on-aws-sns)\n\n### Settings on AWS SNS\n\nTo enable the webhook integration on AWS SNS you need:\n1. [Setting up access for Amazon SNS](https://docs.aws.amazon.com/sns/latest/dg/sns-setting-up.html)\n2. Create a topic\n - On AWS SNS management console click on **Create topic**\n - On the **Details** section, the standard type and provide the topic name\n - On the **Access policy** section, change the **Publishers** option to **Only the specified AWS accounts** and provide the Netdata AWS account **(123269920060)** that will be used to publish notifications to the topic being created\n - Finally, click on **Create topic** on the bottom of the page\n3.
Now, use the new **Topic ARN** while adding AWS SNS integration on your space.\n\n", - "integration_type": "notification", - "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", - "troubleshooting": "" - }, - { - "id": "notify-cloud-discord", - "meta": { - "name": "Discord", - "link": "https://discord.com/", - "categories": [ - "notify.cloud" - ], - "icon_filename": "discord.png" - }, - "keywords": [ - "discord", - "community" - ], - "overview": "# Discord\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on Discord.\n", - "setup": "## Setup\n\n### Prerequisites\n- A Netdata Cloud account\n- Access to the Netdata Space as an **Admin**\n- You need to have a Discord server able to receive webhooks integrations.\n\n### Discord Server Configuration\nSteps to configure your Discord server to receive [webhook notifications](https://support.discord.com/hc/en-us/articles/228383668) from Netdata:\n1. Go to `Server Settings` --> `Integrations`\n2. **Create Webhook** or **View Webhooks** if you already have some defined\n3. Specify the **Name** and **Channel** on your new webhook\n4. Use Webhook URL to add your notification configuration on Netdata UI\n\n### Netdata Configuration Steps\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **Discord** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n * **Notification settings** are Netdata specific settings\n - Configuration name - you can optionally provide a name for your configuration you can easily refer to it\n - Rooms - by specifying a list of Rooms you are select to which nodes or areas of your infrastructure you want to be notified using this configuration\n - Notification - you specify which notifications you want to be notified using this configuration: All Alerts and unreachable, All Alerts, Critical only\n * **Integration configuration** are the specific notification integration required settings, which vary by notification method. 
For Discord:\n - Define the type channel you want to send notifications to: **Text channel** or **Forum channel**\n - Webhook URL - URL provided on Discord for the channel you want to receive your notifications.\n - Thread name - if the Discord channel is a **Forum channel** you will need to provide the thread name as well\n\n", - "integration_type": "notification", - "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", - "troubleshooting": "" - }, - { - "id": "notify-cloud-mattermost", - "meta": { - "name": "Mattermost", - "link": "https://mattermost.com/", - "categories": [ - "notify.cloud" - ], - "icon_filename": "mattermost.png" - }, - "keywords": [ - "mattermost" - ], - "overview": "# Mattermost\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on Mattermost.\n", - "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Netdata Space as an **Admin**\n- The Netdata Space needs to be on a paid plan\n- You need to have permissions on Mattermost to add new integrations.\n- You need to have a Mattermost app on your workspace to receive the webhooks.\n\n### Mattermost Server Configuration\n\nSteps to configure your Mattermost to receive notifications from Netdata:\n\n1. In Mattermost, go to Product menu > Integrations > Incoming Webhook\n - If you don\u2019t have the Integrations option, incoming webhooks may not be enabled on your Mattermost server or may be disabled for non-admins. They can be enabled by a System Admin from System Console > Integrations > Integration Management. Once incoming webhooks are enabled, continue with the steps below.\n2. Select Add Incoming Webhook and add a name and description for the webhook. The description can be up to 500 characters\n3. Select the channel to receive webhook payloads, then select Add to create the webhook\n4. You will end up with a webhook endpoint that looks like below:\n `https://your-mattermost-server.com/hooks/xxx-generatedkey-xxx`\n\n - Treat this endpoint as a secret. Anyone who has it will be able to post messages to your Mattermost instance.\n\nFor more details please check Mattermost's article [Incoming webhooks for Mattermost](https://developers.mattermost.com/integrate/webhooks/incoming/).\n\n### Netdata Configuration Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **Mattermost** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n * **Notification settings** are Netdata specific settings\n - Configuration name - you can optionally provide a name for your configuration you can easily refer to it\n - Rooms - by specifying a list of Rooms you are select to which nodes or areas of your infrastructure you want to be notified using this configuration\n - Notification - you specify which notifications you want to be notified using this configuration: All Alerts and unreachable, All Alerts, Critical only\n * **Integration configuration** are the specific notification integration required settings, which vary by notification method. 
For Mattermost:\n - Webhook URL - URL provided on Mattermost for the channel you want to receive your notifications\n\n", - "integration_type": "notification", - "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", - "troubleshooting": "" - }, - { - "id": "notify-cloud-microsoftteams", - "meta": { - "name": "Microsoft Teams", - "link": "https://www.microsoft.com/en-us/microsoft-teams", - "categories": [ - "notify.cloud" - ], - "icon_filename": "teams.svg" - }, - "keywords": [ - "microsoft", - "teams" - ], - "overview": "# Microsoft Teams\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications to a Microsoft Teams channel.\n", - "setup": "## Setup\n\n### Prerequisites\n\nTo add Microsoft Teams notifications integration to your Netdata Cloud space you will need the following:\n\n- A Netdata Cloud account.\n- Access to the Netdata Cloud space as an **Admin**.\n- The Space to be on a paid plan.\n- A [Microsoft Teams Essentials subscription](https://www.microsoft.com/en-sg/microsoft-teams/essentials) or higher. Note that this is a **paid** feature.\n\n### Settings on Microsoft Teams\n\n1. **Access the Channel Settings**: Navigate to the desired Microsoft Teams channel and hover over the channel name. Click the three dots (ellipsis) icon that appears.\n2. **Create a New Workflow**: Select \"Workflows\" from the options, then choose \"Post to a channel when a webhook request is received.\"\n3. **Configure Workflow Details**:\n - Give your workflow a descriptive name, such as \"Netdata Alerts.\"\n - Select the target team and channel to receive notifications.\n - Click \"Add workflow.\"\n4. **Obtain the Webhook URL**:\n - Once the workflow is created, you will receive a unique Workflow Webhook URL.\n - Copy this URL, as it will be required to configure Netdata Cloud.\n\n### Settings on Netdata Cloud\n\n1. Click on the **Space settings** cog (located above your profile icon).\n2. Click on the **Notification** tab.\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen).\n4. On the **Microsoft Teams** card click on **+ Add**.\n5. A modal will be presented to you to enter the required details to enable the configuration:\n * **Notification settings** are Netdata specific settings:\n - Configuration name - you can optionally provide a name for your configuration you can easily refer to it.\n - Rooms - by specifying a list of Rooms you are select to which nodes or areas of your infrastructure you want to be notified using this configuration.\n - Notification - you specify which notifications you want to be notified using this configuration: All Alerts and unreachable, All Alerts, Critical only.\n * **Integration configuration** are the specific notification integration required settings, which vary by notification method. 
For Microsoft Teams:\n - Microsoft Teams Incoming Webhook URL - the _Incoming Webhook URL_ that was generated earlier.\n\n", - "integration_type": "notification", - "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", - "troubleshooting": "" - }, - { - "id": "notify-cloud-mobile-app", - "meta": { - "name": "Netdata Mobile App", - "link": "https://netdata.cloud", - "categories": [ - "notify.cloud" - ], - "icon_filename": "netdata.png" - }, - "keywords": [ - "mobile-app", - "phone", - "personal-notifications" - ], - "overview": "# Netdata Mobile App\n\nFrom the Netdata Cloud UI, you can manage your user notification settings and enable the configuration to deliver notifications on the Netdata Mobile Application.\n", - "setup": "## Setup\n\n### Prerequisites\n- A Netdata Cloud account\n- You need to have the Netdata Mobile Application installed on your [Android](https://play.google.com/store/apps/details?id=cloud.netdata.android&pli=1) or [iOS](https://apps.apple.com/in/app/netdata-mobile/id6474659622) phone.\n\n### Netdata Mobile App Configuration\nSteps to login to the Netdata Mobile Application to receive alert and reachability and alert notifications:\n1. Download the Netdata Mobile Application from [Google Play Store](https://play.google.com/store/apps/details?id=cloud.netdata.android&pli=1) or the [iOS App Store](https://apps.apple.com/in/app/netdata-mobile/id6474659622)\n2. Open the App and Choose the Sign In Option\n - Sign In with Email Address: Enter the Email Address of your registered Netdata Cloud Account and Click on the Verification link received by Email on your mobile device.\n - Sign In with QR Code: Scan the QR Code from your `Netdata Cloud` UI under **User Settings** --> **Notifications** --> **Mobile App Notifications** --> **Show QR Code**\n3. Start receiving alert and reachability notifications for your **Space(s)** on a **Paid Subscription plan**\n\n### Netdata Configuration Steps\n1. Click on the **User settings** on the bottom left of your screen (your profile icon)\n2. Click on the **Notifications** tab\n3. Enable **Mobile App Notifications** if disabled (Enabled by default)\n4. Use the **Show QR Code** Option to login to your mobile device by scanning the **QR Code**\n\n", - "integration_type": "notification", - "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", - "troubleshooting": "" - }, - { - "id": "notify-cloud-opsgenie", - "meta": { - "name": "Opsgenie", - "link": "https://www.atlassian.com/software/opsgenie", - "categories": [ - "notify.cloud" - ], - "icon_filename": "opsgenie.png" - }, - "keywords": [ - "opsgenie", - "atlassian" - ], - "overview": "# Opsgenie\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on Opsgenie.\n", - "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Netdata Space as an **Admin**\n- The Netdata Space needs to be on a paid plan\n- You need to have permissions on Opsgenie to add new integrations.\n\n### Opsgenie Server Configuration\n\nSteps to configure your Opsgenie to receive notifications from Netdata:\n\n1. Go to integrations tab of your team, click **Add integration**\n2. Pick **API** from available integrations. Copy your API Key and press **Save Integration**.\n3. 
Paste copied API key into the corresponding field in **Integration configuration** section of Opsgenie modal window in Netdata.\n\n### Netdata Configuration Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **Opsgenie** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n * **Notification settings** are Netdata specific settings\n - Configuration name - you can optionally provide a name for your configuration you can easily refer to it\n - Rooms - by specifying a list of Rooms you are select to which nodes or areas of your infrastructure you want to be notified using this configuration\n - Notification - you specify which notifications you want to be notified using this configuration: All Alerts and unreachable, All Alerts, Critical only\n * **Integration configuration** are the specific notification integration required settings, which vary by notification method. For Opsgenie:\n - API Key - a key provided on Opsgenie for the channel you want to receive your notifications.\n\n", - "integration_type": "notification", - "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", - "troubleshooting": "" - }, - { - "id": "notify-cloud-pagerduty", - "meta": { - "name": "PagerDuty", - "link": "https://www.pagerduty.com/", - "categories": [ - "notify.cloud" - ], - "icon_filename": "pagerduty.png" - }, - "keywords": [ - "pagerduty" - ], - "overview": "# PagerDuty\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on PagerDuty.\n", - "setup": "## Setup\n\n### Prerequisites\n- A Netdata Cloud account\n- Access to the Netdata Space as an **Admin**\n- The Netdata Space needs to be on a paid plan\n- You need to have a PagerDuty service to receive events using webhooks.\n\n\n### PagerDuty Server Configuration\nSteps to configure your PagerDuty to receive notifications from Netdata:\n\n1. Create a service to receive events from your services directory page on PagerDuty\n2. At step 3, select `Events API V2` Integration or **View Webhooks** if you already have some defined\n3. Once the service is created, you will be redirected to its configuration page, where you can copy the **Integration Key** and **Integration URL (Alert Events)** fields to add them to your notification configuration in the Netdata UI.\n\n### Netdata Configuration Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **PagerDuty** card click on **+ Add**\n5. 
A modal will be presented to you to enter the required details to enable the configuration:\n * **Notification settings** are Netdata specific settings\n - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it\n - Rooms - by specifying a list of Rooms you select which nodes or areas of your infrastructure you want to be notified about using this configuration\n - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only\n * **Integration configuration** are the specific notification integration required settings, which vary by notification method. For PagerDuty:\n - Integration Key - a 32-character key provided by PagerDuty to receive events on your service.\n - Integration URL (Alert Events) - the URL provided by PagerDuty where we will send notifications.\n\n", - "integration_type": "notification", - "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", - "troubleshooting": "" - }, - { - "id": "notify-cloud-rocketchat", - "meta": { - "name": "RocketChat", - "link": "https://www.rocket.chat/", - "categories": [ - "notify.cloud" - ], - "icon_filename": "rocketchat.png" - }, - "keywords": [ - "rocketchat" - ], - "overview": "# RocketChat\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on RocketChat.\n", - "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Netdata Space as an **Admin**\n- The Netdata Space needs to be on a paid plan\n- You need to have permissions on RocketChat to add new integrations.\n- You need to have a RocketChat app on your workspace to receive the webhooks.\n\n### RocketChat Server Configuration\n\nSteps to configure your RocketChat to receive notifications from Netdata:\n\n1. In RocketChat, navigate to Administration > Workspace > Integrations.\n2. Click **+New** at the top right corner.\n3. For more details about each parameter, check [create-a-new-incoming-webhook](https://docs.rocket.chat/use-rocket.chat/workspace-administration/integrations#create-a-new-incoming-webhook).\n4. After configuring the integration, click Save.\n5. You will end up with a webhook endpoint that looks like the one below:\n `https://your-server.rocket.chat/hooks/YYYYYYYYYYYYYYYYYYYYYYYY/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX`\n - Treat this endpoint as a secret. Anyone who has it will be able to post messages to your RocketChat instance.\n\n\nFor more details please check RocketChat's article on [incoming webhooks](https://docs.rocket.chat/use-rocket.chat/workspace-administration/integrations/).\n\n### Netdata Configuration Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **RocketChat** card click on **+ Add**\n5. 
A modal will be presented to you to enter the required details to enable the configuration:\n * **Notification settings** are Netdata specific settings\n - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it\n - Rooms - by specifying a list of Rooms you select which nodes or areas of your infrastructure you want to be notified about using this configuration\n - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only\n * **Integration configuration** are the specific notification integration required settings, which vary by notification method. For RocketChat:\n - Webhook URL - the URL provided on RocketChat for the channel where you want to receive your notifications.\n\n", - "integration_type": "notification", - "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", - "troubleshooting": "" - }, - { - "id": "notify-cloud-slack", - "meta": { - "name": "Slack", - "link": "https://slack.com/", - "categories": [ - "notify.cloud" - ], - "icon_filename": "slack.png" - }, - "keywords": [ - "slack" - ], - "overview": "# Slack\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on Slack.\n", - "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Netdata Space as an **Admin**\n- The Netdata Space needs to be on a paid plan\n- You need to have a Slack app on your workspace to receive the webhooks.\n\n### Slack Server Configuration\n\nSteps to configure your Slack to receive notifications from Netdata:\n\n1. Create an app to receive webhook integrations. Check [Create an app](https://api.slack.com/apps?new_app=1) from the Slack documentation for further details\n2. Install the app on your workspace\n3. Configure Webhook URLs for your workspace\n - On your app go to **Incoming Webhooks** and click on **activate incoming webhooks**\n - At the bottom of the **Webhook URLs for Your Workspace** section, click **Add New Webhook to Workspace**\n - After pressing that, specify the channel where you want your notifications to be delivered\n - Once completed, copy the Webhook URL; you will need to add it to your notification configuration in the Netdata UI\n\nFor more details please check Slack's article [Incoming webhooks for Slack](https://slack.com/help/articles/115005265063-Incoming-webhooks-for-Slack).\n\n### Netdata Configuration Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **Slack** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n * **Notification settings** are Netdata specific settings\n - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it\n - Rooms - by specifying a list of Rooms you select which nodes or areas of your infrastructure you want to be notified about using this configuration\n - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only\n * **Integration configuration** are the specific notification integration required settings, which vary by notification method. 
For Slack:\n - Webhook URL - the URL provided on Slack for the channel where you want to receive your notifications.\n\n", - "integration_type": "notification", - "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", - "troubleshooting": "" - }, - { - "id": "notify-cloud-splunk", - "meta": { - "name": "Splunk", - "link": "https://splunk.com/", - "categories": [ - "notify.cloud" - ], - "icon_filename": "splunk-black.svg" - }, - "keywords": [ - "Splunk" - ], - "overview": "# Splunk\n\nFrom the Cloud interface, you can manage your space's notification settings and from there you can add a specific configuration to get notifications delivered on Splunk.\n", - "setup": "## Setup\n\n### Prerequisites\n\nTo add Splunk notifications you need:\n\n- A Netdata Cloud account\n- Access to the space as an **Admin**\n- The Space needs to be on a paid plan\n- URI and token for your Splunk HTTP Event Collector. Refer to the [Splunk documentation](https://docs.splunk.com/Documentation/Splunk/latest/Data/UsetheHTTPEventCollector) for detailed instructions.\n\n### Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **Splunk** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n - **Notification settings** are Netdata specific settings\n - Configuration name - provide a descriptive name for your configuration to easily identify it.\n - Rooms - select the nodes or areas of your infrastructure you want to receive notifications about.\n - Notification - choose the type of notifications you want to receive: All Alerts and unreachable, All Alerts, Critical only.\n - **Integration configuration** are the specific notification integration required settings, which vary by notification method. For Splunk:\n - HTTP Event Collector URI - The URI of your HTTP event collector in Splunk\n - HTTP Event Collector Token - the token that Splunk provided to you when you created the HTTP Event Collector\n\n", - "integration_type": "notification", - "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", - "troubleshooting": "" - }, - { - "id": "notify-cloud-telegram", - "meta": { - "name": "Telegram", - "link": "https://telegram.org/", - "categories": [ - "notify.cloud" - ], - "icon_filename": "telegram.svg" - }, - "keywords": [ - "Telegram" - ], - "overview": "# Telegram\n\nFrom the Cloud interface, you can manage your space's notification settings and from there you can add a specific configuration to get notifications delivered on Telegram.\n", - "setup": "## Setup\n\n### Prerequisites\n\nTo add Telegram notifications you need:\n\n- A Netdata Cloud account\n- Access to the space as an **Admin**\n- The Space needs to be on a paid plan\n- The Telegram bot token, chat ID and _optionally_ the topic ID\n\n### Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **Telegram** card click on **+ Add**\n5. 
A modal will be presented to you to enter the required details to enable the configuration:\n - **Notification settings** are Netdata specific settings\n - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it\n - Rooms - by specifying a list of Rooms you select which nodes or areas of your infrastructure you want to be notified about using this configuration\n - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only\n - **Integration configuration** are the specific notification integration required settings, which vary by notification method. For Telegram:\n - Bot Token - the token of your bot\n - Chat ID - the chat ID your bot will deliver messages to\n - Topic ID - the identifier of the chat topic to which your bot will send messages. If omitted or 0, messages will be sent to the General topic. If topics are not supported, messages will be sent to the chat.\n\n### Getting the Telegram bot token, chat ID and topic ID\n\n- Bot token: To create a bot, contact the [@BotFather](https://t.me/BotFather) bot, send the command `/newbot`, and follow the instructions. **Start a conversation with your bot or invite it into the group where you want it to send notifications**.\n- To get the chat ID you have two options:\n - Contact the [@myidbot](https://t.me/myidbot) bot and send the `/getid` command to get your personal chat ID, or invite it into a group and use the `/getgroupid` command to get the group chat ID.\n - Alternatively, you can get the chat ID directly from the bot API. Send your bot a command in the chat you want to use, then check `https://api.telegram.org/bot{YourBotToken}/getUpdates`, e.g. `https://api.telegram.org/bot111122223:7OpFlFFRzRBbrUUmIjj5HF9Ox2pYJZy5/getUpdates`\n- To get the topic ID, the easiest way is this: Post a message to that topic, then right-click on it and select `Copy Message Link`. Paste it into a scratchpad and notice that it has the following structure `https://t.me/c/XXXXXXXXXX/YY/ZZ`. The topic ID is `YY` (integer).\n\n", - "integration_type": "notification", - "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", - "troubleshooting": "" - }, - { - "id": "notify-cloud-victorops", - "meta": { - "name": "Splunk VictorOps", - "link": "https://www.splunk.com/en_us/about-splunk/acquisitions/splunk-on-call.html", - "categories": [ - "notify.cloud" - ], - "icon_filename": "victorops.svg" - }, - "keywords": [ - "VictorOps", - "Splunk", - "On-Call" - ], - "overview": "# Splunk VictorOps\n\nFrom the Cloud interface, you can manage your space's notification settings and from there you can add a specific configuration to get notifications delivered on Splunk On-Call/VictorOps.\n", - "setup": "## Setup\n\n### Prerequisites\n\nTo add Splunk VictorOps notifications (also known as Splunk On-Call) you need:\n\n- A Netdata Cloud account\n- Access to the space as an **Admin**\n- The Space needs to be on a paid plan\n- Destination URL for your Splunk VictorOps REST Endpoint Integration. Refer to the [VictorOps documentation](https://help.victorops.com/knowledge-base/rest-endpoint-integration-guide) for detailed instructions.\n\n### Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. 
On the **Splunk VictorOps** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n - **Notification settings** are Netdata specific settings\n - Configuration name - provide a descriptive name for your configuration to easily identify it.\n - Rooms - select the nodes or areas of your infrastructure you want to receive notifications about.\n - Notification - choose the type of notifications you want to receive: All Alerts and unreachable, All Alerts, Critical only.\n - **Integration configuration** are the specific notification integration required settings, which vary by notification method. For Splunk VictorOps:\n - Destination URL - The URL provided by VictorOps of your REST endpoint.\n\n", - "integration_type": "notification", - "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", - "troubleshooting": "" - }, - { - "id": "notify-cloud-webhook", - "meta": { - "name": "Webhook", - "link": "https://en.wikipedia.org/wiki/Webhook", - "categories": [ - "notify.cloud" - ], - "icon_filename": "webhook.svg" - }, - "keywords": [ - "generic webhooks", - "webhooks" - ], - "overview": "# Webhook\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on a webhook using a predefined schema.\n", - "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Netdata Space as an **Admin**\n- The Netdata Space needs to be on a paid plan\n- You need to have an app that allows you to receive webhooks following a predefined schema.\n\n### Netdata Configuration Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **Webhook** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n * **Notification settings** are Netdata specific settings\n - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it\n - Rooms - by specifying a list of Rooms you select which nodes or areas of your infrastructure you want to be notified about using this configuration\n - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only\n * **Integration configuration** are the specific notification integration required settings, which vary by notification method. For Webhook:\n - Webhook URL - the URL of the service that Netdata will send notifications to. In order to keep the communication secure, we only accept HTTPS URLs.\n - Extra headers - these are optional key-value pairs that you can set to be included in the HTTP requests sent to the webhook URL.\n - Authentication Mechanism - Netdata webhook integration supports 3 different authentication mechanisms.\n * Mutual TLS (recommended) - default authentication mechanism used if no other method is selected.\n * Basic - the client sends a request with an Authorization header that includes a base64-encoded string in the format **username:password**. These settings will be required inputs.\n * Bearer - the client sends a request with an Authorization header that includes a **bearer token**. 
This setting will be a required input.\n\n\n ### Webhook service\n\n A webhook integration allows your application to receive real-time alerts from Netdata by sending HTTP requests to a specified URL. In this document, we'll go over the steps to set up a generic webhook integration, including adding headers and implementing different types of authorization mechanisms.\n\n #### Netdata webhook integration\n\n A webhook integration is a way for one service to notify another service about events that occur within it. This is done by sending an HTTP POST request to a specified URL (known as the \"webhook URL\") when an event occurs.\n\n The Netdata webhook integration service will send alert and reachability notifications to the destination service as soon as they are detected.\n\n For alert notifications, the content sent to the destination service contains a JSON object with the following properties:\n\n | field | type | description |\n | :-- | :-- | :-- |\n | message | string | A summary message of the alert. |\n | alarm | string | The alarm the notification is about. |\n | info | string | Additional info related to the alert. |\n | chart | string | The chart associated with the alert. |\n | context | string | The chart context. |\n | space | string | The space where the node that raised the alert is assigned. |\n | Rooms | object[object(string,string)] | Object with the list of Room names and URLs the node belongs to. |\n | family | string | Context family. |\n | class | string | Classification of the alert, e.g. \"Error\". |\n | severity | string | Alert severity, can be one of \"warning\", \"critical\" or \"clear\". |\n | date | string | Date of the alert in ISO8601 format. |\n | duration | string | Duration the alert has been raised. |\n | additional_active_critical_alerts | integer | Number of additional critical alerts currently existing on the same node. |\n | additional_active_warning_alerts | integer | Number of additional warning alerts currently existing on the same node. |\n | alarm_url | string | Netdata Cloud URL for this alarm. |\n\n For reachability notifications, the JSON object will contain the following properties:\n\n | field | type | description |\n | :-- | :-- | :-- |\n | message | string | A summary message of the reachability alert. |\n | url | string | Netdata Cloud URL for the host experiencing the reachability alert. |\n | host | string | The host experiencing the reachability alert. |\n | severity | string | Severity for this notification. If the host is reachable, severity will be 'info'; if the host is unreachable, it will be 'critical'. |\n | status | object | An object with the status information. |\n | status.reachable | boolean | True if the host is reachable, false otherwise. |\n | status.text | string | Can be 'reachable' or 'unreachable'. |\n\n #### Extra headers\n\n When setting up a webhook integration, the user can specify a set of headers to be included in the HTTP requests sent to the webhook URL.\n\n By default, the following headers will be sent in the HTTP request:\n\n | **Header** | **Value** |\n |:-------------------------------:|-----------------------------|\n | Content-Type | application/json |\n\n #### Authentication mechanisms\n\n Netdata webhook integration supports 3 different authentication mechanisms:\n\n ##### Mutual TLS authentication (recommended)\n\n In mutual Transport Layer Security (mTLS) authentication, the client and the server authenticate each other using X.509 certificates. 
This ensures that the client is connecting to the intended server, and that the server is only accepting connections from authorized clients.\n\n This is the default authentication mechanism used if no other method is selected.\n\n To take advantage of mutual TLS, you can configure your server to verify Netdata's client certificate. In order to achieve this, the Netdata client sending the notification supports mutual TLS (mTLS) to identify itself with a client certificate that your server can validate.\n\n The steps to perform this validation are as follows:\n\n - Store the Netdata CA certificate in a file on your disk. The content of this file should be:\n\n
\n Netdata CA certificate\n\n ```\n -----BEGIN CERTIFICATE-----\n MIIF0jCCA7qgAwIBAgIUDV0rS5jXsyNX33evHEQOwn9fPo0wDQYJKoZIhvcNAQEN\n BQAwgYAxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH\n Ew1TYW4gRnJhbmNpc2NvMRYwFAYDVQQKEw1OZXRkYXRhLCBJbmMuMRIwEAYDVQQL\n EwlDbG91ZCBTUkUxGDAWBgNVBAMTD05ldGRhdGEgUm9vdCBDQTAeFw0yMzAyMjIx\n MjQzMDBaFw0zMzAyMTkxMjQzMDBaMIGAMQswCQYDVQQGEwJVUzETMBEGA1UECBMK\n Q2FsaWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEWMBQGA1UEChMNTmV0\n ZGF0YSwgSW5jLjESMBAGA1UECxMJQ2xvdWQgU1JFMRgwFgYDVQQDEw9OZXRkYXRh\n IFJvb3QgQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCwIg7z3R++\n ppQYYVVoMIDlhWO3qVTMsAQoJYEvVa6fqaImUBLW/k19LUaXgUJPohB7gBp1pkjs\n QfY5dBo8iFr7MDHtyiAFjcQV181sITTMBEJwp77R4slOXCvrreizhTt1gvf4S1zL\n qeHBYWEgH0RLrOAqD0jkOHwewVouO0k3Wf2lEbCq3qRk2HeDvkv0LR7sFC+dDms8\n fDHqb/htqhk+FAJELGRqLeaFq1Z5Eq1/9dk4SIeHgK5pdYqsjpBzOTmocgriw6he\n s7F3dOec1ZZdcBEAxOjbYt4e58JwuR81cWAVMmyot5JNCzYVL9e5Vc5n22qt2dmc\n Tzw2rLOPt9pT5bzbmyhcDuNg2Qj/5DySAQ+VQysx91BJRXyUimqE7DwQyLhpQU72\n jw29lf2RHdCPNmk8J1TNropmpz/aI7rkperPugdOmxzP55i48ECbvDF4Wtazi+l+\n 4kx7ieeLfEQgixy4lRUUkrgJlIDOGbw+d2Ag6LtOgwBiBYnDgYpvLucnx5cFupPY\n Cy3VlJ4EKUeQQSsz5kVmvotk9MED4sLx1As8V4e5ViwI5dCsRfKny7BeJ6XNPLnw\n PtMh1hbiqCcDmB1urCqXcMle4sRhKccReYOwkLjLLZ80A+MuJuIEAUUuEPCwywzU\n R7pagYsmvNgmwIIuJtB6mIJBShC7TpJG+wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMC\n AQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU9IbvOsPSUrpr8H2zSafYVQ9e\n Ft8wDQYJKoZIhvcNAQENBQADggIBABQ08aI31VKZs8jzg+y/QM5cvzXlVhcpkZsY\n 1VVBr0roSBw9Pld9SERrEHto8PVXbadRxeEs4sKivJBKubWAooQ6NTvEB9MHuGnZ\n VCU+N035Gq/mhBZgtIs/Zz33jTB2ju3G4Gm9VTZbVqd0OUxFs41Iqvi0HStC3/Io\n rKi7crubmp5f2cNW1HrS++ScbTM+VaKVgQ2Tg5jOjou8wtA+204iYXlFpw9Q0qnP\n qq6ix7TfLLeRVp6mauwPsAJUgHZluz7yuv3r7TBdukU4ZKUmfAGIPSebtB3EzXfH\n 7Y326xzv0hEpjvDHLy6+yFfTdBSrKPsMHgc9bsf88dnypNYL8TUiEHlcTgCGU8ts\n ud8sWN2M5FEWbHPNYRVfH3xgY2iOYZzn0i+PVyGryOPuzkRHTxDLPIGEWE5susM4\n X4bnNJyKH1AMkBCErR34CLXtAe2ngJlV/V3D4I8CQFJdQkn9tuznohUU/j80xvPH\n FOcDGQYmh4m2aIJtlNVP6+/92Siugb5y7HfslyRK94+bZBg2D86TcCJWaaZOFUrR\n Y3WniYXsqM5/JI4OOzu7dpjtkJUYvwtg7Qb5jmm8Ilf5rQZJhuvsygzX6+WM079y\n nsjoQAm6OwpTN5362vE9SYu1twz7KdzBlUkDhePEOgQkWfLHBJWwB+PvB1j/cUA3\n 5zrbwvQf\n -----END CERTIFICATE-----\n ```\n
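If your webhook endpoint terminates TLS itself rather than sitting behind NGINX or Apache, the same client-certificate check can be sketched with Python's standard library. This is a minimal illustration only: the listening port, the `server.crt`/`server.key` file names, and saving the CA above as `Netdata_CA.pem` are assumptions, and the CN check (`app.netdata.cloud`) performed in the web server examples below would still need to happen at the application layer.

```python
# Minimal sketch: accept webhook requests only from clients presenting a
# certificate signed by Netdata's CA. Paths and port are assumptions.
import http.server
import ssl

class Handler(http.server.BaseHTTPRequestHandler):
    def do_POST(self):
        # ... handle the Netdata notification payload here ...
        self.send_response(200)
        self.end_headers()

httpd = http.server.HTTPServer(("0.0.0.0", 8443), Handler)

ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_cert_chain(certfile="server.crt", keyfile="server.key")  # your server identity
ctx.verify_mode = ssl.CERT_REQUIRED                 # reject clients without a certificate
ctx.load_verify_locations(cafile="Netdata_CA.pem")  # trust only Netdata's CA

httpd.socket = ctx.wrap_socket(httpd.socket, server_side=True)
httpd.serve_forever()
```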
\n\n - Enable client certificate validation on the web server that is doing the TLS termination. Below we show you how to perform this configuration in `NGINX` and `Apache`.\n\n **NGINX**\n\n ```bash\n server {\n listen 443 ssl default_server;\n\n # ... existing SSL configuration for server authentication ...\n ssl_verify_client on;\n ssl_client_certificate /path/to/Netdata_CA.pem;\n\n location / {\n if ($ssl_client_s_dn !~ \"CN=app.netdata.cloud\") {\n return 403;\n }\n # ... existing location configuration ...\n }\n }\n ```\n\n **Apache**\n\n ```bash\n Listen 443\n \n <VirtualHost *:443>\n # ... existing SSL configuration for server authentication ...\n SSLVerifyClient require\n SSLCACertificateFile \"/path/to/Netdata_CA.pem\"\n </VirtualHost>\n \n <Directory /var/www/>\n Require expr \"%{SSL_CLIENT_S_DN_CN} == 'app.netdata.cloud'\"\n # ... existing directory configuration ...\n </Directory>\n ```\n\n ##### Basic authentication\n\n In basic authorization, the client sends a request with an Authorization header that includes a base64-encoded string in the format username:password. The server then uses this information to authenticate the client. If this authentication method is selected, the user can set the user and password that will be used when connecting to the destination service.\n\n ##### Bearer token authentication\n\n In bearer token authentication, the client sends a request with an Authorization header that includes a bearer token. The server then uses this token to authenticate the client. Bearer tokens are typically generated by an authentication service, and are passed to the client after a successful authentication. If this method is selected, the user can set the token to be used for connecting to the destination service.\n\n ##### Challenge secret\n\n To validate that you have ownership of the web application that will receive the webhook events, we use a challenge-response check mechanism.\n\n This mechanism works as follows:\n\n - The challenge secret parameter that you provide is a shared secret between you and Netdata only.\n - On your request for creating a new Webhook integration, we will make a GET request to the URL of the webhook, adding a query parameter `crc_token`, consisting of a random string.\n - You will receive this request on your application and it must construct a response, consisting of a base64-encoded HMAC SHA-256 hash created from the crc_token and the shared secret. 
The response will be in the format:\n\n ```json\n {\n \"response_token\": \"sha256=9GKoHJYmcHIkhD+C182QWN79YBd+D+Vkj4snmZrfNi4=\"\n }\n ```\n\n - We will compare your application's response with the hash that we will generate using the challenge secret, and if they are the same, the integration creation will succeed.\n\n We will do this validation every time you update your integration configuration.\n\n - Response requirements:\n - A base64-encoded HMAC SHA-256 hash created from the crc_token and the shared secret.\n - Valid response_token and JSON format.\n - Latency less than 5 seconds.\n - 200 HTTP response code.\n\n **Example response token generation in Python:**\n\n Here you can see how to define a handler for a Flask application in Python 3:\n\n ```python\n import base64\n import hashlib\n import hmac\n import json\n\n from flask import Flask, request\n\n app = Flask(__name__)\n\n key = 'YOUR_CHALLENGE_SECRET'\n\n @app.route('/webhooks/netdata')\n def webhook_challenge():\n token = request.args.get('crc_token').encode('ascii')\n\n # create an HMAC SHA-256 hash from the incoming token and your challenge secret\n sha256_hash_digest = hmac.new(key.encode(),\n msg=token,\n digestmod=hashlib.sha256).digest()\n\n # construct the response data with the base64-encoded hash\n response = {\n 'response_token': 'sha256=' + base64.b64encode(sha256_hash_digest).decode('ascii')\n }\n\n # return a properly formatted JSON response\n return json.dumps(response)\n ```\n\n", - "integration_type": "notification", - "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", - "troubleshooting": "" - }, { "id": "notify-custom", "meta": { @@ -21742,9 +21627,9 @@ export const integrations = [ "custom" ], "overview": "# Custom\n\nNetdata Agent's alert notification feature allows you to send custom notifications to any endpoint you choose.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_CUSTOM | Set `SEND_CUSTOM` to YES | YES | yes |\n| DEFAULT_RECIPIENT_CUSTOM | This value is dependent on how you handle the `${to}` variable inside the `custom_sender()` function. | | yes |\n| custom_sender() | You can look at the other senders in `/usr/libexec/netdata/plugins.d/alarm-notify.sh` for examples of how to modify the function in this configuration file. | | no |\n\n##### DEFAULT_RECIPIENT_CUSTOM\n\nAll roles will default to this variable if left unconfigured. 
You can edit `DEFAULT_RECIPIENT_CUSTOM` with the variable you want, in the following entries at the bottom of the same file:\n```\nrole_recipients_custom[sysadmin]=\"systems\"\nrole_recipients_custom[domainadmin]=\"domains\"\nrole_recipients_custom[dba]=\"databases systems\"\nrole_recipients_custom[webmaster]=\"marketing development\"\nrole_recipients_custom[proxyadmin]=\"proxy-admin\"\nrole_recipients_custom[sitemgr]=\"sites\"\n```\n\n\n##### custom_sender()\n\nThe following is a sample custom_sender() function in health_alarm_notify.conf, to send an SMS via an imaginary HTTPS endpoint to the SMS gateway:\n```\ncustom_sender() {\n # example human readable SMS\n local msg=\"${host} ${status_message}: ${alarm} ${raised_for}\"\n\n # limit it to 160 characters and encode it for use in a URL\n urlencode \"${msg:0:160}\" >/dev/null; msg=\"${REPLY}\"\n\n # a space separated list of the recipients to send alarms to\n to=\"${1}\"\n\n for phone in ${to}; do\n httpcode=$(docurl -X POST \\\n --data-urlencode \"From=XXX\" \\\n --data-urlencode \"To=${phone}\" \\\n --data-urlencode \"Body=${msg}\" \\\n -u \"${accountsid}:${accounttoken}\" \\\n https://domain.website.com/)\n\n if [ \"${httpcode}\" = \"200\" ]; then\n info \"sent custom notification ${msg} to ${phone}\"\n sent=$((sent + 1))\n else\n error \"failed to send custom notification ${msg} to ${phone} with HTTP error code ${httpcode}.\"\n fi\n done\n}\n```\n\nThe supported variables that you can use for the function's `msg` variable are:\n\n| Variable name | Description |\n|:---------------------------:|:---------------------------------------------------------------------------------|\n| `${alarm}` | Like \"name = value units\" |\n| `${status_message}` | Like \"needs attention\", \"recovered\", \"is critical\" |\n| `${severity}` | Like \"Escalated to CRITICAL\", \"Recovered from WARNING\" |\n| `${raised_for}` | Like \"(alarm was raised for 10 minutes)\" |\n| `${host}` | The host generated this event |\n| `${url_host}` | Same as ${host} but URL encoded |\n| `${unique_id}` | The unique id of this event |\n| `${alarm_id}` | The unique id of the alarm that generated this event |\n| `${event_id}` | The incremental id of the event, for this alarm id |\n| `${when}` | The timestamp this event occurred |\n| `${name}` | The name of the alarm, as given in netdata health.d entries |\n| `${url_name}` | Same as ${name} but URL encoded |\n| `${chart}` | The name of the chart (type.id) |\n| `${url_chart}` | Same as ${chart} but URL encoded |\n| `${status}` | The current status : REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL |\n| `${old_status}` | The previous status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL |\n| `${value}` | The current value of the alarm |\n| `${old_value}` | The previous value of the alarm |\n| `${src}` | The line number and file the alarm has been configured |\n| `${duration}` | The duration in seconds of the previous alarm state |\n| `${duration_txt}` | Same as ${duration} for humans |\n| `${non_clear_duration}` | The total duration in seconds this is/was non-clear |\n| `${non_clear_duration_txt}` | Same as ${non_clear_duration} for humans |\n| `${units}` | The units of the value |\n| `${info}` | A short description of the alarm |\n| `${value_string}` | Friendly value (with units) |\n| `${old_value_string}` | Friendly old value (with units) |\n| `${image}` | The URL of an image to represent the status of the alarm |\n| `${color}` | A color in AABBCC format for the alarm |\n| `${goto_url}` | The URL the user 
can click to see the netdata dashboard |\n| `${calc_expression}` | The expression evaluated to provide the value for the alarm |\n| `${calc_param_values}` | The value of the variables in the evaluated expression |\n| `${total_warnings}` | The total number of alarms in WARNING state on the host |\n| `${total_critical}` | The total number of alarms in CRITICAL state on the host |\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# custom notifications\n\nSEND_CUSTOM=\"YES\"\nDEFAULT_RECIPIENT_CUSTOM=\"\"\n\n# The custom_sender() is a custom function to do whatever you need to do\ncustom_sender() {\n # example human readable SMS\n local msg=\"${host} ${status_message}: ${alarm} ${raised_for}\"\n\n # limit it to 160 characters and encode it for use in a URL\n urlencode \"${msg:0:160}\" >/dev/null; msg=\"${REPLY}\"\n\n # a space separated list of the recipients to send alarms to\n to=\"${1}\"\n\n for phone in ${to}; do\n httpcode=$(docurl -X POST \\\n --data-urlencode \"From=XXX\" \\\n --data-urlencode \"To=${phone}\" \\\n --data-urlencode \"Body=${msg}\" \\\n -u \"${accountsid}:${accounttoken}\" \\\n https://domain.website.com/)\n\n if [ \"${httpcode}\" = \"200\" ]; then\n info \"sent custom notification ${msg} to ${phone}\"\n sent=$((sent + 1))\n else\n error \"failed to send custom notification ${msg} to ${phone} with HTTP error code ${httpcode}.\"\n fi\n done\n}\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_CUSTOM | Set `SEND_CUSTOM` to YES | YES | yes |\n| DEFAULT_RECIPIENT_CUSTOM | This value is dependent on how you handle the `${to}` variable inside the `custom_sender()` function. | | yes |\n| custom_sender() | You can look at the other senders in `/usr/libexec/netdata/plugins.d/alarm-notify.sh` for examples of how to modify the function in this configuration file. | | no |\n\n##### DEFAULT_RECIPIENT_CUSTOM\n\nAll roles will default to this variable if left unconfigured. 
You can edit `DEFAULT_RECIPIENT_CUSTOM` with the variable you want, in the following entries at the bottom of the same file:\n```\nrole_recipients_custom[sysadmin]=\"systems\"\nrole_recipients_custom[domainadmin]=\"domains\"\nrole_recipients_custom[dba]=\"databases systems\"\nrole_recipients_custom[webmaster]=\"marketing development\"\nrole_recipients_custom[proxyadmin]=\"proxy-admin\"\nrole_recipients_custom[sitemgr]=\"sites\"\n```\n\n\n##### custom_sender()\n\nThe following is a sample custom_sender() function in health_alarm_notify.conf, to send an SMS via an imaginary HTTPS endpoint to the SMS gateway:\n```\ncustom_sender() {\n # example human readable SMS\n local msg=\"${host} ${status_message}: ${alarm} ${raised_for}\"\n\n # limit it to 160 characters and encode it for use in a URL\n urlencode \"${msg:0:160}\" >/dev/null; msg=\"${REPLY}\"\n\n # a space separated list of the recipients to send alarms to\n to=\"${1}\"\n\n for phone in ${to}; do\n httpcode=$(docurl -X POST \\\n --data-urlencode \"From=XXX\" \\\n --data-urlencode \"To=${phone}\" \\\n --data-urlencode \"Body=${msg}\" \\\n -u \"${accountsid}:${accounttoken}\" \\\n https://domain.website.com/)\n\n if [ \"${httpcode}\" = \"200\" ]; then\n info \"sent custom notification ${msg} to ${phone}\"\n sent=$((sent + 1))\n else\n error \"failed to send custom notification ${msg} to ${phone} with HTTP error code ${httpcode}.\"\n fi\n done\n}\n```\n\nThe supported variables that you can use for the function's `msg` variable are:\n\n| Variable name | Description |\n|:---------------------------:|:---------------------------------------------------------------------------------|\n| `${alarm}` | Like \"name = value units\" |\n| `${status_message}` | Like \"needs attention\", \"recovered\", \"is critical\" |\n| `${severity}` | Like \"Escalated to CRITICAL\", \"Recovered from WARNING\" |\n| `${raised_for}` | Like \"(alarm was raised for 10 minutes)\" |\n| `${host}` | The host generated this event |\n| `${url_host}` | Same as ${host} but URL encoded |\n| `${unique_id}` | The unique id of this event |\n| `${alarm_id}` | The unique id of the alarm that generated this event |\n| `${event_id}` | The incremental id of the event, for this alarm id |\n| `${when}` | The timestamp this event occurred |\n| `${name}` | The name of the alarm, as given in netdata health.d entries |\n| `${url_name}` | Same as ${name} but URL encoded |\n| `${chart}` | The name of the chart (type.id) |\n| `${url_chart}` | Same as ${chart} but URL encoded |\n| `${status}` | The current status : REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL |\n| `${old_status}` | The previous status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL |\n| `${value}` | The current value of the alarm |\n| `${old_value}` | The previous value of the alarm |\n| `${src}` | The line number and file the alarm has been configured |\n| `${duration}` | The duration in seconds of the previous alarm state |\n| `${duration_txt}` | Same as ${duration} for humans |\n| `${non_clear_duration}` | The total duration in seconds this is/was non-clear |\n| `${non_clear_duration_txt}` | Same as ${non_clear_duration} for humans |\n| `${units}` | The units of the value |\n| `${info}` | A short description of the alarm |\n| `${value_string}` | Friendly value (with units) |\n| `${old_value_string}` | Friendly old value (with units) |\n| `${image}` | The URL of an image to represent the status of the alarm |\n| `${color}` | A color in AABBCC format for the alarm |\n| `${goto_url}` | The URL the user 
can click to see the netdata dashboard |\n| `${calc_expression}` | The expression evaluated to provide the value for the alarm |\n| `${calc_param_values}` | The value of the variables in the evaluated expression |\n| `${total_warnings}` | The total number of alarms in WARNING state on the host |\n| `${total_critical}` | The total number of alarms in CRITICAL state on the host |\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# custom notifications\n\nSEND_CUSTOM=\"YES\"\nDEFAULT_RECIPIENT_CUSTOM=\"\"\n\n# The custom_sender() is a custom function to do whatever you need to do\ncustom_sender() {\n # example human readable SMS\n local msg=\"${host} ${status_message}: ${alarm} ${raised_for}\"\n\n # limit it to 160 characters and encode it for use in a URL\n urlencode \"${msg:0:160}\" >/dev/null; msg=\"${REPLY}\"\n\n # a space separated list of the recipients to send alarms to\n to=\"${1}\"\n\n for phone in ${to}; do\n httpcode=$(docurl -X POST \\\n --data-urlencode \"From=XXX\" \\\n --data-urlencode \"To=${phone}\" \\\n --data-urlencode \"Body=${msg}\" \\\n -u \"${accountsid}:${accounttoken}\" \\\n https://domain.website.com/)\n\n if [ \"${httpcode}\" = \"200\" ]; then\n info \"sent custom notification ${msg} to ${phone}\"\n sent=$((sent + 1))\n else\n error \"failed to send custom notification ${msg} to ${phone} with HTTP error code ${httpcode}.\"\n fi\n done\n}\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/custom/metadata.yaml" }, { @@ -21761,9 +21646,9 @@ export const integrations = [ "Discord" ], "overview": "# Discord\n\nSend notifications to Discord using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The incoming webhook URL as given by Discord. Create a webhook by following the official [Discord documentation](https://support.discord.com/hc/en-us/articles/228383668-Intro-to-Webhooks). 
You can use the same on all your Netdata servers (or you can have multiple if you like - your decision).\n- One or more Discord channels to post the messages to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_DISCORD | Set `SEND_DISCORD` to YES | YES | yes |\n| DISCORD_WEBHOOK_URL | set `DISCORD_WEBHOOK_URL` to your webhook URL. | | yes |\n| DEFAULT_RECIPIENT_DISCORD | Set `DEFAULT_RECIPIENT_DISCORD` to the channel you want the alert notifications to be sent to. You can define multiple channels like this: `alerts` `systems`. | | yes |\n\n##### DEFAULT_RECIPIENT_DISCORD\n\nAll roles will default to this variable if left unconfigured.\nYou can then have different channels per role, by editing `DEFAULT_RECIPIENT_DISCORD` with the channel you want, in the following entries at the bottom of the same file:\n```conf\nrole_recipients_discord[sysadmin]=\"systems\"\nrole_recipients_discord[domainadmin]=\"domains\"\nrole_recipients_discord[dba]=\"databases systems\"\nrole_recipients_discord[webmaster]=\"marketing development\"\nrole_recipients_discord[proxyadmin]=\"proxy-admin\"\nrole_recipients_discord[sitemgr]=\"sites\"\n```\n\nThe values you provide should already exist as Discord channels in your server.\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# discord (discordapp.com) global notification options\n\nSEND_DISCORD=\"YES\"\nDISCORD_WEBHOOK_URL=\"https://discord.com/api/webhooks/XXXXXXXXXXXXX/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\nDEFAULT_RECIPIENT_DISCORD=\"alerts\"\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The incoming webhook URL as given by Discord. Create a webhook by following the official [Discord documentation](https://support.discord.com/hc/en-us/articles/228383668-Intro-to-Webhooks). 
You can use the same on all your Netdata servers (or you can have multiple if you like - your decision).\n- One or more Discord channels to post the messages to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_DISCORD | Set `SEND_DISCORD` to YES | YES | yes |\n| DISCORD_WEBHOOK_URL | set `DISCORD_WEBHOOK_URL` to your webhook URL. | | yes |\n| DEFAULT_RECIPIENT_DISCORD | Set `DEFAULT_RECIPIENT_DISCORD` to the channel you want the alert notifications to be sent to. You can define multiple channels like this: `alerts` `systems`. | | yes |\n\n##### DEFAULT_RECIPIENT_DISCORD\n\nAll roles will default to this variable if left unconfigured.\nYou can then have different channels per role, by editing `DEFAULT_RECIPIENT_DISCORD` with the channel you want, in the following entries at the bottom of the same file:\n```text\nrole_recipients_discord[sysadmin]=\"systems\"\nrole_recipients_discord[domainadmin]=\"domains\"\nrole_recipients_discord[dba]=\"databases systems\"\nrole_recipients_discord[webmaster]=\"marketing development\"\nrole_recipients_discord[proxyadmin]=\"proxy-admin\"\nrole_recipients_discord[sitemgr]=\"sites\"\n```\n\nThe values you provide should already exist as Discord channels in your server.\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# discord (discordapp.com) global notification options\n\nSEND_DISCORD=\"YES\"\nDISCORD_WEBHOOK_URL=\"https://discord.com/api/webhooks/XXXXXXXXXXXXX/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\nDEFAULT_RECIPIENT_DISCORD=\"alerts\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/discord/metadata.yaml" }, { @@ -21780,9 +21665,9 @@ export const integrations = [ "Dynatrace" ], "overview": "# Dynatrace\n\nDynatrace allows you to receive notifications using their Events REST API. 
See the [Dynatrace documentation](https://www.dynatrace.com/support/help/dynatrace-api/environment-api/events-v2/post-event) about POSTing an event in the Events API for more details.\nYou can send notifications to Dynatrace using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A Dynatrace Server. You can use the same on all your Netdata servers but make sure the server is network visible from your Netdata hosts. The Dynatrace server should be with protocol prefixed (http:// or https://), for example: https://monitor.example.com.\n- An API Token. Generate a secure access API token that enables access to your Dynatrace monitoring data via the REST-based API. See [Dynatrace API - Authentication](https://www.dynatrace.com/support/help/extend-dynatrace/dynatrace-api/basics/dynatrace-api-authentication/) for more details.\n- An API Space. This is the URL part of the page you have access in order to generate the API Token. For example, the URL for a generated API token might look like: https://monitor.illumineit.com/e/2a93fe0e-4cd5-469a-9d0d-1a064235cfce/#settings/integration/apikeys;gf=all In that case, the Space is 2a93fe0e-4cd5-469a-9d0d-1a064235cfce.\n- A Server Tag. To generate one on your Dynatrace Server, go to Settings --> Tags --> Manually applied tags and create the Tag. The Netdata alarm is sent as a Dynatrace Event to be correlated with all those hosts tagged with this Tag you have created.\n- Terminal access to the Agent you wish to configure\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_DYNATRACE | Set `SEND_DYNATRACE` to YES | YES | yes |\n| DYNATRACE_SERVER | Set `DYNATRACE_SERVER` to the Dynatrace server with the protocol prefix, for example `https://monitor.example.com`. | | yes |\n| DYNATRACE_TOKEN | Set `DYNATRACE_TOKEN` to your Dynatrace API authentication token | | yes |\n| DYNATRACE_SPACE | Set `DYNATRACE_SPACE` to the API Space, it is the URL part of the page you have access in order to generate the API Token. | | yes |\n| DYNATRACE_TAG_VALUE | Set `DYNATRACE_TAG_VALUE` to your Dynatrace Server Tag. | | yes |\n| DYNATRACE_ANNOTATION_TYPE | `DYNATRACE_ANNOTATION_TYPE` can be left to its default value Netdata Alarm, but you can change it to better fit your needs. | Netdata Alarm | no |\n| DYNATRACE_EVENT | Set `DYNATRACE_EVENT` to the Dynatrace eventType you want. 
| Netdata Alarm | no |\n\n##### DYNATRACE_SPACE\n\nFor example, the URL for a generated API token might look like: https://monitor.illumineit.com/e/2a93fe0e-4cd5-469a-9d0d-1a064235cfce/#settings/integration/apikeys;gf=all In that case, the Space is 2a93fe0e-4cd5-469a-9d0d-1a064235cfce.\n\n\n##### DYNATRACE_EVENT\n\n`AVAILABILITY_EVENT`, `CUSTOM_ALERT`, `CUSTOM_ANNOTATION`, `CUSTOM_CONFIGURATION`, `CUSTOM_DEPLOYMENT`, `CUSTOM_INFO`, `ERROR_EVENT`,\n`MARKED_FOR_TERMINATION`, `PERFORMANCE_EVENT`, `RESOURCE_CONTENTION_EVENT`.\nYou can read more [here](https://www.dynatrace.com/support/help/dynatrace-api/environment-api/events-v2/post-event#request-body-objects).\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Dynatrace global notification options\n\nSEND_DYNATRACE=\"YES\"\nDYNATRACE_SERVER=\"https://monitor.example.com\"\nDYNATRACE_TOKEN=\"XXXXXXX\"\nDYNATRACE_SPACE=\"2a93fe0e-4cd5-469a-9d0d-1a064235cfce\"\nDYNATRACE_TAG_VALUE=\"SERVERTAG\"\nDYNATRACE_ANNOTATION_TYPE=\"Netdata Alert\"\nDYNATRACE_EVENT=\"AVAILABILITY_EVENT\"\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A Dynatrace Server. You can use the same on all your Netdata servers but make sure the server is network visible from your Netdata hosts. The Dynatrace server should be prefixed with the protocol (http:// or https://), for example: https://monitor.example.com.\n- An API Token. Generate a secure access API token that enables access to your Dynatrace monitoring data via the REST-based API. See [Dynatrace API - Authentication](https://www.dynatrace.com/support/help/extend-dynatrace/dynatrace-api/basics/dynatrace-api-authentication/) for more details.\n- An API Space. This is the URL part of the page you have access to in order to generate the API Token. For example, the URL for a generated API token might look like: https://monitor.illumineit.com/e/2a93fe0e-4cd5-469a-9d0d-1a064235cfce/#settings/integration/apikeys;gf=all In that case, the Space is 2a93fe0e-4cd5-469a-9d0d-1a064235cfce.\n- A Server Tag. To generate one on your Dynatrace Server, go to Settings --> Tags --> Manually applied tags and create the Tag. The Netdata alarm is sent as a Dynatrace Event to be correlated with all those hosts tagged with this Tag you have created.\n- Terminal access to the Agent you wish to configure\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_DYNATRACE | Set `SEND_DYNATRACE` to YES | YES | yes |\n| DYNATRACE_SERVER | Set `DYNATRACE_SERVER` to the Dynatrace server with the protocol prefix, for example `https://monitor.example.com`. 
| | yes |\n| DYNATRACE_TOKEN | Set `DYNATRACE_TOKEN` to your Dynatrace API authentication token | | yes |\n| DYNATRACE_SPACE | Set `DYNATRACE_SPACE` to the API Space, it is the URL part of the page you have access in order to generate the API Token. | | yes |\n| DYNATRACE_TAG_VALUE | Set `DYNATRACE_TAG_VALUE` to your Dynatrace Server Tag. | | yes |\n| DYNATRACE_ANNOTATION_TYPE | `DYNATRACE_ANNOTATION_TYPE` can be left to its default value Netdata Alarm, but you can change it to better fit your needs. | Netdata Alarm | no |\n| DYNATRACE_EVENT | Set `DYNATRACE_EVENT` to the Dynatrace eventType you want. | Netdata Alarm | no |\n\n##### DYNATRACE_SPACE\n\nFor example, the URL for a generated API token might look like: https://monitor.illumineit.com/e/2a93fe0e-4cd5-469a-9d0d-1a064235cfce/#settings/integration/apikeys;gf=all In that case, the Space is 2a93fe0e-4cd5-469a-9d0d-1a064235cfce.\n\n\n##### DYNATRACE_EVENT\n\n`AVAILABILITY_EVENT`, `CUSTOM_ALERT`, `CUSTOM_ANNOTATION`, `CUSTOM_CONFIGURATION`, `CUSTOM_DEPLOYMENT`, `CUSTOM_INFO`, `ERROR_EVENT`,\n`MARKED_FOR_TERMINATION`, `PERFORMANCE_EVENT`, `RESOURCE_CONTENTION_EVENT`.\nYou can read more [here](https://www.dynatrace.com/support/help/dynatrace-api/environment-api/events-v2/post-event#request-body-objects).\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Dynatrace global notification options\n\nSEND_DYNATRACE=\"YES\"\nDYNATRACE_SERVER=\"https://monitor.example.com\"\nDYNATRACE_TOKEN=\"XXXXXXX\"\nDYNATRACE_SPACE=\"2a93fe0e-4cd5-469a-9d0d-1a064235cfce\"\nDYNATRACE_TAG_VALUE=\"SERVERTAG\"\nDYNATRACE_ANNOTATION_TYPE=\"Netdata Alert\"\nDYNATRACE_EVENT=\"AVAILABILITY_EVENT\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/dynatrace/metadata.yaml" }, { @@ -21799,9 +21684,9 @@ export const integrations = [ "email" ], "overview": "# Email\n\nSend notifications via Email using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A working sendmail command is required for email alerts to work. Almost all MTAs provide a sendmail interface. 
Netdata sends all emails as user netdata, so make sure your sendmail works for local users.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| EMAIL_SENDER | You can change `EMAIL_SENDER` to the email address sending the notifications. | netdata | no |\n| SEND_EMAIL | Set `SEND_EMAIL` to YES | YES | yes |\n| DEFAULT_RECIPIENT_EMAIL | Set `DEFAULT_RECIPIENT_EMAIL` to the email address you want the email to be sent by default. You can define multiple email addresses like this: `alarms@example.com` `systems@example.com`. | root | yes |\n\n##### DEFAULT_RECIPIENT_EMAIL\n\nAll roles will default to this variable if left unconfigured.\nThe `DEFAULT_RECIPIENT_CUSTOM` can be edited in the following entries at the bottom of the same file:\n```conf\nrole_recipients_email[sysadmin]=\"systems@example.com\"\nrole_recipients_email[domainadmin]=\"domains@example.com\"\nrole_recipients_email[dba]=\"databases@example.com systems@example.com\"\nrole_recipients_email[webmaster]=\"marketing@example.com development@example.com\"\nrole_recipients_email[proxyadmin]=\"proxy-admin@example.com\"\nrole_recipients_email[sitemgr]=\"sites@example.com\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# email global notification options\n\nEMAIL_SENDER=\"example@domain.com\"\nSEND_EMAIL=\"YES\"\nDEFAULT_RECIPIENT_EMAIL=\"recipient@example.com\"\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A working sendmail command is required for email alerts to work. Almost all MTAs provide a sendmail interface. Netdata sends all emails as user netdata, so make sure your sendmail works for local users.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| EMAIL_SENDER | You can change `EMAIL_SENDER` to the email address sending the notifications. | netdata | no |\n| SEND_EMAIL | Set `SEND_EMAIL` to YES | YES | yes |\n| DEFAULT_RECIPIENT_EMAIL | Set `DEFAULT_RECIPIENT_EMAIL` to the email address you want the email to be sent by default. 
You can define multiple email addresses like this: `alarms@example.com` `systems@example.com`. | root | yes |\n\n##### DEFAULT_RECIPIENT_EMAIL\n\nAll roles will default to this variable if left unconfigured.\nThe `DEFAULT_RECIPIENT_EMAIL` can be edited in the following entries at the bottom of the same file:\n```text\nrole_recipients_email[sysadmin]=\"systems@example.com\"\nrole_recipients_email[domainadmin]=\"domains@example.com\"\nrole_recipients_email[dba]=\"databases@example.com systems@example.com\"\nrole_recipients_email[webmaster]=\"marketing@example.com development@example.com\"\nrole_recipients_email[proxyadmin]=\"proxy-admin@example.com\"\nrole_recipients_email[sitemgr]=\"sites@example.com\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# email global notification options\n\nEMAIL_SENDER=\"example@domain.com\"\nSEND_EMAIL=\"YES\"\nDEFAULT_RECIPIENT_EMAIL=\"recipient@example.com\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/email/metadata.yaml" }, { @@ -21818,9 +21703,9 @@ export const integrations = [ "Flock" ], "overview": "# Flock\n\nSend notifications to Flock using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The incoming webhook URL as given by flock.com. You can use the same on all your Netdata servers (or you can have multiple if you like). Read more about flock webhooks and how to get one [here](https://admin.flock.com/webhooks).\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_FLOCK | Set `SEND_FLOCK` to YES | YES | yes |\n| FLOCK_WEBHOOK_URL | set `FLOCK_WEBHOOK_URL` to your webhook URL. | | yes |\n| DEFAULT_RECIPIENT_FLOCK | Set `DEFAULT_RECIPIENT_FLOCK` to the Flock channel you want the alert notifications to be sent to. All roles will default to this variable if left unconfigured. 
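Because Netdata hands mail to the local sendmail interface as the `netdata` user, the sendmail prerequisite above can be verified from a shell running as that user. A minimal sketch, with a placeholder recipient address:

```bash
# become user netdata, the user that alarm-notify.sh runs as
sudo su -s /bin/bash netdata

# hand a minimal message to the local sendmail interface
printf 'Subject: netdata sendmail test\n\nIt works.\n' | sendmail recipient@example.com
```

If this message never arrives, fix the MTA for local users first; the email integration cannot work without it.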
| | yes |\n\n##### DEFAULT_RECIPIENT_FLOCK\n\nYou can have different channels per role, by editing DEFAULT_RECIPIENT_FLOCK with the channel you want, in the following entries at the bottom of the same file:\n```conf\nrole_recipients_flock[sysadmin]=\"systems\"\nrole_recipients_flock[domainadmin]=\"domains\"\nrole_recipients_flock[dba]=\"databases systems\"\nrole_recipients_flock[webmaster]=\"marketing development\"\nrole_recipients_flock[proxyadmin]=\"proxy-admin\"\nrole_recipients_flock[sitemgr]=\"sites\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# flock (flock.com) global notification options\n\nSEND_FLOCK=\"YES\"\nFLOCK_WEBHOOK_URL=\"https://api.flock.com/hooks/sendMessage/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\nDEFAULT_RECIPIENT_FLOCK=\"alarms\"\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The incoming webhook URL as given by flock.com. You can use the same on all your Netdata servers (or you can have multiple if you like). Read more about flock webhooks and how to get one [here](https://admin.flock.com/webhooks).\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_FLOCK | Set `SEND_FLOCK` to YES | YES | yes |\n| FLOCK_WEBHOOK_URL | set `FLOCK_WEBHOOK_URL` to your webhook URL. | | yes |\n| DEFAULT_RECIPIENT_FLOCK | Set `DEFAULT_RECIPIENT_FLOCK` to the Flock channel you want the alert notifications to be sent to. All roles will default to this variable if left unconfigured. 
| | yes |\n\n##### DEFAULT_RECIPIENT_FLOCK\n\nYou can have different channels per role, by editing DEFAULT_RECIPIENT_FLOCK with the channel you want, in the following entries at the bottom of the same file:\n```text\nrole_recipients_flock[sysadmin]=\"systems\"\nrole_recipients_flock[domainadmin]=\"domains\"\nrole_recipients_flock[dba]=\"databases systems\"\nrole_recipients_flock[webmaster]=\"marketing development\"\nrole_recipients_flock[proxyadmin]=\"proxy-admin\"\nrole_recipients_flock[sitemgr]=\"sites\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# flock (flock.com) global notification options\n\nSEND_FLOCK=\"YES\"\nFLOCK_WEBHOOK_URL=\"https://api.flock.com/hooks/sendMessage/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\nDEFAULT_RECIPIENT_FLOCK=\"alarms\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/flock/metadata.yaml" }, { @@ -21837,11 +21722,30 @@ export const integrations = [ "gotify" ], "overview": "# Gotify\n\n[Gotify](https://gotify.net/) is a self-hosted push notification service created for sending and receiving messages in real time.\nYou can send alerts to your Gotify instance using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- An application token. You can generate a new token in the Gotify Web UI.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_GOTIFY | Set `SEND_GOTIFY` to YES | YES | yes |\n| GOTIFY_APP_TOKEN | set `GOTIFY_APP_TOKEN` to the app token you generated. | | yes |\n| GOTIFY_APP_URL | Set `GOTIFY_APP_URL` to point to your Gotify instance, for example `https://push.example.domain/` | | yes |\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\nSEND_GOTIFY=\"YES\"\nGOTIFY_APP_TOKEN=\"XXXXXXXXXXXXXXX\"\nGOTIFY_APP_URL=\"https://push.example.domain/\"\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- An application token. 
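To rule out webhook problems independently of Netdata, you can post a test message straight to the Flock webhook URL. A minimal sketch, assuming the incoming webhook accepts a JSON body with a `text` field; check Flock's webhook documentation if the request is rejected:

```bash
# placeholder webhook URL, same shape as the example configuration above
curl -X POST \
  -H 'Content-Type: application/json' \
  -d '{"text": "test message from netdata"}' \
  "https://api.flock.com/hooks/sendMessage/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
```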
You can generate a new token in the Gotify Web UI.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_GOTIFY | Set `SEND_GOTIFY` to YES | YES | yes |\n| GOTIFY_APP_TOKEN | set `GOTIFY_APP_TOKEN` to the app token you generated. | | yes |\n| GOTIFY_APP_URL | Set `GOTIFY_APP_URL` to point to your Gotify instance, for example `https://push.example.domain/` | | yes |\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\nSEND_GOTIFY=\"YES\"\nGOTIFY_APP_TOKEN=\"XXXXXXXXXXXXXXX\"\nGOTIFY_APP_URL=\"https://push.example.domain/\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/gotify/metadata.yaml" }, + { + "id": "notify-ilert", + "meta": { + "name": "ilert", + "link": "https://www.ilert.com/", + "categories": [ + "notify.agent" + ], + "icon_filename": "ilert.svg" + }, + "keywords": [ + "ilert" + ], + "overview": "# ilert\n\nilert is an alerting and incident management tool. It helps teams reduce response times by enhancing monitoring and ticketing tools with reliable alerts, automatic escalations, on-call schedules, and features for incident response, communication, and status updates.\nNotifications sent to ilert via Netdata's Agent alert notification feature include links, images, and resolution of the corresponding alerts.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A Netdata alert source in ilert. 
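You can confirm the Gotify token and URL before enabling the integration by pushing a message directly to the `/message` endpoint, mirroring the curl example from Gotify's own documentation (URL and token below are placeholders):

```bash
# push a test message to the Gotify instance configured above
curl "https://push.example.domain/message?token=XXXXXXXXXXXXXXX" \
  -F "title=Netdata test" \
  -F "message=Hello from netdata" \
  -F "priority=5"
```

The message should appear immediately in the Gotify Web UI and on any subscribed clients.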
You can create a [Netdata alert source](https://docs.ilert.com/inbound-integrations/netdata) in [ilert](https://www.ilert.com/).\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_ILERT | Set `SEND_ILERT` to YES | YES | yes |\n| ILERT_ALERT_SOURCE_URL | Set `ILERT_ALERT_SOURCE_URL` to your Netdata alert source url in ilert. | | yes |\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\nSEND_ILERT=\"YES\"\nILERT_ALERT_SOURCE_URL=\"https://api.ilert.com/api/v1/events/netdata/{API-KEY}\"\n\n```\n", + "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", + "integration_type": "agent_notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/ilert/metadata.yaml" + }, { "id": "notify-irc", "meta": { @@ -21856,9 +21760,9 @@ export const integrations = [ "IRC" ], "overview": "# IRC\n\nSend notifications to IRC using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The `nc` utility. You can set the path to it, or Netdata will search for it in your system `$PATH`.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| nc path | Set the path for nc, otherwise Netdata will search for it in your system $PATH | | yes |\n| SEND_IRC | Set `SEND_IRC` YES. | YES | yes |\n| IRC_NETWORK | Set `IRC_NETWORK` to the IRC network which your preferred channels belong to. | | yes |\n| IRC_PORT | Set `IRC_PORT` to the IRC port to which a connection will occur. 
| | no |\n| IRC_NICKNAME | Set `IRC_NICKNAME` to the IRC nickname which is required to send the notification. It must not be an already registered name as the connection's MODE is defined as a guest. | | yes |\n| IRC_REALNAME | Set `IRC_REALNAME` to the IRC realname which is required in order to make the connection. | | yes |\n| DEFAULT_RECIPIENT_IRC | You can have different channels per role, by editing `DEFAULT_RECIPIENT_IRC` with the channel you want | | yes |\n\n##### nc path\n\n```sh\n#------------------------------------------------------------------------------\n# external commands\n#\n# The full path of the nc command.\n# If empty, the system $PATH will be searched for it.\n# If not found, irc notifications will be silently disabled.\nnc=\"/usr/bin/nc\"\n```\n\n\n##### DEFAULT_RECIPIENT_IRC\n\nThe `DEFAULT_RECIPIENT_IRC` can be edited in the following entries at the bottom of the same file:\n```conf\nrole_recipients_irc[sysadmin]=\"#systems\"\nrole_recipients_irc[domainadmin]=\"#domains\"\nrole_recipients_irc[dba]=\"#databases #systems\"\nrole_recipients_irc[webmaster]=\"#marketing #development\"\nrole_recipients_irc[proxyadmin]=\"#proxy-admin\"\nrole_recipients_irc[sitemgr]=\"#sites\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# irc notification options\n#\nSEND_IRC=\"YES\"\nDEFAULT_RECIPIENT_IRC=\"#system-alarms\"\nIRC_NETWORK=\"irc.freenode.net\"\nIRC_NICKNAME=\"netdata-alarm-user\"\nIRC_REALNAME=\"netdata-user\"\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The `nc` utility. You can set the path to it, or Netdata will search for it in your system `$PATH`.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| nc path | Set the path for nc, otherwise Netdata will search for it in your system $PATH | | yes |\n| SEND_IRC | Set `SEND_IRC` YES. | YES | yes |\n| IRC_NETWORK | Set `IRC_NETWORK` to the IRC network which your preferred channels belong to. | | yes |\n| IRC_PORT | Set `IRC_PORT` to the IRC port to which a connection will occur. | | no |\n| IRC_NICKNAME | Set `IRC_NICKNAME` to the IRC nickname which is required to send the notification. It must not be an already registered name as the connection's MODE is defined as a guest. | | yes |\n| IRC_REALNAME | Set `IRC_REALNAME` to the IRC realname which is required in order to make the connection. 
| | yes |\n| DEFAULT_RECIPIENT_IRC | You can have different channels per role, by editing `DEFAULT_RECIPIENT_IRC` with the channel you want | | yes |\n\n##### nc path\n\n```sh\n#------------------------------------------------------------------------------\n# external commands\n#\n# The full path of the nc command.\n# If empty, the system $PATH will be searched for it.\n# If not found, irc notifications will be silently disabled.\nnc=\"/usr/bin/nc\"\n```\n\n\n##### DEFAULT_RECIPIENT_IRC\n\nThe `DEFAULT_RECIPIENT_IRC` can be edited in the following entries at the bottom of the same file:\n```text\nrole_recipients_irc[sysadmin]=\"#systems\"\nrole_recipients_irc[domainadmin]=\"#domains\"\nrole_recipients_irc[dba]=\"#databases #systems\"\nrole_recipients_irc[webmaster]=\"#marketing #development\"\nrole_recipients_irc[proxyadmin]=\"#proxy-admin\"\nrole_recipients_irc[sitemgr]=\"#sites\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# irc notification options\n#\nSEND_IRC=\"YES\"\nDEFAULT_RECIPIENT_IRC=\"#system-alarms\"\nIRC_NETWORK=\"irc.freenode.net\"\nIRC_NICKNAME=\"netdata-alarm-user\"\nIRC_REALNAME=\"netdata-user\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/irc/metadata.yaml" }, { @@ -21875,9 +21779,9 @@ export const integrations = [ "Kavenegar" ], "overview": "# Kavenegar\n\n[Kavenegar](https://kavenegar.com/), a service for software developers based in Iran, provides APIs to send and receive SMS and to make voice calls.\nYou can send notifications to Kavenegar using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The APIKEY and Sender from http://panel.kavenegar.com/client/setting/account\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_KAVENEGAR | Set `SEND_KAVENEGAR` to YES | YES | yes |\n| KAVENEGAR_API_KEY | Set `KAVENEGAR_API_KEY` to your API key. | | yes |\n| KAVENEGAR_SENDER | Set `KAVENEGAR_SENDER` to the value of your Sender. 
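Since the IRC method drives `nc` with raw IRC protocol commands, connectivity and channel permissions can be checked by hand with a session along the same lines. A sketch with placeholder server, nickname, and channel; networks that require registration or TLS will need more than this:

```bash
# hand-rolled IRC session over nc: register, join, message, quit
{
  printf 'NICK netdata-alarm-user\r\n'
  printf 'USER netdata-alarm-user 8 * :netdata\r\n'
  sleep 5                                   # allow the server to accept registration
  printf 'JOIN #system-alarms\r\n'
  printf 'PRIVMSG #system-alarms :test message from netdata\r\n'
  printf 'QUIT\r\n'
  sleep 2
} | nc irc.example.net 6667
```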
| | yes |\n| DEFAULT_RECIPIENT_KAVENEGAR | Set `DEFAULT_RECIPIENT_KAVENEGAR` to the SMS recipient you want the alert notifications to be sent to. You can define multiple recipients like this: 09155555555 09177777777. | | yes |\n\n##### DEFAULT_RECIPIENT_KAVENEGAR\n\nAll roles will default to this variable if lest unconfigured.\n\nYou can then have different SMS recipients per role, by editing `DEFAULT_RECIPIENT_KAVENEGAR` with the SMS recipients you want, in the following entries at the bottom of the same file:\n```conf\nrole_recipients_kavenegar[sysadmin]=\"09100000000\"\nrole_recipients_kavenegar[domainadmin]=\"09111111111\"\nrole_recipients_kavenegar[dba]=\"0922222222\"\nrole_recipients_kavenegar[webmaster]=\"0933333333\"\nrole_recipients_kavenegar[proxyadmin]=\"0944444444\"\nrole_recipients_kavenegar[sitemgr]=\"0955555555\"\n```\n\nThe values you provide should be defined as environments in `/etc/alertad.conf` with `ALLOWED_ENVIRONMENTS` option.\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Kavenegar (Kavenegar.com) SMS options\n\nSEND_KAVENEGAR=\"YES\"\nKAVENEGAR_API_KEY=\"XXXXXXXXXXXX\"\nKAVENEGAR_SENDER=\"YYYYYYYY\"\nDEFAULT_RECIPIENT_KAVENEGAR=\"0912345678\"\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The APIKEY and Sender from http://panel.kavenegar.com/client/setting/account\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_KAVENEGAR | Set `SEND_KAVENEGAR` to YES | YES | yes |\n| KAVENEGAR_API_KEY | Set `KAVENEGAR_API_KEY` to your API key. | | yes |\n| KAVENEGAR_SENDER | Set `KAVENEGAR_SENDER` to the value of your Sender. | | yes |\n| DEFAULT_RECIPIENT_KAVENEGAR | Set `DEFAULT_RECIPIENT_KAVENEGAR` to the SMS recipient you want the alert notifications to be sent to. You can define multiple recipients like this: 09155555555 09177777777. 
| | yes |\n\n##### DEFAULT_RECIPIENT_KAVENEGAR\n\nAll roles will default to this variable if left unconfigured.\n\nYou can then have different SMS recipients per role, by editing `DEFAULT_RECIPIENT_KAVENEGAR` with the SMS recipients you want, in the following entries at the bottom of the same file:\n```text\nrole_recipients_kavenegar[sysadmin]=\"09100000000\"\nrole_recipients_kavenegar[domainadmin]=\"09111111111\"\nrole_recipients_kavenegar[dba]=\"0922222222\"\nrole_recipients_kavenegar[webmaster]=\"0933333333\"\nrole_recipients_kavenegar[proxyadmin]=\"0944444444\"\nrole_recipients_kavenegar[sitemgr]=\"0955555555\"\n```\n\nThe values you provide should be defined as environments in `/etc/alertad.conf` with `ALLOWED_ENVIRONMENTS` option.\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Kavenegar (Kavenegar.com) SMS options\n\nSEND_KAVENEGAR=\"YES\"\nKAVENEGAR_API_KEY=\"XXXXXXXXXXXX\"\nKAVENEGAR_SENDER=\"YYYYYYYY\"\nDEFAULT_RECIPIENT_KAVENEGAR=\"0912345678\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/kavenegar/metadata.yaml" }, { @@ -21894,9 +21798,9 @@ export const integrations = [ "Matrix" ], "overview": "# Matrix\n\nSend notifications to Matrix network rooms using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The url of the homeserver (`https://homeserver:port`).\n- Credentials for connecting to the homeserver, in the form of a valid access token for your account (or for a dedicated notification account). These tokens usually don't expire.\n- The Room ids that you want to sent the notification to.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_MATRIX | Set `SEND_MATRIX` to YES | YES | yes |\n| MATRIX_HOMESERVER | set `MATRIX_HOMESERVER` to the URL of the Matrix homeserver. | | yes |\n| MATRIX_ACCESSTOKEN | Set `MATRIX_ACCESSTOKEN` to the access token from your Matrix account. | | yes |\n| DEFAULT_RECIPIENT_MATRIX | Set `DEFAULT_RECIPIENT_MATRIX` to the Rooms you want the alert notifications to be sent to. 
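If test alarms never arrive as SMS, exercising the Kavenegar REST API directly with the same key and sender can isolate the problem. A sketch assuming the `sms/send.json` endpoint from Kavenegar's public API docs; confirm the parameter names there before relying on it:

```bash
# placeholder API key, sender, and recipient matching the example above
curl "https://api.kavenegar.com/v1/XXXXXXXXXXXX/sms/send.json" \
  --data-urlencode "receptor=0912345678" \
  --data-urlencode "sender=YYYYYYYY" \
  --data-urlencode "message=test from netdata"
```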
The format is `!roomid:homeservername`. | | yes |\n\n##### MATRIX_ACCESSTOKEN\n\nTo obtain the access token, you can use the following curl command:\n```\ncurl -XPOST -d '{\"type\":\"m.login.password\", \"user\":\"example\", \"password\":\"wordpass\"}' \"https://homeserver:8448/_matrix/client/r0/login\"\n```\n\n\n##### DEFAULT_RECIPIENT_MATRIX\n\nThe Room ids are unique identifiers and can be obtained from the Room settings in a Matrix client (e.g. Riot).\n\nYou can define multiple Rooms like this: `!roomid1:homeservername` `!roomid2:homeservername`.\n\nAll roles will default to this variable if left unconfigured.\n\nYou can have different Rooms per role, by editing `DEFAULT_RECIPIENT_MATRIX` with the `!roomid:homeservername` you want, in the following entries at the bottom of the same file:\n\n```conf\nrole_recipients_matrix[sysadmin]=\"!roomid1:homeservername\"\nrole_recipients_matrix[domainadmin]=\"!roomid2:homeservername\"\nrole_recipients_matrix[dba]=\"!roomid3:homeservername\"\nrole_recipients_matrix[webmaster]=\"!roomid4:homeservername\"\nrole_recipients_matrix[proxyadmin]=\"!roomid5:homeservername\"\nrole_recipients_matrix[sitemgr]=\"!roomid6:homeservername\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Matrix notifications\n\nSEND_MATRIX=\"YES\"\nMATRIX_HOMESERVER=\"https://matrix.org:8448\"\nMATRIX_ACCESSTOKEN=\"XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\nDEFAULT_RECIPIENT_MATRIX=\"!XXXXXXXXXXXX:matrix.org\"\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The url of the homeserver (`https://homeserver:port`).\n- Credentials for connecting to the homeserver, in the form of a valid access token for your account (or for a dedicated notification account). These tokens usually don't expire.\n- The Room ids that you want to send the notification to.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_MATRIX | Set `SEND_MATRIX` to YES | YES | yes |\n| MATRIX_HOMESERVER | set `MATRIX_HOMESERVER` to the URL of the Matrix homeserver. | | yes |\n| MATRIX_ACCESSTOKEN | Set `MATRIX_ACCESSTOKEN` to the access token from your Matrix account. | | yes |\n| DEFAULT_RECIPIENT_MATRIX | Set `DEFAULT_RECIPIENT_MATRIX` to the Rooms you want the alert notifications to be sent to. The format is `!roomid:homeservername`. 
| | yes |\n\n##### MATRIX_ACCESSTOKEN\n\nTo obtain the access token, you can use the following curl command:\n```\ncurl -XPOST -d '{\"type\":\"m.login.password\", \"user\":\"example\", \"password\":\"wordpass\"}' \"https://homeserver:8448/_matrix/client/r0/login\"\n```\n\n\n##### DEFAULT_RECIPIENT_MATRIX\n\nThe Room ids are unique identifiers and can be obtained from the Room settings in a Matrix client (e.g. Riot).\n\nYou can define multiple Rooms like this: `!roomid1:homeservername` `!roomid2:homeservername`.\n\nAll roles will default to this variable if left unconfigured.\n\nYou can have different Rooms per role, by editing `DEFAULT_RECIPIENT_MATRIX` with the `!roomid:homeservername` you want, in the following entries at the bottom of the same file:\n\n```text\nrole_recipients_matrix[sysadmin]=\"!roomid1:homeservername\"\nrole_recipients_matrix[domainadmin]=\"!roomid2:homeservername\"\nrole_recipients_matrix[dba]=\"!roomid3:homeservername\"\nrole_recipients_matrix[webmaster]=\"!roomid4:homeservername\"\nrole_recipients_matrix[proxyadmin]=\"!roomid5:homeservername\"\nrole_recipients_matrix[sitemgr]=\"!roomid6:homeservername\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Matrix notifications\n\nSEND_MATRIX=\"YES\"\nMATRIX_HOMESERVER=\"https://matrix.org:8448\"\nMATRIX_ACCESSTOKEN=\"XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\nDEFAULT_RECIPIENT_MATRIX=\"!XXXXXXXXXXXX:matrix.org\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/matrix/metadata.yaml" }, { @@ -21913,9 +21817,9 @@ export const integrations = [ "MessageBird" ], "overview": "# MessageBird\n\nSend notifications to MessageBird using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- An access key under 'API ACCESS (REST)' (you will want a live key), you can read more [here](https://developers.messagebird.com/quickstarts/sms/test-credits-api-keys/).\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_MESSAGEBIRD | Set `SEND_MESSAGEBIRD` to YES | YES | yes |\n| 
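The access token can also be checked end to end by posting a message to one of the target rooms. A sketch using the same `r0` client API as the login example above; the room id must be URL-encoded (`!` as `%21`, `:` as `%3A`), and whether `POST` is accepted on this endpoint depends on the homeserver (Synapse has historically allowed it):

```bash
# placeholder homeserver, room id, and token
curl -XPOST \
  -d '{"msgtype": "m.text", "body": "test from netdata"}' \
  "https://homeserver:8448/_matrix/client/r0/rooms/%21roomid%3Ahomeservername/send/m.room.message?access_token=XXXXXXXX"
```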
MESSAGEBIRD_ACCESS_KEY | Set `MESSAGEBIRD_ACCESS_KEY` to your API key. | | yes |\n| MESSAGEBIRD_NUMBER | Set `MESSAGEBIRD_NUMBER` to the MessageBird number you want to use for the alert. | | yes |\n| DEFAULT_RECIPIENT_MESSAGEBIRD | Set `DEFAULT_RECIPIENT_MESSAGEBIRD` to the number you want the alert notification to be sent as an SMS. You can define multiple recipients like this: +15555555555 +17777777777. | | yes |\n\n##### DEFAULT_RECIPIENT_MESSAGEBIRD\n\nAll roles will default to this variable if left unconfigured.\n\nYou can then have different recipients per role, by editing `DEFAULT_RECIPIENT_MESSAGEBIRD` with the number you want, in the following entries at the bottom of the same file:\n```conf\nrole_recipients_messagebird[sysadmin]=\"+15555555555\"\nrole_recipients_messagebird[domainadmin]=\"+15555555556\"\nrole_recipients_messagebird[dba]=\"+15555555557\"\nrole_recipients_messagebird[webmaster]=\"+15555555558\"\nrole_recipients_messagebird[proxyadmin]=\"+15555555559\"\nrole_recipients_messagebird[sitemgr]=\"+15555555550\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Messagebird (messagebird.com) SMS options\n\nSEND_MESSAGEBIRD=\"YES\"\nMESSAGEBIRD_ACCESS_KEY=\"XXXXXXXX\"\nMESSAGEBIRD_NUMBER=\"XXXXXXX\"\nDEFAULT_RECIPIENT_MESSAGEBIRD=\"+15555555555\"\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- An access key under 'API ACCESS (REST)' (you will want a live key), you can read more [here](https://developers.messagebird.com/quickstarts/sms/test-credits-api-keys/).\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_MESSAGEBIRD | Set `SEND_MESSAGEBIRD` to YES | YES | yes |\n| MESSAGEBIRD_ACCESS_KEY | Set `MESSAGEBIRD_ACCESS_KEY` to your API key. | | yes |\n| MESSAGEBIRD_NUMBER | Set `MESSAGEBIRD_NUMBER` to the MessageBird number you want to use for the alert. | | yes |\n| DEFAULT_RECIPIENT_MESSAGEBIRD | Set `DEFAULT_RECIPIENT_MESSAGEBIRD` to the number you want the alert notification to be sent as an SMS. You can define multiple recipients like this: +15555555555 +17777777777. 
| | yes |\n\n##### DEFAULT_RECIPIENT_MESSAGEBIRD\n\nAll roles will default to this variable if left unconfigured.\n\nYou can then have different recipients per role, by editing `DEFAULT_RECIPIENT_MESSAGEBIRD` with the number you want, in the following entries at the bottom of the same file:\n```text\nrole_recipients_messagebird[sysadmin]=\"+15555555555\"\nrole_recipients_messagebird[domainadmin]=\"+15555555556\"\nrole_recipients_messagebird[dba]=\"+15555555557\"\nrole_recipients_messagebird[webmaster]=\"+15555555558\"\nrole_recipients_messagebird[proxyadmin]=\"+15555555559\"\nrole_recipients_messagebird[sitemgr]=\"+15555555550\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Messagebird (messagebird.com) SMS options\n\nSEND_MESSAGEBIRD=\"YES\"\nMESSAGEBIRD_ACCESS_KEY=\"XXXXXXXX\"\nMESSAGEBIRD_NUMBER=\"XXXXXXX\"\nDEFAULT_RECIPIENT_MESSAGEBIRD=\"+15555555555\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/messagebird/metadata.yaml" }, { @@ -21932,9 +21836,9 @@ export const integrations = [ "ntfy" ], "overview": "# ntfy\n\n[ntfy](https://ntfy.sh/) (pronounce: notify) is a simple HTTP-based [pub-sub](https://en.wikipedia.org/wiki/Publish%E2%80%93subscribe_pattern) notification service. It allows you to send notifications to your phone or desktop via scripts from any computer, entirely without signup, cost or setup. It's also [open source](https://github.com/binwiederhier/ntfy) if you want to run your own server.\nYou can send alerts to an ntfy server using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- (Optional) A [self-hosted ntfy server](https://docs.ntfy.sh/faq/#can-i-self-host-it), in case you don't want to use https://ntfy.sh\n- A new [topic](https://ntfy.sh/#subscribe) for the notifications to be published to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_NTFY | Set `SEND_NTFY` to YES | YES | yes |\n| DEFAULT_RECIPIENT_NTFY | URL formed by the server-topic combination you want the alert notifications to be sent to. 
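The same access key and originator can be exercised directly against MessageBird's REST API to rule out account-side problems. A minimal sketch of the documented `/messages` call, with placeholder values:

```bash
curl -X POST "https://rest.messagebird.com/messages" \
  -H "Authorization: AccessKey XXXXXXXX" \
  -d "originator=XXXXXXX" \
  -d "recipients=+15555555555" \
  -d "body=test from netdata"
```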
Unless hosting your own server, the server should always be set to https://ntfy.sh. | | yes |\n| NTFY_USERNAME | The username for netdata to use to authenticate with an ntfy server. | | no |\n| NTFY_PASSWORD | The password for netdata to use to authenticate with an ntfy server. | | no |\n| NTFY_ACCESS_TOKEN | The access token for netdata to use to authenticate with an ntfy server. | | no |\n\n##### DEFAULT_RECIPIENT_NTFY\n\nYou can define multiple recipient URLs like this: `https://SERVER1/TOPIC1` `https://SERVER2/TOPIC2`\n\nAll roles will default to this variable if left unconfigured.\n\nYou can then have different servers and/or topics per role, by editing DEFAULT_RECIPIENT_NTFY with the server-topic combination you want, in the following entries at the bottom of the same file:\n```conf\nrole_recipients_ntfy[sysadmin]=\"https://SERVER1/TOPIC1\"\nrole_recipients_ntfy[domainadmin]=\"https://SERVER2/TOPIC2\"\nrole_recipients_ntfy[dba]=\"https://SERVER3/TOPIC3\"\nrole_recipients_ntfy[webmaster]=\"https://SERVER4/TOPIC4\"\nrole_recipients_ntfy[proxyadmin]=\"https://SERVER5/TOPIC5\"\nrole_recipients_ntfy[sitemgr]=\"https://SERVER6/TOPIC6\"\n```\n\n\n##### NTFY_USERNAME\n\nOnly useful on self-hosted ntfy instances. See [users and roles](https://docs.ntfy.sh/config/#users-and-roles) for details.\nEnsure that your user has proper read/write access to the provided topic in `DEFAULT_RECIPIENT_NTFY`\n\n\n##### NTFY_PASSWORD\n\nOnly useful on self-hosted ntfy instances. See [users and roles](https://docs.ntfy.sh/config/#users-and-roles) for details.\nEnsure that your user has proper read/write access to the provided topic in `DEFAULT_RECIPIENT_NTFY`\n\n\n##### NTFY_ACCESS_TOKEN\n\nThis can be used in place of `NTFY_USERNAME` and `NTFY_PASSWORD` to authenticate with a self-hosted ntfy instance. 
See [access tokens](https://docs.ntfy.sh/config/?h=access+to#access-tokens) for details.\nEnsure that the token user has proper read/write access to the provided topic in `DEFAULT_RECIPIENT_NTFY`\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\nSEND_NTFY=\"YES\"\nDEFAULT_RECIPIENT_NTFY=\"https://ntfy.sh/netdata-X7seHg7d3Tw9zGOk https://ntfy.sh/netdata-oIPm4IK1IlUtlA30\"\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- (Optional) A [self-hosted ntfy server](https://docs.ntfy.sh/faq/#can-i-self-host-it), in case you don't want to use https://ntfy.sh\n- A new [topic](https://ntfy.sh/#subscribe) for the notifications to be published to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_NTFY | Set `SEND_NTFY` to YES | YES | yes |\n| DEFAULT_RECIPIENT_NTFY | URL formed by the server-topic combination you want the alert notifications to be sent to. Unless hosting your own server, the server should always be set to https://ntfy.sh. | | yes |\n| NTFY_USERNAME | The username for netdata to use to authenticate with an ntfy server. | | no |\n| NTFY_PASSWORD | The password for netdata to use to authenticate with an ntfy server. | | no |\n| NTFY_ACCESS_TOKEN | The access token for netdata to use to authenticate with an ntfy server. | | no |\n\n##### DEFAULT_RECIPIENT_NTFY\n\nYou can define multiple recipient URLs like this: `https://SERVER1/TOPIC1` `https://SERVER2/TOPIC2`\n\nAll roles will default to this variable if left unconfigured.\n\nYou can then have different servers and/or topics per role, by editing DEFAULT_RECIPIENT_NTFY with the server-topic combination you want, in the following entries at the bottom of the same file:\n```text\nrole_recipients_ntfy[sysadmin]=\"https://SERVER1/TOPIC1\"\nrole_recipients_ntfy[domainadmin]=\"https://SERVER2/TOPIC2\"\nrole_recipients_ntfy[dba]=\"https://SERVER3/TOPIC3\"\nrole_recipients_ntfy[webmaster]=\"https://SERVER4/TOPIC4\"\nrole_recipients_ntfy[proxyadmin]=\"https://SERVER5/TOPIC5\"\nrole_recipients_ntfy[sitemgr]=\"https://SERVER6/TOPIC6\"\n```\n\n\n##### NTFY_USERNAME\n\nOnly useful on self-hosted ntfy instances. See [users and roles](https://docs.ntfy.sh/config/#users-and-roles) for details.\nEnsure that your user has proper read/write access to the provided topic in `DEFAULT_RECIPIENT_NTFY`\n\n\n##### NTFY_PASSWORD\n\nOnly useful on self-hosted ntfy instances. 
See [users and roles](https://docs.ntfy.sh/config/#users-and-roles) for details.\nEnsure that your user has proper read/write access to the provided topic in `DEFAULT_RECIPIENT_NTFY`\n\n\n##### NTFY_ACCESS_TOKEN\n\nThis can be used in place of `NTFY_USERNAME` and `NTFY_PASSWORD` to authenticate with a self-hosted ntfy instance. See [access tokens](https://docs.ntfy.sh/config/?h=access+to#access-tokens) for details.\nEnsure that the token user has proper read/write access to the provided topic in `DEFAULT_RECIPIENT_NTFY`\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\nSEND_NTFY=\"YES\"\nDEFAULT_RECIPIENT_NTFY=\"https://ntfy.sh/netdata-X7seHg7d3Tw9zGOk https://ntfy.sh/netdata-oIPm4IK1IlUtlA30\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/ntfy/metadata.yaml" }, { @@ -21951,9 +21855,9 @@ export const integrations = [ "OpsGenie" ], "overview": "# OpsGenie\n\nOpsgenie is an alerting and incident response tool. It is designed to group and filter alarms, build custom routing rules for on-call teams, and correlate deployments and commits to incidents.\nYou can send notifications to Opsgenie using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- An Opsgenie integration. You can create an [integration](https://docs.opsgenie.com/docs/api-integration) in the [Opsgenie](https://www.atlassian.com/software/opsgenie) dashboard.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_OPSGENIE | Set `SEND_OPSGENIE` to YES | YES | yes |\n| OPSGENIE_API_KEY | Set `OPSGENIE_API_KEY` to your API key. | | yes |\n| OPSGENIE_API_URL | Set `OPSGENIE_API_URL` to the corresponding URL if required, for example there are region-specific API URLs such as `https://eu.api.opsgenie.com`. | https://api.opsgenie.com | no |\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\nSEND_OPSGENIE=\"YES\"\nOPSGENIE_API_KEY=\"11111111-2222-3333-4444-555555555555\"\nOPSGENIE_API_URL=\"\"\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- An Opsgenie integration. 
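Because ntfy topics need no registration, a recipient URL is easy to verify from any shell; every subscriber of the topic should receive the message immediately. For self-hosted instances, the same request can carry credentials or an access token:

```bash
# public ntfy.sh topic, as in the example configuration above
curl -d "test from netdata" "https://ntfy.sh/netdata-X7seHg7d3Tw9zGOk"

# self-hosted instance, username/password
curl -u user:password -d "test from netdata" "https://SERVER1/TOPIC1"

# self-hosted instance, access token
curl -H "Authorization: Bearer tk_XXXXXXXX" -d "test from netdata" "https://SERVER1/TOPIC1"
```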
You can create an [integration](https://docs.opsgenie.com/docs/api-integration) in the [Opsgenie](https://www.atlassian.com/software/opsgenie) dashboard.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_OPSGENIE | Set `SEND_OPSGENIE` to YES | YES | yes |\n| OPSGENIE_API_KEY | Set `OPSGENIE_API_KEY` to your API key. | | yes |\n| OPSGENIE_API_URL | Set `OPSGENIE_API_URL` to the corresponding URL if required, for example there are region-specific API URLs such as `https://eu.api.opsgenie.com`. | https://api.opsgenie.com | no |\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\nSEND_OPSGENIE=\"YES\"\nOPSGENIE_API_KEY=\"11111111-2222-3333-4444-555555555555\"\nOPSGENIE_API_URL=\"\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/opsgenie/metadata.yaml" }, { @@ -21970,9 +21874,9 @@ export const integrations = [ "PagerDuty" ], "overview": "# PagerDuty\n\nPagerDuty is an enterprise incident resolution service that integrates with ITOps and DevOps monitoring stacks to improve operational reliability and agility. From enriching and aggregating events to correlating them into incidents, PagerDuty streamlines the incident management process by reducing alert noise and resolution times.\nYou can send notifications to PagerDuty using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- An installation of the [PagerDuty](https://www.pagerduty.com/docs/guides/agent-install-guide/) agent on the node running the Netdata Agent\n- A PagerDuty Generic API service using either the `Events API v2` or `Events API v1`\n- [Add a new service](https://support.pagerduty.com/docs/services-and-integrations#section-configuring-services-and-integrations) to PagerDuty. Click Use our API directly and select either `Events API v2` or `Events API v1`. 
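To confirm the API key (and, for EU accounts, the regional API URL) before routing Netdata alerts through it, you can create a test alert via Opsgenie's Alerts API v2. A minimal sketch with a placeholder key:

```bash
curl -X POST "https://api.opsgenie.com/v2/alerts" \
  -H "Content-Type: application/json" \
  -H "Authorization: GenieKey 11111111-2222-3333-4444-555555555555" \
  -d '{"message": "test alert from netdata"}'
```

A `202 Accepted` response means the key works; remember to close or delete the test alert afterwards.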
Once you finish creating the service, click on the Integrations tab to find your Integration Key.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_PD | Set `SEND_PD` to YES | YES | yes |\n| DEFAULT_RECIPIENT_PD | Set `DEFAULT_RECIPIENT_PD` to the PagerDuty service key you want the alert notifications to be sent to. You can define multiple service keys like this: `pd_service_key_1` `pd_service_key_2`. | | yes |\n\n##### DEFAULT_RECIPIENT_PD\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_PD` can be edited in the following entries at the bottom of the same file:\n```conf\nrole_recipients_pd[sysadmin]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxa\"\nrole_recipients_pd[domainadmin]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxb\"\nrole_recipients_pd[dba]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxc\"\nrole_recipients_pd[webmaster]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxd\"\nrole_recipients_pd[proxyadmin]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxe\"\nrole_recipients_pd[sitemgr]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxf\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# pagerduty.com notification options\n\nSEND_PD=\"YES\"\nDEFAULT_RECIPIENT_PD=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\"\nUSE_PD_VERSION=\"2\"\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- An installation of the [PagerDuty](https://www.pagerduty.com/docs/guides/agent-install-guide/) agent on the node running the Netdata Agent\n- A PagerDuty Generic API service using either the `Events API v2` or `Events API v1`\n- [Add a new service](https://support.pagerduty.com/docs/services-and-integrations#section-configuring-services-and-integrations) to PagerDuty. Click Use our API directly and select either `Events API v2` or `Events API v1`. 
Once you finish creating the service, click on the Integrations tab to find your Integration Key.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_PD | Set `SEND_PD` to YES | YES | yes |\n| DEFAULT_RECIPIENT_PD | Set `DEFAULT_RECIPIENT_PD` to the PagerDuty service key you want the alert notifications to be sent to. You can define multiple service keys like this: `pd_service_key_1` `pd_service_key_2`. | | yes |\n\n##### DEFAULT_RECIPIENT_PD\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_PD` can be edited in the following entries at the bottom of the same file:\n```text\nrole_recipients_pd[sysadmin]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxa\"\nrole_recipients_pd[domainadmin]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxb\"\nrole_recipients_pd[dba]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxc\"\nrole_recipients_pd[webmaster]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxd\"\nrole_recipients_pd[proxyadmin]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxe\"\nrole_recipients_pd[sitemgr]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxf\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# pagerduty.com notification options\n\nSEND_PD=\"YES\"\nDEFAULT_RECIPIENT_PD=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\"\nUSE_PD_VERSION=\"2\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/pagerduty/metadata.yaml" }, { @@ -21989,9 +21893,9 @@ export const integrations = [ "Prowl" ], "overview": "# Prowl\n\nSend notifications to Prowl using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n\n## Limitations\n\n- Because of how Netdata integrates with Prowl, there is a hard limit of at most 1000 notifications per hour (starting from the first notification sent). 
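If you chose `Events API v2`, the Integration Key can be validated independently of Netdata by enqueueing a test event, which should trigger an incident on the corresponding PagerDuty service. A minimal sketch with a placeholder routing key:

```bash
curl -X POST "https://events.pagerduty.com/v2/enqueue" \
  -H "Content-Type: application/json" \
  -d '{
        "routing_key": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
        "event_action": "trigger",
        "payload": {
          "summary": "test event from netdata",
          "source": "netdata-test-host",
          "severity": "warning"
        }
      }'
```

Resolve the test incident in PagerDuty once it appears.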
Any alerts beyond the first thousand in an hour will be dropped.\n- Warning messages will be sent with the 'High' priority, critical messages will be sent with the 'Emergency' priority, and all other messages will be sent with the normal priority. Opening the notification's associated URL will take you to the Netdata dashboard of the system that issued the alert, directly to the chart that it triggered on.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A Prowl API key, which can be requested through the Prowl website after registering\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_PROWL | Set `SEND_PROWL` to YES | YES | yes |\n| DEFAULT_RECIPIENT_PROWL | Set `DEFAULT_RECIPIENT_PROWL` to the Prowl API key you want the alert notifications to be sent to. You can define multiple API keys like this: `APIKEY1`, `APIKEY2`. | | yes |\n\n##### DEFAULT_RECIPIENT_PROWL\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_PROWL` can be edited in the following entries at the bottom of the same file:\n```conf\nrole_recipients_prowl[sysadmin]=\"AAAAAAAA\"\nrole_recipients_prowl[domainadmin]=\"BBBBBBBBB\"\nrole_recipients_prowl[dba]=\"CCCCCCCCC\"\nrole_recipients_prowl[webmaster]=\"DDDDDDDDDD\"\nrole_recipients_prowl[proxyadmin]=\"EEEEEEEEEE\"\nrole_recipients_prowl[sitemgr]=\"FFFFFFFFFF\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# iOS Push Notifications\n\nSEND_PROWL=\"YES\"\nDEFAULT_RECIPIENT_PROWL=\"XXXXXXXXXX\"\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A Prowl API key, which can be requested through the Prowl website after registering\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_PROWL | Set `SEND_PROWL` to YES | YES | yes |\n| DEFAULT_RECIPIENT_PROWL | Set `DEFAULT_RECIPIENT_PROWL` to the Prowl API key you want the alert notifications to be sent to. You can define multiple API keys like this: `APIKEY1`, `APIKEY2`. 
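Before placing a key in `DEFAULT_RECIPIENT_PROWL`, you may want to confirm it is valid. A hedged sketch, assuming Prowl's public `verify` endpoint; `APIKEY1` is a placeholder:

```bash
# A valid key returns an XML success element (including your remaining API calls).
curl -s "https://api.prowlapp.com/publicapi/verify?apikey=APIKEY1"
```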
| | yes |\n\n##### DEFAULT_RECIPIENT_PROWL\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_PROWL` can be edited in the following entries at the bottom of the same file:\n```text\nrole_recipients_prowl[sysadmin]=\"AAAAAAAA\"\nrole_recipients_prowl[domainadmin]=\"BBBBBBBBB\"\nrole_recipients_prowl[dba]=\"CCCCCCCCC\"\nrole_recipients_prowl[webmaster]=\"DDDDDDDDDD\"\nrole_recipients_prowl[proxyadmin]=\"EEEEEEEEEE\"\nrole_recipients_prowl[sitemgr]=\"FFFFFFFFFF\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# iOS Push Notifications\n\nSEND_PROWL=\"YES\"\nDEFAULT_RECIPIENT_PROWL=\"XXXXXXXXXX\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/prowl/metadata.yaml" }, { @@ -22008,9 +21912,9 @@ export const integrations = [ "Pushbullet" ], "overview": "# Pushbullet\n\nSend notifications to Pushbullet using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A Pushbullet access token that can be created in your [account settings](https://www.pushbullet.com/#settings/account).\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| Send_PUSHBULLET | Set `Send_PUSHBULLET` to YES | YES | yes |\n| PUSHBULLET_ACCESS_TOKEN | set `PUSHBULLET_ACCESS_TOKEN` to the access token you generated. | | yes |\n| DEFAULT_RECIPIENT_PUSHBULLET | Set `DEFAULT_RECIPIENT_PUSHBULLET` to the email (e.g. `example@domain.com`) or the channel tag (e.g. `#channel`) you want the alert notifications to be sent to. 
| | yes |\n\n##### DEFAULT_RECIPIENT_PUSHBULLET\n\nYou can define multiple entries like this: user1@email.com user2@email.com.\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_PUSHBULLET` can be edited in the following entries at the bottom of the same file:\n```conf\nrole_recipients_pushbullet[sysadmin]=\"user1@email.com\"\nrole_recipients_pushbullet[domainadmin]=\"user2@mail.com\"\nrole_recipients_pushbullet[dba]=\"#channel1\"\nrole_recipients_pushbullet[webmaster]=\"#channel2\"\nrole_recipients_pushbullet[proxyadmin]=\"user3@mail.com\"\nrole_recipients_pushbullet[sitemgr]=\"user4@mail.com\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# pushbullet (pushbullet.com) push notification options\n\nSEND_PUSHBULLET=\"YES\"\nPUSHBULLET_ACCESS_TOKEN=\"XXXXXXXXX\"\nDEFAULT_RECIPIENT_PUSHBULLET=\"admin1@example.com admin3@somemail.com #examplechanneltag #anotherchanneltag\"\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A Pushbullet access token that can be created in your [account settings](https://www.pushbullet.com/#settings/account).\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_PUSHBULLET | Set `SEND_PUSHBULLET` to YES | YES | yes |\n| PUSHBULLET_ACCESS_TOKEN | Set `PUSHBULLET_ACCESS_TOKEN` to the access token you generated. | | yes |\n| DEFAULT_RECIPIENT_PUSHBULLET | Set `DEFAULT_RECIPIENT_PUSHBULLET` to the email (e.g. `example@domain.com`) or the channel tag (e.g. `#channel`) you want the alert notifications to be sent to. 
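To confirm an access token and recipient work before editing the file, you can push a note directly through Pushbullet's API. A minimal sketch with placeholder values:

```bash
# Send a test note; a JSON push object in the response means the token is valid.
curl -s -X POST https://api.pushbullet.com/v2/pushes \
  -H 'Access-Token: YOUR_ACCESS_TOKEN' \
  -H 'Content-Type: application/json' \
  -d '{"type": "note", "title": "Netdata test", "body": "Test notification"}'
```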
| | yes |\n\n##### DEFAULT_RECIPIENT_PUSHBULLET\n\nYou can define multiple entries like this: user1@email.com user2@email.com.\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_PUSHBULLET` can be edited in the following entries at the bottom of the same file:\n```text\nrole_recipients_pushbullet[sysadmin]=\"user1@email.com\"\nrole_recipients_pushbullet[domainadmin]=\"user2@mail.com\"\nrole_recipients_pushbullet[dba]=\"#channel1\"\nrole_recipients_pushbullet[webmaster]=\"#channel2\"\nrole_recipients_pushbullet[proxyadmin]=\"user3@mail.com\"\nrole_recipients_pushbullet[sitemgr]=\"user4@mail.com\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# pushbullet (pushbullet.com) push notification options\n\nSEND_PUSHBULLET=\"YES\"\nPUSHBULLET_ACCESS_TOKEN=\"XXXXXXXXX\"\nDEFAULT_RECIPIENT_PUSHBULLET=\"admin1@example.com admin3@somemail.com #examplechanneltag #anotherchanneltag\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/pushbullet/metadata.yaml" }, { @@ -22027,9 +21931,9 @@ export const integrations = [ "PushOver" ], "overview": "# PushOver\n\nSend notification to Pushover using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n- Netdata will send warning messages with priority 0 and critical messages with priority 1.\n- Pushover allows you to select do-not-disturb hours. The way this is configured, critical notifications will ring and vibrate your phone, even during the do-not-disturb-hours.\n- All other notifications will be delivered silently.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- An Application token. You can use the same on all your Netdata servers.\n- A User token for each user you are going to send notifications to. This is the actual recipient of the notification.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_PUSHOVER | Set `SEND_PUSHOVER` to YES | YES | yes |\n| PUSHOVER_WEBHOOK_URL | set `PUSHOVER_WEBHOOK_URL` to your Pushover Application token. 
| | yes |\n| DEFAULT_RECIPIENT_PUSHOVER | Set `DEFAULT_RECIPIENT_PUSHOVER` the Pushover User token you want the alert notifications to be sent to. You can define multiple User tokens like this: `USERTOKEN1` `USERTOKEN2`. | | yes |\n\n##### DEFAULT_RECIPIENT_PUSHOVER\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_PUSHOVER` can be edited in the following entries at the bottom of the same file:\n```conf\nrole_recipients_pushover[sysadmin]=\"USERTOKEN1\"\nrole_recipients_pushover[domainadmin]=\"USERTOKEN2\"\nrole_recipients_pushover[dba]=\"USERTOKEN3 USERTOKEN4\"\nrole_recipients_pushover[webmaster]=\"USERTOKEN5\"\nrole_recipients_pushover[proxyadmin]=\"USERTOKEN6\"\nrole_recipients_pushover[sitemgr]=\"USERTOKEN7\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# pushover (pushover.net) global notification options\n\nSEND_PUSHOVER=\"YES\"\nPUSHOVER_APP_TOKEN=\"XXXXXXXXX\"\nDEFAULT_RECIPIENT_PUSHOVER=\"USERTOKEN\"\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- An Application token. You can use the same on all your Netdata servers.\n- A User token for each user you are going to send notifications to. This is the actual recipient of the notification.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_PUSHOVER | Set `SEND_PUSHOVER` to YES | YES | yes |\n| PUSHOVER_WEBHOOK_URL | set `PUSHOVER_WEBHOOK_URL` to your Pushover Application token. | | yes |\n| DEFAULT_RECIPIENT_PUSHOVER | Set `DEFAULT_RECIPIENT_PUSHOVER` the Pushover User token you want the alert notifications to be sent to. You can define multiple User tokens like this: `USERTOKEN1` `USERTOKEN2`. 
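You can confirm that an Application token and User token pair is valid by posting one message straight to Pushover's API. A minimal sketch; `YOUR_APP_TOKEN` stands for the application token (the value the sample configuration below stores in `PUSHOVER_APP_TOKEN`) and `USERTOKEN1` for a user token:

```bash
# A response of {"status":1,...} indicates both tokens are accepted.
curl -s \
  --form-string "token=YOUR_APP_TOKEN" \
  --form-string "user=USERTOKEN1" \
  --form-string "message=Netdata test notification" \
  https://api.pushover.net/1/messages.json
```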
| | yes |\n\n##### DEFAULT_RECIPIENT_PUSHOVER\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_PUSHOVER` can be edited in the following entries at the bottom of the same file:\n```text\nrole_recipients_pushover[sysadmin]=\"USERTOKEN1\"\nrole_recipients_pushover[domainadmin]=\"USERTOKEN2\"\nrole_recipients_pushover[dba]=\"USERTOKEN3 USERTOKEN4\"\nrole_recipients_pushover[webmaster]=\"USERTOKEN5\"\nrole_recipients_pushover[proxyadmin]=\"USERTOKEN6\"\nrole_recipients_pushover[sitemgr]=\"USERTOKEN7\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# pushover (pushover.net) global notification options\n\nSEND_PUSHOVER=\"YES\"\nPUSHOVER_APP_TOKEN=\"XXXXXXXXX\"\nDEFAULT_RECIPIENT_PUSHOVER=\"USERTOKEN\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/pushover/metadata.yaml" }, { @@ -22046,9 +21950,9 @@ export const integrations = [ "RocketChat" ], "overview": "# RocketChat\n\nSend notifications to Rocket.Chat using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The incoming webhook URL as given by RocketChat. You can use the same on all your Netdata servers (or you can have multiple if you like - your decision).\n- One or more channels to post the messages to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_ROCKETCHAT | Set `SEND_ROCKETCHAT` to `YES` | YES | yes |\n| ROCKETCHAT_WEBHOOK_URL | set `ROCKETCHAT_WEBHOOK_URL` to your webhook URL. | | yes |\n| DEFAULT_RECIPIENT_ROCKETCHAT | Set `DEFAULT_RECIPIENT_ROCKETCHAT` to the channel you want the alert notifications to be sent to. You can define multiple channels like this: `alerts` `systems`. 
| | yes |\n\n##### DEFAULT_RECIPIENT_ROCKETCHAT\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_ROCKETCHAT` can be edited in the following entries at the bottom of the same file:\n```conf\nrole_recipients_rocketchat[sysadmin]=\"systems\"\nrole_recipients_rocketchat[domainadmin]=\"domains\"\nrole_recipients_rocketchat[dba]=\"databases systems\"\nrole_recipients_rocketchat[webmaster]=\"marketing development\"\nrole_recipients_rocketchat[proxyadmin]=\"proxy_admin\"\nrole_recipients_rocketchat[sitemgr]=\"sites\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# rocketchat (rocket.chat) global notification options\n\nSEND_ROCKETCHAT=\"YES\"\nROCKETCHAT_WEBHOOK_URL=\"\"\nDEFAULT_RECIPIENT_ROCKETCHAT=\"monitoring_alarms\"\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The incoming webhook URL as given by RocketChat. You can use the same on all your Netdata servers (or you can have multiple if you like - your decision).\n- One or more channels to post the messages to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_ROCKETCHAT | Set `SEND_ROCKETCHAT` to `YES` | YES | yes |\n| ROCKETCHAT_WEBHOOK_URL | set `ROCKETCHAT_WEBHOOK_URL` to your webhook URL. | | yes |\n| DEFAULT_RECIPIENT_ROCKETCHAT | Set `DEFAULT_RECIPIENT_ROCKETCHAT` to the channel you want the alert notifications to be sent to. You can define multiple channels like this: `alerts` `systems`. 
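To verify the webhook URL independently of Netdata, you can post a message to it directly. A minimal sketch; the URL is a placeholder for the one Rocket.Chat gave you:

```bash
# The message should appear in the channel the incoming webhook is bound to.
curl -s -X POST \
  -H 'Content-Type: application/json' \
  -d '{"text": "Netdata test notification"}' \
  "https://your.rocket.chat/hooks/XXXXXXXX/XXXXXXXXXXXXXXXXXXXXXXXX"
```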
| | yes |\n\n##### DEFAULT_RECIPIENT_ROCKETCHAT\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_ROCKETCHAT` can be edited in the following entries at the bottom of the same file:\n```text\nrole_recipients_rocketchat[sysadmin]=\"systems\"\nrole_recipients_rocketchat[domainadmin]=\"domains\"\nrole_recipients_rocketchat[dba]=\"databases systems\"\nrole_recipients_rocketchat[webmaster]=\"marketing development\"\nrole_recipients_rocketchat[proxyadmin]=\"proxy_admin\"\nrole_recipients_rocketchat[sitemgr]=\"sites\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# rocketchat (rocket.chat) global notification options\n\nSEND_ROCKETCHAT=\"YES\"\nROCKETCHAT_WEBHOOK_URL=\"\"\nDEFAULT_RECIPIENT_ROCKETCHAT=\"monitoring_alarms\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/rocketchat/metadata.yaml" }, { @@ -22065,9 +21969,9 @@ export const integrations = [ "Slack" ], "overview": "# Slack\n\nSend notifications to a Slack workspace using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Slack app along with an incoming webhook, read Slack's guide on the topic [here](https://api.slack.com/messaging/webhooks).\n- One or more channels to post the messages to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_SLACK | Set `SEND_SLACK` to YES | YES | yes |\n| SLACK_WEBHOOK_URL | set `SLACK_WEBHOOK_URL` to your Slack app's webhook URL. | | yes |\n| DEFAULT_RECIPIENT_SLACK | Set `DEFAULT_RECIPIENT_SLACK` to the Slack channel your Slack app is set to send messages to. The syntax for channels is `#channel` or `channel`. 
| | yes |\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# slack (slack.com) global notification options\n\nSEND_SLACK=\"YES\"\nSLACK_WEBHOOK_URL=\"https://hooks.slack.com/services/XXXXXXXX/XXXXXXXX/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\" \nDEFAULT_RECIPIENT_SLACK=\"#alarms\"\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Slack app along with an incoming webhook, read Slack's guide on the topic [here](https://api.slack.com/messaging/webhooks).\n- One or more channels to post the messages to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_SLACK | Set `SEND_SLACK` to YES | YES | yes |\n| SLACK_WEBHOOK_URL | set `SLACK_WEBHOOK_URL` to your Slack app's webhook URL. | | yes |\n| DEFAULT_RECIPIENT_SLACK | Set `DEFAULT_RECIPIENT_SLACK` to the Slack channel your Slack app is set to send messages to. The syntax for channels is `#channel` or `channel`. 
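A quick way to validate the webhook URL before enabling the integration is to post to it directly, as in Slack's own webhook guide. A minimal sketch with a placeholder URL:

```bash
# A plain "ok" response body means the webhook accepted the message.
curl -s -X POST \
  -H 'Content-type: application/json' \
  -d '{"text": "Netdata test notification"}' \
  "https://hooks.slack.com/services/XXXXXXXX/XXXXXXXX/XXXXXXXXXXXXXXXXXXXXXXXX"
```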
| | yes |\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# slack (slack.com) global notification options\n\nSEND_SLACK=\"YES\"\nSLACK_WEBHOOK_URL=\"https://hooks.slack.com/services/XXXXXXXX/XXXXXXXX/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\" \nDEFAULT_RECIPIENT_SLACK=\"#alarms\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/slack/metadata.yaml" }, { @@ -22086,9 +21990,9 @@ export const integrations = [ "Messaging" ], "overview": "# SMS\n\nSend notifications to `smstools3` using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\nThe SMS Server Tools 3 is a SMS Gateway software which can send and receive short messages through GSM modems and mobile phones.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- [Install](http://smstools3.kekekasvi.com/index.php?p=compiling) and [configure](http://smstools3.kekekasvi.com/index.php?p=configure) `smsd`\n- To ensure that the user `netdata` can execute `sendsms`. Any user executing `sendsms` needs to:\n - Have write permissions to /tmp and /var/spool/sms/outgoing\n - Be a member of group smsd\n - To ensure that the steps above are successful, just su netdata and execute sendsms phone message.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| sendsms | Set the path for `sendsms`, otherwise Netdata will search for it in your system `$PATH:` | YES | yes |\n| SEND_SMS | Set `SEND_SMS` to `YES`. | | yes |\n| DEFAULT_RECIPIENT_SMS | Set DEFAULT_RECIPIENT_SMS to the phone number you want the alert notifications to be sent to. You can define multiple phone numbers like this: PHONE1 PHONE2. 
| | yes |\n\n##### sendsms\n\n# The full path of the sendsms command (smstools3).\n# If empty, the system $PATH will be searched for it.\n# If not found, SMS notifications will be silently disabled.\nsendsms=\"/usr/bin/sendsms\"\n\n\n##### DEFAULT_RECIPIENT_SMS\n\nAll roles will default to this variable if left unconfigured.\n\nYou can then have different phone numbers per role, by editing `DEFAULT_RECIPIENT_SMS` with the phone number you want, in the following entries at the bottom of the same file:\n```conf\nrole_recipients_sms[sysadmin]=\"PHONE1\"\nrole_recipients_sms[domainadmin]=\"PHONE2\"\nrole_recipients_sms[dba]=\"PHONE3\"\nrole_recipients_sms[webmaster]=\"PHONE4\"\nrole_recipients_sms[proxyadmin]=\"PHONE5\"\nrole_recipients_sms[sitemgr]=\"PHONE6\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# SMS Server Tools 3 (smstools3) global notification options\nSEND_SMS=\"YES\"\nDEFAULT_RECIPIENT_SMS=\"1234567890\"\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- [Install](http://smstools3.kekekasvi.com/index.php?p=compiling) and [configure](http://smstools3.kekekasvi.com/index.php?p=configure) `smsd`\n- To ensure that the user `netdata` can execute `sendsms`. Any user executing `sendsms` needs to:\n - Have write permissions to /tmp and /var/spool/sms/outgoing\n - Be a member of group smsd\n - To ensure that the steps above are successful, just su netdata and execute sendsms phone message.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| sendsms | Set the path for `sendsms`, otherwise Netdata will search for it in your system `$PATH:` | YES | yes |\n| SEND_SMS | Set `SEND_SMS` to `YES`. | | yes |\n| DEFAULT_RECIPIENT_SMS | Set DEFAULT_RECIPIENT_SMS to the phone number you want the alert notifications to be sent to. You can define multiple phone numbers like this: PHONE1 PHONE2. 
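As the prerequisites above suggest, the simplest end-to-end check is to send a message as the `netdata` user with `sendsms` itself. A sketch with a placeholder phone number:

```bash
# Become the netdata user, then submit a test SMS through smsd's outgoing queue.
sudo su -s /bin/bash netdata
sendsms 1234567890 'Netdata test message'
```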
| | yes |\n\n##### sendsms\n\n# The full path of the sendsms command (smstools3).\n# If empty, the system $PATH will be searched for it.\n# If not found, SMS notifications will be silently disabled.\nsendsms=\"/usr/bin/sendsms\"\n\n\n##### DEFAULT_RECIPIENT_SMS\n\nAll roles will default to this variable if left unconfigured.\n\nYou can then have different phone numbers per role, by editing `DEFAULT_RECIPIENT_SMS` with the phone number you want, in the following entries at the bottom of the same file:\n```text\nrole_recipients_sms[sysadmin]=\"PHONE1\"\nrole_recipients_sms[domainadmin]=\"PHONE2\"\nrole_recipients_sms[dba]=\"PHONE3\"\nrole_recipients_sms[webmaster]=\"PHONE4\"\nrole_recipients_sms[proxyadmin]=\"PHONE5\"\nrole_recipients_sms[sitemgr]=\"PHONE6\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# SMS Server Tools 3 (smstools3) global notification options\nSEND_SMS=\"YES\"\nDEFAULT_RECIPIENT_SMS=\"1234567890\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/smstools3/metadata.yaml" }, { @@ -22105,9 +22009,9 @@ export const integrations = [ "syslog" ], "overview": "# syslog\n\nSend notifications to Syslog using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A working `logger` command for this to work. This is the case on pretty much every Linux system in existence, and most BSD systems.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SYSLOG_FACILITY | Set `SYSLOG_FACILITY` to the facility used for logging, by default this value is set to `local6`. | | yes |\n| DEFAULT_RECIPIENT_SYSLOG | Set `DEFAULT_RECIPIENT_SYSLOG` to the recipient you want the alert notifications to be sent to. | | yes |\n| SEND_SYSLOG | Set SEND_SYSLOG to YES, make sure you have everything else configured before turning this on. | | yes |\n\n##### DEFAULT_RECIPIENT_SYSLOG\n\nTargets are defined as follows:\n\n```\n[[facility.level][@host[:port]]/]prefix\n```\n\nprefix defines what the log messages are prefixed with. 
By default, all lines are prefixed with 'netdata'.\n\nThe facility and level are the standard syslog facility and level options, for more info on them see your local logger and syslog documentation. By default, Netdata will log to the local6 facility, with a log level dependent on the type of message (crit for CRITICAL, warning for WARNING, and info for everything else).\n\nYou can configure sending directly to remote log servers by specifying a host (and optionally a port). However, this has a somewhat high overhead, so it is much preferred to use your local syslog daemon to handle the forwarding of messages to remote systems (pretty much all of them allow at least simple forwarding, and most of the really popular ones support complex queueing and routing of messages to remote log servers).\n\nYou can define multiple recipients like this: daemon.notice@loghost:514/netdata daemon.notice@loghost2:514/netdata.\nAll roles will default to this variable if left unconfigured.\n\n\n##### SEND_SYSLOG \n\nYou can then have different recipients per role, by editing DEFAULT_RECIPIENT_SYSLOG with the recipient you want, in the following entries at the bottom of the same file:\n\n```conf\nrole_recipients_syslog[sysadmin]=\"daemon.notice@loghost1:514/netdata\"\nrole_recipients_syslog[domainadmin]=\"daemon.notice@loghost2:514/netdata\"\nrole_recipients_syslog[dba]=\"daemon.notice@loghost3:514/netdata\"\nrole_recipients_syslog[webmaster]=\"daemon.notice@loghost4:514/netdata\"\nrole_recipients_syslog[proxyadmin]=\"daemon.notice@loghost5:514/netdata\"\nrole_recipients_syslog[sitemgr]=\"daemon.notice@loghost6:514/netdata\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# syslog notifications\n\nSEND_SYSLOG=\"YES\"\nSYSLOG_FACILITY='local6'\nDEFAULT_RECIPIENT_SYSLOG=\"daemon.notice@loghost6:514/netdata\"\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A working `logger` command for this to work. This is the case on pretty much every Linux system in existence, and most BSD systems.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SYSLOG_FACILITY | Set `SYSLOG_FACILITY` to the facility used for logging, by default this value is set to `local6`. | | yes |\n| DEFAULT_RECIPIENT_SYSLOG | Set `DEFAULT_RECIPIENT_SYSLOG` to the recipient you want the alert notifications to be sent to. | | yes |\n| SEND_SYSLOG | Set SEND_SYSLOG to YES, make sure you have everything else configured before turning this on. 
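Since the integration rides on the local `logger` command, you can confirm that messages reach your syslog daemon before turning `SEND_SYSLOG` on. A minimal sketch using the default facility:

```bash
# Log a test line to the local6 facility with the netdata tag, then look for it
# in your syslog output (e.g. /var/log/syslog, /var/log/messages, or journalctl).
logger -p local6.info -t netdata "Netdata syslog notification test"
```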
| | yes |\n\n##### DEFAULT_RECIPIENT_SYSLOG\n\nTargets are defined as follows:\n\n```\n[[facility.level][@host[:port]]/]prefix\n```\n\nprefix defines what the log messages are prefixed with. By default, all lines are prefixed with 'netdata'.\n\nThe facility and level are the standard syslog facility and level options, for more info on them see your local logger and syslog documentation. By default, Netdata will log to the local6 facility, with a log level dependent on the type of message (crit for CRITICAL, warning for WARNING, and info for everything else).\n\nYou can configure sending directly to remote log servers by specifying a host (and optionally a port). However, this has a somewhat high overhead, so it is much preferred to use your local syslog daemon to handle the forwarding of messages to remote systems (pretty much all of them allow at least simple forwarding, and most of the really popular ones support complex queueing and routing of messages to remote log servers).\n\nYou can define multiple recipients like this: daemon.notice@loghost:514/netdata daemon.notice@loghost2:514/netdata.\nAll roles will default to this variable if left unconfigured.\n\n\n##### SEND_SYSLOG \n\nYou can then have different recipients per role, by editing DEFAULT_RECIPIENT_SYSLOG with the recipient you want, in the following entries at the bottom of the same file:\n\n```text\nrole_recipients_syslog[sysadmin]=\"daemon.notice@loghost1:514/netdata\"\nrole_recipients_syslog[domainadmin]=\"daemon.notice@loghost2:514/netdata\"\nrole_recipients_syslog[dba]=\"daemon.notice@loghost3:514/netdata\"\nrole_recipients_syslog[webmaster]=\"daemon.notice@loghost4:514/netdata\"\nrole_recipients_syslog[proxyadmin]=\"daemon.notice@loghost5:514/netdata\"\nrole_recipients_syslog[sitemgr]=\"daemon.notice@loghost6:514/netdata\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# syslog notifications\n\nSEND_SYSLOG=\"YES\"\nSYSLOG_FACILITY='local6'\nDEFAULT_RECIPIENT_SYSLOG=\"daemon.notice@loghost6:514/netdata\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/syslog/metadata.yaml" }, { @@ -22126,9 +22030,9 @@ export const integrations = [ "MS teams" ], "overview": "# Microsoft Teams\n\nYou can send Netdata alerts to Microsoft Teams using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The incoming webhook URL as given by Microsoft Teams. 
You can use the same on all your Netdata servers (or you can have multiple if you like).\n- One or more channels to post the messages to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_MSTEAMS | Set `SEND_MSTEAMS` to YES | YES | yes |\n| MSTEAMS_WEBHOOK_URL | set `MSTEAMS_WEBHOOK_URL` to the incoming webhook URL as given by Microsoft Teams. | | yes |\n| DEFAULT_RECIPIENT_MSTEAMS | Set `DEFAULT_RECIPIENT_MSTEAMS` to the encoded Microsoft Teams channel name you want the alert notifications to be sent to. | | yes |\n\n##### DEFAULT_RECIPIENT_MSTEAMS\n\nIn Microsoft Teams the channel name is encoded in the URI after `/IncomingWebhook/`. You can define multiple channels like this: `CHANNEL1` `CHANNEL2`.\n\nAll roles will default to this variable if left unconfigured.\n\nYou can have different channels per role, by editing `DEFAULT_RECIPIENT_MSTEAMS` with the channel you want, in the following entries at the bottom of the same file:\n```conf\nrole_recipients_msteams[sysadmin]=\"CHANNEL1\"\nrole_recipients_msteams[domainadmin]=\"CHANNEL2\"\nrole_recipients_msteams[dba]=\"databases CHANNEL3\"\nrole_recipients_msteams[webmaster]=\"CHANNEL4\"\nrole_recipients_msteams[proxyadmin]=\"CHANNEL5\"\nrole_recipients_msteams[sitemgr]=\"CHANNEL6\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Microsoft Teams (office.com) global notification options\n\nSEND_MSTEAMS=\"YES\"\nMSTEAMS_WEBHOOK_URL=\"https://outlook.office.com/webhook/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX@XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/IncomingWebhook/CHANNEL/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX\"\nDEFAULT_RECIPIENT_MSTEAMS=\"XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The incoming webhook URL as given by Microsoft Teams. 
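You can exercise the incoming webhook directly before configuring Netdata; legacy Office 365 incoming webhooks accept a simple JSON payload. A hedged sketch with a placeholder URL following the pattern shown in the example below:

```bash
# The message should show up in the Teams channel bound to the webhook.
curl -s -X POST \
  -H 'Content-Type: application/json' \
  -d '{"text": "Netdata test notification"}' \
  "https://outlook.office.com/webhook/XXXXXXXX/IncomingWebhook/CHANNEL/XXXXXXXX"
```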
You can use the same on all your Netdata servers (or you can have multiple if you like).\n- One or more channels to post the messages to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_MSTEAMS | Set `SEND_MSTEAMS` to YES | YES | yes |\n| MSTEAMS_WEBHOOK_URL | set `MSTEAMS_WEBHOOK_URL` to the incoming webhook URL as given by Microsoft Teams. | | yes |\n| DEFAULT_RECIPIENT_MSTEAMS | Set `DEFAULT_RECIPIENT_MSTEAMS` to the encoded Microsoft Teams channel name you want the alert notifications to be sent to. | | yes |\n\n##### DEFAULT_RECIPIENT_MSTEAMS\n\nIn Microsoft Teams the channel name is encoded in the URI after `/IncomingWebhook/`. You can define multiple channels like this: `CHANNEL1` `CHANNEL2`.\n\nAll roles will default to this variable if left unconfigured.\n\nYou can have different channels per role, by editing `DEFAULT_RECIPIENT_MSTEAMS` with the channel you want, in the following entries at the bottom of the same file:\n```text\nrole_recipients_msteams[sysadmin]=\"CHANNEL1\"\nrole_recipients_msteams[domainadmin]=\"CHANNEL2\"\nrole_recipients_msteams[dba]=\"databases CHANNEL3\"\nrole_recipients_msteams[webmaster]=\"CHANNEL4\"\nrole_recipients_msteams[proxyadmin]=\"CHANNEL5\"\nrole_recipients_msteams[sitemgr]=\"CHANNEL6\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Microsoft Teams (office.com) global notification options\n\nSEND_MSTEAMS=\"YES\"\nMSTEAMS_WEBHOOK_URL=\"https://outlook.office.com/webhook/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX@XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/IncomingWebhook/CHANNEL/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX\"\nDEFAULT_RECIPIENT_MSTEAMS=\"XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/msteams/metadata.yaml" }, { @@ -22145,9 +22049,9 @@ export const integrations = [ "Telegram" ], "overview": "# Telegram\n\nSend notifications to Telegram using Netdata's Agent alert notification feature, which supports dozens 
of endpoints, user roles, and more.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A bot token. To get one, contact the [@BotFather](https://t.me/BotFather) bot and send the command `/newbot` and follow the instructions. Invite your bot to a group where you want it to send messages.\n- The chat ID for every chat you want to send messages to. Invite [@myidbot](https://t.me/myidbot) bot to the group that will receive notifications, and write the command `/getgroupid@myidbot` to get the group chat ID. Group IDs start with a hyphen, supergroup IDs start with `-100`.\n- Terminal access to the Agent you wish to configure.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_TELEGRAM | Set `SEND_TELEGRAM` to YES | YES | yes |\n| TELEGRAM_BOT_TOKEN | set `TELEGRAM_BOT_TOKEN` to your bot token. | | yes |\n| DEFAULT_RECIPIENT_TELEGRAM | Set the `DEFAULT_RECIPIENT_TELEGRAM` variable in your config file to your Telegram chat ID (find it with @myidbot). Separate multiple chat IDs with spaces. To send alerts to a specific topic within a chat, use `chatID:topicID`. | | yes |\n\n##### DEFAULT_RECIPIENT_TELEGRAM\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_CUSTOM` can be edited in the following entries at the bottom of the same file:\n\n```conf\nrole_recipients_telegram[sysadmin]=\"-49999333324\"\nrole_recipients_telegram[domainadmin]=\"-49999333389\"\nrole_recipients_telegram[dba]=\"-10099992222\"\nrole_recipients_telegram[webmaster]=\"-10099992222 -49999333389\"\nrole_recipients_telegram[proxyadmin]=\"-49999333344\"\nrole_recipients_telegram[sitemgr]=\"-49999333876\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# telegram (telegram.org) global notification options\n\nSEND_TELEGRAM=\"YES\"\nTELEGRAM_BOT_TOKEN=\"111122223:7OpFlFFRzRBbrUUmIjj5HF9Ox2pYJZy5\"\nDEFAULT_RECIPIENT_TELEGRAM=\"-49999333876\"\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A bot token. To get one, contact the [@BotFather](https://t.me/BotFather) bot and send the command `/newbot` and follow the instructions. Invite your bot to a group where you want it to send messages.\n- The chat ID for every chat you want to send messages to. Invite [@myidbot](https://t.me/myidbot) bot to the group that will receive notifications, and write the command `/getgroupid@myidbot` to get the group chat ID. 
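With the bot token and chat ID in hand, you can confirm both work with a single Bot API call. A minimal sketch; `<BOT_TOKEN>` and the chat ID are placeholders:

```bash
# "ok":true in the response confirms the token and chat ID are usable together.
curl -s "https://api.telegram.org/bot<BOT_TOKEN>/sendMessage" \
  --data-urlencode "chat_id=-49999333876" \
  --data-urlencode "text=Netdata test notification"
```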
Group IDs start with a hyphen, supergroup IDs start with `-100`.\n- Terminal access to the Agent you wish to configure.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_TELEGRAM | Set `SEND_TELEGRAM` to YES | YES | yes |\n| TELEGRAM_BOT_TOKEN | set `TELEGRAM_BOT_TOKEN` to your bot token. | | yes |\n| DEFAULT_RECIPIENT_TELEGRAM | Set the `DEFAULT_RECIPIENT_TELEGRAM` variable in your config file to your Telegram chat ID (find it with @myidbot). Separate multiple chat IDs with spaces. To send alerts to a specific topic within a chat, use `chatID:topicID`. | | yes |\n\n##### DEFAULT_RECIPIENT_TELEGRAM\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_CUSTOM` can be edited in the following entries at the bottom of the same file:\n\n```text\nrole_recipients_telegram[sysadmin]=\"-49999333324\"\nrole_recipients_telegram[domainadmin]=\"-49999333389\"\nrole_recipients_telegram[dba]=\"-10099992222\"\nrole_recipients_telegram[webmaster]=\"-10099992222 -49999333389\"\nrole_recipients_telegram[proxyadmin]=\"-49999333344\"\nrole_recipients_telegram[sitemgr]=\"-49999333876\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# telegram (telegram.org) global notification options\n\nSEND_TELEGRAM=\"YES\"\nTELEGRAM_BOT_TOKEN=\"111122223:7OpFlFFRzRBbrUUmIjj5HF9Ox2pYJZy5\"\nDEFAULT_RECIPIENT_TELEGRAM=\"-49999333876\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/telegram/metadata.yaml" }, { @@ -22164,11 +22068,312 @@ export const integrations = [ "Twilio" ], "overview": "# Twilio\n\nSend notifications to Twilio using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Get your SID, and Token from https://www.twilio.com/console\n- Terminal access to the Agent you wish to configure\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou 
can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_TWILIO | Set `SEND_TWILIO` to YES | YES | yes |\n| TWILIO_ACCOUNT_SID | set `TWILIO_ACCOUNT_SID` to your account SID. | | yes |\n| TWILIO_ACCOUNT_TOKEN | Set `TWILIO_ACCOUNT_TOKEN` to your account token. | | yes |\n| TWILIO_NUMBER | Set `TWILIO_NUMBER` to your account's number. | | yes |\n| DEFAULT_RECIPIENT_TWILIO | Set DEFAULT_RECIPIENT_TWILIO to the number you want the alert notifications to be sent to. You can define multiple numbers like this: +15555555555 +17777777777. | | yes |\n\n##### DEFAULT_RECIPIENT_TWILIO\n\nYou can then have different recipients per role, by editing DEFAULT_RECIPIENT_TWILIO with the recipient's number you want, in the following entries at the bottom of the same file:\n\n```conf\nrole_recipients_twilio[sysadmin]=\"+15555555555\"\nrole_recipients_twilio[domainadmin]=\"+15555555556\"\nrole_recipients_twilio[dba]=\"+15555555557\"\nrole_recipients_twilio[webmaster]=\"+15555555558\"\nrole_recipients_twilio[proxyadmin]=\"+15555555559\"\nrole_recipients_twilio[sitemgr]=\"+15555555550\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Twilio (twilio.com) SMS options\n\nSEND_TWILIO=\"YES\"\nTWILIO_ACCOUNT_SID=\"xxxxxxxxx\"\nTWILIO_ACCOUNT_TOKEN=\"xxxxxxxxxx\"\nTWILIO_NUMBER=\"xxxxxxxxxxx\"\nDEFAULT_RECIPIENT_TWILIO=\"+15555555555\"\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Get your SID, and Token from https://www.twilio.com/console\n- Terminal access to the Agent you wish to configure\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_TWILIO | Set `SEND_TWILIO` to YES | YES | yes |\n| TWILIO_ACCOUNT_SID | set `TWILIO_ACCOUNT_SID` to your account SID. | | yes |\n| TWILIO_ACCOUNT_TOKEN | Set `TWILIO_ACCOUNT_TOKEN` to your account token. | | yes |\n| TWILIO_NUMBER | Set `TWILIO_NUMBER` to your account's number. | | yes |\n| DEFAULT_RECIPIENT_TWILIO | Set DEFAULT_RECIPIENT_TWILIO to the number you want the alert notifications to be sent to. You can define multiple numbers like this: +15555555555 +17777777777. 
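To check the SID, token, and number before relying on Netdata, you can send one SMS through Twilio's REST API. A minimal sketch reusing the option names above as shell variables, with placeholder values:

```bash
# Fill these in from https://www.twilio.com/console
TWILIO_ACCOUNT_SID="xxxxxxxxx"
TWILIO_ACCOUNT_TOKEN="xxxxxxxxxx"
TWILIO_NUMBER="xxxxxxxxxxx"

# Queue a test SMS; a JSON response with "status": "queued" means it was accepted.
curl -s -X POST "https://api.twilio.com/2010-04-01/Accounts/$TWILIO_ACCOUNT_SID/Messages.json" \
  -u "$TWILIO_ACCOUNT_SID:$TWILIO_ACCOUNT_TOKEN" \
  --data-urlencode "From=$TWILIO_NUMBER" \
  --data-urlencode "To=+15555555555" \
  --data-urlencode "Body=Netdata test notification"
```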
| | yes |\n\n##### DEFAULT_RECIPIENT_TWILIO\n\nYou can then have different recipients per role, by editing DEFAULT_RECIPIENT_TWILIO with the recipient's number you want, in the following entries at the bottom of the same file:\n\n```text\nrole_recipients_twilio[sysadmin]=\"+15555555555\"\nrole_recipients_twilio[domainadmin]=\"+15555555556\"\nrole_recipients_twilio[dba]=\"+15555555557\"\nrole_recipients_twilio[webmaster]=\"+15555555558\"\nrole_recipients_twilio[proxyadmin]=\"+15555555559\"\nrole_recipients_twilio[sitemgr]=\"+15555555550\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Twilio (twilio.com) SMS options\n\nSEND_TWILIO=\"YES\"\nTWILIO_ACCOUNT_SID=\"xxxxxxxxx\"\nTWILIO_ACCOUNT_TOKEN=\"xxxxxxxxxx\"\nTWILIO_NUMBER=\"xxxxxxxxxxx\"\nDEFAULT_RECIPIENT_TWILIO=\"+15555555555\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/twilio/metadata.yaml" }, + { + "id": "notify-cloud-awssns", + "meta": { + "name": "Amazon SNS", + "link": "https://aws.amazon.com/sns/", + "categories": [ + "notify.cloud" + ], + "icon_filename": "awssns.png" + }, + "keywords": [ + "awssns" + ], + "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the space as an **Admin**\n- The Space needs to be on a paid plan\n- An AWS account with AWS SNS access\n\n### AWS SNS Configuration\n\n1. [Setting up access for Amazon SNS](https://docs.aws.amazon.com/sns/latest/dg/sns-setting-up.html)\n2. Create a topic\n - On AWS SNS management console click on **Create topic**\n - On the **Details** section, select the standard type and provide the topic name\n - On the **Access policy** section, change the **Publishers** option to **Only the specified AWS accounts** and provide the Netdata AWS account **(123269920060)** that will be used to publish notifications to the topic being created\n3. Copy the **Topic ARN** in order to add it to your integration configuration in the Netdata Cloud UI\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the AWS SNS Integration\n5. 
A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Topic ARN: The topic provided on AWS SNS (with region) for where to publish your notifications.\n\n", + "integration_type": "cloud_notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", + "troubleshooting": "" + }, + { + "id": "notify-cloud-discord", + "meta": { + "name": "Discord", + "link": "https://discord.com/", + "categories": [ + "notify.cloud" + ], + "icon_filename": "discord.png" + }, + "keywords": [ + "discord", + "community" + ], + "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n\n### Discord Server Configuration\n\n1. Go to **Server Settings** --> **Integrations**\n2. **Create Webhook** or **View Webhooks** if you already have some defined\n3. Specify the **Name** and **Channel** on your new webhook\n4. Keep note of the **Webhook URL** as you will need it for the configuration of the integration on the Netdata Cloud UI\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Discord Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Webhook URL: The URL you copied from the previous section\n - Channel Parameters: Select the channel type which the notifications will be sent to, if it is a Forum channel, you need to specify a thread name\n\n", + "integration_type": "cloud_notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", + "troubleshooting": "" + }, + { + "id": "notify-cloud-ilert", + "meta": { + "name": "ilert", + "link": "https://www.ilert.com/", + "categories": [ + "notify.cloud" + ], + "icon_filename": "ilert.svg" + }, + "keywords": [ + "ilert" + ], + "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- You need to have permissions on ilert to add new Alert sources.\n\n### ilert Configuration\n\n1. From the navigation bar, open the Alert sources drop down and click \"Alert sources\"\n2. Click on the \"+ Create a new alert source\" button\n3. Configure an Alert source:\n - Select \"API integration\" and click Next\n - Provide a name that suits the source's purpose, for example \"Netdata\"\n - Select Escalation policy\n - Select Alert grouping (optional)\n4. Obtain the API Key:\n - Once the Alert source is created, you will be provided with an API key. Copy it in order to add it to your integration configuration in the Netdata Cloud UI\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. 
Click on the **+ Add configuration** button\n4. Add the ilert Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Alert Source API key: The key you copied in the ilert configuration step.\n\n", + "integration_type": "cloud_notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", + "troubleshooting": "" + }, + { + "id": "notify-cloud-mattermost", + "meta": { + "name": "Mattermost", + "link": "https://mattermost.com/", + "categories": [ + "notify.cloud" + ], + "icon_filename": "mattermost.png" + }, + "keywords": [ + "mattermost" + ], + "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- You need to have permissions on Mattermost to add new integrations.\n\n### Mattermost Server Configuration\n\n1. In Mattermost, go to Product menu > Integrations > Incoming Webhook\n - If you don't have the Integrations option, incoming webhooks may not be enabled on your Mattermost server or may be disabled for non-admins. They can be enabled by a System Admin from System Console > Integrations > Integration Management. Once incoming webhooks are enabled, continue with the steps below.\n2. Select Add Incoming Webhook and add a name and description for the webhook.\n3. Select the channel to receive webhook payloads, then select Add to create the webhook\n4. You will end up with a webhook URL that looks like `https://your-mattermost-server.com/hooks/xxx-generatedkey-xxx`, copy it in order to add it to your integration configuration in the Netdata Cloud UI\n\nFor more details please check [Incoming webhooks for Mattermost](https://developers.mattermost.com/integrate/webhooks/incoming/).\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Mattermost Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Webhook URL: URL provided on Mattermost for the channel you want to receive your notifications\n\n", + "integration_type": "cloud_notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", + "troubleshooting": "" + }, + { + "id": "notify-cloud-microsoftteams", + "meta": { + "name": "Microsoft Teams", + "link": "https://www.microsoft.com/en-us/microsoft-teams", + "categories": [ + "notify.cloud" + ], + "icon_filename": "teams.svg" + }, + "keywords": [ + "microsoft", + "teams" + ], + "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- A [Microsoft Teams Essentials subscription](https://www.microsoft.com/en-sg/microsoft-teams/essentials) or higher. 
Note that this is a **paid** feature\n\n### Microsoft Teams Configuration\n\n1. Navigate to the desired Microsoft Teams channel and hover over the channel name. Click the three dots icon that appears\n2. Select \"Workflows\" from the options, then choose \"Post to a channel when a webhook request is received\"\n3. **Configure Workflow Details**\n - Give your workflow a name, such as \"Netdata Alerts\"\n - Select the target team and channel where you will receive notifications\n - Click \"Add workflow\"\n4. Once the workflow is created, you will receive a unique Workflow Webhook URL, copy it, in order to add it to your integration configuration in the Netdata Cloud UI\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Microsoft Teams Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Microsoft Teams Incoming Webhook URL: The Incoming Webhook URL that you copied earlier.\n\n", + "integration_type": "cloud_notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", + "troubleshooting": "" + }, + { + "id": "notify-cloud-mobile-app", + "meta": { + "name": "Netdata Mobile App", + "link": "https://netdata.cloud", + "categories": [ + "notify.cloud" + ], + "icon_filename": "netdata.png" + }, + "keywords": [ + "mobile-app", + "phone", + "personal-notifications" + ], + "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- You need to have the Netdata Mobile App installed on your [Android](https://play.google.com/store/apps/details?id=cloud.netdata.android&pli=1) or [iOS](https://apps.apple.com/in/app/netdata-mobile/id6474659622) phone.\n\n### Netdata Mobile App Configuration and device linking\n\nIn order to login to the Netdata Mobile App\n\n1. Download the Netdata Mobile App from [Google Play Store](https://play.google.com/store/apps/details?id=cloud.netdata.android&pli=1) or the [iOS App Store](https://apps.apple.com/in/app/netdata-mobile/id6474659622)\n2. 
Open the App and Choose your Sign-in option\n - Email Address: Enter the email address of your registered Netdata Cloud account and click on the verification link received by email on your mobile device.\n - Sign-in with QR Code: Scan the QR code from the Netdata Cloud UI under **Profile Picture** --> **Settings** --> **Notifications** --> **Mobile App Notifications** --> **Show QR Code**\n\n### Netdata Configuration\n\nAfter linking your device, enable the toggle for **Mobile App Notifications** under the same settings panel.\n\n", + "integration_type": "cloud_notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", + "troubleshooting": "" + }, + { + "id": "notify-cloud-opsgenie", + "meta": { + "name": "Opsgenie", + "link": "https://www.atlassian.com/software/opsgenie", + "categories": [ + "notify.cloud" + ], + "icon_filename": "opsgenie.png" + }, + "keywords": [ + "opsgenie", + "atlassian" + ], + "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- You need to have permissions on Opsgenie to add new integrations.\n\n### Opsgenie Server Configuration\n\n1. Go to the integrations tab of your team, click **Add integration**\n2. Pick **API** from the available integrations and copy the API Key in order to add it to your integration configuration in the Netdata Cloud UI\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Opsgenie Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - API Key: The key provided on Opsgenie for the channel you want to receive your notifications\n\n", + "integration_type": "cloud_notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", + "troubleshooting": "" + }, + { + "id": "notify-cloud-pagerduty", + "meta": { + "name": "PagerDuty", + "link": "https://www.pagerduty.com/", + "categories": [ + "notify.cloud" + ], + "icon_filename": "pagerduty.png" + }, + "keywords": [ + "pagerduty" + ], + "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- You need to have a PagerDuty service to receive events using webhooks.\n\n### PagerDuty Server Configuration\n\n1. Create a service to receive events from your services directory page on PagerDuty\n2. On the third step of the service creation, select `Events API V2` Integration\n3. Once the service is created, you will be redirected to its configuration page, where you can copy the **Integration Key** and **Integration URL (Alert Events)** in order to add them to your integration configuration in the Netdata Cloud UI\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the PagerDuty Integration\n5. 
A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Integration Key: A 32 character key provided by PagerDuty to receive events on your service.\n - Integration URL (Alert Events): The URL provided by PagerDuty where Netdata Cloud will send notifications.\n\n",
+ "integration_type": "cloud_notification",
+ "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml",
+ "troubleshooting": ""
+ },
+ {
+ "id": "notify-cloud-rocketchat",
+ "meta": {
+ "name": "RocketChat",
+ "link": "https://www.rocket.chat/",
+ "categories": [
+ "notify.cloud"
+ ],
+ "icon_filename": "rocketchat.png"
+ },
+ "keywords": [
+ "rocketchat"
+ ],
+ "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- You need to have permissions on RocketChat to add new integrations.\n\n### RocketChat Server Configuration\n\nSteps to configure your RocketChat server to receive notifications from Netdata Cloud:\n\n1. In RocketChat, Navigate to Administration > Workspace > Integrations\n2. Click **+New** at the top right corner\n3. For more details about each parameter, check [Create a new incoming webhook](https://docs.rocket.chat/use-rocket.chat/workspace-administration/integrations#create-a-new-incoming-webhook)\n4. You will end up with a webhook endpoint that looks like `https://your-server.rocket.chat/hooks/YYYYYYYYYYYYYYYYYYYYYYYY/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX`, copy it in order to add it to your integration configuration in the Netdata Cloud UI\n\nFor more details please check [Incoming webhooks for RocketChat](https://docs.rocket.chat/use-rocket.chat/workspace-administration/integrations/).\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the RocketChat Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Webhook URL: URL provided on RocketChat for the channel you want to receive your notifications\n\n",
+ "integration_type": "cloud_notification",
+ "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml",
+ "troubleshooting": ""
+ },
+ {
+ "id": "notify-cloud-slack",
+ "meta": {
+ "name": "Slack",
+ "link": "https://slack.com/",
+ "categories": [
+ "notify.cloud"
+ ],
+ "icon_filename": "slack.png"
+ },
+ "keywords": [
+ "slack"
+ ],
+ "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- You need to have a Slack app on your workspace to receive the Webhooks.\n\n### Slack Server Configuration\n\n1. Create an app to receive webhook integrations. 
Check the [Slack documentation](https://api.slack.com/apps?new_app=1) for further details\n2. Install the app on your workspace\n3. Configure Webhook URLs for your workspace\n - On your app go to **Incoming Webhooks** and click on **activate incoming webhooks**\n - At the bottom of **Webhook URLs for Your Workspace** section you have **Add New Webhook to Workspace**\n - Specify the channel where you want your notifications to be delivered\n - Once completed, copy the Webhook URL in order to add it to your integration configuration in the Netdata Cloud UI\n\nFor more details please check [Incoming webhooks for Slack](https://slack.com/help/articles/115005265063-Incoming-webhooks-for-Slack).\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Slack Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Webhook URL: URL provided on Slack for the channel you want to receive your notifications\n\n", + "integration_type": "cloud_notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", + "troubleshooting": "" + }, + { + "id": "notify-cloud-splunk", + "meta": { + "name": "Splunk", + "link": "https://splunk.com/", + "categories": [ + "notify.cloud" + ], + "icon_filename": "splunk-black.svg" + }, + "keywords": [ + "Splunk" + ], + "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- The URI and token for your Splunk HTTP Event Collector. Refer to the [Splunk documentation](https://docs.splunk.com/Documentation/Splunk/latest/Data/UsetheHTTPEventCollector) for detailed instructions on how to set it up.\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Splunk Integration\n5. 
A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - HTTP Event Collector URI: The URI of your HTTP event collector in Splunk\n - HTTP Event Collector Token: The token that Splunk provided to you when you created the HTTP Event Collector\n\n", + "integration_type": "cloud_notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", + "troubleshooting": "" + }, + { + "id": "notify-cloud-telegram", + "meta": { + "name": "Telegram", + "link": "https://telegram.org/", + "categories": [ + "notify.cloud" + ], + "icon_filename": "telegram.svg" + }, + "keywords": [ + "Telegram" + ], + "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- The Telegram bot token, chat ID and optionally the topic ID\n\n### Telegram Configuration\n\n- Bot token: To create one bot, contact the [@BotFather](https://t.me/BotFather) bot and send the command `/newbot` and follow the instructions. **Start a conversation with your bot or invite it into the group where you want it to send notifications**.\n- To get the chat ID you have two options:\n - Contact the [@myidbot](https://t.me/myidbot) bot and send the `/getid` command to get your personal chat ID, or invite it into a group and use the `/getgroupid` command to get the group chat ID.\n - Alternatively, you can get the chat ID directly from the bot API. Send your bot a command in the chat you want to use, then check `https://api.telegram.org/bot{YourBotToken}/getUpdates`, eg. `https://api.telegram.org/bot111122223:7OpFlFFRzRBbrUUmIjj5HF9Ox2pYJZy5/getUpdates`\n- To get the topic ID, the easiest way is this: Post a message to that topic, then right-click on it and select `Copy Message Link`. Paste it on a scratchpad and notice that it has the following structure `https://t.me/c/XXXXXXXXXX/YY/ZZ`. The topic ID is `YY` (integer).\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Telegram Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Bot Token: The token of your bot\n - Chat ID: The chat id where your bot will deliver messages to\n - Topic ID: The identifier of the chat topic to which your bot will send messages. If omitted or 0, messages will be sent to the General topic. 
If topics are not supported, messages will be sent to the chat.\n\n", + "integration_type": "cloud_notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", + "troubleshooting": "" + }, + { + "id": "notify-cloud-victorops", + "meta": { + "name": "Splunk VictorOps", + "link": "https://www.splunk.com/en_us/about-splunk/acquisitions/splunk-on-call.html", + "categories": [ + "notify.cloud" + ], + "icon_filename": "victorops.svg" + }, + "keywords": [ + "VictorOps", + "Splunk", + "On-Call" + ], + "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- The Destination URL for your Splunk VictorOps REST Endpoint Integration. Refer to the [VictorOps documentation](https://help.victorops.com/knowledge-base/rest-endpoint-integration-guide) for detailed instructions.\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Splunk VictorOps Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Destination URL - The URL provided by VictorOps of your REST endpoint.\n\n", + "integration_type": "cloud_notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", + "troubleshooting": "" + }, + { + "id": "notify-cloud-webhook", + "meta": { + "name": "Webhook", + "link": "https://en.wikipedia.org/wiki/Webhook", + "categories": [ + "notify.cloud" + ], + "icon_filename": "webhook.svg" + }, + "keywords": [ + "generic webhooks", + "webhooks" + ], + "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- You need to have an app that allows you to receive webhooks following a predefined schema.\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Webhook integration\n5. A modal will be presented to you to enter the required details to enable the configuration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Webhook URL: The url of the service that Netdata will send notifications to. 
In order to keep the communication secured, Netdata only accepts HTTPS urls.\n - Extra headers: Optional key-value pairs that you can set to be included in the HTTP requests sent to the webhook URL.\n - Authentication Mechanism, Netdata webhook integration supports 3 different authentication mechanisms.\n - Mutual TLS (recommended): Default authentication mechanism used if no other method is selected\n - Basic: The client sends a request with an Authorization header that includes a base64-encoded string in the format **username:password**.\n - Bearer: The client sends a request with an Authorization header that includes a **bearer token**.\n\n### Webhook service\n\nA webhook service allows your application to receive real-time alerts from Netdata by sending HTTP requests to a specified URL.\n\nIn this section, we'll go over the steps to set up a generic webhook service, including adding headers, and implementing different types of authorization mechanisms.\n\n#### Netdata webhook integration\n\nNetdata webhook integration service will send alert and reachability notifications to the destination service as soon as they are detected.\n\nFor alert notifications, the content sent to the destination service contains a JSON object with the following properties:\n\n| field | type | description |\n|:----------------------------------|:--------------------------------|:--------------------------------------------------------------------------|\n| message | string | A summary message of the alert. |\n| alert | string | The alert the notification is related to. |\n| info | string | Additional info related with the alert. |\n| chart | string | The chart associated with the alert. |\n| context | string | The chart context. |\n| space | string | The space where the node that raised the alert is assigned. |\n| Rooms | object\\[object(string,string)\\] | Object with list of Rooms names and urls where the node belongs to. |\n| family | string | Context family. |\n| class | string | Classification of the alert, e.g. `Error`. |\n| severity | string | Alert severity, can be one of `warning`, `critical` or `clear`. |\n| date | string | Date of the alert in ISO8601 format. |\n| duration | string | Duration the alert has been raised. |\n| additional_active_critical_alerts | integer | Number of additional critical alerts currently existing on the same node. |\n| additional_active_warning_alerts | integer | Number of additional warning alerts currently existing on the same node. |\n| alert_url | string | Netdata Cloud URL for this alert. |\n\nFor reachability notifications, the JSON object will contain the following properties:\n\n| field | type | description |\n|:-----------------|:--------|:------------------------------------------------------------------------------------------------------------------------------|\n| message | string | A summary message of the reachability alert. |\n| url | string | Netdata Cloud URL for the host experiencing the reachability alert. |\n| host | string | The hostname experiencing the reachability alert. |\n| severity | string | Severity for this notification. If host is reachable, severity will be `info`, if host is unreachable, it will be `critical`. |\n| status | object | An object with the status information. 
|\n| status.reachable | boolean | `true` if host is reachable, `false` otherwise |\n| status.text | string | Can be `reachable` or `unreachable` |\n\n#### Extra headers\n\nWhen setting up a webhook service, the user can specify a set of headers to be included in the HTTP requests sent to the webhook URL.\n\nBy default, the following headers will be sent in the HTTP request\n\n | **Header** | **Value** |\n |:------------:|------------------|\n | Content-Type | application/json |\n\n#### Authentication mechanisms\n\nNetdata webhook integration supports 3 different authentication mechanisms:\n\n##### Mutual TLS authentication (recommended)\n\nIn mutual Transport Layer Security (mTLS) authentication, the client and the server authenticate each other using X.509 certificates. This ensures that the client is connecting to the intended server, and that the server is only accepting connections from authorized clients.\n\nThis is the default authentication mechanism used if no other method is selected.\n\nTo take advantage of mutual TLS, you can configure your server to verify Netdata's client certificate. In order to achieve this, the Netdata client sending the notification supports mutual TLS (mTLS) to identify itself with a client certificate that your server can validate.\n\nThe steps to perform this validation are as follows:\n\n- Store Netdata CA certificate on a file in your disk. The content of this file should be:\n\n
\n Netdata CA certificate\n\n ```text\n -----BEGIN CERTIFICATE-----\n MIIF0jCCA7qgAwIBAgIUDV0rS5jXsyNX33evHEQOwn9fPo0wDQYJKoZIhvcNAQEN\n BQAwgYAxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH\n Ew1TYW4gRnJhbmNpc2NvMRYwFAYDVQQKEw1OZXRkYXRhLCBJbmMuMRIwEAYDVQQL\n EwlDbG91ZCBTUkUxGDAWBgNVBAMTD05ldGRhdGEgUm9vdCBDQTAeFw0yMzAyMjIx\n MjQzMDBaFw0zMzAyMTkxMjQzMDBaMIGAMQswCQYDVQQGEwJVUzETMBEGA1UECBMK\n Q2FsaWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEWMBQGA1UEChMNTmV0\n ZGF0YSwgSW5jLjESMBAGA1UECxMJQ2xvdWQgU1JFMRgwFgYDVQQDEw9OZXRkYXRh\n IFJvb3QgQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCwIg7z3R++\n ppQYYVVoMIDlhWO3qVTMsAQoJYEvVa6fqaImUBLW/k19LUaXgUJPohB7gBp1pkjs\n QfY5dBo8iFr7MDHtyiAFjcQV181sITTMBEJwp77R4slOXCvrreizhTt1gvf4S1zL\n qeHBYWEgH0RLrOAqD0jkOHwewVouO0k3Wf2lEbCq3qRk2HeDvkv0LR7sFC+dDms8\n fDHqb/htqhk+FAJELGRqLeaFq1Z5Eq1/9dk4SIeHgK5pdYqsjpBzOTmocgriw6he\n s7F3dOec1ZZdcBEAxOjbYt4e58JwuR81cWAVMmyot5JNCzYVL9e5Vc5n22qt2dmc\n Tzw2rLOPt9pT5bzbmyhcDuNg2Qj/5DySAQ+VQysx91BJRXyUimqE7DwQyLhpQU72\n jw29lf2RHdCPNmk8J1TNropmpz/aI7rkperPugdOmxzP55i48ECbvDF4Wtazi+l+\n 4kx7ieeLfEQgixy4lRUUkrgJlIDOGbw+d2Ag6LtOgwBiBYnDgYpvLucnx5cFupPY\n Cy3VlJ4EKUeQQSsz5kVmvotk9MED4sLx1As8V4e5ViwI5dCsRfKny7BeJ6XNPLnw\n PtMh1hbiqCcDmB1urCqXcMle4sRhKccReYOwkLjLLZ80A+MuJuIEAUUuEPCwywzU\n R7pagYsmvNgmwIIuJtB6mIJBShC7TpJG+wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMC\n AQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU9IbvOsPSUrpr8H2zSafYVQ9e\n Ft8wDQYJKoZIhvcNAQENBQADggIBABQ08aI31VKZs8jzg+y/QM5cvzXlVhcpkZsY\n 1VVBr0roSBw9Pld9SERrEHto8PVXbadRxeEs4sKivJBKubWAooQ6NTvEB9MHuGnZ\n VCU+N035Gq/mhBZgtIs/Zz33jTB2ju3G4Gm9VTZbVqd0OUxFs41Iqvi0HStC3/Io\n rKi7crubmp5f2cNW1HrS++ScbTM+VaKVgQ2Tg5jOjou8wtA+204iYXlFpw9Q0qnP\n qq6ix7TfLLeRVp6mauwPsAJUgHZluz7yuv3r7TBdukU4ZKUmfAGIPSebtB3EzXfH\n 7Y326xzv0hEpjvDHLy6+yFfTdBSrKPsMHgc9bsf88dnypNYL8TUiEHlcTgCGU8ts\n ud8sWN2M5FEWbHPNYRVfH3xgY2iOYZzn0i+PVyGryOPuzkRHTxDLPIGEWE5susM4\n X4bnNJyKH1AMkBCErR34CLXtAe2ngJlV/V3D4I8CQFJdQkn9tuznohUU/j80xvPH\n FOcDGQYmh4m2aIJtlNVP6+/92Siugb5y7HfslyRK94+bZBg2D86TcCJWaaZOFUrR\n Y3WniYXsqM5/JI4OOzu7dpjtkJUYvwtg7Qb5jmm8Ilf5rQZJhuvsygzX6+WM079y\n nsjoQAm6OwpTN5362vE9SYu1twz7KdzBlUkDhePEOgQkWfLHBJWwB+PvB1j/cUA3\n 5zrbwvQf\n -----END CERTIFICATE-----\n ```\n\n
\n\n- Enable client certificate validation on the web server that is doing the TLS termination. Below are examples of how to perform this configuration in `NGINX` and `Apache`.\n\n **NGINX**\n\n ```bash\n server {\n listen 443 ssl default_server;\n\n # ... existing SSL configuration for server authentication ...\n ssl_verify_client on;\n ssl_client_certificate /path/to/Netdata_CA.pem;\n\n location / {\n if ($ssl_client_s_dn !~ \"CN=app.netdata.cloud\") {\n return 403;\n }\n # ... existing location configuration ...\n }\n }\n ```\n\n **Apache**\n\n ```bash\n Listen 443\n\n <VirtualHost *:443>\n # ... existing SSL configuration for server authentication ...\n SSLVerifyClient require\n SSLCACertificateFile \"/path/to/Netdata_CA.pem\"\n\n <Location />\n Require expr \"%{SSL_CLIENT_S_DN_CN} == 'app.netdata.cloud'\"\n # ... existing directory configuration ...\n </Location>\n </VirtualHost>\n ```\n\n##### Basic authentication\n\nIn basic authorization, the client sends a request with an Authorization header that includes a base64-encoded string in the format username:password. The server then uses this information to authenticate the client. If this authentication method is selected, the user can set the user and password that will be used when connecting to the destination service.\n\n##### Bearer token authentication\n\nIn bearer token authentication, the client sends a request with an Authorization header that includes a bearer token. The server then uses this token to authenticate the client. Bearer tokens are typically generated by an authentication service, and are passed to the client after a successful authentication. If this method is selected, the user can set the token to be used for connecting to the destination service.\n\n##### Challenge secret\n\nTo validate that you have ownership of the web application that will receive the webhook events, Netdata uses a challenge-response check mechanism.\n\nThis mechanism works as follows:\n\n- The challenge secret parameter that you provide is a shared secret between only you and Netdata.\n- On your request for creating a new Webhook integration, Netdata will make a GET request to the URL of the webhook, adding a query parameter `crc_token`, consisting of a random string.\n- You will receive this request on your application and it must construct a response consisting of a base64-encoded HMAC SHA-256 hash created from the crc_token and the shared secret. 
The response will be in the format:\n\n ```json\n {\n \"response_token\": \"sha256=9GKoHJYmcHIkhD+C182QWN79YBd+D+Vkj4snmZrfNi4=\"\n }\n ```\n\n- Netdata will compare your application's response with the hash that it will generate using the challenge secret, and if they are the same, the integration creation will succeed.\n\nNetdata does this validation every time you update your integration configuration.\n\n- Response requirements:\n - A base64 encoded HMAC SHA-256 hash created from the crc_token and the shared secret.\n - Valid response_token and JSON format.\n - Latency less than 5 seconds.\n - 200 HTTP response code.\n\n**Example response token generation in Python:**\n\nHere you can see how to define a handler for a Flask application in Python 3:\n\n```python\nimport base64\nimport hashlib\nimport hmac\nimport json\n\nfrom flask import Flask, request\n\napp = Flask(__name__)\n\nkey = 'YOUR_CHALLENGE_SECRET'\n\n@app.route('/webhooks/netdata')\ndef webhook_challenge():\n    # read the crc_token query parameter sent by Netdata\n    token = request.args.get('crc_token').encode('ascii')\n\n    # creates HMAC SHA-256 hash from incoming token and your shared secret\n    sha256_hash_digest = hmac.new(key.encode(),\n                                  msg=token,\n                                  digestmod=hashlib.sha256).digest()\n\n    # construct response data with base64 encoded hash\n    response = {\n        'response_token': 'sha256=' + base64.b64encode(sha256_hash_digest).decode('ascii')\n    }\n\n    # returns properly formatted json response\n    return json.dumps(response)\n```\n\n",
+ "integration_type": "cloud_notification",
+ "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml",
+ "troubleshooting": ""
+ },
+ {
+ "id": "logs-systemd-journal",
+ "meta": {
+ "name": "Systemd Journal Logs",
+ "link": "https://github.com/netdata/netdata/blob/master/src/collectors/systemd-journal.plugin/README.md",
+ "categories": [
+ "logs"
+ ],
+ "icon_filename": "netdata.png"
+ },
+ "keywords": [
+ "systemd",
+ "journal",
+ "logs"
+ ],
+ "overview": "# Systemd Journal Logs\n\nThe `systemd` journal plugin by Netdata makes viewing, exploring and analyzing `systemd` journal logs simple and efficient.\n\nIt automatically discovers available journal sources, allows advanced filtering, offers interactive visual representations and supports exploring the logs of both individual servers and the logs on infrastructure wide journal centralization servers.\n\nThe plugin automatically detects the available journal sources, based on the journal files available in `/var/log/journal` (persistent logs) and `/run/log/journal` (volatile logs).\n\n\n## Visualization\n\nYou can start exploring `systemd` journal logs on the \"Logs\" tab of the Netdata UI.\n\n\n## Key features\n\n- Works on both **individual servers** and **journal centralization servers**.\n- Supports `persistent` and `volatile` journals.\n- Supports `system`, `user`, `namespaces` and `remote` journals.\n- Allows filtering on **any journal field** or **field value**, for any time-frame.\n- Allows **full text search** (`grep`) on all journal fields, for any time-frame.\n- Provides a **histogram** for log entries over time, with a break down per field-value, for any field and any time-frame.\n- Works directly on journal files, without any other third-party components.\n- Supports coloring log entries, the same way `journalctl` does.\n- In PLAY mode provides the same experience as `journalctl -f`, showing new log entries immediately after they are received.\n",
+ "setup": "## Setup\n\n## Prerequisites\n\n- A Netdata Cloud account\n\n\n## Configuration\n\nThere is no configuration needed for this integration.\n",
+ 
"integration_type": "logs", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/logs/metadata.yaml" + }, + { + "id": "windows-event-logs", + "meta": { + "name": "Windows Event Logs", + "link": "https://github.com/netdata/netdata/blob/master/src/collectors/windows-events.plugin/README.md", + "categories": [ + "logs", + "data-collection.windows-systems" + ], + "icon_filename": "windows.svg" + }, + "keywords": [ + "windows", + "windows events", + "logs" + ], + "overview": "# Windows Event Logs\n\nThe Windows Events plugin by Netdata makes viewing, exploring and analyzing Windows Events simple and\nefficient.\n\nThe plugin automatically detects all the available channels and offers a list of \"Event Channels\".\n\nBy default, it aggregates events from all event channels, providing a unified view of all events.\n\n\n## Visualization\n\nYou can start exploring Windows event logs on the \"Logs\" tab of the Netdata UI.\n\n\n## Key features\n\n- Supports **Windows Event Logs (WEL)**.\n- Supports **Event Tracing for Windows (ETW)** and **TraceLogging (TL)**, when events are routed to Event Log.\n- Allows filtering on all System Events fields.\n- Allows **full text search** (`grep`) on all System and User fields.\n- Provides a **histogram** for log entries over time, with a break down per field-value, for any System Event field and any\n time-frame.\n- Supports coloring log entries based on severity.\n- In PLAY mode it \"tails\" all the Events, showing new log entries immediately after they are received.\n", + "setup": "## Setup\n\n## Prerequisites\n\n- Netdata Cloud paid subscription\n\n\n## Configuration\n\nThere is no configuration needed for this integration.\n", + "integration_type": "logs", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/logs/metadata.yaml" + }, { "id": "oidc-authentication", "meta": { @@ -22184,7 +22389,7 @@ export const integrations = [ "oidc" ], "overview": "# OIDC\n\nIntegrate your organization's Authorization Servers with Netdata to better manage your team's access controls to Netdata Cloud.\n", - "setup": "## Setup\n\n### Prerequisites\n- Authorization Server with OIDC protocol supported\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- Space needs to be on a paid plan\n\n### Setting up Authorization Server\nYour server should follow the [full specification for OIDC](https://openid.net/specs/openid-connect-core-1_0.html).\nIn order to integrate your Authorization Server with Netdata the creation of a client is required. Clients are applications and services that can request authentication of a user.\nThe access settings for your client are the following:\n\n| field | value |\n| :-- | :-- |\n| Root URL | `https://app.netdata.cloud/`` |\n| Home/Initiate login URL | `https://app.netdata.cloud/api/v2/auth/account/auth-server?iss={your-server-issuer-url}&redirect_uri=https://app.netdata.cloud/sign-in®ister_uri=https://app.netdata.cloud/sign-up/verify` |\n| Redirect URL | `https://app.netdata.cloud/api/v2/auth/account/auth-server/callback` |\n\n### Netdata Configuration Steps\n1. Click on the Space settings cog (located above your profile icon)\n2. Click on the **User Management** section and access **Authentication and Authorization** tab.\n3. On the OIDC card, click on **Configure**\n4. Fill in the required credentials:\n - **Issuer URL** the Authorization Server Issuer URL, e.g. 
`https://my-auth-server.com/`\n - **Client ID** the Client ID from the created client\n - **Client Secret** the Client Secret from the created client\n - **Authorization URL** the Authorization Server authorization URL, e.g. `https://my-auth-server.com/openid-connect/auth`\n - **Token URL** the Authorization Server token URL, e.g. `https://my-auth-server.com/openid-connect/token`\n - **User URL** the Authorization Server user info URL, e.g. `https://my-auth-server.com/openid-connect/userinfo`\n\n### Supported features\n* SP-initiated SSO (Single Sign-On)\n* IdP-initiated SSO\n\n### SP-initiated SSO\n\nIf you start your authentication flow from Netdata sign-in page please check [these steps](/docs/netdata-cloud/authentication-and-authorization/enterprise-sso-authentication.md#from-netdata-sign-up-page).\n\n\n### Reference\nhttps://openid.net/developers/how-connect-works/\n\n",
+ "setup": "## Setup\n\n### Prerequisites\n- Authorization Server with OIDC protocol supported\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- Space needs to be on a paid plan\n\n### Setting up Authorization Server\nYour server should follow the [full specification for OIDC](https://openid.net/specs/openid-connect-core-1_0.html).\nIn order to integrate your Authorization Server with Netdata the creation of a client is required. Clients are applications and services that can request authentication of a user.\nThe access settings for your client are the following:\n\n| field | value |\n| :-- | :-- |\n| Root URL | `https://app.netdata.cloud/` |\n| Home/Initiate login URL | `https://app.netdata.cloud/api/v2/auth/account/auth-server?iss={your-server-issuer-url}&redirect_uri=https://app.netdata.cloud/sign-in&register_uri=https://app.netdata.cloud/sign-up/verify` |\n| Redirect URL | `https://app.netdata.cloud/api/v2/auth/account/auth-server/callback` |\n\n### Netdata Configuration Steps\n1. Click on the Space settings cog (located above your profile icon)\n2. Click on the **User Management** section and access **Authentication and Authorization** tab.\n3. On the OIDC card, click on **Configure**\n4. Fill in the required credentials:\n - **Issuer URL** the Authorization Server Issuer URL, e.g. `https://my-auth-server.com/`\n - **Client ID** the Client ID from the created client\n - **Client Secret** the Client Secret from the created client\n - **Authorization URL** the Authorization Server authorization URL, e.g. `https://my-auth-server.com/openid-connect/auth`\n - **Token URL** the Authorization Server token URL, e.g. `https://my-auth-server.com/openid-connect/token`\n - **User URL** the Authorization Server user info URL, e.g. 
`https://my-auth-server.com/openid-connect/userinfo`\n\n### Supported features\n* SP-initiated SSO (Single Sign-On)\n* IdP-initiated SSO\n\n### SP-initiated SSO\n\nIf you start your authentication flow from Netdata sign-in page please check [these steps](https://github.com/netdata/netdata/blob/master/docs/netdata-cloud/authentication-and-authorization/enterprise-sso-authentication.md#from-netdata-sign-up-page).\n\n\n### Reference\nhttps://openid.net/developers/how-connect-works/\n\n", "integration_type": "authentication", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-authentication/metadata.yaml", "troubleshooting": "" @@ -22205,7 +22410,7 @@ export const integrations = [ "okta-sso" ], "overview": "# Okta SSO\n\nIntegrate your organization's Okta account with Netdata to better manage your team's access controls to Netdata Cloud.\n", - "setup": "## Setup\n\n### Prerequisites\n- An Okta account\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- Space needs to be on a paid plan\n\n### Setting up Okta\nSteps needed to be done on Okta Admin Portal:\n1. Click on **Applications** tab and choose to **Browse App Catalogue**\n2. Find Netdata's preconfigured app for easy setup and click **Add Integration**\n3. Give the app, that will be in your apps dashboard, the preferred **Application label** and click **Next** to move to the Sign-On options tab\n4. In the **Sign-On Options** all the values we expect are already filled and no additional data is required\n5. Click **Done**. You are able to go back and edit any fields later if need be\n6. Go to the **Assignments** tab and enter the People or Group assignments as per your organization\u2019s policies\n\n### Netdata Configuration Steps\n1. Click on the Space settings cog (located above your profile icon)\n2. Click on the **User Management** section and access **Authentication and Authorization** tab.\n3. On the Okta SSO card, click on **Configure**\n4. Fill in the [required credentials](https://developer.okta.com/docs/guides/find-your-app-credentials/main/), you get them from **Okta Admin Portal**:\n - **Issuer URL** you can get it from your profile icon on top, e.g. `https://company-name.okta.com`\n - **Client ID** you can get it from **General** tab on application you configured on Okta\n - **Client Secret** you can get it from **General** tab on application you configured on Okta\n\n### Supported features\n* SP-initiated SSO (Single Sign-On)\n* IdP-initiated SSO\n\n### SP-initiated SSO\n\nIf you start your authentication flow from Netdata sign-in page please check [these steps](/docs/netdata-cloud/authentication-and-authorization/enterprise-sso-authentication.md#from-netdata-sign-up-page).\n\n", + "setup": "## Setup\n\n### Prerequisites\n- An Okta account\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- Space needs to be on a paid plan\n\n### Setting up Okta\nSteps needed to be done on Okta Admin Portal:\n1. Click on **Applications** tab and choose to **Browse App Catalogue**\n2. Find Netdata's preconfigured app for easy setup and click **Add Integration**\n3. Give the app, that will be in your apps dashboard, the preferred **Application label** and click **Next** to move to the Sign-On options tab\n4. In the **Sign-On Options** all the values we expect are already filled and no additional data is required\n5. Click **Done**. You are able to go back and edit any fields later if need be\n6. 
Go to the **Assignments** tab and enter the People or Group assignments as per your organization\u2019s policies\n\n### Netdata Configuration Steps\n1. Click on the Space settings cog (located above your profile icon)\n2. Click on the **User Management** section and access **Authentication and Authorization** tab.\n3. On the Okta SSO card, click on **Configure**\n4. Fill in the [required credentials](https://developer.okta.com/docs/guides/find-your-app-credentials/main/), you get them from **Okta Admin Portal**:\n - **Issuer URL** you can get it from your profile icon on top, e.g. `https://company-name.okta.com`\n - **Client ID** you can get it from **General** tab on application you configured on Okta\n - **Client Secret** you can get it from **General** tab on application you configured on Okta\n\n### Supported features\n* SP-initiated SSO (Single Sign-On)\n* IdP-initiated SSO\n\n### SP-initiated SSO\n\nIf you start your authentication flow from Netdata sign-in page please check [these steps](https://github.com/netdata/netdata/blob/master/docs/netdata-cloud/authentication-and-authorization/enterprise-sso-authentication.md#from-netdata-sign-up-page).\n\n", "integration_type": "authentication", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-authentication/metadata.yaml", "troubleshooting": "" @@ -22225,7 +22430,7 @@ export const integrations = [ "identity-management" ], "overview": "# SCIM\n\nThe System for Cross-domain Identity Management (SCIM) specification is designed to simplify the management of user identities in cloud-based applications and services.\n", - "setup": "## Setup\n\n### Prerequisites\n- A Netdata Cloud account\n- Admin access to the Space\n- The Space must be on a paid plan\n- OIDC/SSO integration must already be enabled in one of your Spaces\n\n### Netdata Configuration Steps\n1. Click on the Space settings cog (located above your profile icon).\n2. Click on the **User Management** section and access **Authentication and Authorization** tab.\n3. In the SCIM card, click on **Activate**.\n4. Depending on your situation:\n - If OIDC/SSO integration is already enabled in your Space, click **Activate**.\n - If you already have a SCIM integration in another Space and want to create a linked integration here, enter the SCIM token from the original integration and click **Activate**.\n5. If the setup is successful, you will receive two parameters:\n - **Base URL**: Use this URL as the base URL for your SCIM client.\n - **Token**: Use this token for Bearer Authentication with your SCIM client.\n\n### Rotating the SCIM Token\nYou can rotate the token provided during SCIM integration setup if needed.\n\nSteps to rotate the token:\n1. Click on the Space settings cog (located above your profile icon).\n2. Click on the **User Management** section and access **Authentication and Authorization** tab.\n3. In the already configured SCIM card, click **Configure**.\n4. Click **Regenerate Token**.\n5. If successful, you will receive a new token for Bearer Authentication with your SCIM client.\n\n### Supported Features\nThis integration adheres to SCIM v2 specifications. 
Supported features include:\n\n- User Resource Management (urn:ietf:params:scim:schemas:core:2.0:User)\n- Patch operations: Supported\n- Bulk operations: Not supported\n- Filtering: Supported (max results: 200)\n- Password synchronization: Not supported, as we rely on SSO/OIDC authentication\n- eTag: Not supported\n- Authentication schemes: OAuth Bearer Token\n\n### User Keying Between SCIM and OIDC\nOur SCIM (System for Cross-domain Identity Management) integration utilizes OIDC (OpenID Connect) to authenticate users.\nTo ensure users are correctly identified and authenticated between SCIM and OIDC, we use the following mapping:\n\n- SCIM externalID \u2194 OIDC sub\n\nThis mapping ensures that the identity of users remains consistent and secure across both systems.\n\n**Important**: Ensure that your OIDC and SCIM systems follow this mapping strictly.\nThe externalID in SCIM must correspond to the subfield in OIDC. Any deviation from this mapping may result\nin incorrect user identification and authentication failures.\n\n### Reference\n[SCIM Specification](https://scim.org)\n\n", + "setup": "## Setup\n\n### Prerequisites\n- A Netdata Cloud account\n- Admin access to the Space\n- The Space must be on a paid plan\n- OIDC/SSO integration must already be enabled in one of your Spaces\n\n### Supported Features\nThis integration adheres to SCIM v2 specifications. Supported features include:\n\n- User Resource Management (urn:ietf:params:scim:schemas:core:2.0:User)\n- Create users\n- Update user attributes\n- Deactivate users\n- Patch operations: Supported\n- Bulk operations: Not supported\n- Filtering: Supported (max results: 200)\n- Password synchronization: Not supported, as we rely on SSO/OIDC authentication\n- eTag: Not supported\n- Authentication schemes: OAuth Bearer Token\n\n### Netdata Configuration Steps\n1. Click on the Space settings cog (located above your profile icon).\n2. Click on the **User Management** section and access **Authentication and Authorization** tab.\n3. In the SCIM card, click on **Activate**.\n4. Depending on your situation:\n - If OIDC/SSO integration is already enabled in your Space, click **Activate**.\n - If you already have a SCIM integration in another Space and want to create a linked integration here, enter the SCIM token from the original integration and click **Activate**.\n5. If the setup is successful, you will receive two parameters:\n - **Base URL**: Use this URL as the base URL for your SCIM client.\n - **Token**: Use this token for Bearer Authentication with your SCIM client.\n\n## Client Configuration Steps\n\n### Okta\nIf you're configuring SCIM in Okta, and you already have the Token from the previous section, follow these steps:\n\n1. Go to the **Applications** menu on the left-hand panel and select the **Netdata** application.\n2. In the **Netdata** application, navigate to the **Provisioning** tab.\n3. Click on **Configure API Integration** and check the box for **Enable API Integration**.\n4. Enter the Token (obtained in the *Netdata Configuration Steps* section) into the **API Token** field, then click **Test API Credentials** to ensure the connection is successful.\n5. If the test is successful, click **Save** to apply the configuration.\n\n## Troubleshoot\n\n### Rotating the SCIM Token\nYou can rotate the token provided during SCIM integration setup if needed.\n\nSteps to rotate the token:\n1. Click on the Space settings cog (located above your profile icon).\n2. 
Click on the **User Management** section and access **Authentication and Authorization** tab.\n3. In the already configured SCIM card, click **Configure**.\n4. Click **Regenerate Token**.\n5. If successful, you will receive a new token for Bearer Authentication with your SCIM client.\n\n### User Keying Between SCIM and OIDC\nOur SCIM (System for Cross-domain Identity Management) integration utilizes OIDC (OpenID Connect) to authenticate users.\nTo ensure users are correctly identified and authenticated between SCIM and OIDC, we use the following mapping:\n\n- SCIM externalID \u2194 OIDC sub\n\nThis mapping ensures that the identity of users remains consistent and secure across both systems.\n\n**Important**: Ensure that your OIDC and SCIM systems follow this mapping strictly.\nThe externalID in SCIM must correspond to the `sub` field in OIDC. Any deviation from this mapping may result\nin incorrect user identification and authentication failures.\n\n## FAQ\n\n### Why aren\u2019t users automatically added to Netdata spaces when they\u2019re created through SCIM?\n\nCurrently, our SCIM server supports only the User resource. We plan to add support for the Group resource in the future.\n\nIn a Netdata space, users can belong to multiple rooms and have different roles (e.g., admin, manager). Additionally, the same organization may have multiple spaces.\n\nAs we don't yet support groups, when a user is created through SCIM, we don\u2019t have a way to determine which spaces, rooms, and roles the user should be assigned to.\n\nOnce we implement support for the Group resource, admins will be able to map SCIM groups to Netdata memberships, so this assignment will be done automatically.\n\nUntil then, SCIM can only be used to grant or block access to Netdata for users in your organization. After a user is created, it is up to the Netdata administrator to manually invite them to spaces and rooms, and to assign roles.\n\n### Reference\n[SCIM Specification](https://scim.org)\n\n", "integration_type": "authentication", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-authentication/metadata.yaml", "troubleshooting": "" diff --git a/integrations/integrations.json b/integrations/integrations.json index 08581d07a..06f022d76 100644 --- a/integrations/integrations.json +++ b/integrations/integrations.json @@ -1011,46 +1011,6 @@ "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml", "related_resources": "" }, - { - "meta": { - "plugin_name": "charts.d.plugin", - "module_name": "apcupsd", - "monitored_instance": { - "name": "APC UPS", - "link": "https://www.apc.com", - "categories": [ - "data-collection.ups" - ], - "icon_filename": "apc.svg" - }, - "related_resources": { - "integrations": { - "list": [] - } - }, - "info_provided_to_referring_integrations": { - "description": "" - }, - "keywords": [ - "ups", - "apc", - "power", - "supply", - "battery", - "apcupsd" - ], - "most_popular": false - }, - "overview": "# APC UPS\n\nPlugin: charts.d.plugin\nModule: apcupsd\n\n## Overview\n\nMonitor APC UPS performance with Netdata for optimal uninterruptible power supply operations. 
Enhance your power supply reliability with real-time APC UPS metrics.\n\nThe collector uses the `apcaccess` tool to contact the `apcupsd` daemon and get the APC UPS statistics.\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, with no configuration provided, the collector will try to contact 127.0.0.1:3551 with using the `apcaccess` utility.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install charts.d plugin\n\nIf [using our official native DEB/RPM packages](/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.\n\n\n#### Required software\n\nMake sure the `apcaccess` and `apcupsd` are installed and running.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `charts.d/apcupsd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config charts.d/apcupsd.conf\n```\n#### Options\n\nThe config file is sourced by the charts.d plugin. It's a standard bash file.\n\nThe following collapsed table contains all the options that can be configured for the apcupsd collector.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| apcupsd_sources | This is an array of apcupsd sources. You can have multiple entries there. Please refer to the example below on how to set it. | 127.0.0.1:3551 | no |\n| apcupsd_timeout | How long to wait for apcupsd to respond. | 3 | no |\n| apcupsd_update_every | The data collection frequency. If unset, will inherit the netdata update frequency. | 1 | no |\n| apcupsd_priority | The charts priority on the dashboard. | 90000 | no |\n| apcupsd_retries | The number of retries to do in case of failure before disabling the collector. | 10 | no |\n\n#### Examples\n\n##### Multiple apcupsd sources\n\nSpecify a multiple apcupsd sources along with a custom update interval\n\n```yaml\n# add all your APC UPSes in this array - uncomment it too\ndeclare -A apcupsd_sources=(\n [\"local\"]=\"127.0.0.1:3551\",\n [\"remote\"]=\"1.2.3.4:3551\"\n)\n\n# how long to wait for apcupsd to respond\n#apcupsd_timeout=3\n\n# the data collection frequency\n# if unset, will inherit the netdata update frequency\napcupsd_update_every=5\n\n# the charts priority on the dashboard\n#apcupsd_priority=90000\n\n# the number of retries to do in case of failure\n# before disabling the module\n#apcupsd_retries=10\n\n```\n", - "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n\nTo troubleshoot issues with the `apcupsd` collector, run the `charts.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `charts.d.plugin` to debug the collector:\n\n ```bash\n ./charts.d.plugin debug 1 apcupsd\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `apcupsd` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep apcupsd\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep apcupsd /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep apcupsd\n```\n\n", - "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ apcupsd_ups_charge ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.charge | average UPS charge over the last minute |\n| [ apcupsd_10min_ups_load ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.load | average UPS load over the last 10 minutes |\n| [ apcupsd_last_collected_secs ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.load | number of seconds since the last successful data collection |\n| [ apcupsd_selftest_warning ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.selftest | self-test failed due to insufficient battery capacity or due to overload. 
|\n| [ apcupsd_status_onbatt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS has switched to battery power because the input power has failed |\n| [ apcupsd_status_overload ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS is overloaded and cannot supply enough power to the load |\n| [ apcupsd_status_lowbatt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS battery is low and needs to be recharged |\n| [ apcupsd_status_replacebatt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS battery has reached the end of its lifespan and needs to be replaced |\n| [ apcupsd_status_nobatt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS has no battery |\n| [ apcupsd_status_commlost ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS communication link is lost |\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per ups\n\nMetrics related to UPS. Each UPS provides its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| apcupsd.charge | charge | percentage |\n| apcupsd.battery.voltage | voltage, nominal | Volts |\n| apcupsd.input.voltage | voltage, min, max | Volts |\n| apcupsd.output.voltage | absolute, nominal | Volts |\n| apcupsd.input.frequency | frequency | Hz |\n| apcupsd.load | load | percentage |\n| apcupsd.load_usage | load | Watts |\n| apcupsd.temperature | temp | Celsius |\n| apcupsd.time | time | Minutes |\n| apcupsd.online | online | boolean |\n| apcupsd.selftest | OK, NO, BT, NG | status |\n| apcupsd.status | ONLINE, ONBATT, OVERLOAD, LOWBATT, REPLACEBATT, NOBATT, SLAVE, SLAVEDOWN, COMMLOST, CAL, TRIM, BOOST, SHUTTING_DOWN | status |\n\n", - "integration_type": "collector", - "id": "charts.d.plugin-apcupsd-APC_UPS", - "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/charts.d.plugin/apcupsd/metadata.yaml", - "related_resources": "" - }, { "meta": { "plugin_name": "charts.d.plugin", @@ -1080,7 +1040,7 @@ "most_popular": false }, "overview": "# Libreswan\n\nPlugin: charts.d.plugin\nModule: libreswan\n\n## Overview\n\nMonitor Libreswan performance for optimal IPsec VPN operations. 
Improve your VPN operations with Netdata's real-time metrics and built-in alerts.\n\nThe collector uses the `ipsec` command to collect the information it needs.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install charts.d plugin\n\nIf [using our official native DEB/RPM packages](/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.\n\n\n#### Permissions to execute `ipsec`\n\nThe plugin executes 2 commands to collect all the information it needs:\n\n```sh\nipsec whack --status\nipsec whack --trafficstatus\n```\n\nThe first command is used to extract the currently established tunnels, their IDs and their names.\nThe second command is used to extract the current uptime and traffic.\n\nMost probably user `netdata` will not be able to query libreswan, so the `ipsec` commands will be denied.\nThe plugin attempts to run `ipsec` as `sudo ipsec ...`, to get access to libreswan statistics.\n\nTo allow user `netdata` execute `sudo ipsec ...`, create the file `/etc/sudoers.d/netdata` with this content:\n\n```\nnetdata ALL = (root) NOPASSWD: /sbin/ipsec whack --status\nnetdata ALL = (root) NOPASSWD: /sbin/ipsec whack --trafficstatus\n```\n\nMake sure the path `/sbin/ipsec` matches your setup (execute `which ipsec` to find the right path).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `charts.d/libreswan.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config charts.d/libreswan.conf\n```\n#### Options\n\nThe config file is sourced by the charts.d plugin. It's a standard bash file.\n\nThe following collapsed table contains all the options that can be configured for the libreswan collector.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| libreswan_update_every | The data collection frequency. If unset, will inherit the netdata update frequency. | 1 | no |\n| libreswan_priority | The charts priority on the dashboard | 90000 | no |\n| libreswan_retries | The number of retries to do in case of failure before disabling the collector. | 10 | no |\n| libreswan_sudo | Whether to run `ipsec` with `sudo` or not. 
| 1 | no |\n\n#### Examples\n\n##### Run `ipsec` without sudo\n\nRun the `ipsec` utility without sudo\n\n```yaml\n# the data collection frequency\n# if unset, will inherit the netdata update frequency\n#libreswan_update_every=1\n\n# the charts priority on the dashboard\n#libreswan_priority=90000\n\n# the number of retries to do in case of failure\n# before disabling the module\n#libreswan_retries=10\n\n# set to 1, to run ipsec with sudo (the default)\n# set to 0, to run ipsec without sudo\nlibreswan_sudo=0\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install charts.d plugin\n\nIf [using our official native DEB/RPM packages](/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.\n\n\n#### Permissions to execute `ipsec`\n\nThe plugin executes 2 commands to collect all the information it needs:\n\n```sh\nipsec whack --status\nipsec whack --trafficstatus\n```\n\nThe first command is used to extract the currently established tunnels, their IDs and their names.\nThe second command is used to extract the current uptime and traffic.\n\nMost probably user `netdata` will not be able to query libreswan, so the `ipsec` commands will be denied.\nThe plugin attempts to run `ipsec` as `sudo ipsec ...`, to get access to libreswan statistics.\n\nTo allow user `netdata` to execute `sudo ipsec ...`, create the file `/etc/sudoers.d/netdata` with this content:\n\n```\nnetdata ALL = (root) NOPASSWD: /sbin/ipsec whack --status\nnetdata ALL = (root) NOPASSWD: /sbin/ipsec whack --trafficstatus\n```\n\nMake sure the path `/sbin/ipsec` matches your setup (execute `which ipsec` to find the right path).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `charts.d/libreswan.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config charts.d/libreswan.conf\n```\n#### Options\n\nThe config file is sourced by the charts.d plugin. It's a standard bash file.\n\nThe following collapsed table contains all the options that can be configured for the libreswan collector.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| libreswan_update_every | The data collection frequency. If unset, will inherit the netdata update frequency. | 1 | no |\n| libreswan_priority | The charts priority on the dashboard | 90000 | no |\n| libreswan_retries | The number of retries to do in case of failure before disabling the collector. | 10 | no |\n| libreswan_sudo | Whether to run `ipsec` with `sudo` or not. 
| 1 | no |\n\n#### Examples\n\n##### Run `ipsec` without sudo\n\nRun the `ipsec` utility without sudo\n\n```yaml\n# the data collection frequency\n# if unset, will inherit the netdata update frequency\n#libreswan_update_every=1\n\n# the charts priority on the dashboard\n#libreswan_priority=90000\n\n# the number of retries to do in case of failure\n# before disabling the module\n#libreswan_retries=10\n\n# set to 1, to run ipsec with sudo (the default)\n# set to 0, to run ipsec without sudo\nlibreswan_sudo=0\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n\nTo troubleshoot issues with the `libreswan` collector, run the `charts.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `charts.d.plugin` to debug the collector:\n\n ```bash\n ./charts.d.plugin debug 1 libreswan\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `libreswan` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep libreswan\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep libreswan /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep libreswan\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per IPSEC tunnel\n\nMetrics related to IPSEC tunnels. Each tunnel provides its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| libreswan.net | in, out | kilobits/s |\n| libreswan.uptime | uptime | seconds |\n\n", @@ -1119,7 +1079,7 @@ "most_popular": false }, "overview": "# OpenSIPS\n\nPlugin: charts.d.plugin\nModule: opensips\n\n## Overview\n\nExamine OpenSIPS metrics for insights into SIP server operations. 
Study call rates, error rates, and response times for reliable voice over IP services.\n\nThe collector uses the `opensipsctl` command line utility to gather OpenSIPS metrics.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector will attempt to call `opensipsctl` along with a default number of parameters, even without any configuration.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install charts.d plugin\n\nIf [using our official native DEB/RPM packages](/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.\n\n\n#### Required software\n\nThe collector requires the `opensipsctl` to be installed.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `charts.d/opensips.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config charts.d/opensips.conf\n```\n#### Options\n\nThe config file is sourced by the charts.d plugin. It's a standard bash file.\n\nThe following collapsed table contains all the options that can be configured for the opensips collector.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| opensips_opts | Specify parameters to the `opensipsctl` command. If the default value fails to get global status, set here whatever options are needed to connect to the opensips server. | fifo get_statistics all | no |\n| opensips_cmd | If `opensipsctl` is not in $PATH, specify it's full path here. | | no |\n| opensips_timeout | How long to wait for `opensipsctl` to respond. | 2 | no |\n| opensips_update_every | The data collection frequency. If unset, will inherit the netdata update frequency. | 5 | no |\n| opensips_priority | The charts priority on the dashboard. | 80000 | no |\n| opensips_retries | The number of retries to do in case of failure before disabling the collector. 
| 10 | no |\n\n#### Examples\n\n##### Custom `opensipsctl` command\n\nSet a custom path to the `opensipsctl` command\n\n```yaml\n#opensips_opts=\"fifo get_statistics all\"\nopensips_cmd=/opt/opensips/bin/opensipsctl\n#opensips_timeout=2\n\n# the data collection frequency\n# if unset, will inherit the netdata update frequency\n#opensips_update_every=5\n\n# the charts priority on the dashboard\n#opensips_priority=80000\n\n# the number of retries to do in case of failure\n# before disabling the module\n#opensips_retries=10\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install charts.d plugin\n\nIf [using our official native DEB/RPM packages](/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.\n\n\n#### Required software\n\nThe collector requires `opensipsctl` to be installed.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `charts.d/opensips.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config charts.d/opensips.conf\n```\n#### Options\n\nThe config file is sourced by the charts.d plugin. It's a standard bash file.\n\nThe following collapsed table contains all the options that can be configured for the opensips collector.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| opensips_opts | Specify parameters to the `opensipsctl` command. If the default value fails to get global status, set here whatever options are needed to connect to the opensips server. | fifo get_statistics all | no |\n| opensips_cmd | If `opensipsctl` is not in $PATH, specify its full path here. | | no |\n| opensips_timeout | How long to wait for `opensipsctl` to respond. | 2 | no |\n| opensips_update_every | The data collection frequency. If unset, will inherit the netdata update frequency. | 5 | no |\n| opensips_priority | The charts priority on the dashboard. | 80000 | no |\n| opensips_retries | The number of retries to do in case of failure before disabling the collector. | 10 | no |\n\n#### Examples\n\n##### Custom `opensipsctl` command\n\nSet a custom path to the `opensipsctl` command\n\n```yaml\n#opensips_opts=\"fifo get_statistics all\"\nopensips_cmd=/opt/opensips/bin/opensipsctl\n#opensips_timeout=2\n\n# the data collection frequency\n# if unset, will inherit the netdata update frequency\n#opensips_update_every=5\n\n# the charts priority on the dashboard\n#opensips_priority=80000\n\n# the number of retries to do in case of failure\n# before disabling the module\n#opensips_retries=10\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n\nTo troubleshoot issues with the `opensips` collector, run the `charts.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `charts.d.plugin` to debug the collector:\n\n ```bash\n ./charts.d.plugin debug 1 opensips\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `opensips` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep opensips\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep opensips /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep opensips\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per OpenSIPS instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| opensips.dialogs_active | active, early | dialogs |\n| opensips.users | registered, location, contacts, expires | users |\n| opensips.registrar | accepted, rejected | registrations/s |\n| opensips.transactions | UAS, UAC | transactions/s |\n| opensips.core_rcv | requests, replies | queries/s |\n| opensips.core_fwd | requests, replies | queries/s |\n| opensips.core_drop | requests, replies | queries/s |\n| opensips.core_err | requests, replies | queries/s |\n| opensips.core_bad | bad_URIs_rcvd, unsupported_methods, bad_msg_hdr | queries/s |\n| opensips.tm_replies | received, relayed, local | replies/s |\n| opensips.transactions_status | 2xx, 3xx, 4xx, 5xx, 6xx | transactions/s |\n| opensips.transactions_inuse | inuse | transactions |\n| opensips.sl_replies | 1xx, 2xx, 3xx, 4xx, 5xx, 6xx, sent, error, ACKed | replies/s |\n| opensips.dialogs | processed, expire, failed | dialogs/s |\n| opensips.net_waiting | UDP, TCP | kilobytes |\n| opensips.uri_checks | positive, negative | checks / sec |\n| opensips.traces | requests, replies | traces / sec |\n| opensips.shmem | total, used, real_used, max_used, free | kilobytes |\n| opensips.shmem_fragment | fragments | fragments |\n\n", @@ -1128,45 +1088,6 @@ "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/charts.d.plugin/opensips/metadata.yaml", "related_resources": "" }, - { - "meta": { - "plugin_name": "charts.d.plugin", - "module_name": "sensors", - "monitored_instance": { - "name": "Linux Sensors (sysfs)", - "link": "https://www.kernel.org/doc/Documentation/hwmon/sysfs-interface", - "categories": [ - "data-collection.hardware-devices-and-sensors" - ], - "icon_filename": "microchip.svg" - }, - "related_resources": { - "integrations": { - "list": [] - } - }, - "info_provided_to_referring_integrations": { - "description": "" - }, - "keywords": [ - "sensors", - "sysfs", - "hwmon", - "rpi", - "raspberry pi" - ], - "most_popular": false - }, - "overview": "# Linux Sensors (sysfs)\n\nPlugin: charts.d.plugin\nModule: sensors\n\n## Overview\n\nUse this collector when `lm-sensors` doesn't work on your device (e.g. 
for RPi temperatures).\nFor all other cases use the [Go collector](/src/go/plugin/go.d/modules/sensors/README.md), which supports multiple jobs, is more efficient and performs calculations on top of the kernel provided values.\"\n\n\nIt will provide charts for all configured system sensors, by reading sensors directly from the kernel.\nThe values graphed are the raw hardware values of the sensors.\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, the collector will try to read entries under `/sys/devices`\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install charts.d plugin\n\nIf [using our official native DEB/RPM packages](/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.\n\n\n#### Enable the sensors collector\n\nThe `sensors` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config directory](/docs/netdata-agent/configuration/README.md), which is typically at `/etc/netdata`, to edit the `charts.d.conf` file.\n\n```bash\ncd /etc/netdata # Replace this path with your Netdata config directory, if different\nsudo ./edit-config charts.d.conf\n```\n\nChange the value of the `sensors` setting to `force` and uncomment the line. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](/packaging/installer/README.md#maintaining-a-netdata-agent-installation) for your system.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `charts.d/sensors.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config charts.d/sensors.conf\n```\n#### Options\n\nThe config file is sourced by the charts.d plugin. It's a standard bash file.\n\nThe following collapsed table contains all the options that can be configured for the sensors collector.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| sensors_sys_dir | The directory the kernel exposes sensor data. | /sys/devices | no |\n| sensors_sys_depth | How deep in the tree to check for sensor data. | 10 | no |\n| sensors_source_update | If set to 1, the script will overwrite internal script functions with code generated ones. | 1 | no |\n| sensors_update_every | The data collection frequency. If unset, will inherit the netdata update frequency. | 1 | no |\n| sensors_priority | The charts priority on the dashboard. | 90000 | no |\n| sensors_retries | The number of retries to do in case of failure before disabling the collector. 
| 10 | no |\n\n#### Examples\n\n##### Set sensors path depth\n\nSet a different sensors path depth\n\n```yaml\n# the directory the kernel keeps sensor data\n#sensors_sys_dir=\"/sys/devices\"\n\n# how deep in the tree to check for sensor data\nsensors_sys_depth=5\n\n# if set to 1, the script will overwrite internal\n# script functions with code generated ones\n# leave to 1, is faster\n#sensors_source_update=1\n\n# the data collection frequency\n# if unset, will inherit the netdata update frequency\n#sensors_update_every=\n\n# the charts priority on the dashboard\n#sensors_priority=90000\n\n# the number of retries to do in case of failure\n# before disabling the module\n#sensors_retries=10\n\n```\n", - "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n\nTo troubleshoot issues with the `sensors` collector, run the `charts.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `charts.d.plugin` to debug the collector:\n\n ```bash\n ./charts.d.plugin debug 1 sensors\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `sensors` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep sensors\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep sensors /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep sensors\n```\n\n", - "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per sensor chip\n\nMetrics related to sensor chips. 
Each chip provides its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| sensors.temp | {filename} | Celsius |\n| sensors.volt | {filename} | Volts |\n| sensors.curr | {filename} | Ampere |\n| sensors.power | {filename} | Watt |\n| sensors.fans | {filename} | Rotations / Minute |\n| sensors.energy | {filename} | Joule |\n| sensors.humidity | {filename} | Percent |\n\n", - "integration_type": "collector", - "id": "charts.d.plugin-sensors-Linux_Sensors_(sysfs)", - "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/charts.d.plugin/sensors/metadata.yaml", - "related_resources": "" - }, { "meta": { "plugin_name": "cups.plugin", @@ -1191,7 +1112,7 @@ "most_popular": false }, "overview": "# CUPS\n\nPlugin: cups.plugin\nModule: cups.plugin\n\n## Overview\n\nMonitor CUPS performance for achieving optimal printing system operations. Monitor job statuses, queue lengths, and error rates to ensure smooth printing tasks.\n\nThe plugin uses CUPS shared library to connect and monitor the server.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs to access the server. Netdata sets permissions during installation time to reach the server through its library.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin detects when CUPS server is running and tries to connect to it.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Minimum setup\n\nThe CUPS server must be installed and running. If you installed `netdata` using a package manager, it is also necessary to install the package `netdata-plugin-cups`.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:cups]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Additional parameters for the collector | | no |\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Minimum setup\n\nThe CUPS server must be installed and running. If you installed `netdata` using a package manager, it is also necessary to install the package `netdata-plugin-cups`.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:cups]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Additional parameters for the collector | | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per CUPS instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cups.dests_state | idle, printing, stopped | dests |\n| cups.dests_option | total, acceptingjobs, shared | dests |\n| cups.job_num | pending, held, processing | jobs |\n| cups.job_size | pending, held, processing | KB |\n\n### Per destination\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cups.destination_job_num | pending, held, processing | jobs |\n| cups.destination_job_size | pending, held, processing | KB |\n\n", @@ -1228,7 +1149,7 @@ "most_popular": false }, "overview": "# System Memory Fragmentation\n\nPlugin: debugfs.plugin\nModule: /sys/kernel/debug/extfrag\n\n## Overview\n\nCollects memory fragmentation statistics from the Linux kernel\n\nParse data from `debugfs` file\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\nThis integration requires read access to files under `/sys/kernel/debug/extfrag`, which are accessible only to the root user by default. Netdata uses Linux Capabilities to give the plugin access to debugfs. `CAP_DAC_READ_SEARCH` is added automatically during installation. This capability allows bypassing file read permission checks and directory read and execute permission checks. If file capabilities are not usable, then the plugin is instead installed with the SUID bit set in permissions so that it runs as root.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAssuming that debugfs is mounted and the required permissions are available, this integration will automatically run by default.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### filesystem\n\nThe debugfs filesystem must be mounted on your host for plugin to collect data. You can run the command-line (`sudo mount -t debugfs none /sys/kernel/debug/`) to mount it locally. 
It is also recommended to modify your fstab (5) avoiding necessity to mount the filesystem before starting netdata.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:debugfs]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Additinal parameters for collector | | no |\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### filesystem\n\nThe debugfs filesystem must be mounted on your host for the plugin to collect data. You can run the command-line (`sudo mount -t debugfs none /sys/kernel/debug/`) to mount it locally. It is also recommended to add an entry to your fstab (5) so the filesystem is mounted automatically before Netdata starts.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:debugfs]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Additional parameters for the collector | | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nMonitor the overall memory fragmentation of the system.\n\n### Per node\n\nMemory fragmentation statistics for each NUMA node in the system.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| numa_node | The NUMA node the metrics are associated with. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.fragmentation_index_dma | order0, order1, order2, order3, order4, order5, order6, order7, order8, order9, order10 | index |\n| mem.fragmentation_index_dma32 | order0, order1, order2, order3, order4, order5, order6, order7, order8, order9, order10 | index |\n| mem.fragmentation_index_normal | order0, order1, order2, order3, order4, order5, order6, order7, order8, order9, order10 | index |\n\n", @@ -1266,7 +1187,7 @@ "most_popular": false }, "overview": "# Linux ZSwap\n\nPlugin: debugfs.plugin\nModule: /sys/kernel/debug/zswap\n\n## Overview\n\nCollects zswap performance metrics on Linux systems.\n\n\nParse data from `debugfs` file.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\nThis integration requires read access to files under `/sys/kernel/debug/zswap`, which are accessible only to the root user by default. Netdata uses Linux Capabilities to give the plugin access to debugfs. `CAP_DAC_READ_SEARCH` is added automatically during installation. This capability allows bypassing file read permission checks and directory read and execute permission checks. If file capabilities are not usable, then the plugin is instead installed with the SUID bit set in permissions so that it runs as root.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAssuming that debugfs is mounted and the required permissions are available, this integration will automatically detect whether or not the system is using zswap.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### filesystem\n\nThe debugfs filesystem must be mounted on your host for plugin to collect data. You can run the command-line (`sudo mount -t debugfs none /sys/kernel/debug/`) to mount it locally. It is also recommended to modify your fstab (5) avoiding necessity to mount the filesystem before starting netdata.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:debugfs]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Additinal parameters for collector | | no |\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### filesystem\n\nThe debugfs filesystem must be mounted on your host for the plugin to collect data. You can run the command-line (`sudo mount -t debugfs none /sys/kernel/debug/`) to mount it locally. It is also recommended to add an entry to your fstab (5) so the filesystem is mounted automatically before Netdata starts. 
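For reference, a minimal sketch of such an fstab (5) entry (illustrative, not from the original docs; adjust the mount options to your system):\n\n```\ndebugfs /sys/kernel/debug debugfs defaults 0 0\n```\n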
\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:debugfs]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Additional parameters for the collector | | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nMonitor the performance statistics of zswap.\n\n### Per Linux ZSwap instance\n\nGlobal zswap performance metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.zswap_pool_compression_ratio | compression_ratio | ratio |\n| system.zswap_pool_compressed_size | compressed_size | bytes |\n| system.zswap_pool_raw_size | uncompressed_size | bytes |\n| system.zswap_rejections | compress_poor, kmemcache_fail, alloc_fail, reclaim_fail | rejections/s |\n| system.zswap_pool_limit_hit | limit | events/s |\n| system.zswap_written_back_raw_bytes | written_back | bytes/s |\n| system.zswap_same_filled_raw_size | same_filled | bytes |\n| system.zswap_duplicate_entry | duplicate | entries/s |\n\n", @@ -1302,7 +1223,7 @@ "most_popular": false }, "overview": "# Power Capping\n\nPlugin: debugfs.plugin\nModule: intel_rapl\n\n## Overview\n\nCollects power capping performance metrics on Linux systems.\n\n\nParse data from `debugfs` file.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\nThis integration requires read access to files under `/sys/devices/virtual/powercap`, which are accessible only to the root user by default. Netdata uses Linux Capabilities to give the plugin access to debugfs. `CAP_DAC_READ_SEARCH` is added automatically during installation. This capability allows bypassing file read permission checks and directory read and execute permission checks. 
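As a quick check (a sketch, not from the original docs; the plugin path below is the common default and may differ on your install), you can verify the file capability with `getcap`:\n\n```bash\n# list file capabilities granted to the plugin binary\ngetcap /usr/libexec/netdata/plugins.d/debugfs.plugin\n# typical output (exact format varies with the libcap version):\n# /usr/libexec/netdata/plugins.d/debugfs.plugin cap_dac_read_search=ep\n```\n\n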
If file capabilities are not usable, then the plugin is instead installed with the SUID bit set in permissions so that it runs as root.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAssuming that debugfs is mounted and the required permissions are available, this integration will automatically detect whether the system exposes Intel RAPL power capping information.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### filesystem\n\nThe debugfs filesystem must be mounted on your host for plugin to collect data. You can run the command-line (`sudo mount -t debugfs none /sys/kernel/debug/`) to mount it locally. It is also recommended to modify your fstab (5) avoiding necessity to mount the filesystem before starting netdata.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:debugfs]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Additinal parameters for collector | | no |\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### filesystem\n\nThe debugfs filesystem must be mounted on your host for the plugin to collect data. You can run the command-line (`sudo mount -t debugfs none /sys/kernel/debug/`) to mount it locally. It is also recommended to add an entry to your fstab (5) so the filesystem is mounted automatically before Netdata starts.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:debugfs]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. 
| 1 | no |\n| command options | Additional parameters for the collector | | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nMonitor the Intel RAPL zones' power consumption.\n\n### Per Power Capping instance\n\nGlobal Intel RAPL zones.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.powercap_intel_rapl_zone | Power | Watts |\n| cpu.powercap_intel_rapl_subzones | dram, core, uncore | Watts |\n\n", @@ -1345,7 +1266,7 @@ "most_popular": false }, "overview": "# Disk space\n\nPlugin: diskspace.plugin\nModule: diskspace.plugin\n\n## Overview\n\nMonitor Disk space metrics for proficient storage management. Keep track of usage, free space, and error rates to prevent disk space issues.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin reads data from `/proc/self/mountinfo` and the `/proc/diskstats` file.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:proc:diskspace]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nYou can also specify per mount point `[plugin:proc:diskspace:mountpoint]`\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| remove charts of unmounted disks | Remove chart when a device is unmounted on host. | yes | no |\n| check for new mount points every | Parse proc files frequency. | 15 | no |\n| exclude space metrics on paths | Do not show metrics (charts) for listed paths. This option accepts netdata simple pattern. | /proc/* /sys/* /var/run/user/* /run/user/* /snap/* /var/lib/docker/* | no |\n| exclude space metrics on filesystems | Do not show metrics (charts) for listed filesystems. This option accepts netdata simple pattern. | *gvfs *gluster* *s3fs *ipfs *davfs2 *httpfs *sshfs *gdfs *moosefs fusectl autofs | no |\n| exclude inode metrics on filesystems | Do not show metrics (charts) for listed filesystems. This option accepts netdata simple pattern. | msdosfs msdos vfat overlayfs aufs* *unionfs | no |\n| space usage for all disks | Define if plugin will show metrics for space usage. 
When value is set to `auto` plugin will try to access information to display if filesystem or path was not discarded with previous option. | auto | no |\n| inodes usage for all disks | Define if plugin will show metrics for inode usage. When value is set to `auto` plugin will try to access information to display if filesystem or path was not discarded with previous option. | auto | no |\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:proc:diskspace]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nYou can also specify per mount point `[plugin:proc:diskspace:mountpoint]`\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| remove charts of unmounted disks | Remove chart when a device is unmounted on host. | yes | no |\n| check for new mount points every | Parse proc files frequency. | 15s | no |\n| exclude space metrics on paths | Do not show metrics (charts) for listed paths. This option accepts netdata simple pattern. | /proc/* /sys/* /var/run/user/* /run/user/* /snap/* /var/lib/docker/* | no |\n| exclude space metrics on filesystems | Do not show metrics (charts) for listed filesystems. This option accepts netdata simple pattern. | *gvfs *gluster* *s3fs *ipfs *davfs2 *httpfs *sshfs *gdfs *moosefs fusectl autofs | no |\n| exclude inode metrics on filesystems | Do not show metrics (charts) for listed filesystems. This option accepts netdata simple pattern. | msdosfs msdos vfat overlayfs aufs* *unionfs | no |\n| space usage for all disks | Define if plugin will show metrics for space usage. When value is set to `auto` plugin will try to access information to display if filesystem or path was not discarded with previous option. | auto | no |\n| inodes usage for all disks | Define if plugin will show metrics for inode usage. When value is set to `auto` plugin will try to access information to display if filesystem or path was not discarded with previous option. | auto | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ disk_space_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.space | disk ${label:mount_point} space utilization |\n| [ disk_inode_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.inodes | disk ${label:mount_point} inode utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
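Since no examples ship with the integration, here is a hedged sketch of a `netdata.conf` fragment built from the options above; the extra `/mnt/backup/*` pattern is hypothetical:

```ini
[plugin:proc:diskspace]
    update every = 5
    check for new mount points every = 15
    exclude space metrics on paths = /proc/* /sys/* /var/run/user/* /run/user/* /snap/* /var/lib/docker/* /mnt/backup/*
```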
An instance is uniquely identified by a set of labels.\n\n\n\n### Per mount point\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mount_point | Path used to mount a filesystem |\n| filesystem | The filesystem used to format a partition. |\n| mount_root | Root directory where mount points are present. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.space | avail, used, reserved_for_root | GiB |\n| disk.inodes | avail, used, reserved_for_root | inodes |\n\n", @@ -1391,7 +1312,7 @@ "most_popular": false }, "overview": "# eBPF Cachestat\n\nPlugin: ebpf.plugin\nModule: cachestat\n\n## Overview\n\nMonitor Linux page cache events giving for users a general vision about how his kernel is manipulating files.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions according options used to compile kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside kernel. Netada sets necessary permission during installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside /boot/config file. Some cited names can be different accoring preferences of Linux distributions.\nWhen you do not have options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, this last is preferred. The kernel compilation has a well definedd pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image for boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if it is necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/cachestat.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/cachestat.conf\n```\n#### Options\n\nAll options are defined inside section `[global]`.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. 
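A quick way to perform the kernel-flag check described in the Compile kernel prerequisite; the `/boot/config-$(uname -r)` naming is distribution-dependent:

```bash
# Print the eBPF-related kernel options from whichever config source exists.
(zcat /proc/config.gz 2>/dev/null || cat "/boot/config-$(uname -r)") \
  | grep -E 'CONFIG_(KPROBES|BPF|BPF_SYSCALL|BPF_JIT)='
```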
| 5 | no |\n| ebpf load mode | Define whether plugin will monitor the call (`entry`) for the functions or it will also monitor the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (Attach only `kprobe`), `co-re` (Plugin tries to use `trampoline` when available), and `auto` (plugin check OS configuration before to load). | auto | no |\n| ebpf co-re tracing | Select the attach method used by plugin when `co-re` is defined in previous option. Two options are available: `trampoline` (Option with lowest overhead), and `probe` (the same of legacy code). | trampoline | no |\n| maps per core | Define how plugin will load their hash maps. When enabled (`yes`) plugin will load one hash table per core, instead to have centralized information. | yes | no |\n| lifetime | Set default lifetime for thread when enabled by cloud. | 300 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside /boot/config file. Some cited names can be different accoring preferences of Linux distributions.\nWhen you do not have options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, this last is preferred. The kernel compilation has a well definedd pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image for boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if it is necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/cachestat.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/cachestat.conf\n```\n#### Options\n\nAll options are defined inside section `[global]`.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether plugin will monitor the call (`entry`) for the functions or it will also monitor the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. 
Three options are available: `legacy` (Attach only `kprobe`), `co-re` (Plugin tries to use `trampoline` when available), and `auto` (plugin check OS configuration before to load). | auto | no |\n| ebpf co-re tracing | Select the attach method used by plugin when `co-re` is defined in previous option. Two options are available: `trampoline` (Option with lowest overhead), and `probe` (the same of legacy code). | trampoline | no |\n| maps per core | Define how plugin will load their hash maps. When enabled (`yes`) plugin will load one hash table per core, instead to have centralized information. | yes | no |\n| lifetime | Set default lifetime for thread when enabled by cloud. | 300 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF Cachestat instance\n\nThese metrics show total number of calls to functions inside kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.cachestat_ratio | ratio | % |\n| mem.cachestat_dirties | dirty | page/s |\n| mem.cachestat_hits | hit | hits/s |\n| mem.cachestat_misses | miss | misses/s |\n\n### Per apps\n\nThese Metrics show grouped information per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_cachestat_hit_ratio | ratio | % |\n| app.ebpf_cachestat_dirty_pages | pages | page/s |\n| app.ebpf_cachestat_access | hits | hits/s |\n| app.ebpf_cachestat_misses | misses | misses/s |\n\n### Per cgroup\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.cachestat_ratio | ratio | % |\n| cgroup.cachestat_dirties | dirty | page/s |\n| cgroup.cachestat_hits | hit | hits/s |\n| cgroup.cachestat_misses | miss | misses/s |\n| services.cachestat_ratio | a dimension per systemd service | % |\n| services.cachestat_dirties | a dimension per systemd service | page/s |\n| services.cachestat_hits | a dimension per systemd service | hits/s |\n| services.cachestat_misses | a dimension per systemd service | misses/s |\n\n", @@ -1437,7 +1358,7 @@ "most_popular": false }, "overview": "# eBPF DCstat\n\nPlugin: ebpf.plugin\nModule: dcstat\n\n## Overview\n\nMonitor directory cache events per application given an overall vision about files on memory or storage device.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions according options used to compile kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside kernel. 
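As a sketch only, an `ebpf.d/cachestat.conf` that simply spells out the defaults listed in the options table above:

```ini
[global]
    update every = 5
    ebpf load mode = entry
    apps = no
    cgroups = no
    pid table size = 32768
    ebpf type format = auto
    ebpf co-re tracing = trampoline
    maps per core = yes
    lifetime = 300
```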
Netdata sets the necessary permissions during installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside /boot/config file. Some cited names can be different according to the preferences of Linux distributions.\nWhen you do not have options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if it is necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/dcstat.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/dcstat.conf\n```\n#### Options\n\nAll options are defined inside section `[global]`.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether plugin will monitor the call (`entry`) for the functions or it will also monitor the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (Attach only `kprobe`), `co-re` (Plugin tries to use `trampoline` when available), and `auto` (plugin checks OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by plugin when `co-re` is defined in previous option. Two options are available: `trampoline` (Option with lowest overhead), and `probe` (the same as legacy code). | trampoline | no |\n| maps per core | Define how plugin will load their hash maps. When enabled (`yes`) plugin will load one hash table per core, instead of having centralized information. | yes | no |\n| lifetime | Set default lifetime for thread when enabled by cloud. 
| 300 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside /boot/config file. Some cited names can be different accoring preferences of Linux distributions.\nWhen you do not have options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, this last is preferred. The kernel compilation has a well definedd pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image for boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if it is necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/dcstat.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/dcstat.conf\n```\n#### Options\n\nAll options are defined inside section `[global]`.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether plugin will monitor the call (`entry`) for the functions or it will also monitor the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (Attach only `kprobe`), `co-re` (Plugin tries to use `trampoline` when available), and `auto` (plugin check OS configuration before to load). | auto | no |\n| ebpf co-re tracing | Select the attach method used by plugin when `co-re` is defined in previous option. Two options are available: `trampoline` (Option with lowest overhead), and `probe` (the same of legacy code). | trampoline | no |\n| maps per core | Define how plugin will load their hash maps. When enabled (`yes`) plugin will load one hash table per core, instead to have centralized information. | yes | no |\n| lifetime | Set default lifetime for thread when enabled by cloud. | 300 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per apps\n\nThese Metrics show grouped information per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_dc_ratio | ratio | % |\n| app.ebpf_dc_reference | files | files |\n| app.ebpf_dc_not_cache | files | files |\n| app.ebpf_dc_not_found | files | files |\n\n### Per filesystem\n\nThese metrics show total number of calls to functions inside kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filesystem.dc_reference | reference, slow, miss | files |\n| filesystem.dc_hit_ratio | ratio | % |\n\n### Per cgroup\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.dc_ratio | ratio | % |\n| cgroup.dc_reference | reference | files |\n| cgroup.dc_not_cache | slow | files |\n| cgroup.dc_not_found | miss | files |\n| services.dc_ratio | a dimension per systemd service | % |\n| services.dc_reference | a dimension per systemd service | files |\n| services.dc_not_cache | a dimension per systemd service | files |\n| services.dc_not_found | a dimension per systemd service | files |\n\n", @@ -1475,7 +1396,7 @@ "most_popular": false }, "overview": "# eBPF Disk\n\nPlugin: ebpf.plugin\nModule: disk\n\n## Overview\n\nMeasure latency for I/O events on disk.\n\nAttach tracepoints to internal kernel functions.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside kernel. Netada sets necessary permission during installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside /boot/config file. Some cited names can be different accoring preferences of Linux distributions.\nWhen you do not have options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, this last is preferred. The kernel compilation has a well definedd pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image for boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if it is necessary.\n8. 
Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/disk.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/disk.conf\n```\n#### Options\n\nAll options are defined inside section `[global]`.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether plugin will monitor the call (`entry`) for the functions or it will also monitor the return (`return`). | entry | no |\n| lifetime | Set default lifetime for thread when enabled by cloud. | 300 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside /boot/config file. Some cited names can be different according to the preferences of Linux distributions.\nWhen you do not have options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if it is necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/disk.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/disk.conf\n```\n#### Options\n\nAll options are defined inside section `[global]`.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether plugin will monitor the call (`entry`) for the functions or it will also monitor the return (`return`). | entry | no |\n| lifetime | Set default lifetime for thread when enabled by cloud. 
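The eight numbered build steps above, condensed into a hedged shell sketch; the config path, the x86 image location, and the final tooling are assumptions that vary per distribution:

```bash
cd /usr/src/linux
cp /boot/config-"$(uname -r)" .config              # step 1 (distro config name assumed)
make oldconfig                                     # step 2: select the necessary options
make bzImage && make modules                       # steps 3-4: build image and modules
sudo make modules_install                          # step 6: install the new modules
sudo cp arch/x86/boot/bzImage /boot/vmlinuz-custom # step 5 (x86 output path assumed)
# Steps 7-8: regenerate the initramfs and update the boot loader with your
# distribution's own tools (e.g. dracut/update-initramfs and grub-mkconfig).
```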
| 300 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per disk\n\nThese metrics measure latency for I/O events on every hard disk present on host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.latency_io | latency | calls/s |\n\n", @@ -1523,7 +1444,7 @@ "most_popular": false }, "overview": "# eBPF Filedescriptor\n\nPlugin: ebpf.plugin\nModule: filedescriptor\n\n## Overview\n\nMonitor calls for functions responsible to open or close a file descriptor and possible errors.\n\nAttach tracing (kprobe and trampoline) to internal kernel functions according options used to compile kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside kernel. Netdata sets necessary permissions during installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nDepending of kernel version and frequency that files are open and close, this thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside /boot/config file. Some cited names can be different accoring preferences of Linux distributions.\nWhen you do not have options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, this last is preferred. The kernel compilation has a well definedd pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image for boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if it is necessary.\n8. 
Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/fd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/fd.conf\n```\n#### Options\n\nAll options are defined inside section `[global]`.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether plugin will monitor the call (`entry`) for the functions or it will also monitor the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (Attach only `kprobe`), `co-re` (Plugin tries to use `trampoline` when available), and `auto` (plugin check OS configuration before to load). | auto | no |\n| ebpf co-re tracing | Select the attach method used by plugin when `co-re` is defined in previous option. Two options are available: `trampoline` (Option with lowest overhead), and `probe` (the same of legacy code). | trampoline | no |\n| maps per core | Define how plugin will load their hash maps. When enabled (`yes`) plugin will load one hash table per core, instead to have centralized information. | yes | no |\n| lifetime | Set default lifetime for thread when enabled by cloud. | 300 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside /boot/config file. Some cited names can be different accoring preferences of Linux distributions.\nWhen you do not have options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, this last is preferred. The kernel compilation has a well definedd pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image for boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if it is necessary.\n8. 
Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/fd.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/fd.conf\n```\n#### Options\n\nAll options are defined inside section `[global]`.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether plugin will monitor the call (`entry`) for the functions or it will also monitor the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (Attach only `kprobe`), `co-re` (Plugin tries to use `trampoline` when available), and `auto` (plugin check OS configuration before to load). | auto | no |\n| ebpf co-re tracing | Select the attach method used by plugin when `co-re` is defined in previous option. Two options are available: `trampoline` (Option with lowest overhead), and `probe` (the same of legacy code). | trampoline | no |\n| maps per core | Define how plugin will load their hash maps. When enabled (`yes`) plugin will load one hash table per core, instead to have centralized information. | yes | no |\n| lifetime | Set default lifetime for thread when enabled by cloud. | 300 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\nThese Metrics show grouped information per cgroup/service.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.fd_open | open | calls/s |\n| cgroup.fd_open_error | open | calls/s |\n| cgroup.fd_closed | close | calls/s |\n| cgroup.fd_close_error | close | calls/s |\n| services.file_open | a dimension per systemd service | calls/s |\n| services.file_open_error | a dimension per systemd service | calls/s |\n| services.file_closed | a dimension per systemd service | calls/s |\n| services.file_close_error | a dimension per systemd service | calls/s |\n\n### Per eBPF Filedescriptor instance\n\nThese metrics show total number of calls to functions inside kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filesystem.file_descriptor | open, close | calls/s |\n| filesystem.file_error | open, close | calls/s |\n\n### Per apps\n\nThese Metrics show grouped information per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. 
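To populate the per-apps and per-cgroup scopes in this integration's metrics, the matching toggles from the options table have to be enabled; a minimal illustrative fragment for `ebpf.d/fd.conf`:

```ini
[global]
    apps = yes
    cgroups = yes
```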
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_file_open | calls | calls/s |\n| app.ebpf_file_open_error | calls | calls/s |\n| app.ebpf_file_closed | calls | calls/s |\n| app.ebpf_file_close_error | calls | calls/s |\n\n", @@ -1566,7 +1487,7 @@ "most_popular": false }, "overview": "# eBPF Filesystem\n\nPlugin: ebpf.plugin\nModule: filesystem\n\n## Overview\n\nMonitor latency for main actions on filesystem like I/O events.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions according options used to compile kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside kernel. Netada sets necessary permission during installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside /boot/config file. Some cited names can be different accoring preferences of Linux distributions.\nWhen you do not have options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, this last is preferred. The kernel compilation has a well definedd pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image for boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if it is necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/filesystem.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/filesystem.conf\n```\n#### Options\n\nThis configuration file have two different sections. The `[global]` overwrites default options, while `[filesystem]` allow user to select the filesystems to monitor.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether plugin will monitor the call (`entry`) for the functions or it will also monitor the return (`return`). | entry | no |\n| lifetime | Set default lifetime for thread when enabled by cloud. | 300 | no |\n| btrfsdist | Enable or disable latency monitoring for functions associated with btrfs filesystem. 
| yes | no |\n| ext4dist | Enable or disable latency monitoring for functions associated with ext4 filesystem. | yes | no |\n| nfsdist | Enable or disable latency monitoring for functions associated with nfs filesystem. | yes | no |\n| xfsdist | Enable or disable latency monitoring for functions associated with xfs filesystem. | yes | no |\n| zfsdist | Enable or disable latency monitoring for functions associated with zfs filesystem. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside /boot/config file. Some cited names can be different according to the preferences of Linux distributions.\nWhen you do not have options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if it is necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/filesystem.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/filesystem.conf\n```\n#### Options\n\nThis configuration file has two different sections. The `[global]` section overwrites default options, while `[filesystem]` allows the user to select the filesystems to monitor.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether plugin will monitor the call (`entry`) for the functions or it will also monitor the return (`return`). | entry | no |\n| lifetime | Set default lifetime for thread when enabled by cloud. | 300 | no |\n| btrfsdist | Enable or disable latency monitoring for functions associated with btrfs filesystem. | yes | no |\n| ext4dist | Enable or disable latency monitoring for functions associated with ext4 filesystem. | yes | no |\n| nfsdist | Enable or disable latency monitoring for functions associated with nfs filesystem. | yes | no |\n| xfsdist | Enable or disable latency monitoring for functions associated with xfs filesystem. | yes | no |\n| zfsdist | Enable or disable latency monitoring for functions associated with zfs filesystem. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
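A hedged sketch of the two-section layout just described for `ebpf.d/filesystem.conf`; disabling `zfsdist` is only an illustrative choice:

```ini
[global]
    update every = 5
    ebpf load mode = entry

[filesystem]
    btrfsdist = yes
    ext4dist = yes
    nfsdist = yes
    xfsdist = yes
    zfsdist = no
```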
An instance is uniquely identified by a set of labels.\n\n\n\n### Per filesystem\n\nLatency charts associate with filesystem actions.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filesystem.read_latency | latency period | calls/s |\n| filesystem.open_latency | latency period | calls/s |\n| filesystem.sync_latency | latency period | calls/s |\n\n### Per filesystem\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filesystem.write_latency | latency period | calls/s |\n\n### Per eBPF Filesystem instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filesystem.attributte_latency | latency period | calls/s |\n\n", @@ -1602,7 +1523,7 @@ "most_popular": false }, "overview": "# eBPF Hardirq\n\nPlugin: ebpf.plugin\nModule: hardirq\n\n## Overview\n\nMonitor latency for each HardIRQ available.\n\nAttach tracepoints to internal kernel functions.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside kernel. Netdata sets the necessary permissions during installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside /boot/config file. Some cited names can be different according to the preferences of Linux distributions.\nWhen you do not have options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if it is necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/hardirq.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/hardirq.conf\n```\n#### Options\n\nAll options are defined inside section `[global]`.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether plugin will monitor the call (`entry`) for the functions or it will also monitor the return (`return`). | entry | no |\n| lifetime | Set default lifetime for thread when enabled by cloud. | 300 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside /boot/config file. Some cited names can be different according to the preferences of Linux distributions.\nWhen you do not have options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if it is necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/hardirq.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/hardirq.conf\n```\n#### Options\n\nAll options are defined inside section `[global]`.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether plugin will monitor the call (`entry`) for the functions or it will also monitor the return (`return`). | entry | no |\n| lifetime | Set default lifetime for thread when enabled by cloud. 
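Before enabling threads that depend on the Debug Filesystem prerequisite, you can verify the mount in one line (`mountpoint` ships with util-linux):

```bash
mountpoint -q /sys/kernel/debug || sudo mount -t debugfs none /sys/kernel/debug/
```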
| 300 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF Hardirq instance\n\nThese metrics show latest timestamp for each hardIRQ available on host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.hardirq_latency | hardirq names | milliseconds |\n\n", @@ -1639,7 +1560,7 @@ "most_popular": false }, "overview": "# eBPF MDflush\n\nPlugin: ebpf.plugin\nModule: mdflush\n\n## Overview\n\nMonitor when flush events happen between disks.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions according options used to compile kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside kernel. Netada sets necessary permission during installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that `md_flush_request` is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside /boot/config file. Some cited names can be different accoring preferences of Linux distributions.\nWhen you do not have options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, this last is preferred. The kernel compilation has a well definedd pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image for boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if it is necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/mdflush.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/mdflush.conf\n```\n#### Options\n\nAll options are defined inside section `[global]`.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. 
| 5 | no |\n| ebpf load mode | Define whether plugin will monitor the call (`entry`) for the functions or it will also monitor the return (`return`). | entry | no |\n| lifetime | Set default lifetime for thread when enabled by cloud. | 300 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside /boot/config file. Some cited names can be different accoring preferences of Linux distributions.\nWhen you do not have options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, this last is preferred. The kernel compilation has a well definedd pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image for boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if it is necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/mdflush.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/mdflush.conf\n```\n#### Options\n\nAll options are defined inside section `[global]`.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether plugin will monitor the call (`entry`) for the functions or it will also monitor the return (`return`). | entry | no |\n| lifetime | Set default lifetime for thread when enabled by cloud. | 300 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF MDflush instance\n\nNumber of times md_flush_request was called since last time.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mdstat.mdstat_flush | disk | flushes |\n\n", @@ -1677,7 +1598,7 @@ "most_popular": false }, "overview": "# eBPF Mount\n\nPlugin: ebpf.plugin\nModule: mount\n\n## Overview\n\nMonitor calls for mount and umount syscall.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions according options used to compile kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside kernel. 
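The mdflush thread only produces samples when md devices exist; a quick check through the standard procfs interface:

```bash
cat /proc/mdstat   # lists active md RAID arrays; if none appear, this thread has nothing to monitor
```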
Netada sets necessary permission during installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT, CONFIG_HAVE_SYSCALL_TRACEPOINTS), files inside debugfs, and presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside /boot/config file. Some cited names can be different accoring preferences of Linux distributions.\nWhen you do not have options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, this last is preferred. The kernel compilation has a well definedd pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image for boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if it is necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedule an exit event. To allow this specific feaure, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).`\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/mount.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/mount.conf\n```\n#### Options\n\nAll options are defined inside section `[global]`.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether plugin will monitor the call (`entry`) for the functions or it will also monitor the return (`return`). | entry | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (Attach only `kprobe`), `co-re` (Plugin tries to use `trampoline` when available), and `auto` (plugin check OS configuration before to load). | auto | no |\n| ebpf co-re tracing | Select the attach method used by plugin when `co-re` is defined in previous option. Two options are available: `trampoline` (Option with lowest overhead), and `probe` (the same of legacy code). | trampoline | no |\n| lifetime | Set default lifetime for thread when enabled by cloud. 
| 300 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", +    "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution; the latter is preferred. The kernel compilation has a well defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/mount.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/mount.conf\n```\n#### Options\n\nAll options are defined inside the `[global]` section.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor the call (`entry`) of the functions or also monitor the return (`return`). | entry | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead) and `probe` (the same as the legacy code). | trampoline | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to.
An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF Mount instance\n\nCalls for the mount and umount syscalls.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mount_points.call | mount, umount | calls/s |\n| mount_points.error | mount, umount | calls/s |\n\n", @@ -1722,7 +1643,7 @@ "most_popular": false }, "overview": "# eBPF OOMkill\n\nPlugin: ebpf.plugin\nModule: oomkill\n\n## Overview\n\nMonitor applications that run out of memory.\n\nAttach a tracepoint to internal kernel functions.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called.\n", -    "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file.
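A quick, hedged way to perform that check without recompiling anything (file locations vary by distribution):

```bash
# Kernels built with IKCONFIG expose the running configuration here:
zgrep -E 'CONFIG_(KPROBES|BPF|BPF_SYSCALL|BPF_JIT)=' /proc/config.gz
# Otherwise most distributions ship the config next to the kernel image:
grep -E 'CONFIG_(KPROBES|BPF|BPF_SYSCALL|BPF_JIT)=' "/boot/config-$(uname -r)"
```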
Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution; the latter is preferred. The kernel compilation has a well defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/oomkill.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/oomkill.conf\n```\n#### Options\n\nOverwrite the default configuration to reduce the number of I/O events.\n\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "## Troubleshooting\n\n### update every\n\n\n\n### ebpf load mode\n\n\n\n### lifetime\n\n\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\nThese metrics show the cgroup/service that reached OOM.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.oomkills | cgroup name | kills |\n| services.oomkills | a dimension per systemd service | kills |\n\n### Per apps\n\nThese metrics show the applications that reached OOM.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.oomkill | kills | kills |\n\n", @@ -1806,7 +1727,7 @@ "most_popular": false }, "overview": "# eBPF Processes\n\nPlugin: ebpf.plugin\nModule: processes\n\n## Overview\n\nMonitor calls to the functions that create tasks (threads and processes) inside the Linux kernel.\n\nAttach tracing (kprobe or tracepoint, and trampoline) to internal kernel functions.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel.
Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called.\n", -    "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution; the latter is preferred. The kernel compilation has a well defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/process.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/process.conf\n```\n#### Options\n\nAll options are defined inside the `[global]` section.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor the call (`entry`) of the functions or also monitor the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead) and `probe` (the same as the legacy code). This plugin will always try to attach a tracepoint, so the option here will impact only the function used to monitor task (thread and process) creation.
| trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", +    "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution; the latter is preferred. The kernel compilation has a well defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/process.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/process.conf\n```\n#### Options\n\nAll options are defined inside the `[global]` section.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor the call (`entry`) of the functions or also monitor the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead) and `probe` (the same as the legacy code). This plugin will always try to attach a tracepoint, so the option here will impact only the function used to monitor task (thread and process) creation. | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps.
When enabled (`yes`), the plugin loads one hash table per core instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF Processes instance\n\nThese metrics show the total number of calls to functions inside the kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.process_thread | process | calls/s |\n| system.process_status | process, zombie | difference |\n| system.exit | process | calls/s |\n| system.task_error | task | calls/s |\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.process_create | calls | calls/s |\n| app.thread_create | call | calls/s |\n| app.task_exit | call | calls/s |\n| app.task_close | call | calls/s |\n| app.task_error | app | calls/s |\n\n### Per cgroup\n\nThese metrics show grouped information per cgroup/service.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.process_create | process | calls/s |\n| cgroup.thread_create | thread | calls/s |\n| cgroup.task_exit | exit | calls/s |\n| cgroup.task_close | process | calls/s |\n| cgroup.task_error | process | calls/s |\n| services.process_create | a dimension per systemd service | calls/s |\n| services.thread_create | a dimension per systemd service | calls/s |\n| services.task_close | a dimension per systemd service | calls/s |\n| services.task_exit | a dimension per systemd service | calls/s |\n| services.task_error | a dimension per systemd service | calls/s |\n\n", @@ -1852,7 +1773,7 @@ "most_popular": false }, "overview": "# eBPF SHM\n\nPlugin: ebpf.plugin\nModule: shm\n\n## Overview\n\nMonitor the syscalls responsible for manipulating shared memory.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called.
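Whether the lower-overhead trampoline path can be used depends on the kernel exposing BTF type information, as the auto-detection steps above note; a minimal, hedged check:

```bash
# If this file exists, the kernel exposes BTF and CO-RE/trampoline attachment
# is usually possible; otherwise expect the legacy kprobe path.
ls /sys/kernel/btf/vmlinux 2>/dev/null || echo "no BTF exposed"
```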
The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n", -    "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution; the latter is preferred. The kernel compilation has a well defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/shm.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/shm.conf\n```\n#### Options\n\nThis configuration file has two different sections. The `[global]` section overwrites all default options, while `[syscalls]` allows the user to select the syscalls to monitor.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor the call (`entry`) of the functions or also monitor the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead) and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud.
| 300 | no |\n| shmget | Enable or disable monitoring for syscall `shmget` | yes | no |\n| shmat | Enable or disable monitoring for syscall `shmat` | yes | no |\n| shmdt | Enable or disable monitoring for syscall `shmdt` | yes | no |\n| shmctl | Enable or disable monitoring for syscall `shmctl` | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", +    "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution; the latter is preferred. The kernel compilation has a well defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/shm.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/shm.conf\n```\n#### Options\n\nThis configuration file has two different sections. The `[global]` section overwrites all default options, while `[syscalls]` allows the user to select the syscalls to monitor.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor the call (`entry`) of the functions or also monitor the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead) and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of keeping the information centralized.
| yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n| shmget | Enable or disable monitoring for syscall `shmget` | yes | no |\n| shmat | Enable or disable monitoring for syscall `shmat` | yes | no |\n| shmdt | Enable or disable monitoring for syscall `shmdt` | yes | no |\n| shmctl | Enable or disable monitoring for syscall `shmctl` | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\nThese metrics show grouped information per cgroup/service.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.shmget | get | calls/s |\n| cgroup.shmat | at | calls/s |\n| cgroup.shmdt | dt | calls/s |\n| cgroup.shmctl | ctl | calls/s |\n| services.shmget | a dimension per systemd service | calls/s |\n| services.shmat | a dimension per systemd service | calls/s |\n| services.shmdt | a dimension per systemd service | calls/s |\n| services.shmctl | a dimension per systemd service | calls/s |\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_shmget_call | calls | calls/s |\n| app.ebpf_shmat_call | calls | calls/s |\n| app.ebpf_shmdt_call | calls | calls/s |\n| app.ebpf_shmctl_call | calls | calls/s |\n\n### Per eBPF SHM instance\n\nThese metrics show the number of calls for the specified syscalls.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.shared_memory_calls | get, at, dt, ctl | calls/s |\n\n", @@ -1901,7 +1822,7 @@ "most_popular": false }, "overview": "# eBPF Socket\n\nPlugin: ebpf.plugin\nModule: socket\n\n## Overview\n\nMonitor bandwidth consumption per application for the TCP and UDP protocols.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n", -    "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file.
Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution; the latter is preferred. The kernel compilation has a well defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/network.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/network.conf\n```\n#### Options\n\nAll options are defined inside the `[global]` section. Options inside `network connections` are ignored for now.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor the call (`entry`) of the functions or also monitor the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| bandwidth table size | Number of elements stored inside hash tables used to monitor calls per PID. | 16384 | no |\n| ipv4 connection table size | Number of elements stored inside hash tables used to monitor calls per IPv4 connection. | 16384 | no |\n| ipv6 connection table size | Number of elements stored inside hash tables used to monitor calls per IPv6 connection. | 16384 | no |\n| udp connection table size | Number of temporary elements stored inside hash tables used to monitor UDP connections. | 4096 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead) and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", +    "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file.
Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution; the latter is preferred. The kernel compilation has a well defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/network.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/network.conf\n```\n#### Options\n\nAll options are defined inside the `[global]` section. Options inside `network connections` are ignored for now.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor the call (`entry`) of the functions or also monitor the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| bandwidth table size | Number of elements stored inside hash tables used to monitor calls per PID. | 16384 | no |\n| ipv4 connection table size | Number of elements stored inside hash tables used to monitor calls per IPv4 connection. | 16384 | no |\n| ipv6 connection table size | Number of elements stored inside hash tables used to monitor calls per IPv6 connection. | 16384 | no |\n| udp connection table size | Number of temporary elements stored inside hash tables used to monitor UDP connections. | 4096 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead) and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to.
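Since no configuration examples ship with this collector, here is a minimal, hedged sketch of tuning the socket hash tables in `ebpf.d/network.conf`; the option names come from the table above, while the values are purely illustrative assumptions, not recommendations:

```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
sudo ./edit-config ebpf.d/network.conf
# Then, inside the file, illustrative [global] entries:
#   [global]
#       bandwidth table size = 32768
#       ipv4 connection table size = 32768
#       udp connection table size = 8192
```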
An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF Socket instance\n\nThese metrics show the total number of calls to functions inside the kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ip.inbound_conn | connection_tcp | connections/s |\n| ip.tcp_outbound_conn | received | connections/s |\n| ip.tcp_functions | received, send, closed | calls/s |\n| ip.total_tcp_bandwidth | received, send | kilobits/s |\n| ip.tcp_error | received, send | calls/s |\n| ip.tcp_retransmit | retransmited | calls/s |\n| ip.udp_functions | received, send | calls/s |\n| ip.total_udp_bandwidth | received, send | kilobits/s |\n| ip.udp_error | received, send | calls/s |\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_call_tcp_v4_connection | connections | connections/s |\n| app.ebpf_call_tcp_v6_connection | connections | connections/s |\n| app.ebpf_sock_total_bandwidth | received, sent | kilobits/s |\n| app.ebpf_call_tcp_sendmsg | calls | calls/s |\n| app.ebpf_call_tcp_cleanup_rbuf | calls | calls/s |\n| app.ebpf_call_tcp_retransmit | calls | calls/s |\n| app.ebpf_call_udp_sendmsg | calls | calls/s |\n| app.ebpf_call_udp_recvmsg | calls | calls/s |\n\n### Per cgroup\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.net_conn_ipv4 | connections | connections/s |\n| cgroup.net_conn_ipv6 | connections | connections/s |\n| cgroup.net_total_bandwidth | received, sent | kilobits/s |\n| cgroup.net_tcp_recv | calls | calls/s |\n| cgroup.net_tcp_send | calls | calls/s |\n| cgroup.net_retransmit | calls | calls/s |\n| cgroup.net_udp_send | calls | calls/s |\n| cgroup.net_udp_recv | calls | calls/s |\n| services.net_conn_ipv4 | connections | connections/s |\n| services.net_conn_ipv6 | connections | connections/s |\n| services.net_total_bandwidth | received, sent | kilobits/s |\n| services.net_tcp_recv | calls | calls/s |\n| services.net_tcp_send | calls | calls/s |\n| services.net_tcp_retransmit | calls | calls/s |\n| services.net_udp_send | calls | calls/s |\n| services.net_udp_recv | calls | calls/s |\n\n", @@ -1937,7 +1858,7 @@ "most_popular": false }, "overview": "# eBPF SoftIRQ\n\nPlugin: ebpf.plugin\nModule: softirq\n\n## Overview\n\nMonitor the latency of each available SoftIRQ.\n\nAttach a kprobe to internal kernel functions.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel.
Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called.\n", -    "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution; the latter is preferred. The kernel compilation has a well defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/softirq.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/softirq.conf\n```\n#### Options\n\nAll options are defined inside the `[global]` section.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor the call (`entry`) of the functions or also monitor the return (`return`). | entry | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", +    "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution; the latter is preferred. The kernel compilation has a well defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3.
Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/softirq.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/softirq.conf\n```\n#### Options\n\nAll options are defined inside the `[global]` section.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor the call (`entry`) of the functions or also monitor the return (`return`). | entry | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF SoftIRQ instance\n\nThese metrics show the latest timestamp for each softIRQ available on the host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.softirq_latency | soft IRQs | milliseconds |\n\n", @@ -1984,7 +1905,7 @@ "most_popular": false }, "overview": "# eBPF SWAP\n\nPlugin: ebpf.plugin\nModule: swap\n\n## Overview\n\nMonitors swap I/O events and the applications executing them.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called.
The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n", -    "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution; the latter is preferred. The kernel compilation has a well defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/swap.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/swap.conf\n```\n#### Options\n\nAll options are defined inside the `[global]` section.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor the call (`entry`) of the functions or also monitor the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead) and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", +    "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution; the latter is preferred.
The kernel compilation has a well defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/swap.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/swap.conf\n```\n#### Options\n\nAll options are defined inside the `[global]` section.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor the call (`entry`) of the functions or also monitor the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead) and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\nThese metrics show grouped information per cgroup/service.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.swap_read | read | calls/s |\n| cgroup.swap_write | write | calls/s |\n| services.swap_read | a dimension per systemd service | calls/s |\n| services.swap_write | a dimension per systemd service | calls/s |\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration.
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_call_swap_readpage | a dimension per app group | calls/s |\n| app.ebpf_call_swap_writepage | a dimension per app group | calls/s |\n\n### Per eBPF SWAP instance\n\nThese metrics show the total number of calls to functions inside the kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.swapcalls | write, read | calls/s |\n\n", @@ -2022,7 +1943,7 @@ "most_popular": false }, "overview": "# eBPF Sync\n\nPlugin: ebpf.plugin\nModule: sync\n\n## Overview\n\nMonitor the syscalls responsible for moving data from memory to the storage device.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT, CONFIG_HAVE_SYSCALL_TRACEPOINTS), files inside debugfs, and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n", -    "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution; the latter is preferred. The kernel compilation has a well defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/sync.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/sync.conf\n```\n#### Options\n\nThis configuration file has two different sections.
The `[global]` section overwrites all default options, while `[syscalls]` allows the user to select which syscalls to monitor.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin monitors only function calls (`entry`) or also function returns (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is set in the previous option. Two options are available: `trampoline` (lowest overhead) and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin loads its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of a single centralized table. | yes | no |\n| lifetime | Set the default lifetime for the thread when it is enabled from Netdata Cloud. | 300 | no |\n| sync | Enable or disable monitoring for syscall `sync` | yes | no |\n| msync | Enable or disable monitoring for syscall `msync` | yes | no |\n| fsync | Enable or disable monitoring for syscall `fsync` | yes | no |\n| fdatasync | Enable or disable monitoring for syscall `fdatasync` | yes | no |\n| syncfs | Enable or disable monitoring for syscall `syncfs` | yes | no |\n| sync_file_range | Enable or disable monitoring for syscall `sync_file_range` | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, you need to get the kernel source code from https://kernel.org or a kernel package from your distribution (the latter is preferred). The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n
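\nAs a rough sketch of steps 2-6 above (the source-tree path, image name, and boot loader tooling are assumptions that vary per distribution):\n\n```bash\ncd /usr/src/linux                     # kernel source tree; step 1 copied .config here\nmake oldconfig                        # confirm the options listed above\nmake bzImage && make modules          # build the kernel image and the modules\nmake modules_install                  # install the new modules\ncp arch/x86/boot/bzImage /boot/vmlinuz-custom  # destination name is an assumption\n# then generate an initrd if needed and update the boot loader with your\n# distribution's tools (for example dracut or mkinitramfs, then update-grub)\n```\n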
\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/sync.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/sync.conf\n```\n#### Options\n\nThis configuration file has two different sections. The `[global]` section overwrites all default options, while `[syscalls]` allows the user to select which syscalls to monitor.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin monitors only function calls (`entry`) or also function returns (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is set in the previous option. Two options are available: `trampoline` (lowest overhead) and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin loads its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of a single centralized table. | yes | no |\n| lifetime | Set the default lifetime for the thread when it is enabled from Netdata Cloud. | 300 | no |\n| sync | Enable or disable monitoring for syscall `sync` | yes | no |\n| msync | Enable or disable monitoring for syscall `msync` | yes | no |\n| fsync | Enable or disable monitoring for syscall `fsync` | yes | no |\n| fdatasync | Enable or disable monitoring for syscall `fdatasync` | yes | no |\n| syncfs | Enable or disable monitoring for syscall `syncfs` | yes | no |\n| sync_file_range | Enable or disable monitoring for syscall `sync_file_range` | yes | no |\n\n#### Examples\nThere are no configuration examples.\n
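\nAs an unofficial illustration (this sketch is not shipped with Netdata; the option names come from the table above), a `[syscalls]` section that disables monitoring of `msync` could look like:\n\n```ini\n[syscalls]\n    msync = no\n```\n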
\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ sync_freq ](https://github.com/netdata/netdata/blob/master/src/health/health.d/synchronization.conf) | mem.sync | number of sync() system calls. Every call causes all pending modifications to filesystem metadata and cached file data to be written to the underlying filesystems. |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF Sync instance\n\nThese metrics show the total number of calls to functions inside the kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.file_sync | fsync, fdatasync | calls/s |\n| mem.memory_map | msync | calls/s |\n| mem.sync | sync, syncfs | calls/s |\n| mem.file_segment | sync_file_range | calls/s |\n\n", @@ -2070,7 +1991,7 @@ "most_popular": false }, "overview": "# eBPF VFS\n\nPlugin: ebpf.plugin\nModule: vfs\n\n## Overview\n\nMonitor I/O events on the Linux Virtual Filesystem.\n\nIt attaches tracing (kprobe, trampoline) to internal kernel functions according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread adds overhead every time an internal kernel function monitored by it is called. The estimated additional time is between 90 and 200 ms per call on kernels that do not have BTF technology.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, you need to get the kernel source code from https://kernel.org or a kernel package from your distribution (the latter is preferred). The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/vfs.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/vfs.conf\n```\n#### Options\n\nAll options are defined inside section `[global]`.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin monitors only function calls (`entry`) or also function returns (`return`). 
| entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is set in the previous option. Two options are available: `trampoline` (lowest overhead) and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin loads its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of a single centralized table. | yes | no |\n| lifetime | Set the default lifetime for the thread when it is enabled from Netdata Cloud. | 300 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, you need to get the kernel source code from https://kernel.org or a kernel package from your distribution (the latter is preferred). The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/vfs.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/vfs.conf\n```\n#### Options\n\nAll options are defined inside section `[global]`.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin monitors only function calls (`entry`) or also function returns (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). 
| auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is set in the previous option. Two options are available: `trampoline` (lowest overhead) and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin loads its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of a single centralized table. | yes | no |\n| lifetime | Set the default lifetime for the thread when it is enabled from Netdata Cloud. | 300 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\nThese metrics show grouped information per cgroup/service.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.vfs_unlink | delete | calls/s |\n| cgroup.vfs_write | write | calls/s |\n| cgroup.vfs_write_error | write | calls/s |\n| cgroup.vfs_read | read | calls/s |\n| cgroup.vfs_read_error | read | calls/s |\n| cgroup.vfs_write_bytes | write | bytes/s |\n| cgroup.vfs_read_bytes | read | bytes/s |\n| cgroup.vfs_fsync | fsync | calls/s |\n| cgroup.vfs_fsync_error | fsync | calls/s |\n| cgroup.vfs_open | open | calls/s |\n| cgroup.vfs_open_error | open | calls/s |\n| cgroup.vfs_create | create | calls/s |\n| cgroup.vfs_create_error | create | calls/s |\n| services.vfs_unlink | a dimension per systemd service | calls/s |\n| services.vfs_write | a dimension per systemd service | calls/s |\n| services.vfs_write_error | a dimension per systemd service | calls/s |\n| services.vfs_read | a dimension per systemd service | calls/s |\n| services.vfs_read_error | a dimension per systemd service | calls/s |\n| services.vfs_write_bytes | a dimension per systemd service | bytes/s |\n| services.vfs_read_bytes | a dimension per systemd service | bytes/s |\n| services.vfs_fsync | a dimension per systemd service | calls/s |\n| services.vfs_fsync_error | a dimension per systemd service | calls/s |\n| services.vfs_open | a dimension per systemd service | calls/s |\n| services.vfs_open_error | a dimension per systemd service | calls/s |\n| services.vfs_create | a dimension per systemd service | calls/s |\n| services.vfs_create_error | a dimension per systemd service | calls/s |\n\n### Per eBPF VFS instance\n\nThese metrics show the total number of calls to functions inside the kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filesystem.vfs_deleted_objects | delete | calls/s |\n| filesystem.vfs_io | read, write | calls/s |\n| filesystem.vfs_io_bytes | read, write | bytes/s |\n| filesystem.vfs_io_error | read, write | calls/s |\n| filesystem.vfs_fsync | fsync | calls/s |\n| filesystem.vfs_fsync_error | fsync | calls/s |\n| filesystem.vfs_open | open | calls/s |\n| filesystem.vfs_open_error | open | calls/s |\n| filesystem.vfs_create | create | calls/s |\n| filesystem.vfs_create_error | create | calls/s |\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_call_vfs_unlink | calls | calls/s |\n| app.ebpf_call_vfs_write | calls | calls/s |\n| app.ebpf_call_vfs_write_error | calls | calls/s |\n| app.ebpf_call_vfs_read | calls | calls/s |\n| app.ebpf_call_vfs_read_error | calls | calls/s |\n| app.ebpf_call_vfs_write_bytes | writes | bytes/s |\n| app.ebpf_call_vfs_read_bytes | reads | bytes/s |\n| app.ebpf_call_vfs_fsync | calls | calls/s |\n| app.ebpf_call_vfs_fsync_error | calls | calls/s |\n| app.ebpf_call_vfs_open | calls | calls/s |\n| app.ebpf_call_vfs_open_error | calls | calls/s |\n| app.ebpf_call_vfs_create | calls | calls/s |\n| app.ebpf_call_vfs_create_error | calls | calls/s |\n\n", @@ -2103,7 +2024,7 @@ "most_popular": false }, "overview": "# dev.cpu.0.freq\n\nPlugin: freebsd.plugin\nModule: dev.cpu.0.freq\n\n## Overview\n\nRead the current CPU scaling frequency.\n\nCurrent CPU Scaling Frequency\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| dev.cpu.0.freq | Enable or disable the CPU scaling frequency metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| dev.cpu.0.freq | Enable or disable the CPU scaling frequency metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per dev.cpu.0.freq instance\n\nThis metric shows the current CPU frequency; it is directly affected by system load.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.scaling_cur_freq | frequency | MHz |\n\n", @@ -2136,7 +2057,7 @@ "most_popular": false }, "overview": "# dev.cpu.temperature\n\nPlugin: freebsd.plugin\nModule: dev.cpu.temperature\n\n## Overview\n\nGet the current CPU temperature.\n\nThe plugin calls the `sysctl` function to collect the necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| dev.cpu.temperature | Enable or disable the CPU temperature metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| dev.cpu.temperature | Enable or disable the CPU temperature metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n
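\nAs a quick sanity check (an illustrative sketch, not part of the shipped documentation; it assumes the coretemp(4) or amdtemp(4) driver is loaded), you can query the same OID this integration reads:\n\n```bash\nsysctl dev.cpu.0.temperature   # repeat per core, e.g. dev.cpu.1.temperature\n```\n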
\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per dev.cpu.temperature instance\n\nThis metric shows the latest CPU temperature.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.temperature | a dimension per core | Celsius |\n\n", @@ -2169,7 +2090,7 @@ "most_popular": false }, "overview": "# devstat\n\nPlugin: freebsd.plugin\nModule: devstat\n\n## Overview\n\nCollect information for each hard disk available on the host.\n\nThe plugin calls the `sysctl` function to collect the necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:kern.devstat]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enable new disks detected at runtime | Enable or disable the detection of new disks at runtime. | auto | no |\n| performance metrics for pass devices | Enable or disable metrics for disks with type `PASS`. | auto | no |\n| total bandwidth for all disks | Enable or disable the total bandwidth metric for all disks. | yes | no |\n| bandwidth for all disks | Enable or disable the bandwidth for all disks metric. | auto | no |\n| operations for all disks | Enable or disable the operations for all disks metric. | auto | no |\n| queued operations for all disks | Enable or disable the queued operations for all disks metric. | auto | no |\n| utilization percentage for all disks | Enable or disable the utilization percentage for all disks metric. | auto | no |\n| i/o time for all disks | Enable or disable the I/O time for all disks metric. | auto | no |\n| average completed i/o time for all disks | Enable or disable the average completed I/O time for all disks metric. | auto | no |\n| average completed i/o bandwidth for all disks | Enable or disable the average completed I/O bandwidth for all disks metric. | auto | no |\n| average service time for all disks | Enable or disable the average service time for all disks metric. | auto | no |\n| disable by default disks matching | Do not create charts for the disks listed. 
| | no |\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:kern.devstat]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enable new disks detected at runtime | Enable or disable the detection of new disks at runtime. | auto | no |\n| performance metrics for pass devices | Enable or disable metrics for disks with type `PASS`. | auto | no |\n| total bandwidth for all disks | Enable or disable the total bandwidth metric for all disks. | yes | no |\n| bandwidth for all disks | Enable or disable the bandwidth for all disks metric. | auto | no |\n| operations for all disks | Enable or disable the operations for all disks metric. | auto | no |\n| queued operations for all disks | Enable or disable the queued operations for all disks metric. | auto | no |\n| utilization percentage for all disks | Enable or disable the utilization percentage for all disks metric. | auto | no |\n| i/o time for all disks | Enable or disable the I/O time for all disks metric. | auto | no |\n| average completed i/o time for all disks | Enable or disable the average completed I/O time for all disks metric. | auto | no |\n| average completed i/o bandwidth for all disks | Enable or disable the average completed I/O bandwidth for all disks metric. | auto | no |\n| average service time for all disks | Enable or disable the average service time for all disks metric. | auto | no |\n| disable by default disks matching | Do not create charts for the disks listed. | | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 10min_disk_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.util | average percentage of time ${label:device} disk was busy over the last 10 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per devstat instance\n\nThese metrics give a general overview of I/O events on disks.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.io | in, out | KiB/s |\n\n### Per disk\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.io | reads, writes, frees | KiB/s |\n| disk.ops | reads, writes, other, frees | operations/s |\n| disk.qops | operations | operations |\n| disk.util | utilization | % of time working |\n| disk.iotime | reads, writes, other, frees | milliseconds/s |\n| disk.await | reads, writes, other, frees | milliseconds/operation |\n| disk.avgsz | reads, writes, frees | KiB/operation |\n| disk.svctm | svctm | milliseconds/operation |\n\n", @@ -2202,7 +2123,7 @@ "most_popular": false }, "overview": "# getifaddrs\n\nPlugin: freebsd.plugin\nModule: getifaddrs\n\n## Overview\n\nCollect traffic per network interface.\n\nThe plugin calls the `getifaddrs` function to collect the necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:getifaddrs]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enable new interfaces detected at runtime | Enable or disable the discovery of new interfaces after the plugin starts. | auto | no |\n| total bandwidth for physical interfaces | Enable or disable the total bandwidth for physical interfaces metric. | auto | no |\n| total packets for physical interfaces | Enable or disable the total packets for physical interfaces metric. | auto | no |\n| total bandwidth for ipv4 interface | Enable or disable the total bandwidth for IPv4 interfaces metric. | auto | no |\n| total bandwidth for ipv6 interfaces | Enable or disable the total bandwidth for IPv6 interfaces metric. | auto | no |\n| bandwidth for all interfaces | Enable or disable the bandwidth for all interfaces metric. | auto | no |\n| packets for all interfaces | Enable or disable the packets for all interfaces metric. | auto | no |\n| errors for all interfaces | Enable or disable the errors for all interfaces metric. | auto | no |\n| drops for all interfaces | Enable or disable the drops for all interfaces metric. 
| auto | no |\n| collisions for all interface | Enable or disable the collisions for all interfaces metric. | auto | no |\n| disable by default interfaces matching | Do not display data for the interfaces listed. | lo* | no |\n| set physical interfaces for system.net | Do not show network traffic for the listed interfaces. | igb* ix* cxl* em* ixl* ixlv* bge* ixgbe* vtnet* vmx* re* igc* dwc* | no |\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:getifaddrs]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enable new interfaces detected at runtime | Enable or disable the discovery of new interfaces after the plugin starts. | auto | no |\n| total bandwidth for physical interfaces | Enable or disable the total bandwidth for physical interfaces metric. | auto | no |\n| total packets for physical interfaces | Enable or disable the total packets for physical interfaces metric. | auto | no |\n| total bandwidth for ipv4 interface | Enable or disable the total bandwidth for IPv4 interfaces metric. | auto | no |\n| total bandwidth for ipv6 interfaces | Enable or disable the total bandwidth for IPv6 interfaces metric. | auto | no |\n| bandwidth for all interfaces | Enable or disable the bandwidth for all interfaces metric. | auto | no |\n| packets for all interfaces | Enable or disable the packets for all interfaces metric. | auto | no |\n| errors for all interfaces | Enable or disable the errors for all interfaces metric. | auto | no |\n| drops for all interfaces | Enable or disable the drops for all interfaces metric. | auto | no |\n| collisions for all interface | Enable or disable the collisions for all interfaces metric. | auto | no |\n| disable by default interfaces matching | Do not display data for the interfaces listed. | lo* | no |\n| set physical interfaces for system.net | Do not show network traffic for the listed interfaces. 
| igb* ix* cxl* em* ixl* ixlv* bge* ixgbe* vtnet* vmx* re* igc* dwc* | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ interface_speed ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.net | network interface ${label:device} current speed |\n| [ inbound_packets_dropped_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.drops | ratio of inbound dropped packets for the network interface ${label:device} over the last 10 minutes |\n| [ outbound_packets_dropped_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.drops | ratio of outbound dropped packets for the network interface ${label:device} over the last 10 minutes |\n| [ 1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ 10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n| [ interface_inbound_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.errors | number of inbound errors for the network interface ${label:device} in the last 10 minutes |\n| [ interface_outbound_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.errors | number of outbound errors for the network interface ${label:device} in the last 10 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per getifaddrs instance\n\nGeneral overview of network traffic.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.net | received, sent | kilobits/s |\n| system.packets | received, sent, multicast_received, multicast_sent | packets/s |\n| system.ipv4 | received, sent | kilobits/s |\n| system.ipv6 | received, sent | kilobits/s |\n\n### Per network device\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| net.net | received, sent | kilobits/s |\n| net.packets | received, sent, multicast_received, multicast_sent | packets/s |\n| net.errors | inbound, outbound | errors/s |\n| net.drops | inbound, outbound | drops/s |\n| net.events | collisions | events/s |\n\n", @@ -2235,7 +2156,7 @@ "most_popular": false }, "overview": "# getmntinfo\n\nPlugin: freebsd.plugin\nModule: getmntinfo\n\n## Overview\n\nCollect information per mount point.\n\nThe plugin calls the `getmntinfo` function to collect the necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:getmntinfo]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enable new mount points detected at runtime | Check for new mount points during runtime. | auto | no |\n| space usage for all disks | Enable or disable the space usage for all disks metric. | auto | no |\n| inodes usage for all disks | Enable or disable the inodes usage for all disks metric. | auto | no |\n| exclude space metrics on paths | Do not show metrics for the listed paths. | /proc/* | no |\n| exclude space metrics on filesystems | Do not monitor the listed filesystems. | autofs procfs subfs devfs none | no |\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:getmntinfo]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enable new mount points detected at runtime | Check for new mount points during runtime. | auto | no |\n| space usage for all disks | Enable or disable the space usage for all disks metric. | auto | no |\n| inodes usage for all disks | Enable or disable the inodes usage for all disks metric. | auto | no |\n| exclude space metrics on paths | Do not show metrics for the listed paths. | /proc/* | no |\n| exclude space metrics on filesystems | Do not monitor the listed filesystems. | autofs procfs subfs devfs none | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ disk_space_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.space | disk ${label:mount_point} space utilization |\n| [ disk_inode_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.inodes | disk ${label:mount_point} inode utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per mount point\n\nThese metrics show details about mount point usage.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.space | avail, used, reserved_for_root | GiB |\n| disk.inodes | avail, used, reserved_for_root | inodes |\n\n", @@ -2268,7 +2189,7 @@ "most_popular": false }, "overview": "# hw.intrcnt\n\nPlugin: freebsd.plugin\nModule: hw.intrcnt\n\n## Overview\n\nGet the total number of interrupts.\n\nThe plugin calls the `sysctl` function to collect the necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| hw.intrcnt | Enable or disable the interrupts metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| hw.intrcnt | Enable or disable the interrupts metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per hw.intrcnt instance\n\nThese metrics show the system interrupt frequency.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.intr | interrupts | interrupts/s |\n| system.interrupts | a dimension per interrupt | interrupts/s |\n\n", @@ -2301,7 +2222,7 @@ "most_popular": false }, "overview": "# ipfw\n\nPlugin: freebsd.plugin\nModule: ipfw\n\n## Overview\n\nCollect information about the FreeBSD firewall.\n\nThe plugin uses a RAW socket to communicate with the kernel and collect data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:ipfw]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| counters for static rules | Enable or disable the counters for static rules metric. | yes | no |\n| number of dynamic rules | Enable or disable the number of dynamic rules metric. | yes | no |\n| allocated memory | Enable or disable the allocated memory metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:ipfw]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| counters for static rules | Enable or disable the counters for static rules metric. | yes | no |\n| number of dynamic rules | Enable or disable the number of dynamic rules metric. | yes | no |\n| allocated memory | Enable or disable the allocated memory metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n
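\nTo see the per-rule counters these charts are built from (an illustrative check, not part of the shipped documentation; it assumes the ipfw(8) firewall is enabled), list the rules with their counters:\n\n```bash\nipfw -a list   # -a prints the packet and byte counters for each rule\n```\n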
\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per ipfw instance\n\nThese metrics show FreeBSD firewall statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipfw.mem | dynamic, static | bytes |\n| ipfw.packets | a dimension per static rule | packets/s |\n| ipfw.bytes | a dimension per static rule | bytes/s |\n| ipfw.active | a dimension per dynamic rule | rules |\n| ipfw.expired | a dimension per dynamic rule | rules |\n\n", @@ -2334,7 +2255,7 @@ "most_popular": false }, "overview": "# kern.cp_time\n\nPlugin: freebsd.plugin\nModule: kern.cp_time\n\n## Overview\n\nTotal CPU utilization\n\nThe plugin calls the `sysctl` function to collect the necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nThe Netdata main configuration file.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| kern.cp_time | Enable or disable the Total CPU usage metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nThe Netdata main configuration file.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| kern.cp_time | Enable or disable the Total CPU usage metric. 
| yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU utilization over the last 10 minutes (excluding iowait, nice and steal) |\n| [ 10min_cpu_iowait ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU iowait time over the last 10 minutes |\n| [ 20min_steal_cpu ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU steal time over the last 20 minutes |\n| [ 10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU utilization over the last 10 minutes (excluding nice) |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per kern.cp_time instance\n\nThese metrics show CPU usage statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.cpu | nice, system, user, interrupt, idle | percentage |\n\n### Per core\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.cpu | nice, system, user, interrupt, idle | percentage |\n\n", @@ -2367,7 +2288,7 @@ "most_popular": false }, "overview": "# kern.ipc.msq\n\nPlugin: freebsd.plugin\nModule: kern.ipc.msq\n\n## Overview\n\nCollect the number of IPC message queues.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| kern.ipc.msq | Enable or disable the IPC message queue metric. 
| yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| kern.ipc.msq | Enable or disable IPC message queue metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per kern.ipc.msq instance\n\nThese metrics show IPC message statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ipc_msq_queues | queues | queues |\n| system.ipc_msq_messages | messages | messages |\n| system.ipc_msq_size | allocated, used | bytes |\n\n", @@ -2400,7 +2321,7 @@ "most_popular": false }, "overview": "# kern.ipc.sem\n\nPlugin: freebsd.plugin\nModule: kern.ipc.sem\n\n## Overview\n\nCollect information about semaphores.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| kern.ipc.sem | Enable or disable semaphore metrics. 
| yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| kern.ipc.sem | Enable or disable semaphore metrics. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ semaphores_used ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ipc.conf) | system.ipc_semaphores | IPC semaphore utilization |\n| [ semaphore_arrays_used ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ipc.conf) | system.ipc_semaphore_arrays | IPC semaphore arrays utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per kern.ipc.sem instance\n\nThese metrics show counters for semaphores on the host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ipc_semaphores | semaphores | semaphores |\n| system.ipc_semaphore_arrays | arrays | arrays |\n\n", @@ -2433,7 +2354,7 @@ "most_popular": false }, "overview": "# kern.ipc.shm\n\nPlugin: freebsd.plugin\nModule: kern.ipc.shm\n\n## Overview\n\nCollect shared memory information.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| kern.ipc.shm | Enable or disable shared memory metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| kern.ipc.shm | Enable or disable shared memory metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per kern.ipc.shm instance\n\nThese metrics give status about current shared memory segments.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ipc_shared_mem_segs | segments | segments |\n| system.ipc_shared_mem_size | allocated | KiB |\n\n", @@ -2466,7 +2387,7 @@ "most_popular": false }, "overview": "# net.inet.icmp.stats\n\nPlugin: freebsd.plugin\nModule: net.inet.icmp.stats\n\n## Overview\n\nCollect information about ICMP traffic.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet.icmp.stats]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| IPv4 ICMP packets | Enable or disable IPv4 ICMP packets metric. | yes | no |\n| IPv4 ICMP error | Enable or disable IPv4 ICMP error metric. | yes | no |\n| IPv4 ICMP messages | Enable or disable IPv4 ICMP messages metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet.icmp.stats]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| IPv4 ICMP packets | Enable or disable IPv4 ICMP packets metric. | yes | no |\n| IPv4 ICMP error | Enable or disable IPv4 ICMP error metric. | yes | no |\n| IPv4 ICMP messages | Enable or disable IPv4 ICMP messages metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet.icmp.stats instance\n\nThese metrics show ICMP connections statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv4.icmp | received, sent | packets/s |\n| ipv4.icmp_errors | InErrors, OutErrors, InCsumErrors | packets/s |\n| ipv4.icmpmsg | InEchoReps, OutEchoReps, InEchos, OutEchos | packets/s |\n\n", @@ -2499,7 +2420,7 @@ "most_popular": false }, "overview": "# net.inet.ip.stats\n\nPlugin: freebsd.plugin\nModule: net.inet.ip.stats\n\n## Overview\n\nCollect IP stats\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet.ip.stats]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| ipv4 packets | Enable or disable IPv4 packets metric. | yes | no |\n| ipv4 fragments sent | Enable or disable IPv4 fragments sent metric. | yes | no |\n| ipv4 fragments assembly | Enable or disable IPv4 fragments assembly metric. | yes | no |\n| ipv4 errors | Enable or disable IPv4 errors metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet.ip.stats]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| ipv4 packets | Enable or disable IPv4 packets metric. 
| yes | no |\n| ipv4 fragments sent | Enable or disable IPv4 fragments sent metric. | yes | no |\n| ipv4 fragments assembly | Enable or disable IPv4 fragments assembly metric. | yes | no |\n| ipv4 errors | Enable or disable IPv4 errors metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet.ip.stats instance\n\nThese metrics show IPv4 connections statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv4.packets | received, sent, forwarded, delivered | packets/s |\n| ipv4.fragsout | ok, failed, created | packets/s |\n| ipv4.fragsin | ok, failed, all | packets/s |\n| ipv4.errors | InDiscards, OutDiscards, InHdrErrors, OutNoRoutes, InAddrErrors, InUnknownProtos | packets/s |\n\n", @@ -2532,7 +2453,7 @@ "most_popular": false }, "overview": "# net.inet.tcp.states\n\nPlugin: freebsd.plugin\nModule: net.inet.tcp.states\n\n## Overview\n\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| net.inet.tcp.states | Enable or disable TCP state metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| net.inet.tcp.states | Enable or disable TCP state metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ tcp_connections ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_conn.conf) | ipv4.tcpsock | IPv4 TCP connections utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet.tcp.states instance\n\nA counter for TCP connections.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv4.tcpsock | connections | active connections |\n\n", @@ -2565,7 +2486,7 @@ "most_popular": false }, "overview": "# net.inet.tcp.stats\n\nPlugin: freebsd.plugin\nModule: net.inet.tcp.stats\n\n## Overview\n\nCollect overall information about TCP connections.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet.tcp.stats]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| ipv4 TCP packets | Enable or disable ipv4 TCP packets metric. | yes | no |\n| ipv4 TCP errors | Enable or disable pv4 TCP errors metric. | yes | no |\n| ipv4 TCP handshake issues | Enable or disable ipv4 TCP handshake issue metric. | yes | no |\n| TCP connection aborts | Enable or disable TCP connection aborts metric. 
| auto | no |\n| TCP out-of-order queue | Enable or disable TCP out-of-order queue metric. | auto | no |\n| TCP SYN cookies | Enable or disable TCP SYN cookies metric. | auto | no |\n| TCP listen issues | Enable or disable TCP listen issues metric. | auto | no |\n| ECN packets | Enable or disable ECN packets metric. | auto | no |\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet.tcp.stats]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| ipv4 TCP packets | Enable or disable ipv4 TCP packets metric. | yes | no |\n| ipv4 TCP errors | Enable or disable ipv4 TCP errors metric. | yes | no |\n| ipv4 TCP handshake issues | Enable or disable ipv4 TCP handshake issue metric. | yes | no |\n| TCP connection aborts | Enable or disable TCP connection aborts metric. | auto | no |\n| TCP out-of-order queue | Enable or disable TCP out-of-order queue metric. | auto | no |\n| TCP SYN cookies | Enable or disable TCP SYN cookies metric. | auto | no |\n| TCP listen issues | Enable or disable TCP listen issues metric. | auto | no |\n| ECN packets | Enable or disable ECN packets metric. | auto | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 1m_ipv4_tcp_resets_sent ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ipv4.tcphandshake | average number of sent TCP RESETS over the last minute |\n| [ 10s_ipv4_tcp_resets_sent ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ipv4.tcphandshake | average number of sent TCP RESETS over the last 10 seconds. This can indicate a port scan, or that a service running on this host has crashed. Netdata will not send a clear notification for this alarm. |\n| [ 1m_ipv4_tcp_resets_received ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ipv4.tcphandshake | average number of received TCP RESETS over the last minute |\n| [ 10s_ipv4_tcp_resets_received ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ipv4.tcphandshake | average number of received TCP RESETS over the last 10 seconds. This can be an indication that a service this host needs has crashed. Netdata will not send a clear notification for this alarm. |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet.tcp.stats instance\n\nThese metrics show TCP connections statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv4.tcppackets | received, sent | packets/s |\n| ipv4.tcperrors | InErrs, InCsumErrors, RetransSegs | packets/s |\n| ipv4.tcphandshake | EstabResets, ActiveOpens, PassiveOpens, AttemptFails | events/s |\n| ipv4.tcpconnaborts | baddata, userclosed, nomemory, timeout, linger | connections/s |\n| ipv4.tcpofo | inqueue | packets/s |\n| ipv4.tcpsyncookies | received, sent, failed | packets/s |\n| ipv4.tcplistenissues | overflows | packets/s |\n| ipv4.ecnpkts | InCEPkts, InECT0Pkts, InECT1Pkts, OutECT0Pkts, OutECT1Pkts | packets/s |\n\n", @@ -2598,7 +2519,7 @@ "most_popular": false }, "overview": "# net.inet.udp.stats\n\nPlugin: freebsd.plugin\nModule: net.inet.udp.stats\n\n## Overview\n\nCollect information about UDP connections.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet.udp.stats]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| ipv4 UDP packets | Enable or disable ipv4 UDP packets metric. | yes | no |\n| ipv4 UDP errors | Enable or disable ipv4 UDP errors metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet.udp.stats]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| ipv4 UDP packets | Enable or disable ipv4 UDP packets metric. | yes | no |\n| ipv4 UDP errors | Enable or disable ipv4 UDP errors metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 1m_ipv4_udp_receive_buffer_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/udp_errors.conf) | ipv4.udperrors | average number of UDP receive buffer errors over the last minute |\n| [ 1m_ipv4_udp_send_buffer_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/udp_errors.conf) | ipv4.udperrors | average number of UDP send buffer errors over the last minute |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet.udp.stats instance\n\nThese metrics show UDP connections statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv4.udppackets | received, sent | packets/s |\n| ipv4.udperrors | InErrors, NoPorts, RcvbufErrors, InCsumErrors, IgnoredMulti | events/s |\n\n", @@ -2631,7 +2552,7 @@ "most_popular": false }, "overview": "# net.inet6.icmp6.stats\n\nPlugin: freebsd.plugin\nModule: net.inet6.icmp6.stats\n\n## Overview\n\nCollect information about IPv6 ICMP\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet6.icmp6.stats]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| icmp | Enable or disable ICMP metric. | auto | no |\n| icmp redirects | Enable or disable ICMP redirects metric. | auto | no |\n| icmp errors | Enable or disable ICMP errors metric. | auto | no |\n| icmp echos | Enable or disable ICMP echos metric. | auto | no |\n| icmp router | Enable or disable ICMP router metric. | auto | no |\n| icmp neighbor | Enable or disable ICMP neighbor metric. | auto | no |\n| icmp types | Enable or disable ICMP types metric. | auto | no |\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet6.icmp6.stats]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| icmp | Enable or disable ICMP metric. | auto | no |\n| icmp redirects | Enable or disable ICMP redirects metric. | auto | no |\n| icmp errors | Enable or disable ICMP errors metric. | auto | no |\n| icmp echos | Enable or disable ICMP echos metric. | auto | no |\n| icmp router | Enable or disable ICMP router metric. | auto | no |\n| icmp neighbor | Enable or disable ICMP neighbor metric. | auto | no |\n| icmp types | Enable or disable ICMP types metric. | auto | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet6.icmp6.stats instance\n\nCollect IPv6 ICMP traffic statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv6.icmp | received, sent | messages/s |\n| ipv6.icmpredir | received, sent | redirects/s |\n| ipv6.icmperrors | InErrors, OutErrors, InCsumErrors, InDestUnreachs, InPktTooBigs, InTimeExcds, InParmProblems, OutDestUnreachs, OutTimeExcds, OutParmProblems | errors/s |\n| ipv6.icmpechos | InEchos, OutEchos, InEchoReplies, OutEchoReplies | messages/s |\n| ipv6.icmprouter | InSolicits, OutSolicits, InAdvertisements, OutAdvertisements | messages/s |\n| ipv6.icmpneighbor | InSolicits, OutSolicits, InAdvertisements, OutAdvertisements | messages/s |\n| ipv6.icmptypes | InType1, InType128, InType129, InType136, OutType1, OutType128, OutType129, OutType133, OutType135, OutType143 | messages/s |\n\n", @@ -2664,7 +2585,7 @@ "most_popular": false }, "overview": "# net.inet6.ip6.stats\n\nPlugin: freebsd.plugin\nModule: net.inet6.ip6.stats\n\n## Overview\n\nCollect information about IPv6 stats.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet6.ip6.stats]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| ipv6 packets | Enable or disable ipv6 packet metric. | auto | no |\n| ipv6 fragments sent | Enable or disable ipv6 fragments sent metric. | auto | no |\n| ipv6 fragments assembly | Enable or disable ipv6 fragments assembly metric. | auto | no |\n| ipv6 errors | Enable or disable ipv6 errors metric. | auto | no |\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet6.ip6.stats]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| ipv6 packets | Enable or disable ipv6 packet metric. | auto | no |\n| ipv6 fragments sent | Enable or disable ipv6 fragments sent metric. | auto | no |\n| ipv6 fragments assembly | Enable or disable ipv6 fragments assembly metric. | auto | no |\n| ipv6 errors | Enable or disable ipv6 errors metric. | auto | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet6.ip6.stats instance\n\nThese metrics show general information about IPv6 connections.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv6.packets | received, sent, forwarded, delivers | packets/s |\n| ipv6.fragsout | ok, failed, all | packets/s |\n| ipv6.fragsin | ok, failed, timeout, all | packets/s |\n| ipv6.errors | InDiscards, OutDiscards, InHdrErrors, InAddrErrors, InTruncatedPkts, InNoRoutes, OutNoRoutes | packets/s |\n\n", @@ -2697,7 +2618,7 @@ "most_popular": false }, "overview": "# net.isr\n\nPlugin: freebsd.plugin\nModule: net.isr\n\n## Overview\n\nCollect information about system softnet stat.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.isr]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| netisr | Enable or disable general vision about softnet stat metrics. 
| yes | no |\n| netisr per core | Enable or disable softnet stat metric per core. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.isr]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| netisr | Enable or disable a general view of softnet stat metrics. | yes | no |\n| netisr per core | Enable or disable softnet stat metric per core. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 1min_netdev_backlog_exceeded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/softnet.conf) | system.softnet_stat | average number of dropped packets in the last minute due to exceeded net.core.netdev_max_backlog |\n| [ 1min_netdev_budget_ran_outs ](https://github.com/netdata/netdata/blob/master/src/health/health.d/softnet.conf) | system.softnet_stat | average number of times ksoftirq ran out of sysctl net.core.netdev_budget or net.core.netdev_budget_usecs with work remaining over the last minute (this can be a cause for dropped packets) |\n| [ 10min_netisr_backlog_exceeded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/softnet.conf) | system.softnet_stat | average number of drops in the last minute due to exceeded sysctl net.route.netisr_maxqlen (this can be a cause for dropped packets) |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.isr instance\n\nThese metrics show statistics about softnet stats.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.softnet_stat | dispatched, hybrid_dispatched, qdrops, queued | events/s |\n\n### Per core\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.softnet_stat | dispatched, hybrid_dispatched, qdrops, queued | events/s |\n\n", @@ -2730,7 +2651,7 @@ "most_popular": false }, "overview": "# system.ram\n\nPlugin: freebsd.plugin\nModule: system.ram\n\n## Overview\n\nShow information about system memory usage.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| system.ram | Enable or disable system RAM metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| system.ram | Enable or disable system RAM metric. 
| yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ram.conf) | system.ram | system memory utilization |\n| [ ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ram.conf) | system.ram | system memory utilization |\n| [ ram_available ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ram.conf) | mem.available | percentage of estimated amount of RAM available for userspace processes, without causing swapping |\n| [ ram_available ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ram.conf) | mem.available | percentage of estimated amount of RAM available for userspace processes, without causing swapping |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per system.ram instance\n\nThis metric shows RAM usage statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ram | free, active, inactive, wired, cache, laundry, buffers | MiB |\n| mem.available | avail | MiB |\n\n", @@ -2763,7 +2684,7 @@ "most_popular": false }, "overview": "# uptime\n\nPlugin: freebsd.plugin\nModule: uptime\n\n## Overview\n\nShow the period of time the server has been up.\n\nThe plugin calls `clock_gettime` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.loadavg | Enable or disable load average metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per uptime instance\n\nHow long the system is running.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.uptime | uptime | seconds |\n\n", @@ -2796,7 +2717,7 @@ "most_popular": false }, "overview": "# vm.loadavg\n\nPlugin: freebsd.plugin\nModule: vm.loadavg\n\n## Overview\n\nSystem Load Average\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.loadavg | Enable or disable load average metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.loadavg | Enable or disable load average metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ load_cpu_number ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | number of active CPU cores in the system |\n| [ load_average_15 ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | system fifteen-minute load average |\n| [ load_average_5 ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | system five-minute load average |\n| [ load_average_1 ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | system one-minute load average |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.loadavg instance\n\nMonitoring for number of threads running or waiting.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.load | load1, load5, load15 | load |\n\n", @@ -2829,7 +2750,7 @@ "most_popular": false }, "overview": "# vm.stats.sys.v_intr\n\nPlugin: freebsd.plugin\nModule: vm.stats.sys.v_intr\n\n## Overview\n\nDevice interrupts\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.stats.sys.v_intr | Enable or disable device interrupts metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.stats.sys.v_intr | Enable or disable device interrupts metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.stats.sys.v_intr instance\n\nThis metric shows the device interrupt frequency.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.dev_intr | interrupts | interrupts/s |\n\n", @@ -2862,7 +2783,7 @@ "most_popular": false }, "overview": "# vm.stats.sys.v_soft\n\nPlugin: freebsd.plugin\nModule: vm.stats.sys.v_soft\n\n## Overview\n\nSoftware Interrupt\n\nvm.stats.sys.v_soft\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.stats.sys.v_soft | Enable or disable software interrupts metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.stats.sys.v_soft | Enable or disable software interrupts metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.stats.sys.v_soft instance\n\nThis metric shows software interrupt frequency.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.soft_intr | interrupts | interrupts/s |\n\n", @@ -2895,7 +2816,7 @@ "most_popular": false }, "overview": "# vm.stats.sys.v_swtch\n\nPlugin: freebsd.plugin\nModule: vm.stats.sys.v_swtch\n\n## Overview\n\nCPU context switch\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.stats.sys.v_swtch | Enable or disable CPU context switch metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.stats.sys.v_swtch | Enable or disable CPU context switch metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.stats.sys.v_swtch instance\n\nThis metric counts the number of context switches happening on the host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ctxt | switches | context switches/s |\n| system.forks | started | processes/s |\n\n", @@ -2928,7 +2849,7 @@ "most_popular": false }, "overview": "# vm.stats.vm.v_pgfaults\n\nPlugin: freebsd.plugin\nModule: vm.stats.vm.v_pgfaults\n\n## Overview\n\nCollect memory page fault events.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.stats.vm.v_pgfaults | Enable or disable Memory page fault metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.stats.vm.v_pgfaults | Enable or disable Memory page fault metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.stats.vm.v_pgfaults instance\n\nThe number of page faults that happened on the host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.pgfaults | memory, io_requiring, cow, cow_optimized, in_transit | page faults/s |\n\n", @@ -2961,7 +2882,7 @@ "most_popular": false }, "overview": "# vm.stats.vm.v_swappgs\n\nPlugin: freebsd.plugin\nModule: vm.stats.vm.v_swappgs\n\n## Overview\n\nThis metric shows the amount of data read from and written to SWAP.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.stats.vm.v_swappgs | Enable or disable information about SWAP I/O metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.stats.vm.v_swappgs | Enable or disable information about SWAP I/O metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 30min_ram_swapped_out ](https://github.com/netdata/netdata/blob/master/src/health/health.d/swap.conf) | mem.swapio | percentage of the system RAM swapped in the last 30 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.stats.vm.v_swappgs instance\n\nThis metric shows events happening on SWAP.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.swapio | io, out | KiB/s |\n\n", @@ -2994,7 +2915,7 @@ "most_popular": false }, "overview": "# vm.swap_info\n\nPlugin: freebsd.plugin\nModule: vm.swap_info\n\n## Overview\n\nCollect information about SWAP memory.\n\nThe plugin calls `sysctlnametomib` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.swap_info | Enable or disable SWAP metrics. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.swap_info | Enable or disable SWAP metrics. 
| yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ used_swap ](https://github.com/netdata/netdata/blob/master/src/health/health.d/swap.conf) | mem.swap | swap memory utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.swap_info instance\n\nThis metric shows the SWAP usage.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.swap | free, used | MiB |\n\n", @@ -3027,7 +2948,7 @@ "most_popular": false }, "overview": "# vm.vmtotal\n\nPlugin: freebsd.plugin\nModule: vm.vmtotal\n\n## Overview\n\nCollect Virtual Memory information from host.\n\nThe plugin calls function `sysctl` to collect data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:vm.vmtotal]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enable total processes | Number of active processes. | yes | no |\n| processes running | Show number of processes running or blocked. | yes | no |\n| real memory | Memory used on host. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:vm.vmtotal]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enable total processes | Number of active processes. | yes | no |\n| processes running | Show number of processes running or blocked. | yes | no |\n| real memory | Memory used on host. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ active_processes ](https://github.com/netdata/netdata/blob/master/src/health/health.d/processes.conf) | system.active_processes | system process IDs (PID) space utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.vmtotal instance\n\nThese metrics show an overall view of the processes running.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.active_processes | active | processes |\n| system.processes | running, blocked | processes |\n| mem.real | used | MiB |\n\n", @@ -3060,7 +2981,7 @@ "most_popular": false }, "overview": "# zfs\n\nPlugin: freebsd.plugin\nModule: zfs\n\n## Overview\n\nCollect metrics for ZFS filesystem\n\nThe plugin uses `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:zfs_arcstats]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| show zero charts | Do not show charts with zero metrics. 
| no | no |\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:zfs_arcstats]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| show zero charts | Do not show charts with zero metrics. | no | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ zfs_memory_throttle ](https://github.com/netdata/netdata/blob/master/src/health/health.d/zfs.conf) | zfs.memory_ops | number of times ZFS had to limit the ARC growth in the last 10 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per zfs instance\n\nThese metrics show detailed information about ZFS filesystem.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| zfs.arc_size | arcsz, target, min, max | MiB |\n| zfs.l2_size | actual, size | MiB |\n| zfs.reads | arc, demand, prefetch, metadata, l2 | reads/s |\n| zfs.bytes | read, write | KiB/s |\n| zfs.hits | hits, misses | percentage |\n| zfs.hits_rate | hits, misses | events/s |\n| zfs.dhits | hits, misses | percentage |\n| zfs.dhits_rate | hits, misses | events/s |\n| zfs.phits | hits, misses | percentage |\n| zfs.phits_rate | hits, misses | events/s |\n| zfs.mhits | hits, misses | percentage |\n| zfs.mhits_rate | hits, misses | events/s |\n| zfs.l2hits | hits, misses | percentage |\n| zfs.l2hits_rate | hits, misses | events/s |\n| zfs.list_hits | mfu, mfu_ghost, mru, mru_ghost | hits/s |\n| zfs.arc_size_breakdown | recent, frequent | percentage |\n| zfs.memory_ops | throttled | operations/s |\n| zfs.important_ops | evict_skip, deleted, mutex_miss, hash_collisions | operations/s |\n| zfs.actual_hits | hits, misses | percentage |\n| zfs.actual_hits_rate | hits, misses | events/s |\n| zfs.demand_data_hits | hits, misses | percentage |\n| zfs.demand_data_hits_rate | hits, misses | events/s |\n| zfs.prefetch_data_hits | hits, misses | percentage |\n| zfs.prefetch_data_hits_rate | hits, misses | events/s |\n| zfs.hash_elements | current, max | elements |\n| zfs.hash_chains | current, max | chains |\n| zfs.trim_bytes | TRIMmed | bytes |\n| zfs.trim_requests | successful, failed, unsupported | requests |\n\n", @@ -3098,7 +3019,7 @@ "most_popular": true }, "overview": "# Intelligent Platform Management Interface (IPMI)\n\nPlugin: freeipmi.plugin\nModule: freeipmi\n\n## Overview\n\n\"Monitor enterprise server sensor 
readings, event log entries, and hardware statuses to ensure reliable server operations.\"\n\n\nThe plugin uses the open-source IPMImonitoring library to communicate with sensors.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe Linux kernel IPMI module can create significant overhead.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install freeipmi.plugin\n\nWhen using our official DEB/RPM packages, the FreeIPMI plugin is included in a separate package named `netdata-plugin-freeipmi` which needs to be manually installed using your system package manager. It is not installed automatically due to the large number of dependencies it requires.\n\nWhen using a static build of Netdata, the FreeIPMI plugin will be included and installed automatically, though you will still need to have FreeIPMI installed on your system to be able to use the plugin.\n\nWhen using a local build of Netdata, you need to ensure that the FreeIPMI development packages (typically called `libipmimonitoring-dev`, `libipmimonitoring-devel`, or `freeipmi-devel`) are installed when building Netdata.\n\n\n#### Preliminary actions\n\nIf you have not previously used IPMI on your system, you will probably need to run the `ipmimonitoring` command as root\nto initialize IPMI settings so that the Netdata plugin works correctly. It should return information about available sensors on the system.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freeipmi]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nThe configuration is set using command line options:\n\n```\n# netdata.conf\n[plugin:freeipmi]\n command options = opt1 opt2 ... optN\n```\n\nTo display a help message listing the available command line options:\n\n```bash\n./usr/libexec/netdata/plugins.d/freeipmi.plugin --help\n```\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SECONDS | Data collection frequency. | | no |\n| debug | Enable verbose output. | disabled | no |\n| no-sel | Disable System Event Log (SEL) collection. | disabled | no |\n| reread-sdr-cache | Re-read SDR cache on every iteration. | disabled | no |\n| interpret-oem-data | Attempt to parse OEM data. | disabled | no |\n| assume-system-event-record | treat illegal SEL events records as normal. | disabled | no |\n| ignore-non-interpretable-sensors | Do not read sensors that cannot be interpreted. | disabled | no |\n| bridge-sensors | Bridge sensors not owned by the BMC. | disabled | no |\n| shared-sensors | Enable shared sensors if found. 
| disabled | no |\n| no-discrete-reading | Do not read sensors if their event/reading type code is invalid. | enabled | no |\n| ignore-scanning-disabled | Ignore the scanning bit and read sensors no matter what. | disabled | no |\n| assume-bmc-owner | Assume the BMC is the sensor owner no matter what (usually bridging is required too). | disabled | no |\n| hostname HOST | Remote IPMI hostname or IP address. | local | no |\n| username USER | Username that will be used when connecting to the remote host. | | no |\n| password PASS | Password that will be used when connecting to the remote host. | | no |\n| noauthcodecheck / no-auth-code-check | Don't check the authentication codes returned. | | no |\n| driver-type IPMIDRIVER | Specify the driver type to use instead of doing an auto selection. The currently available outofband drivers are LAN and LAN_2_0, which perform IPMI 1.5 and IPMI 2.0 respectively. The currently available inband drivers are KCS, SSIF, OPENIPMI and SUNBMC. | | no |\n| sdr-cache-dir PATH | SDR cache files directory. | /tmp | no |\n| sensor-config-file FILE | Sensors configuration filename. | system default | no |\n| sel-config-file FILE | SEL configuration filename. | system default | no |\n| ignore N1,N2,N3,... | Sensor IDs to ignore. | | no |\n| ignore-status N1,N2,N3,... | Sensor IDs to ignore status (nominal/warning/critical). | | no |\n| -v | Print version and exit. | | no |\n| --help | Print usage message and exit. | | no |\n\n#### Examples\n\n##### Decrease data collection frequency\n\nBasic example decreasing data collection frequency. The minimum `update every` is 5 (enforced internally by the plugin). IPMI is slow and CPU hungry. So, once every 5 seconds is pretty acceptable.\n\n```yaml\n[plugin:freeipmi]\n update every = 10\n\n```\n##### Disable SEL collection\n\nAppend to `command options =` the options you need.\n\n```yaml\n[plugin:freeipmi]\n command options = no-sel\n\n```\n##### Ignore specific sensors\n\nSpecific sensor IDs can be excluded from freeipmi tools by editing `/etc/freeipmi/freeipmi.conf` and setting the IDs to be ignored at `ipmi-sensors-exclude-record-ids`.\n\n**However this file is not used by `libipmimonitoring`** (the library used by Netdata's `freeipmi.plugin`).\n\nTo find the IDs to ignore, run the command `ipmimonitoring`. The first column is the wanted ID:\n\nID | Name | Type | State | Reading | Units | Event\n1 | Ambient Temp | Temperature | Nominal | 26.00 | C | 'OK'\n2 | Altitude | Other Units Based Sensor | Nominal | 480.00 | ft | 'OK'\n3 | Avg Power | Current | Nominal | 100.00 | W | 'OK'\n4 | Planar 3.3V | Voltage | Nominal | 3.29 | V | 'OK'\n5 | Planar 5V | Voltage | Nominal | 4.90 | V | 'OK'\n6 | Planar 12V | Voltage | Nominal | 11.99 | V | 'OK'\n7 | Planar VBAT | Voltage | Nominal | 2.95 | V | 'OK'\n8 | Fan 1A Tach | Fan | Nominal | 3132.00 | RPM | 'OK'\n9 | Fan 1B Tach | Fan | Nominal | 2150.00 | RPM | 'OK'\n10 | Fan 2A Tach | Fan | Nominal | 2494.00 | RPM | 'OK'\n11 | Fan 2B Tach | Fan | Nominal | 1825.00 | RPM | 'OK'\n12 | Fan 3A Tach | Fan | Nominal | 3538.00 | RPM | 'OK'\n13 | Fan 3B Tach | Fan | Nominal | 2625.00 | RPM | 'OK'\n14 | Fan 1 | Entity Presence | Nominal | N/A | N/A | 'Entity Present'\n15 | Fan 2 | Entity Presence | Nominal | N/A | N/A | 'Entity Present'\n...\n\n`freeipmi.plugin` supports the option `ignore` that accepts a comma separated list of sensor IDs to ignore. 
To configure it set on `netdata.conf`:\n\n\n```yaml\n[plugin:freeipmi]\n command options = ignore 1,2,3,4,...\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install freeipmi.plugin\n\nWhen using our official DEB/RPM packages, the FreeIPMI plugin is included in a separate package named `netdata-plugin-freeipmi` which needs to be manually installed using your system package manager. It is not installed automatically due to the large number of dependencies it requires.\n\nWhen using a static build of Netdata, the FreeIPMI plugin will be included and installed automatically, though you will still need to have FreeIPMI installed on your system to be able to use the plugin.\n\nWhen using a local build of Netdata, you need to ensure that the FreeIPMI development packages (typically called `libipmimonitoring-dev`, `libipmimonitoring-devel`, or `freeipmi-devel`) are installed when building Netdata.\n\n\n#### Preliminary actions\n\nIf you have not previously used IPMI on your system, you will probably need to run the `ipmimonitoring` command as root\nto initialize IPMI settings so that the Netdata plugin works correctly. It should return information about available sensors on the system.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freeipmi]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nThe configuration is set using command line options:\n\n```\n# netdata.conf\n[plugin:freeipmi]\n command options = opt1 opt2 ... optN\n```\n\nTo display a help message listing the available command line options:\n\n```bash\n./usr/libexec/netdata/plugins.d/freeipmi.plugin --help\n```\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SECONDS | Data collection frequency. | | no |\n| debug | Enable verbose output. | disabled | no |\n| no-sel | Disable System Event Log (SEL) collection. | disabled | no |\n| reread-sdr-cache | Re-read SDR cache on every iteration. | disabled | no |\n| interpret-oem-data | Attempt to parse OEM data. | disabled | no |\n| assume-system-event-record | treat illegal SEL events records as normal. | disabled | no |\n| ignore-non-interpretable-sensors | Do not read sensors that cannot be interpreted. | disabled | no |\n| bridge-sensors | Bridge sensors not owned by the BMC. | disabled | no |\n| shared-sensors | Enable shared sensors if found. | disabled | no |\n| no-discrete-reading | Do not read sensors if their event/reading type code is invalid. | enabled | no |\n| ignore-scanning-disabled | Ignore the scanning bit and read sensors no matter what. | disabled | no |\n| assume-bmc-owner | Assume the BMC is the sensor owner no matter what (usually bridging is required too). | disabled | no |\n| hostname HOST | Remote IPMI hostname or IP address. | local | no |\n| username USER | Username that will be used when connecting to the remote host. 
| | no |\n| password PASS | Password that will be used when connecting to the remote host. | | no |\n| noauthcodecheck / no-auth-code-check | Don't check the authentication codes returned. | | no |\n| driver-type IPMIDRIVER | Specify the driver type to use instead of doing an auto selection. The currently available outofband drivers are LAN and LAN_2_0, which perform IPMI 1.5 and IPMI 2.0 respectively. The currently available inband drivers are KCS, SSIF, OPENIPMI and SUNBMC. | | no |\n| sdr-cache-dir PATH | SDR cache files directory. | /tmp | no |\n| sensor-config-file FILE | Sensors configuration filename. | system default | no |\n| sel-config-file FILE | SEL configuration filename. | system default | no |\n| ignore N1,N2,N3,... | Sensor IDs to ignore. | | no |\n| ignore-status N1,N2,N3,... | Sensor IDs to ignore status (nominal/warning/critical). | | no |\n| -v | Print version and exit. | | no |\n| --help | Print usage message and exit. | | no |\n\n#### Examples\n\n##### Decrease data collection frequency\n\nBasic example decreasing data collection frequency. The minimum `update every` is 5 (enforced internally by the plugin). IPMI is slow and CPU hungry. So, once every 5 seconds is pretty acceptable.\n\n```yaml\n[plugin:freeipmi]\n update every = 10\n\n```\n##### Disable SEL collection\n\nAppend to `command options =` the options you need.\n\n```yaml\n[plugin:freeipmi]\n command options = no-sel\n\n```\n##### Ignore specific sensors\n\nSpecific sensor IDs can be excluded from freeipmi tools by editing `/etc/freeipmi/freeipmi.conf` and setting the IDs to be ignored at `ipmi-sensors-exclude-record-ids`.\n\n**However this file is not used by `libipmimonitoring`** (the library used by Netdata's `freeipmi.plugin`).\n\nTo find the IDs to ignore, run the command `ipmimonitoring`. The first column is the wanted ID:\n\nID | Name | Type | State | Reading | Units | Event\n1 | Ambient Temp | Temperature | Nominal | 26.00 | C | 'OK'\n2 | Altitude | Other Units Based Sensor | Nominal | 480.00 | ft | 'OK'\n3 | Avg Power | Current | Nominal | 100.00 | W | 'OK'\n4 | Planar 3.3V | Voltage | Nominal | 3.29 | V | 'OK'\n5 | Planar 5V | Voltage | Nominal | 4.90 | V | 'OK'\n6 | Planar 12V | Voltage | Nominal | 11.99 | V | 'OK'\n7 | Planar VBAT | Voltage | Nominal | 2.95 | V | 'OK'\n8 | Fan 1A Tach | Fan | Nominal | 3132.00 | RPM | 'OK'\n9 | Fan 1B Tach | Fan | Nominal | 2150.00 | RPM | 'OK'\n10 | Fan 2A Tach | Fan | Nominal | 2494.00 | RPM | 'OK'\n11 | Fan 2B Tach | Fan | Nominal | 1825.00 | RPM | 'OK'\n12 | Fan 3A Tach | Fan | Nominal | 3538.00 | RPM | 'OK'\n13 | Fan 3B Tach | Fan | Nominal | 2625.00 | RPM | 'OK'\n14 | Fan 1 | Entity Presence | Nominal | N/A | N/A | 'Entity Present'\n15 | Fan 2 | Entity Presence | Nominal | N/A | N/A | 'Entity Present'\n...\n\n`freeipmi.plugin` supports the option `ignore` that accepts a comma separated list of sensor IDs to ignore. 
To configure it, set in `netdata.conf`:\n\n\n```yaml\n[plugin:freeipmi]\n command options = ignore 1,2,3,4,...\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n\n\n### kipmi0 CPU usage\n\n\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ ipmi_sensor_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ipmi.conf) | ipmi.sensor_state | IPMI sensor ${label:sensor} (${label:component}) state |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nThe plugin does a speed test when it starts, to find out the duration needed by the IPMI processor to respond. Depending on the speed of your IPMI processor, charts may need several seconds to show up on the dashboard.\n\n\n### Per Intelligent Platform Management Interface (IPMI) instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipmi.sel | events | events |\n\n### Per sensor\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| sensor | The sensor name |\n| type | One of 45 recognized sensor types (Battery, Voltage...) |\n| component | One of 25 recognized components (Processor, Peripheral). |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipmi.sensor_state | nominal, critical, warning, unknown | state |\n| ipmi.sensor_temperature_c | temperature | Celsius |\n| ipmi.sensor_temperature_f | temperature | Fahrenheit |\n| ipmi.sensor_voltage | voltage | Volts |\n| ipmi.sensor_ampere | ampere | Amps |\n| ipmi.sensor_fan_speed | rotations | RPM |\n| ipmi.sensor_power | power | Watts |\n| ipmi.sensor_reading_percent | percentage | % |\n\n", @@ -3144,7 +3065,7 @@ } }, "overview": "# ActiveMQ\n\nPlugin: go.d.plugin\nModule: activemq\n\n## Overview\n\nThis collector monitors ActiveMQ queues and topics.\n\nIt collects metrics by sending HTTP requests to the Web Console API.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis collector discovers instances running on the local host that provide metrics on port 8161.\nOn startup, it tries to collect metrics from:\n\n- http://localhost:8161\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/activemq.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/activemq.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection 
frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://localhost:8161 | yes |\n| webadmin | Webadmin root path. | admin | yes |\n| max_queues | Maximum number of concurrently collected queues. | 50 | no |\n| max_topics | Maximum number of concurrently collected topics. | 50 | no |\n| queues_filter | Queues filter. Syntax is [simple patterns](/src/libnetdata/simple_pattern/README.md#simple-patterns). | | no |\n| topics_filter | Topics filter. Syntax is [simple patterns](/src/libnetdata/simple_pattern/README.md#simple-patterns). | | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| timeout | HTTP request timeout. | 1 | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8161\n webadmin: admin\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8161\n webadmin: admin\n username: foo\n password: bar\n\n```\n##### Filters and limits\n\nUsing filters and limits for queues and topics.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8161\n webadmin: admin\n max_queues: 100\n max_topics: 100\n queues_filter: '!sandr* *'\n topics_filter: '!sandr* *'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8161\n webadmin: admin\n\n - name: remote\n url: http://192.0.2.1:8161\n webadmin: admin\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/activemq.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/activemq.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://localhost:8161 | yes |\n| webadmin | Webadmin root path. | admin | yes |\n| max_queues | Maximum number of concurrently collected queues. 
| 50 | no |\n| max_topics | Maximum number of concurrently collected topics. | 50 | no |\n| queues_filter | Queues filter. Syntax is [simple patterns](/src/libnetdata/simple_pattern/README.md#simple-patterns). | | no |\n| topics_filter | Topics filter. Syntax is [simple patterns](/src/libnetdata/simple_pattern/README.md#simple-patterns). | | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| timeout | HTTP request timeout. | 1 | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8161\n webadmin: admin\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8161\n webadmin: admin\n username: foo\n password: bar\n\n```\n##### Filters and limits\n\nUsing filters and limits for queues and topics.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8161\n webadmin: admin\n max_queues: 100\n max_topics: 100\n queues_filter: '!sandr* *'\n topics_filter: '!sandr* *'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8161\n webadmin: admin\n\n - name: remote\n url: http://192.0.2.1:8161\n webadmin: admin\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `activemq` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m activemq\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `activemq` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep activemq\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep activemq /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep activemq\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per ActiveMQ instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| activemq.messages | enqueued, dequeued | messages/s |\n| activemq.unprocessed_messages | unprocessed | messages |\n| activemq.consumers | consumers | consumers |\n\n", @@ -3181,8 +3102,8 @@ }, "most_popular": false }, - "overview": "# Adaptec RAID\n\nPlugin: go.d.plugin\nModule: adaptec_raid\n\n## Overview\n\nMonitors the health of Adaptec Hardware RAID by tracking the status of logical and physical devices in your storage system.\nIt relies on the `arcconf` CLI tool but avoids directly executing the binary.\nInstead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment.\nThis approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\n\nExecuted commands:\n- `arcconf GETCONFIG 1 LD`\n- `arcconf GETCONFIG 1 PD`\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/adaptec_raid.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/adaptec_raid.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | arcconf binary execution timeout. 
| 2 | no |\n\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n```yaml\njobs:\n - name: adaptec_raid\n update_every: 5 # Collect Adaptec Hardware RAID statistics every 5 seconds\n\n```\n", + "overview": "# Adaptec RAID\n\nPlugin: go.d.plugin\nModule: adaptec_raid\n\n## Overview\n\nMonitors the health of Adaptec Hardware RAID by tracking the status of logical and physical devices in your storage system.\nIt relies on the `arcconf` CLI tool but avoids directly executing the binary.\nInstead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment.\nThis approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\n\nExecuted commands:\n- `arcconf GETCONFIG 1 LD`\n- `arcconf GETCONFIG 1 PD`\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n- BSD\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/adaptec_raid.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/adaptec_raid.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | arcconf binary execution timeout. | 2 | no |\n\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n```yaml\njobs:\n - name: adaptec_raid\n update_every: 5 # Collect Adaptec Hardware RAID statistics every 5 seconds\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `adaptec_raid` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m adaptec_raid\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `adaptec_raid` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep adaptec_raid\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep adaptec_raid /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep adaptec_raid\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ adaptec_raid_ld_health_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/adaptec_raid.conf) | adaptecraid.logical_device_status | Adaptec RAID logical device (number ${label:ld_number} name ${label:ld_name}) health status is critical |\n| [ adaptec_raid_pd_health_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/adaptec_raid.conf) | adaptecraid.physical_device_state | Adaptec RAID physical device (number ${label:pd_number} location ${label:location}) health state is critical |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per logical device\n\nThese metrics refer to the Logical Device (LD).\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| ld_number | Logical device index number |\n| ld_name | Logical device name |\n| raid_level | RAID level |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adaptecraid.logical_device_status | ok, critical | status |\n\n### Per physical device\n\nThese metrics refer to the Physical Device (PD).\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| pd_number | Physical device index number |\n| location | Physical device location (e.g. 
Connector 0, Device 1) |\n| vendor | Physical device vendor |\n| model | Physical device model |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adaptecraid.physical_device_state | ok, critical | status |\n| adaptecraid.physical_device_smart_warnings | smart | warnings |\n| adaptecraid.physical_device_temperature | temperature | Celsius |\n\n", @@ -3221,7 +3142,7 @@ "most_popular": false }, "overview": "# Access Points\n\nPlugin: go.d.plugin\nModule: ap\n\n## Overview\n\nThis collector monitors various wireless access point metrics like connected clients, bandwidth, packets, transmit issues, signal strength, and bitrate for each device and its associated SSID.\n\n\nThis tool uses the `iw` command-line utility to discover nearby access points. It starts by running `iw dev`, which provides information about all wireless interfaces. Then, for each interface identified as an access point (type AP), the `iw INTERFACE station dump` command is executed to gather relevant metrics.\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin is able to auto-detect any access points on your Linux machine.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### `iw` utility.\n\nMake sure the `iw` utility is installed.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/ap.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/ap.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| binary_path | Path to the `iw` binary. If an absolute path is provided, the collector will use it directly; otherwise, it will search for the binary in directories specified in the PATH environment variable. | /usr/sbin/iw | yes |\n| timeout | Timeout for executing the binary, specified in seconds. 
| 2 | no |\n\n#### Examples\n\n##### Custom binary path\n\nThe executable is not in the directories specified in the PATH environment variable.\n\n```yaml\njobs:\n - name: custom_iw\n binary_path: /usr/local/sbin/iw\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### `iw` utility.\n\nMake sure the `iw` utility is installed.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/ap.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/ap.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| binary_path | Path to the `iw` binary. If an absolute path is provided, the collector will use it directly; otherwise, it will search for the binary in directories specified in the PATH environment variable. | /usr/sbin/iw | yes |\n| timeout | Timeout for executing the binary, specified in seconds. | 2 | no |\n\n#### Examples\n\n##### Custom binary path\n\nThe executable is not in the directories specified in the PATH environment variable.\n\n```yaml\njobs:\n - name: custom_iw\n binary_path: /usr/local/sbin/iw\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `ap` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m ap\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `ap` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep ap\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep ap /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
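For example (an illustrative sketch, not part of the collector's own tooling), you can pipe the output through `tail` to keep only the most recent entries:\n\n```bash\n# show only the 50 most recent matching lines; the count is arbitrary\ngrep ap /var/log/netdata/collector.log | tail -n 50\n```\n\n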
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep ap\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per wireless device\n\nThese metrics refer to the entire monitored application.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | Wireless interface name |\n| ssid | SSID |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ap.clients | clients | clients |\n| ap.net | received, sent | kilobits/s |\n| ap.packets | received, sent | packets/s |\n| ap.issues | retries, failures | issues/s |\n| ap.signal | average signal | dBm |\n| ap.bitrate | receive, transmit | Mbps |\n\n", @@ -3270,7 +3191,7 @@ "most_popular": true }, "overview": "# Apache\n\nPlugin: go.d.plugin\nModule: apache\n\n## Overview\n\nThis collector monitors the activity and performance of Apache servers, and collects metrics such as the number of connections, workers, requests and more.\n\n\nIt sends HTTP requests to the Apache location [server-status](https://httpd.apache.org/docs/2.4/mod/mod_status.html), \nwhich is a built-in location that provides metrics about the Apache server.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Apache instances running on localhost that are listening on port 80.\nOn startup, it tries to collect metrics from:\n\n- http://localhost/server-status?auto\n- http://127.0.0.1/server-status?auto\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Enable Apache status support\n\n- Enable and configure [status_module](https://httpd.apache.org/docs/2.4/mod/mod_status.html).\n- Ensure that you have [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/mod_status.html#troubleshoot) set on (enabled by default since Apache v2.3.6).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/apache.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/apache.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/server-status?auto | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. 
| | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nApache with enabled HTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1/server-status?auto\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n\n - name: remote\n url: http://192.0.2.1/server-status?auto\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Enable Apache status support\n\n- Enable and configure [status_module](https://httpd.apache.org/docs/2.4/mod/mod_status.html).\n- Ensure that you have [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/mod_status.html#troubleshoot) set on (enabled by default since Apache v2.3.6).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/apache.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/apache.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/server-status?auto | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. 
| no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nApache with enabled HTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1/server-status?auto\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n\n - name: remote\n url: http://192.0.2.1/server-status?auto\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `apache` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m apache\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `apache` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep apache\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep apache /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep apache\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nAll metrics available only if [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/core.html#extendedstatus) is on.\n\n\n### Per Apache instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | Basic | Extended |\n|:------|:----------|:----|:---:|:---:|\n| apache.connections | connections | connections | \u2022 | \u2022 |\n| apache.conns_async | keepalive, closing, writing | connections | \u2022 | \u2022 |\n| apache.workers | idle, busy | workers | \u2022 | \u2022 |\n| apache.scoreboard | waiting, starting, reading, sending, keepalive, dns_lookup, closing, logging, finishing, idle_cleanup, open | connections | \u2022 | \u2022 |\n| apache.requests | requests | requests/s | | \u2022 |\n| apache.net | sent | kilobit/s | | \u2022 |\n| apache.reqpersec | requests | requests/s | | \u2022 |\n| apache.bytespersec | served | KiB/s | | \u2022 |\n| apache.bytesperreq | size | KiB | | \u2022 |\n| apache.uptime | uptime | seconds | | \u2022 |\n\n", @@ -3319,7 +3240,7 @@ "most_popular": true }, "overview": "# HTTPD\n\nPlugin: go.d.plugin\nModule: apache\n\n## Overview\n\nThis collector monitors the activity and performance of Apache servers, and collects metrics such as the number of connections, workers, requests and more.\n\n\nIt sends HTTP requests to the Apache location [server-status](https://httpd.apache.org/docs/2.4/mod/mod_status.html), \nwhich is a built-in location that provides metrics about the Apache server.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Apache instances running on localhost that are listening on port 80.\nOn startup, it tries to collect metrics from:\n\n- http://localhost/server-status?auto\n- http://127.0.0.1/server-status?auto\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Enable Apache status support\n\n- Enable and configure [status_module](https://httpd.apache.org/docs/2.4/mod/mod_status.html).\n- Ensure that you have [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/mod_status.html#troubleshoot) set on (enabled by default since Apache v2.3.6).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/apache.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/apache.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/server-status?auto | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. 
| | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nApache with enabled HTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1/server-status?auto\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n\n - name: remote\n url: http://192.0.2.1/server-status?auto\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Enable Apache status support\n\n- Enable and configure [status_module](https://httpd.apache.org/docs/2.4/mod/mod_status.html).\n- Ensure that you have [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/mod_status.html#troubleshoot) set on (enabled by default since Apache v2.3.6).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/apache.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/apache.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/server-status?auto | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. 
| no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nApache with enabled HTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1/server-status?auto\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n\n - name: remote\n url: http://192.0.2.1/server-status?auto\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `apache` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m apache\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `apache` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep apache\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep apache /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep apache\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nAll metrics available only if [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/core.html#extendedstatus) is on.\n\n\n### Per Apache instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | Basic | Extended |\n|:------|:----------|:----|:---:|:---:|\n| apache.connections | connections | connections | \u2022 | \u2022 |\n| apache.conns_async | keepalive, closing, writing | connections | \u2022 | \u2022 |\n| apache.workers | idle, busy | workers | \u2022 | \u2022 |\n| apache.scoreboard | waiting, starting, reading, sending, keepalive, dns_lookup, closing, logging, finishing, idle_cleanup, open | connections | \u2022 | \u2022 |\n| apache.requests | requests | requests/s | | \u2022 |\n| apache.net | sent | kilobit/s | | \u2022 |\n| apache.reqpersec | requests | requests/s | | \u2022 |\n| apache.bytespersec | served | KiB/s | | \u2022 |\n| apache.bytesperreq | size | KiB | | \u2022 |\n| apache.uptime | uptime | seconds | | \u2022 |\n\n", @@ -3328,6 +3249,44 @@ "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/apache/metadata.yaml", "related_resources": "" }, + { + "meta": { + "id": "collector-go.d.plugin-apcupsd", + "plugin_name": "go.d.plugin", + "module_name": "apcupsd", + "monitored_instance": { + "name": "APC UPS", + "link": "https://www.apc.com", + "icon_filename": "apc.svg", + "categories": [ + "data-collection.ups" + ] + }, + "keywords": [ + "ups", + "apcupsd", + "apc" + ], + "related_resources": { + "integrations": { + "list": [] + } + }, + "info_provided_to_referring_integrations": { + "description": "" + }, + "most_popular": false + }, + "overview": "# APC UPS\n\nPlugin: go.d.plugin\nModule: apcupsd\n\n## Overview\n\nThis collector monitors Uninterruptible Power Supplies by polling the Apcupsd daemon.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Apcupsd instances running on localhost that are listening on port 3551.\nOn startup, it tries to collect metrics from:\n\n- 127.0.0.1:3551\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/apcupsd.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/apcupsd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Apcupsd daemon address in IP:PORT format. 
| 127.0.0.1:3551 | yes |\n| timeout | Connection/read/write timeout in seconds. The timeout includes name resolution, if required. | 2 | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:3551\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:3551\n\n - name: remote\n address: 203.0.113.0:3551\n\n```\n", + "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `apcupsd` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m apcupsd\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `apcupsd` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep apcupsd\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep apcupsd /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
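For example (an illustrative sketch, not part of the collector's own tooling), you can pipe the output through `tail` to keep only the most recent entries:\n\n```bash\n# show only the 50 most recent matching lines; the count is arbitrary\ngrep apcupsd /var/log/netdata/collector.log | tail -n 50\n```\n\n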
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep apcupsd\n```\n\n", + "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ apcupsd_ups_load_capacity ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.ups_load_capacity_utilization | APC UPS average load over the last 10 minutes |\n| [ apcupsd_ups_battery_charge ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.ups_battery_charge | APC UPS average battery charge over the last minute |\n| [ apcupsd_last_collected_secs ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.ups_status | APC UPS number of seconds since the last successful data collection |\n| [ apcupsd_ups_selftest_warning ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.ups_selftest | APC UPS self-test failed due to insufficient battery capacity or due to overload |\n| [ apcupsd_ups_status_onbatt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.ups_status | APC UPS has switched to battery power because the input power has failed |\n| [ apcupsd_ups_status_overload ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.ups_status | APC UPS is overloaded and cannot supply enough power to the load |\n| [ apcupsd_ups_status_lowbatt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.ups_status | APC UPS battery is low and needs to be recharged |\n| [ apcupsd_ups_status_replacebatt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.ups_status | APC UPS battery has reached the end of its lifespan and needs to be replaced |\n| [ apcupsd_ups_status_nobatt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.ups_status | APC UPS has no battery |\n| [ apcupsd_ups_status_commlost ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.ups_status | APC UPS communication link is lost |\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per ups\n\nThese metrics refer to the UPS unit.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| apcupsd.ups_status | TRIM, BOOST, CAL, ONLINE, ONBATT, OVERLOAD, LOWBATT, REPLACEBATT, NOBATT, SLAVE, SLAVEDOWN, COMMLOST, SHUTTING_DOWN | status |\n| apcupsd.ups_selftest | NO, NG, WN, IP, OK, BT, UNK | status |\n| apcupsd.ups_battery_charge | charge | percent |\n| apcupsd.ups_battery_time_remaining | timeleft | seconds |\n| apcupsd.ups_battery_time_since_replacement | since_replacement | seconds |\n| apcupsd.ups_battery_voltage | voltage, nominal_voltage | Volts |\n| apcupsd.ups_load_capacity_utilization | load | percent |\n| apcupsd.ups_load | load | Watts |\n| apcupsd.ups_temperature | temperature | Celsius |\n| apcupsd.ups_input_voltage | voltage, min_voltage, max_voltage | Volts |\n| apcupsd.ups_input_frequency | frequency | Hz |\n| apcupsd.ups_output_voltage | voltage | Volts |\n\n", + "integration_type": "collector", + "id": "go.d.plugin-apcupsd-APC_UPS", + "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/apcupsd/metadata.yaml", + "related_resources": "" + }, { "meta": { "id": "collector-go.d.plugin-beanstalk", @@ -3357,7 +3316,7 @@ "most_popular": false }, "overview": "# Beanstalk\n\nPlugin: go.d.plugin\nModule: beanstalk\n\n## Overview\n\nThis collector monitors Beanstalk server performance and provides detailed statistics for each tube.\n\n\nUsing the [beanstalkd protocol](https://github.com/beanstalkd/beanstalkd/blob/master/doc/protocol.txt), it communicates with the Beanstalk daemon to gather essential metrics that help understand the server's performance and activity.\nExecuted commands:\n\n- [stats](https://github.com/beanstalkd/beanstalkd/blob/91c54fc05dc759ef27459ce4383934e1a4f2fb4b/doc/protocol.txt#L553).\n- [list-tubes](https://github.com/beanstalkd/beanstalkd/blob/91c54fc05dc759ef27459ce4383934e1a4f2fb4b/doc/protocol.txt#L688).\n- [stats-tube](https://github.com/beanstalkd/beanstalkd/blob/91c54fc05dc759ef27459ce4383934e1a4f2fb4b/doc/protocol.txt#L497).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Beanstalk instances running on localhost that are listening on port 11300.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/beanstalk.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/beanstalk.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| address | The IP address and port where the Beanstalk service listens for connections. | 127.0.0.1:11300 | yes |\n| timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. | 1 | no |\n| tube_selector | Specifies a [pattern](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#readme) for which Beanstalk tubes Netdata will collect statistics. | * | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:11300\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:11300\n\n - name: remote\n address: 203.0.113.0:11300\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/beanstalk.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/beanstalk.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | The IP address and port where the Beanstalk service listens for connections. | 127.0.0.1:11300 | yes |\n| timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. | 1 | no |\n| tube_selector | Specifies a [pattern](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#readme) for which Beanstalk tubes Netdata will collect statistics. | * | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:11300\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:11300\n\n - name: remote\n address: 203.0.113.0:11300\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `beanstalk` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m beanstalk\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `beanstalk` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep beanstalk\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep beanstalk /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep beanstalk\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ beanstalk_server_buried_jobs ](https://github.com/netdata/netdata/blob/master/src/health/health.d/beanstalkd.conf) | beanstalk.current_jobs | number of buried jobs across all tubes. You need to manually kick them so they can be processed. Presence of buried jobs in a tube does not affect new jobs. |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Beanstalk instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| beanstalk.current_jobs | ready, buried, urgent, delayed, reserved | jobs |\n| beanstalk.jobs_rate | created | jobs/s |\n| beanstalk.jobs_timeouts | timeouts | jobs/s |\n| beanstalk.current_tubes | tubes | tubes |\n| beanstalk.commands_rate | put, peek, peek-ready, peek-delayed, peek-buried, reserve, reserve-with-timeout, touch, use, watch, ignore, delete, bury, kick, stats, stats-job, stats-tube, list-tubes, list-tube-used, list-tubes-watched, pause-tube | commands/s |\n| beanstalk.current_connections | open, producers, workers, waiting | connections |\n| beanstalk.connections_rate | created | connections/s |\n| beanstalk.binlog_records | written, migrated | records/s |\n| beanstalk.cpu_usage | user, system | percent |\n| beanstalk.uptime | uptime | seconds |\n\n### Per tube\n\nMetrics related to Beanstalk tubes. This set of metrics is provided for each tube.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| tube_name | Tube name. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| beanstalk.tube_current_jobs | ready, buried, urgent, delayed, reserved | jobs |\n| beanstalk.tube_jobs_rate | created | jobs/s |\n| beanstalk.tube_commands_rate | delete, pause-tube | commands/s |\n| beanstalk.tube_current_connections | using, waiting, watching | connections |\n| beanstalk.tube_pause_time | since, left | seconds |\n\n",
    "integration_type": "collector",
    "id": "go.d.plugin-beanstalk-Beanstalk",
    "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/beanstalk/metadata.yaml",
    "related_resources": ""
   },
@@ -3366,6 +3325,43 @@
+  {
+   "meta": {
+    "id": "collector-go.d.plugin-boinc",
+    "plugin_name": "go.d.plugin",
+    "module_name": "boinc",
+    "monitored_instance": {
+     "name": "BOINC",
+     "link": "https://boinc.berkeley.edu/",
+     "categories": [
+      "data-collection.database-servers"
+     ],
+     "icon_filename": "bolt.svg"
+    },
+    "related_resources": {
+     "integrations": {
+      "list": []
+     }
+    },
+    "info_provided_to_referring_integrations": {
+     "description": ""
+    },
+    "keywords": [
+     "boinc",
+     "distributed"
+    ],
+    "most_popular": false
+   },
+   "overview": "# BOINC\n\nPlugin: go.d.plugin\nModule: boinc\n\n## Overview\n\nThis collector monitors task counts for the Berkeley Open Infrastructure for Network Computing (BOINC) distributed computing client.\n\n\nIt communicates with BOINC using the [GUI RPC Protocol](https://boinc.berkeley.edu/trac/wiki/GuiRpcProtocol).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects BOINC client instances running on localhost that are listening on port 31416.\nOn startup, it tries to collect metrics from:\n\n- http://127.0.0.1:31416\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",
+   "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/boinc.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/boinc.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | The IP address and port where the BOINC client listens for connections. | 127.0.0.1:31416 | yes |\n| timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. | 1 | no |\n| password | The GUI RPC password for authentication. 
| | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:31416\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:31416\n\n - name: remote\n address: 203.0.113.0:31416\n password: somePassword\n\n```\n", + "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `boinc` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m boinc\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `boinc` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep boinc\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep boinc /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep boinc\n```\n\n", + "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ boinc_total_tasks ](https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf) | boinc.tasks | average number of total tasks over the last 10 minutes |\n| [ boinc_active_tasks ](https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf) | boinc.tasks | average number of active tasks over the last 10 minutes |\n| [ boinc_compute_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf) | boinc.tasks_state | average number of compute errors over the last 10 minutes |\n| [ boinc_upload_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf) | boinc.tasks_state | average number of failed uploads over the last 10 minutes |\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per BOINC instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| boinc.tasks | total, active | tasks |\n| boinc.tasks_per_state | new, downloading, downloaded, compute_error, uploading, uploaded, aborted, upload_failed | tasks |\n| boinc.active_tasks_per_state | uninitialized, executing, abort_pending, quit_pending, suspended, copy_pending | tasks |\n| boinc.active_tasks_per_scheduler_state | uninitialized, preempted, scheduled | tasks |\n\n", + "integration_type": "collector", + "id": "go.d.plugin-boinc-BOINC", + "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/boinc/metadata.yaml", + "related_resources": "" + }, { "meta": { "id": "collector-go.d.plugin-cassandra", @@ -3397,7 +3393,7 @@ "most_popular": false }, "overview": "# Cassandra\n\nPlugin: go.d.plugin\nModule: cassandra\n\n## Overview\n\nThis collector gathers metrics about client requests, cache hits, and many more, while also providing metrics per each thread pool.\n\n\nThe [JMX Exporter](https://github.com/prometheus/jmx_exporter) is used to fetch metrics from a Cassandra instance and make them available at an endpoint like `http://127.0.0.1:7072/metrics`.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis collector discovers instances running on the local host that provide metrics on port 7072.\n\nOn startup, it tries to collect metrics from:\n\n- http://127.0.0.1:7072/metrics\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Configure Cassandra with Prometheus JMX Exporter\n\nTo configure Cassandra with the [JMX Exporter](https://github.com/prometheus/jmx_exporter):\n\n> **Note**: paths can differ depends on your setup.\n\n- Download latest [jmx_exporter](https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/) jar file\n and install it in a directory where Cassandra can access it.\n- Add\n the [jmx_exporter.yaml](https://raw.githubusercontent.com/netdata/go.d.plugin/master/modules/cassandra/jmx_exporter.yaml)\n file to `/etc/cassandra`.\n- Add the following line to `/etc/cassandra/cassandra-env.sh`\n ```\n JVM_OPTS=\"$JVM_OPTS $JVM_EXTRA_OPTS -javaagent:/opt/jmx_exporter/jmx_exporter.jar=7072:/etc/cassandra/jmx_exporter.yaml\n ```\n- Restart cassandra service.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/cassandra.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/cassandra.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. 
| 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:7072/metrics | yes |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 2 | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:7072/metrics\n\n```\n##### HTTP authentication\n\nLocal server with basic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:7072/metrics\n username: foo\n password: bar\n\n```\n##### HTTPS with self-signed certificate\n\nLocal server with enabled HTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:7072/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:7072/metrics\n\n - name: remote\n url: http://192.0.2.1:7072/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Configure Cassandra with Prometheus JMX Exporter\n\nTo configure Cassandra with the [JMX Exporter](https://github.com/prometheus/jmx_exporter):\n\n> **Note**: paths can differ depending on your setup.\n\n- Download the latest [jmx_exporter](https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/) jar file\n and install it in a directory where Cassandra can access it.\n- Add\n the [jmx_exporter.yaml](https://raw.githubusercontent.com/netdata/go.d.plugin/master/modules/cassandra/jmx_exporter.yaml)\n file to `/etc/cassandra`.\n- Add the following line to `/etc/cassandra/cassandra-env.sh`:\n ```\n JVM_OPTS=\"$JVM_OPTS $JVM_EXTRA_OPTS -javaagent:/opt/jmx_exporter/jmx_exporter.jar=7072:/etc/cassandra/jmx_exporter.yaml\"\n ```\n- Restart the Cassandra service.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/cassandra.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/cassandra.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. 
| http://127.0.0.1:7072/metrics | yes |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 2 | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:7072/metrics\n\n```\n##### HTTP authentication\n\nLocal server with basic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:7072/metrics\n username: foo\n password: bar\n\n```\n##### HTTPS with self-signed certificate\n\nLocal server with enabled HTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:7072/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:7072/metrics\n\n - name: remote\n url: http://192.0.2.1:7072/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `cassandra` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m cassandra\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `cassandra` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep cassandra\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep cassandra /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep cassandra\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Cassandra instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cassandra.client_requests_rate | read, write | requests/s |\n| cassandra.client_request_read_latency_histogram | p50, p75, p95, p98, p99, p999 | seconds |\n| cassandra.client_request_write_latency_histogram | p50, p75, p95, p98, p99, p999 | seconds |\n| cassandra.client_requests_latency | read, write | seconds |\n| cassandra.row_cache_hit_ratio | hit_ratio | percentage |\n| cassandra.row_cache_hit_rate | hits, misses | events/s |\n| cassandra.row_cache_utilization | used | percentage |\n| cassandra.row_cache_size | size | bytes |\n| cassandra.key_cache_hit_ratio | hit_ratio | percentage |\n| cassandra.key_cache_hit_rate | hits, misses | events/s |\n| cassandra.key_cache_utilization | used | percentage |\n| cassandra.key_cache_size | size | bytes |\n| cassandra.storage_live_disk_space_used | used | bytes |\n| cassandra.compaction_completed_tasks_rate | completed | tasks/s |\n| cassandra.compaction_pending_tasks_count | pending | tasks |\n| cassandra.compaction_compacted_rate | compacted | bytes/s |\n| cassandra.jvm_memory_used | heap, nonheap | bytes |\n| cassandra.jvm_gc_rate | parnew, cms | gc/s |\n| cassandra.jvm_gc_time | parnew, cms | seconds |\n| cassandra.dropped_messages_rate | dropped | messages/s |\n| cassandra.client_requests_timeouts_rate | read, write | timeout/s |\n| cassandra.client_requests_unavailables_rate | read, write | exceptions/s |\n| cassandra.client_requests_failures_rate | read, write | failures/s |\n| cassandra.storage_exceptions_rate | storage | exceptions/s |\n\n### Per thread pool\n\nMetrics related to Cassandra's thread pools. 
Each thread pool provides its own set of the following metrics.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thread_pool | thread pool name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cassandra.thread_pool_active_tasks_count | active | tasks |\n| cassandra.thread_pool_pending_tasks_count | pending | tasks |\n| cassandra.thread_pool_blocked_tasks_count | blocked | tasks |\n| cassandra.thread_pool_blocked_tasks_rate | blocked | tasks/s |\n\n", @@ -3406,6 +3402,42 @@ "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/cassandra/metadata.yaml", "related_resources": "" }, + { + "meta": { + "plugin_name": "go.d.plugin", + "module_name": "ceph", + "monitored_instance": { + "name": "Ceph", + "link": "https://ceph.io/", + "categories": [ + "data-collection.storage-mount-points-and-filesystems" + ], + "icon_filename": "ceph.svg" + }, + "related_resources": { + "integrations": { + "list": [] + } + }, + "info_provided_to_referring_integrations": { + "description": "" + }, + "keywords": [ + "ceph", + "storage" + ], + "most_popular": false + }, + "overview": "# Ceph\n\nPlugin: go.d.plugin\nModule: ceph\n\n## Overview\n\nThis collector monitors the overall health status and performance of your Ceph clusters.\nIt gathers key metrics for the entire cluster, individual Pools, and OSDs.\n\n\nIt collects metrics by periodically issuing HTTP GET requests to the Ceph Manager [REST API](https://docs.ceph.com/en/reef/mgr/ceph_api/#):\n\n- [/api/monitor](https://docs.ceph.com/en/reef/mgr/ceph_api/#get--api-monitor) (only once to get the Ceph cluster id (fsid)) \n- [/api/health/minimal](https://docs.ceph.com/en/reef/mgr/ceph_api/#get--api-health-minimal)\n- [/api/osd](https://docs.ceph.com/en/reef/mgr/ceph_api/#get--api-osd)\n- [/api/pool?stats=true](https://docs.ceph.com/en/reef/mgr/ceph_api/#get--api-pool)\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector can automatically detect Ceph Manager instances running on:\n\n- localhost, listening on port 8443\n- inside Docker containers\n\n> **Note that the Ceph REST API requires a username and password**. 
\n> While Netdata can automatically detect Ceph Manager instances and create data collection jobs, these jobs will fail unless you provide the necessary credentials.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/ceph.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/ceph.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | The URL of the [Ceph Manager API](https://docs.ceph.com/en/reef/mgr/ceph_api/). | https://127.0.0.1:8443 | yes |\n| timeout | HTTP request timeout. | 2 | no |\n| username | Username for basic HTTP authentication. | | yes |\n| password | Password for basic HTTP authentication. | | yes |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | yes | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8443\n username: user\n password: pass\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8443\n username: user\n password: pass\n\n - name: remote\n url: https://192.0.2.1:8443\n username: user\n password: pass\n\n```\n", + "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `ceph` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m ceph\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `ceph` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep ceph\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep ceph /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep ceph\n```\n\n", + "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ ceph_cluster_physical_capacity_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ceph.conf) | ceph.cluster_physical_capacity_utilization | Ceph cluster ${label:fsid} disk space utilization |\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per cluster\n\nThese metrics refer to the entire Ceph cluster.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| fsid | A unique identifier of the cluster. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ceph.cluster_status | ok, err, warn | status |\n| ceph.cluster_hosts_count | hosts | hosts |\n| ceph.cluster_monitors_count | monitors | monitors |\n| ceph.cluster_osds_count | osds | osds |\n| ceph.cluster_osds_by_status_count | up, down, in, out | status |\n| ceph.cluster_managers_count | active, standby | managers |\n| ceph.cluster_object_gateways_count | object | gateways |\n| ceph.cluster_iscsi_gateways_count | iscsi | gateways |\n| ceph.cluster_iscsi_gateways_by_status_count | up, down | gateways |\n| ceph.cluster_physical_capacity_utilization | utilization | percent |\n| ceph.cluster_physical_capacity_usage | avail, used | bytes |\n| ceph.cluster_objects_count | objects | objects |\n| ceph.cluster_objects_by_status_distribution | healthy, misplaced, degraded, unfound | percent |\n| ceph.cluster_pools_count | pools | pools |\n| ceph.cluster_pgs_count | pgs | pgs |\n| ceph.cluster_pgs_by_status_count | clean, working, warning, unknown | pgs |\n| ceph.cluster_pgs_per_osd_count | per_osd | pgs |\n\n### Per osd\n\nThese metrics refer to the Object Storage Daemon (OSD).\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| fsid | A unique identifier of the cluster. |\n| osd_uuid | OSD UUID. |\n| osd_name | OSD name. |\n| device_class | OSD device class. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ceph.osd_status | up, down, in, out | status |\n| ceph.osd_space_usage | avail, used | bytes |\n| ceph.osd_io | read, written | bytes/s |\n| ceph.osd_iops | read, write | ops/s |\n| ceph.osd_latency | commit, apply | milliseconds |\n\n### Per pool\n\nThese metrics refer to the Pool.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| fsid | A unique identifier of the cluster. |\n| pool_name | Pool name. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ceph.pool_space_utilization | utilization | percent |\n| ceph.pool_space_usage | avail, used | bytes |\n| ceph.pool_objects_count | object | objects |\n| ceph.pool_io | read, written | bytes/s |\n| ceph.pool_iops | read, write | ops/s |\n\n", + "integration_type": "collector", + "id": "go.d.plugin-ceph-Ceph", + "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/ceph/metadata.yaml", + "related_resources": "" + }, { "meta": { "id": "collector-go.d.plugin-chrony", @@ -3431,11 +3463,11 @@ }, "most_popular": false }, - "overview": "# Chrony\n\nPlugin: go.d.plugin\nModule: chrony\n\n## Overview\n\nThis collector monitors the system's clock performance and peers activity status\n\nIt collects metrics by sending UDP packets to chronyd using the Chrony communication protocol v6.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis collector discovers Chrony instance running on the local host and listening on port 323.\nOn startup, it tries to collect metrics from:\n\n- 127.0.0.1:323\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/chrony.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/chrony.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address. The format is IP:PORT. | 127.0.0.1:323 | yes |\n| timeout | Connection timeout. Zero means no timeout. 
| 1 | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:323\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:323\n\n - name: remote\n address: 192.0.2.1:323\n\n```\n",
+ "overview": "# Chrony\n\nPlugin: go.d.plugin\nModule: chrony\n\n## Overview\n\nThis collector monitors the system's clock performance and peers' activity status.\n\n\nIt collects metrics by sending UDP packets to chronyd using the Chrony communication protocol v6.\nAdditionally, for data collection jobs that connect to localhost Chrony instances, it collects serverstats metrics (NTP packets, command packets received/dropped) by executing the 'chronyc serverstats' command.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis collector discovers a Chrony instance running on the local host and listening on port 323.\nOn startup, it tries to collect metrics from:\n\n- 127.0.0.1:323\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",
+ "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/chrony.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/chrony.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address. The format is IP:PORT. | 127.0.0.1:323 | yes |\n| timeout | Connection timeout. Zero means no timeout. | 1 | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:323\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:323\n\n - name: remote\n address: 192.0.2.1:323\n\n```\n",
"troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `chrony` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m chrony\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `chrony` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep chrony\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep chrony /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep chrony\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Chrony instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| chrony.stratum | stratum | level |\n| chrony.current_correction | current_correction | seconds |\n| chrony.root_delay | root_delay | seconds |\n| chrony.root_dispersion | root_delay | seconds |\n| chrony.last_offset | offset | seconds |\n| chrony.rms_offset | offset | seconds |\n| chrony.frequency | frequency | ppm |\n| chrony.residual_frequency | residual_frequency | ppm |\n| chrony.skew | skew | ppm |\n| chrony.update_interval | update_interval | seconds |\n| chrony.ref_measurement_time | ref_measurement_time | seconds |\n| chrony.leap_status | normal, insert_second, delete_second, unsynchronised | status |\n| chrony.activity | online, offline, burst_online, burst_offline, unresolved | sources |\n\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Chrony instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| chrony.stratum | stratum | level |\n| chrony.current_correction | current_correction | seconds |\n| chrony.root_delay | root_delay | seconds |\n| chrony.root_dispersion | root_delay | seconds |\n| chrony.last_offset | offset | seconds |\n| chrony.rms_offset | offset | seconds |\n| chrony.frequency | frequency | ppm |\n| chrony.residual_frequency | residual_frequency | ppm |\n| chrony.skew | skew | ppm |\n| chrony.update_interval | update_interval | seconds |\n| chrony.ref_measurement_time | ref_measurement_time | seconds |\n| chrony.leap_status | normal, insert_second, delete_second, unsynchronised | status |\n| chrony.activity | online, offline, burst_online, burst_offline, unresolved | sources |\n| chrony.ntp_packets | received, dropped | packets/s |\n| chrony.command_packets | received, dropped | packets/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-chrony-Chrony", "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/chrony/metadata.yaml", @@ -3468,7 +3500,7 @@ "most_popular": false }, "overview": "# ClickHouse\n\nPlugin: go.d.plugin\nModule: clickhouse\n\n## Overview\n\nThis collector retrieves performance data from ClickHouse for connections, queries, resources, replication, IO, and data operations (inserts, selects, merges) using HTTP requests and ClickHouse system tables. It monitors your ClickHouse server's health and activity.\n\n\nIt sends HTTP requests to the ClickHouse [HTTP interface](https://clickhouse.com/docs/en/interfaces/http), executing SELECT queries to retrieve data from various system tables.\nSpecifically, it collects metrics from the following tables:\n\n- system.metrics\n- system.async_metrics\n- system.events\n- system.disks\n- system.parts\n- system.processes\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects ClickHouse instances running on localhost that are listening on port 8123.\nOn startup, it tries to collect metrics from:\n\n- http://127.0.0.1:8123\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/clickhouse.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/clickhouse.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. 
| http://127.0.0.1:8123 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8123\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8123\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nClickHouse with enabled HTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8123\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8123\n\n - name: remote\n url: http://192.0.2.1:8123\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/clickhouse.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/clickhouse.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8123 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. 
| | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8123\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8123\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nClickHouse with enabled HTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8123\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8123\n\n - name: remote\n url: http://192.0.2.1:8123\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `clickhouse` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m clickhouse\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `clickhouse` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep clickhouse\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep clickhouse /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep clickhouse\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ clickhouse_restarted ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.uptime | ClickHouse has recently been restarted |\n| [ clickhouse_queries_preempted ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.queries_preempted | ClickHouse has queries that are stopped and waiting due to priority setting |\n| [ clickhouse_long_running_query ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.longest_running_query_time | ClickHouse has a long-running query exceeding the threshold |\n| [ clickhouse_rejected_inserts ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.rejected_inserts | ClickHouse has INSERT queries that are rejected due to high number of active data parts for partition in a MergeTree |\n| [ clickhouse_delayed_inserts ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.delayed_inserts | ClickHouse has INSERT queries that are throttled due to high number of active data parts for partition in a MergeTree |\n| [ clickhouse_replication_lag ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.replicas_max_absolute_delay | ClickHouse is experiencing replication lag greater than 5 minutes |\n| [ clickhouse_replicated_readonly_tables ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.replicated_readonly_tables | ClickHouse has replicated tables in readonly state due to ZooKeeper session loss/startup without ZooKeeper configured |\n| [ clickhouse_max_part_count_for_partition ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.max_part_count_for_partition | ClickHouse high number of parts per partition |\n| [ clickhouse_distributed_connections_failures ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.distributed_connections_fail_exhausted_retries | ClickHouse has failed distributed connections after exhausting all retry attempts |\n| [ clickhouse_distributed_files_to_insert ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.distributed_files_to_insert | ClickHouse high number of pending files to process for asynchronous insertion into Distributed tables |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per ClickHouse instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| clickhouse.connections | tcp, http, mysql, postgresql, interserver | connections |\n| clickhouse.slow_reads | slow | reads/s |\n| clickhouse.read_backoff | read_backoff | events/s |\n| clickhouse.memory_usage | used | bytes |\n| clickhouse.running_queries | running | queries |\n| clickhouse.queries_preempted | preempted | queries |\n| clickhouse.queries | successful, failed | queries/s |\n| clickhouse.select_queries | successful, failed | selects/s |\n| clickhouse.insert_queries | successful, failed | inserts/s |\n| clickhouse.queries_memory_limit_exceeded | mem_limit_exceeded | queries/s |\n| clickhouse.longest_running_query_time | longest_query_time | seconds |\n| clickhouse.queries_latency | queries_time | microseconds |\n| clickhouse.select_queries_latency | selects_time | microseconds |\n| clickhouse.insert_queries_latency | inserts_time | microseconds |\n| clickhouse.io | reads, writes | bytes/s |\n| clickhouse.iops | reads, writes | ops/s |\n| clickhouse.io_errors | read, write | errors/s |\n| clickhouse.io_seeks | lseek | ops/s |\n| clickhouse.io_file_opens | file_open | ops/s |\n| clickhouse.replicated_parts_current_activity | fetch, send, check | parts |\n| clickhouse.replicas_max_absolute_delay | replication_delay | seconds |\n| clickhouse.replicated_readonly_tables | read_only | tables |\n| clickhouse.replicated_data_loss | data_loss | events |\n| clickhouse.replicated_part_fetches | successful, failed | fetches/s |\n| clickhouse.inserted_rows | inserted | rows/s |\n| clickhouse.inserted_bytes | inserted | bytes/s |\n| clickhouse.rejected_inserts | rejected | inserts/s |\n| clickhouse.delayed_inserts | delayed | inserts/s |\n| clickhouse.delayed_inserts_throttle_time | delayed_inserts_throttle_time | milliseconds |\n| clickhouse.selected_bytes | selected | bytes/s |\n| clickhouse.selected_rows | selected | rows/s |\n| clickhouse.selected_parts | selected | parts/s |\n| clickhouse.selected_ranges | selected | ranges/s |\n| clickhouse.selected_marks | selected | marks/s |\n| clickhouse.merges | merge | ops/s |\n| clickhouse.merges_latency | merges_time | milliseconds |\n| clickhouse.merged_uncompressed_bytes | merged_uncompressed | bytes/s |\n| clickhouse.merged_rows | merged | rows/s |\n| clickhouse.merge_tree_data_writer_inserted_rows | inserted | rows/s |\n| clickhouse.merge_tree_data_writer_uncompressed_bytes | inserted | bytes/s |\n| clickhouse.merge_tree_data_writer_compressed_bytes | written | bytes/s |\n| clickhouse.uncompressed_cache_requests | hits, misses | requests/s |\n| clickhouse.mark_cache_requests | hits, misses | requests/s |\n| clickhouse.max_part_count_for_partition | max_parts_partition | parts |\n| clickhouse.parts_count | temporary, pre_active, active, deleting, delete_on_destroy, outdated, wide, compact | parts |\n| clickhouse.distributed_connections | active | connections |\n| clickhouse.distributed_connections_attempts | connection | attempts/s |\n| clickhouse.distributed_connections_fail_retries | connection_retry | fails/s |\n| clickhouse.distributed_connections_fail_exhausted_retries | connection_retry_exhausted | fails/s |\n| clickhouse.distributed_files_to_insert | pending_insertions | files |\n| clickhouse.distributed_rejected_inserts | rejected | inserts/s |\n| clickhouse.distributed_delayed_inserts | delayed | inserts/s |\n| clickhouse.distributed_delayed_inserts_latency | 
delayed_time | milliseconds |\n| clickhouse.distributed_sync_insertion_timeout_exceeded | sync_insertion | timeouts/s |\n| clickhouse.distributed_async_insertions_failures | async_insertions | failures/s |\n| clickhouse.uptime | uptime | seconds |\n\n### Per disk\n\nThese metrics refer to the Disk.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk_name | Name of the disk as defined in the [server configuration](https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/mergetree#table_engine-mergetree-multiple-volumes_configure). |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| clickhouse.disk_space_usage | free, used | bytes |\n\n### Per table\n\nThese metrics refer to the Database Table.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| database | Name of the database. |\n| table | Name of the table. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| clickhouse.database_table_size | size | bytes |\n| clickhouse.database_table_parts | parts | parts |\n| clickhouse.database_table_rows | rows | rows |\n\n", @@ -3505,7 +3537,7 @@ "most_popular": false }, "overview": "# CockroachDB\n\nPlugin: go.d.plugin\nModule: cockroachdb\n\n## Overview\n\nThis collector monitors CockroachDB servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/cockroachdb.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/cockroachdb.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8080/_status/vars | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. 
| no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/_status/vars\n\n```\n##### HTTP authentication\n\nLocal server with basic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/_status/vars\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nCockroachDB with enabled HTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8080/_status/vars\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/_status/vars\n\n - name: remote\n url: http://203.0.113.10:8080/_status/vars\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/cockroachdb.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/cockroachdb.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8080/_status/vars | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/_status/vars\n\n```\n##### HTTP authentication\n\nLocal server with basic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/_status/vars\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nCockroachDB with enabled HTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8080/_status/vars\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/_status/vars\n\n - name: remote\n url: http://203.0.113.10:8080/_status/vars\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `cockroachdb` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m cockroachdb\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `cockroachdb` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep cockroachdb\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep cockroachdb /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep cockroachdb\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ cockroachdb_used_storage_capacity ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf) | cockroachdb.storage_used_capacity_percentage | storage capacity utilization |\n| [ cockroachdb_used_usable_storage_capacity ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf) | cockroachdb.storage_used_capacity_percentage | storage usable space utilization |\n| [ cockroachdb_unavailable_ranges ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf) | cockroachdb.ranges_replication_problem | number of ranges with fewer live replicas than needed for quorum |\n| [ cockroachdb_underreplicated_ranges ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf) | cockroachdb.ranges_replication_problem | number of ranges with fewer live replicas than the replication target |\n| [ cockroachdb_open_file_descriptors_limit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf) | cockroachdb.process_file_descriptors | open file descriptors utilization (against softlimit) |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per CockroachDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cockroachdb.process_cpu_time_combined_percentage | used | percentage |\n| cockroachdb.process_cpu_time_percentage | user, sys | percentage |\n| cockroachdb.process_cpu_time | user, sys | ms |\n| cockroachdb.process_memory | rss | KiB |\n| cockroachdb.process_file_descriptors | open | fd |\n| cockroachdb.process_uptime | uptime | seconds |\n| cockroachdb.host_disk_bandwidth | read, write | KiB |\n| cockroachdb.host_disk_operations | reads, writes | operations |\n| cockroachdb.host_disk_iops_in_progress | in_progress | iops |\n| cockroachdb.host_network_bandwidth | received, sent | kilobits |\n| cockroachdb.host_network_packets | received, sent | packets |\n| cockroachdb.live_nodes | live_nodes | nodes |\n| cockroachdb.node_liveness_heartbeats | successful, failed | heartbeats |\n| cockroachdb.total_storage_capacity | total | KiB |\n| cockroachdb.storage_capacity_usability | usable, unusable | KiB |\n| cockroachdb.storage_usable_capacity | available, used | KiB |\n| cockroachdb.storage_used_capacity_percentage | total, usable | percentage |\n| cockroachdb.sql_connections | active | connections |\n| cockroachdb.sql_bandwidth | received, sent | KiB |\n| cockroachdb.sql_statements_total | started, executed | statements |\n| cockroachdb.sql_errors | statement, transaction | errors |\n| cockroachdb.sql_started_ddl_statements | ddl | statements |\n| cockroachdb.sql_executed_ddl_statements | ddl | statements |\n| cockroachdb.sql_started_dml_statements | select, update, delete, insert | statements |\n| cockroachdb.sql_executed_dml_statements | select, update, delete, insert | statements |\n| 
cockroachdb.sql_started_tcl_statements | begin, commit, rollback, savepoint, savepoint_cockroach_restart, release_savepoint_cockroach_restart, rollback_to_savepoint_cockroach_restart | statements |\n| cockroachdb.sql_executed_tcl_statements | begin, commit, rollback, savepoint, savepoint_cockroach_restart, release_savepoint_cockroach_restart, rollback_to_savepoint_cockroach_restart | statements |\n| cockroachdb.sql_active_distributed_queries | active | queries |\n| cockroachdb.sql_distributed_flows | active, queued | flows |\n| cockroachdb.live_bytes | applications, system | KiB |\n| cockroachdb.logical_data | keys, values | KiB |\n| cockroachdb.logical_data_count | keys, values | num |\n| cockroachdb.kv_transactions | committed, fast-path_committed, aborted | transactions |\n| cockroachdb.kv_transaction_restarts | write_too_old, write_too_old_multiple, forwarded_timestamp, possible_reply, async_consensus_failure, read_within_uncertainty_interval, aborted, push_failure, unknown | restarts |\n| cockroachdb.ranges | ranges | ranges |\n| cockroachdb.ranges_replication_problem | unavailable, under_replicated, over_replicated | ranges |\n| cockroachdb.range_events | split, add, remove, merge | events |\n| cockroachdb.range_snapshot_events | generated, applied_raft_initiated, applied_learner, applied_preemptive | events |\n| cockroachdb.rocksdb_read_amplification | reads | reads/query |\n| cockroachdb.rocksdb_table_operations | compactions, flushes | operations |\n| cockroachdb.rocksdb_cache_usage | used | KiB |\n| cockroachdb.rocksdb_cache_operations | hits, misses | operations |\n| cockroachdb.rocksdb_cache_hit_rate | hit_rate | percentage |\n| cockroachdb.rocksdb_sstables | sstables | sstables |\n| cockroachdb.replicas | replicas | replicas |\n| cockroachdb.replicas_quiescence | quiescent, active | replicas |\n| cockroachdb.replicas_leaders | leaders, not_leaseholders | replicas |\n| cockroachdb.replicas_leaseholders | leaseholders | leaseholders |\n| cockroachdb.queue_processing_failures | gc, replica_gc, replication, split, consistency, raft_log, raft_snapshot, time_series_maintenance | failures |\n| cockroachdb.rebalancing_queries | avg | queries/s |\n| cockroachdb.rebalancing_writes | avg | writes/s |\n| cockroachdb.timeseries_samples | written | samples |\n| cockroachdb.timeseries_write_errors | write | errors |\n| cockroachdb.timeseries_write_bytes | written | KiB |\n| cockroachdb.slow_requests | acquiring_latches, acquiring_lease, in_raft | requests |\n| cockroachdb.code_heap_memory_usage | go, cgo | KiB |\n| cockroachdb.goroutines | goroutines | goroutines |\n| cockroachdb.gc_count | gc | invokes |\n| cockroachdb.gc_pause | pause | us |\n| cockroachdb.cgo_calls | cgo | calls |\n\n", @@ -3543,7 +3575,7 @@ "most_popular": true }, "overview": "# Consul\n\nPlugin: go.d.plugin\nModule: consul\n\n## Overview\n\nThis collector monitors [key metrics](https://developer.hashicorp.com/consul/docs/agent/telemetry#key-metrics) of Consul Agents: transaction timings, leadership changes, memory usage and more.\n\n\nIt periodically sends HTTP requests to [Consul REST API](https://developer.hashicorp.com/consul/api-docs).\n\nUsed endpoints:\n\n- [/operator/autopilot/health](https://developer.hashicorp.com/consul/api-docs/operator/autopilot#read-health)\n- [/agent/checks](https://developer.hashicorp.com/consul/api-docs/agent/check#list-checks)\n- [/agent/self](https://developer.hashicorp.com/consul/api-docs/agent#read-configuration)\n- 
[/agent/metrics](https://developer.hashicorp.com/consul/api-docs/agent#view-metrics)\n- [/coordinate/nodes](https://developer.hashicorp.com/consul/api-docs/coordinate#read-lan-coordinates-for-all-nodes)\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis collector discovers instances running on the local host, that provide metrics on port 8500.\n\nOn startup, it tries to collect metrics from:\n\n- http://localhost:8500\n- http://127.0.0.1:8500\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Enable Prometheus telemetry\n\n[Enable](https://developer.hashicorp.com/consul/docs/agent/config/config-files#telemetry-prometheus_retention_time) telemetry on your Consul agent, by increasing the value of `prometheus_retention_time` from `0`.\n\n\n#### Add required ACLs to Token\n\nRequired **only if authentication is enabled**.\n\n| ACL | Endpoint |\n|:---------------:|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|\n| `operator:read` | [autopilot health status](https://developer.hashicorp.com/consul/api-docs/operator/autopilot#read-health) |\n| `node:read` | [checks](https://developer.hashicorp.com/consul/api-docs/agent/check#list-checks) |\n| `agent:read` | [configuration](https://developer.hashicorp.com/consul/api-docs/agent#read-configuration), [metrics](https://developer.hashicorp.com/consul/api-docs/agent#view-metrics), and [lan coordinates](https://developer.hashicorp.com/consul/api-docs/coordinate#read-lan-coordinates-for-all-nodes) |\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/consul.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/consul.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://localhost:8500 | yes |\n| acl_token | ACL token used in every request. | | no |\n| max_checks | Checks processing/charting limit. | | no |\n| max_filter | Checks processing/charting filter. Uses [simple patterns](/src/libnetdata/simple_pattern/README.md). | | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. 
| 1 | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client tls certificate. | | no |\n| tls_key | Client tls key. | | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8500\n acl_token: \"ec15675e-2999-d789-832e-8c4794daa8d7\"\n\n```\n##### Basic HTTP auth\n\nLocal server with basic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8500\n acl_token: \"ec15675e-2999-d789-832e-8c4794daa8d7\"\n username: foo\n password: bar\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8500\n acl_token: \"ec15675e-2999-d789-832e-8c4794daa8d7\"\n\n - name: remote\n url: http://203.0.113.10:8500\n acl_token: \"ada7f751-f654-8872-7f93-498e799158b6\"\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Enable Prometheus telemetry\n\n[Enable](https://developer.hashicorp.com/consul/docs/agent/config/config-files#telemetry-prometheus_retention_time) telemetry on your Consul agent, by increasing the value of `prometheus_retention_time` from `0`.\n\n\n#### Add required ACLs to Token\n\nRequired **only if authentication is enabled**.\n\n| ACL | Endpoint |\n|:---------------:|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|\n| `operator:read` | [autopilot health status](https://developer.hashicorp.com/consul/api-docs/operator/autopilot#read-health) |\n| `node:read` | [checks](https://developer.hashicorp.com/consul/api-docs/agent/check#list-checks) |\n| `agent:read` | [configuration](https://developer.hashicorp.com/consul/api-docs/agent#read-configuration), [metrics](https://developer.hashicorp.com/consul/api-docs/agent#view-metrics), and [lan coordinates](https://developer.hashicorp.com/consul/api-docs/coordinate#read-lan-coordinates-for-all-nodes) |\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/consul.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/consul.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. 
| http://localhost:8500 | yes |\n| acl_token | ACL token used in every request. | | no |\n| max_checks | Checks processing/charting limit. | | no |\n| max_filter | Checks processing/charting filter. Uses [simple patterns](/src/libnetdata/simple_pattern/README.md). | | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 1 | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client tls certificate. | | no |\n| tls_key | Client tls key. | | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8500\n acl_token: \"ec15675e-2999-d789-832e-8c4794daa8d7\"\n\n```\n##### Basic HTTP auth\n\nLocal server with basic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8500\n acl_token: \"ec15675e-2999-d789-832e-8c4794daa8d7\"\n username: foo\n password: bar\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8500\n acl_token: \"ec15675e-2999-d789-832e-8c4794daa8d7\"\n\n - name: remote\n url: http://203.0.113.10:8500\n acl_token: \"ada7f751-f654-8872-7f93-498e799158b6\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `consul` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m consul\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `consul` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep consul\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep consul /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep consul\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ consul_node_health_check_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.node_health_check_status | node health check ${label:check_name} has failed on server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_service_health_check_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.service_health_check_status | service health check ${label:check_name} for service ${label:service_name} has failed on server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_client_rpc_requests_exceeded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.client_rpc_requests_exceeded_rate | number of rate-limited RPC requests made by server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_client_rpc_requests_failed ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.client_rpc_requests_failed_rate | number of failed RPC requests made by server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_gc_pause_time ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.gc_pause_time | time spent in stop-the-world garbage collection pauses on server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_autopilot_health_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.autopilot_health_status | datacenter ${label:datacenter} cluster is unhealthy as reported by server ${label:node_name} |\n| [ consul_autopilot_server_health_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.autopilot_server_health_status | server ${label:node_name} from datacenter ${label:datacenter} is unhealthy |\n| [ consul_raft_leader_last_contact_time ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.raft_leader_last_contact_time | median time elapsed since leader server ${label:node_name} datacenter ${label:datacenter} was last able to contact the follower nodes |\n| [ consul_raft_leadership_transitions ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.raft_leadership_transitions_rate | there has been a leadership change and server ${label:node_name} datacenter ${label:datacenter} has become the leader |\n| [ consul_raft_thread_main_saturation 
](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.raft_thread_main_saturation_perc | average saturation of the main Raft goroutine on server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_raft_thread_fsm_saturation ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.raft_thread_fsm_saturation_perc | average saturation of the FSM Raft goroutine on server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_license_expiration_time ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.license_expiration_time | Consul Enterprise licence expiration time on node ${label:node_name} datacenter ${label:datacenter} |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nThe set of metrics depends on the [Consul Agent mode](https://developer.hashicorp.com/consul/docs/install/glossary#agent).\n\n\n### Per Consul instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | Leader | Follower | Client |\n|:------|:----------|:----|:---:|:---:|:---:|\n| consul.client_rpc_requests_rate | rpc | requests/s | \u2022 | \u2022 | \u2022 |\n| consul.client_rpc_requests_exceeded_rate | exceeded | requests/s | \u2022 | \u2022 | \u2022 |\n| consul.client_rpc_requests_failed_rate | failed | requests/s | \u2022 | \u2022 | \u2022 |\n| consul.memory_allocated | allocated | bytes | \u2022 | \u2022 | \u2022 |\n| consul.memory_sys | sys | bytes | \u2022 | \u2022 | \u2022 |\n| consul.gc_pause_time | gc_pause | seconds | \u2022 | \u2022 | \u2022 |\n| consul.kvs_apply_time | quantile_0.5, quantile_0.9, quantile_0.99 | ms | \u2022 | \u2022 | |\n| consul.kvs_apply_operations_rate | kvs_apply | ops/s | \u2022 | \u2022 | |\n| consul.txn_apply_time | quantile_0.5, quantile_0.9, quantile_0.99 | ms | \u2022 | \u2022 | |\n| consul.txn_apply_operations_rate | txn_apply | ops/s | \u2022 | \u2022 | |\n| consul.autopilot_health_status | healthy, unhealthy | status | \u2022 | \u2022 | |\n| consul.autopilot_failure_tolerance | failure_tolerance | servers | \u2022 | \u2022 | |\n| consul.autopilot_server_health_status | healthy, unhealthy | status | \u2022 | \u2022 | |\n| consul.autopilot_server_stable_time | stable | seconds | \u2022 | \u2022 | |\n| consul.autopilot_server_serf_status | active, failed, left, none | status | \u2022 | \u2022 | |\n| consul.autopilot_server_voter_status | voter, not_voter | status | \u2022 | \u2022 | |\n| consul.network_lan_rtt | min, max, avg | ms | \u2022 | \u2022 | |\n| consul.raft_commit_time | quantile_0.5, quantile_0.9, quantile_0.99 | ms | \u2022 | | |\n| consul.raft_commits_rate | commits | commits/s | \u2022 | | |\n| consul.raft_leader_last_contact_time | quantile_0.5, quantile_0.9, quantile_0.99 | ms | \u2022 | | |\n| consul.raft_leader_oldest_log_age | oldest_log_age | seconds | \u2022 | | |\n| consul.raft_follower_last_contact_leader_time | leader_last_contact | ms | | \u2022 | |\n| consul.raft_rpc_install_snapshot_time | quantile_0.5, quantile_0.9, quantile_0.99 | ms | | \u2022 | |\n| consul.raft_leader_elections_rate | leader | elections/s | \u2022 | \u2022 | |\n| consul.raft_leadership_transitions_rate | leadership | transitions/s | \u2022 | \u2022 | |\n| consul.server_leadership_status | leader, not_leader | status | \u2022 | \u2022 | |\n| 
consul.raft_thread_main_saturation_perc | quantile_0.5, quantile_0.9, quantile_0.99 | percentage | \u2022 | \u2022 | |\n| consul.raft_thread_fsm_saturation_perc | quantile_0.5, quantile_0.9, quantile_0.99 | percentage | \u2022 | \u2022 | |\n| consul.raft_fsm_last_restore_duration | last_restore_duration | ms | \u2022 | \u2022 | |\n| consul.raft_boltdb_freelist_bytes | freelist | bytes | \u2022 | \u2022 | |\n| consul.raft_boltdb_logs_per_batch_rate | written | logs/s | \u2022 | \u2022 | |\n| consul.raft_boltdb_store_logs_time | quantile_0.5, quantile_0.9, quantile_0.99 | ms | \u2022 | \u2022 | |\n| consul.license_expiration_time | license_expiration | seconds | \u2022 | \u2022 | \u2022 |\n\n### Per node check\n\nMetrics about checks on Node level.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| datacenter | Datacenter Identifier |\n| node_name | The node's name |\n| check_name | The check's name |\n\nMetrics:\n\n| Metric | Dimensions | Unit | Leader | Follower | Client |\n|:------|:----------|:----|:---:|:---:|:---:|\n| consul.node_health_check_status | passing, maintenance, warning, critical | status | \u2022 | \u2022 | \u2022 |\n\n### Per service check\n\nMetrics about checks at a Service level.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| datacenter | Datacenter Identifier |\n| node_name | The node's name |\n| check_name | The check's name |\n| service_name | The service's name |\n\nMetrics:\n\n| Metric | Dimensions | Unit | Leader | Follower | Client |\n|:------|:----------|:----|:---:|:---:|:---:|\n| consul.service_health_check_status | passing, maintenance, warning, critical | status | \u2022 | \u2022 | \u2022 |\n\n", @@ -3581,7 +3613,7 @@ "most_popular": false }, "overview": "# CoreDNS\n\nPlugin: go.d.plugin\nModule: coredns\n\n## Overview\n\nThis collector monitors CoreDNS instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/coredns.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/coredns.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9153/metrics | yes |\n| per_server_stats | Server filter. | | no |\n| per_zone_stats | Zone filter. | | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. 
| | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 2 | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client tls certificate. | | no |\n| tls_key | Client tls key. | | no |\n\n##### per_server_stats\n\nMetrics of servers matching the selector will be collected.\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [matcher](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/matcher#supported-format).\n- Syntax:\n\n```yaml\nper_server_stats:\n includes:\n - pattern1\n - pattern2\n excludes:\n - pattern3\n - pattern4\n```\n\n\n##### per_zone_stats\n\nMetrics of zones matching the selector will be collected.\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [matcher](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/matcher#supported-format).\n- Syntax:\n\n```yaml\nper_zone_stats:\n includes:\n - pattern1\n - pattern2\n excludes:\n - pattern3\n - pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9153/metrics\n\n```\n##### Basic HTTP auth\n\nLocal server with basic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9153/metrics\n username: foo\n password: bar\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9153/metrics\n\n - name: remote\n url: http://203.0.113.10:9153/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/coredns.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/coredns.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9153/metrics | yes |\n| per_server_stats | Server filter. | | no |\n| per_zone_stats | Zone filter. | | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. 
| | no |\n| timeout | HTTP request timeout. | 2 | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client tls certificate. | | no |\n| tls_key | Client tls key. | | no |\n\n##### per_server_stats\n\nMetrics of servers matching the selector will be collected.\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [matcher](https://github.com/netdata/netdata/tree/master/src/go/pkg/matcher#supported-format).\n- Syntax:\n\n```yaml\nper_server_stats:\n includes:\n - pattern1\n - pattern2\n excludes:\n - pattern3\n - pattern4\n```\n\n\n##### per_zone_stats\n\nMetrics of zones matching the selector will be collected.\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [matcher](https://github.com/netdata/netdata/tree/master/src/go/pkg/matcher#supported-format).\n- Syntax:\n\n```yaml\nper_zone_stats:\n includes:\n - pattern1\n - pattern2\n excludes:\n - pattern3\n - pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9153/metrics\n\n```\n##### Basic HTTP auth\n\nLocal server with basic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9153/metrics\n username: foo\n password: bar\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9153/metrics\n\n - name: remote\n url: http://203.0.113.10:9153/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `coredns` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m coredns\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `coredns` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep coredns\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep coredns /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep coredns\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per CoreDNS instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| coredns.dns_request_count_total | requests | requests/s |\n| coredns.dns_responses_count_total | responses | responses/s |\n| coredns.dns_request_count_total_per_status | processed, dropped | requests/s |\n| coredns.dns_no_matching_zone_dropped_total | dropped | requests/s |\n| coredns.dns_panic_count_total | panics | panics/s |\n| coredns.dns_requests_count_total_per_proto | udp, tcp | requests/s |\n| coredns.dns_requests_count_total_per_ip_family | v4, v6 | requests/s |\n| coredns.dns_requests_count_total_per_per_type | a, aaaa, mx, soa, cname, ptr, txt, ns, ds, dnskey, rrsig, nsec, nsec3, ixfr, any, other | requests/s |\n| coredns.dns_responses_count_total_per_rcode | noerror, formerr, servfail, nxdomain, notimp, refused, yxdomain, yxrrset, nxrrset, notauth, notzone, badsig, badkey, badtime, badmode, badname, badalg, badtrunc, badcookie, other | responses/s |\n\n### Per server\n\nThese metrics refer to the DNS server.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| server_name | Server name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| coredns.server_dns_request_count_total | requests | requests/s |\n| coredns.server_dns_responses_count_total | responses | responses/s |\n| coredns.server_request_count_total_per_status | processed, dropped | requests/s |\n| coredns.server_requests_count_total_per_proto | udp, tcp | requests/s |\n| coredns.server_requests_count_total_per_ip_family | v4, v6 | requests/s |\n| coredns.server_requests_count_total_per_per_type | a, aaaa, mx, soa, cname, ptr, txt, ns, ds, dnskey, rrsig, nsec, nsec3, ixfr, any, other | requests/s |\n| coredns.server_responses_count_total_per_rcode | noerror, formerr, servfail, nxdomain, notimp, refused, yxdomain, yxrrset, nxrrset, notauth, notzone, badsig, badkey, badtime, badmode, badname, badalg, badtrunc, badcookie, other | responses/s |\n\n### Per zone\n\nThese metrics refer to the DNS zone.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| zone_name | Zone name. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| coredns.zone_dns_request_count_total | requests | requests/s |\n| coredns.zone_dns_responses_count_total | responses | responses/s |\n| coredns.zone_requests_count_total_per_proto | udp, tcp | requests/s |\n| coredns.zone_requests_count_total_per_ip_family | v4, v6 | requests/s |\n| coredns.zone_requests_count_total_per_per_type | a, aaaa, mx, soa, cname, ptr, txt, ns, ds, dnskey, rrsig, nsec, nsec3, ixfr, any, other | requests/s |\n| coredns.zone_responses_count_total_per_rcode | noerror, formerr, servfail, nxdomain, notimp, refused, yxdomain, yxrrset, nxrrset, notauth, notzone, badsig, badkey, badtime, badmode, badname, badalg, badtrunc, badcookie, other | responses/s |\n\n", @@ -3618,7 +3650,7 @@ "most_popular": false }, "overview": "# Couchbase\n\nPlugin: go.d.plugin\nModule: couchbase\n\n## Overview\n\nThis collector monitors Couchbase servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/couchbase.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/couchbase.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8091 | yes |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 2 | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client tls certificate. | | no |\n| tls_key | Client tls key. 
| | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8091\n\n```\n##### Basic HTTP auth\n\nLocal server with basic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8091\n username: foo\n password: bar\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8091\n\n - name: remote\n url: http://203.0.113.0:8091\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/couchbase.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/couchbase.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8091 | yes |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 2 | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client tls certificate. | | no |\n| tls_key | Client tls key. | | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8091\n\n```\n##### Basic HTTP auth\n\nLocal server with basic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8091\n username: foo\n password: bar\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8091\n\n - name: remote\n url: http://203.0.113.0:8091\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `couchbase` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m couchbase\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `couchbase` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep couchbase\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep couchbase /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep couchbase\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Couchbase instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| couchbase.bucket_quota_percent_used | a dimension per bucket | percentage |\n| couchbase.bucket_ops_per_sec | a dimension per bucket | ops/s |\n| couchbase.bucket_disk_fetches | a dimension per bucket | fetches |\n| couchbase.bucket_item_count | a dimension per bucket | items |\n| couchbase.bucket_disk_used_stats | a dimension per bucket | bytes |\n| couchbase.bucket_data_used | a dimension per bucket | bytes |\n| couchbase.bucket_mem_used | a dimension per bucket | bytes |\n| couchbase.bucket_vb_active_num_non_resident | a dimension per bucket | items |\n\n", @@ -3655,7 +3687,7 @@ "most_popular": false }, "overview": "# CouchDB\n\nPlugin: go.d.plugin\nModule: couchdb\n\n## Overview\n\nThis collector monitors CouchDB servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/couchdb.conf`.\n\n\nYou can edit the configuration file using the `edit-config` 
script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/couchdb.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:5984 | yes |\n| node | CouchDB node name. Same as -name vm.args argument. | _local | no |\n| databases | List of database names for which db-specific stats should be displayed, space separated. | | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 2 | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client tls certificate. | | no |\n| tls_key | Client tls key. | | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:5984\n\n```\n##### Basic HTTP auth\n\nLocal server with basic HTTP authentication, node name and multiple databases defined. Make sure to match the node name with the `NODENAME` value in your CouchDB's `etc/vm.args` file. Typically, this is of the form `couchdb@fully.qualified.domain.name` in a cluster, or `couchdb@127.0.0.1` for a single-node server.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:5984\n node: couchdb@127.0.0.1\n databases: my-db other-db\n username: foo\n password: bar\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:5984\n\n - name: remote\n url: http://203.0.113.0:5984\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/couchdb.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/couchdb.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:5984 | yes |\n| node | CouchDB node name. Same as -name vm.args argument. | _local | no |\n| databases | List of database names for which db-specific stats should be displayed, space separated. | | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 2 | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client tls certificate. | | no |\n| tls_key | Client tls key. | | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:5984\n\n```\n##### Basic HTTP auth\n\nLocal server with basic HTTP authentication, node name and multiple databases defined. Make sure to match the node name with the `NODENAME` value in your CouchDB's `etc/vm.args` file. Typically, this is of the form `couchdb@fully.qualified.domain.name` in a cluster, or `couchdb@127.0.0.1` for a single-node server.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:5984\n node: couchdb@127.0.0.1\n databases: my-db other-db\n username: foo\n password: bar\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:5984\n\n - name: remote\n url: http://203.0.113.0:5984\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `couchdb` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m couchdb\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `couchdb` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep couchdb\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep couchdb /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep couchdb\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per CouchDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| couchdb.activity | db_reads, db_writes, view_reads | requests/s |\n| couchdb.request_methods | copy, delete, get, head, options, post, put | requests/s |\n| couchdb.response_codes | 200, 201, 202, 204, 206, 301, 302, 304, 400, 401, 403, 404, 406, 409, 412, 413, 414, 415, 416, 417, 500, 501, 503 | responses/s |\n| couchdb.response_code_classes | 2xx, 3xx, 4xx, 5xx | responses/s |\n| couchdb.active_tasks | indexer, db_compaction, replication, view_compaction | tasks |\n| couchdb.replicator_jobs | running, pending, crashed, internal_replication_jobs | jobs |\n| couchdb.open_files | files | files |\n| couchdb.erlang_vm_memory | atom, binaries, code, ets, procs, other | B |\n| couchdb.proccounts | os_procs, erl_procs | processes |\n| couchdb.peakmsgqueue | peak_size | messages |\n| couchdb.reductions | reductions | reductions |\n| couchdb.db_sizes_file | a dimension per database | KiB |\n| couchdb.db_sizes_external | a dimension per database | KiB |\n| couchdb.db_sizes_active | a dimension per database | KiB |\n| couchdb.db_doc_count | a dimension per database | docs |\n| couchdb.db_doc_del_count | a dimension per database | docs |\n\n", @@ -3691,7 +3723,7 @@ "most_popular": false }, "overview": "# DMCache devices\n\nPlugin: go.d.plugin\nModule: dmcache\n\n## Overview\n\nThis collector monitors DMCache, providing insights into capacity usage, efficiency, and activity. It relies on the [`dmsetup`](https://man7.org/linux/man-pages/man8/dmsetup.8.html) CLI tool but avoids directly executing the binary. Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment. 
This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/dmcache.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dmcache.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | dmsetup binary execution timeout. | 2 | no |\n\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n```yaml\njobs:\n - name: dmcache\n update_every: 5 # Collect DMCache statistics every 5 seconds\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/dmcache.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dmcache.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | dmsetup binary execution timeout. | 2 | no |\n\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n```yaml\njobs:\n - name: dmcache\n update_every: 5 # Collect DMCache statistics every 5 seconds\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `dmcache` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m dmcache\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `dmcache` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep dmcache\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep dmcache /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep dmcache\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per dmcache device\n\nThese metrics refer to the DMCache device.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | Device name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dmcache.device_cache_space_usage | free, used | bytes |\n| dmcache.device_metadata_space_usage | free, used | bytes |\n| dmcache.device_cache_read_efficiency | hits, misses | requests/s |\n| dmcache.device_cache_write_efficiency | hits, misses | requests/s |\n| dmcache.device_cache_activity | promotions, demotions | bytes/s |\n| dmcache.device_cache_dirty_size | dirty | bytes |\n\n", @@ -3727,7 +3759,7 @@ "most_popular": false }, "overview": "# DNS query\n\nPlugin: go.d.plugin\nModule: dns_query\n\n## Overview\n\nThis module monitors DNS query round-trip time (RTT).\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/dns_query.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config 
directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dns_query.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| domains | Domain or subdomains to query. The collector will choose a random domain from the list on every iteration. | | yes |\n| servers | Servers to query. | | yes |\n| port | DNS server port. | 53 | no |\n| network | Network protocol name. Available options: udp, tcp, tcp-tls. | udp | no |\n| record_types | Query record type. Available options: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, TXT, SRV. | A | no |\n| timeout | Query read timeout. | 2 | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: job1\n record_types:\n - A\n - AAAA\n domains:\n - google.com\n - github.com\n - reddit.com\n servers:\n - 8.8.8.8\n - 8.8.4.4\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/dns_query.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dns_query.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| domains | Domain or subdomains to query. The collector will choose a random domain from the list on every iteration. | | yes |\n| servers | Servers to query. | | yes |\n| port | DNS server port. | 53 | no |\n| network | Network protocol name. Available options: udp, tcp, tcp-tls. | udp | no |\n| record_types | Query record type. Available options: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV. | A | no |\n| timeout | Query read timeout. | 2 | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: job1\n record_types:\n - A\n - AAAA\n domains:\n - google.com\n - github.com\n - reddit.com\n servers:\n - 8.8.8.8\n - 8.8.4.4\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `dns_query` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m dns_query\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `dns_query` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep dns_query\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep dns_query /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep dns_query\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ dns_query_query_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/dns_query.conf) | dns_query.query_status | DNS request type ${label:record_type} to server ${label:server} is unsuccessful |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per server\n\nThese metrics refer to the DNS server.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| server | DNS server address. |\n| network | Network protocol name (tcp, udp, tcp-tls). |\n| record_type | DNS record type (e.g. A, AAAA, CNAME). 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dns_query.query_status | success, network_error, dns_error | status |\n| dns_query.query_time | query_time | seconds |\n\n", @@ -3764,7 +3796,7 @@ "most_popular": false }, "overview": "# DNSdist\n\nPlugin: go.d.plugin\nModule: dnsdist\n\n## Overview\n\nThis collector monitors DNSDist servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Enable DNSdist built-in Webserver\n\nFor collecting metrics via HTTP, you need to [enable the built-in webserver](https://dnsdist.org/guides/webserver.html).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/dnsdist.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dnsdist.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8083 | yes |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 1 | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client tls certificate. | | no |\n| tls_key | Client tls key. 
| | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8083\n headers:\n X-API-Key: your-api-key # static pre-shared authentication key for access to the REST API (api-key).\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8083\n headers:\n X-API-Key: 'your-api-key' # static pre-shared authentication key for access to the REST API (api-key).\n\n - name: remote\n url: http://203.0.113.0:8083\n headers:\n X-API-Key: 'your-api-key'\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Enable DNSdist built-in Webserver\n\nFor collecting metrics via HTTP, you need to [enable the built-in webserver](https://dnsdist.org/guides/webserver.html).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/dnsdist.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dnsdist.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8083 | yes |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 1 | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client tls certificate. | | no |\n| tls_key | Client tls key. 
| | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8083\n headers:\n X-API-Key: your-api-key # static pre-shared authentication key for access to the REST API (api-key).\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8083\n headers:\n X-API-Key: 'your-api-key' # static pre-shared authentication key for access to the REST API (api-key).\n\n - name: remote\n url: http://203.0.113.0:8083\n headers:\n X-API-Key: 'your-api-key'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `dnsdist` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m dnsdist\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `dnsdist` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep dnsdist\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep dnsdist /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep dnsdist\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per DNSdist instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dnsdist.queries | all, recursive, empty | queries/s |\n| dnsdist.queries_dropped | rule_drop, dynamic_blocked, no_policy, non_queries | queries/s |\n| dnsdist.packets_dropped | acl | packets/s |\n| dnsdist.answers | self_answered, nxdomain, refused, trunc_failures | answers/s |\n| dnsdist.backend_responses | responses | responses/s |\n| dnsdist.backend_commerrors | send_errors | errors/s |\n| dnsdist.backend_errors | timeouts, servfail, non_compliant | responses/s |\n| dnsdist.cache | hits, misses | answers/s |\n| dnsdist.servercpu | system_state, user_state | ms/s |\n| dnsdist.servermem | memory_usage | MiB |\n| dnsdist.query_latency | 1ms, 10ms, 50ms, 100ms, 1sec, slow | queries/s |\n| dnsdist.query_latency_avg | 100, 1k, 10k, 1000k | microseconds |\n\n", @@ -3801,7 +3833,7 @@ "most_popular": false }, "overview": "# Dnsmasq\n\nPlugin: go.d.plugin\nModule: dnsmasq\n\n## Overview\n\nThis collector monitors Dnsmasq servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/dnsmasq.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dnsmasq.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address in `ip:port` format. | 127.0.0.1:53 | yes |\n| protocol | DNS query transport protocol. Supported protocols: udp, tcp, tcp-tls. | udp | no |\n| timeout | DNS query timeout (dial, write and read) in seconds. 
| 1 | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:53\n\n```\n##### Using TCP protocol\n\nLocal server with specific DNS query transport protocol.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:53\n protocol: tcp\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:53\n\n - name: remote\n address: 203.0.113.0:53\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/dnsmasq.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dnsmasq.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address in `ip:port` format. | 127.0.0.1:53 | yes |\n| protocol | DNS query transport protocol. Supported protocols: udp, tcp, tcp-tls. | udp | no |\n| timeout | DNS query timeout (dial, write and read) in seconds. | 1 | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:53\n\n```\n##### Using TCP protocol\n\nLocal server with specific DNS query transport protocol.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:53\n protocol: tcp\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:53\n\n - name: remote\n address: 203.0.113.0:53\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `dnsmasq` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m dnsmasq\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `dnsmasq` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep dnsmasq\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep dnsmasq /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep dnsmasq\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Dnsmasq instance\n\nThe metrics apply to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dnsmasq.servers_queries | success, failed | queries/s |\n| dnsmasq.cache_performance | hits, misses | events/s |\n| dnsmasq.cache_operations | insertions, evictions | operations/s |\n| dnsmasq.cache_size | size | entries |\n\n", @@ -3837,8 +3869,8 @@ "most_popular": false }, - "overview": "# Dnsmasq DHCP\n\nPlugin: go.d.plugin\nModule: dnsmasq_dhcp\n\n## Overview\n\nThis collector monitors Dnsmasq DHCP leases databases, depending on your configuration.\n\nBy default, it uses:\n\n- `/var/lib/misc/dnsmasq.leases` to read leases.\n- `/etc/dnsmasq.conf` to detect dhcp-ranges.\n- `/etc/dnsmasq.d` to find additional configurations.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAll configured dhcp-ranges are detected automatically\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/dnsmasq_dhcp.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dnsmasq_dhcp.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| leases_path | Path to dnsmasq DHCP leases file. | /var/lib/misc/dnsmasq.leases | no |\n| conf_path | Path to dnsmasq configuration file. 
| /etc/dnsmasq.conf | no |\n| conf_dir | Path to dnsmasq configuration directory. | /etc/dnsmasq.d,.dpkg-dist,.dpkg-old,.dpkg-new | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: dnsmasq_dhcp\n leases_path: /var/lib/misc/dnsmasq.leases\n conf_path: /etc/dnsmasq.conf\n conf_dir: /etc/dnsmasq.d\n\n```\n##### Pi-hole\n\nDnsmasq DHCP on Pi-hole.\n\n```yaml\njobs:\n - name: dnsmasq_dhcp\n leases_path: /etc/pihole/dhcp.leases\n conf_path: /etc/dnsmasq.conf\n conf_dir: /etc/dnsmasq.d\n\n```\n", + "overview": "# Dnsmasq DHCP\n\nPlugin: go.d.plugin\nModule: dnsmasq_dhcp\n\n## Overview\n\nThis collector monitors Dnsmasq DHCP leases databases, depending on your configuration.\n\nBy default, it uses:\n\n- `/var/lib/misc/dnsmasq.leases` to read leases.\n- `/etc/dnsmasq.conf` to detect dhcp-ranges.\n- `/etc/dnsmasq.d` to find additional configurations.\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAll configured dhcp-ranges are detected automatically\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/dnsmasq_dhcp.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dnsmasq_dhcp.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| leases_path | Path to dnsmasq DHCP leases file. | /var/lib/misc/dnsmasq.leases | no |\n| conf_path | Path to dnsmasq configuration file. | /etc/dnsmasq.conf | no |\n| conf_dir | Path to dnsmasq configuration directory. | /etc/dnsmasq.d,.dpkg-dist,.dpkg-old,.dpkg-new | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: dnsmasq_dhcp\n leases_path: /var/lib/misc/dnsmasq.leases\n conf_path: /etc/dnsmasq.conf\n conf_dir: /etc/dnsmasq.d\n\n```\n##### Pi-hole\n\nDnsmasq DHCP on Pi-hole.\n\n```yaml\njobs:\n - name: dnsmasq_dhcp\n leases_path: /etc/pihole/dhcp.leases\n conf_path: /etc/dnsmasq.conf\n conf_dir: /etc/dnsmasq.d\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `dnsmasq_dhcp` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m dnsmasq_dhcp\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `dnsmasq_dhcp` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep dnsmasq_dhcp\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep dnsmasq_dhcp /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep dnsmasq_dhcp\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ dnsmasq_dhcp_dhcp_range_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/dnsmasq_dhcp.conf) | dnsmasq_dhcp.dhcp_range_utilization | DHCP range utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Dnsmasq DHCP instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dnsmasq_dhcp.dhcp_ranges | ipv4, ipv6 | ranges |\n| dnsmasq_dhcp.dhcp_hosts | ipv4, ipv6 | hosts |\n\n### Per dhcp range\n\nThese metrics refer to the DHCP range.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| dhcp_range | DHCP range in `START_IP:END_IP` format |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dnsmasq_dhcp.dhcp_range_utilization | used | percentage |\n| dnsmasq_dhcp.dhcp_range_allocated_leases | allocated | leases |\n\n", @@ -3875,7 +3907,7 @@ "most_popular": true }, "overview": "# Docker\n\nPlugin: go.d.plugin\nModule: docker\n\n## Overview\n\nThis collector monitors Docker container states, health status, and more.\n\n\nIt connects to the Docker instance via a TCP or UNIX socket and executes the following commands:\n\n- [System info](https://docs.docker.com/engine/api/v1.43/#tag/System/operation/SystemInfo).\n- [List images](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageList).\n- [List containers](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerList).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nRequires the netdata user to be in the docker group.\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt discovers instances running on localhost by attempting to connect to a known Docker UNIX socket: `/var/run/docker.sock`.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nEnabling `collect_container_size` may result in high CPU usage depending on the version of Docker Engine.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/docker.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/docker.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Docker daemon's listening address. When using a TCP socket, the format is: tcp://[ip]:[port] | unix:///var/run/docker.sock | yes |\n| timeout | Request timeout in seconds. | 2 | no |\n| collect_container_size | Whether to collect container writable layer size. 
| no | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n address: 'unix:///var/run/docker.sock'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 'unix:///var/run/docker.sock'\n\n - name: remote\n address: 'tcp://203.0.113.10:2375'\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/docker.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/docker.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Docker daemon's listening address. When using a TCP socket, the format is: tcp://[ip]:[port] | unix:///var/run/docker.sock | yes |\n| timeout | Request timeout in seconds. | 2 | no |\n| collect_container_size | Whether to collect container writable layer size. | no | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n address: 'unix:///var/run/docker.sock'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 'unix:///var/run/docker.sock'\n\n - name: remote\n address: 'tcp://203.0.113.10:2375'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `docker` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m docker\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `docker` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep docker\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep docker /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep docker\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ docker_container_unhealthy ](https://github.com/netdata/netdata/blob/master/src/health/health.d/docker.conf) | docker.container_health_status | ${label:container_name} docker container health status is unhealthy |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Docker instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| docker.containers_state | running, paused, stopped | containers |\n| docker.containers_health_status | healthy, unhealthy, not_running_unhealthy, starting, no_healthcheck | containers |\n| docker.images | active, dangling | images |\n| docker.images_size | size | bytes |\n\n### Per container\n\nMetrics related to containers. 
Each container provides its own set of the following metrics.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container's name |\n| image | The image name the container uses |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| docker.container_state | running, paused, exited, created, restarting, removing, dead | state |\n| docker.container_health_status | healthy, unhealthy, not_running_unhealthy, starting, no_healthcheck | status |\n| docker.container_writeable_layer_size | writeable_layer | size |\n\n", @@ -3913,7 +3945,7 @@ "most_popular": false }, "overview": "# Docker Engine\n\nPlugin: go.d.plugin\nModule: docker_engine\n\n## Overview\n\nThis collector monitors the activity and health of Docker Engine and Docker Swarm.\n\n\nThe [built-in](https://docs.docker.com/config/daemon/prometheus/) Prometheus exporter is used to get the metrics.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt discovers instances running on localhost by attempting to connect to a known Docker metrics endpoint: `http://127.0.0.1:9323/metrics`.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Enable built-in Prometheus exporter\n\nTo enable built-in Prometheus exporter, follow the [official documentation](https://docs.docker.com/config/daemon/prometheus/#configure-docker).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/docker_engine.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/docker_engine.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9323/metrics | yes |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 1 | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9323/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9323/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nConfiguration with enabled HTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9323/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9323/metrics\n\n - name: remote\n url: http://192.0.2.1:9323/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Enable built-in Prometheus exporter\n\nTo enable built-in Prometheus exporter, follow the [official documentation](https://docs.docker.com/config/daemon/prometheus/#configure-docker).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/docker_engine.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/docker_engine.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9323/metrics | yes |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 1 | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9323/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9323/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nConfiguration with enabled HTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9323/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9323/metrics\n\n - name: remote\n url: http://192.0.2.1:9323/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `docker_engine` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m docker_engine\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `docker_engine` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep docker_engine\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep docker_engine /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep docker_engine\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Docker Engine instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| docker_engine.engine_daemon_container_actions | changes, commit, create, delete, start | actions/s |\n| docker_engine.engine_daemon_container_states_containers | running, paused, stopped | containers |\n| docker_engine.builder_builds_failed_total | build_canceled, build_target_not_reachable_error, command_not_supported_error, dockerfile_empty_error, dockerfile_syntax_error, error_processing_commands_error, missing_onbuild_arguments_error, unknown_instruction_error | fails/s |\n| docker_engine.engine_daemon_health_checks_failed_total | fails | events/s |\n| docker_engine.swarm_manager_leader | is_leader | bool |\n| docker_engine.swarm_manager_object_store | nodes, services, tasks, networks, secrets, configs | objects |\n| docker_engine.swarm_manager_nodes_per_state | ready, down, unknown, disconnected | nodes |\n| docker_engine.swarm_manager_tasks_per_state | running, failed, ready, rejected, starting, shutdown, new, orphaned, preparing, pending, complete, remove, accepted, assigned | tasks |\n\n", @@ -3949,7 +3981,7 @@ "most_popular": false }, "overview": "# Docker Hub repository\n\nPlugin: go.d.plugin\nModule: dockerhub\n\n## Overview\n\nThis collector keeps track of DockerHub repositories statistics such as the number of stars, pulls, current status, and more.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/dockerhub.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dockerhub.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | DockerHub URL. | https://hub.docker.com/v2/repositories | yes |\n| repositories | List of repositories to monitor. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. 
| | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: dockerhub\n repositories:\n - 'user1/name1'\n - 'user2/name2'\n - 'user3/name3'\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/dockerhub.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dockerhub.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | DockerHub URL. | https://hub.docker.com/v2/repositories | yes |\n| repositories | List of repositories to monitor. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: dockerhub\n repositories:\n - 'user1/name1'\n - 'user2/name2'\n - 'user3/name3'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `dockerhub` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m dockerhub\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `dockerhub` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep dockerhub\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep dockerhub /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep dockerhub\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Docker Hub repository instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dockerhub.pulls_sum | sum | pulls |\n| dockerhub.pulls | a dimension per repository | pulls |\n| dockerhub.pulls_rate | a dimension per repository | pulls/s |\n| dockerhub.stars | a dimension per repository | stars |\n| dockerhub.status | a dimension per repository | status |\n| dockerhub.last_updated | a dimension per repository | seconds |\n\n", @@ -3987,7 +4019,7 @@ "most_popular": false }, "overview": "# Dovecot\n\nPlugin: go.d.plugin\nModule: dovecot\n\n## Overview\n\nThis collector monitors Dovecot metrics about sessions, logins, commands, page faults and more.\n\n\nIt reads the server's response to the `EXPORT\\tglobal\\n` command.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAutomatically discovers and collects Dovecot statistics from the following default locations:\n\n- localhost:24242\n- unix:///var/run/dovecot/old-stats\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Enable old_stats plugin\n\nTo enable `old_stats` plugin, see [Old 
Statistics](https://doc.dovecot.org/configuration_manual/stats/old_statistics/#old-statistics).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/dovecot.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dovecot.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | The Unix or TCP socket address where the Dovecot [old_stats](https://doc.dovecot.org/configuration_manual/stats/old_statistics/#old-statistics) plugin listens for connections. | 127.0.0.1:24242 | yes |\n| timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. | 1 | no |\n\n#### Examples\n\n##### Basic (TCP)\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:24242\n\n```\n##### Basic (UNIX)\n\nA basic example configuration using a UNIX socket.\n\n```yaml\njobs:\n - name: local\n address: unix:///var/run/dovecot/old-stats\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:24242\n\n - name: remote\n address: 203.0.113.0:24242\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Enable old_stats plugin\n\nTo enable `old_stats` plugin, see [Old Statistics](https://doc.dovecot.org/configuration_manual/stats/old_statistics/#old-statistics).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/dovecot.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dovecot.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | The Unix or TCP socket address where the Dovecot [old_stats](https://doc.dovecot.org/configuration_manual/stats/old_statistics/#old-statistics) plugin listens for connections. | 127.0.0.1:24242 | yes |\n| timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. 
| 1 | no |\n\n#### Examples\n\n##### Basic (TCP)\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:24242\n\n```\n##### Basic (UNIX)\n\nA basic example configuration using a UNIX socket.\n\n```yaml\njobs:\n - name: local\n address: unix:///var/run/dovecot/old-stats\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:24242\n\n - name: remote\n address: 203.0.113.0:24242\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `dovecot` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m dovecot\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `dovecot` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep dovecot\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep dovecot /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep dovecot\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Dovecot instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dovecot.session | active | sessions |\n| dovecot.logins | logins | logins |\n| dovecot.auth | ok, failed | attempts/s |\n| dovecot.commands | commands | commands |\n| dovecot.context_switches | voluntary, involuntary | switches/s |\n| dovecot.io | read, write | KiB/s |\n| dovecot.net | read, write | kilobits/s |\n| dovecot.syscalls | read, write | syscalls/s |\n| dovecot.lookup | path, attr | lookups/s |\n| dovecot.cache | hits | hits/s |\n| dovecot.auth_cache | hits, misses | requests/s |\n\n", @@ -4035,7 +4067,7 @@ "most_popular": true }, "overview": "# Elasticsearch\n\nPlugin: go.d.plugin\nModule: elasticsearch\n\n## Overview\n\nThis collector monitors the performance and health of the Elasticsearch cluster.\n\n\nIt uses [Cluster APIs](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html) to collect metrics.\n\nUsed endpoints:\n\n| Endpoint | Description | API |\n|------------------------|----------------------|-------------------------------------------------------------------------------------------------------------|\n| `/` | Node info | |\n| `/_nodes/stats` | Nodes metrics | [Nodes stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html) |\n| `/_nodes/_local/stats` | Local node metrics | [Nodes stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html) |\n| `/_cluster/health` | Cluster health stats | [Cluster health API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html) |\n| `/_cluster/stats` | Cluster metrics | [Cluster stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html) |\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by attempting to connect to port 9200:\n\n- http://127.0.0.1:9200\n- https://127.0.0.1:9200\n\n\n#### Limits\n\nBy default, this collector monitors only the node it is connected to. To monitor all cluster nodes, set the `cluster_mode` configuration option to `yes`.\n\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/elasticsearch.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/elasticsearch.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. 
| http://127.0.0.1:9200 | yes |\n| cluster_mode | Controls whether to collect metrics for all nodes in the cluster or only for the local node. | false | no |\n| collect_node_stats | Controls whether to collect nodes metrics. | true | no |\n| collect_cluster_health | Controls whether to collect cluster health metrics. | true | no |\n| collect_cluster_stats | Controls whether to collect cluster stats metrics. | true | no |\n| collect_indices_stats | Controls whether to collect indices metrics. | false | no |\n| timeout | HTTP request timeout. | 2 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic single node mode\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n\n```\n##### Cluster mode\n\nCluster mode example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n cluster_mode: yes\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nElasticsearch with enabled HTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9200\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n\n - name: remote\n url: http://192.0.2.1:9200\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/elasticsearch.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/elasticsearch.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9200 | yes |\n| cluster_mode | Controls whether to collect metrics for all nodes in the cluster or only for the local node. 
| false | no |\n| collect_node_stats | Controls whether to collect nodes metrics. | true | no |\n| collect_cluster_health | Controls whether to collect cluster health metrics. | true | no |\n| collect_cluster_stats | Controls whether to collect cluster stats metrics. | true | no |\n| collect_indices_stats | Controls whether to collect indices metrics. | false | no |\n| timeout | HTTP request timeout. | 2 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic single node mode\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n\n```\n##### Cluster mode\n\nCluster mode example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n cluster_mode: yes\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nElasticsearch with enabled HTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9200\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n\n - name: remote\n url: http://192.0.2.1:9200\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `elasticsearch` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m elasticsearch\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `elasticsearch` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep elasticsearch\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep elasticsearch /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep elasticsearch\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ elasticsearch_node_indices_search_time_query ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_indices_search_time | search performance is degraded, queries run slowly. |\n| [ elasticsearch_node_indices_search_time_fetch ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_indices_search_time | search performance is degraded, fetches run slowly. |\n| [ elasticsearch_cluster_health_status_red ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.cluster_health_status | cluster health status is red. |\n| [ elasticsearch_cluster_health_status_yellow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.cluster_health_status | cluster health status is yellow. |\n| [ elasticsearch_node_index_health_red ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_index_health | node index $label:index health status is red. |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per node\n\nThese metrics refer to the cluster node.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cluster_name | Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). |\n| node_name | Human-readable identifier for the node. Based on the [Node name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#node-name). |\n| host | Network host for the node, based on the [Network host setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#network.host). 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| elasticsearch.node_indices_indexing | index | operations/s |\n| elasticsearch.node_indices_indexing_current | index | operations |\n| elasticsearch.node_indices_indexing_time | index | milliseconds |\n| elasticsearch.node_indices_search | queries, fetches | operations/s |\n| elasticsearch.node_indices_search_current | queries, fetches | operations |\n| elasticsearch.node_indices_search_time | queries, fetches | milliseconds |\n| elasticsearch.node_indices_refresh | refresh | operations/s |\n| elasticsearch.node_indices_refresh_time | refresh | milliseconds |\n| elasticsearch.node_indices_flush | flush | operations/s |\n| elasticsearch.node_indices_flush_time | flush | milliseconds |\n| elasticsearch.node_indices_fielddata_memory_usage | used | bytes |\n| elasticsearch.node_indices_fielddata_evictions | evictions | operations/s |\n| elasticsearch.node_indices_segments_count | segments | segments |\n| elasticsearch.node_indices_segments_memory_usage_total | used | bytes |\n| elasticsearch.node_indices_segments_memory_usage | terms, stored_fields, term_vectors, norms, points, doc_values, index_writer, version_map, fixed_bit_set | bytes |\n| elasticsearch.node_indices_translog_operations | total, uncommitted | operations |\n| elasticsearch.node_indices_translog_size | total, uncommitted | bytes |\n| elasticsearch.node_file_descriptors | open | fd |\n| elasticsearch.node_jvm_heap | inuse | percentage |\n| elasticsearch.node_jvm_heap_bytes | committed, used | bytes |\n| elasticsearch.node_jvm_buffer_pools_count | direct, mapped | pools |\n| elasticsearch.node_jvm_buffer_pool_direct_memory | total, used | bytes |\n| elasticsearch.node_jvm_buffer_pool_mapped_memory | total, used | bytes |\n| elasticsearch.node_jvm_gc_count | young, old | gc/s |\n| elasticsearch.node_jvm_gc_time | young, old | milliseconds |\n| elasticsearch.node_thread_pool_queued | generic, search, search_throttled, get, analyze, write, snapshot, warmer, refresh, listener, fetch_shard_started, fetch_shard_store, flush, force_merge, management | threads |\n| elasticsearch.node_thread_pool_rejected | generic, search, search_throttled, get, analyze, write, snapshot, warmer, refresh, listener, fetch_shard_started, fetch_shard_store, flush, force_merge, management | threads |\n| elasticsearch.node_cluster_communication_packets | received, sent | pps |\n| elasticsearch.node_cluster_communication_traffic | received, sent | bytes/s |\n| elasticsearch.node_http_connections | open | connections |\n| elasticsearch.node_breakers_trips | requests, fielddata, in_flight_requests, model_inference, accounting, parent | trips/s |\n\n### Per cluster\n\nThese metrics refer to the cluster.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cluster_name | Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| elasticsearch.cluster_health_status | green, yellow, red | status |\n| elasticsearch.cluster_number_of_nodes | nodes, data_nodes | nodes |\n| elasticsearch.cluster_shards_count | active_primary, active, relocating, initializing, unassigned, delayed_unassigned | shards |\n| elasticsearch.cluster_pending_tasks | pending | tasks |\n| elasticsearch.cluster_number_of_in_flight_fetch | in_flight_fetch | fetches |\n| elasticsearch.cluster_indices_count | indices | indices |\n| elasticsearch.cluster_indices_shards_count | total, primaries, replication | shards |\n| elasticsearch.cluster_indices_docs_count | docs | docs |\n| elasticsearch.cluster_indices_store_size | size | bytes |\n| elasticsearch.cluster_indices_query_cache | hit, miss | events/s |\n| elasticsearch.cluster_nodes_by_role_count | coordinating_only, data, data_cold, data_content, data_frozen, data_hot, data_warm, ingest, master, ml, remote_cluster_client, voting_only | nodes |\n\n### Per index\n\nThese metrics refer to the index.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cluster_name | Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). |\n| index | Name of the index. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| elasticsearch.node_index_health | green, yellow, red | status |\n| elasticsearch.node_index_shards_count | shards | shards |\n| elasticsearch.node_index_docs_count | docs | docs |\n| elasticsearch.node_index_store_size | store_size | bytes |\n\n", @@ -4083,7 +4115,7 @@ "most_popular": true }, "overview": "# OpenSearch\n\nPlugin: go.d.plugin\nModule: elasticsearch\n\n## Overview\n\nThis collector monitors the performance and health of the Elasticsearch cluster.\n\n\nIt uses [Cluster APIs](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html) to collect metrics.\n\nUsed endpoints:\n\n| Endpoint | Description | API |\n|------------------------|----------------------|-------------------------------------------------------------------------------------------------------------|\n| `/` | Node info | |\n| `/_nodes/stats` | Nodes metrics | [Nodes stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html) |\n| `/_nodes/_local/stats` | Local node metrics | [Nodes stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html) |\n| `/_cluster/health` | Cluster health stats | [Cluster health API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html) |\n| `/_cluster/stats` | Cluster metrics | [Cluster stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html) |\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by attempting to connect to port 9200:\n\n- http://127.0.0.1:9200\n- https://127.0.0.1:9200\n\n\n#### Limits\n\nBy default, this collector monitors only the node it is connected to. 
To monitor all cluster nodes, set the `cluster_mode` configuration option to `yes`.\n\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/elasticsearch.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/elasticsearch.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9200 | yes |\n| cluster_mode | Controls whether to collect metrics for all nodes in the cluster or only for the local node. | false | no |\n| collect_node_stats | Controls whether to collect nodes metrics. | true | no |\n| collect_cluster_health | Controls whether to collect cluster health metrics. | true | no |\n| collect_cluster_stats | Controls whether to collect cluster stats metrics. | true | no |\n| collect_indices_stats | Controls whether to collect indices metrics. | false | no |\n| timeout | HTTP request timeout. | 2 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n#### Examples\n\n##### Basic single node mode\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n\n```\n##### Cluster mode\n\nCluster mode example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n cluster_mode: yes\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nElasticsearch with enabled HTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9200\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n\n - name: remote\n url: http://192.0.2.1:9200\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/elasticsearch.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/elasticsearch.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9200 | yes |\n| cluster_mode | Controls whether to collect metrics for all nodes in the cluster or only for the local node. | false | no |\n| collect_node_stats | Controls whether to collect nodes metrics. | true | no |\n| collect_cluster_health | Controls whether to collect cluster health metrics. | true | no |\n| collect_cluster_stats | Controls whether to collect cluster stats metrics. | true | no |\n| collect_indices_stats | Controls whether to collect indices metrics. | false | no |\n| timeout | HTTP request timeout. | 2 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n#### Examples\n\n##### Basic single node mode\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n\n```\n##### Cluster mode\n\nCluster mode example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n cluster_mode: yes\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nElasticsearch with enabled HTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9200\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n\n - name: remote\n url: http://192.0.2.1:9200\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `elasticsearch` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m elasticsearch\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `elasticsearch` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep elasticsearch\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep elasticsearch /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep elasticsearch\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ elasticsearch_node_indices_search_time_query ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_indices_search_time | search performance is degraded, queries run slowly. 
|\n| [ elasticsearch_node_indices_search_time_fetch ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_indices_search_time | search performance is degraded, fetches run slowly. |\n| [ elasticsearch_cluster_health_status_red ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.cluster_health_status | cluster health status is red. |\n| [ elasticsearch_cluster_health_status_yellow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.cluster_health_status | cluster health status is yellow. |\n| [ elasticsearch_node_index_health_red ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_index_health | node index $label:index health status is red. |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per node\n\nThese metrics refer to the cluster node.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cluster_name | Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). |\n| node_name | Human-readable identifier for the node. Based on the [Node name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#node-name). |\n| host | Network host for the node, based on the [Network host setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#network.host). |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| elasticsearch.node_indices_indexing | index | operations/s |\n| elasticsearch.node_indices_indexing_current | index | operations |\n| elasticsearch.node_indices_indexing_time | index | milliseconds |\n| elasticsearch.node_indices_search | queries, fetches | operations/s |\n| elasticsearch.node_indices_search_current | queries, fetches | operations |\n| elasticsearch.node_indices_search_time | queries, fetches | milliseconds |\n| elasticsearch.node_indices_refresh | refresh | operations/s |\n| elasticsearch.node_indices_refresh_time | refresh | milliseconds |\n| elasticsearch.node_indices_flush | flush | operations/s |\n| elasticsearch.node_indices_flush_time | flush | milliseconds |\n| elasticsearch.node_indices_fielddata_memory_usage | used | bytes |\n| elasticsearch.node_indices_fielddata_evictions | evictions | operations/s |\n| elasticsearch.node_indices_segments_count | segments | segments |\n| elasticsearch.node_indices_segments_memory_usage_total | used | bytes |\n| elasticsearch.node_indices_segments_memory_usage | terms, stored_fields, term_vectors, norms, points, doc_values, index_writer, version_map, fixed_bit_set | bytes |\n| elasticsearch.node_indices_translog_operations | total, uncommitted | operations |\n| elasticsearch.node_indices_translog_size | total, uncommitted | bytes |\n| elasticsearch.node_file_descriptors | open | fd |\n| elasticsearch.node_jvm_heap | inuse | percentage |\n| elasticsearch.node_jvm_heap_bytes | committed, used | bytes |\n| elasticsearch.node_jvm_buffer_pools_count | direct, mapped | pools |\n| elasticsearch.node_jvm_buffer_pool_direct_memory | total, used | bytes |\n| elasticsearch.node_jvm_buffer_pool_mapped_memory | total, used | bytes |\n| elasticsearch.node_jvm_gc_count | 
young, old | gc/s |\n| elasticsearch.node_jvm_gc_time | young, old | milliseconds |\n| elasticsearch.node_thread_pool_queued | generic, search, search_throttled, get, analyze, write, snapshot, warmer, refresh, listener, fetch_shard_started, fetch_shard_store, flush, force_merge, management | threads |\n| elasticsearch.node_thread_pool_rejected | generic, search, search_throttled, get, analyze, write, snapshot, warmer, refresh, listener, fetch_shard_started, fetch_shard_store, flush, force_merge, management | threads |\n| elasticsearch.node_cluster_communication_packets | received, sent | pps |\n| elasticsearch.node_cluster_communication_traffic | received, sent | bytes/s |\n| elasticsearch.node_http_connections | open | connections |\n| elasticsearch.node_breakers_trips | requests, fielddata, in_flight_requests, model_inference, accounting, parent | trips/s |\n\n### Per cluster\n\nThese metrics refer to the cluster.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cluster_name | Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| elasticsearch.cluster_health_status | green, yellow, red | status |\n| elasticsearch.cluster_number_of_nodes | nodes, data_nodes | nodes |\n| elasticsearch.cluster_shards_count | active_primary, active, relocating, initializing, unassigned, delayed_unassigned | shards |\n| elasticsearch.cluster_pending_tasks | pending | tasks |\n| elasticsearch.cluster_number_of_in_flight_fetch | in_flight_fetch | fetches |\n| elasticsearch.cluster_indices_count | indices | indices |\n| elasticsearch.cluster_indices_shards_count | total, primaries, replication | shards |\n| elasticsearch.cluster_indices_docs_count | docs | docs |\n| elasticsearch.cluster_indices_store_size | size | bytes |\n| elasticsearch.cluster_indices_query_cache | hit, miss | events/s |\n| elasticsearch.cluster_nodes_by_role_count | coordinating_only, data, data_cold, data_content, data_frozen, data_hot, data_warm, ingest, master, ml, remote_cluster_client, voting_only | nodes |\n\n### Per index\n\nThese metrics refer to the index.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cluster_name | Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). |\n| index | Name of the index. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| elasticsearch.node_index_health | green, yellow, red | status |\n| elasticsearch.node_index_shards_count | shards | shards |\n| elasticsearch.node_index_docs_count | docs | docs |\n| elasticsearch.node_index_store_size | store_size | bytes |\n\n", @@ -4125,7 +4157,7 @@ "most_popular": true }, "overview": "# Envoy\n\nPlugin: go.d.plugin\nModule: envoy\n\n## Overview\n\nThis collector monitors Envoy proxies. 
It collects server, cluster, and listener metrics.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Envoy instances running on localhost.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/envoy.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/envoy.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9091/stats/prometheus | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9901/stats/prometheus\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9901/stats/prometheus\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9901/stats/prometheus\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9901/stats/prometheus\n\n - name: remote\n url: http://192.0.2.1:9901/stats/prometheus\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/envoy.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/envoy.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9091/stats/prometheus | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9901/stats/prometheus\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9901/stats/prometheus\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9901/stats/prometheus\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9901/stats/prometheus\n\n - name: remote\n url: http://192.0.2.1:9901/stats/prometheus\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `envoy` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m envoy\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `envoy` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep envoy\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep envoy /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep envoy\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Envoy instance\n\nEnvoy exposes metrics in Prometheus format. 
All metric labels are added to charts.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| envoy.server_state | live, draining, pre_initializing, initializing | state |\n| envoy.server_connections_count | connections | connections |\n| envoy.server_parent_connections_count | connections | connections |\n| envoy.server_memory_allocated_size | allocated | bytes |\n| envoy.server_memory_heap_size | heap | bytes |\n| envoy.server_memory_physical_size | physical | bytes |\n| envoy.server_uptime | uptime | seconds |\n| envoy.cluster_manager_cluster_count | active, not_active | clusters |\n| envoy.cluster_manager_cluster_changes_rate | added, modified, removed | clusters/s |\n| envoy.cluster_manager_cluster_updates_rate | cluster | updates/s |\n| envoy.cluster_manager_cluster_updated_via_merge_rate | via_merge | updates/s |\n| envoy.cluster_manager_update_merge_cancelled_rate | merge_cancelled | updates/s |\n| envoy.cluster_manager_update_out_of_merge_window_rate | out_of_merge_window | updates/s |\n| envoy.cluster_membership_endpoints_count | healthy, degraded, excluded | endpoints |\n| envoy.cluster_membership_changes_rate | membership | changes/s |\n| envoy.cluster_membership_updates_rate | success, failure, empty, no_rebuild | updates/s |\n| envoy.cluster_upstream_cx_active_count | active | connections |\n| envoy.cluster_upstream_cx_rate | created | connections/s |\n| envoy.cluster_upstream_cx_http_rate | http1, http2, http3 | connections/s |\n| envoy.cluster_upstream_cx_destroy_rate | local, remote | connections/s |\n| envoy.cluster_upstream_cx_connect_fail_rate | failed | connections/s |\n| envoy.cluster_upstream_cx_connect_timeout_rate | timeout | connections/s |\n| envoy.cluster_upstream_cx_bytes_rate | received, sent | bytes/s |\n| envoy.cluster_upstream_cx_bytes_buffered_size | received, send | bytes |\n| envoy.cluster_upstream_rq_active_count | active | requests |\n| envoy.cluster_upstream_rq_rate | requests | requests/s |\n| envoy.cluster_upstream_rq_failed_rate | cancelled, maintenance_mode, timeout, max_duration_reached, per_try_timeout, reset_local, reset_remote | requests/s |\n| envoy.cluster_upstream_rq_pending_active_count | active_pending | requests |\n| envoy.cluster_upstream_rq_pending_rate | pending | requests/s |\n| envoy.cluster_upstream_rq_pending_failed_rate | overflow, failure_eject | requests/s |\n| envoy.cluster_upstream_rq_retry_rate | request | retries/s |\n| envoy.cluster_upstream_rq_retry_success_rate | success | retries/s |\n| envoy.cluster_upstream_rq_retry_backoff_rate | exponential, ratelimited | retries/s |\n| envoy.listener_manager_listeners_count | active, warming, draining | listeners |\n| envoy.listener_manager_listener_changes_rate | added, modified, removed, stopped | listeners/s |\n| envoy.listener_manager_listener_object_events_rate | create_success, create_failure, in_place_updated | objects/s |\n| envoy.listener_admin_downstream_cx_active_count | active | connections |\n| envoy.listener_admin_downstream_cx_rate | created | connections/s |\n| envoy.listener_admin_downstream_cx_destroy_rate | destroyed | connections/s |\n| envoy.listener_admin_downstream_cx_transport_socket_connect_timeout_rate | timeout | connections/s |\n| envoy.listener_admin_downstream_cx_rejected_rate | overflow, overload, global_overflow | connections/s |\n| envoy.listener_admin_downstream_listener_filter_remote_close_rate | closed | connections/s |\n| envoy.listener_admin_downstream_listener_filter_error_rate | read | 
errors/s |\n| envoy.listener_admin_downstream_pre_cx_active_count | active | sockets |\n| envoy.listener_admin_downstream_pre_cx_timeout_rate | timeout | sockets/s |\n| envoy.listener_downstream_cx_active_count | active | connections |\n| envoy.listener_downstream_cx_rate | created | connections/s |\n| envoy.listener_downstream_cx_destroy_rate | destroyed | connections/s |\n| envoy.listener_downstream_cx_transport_socket_connect_timeout_rate | timeout | connections/s |\n| envoy.listener_downstream_cx_rejected_rate | overflow, overload, global_overflow | connections/s |\n| envoy.listener_downstream_listener_filter_remote_close_rate | closed | connections/s |\n| envoy.listener_downstream_listener_filter_error_rate | read | errors/s |\n| envoy.listener_downstream_pre_cx_active_count | active | sockets |\n| envoy.listener_downstream_pre_cx_timeout_rate | timeout | sockets/s |\n\n", @@ -4163,7 +4195,7 @@ "most_popular": false }, "overview": "# Exim\n\nPlugin: go.d.plugin\nModule: exim\n\n## Overview\n\nThis collector monitors Exim mail queue. It relies on the [`exim`](https://www.exim.org/exim-html-3.20/doc/html/spec_5.html) CLI tool but avoids directly executing the binary. Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment. This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\nExecuted commands:\n- `exim -bpc`\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/exim.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/exim.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | exim binary execution timeout. 
| 2 | no |\n\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n```yaml\njobs:\n - name: exim\n update_every: 5 # Collect logical volume statistics every 5 seconds\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/exim.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/exim.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | exim binary execution timeout. | 2 | no |\n\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n```yaml\njobs:\n - name: exim\n update_every: 5 # Collect Exim mail queue statistics every 5 seconds\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `exim` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m exim\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `exim` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep exim\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep exim /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep exim\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Exim instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exim.qemails | emails | emails |\n\n", @@ -4201,8 +4233,8 @@ }, "most_popular": false }, - "overview": "# Fail2ban\n\nPlugin: go.d.plugin\nModule: fail2ban\n\n## Overview\n\nThis collector tracks two main metrics for each jail: currently banned IPs and active failure incidents. It relies on the [`fail2ban-client`](https://linux.die.net/man/1/fail2ban-client) CLI tool but avoids directly executing the binary. Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment. This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### For Netdata running in a Docker container\n\n1. **Install Fail2ban client**.\n\n Ensure `fail2ban-client` is available in the container by setting the environment variable `NETDATA_EXTRA_DEB_PACKAGES=fail2ban` when starting the container.\n\n2. **Mount host's `/var/run` directory**.\n\n Mount the host machine's `/var/run` directory to `/host/var/run` inside your Netdata container. This grants Netdata access to the Fail2ban socket file, typically located at `/var/run/fail2ban/fail2ban.sock`.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/fail2ban.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/fail2ban.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | fail2ban-client binary execution timeout. | 2 | no |\n\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n```yaml\njobs:\n - name: fail2ban\n update_every: 5 # Collect Fail2Ban jails statistics every 5 seconds\n\n```\n", + "overview": "# Fail2ban\n\nPlugin: go.d.plugin\nModule: fail2ban\n\n## Overview\n\nThis collector tracks two main metrics for each jail: currently banned IPs and active failure incidents. It relies on the [`fail2ban-client`](https://linux.die.net/man/1/fail2ban-client) CLI tool but avoids directly executing the binary. Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment. 
This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### For Netdata running in a Docker container\n\n1. **Install Fail2ban client**.\n\n Ensure `fail2ban-client` is available in the container by setting the environment variable `NETDATA_EXTRA_DEB_PACKAGES=fail2ban` when starting the container.\n\n2. **Mount host's `/var/run` directory**.\n\n Mount the host machine's `/var/run` directory to `/host/var/run` inside your Netdata container. This grants Netdata access to the Fail2ban socket file, typically located at `/var/run/fail2ban/fail2ban.sock`.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/fail2ban.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/fail2ban.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | fail2ban-client binary execution timeout. | 2 | no |\n\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n```yaml\njobs:\n - name: fail2ban\n update_every: 5 # Collect Fail2Ban jails statistics every 5 seconds\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `fail2ban` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m fail2ban\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `fail2ban` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep fail2ban\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep fail2ban /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep fail2ban\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per jail\n\nThese metrics refer to the Jail.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| jail | Jail's name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| fail2ban.jail_banned_ips | banned | addresses |\n| fail2ban.jail_active_failures | active_failures | failures |\n\n", @@ -4239,7 +4271,7 @@ "most_popular": false }, "overview": "# Files and directories\n\nPlugin: go.d.plugin\nModule: filecheck\n\n## Overview\n\nThis collector monitors the existence, last modification time, and size of arbitrary files and directories on the system.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThis collector requires the DAC_READ_SEARCH capability when monitoring files not normally accessible to the Netdata user, but it is set automatically during installation, so no manual configuration is needed.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/filecheck.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/filecheck.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| files | List of files to monitor. | | yes |\n| dirs | List of directories to monitor. | | yes |\n| discovery_every | Files and directories discovery interval. 
| 60 | no |\n\n##### files\n\nFiles matching the selector will be monitored.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match)\n- Syntax:\n\n```yaml\nfiles:\n includes:\n - pattern1\n - pattern2\n excludes:\n - pattern3\n - pattern4\n```\n\n\n##### dirs\n\nDirectories matching the selector will be monitored.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match)\n- Syntax:\n\n```yaml\ndirs:\n includes:\n - pattern1\n - pattern2\n excludes:\n - pattern3\n - pattern4\n```\n\n\n#### Examples\n\n##### Files\n\nFiles monitoring example configuration.\n\n```yaml\njobs:\n - name: files_example\n files:\n include:\n - '/path/to/file1'\n - '/path/to/file2'\n - '/path/to/*.log'\n\n```\n##### Directories\n\nDirectories monitoring example configuration.\n\n```yaml\njobs:\n - name: files_example\n dirs:\n collect_dir_size: no\n include:\n - '/path/to/dir1'\n - '/path/to/dir2'\n - '/path/to/dir3*'\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/filecheck.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/filecheck.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| files | List of files to monitor. | | yes |\n| dirs | List of directories to monitor. | | yes |\n| discovery_every | Files and directories discovery interval. 
| 60 | no |\n\n##### files\n\nFiles matching the selector will be monitored.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match)\n- Syntax:\n\n```yaml\nfiles:\n include:\n - pattern1\n - pattern2\n exclude:\n - pattern3\n - pattern4\n```\n\n\n##### dirs\n\nDirectories matching the selector will be monitored.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match)\n- Syntax:\n\n```yaml\ndirs:\n include:\n - pattern1\n - pattern2\n exclude:\n - pattern3\n - pattern4\n```\n\n\n#### Examples\n\n##### Files\n\nFiles monitoring example configuration.\n\n```yaml\njobs:\n - name: files_example\n files:\n include:\n - '/path/to/file1'\n - '/path/to/file2'\n - '/path/to/*.log'\n\n```\n##### Directories\n\nDirectories monitoring example configuration.\n\n```yaml\njobs:\n - name: files_example\n dirs:\n collect_dir_size: no\n include:\n - '/path/to/dir1'\n - '/path/to/dir2'\n - '/path/to/dir3*'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `filecheck` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m filecheck\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `filecheck` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep filecheck\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep filecheck /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep filecheck\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per file\n\nThese metrics refer to the File.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| file_path | File absolute path |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filecheck.file_existence_status | exist, not_exist | status |\n| filecheck.file_modification_time_ago | mtime_ago | seconds |\n| filecheck.file_size_bytes | size | bytes |\n\n### Per directory\n\nThese metrics refer to the Directory.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| dir_path | Directory absolute path |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filecheck.dir_existence_status | exist, not_exist | status |\n| filecheck.dir_modification_time_ago | mtime_ago | seconds |\n| filecheck.dir_size_bytes | size | bytes |\n| filecheck.dir_files_count | files | files |\n\n", @@ -4276,7 +4308,7 @@ "most_popular": false }, "overview": "# Fluentd\n\nPlugin: go.d.plugin\nModule: fluentd\n\n## Overview\n\nThis collector monitors Fluentd servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Enable monitor agent\n\nTo enable monitor agent, follow the [official documentation](https://docs.fluentd.org/v1.0/articles/monitoring-rest-api).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/fluentd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/fluentd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:24220 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:24220\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:24220\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nFluentd with enabled HTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:24220\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:24220\n\n - name: remote\n url: http://192.0.2.1:24220\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Enable monitor agent\n\nTo enable monitor agent, follow the [official documentation](https://docs.fluentd.org/v1.0/articles/monitoring-rest-api).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/fluentd.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/fluentd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:24220 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:24220\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:24220\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nFluentd with enabled HTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:24220\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:24220\n\n - name: remote\n url: http://192.0.2.1:24220\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `fluentd` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m fluentd\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `fluentd` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep fluentd\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep fluentd /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep fluentd\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Fluentd instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| fluentd.retry_count | a dimension per plugin | count |\n| fluentd.buffer_queue_length | a dimension per plugin | queue_length |\n| fluentd.buffer_total_queued_size | a dimension per plugin | queued_size |\n\n", @@ -4313,7 +4345,7 @@ } }, "overview": "# FreeRADIUS\n\nPlugin: go.d.plugin\nModule: freeradius\n\n## Overview\n\nThis collector monitors FreeRADIUS servers.\n\nIt collects metrics by sending [status-server](https://wiki.freeradius.org/config/Status) messages to the server.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt automatically detects FreeRADIUS instances running on localhost.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Enable status server\n\nTo enable status server, follow the [official documentation](https://wiki.freeradius.org/config/Status).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/freeradius.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/freeradius.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address. | 127.0.0.1 | yes |\n| port | Server port. | 18121 | no |\n| secret | FreeRADIUS secret. 
| adminsecret | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1\n port: 18121\n secert: adminsecret\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1\n port: 18121\n secert: adminsecret\n\n - name: remote\n address: 192.0.2.1\n port: 18121\n secert: adminsecret\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Enable status server\n\nTo enable status server, follow the [official documentation](https://wiki.freeradius.org/config/Status).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/freeradius.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/freeradius.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address. | 127.0.0.1 | yes |\n| port | Server port. | 18121 | no |\n| secret | FreeRADIUS secret. | adminsecret | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1\n port: 18121\n secret: adminsecret\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1\n port: 18121\n secret: adminsecret\n\n - name: remote\n address: 192.0.2.1\n port: 18121\n secret: adminsecret\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `freeradius` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m freeradius\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `freeradius` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep freeradius\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep freeradius /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep freeradius\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per FreeRADIUS instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| freeradius.authentication | requests, responses | packets/s |\n| freeradius.authentication_access_responses | accepts, rejects, challenges | packets/s |\n| freeradius.bad_authentication | dropped, duplicate, invalid, malformed, unknown-types | packets/s |\n| freeradius.proxy_authentication | requests, responses | packets/s |\n| freeradius.proxy_authentication_access_responses | accepts, rejects, challenges | packets/s |\n| freeradius.proxy_bad_authentication | dropped, duplicate, invalid, malformed, unknown-types | packets/s |\n| freeradius.accounting | requests, responses | packets/s |\n| freeradius.bad_accounting | dropped, duplicate, invalid, malformed, unknown-types | packets/s |\n| freeradius.proxy_accounting | requests, responses | packets/s |\n| freeradius.proxy_bad_accounting | dropped, duplicate, invalid, malformed, unknown-types | packets/s |\n\n", @@ -4349,7 +4381,7 @@ "most_popular": false }, "overview": "# Gearman\n\nPlugin: go.d.plugin\nModule: gearman\n\n## Overview\n\nMonitors job activity, priority, and available workers. 
It collects summary and function-specific statistics.\n\n\nThis collector connects to a Gearman instance via TCP socket and executes the following commands:\n\n- status\n- priority-status\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Gearman instances running on localhost that are listening on port 4730.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/gearman.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/gearman.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | The IP address and port where the Gearman service listens for connections. | 127.0.0.1:11211 | yes |\n| timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. | 1 | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:4730\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:4730\n\n - name: remote\n address: 203.0.113.0:4730\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/gearman.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/gearman.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | The IP address and port where the Gearman service listens for connections. | 127.0.0.1:4730 | yes |\n| timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. 
| 1 | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:4730\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:4730\n\n - name: remote\n address: 203.0.113.0:4730\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `gearman` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m gearman\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `gearman` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep gearman\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep gearman /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep gearman\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Gearman instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| gearman.queued_jobs_activity | running, waiting | jobs |\n| gearman.queued_jobs_priority | high, normal, low | jobs |\n\n### Per function\n\nThese metrics refer to the Function (task).\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| function_name | Function name. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| gearman.function_queued_jobs_activity | running, waiting | jobs |\n| gearman.function_queued_jobs_priority | high, normal, low | jobs |\n| gearman.function_workers | available | workers |\n\n", @@ -4392,7 +4424,7 @@ "most_popular": true }, "overview": "# Go-ethereum\n\nPlugin: go.d.plugin\nModule: geth\n\n## Overview\n\nThis collector monitors Go-ethereum instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Go-ethereum instances running on localhost.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/geth.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/geth.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:6060/debug/metrics/prometheus | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:6060/debug/metrics/prometheus\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:6060/debug/metrics/prometheus\n username: username\n password: password\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:6060/debug/metrics/prometheus\n\n - name: remote\n url: http://192.0.2.1:6060/debug/metrics/prometheus\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/geth.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/geth.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:6060/debug/metrics/prometheus | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:6060/debug/metrics/prometheus\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:6060/debug/metrics/prometheus\n username: username\n password: password\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:6060/debug/metrics/prometheus\n\n - name: remote\n url: http://192.0.2.1:6060/debug/metrics/prometheus\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `geth` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m geth\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `geth` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep geth\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep geth /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep geth\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Go-ethereum instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| geth.eth_db_chaindata_ancient_io_rate | reads, writes | bytes/s |\n| geth.eth_db_chaindata_ancient_io | reads, writes | bytes |\n| geth.eth_db_chaindata_disk_io | reads, writes | bytes |\n| geth.goroutines | goroutines | goroutines |\n| geth.eth_db_chaindata_disk_io_rate | reads, writes | bytes/s |\n| geth.chaindata_db_size | level_db, ancient_db | bytes |\n| geth.chainhead | block, receipt, header | block |\n| geth.tx_pool_pending | invalid, pending, local, discard, no_funds, ratelimit, replace | transactions |\n| geth.tx_pool_current | invalid, pending, local, pool | transactions |\n| geth.tx_pool_queued | discard, eviction, no_funds, ratelimit | transactions |\n| geth.p2p_bandwidth | ingress, egress | bytes/s |\n| geth.reorgs | executed | reorgs |\n| geth.reorgs_blocks | added, dropped | blocks |\n| geth.p2p_peers | peers | peers |\n| geth.p2p_peers_calls | dials, serves | calls/s |\n| geth.rpc_calls | failed, successful | calls/s |\n\n", @@ -4432,7 +4464,7 @@ "most_popular": false }, "overview": "# HAProxy\n\nPlugin: go.d.plugin\nModule: haproxy\n\n## Overview\n\nThis collector monitors HAProxy servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Enable PROMEX addon.\n\nTo enable PROMEX addon, follow the [official documentation](https://github.com/haproxy/haproxy/tree/master/addons/promex).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/haproxy.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/haproxy.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. 
| no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8404/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8404/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nNGINX Plus with enabled HTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8404/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8404/metrics\n\n - name: remote\n url: http://192.0.2.1:8404/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Enable PROMEX addon.\n\nTo enable PROMEX addon, follow the [official documentation](https://github.com/haproxy/haproxy/tree/master/addons/promex).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/haproxy.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/haproxy.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8404/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8404/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nHAProxy with enabled HTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8404/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8404/metrics\n\n - name: remote\n url: http://192.0.2.1:8404/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `haproxy` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m haproxy\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `haproxy` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep haproxy\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep haproxy /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep haproxy\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per HAProxy instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| haproxy.backend_current_sessions | a dimension per proxy | sessions |\n| haproxy.backend_sessions | a dimension per proxy | sessions/s |\n| haproxy.backend_response_time_average | a dimension per proxy | milliseconds |\n| haproxy.backend_queue_time_average | a dimension per proxy | milliseconds |\n| haproxy.backend_current_queue | a dimension per proxy | requests |\n\n### Per proxy\n\nThese metrics refer to the Proxy.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| haproxy.backend_http_responses | 1xx, 2xx, 3xx, 4xx, 5xx, other | responses/s |\n| haproxy.backend_network_io | in, out | bytes/s |\n\n", @@ -4471,7 +4503,7 @@ "most_popular": false }, "overview": "# HDD temperature\n\nPlugin: go.d.plugin\nModule: hddtemp\n\n## Overview\n\nThis collector monitors disk temperatures.\n\n\nIt retrieves temperature data for attached disks by querying the hddtemp daemon at regular intervals.\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, this collector will attempt to connect to the `hddtemp` daemon on `127.0.0.1:7634`\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install hddtemp\n\nInstall `hddtemp` using your distribution's package manager.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/hddtemp.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/hddtemp.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | The IP address and port where the hddtemp daemon listens for connections. | 127.0.0.1:7634 | yes |\n| timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. 
| 1 | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:7634\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:7634\n\n - name: remote\n address: 203.0.113.0:7634\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install hddtemp\n\nInstall `hddtemp` using your distribution's package manager.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/hddtemp.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/hddtemp.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | The IP address and port where the hddtemp daemon listens for connections. | 127.0.0.1:7634 | yes |\n| timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. | 1 | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:7634\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:7634\n\n - name: remote\n address: 203.0.113.0:7634\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `hddtemp` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m hddtemp\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `hddtemp` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep hddtemp\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep hddtemp /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep hddtemp\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per disk\n\nThese metrics refer to the Disk.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk_id | Disk identifier. It is derived from the device path (e.g. sda or ata-HUP722020APA330_BFJ0WS3F) |\n| model | Disk model |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hddtemp.disk_temperature | temperature | Celsius |\n| hddtemp.disk_temperature_sensor_status | ok, err, na, unk, nos, slp | status |\n\n", @@ -4508,7 +4540,7 @@ "most_popular": true }, "overview": "# Hadoop Distributed File System (HDFS)\n\nPlugin: go.d.plugin\nModule: hfs\n\n## Overview\n\nThis collector monitors HDFS nodes.\n\nNetdata accesses HDFS metrics over `Java Management Extensions` (JMX) through the web interface of an HDFS daemon.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/hdfs.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/hdfs.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9870/jmx | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. 
| | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9870/jmx\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9870/jmx\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9870/jmx\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9870/jmx\n\n - name: remote\n url: http://192.0.2.1:9870/jmx\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/hdfs.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/hdfs.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9870/jmx | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9870/jmx\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9870/jmx\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9870/jmx\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9870/jmx\n\n - name: remote\n url: http://192.0.2.1:9870/jmx\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `hfs` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m hfs\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `hfs` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep hfs\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep hfs /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
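> **Note**: Before digging deeper into collector logs, it can help to confirm that the HDFS JMX endpoint itself answers. A minimal sketch, assuming the default NameNode URL from the examples above; adjust host and port for your NameNode or DataNode:

```bash
# Fetch the start of the JMX JSON document; an HTTP error or empty
# reply points at the HDFS side rather than at the collector.
curl -s http://127.0.0.1:9870/jmx | head -c 300; echo
```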
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep hfs\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ hdfs_capacity_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf) | hdfs.capacity | summary datanodes space capacity utilization |\n| [ hdfs_missing_blocks ](https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf) | hdfs.blocks | number of missing blocks |\n| [ hdfs_stale_nodes ](https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf) | hdfs.data_nodes | number of datanodes marked stale due to delayed heartbeat |\n| [ hdfs_dead_nodes ](https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf) | hdfs.data_nodes | number of datanodes which are currently dead |\n| [ hdfs_num_failed_volumes ](https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf) | hdfs.num_failed_volumes | number of failed volumes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Hadoop Distributed File System (HDFS) instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | DataNode | NameNode |\n|:------|:----------|:----|:---:|:---:|\n| hdfs.heap_memory | committed, used | MiB | \u2022 | \u2022 |\n| hdfs.gc_count_total | gc | events/s | \u2022 | \u2022 |\n| hdfs.gc_time_total | ms | ms | \u2022 | \u2022 |\n| hdfs.gc_threshold | info, warn | events/s | \u2022 | \u2022 |\n| hdfs.threads | new, runnable, blocked, waiting, timed_waiting, terminated | num | \u2022 | \u2022 |\n| hdfs.logs_total | info, error, warn, fatal | logs/s | \u2022 | \u2022 |\n| hdfs.rpc_bandwidth | received, sent | kilobits/s | \u2022 | \u2022 |\n| hdfs.rpc_calls | calls | calls/s | \u2022 | \u2022 |\n| hdfs.open_connections | open | connections | \u2022 | \u2022 |\n| hdfs.call_queue_length | length | num | \u2022 | \u2022 |\n| hdfs.avg_queue_time | time | ms | \u2022 | \u2022 |\n| hdfs.avg_processing_time | time | ms | \u2022 | \u2022 |\n| hdfs.capacity | remaining, used | KiB | | \u2022 |\n| hdfs.used_capacity | dfs, non_dfs | KiB | | \u2022 |\n| hdfs.load | load | load | | \u2022 |\n| hdfs.volume_failures_total | failures | events/s | | \u2022 |\n| hdfs.files_total | files | num | | \u2022 |\n| hdfs.blocks_total | blocks | num | | \u2022 |\n| hdfs.blocks | corrupt, missing, under_replicated | num | | \u2022 |\n| hdfs.data_nodes | live, dead, stale | num | | \u2022 |\n| hdfs.datanode_capacity | remaining, used | KiB | \u2022 | |\n| hdfs.datanode_used_capacity | dfs, non_dfs | KiB | \u2022 | |\n| hdfs.datanode_failed_volumes | failed volumes | num | \u2022 | |\n| hdfs.datanode_bandwidth | reads, writes | KiB/s | \u2022 | |\n\n", @@ -4548,7 +4580,7 @@ "most_popular": false }, "overview": "# HPE Smart Arrays\n\nPlugin: go.d.plugin\nModule: hpssa\n\n## Overview\n\nMonitors the health of HPE Smart Arrays by tracking the status of controllers, arrays, logical and physical drives in your storage system.\nIt relies on the `ssacli` CLI tool but avoids directly executing the binary.\nInstead, it utilizes `ndsudo`, a 
Netdata helper specifically designed to run privileged commands securely within the Netdata environment.\nThis approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\n\nExecuted commands:\n- `ssacli ctrl all show config detail`\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install ssacli\n\nSee [official installation instructions](https://support.hpe.com/connect/s/softwaredetails?language=en_US&collectionId=MTX-0cb3f808e2514d3d).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/ssacli.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/ssacli.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | ssacli binary execution timeout. | 2 | no |\n\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n```yaml\njobs:\n - name: hpssa\n update_every: 5 # Collect HPE Smart Array statistics every 5 seconds\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install ssacli\n\nSee [official installation instructions](https://support.hpe.com/connect/s/softwaredetails?language=en_US&collectionId=MTX-0cb3f808e2514d3d).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/ssacli.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/ssacli.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | ssacli binary execution timeout. | 2 | no |\n\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n```yaml\njobs:\n - name: hpssa\n update_every: 5 # Collect HPE Smart Array statistics every 5 seconds\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `hpssa` collector, run the `go.d.plugin` with the debug option enabled. 
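> **Note**: Since this collector is a thin wrapper around `ssacli`, a quick manual run of the same command it executes can separate tool problems from collector problems. A minimal sketch, assuming `ssacli` is installed and on root's `PATH`; if this fails, fix the tool first, then continue with the plugin debug run described here:

```bash
# Run the exact command the collector issues (normally via ndsudo);
# root privileges are required for controller access.
sudo ssacli ctrl all show config detail | head -n 20
```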
The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m hpssa\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `hpssa` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep hpssa\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep hpssa /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep hpssa\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per controller\n\nThese metrics refer to the Controller.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| slot | Slot number |\n| model | Controller model |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hpssa.controller_status | ok, nok | status |\n| hpssa.controller_temperature | temperature | Celsius |\n| hpssa.controller_cache_module_presence_status | present, not_present | status |\n| hpssa.controller_cache_module_status | ok, nok | status |\n| hpssa.controller_cache_module_temperature | temperature | Celsius |\n| hpssa.controller_cache_module_battery_status | ok, nok | status |\n\n### Per array\n\nThese metrics refer to the Array.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| slot | Slot number |\n| array_id | Array id |\n| interface_type | Array interface type (e.g. SATA) |\n| array_type | Array type (e.g. Data) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hpssa.array_status | ok, nok | status |\n\n### Per logical drive\n\nThese metrics refer to the Logical Drive.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| slot | Slot number |\n| array_id | Array id |\n| logical_drive_id | Logical Drive id (number) |\n| disk_name | Disk name (e.g. /dev/sda) |\n| drive_type | Drive type (e.g. 
Data) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hpssa.logical_drive_status | ok, nok | status |\n\n### Per physical drive\n\nThese metrics refer to the Physical Drive.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| slot | Slot number |\n| array_id | Array id or \"na\" if unassigned |\n| logical_drive_id | Logical Drive id or \"na\" if unassigned |\n| location | Drive location in port:box:bay format (e.g. 1I:1:1) |\n| interface_type | Drive interface type (e.g. SATA) |\n| drive_type | Drive type (e.g. Data Drive, Unassigned Drive) |\n| model | Drive model |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hpssa.physical_drive_status | ok, nok | status |\n| hpssa.physical_drive_temperature | temperature | Celsius |\n\n", @@ -4584,7 +4616,7 @@ "most_popular": true }, "overview": "# HTTP Endpoints\n\nPlugin: go.d.plugin\nModule: httpcheck\n\n## Overview\n\nThis collector monitors HTTP server availability and response time.\n\nPossible statuses:\n\n| Status | Description |\n|:--------------|:-----------|\n| success | HTTP request completed successfully with a status code matching the configured `status_accepted` range (default: 200), and the response body and headers (if configured) match expectations. |\n| timeout | HTTP request timed out before receiving a response (default: 1 second). |\n| no_connection | Failed to establish a connection to the target. |\n| redirect | Received a redirect response (3xx status code) while `not_follow_redirects` is configured. |\n| bad_status | HTTP request completed with a status code outside the configured `status_accepted` range (default: any status other than 200). |\n| bad_content | HTTP request completed successfully but the response body does not match the expected content (when using `response_match`). |\n| bad_header | HTTP request completed successfully but response headers do not match the expected values (when using `headers_match`). |\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/httpcheck.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/httpcheck.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| status_accepted | HTTP accepted response statuses. Anything else will result in 'bad status' in the status chart. | [200] | no |\n| response_match | If the status code is accepted, the content of the response will be matched against this regular expression. | | no |\n| headers_match | This option defines a set of rules that check for specific key-value pairs in the HTTP headers of the response. | [] | no |\n| headers_match.exclude | This option determines whether the rule should check for the presence of the specified key-value pair or the absence of it. | no | no |\n| headers_match.key | The exact name of the HTTP header to check for. | | yes |\n| headers_match.value | The [pattern](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/matcher#supported-format) to match against the value of the specified header. | | no |\n| cookie_file | Path to cookie file. See [cookie file format](https://everything.curl.dev/http/cookies/fileformat). | | no |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080\n\n```\n##### With HTTP request headers\n\nConfiguration with HTTP request headers that will be sent by the client.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080\n headers:\n Host: localhost:8080\n User-Agent: netdata/go.d.plugin\n Accept: */*\n\n```\n##### With `status_accepted`\n\nA basic example configuration with non-default status_accepted.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080\n status_accepted:\n - 200\n - 204\n\n```\n##### With `header_match`\n\nExample configurations with `header_match`. 
See the value [pattern](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/matcher#supported-format) syntax.\n\n```yaml\njobs:\n # The \"X-Robots-Tag\" header must be present in the HTTP response header,\n # but the value of the header does not matter.\n # This config checks for the presence of the header regardless of its value.\n - name: local\n url: http://127.0.0.1:8080\n header_match:\n - key: X-Robots-Tag\n\n # The \"X-Robots-Tag\" header must be present in the HTTP response header\n # only if its value is equal to \"noindex, nofollow\".\n # This config checks both the presence of the header and its value.\n - name: local\n url: http://127.0.0.1:8080\n header_match:\n - key: X-Robots-Tag\n value: '= noindex,nofollow'\n\n # The \"X-Robots-Tag\" header must not be present in the HTTP response header\n # but the value of the header does not matter.\n # This config checks for the presence of the header regardless of its value.\n - name: local\n url: http://127.0.0.1:8080\n header_match:\n - key: X-Robots-Tag\n exclude: yes\n\n # The \"X-Robots-Tag\" header must not be present in the HTTP response header\n # only if its value is equal to \"noindex, nofollow\".\n # This config checks both the presence of the header and its value.\n - name: local\n url: http://127.0.0.1:8080\n header_match:\n - key: X-Robots-Tag\n exclude: yes\n value: '= noindex,nofollow'\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8080\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080\n\n - name: remote\n url: http://192.0.2.1:8080\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/httpcheck.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/httpcheck.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| status_accepted | HTTP accepted response statuses. Anything else will result in 'bad status' in the status chart. | [200] | no |\n| response_match | If the status code is accepted, the content of the response will be matched against this regular expression. | | no |\n| headers_match | This option defines a set of rules that check for specific key-value pairs in the HTTP headers of the response. 
| [] | no |\n| headers_match.exclude | This option determines whether the rule should check for the presence of the specified key-value pair or the absence of it. | no | no |\n| headers_match.key | The exact name of the HTTP header to check for. | | yes |\n| headers_match.value | The [pattern](https://github.com/netdata/netdata/tree/master/src/go/pkg/matcher#supported-format) to match against the value of the specified header. | | no |\n| cookie_file | Path to cookie file. See [cookie file format](https://everything.curl.dev/http/cookies/fileformat). | | no |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080\n\n```\n##### With HTTP request headers\n\nConfiguration with HTTP request headers that will be sent by the client.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080\n headers:\n Host: localhost:8080\n User-Agent: netdata/go.d.plugin\n Accept: */*\n\n```\n##### With `status_accepted`\n\nA basic example configuration with non-default status_accepted.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080\n status_accepted:\n - 200\n - 204\n\n```\n##### With `header_match`\n\nExample configurations with `header_match`. 
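> **Note**: When writing `headers_match` rules, it helps to first inspect the headers the target actually returns. A minimal sketch, assuming the example URL used throughout this section:

```bash
# Perform a GET request but print only the response headers (body is
# discarded), showing the exact names/values a headers_match rule sees.
curl -s -D - -o /dev/null http://127.0.0.1:8080
```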
See the value [pattern](https://github.com/netdata/netdata/tree/master/src/go/pkg/matcher#supported-format) syntax.\n\n```yaml\njobs:\n # The \"X-Robots-Tag\" header must be present in the HTTP response header,\n # but the value of the header does not matter.\n # This config checks for the presence of the header regardless of its value.\n - name: local\n url: http://127.0.0.1:8080\n header_match:\n - key: X-Robots-Tag\n\n # The \"X-Robots-Tag\" header must be present in the HTTP response header\n # only if its value is equal to \"noindex, nofollow\".\n # This config checks both the presence of the header and its value.\n - name: local\n url: http://127.0.0.1:8080\n header_match:\n - key: X-Robots-Tag\n value: '= noindex,nofollow'\n\n # The \"X-Robots-Tag\" header must not be present in the HTTP response header\n # but the value of the header does not matter.\n # This config checks for the presence of the header regardless of its value.\n - name: local\n url: http://127.0.0.1:8080\n header_match:\n - key: X-Robots-Tag\n exclude: yes\n\n # The \"X-Robots-Tag\" header must not be present in the HTTP response header\n # only if its value is equal to \"noindex, nofollow\".\n # This config checks both the presence of the header and its value.\n - name: local\n url: http://127.0.0.1:8080\n header_match:\n - key: X-Robots-Tag\n exclude: yes\n value: '= noindex,nofollow'\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8080\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080\n\n - name: remote\n url: http://192.0.2.1:8080\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `httpcheck` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m httpcheck\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `httpcheck` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep httpcheck\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep httpcheck /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep httpcheck\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per target\n\nThe metrics refer to the monitored target.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| url | url value that is set in the configuration file. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| httpcheck.response_time | time | ms |\n| httpcheck.response_length | length | characters |\n| httpcheck.status | success, timeout, redirect, no_connection, bad_content, bad_header, bad_status | boolean |\n| httpcheck.in_state | time | boolean |\n\n", @@ -4621,7 +4653,7 @@ "most_popular": false }, "overview": "# Icecast\n\nPlugin: go.d.plugin\nModule: icecast\n\n## Overview\n\nThis collector monitors Icecast listener counts.\n\nIt uses the Icecast server statistics `status-json.xsl` endpoint to retrieve the metrics.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Icecast instances running on localhost that are listening on port 8000.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Icecast minimum version\n\nNeeds at least Icecast version >= 2.4.0\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/icecast.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/icecast.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8000 | yes |\n| timeout | HTTP request timeout. 
| 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | POST | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8000\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8000\n\n - name: remote\n url: http://192.0.2.1:8000\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Icecast minimum version\n\nNeeds at least Icecast version >= 2.4.0\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/icecast.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/icecast.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8000 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | POST | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8000\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8000\n\n - name: remote\n url: http://192.0.2.1:8000\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `icecast` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m icecast\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `icecast` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep icecast\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep icecast /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep icecast\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Icecast source\n\nThese metrics refer to an icecast source.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| source | Source name. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| icecast.listeners | listeners | listeners |\n\n", @@ -4659,7 +4691,7 @@ "most_popular": false }, "overview": "# Intel GPU\n\nPlugin: go.d.plugin\nModule: intelgpu\n\n## Overview\n\nThis collector gathers performance metrics for Intel integrated GPUs.\nIt relies on the [`intel_gpu_top`](https://manpages.debian.org/testing/intel-gpu-tools/intel_gpu_top.1.en.html) CLI tool but avoids directly executing the binary.\nInstead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment.\nThis approach eliminates the need to grant the CAP_PERFMON capability to `intel_gpu_top`, improving security and potentially simplifying permission management.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install intel-gpu-tools\n\nInstall `intel-gpu-tools` using your distribution's package manager.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/intelgpu.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/intelgpu.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| device | Select a specific GPU using [supported filter](https://manpages.debian.org/testing/intel-gpu-tools/intel_gpu_top.1.en.html#DESCRIPTION). | | no |\n\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n```yaml\njobs:\n - name: intelgpu\n update_every: 5 # Collect Intel iGPU metrics every 5 seconds\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install intel-gpu-tools\n\nInstall `intel-gpu-tools` using your distribution's package manager.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/intelgpu.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/intelgpu.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| device | Select a specific GPU using [supported filter](https://manpages.debian.org/testing/intel-gpu-tools/intel_gpu_top.1.en.html#DESCRIPTION). 
| | no |\n\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n```yaml\njobs:\n - name: intelgpu\n update_every: 5 # Collect Intel iGPU metrics every 5 seconds\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `intelgpu` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m intelgpu\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `intelgpu` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep intelgpu\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep intelgpu /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep intelgpu\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Intel GPU instance\n\nThese metrics refer to the Intel GPU.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| intelgpu.frequency | frequency | MHz |\n| intelgpu.power | gpu, package | Watts |\n\n### Per engine\n\nThese metrics refer to the GPU hardware engine.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| engine_class | Engine class (Render/3D, Blitter, VideoEnhance, Video, Compute). |\n| engine_instance | Engine instance (e.g. Render/3D/0, Video/0, Video/1). 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| intelgpu.engine_busy_perc | busy | percentage |\n\n", @@ -4696,7 +4728,7 @@ "most_popular": false }, "overview": "# IPFS\n\nPlugin: go.d.plugin\nModule: ipfs\n\n## Overview\n\nThis collector monitors IPFS daemon health and network activity.\n\nIt uses [RPC API](https://docs.ipfs.tech/reference/kubo/rpc/) to collect metrics.\n\nUsed endpoints:\n\n- [/api/v0/stats/bw](https://docs.ipfs.tech/reference/kubo/rpc/#api-v0-stats-bw)\n- [/api/v0/swarm/peers](https://docs.ipfs.tech/reference/kubo/rpc/#api-v0-swarm-peers)\n- [/api/v0/stats/repo](https://docs.ipfs.tech/reference/kubo/rpc/#api-v0-stats-repo)\n- [/api/v0/pin/ls](https://docs.ipfs.tech/reference/kubo/rpc/#api-v0-pin-ls)\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects IPFS instances running on localhost that are listening on port 5001.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nCalls to the following endpoints are disabled by default due to IPFS bugs:\n\n- /api/v0/stats/repo ([#7528](https://github.com/ipfs/go-ipfs/issues/7528)).\n- /api/v0/pin/ls ([#3874](https://github.com/ipfs/go-ipfs/issues/3874)).\n\n**Disabled by default** due to potential high CPU usage. Consider enabling only if necessary.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/ipfs.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/ipfs.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| repoapi | Enables querying the [/api/v0/stats/repo](https://docs.ipfs.tech/reference/kubo/rpc/#api-v0-repo-stat) endpoint for repository statistics. | no | no |\n| pinapi | Enables querying the [/api/v0/pin/ls](https://docs.ipfs.tech/reference/kubo/rpc/#api-v0-pin-ls) endpoint to retrieve a list of all pinned objects. | no | no |\n| url | Server URL. | http://127.0.0.1:5001 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | POST | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. 
| no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:5001\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:5001\n\n - name: remote\n url: http://192.0.2.1:5001\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/ipfs.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/ipfs.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| repoapi | Enables querying the [/api/v0/stats/repo](https://docs.ipfs.tech/reference/kubo/rpc/#api-v0-repo-stat) endpoint for repository statistics. | no | no |\n| pinapi | Enables querying the [/api/v0/pin/ls](https://docs.ipfs.tech/reference/kubo/rpc/#api-v0-pin-ls) endpoint to retrieve a list of all pinned objects. | no | no |\n| url | Server URL. | http://127.0.0.1:5001 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | POST | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:5001\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:5001\n\n - name: remote\n url: http://192.0.2.1:5001\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `ipfs` collector, run the `go.d.plugin` with the debug option enabled. 
The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m ipfs\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `ipfs` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep ipfs\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep ipfs /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep ipfs\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ ipfs_datastore_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ipfs.conf) | ipfs.datastore_space_utilization | IPFS datastore utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per IPFS instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipfs.bandwidth | in, out | bytes/s |\n| ipfs.peers | peers | peers |\n| ipfs.datastore_space_utilization | used | percent |\n| ipfs.repo_size | size | bytes |\n| ipfs.repo_objects | objects | objects |\n| ipfs.repo_pinned_objects | pinned, recursive_pins | objects |\n\n", @@ -4732,8 +4764,8 @@ } } }, - "overview": "# ISC DHCP\n\nPlugin: go.d.plugin\nModule: isc_dhcpd\n\n## Overview\n\nThis collector monitors ISC DHCP lease usage by reading the DHCP client lease database (dhcpd.leases).\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/isc_dhcpd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/isc_dhcpd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| leases_path | Path to DHCP client lease database. | /var/lib/dhcp/dhcpd.leases | no |\n| pools | List of IP pools to monitor. 
| | yes |\n\n##### pools\n\nList of IP pools to monitor.\n\n- IP range syntax: see [supported formats](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/iprange#supported-formats).\n- Syntax:\n\n```yaml\npools:\n - name: \"POOL_NAME1\"\n networks: \"SPACE SEPARATED LIST OF IP RANGES\"\n - name: \"POOL_NAME2\"\n networks: \"SPACE SEPARATED LIST OF IP RANGES\"\n```\n\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n pools:\n - name: lan\n networks: \"192.168.0.0/24 192.168.1.0/24 192.168.2.0/24\"\n - name: wifi\n networks: \"10.0.0.0/24\"\n\n```\n", + "overview": "# ISC DHCP\n\nPlugin: go.d.plugin\nModule: isc_dhcpd\n\n## Overview\n\nThis collector monitors ISC DHCP lease usage by reading the DHCP client lease database (dhcpd.leases).\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n- BSD\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/isc_dhcpd.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/isc_dhcpd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| leases_path | Path to DHCP client lease database. | /var/lib/dhcp/dhcpd.leases | no |\n| pools | List of IP pools to monitor. | | yes |\n\n##### pools\n\nList of IP pools to monitor.\n\n- IP range syntax: see [supported formats](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/iprange#supported-formats).\n- Syntax:\n\n```yaml\npools:\n - name: \"POOL_NAME1\"\n networks: \"SPACE SEPARATED LIST OF IP RANGES\"\n - name: \"POOL_NAME2\"\n networks: \"SPACE SEPARATED LIST OF IP RANGES\"\n```\n\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n pools:\n - name: lan\n networks: \"192.168.0.0/24 192.168.1.0/24 192.168.2.0/24\"\n - name: wifi\n networks: \"10.0.0.0/24\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `isc_dhcpd` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m isc_dhcpd\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `isc_dhcpd` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep isc_dhcpd\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep isc_dhcpd /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep isc_dhcpd\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per ISC DHCP instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| isc_dhcpd.active_leases_total | active | leases |\n\n### Per ISC DHCP instance\n\nThese metrics refer to the DHCP pool.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| dhcp_pool_name | The DHCP pool name defined in the collector configuration. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| isc_dhcpd.dhcp_pool_utilization | utilization | percent |\n| isc_dhcpd.dhcp_pool_active_leases | active | leases |\n\n", @@ -4776,7 +4808,7 @@ "most_popular": true }, "overview": "# Kubelet\n\nPlugin: go.d.plugin\nModule: k8s_kubelet\n\n## Overview\n\nThis collector monitors Kubelet instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/k8s_kubelet.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/k8s_kubelet.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:10255/metrics | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:10255/metrics\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:10250/metrics\n tls_skip_verify: yes\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/k8s_kubelet.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/k8s_kubelet.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:10255/metrics | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:10255/metrics\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:10250/metrics\n tls_skip_verify: yes\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `k8s_kubelet` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m k8s_kubelet\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `k8s_kubelet` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep k8s_kubelet\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep k8s_kubelet /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep k8s_kubelet\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ kubelet_node_config_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/kubelet.conf) | k8s_kubelet.kubelet_node_config_error | the node is experiencing a configuration-related error (0: false, 1: true) |\n| [ kubelet_token_requests ](https://github.com/netdata/netdata/blob/master/src/health/health.d/kubelet.conf) | k8s_kubelet.kubelet_token_requests | number of failed Token() requests to the alternate token source |\n| [ kubelet_token_requests ](https://github.com/netdata/netdata/blob/master/src/health/health.d/kubelet.conf) | k8s_kubelet.kubelet_token_requests | number of failed Token() requests to the alternate token source |\n| [ kubelet_operations_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/kubelet.conf) | k8s_kubelet.kubelet_operations_errors | number of Docker or runtime operation errors |\n| [ kubelet_operations_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/kubelet.conf) | k8s_kubelet.kubelet_operations_errors | number of Docker or runtime operation errors |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Kubelet instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_kubelet.apiserver_audit_requests_rejected | rejected | requests/s |\n| k8s_kubelet.apiserver_storage_data_key_generation_failures | failures | events/s |\n| k8s_kubelet.apiserver_storage_data_key_generation_latencies | 5_\u00b5s, 10_\u00b5s, 20_\u00b5s, 40_\u00b5s, 80_\u00b5s, 160_\u00b5s, 320_\u00b5s, 640_\u00b5s, 1280_\u00b5s, 2560_\u00b5s, 5120_\u00b5s, 10240_\u00b5s, 20480_\u00b5s, 40960_\u00b5s, +Inf | observes/s |\n| k8s_kubelet.apiserver_storage_data_key_generation_latencies_percent | 5_\u00b5s, 10_\u00b5s, 20_\u00b5s, 40_\u00b5s, 80_\u00b5s, 160_\u00b5s, 320_\u00b5s, 640_\u00b5s, 1280_\u00b5s, 2560_\u00b5s, 5120_\u00b5s, 10240_\u00b5s, 20480_\u00b5s, 40960_\u00b5s, +Inf | percentage |\n| k8s_kubelet.apiserver_storage_envelope_transformation_cache_misses | cache misses | events/s |\n| k8s_kubelet.kubelet_containers_running | total | running_containers |\n| k8s_kubelet.kubelet_pods_running | total | running_pods |\n| k8s_kubelet.kubelet_pods_log_filesystem_used_bytes | a dimension per namespace and pod | B |\n| k8s_kubelet.kubelet_runtime_operations | a dimension per operation type | operations/s |\n| k8s_kubelet.kubelet_runtime_operations_errors | a dimension per operation type | errors/s |\n| k8s_kubelet.kubelet_docker_operations | a dimension per operation type | operations/s |\n| k8s_kubelet.kubelet_docker_operations_errors | a dimension per operation type | errors/s |\n| k8s_kubelet.kubelet_node_config_error | experiencing_error | bool |\n| k8s_kubelet.kubelet_pleg_relist_interval_microseconds | 0.5, 0.9, 0.99 | microseconds |\n| k8s_kubelet.kubelet_pleg_relist_latency_microseconds | 0.5, 0.9, 0.99 | microseconds |\n| k8s_kubelet.kubelet_token_requests | total, failed | token_requests/s |\n| k8s_kubelet.rest_client_requests_by_code | a dimension per HTTP status code | requests/s |\n| k8s_kubelet.rest_client_requests_by_method | a dimension per HTTP method | requests/s |\n\n### Per volume manager\n\nThese metrics refer to the Volume Manager.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_kubelet.volume_manager_total_volumes | actual, desired | state |\n\n", @@ -4819,7 +4851,7 @@ "most_popular": true }, "overview": "# Kubeproxy\n\nPlugin: go.d.plugin\nModule: k8s_kubeproxy\n\n## Overview\n\nThis collector monitors Kubeproxy instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/k8s_kubeproxy.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null 
|| cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/k8s_kubeproxy.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:10249/metrics | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:10249/metrics\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:10249/metrics\n tls_skip_verify: yes\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/k8s_kubeproxy.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/k8s_kubeproxy.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:10249/metrics | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. 
| no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:10249/metrics\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:10249/metrics\n tls_skip_verify: yes\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `k8s_kubeproxy` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m k8s_kubeproxy\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `k8s_kubeproxy` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep k8s_kubeproxy\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep k8s_kubeproxy /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep k8s_kubeproxy\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Kubeproxy instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_kubeproxy.kubeproxy_sync_proxy_rules | sync_proxy_rules | events/s |\n| k8s_kubeproxy.kubeproxy_sync_proxy_rules_latency_microsecond | 0.001, 0.002, 0.004, 0.008, 0.016, 0.032, 0.064, 0.128, 0.256, 0.512, 1.024, 2.048, 4.096, 8.192, 16.384, +Inf | observes/s |\n| k8s_kubeproxy.kubeproxy_sync_proxy_rules_latency | 0.001, 0.002, 0.004, 0.008, 0.016, 0.032, 0.064, 0.128, 0.256, 0.512, 1.024, 2.048, 4.096, 8.192, 16.384, +Inf | percentage |\n| k8s_kubeproxy.rest_client_requests_by_code | a dimension per HTTP status code | requests/s |\n| k8s_kubeproxy.rest_client_requests_by_method | a dimension per HTTP method | requests/s |\n| k8s_kubeproxy.http_request_duration | 0.5, 0.9, 0.99 | microseconds |\n\n", @@ -4856,10 +4888,10 @@ "most_popular": true }, "overview": "# Kubernetes Cluster State\n\nPlugin: go.d.plugin\nModule: k8s_state\n\n## Overview\n\nThis collector monitors Kubernetes Nodes, Pods and Containers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/k8s_state.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/k8s_state.conf\n```\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/k8s_state.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/k8s_state.conf\n```\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `k8s_state` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m k8s_state\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `k8s_state` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep k8s_state\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep k8s_state /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep k8s_state\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per node\n\nThese metrics refer to the Node.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| k8s_cluster_id | Cluster ID. This is equal to the kube-system namespace UID. |\n| k8s_cluster_name | Cluster name. Cluster name discovery only works in GKE. |\n| k8s_node_name | Node name. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_state.node_allocatable_cpu_requests_utilization | requests | % |\n| k8s_state.node_allocatable_cpu_requests_used | requests | millicpu |\n| k8s_state.node_allocatable_cpu_limits_utilization | limits | % |\n| k8s_state.node_allocatable_cpu_limits_used | limits | millicpu |\n| k8s_state.node_allocatable_mem_requests_utilization | requests | % |\n| k8s_state.node_allocatable_mem_requests_used | requests | bytes |\n| k8s_state.node_allocatable_mem_limits_utilization | limits | % |\n| k8s_state.node_allocatable_mem_limits_used | limits | bytes |\n| k8s_state.node_allocatable_pods_utilization | allocated | % |\n| k8s_state.node_allocatable_pods_usage | available, allocated | pods |\n| k8s_state.node_condition | a dimension per condition | status |\n| k8s_state.node_schedulability | schedulable, unschedulable | state |\n| k8s_state.node_pods_readiness | ready | % |\n| k8s_state.node_pods_readiness_state | ready, unready | pods |\n| k8s_state.node_pods_condition | pod_ready, pod_scheduled, pod_initialized, containers_ready | pods |\n| k8s_state.node_pods_phase | running, failed, succeeded, pending | pods |\n| k8s_state.node_containers | containers, init_containers | containers |\n| k8s_state.node_containers_state | running, waiting, terminated | containers |\n| k8s_state.node_init_containers_state | running, waiting, terminated | containers |\n| k8s_state.node_age | age | seconds |\n\n### Per pod\n\nThese metrics refer to the Pod.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| k8s_cluster_id | Cluster ID. This is equal to the kube-system namespace UID. |\n| k8s_cluster_name | Cluster name. Cluster name discovery only works in GKE. |\n| k8s_node_name | Node name. |\n| k8s_namespace | Namespace. |\n| k8s_controller_kind | Controller kind (ReplicaSet, DaemonSet, StatefulSet, Job, etc.). |\n| k8s_controller_name | Controller name. |\n| k8s_pod_name | Pod name. |\n| k8s_qos_class | Pod QOS class (burstable, guaranteed, besteffort). |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_state.pod_cpu_requests_used | requests | millicpu |\n| k8s_state.pod_cpu_limits_used | limits | millicpu |\n| k8s_state.pod_mem_requests_used | requests | bytes |\n| k8s_state.pod_mem_limits_used | limits | bytes |\n| k8s_state.pod_condition | pod_ready, pod_scheduled, pod_initialized, containers_ready | state |\n| k8s_state.pod_phase | running, failed, succeeded, pending | state |\n| k8s_state.pod_age | age | seconds |\n| k8s_state.pod_containers | containers, init_containers | containers |\n| k8s_state.pod_containers_state | running, waiting, terminated | containers |\n| k8s_state.pod_init_containers_state | running, waiting, terminated | containers |\n\n### Per container\n\nThese metrics refer to the Pod container.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| k8s_cluster_id | Cluster ID. This is equal to the kube-system namespace UID. |\n| k8s_cluster_name | Cluster name. Cluster name discovery only works in GKE. |\n| k8s_node_name | Node name. |\n| k8s_namespace | Namespace. |\n| k8s_controller_kind | Controller kind (ReplicaSet, DaemonSet, StatefulSet, Job, etc.). |\n| k8s_controller_name | Controller name. |\n| k8s_pod_name | Pod name. |\n| k8s_qos_class | Pod QOS class (burstable, guaranteed, besteffort). |\n| k8s_container_name | Container name. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_state.pod_container_readiness_state | ready | state |\n| k8s_state.pod_container_restarts | restarts | restarts |\n| k8s_state.pod_container_state | running, waiting, terminated | state |\n| k8s_state.pod_container_waiting_state_reason | a dimension per reason | state |\n| k8s_state.pod_container_terminated_state_reason | a dimension per reason | state |\n\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per node\n\nThese metrics refer to the Node.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| k8s_cluster_id | Cluster ID. This is equal to the kube-system namespace UID. |\n| k8s_cluster_name | Cluster name. Cluster name discovery only works in GKE. |\n| k8s_node_name | Node name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_state.node_allocatable_cpu_requests_utilization | requests | % |\n| k8s_state.node_allocatable_cpu_requests_used | requests | millicpu |\n| k8s_state.node_allocatable_cpu_limits_utilization | limits | % |\n| k8s_state.node_allocatable_cpu_limits_used | limits | millicpu |\n| k8s_state.node_allocatable_mem_requests_utilization | requests | % |\n| k8s_state.node_allocatable_mem_requests_used | requests | bytes |\n| k8s_state.node_allocatable_mem_limits_utilization | limits | % |\n| k8s_state.node_allocatable_mem_limits_used | limits | bytes |\n| k8s_state.node_allocatable_pods_utilization | allocated | % |\n| k8s_state.node_allocatable_pods_usage | available, allocated | pods |\n| k8s_state.node_condition | Ready, DiskPressure, MemoryPressure, NetworkUnavailable, PIDPressure | status |\n| k8s_state.node_schedulability | schedulable, unschedulable | state |\n| k8s_state.node_pods_readiness | ready | % |\n| k8s_state.node_pods_readiness_state | ready, unready | pods |\n| k8s_state.node_pods_condition | pod_ready, pod_scheduled, pod_initialized, containers_ready | pods |\n| k8s_state.node_pods_phase | running, failed, succeeded, pending | pods |\n| k8s_state.node_containers | containers, init_containers | containers |\n| k8s_state.node_containers_state | running, waiting, terminated | containers |\n| k8s_state.node_init_containers_state | running, waiting, terminated | containers |\n| k8s_state.node_age | age | seconds |\n\n### Per pod\n\nThese metrics refer to the Pod.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| k8s_cluster_id | Cluster ID. This is equal to the kube-system namespace UID. |\n| k8s_cluster_name | Cluster name. Cluster name discovery only works in GKE. |\n| k8s_node_name | Node name. |\n| k8s_namespace | Namespace. |\n| k8s_controller_kind | Controller kind (ReplicaSet, DaemonSet, StatefulSet, Job, etc.). |\n| k8s_controller_name | Controller name. |\n| k8s_pod_name | Pod name. |\n| k8s_qos_class | Pod QOS class (burstable, guaranteed, besteffort). 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_state.pod_cpu_requests_used | requests | millicpu |\n| k8s_state.pod_cpu_limits_used | limits | millicpu |\n| k8s_state.pod_mem_requests_used | requests | bytes |\n| k8s_state.pod_mem_limits_used | limits | bytes |\n| k8s_state.pod_condition | pod_ready, pod_scheduled, pod_initialized, containers_ready | state |\n| k8s_state.pod_phase | running, failed, succeeded, pending | state |\n| k8s_state.pod_status_reason | Evicted, NodeAffinity, NodeLost, Shutdown, UnexpectedAdmissionError, Other | status |\n| k8s_state.pod_age | age | seconds |\n| k8s_state.pod_containers | containers, init_containers | containers |\n| k8s_state.pod_containers_state | running, waiting, terminated | containers |\n| k8s_state.pod_init_containers_state | running, waiting, terminated | containers |\n\n### Per container\n\nThese metrics refer to the Pod container.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| k8s_cluster_id | Cluster ID. This is equal to the kube-system namespace UID. |\n| k8s_cluster_name | Cluster name. Cluster name discovery only works in GKE. |\n| k8s_node_name | Node name. |\n| k8s_namespace | Namespace. |\n| k8s_controller_kind | Controller kind (ReplicaSet, DaemonSet, StatefulSet, Job, etc.). |\n| k8s_controller_name | Controller name. |\n| k8s_pod_name | Pod name. |\n| k8s_qos_class | Pod QOS class (burstable, guaranteed, besteffort). |\n| k8s_container_name | Container name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_state.pod_container_readiness_state | ready | state |\n| k8s_state.pod_container_restarts | restarts | restarts |\n| k8s_state.pod_container_state | running, waiting, terminated | state |\n| k8s_state.pod_container_waiting_state_reason | ContainerCreating, CrashLoopBackOff, CreateContainerConfigError, CreateContainerError, ErrImagePull, ImagePullBackOff, InvalidImageName, PodInitializing, Other | state |\n| k8s_state.pod_container_terminated_state_reason | Completed, ContainerCannotRun, DeadlineExceeded, Error, Evicted, OOMKilled, Other | state |\n\n", "integration_type": "collector", "id": "go.d.plugin-k8s_state-Kubernetes_Cluster_State", "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/k8s_state/metadata.yaml", @@ -4905,7 +4937,7 @@ "most_popular": true }, "overview": "# Lighttpd\n\nPlugin: go.d.plugin\nModule: lighttpd\n\n## Overview\n\nThis collector monitors the activity and performance of Lighttpd servers, and collects metrics such as the number of connections, workers, requests and more.\n\n\nIt sends HTTP requests to the Lighttpd location [server-status](https://redmine.lighttpd.net/projects/lighttpd/wiki/Mod_status), \nwhich is a built-in location that provides metrics about the Lighttpd server.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Lighttpd instances running on localhost that are listening on port 80.\nOn startup, it tries to collect metrics from:\n\n- http://localhost/server-status?auto\n- http://127.0.0.1/server-status?auto\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the 
system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Enable Lighttpd status support\n\nTo enable status support, see the [official documentation](https://redmine.lighttpd.net/projects/lighttpd/wiki/Mod_status).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/lighttpd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/lighttpd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/server-status?auto | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nLighttpd with enabled HTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1/server-status?auto\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n\n - name: remote\n url: http://192.0.2.1/server-status?auto\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Enable Lighttpd status support\n\nTo enable status support, see the [official documentation](https://redmine.lighttpd.net/projects/lighttpd/wiki/Mod_status).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/lighttpd.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/lighttpd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/server-status?auto | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nLighttpd with enabled HTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1/server-status?auto\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n\n - name: remote\n url: http://192.0.2.1/server-status?auto\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `lighttpd` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m lighttpd\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `lighttpd` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep lighttpd\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep lighttpd /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep lighttpd\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Lighttpd instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| lighttpd.requests | requests | requests/s |\n| lighttpd.net | sent | kilobits/s |\n| lighttpd.workers | idle, busy | servers |\n| lighttpd.scoreboard | waiting, open, close, hard_error, keepalive, read, read_post, write, handle_request, request_start, request_end | connections |\n| lighttpd.uptime | uptime | seconds |\n\n", @@ -4942,8 +4974,8 @@ ], "most_popular": false }, - "overview": "# Litespeed\n\nPlugin: go.d.plugin\nModule: litespeed\n\n## Overview\n\nExamine Litespeed metrics for insights into web server operations. Analyze request rates, response times, and error rates for efficient web service delivery.\n\nThe collector uses the statistics under /tmp/lshttpd to gather the metrics.\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf no configuration is present, the collector will attempt to read files under /tmp/lshttpd/.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/litespeed.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/litespeed.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| reports_dir | Directory containing Litespeed's real-time statistics files. | /tmp/lshttpd/ | no |\n\n#### Examples\n\n##### Set the path to statistics\n\nChange the path for the litespeed stats files\n\n```yaml\nlocal:\n name: 'local'\n path: '/tmp/lshttpd'\n\n```\n", + "overview": "# Litespeed\n\nPlugin: go.d.plugin\nModule: litespeed\n\n## Overview\n\nExamine Litespeed metrics for insights into web server operations. 
Analyze request rates, response times, and error rates for efficient web service delivery.\n\nThe collector uses the statistics under /tmp/lshttpd to gather the metrics.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n- BSD\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf no configuration is present, the collector will attempt to read files under /tmp/lshttpd/.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/litespeed.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/litespeed.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| reports_dir | Directory containing Litespeed's real-time statistics files. | /tmp/lshttpd/ | no |\n\n#### Examples\n\n##### Set the path to statistics\n\nChange the path to the Litespeed statistics files.\n\n```yaml\njobs:\n - name: local\n reports_dir: '/tmp/lshttpd/'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `litespeed` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m litespeed\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `litespeed` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep litespeed\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep litespeed /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep litespeed\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Litespeed instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| litespeed.requests | requests | requests/s |\n| litespeed.requests_processing | processing | requests |\n| litespeed.net_throughput | in, out | kilobits/s |\n| litespeed.net_ssl_throughput | in, out | kilobits/s |\n| litespeed.connections | free, used | conns |\n| litespeed.ssl_connections | free, used | conns |\n| litespeed.public_cache | hits | hits/s |\n| litespeed.private_cache | hits | hits/s |\n| litespeed.static | hits | hits/s |\n\n", @@ -4979,8 +5011,8 @@ }, "most_popular": false }, - "overview": "# systemd-logind users\n\nPlugin: go.d.plugin\nModule: logind\n\n## Overview\n\nThis collector monitors number of sessions and users as reported by the `org.freedesktop.login1` DBus API.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/logind.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/logind.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n#### Examples\nThere are no configuration examples.\n\n", + "overview": "# systemd-logind users\n\nPlugin: go.d.plugin\nModule: logind\n\n## Overview\n\nThis collector monitors number of sessions and users as reported by the `org.freedesktop.login1` DBus API.\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics 
from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/logind.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/logind.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `logind` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m logind\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `logind` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep logind\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep logind /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep logind\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per systemd-logind users instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| logind.sessions | remote, local | sessions |\n| logind.sessions_type | console, graphical, other | sessions |\n| logind.sessions_state | online, closing, active | sessions |\n| logind.users_state | offline, closing, online, lingering, active | users |\n\n", @@ -5016,7 +5048,7 @@ "most_popular": false }, "overview": "# Logstash\n\nPlugin: go.d.plugin\nModule: logstash\n\n## Overview\n\nThis collector monitors Logstash instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/logstatsh.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/logstatsh.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://localhost:9600 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://localhost:9600\n\n```\n##### HTTP authentication\n\nHTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://localhost:9600\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nHTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://localhost:9600\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://localhost:9600\n\n - name: remote\n url: http://192.0.2.1:9600\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/logstatsh.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/logstatsh.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://localhost:9600 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://localhost:9600\n\n```\n##### HTTP authentication\n\nHTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://localhost:9600\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nHTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://localhost:9600\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://localhost:9600\n\n - name: remote\n url: http://192.0.2.1:9600\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `logstash` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m logstash\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `logstash` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep logstash\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep logstash /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep logstash\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Logstash instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| logstash.jvm_threads | threads | count |\n| logstash.jvm_mem_heap_used | in_use | percentage |\n| logstash.jvm_mem_heap | committed, used | KiB |\n| logstash.jvm_mem_pools_eden | committed, used | KiB |\n| logstash.jvm_mem_pools_survivor | committed, used | KiB |\n| logstash.jvm_mem_pools_old | committed, used | KiB |\n| logstash.jvm_gc_collector_count | eden, old | counts/s |\n| logstash.jvm_gc_collector_time | eden, old | ms |\n| logstash.open_file_descriptors | open | fd |\n| logstash.event | in, filtered, out | events/s |\n| logstash.event_duration | event, queue | seconds |\n| logstash.uptime | uptime | seconds |\n\n### Per pipeline\n\nThese metrics refer to the pipeline.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| pipeline | pipeline name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| logstash.pipeline_event | in, filtered, out | events/s |\n| logstash.pipeline_event_duration | event, queue | seconds |\n\n", @@ -5052,8 +5084,8 @@ }, "most_popular": false }, - "overview": "# LVM logical volumes\n\nPlugin: go.d.plugin\nModule: lvm\n\n## Overview\n\nThis collector monitors the health of LVM logical volumes. It relies on the [`lvs`](https://man7.org/linux/man-pages/man8/lvs.8.html) CLI tool but avoids directly executing the binary. Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment. This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/lvm.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/lvm.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | lvs binary execution timeout. | 2 | no |\n\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n```yaml\njobs:\n - name: lvm\n update_every: 5 # Collect logical volume statistics every 5 seconds\n\n```\n", + "overview": "# LVM logical volumes\n\nPlugin: go.d.plugin\nModule: lvm\n\n## Overview\n\nThis collector monitors the health of LVM logical volumes. 
It relies on the [`lvs`](https://man7.org/linux/man-pages/man8/lvs.8.html) CLI tool but avoids directly executing the binary. Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment. This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n- NetBSD\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/lvm.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/lvm.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | lvs binary execution timeout. | 2 | no |\n\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n```yaml\njobs:\n - name: lvm\n update_every: 5 # Collect logical volume statistics every 5 seconds\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `lvm` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m lvm\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `lvm` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep lvm\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep lvm /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep lvm\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ lvm_lv_data_space_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/lvm.conf) | lvm.lv_data_space_utilization | LVM logical volume high data space usage (LV ${label:lv_name} VG ${label:vg_name} Type ${label:volume_type}) |\n| [ lvm_lv_metadata_space_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/lvm.conf) | lvm.lv_metadata_space_utilization | LVM logical volume high metadata space usage (LV ${label:lv_name} VG ${label:vg_name} Type ${label:volume_type}) |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per logical volume\n\nThese metrics refer to the LVM logical volume.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| lv_name | Logical volume name |\n| vg_name | Volume group name |\n| volume_type | Type of the volume |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| lvm.lv_data_space_utilization | utilization | % |\n| lvm.lv_metadata_space_utilization | utilization | % |\n\n", @@ -5062,6 +5094,47 @@ "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/lvm/metadata.yaml", "related_resources": "" }, + { + "meta": { + "id": "collector-go.d.plugin-maxscale", + "plugin_name": "go.d.plugin", + "module_name": "maxscale", + "monitored_instance": { + "name": "MaxScale", + "link": "https://mariadb.com/kb/en/maxscale/", + "categories": [ + "data-collection.database-servers" + ], + "icon_filename": "maxscale.svg" + }, + "related_resources": { + "integrations": { + "list": [] + } + }, + "alternative_monitored_instances": [], + "info_provided_to_referring_integrations": { + "description": "" + }, + "keywords": [ + "maria", + "mariadb", + "maxscale", + "database", + "db" + ], + "most_popular": false + }, + "overview": "# MaxScale\n\nPlugin: go.d.plugin\nModule: maxscale\n\n## Overview\n\nThis collector monitors the activity and performance of MaxScale servers.\n\n\nIt sends HTTP requests to the MaxScale [REST API](https://mariadb.com/kb/en/maxscale-24-02rest-api/).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector can automatically detect MaxScale instances running on:\n\n- localhost that are listening on port 8989\n- within Docker 
containers\n\n> **Note**: The MaxScale REST API requires a username and password.\n> While Netdata can automatically detect MaxScale instances and create data collection jobs, these jobs will fail unless you provide the necessary credentials.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/maxscale.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/maxscale.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | The URL of the MaxScale HTTP API endpoint. | http://127.0.0.1:8989 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | admin | no |\n| password | Password for basic HTTP authentication. | mariadb | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8989\n username: admin\n password: mariadb\n\n```\n##### HTTPS with self-signed certificate\n\nMaxScale with HTTPS enabled and a self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8989\n username: admin\n password: mariadb\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8989\n username: admin\n password: mariadb\n\n - name: remote\n url: http://192.0.2.1:8989\n username: admin\n password: mariadb\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `maxscale` collector, run the `go.d.plugin` with the debug option enabled. 
The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m maxscale\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `maxscale` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep maxscale\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep maxscale /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep maxscale\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per MaxScale instance\n\nThese metrics refer to the monitored MaxScale instance.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| maxscale.poll_events | reads, writes, accepts, errors, hangups | events/s |\n| maxscale.current_sessions | sessions | sessions |\n| maxscale.current_zombie_connections | zombie | connections |\n| maxscale.threads_by_state | active, draining, dormant | threads |\n| maxscale.current_fds | managed | fds |\n| maxscale.qc_cache_efficiency | hits, misses | requests/s |\n| maxscale.qc_cache_operations | inserts, evictions | operations/s |\n| maxscale.uptime | uptime | seconds |\n\n### Per server\n\nThese metrics refer to the MariaDB server.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| server | Server ID. |\n| address | Server address. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| maxscale.server_state | master, slave, running, down, maintenance, draining, drained, relay_master, binlog_relay, synced | state |\n| maxscale.server_current_connections | connections | connections |\n\n", + "integration_type": "collector", + "id": "go.d.plugin-maxscale-MaxScale", + "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/maxscale/metadata.yaml", + "related_resources": "" + }, { "meta": { "id": "collector-go.d.plugin-megacli", @@ -5090,8 +5163,8 @@ }, "most_popular": false }, - "overview": "# MegaCLI MegaRAID\n\nPlugin: go.d.plugin\nModule: megacli\n\n## Overview\n\nMonitors the health of MegaCLI Hardware RAID by tracking the status of RAID adapters, physical drives, and backup batteries in your storage system.\nIt relies on the `megacli` CLI tool but avoids directly executing the binary.\nInstead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment.\nThis approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\n\nExecuted commands:\n- `megacli -LDPDInfo -aAll -NoLog`\n- `megacli -AdpBbuCmd -aAll -NoLog`\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/megacli.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/megacli.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | megacli binary execution timeout. 
| 2 | no |\n\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n```yaml\njobs:\n - name: megacli\n update_every: 5 # Collect MegaCli Hardware RAID statistics every 5 seconds\n\n```\n", + "overview": "# MegaCLI MegaRAID\n\nPlugin: go.d.plugin\nModule: megacli\n\n## Overview\n\nMonitors the health of MegaCLI Hardware RAID by tracking the status of RAID adapters, physical drives, and backup batteries in your storage system.\nIt relies on the `megacli` CLI tool but avoids directly executing the binary.\nInstead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment.\nThis approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\n\nExecuted commands:\n- `megacli -LDPDInfo -aAll -NoLog`\n- `megacli -AdpBbuCmd -aAll -NoLog`\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n- BSD\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/megacli.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/megacli.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | megacli binary execution timeout. | 2 | no |\n\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n```yaml\njobs:\n - name: megacli\n update_every: 5 # Collect MegaCli Hardware RAID statistics every 5 seconds\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `megacli` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m megacli\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `megacli` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep megacli\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep megacli /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep megacli\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ megacli_adapter_health_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/megacli.conf) | megacli.adapter_health_state | MegaCLI adapter ${label:adapter_number} is in the degraded state |\n| [ megacli_phys_drive_media_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/megacli.conf) | megacli.phys_drive_media_errors | MegaCLI physical drive adapter ${label:adapter_number} slot ${label:slot_number} media errors |\n| [ megacli_phys_drive_predictive_failures ](https://github.com/netdata/netdata/blob/master/src/health/health.d/megacli.conf) | megacli.phys_drive_predictive_failures | MegaCLI physical drive (adapter ${label:adapter_number} slot ${label:slot_number}) predictive failures |\n| [ megacli_bbu_charge ](https://github.com/netdata/netdata/blob/master/src/health/health.d/megacli.conf) | megacli.bbu_charge | MegaCLI Backup Battery Unit (adapter ${label:adapter_number}) average charge over the last minute |\n| [ megacli_bbu_recharge_cycles ](https://github.com/netdata/netdata/blob/master/src/health/health.d/megacli.conf) | megacli.bbu_recharge_cycles | MegaCLI Backup Battery Unit (adapter ${label:adapter_number}) average charge over the last minute |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per adapter\n\nThese metrics refer to the MegaCLI Adapter.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| adapter_number | Adapter number |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| megacli.adapter_health_state | optimal, degraded, partially_degraded, failed | state |\n\n### Per physical drive\n\nThese metrics refer to the MegaCLI Physical Drive.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| adapter_number | Adapter number |\n| wwn | World Wide Name |\n| slot_number | Slot number |\n| drive_position | Position (e.g. DiskGroup: 0, Span: 0, Arm: 2) |\n| drive_type | Type (e.g. SATA) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| megacli.phys_drive_media_errors_rate | media_errors | errors/s |\n| megacli.phys_drive_predictive_failures_rate | predictive_failures | failures/s |\n\n### Per backup battery unit\n\nThese metrics refer to the MegaCLI Backup Battery Unit.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| adapter_number | Adapter number |\n| battery_type | Battery type (e.g. BBU) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| megacli.bbu_charge | charge | percentage |\n| megacli.bbu_recharge_cycles | recharge | cycles |\n| megacli.bbu_capacity_degradation | cap_degradation | percent |\n| megacli.bbu_temperature | temperature | Celsius |\n\n", @@ -5130,7 +5203,7 @@ "most_popular": false }, "overview": "# Memcached\n\nPlugin: go.d.plugin\nModule: memcached\n\n## Overview\n\nMonitor Memcached metrics for proficient in-memory key-value store operations. Track cache hits, misses, and memory usage for efficient data caching.\n\nIt reads the server's response to the `stats` command.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf no configuration is given, collector will attempt to connect to memcached instance on `127.0.0.1:11211` address.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/memcached.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/memcached.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | The IP address and port where the memcached service listens for connections. | 127.0.0.1:11211 | yes |\n| timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. 
| 1 | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:11211\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:11211\n\n - name: remote\n address: 203.0.113.0:11211\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/memcached.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/memcached.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | The IP address and port where the memcached service listens for connections. | 127.0.0.1:11211 | yes |\n| timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. | 1 | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:11211\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:11211\n\n - name: remote\n address: 203.0.113.0:11211\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `memcached` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m memcached\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `memcached` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep memcached\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep memcached /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep memcached\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ memcached_cache_memory_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/memcached.conf) | memcached.cache | cache memory utilization |\n| [ memcached_cache_fill_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/memcached.conf) | memcached.cache | average rate the cache fills up (positive), or frees up (negative) space over the last hour |\n| [ memcached_out_of_cache_space_time ](https://github.com/netdata/netdata/blob/master/src/health/health.d/memcached.conf) | memcached.cache | estimated time the cache will run out of space if the system continues to add data at the same rate as the past hour |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Memcached instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| memcached.cache | available, used | MiB |\n| memcached.net | in, out | kilobits/s |\n| memcached.connections | current, rejected, total | connections/s |\n| memcached.items | current, total | items |\n| memcached.evicted_reclaimed | reclaimed, evicted | items |\n| memcached.get | hints, misses | requests |\n| memcached.get_rate | rate | requests/s |\n| memcached.set_rate | rate | requests/s |\n| memcached.delete | hits, misses | requests |\n| memcached.cas | hits, misses, bad value | requests |\n| memcached.increment | hits, misses | requests |\n| memcached.decrement | hits, misses | requests |\n| memcached.touch | hits, misses | requests |\n| memcached.touch_rate | rate | requests/s |\n\n", @@ -5167,7 +5240,7 @@ "most_popular": false }, "overview": "# MongoDB\n\nPlugin: go.d.plugin\nModule: mongodb\n\n## Overview\n\nThis collector monitors MongoDB servers.\n\nExecuted queries:\n\n- [serverStatus](https://docs.mongodb.com/manual/reference/command/serverStatus/)\n- [dbStats](https://docs.mongodb.com/manual/reference/command/dbStats/)\n- [replSetGetStatus](https://www.mongodb.com/docs/manual/reference/command/replSetGetStatus/)\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Create a read-only user\n\nCreate a read-only user for Netdata in the admin database.\n\n- Authenticate as the admin user:\n\n ```bash\n use admin\n db.auth(\"admin\", \"\")\n ```\n\n- Create a user:\n\n ```bash\n db.createUser({\n \"user\":\"netdata\",\n \"pwd\": \"\",\n \"roles\" : [\n {role: 'read', db: 'admin' },\n {role: 'clusterMonitor', db: 'admin'},\n {role: 'read', db: 'local' }\n ]\n })\n ```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/mongodb.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/mongodb.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| uri | MongoDB connection string. See [URI syntax](https://www.mongodb.com/docs/manual/reference/connection-string/). | mongodb://localhost:27017 | yes |\n| timeout | Query timeout in seconds. | 1 | no |\n| databases | Databases selector. Determines which database metrics will be collected. 
| | no |\n\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n uri: mongodb://netdata:password@localhost:27017\n\n```\n##### With databases metrics\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n uri: mongodb://netdata:password@localhost:27017\n databases:\n includes:\n - \"* *\"\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n uri: mongodb://netdata:password@localhost:27017\n\n - name: remote\n uri: mongodb://netdata:password@203.0.113.0:27017\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Create a read-only user\n\nCreate a read-only user for Netdata in the admin database.\n\n- Authenticate as the admin user:\n\n ```bash\n use admin\n db.auth(\"admin\", \"\")\n ```\n\n- Create a user:\n\n ```bash\n db.createUser({\n \"user\":\"netdata\",\n \"pwd\": \"\",\n \"roles\" : [\n {role: 'read', db: 'admin' },\n {role: 'clusterMonitor', db: 'admin'},\n {role: 'read', db: 'local' }\n ]\n })\n ```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/mongodb.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/mongodb.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| uri | MongoDB connection string. See [URI syntax](https://www.mongodb.com/docs/manual/reference/connection-string/). | mongodb://localhost:27017 | yes |\n| timeout | Query timeout in seconds. | 1 | no |\n| databases | Databases selector. Determines which database metrics will be collected. | | no |\n\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n uri: mongodb://netdata:password@localhost:27017\n\n```\n##### With databases metrics\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n uri: mongodb://netdata:password@localhost:27017\n databases:\n includes:\n - \"* *\"\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n uri: mongodb://netdata:password@localhost:27017\n\n - name: remote\n uri: mongodb://netdata:password@203.0.113.0:27017\n\n```\n
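##### Remote instance with a longer timeout\n\nAn illustrative variant (not one of the stock examples; the job name and values are placeholders): for a slow or distant server, the per-job `timeout` option from the table above can be raised.\n\n```yaml\njobs:\n - name: remote_slow # hypothetical job name\n uri: mongodb://netdata:password@203.0.113.0:27017\n timeout: 5 # seconds; the default is 1\n\n```\n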
",
    "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `mongodb` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m mongodb\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `mongodb` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep mongodb\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep mongodb /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep mongodb\n```\n\n",
    "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n",
    "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n- WiredTiger metrics are available only if [WiredTiger](https://docs.mongodb.com/v6.0/core/wiredtiger/) is used as the\n storage engine.\n- Sharding metrics are available on shards only\n for [mongos](https://www.mongodb.com/docs/manual/reference/program/mongos/).\n\n\n### Per MongoDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mongodb.operations_rate | reads, writes, commands | operations/s |\n| mongodb.operations_latency_time | reads, writes, commands | milliseconds |\n| mongodb.operations_by_type_rate | insert, query, update, delete, getmore, command | operations/s |\n| mongodb.document_operations_rate | inserted, deleted, returned, updated | operations/s |\n| mongodb.scanned_indexes_rate | scanned | indexes/s |\n| mongodb.scanned_documents_rate | scanned | documents/s |\n| mongodb.active_clients_count | readers, writers | clients |\n| mongodb.queued_operations_count | reads, writes | operations |\n| mongodb.cursors_open_count | open | cursors |\n| mongodb.cursors_open_no_timeout_count | open_no_timeout | cursors |\n| mongodb.cursors_opened_rate | opened | cursors/s |\n| mongodb.cursors_timed_out_rate | timed_out | cursors/s |\n| mongodb.cursors_by_lifespan_count | le_1s, 1s_5s, 5s_15s, 15s_30s, 30s_1m, 1m_10m, ge_10m | cursors |\n| mongodb.transactions_count | active, inactive, open, prepared | transactions |\n| mongodb.transactions_rate | started, aborted, committed, prepared | transactions/s |\n| mongodb.connections_usage | available, used | connections |\n| mongodb.connections_by_state_count | active, threaded, exhaust_is_master, exhaust_hello,
awaiting_topology_changes | connections |\n| mongodb.connections_rate | created | connections/s |\n| mongodb.asserts_rate | regular, warning, msg, user, tripwire, rollovers | asserts/s |\n| mongodb.network_traffic_rate | in, out | bytes/s |\n| mongodb.network_requests_rate | requests | requests/s |\n| mongodb.network_slow_dns_resolutions_rate | slow_dns | resolutions/s |\n| mongodb.network_slow_ssl_handshakes_rate | slow_ssl | handshakes/s |\n| mongodb.memory_resident_size | used | bytes |\n| mongodb.memory_virtual_size | used | bytes |\n| mongodb.memory_page_faults_rate | pgfaults | pgfaults/s |\n| mongodb.memory_tcmalloc_stats | allocated, central_cache_freelist, transfer_cache_freelist, thread_cache_freelists, pageheap_freelist, pageheap_unmapped | bytes |\n| mongodb.wiredtiger_concurrent_read_transactions_usage | available, used | transactions |\n| mongodb.wiredtiger_concurrent_write_transactions_usage | available, used | transactions |\n| mongodb.wiredtiger_cache_usage | used | bytes |\n| mongodb.wiredtiger_cache_dirty_space_size | dirty | bytes |\n| mongodb.wiredtiger_cache_io_rate | read, written | pages/s |\n| mongodb.wiredtiger_cache_evictions_rate | unmodified, modified | pages/s |\n| mongodb.sharding_nodes_count | shard_aware, shard_unaware | nodes |\n| mongodb.sharding_sharded_databases_count | partitioned, unpartitioned | databases |\n| mongodb.sharding_sharded_collections_count | partitioned, unpartitioned | collections |\n\n### Per lock type\n\nThese metrics refer to the lock type.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| lock_type | lock type (e.g. global, database, collection, mutex) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mongodb.lock_acquisitions_rate | shared, exclusive, intent_shared, intent_exclusive | acquisitions/s |\n\n### Per commit type\n\nThese metrics refer to the commit type.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| commit_type | commit type (e.g. 
noShards, singleShard, singleWriteShard) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mongodb.transactions_commits_rate | success, fail | commits/s |\n| mongodb.transactions_commits_duration_time | commits | milliseconds |\n\n### Per database\n\nThese metrics refer to the database.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| database | database name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mongodb.database_collection_count | collections | collections |\n| mongodb.database_indexes_count | indexes | indexes |\n| mongodb.database_views_count | views | views |\n| mongodb.database_documents_count | documents | documents |\n| mongodb.database_data_size | data_size | bytes |\n| mongodb.database_storage_size | storage_size | bytes |\n| mongodb.database_index_size | index_size | bytes |\n\n### Per replica set member\n\nThese metrics refer to the replica set member.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| repl_set_member | replica set member name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mongodb.repl_set_member_state | primary, startup, secondary, recovering, startup2, unknown, arbiter, down, rollback, removed | state |\n| mongodb.repl_set_member_health_status | up, down | status |\n| mongodb.repl_set_member_replication_lag_time | replication_lag | milliseconds |\n| mongodb.repl_set_member_heartbeat_latency_time | heartbeat_latency | milliseconds |\n| mongodb.repl_set_member_ping_rtt_time | ping_rtt | milliseconds |\n| mongodb.repl_set_member_uptime | uptime | seconds |\n\n### Per shard\n\nThese metrics refer to the shard.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| shard_id | shard id |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mongodb.sharding_shard_chunks_count | chunks | chunks |\n\n", @@ -5207,7 +5280,7 @@ "most_popular": false }, "overview": "# Monit\n\nPlugin: go.d.plugin\nModule: monit\n\n## Overview\n\nThis collector monitors status of Monit's service checks.\n\n\nIt sends HTTP requests to the Monit `/_status?format=xml&level=full` endpoint.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Monit instances running on localhost that are listening on port 2812.\nOn startup, it tries to collect metrics from:\n\n- http://127.0.0.1:2812\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Enable TCP PORT\n\nSee [Syntax for TCP port](https://mmonit.com/monit/documentation/monit.html#TCP-PORT) for details.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/monit.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/monit.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, 
autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:2812 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | admin | no |\n| password | Password for basic HTTP authentication. | monit | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:2812\n username: admin\n password: monit\n\n```\n##### HTTPS with self-signed certificate\n\nWith enabled HTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:2812\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:2812\n\n - name: remote\n url: http://192.0.2.1:2812\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Enable TCP PORT\n\nSee [Syntax for TCP port](https://mmonit.com/monit/documentation/monit.html#TCP-PORT) for details.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/monit.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/monit.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:2812 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | admin | no |\n| password | Password for basic HTTP authentication. | monit | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. 
Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:2812\n username: admin\n password: monit\n\n```\n##### HTTPS with self-signed certificate\n\nWith HTTPS enabled and a self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:2812\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:2812\n\n - name: remote\n url: http://192.0.2.1:2812\n\n```\n
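##### HTTPS with HTTP authentication\n\nA combined sketch (illustrative; the URL and credentials are placeholders) joining the authentication and TLS options shown separately above.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:2812\n username: admin\n password: monit\n tls_skip_verify: yes # self-signed certificate; skip chain/hostname validation\n\n```\n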
",
    "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `monit` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m monit\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `monit` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep monit\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep monit /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep monit\n```\n\n",
    "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n",
    "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per service\n\nThese metrics refer to the monitored service.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| server_hostname | Hostname of the Monit server. |\n| service_check_name | Service check name. |\n| service_check_type | Service check type. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| monit.service_check_status | ok, error, initializing, not_monitored | status |\n\n",
@@ -5257,7 +5330,7 @@
     "most_popular": true
   },
   "overview": "# MariaDB\n\nPlugin: go.d.plugin\nModule: mysql\n\n## Overview\n\nThis collector monitors the health and performance of MySQL servers and collects general statistics, replication and user metrics.\n\n\nIt connects to the MySQL instance via a TCP or UNIX socket and executes the following commands:\n\nExecuted queries:\n\n- `SELECT VERSION();`\n- `SHOW GLOBAL STATUS;`\n- `SHOW GLOBAL VARIABLES;`\n- `SHOW SLAVE STATUS;` or `SHOW ALL SLAVES STATUS;` (MariaDBv10.2+) or `SHOW REPLICA STATUS;` (MySQL 8.0.22+)\n- `SHOW USER_STATISTICS;` (MariaDBv10.1.1+)\n- `SELECT TIME,USER FROM INFORMATION_SCHEMA.PROCESSLIST;`\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by trying to connect as root and netdata using known MySQL TCP sockets:\n\n- 127.0.0.1:3306\n- \"[::1]:3306\"\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",
-    "setup": "## Setup\n\n### Prerequisites\n\n#### Create netdata user\n\nA user account should have the\nfollowing [permissions](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html):\n\n- [`USAGE`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_usage)\n- [`REPLICATION CLIENT`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_replication-client)\n- [`PROCESS`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_process)\n\nTo create the `netdata` user with these permissions, execute the following in the MySQL shell:\n\n```mysql\nCREATE USER 'netdata'@'localhost';\nGRANT USAGE, REPLICATION CLIENT, PROCESS ON *.* TO 'netdata'@'localhost';\nFLUSH PRIVILEGES;\n```\n\nThe `netdata` user will have the ability to connect to the MySQL server on localhost without a password. It will only\nbe able to gather statistics without being able to alter or affect operations in any way.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/mysql.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/mysql.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | MySQL server DSN (Data Source Name). See [DSN syntax](https://github.com/go-sql-driver/mysql#dsn-data-source-name). | root@tcp(localhost:3306)/ | yes |\n| my.cnf | Specifies the my.cnf file to read the connection settings from the [client] section.
| | no |\n| timeout | Query timeout in seconds. | 1 | no |\n\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n dsn: netdata@tcp(127.0.0.1:3306)/\n\n```\n##### Unix socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n dsn: netdata@unix(/var/lib/mysql/mysql.sock)/\n\n```\n##### Connection with password\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n dsn: netconfig:password@tcp(127.0.0.1:3306)/\n\n```\n##### my.cnf\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n my.cnf: '/etc/my.cnf'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n dsn: netdata@tcp(127.0.0.1:3306)/\n\n - name: remote\n dsn: netconfig:password@tcp(203.0.113.0:3306)/\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Create netdata user\n\nA user account should have the\nfollowing [permissions](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html):\n\n- [`USAGE`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_usage)\n- [`REPLICATION CLIENT`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_replication-client)\n- [`PROCESS`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_process)\n\nTo create the `netdata` user with these permissions, execute the following in the MySQL shell:\n\n```mysql\nCREATE USER 'netdata'@'localhost';\nGRANT USAGE, REPLICATION CLIENT, PROCESS ON *.* TO 'netdata'@'localhost';\nFLUSH PRIVILEGES;\n```\n\nThe `netdata` user will have the ability to connect to the MySQL server on localhost without a password. It will only\nbe able to gather statistics without being able to alter or affect operations in any way.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/mysql.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/mysql.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | MySQL server DSN (Data Source Name). See [DSN syntax](https://github.com/go-sql-driver/mysql#dsn-data-source-name). | root@tcp(localhost:3306)/ | yes |\n| my.cnf | Specifies the my.cnf file to read the connection settings from the [client] section. | | no |\n| timeout | Query timeout in seconds. 
| 1 | no |\n\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n dsn: netdata@tcp(127.0.0.1:3306)/\n\n```\n##### Unix socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n dsn: netdata@unix(/var/lib/mysql/mysql.sock)/\n\n```\n##### Connection with password\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n dsn: netdata:password@tcp(127.0.0.1:3306)/\n\n```\n##### my.cnf\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n my.cnf: '/etc/my.cnf'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n dsn: netdata@tcp(127.0.0.1:3306)/\n\n - name: remote\n dsn: netdata:password@tcp(203.0.113.0:3306)/\n\n```\n",
    "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `mysql` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m mysql\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `mysql` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep mysql\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep mysql /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts.
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep mysql\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ mysql_10s_slow_queries ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.queries | number of slow queries in the last 10 seconds |\n| [ mysql_10s_table_locks_immediate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table immediate locks in the last 10 seconds |\n| [ mysql_10s_table_locks_waited ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table waited locks in the last 10 seconds |\n| [ mysql_10s_waited_locks_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | ratio of waited table locks over the last 10 seconds |\n| [ mysql_connections ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.connections_active | client connections utilization |\n| [ mysql_replication ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_status | replication status (0: stopped, 1: working) |\n| [ mysql_replication_lag ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_behind | difference between the timestamp of the latest transaction processed by the SQL thread and the timestamp of the same transaction when it was processed on the master |\n| [ mysql_galera_cluster_size_max_2m ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | maximum galera cluster size in the last 2 minutes starting one minute ago |\n| [ mysql_galera_cluster_size ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | current galera cluster size, compared to the maximum size in the last 2 minutes |\n| [ mysql_galera_cluster_state_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Donor/Desynced or Joined |\n| [ mysql_galera_cluster_state_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Undefined or Joining or Error |\n| [ mysql_galera_cluster_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_status | galera node is part of a nonoperational component. This occurs in cases of multiple membership changes that result in a loss of Quorum or in cases of split-brain situations. |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per MariaDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.net | in, out | kilobits/s | \u2022 | \u2022 | \u2022 |\n| mysql.queries | queries, questions, slow_queries | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.queries_type | select, delete, update, insert, replace | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.handlers | commit, delete, prepare, read_first, read_key, read_next, read_prev, read_rnd, read_rnd_next, rollback, savepoint, savepointrollback, update, write | handlers/s | \u2022 | \u2022 | \u2022 |\n| mysql.table_open_cache_overflows | open_cache | overflows/s | \u2022 | \u2022 | \u2022 |\n| mysql.table_locks | immediate, waited | locks/s | \u2022 | \u2022 | \u2022 |\n| mysql.join_issues | full_join, full_range_join, range, range_check, scan | joins/s | \u2022 | \u2022 | \u2022 |\n| mysql.sort_issues | merge_passes, range, scan | issues/s | \u2022 | \u2022 | \u2022 |\n| mysql.tmp | disk_tables, files, tables | events/s | \u2022 | \u2022 | \u2022 |\n| mysql.connections | all, aborted | connections/s | \u2022 | \u2022 | \u2022 |\n| mysql.connections_active | active, limit, max_active | connections | \u2022 | \u2022 | \u2022 |\n| mysql.threads | connected, cached, running | threads | \u2022 | \u2022 | \u2022 |\n| mysql.threads_created | created | threads/s | \u2022 | \u2022 | \u2022 |\n| mysql.thread_cache_misses | misses | misses | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io | read, write | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io_ops | reads, writes, fsyncs | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io_pending_ops | reads, writes, fsyncs | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_log | waits, write_requests, writes | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_cur_row_lock | current waits | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_rows | inserted, read, updated, deleted | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_pages | data, dirty, free, misc, total | pages | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_pages_flushed | flush_pages | requests/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_bytes | data, dirty | MiB | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_read_ahead | all, evicted | pages/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_read_ahead_rnd | read-ahead | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_ops | disk_reads, wait_free | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log | fsyncs, writes | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log_fsync_writes | fsyncs | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log_io | write | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_deadlocks | deadlocks | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.files | files | files | \u2022 | \u2022 | \u2022 |\n| mysql.files_rate | files | files/s | \u2022 | \u2022 | \u2022 |\n| mysql.connection_errors | accept, internal, max, peer_addr, select, tcpwrap | errors/s | \u2022 | \u2022 | \u2022 |\n| mysql.opened_tables | tables | tables/s | \u2022 | \u2022 | \u2022 |\n| mysql.open_tables | cache, tables | tables | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_fetch_query_duration | duration | milliseconds | 
\u2022 | \u2022 | \u2022 |\n| mysql.process_list_queries_count | system, user | queries | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_longest_query_duration | duration | seconds | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_ops | hits, lowmem_prunes, inserts, not_cached | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.qcache | queries | queries | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_freemem | free | MiB | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_memblocks | free, total | blocks | \u2022 | \u2022 | \u2022 |\n| mysql.galera_writesets | rx, tx | writesets/s | \u2022 | \u2022 | \u2022 |\n| mysql.galera_bytes | rx, tx | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.galera_queue | rx, tx | writesets | \u2022 | \u2022 | \u2022 |\n| mysql.galera_conflicts | bf_aborts, cert_fails | transactions | \u2022 | \u2022 | \u2022 |\n| mysql.galera_flow_control | paused | ms | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_status | primary, non_primary, disconnected | status | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_state | undefined, joining, donor, joined, synced, error | state | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_size | nodes | nodes | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_weight | weight | weight | \u2022 | \u2022 | \u2022 |\n| mysql.galera_connected | connected | boolean | \u2022 | \u2022 | \u2022 |\n| mysql.galera_ready | ready | boolean | \u2022 | \u2022 | \u2022 |\n| mysql.galera_open_transactions | open | transactions | \u2022 | \u2022 | \u2022 |\n| mysql.galera_thread_count | threads | threads | \u2022 | \u2022 | \u2022 |\n| mysql.key_blocks | unused, used, not_flushed | blocks | \u2022 | \u2022 | \u2022 |\n| mysql.key_requests | reads, writes | requests/s | \u2022 | \u2022 | \u2022 |\n| mysql.key_disk_ops | reads, writes | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.binlog_cache | disk, all | transactions/s | \u2022 | \u2022 | \u2022 |\n| mysql.binlog_stmt_cache | disk, all | statements/s | \u2022 | \u2022 | \u2022 |\n\n### Per connection\n\nThese metrics refer to the replication connection.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.slave_behind | seconds | seconds | \u2022 | \u2022 | \u2022 |\n| mysql.slave_status | sql_running, io_running | boolean | \u2022 | \u2022 | \u2022 |\n\n### Per user\n\nThese metrics refer to the MySQL user.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| user | username |\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.userstats_cpu | used | percentage | | \u2022 | \u2022 |\n| mysql.userstats_rows | read, sent, updated, inserted, deleted | operations/s | | \u2022 | \u2022 |\n| mysql.userstats_commands | select, update, other | commands/s | | \u2022 | \u2022 |\n| mysql.userstats_denied_commands | denied | commands/s | | \u2022 | \u2022 |\n| mysql.userstats_created_transactions | commit, rollback | transactions/s | | \u2022 | \u2022 |\n| mysql.userstats_binlog_written | written | B/s | | \u2022 | \u2022 |\n| mysql.userstats_empty_queries | empty | queries/s | | \u2022 | \u2022 |\n| mysql.userstats_connections | created | connections/s | | \u2022 | \u2022 |\n| mysql.userstats_lost_connections | lost | connections/s | | \u2022 | \u2022 |\n| mysql.userstats_denied_connections | denied | connections/s | | \u2022 | \u2022 |\n\n", @@ -5307,7 +5380,7 @@ "most_popular": true }, "overview": "# 
MySQL\n\nPlugin: go.d.plugin\nModule: mysql\n\n## Overview\n\nThis collector monitors the health and performance of MySQL servers and collects general statistics, replication and user metrics.\n\n\nIt connects to the MySQL instance via a TCP or UNIX socket and executes the following commands:\n\nExecuted queries:\n\n- `SELECT VERSION();`\n- `SHOW GLOBAL STATUS;`\n- `SHOW GLOBAL VARIABLES;`\n- `SHOW SLAVE STATUS;` or `SHOW ALL SLAVES STATUS;` (MariaDBv10.2+) or `SHOW REPLICA STATUS;` (MySQL 8.0.22+)\n- `SHOW USER_STATISTICS;` (MariaDBv10.1.1+)\n- `SELECT TIME,USER FROM INFORMATION_SCHEMA.PROCESSLIST;`\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by trying to connect as root and netdata using known MySQL TCP sockets:\n\n- 127.0.0.1:3306\n- \"[::1]:3306\"\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Create netdata user\n\nA user account should have the\nfollowing [permissions](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html):\n\n- [`USAGE`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_usage)\n- [`REPLICATION CLIENT`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_replication-client)\n- [`PROCESS`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_process)\n\nTo create the `netdata` user with these permissions, execute the following in the MySQL shell:\n\n```mysql\nCREATE USER 'netdata'@'localhost';\nGRANT USAGE, REPLICATION CLIENT, PROCESS ON *.* TO 'netdata'@'localhost';\nFLUSH PRIVILEGES;\n```\n\nThe `netdata` user will have the ability to connect to the MySQL server on localhost without a password. It will only\nbe able to gather statistics without being able to alter or affect operations in any way.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/mysql.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/mysql.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | MySQL server DSN (Data Source Name). See [DSN syntax](https://github.com/go-sql-driver/mysql#dsn-data-source-name). | root@tcp(localhost:3306)/ | yes |\n| my.cnf | Specifies the my.cnf file to read the connection settings from the [client] section. | | no |\n| timeout | Query timeout in seconds. 
| 1 | no |\n\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n dsn: netdata@tcp(127.0.0.1:3306)/\n\n```\n##### Unix socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n dsn: netdata@unix(/var/lib/mysql/mysql.sock)/\n\n```\n##### Connection with password\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n dsn: netconfig:password@tcp(127.0.0.1:3306)/\n\n```\n##### my.cnf\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n my.cnf: '/etc/my.cnf'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n dsn: netdata@tcp(127.0.0.1:3306)/\n\n - name: remote\n dsn: netconfig:password@tcp(203.0.113.0:3306)/\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Create netdata user\n\nA user account should have the\nfollowing [permissions](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html):\n\n- [`USAGE`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_usage)\n- [`REPLICATION CLIENT`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_replication-client)\n- [`PROCESS`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_process)\n\nTo create the `netdata` user with these permissions, execute the following in the MySQL shell:\n\n```mysql\nCREATE USER 'netdata'@'localhost';\nGRANT USAGE, REPLICATION CLIENT, PROCESS ON *.* TO 'netdata'@'localhost';\nFLUSH PRIVILEGES;\n```\n\nThe `netdata` user will have the ability to connect to the MySQL server on localhost without a password. It will only\nbe able to gather statistics without being able to alter or affect operations in any way.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/mysql.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/mysql.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | MySQL server DSN (Data Source Name). See [DSN syntax](https://github.com/go-sql-driver/mysql#dsn-data-source-name). | root@tcp(localhost:3306)/ | yes |\n| my.cnf | Specifies the my.cnf file to read the connection settings from the [client] section. | | no |\n| timeout | Query timeout in seconds. 
| 1 | no |\n\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n dsn: netdata@tcp(127.0.0.1:3306)/\n\n```\n##### Unix socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n dsn: netdata@unix(/var/lib/mysql/mysql.sock)/\n\n```\n##### Connection with password\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n dsn: netdata:password@tcp(127.0.0.1:3306)/\n\n```\n##### my.cnf\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n my.cnf: '/etc/my.cnf'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n dsn: netdata@tcp(127.0.0.1:3306)/\n\n - name: remote\n dsn: netdata:password@tcp(203.0.113.0:3306)/\n\n```\n",
    "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `mysql` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m mysql\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `mysql` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep mysql\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep mysql /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts.
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep mysql\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ mysql_10s_slow_queries ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.queries | number of slow queries in the last 10 seconds |\n| [ mysql_10s_table_locks_immediate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table immediate locks in the last 10 seconds |\n| [ mysql_10s_table_locks_waited ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table waited locks in the last 10 seconds |\n| [ mysql_10s_waited_locks_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | ratio of waited table locks over the last 10 seconds |\n| [ mysql_connections ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.connections_active | client connections utilization |\n| [ mysql_replication ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_status | replication status (0: stopped, 1: working) |\n| [ mysql_replication_lag ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_behind | difference between the timestamp of the latest transaction processed by the SQL thread and the timestamp of the same transaction when it was processed on the master |\n| [ mysql_galera_cluster_size_max_2m ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | maximum galera cluster size in the last 2 minutes starting one minute ago |\n| [ mysql_galera_cluster_size ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | current galera cluster size, compared to the maximum size in the last 2 minutes |\n| [ mysql_galera_cluster_state_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Donor/Desynced or Joined |\n| [ mysql_galera_cluster_state_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Undefined or Joining or Error |\n| [ mysql_galera_cluster_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_status | galera node is part of a nonoperational component. This occurs in cases of multiple membership changes that result in a loss of Quorum or in cases of split-brain situations. |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per MariaDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.net | in, out | kilobits/s | \u2022 | \u2022 | \u2022 |\n| mysql.queries | queries, questions, slow_queries | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.queries_type | select, delete, update, insert, replace | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.handlers | commit, delete, prepare, read_first, read_key, read_next, read_prev, read_rnd, read_rnd_next, rollback, savepoint, savepointrollback, update, write | handlers/s | \u2022 | \u2022 | \u2022 |\n| mysql.table_open_cache_overflows | open_cache | overflows/s | \u2022 | \u2022 | \u2022 |\n| mysql.table_locks | immediate, waited | locks/s | \u2022 | \u2022 | \u2022 |\n| mysql.join_issues | full_join, full_range_join, range, range_check, scan | joins/s | \u2022 | \u2022 | \u2022 |\n| mysql.sort_issues | merge_passes, range, scan | issues/s | \u2022 | \u2022 | \u2022 |\n| mysql.tmp | disk_tables, files, tables | events/s | \u2022 | \u2022 | \u2022 |\n| mysql.connections | all, aborted | connections/s | \u2022 | \u2022 | \u2022 |\n| mysql.connections_active | active, limit, max_active | connections | \u2022 | \u2022 | \u2022 |\n| mysql.threads | connected, cached, running | threads | \u2022 | \u2022 | \u2022 |\n| mysql.threads_created | created | threads/s | \u2022 | \u2022 | \u2022 |\n| mysql.thread_cache_misses | misses | misses | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io | read, write | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io_ops | reads, writes, fsyncs | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io_pending_ops | reads, writes, fsyncs | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_log | waits, write_requests, writes | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_cur_row_lock | current waits | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_rows | inserted, read, updated, deleted | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_pages | data, dirty, free, misc, total | pages | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_pages_flushed | flush_pages | requests/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_bytes | data, dirty | MiB | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_read_ahead | all, evicted | pages/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_read_ahead_rnd | read-ahead | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_ops | disk_reads, wait_free | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log | fsyncs, writes | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log_fsync_writes | fsyncs | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log_io | write | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_deadlocks | deadlocks | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.files | files | files | \u2022 | \u2022 | \u2022 |\n| mysql.files_rate | files | files/s | \u2022 | \u2022 | \u2022 |\n| mysql.connection_errors | accept, internal, max, peer_addr, select, tcpwrap | errors/s | \u2022 | \u2022 | \u2022 |\n| mysql.opened_tables | tables | tables/s | \u2022 | \u2022 | \u2022 |\n| mysql.open_tables | cache, tables | tables | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_fetch_query_duration | duration | milliseconds | 
\u2022 | \u2022 | \u2022 |\n| mysql.process_list_queries_count | system, user | queries | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_longest_query_duration | duration | seconds | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_ops | hits, lowmem_prunes, inserts, not_cached | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.qcache | queries | queries | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_freemem | free | MiB | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_memblocks | free, total | blocks | \u2022 | \u2022 | \u2022 |\n| mysql.galera_writesets | rx, tx | writesets/s | \u2022 | \u2022 | \u2022 |\n| mysql.galera_bytes | rx, tx | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.galera_queue | rx, tx | writesets | \u2022 | \u2022 | \u2022 |\n| mysql.galera_conflicts | bf_aborts, cert_fails | transactions | \u2022 | \u2022 | \u2022 |\n| mysql.galera_flow_control | paused | ms | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_status | primary, non_primary, disconnected | status | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_state | undefined, joining, donor, joined, synced, error | state | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_size | nodes | nodes | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_weight | weight | weight | \u2022 | \u2022 | \u2022 |\n| mysql.galera_connected | connected | boolean | \u2022 | \u2022 | \u2022 |\n| mysql.galera_ready | ready | boolean | \u2022 | \u2022 | \u2022 |\n| mysql.galera_open_transactions | open | transactions | \u2022 | \u2022 | \u2022 |\n| mysql.galera_thread_count | threads | threads | \u2022 | \u2022 | \u2022 |\n| mysql.key_blocks | unused, used, not_flushed | blocks | \u2022 | \u2022 | \u2022 |\n| mysql.key_requests | reads, writes | requests/s | \u2022 | \u2022 | \u2022 |\n| mysql.key_disk_ops | reads, writes | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.binlog_cache | disk, all | transactions/s | \u2022 | \u2022 | \u2022 |\n| mysql.binlog_stmt_cache | disk, all | statements/s | \u2022 | \u2022 | \u2022 |\n\n### Per connection\n\nThese metrics refer to the replication connection.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.slave_behind | seconds | seconds | \u2022 | \u2022 | \u2022 |\n| mysql.slave_status | sql_running, io_running | boolean | \u2022 | \u2022 | \u2022 |\n\n### Per user\n\nThese metrics refer to the MySQL user.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| user | username |\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.userstats_cpu | used | percentage | | \u2022 | \u2022 |\n| mysql.userstats_rows | read, sent, updated, inserted, deleted | operations/s | | \u2022 | \u2022 |\n| mysql.userstats_commands | select, update, other | commands/s | | \u2022 | \u2022 |\n| mysql.userstats_denied_commands | denied | commands/s | | \u2022 | \u2022 |\n| mysql.userstats_created_transactions | commit, rollback | transactions/s | | \u2022 | \u2022 |\n| mysql.userstats_binlog_written | written | B/s | | \u2022 | \u2022 |\n| mysql.userstats_empty_queries | empty | queries/s | | \u2022 | \u2022 |\n| mysql.userstats_connections | created | connections/s | | \u2022 | \u2022 |\n| mysql.userstats_lost_connections | lost | connections/s | | \u2022 | \u2022 |\n| mysql.userstats_denied_connections | denied | connections/s | | \u2022 | \u2022 |\n\n", @@ -5357,7 +5430,7 @@ "most_popular": false }, "overview": "# 
Percona MySQL\n\nPlugin: go.d.plugin\nModule: mysql\n\n## Overview\n\nThis collector monitors the health and performance of MySQL servers and collects general statistics, replication and user metrics.\n\n\nIt connects to the MySQL instance via a TCP or UNIX socket and executes the following commands:\n\nExecuted queries:\n\n- `SELECT VERSION();`\n- `SHOW GLOBAL STATUS;`\n- `SHOW GLOBAL VARIABLES;`\n- `SHOW SLAVE STATUS;` or `SHOW ALL SLAVES STATUS;` (MariaDBv10.2+) or `SHOW REPLICA STATUS;` (MySQL 8.0.22+)\n- `SHOW USER_STATISTICS;` (MariaDBv10.1.1+)\n- `SELECT TIME,USER FROM INFORMATION_SCHEMA.PROCESSLIST;`\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by trying to connect as root and netdata using known MySQL TCP sockets:\n\n- 127.0.0.1:3306\n- \"[::1]:3306\"\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Create netdata user\n\nA user account should have the\nfollowing [permissions](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html):\n\n- [`USAGE`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_usage)\n- [`REPLICATION CLIENT`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_replication-client)\n- [`PROCESS`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_process)\n\nTo create the `netdata` user with these permissions, execute the following in the MySQL shell:\n\n```mysql\nCREATE USER 'netdata'@'localhost';\nGRANT USAGE, REPLICATION CLIENT, PROCESS ON *.* TO 'netdata'@'localhost';\nFLUSH PRIVILEGES;\n```\n\nThe `netdata` user will have the ability to connect to the MySQL server on localhost without a password. It will only\nbe able to gather statistics without being able to alter or affect operations in any way.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/mysql.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/mysql.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | MySQL server DSN (Data Source Name). See [DSN syntax](https://github.com/go-sql-driver/mysql#dsn-data-source-name). | root@tcp(localhost:3306)/ | yes |\n| my.cnf | Specifies the my.cnf file to read the connection settings from the [client] section. | | no |\n| timeout | Query timeout in seconds. 
| 1 | no |\n\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n dsn: netdata@tcp(127.0.0.1:3306)/\n\n```\n##### Unix socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n dsn: netdata@unix(/var/lib/mysql/mysql.sock)/\n\n```\n##### Connection with password\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n dsn: netconfig:password@tcp(127.0.0.1:3306)/\n\n```\n##### my.cnf\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n my.cnf: '/etc/my.cnf'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n dsn: netdata@tcp(127.0.0.1:3306)/\n\n - name: remote\n dsn: netconfig:password@tcp(203.0.113.0:3306)/\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Create netdata user\n\nA user account should have the\nfollowing [permissions](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html):\n\n- [`USAGE`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_usage)\n- [`REPLICATION CLIENT`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_replication-client)\n- [`PROCESS`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_process)\n\nTo create the `netdata` user with these permissions, execute the following in the MySQL shell:\n\n```mysql\nCREATE USER 'netdata'@'localhost';\nGRANT USAGE, REPLICATION CLIENT, PROCESS ON *.* TO 'netdata'@'localhost';\nFLUSH PRIVILEGES;\n```\n\nThe `netdata` user will have the ability to connect to the MySQL server on localhost without a password. It will only\nbe able to gather statistics without being able to alter or affect operations in any way.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/mysql.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/mysql.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | MySQL server DSN (Data Source Name). See [DSN syntax](https://github.com/go-sql-driver/mysql#dsn-data-source-name). | root@tcp(localhost:3306)/ | yes |\n| my.cnf | Specifies the my.cnf file to read the connection settings from the [client] section. | | no |\n| timeout | Query timeout in seconds. 
| 1 | no |\n\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n dsn: netdata@tcp(127.0.0.1:3306)/\n\n```\n##### Unix socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n dsn: netdata@unix(/var/lib/mysql/mysql.sock)/\n\n```\n##### Connection with password\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n dsn: netconfig:password@tcp(127.0.0.1:3306)/\n\n```\n##### my.cnf\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n my.cnf: '/etc/my.cnf'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n dsn: netdata@tcp(127.0.0.1:3306)/\n\n - name: remote\n dsn: netconfig:password@tcp(203.0.113.0:3306)/\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `mysql` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m mysql\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `mysql` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep mysql\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep mysql /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep mysql\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ mysql_10s_slow_queries ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.queries | number of slow queries in the last 10 seconds |\n| [ mysql_10s_table_locks_immediate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table immediate locks in the last 10 seconds |\n| [ mysql_10s_table_locks_waited ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table waited locks in the last 10 seconds |\n| [ mysql_10s_waited_locks_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | ratio of waited table locks over the last 10 seconds |\n| [ mysql_connections ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.connections_active | client connections utilization |\n| [ mysql_replication ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_status | replication status (0: stopped, 1: working) |\n| [ mysql_replication_lag ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_behind | difference between the timestamp of the latest transaction processed by the SQL thread and the timestamp of the same transaction when it was processed on the master |\n| [ mysql_galera_cluster_size_max_2m ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | maximum galera cluster size in the last 2 minutes starting one minute ago |\n| [ mysql_galera_cluster_size ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | current galera cluster size, compared to the maximum size in the last 2 minutes |\n| [ mysql_galera_cluster_state_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Donor/Desynced or Joined |\n| [ mysql_galera_cluster_state_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Undefined or Joining or Error |\n| [ mysql_galera_cluster_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_status | galera node is part of a nonoperational component. This occurs in cases of multiple membership changes that result in a loss of Quorum or in cases of split-brain situations. |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per MariaDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.net | in, out | kilobits/s | \u2022 | \u2022 | \u2022 |\n| mysql.queries | queries, questions, slow_queries | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.queries_type | select, delete, update, insert, replace | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.handlers | commit, delete, prepare, read_first, read_key, read_next, read_prev, read_rnd, read_rnd_next, rollback, savepoint, savepointrollback, update, write | handlers/s | \u2022 | \u2022 | \u2022 |\n| mysql.table_open_cache_overflows | open_cache | overflows/s | \u2022 | \u2022 | \u2022 |\n| mysql.table_locks | immediate, waited | locks/s | \u2022 | \u2022 | \u2022 |\n| mysql.join_issues | full_join, full_range_join, range, range_check, scan | joins/s | \u2022 | \u2022 | \u2022 |\n| mysql.sort_issues | merge_passes, range, scan | issues/s | \u2022 | \u2022 | \u2022 |\n| mysql.tmp | disk_tables, files, tables | events/s | \u2022 | \u2022 | \u2022 |\n| mysql.connections | all, aborted | connections/s | \u2022 | \u2022 | \u2022 |\n| mysql.connections_active | active, limit, max_active | connections | \u2022 | \u2022 | \u2022 |\n| mysql.threads | connected, cached, running | threads | \u2022 | \u2022 | \u2022 |\n| mysql.threads_created | created | threads/s | \u2022 | \u2022 | \u2022 |\n| mysql.thread_cache_misses | misses | misses | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io | read, write | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io_ops | reads, writes, fsyncs | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io_pending_ops | reads, writes, fsyncs | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_log | waits, write_requests, writes | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_cur_row_lock | current waits | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_rows | inserted, read, updated, deleted | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_pages | data, dirty, free, misc, total | pages | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_pages_flushed | flush_pages | requests/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_bytes | data, dirty | MiB | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_read_ahead | all, evicted | pages/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_read_ahead_rnd | read-ahead | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_ops | disk_reads, wait_free | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log | fsyncs, writes | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log_fsync_writes | fsyncs | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log_io | write | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_deadlocks | deadlocks | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.files | files | files | \u2022 | \u2022 | \u2022 |\n| mysql.files_rate | files | files/s | \u2022 | \u2022 | \u2022 |\n| mysql.connection_errors | accept, internal, max, peer_addr, select, tcpwrap | errors/s | \u2022 | \u2022 | \u2022 |\n| mysql.opened_tables | tables | tables/s | \u2022 | \u2022 | \u2022 |\n| mysql.open_tables | cache, tables | tables | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_fetch_query_duration | duration | milliseconds | 
\u2022 | \u2022 | \u2022 |\n| mysql.process_list_queries_count | system, user | queries | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_longest_query_duration | duration | seconds | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_ops | hits, lowmem_prunes, inserts, not_cached | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.qcache | queries | queries | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_freemem | free | MiB | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_memblocks | free, total | blocks | \u2022 | \u2022 | \u2022 |\n| mysql.galera_writesets | rx, tx | writesets/s | \u2022 | \u2022 | \u2022 |\n| mysql.galera_bytes | rx, tx | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.galera_queue | rx, tx | writesets | \u2022 | \u2022 | \u2022 |\n| mysql.galera_conflicts | bf_aborts, cert_fails | transactions | \u2022 | \u2022 | \u2022 |\n| mysql.galera_flow_control | paused | ms | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_status | primary, non_primary, disconnected | status | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_state | undefined, joining, donor, joined, synced, error | state | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_size | nodes | nodes | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_weight | weight | weight | \u2022 | \u2022 | \u2022 |\n| mysql.galera_connected | connected | boolean | \u2022 | \u2022 | \u2022 |\n| mysql.galera_ready | ready | boolean | \u2022 | \u2022 | \u2022 |\n| mysql.galera_open_transactions | open | transactions | \u2022 | \u2022 | \u2022 |\n| mysql.galera_thread_count | threads | threads | \u2022 | \u2022 | \u2022 |\n| mysql.key_blocks | unused, used, not_flushed | blocks | \u2022 | \u2022 | \u2022 |\n| mysql.key_requests | reads, writes | requests/s | \u2022 | \u2022 | \u2022 |\n| mysql.key_disk_ops | reads, writes | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.binlog_cache | disk, all | transactions/s | \u2022 | \u2022 | \u2022 |\n| mysql.binlog_stmt_cache | disk, all | statements/s | \u2022 | \u2022 | \u2022 |\n\n### Per connection\n\nThese metrics refer to the replication connection.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.slave_behind | seconds | seconds | \u2022 | \u2022 | \u2022 |\n| mysql.slave_status | sql_running, io_running | boolean | \u2022 | \u2022 | \u2022 |\n\n### Per user\n\nThese metrics refer to the MySQL user.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| user | username |\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.userstats_cpu | used | percentage | | \u2022 | \u2022 |\n| mysql.userstats_rows | read, sent, updated, inserted, deleted | operations/s | | \u2022 | \u2022 |\n| mysql.userstats_commands | select, update, other | commands/s | | \u2022 | \u2022 |\n| mysql.userstats_denied_commands | denied | commands/s | | \u2022 | \u2022 |\n| mysql.userstats_created_transactions | commit, rollback | transactions/s | | \u2022 | \u2022 |\n| mysql.userstats_binlog_written | written | B/s | | \u2022 | \u2022 |\n| mysql.userstats_empty_queries | empty | queries/s | | \u2022 | \u2022 |\n| mysql.userstats_connections | created | connections/s | | \u2022 | \u2022 |\n| mysql.userstats_lost_connections | lost | connections/s | | \u2022 | \u2022 |\n| mysql.userstats_denied_connections | denied | connections/s | | \u2022 | \u2022 |\n\n", @@ -5415,7 +5488,7 @@ "most_popular": true }, "overview": "# 
NGINX\n\nPlugin: go.d.plugin\nModule: nginx\n\n## Overview\n\nThis collector monitors the activity and performance of NGINX servers, and collects metrics such as the number of connections, their status, and client requests.\n\n\nIt sends HTTP requests to the NGINX location [stub-status](https://nginx.org/en/docs/http/ngx_http_stub_status_module.html), which is a built-in location that provides metrics about the NGINX server.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects NGINX instances running on localhost that are listening on port 80.\nOn startup, it tries to collect metrics from:\n\n- http://127.0.0.1/basic_status\n- http://localhost/stub_status\n- http://127.0.0.1/stub_status\n- http://127.0.0.1/nginx_status\n- http://127.0.0.1/status\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Enable status support\n\nConfigure [ngx_http_stub_status_module](https://nginx.org/en/docs/http/ngx_http_stub_status_module.html).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/nginx.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nginx.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/stub_status | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/stub_status\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/stub_status\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nNGINX with enabled HTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/stub_status\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/stub_status\n\n - name: remote\n url: http://192.0.2.1/stub_status\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Enable status support\n\nConfigure [ngx_http_stub_status_module](https://nginx.org/en/docs/http/ngx_http_stub_status_module.html).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/nginx.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nginx.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/stub_status | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/stub_status\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/stub_status\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nNGINX with enabled HTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1/stub_status\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/stub_status\n\n - name: remote\n url: http://192.0.2.1/stub_status\n\n```\n",
"troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `nginx` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m nginx\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `nginx` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep nginx\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep nginx /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep nginx\n```\n\n",
"alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n",
"metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per NGINX instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginx.connections | active | connections |\n| nginx.connections_status | reading, writing, idle | connections |\n| nginx.connections_accepted_handled | accepted, handled | connections/s |\n| nginx.requests | requests | requests/s |\n\n", @@ -5456,7 +5529,7 @@ "most_popular": false }, "overview": "# NGINX Plus\n\nPlugin: go.d.plugin\nModule: nginxplus\n\n## Overview\n\nThis collector monitors NGINX Plus servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Config API\n\nTo configure API, see the [official documentation](https://docs.nginx.com/nginx/admin-guide/monitoring/live-activity-monitoring/#configuring-the-api).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/nginxplus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nginxplus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nNGINX Plus with enabled HTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1\n\n - name: remote\n url: http://192.0.2.1\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Config API\n\nTo configure API, see the [official documentation](https://docs.nginx.com/nginx/admin-guide/monitoring/live-activity-monitoring/#configuring-the-api).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/nginxplus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nginxplus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nNGINX Plus with enabled HTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1\n\n - name: remote\n url: http://192.0.2.1\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `nginxplus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m nginxplus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `nginxplus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep nginxplus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep nginxplus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep nginxplus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per NGINX Plus instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.client_connections_rate | accepted, dropped | connections/s |\n| nginxplus.client_connections_count | active, idle | connections |\n| nginxplus.ssl_handshakes_rate | successful, failed | handshakes/s |\n| nginxplus.ssl_handshakes_failures_rate | no_common_protocol, no_common_cipher, timeout, peer_rejected_cert | failures/s |\n| nginxplus.ssl_verification_errors_rate | no_cert, expired_cert, revoked_cert, hostname_mismatch, other | errors/s |\n| nginxplus.ssl_session_reuses_rate | ssl_session | reuses/s |\n| nginxplus.http_requests_rate | requests | requests/s |\n| nginxplus.http_requests_count | requests | requests |\n| nginxplus.uptime | uptime | seconds |\n\n### Per http server zone\n\nThese metrics refer to the HTTP server zone.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| http_server_zone | HTTP server zone name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.http_server_zone_requests_rate | requests | requests/s |\n| nginxplus.http_server_zone_responses_per_code_class_rate | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s |\n| nginxplus.http_server_zone_traffic_rate | received, sent | bytes/s |\n| nginxplus.http_server_zone_requests_processing_count | processing | requests |\n| nginxplus.http_server_zone_requests_discarded_rate | discarded | requests/s |\n\n### Per http location zone\n\nThese metrics refer to the HTTP location zone.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| http_location_zone | HTTP location zone name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.http_location_zone_requests_rate | requests | requests/s |\n| nginxplus.http_location_zone_responses_per_code_class_rate | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s |\n| nginxplus.http_location_zone_traffic_rate | received, sent | bytes/s |\n| nginxplus.http_location_zone_requests_discarded_rate | discarded | requests/s |\n\n### Per http upstream\n\nThese metrics refer to the HTTP upstream.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| http_upstream_name | HTTP upstream name |\n| http_upstream_zone | HTTP upstream zone name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.http_upstream_peers_count | peers | peers |\n| nginxplus.http_upstream_zombies_count | zombie | servers |\n| nginxplus.http_upstream_keepalive_count | keepalive | connections |\n\n### Per http upstream server\n\nThese metrics refer to the HTTP upstream server.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| http_upstream_name | HTTP upstream name |\n| http_upstream_zone | HTTP upstream zone name |\n| http_upstream_server_address | HTTP upstream server address (e.g. 
127.0.0.1:81) |\n| http_upstream_server_name | HTTP upstream server name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.http_upstream_server_requests_rate | requests | requests/s |\n| nginxplus.http_upstream_server_responses_per_code_class_rate | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s |\n| nginxplus.http_upstream_server_response_time | response | milliseconds |\n| nginxplus.http_upstream_server_response_header_time | header | milliseconds |\n| nginxplus.http_upstream_server_traffic_rate | received, sent | bytes/s |\n| nginxplus.http_upstream_server_state | up, down, draining, unavail, checking, unhealthy | state |\n| nginxplus.http_upstream_server_connections_count | active | connections |\n| nginxplus.http_upstream_server_downtime | downtime | seconds |\n\n### Per http cache\n\nThese metrics refer to the HTTP cache.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| http_cache | HTTP cache name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.http_cache_state | warm, cold | state |\n| nginxplus.http_cache_iops | served, written, bypass | responses/s |\n| nginxplus.http_cache_io | served, written, bypass | bytes/s |\n| nginxplus.http_cache_size | size | bytes |\n\n### Per stream server zone\n\nThese metrics refer to the Stream server zone.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| stream_server_zone | Stream server zone name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.stream_server_zone_connections_rate | accepted | connections/s |\n| nginxplus.stream_server_zone_sessions_per_code_class_rate | 2xx, 4xx, 5xx | sessions/s |\n| nginxplus.stream_server_zone_traffic_rate | received, sent | bytes/s |\n| nginxplus.stream_server_zone_connections_processing_count | processing | connections |\n| nginxplus.stream_server_zone_connections_discarded_rate | discarded | connections/s |\n\n### Per stream upstream\n\nThese metrics refer to the Stream upstream.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| stream_upstream_name | Stream upstream name |\n| stream_upstream_zone | Stream upstream zone name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.stream_upstream_peers_count | peers | peers |\n| nginxplus.stream_upstream_zombies_count | zombie | servers |\n\n### Per stream upstream server\n\nThese metrics refer to the Stream upstream server.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| stream_upstream_name | Stream upstream name |\n| stream_upstream_zone | Stream upstream zone name |\n| stream_upstream_server_address | Stream upstream server address (e.g. 
127.0.0.1:12346) |\n| stream_upstream_server_name | Stream upstream server name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.stream_upstream_server_connections_rate | forwarded | connections/s |\n| nginxplus.stream_upstream_server_traffic_rate | received, sent | bytes/s |\n| nginxplus.stream_upstream_server_state | up, down, unavail, checking, unhealthy | state |\n| nginxplus.stream_upstream_server_downtime | downtime | seconds |\n| nginxplus.stream_upstream_server_connections_count | active | connections |\n\n### Per resolver zone\n\nThese metrics refer to the resolver zone.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| resolver_zone | resolver zone name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.resolver_zone_requests_rate | name, srv, addr | requests/s |\n| nginxplus.resolver_zone_responses_rate | noerror, formerr, servfail, nxdomain, notimp, refused, timedout, unknown | responses/s |\n\n", @@ -5465,6 +5538,47 @@ "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/nginxplus/metadata.yaml", "related_resources": "" }, + { + "meta": { + "id": "collector-go.d.plugin-nginxunit", + "plugin_name": "go.d.plugin", + "module_name": "nginxunit", + "monitored_instance": { + "name": "NGINX Unit", + "link": "https://unit.nginx.org/", + "categories": [ + "data-collection.web-servers-and-web-proxies" + ], + "icon_filename": "nginx.svg" + }, + "related_resources": { + "integrations": { + "list": [] + } + }, + "alternative_monitored_instances": [], + "info_provided_to_referring_integrations": { + "description": "" + }, + "keywords": [ + "nginx", + "unit", + "web", + "appserver", + "http" + ], + "most_popular": false + }, + "overview": "# NGINX Unit\n\nPlugin: go.d.plugin\nModule: nginxunit\n\n## Overview\n\nThis collector monitors the activity and performance of NGINX Unit servers, and collects metrics such as the number of connections, their status, and client requests.\n\n\nIt sends HTTP requests to the NGINX Unit [Status API](https://unit.nginx.org/statusapi/).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector can automatically detect NGINX Unit instances running on:\n\n- localhost that are listening on port 8000\n- within Docker containers\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Enable HTTP Control API\n\nSee [Control API](https://unit.nginx.org/controlapi/#configuration-api) documentation.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/nginxunit.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nginxunit.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, 
autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | The URL of the NGINX Unit HTTP Control API. | http://127.0.0.1:8000 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8000\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8000\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nNGINX Unit with enabled HTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8000\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8000\n\n - name: remote\n url: http://192.0.2.1:8000\n\n```\n",
"troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `nginxunit` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m nginxunit\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `nginxunit` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep nginxunit\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep nginxunit /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep nginxunit\n```\n\n", + "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per NGINX Unit instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxunit.requests_rate | requests | requests/s |\n| nginxunit.connections_rate | accepted, closed | connections/s |\n| nginxunit.connections_current | active, idle | connections |\n\n", + "integration_type": "collector", + "id": "go.d.plugin-nginxunit-NGINX_Unit", + "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/nginxunit/metadata.yaml", + "related_resources": "" + }, { "meta": { "id": "collector-go.d.plugin-nginxvts", @@ -5505,7 +5619,7 @@ "most_popular": true }, "overview": "# NGINX VTS\n\nPlugin: go.d.plugin\nModule: nginxvts\n\n## Overview\n\nThis collector monitors NGINX servers with [virtual host traffic status module](https://github.com/vozlt/nginx-module-vts).\n\n\nIt sends HTTP requests to the NGINX VTS location [status](https://github.com/vozlt/nginx-module-vts#synopsis), \nwhich is a built-in location that provides metrics about the NGINX VTS server.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects NGINX instances running on localhost.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Configure nginx-vts module\n\nTo configure nginx-vts, see the [https://github.com/vozlt/nginx-module-vts#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/nginxvts.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nginxvts.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required 
|\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/status/format/json | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/status/format/json\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1/status/format/json\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/status/format/json\n\n - name: remote\n url: http://192.0.2.1/status/format/json\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Configure nginx-vts module\n\nTo configure nginx-vts, see the [nginx-module-vts installation instructions](https://github.com/vozlt/nginx-module-vts#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/nginxvts.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nginxvts.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/status/format/json | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/status/format/json\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/status/format/json\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1/status/format/json\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/status/format/json\n\n - name: remote\n url: http://192.0.2.1/status/format/json\n\n```\n",
"troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `nginxvts` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m nginxvts\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `nginxvts` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep nginxvts\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep nginxvts /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep nginxvts\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per NGINX VTS instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxvts.requests_total | requests | requests/s |\n| nginxvts.active_connections | active | connections |\n| nginxvts.connections_total | reading, writing, waiting, accepted, handled | connections/s |\n| nginxvts.uptime | uptime | seconds |\n| nginxvts.shm_usage | max, used | bytes |\n| nginxvts.shm_used_node | used | nodes |\n| nginxvts.server_requests_total | requests | requests/s |\n| nginxvts.server_responses_total | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s |\n| nginxvts.server_traffic_total | in, out | bytes/s |\n| nginxvts.server_cache_total | miss, bypass, expired, stale, updating, revalidated, hit, scarce | events/s |\n\n", @@ -5541,8 +5655,8 @@ }, "most_popular": false }, - "overview": "# NSD\n\nPlugin: go.d.plugin\nModule: nsd\n\n## Overview\n\nThis collector monitors NSD statistics like queries, zones, protocols, query types and more. It relies on the [`nsd-control`](https://nsd.docs.nlnetlabs.nl/en/latest/manpages/nsd-control.html) CLI tool but avoids directly executing the binary. Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment. This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\nExecuted commands:\n- `nsd-control stats_noreset`\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/nsd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nsd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | nsd-control binary execution timeout. 
| 2 | no |\n\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n```yaml\njobs:\n - name: nsd\n update_every: 5 # Collect logical volume statistics every 5 seconds\n\n```\n", + "overview": "# NSD\n\nPlugin: go.d.plugin\nModule: nsd\n\n## Overview\n\nThis collector monitors NSD statistics like queries, zones, protocols, query types and more. It relies on the [`nsd-control`](https://nsd.docs.nlnetlabs.nl/en/latest/manpages/nsd-control.html) CLI tool but avoids directly executing the binary. Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment. This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\nExecuted commands:\n- `nsd-control stats_noreset`\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n- BSD\n- macOS\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/nsd.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nsd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | nsd-control binary execution timeout. | 2 | no |\n\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n```yaml\njobs:\n - name: nsd\n update_every: 5 # Collect NSD statistics every 5 seconds\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `nsd` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m nsd\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `nsd` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep nsd\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep nsd /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep nsd\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per NSD instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nsd.queries | queries | queries/s |\n| nsd.queries_by_type | A, NS, MD, MF, CNAME, SOA, MB, MG, MR, NULL, WKS, PTR, HINFO, MINFO, MX, TXT, RP, AFSDB, X25, ISDN, RT, NSAP, SIG, KEY, PX, AAAA, LOC, NXT, SRV, NAPTR, KX, CERT, DNAME, OPT, APL, DS, SSHFP, IPSECKEY, RRSIG, NSEC, DNSKEY, DHCID, NSEC3, NSEC3PARAM, TLSA, SMIMEA, CDS, CDNSKEY, OPENPGPKEY, CSYNC, ZONEMD, SVCB, HTTPS, SPF, NID, L32, L64, LP, EUI48, EUI64, URI, CAA, AVC, DLV, IXFR, AXFR, MAILB, MAILA, ANY | queries/s |\n| nsd.queries_by_opcode | QUERY, IQUERY, STATUS, NOTIFY, UPDATE, OTHER | queries/s |\n| nsd.queries_by_class | IN, CS, CH, HS | queries/s |\n| nsd.queries_by_protocol | udp, udp6, tcp, tcp6, tls, tls6 | queries/s |\n| nsd.answers_by_rcode | NOERROR, FORMERR, SERVFAIL, NXDOMAIN, NOTIMP, REFUSED, YXDOMAIN, YXRRSET, NXRRSET, NOTAUTH, NOTZONE, RCODE11, RCODE12, RCODE13, RCODE14, RCODE15, BADVERS | answers/s |\n| nsd.errors | query, answer | errors/s |\n| nsd.drops | query | drops/s |\n| nsd.zones | master, slave | zones |\n| nsd.zone_transfers_requests | AXFR, IXFR | requests/s |\n| nsd.zone_transfer_memory | used | bytes |\n| nsd.database_size | disk, mem | bytes |\n| nsd.uptime | uptime | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-nsd-NSD", "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/nsd/metadata.yaml", "related_resources": "" }, @@ -5580,7 +5694,7 @@ "most_popular": false }, "overview": "# NTPd\n\nPlugin: go.d.plugin\nModule: ntpd\n\n## Overview\n\nThis collector monitors the system variables of the local `ntpd` daemon (optionally including 
variables of the polled peers) using the NTP Control Message Protocol via UDP socket, similar to `ntpq`, the [standard NTP query program](https://doc.ntp.org/current-stable/ntpq.html).\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/ntpd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/ntpd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address in IP:PORT format. | 127.0.0.1:123 | yes |\n| timeout | Connection/read/write timeout. | 1 | no |\n| collect_peers | Determines whether peer metrics will be collected. | no | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:123\n\n```\n##### With peers metrics\n\nCollect peers metrics.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:123\n collect_peers: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:123\n\n - name: remote\n address: 203.0.113.0:123\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/ntpd.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/ntpd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address in IP:PORT format. | 127.0.0.1:123 | yes |\n| timeout | Connection/read/write timeout. | 1 | no |\n| collect_peers | Determines whether peer metrics will be collected. 
| no | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:123\n\n```\n##### With peer metrics\n\nCollect peer metrics.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:123\n collect_peers: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:123\n\n - name: remote\n address: 203.0.113.0:123\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `ntpd` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m ntpd\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `ntpd` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep ntpd\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep ntpd /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep ntpd\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per NTPd instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ntpd.sys_offset | offset | milliseconds |\n| ntpd.sys_jitter | system, clock | milliseconds |\n| ntpd.sys_frequency | frequency | ppm |\n| ntpd.sys_wander | clock | ppm |\n| ntpd.sys_rootdelay | delay | milliseconds |\n| ntpd.sys_rootdisp | dispersion | milliseconds |\n| ntpd.sys_stratum | stratum | stratum |\n| ntpd.sys_tc | current, minimum | log2 |\n| ntpd.sys_precision | precision | log2 |\n\n### Per peer\n\nThese metrics refer to the NTPd peer.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| peer_address | peer's source IP address |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ntpd.peer_offset | offset | milliseconds |\n| ntpd.peer_delay | delay | milliseconds |\n| ntpd.peer_dispersion | dispersion | milliseconds |\n| ntpd.peer_jitter | jitter | milliseconds |\n| ntpd.peer_xleave | xleave | milliseconds |\n| ntpd.peer_rootdelay | rootdelay | milliseconds |\n| ntpd.peer_rootdisp | dispersion | milliseconds |\n| ntpd.peer_stratum | stratum | stratum |\n| ntpd.peer_hmode | hmode | hmode |\n| ntpd.peer_pmode | pmode | pmode |\n| ntpd.peer_hpoll | hpoll | log2 |\n| ntpd.peer_ppoll | ppoll | log2 |\n| ntpd.peer_precision | precision | log2 |\n\n", @@ -5618,10 +5732,10 @@ "most_popular": false }, "overview": "# Nvidia GPU\n\nPlugin: go.d.plugin\nModule: nvidia_smi\n\n## Overview\n\nThis collector monitors GPUs performance metrics using\nthe [nvidia-smi](https://developer.nvidia.com/nvidia-system-management-interface) CLI tool.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/nvidia_smi.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nvidia_smi.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| binary_path | Path to nvidia_smi binary. The default is \"nvidia_smi\" and the executable is looked for in the directories specified in the PATH environment variable. | nvidia_smi | no |\n| timeout | nvidia_smi binary execution timeout. | 2 | no |\n| loop_mode | When enabled, `nvidia-smi` is executed continuously in a separate thread using the `-l` option. 
| yes | no |\n\n#### Examples\n\n##### Custom binary path\n\nThe executable is not in the directories specified in the PATH environment variable.\n\n```yaml\njobs:\n - name: nvidia_smi\n binary_path: /usr/local/sbin/nvidia_smi\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/nvidia_smi.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nvidia_smi.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| binary_path | Path to nvidia_smi binary. The default is \"nvidia_smi\" and the executable is looked for in the directories specified in the PATH environment variable. | nvidia_smi | no |\n| timeout | The maximum duration, in seconds, to wait for an `nvidia-smi` command to complete. This setting applies differently based on the collector's mode. **Loop Mode:** In loop mode, the timeout primarily determines how long to wait for the initial `nvidia-smi` execution. If the initial query takes longer than the timeout, the collector may report an error. For systems with multiple GPUs, the initial load time can sometimes be significant (e.g., 5-10 seconds). **Regular Mode:** If the collector is in regular mode, the timeout specifies how long to wait for each individual `nvidia-smi` execution. | 10 | no |\n| loop_mode | When enabled, `nvidia-smi` is executed continuously in a separate thread using the `-l` option. | yes | no |\n\n#### Examples\n\n##### Custom binary path\n\nThe executable is not in the directories specified in the PATH environment variable.\n\n```yaml\njobs:\n - name: nvidia_smi\n binary_path: /usr/local/sbin/nvidia_smi\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `nvidia_smi` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m nvidia_smi\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `nvidia_smi` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep nvidia_smi\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep nvidia_smi /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep nvidia_smi\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per gpu\n\nThese metrics refer to the GPU.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| uuid | GPU id (e.g. 00000000:00:04.0) |\n| product_name | GPU product name (e.g. NVIDIA A100-SXM4-40GB) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nvidia_smi.gpu_pcie_bandwidth_usage | rx, tx | B/s |\n| nvidia_smi.gpu_pcie_bandwidth_utilization | rx, tx | % |\n| nvidia_smi.gpu_fan_speed_perc | fan_speed | % |\n| nvidia_smi.gpu_utilization | gpu | % |\n| nvidia_smi.gpu_memory_utilization | memory | % |\n| nvidia_smi.gpu_decoder_utilization | decoder | % |\n| nvidia_smi.gpu_encoder_utilization | encoder | % |\n| nvidia_smi.gpu_frame_buffer_memory_usage | free, used, reserved | B |\n| nvidia_smi.gpu_bar1_memory_usage | free, used | B |\n| nvidia_smi.gpu_temperature | temperature | Celsius |\n| nvidia_smi.gpu_voltage | voltage | V |\n| nvidia_smi.gpu_clock_freq | graphics, video, sm, mem | MHz |\n| nvidia_smi.gpu_power_draw | power_draw | Watts |\n| nvidia_smi.gpu_performance_state | P0-P15 | state |\n| nvidia_smi.gpu_mig_mode_current_status | enabled, disabled | status |\n| nvidia_smi.gpu_mig_devices_count | mig | devices |\n\n### Per mig\n\nThese metrics refer to the Multi-Instance GPU (MIG).\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| uuid | GPU id (e.g. 00000000:00:04.0) |\n| product_name | GPU product name (e.g. NVIDIA A100-SXM4-40GB) |\n| gpu_instance_id | GPU instance id (e.g. 1) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nvidia_smi.gpu_mig_frame_buffer_memory_usage | free, used, reserved | B |\n| nvidia_smi.gpu_mig_bar1_memory_usage | free, used | B |\n\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per gpu\n\nThese metrics refer to the GPU.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| uuid | GPU uuid (e.g. GPU-27b94a00-ed54-5c24-b1fd-1054085de32a) |\n| index | GPU index (nvidia_smi typically orders GPUs by PCI bus ID) |\n| product_name | GPU product name (e.g. 
NVIDIA A100-SXM4-40GB) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nvidia_smi.gpu_pcie_bandwidth_usage | rx, tx | B/s |\n| nvidia_smi.gpu_pcie_bandwidth_utilization | rx, tx | % |\n| nvidia_smi.gpu_fan_speed_perc | fan_speed | % |\n| nvidia_smi.gpu_utilization | gpu | % |\n| nvidia_smi.gpu_memory_utilization | memory | % |\n| nvidia_smi.gpu_decoder_utilization | decoder | % |\n| nvidia_smi.gpu_encoder_utilization | encoder | % |\n| nvidia_smi.gpu_frame_buffer_memory_usage | free, used, reserved | B |\n| nvidia_smi.gpu_bar1_memory_usage | free, used | B |\n| nvidia_smi.gpu_temperature | temperature | Celsius |\n| nvidia_smi.gpu_voltage | voltage | V |\n| nvidia_smi.gpu_clock_freq | graphics, video, sm, mem | MHz |\n| nvidia_smi.gpu_power_draw | power_draw | Watts |\n| nvidia_smi.gpu_performance_state | P0-P15 | state |\n| nvidia_smi.gpu_mig_mode_current_status | enabled, disabled | status |\n| nvidia_smi.gpu_mig_devices_count | mig | devices |\n\n### Per mig\n\nThese metrics refer to the Multi-Instance GPU (MIG).\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| uuid | GPU uuid (e.g. GPU-27b94a00-ed54-5c24-b1fd-1054085de32a) |\n| product_name | GPU product name (e.g. NVIDIA A100-SXM4-40GB) |\n| gpu_instance_id | GPU instance id (e.g. 1) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nvidia_smi.gpu_mig_frame_buffer_memory_usage | free, used, reserved | B |\n| nvidia_smi.gpu_mig_bar1_memory_usage | free, used | B |\n\n", "integration_type": "collector", "id": "go.d.plugin-nvidia_smi-Nvidia_GPU", "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/nvidia_smi/metadata.yaml", @@ -5653,16 +5767,53 @@ }, "most_popular": false }, - "overview": "# NVMe devices\n\nPlugin: go.d.plugin\nModule: nvme\n\n## Overview\n\nThis collector monitors the health of NVMe devices. It relies on the [`nvme`](https://github.com/linux-nvme/nvme-cli#nvme-cli) CLI tool but avoids directly executing the binary. Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment. This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install nvme-cli\n\nSee [Distro Support](https://github.com/linux-nvme/nvme-cli#distro-support). 
Install `nvme-cli` using your distribution's package manager.\n\n\n#### For Netdata running in a Docker container: grant NVMe device access\n\nYour NVMe devices need to be accessible within the Docker container for Netdata to monitor them.\n\nInclude the following option in your `docker run` command or add the device mapping in your `docker-compose.yml` file:\n\n- `docker run`\n\n ```bash\n --device '/dev/nvme0n1:/dev/nvme0n1'\n ```\n\n- `docker-compose.yml`\n\n ```yaml\n services:\n netdata:\n devices:\n - \"/dev/nvme0n1:/dev/nvme0n1\"\n ```\n\n**Note**: Replace `/dev/nvme0n1` with your actual NVMe device name.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/nvme.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nvme.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| timeout | nvme binary execution timeout. | 2 | no |\n\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n```yaml\njobs:\n - name: nvme\n update_every: 5 # Collect NVMe metrics every 5 seconds\n\n```\n", + "overview": "# NVMe devices\n\nPlugin: go.d.plugin\nModule: nvme\n\n## Overview\n\nThis collector monitors the health of NVMe devices. It relies on the [`nvme`](https://github.com/linux-nvme/nvme-cli#nvme-cli) CLI tool but avoids directly executing the binary. Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment. This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n- BSD\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install nvme-cli\n\nSee [Distro Support](https://github.com/linux-nvme/nvme-cli#distro-support). 
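On many distributions it ships as a package named `nvme-cli`; as an illustration, on a Debian-based system (an assumption, since package managers differ) that would be:\n\n```bash\n# Debian/Ubuntu example; use your distribution's package manager otherwise\nsudo apt-get install nvme-cli\n```\n\n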
Install `nvme-cli` using your distribution's package manager.\n\n\n#### For Netdata running in a Docker container: grant NVMe device access\n\nYour NVMe devices need to be accessible within the Docker container for Netdata to monitor them.\n\nInclude the following option in your `docker run` command or add the device mapping in your `docker-compose.yml` file:\n\n- `docker run`\n\n ```bash\n --device '/dev/nvme0n1:/dev/nvme0n1'\n ```\n\n- `docker-compose.yml`\n\n ```yaml\n services:\n netdata:\n devices:\n - \"/dev/nvme0n1:/dev/nvme0n1\"\n ```\n\n**Note**: Replace `/dev/nvme0n1` with your actual NVMe device name.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/nvme.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nvme.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| timeout | nvme binary execution timeout. | 2 | no |\n\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n```yaml\njobs:\n - name: nvme\n update_every: 5 # Collect NVMe metrics every 5 seconds\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `nvme` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m nvme\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `nvme` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep nvme\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep nvme /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
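To watch new messages as they arrive instead, you can follow the file, for example:\n\n```bash\n# stream collector.log as it grows, keeping only nvme entries\ntail -f /var/log/netdata/collector.log | grep nvme\n```\n\n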
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep nvme\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ nvme_device_critical_warnings_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/nvme.conf) | nvme.device_critical_warnings_state | NVMe device ${label:device} has critical warnings |\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per device\n\nThese metrics refer to the NVME device.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | NVMe device name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nvme.device_estimated_endurance_perc | used | % |\n| nvme.device_available_spare_perc | spare | % |\n| nvme.device_composite_temperature | temperature | celsius |\n| nvme.device_io_transferred_count | read, written | bytes |\n| nvme.device_power_cycles_count | power | cycles |\n| nvme.device_power_on_time | power-on | seconds |\n| nvme.device_critical_warnings_state | available_spare, temp_threshold, nvm_subsystem_reliability, read_only, volatile_mem_backup_failed, persistent_memory_read_only | state |\n| nvme.device_unsafe_shutdowns_count | unsafe | shutdowns |\n| nvme.device_media_errors_rate | media | errors/s |\n| nvme.device_error_log_entries_rate | error_log | entries/s |\n| nvme.device_warning_composite_temperature_time | wctemp | seconds |\n| nvme.device_critical_composite_temperature_time | cctemp | seconds |\n| nvme.device_thermal_mgmt_temp1_transitions_rate | temp1 | transitions/s |\n| nvme.device_thermal_mgmt_temp2_transitions_rate | temp2 | transitions/s |\n| nvme.device_thermal_mgmt_temp1_time | temp1 | seconds |\n| nvme.device_thermal_mgmt_temp2_time | temp2 | seconds |\n\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per device\n\nThese metrics refer to the NVMe device.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | NVMe device name |\n| model_number | NVMe device model |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nvme.device_estimated_endurance_perc | used | % |\n| nvme.device_available_spare_perc | spare | % |\n| nvme.device_composite_temperature | temperature | celsius |\n| nvme.device_io_transferred_count | read, written | bytes |\n| nvme.device_power_cycles_count | power | cycles |\n| nvme.device_power_on_time | power-on | seconds |\n| nvme.device_critical_warnings_state | available_spare, temp_threshold, nvm_subsystem_reliability, read_only, volatile_mem_backup_failed, persistent_memory_read_only | state |\n| nvme.device_unsafe_shutdowns_count | unsafe | shutdowns |\n| nvme.device_media_errors_rate | media | errors/s |\n| nvme.device_error_log_entries_rate | error_log | entries/s |\n| nvme.device_warning_composite_temperature_time | wctemp | seconds |\n| nvme.device_critical_composite_temperature_time | cctemp | seconds |\n| nvme.device_thermal_mgmt_temp1_transitions_rate | temp1 | transitions/s |\n| nvme.device_thermal_mgmt_temp2_transitions_rate | temp2 | transitions/s |\n| nvme.device_thermal_mgmt_temp1_time | temp1 | seconds |\n| nvme.device_thermal_mgmt_temp2_time | temp2 | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-nvme-NVMe_devices", "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/nvme/metadata.yaml", "related_resources": "" }, { "meta": { "plugin_name": "go.d.plugin", "module_name": "openldap", "monitored_instance": { "name": "OpenLDAP", "link": "https://www.openldap.org/", "categories": [ "data-collection.authentication-and-authorization" ], "icon_filename": "openldap.svg" }, "related_resources": { "integrations": { "list": [] } }, "info_provided_to_referring_integrations": { "description": "" }, "keywords": [ "openldap", "RBAC", "Directory access" ], "most_popular": false }, "overview": "# OpenLDAP\n\nPlugin: go.d.plugin\nModule: openldap\n\n## Overview\n\nThis collector monitors OpenLDAP metrics about connections, operations, referrals and more.\n\n\nIt gathers the metrics using the [go-ldap](https://github.com/go-ldap/ldap) module and the [Monitor backend](https://www.openldap.org/doc/admin24/monitoringslapd.html) of OpenLDAP.\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector cannot auto-detect OpenLDAP instances, because credential configuration is required.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable the OpenLDAP Monitor Backend\n\nFollow the instructions at https://www.openldap.org/doc/admin24/monitoringslapd.html to activate the monitoring interface.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/openldap.conf`.\n\n\nYou can edit the configuration file using the 
[`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/openldap.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| timeout | Timeout for establishing a connection and communication (reading and writing) in seconds. | 2 | no |\n| url | LDAP server URL. | ldap://127.0.0.1:389 | yes |\n| username | The distinguished name (DN) of the user authorized to view the monitor database. | | yes |\n| password | The password associated with the user identified by the DN. | | yes |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: ldap://localhost:389\n username: cn=netdata,dc=example,dc=com \n password: secret\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: ldap://localhost:389\n username: cn=netdata,dc=example,dc=com \n password: secret\n\n - name: remote\n url: ldap://192.0.2.1:389\n username: cn=netdata,dc=example,dc=com \n password: secret\n\n```\n", + "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `openldap` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m openldap\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `openldap` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep openldap\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep openldap /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep openldap\n```\n\n", + "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per OpenLDAP instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| openldap.current_connections | active | connections |\n| openldap.connections | connections | connections/s |\n| openldap.traffic | sent | bytes/s |\n| openldap.entries | sent | entries/s |\n| openldap.referrals | sent | referrals/s |\n| openldap.operations | completed, initiated | operations/s |\n| openldap.operations_by_type | bind, search, unbind, add, delete, modify, compare | operations/s |\n| openldap.waiters | write, read | waiters/s |\n\n", + "integration_type": "collector", + "id": "go.d.plugin-openldap-OpenLDAP", + "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/openldap/metadata.yaml", + "related_resources": "" + }, { "meta": { "id": "collector-go.d.plugin-openvpn", @@ -5691,7 +5842,7 @@ "most_popular": false }, "overview": "# OpenVPN\n\nPlugin: go.d.plugin\nModule: openvpn\n\n## Overview\n\nThis collector monitors OpenVPN servers.\n\nIt uses OpenVPN [Management Interface](https://openvpn.net/community-resources/management-interface/) to collect metrics.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Enable in go.d.conf.\n\nThis collector is disabled by default. 
You need to explicitly enable it in [go.d.conf](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/config/go.d.conf).\n\nFrom the documentation for the OpenVPN Management Interface:\n> Currently, the OpenVPN daemon can at most support a single management client any one time.\n\nIt is disabled to not break other tools which use `Management Interface`.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/openvpn.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/openvpn.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address in IP:PORT format. | 127.0.0.1:7505 | yes |\n| timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. | 1 | no |\n| per_user_stats | User selector. Determines which user metrics will be collected. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:7505\n\n```\n##### With user metrics\n\nCollect metrics of all users.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:7505\n per_user_stats:\n includes:\n - \"* *\"\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:7505\n\n - name: remote\n address: 203.0.113.0:7505\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Enable in go.d.conf.\n\nThis collector is disabled by default. You need to explicitly enable it in [go.d.conf](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/config/go.d.conf).\n\nFrom the documentation for the OpenVPN Management Interface:\n> Currently, the OpenVPN daemon can at most support a single management client any one time.\n\nIt is disabled to not break other tools which use `Management Interface`.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/openvpn.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/openvpn.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address in IP:PORT format. | 127.0.0.1:7505 | yes |\n| timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. | 1 | no |\n| per_user_stats | User selector. 
Determines which user metrics will be collected. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:7505\n\n```\n##### With user metrics\n\nCollect metrics of all users.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:7505\n per_user_stats:\n includes:\n - \"* *\"\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:7505\n\n - name: remote\n address: 203.0.113.0:7505\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `openvpn` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m openvpn\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `openvpn` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep openvpn\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep openvpn /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep openvpn\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per OpenVPN instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| openvpn.active_clients | clients | clients |\n| openvpn.total_traffic | in, out | kilobits/s |\n\n### Per user\n\nThese metrics refer to the VPN user.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| username | VPN username |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| openvpn.user_traffic | in, out | kilobits/s |\n| openvpn.user_connection_time | time | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-openvpn-OpenVPN", "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/openvpn/metadata.yaml", "related_resources": "" }, { "meta": { "id": "collector-go.d.plugin-openvpn_status_log", "plugin_name": "go.d.plugin", "module_name": "openvpn_status_log", "monitored_instance": { "name": "OpenVPN status log", "link": "https://openvpn.net/", "icon_filename": "openvpn.svg", "categories": [ "data-collection.vpns" ] }, "keywords": [ "openvpn", "log" ], "related_resources": { "integrations": { "list": [] } }, "info_provided_to_referring_integrations": { "description": "" }, "most_popular": false }, "overview": "# OpenVPN status log\n\nPlugin: go.d.plugin\nModule: openvpn_status_log\n\n## Overview\n\nThis collector monitors OpenVPN servers.\n\nIt parses server log files and provides summary and per-user metrics.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/openvpn_status_log.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/openvpn_status_log.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| log_path | Path to status log. | /var/log/openvpn/status.log | yes |\n| per_user_stats | User selector. Determines which user metrics will be collected. | | no |\n\n#### Examples\n\n##### With user metrics\n\nCollect metrics of all users.\n\n```yaml\njobs:\n - name: local\n per_user_stats:\n includes:\n - \"* *\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `openvpn_status_log` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m openvpn_status_log\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `openvpn_status_log` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep openvpn_status_log\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep openvpn_status_log /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep openvpn_status_log\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per OpenVPN status log instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| openvpn.active_clients | clients | clients |\n| openvpn.total_traffic | in, out | kilobits/s |\n\n### Per user\n\nThese metrics refer to the VPN user.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| username | VPN username |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| openvpn.user_traffic | in, out | kilobits/s |\n| openvpn.user_connection_time | time | seconds |\n\n", @@ -5739,20 +5890,17 @@ }, { "meta": { - "id": "collector-go.d.plugin-pgbouncer", + "id": "collector-go.d.plugin-oracledb", "plugin_name": "go.d.plugin", - "module_name": "pgbouncer", + "module_name": "oracledb", "monitored_instance": { - "name": "PgBouncer", - "link": "https://www.pgbouncer.org/", - "icon_filename": "postgres.svg", + "name": "Oracle DB", + "link": "https://www.oracle.com/database/", "categories": [ "data-collection.database-servers" - ] + ], + "icon_filename": "oracle.svg" }, - "keywords": [ - "pgbouncer" - ], "related_resources": { "integrations": { "list": [] @@ -5761,26 +5909,67 @@ "info_provided_to_referring_integrations": { "description": "" }, + "keywords": [ + "database", + "oracle", + "sql" + ], "most_popular": false }, - "overview": "# PgBouncer\n\nPlugin: go.d.plugin\nModule: pgbouncer\n\n## Overview\n\nThis collector monitors PgBouncer servers.\n\nExecuted queries:\n\n- `SHOW VERSION;`\n- `SHOW CONFIG;`\n- `SHOW DATABASES;`\n- `SHOW STATS;`\n- `SHOW POOLS;`\n\nInformation about the queries can be found in the [PgBouncer Documentation](https://www.pgbouncer.org/usage.html).\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Create netdata user\n\nCreate a user with `stats_users` permissions to query your PgBouncer instance.\n\nTo create the `netdata` user:\n\n- Add `netdata` user to the `pgbouncer.ini` file:\n\n ```text\n stats_users = netdata\n ```\n\n- Add a password for the `netdata` user to the `userlist.txt` file:\n\n ```text\n \"netdata\" \"\"\n ```\n\n- To verify the credentials, run the following command\n\n ```bash\n psql -h localhost -U netdata -p 6432 pgbouncer -c \"SHOW VERSION;\" >/dev/null 2>&1 && echo OK || echo FAIL\n ```\n\n When it prompts for a password, enter the password you added to `userlist.txt`.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/pgbouncer.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/pgbouncer.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | 
Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | PgBouncer server DSN (Data Source Name). See [DSN syntax](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING). | postgres://postgres:postgres@127.0.0.1:6432/pgbouncer | yes |\n| timeout | Query timeout in seconds. | 1 | no |\n\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n dsn: 'postgres://postgres:postgres@127.0.0.1:6432/pgbouncer'\n\n```\n##### Unix socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n dsn: 'host=/tmp dbname=pgbouncer user=postgres port=6432'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n dsn: 'postgres://postgres:postgres@127.0.0.1:6432/pgbouncer'\n\n - name: remote\n dsn: 'postgres://postgres:postgres@203.0.113.10:6432/pgbouncer'\n\n```\n", - "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `pgbouncer` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m pgbouncer\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `pgbouncer` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep pgbouncer\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep pgbouncer /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep pgbouncer\n```\n\n", "overview": "# Oracle DB\n\nPlugin: go.d.plugin\nModule: oracledb\n\n## Overview\n\nThis collector monitors the health and performance of Oracle DB servers and collects general statistics, replication and user metrics.\n\n\nIt establishes a connection to the Oracle DB instance via a TCP or UNIX socket and extracts metrics from the following database tables:\n\n- `v$sysmetric`\n- `v$sysstat`\n- `v$waitclassmetric`\n- `v$system_wait_class`\n- `dba_data_files`\n- `dba_free_space`\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector can automatically detect Oracle DB instances running on:\n\n- Localhost, listening on port 1521\n- Within Docker containers\n\n> **Note**: Oracle DB requires a username and password. While Netdata can automatically discover Oracle DB instances and create data collection jobs, these jobs will fail unless you provide the correct credentials.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Create a read-only user for netdata\n\nFollow the official instructions for your Oracle RDBMS to create a read-only user for netdata. The operation may follow this approach:\n\nConnect to your Oracle database with an administrative user and execute:\n\n```sql\nCREATE USER netdata IDENTIFIED BY <password>;\n\nGRANT CONNECT TO netdata;\nGRANT SELECT_CATALOG_ROLE TO netdata;\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/oracledb.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/oracledb.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | Oracle server DSN (Data Source Name). Format is `oracle://username:password@host:port/service?param1=value1&...&paramN=valueN`. | | yes |\n| timeout | Query timeout in seconds. 
| 1 | no |\n\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n dsn: oracle://netdata:secret@127.0.0.1:1521/XE\n\n```\n##### TLS connection (TCPS)\n\nAn example configuration for TLS connection.\n\n```yaml\njobs:\n - name: local\n dsn: 'oracle://netdata:secret@127.0.0.1:1521/XE?ssl=true&ssl verify=true'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n dsn: oracle://netdata:secret@127.0.0.1:1521/XE\n\n - name: remote\n dsn: oracle://netdata:secret@203.0.113.0:1521/XE\n\n```\n", + "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `oracledb` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m oracledb\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `oracledb` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep oracledb\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep oracledb /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep oracledb\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per PgBouncer instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| pgbouncer.client_connections_utilization | used | percentage |\n\n### Per database\n\nThese metrics refer to the database.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| database | database name |\n| postgres_database | Postgres database name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| pgbouncer.db_client_connections | active, waiting, cancel_req | connections |\n| pgbouncer.db_server_connections | active, idle, used, tested, login | connections |\n| pgbouncer.db_server_connections_utilization | used | percentage |\n| pgbouncer.db_clients_wait_time | time | seconds |\n| pgbouncer.db_client_max_wait_time | time | seconds |\n| pgbouncer.db_transactions | transactions | transactions/s |\n| pgbouncer.db_transactions_time | time | seconds |\n| pgbouncer.db_transaction_avg_time | time | seconds |\n| pgbouncer.db_queries | queries | queries/s |\n| pgbouncer.db_queries_time | time | seconds |\n| pgbouncer.db_query_avg_time | time | seconds |\n| pgbouncer.db_network_io | received, sent | B/s |\n\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Oracle DB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| oracledb.sessions | session | sessions |\n| oracledb.average_active_sessions | active | sessions |\n| oracledb.sessions_utilization | session_limit | percent |\n| oracledb.current_logons | logons | logons |\n| oracledb.logons | logons | logons/s |\n| oracledb.database_wait_time_ratio | db_wait_time | percent |\n| oracledb.sql_service_response_time | sql_resp_time | seconds |\n| oracledb.enqueue_timeouts | enqueue | timeouts/s |\n| oracledb.disk_io | read, written | bytes/s |\n| oracledb.disk_iops | read, write | operations/s |\n| oracledb.sorts | memory, disk | sorts/s |\n| oracledb.table_scans | short_table, long_table | scans/s |\n| oracledb.cache_hit_ratio | buffer, cursor, library, row | percent |\n| oracledb.global_cache_blocks | corrupted, lost | blocks/s |\n| oracledb.activity | parse, execute, user_commits, user_rollbacks | events/s |\n\n### Per tablespace\n\nThese metrics refer to the Tablespace.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| tablespace | Tablespace name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| oracledb.tablespace_utilization | utilization | percent |\n| oracledb.tablespace_usage | avail, used | bytes |\n\n### Per wait class\n\nThese metrics refer to the [Wait Class](https://docs.oracle.com/en/database/oracle/oracle-database/19/refrn/classes-of-wait-events.html).\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| wait_class | [Wait Class name](https://docs.oracle.com/en/database/oracle/oracle-database/19/refrn/classes-of-wait-events.html). 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| oracledb.wait_class_wait_time | wait_time | milliseconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-oracledb-Oracle_DB", "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/oracledb/metadata.yaml", "related_resources": "" }, { "meta": { "id": "collector-go.d.plugin-pgbouncer", "plugin_name": "go.d.plugin", "module_name": "pgbouncer", "monitored_instance": { "name": "PgBouncer", "link": "https://www.pgbouncer.org/", "icon_filename": "postgres.svg", "categories": [ "data-collection.database-servers" ] }, "keywords": [ "pgbouncer" ], "related_resources": { "integrations": { "list": [] } }, "info_provided_to_referring_integrations": { "description": "" }, "most_popular": false }, "overview": "# PgBouncer\n\nPlugin: go.d.plugin\nModule: pgbouncer\n\n## Overview\n\nThis collector monitors PgBouncer servers.\n\nExecuted queries:\n\n- `SHOW VERSION;`\n- `SHOW CONFIG;`\n- `SHOW DATABASES;`\n- `SHOW STATS;`\n- `SHOW POOLS;`\n\nInformation about the queries can be found in the [PgBouncer Documentation](https://www.pgbouncer.org/usage.html).\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Create netdata user\n\nCreate a user with `stats_users` permissions to query your PgBouncer instance.\n\nTo create the `netdata` user:\n\n- Add the `netdata` user to the `pgbouncer.ini` file:\n\n ```text\n stats_users = netdata\n ```\n\n- Add a password for the `netdata` user to the `userlist.txt` file:\n\n ```text\n \"netdata\" \"<password>\"\n ```\n\n- To verify the credentials, run the following command:\n\n ```bash\n psql -h localhost -U netdata -p 6432 pgbouncer -c \"SHOW VERSION;\" >/dev/null 2>&1 && echo OK || echo FAIL\n ```\n\n When it prompts for a password, enter the password you added to `userlist.txt`.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/pgbouncer.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/pgbouncer.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| dsn | PgBouncer server DSN (Data Source Name). See [DSN syntax](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING). | postgres://postgres:postgres@127.0.0.1:6432/pgbouncer | yes |\n| timeout | Query timeout in seconds. | 1 | no |\n\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n dsn: 'postgres://postgres:postgres@127.0.0.1:6432/pgbouncer'\n\n```\n##### Unix socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n dsn: 'host=/tmp dbname=pgbouncer user=postgres port=6432'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n dsn: 'postgres://postgres:postgres@127.0.0.1:6432/pgbouncer'\n\n - name: remote\n dsn: 'postgres://postgres:postgres@203.0.113.10:6432/pgbouncer'\n\n```\n", + "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `pgbouncer` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m pgbouncer\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `pgbouncer` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep pgbouncer\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep pgbouncer /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep pgbouncer\n```\n\n", + "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per PgBouncer instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| pgbouncer.client_connections_utilization | used | percentage |\n\n### Per database\n\nThese metrics refer to the database.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| database | database name |\n| postgres_database | Postgres database name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| pgbouncer.db_client_connections | active, waiting, cancel_req | connections |\n| pgbouncer.db_server_connections | active, idle, used, tested, login | connections |\n| pgbouncer.db_server_connections_utilization | used | percentage |\n| pgbouncer.db_clients_wait_time | time | seconds |\n| pgbouncer.db_client_max_wait_time | time | seconds |\n| pgbouncer.db_transactions | transactions | transactions/s |\n| pgbouncer.db_transactions_time | time | seconds |\n| pgbouncer.db_transaction_avg_time | time | seconds |\n| pgbouncer.db_queries | queries | queries/s |\n| pgbouncer.db_queries_time | time | seconds |\n| pgbouncer.db_query_avg_time | time | seconds |\n| pgbouncer.db_network_io | received, sent | B/s |\n\n", + "integration_type": "collector", + "id": "go.d.plugin-pgbouncer-PgBouncer", + "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/pgbouncer/metadata.yaml", + "related_resources": "" + }, + { + "meta": { + "id": "collector-go.d.plugin-phpdaemon", + "plugin_name": "go.d.plugin", + "module_name": "phpdaemon", + "monitored_instance": { + "name": "phpDaemon", + "link": "https://github.com/kakserpom/phpdaemon", "icon_filename": "php.svg", "categories": [ "data-collection.apm" @@ -5801,7 +5990,7 @@ "most_popular": false }, "overview": "# phpDaemon\n\nPlugin: go.d.plugin\nModule: phpdaemon\n\n## Overview\n\nThis collector monitors phpDaemon instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Enable phpDaemon's HTTP server\n\nStatistics expected to be in JSON format.\n\n
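For illustration only, a response from the `FullStatus` application configured below might look like the following (the field names match the worker-state metrics this collector exposes; the values are invented):\n\n```text\n{\"alive\":2,\"shutdown\":0,\"idle\":1,\"busy\":1,\"reloading\":0,\"preinit\":0,\"init\":0,\"initialized\":1,\"uptime\":3600}\n```\n\n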
\nphpDaemon configuration\n\nInstructions from [@METAJIJI](https://github.com/METAJIJI).\n\nTo enable `phpd` statistics over HTTP, you must enable the HTTP server and write a small application for it.\nThe application is important, because the standalone application [ServerStatus.php](https://github.com/kakserpom/phpdaemon/blob/master/PHPDaemon/Applications/ServerStatus.php) provides statistics in HTML format, which is unusable for `netdata`.\n\n```php\n// /opt/phpdaemon/conf/phpd.conf\n\npath /opt/phpdaemon/conf/AppResolver.php;\nPool:HTTPServer {\n privileged;\n listen '127.0.0.1';\n port 8509;\n}\n```\n\n```php\n// /opt/phpdaemon/conf/AppResolver.php\n\n<?php\nclass MyAppResolver extends \\PHPDaemon\\Core\\AppResolver {\n public function getRequestRoute($req, $upstream) {\n  if (preg_match('~^/(FullStatus)/?$~', $req->attrs->server['DOCUMENT_URI'], $m)) {\n   return $m[1];\n  }\n }\n}\n\nreturn new MyAppResolver;\n```\n\n```php\n// /opt/phpdaemon/conf/PHPDaemon/Applications/FullStatus.php\n\n<?php\nnamespace PHPDaemon\\Applications;\n\nuse PHPDaemon\\Core\\Daemon;\n\nclass FullStatus extends \\PHPDaemon\\Core\\AppInstance {\n public function beginRequest($req, $upstream) {\n  return new FullStatusRequest($req, $upstream, $this);\n }\n}\n\nclass FullStatusRequest extends \\PHPDaemon\\HTTPRequest\\Generic {\n public function run() {\n  $this->header('Content-Type: application/javascript; charset=utf-8');\n\n  $stat = Daemon::getStateOfWorkers();\n  $stat['uptime'] = time() - Daemon::$startTime;\n  echo json_encode($stat);\n }\n}\n```\n\n
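After restarting phpDaemon with this configuration, you can sanity-check the endpoint before pointing Netdata at it. This is a minimal check, assuming the listen address and port from the `phpd.conf` snippet above:\n\n```bash\n# Should print a flat JSON object with worker-state counters.\ncurl -s http://127.0.0.1:8509/FullStatus\n```\n\n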
\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/phpdaemon.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/phpdaemon.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8509/FullStatus | yes |\n| timeout | HTTP request timeout. | 2 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8509/FullStatus\n\n```\n##### HTTP authentication\n\nHTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8509/FullStatus\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nHTTPS with self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8509/FullStatus\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8509/FullStatus\n\n - name: remote\n url: http://192.0.2.1:8509/FullStatus\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Enable phpDaemon's HTTP server\n\nStatistics expected to be in JSON format.\n\n
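For illustration only, a response from the `FullStatus` application configured below might look like the following (the field names match the worker-state metrics this collector exposes; the values are invented):\n\n```text\n{\"alive\":2,\"shutdown\":0,\"idle\":1,\"busy\":1,\"reloading\":0,\"preinit\":0,\"init\":0,\"initialized\":1,\"uptime\":3600}\n```\n\n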
\nphpDaemon configuration\n\nInstructions from [@METAJIJI](https://github.com/METAJIJI).\n\nTo enable `phpd` statistics over HTTP, you must enable the HTTP server and write a small application for it.\nThe application is important, because the standalone application [ServerStatus.php](https://github.com/kakserpom/phpdaemon/blob/master/PHPDaemon/Applications/ServerStatus.php) provides statistics in HTML format, which is unusable for `netdata`.\n\n```php\n// /opt/phpdaemon/conf/phpd.conf\n\npath /opt/phpdaemon/conf/AppResolver.php;\nPool:HTTPServer {\n privileged;\n listen '127.0.0.1';\n port 8509;\n}\n```\n\n```php\n// /opt/phpdaemon/conf/AppResolver.php\n\n<?php\nclass MyAppResolver extends \\PHPDaemon\\Core\\AppResolver {\n public function getRequestRoute($req, $upstream) {\n  if (preg_match('~^/(FullStatus)/?$~', $req->attrs->server['DOCUMENT_URI'], $m)) {\n   return $m[1];\n  }\n }\n}\n\nreturn new MyAppResolver;\n```\n\n```php\n// /opt/phpdaemon/conf/PHPDaemon/Applications/FullStatus.php\n\n<?php\nnamespace PHPDaemon\\Applications;\n\nuse PHPDaemon\\Core\\Daemon;\n\nclass FullStatus extends \\PHPDaemon\\Core\\AppInstance {\n public function beginRequest($req, $upstream) {\n  return new FullStatusRequest($req, $upstream, $this);\n }\n}\n\nclass FullStatusRequest extends \\PHPDaemon\\HTTPRequest\\Generic {\n public function run() {\n  $this->header('Content-Type: application/javascript; charset=utf-8');\n\n  $stat = Daemon::getStateOfWorkers();\n  $stat['uptime'] = time() - Daemon::$startTime;\n  echo json_encode($stat);\n }\n}\n```\n\n
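After restarting phpDaemon with this configuration, you can sanity-check the endpoint before pointing Netdata at it. This is a minimal check, assuming the listen address and port from the `phpd.conf` snippet above:\n\n```bash\n# Should print a flat JSON object with worker-state counters.\ncurl -s http://127.0.0.1:8509/FullStatus\n```\n\n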
\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/phpdaemon.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/phpdaemon.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8509/FullStatus | yes |\n| timeout | HTTP request timeout. | 2 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8509/FullStatus\n\n```\n##### HTTP authentication\n\nHTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8509/FullStatus\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nHTTPS with self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8509/FullStatus\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8509/FullStatus\n\n - name: remote\n url: http://192.0.2.1:8509/FullStatus\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `phpdaemon` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m phpdaemon\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `phpdaemon` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep phpdaemon\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep phpdaemon /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep phpdaemon\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per phpDaemon instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| phpdaemon.workers | alive, shutdown | workers |\n| phpdaemon.alive_workers | idle, busy, reloading | workers |\n| phpdaemon.idle_workers | preinit, init, initialized | workers |\n| phpdaemon.uptime | time | seconds |\n\n", @@ -5838,7 +6027,7 @@ "most_popular": false }, "overview": "# PHP-FPM\n\nPlugin: go.d.plugin\nModule: phpfpm\n\n## Overview\n\nThis collector monitors PHP-FPM instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Enable status page\n\nUncomment the `pm.status_path = /status` variable in the `php-fpm` config file.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/phpfpm.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/phpfpm.conf\n```\n#### Options\n\nThe 
following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/status?full&json | yes |\n| socket | Server Unix socket. | | no |\n| address | Server address in IP:PORT format. | | no |\n| fcgi_path | Status path. | /status | no |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### HTTP\n\nCollecting data from a local instance over HTTP.\n\n```yaml\njobs:\n - name: local\n url: http://localhost/status?full&json\n\n```\n##### Unix socket\n\nCollecting data from a local instance over Unix socket.\n\n```yaml\njobs:\n - name: local\n socket: '/tmp/php-fpm.sock'\n\n```\n##### TCP socket\n\nCollecting data from a local instance over TCP socket.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:9000\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://localhost/status?full&json\n\n - name: remote\n url: http://203.0.113.10/status?full&json\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Enable status page\n\nUncomment the `pm.status_path = /status` variable in the `php-fpm` config file.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/phpfpm.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/phpfpm.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/status?full&json | yes |\n| socket | Server Unix socket. | | no |\n| address | Server address in IP:PORT format. | | no |\n| fcgi_path | Status path. | /status | no |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. 
| | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### HTTP\n\nCollecting data from a local instance over HTTP.\n\n```yaml\njobs:\n - name: local\n url: http://localhost/status?full&json\n\n```\n##### Unix socket\n\nCollecting data from a local instance over Unix socket.\n\n```yaml\njobs:\n - name: local\n socket: '/tmp/php-fpm.sock'\n\n```\n##### TCP socket\n\nCollecting data from a local instance over TCP socket.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:9000\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://localhost/status?full&json\n\n - name: remote\n url: http://203.0.113.10/status?full&json\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `phpfpm` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m phpfpm\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `phpfpm` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep phpfpm\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep phpfpm /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep phpfpm\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per PHP-FPM instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| phpfpm.connections | active, max_active, idle | connections |\n| phpfpm.requests | requests | requests/s |\n| phpfpm.performance | max_children_reached, slow_requests | status |\n| phpfpm.request_duration | min, max, avg | milliseconds |\n| phpfpm.request_cpu | min, max, avg | percentage |\n| phpfpm.request_mem | min, max, avg | KB |\n\n", @@ -5874,7 +6063,7 @@ "most_popular": false }, "overview": "# Pi-hole\n\nPlugin: go.d.plugin\nModule: pihole\n\n## Overview\n\nThis collector monitors Pi-hole instances using [PHP API](https://github.com/pi-hole/AdminLTE).\n\nThe data provided by the API is for the last 24 hours. All collected values refer to this time period and not to the\nmodule's collection interval.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/pihole.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/pihole.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1 | yes |\n| setup_vars_path | Path to setupVars.conf. This file is used to get the web password. | /etc/pihole/setupVars.conf | no |\n| timeout | HTTP request timeout. | 5 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. 
Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1\n\n```\n##### HTTPS with self-signed certificate\n\nRemote instance with enabled HTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://203.0.113.11\n tls_skip_verify: yes\n password: 1ebd33f882f9aa5fac26a7cb74704742f91100228eb322e41b7bd6e6aeb8f74b\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1\n\n - name: remote\n url: http://203.0.113.10\n password: 1ebd33f882f9aa5fac26a7cb74704742f91100228eb322e41b7bd6e6aeb8f74b\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/pihole.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/pihole.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1 | yes |\n| setup_vars_path | Path to setupVars.conf. This file is used to get the web password. | /etc/pihole/setupVars.conf | no |\n| timeout | HTTP request timeout. | 5 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1\n\n```\n##### HTTPS with self-signed certificate\n\nRemote instance with enabled HTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://203.0.113.11\n tls_skip_verify: yes\n password: 1ebd33f882f9aa5fac26a7cb74704742f91100228eb322e41b7bd6e6aeb8f74b\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1\n\n - name: remote\n url: http://203.0.113.10\n password: 1ebd33f882f9aa5fac26a7cb74704742f91100228eb322e41b7bd6e6aeb8f74b\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `pihole` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m pihole\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `pihole` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep pihole\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep pihole /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep pihole\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ pihole_blocklist_last_update ](https://github.com/netdata/netdata/blob/master/src/health/health.d/pihole.conf) | pihole.blocklist_last_update | gravity.list (blocklist) file last update time |\n| [ pihole_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/pihole.conf) | pihole.unwanted_domains_blocking_status | unwanted domains blocking is disabled |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Pi-hole instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| pihole.dns_queries_total | queries | queries |\n| pihole.dns_queries | cached, blocked, forwarded | queries |\n| pihole.dns_queries_percentage | cached, blocked, forwarded | percentage |\n| pihole.unique_clients | unique | clients |\n| pihole.domains_on_blocklist | blocklist | domains |\n| pihole.blocklist_last_update | ago | seconds |\n| pihole.unwanted_domains_blocking_status | enabled, disabled | status |\n| pihole.dns_queries_types | a, aaaa, any, ptr, soa, srv, txt | percentage |\n| pihole.dns_queries_forwarded_destination | cached, blocked, other | percentage |\n\n", @@ -5911,7 +6100,7 @@ "most_popular": false }, "overview": "# Pika\n\nPlugin: go.d.plugin\nModule: pika\n\n## Overview\n\nThis collector monitors Pika servers.\n\nIt collects information and statistics about the server executing the following commands:\n\n- [`INFO ALL`](https://github.com/OpenAtomFoundation/pika/wiki/pika-info%E4%BF%A1%E6%81%AF%E8%AF%B4%E6%98%8E)\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/pika.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/pika.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Pika server address. | redis://@localhost:9221 | yes |\n| timeout | Dial (establishing new connections), read (socket reads) and write (socket writes) timeout in seconds. | 1 | no |\n| username | Username used for authentication. | | no |\n| password | Password used for authentication. | | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certificate authority that client use when verifying server certificates. | | no |\n| tls_cert | Client tls certificate. | | no |\n| tls_key | Client tls key. 
| | no |\n\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n address: 'redis://@localhost:9221'\n\n```\n##### TCP socket with password\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n address: 'redis://:password@127.0.0.1:9221'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 'redis://:password@127.0.0.1:9221'\n\n - name: remote\n address: 'redis://user:password@203.0.113.0:9221'\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/pika.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/pika.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Pika server address. | redis://@localhost:9221 | yes |\n| timeout | Dial (establishing new connections), read (socket reads) and write (socket writes) timeout in seconds. | 1 | no |\n| username | Username used for authentication. | | no |\n| password | Password used for authentication. | | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certificate authority that the client uses when verifying server certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n address: 'redis://@localhost:9221'\n\n```\n##### TCP socket with password\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n address: 'redis://:password@127.0.0.1:9221'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 'redis://:password@127.0.0.1:9221'\n\n - name: remote\n address: 'redis://user:password@203.0.113.0:9221'\n\n```\n",
    "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `pika` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m pika\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `pika` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep pika\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep pika /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep pika\n```\n\n",
    "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n",
    "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Pika instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| pika.connections | accepted | connections |\n| pika.clients | connected | clients |\n| pika.memory | used | bytes |\n| pika.connected_replicas | connected | replicas |\n| pika.commands | processed | commands/s |\n| pika.commands_calls | a dimension per command | calls/s |\n| pika.database_strings_keys | a dimension per database | keys |\n| pika.database_strings_expires_keys | a dimension per database | keys |\n| pika.database_strings_invalid_keys | a dimension per database | keys |\n| pika.database_hashes_keys | a dimension per database | keys |\n| pika.database_hashes_expires_keys | a dimension per database | keys |\n| pika.database_hashes_invalid_keys | a dimension per database | keys |\n| pika.database_lists_keys | a dimension per database | keys |\n| pika.database_lists_expires_keys | a dimension per database | keys |\n| pika.database_lists_invalid_keys | a dimension per database | keys |\n| pika.database_zsets_keys | a dimension per database | keys |\n| pika.database_zsets_expires_keys | a dimension per database | keys |\n| pika.database_zsets_invalid_keys | a dimension per database | keys |\n| pika.database_sets_keys | a dimension per database | keys |\n| pika.database_sets_expires_keys | a dimension per database | keys |\n| pika.database_sets_invalid_keys | a dimension per database | keys |\n| pika.uptime | uptime | seconds |\n\n",
@@ -5946,8 +6135,8 @@
 },
 "most_popular": false
 },
-    "overview": "# Ping\n\nPlugin: go.d.plugin\nModule: ping\n\n## Overview\n\nThis module measures round-trip time and packet loss by 
sending ping messages to network hosts.\n\nThere are two operational modes:\n\n- privileged (send raw ICMP ping, default). Requires\n CAP_NET_RAW [capability](https://man7.org/linux/man-pages/man7/capabilities.7.html) or root privileges:\n > **Note**: set automatically during Netdata installation.\n\n ```bash\n sudo setcap CAP_NET_RAW=eip /usr/libexec/netdata/plugins.d/go.d.plugin\n ```\n\n- unprivileged (send UDP ping, Linux only).\n Requires configuring [ping_group_range](https://www.man7.org/linux/man-pages/man7/icmp.7.html):\n\n ```bash\n sudo sysctl -w net.ipv4.ping_group_range=\"0 2147483647\"\n ```\n To persist the change add `net.ipv4.ping_group_range=0 2147483647` to `/etc/sysctl.conf` and\n execute `sudo sysctl -p`.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/ping.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/ping.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| hosts | Network hosts. | | yes |\n| network | Allows configuration of DNS resolution. Supported options: ip (select IPv4 or IPv6), ip4 (select IPv4), ip6 (select IPv6). | ip | no |\n| privileged | Ping packets type. \"no\" means send an \"unprivileged\" UDP ping, \"yes\" - raw ICMP ping. | yes | no |\n| packets | Number of ping packets to send. | 5 | no |\n| interval | Timeout between sending ping packets. | 100ms | no |\n\n#### Examples\n\n##### IPv4 hosts\n\nAn example configuration.\n\n```yaml\njobs:\n - name: example\n hosts:\n - 192.0.2.0\n - 192.0.2.1\n\n```\n##### Unprivileged mode\n\nAn example configuration.\n\n```yaml\njobs:\n - name: example\n privileged: no\n hosts:\n - 192.0.2.0\n - 192.0.2.1\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nMultiple instances.\n\n\n```yaml\njobs:\n - name: example1\n hosts:\n - 192.0.2.0\n - 192.0.2.1\n\n - name: example2\n packets: 10\n hosts:\n - 192.0.2.3\n - 192.0.2.4\n\n```\n", + "overview": "# Ping\n\nPlugin: go.d.plugin\nModule: ping\n\n## Overview\n\nThis module measures round-trip time and packet loss by sending ping messages to network hosts.\n\nThere are two operational modes:\n\n- **Privileged** (send raw ICMP ping, default). 
Requires the necessary permissions ([CAP_NET_RAW](https://man7.org/linux/man-pages/man7/capabilities.7.html) on Linux, `setuid` bit on other systems).\n\n These permissions are **automatically** set during Netdata installation. However, if you need to set them manually:\n - set `CAP_NET_RAW` (Linux only).\n ```bash\n sudo setcap CAP_NET_RAW=eip /usr/libexec/netdata/plugins.d/go.d.plugin\n ```\n - set `setuid` bit (Other OS).\n ```bash\n sudo chmod 4750 /usr/libexec/netdata/plugins.d/go.d.plugin\n ```\n\n- **Unprivileged** (send UDP ping, Linux only). Requires configuring [ping_group_range](https://www.man7.org/linux/man-pages/man7/icmp.7.html):\n\n This configuration is **not set automatically** and requires manual configuration.\n\n ```bash\n sudo sysctl -w net.ipv4.ping_group_range=\"0 2147483647\"\n ```\n\n To persist the change add `net.ipv4.ping_group_range=0 2147483647` to `/etc/sysctl.conf` and execute `sudo sysctl -p`.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/ping.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/ping.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| hosts | Network hosts. | | yes |\n| network | Allows configuration of DNS resolution. Supported options: ip (select IPv4 or IPv6), ip4 (select IPv4), ip6 (select IPv6). | ip | no |\n| privileged | Ping packets type. \"no\" means send an \"unprivileged\" UDP ping, \"yes\" - raw ICMP ping. | yes | no |\n| packets | Number of ping packets to send. | 5 | no |\n| interval | Timeout between sending ping packets. 
| 100ms | no |\n\n#### Examples\n\n##### IPv4 hosts\n\nAn example configuration.\n\n```yaml\njobs:\n - name: example\n hosts:\n - 192.0.2.0\n - 192.0.2.1\n\n```\n##### Unprivileged mode\n\nAn example configuration.\n\n```yaml\njobs:\n - name: example\n privileged: no\n hosts:\n - 192.0.2.0\n - 192.0.2.1\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nMultiple instances.\n\n\n```yaml\njobs:\n - name: example1\n hosts:\n - 192.0.2.0\n - 192.0.2.1\n\n - name: example2\n packets: 10\n hosts:\n - 192.0.2.3\n - 192.0.2.4\n\n```\n",
    "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `ping` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m ping\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `ping` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep ping\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep ping /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep ping\n```\n\n",
    "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ ping_host_reachable ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ping.conf) | ping.host_packet_loss | network host ${label:host} reachability status |\n| [ ping_packet_loss ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ping.conf) | ping.host_packet_loss | packet loss percentage to the network host ${label:host} over the last 10 minutes |\n| [ ping_host_latency ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ping.conf) | ping.host_rtt | average latency to the network host ${label:host} over the last 10 seconds |\n",
    "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
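If the charts described here stay empty while running in the default privileged mode, it is worth re-checking that the `CAP_NET_RAW` capability mentioned in the Overview is still set on the plugin binary (a sketch assuming the default plugin path; the output format varies with the libcap version):\n\n```bash\ngetcap /usr/libexec/netdata/plugins.d/go.d.plugin\n# expected output similar to:\n# /usr/libexec/netdata/plugins.d/go.d.plugin cap_net_raw=eip\n```\n\n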
An instance is uniquely identified by a set of labels.\n\n\n\n### Per host\n\nThese metrics refer to the remote host.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| host | remote host |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ping.host_rtt | min, max, avg | milliseconds |\n| ping.host_std_dev_rtt | std_dev | milliseconds |\n| ping.host_packet_loss | loss | percentage |\n| ping.host_packets | received, sent | packets |\n\n", @@ -5962,7 +6151,7 @@ "plugin_name": "go.d.plugin", "module_name": "portcheck", "monitored_instance": { - "name": "TCP Endpoints", + "name": "TCP/UDP Endpoints", "link": "", "icon_filename": "globe.svg", "categories": [ @@ -5980,13 +6169,13 @@ }, "most_popular": false }, - "overview": "# TCP Endpoints\n\nPlugin: go.d.plugin\nModule: portcheck\n\n## Overview\n\nThis collector monitors TCP services availability and response time.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/portcheck.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/portcheck.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| host | Remote host address in IPv4, IPv6 format, or DNS name. | | yes |\n| ports | Remote host ports. Must be specified in numeric format. | | yes |\n| timeout | HTTP request timeout. | 2 | no |\n\n#### Examples\n\n##### Check SSH and telnet\n\nAn example configuration.\n\n```yaml\njobs:\n - name: server1\n host: 127.0.0.1\n ports:\n - 22\n - 23\n\n```\n##### Check webserver with IPv6 address\n\nAn example configuration.\n\n```yaml\njobs:\n - name: server2\n host: \"[2001:DB8::1]\"\n ports:\n - 80\n - 8080\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nMultiple instances.\n\n\n```yaml\njobs:\n - name: server1\n host: 127.0.0.1\n ports:\n - 22\n - 23\n\n - name: server2\n host: 203.0.113.10\n ports:\n - 22\n - 23\n\n```\n", + "overview": "# TCP/UDP Endpoints\n\nPlugin: go.d.plugin\nModule: portcheck\n\n## Overview\n\nCollector for monitoring service availability and response time. 
It can be used to check if specific ports are open or reachable on a target system.\n\nIt supports both TCP and UDP protocols over IPv4 and IPv6 networks.\n\n| Protocol | Check Description |\n|----------|-----------------------------------------------------------------------------------------------------------------------------|\n| TCP | Attempts to establish a TCP connection to the specified ports on the target system. |\n| UDP | Sends a 0-byte UDP packet to the specified ports on the target system and analyzes ICMP responses to determine port status. |\n\nPossible TCP statuses:\n\n| TCP Status | Description |\n|------------|-------------------------------------------------------------|\n| success | Connection established successfully. |\n| timeout | Connection timed out after waiting for the configured duration. |\n| failed | An error occurred during the connection attempt. |\n\nPossible UDP statuses:\n\n| UDP Status | Description |\n|---------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------|\n| open/filtered | No response received within the configured timeout. This status indicates the port is either open or filtered, but the exact state cannot be determined definitively. |\n| closed | Received an ICMP Destination Unreachable message, indicating the port is closed. |\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",
+    "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/portcheck.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/portcheck.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| host | Remote host address in IPv4, IPv6 format, or DNS name. | | yes |\n| ports | Target TCP ports. Must be specified in numeric format. | | no |\n| udp_ports | Target UDP ports. Must be specified in numeric format. | | no |\n| timeout | Connection timeout in seconds. 
| 2 | no |\n\n#### Examples\n\n##### Check TCP ports (IPv4)\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n host: 127.0.0.1\n ports:\n - 22\n - 23\n\n```\n##### Check TCP ports (IPv6)\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n host: \"[2001:DB8::1]\"\n ports:\n - 80\n - 8080\n\n```\n##### Check UDP ports (IPv4)\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n host: 127.0.0.1\n udp_ports:\n - 3120\n - 3121\n\n```\n##### Check UDP ports (IPv6)\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n host: \"[::1]\"\n udp_ports:\n - 3120\n - 3121\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nMultiple instances.\n\n\n```yaml\njobs:\n - name: server1\n host: 127.0.0.1\n ports:\n - 22\n - 23\n\n - name: server2\n host: 203.0.113.10\n ports:\n - 22\n - 23\n\n```\n",
    "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `portcheck` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m portcheck\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `portcheck` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep portcheck\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep portcheck /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
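For instance, to look at only the tail end of the filtered log (plain shell, nothing portcheck-specific):\n\n```bash\ngrep portcheck /var/log/netdata/collector.log | tail -n 20\n```\n\n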
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep portcheck\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ portcheck_service_reachable ](https://github.com/netdata/netdata/blob/master/src/health/health.d/portcheck.conf) | portcheck.status | TCP host ${label:host} port ${label:port} liveness status |\n| [ portcheck_connection_timeouts ](https://github.com/netdata/netdata/blob/master/src/health/health.d/portcheck.conf) | portcheck.status | percentage of timed-out TCP connections to host ${label:host} port ${label:port} in the last 5 minutes |\n| [ portcheck_connection_fails ](https://github.com/netdata/netdata/blob/master/src/health/health.d/portcheck.conf) | portcheck.status | percentage of failed TCP connections to host ${label:host} port ${label:port} in the last 5 minutes |\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per tcp endpoint\n\nThese metrics refer to the TCP endpoint.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| host | host |\n| port | port |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| portcheck.status | success, failed, timeout | boolean |\n| portcheck.state_duration | time | seconds |\n| portcheck.latency | time | ms |\n\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per TCP endpoint\n\nThese metrics refer to the TCP endpoint.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| host | The hostname or IP address of the target system, as specified in the configuration. |\n| port | The TCP port being monitored, as defined in the 'ports' configuration parameter. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| portcheck.status | success, failed, timeout | boolean |\n| portcheck.state_duration | time | seconds |\n| portcheck.latency | time | ms |\n\n### Per UDP endpoint\n\nThese metrics refer to the UDP endpoint.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| host | The hostname or IP address of the target system, as specified in the configuration. |\n| port | The UDP port being monitored, as defined in the 'udp_ports' configuration parameter. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| portcheck.udp_port_status | open/filtered, closed | status |\n| portcheck.udp_port_status_duration | time | seconds |\n\n", "integration_type": "collector", - "id": "go.d.plugin-portcheck-TCP_Endpoints", + "id": "go.d.plugin-portcheck-TCP/UDP_Endpoints", "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/portcheck/metadata.yaml", "related_resources": "" }, @@ -6018,7 +6207,7 @@ "most_popular": false }, "overview": "# Postfix\n\nPlugin: go.d.plugin\nModule: postfix\n\n## Overview\n\nThis collector retrieves statistics about the Postfix mail queue using the [postqueue](https://www.postfix.org/postqueue.1.html) command-line tool.\n\n\nIt periodically executes the `postqueue -p` command. 
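For reference, the queue listing being parsed looks roughly like the following (illustrative output; the exact layout can vary between Postfix versions):\n\n```bash\npostqueue -p\n# -Queue ID-  --Size-- ----Arrival Time---- -Sender/Recipient-------\n# 1A2B3C4D5E      1024 Mon Jul  1 10:00:00  sender@example.com\n#                                           recipient@example.net\n#\n# -- 1 Kbytes in 1 Request.\n```\n\n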
The collection interval is set to 10 seconds by default, but it is configurable.\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\nPostfix has internal access controls for the mail queue. By default, all users can view the queue. If your system has stricter controls, grant the `netdata` user access by adding it to `authorized_mailq_users` in the `/etc/postfix/main.cf` file. For more details, refer to the `authorized_mailq_users` setting in the [Postfix documentation](https://www.postfix.org/postconf.5.html).\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector executes `postqueue -p` to get Postfix queue statistics.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",
    "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/postfix.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/postfix.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| binary_path | Path to the `postqueue` binary. If an absolute path is provided, the collector will use it directly; otherwise, it will search for the binary in directories specified in the PATH environment variable. | /usr/sbin/postqueue | yes |\n| timeout | Timeout for executing the binary, specified in seconds. 
| 2 | no |\n\n#### Examples\n\n##### Custom binary path\n\nThe executable is not in the directories specified in the PATH environment variable.\n\n```yaml\njobs:\n - name: custom_path\n binary_path: /usr/local/sbin/postqueue\n\n```\n",
    "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `postfix` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m postfix\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `postfix` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep postfix\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep postfix /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep postfix\n```\n\n",
    "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n",
    "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
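One quick way to confirm the queue metrics below are actually flowing (a sketch assuming a local Agent listening on the default port 19999) is to grep the Agent's Prometheus-format export for the chart name:\n\n```bash\ncurl -s \"http://127.0.0.1:19999/api/v1/allmetrics?format=prometheus\" | grep postfix_qemails\n```\n\n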
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Postfix instance\n\nThese metrics refer to the entire monitored application.\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postfix.qemails | emails | emails |\n| postfix.qsize | size | KiB |\n\n", @@ -6068,7 +6257,7 @@ "most_popular": true }, "overview": "# PostgreSQL\n\nPlugin: go.d.plugin\nModule: postgres\n\n## Overview\n\nThis collector monitors the activity and performance of Postgres servers, collects replication statistics, metrics for each database, table and index, and more.\n\n\nIt establishes a connection to the Postgres instance via a TCP or UNIX socket.\nTo collect metrics for database tables and indexes, it establishes an additional connection for each discovered database.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by trying to connect as root and netdata using known PostgreSQL TCP and UNIX sockets:\n\n- 127.0.0.1:5432\n- /var/run/postgresql/\n\n\n#### Limits\n\nTable and index metrics are not collected for databases with more than 50 tables or 250 indexes.\nThese limits can be changed in the configuration file.\n\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Create netdata user\n\nCreate a user with granted `pg_monitor`\nor `pg_read_all_stat` [built-in role](https://www.postgresql.org/docs/current/predefined-roles.html).\n\nTo create the `netdata` user with these permissions, execute the following in the psql session, as a user with CREATEROLE privileges:\n\n```postgresql\nCREATE USER netdata;\nGRANT pg_monitor TO netdata;\n```\n\nAfter creating the new user, restart the Netdata agent with `sudo systemctl restart netdata`, or\nthe [appropriate method](/docs/netdata-agent/start-stop-restart.md) for your\nsystem.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/postgres.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/postgres.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | Postgres server DSN (Data Source Name). See [DSN syntax](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING). | postgres://postgres:postgres@127.0.0.1:5432/postgres | yes |\n| timeout | Query timeout in seconds. | 2 | no |\n| collect_databases_matching | Databases selector. Determines which database metrics will be collected. Syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/matcher#simple-patterns-matcher). | | no |\n| max_db_tables | Maximum number of tables in the database. 
Table metrics will not be collected for databases that have more tables than max_db_tables. 0 means no limit. | 50 | no |\n| max_db_indexes | Maximum number of indexes in the database. Index metrics will not be collected for databases that have more indexes than max_db_indexes. 0 means no limit. | 250 | no |\n\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n dsn: 'postgresql://netdata@127.0.0.1:5432/postgres'\n\n```\n##### Unix socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n dsn: 'host=/var/run/postgresql dbname=postgres user=netdata'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n dsn: 'postgresql://netdata@127.0.0.1:5432/postgres'\n\n - name: remote\n dsn: 'postgresql://netdata@203.0.113.0:5432/postgres'\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Create netdata user\n\nCreate a user with granted `pg_monitor`\nor `pg_read_all_stats` [built-in role](https://www.postgresql.org/docs/current/predefined-roles.html).\n\nTo create the `netdata` user with these permissions, execute the following in the psql session, as a user with CREATEROLE privileges:\n\n```postgresql\nCREATE USER netdata;\nGRANT pg_monitor TO netdata;\n```\n\nAfter creating the new user, restart the Netdata agent with `sudo systemctl restart netdata`, or\nthe [appropriate method](/docs/netdata-agent/start-stop-restart.md) for your\nsystem.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/postgres.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/postgres.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | Postgres server DSN (Data Source Name). See [DSN syntax](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING). | postgres://postgres:postgres@127.0.0.1:5432/postgres | yes |\n| timeout | Query timeout in seconds. | 2 | no |\n| collect_databases_matching | Databases selector. Determines which database metrics will be collected. Syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/go/pkg/matcher#simple-patterns-matcher). | | no |\n| max_db_tables | Maximum number of tables in the database. Table metrics will not be collected for databases that have more tables than max_db_tables. 0 means no limit. | 50 | no |\n| max_db_indexes | Maximum number of indexes in the database. Index metrics will not be collected for databases that have more indexes than max_db_indexes. 0 means no limit. 
| 250 | no |\n\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n dsn: 'postgresql://netdata@127.0.0.1:5432/postgres'\n\n```\n##### Unix socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n dsn: 'host=/var/run/postgresql dbname=postgres user=netdata'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n dsn: 'postgresql://netdata@127.0.0.1:5432/postgres'\n\n - name: remote\n dsn: 'postgresql://netdata@203.0.113.0:5432/postgres'\n\n```\n",
    "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `postgres` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m postgres\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `postgres` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep postgres\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep postgres /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
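Independently of the logs, you can check that the `netdata` database user created in the prerequisites can actually connect (a sketch, assuming the TCP socket DSN from the examples above):\n\n```bash\npsql 'postgresql://netdata@127.0.0.1:5432/postgres' -c 'SELECT version();'\n```\n\nAs for the log output itself: 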
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep postgres\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ postgres_total_connection_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.connections_utilization | average total connection utilization over the last minute |\n| [ postgres_acquired_locks_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.locks_utilization | average acquired locks utilization over the last minute |\n| [ postgres_txid_exhaustion_perc ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.txid_exhaustion_perc | percent towards TXID wraparound |\n| [ postgres_db_cache_io_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.db_cache_io_ratio | average cache hit ratio in db ${label:database} over the last minute |\n| [ postgres_db_transactions_rollback_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.db_cache_io_ratio | average aborted transactions percentage in db ${label:database} over the last five minutes |\n| [ postgres_db_deadlocks_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.db_deadlocks_rate | number of deadlocks detected in db ${label:database} in the last minute |\n| [ postgres_table_cache_io_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_cache_io_ratio | average cache hit ratio in db ${label:database} table ${label:table} over the last minute |\n| [ postgres_table_index_cache_io_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_index_cache_io_ratio | average index cache hit ratio in db ${label:database} table ${label:table} over the last minute |\n| [ postgres_table_toast_cache_io_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_toast_cache_io_ratio | average TOAST hit ratio in db ${label:database} table ${label:table} over the last minute |\n| [ postgres_table_toast_index_cache_io_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_toast_index_cache_io_ratio | average index TOAST hit ratio in db ${label:database} table ${label:table} over the last minute |\n| [ postgres_table_bloat_size_perc ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_bloat_size_perc | bloat size percentage in db ${label:database} table ${label:table} |\n| [ postgres_table_last_autovacuum_time ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_autovacuum_since_time | time elapsed since db ${label:database} table ${label:table} was vacuumed by the autovacuum daemon |\n| [ postgres_table_last_autoanalyze_time ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_autoanalyze_since_time | time elapsed since db ${label:database} table ${label:table} was analyzed by the autovacuum daemon |\n| [ 
postgres_index_bloat_size_perc ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.index_bloat_size_perc | bloat size percentage in db ${label:database} table ${label:table} index ${label:index} |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per PostgreSQL instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postgres.connections_utilization | used | percentage |\n| postgres.connections_usage | available, used | connections |\n| postgres.connections_state_count | active, idle, idle_in_transaction, idle_in_transaction_aborted, disabled | connections |\n| postgres.transactions_duration | a dimension per bucket | transactions/s |\n| postgres.queries_duration | a dimension per bucket | queries/s |\n| postgres.locks_utilization | used | percentage |\n| postgres.checkpoints_rate | scheduled, requested | checkpoints/s |\n| postgres.checkpoints_time | write, sync | milliseconds |\n| postgres.bgwriter_halts_rate | maxwritten | events/s |\n| postgres.buffers_io_rate | checkpoint, backend, bgwriter | B/s |\n| postgres.buffers_backend_fsync_rate | fsync | calls/s |\n| postgres.buffers_allocated_rate | allocated | B/s |\n| postgres.wal_io_rate | write | B/s |\n| postgres.wal_files_count | written, recycled | files |\n| postgres.wal_archiving_files_count | ready, done | files/s |\n| postgres.autovacuum_workers_count | analyze, vacuum_analyze, vacuum, vacuum_freeze, brin_summarize | workers |\n| postgres.txid_exhaustion_towards_autovacuum_perc | emergency_autovacuum | percentage |\n| postgres.txid_exhaustion_perc | txid_exhaustion | percentage |\n| postgres.txid_exhaustion_oldest_txid_num | xid | xid |\n| postgres.catalog_relations_count | ordinary_table, index, sequence, toast_table, view, materialized_view, composite_type, foreign_table, partitioned_table, partitioned_index | relations |\n| postgres.catalog_relations_size | ordinary_table, index, sequence, toast_table, view, materialized_view, composite_type, foreign_table, partitioned_table, partitioned_index | B |\n| postgres.uptime | uptime | seconds |\n| postgres.databases_count | databases | databases |\n\n### Per repl application\n\nThese metrics refer to the replication application.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| application | application name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postgres.replication_app_wal_lag_size | sent_lag, write_lag, flush_lag, replay_lag | B |\n| postgres.replication_app_wal_lag_time | write_lag, flush_lag, replay_lag | seconds |\n\n### Per repl slot\n\nThese metrics refer to the replication slot.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| slot | replication slot name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postgres.replication_slot_files_count | wal_keep, pg_replslot_files | files |\n\n### Per database\n\nThese metrics refer to the database.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| database | database name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postgres.db_transactions_ratio | committed, rollback | percentage |\n| postgres.db_transactions_rate | committed, rollback | transactions/s |\n| 
postgres.db_connections_utilization | used | percentage |\n| postgres.db_connections_count | connections | connections |\n| postgres.db_cache_io_ratio | miss | percentage |\n| postgres.db_io_rate | memory, disk | B/s |\n| postgres.db_ops_fetched_rows_ratio | fetched | percentage |\n| postgres.db_ops_read_rows_rate | returned, fetched | rows/s |\n| postgres.db_ops_write_rows_rate | inserted, deleted, updated | rows/s |\n| postgres.db_conflicts_rate | conflicts | queries/s |\n| postgres.db_conflicts_reason_rate | tablespace, lock, snapshot, bufferpin, deadlock | queries/s |\n| postgres.db_deadlocks_rate | deadlocks | deadlocks/s |\n| postgres.db_locks_held_count | access_share, row_share, row_exclusive, share_update, share, share_row_exclusive, exclusive, access_exclusive | locks |\n| postgres.db_locks_awaited_count | access_share, row_share, row_exclusive, share_update, share, share_row_exclusive, exclusive, access_exclusive | locks |\n| postgres.db_temp_files_created_rate | created | files/s |\n| postgres.db_temp_files_io_rate | written | B/s |\n| postgres.db_size | size | B |\n\n### Per table\n\nThese metrics refer to the database table.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| database | database name |\n| schema | schema name |\n| table | table name |\n| parent_table | parent table name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postgres.table_rows_dead_ratio | dead | percentage |\n| postgres.table_rows_count | live, dead | rows |\n| postgres.table_ops_rows_rate | inserted, deleted, updated | rows/s |\n| postgres.table_ops_rows_hot_ratio | hot | percentage |\n| postgres.table_ops_rows_hot_rate | hot | rows/s |\n| postgres.table_cache_io_ratio | miss | percentage |\n| postgres.table_io_rate | memory, disk | B/s |\n| postgres.table_index_cache_io_ratio | miss | percentage |\n| postgres.table_index_io_rate | memory, disk | B/s |\n| postgres.table_toast_cache_io_ratio | miss | percentage |\n| postgres.table_toast_io_rate | memory, disk | B/s |\n| postgres.table_toast_index_cache_io_ratio | miss | percentage |\n| postgres.table_toast_index_io_rate | memory, disk | B/s |\n| postgres.table_scans_rate | index, sequential | scans/s |\n| postgres.table_scans_rows_rate | index, sequential | rows/s |\n| postgres.table_autovacuum_since_time | time | seconds |\n| postgres.table_vacuum_since_time | time | seconds |\n| postgres.table_autoanalyze_since_time | time | seconds |\n| postgres.table_analyze_since_time | time | seconds |\n| postgres.table_null_columns | null | columns |\n| postgres.table_size | size | B |\n| postgres.table_bloat_size_perc | bloat | percentage |\n| postgres.table_bloat_size | bloat | B |\n\n### Per index\n\nThese metrics refer to the table index.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| database | database name |\n| schema | schema name |\n| table | table name |\n| parent_table | parent table name |\n| index | index name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postgres.index_size | size | B |\n| postgres.index_bloat_size_perc | bloat | percentage |\n| postgres.index_bloat_size | bloat | B |\n| postgres.index_usage_status | used, unused | status |\n\n", @@ -6105,7 +6294,7 @@ "most_popular": false }, "overview": "# PowerDNS Authoritative Server\n\nPlugin: go.d.plugin\nModule: powerdns\n\n## Overview\n\nThis collector monitors PowerDNS Authoritative Server instances.\nIt collects metrics from [the internal 
webserver](https://doc.powerdns.com/authoritative/http-api/index.html#webserver).\n\nUsed endpoints:\n\n- [`/api/v1/servers/localhost/statistics`](https://doc.powerdns.com/authoritative/http-api/statistics.html)\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Enable webserver\n\nFollow [webserver](https://doc.powerdns.com/authoritative/http-api/index.html#webserver) documentation.\n\n\n#### Enable HTTP API\n\nFollow [HTTP API](https://doc.powerdns.com/authoritative/http-api/index.html#enabling-the-api) documentation.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/powerdns.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/powerdns.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8081 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8081\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8081\n username: admin\n password: password\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8081\n\n - name: remote\n url: http://203.0.113.0:8081\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Enable webserver\n\nFollow [webserver](https://doc.powerdns.com/authoritative/http-api/index.html#webserver) documentation.\n\n\n#### Enable HTTP API\n\nFollow [HTTP API](https://doc.powerdns.com/authoritative/http-api/index.html#enabling-the-api) documentation.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/powerdns.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/powerdns.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8081 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8081\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8081\n username: admin\n password: password\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8081\n\n - name: remote\n url: http://203.0.113.0:8081\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `powerdns` collector, run the `go.d.plugin` with the debug option enabled. 
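If your install uses the default paths shown in the steps below, the whole procedure can also be condensed into a single command (a sketch; adjust the plugin path if yours differs):\n\n```bash\nsudo -u netdata /usr/libexec/netdata/plugins.d/go.d.plugin -d -m powerdns\n```\n\n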
The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m powerdns\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `powerdns` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep powerdns\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep powerdns /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep powerdns\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per PowerDNS Authoritative Server instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| powerdns.questions_in | udp, tcp | questions/s |\n| powerdns.questions_out | udp, tcp | questions/s |\n| powerdns.cache_usage | query-cache-hit, query-cache-miss, packetcache-hit, packetcache-miss | events/s |\n| powerdns.cache_size | query-cache, packet-cache, key-cache, meta-cache | entries |\n| powerdns.latency | latency | microseconds |\n\n", @@ -6142,7 +6331,7 @@ "most_popular": false }, "overview": "# PowerDNS Recursor\n\nPlugin: go.d.plugin\nModule: powerdns_recursor\n\n## Overview\n\nThis collector monitors PowerDNS Recursor instances.\n\nIt collects metrics from [the internal webserver](https://doc.powerdns.com/recursor/http-api/index.html#built-in-webserver-and-http-api).\n\nUsed endpoints:\n\n- [`/api/v1/servers/localhost/statistics`](https://doc.powerdns.com/recursor/common/api/endpoint-statistics.html)\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Enable webserver\n\nFollow [webserver](https://doc.powerdns.com/recursor/http-api/index.html#webserver) documentation.\n\n\n#### Enable HTTP API\n\nFollow [HTTP API](https://doc.powerdns.com/recursor/http-api/index.html#enabling-the-api) documentation.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/powerdns_recursor.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/powerdns_recursor.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8081 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. 
| no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8081\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8081\n username: admin\n password: password\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8081\n\n - name: remote\n url: http://203.0.113.0:8081\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Enable webserver\n\nFollow [webserver](https://doc.powerdns.com/recursor/http-api/index.html#webserver) documentation.\n\n\n#### Enable HTTP API\n\nFollow [HTTP API](https://doc.powerdns.com/recursor/http-api/index.html#enabling-the-api) documentation.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/powerdns_recursor.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/powerdns_recursor.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8081 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8081\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8081\n username: admin\n password: password\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8081\n\n - name: remote\n url: http://203.0.113.0:8081\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `powerdns_recursor` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m powerdns_recursor\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `powerdns_recursor` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep powerdns_recursor\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep powerdns_recursor /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep powerdns_recursor\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per PowerDNS Recursor instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| powerdns_recursor.questions_in | total, tcp, ipv6 | questions/s |\n| powerdns_recursor.questions_out | udp, tcp, ipv6, throttled | questions/s |\n| powerdns_recursor.answer_time | 0-1ms, 1-10ms, 10-100ms, 100-1000ms, slow | queries/s |\n| powerdns_recursor.timeouts | total, ipv4, ipv6 | timeouts/s |\n| powerdns_recursor.drops | over-capacity-drops, query-pipe-full-drops, too-old-drops, truncated-drops, empty-queries | drops/s |\n| powerdns_recursor.cache_usage | cache-hits, cache-misses, packet-cache-hits, packet-cache-misses | events/s |\n| powerdns_recursor.cache_size | cache, packet-cache, negative-cache | entries |\n\n", @@ -6177,7 +6366,7 @@ "community": true }, "overview": "# 4D Server\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor 4D Server performance metrics for efficient application management and optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [4D Server exporter](https://github.com/ThomasMaul/Prometheus_4D_Exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [4D Server exporter](https://github.com/ThomasMaul/Prometheus_4D_Exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. 
| | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [4D Server exporter](https://github.com/ThomasMaul/Prometheus_4D_Exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config 
directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
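For instance, a minimal sketch (the `myapp_*` metric names are hypothetical) that keeps all `myapp_*` series except the `myapp_debug_*` ones, using the allow/deny syntax detailed below:\n\n```yaml\nselector:\n allow:\n - myapp_*\n deny:\n - myapp_debug_*\n```\n\n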
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -6212,7 +6401,7 @@ "community": true }, "overview": "# 8430FT modem\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep track of vital metrics from the MTS 8430FT modem for streamlined network performance and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [8430FT Exporter](https://github.com/dernasherbrezon/8430ft_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [8430FT Exporter](https://github.com/dernasherbrezon/8430ft_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from 
the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [8430FT Exporter](https://github.com/dernasherbrezon/8430ft_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -6251,7 +6440,7 @@ "community": true }, "overview": "# A10 ACOS network devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor A10 Networks device metrics for comprehensive management and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [A10-Networks Prometheus Exporter](https://github.com/a10networks/PrometheusExporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple 
instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [A10-Networks Prometheus Exporter](https://github.com/a10networks/PrometheusExporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [A10-Networks Prometheus Exporter](https://github.com/a10networks/PrometheusExporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -6286,7 +6475,7 @@ "community": true }, "overview": "# AMD CPU & GPU\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor AMD System Management Interface performance for optimized hardware management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AMD SMI Exporter](https://github.com/amd/amd_smi_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this 
integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AMD SMI Exporter](https://github.com/amd/amd_smi_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
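As a rough illustration of the allow/deny logic documented here, the following minimal Go sketch shows one way such a filter can be evaluated. This is an illustration only, not the collector's actual code: the real selector pattern syntax is richer (see the selector README linked below), and `path.Match`-style globs merely stand in for it; the pattern and series names are made up.

```go
package main

import (
	"fmt"
	"path"
)

// matchesAny reports whether name matches at least one pattern. The real
// selector syntax is richer; path.Match globs stand in for it here,
// purely for illustration.
func matchesAny(patterns []string, name string) bool {
	for _, p := range patterns {
		if ok, _ := path.Match(p, name); ok {
			return true
		}
	}
	return false
}

// selected applies the documented logic:
// (pattern1 OR pattern2) AND !(pattern3 OR pattern4).
// An empty allow list is assumed to mean "allow everything".
func selected(allow, deny []string, series string) bool {
	allowed := len(allow) == 0 || matchesAny(allow, series)
	return allowed && !matchesAny(deny, series)
}

func main() {
	allow := []string{"node_cpu_*"}
	deny := []string{"node_cpu_guest_*"}
	fmt.Println(selected(allow, deny, "node_cpu_seconds_total")) // true
	fmt.Println(selected(allow, deny, "node_cpu_guest_seconds")) // false (denied)
}
```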
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AMD SMI Exporter](https://github.com/amd/amd_smi_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -6321,7 +6510,7 @@ "community": true }, "overview": "# APIcast\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor APIcast performance metrics to optimize API gateway operations and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [APIcast](https://github.com/3scale/apicast).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote 
instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [APIcast](https://github.com/3scale/apicast) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
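A short aside on the `fallback_type` option documented just below: the Go sketch that follows shows how an untyped metric could be classified according to the rules listed in the Metrics section (fallback patterns, then the `_total` suffix, the `quantile` label, and the `le` label). It is a sketch under those stated rules, not the go.d plugin's actual implementation; `filepath.Match` is used because the docs describe the patterns as shell file name patterns, and all metric names here are invented.

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// classifyUntyped sketches the documented handling of metrics that carry
// no '# TYPE' hint. Rule order follows the Metrics section. Illustration
// only, not the collector's real code.
func classifyUntyped(name string, labels map[string]string, fallbackCounter, fallbackGauge []string) string {
	match := func(patterns []string) bool {
		for _, p := range patterns {
			if ok, _ := filepath.Match(p, name); ok {
				return true
			}
		}
		return false
	}
	switch {
	case match(fallbackCounter):
		return "counter" // fallback_type pattern matched
	case match(fallbackGauge):
		return "gauge" // fallback_type pattern matched
	case strings.HasSuffix(name, "_total"):
		return "counter"
	case labels["quantile"] != "":
		return "summary"
	case labels["le"] != "":
		return "histogram"
	default:
		return "ignored"
	}
}

func main() {
	fmt.Println(classifyUntyped("requests_total", nil, nil, nil))                     // counter
	fmt.Println(classifyUntyped("latency", map[string]string{"le": "0.5"}, nil, nil)) // histogram
	fmt.Println(classifyUntyped("queue_depth", nil, nil, []string{"queue_*"}))        // gauge
}
```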
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [APIcast](https://github.com/3scale/apicast) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -6356,7 +6545,7 @@ "community": true }, "overview": "# ARM HWCPipe\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep track of ARM running Android devices and get metrics for efficient performance optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ARM HWCPipe Exporter](https://github.com/ylz-at/arm-hwcpipe-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple 
instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ARM HWCPipe Exporter](https://github.com/ylz-at/arm-hwcpipe-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
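For intuition about the `absolute` and `incremental` algorithms named in the Metrics table further below: `absolute` charts the collected value as-is, while `incremental` charts the difference between successive collections (suitable for ever-growing counters). The Go sketch below illustrates only that distinction; it is not Netdata's charting engine, which among other things typically normalizes incremental values by the collection interval to show per-second rates.

```go
package main

import "fmt"

// dimension is a hypothetical stand-in for a chart dimension, carrying
// just enough state to contrast the two algorithms.
type dimension struct {
	algorithm string // "absolute" or "incremental"
	last      float64
	seen      bool
}

// render returns the value that would be charted for a new sample.
func (d *dimension) render(sample float64) float64 {
	if d.algorithm == "absolute" {
		return sample // plotted as-is
	}
	// incremental: delta since the previous collection
	// (interval normalization omitted for brevity)
	var delta float64
	if d.seen {
		delta = sample - d.last
	}
	d.last, d.seen = sample, true
	return delta
}

func main() {
	counter := &dimension{algorithm: "incremental"}
	for _, v := range []float64{100, 130, 190} {
		fmt.Println(counter.render(v)) // 0, 30, 60
	}
}
```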
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ARM HWCPipe Exporter](https://github.com/ylz-at/arm-hwcpipe-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -6395,7 +6584,7 @@ "community": true }, "overview": "# AWS EC2 Compute instances\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack AWS EC2 instances key metrics for optimized performance and cost management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AWS EC2 Exporter](https://github.com/O1ahmad/aws_ec2_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of 
this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AWS EC2 Exporter](https://github.com/O1ahmad/aws_ec2_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
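To make the `max_time_series` and `max_time_series_per_metric` options from the table above concrete, here is a small Go sketch of how such limits could be applied: exceeding the global limit drops the whole scrape, while a single over-sized metric is merely skipped. The helper and its inputs are hypothetical; the collector's real enforcement may differ in detail.

```go
package main

import "fmt"

// applyLimits rejects the whole response when the total series count
// exceeds maxTotal, and skips any metric whose own series count exceeds
// maxPerMetric. Hypothetical helper, for illustration only.
func applyLimits(seriesPerMetric map[string]int, maxTotal, maxPerMetric int) (kept map[string]int, ok bool) {
	total := 0
	for _, n := range seriesPerMetric {
		total += n
	}
	if total > maxTotal {
		return nil, false // endpoint returned too many series: data not processed
	}
	kept = make(map[string]int)
	for name, n := range seriesPerMetric {
		if n <= maxPerMetric {
			kept[name] = n // within the per-metric limit
		}
	}
	return kept, true
}

func main() {
	metrics := map[string]int{"http_requests_total": 150, "huge_metric": 900}
	kept, ok := applyLimits(metrics, 2000, 200)
	fmt.Println(ok, kept) // true map[http_requests_total:150]
}
```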
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AWS EC2 Exporter](https://github.com/O1ahmad/aws_ec2_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -6434,7 +6623,7 @@ "community": true }, "overview": "# AWS EC2 Spot Instance\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor AWS EC2 Spot instances' performance metrics for efficient resource allocation and cost optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AWS EC2 Spot Exporter](https://github.com/patcadelina/ec2-spot-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting 
metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AWS EC2 Spot Exporter](https://github.com/patcadelina/ec2-spot-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
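One more illustration, this time of the `label_prefix` option introduced in the updated options table above, which formats label names as `prefix_name` (the prefix, an underscore, then the original name). A minimal Go sketch of that transformation; the helper and the sample labels are invented for the example.

```go
package main

import "fmt"

// prefixLabels renames every label to "<prefix>_<name>", mirroring the
// documented label_prefix behavior. Hypothetical helper, illustration only.
func prefixLabels(prefix string, labels map[string]string) map[string]string {
	if prefix == "" {
		return labels // option unset: labels pass through unchanged
	}
	out := make(map[string]string, len(labels))
	for name, value := range labels {
		out[prefix+"_"+name] = value
	}
	return out
}

func main() {
	labels := map[string]string{"instance": "i-0abc", "region": "us-east-1"}
	fmt.Println(prefixLabels("aws", labels)) // map[aws_instance:i-0abc aws_region:us-east-1]
}
```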
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+ "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AWS EC2 Spot Exporter](https://github.com/patcadelina/ec2-spot-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n
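##### label_prefix\n\nA minimal sketch of the `label_prefix` option described in the table above; the prefix value `aws` and the `region` label are assumptions chosen for illustration. With this setting, a label named `region` on the scraped series would be exposed on the charts as `aws_region`.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n label_prefix: aws\n\n```\n\n\n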
#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if its name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n
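To make these rules concrete, here is a hypothetical sketch (the metric names are invented for illustration): an untyped metric named `myapp_requests_total` is collected as a Counter because of its '_total' suffix, while the following 'fallback_type' configuration would collect untyped `myapp_temperature` as a Gauge:\n\n```yaml\nfallback_type:\n gauge:\n - myapp_temperature\n```\n\n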
", @@ -6473,7 +6662,7 @@ "community": true }, "overview": "# AWS ECS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on AWS ECS services and resources for optimized container management and orchestration.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AWS ECS exporter](https://github.com/bevers222/ecs-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",
- "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AWS ECS exporter](https://github.com/bevers222/ecs-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series.
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+ "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AWS ECS exporter](https://github.com/bevers222/ecs-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n
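As a concrete illustration (these patterns are hypothetical, not defaults), the following sketch would keep only series whose names start with `aws_` while dropping histogram bucket series:\n\n```yaml\nselector:\n allow:\n - aws_*\n deny:\n - '*_bucket'\n```\n\n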
##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if its name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", @@ -6512,7 +6701,7 @@ "community": true }, "overview": "# AWS Health events\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack AWS service health metrics for proactive incident management and resolution.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AWS Health Exporter](https://github.com/vladvasiliu/aws-health-exporter-rs).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances
of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AWS Health Exporter](https://github.com/vladvasiliu/aws-health-exporter-rs) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AWS Health Exporter](https://github.com/vladvasiliu/aws-health-exporter-rs) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -6551,7 +6740,7 @@ "community": true }, "overview": "# AWS Quota\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor AWS service quotas for effective resource usage and cost management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [aws_quota_exporter](https://github.com/emylincon/aws_quota_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, 
including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [aws_quota_exporter](https://github.com/emylincon/aws_quota_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [aws_quota_exporter](https://github.com/emylincon/aws_quota_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -6590,7 +6779,7 @@ "community": true }, "overview": "# AWS RDS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Amazon RDS (Relational Database Service) metrics for efficient cloud database management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [rds_exporter](https://github.com/percona/rds_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances 
of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [rds_exporter](https://github.com/percona/rds_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [rds_exporter](https://github.com/percona/rds_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type: process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit.
Metrics with a number of time series above this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -6629,7 +6818,7 @@ "community": true }, "overview": "# AWS S3 buckets\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor AWS S3 storage metrics for optimized performance, data management, and cost efficiency.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AWS S3 Exporter](https://github.com/ribbybibby/s3_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this 
integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AWS S3 Exporter](https://github.com/ribbybibby/s3_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
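As an example, a deny-only sketch (hypothetical metric names; pattern syntax per the selector README) that drops the exporter's self-monitoring series while keeping everything else:

```yaml
selector:
  deny:
    - process_*   # hypothetical: exporter process self-metrics
    - go_*        # hypothetical: Go runtime series
```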
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AWS S3 Exporter](https://github.com/ribbybibby/s3_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type: process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit.
Metrics with a number of time series above this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -6668,7 +6857,7 @@ "community": true }, "overview": "# AWS SQS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack AWS SQS messaging metrics for efficient message processing and queue management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AWS SQS Exporter](https://github.com/jmal98/sqs-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, 
including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AWS SQS Exporter](https://github.com/jmal98/sqs-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
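A combined allow/deny sketch (hypothetical `sqs_*` metric names), matching the (allow OR allow) AND !(deny OR deny) logic described below:

```yaml
selector:
  allow:
    - sqs_*                    # keep all queue series (hypothetical prefix)
  deny:
    - sqs_approximate_age_*    # ...except this noisy family (hypothetical)
```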
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AWS SQS Exporter](https://github.com/jmal98/sqs-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type: process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit.
Metrics with a number of time series above this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -6707,7 +6896,7 @@ "community": true }, "overview": "# AWS instance health\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor the health of AWS instances for improved performance and availability.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AWS instance health exporter](https://github.com/bobtfish/aws-instance-health-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple 
instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AWS instance health exporter](https://github.com/bobtfish/aws-instance-health-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
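Since `selector` is a per-job option (see the Options table), a sketch of where it sits inside a job, here combined with a tightened `max_time_series` limit; the URL and metric patterns are placeholders:

```yaml
jobs:
  - name: aws_health
    url: http://127.0.0.1:9090/metrics   # placeholder endpoint
    max_time_series: 1000                # tighter global cap for this job
    selector:
      allow:
        - aws_health_*                   # hypothetical metric prefix
```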
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AWS instance health exporter](https://github.com/bobtfish/aws-instance-health-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type: process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit.
Metrics with a number of time series above this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -6742,7 +6931,7 @@ "community": true }, "overview": "# Airthings Waveplus air sensor\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Waveplus radon sensor metrics for efficient indoor air quality monitoring and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Waveplus Radon Sensor Exporter](https://github.com/jeremybz/waveplus_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting 
metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Waveplus Radon Sensor Exporter](https://github.com/jeremybz/waveplus_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
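A per-job sketch combining `selector` with `fallback_type` (both are documented below), useful when an exporter emits untyped series; all names here are placeholders:

```yaml
jobs:
  - name: waveplus
    url: http://127.0.0.1:9090/metrics   # placeholder endpoint
    selector:
      allow:
        - waveplus_*                     # hypothetical metric prefix
    fallback_type:
      gauge:
        - waveplus_*                     # treat untyped series as gauges
```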
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Waveplus Radon Sensor Exporter](https://github.com/jeremybz/waveplus_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type: process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit.
Metrics with a number of time series above this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -6777,7 +6966,7 @@ "community": true }, "overview": "# Akamai Edge DNS Traffic\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack and analyze Akamai Edge DNS traffic for enhanced performance and security.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Akamai Edge DNS Traffic Exporter](https://github.com/akamai/akamai-edgedns-traffic-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from
multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Akamai Edge DNS Traffic Exporter](https://github.com/akamai/akamai-edgedns-traffic-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Akamai Edge DNS Traffic Exporter](https://github.com/akamai/akamai-edgedns-traffic-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. 
| 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug 
Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -6812,7 +7001,7 @@ "community": true }, "overview": "# Akamai Global Traffic Management\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor vital metrics of Akamai Global Traffic Management (GTM) for optimized load balancing and failover.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Akamai Global Traffic Management Metrics
Exporter](https://github.com/akamai/akamai-gtm-metrics-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Akamai Global Traffic Management Metrics Exporter](https://github.com/akamai/akamai-gtm-metrics-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Akamai Global Traffic Management Metrics Exporter](https://github.com/akamai/akamai-gtm-metrics-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. 
| 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug 
Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -6851,7 +7040,7 @@ "community": true }, "overview": "# Akamai Cloudmonitor\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Akamai Cloudmonitor provider metrics for comprehensive cloud performance management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cloudmonitor exporter](https://github.com/ExpressenAB/cloudmonitor_exporter).\n\n\nThis
collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cloudmonitor exporter](https://github.com/ExpressenAB/cloudmonitor_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cloudmonitor exporter](https://github.com/ExpressenAB/cloudmonitor_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -6886,7 +7075,7 @@ "community": true }, "overview": "# Alamos FE2 server\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Alamos FE2 systems for improved performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Alamos FE2 Exporter](https://github.com/codemonauts/prometheus-fe2-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this
integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Alamos FE2 Exporter](https://github.com/codemonauts/prometheus-fe2-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Alamos FE2 Exporter](https://github.com/codemonauts/prometheus-fe2-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -6925,7 +7114,7 @@ "community": true }, "overview": "# Alibaba Cloud\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Alibaba Cloud services and resources for efficient management and cost optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Alibaba Cloud Exporter](https://github.com/aylei/aliyun-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of
this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Alibaba Cloud Exporter](https://github.com/aylei/aliyun-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Alibaba Cloud Exporter](https://github.com/aylei/aliyun-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -6960,7 +7149,7 @@ "community": true }, "overview": "# Altaro Backup\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Altaro Backup performance metrics to ensure smooth data protection and recovery operations.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Altaro Backup Exporter](https://github.com/raph2i/altaro_backup_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple 
instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Altaro Backup Exporter](https://github.com/raph2i/altaro_backup_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Altaro Backup Exporter](https://github.com/raph2i/altaro_backup_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -6995,7 +7184,7 @@ "community": true }, "overview": "# Andrews & Arnold line status\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Andrews & Arnold Ltd (AAISP) metrics for improved network performance and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Andrews & Arnold line status exporter](https://github.com/daveio/aaisp-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics 
from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Andrews & Arnold line status exporter](https://github.com/daveio/aaisp-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Andrews & Arnold line status exporter](https://github.com/daveio/aaisp-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -7030,7 +7219,7 @@ "community": true }, "overview": "# Apache Airflow\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Apache Airflow metrics to optimize task scheduling and workflow management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Airflow exporter](https://github.com/shalb/airflow-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this 
integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Airflow exporter](https://github.com/shalb/airflow-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Airflow exporter](https://github.com/shalb/airflow-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -7069,7 +7258,7 @@ "community": true }, "overview": "# Apache Flink\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Apache Flink metrics for efficient stream processing and application management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Apache Flink Metrics Reporter](https://github.com/matsumana/flink_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple 
instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Apache Flink Metrics Reporter](https://github.com/matsumana/flink_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Apache Flink Metrics Reporter](https://github.com/matsumana/flink_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit.
Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", @@ -7104,7 +7293,7 @@ "community": true }, "overview": "# Apple Time Machine\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Apple Time Machine backup metrics for efficient data protection and recovery.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Apple Time Machine Exporter](https://github.com/znerol/prometheus-timemachine-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from
multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Apple Time Machine Exporter](https://github.com/znerol/prometheus-timemachine-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Apple Time Machine Exporter](https://github.com/znerol/prometheus-timemachine-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit.
Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", @@ -7143,7 +7332,7 @@ "community": true }, "overview": "# Aruba devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor the performance metrics of Aruba Networks devices for comprehensive network management and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Aruba Exporter](https://github.com/slashdoom/aruba_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances
of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Aruba Exporter](https://github.com/slashdoom/aruba_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Aruba Exporter](https://github.com/slashdoom/aruba_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit.
Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", @@ -7182,7 +7371,7 @@ "community": true }, "overview": "# ArvanCloud CDN\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack and analyze the performance metrics of ArvanCloud CDN and cloud services for optimized delivery and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ArvanCloud exporter](https://github.com/arvancloud/ar-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics
from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ArvanCloud exporter](https://github.com/arvancloud/ar-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ArvanCloud exporter](https://github.com/arvancloud/ar-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit.
Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", @@ -7217,7 +7406,7 @@ "community": true }, "overview": "# Audisto\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Audisto SEO and website metrics for improved search performance and optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Audisto exporter](https://github.com/ZeitOnline/audisto_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this
integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Audisto exporter](https://github.com/ZeitOnline/audisto_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Audisto exporter](https://github.com/ZeitOnline/audisto_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit.
\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -7252,7 +7441,7 @@ "community": true }, "overview": "# AuthLog\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor authentication logs for security insights and efficient access management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AuthLog Exporter](https://github.com/woblerr/authlog_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, 
including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AuthLog Exporter](https://github.com/woblerr/authlog_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
 + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AuthLog Exporter](https://github.com/woblerr/authlog_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix added to the labels of all charts. If set, label names are automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). See the example below. | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n
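\n##### label_prefix\n\nThis option allows you to add a prefix to the label names of all charts. For illustration (the `myapp` prefix and the URL below are placeholders, not defaults), a job configured with `label_prefix: myapp` would store a label named `instance` as `myapp_instance`.\n\n- Option syntax:\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n label_prefix: myapp\n```\n\n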
\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -7291,7 +7480,7 @@ "community": true }, "overview": "# Azure AD App passwords\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nSafeguard and track Azure App secrets for enhanced security and access management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Azure App Secrets monitor](https://github.com/vladvasiliu/azure-app-secrets-monitor).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from 
multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Azure App Secrets monitor](https://github.com/vladvasiliu/azure-app-secrets-monitor) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
 + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Azure App Secrets monitor](https://github.com/vladvasiliu/azure-app-secrets-monitor) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix added to the labels of all charts. If set, label names are automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). See the example below. | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n
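\n##### label_prefix\n\nThis option allows you to add a prefix to the label names of all charts. For illustration (the `myapp` prefix and the URL below are placeholders, not defaults), a job configured with `label_prefix: myapp` would store a label named `instance` as `myapp_instance`.\n\n- Option syntax:\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n label_prefix: myapp\n```\n\n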
\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -7330,7 +7519,7 @@ "community": true }, "overview": "# Azure Elastic Pool SQL\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Azure Elastic SQL performance metrics for efficient database management and query optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Azure Elastic SQL Exporter](https://github.com/benclapp/azure_elastic_sql_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting 
metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Azure Elastic SQL Exporter](https://github.com/benclapp/azure_elastic_sql_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
 + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Azure Elastic SQL Exporter](https://github.com/benclapp/azure_elastic_sql_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix added to the labels of all charts. If set, label names are automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). See the example below. | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n
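\n##### label_prefix\n\nThis option allows you to add a prefix to the label names of all charts. For illustration (the `myapp` prefix and the URL below are placeholders, not defaults), a job configured with `label_prefix: myapp` would store a label named `instance` as `myapp_instance`.\n\n- Option syntax:\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n label_prefix: myapp\n```\n\n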
\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -7369,7 +7558,7 @@ "community": true }, "overview": "# Azure Resources\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Azure resources vital metrics for efficient cloud management and cost optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Azure Resources Exporter](https://github.com/FXinnovation/azure_metrics_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from 
multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Azure Resources Exporter](https://github.com/FXinnovation/azure_metrics_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
 + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Azure Resources Exporter](https://github.com/FXinnovation/azure_metrics_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix added to the labels of all charts. If set, label names are automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). See the example below. | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n
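\n##### label_prefix\n\nThis option allows you to add a prefix to the label names of all charts. For illustration (the `myapp` prefix and the URL below are placeholders, not defaults), a job configured with `label_prefix: myapp` would store a label named `instance` as `myapp_instance`.\n\n- Option syntax:\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n label_prefix: myapp\n```\n\n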
\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -7408,7 +7597,7 @@ "community": true }, "overview": "# Azure SQL\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Azure SQL performance metrics for efficient database management and query performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Azure SQL exporter](https://github.com/iamseth/azure_sql_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this 
integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Azure SQL exporter](https://github.com/iamseth/azure_sql_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
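For example, a minimal sketch (the metric name patterns below are hypothetical, purely for illustration): keep only the `azure_sql_*` series and drop the Go runtime GC series:\n\n```yaml\nselector:\n allow:\n - azure_sql_*\n deny:\n - go_gc_*\n```\n\n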
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+ "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Azure SQL exporter](https://github.com/iamseth/azure_sql_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit.
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -7447,7 +7636,7 @@ "community": true }, "overview": "# Azure Service Bus\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Azure Service Bus messaging metrics for optimized communication and integration.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Azure Service Bus Exporter](https://github.com/marcinbudny/servicebus_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple 
instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Azure Service Bus Exporter](https://github.com/marcinbudny/servicebus_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
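For example, a deny-only selector is also valid; this sketch (the pattern is hypothetical) keeps every series except the matching ones:\n\n```yaml\nselector:\n deny:\n - servicebus_*_bytes\n```\n\n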
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+ "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Azure Service Bus Exporter](https://github.com/marcinbudny/servicebus_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit.
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -7486,7 +7675,7 @@ "community": true }, "overview": "# Azure application\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Azure Monitor metrics for comprehensive resource management and performance optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Azure Monitor exporter](https://github.com/RobustPerception/azure_metrics_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from 
multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Azure Monitor exporter](https://github.com/RobustPerception/azure_metrics_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
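For example, the selector is set per job in `go.d/prometheus.conf`; in this sketch the job name, port, and pattern are illustrative only:\n\n```yaml\njobs:\n - name: azure_app\n url: http://127.0.0.1:9276/metrics\n selector:\n allow:\n - azure_*\n```\n\n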
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+ "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Azure Monitor exporter](https://github.com/RobustPerception/azure_metrics_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit.
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -7521,7 +7710,7 @@ "community": true }, "overview": "# BOSH\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on BOSH deployment metrics for improved cloud orchestration and resource management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [BOSH exporter](https://github.com/bosh-prometheus/bosh_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this 
integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [BOSH exporter](https://github.com/bosh-prometheus/bosh_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
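For example, combining allow and deny (hypothetical patterns): a series is kept only if it matches an allow pattern AND matches no deny pattern:\n\n```yaml\nselector:\n allow:\n - bosh_job_*\n - bosh_task_*\n deny:\n - '*_seconds'\n```\n\n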
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+ "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [BOSH exporter](https://github.com/bosh-prometheus/bosh_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit.
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -7556,7 +7745,7 @@ "community": true }, "overview": "# BigQuery\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Google BigQuery metrics for optimized data processing and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [BigQuery Exporter](https://github.com/m-lab/prometheus-bigquery-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this 
integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [BigQuery Exporter](https://github.com/m-lab/prometheus-bigquery-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [BigQuery Exporter](https://github.com/m-lab/prometheus-bigquery-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -7591,7 +7780,7 @@ "community": true }, "overview": "# Bird Routing Daemon\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Bird Routing Daemon metrics for optimized network routing and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Bird Routing Daemon Exporter](https://github.com/czerwonk/bird_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple 
instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Bird Routing Daemon Exporter](https://github.com/czerwonk/bird_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Bird Routing Daemon Exporter](https://github.com/czerwonk/bird_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -7628,7 +7817,7 @@ "community": true }, "overview": "# Blackbox\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack external service availability and response times with Blackbox monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Blackbox exporter](https://github.com/prometheus/blackbox_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, 
including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Blackbox exporter](https://github.com/prometheus/blackbox_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Blackbox exporter](https://github.com/prometheus/blackbox_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -7663,7 +7852,7 @@ "community": true }, "overview": "# Bobcat Miner 300\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Bobcat equipment metrics for optimized performance and maintenance management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Bobcat Exporter](https://github.com/pperzyna/bobcat_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this 
integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Bobcat Exporter](https://github.com/pperzyna/bobcat_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Bobcat Exporter](https://github.com/pperzyna/bobcat_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -7698,7 +7887,7 @@ "community": true }, "overview": "# Borg backup\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Borg backup performance metrics for efficient data protection and recovery.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Borg backup exporter](https://github.com/k0ral/borg-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, 
including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Borg backup exporter](https://github.com/k0ral/borg-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+        "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Borg backup exporter](https://github.com/k0ral/borg-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (metric name pattern match). | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
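##### With a label prefix\n\nA minimal sketch (not taken from the exporter's documentation) showing the optional `label_prefix` option described in the table above; the prefix value `borg` is illustrative:\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n # assumed prefix: a label such as \"instance\" would be exposed as \"borg_instance\"\n label_prefix: borg\n\n```\n",
        "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 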
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -7733,7 +7922,7 @@ "community": true }, "overview": "# BungeeCord\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack BungeeCord proxy server metrics for efficient load balancing and performance management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [BungeeCord Prometheus Exporter](https://github.com/weihao/bungeecord-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from 
multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [BungeeCord Prometheus Exporter](https://github.com/weihao/bungeecord-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+        "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [BungeeCord Prometheus Exporter](https://github.com/weihao/bungeecord-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (metric name pattern match). | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
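##### With a label prefix\n\nA minimal sketch (not taken from the exporter's documentation) showing the optional `label_prefix` option described in the table above; the prefix value `bungeecord` is illustrative:\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n # assumed prefix: a label such as \"instance\" would be exposed as \"bungeecord_instance\"\n label_prefix: bungeecord\n\n```\n",
        "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 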
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -7768,7 +7957,7 @@ "community": true }, "overview": "# CVMFS clients\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack CernVM File System metrics for optimized distributed file system performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [CVMFS exporter](https://github.com/guilbaults/cvmfs-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of 
this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [CVMFS exporter](https://github.com/guilbaults/cvmfs-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+        "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [CVMFS exporter](https://github.com/guilbaults/cvmfs-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (metric name pattern match). | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
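##### With a label prefix\n\nA minimal sketch (not taken from the exporter's documentation) showing the optional `label_prefix` option described in the table above; the prefix value `cvmfs` is illustrative:\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n # assumed prefix: a label such as \"instance\" would be exposed as \"cvmfs_instance\"\n label_prefix: cvmfs\n\n```\n",
        "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 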
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -7803,7 +7992,7 @@ "community": true }, "overview": "# Celery\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Celery task queue metrics for optimized task processing and resource management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Celery Exporter](https://github.com/ZeitOnline/celery_redis_prometheus).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of 
this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Celery Exporter](https://github.com/ZeitOnline/celery_redis_prometheus) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+        "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Celery Exporter](https://github.com/ZeitOnline/celery_redis_prometheus) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (metric name pattern match). | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
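##### With a label prefix\n\nA minimal sketch (not taken from the exporter's documentation) showing the optional `label_prefix` option described in the table above; the prefix value `celery` is illustrative:\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n # assumed prefix: a label such as \"instance\" would be exposed as \"celery_instance\"\n label_prefix: celery\n\n```\n",
        "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 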
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -7838,7 +8027,7 @@ "community": true }, "overview": "# Certificate Transparency\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack certificate transparency log metrics for enhanced\nSSL/TLS certificate management and security.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ct-exporter](https://github.com/Hsn723/ct-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances 
of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ct-exporter](https://github.com/Hsn723/ct-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+        "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ct-exporter](https://github.com/Hsn723/ct-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (metric name pattern match). | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with a number of time series above this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -7873,7 +8062,7 @@ "community": true }, "overview": "# Checkpoint device\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Check Point firewall and security metrics for enhanced network protection and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Checkpoint exporter](https://github.com/RespiroConsulting/CheckPointExporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from 
multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Checkpoint exporter](https://github.com/RespiroConsulting/CheckPointExporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Checkpoint exporter](https://github.com/RespiroConsulting/CheckPointExporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit.
Metrics with a number of time series above this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -7908,7 +8097,7 @@ "community": true }, "overview": "# Chia\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Chia blockchain metrics for optimized farming and resource allocation.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Chia Exporter](https://github.com/chia-network/chia-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including 
remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Chia Exporter](https://github.com/chia-network/chia-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Chia Exporter](https://github.com/chia-network/chia-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit.
Metrics with a number of time series above this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -7943,7 +8132,7 @@ "community": true }, "overview": "# Christ Elektronik CLM5IP power panel\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Christ Elektronik CLM5IP device metrics for efficient performance and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Christ Elektronik CLM5IP Exporter](https://github.com/christmann/clm5ip_exporter/).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting 
metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Christ Elektronik CLM5IP Exporter](https://github.com/christmann/clm5ip_exporter/) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Christ Elektronik CLM5IP Exporter](https://github.com/christmann/clm5ip_exporter/) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit.
Metrics with a number of time series above this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -7978,7 +8167,7 @@ "community": true }, "overview": "# Cilium Agent\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Cilium Agent metrics for optimized network security and connectivity.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cilium Agent](https://github.com/cilium/cilium).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including 
remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cilium Agent](https://github.com/cilium/cilium) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cilium Agent](https://github.com/cilium/cilium) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit.
Metrics with a number of time series above this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8013,7 +8202,7 @@ "community": true }, "overview": "# Cilium Operator\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Cilium Operator metrics for efficient Kubernetes network security management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cilium Operator](https://github.com/cilium/cilium).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, 
including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cilium Operator](https://github.com/cilium/cilium) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cilium Operator](https://github.com/cilium/cilium) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8048,7 +8237,7 @@ "community": true }, "overview": "# Cilium Proxy\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Cilium Proxy metrics for enhanced network security and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cilium Proxy](https://github.com/cilium/proxy).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote 
instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cilium Proxy](https://github.com/cilium/proxy) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cilium Proxy](https://github.com/cilium/proxy) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8087,7 +8276,7 @@ "community": true }, "overview": "# Cisco ACI\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Cisco ACI infrastructure metrics for optimized network performance and resource management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cisco ACI Exporter](https://github.com/RavuAlHemio/prometheus_aci_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple 
instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cisco ACI Exporter](https://github.com/RavuAlHemio/prometheus_aci_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cisco ACI Exporter](https://github.com/RavuAlHemio/prometheus_aci_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8126,7 +8315,7 @@ "community": true }, "overview": "# Citrix NetScaler\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on NetScaler performance metrics for efficient application delivery and load balancing.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Citrix NetScaler Exporter](https://github.com/rokett/Citrix-NetScaler-Exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from 
multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Citrix NetScaler Exporter](https://github.com/rokett/Citrix-NetScaler-Exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Citrix NetScaler Exporter](https://github.com/rokett/Citrix-NetScaler-Exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8161,7 +8350,7 @@ "community": true }, "overview": "# ClamAV daemon\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack ClamAV antivirus metrics for enhanced threat detection and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ClamAV daemon stats exporter](https://github.com/sergeymakinen/clamav_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this 
integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ClamAV daemon stats exporter](https://github.com/sergeymakinen/clamav_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ClamAV daemon stats exporter](https://github.com/sergeymakinen/clamav_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those with no '# TYPE') are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8196,7 +8385,7 @@ "community": true }, "overview": "# Clamscan results\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor ClamAV scanning performance metrics for efficient malware detection and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [clamscan-exporter](https://github.com/FortnoxAB/clamscan-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of 
this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [clamscan-exporter](https://github.com/FortnoxAB/clamscan-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [clamscan-exporter](https://github.com/FortnoxAB/clamscan-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those with no '# TYPE') are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8231,7 +8420,7 @@ "community": true }, "overview": "# Clash\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Clash proxy server metrics for optimized network performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Clash exporter](https://github.com/elonzh/clash_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, 
including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Clash exporter](https://github.com/elonzh/clash_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Clash exporter](https://github.com/elonzh/clash_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those with no '# TYPE') are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8270,7 +8459,7 @@ "community": true }, "overview": "# CloudWatch\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor AWS CloudWatch metrics for comprehensive AWS resource management and performance optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [CloudWatch exporter](https://github.com/prometheus/cloudwatch_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple 
instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [CloudWatch exporter](https://github.com/prometheus/cloudwatch_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [CloudWatch exporter](https://github.com/prometheus/cloudwatch_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those with no '# TYPE') are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8309,7 +8498,7 @@ "community": true }, "overview": "# Cloud Foundry\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Cloud Foundry platform metrics for optimized application deployment and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cloud Foundry exporter](https://github.com/bosh-prometheus/cf_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of 
this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cloud Foundry exporter](https://github.com/bosh-prometheus/cf_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cloud Foundry exporter](https://github.com/bosh-prometheus/cf_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the metric name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8348,7 +8537,7 @@ "community": true }, "overview": "# Cloud Foundry Firehose\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Cloud Foundry Firehose metrics for comprehensive platform diagnostics and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cloud Foundry Firehose exporter](https://github.com/bosh-prometheus/firehose_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting 
metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cloud Foundry Firehose exporter](https://github.com/bosh-prometheus/firehose_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cloud Foundry Firehose exporter](https://github.com/bosh-prometheus/firehose_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the metric name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8387,7 +8576,7 @@ "community": true }, "overview": "# Cloudflare PCAP\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Cloudflare CDN and security metrics for optimized content delivery and protection.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cloudflare exporter](https://github.com/wehkamp/docker-prometheus-cloudflare-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics 
from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cloudflare exporter](https://github.com/wehkamp/docker-prometheus-cloudflare-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cloudflare exporter](https://github.com/wehkamp/docker-prometheus-cloudflare-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the metric name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8422,7 +8611,7 @@ "community": true }, "overview": "# ClusterControl CMON\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Severalnines ClusterControl CMON metrics for efficient monitoring and management of database operations.\n\n\nMetrics are gathered by periodically sending HTTP requests to [CMON Exporter](https://github.com/severalnines/cmon_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from 
multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [CMON Exporter](https://github.com/severalnines/cmon_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [CMON Exporter](https://github.com/severalnines/cmon_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the metric name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8457,7 +8646,7 @@ "community": true }, "overview": "# Collectd\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor system and application metrics with Collectd for comprehensive performance analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Collectd exporter](https://github.com/prometheus/collectd_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this 
integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Collectd exporter](https://github.com/prometheus/collectd_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Collectd exporter](https://github.com/prometheus/collectd_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8492,7 +8681,7 @@ "community": true }, "overview": "# Concourse\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Concourse CI/CD pipeline metrics for optimized workflow management and deployment.\n\n\nMetrics are gathered by periodically sending HTTP requests to the Concourse built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including 
remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Configure built-in Prometheus exporter\n\nTo configure the built-in Prometheus exporter, follow the [official documentation](https://concourse-ci.org/metrics.html#configuring-metrics).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
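As an illustrative sketch (the `concourse_*` prefix is an assumption based on the project name; verify it against your /metrics output), a job could keep only Concourse-specific series:\n\n```yaml\nselector:\n allow:\n - concourse_*\n```\n\n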
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Configure built-in Prometheus exporter\n\nTo configure the built-in Prometheus exporter, follow the [official documentation](https://concourse-ci.org/metrics.html#configuring-metrics).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8527,7 +8716,7 @@ "community": true }, "overview": "# CraftBeerPi\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on CraftBeerPi homebrewing metrics for optimized brewing process management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [CraftBeerPi exporter](https://github.com/jo-hannes/craftbeerpi_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of 
this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [CraftBeerPi exporter](https://github.com/jo-hannes/craftbeerpi_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
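For instance, a deny-only sketch (the `process_*` and `python_*` patterns are illustrative assumptions) that drops generic runtime series while keeping all brewing metrics:\n\n```yaml\nselector:\n deny:\n - process_*\n - python_*\n```\n\n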
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [CraftBeerPi exporter](https://github.com/jo-hannes/craftbeerpi_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8562,7 +8751,7 @@ "community": true }, "overview": "# Crowdsec\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Crowdsec security metrics for efficient threat detection and response.\n\n\nMetrics are gathered by periodically sending HTTP requests to the Crowdsec built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote 
instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Configure built-in Prometheus exporter\n\nTo configure the built-in Prometheus exporter, follow the [official documentation](https://docs.crowdsec.net/docs/observability/prometheus/).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
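As a hedged example (the `cs_*` prefix is an assumption about Crowdsec's metric naming; check the exporter's /metrics output), a job could collect only Crowdsec series:\n\n```yaml\nselector:\n allow:\n - cs_*\n```\n\n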
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Configure built-in Prometheus exporter\n\nTo configure the built-in Prometheus exporter, follow the [official documentation](https://docs.crowdsec.net/docs/observability/prometheus/).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8597,7 +8786,7 @@ "community": true }, "overview": "# Crypto exchanges\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack cryptocurrency market metrics for informed investment and trading decisions.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Crypto exporter](https://github.com/ix-ai/crypto-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this 
integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Crypto exporter](https://github.com/ix-ai/crypto-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
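For example (the `crypto_*` pattern is an illustrative assumption, not taken from the exporter's documentation; verify against your /metrics output):\n\n```yaml\nselector:\n allow:\n - crypto_*\n```\n\n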
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Crypto exporter](https://github.com/ix-ai/crypto-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8632,7 +8821,7 @@ "community": true }, "overview": "# Cryptowatch\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Cryptowatch market data metrics for comprehensive cryptocurrency market analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cryptowat Exporter](https://github.com/nbarrientos/cryptowat_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances 
of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cryptowat Exporter](https://github.com/nbarrientos/cryptowat_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
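For illustration, here is a hedged sketch that keeps only series with a `cryptowat_` name prefix (an assumed naming convention based on the exporter's name, not confirmed by its documentation):\n\n```yaml\n# hypothetical allow-list; check the exporter's /metrics output for the real names\nselector:\n allow:\n - cryptowat_*\n```\n\n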
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cryptowat Exporter](https://github.com/nbarrientos/cryptowat_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8667,7 +8856,7 @@ "community": true }, "overview": "# Custom Exporter\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nCreate and monitor custom metrics tailored to your specific use case and requirements.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Custom Exporter](https://github.com/orange-cloudfoundry/custom_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of 
this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Custom Exporter](https://github.com/orange-cloudfoundry/custom_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
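For example, a minimal sketch that drops the `go_*` and `process_*` self-metrics commonly exposed by Go-based exporters (assuming an empty allow list matches everything):\n\n```yaml\n# deny-only sketch: every other series is still collected\nselector:\n deny:\n - go_*\n - process_*\n```\n\n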
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Custom Exporter](https://github.com/orange-cloudfoundry/custom_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8702,7 +8891,7 @@ "community": true }, "overview": "# DDWRT Routers\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on DD-WRT router metrics for efficient network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ddwrt-collector](https://github.com/camelusferus/ddwrt_collector).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this 
integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ddwrt-collector](https://github.com/camelusferus/ddwrt_collector) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
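As a combined sketch of the (allow) AND !(deny) logic described below, using a hypothetical `ddwrt_` metric name prefix:\n\n```yaml\n# illustrative only: the ddwrt_ prefix is an assumption, not taken from the exporter's docs\nselector:\n allow:\n - ddwrt_*\n deny:\n - ddwrt_scrape_*\n```\n\n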
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ddwrt-collector](https://github.com/camelusferus/ddwrt_collector) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8741,7 +8930,7 @@ "community": true }, "overview": "# DMARC\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack DMARC email authentication metrics for improved email security and deliverability.\n\n\nMetrics are gathered by periodically sending HTTP requests to [dmarc-metrics-exporter](https://github.com/jgosmann/dmarc-metrics-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this 
integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [dmarc-metrics-exporter](https://github.com/jgosmann/dmarc-metrics-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
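For instance, a short sketch that collects only the exporter's own series (the `dmarc_` prefix is assumed from the exporter's name and should be verified against its actual output):\n\n```yaml\nselector:\n allow:\n - dmarc_*\n```\n\n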
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [dmarc-metrics-exporter](https://github.com/jgosmann/dmarc-metrics-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8776,7 +8965,7 @@ "community": true }, "overview": "# DNSBL\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor DNSBL metrics for efficient domain reputation and security management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [dnsbl-exporter](https://github.com/Luzilla/dnsbl_exporter/).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including 
remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [dnsbl-exporter](https://github.com/Luzilla/dnsbl_exporter/) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [dnsbl-exporter](https://github.com/Luzilla/dnsbl_exporter/) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
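##### Job-level selector\n\nA sketch of the `selector` option applied to a single job, following the option syntax shown above; the metric name patterns are illustrative placeholders, not names taken from dnsbl-exporter.\n\n\n```yaml\n# assumption: rbl_* and go_* are placeholder patterns, not real dnsbl-exporter metrics\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n selector:\n allow:\n - rbl_*\n deny:\n - go_*\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 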
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those with no '# TYPE') are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8811,7 +9000,7 @@ "community": true }, "overview": "# Dell EMC ECS cluster\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Dell EMC ECS object storage metrics for optimized storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Dell EMC ECS Exporter](https://github.com/paychex/prometheus-emcecs-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from 
multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Dell EMC ECS Exporter](https://github.com/paychex/prometheus-emcecs-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Dell EMC ECS Exporter](https://github.com/paychex/prometheus-emcecs-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8846,7 +9035,7 @@ "community": true }, "overview": "# Dell EMC Isilon cluster\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Dell EMC Isilon scale-out NAS metrics for efficient storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Dell EMC Isilon Exporter](https://github.com/paychex/prometheus-isilon-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics 
from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Dell EMC Isilon Exporter](https://github.com/paychex/prometheus-isilon-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Dell EMC Isilon Exporter](https://github.com/paychex/prometheus-isilon-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8881,7 +9070,7 @@ "community": true }, "overview": "# Dell EMC XtremIO cluster\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Dell/EMC XtremIO storage metrics for optimized data management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Dell/EMC XtremIO Exporter](https://github.com/cthiel42/prometheus-xtremio-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting 
metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Dell/EMC XtremIO Exporter](https://github.com/cthiel42/prometheus-xtremio-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Dell/EMC XtremIO Exporter](https://github.com/cthiel42/prometheus-xtremio-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8916,7 +9105,7 @@ "community": true }, "overview": "# Dell PowerMax\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Dell EMC PowerMax storage array metrics for efficient storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [PowerMax Exporter](https://github.com/kckecheng/powermax_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances 
of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [PowerMax Exporter](https://github.com/kckecheng/powermax_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [PowerMax Exporter](https://github.com/kckecheng/powermax_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8951,7 +9140,7 @@ "community": true }, "overview": "# Dependency-Track\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Dependency-Track metrics for efficient vulnerability management and software supply chain analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Dependency-Track Exporter](https://github.com/jetstack/dependency-track-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting
metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Dependency-Track Exporter](https://github.com/jetstack/dependency-track-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Dependency-Track Exporter](https://github.com/jetstack/dependency-track-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", @@ -8986,7 +9175,7 @@ "community": true }, "overview": "# DigitalOcean\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack DigitalOcean cloud provider metrics for optimized resource management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [DigitalOcean Exporter](https://github.com/metalmatze/digitalocean_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple
instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [DigitalOcean Exporter](https://github.com/metalmatze/digitalocean_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [DigitalOcean Exporter](https://github.com/metalmatze/digitalocean_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9021,7 +9210,7 @@ "community": true }, "overview": "# Discourse\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Discourse forum metrics for efficient community management and engagement.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Discourse Exporter](https://github.com/discourse/discourse-prometheus).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this
integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Discourse Exporter](https://github.com/discourse/discourse-prometheus) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Discourse Exporter](https://github.com/discourse/discourse-prometheus) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9056,7 +9245,7 @@ "community": true }, "overview": "# Dutch Electricity Smart Meter\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Dutch smart meter P1 port metrics for efficient energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [P1Exporter - Dutch Electricity Smart Meter Exporter](https://github.com/TobiasDeBruijn/prometheus-p1-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis
collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [P1Exporter - Dutch Electricity Smart Meter Exporter](https://github.com/TobiasDeBruijn/prometheus-p1-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [P1Exporter - Dutch Electricity Smart Meter Exporter](https://github.com/TobiasDeBruijn/prometheus-p1-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. 
| 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug 
Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9091,7 +9280,7 @@ "community": true }, "overview": "# Dynatrace\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Dynatrace APM metrics for comprehensive application performance management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Dynatrace Exporter](https://github.com/Apside-TOP/dynatrace_exporter).\n\n\nThis collector is supported on all 
platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Dynatrace Exporter](https://github.com/Apside-TOP/dynatrace_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Dynatrace Exporter](https://github.com/Apside-TOP/dynatrace_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9126,7 +9315,7 @@ "community": true }, "overview": "# EOS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor CERN EOS metrics for efficient storage management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [EOS exporter](https://github.com/cern-eos/eos_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### 
Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [EOS exporter](https://github.com/cern-eos/eos_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [EOS exporter](https://github.com/cern-eos/eos_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9161,7 +9350,7 @@ "community": true }, "overview": "# Eaton UPS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Eaton uninterruptible power supply (UPS) metrics for efficient power management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Prometheus Eaton UPS Exporter](https://github.com/psyinfra/prometheus-eaton-ups-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting 
metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Prometheus Eaton UPS Exporter](https://github.com/psyinfra/prometheus-eaton-ups-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Prometheus Eaton UPS Exporter](https://github.com/psyinfra/prometheus-eaton-ups-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9196,7 +9385,7 @@ "community": true }, "overview": "# Elgato Key Light devices.\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Elgato Key Light metrics for optimized lighting control and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Elgato Key Light exporter](https://github.com/mdlayher/keylight_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple 
instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Elgato Key Light exporter](https://github.com/mdlayher/keylight_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Elgato Key Light exporter](https://github.com/mdlayher/keylight_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9231,7 +9420,7 @@ "community": true }, "overview": "# Energomera smart power meters\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Energomera electricity meter metrics for efficient energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Energomera electricity meter exporter](https://github.com/peak-load/energomera_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting 
metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [energomera-exporter Energomera electricity meter exporter](https://github.com/peak-load/energomera_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [energomera-exporter](https://github.com/peak-load/energomera_exporter), the Energomera electricity meter exporter, by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server's certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
    "troubleshooting": "## Troubleshooting\n\n### Debug 
Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9266,7 +9455,7 @@ "community": true }, "overview": "# Excel spreadsheet\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nExport Prometheus metrics to Excel for versatile data analysis and reporting.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Excel Exporter](https://github.com/MarcusCalidus/excel-exporter).\n\n\nThis collector is supported on all 
platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Excel Exporter](https://github.com/MarcusCalidus/excel-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Excel Exporter](https://github.com/MarcusCalidus/excel-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server's certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
    "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9301,7 +9490,7 @@ "community": true }, "overview": "# FRRouting\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Free Range Routing (FRR) metrics for optimized network routing and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [FRRouting Exporter](https://github.com/tynany/frr_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, 
including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [FRRouting Exporter](https://github.com/tynany/frr_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [FRRouting Exporter](https://github.com/tynany/frr_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server's certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
    "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9336,7 +9525,7 @@ "community": true }, "overview": "# Fastd\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Fastd VPN metrics for efficient virtual private network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Fastd Exporter](https://github.com/freifunk-darmstadt/fastd-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this 
integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Fastd Exporter](https://github.com/freifunk-darmstadt/fastd-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Fastd Exporter](https://github.com/freifunk-darmstadt/fastd-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server's certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
    "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9371,7 +9560,7 @@ "community": true }, "overview": "# Fortigate firewall\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Fortigate firewall metrics for enhanced network protection and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [fortigate_exporter](https://github.com/bluecmd/fortigate_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of 
this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [fortigate_exporter](https://github.com/bluecmd/fortigate_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [fortigate_exporter](https://github.com/bluecmd/fortigate_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9406,7 +9595,7 @@ "community": true }, "overview": "# FreeBSD NFS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor FreeBSD Network File System metrics for efficient file sharing management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [FreeBSD NFS Exporter](https://github.com/Axcient/freebsd-nfs-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple 
instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [FreeBSD NFS Exporter](https://github.com/Axcient/freebsd-nfs-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [FreeBSD NFS Exporter](https://github.com/Axcient/freebsd-nfs-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9441,7 +9630,7 @@ "community": true }, "overview": "# FreeBSD RCTL-RACCT\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on FreeBSD Resource Container metrics for optimized resource management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [FreeBSD RCTL Exporter](https://github.com/yo000/rctl_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple 
instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [FreeBSD RCTL Exporter](https://github.com/yo000/rctl_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [FreeBSD RCTL Exporter](https://github.com/yo000/rctl_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9476,7 +9665,7 @@ "community": true }, "overview": "# Freifunk network\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Freifunk community network metrics for optimized network performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Freifunk Exporter](https://github.com/xperimental/freifunk-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple 
instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Freifunk Exporter](https://github.com/xperimental/freifunk-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Freifunk Exporter](https://github.com/xperimental/freifunk-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9511,7 +9700,7 @@ "community": true }, "overview": "# Fritzbox network devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack AVM Fritzbox router metrics for efficient home network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Fritzbox exporter](https://github.com/pdreker/fritz_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of 
this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Fritzbox exporter](https://github.com/pdreker/fritz_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Fritzbox exporter](https://github.com/pdreker/fritz_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit.
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9546,7 +9735,7 @@ "community": true }, "overview": "# GCP GCE\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Google Cloud Platform Compute Engine metrics for efficient cloud resource management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [GCP GCE Exporter](https://github.com/O1ahmad/gcp-gce-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple 
instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [GCP GCE Exporter](https://github.com/O1ahmad/gcp-gce-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [GCP GCE Exporter](https://github.com/O1ahmad/gcp-gce-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit.
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9581,7 +9770,7 @@ "community": true }, "overview": "# GCP Quota\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Google Cloud Platform quota metrics for optimized resource usage and cost management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [GCP Quota Exporter](https://github.com/mintel/gcp-quota-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this 
integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [GCP Quota Exporter](https://github.com/mintel/gcp-quota-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [GCP Quota Exporter](https://github.com/mintel/gcp-quota-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit.
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9616,7 +9805,7 @@ "community": true }, "overview": "# GTP\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on GTP (GPRS Tunneling Protocol) metrics for optimized mobile data communication and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [GTP Exporter](https://github.com/wmnsk/gtp_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this 
integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [GTP Exporter](https://github.com/wmnsk/gtp_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [GTP Exporter](https://github.com/wmnsk/gtp_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit.
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9651,7 +9840,7 @@ "community": true }, "overview": "# Generic Command Line Output\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack custom command line output metrics for tailored monitoring and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Generic Command Line Output Exporter](https://github.com/MarioMartReq/generic-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from 
multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Generic Command Line Output Exporter](https://github.com/MarioMartReq/generic-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Generic Command Line Output Exporter](https://github.com/MarioMartReq/generic-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit.
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9686,7 +9875,7 @@ "community": true }, "overview": "# Generic storage enclosure tool\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor storage enclosure metrics for efficient storage device management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [jbod - Generic storage enclosure tool](https://github.com/Gandi/jbod-rs).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from 
multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [jbod - Generic storage enclosure tool](https://github.com/Gandi/jbod-rs) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
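As a hedged illustration (assuming an omitted allow list means "allow everything", and the pattern is hypothetical rather than a documented jbod metric name), a job could simply drop verbose bucket series:\n\n```yaml\nselector:\n deny:\n - *_bucket\n```\n\n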
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [jbod - Generic storage enclosure tool](https://github.com/Gandi/jbod-rs) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9721,7 +9910,7 @@ "community": true }, "overview": "# GitHub API rate limit\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor GitHub API rate limit metrics for efficient\nAPI usage and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [GitHub API rate limit Exporter](https://github.com/lunarway/github-ratelimit-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple 
instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [GitHub API rate limit Exporter](https://github.com/lunarway/github-ratelimit-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
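For instance, a sketch with an assumed metric prefix (check the exporter's actual metric names before using it):\n\n```yaml\nselector:\n allow:\n - github_rate_limit_*\n```\n\n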
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [GitHub API rate limit Exporter](https://github.com/lunarway/github-ratelimit-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9756,7 +9945,7 @@ "community": true }, "overview": "# GitHub repository\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack GitHub repository metrics for optimized project and user analytics monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [GitHub Exporter](https://github.com/githubexporter/github-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this 
integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [GitHub Exporter](https://github.com/githubexporter/github-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
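A hedged example (both patterns are hypothetical): keep repository series while dropping `*_info` helper series:\n\n```yaml\nselector:\n allow:\n - github_repo_*\n deny:\n - *_info\n```\n\n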
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [GitHub Exporter](https://github.com/githubexporter/github-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9791,7 +9980,7 @@ "community": true }, "overview": "# GitLab Runner\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on GitLab CI/CD job metrics for efficient development and deployment management.\n\n\nMetrics are gathered by periodically sending HTTP requests to GitLab built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including 
remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Configure built-in Prometheus exporter\n\nTo configure the built-in Prometheus exporter, follow the [official documentation](https://docs.gitlab.com/runner/monitoring/#configuration-of-the-metrics-http-server).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
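For example, a sketch that assumes the runner's metrics share the `gitlab_runner_` prefix (verify against your runner's metrics endpoint output first):\n\n```yaml\nselector:\n allow:\n - gitlab_runner_*\n```\n\n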
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Configure built-in Prometheus exporter\n\nTo configure the built-in Prometheus exporter, follow the [official documentation](https://docs.gitlab.com/runner/monitoring/#configuration-of-the-metrics-http-server).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. 
| 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug 
Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace the name if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9826,7 +10015,7 @@ "community": true }, "overview": "# Gobetween\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Gobetween load balancer metrics for optimized network traffic management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to the Gobetween built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis 
collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. 
If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. 
The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9865,7 +10054,7 @@ "community": true }, "overview": "# Google Cloud Platform\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Google Cloud Platform metrics for comprehensive cloud resource management and performance optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Google Cloud Platform Exporter](https://github.com/DazWilkin/gcp-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy 
default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Google Cloud Platform Exporter](https://github.com/DazWilkin/gcp-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Google Cloud Platform Exporter](https://github.com/DazWilkin/gcp-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9904,7 +10093,7 @@ "community": true }, "overview": "# Google Pagespeed\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Google PageSpeed Insights performance metrics for efficient web page optimization and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Pagespeed exporter](https://github.com/foomo/pagespeed_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from 
multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Pagespeed exporter](https://github.com/foomo/pagespeed_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Pagespeed exporter](https://github.com/foomo/pagespeed_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9943,7 +10132,7 @@ "community": true }, "overview": "# Google Stackdriver\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Google Stackdriver monitoring metrics for optimized cloud performance and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Google Stackdriver exporter](https://github.com/prometheus-community/stackdriver_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting 
metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Google Stackdriver exporter](https://github.com/prometheus-community/stackdriver_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Google Stackdriver exporter](https://github.com/prometheus-community/stackdriver_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -9978,7 +10167,7 @@ "community": true }, "overview": "# Grafana\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Grafana dashboard and visualization metrics for optimized monitoring and data analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to Grafana built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including 
remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. 
If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. 
The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10013,7 +10202,7 @@ "community": true }, "overview": "# Graylog Server\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Graylog server metrics for efficient log management and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to Graylog built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known 
ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Configure built-in Prometheus exporter\n\nTo configure the built-in Prometheus exporter, follow the [official documentation](https://go2docs.graylog.org/5-0/interacting_with_your_log_data/metrics.html#PrometheusMetricExporting).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Configure built-in Prometheus exporter\n\nTo configure the built-in Prometheus exporter, follow the [official documentation](https://go2docs.graylog.org/5-0/interacting_with_your_log_data/metrics.html#PrometheusMetricExporting).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. 
| 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug 
Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10048,7 +10237,7 @@ "community": true }, "overview": "# HANA\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack SAP HANA database metrics for efficient data storage and query performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [HANA Exporter](https://github.com/jenningsloy318/hana_exporter).\n\n\nThis collector is supported on all 
platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [HANA Exporter](https://github.com/jenningsloy318/hana_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [HANA Exporter](https://github.com/jenningsloy318/hana_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10083,7 +10272,7 @@ "community": true }, "overview": "# HDSentinel\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Hard Disk Sentinel metrics for efficient storage device health management and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [HDSentinel Exporter](https://github.com/qusielle/hdsentinel-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple 
instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [HDSentinel Exporter](https://github.com/qusielle/hdsentinel-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [HDSentinel Exporter](https://github.com/qusielle/hdsentinel-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10118,7 +10307,7 @@ "community": true }, "overview": "# HHVM\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor HipHop Virtual Machine metrics for efficient\nPHP execution and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [HHVM Exporter](https://github.com/wikimedia/operations-software-hhvm_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this 
integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [HHVM Exporter](https://github.com/wikimedia/operations-software-hhvm_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [HHVM Exporter](https://github.com/wikimedia/operations-software-hhvm_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10153,7 +10342,7 @@ "community": true }, "overview": "# HP iLO\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor HP Integrated Lights Out (iLO) metrics for efficient server management and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [HP iLO Metrics Exporter](https://github.com/infinityworks/hpilo-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances 
of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [HP iLO Metrics Exporter](https://github.com/infinityworks/hpilo-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [HP iLO Metrics Exporter](https://github.com/infinityworks/hpilo-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10188,7 +10377,7 @@ "community": true }, "overview": "# Halon\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Halon email security and delivery metrics for optimized email management and protection.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Halon exporter](https://github.com/tobiasbp/halon_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this 
integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Halon exporter](https://github.com/tobiasbp/halon_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Halon exporter](https://github.com/tobiasbp/halon_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10223,7 +10412,7 @@ "community": true }, "overview": "# HashiCorp Vault secrets\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack HashiCorp Vault security assessment metrics for efficient secrets management and security.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Vault Assessment Prometheus Exporter](https://github.com/tomtom-international/vault-assessment-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis 
collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Vault Assessment Prometheus Exporter](https://github.com/tomtom-international/vault-assessment-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Vault Assessment Prometheus Exporter](https://github.com/tomtom-international/vault-assessment-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. 
| 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug 
Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10258,7 +10447,7 @@ "community": true }, "overview": "# Hasura GraphQL Server\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Hasura GraphQL engine metrics for optimized\nAPI performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Hasura Exporter](https://github.com/zolamk/hasura-exporter).\n\n\nThis collector is 
supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Hasura Exporter](https://github.com/zolamk/hasura-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Hasura Exporter](https://github.com/zolamk/hasura-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10293,7 +10482,7 @@ "community": true }, "overview": "# Helium hotspot\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Helium hotspot metrics for optimized LoRaWAN network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Helium hotspot exporter](https://github.com/tedder/helium_hotspot_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple 
instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Helium hotspot exporter](https://github.com/tedder/helium_hotspot_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
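The updated setup table in this hunk documents a new `label_prefix` option but ships no example of it. A minimal sketch, assuming a hypothetical prefix `hh` (per the table, a label named `region` would then be exposed as `hh_region`):

```yaml
jobs:
  - name: local
    url: http://127.0.0.1:9090/metrics
    label_prefix: hh   # hypothetical prefix; a label "region" becomes "hh_region"
```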
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Helium hotspot exporter](https://github.com/tedder/helium_hotspot_exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type (Counter or Gauge) applied to Untyped metrics. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit.
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10328,7 +10517,7 @@ "community": true }, "overview": "# Helium miner (validator)\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Helium miner and validator metrics for efficient blockchain performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Helium miner (validator) exporter](https://github.com/tedder/miner_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics 
from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Helium miner (validator) exporter](https://github.com/tedder/miner_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
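As with the selector, a concrete sketch of the `fallback_type` option (documented a few lines below) may help; the metric-name patterns here are hypothetical, not taken from any real exporter:

```yaml
jobs:
  - name: local
    url: http://127.0.0.1:9090/metrics
    fallback_type:
      counter:
        - 'miner_blocks_*'   # hypothetical: untyped miner_blocks_* series are read as counters
      gauge:
        - 'miner_height'     # hypothetical: untyped miner_height is read as a gauge
```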
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Helium miner (validator) exporter](https://github.com/tedder/miner_exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type (Counter or Gauge) applied to Untyped metrics. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit.
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10363,7 +10552,7 @@ "community": true }, "overview": "# Hitron CGN series CPE\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Hitron CGNV4 gateway metrics for efficient network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Hitron CGNV4 exporter](https://github.com/yrro/hitron-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of 
this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Hitron CGNV4 exporter](https://github.com/yrro/hitron-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
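The options table lists `headers` and the `proxy_*` settings without an example. A sketch under the assumption that `headers` accepts a map of header names to values (token, proxy address, and credentials are all hypothetical):

```yaml
jobs:
  - name: local
    url: http://127.0.0.1:9090/metrics
    headers:
      Authorization: Bearer my-token   # hypothetical token
    proxy_url: http://127.0.0.1:3128   # hypothetical local proxy
    proxy_username: proxyuser          # hypothetical credentials
    proxy_password: proxypass
```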
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Hitron CGNV4 exporter](https://github.com/yrro/hitron-exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type (Counter or Gauge) applied to Untyped metrics. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit.
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10398,7 +10587,7 @@ "community": true }, "overview": "# Hitron CODA Cable Modem\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Hitron CODA cable modem metrics for optimized internet connectivity and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Hitron CODA Cable Modem Exporter](https://github.com/hairyhenderson/hitron_coda_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting 
metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Hitron CODA Cable Modem Exporter](https://github.com/hairyhenderson/hitron_coda_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
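For the limit options in the table (documented defaults of 2000 globally and 200 per metric) and `timeout` (default 10), a sketch adjusting all three; the values are illustrative only:

```yaml
jobs:
  - name: local
    url: http://127.0.0.1:9090/metrics
    timeout: 5                        # seconds; the documented default is 10
    max_time_series: 5000             # raise the global limit from the default 2000
    max_time_series_per_metric: 500   # raise the per-metric limit from the default 200
```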
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Hitron CODA Cable Modem Exporter](https://github.com/hairyhenderson/hitron_coda_exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type (Counter or Gauge) applied to Untyped metrics. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit.
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10433,7 +10622,7 @@ "community": true }, "overview": "# Homebridge\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Homebridge smart home metrics for efficient home automation management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Homebridge Prometheus Exporter](https://github.com/lstrojny/homebridge-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics 
from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Homebridge Prometheus Exporter](https://github.com/lstrojny/homebridge-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Homebridge Prometheus Exporter](https://github.com/lstrojny/homebridge-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10468,7 +10657,7 @@ "community": true }, "overview": "# Homey\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Homey smart home controller metrics for efficient home automation and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Homey Exporter](https://github.com/rickardp/homey-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this 
integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Homey Exporter](https://github.com/rickardp/homey-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Homey Exporter](https://github.com/rickardp/homey-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10503,7 +10692,7 @@ "community": true }, "overview": "# Honeypot\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor honeypot metrics for efficient threat detection and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Intrinsec honeypot_exporter](https://github.com/Intrinsec/honeypot_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this 
integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Intrinsec honeypot_exporter](https://github.com/Intrinsec/honeypot_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Intrinsec honeypot_exporter](https://github.com/Intrinsec/honeypot_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10538,7 +10727,7 @@ "community": true }, "overview": "# Huawei devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Huawei HiLink device metrics for optimized connectivity and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Huawei Hilink exporter](https://github.com/eliecharra/hilink-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of 
this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Huawei Hilink exporter](https://github.com/eliecharra/hilink-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Huawei Hilink exporter](https://github.com/eliecharra/hilink-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10573,7 +10762,7 @@ "community": true }, "overview": "# Hubble\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Hubble network observability metrics for efficient network visibility and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to Hubble built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote 
instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Configure built-in Prometheus exporter\n\nTo configure the built-in Prometheus exporter, follow the [official documentation](https://docs.cilium.io/en/stable/observability/metrics/#hubble-metrics).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Configure built-in Prometheus exporter\n\nTo configure the built-in Prometheus exporter, follow the [official documentation](https://docs.cilium.io/en/stable/observability/metrics/#hubble-metrics).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix added to the labels of all charts. If set, label names are automatically formatted as `prefix_name` (the prefix, an underscore, and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n
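\nAs an illustration, here is a job that combines a few of the options above; the job name, URL, and prefix are placeholder values, not defaults:\n\n```yaml\njobs:\n - name: hubble\n url: http://127.0.0.1:9090/metrics\n # with this prefix, a label named source is exposed as hubble_source\n label_prefix: hubble\n```\n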
\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug 
Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10608,7 +10797,7 @@ "community": true }, "overview": "# IBM AIX systems Njmon\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on NJmon system performance monitoring metrics for efficient IT infrastructure management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [NJmon](https://github.com/crooks/njmon_exporter).\n\n\nThis 
collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [NJmon](https://github.com/crooks/njmon_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [NJmon](https://github.com/crooks/njmon_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix added to the labels of all charts. If set, label names are automatically formatted as `prefix_name` (the prefix, an underscore, and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n
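\nAs an illustration, here is a job that combines a few of the options above; the job name, URL, and prefix are placeholder values, not defaults:\n\n```yaml\njobs:\n - name: njmon\n url: http://127.0.0.1:9090/metrics\n # with this prefix, a label named source is exposed as njmon_source\n label_prefix: njmon\n```\n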
\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10643,7 +10832,7 @@ "community": true }, "overview": "# IBM CryptoExpress (CEX) cards\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack IBM Z Crypto Express device metrics for optimized cryptographic performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [IBM Z CEX Device Plugin Prometheus Exporter](https://github.com/ibm-s390-cloud/k8s-cex-dev-plugin).\n\n\nThis collector is supported on all platforms.\n\nThis collector 
supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [IBM Z CEX Device Plugin Prometheus Exporter](https://github.com/ibm-s390-cloud/k8s-cex-dev-plugin) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [IBM Z CEX Device Plugin Prometheus Exporter](https://github.com/ibm-s390-cloud/k8s-cex-dev-plugin) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix added to the labels of all charts. If set, label names are automatically formatted as `prefix_name` (the prefix, an underscore, and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n
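\nAs an illustration, here is a job that combines a few of the options above; the job name, URL, and prefix are placeholder values, not defaults:\n\n```yaml\njobs:\n - name: cex\n url: http://127.0.0.1:9090/metrics\n # with this prefix, a label named source is exposed as cex_source\n label_prefix: cex\n```\n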
\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug 
Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10678,7 +10867,7 @@ "community": true }, "overview": "# IBM MQ\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on IBM MQ message queue metrics for efficient message transport and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [MQ Exporter](https://github.com/agebhar1/mq_exporter).\n\n\nThis collector is supported on all 
platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [MQ Exporter](https://github.com/agebhar1/mq_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [MQ Exporter](https://github.com/agebhar1/mq_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix added to the labels of all charts. If set, label names are automatically formatted as `prefix_name` (the prefix, an underscore, and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n
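\nAs an illustration, here is a job that combines a few of the options above; the job name, URL, and prefix are placeholder values, not defaults:\n\n```yaml\njobs:\n - name: mq\n url: http://127.0.0.1:9090/metrics\n # with this prefix, a label named source is exposed as mq_source\n label_prefix: mq\n```\n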
\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10713,7 +10902,7 @@ "community": true }, "overview": "# IBM Spectrum\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor IBM Spectrum storage metrics for efficient data management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [IBM Spectrum Exporter](https://github.com/topine/ibm-spectrum-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this 
integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [IBM Spectrum Exporter](https://github.com/topine/ibm-spectrum-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [IBM Spectrum Exporter](https://github.com/topine/ibm-spectrum-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type (counter or gauge) applied to Untyped metrics matched by metric name pattern. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10748,7 +10937,7 @@ "community": true }, "overview": "# IBM Spectrum Virtualize\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor IBM Spectrum Virtualize metrics for efficient storage virtualization and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [spectrum_virtualize_exporter](https://github.com/bluecmd/spectrum_virtualize_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting 
metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [spectrum_virtualize_exporter](https://github.com/bluecmd/spectrum_virtualize_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [spectrum_virtualize_exporter](https://github.com/bluecmd/spectrum_virtualize_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type (counter or gauge) applied to Untyped metrics matched by metric name pattern. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10783,7 +10972,7 @@ "community": true }, "overview": "# IBM Z Hardware Management Console\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor IBM Z Hardware Management Console metrics for efficient mainframe management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [IBM Z HMC Exporter](https://github.com/zhmcclient/zhmc-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports 
collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [IBM Z HMC Exporter](https://github.com/zhmcclient/zhmc-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [IBM Z HMC Exporter](https://github.com/zhmcclient/zhmc-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type (counter or gauge) applied to Untyped metrics matched by metric name pattern. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10818,7 +11007,7 @@ "community": true }, "overview": "# IOTA full node\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on IOTA cryptocurrency network metrics for efficient blockchain performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [IOTA Exporter](https://github.com/crholliday/iota-prom-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple 
instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [IOTA Exporter](https://github.com/crholliday/iota-prom-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [IOTA Exporter](https://github.com/crholliday/iota-prom-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type (counter or gauge) applied to Untyped metrics matched by metric name pattern. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10857,7 +11046,7 @@ "community": true }, "overview": "# IPMI (By SoundCloud)\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor IPMI metrics externally for efficient server hardware management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SoundCloud IPMI Exporter (querying IPMI externally, blackbox-exporter style)](https://github.com/prometheus-community/ipmi_exporter).\n\n\nThis collector is supported on all 
platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SoundCloud IPMI Exporter (querying IPMI externally, blackbox-exporter style)](https://github.com/prometheus-community/ipmi_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SoundCloud IPMI Exporter (querying IPMI externally, blackbox-exporter style)](https://github.com/prometheus-community/ipmi_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type (counter or gauge) applied to Untyped metrics matched by metric name pattern. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. 
| 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug 
Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10896,7 +11085,7 @@ "community": true }, "overview": "# InfluxDB\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor InfluxDB time-series database metrics for efficient data storage and query performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [InfluxDB exporter](https://github.com/prometheus/influxdb_exporter).\n\n\nThis collector is 
supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [InfluxDB exporter](https://github.com/prometheus/influxdb_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [InfluxDB exporter](https://github.com/prometheus/influxdb_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics (counter or gauge). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10931,7 +11120,7 @@ "community": true }, "overview": "# JMX\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Java Management Extensions (JMX) metrics for efficient Java application management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [JMX Exporter](https://github.com/prometheus/jmx_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this 
integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [JMX Exporter](https://github.com/prometheus/jmx_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [JMX Exporter](https://github.com/prometheus/jmx_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics (counter or gauge). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -10966,7 +11155,7 @@ "community": true }, "overview": "# Jarvis Standing Desk\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Jarvis standing desk usage metrics for efficient workspace ergonomics and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Jarvis Standing Desk Exporter](https://github.com/hairyhenderson/jarvis_exporter/).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from 
multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Jarvis Standing Desk Exporter](https://github.com/hairyhenderson/jarvis_exporter/) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Jarvis Standing Desk Exporter](https://github.com/hairyhenderson/jarvis_exporter/) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics (counter or gauge). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -11001,7 +11190,7 @@ "community": true }, "overview": "# Jenkins\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Jenkins continuous integration server metrics for efficient development and build management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Jenkins exporter](https://github.com/simplesurance/jenkins-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances 
of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Jenkins exporter](https://github.com/simplesurance/jenkins-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Jenkins exporter](https://github.com/simplesurance/jenkins-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics (counter or gauge). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -11036,7 +11225,7 @@ "community": true }, "overview": "# JetBrains Floating License Server\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor JetBrains floating license server metrics for efficient software licensing management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [JetBrains Floating License Server Export](https://github.com/mkreu/jetbrains-fls-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports 
collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [JetBrains Floating License Server Export](https://github.com/mkreu/jetbrains-fls-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
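For instance, a minimal sketch that keeps only license-server series while dropping one noisy family might look like the example below (the `fls_*` patterns are illustrative assumptions about this exporter's metric naming, not taken from its documentation):\n\n```yaml\n# hypothetical metric name patterns - replace with names your exporter actually exposes\nselector:\n allow:\n - fls_*\n deny:\n - fls_http_*\n```\n\n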
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [JetBrains Floating License Server Export](https://github.com/mkreu/jetbrains-fls-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -11075,7 +11264,7 @@ "community": true }, "overview": "# Kafka\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Kafka message queue metrics for optimized data streaming and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Kafka Exporter](https://github.com/danielqsj/kafka_exporter/).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this 
integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Kafka Exporter](https://github.com/danielqsj/kafka_exporter/) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
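For instance, a minimal sketch that keeps broker and topic series while dropping per-partition ones might look like the example below (the `kafka_*` patterns are illustrative assumptions about this exporter's metric naming, so verify them against your endpoint's output):\n\n```yaml\n# illustrative metric name patterns - verify against the exporter's /metrics output\nselector:\n allow:\n - kafka_*\n deny:\n - kafka_topic_partition_*\n```\n\n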
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Kafka Exporter](https://github.com/danielqsj/kafka_exporter/) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -11114,7 +11303,7 @@ "community": true }, "overview": "# Kafka Connect\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Kafka Connect metrics for efficient data streaming and integration.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Kafka Connect exporter](https://github.com/findelabs/kafka-connect-exporter-rs).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances 
of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Kafka Connect exporter](https://github.com/findelabs/kafka-connect-exporter-rs) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
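For instance, a minimal sketch that keeps only connector and task series might look like the example below (the `kafka_connect_*` patterns are illustrative assumptions about this exporter's metric naming, not taken from its documentation):\n\n```yaml\n# hypothetical metric name patterns - adjust to the names your exporter actually exposes\nselector:\n allow:\n - kafka_connect_connector_*\n - kafka_connect_task_*\n```\n\n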
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Kafka Connect exporter](https://github.com/findelabs/kafka-connect-exporter-rs) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -11153,7 +11342,7 @@ "community": true }, "overview": "# Kafka Consumer Lag\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Kafka consumer lag metrics for efficient message queue management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Kafka Consumer Lag Monitoring](https://github.com/omarsmak/kafka-consumer-lag-monitoring).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics 
from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Kafka Consumer Lag Monitoring](https://github.com/omarsmak/kafka-consumer-lag-monitoring) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
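For instance, a minimal sketch that keeps only consumer-group lag series might look like the example below (the `kafka_consumergroup_*` pattern is an illustrative assumption about this exporter's metric naming, not taken from its documentation):\n\n```yaml\n# hypothetical metric name pattern - replace with names your exporter actually exposes\nselector:\n allow:\n - kafka_consumergroup_*\n```\n\n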
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Kafka Consumer Lag Monitoring](https://github.com/omarsmak/kafka-consumer-lag-monitoring) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -11192,7 +11381,7 @@ "community": true }, "overview": "# Kafka ZooKeeper\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Kafka ZooKeeper metrics for optimized distributed coordination and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Kafka ZooKeeper Exporter](https://github.com/cloudflare/kafka_zookeeper_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple 
instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Kafka ZooKeeper Exporter](https://github.com/cloudflare/kafka_zookeeper_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Kafka ZooKeeper Exporter](https://github.com/cloudflare/kafka_zookeeper_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type (Counter or Gauge) for processing Untyped metrics. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics whose number of time series exceeds the limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
    "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -11227,7 +11416,7 @@ "community": true }, "overview": "# Kannel\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Kannel SMS gateway and WAP gateway metrics for efficient mobile communication and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Kannel Exporter](https://github.com/apostvav/kannel_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances 
of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Kannel Exporter](https://github.com/apostvav/kannel_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Kannel Exporter](https://github.com/apostvav/kannel_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type (Counter or Gauge) for processing Untyped metrics. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics whose number of time series exceeds the limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
    "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -11262,7 +11451,7 @@ "community": true }, "overview": "# Keepalived\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Keepalived metrics for efficient high-availability and load balancing management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Keepalived Exporter](https://github.com/gen2brain/keepalived_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of 
this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Keepalived Exporter](https://github.com/gen2brain/keepalived_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Keepalived Exporter](https://github.com/gen2brain/keepalived_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type (Counter or Gauge) for processing Untyped metrics. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics whose number of time series exceeds the limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
    "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -11301,7 +11490,7 @@ "community": true }, "overview": "# Kubernetes Cluster Cloud Cost\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Kubernetes cloud cost metrics for efficient cloud resource management and budgeting.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Kubernetes Cloud Cost Exporter](https://github.com/agilestacks/korral).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics 
from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Kubernetes Cloud Cost Exporter](https://github.com/agilestacks/korral) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Kubernetes Cloud Cost Exporter](https://github.com/agilestacks/korral) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type (Counter or Gauge) for processing Untyped metrics. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics whose number of time series exceeds the limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
    "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -11336,7 +11525,7 @@ "community": true }, "overview": "# LDAP\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Lightweight Directory Access Protocol (LDAP) metrics for efficient directory service management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [LDAP Exporter](https://github.com/titisan/ldap_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from 
multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [LDAP Exporter](https://github.com/titisan/ldap_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [LDAP Exporter](https://github.com/titisan/ldap_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see the `fallback_type` section below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit.
Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
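##### Label prefix\n\n> **Note**: The `ldap` prefix below is only an illustrative placeholder.\n\nA minimal sketch of the `label_prefix` option: with the configuration below, a chart label named `version` would be exposed as `ldap_version`.\n\n\n```yaml\n# assumption: ldap is a placeholder prefix, not a required value\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n label_prefix: ldap\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI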
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf Netdata runs in a Docker container named \"netdata\" (replace with your container's name if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", @@ -11371,7 +11560,7 @@ "community": true }, "overview": "# Lagerist Disk latency\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack disk latency metrics for efficient storage performance and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Lagerist Disk latency exporter](https://github.com/Svedrin/lagerist).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this
integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Lagerist Disk latency exporter](https://github.com/Svedrin/lagerist) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Lagerist Disk latency exporter](https://github.com/Svedrin/lagerist) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see the `fallback_type` section below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit.
Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
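##### Time series selector\n\n> **Note**: The `lagerist_*` and `go_*` patterns are hypothetical; adjust them to the metric names your exporter actually exposes.\n\nA minimal sketch of a job-level `selector` that keeps only the exporter's own series and drops Go runtime internals.\n\n\n```yaml\n# assumption: metric names starting with lagerist_ are the ones of interest\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n selector:\n allow:\n - lagerist_*\n deny:\n - go_*\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI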
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf Netdata runs in a Docker container named \"netdata\" (replace with your container's name if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", @@ -11406,7 +11595,7 @@ "community": true }, "overview": "# Linode\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Linode cloud hosting metrics for efficient virtual server management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Linode Exporter](https://github.com/DazWilkin/linode-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this
integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Linode Exporter](https://github.com/DazWilkin/linode-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Linode Exporter](https://github.com/DazWilkin/linode-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see the `fallback_type` section below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit.
Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
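##### Untyped metrics fallback\n\n> **Note**: The metric name patterns are hypothetical; match them to the untyped series your endpoint actually exposes.\n\nA minimal sketch of `fallback_type` that treats untyped `*_bytes` series as gauges and untyped `*_events` series as counters instead of ignoring them.\n\n\n```yaml\n# assumption: these patterns are placeholders for real metric names\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n fallback_type:\n gauge:\n - '*_bytes'\n counter:\n - '*_events'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI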
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf Netdata runs in a Docker container named \"netdata\" (replace with your container's name if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", @@ -11441,7 +11630,7 @@ "community": true }, "overview": "# Lustre metadata\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on the Lustre clustered file system for efficient management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cluster Exporter](https://github.com/GSI-HPC/prometheus-cluster-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of
this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cluster Exporter](https://github.com/GSI-HPC/prometheus-cluster-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cluster Exporter](https://github.com/GSI-HPC/prometheus-cluster-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see the `fallback_type` section below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit.
Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
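##### Behind an HTTP proxy\n\n> **Note**: The proxy address and credentials below are placeholders.\n\nA minimal sketch for reaching the exporter through a proxy that requires basic HTTP authentication.\n\n\n```yaml\n# assumption: a local proxy listens on port 3128\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n proxy_url: http://127.0.0.1:3128\n proxy_username: username\n proxy_password: password\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI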
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf Netdata runs in a Docker container named \"netdata\" (replace with your container's name if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", @@ -11476,7 +11665,7 @@ "community": true }, "overview": "# Lynis audit reports\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Lynis security auditing tool metrics for efficient system security and compliance management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [lynis_exporter](https://github.com/MauveSoftware/lynis_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple
instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [lynis_exporter](https://github.com/MauveSoftware/lynis_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [lynis_exporter](https://github.com/MauveSoftware/lynis_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see the `fallback_type` section below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit.
##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n
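As a sketch of how the two options above fit into a job definition (the URL and patterns are illustrative, not defaults), the following hypothetical job keeps only `lynis_*` time series except `*_created` ones, and treats untyped `lynis_score*` series as gauges:\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n # collect lynis_* series only, dropping *_created\n selector:\n allow:\n - lynis_*\n deny:\n - '*_created'\n # process matching untyped series as Gauge\n fallback_type:\n gauge:\n - lynis_score*\n```\n\n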
#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -11511,7 +11700,7 @@ "community": true }, "overview": "# MP707 USB thermometer\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack MP707 USB thermometer metrics for efficient temperature monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [MP707 exporter](https://github.com/nradchenko/mp707_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this 
integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [MP707 exporter](https://github.com/nradchenko/mp707_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [MP707 exporter](https://github.com/nradchenko/mp707_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -11546,7 +11735,7 @@ "community": true }, "overview": "# MQTT Blackbox\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack MQTT message transport performance using blackbox testing methods.\n\n\nMetrics are gathered by periodically sending HTTP requests to [MQTT Blackbox Exporter](https://github.com/inovex/mqtt_blackbox_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this 
integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [MQTT Blackbox Exporter](https://github.com/inovex/mqtt_blackbox_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [MQTT Blackbox Exporter](https://github.com/inovex/mqtt_blackbox_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -11581,7 +11770,7 @@ "community": true }, "overview": "# Machbase\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Machbase time-series database metrics for efficient data storage and query performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Machbase Exporter](https://github.com/MACHBASE/prometheus-machbase-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple 
instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Machbase Exporter](https://github.com/MACHBASE/prometheus-machbase-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Machbase Exporter](https://github.com/MACHBASE/prometheus-machbase-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -11616,7 +11805,7 @@ "community": true }, "overview": "# Maildir\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack mail server metrics for optimized email management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [mailexporter](https://github.com/cherti/mailexporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote 
instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [mailexporter](https://github.com/cherti/mailexporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [mailexporter](https://github.com/cherti/mailexporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -11651,7 +11840,7 @@ "community": true }, "overview": "# Meilisearch\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Meilisearch search engine metrics for efficient search performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Meilisearch Exporter](https://github.com/scottaglia/meilisearch_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances 
of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Meilisearch Exporter](https://github.com/scottaglia/meilisearch_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Meilisearch Exporter](https://github.com/scottaglia/meilisearch_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n",
@@ -11686,7 +11875,7 @@
      "community": true
    },
    "overview": "# Memcached (community)\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Memcached in-memory key-value store metrics for efficient caching performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Memcached exporter](https://github.com/prometheus/memcached_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from
multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Memcached exporter](https://github.com/prometheus/memcached_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Memcached exporter](https://github.com/prometheus/memcached_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n",
@@ -11721,7 +11910,7 @@
      "community": true
    },
    "overview": "# Meraki dashboard\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Cisco Meraki cloud-managed networking device metrics for efficient network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Meraki dashboard data exporter using API](https://github.com/TheHolm/meraki-dashboard-promethus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis
collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Meraki dashboard data exporter using API](https://github.com/TheHolm/meraki-dashboard-promethus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Meraki dashboard data exporter using API](https://github.com/TheHolm/meraki-dashboard-promethus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n",
@@ -11756,7 +11945,7 @@
      "community": true
    },
    "overview": "# Mesos\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Apache Mesos cluster manager metrics for efficient resource management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Mesos exporter](http://github.com/mesosphere/mesos_exporter).\n\n\nThis collector is supported on all
platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Mesos exporter](http://github.com/mesosphere/mesos_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Mesos exporter](http://github.com/mesosphere/mesos_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n",
@@ -11791,7 +11980,7 @@
      "community": true
    },
    "overview": "# MikroTik devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on MikroTik RouterOS metrics for efficient network device management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [mikrotik-exporter](https://github.com/swoga/mikrotik-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances
of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",
-    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [swoga/mikrotik-exporter](https://github.com/swoga/mikrotik-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge based on metric name pattern match (see 'fallback_type' below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
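For example, a selector that keeps every interface series except error counters might look like the following sketch (the metric names are illustrative assumptions, not guaranteed exporter output; check the exporter's metrics endpoint for the real names):\n\n```yaml\n# hypothetical metric names, for illustration only\nselector:\n allow:\n - mikrotik_interface_*\n deny:\n - mikrotik_interface_errors_*\n```\n\n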
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [swoga/mikrotik-exporter](https://github.com/swoga/mikrotik-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge based on metric name pattern match (see 'fallback_type' below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",
        "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n",
        "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n",
@@ -11826,7 +12015,7 @@
            "community": true
        },
        "overview": "# MikroTik RouterOS devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack MikroTik RouterOS metrics for efficient network device management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [RouterOS exporter](https://github.com/welbymcroberts/routeros_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple 
instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [RouterOS exporter](https://github.com/welbymcroberts/routeros_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
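As a sketch of how the allow and deny lists combine (the routeros_* metric names below are assumptions for illustration, not guaranteed exporter output), the following keeps series matching either allow pattern unless they also match the deny pattern:\n\n```yaml\n# hypothetical metric names, for illustration only\nselector:\n allow:\n - routeros_system_*\n - routeros_interface_*\n deny:\n - routeros_interface_monitor_*\n```\n\n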
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [RouterOS exporter](https://github.com/welbymcroberts/routeros_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",
        "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n",
        "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n",
@@ -11861,7 +12050,7 @@
            "community": true
        },
        "overview": "# Minecraft\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Minecraft server metrics for efficient game server management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Minecraft Exporter](https://github.com/sladkoff/minecraft-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of 
this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Minecraft Exporter](https://github.com/sladkoff/minecraft-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
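For instance, to keep only a couple of gameplay series and drop everything else the exporter exposes, an allow-only selector could look like this (the mc_* names are hypothetical; check the exporter's metrics endpoint for the real ones):\n\n```yaml\n# hypothetical metric names, for illustration only\nselector:\n allow:\n - mc_players_*\n - mc_tick_duration_*\n```\n\n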
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Minecraft Exporter](https://github.com/sladkoff/minecraft-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",
        "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n",
        "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n",
@@ -11900,7 +12089,7 @@
            "community": true
        },
        "overview": "# Modbus protocol\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Modbus RTU protocol metrics for efficient industrial automation and control performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [modbusrtu_exporter](https://github.com/dernasherbrezon/modbusrtu_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple 
instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [modbusrtu_exporter](https://github.com/dernasherbrezon/modbusrtu_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
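As an illustration, exact metric names and glob-style patterns can appear in the same list (both entries below are made up for this sketch; the real names come from the modbusrtu_exporter output):\n\n```yaml\n# hypothetical metric names, for illustration only\nselector:\n allow:\n - modbus_request_duration_seconds\n - modbus_errors_*\n```\n\n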
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [modbusrtu_exporter](https://github.com/dernasherbrezon/modbusrtu_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",
        "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n",
        "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n",
@@ -11935,7 +12124,7 @@
            "community": true
        },
        "overview": "# MogileFS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor MogileFS distributed file system metrics for efficient storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [MogileFS Exporter](https://github.com/KKBOX/mogilefs-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of 
this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [MogileFS Exporter](https://github.com/KKBOX/mogilefs-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
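To show where this option sits in practice, here is a sketch of a complete job entry in `go.d/prometheus.conf` with a selector attached (the port and the mogilefs_* prefix are assumptions for illustration):\n\n```yaml\njobs:\n - name: mogilefs\n url: http://127.0.0.1:9099/metrics # hypothetical exporter address\n selector:\n allow:\n - mogilefs_*\n```\n\n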
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [MogileFS Exporter](https://github.com/KKBOX/mogilefs-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type (Counter or Gauge) for Untyped metrics. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, its data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -11970,7 +12159,7 @@ "community": true }, "overview": "# Monnit Sensors MQTT\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Monnit sensor data via MQTT for efficient IoT device monitoring and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Monnit Sensors MQTT Exporter WIP](https://github.com/braxton9460/monnit-mqtt-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from 
multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Monnit Sensors MQTT Exporter WIP](https://github.com/braxton9460/monnit-mqtt-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Monnit Sensors MQTT Exporter WIP](https://github.com/braxton9460/monnit-mqtt-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type (Counter or Gauge) for Untyped metrics. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, its data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12005,7 +12194,7 @@ "community": true }, "overview": "# NRPE daemon\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Nagios Remote Plugin Executor (NRPE) metrics for efficient system and network monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [NRPE exporter](https://github.com/canonical/nrpe_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this 
integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [NRPE exporter](https://github.com/canonical/nrpe_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [NRPE exporter](https://github.com/canonical/nrpe_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type (Counter or Gauge) for Untyped metrics. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, its data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12040,7 +12229,7 @@ "community": true }, "overview": "# NSX-T\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack VMware NSX-T software-defined networking metrics for efficient network virtualization and security management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [NSX-T Exporter](https://github.com/jk8s/nsxt_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple 
instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [NSX-T Exporter](https://github.com/jk8s/nsxt_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [NSX-T Exporter](https://github.com/jk8s/nsxt_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type (Counter or Gauge) for Untyped metrics. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, its data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12075,7 +12264,7 @@ "community": true }, "overview": "# NVML\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on NVIDIA Management Library (NVML) GPU metrics for efficient GPU performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [NVML exporter](https://github.com/oko/nvml-exporter-rs).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this 
integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [NVML exporter](https://github.com/oko/nvml-exporter-rs) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [NVML exporter](https://github.com/oko/nvml-exporter-rs) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type (Counter or Gauge) for Untyped metrics. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, its data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which your monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which your monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which your monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which your monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12110,7 +12299,7 @@ "community": true }, "overview": "# Naemon\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Naemon or Nagios network monitoring metrics for efficient IT infrastructure management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Naemon / Nagios Exporter](https://github.com/Griesbacher/Iapetos).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple 
instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Naemon / Nagios Exporter](https://github.com/Griesbacher/Iapetos) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Naemon / Nagios Exporter](https://github.com/Griesbacher/Iapetos) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix added to the label names of all charts. If set, label names are formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n
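##### label_prefix\n\nThis option allows you to add a prefix to the label names of all charts (see the options table above). The snippet below is an illustrative sketch only: the prefix value `myprefix` is a placeholder, not a required name.\n\n- Option syntax:\n\n```yaml\nlabel_prefix: myprefix\n```\n\nWith this setting, a label named `instance` (a hypothetical example) would be exposed as `myprefix_instance`.\n\n\n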
#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which your monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which your monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which your monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which your monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12145,7 +12334,7 @@ "community": true }, "overview": "# Nagios\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Nagios network monitoring metrics for efficient\nIT infrastructure management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Nagios exporter](https://github.com/wbollock/nagios_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances 
of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Nagios exporter](https://github.com/wbollock/nagios_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Nagios exporter](https://github.com/wbollock/nagios_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix added to the label names of all charts. If set, label names are formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n
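##### label_prefix\n\nThis option allows you to add a prefix to the label names of all charts (see the options table above). The snippet below is an illustrative sketch only: the prefix value `myprefix` is a placeholder, not a required name.\n\n- Option syntax:\n\n```yaml\nlabel_prefix: myprefix\n```\n\nWith this setting, a label named `instance` (a hypothetical example) would be exposed as `myprefix_instance`.\n\n\n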
#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which your monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which your monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which your monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which your monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12180,7 +12369,7 @@ "community": true }, "overview": "# Nature Remo E lite devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Nature Remo E series smart home device metrics for efficient home automation and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Nature Remo E series Exporter](https://github.com/kenfdev/remo-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting 
metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Nature Remo E series Exporter](https://github.com/kenfdev/remo-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Nature Remo E series Exporter](https://github.com/kenfdev/remo-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix added to the label names of all charts. If set, label names are formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n
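##### label_prefix\n\nThis option allows you to add a prefix to the label names of all charts (see the options table above). The snippet below is an illustrative sketch only: the prefix value `myprefix` is a placeholder, not a required name.\n\n- Option syntax:\n\n```yaml\nlabel_prefix: myprefix\n```\n\nWith this setting, a label named `instance` (a hypothetical example) would be exposed as `myprefix_instance`.\n\n\n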
#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which your monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which your monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which your monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which your monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12219,7 +12408,7 @@ "community": true }, "overview": "# NetApp Solidfire\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack NetApp Solidfire storage system metrics for efficient data storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [NetApp Solidfire Exporter](https://github.com/mjavier2k/solidfire-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from 
multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [NetApp Solidfire Exporter](https://github.com/mjavier2k/solidfire-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [NetApp Solidfire Exporter](https://github.com/mjavier2k/solidfire-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix added to the label names of all charts. If set, label names are formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n
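##### label_prefix\n\nThis option allows you to add a prefix to the label names of all charts (see the options table above). The snippet below is an illustrative sketch only: the prefix value `myprefix` is a placeholder, not a required name.\n\n- Option syntax:\n\n```yaml\nlabel_prefix: myprefix\n```\n\nWith this setting, a label named `instance` (a hypothetical example) would be exposed as `myprefix_instance`.\n\n\n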
#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which your monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which your monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which your monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which your monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12258,7 +12447,7 @@ "community": true }, "overview": "# NetFlow\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack NetFlow network traffic metrics for efficient network monitoring and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [netflow exporter](https://github.com/paihu/netflow_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this 
integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [netflow exporter](https://github.com/paihu/netflow_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
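For instance, a minimal sketch of such a filter (the `netflow_*` pattern is illustrative only; match it against the metric names your exporter actually exposes, using the pattern syntax from the selector README linked just below):

```yaml
selector:
  allow:
    - netflow_*      # keep series whose metric name starts with "netflow_"
  deny:
    - "*_created"    # ...but drop any "_created" companion series
```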
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [netflow exporter](https://github.com/paihu/netflow_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12297,7 +12486,7 @@ "community": true }, "overview": "# NetMeter\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor NetMeter network traffic metrics for efficient network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [NetMeter Exporter](https://github.com/ssbostan/netmeter-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this 
integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [NetMeter Exporter](https://github.com/ssbostan/netmeter-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
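Read the allow/deny logic as: a series is kept only if it matches at least one `allow` pattern and none of the `deny` patterns. A hypothetical illustration (metric names invented for the example):

```yaml
selector:
  allow:
    - netmeter_tx_*
    - netmeter_rx_*
  deny:
    - "*_errors"
```

With these patterns, a series named `netmeter_tx_bytes` would be collected, while `netmeter_tx_errors` would be dropped even though it also matches an allow pattern.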
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [NetMeter Exporter](https://github.com/ssbostan/netmeter-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12336,7 +12525,7 @@ "community": true }, "overview": "# Netapp ONTAP API\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on NetApp ONTAP storage system metrics for efficient data storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Netapp ONTAP API Exporter](https://github.com/sapcc/netapp-api-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from 
multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Netapp ONTAP API Exporter](https://github.com/sapcc/netapp-api-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
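A deny-only filter is a common way to trim high-cardinality series. This sketch assumes the usual selector semantics in which an omitted `allow` list permits everything (verify against the selector README linked just below):

```yaml
selector:
  deny:
    - netapp_volume_*   # illustrative pattern: drop per-volume series if they grow too numerous
```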
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Netapp ONTAP API Exporter](https://github.com/sapcc/netapp-api-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12375,7 +12564,7 @@ "community": true }, "overview": "# Netatmo sensors\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Netatmo smart home device metrics for efficient home automation and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Netatmo exporter](https://github.com/xperimental/netatmo-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple 
instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Netatmo exporter](https://github.com/xperimental/netatmo-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
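One practical use is keeping histogram bucket series out of the collection. A sketch (the `_bucket` suffix follows the Prometheus histogram convention; the allow pattern is illustrative):

```yaml
selector:
  allow:
    - netatmo_*
  deny:
    - "*_bucket"   # histogram buckets multiply the number of time series quickly
```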
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Netatmo exporter](https://github.com/xperimental/netatmo-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12410,7 +12599,7 @@ "community": true }, "overview": "# New Relic\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor New Relic application performance management metrics for efficient application monitoring and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [New Relic exporter](https://github.com/jfindley/newrelic_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from 
multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [New Relic exporter](https://github.com/jfindley/newrelic_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [New Relic exporter](https://github.com/jfindley/newrelic_exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type (counter or gauge) for Untyped metrics, matched by metric name pattern. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix added to all labels of all charts. If set, label names are automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected (an illustrative example follows the option syntax below).\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n
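\nFor example, a minimal sketch of a job that keeps only this exporter's own series (the job name and the `newrelic_*` pattern are illustrative assumptions, not values taken from the exporter):\n\n```yaml\njobs:\n - name: newrelic\n url: http://127.0.0.1:9090/metrics\n # keep only series whose names match the assumed exporter prefix\n selector:\n allow:\n - newrelic_*\n```\n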
\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12445,7 +12634,7 @@ "community": true }, "overview": "# NextDNS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack NextDNS DNS resolver and security platform metrics for efficient DNS management and security.\n\n\nMetrics are gathered by periodically sending HTTP requests to [nextdns-exporter](https://github.com/raylas/nextdns-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of 
this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [nextdns-exporter](https://github.com/raylas/nextdns-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [nextdns-exporter](https://github.com/raylas/nextdns-exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type (counter or gauge) for Untyped metrics, matched by metric name pattern. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix added to all labels of all charts. If set, label names are automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected (an illustrative example follows the option syntax below).\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n
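\nFor example, a minimal sketch of a job that keeps only this exporter's own series (the job name and the `nextdns_*` pattern are illustrative assumptions, not values taken from the exporter):\n\n```yaml\njobs:\n - name: nextdns\n url: http://127.0.0.1:9090/metrics\n # keep only series whose names match the assumed exporter prefix\n selector:\n allow:\n - nextdns_*\n```\n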
\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12484,7 +12673,7 @@ "community": true }, "overview": "# Nextcloud servers\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Nextcloud cloud storage metrics for efficient file hosting and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Nextcloud exporter](https://github.com/xperimental/nextcloud-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple 
instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Nextcloud exporter](https://github.com/xperimental/nextcloud-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Nextcloud exporter](https://github.com/xperimental/nextcloud-exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type (counter or gauge) for Untyped metrics, matched by metric name pattern. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix added to all labels of all charts. If set, label names are automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected (an illustrative example follows the option syntax below).\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n
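\nFor example, a minimal sketch of a job that keeps only this exporter's own series (the job name and the `nextcloud_*` pattern are illustrative assumptions, not values taken from the exporter):\n\n```yaml\njobs:\n - name: nextcloud\n url: http://127.0.0.1:9090/metrics\n # keep only series whose names match the assumed exporter prefix\n selector:\n allow:\n - nextcloud_*\n```\n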
\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12519,7 +12708,7 @@ "community": true }, "overview": "# OBS Studio\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack OBS Studio live streaming and recording software metrics for efficient video production and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OBS Studio Exporter](https://github.com/lukegb/obs_studio_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from 
multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OBS Studio Exporter](https://github.com/lukegb/obs_studio_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OBS Studio Exporter](https://github.com/lukegb/obs_studio_exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type (counter or gauge) for Untyped metrics, matched by metric name pattern. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix added to all labels of all charts. If set, label names are automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected (an illustrative example follows the option syntax below).\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n
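\nFor example, a minimal sketch of a job that keeps only this exporter's own series (the job name and the `obs_*` pattern are illustrative assumptions, not values taken from the exporter):\n\n```yaml\njobs:\n - name: obs_studio\n url: http://127.0.0.1:9090/metrics\n # keep only series whose names match the assumed exporter prefix\n selector:\n allow:\n - obs_*\n```\n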
\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12558,7 +12747,7 @@ "community": true }, "overview": "# ODBC\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Open Database Connectivity (ODBC) metrics for efficient database connection and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ODBC Exporter](https://github.com/MACHBASE/prometheus-odbc-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple 
instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ODBC Exporter](https://github.com/MACHBASE/prometheus-odbc-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ODBC Exporter](https://github.com/MACHBASE/prometheus-odbc-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12593,7 +12782,7 @@ "community": true }, "overview": "# OTRS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor OTRS (Open-Source Ticket Request System) metrics for efficient helpdesk management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OTRS Exporter](https://github.com/JulianDroste/otrs_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances 
of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OTRS Exporter](https://github.com/JulianDroste/otrs_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OTRS Exporter](https://github.com/JulianDroste/otrs_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12628,7 +12817,7 @@ "community": true }, "overview": "# OpenHAB\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack openHAB smart home automation system metrics for efficient home automation and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OpenHAB exporter](https://github.com/pdreker/openhab_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances 
of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenHAB exporter](https://github.com/pdreker/openhab_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenHAB exporter](https://github.com/pdreker/openhab_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12663,7 +12852,7 @@ "community": true }, "overview": "# OpenLDAP (community)\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor OpenLDAP directory service metrics for efficient directory management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OpenLDAP Metrics Exporter](https://github.com/tomcz/openldap_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple 
instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenLDAP Metrics Exporter](https://github.com/tomcz/openldap_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenLDAP Metrics Exporter](https://github.com/tomcz/openldap_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12698,7 +12887,7 @@ "community": true }, "overview": "# OpenRC\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on OpenRC init system metrics for efficient system startup and service management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [openrc-exporter](https://git.sr.ht/~tomleb/openrc-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this 
integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [openrc-exporter](https://git.sr.ht/~tomleb/openrc-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [openrc-exporter](https://git.sr.ht/~tomleb/openrc-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n   url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n   url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n   url: http://127.0.0.1:9090/metrics\n   username: username\n   password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n   url: https://127.0.0.1:9090/metrics\n   tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n   url: http://127.0.0.1:9090/metrics\n\n - name: remote\n   url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12733,7 +12922,7 @@ "community": true }, "overview": "# OpenRCT2\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack OpenRCT2 game metrics for efficient game server management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OpenRCT2 Prometheus Exporter](https://github.com/terinjokes/openrct2-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple 
instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenRCT2 Prometheus Exporter](https://github.com/terinjokes/openrct2-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenRCT2 Prometheus Exporter](https://github.com/terinjokes/openrct2-prometheus-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n   url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n   url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n   url: http://127.0.0.1:9090/metrics\n   username: username\n   password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n   url: https://127.0.0.1:9090/metrics\n   tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n   url: http://127.0.0.1:9090/metrics\n\n - name: remote\n   url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12772,7 +12961,7 @@ "community": true }, "overview": "# OpenROADM devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor OpenROADM optical transport network metrics using the NETCONF protocol for efficient network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OpenROADM NETCONF Exporter WIP](https://github.com/utdal/openroadm_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector 
supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenROADM NETCONF Exporter WIP](https://github.com/utdal/openroadm_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenROADM NETCONF Exporter WIP](https://github.com/utdal/openroadm_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n   url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n   url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n   url: http://127.0.0.1:9090/metrics\n   username: username\n   password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n   url: https://127.0.0.1:9090/metrics\n   tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n   url: http://127.0.0.1:9090/metrics\n\n - name: remote\n   url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12807,7 +12996,7 @@ "community": true }, "overview": "# OpenStack\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack OpenStack cloud computing platform metrics for efficient infrastructure management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Openstack exporter](https://github.com/CanonicalLtd/prometheus-openstack-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics 
from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Openstack exporter](https://github.com/CanonicalLtd/prometheus-openstack-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Openstack exporter](https://github.com/CanonicalLtd/prometheus-openstack-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n   url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n   url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n   url: http://127.0.0.1:9090/metrics\n   username: username\n   password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n   url: https://127.0.0.1:9090/metrics\n   tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n   url: http://127.0.0.1:9090/metrics\n\n - name: remote\n   url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12842,7 +13031,7 @@ "community": true }, "overview": "# OpenVAS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor OpenVAS vulnerability scanner metrics for efficient security assessment and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OpenVAS exporter](https://github.com/ModeClearCode/openvas_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of 
this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenVAS exporter](https://github.com/ModeClearCode/openvas_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenVAS exporter](https://github.com/ModeClearCode/openvas_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n   url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n   url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n   url: http://127.0.0.1:9090/metrics\n   username: username\n   password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n   url: https://127.0.0.1:9090/metrics\n   tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n   url: http://127.0.0.1:9090/metrics\n\n - name: remote\n   url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12877,7 +13066,7 @@ "community": true }, "overview": "# OpenWeatherMap\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack OpenWeatherMap weather data and air pollution metrics for efficient environmental monitoring and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OpenWeatherMap Exporter](https://github.com/Tenzer/openweathermap-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting 
metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenWeatherMap Exporter](https://github.com/Tenzer/openweathermap-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", +    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenWeatherMap Exporter](https://github.com/Tenzer/openweathermap-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12912,7 +13101,7 @@ "community": true }, "overview": "# Open vSwitch\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Open vSwitch software-defined networking metrics for efficient network virtualization and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Open vSwitch Exporter](https://github.com/digitalocean/openvswitch_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting 
metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Open vSwitch Exporter](https://github.com/digitalocean/openvswitch_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", +    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Open vSwitch Exporter](https://github.com/digitalocean/openvswitch_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12952,7 +13141,7 @@ "community": true }, "overview": "# Oracle DB (community)\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Oracle Database metrics for efficient database management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Oracle DB Exporter](https://github.com/iamseth/oracledb_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this 
integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Oracle DB Exporter](https://github.com/iamseth/oracledb_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", +    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Oracle DB Exporter](https://github.com/iamseth/oracledb_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -12987,7 +13176,7 @@ "community": true }, "overview": "# Patroni\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Patroni PostgreSQL high-availability metrics for efficient database management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Patroni Exporter](https://github.com/gopaytech/patroni_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple 
instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Patroni Exporter](https://github.com/gopaytech/patroni_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", +    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Patroni Exporter](https://github.com/gopaytech/patroni_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -13022,7 +13211,7 @@ "community": true }, "overview": "# Personal Weather Station\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack personal weather station metrics for efficient weather monitoring and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Personal Weather Station Exporter](https://github.com/JohnOrthoefer/pws-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from 
multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Personal Weather Station Exporter](https://github.com/JohnOrthoefer/pws-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Personal Weather Station Exporter](https://github.com/JohnOrthoefer/pws-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", @@ -13057,7 +13246,7 @@ "community": true }, "overview": "# Pgpool-II\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Pgpool-II PostgreSQL middleware metrics for efficient database connection management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Pgpool-II Exporter](https://github.com/pgpool/pgpool2_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple 
instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Pgpool-II Exporter](https://github.com/pgpool/pgpool2_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Pgpool-II Exporter](https://github.com/pgpool/pgpool2_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", @@ -13092,7 +13281,7 @@ "community": true }, "overview": "# Philips Hue\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Philips Hue smart lighting metrics for efficient home automation and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Philips Hue Exporter](https://github.com/aexel90/hue_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple 
instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Philips Hue Exporter](https://github.com/aexel90/hue_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Philips Hue Exporter](https://github.com/aexel90/hue_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", @@ -13127,7 +13316,7 @@ "community": true }, "overview": "# Pimoroni Enviro+\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Pimoroni Enviro+ air quality and environmental metrics for efficient environmental monitoring and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Pimoroni Enviro+ Exporter](https://github.com/terradolor/prometheus-enviro-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports 
collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Pimoroni Enviro+ Exporter](https://github.com/terradolor/prometheus-enviro-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Pimoroni Enviro+ Exporter](https://github.com/terradolor/prometheus-enviro-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", @@ -13162,7 +13351,7 @@ "community": true }, "overview": "# Pingdom\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Pingdom website monitoring service metrics for efficient website performance management and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Pingdom Exporter](https://github.com/veepee-oss/pingdom_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple 
instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Pingdom Exporter](https://github.com/veepee-oss/pingdom_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Pingdom Exporter](https://github.com/veepee-oss/pingdom_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",
"alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n",
"metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",
@@ -13197,7 +13386,7 @@
"community": true
},
"overview": "# Podman\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Podman container runtime metrics for efficient container management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [PODMAN exporter](https://github.com/containers/prometheus-podman-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple 
instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [PODMAN exporter](https://github.com/containers/prometheus-podman-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
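To make the `(allow) AND NOT (deny)` rule described next concrete, here is a minimal Go sketch of the evaluation order. It is not the plugin's actual implementation: it approximates patterns with the standard library's `path/filepath.Match`, whereas the real pattern syntax is defined in the selector README linked below, and it assumes both lists are non-empty.

```go
package main

import (
	"fmt"
	"path/filepath"
)

// matchAny reports whether name matches at least one shell-style pattern.
func matchAny(patterns []string, name string) bool {
	for _, p := range patterns {
		if ok, _ := filepath.Match(p, name); ok {
			return true
		}
	}
	return false
}

// selected mirrors the documented rule:
// (pattern1 OR pattern2) AND !(pattern3 OR pattern4).
func selected(allow, deny []string, name string) bool {
	return matchAny(allow, name) && !matchAny(deny, name)
}

func main() {
	allow := []string{"podman_*"}      // illustrative allow list
	deny := []string{"podman_image_*"} // illustrative deny list

	fmt.Println(selected(allow, deny, "podman_container_info")) // true: allowed, not denied
	fmt.Println(selected(allow, deny, "podman_image_size"))     // false: denied
	fmt.Println(selected(allow, deny, "go_goroutines"))         // false: not allowed
}
```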
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [PODMAN exporter](https://github.com/containers/prometheus-podman-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",
"alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n",
"metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",
@@ -13232,7 +13421,7 @@
"community": true
},
"overview": "# Powerpal devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Powerpal smart meter metrics for efficient energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Powerpal Exporter](https://github.com/aashley/powerpal_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of 
this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Powerpal Exporter](https://github.com/aashley/powerpal_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
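Since the `fallback_type` option described below is often set alongside `selector`, here is one job sketch combining both; the metric name patterns are hypothetical and only illustrate the shapes the two options take:

```yaml
jobs:
  - name: powerpal
    url: http://127.0.0.1:9090/metrics
    selector:
      allow:
        - powerpal_*
    fallback_type:
      counter:
        - '*_pulses'        # hypothetical untyped counter
      gauge:
        - '*_battery_level' # hypothetical untyped gauge
```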
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Powerpal Exporter](https://github.com/aashley/powerpal_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",
"alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n",
"metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",
@@ -13267,7 +13456,7 @@
"community": true
},
"overview": "# ProFTPD\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor ProFTPD FTP server metrics for efficient file transfer and server performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ProFTPD Exporter](https://github.com/transnano/proftpd_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this 
integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ProFTPD Exporter](https://github.com/transnano/proftpd_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
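As background, everything the selector (and the `fallback_type` option described below) acts on comes from the exporter's plain-text scrape output. A scrape body looks roughly like this (hypothetical names; the first metric carries a `# TYPE` hint, while the second is untyped and would need `fallback_type`, a `_total` suffix, or a `quantile`/`le` label to be collected):

```
# TYPE proftpd_connections gauge
proftpd_connections{server="main"} 12
proftpd_bytes_in 483920
```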
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ProFTPD Exporter](https://github.com/transnano/proftpd_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",
"alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n",
"metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",
@@ -13304,7 +13493,7 @@
"most_popular": true
},
"overview": "# Prometheus endpoint\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nThis generic Prometheus collector gathers metrics from any [`Prometheus`](https://prometheus.io/) endpoint.\n\n\nIt collects metrics by periodically sending HTTP requests to the target instance.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, 
including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
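Whatever filtering you apply, it helps to first confirm that the target URL actually serves Prometheus text format; a quick check with standard tools (adjust the URL to your endpoint):

```bash
curl -s http://127.0.0.1:9090/metrics | head -n 20
```

If this prints `# HELP`/`# TYPE` lines and metric samples, the URL is a valid value for the job's `url` option.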
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. 
If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. 
The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -13339,7 +13528,7 @@ "community": true }, "overview": "# Proxmox VE\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Proxmox Virtual Environment metrics for efficient virtualization and container management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Proxmox VE Exporter](https://github.com/prometheus-pve/prometheus-pve-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it 
detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Proxmox VE Exporter](https://github.com/prometheus-pve/prometheus-pve-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Proxmox VE Exporter](https://github.com/prometheus-pve/prometheus-pve-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit.
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -13374,7 +13563,7 @@ "community": true }, "overview": "# RADIUS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on RADIUS (Remote Authentication Dial-In User Service) protocol metrics for efficient authentication and access management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [RADIUS exporter](https://github.com/devon-mar/radius-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting 
metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [RADIUS exporter](https://github.com/devon-mar/radius-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [RADIUS exporter](https://github.com/devon-mar/radius-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit.
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -13409,7 +13598,7 @@ "community": true }, "overview": "# RIPE Atlas\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on RIPE Atlas Internet measurement platform metrics for efficient network monitoring and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [RIPE Atlas Exporter](https://github.com/czerwonk/atlas_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from 
multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [RIPE Atlas Exporter](https://github.com/czerwonk/atlas_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [RIPE Atlas Exporter](https://github.com/czerwonk/atlas_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit.
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -13444,7 +13633,7 @@ "community": true }, "overview": "# Radio Thermostat\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Radio Thermostat smart thermostat metrics for efficient home automation and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Radio Thermostat Exporter](https://github.com/andrewlow/radio-thermostat-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting 
metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Radio Thermostat Exporter](https://github.com/andrewlow/radio-thermostat-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Radio Thermostat Exporter](https://github.com/andrewlow/radio-thermostat-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit.
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", @@ -13479,7 +13668,7 @@ "community": true }, "overview": "# Rancher\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Rancher container orchestration platform metrics for efficient container management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Rancher Exporter](https://github.com/infinityworksltd/prometheus-rancher-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics
from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Rancher Exporter](https://github.com/infinityworksltd/prometheus-rancher-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Rancher Exporter](https://github.com/infinityworksltd/prometheus-rancher-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
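##### Behind an HTTP proxy\n\nA sketch of a job that reaches the endpoint through an HTTP proxy, using the `proxy_url`, `proxy_username` and `proxy_password` options from the table above; the proxy address and credentials are placeholders.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n proxy_url: http://127.0.0.1:3128\n proxy_username: username\n proxy_password: password\n\n```\n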
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", @@ -13514,7 +13703,7 @@ "community": true }, "overview": "# Raritan PDU\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Raritan Power Distribution Unit (PDU) metrics for efficient power management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Raritan PDU Exporter](https://github.com/psyinfra/prometheus-raritan-pdu-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from
multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Raritan PDU Exporter](https://github.com/psyinfra/prometheus-raritan-pdu-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Raritan PDU Exporter](https://github.com/psyinfra/prometheus-raritan-pdu-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -13549,7 +13738,7 @@ "community": true }, "overview": "# Redis Queue\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Python RQ (Redis Queue) job queue metrics for efficient task management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Python RQ Exporter](https://github.com/mdawar/rq-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this 
integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Python RQ Exporter](https://github.com/mdawar/rq-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Python RQ Exporter](https://github.com/mdawar/rq-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -13584,7 +13773,7 @@ "community": true }, "overview": "# SABnzbd\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor SABnzbd Usenet client metrics for efficient file downloads and resource management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SABnzbd Exporter](https://github.com/msroest/sabnzbd_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this 
integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SABnzbd Exporter](https://github.com/msroest/sabnzbd_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SABnzbd Exporter](https://github.com/msroest/sabnzbd_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -13619,7 +13808,7 @@ "community": true }, "overview": "# SMA Inverters\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor SMA solar inverter metrics for efficient solar energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [sma-exporter](https://github.com/dr0ps/sma_inverter_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this 
integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [sma-exporter](https://github.com/dr0ps/sma_inverter_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [sma-exporter](https://github.com/dr0ps/sma_inverter_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -13654,7 +13843,7 @@ "community": true }, "overview": "# SONiC NOS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Software for Open Networking in the Cloud (SONiC) metrics for efficient network switch management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SONiC Exporter](https://github.com/kamelnetworks/sonic_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting 
metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SONiC Exporter](https://github.com/kamelnetworks/sonic_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SONiC Exporter](https://github.com/kamelnetworks/sonic_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -13693,7 +13882,7 @@ "community": true }, "overview": "# SQL Database agnostic\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nQuery SQL databases for efficient database performance monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SQL Exporter](https://github.com/free/sql_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including 
remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SQL Exporter](https://github.com/free/sql_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SQL Exporter](https://github.com/free/sql_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -13728,7 +13917,7 @@ "community": true }, "overview": "# SSH\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor SSH server metrics for efficient secure shell server management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SSH Exporter](https://github.com/Nordstrom/ssh_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, 
including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SSH Exporter](https://github.com/Nordstrom/ssh_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SSH Exporter](https://github.com/Nordstrom/ssh_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -13763,7 +13952,7 @@ "community": true }, "overview": "# SSL Certificate\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack SSL/TLS certificate metrics for efficient web security and certificate management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SSL Certificate exporter](https://github.com/ribbybibby/ssl_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances 
of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SSL Certificate exporter](https://github.com/ribbybibby/ssl_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall the [SSL Certificate exporter](https://github.com/ribbybibby/ssl_exporter) by following the instructions in its README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback type for Untyped metrics (process them as Counter or Gauge instead of ignoring them). | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n
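##### label_prefix\n\nA minimal sketch of the `label_prefix` option from the table above. Like the other options, it is set per job; the prefix value `mypfx` is only a placeholder, and with it a label named `instance` would be exposed as `mypfx_instance`.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n # every chart label name gets the mypfx_ prefix\n label_prefix: mypfx\n```\n\n\n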
#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
    "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata instance runs in a Docker container named \"netdata\" (replace with your container's name if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",
    "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n",
    "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric type | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the metric name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",
@@ -13798,7 +13987,7 @@
        "community": true
    },
    "overview": "# Salicru EQX inverter\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Salicru EQX solar inverter metrics for efficient solar energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Salicru EQX inverter](https://github.com/alejandroscf/prometheus_salicru_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting 
metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Salicru EQX inverter](https://github.com/alejandroscf/prometheus_salicru_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall the [Salicru EQX inverter](https://github.com/alejandroscf/prometheus_salicru_exporter) exporter by following the instructions in its README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback type for Untyped metrics (process them as Counter or Gauge instead of ignoring them). | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n
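##### label_prefix\n\nA minimal sketch of the `label_prefix` option from the table above. Like the other options, it is set per job; the prefix value `mypfx` is only a placeholder, and with it a label named `instance` would be exposed as `mypfx_instance`.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n # every chart label name gets the mypfx_ prefix\n label_prefix: mypfx\n```\n\n\n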
#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
    "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata instance runs in a Docker container named \"netdata\" (replace with your container's name if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",
    "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n",
    "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric type | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the metric name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",
@@ -13833,7 +14022,7 @@
        "community": true
    },
    "overview": "# Sense Energy\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Sense Energy smart meter metrics for efficient energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Sense Energy exporter](https://github.com/ejsuncy/sense_energy_prometheus_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from 
multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sense Energy exporter](https://github.com/ejsuncy/sense_energy_prometheus_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall the [Sense Energy exporter](https://github.com/ejsuncy/sense_energy_prometheus_exporter) by following the instructions in its README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback type for Untyped metrics (process them as Counter or Gauge instead of ignoring them). | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n
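##### label_prefix\n\nA minimal sketch of the `label_prefix` option from the table above. Like the other options, it is set per job; the prefix value `mypfx` is only a placeholder, and with it a label named `instance` would be exposed as `mypfx_instance`.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n # every chart label name gets the mypfx_ prefix\n label_prefix: mypfx\n```\n\n\n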
#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
    "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata instance runs in a Docker container named \"netdata\" (replace with your container's name if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",
    "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n",
    "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric type | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the metric name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",
@@ -13868,7 +14057,7 @@
        "community": true
    },
    "overview": "# Sentry\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Sentry error tracking and monitoring platform metrics for efficient application performance and error management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Sentry Exporter](https://github.com/snakecharmer/sentry_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from 
multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sentry Exporter](https://github.com/snakecharmer/sentry_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall the [Sentry Exporter](https://github.com/snakecharmer/sentry_exporter) by following the instructions in its README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback type for Untyped metrics (process them as Counter or Gauge instead of ignoring them). | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n
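##### label_prefix\n\nA minimal sketch of the `label_prefix` option from the table above. Like the other options, it is set per job; the prefix value `mypfx` is only a placeholder, and with it a label named `instance` would be exposed as `mypfx_instance`.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n # every chart label name gets the mypfx_ prefix\n label_prefix: mypfx\n```\n\n\n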
#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
    "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata instance runs in a Docker container named \"netdata\" (replace with your container's name if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",
    "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n",
    "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric type | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the metric name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",
@@ -13903,7 +14092,7 @@
        "community": true
    },
    "overview": "# ServerTech\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Server Technology power distribution unit (PDU) metrics for efficient power management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ServerTech Exporter](https://github.com/tynany/servertech_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics 
from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ServerTech Exporter](https://github.com/tynany/servertech_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ServerTech Exporter](https://github.com/tynany/servertech_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -13938,7 +14127,7 @@ "community": true }, "overview": "# Shell command\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack custom command output metrics for tailored monitoring and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Command runner exporter](https://github.com/tomwilkie/prom-run).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, 
including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Command runner exporter](https://github.com/tomwilkie/prom-run) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
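For instance, a minimal deny-only sketch that drops the Go runtime and process series many Go exporters also expose (`go_*` and `process_*` are assumptions about this exporter's output, not a guarantee):\n\n```yaml\nselector:\n deny:\n - go_*\n - process_*\n\n```\n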
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Command runner exporter](https://github.com/tomwilkie/prom-run) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see `fallback_type` below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit.
Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -13973,7 +14162,7 @@ "community": true }, "overview": "# Shelly humidity sensor\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Shelly smart home device metrics for efficient home automation and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Shelly Exporter](https://github.com/aexel90/shelly_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple 
instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Shelly Exporter](https://github.com/aexel90/shelly_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
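If the pattern syntax in the selector README linked below also covers label matching, a sketch like the following would keep a single label set; `shelly_temperature{sensor=\"humidity\"}` is a purely hypothetical series, not a name taken from this exporter:\n\n```yaml\nselector:\n allow:\n - shelly_temperature{sensor=\"humidity\"}\n\n```\n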
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Shelly Exporter](https://github.com/aexel90/shelly_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see `fallback_type` below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit.
Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14008,7 +14197,7 @@ "community": true }, "overview": "# Sia\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Sia decentralized storage platform metrics for efficient storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Sia Exporter](https://github.com/tbenz9/sia_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this 
integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sia Exporter](https://github.com/tbenz9/sia_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
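To make the allow/deny logic listed below concrete: with this hedged sketch, a series is collected only if it matches `sia_*` and does not also match `sia_debug_*` (both patterns are illustrative, not names taken from this exporter):\n\n```yaml\nselector:\n allow:\n - sia_*\n deny:\n - sia_debug_*\n\n```\n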
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sia Exporter](https://github.com/tbenz9/sia_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see `fallback_type` below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit.
Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14043,7 +14232,7 @@ "community": true }, "overview": "# Siemens S7 PLC\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Siemens S7 Programmable Logic Controller (PLC) metrics for efficient industrial automation and control.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Siemens S7 PLC exporter](https://github.com/MarcusCalidus/s7-plc-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting 
metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Siemens S7 PLC exporter](https://github.com/MarcusCalidus/s7-plc-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
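(A related option, `fallback_type`, is documented below; as a hedged sketch, an untyped PLC reading could be processed as a gauge, where `s7_plc_value` is a hypothetical metric name, not one taken from this exporter.)\n\n```yaml\nfallback_type:\n gauge:\n - s7_plc_value\n\n```\n\nAs for the selector itself: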
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Siemens S7 PLC exporter](https://github.com/MarcusCalidus/s7-plc-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see `fallback_type` below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit.
Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14078,7 +14267,7 @@ "community": true }, "overview": "# Site 24x7\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Site24x7 website and infrastructure monitoring metrics for efficient performance tracking and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [site24x7 Exporter](https://github.com/svenstaro/site24x7_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from 
multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [site24x7 Exporter](https://github.com/svenstaro/site24x7_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [site24x7 Exporter](https://github.com/svenstaro/site24x7_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14113,7 +14302,7 @@ "community": true }, "overview": "# Slurm\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Slurm workload manager metrics for efficient high-performance computing (HPC) and cluster management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [slurm exporter](https://github.com/vpenso/prometheus-slurm-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple 
instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [slurm exporter](https://github.com/vpenso/prometheus-slurm-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [slurm exporter](https://github.com/vpenso/prometheus-slurm-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14148,7 +14337,7 @@ "community": true }, "overview": "# SmartRG 808AC Cable Modem\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor SmartRG SR808ac router metrics for efficient network device management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [smartrg808ac_exporter](https://github.com/AdamIsrael/smartrg808ac_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics 
from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [smartrg808ac_exporter](https://github.com/AdamIsrael/smartrg808ac_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [smartrg808ac_exporter](https://github.com/AdamIsrael/smartrg808ac_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14183,7 +14372,7 @@ "community": true }, "overview": "# Smart meters SML\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Smart Message Language (SML) metrics for efficient smart metering and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SML Exporter](https://github.com/mweinelt/sml-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of 
this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SML Exporter](https://github.com/mweinelt/sml-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SML Exporter](https://github.com/mweinelt/sml-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14218,7 +14407,7 @@ "community": true }, "overview": "# SoftEther VPN Server\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor SoftEther VPN Server metrics for efficient virtual private network (VPN) management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SoftEther Exporter](https://github.com/dalance/softether_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics 
from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SoftEther Exporter](https://github.com/dalance/softether_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SoftEther Exporter](https://github.com/dalance/softether_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics whose number of time series exceeds the limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
"troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14253,7 +14442,7 @@ "community": true }, "overview": "# SolarEdge inverters\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack SolarEdge solar inverter metrics for efficient solar energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SolarEdge Exporter](https://github.com/dave92082/SolarEdge-Exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple 
instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SolarEdge Exporter](https://github.com/dave92082/SolarEdge-Exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SolarEdge Exporter](https://github.com/dave92082/SolarEdge-Exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics whose number of time series exceeds the limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
"troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14288,7 +14477,7 @@ "community": true }, "overview": "# Solar logging stick\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor solar energy metrics using a solar logging stick for efficient solar energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Solar logging stick exporter](https://gitlab.com/bhavin192/lsx-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting 
metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Solar logging stick exporter](https://gitlab.com/bhavin192/lsx-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Solar logging stick exporter](https://gitlab.com/bhavin192/lsx-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics whose number of time series exceeds the limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
"troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14323,7 +14512,7 @@ "community": true }, "overview": "# Solis Ginlong 5G inverters\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Solis solar inverter metrics for efficient solar energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Solis Exporter](https://github.com/candlerb/solis_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple 
instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Solis Exporter](https://github.com/candlerb/solis_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Solis Exporter](https://github.com/candlerb/solis_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics whose number of time series exceeds the limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
"troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14358,7 +14547,7 @@ "community": true }, "overview": "# Spacelift\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Spacelift infrastructure-as-code (IaC) platform metrics for efficient infrastructure automation and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Spacelift Exporter](https://github.com/spacelift-io/prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics 
from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Spacelift Exporter](https://github.com/spacelift-io/prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Spacelift Exporter](https://github.com/spacelift-io/prometheus-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
    "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14393,7 +14582,7 @@ "community": true }, "overview": "# Speedify CLI\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Speedify VPN metrics for efficient virtual private network (VPN) management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Speedify Exporter](https://github.com/willshen/speedify_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple 
instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Speedify Exporter](https://github.com/willshen/speedify_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Speedify Exporter](https://github.com/willshen/speedify_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
    "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14428,7 +14617,7 @@ "community": true }, "overview": "# Sphinx\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Sphinx search engine metrics for efficient search and indexing performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Sphinx Exporter](https://github.com/foxdalas/sphinx_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, 
including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sphinx Exporter](https://github.com/foxdalas/sphinx_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sphinx Exporter](https://github.com/foxdalas/sphinx_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
    "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14463,7 +14652,7 @@ "community": true }, "overview": "# Starlink (SpaceX)\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor SpaceX Starlink satellite internet metrics for efficient internet service management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Starlink Exporter (SpaceX)](https://github.com/danopstech/starlink_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting 
metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Starlink Exporter (SpaceX)](https://github.com/danopstech/starlink_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Starlink Exporter (SpaceX)](https://github.com/danopstech/starlink_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
    "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14498,7 +14687,7 @@ "community": true }, "overview": "# Starwind VSAN VSphere Edition\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on StarWind Virtual SAN metrics for efficient storage virtualization and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Starwind vSAN Exporter](https://github.com/evoicefire/starwind-vsan-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting 
metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Starwind vSAN Exporter](https://github.com/evoicefire/starwind-vsan-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Starwind vSAN Exporter](https://github.com/evoicefire/starwind-vsan-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
    "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14533,7 +14722,7 @@ "community": true }, "overview": "# StatusPage\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor StatusPage.io incident and status metrics for efficient incident management and communication.\n\n\nMetrics are gathered by periodically sending HTTP requests to [StatusPage Exporter](https://github.com/vladvasiliu/statuspage-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple 
instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [StatusPage Exporter](https://github.com/vladvasiliu/statuspage-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [StatusPage Exporter](https://github.com/vladvasiliu/statuspage-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14568,7 +14757,7 @@ "community": true }, "overview": "# Steam\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nGain insights into Steam A2S-supported game servers for performance and availability through real-time metric monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [A2S Exporter](https://github.com/armsnyder/a2s-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple 
instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [A2S Exporter](https://github.com/armsnyder/a2s-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [A2S Exporter](https://github.com/armsnyder/a2s-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14603,7 +14792,7 @@ "community": true }, "overview": "# Storidge\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Storidge storage metrics for efficient storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Storidge exporter](https://github.com/Storidge/cio-user-docs/blob/master/integrations/prometheus.md).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics 
from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Storidge exporter](https://github.com/Storidge/cio-user-docs/blob/master/integrations/prometheus.md) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Storidge exporter](https://github.com/Storidge/cio-user-docs/blob/master/integrations/prometheus.md) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. 
| 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug 
Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14638,7 +14827,7 @@ "community": true }, "overview": "# Stream\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor streaming metrics for efficient media streaming and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Stream exporter](https://github.com/carlpett/stream_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis 
collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Stream exporter](https://github.com/carlpett/stream_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Stream exporter](https://github.com/carlpett/stream_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
"troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those with no '# TYPE') are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14673,7 +14862,7 @@ "community": true }, "overview": "# Sunspec Solar Energy\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor SunSpec Alliance solar energy metrics for efficient solar energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Sunspec Solar Energy Exporter](https://github.com/inosion/prometheus-sunspec-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting
metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sunspec Solar Energy Exporter](https://github.com/inosion/prometheus-sunspec-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sunspec Solar Energy Exporter](https://github.com/inosion/prometheus-sunspec-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics that exceed this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
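Since `label_prefix` is the option newly documented in this table, here is a short hedged sketch of how it might be set (the prefix value and URL are illustrative):

```yaml
jobs:
  - name: sunspec
    url: http://127.0.0.1:9090/metrics  # replace with your exporter's address
    label_prefix: sunspec               # a label named "phase" would be exposed as "sunspec_phase"
```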
"troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those with no '# TYPE') are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14708,7 +14897,7 @@ "community": true }, "overview": "# Suricata\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Suricata network intrusion detection and prevention system (IDS/IPS) metrics for efficient network security and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Suricata Exporter](https://github.com/corelight/suricata_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports
collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Suricata Exporter](https://github.com/corelight/suricata_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Suricata Exporter](https://github.com/corelight/suricata_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those with no '# TYPE') are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14743,7 +14932,7 @@ "community": true }, "overview": "# Synology ActiveBackup\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Synology Active Backup metrics for efficient backup and data protection management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Synology ActiveBackup Exporter](https://github.com/codemonauts/activebackup-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting
metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Synology ActiveBackup Exporter](https://github.com/codemonauts/activebackup-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Synology ActiveBackup Exporter](https://github.com/codemonauts/activebackup-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. 
| 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug 
Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those with no '# TYPE') are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14778,7 +14967,7 @@ "community": true }, "overview": "# Sysload\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor system load metrics for efficient system performance and resource management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Sysload Exporter](https://github.com/egmc/sysload_exporter).\n\n\nThis collector is supported on all
platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sysload Exporter](https://github.com/egmc/sysload_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sysload Exporter](https://github.com/egmc/sysload_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics that exceed this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
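For endpoints served over TLS with a private CA, or requiring mutual TLS, the documented `tls_*` options can be combined instead of `tls_skip_verify`. A hedged sketch (all file paths are illustrative assumptions):

```yaml
jobs:
  - name: sysload
    url: https://127.0.0.1:9090/metrics    # replace with your exporter's address
    tls_ca: /etc/netdata/ssl/ca.crt        # illustrative path to the CA used to verify the server
    tls_cert: /etc/netdata/ssl/client.crt  # illustrative client certificate (for mutual TLS)
    tls_key: /etc/netdata/ssl/client.key   # illustrative client key
```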
"troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those with no '# TYPE') are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14813,7 +15002,7 @@ "community": true }, "overview": "# T-Rex NVIDIA GPU Miner\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor T-Rex NVIDIA GPU miner metrics for efficient cryptocurrency mining and GPU performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [T-Rex NVIDIA GPU Miner Exporter](https://github.com/dennisstritzke/trex_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics
from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [T-Rex NVIDIA GPU Miner Exporter](https://github.com/dennisstritzke/trex_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [T-Rex NVIDIA GPU Miner Exporter](https://github.com/dennisstritzke/trex_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14848,7 +15037,7 @@ "community": true }, "overview": "# TACACS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Terminal Access Controller Access-Control System (TACACS) protocol metrics for efficient network authentication and authorization management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [TACACS Exporter](https://github.com/devon-mar/tacacs-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports 
collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [TACACS Exporter](https://github.com/devon-mar/tacacs-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [TACACS Exporter](https://github.com/devon-mar/tacacs-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14883,7 +15072,7 @@ "community": true }, "overview": "# TP-Link P110\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack TP-Link P110 smart plug metrics for efficient energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [TP-Link P110 Exporter](https://github.com/ijohanne/prometheus-tplink-p110-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple 
instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [TP-Link P110 Exporter](https://github.com/ijohanne/prometheus-tplink-p110-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [TP-Link P110 Exporter](https://github.com/ijohanne/prometheus-tplink-p110-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -14918,7 +15107,7 @@ "community": true }, "overview": "# Tado smart heating solution\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Tado smart thermostat metrics for efficient home heating and cooling management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Tado\\xB0 Exporter](https://github.com/eko/tado-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances 
of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Tado Exporter](https://github.com/eko/tado-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Tado Exporter](https://github.com/eko/tado-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",
"alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n",
"metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",
@@ -14953,7 +15142,7 @@
"community": true
},
"overview": "# Tankerkoenig API\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Tankerkoenig API fuel price metrics for efficient fuel price monitoring and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Tankerkoenig API Exporter](https://github.com/lukasmalkmus/tankerkoenig_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from 
multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",
"setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Tankerkoenig API Exporter](https://github.com/lukasmalkmus/tankerkoenig_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Tankerkoenig API Exporter](https://github.com/lukasmalkmus/tankerkoenig_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace with your container's name if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",
     "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n",
     "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",
@@ -14988,7 +15177,7 @@
       "community": true
     },
     "overview": "# Tesla Powerwall\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Tesla Powerwall metrics for efficient home energy storage and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Tesla Powerwall Exporter](https://github.com/foogod/powerwall_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of 
this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Tesla Powerwall Exporter](https://github.com/foogod/powerwall_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Tesla Powerwall Exporter](https://github.com/foogod/powerwall_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace with your container's name if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",
     "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n",
     "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",
@@ -15023,7 +15212,7 @@
       "community": true
     },
     "overview": "# Tesla Wall Connector\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Tesla Wall Connector charging station metrics for efficient electric vehicle charging management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Tesla Wall Connector Exporter](https://github.com/benclapp/tesla_wall_connector_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports 
collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Tesla Wall Connector Exporter](https://github.com/benclapp/tesla_wall_connector_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Tesla Wall Connector Exporter](https://github.com/benclapp/tesla_wall_connector_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace with your container's name if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",
     "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n",
     "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",
@@ -15058,7 +15247,7 @@
       "community": true
     },
     "overview": "# Tesla vehicle\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Tesla vehicle metrics for efficient electric vehicle management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Tesla exporter](https://github.com/wywywywy/tesla-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of 
this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Tesla exporter](https://github.com/wywywywy/tesla-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Tesla exporter](https://github.com/wywywywy/tesla-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace with your container's name if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",
     "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n",
     "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",
@@ -15093,7 +15282,7 @@
       "community": true
     },
     "overview": "# Traceroute\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nExport traceroute metrics for efficient network path analysis and performance monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [traceroute exporter](https://github.com/jeanfabrice/prometheus-tcptraceroute-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from 
multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [traceroute exporter](https://github.com/jeanfabrice/prometheus-tcptraceroute-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [traceroute exporter](https://github.com/jeanfabrice/prometheus-tcptraceroute-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type (Counter or Gauge) for Untyped metrics. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certificate authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
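##### Label prefix\n\nA minimal sketch of the `label_prefix` option, assuming a hypothetical `myapp` prefix; with it, a label such as `instance` would be renamed `myapp_instance`.\n\n```yaml\n# myapp is a hypothetical placeholder prefix\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n label_prefix: myapp\n\n```\n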
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", @@ -15128,7 +15317,7 @@ "community": true }, "overview": "# TwinCAT ADS Web Service\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor TwinCAT ADS (Automation Device Specification) Web Service metrics for efficient industrial automation and control.\n\n\nMetrics are gathered by periodically sending HTTP requests to [TwinCAT ADS Web Service exporter](https://github.com/MarcusCalidus/twincat-ads-webservice-exporter).\n\n\nThis collector is supported on all 
platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [TwinCAT ADS Web Service exporter](https://github.com/MarcusCalidus/twincat-ads-webservice-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [TwinCAT ADS Web Service exporter](https://github.com/MarcusCalidus/twincat-ads-webservice-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type (Counter or Gauge) for Untyped metrics. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certificate authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
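##### Label prefix\n\nA minimal sketch of the `label_prefix` option, assuming a hypothetical `myapp` prefix; with it, a label such as `instance` would be renamed `myapp_instance`.\n\n```yaml\n# myapp is a hypothetical placeholder prefix\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n label_prefix: myapp\n\n```\n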
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", @@ -15163,7 +15352,7 @@ "community": true }, "overview": "# Twitch\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Twitch streaming platform metrics for efficient live streaming management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Twitch exporter](https://github.com/damoun/twitch_exporter).\n\n\nThis collector is supported on 
all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Twitch exporter](https://github.com/damoun/twitch_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Twitch exporter](https://github.com/damoun/twitch_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type (Counter or Gauge) for Untyped metrics. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certificate authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
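##### Label prefix\n\nA minimal sketch of the `label_prefix` option, assuming a hypothetical `myapp` prefix; with it, a label such as `instance` would be renamed `myapp_instance`.\n\n```yaml\n# myapp is a hypothetical placeholder prefix\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n label_prefix: myapp\n\n```\n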
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", @@ -15198,7 +15387,7 @@ "community": true }, "overview": "# Ubiquiti UFiber OLT\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Ubiquiti UFiber GPON (Gigabit Passive Optical Network) device metrics for efficient fiber-optic network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ufiber-exporter](https://github.com/swoga/ufiber-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports 
collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ufiber-exporter](https://github.com/swoga/ufiber-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ufiber-exporter](https://github.com/swoga/ufiber-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type (Counter or Gauge) for Untyped metrics. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certificate authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
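##### Label prefix\n\nA minimal sketch of the `label_prefix` option, assuming a hypothetical `myapp` prefix; with it, a label such as `instance` would be renamed `myapp_instance`.\n\n```yaml\n# myapp is a hypothetical placeholder prefix\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n label_prefix: myapp\n\n```\n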
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", @@ -15233,7 +15422,7 @@ "community": true }, "overview": "# UptimeRobot\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor UptimeRobot website uptime monitoring metrics for efficient website availability tracking and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Uptimerobot Exporter](https://github.com/wosc/prometheus-uptimerobot).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from 
multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Uptimerobot Exporter](https://github.com/wosc/prometheus-uptimerobot) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Uptimerobot Exporter](https://github.com/wosc/prometheus-uptimerobot) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type (Counter or Gauge) for Untyped metrics. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certificate authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
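##### Label prefix\n\nA minimal sketch of the `label_prefix` option, assuming a hypothetical `myapp` prefix; with it, a label such as `instance` would be renamed `myapp_instance`.\n\n```yaml\n# myapp is a hypothetical placeholder prefix\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n label_prefix: myapp\n\n```\n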
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -15268,7 +15457,7 @@ "community": true }, "overview": "# VSCode\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Visual Studio Code editor metrics for efficient development environment management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [VSCode Exporter](https://github.com/guicaulada/vscode-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances 
of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [VSCode Exporter](https://github.com/guicaulada/vscode-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [VSCode Exporter](https://github.com/guicaulada/vscode-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
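As a concrete illustration of the selector logic documented above — (allow pattern1 OR allow pattern2) AND NOT (deny pattern) — here is a sketch; the metric name patterns are hypothetical and follow the linked selector syntax:

```yaml
jobs:
  - name: local
    url: http://127.0.0.1:9090/metrics
    selector:
      allow:
        # Collect CPU and memory series only...
        - node_cpu_*
        - node_memory_*
      deny:
        # ...but exclude guest-mode CPU series even though
        # they also match the first allow pattern.
        - node_cpu_guest_*
```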
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -15303,7 +15492,7 @@ "community": true }, "overview": "# Vault PKI\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor HashiCorp Vault Public Key Infrastructure (PKI) metrics for efficient certificate management and security.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Vault PKI Exporter](https://github.com/aarnaud/vault-pki-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from 
multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Vault PKI Exporter](https://github.com/aarnaud/vault-pki-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Vault PKI Exporter](https://github.com/aarnaud/vault-pki-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
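To make the fallback_type behavior described above concrete, a sketch with hypothetical metric names; the patterns use the shell file name syntax linked above:

```yaml
jobs:
  - name: local
    url: http://127.0.0.1:9090/metrics
    fallback_type:
      counter:
        # An untyped, ever-increasing total: process it as a Counter.
        - myapp_requests_handled
      gauge:
        # Untyped point-in-time readings: process them as Gauges.
        - myapp_queue_depth
        - myapp_temperature_*
```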
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -15338,7 +15527,7 @@ "community": true }, "overview": "# Vertica\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Vertica analytics database platform metrics for efficient database performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [vertica-prometheus-exporter](https://github.com/vertica/vertica-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from 
multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [vertica-prometheus-exporter](https://github.com/vertica/vertica-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
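The HTTP client options in the tables above can also be combined in a single job. A sketch under stated assumptions: the proxy address and credentials are placeholders, and the `headers` option is assumed here to take a simple key/value mapping:

```yaml
jobs:
  - name: remote
    url: https://192.0.2.1:9090/metrics
    # Fail the scrape if the endpoint does not answer within 5 seconds.
    timeout: 5
    # Reach the endpoint through an authenticated HTTP proxy.
    proxy_url: http://proxy.example.com:3128
    proxy_username: proxyuser
    proxy_password: proxypass
    headers:
      # Assumed mapping form for extra request headers.
      X-Scrape-Source: netdata
```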
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [vertica-prometheus-exporter](https://github.com/vertica/vertica-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -15373,7 +15562,7 @@ "community": true }, "overview": "# Warp10\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Warp 10 time-series database metrics for efficient time-series data management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Warp10 Exporter](https://github.com/centreon/warp10-sensision-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple 
instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Warp10 Exporter](https://github.com/centreon/warp10-sensision-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Warp10 Exporter](https://github.com/centreon/warp10-sensision-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
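Instead of disabling verification with `tls_skip_verify`, the `tls_ca`, `tls_cert`, and `tls_key` options listed above allow validating a privately signed endpoint. A sketch; the file paths are hypothetical:

```yaml
jobs:
  - name: local
    url: https://127.0.0.1:9090/metrics
    # Trust a private CA rather than skipping verification entirely.
    tls_ca: /etc/ssl/private-ca.pem
    # Client certificate and key, if the endpoint enforces mutual TLS.
    tls_cert: /etc/ssl/netdata-client.pem
    tls_key: /etc/ssl/netdata-client.key
```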
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -15408,7 +15597,7 @@ "community": true }, "overview": "# XMPP Server\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor XMPP (Extensible Messaging and Presence Protocol) server metrics for efficient messaging and communication management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [XMPP Server Exporter](https://github.com/horazont/xmpp-blackbox-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports 
collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [XMPP Server Exporter](https://github.com/horazont/xmpp-blackbox-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [XMPP Server Exporter](https://github.com/horazont/xmpp-blackbox-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -15443,7 +15632,7 @@ "community": true }, "overview": "# Xiaomi Mi Flora\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on MiFlora plant monitor metrics for efficient plant care and growth management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [MiFlora / Flower Care Exporter](https://github.com/xperimental/flowercare-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from 
multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [MiFlora / Flower Care Exporter](https://github.com/xperimental/flowercare-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [MiFlora / Flower Care Exporter](https://github.com/xperimental/flowercare-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -15478,7 +15667,7 @@ "community": true }, "overview": "# YOURLS URL Shortener\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor YOURLS (Your Own URL Shortener) metrics for efficient URL shortening service management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [YOURLS exporter](https://github.com/just1not2/prometheus-exporter-yourls).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting 
metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [YOURLS exporter](https://github.com/just1not2/prometheus-exporter-yourls) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [YOURLS exporter](https://github.com/just1not2/prometheus-exporter-yourls) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -15513,7 +15702,7 @@ "community": true }, "overview": "# Zerto\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Zerto disaster recovery and data protection metrics for efficient backup and recovery management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Zerto Exporter](https://github.com/claranet/zerto-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of 
this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Zerto Exporter](https://github.com/claranet/zerto-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Zerto Exporter](https://github.com/claranet/zerto-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -15548,7 +15737,7 @@ "community": true }, "overview": "# Zulip\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Zulip open-source group chat application metrics for efficient team communication management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Zulip Exporter](https://github.com/brokenpip3/zulip-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple 
instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Zulip Exporter](https://github.com/brokenpip3/zulip-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Zulip Exporter](https://github.com/brokenpip3/zulip-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |
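\n\n##### label_prefix\n\nAs a minimal sketch of the new `label_prefix` option (the job name, URL, and prefix here are illustrative placeholders, not values taken from this integration): following the formatting rule in the table above, a scraped label named `version` would be stored as `myapp_version`.\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    # illustrative prefix; every chart label name becomes myapp_<original name>\n    label_prefix: myapp\n```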
\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
"troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",
"alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n",
"metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", @@ -15583,7 +15772,7 @@ "community": true }, "overview": "# Zyxel GS1200-8\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Zyxel GS1200 network switch metrics for efficient network device management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Zyxel GS1200 Exporter](https://github.com/robinelfrink/gs1200-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple
instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Zyxel GS1200 Exporter](https://github.com/robinelfrink/gs1200-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Zyxel GS1200 Exporter](https://github.com/robinelfrink/gs1200-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |
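\n\n##### label_prefix\n\nAs a minimal sketch of the new `label_prefix` option (the job name, URL, and prefix here are illustrative placeholders, not values taken from this integration): following the formatting rule in the table above, a scraped label named `version` would be stored as `myapp_version`.\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    # illustrative prefix; every chart label name becomes myapp_<original name>\n    label_prefix: myapp\n```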
\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
"troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",
"alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n",
"metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", @@ -15618,7 +15807,7 @@ "community": true }, "overview": "# bpftrace variables\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack bpftrace metrics for advanced performance analysis and troubleshooting.\n\n\nMetrics are gathered by periodically sending HTTP requests to [bpftrace exporter](https://github.com/andreasgerstmayr/bpftrace_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of
this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [bpftrace exporter](https://github.com/andreasgerstmayr/bpftrace_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [bpftrace exporter](https://github.com/andreasgerstmayr/bpftrace_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |
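\n\n##### label_prefix\n\nAs a minimal sketch of the new `label_prefix` option (the job name, URL, and prefix here are illustrative placeholders, not values taken from this integration): following the formatting rule in the table above, a scraped label named `version` would be stored as `myapp_version`.\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    # illustrative prefix; every chart label name becomes myapp_<original name>\n    label_prefix: myapp\n```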
\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
"troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",
"alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n",
"metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", @@ -15653,7 +15842,7 @@ "community": true }, "overview": "# cAdvisor\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor container resource usage and performance metrics with cAdvisor for efficient container management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [cAdvisor](https://github.com/google/cadvisor).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this
integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [cAdvisor](https://github.com/google/cadvisor) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [cAdvisor](https://github.com/google/cadvisor) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |
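\n\n##### label_prefix\n\nAs a minimal sketch of the new `label_prefix` option (the job name, URL, and prefix here are illustrative placeholders, not values taken from this integration): following the formatting rule in the table above, a scraped label named `version` would be stored as `myapp_version`.\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    # illustrative prefix; every chart label name becomes myapp_<original name>\n    label_prefix: myapp\n```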
\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
"troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",
"alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n",
"metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", @@ -15688,7 +15877,7 @@ "community": true }, "overview": "# etcd\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack etcd database metrics for optimized distributed key-value store management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to etcd's built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote
instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |
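\n\n##### label_prefix\n\nAs a minimal sketch of the new `label_prefix` option (the job name, URL, and prefix here are illustrative placeholders, not values taken from this integration): following the formatting rule in the table above, a scraped label named `version` would be stored as `myapp_version`.\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    # illustrative prefix; every chart label name becomes myapp_<original name>\n    label_prefix: myapp\n```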
If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. 
The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -15723,7 +15912,7 @@ "community": true }, "overview": "# gpsd\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor GPSD (GPS daemon) metrics for efficient GPS data management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [gpsd exporter](https://github.com/natesales/gpsd-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by 
trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [gpsd exporter](https://github.com/natesales/gpsd-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [gpsd exporter](https://github.com/natesales/gpsd-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -15758,7 +15947,7 @@ "community": true }, "overview": "# iqAir AirVisual air quality monitors\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor air quality data from IQAir devices for efficient environmental monitoring and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [IQair Exporter](https://github.com/Packetslave/iqair_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics 
from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [IQair Exporter](https://github.com/Packetslave/iqair_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [IQair Exporter](https://github.com/Packetslave/iqair_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -15793,7 +15982,7 @@ "community": true }, "overview": "# jolokia\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Jolokia JVM metrics for optimized Java application performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [jolokia_exporter](https://github.com/aklinkert/jolokia_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this 
integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [jolokia_exporter](https://github.com/aklinkert/jolokia_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [jolokia_exporter](https://github.com/aklinkert/jolokia_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -15828,7 +16017,7 @@ "community": true }, "overview": "# journald\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on systemd-journald metrics for efficient log management and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [journald-exporter](https://github.com/dead-claudia/journald-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this 
integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [journald-exporter](https://github.com/dead-claudia/journald-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [journald-exporter](https://github.com/dead-claudia/journald-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the metric name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", @@ -15863,7 +16052,7 @@ "community": true }, "overview": "# loki\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Loki metrics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [loki](https://github.com/grafana/loki).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default,
it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Loki\n\nInstall [loki](https://github.com/grafana/loki) according to its documentation.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Loki\n\nInstall [loki](https://github.com/grafana/loki) according to its documentation.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. 
| 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with 
the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -15898,7 +16087,7 @@ "community": true }, "overview": "# mosquitto\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Mosquitto MQTT broker metrics for efficient IoT message transport and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [mosquitto exporter](https://github.com/sapcc/mosquitto-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### 
Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [mosquitto exporter](https://github.com/sapcc/mosquitto-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [mosquitto exporter](https://github.com/sapcc/mosquitto-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -15933,7 +16122,7 @@ "community": true }, "overview": "# mtail\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor log data metrics using mtail log data extractor and parser.\n\n\nMetrics are gathered by periodically sending HTTP requests to [mtail](https://github.com/google/mtail).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### 
Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [mtail](https://github.com/google/mtail) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [mtail](https://github.com/google/mtail) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -15968,7 +16157,7 @@ "community": true }, "overview": "# nftables\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor nftables firewall metrics for efficient network security and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [nftables_exporter](https://github.com/Sheridan/nftables_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, 
including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [nftables_exporter](https://github.com/Sheridan/nftables_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [nftables_exporter](https://github.com/Sheridan/nftables_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -16003,7 +16192,7 @@ "community": true }, "overview": "# pgBackRest\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor pgBackRest PostgreSQL backup metrics for efficient database backup and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [pgBackRest Exporter](https://github.com/woblerr/pgbackrest_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of 
this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [pgBackRest Exporter](https://github.com/woblerr/pgbackrest_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
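For instance, a minimal sketch of a working filter (the `pgbackrest_*`, `go_*`, and `process_*` patterns below are illustrative assumptions, not names taken from this document) that keeps only exporter-specific series while dropping runtime internals:

```yaml
selector:
  allow:
    - pgbackrest_*   # assumed: keep only series whose names match this glob
  deny:
    - go_*           # assumed: drop Go runtime series
    - process_*      # assumed: drop process-level series
```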
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [pgBackRest Exporter](https://github.com/woblerr/pgbackrest_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -16038,7 +16227,7 @@ "community": true }, "overview": "# strongSwan\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack strongSwan VPN and IPSec metrics using the vici interface for efficient virtual private network (VPN) management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [strongSwan/IPSec/vici Exporter](https://github.com/jlti-dev/ipsec_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector 
supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [strongSwan/IPSec/vici Exporter](https://github.com/jlti-dev/ipsec_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
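As a further sketch, filtering can be combined with the limit and labeling options described in the table above. Every value below is an illustrative assumption (including the port and the `ipsec_*` pattern), not a documented default:

```yaml
jobs:
  - name: strongswan
    url: http://127.0.0.1:9903/metrics  # assumed exporter address
    max_time_series: 5000               # assumed global series limit
    label_prefix: swan                  # label names become swan_<original name>
    selector:
      allow:
        - ipsec_*                       # assumed metric name pattern
```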
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [strongSwan/IPSec/vici Exporter](https://github.com/jlti-dev/ipsec_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no |\n| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI 
using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", @@ -16076,7 +16265,7 @@ "most_popular": false }, "overview": "# ProxySQL\n\nPlugin: go.d.plugin\nModule: proxysql\n\n## Overview\n\nThis collector monitors ProxySQL servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default 
configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/proxysql.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/proxysql.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | Data Source Name. See [DSN syntax](https://github.com/go-sql-driver/mysql#dsn-data-source-name). | stats:stats@tcp(127.0.0.1:6032)/ | yes |\n| timeout | Query timeout in seconds. | 1 | no |\n\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n dsn: stats:stats@tcp(127.0.0.1:6032)/\n\n```\n##### my.cnf\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n my.cnf: '/etc/my.cnf'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n dsn: stats:stats@tcp(127.0.0.1:6032)/\n\n - name: remote\n dsn: stats:stats@tcp(203.0.113.0:6032)/\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/proxysql.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/proxysql.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | Data Source Name. See [DSN syntax](https://github.com/go-sql-driver/mysql#dsn-data-source-name). | stats:stats@tcp(127.0.0.1:6032)/ | yes |\n| timeout | Query timeout in seconds. 
| 1 | no |\n\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n dsn: stats:stats@tcp(127.0.0.1:6032)/\n\n```\n##### my.cnf\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n my.cnf: '/etc/my.cnf'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n dsn: stats:stats@tcp(127.0.0.1:6032)/\n\n - name: remote\n dsn: stats:stats@tcp(203.0.113.0:6032)/\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `proxysql` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m proxysql\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `proxysql` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep proxysql\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep proxysql /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep proxysql\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per ProxySQL instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| proxysql.client_connections_count | connected, non_idle, hostgroup_locked | connections |\n| proxysql.client_connections_rate | created, aborted | connections/s |\n| proxysql.server_connections_count | connected | connections |\n| proxysql.server_connections_rate | created, aborted, delayed | connections/s |\n| proxysql.backends_traffic | recv, sent | B/s |\n| proxysql.clients_traffic | recv, sent | B/s |\n| proxysql.active_transactions_count | client | connections |\n| proxysql.questions_rate | questions | questions/s |\n| proxysql.slow_queries_rate | slow | queries/s |\n| proxysql.queries_rate | autocommit, autocommit_filtered, commit_filtered, rollback, rollback_filtered, backend_change_user, backend_init_db, backend_set_names, frontend_init_db, frontend_set_names, frontend_use_db | queries/s |\n| proxysql.backend_statements_count | total, unique | statements |\n| proxysql.backend_statements_rate | prepare, execute, close | statements/s |\n| proxysql.client_statements_count | total, unique | statements |\n| proxysql.client_statements_rate | prepare, execute, close | statements/s |\n| proxysql.cached_statements_count | cached | statements |\n| proxysql.query_cache_entries_count | entries | entries |\n| proxysql.query_cache_memory_used | used | B |\n| proxysql.query_cache_io | in, out | B/s |\n| proxysql.query_cache_requests_rate | read, write, read_success | requests/s |\n| proxysql.mysql_monitor_workers_count | workers, auxiliary | threads |\n| proxysql.mysql_monitor_workers_rate | started | workers/s |\n| proxysql.mysql_monitor_connect_checks_rate | succeed, failed | checks/s |\n| proxysql.mysql_monitor_ping_checks_rate | succeed, failed | checks/s |\n| proxysql.mysql_monitor_read_only_checks_rate | succeed, failed | checks/s |\n| proxysql.mysql_monitor_replication_lag_checks_rate | succeed, failed | checks/s |\n| proxysql.jemalloc_memory_used | active, allocated, mapped, metadata, resident, retained | B |\n| proxysql.memory_used | auth, sqlite3, query_digest, query_rules, firewall_users_table, firewall_users_config, firewall_rules_table, firewall_rules_config, mysql_threads, admin_threads, cluster_threads | B |\n| proxysql.uptime | uptime | seconds |\n\n### Per command\n\nThese metrics refer to the SQL command.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| command | SQL command. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| proxysql.mysql_command_execution_rate | uptime | seconds |\n| proxysql.mysql_command_execution_time | time | microseconds |\n| proxysql.mysql_command_execution_duration | 100us, 500us, 1ms, 5ms, 10ms, 50ms, 100ms, 500ms, 1s, 5s, 10s, +Inf | microseconds |\n\n### Per user\n\nThese metrics refer to the user.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| user | username from the mysql_users table |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| proxysql.mysql_user_connections_utilization | used | percentage |\n| proxysql.mysql_user_connections_count | used | connections |\n\n### Per backend\n\nThese metrics refer to the backend server.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| host | backend server host |\n| port | backend server port |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| proxysql.backend_status | online, shunned, offline_soft, offline_hard | status |\n| proxysql.backend_connections_usage | free, used | connections |\n| proxysql.backend_connections_rate | succeed, failed | connections/s |\n| proxysql.backend_queries_rate | queries | queries/s |\n| proxysql.backend_traffic | recv, send | B/s |\n| proxysql.backend_latency | latency | microseconds |\n\n", @@ -16117,7 +16306,7 @@ "most_popular": true }, "overview": "# Apache Pulsar\n\nPlugin: go.d.plugin\nModule: pulsar\n\n## Overview\n\nThis collector monitors Pulsar servers.\n\n\nIt collects broker statistics using Pulsar's [Prometheus endpoint](https://pulsar.apache.org/docs/en/deploy-monitoring/#broker-stats).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Pulsar instances running on localhost.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/pulsar.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/pulsar.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8080/metrics | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. 
| GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8080/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/metrics\n\n - name: remote\n url: http://192.0.2.1:8080/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/pulsar.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/pulsar.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8080/metrics | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8080/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/metrics\n\n - name: remote\n url: http://192.0.2.1:8080/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `pulsar` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m pulsar\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `pulsar` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep pulsar\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep pulsar /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep pulsar\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n- topic_* metrics are available when `exposeTopicLevelMetricsInPrometheus` is set to true.\n- subscription_* and namespace_subscription metrics are available when `exposeTopicLevelMetricsInPrometheus` is set to true.\n- replication_* and namespace_replication_* metrics are available when replication is configured and `replicationMetricsEnabled` is set to true.\n\n\n### Per Apache Pulsar instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| pulsar.broker_components | namespaces, topics, subscriptions, producers, consumers | components |\n| pulsar.messages_rate | publish, dispatch | messages/s |\n| pulsar.throughput_rate | publish, dispatch | KiB/s |\n| pulsar.storage_size | used | KiB |\n| pulsar.storage_operations_rate | read, write | message batches/s |\n| pulsar.msg_backlog | backlog | messages |\n| pulsar.storage_write_latency | <=0.5ms, <=1ms, <=5ms, <=10ms, <=20ms, <=50ms, <=100ms, <=200ms, <=1s, >1s | entries/s |\n| pulsar.entry_size | <=128B, <=512B, <=1KB, <=2KB, <=4KB, <=16KB, <=100KB, <=1MB, >1MB | entries/s |\n| pulsar.subscription_delayed | delayed | message batches |\n| pulsar.subscription_msg_rate_redeliver | redelivered | messages/s |\n| pulsar.subscription_blocked_on_unacked_messages | blocked | subscriptions |\n| pulsar.replication_rate | in, out | messages/s |\n| pulsar.replication_throughput_rate | in, out | KiB/s |\n| pulsar.replication_backlog | backlog | messages |\n\n### Per namespace\n\nTBD\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| pulsar.namespace_broker_components | topics, subscriptions, producers, consumers | components |\n| pulsar.namespace_messages_rate | publish, dispatch | messages/s |\n| pulsar.namespace_throughput_rate | publish, dispatch | KiB/s |\n| pulsar.namespace_storage_size | used | KiB |\n| pulsar.namespace_storage_operations_rate | read, write | message batches/s |\n| pulsar.namespace_msg_backlog | backlog | messages |\n| pulsar.namespace_storage_write_latency | <=0.5ms, <=1ms, <=5ms, <=10ms, <=20ms, <=50ms, <=100ms, <=200ms, <=1s, >1s | entries/s |\n| pulsar.namespace_entry_size | <=128B, <=512B, <=1KB, <=2KB, <=4KB, <=16KB, <=100KB, <=1MB, >1MB | entries/s |\n| pulsar.namespace_subscription_delayed | delayed | message batches |\n| pulsar.namespace_subscription_msg_rate_redeliver | redelivered | messages/s |\n| pulsar.namespace_subscription_blocked_on_unacked_messages | blocked | subscriptions |\n| pulsar.namespace_replication_rate | in, out | messages/s |\n| pulsar.namespace_replication_throughput_rate | in, out | KiB/s |\n| pulsar.namespace_replication_backlog | backlog | messages |\n| pulsar.topic_producers | a dimension per topic | producers |\n| pulsar.topic_subscriptions | a dimension per topic | subscriptions |\n| pulsar.topic_consumers | a dimension per topic | consumers |\n| pulsar.topic_messages_rate_in | a dimension per topic | publishes/s |\n| pulsar.topic_messages_rate_out | a dimension per topic | dispatches/s |\n| pulsar.topic_throughput_rate_in | a dimension per topic | KiB/s |\n| pulsar.topic_throughput_rate_out | a dimension per topic | KiB/s |\n| pulsar.topic_storage_size | a dimension per topic | KiB |\n| pulsar.topic_storage_read_rate | a dimension per topic | message batches/s |\n| pulsar.topic_storage_write_rate | a dimension per topic | message batches/s |\n| pulsar.topic_msg_backlog 
| a dimension per topic | messages |\n| pulsar.topic_subscription_delayed | a dimension per topic | message batches |\n| pulsar.topic_subscription_msg_rate_redeliver | a dimension per topic | messages/s |\n| pulsar.topic_subscription_blocked_on_unacked_messages | a dimension per topic | blocked subscriptions |\n| pulsar.topic_replication_rate_in | a dimension per topic | messages/s |\n| pulsar.topic_replication_rate_out | a dimension per topic | messages/s |\n| pulsar.topic_replication_throughput_rate_in | a dimension per topic | messages/s |\n| pulsar.topic_replication_throughput_rate_out | a dimension per topic | messages/s |\n| pulsar.topic_replication_backlog | a dimension per topic | messages |\n\n", @@ -16153,7 +16342,7 @@ "most_popular": false }, "overview": "# Puppet\n\nPlugin: go.d.plugin\nModule: puppet\n\n## Overview\n\nThis collector monitors Puppet metrics, including JVM heap and non-heap memory, CPU usage, and file descriptors.\n\n\nIt uses Puppet's metrics API endpoint [/status/v1/services](https://www.puppet.com/docs/puppetserver/5.3/status-api/v1/services.html) to gather the metrics.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Puppet instances running on localhost that are listening on port 8140.\nOn startup, it tries to collect metrics from:\n\n- https://127.0.0.1:8140\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/puppet.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/puppet.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| url | The base URL where the Puppet instance can be accessed. | https://127.0.0.1:8140 | yes |\n| timeout | HTTPS request timeout. | 1 | no |\n| username | Username for basic HTTPS authentication. | | no |\n| password | Password for basic HTTPS authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTPS authentication. | | no |\n| proxy_password | Password for proxy basic HTTPS authentication. | | no |\n| method | HTTPS request method. | POST | no |\n| body | HTTPS request body. | | no |\n| headers | HTTPS request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n#### Examples\n\n##### Basic with self-signed certificate\n\nPuppet with self-signed TLS certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8140\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8140\n tls_skip_verify: yes\n\n - name: remote\n url: https://192.0.2.1:8140\n tls_skip_verify: yes\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/puppet.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/puppet.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| url | The base URL where the Puppet instance can be accessed. | https://127.0.0.1:8140 | yes |\n| timeout | HTTPS request timeout. | 1 | no |\n| username | Username for basic HTTPS authentication. | | no |\n| password | Password for basic HTTPS authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTPS authentication. | | no |\n| proxy_password | Password for proxy basic HTTPS authentication. | | no |\n| method | HTTPS request method. | POST | no |\n| body | HTTPS request body. | | no |\n| headers | HTTPS request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic with self-signed certificate\n\nPuppet with self-signed TLS certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8140\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8140\n tls_skip_verify: yes\n\n - name: remote\n url: https://192.0.2.1:8140\n tls_skip_verify: yes\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `puppet` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m puppet\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `puppet` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep puppet\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep puppet /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep puppet\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Puppet instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| puppet.jvm_heap | committed, used | MiB |\n| puppet.jvm_nonheap | committed, used | MiB |\n| puppet.cpu | execution, GC | percentage |\n| puppet.fdopen | used | descriptors |\n\n", @@ -16190,7 +16379,7 @@ "most_popular": false }, "overview": "# RabbitMQ\n\nPlugin: go.d.plugin\nModule: rabbitmq\n\n## Overview\n\nThis collector monitors RabbitMQ instances.\n\nIt collects data using an HTTP-based API provided by the [management plugin](https://www.rabbitmq.com/management.html).\nThe following endpoints are used:\n\n- `/api/overview`\n- `/api/node/{node_name}`\n- `/api/vhosts`\n- `/api/queues` (disabled by default)\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Enable management plugin.\n\nThe management plugin is included in the RabbitMQ distribution, but disabled.\nTo enable see [Management Plugin](https://www.rabbitmq.com/management.html#getting-started) documentation.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is 
`go.d/rabbitmq.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/rabbitmq.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://localhost:15672 | yes |\n| collect_queues_metrics | Collect stats per vhost per queues. Enabling this can introduce serious overhead on both Netdata and RabbitMQ if many queues are configured and used. | no | no |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:15672\n\n```\n##### Basic HTTP auth\n\nLocal server with basic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:15672\n username: admin\n password: password\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:15672\n\n - name: remote\n url: http://192.0.2.0:15672\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Enable management plugin.\n\nThe management plugin is included in the RabbitMQ distribution, but disabled.\nTo enable it, see the [Management Plugin](https://www.rabbitmq.com/management.html#getting-started) documentation.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/rabbitmq.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/rabbitmq.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. 
| http://localhost:15672 | yes |\n| collect_queues_metrics | Collect stats per vhost per queue. Enabling this can introduce serious overhead on both Netdata and RabbitMQ if many queues are configured and used. | no | no |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:15672\n\n```\n##### Basic HTTP auth\n\nLocal server with basic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:15672\n username: admin\n password: password\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:15672\n\n - name: remote\n url: http://192.0.2.0:15672\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `rabbitmq` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m rabbitmq\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `rabbitmq` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep rabbitmq\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep rabbitmq /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep rabbitmq\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per RabbitMQ instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| rabbitmq.messages_count | ready, unacknowledged | messages |\n| rabbitmq.messages_rate | ack, publish, publish_in, publish_out, confirm, deliver, deliver_no_ack, get, get_no_ack, deliver_get, redeliver, return_unroutable | messages/s |\n| rabbitmq.objects_count | channels, consumers, connections, queues, exchanges | messages |\n| rabbitmq.connection_churn_rate | created, closed | operations/s |\n| rabbitmq.channel_churn_rate | created, closed | operations/s |\n| rabbitmq.queue_churn_rate | created, deleted, declared | operations/s |\n| rabbitmq.file_descriptors_count | available, used | fd |\n| rabbitmq.sockets_count | available, used | sockets |\n| rabbitmq.erlang_processes_count | available, used | processes |\n| rabbitmq.erlang_run_queue_processes_count | length | processes |\n| rabbitmq.memory_usage | used | bytes |\n| rabbitmq.disk_space_free_size | free | bytes |\n\n### Per vhost\n\nThese metrics refer to the virtual host.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vhost | virtual host name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| rabbitmq.vhost_messages_count | ready, unacknowledged | messages |\n| rabbitmq.vhost_messages_rate | ack, publish, publish_in, publish_out, confirm, deliver, deliver_no_ack, get, get_no_ack, deliver_get, redeliver, return_unroutable | messages/s |\n\n### Per queue\n\nThese metrics refer to the virtual host queue.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vhost | virtual host name |\n| queue | queue name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| rabbitmq.queue_messages_count | ready, unacknowledged, paged_out, persistent | messages |\n| rabbitmq.queue_messages_rate | ack, publish, publish_in, publish_out, confirm, deliver, deliver_no_ack, get, get_no_ack, deliver_get, redeliver, return_unroutable | messages/s |\n\n", @@ -16237,7 +16426,7 @@ "most_popular": true }, "overview": "# Redis\n\nPlugin: go.d.plugin\nModule: redis\n\n## Overview\n\nThis collector monitors the health and performance of Redis servers and collects general statistics, CPU and memory consumption, replication information, command statistics, and more.\n\n\nIt connects to the Redis instance via a TCP or UNIX socket and executes the following commands:\n\n- [INFO ALL](https://redis.io/commands/info)\n- [PING](https://redis.io/commands/ping/)\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by attempting to connect using known Redis TCP and UNIX sockets:\n\n- 127.0.0.1:6379\n- /tmp/redis.sock\n- 
/var/run/redis/redis.sock\n- /var/lib/redis/redis.sock\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/redis.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/redis.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Redis server address. | redis://@localhost:6379 | yes |\n| timeout | Dial (establishing new connections), read (socket reads) and write (socket writes) timeout in seconds. | 1 | no |\n| username | Username used for authentication. | | no |\n| password | Password used for authentication. | | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certificate authority that client use when verifying server certificates. | | no |\n| tls_cert | Client tls certificate. | | no |\n| tls_key | Client tls key. | | no |\n\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n address: 'redis://@127.0.0.1:6379'\n\n```\n##### Unix socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n address: 'unix://@/tmp/redis.sock'\n\n```\n##### TCP socket with password\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n address: 'redis://:password@127.0.0.1:6379'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 'redis://:password@127.0.0.1:6379'\n\n - name: remote\n address: 'redis://user:password@203.0.113.0:6379'\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/redis.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/redis.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Redis server address. 
| redis://@localhost:6379 | yes |\n| timeout | Dial (establishing new connections), read (socket reads) and write (socket writes) timeout in seconds. | 1 | no |\n| username | Username used for authentication. | | no |\n| password | Password used for authentication. | | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certificate authority that the client uses when verifying server certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n address: 'redis://@127.0.0.1:6379'\n\n```\n##### Unix socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n address: 'unix://@/tmp/redis.sock'\n\n```\n##### TCP socket with password\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n address: 'redis://:password@127.0.0.1:6379'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 'redis://:password@127.0.0.1:6379'\n\n - name: remote\n address: 'redis://user:password@203.0.113.0:6379'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `redis` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m redis\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `redis` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep redis\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep redis /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep redis\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ redis_connections_rejected ](https://github.com/netdata/netdata/blob/master/src/health/health.d/redis.conf) | redis.connections | connections rejected because of maxclients limit in the last minute |\n| [ redis_bgsave_slow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/redis.conf) | redis.bgsave_now | duration of the on-going RDB save operation |\n| [ redis_bgsave_broken ](https://github.com/netdata/netdata/blob/master/src/health/health.d/redis.conf) | redis.bgsave_health | status of the last RDB save operation (0: ok, 1: error) |\n| [ redis_master_link_down ](https://github.com/netdata/netdata/blob/master/src/health/health.d/redis.conf) | redis.master_link_down_since_time | time elapsed since the link between master and slave is down |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Redis instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| redis.connections | accepted, rejected | connections/s |\n| redis.clients | connected, blocked, tracking, in_timeout_table | clients |\n| redis.ping_latency | min, max, avg | seconds |\n| redis.commands | processes | commands/s |\n| redis.keyspace_lookup_hit_rate | lookup_hit_rate | percentage |\n| redis.memory | max, used, rss, peak, dataset, lua, scripts | bytes |\n| redis.mem_fragmentation_ratio | mem_fragmentation | ratio |\n| redis.key_eviction_events | evicted | keys/s |\n| redis.net | received, sent | kilobits/s |\n| redis.rdb_changes | changes | operations |\n| redis.bgsave_now | current_bgsave_time | seconds |\n| redis.bgsave_health | last_bgsave | status |\n| redis.bgsave_last_rdb_save_since_time | last_bgsave_time | seconds |\n| redis.aof_file_size | current, base | bytes |\n| redis.commands_calls | a dimension per command | calls |\n| redis.commands_usec | a dimension per command | microseconds |\n| redis.commands_usec_per_sec | a dimension per command | microseconds/s |\n| redis.key_expiration_events | expired | keys/s |\n| redis.database_keys | a dimension per database | keys |\n| redis.database_expires_keys | a dimension per database | keys |\n| redis.connected_replicas | connected | replicas |\n| redis.master_link_status | up, down | status |\n| redis.master_last_io_since_time | time | seconds |\n| redis.master_link_down_since_time | time | seconds |\n| redis.uptime | uptime | seconds |\n\n", @@ -16275,7 +16464,7 @@ "most_popular": false }, "overview": "# RethinkDB\n\nPlugin: go.d.plugin\nModule: rethinkdb\n\n## Overview\n\nIt collects cluster-wide metrics such as server status, client connections, active clients, query rate, and document read/write rates.\nFor each server, it offers similar metrics.\n\n\nThe data is gathered by querying the stats table in RethinkDB, which stores real-time statistics related to the cluster and its individual servers.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics 
from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf no configuration is given, the collector will attempt to connect to a RethinkDB instance at `127.0.0.1:28015`.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/rethinkdb.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/rethinkdb.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | The IP address and port where the RethinkDB service listens for connections. | 127.0.0.1:28015 | yes |\n| timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. | 1 | no |\n| username | Username used for authentication. | | no |\n| password | Password used for authentication. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:28015\n\n```\n##### With authentication\n\nAn example configuration with authentication.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:28015\n username: name\n password: pass\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:28015\n\n - name: remote\n address: 203.0.113.0:28015\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/rethinkdb.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/rethinkdb.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | The IP address and port where the RethinkDB service listens for connections. | 127.0.0.1:28015 | yes |\n| timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. | 1 | no |\n| username | Username used for authentication. | | no |\n| password | Password used for authentication. 
| | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:28015\n\n```\n##### With authentication\n\nAn example configuration with authentication.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:28015\n username: name\n password: pass\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:28015\n\n - name: remote\n address: 203.0.113.0:28015\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `rethinkdb` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m rethinkdb\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `rethinkdb` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep rethinkdb\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep rethinkdb /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep rethinkdb\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per RethinkDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| rethinkdb.cluster_servers_stats_request | success, timeout | servers |\n| rethinkdb.cluster_client_connections | connections | connections |\n| rethinkdb.cluster_active_clients | active | clients |\n| rethinkdb.cluster_queries | queries | queries/s |\n| rethinkdb.cluster_documents | read, written | documents/s |\n\n### Per server\n\nThese metrics refer to the server (cluster member).\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| server_uuid | Server UUID. |\n| server_name | Server name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| rethinkdb.server_stats_request_status | success, timeout | status |\n| rethinkdb.server_client_connections | connections | connections |\n| rethinkdb.server_active_clients | active | clients |\n| rethinkdb.server_queries | queries | queries/s |\n| rethinkdb.server_documents | read, written | documents/s |\n\n", @@ -16314,7 +16503,7 @@ "most_popular": false }, "overview": "# Riak KV\n\nPlugin: go.d.plugin\nModule: riakkv\n\n## Overview\n\nThis collector monitors RiakKV metrics about throughput, latency, resources and more.\n\n\nIt sends HTTP requests to the Riak [/stats](https://docs.riak.com/riak/kv/2.2.3/developing/api/http/status.1.html) endpoint.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Riak instances running on localhost that are listening on port 8098.\nOn startup, it tries to collect metrics from:\n\n- http://127.0.0.1:8098/stats\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Enable /stats endpoint\n\nSee the RiakKV [configuration reference](https://docs.riak.com/riak/kv/2.2.3/developing/api/http/status.1.html).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/riakkv.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/riakkv.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8098/stats | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. 
| | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8098/stats\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8098/stats\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nWith enabled HTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8098/stats\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8098/stats\n\n - name: remote\n url: http://192.0.2.1:8098/stats\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Enable /stats endpoint\n\nSee the RiakKV [configuration reference](https://docs.riak.com/riak/kv/2.2.3/developing/api/http/status.1.html).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/riakkv.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/riakkv.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8098/stats | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8098/stats\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8098/stats\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nWith enabled HTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8098/stats\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8098/stats\n\n - name: remote\n url: http://192.0.2.1:8098/stats\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `riakkv` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m riakkv\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `riakkv` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep riakkv\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep riakkv /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep riakkv\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Riak KV instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| riak.kv.throughput | gets, puts | operations/s |\n| riak.dt.vnode_updates | counters, sets, maps | operations/s |\n| riak.search | queries | queries/s |\n| riak.search.documents | indexed | documents/s |\n| riak.consistent.operations | gets, puts | operations/s |\n| riak.kv.latency.get | mean, median, 95, 99, 100 | ms |\n| riak.kv.latency.put | mean, median, 95, 99, 100 | ms |\n| riak.dt.latency.counter_merge | mean, median, 95, 99, 100 | ms |\n| riak.dt.latency.set_merge | mean, median, 95, 99, 100 | ms |\n| riak.dt.latency.map_merge | mean, median, 95, 99, 100 | ms |\n| riak.search.latency.query | median, min, 95, 99, 999, max | ms |\n| riak.search.latency.index | median, min, 95, 99, 999, max | ms |\n| riak.consistent.latency.get | mean, median, 95, 99, 100 | ms |\n| riak.consistent.latency.put | mean, median, 95, 99, 100 | ms |\n| riak.vm | processes | total |\n| riak.vm.memory.processes | allocated, used | MB |\n| riak.kv.siblings_encountered.get | mean, median, 95, 99, 100 | siblings |\n| riak.kv.objsize.get | mean, median, 95, 99, 100 | KB |\n| riak.search.vnodeq_size | mean, median, 95, 99, 100 | messages |\n| riak.search.index | index_fail, bad_entry, extract_fail | errors |\n| riak.core.protobuf_connections | active | connections |\n| riak.core.repairs | read | repairs |\n| riak.core.fsm_active | get, put, secondary index, list keys | fsms |\n| riak.core.fsm_rejected | get, put | fsms |\n\n", @@ -16362,7 +16551,7 @@ "most_popular": false }, "overview": "# Rspamd\n\nPlugin: go.d.plugin\nModule: rspamd\n\n## Overview\n\nThis collector monitors the activity and performance of Rspamd servers. It gathers various metrics including scanned emails, learned messages, spam/ham counts, and actions taken on emails (reject, rewrite, etc.).\n\n\nIt retrieves statistics from Rspamd's [built-in web server](https://rspamd.com/doc/workers/controller.html) by making HTTP requests to the `/stat` endpoint.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Rspamd instances running on localhost that are listening on port 11334.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/rspamd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/rspamd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:11334 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:11334\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:11334\n username: username\n password: password\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:11334\n\n - name: remote\n url: http://192.0.2.1:11334\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/rspamd.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/rspamd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:11334 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:11334\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:11334\n username: username\n password: password\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:11334\n\n - name: remote\n url: http://192.0.2.1:11334\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `rspamd` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m rspamd\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `rspamd` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep rspamd\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep rspamd /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep rspamd\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Rspamd instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| rspamd.classifications | ham, spam | messages/s |\n| rspamd.actions | reject, soft_reject, rewrite_subject, add_header, greylist, custom, discard, quarantine, no_action | messages/s |\n| rspamd.scans | scanned | messages/s |\n| rspamd.learns | learned | messages/s |\n| rspamd.connections | connections | connections/s |\n| rspamd.control_connections | control_connections | connections/s |\n\n", @@ -16371,6 +16560,44 @@ "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/rspamd/metadata.yaml", "related_resources": "" }, + { + "meta": { + "id": "collector-go.d.plugin-samba", + "plugin_name": "go.d.plugin", + "module_name": "samba", + "monitored_instance": { + "name": "Samba", + "link": "https://www.samba.org/samba/", + "icon_filename": "samba.svg", + "categories": [ + "data-collection.storage-mount-points-and-filesystems" + ] + }, + "keywords": [ + "samba", + "smb", + "file sharing" + ], + "related_resources": { + "integrations": { + "list": [] + } + }, + "info_provided_to_referring_integrations": { + "description": "" + }, + "most_popular": false + }, + "overview": "# Samba\n\nPlugin: go.d.plugin\nModule: samba\n\n## Overview\n\nThis collector monitors Samba syscalls and SMB2 calls. It relies on the [`smbstatus`](https://www.samba.org/samba/docs/current/man-html/smbstatus.1.html) CLI tool but avoids directly executing the binary. Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment. This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\nExecuted commands:\n- `smbstatus -P`\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Verifying and Enabling Profiling for SMBd\n\n1. **Check for Profiling Support**\n\n Before enabling profiling, it's important to verify if `smbd` was compiled with profiling capabilities. Run the following command as root user (using `sudo`) to check:\n\n ```bash\n $ sudo smbd --build-options | grep WITH_PROFILE\n WITH_PROFILE\n ```\n\n If the command outputs `WITH_PROFILE`, profiling is supported. If not, you'll need to recompile `smbd` with profiling enabled (refer to Samba documentation for specific instructions).\n\n2. **Enable Profiling**\n\n Once you've confirmed profiling support, you can enable it using one of the following methods:\n\n - **Command-Line Option**\n Start smbd with the `-P 1` option when invoking it directly from the command line.\n - **Configuration File**\n Modify the `smb.conf` configuration file located at `/etc/samba/smb.conf` (the path might vary slightly depending on your system). Add the following line to the `[global]` section:\n\n ```bash\n smbd profiling level = count\n ```\n3. 
**Restart the Samba service**\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/samba.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/samba.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | smbstatus binary execution timeout. | 2 | no |\n\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n```yaml\njobs:\n - name: samba\n update_every: 5 # Collect statistics every 5 seconds\n\n```\n", + "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `samba` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m samba\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `samba` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep samba\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep samba /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep samba\n```\n\n", + "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per syscall\n\nThese metrics refer to the Syscall.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| syscall | Syscall name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| samba.syscall_calls | syscalls | calls/s |\n| samba.syscall_transferred_data | transferred | bytes/s |\n\n### Per smb2call\n\nThese metrics refer to the SMB2 Call.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| smb2call | SMB2 call name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| samba.smb2_call_calls | smb2 | calls/s |\n| samba.smb2_call_transferred_data | in, out | bytes/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-samba-Samba", "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/samba/metadata.yaml", "related_resources": "" }, { "meta": { "id": "collector-go.d.plugin-scaleio", "plugin_name": "go.d.plugin", "module_name": "scaleio", "monitored_instance": { "name": "Dell EMC ScaleIO", "link": "https://www.dell.com/en-ca/dt/storage/scaleio/scaleioreadynode.htm", "icon_filename": "dell.svg", "categories": [ "data-collection.storage-mount-points-and-filesystems" ] }, "related_resources": { "integrations": { "list": [] } }, "info_provided_to_referring_integrations": { "description": "" }, "keywords": [ "scaleio" ], "most_popular": false }, "overview": "# Dell EMC ScaleIO\n\nPlugin: go.d.plugin\nModule: scaleio\n\n## Overview\n\nThis collector monitors ScaleIO (VxFlex OS) instances via VxFlex OS Gateway API.\n\nIt collects metrics for the following ScaleIO components:\n\n- System\n- Storage Pool\n- Sdc\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/scaleio.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/scaleio.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | https://127.0.0.1:80 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | yes |\n| password | Password for basic HTTP authentication. | | yes |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. 
| no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1\n username: admin\n password: password\n tls_skip_verify: yes # self-signed certificate\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instance.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1\n username: admin\n password: password\n tls_skip_verify: yes # self-signed certificate\n\n - name: remote\n url: https://203.0.113.10\n username: admin\n password: password\n tls_skip_verify: yes\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/scaleio.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/scaleio.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | https://127.0.0.1:80 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | yes |\n| password | Password for basic HTTP authentication. | | yes |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1\n username: admin\n password: password\n tls_skip_verify: yes # self-signed certificate\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instance.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1\n username: admin\n password: password\n tls_skip_verify: yes # self-signed certificate\n\n - name: remote\n url: https://203.0.113.10\n username: admin\n password: password\n tls_skip_verify: yes\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `scaleio` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m scaleio\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `scaleio` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep scaleio\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep scaleio /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep scaleio\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
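Before (or while) debugging the collector itself, it can help to confirm that the Gateway API is reachable with the configured credentials. A hedged sketch using `curl` (the `/api/login` endpoint and the self-signed-certificate flag are assumptions based on a typical VxFlex OS Gateway setup):

```bash
# A token string in the response means the credentials are accepted
curl -sk -u admin:password https://127.0.0.1/api/login
```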
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Dell EMC ScaleIO instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| scaleio.system_capacity_total | total | KiB |\n| scaleio.system_capacity_in_use | in_use | KiB |\n| scaleio.system_capacity_usage | thick, decreased, thin, snapshot, spare, unused | KiB |\n| scaleio.system_capacity_available_volume_allocation | available | KiB |\n| scaleio.system_capacity_health_state | protected, degraded, in_maintenance, failed, unavailable | KiB |\n| scaleio.system_workload_primary_bandwidth_total | total | KiB/s |\n| scaleio.system_workload_primary_bandwidth | read, write | KiB/s |\n| scaleio.system_workload_primary_iops_total | total | iops/s |\n| scaleio.system_workload_primary_iops | read, write | iops/s |\n| scaleio.system_workload_primary_io_size_total | io_size | KiB |\n| scaleio.system_rebalance | read, write | KiB/s |\n| scaleio.system_rebalance_left | left | KiB |\n| scaleio.system_rebalance_time_until_finish | time | seconds |\n| scaleio.system_rebuild | read, write | KiB/s |\n| scaleio.system_rebuild_left | left | KiB |\n| scaleio.system_defined_components | devices, fault_sets, protection_domains, rfcache_devices, sdc, sds, snapshots, storage_pools, volumes, vtrees | components |\n| scaleio.system_components_volumes_by_type | thick, thin | volumes |\n| scaleio.system_components_volumes_by_mapping | mapped, unmapped | volumes |\n\n### Per storage pool\n\nThese metrics refer to the storage pool.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| scaleio.storage_pool_capacity_total | total | KiB |\n| scaleio.storage_pool_capacity_in_use | in_use | KiB |\n| scaleio.storage_pool_capacity_usage | thick, decreased, thin, snapshot, spare, unused | KiB |\n| scaleio.storage_pool_capacity_utilization | used | percentage |\n| scaleio.storage_pool_capacity_available_volume_allocation | available | KiB |\n| scaleio.storage_pool_capacity_health_state | protected, degraded, in_maintenance, failed, unavailable | KiB |\n| scaleio.storage_pool_components | devices, snapshots, volumes, vtrees | components |\n\n### Per sdc\n\nThese metrics refer to the SDC (ScaleIO Data Client).\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| scaleio.sdc_mdm_connection_state | connected | boolean |\n| scaleio.sdc_bandwidth | read, write | KiB/s |\n| scaleio.sdc_iops | read, write | iops/s |\n| scaleio.sdc_io_size | read, write | KiB |\n| scaleio.sdc_num_of_mapped_volumed | mapped | volumes |\n\n", @@ -16413,7 +16640,7 @@ "plugin_name": "go.d.plugin", "module_name": "sensors", "monitored_instance": { - "name": "Linux Sensors (lm-sensors)", + "name": "Linux Sensors", "link": "https://hwmon.wiki.kernel.org/lm_sensors", "icon_filename": "microchip.svg", "categories": [ @@ -16428,7 +16655,8 @@ "power", "fan", "energy", - "humidity" + "humidity", + "intrusion" ], "related_resources": { "integrations": { @@ -16440,13 +16668,13 @@ }, "most_popular": false }, - "overview": "# Linux Sensors (lm-sensors)\n\nPlugin: go.d.plugin\nModule: sensors\n\n## Overview\n\nThis collector gathers real-time system sensor statistics, including temperature, voltage, current, power, fan speed, energy consumption, and humidity, utilizing the [sensors](https://linux.die.net/man/1/sensors) binary.\n\n\n\n\nThis collector is supported on all 
platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe following type of sensors are auto-detected:\n\n- temperature\n- fan\n- voltage\n- current\n- power\n- energy\n- humidity\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install lm-sensors\n\n- Install `lm-sensors` using your distribution's package manager.\n- Run `sensors-detect` to detect hardware monitoring chips.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/sensors.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/sensors.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| binary_path | Path to the `sensors` binary. If an absolute path is provided, the collector will use it directly; otherwise, it will search for the binary in directories specified in the PATH environment variable. | /usr/bin/sensors | yes |\n| timeout | Timeout for executing the binary, specified in seconds. | 2 | no |\n\n#### Examples\n\n##### Custom binary path\n\nThe executable is not in the directories specified in the PATH environment variable.\n\n```yaml\njobs:\n - name: sensors\n binary_path: /usr/local/sbin/sensors\n\n```\n", + "overview": "# Linux Sensors\n\nPlugin: go.d.plugin\nModule: sensors\n\n## Overview\n\nThis collector gathers real-time system sensor statistics using the [sysfs](https://www.kernel.org/doc/Documentation/hwmon/sysfs-interface) interface.\n\nSupported sensors:\n\n- Temperature\n- Voltage\n- Fan\n- Current\n- Power\n- Energy\n- Humidity\n- Intrusion\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAutomatically discovers and exposes all available sensors on the system through the [sysfs](https://www.kernel.org/doc/Documentation/hwmon/sysfs-interface) interface.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/sensors.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/sensors.conf\n```\n#### Options\n\nThe following options can 
be defined globally: update_every.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| relabel | A list used to update existing sensor labels or add labels to sensors that don't have them. | [] | no |\n| relabel[].chip | [Pattern](/src/libnetdata/simple_pattern/README.md#simple-patterns) to match the `chip_id` label value. | | no |\n| relabel[].sensors | A list of sensors to be relabeled for the specified chip. | [] | no |\n| relabel[].sensors[].name | The exact sensor name (e.g., `'temp1'`, `'in1'`, `'voltage1'`). | | no |\n| relabel[].sensors[].label | The new label value for the sensor. | | no |\n\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n```yaml\njobs:\n - name: sensors\n update_every: 5 # Collect sensors statistics every 5 seconds\n\n```\n##### Renaming labels\n\nAllows you to override/add labels.\n\n```yaml\njobs:\n - name: sensors\n relabel:\n - chip: as99127f-*\n sensors:\n - name: temp1\n label: Mobo Temp\n - name: temp2\n label: CPU0 Temp\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `sensors` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m sensors\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `sensors` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep sensors\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep sensors /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep sensors\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
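Because the rewritten collector reads hwmon data straight from sysfs, you can inspect the same interface by hand to confirm your sensors are exposed at all (paths are illustrative; `hwmon0` may differ on your system):

```bash
# Each /sys/class/hwmon/hwmonN directory is one chip
ls /sys/class/hwmon/
cat /sys/class/hwmon/hwmon0/name
cat /sys/class/hwmon/hwmon0/temp1_input   # temperature in millidegrees Celsius
```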
An instance is uniquely identified by a set of labels.\n\n\n\n### Per sensor\n\nThese metrics refer to the sensor.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| chip | The hardware component responsible for the sensor monitoring. |\n| feature | The specific sensor or monitoring point provided by the chip. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| sensors.sensor_temperature | temperature | Celsius |\n| sensors.sensor_voltage | voltage | Volts |\n| sensors.sensor_current | current | Amperes |\n| sensors.sensor_power | power | Watts |\n| sensors.sensor_fan_speed | fan | RPM |\n| sensors.sensor_energy | energy | Joules |\n| sensors.sensor_humidity | humidity | percent |\n\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per sensor\n\nThese metrics refer to the system sensor.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| chip | The path to the sensor's chip device, excluding the /sys/devices prefix. This provides a unique identifier for the physical hardware component. |\n| chip_id | A unique identifier for the sensor's chip, formatted as `chipName-busType-hash`. |\n| sensor | The name of the specific sensor within the chip device. This provides a direct identifier for the individual measurement point. |\n| label | A label provided by the kernel driver to indicate the intended use or purpose of the sensor. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| sensors.chip_sensor_temperature | input | Celsius |\n| sensors.chip_sensor_temperature_alarm | clear, triggered | status |\n| sensors.chip_sensor_voltage | input | Volts |\n| sensors.chip_sensor_voltage_average | average | Volts |\n| sensors.chip_sensor_voltage_alarm | clear, triggered | status |\n| sensors.chip_sensor_fan | input | RPM |\n| sensors.chip_sensor_fan_alarm | clear, triggered | status |\n| sensors.chip_sensor_current | input | Amperes |\n| sensors.chip_sensor_current_average | average | Amperes |\n| sensors.chip_sensor_current_alarm | clear, triggered | status |\n| sensors.chip_sensor_power | input | Watts |\n| sensors.chip_sensor_power_average | average | Watts |\n| sensors.chip_sensor_power_alarm | clear, triggered | status |\n| sensors.chip_sensor_energy | input | Joules |\n| sensors.chip_sensor_humidity | input | percent |\n| sensors.chip_sensor_intrusion_alarm | clear, triggered | status |\n\n", "integration_type": "collector", - "id": "go.d.plugin-sensors-Linux_Sensors_(lm-sensors)", + "id": "go.d.plugin-sensors-Linux_Sensors", "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/sensors/metadata.yaml", "related_resources": "" }, @@ -16479,8 +16707,8 @@ }, "most_popular": false }, - "overview": "# S.M.A.R.T.\n\nPlugin: go.d.plugin\nModule: smartctl\n\n## Overview\n\nThis collector monitors the health status of storage devices by analyzing S.M.A.R.T. 
(Self-Monitoring, Analysis, and Reporting Technology) counters.\nIt relies on the [`smartctl`](https://linux.die.net/man/8/smartctl) CLI tool but avoids directly executing the binary.\nInstead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment.\nThis approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\n\nExecuted commands:\n- `smartctl --json --scan`\n- `smartctl --json --all {deviceName} --device {deviceType} --nocheck {powerMode}`\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install smartmontools (v7.0+)\n\nInstall `smartmontools` version 7.0 or later using your distribution's package manager. Version 7.0 introduced the `--json` output mode, which is required for this collector to function properly.\n\n\n#### For Netdata running in a Docker container\n\n1. **Install smartmontools**.\n\n Ensure `smartctl` is available in the container by setting the environment variable `NETDATA_EXTRA_DEB_PACKAGES=smartmontools` when starting the container.\n\n2. **Provide access to storage devices**.\n\n Netdata requires the `SYS_RAWIO` capability and access to the storage devices to run the `smartctl` collector inside a Docker container. Here's how you can achieve this:\n\n - `docker run`\n\n ```bash\n docker run --cap-add SYS_RAWIO --device /dev/sda:/dev/sda ...\n ```\n\n - `docker-compose.yml`\n\n ```yaml\n services:\n netdata:\n cap_add:\n - SYS_PTRACE\n - SYS_ADMIN\n - SYS_RAWIO # smartctl\n devices:\n - \"/dev/sda:/dev/sda\"\n ```\n\n > **Multiple Devices**: These examples only show mapping of one device (/dev/sda). You'll need to add additional `--device` options (in docker run) or entries in the `devices` list (in docker-compose.yml) for each storage device you want Netdata's smartctl collector to monitor.\n\n > **NVMe Devices**: Do not map NVMe devices using this method. Netdata uses a [dedicated collector](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/nvme#readme) to monitor NVMe devices.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/smartctl.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/smartctl.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | interval for updating Netdata charts, measured in seconds. Collector might use cached data if less than **Devices poll interval**. | 10 | no |\n| timeout | smartctl binary execution timeout. | 5 | no |\n| scan_every | interval for discovering new devices using `smartctl --scan`, measured in seconds. 
Set to 0 to scan devices only once on startup. | 900 | no |\n| poll_devices_every | interval for gathering data for every device, measured in seconds. Data is cached for this interval. | 300 | no |\n| device_selector | Specifies a pattern to match the 'info name' of devices as reported by `smartctl --scan --json`. | * | no |\n| extra_devices | Allows manual specification of devices not automatically detected by `smartctl --scan`. Each device entry must include both a name and a type. See \"Configuration Examples\" for details. | [] | no |\n| no_check_power_mode | Skip data collection when the device is in a low-power mode. Prevents unnecessary disk spin-up. | standby | no |\n\n##### no_check_power_mode\n\nThe valid arguments to this option are:\n\n| Mode | Description |\n|---------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|\n| never | Check the device always. |\n| sleep | Check the device unless it is in SLEEP mode. |\n| standby | Check the device unless it is in SLEEP or STANDBY mode. In these modes most disks are not spinning, so if you want to prevent a disk from spinning up, this is probably what you want. |\n| idle | Check the device unless it is in SLEEP, STANDBY or IDLE mode. In the IDLE state, most disks are still spinning, so this is probably not what you want. |\n\n\n#### Examples\n\n##### Custom devices poll interval\n\nAllows you to override the default devices poll interval (data collection).\n\n```yaml\njobs:\n - name: smartctl\n devices_poll_interval: 60 # Collect S.M.A.R.T statistics every 60 seconds\n\n```\n##### Extra devices\n\nThis example demonstrates using `extra_devices` to manually add a storage device (`/dev/sdc`) not automatically detected by `smartctl --scan`.\n\n\n```yaml\njobs:\n - name: smartctl\n extra_devices:\n - name: /dev/sdc\n type: jmb39x-q,3\n\n```\n", + "overview": "# S.M.A.R.T.\n\nPlugin: go.d.plugin\nModule: smartctl\n\n## Overview\n\nThis collector monitors the health status of storage devices by analyzing S.M.A.R.T. (Self-Monitoring, Analysis, and Reporting Technology) counters.\nIt relies on the [`smartctl`](https://linux.die.net/man/8/smartctl) CLI tool but avoids directly executing the binary.\nInstead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment.\nThis approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\n\nExecuted commands:\n- `smartctl --json --scan`\n- `smartctl --json --all {deviceName} --device {deviceType} --nocheck {powerMode}`\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n- BSD\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install smartmontools (v7.0+)\n\nInstall `smartmontools` version 7.0 or later using your distribution's package manager. 
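For example (a sketch; the package is commonly named `smartmontools`, but names and repositories vary by distribution):

```bash
# Debian/Ubuntu
sudo apt-get install smartmontools

# Fedora/RHEL
sudo dnf install smartmontools

# Confirm the version is 7.0 or later
smartctl --version | head -n 1
```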
Version 7.0 introduced the `--json` output mode, which is required for this collector to function properly.\n\n\n#### For Netdata running in a Docker container\n\n1. **Install smartmontools**.\n\n Ensure `smartctl` is available in the container by setting the environment variable `NETDATA_EXTRA_DEB_PACKAGES=smartmontools` when starting the container.\n\n2. **Provide access to storage devices**.\n\n Netdata requires the `SYS_RAWIO` capability and access to the storage devices to run the `smartctl` collector inside a Docker container. Here's how you can achieve this:\n\n - `docker run`\n\n ```bash\n docker run --cap-add SYS_RAWIO --device /dev/sda:/dev/sda ...\n ```\n\n - `docker-compose.yml`\n\n ```yaml\n services:\n netdata:\n cap_add:\n - SYS_PTRACE\n - SYS_ADMIN\n - SYS_RAWIO # smartctl\n devices:\n - \"/dev/sda:/dev/sda\"\n ```\n\n > **Multiple Devices**: These examples only show mapping of one device (/dev/sda). You'll need to add additional `--device` options (in docker run) or entries in the `devices` list (in docker-compose.yml) for each storage device you want Netdata's smartctl collector to monitor.\n\n > **NVMe Devices**: Do not map NVMe devices using this method. Netdata uses a [dedicated collector](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/nvme#readme) to monitor NVMe devices.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/smartctl.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/smartctl.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | interval for updating Netdata charts, measured in seconds. Collector might use cached data if less than **Devices poll interval**. | 10 | no |\n| timeout | smartctl binary execution timeout. | 5 | no |\n| scan_every | interval for discovering new devices using `smartctl --scan`, measured in seconds. Set to 0 to scan devices only once on startup. | 900 | no |\n| poll_devices_every | interval for gathering data for every device, measured in seconds. Data is cached for this interval. | 300 | no |\n| device_selector | Specifies a pattern to match the 'info name' of devices as reported by `smartctl --scan --json`. | * | no |\n| extra_devices | Allows manual specification of devices not automatically detected by `smartctl --scan`. Each device entry must include both a name and a type. See \"Configuration Examples\" for details. | [] | no |\n| no_check_power_mode | Skip data collection when the device is in a low-power mode. Prevents unnecessary disk spin-up. | standby | no |\n\n##### no_check_power_mode\n\nThe valid arguments to this option are:\n\n| Mode | Description |\n|---------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|\n| never | Check the device always. |\n| sleep | Check the device unless it is in SLEEP mode. |\n| standby | Check the device unless it is in SLEEP or STANDBY mode. 
In these modes most disks are not spinning, so if you want to prevent a disk from spinning up, this is probably what you want. |\n| idle | Check the device unless it is in SLEEP, STANDBY or IDLE mode. In the IDLE state, most disks are still spinning, so this is probably not what you want. |\n\n\n#### Examples\n\n##### Custom devices poll interval\n\nAllows you to override the default devices poll interval (data collection).\n\n```yaml\njobs:\n - name: smartctl\n devices_poll_interval: 60 # Collect S.M.A.R.T statistics every 60 seconds\n\n```\n##### Extra devices\n\nThis example demonstrates using `extra_devices` to manually add a storage device (`/dev/sdc`) not automatically detected by `smartctl --scan`.\n\n\n```yaml\njobs:\n - name: smartctl\n extra_devices:\n - name: /dev/sdc\n type: jmb39x-q,3\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `smartctl` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m smartctl\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `smartctl` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep smartctl\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep smartctl /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep smartctl\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
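You can also run the same scan the collector performs (see the executed commands in the overview above) to check which devices it will discover (requires root):

```bash
# Mirrors the collector's device discovery step
sudo smartctl --json --scan
```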
An instance is uniquely identified by a set of labels.\n\n\n\n### Per controller\n\nThese metrics refer to the Storage Device.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device_name | Device name |\n| device_type | Device type |\n| model_name | Model name |\n| serial_number | Serial number |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| smartctl.device_smart_status | passed, failed | status |\n| smartctl.device_ata_smart_error_log_count | error_log | logs |\n| smartctl.device_power_on_time | power_on_time | seconds |\n| smartctl.device_temperature | temperature | Celsius |\n| smartctl.device_power_cycles_count | power | cycles |\n| smartctl.device_read_errors_rate | corrected, uncorrected | errors/s |\n| smartctl.device_write_errors_rate | corrected, uncorrected | errors/s |\n| smartctl.device_verify_errors_rate | corrected, uncorrected | errors/s |\n| smartctl.device_smart_attr_{attribute_name} | {attribute_name} | {attribute_unit} |\n| smartctl.device_smart_attr_{attribute_name}_normalized | {attribute_name} | value |\n\n", @@ -16516,7 +16744,7 @@ "most_popular": true }, "overview": "# SNMP devices\n\nPlugin: go.d.plugin\nModule: snmp\n\n## Overview\n\nThis SNMP collector discovers and gathers statistics for network interfaces on SNMP-enabled devices:\n\n- Traffic\n- Packets (unicast, multicast, broadcast)\n- Errors\n- Discards\n- Administrative and operational status\n\nAdditionally, it collects overall device uptime.\n\nIt is compatible with all SNMP versions (v1, v2c, and v3) and uses the [gosnmp](https://github.com/gosnmp/gosnmp) package.\n\n**For advanced users**:\n\n- You can manually specify custom OIDs (Object Identifiers) to retrieve specific data points beyond the default metrics.\n- However, defining custom charts with dimensions for these OIDs requires manual configuration.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\n**Device limitations**: Many SNMP switches and routers have limited processing power. They might not be able to report data as frequently as desired. You can monitor response times using go.d.plugin in debug mode to identify potential bottlenecks.\n\n**Concurrent access**: If multiple collectors or tools access the same SNMP device simultaneously, data points might be skipped. This is a limitation of the device itself, not this collector. 
To mitigate this, consider increasing the collection interval (update_every) to reduce the frequency of requests.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/snmp.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/snmp.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| hostname | Target ipv4 address. | | yes |\n| community | SNMPv1/2 community string. | public | no |\n| options.version | SNMP version. Available versions: 1, 2, 3. | 2 | no |\n| options.port | Target port. | 161 | no |\n| options.retries | Retries to attempt. | 1 | no |\n| options.timeout | SNMP request/response timeout. | 5 | no |\n| options.max_repetitions | Controls how many SNMP variables to retrieve in a single GETBULK request. | 25 | no |\n| options.max_request_size | Maximum number of OIDs allowed in a single GET request. | 60 | no |\n| network_interface_filter.by_name | Filter interfaces by their names using [simple patterns](/src/libnetdata/simple_pattern/README.md#simple-patterns). | | no |\n| network_interface_filter.by_type | Filter interfaces by their types using [simple patterns](/src/libnetdata/simple_pattern/README.md#simple-patterns). | | no |\n| user.name | SNMPv3 user name. | | no |\n| user.name | Security level of SNMPv3 messages. | | no |\n| user.auth_proto | Security level of SNMPv3 messages. | | no |\n| user.name | Authentication protocol for SNMPv3 messages. | | no |\n| user.auth_key | Authentication protocol pass phrase. | | no |\n| user.priv_proto | Privacy protocol for SNMPv3 messages. | | no |\n| user.priv_key | Privacy protocol pass phrase. | | no |\n| charts | List of charts. | [] | yes |\n| charts.id | Chart ID. Used to uniquely identify the chart. | | yes |\n| charts.title | Chart title. | Untitled chart | no |\n| charts.units | Chart units. | num | no |\n| charts.family | Chart family. | charts.id | no |\n| charts.type | Chart type (line, area, stacked). | line | no |\n| charts.priority | Chart priority. | 70000 | no |\n| charts.multiply_range | Used when you need to define many charts using incremental OIDs. | [] | no |\n| charts.dimensions | List of chart dimensions. | [] | yes |\n| charts.dimensions.oid | Collected metric OID. | | yes |\n| charts.dimensions.name | Dimension name. | | yes |\n| charts.dimensions.algorithm | Dimension algorithm (absolute, incremental). | absolute | no |\n| charts.dimensions.multiplier | Collected value multiplier, applied to convert it properly to units. | 1 | no |\n| charts.dimensions.divisor | Collected value divisor, applied to convert it properly to units. 
| 1 | no |\n\n##### user.auth_proto\n\nThe security of an SNMPv3 message as per RFC 3414 (`user.level`):\n\n| String value | Int value | Description |\n|:------------:|:---------:|------------------------------------------|\n| none | 1 | no message authentication or encryption |\n| authNoPriv | 2 | message authentication and no encryption |\n| authPriv | 3 | message authentication and encryption |\n\n\n##### user.name\n\nThe digest algorithm for SNMPv3 messages that require authentication (`user.auth_proto`):\n\n| String value | Int value | Description |\n|:------------:|:---------:|-------------------------------------------|\n| none | 1 | no message authentication |\n| md5 | 2 | MD5 message authentication (HMAC-MD5-96) |\n| sha | 3 | SHA message authentication (HMAC-SHA-96) |\n| sha224 | 4 | SHA message authentication (HMAC-SHA-224) |\n| sha256 | 5 | SHA message authentication (HMAC-SHA-256) |\n| sha384 | 6 | SHA message authentication (HMAC-SHA-384) |\n| sha512 | 7 | SHA message authentication (HMAC-SHA-512) |\n\n\n##### user.priv_proto\n\nThe encryption algorithm for SNMPv3 messages that require privacy (`user.priv_proto`):\n\n| String value | Int value | Description |\n|:------------:|:---------:|-------------------------------------------------------------------------|\n| none | 1 | no message encryption |\n| des | 2 | ES encryption (CBC-DES) |\n| aes | 3 | 128-bit AES encryption (CFB-AES-128) |\n| aes192 | 4 | 192-bit AES encryption (CFB-AES-192) with \"Blumenthal\" key localization |\n| aes256 | 5 | 256-bit AES encryption (CFB-AES-256) with \"Blumenthal\" key localization |\n| aes192c | 6 | 192-bit AES encryption (CFB-AES-192) with \"Reeder\" key localization |\n| aes256c | 7 | 256-bit AES encryption (CFB-AES-256) with \"Reeder\" key localization |\n\n\n#### Examples\n\n##### SNMPv1/2\n\nIn this example:\n\n- the SNMP device is `192.0.2.1`.\n- the SNMP version is `2`.\n- the SNMP community is `public`.\n- we will update the values every 10 seconds.\n\n\n```yaml\njobs:\n - name: switch\n update_every: 10\n hostname: 192.0.2.1\n community: public\n options:\n version: 2\n\n```\n##### SNMPv3\n\nTo use SNMPv3:\n\n- use `user` instead of `community`.\n- set `options.version` to 3.\n\n\n```yaml\njobs:\n - name: switch\n update_every: 10\n hostname: 192.0.2.1\n options:\n version: 3\n user:\n name: username\n level: authPriv\n auth_proto: sha256\n auth_key: auth_protocol_passphrase\n priv_proto: aes256\n priv_key: priv_protocol_passphrase\n\n```\n##### Custom OIDs\n\nIn this example:\n\n- the SNMP device is `192.0.2.1`.\n- the SNMP version is `2`.\n- the SNMP community is `public`.\n- we will update the values every 10 seconds.\n\n\n```yaml\njobs:\n - name: switch\n update_every: 10\n hostname: 192.0.2.1\n community: public\n options:\n version: 2\n charts:\n - id: \"bandwidth_port1\"\n title: \"Switch Bandwidth for port 1\"\n units: \"kilobits/s\"\n type: \"area\"\n family: \"ports\"\n dimensions:\n - name: \"in\"\n oid: \"1.3.6.1.2.1.2.2.1.10.1\"\n algorithm: \"incremental\"\n multiplier: 8\n divisor: 1000\n - name: \"out\"\n oid: \"1.3.6.1.2.1.2.2.1.16.1\"\n multiplier: -8\n divisor: 1000\n - id: \"bandwidth_port2\"\n title: \"Switch Bandwidth for port 2\"\n units: \"kilobits/s\"\n type: \"area\"\n family: \"ports\"\n dimensions:\n - name: \"in\"\n oid: \"1.3.6.1.2.1.2.2.1.10.2\"\n algorithm: \"incremental\"\n multiplier: 8\n divisor: 1000\n - name: \"out\"\n oid: \"1.3.6.1.2.1.2.2.1.16.2\"\n multiplier: -8\n divisor: 1000\n\n```\n##### Custom OIDs with multiply range\n\nIf you need to 
define many charts using incremental OIDs, you can use the `charts.multiply_range` option.\n\nThis is like the SNMPv1/2 example, but the option will multiply the current chart from 1 to 24 inclusive, producing 24 charts in total for the 24 ports of the switch `192.0.2.1`.\n\nEach of the 24 new charts will have its id (1-24) appended at:\n\n- its chart unique `id`, i.e. `bandwidth_port_1` to `bandwidth_port_24`.\n- its title, i.e. `Switch Bandwidth for port 1` to `Switch Bandwidth for port 24`.\n- its `oid` (for all dimensions), i.e. dimension in will be `1.3.6.1.2.1.2.2.1.10.1` to `1.3.6.1.2.1.2.2.1.10.24`.\n- its `priority` will be incremented for each chart so that the charts will appear on the dashboard in this order.\n\n\n```yaml\njobs:\n - name: switch\n update_every: 10\n hostname: \"192.0.2.1\"\n community: public\n options:\n version: 2\n charts:\n - id: \"bandwidth_port\"\n title: \"Switch Bandwidth for port\"\n units: \"kilobits/s\"\n type: \"area\"\n family: \"ports\"\n multiply_range: [1, 24]\n dimensions:\n - name: \"in\"\n oid: \"1.3.6.1.2.1.2.2.1.10\"\n algorithm: \"incremental\"\n multiplier: 8\n divisor: 1000\n - name: \"out\"\n oid: \"1.3.6.1.2.1.2.2.1.16\"\n multiplier: -8\n divisor: 1000\n\n```\n##### Multiple devices with a common configuration\n\nYAML supports [anchors](https://yaml.org/spec/1.2.2/#3222-anchors-and-aliases). \nThe `&` defines and names an anchor, and the `*` uses it. `<<: *anchor` means, inject the anchor, then extend. We can use anchors to share the common configuration for multiple devices.\n\nThe following example:\n\n- adds an `anchor` to the first job.\n- injects (copies) the first job configuration to the second and updates `name` and `hostname` parameters.\n- injects (copies) the first job configuration to the third and updates `name` and `hostname` parameters.\n\n\n```yaml\njobs:\n - &anchor\n name: switch\n update_every: 10\n hostname: \"192.0.2.1\"\n community: public\n options:\n version: 2\n charts:\n - id: \"bandwidth_port1\"\n title: \"Switch Bandwidth for port 1\"\n units: \"kilobits/s\"\n type: \"area\"\n family: \"ports\"\n dimensions:\n - name: \"in\"\n oid: \"1.3.6.1.2.1.2.2.1.10.1\"\n algorithm: \"incremental\"\n multiplier: 8\n divisor: 1000\n - name: \"out\"\n oid: \"1.3.6.1.2.1.2.2.1.16.1\"\n multiplier: -8\n divisor: 1000\n - <<: *anchor\n name: switch2\n hostname: \"192.0.2.2\"\n - <<: *anchor\n name: switch3\n hostname: \"192.0.2.3\"\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/snmp.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/snmp.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| hostname | Target ipv4 address. | | yes |\n| create_vnode | If set, the collector will create a Netdata Virtual Node for this SNMP device, which will appear as a separate Node in Netdata. 
| false | no |\n| vnode.guid | A unique identifier for the Virtual Node. If not set, a GUID will be automatically generated from the device's IP address. | | no |\n| vnode.hostname | The hostname that will be used for the Virtual Node. If not set, the device's hostname will be used. | | no |\n| vnode.labels | Additional key-value pairs to associate with the Virtual Node. | | no |\n| community | SNMPv1/2 community string. | public | no |\n| options.version | SNMP version. Available versions: 1, 2, 3. | 2 | no |\n| options.port | Target port. | 161 | no |\n| options.retries | Retries to attempt. | 1 | no |\n| options.timeout | SNMP request/response timeout. | 5 | no |\n| options.max_repetitions | Controls how many SNMP variables to retrieve in a single GETBULK request. | 25 | no |\n| options.max_request_size | Maximum number of OIDs allowed in a single GET request. | 60 | no |\n| network_interface_filter.by_name | Filter interfaces by their names using [simple patterns](/src/libnetdata/simple_pattern/README.md#simple-patterns). | | no |\n| network_interface_filter.by_type | Filter interfaces by their types using [simple patterns](/src/libnetdata/simple_pattern/README.md#simple-patterns). | | no |\n| user.name | SNMPv3 user name. | | no |\n| user.level | Security level of SNMPv3 messages. | | no |\n| user.auth_proto | Authentication protocol for SNMPv3 messages. | | no |\n| user.auth_key | Authentication protocol pass phrase. | | no |\n| user.priv_proto | Privacy protocol for SNMPv3 messages. | | no |\n| user.priv_key | Privacy protocol pass phrase. | | no |\n| charts | List of charts. | [] | yes |\n| charts.id | Chart ID. Used to uniquely identify the chart. | | yes |\n| charts.title | Chart title. | Untitled chart | no |\n| charts.units | Chart units. | num | no |\n| charts.family | Chart family. | charts.id | no |\n| charts.type | Chart type (line, area, stacked). | line | no |\n| charts.priority | Chart priority. | 70000 | no |\n| charts.multiply_range | Used when you need to define many charts using incremental OIDs. | [] | no |\n| charts.dimensions | List of chart dimensions. | [] | yes |\n| charts.dimensions.oid | Collected metric OID. | | yes |\n| charts.dimensions.name | Dimension name. | | yes |\n| charts.dimensions.algorithm | Dimension algorithm (absolute, incremental). | absolute | no |\n| charts.dimensions.multiplier | Collected value multiplier, applied to convert it properly to units. | 1 | no |\n| charts.dimensions.divisor | Collected value divisor, applied to convert it properly to units.
| 1 | no |\n\n##### user.level\n\nThe security of an SNMPv3 message as per RFC 3414 (`user.level`):\n\n| String value | Int value | Description |\n|:------------:|:---------:|------------------------------------------|\n| none | 1 | no message authentication or encryption |\n| authNoPriv | 2 | message authentication and no encryption |\n| authPriv | 3 | message authentication and encryption |\n\n\n##### user.auth_proto\n\nThe digest algorithm for SNMPv3 messages that require authentication (`user.auth_proto`):\n\n| String value | Int value | Description |\n|:------------:|:---------:|-------------------------------------------|\n| none | 1 | no message authentication |\n| md5 | 2 | MD5 message authentication (HMAC-MD5-96) |\n| sha | 3 | SHA message authentication (HMAC-SHA-96) |\n| sha224 | 4 | SHA message authentication (HMAC-SHA-224) |\n| sha256 | 5 | SHA message authentication (HMAC-SHA-256) |\n| sha384 | 6 | SHA message authentication (HMAC-SHA-384) |\n| sha512 | 7 | SHA message authentication (HMAC-SHA-512) |\n\n\n##### user.priv_proto\n\nThe encryption algorithm for SNMPv3 messages that require privacy (`user.priv_proto`):\n\n| String value | Int value | Description |\n|:------------:|:---------:|-------------------------------------------------------------------------|\n| none | 1 | no message encryption |\n| des | 2 | DES encryption (CBC-DES) |\n| aes | 3 | 128-bit AES encryption (CFB-AES-128) |\n| aes192 | 4 | 192-bit AES encryption (CFB-AES-192) with \"Blumenthal\" key localization |\n| aes256 | 5 | 256-bit AES encryption (CFB-AES-256) with \"Blumenthal\" key localization |\n| aes192c | 6 | 192-bit AES encryption (CFB-AES-192) with \"Reeder\" key localization |\n| aes256c | 7 | 256-bit AES encryption (CFB-AES-256) with \"Reeder\" key localization |\n\n\n#### Examples\n\n##### SNMPv1/2\n\nIn this example:\n\n- the SNMP device is `192.0.2.1`.\n- the SNMP version is `2`.\n- the SNMP community is `public`.\n- we will update the values every 10 seconds.\n\n\n```yaml\njobs:\n - name: switch\n update_every: 10\n hostname: 192.0.2.1\n community: public\n options:\n version: 2\n\n```\n##### SNMPv3\n\nTo use SNMPv3:\n\n- use `user` instead of `community`.\n- set `options.version` to 3.\n\n\n```yaml\njobs:\n - name: switch\n update_every: 10\n hostname: 192.0.2.1\n options:\n version: 3\n user:\n name: username\n level: authPriv\n auth_proto: sha256\n auth_key: auth_protocol_passphrase\n priv_proto: aes256\n priv_key: priv_protocol_passphrase\n\n```\n##### Custom OIDs\n\nIn this example:\n\n- the SNMP device is `192.0.2.1`.\n- the SNMP version is `2`.\n- the SNMP community is `public`.\n- we will update the values every 10 seconds.\n\n\n```yaml\njobs:\n - name: switch\n update_every: 10\n hostname: 192.0.2.1\n community: public\n options:\n version: 2\n charts:\n - id: \"bandwidth_port1\"\n title: \"Switch Bandwidth for port 1\"\n units: \"kilobits/s\"\n type: \"area\"\n family: \"ports\"\n dimensions:\n - name: \"in\"\n oid: \"1.3.6.1.2.1.2.2.1.10.1\"\n algorithm: \"incremental\"\n multiplier: 8\n divisor: 1000\n - name: \"out\"\n oid: \"1.3.6.1.2.1.2.2.1.16.1\"\n multiplier: -8\n divisor: 1000\n - id: \"bandwidth_port2\"\n title: \"Switch Bandwidth for port 2\"\n units: \"kilobits/s\"\n type: \"area\"\n family: \"ports\"\n dimensions:\n - name: \"in\"\n oid: \"1.3.6.1.2.1.2.2.1.10.2\"\n algorithm: \"incremental\"\n multiplier: 8\n divisor: 1000\n - name: \"out\"\n oid: \"1.3.6.1.2.1.2.2.1.16.2\"\n multiplier: -8\n divisor: 1000\n\n```\n##### Custom OIDs with multiply range\n\nIf you need to
define many charts using incremental OIDs, you can use the `charts.multiply_range` option.\n\nThis is like the SNMPv1/2 example, but the option will multiply the current chart from 1 to 24 inclusive, producing 24 charts in total for the 24 ports of the switch `192.0.2.1`.\n\nEach of the 24 new charts will have its id (1-24) appended to:\n\n- its chart unique `id`, i.e. `bandwidth_port_1` to `bandwidth_port_24`.\n- its title, i.e. `Switch Bandwidth for port 1` to `Switch Bandwidth for port 24`.\n- its `oid` (for all dimensions), i.e. the `in` dimension will be `1.3.6.1.2.1.2.2.1.10.1` to `1.3.6.1.2.1.2.2.1.10.24`.\n- its `priority` will be incremented for each chart so that the charts will appear on the dashboard in this order.\n\n\n```yaml\njobs:\n - name: switch\n update_every: 10\n hostname: \"192.0.2.1\"\n community: public\n options:\n version: 2\n charts:\n - id: \"bandwidth_port\"\n title: \"Switch Bandwidth for port\"\n units: \"kilobits/s\"\n type: \"area\"\n family: \"ports\"\n multiply_range: [1, 24]\n dimensions:\n - name: \"in\"\n oid: \"1.3.6.1.2.1.2.2.1.10\"\n algorithm: \"incremental\"\n multiplier: 8\n divisor: 1000\n - name: \"out\"\n oid: \"1.3.6.1.2.1.2.2.1.16\"\n multiplier: -8\n divisor: 1000\n\n```\n##### Multiple devices with a common configuration\n\nYAML supports [anchors](https://yaml.org/spec/1.2.2/#3222-anchors-and-aliases). \nThe `&` defines and names an anchor, and the `*` uses it. `<<: *anchor` means: inject the anchor, then extend. We can use anchors to share the common configuration for multiple devices.\n\nThe following example:\n\n- adds an `anchor` to the first job.\n- injects (copies) the first job configuration to the second and updates `name` and `hostname` parameters.\n- injects (copies) the first job configuration to the third and updates `name` and `hostname` parameters.\n\n\n```yaml\njobs:\n - &anchor\n name: switch\n update_every: 10\n hostname: \"192.0.2.1\"\n community: public\n options:\n version: 2\n charts:\n - id: \"bandwidth_port1\"\n title: \"Switch Bandwidth for port 1\"\n units: \"kilobits/s\"\n type: \"area\"\n family: \"ports\"\n dimensions:\n - name: \"in\"\n oid: \"1.3.6.1.2.1.2.2.1.10.1\"\n algorithm: \"incremental\"\n multiplier: 8\n divisor: 1000\n - name: \"out\"\n oid: \"1.3.6.1.2.1.2.2.1.16.1\"\n multiplier: -8\n divisor: 1000\n - <<: *anchor\n name: switch2\n hostname: \"192.0.2.2\"\n - <<: *anchor\n name: switch3\n hostname: \"192.0.2.3\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `snmp` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`.
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m snmp\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `snmp` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep snmp\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep snmp /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep snmp\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nThe metrics that will be collected are defined in the configuration file.\n\n### Per snmp device\n\nThese metrics refer to the SNMP device.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| sysName | SNMP device's system name (OID: [1.3.6.1.2.1.1.5](https://oidref.com/1.3.6.1.2.1.1.5)). |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| snmp.device_uptime | uptime | seconds |\n\n### Per network interface\n\nNetwork interfaces of the SNMP device being monitored. These metrics refer to each interface.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| sysName | SNMP device's system name (OID: [1.3.6.1.2.1.1.5](https://oidref.com/1.3.6.1.2.1.1.5)). |\n| ifDescr | Network interface description (OID: [1.3.6.1.2.1.2.2.1.2](https://cric.grenoble.cnrs.fr/Administrateurs/Outils/MIBS/?oid=1.3.6.1.2.1.2.2.1.2)). |\n| ifName | Network interface name (OID: [1.3.6.1.2.1.31.1.1.1.1](https://cric.grenoble.cnrs.fr/Administrateurs/Outils/MIBS/?oid=1.3.6.1.2.1.31.1.1.1.1)). |\n| ifType | Network interface type (OID: [1.3.6.1.2.1.2.2.1.3](https://cric.grenoble.cnrs.fr/Administrateurs/Outils/MIBS/?oid=1.3.6.1.2.1.2.2.1.3)).
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| snmp.device_net_interface_traffic | received, sent | kilobits/s |\n| snmp.device_net_interface_unicast | received, sent | packets/s |\n| snmp.device_net_interface_multicast | received, sent | packets/s |\n| snmp.device_net_interface_broadcast | received, sent | packets/s |\n| snmp.device_net_interface_errors | inbound, outbound | errors/s |\n| snmp.device_net_interface_discards | inbound, outbound | discards/s |\n| snmp.device_net_interface_admin_status | up, down, testing | status |\n| snmp.device_net_interface_oper_status | up, down, testing, unknown, dormant, not_present, lower_layer_down | status |\n\n", @@ -16525,6 +16753,44 @@ "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/snmp/metadata.yaml", "related_resources": "" }, + { + "meta": { + "id": "collector-go.d.plugin-spigotmc", + "plugin_name": "go.d.plugin", + "module_name": "spigotmc", + "monitored_instance": { + "name": "SpigotMC", + "link": "https://www.spigotmc.org/", + "categories": [ + "data-collection.gaming" + ], + "icon_filename": "spigot.jfif" + }, + "related_resources": { + "integrations": { + "list": [] + } + }, + "info_provided_to_referring_integrations": { + "description": "" + }, + "keywords": [ + "minecraft", + "spigotmc", + "spigot" + ], + "most_popular": false + }, + "overview": "# SpigotMC\n\nPlugin: go.d.plugin\nModule: spigotmc\n\n## Overview\n\nThis collector monitors SpigotMC server performance, in the form of average ticks per second, memory utilization, and active users.\n\n\nIt sends the `tps` and `list` commands to the server, and gathers the metrics from the responses.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects SpigotMC instances running on localhost that are listening on port 25575.\n\n> **Note that the SpigotMC RCON API requires a password**. \n> While Netdata can automatically detect SpigotMC instances and create data collection jobs, these jobs will fail unless you provide the necessary credentials.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/spigotmc.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/spigotmc.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | The IP address and port where the SpigotMC server listens for RCON connections. 
| 127.0.0.1:25575 | yes |\n| timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. | 1 | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:25575\n password: somePassword\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:25575\n password: somePassword\n\n - name: remote\n address: 203.0.113.0:25575\n password: somePassword\n\n```\n", + "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `spigotmc` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m spigotmc\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `spigotmc` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep spigotmc\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep spigotmc /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep spigotmc\n```\n\n", + "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per SpigotMC instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| spigotmc.players | players | players |\n| spigotmc.avg_tps | 1min, 5min, 15min | ticks |\n| spigotmc.memory | used, alloc | bytes |\n\n", "integration_type": "collector", "id": "go.d.plugin-spigotmc-SpigotMC", "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/spigotmc/metadata.yaml", "related_resources": "" }, { "meta": { "id": "collector-go.d.plugin-squid", @@ -16554,7 +16820,7 @@ "most_popular": false }, "overview": "# Squid\n\nPlugin: go.d.plugin\nModule: squid\n\n## Overview\n\nThis collector monitors statistics about Squid clients and servers, such as bandwidth and requests.\n\n\nIt collects metrics from the `squid-internal-mgr/counters` endpoint.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Squid instances running on localhost that are listening on port 3128.\nOn startup, it tries to collect metrics from:\n\n- http://127.0.0.1:3128\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/squid.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/squid.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:3128 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | POST | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:3128\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:3128\n\n - name: remote\n url: http://192.0.2.1:3128\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/squid.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/squid.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:3128 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | POST | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:3128\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:3128\n\n - name: remote\n url: http://192.0.2.1:3128\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `squid` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m squid\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `squid` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep squid\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep squid /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep squid\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Squid instance\n\nThese metrics refer to each monitored Squid instance.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| squid.clients_net | in, out, hits | kilobits/s |\n| squid.clients_requests | requests, hits, errors | requests/s |\n| squid.servers_net | in, out | kilobits/s |\n| squid.servers_requests | requests, errors | requests/s |\n\n", @@ -16591,7 +16857,7 @@ "most_popular": true }, "overview": "# Squid log files\n\nPlugin: go.d.plugin\nModule: squidlog\n\n## Overview\n\nThis collector monitors Squid servers by parsing their access log files.\n\n\nIt automatically detects log files of Squid servers running on localhost.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/squidlog.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/squidlog.conf\n```\n#### 
Options\n\nSquid [log format codes](http://www.squid-cache.org/Doc/config/logformat/).\n\nSquidlog is aware how to parse and interpret the following codes:\n\n| field | squid format code | description |\n|----------------|-------------------|---------------------------------------------------------------|\n| resp_time | %tr | Response time (milliseconds). |\n| client_address | %>a | Client source IP address. |\n| client_address | %>A | Client FQDN. |\n| cache_code | %Ss | Squid request status (TCP_MISS etc). |\n| http_code | %>Hs | The HTTP response status code from Content Gateway to client. |\n| resp_size | %Hs | Cache code and http code. |\n| hierarchy | %Sh/%
**Note**: don't use `$` and `%` prefixes for mapped field names.\n\n```yaml\nparser:\n log_type: ltsv\n ltsv_config:\n mapping:\n label1: field1\n label2: field2\n```\n\n\n##### parser.regexp_config.pattern\n\nUse pattern with subexpressions names. These names should be **known fields**.\n\n> **Note**: don't use `$` and `%` prefixes for mapped field names.\n\nSyntax:\n\n```yaml\nparser:\n log_type: regexp\n regexp_config:\n pattern: PATTERN\n```\n\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/squidlog.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/squidlog.conf\n```\n#### Options\n\nSquid [log format codes](https://www.squid-cache.org/Doc/config/logformat/).\n\nSquidlog is aware how to parse and interpret the following codes:\n\n| field | squid format code | description |\n|----------------|-------------------|---------------------------------------------------------------|\n| resp_time | %tr | Response time (milliseconds). |\n| client_address | %>a | Client source IP address. |\n| client_address | %>A | Client FQDN. |\n| cache_code | %Ss | Squid request status (TCP_MISS etc). |\n| http_code | %>Hs | The HTTP response status code from Content Gateway to client. |\n| resp_size | %Hs | Cache code and http code. |\n| hierarchy | %Sh/% **Note**: don't use `$` and `%` prefixes for mapped field names.\n\n```yaml\nparser:\n log_type: ltsv\n ltsv_config:\n mapping:\n label1: field1\n label2: field2\n```\n\n\n##### parser.regexp_config.pattern\n\nUse pattern with subexpressions names. These names should be **known fields**.\n\n> **Note**: don't use `$` and `%` prefixes for mapped field names.\n\nSyntax:\n\n```yaml\nparser:\n log_type: regexp\n regexp_config:\n pattern: PATTERN\n```\n\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `squidlog` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m squidlog\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `squidlog` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep squidlog\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep squidlog /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep squidlog\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Squid log files instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| squidlog.requests | requests | requests/s |\n| squidlog.excluded_requests | unmatched | requests/s |\n| squidlog.type_requests | success, bad, redirect, error | requests/s |\n| squidlog.http_status_code_class_responses | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s |\n| squidlog.http_status_code_responses | a dimension per HTTP response code | responses/s |\n| squidlog.bandwidth | sent | kilobits/s |\n| squidlog.response_time | min, max, avg | milliseconds |\n| squidlog.uniq_clients | clients | clients |\n| squidlog.cache_result_code_requests | a dimension per cache result code | requests/s |\n| squidlog.cache_result_code_transport_tag_requests | a dimension per cache result delivery transport tag | requests/s |\n| squidlog.cache_result_code_handling_tag_requests | a dimension per cache result handling tag | requests/s |\n| squidlog.cache_code_object_tag_requests | a dimension per cache result produced object tag | requests/s |\n| squidlog.cache_code_load_source_tag_requests | a dimension per cache result load source tag | requests/s |\n| squidlog.cache_code_error_tag_requests | a dimension per cache result error tag | requests/s |\n| squidlog.http_method_requests | a dimension per HTTP method | requests/s |\n| squidlog.mime_type_requests | a dimension per MIME type | requests/s |\n| squidlog.hier_code_requests | a dimension per hierarchy code | requests/s |\n| squidlog.server_address_forwarded_requests | a dimension per server address | requests/s |\n\n", @@ -16628,11 +16894,11 @@ }, "most_popular": false }, - "overview": "# StoreCLI RAID\n\nPlugin: go.d.plugin\nModule: storcli\n\n## Overview\n\nMonitors the health of StoreCLI Hardware RAID by tracking the status of RAID adapters, physical drives, and backup batteries in your storage system.\nIt relies on the [`storcli`](https://docs.broadcom.com/doc/12352476) CLI tool but avoids directly executing the binary.\nInstead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment.\nThis approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\n\nExecuted commands:\n- `storcli /cALL show all J 
nolog`\n- `storcli /cALL/eALL/sALL show all J nolog`\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/storcli.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/storcli.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | storcli binary execution timeout. | 2 | no |\n\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n```yaml\njobs:\n - name: storcli\n update_every: 5 # Collect StorCLI RAID statistics every 5 seconds\n\n```\n", + "overview": "# StoreCLI RAID\n\nPlugin: go.d.plugin\nModule: storcli\n\n## Overview\n\nMonitors the health of StoreCLI Hardware RAID by tracking the status of RAID adapters, physical drives, and backup batteries in your storage system.\nIt relies on the [`storcli`](https://docs.broadcom.com/doc/12352476) CLI tool but avoids directly executing the binary.\nInstead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment.\nThis approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\n\nExecuted commands:\n- `storcli /cALL show all J nolog`\n- `storcli /cALL/eALL/sALL show all J nolog`\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n- BSD\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/storcli.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/storcli.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n| Name | Description | Default | Required 
|\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | storcli binary execution timeout. | 2 | no |\n\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n```yaml\njobs:\n - name: storcli\n update_every: 5 # Collect StorCLI RAID statistics every 5 seconds\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `storcli` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m storcli\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `storcli` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep storcli\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep storcli /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
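For example, to narrow the output to the most recent entries, you can pipe the filter through `tail` (a minimal sketch assuming standard coreutils; the line count is arbitrary):\n\n```bash\n# illustrative only: keep the last 50 matching lines\ngrep storcli /var/log/netdata/collector.log | tail -n 50\n```\n\n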
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep storcli\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ storcli_controller_health_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/storcli.conf) | storcli.controller_health_status | RAID controller ${label:controller_number} is unhealthy |\n| [ storcli_controller_bbu_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/storcli.conf) | storcli.controller_bbu_status | RAID controller ${label:controller_number} BBU is unhealthy |\n| [ storcli_phys_drive_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/storcli.conf) | storcli.phys_drive_errors | RAID physical drive c${label:controller_number}/e${label:enclosure_number}/s${label:slot_number} errors |\n| [ storcli_phys_drive_predictive_failures ](https://github.com/netdata/netdata/blob/master/src/health/health.d/storcli.conf) | storcli.phys_drive_predictive_failures | RAID physical drive c${label:controller_number}/e${label:enclosure_number}/s${label:slot_number} predictive failures |\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per controller\n\nThese metrics refer to the Controller.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| controller_number | Controller number (index) |\n| model | Controller model |\n| driver_name | Controller driver (megaraid_sas or mpt3sas) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| storcli.controller_health_status | healthy, unhealthy | status |\n| storcli.controller_status | optimal, degraded, partially_degraded, failed | status |\n| storcli.controller_bbu_status | healthy, unhealthy, na | status |\n\n### Per physical drive\n\nThese metrics refer to the Physical Drive.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| controller_number | Controller number (index) |\n| enclosure_number | Enclosure number (index) |\n| slot_number | Slot number (index) |\n| media type | Media type (e.g. HDD) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| storcli.phys_drive_errors | media, other | errors/s |\n| storcli.phys_drive_predictive_failures | predictive_failures | failures/s |\n| storcli.phys_drive_smart_alert_status | active, inactive | status |\n| storcli.phys_drive_temperature | temperature | Celsius |\n\n### Per bbu\n\nThese metrics refer to the Backup Battery Unit.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| controller_number | Controller number (index) |\n| bbu_number | BBU number (index) |\n| model | BBU model |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| storcli.bbu_temperature | temperature | Celsius |\n\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per controller\n\nThese metrics refer to the Controller.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| controller_number | Controller number (index) |\n| model | Controller model |\n| driver_name | Controller driver (megaraid_sas or mpt3sas) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| storcli.controller_health_status | healthy, unhealthy | status |\n| storcli.controller_status | optimal, degraded, partially_degraded, failed | status |\n| storcli.controller_bbu_status | healthy, unhealthy, na | status |\n| storcli.controller_roc_temperature | temperature | Celsius |\n\n### Per physical drive\n\nThese metrics refer to the Physical Drive.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| controller_number | Controller number (index) |\n| enclosure_number | Enclosure number (index) |\n| slot_number | Slot number (index) |\n| media type | Media type (e.g. HDD) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| storcli.phys_drive_errors | media, other | errors/s |\n| storcli.phys_drive_predictive_failures | predictive_failures | failures/s |\n| storcli.phys_drive_smart_alert_status | active, inactive | status |\n| storcli.phys_drive_temperature | temperature | Celsius |\n\n### Per bbu\n\nThese metrics refer to the Backup Battery Unit.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| controller_number | Controller number (index) |\n| bbu_number | BBU number (index) |\n| model | BBU model |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| storcli.bbu_temperature | temperature | Celsius |\n\n", "integration_type": "collector", "id": "go.d.plugin-storcli-StoreCLI_RAID", "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/storcli/metadata.yaml", @@ -16665,7 +16931,7 @@ "most_popular": false }, "overview": "# Supervisor\n\nPlugin: go.d.plugin\nModule: supervisord\n\n## Overview\n\nThis collector monitors Supervisor instances.\n\nIt can collect metrics from:\n\n- [unix socket](http://supervisord.org/configuration.html?highlight=unix_http_server#unix-http-server-section-values)\n- [internal http server](http://supervisord.org/configuration.html?highlight=unix_http_server#inet-http-server-section-settings)\n\nUsed methods:\n\n- [`supervisor.getAllProcessInfo`](http://supervisord.org/api.html#supervisor.rpcinterface.SupervisorNamespaceRPCInterface.getAllProcessInfo)\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/supervisord.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo 
./edit-config go.d/supervisord.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9001/RPC2 | yes |\n| timeout | System bus requests timeout. | 1 | no |\n\n#### Examples\n\n##### HTTP\n\nCollect metrics via HTTP.\n\n```yaml\njobs:\n - name: local\n url: 'http://127.0.0.1:9001/RPC2'\n\n```\n##### Socket\n\nCollect metrics via Unix socket.\n\n```yaml\n- name: local\n url: 'unix:///run/supervisor.sock'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollect metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: 'http://127.0.0.1:9001/RPC2'\n\n - name: remote\n url: 'http://192.0.2.1:9001/RPC2'\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/supervisord.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/supervisord.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9001/RPC2 | yes |\n| timeout | System bus requests timeout. | 1 | no |\n\n#### Examples\n\n##### HTTP\n\nCollect metrics via HTTP.\n\n```yaml\njobs:\n - name: local\n url: 'http://127.0.0.1:9001/RPC2'\n\n```\n##### Socket\n\nCollect metrics via Unix socket.\n\n```yaml\n- name: local\n url: 'unix:///run/supervisor.sock'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollect metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: 'http://127.0.0.1:9001/RPC2'\n\n - name: remote\n url: 'http://192.0.2.1:9001/RPC2'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `supervisord` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m supervisord\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `supervisord` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep supervisord\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep supervisord /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep supervisord\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Supervisor instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| supervisord.summary_processes | running, non-running | processes |\n\n### Per process group\n\nThese metrics refer to the process group.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| supervisord.processes | running, non-running | processes |\n| supervisord.process_state_code | a dimension per process | code |\n| supervisord.process_exit_status | a dimension per process | exit status |\n| supervisord.process_uptime | a dimension per process | seconds |\n| supervisord.process_downtime | a dimension per process | seconds |\n\n", @@ -16701,7 +16967,7 @@ "most_popular": false }, "overview": "# Systemd Units\n\nPlugin: go.d.plugin\nModule: systemdunits\n\n## Overview\n\nThis collector monitors the state of Systemd units and unit files.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/systemdunits.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/systemdunits.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| timeout | System bus requests timeout. | 1 | no |\n| include | Systemd units selector. | *.service | no |\n| skip_transient | If set, skip data collection for systemd transient units. | false | no |\n| collect_unit_files | If set to true, collect the state of installed unit files. Enabling this may increase system overhead. | false | no |\n| collect_unit_files_every | Interval for querying systemd about unit files and their enablement state, measured in seconds. Data is cached for this interval to reduce system overhead. | 300 | no |\n| include_unit_files | Systemd unit files selector. 
| *.service | no |\n\n##### include\n\nSystemd units matching the selector will be monitored.\n\n- Logic: (pattern1 OR pattern2)\n- Pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match)\n- Syntax:\n\n```yaml\nincludes:\n - pattern1\n - pattern2\n```\n\n\n##### include_unit_files\n\nSystemd unit files matching the selector will be monitored.\n\n- Logic: (pattern1 OR pattern2)\n- Pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match)\n- Syntax:\n\n```yaml\nincludes:\n - pattern1\n - pattern2\n```\n\n\n#### Examples\n\n##### Service units\n\nCollect state of all service type units.\n\n```yaml\njobs:\n - name: service\n include:\n - '*.service'\n\n```\n##### One specific unit\n\nCollect state of one specific unit.\n\n```yaml\njobs:\n - name: my-specific-service\n include:\n - 'my-specific.service'\n\n```\n##### All unit types\n\nCollect state of all units.\n\n```yaml\njobs:\n - name: my-specific-service-unit\n include:\n - '*'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollect state of all service and socket type units.\n\n\n```yaml\njobs:\n - name: service\n include:\n - '*.service'\n\n - name: socket\n include:\n - '*.socket'\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/systemdunits.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/systemdunits.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| timeout | System bus requests timeout. | 1 | no |\n| include | Systemd units selector. | *.service | no |\n| skip_transient | If set, skip data collection for systemd transient units. | false | no |\n| collect_unit_files | If set to true, collect the state of installed unit files. Enabling this may increase system overhead. | false | no |\n| collect_unit_files_every | Interval for querying systemd about unit files and their enablement state, measured in seconds. Data is cached for this interval to reduce system overhead. | 300 | no |\n| include_unit_files | Systemd unit files selector. 
| *.service | no |\n\n##### include\n\nSystemd units matching the selector will be monitored.\n\n- Logic: (pattern1 OR pattern2)\n- Pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match)\n- Syntax:\n\n```yaml\nincludes:\n - pattern1\n - pattern2\n```\n\n\n##### include_unit_files\n\nSystemd unit files matching the selector will be monitored.\n\n- Logic: (pattern1 OR pattern2)\n- Pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match)\n- Syntax:\n\n```yaml\nincludes:\n - pattern1\n - pattern2\n```\n\n\n#### Examples\n\n##### Service units\n\nCollect state of all service type units.\n\n```yaml\njobs:\n - name: service\n include:\n - '*.service'\n\n```\n##### One specific unit\n\nCollect state of one specific unit.\n\n```yaml\njobs:\n - name: my-specific-service\n include:\n - 'my-specific.service'\n\n```\n##### All unit types\n\nCollect state of all units.\n\n```yaml\njobs:\n - name: my-specific-service-unit\n include:\n - '*'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollect state of all service and socket type units.\n\n\n```yaml\njobs:\n - name: service\n include:\n - '*.service'\n\n - name: socket\n include:\n - '*.socket'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `systemdunits` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m systemdunits\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `systemdunits` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep systemdunits\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep systemdunits /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
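For example, to keep only the newest lines, you can combine the filter with `tail` (a minimal sketch assuming standard coreutils; adjust the count as needed):\n\n```bash\n# illustrative only: keep the last 50 matching lines\ngrep systemdunits /var/log/netdata/collector.log | tail -n 50\n```\n\n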
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep systemdunits\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ systemd_service_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.service_unit_state | systemd service unit in the failed state |\n| [ systemd_socket_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.socket_unit_state | systemd socket unit in the failed state |\n| [ systemd_target_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.target_unit_state | systemd target unit in the failed state |\n| [ systemd_path_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.path_unit_state | systemd path unit in the failed state |\n| [ systemd_device_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.device_unit_state | systemd device unit in the failed state |\n| [ systemd_mount_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.mount_unit_state | systemd mount unit in the failed state |\n| [ systemd_automount_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.automount_unit_state | systemd automount unit in the failed state |\n| [ systemd_swap_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.swap_unit_state | systemd swap unit in the failed state |\n| [ systemd_scope_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.scope_unit_state | systemd scope unit in the failed state |\n| [ systemd_slice_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.slice_unit_state | systemd slice unit in the failed state |\n| [ systemd_timer_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.timer_unit_state | systemd timer unit in the failed state |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per unit\n\nThese metrics refer to the systemd unit.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| unit_name | systemd unit name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| systemd.service_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.socket_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.target_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.path_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.device_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.mount_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.automount_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.swap_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.timer_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.scope_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.slice_unit_state | active, inactive, activating, deactivating, failed | state |\n\n### Per unit file\n\nThese metrics refer to the systemd unit file.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| unit_file_name | systemd unit file name |\n| unit_file_type | systemd unit file type |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| systemd.unit_file_state | enabled, enabled-runtime, linked, linked-runtime, alias, masked, masked-runtime, static, disabled, indirect, generated, transient, bad | state |\n\n", @@ -16739,7 +17005,7 @@ "most_popular": false }, "overview": "# Tengine\n\nPlugin: go.d.plugin\nModule: tengine\n\n## Overview\n\nThis collector monitors Tengine servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Enable ngx_http_reqstat_module module.\n\nTo enable the module, see the [official documentation](ngx_http_reqstat_module](https://tengine.taobao.org/document/http_reqstat.html).\nThe default line format is the only supported format.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/tengine.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/tengine.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | http://127.0.0.1/us | yes |\n| timeout | HTTP request timeout. | 2 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/us\n\n```\n##### HTTP authentication\n\nLocal server with basic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/us\n username: foo\n password: bar\n\n```\n##### HTTPS with self-signed certificate\n\nTengine with enabled HTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1/us\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/us\n\n - name: remote\n url: http://203.0.113.10/us\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Enable the ngx_http_reqstat_module module.\n\nTo enable the module, see the [official documentation](https://tengine.taobao.org/document/http_reqstat.html).\nThe default line format is the only supported format.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/tengine.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/tengine.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/us | yes |\n| timeout | HTTP request timeout. | 2 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. 
| no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/us\n\n```\n##### HTTP authentication\n\nLocal server with basic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/us\n username: foo\n password: bar\n\n```\n##### HTTPS with self-signed certificate\n\nTengine with enabled HTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1/us\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/us\n\n - name: remote\n url: http://203.0.113.10/us\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `tengine` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m tengine\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `tengine` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep tengine\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep tengine /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep tengine\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Tengine instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| tengine.bandwidth_total | in, out | B/s |\n| tengine.connections_total | accepted | connections/s |\n| tengine.requests_total | processed | requests/s |\n| tengine.requests_per_response_code_family_total | 2xx, 3xx, 4xx, 5xx, other | requests/s |\n| tengine.requests_per_response_code_detailed_total | 200, 206, 302, 304, 403, 404, 419, 499, 500, 502, 503, 504, 508, other | requests/s |\n| tengine.requests_upstream_total | requests | requests/s |\n| tengine.tries_upstream_total | calls | calls/s |\n| tengine.requests_upstream_per_response_code_family_total | 4xx, 5xx | requests/s |\n\n", @@ -16779,7 +17045,7 @@ "most_popular": false }, "overview": "# Tomcat\n\nPlugin: go.d.plugin\nModule: tomcat\n\n## Overview\n\nThis collector monitors Tomcat metrics about bandwidth, processing time, threads and more.\n\n\nIt parses the information provided by the [Server Status](https://tomcat.apache.org/tomcat-10.0-doc/manager-howto.html#Server_Status) HTTP endpoint.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nBy default, this Tomcat collector cannot access the server's status page. To enable data collection, you will need to configure access credentials with appropriate permissions.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf the Netdata agent and Tomcat are on the same host, the collector will attempt to connect to the Tomcat server's status page at `http://localhost:8080/manager/status?XML=true`.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Access to Tomcat Status Endpoint\n\nThe Netdata agent needs read-only access to its status endpoint to collect data from the Tomcat server.\n\nYou can achieve this by creating a dedicated user named `netdata` with read-only permissions specifically for accessing the [Server Status](https://tomcat.apache.org/tomcat-10.0-doc/manager-howto.html#Server_Status) endpoint.\n\nOnce you've created the `netdata` user, you'll need to configure the username and password in the collector configuration file.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/tomcat.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/tomcat.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8080 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. 
| | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | POST | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080\n username: John\n password: Doe\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080\n username: admin1\n password: hackme1\n\n - name: remote\n url: http://192.0.2.1:8080\n username: admin2\n password: hackme2\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Access to Tomcat Status Endpoint\n\nThe Netdata agent needs read-only access to the Tomcat server's status endpoint to collect data.\n\nYou can achieve this by creating a dedicated user named `netdata` with read-only permissions specifically for accessing the [Server Status](https://tomcat.apache.org/tomcat-10.0-doc/manager-howto.html#Server_Status) endpoint.\n\nOnce you've created the `netdata` user, you'll need to configure the username and password in the collector configuration file.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/tomcat.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/tomcat.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8080 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | POST | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. 
| no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080\n username: John\n password: Doe\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080\n username: admin1\n password: hackme1\n\n - name: remote\n url: http://192.0.2.1:8080\n username: admin2\n password: hackme2\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `tomcat` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m tomcat\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `tomcat` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep tomcat\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep tomcat /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep tomcat\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Tomcat instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| tomcat.jvm_memory_usage | free, used | bytes |\n\n### Per jvm memory pool\n\nThese metrics refer to the JVM memory pool.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mempool_name | Memory Pool name. |\n| mempool_type | Memory Pool type. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| tomcat.jvm_mem_pool_memory_usage | commited, used, max | bytes |\n\n### Per connector\n\nThese metrics refer to the connector.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| connector_name | Connector name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| tomcat.connector_requests | requests | requests/s |\n| tomcat.connector_bandwidth | received, sent | bytes/s |\n| tomcat.connector_requests_processing_time | processing_time | milliseconds |\n| tomcat.connector_errors | errors | errors/s |\n| tomcat.connector_request_threads | idle, busy | threads |\n\n", @@ -16817,7 +17083,7 @@ "most_popular": false }, "overview": "# Tor\n\nPlugin: go.d.plugin\nModule: tor\n\n## Overview\n\nTracks Tor's download and upload traffic, as well as its uptime.\n\n\nIt reads the server's response to the [GETINFO](https://spec.torproject.org/control-spec/commands.html#getinfo) command.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Tor instances running on localhost that are listening on port 9051.\nOn startup, it tries to collect metrics from:\n\n- 127.0.0.1:9051\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Enable Control Port\n\nEnable `ControlPort` in `/etc/tor/torrc`.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/tor.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/tor.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | The IP address and port where the Tor's Control Port listens for connections. | 127.0.0.1:9051 | yes |\n| timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. | 1 | no |\n| password | Password for authentication. 
| | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:9051\n password: somePassword\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:9051\n password: somePassword\n\n - name: remote\n address: 203.0.113.0:9051\n password: somePassword\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Enable Control Port\n\nEnable `ControlPort` in `/etc/tor/torrc`.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/tor.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/tor.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | The IP address and port where Tor's Control Port listens for connections. | 127.0.0.1:9051 | yes |\n| timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. | 1 | no |\n| password | Password for authentication. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:9051\n password: somePassword\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:9051\n password: somePassword\n\n - name: remote\n address: 203.0.113.0:9051\n password: somePassword\n\n```\n",
     "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `tor` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m tor\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `tor` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep tor\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep tor /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep tor\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Tor instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| tor.traffic | read, write | KiB/s |\n| tor.uptime | uptime | seconds |\n\n", @@ -16855,7 +17121,7 @@ "most_popular": false }, "overview": "# Traefik\n\nPlugin: go.d.plugin\nModule: traefik\n\n## Overview\n\nThis collector monitors Traefik servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Enable built-in Prometheus exporter\n\nTo enable see [Prometheus exporter](https://doc.traefik.io/traefik/observability/metrics/prometheus/) documentation.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/traefik.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/traefik.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8082/metrics | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. 
| GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8082/metrics\n\n```\n##### Basic HTTP auth\n\nLocal server with basic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8082/metrics\n username: foo\n password: bar\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n http://127.0.0.1:8082/metrics\n\n - name: remote\n http://192.0.2.0:8082/metrics\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Enable built-in Prometheus exporter\n\nTo enable it, see the [Prometheus exporter](https://doc.traefik.io/traefik/observability/metrics/prometheus/) documentation.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/traefik.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/traefik.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8082/metrics | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8082/metrics\n\n```\n##### Basic HTTP auth\n\nLocal server with basic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8082/metrics\n username: foo\n password: bar\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8082/metrics\n\n - name: remote\n url: http://192.0.2.0:8082/metrics\n\n```\n",
     "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `traefik` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m traefik\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `traefik` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep traefik\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep traefik /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep traefik\n```\n\n",
     "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n",
     "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per entrypoint, protocol\n\nThese metrics refer to the endpoint.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| traefik.entrypoint_requests | 1xx, 2xx, 3xx, 4xx, 5xx | requests/s |\n| traefik.entrypoint_request_duration_average | 1xx, 2xx, 3xx, 4xx, 5xx | milliseconds |\n| traefik.entrypoint_open_connections | a dimension per HTTP method | connections |\n\n", @@ -16864,6 +17130,44 @@ "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/traefik/metadata.yaml", "related_resources": "" }, + { + "meta": { + "id": "collector-go.d.plugin-typesense", + "plugin_name": "go.d.plugin", + "module_name": "typesense", + "monitored_instance": { + "name": "Typesense", + "link": "https://typesense.org/", + "categories": [ + "data-collection.search-engines" + ], + "icon_filename": "typesense.svg" + }, + "related_resources": { + "integrations": { + "list": [] + } + }, + "alternative_monitored_instances": [], + "info_provided_to_referring_integrations": { + "description": "" + }, + "keywords": [ + "typesense", + "search engine" + ], + "most_popular": false + }, + "overview": "# Typesense\n\nPlugin: go.d.plugin\nModule: typesense\n\n## Overview\n\nThis collector monitors the overall health status and performance of your Typesense servers.\nIt gathers detailed metrics, including the total number of requests processed, the breakdown of different request types, and the average latency experienced by each request.\n\n\nIt gathers metrics by periodically issuing HTTP GET requests to the Typesense server:\n\n- [/health](https://typesense.org/docs/27.0/api/cluster-operations.html#health) endpoint to check server health.\n- [/stats.json](https://typesense.org/docs/27.0/api/cluster-operations.html#api-stats) endpoint to collect data on requests and latency.\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector can automatically detect Typesense instances running on:\n\n- localhost that are listening on port 8108\n- within Docker containers\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### API Key Configuration\n\nWhile optional, configuring an [API key](https://typesense.org/docs/0.20.0/api/api-keys.html#api-keys) is highly recommended to enable the collector to gather [stats metrics](https://typesense.org/docs/27.0/api/cluster-operations.html#api-stats), including request counts and latency.\nWithout an API key, the collector will only collect health status information.\n\n> If you're running Typesense with the API key provided as a command-line parameter (e.g., `--api-key=XYZ`), Netdata can automatically detect and use this key for queries.\n> In this case, no additional configuration is required.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/typesense.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config 
directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/typesense.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8108 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| api_key | The Typesense [API Key](https://typesense.org/docs/0.20.0/api/api-keys.html#api-keys) (`X-TYPESENSE-API-KEY`). | | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8108\n api_key: XYZ\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8108\n api_key: XYZ\n\n - name: remote\n url: http://192.0.2.1:8108\n api_key: XYZ\n\n```\n", + "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `typesense` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m typesense\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `typesense` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep typesense\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep typesense /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep typesense\n```\n\n", + "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Typesense instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| typesense.health_status | ok, out_of_disk, out_of_memory | status |\n| typesense.total_requests | requests | requests/s |\n| typesense.requests_by_operation | search, write, import, delete | requests/s |\n| typesense.latency_by_operation | search, write, import, delete | milliseconds |\n| typesense.overloaded_requests | overloaded | requests/s |\n\n", + "integration_type": "collector", + "id": "go.d.plugin-typesense-Typesense", + "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/typesense/metadata.yaml", + "related_resources": "" + }, { "meta": { "id": "collector-go.d.plugin-unbound", @@ -16892,7 +17196,7 @@ "most_popular": false }, "overview": "# Unbound\n\nPlugin: go.d.plugin\nModule: unbound\n\n## Overview\n\nThis collector monitors Unbound servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Enable remote control interface\n\nSet `control-enable` to yes in [unbound.conf](https://nlnetlabs.nl/documentation/unbound/unbound.conf).\n\n\n#### Check permissions and adjust if necessary\n\nIf using unix socket:\n\n- socket should be readable and writeable by `netdata` user\n\nIf using ip socket and TLS is disabled:\n\n- socket should be accessible via network\n\nIf TLS is enabled, in addition:\n\n- `control-key-file` should be readable by `netdata` user\n- `control-cert-file` should be readable by `netdata` user\n\nFor auto-detection parameters from `unbound.conf`:\n\n- `unbound.conf` should be readable by `netdata` user\n- if you have several configuration files (include feature) all of them should be readable by `netdata` user\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file 
name for this integration is `go.d/unbound.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/unbound.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address in IP:PORT format. | 127.0.0.1:8953 | yes |\n| timeout | Connection/read/write/ssl handshake timeout. | 1 | no |\n| conf_path | Absolute path to the unbound configuration file. | /etc/unbound/unbound.conf | no |\n| cumulative_stats | Statistics collection mode. Should have the same value as the `statistics-cumulative` parameter in the unbound configuration file. | no | no |\n| use_tls | Whether to use TLS or not. | yes | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | yes | no |\n| tls_ca | Certificate authority that client use when verifying server certificates. | | no |\n| tls_cert | Client tls certificate. | /etc/unbound/unbound_control.pem | no |\n| tls_key | Client tls key. | /etc/unbound/unbound_control.key | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:8953\n\n```\n##### Unix socket\n\nConnecting through Unix socket.\n\n```yaml\njobs:\n - name: socket\n address: /var/run/unbound.sock\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:8953\n\n - name: remote\n address: 203.0.113.11:8953\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Enable remote control interface\n\nSet `control-enable` to yes in [unbound.conf](https://nlnetlabs.nl/documentation/unbound/unbound.conf).\n\n\n#### Check permissions and adjust if necessary\n\nIf using unix socket:\n\n- socket should be readable and writeable by `netdata` user\n\nIf using ip socket and TLS is disabled:\n\n- socket should be accessible via network\n\nIf TLS is enabled, in addition:\n\n- `control-key-file` should be readable by `netdata` user\n- `control-cert-file` should be readable by `netdata` user\n\nFor auto-detection parameters from `unbound.conf`:\n\n- `unbound.conf` should be readable by `netdata` user\n- if you have several configuration files (include feature) all of them should be readable by `netdata` user\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/unbound.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/unbound.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | 
Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address in IP:PORT format. | 127.0.0.1:8953 | yes |\n| timeout | Connection/read/write/SSL handshake timeout. | 1 | no |\n| conf_path | Absolute path to the unbound configuration file. | /etc/unbound/unbound.conf | no |\n| cumulative_stats | Statistics collection mode. Should have the same value as the `statistics-cumulative` parameter in the unbound configuration file. | no | no |\n| use_tls | Whether to use TLS or not. | yes | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | yes | no |\n| tls_ca | Certificate authority that the client uses when verifying server certificates. | | no |\n| tls_cert | Client TLS certificate. | /etc/unbound/unbound_control.pem | no |\n| tls_key | Client TLS key. | /etc/unbound/unbound_control.key | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:8953\n\n```\n##### Unix socket\n\nConnecting through Unix socket.\n\n```yaml\njobs:\n - name: socket\n address: /var/run/unbound.sock\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:8953\n\n - name: remote\n address: 203.0.113.11:8953\n\n```\n",
     "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `unbound` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m unbound\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `unbound` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep unbound\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep unbound /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep unbound\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Unbound instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| unbound.queries | queries | queries |\n| unbound.queries_ip_ratelimited | ratelimited | queries |\n| unbound.dnscrypt_queries | crypted, cert, cleartext, malformed | queries |\n| unbound.cache | hits, miss | events |\n| unbound.cache_percentage | hits, miss | percentage |\n| unbound.prefetch | prefetches | prefetches |\n| unbound.expired | expired | replies |\n| unbound.zero_ttl_replies | zero_ttl | replies |\n| unbound.recursive_replies | recursive | replies |\n| unbound.recursion_time | avg, median | milliseconds |\n| unbound.request_list_usage | avg, max | queries |\n| unbound.current_request_list_usage | all, users | queries |\n| unbound.request_list_jostle_list | overwritten, dropped | queries |\n| unbound.tcpusage | usage | buffers |\n| unbound.uptime | time | seconds |\n| unbound.cache_memory | message, rrset, dnscrypt_nonce, dnscrypt_shared_secret | KB |\n| unbound.mod_memory | iterator, respip, validator, subnet, ipsec | KB |\n| unbound.mem_streamwait | streamwait | KB |\n| unbound.cache_count | infra, key, msg, rrset, dnscrypt_nonce, shared_secret | items |\n| unbound.type_queries | a dimension per query type | queries |\n| unbound.class_queries | a dimension per query class | queries |\n| unbound.opcode_queries | a dimension per query opcode | queries |\n| unbound.flag_queries | qr, aa, tc, rd, ra, z, ad, cd | queries |\n| unbound.rcode_answers | a dimension per reply rcode | replies |\n\n### Per thread\n\nThese metrics refer to threads.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| unbound.thread_queries | queries | queries |\n| unbound.thread_queries_ip_ratelimited | ratelimited | queries |\n| unbound.thread_dnscrypt_queries | crypted, cert, cleartext, malformed | queries |\n| unbound.thread_cache | hits, miss | events |\n| unbound.thread_cache_percentage | hits, miss | percentage |\n| unbound.thread_prefetch | prefetches | prefetches |\n| unbound.thread_expired | expired | replies |\n| unbound.thread_zero_ttl_replies | zero_ttl | replies |\n| unbound.thread_recursive_replies | recursive | replies |\n| unbound.thread_recursion_time | avg, median | milliseconds |\n| unbound.thread_request_list_usage | avg, max | queries |\n| unbound.thread_current_request_list_usage | all, users | queries |\n| unbound.thread_request_list_jostle_list | overwritten, dropped | queries |\n| unbound.thread_tcpusage | usage | buffers |\n\n", @@ -16929,7 +17233,7 @@ "most_popular": false }, "overview": "# UPS (NUT)\n\nPlugin: go.d.plugin\nModule: upsd\n\n## Overview\n\nThis collector monitors Uninterruptible Power Supplies by polling the UPS daemon using the NUT network protocol.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, 
including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/upsd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/upsd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | UPS daemon address in IP:PORT format. | 127.0.0.1:3493 | yes |\n| timeout | Connection/read/write timeout in seconds. The timeout includes name resolution, if required. | 2 | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:3493\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:3493\n\n - name: remote\n address: 203.0.113.0:3493\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/upsd.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/upsd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | UPS daemon address in IP:PORT format. | 127.0.0.1:3493 | yes |\n| timeout | Connection/read/write timeout in seconds. The timeout includes name resolution, if required. 
| 2 | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:3493\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:3493\n\n - name: remote\n address: 203.0.113.0:3493\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `upsd` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m upsd\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `upsd` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep upsd\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep upsd /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep upsd\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ upsd_10min_ups_load ](https://github.com/netdata/netdata/blob/master/src/health/health.d/upsd.conf) | upsd.ups_load | UPS ${label:ups_name} average load over the last 10 minutes |\n| [ upsd_ups_battery_charge ](https://github.com/netdata/netdata/blob/master/src/health/health.d/upsd.conf) | upsd.ups_battery_charge | UPS ${label:ups_name} average battery charge over the last minute |\n| [ upsd_ups_last_collected_secs ](https://github.com/netdata/netdata/blob/master/src/health/health.d/upsd.conf) | upsd.ups_load | UPS ${label:ups_name} number of seconds since the last successful data collection |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per ups\n\nThese metrics refer to the UPS unit.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| ups_name | UPS name. |\n| battery_type | Battery type (chemistry). 
\"battery.type\" variable value. |\n| device_model | Device model. \"device.mode\" variable value. |\n| device_serial | Device serial number. \"device.serial\" variable value. |\n| device_manufacturer | Device manufacturer. \"device.mfr\" variable value. |\n| device_type | Device type (ups, pdu, scd, psu, ats). \"device.type\" variable value. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| upsd.ups_load | load | percentage |\n| upsd.ups_load_usage | load_usage | Watts |\n| upsd.ups_status | on_line, on_battery, low_battery, high_battery, replace_battery, charging, discharging, bypass, calibration, offline, overloaded, trim_input_voltage, boost_input_voltage, forced_shutdown, other | status |\n| upsd.ups_temperature | temperature | Celsius |\n| upsd.ups_battery_charge | charge | percentage |\n| upsd.ups_battery_estimated_runtime | runtime | seconds |\n| upsd.ups_battery_voltage | voltage | Volts |\n| upsd.ups_battery_voltage_nominal | nominal_voltage | Volts |\n| upsd.ups_input_voltage | voltage | Volts |\n| upsd.ups_input_voltage_nominal | nominal_voltage | Volts |\n| upsd.ups_input_current | current | Ampere |\n| upsd.ups_input_current_nominal | nominal_current | Ampere |\n| upsd.ups_input_frequency | frequency | Hz |\n| upsd.ups_input_frequency_nominal | nominal_frequency | Hz |\n| upsd.ups_output_voltage | voltage | Volts |\n| upsd.ups_output_voltage_nominal | nominal_voltage | Volts |\n| upsd.ups_output_current | current | Ampere |\n| upsd.ups_output_current_nominal | nominal_current | Ampere |\n| upsd.ups_output_frequency | frequency | Hz |\n| upsd.ups_output_frequency_nominal | nominal_frequency | Hz |\n\n", @@ -16967,7 +17271,7 @@ "most_popular": false }, "overview": "# uWSGI\n\nPlugin: go.d.plugin\nModule: uwsgi\n\n## Overview\n\nMonitors UWSGI worker health and performance by collecting metrics like requests, transmitted data, exceptions, and harakiris.\n\n\nIt fetches [Stats Server](https://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html) statistics over TCP.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAutomatically discovers and collects UWSGI statistics from the following default locations:\n\n- localhost:1717\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Enable the uWSGI Stats Server\n\nSee [Stats Server](https://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html) for details.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/uwsgi.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/uwsgi.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| address | The IP address and port where the UWSGI [Stats Server](https://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html) listens for connections. | 127.0.0.1:1717 | yes |\n| timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. | 1 | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:1717\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:1717\n\n - name: remote\n address: 203.0.113.0:1717\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Enable the uWSGI Stats Server\n\nSee [Stats Server](https://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html) for details.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/uwsgi.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/uwsgi.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | The IP address and port where the UWSGI [Stats Server](https://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html) listens for connections. | 127.0.0.1:1717 | yes |\n| timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. | 1 | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:1717\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:1717\n\n - name: remote\n address: 203.0.113.0:1717\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `uwsgi` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m uwsgi\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `uwsgi` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep uwsgi\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep uwsgi /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep uwsgi\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per uWSGI instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| uwsgi.transmitted_data | tx | bytes/s |\n| uwsgi.requests | requests | requests/s |\n| uwsgi.harakiris | harakiris | harakiris/s |\n| uwsgi.respawns | respawns | respawns/s |\n\n### Per worker\n\nThese metrics refer to the Worker process.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| worker_id | Worker ID. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| uwsgi.worker_transmitted_data | tx | bytes/s |\n| uwsgi.worker_requests | requests | requests/s |\n| uwsgi.worker_delta_requests | delta_requests | requests/s |\n| uwsgi.worker_average_request_time | avg | milliseconds |\n| uwsgi.worker_harakiris | harakiris | harakiris/s |\n| uwsgi.worker_exceptions | exceptions | exceptions/s |\n| uwsgi.worker_status | idle, busy, cheap, pause, sig | status |\n| uwsgi.worker_request_handling_status | accepting, not_accepting | status |\n| uwsgi.worker_respawns | respawns | respawns/s |\n| uwsgi.worker_memory_rss | rss | bytes |\n| uwsgi.worker_memory_vsz | vsz | bytes |\n\n", @@ -16976,6 +17280,46 @@ "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/uwsgi/metadata.yaml", "related_resources": "" }, + { + "meta": { + "plugin_name": "go.d.plugin", + "module_name": "varnish", + "monitored_instance": { + "name": "Varnish", + "link": "https://varnish-cache.org/", + "categories": [ + "data-collection.web-servers-and-web-proxies" + ], + "icon_filename": "varnish.svg" + }, + "related_resources": { + "integrations": { + "list": [] + } + }, + "info_provided_to_referring_integrations": { + "description": "" + }, + "keywords": [ + "varnish", + "varnishstat", + "varnishd", + "cache", + "web server", + "web cache" + ], + "most_popular": false + }, + "overview": "# Varnish\n\nPlugin: go.d.plugin\nModule: varnish\n\n## Overview\n\nThis collector monitors Varnish instances, supporting both the open-source Varnish-Cache and the commercial Varnish-Plus.\n\nIt tracks key performance metrics, along with detailed statistics for Backends (VBE) and Storages (SMF, SMA, MSE).\n\nIt relies on the [`varnishstat`](https://varnish-cache.org/docs/trunk/reference/varnishstat.html) CLI tool but avoids directly executing the binary.\nInstead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment.\nThis approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAutomatically detects and monitors Varnish instances running on the host or inside Docker containers.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/varnish.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/varnish.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. 
| 10 | no |\n| timeout | Timeout for executing the binary, specified in seconds. | 2 | no |\n| instance_name | Specifies the name of the Varnish instance to collect metrics from. This corresponds to the `-n` argument used with the [varnishstat](https://varnish-cache.org/docs/trunk/reference/varnishstat.html) command. | | no |\n| docker_container | Specifies the name of the Docker container where the Varnish instance is running. If set, the `varnishstat` command will be executed within this container. | | no |\n\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n```yaml\njobs:\n - name: varnish\n update_every: 5\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `varnish` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m varnish\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `varnish` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep varnish\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep varnish /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep varnish\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Varnish instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| varnish.client_session_connections | accepted, dropped | connections/s |\n| varnish.client_requests | received | requests/s |\n| varnish.cache_hit_ratio_total | hit, miss, hitpass, hitmiss | percent |\n| varnish.cache_hit_ratio_delta | hit, miss, hitpass, hitmiss | percent |\n| varnish.cache_expired_objects | expired | objects/s |\n| varnish.cache_lru_activity | nuked, moved | objects/s |\n| varnish.threads | threads | threads |\n| varnish.thread_management_activity | created, failed, destroyed, limited | threads/s |\n| varnish.thread_queue_len | queue_length | threads |\n| varnish.backends_requests | sent | requests/s |\n| varnish.esi_parsing_issues | errors, warnings | issues/s |\n| varnish.mgmt_process_uptime | uptime | seconds |\n| varnish.child_process_uptime | uptime | seconds |\n\n### Per Backend\n\nThese metrics refer to the Backend (VBE).\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| varnish.backend_data_transfer | req_header, req_body, resp_header, resp_body | bytes/s |\n\n### Per Storage\n\nThese metrics refer to the Storage (SMA, SMF, MSE).\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| varnish.storage_space_usage | free, used | bytes |\n| varnish.storage_allocated_objects | allocated | objects |\n\n", + "integration_type": "collector", + "id": "go.d.plugin-varnish-Varnish", + "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/varnish/metadata.yaml", + "related_resources": "" + }, { "meta": { "id": "collector-go.d.plugin-vcsa", @@ -17003,7 +17347,7 @@ "most_popular": false }, "overview": "# vCenter Server Appliance\n\nPlugin: go.d.plugin\nModule: vcsa\n\n## Overview\n\nThis collector monitors [health statistics](https://developer.vmware.com/apis/vsphere-automation/latest/appliance/health/) of vCenter Server Appliance servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/vcsa.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/vcsa.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | yes |\n| password | Password for basic HTTP authentication. | | yes |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | false | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | false | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: vcsa1\n url: https://203.0.113.1\n username: admin@vsphere.local\n password: password\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nTwo instances.\n\n\n```yaml\njobs:\n - name: vcsa1\n url: https://203.0.113.1\n username: admin@vsphere.local\n password: password\n\n - name: vcsa2\n url: https://203.0.113.10\n username: admin@vsphere.local\n password: password\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/vcsa.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/vcsa.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | yes |\n| password | Password for basic HTTP authentication. | | yes |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | false | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | false | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: vcsa1\n url: https://203.0.113.1\n username: admin@vsphere.local\n password: password\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nTwo instances.\n\n\n```yaml\njobs:\n - name: vcsa1\n url: https://203.0.113.1\n username: admin@vsphere.local\n password: password\n\n - name: vcsa2\n url: https://203.0.113.10\n username: admin@vsphere.local\n password: password\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `vcsa` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m vcsa\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `vcsa` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep vcsa\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep vcsa /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep vcsa\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ vcsa_system_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.system_health_status | VCSA overall system status is orange. One or more components are degraded. |\n| [ vcsa_system_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.system_health_status | VCSA overall system status is red. One or more components are unavailable or will stop functioning soon. |\n| [ vcsa_applmgmt_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.applmgmt_health_status | VCSA ApplMgmt component status is orange. It is degraded, and may have serious problems. |\n| [ vcsa_applmgmt_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.applmgmt_health_status | VCSA ApplMgmt component status is red. 
It is unavailable, or will stop functioning soon. |\n| [ vcsa_load_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.load_health_status | VCSA Load component status is orange. It is degraded, and may have serious problems. |\n| [ vcsa_load_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.load_health_status | VCSA Load component status is red. It is unavailable, or will stop functioning soon. |\n| [ vcsa_mem_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.mem_health_status | VCSA Memory component status is orange. It is degraded, and may have serious problems. |\n| [ vcsa_mem_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.mem_health_status | VCSA Memory component status is red. It is unavailable, or will stop functioning soon. |\n| [ vcsa_swap_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.swap_health_status | VCSA Swap component status is orange. It is degraded, and may have serious problems. |\n| [ vcsa_swap_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.swap_health_status | VCSA Swap component status is red. It is unavailable, or will stop functioning soon. |\n| [ vcsa_database_storage_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.database_storage_health_status | VCSA Database Storage component status is orange. It is degraded, and may have serious problems. |\n| [ vcsa_database_storage_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.database_storage_health_status | VCSA Database Storage component status is red. It is unavailable, or will stop functioning soon. |\n| [ vcsa_storage_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.storage_health_status | VCSA Storage component status is orange. It is degraded, and may have serious problems. |\n| [ vcsa_storage_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.storage_health_status | VCSA Storage component status is red. It is unavailable, or will stop functioning soon. |\n| [ vcsa_software_packages_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.software_packages_health_status | VCSA software packages security updates are available. |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per vCenter Server Appliance instance\n\nThese metrics refer to the entire monitored application.\n
\nSee health statuses\nOverall System Health:\n\n| Status | Description |\n|:-------:|:-------------------------------------------------------------------------------------------------------------------------|\n| green | All components in the appliance are healthy. |\n| yellow | One or more components in the appliance might become overloaded soon. |\n| orange | One or more components in the appliance might be degraded. |\n| red | One or more components in the appliance might be in an unusable status and the appliance might become unresponsive soon. |\n| gray | No health data is available. |\n| unknown | Collector failed to decode status. |\n\nComponents Health:\n\n| Status | Description |\n|:-------:|:-------------------------------------------------------------|\n| green | The component is healthy. |\n| yellow | The component is healthy, but may have some problems. |\n| orange | The component is degraded, and may have serious problems. |\n| red | The component is unavailable, or will stop functioning soon. |\n| gray | No health data is available. |\n| unknown | Collector failed to decode status. |\n\nSoftware Updates Health:\n\n| Status | Description |\n|:-------:|:-----------------------------------------------------|\n| green | No updates available. |\n| orange | Non-security patches might be available. |\n| red | Security patches might be available. |\n| gray | An error retrieving information on software updates. |\n| unknown | Collector failed to decode status. |\n\n
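Each of these statuses is exposed as a separate dimension of the corresponding `vcsa.*_health_status` chart (see the metrics table below). To check which status is currently reported, you can query a status chart through the Agent API (a quick sketch, assuming a local Agent listening on the default port 19999):\n\n```bash\n# Fetch the last 10 seconds of the overall system health chart as JSON\ncurl -s 'http://127.0.0.1:19999/api/v1/data?chart=vcsa.system_health_status&after=-10&format=json'\n```\n\nThe dimension for the active status is expected to report 1, and all others 0.\n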
\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| vcsa.system_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.applmgmt_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.load_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.mem_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.swap_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.database_storage_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.storage_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.software_packages_health_status | green, red, orange, gray, unknown | status |\n\n", @@ -17040,10 +17384,10 @@ "most_popular": false }, "overview": "# VerneMQ\n\nPlugin: go.d.plugin\nModule: vernemq\n\n## Overview\n\nThis collector monitors VerneMQ instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/vernemq.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/vernemq.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8888/metrics | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8888/metrics\n\n```\n##### HTTP authentication\n\nLocal instance with basic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8888/metrics\n username: username\n password: password\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8888/metrics\n\n - name: remote\n url: http://203.0.113.10:8888/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/vernemq.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/vernemq.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8888/metrics | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8888/metrics\n\n```\n##### HTTP authentication\n\nLocal instance with basic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8888/metrics\n username: username\n password: password\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8888/metrics\n\n - name: remote\n url: http://203.0.113.10:8888/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `vernemq` collector, run the `go.d.plugin` with the debug option enabled. 
The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m vernemq\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `vernemq` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep vernemq\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep vernemq /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep vernemq\n```\n\n",
-    "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ vernemq_socket_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.socket_errors | number of socket errors in the last minute |\n| [ vernemq_queue_message_drop ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.queue_undelivered_messages | number of dropped messaged due to full queues in the last minute |\n| [ vernemq_queue_message_expired ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.queue_undelivered_messages | number of messages which expired before delivery in the last minute |\n| [ vernemq_queue_message_unhandled ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.queue_undelivered_messages | number of unhandled messages (connections with clean session=true) in the last minute |\n| [ vernemq_average_scheduler_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.average_scheduler_utilization | average scheduler utilization over the last 10 minutes |\n| [ vernemq_cluster_dropped ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.cluster_dropped | amount of traffic dropped during communication with the cluster nodes in the last minute |\n| [ vernemq_netsplits ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vvernemq.netsplits | number of detected netsplits (split brain situation) in the last minute |\n| [ vernemq_mqtt_connack_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | 
vernemq.mqtt_connack_sent_reason | number of sent unsuccessful v3/v5 CONNACK packets in the last minute |\n| [ vernemq_mqtt_disconnect_received_reason_not_normal ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_disconnect_received_reason | number of received not normal v5 DISCONNECT packets in the last minute |\n| [ vernemq_mqtt_disconnect_sent_reason_not_normal ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_disconnect_sent_reason | number of sent not normal v5 DISCONNECT packets in the last minute |\n| [ vernemq_mqtt_subscribe_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_subscribe_error | number of failed v3/v5 SUBSCRIBE operations in the last minute |\n| [ vernemq_mqtt_subscribe_auth_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_subscribe_auth_error | number of unauthorized v3/v5 SUBSCRIBE attempts in the last minute |\n| [ vernemq_mqtt_unsubscribe_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_unsubscribe_error | number of failed v3/v5 UNSUBSCRIBE operations in the last minute |\n| [ vernemq_mqtt_publish_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_publish_errors | number of failed v3/v5 PUBLISH operations in the last minute |\n| [ vernemq_mqtt_publish_auth_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_publish_auth_errors | number of unauthorized v3/v5 PUBLISH attempts in the last minute |\n| [ vernemq_mqtt_puback_received_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_puback_received_reason | number of received unsuccessful v5 PUBACK packets in the last minute |\n| [ vernemq_mqtt_puback_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_puback_sent_reason | number of sent unsuccessful v5 PUBACK packets in the last minute |\n| [ vernemq_mqtt_puback_unexpected ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_puback_invalid_error | number of received unexpected v3/v5 PUBACK packets in the last minute |\n| [ vernemq_mqtt_pubrec_received_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubrec_received_reason | number of received unsuccessful v5 PUBREC packets in the last minute |\n| [ vernemq_mqtt_pubrec_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubrec_sent_reason | number of sent unsuccessful v5 PUBREC packets in the last minute |\n| [ vernemq_mqtt_pubrec_invalid_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubrec_invalid_error | number of received unexpected v3 PUBREC packets in the last minute |\n| [ vernemq_mqtt_pubrel_received_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubrel_received_reason | number of received unsuccessful v5 PUBREL packets in the last minute |\n| [ vernemq_mqtt_pubrel_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubrel_sent_reason | 
number of sent unsuccessful v5 PUBREL packets in the last minute |\n| [ vernemq_mqtt_pubcomp_received_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubcomp_received_reason | number of received unsuccessful v5 PUBCOMP packets in the last minute |\n| [ vernemq_mqtt_pubcomp_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubcomp_sent_reason | number of sent unsuccessful v5 PUBCOMP packets in the last minute |\n| [ vernemq_mqtt_pubcomp_unexpected ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubcomp_invalid_error | number of received unexpected v3/v5 PUBCOMP packets in the last minute |\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per VerneMQ instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| vernemq.sockets | open | sockets |\n| vernemq.socket_operations | open, close | sockets/s |\n| vernemq.client_keepalive_expired | closed | sockets/s |\n| vernemq.socket_close_timeout | closed | sockets/s |\n| vernemq.socket_errors | errors | errors/s |\n| vernemq.queue_processes | queue_processes | queue processes |\n| vernemq.queue_processes_operations | setup, teardown | events/s |\n| vernemq.queue_process_init_from_storage | queue_processes | queue processes/s |\n| vernemq.queue_messages | received, sent | messages/s |\n| vernemq.queue_undelivered_messages | dropped, expired, unhandled | messages/s |\n| vernemq.router_subscriptions | subscriptions | subscriptions |\n| vernemq.router_matched_subscriptions | local, remote | subscriptions/s |\n| vernemq.router_memory | used | KiB |\n| vernemq.average_scheduler_utilization | utilization | percentage |\n| vernemq.system_utilization_scheduler | a dimension per scheduler | percentage |\n| vernemq.system_processes | processes | processes |\n| vernemq.system_reductions | reductions | ops/s |\n| vernemq.system_context_switches | context_switches | ops/s |\n| vernemq.system_io | received, sent | kilobits/s |\n| vernemq.system_run_queue | ready | processes |\n| vernemq.system_gc_count | gc | ops/s |\n| vernemq.system_gc_words_reclaimed | words_reclaimed | ops/s |\n| vernemq.system_allocated_memory | processes, system | KiB |\n| vernemq.bandwidth | received, sent | kilobits/s |\n| vernemq.retain_messages | messages | messages |\n| vernemq.retain_memory | used | KiB |\n| vernemq.cluster_bandwidth | received, sent | kilobits/s |\n| vernemq.cluster_dropped | dropped | kilobits/s |\n| vernemq.netsplit_unresolved | unresolved | netsplits |\n| vernemq.netsplits | resolved, detected | netsplits/s |\n| vernemq.mqtt_auth | received, sent | packets/s |\n| vernemq.mqtt_auth_received_reason | a dimensions per reason | packets/s |\n| vernemq.mqtt_auth_sent_reason | a dimensions per reason | packets/s |\n| vernemq.mqtt_connect | connect, connack | packets/s |\n| vernemq.mqtt_connack_sent_reason | a dimensions per reason | packets/s |\n| vernemq.mqtt_disconnect | received, sent | packets/s |\n| vernemq.mqtt_disconnect_received_reason | a dimensions per reason | packets/s |\n| vernemq.mqtt_disconnect_sent_reason | a dimensions per reason | packets/s |\n| vernemq.mqtt_subscribe | subscribe, suback | packets/s |\n| 
vernemq.mqtt_subscribe_error | failed | ops/s |\n| vernemq.mqtt_subscribe_auth_error | unauth | attempts/s |\n| vernemq.mqtt_unsubscribe | unsubscribe, unsuback | packets/s |\n| vernemq.mqtt_unsubscribe_error | mqtt_unsubscribe_error | ops/s |\n| vernemq.mqtt_publish | received, sent | packets/s |\n| vernemq.mqtt_publish_errors | failed | ops/s |\n| vernemq.mqtt_publish_auth_errors | unauth | attempts/s |\n| vernemq.mqtt_puback | received, sent | packets/s |\n| vernemq.mqtt_puback_received_reason | a dimensions per reason | packets/s |\n| vernemq.mqtt_puback_sent_reason | a dimensions per reason | packets/s |\n| vernemq.mqtt_puback_invalid_error | unexpected | messages/s |\n| vernemq.mqtt_pubrec | received, sent | packets/s |\n| vernemq.mqtt_pubrec_received_reason | a dimensions per reason | packets/s |\n| vernemq.mqtt_pubrec_sent_reason | a dimensions per reason | packets/s |\n| vernemq.mqtt_pubrec_invalid_error | unexpected | messages/s |\n| vernemq.mqtt_pubrel | received, sent | packets/s |\n| vernemq.mqtt_pubrel_received_reason | a dimensions per reason | packets/s |\n| vernemq.mqtt_pubrel_sent_reason | a dimensions per reason | packets/s |\n| vernemq.mqtt_pubcom | received, sent | packets/s |\n| vernemq.mqtt_pubcomp_received_reason | a dimensions per reason | packets/s |\n| vernemq.mqtt_pubcomp_sent_reason | a dimensions per reason | packets/s |\n| vernemq.mqtt_pubcomp_invalid_error | unexpected | messages/s |\n| vernemq.mqtt_ping | pingreq, pingresp | packets/s |\n| vernemq.node_uptime | time | seconds |\n\n", + "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ vernemq_socket_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_socket_errors | Node ${label:node} socket errors in the last minute |\n| [ vernemq_queue_message_drop ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_queue_undelivered_messages | Node ${label:node} dropped messages due to full queues in the last minute |\n| [ vernemq_queue_message_expired ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_queue_undelivered_messages | Node ${label:node} expired before delivery messages in the last minute |\n| [ vernemq_queue_message_unhandled ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_queue_undelivered_messages | Node ${label:node} unhandled messages in the last minute |\n| [ vernemq_average_scheduler_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_average_scheduler_utilization | Node ${label:node} scheduler utilization over the last 10 minutes |\n| [ vernemq_cluster_dropped ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_cluster_dropped | Node ${label:node} traffic dropped during communication with the cluster nodes in the last minute |\n| [ vernemq_netsplits ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_netsplits | Node ${label:node} detected netsplits (split brain) in the last minute |\n| [ vernemq_mqtt_connack_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_connack_sent_by_reason_code | Node ${label:node} unsuccessful sent v5 CONNACK packets in the last minute |\n| [ 
vernemq_mqtt_disconnect_received_reason_not_normal ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_disconnect_received_by_reason_code | Node ${label:node} received not normal v5 DISCONNECT packets in the last minute |\n| [ vernemq_mqtt_disconnect_sent_reason_not_normal ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_disconnect_sent_by_reason_code | Node ${label:node} sent not normal v5 DISCONNECT packets in the last minute |\n| [ vernemq_mqtt_subscribe_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_subscribe_error | Node ${label:node} mqtt v${label:mqtt_version} failed SUBSCRIBE operations in the last minute |\n| [ vernemq_mqtt_subscribe_auth_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_subscribe_auth_error | Node ${label:node} mqtt v${label:mqtt_version} unauthorized SUBSCRIBE attempts in the last minute |\n| [ vernemq_mqtt_unsubscribe_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_unsubscribe_error | Node ${label:node} mqtt v${label:mqtt_version} failed UNSUBSCRIBE operations in the last minute |\n| [ vernemq_mqtt_publish_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_publish_errors | Node ${label:node} mqtt v${label:mqtt_version} failed PUBLISH operations in the last minute |\n| [ vernemq_mqtt_publish_auth_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_publish_auth_errors | Node ${label:node} mqtt v${label:mqtt_version} unauthorized PUBLISH attempts in the last minute |\n| [ vernemq_mqtt_puback_received_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_puback_received_by_reason_code | Node ${label:node} mqtt v5 received unsuccessful PUBACK packets in the last minute |\n| [ vernemq_mqtt_puback_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_puback_sent_by_reason_code | Node ${label:node} mqtt v5 unsuccessful sent PUBACK packets in the last minute |\n| [ vernemq_mqtt_puback_unexpected ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_puback_invalid_error | Node ${label:node} mqtt v${label:mqtt_version} received unexpected PUBACK messages in the last minute |\n| [ vernemq_mqtt_pubrec_received_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_pubrec_received_by_reason_code | Node ${label:node} mqtt v5 received unsuccessful PUBREC packets in the last minute |\n| [ vernemq_mqtt_pubrec_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_pubrec_sent_by_reason_code | Node ${label:node} mqtt v5 unsuccessful sent PUBREC packets in the last minute |\n| [ vernemq_mqtt_pubrec_invalid_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_pubrec_invalid_error | Node ${label:node} mqtt v${label:mqtt_version} received invalid PUBREC packets in the last minute |\n| [ vernemq_mqtt_pubrel_received_reason_unsuccessful 
](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_pubrel_received_by_reason_code | Node ${label:node} mqtt v5 received unsuccessful PUBREL packets in the last minute |\n| [ vernemq_mqtt_pubrel_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_pubrel_sent_by_reason_code | Node ${label:node} mqtt v5 unsuccessful sent PUBREL packets in the last minute |\n| [ vernemq_mqtt_pubcomp_received_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_pubcomp_received_by_reason_code | Node ${label:node} mqtt v5 received unsuccessful PUBCOMP packets in the last minute |\n| [ vernemq_mqtt_pubcomp_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_pubcomp_sent_by_reason_code | Node ${label:node} mqtt v5 unsuccessful sent PUBCOMP packets in the last minute |\n| [ vernemq_mqtt_pubcomp_unexpected ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_pubcomp_invalid_error | Node ${label:node} mqtt v${label:mqtt_version} received unexpected PUBCOMP packets in the last minute |\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per node\n\nThese metrics refer to the VerneMQ node.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| node | The value of this label is identical to the value of the \"node\" label exposed by VerneMQ. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| vernemq.node_socket | open | sockets |\n| vernemq.node_socket_operations | open, close | sockets/s |\n| vernemq.node_client_keepalive_expired | closed | sockets/s |\n| vernemq.node_socket_close_timeout | closed | sockets/s |\n| vernemq.node_socket_errors | errors | errors/s |\n| vernemq.node_queue_processes | queue_processes | queue processes |\n| vernemq.node_queue_processes_operations | setup, teardown | events/s |\n| vernemq.node_queue_process_init_from_storage | queue_processes | queue processes/s |\n| vernemq.node_queue_messages | received, sent | messages/s |\n| vernemq.node_queued_messages | queued | messages |\n| vernemq.node_queue_undelivered_messages | dropped, expired, unhandled | messages/s |\n| vernemq.node_router_subscriptions | subscriptions | subscriptions |\n| vernemq.node_router_matched_subscriptions | local, remote | subscriptions/s |\n| vernemq.node_router_memory | used | bytes |\n| vernemq.node_average_scheduler_utilization | utilization | percentage |\n| vernemq.node_system_processes | processes | processes |\n| vernemq.node_system_reductions | reductions | ops/s |\n| vernemq.node_system_context_switches | context_switches | ops/s |\n| vernemq.node_system_io | received, sent | bytes/s |\n| vernemq.node_system_run_queue | ready | processes |\n| vernemq.node_system_gc_count | gc | ops/s |\n| vernemq.node_system_gc_words_reclaimed | words_reclaimed | ops/s |\n| vernemq.node_system_allocated_memory | processes, system | bytes |\n| vernemq.node_traffic | received, sent | bytes/s |\n| vernemq.node_retain_messages | messages | messages |\n| vernemq.node_retain_memory | used | bytes |\n| vernemq.node_cluster_traffic | received, sent | bytes/s |\n| vernemq.node_cluster_dropped | dropped | bytes/s |\n| 
vernemq.node_netsplit_unresolved | unresolved | netsplits |\n| vernemq.node_netsplits | resolved, detected | netsplits/s |\n| vernemq.node_uptime | time | seconds |\n\n### Per mqtt\n\nThese metrics are specific to the MQTT protocol version in use.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| node | The value of this label is identical to the value of the \"node\" label exposed by VerneMQ. |\n| mqtt_version | MQTT version. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| vernemq.node_mqtt_auth | received, sent | packets/s |\n| vernemq.node_mqtt_auth_received_by_reason_code | success, continue_authentication, reauthenticate | packets/s |\n| vernemq.node_mqtt_auth_sent_by_reason_code | success, continue_authentication, reauthenticate | packets/s |\n| vernemq.node_mqtt_connect | connect, connack | packets/s |\n| vernemq.node_mqtt_connack_sent_by_return_code | success, unsupported_protocol_version, client_identifier_not_valid, server_unavailable, bad_username_or_password, not_authorized | packets/s |\n| vernemq.node_mqtt_connack_sent_by_reason_code | success, unspecified_error, malformed_packet, protocol_error, impl_specific_error, unsupported_protocol_version, client_identifier_not_valid, bad_username_or_password, not_authorized, server_unavailable, server_busy, banned, bad_authentication_method, topic_name_invalid, packet_too_large, quota_exceeded, payload_format_invalid, retain_not_supported, qos_not_supported, use_another_server, server_moved, connection_rate_exceeded | packets/s |\n| vernemq.node_mqtt_disconnect | received, sent | packets/s |\n| vernemq.node_mqtt_disconnect_received_by_reason_code | normal_disconnect, disconnect_with_will_msg, unspecified_error, malformed_packet, protocol_error, impl_specific_error, topic_name_invalid, receive_max_exceeded, topic_alias_invalid, packet_too_large, message_rate_too_high, quota_exceeded, administrative_action, payload_format_invalid | packets/s |\n| vernemq.node_mqtt_disconnect_sent_by_reason_code | normal_disconnect, unspecified_error, malformed_packet, protocol_error, impl_specific_error, not_authorized, server_busy, server_shutting_down, keep_alive_timeout, session_taken_over, topic_filter_invalid, topic_name_invalid, receive_max_exceeded, topic_alias_invalid, packet_too_large, message_rate_too_high, quota_exceeded, administrative_action, payload_format_invalid, retain_not_supported, qos_not_supported, use_another_server, server_moved, shared_subs_not_supported, connection_rate_exceeded, max_connect_time, subscription_ids_not_supported, wildcard_subs_not_supported | packets/s |\n| vernemq.node_mqtt_subscribe | subscribe, suback | packets/s |\n| vernemq.node_mqtt_subscribe_error | subscribe | errors/s |\n| vernemq.node_mqtt_subscribe_auth_error | subscribe_auth | errors/s |\n| vernemq.node_mqtt_unsubscribe | unsubscribe, unsuback | packets/s |\n| vernemq.node_mqtt_unsubscribe_error | unsubscribe | errors/s |\n| vernemq.node_mqtt_publish | received, sent | packets/s |\n| vernemq.node_mqtt_publish_errors | publish | errors/s |\n| vernemq.node_mqtt_publish_auth_errors | publish_auth | errors/s |\n| vernemq.node_mqtt_puback | received, sent | packets/s |\n| vernemq.node_mqtt_puback_received_by_reason_code | success, no_matching_subscribers, unspecified_error, impl_specific_error, not_authorized, topic_name_invalid, packet_id_in_use, quota_exceeded, payload_format_invalid | packets/s |\n| vernemq.node_mqtt_puback_sent_by_reason_code | success, no_matching_subscribers, unspecified_error, 
impl_specific_error, not_authorized, topic_name_invalid, packet_id_in_use, quota_exceeded, payload_format_invalid | packets/s |\n| vernemq.node_mqtt_puback_invalid_error | unexpected | messages/s |\n| vernemq.node_mqtt_pubrec | received, sent | packets/s |\n| vernemq.node_mqtt_pubrec_received_by_reason_code | success, no_matching_subscribers, unspecified_error, impl_specific_error, not_authorized, topic_name_invalid, packet_id_in_use, quota_exceeded, payload_format_invalid | packets/s |\n| vernemq.node_mqtt_pubrec_sent_by_reason_code | success, no_matching_subscribers, unspecified_error, impl_specific_error, not_authorized, topic_name_invalid, packet_id_in_use, quota_exceeded, payload_format_invalid | packets/s |\n| vernemq.node_mqtt_pubrec_invalid_error | unexpected | messages/s |\n| vernemq.node_mqtt_pubrel | received, sent | packets/s |\n| vernemq.node_mqtt_pubrel_received_by_reason_code | success, packet_id_not_found | packets/s |\n| vernemq.node_mqtt_pubrel_sent_by_reason_code | success, packet_id_not_found | packets/s |\n| vernemq.node_mqtt_pubcomp | received, sent | packets/s |\n| vernemq.node_mqtt_pubcomp_received_by_reason_code | success, packet_id_not_found | packets/s |\n| vernemq.node_mqtt_pubcomp_sent_by_reason_cod | success, packet_id_not_found | packets/s |\n| vernemq.node_mqtt_pubcomp_invalid_error | unexpected | messages/s |\n| vernemq.node_mqtt_ping | pingreq, pingresp | packets/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-vernemq-VerneMQ", "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/vernemq/metadata.yaml", @@ -17078,7 +17422,7 @@ "most_popular": true }, "overview": "# VMware vCenter Server\n\nPlugin: go.d.plugin\nModule: vsphere\n\n## Overview\n\nThis collector monitors hosts and vms performance statistics from `vCenter` servers.\n\n> **Warning**: The `vsphere` collector cannot re-login and continue collecting metrics after a vCenter reboot.\n> go.d.plugin needs to be restarted.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default `update_every` is 20 seconds, and it doesn't make sense to decrease the value.\n**VMware real-time statistics are generated at the 20-second specificity**.\n\nIt is likely that 20 seconds is not enough for big installations and the value should be tuned.\n\nTo get a better view we recommend running the collector in debug mode and seeing how much time it will take to collect metrics.\n\n
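A hedged sketch of such a debug run (the same commands appear in the Troubleshooting section below; the plugin path assumes a typical Linux install and may differ on your system):\n\n```bash\ncd /usr/libexec/netdata/plugins.d/\nsudo -u netdata ./go.d.plugin -d -m vsphere\n```\n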
\nExample (all unrelated debug lines were removed)\n\n```\n[ilyam@pc]$ ./go.d.plugin -d -m vsphere\n[ DEBUG ] vsphere[vsphere] discover.go:94 discovering : starting resource discovering process\n[ DEBUG ] vsphere[vsphere] discover.go:102 discovering : found 3 dcs, process took 49.329656ms\n[ DEBUG ] vsphere[vsphere] discover.go:109 discovering : found 12 folders, process took 49.538688ms\n[ DEBUG ] vsphere[vsphere] discover.go:116 discovering : found 3 clusters, process took 47.722692ms\n[ DEBUG ] vsphere[vsphere] discover.go:123 discovering : found 2 hosts, process took 52.966995ms\n[ DEBUG ] vsphere[vsphere] discover.go:130 discovering : found 2 vms, process took 49.832979ms\n[ INFO ] vsphere[vsphere] discover.go:140 discovering : found 3 dcs, 12 folders, 3 clusters (2 dummy), 2 hosts, 3 vms, process took 249.655993ms\n[ DEBUG ] vsphere[vsphere] build.go:12 discovering : building : starting building resources process\n[ INFO ] vsphere[vsphere] build.go:23 discovering : building : built 3/3 dcs, 12/12 folders, 3/3 clusters, 2/2 hosts, 3/3 vms, process took 63.3\u00b5s\n[ DEBUG ] vsphere[vsphere] hierarchy.go:10 discovering : hierarchy : start setting resources hierarchy process\n[ INFO ] vsphere[vsphere] hierarchy.go:18 discovering : hierarchy : set 3/3 clusters, 2/2 hosts, 3/3 vms, process took 6.522\u00b5s\n[ DEBUG ] vsphere[vsphere] filter.go:24 discovering : filtering : starting filtering resources process\n[ DEBUG ] vsphere[vsphere] filter.go:45 discovering : filtering : removed 0 unmatched hosts\n[ DEBUG ] vsphere[vsphere] filter.go:56 discovering : filtering : removed 0 unmatched vms\n[ INFO ] vsphere[vsphere] filter.go:29 discovering : filtering : filtered 0/2 hosts, 0/3 vms, process took 42.973\u00b5s\n[ DEBUG ] vsphere[vsphere] metric_lists.go:14 discovering : metric lists : starting resources metric lists collection process\n[ INFO ] vsphere[vsphere] metric_lists.go:30 discovering : metric lists : collected metric lists for 2/2 hosts, 3/3 vms, process took 275.60764ms\n[ INFO ] vsphere[vsphere] discover.go:74 discovering : discovered 2/2 hosts, 3/3 vms, the whole process took 525.614041ms\n[ INFO ] vsphere[vsphere] discover.go:11 starting discovery process, will do discovery every 5m0s\n[ DEBUG ] vsphere[vsphere] collect.go:11 starting collection process\n[ DEBUG ] vsphere[vsphere] scrape.go:48 scraping : scraped metrics for 2/2 hosts, process took 96.257374ms\n[ DEBUG ] vsphere[vsphere] scrape.go:60 scraping : scraped metrics for 3/3 vms, process took 57.879697ms\n[ DEBUG ] vsphere[vsphere] collect.go:23 metrics collected, process took 154.77997ms\n```\n\n
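Given timings like these, a hedged tuning sketch for `go.d/vsphere.conf` (illustrative values only; the job name and URL are placeholders, while `update_every`, `timeout`, and `discovery_interval` are the options documented in the Setup section below):\n\n```yaml\njobs:\n - name: vcenter1\n url: https://203.0.113.1\n update_every: 20 # VMware real-time stats are generated at a 20-second granularity\n timeout: 10 # well above the ~0.15s collection time observed above\n discovery_interval: 300 # discovery runs in a separate thread\n```\n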
\n\nIn the debug output above, you can see that discovering took `525.614041ms` and collecting metrics took `154.77997ms`. Discovering runs in a separate thread, so it doesn't affect collecting.\nThe `update_every` and `timeout` parameters should be adjusted based on these numbers.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/vsphere.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/vsphere.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 20 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | vCenter server URL. | | yes |\n| host_include | Hosts selector (filter). | | no |\n| vm_include | Virtual machines selector (filter). | | no |\n| discovery_interval | Hosts and VMs discovery interval. | 300 | no |\n| timeout | HTTP request timeout. | 20 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n##### host_include\n\nMetrics of hosts matching the selector will be collected.\n\n- Include pattern syntax: \"/Datacenter pattern/Cluster pattern/Host pattern\".\n- Match pattern syntax: [simple patterns](/src/libnetdata/simple_pattern/README.md#simple-patterns).\n- Syntax:\n\n ```yaml\n host_include:\n - '/DC1/*' # select all hosts from datacenter DC1\n - '/DC2/*/!Host2 *' # select all hosts from datacenter DC2 except HOST2\n - '/DC3/Cluster3/*' # select all hosts from datacenter DC3 cluster Cluster3\n ```\n\n\n##### vm_include\n\nMetrics of VMs matching the selector will be collected.\n\n- Include pattern syntax: \"/Datacenter pattern/Cluster pattern/Host pattern/VM pattern\".\n- Match pattern syntax: [simple patterns](/src/libnetdata/simple_pattern/README.md#simple-patterns).\n- Syntax:\n\n ```yaml\n vm_include:\n - '/DC1/*' # select all VMs from datacenter DC\n - '/DC2/*/*/!VM2 *' # select all VMs from datacenter DC2 except VM2\n - '/DC3/Cluster3/*' # select all VMs from datacenter DC3 cluster Cluster3\n ```\n\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name : vcenter1\n url : https://203.0.113.1\n username : admin@vsphere.local\n password : somepassword\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name : vcenter1\n url : https://203.0.113.1\n username : admin@vsphere.local\n password : somepassword\n\n - name : vcenter2\n url : https://203.0.113.10\n username : admin@vsphere.local\n password : somepassword\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/vsphere.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/vsphere.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 20 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | vCenter server URL. | | yes |\n| host_include | Hosts selector (filter). | | no |\n| vm_include | Virtual machines selector (filter). | | no |\n| discovery_interval | Hosts and VMs discovery interval. | 300 | no |\n| timeout | HTTP request timeout. | 20 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. 
| | no |\n| tls_key | Client TLS key. | | no |\n\n##### host_include\n\nMetrics of hosts matching the selector will be collected.\n\n- Include pattern syntax: \"/Datacenter pattern/Cluster pattern/Host pattern\".\n- Match pattern syntax: [simple patterns](/src/libnetdata/simple_pattern/README.md#simple-patterns).\n- Syntax:\n\n ```yaml\n host_include:\n - '/DC1/*' # select all hosts from datacenter DC1\n - '/DC2/*/!Host2 *' # select all hosts from datacenter DC2 except Host2\n - '/DC3/Cluster3/*' # select all hosts from datacenter DC3 cluster Cluster3\n ```\n\n\n##### vm_include\n\nMetrics of VMs matching the selector will be collected.\n\n- Include pattern syntax: \"/Datacenter pattern/Cluster pattern/Host pattern/VM pattern\".\n- Match pattern syntax: [simple patterns](/src/libnetdata/simple_pattern/README.md#simple-patterns).\n- Syntax:\n\n ```yaml\n vm_include:\n - '/DC1/*' # select all VMs from datacenter DC1\n - '/DC2/*/*/!VM2 *' # select all VMs from datacenter DC2 except VM2\n - '/DC3/Cluster3/*' # select all VMs from datacenter DC3 cluster Cluster3\n ```\n\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name : vcenter1\n url : https://203.0.113.1\n username : admin@vsphere.local\n password : somepassword\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name : vcenter1\n url : https://203.0.113.1\n username : admin@vsphere.local\n password : somepassword\n\n - name : vcenter2\n url : https://203.0.113.10\n username : admin@vsphere.local\n password : somepassword\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `vsphere` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m vsphere\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `vsphere` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep vsphere\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep vsphere /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep vsphere\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ vsphere_vm_cpu_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vsphere.conf) | vsphere.vm_cpu_utilization | Virtual Machine CPU utilization |\n| [ vsphere_vm_mem_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vsphere.conf) | vsphere.vm_mem_utilization | Virtual Machine memory utilization |\n| [ vsphere_host_cpu_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vsphere.conf) | vsphere.host_cpu_utilization | ESXi Host CPU utilization |\n| [ vsphere_host_mem_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vsphere.conf) | vsphere.host_mem_utilization | ESXi Host memory utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per virtual machine\n\nThese metrics refer to the Virtual Machine.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| datacenter | Datacenter name |\n| cluster | Cluster name |\n| host | Host name |\n| vm | Virtual Machine name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| vsphere.vm_cpu_utilization | used | percentage |\n| vsphere.vm_mem_utilization | used | percentage |\n| vsphere.vm_mem_usage | granted, consumed, active, shared | KiB |\n| vsphere.vm_mem_swap_usage | swapped | KiB |\n| vsphere.vm_mem_swap_io | in, out | KiB/s |\n| vsphere.vm_disk_io | read, write | KiB/s |\n| vsphere.vm_disk_max_latency | latency | milliseconds |\n| vsphere.vm_net_traffic | received, sent | KiB/s |\n| vsphere.vm_net_packets | received, sent | packets |\n| vsphere.vm_net_drops | received, sent | packets |\n| vsphere.vm_overall_status | green, red, yellow, gray | status |\n| vsphere.vm_system_uptime | uptime | seconds |\n\n### Per host\n\nThese metrics refer to the ESXi host.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| datacenter | Datacenter name |\n| cluster | Cluster name |\n| host | Host name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| vsphere.host_cpu_utilization | used | percentage |\n| vsphere.host_mem_utilization | used | percentage |\n| vsphere.host_mem_usage | granted, consumed, active, shared, sharedcommon | KiB |\n| vsphere.host_mem_swap_io | in, out | KiB/s |\n| vsphere.host_disk_io | read, write | KiB/s |\n| vsphere.host_disk_max_latency | latency | milliseconds |\n| vsphere.host_net_traffic | received, sent | KiB/s |\n| vsphere.host_net_packets | received, sent | packets |\n| vsphere.host_net_drops | received, sent | packets |\n| vsphere.host_net_errors | received, sent | errors |\n| vsphere.host_overall_status | green, red, yellow, gray | status |\n| vsphere.host_system_uptime | uptime | seconds |\n\n", @@ -17087,6 +17431,43 @@ "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/vsphere/metadata.yaml", "related_resources": "" }, + { + "meta": { + "plugin_name": "go.d.plugin", + "module_name": "w1sensor", + "monitored_instance": { + "name": 
"1-Wire Sensors", + "link": "https://www.analog.com/en/product-category/1wire-temperature-sensors.html", + "categories": [ + "data-collection.hardware-devices-and-sensors" + ], + "icon_filename": "1-wire.png" + }, + "related_resources": { + "integrations": { + "list": [] + } + }, + "info_provided_to_referring_integrations": { + "description": "" + }, + "keywords": [ + "temperature", + "sensor", + "1-wire" + ], + "most_popular": false + }, + "overview": "# 1-Wire Sensors\n\nPlugin: go.d.plugin\nModule: w1sensor\n\n## Overview\n\nMonitor 1-Wire Sensors metrics with Netdata for optimal environmental conditions monitoring. Enhance your environmental monitoring with real-time insights and alerts.\n\n\nThe collector uses the wire, w1_gpio, and w1_therm kernel modules. Currently temperature sensors are supported and automatically detected.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector will try to auto detect available 1-Wire devices.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Required Linux kernel modules\n\nMake sure `wire`, `w1_gpio`, and `w1_therm` kernel modules are loaded.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/w1sensor.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/w1sensor.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| sensors_path | Directory path containing sensor folders with w1_slave files. | /sys/bus/w1/devices | no |\n\n#### Examples\n\n##### Custom sensor device path\n\nMonitors a virtual sensor when the w1_slave file is located in a custom directory instead of the default location.\n\n```yaml\njobs:\n - name: custom_sensors_path\n sensors_path: /custom/path/devices\n\n```\n", + "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `w1sensor` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m w1sensor\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `w1sensor` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep w1sensor\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep w1sensor /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep w1sensor\n```\n\n", + "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per sensor\n\nThese metrics refer to the 1-Wire Sensor.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| w1sensor.temperature | temperature | Celsius |\n\n", + "integration_type": "collector", + "id": "go.d.plugin-w1sensor-1-Wire_Sensors", + "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/w1sensor/metadata.yaml", + "related_resources": "" + }, { "meta": { "id": "collector-go.d.plugin-web_log", @@ -17119,7 +17500,7 @@ } }, "overview": "# Web server log files\n\nPlugin: go.d.plugin\nModule: web_log\n\n## Overview\n\nThis collector monitors web servers by parsing their log files.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt automatically detects log files of web servers running on localhost.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/web_log.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config 
go.d/web_log.conf\n```\n#### Options\n\nWeblog is aware of how to parse and interpret the following fields (**known fields**):\n\n> [nginx](https://nginx.org/en/docs/varindex.html)\n>\n> [apache](https://httpd.apache.org/docs/current/mod/mod_log_config.html)\n\n| nginx | apache | description |\n|-------------------------|----------|------------------------------------------------------------------------------------------|\n| $host ($http_host) | %v | Name of the server which accepted a request. |\n| $server_port | %p | Port of the server which accepted a request. |\n| $scheme | - | Request scheme. \"http\" or \"https\". |\n| $remote_addr | %a (%h) | Client address. |\n| $request | %r | Full original request line. The line is \"$request_method $request_uri $server_protocol\". |\n| $request_method | %m | Request method. Usually \"GET\" or \"POST\". |\n| $request_uri | %U | Full original request URI. |\n| $server_protocol | %H | Request protocol. Usually \"HTTP/1.0\", \"HTTP/1.1\", or \"HTTP/2.0\". |\n| $status | %s (%>s) | Response status code. |\n| $request_length | %I | Bytes received from a client, including request and headers. |\n| $bytes_sent | %O | Bytes sent to a client, including request and headers. |\n| $body_bytes_sent | %B (%b) | Bytes sent to a client, not counting the response header. |\n| $request_time | %D | Request processing time. |\n| $upstream_response_time | - | Time spent on receiving the response from the upstream server. |\n| $ssl_protocol | - | Protocol of an established SSL connection. |\n| $ssl_cipher | - | String of ciphers used for an established SSL connection. |\n\nNotes:\n\n- Apache `%h` logs the IP address if [HostnameLookups](https://httpd.apache.org/docs/2.4/mod/core.html#hostnamelookups) is Off. The web log collector counts hostnames as IPv4 addresses. We recommend either to disable HostnameLookups or use `%a` instead of `%h`.\n- Since httpd 2.0, unlike 1.3, the `%b` and `%B` format strings do not represent the number of bytes sent to the client, but simply the size in bytes of the HTTP response. It will differ, for instance, if the connection is aborted, or if SSL is used. The `%O` format provided by [`mod_logio`](https://httpd.apache.org/docs/2.4/mod/mod_logio.html) will log the actual number of bytes sent over the network.\n- To get `%I` and `%O` working you need to enable `mod_logio` on Apache.\n- NGINX logs URI with query parameters, Apache doesnt.\n- `$request` is parsed into `$request_method`, `$request_uri` and `$server_protocol`. If you have `$request` in your log format, there is no sense to have others.\n- Don't use both `$bytes_sent` and `$body_bytes_sent` (`%O` and `%B` or `%b`). The module does not distinguish between these parameters.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| path | Path to the web server log file. | | yes |\n| exclude_path | Path to exclude. | *.gz | no |\n| url_patterns | List of URL patterns. | [] | no |\n| url_patterns.name | Used as a dimension name. | | yes |\n| url_patterns.pattern | Used to match against full original request URI. Pattern syntax in [matcher](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/matcher#supported-format). | | yes |\n| log_type | Log parser type. | auto | no |\n| csv_config | CSV log parser config. | | no |\n| csv_config.delimiter | CSV field delimiter. 
| , | no |\n| csv_config.format | CSV log format. | | no |\n| ltsv_config | LTSV log parser config. | | no |\n| ltsv_config.field_delimiter | LTSV field delimiter. | \\t | no |\n| ltsv_config.value_delimiter | LTSV value delimiter. | : | no |\n| ltsv_config.mapping | LTSV fields mapping to **known fields**. | | yes |\n| json_config | JSON log parser config. | | no |\n| json_config.mapping | JSON fields mapping to **known fields**. | | yes |\n| regexp_config | RegExp log parser config. | | no |\n| regexp_config.pattern | RegExp pattern with named groups. | | yes |\n\n##### url_patterns\n\n\"URL pattern\" scope metrics will be collected for each URL pattern. \n\nOption syntax:\n\n```yaml\nurl_patterns:\n - name: name1\n pattern: pattern1\n - name: name2\n pattern: pattern2\n```\n\n\n##### log_type\n\nWeblog supports 5 different log parsers:\n\n| Parser type | Description |\n|-------------|-------------------------------------------|\n| auto | Use CSV and auto-detect format |\n| csv | A comma-separated values |\n| json | [JSON](https://www.json.org/json-en.html) |\n| ltsv | [LTSV](http://ltsv.org/) |\n| regexp | Regular expression with named groups |\n\nSyntax:\n\n```yaml\nlog_type: auto\n```\n\nIf `log_type` parameter set to `auto` (which is default), weblog will try to auto-detect appropriate log parser and log format using the last line of the log file.\n\n- checks if format is `CSV` (using regexp).\n- checks if format is `JSON` (using regexp).\n- assumes format is `CSV` and tries to find appropriate `CSV` log format using predefined list of formats. It tries to parse the line using each of them in the following order (the first one matches is used later):\n\n ```sh\n $host:$server_port $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent - - $request_length $request_time $upstream_response_time\n $host:$server_port $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent - - $request_length $request_time\n $host:$server_port $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent $request_length $request_time $upstream_response_time\n $host:$server_port $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent $request_length $request_time\n $host:$server_port $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent\n $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent - - $request_length $request_time $upstream_response_time\n $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent - - $request_length $request_time\n $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent $request_length $request_time $upstream_response_time\n $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent $request_length $request_time\n $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent\n ```\n\n If you're using the default Apache/NGINX log format, auto-detect will work for you. 
If it doesn't work you need to set the format manually.\n\n\n##### csv_config.format\n\n\n\n##### ltsv_config.mapping\n\nThe mapping is a dictionary where the key is a field, as in logs, and the value is the corresponding **known field**.\n\n> **Note**: don't use `$` and `%` prefixes for mapped field names.\n\n```yaml\nlog_type: ltsv\nltsv_config:\n mapping:\n label1: field1\n label2: field2\n```\n\n\n##### json_config.mapping\n\nThe mapping is a dictionary where the key is a field, as in logs, and the value is the corresponding **known field**.\n\n> **Note**: don't use `$` and `%` prefixes for mapped field names.\n\n```yaml\nlog_type: json\njson_config:\n mapping:\n label1: field1\n label2: field2\n```\n\n\n##### regexp_config.pattern\n\nUse pattern with subexpressions names. These names should be **known fields**.\n\n> **Note**: don't use `$` and `%` prefixes for mapped field names.\n\nSyntax:\n\n```yaml\nlog_type: regexp\nregexp_config:\n pattern: PATTERN\n```\n\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/web_log.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/web_log.conf\n```\n#### Options\n\nWeblog is aware of how to parse and interpret the following fields (**known fields**):\n\n> [nginx](https://nginx.org/en/docs/varindex.html)\n>\n> [apache](https://httpd.apache.org/docs/current/mod/mod_log_config.html)\n\n| nginx | apache | description |\n|-------------------------|----------|------------------------------------------------------------------------------------------|\n| $host ($http_host) | %v | Name of the server which accepted a request. |\n| $server_port | %p | Port of the server which accepted a request. |\n| $scheme | - | Request scheme. \"http\" or \"https\". |\n| $remote_addr | %a (%h) | Client address. |\n| $request | %r | Full original request line. The line is \"$request_method $request_uri $server_protocol\". |\n| $request_method | %m | Request method. Usually \"GET\" or \"POST\". |\n| $request_uri | %U | Full original request URI. |\n| $server_protocol | %H | Request protocol. Usually \"HTTP/1.0\", \"HTTP/1.1\", or \"HTTP/2.0\". |\n| $status | %s (%>s) | Response status code. |\n| $request_length | %I | Bytes received from a client, including request and headers. |\n| $bytes_sent | %O | Bytes sent to a client, including request and headers. |\n| $body_bytes_sent | %B (%b) | Bytes sent to a client, not counting the response header. |\n| $request_time | %D | Request processing time. |\n| $upstream_response_time | - | Time spent on receiving the response from the upstream server. |\n| $ssl_protocol | - | Protocol of an established SSL connection. |\n| $ssl_cipher | - | String of ciphers used for an established SSL connection. |\n\nNotes:\n\n- Apache `%h` logs the IP address if [HostnameLookups](https://httpd.apache.org/docs/2.4/mod/core.html#hostnamelookups) is Off. The web log collector counts hostnames as IPv4 addresses. 
We recommend either disabling HostnameLookups or using `%a` instead of `%h`.\n- Since httpd 2.0, unlike 1.3, the `%b` and `%B` format strings do not represent the number of bytes sent to the client, but simply the size in bytes of the HTTP response. It will differ, for instance, if the connection is aborted, or if SSL is used. The `%O` format provided by [`mod_logio`](https://httpd.apache.org/docs/2.4/mod/mod_logio.html) will log the actual number of bytes sent over the network.\n- To get `%I` and `%O` working you need to enable `mod_logio` on Apache.\n- NGINX logs URI with query parameters, Apache doesn't.\n- `$request` is parsed into `$request_method`, `$request_uri` and `$server_protocol`. If you have `$request` in your log format, there is no need to include the others.\n- Don't use both `$bytes_sent` and `$body_bytes_sent` (`%O` and `%B` or `%b`). The module does not distinguish between these parameters.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| path | Path to the web server log file. | | yes |\n| exclude_path | Path to exclude. | *.gz | no |\n| url_patterns | List of URL patterns. | [] | no |\n| url_patterns.name | Used as a dimension name. | | yes |\n| url_patterns.pattern | Used to match against full original request URI. Pattern syntax in [matcher](https://github.com/netdata/netdata/tree/master/src/go/pkg/matcher#supported-format). | | yes |\n| log_type | Log parser type. | auto | no |\n| csv_config | CSV log parser config. | | no |\n| csv_config.delimiter | CSV field delimiter. | , | no |\n| csv_config.format | CSV log format. | | no |\n| ltsv_config | LTSV log parser config. | | no |\n| ltsv_config.field_delimiter | LTSV field delimiter. | \\t | no |\n| ltsv_config.value_delimiter | LTSV value delimiter. | : | no |\n| ltsv_config.mapping | LTSV fields mapping to **known fields**. | | yes |\n| json_config | JSON log parser config. | | no |\n| json_config.mapping | JSON fields mapping to **known fields**. | | yes |\n| regexp_config | RegExp log parser config. | | no |\n| regexp_config.pattern | RegExp pattern with named groups. | | yes |\n\n##### url_patterns\n\n\"URL pattern\" scope metrics will be collected for each URL pattern. \n\nOption syntax:\n\n```yaml\nurl_patterns:\n - name: name1\n pattern: pattern1\n - name: name2\n pattern: pattern2\n```\n\n\n##### log_type\n\nWeblog supports 5 different log parsers:\n\n| Parser type | Description |\n|-------------|-------------------------------------------|\n| auto | Use CSV and auto-detect format |\n| csv | Comma-separated values |\n| json | [JSON](https://www.json.org/json-en.html) |\n| ltsv | [LTSV](http://ltsv.org/) |\n| regexp | Regular expression with named groups |\n\nSyntax:\n\n```yaml\nlog_type: auto\n```\n\nIf the `log_type` parameter is set to `auto` (the default), weblog will try to auto-detect the appropriate log parser and log format using the last line of the log file.\n\n- checks if format is `CSV` (using regexp).\n- checks if format is `JSON` (using regexp).\n- assumes format is `CSV` and tries to find an appropriate `CSV` log format using a predefined list of formats. 
It tries to parse the line using each of them in the following order (the first one that matches is used):\n\n ```sh\n $host:$server_port $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent - - $request_length $request_time $upstream_response_time\n $host:$server_port $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent - - $request_length $request_time\n $host:$server_port $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent $request_length $request_time $upstream_response_time\n $host:$server_port $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent $request_length $request_time\n $host:$server_port $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent\n $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent - - $request_length $request_time $upstream_response_time\n $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent - - $request_length $request_time\n $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent $request_length $request_time $upstream_response_time\n $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent $request_length $request_time\n $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent\n ```\n\n If you're using the default Apache/NGINX log format, auto-detect will work for you. If it doesn't, you need to set the format manually.\n\n\n##### csv_config.format\n\n\n\n##### ltsv_config.mapping\n\nThe mapping is a dictionary where the key is a field, as in logs, and the value is the corresponding **known field**.\n\n> **Note**: don't use `$` and `%` prefixes for mapped field names.\n\n```yaml\nlog_type: ltsv\nltsv_config:\n mapping:\n label1: field1\n label2: field2\n```\n\n\n##### json_config.mapping\n\nThe mapping is a dictionary where the key is a field, as in logs, and the value is the corresponding **known field**.\n\n> **Note**: don't use `$` and `%` prefixes for mapped field names.\n\n```yaml\nlog_type: json\njson_config:\n mapping:\n label1: field1\n label2: field2\n```\n\n\n##### regexp_config.pattern\n\nUse a pattern with named subexpressions. These names should be **known fields**.\n\n> **Note**: don't use `$` and `%` prefixes for mapped field names.\n\nSyntax:\n\n```yaml\nlog_type: regexp\nregexp_config:\n pattern: PATTERN\n```\n\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `web_log` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m web_log\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `web_log` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep web_log\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep web_log /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep web_log\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ web_log_1m_unmatched ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.excluded_requests | percentage of unparsed log lines over the last minute |\n| [ web_log_1m_requests ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.type_requests | ratio of successful HTTP requests over the last minute (1xx, 2xx, 304, 401) |\n| [ web_log_1m_redirects ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.type_requests | ratio of redirection HTTP requests over the last minute (3xx except 304) |\n| [ web_log_1m_bad_requests ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.type_requests | ratio of client error HTTP requests over the last minute (4xx except 401) |\n| [ web_log_1m_internal_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.type_requests | ratio of server error HTTP requests over the last minute (5xx) |\n| [ web_log_web_slow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.request_processing_time | average HTTP response time over the last 1 minute |\n| [ web_log_5m_requests_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.type_requests | ratio of successful HTTP requests over the last 5 minutes, compared with the previous 5 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Web server log files instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| web_log.requests | requests | requests/s |\n| web_log.excluded_requests | unmatched | requests/s |\n| web_log.type_requests | success, bad, redirect, error | requests/s |\n| web_log.status_code_class_responses | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s |\n| web_log.status_code_class_1xx_responses | a dimension per 1xx code | responses/s |\n| web_log.status_code_class_2xx_responses | a dimension per 2xx code | responses/s |\n| web_log.status_code_class_3xx_responses | a dimension per 3xx code | responses/s |\n| web_log.status_code_class_4xx_responses | a dimension per 4xx code | responses/s |\n| web_log.status_code_class_5xx_responses | a dimension per 5xx code | responses/s |\n| web_log.bandwidth | received, sent | kilobits/s |\n| web_log.request_processing_time | min, max, avg | milliseconds |\n| web_log.requests_processing_time_histogram | a dimension per bucket | requests/s |\n| web_log.upstream_response_time | min, max, avg | milliseconds |\n| web_log.upstream_responses_time_histogram | a dimension per bucket | requests/s |\n| web_log.current_poll_uniq_clients | ipv4, ipv6 | clients |\n| web_log.vhost_requests | a dimension per vhost | requests/s |\n| web_log.port_requests | a dimension per port | requests/s |\n| web_log.scheme_requests | http, https | requests/s |\n| web_log.http_method_requests | a dimension per HTTP method | requests/s |\n| web_log.http_version_requests | a dimension per HTTP version | requests/s |\n| web_log.ip_proto_requests | ipv4, ipv6 | requests/s |\n| web_log.ssl_proto_requests | a dimension per SSL protocol | requests/s |\n| web_log.ssl_cipher_suite_requests | a dimension per SSL cipher suite | requests/s |\n| web_log.url_pattern_requests | a dimension per URL pattern | requests/s |\n| web_log.custom_field_pattern_requests | a dimension per custom field pattern | requests/s |\n\n### Per custom time field\n\nTBD\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| web_log.custom_time_field_summary | min, max, avg | milliseconds |\n| web_log.custom_time_field_histogram | a dimension per bucket | observations |\n\n### Per custom numeric field\n\nTBD\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| web_log.custom_numeric_field_{{field_name}}_summary | min, max, avg | {{units}} |\n\n### Per URL pattern\n\nTBD\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| web_log.url_pattern_status_code_responses | a dimension per pattern | responses/s |\n| web_log.url_pattern_http_method_requests | a dimension per HTTP method | requests/s |\n| web_log.url_pattern_bandwidth | received, sent | kilobits/s |\n| web_log.url_pattern_request_processing_time | min, max, avg | milliseconds |\n\n", @@ -17155,7 +17536,7 @@ "most_popular": false }, "overview": "# Domain expiration date\n\nPlugin: go.d.plugin\nModule: whoisquery\n\n## Overview\n\nThis collector monitors the remaining time before the domain expires.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't 
support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/whoisquery.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/whoisquery.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 60 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| source | Domain address. | | yes |\n| days_until_expiration_warning | Number of days before the alarm status is warning. | 30 | no |\n| days_until_expiration_critical | Number of days before the alarm status is critical. | 15 | no |\n| timeout | The query timeout in seconds. | 5 | no |\n\n#### Examples\n\n##### Basic\n\nBasic configuration example\n\n```yaml\njobs:\n - name: my_site\n source: my_site.com\n\n```\n##### Multi-instance\n\n> **Note**: When you define more than one job, their names must be unique.\n\nCheck the expiration status of the multiple domains.\n\n\n```yaml\njobs:\n - name: my_site1\n source: my_site1.com\n\n - name: my_site2\n source: my_site2.com\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/whoisquery.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/whoisquery.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 60 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| source | Domain address. | | yes |\n| days_until_expiration_warning | Number of days before the alarm status is warning. | 30 | no |\n| days_until_expiration_critical | Number of days before the alarm status is critical. | 15 | no |\n| timeout | The query timeout in seconds. 
| 5 | no |\n\n#### Examples\n\n##### Basic\n\nBasic configuration example\n\n```yaml\njobs:\n - name: my_site\n source: my_site.com\n\n```\n##### Multi-instance\n\n> **Note**: When you define more than one job, their names must be unique.\n\nCheck the expiration status of multiple domains.\n\n\n```yaml\njobs:\n - name: my_site1\n source: my_site1.com\n\n - name: my_site2\n source: my_site2.com\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `whoisquery` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m whoisquery\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `whoisquery` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep whoisquery\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep whoisquery /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep whoisquery\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ whoisquery_days_until_expiration ](https://github.com/netdata/netdata/blob/master/src/health/health.d/whoisquery.conf) | whoisquery.time_until_expiration | time until the domain name registration expires |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per domain\n\nThese metrics refer to the configured source.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| domain | Configured source |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| whoisquery.time_until_expiration | expiry | seconds |\n\n", @@ -17196,10 +17577,10 @@ } }, "overview": "# Active Directory\n\nPlugin: go.d.plugin\nModule: windows\n\n## Overview\n\nThis collector monitors the performance of Windows machines, collecting both host metrics and metrics from various Windows applications (e.g. 
Active Directory, MSSQL).\n\n\nIt collects metrics by periodically sending HTTP requests to the [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt detects Windows exporter instances running on localhost (requires using the [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)).\n\nUsing the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nData collection affects the CPU usage of the Windows host. CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Windows exporter\n\nTo install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/windows.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/windows.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n```yaml\njobs:\n - name: win_server\n url: https://192.0.2.1:9182/metrics\n tls_skip_verify: yes\n\n```\n##### Virtual Node\n\nThe Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.\nYou can create a virtual node for all your Windows machines and control them as separate entities.\n\nTo make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:\n\n> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.\n\n```yaml\n# /etc/netdata/vnodes/vnodes.conf\n- hostname: win_server\n guid: \n```\n\n\n```yaml\njobs:\n - name: win_server\n vnode: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from multiple remote instances.\n\n\n```yaml\njobs:\n - name: win_server1\n url: http://192.0.2.1:9182/metrics\n\n - name: win_server2\n url: http://192.0.2.2:9182/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Windows exporter\n\nTo install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/windows.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/windows.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. 
| | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n```yaml\njobs:\n - name: win_server\n url: https://192.0.2.1:9182/metrics\n tls_skip_verify: yes\n\n```\n##### Virtual Node\n\nThe Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.\nYou can create a virtual node for all your Windows machines and control them as separate entities.\n\nTo make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:\n\n> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.\n\n```yaml\n# /etc/netdata/vnodes/vnodes.conf\n- hostname: win_server\n guid: \n```\n\n\n```yaml\njobs:\n - name: win_server\n vnode: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from multiple remote instances.\n\n\n```yaml\njobs:\n - name: win_server1\n url: http://192.0.2.1:9182/metrics\n\n - name: win_server2\n url: http://192.0.2.2:9182/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `windows` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m windows\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `windows` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep windows\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep windows /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep windows\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ windows_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.cpu_utilization_total | average CPU utilization over the last 10 minutes |\n| [ windows_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.memory_utilization | memory utilization |\n| [ windows_inbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of inbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of outbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_inbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of inbound errors for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of outbound errors for the network interface in the last 10 minutes |\n| [ windows_disk_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.logical_disk_space_usage | disk space utilization |\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThe collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\nSupported collectors:\n\n- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)\n- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)\n- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)\n- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)\n- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)\n- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)\n- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)\n- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)\n- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)\n- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)\n- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)\n- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)\n- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)\n- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)\n- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)\n- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)\n- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md)\n- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md)\n- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md)\n- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md)\n- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md)\n- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md)\n- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md)\n- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)\n- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)\n\n\n### Per Active Directory instance\n\nThese metrics refer to the entire monitored host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |\n| windows.memory_utilization | available, used | bytes |\n| windows.memory_page_faults | page_faults | events/s |\n| windows.memory_swap_utilization 
| available, used | bytes |\n| windows.memory_swap_operations | read, write | operations/s |\n| windows.memory_swap_pages | read, written | pages/s |\n| windows.memory_cached | cached | KiB |\n| windows.memory_cache_faults | cache_faults | events/s |\n| windows.memory_system_pool | paged, non-paged | bytes |\n| windows.tcp_conns_established | ipv4, ipv6 | connections |\n| windows.tcp_conns_active | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |\n| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |\n| windows.tcp_segments_received | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |\n| windows.os_processes | processes | number |\n| windows.os_users | users | users |\n| windows.os_visible_memory_usage | free, used | bytes |\n| windows.os_paging_files_usage | free, used | bytes |\n| windows.system_threads | threads | number |\n| windows.system_uptime | time | seconds |\n| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |\n| windows.processes_cpu_utilization | a dimension per process | percentage |\n| windows.processes_handles | a dimension per process | handles |\n| windows.processes_io_bytes | a dimension per process | bytes/s |\n| windows.processes_io_operations | a dimension per process | operations/s |\n| windows.processes_page_faults | a dimension per process | pgfaults/s |\n| windows.processes_page_file_bytes | a dimension per process | bytes |\n| windows.processes_pool_bytes | a dimension per process | bytes |\n| windows.processes_threads | a dimension per process | threads |\n| ad.database_operations | add, delete, modify, recycle | operations/s |\n| ad.directory_operations | read, write, search | operations/s |\n| ad.name_cache_lookups | lookups | lookups/s |\n| ad.name_cache_hits | hits | hits/s |\n| ad.atq_average_request_latency | time | seconds |\n| ad.atq_outstanding_requests | outstanding | requests |\n| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |\n| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |\n| ad.dra_replication_properties_updated | inbound, outbound | properties/s |\n| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |\n| ad.dra_replication_pending_syncs | pending | syncs |\n| ad.dra_replication_sync_requests | requests | requests/s |\n| ad.ds_threads | in_use | threads |\n| ad.ldap_last_bind_time | last_bind | seconds |\n| ad.binds | binds | binds/s |\n| ad.ldap_searches | searches | searches/s |\n| adfs.ad_login_connection_failures | connection | failures/s |\n| adfs.certificate_authentications | authentications | authentications/s |\n| adfs.db_artifact_failures | connection | failures/s |\n| adfs.db_artifact_query_time_seconds | query_time | seconds/s |\n| adfs.db_config_failures | connection | failures/s |\n| adfs.db_config_query_time_seconds | query_time | seconds/s |\n| adfs.device_authentications | authentications | authentications/s |\n| adfs.external_authentications | success, failure | authentications/s |\n| adfs.federated_authentications | authentications | 
authentications/s |\n| adfs.federation_metadata_requests | requests | requests/s |\n| adfs.oauth_authorization_requests | requests | requests/s |\n| adfs.oauth_client_authentications | success, failure | authentications/s |\n| adfs.oauth_client_credentials_requests | success, failure | requests/s |\n| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |\n| adfs.oauth_client_windows_authentications | success, failure | authentications/s |\n| adfs.oauth_logon_certificate_requests | success, failure | requests/s |\n| adfs.oauth_password_grant_requests | success, failure | requests/s |\n| adfs.oauth_token_requests_success | success | requests/s |\n| adfs.passive_requests | passive | requests/s |\n| adfs.passport_authentications | passport | authentications/s |\n| adfs.password_change_requests | success, failure | requests/s |\n| adfs.samlp_token_requests_success | success | requests/s |\n| adfs.sso_authentications | success, failure | authentications/s |\n| adfs.token_requests | requests | requests/s |\n| adfs.userpassword_authentications | success, failure | authentications/s |\n| adfs.windows_integrated_authentications | authentications | authentications/s |\n| adfs.wsfed_token_requests_success | success | requests/s |\n| adfs.wstrust_token_requests_success | success | requests/s |\n| exchange.activesync_ping_cmds_pending | pending | commands |\n| exchange.activesync_requests | received | requests/s |\n| exchange.activesync_sync_cmds | processed | commands/s |\n| exchange.autodiscover_requests | processed | requests/s |\n| exchange.avail_service_requests | serviced | requests/s |\n| exchange.owa_current_unique_users | logged-in | users |\n| exchange.owa_requests_total | handled | requests/s |\n| exchange.rpc_active_user_count | active | users |\n| exchange.rpc_avg_latency | latency | seconds |\n| exchange.rpc_connection_count | connections | connections |\n| exchange.rpc_operations | operations | operations/s |\n| exchange.rpc_requests | processed | requests |\n| exchange.rpc_user_count | users | users |\n| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_poison | low, high, none, normal | messages/s |\n| hyperv.vms_health | ok, critical | vms |\n| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |\n| hyperv.root_partition_attached_devices | attached | devices |\n| hyperv.root_partition_deposited_pages | deposited | pages |\n| hyperv.root_partition_skipped_interrupts | skipped | interrupts |\n| hyperv.root_partition_device_dma_errors | illegal_dma | requests |\n| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | requests |\n| 
hyperv.root_partition_device_interrupt_throttle_events | throttling | events |\n| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |\n| hyperv.root_partition_address_space | address_spaces | address spaces |\n| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |\n| hyperv.root_partition_virtual_tlb_pages | used | pages |\n\n### Per cpu core\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| core | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |\n| windows.cpu_core_interrupts | interrupts | interrupts/s |\n| windows.cpu_core_dpcs | dpcs | dpcs/s |\n| windows.cpu_core_cstate | c1, c2, c3 | percentage |\n\n### Per logical disk\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.logical_disk_utilization | free, used | bytes |\n| windows.logical_disk_bandwidth | read, write | bytes/s |\n| windows.logical_disk_operations | reads, writes | operations/s |\n| windows.logical_disk_latency | read, write | seconds |\n\n### Per network device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| nic | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.net_nic_bandwidth | received, sent | kilobits/s |\n| windows.net_nic_packets | received, sent | packets/s |\n| windows.net_nic_errors | inbound, outbound | errors/s |\n| windows.net_nic_discarded | inbound, outbound | discards/s |\n\n### Per thermalzone\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thermalzone | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.thermalzone_temperature | temperature | celsius |\n\n### Per service\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |\n| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |\n\n### Per website\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| website | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.website_traffic | received, sent | bytes/s |\n| iis.website_requests_rate | requests | requests/s |\n| iis.website_active_connections_count | active | connections |\n| iis.website_users_count | anonymous, non_anonymous | users |\n| iis.website_connection_attempts_rate | connection | attempts/s |\n| iis.website_isapi_extension_requests_count | isapi | requests |\n| iis.website_isapi_extension_requests_rate | isapi | requests/s |\n| iis.website_ftp_file_transfer_rate | received, sent | files/s |\n| iis.website_logon_attempts_rate | logon | attempts/s |\n| iis.website_errors_rate | document_locked, document_not_found | errors/s |\n| iis.website_uptime | document_locked, document_not_found | seconds |\n\n### Per mssql instance\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.instance_accessmethods_page_splits | 
page | splits/s |\n| mssql.instance_cache_hit_ratio | hit_ratio | percentage |\n| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |\n| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |\n| mssql.instance_bufman_iops | read, written | iops |\n| mssql.instance_blocked_processes | blocked | processes |\n| mssql.instance_user_connection | user | connections |\n| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |\n| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |\n| mssql.instance_memmgr_pending_memory_grants | pending | processes |\n| mssql.instance_memmgr_server_memory | memory | bytes |\n| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |\n| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |\n| mssql.instance_sqlstats_batch_requests | batch | requests/s |\n| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |\n| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |\n| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |\n\n### Per database\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n| database | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.database_active_transactions | active | transactions |\n| mssql.database_backup_restore_operations | backup | operations/s |\n| mssql.database_data_files_size | size | bytes |\n| mssql.database_log_flushed | flushed | bytes/s |\n| mssql.database_log_flushes | log | flushes/s |\n| mssql.database_transactions | transactions | transactions/s |\n| mssql.database_write_transactions | write | transactions/s |\n\n### Per certificate template\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cert_template | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adcs.cert_template_requests | requests | requests/s |\n| adcs.cert_template_failed_requests | failed | requests/s |\n| adcs.cert_template_issued_requests | issued | requests/s |\n| adcs.cert_template_pending_requests | pending | requests/s |\n| adcs.cert_template_request_processing_time | processing_time | seconds |\n| adcs.cert_template_retrievals | retrievals | retrievals/s |\n| adcs.cert_template_retrieval_processing_time | processing_time | seconds |\n| adcs.cert_template_request_cryptographic_signing_time | singing_time | seconds |\n| adcs.cert_template_request_policy_module_processing | processing_time | seconds |\n| adcs.cert_template_challenge_responses | challenge | responses/s |\n| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |\n| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |\n| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |\n\n### Per process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| process | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netframework.clrexception_thrown | exceptions | exceptions/s |\n| 
netframework.clrexception_filters | filters | filters/s |\n| netframework.clrexception_finallys | finallys | finallys/s |\n| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |\n| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |\n| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |\n| netframework.clrinterop_interop_stubs_created | created | stubs/s |\n| netframework.clrjit_methods | jit-compiled | methods/s |\n| netframework.clrjit_time | time | percentage |\n| netframework.clrjit_standard_failures | failures | failures/s |\n| netframework.clrjit_il_bytes | compiled_msil | bytes/s |\n| netframework.clrloading_loader_heap_size | committed | bytes |\n| netframework.clrloading_appdomains_loaded | loaded | domain/s |\n| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |\n| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |\n| netframework.clrloading_classes_loaded | loaded | classes/s |\n| netframework.clrloading_class_load_failures | class_load | failures/s |\n| netframework.clrlocksandthreads_queue_length | threads | threads/s |\n| netframework.clrlocksandthreads_current_logical_threads | logical | threads |\n| netframework.clrlocksandthreads_current_physical_threads | physical | threads |\n| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |\n| netframework.clrlocksandthreads_contentions | contentions | contentions/s |\n| netframework.clrmemory_allocated_bytes | allocated | bytes/s |\n| netframework.clrmemory_finalization_survivors | survived | objects |\n| netframework.clrmemory_heap_size | heap | bytes |\n| netframework.clrmemory_promoted | promoted | bytes |\n| netframework.clrmemory_number_gc_handles | used | handles |\n| netframework.clrmemory_collections | gc | gc/s |\n| netframework.clrmemory_induced_gc | gc | gc/s |\n| netframework.clrmemory_number_pinned_objects | pinned | objects |\n| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |\n| netframework.clrmemory_committed | committed | bytes |\n| netframework.clrmemory_reserved | reserved | bytes |\n| netframework.clrmemory_gc_time | time | percentage |\n| netframework.clrremoting_channels | registered | channels/s |\n| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |\n| netframework.clrremoting_context_bound_objects | allocated | objects/s |\n| netframework.clrremoting_context_proxies | objects | objects/s |\n| netframework.clrremoting_contexts | contexts | contexts |\n| netframework.clrremoting_remote_calls | rpc | calls/s |\n| netframework.clrsecurity_link_time_checks | linktime | checks/s |\n| netframework.clrsecurity_checks_time | time | percentage |\n| netframework.clrsecurity_stack_walk_depth | stack | depth |\n| netframework.clrsecurity_runtime_checks | runtime | checks/s |\n\n### Per exchange workload\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.workload_active_tasks | active | tasks |\n| exchange.workload_completed_tasks | completed | tasks/s |\n| exchange.workload_queued_tasks | queued | tasks/s |\n| exchange.workload_yielded_tasks | yielded | tasks/s |\n| exchange.workload_activity_status | active, paused | status |\n\n### Per ldap process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit 
|\n|:------|:----------|:----|\n| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |\n| exchange.ldap_read_time | read | seconds |\n| exchange.ldap_search_time | search | seconds |\n| exchange.ldap_write_time | write | seconds |\n| exchange.ldap_timeout_errors | timeout | errors/s |\n\n### Per http proxy\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.http_proxy_avg_auth_latency | latency | seconds |\n| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |\n| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |\n| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |\n| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |\n| exchange.http_proxy_requests | processed | requests/s |\n\n### Per vm\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_name | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_cpu_usage | gues, hypervisor, remote | percentage |\n| hyperv.vm_memory_physical | assigned_memory | MiB |\n| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |\n| hyperv.vm_memory_pressure_current | pressure | percentage |\n| hyperv.vm_vid_physical_pages_allocated | allocated | pages |\n| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |\n\n### Per vm device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_device_bytes | read, written | bytes/s |\n| hyperv.vm_device_operations | read, write | operations/s |\n| hyperv.vm_device_errors | errors | errors/s |\n\n### Per vm interface\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_interface | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_interface_bytes | received, sent | bytes/s |\n| hyperv.vm_interface_packets | received, sent | packets/s |\n| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |\n\n### Per vswitch\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vswitch | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vswitch_bytes | received, sent | bytes/s |\n| hyperv.vswitch_packets | received, sent | packets/s |\n| hyperv.vswitch_directed_packets | received, sent | packets/s |\n| hyperv.vswitch_broadcast_packets | received, sent | packets/s |\n| hyperv.vswitch_multicast_packets | received, sent | packets/s |\n| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_packets_flooded | flooded | packets/s |\n| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |\n| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |\n\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThe collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\nSupported collectors:\n\n- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)\n- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)\n- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)\n- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)\n- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)\n- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)\n- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)\n- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)\n- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)\n- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)\n- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)\n- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)\n- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)\n- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)\n- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)\n- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)\n- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-exceptions)\n- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-interop)\n- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-jit)\n- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-loading)\n- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-locks-and-threads)\n- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-memory)\n- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-remoting)\n- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)\n- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)\n\n\n### Per Active Directory instance\n\nThese metrics refer to the entire monitored host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |\n| windows.memory_utilization | available, used | bytes |\n| windows.memory_page_faults | page_faults | events/s |\n| 
windows.memory_swap_utilization | available, used | bytes |\n| windows.memory_swap_operations | read, write | operations/s |\n| windows.memory_swap_pages | read, written | pages/s |\n| windows.memory_cached | cached | KiB |\n| windows.memory_cache_faults | cache_faults | events/s |\n| windows.memory_system_pool | paged, non-paged | bytes |\n| windows.tcp_conns_established | ipv4, ipv6 | connections |\n| windows.tcp_conns_active | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |\n| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |\n| windows.tcp_segments_received | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |\n| windows.os_processes | processes | number |\n| windows.os_users | users | users |\n| windows.os_visible_memory_usage | free, used | bytes |\n| windows.os_paging_files_usage | free, used | bytes |\n| windows.system_threads | threads | number |\n| windows.system_uptime | time | seconds |\n| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |\n| windows.processes_cpu_utilization | a dimension per process | percentage |\n| windows.processes_handles | a dimension per process | handles |\n| windows.processes_io_bytes | a dimension per process | bytes/s |\n| windows.processes_io_operations | a dimension per process | operations/s |\n| windows.processes_page_faults | a dimension per process | pgfaults/s |\n| windows.processes_page_file_bytes | a dimension per process | bytes |\n| windows.processes_pool_bytes | a dimension per process | bytes |\n| windows.processes_threads | a dimension per process | threads |\n| ad.database_operations | add, delete, modify, recycle | operations/s |\n| ad.directory_operations | read, write, search | operations/s |\n| ad.name_cache_lookups | lookups | lookups/s |\n| ad.name_cache_hits | hits | hits/s |\n| ad.atq_average_request_latency | time | seconds |\n| ad.atq_outstanding_requests | outstanding | requests |\n| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |\n| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |\n| ad.dra_replication_properties_updated | inbound, outbound | properties/s |\n| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |\n| ad.dra_replication_pending_syncs | pending | syncs |\n| ad.dra_replication_sync_requests | requests | requests/s |\n| ad.ds_threads | in_use | threads |\n| ad.ldap_last_bind_time | last_bind | seconds |\n| ad.binds | binds | binds/s |\n| ad.ldap_searches | searches | searches/s |\n| adfs.ad_login_connection_failures | connection | failures/s |\n| adfs.certificate_authentications | authentications | authentications/s |\n| adfs.db_artifact_failures | connection | failures/s |\n| adfs.db_artifact_query_time_seconds | query_time | seconds/s |\n| adfs.db_config_failures | connection | failures/s |\n| adfs.db_config_query_time_seconds | query_time | seconds/s |\n| adfs.device_authentications | authentications | authentications/s |\n| adfs.external_authentications | success, failure | authentications/s |\n| 
adfs.federated_authentications | authentications | authentications/s |\n| adfs.federation_metadata_requests | requests | requests/s |\n| adfs.oauth_authorization_requests | requests | requests/s |\n| adfs.oauth_client_authentications | success, failure | authentications/s |\n| adfs.oauth_client_credentials_requests | success, failure | requests/s |\n| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |\n| adfs.oauth_client_windows_authentications | success, failure | authentications/s |\n| adfs.oauth_logon_certificate_requests | success, failure | requests/s |\n| adfs.oauth_password_grant_requests | success, failure | requests/s |\n| adfs.oauth_token_requests_success | success | requests/s |\n| adfs.passive_requests | passive | requests/s |\n| adfs.passport_authentications | passport | authentications/s |\n| adfs.password_change_requests | success, failure | requests/s |\n| adfs.samlp_token_requests_success | success | requests/s |\n| adfs.sso_authentications | success, failure | authentications/s |\n| adfs.token_requests | requests | requests/s |\n| adfs.userpassword_authentications | success, failure | authentications/s |\n| adfs.windows_integrated_authentications | authentications | authentications/s |\n| adfs.wsfed_token_requests_success | success | requests/s |\n| adfs.wstrust_token_requests_success | success | requests/s |\n| exchange.activesync_ping_cmds_pending | pending | commands |\n| exchange.activesync_requests | received | requests/s |\n| exchange.activesync_sync_cmds | processed | commands/s |\n| exchange.autodiscover_requests | processed | requests/s |\n| exchange.avail_service_requests | serviced | requests/s |\n| exchange.owa_current_unique_users | logged-in | users |\n| exchange.owa_requests_total | handled | requests/s |\n| exchange.rpc_active_user_count | active | users |\n| exchange.rpc_avg_latency | latency | seconds |\n| exchange.rpc_connection_count | connections | connections |\n| exchange.rpc_operations | operations | operations/s |\n| exchange.rpc_requests | processed | requests |\n| exchange.rpc_user_count | users | users |\n| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_poison | low, high, none, normal | messages/s |\n| hyperv.vms_health | ok, critical | vms |\n| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |\n| hyperv.root_partition_attached_devices | attached | devices |\n| hyperv.root_partition_deposited_pages | deposited | pages |\n| hyperv.root_partition_skipped_interrupts | skipped | interrupts |\n| hyperv.root_partition_device_dma_errors | illegal_dma | requests |\n| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | 
requests |\n| hyperv.root_partition_device_interrupt_throttle_events | throttling | events |\n| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |\n| hyperv.root_partition_address_space | address_spaces | address spaces |\n| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |\n| hyperv.root_partition_virtual_tlb_pages | used | pages |\n\n### Per cpu core\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| core | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |\n| windows.cpu_core_interrupts | interrupts | interrupts/s |\n| windows.cpu_core_dpcs | dpcs | dpcs/s |\n| windows.cpu_core_cstate | c1, c2, c3 | percentage |\n\n### Per logical disk\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.logical_disk_utilization | free, used | bytes |\n| windows.logical_disk_bandwidth | read, write | bytes/s |\n| windows.logical_disk_operations | reads, writes | operations/s |\n| windows.logical_disk_latency | read, write | seconds |\n\n### Per network device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| nic | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.net_nic_bandwidth | received, sent | kilobits/s |\n| windows.net_nic_packets | received, sent | packets/s |\n| windows.net_nic_errors | inbound, outbound | errors/s |\n| windows.net_nic_discarded | inbound, outbound | discards/s |\n\n### Per thermalzone\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thermalzone | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.thermalzone_temperature | temperature | celsius |\n\n### Per service\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |\n| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |\n\n### Per website\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| website | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.website_traffic | received, sent | bytes/s |\n| iis.website_requests_rate | requests | requests/s |\n| iis.website_active_connections_count | active | connections |\n| iis.website_users_count | anonymous, non_anonymous | users |\n| iis.website_connection_attempts_rate | connection | attempts/s |\n| iis.website_isapi_extension_requests_count | isapi | requests |\n| iis.website_isapi_extension_requests_rate | isapi | requests/s |\n| iis.website_ftp_file_transfer_rate | received, sent | files/s |\n| iis.website_logon_attempts_rate | logon | attempts/s |\n| iis.website_errors_rate | document_locked, document_not_found | errors/s |\n| iis.website_uptime | document_locked, document_not_found | seconds |\n\n### Per mssql instance\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| 
mssql.instance_accessmethods_page_splits | page | splits/s |\n| mssql.instance_cache_hit_ratio | hit_ratio | percentage |\n| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |\n| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |\n| mssql.instance_bufman_iops | read, written | iops |\n| mssql.instance_blocked_processes | blocked | processes |\n| mssql.instance_user_connection | user | connections |\n| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |\n| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |\n| mssql.instance_memmgr_pending_memory_grants | pending | processes |\n| mssql.instance_memmgr_server_memory | memory | bytes |\n| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |\n| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |\n| mssql.instance_sqlstats_batch_requests | batch | requests/s |\n| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |\n| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |\n| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |\n\n### Per database\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n| database | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.database_active_transactions | active | transactions |\n| mssql.database_backup_restore_operations | backup | operations/s |\n| mssql.database_data_files_size | size | bytes |\n| mssql.database_log_flushed | flushed | bytes/s |\n| mssql.database_log_flushes | log | flushes/s |\n| mssql.database_transactions | transactions | transactions/s |\n| mssql.database_write_transactions | write | transactions/s |\n\n### Per certificate template\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cert_template | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adcs.cert_template_requests | requests | requests/s |\n| adcs.cert_template_failed_requests | failed | requests/s |\n| adcs.cert_template_issued_requests | issued | requests/s |\n| adcs.cert_template_pending_requests | pending | requests/s |\n| adcs.cert_template_request_processing_time | processing_time | seconds |\n| adcs.cert_template_retrievals | retrievals | retrievals/s |\n| adcs.cert_template_retrieval_processing_time | processing_time | seconds |\n| adcs.cert_template_request_cryptographic_signing_time | singing_time | seconds |\n| adcs.cert_template_request_policy_module_processing | processing_time | seconds |\n| adcs.cert_template_challenge_responses | challenge | responses/s |\n| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |\n| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |\n| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |\n\n### Per process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| process | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netframework.clrexception_thrown | exceptions | 
exceptions/s |\n| netframework.clrexception_filters | filters | filters/s |\n| netframework.clrexception_finallys | finallys | finallys/s |\n| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |\n| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |\n| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |\n| netframework.clrinterop_interop_stubs_created | created | stubs/s |\n| netframework.clrjit_methods | jit-compiled | methods/s |\n| netframework.clrjit_time | time | percentage |\n| netframework.clrjit_standard_failures | failures | failures/s |\n| netframework.clrjit_il_bytes | compiled_msil | bytes/s |\n| netframework.clrloading_loader_heap_size | committed | bytes |\n| netframework.clrloading_appdomains_loaded | loaded | domain/s |\n| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |\n| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |\n| netframework.clrloading_classes_loaded | loaded | classes/s |\n| netframework.clrloading_class_load_failures | class_load | failures/s |\n| netframework.clrlocksandthreads_queue_length | threads | threads/s |\n| netframework.clrlocksandthreads_current_logical_threads | logical | threads |\n| netframework.clrlocksandthreads_current_physical_threads | physical | threads |\n| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |\n| netframework.clrlocksandthreads_contentions | contentions | contentions/s |\n| netframework.clrmemory_allocated_bytes | allocated | bytes/s |\n| netframework.clrmemory_finalization_survivors | survived | objects |\n| netframework.clrmemory_heap_size | heap | bytes |\n| netframework.clrmemory_promoted | promoted | bytes |\n| netframework.clrmemory_number_gc_handles | used | handles |\n| netframework.clrmemory_collections | gc | gc/s |\n| netframework.clrmemory_induced_gc | gc | gc/s |\n| netframework.clrmemory_number_pinned_objects | pinned | objects |\n| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |\n| netframework.clrmemory_committed | committed | bytes |\n| netframework.clrmemory_reserved | reserved | bytes |\n| netframework.clrmemory_gc_time | time | percentage |\n| netframework.clrremoting_channels | registered | channels/s |\n| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |\n| netframework.clrremoting_context_bound_objects | allocated | objects/s |\n| netframework.clrremoting_context_proxies | objects | objects/s |\n| netframework.clrremoting_contexts | contexts | contexts |\n| netframework.clrremoting_remote_calls | rpc | calls/s |\n| netframework.clrsecurity_link_time_checks | linktime | checks/s |\n| netframework.clrsecurity_checks_time | time | percentage |\n| netframework.clrsecurity_stack_walk_depth | stack | depth |\n| netframework.clrsecurity_runtime_checks | runtime | checks/s |\n\n### Per exchange workload\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.workload_active_tasks | active | tasks |\n| exchange.workload_completed_tasks | completed | tasks/s |\n| exchange.workload_queued_tasks | queued | tasks/s |\n| exchange.workload_yielded_tasks | yielded | tasks/s |\n| exchange.workload_activity_status | active, paused | status |\n\n### Per ldap process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit 
|\n|:------|:----------|:----|\n| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |\n| exchange.ldap_read_time | read | seconds |\n| exchange.ldap_search_time | search | seconds |\n| exchange.ldap_write_time | write | seconds |\n| exchange.ldap_timeout_errors | timeout | errors/s |\n\n### Per http proxy\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.http_proxy_avg_auth_latency | latency | seconds |\n| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |\n| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |\n| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |\n| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |\n| exchange.http_proxy_requests | processed | requests/s |\n\n### Per vm\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_name | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_cpu_usage | gues, hypervisor, remote | percentage |\n| hyperv.vm_memory_physical | assigned_memory | MiB |\n| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |\n| hyperv.vm_memory_pressure_current | pressure | percentage |\n| hyperv.vm_vid_physical_pages_allocated | allocated | pages |\n| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |\n\n### Per vm device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_device_bytes | read, written | bytes/s |\n| hyperv.vm_device_operations | read, write | operations/s |\n| hyperv.vm_device_errors | errors | errors/s |\n\n### Per vm interface\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_interface | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_interface_bytes | received, sent | bytes/s |\n| hyperv.vm_interface_packets | received, sent | packets/s |\n| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |\n\n### Per vswitch\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vswitch | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vswitch_bytes | received, sent | bytes/s |\n| hyperv.vswitch_packets | received, sent | packets/s |\n| hyperv.vswitch_directed_packets | received, sent | packets/s |\n| hyperv.vswitch_broadcast_packets | received, sent | packets/s |\n| hyperv.vswitch_multicast_packets | received, sent | packets/s |\n| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_packets_flooded | flooded | packets/s |\n| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |\n| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-windows-Active_Directory", "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/windows/metadata.yaml", @@ -17236,10 +17617,10 @@ } }, "overview": "# HyperV\n\nPlugin: go.d.plugin\nModule: windows\n\n## Overview\n\nThis collector monitors the performance of Windows machines, collects both host metrics and metrics from various Windows applications (e.g. 
Active Directory, MSSQL).\n\n\nIt collects metrics by periodically sending HTTP requests to [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt detects Windows exporter instances running on localhost (requires using [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)).\n\nUsing the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nData collection affects the CPU usage of the Windows host. CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\n",
-    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Windows exporter\n\nTo install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/windows.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/windows.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n```yaml\njobs:\n - name: win_server\n url: https://192.0.2.1:9182/metrics\n tls_skip_verify: yes\n\n```\n##### Virtual Node\n\nThe Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.\nYou can create a virtual node for all your Windows machines and control them as separate entities.\n\nTo make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:\n\n> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.\n\n```yaml\n# /etc/netdata/vnodes/vnodes.conf\n- hostname: win_server\n guid: \n```\n\n\n```yaml\njobs:\n - name: win_server\n vnode: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from multiple remote instances.\n\n\n```yaml\njobs:\n - name: win_server1\n url: http://192.0.2.1:9182/metrics\n\n - name: win_server2\n url: http://192.0.2.2:9182/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Windows exporter\n\nTo install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/windows.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/windows.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. 
| | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n```yaml\njobs:\n - name: win_server\n url: https://192.0.2.1:9182/metrics\n tls_skip_verify: yes\n\n```\n##### Virtual Node\n\nThe Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.\nYou can create a virtual node for all your Windows machines and control them as separate entities.\n\nTo make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:\n\n> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.\n\n```yaml\n# /etc/netdata/vnodes/vnodes.conf\n- hostname: win_server\n guid: \n```\n\n\n```yaml\njobs:\n - name: win_server\n vnode: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from multiple remote instances.\n\n\n```yaml\njobs:\n - name: win_server1\n url: http://192.0.2.1:9182/metrics\n\n - name: win_server2\n url: http://192.0.2.2:9182/metrics\n\n```\n",
    "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `windows` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m windows\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `windows` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep windows\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep windows /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep windows\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ windows_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.cpu_utilization_total | average CPU utilization over the last 10 minutes |\n| [ windows_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.memory_utilization | memory utilization |\n| [ windows_inbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of inbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of outbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_inbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of inbound errors for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of outbound errors for the network interface in the last 10 minutes |\n| [ windows_disk_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.logical_disk_space_usage | disk space utilization |\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThe collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\nSupported collectors:\n\n- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)\n- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)\n- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)\n- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)\n- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)\n- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)\n- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)\n- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)\n- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)\n- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)\n- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)\n- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)\n- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)\n- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)\n- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)\n- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)\n- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md)\n- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md)\n- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md)\n- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md)\n- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md)\n- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md)\n- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md)\n- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)\n- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)\n\n\n### Per Active Directory instance\n\nThese metrics refer to the entire monitored host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |\n| windows.memory_utilization | available, used | bytes |\n| windows.memory_page_faults | page_faults | events/s |\n| windows.memory_swap_utilization 
| available, used | bytes |\n| windows.memory_swap_operations | read, write | operations/s |\n| windows.memory_swap_pages | read, written | pages/s |\n| windows.memory_cached | cached | KiB |\n| windows.memory_cache_faults | cache_faults | events/s |\n| windows.memory_system_pool | paged, non-paged | bytes |\n| windows.tcp_conns_established | ipv4, ipv6 | connections |\n| windows.tcp_conns_active | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |\n| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |\n| windows.tcp_segments_received | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |\n| windows.os_processes | processes | number |\n| windows.os_users | users | users |\n| windows.os_visible_memory_usage | free, used | bytes |\n| windows.os_paging_files_usage | free, used | bytes |\n| windows.system_threads | threads | number |\n| windows.system_uptime | time | seconds |\n| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |\n| windows.processes_cpu_utilization | a dimension per process | percentage |\n| windows.processes_handles | a dimension per process | handles |\n| windows.processes_io_bytes | a dimension per process | bytes/s |\n| windows.processes_io_operations | a dimension per process | operations/s |\n| windows.processes_page_faults | a dimension per process | pgfaults/s |\n| windows.processes_page_file_bytes | a dimension per process | bytes |\n| windows.processes_pool_bytes | a dimension per process | bytes |\n| windows.processes_threads | a dimension per process | threads |\n| ad.database_operations | add, delete, modify, recycle | operations/s |\n| ad.directory_operations | read, write, search | operations/s |\n| ad.name_cache_lookups | lookups | lookups/s |\n| ad.name_cache_hits | hits | hits/s |\n| ad.atq_average_request_latency | time | seconds |\n| ad.atq_outstanding_requests | outstanding | requests |\n| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |\n| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |\n| ad.dra_replication_properties_updated | inbound, outbound | properties/s |\n| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |\n| ad.dra_replication_pending_syncs | pending | syncs |\n| ad.dra_replication_sync_requests | requests | requests/s |\n| ad.ds_threads | in_use | threads |\n| ad.ldap_last_bind_time | last_bind | seconds |\n| ad.binds | binds | binds/s |\n| ad.ldap_searches | searches | searches/s |\n| adfs.ad_login_connection_failures | connection | failures/s |\n| adfs.certificate_authentications | authentications | authentications/s |\n| adfs.db_artifact_failures | connection | failures/s |\n| adfs.db_artifact_query_time_seconds | query_time | seconds/s |\n| adfs.db_config_failures | connection | failures/s |\n| adfs.db_config_query_time_seconds | query_time | seconds/s |\n| adfs.device_authentications | authentications | authentications/s |\n| adfs.external_authentications | success, failure | authentications/s |\n| adfs.federated_authentications | authentications | 
authentications/s |\n| adfs.federation_metadata_requests | requests | requests/s |\n| adfs.oauth_authorization_requests | requests | requests/s |\n| adfs.oauth_client_authentications | success, failure | authentications/s |\n| adfs.oauth_client_credentials_requests | success, failure | requests/s |\n| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |\n| adfs.oauth_client_windows_authentications | success, failure | authentications/s |\n| adfs.oauth_logon_certificate_requests | success, failure | requests/s |\n| adfs.oauth_password_grant_requests | success, failure | requests/s |\n| adfs.oauth_token_requests_success | success | requests/s |\n| adfs.passive_requests | passive | requests/s |\n| adfs.passport_authentications | passport | authentications/s |\n| adfs.password_change_requests | success, failure | requests/s |\n| adfs.samlp_token_requests_success | success | requests/s |\n| adfs.sso_authentications | success, failure | authentications/s |\n| adfs.token_requests | requests | requests/s |\n| adfs.userpassword_authentications | success, failure | authentications/s |\n| adfs.windows_integrated_authentications | authentications | authentications/s |\n| adfs.wsfed_token_requests_success | success | requests/s |\n| adfs.wstrust_token_requests_success | success | requests/s |\n| exchange.activesync_ping_cmds_pending | pending | commands |\n| exchange.activesync_requests | received | requests/s |\n| exchange.activesync_sync_cmds | processed | commands/s |\n| exchange.autodiscover_requests | processed | requests/s |\n| exchange.avail_service_requests | serviced | requests/s |\n| exchange.owa_current_unique_users | logged-in | users |\n| exchange.owa_requests_total | handled | requests/s |\n| exchange.rpc_active_user_count | active | users |\n| exchange.rpc_avg_latency | latency | seconds |\n| exchange.rpc_connection_count | connections | connections |\n| exchange.rpc_operations | operations | operations/s |\n| exchange.rpc_requests | processed | requests |\n| exchange.rpc_user_count | users | users |\n| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_poison | low, high, none, normal | messages/s |\n| hyperv.vms_health | ok, critical | vms |\n| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |\n| hyperv.root_partition_attached_devices | attached | devices |\n| hyperv.root_partition_deposited_pages | deposited | pages |\n| hyperv.root_partition_skipped_interrupts | skipped | interrupts |\n| hyperv.root_partition_device_dma_errors | illegal_dma | requests |\n| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | requests |\n| 
hyperv.root_partition_device_interrupt_throttle_events | throttling | events |\n| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |\n| hyperv.root_partition_address_space | address_spaces | address spaces |\n| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |\n| hyperv.root_partition_virtual_tlb_pages | used | pages |\n\n### Per cpu core\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| core | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |\n| windows.cpu_core_interrupts | interrupts | interrupts/s |\n| windows.cpu_core_dpcs | dpcs | dpcs/s |\n| windows.cpu_core_cstate | c1, c2, c3 | percentage |\n\n### Per logical disk\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.logical_disk_utilization | free, used | bytes |\n| windows.logical_disk_bandwidth | read, write | bytes/s |\n| windows.logical_disk_operations | reads, writes | operations/s |\n| windows.logical_disk_latency | read, write | seconds |\n\n### Per network device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| nic | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.net_nic_bandwidth | received, sent | kilobits/s |\n| windows.net_nic_packets | received, sent | packets/s |\n| windows.net_nic_errors | inbound, outbound | errors/s |\n| windows.net_nic_discarded | inbound, outbound | discards/s |\n\n### Per thermalzone\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thermalzone | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.thermalzone_temperature | temperature | celsius |\n\n### Per service\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |\n| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |\n\n### Per website\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| website | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.website_traffic | received, sent | bytes/s |\n| iis.website_requests_rate | requests | requests/s |\n| iis.website_active_connections_count | active | connections |\n| iis.website_users_count | anonymous, non_anonymous | users |\n| iis.website_connection_attempts_rate | connection | attempts/s |\n| iis.website_isapi_extension_requests_count | isapi | requests |\n| iis.website_isapi_extension_requests_rate | isapi | requests/s |\n| iis.website_ftp_file_transfer_rate | received, sent | files/s |\n| iis.website_logon_attempts_rate | logon | attempts/s |\n| iis.website_errors_rate | document_locked, document_not_found | errors/s |\n| iis.website_uptime | document_locked, document_not_found | seconds |\n\n### Per mssql instance\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.instance_accessmethods_page_splits | 
page | splits/s |\n| mssql.instance_cache_hit_ratio | hit_ratio | percentage |\n| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |\n| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |\n| mssql.instance_bufman_iops | read, written | iops |\n| mssql.instance_blocked_processes | blocked | processes |\n| mssql.instance_user_connection | user | connections |\n| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |\n| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |\n| mssql.instance_memmgr_pending_memory_grants | pending | processes |\n| mssql.instance_memmgr_server_memory | memory | bytes |\n| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |\n| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |\n| mssql.instance_sqlstats_batch_requests | batch | requests/s |\n| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |\n| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |\n| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |\n\n### Per database\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n| database | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.database_active_transactions | active | transactions |\n| mssql.database_backup_restore_operations | backup | operations/s |\n| mssql.database_data_files_size | size | bytes |\n| mssql.database_log_flushed | flushed | bytes/s |\n| mssql.database_log_flushes | log | flushes/s |\n| mssql.database_transactions | transactions | transactions/s |\n| mssql.database_write_transactions | write | transactions/s |\n\n### Per certificate template\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cert_template | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adcs.cert_template_requests | requests | requests/s |\n| adcs.cert_template_failed_requests | failed | requests/s |\n| adcs.cert_template_issued_requests | issued | requests/s |\n| adcs.cert_template_pending_requests | pending | requests/s |\n| adcs.cert_template_request_processing_time | processing_time | seconds |\n| adcs.cert_template_retrievals | retrievals | retrievals/s |\n| adcs.cert_template_retrieval_processing_time | processing_time | seconds |\n| adcs.cert_template_request_cryptographic_signing_time | singing_time | seconds |\n| adcs.cert_template_request_policy_module_processing | processing_time | seconds |\n| adcs.cert_template_challenge_responses | challenge | responses/s |\n| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |\n| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |\n| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |\n\n### Per process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| process | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netframework.clrexception_thrown | exceptions | exceptions/s |\n| 
netframework.clrexception_filters | filters | filters/s |\n| netframework.clrexception_finallys | finallys | finallys/s |\n| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |\n| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |\n| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |\n| netframework.clrinterop_interop_stubs_created | created | stubs/s |\n| netframework.clrjit_methods | jit-compiled | methods/s |\n| netframework.clrjit_time | time | percentage |\n| netframework.clrjit_standard_failures | failures | failures/s |\n| netframework.clrjit_il_bytes | compiled_msil | bytes/s |\n| netframework.clrloading_loader_heap_size | committed | bytes |\n| netframework.clrloading_appdomains_loaded | loaded | domain/s |\n| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |\n| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |\n| netframework.clrloading_classes_loaded | loaded | classes/s |\n| netframework.clrloading_class_load_failures | class_load | failures/s |\n| netframework.clrlocksandthreads_queue_length | threads | threads/s |\n| netframework.clrlocksandthreads_current_logical_threads | logical | threads |\n| netframework.clrlocksandthreads_current_physical_threads | physical | threads |\n| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |\n| netframework.clrlocksandthreads_contentions | contentions | contentions/s |\n| netframework.clrmemory_allocated_bytes | allocated | bytes/s |\n| netframework.clrmemory_finalization_survivors | survived | objects |\n| netframework.clrmemory_heap_size | heap | bytes |\n| netframework.clrmemory_promoted | promoted | bytes |\n| netframework.clrmemory_number_gc_handles | used | handles |\n| netframework.clrmemory_collections | gc | gc/s |\n| netframework.clrmemory_induced_gc | gc | gc/s |\n| netframework.clrmemory_number_pinned_objects | pinned | objects |\n| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |\n| netframework.clrmemory_committed | committed | bytes |\n| netframework.clrmemory_reserved | reserved | bytes |\n| netframework.clrmemory_gc_time | time | percentage |\n| netframework.clrremoting_channels | registered | channels/s |\n| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |\n| netframework.clrremoting_context_bound_objects | allocated | objects/s |\n| netframework.clrremoting_context_proxies | objects | objects/s |\n| netframework.clrremoting_contexts | contexts | contexts |\n| netframework.clrremoting_remote_calls | rpc | calls/s |\n| netframework.clrsecurity_link_time_checks | linktime | checks/s |\n| netframework.clrsecurity_checks_time | time | percentage |\n| netframework.clrsecurity_stack_walk_depth | stack | depth |\n| netframework.clrsecurity_runtime_checks | runtime | checks/s |\n\n### Per exchange workload\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.workload_active_tasks | active | tasks |\n| exchange.workload_completed_tasks | completed | tasks/s |\n| exchange.workload_queued_tasks | queued | tasks/s |\n| exchange.workload_yielded_tasks | yielded | tasks/s |\n| exchange.workload_activity_status | active, paused | status |\n\n### Per ldap process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit 
|\n|:------|:----------|:----|\n| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |\n| exchange.ldap_read_time | read | seconds |\n| exchange.ldap_search_time | search | seconds |\n| exchange.ldap_write_time | write | seconds |\n| exchange.ldap_timeout_errors | timeout | errors/s |\n\n### Per http proxy\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.http_proxy_avg_auth_latency | latency | seconds |\n| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |\n| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |\n| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |\n| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |\n| exchange.http_proxy_requests | processed | requests/s |\n\n### Per vm\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_name | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_cpu_usage | gues, hypervisor, remote | percentage |\n| hyperv.vm_memory_physical | assigned_memory | MiB |\n| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |\n| hyperv.vm_memory_pressure_current | pressure | percentage |\n| hyperv.vm_vid_physical_pages_allocated | allocated | pages |\n| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |\n\n### Per vm device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_device_bytes | read, written | bytes/s |\n| hyperv.vm_device_operations | read, write | operations/s |\n| hyperv.vm_device_errors | errors | errors/s |\n\n### Per vm interface\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_interface | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_interface_bytes | received, sent | bytes/s |\n| hyperv.vm_interface_packets | received, sent | packets/s |\n| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |\n\n### Per vswitch\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vswitch | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vswitch_bytes | received, sent | bytes/s |\n| hyperv.vswitch_packets | received, sent | packets/s |\n| hyperv.vswitch_directed_packets | received, sent | packets/s |\n| hyperv.vswitch_broadcast_packets | received, sent | packets/s |\n| hyperv.vswitch_multicast_packets | received, sent | packets/s |\n| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_packets_flooded | flooded | packets/s |\n| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |\n| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |\n\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThe collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\nSupported collectors:\n\n- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)\n- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)\n- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)\n- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)\n- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)\n- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)\n- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)\n- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)\n- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)\n- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)\n- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)\n- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)\n- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)\n- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)\n- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)\n- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)\n- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-exceptions)\n- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-interop)\n- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-jit)\n- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-loading)\n- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-locks-and-threads)\n- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-memory)\n- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-remoting)\n- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)\n- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)\n\n\n### Per Active Directory instance\n\nThese metrics refer to the entire monitored host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |\n| windows.memory_utilization | available, used | bytes |\n| windows.memory_page_faults | page_faults | events/s |\n| 
windows.memory_swap_utilization | available, used | bytes |\n| windows.memory_swap_operations | read, write | operations/s |\n| windows.memory_swap_pages | read, written | pages/s |\n| windows.memory_cached | cached | KiB |\n| windows.memory_cache_faults | cache_faults | events/s |\n| windows.memory_system_pool | paged, non-paged | bytes |\n| windows.tcp_conns_established | ipv4, ipv6 | connections |\n| windows.tcp_conns_active | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |\n| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |\n| windows.tcp_segments_received | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |\n| windows.os_processes | processes | number |\n| windows.os_users | users | users |\n| windows.os_visible_memory_usage | free, used | bytes |\n| windows.os_paging_files_usage | free, used | bytes |\n| windows.system_threads | threads | number |\n| windows.system_uptime | time | seconds |\n| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |\n| windows.processes_cpu_utilization | a dimension per process | percentage |\n| windows.processes_handles | a dimension per process | handles |\n| windows.processes_io_bytes | a dimension per process | bytes/s |\n| windows.processes_io_operations | a dimension per process | operations/s |\n| windows.processes_page_faults | a dimension per process | pgfaults/s |\n| windows.processes_page_file_bytes | a dimension per process | bytes |\n| windows.processes_pool_bytes | a dimension per process | bytes |\n| windows.processes_threads | a dimension per process | threads |\n| ad.database_operations | add, delete, modify, recycle | operations/s |\n| ad.directory_operations | read, write, search | operations/s |\n| ad.name_cache_lookups | lookups | lookups/s |\n| ad.name_cache_hits | hits | hits/s |\n| ad.atq_average_request_latency | time | seconds |\n| ad.atq_outstanding_requests | outstanding | requests |\n| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |\n| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |\n| ad.dra_replication_properties_updated | inbound, outbound | properties/s |\n| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |\n| ad.dra_replication_pending_syncs | pending | syncs |\n| ad.dra_replication_sync_requests | requests | requests/s |\n| ad.ds_threads | in_use | threads |\n| ad.ldap_last_bind_time | last_bind | seconds |\n| ad.binds | binds | binds/s |\n| ad.ldap_searches | searches | searches/s |\n| adfs.ad_login_connection_failures | connection | failures/s |\n| adfs.certificate_authentications | authentications | authentications/s |\n| adfs.db_artifact_failures | connection | failures/s |\n| adfs.db_artifact_query_time_seconds | query_time | seconds/s |\n| adfs.db_config_failures | connection | failures/s |\n| adfs.db_config_query_time_seconds | query_time | seconds/s |\n| adfs.device_authentications | authentications | authentications/s |\n| adfs.external_authentications | success, failure | authentications/s |\n| 
adfs.federated_authentications | authentications | authentications/s |\n| adfs.federation_metadata_requests | requests | requests/s |\n| adfs.oauth_authorization_requests | requests | requests/s |\n| adfs.oauth_client_authentications | success, failure | authentications/s |\n| adfs.oauth_client_credentials_requests | success, failure | requests/s |\n| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |\n| adfs.oauth_client_windows_authentications | success, failure | authentications/s |\n| adfs.oauth_logon_certificate_requests | success, failure | requests/s |\n| adfs.oauth_password_grant_requests | success, failure | requests/s |\n| adfs.oauth_token_requests_success | success | requests/s |\n| adfs.passive_requests | passive | requests/s |\n| adfs.passport_authentications | passport | authentications/s |\n| adfs.password_change_requests | success, failure | requests/s |\n| adfs.samlp_token_requests_success | success | requests/s |\n| adfs.sso_authentications | success, failure | authentications/s |\n| adfs.token_requests | requests | requests/s |\n| adfs.userpassword_authentications | success, failure | authentications/s |\n| adfs.windows_integrated_authentications | authentications | authentications/s |\n| adfs.wsfed_token_requests_success | success | requests/s |\n| adfs.wstrust_token_requests_success | success | requests/s |\n| exchange.activesync_ping_cmds_pending | pending | commands |\n| exchange.activesync_requests | received | requests/s |\n| exchange.activesync_sync_cmds | processed | commands/s |\n| exchange.autodiscover_requests | processed | requests/s |\n| exchange.avail_service_requests | serviced | requests/s |\n| exchange.owa_current_unique_users | logged-in | users |\n| exchange.owa_requests_total | handled | requests/s |\n| exchange.rpc_active_user_count | active | users |\n| exchange.rpc_avg_latency | latency | seconds |\n| exchange.rpc_connection_count | connections | connections |\n| exchange.rpc_operations | operations | operations/s |\n| exchange.rpc_requests | processed | requests |\n| exchange.rpc_user_count | users | users |\n| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_poison | low, high, none, normal | messages/s |\n| hyperv.vms_health | ok, critical | vms |\n| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |\n| hyperv.root_partition_attached_devices | attached | devices |\n| hyperv.root_partition_deposited_pages | deposited | pages |\n| hyperv.root_partition_skipped_interrupts | skipped | interrupts |\n| hyperv.root_partition_device_dma_errors | illegal_dma | requests |\n| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | 
requests |\n| hyperv.root_partition_device_interrupt_throttle_events | throttling | events |\n| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |\n| hyperv.root_partition_address_space | address_spaces | address spaces |\n| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |\n| hyperv.root_partition_virtual_tlb_pages | used | pages |\n\n### Per cpu core\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| core | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |\n| windows.cpu_core_interrupts | interrupts | interrupts/s |\n| windows.cpu_core_dpcs | dpcs | dpcs/s |\n| windows.cpu_core_cstate | c1, c2, c3 | percentage |\n\n### Per logical disk\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.logical_disk_utilization | free, used | bytes |\n| windows.logical_disk_bandwidth | read, write | bytes/s |\n| windows.logical_disk_operations | reads, writes | operations/s |\n| windows.logical_disk_latency | read, write | seconds |\n\n### Per network device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| nic | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.net_nic_bandwidth | received, sent | kilobits/s |\n| windows.net_nic_packets | received, sent | packets/s |\n| windows.net_nic_errors | inbound, outbound | errors/s |\n| windows.net_nic_discarded | inbound, outbound | discards/s |\n\n### Per thermalzone\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thermalzone | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.thermalzone_temperature | temperature | celsius |\n\n### Per service\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |\n| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |\n\n### Per website\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| website | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.website_traffic | received, sent | bytes/s |\n| iis.website_requests_rate | requests | requests/s |\n| iis.website_active_connections_count | active | connections |\n| iis.website_users_count | anonymous, non_anonymous | users |\n| iis.website_connection_attempts_rate | connection | attempts/s |\n| iis.website_isapi_extension_requests_count | isapi | requests |\n| iis.website_isapi_extension_requests_rate | isapi | requests/s |\n| iis.website_ftp_file_transfer_rate | received, sent | files/s |\n| iis.website_logon_attempts_rate | logon | attempts/s |\n| iis.website_errors_rate | document_locked, document_not_found | errors/s |\n| iis.website_uptime | document_locked, document_not_found | seconds |\n\n### Per mssql instance\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| 
mssql.instance_accessmethods_page_splits | page | splits/s |\n| mssql.instance_cache_hit_ratio | hit_ratio | percentage |\n| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |\n| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |\n| mssql.instance_bufman_iops | read, written | iops |\n| mssql.instance_blocked_processes | blocked | processes |\n| mssql.instance_user_connection | user | connections |\n| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |\n| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |\n| mssql.instance_memmgr_pending_memory_grants | pending | processes |\n| mssql.instance_memmgr_server_memory | memory | bytes |\n| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |\n| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |\n| mssql.instance_sqlstats_batch_requests | batch | requests/s |\n| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |\n| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |\n| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |\n\n### Per database\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n| database | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.database_active_transactions | active | transactions |\n| mssql.database_backup_restore_operations | backup | operations/s |\n| mssql.database_data_files_size | size | bytes |\n| mssql.database_log_flushed | flushed | bytes/s |\n| mssql.database_log_flushes | log | flushes/s |\n| mssql.database_transactions | transactions | transactions/s |\n| mssql.database_write_transactions | write | transactions/s |\n\n### Per certificate template\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cert_template | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adcs.cert_template_requests | requests | requests/s |\n| adcs.cert_template_failed_requests | failed | requests/s |\n| adcs.cert_template_issued_requests | issued | requests/s |\n| adcs.cert_template_pending_requests | pending | requests/s |\n| adcs.cert_template_request_processing_time | processing_time | seconds |\n| adcs.cert_template_retrievals | retrievals | retrievals/s |\n| adcs.cert_template_retrieval_processing_time | processing_time | seconds |\n| adcs.cert_template_request_cryptographic_signing_time | singing_time | seconds |\n| adcs.cert_template_request_policy_module_processing | processing_time | seconds |\n| adcs.cert_template_challenge_responses | challenge | responses/s |\n| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |\n| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |\n| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |\n\n### Per process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| process | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netframework.clrexception_thrown | exceptions | 
exceptions/s |\n| netframework.clrexception_filters | filters | filters/s |\n| netframework.clrexception_finallys | finallys | finallys/s |\n| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |\n| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |\n| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |\n| netframework.clrinterop_interop_stubs_created | created | stubs/s |\n| netframework.clrjit_methods | jit-compiled | methods/s |\n| netframework.clrjit_time | time | percentage |\n| netframework.clrjit_standard_failures | failures | failures/s |\n| netframework.clrjit_il_bytes | compiled_msil | bytes/s |\n| netframework.clrloading_loader_heap_size | committed | bytes |\n| netframework.clrloading_appdomains_loaded | loaded | domain/s |\n| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |\n| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |\n| netframework.clrloading_classes_loaded | loaded | classes/s |\n| netframework.clrloading_class_load_failures | class_load | failures/s |\n| netframework.clrlocksandthreads_queue_length | threads | threads/s |\n| netframework.clrlocksandthreads_current_logical_threads | logical | threads |\n| netframework.clrlocksandthreads_current_physical_threads | physical | threads |\n| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |\n| netframework.clrlocksandthreads_contentions | contentions | contentions/s |\n| netframework.clrmemory_allocated_bytes | allocated | bytes/s |\n| netframework.clrmemory_finalization_survivors | survived | objects |\n| netframework.clrmemory_heap_size | heap | bytes |\n| netframework.clrmemory_promoted | promoted | bytes |\n| netframework.clrmemory_number_gc_handles | used | handles |\n| netframework.clrmemory_collections | gc | gc/s |\n| netframework.clrmemory_induced_gc | gc | gc/s |\n| netframework.clrmemory_number_pinned_objects | pinned | objects |\n| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |\n| netframework.clrmemory_committed | committed | bytes |\n| netframework.clrmemory_reserved | reserved | bytes |\n| netframework.clrmemory_gc_time | time | percentage |\n| netframework.clrremoting_channels | registered | channels/s |\n| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |\n| netframework.clrremoting_context_bound_objects | allocated | objects/s |\n| netframework.clrremoting_context_proxies | objects | objects/s |\n| netframework.clrremoting_contexts | contexts | contexts |\n| netframework.clrremoting_remote_calls | rpc | calls/s |\n| netframework.clrsecurity_link_time_checks | linktime | checks/s |\n| netframework.clrsecurity_checks_time | time | percentage |\n| netframework.clrsecurity_stack_walk_depth | stack | depth |\n| netframework.clrsecurity_runtime_checks | runtime | checks/s |\n\n### Per exchange workload\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.workload_active_tasks | active | tasks |\n| exchange.workload_completed_tasks | completed | tasks/s |\n| exchange.workload_queued_tasks | queued | tasks/s |\n| exchange.workload_yielded_tasks | yielded | tasks/s |\n| exchange.workload_activity_status | active, paused | status |\n\n### Per ldap process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit 
|\n|:------|:----------|:----|\n| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |\n| exchange.ldap_read_time | read | seconds |\n| exchange.ldap_search_time | search | seconds |\n| exchange.ldap_write_time | write | seconds |\n| exchange.ldap_timeout_errors | timeout | errors/s |\n\n### Per http proxy\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.http_proxy_avg_auth_latency | latency | seconds |\n| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |\n| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |\n| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |\n| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |\n| exchange.http_proxy_requests | processed | requests/s |\n\n### Per vm\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_name | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_cpu_usage | gues, hypervisor, remote | percentage |\n| hyperv.vm_memory_physical | assigned_memory | MiB |\n| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |\n| hyperv.vm_memory_pressure_current | pressure | percentage |\n| hyperv.vm_vid_physical_pages_allocated | allocated | pages |\n| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |\n\n### Per vm device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_device_bytes | read, written | bytes/s |\n| hyperv.vm_device_operations | read, write | operations/s |\n| hyperv.vm_device_errors | errors | errors/s |\n\n### Per vm interface\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_interface | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_interface_bytes | received, sent | bytes/s |\n| hyperv.vm_interface_packets | received, sent | packets/s |\n| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |\n\n### Per vswitch\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vswitch | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vswitch_bytes | received, sent | bytes/s |\n| hyperv.vswitch_packets | received, sent | packets/s |\n| hyperv.vswitch_directed_packets | received, sent | packets/s |\n| hyperv.vswitch_broadcast_packets | received, sent | packets/s |\n| hyperv.vswitch_multicast_packets | received, sent | packets/s |\n| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_packets_flooded | flooded | packets/s |\n| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |\n| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-windows-HyperV", "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/windows/metadata.yaml", @@ -17274,10 +17655,10 @@ } }, "overview": "# MS Exchange\n\nPlugin: go.d.plugin\nModule: windows\n\n## Overview\n\nThis collector monitors the performance of Windows machines, collecting both host metrics and metrics from various Windows applications (e.g.
Active Directory, MSSQL).\n\n\nIt collects metrics by periodically sending HTTP requests to the [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt detects Windows exporter instances running on localhost (requires using the [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)).\n\nUsing the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nData collection affects the CPU usage of the Windows host. CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install Windows exporter\n\nTo install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/windows.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/windows.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n```yaml\njobs:\n - name: win_server\n url: https://192.0.2.1:9182/metrics\n tls_skip_verify: yes\n\n```\n##### Virtual Node\n\nThe Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.\nYou can create a virtual node for all your Windows machines and control them as separate entities.\n\nTo make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:\n\n> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.\n\n```yaml\n# /etc/netdata/vnodes/vnodes.conf\n- hostname: win_server\n guid: \n```\n\n\n```yaml\njobs:\n - name: win_server\n vnode: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from multiple remote instances.\n\n\n```yaml\njobs:\n - name: win_server1\n url: http://192.0.2.1:9182/metrics\n\n - name: win_server2\n url: http://192.0.2.2:9182/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Windows exporter\n\nTo install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/windows.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/windows.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. 
| | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n```yaml\njobs:\n - name: win_server\n url: https://192.0.2.1:9182/metrics\n tls_skip_verify: yes\n\n```\n##### Virtual Node\n\nThe Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.\nYou can create a virtual node for all your Windows machines and control them as separate entities.\n\nTo make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:\n\n> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.\n\n```yaml\n# /etc/netdata/vnodes/vnodes.conf\n- hostname: win_server\n guid: \n```\n\n\n```yaml\njobs:\n - name: win_server\n vnode: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from multiple remote instances.\n\n\n```yaml\njobs:\n - name: win_server1\n url: http://192.0.2.1:9182/metrics\n\n - name: win_server2\n url: http://192.0.2.2:9182/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `windows` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m windows\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `windows` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep windows\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep windows /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep windows\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ windows_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.cpu_utilization_total | average CPU utilization over the last 10 minutes |\n| [ windows_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.memory_utilization | memory utilization |\n| [ windows_inbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of inbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of outbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_inbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of inbound errors for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of outbound errors for the network interface in the last 10 minutes |\n| [ windows_disk_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.logical_disk_space_usage | disk space utilization |\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThe collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\nSupported collectors:\n\n- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)\n- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)\n- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)\n- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)\n- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)\n- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)\n- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)\n- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)\n- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)\n- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)\n- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)\n- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)\n- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)\n- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)\n- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)\n- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)\n- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md)\n- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md)\n- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md)\n- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md)\n- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md)\n- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md)\n- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md)\n- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)\n- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)\n\n\n### Per Active Directory instance\n\nThese metrics refer to the entire monitored host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |\n| windows.memory_utilization | available, used | bytes |\n| windows.memory_page_faults | page_faults | events/s |\n| windows.memory_swap_utilization 
| available, used | bytes |\n| windows.memory_swap_operations | read, write | operations/s |\n| windows.memory_swap_pages | read, written | pages/s |\n| windows.memory_cached | cached | KiB |\n| windows.memory_cache_faults | cache_faults | events/s |\n| windows.memory_system_pool | paged, non-paged | bytes |\n| windows.tcp_conns_established | ipv4, ipv6 | connections |\n| windows.tcp_conns_active | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |\n| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |\n| windows.tcp_segments_received | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |\n| windows.os_processes | processes | number |\n| windows.os_users | users | users |\n| windows.os_visible_memory_usage | free, used | bytes |\n| windows.os_paging_files_usage | free, used | bytes |\n| windows.system_threads | threads | number |\n| windows.system_uptime | time | seconds |\n| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |\n| windows.processes_cpu_utilization | a dimension per process | percentage |\n| windows.processes_handles | a dimension per process | handles |\n| windows.processes_io_bytes | a dimension per process | bytes/s |\n| windows.processes_io_operations | a dimension per process | operations/s |\n| windows.processes_page_faults | a dimension per process | pgfaults/s |\n| windows.processes_page_file_bytes | a dimension per process | bytes |\n| windows.processes_pool_bytes | a dimension per process | bytes |\n| windows.processes_threads | a dimension per process | threads |\n| ad.database_operations | add, delete, modify, recycle | operations/s |\n| ad.directory_operations | read, write, search | operations/s |\n| ad.name_cache_lookups | lookups | lookups/s |\n| ad.name_cache_hits | hits | hits/s |\n| ad.atq_average_request_latency | time | seconds |\n| ad.atq_outstanding_requests | outstanding | requests |\n| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |\n| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |\n| ad.dra_replication_properties_updated | inbound, outbound | properties/s |\n| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |\n| ad.dra_replication_pending_syncs | pending | syncs |\n| ad.dra_replication_sync_requests | requests | requests/s |\n| ad.ds_threads | in_use | threads |\n| ad.ldap_last_bind_time | last_bind | seconds |\n| ad.binds | binds | binds/s |\n| ad.ldap_searches | searches | searches/s |\n| adfs.ad_login_connection_failures | connection | failures/s |\n| adfs.certificate_authentications | authentications | authentications/s |\n| adfs.db_artifact_failures | connection | failures/s |\n| adfs.db_artifact_query_time_seconds | query_time | seconds/s |\n| adfs.db_config_failures | connection | failures/s |\n| adfs.db_config_query_time_seconds | query_time | seconds/s |\n| adfs.device_authentications | authentications | authentications/s |\n| adfs.external_authentications | success, failure | authentications/s |\n| adfs.federated_authentications | authentications | 
authentications/s |\n| adfs.federation_metadata_requests | requests | requests/s |\n| adfs.oauth_authorization_requests | requests | requests/s |\n| adfs.oauth_client_authentications | success, failure | authentications/s |\n| adfs.oauth_client_credentials_requests | success, failure | requests/s |\n| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |\n| adfs.oauth_client_windows_authentications | success, failure | authentications/s |\n| adfs.oauth_logon_certificate_requests | success, failure | requests/s |\n| adfs.oauth_password_grant_requests | success, failure | requests/s |\n| adfs.oauth_token_requests_success | success | requests/s |\n| adfs.passive_requests | passive | requests/s |\n| adfs.passport_authentications | passport | authentications/s |\n| adfs.password_change_requests | success, failure | requests/s |\n| adfs.samlp_token_requests_success | success | requests/s |\n| adfs.sso_authentications | success, failure | authentications/s |\n| adfs.token_requests | requests | requests/s |\n| adfs.userpassword_authentications | success, failure | authentications/s |\n| adfs.windows_integrated_authentications | authentications | authentications/s |\n| adfs.wsfed_token_requests_success | success | requests/s |\n| adfs.wstrust_token_requests_success | success | requests/s |\n| exchange.activesync_ping_cmds_pending | pending | commands |\n| exchange.activesync_requests | received | requests/s |\n| exchange.activesync_sync_cmds | processed | commands/s |\n| exchange.autodiscover_requests | processed | requests/s |\n| exchange.avail_service_requests | serviced | requests/s |\n| exchange.owa_current_unique_users | logged-in | users |\n| exchange.owa_requests_total | handled | requests/s |\n| exchange.rpc_active_user_count | active | users |\n| exchange.rpc_avg_latency | latency | seconds |\n| exchange.rpc_connection_count | connections | connections |\n| exchange.rpc_operations | operations | operations/s |\n| exchange.rpc_requests | processed | requests |\n| exchange.rpc_user_count | users | users |\n| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_poison | low, high, none, normal | messages/s |\n| hyperv.vms_health | ok, critical | vms |\n| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |\n| hyperv.root_partition_attached_devices | attached | devices |\n| hyperv.root_partition_deposited_pages | deposited | pages |\n| hyperv.root_partition_skipped_interrupts | skipped | interrupts |\n| hyperv.root_partition_device_dma_errors | illegal_dma | requests |\n| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | requests |\n| 
hyperv.root_partition_device_interrupt_throttle_events | throttling | events |\n| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |\n| hyperv.root_partition_address_space | address_spaces | address spaces |\n| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |\n| hyperv.root_partition_virtual_tlb_pages | used | pages |\n\n### Per cpu core\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| core | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |\n| windows.cpu_core_interrupts | interrupts | interrupts/s |\n| windows.cpu_core_dpcs | dpcs | dpcs/s |\n| windows.cpu_core_cstate | c1, c2, c3 | percentage |\n\n### Per logical disk\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.logical_disk_utilization | free, used | bytes |\n| windows.logical_disk_bandwidth | read, write | bytes/s |\n| windows.logical_disk_operations | reads, writes | operations/s |\n| windows.logical_disk_latency | read, write | seconds |\n\n### Per network device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| nic | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.net_nic_bandwidth | received, sent | kilobits/s |\n| windows.net_nic_packets | received, sent | packets/s |\n| windows.net_nic_errors | inbound, outbound | errors/s |\n| windows.net_nic_discarded | inbound, outbound | discards/s |\n\n### Per thermalzone\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thermalzone | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.thermalzone_temperature | temperature | celsius |\n\n### Per service\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |\n| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |\n\n### Per website\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| website | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.website_traffic | received, sent | bytes/s |\n| iis.website_requests_rate | requests | requests/s |\n| iis.website_active_connections_count | active | connections |\n| iis.website_users_count | anonymous, non_anonymous | users |\n| iis.website_connection_attempts_rate | connection | attempts/s |\n| iis.website_isapi_extension_requests_count | isapi | requests |\n| iis.website_isapi_extension_requests_rate | isapi | requests/s |\n| iis.website_ftp_file_transfer_rate | received, sent | files/s |\n| iis.website_logon_attempts_rate | logon | attempts/s |\n| iis.website_errors_rate | document_locked, document_not_found | errors/s |\n| iis.website_uptime | document_locked, document_not_found | seconds |\n\n### Per mssql instance\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.instance_accessmethods_page_splits | 
page | splits/s |\n| mssql.instance_cache_hit_ratio | hit_ratio | percentage |\n| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |\n| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |\n| mssql.instance_bufman_iops | read, written | iops |\n| mssql.instance_blocked_processes | blocked | processes |\n| mssql.instance_user_connection | user | connections |\n| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |\n| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |\n| mssql.instance_memmgr_pending_memory_grants | pending | processes |\n| mssql.instance_memmgr_server_memory | memory | bytes |\n| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |\n| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |\n| mssql.instance_sqlstats_batch_requests | batch | requests/s |\n| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |\n| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |\n| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |\n\n### Per database\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n| database | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.database_active_transactions | active | transactions |\n| mssql.database_backup_restore_operations | backup | operations/s |\n| mssql.database_data_files_size | size | bytes |\n| mssql.database_log_flushed | flushed | bytes/s |\n| mssql.database_log_flushes | log | flushes/s |\n| mssql.database_transactions | transactions | transactions/s |\n| mssql.database_write_transactions | write | transactions/s |\n\n### Per certificate template\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cert_template | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adcs.cert_template_requests | requests | requests/s |\n| adcs.cert_template_failed_requests | failed | requests/s |\n| adcs.cert_template_issued_requests | issued | requests/s |\n| adcs.cert_template_pending_requests | pending | requests/s |\n| adcs.cert_template_request_processing_time | processing_time | seconds |\n| adcs.cert_template_retrievals | retrievals | retrievals/s |\n| adcs.cert_template_retrieval_processing_time | processing_time | seconds |\n| adcs.cert_template_request_cryptographic_signing_time | singing_time | seconds |\n| adcs.cert_template_request_policy_module_processing | processing_time | seconds |\n| adcs.cert_template_challenge_responses | challenge | responses/s |\n| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |\n| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |\n| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |\n\n### Per process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| process | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netframework.clrexception_thrown | exceptions | exceptions/s |\n| 
netframework.clrexception_filters | filters | filters/s |\n| netframework.clrexception_finallys | finallys | finallys/s |\n| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |\n| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |\n| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |\n| netframework.clrinterop_interop_stubs_created | created | stubs/s |\n| netframework.clrjit_methods | jit-compiled | methods/s |\n| netframework.clrjit_time | time | percentage |\n| netframework.clrjit_standard_failures | failures | failures/s |\n| netframework.clrjit_il_bytes | compiled_msil | bytes/s |\n| netframework.clrloading_loader_heap_size | committed | bytes |\n| netframework.clrloading_appdomains_loaded | loaded | domain/s |\n| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |\n| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |\n| netframework.clrloading_classes_loaded | loaded | classes/s |\n| netframework.clrloading_class_load_failures | class_load | failures/s |\n| netframework.clrlocksandthreads_queue_length | threads | threads/s |\n| netframework.clrlocksandthreads_current_logical_threads | logical | threads |\n| netframework.clrlocksandthreads_current_physical_threads | physical | threads |\n| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |\n| netframework.clrlocksandthreads_contentions | contentions | contentions/s |\n| netframework.clrmemory_allocated_bytes | allocated | bytes/s |\n| netframework.clrmemory_finalization_survivors | survived | objects |\n| netframework.clrmemory_heap_size | heap | bytes |\n| netframework.clrmemory_promoted | promoted | bytes |\n| netframework.clrmemory_number_gc_handles | used | handles |\n| netframework.clrmemory_collections | gc | gc/s |\n| netframework.clrmemory_induced_gc | gc | gc/s |\n| netframework.clrmemory_number_pinned_objects | pinned | objects |\n| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |\n| netframework.clrmemory_committed | committed | bytes |\n| netframework.clrmemory_reserved | reserved | bytes |\n| netframework.clrmemory_gc_time | time | percentage |\n| netframework.clrremoting_channels | registered | channels/s |\n| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |\n| netframework.clrremoting_context_bound_objects | allocated | objects/s |\n| netframework.clrremoting_context_proxies | objects | objects/s |\n| netframework.clrremoting_contexts | contexts | contexts |\n| netframework.clrremoting_remote_calls | rpc | calls/s |\n| netframework.clrsecurity_link_time_checks | linktime | checks/s |\n| netframework.clrsecurity_checks_time | time | percentage |\n| netframework.clrsecurity_stack_walk_depth | stack | depth |\n| netframework.clrsecurity_runtime_checks | runtime | checks/s |\n\n### Per exchange workload\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.workload_active_tasks | active | tasks |\n| exchange.workload_completed_tasks | completed | tasks/s |\n| exchange.workload_queued_tasks | queued | tasks/s |\n| exchange.workload_yielded_tasks | yielded | tasks/s |\n| exchange.workload_activity_status | active, paused | status |\n\n### Per ldap process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit 
|\n|:------|:----------|:----|\n| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |\n| exchange.ldap_read_time | read | seconds |\n| exchange.ldap_search_time | search | seconds |\n| exchange.ldap_write_time | write | seconds |\n| exchange.ldap_timeout_errors | timeout | errors/s |\n\n### Per http proxy\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.http_proxy_avg_auth_latency | latency | seconds |\n| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |\n| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |\n| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |\n| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |\n| exchange.http_proxy_requests | processed | requests/s |\n\n### Per vm\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_name | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_cpu_usage | gues, hypervisor, remote | percentage |\n| hyperv.vm_memory_physical | assigned_memory | MiB |\n| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |\n| hyperv.vm_memory_pressure_current | pressure | percentage |\n| hyperv.vm_vid_physical_pages_allocated | allocated | pages |\n| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |\n\n### Per vm device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_device_bytes | read, written | bytes/s |\n| hyperv.vm_device_operations | read, write | operations/s |\n| hyperv.vm_device_errors | errors | errors/s |\n\n### Per vm interface\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_interface | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_interface_bytes | received, sent | bytes/s |\n| hyperv.vm_interface_packets | received, sent | packets/s |\n| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |\n\n### Per vswitch\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vswitch | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vswitch_bytes | received, sent | bytes/s |\n| hyperv.vswitch_packets | received, sent | packets/s |\n| hyperv.vswitch_directed_packets | received, sent | packets/s |\n| hyperv.vswitch_broadcast_packets | received, sent | packets/s |\n| hyperv.vswitch_multicast_packets | received, sent | packets/s |\n| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_packets_flooded | flooded | packets/s |\n| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |\n| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |\n\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThe collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\nSupported collectors:\n\n- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)\n- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)\n- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)\n- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)\n- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)\n- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)\n- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)\n- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)\n- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)\n- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)\n- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)\n- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)\n- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)\n- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)\n- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)\n- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)\n- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-exceptions)\n- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-interop)\n- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-jit)\n- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-loading)\n- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-locks-and-threads)\n- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-memory)\n- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-remoting)\n- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)\n- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)\n\n\n### Per Active Directory instance\n\nThese metrics refer to the entire monitored host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |\n| windows.memory_utilization | available, used | bytes |\n| windows.memory_page_faults | page_faults | events/s |\n| 
windows.memory_swap_utilization | available, used | bytes |\n| windows.memory_swap_operations | read, write | operations/s |\n| windows.memory_swap_pages | read, written | pages/s |\n| windows.memory_cached | cached | KiB |\n| windows.memory_cache_faults | cache_faults | events/s |\n| windows.memory_system_pool | paged, non-paged | bytes |\n| windows.tcp_conns_established | ipv4, ipv6 | connections |\n| windows.tcp_conns_active | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |\n| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |\n| windows.tcp_segments_received | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |\n| windows.os_processes | processes | number |\n| windows.os_users | users | users |\n| windows.os_visible_memory_usage | free, used | bytes |\n| windows.os_paging_files_usage | free, used | bytes |\n| windows.system_threads | threads | number |\n| windows.system_uptime | time | seconds |\n| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |\n| windows.processes_cpu_utilization | a dimension per process | percentage |\n| windows.processes_handles | a dimension per process | handles |\n| windows.processes_io_bytes | a dimension per process | bytes/s |\n| windows.processes_io_operations | a dimension per process | operations/s |\n| windows.processes_page_faults | a dimension per process | pgfaults/s |\n| windows.processes_page_file_bytes | a dimension per process | bytes |\n| windows.processes_pool_bytes | a dimension per process | bytes |\n| windows.processes_threads | a dimension per process | threads |\n| ad.database_operations | add, delete, modify, recycle | operations/s |\n| ad.directory_operations | read, write, search | operations/s |\n| ad.name_cache_lookups | lookups | lookups/s |\n| ad.name_cache_hits | hits | hits/s |\n| ad.atq_average_request_latency | time | seconds |\n| ad.atq_outstanding_requests | outstanding | requests |\n| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |\n| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |\n| ad.dra_replication_properties_updated | inbound, outbound | properties/s |\n| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |\n| ad.dra_replication_pending_syncs | pending | syncs |\n| ad.dra_replication_sync_requests | requests | requests/s |\n| ad.ds_threads | in_use | threads |\n| ad.ldap_last_bind_time | last_bind | seconds |\n| ad.binds | binds | binds/s |\n| ad.ldap_searches | searches | searches/s |\n| adfs.ad_login_connection_failures | connection | failures/s |\n| adfs.certificate_authentications | authentications | authentications/s |\n| adfs.db_artifact_failures | connection | failures/s |\n| adfs.db_artifact_query_time_seconds | query_time | seconds/s |\n| adfs.db_config_failures | connection | failures/s |\n| adfs.db_config_query_time_seconds | query_time | seconds/s |\n| adfs.device_authentications | authentications | authentications/s |\n| adfs.external_authentications | success, failure | authentications/s |\n| 
adfs.federated_authentications | authentications | authentications/s |\n| adfs.federation_metadata_requests | requests | requests/s |\n| adfs.oauth_authorization_requests | requests | requests/s |\n| adfs.oauth_client_authentications | success, failure | authentications/s |\n| adfs.oauth_client_credentials_requests | success, failure | requests/s |\n| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |\n| adfs.oauth_client_windows_authentications | success, failure | authentications/s |\n| adfs.oauth_logon_certificate_requests | success, failure | requests/s |\n| adfs.oauth_password_grant_requests | success, failure | requests/s |\n| adfs.oauth_token_requests_success | success | requests/s |\n| adfs.passive_requests | passive | requests/s |\n| adfs.passport_authentications | passport | authentications/s |\n| adfs.password_change_requests | success, failure | requests/s |\n| adfs.samlp_token_requests_success | success | requests/s |\n| adfs.sso_authentications | success, failure | authentications/s |\n| adfs.token_requests | requests | requests/s |\n| adfs.userpassword_authentications | success, failure | authentications/s |\n| adfs.windows_integrated_authentications | authentications | authentications/s |\n| adfs.wsfed_token_requests_success | success | requests/s |\n| adfs.wstrust_token_requests_success | success | requests/s |\n| exchange.activesync_ping_cmds_pending | pending | commands |\n| exchange.activesync_requests | received | requests/s |\n| exchange.activesync_sync_cmds | processed | commands/s |\n| exchange.autodiscover_requests | processed | requests/s |\n| exchange.avail_service_requests | serviced | requests/s |\n| exchange.owa_current_unique_users | logged-in | users |\n| exchange.owa_requests_total | handled | requests/s |\n| exchange.rpc_active_user_count | active | users |\n| exchange.rpc_avg_latency | latency | seconds |\n| exchange.rpc_connection_count | connections | connections |\n| exchange.rpc_operations | operations | operations/s |\n| exchange.rpc_requests | processed | requests |\n| exchange.rpc_user_count | users | users |\n| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_poison | low, high, none, normal | messages/s |\n| hyperv.vms_health | ok, critical | vms |\n| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |\n| hyperv.root_partition_attached_devices | attached | devices |\n| hyperv.root_partition_deposited_pages | deposited | pages |\n| hyperv.root_partition_skipped_interrupts | skipped | interrupts |\n| hyperv.root_partition_device_dma_errors | illegal_dma | requests |\n| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | 
requests |\n| hyperv.root_partition_device_interrupt_throttle_events | throttling | events |\n| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |\n| hyperv.root_partition_address_space | address_spaces | address spaces |\n| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |\n| hyperv.root_partition_virtual_tlb_pages | used | pages |\n\n### Per cpu core\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| core | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |\n| windows.cpu_core_interrupts | interrupts | interrupts/s |\n| windows.cpu_core_dpcs | dpcs | dpcs/s |\n| windows.cpu_core_cstate | c1, c2, c3 | percentage |\n\n### Per logical disk\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.logical_disk_utilization | free, used | bytes |\n| windows.logical_disk_bandwidth | read, write | bytes/s |\n| windows.logical_disk_operations | reads, writes | operations/s |\n| windows.logical_disk_latency | read, write | seconds |\n\n### Per network device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| nic | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.net_nic_bandwidth | received, sent | kilobits/s |\n| windows.net_nic_packets | received, sent | packets/s |\n| windows.net_nic_errors | inbound, outbound | errors/s |\n| windows.net_nic_discarded | inbound, outbound | discards/s |\n\n### Per thermalzone\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thermalzone | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.thermalzone_temperature | temperature | celsius |\n\n### Per service\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |\n| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |\n\n### Per website\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| website | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.website_traffic | received, sent | bytes/s |\n| iis.website_requests_rate | requests | requests/s |\n| iis.website_active_connections_count | active | connections |\n| iis.website_users_count | anonymous, non_anonymous | users |\n| iis.website_connection_attempts_rate | connection | attempts/s |\n| iis.website_isapi_extension_requests_count | isapi | requests |\n| iis.website_isapi_extension_requests_rate | isapi | requests/s |\n| iis.website_ftp_file_transfer_rate | received, sent | files/s |\n| iis.website_logon_attempts_rate | logon | attempts/s |\n| iis.website_errors_rate | document_locked, document_not_found | errors/s |\n| iis.website_uptime | document_locked, document_not_found | seconds |\n\n### Per mssql instance\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| 
mssql.instance_accessmethods_page_splits | page | splits/s |\n| mssql.instance_cache_hit_ratio | hit_ratio | percentage |\n| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |\n| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |\n| mssql.instance_bufman_iops | read, written | iops |\n| mssql.instance_blocked_processes | blocked | processes |\n| mssql.instance_user_connection | user | connections |\n| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |\n| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |\n| mssql.instance_memmgr_pending_memory_grants | pending | processes |\n| mssql.instance_memmgr_server_memory | memory | bytes |\n| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |\n| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |\n| mssql.instance_sqlstats_batch_requests | batch | requests/s |\n| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |\n| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |\n| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |\n\n### Per database\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n| database | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.database_active_transactions | active | transactions |\n| mssql.database_backup_restore_operations | backup | operations/s |\n| mssql.database_data_files_size | size | bytes |\n| mssql.database_log_flushed | flushed | bytes/s |\n| mssql.database_log_flushes | log | flushes/s |\n| mssql.database_transactions | transactions | transactions/s |\n| mssql.database_write_transactions | write | transactions/s |\n\n### Per certificate template\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cert_template | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adcs.cert_template_requests | requests | requests/s |\n| adcs.cert_template_failed_requests | failed | requests/s |\n| adcs.cert_template_issued_requests | issued | requests/s |\n| adcs.cert_template_pending_requests | pending | requests/s |\n| adcs.cert_template_request_processing_time | processing_time | seconds |\n| adcs.cert_template_retrievals | retrievals | retrievals/s |\n| adcs.cert_template_retrieval_processing_time | processing_time | seconds |\n| adcs.cert_template_request_cryptographic_signing_time | singing_time | seconds |\n| adcs.cert_template_request_policy_module_processing | processing_time | seconds |\n| adcs.cert_template_challenge_responses | challenge | responses/s |\n| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |\n| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |\n| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |\n\n### Per process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| process | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netframework.clrexception_thrown | exceptions | 
exceptions/s |\n| netframework.clrexception_filters | filters | filters/s |\n| netframework.clrexception_finallys | finallys | finallys/s |\n| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |\n| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |\n| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |\n| netframework.clrinterop_interop_stubs_created | created | stubs/s |\n| netframework.clrjit_methods | jit-compiled | methods/s |\n| netframework.clrjit_time | time | percentage |\n| netframework.clrjit_standard_failures | failures | failures/s |\n| netframework.clrjit_il_bytes | compiled_msil | bytes/s |\n| netframework.clrloading_loader_heap_size | committed | bytes |\n| netframework.clrloading_appdomains_loaded | loaded | domain/s |\n| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |\n| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |\n| netframework.clrloading_classes_loaded | loaded | classes/s |\n| netframework.clrloading_class_load_failures | class_load | failures/s |\n| netframework.clrlocksandthreads_queue_length | threads | threads/s |\n| netframework.clrlocksandthreads_current_logical_threads | logical | threads |\n| netframework.clrlocksandthreads_current_physical_threads | physical | threads |\n| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |\n| netframework.clrlocksandthreads_contentions | contentions | contentions/s |\n| netframework.clrmemory_allocated_bytes | allocated | bytes/s |\n| netframework.clrmemory_finalization_survivors | survived | objects |\n| netframework.clrmemory_heap_size | heap | bytes |\n| netframework.clrmemory_promoted | promoted | bytes |\n| netframework.clrmemory_number_gc_handles | used | handles |\n| netframework.clrmemory_collections | gc | gc/s |\n| netframework.clrmemory_induced_gc | gc | gc/s |\n| netframework.clrmemory_number_pinned_objects | pinned | objects |\n| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |\n| netframework.clrmemory_committed | committed | bytes |\n| netframework.clrmemory_reserved | reserved | bytes |\n| netframework.clrmemory_gc_time | time | percentage |\n| netframework.clrremoting_channels | registered | channels/s |\n| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |\n| netframework.clrremoting_context_bound_objects | allocated | objects/s |\n| netframework.clrremoting_context_proxies | objects | objects/s |\n| netframework.clrremoting_contexts | contexts | contexts |\n| netframework.clrremoting_remote_calls | rpc | calls/s |\n| netframework.clrsecurity_link_time_checks | linktime | checks/s |\n| netframework.clrsecurity_checks_time | time | percentage |\n| netframework.clrsecurity_stack_walk_depth | stack | depth |\n| netframework.clrsecurity_runtime_checks | runtime | checks/s |\n\n### Per exchange workload\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.workload_active_tasks | active | tasks |\n| exchange.workload_completed_tasks | completed | tasks/s |\n| exchange.workload_queued_tasks | queued | tasks/s |\n| exchange.workload_yielded_tasks | yielded | tasks/s |\n| exchange.workload_activity_status | active, paused | status |\n\n### Per ldap process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit 
|\n|:------|:----------|:----|\n| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |\n| exchange.ldap_read_time | read | seconds |\n| exchange.ldap_search_time | search | seconds |\n| exchange.ldap_write_time | write | seconds |\n| exchange.ldap_timeout_errors | timeout | errors/s |\n\n### Per http proxy\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.http_proxy_avg_auth_latency | latency | seconds |\n| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |\n| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |\n| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |\n| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |\n| exchange.http_proxy_requests | processed | requests/s |\n\n### Per vm\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_name | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_cpu_usage | gues, hypervisor, remote | percentage |\n| hyperv.vm_memory_physical | assigned_memory | MiB |\n| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |\n| hyperv.vm_memory_pressure_current | pressure | percentage |\n| hyperv.vm_vid_physical_pages_allocated | allocated | pages |\n| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |\n\n### Per vm device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_device_bytes | read, written | bytes/s |\n| hyperv.vm_device_operations | read, write | operations/s |\n| hyperv.vm_device_errors | errors | errors/s |\n\n### Per vm interface\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_interface | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_interface_bytes | received, sent | bytes/s |\n| hyperv.vm_interface_packets | received, sent | packets/s |\n| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |\n\n### Per vswitch\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vswitch | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vswitch_bytes | received, sent | bytes/s |\n| hyperv.vswitch_packets | received, sent | packets/s |\n| hyperv.vswitch_directed_packets | received, sent | packets/s |\n| hyperv.vswitch_broadcast_packets | received, sent | packets/s |\n| hyperv.vswitch_multicast_packets | received, sent | packets/s |\n| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_packets_flooded | flooded | packets/s |\n| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |\n| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-windows-MS_Exchange", "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/windows/metadata.yaml", @@ -17314,10 +17695,10 @@ } }, "overview": "# MS SQL Server\n\nPlugin: go.d.plugin\nModule: windows\n\n## Overview\n\nThis collector monitors the performance of Windows machines, collects both host metrics and metrics from various Windows applications (e.g. 
Active Directory, MSSQL).\n\n\nIt collects metrics by periodically sending HTTP requests to [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt detects Windows exporter instances running on localhost (requires using [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)).\n\nUsing the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nData collection affects the CPU usage of the Windows host. CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\n", -    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Windows exporter\n\nTo install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/windows.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/windows.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n```yaml\njobs:\n - name: win_server\n url: https://192.0.2.1:9182/metrics\n tls_skip_verify: yes\n\n```\n##### Virtual Node\n\nThe Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.\nYou can create a virtual node for all your Windows machines and control them as separate entities.\n\nTo make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:\n\n> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.\n\n```yaml\n# /etc/netdata/vnodes/vnodes.conf\n- hostname: win_server\n guid: \n```\n\n\n```yaml\njobs:\n - name: win_server\n vnode: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from multiple remote instances.\n\n\n```yaml\njobs:\n - name: win_server1\n url: http://192.0.2.1:9182/metrics\n\n - name: win_server2\n url: http://192.0.2.2:9182/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Windows exporter\n\nTo install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/windows.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/windows.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. 
| | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n```yaml\njobs:\n - name: win_server\n url: https://192.0.2.1:9182/metrics\n tls_skip_verify: yes\n\n```\n##### Virtual Node\n\nThe Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.\nYou can create a virtual node for all your Windows machines and control them as separate entities.\n\nTo make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:\n\n> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.\n\n```yaml\n# /etc/netdata/vnodes/vnodes.conf\n- hostname: win_server\n guid: \n```\n\n\n```yaml\njobs:\n - name: win_server\n vnode: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from multiple remote instances.\n\n\n```yaml\njobs:\n - name: win_server1\n url: http://192.0.2.1:9182/metrics\n\n - name: win_server2\n url: http://192.0.2.2:9182/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `windows` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m windows\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `windows` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep windows\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep windows /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep windows\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ windows_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.cpu_utilization_total | average CPU utilization over the last 10 minutes |\n| [ windows_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.memory_utilization | memory utilization |\n| [ windows_inbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of inbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of outbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_inbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of inbound errors for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of outbound errors for the network interface in the last 10 minutes |\n| [ windows_disk_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.logical_disk_space_usage | disk space utilization |\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThe collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\nSupported collectors:\n\n- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)\n- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)\n- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)\n- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)\n- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)\n- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)\n- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)\n- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)\n- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)\n- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)\n- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)\n- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)\n- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)\n- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)\n- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)\n- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)\n- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md)\n- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md)\n- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md)\n- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md)\n- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md)\n- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md)\n- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md)\n- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)\n- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)\n\n\n### Per Active Directory instance\n\nThese metrics refer to the entire monitored host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |\n| windows.memory_utilization | available, used | bytes |\n| windows.memory_page_faults | page_faults | events/s |\n| windows.memory_swap_utilization 
| available, used | bytes |\n| windows.memory_swap_operations | read, write | operations/s |\n| windows.memory_swap_pages | read, written | pages/s |\n| windows.memory_cached | cached | KiB |\n| windows.memory_cache_faults | cache_faults | events/s |\n| windows.memory_system_pool | paged, non-paged | bytes |\n| windows.tcp_conns_established | ipv4, ipv6 | connections |\n| windows.tcp_conns_active | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |\n| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |\n| windows.tcp_segments_received | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |\n| windows.os_processes | processes | number |\n| windows.os_users | users | users |\n| windows.os_visible_memory_usage | free, used | bytes |\n| windows.os_paging_files_usage | free, used | bytes |\n| windows.system_threads | threads | number |\n| windows.system_uptime | time | seconds |\n| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |\n| windows.processes_cpu_utilization | a dimension per process | percentage |\n| windows.processes_handles | a dimension per process | handles |\n| windows.processes_io_bytes | a dimension per process | bytes/s |\n| windows.processes_io_operations | a dimension per process | operations/s |\n| windows.processes_page_faults | a dimension per process | pgfaults/s |\n| windows.processes_page_file_bytes | a dimension per process | bytes |\n| windows.processes_pool_bytes | a dimension per process | bytes |\n| windows.processes_threads | a dimension per process | threads |\n| ad.database_operations | add, delete, modify, recycle | operations/s |\n| ad.directory_operations | read, write, search | operations/s |\n| ad.name_cache_lookups | lookups | lookups/s |\n| ad.name_cache_hits | hits | hits/s |\n| ad.atq_average_request_latency | time | seconds |\n| ad.atq_outstanding_requests | outstanding | requests |\n| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |\n| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |\n| ad.dra_replication_properties_updated | inbound, outbound | properties/s |\n| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |\n| ad.dra_replication_pending_syncs | pending | syncs |\n| ad.dra_replication_sync_requests | requests | requests/s |\n| ad.ds_threads | in_use | threads |\n| ad.ldap_last_bind_time | last_bind | seconds |\n| ad.binds | binds | binds/s |\n| ad.ldap_searches | searches | searches/s |\n| adfs.ad_login_connection_failures | connection | failures/s |\n| adfs.certificate_authentications | authentications | authentications/s |\n| adfs.db_artifact_failures | connection | failures/s |\n| adfs.db_artifact_query_time_seconds | query_time | seconds/s |\n| adfs.db_config_failures | connection | failures/s |\n| adfs.db_config_query_time_seconds | query_time | seconds/s |\n| adfs.device_authentications | authentications | authentications/s |\n| adfs.external_authentications | success, failure | authentications/s |\n| adfs.federated_authentications | authentications | 
authentications/s |\n| adfs.federation_metadata_requests | requests | requests/s |\n| adfs.oauth_authorization_requests | requests | requests/s |\n| adfs.oauth_client_authentications | success, failure | authentications/s |\n| adfs.oauth_client_credentials_requests | success, failure | requests/s |\n| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |\n| adfs.oauth_client_windows_authentications | success, failure | authentications/s |\n| adfs.oauth_logon_certificate_requests | success, failure | requests/s |\n| adfs.oauth_password_grant_requests | success, failure | requests/s |\n| adfs.oauth_token_requests_success | success | requests/s |\n| adfs.passive_requests | passive | requests/s |\n| adfs.passport_authentications | passport | authentications/s |\n| adfs.password_change_requests | success, failure | requests/s |\n| adfs.samlp_token_requests_success | success | requests/s |\n| adfs.sso_authentications | success, failure | authentications/s |\n| adfs.token_requests | requests | requests/s |\n| adfs.userpassword_authentications | success, failure | authentications/s |\n| adfs.windows_integrated_authentications | authentications | authentications/s |\n| adfs.wsfed_token_requests_success | success | requests/s |\n| adfs.wstrust_token_requests_success | success | requests/s |\n| exchange.activesync_ping_cmds_pending | pending | commands |\n| exchange.activesync_requests | received | requests/s |\n| exchange.activesync_sync_cmds | processed | commands/s |\n| exchange.autodiscover_requests | processed | requests/s |\n| exchange.avail_service_requests | serviced | requests/s |\n| exchange.owa_current_unique_users | logged-in | users |\n| exchange.owa_requests_total | handled | requests/s |\n| exchange.rpc_active_user_count | active | users |\n| exchange.rpc_avg_latency | latency | seconds |\n| exchange.rpc_connection_count | connections | connections |\n| exchange.rpc_operations | operations | operations/s |\n| exchange.rpc_requests | processed | requests |\n| exchange.rpc_user_count | users | users |\n| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_poison | low, high, none, normal | messages/s |\n| hyperv.vms_health | ok, critical | vms |\n| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |\n| hyperv.root_partition_attached_devices | attached | devices |\n| hyperv.root_partition_deposited_pages | deposited | pages |\n| hyperv.root_partition_skipped_interrupts | skipped | interrupts |\n| hyperv.root_partition_device_dma_errors | illegal_dma | requests |\n| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | requests |\n| 
hyperv.root_partition_device_interrupt_throttle_events | throttling | events |\n| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |\n| hyperv.root_partition_address_space | address_spaces | address spaces |\n| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |\n| hyperv.root_partition_virtual_tlb_pages | used | pages |\n\n### Per cpu core\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| core | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |\n| windows.cpu_core_interrupts | interrupts | interrupts/s |\n| windows.cpu_core_dpcs | dpcs | dpcs/s |\n| windows.cpu_core_cstate | c1, c2, c3 | percentage |\n\n### Per logical disk\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.logical_disk_utilization | free, used | bytes |\n| windows.logical_disk_bandwidth | read, write | bytes/s |\n| windows.logical_disk_operations | reads, writes | operations/s |\n| windows.logical_disk_latency | read, write | seconds |\n\n### Per network device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| nic | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.net_nic_bandwidth | received, sent | kilobits/s |\n| windows.net_nic_packets | received, sent | packets/s |\n| windows.net_nic_errors | inbound, outbound | errors/s |\n| windows.net_nic_discarded | inbound, outbound | discards/s |\n\n### Per thermalzone\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thermalzone | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.thermalzone_temperature | temperature | celsius |\n\n### Per service\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |\n| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |\n\n### Per website\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| website | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.website_traffic | received, sent | bytes/s |\n| iis.website_requests_rate | requests | requests/s |\n| iis.website_active_connections_count | active | connections |\n| iis.website_users_count | anonymous, non_anonymous | users |\n| iis.website_connection_attempts_rate | connection | attempts/s |\n| iis.website_isapi_extension_requests_count | isapi | requests |\n| iis.website_isapi_extension_requests_rate | isapi | requests/s |\n| iis.website_ftp_file_transfer_rate | received, sent | files/s |\n| iis.website_logon_attempts_rate | logon | attempts/s |\n| iis.website_errors_rate | document_locked, document_not_found | errors/s |\n| iis.website_uptime | document_locked, document_not_found | seconds |\n\n### Per mssql instance\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.instance_accessmethods_page_splits | 
page | splits/s |\n| mssql.instance_cache_hit_ratio | hit_ratio | percentage |\n| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |\n| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |\n| mssql.instance_bufman_iops | read, written | iops |\n| mssql.instance_blocked_processes | blocked | processes |\n| mssql.instance_user_connection | user | connections |\n| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |\n| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |\n| mssql.instance_memmgr_pending_memory_grants | pending | processes |\n| mssql.instance_memmgr_server_memory | memory | bytes |\n| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |\n| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |\n| mssql.instance_sqlstats_batch_requests | batch | requests/s |\n| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |\n| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |\n| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |\n\n### Per database\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n| database | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.database_active_transactions | active | transactions |\n| mssql.database_backup_restore_operations | backup | operations/s |\n| mssql.database_data_files_size | size | bytes |\n| mssql.database_log_flushed | flushed | bytes/s |\n| mssql.database_log_flushes | log | flushes/s |\n| mssql.database_transactions | transactions | transactions/s |\n| mssql.database_write_transactions | write | transactions/s |\n\n### Per certificate template\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cert_template | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adcs.cert_template_requests | requests | requests/s |\n| adcs.cert_template_failed_requests | failed | requests/s |\n| adcs.cert_template_issued_requests | issued | requests/s |\n| adcs.cert_template_pending_requests | pending | requests/s |\n| adcs.cert_template_request_processing_time | processing_time | seconds |\n| adcs.cert_template_retrievals | retrievals | retrievals/s |\n| adcs.cert_template_retrieval_processing_time | processing_time | seconds |\n| adcs.cert_template_request_cryptographic_signing_time | singing_time | seconds |\n| adcs.cert_template_request_policy_module_processing | processing_time | seconds |\n| adcs.cert_template_challenge_responses | challenge | responses/s |\n| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |\n| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |\n| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |\n\n### Per process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| process | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netframework.clrexception_thrown | exceptions | exceptions/s |\n| 
netframework.clrexception_filters | filters | filters/s |\n| netframework.clrexception_finallys | finallys | finallys/s |\n| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |\n| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |\n| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |\n| netframework.clrinterop_interop_stubs_created | created | stubs/s |\n| netframework.clrjit_methods | jit-compiled | methods/s |\n| netframework.clrjit_time | time | percentage |\n| netframework.clrjit_standard_failures | failures | failures/s |\n| netframework.clrjit_il_bytes | compiled_msil | bytes/s |\n| netframework.clrloading_loader_heap_size | committed | bytes |\n| netframework.clrloading_appdomains_loaded | loaded | domain/s |\n| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |\n| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |\n| netframework.clrloading_classes_loaded | loaded | classes/s |\n| netframework.clrloading_class_load_failures | class_load | failures/s |\n| netframework.clrlocksandthreads_queue_length | threads | threads/s |\n| netframework.clrlocksandthreads_current_logical_threads | logical | threads |\n| netframework.clrlocksandthreads_current_physical_threads | physical | threads |\n| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |\n| netframework.clrlocksandthreads_contentions | contentions | contentions/s |\n| netframework.clrmemory_allocated_bytes | allocated | bytes/s |\n| netframework.clrmemory_finalization_survivors | survived | objects |\n| netframework.clrmemory_heap_size | heap | bytes |\n| netframework.clrmemory_promoted | promoted | bytes |\n| netframework.clrmemory_number_gc_handles | used | handles |\n| netframework.clrmemory_collections | gc | gc/s |\n| netframework.clrmemory_induced_gc | gc | gc/s |\n| netframework.clrmemory_number_pinned_objects | pinned | objects |\n| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |\n| netframework.clrmemory_committed | committed | bytes |\n| netframework.clrmemory_reserved | reserved | bytes |\n| netframework.clrmemory_gc_time | time | percentage |\n| netframework.clrremoting_channels | registered | channels/s |\n| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |\n| netframework.clrremoting_context_bound_objects | allocated | objects/s |\n| netframework.clrremoting_context_proxies | objects | objects/s |\n| netframework.clrremoting_contexts | contexts | contexts |\n| netframework.clrremoting_remote_calls | rpc | calls/s |\n| netframework.clrsecurity_link_time_checks | linktime | checks/s |\n| netframework.clrsecurity_checks_time | time | percentage |\n| netframework.clrsecurity_stack_walk_depth | stack | depth |\n| netframework.clrsecurity_runtime_checks | runtime | checks/s |\n\n### Per exchange workload\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.workload_active_tasks | active | tasks |\n| exchange.workload_completed_tasks | completed | tasks/s |\n| exchange.workload_queued_tasks | queued | tasks/s |\n| exchange.workload_yielded_tasks | yielded | tasks/s |\n| exchange.workload_activity_status | active, paused | status |\n\n### Per ldap process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit 
|\n|:------|:----------|:----|\n| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |\n| exchange.ldap_read_time | read | seconds |\n| exchange.ldap_search_time | search | seconds |\n| exchange.ldap_write_time | write | seconds |\n| exchange.ldap_timeout_errors | timeout | errors/s |\n\n### Per http proxy\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.http_proxy_avg_auth_latency | latency | seconds |\n| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |\n| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |\n| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |\n| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |\n| exchange.http_proxy_requests | processed | requests/s |\n\n### Per vm\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_name | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_cpu_usage | gues, hypervisor, remote | percentage |\n| hyperv.vm_memory_physical | assigned_memory | MiB |\n| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |\n| hyperv.vm_memory_pressure_current | pressure | percentage |\n| hyperv.vm_vid_physical_pages_allocated | allocated | pages |\n| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |\n\n### Per vm device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_device_bytes | read, written | bytes/s |\n| hyperv.vm_device_operations | read, write | operations/s |\n| hyperv.vm_device_errors | errors | errors/s |\n\n### Per vm interface\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_interface | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_interface_bytes | received, sent | bytes/s |\n| hyperv.vm_interface_packets | received, sent | packets/s |\n| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |\n\n### Per vswitch\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vswitch | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vswitch_bytes | received, sent | bytes/s |\n| hyperv.vswitch_packets | received, sent | packets/s |\n| hyperv.vswitch_directed_packets | received, sent | packets/s |\n| hyperv.vswitch_broadcast_packets | received, sent | packets/s |\n| hyperv.vswitch_multicast_packets | received, sent | packets/s |\n| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_packets_flooded | flooded | packets/s |\n| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |\n| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |\n\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThe collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\nSupported collectors:\n\n- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)\n- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)\n- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)\n- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)\n- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)\n- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)\n- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)\n- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)\n- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)\n- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)\n- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)\n- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)\n- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)\n- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)\n- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)\n- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)\n- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-exceptions)\n- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-interop)\n- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-jit)\n- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-loading)\n- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-locks-and-threads)\n- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-memory)\n- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-remoting)\n- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)\n- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)\n\n\n### Per Active Directory instance\n\nThese metrics refer to the entire monitored host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |\n| windows.memory_utilization | available, used | bytes |\n| windows.memory_page_faults | page_faults | events/s |\n| 
windows.memory_swap_utilization | available, used | bytes |\n| windows.memory_swap_operations | read, write | operations/s |\n| windows.memory_swap_pages | read, written | pages/s |\n| windows.memory_cached | cached | KiB |\n| windows.memory_cache_faults | cache_faults | events/s |\n| windows.memory_system_pool | paged, non-paged | bytes |\n| windows.tcp_conns_established | ipv4, ipv6 | connections |\n| windows.tcp_conns_active | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |\n| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |\n| windows.tcp_segments_received | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |\n| windows.os_processes | processes | number |\n| windows.os_users | users | users |\n| windows.os_visible_memory_usage | free, used | bytes |\n| windows.os_paging_files_usage | free, used | bytes |\n| windows.system_threads | threads | number |\n| windows.system_uptime | time | seconds |\n| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |\n| windows.processes_cpu_utilization | a dimension per process | percentage |\n| windows.processes_handles | a dimension per process | handles |\n| windows.processes_io_bytes | a dimension per process | bytes/s |\n| windows.processes_io_operations | a dimension per process | operations/s |\n| windows.processes_page_faults | a dimension per process | pgfaults/s |\n| windows.processes_page_file_bytes | a dimension per process | bytes |\n| windows.processes_pool_bytes | a dimension per process | bytes |\n| windows.processes_threads | a dimension per process | threads |\n| ad.database_operations | add, delete, modify, recycle | operations/s |\n| ad.directory_operations | read, write, search | operations/s |\n| ad.name_cache_lookups | lookups | lookups/s |\n| ad.name_cache_hits | hits | hits/s |\n| ad.atq_average_request_latency | time | seconds |\n| ad.atq_outstanding_requests | outstanding | requests |\n| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |\n| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |\n| ad.dra_replication_properties_updated | inbound, outbound | properties/s |\n| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |\n| ad.dra_replication_pending_syncs | pending | syncs |\n| ad.dra_replication_sync_requests | requests | requests/s |\n| ad.ds_threads | in_use | threads |\n| ad.ldap_last_bind_time | last_bind | seconds |\n| ad.binds | binds | binds/s |\n| ad.ldap_searches | searches | searches/s |\n| adfs.ad_login_connection_failures | connection | failures/s |\n| adfs.certificate_authentications | authentications | authentications/s |\n| adfs.db_artifact_failures | connection | failures/s |\n| adfs.db_artifact_query_time_seconds | query_time | seconds/s |\n| adfs.db_config_failures | connection | failures/s |\n| adfs.db_config_query_time_seconds | query_time | seconds/s |\n| adfs.device_authentications | authentications | authentications/s |\n| adfs.external_authentications | success, failure | authentications/s |\n| 
adfs.federated_authentications | authentications | authentications/s |\n| adfs.federation_metadata_requests | requests | requests/s |\n| adfs.oauth_authorization_requests | requests | requests/s |\n| adfs.oauth_client_authentications | success, failure | authentications/s |\n| adfs.oauth_client_credentials_requests | success, failure | requests/s |\n| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |\n| adfs.oauth_client_windows_authentications | success, failure | authentications/s |\n| adfs.oauth_logon_certificate_requests | success, failure | requests/s |\n| adfs.oauth_password_grant_requests | success, failure | requests/s |\n| adfs.oauth_token_requests_success | success | requests/s |\n| adfs.passive_requests | passive | requests/s |\n| adfs.passport_authentications | passport | authentications/s |\n| adfs.password_change_requests | success, failure | requests/s |\n| adfs.samlp_token_requests_success | success | requests/s |\n| adfs.sso_authentications | success, failure | authentications/s |\n| adfs.token_requests | requests | requests/s |\n| adfs.userpassword_authentications | success, failure | authentications/s |\n| adfs.windows_integrated_authentications | authentications | authentications/s |\n| adfs.wsfed_token_requests_success | success | requests/s |\n| adfs.wstrust_token_requests_success | success | requests/s |\n| exchange.activesync_ping_cmds_pending | pending | commands |\n| exchange.activesync_requests | received | requests/s |\n| exchange.activesync_sync_cmds | processed | commands/s |\n| exchange.autodiscover_requests | processed | requests/s |\n| exchange.avail_service_requests | serviced | requests/s |\n| exchange.owa_current_unique_users | logged-in | users |\n| exchange.owa_requests_total | handled | requests/s |\n| exchange.rpc_active_user_count | active | users |\n| exchange.rpc_avg_latency | latency | seconds |\n| exchange.rpc_connection_count | connections | connections |\n| exchange.rpc_operations | operations | operations/s |\n| exchange.rpc_requests | processed | requests |\n| exchange.rpc_user_count | users | users |\n| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_poison | low, high, none, normal | messages/s |\n| hyperv.vms_health | ok, critical | vms |\n| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |\n| hyperv.root_partition_attached_devices | attached | devices |\n| hyperv.root_partition_deposited_pages | deposited | pages |\n| hyperv.root_partition_skipped_interrupts | skipped | interrupts |\n| hyperv.root_partition_device_dma_errors | illegal_dma | requests |\n| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | 
requests |\n| hyperv.root_partition_device_interrupt_throttle_events | throttling | events |\n| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |\n| hyperv.root_partition_address_space | address_spaces | address spaces |\n| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |\n| hyperv.root_partition_virtual_tlb_pages | used | pages |\n\n### Per cpu core\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| core | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |\n| windows.cpu_core_interrupts | interrupts | interrupts/s |\n| windows.cpu_core_dpcs | dpcs | dpcs/s |\n| windows.cpu_core_cstate | c1, c2, c3 | percentage |\n\n### Per logical disk\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.logical_disk_utilization | free, used | bytes |\n| windows.logical_disk_bandwidth | read, write | bytes/s |\n| windows.logical_disk_operations | reads, writes | operations/s |\n| windows.logical_disk_latency | read, write | seconds |\n\n### Per network device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| nic | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.net_nic_bandwidth | received, sent | kilobits/s |\n| windows.net_nic_packets | received, sent | packets/s |\n| windows.net_nic_errors | inbound, outbound | errors/s |\n| windows.net_nic_discarded | inbound, outbound | discards/s |\n\n### Per thermalzone\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thermalzone | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.thermalzone_temperature | temperature | celsius |\n\n### Per service\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |\n| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |\n\n### Per website\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| website | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.website_traffic | received, sent | bytes/s |\n| iis.website_requests_rate | requests | requests/s |\n| iis.website_active_connections_count | active | connections |\n| iis.website_users_count | anonymous, non_anonymous | users |\n| iis.website_connection_attempts_rate | connection | attempts/s |\n| iis.website_isapi_extension_requests_count | isapi | requests |\n| iis.website_isapi_extension_requests_rate | isapi | requests/s |\n| iis.website_ftp_file_transfer_rate | received, sent | files/s |\n| iis.website_logon_attempts_rate | logon | attempts/s |\n| iis.website_errors_rate | document_locked, document_not_found | errors/s |\n| iis.website_uptime | document_locked, document_not_found | seconds |\n\n### Per mssql instance\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| 
mssql.instance_accessmethods_page_splits | page | splits/s |\n| mssql.instance_cache_hit_ratio | hit_ratio | percentage |\n| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |\n| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |\n| mssql.instance_bufman_iops | read, written | iops |\n| mssql.instance_blocked_processes | blocked | processes |\n| mssql.instance_user_connection | user | connections |\n| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |\n| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |\n| mssql.instance_memmgr_pending_memory_grants | pending | processes |\n| mssql.instance_memmgr_server_memory | memory | bytes |\n| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |\n| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |\n| mssql.instance_sqlstats_batch_requests | batch | requests/s |\n| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |\n| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |\n| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |\n\n### Per database\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n| database | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.database_active_transactions | active | transactions |\n| mssql.database_backup_restore_operations | backup | operations/s |\n| mssql.database_data_files_size | size | bytes |\n| mssql.database_log_flushed | flushed | bytes/s |\n| mssql.database_log_flushes | log | flushes/s |\n| mssql.database_transactions | transactions | transactions/s |\n| mssql.database_write_transactions | write | transactions/s |\n\n### Per certificate template\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cert_template | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adcs.cert_template_requests | requests | requests/s |\n| adcs.cert_template_failed_requests | failed | requests/s |\n| adcs.cert_template_issued_requests | issued | requests/s |\n| adcs.cert_template_pending_requests | pending | requests/s |\n| adcs.cert_template_request_processing_time | processing_time | seconds |\n| adcs.cert_template_retrievals | retrievals | retrievals/s |\n| adcs.cert_template_retrieval_processing_time | processing_time | seconds |\n| adcs.cert_template_request_cryptographic_signing_time | singing_time | seconds |\n| adcs.cert_template_request_policy_module_processing | processing_time | seconds |\n| adcs.cert_template_challenge_responses | challenge | responses/s |\n| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |\n| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |\n| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |\n\n### Per process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| process | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netframework.clrexception_thrown | exceptions | 
exceptions/s |\n| netframework.clrexception_filters | filters | filters/s |\n| netframework.clrexception_finallys | finallys | finallys/s |\n| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |\n| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |\n| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |\n| netframework.clrinterop_interop_stubs_created | created | stubs/s |\n| netframework.clrjit_methods | jit-compiled | methods/s |\n| netframework.clrjit_time | time | percentage |\n| netframework.clrjit_standard_failures | failures | failures/s |\n| netframework.clrjit_il_bytes | compiled_msil | bytes/s |\n| netframework.clrloading_loader_heap_size | committed | bytes |\n| netframework.clrloading_appdomains_loaded | loaded | domain/s |\n| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |\n| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |\n| netframework.clrloading_classes_loaded | loaded | classes/s |\n| netframework.clrloading_class_load_failures | class_load | failures/s |\n| netframework.clrlocksandthreads_queue_length | threads | threads/s |\n| netframework.clrlocksandthreads_current_logical_threads | logical | threads |\n| netframework.clrlocksandthreads_current_physical_threads | physical | threads |\n| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |\n| netframework.clrlocksandthreads_contentions | contentions | contentions/s |\n| netframework.clrmemory_allocated_bytes | allocated | bytes/s |\n| netframework.clrmemory_finalization_survivors | survived | objects |\n| netframework.clrmemory_heap_size | heap | bytes |\n| netframework.clrmemory_promoted | promoted | bytes |\n| netframework.clrmemory_number_gc_handles | used | handles |\n| netframework.clrmemory_collections | gc | gc/s |\n| netframework.clrmemory_induced_gc | gc | gc/s |\n| netframework.clrmemory_number_pinned_objects | pinned | objects |\n| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |\n| netframework.clrmemory_committed | committed | bytes |\n| netframework.clrmemory_reserved | reserved | bytes |\n| netframework.clrmemory_gc_time | time | percentage |\n| netframework.clrremoting_channels | registered | channels/s |\n| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |\n| netframework.clrremoting_context_bound_objects | allocated | objects/s |\n| netframework.clrremoting_context_proxies | objects | objects/s |\n| netframework.clrremoting_contexts | contexts | contexts |\n| netframework.clrremoting_remote_calls | rpc | calls/s |\n| netframework.clrsecurity_link_time_checks | linktime | checks/s |\n| netframework.clrsecurity_checks_time | time | percentage |\n| netframework.clrsecurity_stack_walk_depth | stack | depth |\n| netframework.clrsecurity_runtime_checks | runtime | checks/s |\n\n### Per exchange workload\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.workload_active_tasks | active | tasks |\n| exchange.workload_completed_tasks | completed | tasks/s |\n| exchange.workload_queued_tasks | queued | tasks/s |\n| exchange.workload_yielded_tasks | yielded | tasks/s |\n| exchange.workload_activity_status | active, paused | status |\n\n### Per ldap process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit 
|\n|:------|:----------|:----|\n| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |\n| exchange.ldap_read_time | read | seconds |\n| exchange.ldap_search_time | search | seconds |\n| exchange.ldap_write_time | write | seconds |\n| exchange.ldap_timeout_errors | timeout | errors/s |\n\n### Per http proxy\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.http_proxy_avg_auth_latency | latency | seconds |\n| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |\n| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |\n| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |\n| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |\n| exchange.http_proxy_requests | processed | requests/s |\n\n### Per vm\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_name | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_cpu_usage | gues, hypervisor, remote | percentage |\n| hyperv.vm_memory_physical | assigned_memory | MiB |\n| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |\n| hyperv.vm_memory_pressure_current | pressure | percentage |\n| hyperv.vm_vid_physical_pages_allocated | allocated | pages |\n| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |\n\n### Per vm device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_device_bytes | read, written | bytes/s |\n| hyperv.vm_device_operations | read, write | operations/s |\n| hyperv.vm_device_errors | errors | errors/s |\n\n### Per vm interface\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_interface | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_interface_bytes | received, sent | bytes/s |\n| hyperv.vm_interface_packets | received, sent | packets/s |\n| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |\n\n### Per vswitch\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vswitch | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vswitch_bytes | received, sent | bytes/s |\n| hyperv.vswitch_packets | received, sent | packets/s |\n| hyperv.vswitch_directed_packets | received, sent | packets/s |\n| hyperv.vswitch_broadcast_packets | received, sent | packets/s |\n| hyperv.vswitch_multicast_packets | received, sent | packets/s |\n| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_packets_flooded | flooded | packets/s |\n| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |\n| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-windows-MS_SQL_Server", "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/windows/metadata.yaml", @@ -17352,10 +17733,10 @@ } }, "overview": "# NET Framework\n\nPlugin: go.d.plugin\nModule: windows\n\n## Overview\n\nThis collector monitors the performance of Windows machines, collects both host metrics and metrics from various Windows applications (e.g. 
Active Directory, MSSQL).\n\n\nIt collects metrics by periodically sending HTTP requests to [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt detects Windows exporter instances running on localhost (requires using [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)).\n\nUsing the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nData collection affects the CPU usage of the Windows host. CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\n",
-    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Windows exporter\n\nTo install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/windows.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/windows.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n```yaml\njobs:\n - name: win_server\n url: https://192.0.2.1:9182/metrics\n tls_skip_verify: yes\n\n```\n##### Virtual Node\n\nThe Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.\nYou can create a virtual node for all your Windows machines and control them as separate entities.\n\nTo make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:\n\n> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.\n\n```yaml\n# /etc/netdata/vnodes/vnodes.conf\n- hostname: win_server\n guid: \n```\n\n\n```yaml\njobs:\n - name: win_server\n vnode: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from multiple remote instances.\n\n\n```yaml\njobs:\n - name: win_server1\n url: http://192.0.2.1:9182/metrics\n\n - name: win_server2\n url: http://192.0.2.2:9182/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Windows exporter\n\nTo install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/windows.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/windows.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. 
| | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n```yaml\njobs:\n - name: win_server\n url: https://192.0.2.1:9182/metrics\n tls_skip_verify: yes\n\n```\n##### Virtual Node\n\nThe Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.\nYou can create a virtual node for all your Windows machines and control them as separate entities.\n\nTo make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:\n\n> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.\n\n```yaml\n# /etc/netdata/vnodes/vnodes.conf\n- hostname: win_server\n guid: \n```\n\n\n```yaml\njobs:\n - name: win_server\n vnode: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from multiple remote instances.\n\n\n```yaml\njobs:\n - name: win_server1\n url: http://192.0.2.1:9182/metrics\n\n - name: win_server2\n url: http://192.0.2.2:9182/metrics\n\n```\n",
    "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `windows` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m windows\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `windows` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep windows\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep windows /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
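If the log is long, you can narrow it to the most recent matches first; a minimal sketch, assuming standard `grep` and `tail` are available:\n\n```bash\n# hypothetical refinement: keep only the last 50 matching lines\ngrep windows /var/log/netdata/collector.log | tail -n 50\n```\n\n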
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep windows\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ windows_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.cpu_utilization_total | average CPU utilization over the last 10 minutes |\n| [ windows_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.memory_utilization | memory utilization |\n| [ windows_inbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of inbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of outbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_inbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of inbound errors for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of outbound errors for the network interface in the last 10 minutes |\n| [ windows_disk_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.logical_disk_space_usage | disk space utilization |\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThe collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\nSupported collectors:\n\n- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)\n- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)\n- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)\n- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)\n- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)\n- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)\n- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)\n- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)\n- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)\n- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)\n- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)\n- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)\n- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)\n- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)\n- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)\n- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)\n- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md)\n- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md)\n- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md)\n- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md)\n- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md)\n- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md)\n- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md)\n- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)\n- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)\n\n\n### Per Active Directory instance\n\nThese metrics refer to the entire monitored host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |\n| windows.memory_utilization | available, used | bytes |\n| windows.memory_page_faults | page_faults | events/s |\n| windows.memory_swap_utilization 
| available, used | bytes |\n| windows.memory_swap_operations | read, write | operations/s |\n| windows.memory_swap_pages | read, written | pages/s |\n| windows.memory_cached | cached | KiB |\n| windows.memory_cache_faults | cache_faults | events/s |\n| windows.memory_system_pool | paged, non-paged | bytes |\n| windows.tcp_conns_established | ipv4, ipv6 | connections |\n| windows.tcp_conns_active | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |\n| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |\n| windows.tcp_segments_received | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |\n| windows.os_processes | processes | number |\n| windows.os_users | users | users |\n| windows.os_visible_memory_usage | free, used | bytes |\n| windows.os_paging_files_usage | free, used | bytes |\n| windows.system_threads | threads | number |\n| windows.system_uptime | time | seconds |\n| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |\n| windows.processes_cpu_utilization | a dimension per process | percentage |\n| windows.processes_handles | a dimension per process | handles |\n| windows.processes_io_bytes | a dimension per process | bytes/s |\n| windows.processes_io_operations | a dimension per process | operations/s |\n| windows.processes_page_faults | a dimension per process | pgfaults/s |\n| windows.processes_page_file_bytes | a dimension per process | bytes |\n| windows.processes_pool_bytes | a dimension per process | bytes |\n| windows.processes_threads | a dimension per process | threads |\n| ad.database_operations | add, delete, modify, recycle | operations/s |\n| ad.directory_operations | read, write, search | operations/s |\n| ad.name_cache_lookups | lookups | lookups/s |\n| ad.name_cache_hits | hits | hits/s |\n| ad.atq_average_request_latency | time | seconds |\n| ad.atq_outstanding_requests | outstanding | requests |\n| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |\n| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |\n| ad.dra_replication_properties_updated | inbound, outbound | properties/s |\n| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |\n| ad.dra_replication_pending_syncs | pending | syncs |\n| ad.dra_replication_sync_requests | requests | requests/s |\n| ad.ds_threads | in_use | threads |\n| ad.ldap_last_bind_time | last_bind | seconds |\n| ad.binds | binds | binds/s |\n| ad.ldap_searches | searches | searches/s |\n| adfs.ad_login_connection_failures | connection | failures/s |\n| adfs.certificate_authentications | authentications | authentications/s |\n| adfs.db_artifact_failures | connection | failures/s |\n| adfs.db_artifact_query_time_seconds | query_time | seconds/s |\n| adfs.db_config_failures | connection | failures/s |\n| adfs.db_config_query_time_seconds | query_time | seconds/s |\n| adfs.device_authentications | authentications | authentications/s |\n| adfs.external_authentications | success, failure | authentications/s |\n| adfs.federated_authentications | authentications | 
authentications/s |\n| adfs.federation_metadata_requests | requests | requests/s |\n| adfs.oauth_authorization_requests | requests | requests/s |\n| adfs.oauth_client_authentications | success, failure | authentications/s |\n| adfs.oauth_client_credentials_requests | success, failure | requests/s |\n| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |\n| adfs.oauth_client_windows_authentications | success, failure | authentications/s |\n| adfs.oauth_logon_certificate_requests | success, failure | requests/s |\n| adfs.oauth_password_grant_requests | success, failure | requests/s |\n| adfs.oauth_token_requests_success | success | requests/s |\n| adfs.passive_requests | passive | requests/s |\n| adfs.passport_authentications | passport | authentications/s |\n| adfs.password_change_requests | success, failure | requests/s |\n| adfs.samlp_token_requests_success | success | requests/s |\n| adfs.sso_authentications | success, failure | authentications/s |\n| adfs.token_requests | requests | requests/s |\n| adfs.userpassword_authentications | success, failure | authentications/s |\n| adfs.windows_integrated_authentications | authentications | authentications/s |\n| adfs.wsfed_token_requests_success | success | requests/s |\n| adfs.wstrust_token_requests_success | success | requests/s |\n| exchange.activesync_ping_cmds_pending | pending | commands |\n| exchange.activesync_requests | received | requests/s |\n| exchange.activesync_sync_cmds | processed | commands/s |\n| exchange.autodiscover_requests | processed | requests/s |\n| exchange.avail_service_requests | serviced | requests/s |\n| exchange.owa_current_unique_users | logged-in | users |\n| exchange.owa_requests_total | handled | requests/s |\n| exchange.rpc_active_user_count | active | users |\n| exchange.rpc_avg_latency | latency | seconds |\n| exchange.rpc_connection_count | connections | connections |\n| exchange.rpc_operations | operations | operations/s |\n| exchange.rpc_requests | processed | requests |\n| exchange.rpc_user_count | users | users |\n| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_poison | low, high, none, normal | messages/s |\n| hyperv.vms_health | ok, critical | vms |\n| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |\n| hyperv.root_partition_attached_devices | attached | devices |\n| hyperv.root_partition_deposited_pages | deposited | pages |\n| hyperv.root_partition_skipped_interrupts | skipped | interrupts |\n| hyperv.root_partition_device_dma_errors | illegal_dma | requests |\n| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | requests |\n| 
hyperv.root_partition_device_interrupt_throttle_events | throttling | events |\n| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |\n| hyperv.root_partition_address_space | address_spaces | address spaces |\n| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |\n| hyperv.root_partition_virtual_tlb_pages | used | pages |\n\n### Per cpu core\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| core | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |\n| windows.cpu_core_interrupts | interrupts | interrupts/s |\n| windows.cpu_core_dpcs | dpcs | dpcs/s |\n| windows.cpu_core_cstate | c1, c2, c3 | percentage |\n\n### Per logical disk\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.logical_disk_utilization | free, used | bytes |\n| windows.logical_disk_bandwidth | read, write | bytes/s |\n| windows.logical_disk_operations | reads, writes | operations/s |\n| windows.logical_disk_latency | read, write | seconds |\n\n### Per network device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| nic | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.net_nic_bandwidth | received, sent | kilobits/s |\n| windows.net_nic_packets | received, sent | packets/s |\n| windows.net_nic_errors | inbound, outbound | errors/s |\n| windows.net_nic_discarded | inbound, outbound | discards/s |\n\n### Per thermalzone\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thermalzone | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.thermalzone_temperature | temperature | celsius |\n\n### Per service\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |\n| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |\n\n### Per website\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| website | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.website_traffic | received, sent | bytes/s |\n| iis.website_requests_rate | requests | requests/s |\n| iis.website_active_connections_count | active | connections |\n| iis.website_users_count | anonymous, non_anonymous | users |\n| iis.website_connection_attempts_rate | connection | attempts/s |\n| iis.website_isapi_extension_requests_count | isapi | requests |\n| iis.website_isapi_extension_requests_rate | isapi | requests/s |\n| iis.website_ftp_file_transfer_rate | received, sent | files/s |\n| iis.website_logon_attempts_rate | logon | attempts/s |\n| iis.website_errors_rate | document_locked, document_not_found | errors/s |\n| iis.website_uptime | document_locked, document_not_found | seconds |\n\n### Per mssql instance\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.instance_accessmethods_page_splits | 
page | splits/s |\n| mssql.instance_cache_hit_ratio | hit_ratio | percentage |\n| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |\n| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |\n| mssql.instance_bufman_iops | read, written | iops |\n| mssql.instance_blocked_processes | blocked | processes |\n| mssql.instance_user_connection | user | connections |\n| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |\n| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |\n| mssql.instance_memmgr_pending_memory_grants | pending | processes |\n| mssql.instance_memmgr_server_memory | memory | bytes |\n| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |\n| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |\n| mssql.instance_sqlstats_batch_requests | batch | requests/s |\n| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |\n| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |\n| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |\n\n### Per database\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n| database | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.database_active_transactions | active | transactions |\n| mssql.database_backup_restore_operations | backup | operations/s |\n| mssql.database_data_files_size | size | bytes |\n| mssql.database_log_flushed | flushed | bytes/s |\n| mssql.database_log_flushes | log | flushes/s |\n| mssql.database_transactions | transactions | transactions/s |\n| mssql.database_write_transactions | write | transactions/s |\n\n### Per certificate template\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cert_template | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adcs.cert_template_requests | requests | requests/s |\n| adcs.cert_template_failed_requests | failed | requests/s |\n| adcs.cert_template_issued_requests | issued | requests/s |\n| adcs.cert_template_pending_requests | pending | requests/s |\n| adcs.cert_template_request_processing_time | processing_time | seconds |\n| adcs.cert_template_retrievals | retrievals | retrievals/s |\n| adcs.cert_template_retrieval_processing_time | processing_time | seconds |\n| adcs.cert_template_request_cryptographic_signing_time | singing_time | seconds |\n| adcs.cert_template_request_policy_module_processing | processing_time | seconds |\n| adcs.cert_template_challenge_responses | challenge | responses/s |\n| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |\n| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |\n| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |\n\n### Per process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| process | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netframework.clrexception_thrown | exceptions | exceptions/s |\n| 
netframework.clrexception_filters | filters | filters/s |\n| netframework.clrexception_finallys | finallys | finallys/s |\n| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |\n| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |\n| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |\n| netframework.clrinterop_interop_stubs_created | created | stubs/s |\n| netframework.clrjit_methods | jit-compiled | methods/s |\n| netframework.clrjit_time | time | percentage |\n| netframework.clrjit_standard_failures | failures | failures/s |\n| netframework.clrjit_il_bytes | compiled_msil | bytes/s |\n| netframework.clrloading_loader_heap_size | committed | bytes |\n| netframework.clrloading_appdomains_loaded | loaded | domain/s |\n| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |\n| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |\n| netframework.clrloading_classes_loaded | loaded | classes/s |\n| netframework.clrloading_class_load_failures | class_load | failures/s |\n| netframework.clrlocksandthreads_queue_length | threads | threads/s |\n| netframework.clrlocksandthreads_current_logical_threads | logical | threads |\n| netframework.clrlocksandthreads_current_physical_threads | physical | threads |\n| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |\n| netframework.clrlocksandthreads_contentions | contentions | contentions/s |\n| netframework.clrmemory_allocated_bytes | allocated | bytes/s |\n| netframework.clrmemory_finalization_survivors | survived | objects |\n| netframework.clrmemory_heap_size | heap | bytes |\n| netframework.clrmemory_promoted | promoted | bytes |\n| netframework.clrmemory_number_gc_handles | used | handles |\n| netframework.clrmemory_collections | gc | gc/s |\n| netframework.clrmemory_induced_gc | gc | gc/s |\n| netframework.clrmemory_number_pinned_objects | pinned | objects |\n| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |\n| netframework.clrmemory_committed | committed | bytes |\n| netframework.clrmemory_reserved | reserved | bytes |\n| netframework.clrmemory_gc_time | time | percentage |\n| netframework.clrremoting_channels | registered | channels/s |\n| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |\n| netframework.clrremoting_context_bound_objects | allocated | objects/s |\n| netframework.clrremoting_context_proxies | objects | objects/s |\n| netframework.clrremoting_contexts | contexts | contexts |\n| netframework.clrremoting_remote_calls | rpc | calls/s |\n| netframework.clrsecurity_link_time_checks | linktime | checks/s |\n| netframework.clrsecurity_checks_time | time | percentage |\n| netframework.clrsecurity_stack_walk_depth | stack | depth |\n| netframework.clrsecurity_runtime_checks | runtime | checks/s |\n\n### Per exchange workload\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.workload_active_tasks | active | tasks |\n| exchange.workload_completed_tasks | completed | tasks/s |\n| exchange.workload_queued_tasks | queued | tasks/s |\n| exchange.workload_yielded_tasks | yielded | tasks/s |\n| exchange.workload_activity_status | active, paused | status |\n\n### Per ldap process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit 
|\n|:------|:----------|:----|\n| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |\n| exchange.ldap_read_time | read | seconds |\n| exchange.ldap_search_time | search | seconds |\n| exchange.ldap_write_time | write | seconds |\n| exchange.ldap_timeout_errors | timeout | errors/s |\n\n### Per http proxy\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.http_proxy_avg_auth_latency | latency | seconds |\n| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |\n| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |\n| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |\n| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |\n| exchange.http_proxy_requests | processed | requests/s |\n\n### Per vm\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_name | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_cpu_usage | gues, hypervisor, remote | percentage |\n| hyperv.vm_memory_physical | assigned_memory | MiB |\n| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |\n| hyperv.vm_memory_pressure_current | pressure | percentage |\n| hyperv.vm_vid_physical_pages_allocated | allocated | pages |\n| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |\n\n### Per vm device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_device_bytes | read, written | bytes/s |\n| hyperv.vm_device_operations | read, write | operations/s |\n| hyperv.vm_device_errors | errors | errors/s |\n\n### Per vm interface\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_interface | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_interface_bytes | received, sent | bytes/s |\n| hyperv.vm_interface_packets | received, sent | packets/s |\n| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |\n\n### Per vswitch\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vswitch | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vswitch_bytes | received, sent | bytes/s |\n| hyperv.vswitch_packets | received, sent | packets/s |\n| hyperv.vswitch_directed_packets | received, sent | packets/s |\n| hyperv.vswitch_broadcast_packets | received, sent | packets/s |\n| hyperv.vswitch_multicast_packets | received, sent | packets/s |\n| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_packets_flooded | flooded | packets/s |\n| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |\n| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |\n\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
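For example, in the \"Per logical disk\" scope below, each disk is its own instance, so two disks yield two instances of every metric in that scope; a purely illustrative sketch (hypothetical label values, not actual exporter output):\n\n```\nwindows.logical_disk_utilization {disk=\"C:\"}  <- one instance\nwindows.logical_disk_utilization {disk=\"D:\"}  <- another instance\n```\n\n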
An instance is uniquely identified by a set of labels.\n\nThe collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\nSupported collectors:\n\n- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)\n- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)\n- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)\n- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)\n- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)\n- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)\n- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)\n- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)\n- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)\n- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)\n- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)\n- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)\n- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)\n- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)\n- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)\n- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)\n- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-exceptions)\n- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-interop)\n- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-jit)\n- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-loading)\n- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-locks-and-threads)\n- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-memory)\n- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-remoting)\n- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)\n- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)\n\n\n### Per Active Directory instance\n\nThese metrics refer to the entire monitored host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |\n| windows.memory_utilization | available, used | bytes |\n| windows.memory_page_faults | page_faults | events/s |\n| 
windows.memory_swap_utilization | available, used | bytes |\n| windows.memory_swap_operations | read, write | operations/s |\n| windows.memory_swap_pages | read, written | pages/s |\n| windows.memory_cached | cached | KiB |\n| windows.memory_cache_faults | cache_faults | events/s |\n| windows.memory_system_pool | paged, non-paged | bytes |\n| windows.tcp_conns_established | ipv4, ipv6 | connections |\n| windows.tcp_conns_active | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |\n| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |\n| windows.tcp_segments_received | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |\n| windows.os_processes | processes | number |\n| windows.os_users | users | users |\n| windows.os_visible_memory_usage | free, used | bytes |\n| windows.os_paging_files_usage | free, used | bytes |\n| windows.system_threads | threads | number |\n| windows.system_uptime | time | seconds |\n| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |\n| windows.processes_cpu_utilization | a dimension per process | percentage |\n| windows.processes_handles | a dimension per process | handles |\n| windows.processes_io_bytes | a dimension per process | bytes/s |\n| windows.processes_io_operations | a dimension per process | operations/s |\n| windows.processes_page_faults | a dimension per process | pgfaults/s |\n| windows.processes_page_file_bytes | a dimension per process | bytes |\n| windows.processes_pool_bytes | a dimension per process | bytes |\n| windows.processes_threads | a dimension per process | threads |\n| ad.database_operations | add, delete, modify, recycle | operations/s |\n| ad.directory_operations | read, write, search | operations/s |\n| ad.name_cache_lookups | lookups | lookups/s |\n| ad.name_cache_hits | hits | hits/s |\n| ad.atq_average_request_latency | time | seconds |\n| ad.atq_outstanding_requests | outstanding | requests |\n| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |\n| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |\n| ad.dra_replication_properties_updated | inbound, outbound | properties/s |\n| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |\n| ad.dra_replication_pending_syncs | pending | syncs |\n| ad.dra_replication_sync_requests | requests | requests/s |\n| ad.ds_threads | in_use | threads |\n| ad.ldap_last_bind_time | last_bind | seconds |\n| ad.binds | binds | binds/s |\n| ad.ldap_searches | searches | searches/s |\n| adfs.ad_login_connection_failures | connection | failures/s |\n| adfs.certificate_authentications | authentications | authentications/s |\n| adfs.db_artifact_failures | connection | failures/s |\n| adfs.db_artifact_query_time_seconds | query_time | seconds/s |\n| adfs.db_config_failures | connection | failures/s |\n| adfs.db_config_query_time_seconds | query_time | seconds/s |\n| adfs.device_authentications | authentications | authentications/s |\n| adfs.external_authentications | success, failure | authentications/s |\n| 
adfs.federated_authentications | authentications | authentications/s |\n| adfs.federation_metadata_requests | requests | requests/s |\n| adfs.oauth_authorization_requests | requests | requests/s |\n| adfs.oauth_client_authentications | success, failure | authentications/s |\n| adfs.oauth_client_credentials_requests | success, failure | requests/s |\n| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |\n| adfs.oauth_client_windows_authentications | success, failure | authentications/s |\n| adfs.oauth_logon_certificate_requests | success, failure | requests/s |\n| adfs.oauth_password_grant_requests | success, failure | requests/s |\n| adfs.oauth_token_requests_success | success | requests/s |\n| adfs.passive_requests | passive | requests/s |\n| adfs.passport_authentications | passport | authentications/s |\n| adfs.password_change_requests | success, failure | requests/s |\n| adfs.samlp_token_requests_success | success | requests/s |\n| adfs.sso_authentications | success, failure | authentications/s |\n| adfs.token_requests | requests | requests/s |\n| adfs.userpassword_authentications | success, failure | authentications/s |\n| adfs.windows_integrated_authentications | authentications | authentications/s |\n| adfs.wsfed_token_requests_success | success | requests/s |\n| adfs.wstrust_token_requests_success | success | requests/s |\n| exchange.activesync_ping_cmds_pending | pending | commands |\n| exchange.activesync_requests | received | requests/s |\n| exchange.activesync_sync_cmds | processed | commands/s |\n| exchange.autodiscover_requests | processed | requests/s |\n| exchange.avail_service_requests | serviced | requests/s |\n| exchange.owa_current_unique_users | logged-in | users |\n| exchange.owa_requests_total | handled | requests/s |\n| exchange.rpc_active_user_count | active | users |\n| exchange.rpc_avg_latency | latency | seconds |\n| exchange.rpc_connection_count | connections | connections |\n| exchange.rpc_operations | operations | operations/s |\n| exchange.rpc_requests | processed | requests |\n| exchange.rpc_user_count | users | users |\n| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_poison | low, high, none, normal | messages/s |\n| hyperv.vms_health | ok, critical | vms |\n| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |\n| hyperv.root_partition_attached_devices | attached | devices |\n| hyperv.root_partition_deposited_pages | deposited | pages |\n| hyperv.root_partition_skipped_interrupts | skipped | interrupts |\n| hyperv.root_partition_device_dma_errors | illegal_dma | requests |\n| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | 
requests |\n| hyperv.root_partition_device_interrupt_throttle_events | throttling | events |\n| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |\n| hyperv.root_partition_address_space | address_spaces | address spaces |\n| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |\n| hyperv.root_partition_virtual_tlb_pages | used | pages |\n\n### Per cpu core\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| core | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |\n| windows.cpu_core_interrupts | interrupts | interrupts/s |\n| windows.cpu_core_dpcs | dpcs | dpcs/s |\n| windows.cpu_core_cstate | c1, c2, c3 | percentage |\n\n### Per logical disk\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.logical_disk_utilization | free, used | bytes |\n| windows.logical_disk_bandwidth | read, write | bytes/s |\n| windows.logical_disk_operations | reads, writes | operations/s |\n| windows.logical_disk_latency | read, write | seconds |\n\n### Per network device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| nic | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.net_nic_bandwidth | received, sent | kilobits/s |\n| windows.net_nic_packets | received, sent | packets/s |\n| windows.net_nic_errors | inbound, outbound | errors/s |\n| windows.net_nic_discarded | inbound, outbound | discards/s |\n\n### Per thermalzone\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thermalzone | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.thermalzone_temperature | temperature | celsius |\n\n### Per service\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |\n| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |\n\n### Per website\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| website | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.website_traffic | received, sent | bytes/s |\n| iis.website_requests_rate | requests | requests/s |\n| iis.website_active_connections_count | active | connections |\n| iis.website_users_count | anonymous, non_anonymous | users |\n| iis.website_connection_attempts_rate | connection | attempts/s |\n| iis.website_isapi_extension_requests_count | isapi | requests |\n| iis.website_isapi_extension_requests_rate | isapi | requests/s |\n| iis.website_ftp_file_transfer_rate | received, sent | files/s |\n| iis.website_logon_attempts_rate | logon | attempts/s |\n| iis.website_errors_rate | document_locked, document_not_found | errors/s |\n| iis.website_uptime | document_locked, document_not_found | seconds |\n\n### Per mssql instance\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| 
mssql.instance_accessmethods_page_splits | page | splits/s |\n| mssql.instance_cache_hit_ratio | hit_ratio | percentage |\n| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |\n| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |\n| mssql.instance_bufman_iops | read, written | iops |\n| mssql.instance_blocked_processes | blocked | processes |\n| mssql.instance_user_connection | user | connections |\n| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |\n| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |\n| mssql.instance_memmgr_pending_memory_grants | pending | processes |\n| mssql.instance_memmgr_server_memory | memory | bytes |\n| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |\n| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |\n| mssql.instance_sqlstats_batch_requests | batch | requests/s |\n| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |\n| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |\n| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |\n\n### Per database\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n| database | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.database_active_transactions | active | transactions |\n| mssql.database_backup_restore_operations | backup | operations/s |\n| mssql.database_data_files_size | size | bytes |\n| mssql.database_log_flushed | flushed | bytes/s |\n| mssql.database_log_flushes | log | flushes/s |\n| mssql.database_transactions | transactions | transactions/s |\n| mssql.database_write_transactions | write | transactions/s |\n\n### Per certificate template\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cert_template | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adcs.cert_template_requests | requests | requests/s |\n| adcs.cert_template_failed_requests | failed | requests/s |\n| adcs.cert_template_issued_requests | issued | requests/s |\n| adcs.cert_template_pending_requests | pending | requests/s |\n| adcs.cert_template_request_processing_time | processing_time | seconds |\n| adcs.cert_template_retrievals | retrievals | retrievals/s |\n| adcs.cert_template_retrieval_processing_time | processing_time | seconds |\n| adcs.cert_template_request_cryptographic_signing_time | singing_time | seconds |\n| adcs.cert_template_request_policy_module_processing | processing_time | seconds |\n| adcs.cert_template_challenge_responses | challenge | responses/s |\n| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |\n| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |\n| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |\n\n### Per process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| process | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netframework.clrexception_thrown | exceptions | 
exceptions/s |\n| netframework.clrexception_filters | filters | filters/s |\n| netframework.clrexception_finallys | finallys | finallys/s |\n| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |\n| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |\n| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |\n| netframework.clrinterop_interop_stubs_created | created | stubs/s |\n| netframework.clrjit_methods | jit-compiled | methods/s |\n| netframework.clrjit_time | time | percentage |\n| netframework.clrjit_standard_failures | failures | failures/s |\n| netframework.clrjit_il_bytes | compiled_msil | bytes/s |\n| netframework.clrloading_loader_heap_size | committed | bytes |\n| netframework.clrloading_appdomains_loaded | loaded | domain/s |\n| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |\n| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |\n| netframework.clrloading_classes_loaded | loaded | classes/s |\n| netframework.clrloading_class_load_failures | class_load | failures/s |\n| netframework.clrlocksandthreads_queue_length | threads | threads/s |\n| netframework.clrlocksandthreads_current_logical_threads | logical | threads |\n| netframework.clrlocksandthreads_current_physical_threads | physical | threads |\n| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |\n| netframework.clrlocksandthreads_contentions | contentions | contentions/s |\n| netframework.clrmemory_allocated_bytes | allocated | bytes/s |\n| netframework.clrmemory_finalization_survivors | survived | objects |\n| netframework.clrmemory_heap_size | heap | bytes |\n| netframework.clrmemory_promoted | promoted | bytes |\n| netframework.clrmemory_number_gc_handles | used | handles |\n| netframework.clrmemory_collections | gc | gc/s |\n| netframework.clrmemory_induced_gc | gc | gc/s |\n| netframework.clrmemory_number_pinned_objects | pinned | objects |\n| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |\n| netframework.clrmemory_committed | committed | bytes |\n| netframework.clrmemory_reserved | reserved | bytes |\n| netframework.clrmemory_gc_time | time | percentage |\n| netframework.clrremoting_channels | registered | channels/s |\n| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |\n| netframework.clrremoting_context_bound_objects | allocated | objects/s |\n| netframework.clrremoting_context_proxies | objects | objects/s |\n| netframework.clrremoting_contexts | contexts | contexts |\n| netframework.clrremoting_remote_calls | rpc | calls/s |\n| netframework.clrsecurity_link_time_checks | linktime | checks/s |\n| netframework.clrsecurity_checks_time | time | percentage |\n| netframework.clrsecurity_stack_walk_depth | stack | depth |\n| netframework.clrsecurity_runtime_checks | runtime | checks/s |\n\n### Per exchange workload\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.workload_active_tasks | active | tasks |\n| exchange.workload_completed_tasks | completed | tasks/s |\n| exchange.workload_queued_tasks | queued | tasks/s |\n| exchange.workload_yielded_tasks | yielded | tasks/s |\n| exchange.workload_activity_status | active, paused | status |\n\n### Per ldap process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit 
|\n|:------|:----------|:----|\n| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |\n| exchange.ldap_read_time | read | seconds |\n| exchange.ldap_search_time | search | seconds |\n| exchange.ldap_write_time | write | seconds |\n| exchange.ldap_timeout_errors | timeout | errors/s |\n\n### Per http proxy\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.http_proxy_avg_auth_latency | latency | seconds |\n| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |\n| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |\n| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |\n| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |\n| exchange.http_proxy_requests | processed | requests/s |\n\n### Per vm\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_name | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_cpu_usage | gues, hypervisor, remote | percentage |\n| hyperv.vm_memory_physical | assigned_memory | MiB |\n| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |\n| hyperv.vm_memory_pressure_current | pressure | percentage |\n| hyperv.vm_vid_physical_pages_allocated | allocated | pages |\n| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |\n\n### Per vm device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_device_bytes | read, written | bytes/s |\n| hyperv.vm_device_operations | read, write | operations/s |\n| hyperv.vm_device_errors | errors | errors/s |\n\n### Per vm interface\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_interface | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_interface_bytes | received, sent | bytes/s |\n| hyperv.vm_interface_packets | received, sent | packets/s |\n| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |\n\n### Per vswitch\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vswitch | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vswitch_bytes | received, sent | bytes/s |\n| hyperv.vswitch_packets | received, sent | packets/s |\n| hyperv.vswitch_directed_packets | received, sent | packets/s |\n| hyperv.vswitch_broadcast_packets | received, sent | packets/s |\n| hyperv.vswitch_multicast_packets | received, sent | packets/s |\n| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_packets_flooded | flooded | packets/s |\n| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |\n| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-windows-NET_Framework", "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/windows/metadata.yaml", @@ -17389,10 +17770,10 @@ } }, "overview": "# Windows\n\nPlugin: go.d.plugin\nModule: windows\n\n## Overview\n\nThis collector monitors the performance of Windows machines, collects both host metrics and metrics from various Windows applications (e.g. 
Active Directory, MSSQL).\n\n\nIt collects metrics by periodically sending HTTP requests to the [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt detects Windows exporter instances running on localhost (requires using the [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)).\n\nUsing the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nData collection affects the CPU usage of the Windows host. CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\n",
-    "setup": "## Setup\n\n### Prerequisites\n\n#### Install Windows exporter\n\nTo install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/windows.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/windows.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n```yaml\njobs:\n - name: win_server\n url: https://192.0.2.1:9182/metrics\n tls_skip_verify: yes\n\n```\n##### Virtual Node\n\nThe Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.\nYou can create a virtual node for all your Windows machines and control them as separate entities.\n\nTo make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:\n\n> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.\n\n```yaml\n# /etc/netdata/vnodes/vnodes.conf\n- hostname: win_server\n guid: \n```\n\n\n```yaml\njobs:\n - name: win_server\n vnode: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from multiple remote instances.\n\n\n```yaml\njobs:\n - name: win_server1\n url: http://192.0.2.1:9182/metrics\n\n - name: win_server2\n url: http://192.0.2.2:9182/metrics\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install Windows exporter\n\nTo install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/windows.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/windows.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. 
| | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n```yaml\njobs:\n - name: win_server\n url: https://192.0.2.1:9182/metrics\n tls_skip_verify: yes\n\n```\n##### Virtual Node\n\nThe Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.\nYou can create a virtual node for all your Windows machines and control them as separate entities.\n\nTo make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:\n\n> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.\n\n```yaml\n# /etc/netdata/vnodes/vnodes.conf\n- hostname: win_server\n guid: \n```\n\n\n```yaml\njobs:\n - name: win_server\n vnode: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from multiple remote instances.\n\n\n```yaml\njobs:\n - name: win_server1\n url: http://192.0.2.1:9182/metrics\n\n - name: win_server2\n url: http://192.0.2.2:9182/metrics\n\n```\n",
    "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `windows` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m windows\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `windows` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep windows\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep windows /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep windows\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ windows_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.cpu_utilization_total | average CPU utilization over the last 10 minutes |\n| [ windows_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.memory_utilization | memory utilization |\n| [ windows_inbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of inbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of outbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_inbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of inbound errors for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of outbound errors for the network interface in the last 10 minutes |\n| [ windows_disk_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.logical_disk_space_usage | disk space utilization |\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThe collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\nSupported collectors:\n\n- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)\n- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)\n- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)\n- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)\n- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)\n- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)\n- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)\n- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)\n- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)\n- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)\n- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)\n- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)\n- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)\n- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)\n- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)\n- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)\n- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md)\n- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md)\n- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md)\n- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md)\n- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md)\n- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md)\n- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md)\n- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)\n- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)\n\n\n### Per Active Directory instance\n\nThese metrics refer to the entire monitored host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |\n| windows.memory_utilization | available, used | bytes |\n| windows.memory_page_faults | page_faults | events/s |\n| windows.memory_swap_utilization 
| available, used | bytes |\n| windows.memory_swap_operations | read, write | operations/s |\n| windows.memory_swap_pages | read, written | pages/s |\n| windows.memory_cached | cached | KiB |\n| windows.memory_cache_faults | cache_faults | events/s |\n| windows.memory_system_pool | paged, non-paged | bytes |\n| windows.tcp_conns_established | ipv4, ipv6 | connections |\n| windows.tcp_conns_active | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |\n| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |\n| windows.tcp_segments_received | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |\n| windows.os_processes | processes | number |\n| windows.os_users | users | users |\n| windows.os_visible_memory_usage | free, used | bytes |\n| windows.os_paging_files_usage | free, used | bytes |\n| windows.system_threads | threads | number |\n| windows.system_uptime | time | seconds |\n| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |\n| windows.processes_cpu_utilization | a dimension per process | percentage |\n| windows.processes_handles | a dimension per process | handles |\n| windows.processes_io_bytes | a dimension per process | bytes/s |\n| windows.processes_io_operations | a dimension per process | operations/s |\n| windows.processes_page_faults | a dimension per process | pgfaults/s |\n| windows.processes_page_file_bytes | a dimension per process | bytes |\n| windows.processes_pool_bytes | a dimension per process | bytes |\n| windows.processes_threads | a dimension per process | threads |\n| ad.database_operations | add, delete, modify, recycle | operations/s |\n| ad.directory_operations | read, write, search | operations/s |\n| ad.name_cache_lookups | lookups | lookups/s |\n| ad.name_cache_hits | hits | hits/s |\n| ad.atq_average_request_latency | time | seconds |\n| ad.atq_outstanding_requests | outstanding | requests |\n| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |\n| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |\n| ad.dra_replication_properties_updated | inbound, outbound | properties/s |\n| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |\n| ad.dra_replication_pending_syncs | pending | syncs |\n| ad.dra_replication_sync_requests | requests | requests/s |\n| ad.ds_threads | in_use | threads |\n| ad.ldap_last_bind_time | last_bind | seconds |\n| ad.binds | binds | binds/s |\n| ad.ldap_searches | searches | searches/s |\n| adfs.ad_login_connection_failures | connection | failures/s |\n| adfs.certificate_authentications | authentications | authentications/s |\n| adfs.db_artifact_failures | connection | failures/s |\n| adfs.db_artifact_query_time_seconds | query_time | seconds/s |\n| adfs.db_config_failures | connection | failures/s |\n| adfs.db_config_query_time_seconds | query_time | seconds/s |\n| adfs.device_authentications | authentications | authentications/s |\n| adfs.external_authentications | success, failure | authentications/s |\n| adfs.federated_authentications | authentications | 
authentications/s |\n| adfs.federation_metadata_requests | requests | requests/s |\n| adfs.oauth_authorization_requests | requests | requests/s |\n| adfs.oauth_client_authentications | success, failure | authentications/s |\n| adfs.oauth_client_credentials_requests | success, failure | requests/s |\n| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |\n| adfs.oauth_client_windows_authentications | success, failure | authentications/s |\n| adfs.oauth_logon_certificate_requests | success, failure | requests/s |\n| adfs.oauth_password_grant_requests | success, failure | requests/s |\n| adfs.oauth_token_requests_success | success | requests/s |\n| adfs.passive_requests | passive | requests/s |\n| adfs.passport_authentications | passport | authentications/s |\n| adfs.password_change_requests | success, failure | requests/s |\n| adfs.samlp_token_requests_success | success | requests/s |\n| adfs.sso_authentications | success, failure | authentications/s |\n| adfs.token_requests | requests | requests/s |\n| adfs.userpassword_authentications | success, failure | authentications/s |\n| adfs.windows_integrated_authentications | authentications | authentications/s |\n| adfs.wsfed_token_requests_success | success | requests/s |\n| adfs.wstrust_token_requests_success | success | requests/s |\n| exchange.activesync_ping_cmds_pending | pending | commands |\n| exchange.activesync_requests | received | requests/s |\n| exchange.activesync_sync_cmds | processed | commands/s |\n| exchange.autodiscover_requests | processed | requests/s |\n| exchange.avail_service_requests | serviced | requests/s |\n| exchange.owa_current_unique_users | logged-in | users |\n| exchange.owa_requests_total | handled | requests/s |\n| exchange.rpc_active_user_count | active | users |\n| exchange.rpc_avg_latency | latency | seconds |\n| exchange.rpc_connection_count | connections | connections |\n| exchange.rpc_operations | operations | operations/s |\n| exchange.rpc_requests | processed | requests |\n| exchange.rpc_user_count | users | users |\n| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_poison | low, high, none, normal | messages/s |\n| hyperv.vms_health | ok, critical | vms |\n| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |\n| hyperv.root_partition_attached_devices | attached | devices |\n| hyperv.root_partition_deposited_pages | deposited | pages |\n| hyperv.root_partition_skipped_interrupts | skipped | interrupts |\n| hyperv.root_partition_device_dma_errors | illegal_dma | requests |\n| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | requests |\n| 
hyperv.root_partition_device_interrupt_throttle_events | throttling | events |\n| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |\n| hyperv.root_partition_address_space | address_spaces | address spaces |\n| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |\n| hyperv.root_partition_virtual_tlb_pages | used | pages |\n\n### Per cpu core\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| core | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |\n| windows.cpu_core_interrupts | interrupts | interrupts/s |\n| windows.cpu_core_dpcs | dpcs | dpcs/s |\n| windows.cpu_core_cstate | c1, c2, c3 | percentage |\n\n### Per logical disk\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.logical_disk_utilization | free, used | bytes |\n| windows.logical_disk_bandwidth | read, write | bytes/s |\n| windows.logical_disk_operations | reads, writes | operations/s |\n| windows.logical_disk_latency | read, write | seconds |\n\n### Per network device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| nic | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.net_nic_bandwidth | received, sent | kilobits/s |\n| windows.net_nic_packets | received, sent | packets/s |\n| windows.net_nic_errors | inbound, outbound | errors/s |\n| windows.net_nic_discarded | inbound, outbound | discards/s |\n\n### Per thermalzone\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thermalzone | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.thermalzone_temperature | temperature | celsius |\n\n### Per service\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |\n| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |\n\n### Per website\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| website | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.website_traffic | received, sent | bytes/s |\n| iis.website_requests_rate | requests | requests/s |\n| iis.website_active_connections_count | active | connections |\n| iis.website_users_count | anonymous, non_anonymous | users |\n| iis.website_connection_attempts_rate | connection | attempts/s |\n| iis.website_isapi_extension_requests_count | isapi | requests |\n| iis.website_isapi_extension_requests_rate | isapi | requests/s |\n| iis.website_ftp_file_transfer_rate | received, sent | files/s |\n| iis.website_logon_attempts_rate | logon | attempts/s |\n| iis.website_errors_rate | document_locked, document_not_found | errors/s |\n| iis.website_uptime | document_locked, document_not_found | seconds |\n\n### Per mssql instance\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.instance_accessmethods_page_splits | 
page | splits/s |\n| mssql.instance_cache_hit_ratio | hit_ratio | percentage |\n| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |\n| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |\n| mssql.instance_bufman_iops | read, written | iops |\n| mssql.instance_blocked_processes | blocked | processes |\n| mssql.instance_user_connection | user | connections |\n| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |\n| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |\n| mssql.instance_memmgr_pending_memory_grants | pending | processes |\n| mssql.instance_memmgr_server_memory | memory | bytes |\n| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |\n| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |\n| mssql.instance_sqlstats_batch_requests | batch | requests/s |\n| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |\n| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |\n| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |\n\n### Per database\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n| database | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.database_active_transactions | active | transactions |\n| mssql.database_backup_restore_operations | backup | operations/s |\n| mssql.database_data_files_size | size | bytes |\n| mssql.database_log_flushed | flushed | bytes/s |\n| mssql.database_log_flushes | log | flushes/s |\n| mssql.database_transactions | transactions | transactions/s |\n| mssql.database_write_transactions | write | transactions/s |\n\n### Per certificate template\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cert_template | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adcs.cert_template_requests | requests | requests/s |\n| adcs.cert_template_failed_requests | failed | requests/s |\n| adcs.cert_template_issued_requests | issued | requests/s |\n| adcs.cert_template_pending_requests | pending | requests/s |\n| adcs.cert_template_request_processing_time | processing_time | seconds |\n| adcs.cert_template_retrievals | retrievals | retrievals/s |\n| adcs.cert_template_retrieval_processing_time | processing_time | seconds |\n| adcs.cert_template_request_cryptographic_signing_time | singing_time | seconds |\n| adcs.cert_template_request_policy_module_processing | processing_time | seconds |\n| adcs.cert_template_challenge_responses | challenge | responses/s |\n| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |\n| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |\n| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |\n\n### Per process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| process | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netframework.clrexception_thrown | exceptions | exceptions/s |\n| 
netframework.clrexception_filters | filters | filters/s |\n| netframework.clrexception_finallys | finallys | finallys/s |\n| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |\n| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |\n| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |\n| netframework.clrinterop_interop_stubs_created | created | stubs/s |\n| netframework.clrjit_methods | jit-compiled | methods/s |\n| netframework.clrjit_time | time | percentage |\n| netframework.clrjit_standard_failures | failures | failures/s |\n| netframework.clrjit_il_bytes | compiled_msil | bytes/s |\n| netframework.clrloading_loader_heap_size | committed | bytes |\n| netframework.clrloading_appdomains_loaded | loaded | domain/s |\n| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |\n| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |\n| netframework.clrloading_classes_loaded | loaded | classes/s |\n| netframework.clrloading_class_load_failures | class_load | failures/s |\n| netframework.clrlocksandthreads_queue_length | threads | threads/s |\n| netframework.clrlocksandthreads_current_logical_threads | logical | threads |\n| netframework.clrlocksandthreads_current_physical_threads | physical | threads |\n| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |\n| netframework.clrlocksandthreads_contentions | contentions | contentions/s |\n| netframework.clrmemory_allocated_bytes | allocated | bytes/s |\n| netframework.clrmemory_finalization_survivors | survived | objects |\n| netframework.clrmemory_heap_size | heap | bytes |\n| netframework.clrmemory_promoted | promoted | bytes |\n| netframework.clrmemory_number_gc_handles | used | handles |\n| netframework.clrmemory_collections | gc | gc/s |\n| netframework.clrmemory_induced_gc | gc | gc/s |\n| netframework.clrmemory_number_pinned_objects | pinned | objects |\n| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |\n| netframework.clrmemory_committed | committed | bytes |\n| netframework.clrmemory_reserved | reserved | bytes |\n| netframework.clrmemory_gc_time | time | percentage |\n| netframework.clrremoting_channels | registered | channels/s |\n| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |\n| netframework.clrremoting_context_bound_objects | allocated | objects/s |\n| netframework.clrremoting_context_proxies | objects | objects/s |\n| netframework.clrremoting_contexts | contexts | contexts |\n| netframework.clrremoting_remote_calls | rpc | calls/s |\n| netframework.clrsecurity_link_time_checks | linktime | checks/s |\n| netframework.clrsecurity_checks_time | time | percentage |\n| netframework.clrsecurity_stack_walk_depth | stack | depth |\n| netframework.clrsecurity_runtime_checks | runtime | checks/s |\n\n### Per exchange workload\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.workload_active_tasks | active | tasks |\n| exchange.workload_completed_tasks | completed | tasks/s |\n| exchange.workload_queued_tasks | queued | tasks/s |\n| exchange.workload_yielded_tasks | yielded | tasks/s |\n| exchange.workload_activity_status | active, paused | status |\n\n### Per ldap process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit 
|\n|:------|:----------|:----|\n| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |\n| exchange.ldap_read_time | read | seconds |\n| exchange.ldap_search_time | search | seconds |\n| exchange.ldap_write_time | write | seconds |\n| exchange.ldap_timeout_errors | timeout | errors/s |\n\n### Per http proxy\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.http_proxy_avg_auth_latency | latency | seconds |\n| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |\n| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |\n| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |\n| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |\n| exchange.http_proxy_requests | processed | requests/s |\n\n### Per vm\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_name | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_cpu_usage | gues, hypervisor, remote | percentage |\n| hyperv.vm_memory_physical | assigned_memory | MiB |\n| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |\n| hyperv.vm_memory_pressure_current | pressure | percentage |\n| hyperv.vm_vid_physical_pages_allocated | allocated | pages |\n| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |\n\n### Per vm device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_device_bytes | read, written | bytes/s |\n| hyperv.vm_device_operations | read, write | operations/s |\n| hyperv.vm_device_errors | errors | errors/s |\n\n### Per vm interface\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_interface | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_interface_bytes | received, sent | bytes/s |\n| hyperv.vm_interface_packets | received, sent | packets/s |\n| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |\n\n### Per vswitch\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vswitch | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vswitch_bytes | received, sent | bytes/s |\n| hyperv.vswitch_packets | received, sent | packets/s |\n| hyperv.vswitch_directed_packets | received, sent | packets/s |\n| hyperv.vswitch_broadcast_packets | received, sent | packets/s |\n| hyperv.vswitch_multicast_packets | received, sent | packets/s |\n| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_packets_flooded | flooded | packets/s |\n| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |\n| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |\n\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThe collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\nSupported collectors:\n\n- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)\n- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)\n- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)\n- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)\n- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)\n- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)\n- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)\n- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)\n- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)\n- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)\n- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)\n- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)\n- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)\n- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)\n- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)\n- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)\n- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-exceptions)\n- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-interop)\n- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-jit)\n- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-loading)\n- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-locks-and-threads)\n- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-memory)\n- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-remoting)\n- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)\n- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)\n\n\n### Per Active Directory instance\n\nThese metrics refer to the entire monitored host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |\n| windows.memory_utilization | available, used | bytes |\n| windows.memory_page_faults | page_faults | events/s |\n| 
windows.memory_swap_utilization | available, used | bytes |\n| windows.memory_swap_operations | read, write | operations/s |\n| windows.memory_swap_pages | read, written | pages/s |\n| windows.memory_cached | cached | KiB |\n| windows.memory_cache_faults | cache_faults | events/s |\n| windows.memory_system_pool | paged, non-paged | bytes |\n| windows.tcp_conns_established | ipv4, ipv6 | connections |\n| windows.tcp_conns_active | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |\n| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |\n| windows.tcp_segments_received | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |\n| windows.os_processes | processes | number |\n| windows.os_users | users | users |\n| windows.os_visible_memory_usage | free, used | bytes |\n| windows.os_paging_files_usage | free, used | bytes |\n| windows.system_threads | threads | number |\n| windows.system_uptime | time | seconds |\n| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |\n| windows.processes_cpu_utilization | a dimension per process | percentage |\n| windows.processes_handles | a dimension per process | handles |\n| windows.processes_io_bytes | a dimension per process | bytes/s |\n| windows.processes_io_operations | a dimension per process | operations/s |\n| windows.processes_page_faults | a dimension per process | pgfaults/s |\n| windows.processes_page_file_bytes | a dimension per process | bytes |\n| windows.processes_pool_bytes | a dimension per process | bytes |\n| windows.processes_threads | a dimension per process | threads |\n| ad.database_operations | add, delete, modify, recycle | operations/s |\n| ad.directory_operations | read, write, search | operations/s |\n| ad.name_cache_lookups | lookups | lookups/s |\n| ad.name_cache_hits | hits | hits/s |\n| ad.atq_average_request_latency | time | seconds |\n| ad.atq_outstanding_requests | outstanding | requests |\n| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |\n| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |\n| ad.dra_replication_properties_updated | inbound, outbound | properties/s |\n| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |\n| ad.dra_replication_pending_syncs | pending | syncs |\n| ad.dra_replication_sync_requests | requests | requests/s |\n| ad.ds_threads | in_use | threads |\n| ad.ldap_last_bind_time | last_bind | seconds |\n| ad.binds | binds | binds/s |\n| ad.ldap_searches | searches | searches/s |\n| adfs.ad_login_connection_failures | connection | failures/s |\n| adfs.certificate_authentications | authentications | authentications/s |\n| adfs.db_artifact_failures | connection | failures/s |\n| adfs.db_artifact_query_time_seconds | query_time | seconds/s |\n| adfs.db_config_failures | connection | failures/s |\n| adfs.db_config_query_time_seconds | query_time | seconds/s |\n| adfs.device_authentications | authentications | authentications/s |\n| adfs.external_authentications | success, failure | authentications/s |\n| 
adfs.federated_authentications | authentications | authentications/s |\n| adfs.federation_metadata_requests | requests | requests/s |\n| adfs.oauth_authorization_requests | requests | requests/s |\n| adfs.oauth_client_authentications | success, failure | authentications/s |\n| adfs.oauth_client_credentials_requests | success, failure | requests/s |\n| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |\n| adfs.oauth_client_windows_authentications | success, failure | authentications/s |\n| adfs.oauth_logon_certificate_requests | success, failure | requests/s |\n| adfs.oauth_password_grant_requests | success, failure | requests/s |\n| adfs.oauth_token_requests_success | success | requests/s |\n| adfs.passive_requests | passive | requests/s |\n| adfs.passport_authentications | passport | authentications/s |\n| adfs.password_change_requests | success, failure | requests/s |\n| adfs.samlp_token_requests_success | success | requests/s |\n| adfs.sso_authentications | success, failure | authentications/s |\n| adfs.token_requests | requests | requests/s |\n| adfs.userpassword_authentications | success, failure | authentications/s |\n| adfs.windows_integrated_authentications | authentications | authentications/s |\n| adfs.wsfed_token_requests_success | success | requests/s |\n| adfs.wstrust_token_requests_success | success | requests/s |\n| exchange.activesync_ping_cmds_pending | pending | commands |\n| exchange.activesync_requests | received | requests/s |\n| exchange.activesync_sync_cmds | processed | commands/s |\n| exchange.autodiscover_requests | processed | requests/s |\n| exchange.avail_service_requests | serviced | requests/s |\n| exchange.owa_current_unique_users | logged-in | users |\n| exchange.owa_requests_total | handled | requests/s |\n| exchange.rpc_active_user_count | active | users |\n| exchange.rpc_avg_latency | latency | seconds |\n| exchange.rpc_connection_count | connections | connections |\n| exchange.rpc_operations | operations | operations/s |\n| exchange.rpc_requests | processed | requests |\n| exchange.rpc_user_count | users | users |\n| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_poison | low, high, none, normal | messages/s |\n| hyperv.vms_health | ok, critical | vms |\n| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |\n| hyperv.root_partition_attached_devices | attached | devices |\n| hyperv.root_partition_deposited_pages | deposited | pages |\n| hyperv.root_partition_skipped_interrupts | skipped | interrupts |\n| hyperv.root_partition_device_dma_errors | illegal_dma | requests |\n| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | 
requests |\n| hyperv.root_partition_device_interrupt_throttle_events | throttling | events |\n| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |\n| hyperv.root_partition_address_space | address_spaces | address spaces |\n| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |\n| hyperv.root_partition_virtual_tlb_pages | used | pages |\n\n### Per cpu core\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| core | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |\n| windows.cpu_core_interrupts | interrupts | interrupts/s |\n| windows.cpu_core_dpcs | dpcs | dpcs/s |\n| windows.cpu_core_cstate | c1, c2, c3 | percentage |\n\n### Per logical disk\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.logical_disk_utilization | free, used | bytes |\n| windows.logical_disk_bandwidth | read, write | bytes/s |\n| windows.logical_disk_operations | reads, writes | operations/s |\n| windows.logical_disk_latency | read, write | seconds |\n\n### Per network device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| nic | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.net_nic_bandwidth | received, sent | kilobits/s |\n| windows.net_nic_packets | received, sent | packets/s |\n| windows.net_nic_errors | inbound, outbound | errors/s |\n| windows.net_nic_discarded | inbound, outbound | discards/s |\n\n### Per thermalzone\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thermalzone | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.thermalzone_temperature | temperature | celsius |\n\n### Per service\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |\n| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |\n\n### Per website\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| website | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.website_traffic | received, sent | bytes/s |\n| iis.website_requests_rate | requests | requests/s |\n| iis.website_active_connections_count | active | connections |\n| iis.website_users_count | anonymous, non_anonymous | users |\n| iis.website_connection_attempts_rate | connection | attempts/s |\n| iis.website_isapi_extension_requests_count | isapi | requests |\n| iis.website_isapi_extension_requests_rate | isapi | requests/s |\n| iis.website_ftp_file_transfer_rate | received, sent | files/s |\n| iis.website_logon_attempts_rate | logon | attempts/s |\n| iis.website_errors_rate | document_locked, document_not_found | errors/s |\n| iis.website_uptime | uptime | seconds |\n\n### Per mssql instance\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| 
mssql.instance_accessmethods_page_splits | page | splits/s |\n| mssql.instance_cache_hit_ratio | hit_ratio | percentage |\n| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |\n| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |\n| mssql.instance_bufman_iops | read, written | iops |\n| mssql.instance_blocked_processes | blocked | processes |\n| mssql.instance_user_connection | user | connections |\n| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |\n| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |\n| mssql.instance_memmgr_pending_memory_grants | pending | processes |\n| mssql.instance_memmgr_server_memory | memory | bytes |\n| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |\n| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |\n| mssql.instance_sqlstats_batch_requests | batch | requests/s |\n| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |\n| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |\n| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |\n\n### Per database\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n| database | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.database_active_transactions | active | transactions |\n| mssql.database_backup_restore_operations | backup | operations/s |\n| mssql.database_data_files_size | size | bytes |\n| mssql.database_log_flushed | flushed | bytes/s |\n| mssql.database_log_flushes | log | flushes/s |\n| mssql.database_transactions | transactions | transactions/s |\n| mssql.database_write_transactions | write | transactions/s |\n\n### Per certificate template\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cert_template | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adcs.cert_template_requests | requests | requests/s |\n| adcs.cert_template_failed_requests | failed | requests/s |\n| adcs.cert_template_issued_requests | issued | requests/s |\n| adcs.cert_template_pending_requests | pending | requests/s |\n| adcs.cert_template_request_processing_time | processing_time | seconds |\n| adcs.cert_template_retrievals | retrievals | retrievals/s |\n| adcs.cert_template_retrieval_processing_time | processing_time | seconds |\n| adcs.cert_template_request_cryptographic_signing_time | signing_time | seconds |\n| adcs.cert_template_request_policy_module_processing | processing_time | seconds |\n| adcs.cert_template_challenge_responses | challenge | responses/s |\n| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |\n| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |\n| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |\n\n### Per process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| process | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netframework.clrexception_thrown | exceptions | 
exceptions/s |\n| netframework.clrexception_filters | filters | filters/s |\n| netframework.clrexception_finallys | finallys | finallys/s |\n| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |\n| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |\n| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |\n| netframework.clrinterop_interop_stubs_created | created | stubs/s |\n| netframework.clrjit_methods | jit-compiled | methods/s |\n| netframework.clrjit_time | time | percentage |\n| netframework.clrjit_standard_failures | failures | failures/s |\n| netframework.clrjit_il_bytes | compiled_msil | bytes/s |\n| netframework.clrloading_loader_heap_size | committed | bytes |\n| netframework.clrloading_appdomains_loaded | loaded | domain/s |\n| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |\n| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |\n| netframework.clrloading_classes_loaded | loaded | classes/s |\n| netframework.clrloading_class_load_failures | class_load | failures/s |\n| netframework.clrlocksandthreads_queue_length | threads | threads/s |\n| netframework.clrlocksandthreads_current_logical_threads | logical | threads |\n| netframework.clrlocksandthreads_current_physical_threads | physical | threads |\n| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |\n| netframework.clrlocksandthreads_contentions | contentions | contentions/s |\n| netframework.clrmemory_allocated_bytes | allocated | bytes/s |\n| netframework.clrmemory_finalization_survivors | survived | objects |\n| netframework.clrmemory_heap_size | heap | bytes |\n| netframework.clrmemory_promoted | promoted | bytes |\n| netframework.clrmemory_number_gc_handles | used | handles |\n| netframework.clrmemory_collections | gc | gc/s |\n| netframework.clrmemory_induced_gc | gc | gc/s |\n| netframework.clrmemory_number_pinned_objects | pinned | objects |\n| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |\n| netframework.clrmemory_committed | committed | bytes |\n| netframework.clrmemory_reserved | reserved | bytes |\n| netframework.clrmemory_gc_time | time | percentage |\n| netframework.clrremoting_channels | registered | channels/s |\n| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |\n| netframework.clrremoting_context_bound_objects | allocated | objects/s |\n| netframework.clrremoting_context_proxies | objects | objects/s |\n| netframework.clrremoting_contexts | contexts | contexts |\n| netframework.clrremoting_remote_calls | rpc | calls/s |\n| netframework.clrsecurity_link_time_checks | linktime | checks/s |\n| netframework.clrsecurity_checks_time | time | percentage |\n| netframework.clrsecurity_stack_walk_depth | stack | depth |\n| netframework.clrsecurity_runtime_checks | runtime | checks/s |\n\n### Per exchange workload\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.workload_active_tasks | active | tasks |\n| exchange.workload_completed_tasks | completed | tasks/s |\n| exchange.workload_queued_tasks | queued | tasks/s |\n| exchange.workload_yielded_tasks | yielded | tasks/s |\n| exchange.workload_activity_status | active, paused | status |\n\n### Per ldap process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit 
|\n|:------|:----------|:----|\n| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |\n| exchange.ldap_read_time | read | seconds |\n| exchange.ldap_search_time | search | seconds |\n| exchange.ldap_write_time | write | seconds |\n| exchange.ldap_timeout_errors | timeout | errors/s |\n\n### Per http proxy\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.http_proxy_avg_auth_latency | latency | seconds |\n| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |\n| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |\n| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |\n| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |\n| exchange.http_proxy_requests | processed | requests/s |\n\n### Per vm\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_name | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_cpu_usage | guest, hypervisor, remote | percentage |\n| hyperv.vm_memory_physical | assigned_memory | MiB |\n| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |\n| hyperv.vm_memory_pressure_current | pressure | percentage |\n| hyperv.vm_vid_physical_pages_allocated | allocated | pages |\n| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |\n\n### Per vm device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_device_bytes | read, written | bytes/s |\n| hyperv.vm_device_operations | read, write | operations/s |\n| hyperv.vm_device_errors | errors | errors/s |\n\n### Per vm interface\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_interface | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_interface_bytes | received, sent | bytes/s |\n| hyperv.vm_interface_packets | received, sent | packets/s |\n| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |\n\n### Per vswitch\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vswitch | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vswitch_bytes | received, sent | bytes/s |\n| hyperv.vswitch_packets | received, sent | packets/s |\n| hyperv.vswitch_directed_packets | received, sent | packets/s |\n| hyperv.vswitch_broadcast_packets | received, sent | packets/s |\n| hyperv.vswitch_multicast_packets | received, sent | packets/s |\n| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_packets_flooded | flooded | packets/s |\n| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |\n| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-windows-Windows", "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/windows/metadata.yaml", @@ -17427,7 +17808,7 @@ } }, "overview": "# WireGuard\n\nPlugin: go.d.plugin\nModule: wireguard\n\n## Overview\n\nThis collector monitors WireGuard VPN devices and peers traffic.\n\n\nIt connects to the local WireGuard instance using [wireguard-go client](https://github.com/WireGuard/wireguard-go).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThis collector requires the CAP_NET_ADMIN capability, but it is set automatically during installation, so no manual configuration is needed.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt automatically detects instances running on localhost.\n\n\n#### Limits\n\nDoesn't work if Netdata or WireGuard is installed in a container.\n\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/wireguard.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/wireguard.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/wireguard.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/wireguard.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `wireguard` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m wireguard\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `wireguard` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep wireguard\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep wireguard /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep wireguard\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per device\n\nThese metrics refer to the VPN network interface.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | VPN network interface |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| wireguard.device_network_io | receive, transmit | B/s |\n| wireguard.device_peers | peers | peers |\n\n### Per peer\n\nThese metrics refer to the VPN peer.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | VPN network interface |\n| public_key | Public key of a peer |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| wireguard.peer_network_io | receive, transmit | B/s |\n| wireguard.peer_latest_handshake_ago | time | seconds |\n\n", @@ -17464,7 +17845,7 @@ } }, "overview": "# X.509 certificate\n\nPlugin: go.d.plugin\nModule: x509check\n\n## Overview\n\n\n\nThis collector monitors x509 certificate expiration time and revocation status.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/x509check.conf`.\n\n\nYou can edit the configuration 
file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/x509check.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| source | Certificate source. Allowed schemes: https, tcp, tcp4, tcp6, udp, udp4, udp6, file, smtp. | | no |\n| days_until_expiration_warning | Number of days before the alarm status is warning. | 30 | no |\n| days_until_expiration_critical | Number of days before the alarm status is critical. | 15 | no |\n| check_revocation_status | Whether to check the revocation status of the certificate. | no | no |\n| timeout | SSL connection timeout. | 2 | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Website certificate\n\nWebsite certificate.\n\n```yaml\njobs:\n - name: my_site_cert\n source: https://my_site.org:443\n\n```\n##### Local file certificate\n\nLocal file certificate.\n\n```yaml\njobs:\n - name: my_file_cert\n source: file:///home/me/cert.pem\n\n```\n##### SMTP certificate\n\nSMTP certificate.\n\n```yaml\njobs:\n - name: my_smtp_cert\n source: smtp://smtp.my_mail.org:587\n\n```\n##### Multi-instance\n\n> **Note**: When you define more than one job, their names must be unique.\n\nCheck the expiration status of the multiple websites' certificates.\n\n\n```yaml\njobs:\n - name: my_site_cert1\n source: https://my_site1.org:443\n\n - name: my_site_cert2\n source: https://my_site1.org:443\n\n - name: my_site_cert3\n source: https://my_site3.org:443\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/x509check.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/x509check.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| source | Certificate source. Allowed schemes: https, tcp, tcp4, tcp6, udp, udp4, udp6, file, smtp. | | no |\n| days_until_expiration_warning | Number of days before the alarm status is warning. | 30 | no |\n| days_until_expiration_critical | Number of days before the alarm status is critical. | 15 | no |\n| check_revocation_status | Whether to check the revocation status of the certificate. 
| no | no |\n| timeout | SSL connection timeout. | 2 | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Website certificate\n\nWebsite certificate.\n\n```yaml\njobs:\n - name: my_site_cert\n source: https://my_site.org:443\n\n```\n##### Local file certificate\n\nLocal file certificate.\n\n```yaml\njobs:\n - name: my_file_cert\n source: file:///home/me/cert.pem\n\n```\n##### SMTP certificate\n\nSMTP certificate.\n\n```yaml\njobs:\n - name: my_smtp_cert\n source: smtp://smtp.my_mail.org:587\n\n```\n##### Multi-instance\n\n> **Note**: When you define more than one job, their names must be unique.\n\nCheck the expiration status of multiple websites' certificates.\n\n\n```yaml\njobs:\n - name: my_site_cert1\n source: https://my_site1.org:443\n\n - name: my_site_cert2\n source: https://my_site2.org:443\n\n - name: my_site_cert3\n source: https://my_site3.org:443\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `x509check` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m x509check\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `x509check` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep x509check\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep x509check /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
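\n\nIndependently of the logs, you can verify a certificate by hand and compare the dates with what the collector reports. A minimal sketch, assuming `openssl` is available and an HTTPS source (the host below is a placeholder):\n\n```bash\n# Print the notBefore/notAfter dates of the certificate served by a host;\n# compare notAfter with the x509check.time_until_expiration chart.\necho | openssl s_client -connect my_site.org:443 -servername my_site.org 2>/dev/null | openssl x509 -noout -dates\n```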
\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep x509check\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ x509check_days_until_expiration ](https://github.com/netdata/netdata/blob/master/src/health/health.d/x509check.conf) | x509check.time_until_expiration | Time until x509 certificate expires for ${label:source} |\n| [ x509check_revocation_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/x509check.conf) | x509check.revocation_status | x509 certificate revocation status for ${label:source} |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per source\n\nThese metrics refer to the configured source.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| source | Configured source. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| x509check.time_until_expiration | expiry | seconds |\n| x509check.revocation_status | not_revoked, revoked | boolean |\n\n", @@ -17502,8 +17883,8 @@ }, "most_popular": false }, - "overview": "# ZFS Pools\n\nPlugin: go.d.plugin\nModule: zfspool\n\n## Overview\n\nThis collector monitors the health and space usage of ZFS pools using the command line tool [zpool](https://openzfs.github.io/openzfs-docs/man/master/8/zpool-list.8.html).\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/zfspool.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/zfspool.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| binary_path | Path to the `zpool` binary. If an absolute path is provided, the collector will use it directly; otherwise, it will search for the binary in directories specified in the PATH environment variable. | /usr/bin/zpool | yes |\n| timeout | Timeout for executing the binary, specified in seconds. 
| 2 | no |\n\n#### Examples\n\n##### Custom binary path\n\nThe executable is not in the directories specified in the PATH environment variable.\n\n```yaml\njobs:\n - name: zfspool\n binary_path: /usr/local/sbin/zpool\n\n```\n", + "overview": "# ZFS Pools\n\nPlugin: go.d.plugin\nModule: zfspool\n\n## Overview\n\nThis collector monitors the health and space usage of ZFS pools using the command line tool [zpool](https://openzfs.github.io/openzfs-docs/man/master/8/zpool-list.8.html).\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n- BSD\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/zfspool.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/zfspool.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| binary_path | Path to the `zpool` binary. If an absolute path is provided, the collector will use it directly; otherwise, it will search for the binary in directories specified in the PATH environment variable. | /usr/bin/zpool | yes |\n| timeout | Timeout for executing the binary, specified in seconds. | 2 | no |\n\n#### Examples\n\n##### Custom binary path\n\nThe executable is not in the directories specified in the PATH environment variable.\n\n```yaml\njobs:\n - name: zfspool\n binary_path: /usr/local/sbin/zpool\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `zfspool` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m zfspool\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `zfspool` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep zfspool\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep zfspool /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep zfspool\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ zfs_pool_space_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/zfs.conf) | zfspool.pool_space_utilization | ZFS pool ${label:pool} is nearing capacity. Current space usage is above the threshold. |\n| [ zfs_pool_health_state_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/zfs.conf) | zfspool.pool_health_state | ZFS pool ${label:pool} state is degraded |\n| [ zfs_pool_health_state_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/zfs.conf) | zfspool.pool_health_state | ZFS pool ${label:pool} state is faulted or unavail |\n| [ zfs_vdev_health_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/zfs.conf) | zfspool.vdev_health_state | ZFS vdev ${label:vdev} state is faulted or degraded |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per zfs pool\n\nThese metrics refer to the ZFS pool.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| pool | Zpool name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| zfspool.pool_space_utilization | utilization | % |\n| zfspool.pool_space_usage | free, used | bytes |\n| zfspool.pool_fragmentation | fragmentation | % |\n| zfspool.pool_health_state | online, degraded, faulted, offline, unavail, removed, suspended | state |\n\n### Per zfs pool vdev\n\nThese metrics refer to the ZFS pool virtual device.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| pool | Zpool name |\n| vdev | Unique identifier for a virtual device (vdev) within a ZFS pool. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| zfspool.vdev_health_state | online, degraded, faulted, offline, unavail, removed, suspended | state |\n\n", @@ -17544,7 +17925,7 @@ } }, "overview": "# ZooKeeper\n\nPlugin: go.d.plugin\nModule: zookeeper\n\n## Overview\n\n\n\nIt connects to the Zookeeper instance over TCP and executes the following commands:\n\n- [mntr](https://zookeeper.apache.org/doc/r3.4.8/zookeeperAdmin.html#sc_zkCommands).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by attempting to connect using known ZooKeeper TCP sockets:\n\n- 127.0.0.1:2181\n- 127.0.0.1:2182\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Whitelist `mntr` command\n\nAdd `mntr` to Zookeeper's [4lw.commands.whitelist](https://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_4lw).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/zookeeper.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/zookeeper.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address. The format is IP:PORT. | 127.0.0.1:2181 | yes |\n| timeout | Connection/read/write/ssl handshake timeout. | 1 | no |\n| use_tls | Whether to use TLS or not. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n#### Examples\n\n##### Basic\n\nLocal server.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:2181\n\n```\n##### TLS with self-signed certificate\n\nZookeeper with TLS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:2181\n use_tls: yes\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:2181\n\n - name: remote\n address: 192.0.2.1:2181\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Whitelist `mntr` command\n\nAdd `mntr` to Zookeeper's [4lw.commands.whitelist](https://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_4lw).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/zookeeper.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/zookeeper.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address. The format is IP:PORT. | 127.0.0.1:2181 | yes |\n| timeout | Connection/read/write/ssl handshake timeout. | 1 | no |\n| use_tls | Whether to use TLS or not. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nLocal server.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:2181\n\n```\n##### TLS with self-signed certificate\n\nZookeeper with TLS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:2181\n use_tls: yes\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:2181\n\n - name: remote\n address: 192.0.2.1:2181\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `zookeeper` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m zookeeper\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `zookeeper` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep zookeeper\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep zookeeper /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep zookeeper\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per ZooKeeper instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| zookeeper.requests | outstanding | requests |\n| zookeeper.requests_latency | min, avg, max | ms |\n| zookeeper.connections | alive | connections |\n| zookeeper.packets | received, sent | pps |\n| zookeeper.file_descriptor | open | file descriptors |\n| zookeeper.nodes | znode, ephemerals | nodes |\n| zookeeper.watches | watches | watches |\n| zookeeper.approximate_data_size | size | KiB |\n| zookeeper.server_state | state | state |\n\n", @@ -17580,7 +17961,7 @@ "most_popular": false }, "overview": "# Idle OS Jitter\n\nPlugin: idlejitter.plugin\nModule: idlejitter.plugin\n\n## Overview\n\nMonitor delays in timing for user processes caused by scheduling limitations to optimize the system to run latency-sensitive applications with minimal jitter, improving consistency and quality of service.\n\n\nA thread is spawned that requests to sleep for a fixed amount of time. When the system wakes it up, it measures how many microseconds have passed. The difference between the requested and the actual duration of the sleep is the idle jitter. This is done dozens of times per second to ensure we have a representative sample.
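\n\nAs a rough illustration of the technique (the plugin performs the equivalent measurement internally), the same idea fits in a few lines of shell, assuming GNU `date` with nanosecond resolution:\n\n```bash\n# Ask for a 20 ms sleep, measure how long it actually took, and print the\n# difference in microseconds: that difference is the idle jitter.\nstart=$(date +%s%N)\nsleep 0.020\nend=$(date +%s%N)\necho $(( (end - start) / 1000 - 20000 ))\n```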
\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration will run by default on all supported systems.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nThis integration only supports a single configuration option, and most users will not need to change it.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| loop time in ms | Specifies the target time for the data collection thread to sleep, measured in miliseconds. | 20 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nThis integration only supports a single configuration option, and most users will not need to change it.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| loop time | Specifies the target time for the data collection thread to sleep, measured in milliseconds. | 20ms | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Idle OS Jitter instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.idlejitter | min, max, average | microseconds lost/s |\n\n", @@ -17613,7 +17994,7 @@ "most_popular": false }, "overview": "# IOPing\n\nPlugin: ioping.plugin\nModule: ioping.plugin\n\n## Overview\n\nMonitor IOPing metrics for efficient disk I/O latency tracking. Keep track of read/write speeds, latency, and error rates for optimized disk operations.\n\nThe plugin uses the `ioping` command.
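\n\nTo get a feel for the underlying measurement, you can run `ioping` by hand; the flags below are illustrative and not necessarily the ones the plugin passes:\n\n```bash\n# Issue 5 requests against a device and print per-request latency plus a\n# min/avg/max summary (root is usually required for raw devices).\nsudo ioping -c 5 /dev/sda\n```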
\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install ioping\n\nYou can install the command by passing the argument `install` to the plugin (`/usr/libexec/netdata/plugins.d/ioping.plugin install`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ioping.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ioping.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1s | no |\n| destination | The directory/file/device to ioping. | | yes |\n| request_size | The request size in bytes to ioping the destination (symbolic modifiers are supported) | 4k | no |\n| ioping_opts | Options passed to `ioping` commands. | -T 1000000 | no |\n\n#### Examples\n\n##### Basic Configuration\n\nThis example has the minimum configuration necessary to have the plugin running.\n\n```yaml\ndestination=\"/dev/sda\"\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Install ioping\n\nYou can install the command by passing the argument `install` to the plugin (`/usr/libexec/netdata/plugins.d/ioping.plugin install`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ioping.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ioping.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1s | no |\n| destination | The directory/file/device to ioping. | | yes |\n| request_size | The request size in bytes to ioping the destination (symbolic modifiers are supported) | 4k | no |\n| ioping_opts | Options passed to `ioping` commands. 
| -T 1000000 | no |\n\n#### Examples\n\n##### Basic Configuration\n\nThis example has the minimum configuration necessary to have the plugin running.\n\n```yaml\ndestination=\"/dev/sda\"\n\n```\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ ioping_disk_latency ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ioping.conf) | ioping.latency | average I/O latency over the last 10 seconds |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per disk\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ioping.latency | latency | microseconds |\n\n", @@ -17650,7 +18031,7 @@ "most_popular": false }, "overview": "# macOS\n\nPlugin: macos.plugin\nModule: mach_smi\n\n## Overview\n\nMonitor macOS metrics for efficient operating system performance.\n\nThe plugin uses three different methods to collect data:\n - The function `sysctlbyname` is called to collect network, swap, loadavg, and boot time.\n - The function `host_statistic` is called to collect CPU and Virtual memory data.\n - The function `IOServiceGetMatchingServices` is called to collect storage information.\n\n\nThis collector is only supported on the following platforms:\n\n- macOS\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nThere are three sections in the file which you can configure:\n\n- `[plugin:macos:sysctl]` - Enable or disable monitoring for network, swap, loadavg, and boot time.\n- `[plugin:macos:mach_smi]` - Enable or disable monitoring for CPU and Virtual memory.\n- `[plugin:macos:iokit]` - Enable or disable monitoring for storage device.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enable load average | Enable or disable monitoring of load average metrics (load1, load5, load15). | yes | no |\n| system swap | Enable or disable monitoring of system swap metrics (free, used). | yes | no |\n| bandwidth | Enable or disable monitoring of network bandwidth metrics (received, sent). | yes | no |\n| ipv4 TCP packets | Enable or disable monitoring of IPv4 TCP total packets metrics (received, sent). 
| yes | no |\n| ipv4 TCP errors | Enable or disable monitoring of IPv4 TCP packets metrics (Input Errors, Checksum, Retransmission segments). | yes | no |\n| ipv4 TCP handshake issues | Enable or disable monitoring of IPv4 TCP handshake metrics (Established Resets, Active Opens, Passive Opens, Attempt Fails). | yes | no |\n| ECN packets | Enable or disable monitoring of ECN statistics metrics (InCEPkts, InNoECTPkts). | auto | no |\n| TCP SYN cookies | Enable or disable monitoring of TCP SYN cookies metrics (received, sent, failed). | auto | no |\n| TCP out-of-order queue | Enable or disable monitoring of TCP out-of-order queue metrics (inqueue). | auto | no |\n| TCP connection aborts | Enable or disable monitoring of TCP connection aborts metrics (Bad Data, User closed, No memory, Timeout). | auto | no |\n| ipv4 UDP packets | Enable or disable monitoring of ipv4 UDP packets metrics (sent, received.). | yes | no |\n| ipv4 UDP errors | Enable or disable monitoring of ipv4 UDP errors metrics (Recieved Buffer error, Input Errors, No Ports, IN Checksum Errors, Ignore Multi). | yes | no |\n| ipv4 icmp packets | Enable or disable monitoring of IPv4 ICMP packets metrics (sent, received, in error, OUT error, IN Checksum error). | yes | no |\n| ipv4 icmp messages | Enable or disable monitoring of ipv4 ICMP messages metrics (I/O messages, I/O Errors, In Checksum). | yes | no |\n| ipv4 packets | Enable or disable monitoring of ipv4 packets metrics (received, sent, forwarded, delivered). | yes | no |\n| ipv4 fragments sent | Enable or disable monitoring of IPv4 fragments sent metrics (ok, fails, creates). | yes | no |\n| ipv4 fragments assembly | Enable or disable monitoring of IPv4 fragments assembly metrics (ok, failed, all). | yes | no |\n| ipv4 errors | Enable or disable monitoring of IPv4 errors metrics (I/O discard, I/O HDR errors, In Addr errors, In Unknown protos, OUT No Routes). | yes | no |\n| ipv6 packets | Enable or disable monitoring of IPv6 packets metrics (received, sent, forwarded, delivered). | auto | no |\n| ipv6 fragments sent | Enable or disable monitoring of IPv6 fragments sent metrics (ok, failed, all). | auto | no |\n| ipv6 fragments assembly | Enable or disable monitoring of IPv6 fragments assembly metrics (ok, failed, timeout, all). | auto | no |\n| ipv6 errors | Enable or disable monitoring of IPv6 errors metrics (I/O Discards, In Hdr Errors, In Addr Errors, In Truncaedd Packets, I/O No Routes). | auto | no |\n| icmp | Enable or disable monitoring of ICMP metrics (sent, received). | auto | no |\n| icmp redirects | Enable or disable monitoring of ICMP redirects metrics (received, sent). | auto | no |\n| icmp errors | Enable or disable monitoring of ICMP metrics (I/O Errors, In Checksums, In Destination Unreachable, In Packet too big, In Time Exceeds, In Parm Problem, Out Dest Unreachable, Out Timee Exceeds, Out Parm Problems.). | auto | no |\n| icmp echos | Enable or disable monitoring of ICMP echos metrics (I/O Echos, I/O Echo Reply). | auto | no |\n| icmp router | Enable or disable monitoring of ICMP router metrics (I/O Solicits, I/O Advertisements). | auto | no |\n| icmp neighbor | Enable or disable monitoring of ICMP neighbor metrics (I/O Solicits, I/O Advertisements). | auto | no |\n| icmp types | Enable or disable monitoring of ICMP types metrics (I/O Type1, I/O Type128, I/O Type129, Out Type133, Out Type135, In Type136, Out Type145). 
| auto | no |\n| space usage for all disks | Enable or disable monitoring of space usage for all disks metrics (available, used, reserved for root). | yes | no |\n| inodes usage for all disks | Enable or disable monitoring of inodes usage for all disks metrics (available, used, reserved for root). | yes | no |\n| bandwidth | Enable or disable monitoring of bandwidth metrics (received, sent). | yes | no |\n| system uptime | Enable or disable monitoring of system uptime metrics (uptime). | yes | no |\n| cpu utilization | Enable or disable monitoring of CPU utilization metrics (user, nice, system, idel). | yes | no |\n| system ram | Enable or disable monitoring of system RAM metrics (Active, Wired, throttled, compressor, inactive, purgeable, speculative, free). | yes | no |\n| swap i/o | Enable or disable monitoring of SWAP I/O metrics (I/O Swap). | yes | no |\n| memory page faults | Enable or disable monitoring of memory page faults metrics (memory, cow, I/O page, compress, decompress, zero fill, reactivate, purge). | yes | no |\n| disk i/o | Enable or disable monitoring of disk I/O metrics (In, Out). | yes | no |\n\n#### Examples\n\n##### Disable swap monitoring.\n\nA basic example that discards swap monitoring\n\n```yaml\n[plugin:macos:sysctl]\n system swap = no\n[plugin:macos:mach_smi]\n swap i/o = no\n\n```\n##### Disable complete Machine SMI section.\n\nA basic example that discards swap monitoring\n\n```yaml\n[plugin:macos:mach_smi]\n cpu utilization = no\n system ram = no\n swap i/o = no\n memory page faults = no\n disk i/o = no\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nThere are three sections in the file which you can configure:\n\n- `[plugin:macos:sysctl]` - Enable or disable monitoring for network, swap, loadavg, and boot time.\n- `[plugin:macos:mach_smi]` - Enable or disable monitoring for CPU and Virtual memory.\n- `[plugin:macos:iokit]` - Enable or disable monitoring for storage device.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enable load average | Enable or disable monitoring of load average metrics (load1, load5, load15). | yes | no |\n| system swap | Enable or disable monitoring of system swap metrics (free, used). | yes | no |\n| bandwidth | Enable or disable monitoring of network bandwidth metrics (received, sent). | yes | no |\n| ipv4 TCP packets | Enable or disable monitoring of IPv4 TCP total packets metrics (received, sent). | yes | no |\n| ipv4 TCP errors | Enable or disable monitoring of IPv4 TCP packets metrics (Input Errors, Checksum, Retransmission segments). | yes | no |\n| ipv4 TCP handshake issues | Enable or disable monitoring of IPv4 TCP handshake metrics (Established Resets, Active Opens, Passive Opens, Attempt Fails). 
| yes | no |\n| ECN packets | Enable or disable monitoring of ECN statistics metrics (InCEPkts, InNoECTPkts). | auto | no |\n| TCP SYN cookies | Enable or disable monitoring of TCP SYN cookies metrics (received, sent, failed). | auto | no |\n| TCP out-of-order queue | Enable or disable monitoring of TCP out-of-order queue metrics (inqueue). | auto | no |\n| TCP connection aborts | Enable or disable monitoring of TCP connection aborts metrics (Bad Data, User closed, No memory, Timeout). | auto | no |\n| ipv4 UDP packets | Enable or disable monitoring of ipv4 UDP packets metrics (sent, received). | yes | no |\n| ipv4 UDP errors | Enable or disable monitoring of ipv4 UDP errors metrics (Received Buffer error, Input Errors, No Ports, IN Checksum Errors, Ignore Multi). | yes | no |\n| ipv4 icmp packets | Enable or disable monitoring of IPv4 ICMP packets metrics (sent, received, in error, OUT error, IN Checksum error). | yes | no |\n| ipv4 icmp messages | Enable or disable monitoring of ipv4 ICMP messages metrics (I/O messages, I/O Errors, In Checksum). | yes | no |\n| ipv4 packets | Enable or disable monitoring of ipv4 packets metrics (received, sent, forwarded, delivered). | yes | no |\n| ipv4 fragments sent | Enable or disable monitoring of IPv4 fragments sent metrics (ok, fails, creates). | yes | no |\n| ipv4 fragments assembly | Enable or disable monitoring of IPv4 fragments assembly metrics (ok, failed, all). | yes | no |\n| ipv4 errors | Enable or disable monitoring of IPv4 errors metrics (I/O discard, I/O HDR errors, In Addr errors, In Unknown protos, OUT No Routes). | yes | no |\n| ipv6 packets | Enable or disable monitoring of IPv6 packets metrics (received, sent, forwarded, delivered). | auto | no |\n| ipv6 fragments sent | Enable or disable monitoring of IPv6 fragments sent metrics (ok, failed, all). | auto | no |\n| ipv6 fragments assembly | Enable or disable monitoring of IPv6 fragments assembly metrics (ok, failed, timeout, all). | auto | no |\n| ipv6 errors | Enable or disable monitoring of IPv6 errors metrics (I/O Discards, In Hdr Errors, In Addr Errors, In Truncated Packets, I/O No Routes). | auto | no |\n| icmp | Enable or disable monitoring of ICMP metrics (sent, received). | auto | no |\n| icmp redirects | Enable or disable monitoring of ICMP redirects metrics (received, sent). | auto | no |\n| icmp errors | Enable or disable monitoring of ICMP metrics (I/O Errors, In Checksums, In Destination Unreachable, In Packet too big, In Time Exceeds, In Parm Problem, Out Dest Unreachable, Out Time Exceeds, Out Parm Problems). | auto | no |\n| icmp echos | Enable or disable monitoring of ICMP echos metrics (I/O Echos, I/O Echo Reply). | auto | no |\n| icmp router | Enable or disable monitoring of ICMP router metrics (I/O Solicits, I/O Advertisements). | auto | no |\n| icmp neighbor | Enable or disable monitoring of ICMP neighbor metrics (I/O Solicits, I/O Advertisements). | auto | no |\n| icmp types | Enable or disable monitoring of ICMP types metrics (I/O Type1, I/O Type128, I/O Type129, Out Type133, Out Type135, In Type136, Out Type145). | auto | no |\n| space usage for all disks | Enable or disable monitoring of space usage for all disks metrics (available, used, reserved for root). | yes | no |\n| inodes usage for all disks | Enable or disable monitoring of inodes usage for all disks metrics (available, used, reserved for root). | yes | no |\n| bandwidth | Enable or disable monitoring of bandwidth metrics (received, sent). 
| yes | no |\n| system uptime | Enable or disable monitoring of system uptime metrics (uptime). | yes | no |\n| cpu utilization | Enable or disable monitoring of CPU utilization metrics (user, nice, system, idle). | yes | no |\n| system ram | Enable or disable monitoring of system RAM metrics (Active, Wired, throttled, compressor, inactive, purgeable, speculative, free). | yes | no |\n| swap i/o | Enable or disable monitoring of SWAP I/O metrics (I/O Swap). | yes | no |\n| memory page faults | Enable or disable monitoring of memory page faults metrics (memory, cow, I/O page, compress, decompress, zero fill, reactivate, purge). | yes | no |\n| disk i/o | Enable or disable monitoring of disk I/O metrics (In, Out). | yes | no |\n\n#### Examples\n\n##### Disable swap monitoring.\n\nA basic example that disables swap monitoring\n\n```yaml\n[plugin:macos:sysctl]\n  system swap = no\n[plugin:macos:mach_smi]\n  swap i/o = no\n\n```\n##### Disable the complete Mach SMI section.\n\nA basic example that disables the complete `mach_smi` section\n\n```yaml\n[plugin:macos:mach_smi]\n  cpu utilization = no\n  system ram = no\n  swap i/o = no\n  memory page faults = no\n  disk i/o = no\n\n```\n",
        "troubleshooting": "",
        "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ interface_speed ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.net | network interface ${label:device} current speed |\n",
        "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per macOS instance\n\nThese metrics refer to hardware and network monitoring.\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.cpu | user, nice, system, idle | percentage |\n| system.ram | active, wired, throttled, compressor, inactive, purgeable, speculative, free | MiB |\n| mem.swapio | io, out | KiB/s |\n| mem.pgfaults | memory, cow, pagein, pageout, compress, decompress, zero_fill, reactivate, purge | faults/s |\n| system.load | load1, load5, load15 | load |\n| mem.swap | free, used | MiB |\n| system.ipv4 | received, sent | kilobits/s |\n| ipv4.tcppackets | received, sent | packets/s |\n| ipv4.tcperrors | InErrs, InCsumErrors, RetransSegs | packets/s |\n| ipv4.tcphandshake | EstabResets, ActiveOpens, PassiveOpens, AttemptFails | events/s |\n| ipv4.tcpconnaborts | baddata, userclosed, nomemory, timeout | connections/s |\n| ipv4.tcpofo | inqueue | packets/s |\n| ipv4.tcpsyncookies | received, sent, failed | packets/s |\n| ipv4.ecnpkts | CEP, NoECTP | packets/s |\n| ipv4.udppackets | received, sent | packets/s |\n| ipv4.udperrors | RcvbufErrors, InErrors, NoPorts, InCsumErrors, IgnoredMulti | events/s |\n| ipv4.icmp | received, sent | packets/s |\n| ipv4.icmp_errors | InErrors, OutErrors, InCsumErrors | packets/s |\n| ipv4.icmpmsg | InEchoReps, OutEchoReps, InEchos, OutEchos | packets/s |\n| ipv4.packets | received, sent, forwarded, delivered | packets/s |\n| ipv4.fragsout | ok, failed, created | packets/s |\n| ipv4.fragsin | ok, failed, all | packets/s |\n| ipv4.errors | InDiscards, OutDiscards, InHdrErrors, OutNoRoutes, InAddrErrors, InUnknownProtos | packets/s |\n| ipv6.packets | received, sent, forwarded, delivers | packets/s |\n| ipv6.fragsout | ok, failed, all | packets/s |\n| ipv6.fragsin | ok, failed, timeout, all | packets/s |\n| ipv6.errors | 
InDiscards, OutDiscards, InHdrErrors, InAddrErrors, InTruncatedPkts, InNoRoutes, OutNoRoutes | packets/s |\n| ipv6.icmp | received, sent | messages/s |\n| ipv6.icmpredir | received, sent | redirects/s |\n| ipv6.icmperrors | InErrors, OutErrors, InCsumErrors, InDestUnreachs, InPktTooBigs, InTimeExcds, InParmProblems, OutDestUnreachs, OutTimeExcds, OutParmProblems | errors/s |\n| ipv6.icmpechos | InEchos, OutEchos, InEchoReplies, OutEchoReplies | messages/s |\n| ipv6.icmprouter | InSolicits, OutSolicits, InAdvertisements, OutAdvertisements | messages/s |\n| ipv6.icmpneighbor | InSolicits, OutSolicits, InAdvertisements, OutAdvertisements | messages/s |\n| ipv6.icmptypes | InType1, InType128, InType129, InType136, OutType1, OutType128, OutType129, OutType133, OutType135, OutType143 | messages/s |\n| system.uptime | uptime | seconds |\n| system.io | in, out | KiB/s |\n\n### Per disk\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.io | read, writes | KiB/s |\n| disk.ops | read, writes | operations/s |\n| disk.util | utilization | % of time working |\n| disk.iotime | reads, writes | milliseconds/s |\n| disk.await | reads, writes | milliseconds/operation |\n| disk.avgsz | reads, writes | KiB/operation |\n| disk.svctm | svctm | milliseconds/operation |\n\n### Per mount point\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.space | avail, used, reserved_for_root | GiB |\n| disk.inodes | avail, used, reserved_for_root | inodes |\n\n### Per network device\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| net.net | received, sent | kilobits/s |\n| net.packets | received, sent, multicast_received, multicast_sent | packets/s |\n| net.errors | inbound, outbound | errors/s |\n| net.drops | inbound | drops/s |\n| net.events | frames, collisions, carrier | events/s |\n\n", @@ -17683,7 +18064,7 @@ "most_popular": false }, "overview": "# Netfilter\n\nPlugin: nfacct.plugin\nModule: nfacct.plugin\n\n## Overview\n\nMonitor Netfilter metrics for optimal packet filtering and manipulation. Keep tabs on packet counts, dropped packets, and error rates to secure network operations.\n\nNetdata uses libmnl (https://www.netfilter.org/projects/libmnl/index.html) to collect information.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThis plugin needs setuid.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis plugin uses socket to connect with netfilter to collect data\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install required packages\n\nInstall `libmnl-dev` and `libnetfilter-acct-dev` using the package manager of your system.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:nfacct]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Additional parameters for the collector | | no |\n\n#### Examples\nThere are no configuration examples.\n\n",
+        "setup": "## Setup\n\n### Prerequisites\n\n#### Install required packages\n\nInstall `libmnl-dev` and `libnetfilter-acct-dev` using the package manager of your system.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:nfacct]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Additional parameters for the collector | | no |\n\n#### Examples\nThere are no configuration examples.\n\n",
        "troubleshooting": "",
        "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n",
        "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Netfilter instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netfilter.netlink_new | new, ignore, invalid | connections/s |\n| netfilter.netlink_changes | insert, delete, delete_list | changes/s |\n| netfilter.netlink_search | searched, search_restart, found | searches/s |\n| netfilter.netlink_errors | icmp_error, insert_failed, drop, early_drop | events/s |\n| netfilter.netlink_expect | created, deleted, new | expectations/s |\n| netfilter.nfacct_packets | a dimension per nfacct object | packets/s |\n| netfilter.nfacct_bytes | a dimension per nfacct object | kilobytes/s |\n\n",
@@ -17721,7 +18102,7 @@
        "most_popular": false
    },
    "overview": "# CPU performance\n\nPlugin: perf.plugin\nModule: perf.plugin\n\n## Overview\n\nThis collector monitors CPU performance metrics about cycles, instructions, migrations, cache operations and more.\n\nIt uses `syscall(2)` to open a file descriptor to monitor the perf events.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nIt needs setuid to use the necessary syscall to collect perf events. 
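A quick way to check that the setuid bit is actually present on the plugin binary is sketched below; the path shown is the usual plugin directory and is an assumption, so adjust it if your installation differs:\n\n```bash\n# look for an 's' in the owner permissions, e.g. -rwsr-x---\nls -l /usr/libexec/netdata/plugins.d/perf.plugin\n```\n\n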
Netdata sets the permission during installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install perf plugin\n\nIf you are [using our official native DEB/RPM packages](/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure the `netdata-plugin-perf` package is installed.\n\n\n#### Enable the perf plugin\n\nThe plugin is disabled by default because the number of PMUs is usually quite limited and it is not desired to allow Netdata to struggle silently for PMUs, interfering with other performance monitoring software.\n\nTo enable it, use `edit-config` from the Netdata [config directory](/docs/netdata-agent/configuration/README.md), which is typically at `/etc/netdata`, to edit the `netdata.conf` file.\n\n```bash\ncd /etc/netdata # Replace this path with your Netdata config directory, if different\nsudo ./edit-config netdata.conf\n```\n\nChange the value of the `perf` setting to `yes` in the `[plugins]` section. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](/packaging/installer/README.md#maintaining-a-netdata-agent-installation) for your system.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:perf]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nYou can get the available options running:\n\n```bash\n/usr/libexec/netdata/plugins.d/perf.plugin --help\n````\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Command options that specify charts shown by the plugin. `cycles`, `instructions`, `branch`, `cache`, `bus`, `stalled`, `migrations`, `alignment`, `emulation`, `L1D`, `L1D-prefetch`, `L1I`, `LL`, `DTLB`, `ITLB`, `PBU`. 
| 1 | yes |\n\n#### Examples\n\n##### All metrics\n\nMonitor all metrics available.\n\n```yaml\n[plugin:perf]\n  command options = all\n\n```\n##### CPU cycles\n\nMonitor CPU cycles.\n\n```yaml\n[plugin:perf]\n  command options = cycles\n\n```\n",
+        "setup": "## Setup\n\n### Prerequisites\n\n#### Install perf plugin\n\nIf you are [using our official native DEB/RPM packages](/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure the `netdata-plugin-perf` package is installed.\n\n\n#### Enable the perf plugin\n\nThe plugin is disabled by default because the number of PMUs is usually quite limited, and letting Netdata compete silently for them could interfere with other performance monitoring software.\n\nTo enable it, use `edit-config` from the Netdata [config directory](/docs/netdata-agent/configuration/README.md), which is typically at `/etc/netdata`, to edit the `netdata.conf` file.\n\n```bash\ncd /etc/netdata # Replace this path with your Netdata config directory, if different\nsudo ./edit-config netdata.conf\n```\n\nChange the value of the `perf` setting to `yes` in the `[plugins]` section. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](/docs/netdata-agent/start-stop-restart.md) for your system.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:perf]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nYou can list the available options by running:\n\n```bash\n/usr/libexec/netdata/plugins.d/perf.plugin --help\n```\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Command options that specify charts shown by the plugin. `cycles`, `instructions`, `branch`, `cache`, `bus`, `stalled`, `migrations`, `alignment`, `emulation`, `L1D`, `L1D-prefetch`, `L1I`, `LL`, `DTLB`, `ITLB`, `PBU`. | 1 | yes |\n\n#### Examples\n\n##### All metrics\n\nMonitor all metrics available.\n\n```yaml\n[plugin:perf]\n  command options = all\n\n```\n##### CPU cycles\n\nMonitor CPU cycles.\n\n```yaml\n[plugin:perf]\n  command options = cycles\n\n```\n",
        "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n\n\n",
        "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n",
        "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per CPU performance instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| perf.cpu_cycles | cpu, ref_cpu | cycles/s |\n| perf.instructions | instructions | instructions/s |\n| perf.instructions_per_cycle | ipc | instructions/cycle |\n| perf.branch_instructions | instructions, misses | instructions/s |\n| perf.cache | references, misses | operations/s |\n| perf.bus_cycles | bus | cycles/s |\n| perf.stalled_cycles | frontend, backend | cycles/s |\n| perf.migrations | migrations | migrations |\n| perf.alignment_faults | faults | faults |\n| perf.emulation_faults | faults | faults |\n| perf.l1d_cache | read_access, read_misses, write_access, write_misses | events/s |\n| perf.l1d_cache_prefetch | prefetches | prefetches/s |\n| perf.l1i_cache | read_access, read_misses | events/s |\n| perf.ll_cache | read_access, read_misses, write_access, write_misses | events/s |\n| perf.dtlb_cache | read_access, read_misses, write_access, write_misses | events/s |\n| perf.itlb_cache | read_access, read_misses | events/s |\n| perf.pbu_cache | read_access | events/s |\n\n", @@ -18517,7 +18898,7 @@ "most_popular": false }, "overview": "# System statistics\n\nPlugin: proc.plugin\nModule: /proc/stat\n\n## Overview\n\nCPU utilization, states and frequencies and key Linux system performance metrics.\n\nThe `/proc/stat` file provides various types of system statistics:\n\n- The overall system CPU usage statistics\n- Per CPU core statistics\n- The total context switching of the system\n- The total number of processes running\n- The total CPU interrupts\n- The total CPU softirqs\n\nThe collector also reads:\n\n- `/proc/schedstat` for statistics about the process scheduler in the Linux kernel.\n- `/sys/devices/system/cpu/[X]/thermal_throttle/core_throttle_count` to get the count of thermal throttling events for a specific CPU core on Linux systems.\n- `/sys/devices/system/cpu/[X]/thermal_throttle/package_throttle_count` to get the count of thermal throttling events for a specific CPU package on a Linux system.\n- `/sys/devices/system/cpu/[X]/cpufreq/scaling_cur_freq` to get the current operating frequency of a specific CPU core.\n- `/sys/devices/system/cpu/[X]/cpufreq/stats/time_in_state` to get the amount of time the CPU has spent in each of its available frequency states.\n- `/sys/devices/system/cpu/[X]/cpuidle/state[X]/name` to get the names of the idle states for each CPU core in a Linux system.\n- `/sys/devices/system/cpu/[X]/cpuidle/state[X]/time` to get the total time each specific CPU core has spent in each idle state since the system was started.\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector auto-detects all metrics. 
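If you are curious about the raw data the collector parses, `/proc/stat` is plain text and can be inspected directly; this is just the kernel interface itself, not a Netdata command:\n\n```bash\n# print the aggregate and per-core CPU counters plus a few other lines\nhead -n 5 /proc/stat\n```\n\n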
No configuration is needed.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe collector disables cpu frequency and idle state monitoring when there are more than 128 CPU cores available.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `plugin:proc:/proc/stat` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `plugin:proc:/proc/stat` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU utilization over the last 10 minutes (excluding iowait, nice and steal) |\n| [ 10min_cpu_iowait ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU iowait time over the last 10 minutes |\n| [ 20min_steal_cpu ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU steal time over the last 20 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per System statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.cpu | guest_nice, guest, steal, softirq, irq, user, system, nice, iowait, idle | percentage |\n| system.intr | interrupts | interrupts/s |\n| system.ctxt | switches | context switches/s |\n| system.forks | started | processes/s |\n| system.processes | running, blocked | processes |\n| cpu.core_throttling | a dimension per cpu core | events/s |\n| cpu.package_throttling | a dimension per package | events/s |\n| cpu.cpufreq | a dimension per cpu core | MHz |\n\n### Per cpu core\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cpu | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.cpu | guest_nice, guest, steal, softirq, irq, user, system, nice, iowait, idle | percentage |\n| cpuidle.cpu_cstate_residency_time | a dimension per c-state | percentage |\n\n", @@ -18991,7 +19372,7 @@ "most_popular": false }, "overview": "# AM2320\n\nPlugin: python.d.plugin\nModule: am2320\n\n## Overview\n\nThis collector monitors AM2320 sensor metrics about temperature and humidity.\n\nIt retrieves temperature and humidity values by contacting an AM2320 sensor over i2c.\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAssuming prerequisites are met, the collector will try to connect to the sensor via i2c\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Sensor connection to a Raspberry Pi\n\nConnect the am2320 to the Raspberry Pi I2C pins\n\nRaspberry Pi 3B/4 Pins:\n\n- Board 3.3V (pin 1) to sensor VIN (pin 1)\n- Board SDA (pin 3) to sensor SDA (pin 2)\n- Board GND (pin 6) to sensor GND (pin 3)\n- Board SCL (pin 5) to sensor SCL (pin 4)\n\nYou may also need to add two I2C pullup resistors if your board does not already have them. The Raspberry Pi does have internal pullup resistors but it doesn't hurt to add them anyway. You can use 2.2K - 10K but we will just use 10K. 
The resistors go from VDD to SCL and SDA each.\n\n\n#### Software requirements\n\nInstall the Adafruit Circuit Python AM2320 library:\n\n`sudo pip3 install adafruit-circuitpython-am2320`\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/am2320.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/am2320.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n\n#### Examples\n\n##### Local sensor\n\nA basic JOB configuration\n\n```yaml\nlocal_sensor:\n name: 'Local AM2320'\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Sensor connection to a Raspberry Pi\n\nConnect the am2320 to the Raspberry Pi I2C pins\n\nRaspberry Pi 3B/4 Pins:\n\n- Board 3.3V (pin 1) to sensor VIN (pin 1)\n- Board SDA (pin 3) to sensor SDA (pin 2)\n- Board GND (pin 6) to sensor GND (pin 3)\n- Board SCL (pin 5) to sensor SCL (pin 4)\n\nYou may also need to add two I2C pullup resistors if your board does not already have them. The Raspberry Pi does have internal pullup resistors but it doesn't hurt to add them anyway. You can use 2.2K - 10K but we will just use 10K. 
The resistors go from VDD to SCL and SDA each.\n\n\n#### Software requirements\n\nInstall the Adafruit Circuit Python AM2320 library:\n\n`sudo pip3 install adafruit-circuitpython-am2320`\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/am2320.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/am2320.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n\n#### Examples\n\n##### Local sensor\n\nA basic JOB configuration\n\n```yaml\nlocal_sensor:\n name: 'Local AM2320'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n\nTo troubleshoot issues with the `am2320` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin am2320 debug trace\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `am2320` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep am2320\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep am2320 /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep am2320\n```\n\n",
    "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n",
    "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per AM2320 instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| am2320.temperature | temperature | celsius |\n| am2320.humidity | humidity | percentage |\n\n",
    "integration_type": "collector",
    "id": "python.d.plugin-am2320-AM2320",
    "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/am2320/metadata.yaml",
    "related_resources": ""
},
{
    "meta": {
        "plugin_name": "python.d.plugin",
        "module_name": "boinc",
        "monitored_instance": {
            "name": "BOINC",
            "link": "https://boinc.berkeley.edu/",
            "categories": [
                "data-collection.distributed-computing-systems"
            ],
            "icon_filename": "bolt.svg"
        },
        "related_resources": {
            "integrations": {
                "list": []
            }
        },
        "info_provided_to_referring_integrations": {
            "description": ""
        },
        "keywords": [
            "boinc",
            "distributed"
        ],
        "most_popular": false
    },
    "overview": "# BOINC\n\nPlugin: python.d.plugin\nModule: boinc\n\n## Overview\n\nThis collector monitors task counts for the Berkeley Open Infrastructure for Network Computing (BOINC) distributed computing client.\n\nIt uses the same RPC interface that the BOINC monitoring GUI does.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, the module will try to auto-detect the password to the RPC interface by looking in `/var/lib/boinc` for the `gui_rpc_auth.cfg` file (this is the location most Linux distributions use for a system-wide BOINC installation), so things may just work without needing configuration for a local system.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",
    "setup": "## Setup\n\n### Prerequisites\n\n#### BOINC RPC interface\n\nBOINC requires the use of a password to access its RPC interface. 
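The same password is used by BOINC's own command-line client, so one quick way to confirm that RPC access works is a sketch like the following, where the host and password are placeholders you need to substitute:\n\n```bash\n# query the full client state over the RPC interface\nboinccmd --host localhost --passwd 'your-rpc-password' --get_state\n```\n\n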
You can find this password in the `gui_rpc_auth.cfg` file in your BOINC directory.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/boinc.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/boinc.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| hostname | Define a hostname where boinc is running. | localhost | no |\n| port | The port of boinc RPC interface. | | no |\n| password | Provide a password to connect to a boinc RPC interface. | | no |\n\n#### Examples\n\n##### Configuration of a remote boinc instance\n\nA basic JOB configuration for a remote boinc instance\n\n```yaml\nremote:\n hostname: '1.2.3.4'\n port: 1234\n password: 'some-password'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\nlocalhost:\n name: 'local'\n host: '127.0.0.1'\n port: 1234\n password: 'some-password'\n\nremote_job:\n name: 'remote'\n host: '192.0.2.1'\n port: 1234\n password: some-other-password\n\n```\n", - "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n\nTo troubleshoot issues with the `boinc` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin boinc debug trace\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `boinc` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep boinc\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep boinc /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep boinc\n```\n\n", - "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ boinc_total_tasks ](https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf) | boinc.tasks | average number of total tasks over the last 10 minutes |\n| [ boinc_active_tasks ](https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf) | boinc.tasks | average number of active tasks over the last 10 minutes |\n| [ boinc_compute_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf) | boinc.states | average number of compute errors over the last 10 minutes |\n| [ boinc_upload_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf) | boinc.states | average number of failed uploads over the last 10 minutes |\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per BOINC instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| boinc.tasks | Total, Active | tasks |\n| boinc.states | New, Downloading, Ready to Run, Compute Errors, Uploading, Uploaded, Aborted, Failed Uploads | tasks |\n| boinc.sched | Uninitialized, Preempted, Scheduled | tasks |\n| boinc.process | Uninitialized, Executing, Suspended, Aborted, Quit, Copy Pending | tasks |\n\n", + "overview": "# Go applications (EXPVAR)\n\nPlugin: python.d.plugin\nModule: go_expvar\n\n## Overview\n\nThis collector monitors Go applications that expose their metrics with the use of the `expvar` package from the Go standard library. It produces charts for Go runtime memory statistics and optionally any number of custom charts.\n\nIt connects via http to gather the metrics exposed via the `expvar` package.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Enable the go_expvar collector\n\nThe `go_expvar` collector is disabled by default. 
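Before enabling it, you may want to confirm that your application's expvar endpoint actually responds; this assumes it listens on `127.0.0.1:8080`, like the sample application shown later in this section:\n\n```bash\n# the endpoint should return a JSON document with a memstats object\ncurl -s http://127.0.0.1:8080/debug/vars | head\n```\n\n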
To enable it, use `edit-config` from the Netdata [config directory](/docs/netdata-agent/configuration/README.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file.\n\n```bash\ncd /etc/netdata # Replace this path with your Netdata config directory, if different\nsudo ./edit-config python.d.conf\n```\n\nChange the value of the `go_expvar` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](/docs/netdata-agent/start-stop-restart.md) for your system.\n\n\n#### Sample `expvar` usage in a Go application\n\nThe `expvar` package exposes metrics over HTTP and is very easy to use.\nConsider the minimal sample below:\n\n```go\npackage main\n\nimport (\n    _ \"expvar\"\n    \"net/http\"\n)\n\nfunc main() {\n    http.ListenAndServe(\"127.0.0.1:8080\", nil)\n}\n```\n\nWhen imported this way, the `expvar` package registers an HTTP handler at `/debug/vars` that\nexposes Go runtime's memory statistics in JSON format. You can inspect the output by opening\nthe URL in your browser (or by using `wget` or `curl`).\n\nSample output:\n\n```json\n{\n\"cmdline\": [\"./expvar-demo-binary\"],\n\"memstats\": {\"Alloc\":630856,\"TotalAlloc\":630856,\"Sys\":3346432,\"Lookups\":27, }\n}\n```\n\nYou can of course expose and monitor your own variables as well.\nHere is a sample Go application that exposes a few custom variables:\n\n```go\npackage main\n\nimport (\n    \"expvar\"\n    \"net/http\"\n    \"runtime\"\n    \"time\"\n)\n\nfunc main() {\n\n    tick := time.NewTicker(1 * time.Second)\n    num_go := expvar.NewInt(\"runtime.goroutines\")\n    counters := expvar.NewMap(\"counters\")\n    counters.Set(\"cnt1\", new(expvar.Int))\n    counters.Set(\"cnt2\", new(expvar.Float))\n\n    go http.ListenAndServe(\":8080\", nil)\n\n    for {\n        select {\n        case <- tick.C:\n            num_go.Set(int64(runtime.NumGoroutine()))\n            counters.Add(\"cnt1\", 1)\n            counters.AddFloat(\"cnt2\", 1.452)\n        }\n    }\n}\n```\n\nApart from the runtime memory stats, this application publishes two counters and the\nnumber of currently running goroutines and updates these stats every second.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/go_expvar.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/go_expvar.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. Each JOB can be used to monitor a different Go application.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. 
| 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| url | the URL and port of the expvar endpoint. Please include the whole path of the endpoint, as the expvar handler can be installed in a non-standard location. | | yes |\n| user | If the URL is password protected, this is the username to use. | | no |\n| pass | If the URL is password protected, this is the password to use. | | no |\n| collect_memstats | Enables charts for Go runtime's memory statistics. | | no |\n| extra_charts | Defines extra data/charts to monitor, please see the example below. | | no |\n\n#### Examples\n\n##### Monitor a Go app1 application\n\nThe example below sets a configuration for a Go application, called `app1`. Besides the `memstats`, the application also exposes two counters and the number of currently running goroutines and updates these stats every second.\n\nThe `go_expvar` collector can monitor these as well with the use of the `extra_charts` configuration variable.\n\nThe `extra_charts` variable is a YAML list of Netdata chart definitions.\nEach chart definition has the following keys:\n\n```\nid: Netdata chart ID\noptions: a key-value mapping of chart options\nlines: a list of line definitions\n```\n\n**Note: please do not use dots in the chart or line ID field.\nSee [this issue](https://github.com/netdata/netdata/pull/1902#issuecomment-284494195) for explanation.**\n\nPlease see these two links to the official Netdata documentation for more information about the values:\n\n- [External plugins - charts](/src/plugins.d/README.md#chart)\n- [Chart variables](/src/collectors/python.d.plugin/README.md#global-variables-order-and-chart)\n\n**Line definitions**\n\nEach chart can define multiple lines (dimensions).\nA line definition is a key-value mapping of line options.\nEach line can have the following options:\n\n```\n# mandatory\nexpvar_key: the name of the expvar as present in the JSON output of /debug/vars endpoint\nexpvar_type: value type; supported are \"float\" or \"int\"\nid: the id of this line/dimension in Netdata\n\n# optional - Netdata defaults are used if these options are not defined\nname: ''\nalgorithm: absolute\nmultiplier: 1\ndivisor: 100 if expvar_type == float, 1 if expvar_type == int\nhidden: False\n```\n\nPlease see the following link for more information about the options and their default values:\n[External plugins - dimensions](/src/plugins.d/README.md#dimension)\n\nApart from top-level expvars, this plugin can also parse expvars stored in a multi-level map.\nAll dicts in the resulting JSON document are then flattened to one level.\nExpvar names are joined together with '.' when flattening.\n\nExample:\n\n```\n{\n    \"counters\": {\"cnt1\": 1042, \"cnt2\": 1512.9839999999983},\n    \"runtime.goroutines\": 5\n}\n```\n\nIn the above case, the exported variables will be available under `runtime.goroutines`,\n`counters.cnt1` and `counters.cnt2` expvar_keys. 
If the flattening results in a key collision,\nthe first defined key wins and all subsequent keys with the same name are ignored.\n\n\n```yaml\napp1:\n name : 'app1'\n url : 'http://127.0.0.1:8080/debug/vars'\n collect_memstats: true\n extra_charts:\n - id: \"runtime_goroutines\"\n options:\n name: num_goroutines\n title: \"runtime: number of goroutines\"\n units: goroutines\n family: runtime\n context: expvar.runtime.goroutines\n chart_type: line\n lines:\n - {expvar_key: 'runtime.goroutines', expvar_type: int, id: runtime_goroutines}\n - id: \"foo_counters\"\n options:\n name: counters\n title: \"some random counters\"\n units: awesomeness\n family: counters\n context: expvar.foo.counters\n chart_type: line\n lines:\n - {expvar_key: 'counters.cnt1', expvar_type: int, id: counters_cnt1}\n - {expvar_key: 'counters.cnt2', expvar_type: float, id: counters_cnt2}\n\n```\n", + "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n\nTo troubleshoot issues with the `go_expvar` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin go_expvar debug trace\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `go_expvar` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep go_expvar\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep go_expvar /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep go_expvar\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to.
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Go applications (EXPVAR) instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| expvar.memstats.heap | alloc, inuse | KiB |\n| expvar.memstats.stack | inuse | KiB |\n| expvar.memstats.mspan | inuse | KiB |\n| expvar.memstats.mcache | inuse | KiB |\n| expvar.memstats.live_objects | live | objects |\n| expvar.memstats.sys | sys | KiB |\n| expvar.memstats.gc_pauses | avg | ns |\n\n", "integration_type": "collector", - "id": "python.d.plugin-boinc-BOINC", - "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/boinc/metadata.yaml", + "id": "python.d.plugin-go_expvar-Go_applications_(EXPVAR)", + "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/go_expvar/metadata.yaml", "related_resources": "" }, { "meta": { "plugin_name": "python.d.plugin", - "module_name": "ceph", + "module_name": "pandas", "monitored_instance": { - "name": "Ceph", - "link": "https://ceph.io/", + "name": "Pandas", + "link": "https://pandas.pydata.org/", "categories": [ - "data-collection.storage-mount-points-and-filesystems" + "data-collection.generic-data-collection" ], - "icon_filename": "ceph.svg" + "icon_filename": "pandas.png" }, "related_resources": { "integrations": { @@ -19057,32 +19439,32 @@ "description": "" }, "keywords": [ - "ceph", - "storage" + "pandas", + "python" ], "most_popular": false }, - "overview": "# Ceph\n\nPlugin: python.d.plugin\nModule: ceph\n\n## Overview\n\nThis collector monitors Ceph metrics about Cluster statistics, OSD usage, latency and Pool statistics.\n\nUses the `rados` python module to connect to a Ceph cluster.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### `rados` python module\n\nMake sure the `rados` python module is installed\n\n#### Granting read permissions to ceph group from keyring file\n\nExecute: `chmod 640 /etc/ceph/ceph.client.admin.keyring`\n\n#### Create a specific rados_id\n\nYou can optionally create a rados_id to use instead of admin\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/ceph.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/ceph.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration 
JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| config_file | Ceph config file | | yes |\n| keyring_file | Ceph keyring file. netdata user must be added into ceph group and keyring file must be read group permission. | | yes |\n| rados_id | A rados user id to use for connecting to the Ceph cluster. | admin | no |\n\n#### Examples\n\n##### Basic local Ceph cluster\n\nA basic configuration to connect to a local Ceph cluster.\n\n```yaml\nlocal:\n config_file: '/etc/ceph/ceph.conf'\n keyring_file: '/etc/ceph/ceph.client.admin.keyring'\n\n```\n", - "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n\nTo troubleshoot issues with the `ceph` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin ceph debug trace\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `ceph` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep ceph\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep ceph /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep ceph\n```\n\n", - "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ ceph_cluster_space_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ceph.conf) | ceph.general_usage | cluster disk space utilization |\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Ceph instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ceph.general_usage | avail, used | KiB |\n| ceph.general_objects | cluster | objects |\n| ceph.general_bytes | read, write | KiB/s |\n| ceph.general_operations | read, write | operations |\n| ceph.general_latency | apply, commit | milliseconds |\n| ceph.pool_usage | a dimension per Ceph Pool | KiB |\n| ceph.pool_objects | a dimension per Ceph Pool | objects |\n| ceph.pool_read_bytes | a dimension per Ceph Pool | KiB/s |\n| ceph.pool_write_bytes | a dimension per Ceph Pool | KiB/s |\n| ceph.pool_read_operations | a dimension per Ceph Pool | operations |\n| ceph.pool_write_operations | a dimension per Ceph Pool | operations |\n| ceph.osd_usage | a dimension per Ceph OSD | KiB |\n| ceph.osd_size | a dimension per Ceph OSD | KiB |\n| ceph.apply_latency | a dimension per Ceph OSD | milliseconds |\n| ceph.commit_latency | a dimension per Ceph OSD | milliseconds |\n\n", + "overview": "# Pandas\n\nPlugin: python.d.plugin\nModule: pandas\n\n## Overview\n\n[Pandas](https://pandas.pydata.org/) is a de-facto standard in reading and processing most types of structured data in Python.\nIf you have metrics appearing in a CSV, JSON, XML, HTML, or [other supported format](https://pandas.pydata.org/docs/user_guide/io.html),\neither locally or via some HTTP endpoint, you can easily ingest and present those metrics in Netdata, by leveraging the Pandas collector.\n\nThis collector can be used to collect pretty much anything that can be read by Pandas, and then processed by Pandas.\n\n\nThe collector uses [pandas](https://pandas.pydata.org/) to pull data and do pandas-based preprocessing, before feeding to Netdata.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Python Requirements\n\nThis collector depends on some Python (Python 3 only) packages that can usually be installed via `pip` or `pip3`.\n\n```bash\nsudo pip install pandas requests\n```\n\nNote: If you would like to use [`pandas.read_sql`](https://pandas.pydata.org/docs/reference/api/pandas.read_sql.html) to query a database, you will need to install the below packages as 
well.\n\n```bash\nsudo pip install 'sqlalchemy<2.0' psycopg2-binary\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/pandas.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/pandas.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| chart_configs | an array of chart configuration dictionaries | [] | yes |\n| chart_configs.name | name of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.title | title of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.family | [family](/docs/dashboards-and-charts/netdata-charts.md#families) of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.context | [context](/docs/dashboards-and-charts/netdata-charts.md#contexts) of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.type | the type of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.units | the units of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.df_steps | a series of pandas operations (one per line) that each returns a dataframe. | None | yes |\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. 
| | no |\n\n#### Examples\n\n##### Temperature API Example\n\nAn example pulling some hourly temperature data: one chart for today's forecast (mean, min, max) and another chart for the current temperature.\n\n```yaml\ntemperature:\n name: \"temperature\"\n update_every: 5\n chart_configs:\n - name: \"temperature_forecast_by_city\"\n title: \"Temperature By City - Today Forecast\"\n family: \"temperature.today\"\n context: \"pandas.temperature\"\n type: \"line\"\n units: \"Celsius\"\n df_steps: >\n pd.DataFrame.from_dict(\n {city: requests.get(f'https://api.open-meteo.com/v1/forecast?latitude={lat}&longitude={lng}&hourly=temperature_2m').json()['hourly']['temperature_2m']\n for (city,lat,lng)\n in [\n ('dublin', 53.3441, -6.2675),\n ('athens', 37.9792, 23.7166),\n ('london', 51.5002, -0.1262),\n ('berlin', 52.5235, 13.4115),\n ('paris', 48.8567, 2.3510),\n ('madrid', 40.4167, -3.7033),\n ('new_york', 40.71, -74.01),\n ('los_angeles', 34.05, -118.24),\n ]\n }\n );\n df.describe(); # get aggregate stats for each city;\n df.transpose()[['mean', 'max', 'min']].reset_index(); # just take mean, min, max;\n df.rename(columns={'index':'city'}); # some column renaming;\n df.pivot(columns='city').mean().to_frame().reset_index(); # force to be one row per city;\n df.rename(columns={0:'degrees'}); # some column renaming;\n pd.concat([df, df['city']+'_'+df['level_0']], axis=1); # add new column combining city and summary measurement label;\n df.rename(columns={0:'measurement'}); # some column renaming;\n df[['measurement', 'degrees']].set_index('measurement'); # just take two columns we want;\n df.sort_index(); # sort by city name;\n df.transpose(); # transpose so it's just one wide row;\n - name: \"temperature_current_by_city\"\n title: \"Temperature By City - Current\"\n family: \"temperature.current\"\n context: \"pandas.temperature\"\n type: \"line\"\n units: \"Celsius\"\n df_steps: >\n pd.DataFrame.from_dict(\n {city: requests.get(f'https://api.open-meteo.com/v1/forecast?latitude={lat}&longitude={lng}&current_weather=true').json()['current_weather']\n for (city,lat,lng)\n in [\n ('dublin', 53.3441, -6.2675),\n ('athens', 37.9792, 23.7166),\n ('london', 51.5002, -0.1262),\n ('berlin', 52.5235, 13.4115),\n ('paris', 48.8567, 2.3510),\n ('madrid', 40.4167, -3.7033),\n ('new_york', 40.71, -74.01),\n ('los_angeles', 34.05, -118.24),\n ]\n }\n );\n df.transpose();\n df[['temperature']];\n df.transpose();\n\n```\n##### API CSV Example\n\nAn example showing a `read_csv` from a URL and some light pandas data wrangling.\n\n```yaml\nexample_csv:\n name: \"example_csv\"\n update_every: 2\n chart_configs:\n - name: \"london_system_cpu\"\n title: \"London System CPU - Ratios\"\n family: \"london_system_cpu\"\n context: \"pandas\"\n type: \"line\"\n units: \"n\"\n df_steps: >\n pd.read_csv('https://london.my-netdata.io/api/v1/data?chart=system.cpu&format=csv&after=-60', storage_options={'User-Agent': 'netdata'});\n df.drop('time', axis=1);\n df.mean().to_frame().transpose();\n df.apply(lambda row: (row.user / row.system), axis = 1).to_frame();\n df.rename(columns={0:'average_user_system_ratio'});\n df*100;\n\n```\n
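Reading the `df_steps` blocks above: each step is a pandas expression, one per line, terminated by `;` (or by a custom `line_sep`, as in the XML example below), and the result of each expression is available to the next one as `df`. The sketch below illustrates that evaluation model; the `run_df_steps` helper is hypothetical and not the collector's actual code.\n\n```python\nimport pandas as pd\n\n# Hypothetical helper: evaluate each step in order, binding its result to `df`.\ndef run_df_steps(df_steps, sep=';'):\n df = None\n for step in df_steps.split(sep):\n step = step.strip()\n if step:\n df = eval(step, {'pd': pd, 'df': df})\n return df\n\ndf = run_df_steps(\n \"pd.DataFrame({'user': [40.0, 60.0], 'system': [10.0, 20.0]});\"\n \"df.mean().to_frame().transpose();\"\n \"df.apply(lambda row: row.user / row.system, axis=1).to_frame();\"\n \"df.rename(columns={0: 'average_user_system_ratio'})\"\n)\n# One row is expected in the final DataFrame; this mirrors how values are read.\nprint(df.to_dict(orient='records')[0])\n```\n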
##### API JSON Example\n\nAn example showing a `read_json` from a URL and some light pandas data wrangling.\n\n```yaml\nexample_json:\n name: \"example_json\"\n update_every: 2\n chart_configs:\n - name: \"london_system_net\"\n title: \"London System Net - Total Bandwidth\"\n family: \"london_system_net\"\n context: \"pandas\"\n type: \"area\"\n units: \"kilobits/s\"\n df_steps: >\n pd.DataFrame(requests.get('https://london.my-netdata.io/api/v1/data?chart=system.net&format=json&after=-1').json()['data'], columns=requests.get('https://london.my-netdata.io/api/v1/data?chart=system.net&format=json&after=-1').json()['labels']);\n df.drop('time', axis=1);\n abs(df);\n df.sum(axis=1).to_frame();\n df.rename(columns={0:'total_bandwidth'});\n\n```\n##### XML Example\n\nAn example showing a `read_xml` from a URL and some light pandas data wrangling.\n\n```yaml\nexample_xml:\n name: \"example_xml\"\n update_every: 2\n line_sep: \"|\"\n chart_configs:\n - name: \"temperature_forecast\"\n title: \"Temperature Forecast\"\n family: \"temp\"\n context: \"pandas.temp\"\n type: \"line\"\n units: \"celsius\"\n df_steps: >\n pd.read_xml('http://metwdb-openaccess.ichec.ie/metno-wdb2ts/locationforecast?lat=54.7210798611;long=-8.7237392806', xpath='./product/time[1]/location/temperature', parser='etree')|\n df.rename(columns={'value': 'dublin'})|\n df[['dublin']]|\n\n```\n##### SQL Example\n\nAn example showing a `read_sql` from a PostgreSQL database using SQLAlchemy.\n\n```yaml\nsql:\n name: \"sql\"\n update_every: 5\n chart_configs:\n - name: \"sql\"\n title: \"SQL Example\"\n family: \"sql.example\"\n context: \"example\"\n type: \"line\"\n units: \"percent\"\n df_steps: >\n pd.read_sql_query(\n sql='\\\n select \\\n random()*100 as metric_1, \\\n random()*100 as metric_2 \\\n ',\n con=create_engine('postgresql://localhost/postgres?user=netdata&password=netdata')\n );\n\n```\n", + "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n\nTo troubleshoot issues with the `pandas` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin pandas debug trace\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `pandas` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep pandas\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep pandas /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep pandas\n```\n\n", + "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nThis collector is expecting one row in the final pandas DataFrame. It is that first row that will be taken\nas the most recent values for each dimension on each chart using (`df.to_dict(orient='records')[0]`).\nSee [pd.to_dict()](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_dict.html).\"\n\n\n### Per Pandas instance\n\nThese metrics refer to the entire monitored application.\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n\n", "integration_type": "collector", - "id": "python.d.plugin-ceph-Ceph", - "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/ceph/metadata.yaml", + "id": "python.d.plugin-pandas-Pandas", + "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/pandas/metadata.yaml", "related_resources": "" }, { "meta": { - "plugin_name": "python.d.plugin", - "module_name": "go_expvar", + "plugin_name": "slabinfo.plugin", + "module_name": "slabinfo.plugin", "monitored_instance": { - "name": "Go applications (EXPVAR)", - "link": "https://pkg.go.dev/expvar", + "name": "Linux kernel SLAB allocator statistics", + "link": "https://kernel.org/", "categories": [ - "data-collection.apm" + "data-collection.linux-systems.kernel-metrics" ], - "icon_filename": "go.png" + "icon_filename": "linuxserver.svg" }, "related_resources": { "integrations": { @@ -19093,144 +19475,35 @@ "description": "" }, "keywords": [ - "go", - "expvar", - "application" + "linux kernel", + "slab", + "slub", + "slob", + "slabinfo" ], "most_popular": false }, - "overview": "# Go applications (EXPVAR)\n\nPlugin: python.d.plugin\nModule: go_expvar\n\n## Overview\n\nThis collector monitors Go applications that expose their metrics with the use of the `expvar` package from the Go standard library. It produces charts for Go runtime memory statistics and optionally any number of custom charts.\n\nIt connects via http to gather the metrics exposed via the `expvar` package.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Enable the go_expvar collector\n\nThe `go_expvar` collector is disabled by default. 
To enable it, use `edit-config` from the Netdata [config directory](/docs/netdata-agent/configuration/README.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file.\n\n```bash\ncd /etc/netdata # Replace this path with your Netdata config directory, if different\nsudo ./edit-config python.d.conf\n```\n\nChange the value of the `go_expvar` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](/packaging/installer/README.md#maintaining-a-netdata-agent-installation) for your system.\n\n\n#### Sample `expvar` usage in a Go application\n\nThe `expvar` package exposes metrics over HTTP and is very easy to use.\nConsider this minimal sample below:\n\n```go\npackage main\n\nimport (\n _ \"expvar\"\n \"net/http\"\n)\n\nfunc main() {\n http.ListenAndServe(\"127.0.0.1:8080\", nil)\n}\n```\n\nWhen imported this way, the `expvar` package registers a HTTP handler at `/debug/vars` that\nexposes Go runtime's memory statistics in JSON format. You can inspect the output by opening\nthe URL in your browser (or by using `wget` or `curl`).\n\nSample output:\n\n```json\n{\n\"cmdline\": [\"./expvar-demo-binary\"],\n\"memstats\": {\"Alloc\":630856,\"TotalAlloc\":630856,\"Sys\":3346432,\"Lookups\":27, }\n}\n```\n\nYou can of course expose and monitor your own variables as well.\nHere is a sample Go application that exposes a few custom variables:\n\n```go\npackage main\n\nimport (\n \"expvar\"\n \"net/http\"\n \"runtime\"\n \"time\"\n)\n\nfunc main() {\n\n tick := time.NewTicker(1 * time.Second)\n num_go := expvar.NewInt(\"runtime.goroutines\")\n counters := expvar.NewMap(\"counters\")\n counters.Set(\"cnt1\", new(expvar.Int))\n counters.Set(\"cnt2\", new(expvar.Float))\n\n go http.ListenAndServe(\":8080\", nil)\n\n for {\n select {\n case <- tick.C:\n num_go.Set(int64(runtime.NumGoroutine()))\n counters.Add(\"cnt1\", 1)\n counters.AddFloat(\"cnt2\", 1.452)\n }\n }\n}\n```\n\nApart from the runtime memory stats, this application publishes two counters and the\nnumber of currently running Goroutines and updates these stats every second.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/go_expvar.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/go_expvar.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. Each JOB can be used to monitor a different Go application.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. 
| 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| url | the URL and port of the expvar endpoint. Please include the whole path of the endpoint, as the expvar handler can be installed in a non-standard location. | | yes |\n| user | If the URL is password protected, this is the username to use. | | no |\n| pass | If the URL is password protected, this is the password to use. | | no |\n| collect_memstats | Enables charts for Go runtime's memory statistics. | | no |\n| extra_charts | Defines extra data/charts to monitor, please see the example below. | | no |\n\n#### Examples\n\n##### Monitor a Go app1 application\n\nThe example below sets a configuration for a Go application, called `app1`. Besides the `memstats`, the application also exposes two counters and the number of currently running Goroutines and updates these stats every second.\n\nThe `go_expvar` collector can monitor these as well with the use of the `extra_charts` configuration variable.\n\nThe `extra_charts` variable is a YaML list of Netdata chart definitions.\nEach chart definition has the following keys:\n\n```\nid: Netdata chart ID\noptions: a key-value mapping of chart options\nlines: a list of line definitions\n```\n\n**Note: please do not use dots in the chart or line ID field.\nSee [this issue](https://github.com/netdata/netdata/pull/1902#issuecomment-284494195) for explanation.**\n\nPlease see these two links to the official Netdata documentation for more information about the values:\n\n- [External plugins - charts](/src/collectors/plugins.d/README.md#chart)\n- [Chart variables](/src/collectors/python.d.plugin/README.md#global-variables-order-and-chart)\n\n**Line definitions**\n\nEach chart can define multiple lines (dimensions).\nA line definition is a key-value mapping of line options.\nEach line can have the following options:\n\n```\n# mandatory\nexpvar_key: the name of the expvar as present in the JSON output of /debug/vars endpoint\nexpvar_type: value type; supported are \"float\" or \"int\"\nid: the id of this line/dimension in Netdata\n\n# optional - Netdata defaults are used if these options are not defined\nname: ''\nalgorithm: absolute\nmultiplier: 1\ndivisor: 100 if expvar_type == float, 1 if expvar_type == int\nhidden: False\n```\n\nPlease see the following link for more information about the options and their default values:\n[External plugins - dimensions](/src/collectors/plugins.d/README.md#dimension)\n\nApart from top-level expvars, this plugin can also parse expvars stored in a multi-level map;\nAll dicts in the resulting JSON document are then flattened to one level.\nExpvar names are joined together with '.' when flattening.\n\nExample:\n\n```\n{\n \"counters\": {\"cnt1\": 1042, \"cnt2\": 1512.9839999999983},\n \"runtime.goroutines\": 5\n}\n```\n\nIn the above case, the exported variables will be available under `runtime.goroutines`,\n`counters.cnt1` and `counters.cnt2` expvar_keys. 
If the flattening results in a key collision,\nthe first defined key wins and all subsequent keys with the same name are ignored.\n\n\n```yaml\napp1:\n name : 'app1'\n url : 'http://127.0.0.1:8080/debug/vars'\n collect_memstats: true\n extra_charts:\n - id: \"runtime_goroutines\"\n options:\n name: num_goroutines\n title: \"runtime: number of goroutines\"\n units: goroutines\n family: runtime\n context: expvar.runtime.goroutines\n chart_type: line\n lines:\n - {expvar_key: 'runtime.goroutines', expvar_type: int, id: runtime_goroutines}\n - id: \"foo_counters\"\n options:\n name: counters\n title: \"some random counters\"\n units: awesomeness\n family: counters\n context: expvar.foo.counters\n chart_type: line\n lines:\n - {expvar_key: 'counters.cnt1', expvar_type: int, id: counters_cnt1}\n - {expvar_key: 'counters.cnt2', expvar_type: float, id: counters_cnt2}\n\n```\n", - "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n\nTo troubleshoot issues with the `go_expvar` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin go_expvar debug trace\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `go_expvar` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep go_expvar\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep go_expvar /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep go_expvar\n```\n\n", + "overview": "# Linux kernel SLAB allocator statistics\n\nPlugin: slabinfo.plugin\nModule: slabinfo.plugin\n\n## Overview\n\nCollects metrics on kernel SLAB cache utilization to monitor the low-level performance impact of workloads in the kernel.\n\n\nThe plugin parses `/proc/slabinfo`\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\nThis integration requires read access to `/proc/slabinfo`, which is accessible only to the root user by default. Netdata uses Linux Capabilities to give the plugin access to this file. `CAP_DAC_READ_SEARCH` is added automatically during installation. 
This capability allows bypassing file read permission checks and directory read and execute permission checks. If file capabilities are not usable, then the plugin is instead installed with the SUID bit set in permissions so that it runs as root.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nDue to the large number of metrics generated by this integration, it is disabled by default and must be manually enabled inside `/etc/netdata/netdata.conf`.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Minimum setup\n\nIf you installed `netdata` using a package manager, it is also necessary to install the package `netdata-plugin-slabinfo`.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugins]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| Enable plugin | As described above, the plugin is disabled by default; this option is used to enable it. | no | yes |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nSLAB cache utilization metrics for the whole system.\n\n### Per Linux kernel SLAB allocator statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.slabmemory | a dimension per cache | B |\n| mem.slabfilling | a dimension per cache | % |\n| mem.slabwaste | a dimension per cache | B |\n\n", "integration_type": "collector", - "id": "python.d.plugin-go_expvar-Go_applications_(EXPVAR)", - "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/go_expvar/metadata.yaml", - "related_resources": "" - }, - { - "meta": { - "plugin_name": "python.d.plugin", - "module_name": "openldap", - "monitored_instance": { - "name": "OpenLDAP", - "link": "https://www.openldap.org/", - "categories": [ - "data-collection.authentication-and-authorization" - ], - "icon_filename": "statsd.png" - }, - "related_resources": { - "integrations": { - "list": [] - } - }, - "info_provided_to_referring_integrations": { - "description": "" - }, - "keywords": [ - "openldap", - "RBAC", - "Directory access" - ], - "most_popular": false - }, - "overview": "# OpenLDAP\n\nPlugin: python.d.plugin\nModule: openldap\n\n## Overview\n\nThis collector monitors OpenLDAP metrics about connections, operations, referrals and more.\n\nStatistics are taken from the monitoring interface of a openLDAP (slapd) server\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis collector doesn't work until all the prerequisites are checked.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Configure the openLDAP server to expose metrics to monitor it.\n\nFollow instructions from https://www.openldap.org/doc/admin24/monitoringslapd.html to activate monitoring interface.\n\n\n#### Install python-ldap module\n\nInstall python ldap module \n\n1. From pip package manager\n\n```bash\npip install ldap\n```\n\n2. With apt package manager (in most deb based distros)\n\n\n```bash\napt-get install python-ldap\n```\n\n\n3. 
With yum package manager (in most rpm based distros)\n\n\n```bash\nyum install python-ldap\n```\n\n\n#### Insert credentials for Netdata to access openLDAP server\n\nUse the `ldappasswd` utility to set a password for the username you will use.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/openldap.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/openldap.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| username | The bind user with right to access monitor statistics | | yes |\n| password | The password for the binded user | | yes |\n| server | The listening address of the LDAP server. In case of TLS, use the hostname which the certificate is published for. | | yes |\n| port | The listening port of the LDAP server. Change to 636 port in case of TLS connection. | 389 | yes |\n| use_tls | Make True if a TLS connection is used over ldaps:// | no | no |\n| use_start_tls | Make True if a TLS connection is used over ldap:// | no | no |\n| cert_check | False if you want to ignore certificate check | True | yes |\n| timeout | Seconds to timeout if no connection exist | | yes |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\nusername: \"cn=admin\"\npassword: \"pass\"\nserver: \"localhost\"\nport: \"389\"\ncheck_cert: True\ntimeout: 1\n\n```\n", - "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n\nTo troubleshoot issues with the `openldap` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin openldap debug trace\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `openldap` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep openldap\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep openldap /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep openldap\n```\n\n", - "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per OpenLDAP instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| openldap.total_connections | connections | connections/s |\n| openldap.traffic_stats | sent | KiB/s |\n| openldap.operations_status | completed, initiated | ops/s |\n| openldap.referrals | sent | referrals/s |\n| openldap.entries | sent | entries/s |\n| openldap.ldap_operations | bind, search, unbind, add, delete, modify, compare | ops/s |\n| openldap.waiters | write, read | waiters/s |\n\n", - "integration_type": "collector", - "id": "python.d.plugin-openldap-OpenLDAP", - "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/openldap/metadata.yaml", - "related_resources": "" - }, - { - "meta": { - "plugin_name": "python.d.plugin", - "module_name": "oracledb", - "monitored_instance": { - "name": "Oracle DB", - "link": "https://docs.oracle.com/en/database/oracle/oracle-database/", - "categories": [ - "data-collection.database-servers" - ], - "icon_filename": "oracle.svg" - }, - "related_resources": { - "integrations": { - "list": [] - } - }, - "info_provided_to_referring_integrations": { - "description": "" - }, - "keywords": [ - "database", - "oracle", - "data warehouse", - "SQL" - ], - "most_popular": false - }, - "overview": "# Oracle DB\n\nPlugin: python.d.plugin\nModule: oracledb\n\n## Overview\n\nThis collector monitors OracleDB database metrics about sessions, tables, memory and more.\n\nIt collects the metrics via the supported database client library\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nIn order for this collector to work, it needs a read-only user `netdata` in the RDBMS.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nWhen the requirements are met, databases on the local host on port 1521 will be auto-detected\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Install the python-oracledb package\n\nYou can follow the official guide below to install the required package:\n\nSource: https://python-oracledb.readthedocs.io/en/latest/user_guide/installation.html\n\n\n#### Create a read only user for netdata\n\nFollow the official instructions for your oracle RDBMS to create a read-only user for netdata. The operation may follow this approach\n\nConnect to your Oracle database with an administrative user and execute:\n\n```bash\nCREATE USER netdata IDENTIFIED BY ;\n\nGRANT CONNECT TO netdata;\nGRANT SELECT_CATALOG_ROLE TO netdata;\n```\n\n\n#### Edit the configuration\n\nEdit the configuration troubleshooting:\n\n1. Provide a valid user for the netdata collector to access the database\n2. 
Specify the network target this database is listening.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/oracledb.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/oracledb.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| user | The username for the user account. | no | yes |\n| password | The password for the user account. | no | yes |\n| server | The IP address or hostname (and port) of the Oracle Database Server. | no | yes |\n| service | The Oracle Database service name. To view the services available on your server run this query, `select SERVICE_NAME from gv$session where sid in (select sid from V$MYSTAT)`. | no | yes |\n| protocol | one of the strings \"tcp\" or \"tcps\" indicating whether to use unencrypted network traffic or encrypted network traffic | no | yes |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration, two jobs described for two databases.\n\n```yaml\nlocal:\n user: 'netdata'\n password: 'secret'\n server: 'localhost:1521'\n service: 'XE'\n protocol: 'tcps'\n\nremote:\n user: 'netdata'\n password: 'secret'\n server: '10.0.0.1:1521'\n service: 'XE'\n protocol: 'tcps'\n\n```\n", - "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n\nTo troubleshoot issues with the `oracledb` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin oracledb debug trace\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `oracledb` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep oracledb\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep oracledb /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep oracledb\n```\n\n", - "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nThese metrics refer to the entire monitored application.\n\n### Per Oracle DB instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| oracledb.session_count | total, active | sessions |\n| oracledb.session_limit_usage | usage | % |\n| oracledb.logons | logons | events/s |\n| oracledb.physical_disk_read_writes | reads, writes | events/s |\n| oracledb.sorts_on_disks | sorts | events/s |\n| oracledb.full_table_scans | full table scans | events/s |\n| oracledb.database_wait_time_ratio | wait time ratio | % |\n| oracledb.shared_pool_free_memory | free memory | % |\n| oracledb.in_memory_sorts_ratio | in-memory sorts | % |\n| oracledb.sql_service_response_time | time | seconds |\n| oracledb.user_rollbacks | rollbacks | events/s |\n| oracledb.enqueue_timeouts | enqueue timeouts | events/s |\n| oracledb.cache_hit_ration | buffer, cursor, library, row | % |\n| oracledb.global_cache_blocks | corrupted, lost | events/s |\n| oracledb.activity | parse count, execute count, user commits, user rollbacks | events/s |\n| oracledb.wait_time | application, configuration, administrative, concurrency, commit, network, user I/O, system I/O, scheduler, other | ms |\n| oracledb.tablespace_size | a dimension per active tablespace | KiB |\n| oracledb.tablespace_usage | a dimension per active tablespace | KiB |\n| oracledb.tablespace_usage_in_percent | a dimension per active tablespace | % |\n| oracledb.allocated_size | a dimension per active tablespace | B |\n| oracledb.allocated_usage | a dimension per active tablespace | B |\n| oracledb.allocated_usage_in_percent | a dimension per active tablespace | % |\n\n", - "integration_type": "collector", - "id": "python.d.plugin-oracledb-Oracle_DB", - "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/oracledb/metadata.yaml", - "related_resources": "" - }, - { - "meta": { - "plugin_name": "python.d.plugin", - "module_name": "pandas", - "monitored_instance": { - "name": "Pandas", - "link": "https://pandas.pydata.org/", - "categories": [ - "data-collection.generic-data-collection" - ], - "icon_filename": "pandas.png" - }, - "related_resources": { - "integrations": { - "list": [] - } - }, - "info_provided_to_referring_integrations": { - "description": "" - }, - "keywords": [ - "pandas", - "python" - ], - "most_popular": false - }, - "overview": 
"# Pandas\n\nPlugin: python.d.plugin\nModule: pandas\n\n## Overview\n\n[Pandas](https://pandas.pydata.org/) is a de-facto standard in reading and processing most types of structured data in Python.\nIf you have metrics appearing in a CSV, JSON, XML, HTML, or [other supported format](https://pandas.pydata.org/docs/user_guide/io.html),\neither locally or via some HTTP endpoint, you can easily ingest and present those metrics in Netdata, by leveraging the Pandas collector.\n\nThis collector can be used to collect pretty much anything that can be read by Pandas, and then processed by Pandas.\n\n\nThe collector uses [pandas](https://pandas.pydata.org/) to pull data and do pandas-based preprocessing, before feeding to Netdata.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Python Requirements\n\nThis collector depends on some Python (Python 3 only) packages that can usually be installed via `pip` or `pip3`.\n\n```bash\nsudo pip install pandas requests\n```\n\nNote: If you would like to use [`pandas.read_sql`](https://pandas.pydata.org/docs/reference/api/pandas.read_sql.html) to query a database, you will need to install the below packages as well.\n\n```bash\nsudo pip install 'sqlalchemy<2.0' psycopg2-binary\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/pandas.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/pandas.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| chart_configs | an array of chart configuration dictionaries | [] | yes |\n| chart_configs.name | name of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.title | title of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.family | [family](/docs/dashboards-and-charts/netdata-charts.md#families) of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.context | [context](/docs/dashboards-and-charts/netdata-charts.md#contexts) of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.type | the type of the chart to be displayed in the dashboard. 
| None | yes |\n| chart_configs.units | the units of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.df_steps | a series of pandas operations (one per line) that each returns a dataframe. | None | yes |\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n\n#### Examples\n\n##### Temperature API Example\n\nexample pulling some hourly temperature data, a chart for today forecast (mean,min,max) and another chart for current.\n\n```yaml\ntemperature:\n name: \"temperature\"\n update_every: 5\n chart_configs:\n - name: \"temperature_forecast_by_city\"\n title: \"Temperature By City - Today Forecast\"\n family: \"temperature.today\"\n context: \"pandas.temperature\"\n type: \"line\"\n units: \"Celsius\"\n df_steps: >\n pd.DataFrame.from_dict(\n {city: requests.get(f'https://api.open-meteo.com/v1/forecast?latitude={lat}&longitude={lng}&hourly=temperature_2m').json()['hourly']['temperature_2m']\n for (city,lat,lng)\n in [\n ('dublin', 53.3441, -6.2675),\n ('athens', 37.9792, 23.7166),\n ('london', 51.5002, -0.1262),\n ('berlin', 52.5235, 13.4115),\n ('paris', 48.8567, 2.3510),\n ('madrid', 40.4167, -3.7033),\n ('new_york', 40.71, -74.01),\n ('los_angeles', 34.05, -118.24),\n ]\n }\n );\n df.describe(); # get aggregate stats for each city;\n df.transpose()[['mean', 'max', 'min']].reset_index(); # just take mean, min, max;\n df.rename(columns={'index':'city'}); # some column renaming;\n df.pivot(columns='city').mean().to_frame().reset_index(); # force to be one row per city;\n df.rename(columns={0:'degrees'}); # some column renaming;\n pd.concat([df, df['city']+'_'+df['level_0']], axis=1); # add new column combining city and summary measurement label;\n df.rename(columns={0:'measurement'}); # some column renaming;\n df[['measurement', 'degrees']].set_index('measurement'); # just take two columns we want;\n df.sort_index(); # sort by city name;\n df.transpose(); # transpose so its just one wide row;\n - name: \"temperature_current_by_city\"\n title: \"Temperature By City - Current\"\n family: \"temperature.current\"\n context: \"pandas.temperature\"\n type: \"line\"\n units: \"Celsius\"\n df_steps: >\n pd.DataFrame.from_dict(\n {city: requests.get(f'https://api.open-meteo.com/v1/forecast?latitude={lat}&longitude={lng}&current_weather=true').json()['current_weather']\n for (city,lat,lng)\n in [\n ('dublin', 53.3441, -6.2675),\n ('athens', 37.9792, 23.7166),\n ('london', 51.5002, -0.1262),\n ('berlin', 52.5235, 13.4115),\n ('paris', 48.8567, 2.3510),\n ('madrid', 40.4167, -3.7033),\n ('new_york', 40.71, -74.01),\n ('los_angeles', 34.05, -118.24),\n ]\n }\n );\n df.transpose();\n df[['temperature']];\n df.transpose();\n\n```\n##### API CSV Example\n\nexample showing a read_csv from a url and some light pandas data wrangling.\n\n```yaml\nexample_csv:\n name: \"example_csv\"\n update_every: 2\n chart_configs:\n - name: \"london_system_cpu\"\n title: \"London System CPU - Ratios\"\n family: \"london_system_cpu\"\n context: \"pandas\"\n type:
\"line\"\n units: \"n\"\n df_steps: >\n pd.read_csv('https://london.my-netdata.io/api/v1/data?chart=system.cpu&format=csv&after=-60', storage_options={'User-Agent': 'netdata'});\n df.drop('time', axis=1);\n df.mean().to_frame().transpose();\n df.apply(lambda row: (row.user / row.system), axis = 1).to_frame();\n df.rename(columns={0:'average_user_system_ratio'});\n df*100;\n\n```\n##### API JSON Example\n\nexample showing a read_json from a url and some light pandas data wrangling.\n\n```yaml\nexample_json:\n name: \"example_json\"\n update_every: 2\n chart_configs:\n - name: \"london_system_net\"\n title: \"London System Net - Total Bandwidth\"\n family: \"london_system_net\"\n context: \"pandas\"\n type: \"area\"\n units: \"kilobits/s\"\n df_steps: >\n pd.DataFrame(requests.get('https://london.my-netdata.io/api/v1/data?chart=system.net&format=json&after=-1').json()['data'], columns=requests.get('https://london.my-netdata.io/api/v1/data?chart=system.net&format=json&after=-1').json()['labels']);\n df.drop('time', axis=1);\n abs(df);\n df.sum(axis=1).to_frame();\n df.rename(columns={0:'total_bandwidth'});\n\n```\n##### XML Example\n\nexample showing a read_xml from a url and some light pandas data wrangling.\n\n```yaml\nexample_xml:\n name: \"example_xml\"\n update_every: 2\n line_sep: \"|\"\n chart_configs:\n - name: \"temperature_forcast\"\n title: \"Temperature Forecast\"\n family: \"temp\"\n context: \"pandas.temp\"\n type: \"line\"\n units: \"celsius\"\n df_steps: >\n pd.read_xml('http://metwdb-openaccess.ichec.ie/metno-wdb2ts/locationforecast?lat=54.7210798611;long=-8.7237392806', xpath='./product/time[1]/location/temperature', parser='etree')|\n df.rename(columns={'value': 'dublin'})|\n df[['dublin']]|\n\n```\n##### SQL Example\n\nexample showing a read_sql from a postgres database using sqlalchemy.\n\n```yaml\nsql:\n name: \"sql\"\n update_every: 5\n chart_configs:\n - name: \"sql\"\n title: \"SQL Example\"\n family: \"sql.example\"\n context: \"example\"\n type: \"line\"\n units: \"percent\"\n df_steps: >\n pd.read_sql_query(\n sql='\\\n select \\\n random()*100 as metric_1, \\\n random()*100 as metric_2 \\\n ',\n con=create_engine('postgresql://localhost/postgres?user=netdata&password=netdata')\n );\n\n```\n", - "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n\nTo troubleshoot issues with the `pandas` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin pandas debug trace\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `pandas` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep pandas\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep pandas /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep pandas\n```\n\n", - "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nThis collector is expecting one row in the final pandas DataFrame. It is that first row that will be taken\nas the most recent values for each dimension on each chart using (`df.to_dict(orient='records')[0]`).\nSee [pd.to_dict()](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_dict.html).\"\n\n\n### Per Pandas instance\n\nThese metrics refer to the entire monitored application.\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n\n", - "integration_type": "collector", - "id": "python.d.plugin-pandas-Pandas", - "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/pandas/metadata.yaml", + "id": "slabinfo.plugin-slabinfo.plugin-Linux_kernel_SLAB_allocator_statistics", + "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/slabinfo.plugin/metadata.yaml", "related_resources": "" }, { "meta": { - "plugin_name": "python.d.plugin", - "module_name": "samba", + "plugin_name": "tc.plugin", + "module_name": "tc.plugin", "monitored_instance": { - "name": "Samba", - "link": "https://www.samba.org/samba/", + "name": "tc QoS classes", + "link": "https://wiki.linuxfoundation.org/networking/iproute2", "categories": [ - "data-collection.storage-mount-points-and-filesystems" + "data-collection.linux-systems.network-metrics" ], - "icon_filename": "samba.svg" + "icon_filename": "netdata.png" }, "related_resources": { "integrations": { @@ -19240,70 +19513,30 @@ "info_provided_to_referring_integrations": { "description": "" }, - "keywords": [ - "samba", - "file sharing" - ], + "keywords": [], "most_popular": false }, - "overview": "# Samba\n\nPlugin: python.d.plugin\nModule: samba\n\n## Overview\n\nThis collector monitors the performance metrics of Samba file sharing.\n\nIt is using the `smbstatus` command-line tool.\n\nExecuted commands:\n\n- `sudo -n smbstatus -P`\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n`smbstatus` is used, which can only be executed by `root`. 
It uses `sudo` and assumes that it is configured such that the `netdata` user can execute `smbstatus` as root without a password.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAfter all the permissions are satisfied, the `smbstatus -P` binary is executed.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Enable the samba collector\n\nThe `samba` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config directory](/docs/netdata-agent/configuration/README.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file.\n\n```bash\ncd /etc/netdata # Replace this path with your Netdata config directory, if different\nsudo ./edit-config python.d.conf\n```\nChange the value of the `samba` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](/packaging/installer/README.md#maintaining-a-netdata-agent-installation) for your system.\n\n\n#### Permissions and programs\n\nTo run the collector you need:\n\n- `smbstatus` program\n- `sudo` program\n- `smbd` must be compiled with profiling enabled\n- `smbd` must be started either with the `-P 1` option or inside `smb.conf` using `smbd profiling level`\n\nThe module uses `smbstatus`, which can only be executed by `root`. It uses `sudo` and assumes that it is configured such that the `netdata` user can execute `smbstatus` as root without a password.\n\n- add to your `/etc/sudoers` file:\n\n `which smbstatus` shows the full path to the binary.\n\n ```bash\n netdata ALL=(root) NOPASSWD: /path/to/smbstatus\n ```\n\n- Reset Netdata's systemd unit [CapabilityBoundingSet](https://www.freedesktop.org/software/systemd/man/systemd.exec.html#Capabilities) (Linux distributions with systemd)\n\n The default CapabilityBoundingSet doesn't allow using `sudo`, and is quite strict in general. 
Resetting is not optimal, but a next-best solution given the inability to execute `smbstatus` using `sudo`.\n\n\n As the `root` user, do the following:\n\n ```cmd\n mkdir /etc/systemd/system/netdata.service.d\n echo -e '[Service]\\nCapabilityBoundingSet=~' | tee /etc/systemd/system/netdata.service.d/unset-capability-bounding-set.conf\n systemctl daemon-reload\n systemctl restart netdata.service\n ```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/samba.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/samba.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\nmy_job_name:\n name: my_name\n update_every: 1\n\n```\n", - "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n\nTo troubleshoot issues with the `samba` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin samba debug trace\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `samba` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep samba\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep samba /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep samba\n```\n\n", + "overview": "# tc QoS classes\n\nPlugin: tc.plugin\nModule: tc.plugin\n\n## Overview\n\nExamine tc metrics to gain insights into Linux traffic control operations. Study packet flow rates, queue lengths, and drop rates to optimize network traffic flow.\n\nThe plugin uses the `tc` command to collect information about traffic control.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs access to the `tc` command to get the necessary metrics. To achieve this, Netdata modifies the permissions of the file `/usr/libexec/netdata/plugins.d/tc-qos-helper.sh`.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Create `tc-qos-helper.conf`\n\nIn order to view tc classes, you need to create the file `/etc/netdata/tc-qos-helper.conf` with content:\n\n```text\ntc_show=\"class\"\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:tc]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| script to run to get tc values | Path to script `tc-qos-helper.sh` | /usr/libexec/netdata/plugins.d/tc-qos-helper.sh | no |\n| enable show all classes and qdiscs for all interfaces | yes/no flag to control what data is presented.
| yes | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration using classes defined in `/etc/iproute2/tc_cls`.\n\nAn example of class IDs mapped to names in that file can be:\n\n```text\n2:1 Standard\n2:8 LowPriorityData\n2:10 HighThroughputData\n2:16 OAM\n2:18 LowLatencyData\n2:24 BroadcastVideo\n2:26 MultimediaStreaming\n2:32 RealTimeInteractive\n2:34 MultimediaConferencing\n2:40 Signalling\n2:46 Telephony\n2:48 NetworkControl\n```\n\nYou can read more about setting up the tc rules in rc.local in this [GitHub issue](https://github.com/netdata/netdata/issues/4563#issuecomment-455711973).\n\n\n```yaml\n[plugin:tc]\n script to run to get tc values = /usr/libexec/netdata/plugins.d/tc-qos-helper.sh\n enable show all classes and qdiscs for all interfaces = yes\n\n```\n", + "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Samba instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| syscall.rw | sendfile, recvfile | KiB/s |\n| smb2.rw | readout, writein, readin, writeout | KiB/s |\n| smb2.create_close | create, close | operations/s |\n| smb2.get_set_info | getinfo, setinfo | operations/s |\n| smb2.find | find | operations/s |\n| smb2.notify | notify | operations/s |\n| smb2.sm_counters | tcon, negprot, tdis, cancel, logoff, flush, lock, keepalive, break, sessetup | count |\n\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per network device direction\n\nMetrics related to QoS network device directions. Each direction (in/out) produces its own set of the following metrics.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | The network interface. 
|\n| device_name | The network interface name |\n| group | The device family |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| tc.qos | a dimension per class | kilobits/s |\n| tc.qos_packets | a dimension per class | packets/s |\n| tc.qos_dropped | a dimension per class | packets/s |\n| tc.qos_tokens | a dimension per class | tokens |\n| tc.qos_ctokens | a dimension per class | ctokens |\n\n", "integration_type": "collector", - "id": "python.d.plugin-samba-Samba", - "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/samba/metadata.yaml", + "id": "tc.plugin-tc.plugin-tc_QoS_classes", + "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/tc.plugin/metadata.yaml", "related_resources": "" }, { "meta": { - "plugin_name": "python.d.plugin", - "module_name": "spigotmc", + "plugin_name": "timex.plugin", + "module_name": "timex.plugin", "monitored_instance": { - "name": "SpigotMC", + "name": "Timex", "link": "", "categories": [ - "data-collection.gaming" - ], - "icon_filename": "spigot.jfif" - }, - "related_resources": { - "integrations": { - "list": [] - } - }, - "info_provided_to_referring_integrations": { - "description": "" - }, - "keywords": [ - "minecraft server", - "spigotmc server", - "spigot" - ], - "most_popular": false - }, - "overview": "# SpigotMC\n\nPlugin: python.d.plugin\nModule: spigotmc\n\n## Overview\n\nThis collector monitors SpigotMC server performance, in the form of ticks per second average, memory utilization, and active users.\n\n\nIt sends the `tps`, `list` and `online` commands to the Server, and gathers the metrics from the responses.\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, this collector will attempt to connect to a Spigot server running on the local host on port `25575`.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Enable the Remote Console Protocol\n\nUnder your SpigotMC server's `server.properties` configuration file, you should set `enable-rcon` to `true`.\n\nThis will allow the Server to listen and respond to queries over the rcon protocol.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/spigotmc.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/spigotmc.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter 
is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 1 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| host | The host's IP to connect to. | localhost | yes |\n| port | The port the remote console is listening on. | 25575 | yes |\n| password | Remote console password if any. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic configuration example.\n\n```yaml\nlocal:\n name: local_server\n url: 127.0.0.1\n port: 25575\n\n```\n##### Basic Authentication\n\nAn example using basic password for authentication with the remote console.\n\n```yaml\nlocal:\n name: local_server_pass\n url: 127.0.0.1\n port: 25575\n password: 'foobar'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\nlocal_server:\n name : my_local_server\n url : 127.0.0.1\n port: 25575\n\nremote_server:\n name : another_remote_server\n url : 192.0.2.1\n port: 25575\n\n```\n", - "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n\nTo troubleshoot issues with the `spigotmc` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin spigotmc debug trace\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `spigotmc` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep spigotmc\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep spigotmc /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep spigotmc\n```\n\n", - "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per SpigotMC instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| spigotmc.tps | 1 Minute Average, 5 Minute Average, 15 Minute Average | ticks |\n| spigotmc.users | Users | users |\n| spigotmc.mem | used, allocated, max | MiB |\n\n", - "integration_type": "collector", - "id": "python.d.plugin-spigotmc-SpigotMC", - "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/spigotmc/metadata.yaml", - "related_resources": "" - }, - { - "meta": { - "plugin_name": "python.d.plugin", - "module_name": "varnish", - "monitored_instance": { - "name": "Varnish", - "link": "https://varnish-cache.org/", - "categories": [ - "data-collection.web-servers-and-web-proxies" + "data-collection.system-clock-and-ntp" ], - "icon_filename": "varnish.svg" + "icon_filename": "syslog.png" }, "related_resources": { "integrations": { @@ -19313,37 +19546,30 @@ "info_provided_to_referring_integrations": { "description": "" }, - "keywords": [ - "varnish", - "varnishstat", - "varnishd", - "cache", - "web server", - "web cache" - ], + "keywords": [], "most_popular": false }, - "overview": "# Varnish\n\nPlugin: python.d.plugin\nModule: varnish\n\n## Overview\n\nThis collector monitors Varnish metrics about HTTP accelerator global, Backends (VBE) and Storages (SMF, SMA, MSE) statistics.\n\nNote that both, Varnish-Cache (free and open source) and Varnish-Plus (Commercial/Enterprise version), are supported.\n\n\nIt uses the `varnishstat` tool in order to collect the metrics.\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n`netdata` user must be a member of the `varnish` group.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, if the permissions are satisfied, the `varnishstat` tool will be executed on the host.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Provide the necessary permissions\n\nIn order for the collector to work, you need to add the `netdata` user to the `varnish` user group, so that it can execute the `varnishstat` tool:\n\n```\nusermod -aG varnish netdata\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/varnish.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/varnish.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more 
JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| instance_name | the name of the varnishd instance to get logs from. If not specified, the local host name is used. | | yes |\n| update_every | Sets the default data collection frequency. | 10 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njob_name:\n instance_name: ''\n\n```\n", - "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n\nTo troubleshoot issues with the `varnish` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin varnish debug trace\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `varnish` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep varnish\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep varnish /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep varnish\n```\n\n", - "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Varnish instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| varnish.session_connection | accepted, dropped | connections/s |\n| varnish.client_requests | received | requests/s |\n| varnish.all_time_hit_rate | hit, miss, hitpass | percentage |\n| varnish.current_poll_hit_rate | hit, miss, hitpass | percentage |\n| varnish.cached_objects_expired | objects | expired/s |\n| varnish.cached_objects_nuked | objects | nuked/s |\n| varnish.threads_total | None | number |\n| varnish.threads_statistics | created, failed, limited | threads/s |\n| varnish.threads_queue_len | in queue | requests |\n| varnish.backend_connections | successful, unhealthy, reused, closed, recycled, failed | connections/s |\n| varnish.backend_requests | sent | requests/s |\n| varnish.esi_statistics | errors, warnings | problems/s |\n| varnish.memory_usage | free, allocated | MiB |\n| varnish.uptime | uptime | seconds |\n\n### Per Backend\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| varnish.backend | header, body | kilobits/s |\n\n### Per Storage\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| varnish.storage_usage | free, allocated | KiB |\n| varnish.storage_alloc_objs | allocated | objects |\n\n", + "overview": "# Timex\n\nPlugin: timex.plugin\nModule: timex.plugin\n\n## Overview\n\nExamine Timex metrics to gain insights into system clock operations. Study time sync status, clock drift, and adjustments to ensure accurate system timekeeping.\n\nIt uses the `adjtimex` system call on Linux, and `ntp_adjtime` on FreeBSD or macOS, to monitor the system kernel clock synchronization state.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:timex]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nAt least one option ('clock synchronization state', 'time offset') needs to be enabled for this collector to run.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency.
| 1 | no |\n| clock synchronization state | Make chart showing system clock synchronization state. | yes | yes |\n| time offset | Make chart showing computed time offset between local system and reference clock | yes | yes |\n\n#### Examples\n\n##### Basic\n\nA basic configuration example.\n\n```yaml\n[plugin:timex]\n update every = 1\n clock synchronization state = yes\n time offset = yes\n\n```\n", + "troubleshooting": "", + "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ system_clock_sync_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/timex.conf) | system.clock_sync_state | when set to 0, the system kernel believes the system clock is not properly synchronized to a reliable server |\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Timex instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.clock_sync_state | state | state |\n| system.clock_status | unsync, clockerr | status |\n| system.clock_sync_offset | offset | milliseconds |\n\n", "integration_type": "collector", - "id": "python.d.plugin-varnish-Varnish", - "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/varnish/metadata.yaml", + "id": "timex.plugin-timex.plugin-Timex", + "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/timex.plugin/metadata.yaml", "related_resources": "" }, { "meta": { - "plugin_name": "python.d.plugin", - "module_name": "w1sensor", + "plugin_name": "windows.plugin", + "module_name": "PerflibMemory", "monitored_instance": { - "name": "1-Wire Sensors", - "link": "https://www.analog.com/en/product-category/1wire-temperature-sensors.html", + "name": "Memory statistics", + "link": "https://learn.microsoft.com/en-us/windows/win32/Memory/memory-management", "categories": [ - "data-collection.hardware-devices-and-sensors" + "data-collection.windows-systems" ], - "icon_filename": "1-wire.png" + "icon_filename": "windows.svg" }, "related_resources": { "integrations": { @@ -19354,33 +19580,32 @@ "description": "" }, "keywords": [ - "temperature", - "sensor", - "1-wire" + "memory", + "swap" ], "most_popular": false }, - "overview": "# 1-Wire Sensors\n\nPlugin: python.d.plugin\nModule: w1sensor\n\n## Overview\n\nMonitor 1-Wire Sensors metrics with Netdata for optimal environmental conditions monitoring. Enhance your environmental monitoring with real-time insights and alerts.\n\nThe collector uses the wire, w1_gpio, and w1_therm kernel modules. 
Currently temperature sensors are supported and automatically detected.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector will try to auto detect available 1-Wire devices.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Required Linux kernel modules\n\nMake sure `wire`, `w1_gpio`, and `w1_therm` kernel modules are loaded.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/w1sensor.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/w1sensor.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| name_<1-Wire id> | This allows associating a human readable name with a sensor's 1-Wire identifier. | | no |\n\n#### Examples\n\n##### Provide human readable names\n\nAssociate two 1-Wire identifiers with human readable names.\n\n```yaml\nsensors:\n name_00000022276e: 'Machine room'\n name_00000022298f: 'Rack 12'\n\n```\n", - "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n\nTo troubleshoot issues with the `w1sensor` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin w1sensor debug trace\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `w1sensor` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep w1sensor\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep w1sensor /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep w1sensor\n```\n\n", + "overview": "# Memory statistics\n\nPlugin: windows.plugin\nModule: PerflibMemory\n\n## Overview\n\nThis collector monitors swap and memory pool statistics on Windows systems.\n\n\nIt queries for the 'Memory' object from Perflib in order to gather the metrics.\n\n\nThis collector is only supported on the following platforms:\n\n- windows\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector automatically detects all of the metrics, no further configuration is required.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:windows]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| PerflibMemory | An option to enable or disable the data collection. 
| yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", + "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per 1-Wire Sensors instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| w1sensor.temp | a dimension per sensor | Celsius |\n\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Memory statistics instance\n\nThese metrics refer to the entire monitored instance\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.swap_iops | read, write | operations/s |\n| mem.swap_pages_io | read, write | pages/s |\n| mem.system_pool_size | paged, pool-paged | bytes |\n\n", "integration_type": "collector", - "id": "python.d.plugin-w1sensor-1-Wire_Sensors", - "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/w1sensor/metadata.yaml", + "id": "windows.plugin-PerflibMemory-Memory_statistics", + "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/windows.plugin/metadata.yaml", "related_resources": "" }, { "meta": { - "plugin_name": "python.d.plugin", - "module_name": "zscores", + "plugin_name": "windows.plugin", + "module_name": "PerflibProcesses", "monitored_instance": { - "name": "python.d zscores", - "link": "https://en.wikipedia.org/wiki/Standard_score", + "name": "System statistics", + "link": "https://learn.microsoft.com/en-us/windows/win32/procthread/processes-and-threads", "categories": [ - "data-collection.other" + "data-collection.windows-systems" ], - "icon_filename": "" + "icon_filename": "windows.svg" }, "related_resources": { "integrations": { @@ -19391,36 +19616,33 @@ "description": "" }, "keywords": [ - "zscore", - "z-score", - "standard score", - "standard deviation", - "anomaly detection", - "statistical anomaly detection" + "process counts", + "threads", + "context switch" ], "most_popular": false }, - "overview": "# python.d zscores\n\nPlugin: python.d.plugin\nModule: zscores\n\n## Overview\n\nBy using smoothed, rolling [Z-Scores](https://en.wikipedia.org/wiki/Standard_score) for selected metrics or charts you can narrow down your focus and shorten root cause analysis.\n\n\nThis collector uses the [Netdata rest api](/src/web/api/README.md) to get the `mean` and `stddev`\nfor each dimension on specified charts over a time range (defined by `train_secs` and `offset_secs`).\n\nFor each dimension it will calculate a Z-Score as `z = (x - mean) / stddev` (clipped at `z_clip`). 
Scores are then smoothed over\ntime (`z_smooth_n`) and, if `mode: 'per_chart'`, aggregated across dimensions to a smoothed, rolling chart level Z-Score at each time step.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Python Requirements\n\nThis collector will only work with Python 3 and requires the below packages be installed.\n\n```bash\n# become netdata user\nsudo su -s /bin/bash netdata\n# install required packages\npip3 install numpy pandas requests netdata-pandas==0.0.38\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/zscores.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/zscores.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| charts_regex | what charts to pull data for - A regex like `system\\..*/` or `system\\..*/apps.cpu/apps.mem` etc. | system\\..* | yes |\n| train_secs | length of time (in seconds) to base calculations off for mean and stddev. | 14400 | yes |\n| offset_secs | offset (in seconds) preceding latest data to ignore when calculating mean and stddev. | 300 | yes |\n| train_every_n | recalculate the mean and stddev every n steps of the collector. | 900 | yes |\n| z_smooth_n | smooth the z score (to reduce sensitivity to spikes) by averaging it over last n values. | 15 | yes |\n| z_clip | cap absolute value of zscore (before smoothing) for better stability. | 10 | yes |\n| z_abs | set z_abs: 'true' to make all zscores be absolute values only. | true | yes |\n| burn_in | burn in period in which to initially calculate mean and stddev on every step. | 2 | yes |\n| mode | mode can be to get a zscore 'per_dim' or 'per_chart'. | per_chart | yes |\n| per_chart_agg | per_chart_agg is how you aggregate from dimension to chart when mode='per_chart'. | mean | yes |\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. 
| yes | no |\n\n#### Examples\n\n##### Default\n\nDefault configuration.\n\n```yaml\nlocal:\n name: 'local'\n host: '127.0.0.1:19999'\n charts_regex: 'system\\..*'\n charts_to_exclude: 'system.uptime'\n train_secs: 14400\n offset_secs: 300\n train_every_n: 900\n z_smooth_n: 15\n z_clip: 10\n z_abs: 'true'\n burn_in: 2\n mode: 'per_chart'\n per_chart_agg: 'mean'\n\n```\n", - "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n\nTo troubleshoot issues with the `zscores` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin zscores debug trace\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `zscores` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep zscores\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep zscores /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep zscores\n```\n\n", + "overview": "# System statistics\n\nPlugin: windows.plugin\nModule: PerflibProcesses\n\n## Overview\n\nThis collector monitors the current number of processes, threads, and context switches on Windows systems.\n\n\nIt queries the 'System' object from Perflib in order to gather the metrics.\n\n\nThis collector is only supported on the following platforms:\n\n- windows\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector automatically detects all of the metrics, no further configuration is required.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:windows]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| PerflibProcesses | An option to enable or disable the data collection. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", + "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per python.d zscores instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| zscores.z | a dimension per chart or dimension | z |\n| zscores.3stddev | a dimension per chart or dimension | count |\n\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per System statistics instance\n\nThese metrics refer to the entire monitored instance.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.processes | running | processes |\n| system.threads | threads | threads |\n| system.ctxt | switches | context switches/s |\n\n", "integration_type": "collector", - "id": "python.d.plugin-zscores-python.d_zscores", - "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/zscores/metadata.yaml", + "id": "windows.plugin-PerflibProcesses-System_statistics", + "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/windows.plugin/metadata.yaml", "related_resources": "" }, { "meta": { - "plugin_name": "slabinfo.plugin", - "module_name": "slabinfo.plugin", + "plugin_name": "windows.plugin", + "module_name": "PerflibThermalZone", "monitored_instance": { - "name": "Linux kernel SLAB allocator statistics", - "link": "https://kernel.org/", + "name": "System thermal zone", + "link": "https://learn.microsoft.com/en-us/windows-hardware/design/device-experiences/design-guide", "categories": [ - "data-collection.linux-systems.kernel-metrics" + "data-collection.windows-systems" ], - "icon_filename": "linuxserver.svg" + "icon_filename": "windows.svg" }, "related_resources": { "integrations": { @@ -19431,88 +19653,19 @@ "description": "" }, "keywords": [ - "linux kernel", - "slab", - "slub", - "slob", - "slabinfo" + "thermal", + "temperature" ], "most_popular": false }, - "overview": "# Linux kernel SLAB allocator statistics\n\nPlugin: slabinfo.plugin\nModule: slabinfo.plugin\n\n## Overview\n\nCollects metrics on kernel SLAB cache utilization to monitor the low-level performance impact of workloads in the kernel.\n\n\nThe plugin parses `/proc/slabinfo`\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single 
instance of this integration.\n\nThis integration requires read access to `/proc/slabinfo`, which is accessible only to the root user by default. Netdata uses Linux Capabilities to give the plugin access to this file. `CAP_DAC_READ_SEARCH` is added automatically during installation. This capability allows bypassing file read permission checks and directory read and execute permission checks. If file capabilities are not usable, then the plugin is instead installed with the SUID bit set in permissions so that it runs as root.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nDue to the large number of metrics generated by this integration, it is disabled by default and must be manually enabled inside `/etc/netdata/netdata.conf`\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",
-    "setup": "## Setup\n\n### Prerequisites\n\n#### Minimum setup\n\nIf you installed `netdata` using a package manager, it is also necessary to install the package `netdata-plugin-slabinfo`.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugins]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| Enable plugin | As described above plugin is disabled by default, this option is used to enable plugin. | no | yes |\n\n#### Examples\nThere are no configuration examples.\n\n",
+    "overview": "# System thermal zone\n\nPlugin: windows.plugin\nModule: PerflibThermalZone\n\n## Overview\n\nThis collector monitors thermal zone statistics on Windows systems.\n\n\nIt queries for the 'Thermal Zone Information' object from Perflib in order to gather the metrics.\n\n\nThis collector is only supported on the following platforms:\n\n- windows\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector automatically detects all of the metrics, no further configuration is required.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",
+    "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:windows]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| PerflibThermalZone | An option to enable or disable the data collection. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nSLAB cache utilization metrics for the whole system.\n\n### Per Linux kernel SLAB allocator statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.slabmemory | a dimension per cache | B |\n| mem.slabfilling | a dimension per cache | % |\n| mem.slabwaste | a dimension per cache | B |\n\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Thermal zone\n\nThese metrics refer to a Thermal zone\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.thermalzone_temperature | temperature | celsius |\n\n", "integration_type": "collector", - "id": "slabinfo.plugin-slabinfo.plugin-Linux_kernel_SLAB_allocator_statistics", - "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/slabinfo.plugin/metadata.yaml", - "related_resources": "" - }, - { - "meta": { - "plugin_name": "tc.plugin", - "module_name": "tc.plugin", - "monitored_instance": { - "name": "tc QoS classes", - "link": "https://wiki.linuxfoundation.org/networking/iproute2", - "categories": [ - "data-collection.linux-systems.network-metrics" - ], - "icon_filename": "netdata.png" - }, - "related_resources": { - "integrations": { - "list": [] - } - }, - "info_provided_to_referring_integrations": { - "description": "" - }, - "keywords": [], - "most_popular": false - }, - "overview": "# tc QoS classes\n\nPlugin: tc.plugin\nModule: tc.plugin\n\n## Overview\n\nExamine tc metrics to gain insights into Linux traffic control operations. Study packet flow rates, queue lengths, and drop rates to optimize network traffic flow.\n\nThe plugin uses `tc` command to collect information about Traffic control.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs to access command `tc` to get the necessary metrics. 
To achieve this netdata modifies permission of file `/usr/libexec/netdata/plugins.d/tc-qos-helper.sh`.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Create `tc-qos-helper.conf`\n\nIn order to view tc classes, you need to create the file `/etc/netdata/tc-qos-helper.conf` with content:\n\n```conf\ntc_show=\"class\"\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:tc]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| script to run to get tc values | Path to script `tc-qos-helper.sh` | usr/libexec/netdata/plugins.d/tc-qos-helper.s | no |\n| enable show all classes and qdiscs for all interfaces | yes/no flag to control what data is presented. | yes | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration using classes defined in `/etc/iproute2/tc_cls`.\n\nAn example of class IDs mapped to names in that file can be:\n\n```conf\n2:1 Standard\n2:8 LowPriorityData\n2:10 HighThroughputData\n2:16 OAM\n2:18 LowLatencyData\n2:24 BroadcastVideo\n2:26 MultimediaStreaming\n2:32 RealTimeInteractive\n2:34 MultimediaConferencing\n2:40 Signalling\n2:46 Telephony\n2:48 NetworkControl\n```\n\nYou can read more about setting up the tc rules in rc.local in this [GitHub issue](https://github.com/netdata/netdata/issues/4563#issuecomment-455711973).\n\n\n```yaml\n[plugin:tc]\n script to run to get tc values = /usr/libexec/netdata/plugins.d/tc-qos-helper.sh\n enable show all classes and qdiscs for all interfaces = yes\n\n```\n", - "troubleshooting": "", - "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per network device direction\n\nMetrics related to QoS network device directions. Each direction (in/out) produces its own set of the following metrics.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | The network interface. 
|\n| device_name | The network interface name |\n| group | The device family |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| tc.qos | a dimension per class | kilobits/s |\n| tc.qos_packets | a dimension per class | packets/s |\n| tc.qos_dropped | a dimension per class | packets/s |\n| tc.qos_tokens | a dimension per class | tokens |\n| tc.qos_ctokens | a dimension per class | ctokens |\n\n", - "integration_type": "collector", - "id": "tc.plugin-tc.plugin-tc_QoS_classes", - "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/tc.plugin/metadata.yaml", - "related_resources": "" - }, - { - "meta": { - "plugin_name": "timex.plugin", - "module_name": "timex.plugin", - "monitored_instance": { - "name": "Timex", - "link": "", - "categories": [ - "data-collection.system-clock-and-ntp" - ], - "icon_filename": "syslog.png" - }, - "related_resources": { - "integrations": { - "list": [] - } - }, - "info_provided_to_referring_integrations": { - "description": "" - }, - "keywords": [], - "most_popular": false - }, - "overview": "# Timex\n\nPlugin: timex.plugin\nModule: timex.plugin\n\n## Overview\n\nExamine Timex metrics to gain insights into system clock operations. Study time sync status, clock drift, and adjustments to ensure accurate system timekeeping.\n\nIt uses system call adjtimex on Linux and ntp_adjtime on FreeBSD or Mac to monitor the system kernel clock synchronization state.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:timex]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nAt least one option ('clock synchronization state', 'time offset') needs to be enabled for this collector to run.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| clock synchronization state | Make chart showing system clock synchronization state. 
| yes | yes |\n| time offset | Make chart showing computed time offset between local system and reference clock | yes | yes |\n\n#### Examples\n\n##### Basic\n\nA basic configuration example.\n\n```yaml\n[plugin:timex]\n update every = 1\n clock synchronization state = yes\n time offset = yes\n\n```\n", - "troubleshooting": "", - "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ system_clock_sync_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/timex.conf) | system.clock_sync_state | when set to 0, the system kernel believes the system clock is not properly synchronized to a reliable server |\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Timex instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.clock_sync_state | state | state |\n| system.clock_status | unsync, clockerr | status |\n| system.clock_sync_offset | offset | milliseconds |\n\n", - "integration_type": "collector", - "id": "timex.plugin-timex.plugin-Timex", - "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/timex.plugin/metadata.yaml", + "id": "windows.plugin-PerflibThermalZone-System_thermal_zone", + "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/windows.plugin/metadata.yaml", "related_resources": "" }, { @@ -19539,7 +19692,7 @@ "most_popular": false }, "overview": "# Xen XCP-ng\n\nPlugin: xenstat.plugin\nModule: xenstat.plugin\n\n## Overview\n\nThis collector monitors XenServer and XCP-ng host and domains statistics.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis plugin requires the `xen-dom0-libs-devel` and `yajl-devel` libraries to be installed.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Libraries\n\n1. Install `xen-dom0-libs-devel` and `yajl-devel` using the package manager of your system.\n\n Note: On Cent-OS systems you will need `centos-release-xen` repository and the required package for xen is `xen-devel`\n\n2. Re-install Netdata from source. The installer will detect that the required libraries are now available and will also build xenstat.plugin.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:xenstat]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n\n#### Examples\nThere are no configuration examples.\n\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### Libraries\n\n1. Install `xen-dom0-libs-devel` and `yajl-devel` using the package manager of your system.\n\n Note: On CentOS systems you will need the `centos-release-xen` repository and the required package for xen is `xen-devel`\n\n2. Re-install Netdata from source. The installer will detect that the required libraries are now available and will also build xenstat.plugin.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:xenstat]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n\n#### Examples\nThere are no configuration examples.\n\n",
     "troubleshooting": "",
     "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n",
     "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Xen XCP-ng instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| xenstat.mem | free, used | MiB |\n| xenstat.domains | domains | domains |\n| xenstat.cpus | cpus | cpus |\n| xenstat.cpu_freq | frequency | MHz |\n\n### Per xendomain\n\nMetrics related to Xen domains. Each domain provides its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| xendomain.states | running, blocked, paused, shutdown, crashed, dying | boolean |\n| xendomain.cpu | used | percentage |\n| xendomain.mem | maximum, current | MiB |\n| xendomain.vcpu | a dimension per vcpu | percentage |\n\n### Per xendomain vbd\n\nMetrics related to Xen domain Virtual Block Device. 
Each VBD provides its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| xendomain.oo_req_vbd | requests | requests/s |\n| xendomain.requests_vbd | read, write | requests/s |\n| xendomain.sectors_vbd | read, write | sectors/s |\n\n### Per xendomain network\n\nMetrics related to Xen domain network interfaces. Each network interface provides its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| xendomain.bytes_network | received, sent | kilobits/s |\n| xendomain.packets_network | received, sent | packets/s |\n| xendomain.errors_network | received, sent | errors/s |\n| xendomain.drops_network | received, sent | drops/s |\n\n", @@ -19593,7 +19746,7 @@ ], "additional_info": "Did you know you can also deploy Netdata on your OS using Kubernetes or Docker?\n", "related_resources": {}, - "platform_info": "\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", + "platform_info": "", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml" @@ -19643,7 +19796,7 @@ ], "additional_info": "Did you know you can also deploy Netdata on your OS using Kubernetes or Docker?\n", "related_resources": {}, - "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 2 | Core | x86_64, aarch64 | |\n| 2023 | Core | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", + "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 2 | Core | x86_64, aarch64 | |\n| 2023 | Core | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.\n", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml" @@ -19693,7 +19846,7 @@ ], "additional_info": "Did you know you can also deploy Netdata on your OS using Kubernetes or Docker?\n", "related_resources": {}, - "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| latest | Intermediate | | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", + "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| latest | Intermediate | | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.\n", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml" @@ -19743,7 +19896,7 @@ ], "additional_info": "Did you know you can also deploy Netdata on your OS using Kubernetes or Docker?\n", "related_resources": {}, - "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | 
Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 7 | Core | x86_64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", + "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 7 | Core | x86_64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.\n", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml" @@ -19793,7 +19946,7 @@ ], "additional_info": "Did you know you can also deploy Netdata on your OS using Kubernetes or Docker?\n", "related_resources": {}, - "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 9 | Community | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", + "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 9 | Community | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.\n", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml" @@ -19843,7 +19996,7 @@ ], "additional_info": "Did you know you can also deploy Netdata on your OS using Kubernetes or Docker?\n", "related_resources": {}, - "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 12 | Core | i386, amd64, armhf, arm64 | |\n| 11 | Core | i386, amd64, armhf, arm64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", + "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 12 | Core | i386, amd64, armhf, arm64 | |\n| 11 | Core | i386, amd64, armhf, arm64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.\n", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml" @@ -19908,7 +20061,7 @@ ], "additional_info": "", "related_resources": {}, - "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 19.03 or newer | Core | linux/i386, linux/amd64, linux/arm/v7, linux/arm64, linux/ppc64le | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", + "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 19.03 or newer | Core | linux/i386, linux/amd64, linux/arm/v7, linux/arm64, linux/ppc64le | |\n\nOn other releases of 
this distribution, a static binary will be installed in `/opt/netdata`.\n", "quick_start": 3, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml" @@ -19958,7 +20111,7 @@ ], "additional_info": "Did you know you can also deploy Netdata on your OS using Kubernetes or Docker?\n", "related_resources": {}, - "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 40 | Core | x86_64, aarch64 | |\n| 39 | Core | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", + "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 41 | Core | x86_64, aarch64 | |\n| 40 | Core | x86_64, aarch64 | |\n| 39 | Core | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.\n", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml" @@ -20008,7 +20161,7 @@ ], "additional_info": "Netdata can also be installed via [FreeBSD ports](https://www.freshports.org/net-mgmt/netdata).\n", "related_resources": {}, - "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 13-STABLE | Community | | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", + "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 13-STABLE | Community | | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.\n", "quick_start": 6, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml" @@ -20060,7 +20213,7 @@ "additional_info": "", "related_resources": {}, "most_popular": true, - "platform_info": "\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", + "platform_info": "", "quick_start": 4, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml" @@ -20110,7 +20263,7 @@ ], "additional_info": "Did you know you can also deploy Netdata on your OS using Kubernetes or Docker?\n", "related_resources": {}, - "platform_info": "\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", + "platform_info": "", "quick_start": 1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml" @@ -20149,7 +20302,7 @@ ], "additional_info": "Did you know you can also deploy Netdata on your OS using Kubernetes or Docker?\n", "related_resources": {}, - "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 13 | Community | | |\n| 12 | Community | | |\n| 11 | Community | | |\n\nOn other releases of this 
distribution, a static binary will be installed in `/opt/netdata`.", + "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 13 | Community | | |\n| 12 | Community | | |\n| 11 | Community | | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.\n", "quick_start": 5, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml" @@ -20199,7 +20352,7 @@ ], "additional_info": "Did you know you can also deploy Netdata on your OS using Kubernetes or Docker?\n", "related_resources": {}, - "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| latest | Intermediate | | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", + "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| latest | Intermediate | | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.\n", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml" @@ -20249,7 +20402,7 @@ ], "additional_info": "Did you know you can also deploy Netdata on your OS using Kubernetes or Docker?\n", "related_resources": {}, - "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 15.6 | Core | x86_64, aarch64 | |\n| 15.5 | Core | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", + "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 15.6 | Core | x86_64, aarch64 | |\n| 15.5 | Core | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.\n", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml" @@ -20299,7 +20452,7 @@ ], "additional_info": "Did you know you can also deploy Netdata on your OS using Kubernetes or Docker?\n", "related_resources": {}, - "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 8 | Core | x86_64, aarch64 | |\n| 9 | Core | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", + "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 8 | Core | x86_64, aarch64 | |\n| 9 | Core | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.\n", "quick_start": -1, 
"integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml" @@ -20349,7 +20502,7 @@ ], "additional_info": "Did you know you can also deploy Netdata on your OS using Kubernetes or Docker?\n", "related_resources": {}, - "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 9.x | Core | x86_64, aarch64 | |\n| 8.x | Core | x86_64, aarch64 | |\n| 7.x | Core | x86_64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", + "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 9.x | Core | x86_64, aarch64 | |\n| 8.x | Core | x86_64, aarch64 | |\n| 7.x | Core | x86_64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.\n", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml" @@ -20399,7 +20552,7 @@ ], "additional_info": "Did you know you can also deploy Netdata on your OS using Kubernetes or Docker?\n", "related_resources": {}, - "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 9 | Core | x86_64, aarch64 | |\n| 8 | Core | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", + "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 9 | Core | x86_64, aarch64 | |\n| 8 | Core | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.\n", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml" @@ -20449,7 +20602,7 @@ ], "additional_info": "Did you know you can also deploy Netdata on your OS using Kubernetes or Docker?\n", "related_resources": {}, - "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 24.04 | Core | amd64, armhf, arm64 | |\n| 22.04 | Core | amd64, armhf, arm64 | |\n| 20.04 | Core | amd64, armhf, arm64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", + "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 24.04 | Core | amd64, armhf, arm64 | |\n| 24.10 | Core | amd64, armhf, arm64 | |\n| 22.04 | Core | amd64, armhf, arm64 | |\n| 20.04 | Core | amd64, armhf, arm64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.\n", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml" @@ -20467,39 +20620,26 @@ "keywords": [ "windows" ], - "install_description": "1. 
Install [Windows Exporter](https://github.com/prometheus-community/windows_exporter) on every Windows host you want to monitor.\n2. Install Netdata agent on Linux, FreeBSD or Mac.\n3. Configure Netdata to collect data remotely from your Windows hosts by adding one job per host to windows.conf file. See the [configuration section](https://learn.netdata.cloud/docs/data-collection/monitor-anything/System%20Metrics/Windows-machines#configuration) for details.\n4. Enable [virtual nodes](https://learn.netdata.cloud/docs/data-collection/windows-systems#virtual-nodes) configuration so the windows nodes are displayed as separate nodes.\n",
+    "install_description": "Netdata offers a convenient Windows installer for easy setup. This executable provides two distinct installation modes, outlined below.\n\nThe Windows installer is currently in beta, and thus it is only available in the nightly release channel. A stable version will be released soon.\n\n## Graphical User Interface (GUI)\n\n1. Download the Netdata [Windows installer](https://github.com/netdata/netdata-nightlies/releases) from the latest nightly release.\n2. Run the `.exe` file and proceed with the installation process.\n3. At a minimum, you will need your Netdata Cloud Space's claim token to connect your Agent to your Space.\n\n## Silent Mode (Command line)\n\nIf you prefer to install Netdata through the command line, you can do so by running the following command in Windows PowerShell with administrator rights.\n",
     "methods": [
       {
-        "method": "wget",
+        "method": "Silent Mode (Command line)",
         "commands": [
-          {
-            "channel": "nightly",
-            "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"
-          },
           {
             "channel": "stable",
-            "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"
-          }
-        ]
-      },
-      {
-        "method": "curl",
-        "commands": [
-          {
-            "channel": "nightly",
-            "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"
+            "command": "$ProgressPreference = 'SilentlyContinue';\nInvoke-WebRequest https://github.com/netdata/netdata-nightlies/releases/latest/download/netdata-installer-x64.exe -OutFile \"netdata-installer-x64.exe\";\n.\\netdata-installer-x64.exe /S /A `\n\n"
           },
           {
-            "channel": "stable",
-            "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"
+            "channel": "nightly",
+            "command": "$ProgressPreference = 'SilentlyContinue';\nInvoke-WebRequest https://github.com/netdata/netdata-nightlies/releases/latest/download/netdata-installer-x64.exe -OutFile \"netdata-installer-x64.exe\";\n.\\netdata-installer-x64.exe /S /A `\n\n"
           }
         ]
       }
     ],
-    "additional_info": "",
+    "additional_info": "### Available Options\n\n| Option | Description |\n|-----------|--------------------------------------------------------------------------------------------------|\n| `/S` | Enables silent mode installation. |\n| `/A` | Accepts all Netdata licenses. This option is mandatory for silent installations. |\n| `/D` | Specifies the desired installation directory (defaults to `C:\Program Files\Netdata`). |\n| `/T` | Opens the `MSYS2` terminal after installation. |\n| `/I` | Forces insecure connections, bypassing hostname verification (use only if absolutely necessary). |\n| `/TOKEN=` | Sets the Claim Token for your Netdata Cloud Space. 
|\n| `/ROOMS=` | Comma-separated list of Room IDs where you want your node to appear. |\n| `/PROXY=` | Sets the proxy server address if your network requires one. |\n", "related_resources": {}, "most_popular": true, - "platform_info": "\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", + "platform_info": "", "quick_start": 2, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml" @@ -20526,7 +20666,7 @@ "time series" ], "overview": "# AppOptics\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). 
| | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n",
+    "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. 
If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": "" @@ -20547,7 +20687,7 @@ "Kinesis" ], "overview": "# AWS Kinesis\n\nExport metrics to AWS Kinesis Data Streams\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- First [install](https://docs.aws.amazon.com/en_us/sdk-for-cpp/v1/developer-guide/setup.html) AWS SDK for C++\n- Here are the instructions when building from source, to ensure 3rd party dependencies are installed:\n ```bash\n git clone --recursive https://github.com/aws/aws-sdk-cpp.git\n cd aws-sdk-cpp/\n git submodule update --init --recursive\n mkdir BUILT\n cd BUILT\n cmake -DCMAKE_INSTALL_PREFIX=/usr -DBUILD_ONLY=kinesis ..\n make\n make install\n ```\n- `libcrypto`, `libssl`, and `libcurl` are also required to compile Netdata with Kinesis support enabled.\n- Next, Netdata should be re-installed from the source. The installer will detect that the required libraries are now available.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nNetdata automatically computes a partition key for every record with the purpose to distribute records across available shards evenly.\nThe following options can be defined for this exporter.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | Netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. 
| * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:4242 10.11.14.3:4242 10.11.14.4:4242\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic configuration\n\n```yaml\n[kinesis:my_instance]\n enabled = yes\n destination = us-east-1\n\n```\n##### Configuration with AWS credentials\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[kinesis:my_instance]\n enabled = yes\n destination = us-east-1\n # AWS credentials\n aws_access_key_id = your_access_key_id\n aws_secret_access_key = your_secret_access_key\n # destination stream\n stream name = your_stream_name\n\n```\n",
+ "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- First, [install](https://docs.aws.amazon.com/en_us/sdk-for-cpp/v1/developer-guide/setup.html) the AWS SDK for C++\n- When building it from source, follow these instructions to ensure third-party dependencies are installed:\n ```bash\n git clone --recursive https://github.com/aws/aws-sdk-cpp.git\n cd aws-sdk-cpp/\n git submodule update --init --recursive\n mkdir BUILT\n cd BUILT\n cmake -DCMAKE_INSTALL_PREFIX=/usr -DBUILD_ONLY=kinesis ..\n make\n make install\n ```\n- `libcrypto`, `libssl`, and `libcurl` are also required to compile Netdata with Kinesis support enabled.\n- Next, Netdata should be reinstalled from source. The installer will detect that the required libraries are now available.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nNetdata automatically computes a partition key for every record, in order to distribute records evenly across the available shards.\nThe following options can be defined for this exporter.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | Netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no).
| | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you must enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:4242 10.11.14.3:4242 10.11.14.4:4242\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which it is sent.\n\n\n##### buffer on failures\n\nIf the server keeps failing to receive the data for that many iterations, the buffered data is lost (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n
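For instance, a minimal sketch (the instance name here is hypothetical) of a central Netdata that exports its `*db*` hosts but none of its `*child*` nodes:\n\n```yaml\n[kinesis:my_instance]\n # order matters: the negative pattern must come first\n send hosts matching = !*child* *db*\n```\n\n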
##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic configuration\n\n```yaml\n[kinesis:my_instance]\n enabled = yes\n destination = us-east-1\n\n```\n##### Configuration with AWS credentials\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `kinesis:https:my_instance`.\n\n```yaml\n[kinesis:my_instance]\n enabled = yes\n destination = us-east-1\n # AWS credentials\n aws_access_key_id = your_access_key_id\n aws_secret_access_key = your_secret_access_key\n # destination stream\n stream name = your_stream_name\n\n```\n",
 "integration_type": "exporter",
 "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/aws_kinesis/metadata.yaml",
 "troubleshooting": ""
@@ -20573,7 +20713,7 @@
 "time series"
 ],
 "overview": "# Azure Data Explorer\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",
- "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no).
| | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n",
+ "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you must enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name.
If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which it is sent.\n\n\n##### buffer on failures\n\nIf the server keeps failing to receive the data for that many iterations, the buffered data is lost (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n
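As an illustrative sketch (the instance name is hypothetical), exporting all `apps.*` charts except those ending in `*reads` would look like:\n\n```yaml\n[prometheus_remote_write:my_instance]\n # the negative pattern is listed first, so it wins for charts matching both\n send charts matching = !*reads apps.*\n```\n\n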
##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `prometheus_remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n",
 "integration_type": "exporter",
 "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",
 "troubleshooting": ""
@@ -20599,7 +20739,7 @@
 "time series"
 ],
 "overview": "# Azure Event Hub\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",
- "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no).
| | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n",
+ "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you must enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name.
If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which it is sent.\n\n\n##### buffer on failures\n\nIf the server keeps failing to receive the data for that many iterations, the buffered data is lost (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n
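As a sketch (assuming an instance named `my_instance`), switching the exported metric names from ids to names is a single option:\n\n```yaml\n[prometheus_remote_write:my_instance]\n # export the human-friendly chart and dimension names instead of system ids\n send names instead of ids = yes\n```\n\n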
#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `prometheus_remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n",
 "integration_type": "exporter",
 "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",
 "troubleshooting": ""
@@ -20627,7 +20767,7 @@
 "time series"
 ],
 "overview": "# Google BigQuery\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",
- "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no).
| | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n",
+ "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you must enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name.
If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which it is sent.\n\n\n##### buffer on failures\n\nIf the server keeps failing to receive the data for that many iterations, the buffered data is lost (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol.
For example: `prometheus_remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n",
 "integration_type": "exporter",
 "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",
 "troubleshooting": ""
@@ -20654,7 +20794,7 @@
 "time series"
 ],
 "overview": "# Blueflood\n\nUse the Graphite connector for the exporting engine to archive your Netdata metrics to Graphite providers for long-term storage,\nfurther analysis, or correlation with data from other sources.\n\n",
- "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- You have already installed Netdata and Graphite.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6).
For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic configuration\n\n```yaml\n[graphite:netdata]\n enabled = yes\n destination = localhost:2003\n\n```\n##### Configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[graphite:netdata]\n enabled = yes\n destination = localhost:2003\n username = my_username\n password = my_password\n\n```\n##### Detailed Configuration for a remote, secure host\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[graphite:https:netdata]\n enabled = yes\n username = my_username\n password = my_password\n destination = 10.10.1.114:2003\n # data source = average\n # prefix = netdata\n # hostname = my_hostname\n # update every = 10\n # buffer on failures = 10\n # timeout ms = 20000\n # send names instead of ids = yes\n # send charts matching = *\n # send hosts matching = localhost *\n\n```\n",
+ "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- You have already installed Netdata and Graphite.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you must enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name.
\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n##### buffer on failures\n\nIf the server still fails to receive the data after that many iterations, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n
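\nPutting the filtering options above together, a minimal sketch for this connector (the patterns are the illustrative ones from the previous sections, not recommended values):\n\n```yaml\n[graphite:netdata]\n enabled = yes\n destination = localhost:2003\n # keep hosts named *db*, but drop the ones containing *child*\n send hosts matching = !*child* *db*\n # send apps.* charts, except the ones ending in *reads\n send charts matching = !*reads apps.*\n # export human-friendly names instead of system ids\n send names instead of ids = yes\n```\n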
\n#### Examples\n\n##### Example configuration\n\nBasic configuration\n\n```yaml\n[graphite:netdata]\n enabled = yes\n destination = localhost:2003\n\n```\n##### Configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `graphite:https:netdata`.\n\n```yaml\n[graphite:https:netdata]\n enabled = yes\n destination = localhost:2003\n username = my_username\n password = my_password\n\n```\n##### Detailed Configuration for a remote, secure host\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `graphite:https:netdata`.\n\n```yaml\n[graphite:https:netdata]\n enabled = yes\n username = my_username\n password = my_password\n destination = 10.10.1.114:2003\n # data source = average\n # prefix = netdata\n # hostname = my_hostname\n # update every = 10\n # buffer on failures = 10\n # timeout ms = 20000\n # send names instead of ids = yes\n # send charts matching = *\n # send hosts matching = localhost *\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/graphite/metadata.yaml", "troubleshooting": "" @@ -20682,7 +20822,7 @@ "time series" ], "overview": "# Chronix\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). 
| | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n##### buffer on failures\n\nIf the server still fails to receive the data after that many iterations, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n
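\nPutting the filtering options above together, a minimal sketch for this connector (the patterns are the illustrative ones from the previous sections, not recommended values):\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n # keep hosts named *db*, but drop the ones containing *child*\n send hosts matching = !*child* *db*\n # send apps.* charts, except the ones ending in *reads\n send charts matching = !*reads apps.*\n # export human-friendly names instead of system ids\n send names instead of ids = yes\n```\n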
\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": "" @@ -20710,7 +20850,7 @@ "time series" ], "overview": "# Cortex\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). 
| | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n##### buffer on failures\n\nIf the server still fails to receive the data after that many iterations, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n
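\nTo experiment with that `filter` parameter against a running agent before setting `send charts matching`, a sketch (the host, port and pattern are illustrative; `format=prometheus` selects the Prometheus text format of the `allmetrics` endpoint):\n\n```bash\ncurl 'http://localhost:19999/api/v1/allmetrics?format=prometheus&filter=apps.*'\n```\n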
\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": "" @@ -20738,7 +20878,7 @@ "time series" ], "overview": "# CrateDB\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). 
| | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n##### buffer on failures\n\nIf the server still fails to receive the data after that many iterations, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n
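\nOn a central Netdata aggregating many streamed hosts, the hosts filter is where you trim what reaches the external database; a minimal sketch (the `web*` pattern and the address are illustrative):\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n # send the parent itself plus only the web* children\n send hosts matching = localhost web*\n```\n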
\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": "" @@ -20766,7 +20906,7 @@ "time series" ], "overview": "# ElasticSearch\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). 
| | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n##### buffer on failures\n\nIf the server still fails to receive the data after that many iterations, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n
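\nIf you prefer the human-friendly names in the external database, a minimal sketch (the destination is illustrative; for example, device-mapper disks would then be exported under their names rather than ids like `dm-0`):\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n send names instead of ids = yes\n```\n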
\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": "" @@ -20794,7 +20934,7 @@ "time series" ], "overview": "# Gnocchi\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). 
| | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name.
If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `prometheus_remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": "" @@ -20815,7 +20955,7 @@ "Pub Sub" ], "overview": "# Google Cloud Pub Sub\n\nExport metrics to Google Cloud Pub/Sub Service\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- First [install](https://github.com/googleapis/google-cloud-cpp/) install Google Cloud Platform C++ Client Libraries\n- Pub/Sub support is also dependent on the dependencies of those libraries, like `protobuf`, `protoc`, and `grpc`\n- Next, Netdata should be re-installed from the source.
The installer will detect that the required libraries are now available.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | pubsub.googleapis.com | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | Netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = pubsub.googleapis.com\n ```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. 
This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Basic configuration\n\n- Set the destination option to a Pub/Sub service endpoint. pubsub.googleapis.com is the default one.\n- Create the credentials JSON file by following Google Cloud's authentication guide.\n- The user running the Agent (typically netdata) needs read access to google_cloud_credentials.json, which you can set\n `chmod 400 google_cloud_credentials.json; chown netdata google_cloud_credentials.json`\n- Set the credentials file option to the full path of the file.\n\n\n```yaml\n[pubsub:my_instance]\n enabled = yes\n destination = pubsub.googleapis.com\n credentials file = /etc/netdata/google_cloud_credentials.json\n project id = my_project\n topic id = my_topic\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- First [install](https://github.com/googleapis/google-cloud-cpp/) the Google Cloud Platform C++ Client Libraries\n- Pub/Sub support is also dependent on the dependencies of those libraries, like `protobuf`, `protoc`, and `grpc`\n- Next, Netdata should be re-installed from the source. The installer will detect that the required libraries are now available.
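\n\nA minimal sketch of that reinstall step, assuming a checkout of the Netdata source tree (the path and the `--dont-wait` flag usage are illustrative):\n\n```bash\n# re-run the installer from the root of the Netdata source tree,\n# after the client libraries have been installed\n./netdata-installer.sh --dont-wait\n```\n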
\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | pubsub.googleapis.com | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | Netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = pubsub.googleapis.com\n ```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.
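\n\nFor instance, a sketch of these scheduling options in `exporting.conf` (the section name and the values shown are illustrative; 10 is the documented default for both):\n\n```yaml\n[pubsub:my_instance]\n # send a batch roughly every 10 seconds; keep up to 10 batches while the endpoint is unreachable\n update every = 10\n buffer on failures = 10\n```\n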
\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Basic configuration\n\n- Set the destination option to a Pub/Sub service endpoint.
pubsub.googleapis.com is the default one.\n- Create the credentials JSON file by following Google Cloud's authentication guide.\n- The user running the Agent (typically netdata) needs read access to google_cloud_credentials.json, which you can set with\n `chmod 400 google_cloud_credentials.json; chown netdata google_cloud_credentials.json`\n- Set the credentials file option to the full path of the file.\n\n\n```yaml\n[pubsub:my_instance]\n enabled = yes\n destination = pubsub.googleapis.com\n credentials file = /etc/netdata/google_cloud_credentials.json\n project id = my_project\n topic id = my_topic\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/pubsub/metadata.yaml", "troubleshooting": "" @@ -20837,7 +20977,7 @@ "time series" ], "overview": "# Graphite\n\nUse the Graphite connector for the exporting engine to archive your Netdata metrics to Graphite providers for long-term storage,\nfurther analysis, or correlation with data from other sources.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- You have already installed Netdata and Graphite.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no).
| | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic configuration\n\n```yaml\n[graphite:netdata]\n enabled = yes\n destination = localhost:2003\n\n```\n##### Configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[graphite:netdata]\n enabled = yes\n destination = localhost:2003\n username = my_username\n password = my_password\n\n```\n##### Detailed Configuration for a remote, secure host\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[graphite:https:netdata]\n enabled = yes\n username = my_username\n password = my_password\n destination = 10.10.1.114:2003\n # data source = average\n # prefix = netdata\n # hostname = my_hostname\n # update every = 10\n # buffer on failures = 10\n # timeout ms = 20000\n # send names instead of ids = yes\n # send charts matching = *\n # send hosts matching = localhost *\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- You have already installed Netdata and Graphite.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.
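\n\nAs a sketch, an item can also spell out the protocol prefix explicitly (the hosts and ports shown are illustrative):\n\n```yaml\ndestination = tcp:10.11.14.2:2003 tcp:10.11.14.3:2003\n```\n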
\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic configuration\n\n```yaml\n[graphite:netdata]\n enabled = yes\n destination = localhost:2003\n\n```\n##### Configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `graphite:https:netdata`.\n\n```yaml\n[graphite:https:netdata]\n enabled = yes\n destination = localhost:2003\n username = my_username\n password = my_password\n\n```\n##### Detailed Configuration for a remote, secure host\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol.
For example: `graphite:https:netdata`.\n\n```yaml\n[graphite:https:netdata]\n enabled = yes\n username = my_username\n password = my_password\n destination = 10.10.1.114:2003\n # data source = average\n # prefix = netdata\n # hostname = my_hostname\n # update every = 10\n # buffer on failures = 10\n # timeout ms = 20000\n # send names instead of ids = yes\n # send charts matching = *\n # send hosts matching = localhost *\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/graphite/metadata.yaml", "troubleshooting": "" @@ -20865,7 +21005,7 @@ "time series" ], "overview": "# GreptimeDB\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no).
| | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.
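\n\nAn illustrative destination sketch for a local GreptimeDB, assuming its default HTTP port of 4000 (adjust to your deployment):\n\n```yaml\ndestination = localhost:4000\n```\n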
\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol.
For example: `prometheus_remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": "" @@ -20893,7 +21033,7 @@ "time series" ], "overview": "# InfluxDB\n\nUse the Graphite connector for the exporting engine to archive your Netdata metrics to Graphite providers for long-term storage,\nfurther analysis, or correlation with data from other sources.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- You have already installed Netdata and Graphite.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6).
For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic configuration\n\n```yaml\n[graphite:netdata]\n enabled = yes\n destination = localhost:2003\n\n```\n##### Configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[graphite:netdata]\n enabled = yes\n destination = localhost:2003\n username = my_username\n password = my_password\n\n```\n##### Detailed Configuration for a remote, secure host\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[graphite:https:netdata]\n enabled = yes\n username = my_username\n password = my_password\n destination = 10.10.1.114:2003\n # data source = average\n # prefix = netdata\n # hostname = my_hostname\n # update every = 10\n # buffer on failures = 10\n # timeout ms = 20000\n # send names instead of ids = yes\n # send charts matching = *\n # send hosts matching = localhost *\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- You have already installed Netdata and Graphite.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.
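\n\nAn illustrative sketch for an InfluxDB instance with its Graphite input enabled (the host and port are assumptions; 2003 is that plugin's usual default):\n\n```yaml\ndestination = localhost:2003\n```\n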
If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic configuration\n\n```yaml\n[graphite:netdata]\n enabled = yes\n destination = localhost:2003\n\n```\n##### Configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[graphite:netdata]\n enabled = yes\n destination = localhost:2003\n username = my_username\n password = my_password\n\n```\n##### Detailed Configuration for a remote, secure host\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[graphite:https:netdata]\n enabled = yes\n username = my_username\n password = my_password\n destination = 10.10.1.114:2003\n # data source = average\n # prefix = netdata\n # hostname = my_hostname\n # update every = 10\n # buffer on failures = 10\n # timeout ms = 20000\n # send names instead of ids = yes\n # send charts matching = *\n # send hosts matching = localhost *\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/graphite/metadata.yaml", "troubleshooting": "" @@ -20921,7 +21061,7 @@ "time series" ], "overview": "# IRONdb\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). 
| | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. 
If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": "" @@ -20941,7 +21081,7 @@ "json" ], "overview": "# JSON\n\nUse the JSON connector for the exporting engine to archive your agent's metrics to JSON document databases for long-term storage,\nfurther analysis, or correlation with data from other sources\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | pubsub.googleapis.com | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | Netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). 
For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = localhost:5448\n ```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Basic configuration\n\n\n\n```yaml\n[json:my_json_instance]\n enabled = yes\n destination = localhost:5448\n\n```\n##### Configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `json:https:my_json_instance`.\n\n```yaml\n[json:my_json_instance]\n enabled = yes\n destination = localhost:5448\n username = my_username\n password = my_password\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). 
| no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | pubsub.googleapis.com | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | Netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = localhost:5448\n ```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. 
So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Basic configuration\n\n\n\n```yaml\n[json:my_json_instance]\n enabled = yes\n destination = localhost:5448\n\n```\n##### Configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `json:https:my_json_instance`.\n\n```yaml\n[json:my_json_instance]\n enabled = yes\n destination = localhost:5448\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/json/metadata.yaml", "troubleshooting": "" @@ -20969,7 +21109,7 @@ "time series" ], "overview": "# Kafka\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. 
| netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. 
The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. 
| * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": "" @@ -20997,7 +21137,7 @@ "time series" ], "overview": "# KairosDB\n\nUse the Graphite connector for the exporting engine to archive your Netdata metrics to Graphite providers for long-term storage,\nfurther analysis, or correlation with data from other sources.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- You have already installed Netdata and Graphite.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). 
For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic configuration\n\n```yaml\n[graphite:netdata]\n enabled = yes\n destination = localhost:2003\n\n```\n##### Configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[graphite:netdata]\n enabled = yes\n destination = localhost:2003\n username = my_username\n password = my_password\n\n```\n##### Detailed Configuration for a remote, secure host\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[graphite:https:netdata]\n enabled = yes\n username = my_username\n password = my_password\n destination = 10.10.1.114:2003\n # data source = average\n # prefix = netdata\n # hostname = my_hostname\n # update every = 10\n # buffer on failures = 10\n # timeout ms = 20000\n # send names instead of ids = yes\n # send charts matching = *\n # send hosts matching = localhost *\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- You have already installed Netdata and Graphite.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. 
If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic configuration\n\n```yaml\n[graphite:netdata]\n enabled = yes\n destination = localhost:2003\n\n```\n##### Configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[graphite:netdata]\n enabled = yes\n destination = localhost:2003\n username = my_username\n password = my_password\n\n```\n##### Detailed Configuration for a remote, secure host\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[graphite:https:netdata]\n enabled = yes\n username = my_username\n password = my_password\n destination = 10.10.1.114:2003\n # data source = average\n # prefix = netdata\n # hostname = my_hostname\n # update every = 10\n # buffer on failures = 10\n # timeout ms = 20000\n # send names instead of ids = yes\n # send charts matching = *\n # send hosts matching = localhost *\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/graphite/metadata.yaml", "troubleshooting": "" @@ -21025,7 +21165,7 @@ "time series" ], "overview": "# M3DB\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). 
| | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n",
+ "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing those libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. 
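For instance, a literal `2003`, or a service name your system can resolve to a port number (an illustrative note, not part of the upstream text). 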
If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
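Netdata will then encrypt the metrics it sends to the destination. 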
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": "" @@ -21053,7 +21193,7 @@ "time series" ], "overview": "# MetricFire\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). 
| | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n",
+ "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing those libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. 
If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": "" @@ -21073,7 +21213,7 @@ "MongoDB" ], "overview": "# MongoDB\n\nUse the MongoDB connector for the exporting engine to archive your agent's metrics to a MongoDB database\nfor long-term storage, further analysis, or correlation with data from other sources.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- To use MongoDB as an external storage for long-term archiving, you should first [install](http://mongoc.org/libmongoc/current/installing.html) libmongoc 1.7.0 or higher.\n- Next, re-install Netdata from the source, which detects that the required library is now available.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | localhost | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | Netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). 
| | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:27017 10.11.14.3:4242 10.11.14.4:27017\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Basic configuration\n\nThe default socket timeout depends on the exporting connector update interval.\nThe timeout is 500 ms shorter than the interval (but not less than 1000 ms). 
You can alter the timeout using the sockettimeoutms MongoDB URI option.\n\n\n```yaml\n[mongodb:my_instance]\n enabled = yes\n destination = mongodb://\n database = your_database_name\n collection = your_collection_name\n\n```\n",
+ "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- To use MongoDB as an external storage for long-term archiving, you should first [install](https://www.mongodb.com/docs/languages/c/c-driver/current/libmongoc/tutorials/obtaining-libraries/installing/#std-label-installing) libmongoc 1.7.0 or higher.\n- Next, re-install Netdata from source so that the installer detects that the required library is now available.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | localhost | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | Netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). 
For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:27017 10.11.14.3:4242 10.11.14.4:27017\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Basic configuration\n\nThe default socket timeout depends on the exporting connector update interval.\nThe timeout is 500 ms shorter than the interval (but not less than 1000 ms). 
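With the default `update every = 10`, for example, that works out to 10000 - 500 = 9500 ms. 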
You can alter the timeout using the sockettimeoutms MongoDB URI option.\n\n\n```yaml\n[mongodb:my_instance]\n enabled = yes\n destination = mongodb://\n database = your_database_name\n collection = your_collection_name\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/mongodb/metadata.yaml", "troubleshooting": "" @@ -21101,7 +21241,7 @@ "time series" ], "overview": "# New Relic\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). 
| | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n",
+ "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing those libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. 
If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n",
"integration_type": "exporter",
"edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",
"troubleshooting": ""
@@ -21129,7 +21269,7 @@
"time series"
],
"overview": "# OpenSearch\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",
- "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). 
| | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n",
+ "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing those libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. 
If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": "" @@ -21150,7 +21290,7 @@ "scalable time series" ], "overview": "# OpenTSDB\n\nUse the OpenTSDB connector for the exporting engine to archive your Netdata metrics to OpenTSDB databases for long-term storage,\nfurther analysis, or correlation with data from other sources.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- OpenTSDB and Netdata, installed, configured and operational.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | Netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to OpenTSDB. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). 
For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used (opentsdb = 4242).\n\nExample IPv4:\n```yaml\ndestination = 10.11.14.2:4242 10.11.14.3:4242 10.11.14.4:4242\n```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many iterations, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space-separated patterns, using * as a wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique).
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Minimal configuration\n\nAdd the `:http` or `:https` modifiers to the connector type if you need to use a protocol other than plaintext.\nFor example: `opentsdb:http:my_opentsdb_instance`, `opentsdb:https:my_opentsdb_instance`.\n\n\n```yaml\n[opentsdb:my_opentsdb_instance]\n enabled = yes\n destination = localhost:4242\n\n```\n##### HTTP authentication\n\n\n\n```yaml\n[opentsdb:my_opentsdb_instance]\n enabled = yes\n destination = localhost:4242\n username = my_username\n password = my_password\n\n```\n##### Using `send hosts matching`\n\n\n\n```yaml\n[opentsdb:my_opentsdb_instance]\n enabled = yes\n destination = localhost:4242\n send hosts matching = localhost *\n\n```\n##### Using `send charts matching`\n\n\n\n```yaml\n[opentsdb:my_opentsdb_instance]\n enabled = yes\n destination = localhost:4242\n send charts matching = *\n\n```\n",
+ "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- OpenTSDB and Netdata installed, configured, and operational.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space-separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available one to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | Netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to OpenTSDB. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space-separated patterns (use * as a wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no).
| | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4) or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used (opentsdb = 4242).\n\nExample IPv4:\n```yaml\ndestination = 10.11.14.2:4242 10.11.14.3:4242 10.11.14.4:4242\n```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many iterations, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space-separated patterns, using * as a wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique).
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Minimal configuration\n\nAdd the `:http` or `:https` modifiers to the connector type if you need to use a protocol other than plaintext.\nFor example: `opentsdb:http:my_opentsdb_instance`, `opentsdb:https:my_opentsdb_instance`.\n\n\n```yaml\n[opentsdb:my_opentsdb_instance]\n enabled = yes\n destination = localhost:4242\n\n```\n##### HTTP authentication\n\n\n\n```yaml\n[opentsdb:my_opentsdb_instance]\n enabled = yes\n destination = localhost:4242\n username = my_username\n password = my_password\n\n```\n##### Using `send hosts matching`\n\n\n\n```yaml\n[opentsdb:my_opentsdb_instance]\n enabled = yes\n destination = localhost:4242\n send hosts matching = localhost *\n\n```\n##### Using `send charts matching`\n\n\n\n```yaml\n[opentsdb:my_opentsdb_instance]\n enabled = yes\n destination = localhost:4242\n send charts matching = *\n\n```\n",
"integration_type": "exporter",
"edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/opentsdb/metadata.yaml",
"troubleshooting": ""
@@ -21178,7 +21318,7 @@
"time series"
],
"overview": "# PostgreSQL\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",
- "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice installed, configured, and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space-separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available one to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database.
The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space-separated patterns (use * as a wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4) or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n```yaml\ndestination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many iterations, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space-separated patterns, using * as a wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique).
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n",
+ "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice installed, configured, and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space-separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available one to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space-separated patterns (use * as a wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no).
| | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4) or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n```yaml\ndestination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many iterations, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space-separated patterns, using * as a wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol.
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n",
"integration_type": "exporter",
"edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",
"troubleshooting": ""
@@ -21200,7 +21340,7 @@
"time series"
],
"overview": "# Prometheus Remote Write\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",
- "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice installed, configured, and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space-separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available one to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space-separated patterns (use * as a wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no).
| | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4) or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n```yaml\ndestination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many iterations, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space-separated patterns, using * as a wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol.
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n",
+ "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice installed, configured, and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space-separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available one to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space-separated patterns (use * as a wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4) or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name.
If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n```yaml\ndestination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many iterations, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space-separated patterns, using * as a wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol.
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n",
"integration_type": "exporter",
"edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",
"troubleshooting": ""
@@ -21229,7 +21369,7 @@
"time series"
],
"overview": "# QuasarDB\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",
- "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice installed, configured, and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space-separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available one to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space-separated patterns (use * as a wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no).
| | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4) or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n```yaml\ndestination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many iterations, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space-separated patterns, using * as a wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol.
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n",
+ "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice installed, configured, and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space-separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available one to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space-separated patterns (use * as a wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4) or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name.
If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n```yaml\ndestination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many iterations, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space-separated patterns, using * as a wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol.
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n",
"integration_type": "exporter",
"edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",
"troubleshooting": ""
@@ -21258,7 +21398,7 @@
"time series"
],
"overview": "# Splunk SignalFx\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",
- "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice installed, configured, and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space-separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available one to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space-separated patterns (use * as a wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no).
| | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4) or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n```yaml\ndestination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many iterations, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space-separated patterns, using * as a wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol.
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n",
+ "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice installed, configured, and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space-separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available one to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space-separated patterns (use * as a wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4) or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name.
If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": "" @@ -21286,7 +21426,7 @@ "time series" ], "overview": "# Thanos\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). 
| | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space-separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space-separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. 
If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n```yaml\ndestination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n##### buffer on failures\n\nIf the server still fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nAccepts one or more space-separated patterns, using `*` as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing you to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n```\n\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `prometheus_remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n```\n",
| | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space-separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space-separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. 
If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n```yaml\ndestination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n##### buffer on failures\n\nIf the server still fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nAccepts one or more space-separated patterns, using `*` as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing you to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n```\n\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `prometheus_remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n```\n",
| | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space-separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space-separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. 
If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n```yaml\ndestination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n##### buffer on failures\n\nIf the server still fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nAccepts one or more space-separated patterns, using `*` as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing you to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n```\n\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `prometheus_remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n```\n",
| | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space-separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space-separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. 
If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n```yaml\ndestination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n##### buffer on failures\n\nIf the server still fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nAccepts one or more space-separated patterns, using `*` as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing you to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n```\n\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `prometheus_remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n```\n",
| | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries are installed.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name.
If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol.
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": "" @@ -21428,7 +21568,7 @@ "time series" ], "overview": "# Wavefront\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). 
| | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries are installed.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name.
If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": "" @@ -21447,9 +21587,9 @@ "Alerta" ], "overview": "# Alerta\n\nThe [Alerta](https://alerta.io/) monitoring system is a tool used to consolidate and de-duplicate alerts from multiple sources for quick \u2018at-a-glance\u2019 visualization.
With just one system you can monitor alerts from many other monitoring tools on a single screen.\nYou can send Netdata alerts to Alerta to see alerts coming from many Netdata hosts or also from a multi-host Netdata configuration.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A working Alerta instance\n- An Alerta API key (if authentication in Alerta is enabled)\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_ALERTA | Set `SEND_ALERTA` to YES | | yes |\n| ALERTA_WEBHOOK_URL | set `ALERTA_WEBHOOK_URL` to the API url you defined when you installed the Alerta server. | | yes |\n| ALERTA_API_KEY | Set `ALERTA_API_KEY` to your API key. | | yes |\n| DEFAULT_RECIPIENT_ALERTA | Set `DEFAULT_RECIPIENT_ALERTA` to the default recipient environment you want the alert notifications to be sent to. All roles will default to this variable if left unconfigured. | | yes |\n| DEFAULT_RECIPIENT_CUSTOM | Set different recipient environments per role, by editing `DEFAULT_RECIPIENT_CUSTOM` with the environment name of your choice | | no |\n\n##### ALERTA_API_KEY\n\nYou will need an API key to send messages from any source, if Alerta is configured to use authentication (recommended). To create a new API key:\n1. Go to Configuration > API Keys.\n2. 
Create a new API key called \"netdata\" with `write:alerts` permission.\n\n\n##### DEFAULT_RECIPIENT_CUSTOM\n\nThe `DEFAULT_RECIPIENT_CUSTOM` can be edited in the following entries at the bottom of the same file:\n\n```conf\nrole_recipients_alerta[sysadmin]=\"Systems\"\nrole_recipients_alerta[domainadmin]=\"Domains\"\nrole_recipients_alerta[dba]=\"Databases Systems\"\nrole_recipients_alerta[webmaster]=\"Marketing Development\"\nrole_recipients_alerta[proxyadmin]=\"Proxy\"\nrole_recipients_alerta[sitemgr]=\"Sites\"\n```\n\nThe values you provide should be defined as environments in `/etc/alertad.conf` with `ALLOWED_ENVIRONMENTS` option.\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# alerta (alerta.io) global notification options\n\nSEND_ALERTA=\"YES\"\nALERTA_WEBHOOK_URL=\"http://yourserver/alerta/api\"\nALERTA_API_KEY=\"INSERT_YOUR_API_KEY_HERE\"\nDEFAULT_RECIPIENT_ALERTA=\"Production\"\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A working Alerta instance\n- An Alerta API key (if authentication in Alerta is enabled)\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_ALERTA | Set `SEND_ALERTA` to YES | | yes |\n| ALERTA_WEBHOOK_URL | Set `ALERTA_WEBHOOK_URL` to the API URL you defined when you installed the Alerta server. | | yes |\n| ALERTA_API_KEY | Set `ALERTA_API_KEY` to your API key. | | yes |\n| DEFAULT_RECIPIENT_ALERTA | Set `DEFAULT_RECIPIENT_ALERTA` to the default recipient environment you want the alert notifications to be sent to. All roles will default to this variable if left unconfigured. | | yes |\n| DEFAULT_RECIPIENT_CUSTOM | Set different recipient environments per role, by editing `DEFAULT_RECIPIENT_CUSTOM` with the environment name of your choice. | | no |\n\n##### ALERTA_API_KEY\n\nYou will need an API key to send messages from any source, if Alerta is configured to use authentication (recommended). To create a new API key:\n1. Go to Configuration > API Keys.\n2.
Create a new API key called \"netdata\" with `write:alerts` permission.\n\n\n##### DEFAULT_RECIPIENT_CUSTOM\n\nThe `DEFAULT_RECIPIENT_CUSTOM` can be edited in the following entries at the bottom of the same file:\n\n```text\nrole_recipients_alerta[sysadmin]=\"Systems\"\nrole_recipients_alerta[domainadmin]=\"Domains\"\nrole_recipients_alerta[dba]=\"Databases Systems\"\nrole_recipients_alerta[webmaster]=\"Marketing Development\"\nrole_recipients_alerta[proxyadmin]=\"Proxy\"\nrole_recipients_alerta[sitemgr]=\"Sites\"\n```\n\nThe values you provide should be defined as environments in `/etc/alertad.conf` with the `ALLOWED_ENVIRONMENTS` option.\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# alerta (alerta.io) global notification options\n\nSEND_ALERTA=\"YES\"\nALERTA_WEBHOOK_URL=\"http://yourserver/alerta/api\"\nALERTA_API_KEY=\"INSERT_YOUR_API_KEY_HERE\"\nDEFAULT_RECIPIENT_ALERTA=\"Production\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/alerta/metadata.yaml" }, { @@ -21466,266 +21606,11 @@ "AWS SNS" ], "overview": "# AWS SNS\n\nAs part of its AWS suite, Amazon provides a notification broker service called 'Simple Notification Service' (SNS). Amazon SNS works similarly to Netdata's own notification system, allowing you to dispatch a single notification to multiple subscribers of different types. Among other things, SNS supports sending notifications to:\n- Email addresses\n- Mobile Phones via SMS\n- HTTP or HTTPS web hooks\n- AWS Lambda functions\n- AWS SQS queues\n- Mobile applications via push notifications\nYou can send notifications through Amazon SNS using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n\n## Limitations\n\n- While Amazon SNS supports sending differently formatted messages for different delivery methods, Netdata does not currently support this functionality.\n- For email notification support, we recommend using Netdata's email notifications, as it has the following benefits:\n - In most cases, it requires less configuration.\n - Netdata's emails are nicely pre-formatted and support features like threading, which requires a lot of manual effort in SNS.\n - It is less resource intensive and more cost-efficient than SNS.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The [Amazon Web Services CLI tools](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html) (awscli).\n- An actual home directory for the user you run Netdata as, instead of just using `/` as a home directory. The setup depends on the distribution, but `/var/lib/netdata` is the recommended directory.
If you are using Netdata as a dedicated user, the permissions will already be correct.\n- An Amazon SNS topic to send notifications to with one or more subscribers. The Getting Started section of the Amazon SNS documentation covers the basics of how to set this up. Make note of the Topic ARN when you create the topic.\n- While not mandatory, it is highly recommended to create a dedicated IAM user on your account for Netdata to send notifications. This user needs to have programmatic access, and should only allow access to SNS. For an additional layer of security, you can create one for each system or group of systems.\n- Terminal access to the Agent you wish to configure.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| aws path | The full path of the aws command. If empty, the system `$PATH` will be searched for it. If not found, Amazon SNS notifications will be silently disabled. | | yes |\n| SEND_AWSNS | Set `SEND_AWSNS` to YES | YES | yes |\n| AWSSNS_MESSAGE_FORMAT | Set `AWSSNS_MESSAGE_FORMAT` to to the string that you want the alert to be sent into. | ${status} on ${host} at ${date}: ${chart} ${value_string} | yes |\n| DEFAULT_RECIPIENT_AWSSNS | Set `DEFAULT_RECIPIENT_AWSSNS` to the Topic ARN you noted down upon creating the Topic. 
| | yes |\n\n##### AWSSNS_MESSAGE_FORMAT\n\nThe supported variables are:\n\n| Variable name | Description |\n|:---------------------------:|:---------------------------------------------------------------------------------|\n| `${alarm}` | Like \"name = value units\" |\n| `${status_message}` | Like \"needs attention\", \"recovered\", \"is critical\" |\n| `${severity}` | Like \"Escalated to CRITICAL\", \"Recovered from WARNING\" |\n| `${raised_for}` | Like \"(alarm was raised for 10 minutes)\" |\n| `${host}` | The host generated this event |\n| `${url_host}` | Same as ${host} but URL encoded |\n| `${unique_id}` | The unique id of this event |\n| `${alarm_id}` | The unique id of the alarm that generated this event |\n| `${event_id}` | The incremental id of the event, for this alarm id |\n| `${when}` | The timestamp this event occurred |\n| `${name}` | The name of the alarm, as given in netdata health.d entries |\n| `${url_name}` | Same as ${name} but URL encoded |\n| `${chart}` | The name of the chart (type.id) |\n| `${url_chart}` | Same as ${chart} but URL encoded |\n| `${status}` | The current status : REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL |\n| `${old_status}` | The previous status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL |\n| `${value}` | The current value of the alarm |\n| `${old_value}` | The previous value of the alarm |\n| `${src}` | The line number and file the alarm has been configured |\n| `${duration}` | The duration in seconds of the previous alarm state |\n| `${duration_txt}` | Same as ${duration} for humans |\n| `${non_clear_duration}` | The total duration in seconds this is/was non-clear |\n| `${non_clear_duration_txt}` | Same as ${non_clear_duration} for humans |\n| `${units}` | The units of the value |\n| `${info}` | A short description of the alarm |\n| `${value_string}` | Friendly value (with units) |\n| `${old_value_string}` | Friendly old value (with units) |\n| `${image}` | The URL of an image to represent the status of the alarm |\n| `${color}` | A color in AABBCC format for the alarm |\n| `${goto_url}` | The URL the user can click to see the netdata dashboard |\n| `${calc_expression}` | The expression evaluated to provide the value for the alarm |\n| `${calc_param_values}` | The value of the variables in the evaluated expression |\n| `${total_warnings}` | The total number of alarms in WARNING state on the host |\n| `${total_critical}` | The total number of alarms in CRITICAL state on the host |\n\n\n##### DEFAULT_RECIPIENT_AWSSNS\n\nAll roles will default to this variable if left unconfigured.\n\nYou can have different recipient Topics per **role**, by editing `DEFAULT_RECIPIENT_AWSSNS` with the Topic ARN you want, in the following entries at the bottom of the same file:\n\n```conf\nrole_recipients_awssns[sysadmin]=\"arn:aws:sns:us-east-2:123456789012:Systems\"\nrole_recipients_awssns[domainadmin]=\"arn:aws:sns:us-east-2:123456789012:Domains\"\nrole_recipients_awssns[dba]=\"arn:aws:sns:us-east-2:123456789012:Databases\"\nrole_recipients_awssns[webmaster]=\"arn:aws:sns:us-east-2:123456789012:Development\"\nrole_recipients_awssns[proxyadmin]=\"arn:aws:sns:us-east-2:123456789012:Proxy\"\nrole_recipients_awssns[sitemgr]=\"arn:aws:sns:us-east-2:123456789012:Sites\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\nAn example working configuration would be:\n\n```yaml\n```conf\n#------------------------------------------------------------------------------\n# Amazon SNS 
notifications\n\nSEND_AWSSNS=\"YES\"\nAWSSNS_MESSAGE_FORMAT=\"${status} on ${host} at ${date}: ${chart} ${value_string}\"\nDEFAULT_RECIPIENT_AWSSNS=\"arn:aws:sns:us-east-2:123456789012:MyTopic\"\n```\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The [Amazon Web Services CLI tools](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html) (awscli).\n- An actual home directory for the user you run Netdata as, instead of just using `/` as a home directory. The setup depends on the distribution, but `/var/lib/netdata` is the recommended directory. If you are using Netdata as a dedicated user, the permissions will already be correct.\n- An Amazon SNS topic to send notifications to with one or more subscribers. The Getting Started section of the Amazon SNS documentation covers the basics of how to set this up. Make note of the Topic ARN when you create the topic.\n- While not mandatory, it is highly recommended to create a dedicated IAM user on your account for Netdata to send notifications. This user needs to have programmatic access, and should only allow access to SNS. For an additional layer of security, you can create one for each system or group of systems.\n- Terminal access to the Agent you wish to configure.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| aws path | The full path of the aws command. If empty, the system `$PATH` will be searched for it. If not found, Amazon SNS notifications will be silently disabled. | | yes |\n| SEND_AWSSNS | Set `SEND_AWSSNS` to YES | YES | yes |\n| AWSSNS_MESSAGE_FORMAT | Set `AWSSNS_MESSAGE_FORMAT` to the string that you want the alert to be sent in. | ${status} on ${host} at ${date}: ${chart} ${value_string} | yes |\n| DEFAULT_RECIPIENT_AWSSNS | Set `DEFAULT_RECIPIENT_AWSSNS` to the Topic ARN you noted down upon creating the Topic.
| | yes |\n\n##### AWSSNS_MESSAGE_FORMAT\n\nThe supported variables are:\n\n| Variable name | Description |\n|:---------------------------:|:---------------------------------------------------------------------------------|\n| `${alarm}` | Like \"name = value units\" |\n| `${status_message}` | Like \"needs attention\", \"recovered\", \"is critical\" |\n| `${severity}` | Like \"Escalated to CRITICAL\", \"Recovered from WARNING\" |\n| `${raised_for}` | Like \"(alarm was raised for 10 minutes)\" |\n| `${host}` | The host that generated this event |\n| `${url_host}` | Same as ${host} but URL encoded |\n| `${unique_id}` | The unique id of this event |\n| `${alarm_id}` | The unique id of the alarm that generated this event |\n| `${event_id}` | The incremental id of the event, for this alarm id |\n| `${when}` | The timestamp this event occurred |\n| `${name}` | The name of the alarm, as given in netdata health.d entries |\n| `${url_name}` | Same as ${name} but URL encoded |\n| `${chart}` | The name of the chart (type.id) |\n| `${url_chart}` | Same as ${chart} but URL encoded |\n| `${status}` | The current status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL |\n| `${old_status}` | The previous status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL |\n| `${value}` | The current value of the alarm |\n| `${old_value}` | The previous value of the alarm |\n| `${src}` | The line number and file where the alarm has been configured |\n| `${duration}` | The duration in seconds of the previous alarm state |\n| `${duration_txt}` | Same as ${duration} for humans |\n| `${non_clear_duration}` | The total duration in seconds this is/was non-clear |\n| `${non_clear_duration_txt}` | Same as ${non_clear_duration} for humans |\n| `${units}` | The units of the value |\n| `${info}` | A short description of the alarm |\n| `${value_string}` | Friendly value (with units) |\n| `${old_value_string}` | Friendly old value (with units) |\n| `${image}` | The URL of an image to represent the status of the alarm |\n| `${color}` | A color in AABBCC format for the alarm |\n| `${goto_url}` | The URL the user can click to see the netdata dashboard |\n| `${calc_expression}` | The expression evaluated to provide the value for the alarm |\n| `${calc_param_values}` | The value of the variables in the evaluated expression |\n| `${total_warnings}` | The total number of alarms in WARNING state on the host |\n| `${total_critical}` | The total number of alarms in CRITICAL state on the host |\n\n\n##### DEFAULT_RECIPIENT_AWSSNS\n\nAll roles will default to this variable if left unconfigured.\n\nYou can have different recipient Topics per **role**, by editing `DEFAULT_RECIPIENT_AWSSNS` with the Topic ARN you want, in the following entries at the bottom of the same file:\n\n```text\nrole_recipients_awssns[sysadmin]=\"arn:aws:sns:us-east-2:123456789012:Systems\"\nrole_recipients_awssns[domainadmin]=\"arn:aws:sns:us-east-2:123456789012:Domains\"\nrole_recipients_awssns[dba]=\"arn:aws:sns:us-east-2:123456789012:Databases\"\nrole_recipients_awssns[webmaster]=\"arn:aws:sns:us-east-2:123456789012:Development\"\nrole_recipients_awssns[proxyadmin]=\"arn:aws:sns:us-east-2:123456789012:Proxy\"\nrole_recipients_awssns[sitemgr]=\"arn:aws:sns:us-east-2:123456789012:Sites\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\nAn example working configuration would be:\n\n```text\n#------------------------------------------------------------------------------\n# Amazon SNS
notifications\n\nSEND_AWSSNS=\"YES\"\nAWSSNS_MESSAGE_FORMAT=\"${status} on ${host} at ${date}: ${chart} ${value_string}\"\nDEFAULT_RECIPIENT_AWSSNS=\"arn:aws:sns:us-east-2:123456789012:MyTopic\"\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/awssns/metadata.yaml" }, { "id": "notify-cloud-awssns", "meta": { "name": "Amazon SNS", "link": "https://aws.amazon.com/sns/", "categories": [ "notify.cloud" ], "icon_filename": "awssns.png" }, "keywords": [ "awssns" ], "overview": "# Amazon SNS\n\nFrom the Cloud interface, you can manage your space's notification settings and from these you can add a specific configuration to get notifications delivered on AWS SNS.\n", "setup": "## Setup\n\n### Prerequisites\n\nTo add AWS SNS notification you need:\n\n- A Netdata Cloud account\n- Access to the space as an **Admin**\n- The Space needs to be on a paid plan\n- Have an AWS account with AWS SNS access, for more details check [how to configure this on AWS SNS](#settings-on-aws-sns)\n\n### Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **AwsSns** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n * **Notification settings** are Netdata specific settings\n - Configuration name - you can optionally provide a name for your configuration you can easily refer to it\n - Rooms - by specifying a list of Rooms you are select to which nodes or areas of your infrastructure you want to be notified using this configuration\n - Notification - you specify which notifications you want to be notified using this configuration: All Alerts and unreachable, All Alerts, Critical only\n * **Integration configuration** are the specific notification integration required settings, which vary by notification method. For AWS SNS:\n - Topic ARN - topic provided on AWS SNS (with region) for where to publish your notifications. For more details check [how to configure this on AWS SNS](#settings-on-aws-sns)\n\n### Settings on AWS SNS\n\nTo enable the webhook integration on AWS SNS you need:\n1. [Setting up access for Amazon SNS](https://docs.aws.amazon.com/sns/latest/dg/sns-setting-up.html)\n2. Create a topic\n - On AWS SNS management console click on **Create topic**\n - On the **Details** section, the standard type and provide the topic name\n - On the **Access policy** section, change the **Publishers** option to **Only the specified AWS accounts** and provide the Netdata AWS account **(123269920060)** that will be used to publish notifications to the topic being created\n - Finally, click on **Create topic** on the bottom of the page\n3.
Now, use the new **Topic ARN** while adding AWS SNS integration on your space.\n\n", - "integration_type": "notification", - "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", - "troubleshooting": "" - }, - { - "id": "notify-cloud-discord", - "meta": { - "name": "Discord", - "link": "https://discord.com/", - "categories": [ - "notify.cloud" - ], - "icon_filename": "discord.png" - }, - "keywords": [ - "discord", - "community" - ], - "overview": "# Discord\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on Discord.\n", - "setup": "## Setup\n\n### Prerequisites\n- A Netdata Cloud account\n- Access to the Netdata Space as an **Admin**\n- You need to have a Discord server able to receive webhooks integrations.\n\n### Discord Server Configuration\nSteps to configure your Discord server to receive [webhook notifications](https://support.discord.com/hc/en-us/articles/228383668) from Netdata:\n1. Go to `Server Settings` --> `Integrations`\n2. **Create Webhook** or **View Webhooks** if you already have some defined\n3. Specify the **Name** and **Channel** on your new webhook\n4. Use Webhook URL to add your notification configuration on Netdata UI\n\n### Netdata Configuration Steps\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **Discord** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n * **Notification settings** are Netdata specific settings\n - Configuration name - you can optionally provide a name for your configuration you can easily refer to it\n - Rooms - by specifying a list of Rooms you are select to which nodes or areas of your infrastructure you want to be notified using this configuration\n - Notification - you specify which notifications you want to be notified using this configuration: All Alerts and unreachable, All Alerts, Critical only\n * **Integration configuration** are the specific notification integration required settings, which vary by notification method. 
For Discord:\n - Define the type channel you want to send notifications to: **Text channel** or **Forum channel**\n - Webhook URL - URL provided on Discord for the channel you want to receive your notifications.\n - Thread name - if the Discord channel is a **Forum channel** you will need to provide the thread name as well\n\n", - "integration_type": "notification", - "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", - "troubleshooting": "" - }, - { - "id": "notify-cloud-mattermost", - "meta": { - "name": "Mattermost", - "link": "https://mattermost.com/", - "categories": [ - "notify.cloud" - ], - "icon_filename": "mattermost.png" - }, - "keywords": [ - "mattermost" - ], - "overview": "# Mattermost\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on Mattermost.\n", - "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Netdata Space as an **Admin**\n- The Netdata Space needs to be on a paid plan\n- You need to have permissions on Mattermost to add new integrations.\n- You need to have a Mattermost app on your workspace to receive the webhooks.\n\n### Mattermost Server Configuration\n\nSteps to configure your Mattermost to receive notifications from Netdata:\n\n1. In Mattermost, go to Product menu > Integrations > Incoming Webhook\n - If you don\u2019t have the Integrations option, incoming webhooks may not be enabled on your Mattermost server or may be disabled for non-admins. They can be enabled by a System Admin from System Console > Integrations > Integration Management. Once incoming webhooks are enabled, continue with the steps below.\n2. Select Add Incoming Webhook and add a name and description for the webhook. The description can be up to 500 characters\n3. Select the channel to receive webhook payloads, then select Add to create the webhook\n4. You will end up with a webhook endpoint that looks like below:\n `https://your-mattermost-server.com/hooks/xxx-generatedkey-xxx`\n\n - Treat this endpoint as a secret. Anyone who has it will be able to post messages to your Mattermost instance.\n\nFor more details please check Mattermost's article [Incoming webhooks for Mattermost](https://developers.mattermost.com/integrate/webhooks/incoming/).\n\n### Netdata Configuration Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **Mattermost** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n * **Notification settings** are Netdata specific settings\n - Configuration name - you can optionally provide a name for your configuration you can easily refer to it\n - Rooms - by specifying a list of Rooms you are select to which nodes or areas of your infrastructure you want to be notified using this configuration\n - Notification - you specify which notifications you want to be notified using this configuration: All Alerts and unreachable, All Alerts, Critical only\n * **Integration configuration** are the specific notification integration required settings, which vary by notification method. 
For Mattermost:\n - Webhook URL - URL provided on Mattermost for the channel you want to receive your notifications\n\n", - "integration_type": "notification", - "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", - "troubleshooting": "" - }, - { - "id": "notify-cloud-microsoftteams", - "meta": { - "name": "Microsoft Teams", - "link": "https://www.microsoft.com/en-us/microsoft-teams", - "categories": [ - "notify.cloud" - ], - "icon_filename": "teams.svg" - }, - "keywords": [ - "microsoft", - "teams" - ], - "overview": "# Microsoft Teams\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications to a Microsoft Teams channel.\n", - "setup": "## Setup\n\n### Prerequisites\n\nTo add Microsoft Teams notifications integration to your Netdata Cloud space you will need the following:\n\n- A Netdata Cloud account.\n- Access to the Netdata Cloud space as an **Admin**.\n- The Space to be on a paid plan.\n- A [Microsoft Teams Essentials subscription](https://www.microsoft.com/en-sg/microsoft-teams/essentials) or higher. Note that this is a **paid** feature.\n\n### Settings on Microsoft Teams\n\n1. **Access the Channel Settings**: Navigate to the desired Microsoft Teams channel and hover over the channel name. Click the three dots (ellipsis) icon that appears.\n2. **Create a New Workflow**: Select \"Workflows\" from the options, then choose \"Post to a channel when a webhook request is received.\"\n3. **Configure Workflow Details**:\n - Give your workflow a descriptive name, such as \"Netdata Alerts.\"\n - Select the target team and channel to receive notifications.\n - Click \"Add workflow.\"\n4. **Obtain the Webhook URL**:\n - Once the workflow is created, you will receive a unique Workflow Webhook URL.\n - Copy this URL, as it will be required to configure Netdata Cloud.\n\n### Settings on Netdata Cloud\n\n1. Click on the **Space settings** cog (located above your profile icon).\n2. Click on the **Notification** tab.\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen).\n4. On the **Microsoft Teams** card click on **+ Add**.\n5. A modal will be presented to you to enter the required details to enable the configuration:\n * **Notification settings** are Netdata specific settings:\n - Configuration name - you can optionally provide a name for your configuration you can easily refer to it.\n - Rooms - by specifying a list of Rooms you are select to which nodes or areas of your infrastructure you want to be notified using this configuration.\n - Notification - you specify which notifications you want to be notified using this configuration: All Alerts and unreachable, All Alerts, Critical only.\n * **Integration configuration** are the specific notification integration required settings, which vary by notification method. 
For Microsoft Teams:\n - Microsoft Teams Incoming Webhook URL - the _Incoming Webhook URL_ that was generated earlier.\n\n", - "integration_type": "notification", - "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", - "troubleshooting": "" - }, - { - "id": "notify-cloud-mobile-app", - "meta": { - "name": "Netdata Mobile App", - "link": "https://netdata.cloud", - "categories": [ - "notify.cloud" - ], - "icon_filename": "netdata.png" - }, - "keywords": [ - "mobile-app", - "phone", - "personal-notifications" - ], - "overview": "# Netdata Mobile App\n\nFrom the Netdata Cloud UI, you can manage your user notification settings and enable the configuration to deliver notifications on the Netdata Mobile Application.\n", - "setup": "## Setup\n\n### Prerequisites\n- A Netdata Cloud account\n- You need to have the Netdata Mobile Application installed on your [Android](https://play.google.com/store/apps/details?id=cloud.netdata.android&pli=1) or [iOS](https://apps.apple.com/in/app/netdata-mobile/id6474659622) phone.\n\n### Netdata Mobile App Configuration\nSteps to login to the Netdata Mobile Application to receive alert and reachability and alert notifications:\n1. Download the Netdata Mobile Application from [Google Play Store](https://play.google.com/store/apps/details?id=cloud.netdata.android&pli=1) or the [iOS App Store](https://apps.apple.com/in/app/netdata-mobile/id6474659622)\n2. Open the App and Choose the Sign In Option\n - Sign In with Email Address: Enter the Email Address of your registered Netdata Cloud Account and Click on the Verification link received by Email on your mobile device.\n - Sign In with QR Code: Scan the QR Code from your `Netdata Cloud` UI under **User Settings** --> **Notifications** --> **Mobile App Notifications** --> **Show QR Code**\n3. Start receiving alert and reachability notifications for your **Space(s)** on a **Paid Subscription plan**\n\n### Netdata Configuration Steps\n1. Click on the **User settings** on the bottom left of your screen (your profile icon)\n2. Click on the **Notifications** tab\n3. Enable **Mobile App Notifications** if disabled (Enabled by default)\n4. Use the **Show QR Code** Option to login to your mobile device by scanning the **QR Code**\n\n", - "integration_type": "notification", - "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", - "troubleshooting": "" - }, - { - "id": "notify-cloud-opsgenie", - "meta": { - "name": "Opsgenie", - "link": "https://www.atlassian.com/software/opsgenie", - "categories": [ - "notify.cloud" - ], - "icon_filename": "opsgenie.png" - }, - "keywords": [ - "opsgenie", - "atlassian" - ], - "overview": "# Opsgenie\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on Opsgenie.\n", - "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Netdata Space as an **Admin**\n- The Netdata Space needs to be on a paid plan\n- You need to have permissions on Opsgenie to add new integrations.\n\n### Opsgenie Server Configuration\n\nSteps to configure your Opsgenie to receive notifications from Netdata:\n\n1. Go to integrations tab of your team, click **Add integration**\n2. Pick **API** from available integrations. Copy your API Key and press **Save Integration**.\n3. 
Paste copied API key into the corresponding field in **Integration configuration** section of Opsgenie modal window in Netdata.\n\n### Netdata Configuration Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **Opsgenie** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n * **Notification settings** are Netdata specific settings\n - Configuration name - you can optionally provide a name for your configuration you can easily refer to it\n - Rooms - by specifying a list of Rooms you are select to which nodes or areas of your infrastructure you want to be notified using this configuration\n - Notification - you specify which notifications you want to be notified using this configuration: All Alerts and unreachable, All Alerts, Critical only\n * **Integration configuration** are the specific notification integration required settings, which vary by notification method. For Opsgenie:\n - API Key - a key provided on Opsgenie for the channel you want to receive your notifications.\n\n", - "integration_type": "notification", - "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", - "troubleshooting": "" - }, - { - "id": "notify-cloud-pagerduty", - "meta": { - "name": "PagerDuty", - "link": "https://www.pagerduty.com/", - "categories": [ - "notify.cloud" - ], - "icon_filename": "pagerduty.png" - }, - "keywords": [ - "pagerduty" - ], - "overview": "# PagerDuty\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on PagerDuty.\n", - "setup": "## Setup\n\n### Prerequisites\n- A Netdata Cloud account\n- Access to the Netdata Space as an **Admin**\n- The Netdata Space needs to be on a paid plan\n- You need to have a PagerDuty service to receive events using webhooks.\n\n\n### PagerDuty Server Configuration\nSteps to configure your PagerDuty to receive notifications from Netdata:\n\n1. Create a service to receive events from your services directory page on PagerDuty\n2. At step 3, select `Events API V2` Integration or **View Webhooks** if you already have some defined\n3. Once the service is created, you will be redirected to its configuration page, where you can copy the **Integration Key** and **Integration URL (Alert Events)** fields to add them to your notification configuration in the Netdata UI.\n\n### Netdata Configuration Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **PagerDuty** card click on **+ Add**\n5. 
A modal will be presented to you to enter the required details to enable the configuration:\n * **Notification settings** are Netdata specific settings\n - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it\n - Rooms - by specifying a list of Rooms you select which nodes or areas of your infrastructure you want to be notified about using this configuration\n - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only\n * **Integration configuration** are the specific notification integration required settings, which vary by notification method. For PagerDuty:\n - Integration Key - a 32-character key provided by PagerDuty to receive events on your service.\n - Integration URL (Alert Events) - the URL provided by PagerDuty where we will send notifications.\n\n", - "integration_type": "notification", - "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", - "troubleshooting": "" - }, - { - "id": "notify-cloud-rocketchat", - "meta": { - "name": "RocketChat", - "link": "https://www.rocket.chat/", - "categories": [ - "notify.cloud" - ], - "icon_filename": "rocketchat.png" - }, - "keywords": [ - "rocketchat" - ], - "overview": "# RocketChat\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on RocketChat.\n", - "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Netdata Space as an **Admin**\n- The Netdata Space needs to be on a paid plan\n- You need to have permissions on RocketChat to add new integrations.\n- You need to have a RocketChat app on your workspace to receive the webhooks.\n\n### RocketChat Server Configuration\n\nSteps to configure your RocketChat to receive notifications from Netdata:\n\n1. In RocketChat, navigate to Administration > Workspace > Integrations.\n2. Click **+New** at the top right corner.\n3. For more details about each parameter, check [create-a-new-incoming-webhook](https://docs.rocket.chat/use-rocket.chat/workspace-administration/integrations#create-a-new-incoming-webhook).\n4. After configuring the integration, click Save.\n5. You will end up with a webhook endpoint that looks like the one below:\n `https://your-server.rocket.chat/hooks/YYYYYYYYYYYYYYYYYYYYYYYY/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX`\n - Treat this endpoint as a secret. Anyone who has it will be able to post messages to your RocketChat instance.\n\n\nFor more details please check RocketChat's article on [incoming webhooks](https://docs.rocket.chat/use-rocket.chat/workspace-administration/integrations/).\n\n### Netdata Configuration Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **RocketChat** card click on **+ Add**\n5. 
A modal will be presented to you to enter the required details to enable the configuration:\n * **Notification settings** are Netdata specific settings\n - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it\n - Rooms - by specifying a list of Rooms you select which nodes or areas of your infrastructure you want to be notified about using this configuration\n - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only\n * **Integration configuration** are the specific notification integration required settings, which vary by notification method. For RocketChat:\n - Webhook URL - URL provided on RocketChat for the channel you want to receive your notifications.\n\n", - "integration_type": "notification", - "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", - "troubleshooting": "" - }, - { - "id": "notify-cloud-slack", - "meta": { - "name": "Slack", - "link": "https://slack.com/", - "categories": [ - "notify.cloud" - ], - "icon_filename": "slack.png" - }, - "keywords": [ - "slack" - ], - "overview": "# Slack\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on Slack.\n", - "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Netdata Space as an **Admin**\n- The Netdata Space needs to be on a paid plan\n- You need to have a Slack app on your workspace to receive the Webhooks.\n\n### Slack Server Configuration\n\nSteps to configure your Slack to receive notifications from Netdata:\n\n1. Create an app to receive webhook integrations. Check [Create an app](https://api.slack.com/apps?new_app=1) from the Slack documentation for further details\n2. Install the app on your workspace\n3. Configure Webhook URLs for your workspace\n - On your app go to **Incoming Webhooks** and click on **activate incoming webhooks**\n - At the bottom of the **Webhook URLs for Your Workspace** section you have **Add New Webhook to Workspace**\n - After pressing that, specify the channel where you want your notifications to be delivered\n - Once completed, copy the Webhook URL; you will need to add it to your notification configuration in the Netdata UI (you can first test it, as shown below)\n
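\nIf you want to verify the webhook URL before adding it to Netdata, you can post a test message to it yourself - a minimal sketch, assuming a placeholder URL in the usual Slack incoming-webhook format:\n\n```bash\n# post a test message to the Slack incoming webhook (replace the URL with your own)\ncurl -X POST -H 'Content-type: application/json' --data '{\"text\":\"Test message before wiring up Netdata\"}' https://hooks.slack.com/services/T00000000/B00000000/XXXXXXXXXXXXXXXXXXXXXXXX\n```\n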
\nFor more details please check Slack's article [Incoming webhooks for Slack](https://slack.com/help/articles/115005265063-Incoming-webhooks-for-Slack).\n\n### Netdata Configuration Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **Slack** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n * **Notification settings** are Netdata specific settings\n - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it\n - Rooms - by specifying a list of Rooms you select which nodes or areas of your infrastructure you want to be notified about using this configuration\n - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only\n * **Integration configuration** are the specific notification integration required settings, which vary by notification method. For Slack:\n - Webhook URL - URL provided on Slack for the channel you want to receive your notifications.\n\n", - "integration_type": "notification", - "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", - "troubleshooting": "" - }, - { - "id": "notify-cloud-splunk", - "meta": { - "name": "Splunk", - "link": "https://splunk.com/", - "categories": [ - "notify.cloud" - ], - "icon_filename": "splunk-black.svg" - }, - "keywords": [ - "Splunk" - ], - "overview": "# Splunk\n\nFrom the Cloud interface, you can manage your space's notification settings and from there you can add a specific configuration to get notifications delivered on Splunk.\n", - "setup": "## Setup\n\n### Prerequisites\n\nTo add Splunk notification you need:\n\n- A Netdata Cloud account\n- Access to the space as an **Admin**\n- The Space needs to be on a paid plan\n- URI and token for your Splunk HTTP Event Collector. Refer to the [Splunk documentation](https://docs.splunk.com/Documentation/Splunk/latest/Data/UsetheHTTPEventCollector) for detailed instructions.\n\n### Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **Splunk** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n - **Notification settings** are Netdata specific settings\n - Configuration name - provide a descriptive name for your configuration to easily identify it.\n - Rooms - select the nodes or areas of your infrastructure you want to receive notifications about.\n - Notification - choose the type of notifications you want to receive: All Alerts and unreachable, All Alerts, Critical only.\n - **Integration configuration** are the specific notification integration required settings, which vary by notification method. For Splunk:\n - HTTP Event Collector URI - The URI of your HTTP event collector in Splunk\n - HTTP Event Collector Token - the token that Splunk provided to you when you created the HTTP Event Collector\n\n", - "integration_type": "notification", - "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", - "troubleshooting": "" - }, - { - "id": "notify-cloud-telegram", - "meta": { - "name": "Telegram", - "link": "https://telegram.org/", - "categories": [ - "notify.cloud" - ], - "icon_filename": "telegram.svg" - }, - "keywords": [ - "Telegram" - ], - "overview": "# Telegram\n\nFrom the Cloud interface, you can manage your space's notification settings and from there you can add a specific configuration to get notifications delivered on Telegram.\n", - "setup": "## Setup\n\n### Prerequisites\n\nTo add Telegram notification you need:\n\n- A Netdata Cloud account\n- Access to the space as an **Admin**\n- The Space needs to be on a paid plan\n- The Telegram bot token, chat ID and _optionally_ the topic ID\n\n### Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **Telegram** card click on **+ Add**\n5. 
A modal will be presented to you to enter the required details to enable the configuration:\n - **Notification settings** are Netdata specific settings\n - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it\n - Rooms - by specifying a list of Rooms you select which nodes or areas of your infrastructure you want to be notified about using this configuration\n - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only\n - **Integration configuration** are the specific notification integration required settings, which vary by notification method. For Telegram:\n - Bot Token - the token of your bot\n - Chat ID - the chat ID where your bot will deliver messages\n - Topic ID - the identifier of the chat topic to which your bot will send messages. If omitted or 0, messages will be sent to the General topic. If topics are not supported, messages will be sent to the chat.\n\n### Getting the Telegram bot token, chat ID and topic ID\n\n- Bot token: To create a bot, contact the [@BotFather](https://t.me/BotFather) bot, send the command `/newbot`, and follow the instructions. **Start a conversation with your bot or invite it into the group where you want it to send notifications**.\n- To get the chat ID you have two options:\n - Contact the [@myidbot](https://t.me/myidbot) bot and send the `/getid` command to get your personal chat ID, or invite it into a group and use the `/getgroupid` command to get the group chat ID.\n - Alternatively, you can get the chat ID directly from the bot API. Send your bot a command in the chat you want to use, then check `https://api.telegram.org/bot{YourBotToken}/getUpdates`, e.g. `https://api.telegram.org/bot111122223:7OpFlFFRzRBbrUUmIjj5HF9Ox2pYJZy5/getUpdates` (see the example response below)\n- To get the topic ID, the easiest way is this: Post a message to that topic, then right-click on it and select `Copy Message Link`. Paste it into a scratchpad and notice that it has the following structure `https://t.me/c/XXXXXXXXXX/YY/ZZ`. The topic ID is `YY` (integer).\n\n
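Here is a trimmed, illustrative sketch of what a `getUpdates` response can look like (all values below are placeholders, not real data); the chat ID is the `chat.id` field inside each update, and it is negative for groups:\n\n```json\n{\n \"ok\": true,\n \"result\": [\n {\n \"update_id\": 123456789,\n \"message\": {\n \"message_id\": 1,\n \"chat\": { \"id\": -1001234567890, \"type\": \"supergroup\" },\n \"text\": \"/getid\"\n }\n }\n ]\n}\n```\n\n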
", - "integration_type": "notification", - "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", - "troubleshooting": "" - }, - { - "id": "notify-cloud-victorops", - "meta": { - "name": "Splunk VictorOps", - "link": "https://www.splunk.com/en_us/about-splunk/acquisitions/splunk-on-call.html", - "categories": [ - "notify.cloud" - ], - "icon_filename": "victorops.svg" - }, - "keywords": [ - "VictorOps", - "Splunk", - "On-Call" - ], - "overview": "# Splunk VictorOps\n\nFrom the Cloud interface, you can manage your space's notification settings and from there you can add a specific configuration to get notifications delivered on Splunk On-Call/VictorOps.\n", - "setup": "## Setup\n\n### Prerequisites\n\nTo add Splunk VictorOps notification (also known as Splunk On-Call) you need:\n\n- A Netdata Cloud account\n- Access to the space as an **Admin**\n- The Space needs to be on a paid plan\n- Destination URL for your Splunk VictorOps REST Endpoint Integration. Refer to the [VictorOps documentation](https://help.victorops.com/knowledge-base/rest-endpoint-integration-guide) for detailed instructions.\n\n### Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **Splunk VictorOps** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n - **Notification settings** are Netdata specific settings\n - Configuration name - provide a descriptive name for your configuration to easily identify it.\n - Rooms - select the nodes or areas of your infrastructure you want to receive notifications about.\n - Notification - choose the type of notifications you want to receive: All Alerts and unreachable, All Alerts, Critical only.\n - **Integration configuration** are the specific notification integration required settings, which vary by notification method. For Splunk VictorOps:\n - Destination URL - the URL of your REST endpoint, as provided by VictorOps.\n\n", - "integration_type": "notification", - "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", - "troubleshooting": "" - }, - { - "id": "notify-cloud-webhook", - "meta": { - "name": "Webhook", - "link": "https://en.wikipedia.org/wiki/Webhook", - "categories": [ - "notify.cloud" - ], - "icon_filename": "webhook.svg" - }, - "keywords": [ - "generic webhooks", - "webhooks" - ], - "overview": "# Webhook\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on a webhook using a predefined schema.\n", - "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Netdata Space as an **Admin**\n- The Netdata Space needs to be on a paid plan\n- You need to have an app that allows you to receive webhooks following a predefined schema.\n\n### Netdata Configuration Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **Webhook** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n * **Notification settings** are Netdata specific settings\n - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it\n - Rooms - by specifying a list of Rooms you select which nodes or areas of your infrastructure you want to be notified about using this configuration\n - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only\n * **Integration configuration** are the specific notification integration required settings, which vary by notification method. For Webhook:\n - Webhook URL - the URL of the service that Netdata will send notifications to. In order to keep the communication secured, we only accept HTTPS URLs.\n - Extra headers - these are optional key-value pairs that you can set to be included in the HTTP requests sent to the webhook URL.\n - Authentication Mechanism - Netdata webhook integration supports 3 different authentication mechanisms.\n * Mutual TLS (recommended) - default authentication mechanism used if no other method is selected.\n * Basic - the client sends a request with an Authorization header that includes a base64-encoded string in the format **username:password**. These settings will be required inputs.\n * Bearer - the client sends a request with an Authorization header that includes a **bearer token**. 
This setting will be a required input.\n\n\n ### Webhook service\n\n A webhook integration allows your application to receive real-time alerts from Netdata by sending HTTP requests to a specified URL. In this document, we'll go over the steps to set up a generic webhook integration, including adding headers and implementing different types of authorization mechanisms.\n\n #### Netdata webhook integration\n\n A webhook integration is a way for one service to notify another service about events that occur within it. This is done by sending an HTTP POST request to a specified URL (known as the \"webhook URL\") when an event occurs.\n\n The Netdata webhook integration service will send alert and reachability notifications to the destination service as soon as they are detected.\n\n For alert notifications, the content sent to the destination service contains a JSON object with the following properties:\n\n | field | type | description |\n | :-- | :-- | :-- |\n | message | string | A summary message of the alert. |\n | alarm | string | The alarm the notification is about. |\n | info | string | Additional info related to the alert. |\n | chart | string | The chart associated with the alert. |\n | context | string | The chart context. |\n | space | string | The space where the node that raised the alert is assigned. |\n | Rooms | object[object(string,string)] | Object with the list of Room names and URLs the node belongs to. |\n | family | string | Context family. |\n | class | string | Classification of the alert, e.g. \"Error\". |\n | severity | string | Alert severity, can be one of \"warning\", \"critical\" or \"clear\". |\n | date | string | Date of the alert in ISO8601 format. |\n | duration | string | Duration the alert has been raised. |\n | additional_active_critical_alerts | integer | Number of additional critical alerts currently existing on the same node. |\n | additional_active_warning_alerts | integer | Number of additional warning alerts currently existing on the same node. |\n | alarm_url | string | Netdata Cloud URL for this alarm. |\n\n For reachability notifications, the JSON object will contain the following properties:\n\n | field | type | description |\n | :-- | :-- | :-- |\n | message | string | A summary message of the reachability alert. |\n | url | string | Netdata Cloud URL for the host experiencing the reachability alert. |\n | host | string | The host experiencing the reachability alert. |\n | severity | string | Severity for this notification. If the host is reachable, severity will be 'info'; if the host is unreachable, it will be 'critical'. |\n | status | object | An object with the status information. |\n | status.reachable | boolean | true if the host is reachable, false otherwise |\n | status.text | string | Can be 'reachable' or 'unreachable' |\n\n #### Extra headers\n\n When setting up a webhook integration, the user can specify a set of headers to be included in the HTTP requests sent to the webhook URL.\n\n By default, the following headers will be sent in the HTTP request:\n\n | **Header** | **Value** |\n |:-------------------------------:|-----------------------------|\n | Content-Type | application/json |\n\n #### Authentication mechanisms\n\n Netdata webhook integration supports 3 different authentication mechanisms:\n\n ##### Mutual TLS authentication (recommended)\n\n In mutual Transport Layer Security (mTLS) authentication, the client and the server authenticate each other using X.509 certificates. 
This ensures that the client is connecting to the intended server, and that the server is only accepting connections from authorized clients.\n\n This is the default authentication mechanism used if no other method is selected.\n\n To take advantage of mutual TLS, you can configure your server to verify Netdata's client certificate. In order to achieve this, the Netdata client sending the notification supports mutual TLS (mTLS) to identify itself with a client certificate that your server can validate.\n\n The steps to perform this validation are as follows:\n\n - Store the Netdata CA certificate in a file on your disk. The content of this file should be:\n\n
\n Netdata CA certificate\n\n ```\n -----BEGIN CERTIFICATE-----\n MIIF0jCCA7qgAwIBAgIUDV0rS5jXsyNX33evHEQOwn9fPo0wDQYJKoZIhvcNAQEN\n BQAwgYAxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH\n Ew1TYW4gRnJhbmNpc2NvMRYwFAYDVQQKEw1OZXRkYXRhLCBJbmMuMRIwEAYDVQQL\n EwlDbG91ZCBTUkUxGDAWBgNVBAMTD05ldGRhdGEgUm9vdCBDQTAeFw0yMzAyMjIx\n MjQzMDBaFw0zMzAyMTkxMjQzMDBaMIGAMQswCQYDVQQGEwJVUzETMBEGA1UECBMK\n Q2FsaWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEWMBQGA1UEChMNTmV0\n ZGF0YSwgSW5jLjESMBAGA1UECxMJQ2xvdWQgU1JFMRgwFgYDVQQDEw9OZXRkYXRh\n IFJvb3QgQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCwIg7z3R++\n ppQYYVVoMIDlhWO3qVTMsAQoJYEvVa6fqaImUBLW/k19LUaXgUJPohB7gBp1pkjs\n QfY5dBo8iFr7MDHtyiAFjcQV181sITTMBEJwp77R4slOXCvrreizhTt1gvf4S1zL\n qeHBYWEgH0RLrOAqD0jkOHwewVouO0k3Wf2lEbCq3qRk2HeDvkv0LR7sFC+dDms8\n fDHqb/htqhk+FAJELGRqLeaFq1Z5Eq1/9dk4SIeHgK5pdYqsjpBzOTmocgriw6he\n s7F3dOec1ZZdcBEAxOjbYt4e58JwuR81cWAVMmyot5JNCzYVL9e5Vc5n22qt2dmc\n Tzw2rLOPt9pT5bzbmyhcDuNg2Qj/5DySAQ+VQysx91BJRXyUimqE7DwQyLhpQU72\n jw29lf2RHdCPNmk8J1TNropmpz/aI7rkperPugdOmxzP55i48ECbvDF4Wtazi+l+\n 4kx7ieeLfEQgixy4lRUUkrgJlIDOGbw+d2Ag6LtOgwBiBYnDgYpvLucnx5cFupPY\n Cy3VlJ4EKUeQQSsz5kVmvotk9MED4sLx1As8V4e5ViwI5dCsRfKny7BeJ6XNPLnw\n PtMh1hbiqCcDmB1urCqXcMle4sRhKccReYOwkLjLLZ80A+MuJuIEAUUuEPCwywzU\n R7pagYsmvNgmwIIuJtB6mIJBShC7TpJG+wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMC\n AQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU9IbvOsPSUrpr8H2zSafYVQ9e\n Ft8wDQYJKoZIhvcNAQENBQADggIBABQ08aI31VKZs8jzg+y/QM5cvzXlVhcpkZsY\n 1VVBr0roSBw9Pld9SERrEHto8PVXbadRxeEs4sKivJBKubWAooQ6NTvEB9MHuGnZ\n VCU+N035Gq/mhBZgtIs/Zz33jTB2ju3G4Gm9VTZbVqd0OUxFs41Iqvi0HStC3/Io\n rKi7crubmp5f2cNW1HrS++ScbTM+VaKVgQ2Tg5jOjou8wtA+204iYXlFpw9Q0qnP\n qq6ix7TfLLeRVp6mauwPsAJUgHZluz7yuv3r7TBdukU4ZKUmfAGIPSebtB3EzXfH\n 7Y326xzv0hEpjvDHLy6+yFfTdBSrKPsMHgc9bsf88dnypNYL8TUiEHlcTgCGU8ts\n ud8sWN2M5FEWbHPNYRVfH3xgY2iOYZzn0i+PVyGryOPuzkRHTxDLPIGEWE5susM4\n X4bnNJyKH1AMkBCErR34CLXtAe2ngJlV/V3D4I8CQFJdQkn9tuznohUU/j80xvPH\n FOcDGQYmh4m2aIJtlNVP6+/92Siugb5y7HfslyRK94+bZBg2D86TcCJWaaZOFUrR\n Y3WniYXsqM5/JI4OOzu7dpjtkJUYvwtg7Qb5jmm8Ilf5rQZJhuvsygzX6+WM079y\n nsjoQAm6OwpTN5362vE9SYu1twz7KdzBlUkDhePEOgQkWfLHBJWwB+PvB1j/cUA3\n 5zrbwvQf\n -----END CERTIFICATE-----\n ```\n
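\n\n - Optionally, sanity-check the certificate you just stored before wiring it into your web server - a quick verification sketch, assuming you saved it as `/path/to/Netdata_CA.pem`:\n\n ```bash\n # print the subject and validity window of the stored Netdata CA certificate\n openssl x509 -in /path/to/Netdata_CA.pem -noout -subject -dates\n ```\n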
\n\n - Enable client certificate validation on the web server that is doing the TLS termination. Below we show you how to perform this configuration in `NGINX` and `Apache`.\n\n **NGINX**\n\n ```bash\n server {\n listen 443 ssl default_server;\n\n # ... existing SSL configuration for server authentication ...\n ssl_verify_client on;\n ssl_client_certificate /path/to/Netdata_CA.pem;\n\n location / {\n if ($ssl_client_s_dn !~ \"CN=app.netdata.cloud\") {\n return 403;\n }\n # ... existing location configuration ...\n }\n }\n ```\n\n **Apache**\n\n ```bash\n Listen 443\n\n # ... existing SSL configuration for server authentication ...\n SSLVerifyClient require\n SSLCACertificateFile \"/path/to/Netdata_CA.pem\"\n\n <Directory />\n Require expr \"%{SSL_CLIENT_S_DN_CN} == 'app.netdata.cloud'\"\n # ... existing directory configuration ...\n </Directory>\n ```\n\n ##### Basic authentication\n\n In basic authorization, the client sends a request with an Authorization header that includes a base64-encoded string in the format username:password. The server then uses this information to authenticate the client. If this authentication method is selected, the user can set the user and password that will be used when connecting to the destination service.\n\n ##### Bearer token authentication\n\n In bearer token authentication, the client sends a request with an Authorization header that includes a bearer token. The server then uses this token to authenticate the client. Bearer tokens are typically generated by an authentication service, and are passed to the client after a successful authentication. If this method is selected, the user can set the token to be used for connecting to the destination service.\n\n ##### Challenge secret\n\n To validate that you have ownership of the web application that will receive the webhook events, we are using a challenge-response check mechanism.\n\n This mechanism works as follows:\n\n - The challenge secret parameter that you provide is a shared secret between you and Netdata only.\n - On your request for creating a new Webhook integration, we will make a GET request to the URL of the webhook, adding a query parameter `crc_token`, consisting of a random string.\n - Your application will receive this request and must construct a response consisting of a base64-encoded HMAC SHA-256 hash created from the crc_token and the shared secret. The response will be in the format:\n\n ```json\n {\n \"response_token\": \"sha256=9GKoHJYmcHIkhD+C182QWN79YBd+D+Vkj4snmZrfNi4=\"\n }\n ```\n\n - We will compare your application's response with the hash that we will generate using the challenge secret, and if they are the same, the integration creation will succeed.\n\n We will do this validation every time you update your integration configuration.\n\n - Response requirements:\n - A base64-encoded HMAC SHA-256 hash created from the crc_token and the shared secret.\n - Valid response_token and JSON format.\n - Latency less than 5 seconds.\n - 200 HTTP response code.\n\n **Example response token generation in Python:**\n\n Here you can see how to define a handler for a Flask application in Python 3:\n\n ```python\n import base64\n import hashlib\n import hmac\n import json\n\n from flask import Flask, request\n\n app = Flask(__name__)\n\n key = 'YOUR_CHALLENGE_SECRET'\n\n @app.route('/webhooks/netdata')\n def webhook_challenge():\n token = request.args.get('crc_token').encode('ascii')\n\n # creates HMAC SHA-256 hash from incoming token and your challenge secret\n sha256_hash_digest = hmac.new(key.encode(),\n msg=token,\n digestmod=hashlib.sha256).digest()\n\n # construct response data with base64 encoded hash\n response = {\n 'response_token': 'sha256=' + base64.b64encode(sha256_hash_digest).decode('ascii')\n }\n\n # returns properly formatted json response\n return json.dumps(response)\n ```
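\n\n You can exercise this handler locally before pointing Netdata at it - a minimal test sketch, assuming the Flask app above is running on `localhost:5000` (the port and token value are placeholders):\n\n ```bash\n # simulate the challenge request that Netdata sends when the integration is created\n curl \"http://localhost:5000/webhooks/netdata?crc_token=some_random_string\"\n ```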
\n\n", - "integration_type": "notification", - "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", - "troubleshooting": "" - }, { "id": "notify-custom", "meta": { @@ -21740,9 +21625,9 @@ "custom" ], "overview": "# Custom\n\nNetdata Agent's alert notification feature allows you to send custom notifications to any endpoint you choose.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_CUSTOM | Set `SEND_CUSTOM` to YES | YES | yes |\n| DEFAULT_RECIPIENT_CUSTOM | This value is dependent on how you handle the `${to}` variable inside the `custom_sender()` function. | | yes |\n| custom_sender() | You can look at the other senders in `/usr/libexec/netdata/plugins.d/alarm-notify.sh` for examples of how to modify the function in this configuration file. | | no |\n\n##### DEFAULT_RECIPIENT_CUSTOM\n\nAll roles will default to this variable if left unconfigured. 
You can edit `DEFAULT_RECIPIENT_CUSTOM` with the variable you want, in the following entries at the bottom of the same file:\n```\nrole_recipients_custom[sysadmin]=\"systems\"\nrole_recipients_custom[domainadmin]=\"domains\"\nrole_recipients_custom[dba]=\"databases systems\"\nrole_recipients_custom[webmaster]=\"marketing development\"\nrole_recipients_custom[proxyadmin]=\"proxy-admin\"\nrole_recipients_custom[sitemgr]=\"sites\"\n```\n\n\n##### custom_sender()\n\nThe following is a sample custom_sender() function in health_alarm_notify.conf, to send an SMS via an imaginary HTTPS endpoint to the SMS gateway:\n```\ncustom_sender() {\n # example human readable SMS\n local msg=\"${host} ${status_message}: ${alarm} ${raised_for}\"\n\n # limit it to 160 characters and encode it for use in a URL\n urlencode \"${msg:0:160}\" >/dev/null; msg=\"${REPLY}\"\n\n # a space separated list of the recipients to send alarms to\n to=\"${1}\"\n\n for phone in ${to}; do\n httpcode=$(docurl -X POST \\\n --data-urlencode \"From=XXX\" \\\n --data-urlencode \"To=${phone}\" \\\n --data-urlencode \"Body=${msg}\" \\\n -u \"${accountsid}:${accounttoken}\" \\\n https://domain.website.com/)\n\n if [ \"${httpcode}\" = \"200\" ]; then\n info \"sent custom notification ${msg} to ${phone}\"\n sent=$((sent + 1))\n else\n error \"failed to send custom notification ${msg} to ${phone} with HTTP error code ${httpcode}.\"\n fi\n done\n}\n```\n\nThe supported variables that you can use for the function's `msg` variable are:\n\n| Variable name | Description |\n|:---------------------------:|:---------------------------------------------------------------------------------|\n| `${alarm}` | Like \"name = value units\" |\n| `${status_message}` | Like \"needs attention\", \"recovered\", \"is critical\" |\n| `${severity}` | Like \"Escalated to CRITICAL\", \"Recovered from WARNING\" |\n| `${raised_for}` | Like \"(alarm was raised for 10 minutes)\" |\n| `${host}` | The host generated this event |\n| `${url_host}` | Same as ${host} but URL encoded |\n| `${unique_id}` | The unique id of this event |\n| `${alarm_id}` | The unique id of the alarm that generated this event |\n| `${event_id}` | The incremental id of the event, for this alarm id |\n| `${when}` | The timestamp this event occurred |\n| `${name}` | The name of the alarm, as given in netdata health.d entries |\n| `${url_name}` | Same as ${name} but URL encoded |\n| `${chart}` | The name of the chart (type.id) |\n| `${url_chart}` | Same as ${chart} but URL encoded |\n| `${status}` | The current status : REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL |\n| `${old_status}` | The previous status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL |\n| `${value}` | The current value of the alarm |\n| `${old_value}` | The previous value of the alarm |\n| `${src}` | The line number and file the alarm has been configured |\n| `${duration}` | The duration in seconds of the previous alarm state |\n| `${duration_txt}` | Same as ${duration} for humans |\n| `${non_clear_duration}` | The total duration in seconds this is/was non-clear |\n| `${non_clear_duration_txt}` | Same as ${non_clear_duration} for humans |\n| `${units}` | The units of the value |\n| `${info}` | A short description of the alarm |\n| `${value_string}` | Friendly value (with units) |\n| `${old_value_string}` | Friendly old value (with units) |\n| `${image}` | The URL of an image to represent the status of the alarm |\n| `${color}` | A color in AABBCC format for the alarm |\n| `${goto_url}` | The URL the user 
can click to see the netdata dashboard |\n| `${calc_expression}` | The expression evaluated to provide the value for the alarm |\n| `${calc_param_values}` | The value of the variables in the evaluated expression |\n| `${total_warnings}` | The total number of alarms in WARNING state on the host |\n| `${total_critical}` | The total number of alarms in CRITICAL state on the host |\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# custom notifications\n\nSEND_CUSTOM=\"YES\"\nDEFAULT_RECIPIENT_CUSTOM=\"\"\n\n# The custom_sender() is a custom function to do whatever you need to do\ncustom_sender() {\n # example human readable SMS\n local msg=\"${host} ${status_message}: ${alarm} ${raised_for}\"\n\n # limit it to 160 characters and encode it for use in a URL\n urlencode \"${msg:0:160}\" >/dev/null; msg=\"${REPLY}\"\n\n # a space separated list of the recipients to send alarms to\n to=\"${1}\"\n\n for phone in ${to}; do\n httpcode=$(docurl -X POST \\\n --data-urlencode \"From=XXX\" \\\n --data-urlencode \"To=${phone}\" \\\n --data-urlencode \"Body=${msg}\" \\\n -u \"${accountsid}:${accounttoken}\" \\\n https://domain.website.com/)\n\n if [ \"${httpcode}\" = \"200\" ]; then\n info \"sent custom notification ${msg} to ${phone}\"\n sent=$((sent + 1))\n else\n error \"failed to send custom notification ${msg} to ${phone} with HTTP error code ${httpcode}.\"\n fi\n done\n}\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_CUSTOM | Set `SEND_CUSTOM` to YES | YES | yes |\n| DEFAULT_RECIPIENT_CUSTOM | This value is dependent on how you handle the `${to}` variable inside the `custom_sender()` function. | | yes |\n| custom_sender() | You can look at the other senders in `/usr/libexec/netdata/plugins.d/alarm-notify.sh` for examples of how to modify the function in this configuration file. | | no |\n\n##### DEFAULT_RECIPIENT_CUSTOM\n\nAll roles will default to this variable if left unconfigured. 
You can edit `DEFAULT_RECIPIENT_CUSTOM` with the variable you want, in the following entries at the bottom of the same file:\n```\nrole_recipients_custom[sysadmin]=\"systems\"\nrole_recipients_custom[domainadmin]=\"domains\"\nrole_recipients_custom[dba]=\"databases systems\"\nrole_recipients_custom[webmaster]=\"marketing development\"\nrole_recipients_custom[proxyadmin]=\"proxy-admin\"\nrole_recipients_custom[sitemgr]=\"sites\"\n```\n\n\n##### custom_sender()\n\nThe following is a sample custom_sender() function in health_alarm_notify.conf, to send an SMS via an imaginary HTTPS endpoint to the SMS gateway:\n```\ncustom_sender() {\n # example human readable SMS\n local msg=\"${host} ${status_message}: ${alarm} ${raised_for}\"\n\n # limit it to 160 characters and encode it for use in a URL\n urlencode \"${msg:0:160}\" >/dev/null; msg=\"${REPLY}\"\n\n # a space separated list of the recipients to send alarms to\n to=\"${1}\"\n\n for phone in ${to}; do\n httpcode=$(docurl -X POST \\\n --data-urlencode \"From=XXX\" \\\n --data-urlencode \"To=${phone}\" \\\n --data-urlencode \"Body=${msg}\" \\\n -u \"${accountsid}:${accounttoken}\" \\\n https://domain.website.com/)\n\n if [ \"${httpcode}\" = \"200\" ]; then\n info \"sent custom notification ${msg} to ${phone}\"\n sent=$((sent + 1))\n else\n error \"failed to send custom notification ${msg} to ${phone} with HTTP error code ${httpcode}.\"\n fi\n done\n}\n```\n\nThe supported variables that you can use for the function's `msg` variable are:\n\n| Variable name | Description |\n|:---------------------------:|:---------------------------------------------------------------------------------|\n| `${alarm}` | Like \"name = value units\" |\n| `${status_message}` | Like \"needs attention\", \"recovered\", \"is critical\" |\n| `${severity}` | Like \"Escalated to CRITICAL\", \"Recovered from WARNING\" |\n| `${raised_for}` | Like \"(alarm was raised for 10 minutes)\" |\n| `${host}` | The host that generated this event |\n| `${url_host}` | Same as ${host} but URL encoded |\n| `${unique_id}` | The unique id of this event |\n| `${alarm_id}` | The unique id of the alarm that generated this event |\n| `${event_id}` | The incremental id of the event, for this alarm id |\n| `${when}` | The timestamp this event occurred |\n| `${name}` | The name of the alarm, as given in netdata health.d entries |\n| `${url_name}` | Same as ${name} but URL encoded |\n| `${chart}` | The name of the chart (type.id) |\n| `${url_chart}` | Same as ${chart} but URL encoded |\n| `${status}` | The current status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL |\n| `${old_status}` | The previous status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL |\n| `${value}` | The current value of the alarm |\n| `${old_value}` | The previous value of the alarm |\n| `${src}` | The line number and file where the alarm has been configured |\n| `${duration}` | The duration in seconds of the previous alarm state |\n| `${duration_txt}` | Same as ${duration} for humans |\n| `${non_clear_duration}` | The total duration in seconds this is/was non-clear |\n| `${non_clear_duration_txt}` | Same as ${non_clear_duration} for humans |\n| `${units}` | The units of the value |\n| `${info}` | A short description of the alarm |\n| `${value_string}` | Friendly value (with units) |\n| `${old_value_string}` | Friendly old value (with units) |\n| `${image}` | The URL of an image to represent the status of the alarm |\n| `${color}` | A color in AABBCC format for the alarm |\n| `${goto_url}` | The URL the user 
can click to see the netdata dashboard |\n| `${calc_expression}` | The expression evaluated to provide the value for the alarm |\n| `${calc_param_values}` | The value of the variables in the evaluated expression |\n| `${total_warnings}` | The total number of alarms in WARNING state on the host |\n| `${total_critical}` | The total number of alarms in CRITICAL state on the host |\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# custom notifications\n\nSEND_CUSTOM=\"YES\"\nDEFAULT_RECIPIENT_CUSTOM=\"\"\n\n# The custom_sender() is a custom function to do whatever you need to do\ncustom_sender() {\n # example human readable SMS\n local msg=\"${host} ${status_message}: ${alarm} ${raised_for}\"\n\n # limit it to 160 characters and encode it for use in a URL\n urlencode \"${msg:0:160}\" >/dev/null; msg=\"${REPLY}\"\n\n # a space separated list of the recipients to send alarms to\n to=\"${1}\"\n\n for phone in ${to}; do\n httpcode=$(docurl -X POST \\\n --data-urlencode \"From=XXX\" \\\n --data-urlencode \"To=${phone}\" \\\n --data-urlencode \"Body=${msg}\" \\\n -u \"${accountsid}:${accounttoken}\" \\\n https://domain.website.com/)\n\n if [ \"${httpcode}\" = \"200\" ]; then\n info \"sent custom notification ${msg} to ${phone}\"\n sent=$((sent + 1))\n else\n error \"failed to send custom notification ${msg} to ${phone} with HTTP error code ${httpcode}.\"\n fi\n done\n}\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/custom/metadata.yaml" }, { @@ -21759,9 +21644,9 @@ "Discord" ], "overview": "# Discord\n\nSend notifications to Discord using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The incoming webhook URL as given by Discord. Create a webhook by following the official [Discord documentation](https://support.discord.com/hc/en-us/articles/228383668-Intro-to-Webhooks). 
You can use the same on all your Netdata servers (or you can have multiple if you like - your decision).\n- One or more Discord channels to post the messages to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_DISCORD | Set `SEND_DISCORD` to YES | YES | yes |\n| DISCORD_WEBHOOK_URL | set `DISCORD_WEBHOOK_URL` to your webhook URL. | | yes |\n| DEFAULT_RECIPIENT_DISCORD | Set `DEFAULT_RECIPIENT_DISCORD` to the channel you want the alert notifications to be sent to. You can define multiple channels like this: `alerts` `systems`. | | yes |\n\n##### DEFAULT_RECIPIENT_DISCORD\n\nAll roles will default to this variable if left unconfigured.\nYou can then have different channels per role, by editing `DEFAULT_RECIPIENT_DISCORD` with the channel you want, in the following entries at the bottom of the same file:\n```conf\nrole_recipients_discord[sysadmin]=\"systems\"\nrole_recipients_discord[domainadmin]=\"domains\"\nrole_recipients_discord[dba]=\"databases systems\"\nrole_recipients_discord[webmaster]=\"marketing development\"\nrole_recipients_discord[proxyadmin]=\"proxy-admin\"\nrole_recipients_discord[sitemgr]=\"sites\"\n```\n\nThe values you provide should already exist as Discord channels in your server.\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# discord (discordapp.com) global notification options\n\nSEND_DISCORD=\"YES\"\nDISCORD_WEBHOOK_URL=\"https://discord.com/api/webhooks/XXXXXXXXXXXXX/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\nDEFAULT_RECIPIENT_DISCORD=\"alerts\"\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The incoming webhook URL as given by Discord. Create a webhook by following the official [Discord documentation](https://support.discord.com/hc/en-us/articles/228383668-Intro-to-Webhooks). You can use the same on all your Netdata servers (or you can have multiple if you like - your decision).\n- One or more Discord channels to post the messages to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_DISCORD | Set `SEND_DISCORD` to YES | YES | yes |\n| DISCORD_WEBHOOK_URL | set `DISCORD_WEBHOOK_URL` to your webhook URL. 
| | yes |\n| DEFAULT_RECIPIENT_DISCORD | Set `DEFAULT_RECIPIENT_DISCORD` to the channel you want the alert notifications to be sent to. You can define multiple channels like this: `alerts` `systems`. | | yes |\n\n##### DEFAULT_RECIPIENT_DISCORD\n\nAll roles will default to this variable if left unconfigured.\nYou can then have different channels per role, by editing `DEFAULT_RECIPIENT_DISCORD` with the channel you want, in the following entries at the bottom of the same file:\n```text\nrole_recipients_discord[sysadmin]=\"systems\"\nrole_recipients_discord[domainadmin]=\"domains\"\nrole_recipients_discord[dba]=\"databases systems\"\nrole_recipients_discord[webmaster]=\"marketing development\"\nrole_recipients_discord[proxyadmin]=\"proxy-admin\"\nrole_recipients_discord[sitemgr]=\"sites\"\n```\n\nThe values you provide should already exist as Discord channels in your server.\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# discord (discordapp.com) global notification options\n\nSEND_DISCORD=\"YES\"\nDISCORD_WEBHOOK_URL=\"https://discord.com/api/webhooks/XXXXXXXXXXXXX/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\nDEFAULT_RECIPIENT_DISCORD=\"alerts\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/discord/metadata.yaml" }, { @@ -21778,9 +21663,9 @@ "Dynatrace" ], "overview": "# Dynatrace\n\nDynatrace allows you to receive notifications using their Events REST API. See the [Dynatrace documentation](https://www.dynatrace.com/support/help/dynatrace-api/environment-api/events-v2/post-event) about POSTing an event in the Events API for more details.\nYou can send notifications to Dynatrace using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A Dynatrace Server. You can use the same on all your Netdata servers but make sure the server is network visible from your Netdata hosts. The Dynatrace server should be with protocol prefixed (http:// or https://), for example: https://monitor.example.com.\n- An API Token. Generate a secure access API token that enables access to your Dynatrace monitoring data via the REST-based API. See [Dynatrace API - Authentication](https://www.dynatrace.com/support/help/extend-dynatrace/dynatrace-api/basics/dynatrace-api-authentication/) for more details.\n- An API Space. This is the URL part of the page you have access in order to generate the API Token. For example, the URL for a generated API token might look like: https://monitor.illumineit.com/e/2a93fe0e-4cd5-469a-9d0d-1a064235cfce/#settings/integration/apikeys;gf=all In that case, the Space is 2a93fe0e-4cd5-469a-9d0d-1a064235cfce.\n- A Server Tag. 
To generate one on your Dynatrace Server, go to Settings --> Tags --> Manually applied tags and create the Tag. The Netdata alarm is sent as a Dynatrace Event to be correlated with all those hosts tagged with this Tag you have created.\n- Terminal access to the Agent you wish to configure\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_DYNATRACE | Set `SEND_DYNATRACE` to YES | YES | yes |\n| DYNATRACE_SERVER | Set `DYNATRACE_SERVER` to the Dynatrace server with the protocol prefix, for example `https://monitor.example.com`. | | yes |\n| DYNATRACE_TOKEN | Set `DYNATRACE_TOKEN` to your Dynatrace API authentication token | | yes |\n| DYNATRACE_SPACE | Set `DYNATRACE_SPACE` to the API Space, it is the URL part of the page you have access in order to generate the API Token. | | yes |\n| DYNATRACE_TAG_VALUE | Set `DYNATRACE_TAG_VALUE` to your Dynatrace Server Tag. | | yes |\n| DYNATRACE_ANNOTATION_TYPE | `DYNATRACE_ANNOTATION_TYPE` can be left to its default value Netdata Alarm, but you can change it to better fit your needs. | Netdata Alarm | no |\n| DYNATRACE_EVENT | Set `DYNATRACE_EVENT` to the Dynatrace eventType you want. | Netdata Alarm | no |\n\n##### DYNATRACE_SPACE\n\nFor example, the URL for a generated API token might look like: https://monitor.illumineit.com/e/2a93fe0e-4cd5-469a-9d0d-1a064235cfce/#settings/integration/apikeys;gf=all In that case, the Space is 2a93fe0e-4cd5-469a-9d0d-1a064235cfce.\n\n\n##### DYNATRACE_EVENT\n\n`AVAILABILITY_EVENT`, `CUSTOM_ALERT`, `CUSTOM_ANNOTATION`, `CUSTOM_CONFIGURATION`, `CUSTOM_DEPLOYMENT`, `CUSTOM_INFO`, `ERROR_EVENT`,\n`MARKED_FOR_TERMINATION`, `PERFORMANCE_EVENT`, `RESOURCE_CONTENTION_EVENT`.\nYou can read more [here](https://www.dynatrace.com/support/help/dynatrace-api/environment-api/events-v2/post-event#request-body-objects).\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Dynatrace global notification options\n\nSEND_DYNATRACE=\"YES\"\nDYNATRACE_SERVER=\"https://monitor.example.com\"\nDYNATRACE_TOKEN=\"XXXXXXX\"\nDYNATRACE_SPACE=\"2a93fe0e-4cd5-469a-9d0d-1a064235cfce\"\nDYNATRACE_TAG_VALUE=\"SERVERTAG\"\nDYNATRACE_ANNOTATION_TYPE=\"Netdata Alert\"\nDYNATRACE_EVENT=\"AVAILABILITY_EVENT\"\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A Dynatrace Server. You can use the same on all your Netdata servers but make sure the server is network visible from your Netdata hosts. The Dynatrace server should be specified with the protocol prefix (http:// or https://), for example: https://monitor.example.com.\n- An API Token. Generate a secure access API token that enables access to your Dynatrace monitoring data via the REST-based API. See [Dynatrace API - Authentication](https://www.dynatrace.com/support/help/extend-dynatrace/dynatrace-api/basics/dynatrace-api-authentication/) for more details.\n- An API Space. This is the URL part of the page you have access to in order to generate the API Token. 
For example, the URL for a generated API token might look like: https://monitor.illumineit.com/e/2a93fe0e-4cd5-469a-9d0d-1a064235cfce/#settings/integration/apikeys;gf=all In that case, the Space is 2a93fe0e-4cd5-469a-9d0d-1a064235cfce.\n- A Server Tag. To generate one on your Dynatrace Server, go to Settings --> Tags --> Manually applied tags and create the Tag. The Netdata alarm is sent as a Dynatrace Event to be correlated with all those hosts tagged with this Tag you have created.\n- Terminal access to the Agent you wish to configure\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_DYNATRACE | Set `SEND_DYNATRACE` to YES | YES | yes |\n| DYNATRACE_SERVER | Set `DYNATRACE_SERVER` to the Dynatrace server with the protocol prefix, for example `https://monitor.example.com`. | | yes |\n| DYNATRACE_TOKEN | Set `DYNATRACE_TOKEN` to your Dynatrace API authentication token. | | yes |\n| DYNATRACE_SPACE | Set `DYNATRACE_SPACE` to the API Space; it is the URL part of the page you have access to in order to generate the API Token. | | yes |\n| DYNATRACE_TAG_VALUE | Set `DYNATRACE_TAG_VALUE` to your Dynatrace Server Tag. | | yes |\n| DYNATRACE_ANNOTATION_TYPE | `DYNATRACE_ANNOTATION_TYPE` can be left to its default value Netdata Alarm, but you can change it to better fit your needs. | Netdata Alarm | no |\n| DYNATRACE_EVENT | Set `DYNATRACE_EVENT` to the Dynatrace eventType you want. 
| Netdata Alarm | no |\n\n##### DYNATRACE_SPACE\n\nFor example, the URL for a generated API token might look like: https://monitor.illumineit.com/e/2a93fe0e-4cd5-469a-9d0d-1a064235cfce/#settings/integration/apikeys;gf=all In that case, the Space is 2a93fe0e-4cd5-469a-9d0d-1a064235cfce.\n\n\n##### DYNATRACE_EVENT\n\n`AVAILABILITY_EVENT`, `CUSTOM_ALERT`, `CUSTOM_ANNOTATION`, `CUSTOM_CONFIGURATION`, `CUSTOM_DEPLOYMENT`, `CUSTOM_INFO`, `ERROR_EVENT`,\n`MARKED_FOR_TERMINATION`, `PERFORMANCE_EVENT`, `RESOURCE_CONTENTION_EVENT`.\nYou can read more [here](https://www.dynatrace.com/support/help/dynatrace-api/environment-api/events-v2/post-event#request-body-objects).\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Dynatrace global notification options\n\nSEND_DYNATRACE=\"YES\"\nDYNATRACE_SERVER=\"https://monitor.example.com\"\nDYNATRACE_TOKEN=\"XXXXXXX\"\nDYNATRACE_SPACE=\"2a93fe0e-4cd5-469a-9d0d-1a064235cfce\"\nDYNATRACE_TAG_VALUE=\"SERVERTAG\"\nDYNATRACE_ANNOTATION_TYPE=\"Netdata Alert\"\nDYNATRACE_EVENT=\"AVAILABILITY_EVENT\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/dynatrace/metadata.yaml" }, { @@ -21797,9 +21682,9 @@ "email" ], "overview": "# Email\n\nSend notifications via Email using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A working sendmail command is required for email alerts to work. Almost all MTAs provide a sendmail interface. Netdata sends all emails as user netdata, so make sure your sendmail works for local users.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| EMAIL_SENDER | You can change `EMAIL_SENDER` to the email address sending the notifications. | netdata | no |\n| SEND_EMAIL | Set `SEND_EMAIL` to YES | YES | yes |\n| DEFAULT_RECIPIENT_EMAIL | Set `DEFAULT_RECIPIENT_EMAIL` to the email address you want the email to be sent by default. You can define multiple email addresses like this: `alarms@example.com` `systems@example.com`. 
| root | yes |\n\n##### DEFAULT_RECIPIENT_EMAIL\n\nAll roles will default to this variable if left unconfigured.\nThe `DEFAULT_RECIPIENT_CUSTOM` can be edited in the following entries at the bottom of the same file:\n```conf\nrole_recipients_email[sysadmin]=\"systems@example.com\"\nrole_recipients_email[domainadmin]=\"domains@example.com\"\nrole_recipients_email[dba]=\"databases@example.com systems@example.com\"\nrole_recipients_email[webmaster]=\"marketing@example.com development@example.com\"\nrole_recipients_email[proxyadmin]=\"proxy-admin@example.com\"\nrole_recipients_email[sitemgr]=\"sites@example.com\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# email global notification options\n\nEMAIL_SENDER=\"example@domain.com\"\nSEND_EMAIL=\"YES\"\nDEFAULT_RECIPIENT_EMAIL=\"recipient@example.com\"\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A working sendmail command is required for email alerts to work. Almost all MTAs provide a sendmail interface. Netdata sends all emails as user netdata, so make sure your sendmail works for local users.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| EMAIL_SENDER | You can change `EMAIL_SENDER` to the email address sending the notifications. | netdata | no |\n| SEND_EMAIL | Set `SEND_EMAIL` to YES | YES | yes |\n| DEFAULT_RECIPIENT_EMAIL | Set `DEFAULT_RECIPIENT_EMAIL` to the email address you want the email to be sent to by default. You can define multiple email addresses like this: `alarms@example.com` `systems@example.com`. 
| root | yes |\n\n##### DEFAULT_RECIPIENT_EMAIL\n\nAll roles will default to this variable if left unconfigured.\nThe `DEFAULT_RECIPIENT_EMAIL` can be edited in the following entries at the bottom of the same file:\n```text\nrole_recipients_email[sysadmin]=\"systems@example.com\"\nrole_recipients_email[domainadmin]=\"domains@example.com\"\nrole_recipients_email[dba]=\"databases@example.com systems@example.com\"\nrole_recipients_email[webmaster]=\"marketing@example.com development@example.com\"\nrole_recipients_email[proxyadmin]=\"proxy-admin@example.com\"\nrole_recipients_email[sitemgr]=\"sites@example.com\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# email global notification options\n\nEMAIL_SENDER=\"example@domain.com\"\nSEND_EMAIL=\"YES\"\nDEFAULT_RECIPIENT_EMAIL=\"recipient@example.com\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/email/metadata.yaml" }, { @@ -21816,9 +21701,9 @@ "Flock" ], "overview": "# Flock\n\nSend notifications to Flock using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The incoming webhook URL as given by flock.com. You can use the same on all your Netdata servers (or you can have multiple if you like). Read more about flock webhooks and how to get one [here](https://admin.flock.com/webhooks).\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_FLOCK | Set `SEND_FLOCK` to YES | YES | yes |\n| FLOCK_WEBHOOK_URL | set `FLOCK_WEBHOOK_URL` to your webhook URL. | | yes |\n| DEFAULT_RECIPIENT_FLOCK | Set `DEFAULT_RECIPIENT_FLOCK` to the Flock channel you want the alert notifications to be sent to. All roles will default to this variable if left unconfigured. 
| | yes |\n\n##### DEFAULT_RECIPIENT_FLOCK\n\nYou can have different channels per role, by editing DEFAULT_RECIPIENT_FLOCK with the channel you want, in the following entries at the bottom of the same file:\n```conf\nrole_recipients_flock[sysadmin]=\"systems\"\nrole_recipients_flock[domainadmin]=\"domains\"\nrole_recipients_flock[dba]=\"databases systems\"\nrole_recipients_flock[webmaster]=\"marketing development\"\nrole_recipients_flock[proxyadmin]=\"proxy-admin\"\nrole_recipients_flock[sitemgr]=\"sites\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# flock (flock.com) global notification options\n\nSEND_FLOCK=\"YES\"\nFLOCK_WEBHOOK_URL=\"https://api.flock.com/hooks/sendMessage/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\nDEFAULT_RECIPIENT_FLOCK=\"alarms\"\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The incoming webhook URL as given by flock.com. You can use the same on all your Netdata servers (or you can have multiple if you like). Read more about flock webhooks and how to get one [here](https://admin.flock.com/webhooks).\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_FLOCK | Set `SEND_FLOCK` to YES | YES | yes |\n| FLOCK_WEBHOOK_URL | set `FLOCK_WEBHOOK_URL` to your webhook URL. | | yes |\n| DEFAULT_RECIPIENT_FLOCK | Set `DEFAULT_RECIPIENT_FLOCK` to the Flock channel you want the alert notifications to be sent to. All roles will default to this variable if left unconfigured. 
| | yes |\n\n##### DEFAULT_RECIPIENT_FLOCK\n\nYou can have different channels per role, by editing DEFAULT_RECIPIENT_FLOCK with the channel you want, in the following entries at the bottom of the same file:\n```text\nrole_recipients_flock[sysadmin]=\"systems\"\nrole_recipients_flock[domainadmin]=\"domains\"\nrole_recipients_flock[dba]=\"databases systems\"\nrole_recipients_flock[webmaster]=\"marketing development\"\nrole_recipients_flock[proxyadmin]=\"proxy-admin\"\nrole_recipients_flock[sitemgr]=\"sites\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# flock (flock.com) global notification options\n\nSEND_FLOCK=\"YES\"\nFLOCK_WEBHOOK_URL=\"https://api.flock.com/hooks/sendMessage/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\nDEFAULT_RECIPIENT_FLOCK=\"alarms\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/flock/metadata.yaml" }, { @@ -21835,11 +21720,30 @@ "gotify" ], "overview": "# Gotify\n\n[Gotify](https://gotify.net/) is a self-hosted push notification service created for sending and receiving messages in real time.\nYou can send alerts to your Gotify instance using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- An application token. You can generate a new token in the Gotify Web UI.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_GOTIFY | Set `SEND_GOTIFY` to YES | YES | yes |\n| GOTIFY_APP_TOKEN | set `GOTIFY_APP_TOKEN` to the app token you generated. | | yes |\n| GOTIFY_APP_URL | Set `GOTIFY_APP_URL` to point to your Gotify instance, for example `https://push.example.domain/` | | yes |\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\nSEND_GOTIFY=\"YES\"\nGOTIFY_APP_TOKEN=\"XXXXXXXXXXXXXXX\"\nGOTIFY_APP_URL=\"https://push.example.domain/\"\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- An application token. 
You can generate a new token in the Gotify Web UI.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_GOTIFY | Set `SEND_GOTIFY` to YES | YES | yes |\n| GOTIFY_APP_TOKEN | Set `GOTIFY_APP_TOKEN` to the app token you generated. | | yes |\n| GOTIFY_APP_URL | Set `GOTIFY_APP_URL` to point to your Gotify instance, for example `https://push.example.domain/`. | | yes |\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\nSEND_GOTIFY=\"YES\"\nGOTIFY_APP_TOKEN=\"XXXXXXXXXXXXXXX\"\nGOTIFY_APP_URL=\"https://push.example.domain/\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/gotify/metadata.yaml" }, + { + "id": "notify-ilert", + "meta": { + "name": "ilert", + "link": "https://www.ilert.com/", + "categories": [ + "notify.agent" + ], + "icon_filename": "ilert.svg" + }, + "keywords": [ + "ilert" + ], + "overview": "# ilert\n\nilert is an alerting and incident management tool. It helps teams reduce response times by enhancing monitoring and ticketing tools with reliable alerts, automatic escalations, on-call schedules, and features for incident response, communication, and status updates.\nSending notifications to ilert via Netdata's Agent alert notification feature includes links, images, and resolving of the corresponding alerts.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A Netdata alert source in ilert. 
You can create a [Netdata alert source](https://docs.ilert.com/inbound-integrations/netdata) in [ilert](https://www.ilert.com/).\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_ILERT | Set `SEND_ILERT` to YES | YES | yes |\n| ILERT_ALERT_SOURCE_URL | Set `ILERT_ALERT_SOURCE_URL` to your Netdata alert source url in ilert. | | yes |\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\nSEND_ILERT=\"YES\"\nILERT_ALERT_SOURCE_URL=\"https://api.ilert.com/api/v1/events/netdata/{API-KEY}\"\n\n```\n", + "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", + "integration_type": "agent_notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/ilert/metadata.yaml" + }, { "id": "notify-irc", "meta": { @@ -21854,9 +21758,9 @@ "IRC" ], "overview": "# IRC\n\nSend notifications to IRC using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The `nc` utility. You can set the path to it, or Netdata will search for it in your system `$PATH`.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| nc path | Set the path for nc, otherwise Netdata will search for it in your system $PATH | | yes |\n| SEND_IRC | Set `SEND_IRC` YES. | YES | yes |\n| IRC_NETWORK | Set `IRC_NETWORK` to the IRC network which your preferred channels belong to. | | yes |\n| IRC_PORT | Set `IRC_PORT` to the IRC port to which a connection will occur. | | no |\n| IRC_NICKNAME | Set `IRC_NICKNAME` to the IRC nickname which is required to send the notification. It must not be an already registered name as the connection's MODE is defined as a guest. 
| | yes |\n| IRC_REALNAME | Set `IRC_REALNAME` to the IRC realname which is required in order to make the connection. | | yes |\n| DEFAULT_RECIPIENT_IRC | You can have different channels per role, by editing `DEFAULT_RECIPIENT_IRC` with the channel you want | | yes |\n\n##### nc path\n\n```sh\n#------------------------------------------------------------------------------\n# external commands\n#\n# The full path of the nc command.\n# If empty, the system $PATH will be searched for it.\n# If not found, irc notifications will be silently disabled.\nnc=\"/usr/bin/nc\"\n```\n\n\n##### DEFAULT_RECIPIENT_IRC\n\nThe `DEFAULT_RECIPIENT_IRC` can be edited in the following entries at the bottom of the same file:\n```conf\nrole_recipients_irc[sysadmin]=\"#systems\"\nrole_recipients_irc[domainadmin]=\"#domains\"\nrole_recipients_irc[dba]=\"#databases #systems\"\nrole_recipients_irc[webmaster]=\"#marketing #development\"\nrole_recipients_irc[proxyadmin]=\"#proxy-admin\"\nrole_recipients_irc[sitemgr]=\"#sites\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# irc notification options\n#\nSEND_IRC=\"YES\"\nDEFAULT_RECIPIENT_IRC=\"#system-alarms\"\nIRC_NETWORK=\"irc.freenode.net\"\nIRC_NICKNAME=\"netdata-alarm-user\"\nIRC_REALNAME=\"netdata-user\"\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The `nc` utility. You can set the path to it, or Netdata will search for it in your system `$PATH`.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| nc path | Set the path for nc; otherwise, Netdata will search for it in your system $PATH | | yes |\n| SEND_IRC | Set `SEND_IRC` to YES. | YES | yes |\n| IRC_NETWORK | Set `IRC_NETWORK` to the IRC network which your preferred channels belong to. | | yes |\n| IRC_PORT | Set `IRC_PORT` to the IRC port to which a connection will occur. | | no |\n| IRC_NICKNAME | Set `IRC_NICKNAME` to the IRC nickname which is required to send the notification. It must not be an already registered name as the connection's MODE is defined as a guest. | | yes |\n| IRC_REALNAME | Set `IRC_REALNAME` to the IRC realname which is required in order to make the connection. 
| | yes |\n| DEFAULT_RECIPIENT_IRC | You can have different channels per role, by editing `DEFAULT_RECIPIENT_IRC` with the channel you want | | yes |\n\n##### nc path\n\n```sh\n#------------------------------------------------------------------------------\n# external commands\n#\n# The full path of the nc command.\n# If empty, the system $PATH will be searched for it.\n# If not found, irc notifications will be silently disabled.\nnc=\"/usr/bin/nc\"\n```\n\n\n##### DEFAULT_RECIPIENT_IRC\n\nThe `DEFAULT_RECIPIENT_IRC` can be edited in the following entries at the bottom of the same file:\n```text\nrole_recipients_irc[sysadmin]=\"#systems\"\nrole_recipients_irc[domainadmin]=\"#domains\"\nrole_recipients_irc[dba]=\"#databases #systems\"\nrole_recipients_irc[webmaster]=\"#marketing #development\"\nrole_recipients_irc[proxyadmin]=\"#proxy-admin\"\nrole_recipients_irc[sitemgr]=\"#sites\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# irc notification options\n#\nSEND_IRC=\"YES\"\nDEFAULT_RECIPIENT_IRC=\"#system-alarms\"\nIRC_NETWORK=\"irc.freenode.net\"\nIRC_NICKNAME=\"netdata-alarm-user\"\nIRC_REALNAME=\"netdata-user\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/irc/metadata.yaml" }, { @@ -21873,9 +21777,9 @@ "Kavenegar" ], "overview": "# Kavenegar\n\n[Kavenegar](https://kavenegar.com/), a service for software developers based in Iran, provides SMS sending and receiving and voice calls through its APIs.\nYou can send notifications to Kavenegar using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The APIKEY and Sender from http://panel.kavenegar.com/client/setting/account\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_KAVENEGAR | Set `SEND_KAVENEGAR` to YES | YES | yes |\n| KAVENEGAR_API_KEY | Set `KAVENEGAR_API_KEY` to your API key. | | yes |\n| KAVENEGAR_SENDER | Set `KAVENEGAR_SENDER` to the value of your Sender. | | yes |\n| DEFAULT_RECIPIENT_KAVENEGAR | Set `DEFAULT_RECIPIENT_KAVENEGAR` to the SMS recipient you want the alert notifications to be sent to. 
You can define multiple recipients like this: 09155555555 09177777777. | | yes |\n\n##### DEFAULT_RECIPIENT_KAVENEGAR\n\nAll roles will default to this variable if lest unconfigured.\n\nYou can then have different SMS recipients per role, by editing `DEFAULT_RECIPIENT_KAVENEGAR` with the SMS recipients you want, in the following entries at the bottom of the same file:\n```conf\nrole_recipients_kavenegar[sysadmin]=\"09100000000\"\nrole_recipients_kavenegar[domainadmin]=\"09111111111\"\nrole_recipients_kavenegar[dba]=\"0922222222\"\nrole_recipients_kavenegar[webmaster]=\"0933333333\"\nrole_recipients_kavenegar[proxyadmin]=\"0944444444\"\nrole_recipients_kavenegar[sitemgr]=\"0955555555\"\n```\n\nThe values you provide should be defined as environments in `/etc/alertad.conf` with `ALLOWED_ENVIRONMENTS` option.\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Kavenegar (Kavenegar.com) SMS options\n\nSEND_KAVENEGAR=\"YES\"\nKAVENEGAR_API_KEY=\"XXXXXXXXXXXX\"\nKAVENEGAR_SENDER=\"YYYYYYYY\"\nDEFAULT_RECIPIENT_KAVENEGAR=\"0912345678\"\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The APIKEY and Sender from http://panel.kavenegar.com/client/setting/account\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_KAVENEGAR | Set `SEND_KAVENEGAR` to YES | YES | yes |\n| KAVENEGAR_API_KEY | Set `KAVENEGAR_API_KEY` to your API key. | | yes |\n| KAVENEGAR_SENDER | Set `KAVENEGAR_SENDER` to the value of your Sender. | | yes |\n| DEFAULT_RECIPIENT_KAVENEGAR | Set `DEFAULT_RECIPIENT_KAVENEGAR` to the SMS recipient you want the alert notifications to be sent to. You can define multiple recipients like this: 09155555555 09177777777. 
| | yes |\n\n##### DEFAULT_RECIPIENT_KAVENEGAR\n\nAll roles will default to this variable if left unconfigured.\n\nYou can then have different SMS recipients per role, by editing `DEFAULT_RECIPIENT_KAVENEGAR` with the SMS recipients you want, in the following entries at the bottom of the same file:\n```text\nrole_recipients_kavenegar[sysadmin]=\"09100000000\"\nrole_recipients_kavenegar[domainadmin]=\"09111111111\"\nrole_recipients_kavenegar[dba]=\"0922222222\"\nrole_recipients_kavenegar[webmaster]=\"0933333333\"\nrole_recipients_kavenegar[proxyadmin]=\"0944444444\"\nrole_recipients_kavenegar[sitemgr]=\"0955555555\"\n```\n\nThe values you provide should be defined as environments in `/etc/alertad.conf` with the `ALLOWED_ENVIRONMENTS` option.\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Kavenegar (Kavenegar.com) SMS options\n\nSEND_KAVENEGAR=\"YES\"\nKAVENEGAR_API_KEY=\"XXXXXXXXXXXX\"\nKAVENEGAR_SENDER=\"YYYYYYYY\"\nDEFAULT_RECIPIENT_KAVENEGAR=\"0912345678\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/kavenegar/metadata.yaml" }, { @@ -21892,9 +21796,9 @@ "Matrix" ], "overview": "# Matrix\n\nSend notifications to Matrix network rooms using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The url of the homeserver (`https://homeserver:port`).\n- Credentials for connecting to the homeserver, in the form of a valid access token for your account (or for a dedicated notification account). These tokens usually don't expire.\n- The Room ids that you want to sent the notification to.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_MATRIX | Set `SEND_MATRIX` to YES | YES | yes |\n| MATRIX_HOMESERVER | set `MATRIX_HOMESERVER` to the URL of the Matrix homeserver. | | yes |\n| MATRIX_ACCESSTOKEN | Set `MATRIX_ACCESSTOKEN` to the access token from your Matrix account. | | yes |\n| DEFAULT_RECIPIENT_MATRIX | Set `DEFAULT_RECIPIENT_MATRIX` to the Rooms you want the alert notifications to be sent to. The format is `!roomid:homeservername`. 
| | yes |\n\n##### MATRIX_ACCESSTOKEN\n\nTo obtain the access token, you can use the following curl command:\n```\ncurl -XPOST -d '{\"type\":\"m.login.password\", \"user\":\"example\", \"password\":\"wordpass\"}' \"https://homeserver:8448/_matrix/client/r0/login\"\n```\n\n\n##### DEFAULT_RECIPIENT_MATRIX\n\nThe Room ids are unique identifiers and can be obtained from the Room settings in a Matrix client (e.g. Riot).\n\nYou can define multiple Rooms like this: `!roomid1:homeservername` `!roomid2:homeservername`.\n\nAll roles will default to this variable if left unconfigured.\n\nYou can have different Rooms per role, by editing `DEFAULT_RECIPIENT_MATRIX` with the `!roomid:homeservername` you want, in the following entries at the bottom of the same file:\n\n```conf\nrole_recipients_matrix[sysadmin]=\"!roomid1:homeservername\"\nrole_recipients_matrix[domainadmin]=\"!roomid2:homeservername\"\nrole_recipients_matrix[dba]=\"!roomid3:homeservername\"\nrole_recipients_matrix[webmaster]=\"!roomid4:homeservername\"\nrole_recipients_matrix[proxyadmin]=\"!roomid5:homeservername\"\nrole_recipients_matrix[sitemgr]=\"!roomid6:homeservername\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Matrix notifications\n\nSEND_MATRIX=\"YES\"\nMATRIX_HOMESERVER=\"https://matrix.org:8448\"\nMATRIX_ACCESSTOKEN=\"XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\nDEFAULT_RECIPIENT_MATRIX=\"!XXXXXXXXXXXX:matrix.org\"\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The URL of the homeserver (`https://homeserver:port`).\n- Credentials for connecting to the homeserver, in the form of a valid access token for your account (or for a dedicated notification account). These tokens usually don't expire.\n- The Room ids that you want to send the notification to.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_MATRIX | Set `SEND_MATRIX` to YES | YES | yes |\n| MATRIX_HOMESERVER | Set `MATRIX_HOMESERVER` to the URL of the Matrix homeserver. | | yes |\n| MATRIX_ACCESSTOKEN | Set `MATRIX_ACCESSTOKEN` to the access token from your Matrix account. | | yes |\n| DEFAULT_RECIPIENT_MATRIX | Set `DEFAULT_RECIPIENT_MATRIX` to the Rooms you want the alert notifications to be sent to. The format is `!roomid:homeservername`. | | yes |\n\n##### MATRIX_ACCESSTOKEN\n\nTo obtain the access token, you can use the following curl command:\n```sh\ncurl -XPOST -d '{\"type\":\"m.login.password\", \"user\":\"example\", \"password\":\"wordpass\"}' \"https://homeserver:8448/_matrix/client/r0/login\"\n```\n\n\n##### DEFAULT_RECIPIENT_MATRIX\n\nThe Room ids are unique identifiers and can be obtained from the Room settings in a Matrix client (e.g. 
Riot).\n\nYou can define multiple Rooms like this: `!roomid1:homeservername` `!roomid2:homeservername`.\n\nAll roles will default to this variable if left unconfigured.\n\nYou can have different Rooms per role, by editing `DEFAULT_RECIPIENT_MATRIX` with the `!roomid:homeservername` you want, in the following entries at the bottom of the same file:\n\n```text\nrole_recipients_matrix[sysadmin]=\"!roomid1:homeservername\"\nrole_recipients_matrix[domainadmin]=\"!roomid2:homeservername\"\nrole_recipients_matrix[dba]=\"!roomid3:homeservername\"\nrole_recipients_matrix[webmaster]=\"!roomid4:homeservername\"\nrole_recipients_matrix[proxyadmin]=\"!roomid5:homeservername\"\nrole_recipients_matrix[sitemgr]=\"!roomid6:homeservername\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Matrix notifications\n\nSEND_MATRIX=\"YES\"\nMATRIX_HOMESERVER=\"https://matrix.org:8448\"\nMATRIX_ACCESSTOKEN=\"XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\nDEFAULT_RECIPIENT_MATRIX=\"!XXXXXXXXXXXX:matrix.org\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/matrix/metadata.yaml" }, { @@ -21911,9 +21815,9 @@ "MessageBird" ], "overview": "# MessageBird\n\nSend notifications to MessageBird using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- An access key under 'API ACCESS (REST)' (you will want a live key), you can read more [here](https://developers.messagebird.com/quickstarts/sms/test-credits-api-keys/).\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_MESSAGEBIRD | Set `SEND_MESSAGEBIRD` to YES | YES | yes |\n| MESSAGEBIRD_ACCESS_KEY | Set `MESSAGEBIRD_ACCESS_KEY` to your API key. | | yes |\n| MESSAGEBIRD_NUMBER | Set `MESSAGEBIRD_NUMBER` to the MessageBird number you want to use for the alert. | | yes |\n| DEFAULT_RECIPIENT_MESSAGEBIRD | Set `DEFAULT_RECIPIENT_MESSAGEBIRD` to the number you want the alert notification to be sent as an SMS. You can define multiple recipients like this: +15555555555 +17777777777. 
| | yes |\n\n##### DEFAULT_RECIPIENT_MESSAGEBIRD\n\nAll roles will default to this variable if left unconfigured.\n\nYou can then have different recipients per role, by editing `DEFAULT_RECIPIENT_MESSAGEBIRD` with the number you want, in the following entries at the bottom of the same file:\n```conf\nrole_recipients_messagebird[sysadmin]=\"+15555555555\"\nrole_recipients_messagebird[domainadmin]=\"+15555555556\"\nrole_recipients_messagebird[dba]=\"+15555555557\"\nrole_recipients_messagebird[webmaster]=\"+15555555558\"\nrole_recipients_messagebird[proxyadmin]=\"+15555555559\"\nrole_recipients_messagebird[sitemgr]=\"+15555555550\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Messagebird (messagebird.com) SMS options\n\nSEND_MESSAGEBIRD=\"YES\"\nMESSAGEBIRD_ACCESS_KEY=\"XXXXXXXX\"\nMESSAGEBIRD_NUMBER=\"XXXXXXX\"\nDEFAULT_RECIPIENT_MESSAGEBIRD=\"+15555555555\"\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- An access key under 'API ACCESS (REST)' (you will want a live key); you can read more [here](https://developers.messagebird.com/quickstarts/sms/test-credits-api-keys/).\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_MESSAGEBIRD | Set `SEND_MESSAGEBIRD` to YES | YES | yes |\n| MESSAGEBIRD_ACCESS_KEY | Set `MESSAGEBIRD_ACCESS_KEY` to your API key. | | yes |\n| MESSAGEBIRD_NUMBER | Set `MESSAGEBIRD_NUMBER` to the MessageBird number you want to use for the alert. | | yes |\n| DEFAULT_RECIPIENT_MESSAGEBIRD | Set `DEFAULT_RECIPIENT_MESSAGEBIRD` to the number you want the alert notification to be sent to as an SMS. You can define multiple recipients like this: +15555555555 +17777777777. 
| | yes |\n\n##### DEFAULT_RECIPIENT_MESSAGEBIRD\n\nAll roles will default to this variable if left unconfigured.\n\nYou can then have different recipients per role, by editing `DEFAULT_RECIPIENT_MESSAGEBIRD` with the number you want, in the following entries at the bottom of the same file:\n```text\nrole_recipients_messagebird[sysadmin]=\"+15555555555\"\nrole_recipients_messagebird[domainadmin]=\"+15555555556\"\nrole_recipients_messagebird[dba]=\"+15555555557\"\nrole_recipients_messagebird[webmaster]=\"+15555555558\"\nrole_recipients_messagebird[proxyadmin]=\"+15555555559\"\nrole_recipients_messagebird[sitemgr]=\"+15555555550\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Messagebird (messagebird.com) SMS options\n\nSEND_MESSAGEBIRD=\"YES\"\nMESSAGEBIRD_ACCESS_KEY=\"XXXXXXXX\"\nMESSAGEBIRD_NUMBER=\"XXXXXXX\"\nDEFAULT_RECIPIENT_MESSAGEBIRD=\"+15555555555\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/messagebird/metadata.yaml" }, { @@ -21930,9 +21834,9 @@ "ntfy" ], "overview": "# ntfy\n\n[ntfy](https://ntfy.sh/) (pronounce: notify) is a simple HTTP-based [pub-sub](https://en.wikipedia.org/wiki/Publish%E2%80%93subscribe_pattern) notification service. It allows you to send notifications to your phone or desktop via scripts from any computer, entirely without signup, cost or setup. It's also [open source](https://github.com/binwiederhier/ntfy) if you want to run your own server.\nYou can send alerts to an ntfy server using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- (Optional) A [self-hosted ntfy server](https://docs.ntfy.sh/faq/#can-i-self-host-it), in case you don't want to use https://ntfy.sh\n- A new [topic](https://ntfy.sh/#subscribe) for the notifications to be published to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_NTFY | Set `SEND_NTFY` to YES | YES | yes |\n| DEFAULT_RECIPIENT_NTFY | URL formed by the server-topic combination you want the alert notifications to be sent to. Unless hosting your own server, the server should always be set to https://ntfy.sh. 
| | yes |\n| NTFY_USERNAME | The username for netdata to use to authenticate with an ntfy server. | | no |\n| NTFY_PASSWORD | The password for netdata to use to authenticate with an ntfy server. | | no |\n| NTFY_ACCESS_TOKEN | The access token for netdata to use to authenticate with an ntfy server. | | no |\n\n##### DEFAULT_RECIPIENT_NTFY\n\nYou can define multiple recipient URLs like this: `https://SERVER1/TOPIC1` `https://SERVER2/TOPIC2`\n\nAll roles will default to this variable if left unconfigured.\n\nYou can then have different servers and/or topics per role, by editing DEFAULT_RECIPIENT_NTFY with the server-topic combination you want, in the following entries at the bottom of the same file:\n```conf\nrole_recipients_ntfy[sysadmin]=\"https://SERVER1/TOPIC1\"\nrole_recipients_ntfy[domainadmin]=\"https://SERVER2/TOPIC2\"\nrole_recipients_ntfy[dba]=\"https://SERVER3/TOPIC3\"\nrole_recipients_ntfy[webmaster]=\"https://SERVER4/TOPIC4\"\nrole_recipients_ntfy[proxyadmin]=\"https://SERVER5/TOPIC5\"\nrole_recipients_ntfy[sitemgr]=\"https://SERVER6/TOPIC6\"\n```\n\n\n##### NTFY_USERNAME\n\nOnly useful on self-hosted ntfy instances. See [users and roles](https://docs.ntfy.sh/config/#users-and-roles) for details.\nEnsure that your user has proper read/write access to the provided topic in `DEFAULT_RECIPIENT_NTFY`\n\n\n##### NTFY_PASSWORD\n\nOnly useful on self-hosted ntfy instances. See [users and roles](https://docs.ntfy.sh/config/#users-and-roles) for details.\nEnsure that your user has proper read/write access to the provided topic in `DEFAULT_RECIPIENT_NTFY`\n\n\n##### NTFY_ACCESS_TOKEN\n\nThis can be used in place of `NTFY_USERNAME` and `NTFY_PASSWORD` to authenticate with a self-hosted ntfy instance. See [access tokens](https://docs.ntfy.sh/config/?h=access+to#access-tokens) for details.\nEnsure that the token user has proper read/write access to the provided topic in `DEFAULT_RECIPIENT_NTFY`\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\nSEND_NTFY=\"YES\"\nDEFAULT_RECIPIENT_NTFY=\"https://ntfy.sh/netdata-X7seHg7d3Tw9zGOk https://ntfy.sh/netdata-oIPm4IK1IlUtlA30\"\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- (Optional) A [self-hosted ntfy server](https://docs.ntfy.sh/faq/#can-i-self-host-it), in case you don't want to use https://ntfy.sh\n- A new [topic](https://ntfy.sh/#subscribe) for the notifications to be published to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_NTFY | Set `SEND_NTFY` to YES | YES | yes |\n| DEFAULT_RECIPIENT_NTFY | URL formed by the server-topic combination you want the alert notifications to be sent to. Unless hosting your own server, the server should always be set to https://ntfy.sh. | | yes |\n| NTFY_USERNAME | The username for netdata to use to authenticate with an ntfy server. 
| | no |\n| NTFY_PASSWORD | The password for netdata to use to authenticate with an ntfy server. | | no |\n| NTFY_ACCESS_TOKEN | The access token for netdata to use to authenticate with an ntfy server. | | no |\n\n##### DEFAULT_RECIPIENT_NTFY\n\nYou can define multiple recipient URLs like this: `https://SERVER1/TOPIC1` `https://SERVER2/TOPIC2`\n\nAll roles will default to this variable if left unconfigured.\n\nYou can then have different servers and/or topics per role, by editing DEFAULT_RECIPIENT_NTFY with the server-topic combination you want, in the following entries at the bottom of the same file:\n```text\nrole_recipients_ntfy[sysadmin]=\"https://SERVER1/TOPIC1\"\nrole_recipients_ntfy[domainadmin]=\"https://SERVER2/TOPIC2\"\nrole_recipients_ntfy[dba]=\"https://SERVER3/TOPIC3\"\nrole_recipients_ntfy[webmaster]=\"https://SERVER4/TOPIC4\"\nrole_recipients_ntfy[proxyadmin]=\"https://SERVER5/TOPIC5\"\nrole_recipients_ntfy[sitemgr]=\"https://SERVER6/TOPIC6\"\n```\n\n\n##### NTFY_USERNAME\n\nOnly useful on self-hosted ntfy instances. See [users and roles](https://docs.ntfy.sh/config/#users-and-roles) for details.\nEnsure that your user has proper read/write access to the provided topic in `DEFAULT_RECIPIENT_NTFY`\n\n\n##### NTFY_PASSWORD\n\nOnly useful on self-hosted ntfy instances. See [users and roles](https://docs.ntfy.sh/config/#users-and-roles) for details.\nEnsure that your user has proper read/write access to the provided topic in `DEFAULT_RECIPIENT_NTFY`\n\n\n##### NTFY_ACCESS_TOKEN\n\nThis can be used in place of `NTFY_USERNAME` and `NTFY_PASSWORD` to authenticate with a self-hosted ntfy instance. See [access tokens](https://docs.ntfy.sh/config/?h=access+to#access-tokens) for details.\nEnsure that the token user has proper read/write access to the provided topic in `DEFAULT_RECIPIENT_NTFY`\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\nSEND_NTFY=\"YES\"\nDEFAULT_RECIPIENT_NTFY=\"https://ntfy.sh/netdata-X7seHg7d3Tw9zGOk https://ntfy.sh/netdata-oIPm4IK1IlUtlA30\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/ntfy/metadata.yaml" }, { @@ -21949,9 +21853,9 @@ "OpsGenie" ], "overview": "# OpsGenie\n\nOpsgenie is an alerting and incident response tool. It is designed to group and filter alarms, build custom routing rules for on-call teams, and correlate deployments and commits to incidents.\nYou can send notifications to Opsgenie using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- An Opsgenie integration. 
You can create an [integration](https://docs.opsgenie.com/docs/api-integration) in the [Opsgenie](https://www.atlassian.com/software/opsgenie) dashboard.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_OPSGENIE | Set `SEND_OPSGENIE` to YES | YES | yes |\n| OPSGENIE_API_KEY | Set `OPSGENIE_API_KEY` to your API key. | | yes |\n| OPSGENIE_API_URL | Set `OPSGENIE_API_URL` to the corresponding URL if required, for example there are region-specific API URLs such as `https://eu.api.opsgenie.com`. | https://api.opsgenie.com | no |\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\nSEND_OPSGENIE=\"YES\"\nOPSGENIE_API_KEY=\"11111111-2222-3333-4444-555555555555\"\nOPSGENIE_API_URL=\"\"\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- An Opsgenie integration. You can create an [integration](https://docs.opsgenie.com/docs/api-integration) in the [Opsgenie](https://www.atlassian.com/software/opsgenie) dashboard.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_OPSGENIE | Set `SEND_OPSGENIE` to YES | YES | yes |\n| OPSGENIE_API_KEY | Set `OPSGENIE_API_KEY` to your API key. | | yes |\n| OPSGENIE_API_URL | Set `OPSGENIE_API_URL` to the corresponding URL if required, for example there are region-specific API URLs such as `https://eu.api.opsgenie.com`. 
| https://api.opsgenie.com | no |\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\nSEND_OPSGENIE=\"YES\"\nOPSGENIE_API_KEY=\"11111111-2222-3333-4444-555555555555\"\nOPSGENIE_API_URL=\"\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/opsgenie/metadata.yaml" }, { @@ -21968,9 +21872,9 @@ "PagerDuty" ], "overview": "# PagerDuty\n\nPagerDuty is an enterprise incident resolution service that integrates with ITOps and DevOps monitoring stacks to improve operational reliability and agility. From enriching and aggregating events to correlating them into incidents, PagerDuty streamlines the incident management process by reducing alert noise and resolution times.\nYou can send notifications to PagerDuty using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- An installation of the [PagerDuty](https://www.pagerduty.com/docs/guides/agent-install-guide/) agent on the node running the Netdata Agent\n- A PagerDuty Generic API service using either the `Events API v2` or `Events API v1`\n- [Add a new service](https://support.pagerduty.com/docs/services-and-integrations#section-configuring-services-and-integrations) to PagerDuty. Click Use our API directly and select either `Events API v2` or `Events API v1`. Once you finish creating the service, click on the Integrations tab to find your Integration Key.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_PD | Set `SEND_PD` to YES | YES | yes |\n| DEFAULT_RECIPIENT_PD | Set `DEFAULT_RECIPIENT_PD` to the PagerDuty service key you want the alert notifications to be sent to. You can define multiple service keys like this: `pd_service_key_1` `pd_service_key_2`. 
| | yes |\n\n##### DEFAULT_RECIPIENT_PD\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_PD` can be edited in the following entries at the bottom of the same file:\n```conf\nrole_recipients_pd[sysadmin]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxa\"\nrole_recipients_pd[domainadmin]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxb\"\nrole_recipients_pd[dba]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxc\"\nrole_recipients_pd[webmaster]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxd\"\nrole_recipients_pd[proxyadmin]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxe\"\nrole_recipients_pd[sitemgr]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxf\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# pagerduty.com notification options\n\nSEND_PD=\"YES\"\nDEFAULT_RECIPIENT_PD=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\"\nUSE_PD_VERSION=\"2\"\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- An installation of the [PagerDuty](https://www.pagerduty.com/docs/guides/agent-install-guide/) agent on the node running the Netdata Agent\n- A PagerDuty Generic API service using either the `Events API v2` or `Events API v1`\n- [Add a new service](https://support.pagerduty.com/docs/services-and-integrations#section-configuring-services-and-integrations) to PagerDuty. Click Use our API directly and select either `Events API v2` or `Events API v1`. Once you finish creating the service, click on the Integrations tab to find your Integration Key.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_PD | Set `SEND_PD` to YES | YES | yes |\n| DEFAULT_RECIPIENT_PD | Set `DEFAULT_RECIPIENT_PD` to the PagerDuty service key you want the alert notifications to be sent to. You can define multiple service keys like this: `pd_service_key_1` `pd_service_key_2`. 
| | yes |\n\n##### DEFAULT_RECIPIENT_PD\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_PD` can be edited in the following entries at the bottom of the same file:\n```text\nrole_recipients_pd[sysadmin]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxa\"\nrole_recipients_pd[domainadmin]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxb\"\nrole_recipients_pd[dba]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxc\"\nrole_recipients_pd[webmaster]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxd\"\nrole_recipients_pd[proxyadmin]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxe\"\nrole_recipients_pd[sitemgr]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxf\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# pagerduty.com notification options\n\nSEND_PD=\"YES\"\nDEFAULT_RECIPIENT_PD=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\"\nUSE_PD_VERSION=\"2\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/pagerduty/metadata.yaml" }, { @@ -21987,9 +21891,9 @@ "Prowl" ], "overview": "# Prowl\n\nSend notifications to Prowl using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n\n## Limitations\n\n- Because of how Netdata integrates with Prowl, there is a hard limit of at most 1000 notifications per hour (starting from the first notification sent). Any alerts beyond the first thousand in an hour will be dropped.\n- Warning messages will be sent with the 'High' priority, critical messages will be sent with the 'Emergency' priority, and all other messages will be sent with the normal priority. Opening the notification's associated URL will take you to the Netdata dashboard of the system that issued the alert, directly to the chart that it triggered on.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A Prowl API key, which can be requested through the Prowl website after registering\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_PROWL | Set `SEND_PROWL` to YES | YES | yes |\n| DEFAULT_RECIPIENT_PROWL | Set `DEFAULT_RECIPIENT_PROWL` to the Prowl API key you want the alert notifications to be sent to. You can define multiple API keys like this: `APIKEY1`, `APIKEY2`. 
| | yes |\n\n##### DEFAULT_RECIPIENT_PROWL\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_PROWL` can be edited in the following entries at the bottom of the same file:\n```conf\nrole_recipients_prowl[sysadmin]=\"AAAAAAAA\"\nrole_recipients_prowl[domainadmin]=\"BBBBBBBBB\"\nrole_recipients_prowl[dba]=\"CCCCCCCCC\"\nrole_recipients_prowl[webmaster]=\"DDDDDDDDDD\"\nrole_recipients_prowl[proxyadmin]=\"EEEEEEEEEE\"\nrole_recipients_prowl[sitemgr]=\"FFFFFFFFFF\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# iOS Push Notifications\n\nSEND_PROWL=\"YES\"\nDEFAULT_RECIPIENT_PROWL=\"XXXXXXXXXX\"\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A Prowl API key, which can be requested through the Prowl website after registering\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_PROWL | Set `SEND_PROWL` to YES | YES | yes |\n| DEFAULT_RECIPIENT_PROWL | Set `DEFAULT_RECIPIENT_PROWL` to the Prowl API key you want the alert notifications to be sent to. You can define multiple API keys like this: `APIKEY1`, `APIKEY2`. 
| | yes |\n\n##### DEFAULT_RECIPIENT_PROWL\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_PROWL` can be edited in the following entries at the bottom of the same file:\n```text\nrole_recipients_prowl[sysadmin]=\"AAAAAAAA\"\nrole_recipients_prowl[domainadmin]=\"BBBBBBBBB\"\nrole_recipients_prowl[dba]=\"CCCCCCCCC\"\nrole_recipients_prowl[webmaster]=\"DDDDDDDDDD\"\nrole_recipients_prowl[proxyadmin]=\"EEEEEEEEEE\"\nrole_recipients_prowl[sitemgr]=\"FFFFFFFFFF\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# iOS Push Notifications\n\nSEND_PROWL=\"YES\"\nDEFAULT_RECIPIENT_PROWL=\"XXXXXXXXXX\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/prowl/metadata.yaml" }, { @@ -22006,9 +21910,9 @@ "Pushbullet" ], "overview": "# Pushbullet\n\nSend notifications to Pushbullet using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A Pushbullet access token that can be created in your [account settings](https://www.pushbullet.com/#settings/account).\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| Send_PUSHBULLET | Set `Send_PUSHBULLET` to YES | YES | yes |\n| PUSHBULLET_ACCESS_TOKEN | set `PUSHBULLET_ACCESS_TOKEN` to the access token you generated. | | yes |\n| DEFAULT_RECIPIENT_PUSHBULLET | Set `DEFAULT_RECIPIENT_PUSHBULLET` to the email (e.g. `example@domain.com`) or the channel tag (e.g. `#channel`) you want the alert notifications to be sent to. 
| | yes |\n\n##### DEFAULT_RECIPIENT_PUSHBULLET\n\nYou can define multiple entries like this: user1@email.com user2@email.com.\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_PUSHBULLET` can be edited in the following entries at the bottom of the same file:\n```conf\nrole_recipients_pushbullet[sysadmin]=\"user1@email.com\"\nrole_recipients_pushbullet[domainadmin]=\"user2@mail.com\"\nrole_recipients_pushbullet[dba]=\"#channel1\"\nrole_recipients_pushbullet[webmaster]=\"#channel2\"\nrole_recipients_pushbullet[proxyadmin]=\"user3@mail.com\"\nrole_recipients_pushbullet[sitemgr]=\"user4@mail.com\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# pushbullet (pushbullet.com) push notification options\n\nSEND_PUSHBULLET=\"YES\"\nPUSHBULLET_ACCESS_TOKEN=\"XXXXXXXXX\"\nDEFAULT_RECIPIENT_PUSHBULLET=\"admin1@example.com admin3@somemail.com #examplechanneltag #anotherchanneltag\"\n\n```\n",
+        "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A Pushbullet access token that can be created in your [account settings](https://www.pushbullet.com/#settings/account).\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_PUSHBULLET | Set `SEND_PUSHBULLET` to YES | YES | yes |\n| PUSHBULLET_ACCESS_TOKEN | Set `PUSHBULLET_ACCESS_TOKEN` to the access token you generated. | | yes |\n| DEFAULT_RECIPIENT_PUSHBULLET | Set `DEFAULT_RECIPIENT_PUSHBULLET` to the email (e.g. `example@domain.com`) or the channel tag (e.g. `#channel`) you want the alert notifications to be sent to. 
| | yes |\n\n##### DEFAULT_RECIPIENT_PUSHBULLET\n\nYou can define multiple entries like this: user1@email.com user2@email.com.\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_PUSHBULLET` can be edited in the following entries at the bottom of the same file:\n```text\nrole_recipients_pushbullet[sysadmin]=\"user1@email.com\"\nrole_recipients_pushbullet[domainadmin]=\"user2@mail.com\"\nrole_recipients_pushbullet[dba]=\"#channel1\"\nrole_recipients_pushbullet[webmaster]=\"#channel2\"\nrole_recipients_pushbullet[proxyadmin]=\"user3@mail.com\"\nrole_recipients_pushbullet[sitemgr]=\"user4@mail.com\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# pushbullet (pushbullet.com) push notification options\n\nSEND_PUSHBULLET=\"YES\"\nPUSHBULLET_ACCESS_TOKEN=\"XXXXXXXXX\"\nDEFAULT_RECIPIENT_PUSHBULLET=\"admin1@example.com admin3@somemail.com #examplechanneltag #anotherchanneltag\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/pushbullet/metadata.yaml" }, { @@ -22025,9 +21929,9 @@ "PushOver" ], "overview": "# PushOver\n\nSend notification to Pushover using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n- Netdata will send warning messages with priority 0 and critical messages with priority 1.\n- Pushover allows you to select do-not-disturb hours. The way this is configured, critical notifications will ring and vibrate your phone, even during the do-not-disturb-hours.\n- All other notifications will be delivered silently.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- An Application token. You can use the same on all your Netdata servers.\n- A User token for each user you are going to send notifications to. This is the actual recipient of the notification.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_PUSHOVER | Set `SEND_PUSHOVER` to YES | YES | yes |\n| PUSHOVER_WEBHOOK_URL | set `PUSHOVER_WEBHOOK_URL` to your Pushover Application token. | | yes |\n| DEFAULT_RECIPIENT_PUSHOVER | Set `DEFAULT_RECIPIENT_PUSHOVER` the Pushover User token you want the alert notifications to be sent to. 
You can define multiple User tokens like this: `USERTOKEN1` `USERTOKEN2`. | | yes |\n\n##### DEFAULT_RECIPIENT_PUSHOVER\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_PUSHOVER` can be edited in the following entries at the bottom of the same file:\n```conf\nrole_recipients_pushover[sysadmin]=\"USERTOKEN1\"\nrole_recipients_pushover[domainadmin]=\"USERTOKEN2\"\nrole_recipients_pushover[dba]=\"USERTOKEN3 USERTOKEN4\"\nrole_recipients_pushover[webmaster]=\"USERTOKEN5\"\nrole_recipients_pushover[proxyadmin]=\"USERTOKEN6\"\nrole_recipients_pushover[sitemgr]=\"USERTOKEN7\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# pushover (pushover.net) global notification options\n\nSEND_PUSHOVER=\"YES\"\nPUSHOVER_APP_TOKEN=\"XXXXXXXXX\"\nDEFAULT_RECIPIENT_PUSHOVER=\"USERTOKEN\"\n\n```\n",
+        "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- An Application token. You can use the same on all your Netdata servers.\n- A User token for each user you are going to send notifications to. This is the actual recipient of the notification.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_PUSHOVER | Set `SEND_PUSHOVER` to YES | YES | yes |\n| PUSHOVER_APP_TOKEN | Set `PUSHOVER_APP_TOKEN` to your Pushover Application token. | | yes |\n| DEFAULT_RECIPIENT_PUSHOVER | Set `DEFAULT_RECIPIENT_PUSHOVER` to the Pushover User token you want the alert notifications to be sent to. You can define multiple User tokens like this: `USERTOKEN1` `USERTOKEN2`. 
| | yes |\n\n##### DEFAULT_RECIPIENT_PUSHOVER\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_PUSHOVER` can be edited in the following entries at the bottom of the same file:\n```text\nrole_recipients_pushover[sysadmin]=\"USERTOKEN1\"\nrole_recipients_pushover[domainadmin]=\"USERTOKEN2\"\nrole_recipients_pushover[dba]=\"USERTOKEN3 USERTOKEN4\"\nrole_recipients_pushover[webmaster]=\"USERTOKEN5\"\nrole_recipients_pushover[proxyadmin]=\"USERTOKEN6\"\nrole_recipients_pushover[sitemgr]=\"USERTOKEN7\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# pushover (pushover.net) global notification options\n\nSEND_PUSHOVER=\"YES\"\nPUSHOVER_APP_TOKEN=\"XXXXXXXXX\"\nDEFAULT_RECIPIENT_PUSHOVER=\"USERTOKEN\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/pushover/metadata.yaml" }, { @@ -22044,9 +21948,9 @@ "RocketChat" ], "overview": "# RocketChat\n\nSend notifications to Rocket.Chat using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The incoming webhook URL as given by RocketChat. You can use the same on all your Netdata servers (or you can have multiple if you like - your decision).\n- One or more channels to post the messages to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_ROCKETCHAT | Set `SEND_ROCKETCHAT` to `YES` | YES | yes |\n| ROCKETCHAT_WEBHOOK_URL | set `ROCKETCHAT_WEBHOOK_URL` to your webhook URL. | | yes |\n| DEFAULT_RECIPIENT_ROCKETCHAT | Set `DEFAULT_RECIPIENT_ROCKETCHAT` to the channel you want the alert notifications to be sent to. You can define multiple channels like this: `alerts` `systems`. 
| | yes |\n\n##### DEFAULT_RECIPIENT_ROCKETCHAT\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_ROCKETCHAT` can be edited in the following entries at the bottom of the same file:\n```conf\nrole_recipients_rocketchat[sysadmin]=\"systems\"\nrole_recipients_rocketchat[domainadmin]=\"domains\"\nrole_recipients_rocketchat[dba]=\"databases systems\"\nrole_recipients_rocketchat[webmaster]=\"marketing development\"\nrole_recipients_rocketchat[proxyadmin]=\"proxy_admin\"\nrole_recipients_rocketchat[sitemgr]=\"sites\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# rocketchat (rocket.chat) global notification options\n\nSEND_ROCKETCHAT=\"YES\"\nROCKETCHAT_WEBHOOK_URL=\"\"\nDEFAULT_RECIPIENT_ROCKETCHAT=\"monitoring_alarms\"\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The incoming webhook URL as given by RocketChat. You can use the same on all your Netdata servers (or you can have multiple if you like - your decision).\n- One or more channels to post the messages to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_ROCKETCHAT | Set `SEND_ROCKETCHAT` to `YES` | YES | yes |\n| ROCKETCHAT_WEBHOOK_URL | set `ROCKETCHAT_WEBHOOK_URL` to your webhook URL. | | yes |\n| DEFAULT_RECIPIENT_ROCKETCHAT | Set `DEFAULT_RECIPIENT_ROCKETCHAT` to the channel you want the alert notifications to be sent to. You can define multiple channels like this: `alerts` `systems`. 
| | yes |\n\n##### DEFAULT_RECIPIENT_ROCKETCHAT\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_ROCKETCHAT` can be edited in the following entries at the bottom of the same file:\n```text\nrole_recipients_rocketchat[sysadmin]=\"systems\"\nrole_recipients_rocketchat[domainadmin]=\"domains\"\nrole_recipients_rocketchat[dba]=\"databases systems\"\nrole_recipients_rocketchat[webmaster]=\"marketing development\"\nrole_recipients_rocketchat[proxyadmin]=\"proxy_admin\"\nrole_recipients_rocketchat[sitemgr]=\"sites\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# rocketchat (rocket.chat) global notification options\n\nSEND_ROCKETCHAT=\"YES\"\nROCKETCHAT_WEBHOOK_URL=\"\"\nDEFAULT_RECIPIENT_ROCKETCHAT=\"monitoring_alarms\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/rocketchat/metadata.yaml" }, { @@ -22063,9 +21967,9 @@ "Slack" ], "overview": "# Slack\n\nSend notifications to a Slack workspace using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Slack app along with an incoming webhook, read Slack's guide on the topic [here](https://api.slack.com/messaging/webhooks).\n- One or more channels to post the messages to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_SLACK | Set `SEND_SLACK` to YES | YES | yes |\n| SLACK_WEBHOOK_URL | set `SLACK_WEBHOOK_URL` to your Slack app's webhook URL. | | yes |\n| DEFAULT_RECIPIENT_SLACK | Set `DEFAULT_RECIPIENT_SLACK` to the Slack channel your Slack app is set to send messages to. The syntax for channels is `#channel` or `channel`. 
| | yes |\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# slack (slack.com) global notification options\n\nSEND_SLACK=\"YES\"\nSLACK_WEBHOOK_URL=\"https://hooks.slack.com/services/XXXXXXXX/XXXXXXXX/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\" \nDEFAULT_RECIPIENT_SLACK=\"#alarms\"\n\n```\n",
+        "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A Slack app along with an incoming webhook; read Slack's guide on the topic [here](https://api.slack.com/messaging/webhooks).\n- One or more channels to post the messages to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_SLACK | Set `SEND_SLACK` to YES | YES | yes |\n| SLACK_WEBHOOK_URL | set `SLACK_WEBHOOK_URL` to your Slack app's webhook URL. | | yes |\n| DEFAULT_RECIPIENT_SLACK | Set `DEFAULT_RECIPIENT_SLACK` to the Slack channel your Slack app is set to send messages to. The syntax for channels is `#channel` or `channel`. | | yes |\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# slack (slack.com) global notification options\n\nSEND_SLACK=\"YES\"\nSLACK_WEBHOOK_URL=\"https://hooks.slack.com/services/XXXXXXXX/XXXXXXXX/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\" \nDEFAULT_RECIPIENT_SLACK=\"#alarms\"\n\n```\n",
    "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n",
-        "integration_type": "notification",
+        "integration_type": "agent_notification",
        "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/slack/metadata.yaml"
    },
    {
@@ -22084,9 +21988,9 @@
            "Messaging"
        ],
    "overview": "# SMS\n\nSend notifications to `smstools3` using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\nThe SMS Server Tools 3 is a SMS Gateway software which can send and receive short messages through GSM modems and mobile phones.\n\n",
-        "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- [Install](http://smstools3.kekekasvi.com/index.php?p=compiling) and [configure](http://smstools3.kekekasvi.com/index.php?p=configure) `smsd`\n- To ensure that the user `netdata` can execute `sendsms`. 
Any user executing `sendsms` needs to:\n - Have write permissions to /tmp and /var/spool/sms/outgoing\n - Be a member of group smsd\n - To ensure that the steps above are successful, just su netdata and execute sendsms phone message.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| sendsms | Set the path for `sendsms`, otherwise Netdata will search for it in your system `$PATH:` | YES | yes |\n| SEND_SMS | Set `SEND_SMS` to `YES`. | | yes |\n| DEFAULT_RECIPIENT_SMS | Set DEFAULT_RECIPIENT_SMS to the phone number you want the alert notifications to be sent to. You can define multiple phone numbers like this: PHONE1 PHONE2. | | yes |\n\n##### sendsms\n\n# The full path of the sendsms command (smstools3).\n# If empty, the system $PATH will be searched for it.\n# If not found, SMS notifications will be silently disabled.\nsendsms=\"/usr/bin/sendsms\"\n\n\n##### DEFAULT_RECIPIENT_SMS\n\nAll roles will default to this variable if left unconfigured.\n\nYou can then have different phone numbers per role, by editing `DEFAULT_RECIPIENT_SMS` with the phone number you want, in the following entries at the bottom of the same file:\n```conf\nrole_recipients_sms[sysadmin]=\"PHONE1\"\nrole_recipients_sms[domainadmin]=\"PHONE2\"\nrole_recipients_sms[dba]=\"PHONE3\"\nrole_recipients_sms[webmaster]=\"PHONE4\"\nrole_recipients_sms[proxyadmin]=\"PHONE5\"\nrole_recipients_sms[sitemgr]=\"PHONE6\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# SMS Server Tools 3 (smstools3) global notification options\nSEND_SMS=\"YES\"\nDEFAULT_RECIPIENT_SMS=\"1234567890\"\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- [Install](http://smstools3.kekekasvi.com/index.php?p=compiling) and [configure](http://smstools3.kekekasvi.com/index.php?p=configure) `smsd`\n- To ensure that the user `netdata` can execute `sendsms`. 
Any user executing `sendsms` needs to:\n - Have write permissions to /tmp and /var/spool/sms/outgoing\n - Be a member of group smsd\n - To ensure that the steps above are successful, just `su netdata` and execute `sendsms phone message`.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| sendsms | Set the path for `sendsms`, otherwise Netdata will search for it in your system `$PATH`. | YES | yes |\n| SEND_SMS | Set `SEND_SMS` to `YES`. | | yes |\n| DEFAULT_RECIPIENT_SMS | Set `DEFAULT_RECIPIENT_SMS` to the phone number you want the alert notifications to be sent to. You can define multiple phone numbers like this: PHONE1 PHONE2. | | yes |\n\n##### sendsms\n\n```text\n# The full path of the sendsms command (smstools3).\n# If empty, the system $PATH will be searched for it.\n# If not found, SMS notifications will be silently disabled.\nsendsms=\"/usr/bin/sendsms\"\n```\n\n\n##### DEFAULT_RECIPIENT_SMS\n\nAll roles will default to this variable if left unconfigured.\n\nYou can then have different phone numbers per role, by editing `DEFAULT_RECIPIENT_SMS` with the phone number you want, in the following entries at the bottom of the same file:\n```text\nrole_recipients_sms[sysadmin]=\"PHONE1\"\nrole_recipients_sms[domainadmin]=\"PHONE2\"\nrole_recipients_sms[dba]=\"PHONE3\"\nrole_recipients_sms[webmaster]=\"PHONE4\"\nrole_recipients_sms[proxyadmin]=\"PHONE5\"\nrole_recipients_sms[sitemgr]=\"PHONE6\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# SMS Server Tools 3 (smstools3) global notification options\nSEND_SMS=\"YES\"\nDEFAULT_RECIPIENT_SMS=\"1234567890\"\n\n```\n",
    "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n",
-        "integration_type": "notification",
+        "integration_type": "agent_notification",
        "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/smstools3/metadata.yaml"
    },
    {
@@ -22103,9 +22007,9 @@
            "syslog"
        ],
    "overview": "# syslog\n\nSend notifications to Syslog using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n",
-        "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A working `logger` command for this to work. 
This is the case on pretty much every Linux system in existence, and most BSD systems.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SYSLOG_FACILITY | Set `SYSLOG_FACILITY` to the facility used for logging, by default this value is set to `local6`. | | yes |\n| DEFAULT_RECIPIENT_SYSLOG | Set `DEFAULT_RECIPIENT_SYSLOG` to the recipient you want the alert notifications to be sent to. | | yes |\n| SEND_SYSLOG | Set SEND_SYSLOG to YES, make sure you have everything else configured before turning this on. | | yes |\n\n##### DEFAULT_RECIPIENT_SYSLOG\n\nTargets are defined as follows:\n\n```\n[[facility.level][@host[:port]]/]prefix\n```\n\nprefix defines what the log messages are prefixed with. By default, all lines are prefixed with 'netdata'.\n\nThe facility and level are the standard syslog facility and level options, for more info on them see your local logger and syslog documentation. By default, Netdata will log to the local6 facility, with a log level dependent on the type of message (crit for CRITICAL, warning for WARNING, and info for everything else).\n\nYou can configure sending directly to remote log servers by specifying a host (and optionally a port). However, this has a somewhat high overhead, so it is much preferred to use your local syslog daemon to handle the forwarding of messages to remote systems (pretty much all of them allow at least simple forwarding, and most of the really popular ones support complex queueing and routing of messages to remote log servers).\n\nYou can define multiple recipients like this: daemon.notice@loghost:514/netdata daemon.notice@loghost2:514/netdata.\nAll roles will default to this variable if left unconfigured.\n\n\n##### SEND_SYSLOG \n\nYou can then have different recipients per role, by editing DEFAULT_RECIPIENT_SYSLOG with the recipient you want, in the following entries at the bottom of the same file:\n\n```conf\nrole_recipients_syslog[sysadmin]=\"daemon.notice@loghost1:514/netdata\"\nrole_recipients_syslog[domainadmin]=\"daemon.notice@loghost2:514/netdata\"\nrole_recipients_syslog[dba]=\"daemon.notice@loghost3:514/netdata\"\nrole_recipients_syslog[webmaster]=\"daemon.notice@loghost4:514/netdata\"\nrole_recipients_syslog[proxyadmin]=\"daemon.notice@loghost5:514/netdata\"\nrole_recipients_syslog[sitemgr]=\"daemon.notice@loghost6:514/netdata\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# syslog notifications\n\nSEND_SYSLOG=\"YES\"\nSYSLOG_FACILITY='local6'\nDEFAULT_RECIPIENT_SYSLOG=\"daemon.notice@loghost6:514/netdata\"\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A working `logger` command for this to work. 
This is the case on pretty much every Linux system in existence, and most BSD systems.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SYSLOG_FACILITY | Set `SYSLOG_FACILITY` to the facility used for logging, by default this value is set to `local6`. | | yes |\n| DEFAULT_RECIPIENT_SYSLOG | Set `DEFAULT_RECIPIENT_SYSLOG` to the recipient you want the alert notifications to be sent to. | | yes |\n| SEND_SYSLOG | Set `SEND_SYSLOG` to YES; make sure you have everything else configured before turning this on. | | yes |\n\n##### DEFAULT_RECIPIENT_SYSLOG\n\nTargets are defined as follows:\n\n```text\n[[facility.level][@host[:port]]/]prefix\n```\n\n`prefix` defines what the log messages are prefixed with. By default, all lines are prefixed with 'netdata'.\n\nThe facility and level are the standard syslog facility and level options; for more info on them, see your local `logger` and syslog documentation. By default, Netdata will log to the `local6` facility, with a log level dependent on the type of message (`crit` for CRITICAL, `warning` for WARNING, and `info` for everything else).\n\nYou can configure sending directly to remote log servers by specifying a host (and optionally a port). 
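As a minimal sketch, assuming a reachable log server named `loghost` listening on the default syslog port, a remote target could look like this:\n\n```text\ndaemon.notice@loghost:514/netdata\n```\n\nThis would send `daemon.notice` messages to `loghost` on port 514, with each line prefixed with `netdata`. 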
However, this has a somewhat high overhead, so it is much preferred to use your local syslog daemon to handle the forwarding of messages to remote systems (pretty much all of them allow at least simple forwarding, and most of the really popular ones support complex queueing and routing of messages to remote log servers).\n\nYou can define multiple recipients like this: daemon.notice@loghost:514/netdata daemon.notice@loghost2:514/netdata.\nAll roles will default to this variable if left unconfigured.\n\n\n##### SEND_SYSLOG \n\nYou can then have different recipients per role, by editing DEFAULT_RECIPIENT_SYSLOG with the recipient you want, in the following entries at the bottom of the same file:\n\n```text\nrole_recipients_syslog[sysadmin]=\"daemon.notice@loghost1:514/netdata\"\nrole_recipients_syslog[domainadmin]=\"daemon.notice@loghost2:514/netdata\"\nrole_recipients_syslog[dba]=\"daemon.notice@loghost3:514/netdata\"\nrole_recipients_syslog[webmaster]=\"daemon.notice@loghost4:514/netdata\"\nrole_recipients_syslog[proxyadmin]=\"daemon.notice@loghost5:514/netdata\"\nrole_recipients_syslog[sitemgr]=\"daemon.notice@loghost6:514/netdata\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# syslog notifications\n\nSEND_SYSLOG=\"YES\"\nSYSLOG_FACILITY='local6'\nDEFAULT_RECIPIENT_SYSLOG=\"daemon.notice@loghost6:514/netdata\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/syslog/metadata.yaml" }, { @@ -22124,9 +22028,9 @@ "MS teams" ], "overview": "# Microsoft Teams\n\nYou can send Netdata alerts to Microsoft Teams using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The incoming webhook URL as given by Microsoft Teams. You can use the same on all your Netdata servers (or you can have multiple if you like).\n- One or more channels to post the messages to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_MSTEAMS | Set `SEND_MSTEAMS` to YES | YES | yes |\n| MSTEAMS_WEBHOOK_URL | set `MSTEAMS_WEBHOOK_URL` to the incoming webhook URL as given by Microsoft Teams. 
| | yes |\n| DEFAULT_RECIPIENT_MSTEAMS | Set `DEFAULT_RECIPIENT_MSTEAMS` to the encoded Microsoft Teams channel name you want the alert notifications to be sent to. | | yes |\n\n##### DEFAULT_RECIPIENT_MSTEAMS\n\nIn Microsoft Teams the channel name is encoded in the URI after `/IncomingWebhook/`. You can define multiple channels like this: `CHANNEL1` `CHANNEL2`.\n\nAll roles will default to this variable if left unconfigured.\n\nYou can have different channels per role, by editing `DEFAULT_RECIPIENT_MSTEAMS` with the channel you want, in the following entries at the bottom of the same file:\n```conf\nrole_recipients_msteams[sysadmin]=\"CHANNEL1\"\nrole_recipients_msteams[domainadmin]=\"CHANNEL2\"\nrole_recipients_msteams[dba]=\"databases CHANNEL3\"\nrole_recipients_msteams[webmaster]=\"CHANNEL4\"\nrole_recipients_msteams[proxyadmin]=\"CHANNEL5\"\nrole_recipients_msteams[sitemgr]=\"CHANNEL6\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Microsoft Teams (office.com) global notification options\n\nSEND_MSTEAMS=\"YES\"\nMSTEAMS_WEBHOOK_URL=\"https://outlook.office.com/webhook/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX@XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/IncomingWebhook/CHANNEL/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX\"\nDEFAULT_RECIPIENT_MSTEAMS=\"XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The incoming webhook URL as given by Microsoft Teams. You can use the same on all your Netdata servers (or you can have multiple if you like).\n- One or more channels to post the messages to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_MSTEAMS | Set `SEND_MSTEAMS` to YES | YES | yes |\n| MSTEAMS_WEBHOOK_URL | set `MSTEAMS_WEBHOOK_URL` to the incoming webhook URL as given by Microsoft Teams. | | yes |\n| DEFAULT_RECIPIENT_MSTEAMS | Set `DEFAULT_RECIPIENT_MSTEAMS` to the encoded Microsoft Teams channel name you want the alert notifications to be sent to. | | yes |\n\n##### DEFAULT_RECIPIENT_MSTEAMS\n\nIn Microsoft Teams the channel name is encoded in the URI after `/IncomingWebhook/`. 
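For example, in a hypothetical incoming webhook URL of the following shape, the encoded channel name to use as the recipient is `CHANNEL`:\n\n```text\nhttps://outlook.office.com/webhook/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX@XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/IncomingWebhook/CHANNEL/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX\n```\n\n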
You can define multiple channels like this: `CHANNEL1` `CHANNEL2`.\n\nAll roles will default to this variable if left unconfigured.\n\nYou can have different channels per role, by editing `DEFAULT_RECIPIENT_MSTEAMS` with the channel you want, in the following entries at the bottom of the same file:\n```text\nrole_recipients_msteams[sysadmin]=\"CHANNEL1\"\nrole_recipients_msteams[domainadmin]=\"CHANNEL2\"\nrole_recipients_msteams[dba]=\"databases CHANNEL3\"\nrole_recipients_msteams[webmaster]=\"CHANNEL4\"\nrole_recipients_msteams[proxyadmin]=\"CHANNEL5\"\nrole_recipients_msteams[sitemgr]=\"CHANNEL6\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Microsoft Teams (office.com) global notification options\n\nSEND_MSTEAMS=\"YES\"\nMSTEAMS_WEBHOOK_URL=\"https://outlook.office.com/webhook/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX@XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/IncomingWebhook/CHANNEL/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX\"\nDEFAULT_RECIPIENT_MSTEAMS=\"XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/msteams/metadata.yaml" }, { @@ -22143,9 +22047,9 @@ "Telegram" ], "overview": "# Telegram\n\nSend notifications to Telegram using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A bot token. To get one, contact the [@BotFather](https://t.me/BotFather) bot and send the command `/newbot` and follow the instructions. Invite your bot to a group where you want it to send messages.\n- The chat ID for every chat you want to send messages to. Invite [@myidbot](https://t.me/myidbot) bot to the group that will receive notifications, and write the command `/getgroupid@myidbot` to get the group chat ID. Group IDs start with a hyphen, supergroup IDs start with `-100`.\n- Terminal access to the Agent you wish to configure.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_TELEGRAM | Set `SEND_TELEGRAM` to YES | YES | yes |\n| TELEGRAM_BOT_TOKEN | set `TELEGRAM_BOT_TOKEN` to your bot token. | | yes |\n| DEFAULT_RECIPIENT_TELEGRAM | Set the `DEFAULT_RECIPIENT_TELEGRAM` variable in your config file to your Telegram chat ID (find it with @myidbot). 
Separate multiple chat IDs with spaces. To send alerts to a specific topic within a chat, use `chatID:topicID`. | | yes |\n\n##### DEFAULT_RECIPIENT_TELEGRAM\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_CUSTOM` can be edited in the following entries at the bottom of the same file:\n\n```conf\nrole_recipients_telegram[sysadmin]=\"-49999333324\"\nrole_recipients_telegram[domainadmin]=\"-49999333389\"\nrole_recipients_telegram[dba]=\"-10099992222\"\nrole_recipients_telegram[webmaster]=\"-10099992222 -49999333389\"\nrole_recipients_telegram[proxyadmin]=\"-49999333344\"\nrole_recipients_telegram[sitemgr]=\"-49999333876\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# telegram (telegram.org) global notification options\n\nSEND_TELEGRAM=\"YES\"\nTELEGRAM_BOT_TOKEN=\"111122223:7OpFlFFRzRBbrUUmIjj5HF9Ox2pYJZy5\"\nDEFAULT_RECIPIENT_TELEGRAM=\"-49999333876\"\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A bot token. To get one, contact the [@BotFather](https://t.me/BotFather) bot and send the command `/newbot` and follow the instructions. Invite your bot to a group where you want it to send messages.\n- The chat ID for every chat you want to send messages to. Invite [@myidbot](https://t.me/myidbot) bot to the group that will receive notifications, and write the command `/getgroupid@myidbot` to get the group chat ID. Group IDs start with a hyphen, supergroup IDs start with `-100`.\n- Terminal access to the Agent you wish to configure.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_TELEGRAM | Set `SEND_TELEGRAM` to YES | YES | yes |\n| TELEGRAM_BOT_TOKEN | set `TELEGRAM_BOT_TOKEN` to your bot token. | | yes |\n| DEFAULT_RECIPIENT_TELEGRAM | Set the `DEFAULT_RECIPIENT_TELEGRAM` variable in your config file to your Telegram chat ID (find it with @myidbot). Separate multiple chat IDs with spaces. To send alerts to a specific topic within a chat, use `chatID:topicID`. 
| | yes |\n\n##### DEFAULT_RECIPIENT_TELEGRAM\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_TELEGRAM` can be edited in the following entries at the bottom of the same file:\n\n```text\nrole_recipients_telegram[sysadmin]=\"-49999333324\"\nrole_recipients_telegram[domainadmin]=\"-49999333389\"\nrole_recipients_telegram[dba]=\"-10099992222\"\nrole_recipients_telegram[webmaster]=\"-10099992222 -49999333389\"\nrole_recipients_telegram[proxyadmin]=\"-49999333344\"\nrole_recipients_telegram[sitemgr]=\"-49999333876\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# telegram (telegram.org) global notification options\n\nSEND_TELEGRAM=\"YES\"\nTELEGRAM_BOT_TOKEN=\"111122223:7OpFlFFRzRBbrUUmIjj5HF9Ox2pYJZy5\"\nDEFAULT_RECIPIENT_TELEGRAM=\"-49999333876\"\n\n```\n",
    "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n",
-        "integration_type": "notification",
+        "integration_type": "agent_notification",
        "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/telegram/metadata.yaml"
    },
    {
@@ -22162,11 +22066,312 @@
            "Twilio"
        ],
    "overview": "# Twilio\n\nSend notifications to Twilio using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n",
-        "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Get your SID, and Token from https://www.twilio.com/console\n- Terminal access to the Agent you wish to configure\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_TWILIO | Set `SEND_TWILIO` to YES | YES | yes |\n| TWILIO_ACCOUNT_SID | set `TWILIO_ACCOUNT_SID` to your account SID. | | yes |\n| TWILIO_ACCOUNT_TOKEN | Set `TWILIO_ACCOUNT_TOKEN` to your account token. | | yes |\n| TWILIO_NUMBER | Set `TWILIO_NUMBER` to your account's number. | | yes |\n| DEFAULT_RECIPIENT_TWILIO | Set DEFAULT_RECIPIENT_TWILIO to the number you want the alert notifications to be sent to. You can define multiple numbers like this: +15555555555 +17777777777. 
| | yes |\n\n##### DEFAULT_RECIPIENT_TWILIO\n\nYou can then have different recipients per role, by editing DEFAULT_RECIPIENT_TWILIO with the recipient's number you want, in the following entries at the bottom of the same file:\n\n```conf\nrole_recipients_twilio[sysadmin]=\"+15555555555\"\nrole_recipients_twilio[domainadmin]=\"+15555555556\"\nrole_recipients_twilio[dba]=\"+15555555557\"\nrole_recipients_twilio[webmaster]=\"+15555555558\"\nrole_recipients_twilio[proxyadmin]=\"+15555555559\"\nrole_recipients_twilio[sitemgr]=\"+15555555550\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Twilio (twilio.com) SMS options\n\nSEND_TWILIO=\"YES\"\nTWILIO_ACCOUNT_SID=\"xxxxxxxxx\"\nTWILIO_ACCOUNT_TOKEN=\"xxxxxxxxxx\"\nTWILIO_NUMBER=\"xxxxxxxxxxx\"\nDEFAULT_RECIPIENT_TWILIO=\"+15555555555\"\n\n```\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Get your SID, and Token from https://www.twilio.com/console\n- Terminal access to the Agent you wish to configure\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_TWILIO | Set `SEND_TWILIO` to YES | YES | yes |\n| TWILIO_ACCOUNT_SID | set `TWILIO_ACCOUNT_SID` to your account SID. | | yes |\n| TWILIO_ACCOUNT_TOKEN | Set `TWILIO_ACCOUNT_TOKEN` to your account token. | | yes |\n| TWILIO_NUMBER | Set `TWILIO_NUMBER` to your account's number. | | yes |\n| DEFAULT_RECIPIENT_TWILIO | Set DEFAULT_RECIPIENT_TWILIO to the number you want the alert notifications to be sent to. You can define multiple numbers like this: +15555555555 +17777777777. 
| | yes |\n\n##### DEFAULT_RECIPIENT_TWILIO\n\nYou can then have different recipients per role, by editing DEFAULT_RECIPIENT_TWILIO with the recipient's number you want, in the following entries at the bottom of the same file:\n\n```text\nrole_recipients_twilio[sysadmin]=\"+15555555555\"\nrole_recipients_twilio[domainadmin]=\"+15555555556\"\nrole_recipients_twilio[dba]=\"+15555555557\"\nrole_recipients_twilio[webmaster]=\"+15555555558\"\nrole_recipients_twilio[proxyadmin]=\"+15555555559\"\nrole_recipients_twilio[sitemgr]=\"+15555555550\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Twilio (twilio.com) SMS options\n\nSEND_TWILIO=\"YES\"\nTWILIO_ACCOUNT_SID=\"xxxxxxxxx\"\nTWILIO_ACCOUNT_TOKEN=\"xxxxxxxxxx\"\nTWILIO_NUMBER=\"xxxxxxxxxxx\"\nDEFAULT_RECIPIENT_TWILIO=\"+15555555555\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/twilio/metadata.yaml" }, + { + "id": "notify-cloud-awssns", + "meta": { + "name": "Amazon SNS", + "link": "https://aws.amazon.com/sns/", + "categories": [ + "notify.cloud" + ], + "icon_filename": "awssns.png" + }, + "keywords": [ + "awssns" + ], + "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the space as an **Admin**\n- The Space needs to be on a paid plan\n- An AWS account with AWS SNS access\n\n### AWS SNS Configuration\n\n1. [Setting up access for Amazon SNS](https://docs.aws.amazon.com/sns/latest/dg/sns-setting-up.html)\n2. Create a topic\n - On AWS SNS management console click on **Create topic**\n - On the **Details** section, select the standard type and provide the topic name\n - On the **Access policy** section, change the **Publishers** option to **Only the specified AWS accounts** and provide the Netdata AWS account **(123269920060)** that will be used to publish notifications to the topic being created\n3. Copy the **Topic ARN** in order to add it to your integration configuration in the Netdata Cloud UI\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the AWS SNS Integration\n5. 
A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Topic ARN: The topic provided on AWS SNS (with region) for where to publish your notifications.\n\n", + "integration_type": "cloud_notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", + "troubleshooting": "" + }, + { + "id": "notify-cloud-discord", + "meta": { + "name": "Discord", + "link": "https://discord.com/", + "categories": [ + "notify.cloud" + ], + "icon_filename": "discord.png" + }, + "keywords": [ + "discord", + "community" + ], + "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n\n### Discord Server Configuration\n\n1. Go to **Server Settings** --> **Integrations**\n2. **Create Webhook** or **View Webhooks** if you already have some defined\n3. Specify the **Name** and **Channel** on your new webhook\n4. Keep note of the **Webhook URL** as you will need it for the configuration of the integration on the Netdata Cloud UI\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Discord Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Webhook URL: The URL you copied from the previous section\n - Channel Parameters: Select the channel type which the notifications will be sent to, if it is a Forum channel, you need to specify a thread name\n\n", + "integration_type": "cloud_notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", + "troubleshooting": "" + }, + { + "id": "notify-cloud-ilert", + "meta": { + "name": "ilert", + "link": "https://www.ilert.com/", + "categories": [ + "notify.cloud" + ], + "icon_filename": "ilert.svg" + }, + "keywords": [ + "ilert" + ], + "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- You need to have permissions on ilert to add new Alert sources.\n\n### ilert Configuration\n\n1. From the navigation bar, open the Alert sources drop down and click \"Alert sources\"\n2. Click on the \"+ Create a new alert source\" button\n3. Configure an Alert source:\n - Select \"API integration\" and click Next\n - Provide a name that suits the source's purpose, for example \"Netdata\"\n - Select Escalation policy\n - Select Alert grouping (optional)\n4. Obtain the API Key:\n - Once the Alert source is created, you will be provided with an API key. Copy it in order to add it to your integration configuration in the Netdata Cloud UI\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. 
Click on the **+ Add configuration** button\n4. Add the ilert Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Alert Source API key: The key you copied in the ilert configuration step.\n\n", + "integration_type": "cloud_notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", + "troubleshooting": "" + }, + { + "id": "notify-cloud-mattermost", + "meta": { + "name": "Mattermost", + "link": "https://mattermost.com/", + "categories": [ + "notify.cloud" + ], + "icon_filename": "mattermost.png" + }, + "keywords": [ + "mattermost" + ], + "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- You need to have permissions on Mattermost to add new integrations.\n\n### Mattermost Server Configuration\n\n1. In Mattermost, go to Product menu > Integrations > Incoming Webhook\n - If you don't have the Integrations option, incoming webhooks may not be enabled on your Mattermost server or may be disabled for non-admins. They can be enabled by a System Admin from System Console > Integrations > Integration Management. Once incoming webhooks are enabled, continue with the steps below.\n2. Select Add Incoming Webhook and add a name and description for the webhook.\n3. Select the channel to receive webhook payloads, then select Add to create the webhook\n4. You will end up with a webhook URL that looks like `https://your-mattermost-server.com/hooks/xxx-generatedkey-xxx`, copy it in order to add it to your integration configuration in the Netdata Cloud UI\n\nFor more details please check [Incoming webhooks for Mattermost](https://developers.mattermost.com/integrate/webhooks/incoming/).\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Mattermost Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Webhook URL: URL provided on Mattermost for the channel you want to receive your notifications\n\n", + "integration_type": "cloud_notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", + "troubleshooting": "" + }, + { + "id": "notify-cloud-microsoftteams", + "meta": { + "name": "Microsoft Teams", + "link": "https://www.microsoft.com/en-us/microsoft-teams", + "categories": [ + "notify.cloud" + ], + "icon_filename": "teams.svg" + }, + "keywords": [ + "microsoft", + "teams" + ], + "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- A [Microsoft Teams Essentials subscription](https://www.microsoft.com/en-sg/microsoft-teams/essentials) or higher. 
Note that this is a **paid** feature\n\n### Microsoft Teams Configuration\n\n1. Navigate to the desired Microsoft Teams channel and hover over the channel name. Click the three dots icon that appears\n2. Select \"Workflows\" from the options, then choose \"Post to a channel when a webhook request is received\"\n3. **Configure Workflow Details**\n - Give your workflow a name, such as \"Netdata Alerts\"\n - Select the target team and channel where you will receive notifications\n - Click \"Add workflow\"\n4. Once the workflow is created, you will receive a unique Workflow Webhook URL, copy it, in order to add it to your integration configuration in the Netdata Cloud UI\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Microsoft Teams Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Microsoft Teams Incoming Webhook URL: The Incoming Webhook URL that you copied earlier.\n\n", + "integration_type": "cloud_notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", + "troubleshooting": "" + }, + { + "id": "notify-cloud-mobile-app", + "meta": { + "name": "Netdata Mobile App", + "link": "https://netdata.cloud", + "categories": [ + "notify.cloud" + ], + "icon_filename": "netdata.png" + }, + "keywords": [ + "mobile-app", + "phone", + "personal-notifications" + ], + "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- You need to have the Netdata Mobile App installed on your [Android](https://play.google.com/store/apps/details?id=cloud.netdata.android&pli=1) or [iOS](https://apps.apple.com/in/app/netdata-mobile/id6474659622) phone.\n\n### Netdata Mobile App Configuration and device linking\n\nIn order to login to the Netdata Mobile App\n\n1. Download the Netdata Mobile App from [Google Play Store](https://play.google.com/store/apps/details?id=cloud.netdata.android&pli=1) or the [iOS App Store](https://apps.apple.com/in/app/netdata-mobile/id6474659622)\n2. 
Open the App and Choose your Sign-in option\n - Email Address: Enter the email address of your registered Netdata Cloud account and click on the verification link received by email on your mobile device.\n - Sign-in with QR Code: Scan the QR code from the Netdata Cloud UI under **Profile Picture** --> **Settings** --> **Notifications** --> **Mobile App Notifications** --> **Show QR Code**\n\n### Netdata Configuration\n\nAfter linking your device, enable the toggle for **Mobile App Notifications** under the same settings panel.\n\n", + "integration_type": "cloud_notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", + "troubleshooting": "" + }, + { + "id": "notify-cloud-opsgenie", + "meta": { + "name": "Opsgenie", + "link": "https://www.atlassian.com/software/opsgenie", + "categories": [ + "notify.cloud" + ], + "icon_filename": "opsgenie.png" + }, + "keywords": [ + "opsgenie", + "atlassian" + ], + "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- You need to have permissions on Opsgenie to add new integrations.\n\n### Opsgenie Server Configuration\n\n1. Go to the integrations tab of your team, click **Add integration**\n2. Pick **API** from the available integrations and copy the API Key in order to add it to your integration configuration in the Netdata Cloud UI\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Opsgenie Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - API Key: The key provided on Opsgenie for the channel you want to receive your notifications\n\n", + "integration_type": "cloud_notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", + "troubleshooting": "" + }, + { + "id": "notify-cloud-pagerduty", + "meta": { + "name": "PagerDuty", + "link": "https://www.pagerduty.com/", + "categories": [ + "notify.cloud" + ], + "icon_filename": "pagerduty.png" + }, + "keywords": [ + "pagerduty" + ], + "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- You need to have a PagerDuty service to receive events using webhooks.\n\n### PagerDuty Server Configuration\n\n1. Create a service to receive events from your services directory page on PagerDuty\n2. On the third step of the service creation, select `Events API V2` Integration\n3. Once the service is created, you will be redirected to its configuration page, where you can copy the **Integration Key** and **Integration URL (Alert Events)** in order to add them to your integration configuration in the Netdata Cloud UI\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the PagerDuty Integration\n5. 
A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Integration Key: A 32-character key provided by PagerDuty to receive events on your service.\n - Integration URL (Alert Events): The URL provided by PagerDuty where Netdata Cloud will send notifications.\n\n", + "integration_type": "cloud_notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", + "troubleshooting": "" + }, + { + "id": "notify-cloud-rocketchat", + "meta": { + "name": "RocketChat", + "link": "https://www.rocket.chat/", + "categories": [ + "notify.cloud" + ], + "icon_filename": "rocketchat.png" + }, + "keywords": [ + "rocketchat" + ], + "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- You need to have permissions on RocketChat to add new integrations.\n\n### RocketChat Server Configuration\n\nSteps to configure your RocketChat server to receive notifications from Netdata Cloud:\n\n1. In RocketChat, navigate to Administration > Workspace > Integrations\n2. Click **+New** at the top right corner\n3. For more details about each parameter, check [Create a new incoming webhook](https://docs.rocket.chat/use-rocket.chat/workspace-administration/integrations#create-a-new-incoming-webhook)\n4. You will end up with a webhook endpoint that looks like `https://your-server.rocket.chat/hooks/YYYYYYYYYYYYYYYYYYYYYYYY/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX`, copy it in order to add it to your integration configuration in the Netdata Cloud UI\n\nFor more details please check [Incoming webhooks for RocketChat](https://docs.rocket.chat/use-rocket.chat/workspace-administration/integrations/).\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the RocketChat Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Webhook URL: URL provided on RocketChat for the channel you want to receive your notifications\n\n", + "integration_type": "cloud_notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", + "troubleshooting": "" + }, + { + "id": "notify-cloud-slack", + "meta": { + "name": "Slack", + "link": "https://slack.com/", + "categories": [ + "notify.cloud" + ], + "icon_filename": "slack.png" + }, + "keywords": [ + "slack" + ], + "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- You need to have a Slack app on your workspace to receive the Webhooks.\n\n### Slack Server Configuration\n\n1. Create an app to receive webhook integrations.
Check the [Slack documentation](https://api.slack.com/apps?new_app=1) for further details\n2. Install the app on your workspace\n3. Configure Webhook URLs for your workspace\n - On your app go to **Incoming Webhooks** and click on **activate incoming webhooks**\n - At the bottom of **Webhook URLs for Your Workspace** section you have **Add New Webhook to Workspace**\n - Specify the channel where you want your notifications to be delivered\n - Once completed, copy the Webhook URL in order to add it to your integration configuration in the Netdata Cloud UI\n\nFor more details please check [Incoming webhooks for Slack](https://slack.com/help/articles/115005265063-Incoming-webhooks-for-Slack).\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Slack Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Webhook URL: URL provided on Slack for the channel you want to receive your notifications\n\n", + "integration_type": "cloud_notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", + "troubleshooting": "" + }, + { + "id": "notify-cloud-splunk", + "meta": { + "name": "Splunk", + "link": "https://splunk.com/", + "categories": [ + "notify.cloud" + ], + "icon_filename": "splunk-black.svg" + }, + "keywords": [ + "Splunk" + ], + "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- The URI and token for your Splunk HTTP Event Collector. Refer to the [Splunk documentation](https://docs.splunk.com/Documentation/Splunk/latest/Data/UsetheHTTPEventCollector) for detailed instructions on how to set it up.\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Splunk Integration\n5. 
A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - HTTP Event Collector URI: The URI of your HTTP event collector in Splunk\n - HTTP Event Collector Token: The token that Splunk provided to you when you created the HTTP Event Collector\n\n", + "integration_type": "cloud_notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", + "troubleshooting": "" + }, + { + "id": "notify-cloud-telegram", + "meta": { + "name": "Telegram", + "link": "https://telegram.org/", + "categories": [ + "notify.cloud" + ], + "icon_filename": "telegram.svg" + }, + "keywords": [ + "Telegram" + ], + "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- The Telegram bot token, chat ID and optionally the topic ID\n\n### Telegram Configuration\n\n- Bot token: To create a bot, contact the [@BotFather](https://t.me/BotFather) bot and send the command `/newbot` and follow the instructions. **Start a conversation with your bot or invite it into the group where you want it to send notifications**.\n- To get the chat ID you have two options:\n - Contact the [@myidbot](https://t.me/myidbot) bot and send the `/getid` command to get your personal chat ID, or invite it into a group and use the `/getgroupid` command to get the group chat ID.\n - Alternatively, you can get the chat ID directly from the bot API. Send your bot a command in the chat you want to use, then check `https://api.telegram.org/bot{YourBotToken}/getUpdates`, e.g. `https://api.telegram.org/bot111122223:7OpFlFFRzRBbrUUmIjj5HF9Ox2pYJZy5/getUpdates`\n- To get the topic ID, the easiest way is this: Post a message to that topic, then right-click on it and select `Copy Message Link`. Paste it into a scratchpad and notice that it has the following structure `https://t.me/c/XXXXXXXXXX/YY/ZZ`. The topic ID is `YY` (integer).\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Telegram Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Bot Token: The token of your bot\n - Chat ID: The chat ID your bot will deliver messages to\n - Topic ID: The identifier of the chat topic to which your bot will send messages. If omitted or 0, messages will be sent to the General topic.
If topics are not supported, messages will be sent to the chat.\n\n", + "integration_type": "cloud_notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", + "troubleshooting": "" + }, + { + "id": "notify-cloud-victorops", + "meta": { + "name": "Splunk VictorOps", + "link": "https://www.splunk.com/en_us/about-splunk/acquisitions/splunk-on-call.html", + "categories": [ + "notify.cloud" + ], + "icon_filename": "victorops.svg" + }, + "keywords": [ + "VictorOps", + "Splunk", + "On-Call" + ], + "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- The Destination URL for your Splunk VictorOps REST Endpoint Integration. Refer to the [VictorOps documentation](https://help.victorops.com/knowledge-base/rest-endpoint-integration-guide) for detailed instructions.\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Splunk VictorOps Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Destination URL - The URL provided by VictorOps of your REST endpoint.\n\n", + "integration_type": "cloud_notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", + "troubleshooting": "" + }, + { + "id": "notify-cloud-webhook", + "meta": { + "name": "Webhook", + "link": "https://en.wikipedia.org/wiki/Webhook", + "categories": [ + "notify.cloud" + ], + "icon_filename": "webhook.svg" + }, + "keywords": [ + "generic webhooks", + "webhooks" + ], + "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- You need to have an app that allows you to receive webhooks following a predefined schema.\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Webhook integration\n5. A modal will be presented to you to enter the required details to enable the configuration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Webhook URL: The url of the service that Netdata will send notifications to. 
To keep the communication secure, Netdata only accepts HTTPS URLs.\n - Extra headers: Optional key-value pairs that you can set to be included in the HTTP requests sent to the webhook URL.\n - Authentication Mechanism: The Netdata webhook integration supports 3 different authentication mechanisms:\n - Mutual TLS (recommended): Default authentication mechanism used if no other method is selected.\n - Basic: The client sends a request with an Authorization header that includes a base64-encoded string in the format **username:password**.\n - Bearer: The client sends a request with an Authorization header that includes a **bearer token**.\n\n### Webhook service\n\nA webhook service allows your application to receive real-time alerts from Netdata by sending HTTP requests to a specified URL.\n\nIn this section, we'll go over the steps to set up a generic webhook service, including adding headers and implementing different types of authorization mechanisms.\n\n#### Netdata webhook integration\n\nThe Netdata webhook integration service sends alert and reachability notifications to the destination service as soon as they are detected.\n\nFor alert notifications, the content sent to the destination service contains a JSON object with the following properties:\n\n| field | type | description |\n|:----|:----|:----|\n| message | string | A summary message of the alert. |\n| alert | string | The alert the notification is related to. |\n| info | string | Additional info related to the alert. |\n| chart | string | The chart associated with the alert. |\n| context | string | The chart context. |\n| space | string | The space where the node that raised the alert is assigned. |\n| Rooms | object\[object(string,string)\] | Object with the list of Room names and URLs the node belongs to. |\n| family | string | Context family. |\n| class | string | Classification of the alert, e.g. `Error`. |\n| severity | string | Alert severity, can be one of `warning`, `critical` or `clear`. |\n| date | string | Date of the alert in ISO8601 format. |\n| duration | string | Duration the alert has been raised. |\n| additional_active_critical_alerts | integer | Number of additional critical alerts currently existing on the same node. |\n| additional_active_warning_alerts | integer | Number of additional warning alerts currently existing on the same node. |\n| alert_url | string | Netdata Cloud URL for this alert. |\n\nFor reachability notifications, the JSON object will contain the following properties:\n\n| field | type | description |\n|:----|:----|:----|\n| message | string | A summary message of the reachability alert. |\n| url | string | Netdata Cloud URL for the host experiencing the reachability alert. |\n| host | string | The hostname experiencing the reachability alert. |\n| severity | string | Severity for this notification. If host is reachable, severity will be `info`, if host is unreachable, it will be `critical`. |\n| status | object | An object with the status information. 
|\n| status.reachable | boolean | `true` if host is reachable, `false` otherwise |\n| status.text | string | Can be `reachable` or `unreachable` |\n\n#### Extra headers\n\nWhen setting up a webhook service, the user can specify a set of headers to be included in the HTTP requests sent to the webhook URL.\n\nBy default, the following headers will be sent in the HTTP request\n\n | **Header** | **Value** |\n |:------------:|------------------|\n | Content-Type | application/json |\n\n#### Authentication mechanisms\n\nNetdata webhook integration supports 3 different authentication mechanisms:\n\n##### Mutual TLS authentication (recommended)\n\nIn mutual Transport Layer Security (mTLS) authentication, the client and the server authenticate each other using X.509 certificates. This ensures that the client is connecting to the intended server, and that the server is only accepting connections from authorized clients.\n\nThis is the default authentication mechanism used if no other method is selected.\n\nTo take advantage of mutual TLS, you can configure your server to verify Netdata's client certificate. In order to achieve this, the Netdata client sending the notification supports mutual TLS (mTLS) to identify itself with a client certificate that your server can validate.\n\nThe steps to perform this validation are as follows:\n\n- Store Netdata CA certificate on a file in your disk. The content of this file should be:\n\n
\n Netdata CA certificate\n\n ```text\n -----BEGIN CERTIFICATE-----\n MIIF0jCCA7qgAwIBAgIUDV0rS5jXsyNX33evHEQOwn9fPo0wDQYJKoZIhvcNAQEN\n BQAwgYAxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH\n Ew1TYW4gRnJhbmNpc2NvMRYwFAYDVQQKEw1OZXRkYXRhLCBJbmMuMRIwEAYDVQQL\n EwlDbG91ZCBTUkUxGDAWBgNVBAMTD05ldGRhdGEgUm9vdCBDQTAeFw0yMzAyMjIx\n MjQzMDBaFw0zMzAyMTkxMjQzMDBaMIGAMQswCQYDVQQGEwJVUzETMBEGA1UECBMK\n Q2FsaWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEWMBQGA1UEChMNTmV0\n ZGF0YSwgSW5jLjESMBAGA1UECxMJQ2xvdWQgU1JFMRgwFgYDVQQDEw9OZXRkYXRh\n IFJvb3QgQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCwIg7z3R++\n ppQYYVVoMIDlhWO3qVTMsAQoJYEvVa6fqaImUBLW/k19LUaXgUJPohB7gBp1pkjs\n QfY5dBo8iFr7MDHtyiAFjcQV181sITTMBEJwp77R4slOXCvrreizhTt1gvf4S1zL\n qeHBYWEgH0RLrOAqD0jkOHwewVouO0k3Wf2lEbCq3qRk2HeDvkv0LR7sFC+dDms8\n fDHqb/htqhk+FAJELGRqLeaFq1Z5Eq1/9dk4SIeHgK5pdYqsjpBzOTmocgriw6he\n s7F3dOec1ZZdcBEAxOjbYt4e58JwuR81cWAVMmyot5JNCzYVL9e5Vc5n22qt2dmc\n Tzw2rLOPt9pT5bzbmyhcDuNg2Qj/5DySAQ+VQysx91BJRXyUimqE7DwQyLhpQU72\n jw29lf2RHdCPNmk8J1TNropmpz/aI7rkperPugdOmxzP55i48ECbvDF4Wtazi+l+\n 4kx7ieeLfEQgixy4lRUUkrgJlIDOGbw+d2Ag6LtOgwBiBYnDgYpvLucnx5cFupPY\n Cy3VlJ4EKUeQQSsz5kVmvotk9MED4sLx1As8V4e5ViwI5dCsRfKny7BeJ6XNPLnw\n PtMh1hbiqCcDmB1urCqXcMle4sRhKccReYOwkLjLLZ80A+MuJuIEAUUuEPCwywzU\n R7pagYsmvNgmwIIuJtB6mIJBShC7TpJG+wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMC\n AQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU9IbvOsPSUrpr8H2zSafYVQ9e\n Ft8wDQYJKoZIhvcNAQENBQADggIBABQ08aI31VKZs8jzg+y/QM5cvzXlVhcpkZsY\n 1VVBr0roSBw9Pld9SERrEHto8PVXbadRxeEs4sKivJBKubWAooQ6NTvEB9MHuGnZ\n VCU+N035Gq/mhBZgtIs/Zz33jTB2ju3G4Gm9VTZbVqd0OUxFs41Iqvi0HStC3/Io\n rKi7crubmp5f2cNW1HrS++ScbTM+VaKVgQ2Tg5jOjou8wtA+204iYXlFpw9Q0qnP\n qq6ix7TfLLeRVp6mauwPsAJUgHZluz7yuv3r7TBdukU4ZKUmfAGIPSebtB3EzXfH\n 7Y326xzv0hEpjvDHLy6+yFfTdBSrKPsMHgc9bsf88dnypNYL8TUiEHlcTgCGU8ts\n ud8sWN2M5FEWbHPNYRVfH3xgY2iOYZzn0i+PVyGryOPuzkRHTxDLPIGEWE5susM4\n X4bnNJyKH1AMkBCErR34CLXtAe2ngJlV/V3D4I8CQFJdQkn9tuznohUU/j80xvPH\n FOcDGQYmh4m2aIJtlNVP6+/92Siugb5y7HfslyRK94+bZBg2D86TcCJWaaZOFUrR\n Y3WniYXsqM5/JI4OOzu7dpjtkJUYvwtg7Qb5jmm8Ilf5rQZJhuvsygzX6+WM079y\n nsjoQAm6OwpTN5362vE9SYu1twz7KdzBlUkDhePEOgQkWfLHBJWwB+PvB1j/cUA3\n 5zrbwvQf\n -----END CERTIFICATE-----\n ```\n\n
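  As a quick sanity check that the stored file is a valid PEM certificate (a sketch, not part of the official steps; the file name `netdata_ca.pem` below is an assumed example), you can load it with Python's standard `ssl` module:\n\n  ```python\n  import ssl\n\n  # Assumed path of the stored Netdata CA certificate.\n  CA_PATH = 'netdata_ca.pem'\n\n  # Raises ssl.SSLError if the file does not contain a valid certificate.\n  ctx = ssl.create_default_context(cafile=CA_PATH)\n\n  # Expect one CA certificate in the store, e.g. {'x509': 1, 'crl': 0, 'x509_ca': 1}.\n  print(ctx.cert_store_stats())\n  ```\n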
\n\n- Enable client certificate validation on the web server that is doing the TLS termination. Below are examples of how to perform this configuration in `NGINX` and `Apache`.\n\n  **NGINX**\n\n  ```text\n  server {\n      listen 443 ssl default_server;\n\n      # ... existing SSL configuration for server authentication ...\n      ssl_verify_client on;\n      ssl_client_certificate /path/to/Netdata_CA.pem;\n\n      location / {\n          if ($ssl_client_s_dn !~ \"CN=app.netdata.cloud\") {\n              return 403;\n          }\n          # ... existing location configuration ...\n      }\n  }\n  ```\n\n  **Apache**\n\n  ```text\n  Listen 443\n  <VirtualHost *:443>\n      # ... existing SSL configuration for server authentication ...\n      SSLVerifyClient require\n      SSLCACertificateFile \"/path/to/Netdata_CA.pem\"\n\n      <Location />\n          Require expr \"%{SSL_CLIENT_S_DN_CN} == 'app.netdata.cloud'\"\n          # ... existing directory configuration ...\n      </Location>\n  </VirtualHost>\n  ```\n\n##### Basic authentication\n\nIn basic authentication, the client sends a request with an Authorization header that includes a base64-encoded string in the format username:password. The server then uses this information to authenticate the client. If this authentication method is selected, the user can set the user and password that will be used when connecting to the destination service.\n\n##### Bearer token authentication\n\nIn bearer token authentication, the client sends a request with an Authorization header that includes a bearer token. The server then uses this token to authenticate the client. Bearer tokens are typically generated by an authentication service, and are passed to the client after a successful authentication. If this method is selected, the user can set the token to be used for connecting to the destination service.\n\n##### Challenge secret\n\nTo validate that you have ownership of the web application that will receive the webhook events, Netdata uses a challenge-response check mechanism.\n\nThis mechanism works as follows:\n\n- The challenge secret parameter that you provide is a shared secret between only you and Netdata.\n- When you request the creation of a new Webhook integration, Netdata will make a GET request to the URL of the webhook, adding a query parameter `crc_token`, consisting of a random string.\n- Your application will receive this request and must construct a response consisting of a base64-encoded HMAC SHA-256 hash created from the crc_token and the shared secret. 
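For illustration, here is a minimal sketch of that computation (the crc_token value below is made up; Netdata supplies the real one at runtime):\n\n```python\nimport base64\nimport hashlib\nimport hmac\n\n# Sample values for illustration only.\ncrc_token = b'sample-crc-token-from-netdata'\nsecret = b'YOUR_CHALLENGE_SECRET'\n\n# HMAC SHA-256 of the crc_token, keyed with the shared challenge secret.\ndigest = hmac.new(secret, msg=crc_token, digestmod=hashlib.sha256).digest()\n\n# This is the value expected in the response_token field.\nprint('sha256=' + base64.b64encode(digest).decode('ascii'))\n```\n\n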
The response will be in the format:\n\n  ```json\n  {\n    \"response_token\": \"sha256=9GKoHJYmcHIkhD+C182QWN79YBd+D+Vkj4snmZrfNi4=\"\n  }\n  ```\n\n- Netdata will compare your application's response with the hash that it will generate using the challenge secret, and if they are the same, the integration creation will succeed.\n\nNetdata does this validation every time you update your integration configuration.\n\n- Response requirements:\n  - A base64-encoded HMAC SHA-256 hash created from the crc_token and the shared secret.\n  - Valid response_token and JSON format.\n  - Latency less than 5 seconds.\n  - 200 HTTP response code.\n\n**Example response token generation in Python:**\n\nHere you can see how to define a handler for a Flask application in Python 3:\n\n```python\nimport base64\nimport hashlib\nimport hmac\nimport json\n\nfrom flask import Flask, request\n\napp = Flask(__name__)\n\nkey = 'YOUR_CHALLENGE_SECRET'\n\n@app.route('/webhooks/netdata')\ndef webhook_challenge():\n    # read the crc_token query parameter sent by Netdata\n    token = request.args.get('crc_token').encode('ascii')\n\n    # creates HMAC SHA-256 hash from incoming token and your consumer secret\n    sha256_hash_digest = hmac.new(key.encode(),\n                                  msg=token,\n                                  digestmod=hashlib.sha256).digest()\n\n    # construct response data with base64 encoded hash\n    response = {\n        'response_token': 'sha256=' + base64.b64encode(sha256_hash_digest).decode('ascii')\n    }\n\n    # returns properly formatted json response\n    return json.dumps(response)\n```\n\n", + "integration_type": "cloud_notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", + "troubleshooting": "" + }, + { + "id": "logs-systemd-journal", + "meta": { + "name": "Systemd Journal Logs", + "link": "https://github.com/netdata/netdata/blob/master/src/collectors/systemd-journal.plugin/README.md", + "categories": [ + "logs" + ], + "icon_filename": "netdata.png" + }, + "keywords": [ + "systemd", + "journal", + "logs" + ], + "overview": "# Systemd Journal Logs\n\nThe `systemd` journal plugin by Netdata makes viewing, exploring and analyzing `systemd` journal logs simple and efficient.\n\nIt automatically discovers available journal sources, allows advanced filtering, offers interactive visual representations and supports exploring the logs of both individual servers and the logs on infrastructure wide journal centralization servers.\n\nThe plugin automatically detects the available journal sources, based on the journal files available in `/var/log/journal` (persistent logs) and `/run/log/journal` (volatile logs).\n\n\n## Visualization\n\nYou can start exploring `systemd` journal logs on the \"Logs\" tab of the Netdata UI.\n\n\n## Key features\n\n- Works on both **individual servers** and **journal centralization servers**.\n- Supports `persistent` and `volatile` journals.\n- Supports `system`, `user`, `namespaces` and `remote` journals.\n- Allows filtering on **any journal field** or **field value**, for any time-frame.\n- Allows **full text search** (`grep`) on all journal fields, for any time-frame.\n- Provides a **histogram** for log entries over time, with a break down per field-value, for any field and any time-frame.\n- Works directly on journal files, without any other third-party components.\n- Supports coloring log entries, the same way `journalctl` does.\n- In PLAY mode provides the same experience as `journalctl -f`, showing new log entries immediately after they are received.\n", + "setup": "## Setup\n\n## Prerequisites\n\n- A Netdata Cloud account\n\n\n## Configuration\n\nThere is no configuration needed for this integration.\n", + 
"integration_type": "logs", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/logs/metadata.yaml" + }, + { + "id": "windows-event-logs", + "meta": { + "name": "Windows Event Logs", + "link": "https://github.com/netdata/netdata/blob/master/src/collectors/windows-events.plugin/README.md", + "categories": [ + "logs", + "data-collection.windows-systems" + ], + "icon_filename": "windows.svg" + }, + "keywords": [ + "windows", + "windows events", + "logs" + ], + "overview": "# Windows Event Logs\n\nThe Windows Events plugin by Netdata makes viewing, exploring and analyzing Windows Events simple and\nefficient.\n\nThe plugin automatically detects all the available channels and offers a list of \"Event Channels\".\n\nBy default, it aggregates events from all event channels, providing a unified view of all events.\n\n\n## Visualization\n\nYou can start exploring Windows event logs on the \"Logs\" tab of the Netdata UI.\n\n\n## Key features\n\n- Supports **Windows Event Logs (WEL)**.\n- Supports **Event Tracing for Windows (ETW)** and **TraceLogging (TL)**, when events are routed to Event Log.\n- Allows filtering on all System Events fields.\n- Allows **full text search** (`grep`) on all System and User fields.\n- Provides a **histogram** for log entries over time, with a break down per field-value, for any System Event field and any\n time-frame.\n- Supports coloring log entries based on severity.\n- In PLAY mode it \"tails\" all the Events, showing new log entries immediately after they are received.\n", + "setup": "## Setup\n\n## Prerequisites\n\n- Netdata Cloud paid subscription\n\n\n## Configuration\n\nThere is no configuration needed for this integration.\n", + "integration_type": "logs", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/logs/metadata.yaml" + }, { "id": "oidc-authentication", "meta": { @@ -22223,7 +22428,7 @@ "identity-management" ], "overview": "# SCIM\n\nThe System for Cross-domain Identity Management (SCIM) specification is designed to simplify the management of user identities in cloud-based applications and services.\n", - "setup": "## Setup\n\n### Prerequisites\n- A Netdata Cloud account\n- Admin access to the Space\n- The Space must be on a paid plan\n- OIDC/SSO integration must already be enabled in one of your Spaces\n\n### Netdata Configuration Steps\n1. Click on the Space settings cog (located above your profile icon).\n2. Click on the **User Management** section and access **Authentication and Authorization** tab.\n3. In the SCIM card, click on **Activate**.\n4. Depending on your situation:\n - If OIDC/SSO integration is already enabled in your Space, click **Activate**.\n - If you already have a SCIM integration in another Space and want to create a linked integration here, enter the SCIM token from the original integration and click **Activate**.\n5. If the setup is successful, you will receive two parameters:\n - **Base URL**: Use this URL as the base URL for your SCIM client.\n - **Token**: Use this token for Bearer Authentication with your SCIM client.\n\n### Rotating the SCIM Token\nYou can rotate the token provided during SCIM integration setup if needed.\n\nSteps to rotate the token:\n1. Click on the Space settings cog (located above your profile icon).\n2. Click on the **User Management** section and access **Authentication and Authorization** tab.\n3. In the already configured SCIM card, click **Configure**.\n4. Click **Regenerate Token**.\n5. 
If successful, you will receive a new token for Bearer Authentication with your SCIM client.\n\n### Supported Features\nThis integration adheres to SCIM v2 specifications. Supported features include:\n\n- User Resource Management (urn:ietf:params:scim:schemas:core:2.0:User)\n- Patch operations: Supported\n- Bulk operations: Not supported\n- Filtering: Supported (max results: 200)\n- Password synchronization: Not supported, as we rely on SSO/OIDC authentication\n- eTag: Not supported\n- Authentication schemes: OAuth Bearer Token\n\n### User Keying Between SCIM and OIDC\nOur SCIM (System for Cross-domain Identity Management) integration utilizes OIDC (OpenID Connect) to authenticate users.\nTo ensure users are correctly identified and authenticated between SCIM and OIDC, we use the following mapping:\n\n- SCIM externalID \u2194 OIDC sub\n\nThis mapping ensures that the identity of users remains consistent and secure across both systems.\n\n**Important**: Ensure that your OIDC and SCIM systems follow this mapping strictly.\nThe externalID in SCIM must correspond to the subfield in OIDC. Any deviation from this mapping may result\nin incorrect user identification and authentication failures.\n\n### Reference\n[SCIM Specification](https://scim.org)\n\n", + "setup": "## Setup\n\n### Prerequisites\n- A Netdata Cloud account\n- Admin access to the Space\n- The Space must be on a paid plan\n- OIDC/SSO integration must already be enabled in one of your Spaces\n\n### Supported Features\nThis integration adheres to SCIM v2 specifications. Supported features include:\n\n- User Resource Management (urn:ietf:params:scim:schemas:core:2.0:User)\n- Create users\n- Update user attributes\n- Deactivate users\n- Patch operations: Supported\n- Bulk operations: Not supported\n- Filtering: Supported (max results: 200)\n- Password synchronization: Not supported, as we rely on SSO/OIDC authentication\n- eTag: Not supported\n- Authentication schemes: OAuth Bearer Token\n\n### Netdata Configuration Steps\n1. Click on the Space settings cog (located above your profile icon).\n2. Click on the **User Management** section and access **Authentication and Authorization** tab.\n3. In the SCIM card, click on **Activate**.\n4. Depending on your situation:\n - If OIDC/SSO integration is already enabled in your Space, click **Activate**.\n - If you already have a SCIM integration in another Space and want to create a linked integration here, enter the SCIM token from the original integration and click **Activate**.\n5. If the setup is successful, you will receive two parameters:\n - **Base URL**: Use this URL as the base URL for your SCIM client.\n - **Token**: Use this token for Bearer Authentication with your SCIM client.\n\n## Client Configuration Steps\n\n### Okta\nIf you're configuring SCIM in Okta, and you already have the Token from the previous section, follow these steps:\n\n1. Go to the **Applications** menu on the left-hand panel and select the **Netdata** application.\n2. In the **Netdata** application, navigate to the **Provisioning** tab.\n3. Click on **Configure API Integration** and check the box for **Enable API Integration**.\n4. Enter the Token (obtained in the *Netdata Configuration Steps* section) into the **API Token** field, then click **Test API Credentials** to ensure the connection is successful.\n5. 
If the test is successful, click **Save** to apply the configuration.\n\n## Troubleshoot\n\n### Rotating the SCIM Token\nYou can rotate the token provided during SCIM integration setup if needed.\n\nSteps to rotate the token:\n1. Click on the Space settings cog (located above your profile icon).\n2. Click on the **User Management** section and access the **Authentication and Authorization** tab.\n3. In the already configured SCIM card, click **Configure**.\n4. Click **Regenerate Token**.\n5. If successful, you will receive a new token for Bearer Authentication with your SCIM client.\n\n### User Keying Between SCIM and OIDC\nOur SCIM (System for Cross-domain Identity Management) integration utilizes OIDC (OpenID Connect) to authenticate users.\nTo ensure users are correctly identified and authenticated between SCIM and OIDC, we use the following mapping:\n\n- SCIM externalID \u2194 OIDC sub\n\nThis mapping ensures that the identity of users remains consistent and secure across both systems.\n\n**Important**: Ensure that your OIDC and SCIM systems follow this mapping strictly.\nThe externalID in SCIM must correspond to the `sub` field in OIDC. Any deviation from this mapping may result\nin incorrect user identification and authentication failures.\n\n## FAQ\n\n### Why aren\u2019t users automatically added to Netdata spaces when they\u2019re created through SCIM?\n\nCurrently, our SCIM server supports only the User resource. We plan to add support for the Group resource in the future.\n\nIn a Netdata space, users can belong to multiple rooms and have different roles (e.g., admin, manager). Additionally, the same organization may have multiple spaces.\n\nAs we don't yet support groups, when a user is created through SCIM, we don\u2019t have a way to determine which spaces, rooms, and roles the user should be assigned to.\n\nOnce we implement support for the Group resource, admins will be able to map SCIM groups to Netdata memberships, so this assignment will be done automatically.\n\nUntil then, SCIM can only be used to grant or block access to Netdata for users in your organization. After a user is created, it is up to the Netdata administrator to manually invite them to spaces and rooms and to assign roles.\n\n### Reference\n[SCIM Specification](https://scim.org)\n\n", "integration_type": "authentication", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-authentication/metadata.yaml", "troubleshooting": "" diff --git a/integrations/logs/integrations/systemd_journal_logs.md b/integrations/logs/integrations/systemd_journal_logs.md new file mode 100644 index 000000000..c61e6d6cb --- /dev/null +++ b/integrations/logs/integrations/systemd_journal_logs.md @@ -0,0 +1,53 @@ + + +# Systemd Journal Logs + + + + + +The `systemd` journal plugin by Netdata makes viewing, exploring and analyzing `systemd` journal logs simple and efficient. + +It automatically discovers available journal sources, allows advanced filtering, offers interactive visual representations and supports exploring the logs of both individual servers and the logs on infrastructure wide journal centralization servers. + +The plugin automatically detects the available journal sources, based on the journal files available in `/var/log/journal` (persistent logs) and `/run/log/journal` (volatile logs). + + + + +## Visualization + +You can start exploring `systemd` journal logs on the "Logs" tab of the Netdata UI. + + +## Key features + +- Works on both **individual servers** and **journal centralization servers**. 
+- Supports `persistent` and `volatile` journals. +- Supports `system`, `user`, `namespaces` and `remote` journals. +- Allows filtering on **any journal field** or **field value**, for any time-frame. +- Allows **full text search** (`grep`) on all journal fields, for any time-frame. +- Provides a **histogram** for log entries over time, with a break down per field-value, for any field and any time-frame. +- Works directly on journal files, without any other third-party components. +- Supports coloring log entries, the same way `journalctl` does. +- In PLAY mode provides the same experience as `journalctl -f`, showing new log entries immediately after they are received. + + +## Setup + +## Prerequisites + +- A Netdata Cloud account + + +## Configuration + +There is no configuration needed for this integration. + diff --git a/integrations/logs/integrations/windows_event_logs.md b/integrations/logs/integrations/windows_event_logs.md new file mode 100644 index 000000000..486f5d7fe --- /dev/null +++ b/integrations/logs/integrations/windows_event_logs.md @@ -0,0 +1,53 @@ + + +# Windows Event Logs + + + + + +The Windows Events plugin by Netdata makes viewing, exploring and analyzing Windows Events simple and +efficient. + +The plugin automatically detects all the available channels and offers a list of "Event Channels". + +By default, it aggregates events from all event channels, providing a unified view of all events. + + + + +## Visualization + +You can start exploring Windows event logs on the "Logs" tab of the Netdata UI. + + +## Key features + +- Supports **Windows Event Logs (WEL)**. +- Supports **Event Tracing for Windows (ETW)** and **TraceLogging (TL)**, when events are routed to Event Log. +- Allows filtering on all System Events fields. +- Allows **full text search** (`grep`) on all System and User fields. +- Provides a **histogram** for log entries over time, with a break down per field-value, for any System Event field and any + time-frame. +- Supports coloring log entries based on severity. +- In PLAY mode it "tails" all the Events, showing new log entries immediately after they are received. + + +## Setup + +## Prerequisites + +- Netdata Cloud paid subscription + + +## Configuration + +There is no configuration needed for this integration. + diff --git a/integrations/logs/metadata.yaml b/integrations/logs/metadata.yaml new file mode 100644 index 000000000..42c313171 --- /dev/null +++ b/integrations/logs/metadata.yaml @@ -0,0 +1,75 @@ +# yamllint disable rule:line-length +--- +- id: "logs-systemd-journal" + meta: + name: "Systemd Journal Logs" + link: "https://github.com/netdata/netdata/blob/master/src/collectors/systemd-journal.plugin/README.md" + categories: + - logs + icon_filename: "netdata.png" + keywords: + - systemd + - journal + - logs + overview: + description: | + The `systemd` journal plugin by Netdata makes viewing, exploring and analyzing `systemd` journal logs simple and efficient. + + It automatically discovers available journal sources, allows advanced filtering, offers interactive visual representations and supports exploring the logs of both individual servers and the logs on infrastructure wide journal centralization servers. + + The plugin automatically detects the available journal sources, based on the journal files available in `/var/log/journal` (persistent logs) and `/run/log/journal` (volatile logs). + visualization: + description: | + You can start exploring `systemd` journal logs on the "Logs" tab of the Netdata UI. 
+ key_features: + description: | + - Works on both **individual servers** and **journal centralization servers**. + - Supports `persistent` and `volatile` journals. + - Supports `system`, `user`, `namespaces` and `remote` journals. + - Allows filtering on **any journal field** or **field value**, for any time-frame. + - Allows **full text search** (`grep`) on all journal fields, for any time-frame. + - Provides a **histogram** for log entries over time, with a break down per field-value, for any field and any time-frame. + - Works directly on journal files, without any other third-party components. + - Supports coloring log entries, the same way `journalctl` does. + - In PLAY mode provides the same experience as `journalctl -f`, showing new log entries immediately after they are received. + setup: + prerequisites: + description: | + - A Netdata Cloud account +- id: "windows-event-logs" + meta: + name: "Windows Event Logs" + link: "https://github.com/netdata/netdata/blob/master/src/collectors/windows-events.plugin/README.md" + categories: + - logs + - data-collection.windows-systems + icon_filename: "windows.svg" + keywords: + - windows + - windows events + - logs + overview: + description: | + The Windows Events plugin by Netdata makes viewing, exploring and analyzing Windows Events simple and + efficient. + + The plugin automatically detects all the available channels and offers a list of "Event Channels". + + By default, it aggregates events from all event channels, providing a unified view of all events. + visualization: + description: | + You can start exploring Windows event logs on the "Logs" tab of the Netdata UI. + key_features: + description: | + - Supports **Windows Event Logs (WEL)**. + - Supports **Event Tracing for Windows (ETW)** and **TraceLogging (TL)**, when events are routed to Event Log. + - Allows filtering on all System Events fields. + - Allows **full text search** (`grep`) on all System and User fields. + - Provides a **histogram** for log entries over time, with a break down per field-value, for any System Event field and any + time-frame. + - Supports coloring log entries based on severity. + - In PLAY mode it "tails" all the Events, showing new log entries immediately after they are received. + setup: + prerequisites: + description: | + - Netdata Cloud paid subscription diff --git a/integrations/schemas/agent_notification.json b/integrations/schemas/agent_notification.json new file mode 100644 index 000000000..f157a65d9 --- /dev/null +++ b/integrations/schemas/agent_notification.json @@ -0,0 +1,87 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Netdata notification mechanism metadata.", + "oneOf": [ + { + "$ref": "#/$defs/entry" + }, + { + "type": "array", + "minLength": 1, + "items": { + "$ref": "#/$defs/entry" + } + } + ], + "$defs": { + "entry": { + "type": "object", + "description": "Data for a single notification method.", + "properties": { + "id": { + "$ref": "./shared.json#/$defs/id" + }, + "meta": { + "$ref": "./shared.json#/$defs/instance" + }, + "keywords": { + "$ref": "./shared.json#/$defs/keywords" + }, + "overview": { + "type": "object", + "description": "General information about the notification method.", + "properties": { + "notification_description": { + "type": "string", + "description": "General description of what the notification method does." + }, + "notification_limitations": { + "type": "string", + "description": "Explanation of any limitations of the notification method." 
+ } + }, + "required": [ + "notification_description", + "notification_limitations" + ] + }, + "global_setup": { + "type": "object", + "description": "Flags that show which global setup sections are relevant for this notification method.", + "properties": { + "severity_filtering": { + "type": "boolean" + }, + "http_proxy": { + "type": "boolean" + } + }, + "required": [ + "severity_filtering", + "http_proxy" + ] + }, + "setup": { + "oneOf": [ + { + "$ref": "./shared.json#/$defs/short_setup" + }, + { + "$ref": "./shared.json#/$defs/full_setup" + } + ] + }, + "troubleshooting": { + "$ref": "./shared.json#/$defs/troubleshooting" + } + }, + "required": [ + "id", + "meta", + "keywords", + "overview", + "setup" + ] + } + } +} \ No newline at end of file diff --git a/integrations/schemas/cloud_notification.json b/integrations/schemas/cloud_notification.json new file mode 100644 index 000000000..60bd66e8f --- /dev/null +++ b/integrations/schemas/cloud_notification.json @@ -0,0 +1,68 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Netdata notification mechanism metadata.", + "oneOf": [ + { + "$ref": "#/$defs/entry" + }, + { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/$defs/entry" + } + } + ], + "$defs": { + "entry": { + "type": "object", + "description": "Data for a single notification method.", + "properties": { + "id": { + "$ref": "./shared.json#/$defs/id" + }, + "meta": { + "$ref": "./shared.json#/$defs/instance" + }, + "keywords": { + "$ref": "./shared.json#/$defs/keywords" + }, + "global_setup": { + "type": "object", + "description": "Flags that show which global setup sections are relevant for this notification method.", + "properties": { + "severity_filtering": { + "type": "boolean" + }, + "http_proxy": { + "type": "boolean" + } + }, + "required": [ + "severity_filtering", + "http_proxy" + ] + }, + "setup": { + "oneOf": [ + { + "$ref": "./shared.json#/$defs/short_setup" + }, + { + "$ref": "./shared.json#/$defs/full_setup" + } + ] + }, + "troubleshooting": { + "$ref": "./shared.json#/$defs/troubleshooting" + } + }, + "required": [ + "id", + "meta", + "keywords", + "setup" + ] + } + } +} \ No newline at end of file diff --git a/integrations/schemas/logs.json b/integrations/schemas/logs.json new file mode 100644 index 000000000..8209dc979 --- /dev/null +++ b/integrations/schemas/logs.json @@ -0,0 +1,97 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Netdata Logs integrations metadata.", + "oneOf": [ + { + "$ref": "#/$defs/entry" + }, + { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/$defs/entry" + } + } + ], + "$defs": { + "entry": { + "type": "object", + "description": "Data for a single logs integration.", + "properties": { + "id": { + "$ref": "./shared.json#/$defs/id" + }, + "meta": { + "$ref": "./shared.json#/$defs/instance" + }, + "keywords": { + "$ref": "./shared.json#/$defs/keywords" + }, + "overview": { + "type": "object", + "properties": { + "description": { + "type": "string", + "description": "General description of what the integration does." + }, + "visualization": { + "type": "object", + "properties": { + "description": { + "type": "string", + "description": "How the user can access the data provided by the integration." + } + }, + "required": [ + "description" + ] + }, + "key_features": { + "type": "object", + "properties": { + "description": { + "type": "string", + "description": "The key features of the integration." 
+ } + }, + "required": [ + "description" + ] + } + }, + "required": [ + "description", + "visualization", + "key_features" + ] + }, + "setup": { + "type": "object", + "properties": { + "prerequisites": { + "type": "object", + "properties": { + "description": { + "type": "string", + "description": "Prerequisites of getting the integration working. For Log Functions only a Netdata account should be needed." + } + }, + "required": [ + "description" + ] + } + }, + "required": [ + "prerequisites" + ] + } + }, + "required": [ + "id", + "meta", + "keywords", + "overview" + ] + } + } +} \ No newline at end of file diff --git a/integrations/schemas/notification.json b/integrations/schemas/notification.json deleted file mode 100644 index 2596ca441..000000000 --- a/integrations/schemas/notification.json +++ /dev/null @@ -1,87 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "Netdata notification mechanism metadata.", - "oneOf": [ - { - "$ref": "#/$defs/entry" - }, - { - "type": "array", - "minLength": 1, - "items": { - "$ref": "#/$defs/entry" - } - } - ], - "$defs": { - "entry": { - "type": "object", - "description": "Data for a single notification method.", - "properties": { - "id": { - "$ref": "./shared.json#/$defs/id" - }, - "meta": { - "$ref": "./shared.json#/$defs/instance" - }, - "keywords": { - "$ref": "./shared.json#/$defs/keywords" - }, - "overview": { - "type": "object", - "description": "General information about the notification method.", - "properties": { - "notification_description": { - "type": "string", - "description": "General description of what the notification method does." - }, - "notification_limitations": { - "type": "string", - "description": "Explanation of any limitations of the notification method." - } - }, - "required": [ - "notification_description", - "notification_limitations" - ] - }, - "global_setup": { - "type": "object", - "description": "Flags that show which global setup sections are relevant for this notification method.", - "properties": { - "severity_filtering": { - "type": "boolean" - }, - "http_proxy": { - "type": "boolean" - } - }, - "required": [ - "severity_filtering", - "http_proxy" - ] - }, - "setup": { - "oneOf": [ - { - "$ref": "./shared.json#/$defs/short_setup" - }, - { - "$ref": "./shared.json#/$defs/full_setup" - } - ] - }, - "troubleshooting": { - "$ref": "./shared.json#/$defs/troubleshooting" - } - }, - "required": [ - "id", - "meta", - "keywords", - "overview", - "setup" - ] - } - } -} diff --git a/integrations/templates/overview.md b/integrations/templates/overview.md index 3063b6860..d75b32d88 100644 --- a/integrations/templates/overview.md +++ b/integrations/templates/overview.md @@ -2,8 +2,10 @@ [% include 'overview/collector.md' %] [% elif entry.integration_type == 'exporter' %] [% include 'overview/exporter.md' %] -[% elif entry.integration_type == 'notification' %] +[% elif entry.integration_type == 'agent_notification' %] [% include 'overview/notification.md' %] [% elif entry.integration_type == 'authentication' %] [% include 'overview/authentication.md' %] +[% elif entry.integration_type == 'logs' %] +[% include 'overview/logs.md' %] [% endif %] diff --git a/integrations/templates/overview/logs.md b/integrations/templates/overview/logs.md new file mode 100644 index 000000000..2ff0f69a9 --- /dev/null +++ b/integrations/templates/overview/logs.md @@ -0,0 +1,11 @@ +# [[ entry.meta.name ]] + +[[ entry.overview.description ]] + +## Visualization + +[[ entry.overview.visualization.description ]] + +## Key 
features + +[[ entry.overview.key_features.description ]] \ No newline at end of file diff --git a/integrations/templates/platform_info.md b/integrations/templates/platform_info.md index 7fc0dc790..db9e61f6b 100644 --- a/integrations/templates/platform_info.md +++ b/integrations/templates/platform_info.md @@ -6,6 +6,6 @@ We build native packages for the following releases: [% for e in entries %] | [[ e.version ]] | [[ e.support ]] | [[ ', '.join(e.arches) ]] | [[ e.notes ]] | [% endfor %] -[% endif %] On other releases of this distribution, a static binary will be installed in `/opt/netdata`. +[% endif %] diff --git a/integrations/templates/setup.md b/integrations/templates/setup.md index 1c41e8cf9..2cac9ec7f 100644 --- a/integrations/templates/setup.md +++ b/integrations/templates/setup.md @@ -1,4 +1,14 @@ ## Setup +[% if entry.integration_type == 'logs' %] + +## Prerequisites + +[[ entry.setup.prerequisites.description]] + +## Configuration + +There is no configuration needed for this integration. +[% else %] [% if entry.setup.description %] [[ entry.setup.description ]] @@ -44,7 +54,7 @@ Configuration for this specific integration is located in the `[[ entry.setup.co [% include 'setup/sample-netdata-config.md' %] [% endif %] -You can edit the configuration file using the `edit-config` script from the +You can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash @@ -106,3 +116,4 @@ There are no configuration examples. [% endif %] [% endif %] +[% endif %] \ No newline at end of file diff --git a/integrations/templates/troubleshooting.md b/integrations/templates/troubleshooting.md index 2176dd010..7c72bdde9 100644 --- a/integrations/templates/troubleshooting.md +++ b/integrations/templates/troubleshooting.md @@ -85,13 +85,12 @@ docker logs netdata 2>&1 | grep [[ entry.meta.module_name ]] [% endif %] [% endif %] -[% elif entry.integration_type == 'notification' %] -[% if 'cloud-notifications' in entry._src_path|string %] +[% elif entry.integration_type == 'cloud_notification' %] [% if entry.troubleshooting.problems.list %] ## Troubleshooting [% endif %] -[% else %] +[% elif entry.integration_type == 'agent_notification' %] ## Troubleshooting ### Test Notification @@ -114,7 +113,6 @@ export NETDATA_ALARM_NOTIFY_DEBUG=1 Note that this will test _all_ alert mechanisms for the selected role. -[% endif %] [% elif entry.integration_type == 'exporter' %] [% if entry.troubleshooting.problems.list %] ## Troubleshooting diff --git a/netdata-installer.sh b/netdata-installer.sh index 539348018..9f00b882b 100755 --- a/netdata-installer.sh +++ b/netdata-installer.sh @@ -202,12 +202,9 @@ USAGE: ${PROGRAM} [options] --nightly-channel Use most recent nightly updates instead of GitHub releases. This results in more frequent updates. --disable-ebpf Disable eBPF Kernel plugin. Default: enabled. - --disable-cloud Disable all Netdata Cloud functionality. - --require-cloud Fail the install if it can't build Netdata Cloud support. --force-legacy-cxx Force usage of an older C++ standard to allow building on older systems. This will usually be autodetected. --enable-plugin-freeipmi Enable the FreeIPMI plugin. Default: enable it when libipmimonitoring is available. --disable-plugin-freeipmi Explicitly disable the FreeIPMI plugin. - --disable-https Explicitly disable TLS support. 
--disable-dbengine Explicitly disable DB engine support. --enable-plugin-go Enable the Go plugin. Default: Enabled when possible. --disable-plugin-go Disable the Go plugin. @@ -256,8 +253,9 @@ LIBS_ARE_HERE=0 NETDATA_ENABLE_ML="" ENABLE_DBENGINE=1 ENABLE_GO=1 +ENABLE_PYTHON=1 +ENABLE_CHARTS=1 ENABLE_H2O=1 -ENABLE_CLOUD=1 FORCE_LEGACY_CXX=0 NETDATA_CMAKE_OPTIONS="${NETDATA_CMAKE_OPTIONS-}" @@ -279,14 +277,16 @@ while [ -n "${1}" ]; do "--enable-plugin-freeipmi") ENABLE_FREEIPMI=1 ;; "--disable-plugin-freeipmi") ENABLE_FREEIPMI=0 ;; "--disable-https") - ENABLE_DBENGINE=0 - ENABLE_H2O=0 - ENABLE_CLOUD=0 + warning "HTTPS cannot be disabled." ;; "--disable-dbengine") ENABLE_DBENGINE=0 ;; "--enable-plugin-go") ENABLE_GO=1 ;; "--disable-plugin-go") ENABLE_GO=0 ;; "--disable-go") ENABLE_GO=0 ;; + "--enable-plugin-python") ENABLE_PYTHON=1 ;; + "--disable-plugin-python") ENABLE_PYTHON=0 ;; + "--enable-plugin-charts") ENABLE_CHARTS=1 ;; + "--disable-plugin-charts") ENABLE_CHARTS=0 ;; "--enable-plugin-nfacct") ENABLE_NFACCT=1 ;; "--disable-plugin-nfacct") ENABLE_NFACCT=0 ;; "--enable-plugin-xenstat") ENABLE_XENSTAT=1 ;; @@ -328,21 +328,9 @@ while [ -n "${1}" ]; do # XXX: No longer supported ;; "--disable-cloud") - if [ -n "${NETDATA_REQUIRE_CLOUD}" ]; then - warning "Cloud explicitly enabled, ignoring --disable-cloud." - else - ENABLE_CLOUD=0 - NETDATA_DISABLE_CLOUD=1 - fi - ;; - "--require-cloud") - if [ -n "${NETDATA_DISABLE_CLOUD}" ]; then - warning "Cloud explicitly disabled, ignoring --require-cloud." - else - ENABLE_CLOUD=1 - NETDATA_REQUIRE_CLOUD=1 - fi + warning "Cloud cannot be disabled." ;; + "--require-cloud") ;; "--build-json-c") NETDATA_BUILD_JSON_C=1 ;; @@ -941,11 +929,11 @@ if [ "$(id -u)" -eq 0 ]; then if [ -f "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/go.d.plugin" ]; then run chown "root:${NETDATA_GROUP}" "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/go.d.plugin" - capabilities=1 + capabilities=0 if ! iscontainer && command -v setcap 1> /dev/null 2>&1; then run chmod 0750 "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/go.d.plugin" - if ! run setcap "cap_dac_read_search+epi cap_net_admin+epi cap_net_raw=eip" "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/go.d.plugin"; then - capabilities=0 + if run setcap "cap_dac_read_search+epi cap_net_admin+epi cap_net_raw=eip" "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/go.d.plugin"; then + capabilities=1 fi fi diff --git a/netdata.spec.in b/netdata.spec.in index 67d7de4a0..e7cd1c3f5 100644 --- a/netdata.spec.in +++ b/netdata.spec.in @@ -196,6 +196,7 @@ Requires(pre): /usr/sbin/useradd # to support weak dependencies. Explicitly requiring our default plugins # makes it impossible to properly test the packages prior to upload, # so we just skip depending on them on CentOS 7. +Requires: %{name}-dashboard %if 0%{?_have_ebpf} Requires: %{name}-plugin-ebpf = %{version} %endif @@ -269,9 +270,9 @@ BuildRequires: cups-devel # distro handling of versioning). %if %{?_upstream_go_toolchain:0}%{!?_upstream_go_toolchain:1} %if 0%{?suse_version} -BuildRequires: go >= 1.22 +BuildRequires: go >= 1.23 %else -BuildRequires: golang >= 1.22 +BuildRequires: golang >= 1.23 %endif %endif # end - go.d.plugin plugin dependencies @@ -388,14 +389,14 @@ happened, on your systems and applications. 
%else -DENABLE_EXPORTER_MONGODB=Off \ %endif - -DENABLE_ACLK=On \ - -DENABLE_CLOUD=On \ -DENABLE_DBENGINE=On \ -DENABLE_H2O=On \ -DENABLE_PLUGIN_APPS=On \ -DENABLE_PLUGIN_CGROUP_NETWORK=On \ -DENABLE_PLUGIN_DEBUGFS=On \ -DENABLE_PLUGIN_GO=On \ + -DENABLE_PLUGIN_PYTHON=On \ + -DENABLE_PLUGIN_CHARTS=On \ -DENABLE_PLUGIN_LOCAL_LISTENERS=On \ -DENABLE_PLUGIN_PERF=On \ -DENABLE_PLUGIN_SLABINFO=On \ @@ -551,11 +552,6 @@ rm -rf "${RPM_BUILD_ROOT}" # ndsudo a helper to run privileged commands %attr(4750,root,netdata) %{_libexecdir}/%{name}/plugins.d/ndsudo -# Enforce 0644 for files and 0755 for directories -# for the netdata web directory -%defattr(0644,root,root,0755) -%{_datadir}/%{name}/web - # Enforce 0660 for files and 0770 for directories # for the netdata lib, cache and log dirs %defattr(0660,root,netdata,0770) @@ -565,6 +561,9 @@ rm -rf "${RPM_BUILD_ROOT}" %attr(0770,netdata,netdata) %dir %{_localstatedir}/lib/%{name}/registry %attr(0770,netdata,netdata) %dir %{_localstatedir}/lib/%{name}/cloud.d +# Dashboard belongs to a different sub-package +%exclude %{_datadir}/%{name}/web + # Free IPMI belongs to a different sub-package %if %{_have_freeipmi} %exclude %{_libexecdir}/%{name}/plugins.d/freeipmi.plugin @@ -987,7 +986,27 @@ fi # CAP_SYS_ADMIN, CAP_SYS_PTRACE and CAP_DAC_READ_SEARCH needed for data collection. %caps(cap_sys_admin,cap_sys_ptrace,cap_dac_read_search=ep) %attr(0750,root,netdata) %{_libexecdir}/%{name}/plugins.d/network-viewer.plugin +%package dashboard +Summary: The local dashboard for the Netdata Agent +Group: Applications/System +Requires: %{name} >= %{version} +Conflicts: %{name} < %{version} + +%description dashboard + This allows access to the dashboard on the local node without internet access. + +%pre dashboard +if ! getent group %{name} > /dev/null; then + groupadd --system %{name} +fi + +%files dashboard +%defattr(0644,root,root,0755) +%{_datadir}/%{name}/web + %changelog +* Thu Aug 29 2024 Austin Hemmelgarn +- Split dashboard into its own package * Fri Jul 19 2024 Austin Hemmelgarn - Fix dependency issues with old logs-management plugin * Tue Jul 16 2024 Austin Hemmelgarn diff --git a/packaging/PLATFORM_SUPPORT.md b/packaging/PLATFORM_SUPPORT.md index ad1cd4168..8fa2b977f 100644 --- a/packaging/PLATFORM_SUPPORT.md +++ b/packaging/PLATFORM_SUPPORT.md @@ -1,14 +1,3 @@ - - # Platform support policy Netdata defines three tiers of official support: @@ -30,13 +19,13 @@ Any platforms not listed in any of these categories may or may not work. The following table shows a general outline of the various support tiers and categories. 
-| | Bug Support | Guaranteed Configurations | CI Coverage | Native Packages | Static Build Support | -| - | ----------- | ------------------------- | ----------- | --------------- | -------------------- | -| Core | High priority | Everything but rare edge cases | Full | Yes, if we can provide them | Full | -| Intermediate | Normal priority | Common cases | Partial (CI mostly equivalent to **Core**, but possibly with some gaps, and not required to pass) | Possibly | Full | -| Community | Best Effort | Default only | None | No | Best Effort | -| Third-party Supported | Users directed to platform maintainers | None | None | No | Best Effort | -| Previously Supported | Users asked to upgrade | None | None | Yes, but only already published versions | Best Effort | +| | Bug Support | Guaranteed Configurations | CI Coverage | Native Packages | Static Build Support | +|-----------------------|----------------------------------------|--------------------------------|---------------------------------------------------------------------------------------------------|------------------------------------------|----------------------| +| Core | High priority | Everything but rare edge cases | Full | Yes, if we can provide them | Full | +| Intermediate | Normal priority | Common cases | Partial (CI mostly equivalent to **Core**, but possibly with some gaps, and not required to pass) | Possibly | Full | +| Community | Best Effort | Default only | None | No | Best Effort | +| Third-party Supported | Users directed to platform maintainers | None | None | No | Best Effort | +| Previously Supported | Users asked to upgrade | None | None | Yes, but only already published versions | Best Effort | - ‘Bug Support’: How we handle platform-specific bugs. - ‘Guaranteed Configurations’: Which runtime configurations for the agent we try to guarantee will work with minimal @@ -56,30 +45,29 @@ Our [static builds](#static-builds) are expected to work on these platforms if a expected to work on these platforms with minimal user effort. 
-| Platform | Version | Official Native Packages | Notes | -|--------------------------|----------------|----------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------| -| Alpine Linux | 3.18 | No | The latest release of Alpine Linux is guaranteed to remain at **Core** tier due to usage for our Docker images | -| Alma Linux | 9.x | x86\_64, AArch64 | Also includes support for Rocky Linux and other ABI compatible RHEL derivatives | -| Alma Linux | 8.x | x86\_64, AArch64 | Also includes support for Rocky Linux and other ABI compatible RHEL derivatives | -| Amazon Linux | 2023 | x86\_64, AArch64 | | -| Amazon Linux | 2 | x86\_64, AArch64 | | -| CentOS | 7.x | x86\_64 | | -| Docker | 19.03 or newer | x86\_64, i386, ARMv7, AArch64, POWER8+ | See our [Docker documentation](/packaging/docker/README.md) for more info on using Netdata on Docker | -| Debian | 12.x | x86\_64, i386, ARMv7, AArch64 | | -| Debian | 11.x | x86\_64, i386, ARMv7, AArch64 | | -| Fedora | 40 | x86\_64, AArch64 | | -| Fedora | 39 | x86\_64, AArch64 | | -| openSUSE | Leap 15.5 | x86\_64, AArch64 | | -| openSUSE | Leap 15.4 | x86\_64, AArch64 | | -| Oracle Linux | 9.x | x86\_64, AArch64 | | -| Oracle Linux | 8.x | x86\_64, AArch64 | | -| Red Hat Enterprise Linux | 9.x | x86\_64, AArch64 | | -| Red Hat Enterprise Linux | 8.x | x86\_64, AArch64 | | -| Red Hat Enterprise Linux | 7.x | x86\_64 | | -| Ubuntu | 24.04 | x86\_64, AArch64, ARMv7 | | -| Ubuntu | 22.04 | x86\_64, ARMv7, AArch64 | | -| Ubuntu | 20.04 | x86\_64, ARMv7, AArch64 | | - +| Platform | Version | Official Native Packages | Notes | +|--------------------------|----------------|----------------------------------------|----------------------------------------------------------------------------------------------------------------| +| Alpine Linux | 3.18 | No | The latest release of Alpine Linux is guaranteed to remain at **Core** tier due to usage for our Docker images | +| Alma Linux | 9.x | x86\_64, AArch64 | Also includes support for Rocky Linux and other ABI compatible RHEL derivatives | +| Alma Linux | 8.x | x86\_64, AArch64 | Also includes support for Rocky Linux and other ABI compatible RHEL derivatives | +| Amazon Linux | 2023 | x86\_64, AArch64 | | +| Amazon Linux | 2 | x86\_64, AArch64 | | +| CentOS | 7.x | x86\_64 | | +| Docker | 19.03 or newer | x86\_64, i386, ARMv7, AArch64, POWER8+ | See our [Docker documentation](/packaging/docker/README.md) for more info on using Netdata on Docker | +| Debian | 12.x | x86\_64, i386, ARMv7, AArch64 | | +| Debian | 11.x | x86\_64, i386, ARMv7, AArch64 | | +| Fedora | 40 | x86\_64, AArch64 | | +| Fedora | 39 | x86\_64, AArch64 | | +| openSUSE | Leap 15.5 | x86\_64, AArch64 | | +| openSUSE | Leap 15.4 | x86\_64, AArch64 | | +| Oracle Linux | 9.x | x86\_64, AArch64 | | +| Oracle Linux | 8.x | x86\_64, AArch64 | | +| Red Hat Enterprise Linux | 9.x | x86\_64, AArch64 | | +| Red Hat Enterprise Linux | 8.x | x86\_64, AArch64 | | +| Red Hat Enterprise Linux | 7.x | x86\_64 | | +| Ubuntu | 24.04 | x86\_64, AArch64, ARMv7 | | +| Ubuntu | 22.04 | x86\_64, ARMv7, AArch64 | | +| Ubuntu | 20.04 | x86\_64, ARMv7, AArch64 | | ### Intermediate @@ -106,16 +94,16 @@ to add support for a new platform, that platform generally will start in this ti are expected to work on these platforms if available. Source-based installs are usually expected to work on these platforms, but may require some extra effort from users. 
-| Platform | Version | Official Native Packages | Notes | -|--------------|------------|--------------------------|-----------------------------------------------------------------------------------------------------------| -| Clear Linux | Latest | No | | -| Debian | Sid | No | | -| Fedora | Rawhide | No | | -| FreeBSD | 13-STABLE | No | Netdata is included in the FreeBSD Ports Tree, and this is the recommended installation method on FreeBSD | -| Gentoo | Latest | No | | -| macOS | 13 | No | Currently only works for Intel-based hardware. Requires Homebrew for dependencies | -| macOS | 12 | No | Currently only works for Intel-based hardware. Requires Homebrew for dependencies | -| macOS | 11 | No | Currently only works for Intel-based hardware. Requires Homebrew for dependencies. | +| Platform | Version | Official Native Packages | Notes | +|-------------|-----------|--------------------------|-----------------------------------------------------------------------------------------------------------| +| Clear Linux | Latest | No | | +| Debian | Sid | No | | +| Fedora | Rawhide | No | | +| FreeBSD | 13-STABLE | No | Netdata is included in the FreeBSD Ports Tree, and this is the recommended installation method on FreeBSD | +| Gentoo | Latest | No | | +| macOS | 13 | No | Currently only works for Intel-based hardware. Requires Homebrew for dependencies | +| macOS | 12 | No | Currently only works for Intel-based hardware. Requires Homebrew for dependencies | +| macOS | 11 | No | Currently only works for Intel-based hardware. Requires Homebrew for dependencies. | ## Third-party supported platforms diff --git a/packaging/VERSIONING_AND_PUBLIC_API.md b/packaging/VERSIONING_AND_PUBLIC_API.md index dc0a5def5..ce672a6fc 100644 --- a/packaging/VERSIONING_AND_PUBLIC_API.md +++ b/packaging/VERSIONING_AND_PUBLIC_API.md @@ -36,7 +36,7 @@ will be as up-to-date as possible. Versions for nightly builds of the Netdata Agent consist of four parts, a major version, a minor version, a revision number, and an optional commit ID, presented like `..0--`. For example, a version of `1.43.0-11-gb15437502` has a major version of 1, a minor version of 43, a revision of 11, and a commit ID of -`gb15437502`. A commit ID consists of a lowercaase letter `g`, followed by the short commit hash for the corresponding +`gb15437502`. A commit ID consists of a lowercase letter `g`, followed by the short commit hash for the corresponding commit. If the commit ID is not included, it may be replaced by the word ‘nightly’. The major and minor version numbers for a nightly build correspond exactly to an associated stable release. A @@ -80,9 +80,9 @@ notes at least one minor release prior to being merged: - All mandatory build dependencies which are not vendored in the Netdata Agent code. This includes, but is not limited to: - - The underlying build system (such as autotools or CMake). - - Primary library dependencies (such as libuv). - - Any external tooling that is required at build time. + - The underlying build system (such as autotools or CMake). + - Primary library dependencies (such as libuv). + - Any external tooling that is required at build time. - The REST API provided by the Netdata Agent’s internal web server, accessible via the `/api` endpoint. This does not extend to the charts, labels, or other system-specific data returned by some API endpoints. - The protocol used for streaming and replicating data between Netdata Agents. 
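The nightly version format described in this file is, in practice, what `git describe` produces for a checkout; a minimal sketch of recovering such a version string (this assumes an annotated release tag such as `v1.43.0` is reachable from `HEAD`, which is an assumption about the checkout, not something this document guarantees):

```sh
# Sketch: derive a nightly-style version string from a git checkout.
# Example output: 1.43.0-11-gb15437502
# (major.minor 1.43, revision 11, commit ID gb15437502)
version="$(git describe --tags --match 'v*' | sed 's/^v//')"
echo "${version}"
```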
@@ -138,7 +138,7 @@ Any components which are not explicitly listed above as being part of the public API. This includes, but is not limited to: - Any mandatory build components which are vendored as part of the Netdata sources, such as SQLite3 or libJudy. This - extends to both the presence or abscence of such components, as well as the exact version being bundled. + extends to both the presence or absence of such components, as well as the exact version being bundled. - The exact installation mechanism that will be used on any given system when using our `kickstart.sh` installation script. - The exact underlying implementation of any data collection plugin. diff --git a/packaging/build-package.sh b/packaging/build-package.sh index 453e167f4..a4feb59b9 100755 --- a/packaging/build-package.sh +++ b/packaging/build-package.sh @@ -26,8 +26,7 @@ add_cmake_option() { add_cmake_option CMAKE_BUILD_TYPE RelWithDebInfo add_cmake_option CMAKE_INSTALL_PREFIX / -add_cmake_option ENABLE_ACLK On -add_cmake_option ENABLE_CLOUD On +add_cmake_option ENABLE_DASHBOARD on add_cmake_option ENABLE_DBENGINE On add_cmake_option ENABLE_H2O On add_cmake_option ENABLE_ML On @@ -37,6 +36,8 @@ add_cmake_option ENABLE_PLUGIN_CGROUP_NETWORK On add_cmake_option ENABLE_PLUGIN_DEBUGFS On add_cmake_option ENABLE_PLUGIN_FREEIPMI On add_cmake_option ENABLE_PLUGIN_GO On +add_cmake_option ENABLE_PLUGIN_PYTHON On +add_cmake_option ENABLE_PLUGIN_CHARTS On add_cmake_option ENABLE_PLUGIN_LOCAL_LISTENERS On add_cmake_option ENABLE_PLUGIN_NFACCT On add_cmake_option ENABLE_PLUGIN_PERF On diff --git a/packaging/building-native-packages-locally.md b/packaging/building-native-packages-locally.md index 6ad1d6045..0c84f505d 100644 --- a/packaging/building-native-packages-locally.md +++ b/packaging/building-native-packages-locally.md @@ -39,16 +39,16 @@ inspect the state of the container and look at build logs. ### Detailed explanation -The environments used for building our packages are fully self-contianed Docker images built from [Dockerfiles](https://github.com/netdata/helper-images/tree/master/package-builders) +The environments used for building our packages are fully self-contained Docker images built from [Dockerfiles](https://github.com/netdata/helper-images/tree/master/package-builders) These are published on Docker Hub with the image name `netdata/package-builders`, and tagged using the name and version of the distribution (with the tag corresponding to the suffix on the associated Dockerfile). The build code expects the following requirements to be met: -- It expects the source tree it should build from to be located at `/netdata`, and expects that said source tree +* It expects the source tree it should build from to be located at `/netdata`, and expects that said source tree is clean (no artifacts left over from previous builds). -- It expects an environment variable named `VERSION` to be defined, and uses this to control what version number +* It expects an environment variable named `VERSION` to be defined, and uses this to control what version number will be shown in the package metadata and filenames. Internally, the source tree gets copied to a temporary location for the build process so that the source tree can @@ -95,7 +95,7 @@ Once you have that set up, the command to build the packages is the same as abov `--platform` option to the `docker run` or `podman run` command. 
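As a concrete sketch of such an invocation (the `debian12` tag and the `VERSION` value here are illustrative assumptions; the source mount at `/netdata` and the `VERSION` variable follow the requirements listed earlier in this document):

```sh
# Hypothetical cross-architecture package build using an emulated
# builder image; requires the multi-arch setup described above.
docker run -it --rm \
  --platform linux/arm64/v8 \
  -e VERSION=0.1 \
  -v "$(pwd)":/netdata \
  netdata/package-builders:debian12
```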
The current list of architectures we build for, and the correct value for the `--platform` option is: -- 32-bit ARMv7: `linux/arm/v7` -- 64-bit ARMv8: `linux/arm64/v8` -- 32-bit x86: `linux/i386` -- 64-bit x86: `linux/amd64` +* 32-bit ARMv7: `linux/arm/v7` +* 64-bit ARMv8: `linux/arm64/v8` +* 32-bit x86: `linux/i386` +* 64-bit x86: `linux/amd64` diff --git a/packaging/check-for-go-toolchain.sh b/packaging/check-for-go-toolchain.sh index fe5dabfad..2c6298969 100644 --- a/packaging/check-for-go-toolchain.sh +++ b/packaging/check-for-go-toolchain.sh @@ -14,163 +14,163 @@ # GOLANG_FAILURE_REASON set to an error message indicating what went wrong. GOLANG_MIN_MAJOR_VERSION='1' -GOLANG_MIN_MINOR_VERSION='22' -GOLANG_MIN_PATCH_VERSION='0' +GOLANG_MIN_MINOR_VERSION='23' +GOLANG_MIN_PATCH_VERSION='3' GOLANG_MIN_VERSION="${GOLANG_MIN_MAJOR_VERSION}.${GOLANG_MIN_MINOR_VERSION}.${GOLANG_MIN_PATCH_VERSION}" GOLANG_TEMP_PATH="${TMPDIR}/go-toolchain" check_go_version() { - version="$("${go}" version | awk '{ print $3 }' | sed 's/^go//')" - version_major="$(echo "${version}" | cut -f 1 -d '.')" - version_minor="$(echo "${version}" | cut -f 2 -d '.')" - version_patch="$(echo "${version}" | cut -f 3 -d '.')" - - if [ -z "${version_major}" ] || [ "${version_major}" -lt "${GOLANG_MIN_MAJOR_VERSION}" ]; then - return 1 - elif [ "${version_major}" -gt "${GOLANG_MIN_MAJOR_VERSION}" ]; then - return 0 - fi - - if [ -z "${version_minor}" ] || [ "${version_minor}" -lt "${GOLANG_MIN_MINOR_VERSION}" ]; then - return 1 - elif [ "${version_minor}" -gt "${GOLANG_MIN_MINOR_VERSION}" ]; then - return 0 - fi + version="$("${go}" version | awk '{ print $3 }' | sed 's/^go//')" + version_major="$(echo "${version}" | cut -f 1 -d '.')" + version_minor="$(echo "${version}" | cut -f 2 -d '.')" + version_patch="$(echo "${version}" | cut -f 3 -d '.')" - if [ -n "${version_patch}" ] && [ "${version_patch}" -ge "${GOLANG_MIN_PATCH_VERSION}" ]; then - return 0 - fi + if [ -z "${version_major}" ] || [ "${version_major}" -lt "${GOLANG_MIN_MAJOR_VERSION}" ]; then + return 1 + elif [ "${version_major}" -gt "${GOLANG_MIN_MAJOR_VERSION}" ]; then + return 0 + fi + if [ -z "${version_minor}" ] || [ "${version_minor}" -lt "${GOLANG_MIN_MINOR_VERSION}" ]; then return 1 + elif [ "${version_minor}" -gt "${GOLANG_MIN_MINOR_VERSION}" ]; then + return 0 + fi + + if [ -n "${version_patch}" ] && [ "${version_patch}" -ge "${GOLANG_MIN_PATCH_VERSION}" ]; then + return 0 + fi + + return 1 } install_go_toolchain() { - GOLANG_ARCHIVE_NAME="${GOLANG_TEMP_PATH}/golang.tar.gz" - GOLANG_CHECKSUM_FILE="${GOLANG_TEMP_PATH}/golang.sha256sums" - - case "$(uname -s)" in - Linux) - case "$(uname -m)" in - i?86) - GOLANG_ARCHIVE_URL="https://go.dev/dl/go1.22.0.linux-386.tar.gz" - GOLANG_ARCHIVE_CHECKSUM="1e209c4abde069067ac9afb341c8003db6a210f8173c77777f02d3a524313da3" - ;; - x86_64) - GOLANG_ARCHIVE_URL="https://go.dev/dl/go1.22.0.linux-amd64.tar.gz" - GOLANG_ARCHIVE_CHECKSUM="f6c8a87aa03b92c4b0bf3d558e28ea03006eb29db78917daec5cfb6ec1046265" - ;; - aarch64) - GOLANG_ARCHIVE_URL="https://go.dev/dl/go1.22.0.linux-arm64.tar.gz" - GOLANG_ARCHIVE_CHECKSUM="6a63fef0e050146f275bf02a0896badfe77c11b6f05499bb647e7bd613a45a10" - ;; - armv*) - GOLANG_ARCHIVE_URL="https://go.dev/dl/go1.22.0.linux-armv6l.tar.gz" - GOLANG_ARCHIVE_CHECKSUM="0525f92f79df7ed5877147bce7b955f159f3962711b69faac66bc7121d36dcc4" - ;; - ppc64le) - GOLANG_ARCHIVE_URL="https://go.dev/dl/go1.22.0.linux-ppc64le.tar.gz" - GOLANG_ARCHIVE_CHECKSUM="0e57f421df9449066f00155ce98a5be93744b3d81b00ee4c2c9b511be2a31d93" 
- ;; - riscv64) - GOLANG_ARCHIVE_URL="https://go.dev/dl/go1.22.0.linux-riscv64.tar.gz" - GOLANG_ARCHIVE_CHECKSUM="afe9cedcdbd6fdff27c57efd30aa5ce0f666f471fed5fa96cd4fb38d6b577086" - ;; - s390x) - GOLANG_ARCHIVE_URL="https://go.dev/dl/go1.22.0.linux-s390x.tar.gz" - GOLANG_ARCHIVE_CHECKSUM="2e546a3583ba7bd3988f8f476245698f6a93dfa9fe206a8ca8f85c1ceecb2446" - ;; - *) - GOLANG_FAILURE_REASON="Linux $(uname -m) platform is not supported out-of-box by Go, you must install a toolchain for it yourself." - return 1 - ;; - esac - ;; - FreeBSD) - case "$(uname -m)" in - 386) - GOLANG_ARCHIVE_URL="https://go.dev/dl/go1.22.0.freebsd-386.tar.gz" - GOLANG_ARCHIVE_CHECKSUM="b8065da37783e8b9e7086365a54d74537e832c92311b61101a66989ab2458d8e" - ;; - amd64) - GOLANG_ARCHIVE_URL="https://go.dev/dl/go1.22.0.freebsd-amd64.tar.gz" - GOLANG_ARCHIVE_CHECKSUM="50f421c7f217083ac94aab1e09400cb9c2fea7d337679ec11f1638a11460da30" - ;; - arm) - GOLANG_ARCHIVE_URL="https://go.dev/dl/go1.22.0.freebsd-arm.tar.gz" - GOLANG_ARCHIVE_CHECKSUM="c9c8b305f90903536f4981bad9f029828c2483b3216ca1783777344fbe603f2d" - ;; - arm64) - GOLANG_ARCHIVE_URL="https://go.dev/dl/go1.22.0.freebsd-arm64.tar.gz" - GOLANG_ARCHIVE_CHECKSUM="e23385e5c640787fa02cd58f2301ea09e162c4d99f8ca9fa6d52766f428a933d" - ;; - riscv64) - GOLANG_ARCHIVE_URL="https://go.dev/dl/go1.22.0.freebsd-riscv64.tar.gz" - GOLANG_ARCHIVE_CHECKSUM="c8f94d1de6024546194d58e7b9370dc7ea06176aad94a675b0062c25c40cb645" - ;; - *) - GOLANG_FAILURE_REASON="FreeBSD $(uname -m) platform is not supported out-of-box by Go, you must install a toolchain for it yourself." - return 1 - ;; - esac - ;; + GOLANG_ARCHIVE_NAME="${GOLANG_TEMP_PATH}/golang.tar.gz" + GOLANG_CHECKSUM_FILE="${GOLANG_TEMP_PATH}/golang.sha256sums" + + case "$(uname -s)" in + Linux) + case "$(uname -m)" in + i?86) + GOLANG_ARCHIVE_URL="https://go.dev/dl/go1.23.3.linux-386.tar.gz" + GOLANG_ARCHIVE_CHECKSUM="0c8e9f824bf443f51e06ac017b9ae402ea066d761b309d880dbb2ca5793db8a2" + ;; + x86_64) + GOLANG_ARCHIVE_URL="https://go.dev/dl/go1.23.3.linux-amd64.tar.gz" + GOLANG_ARCHIVE_CHECKSUM="a0afb9744c00648bafb1b90b4aba5bdb86f424f02f9275399ce0c20b93a2c3a8" + ;; + aarch64) + GOLANG_ARCHIVE_URL="https://go.dev/dl/go1.23.3.linux-arm64.tar.gz" + GOLANG_ARCHIVE_CHECKSUM="1f7cbd7f668ea32a107ecd41b6488aaee1f5d77a66efd885b175494439d4e1ce" + ;; + armv*) + GOLANG_ARCHIVE_URL="https://go.dev/dl/go1.23.3.linux-armv6l.tar.gz" + GOLANG_ARCHIVE_CHECKSUM="5f0332754beffc65af65a7b2da76e9dd997567d0d81b6f4f71d3588dc7b4cb00" + ;; + ppc64le) + GOLANG_ARCHIVE_URL="https://go.dev/dl/go1.23.3.linux-ppc64le.tar.gz" + GOLANG_ARCHIVE_CHECKSUM="e3b926c81e8099d3cee6e6e270b85b39c3bd44263f8d3df29aacb4d7e00507c8" + ;; + riscv64) + GOLANG_ARCHIVE_URL="https://go.dev/dl/go1.23.3.linux-riscv64.tar.gz" + GOLANG_ARCHIVE_CHECKSUM="324e03b6f59be841dfbaeabc466224b0f0905f5ad3a225b7c0703090e6c4b1a5" + ;; + s390x) + GOLANG_ARCHIVE_URL="https://go.dev/dl/go1.23.3.linux-s390x.tar.gz" + GOLANG_ARCHIVE_CHECKSUM="6bd72fcef72b046b6282c2d1f2c38f31600e4fe9361fcd8341500c754fb09c38" + ;; + *) + GOLANG_FAILURE_REASON="Linux $(uname -m) platform is not supported out-of-box by Go, you must install a toolchain for it yourself." 
+ return 1 + ;; + esac + ;; + FreeBSD) + case "$(uname -m)" in + 386) + GOLANG_ARCHIVE_URL="https://go.dev/dl/go1.23.3.freebsd-386.tar.gz" + GOLANG_ARCHIVE_CHECKSUM="69479fa016ec5b4605885643ce0c2dd5c583e02353978feb6de38c961863b9cc" + ;; + amd64) + GOLANG_ARCHIVE_URL="https://go.dev/dl/go1.23.3.freebsd-amd64.tar.gz" + GOLANG_ARCHIVE_CHECKSUM="bf1de22a900646ef4f79480ed88337856d47089cc610f87e6fef46f6b8db0e1f" + ;; + arm) + GOLANG_ARCHIVE_URL="https://go.dev/dl/go1.23.3.freebsd-arm.tar.gz" + GOLANG_ARCHIVE_CHECKSUM="e461f866479bc36bdd4cfec32bfecb1bb243152268a1b3223de109410dec3407" + ;; + arm64) + GOLANG_ARCHIVE_URL="https://go.dev/dl/go1.23.3.freebsd-arm64.tar.gz" + GOLANG_ARCHIVE_CHECKSUM="24154b4018a45540aefeb6b5b9ffdcc8d9a8cdb78cd7fec262787b89fed19997" + ;; + riscv64) + GOLANG_ARCHIVE_URL="https://go.dev/dl/go1.23.3.freebsd-riscv64.tar.gz" + GOLANG_ARCHIVE_CHECKSUM="218f3f1532e61dd65c330c2a5fc85bec18cc3690489763e62ffa9bb9fc85a68e" + ;; *) - GOLANG_FAILURE_REASON="We do not support automatic handling of a Go toolchain on this system, you must install one manually." - return 1 - ;; - esac - - if [ -d '/usr/local/go' ]; then - if [ -f '/usr/local/go/.installed-by-netdata' ]; then - rm -rf /usr/local/go - else - GOLANG_FAILURE_REASON="Refusing to overwrite existing Go toolchain install at /usr/local/go, it needs to be updated manually." - return 1 - fi + GOLANG_FAILURE_REASON="FreeBSD $(uname -m) platform is not supported out-of-box by Go, you must install a toolchain for it yourself." + return 1 + ;; + esac + ;; + *) + GOLANG_FAILURE_REASON="We do not support automatic handling of a Go toolchain on this system, you must install one manually." + return 1 + ;; + esac + + if [ -d '/usr/local/go' ]; then + if [ -f '/usr/local/go/.installed-by-netdata' ]; then + rm -rf /usr/local/go + else + GOLANG_FAILURE_REASON="Refusing to overwrite existing Go toolchain install at /usr/local/go, it needs to be updated manually." + return 1 fi + fi - mkdir -p "${GOLANG_TEMP_PATH}" + mkdir -p "${GOLANG_TEMP_PATH}" - if ! curl --fail -q -sSL --connect-timeout 10 --retry 3 --output "${GOLANG_ARCHIVE_NAME}" "${GOLANG_ARCHIVE_URL}"; then - GOLANG_FAILURE_REASON="Failed to download Go toolchain." - return 1 - fi + if ! curl --fail -q -sSL --connect-timeout 10 --retry 3 --output "${GOLANG_ARCHIVE_NAME}" "${GOLANG_ARCHIVE_URL}"; then + GOLANG_FAILURE_REASON="Failed to download Go toolchain." + return 1 + fi - echo "${GOLANG_ARCHIVE_CHECKSUM} ${GOLANG_ARCHIVE_NAME}" > "${GOLANG_CHECKSUM_FILE}" + echo "${GOLANG_ARCHIVE_CHECKSUM} ${GOLANG_ARCHIVE_NAME}" >"${GOLANG_CHECKSUM_FILE}" - if ! sha256sum -c "${GOLANG_CHECKSUM_FILE}"; then - GOLANG_FAILURE_REASON="Invalid checksum for downloaded Go toolchain." - return 1 - fi + if ! sha256sum -c "${GOLANG_CHECKSUM_FILE}"; then + GOLANG_FAILURE_REASON="Invalid checksum for downloaded Go toolchain." + return 1 + fi - if ! tar -C /usr/local/ -xzf "${GOLANG_ARCHIVE_NAME}"; then - GOLANG_FAILURE_REASON="Failed to extract Go toolchain." - return 1 - fi + if ! tar -C /usr/local/ -xzf "${GOLANG_ARCHIVE_NAME}"; then + GOLANG_FAILURE_REASON="Failed to extract Go toolchain." + return 1 + fi - touch /usr/local/go/.installed-by-netdata + touch /usr/local/go/.installed-by-netdata - rm -rf "${GOLANG_TEMP_PATH}" + rm -rf "${GOLANG_TEMP_PATH}" } ensure_go_toolchain() { - go="$(PATH="/usr/local/go/bin:${PATH}" command -v go 2>/dev/null)" - - need_go_install=0 + go="$(PATH="/usr/local/go/bin:${PATH}" command -v go 2>/dev/null)" - if [ -z "${go}" ]; then - need_go_install=1 - elif ! 
check_go_version; then - need_go_install=1 - fi + need_go_install=0 - if [ "${need_go_install}" -eq 1 ]; then - if ! install_go_toolchain; then - return 1 - fi + if [ -z "${go}" ]; then + need_go_install=1 + elif ! check_go_version; then + need_go_install=1 + fi - rm -rf "${GOLANG_TEMP_PATH}" || true + if [ "${need_go_install}" -eq 1 ]; then + if ! install_go_toolchain; then + return 1 fi - return 0 + rm -rf "${GOLANG_TEMP_PATH}" || true + fi + + return 0 } diff --git a/packaging/cmake/Modules/NetdataDashboard.cmake b/packaging/cmake/Modules/NetdataDashboard.cmake new file mode 100644 index 000000000..070ce18e8 --- /dev/null +++ b/packaging/cmake/Modules/NetdataDashboard.cmake @@ -0,0 +1,85 @@ +# CMake module to handle fetching and installing the dashboard code +# +# Copyright (c) 2024 Netdata Inc. +# SPDX-License-Identifier: GPL-3.0-or-later + +include(NetdataUtil) + +function(handle_braindead_versioning_insanity prefix) + if(IS_DIRECTORY "${prefix}/v2" AND NOT IS_DIRECTORY "${prefix}/v3") + message(STATUS " Fixing incorrectly versioned paths generated by poorly written CI") + file(RENAME "${prefix}/v2" "${prefix}/v3") + + if(IS_DIRECTORY "${prefix}/v3" AND NOT IS_DIRECTORY "${prefix}/v2") + message(STATUS " Fixing incorrectly versioned paths generated by poorly written CI -- Done") + else() + message(FATAL_ERROR "Failed to fix incorrectly versioned paths") + endif() + endif() +endfunction() + +# Bundle the dashboard code for inclusion during install. +# +# This is unfortunately complicated due to how we need to handle the +# generation of the CMakeLists file for the dashboard code. +function(bundle_dashboard) + include(ExternalProject) + + set(dashboard_src_dir "${CMAKE_BINARY_DIR}/dashboard-src") + set(dashboard_src_prefix "${dashboard_src_dir}/dist/agent") + set(dashboard_bin_dir "${CMAKE_BINARY_DIR}/dashboard-bin") + set(DASHBOARD_URL "https://app.netdata.cloud/agent.tar.gz" CACHE STRING + "URL used to fetch the local agent dashboard code") + + message(STATUS "Preparing local agent dashboard code") + + message(STATUS " Fetching ${DASHBOARD_URL}") + file(DOWNLOAD + "${DASHBOARD_URL}" + "${CMAKE_BINARY_DIR}/dashboard.tar.gz" + TIMEOUT 180 + STATUS fetch_status) + + list(GET fetch_status 0 result) + + if(result) + message(FATAL_ERROR "Failed to fetch dashboard code") + else() + message(STATUS " Fetching ${DASHBOARD_URL} -- Done") + endif() + + message(STATUS " Extracting dashboard code") + extract_gzipped_tarball( + "${CMAKE_BINARY_DIR}/dashboard.tar.gz" + "${dashboard_src_dir}" + ) + message(STATUS " Extracting dashboard code -- Done") + + handle_braindead_versioning_insanity("${dashboard_src_prefix}") + + message(STATUS " Generating CMakeLists.txt file for dashboard code") + set(rules "") + + subdirlist(dash_dirs "${dashboard_src_prefix}") + + foreach(dir IN LISTS dash_dirs) + file(GLOB files + LIST_DIRECTORIES FALSE + RELATIVE "${dashboard_src_dir}" + "${dashboard_src_prefix}/${dir}/*") + + set(rules "${rules}install(FILES ${files} COMPONENT dashboard DESTINATION ${WEB_DEST}/${dir})\n") + endforeach() + + file(GLOB files + LIST_DIRECTORIES FALSE + RELATIVE "${dashboard_src_dir}" + "${dashboard_src_prefix}/*") + + set(rules "${rules}install(FILES ${files} COMPONENT dashboard DESTINATION ${WEB_DEST})\n") + + file(WRITE "${dashboard_src_dir}/CMakeLists.txt" "${rules}") + message(STATUS " Generating CMakeLists.txt file for dashboard code -- Done") + add_subdirectory("${dashboard_src_dir}" "${dashboard_bin_dir}") + message(STATUS "Preparing local agent dashboard code -- Done") 
+endfunction() diff --git a/packaging/cmake/Modules/NetdataEBPFCORE.cmake b/packaging/cmake/Modules/NetdataEBPFCORE.cmake index f4c918bfe..bf98ce581 100644 --- a/packaging/cmake/Modules/NetdataEBPFCORE.cmake +++ b/packaging/cmake/Modules/NetdataEBPFCORE.cmake @@ -11,8 +11,8 @@ set(ebpf-co-re_SOURCE_DIR "${CMAKE_BINARY_DIR}/ebpf-co-re") function(netdata_fetch_ebpf_co_re) ExternalProject_Add( ebpf-co-re - URL https://github.com/netdata/ebpf-co-re/releases/download/v1.4.5.1/netdata-ebpf-co-re-glibc-v1.4.5.1.tar.xz - URL_HASH SHA256=10d49602c873932a4e0a3717a4af2137434b480d0170c2fb000ec70ae02f6e30 + URL https://github.com/netdata/ebpf-co-re/releases/download/v1.5.0/netdata-ebpf-co-re-glibc-v1.5.0.tar.xz + URL_HASH SHA256=9585a5a48853f70efa51c48f57df34b4e47b1af56eaaef731f57525ebd76b90c SOURCE_DIR "${ebpf-co-re_SOURCE_DIR}" CONFIGURE_COMMAND "" BUILD_COMMAND "" diff --git a/packaging/cmake/Modules/NetdataEBPFLegacy.cmake b/packaging/cmake/Modules/NetdataEBPFLegacy.cmake index 12dfce486..251ef464c 100644 --- a/packaging/cmake/Modules/NetdataEBPFLegacy.cmake +++ b/packaging/cmake/Modules/NetdataEBPFLegacy.cmake @@ -20,19 +20,19 @@ function(netdata_fetch_legacy_ebpf_code) endif() if(need_static) - set(_hash 1c0c8f1177514e9e21a23c28841406595e57b7cfacd93746ff2d6b25987b94a6) + set(_hash ca0c6186b5c9c4640f8ba13ea375b66882203e8ca831a2e6e5e4e039b37f277a) set(_libc "static") elseif(_libc STREQUAL "glibc") - set(_hash e365a76a2bb25190f1d91e4dea2cfc5ff5db63b5238fbfbc89f72755cf85a12c) + set(_hash d3027685e15fdb6406fd36faf45287d79534d40601d388aca5ab87e90959846d) elseif(_libc STREQUAL "musl") - set(_hash ec14dcdfa29d4fba1cea6763740b9d37683515bde88a1a29b6e7c70ce01a604d) + set(_hash e2268ca1fa012e87d4d3d588e01b3a2389ad8a4b1c878508f6226ce1dad34acf) else() message(FATAL_ERROR "Could not determine libc implementation, unable to install eBPF legacy code.") endif() ExternalProject_Add( ebpf-code-legacy - URL https://github.com/netdata/kernel-collector/releases/download/v1.4.5.1/netdata-kernel-collector-${_libc}-v1.4.5.1.tar.xz + URL https://github.com/netdata/kernel-collector/releases/download/v1.5.0/netdata-kernel-collector-${_libc}-v1.5.0.tar.xz URL_HASH SHA256=${_hash} SOURCE_DIR "${ebpf-legacy_SOURCE_DIR}" CONFIGURE_COMMAND "" diff --git a/packaging/cmake/Modules/NetdataJSONC.cmake b/packaging/cmake/Modules/NetdataJSONC.cmake index 89ec70265..db18c14b2 100644 --- a/packaging/cmake/Modules/NetdataJSONC.cmake +++ b/packaging/cmake/Modules/NetdataJSONC.cmake @@ -71,7 +71,7 @@ endfunction() # NETDATA_JSONC_* variables for later use. 
macro(netdata_detect_jsonc) if(NOT ENABLE_BUNDLED_JSONC) - pkg_check_modules(JSONC json-c) + pkg_check_modules(JSONC json-c>=0.14) endif() if(NOT JSONC_FOUND) diff --git a/packaging/cmake/Modules/NetdataLibBPF.cmake b/packaging/cmake/Modules/NetdataLibBPF.cmake index 9c3bf6d2f..bc41e5336 100644 --- a/packaging/cmake/Modules/NetdataLibBPF.cmake +++ b/packaging/cmake/Modules/NetdataLibBPF.cmake @@ -31,7 +31,7 @@ function(netdata_bundle_libbpf) if(USE_LEGACY_LIBBPF) set(_libbpf_tag 673424c56127bb556e64095f41fd60c26f9083ec) # v0.0.9_netdata-1 else() - set(_libbpf_tag 6923eb970e22682eaedff79f5be4f9934b99cf50) # v1.4.5p_netdata + set(_libbpf_tag ad7c3a4266bf5ce301a5691eb7b405dbb27c7f3d) # v1.5.0p_netdata endif() if(DEFINED BUILD_SHARED_LIBS) diff --git a/packaging/cmake/Modules/NetdataUtil.cmake b/packaging/cmake/Modules/NetdataUtil.cmake index c6a13309f..0c1d803ea 100644 --- a/packaging/cmake/Modules/NetdataUtil.cmake +++ b/packaging/cmake/Modules/NetdataUtil.cmake @@ -144,3 +144,78 @@ function(netdata_identify_libc _libc_name) set(${_libc_name} ${_ND_DETECTED_LIBC} PARENT_SCOPE) endif() endfunction() + +# Extract a tar archive. +# +# This will use CMake’s native support if available, but will still +# fall back cleanly if CMake is too old. +function(extract_gzipped_tarball tarball target) + if(CMAKE_VERSION VERSION_LESS 3.18) + find_program(TAR NAMES tar bsdtar DOC "TAR archive program") + + if(TAR STREQUAL "TAR-NOTFOUND") + message(FATAL_ERROR "Unable to find tar command") + endif() + + find_program(GZIP NAMES gzip DOC "GZIP compression program") + + if(GZIP STREQUAL "GZIP-NOTFOUND") + message(FATAL_ERROR "Unable to find gzip command") + endif() + + file(MAKE_DIRECTORY "${target}") + execute_process(COMMAND "${TAR}" -x -z -f "${tarball}" -C "${target}" + RESULT_VARIABLE result) + + if(result) + message(FATAL_ERROR "Failed to extract ${tarball}") + endif() + else() + file(ARCHIVE_EXTRACT + INPUT "${tarball}" + DESTINATION "${target}") + endif() +endfunction() + +# Get a recursive list of all sub-directories of the specified directory, +# relative to that directory. +function(subdirlist result curdir) + file(GLOB_RECURSE children + LIST_DIRECTORIES TRUE + RELATIVE ${curdir} + ${curdir}/*) + + set(dirlist "") + + foreach(child ${children}) + if(IS_DIRECTORY ${curdir}/${child}) + list(APPEND dirlist ${child}) + endif() + endforeach() + + set(${result} ${dirlist} PARENT_SCOPE) +endfunction() + +# Precompile python code in the specified directory relative to the +# CMake install prefix at install time. 
+# This must be called _after_ the install directive for the python code +# in the specified directory +function(precompile_python dir component) + find_package(Python3) + + if(NOT Python3_Interpreter_FOUND) + message(STATUS "Could not find Python3, skipping precompilation of Python code.") + return() + endif() + + set(prefix [=[${CMAKE_INSTALL_PREFIX}]=]) + + install( + CODE "message(STATUS \"Precompiling Python3 code in ${prefix}/${dir}\")" + COMPONENT ${component} + ) + install( + CODE "execute_process(COMMAND ${Python3_EXECUTABLE} -O -m compileall -j0 -o2 ${prefix}/${dir} WORKING_DIRECTORY ${prefix}/${dir})" + COMPONENT ${component} + ) +endfunction() diff --git a/packaging/cmake/Modules/Packaging.cmake b/packaging/cmake/Modules/Packaging.cmake index 663dbe27c..92960ca2c 100644 --- a/packaging/cmake/Modules/Packaging.cmake +++ b/packaging/cmake/Modules/Packaging.cmake @@ -63,8 +63,17 @@ netdata-plugin-network-viewer") set(CPACK_DEBIAN_NETDATA_PACKAGE_CONFLICTS "netdata-core, netdata-plugins-bash, netdata-plugins-python, netdata-web") -list(APPEND _main_deps "netdata-plugin-chartsd") -list(APPEND _main_deps "netdata-plugin-pythond") +if(ENABLE_DASHBOARD) + list(APPEND _main_deps "netdata-dashboard") +endif() + +if(ENABLE_PLUGIN_CHARTS) + list(APPEND _main_deps "netdata-plugin-chartsd") +endif() + +if(ENABLE_PLUGIN_PYTHON) + list(APPEND _main_deps "netdata-plugin-pythond") +endif() if(ENABLE_PLUGIN_APPS) list(APPEND _main_deps "netdata-plugin-apps") @@ -104,6 +113,27 @@ set(CPACK_DEBIAN_PACKAGE_CONTROL_EXTRA set(CPACK_DEBIAN_NETDATA_DEBUGINFO_PACKAGE On) +# +# dashboard +# + +set(CPACK_COMPONENT_DASHBOARD_DEPENDS "netdata") +set(CPACK_COMPONENT_DASHBOARD_DESCRIPTION + "The local dashboard for the Netdata Agent. + This allows access to the dashboard on the local node without internet access.") + +set(CPACK_DEBIAN_DASHBOARD_PACKAGE_NAME "netdata-dashboard") +set(CPACK_DEBIAN_DASHBOARD_PACKAGE_SECTION "net") +set(CPACK_DEBIAN_DASHBOARD_PACKAGE_CONFLICTS "netdata (<< ${CPACK_PACKAGE_VERSION})") +set(CPACK_DEBIAN_DASHBOARD_PACKAGE_PREDEPENDS "adduser") + +set(CPACK_DEBIAN_DASHBOARD_PACKAGE_CONTROL_EXTRA + "${PKG_FILES_PATH}/deb/dashboard/preinst" + "${PKG_FILES_PATH}/deb/dashboard/postinst" + "${PKG_FILES_PATH}/deb/dashboard/postrm") + +set(CPACK_DEBIAN_DASHBOARD_DEBUGINFO_PACKAGE Off) + # # apps.plugin # @@ -434,10 +464,15 @@ set(CPACK_DEBIAN_PLUGIN-XENSTAT_DEBUGINFO_PACKAGE On) # list(APPEND CPACK_COMPONENTS_ALL "netdata") +if(ENABLE_DASHBOARD) + list(APPEND CPACK_COMPONENTS_ALL "dashboard") +endif() if(ENABLE_PLUGIN_APPS) list(APPEND CPACK_COMPONENTS_ALL "plugin-apps") endif() -list(APPEND CPACK_COMPONENTS_ALL "plugin-chartsd") +if(ENABLE_PLUGIN_CHARTS) + list(APPEND CPACK_COMPONENTS_ALL "plugin-chartsd") +endif() if(ENABLE_PLUGIN_CUPS) list(APPEND CPACK_COMPONENTS_ALL "plugin-cups") endif() @@ -465,7 +500,9 @@ endif() if(ENABLE_PLUGIN_PERF) list(APPEND CPACK_COMPONENTS_ALL "plugin-perf") endif() -list(APPEND CPACK_COMPONENTS_ALL "plugin-pythond") +if(ENABLE_PLUGIN_PYTHON) + list(APPEND CPACK_COMPONENTS_ALL "plugin-pythond") +endif() if(ENABLE_PLUGIN_SLABINFO) list(APPEND CPACK_COMPONENTS_ALL "plugin-slabinfo") endif() diff --git a/packaging/cmake/config.cmake.h.in b/packaging/cmake/config.cmake.h.in index 57d032693..0ea6ddd21 100644 --- a/packaging/cmake/config.cmake.h.in +++ b/packaging/cmake/config.cmake.h.in @@ -67,6 +67,12 @@ #cmakedefine HAVE_GETPRIORITY #cmakedefine HAVE_SETENV #cmakedefine HAVE_DLSYM +#cmakedefine HAVE_LIBCURL + +#cmakedefine HAVE_ARC4RANDOM_BUF 
+#cmakedefine HAVE_ARC4RANDOM_UNIFORM +#cmakedefine HAVE_RAND_S +#cmakedefine HAVE_GETRANDOM #cmakedefine HAVE_BACKTRACE #cmakedefine HAVE_CLOSE_RANGE @@ -103,14 +109,10 @@ // enabled features -#cmakedefine ENABLE_OPENSSL -#cmakedefine ENABLE_CLOUD -#cmakedefine ENABLE_ACLK #cmakedefine ENABLE_ML #cmakedefine ENABLE_EXPORTING_MONGODB #cmakedefine ENABLE_H2O #cmakedefine ENABLE_DBENGINE -#cmakedefine ENABLE_HTTPS #cmakedefine ENABLE_LZ4 #cmakedefine ENABLE_ZSTD #cmakedefine ENABLE_BROTLI @@ -172,6 +174,10 @@ #cmakedefine HAVE_LIBYAML #cmakedefine HAVE_LIBMNL +#cmakedefine HAVE_WEL +#cmakedefine HAVE_ETW +#cmakedefine RUN_UNDER_CLION + // /* Enable GNU extensions on systems that have them. */ // #ifndef _GNU_SOURCE // # define _GNU_SOURCE 1 @@ -182,7 +188,6 @@ // #cmakedefine ENABLE_PROMETHEUS_REMOTE_WRITE // /* NSA spy stuff */ -// #define ENABLE_HTTPS 1 // #cmakedefine01 HAVE_X509_VERIFY_PARAM_set1_host #define HAVE_CRYPTO diff --git a/packaging/cmake/pkg-files/deb/dashboard/postinst b/packaging/cmake/pkg-files/deb/dashboard/postinst new file mode 100755 index 000000000..320a649d9 --- /dev/null +++ b/packaging/cmake/pkg-files/deb/dashboard/postinst @@ -0,0 +1,11 @@ +#!/bin/sh + +set -e + +case "$1" in + configure|reconfigure) + if ! dpkg-statoverride --list /usr/share/netdata/www > /dev/null 2>&1; then + dpkg-statoverride --update --add root netdata 0755 /usr/share/netdata/www + fi + ;; +esac diff --git a/packaging/cmake/pkg-files/deb/dashboard/postrm b/packaging/cmake/pkg-files/deb/dashboard/postrm new file mode 100755 index 000000000..85438ad12 --- /dev/null +++ b/packaging/cmake/pkg-files/deb/dashboard/postrm @@ -0,0 +1,17 @@ +#!/bin/sh + +set -e + +case "$1" in + remove) ;; + + purge) + if dpkg-statoverride --list | grep -qw /var/lib/netdata/www; then + dpkg-statoverride --remove /var/lib/netdata/www + fi + + if dpkg-statoverride --list | grep -qw /usr/share/netdata/www; then + dpkg-statoverride --remove /usr/share/netdata/www + fi + ;; +esac diff --git a/packaging/cmake/pkg-files/deb/dashboard/preinst b/packaging/cmake/pkg-files/deb/dashboard/preinst new file mode 100755 index 000000000..57615ec06 --- /dev/null +++ b/packaging/cmake/pkg-files/deb/dashboard/preinst @@ -0,0 +1,11 @@ +#!/bin/sh + +set -e + +case "$1" in + install) + if ! getent group netdata > /dev/null; then + addgroup --quiet --system netdata + fi + ;; +esac diff --git a/packaging/cmake/pkg-files/deb/netdata/postinst b/packaging/cmake/pkg-files/deb/netdata/postinst index 97593c23b..7d2f0690a 100755 --- a/packaging/cmake/pkg-files/deb/netdata/postinst +++ b/packaging/cmake/pkg-files/deb/netdata/postinst @@ -20,10 +20,6 @@ case "$1" in dpkg-statoverride --update --add netdata adm 02750 /var/log/netdata fi - if ! 
dpkg-statoverride --list /usr/share/netdata/www > /dev/null 2>&1; then - dpkg-statoverride --update --add root netdata 0755 /usr/share/netdata/www - fi - dpkg-statoverride --force --update --add root netdata 0775 /var/lib/netdata/registry > /dev/null 2>&1 grep /usr/libexec/netdata /var/lib/dpkg/info/netdata.list | xargs -n 30 chown root:netdata diff --git a/packaging/cmake/pkg-files/deb/netdata/postrm b/packaging/cmake/pkg-files/deb/netdata/postrm index 7a636863b..daf28b74d 100755 --- a/packaging/cmake/pkg-files/deb/netdata/postrm +++ b/packaging/cmake/pkg-files/deb/netdata/postrm @@ -10,14 +10,6 @@ case "$1" in dpkg-statoverride --remove /var/cache/netdata fi - if dpkg-statoverride --list | grep -qw /var/lib/netdata/www; then - dpkg-statoverride --remove /var/lib/netdata/www - fi - - if dpkg-statoverride --list | grep -qw /usr/share/netdata/www; then - dpkg-statoverride --remove /usr/share/netdata/www - fi - if dpkg-statoverride --list | grep -qw /var/lib/netdata/registry; then dpkg-statoverride --remove /var/lib/netdata/registry fi diff --git a/packaging/dag/files/child_stream.conf b/packaging/dag/files/child_stream.conf index ed78bd3fb..4e37d0a91 100644 --- a/packaging/dag/files/child_stream.conf +++ b/packaging/dag/files/child_stream.conf @@ -2,9 +2,9 @@ enabled = {{ enabled }} destination = {{ destination }} api key = {{ api_key }} - timeout seconds = {{ timeout_seconds }} + timeout = {{ timeout_seconds }} default port = {{ default_port }} send charts matching = {{ send_charts_matching }} buffer size bytes = {{ buffer_size_bytes }} - reconnect delay seconds = {{ reconnect_delay_seconds }} + reconnect delay = {{ reconnect_delay_seconds }} initial clock resync iterations = {{ initial_clock_resync_iterations }} diff --git a/packaging/dag/files/parent_stream.conf b/packaging/dag/files/parent_stream.conf index 15f303f97..4c190a758 100644 --- a/packaging/dag/files/parent_stream.conf +++ b/packaging/dag/files/parent_stream.conf @@ -1,7 +1,7 @@ [{{ api_key }}] enabled = {{ enabled }} allow from = {{ allow_from }} - default history = {{ default_history }} + retention = {{ default_history }} health enabled by default = {{ health_enabled_by_default }} - default postpone alarms on connect seconds = {{ default_postpone_alarms_on_connect_seconds }} + postpone alerts on connect = {{ default_postpone_alarms_on_connect_seconds }} multiple connections = {{ multiple_connections }} diff --git a/packaging/dag/imageutils.py b/packaging/dag/imageutils.py index fd1e8ad26..42aba077c 100644 --- a/packaging/dag/imageutils.py +++ b/packaging/dag/imageutils.py @@ -345,7 +345,6 @@ def static_build_netdata( "--dont-wait", "--dont-start-it", "--disable-exporting-mongodb", - "--require-cloud", "--use-system-protobuf", "--dont-scrub-cflags-even-though-it-may-break-things", "--one-time-build", diff --git a/packaging/docker/Dockerfile b/packaging/docker/Dockerfile index b12af313d..bbc223a20 100644 --- a/packaging/docker/Dockerfile +++ b/packaging/docker/Dockerfile @@ -3,7 +3,7 @@ # This image contains preinstalled dependencies # hadolint ignore=DL3007 -FROM netdata/builder:v2 as builder +FROM netdata/builder:v2 AS builder # One of 'nightly' or 'stable' ARG RELEASE_CHANNEL=nightly @@ -47,7 +47,6 @@ RUN mkdir -p /app/usr/sbin/ \ mv /var/lib/netdata /app/var/lib/ && \ mv /etc/netdata /app/etc/ && \ mv /usr/sbin/netdata /app/usr/sbin/ && \ - mv /usr/sbin/netdata-claim.sh /app/usr/sbin/ && \ mv /usr/sbin/netdatacli /app/usr/sbin/ && \ mv /usr/sbin/systemd-cat-native /app/usr/sbin/ && \ mv packaging/docker/run.sh 
/app/usr/sbin/ && \ @@ -60,7 +59,7 @@ RUN mkdir -p /app/usr/sbin/ \ ##################################################################### # This image contains preinstalled dependencies # hadolint ignore=DL3007 -FROM netdata/base:v2 as base +FROM netdata/base:v2 AS base LABEL org.opencontainers.image.authors="Netdatabot " LABEL org.opencontainers.image.url="https://netdata.cloud" @@ -77,11 +76,11 @@ ONBUILD ENV NETDATA_OFFICIAL_IMAGE=false ARG NETDATA_UID=201 ARG NETDATA_GID=201 -ENV DOCKER_GRP netdata -ENV DOCKER_USR netdata +ENV DOCKER_GRP=netdata +ENV DOCKER_USR=netdata # If DISABLE_TELEMETRY is set, it will disable anonymous stats collection and reporting #ENV DISABLE_TELEMETRY=1 -ENV NETDATA_LISTENER_PORT 19999 +ENV NETDATA_LISTENER_PORT=19999 EXPOSE $NETDATA_LISTENER_PORT ENV NETDATA_EXTRA_DEB_PACKAGES="" diff --git a/packaging/docker/README.md b/packaging/docker/README.md index 6deb0cfa9..0f9ad23d6 100644 --- a/packaging/docker/README.md +++ b/packaging/docker/README.md @@ -1,12 +1,3 @@ - - import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; @@ -34,7 +25,7 @@ along with their descriptions.
Privileges -| Component | Privileges | Description | +| Component | Privileges | Description | |:---------------------:|:-----------------------------:|--------------------------------------------------------------------------------------------------------------------------| | cgroups.plugin | host PID mode, SYS_ADMIN | Container network interfaces monitoring. Map virtual interfaces in the system namespace to interfaces inside containers. | | proc.plugin | host network mode | Host system networking stack monitoring. | @@ -47,7 +38,7 @@ along with their descriptions.
Mounts -| Component | Mounts | Description | +| Component | Mounts | Description | |:----------------------:|:--------------------------:|--------------------------------------------------------------------------------------------------------------------------------------------| | netdata | /etc/os-release | Host info detection. | | diskspace.plugin | / | Host mount points monitoring. | @@ -177,14 +168,12 @@ Add `- /run/dbus:/run/dbus:ro` to the netdata service `volumes`. ### With NVIDIA GPUs monitoring - Monitoring NVIDIA GPUs requires: - Using official [NVIDIA driver](https://www.nvidia.com/Download/index.aspx). - Installing [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html). - Allowing the Netdata container to access GPU resources. - @@ -366,6 +355,7 @@ services: volumes: caddy_data: caddy_config: + netdataconfig: netdatalib: netdatacache: ``` diff --git a/packaging/docker/run.sh b/packaging/docker/run.sh index 6ba16d1ce..56a818d2f 100755 --- a/packaging/docker/run.sh +++ b/packaging/docker/run.sh @@ -110,14 +110,4 @@ if [ -w "/etc/netdata" ]; then fi fi -if [ -n "${NETDATA_CLAIM_URL}" ] && [ -n "${NETDATA_CLAIM_TOKEN}" ] && [ ! -f /var/lib/netdata/cloud.d/claimed_id ]; then - # shellcheck disable=SC2086 - /usr/sbin/netdata-claim.sh -token="${NETDATA_CLAIM_TOKEN}" \ - -url="${NETDATA_CLAIM_URL}" \ - ${NETDATA_CLAIM_ROOMS:+-rooms="${NETDATA_CLAIM_ROOMS}"} \ - ${NETDATA_CLAIM_PROXY:+-proxy="${NETDATA_CLAIM_PROXY}"} \ - ${NETDATA_EXTRA_CLAIM_OPTS} \ - -daemon-not-running -fi - exec /usr/sbin/netdata -u "${DOCKER_USR}" -D -s /host -p "${NETDATA_LISTENER_PORT}" "$@" diff --git a/packaging/installer/README.md b/packaging/installer/README.md index d15925dca..1c43cdf89 100644 --- a/packaging/installer/README.md +++ b/packaging/installer/README.md @@ -6,150 +6,15 @@ Netdata is very flexible and can be used to monitor all kinds of infrastructure. The easiest way to install Netdata on your system is via Netdata Cloud, to do so: -1. Sign up to . -2. You will be presented with an empty space, and a prompt to "Connect Nodes" with the install command for each platform. -3. Select the platform you want to install Netdata to, copy and paste the script into your node's terminal, and run it. +1. Sign in to . +2. Select a [Space](/docs/netdata-cloud/organize-your-infrastructure-invite-your-team.md#netdata-cloud-spaces), and click the "Connect Nodes" prompt, which will show the install command for your platform of choice. +3. Copy and paste the script into your node's terminal, and run it. Once Netdata is installed, you can see the node live in your Netdata Space and charts in the [Metrics tab](/docs/dashboards-and-charts/metrics-tab-and-single-node-tabs.md). -Take a look at our [Dashboards and Charts](/docs/dashboards-and-charts/README.md) section to read more about Netdata's features. +## Anonymous statistics -## Post-install - -### Configuration - -If you are looking to configure your Netdata Agent installation, refer to the [respective section in our Documentation](/docs/netdata-agent/configuration/README.md). - -### Data collection - -If Netdata didn't autodetect all the hardware, containers, services, or applications running on your node, you should learn more about [how data collectors work](/src/collectors/README.md). 
If there's a [supported integration](/src/collectors/COLLECTORS.md) for metrics you need, refer to its respective page and read about its requirements to configure your endpoint to publish metrics in the correct format and endpoint. - -### Alerts & notifications - -Netdata comes with hundreds of pre-configured alerts, designed by our monitoring gurus in parallel with our open-source community, but you may want to [edit alerts](/src/health/REFERENCE.md) or [enable notifications](/docs/alerts-and-notifications/notifications/README.md) to customize your Netdata experience. - -### Make your deployment production ready - -Go through our [deployment guides](/docs/deployment-guides/README.md), for suggested configuration changes for production deployments. - -## Advanced installation options and troubleshooting - -### Automatic updates - -By default, Netdata's installation scripts enable automatic updates for both nightly and stable release channels. - -If you preferred to update your Netdata Agent manually, you can disable automatic updates by using the `--no-updates` -option when you install or update Netdata using the [automatic one-line installation script](/packaging/installer/methods/kickstart.md). - -```bash -wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --no-updates -``` - -With automatic updates disabled, you can choose exactly when and how you [update Netdata](/packaging/installer/UPDATE.md). - -### Nightly vs. Stable Releases - -**Nightly**: We create nightly builds every 24 hours. They contain fully-tested code that fixes bugs or security flaws, -or introduces new features to Netdata. Every nightly release is a candidate for then becoming a stable release—when -we're ready, we simply change the release tags on GitHub. That means nightly releases are stable and proven to function -correctly in the vast majority of Netdata use cases. That's why nightly is the _best choice for most Netdata users_. - -**Stable**: We create stable releases whenever we believe the code has reached a major milestone. Most often, stable -releases correlate with the introduction of new, significant features. Stable releases might be a better choice for -those who run Netdata in _mission-critical production systems_, as updates will come more infrequently, and only after -the community helps fix any bugs that might have been introduced in previous releases. - -**Pros of using nightly releases:** - -- Get the latest features and bug fixes as soon as they're available -- Receive security-related fixes immediately -- Use stable, fully-tested code that's always improving -- Leverage the same Netdata experience our community is using - -**Pros of using stable releases:** - -- Protect yourself from the rare instance when major bugs slip through our testing and negatively affect a Netdata installation -- Retain more control over the Netdata version you use - -### Anonymous statistics - -Starting with v1.30, Netdata collects anonymous usage information by default and sends it to a self-hosted PostHog instance within the Netdata infrastructure. Read about the information collected, and learn how to-opt, on our [anonymous statistics](/docs/netdata-agent/configuration/anonymous-telemetry-events.md) page. +Netdata collects anonymous usage information by default and sends it to a self-hosted PostHog instance within the Netdata infrastructure. 
Read about the information collected on our [anonymous statistics](/docs/netdata-agent/configuration/anonymous-telemetry-events.md) documentation page. The usage statistics are _vital_ for us, as we use them to discover bugs and prioritize new features. We thank you for _actively_ contributing to Netdata's future. - -### Troubleshooting and known issues - -We are tracking a few issues related to installation and packaging. - -#### Installs on hosts without IPv4 connectivity - -Our regular installation process requires access to a number of GitHub services that do not have IPv6 connectivity. As -such, using the kickstart install script on such hosts generally does not work, and will typically fail with an -error from cURL or wget about connection timeouts. You can check if your system is affected by this by attempting -to connect to (or ping) `https://api.github.com/`. Failing to connect indicates that you are affected by this issue. - -There are three potential workarounds for this: - -1. You can configure your system with a proper IPv6 transition mechanism, such as NAT64. GitHub’s anachronisms - affect many projects other than just Netdata, and there are unfortunately a number of other services out there - that do not provide IPv6 connectivity, so taking this route is likely to save you time in the future as well. -2. If you are using a system that we publish native packages for (see our [platform support - policy](/docs/netdata-agent/versions-and-platforms.md) for more details), - you can manually set up our native package repositories as outlined in our [native package install - documentation](/packaging/installer/methods/packages.md). Our official - package repositories do provide service over IPv6, so they work without issue on hosts without IPv4 connectivity. -3. If neither of the above options work for you, you can still install using our [offline installation - instructions](/packaging/installer/methods/offline.md), though - do note that the offline install source must be prepared from a system with IPv4 connectivity. - -#### Older distributions (Ubuntu 14.04, Debian 8, CentOS 6) and OpenSSL - -If you're running an older Linux distribution or one that has reached EOL, such as Ubuntu 14.04 LTS, Debian 8, or CentOS -6, your Agent may not be able to securely connect to Netdata Cloud due to an outdated version of OpenSSL. These old -versions of OpenSSL cannot perform [hostname validation](https://wiki.openssl.org/index.php/Hostname_validation), which -helps securely encrypt SSL connections. - -If you choose to continue using the outdated version of OpenSSL, your node will still connect to Netdata Cloud, albeit -with hostname verification disabled. Without verification, your Netdata Cloud connection could be vulnerable to -man-in-the-middle attacks. - -#### CentOS 6 and CentOS 8 - -To install the Agent on certain CentOS and RHEL systems, you must enable non-default repositories, such as EPEL or -PowerTools, to gather hard dependencies. See the [CentOS 6](/packaging/installer/methods/manual.md#centos--rhel-6x) and -[CentOS 8](/packaging/installer/methods/manual.md#centos--rhel-8x) sections for more information. - -#### Access to file is not permitted - -If you see an error similar to `Access to file is not permitted: /usr/share/netdata/web/index.html` when you try to -visit the Agent dashboard at `http://NODE:19999`, you need to update Netdata's permissions to match those of your -system. - -Run `ls -la /usr/share/netdata/web/index.html` to find the file's permissions. 
You may need to change this path based on -the error you're seeing in your browser. In the below example, the file is owned by the user `root` and the group -`root`. - -```bash -ls -la /usr/share/netdata/web/index.html --rw-r--r--. 1 root root 89377 May 5 06:30 /usr/share/netdata/web/index.html -``` - -These files need to have the same user and group used to install your netdata. Suppose you installed netdata with user -`netdata` and group `netdata`, in this scenario you will need to run the following command to fix the error: - -```bash -# chown -R netdata:netdata /usr/share/netdata/web -``` - -#### Multiple versions of OpenSSL - -We've received reports from the community about issues with running the `kickstart.sh` script on systems that have both -a distribution-installed version of OpenSSL and a manually-installed local version. The Agent's installer cannot handle -both. - -#### Clang compiler on Linux - -Our current build process has some issues when using certain configurations of the `clang` C compiler on Linux. See [the -section on `nonrepresentable section on output` -errors](/packaging/installer/methods/manual.md#nonrepresentable-section-on-output-errors) for a workaround. diff --git a/packaging/installer/REINSTALL.md b/packaging/installer/REINSTALL.md deleted file mode 100644 index eeb0e2313..000000000 --- a/packaging/installer/REINSTALL.md +++ /dev/null @@ -1,65 +0,0 @@ -# Reinstall Netdata - -In certain situations, such as needing to enable a feature or troubleshoot an issue, you may need to reinstall the -Netdata Agent on your node. - -## One-line installer script (`kickstart.sh`) - -### Reinstalling with the same install type - -Run the one-line installer script with the `--reinstall` parameter to reinstall the Netdata Agent. This will preserve -any [user configuration](/docs/netdata-agent/configuration/README.md) in `netdata.conf` or other files, and will keep the same install -type that was used for the original install. - -If you used any [optional -parameters](/packaging/installer/methods/kickstart.md#optional-parameters-to-alter-your-installation) during initial -installation, you need to pass them to the script again during reinstallation. If you cannot remember which options you -used, read the contents of the `.environment` file and look for a `REINSTALL_OPTIONS` line. This line contains a list of -optional parameters. - -```bash -wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --reinstall -``` - -### Performing a clean reinstall - -Run the one-line installer script with the `--reinstall-clean` parameter to perform a clean reinstall of the -Netdata Agent. This will wipe all existing configuration and historical data, but can be useful sometimes for -getting a badly broken installation working again. Unlike the regular `--reinstall` parameter, this may use a -different install type than the original install used. - -If you used any [optional -parameters](/packaging/installer/methods/kickstart.md#optional-parameters-to-alter-your-installation) during initial -installation, you need to pass them to the script again during reinstallation. If you cannot remember which options you -used, read the contents of the `.environment` file and look for a `REINSTALL_OPTIONS` line. This line contains a list of -optional parameters. 
- -```bash -wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --reinstall-clean -``` - -### Changing the install type of an existing installation - -The clean reinstall procedure outlined above can also be used to manually change the install type for an existing -installation. Without any extra parameters, it will automatically pick the preferred installation type for your -system, even if that has changed since the original install. If you want to force use of a specific install type, -you can use the `--native-only`, `--static-only`, or `--build-only` parameter to control which install type gets -used, just like with a new install. - -When using the `--reinstall-clean` option to change the install type, you will need to manually preserve any -configuration or historical data you want to keep. The following directories may need to be preserved: - -- `/etc/netdata` (`/opt/netdata/etc/netdata` for static installs): For agent configuration. -- `/var/lib/netdata` (`/opt/netdata/var/lib/netdata` for static installs): For claiming configuration. -- `/var/cache/netdata` (`/opt/netdata/var/cache/netdata` for static installs): For historical data. - -When copying these directories back after the reinstall, you may need to update file ownership by running `chown --R netdata:netdata` on them. - -## Troubleshooting - -If you still experience problems with your Netdata Agent installation after following one of these processes, the next -best route is to [uninstall](/packaging/installer/UNINSTALL.md) and then try a fresh installation using the [one-line -installer](/packaging/installer/methods/kickstart.md). - -You can also post to our [community forums](https://community.netdata.cloud) or create a new [bug report](https://github.com/netdata/netdata/issues/new?assignees=&labels=bug%2Cneeds+triage&template=BUG_REPORT.yml). diff --git a/packaging/installer/UNINSTALL.md b/packaging/installer/UNINSTALL.md index c7de90d95..87fe93955 100644 --- a/packaging/installer/UNINSTALL.md +++ b/packaging/installer/UNINSTALL.md @@ -1,77 +1,76 @@ # Uninstall Netdata -> ### Note -> -> If you're having trouble updating Netdata, moving from one installation method to another, or generally having -> issues with your Netdata Agent installation, consider our [reinstalling Netdata](/packaging/installer/REINSTALL.md) instead of removing the Netdata Agent entirely. +## UNIX -The recommended method to uninstall Netdata on a system is to use our kickstart installer script with the `--uninstall` option like so: +> **Note** +> +> This method assumes you installed Netdata using the `kickstart.sh` or `netdata-installer.sh` script. +> If you used a different method, it might not work and could complicate the removal process. -```sh +As with updating Netdata, you first need to [determine your installation type](/packaging/installer/UPDATE.md). + +**If your installation type indicates a native package, then proceed to uninstall Netdata using your package manager.** + +The recommended way to uninstall Netdata is to use the same script you used for installation. Just add the `--uninstall` flag: + +```bash wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --uninstall ``` -Or (if you have curl but not wget): +
+if you have curl but not wget ```sh curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --uninstall ``` -This will work in most cases without you needing to do anything more other than accepting removal of configuration -and data files. +
- -If you used a non-standard installation prefix, you may need to specify that prefix using the `--old-install-prefix` -option when uninstalling this way. -## Unofficial installs +**What to Expect**: -If you used a third-party package to install Netdata, then the above method will usually not work, and you will -need to use whatever mechanism you used to originally install Netdata to uninstall it. +In most cases, these commands will guide you through the uninstallation process and remove configuration and data files automatically. -## Uninstalling manually +**Non-Standard Installations**: -Most official installs of Netdata include an uninstaller script that can be manually invoked instead of using the -kickstart script (internally, the kickstart script also uses this uninstaller script, it just handles the process -outlined below for you). +If you installed Netdata with a custom prefix (different directory location), you may need to specify the original prefix during uninstallation with the `--old-install-prefix` option. -This uninstaller script is self-contained other than requiring a `.environment` file that was generated during -installation. In most cases, this will be found in `/etc/netdata/.environment`, though if you used a non-standard -installation prefix it will usually be located in a similar place under that prefix. +### Uninstalling manually -A workflow for uninstallation looks like this: +Most official installations of Netdata include an uninstaller script that can be manually invoked instead of using the kickstart script (internally, the kickstart script also uses this uninstaller script, it just handles the process outlined below for you). -1. Find your `.environment` file, which is usually `/etc/netdata/.environment` in a default installation. -2. If you cannot find that file and would like to uninstall Netdata, then create a new file with the following content: +This uninstaller script is self-contained, other than requiring a `.environment` file that was generated during installation. In most cases, this will be found in `/etc/netdata/.environment`, though if you used a custom installation prefix it will usually be located under that directory. -```sh -NETDATA_PREFIX="" # put what you used as a parameter to shell installed `--install-prefix` flag. Otherwise it should be empty -NETDATA_ADDED_TO_GROUPS="" # Additional groups for a user running the Netdata process -``` +1. Find your `.environment` file. +2. If you can’t find that file and would like to uninstall Netdata, then create a new file with the following content: + + ```sh + NETDATA_PREFIX="" # Set this to the value you passed to the installer's `--install-prefix` flag. Otherwise it should be empty + NETDATA_ADDED_TO_GROUPS="" # Additional groups for a user running the Netdata process + ``` -3. Run `netdata-uninstaller.sh` as follows - 3.1 **Interactive mode (Default)** + - **Interactive mode (Default)** - The default mode in the uninstaller script is **interactive**. This means that the script provides you - the option to reply with "yes" (`y`/`Y`) or "no" (`n`/`N`) to control the removal of each Netdata asset in - the filesystem. +3. Run `netdata-uninstaller.sh` as follows + The default mode in the uninstaller script is **interactive**. This means that the script provides you the option to reply with "yes" (`y`/`Y`) or "no" (`n`/`N`) to control the removal of each Netdata asset in the filesystem. 
- ```sh - ${NETDATA_PREFIX}/usr/libexec/netdata/netdata-uninstaller.sh --yes --env - ``` + ```sh + ${NETDATA_PREFIX}/usr/libexec/netdata/netdata-uninstaller.sh --yes --env + ``` - 3.2 **Non-interactive mode** + - **Non-interactive mode** - If you are sure and you know what you are doing, you can speed up the removal of the Netdata assets from the - filesystem without any questions by using the force option (`-f`/`--force`). This option will remove all the - Netdata assets in a **non-interactive** mode. + If you’re sure, and you know what you’re doing, you can speed up the removal of the Netdata assets from the filesystem without any questions by using the force option (`-f`/`--force`). This option will remove all the + Netdata assets in a **non-interactive** mode. - ```sh - ${NETDATA_PREFIX}/usr/libexec/netdata/netdata-uninstaller.sh --yes --force --env - ``` + ```sh + ${NETDATA_PREFIX}/usr/libexec/netdata/netdata-uninstaller.sh --yes --force --env + ``` -Note: Existing installations may still need to download the file if it's not present. To execute uninstall in that case, -run the following commands: +> **Note** +> +> Existing installations may still need to download the file if it's not present. To execute the uninstaller in that case, run the following commands: ```sh wget https://raw.githubusercontent.com/netdata/netdata/master/packaging/installer/netdata-uninstaller.sh @@ -79,7 +78,9 @@ chmod +x ./netdata-uninstaller.sh ./netdata-uninstaller.sh --yes --env ``` -The default `environment_file` is `/etc/netdata/.environment`. +## Windows + +Currently, the Windows version of Netdata is in beta. To uninstall Netdata on Windows: -> Note: This uninstallation method assumes previous installation with `netdata-installer.sh` or the kickstart script. -> Using it when Netdata was installed in some other way will usually not work correctly, and may make it harder to uninstall Netdata. +1. Locate the `Uninstall.exe` file in your Netdata installation directory. +2. Double-click the `Uninstall.exe` file and follow the on-screen instructions. diff --git a/packaging/installer/UPDATE.md b/packaging/installer/UPDATE.md index 94faa881b..d77e9c620 100644 --- a/packaging/installer/UPDATE.md +++ b/packaging/installer/UPDATE.md @@ -1,31 +1,15 @@ # Update Netdata -By default, the Netdata Agent automatically updates with the latest nightly or stable version depending on which -you installed. If you opted out of automatic updates, you need to update your Netdata Agent to the latest nightly -or stable version. You can also [enable or disable automatic updates on an existing install](#control-automatic-updates). +The update process can differ based on the install type: -> 💡 Looking to reinstall the Netdata Agent to enable a feature, update an Agent that cannot update automatically, or -> troubleshoot an error during the installation process? See our [reinstallation doc](/packaging/installer/REINSTALL.md) -> for reinstallation steps. - -Before you update the Netdata Agent, check to see if your Netdata Agent is already up-to-date by clicking on the update -icon in the local Agent dashboard's top navigation. This modal informs you whether your Agent needs an update or not. - -The exact update method to use depends on the install type: - -- Installs with an install type of 'custom' usually indicate installing a third-party package through the system - package manager. To update these installs, you should update the package just like you would any other package - on your system. 
-- Installs with an install type starting with `binpkg` or ending with `build` or `static` can be updated using - our [regular update method](#updates-for-most-systems). -- Installs with an install type of 'oci' were created from our official Docker images, and should be updated - using our [Docker](#docker) update procedure. -- macOS users should check [our update instructions for macOS](#macos). -- Manually built installs should check [our update instructions for manual builds](#manual-installation-from-git). +- Install types starting with `binpkg` or ending with `build` or `static` can be updated using our [kickstart script update method](#unix). +- Installs with an install type of `custom` usually indicate installing a third-party package through the system package manager. To update these installs, you should update the package just like you would any other package on your system. +- macOS users should check [our update instructions for macOS](#macos). +- Manually built installs should check [our update instructions for manual builds](#manual-installation-from-git). ## Determine which installation method you used -Starting with netdata v1.33.0, you can use Netdata itself to determine the installation type by running: +You can run the following to determine your installation type: ```bash netdata -W buildinfo | grep -E 'Installation Type|Install type:' @@ -46,82 +30,38 @@ The following table contains all possible installation types: | custom | Anything not covered by the other identifiers, including manual builds, manually running netdata-installer.sh, and third-party packages (community). | | Unknown | Same as custom. | - -If you are using an older version of Netdata, or the above command produces no output, you can run our one-line -installation script in dry-run mode to attempt to determine what method to use to update by running the following -command: +If you're using an older Netdata version or the above command doesn't output anything, try our one-line installation script in dry-run mode. Run the following command to determine the appropriate update method: ```bash wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --dry-run ``` -Note that if you installed Netdata using an installation prefix, you will need to add an `--install-prefix` option -specifying that prefix to make sure it finds the existing install. +> **Note** +> +> If you installed Netdata using an installation prefix, you will need to add an `--install-prefix` option specifying that prefix to make sure it finds the existing install. -If you see a line starting with `--- Would attempt to update existing installation by running the updater script -located at:`, then our [regular update method](#updates-for-most-systems) will work for you. +If you see a line starting with `--- Would attempt to update existing installation by running the updater script located at:`, then our [kickstart script update method](#unix) will work for you. -Otherwise, it should either indicate that the installation type is not supported (which probably means you either -have a `custom` install or built Netdata manually) or indicate that it would create a new install (which means that -you either used a non-standard install path, or that you don’t actually have Netdata installed). 
+Otherwise, it should either indicate that the installation type is not supported (which probably means you either have a `custom` install or built Netdata manually) or indicate that it would create a new install (which means that you either used a non-standard install path, or that you don’t actually have Netdata installed). -## Updates for most systems +## UNIX -In most cases, you can update netdata using our one-line installation script. This script will automatically -run the update script that was installed as part of the initial install (even if you disabled automatic updates) -and preserve the existing install options you specified. +In most cases, you can update Netdata using our one-line kickstart script. This script will automatically +run the update script installed as part of the initial install and preserve the existing install options you specified. -If you installed Netdata using an installation prefix, you will need to add an `--install-prefix` option specifying -that prefix to this command to make sure it finds Netdata. +If you installed Netdata using an installation prefix, you will need to add an `--install-prefix` option specifying that prefix to this command to make sure it finds Netdata. ```bash wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh ``` -### Issues with older binpkg installs - -The above command is known not to work with binpkg type installs for stable releases with a version number of -v1.33.1 or earlier, and nightly builds with a version number of v1.33.1-93 or earlier. If you have such a system, -the above command will report that it found an existing install, and then issue a warning about not being able to -find the updater script. - -On such installs, you can update Netdata using your distribution package manager. - -### Updates on hosts without IPv4 connectivity - -The update process outlined above suffers from the same issues that installing on hosts without IPv4 -connectivity does, and requires similar workarounds. For more details check [the explanation in our install -documentation](/packaging/installer/README.md#installs-on-hosts-without-ipv4-connectivity). - -### If the kickstart script does not work - -If the above command fails, you can [reinstall -Netdata](/packaging/installer/REINSTALL.md#one-line-installer-script-kickstartsh) to get the latest version. This -also preserves your [configuration](/docs/netdata-agent/configuration/README.md) in `netdata.conf` or other files just like updating -normally would, though you will need to specify any installation options you used originally again. +## Windows -## Docker - -Docker-based installations do not update automatically. To update an Netdata Agent running in a Docker container, you -must pull the [latest image from Docker Hub](https://hub.docker.com/r/netdata/netdata), stop and remove the container, -and re-create it using the latest image. - -First, pull the latest version of the image. - -```bash -docker pull netdata/netdata:latest -``` - -Next, to stop and remove any containers using the `netdata/netdata` image. Replace `netdata` if you changed it from the -default. - -```bash -docker stop netdata -docker rm netdata -``` +To update a Windows Netdata installation, download the executable and proceed to reinstall the Agent. This will update the installation. -You can now re-create your Netdata container using the `docker` command or a `docker-compose.yml` file. 
See our [Docker -installation instructions](/packaging/docker/README.md#create-a-new-netdata-agent-container) for details. +> **Note** +> +> The Windows Agent is currently in beta and only available for Nightly releases, and the installer can be found in our [nightlies repo](https://github.com/netdata/netdata-nightlies). A stable version will be released soon. ## macOS @@ -131,25 +71,17 @@ If you installed Netdata on your macOS system using Homebrew, you can explicitly brew upgrade netdata ``` -Homebrew downloads the latest Netdata via the -[formulae](https://github.com/Homebrew/homebrew-core/blob/master/Formula/n/netdata.rb), ensures all dependencies are met, -and updates Netdata via reinstallation. - -If you instead installed Netdata using our one-line installation script, you can use our [regular update -instructions](#updates-for-most-systems) to update Netdata. +Homebrew downloads the latest Netdata via the [formula](https://github.com/Homebrew/homebrew-core/blob/master/Formula/n/netdata.rb), ensures all dependencies are met, and updates Netdata via reinstallation. ## Manual installation from Git -If you installed [Netdata manually from Git](/packaging/installer/methods/manual.md), you can run that installer again -to update your agent. First, run our automatic requirements installer, which works on many Linux distributions, to -ensure your system has the dependencies necessary for new features. +If you installed [Netdata manually from Git](/packaging/installer/methods/manual.md), run our automatic requirements installer, which works on many Linux distributions, to ensure your system has the dependencies necessary for new features. ```bash bash <(curl -sSL https://raw.githubusercontent.com/netdata/netdata/master/packaging/installer/install-required-packages.sh) ``` -Navigate to the directory where you first cloned the Netdata repository, pull the latest source code, and run -`netdata-install.sh` again. This process compiles Netdata with the latest source code and updates it via reinstallation. +Navigate to the directory where you first cloned the Netdata repository, pull the latest source code, and run `netdata-installer.sh` again. This process compiles Netdata with the latest source code and updates it via reinstallation. ```bash cd /path/to/netdata/git @@ -157,55 +89,20 @@ git pull origin master sudo ./netdata-installer.sh ``` -> ⚠️ If you installed Netdata with any optional parameters, such as `--no-updates` to disable automatic updates, and -> want to retain those settings, you need to set them again during this process. - -## Control automatic updates - -Starting with Netdata v1.34.0, you can easily enable or disable automatic updates on an existing installation -using the updater script. - -For most installs on Linux, you can enable auto-updates with: - -```bash -/usr/libexec/netdata/netdata-updater.sh --enable-auto-updates -``` - -and disable them with: - -```bash -/usr/libexec/netdata/netdata-updater.sh --disable-auto-updates -``` - -For static installs, instead use: +> **Note** +> +> If you installed Netdata with any optional parameters, such as `--install-prefix` to install under a specific directory, you need to set them again during this process. -```bash -/opt/netdata/usr/libexec/netdata/netdata-updater.sh --enable-auto-updates -``` - -and: - -```bash -/opt/netdata/usr/libexec/netdata/netdata-updater.sh --disable-auto-updates -``` +## Additional info -## Control runtime behavior of the updater script. 
+### Control runtime behavior of the updater script -Starting with v1.40.0, the `netdata-updater.sh` script supports a config file called `netdata-updater.conf`, -located in the same directory as the main `netdata.conf` file. This file uses POSIX shell script syntax to define -variables that are used by the updater. +Starting with v1.40.0, the `netdata-updater.sh` script supports a config file called `netdata-updater.conf`, located in the same directory as the main `netdata.conf` file. This file uses POSIX shell script syntax to define variables that are used by the updater. -This configuration file can be edited [using our `edit-config` -script](/docs/netdata-agent/configuration/README.md). +This configuration file can be edited using our [`edit-config` script](/docs/netdata-agent/configuration/README.md). The following configuration options are currently supported: -- `NETDATA_UPDATER_JITTER`: Sets an upper limit in seconds on the random delay in the updater script when running - as a scheduled task. This random delay helps avoid issues resulting from too many nodes trying to reconnect to - the Cloud at the same time. The default value is 3600, which corresponds to one hour. Most users should not ever - need to change this. -- `NETDATA_MAJOR_VERSION_UPDATES`: If set to a value other than 0, then new major versions will be installed - without user confirmation. Must be set to a non-zero value for automated updates to install new major versions. -- `NETDATA_NO_SYSTEMD_JOURNAL`: If set to a value other than 0, skip attempting to install the - `netdata-plugin-systemd-journal` package on supported systems on update. This optional package will be installed - by default on supported systems by the updater if this option is not set. Only affects systems using native packages. +- `NETDATA_UPDATER_JITTER`: Sets an upper limit in seconds on the random delay in the updater script when running as a scheduled task. This random delay helps avoid issues resulting from too many nodes trying to reconnect to the Cloud at the same time. The default value is 3600, which corresponds to one hour. Most users shouldn’t ever need to change this. +- `NETDATA_MAJOR_VERSION_UPDATES`: If set to a value other than 0, then new major versions will be installed without user confirmation. Must be set to a non-zero value for automated updates to install new major versions. +- `NETDATA_NO_SYSTEMD_JOURNAL`: If set to a value other than 0, skip attempting to install the `netdata-plugin-systemd-journal` package on supported systems on update. The updater will install this optional package by default on supported systems if this option is not set. It only affects systems using native packages. 
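To make the options above concrete, here is a minimal sketch of what a `netdata-updater.conf` could look like. The variable names come from the list above; the specific values are illustrative assumptions only, not recommendations:

```sh
# netdata-updater.conf is read by netdata-updater.sh, so it uses POSIX shell syntax.

# Cap the random scheduled-update delay at 30 minutes instead of the default 3600 seconds.
NETDATA_UPDATER_JITTER=1800

# Allow automated updates to install new major versions without asking for confirmation.
NETDATA_MAJOR_VERSION_UPDATES=1

# Skip installing the optional netdata-plugin-systemd-journal package (native packages only).
NETDATA_NO_SYSTEMD_JOURNAL=1
```

As described above, such a file would be edited with the `edit-config` script so that it lives in the same directory as your main `netdata.conf`.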
diff --git a/packaging/installer/functions.sh b/packaging/installer/functions.sh index c339ac87c..23328a7a6 100644 --- a/packaging/installer/functions.sh +++ b/packaging/installer/functions.sh @@ -303,6 +303,18 @@ prepare_cmake_options() { enable_feature PLUGIN_GO 0 fi + if [ "${ENABLE_PYTHON:-1}" -eq 1 ]; then + enable_feature PLUGIN_PYTHON 1 + else + enable_feature PLUGIN_PYTHON 0 + fi + + if [ "${ENABLE_CHARTS:-1}" -eq 1 ]; then + enable_feature PLUGIN_CHARTS 1 + else + enable_feature PLUGIN_CHARTS 0 + fi + if [ "${USE_SYSTEM_PROTOBUF:-0}" -eq 1 ]; then enable_feature BUNDLED_PROTOBUF 0 else @@ -341,8 +353,6 @@ prepare_cmake_options() { enable_feature PLUGIN_NETWORK_VIEWER "${IS_LINUX}" enable_feature PLUGIN_EBPF "${ENABLE_EBPF:-0}" - enable_feature ACLK "${ENABLE_CLOUD:-1}" - enable_feature CLOUD "${ENABLE_CLOUD:-1}" enable_feature BUNDLED_JSONC "${NETDATA_BUILD_JSON_C:-0}" enable_feature DBENGINE "${ENABLE_DBENGINE:-1}" enable_feature H2O "${ENABLE_H2O:-1}" diff --git a/packaging/installer/install-required-packages.sh b/packaging/installer/install-required-packages.sh index e97902026..f6d6a3e28 100755 --- a/packaging/installer/install-required-packages.sh +++ b/packaging/installer/install-required-packages.sh @@ -411,25 +411,35 @@ detect_package_manager_from_distribution() { centos* | clearos* | rocky* | almalinux*) package_installer="" tree="centos" - [[ -n "${yum}" ]] && package_installer="install_yum" [[ -n "${dnf}" ]] && package_installer="install_dnf" + [[ -n "${yum}" ]] && package_installer="install_yum" if [[ "${IGNORE_INSTALLED}" -eq 0 ]] && [[ -z "${package_installer}" ]]; then echo >&2 "command 'yum' or 'dnf' is required to install packages on a '${distribution} ${version}' system." exit 1 fi ;; - fedora* | redhat* | red\ hat* | rhel*) + redhat* | red\ hat* | rhel*) package_installer= tree="rhel" - [[ -n "${yum}" ]] && package_installer="install_yum" [[ -n "${dnf}" ]] && package_installer="install_dnf" + [[ -n "${yum}" ]] && package_installer="install_yum" + if [[ "${IGNORE_INSTALLED}" -eq 0 ]] && [[ -z "${package_installer}" ]]; then + echo >&2 "command 'yum' or 'dnf' is required to install packages on a '${distribution} ${version}' system." + exit 1 + fi + ;; + + fedora*) + package_installer="install_dnf" + tree="rhel" if [[ "${IGNORE_INSTALLED}" -eq 0 ]] && [[ -z "${package_installer}" ]]; then echo >&2 "command 'yum' or 'dnf' is required to install packages on a '${distribution} ${version}' system." exit 1 fi ;; + ol*) package_installer= tree="ol" @@ -825,6 +835,18 @@ declare -A pkg_libuuid_dev=( ['default']="" ) +declare -A pkg_libcurl_dev=( + ['alpine']="curl-dev" + ['arch']="curl" + ['clearlinux']="devpkg-curl" + ['debian']="libcurl4-openssl-dev" + ['gentoo']="net-misc/curl" + ['ubuntu']="libcurl4-openssl-dev" + ['macos']="curl" + ['freebsd']="curl" + ['default']="libcurl-devel" +) + declare -A pkg_libmnl_dev=( ['alpine']="libmnl-dev" ['arch']="libmnl" @@ -1246,6 +1268,7 @@ packages() { suitable_package libyaml-dev suitable_package libsystemd-dev suitable_package pcre2 + suitable_package libcurl-dev fi # ------------------------------------------------------------------------- @@ -1531,7 +1554,7 @@ install_yum() { validate_install_dnf() { echo >&2 " > Checking if package '${*}' is installed..." 
- dnf list installed "${*}" > /dev/null 2>&1 || echo "${*}" + dnf list --installed "${*}" > /dev/null 2>&1 || echo "${*}" } install_dnf() { diff --git a/packaging/installer/installer.nsi b/packaging/installer/installer.nsi deleted file mode 100644 index c14ccb599..000000000 --- a/packaging/installer/installer.nsi +++ /dev/null @@ -1,128 +0,0 @@ -!include "MUI2.nsh" -!include "nsDialogs.nsh" -!include "FileFunc.nsh" - -Name "Netdata" -Outfile "netdata-installer.exe" -InstallDir "$PROGRAMFILES\Netdata" -RequestExecutionLevel admin - -!define MUI_ICON "NetdataWhite.ico" -!define MUI_UNICON "NetdataWhite.ico" - -!define ND_UININSTALL_REG "Software\Microsoft\Windows\CurrentVersion\Uninstall\Netdata" - -!define MUI_ABORTWARNING -!define MUI_UNABORTWARNING - -!insertmacro MUI_PAGE_WELCOME -!insertmacro MUI_PAGE_LICENSE "C:\msys64\gpl-3.0.txt" -!insertmacro MUI_PAGE_DIRECTORY -!insertmacro MUI_PAGE_INSTFILES -!insertmacro MUI_PAGE_FINISH - -!insertmacro MUI_UNPAGE_CONFIRM -!insertmacro MUI_UNPAGE_INSTFILES -!insertmacro MUI_UNPAGE_FINISH - -!insertmacro MUI_LANGUAGE "English" - -Function .onInit - nsExec::ExecToLog '$SYSDIR\sc.exe stop Netdata' - pop $0 - ${If} $0 == 0 - nsExec::ExecToLog '$SYSDIR\sc.exe delete Netdata' - pop $0 - ${EndIf} -FunctionEnd - -Function NetdataUninstallRegistry - ClearErrors - WriteRegStr HKLM "${ND_UININSTALL_REG}" \ - "DisplayName" "Netdata - Real-time system monitoring." - WriteRegStr HKLM "${ND_UININSTALL_REG}" \ - "DisplayIcon" "$INSTDIR\Uninstall.exe,0" - WriteRegStr HKLM "${ND_UININSTALL_REG}" \ - "UninstallString" "$INSTDIR\Uninstall.exe" - WriteRegStr HKLM "${ND_UININSTALL_REG}" \ - "RegOwner" "Netdata Inc." - WriteRegStr HKLM "${ND_UININSTALL_REG}" \ - "RegCompany" "Netdata Inc." - WriteRegStr HKLM "${ND_UININSTALL_REG}" \ - "Publisher" "Netdata Inc." - WriteRegStr HKLM "${ND_UININSTALL_REG}" \ - "HelpLink" "https://learn.netdata.cloud/" - WriteRegStr HKLM "${ND_UININSTALL_REG}" \ - "URLInfoAbout" "https://www.netdata.cloud/" - WriteRegStr HKLM "${ND_UININSTALL_REG}" \ - "DisplayVersion" "${CURRVERSION}" - WriteRegStr HKLM "${ND_UININSTALL_REG}" \ - "VersionMajor" "${MAJORVERSION}" - WriteRegStr HKLM "${ND_UININSTALL_REG}" \ - "VersionMinor" "${MINORVERSION}" - - IfErrors 0 +2 - MessageBox MB_ICONEXCLAMATION|MB_OK "Unable to create an entry in the Control Panel!" IDOK end - - ClearErrors - ${GetSize} "$INSTDIR" "/S=0K" $0 $1 $2 - IntFmt $0 "0x%08X" $0 - WriteRegDWORD HKLM "${ND_UININSTALL_REG}" "EstimatedSize" "$0" - - IfErrors 0 +2 - MessageBox MB_ICONEXCLAMATION|MB_OK "Cannot estimate the installation size." IDOK end - end: -FunctionEnd - -Section "Install Netdata" - SetOutPath $INSTDIR - SetCompress off - - File /r "C:\msys64\opt\netdata\*.*" - - ClearErrors - nsExec::ExecToLog '$SYSDIR\sc.exe create Netdata binPath= "$INSTDIR\usr\bin\netdata.exe" start= delayed-auto' - pop $0 - ${If} $0 != 0 - DetailPrint "Warning: Failed to create Netdata service." - ${EndIf} - - ClearErrors - nsExec::ExecToLog '$SYSDIR\sc.exe description Netdata "Real-time system monitoring service"' - pop $0 - ${If} $0 != 0 - DetailPrint "Warning: Failed to add Netdata service description." - ${EndIf} - - ClearErrors - nsExec::ExecToLog '$SYSDIR\sc.exe start Netdata' - pop $0 - ${If} $0 != 0 - DetailPrint "Warning: Failed to start Netdata service." 
- ${EndIf} - - WriteUninstaller "$INSTDIR\Uninstall.exe" - - Call NetdataUninstallRegistry -SectionEnd - -Section "Uninstall" - ClearErrors - nsExec::ExecToLog '$SYSDIR\sc.exe stop Netdata' - pop $0 - ${If} $0 != 0 - DetailPrint "Warning: Failed to stop Netdata service." - ${EndIf} - - ClearErrors - nsExec::ExecToLog '$SYSDIR\sc.exe delete Netdata' - pop $0 - ${If} $0 != 0 - DetailPrint "Warning: Failed to delete Netdata service." - ${EndIf} - - RMDir /r "$INSTDIR" - - DeleteRegKey HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Netdata" -SectionEnd - diff --git a/packaging/installer/kickstart.sh b/packaging/installer/kickstart.sh index 72b82be26..6b9676788 100755 --- a/packaging/installer/kickstart.sh +++ b/packaging/installer/kickstart.sh @@ -53,11 +53,9 @@ INSTALL_PREFIX="" NETDATA_AUTO_UPDATES="default" NETDATA_CLAIM_URL="https://app.netdata.cloud" NETDATA_COMMAND="default" -NETDATA_DISABLE_CLOUD=0 NETDATA_INSTALLER_OPTIONS="" NETDATA_FORCE_METHOD="" NETDATA_OFFLINE_INSTALL_SOURCE="" -NETDATA_REQUIRE_CLOUD=1 NETDATA_WARNINGS="" RELEASE_CHANNEL="default" @@ -149,8 +147,6 @@ main() { if [ -n "${NETDATA_CLAIM_TOKEN}" ]; then claim - elif [ "${NETDATA_DISABLE_CLOUD}" -eq 1 ]; then - soft_disable_cloud fi set_auto_updates @@ -185,8 +181,6 @@ USAGE: kickstart.sh [options] --native-only Only install if native binary packages are available. --static-only Only install if a static build is available. --build-only Only install using a local build. - --disable-cloud Disable support for Netdata Cloud (default: detect) - --require-cloud Only install if Netdata Cloud can be enabled. Overrides --disable-cloud. --install-prefix Specify an installation prefix for local builds (default: autodetect based on system type). --old-install-prefix Specify an old local builds installation prefix for uninstall/reinstall (if it's not default). --install-version Specify the version of Netdata to install. @@ -1183,41 +1177,6 @@ handle_existing_install() { esac } -soft_disable_cloud() { - set_tmpdir - - cloud_prefix="${INSTALL_PREFIX}/var/lib/netdata/cloud.d" - - run_as_root mkdir -p "${cloud_prefix}" - - cat > "${tmpdir}/cloud.conf" << EOF -[global] - enabled = no -EOF - - run_as_root cp "${tmpdir}/cloud.conf" "${cloud_prefix}/cloud.conf" - - if [ -z "${NETDATA_NO_START}" ]; then - case "${SYSTYPE}" in - Darwin) run_as_root launchctl kickstart -k com.github.netdata ;; - FreeBSD) run_as_root service netdata restart ;; - Linux) - initpath="$(run_as_root readlink /proc/1/exe)" - - if command -v service > /dev/null 2>&1; then - run_as_root service netdata restart - elif command -v rc-service > /dev/null 2>&1; then - run_as_root rc-service netdata restart - elif [ "$(basename "${initpath}" 2> /dev/null)" = "systemd" ]; then - run_as_root systemctl restart netdata - elif [ -f /etc/init.d/netdata ]; then - run_as_root /etc/init.d/netdata restart - fi - ;; - esac - fi -} - confirm_install_prefix() { if [ -n "${INSTALL_PREFIX}" ] && [ "${NETDATA_FORCE_METHOD}" != 'build' ]; then fatal "The --install-prefix option is only supported together with the --build-only option." F0204 @@ -1246,10 +1205,9 @@ check_claim_opts() { # shellcheck disable=SC2235,SC2030 if [ -z "${NETDATA_CLAIM_TOKEN}" ] && [ -n "${NETDATA_CLAIM_ROOMS}" ]; then fatal "Invalid claiming options, claim rooms may only be specified when a token is specified." 
F0204 - elif [ -z "${NETDATA_CLAIM_TOKEN}" ] && [ -n "${NETDATA_CLAIM_EXTRA}" ]; then + elif [ -z "${NETDATA_CLAIM_TOKEN}" ] && [ -n "${NETDATA_CLAIM_EXTRA}${NETDATA_CLAIM_PROXY}${NETDATA_CLAIM_NORELOAD}${NETDATA_CLAIM_INSECURE}" ]; then + # The above condition checks if _any_ claiming options other than the rooms have been set when the token is unset. fatal "Invalid claiming options, a claiming token must be specified." F0204 - elif [ "${NETDATA_DISABLE_CLOUD}" -eq 1 ] && [ -n "${NETDATA_CLAIM_TOKEN}" ]; then - fatal "Cloud explicitly disabled, but automatic claiming requested. Either enable Netdata Cloud, or remove the --claim-* options." F0204 fi } @@ -1277,6 +1235,93 @@ is_netdata_running() { fi } +write_claim_config() { + if [ -z "${INSTALL_PREFIX}" ] || [ "${INSTALL_PREFIX}" = "/" ]; then + config_path="/etc/netdata" + netdatacli="$(command -v netdatacli)" + elif [ "${INSTALL_PREFIX}" = "/opt/netdata" ]; then + config_path="/opt/netdata/etc/netdata" + netdatacli="/opt/netdata/bin/netdatacli" + elif [ ! -d "${INSTALL_PREFIX}/netdata" ]; then + config_path="${INSTALL_PREFIX}/etc/netdata" + netdatacli="${INSTALL_PREFIX}/usr/sbin/netdatacli" + else + config_path="${INSTALL_PREFIX}/netdata/etc/netdata" + netdatacli="${INSTALL_PREFIX}/netdata/usr/sbin/netdatacli" + fi + + claim_config="${config_path}/claim.conf" + + if [ "${DRY_RUN}" -eq 1 ]; then + progress "Would attempt to write claiming configuration to ${claim_config}" + return 0 + fi + + progress "Writing claiming configuration to ${claim_config}" + + config="[global]" + config="${config}\n url = ${NETDATA_CLAIM_URL}" + config="${config}\n token = ${NETDATA_CLAIM_TOKEN}" + if [ -n "${NETDATA_CLAIM_ROOMS}" ]; then + config="${config}\n rooms = ${NETDATA_CLAIM_ROOMS}" + fi + if [ -n "${NETDATA_CLAIM_PROXY}" ]; then + config="${config}\n proxy = ${NETDATA_CLAIM_PROXY}" + fi + if [ -n "${NETDATA_CLAIM_INSECURE}" ]; then + config="${config}\n insecure = ${NETDATA_CLAIM_INSECURE}" + fi + + run_as_root touch "${claim_config}.tmp" || return 1 + run_as_root chmod 0640 "${claim_config}.tmp" || return 1 + run_as_root chown ":${NETDATA_CLAIM_GROUP:-netdata}" "${claim_config}.tmp" || return 1 + run_as_root sh -c "printf '${config}\\n' > \"${claim_config}.tmp\"" || return 1 + run_as_root mv -f "${claim_config}.tmp" "${claim_config}" || return 1 + + if [ -z "${NETDATA_CLAIM_NORELOAD}" ]; then + run_as_root "${netdatacli}" reload-claiming-state || return 1 + fi +} + +run_claim_script() { + if [ -n "${NETDATA_CLAIM_NORELOAD}" ]; then + NETDATA_CLAIM_EXTRA="${NETDATA_CLAIM_EXTRA} -daemon-not-running" + fi + + if [ -n "${NETDATA_CLAIM_INSECURE}" ]; then + NETDATA_CLAIM_EXTRA="${NETDATA_CLAIM_EXTRA} -insecure" + fi + + if [ -n "${NETDATA_CLAIM_PROXY}" ]; then + if [ "${NETDATA_CLAIM_PROXY}" = "none" ]; then + NETDATA_CLAIM_EXTRA="${NETDATA_CLAIM_EXTRA} -noproxy" + else + NETDATA_CLAIM_EXTRA="${NETDATA_CLAIM_EXTRA} -proxy=${NETDATA_CLAIM_PROXY}" + fi + fi + + # shellcheck disable=SC2086 + run_as_root "${NETDATA_CLAIM_PATH}" -token="${NETDATA_CLAIM_TOKEN}" -rooms="${NETDATA_CLAIM_ROOMS}" -url="${NETDATA_CLAIM_URL}" ${NETDATA_CLAIM_EXTRA} + case $? in + 0) progress "Successfully claimed node" ;; + 1) warning "Unable to claim node due to invalid claiming options. If you are seeing this message, you’ve probably found a bug and should open a bug report at ${AGENT_BUG_REPORT_URL}" ;; + 2) warning "Unable to claim node due to issues creating the claiming directory or preparing the local claiming key. 
Make sure you have a working openssl command and that ${INSTALL_PREFIX}/var/lib/netdata/cloud.d exists, then try again." ;; + 3) warning "Unable to claim node due to missing dependencies. Usually this means that the Netdata Agent was built without support for Netdata Cloud. If you built the agent from source, please install all needed dependencies for Cloud support. If you used the regular installation script and see this error, please file a bug report at ${AGENT_BUG_REPORT_URL}." ;; + 4) warning "Failed to claim node due to inability to connect to ${NETDATA_CLAIM_URL}. Usually this either means that the specified claiming URL is wrong, or that you are having networking problems." ;; + 5) progress "Successfully claimed node, but was not able to notify the Netdata Agent. You will need to restart the Netdata service on this node before it will show up in the Cloud." ;; + 8) warning "Failed to claim node due to an invalid agent ID. You can usually resolve this by removing ${INSTALL_PREFIX}/var/lib/netdata/registry/netdata.public.unique.id and restarting the agent. Then try to claim it again using the same options." ;; + 9) warning "Failed to claim node due to an invalid node name. This probably means you tried to specify a custom name for this node (for example, using the --claim-hostname option), but the hostname itself was either empty or consisted solely of whitespace. You can resolve this by specifying a valid host name and trying again." ;; + 10) warning "Failed to claim node due to an invalid room ID. This issue is most likely caused by a typo. Please check if the room(s) you are trying to add appear on the list of rooms provided to the --claim-rooms option ('${NETDATA_CLAIM_ROOMS}'). Then verify if the rooms are visible in Netdata Cloud and try again." ;; + 11) warning "Failed to claim node due to an issue with the generated RSA key pair. You can usually resolve this by removing all files in ${INSTALL_PREFIX}/var/lib/netdata/cloud.d and then trying again." ;; + 12) warning "Failed to claim node due to an invalid or expired claiming token. Please check that the token specified with the --claim-token option ('${NETDATA_CLAIM_TOKEN}') matches what you see in the Cloud and try again." ;; + 13) warning "Failed to claim node because the Cloud thinks it is already claimed. If this node was created by cloning a VM or as a container from a template, please remove the file ${INSTALL_PREFIX}/var/lib/netdata/registry/netdata.public.unique.id and restart the agent. Then try to claim it again with the same options. Otherwise, if you are certain this node has never been claimed before, you can use the --claim-id option to specify a new node ID to use for claiming, for example by using the uuidgen command like so: --claim-id \"\$(uuidgen)\"" ;; + 14) warning "Failed to claim node because the node is already in the process of being claimed. You should not need to do anything to resolve this, the node should show up properly in the Cloud soon. If it does not, please report a bug at ${AGENT_BUG_REPORT_URL}." ;; + 15|16|17) warning "Failed to claim node due to an internal server error in the Cloud. Please retry claiming this node later, and if you still see this message file a bug report at ${CLOUD_BUG_REPORT_URL}." ;; + 18) warning "Unable to claim node because this Netdata installation does not have a unique ID yet. Make sure the agent is running and started up correctly, and then try again." ;; + *) warning "Failed to claim node for an unknown reason. 
This usually means either networking problems or a bug. Please retry claiming later, and if you still see this message file a bug report at ${AGENT_BUG_REPORT_URL}" ;; + esac +} + claim() { if [ "${DRY_RUN}" -eq 1 ]; then progress "Would attempt to claim agent to ${NETDATA_CLAIM_URL}" @@ -1300,17 +1345,18 @@ claim() { NETDATA_CLAIM_PATH="${INSTALL_PREFIX}/netdata/usr/sbin/netdata-claim.sh" fi + method="script" err_msg= err_code= if [ -z "${NETDATA_CLAIM_PATH}" ]; then - err_msg="Unable to claim node: could not find usable claiming script. Reinstalling Netdata may resolve this." - err_code=F050B + method="config" elif [ ! -e "${NETDATA_CLAIM_PATH}" ]; then - err_msg="Unable to claim node: ${NETDATA_CLAIM_PATH} does not exist." - err_code=F0512 + method="config" elif [ ! -f "${NETDATA_CLAIM_PATH}" ]; then err_msg="Unable to claim node: ${NETDATA_CLAIM_PATH} is not a file." err_code=F0513 + elif grep -q '%%NEW_CLAIMING_METHOD%%' "${NETDATA_CLAIM_PATH}"; then + method="config" elif [ ! -x "${NETDATA_CLAIM_PATH}" ]; then err_msg="Unable to claim node: claiming script at ${NETDATA_CLAIM_PATH} is not executable. Reinstalling Netdata may resolve this." err_code=F0514 @@ -1326,34 +1372,16 @@ claim() { fi if ! is_netdata_running; then - NETDATA_CLAIM_EXTRA="${NETDATA_CLAIM_EXTRA} -daemon-not-running" + NETDATA_CLAIM_NORELOAD=1 fi - # shellcheck disable=SC2086 - run_as_root "${NETDATA_CLAIM_PATH}" -token="${NETDATA_CLAIM_TOKEN}" -rooms="${NETDATA_CLAIM_ROOMS}" -url="${NETDATA_CLAIM_URL}" ${NETDATA_CLAIM_EXTRA} - case $? in - 0) - progress "Successfully claimed node" - return 0 - ;; - 1) warning "Unable to claim node due to invalid claiming options. If you are seeing this message, you’ve probably found a bug and should open a bug report at ${AGENT_BUG_REPORT_URL}" ;; - 2) warning "Unable to claim node due to issues creating the claiming directory or preparing the local claiming key. Make sure you have a working openssl command and that ${INSTALL_PREFIX}/var/lib/netdata/cloud.d exists, then try again." ;; - 3) warning "Unable to claim node due to missing dependencies. Usually this means that the Netdata Agent was built without support for Netdata Cloud. If you built the agent from source, please install all needed dependencies for Cloud support. If you used the regular installation script and see this error, please file a bug report at ${AGENT_BUG_REPORT_URL}." ;; - 4) warning "Failed to claim node due to inability to connect to ${NETDATA_CLAIM_URL}. Usually this either means that the specified claiming URL is wrong, or that you are having networking problems." ;; - 5) - progress "Successfully claimed node, but was not able to notify the Netdata Agent. You will need to restart the Netdata service on this node before it will show up in the Cloud." - return 0 + case ${method} in + script) run_claim_script ;; + config) + if ! write_claim_config; then + warning "Failed to write claiming configuration. This usually means you do not have permissions to access the configuration directory." + fi ;; - 8) warning "Failed to claim node due to an invalid agent ID. You can usually resolve this by removing ${INSTALL_PREFIX}/var/lib/netdata/registry/netdata.public.unique.id and restarting the agent. Then try to claim it again using the same options." ;; - 9) warning "Failed to claim node due to an invalid node name. This probably means you tried to specify a custom name for this node (for example, using the --claim-hostname option), but the hostname itself was either empty or consisted solely of whitespace. 
You can resolve this by specifying a valid host name and trying again." ;; - 10) warning "Failed to claim node due to an invalid room ID. This issue is most likely caused by a typo. Please check if the room(s) you are trying to add appear on the list of rooms provided to the --claim-rooms option ('${NETDATA_CLAIM_ROOMS}'). Then verify if the rooms are visible in Netdata Cloud and try again." ;; - 11) warning "Failed to claim node due to an issue with the generated RSA key pair. You can usually resolve this by removing all files in ${INSTALL_PREFIX}/var/lib/netdata/cloud.d and then trying again." ;; - 12) warning "Failed to claim node due to an invalid or expired claiming token. Please check that the token specified with the --claim-token option ('${NETDATA_CLAIM_TOKEN}') matches what you see in the Cloud and try again." ;; - 13) warning "Failed to claim node because the Cloud thinks it is already claimed. If this node was created by cloning a VM or as a container from a template, please remove the file ${INSTALL_PREFIX}/var/lib/netdata/registry/netdata.public.unique.id and restart the agent. Then try to claim it again with the same options. Otherwise, if you are certain this node has never been claimed before, you can use the --claim-id option to specify a new node ID to use for claiming, for example by using the uuidgen command like so: --claim-id \"\$(uuidgen)\"" ;; - 14) warning "Failed to claim node because the node is already in the process of being claimed. You should not need to do anything to resolve this, the node should show up properly in the Cloud soon. If it does not, please report a bug at ${AGENT_BUG_REPORT_URL}." ;; - 15|16|17) warning "Failed to claim node due to an internal server error in the Cloud. Please retry claiming this node later, and if you still see this message file a bug report at ${CLOUD_BUG_REPORT_URL}." ;; - 18) warning "Unable to claim node because this Netdata installation does not have a unique ID yet. Make sure the agent is running and started up correctly, and then try again." ;; - *) warning "Failed to claim node for an unknown reason. This usually means either networking problems or a bug. 
Please retry claiming later, and if you still see this message file a bug report at ${AGENT_BUG_REPORT_URL}" ;; esac if [ "${ACTION}" = "claim" ]; then @@ -1938,12 +1966,6 @@ build_and_install() { opts="${opts} --stable-channel" fi - if [ "${NETDATA_REQUIRE_CLOUD}" -eq 1 ]; then - opts="${opts} --require-cloud" - elif [ "${NETDATA_DISABLE_CLOUD}" -eq 1 ]; then - opts="${opts} --disable-cloud" - fi - # shellcheck disable=SC2086 run_script ./netdata-installer.sh ${opts} @@ -2392,12 +2414,10 @@ parse_args() { esac ;; "--disable-cloud") - NETDATA_DISABLE_CLOUD=1 - NETDATA_REQUIRE_CLOUD=0 + warning "Cloud cannot be disabled" ;; "--require-cloud") - NETDATA_DISABLE_CLOUD=0 - NETDATA_REQUIRE_CLOUD=1 + warning "Cloud is always required" ;; "--dont-start-it") NETDATA_NO_START=1 @@ -2447,26 +2467,21 @@ parse_args() { "--native-only") NETDATA_FORCE_METHOD="native" ;; "--static-only") NETDATA_FORCE_METHOD="static" ;; "--build-only") NETDATA_FORCE_METHOD="build" ;; - "--claim-token") - NETDATA_CLAIM_TOKEN="${2}" - shift 1 - ;; - "--claim-rooms") - NETDATA_CLAIM_ROOMS="${2}" - shift 1 - ;; - "--claim-url") - NETDATA_CLAIM_URL="${2}" - shift 1 - ;; "--claim-"*) optname="$(echo "${1}" | cut -d '-' -f 4-)" case "${optname}" in - id|proxy|user|hostname) + token) NETDATA_CLAIM_TOKEN="${2}"; shift 1 ;; + rooms) NETDATA_CLAIM_ROOMS="${2}"; shift 1 ;; + url) NETDATA_CLAIM_URL="${2}"; shift 1 ;; + proxy) NETDATA_CLAIM_PROXY="${2}"; shift 1 ;; + noproxy) NETDATA_CLAIM_PROXY="none" ;; + insecure) NETDATA_CLAIM_INSECURE=yes ;; + noreload) NETDATA_CLAIM_NORELOAD=1 ;; + id|user|hostname) NETDATA_CLAIM_EXTRA="${NETDATA_CLAIM_EXTRA} -${optname}=${2}" shift 1 ;; - verbose|insecure|noproxy|noreload|daemon-not-running) NETDATA_CLAIM_EXTRA="${NETDATA_CLAIM_EXTRA} -${optname}" ;; + verbose|daemon-not-running) NETDATA_CLAIM_EXTRA="${NETDATA_CLAIM_EXTRA} -${optname}" ;; *) warning "Ignoring unrecognized claiming option ${optname}" ;; esac ;; diff --git a/packaging/installer/methods/ansible.md b/packaging/installer/methods/ansible.md index 0aadeff91..82e4095f7 100644 --- a/packaging/installer/methods/ansible.md +++ b/packaging/installer/methods/ansible.md @@ -1,16 +1,6 @@ - - # Deploy Netdata with Ansible -Netdata's [one-line kickstart](/packaging/installer/README.md#install-on-linux-with-one-line-installer) is zero-configuration, highly adaptable, and compatible with tons +Netdata's [one-line kickstart](/packaging/installer/README.md) is zero-configuration, highly adaptable, and compatible with tons of different operating systems and Linux distributions. You can use it on bare metal, VMs, containers, and everything in-between. @@ -22,7 +12,7 @@ code? Enter [Ansible](https://ansible.com), a popular system provisioning, configuration management, and infrastructure as code (IaC) tool. Ansible uses **playbooks** to glue many standardized operations together with a simple syntax, then run those operations over standard and secure SSH connections. There's no agent to install on the remote system, so all you -have to worry about is your application and your monitoring software. +have to worry about is your application and your monitoring software. Ansible has some competition from the likes of [Puppet](https://puppet.com/) or [Chef](https://www.chef.io/), but the most valuable feature about Ansible is **idempotent**. From the [Ansible @@ -42,9 +32,9 @@ minutes. ## Prerequisites -- A Netdata Cloud account. [Sign in and create one](https://app.netdata.cloud) if you don't have one already. 
-- An administration system with [Ansible](https://www.ansible.com/) installed. -- One or more nodes that your administration system can access via [SSH public +- A Netdata Cloud account. [Sign in and create one](https://app.netdata.cloud) if you don't have one already. +- An administration system with [Ansible](https://www.ansible.com/) installed. +- One or more nodes that your administration system can access via [SSH public keys](https://git-scm.com/book/en/v2/Git-on-the-Server-Generating-Your-SSH-Public-Key) (preferably password-less). ## Download and configure the playbook @@ -71,9 +61,9 @@ cd ansible-quickstart The `hosts` file contains a list of IP addresses or hostnames that Ansible will try to run the playbook against. The `hosts` file that comes with the repository contains two example IP addresses, which you should replace according to the -IP address/hostname of your nodes. +IP address/hostname of your nodes. -```conf +```text 203.0.113.0 hostname=node-01 203.0.113.1 hostname=node-02 ``` @@ -86,7 +76,7 @@ omit the `hostname=` string entirely to use the system's default hostname. If you SSH into your nodes as a user other than `root`, you need to configure `hosts` according to those user names. Use the `ansible_user` variable to set the login user. For example: -```conf +```text 203.0.113.0 hostname=ansible-01 ansible_user=example ``` @@ -96,7 +86,7 @@ If you use an SSH key other than `~/.ssh/id_rsa` for logging into your nodes, yo the `hosts` file with the `ansible_ssh_private_key_file` variable. For example, to log into a Lightsail instance using two different SSH keys supplied by AWS. -```conf +```text 203.0.113.0 hostname=ansible-01 ansible_ssh_private_key_file=~/.ssh/LightsailDefaultKey-us-west-2.pem 203.0.113.1 hostname=ansible-02 ansible_ssh_private_key_file=~/.ssh/LightsailDefaultKey-us-east-1.pem ``` @@ -110,7 +100,7 @@ and `claim_room` variables. To find your `claim_token` and `claim_room`, go to Netdata Cloud, then click on your Space's name in the top navigation, then click on **Manage your Space**. Click on the **Nodes** tab in the panel that appears, which displays a script with -`token` and `room` strings. +`token` and `room` strings. ![Animated GIF of finding the claiming script and the token and room strings](https://user-images.githubusercontent.com/1153921/98740235-f4c3ac00-2367-11eb-8ffd-e9ab0f04c463.gif) @@ -123,7 +113,7 @@ claim_rooms: XXXXX ``` Change the `dbengine_multihost_disk_space` if you want to change the metrics retention policy by allocating more or less -disk space for storing metrics. The default is 2048 Mib, or 2 GiB. +disk space for storing metrics. The default is 2048 MiB, or 2 GiB. Because we're connecting this node to Netdata Cloud, and will view its dashboards there instead of via the IP address or hostname of the node, the playbook disables that local dashboard by setting `web_mode` to `none`. This gives a small diff --git a/packaging/installer/methods/aws.md index 8648a8f0b..b6818709a 100644 --- a/packaging/installer/methods/aws.md +++ b/packaging/installer/methods/aws.md @@ -1,12 +1,3 @@ - - # Install Netdata on AWS Netdata is fully compatible with Amazon Web Services (AWS). @@ -41,11 +32,11 @@ command from a remote system, and it fails, it's likely that a firewall is block Another option is to put Netdata behind web server, which will proxy requests through standard HTTP/HTTPS ports (80/443), which are likely already open on your instance.
We have a number of guides available: -- [Apache](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-apache.md) -- [Nginx](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-nginx.md) -- [Caddy](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-caddy.md) -- [HAProxy](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-haproxy.md) -- [lighttpd](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-lighttpd.md) +- [Apache](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-apache.md) +- [Nginx](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-nginx.md) +- [Caddy](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-caddy.md) +- [HAProxy](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-haproxy.md) +- [lighttpd](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-lighttpd.md) Sign in to the [AWS console](https://console.aws.amazon.com/) and navigate to the EC2 dashboard. Click on the **Security Groups** link in the navigation, beneath the **Network & Security** heading. Find the Security Group your instance @@ -54,7 +45,7 @@ inbound rules**. Add a new rule with the following options: -```conf +```text Type: Custom TCP Protocol: TCP Port Range: 19999 diff --git a/packaging/installer/methods/azure.md b/packaging/installer/methods/azure.md index 94590eecb..baccacd95 100644 --- a/packaging/installer/methods/azure.md +++ b/packaging/installer/methods/azure.md @@ -1,15 +1,6 @@ - - # Install Netdata on Azure -Netdata is fully compatible with Azure. +Netdata is fully compatible with Azure. You can install Netdata on cloud instances to monitor the apps/services running there, or use multiple instances in a [parent-child streaming](/src/streaming/README.md) configuration. @@ -41,18 +32,18 @@ command from a remote system, and it fails, it's likely that a firewall is block Another option is to put Netdata behind web server, which will proxy requests through standard HTTP/HTTPS ports (80/443), which are likely already open on your instance. 
We have a number of guides available: -- [Apache](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-apache.md) -- [Nginx](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-nginx.md) -- [Caddy](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-caddy.md) -- [HAProxy](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-haproxy.md) -- [lighttpd](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-lighttpd.md) +- [Apache](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-apache.md) +- [Nginx](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-nginx.md) +- [Caddy](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-caddy.md) +- [HAProxy](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-haproxy.md) +- [lighttpd](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-lighttpd.md) Sign in to the [Azure portal](https://portal.azure.com) and open the virtual machine running Netdata. Click on the **Networking** link beneath the **Settings** header, then click on the **Add inbound security rule** button. Add a new rule with the following options: -```conf +```text Source: Any Source port ranges: 19999 Destination: Any @@ -64,5 +55,3 @@ Name: Netdata ``` Click **Add** to apply your new inbound security rule. - - diff --git a/packaging/installer/methods/freebsd.md b/packaging/installer/methods/freebsd.md index 3a33d2e90..05137598b 100644 --- a/packaging/installer/methods/freebsd.md +++ b/packaging/installer/methods/freebsd.md @@ -1,12 +1,3 @@ - - # Install Netdata on FreeBSD > 💡 This document is maintained by Netdata's community, and may not be completely up-to-date. Please double-check the @@ -24,7 +15,7 @@ This step needs root privileges. pkg install bash e2fsprogs-libuuid git curl autoconf automake pkgconf pidof liblz4 libuv json-c cmake gmake ``` -Please respond in the affirmative for any relevant prompts during the installation process. +Please respond in the affirmative for any relevant prompts during the installation process. ## Install Netdata @@ -35,9 +26,10 @@ If you have a Netdata cloud account then clicking on the **Connect Nodes** butto ```sh wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --claim-token --claim-url https://app.netdata.cloud ``` -Please respond in the affirmative for any relevant prompts during the installation process. -Once the installation is completed, you should be able to start monitoring the FreeBSD server using Netdata. +Please respond in the affirmative for any relevant prompts during the installation process. + +Once the installation is completed, you should be able to start monitoring the FreeBSD server using Netdata. Netdata can also be installed via [FreeBSD ports](https://www.freshports.org/net-mgmt/netdata). @@ -67,7 +59,7 @@ gunzip netdata*.tar.gz && tar xf netdata*.tar && rm -rf netdata*.tar Install Netdata in `/opt/netdata`. 
If you want to enable automatic updates, add `--auto-update` or `-u` to install `netdata-updater` in `cron` (**need root permission**): ```sh -cd netdata-v* && ./netdata-installer.sh --install-prefix /opt && cp /opt/netdata/usr/sbin/netdata-claim.sh /usr/sbin/ +cd netdata-v* && ./netdata-installer.sh --install-prefix /opt ``` You also need to enable the `netdata` service in `/etc/rc.conf`: @@ -89,7 +81,8 @@ more about the information collected and how to opt-out, check the [anonymous st page](/docs/netdata-agent/configuration/anonymous-telemetry-events.md). ## Updating the Agent on FreeBSD -If you have not passed the `--auto-update` or `-u` parameter for the installer to enable automatic updating, repeat the last step to update Netdata whenever a new version becomes available. + +If you have not passed the `--auto-update` or `-u` parameter for the installer to enable automatic updating, repeat the last step to update Netdata whenever a new version becomes available. The `netdata-updater.sh` script will update your Agent. ## Optional parameters to alter your installation @@ -102,9 +95,9 @@ The `kickstart.sh` script accepts a number of optional parameters to control how - `--dry-run`: Show what the installer would do, but don’t actually do any of it. - `--dont-start-it`: Don’t auto-start the daemon after installing. This parameter is not guaranteed to work. - `--release-channel`: Specify a particular release channel to install from. Currently supported release channels are: - - `nightly`: Installs a nightly build (this is currently the default). - - `stable`: Installs a stable release. - - `default`: Explicitly request whatever the current default is. + - `nightly`: Installs a nightly build (this is currently the default). + - `stable`: Installs a stable release. + - `default`: Explicitly request whatever the current default is. - `--nightly-channel`: Synonym for `--release-channel nightly`. - `--stable-channel`: Synonym for `--release-channel stable`. - `--auto-update`: Enable automatic updates (this is the default). @@ -113,24 +106,21 @@ The `kickstart.sh` script accepts a number of optional parameters to control how - `--native-only`: Only install if native binary packages are available. - `--static-only`: Only install if a static build is available. - `--build-only`: Only install using a local build. -- `--disable-cloud`: For local builds, don’t build any of the cloud code at all. For native packages and static builds, - use runtime configuration to disable cloud support. -- `--require-cloud`: Only install if Netdata Cloud can be enabled. Overrides `--disable-cloud`. - `--install-prefix`: Specify an installation prefix for local builds (by default, we use a sane prefix based on the type of system). - `--install-version`: Specify the version of Netdata to install. - `--old-install-prefix`: Specify the custom local build's installation prefix that should be removed. - `--local-build-options`: Specify additional options to pass to the installer code when building locally. Only valid if `--build-only` is also specified. - `--static-install-options`: Specify additional options to pass to the static installer code. Only valid if --static-only is also specified. 
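As a concrete illustration of combining the parameters above, the following invocation installs a stable release and skips the automatic start (illustrative only; pick the flags that match your environment, and consider adding `--dry-run` first to preview what the script would do):

```sh
# Illustrative combination of the optional parameters listed above.
wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && \
  sh /tmp/netdata-kickstart.sh --stable-channel --dont-start-it
```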
-The following options are mutually exclusive and specifiy special operations other than trying to install Netdata normally or update an existing install: +The following options are mutually exclusive and specify special operations other than trying to install Netdata normally or update an existing install: - `--reinstall`: If there is an existing install, reinstall it instead of trying to update it. If there is not an existing install, install netdata normally. -- `--reinstall-even-if-unsafe`: If there is an existing install, reinstall it instead of trying to update it, even if doing so is known to potentially break things (for example, if we cannot detect what tyep of installation it is). If there is not an existing install, install Netdata normally. +- `--reinstall-even-if-unsafe`: If there is an existing install, reinstall it instead of trying to update it, even if doing so is known to potentially break things (for example, if we cannot detect what type of installation it is). If there is not an existing install, install Netdata normally. - `--reinstall-clean`: If there is an existing install, uninstall it before trying to install Netdata. Fails if there is no existing install. - `--uninstall`: Uninstall an existing installation of Netdata. Fails if there is no existing install. - `--claim-only`: If there is an existing install, only try to claim it without attempting to update it. If there is no existing install, install and claim Netdata normally. - `--repositories-only`: Only install repository configuration packages instead of doing a full install of Netdata. Automatically sets --native-only. -- `--prepare-offline-install-source`: Instead of insallling the agent, prepare a directory that can be used to install on another system without needing to download anything. See our [offline installation documentation](/packaging/installer/methods/offline.md) for more info. +- `--prepare-offline-install-source`: Instead of installing the agent, prepare a directory that can be used to install on another system without needing to download anything. See our [offline installation documentation](/packaging/installer/methods/offline.md) for more info. Additionally, the following environment variables may be used to further customize how the script runs (most users should not need to use special values for any of these): diff --git a/packaging/installer/methods/gcp.md b/packaging/installer/methods/gcp.md index 5003decb4..59e293690 100644 --- a/packaging/installer/methods/gcp.md +++ b/packaging/installer/methods/gcp.md @@ -1,13 +1,3 @@ - - # Install Netdata on GCP Netdata is fully compatible with the Google Cloud Platform (GCP). @@ -42,12 +32,11 @@ command from a remote system, and it fails, it's likely that a firewall is block Another option is to put Netdata behind web server, which will proxy requests through standard HTTP/HTTPS ports (80/443), which are likely already open on your instance. 
We have a number of guides available: -- [Apache](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-apache.md) -- [Nginx](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-nginx.md) -- [Caddy](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-caddy.md) -- [HAProxy](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-haproxy.md) -- [lighttpd](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-lighttpd.md) - +- [Apache](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-apache.md) +- [Nginx](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-nginx.md) +- [Caddy](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-caddy.md) +- [HAProxy](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-haproxy.md) +- [lighttpd](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-lighttpd.md) To add a firewall rule, go to the [Firewall rules page](https://console.cloud.google.com/networking/firewalls/list) and click **Create firewall rule**. @@ -55,7 +44,7 @@ click **Create firewall rule**. The following configuration has previously worked for Netdata running on GCP instances ([see #7786](https://github.com/netdata/netdata/issues/7786)): -```conf +```text Name: Type: Ingress Targets: @@ -67,4 +56,3 @@ Priority: 1000 Read GCP's [firewall documentation](https://cloud.google.com/vpc/docs/using-firewalls) for specific instructions on how to create a new firewall rule. - diff --git a/packaging/installer/methods/kickstart.md b/packaging/installer/methods/kickstart.md index a525cc70d..ed5a4ae41 100644 --- a/packaging/installer/methods/kickstart.md +++ b/packaging/installer/methods/kickstart.md @@ -5,17 +5,29 @@ import TabItem from '@theme/TabItem'; # Install Netdata with kickstart.sh -![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.requests_by_url_pattern&options=unaligned&dimensions=kickstart&group=sum&after=-3600&label=last+hour&units=kickstart%20downloads&value_color=orange&precision=0) ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.requests_by_url_pattern&options=unaligned&dimensions=kickstart&group=sum&after=-86400&label=today&units=kickstart%20downloads&precision=0) +![last hour badge](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.requests_by_url_pattern&options=unaligned&dimensions=kickstart&group=sum&after=-3600&label=last+hour&units=kickstart%20downloads&precision=0) ![today badge](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.requests_by_url_pattern&options=unaligned&dimensions=kickstart&group=sum&after=-86400&label=today&units=kickstart%20downloads&precision=0) -`kickstart.sh` is the recommended way of installing Netdata. +**`kickstart.sh` is the recommended way of installing Netdata.** This script works on all Linux distributions and macOS environments, by detecting the optimal method of installing Netdata directly to the operating system. +## What does `kickstart.sh` do? + +The `kickstart.sh` script does the following after being downloaded and run using `sh`: + +- Determines what platform you’re running on. 
+- Checks for an existing installation, and if found updates that instead of creating a new installation. +- Attempts to install Netdata using our [official native binary packages](/packaging/installer/methods/packages.md). +- If there are no official native binary packages for your system (or installing that way failed), tries to install using a [static build of Netdata](/packaging/makeself/README.md) if one is available. +- If no static build is available, installs required dependencies and then attempts to install by building Netdata locally (by downloading the sources and building them directly). +- Installs `netdata-updater.sh` to `cron.daily`, so your Netdata installation will be updated with new nightly versions, unless you override that with an [optional parameter](#optional-parameters-to-alter-your-installation). +- Prints a message indicating whether the installation succeeded or failed, for QA purposes. + ## Installation -> :bulb: Tip +> **Tip** > -> If you are unsure whether you want nightly or stable releases, read the [related section](/packaging/installer/README.md#nightly-vs-stable-releases) of our Documentation, detailing the pros and cons of each release type. +> If you are unsure whether you want nightly or stable releases, read the [related section](/docs/netdata-agent/versions-and-platforms.md) of our Documentation, detailing the pros and cons of each release type. To install Netdata, run the following as your normal user:
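For reference, the one-line command looks like this (the `wget` variant; the `curl` variant shown in the macOS instructions works the same way):

```bash
wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh
```

Any of the optional parameters described below can be appended to the `sh /tmp/netdata-kickstart.sh` part of the command.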
-> :bookmark_tabs: Note +> **Note** > > If you plan to also connect the node to Netdata Cloud, make sure to replace `YOUR_CLAIM_TOKEN` with the claim token of your space, -> and `YOUR_ROOM_ID` with the ID of the Room you are willing to connect the node to. - -## Verify script integrity - -To use `md5sum` to verify the integrity of the `kickstart.sh` script you will download using the one-line command above, -run the following: - -```bash -[ "@KICKSTART_CHECKSUM@" = "$(curl -Ss https://get.netdata.cloud/kickstart.sh | md5sum | cut -d ' ' -f 1)" ] && echo "OK, VALID" || echo "FAILED, INVALID" -``` - -If the script is valid, this command will return `OK, VALID`. - -## What does `kickstart.sh` do? - -The `kickstart.sh` script does the following after being downloaded and run using `sh`: - -- Determines what platform you are running on. -- Checks for an existing installation, and if found updates that instead of creating a new install. -- Attempts to install Netdata using our [official native binary packages](#native-packages). -- If there are no official native binary packages for your system (or installing that way failed), tries to install - using a [static build of Netdata](#static-builds) if one is available. -- If no static build is available, installs required dependencies and then attempts to install by - [building Netdata locally](#local-builds) (by downloading the sources and building them directly). -- Installs `netdata-updater.sh` to `cron.daily`, so your Netdata installation will be updated with new nightly - versions, unless you override that with an [optional parameter](#optional-parameters-to-alter-your-installation). -- Prints a message whether installation succeeded or failed for QA purposes. - -## Start stop or restart the Netdata Agent - -You will most often need to _restart_ the Agent to load new or edited configuration files. - -> **Note** -> Stopping or restarting the Netdata Agent will cause gaps in stored metrics until the `netdata` process initiates collectors and the database engine. -> -> You do not need to restart the Netdata Agent between changes to health configuration files, see the relevant section on [reloading health configuration](/src/health/REFERENCE.md#reload-health-configuration). - -### Using `systemctl` or `service` - -This is the recommended way to start, stop, or restart the Netdata daemon. - -- To **start** Netdata, run `sudo systemctl start netdata`. -- To **stop** Netdata, run `sudo systemctl stop netdata`. -- To **restart** Netdata, run `sudo systemctl restart netdata`. - -If the above commands fail, or you know that you're using a non-systemd system, try using the `service` command: - -- Starting: `sudo service netdata start`. -- Stopping: `sudo service netdata stop`. -- Restarting: `sudo service netdata restart`. - -### Using the `netdata` command - -Use the `netdata` command, typically located at `/usr/sbin/netdata`, to start the Netdata daemon: - -```bash -sudo netdata -``` - -If you start the daemon this way, close it with `sudo killall netdata`. - -### Shutdown using `netdatacli` - -The Netdata Agent also comes with a [CLI tool](/src/cli/README.md) capable of performing shutdowns. Start the Agent back up using your preferred method listed above. - -```bash -sudo netdatacli shutdown-agent -``` - -## Starting Netdata at boot - -In the `system` directory you can find scripts and configurations for the -various distros. - -### systemd - -The installer already installs `netdata.service` if it detects a systemd system. 
- -To install `netdata.service` by hand, run: - -```sh -# stop Netdata -killall netdata - -# copy netdata.service to systemd -cp system/netdata.service /etc/systemd/system/ - -# let systemd know there is a new service -systemctl daemon-reload - -# enable Netdata at boot -systemctl enable netdata - -# start Netdata -systemctl start netdata -``` - -### init.d - -In the system directory you can find `netdata-lsb`. Copy it to the proper place according to your distribution's documentation. For Ubuntu, this can be done via running the following commands as root. - -```sh -# copy the Netdata startup file to /etc/init.d -cp system/netdata-lsb /etc/init.d/netdata - -# make sure it is executable -chmod +x /etc/init.d/netdata - -# enable it -update-rc.d netdata defaults -``` - -### openrc / Gentoo Linux - -In the `system` directory you can find `netdata-openrc`. Copy it to the proper -place according to your distribution documentation. - -### CentOS / Red Hat Enterprise Linux - -For older versions of RHEL/CentOS that don't have systemd, an init script is included in the system directory. This can be installed by running the following commands as root. - -```sh -# copy the Netdata startup file to /etc/init.d -cp system/netdata-init-d /etc/init.d/netdata - -# make sure it is executable -chmod +x /etc/init.d/netdata - -# enable it -chkconfig --add netdata -``` - -_There have been some recent work on the init script, see the following PR _ - -### Other operating systems - -You can start Netdata by running it from `/etc/rc.local` or your system's equivalent. +> and `YOUR_ROOM_ID` with the ID of the Room you want to connect the node to. ## Optional parameters to alter your installation @@ -180,9 +56,9 @@ The `kickstart.sh` script accepts a number of optional parameters to control how ### destination directory - `--install-prefix` - Specify an installation prefix for local builds (by default, we use a sane prefix based on the type of system). + Specify a custom installation directory for local builds. If not provided, a default directory will be used based on your system. - `--old-install-prefix` - Specify the custom local build's installation prefix that should be removed. + Specify the previous custom installation directory to be removed during the update process. ### interactivity @@ -211,7 +87,7 @@ By default, the script installs the nightly channel of Netdata, providing you wi ### install type -By default the script will prefer native builds when they are available, and then static builds. It will fallback to build from source when all others are not available. +By default, the script will prefer native builds when they’re available, and then static builds. It will fall back to building from source when neither is available. - `--native-only` Only install if native binary packages are available. It fails otherwise. @@ -224,7 +100,7 @@ By default the script will prefer native builds when they are available, and the ### automatic updates -By default the script installs a cron job to automatically update Netdata to the latest version of the release channel used. +By default, the script installs a cron job to automatically update Netdata to the latest version of the release channel used. - `--auto-update` Enable automatic updates (this is the default).
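If you later want to confirm that the update job was actually set up, you can look for the updater hook the script installs. The exact location varies by platform, so treat the paths below as common examples rather than a guarantee:

```bash
# Many systems get a daily cron entry:
ls /etc/cron.daily/netdata-updater* 2>/dev/null

# Systems managed by systemd may use a timer instead:
systemctl list-timers 'netdata-updater*' --no-pager 2>/dev/null
```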
@@ -233,22 +109,18 @@ By default the script installs a cron job to automatically update Netdata to the latest version of the release channel used. ### Netdata Cloud related options -By default, the kickstart script will provide a Netdata agent installation that can potentially communicate with Netdata Cloud, if of course the Netdata agent is further configured to do so. +By default, the kickstart script will provide a Netdata agent installation that can potentially communicate with Netdata Cloud if the Netdata agent is further configured to do so. - `--claim-token` - Specify a unique claiming token associated with your Space in Netdata Cloud to be used to connect to the node after the install. This will enable, connect and claim the Netdata agent, to Netdata Cloud. + Specify a unique claiming token associated with your Space in Netdata Cloud to be used to connect to the node after the installation. This will connect the Netdata Agent to Netdata Cloud and claim it. - `--claim-url` Specify a URL to use when connecting to the cloud. Defaults to `https://app.netdata.cloud`. Use this option to change the Netdata Cloud URL to point to your Netdata Cloud installation. - `--claim-rooms` Specify a comma-separated list of tokens for each Room this node should appear in. - `--claim-proxy` - Specify a proxy to use when connecting to the cloud in the form of `http://[user:pass@]host:ip` for an HTTP(S) proxy. See [connecting through a proxy](/src/claim/README.md#connect-through-a-proxy) for details. + Specify a proxy to use when connecting to the cloud in the form of `http://[user:pass@]host:ip` for an HTTP(S) proxy. See [connecting through a proxy](/src/claim/README.md#automatically-via-a-provisioning-system-or-the-command-line) for details. - `--claim-only` - If there is an existing install, only try to claim it without attempting to update it. If there is no existing install, install and claim Netdata normally. -- `--require-cloud` - Only install if Netdata Cloud can be enabled. -- `--disable-cloud` - For local builds, don’t build any of the Netdata Cloud code at all. For native packages and static builds, use runtime configuration to disable Netdata Cloud support. + If there is an existing installation, only try to claim it without attempting to update it. If there is no existing installation, install and claim Netdata normally. ### anonymous telemetry @@ -260,11 +132,11 @@ By default, the agent is sending anonymous telemetry data to help us take identi ### reinstalling - `--reinstall` - If there is an existing install, reinstall it instead of trying to update it. If there is not an existing install, install netdata normally. + If there is an existing installation, reinstall it instead of trying to update it. If there is not an existing installation, install Netdata normally. - `--reinstall-even-if-unsafe` - If there is an existing install, reinstall it instead of trying to update it, even if doing so is known to potentially break things (for example, if we cannot detect what type of installation it is). If there is not an existing install, install Netdata normally. + If there is an existing installation, reinstall it instead of trying to update it, even if doing so is known to potentially break things (for example, if we can’t detect what type of installation it is). If there is not an existing installation, install Netdata normally. - `--reinstall-clean` - If there is an existing install, uninstall it before trying to install Netdata. Fails if there is no existing install.
+ If there is an existing installation, uninstall it before trying to install Netdata. Fails if there is no existing installation. ### uninstall @@ -274,7 +146,7 @@ By default, the agent is sending anonymous telemetry data to help us take identi ### other options - `--dry-run` - Show what the installer would do, but don’t actually do any of it. + Simulates the installation process without making any changes to your system. This allows you to review the steps and potential impacts before proceeding with the actual installation. - `--dont-start-it` Don’t auto-start the daemon after installing. This parameter is not guaranteed to work. - `--distro-override` @@ -285,48 +157,28 @@ The following options are mutually exclusive and specify special operations othe - `--repositories-only` Only install repository configuration packages instead of doing a full install of Netdata. Automatically sets --native-only. - `--prepare-offline-install-source` - Instead of insallling the agent, prepare a directory that can be used to install on another system without needing to download anything. See our [offline installation documentation](/packaging/installer/methods/offline.md) for more info. + Instead of installing the agent, prepare a directory that can be used to install on another system without needing to download anything. See our [offline installation documentation](/packaging/installer/methods/offline.md) for more info. ### environment variables Additionally, the following environment variables may be used to further customize how the script runs (most users -should not need to use special values for any of these): +shouldn’t need to use special values for any of these): - `TMPDIR`: Used to specify where to put temporary files. On most systems, the default we select automatically should be fine. The user running the script needs to both be able to write files to the temporary directory, and run files from that location. -- `ROOTCMD`: Used to specify a command to use to run another command with root privileges if needed. By default - we try to use sudo, doas, or pkexec (in that order of preference), but if you need special options for one of +- `ROOTCMD`: Used to specify a command to use to run another command with root privileges if needed. By default, + we try to use sudo, doas, or pkexec (in that order of preference). However, if you need special options for one of those to work, or have a different tool to do the same thing on your system, you can specify it here. - `DISABLE_TELEMETRY`: If set to a value other than 0, behave as if `--disable-telemetry` was specified. -## Native packages - -We publish [official DEB/RPM packages](/packaging/installer/methods/packages.md) for a number of common Linux distributions as part of our releases and nightly -builds. These packages are available for 64-bit x86 systems. Depending on the distribution and release they may -also be available for 32-bit x86, ARMv7, and AArch64 systems. If a native package is available, it will be used as the -default installation method. This allows you to handle Netdata updates as part of your usual system update procedure. - -If you want to enforce the usage of native packages and have the installer return a failure if they are not available, -you can do so by adding `--native-only` to the options you pass to the installer. - -## Static builds - -We publish pre-built [static builds](/packaging/makeself/README.md) of Netdata for Linux systems. Currently, these are published for 64-bit x86, ARMv7, -AArch64, and POWER8+ hardware. 
These static builds are able to operate in a mostly self-contained manner and only -require a POSIX compliant shell and a supported init system. These static builds install under `/opt/netdata`. If -you are on a platform which we provide static builds for but do not provide native packages for, a static build -will be used by default for installation. - -If you want to enforce the usage of a static build and have the installer return a failure if one is not available, -you can do so by adding `--static-only` to the options you pass to the installer. +## Verify script integrity -## Local builds +To use `md5sum` to verify the integrity of the `kickstart.sh` script you will download using the one-line command above, +run the following: -For systems which do not have available native packages or static builds, we support building Netdata locally on -the system it will be installed on. When using this approach, the installer will attempt to install any required -dependencies for building Netdata, though this may not always work correctly. +```bash +[ "@KICKSTART_CHECKSUM@" = "$(curl -Ss https://get.netdata.cloud/kickstart.sh | md5sum | cut -d ' ' -f 1)" ] && echo "OK, VALID" || echo "FAILED, INVALID" +``` -If you want to enforce the usage of a local build (perhaps because you require a custom installation prefix, -which is not supported with native packages or static builds), you can do so by adding `--build-only` to the -options you pass to the installer. +If the script is valid, this command will return `OK, VALID`. diff --git a/packaging/installer/methods/kubernetes.md b/packaging/installer/methods/kubernetes.md index 6a0dee98a..3740dcf00 100644 --- a/packaging/installer/methods/kubernetes.md +++ b/packaging/installer/methods/kubernetes.md @@ -35,21 +35,21 @@ The installation process securely connects your Kubernetes cluster to stream met 1. Add the Netdata Helm chart repository by running: - ```bash - helm repo add netdata https://netdata.github.io/helmchart/ - ``` + ```bash + helm repo add netdata https://netdata.github.io/helmchart/ + ``` 2. To install Netdata using the `helm install` command, run: - ```bash - helm install netdata netdata/netdata - ``` + ```bash + helm install netdata netdata/netdata + ``` - > ### Note - > - > If you plan to connect the node to Netdata Cloud, you can find the command with the right parameters by clicking the "Add Nodes" button in your Space's Nodes tab. + > **Note** + > + > If you plan to connect the node to Netdata Cloud, you can find the command with the right parameters by clicking the "Add Nodes" button in your Space's Nodes tab. - For more installation options, please read our [Netdata Helm chart for Kubernetes](https://github.com/netdata/helmchart/blob/master/charts/netdata/README.md) reference. + For more installation options, please read our [Netdata Helm chart for Kubernetes](https://github.com/netdata/helmchart/blob/master/charts/netdata/README.md) reference. #### Expected Result @@ -66,50 +66,50 @@ On an existing installation, in order to connect it to Netdata Cloud you will ne 1. You can start with creating a file called `override.yml` - ```bash - touch override.yml - ``` - + ```bash + touch override.yml + ``` + 2. Paste the following into your `override.yml` file. 
- ```yaml - parent: - claiming: - enabled: true - token: YOUR_CLAIM_TOKEN - rooms: YOUR_ROOM_ID_A,YOUR_ROOM_ID_B - - child: - claiming: - enabled: true - token: YOUR_CLAIM_TOKEN - rooms: YOUR_ROOM_ID_A,YOUR_ROOM_ID_B - configs: - netdata: - data: | - [global] - memory mode = ram - history = 3600 - [health] - enabled = no - ``` - - > :bookmark_tabs: Note - > - > Make sure to replace `YOUR_CLAIM_TOKEN` with the claim token of your space, - > and `YOUR_ROOM_ID` with the ID of the Room you are willing to connect to. - - These settings connect your `parent`/`child` nodes to Netdata Cloud and store more metrics in the nodes' time-series databases. - - > :bookmark_tabs: Info - > - > These override settings, along with the Helm chart's defaults, will retain an hour's worth of metrics (`history = 3600`, or `3600 seconds`) on each child node. Based on your metrics retention needs, and the resources available on your cluster, you may want to increase the `history` setting. + ```yaml + parent: + claiming: + enabled: true + token: YOUR_CLAIM_TOKEN + rooms: YOUR_ROOM_ID_A,YOUR_ROOM_ID_B + + child: + claiming: + enabled: true + token: YOUR_CLAIM_TOKEN + rooms: YOUR_ROOM_ID_A,YOUR_ROOM_ID_B + configs: + netdata: + data: | + [db] + db = ram + retention = 3600 + [health] + enabled = no + ``` + + > **Note** + > + > Make sure to replace `YOUR_CLAIM_TOKEN` with the claim token of your space, + > and `YOUR_ROOM_ID` with the ID of the Room you want to connect to. + + These settings connect your `parent`/`child` nodes to Netdata Cloud and store more metrics in the nodes' time-series databases. + + > **Info** + > + > These override settings, along with the Helm chart's defaults, will retain an hour's worth of metrics (`retention = 3600`, or `3600 seconds`) on each child node. Based on your metrics retention needs, and the resources available on your cluster, you may want to increase the `retention` setting. 3. To apply these new settings, run: - ```bash - helm upgrade -f override.yml netdata netdata/netdata - ``` + ```bash + helm upgrade -f override.yml netdata netdata/netdata + ``` #### Expected Result @@ -188,13 +188,3 @@ To update Netdata's Helm chart to the latest version, run `helm repo update`, th helm repo update helm upgrade netdata netdata/netdata ``` - -## What's next? - -[Start Kubernetes monitoring](/docs/dashboards-and-charts/kubernetes-tab.md) in Netdata Cloud, which comes with meaningful visualizations out of the box. - -### Related reference documentation - -- [Netdata Cloud · Kubernetes monitoring](/docs/dashboards-and-charts/kubernetes-tab.md) -- [Netdata Helm chart](https://github.com/netdata/helmchart) -- [Netdata service discovery](https://github.com/netdata/agent-service-discovery/) diff --git a/packaging/installer/methods/macos.md index 31aaebf98..0843753b6 100644 --- a/packaging/installer/methods/macos.md +++ b/packaging/installer/methods/macos.md @@ -1,39 +1,33 @@ - - # Install Netdata on macOS -Netdata works on macOS, albeit with some limitations. -The number of charts displaying system metrics is limited, but you can use any of Netdata's [external plugins](/src/collectors/plugins.d/README.md) to monitor any services you might have installed on your macOS system. +Netdata works on macOS, albeit with some limitations. +The number of charts displaying system metrics is limited, but you can use any of Netdata's [external plugins](/src/plugins.d/README.md) to monitor any services you might have installed on your macOS system.
You could also use a macOS system as the parent node in a [streaming configuration](/src/streaming/README.md). -You can install Netdata in one of the three following ways: +You can install Netdata in one of the three following ways: -- **[Install Netdata with the our automatic one-line installation script (recommended)](#install-netdata-with-our-automatic-one-line-installation-script)**, +- **[Install Netdata with our automatic one-line installation script (recommended)](#install-netdata-with-our-automatic-one-line-installation-script)**, - [Install Netdata via Homebrew](#install-netdata-via-homebrew) - [Install Netdata from source](#install-netdata-from-source) -Each of these installation option requires [Homebrew](https://brew.sh/) for handling dependencies. +Each of these installation options requires [Homebrew](https://brew.sh/) for handling dependencies. > The Netdata Homebrew package is community-created and -maintained. > Community-maintained packages _may_ receive support from Netdata, but are only a best-effort affair. Learn more about [Netdata's platform support policy](/docs/netdata-agent/versions-and-platforms.md). ## Install Netdata with our automatic one-line installation script -**Local Netdata Agent installation** -To install Netdata using our automatic [kickstart](/packaging/installer/README.md#automatic-one-line-installation-script) open a new terminal and run: +### Local Netdata Agent installation + +To install Netdata using our automatic [kickstart](/packaging/installer/methods/kickstart.md) script, open a new terminal and run: ```bash curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh ``` + The Netdata Agent is installed under `/usr/local/netdata`. Dependencies are handled via Homebrew. -**Automatically connect to Netdata Cloud during installation** +### Automatically connect to Netdata Cloud during installation The `kickstart.sh` script accepts additional parameters to automatically [connect](/src/claim/README.md) your node to Netdata Cloud immediately after installation. Find the `token` and `rooms` strings by [signing in to Netdata @@ -44,38 +38,42 @@ area](/docs/netdata-cloud/organize-your-infrastructure-invite-your-team.md#netda after the install. - `--claim-rooms`: Specify a comma-separated list of tokens for each Room this node should appear in. - `--claim-proxy`: Specify a proxy to use when connecting to the cloud in the form of `http://[user:pass@]host:ip` for an HTTP(S) proxy. - See [connecting through a proxy](/src/claim/README.md#connect-through-a-proxy) for details. + See [connecting through a proxy](/src/claim/README.md#automatically-via-a-provisioning-system-or-the-command-line) for details. - `--claim-url`: Specify a URL to use when connecting to the cloud. Defaults to `https://app.netdata.cloud`. -For example: +For example: + ```bash curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --install-prefix /usr/local/ --claim-token TOKEN --claim-rooms ROOM1,ROOM2 --claim-url https://app.netdata.cloud ``` + The Netdata Agent is installed under `/usr/local/netdata` on your machine. Your machine will also show up as a node in your Netdata Cloud. -If you experience issues while claiming your node, follow the steps in our [Troubleshooting](/src/claim/README.md#troubleshooting) documentation. +If you experience issues while claiming your node, follow the steps in our [Troubleshoot](/src/claim/README.md#troubleshoot) documentation.
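To quickly verify that the Agent came up after installation, you can query its local API (a simple sanity check, assuming the default listening port of 19999):

```bash
curl -s http://localhost:19999/api/v1/info | head
```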
+ ## Install Netdata via Homebrew ### For macOS Intel -To install Netdata and all its dependencies, run Homebrew using the following command: +To install Netdata and all its dependencies, run Homebrew using the following command: ```sh brew install netdata ``` -Homebrew will place your Netdata configuration directory at `/usr/local/etc/netdata/`. + +Homebrew will place your Netdata configuration directory at `/usr/local/etc/netdata/`. Use the `edit-config` script and the files in this directory to configure Netdata. For reference, you can find stock configuration files at `/usr/local/Cellar/netdata/{NETDATA_VERSION}/lib/netdata/conf.d/`. ### For Apple Silicon -To install Netdata and all its dependencies, run Homebrew using the following command: +To install Netdata and all its dependencies, run Homebrew using the following command: ```sh brew install netdata ``` -Homebrew will place your Netdata configuration directory at `/opt/homebrew/etc/netdata/`. +Homebrew will place your Netdata configuration directory at `/opt/homebrew/etc/netdata/`. Use the `edit-config` script and the files in this directory to configure Netdata. For reference, you can find stock configuration files at `/opt/homebrew/Cellar/netdata/{NETDATA_VERSION}/lib/netdata/conf.d/`. @@ -89,13 +87,13 @@ We don't recommend installing Netdata from source on macOS, as it can be difficu xcode-select --install ``` -2. Click **Install** on the Software Update popup window that appears. +2. Click **Install** on the Software Update popup window that appears. 3. Use the same terminal session to install some of Netdata's prerequisites using Homebrew. If you don't want to use [Netdata Cloud](/docs/netdata-cloud/README.md), you can omit `cmake`. ```bash brew install ossp-uuid autoconf automake pkg-config libuv lz4 json-c openssl libtool cmake ``` - + 4. Download Netdata from our GitHub repository: ```bash @@ -109,6 +107,6 @@ We don't recommend installing Netdata from source on macOS, as it can be difficu sudo ./netdata-installer.sh --install-prefix /usr/local ``` -> Your Netdata configuration directory will be at `/usr/local/netdata/`. +> Your Netdata configuration directory will be at `/usr/local/netdata/`. > Your stock configuration directory will be at `/usr/local/lib/netdata/conf.d/`. > The installer will also install a startup plist to start Netdata when your macOS system boots. diff --git a/packaging/installer/methods/manual.md b/packaging/installer/methods/manual.md index 31bc392e5..0b7bdb279 100644 --- a/packaging/installer/methods/manual.md +++ b/packaging/installer/methods/manual.md @@ -1,22 +1,12 @@ - - # Install Netdata on Linux from a Git checkout To install the latest git version of Netdata, please follow these 2 steps: -1. [Prepare your system](#prepare-your-system) +1. [Prepare your system](#prepare-your-system) Install the required packages on your system. -2. [Install Netdata](#install-netdata) +2. [Install Netdata](#install-netdata) Download and install Netdata. You can also update it the same way. @@ -29,23 +19,23 @@ Use our automatic requirements installer (_no need to be `root`_), which attempt should be installed on your system to build and run Netdata. It supports a large variety of major Linux distributions and other operating systems and is regularly tested. 
You can find this tool [here](https://raw.githubusercontent.com/netdata/netdata/master/packaging/installer/install-required-packages.sh) or run it directly with `bash <(curl -sSL https://raw.githubusercontent.com/netdata/netdata/master/packaging/installer/install-required-packages.sh)`. Otherwise read on for how to get requires packages manually: -- **Alpine** Linux and its derivatives - - You have to install `bash` yourself, before using the installer. +- **Alpine** Linux and its derivatives + - You have to install `bash` yourself, before using the installer. -- **Gentoo** Linux and its derivatives +- **Gentoo** Linux and its derivatives -- **Debian** Linux and its derivatives (including **Ubuntu**, **Mint**) +- **Debian** Linux and its derivatives (including **Ubuntu**, **Mint**) -- **Red Hat Enterprise Linux** and its derivatives (including **Fedora**, **CentOS**, **Amazon Machine Image**) - - Please note that for RHEL/CentOS you need +- **Red Hat Enterprise Linux** and its derivatives (including **Fedora**, **CentOS**, **Amazon Machine Image**) + - Please note that for RHEL/CentOS you need [EPEL](http://www.tecmint.com/how-to-enable-epel-repository-for-rhel-centos-6-5/). In addition, RHEL/CentOS version 6 also need [OKay](https://okay.com.mx) for package libuv version 1. - - CentOS 8 / RHEL 8 requires a bit of extra work. See the dedicated section below. + - CentOS 8 / RHEL 8 requires a bit of extra work. See the dedicated section below. -- **SUSE** Linux and its derivatives (including **openSUSE**) +- **SUSE** Linux and its derivatives (including **openSUSE**) -- **SLE12** Must have your system registered with SUSE Customer Center or have the DVD. See +- **SLE12** Must have your system registered with SUSE Customer Center or have the DVD. See [#1162](https://github.com/netdata/netdata/issues/1162) Install the packages for having a **basic Netdata installation** (system monitoring and many applications, without `mysql` / `mariadb`, `named`, hardware sensors and `SNMP`): @@ -85,51 +75,51 @@ zypper install zlib-devel libuuid-devel libuv-devel liblz4-devel libopenssl-deve Once Netdata is compiled, to run it the following packages are required (already installed using the above commands): -| package | description| -|:-----:|-----------| -| `libuuid` | part of `util-linux` for GUIDs management| -| `zlib` | gzip compression for the internal Netdata web server| -| `libuv` | Multi-platform support library with a focus on asynchronous I/O, version 1 or greater| +| package | description | +|:---------:|---------------------------------------------------------------------------------------| +| `libuuid` | part of `util-linux` for GUIDs management | +| `zlib` | gzip compression for the internal Netdata web server | +| `libuv` | Multi-platform support library with a focus on asynchronous I/O, version 1 or greater | -*Netdata will fail to start without the above.* +_Netdata will fail to start without the above._ Netdata plugins and various aspects of Netdata can be enabled or benefit when these are installed (they are optional): -| package |description| -|:-----:|-----------| -| `bash`|for shell plugins and **alert notifications**| -| `curl`|for shell plugins and **alert notifications**| -| `iproute` or `iproute2`|for monitoring **Linux traffic QoS**
use `iproute2` if `iproute` reports as not available or obsolete| -| `python`|for most of the external plugins| -| `python-yaml`|used for monitoring **beanstalkd**| -| `python-beanstalkc`|used for monitoring **beanstalkd**| -| `python-mysqldb`
or
`python-pymysql`|used for monitoring **mysql** or **mariadb** databases
`python-mysqldb` is a lot faster and thus preferred| -| `nodejs`|used for `node.js` plugins for monitoring **named** and **SNMP** devices| -| `lm-sensors`|for monitoring **hardware sensors**| -| `libelf`|for monitoring kernel-level metrics using eBPF| -| `libmnl`|for collecting netfilter metrics| -| `netcat`|for shell plugins to collect metrics from remote systems| - -*Netdata will greatly benefit if you have the above packages installed, but it will still work without them.* +| package | description | +|:--------------------------------------------:|----------------------------------------------------------------------------------------------------------------| +| `bash` | for shell plugins and **alert notifications** | +| `curl` | for shell plugins and **alert notifications** | +| `iproute` or `iproute2` | for monitoring **Linux traffic QoS**
use `iproute2` if `iproute` reports as not available or obsolete | +| `python` | for most of the external plugins | +| `python-yaml` | used for monitoring **beanstalkd** | +| `python-beanstalkc` | used for monitoring **beanstalkd** | +| `python-mysqldb`
or
`python-pymysql` | used for monitoring **mysql** or **mariadb** databases
`python-mysqldb` is a lot faster and thus preferred | +| `nodejs` | used for `node.js` plugins for monitoring **named** and **SNMP** devices | +| `lm-sensors` | for monitoring **hardware sensors** | +| `libelf` | for monitoring kernel-level metrics using eBPF | +| `libmnl` | for collecting netfilter metrics | +| `netcat` | for shell plugins to collect metrics from remote systems | + +_Netdata will greatly benefit if you have the above packages installed, but it will still work without them._ Netdata DB engine can be enabled when these are installed (they are optional): -| package | description| -|:-----:|-----------| -| `liblz4` | Extremely fast compression algorithm, version r129 or greater| -| `openssl`| Cryptography and SSL/TLS toolkit| +| package | description | +|:---------:|---------------------------------------------------------------| +| `liblz4` | Extremely fast compression algorithm, version r129 or greater | +| `openssl` | Cryptography and SSL/TLS toolkit | -*Netdata will greatly benefit if you have the above packages installed, but it will still work without them.* +_Netdata will greatly benefit if you have the above packages installed, but it will still work without them._ Netdata Cloud support may require the following packages to be installed: -| package | description | -|:---------:|--------------------------------------------------------------------------------------------------------------------------------------| -| `cmake` | Needed at build time if you aren't using your distribution's version of libwebsockets or are building on a platform other than Linux | -| `openssl` | Needed to secure communications with the Netdata Cloud | -| `protobuf`| Used for the new Cloud<->Agent binary protocol | +| package | description | +|:----------:|--------------------------------------------------------------------------------------------------------------------------------------| +| `cmake` | Needed at build time if you aren't using your distribution's version of libwebsockets or are building on a platform other than Linux | +| `openssl` | Needed to secure communications with the Netdata Cloud | +| `protobuf` | Used for the new Cloud<->Agent binary protocol | -*Netdata will greatly benefit if you have the above packages installed, but it will still work without them.* +_Netdata will greatly benefit if you have the above packages installed, but it will still work without them._ ### CentOS / RHEL 6.x @@ -189,28 +179,28 @@ cd netdata ./netdata-installer.sh ``` -- If you don't want to run it straight-away, add `--dont-start-it` option. +- If you don't want to run it straight-away, add `--dont-start-it` option. -- You can also append `--stable-channel` to fetch and install only the official releases from GitHub, instead of the nightly builds. +- You can also append `--stable-channel` to fetch and install only the official releases from GitHub, instead of the nightly builds. -- If you don't want to install it on the default directories, you can run the installer like this: `./netdata-installer.sh --install-prefix /opt`. This one will install Netdata in `/opt/netdata`. +- If you don't want to install it on the default directories, you can run the installer like this: `./netdata-installer.sh --install-prefix /opt`. This one will install Netdata in `/opt/netdata`. -- If your server does not have access to the internet and you have manually put the installation directory on your server, you will need to pass the option `--disable-go` to the installer. 
The option will prevent the installer from attempting to download and install `go.d.plugin`. +- If your server does not have access to the internet and you have manually put the installation directory on your server, you will need to pass the option `--disable-go` to the installer. The option will prevent the installer from attempting to download and install `go.d.plugin`. ## Optional parameters to alter your installation `netdata-installer.sh` accepts a few parameters to customize your installation: -- `--dont-wait`: Enable automated installs by not prompting for permission to install any required packages. -- `--dont-start-it`: Prevent the installer from starting Netdata automatically. -- `--stable-channel`: Automatically update only on the release of new major versions. -- `--nightly-channel`: Automatically update on every new nightly build. -- `--disable-telemetry`: Opt-out of [anonymous statistics](/docs/netdata-agent/configuration/anonymous-telemetry-events.md) we use to make +- `--dont-wait`: Enable automated installs by not prompting for permission to install any required packages. +- `--dont-start-it`: Prevent the installer from starting Netdata automatically. +- `--stable-channel`: Automatically update only on the release of new major versions. +- `--nightly-channel`: Automatically update on every new nightly build. +- `--disable-telemetry`: Opt out of [anonymous statistics](/docs/netdata-agent/configuration/anonymous-telemetry-events.md) we use to make Netdata better. -- `--no-updates`: Prevent automatic updates of any kind. -- `--reinstall`: If an existing install is detected, reinstall instead of trying to update it. Note that this +- `--no-updates`: Prevent automatic updates of any kind. +- `--reinstall`: If an existing install is detected, reinstall instead of trying to update it. Note that this cannot be used to change installation types. -- `--local-files`: Used for [offline installations](/packaging/installer/methods/offline.md). Pass four file paths: the Netdata +- `--local-files`: Used for [offline installations](/packaging/installer/methods/offline.md). Pass four file paths: the Netdata tarball, the checksum file, the go.d plugin tarball, and the go.d plugin config tarball, to force kickstart to run the process using those files. This option conflicts with the `--stable-channel` option. If you set this _and_ `--stable-channel`, Netdata will use the local files. @@ -226,23 +216,19 @@ See the [connect to cloud](/src/claim/README.md) doc for details on connecting a Our current build process unfortunately has some issues when using certain configurations of the `clang` C compiler on Linux. -If the installation fails with errors like `/bin/ld: externaldeps/libwebsockets/libwebsockets.a(context.c.o): relocation R_X86_64_32 against '.rodata.str1.1' can not be used when making a PIE object; recompile with -fPIC`, and you are trying to build with `clang` on Linux, you will need to build Netdata using GCC to get a fully functional install. +If the installation fails with errors like `/bin/ld: externaldeps/libwebsockets/libwebsockets.a(context.c.o): relocation R_X86_64_32 against '.rodata.str1.1' can not be used when making a PIE object; recompile with -fPIC`, and you are trying to build with `clang` on Linux, you will need to build Netdata using GCC to get a fully functional install. In most cases, you can do this by running `CC=gcc ./netdata-installer.sh`. - ### Perform a cleanup in your netdata repo The Netdata repo consists of the main git tree and its submodules.
Whether working on a fork or on the main repo, you need to make sure that there are no "leftover" artifacts from previous builds and that your submodules are up to date with the **corresponding checkouts**. -> #### Important: Make sure that you have commited any work in progress, before you proceed the with the clean up instruction below - +> #### Important: Make sure that you have committed any work in progress before you proceed with the cleanup instructions below ```sh git clean -dfx && git submodule foreach 'git clean -dfx' && git submodule update --recursive --init ``` - -> Note: In previous builds, you may have created artifacts belonging to an another user (e.g root), so you may need to run -> each of the _git clean_ commands as sudoer. +> Note: In previous builds, you may have created artifacts belonging to another user (e.g. root), so you may need to run each of the _git clean_ commands as a sudoer. diff --git a/packaging/installer/methods/methods.md b/packaging/installer/methods/methods.md index bc6e879a8..701f3b932 100644 --- a/packaging/installer/methods/methods.md +++ b/packaging/installer/methods/methods.md @@ -1,13 +1,3 @@ - - # Installation methods Netdata can be installed: @@ -21,6 +11,4 @@ The [one line installer kickstart.sh](/packaging/installer/methods/kickstart.md) picks the most appropriate method out of the first three for any system and is the recommended installation method, if you don't use containers. -`kickstart.sh` can also be used for -[offline installation](/packaging/installer/methods/offline.md), -suitable for air-gapped systems. +`kickstart.sh` can also be used for [offline installation](/packaging/installer/methods/offline.md), suitable for air-gapped systems. diff --git a/packaging/installer/methods/no_ipv4.md b/packaging/installer/methods/no_ipv4.md new file mode 100644 index 000000000..c22b64085 --- /dev/null +++ b/packaging/installer/methods/no_ipv4.md @@ -0,0 +1,13 @@ +# Installing on hosts without IPv4 connectivity + +Our regular installation process requires access to a number of GitHub services that do not have IPv6 connectivity. + +As such, using the kickstart install script on such hosts generally does not work, and will typically fail with an error from cURL or wget about connection timeouts. + +You can check if your system is affected by this by attempting to connect to (or ping) `https://api.github.com/`. Failing to connect indicates that this issue affects you. + +There are three potential workarounds for this: + +1. You can configure your system with a proper IPv6 transition mechanism, such as NAT64. GitHub’s anachronisms affect many projects other than just Netdata. There are, unfortunately, a number of other services out there that do not provide IPv6 connectivity, so taking this route is likely to save you time in the future as well. +2. If you are using a system that we publish native packages for (see our [platform support policy](/docs/netdata-agent/versions-and-platforms.md) for more details), you can manually set up our native package repositories as outlined in our [native package install documentation](/packaging/installer/methods/packages.md). Our official package repositories do provide service over IPv6, so they work without issue on hosts without IPv4 connectivity. +3. If neither of the above options works for you, you can still install using our [offline installation instructions](/packaging/installer/methods/offline.md), though do note that the offline install source must be prepared from a system with IPv4 connectivity.
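For a quick test of whether this applies to you, a minimal sketch (assuming `curl` is available on the host) that mirrors the connectivity check described above:

```sh
# If this request times out or fails to resolve, this host cannot reach
# GitHub, and one of the three workarounds above is needed.
curl -sI --max-time 10 https://api.github.com/ >/dev/null \
  && echo "GitHub reachable: the kickstart script should work" \
  || echo "GitHub unreachable: use one of the workarounds above"
```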
diff --git a/packaging/installer/methods/offline.md b/packaging/installer/methods/offline.md index 83155848f..3a64135de 100644 --- a/packaging/installer/methods/offline.md +++ b/packaging/installer/methods/offline.md @@ -1,13 +1,3 @@ - - # Install Netdata on offline systems Our kickstart install script provides support for installing the Netdata Agent on air-gapped systems which do not have a @@ -53,7 +43,6 @@ Once you have prepared the offline install source, you need to copy the offline target system. This can be done in any manner you like, as long as filenames are not changed. After copying the files, simply run the `install.sh` script located in the -offline install source directory. It accepts all the [same options as the kickstart -script](/packaging/installer/methods/kickstart.md#optional-parameters-to-alter-your-installation) for further +offline install source directory. It accepts all the [same options as the kickstart script](/packaging/installer/methods/kickstart.md#optional-parameters-to-alter-your-installation) for further customization of the installation, though it will default to not enabling automatic updates (as they are not supported on offline installs). diff --git a/packaging/installer/methods/packages.md b/packaging/installer/methods/packages.md index 90556c1ab..eff33118c 100644 --- a/packaging/installer/methods/packages.md +++ b/packaging/installer/methods/packages.md @@ -1,13 +1,3 @@ - - # Install Netdata using native DEB/RPM packages For most common Linux distributions that use either DEB or RPM packages, Netdata provides pre-built native packages @@ -20,7 +10,7 @@ When using the kickstart script, you can force usage of native DEB or RPM packag `--native-only` when invoking the script. This will cause it to only attempt to use native packages for the install, and fail if it cannot do so. -> ### Note +> **Note** > > In July 2022, we switched hosting of our native packages from Package Cloud to self-hosted repositories. > We still maintain the Package cloud repositories, but they are not guaranteed to work and may be removed @@ -63,9 +53,9 @@ appropriate repository configuration package from ### Note +> **Note** > -> On RHEL and other systems that use the `el` repostiroies, some of the dependencies for Netdata can only be found +> On RHEL and other systems that use the `el` repositories, some of the dependencies for Netdata can only be found > in the EPEL repository, which is not enabled or installed by default on most of these systems. This additional > repository _should_ be pulled in automatically by our repository config packages, but if it is not you may need > to manually install `epel-release` to be able to successfully install the Netdata packages. @@ -91,7 +81,7 @@ These repositories are set up as what Debian calls ‘flat repositories’, and As a result of this structure, the required APT sources entry for stable packages for Debian 11 (Bullseye) is: -``` +```text deb http://repo.netdata.cloud/repos/stable/debian/ bullseye/ ``` @@ -134,7 +124,7 @@ but we do have some tips for anyone looking to do so: - A full mirror of all of our repositories currently requires up to 100 GB of storage space, though the exact amount of space needed fluctuates over time. Because of this, users seeking to mirror our repositories are encouraged to mirror only those repositories they actually need instead of mirroring everything. 
-- If syncing daily (or less frequently), some time between 05:00 and 08:00 UTC each day is usually the saftest +- If syncing daily (or less frequently), some time between 05:00 and 08:00 UTC each day is usually the safest time to do so, as publishing nightly packages will almost always be done by this point, and publishing of stable releases typically happens after that time window. - If you intend to use our existing GPG signatures on the repository metadata and packages, you probably also want diff --git a/packaging/installer/methods/pfsense.md b/packaging/installer/methods/pfsense.md index 965fba8dd..2cc1842bf 100644 --- a/packaging/installer/methods/pfsense.md +++ b/packaging/installer/methods/pfsense.md @@ -1,15 +1,8 @@ - - # Install Netdata on pfSense CE -> 💡 This document is maintained by Netdata's community, and may not be completely up-to-date. Please double-check the +> **Info** +> +> This document is maintained by Netdata's community, and may not be completely up-to-date. Please double-check the > details of the installation process, such as version numbers for downloadable packages, before proceeding. > > You can help improve this document by [submitting a @@ -46,10 +39,10 @@ pkg install py39-yaml > ⚠️ If any of the above commands return a `Not Found` error, you need to manually search for the latest package in the > [FreeBSD repository](https://www.freebsd.org/ports/) or by running `pkg search`. Search for the package's name, such as `py37-cffi`, find the > latest version number, and update the command accordingly. - +> > ⚠️ On pfSense 2.4.5, Python version 3.7 may be installed by the system, in which case you should not install > Python from the FreeBSD repository as instructed above. - +> > ⚠️ If you are using the `apcupsd` collector, you need to make sure that apcupsd is up before starting Netdata. > Otherwise an infinitely running `cat` process triggered by the default-activated apcupsd charts plugin will eat up CPU > and RAM (`/tmp/.netdata-charts.d-*/run-*`). This also applies to `OPNsense`. @@ -75,7 +68,7 @@ Visit the Netdata dashboard to confirm it's working: `http://:19999` To start Netdata automatically at every boot, add `service netdata onestart` as a Shellcmd entry within the pfSense web interface under **Services/Shellcmd**. You'll need to install the Shellcmd package beforehand under **System/Package Manager/Available Packages**. The Shellcmd Type should be set to `Shellcmd`. -![](https://i.imgur.com/wcKiPe1.png) Alternatively more information can be found in +![interface](https://i.imgur.com/wcKiPe1.png) Alternatively, more information can be found in , for achieving the same via the command line and scripts. @@ -83,5 +76,3 @@ If you experience an issue with `/usr/bin/install` being absent in pfSense 2.3 o workaround from **Note:** In pfSense, the Netdata configuration files are located under `/usr/local/etc/netdata`. - - diff --git a/packaging/installer/methods/source.md b/packaging/installer/methods/source.md index c6ff6e6fe..f09db53d0 100644 --- a/packaging/installer/methods/source.md +++ b/packaging/installer/methods/source.md @@ -1,13 +1,3 @@ - - # Manually build Netdata from source These instructions are for advanced users and distribution package @@ -20,33 +10,33 @@ checkout](/packaging/installer/methods/manual.md) instead.
At a bare minimum, Netdata requires the following libraries and tools to build and run successfully: -- libuuid -- libuv version 1.0 or newer -- zlib -- CMake 3.13 or newer -- GCC or Xcode (Clang is known to have issues in certain configurations, see [Using Clang](#using-clang)) -- Ninja or Make (Ninja is preferred as it results in significantly faster builds) -- Git (we use git in the build system to generate version info, you don't need a full install, just a working `git show` command) +- libuuid +- libuv version 1.0 or newer +- zlib +- CMake 3.13 or newer +- GCC or Xcode (Clang is known to have issues in certain configurations, see [Using Clang](#using-clang)) +- Ninja or Make (Ninja is preferred as it results in significantly faster builds) +- Git (we use git in the build system to generate version info, you don't need a full install, just a working `git show` command) The following additional dependencies are also needed, but will be prepared automatically by CMake if they are not available on the build system. -- libyaml -- JSON-C +- libyaml +- JSON-C Additionally, the following build-time features require additional dependencies: -- TLS support for the web GUI: - - OpenSSL 1.0.2 or newer _or_ LibreSSL 3.0.0 or newer. -- dbengine metric storage: - - liblz4 r129 or newer - - OpenSSL 1.0 or newer (LibreSSL _amy_ work, but is largely untested). -- Netdata Cloud support: - - A working internet connection - - OpenSSL 1.0.2 or newer _or_ LibreSSL 3.0.0 or newer. - - protobuf (Google Protocol Buffers) and protoc compiler. If protobuf is not available on the system, +- TLS support for the web GUI: + - OpenSSL 1.0.2 or newer _or_ LibreSSL 3.0.0 or newer. +- dbengine metric storage: + - liblz4 r129 or newer + - OpenSSL 1.0 or newer (LibreSSL _may_ work, but is largely untested). +- Netdata Cloud support: + - A working internet connection + - OpenSSL 1.0.2 or newer _or_ LibreSSL 3.0.0 or newer. + - protobuf (Google Protocol Buffers) and protoc compiler. If protobuf is not available on the system, CMake can be instructed to fetch and build a usable version for Netdata. -- Netdata Go collectors: - - Go 1.21 or newer +- Netdata Go collectors: + - Go 1.21 or newer ## Preparing the source tree @@ -111,12 +101,12 @@ a pre-built copy of the required code, or build it locally. We provide pre-built copies of the eBPF code for 64-bit x86 systems using glibc or musl. To use one of these: -1. Verify the release version that Netdata expects to be used by checking +1. Verify the release version that Netdata expects to be used by checking the contents of `packaging/ebpf.version` in your Netdata sources. -2. Go to https://github.com/netdata/kernel-collector/releases, select the +2. Go to , select the required release, and download the `netdata-kernel-collector-*.tar.xz` file for the libc variant your system uses (either musl or glibc). -3. Extract the contents of the archive to a temporary location, and then +3. Extract the contents of the archive to a temporary location, and then copy all of the `.o` and `.so.*` files and the contents of the `library/` directory to `/usr/libexec/netdata/plugins.d` or the equivalent location for your build of Netdata. @@ -128,5 +118,3 @@ instructions, please consult [the README file for our kernel-collector repository](https://github.com/netdata/kernel-collector/#readme), which outlines both the required dependencies and multiple options for building the code.
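For orientation, the overall flow this document describes usually reduces to a standard out-of-tree CMake build. A minimal sketch, assuming the dependencies listed above are already installed and Ninja is available (the exact configuration options you may need are covered in the sections above):

```sh
# Fetch the sources (with submodules), then configure, build, and install.
git clone --recursive https://github.com/netdata/netdata.git
cd netdata
cmake -S . -B build -G Ninja -DCMAKE_BUILD_TYPE=RelWithDebInfo
cmake --build build
sudo cmake --install build
```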
- - diff --git a/packaging/installer/methods/synology.md b/packaging/installer/methods/synology.md index 742b3abb0..10c13fe8a 100644 --- a/packaging/installer/methods/synology.md +++ b/packaging/installer/methods/synology.md @@ -1,24 +1,14 @@ - - # Install Netdata on Synology > 💡 This document is maintained by Netdata's community, and may not be completely up-to-date. Please double-check the > details of the installation process, before proceeding. > -> You can help improve this document by -> [submitting a PR](https://github.com/netdata/netdata/edit/master/packaging/installer/methods/synology.md) +> You can help improve this document by +> [submitting a PR](https://github.com/netdata/netdata/edit/master/packaging/installer/methods/synology.md) > with your recommended improvements or changes. Thank you! - -The good news is that our -[one-line installation script](/packaging/installer/methods/kickstart.md) +The good news is that our +[one-line installation script](/packaging/installer/methods/kickstart.md) works fine if your NAS is one that uses the amd64 architecture. It will install the content into `/opt/netdata`, making future removal safe and simple. @@ -49,15 +39,13 @@ installations run it as the `netdata` user, you might wish to do the same. This Additionally, as of 2018/06/24, the Netdata installer doesn't recognize DSM as an operating system, so no init script is installed. You'll have to do this manually: -1. Add [this file](https://gist.github.com/oskapt/055d474d7bfef32c49469c1b53e8225f) as `/etc/rc.netdata`. Make it +1. Add [this file](https://gist.github.com/oskapt/055d474d7bfef32c49469c1b53e8225f) as `/etc/rc.netdata`. Make it executable with `chmod 0755 /etc/rc.netdata`. -2. Add or edit `/etc/rc.local` and add a line calling `/etc/rc.netdata` to have it start on boot: +2. Add or edit `/etc/rc.local` and add a line calling `/etc/rc.netdata` to have it start on boot: -```conf -# Netdata startup -[ -x /etc/rc.netdata ] && /etc/rc.netdata start -``` + ```text + # Netdata startup + [ -x /etc/rc.netdata ] && /etc/rc.netdata start + ``` 3. Make sure `/etc/rc.local` is executable: `chmod 0755 /etc/rc.local`. - - diff --git a/packaging/installer/methods/systems.md b/packaging/installer/methods/systems.md index 8715a57af..0f4c104ae 100644 --- a/packaging/installer/methods/systems.md +++ b/packaging/installer/methods/systems.md @@ -1,12 +1,3 @@ - - # Install on specific environments This category contains specific instructions for some popular environments. diff --git a/packaging/installer/netdata-updater.sh b/packaging/installer/netdata-updater.sh index 5ebb6baa2..51b5b14ac 100755 --- a/packaging/installer/netdata-updater.sh +++ b/packaging/installer/netdata-updater.sh @@ -590,7 +590,9 @@ self_update() { export ENVIRONMENT_FILE="${ENVIRONMENT_FILE}" force_update="" [ "$NETDATA_FORCE_UPDATE" = "1" ] && force_update="--force-update" - exec ./netdata-updater.sh --not-running-from-cron --no-updater-self-update "$force_update" --tmpdir-path "$(pwd)" + interactive="" + [ "$INTERACTIVE" = "0" ] && interactive="--non-interactive" + exec ./netdata-updater.sh --not-running-from-cron --no-updater-self-update "$force_update" "$interactive" --tmpdir-path "$(pwd)" else error "Failed to download newest version of updater script, continuing with current version." 
fi diff --git a/packaging/maintainers/README.md b/packaging/maintainers/README.md index 612c7d0d7..30b56e7d3 100644 --- a/packaging/maintainers/README.md +++ b/packaging/maintainers/README.md @@ -8,72 +8,63 @@ This page tracks the package maintainers for Netdata, for various operating syst ## Official Linux Distributions -| Linux Distribution | Netdata Version | Maintainer | Related URL | -| :-: | :-: | :-: | :-- | -| Arch Linux | Release | @svenstaro | [netdata @ Arch Linux](https://www.archlinux.org/packages/community/x86_64/netdata/) | -| Arch Linux AUR | Git | @sanskritfritz | [netdata @ AUR](https://aur.archlinux.org/packages/netdata-git/) | -| Gentoo Linux | Release + Git | @candrews | [netdata @ gentoo](https://github.com/gentoo/gentoo/tree/master/net-analyzer/netdata) | -| Debian | Release | @lhw @FedericoCeratto | [netdata @ debian](http://salsa.debian.org/debian/netdata) | -| Slackware | Release | @willysr | [netdata @ slackbuilds](https://slackbuilds.org/repository/14.2/system/netdata/) | -| Ubuntu | | | | -| Red Hat / Fedora / CentOS | | | | -| SUSE SLE / openSUSE Tumbleweed & Leap | | | [netdata @ SUSE OpenBuildService](https://software.opensuse.org/package/netdata) | +| Linux Distribution | Netdata Version | Maintainer | Related URL | +|:-------------------------------------:|:---------------:|:---------------------:|:--------------------------------------------------------------------------------------| +| Arch Linux | Release | @svenstaro | [netdata @ Arch Linux](https://www.archlinux.org/packages/community/x86_64/netdata/) | +| Arch Linux AUR | Git | @sanskritfritz | [netdata @ AUR](https://aur.archlinux.org/packages/netdata-git/) | +| Gentoo Linux | Release + Git | @candrews | [netdata @ gentoo](https://github.com/gentoo/gentoo/tree/master/net-analyzer/netdata) | +| Debian | Release | @lhw @FedericoCeratto | [netdata @ debian](http://salsa.debian.org/debian/netdata) | +| Slackware | Release | @willysr | [netdata @ slackbuilds](https://slackbuilds.org/repository/14.2/system/netdata/) | +| Ubuntu | | | | +| Red Hat / Fedora / CentOS | | | | +| SUSE SLE / openSUSE Tumbleweed & Leap | | | [netdata @ SUSE OpenBuildService](https://software.opensuse.org/package/netdata) | --- ## FreeBSD -| System | Initial PR | Core Developer | Package Maintainer -|:-:|:-:|:-:|:-:| -| FreeBSD | #1321 | @vlvkobal|@mmokhi +| System | Initial PR | Core Developer | Package Maintainer | +|:-------:|:----------:|:--------------:|:------------------:| +| FreeBSD | #1321 | | @mmokhi | --- ## macOS -| System | URL | Core Developer | Package Maintainer -|:-:|:-:|:-:|:-:| -| macOS Homebrew Formula|[link](https://github.com/Homebrew/homebrew-core/blob/master/Formula/n/netdata.rb)|@vlvkobal|@rickard-von-essen +| System | URL | Core Developer | Package Maintainer | +|:----------------------:|:----------------------------------------------------------------------------------:|:--------------:|:------------------:| +| macOS Homebrew Formula | [link](https://github.com/Homebrew/homebrew-core/blob/master/Formula/n/netdata.rb) | | | --- ## Unofficial Linux Packages -| Linux Distribution | Netdata Version | Maintainer | Related URL | -| :-: | :-: | :-: | :-- | -| Ubuntu | Release | @gslin | [netdata @ gslin ppa](https://launchpad.net/~gslin/+archive/ubuntu/netdata) https://github.com/netdata/netdata/issues/69#issuecomment-217458543 | ---- - -## Embedded Linux - -| Embedded Linux | Netdata Version | Maintainer | Related URL | -| :-: | :-: | :-: | :-- | -| OpenWRT | Release | @nitroshift | [openwrt 
package](https://github.com/openwrt/packages/tree/master/admin/netdata) | -| ReadyNAS | Release | @NAStools | https://github.com/nastools/netdata | -| QNAP | Release | QNAP_Stephane | https://forum.qnap.com/viewtopic.php?t=121518 | -| DietPi | Release | @Fourdee | https://github.com/Fourdee/DietPi | +| Linux Distribution | Netdata Version | Maintainer | Related URL | +|:------------------:|:---------------:|:----------:|:--------------------------------------------------------------------------------------------------------------------------------------------------| +| Ubuntu | Release | @gslin | [netdata @ gslin ppa](https://launchpad.net/~gslin/+archive/ubuntu/netdata) | --- -## Linux Containers +## Embedded Linux -| Containers | Netdata Version | Maintainer | Related URL | -| :-: | :-: | :-: | :-- | -| Docker | Git | @titpetric | https://github.com/titpetric/netdata | +| Embedded Linux | Netdata Version | Maintainer | Related URL | +|:--------------:|:---------------:|:-------------:|:---------------------------------------------------------------------------------| +| OpenWRT | Release | @nitroshift | [openwrt package](https://github.com/openwrt/packages/tree/master/admin/netdata) | +| ReadyNAS | Release | @NAStools | | +| QNAP | Release | QNAP_Stephane | | +| DietPi | Release | @Fourdee | | --- ## Automation Systems -| Automation Systems | Netdata Version | Maintainer | Related URL | -| :-: | :-: | :-: | :-- | -| Ansible | git | @jffz | https://galaxy.ansible.com/jffz/netdata/ | -| Chef | ? | @sergiopena | https://github.com/sergiopena/netdata-cookbook | +| Automation Systems | Netdata Version | Maintainer | Related URL | +|:------------------:|:---------------:|:-----------:|:-------------------------------------------------| +| Ansible | git | @jffz | | +| Chef | ? | @sergiopena | | --- ## Packages summary from repology.org [![Packaging status](https://repology.org/badge/vertical-allrepos/netdata.svg)](https://repology.org/metapackage/netdata/versions) - - diff --git a/packaging/makeself/README.md b/packaging/makeself/README.md index d1c492f62..7344f3381 100644 --- a/packaging/makeself/README.md +++ b/packaging/makeself/README.md @@ -1,13 +1,3 @@ - - # Netdata static binary build We publish pre-built static builds of Netdata for Linux systems. 
Currently, these are published for 64-bit x86, ARMv7, diff --git a/packaging/makeself/jobs/70-netdata-git.install.sh b/packaging/makeself/jobs/70-netdata-git.install.sh index 59074ec59..33531608b 100755 --- a/packaging/makeself/jobs/70-netdata-git.install.sh +++ b/packaging/makeself/jobs/70-netdata-git.install.sh @@ -32,7 +32,6 @@ run ./netdata-installer.sh \ --dont-wait \ --dont-start-it \ --disable-exporting-mongodb \ - --require-cloud \ --use-system-protobuf \ --dont-scrub-cflags-even-though-it-may-break-things \ --one-time-build \ diff --git a/packaging/makeself/jobs/99-makeself.install.sh b/packaging/makeself/jobs/99-makeself.install.sh index 2695e8eb4..3b0608388 100755 --- a/packaging/makeself/jobs/99-makeself.install.sh +++ b/packaging/makeself/jobs/99-makeself.install.sh @@ -104,13 +104,13 @@ run mkdir -p artifacts run mv "${NETDATA_INSTALL_PATH}.gz.run" "artifacts/${FILE}" [ -f "netdata-${BUILDARCH}-latest.gz.run" ] && rm "netdata-${BUILDARCH}-latest.gz.run" -run ln -s "artifacts/${FILE}" "netdata-${BUILDARCH}-latest.gz.run" +run cp "artifacts/${FILE}" "netdata-${BUILDARCH}-latest.gz.run" if [ "${BUILDARCH}" = "x86_64" ]; then [ -f "netdata-latest.gz.run" ] && rm "netdata-latest.gz.run" - run ln -s "artifacts/${FILE}" "netdata-latest.gz.run" + run cp "artifacts/${FILE}" "netdata-latest.gz.run" [ -f "artifacts/netdata-${VERSION}.gz.run" ] && rm "netdata-${VERSION}.gz.run" - run ln -s "./${FILE}" "artifacts/netdata-${VERSION}.gz.run" + run cp "artifacts/${FILE}" "artifacts/netdata-${VERSION}.gz.run" fi # shellcheck disable=SC2015 diff --git a/packaging/repoconfig/CMakeLists.txt b/packaging/repoconfig/CMakeLists.txt index 415ad8807..54ac3c494 100644 --- a/packaging/repoconfig/CMakeLists.txt +++ b/packaging/repoconfig/CMakeLists.txt @@ -9,8 +9,8 @@ list(APPEND DEB_DISTROS debian ubuntu) set(DEB_GPG_KEY_SOURCE "https://repo.netdata.cloud/netdatabot.gpg.key") -set(PACKAGE_VERSION 3) -set(PACKAGE_RELEASE 4) +set(PACKAGE_VERSION 4) +set(PACKAGE_RELEASE 1) set(CPACK_THREADS 0) set(CPACK_STRIP_FILES NO) diff --git a/packaging/repoconfig/deb.changelog b/packaging/repoconfig/deb.changelog index 6d1dca883..28f44705f 100644 --- a/packaging/repoconfig/deb.changelog +++ b/packaging/repoconfig/deb.changelog @@ -1,3 +1,15 @@ +@PKG_NAME@ (4-1) unstable; urgency=medium + + * Update repositories to new subdomain + + -- Austin Hemmelgarn Thu, 31 Oct 2024 11:00:00 -0400 + +@PKG_NAME@ (3-5) unstable; urgency=medium + + * Switch DEB packages to fetch repo metadata by hash. 
+ + -- Austin Hemmelgarn Thu, 12 Sep 2024 07:27:00 -0400 + @PKG_NAME@ (3-4) unstable; urgency=medium * Convert sources to DEB822 format diff --git a/packaging/repoconfig/netdata.repo.dnf b/packaging/repoconfig/netdata.repo.dnf index 3a64a2a58..42fed9b48 100644 --- a/packaging/repoconfig/netdata.repo.dnf +++ b/packaging/repoconfig/netdata.repo.dnf @@ -1,19 +1,19 @@ [netdata] name=Netdata -baseurl=https://repo.netdata.cloud/repos/@VARIANT@/@DIST_NAME@/@DIST_VERSION@/$basearch +baseurl=https://repository.netdata.cloud/repos/@VARIANT@/@DIST_NAME@/@DIST_VERSION@/$basearch repo_gpgcheck=1 gpgcheck=1 -gpgkey=https://repo.netdata.cloud/netdatabot.gpg.key +gpgkey=https://repository.netdata.cloud/netdatabot.gpg.key enabled=1 sslverify=1 priority=50 [netdata-repoconfig] name=Netdata Repository Config -baseurl=https://repo.netdata.cloud/repos/repoconfig/@DIST_NAME@/@DIST_VERSION@/$basearch +baseurl=https://repository.netdata.cloud/repos/repoconfig/@DIST_NAME@/@DIST_VERSION@/$basearch repo_gpgcheck=1 gpgcheck=1 -gpgkey=https://repo.netdata.cloud/netdatabot.gpg.key +gpgkey=https://repository.netdata.cloud/netdatabot.gpg.key enabled=1 sslverify=1 priority=50 diff --git a/packaging/repoconfig/netdata.repo.zypp b/packaging/repoconfig/netdata.repo.zypp index 9ab847343..c352f9c4e 100644 --- a/packaging/repoconfig/netdata.repo.zypp +++ b/packaging/repoconfig/netdata.repo.zypp @@ -1,19 +1,19 @@ [netdata] name=Netdata -baseurl=https://repo.netdata.cloud/repos/@VARIANT@/@DIST_NAME@/@DIST_VERSION@/$basearch +baseurl=https://repository.netdata.cloud/repos/@VARIANT@/@DIST_NAME@/@DIST_VERSION@/$basearch repo_gpgcheck=1 pkg_gpgcheck=1 -gpgkey=https://repo.netdata.cloud/netdatabot.gpg.key +gpgkey=https://repository.netdata.cloud/netdatabot.gpg.key enabled=1 type=rpm-md autorefresh=1 [netdata-repoconfig] name=Netdata Repoconfig -baseurl=https://repo.netdata.cloud/repos/repoconfig/@DIST_NAME@/@DIST_VERSION@/$basearch +baseurl=https://repository.netdata.cloud/repos/repoconfig/@DIST_NAME@/@DIST_VERSION@/$basearch repo_gpgcheck=1 pkg_gpgcheck=1 -gpgkey=https://repo.netdata.cloud/netdatabot.gpg.key +gpgkey=https://repository.netdata.cloud/netdatabot.gpg.key enabled=1 type=rpm-md autorefresh=1 diff --git a/packaging/repoconfig/netdata.sources.in b/packaging/repoconfig/netdata.sources.in index 926b2c453..6579a4f0e 100644 --- a/packaging/repoconfig/netdata.sources.in +++ b/packaging/repoconfig/netdata.sources.in @@ -1,15 +1,15 @@ X-Repolib-Name: Netdata @VARIANT@ repository Types: deb -URIs: http://repo.netdata.cloud/repos/@VARIANT@/@DIST_NAME@/ +URIs: http://repository.netdata.cloud/repos/@VARIANT@/@DIST_NAME@/ Suites: @SUITE@/ Signed-By: /usr/share/keyrings/netdata-archive-keyring.gpg -By-Hash: No +By-Hash: Yes Enabled: Yes X-Repolib-Name: Netdata repository configuration repository Types: deb -URIs: http://repo.netdata.cloud/repos/repoconfig/@DIST_NAME@/ +URIs: http://repository.netdata.cloud/repos/repoconfig/@DIST_NAME@/ Suites: @SUITE@/ Signed-By: /usr/share/keyrings/netdata-archive-keyring.gpg -By-Hash: No +By-Hash: Yes Enabled: Yes diff --git a/packaging/repoconfig/rpm.changelog b/packaging/repoconfig/rpm.changelog index dab81a2cf..7469eb26f 100644 --- a/packaging/repoconfig/rpm.changelog +++ b/packaging/repoconfig/rpm.changelog @@ -1,4 +1,8 @@ -* Mon Aug 19 2024 Austin Hemmelgarn 4-1 +- Switch repos to new subdomain. +* Thu Sep 17 2024 Austin Hemmelgarn 3-5 +- Fix changelog formatting. +* Mon Aug 19 2024 Austin Hemmelgarn 3-4 - Version bump to stay in sync with DEB packages. 
* Fri Aug 9 2024 Austin Hemmelgarn 3-3 - Use system certificate config for Yum/DNF repos. diff --git a/packaging/utils/compile-and-run-windows.sh b/packaging/utils/compile-and-run-windows.sh new file mode 100644 index 000000000..2d540eee9 --- /dev/null +++ b/packaging/utils/compile-and-run-windows.sh @@ -0,0 +1,92 @@ +#!/bin/sh + +# On MSYS2, install these dependencies to build netdata: +install_dependencies() { + pacman -S \ + git cmake ninja clang base-devel msys2-devel \ + libyaml-devel libzstd-devel libutil-linux libutil-linux-devel \ + mingw-w64-x86_64-toolchain mingw-w64-ucrt-x86_64-toolchain \ + mingw64/mingw-w64-x86_64-mold ucrt64/mingw-w64-ucrt-x86_64-mold \ + msys/gdb ucrt64/mingw-w64-ucrt-x86_64-gdb mingw64/mingw-w64-x86_64-gdb \ + msys/zlib-devel mingw64/mingw-w64-x86_64-zlib ucrt64/mingw-w64-ucrt-x86_64-zlib \ + msys/libuv-devel ucrt64/mingw-w64-ucrt-x86_64-libuv mingw64/mingw-w64-x86_64-libuv \ + liblz4-devel mingw64/mingw-w64-x86_64-lz4 ucrt64/mingw-w64-ucrt-x86_64-lz4 \ + openssl-devel mingw64/mingw-w64-x86_64-openssl ucrt64/mingw-w64-ucrt-x86_64-openssl \ + protobuf-devel mingw64/mingw-w64-x86_64-protobuf ucrt64/mingw-w64-ucrt-x86_64-protobuf \ + msys/pcre2-devel mingw64/mingw-w64-x86_64-pcre2 ucrt64/mingw-w64-ucrt-x86_64-pcre2 \ + msys/brotli-devel mingw64/mingw-w64-x86_64-brotli ucrt64/mingw-w64-ucrt-x86_64-brotli \ + msys/ccache ucrt64/mingw-w64-ucrt-x86_64-ccache mingw64/mingw-w64-x86_64-ccache \ + mingw64/mingw-w64-x86_64-go ucrt64/mingw-w64-ucrt-x86_64-go \ + mingw64/mingw-w64-x86_64-nsis \ + msys/libcurl msys/libcurl-devel +} + +if [ "${1}" = "install" ] +then + install_dependencies || exit 1 + exit 0 +fi + +BUILD_FOR_PACKAGING="Off" +if [ "${1}" = "package" ] +then + BUILD_FOR_PACKAGING="On" +fi + +export PATH="/usr/local/bin:${PATH}" + +WT_ROOT="$(pwd)" +BUILD_TYPE="Debug" +NULL="" + +if [ -z "${MSYSTEM}" ]; then + build="${WT_ROOT}/build-${OSTYPE}" +else + build="${WT_ROOT}/build-${OSTYPE}-${MSYSTEM}" +fi + +if [ "$USER" = "vk" ]; then + build="${WT_ROOT}/build" +fi + +set -exu -o pipefail + +if [ ! -d "${build}" ] +then + /usr/bin/cmake -S "${WT_ROOT}" -B "${build}" \ + -G Ninja \ + -DCMAKE_INSTALL_PREFIX="/opt/netdata" \ + -DCMAKE_BUILD_TYPE="${BUILD_TYPE}" \ + -DCMAKE_C_FLAGS="-fstack-protector-all -O0 -ggdb -Wall -Wextra -Wno-char-subscripts -Wa,-mbig-obj -pipe -DNETDATA_INTERNAL_CHECKS=1 -D_FILE_OFFSET_BITS=64 -D__USE_MINGW_ANSI_STDIO=1" \ + -DBUILD_FOR_PACKAGING=${BUILD_FOR_PACKAGING} \ + -DUSE_MOLD=Off \ + -DNETDATA_USER="${USER}" \ + -DDEFAULT_FEATURE_STATE=Off \ + -DENABLE_H2O=Off \ + -DENABLE_ML=On \ + -DENABLE_BUNDLED_JSONC=On \ + -DENABLE_BUNDLED_PROTOBUF=Off \ + -DENABLE_PLUGIN_APPS=On \ + ${NULL} +fi + +ninja -v -C "${build}" || ninja -v -C "${build}" -j 1 + +echo "Stopping service Netdata" +sc stop "Netdata" || echo "Failed" + +ninja -v -C "${build}" install || ninja -v -C "${build}" -j 1 + +# register the event log publisher +cmd.exe //c "$(cygpath -w -a "/opt/netdata/usr/bin/wevt_netdata_install.bat")" + +#echo +#echo "Compile with:" +#echo "ninja -v -C \"${build}\" install || ninja -v -C \"${build}\" -j 1" + +echo "starting netdata..." 
+# enable JIT debug with gdb +export MSYS="error_start:$(cygpath -w /usr/bin/gdb)" + +rm -rf /opt/netdata/var/log/netdata/*.log || echo +/opt/netdata/usr/bin/netdata -D diff --git a/packaging/version b/packaging/version index 3ea09128f..f256be603 100644 --- a/packaging/version +++ b/packaging/version @@ -1 +1 @@ -v1.47.5 +v2.0.3 diff --git a/packaging/windows/BackGround.bmp b/packaging/windows/BackGround.bmp new file mode 100644 index 000000000..bceebb410 Binary files /dev/null and b/packaging/windows/BackGround.bmp differ diff --git a/packaging/windows/Top.bmp b/packaging/windows/Top.bmp new file mode 100644 index 000000000..ef12f7a0b Binary files /dev/null and b/packaging/windows/Top.bmp differ diff --git a/packaging/windows/WINDOWS_INSTALLER.md b/packaging/windows/WINDOWS_INSTALLER.md new file mode 100644 index 000000000..fb727bb3e --- /dev/null +++ b/packaging/windows/WINDOWS_INSTALLER.md @@ -0,0 +1,64 @@ +# Netdata Windows Installer + +Netdata offers a convenient Windows installer for easy setup. This executable provides two distinct installation modes, outlined below. + +> **Note** +> +> This feature is currently in beta and only available for Nightly releases, and the installer can be found in our [nightlies repo](https://github.com/netdata/netdata-nightlies). A stable version will be released soon. + +## Graphical User Interface (GUI) + +Double-clicking the installer initiates the setup process. Since Netdata adds a service to your system, you'll need to provide administrator privileges. + +The installer will then guide you through these steps: + +1. **Welcome**: This screen provides a summary of the actions the installer will perform. +2. **License Agreements**: + - [Netdata Cloud UI License](https://app.netdata.cloud/LICENSE.txt): Review and accept the license terms to proceed. + - [GPLv3 License](/LICENSE): Read the GNU General Public License v3, which governs the Netdata software. +3. **Destination**: Choose the installation directory. By default, Netdata installs in `C:\Program Files\Netdata`. +4. **Installation**: The installer will copy the necessary files to the chosen directory. +5. **Connect to Netdata Cloud**: To [connect](/src/claim/README.md) your Agent to your Netdata Cloud Space you need to provide the following: + - **Claim Token**: The Claim Token that securely authenticates and links your Agent to your Space. + - **Room IDs**: A comma-separated list of Room IDs where you want to add your Agent. + - **Proxy address**: The address of a proxy server, if one is required for communication with Netdata Cloud. + - **Insecure connection**: By default, Netdata verifies the server's certificate. Enabling this option bypasses verification (use only if necessary). + - **Open Terminal**: Select this option to launch the `MSYS2` terminal after installation completes. +6. **Finish**: The installation process is complete! + +## Silent Mode (Command line) + +This section provides instructions for installing Netdata in silent mode, which is ideal for automated deployments. + +> **Info** +> +> Run the installer as admin to avoid the Windows prompt. +> +> Silent mode skips displaying license agreements, but requires explicitly accepting them using the `/A` option. + +### Available Options + +| Option | Description | |-----------|--------------------------------------------------------------------------------------------------| | `/S` | Enables silent mode installation. | | `/A` | Accepts all Netdata licenses. This option is mandatory for silent installations.
| +| `/D` | Specifies the desired installation directory (defaults to `C:\Program Files\Netdata`). | +| `/T` | Opens the `MSYS2` terminal after installation. | +| `/I` | Forces insecure connections, bypassing hostname verification (use only if absolutely necessary). | +| `/TOKEN=` | Sets the Claim Token for your Netdata Cloud Space. | +| `/ROOMS=` | Comma-separated list of Room IDs where you want your node to appear. | +| `/PROXY=` | Sets the proxy server address if your network requires one. | + +### Example Usage + +Connect your Agent to your Netdata Cloud Space with token `` and room ``: + +```bash +netdata-installer-x64.exe /S /A /TOKEN= /ROOMS= +``` + +Replace `` and `` with your actual Netdata Cloud Space claim token and room ID, respectively. + +> **Note** +> +> The Windows version of Netdata is intended for users on paid plans. diff --git a/packaging/windows/clion-msys-msys-environment.bat b/packaging/windows/clion-msys-msys-environment.bat index 9f0c095d3..16934951d 100644 --- a/packaging/windows/clion-msys-msys-environment.bat +++ b/packaging/windows/clion-msys-msys-environment.bat @@ -13,7 +13,9 @@ set MSYSTEM=MSYS :: go exists only mingw64 / ucrt64 / etc, not under msys profile set GOROOT=C:\msys64\mingw64 -set PATH="%PATH%;C:\msys64\usr\bin;C:\msys64\bin;C:\msys64\mingw64\bin" +set "PATH=%PATH%;C:\Program Files (x86)\Windows Kits\10\bin\10.0.26100.0\x64" +set "PATH=%PATH%;C:\Program Files\Microsoft Visual Studio\2022\Community\VC\Tools\MSVC\14.39.33519\bin\Hostx64\x64" +set "PATH=%PATH%;C:\msys64\usr\bin;C:\msys64\bin;C:\msys64\mingw64\bin" ::set PKG_CONFIG_EXECUTABLE=C:\msys64\mingw64\bin\pkg-config.exe ::set CMAKE_C_COMPILER=C:\msys64\mingw64\bin\gcc.exe ::set CMAKE_CC_COMPILER=C:\msys64\mingw64\bin\g++.exe diff --git a/packaging/windows/compile-on-windows.sh b/packaging/windows/compile-on-windows.sh index ceb4f5502..a2c66a2e3 100755 --- a/packaging/windows/compile-on-windows.sh +++ b/packaging/windows/compile-on-windows.sh @@ -6,7 +6,7 @@ CMAKE_BUILD_TYPE="${CMAKE_BUILD_TYPE:-RelWithDebInfo}" # shellcheck source=./win-build-dir.sh . "${REPO_ROOT}/packaging/windows/win-build-dir.sh" -set -exu -o pipefail +set -eu -o pipefail if [ -d "${build}" ]; then rm -rf "${build}" diff --git a/packaging/windows/find-sdk-path.sh b/packaging/windows/find-sdk-path.sh new file mode 100644 index 000000000..fb0eb600b --- /dev/null +++ b/packaging/windows/find-sdk-path.sh @@ -0,0 +1,217 @@ +#!/bin/bash + +# Function to output the path in Windows format (convert from MSYS2/Unix format using cygpath) +convert_to_windows_format() { + cygpath -w -a "$1" +} + +# Function to display help message +display_help() { + echo "Usage: $0 [-s|--sdk] [-v|--visualstudio] [-w|--windows] [--help]" + echo + echo "Options:" + echo " -s, --sdk Search for tools in the Windows SDK." + echo " -v, --visualstudio Search for tools in Visual Studio." + echo " -w, --windows Output the path in Windows format (using cygpath)." + echo " --help Display this help message." + exit 0 +} + +# Function to find tools in the Windows SDK +find_sdk_tools() { + sdk_base_path="/c/Program Files (x86)/Windows Kits/10/bin" + + if [ ! -d "$sdk_base_path" ]; then + echo "ERROR: SDK base path \"$sdk_base_path\" does not exist. No SDK installations found." 
>&2 + echo "$system_root" + return 1 + fi + + echo "SDK base path exists: \"$sdk_base_path\"" >&2 + + # Find all SDK versions + sdk_versions=($(ls "$sdk_base_path" | tr ' ' '\n' | grep -E "^[0-9]+\..*$")) + echo "Found SDK versions: ${sdk_versions[*]}" >&2 + + if [ ${#sdk_versions[@]} -eq 0 ]; then + echo "ERROR: No valid Windows SDK versions found in \"$sdk_base_path\"." >&2 + echo "$system_root" + return 1 + fi + + # Sort versions and pick the latest + sorted_versions=$(printf '%s\n' "${sdk_versions[@]}" | sort -V) + latest_sdk_version=$(echo "$sorted_versions" | tail -n 1) + sdk_tool_path="$sdk_base_path/$latest_sdk_version/x64" + + echo "Latest SDK version: \"$latest_sdk_version\"" >&2 + + if [ ! -d "$sdk_tool_path" ]; then + echo "ERROR: Tool path \"$sdk_tool_path\" does not exist." >&2 + echo "$system_root" + return 1 + fi + + # Check if required tools exist + tools=("mc.exe" "rc.exe") + for tool in "${tools[@]}"; do + if [ ! -f "$sdk_tool_path/$tool" ]; then + echo "ERROR: $tool not found in \"$sdk_tool_path\"" >&2 + echo "$system_root" + return 1 + else + echo "$tool found in \"$sdk_tool_path\"" >&2 + fi + done + + echo >&2 + echo "DONE: All required tools found in \"$sdk_tool_path\"" >&2 + echo >&2 + + echo "$sdk_tool_path" +} + +# Function to find tools in Visual Studio +find_visual_studio_tools() { + studio_base_path="/c/Program Files/Microsoft Visual Studio/2022" + echo "Checking for Visual Studio installations in: \"$studio_base_path\"" >&2 + + if [ ! -d "$studio_base_path" ]; then + echo "ERROR: Visual Studio base path \"$studio_base_path\" does not exist. No Visual Studio installations found." >&2 + echo "$system_root" + return 1 + fi + + # Visual Studio editions we want to check + editions=("Enterprise" "Professional" "Community") + available_editions=() + + # Loop through each edition and check for tools + for edition in "${editions[@]}"; do + edition_path="$studio_base_path/$edition/VC/Tools/MSVC" + if [ -d "$edition_path" ]; then + available_editions+=("$edition") + echo "Checking edition: $edition in $studio_base_path" >&2 + + # Find all MSVC versions and sort them + msvc_versions=($(ls "$edition_path" | tr ' ' '\n' | grep -E "^[0-9]+\..*$")) + echo "Found MSVC versions in $edition: ${msvc_versions[*]}" >&2 + + if [ ${#msvc_versions[@]} -gt 0 ]; then + sorted_versions=$(printf '%s\n' "${msvc_versions[@]}" | sort -V) + latest_msvc_version=$(echo "${sorted_versions[@]}" | tail -n 1) + vs_tool_path="$edition_path/$latest_msvc_version/bin/Hostx64/x64" + + echo "Latest MSVC version: \"$latest_msvc_version\" in $edition" >&2 + + if [ ! -d "$vs_tool_path" ]; then + echo "WARNING: Tool path \"$vs_tool_path\" does not exist." >&2 + continue + fi + + # Check if required tools exist + tools=("link.exe") + missing_tool=0 + + for tool in "${tools[@]}"; do + if [ ! -f "$vs_tool_path/$tool" ]; then + echo "WARNING: $tool not found in \"$vs_tool_path\" for $edition" >&2 + missing_tool=1 + else + echo "$tool found in \"$vs_tool_path\"" >&2 + fi + done + + if [ $missing_tool -eq 0 ]; then + echo >&2 + echo "All required tools found in \"$vs_tool_path\"" >&2 + echo >&2 + + echo "$vs_tool_path" + return 0 + else + echo "WARNING: skipping edition '$edition', directory does not exist." >&2 + fi + else + echo "WARNING: skipping edition '$edition', MSVC directory does not exist." >&2 + fi + else + echo "WARNING: skipping edition '$edition', directory does not exist." >&2 + fi + done + + echo "ERROR: No valid Visual Studio editions found in \"$studio_base_path\"." 
>&2 + echo "$system_root" + return 1 +} + +# Parse options using getopt +TEMP=$(getopt -o svwh --long sdk,visualstudio,windows,help -- "$@") +if [ $? != 0 ]; then + echo "ERROR: Invalid options provided." >&2 + exit 1 +fi + +eval set -- "$TEMP" + +search_mode="sdk" +windows_format=0 +system_root="/usr/bin" + +# Process getopt options +while true; do + case "$1" in + -s|--sdk) + search_mode="sdk" + shift + ;; + -v|--visualstudio) + search_mode="visualstudio" + shift + ;; + -w|--windows) + system_root="%SYSTEMROOT%" + windows_format=1 + shift + ;; + --help|-h) + display_help + ;; + --) + shift + break + ;; + *) + echo "ERROR: Invalid option: $1" >&2 + exit 1 + ;; + esac +done + +# Ensure that one of --sdk or --visualstudio is selected +if [ -z "$search_mode" ]; then + echo "ERROR: You must specify either --sdk or --visualstudio." >&2 + display_help +fi + +# Determine which function to call based on the search mode +if [ "$search_mode" = "sdk" ]; then + tool_path=$(find_sdk_tools) +else + tool_path=$(find_visual_studio_tools) +fi + +# If a valid path is found, output it +if [ "$tool_path" != "$system_root" ]; then + if [ "$windows_format" -eq 1 ]; then + windows_tool_path=$(convert_to_windows_format "$tool_path") + echo "$windows_tool_path" + else + echo "$tool_path" + fi +else + echo "$system_root" + exit 1 +fi + +exit 0 diff --git a/packaging/windows/get-convert-licenses.sh b/packaging/windows/get-convert-licenses.sh new file mode 100755 index 000000000..b5d5191de --- /dev/null +++ b/packaging/windows/get-convert-licenses.sh @@ -0,0 +1,29 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-3.0-or-later + +set -e + +function txt_to_rtf() { + INPUT="$1" + OUTPUT="$2" + + echo '{\rtf1\ansi\deff0 {\fonttbl {\f0 Times New Roman;}}' > "$OUTPUT" + echo '\paperh15840 \paperw12240' >> "$OUTPUT" + echo '\margl720 \margr720 \margt720 \margb720' >> "$OUTPUT" + echo '\f0\fs24' >> "$OUTPUT" + + sed s/\$/'\\line'/ "$INPUT" | sed s/\\f/'\\page'/ >> "$OUTPUT" + echo '}' >> "$OUTPUT" +} + +function check_and_get_file() { + if [ ! -f "$1" ]; then + curl -o "tmp.txt" "$2" + txt_to_rtf "tmp.txt" "$1" + rm "tmp.txt" + fi +} + +check_and_get_file "gpl-3.0.rtf" "https://www.gnu.org/licenses/gpl-3.0.txt" +check_and_get_file "ncul1.rtf" "https://app.netdata.cloud/LICENSE.txt" + diff --git a/packaging/windows/get-win-build-path.sh b/packaging/windows/get-win-build-path.sh new file mode 100755 index 000000000..0f05f26df --- /dev/null +++ b/packaging/windows/get-win-build-path.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +REPO_ROOT="$(dirname "$(dirname "$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null && pwd -P)")")" + +# shellcheck source=./win-build-dir.sh +. "${REPO_ROOT}/packaging/windows/win-build-dir.sh" + +cygpath -wa "${build}" diff --git a/packaging/windows/gpl-3.0.rtf b/packaging/windows/gpl-3.0.rtf new file mode 100644 index 000000000..3cffae8ad --- /dev/null +++ b/packaging/windows/gpl-3.0.rtf @@ -0,0 +1,679 @@ +{\rtf1\ansi\deff0 {\fonttbl {\f0 Times New Roman;}} +\paperh15840 \paperw12240 +\margl720 \margr720 \margt720 \margb720 +\f0\fs24 + GNU GENERAL PUBLIC LICENSE\line + Version 3, 29 June 2007\line +\line + Copyright (C) 2007 Free Software Foundation, Inc. 
\line + Everyone is permitted to copy and distribute verbatim copies\line + of this license document, but changing it is not allowed.\line +\line + Preamble\line +\line + The GNU General Public License is a free, copyleft license for\line +software and other kinds of works.\line +\line + The licenses for most software and other practical works are designed\line +to take away your freedom to share and change the works. By contrast,\line +the GNU General Public License is intended to guarantee your freedom to\line +share and change all versions of a program--to make sure it remains free\line +software for all its users. We, the Free Software Foundation, use the\line +GNU General Public License for most of our software; it applies also to\line +any other work released this way by its authors. You can apply it to\line +your programs, too.\line +\line + When we speak of free software, we are referring to freedom, not\line +price. Our General Public Licenses are designed to make sure that you\line +have the freedom to distribute copies of free software (and charge for\line +them if you wish), that you receive source code or can get it if you\line +want it, that you can change the software or use pieces of it in new\line +free programs, and that you know you can do these things.\line +\line + To protect your rights, we need to prevent others from denying you\line +these rights or asking you to surrender the rights. Therefore, you have\line +certain responsibilities if you distribute copies of the software, or if\line +you modify it: responsibilities to respect the freedom of others.\line +\line + For example, if you distribute copies of such a program, whether\line +gratis or for a fee, you must pass on to the recipients the same\line +freedoms that you received. You must make sure that they, too, receive\line +or can get the source code. And you must show them these terms so they\line +know their rights.\line +\line + Developers that use the GNU GPL protect your rights with two steps:\line +(1) assert copyright on the software, and (2) offer you this License\line +giving you legal permission to copy, distribute and/or modify it.\line +\line + For the developers' and authors' protection, the GPL clearly explains\line +that there is no warranty for this free software. For both users' and\line +authors' sake, the GPL requires that modified versions be marked as\line +changed, so that their problems will not be attributed erroneously to\line +authors of previous versions.\line +\line + Some devices are designed to deny users access to install or run\line +modified versions of the software inside them, although the manufacturer\line +can do so. This is fundamentally incompatible with the aim of\line +protecting users' freedom to change the software. The systematic\line +pattern of such abuse occurs in the area of products for individuals to\line +use, which is precisely where it is most unacceptable. Therefore, we\line +have designed this version of the GPL to prohibit the practice for those\line +products. 
If such problems arise substantially in other domains, we\line +stand ready to extend this provision to those domains in future versions\line +of the GPL, as needed to protect the freedom of users.\line +\line + Finally, every program is threatened constantly by software patents.\line +States should not allow patents to restrict development and use of\line +software on general-purpose computers, but in those that do, we wish to\line +avoid the special danger that patents applied to a free program could\line +make it effectively proprietary. To prevent this, the GPL assures that\line +patents cannot be used to render the program non-free.\line +\line + The precise terms and conditions for copying, distribution and\line +modification follow.\line +\line + TERMS AND CONDITIONS\line +\line + 0. Definitions.\line +\line + "This License" refers to version 3 of the GNU General Public License.\line +\line + "Copyright" also means copyright-like laws that apply to other kinds of\line +works, such as semiconductor masks.\line +\line + "The Program" refers to any copyrightable work licensed under this\line +License. Each licensee is addressed as "you". "Licensees" and\line +"recipients" may be individuals or organizations.\line +\line + To "modify" a work means to copy from or adapt all or part of the work\line +in a fashion requiring copyright permission, other than the making of an\line +exact copy. The resulting work is called a "modified version" of the\line +earlier work or a work "based on" the earlier work.\line +\line + A "covered work" means either the unmodified Program or a work based\line +on the Program.\line +\line + To "propagate" a work means to do anything with it that, without\line +permission, would make you directly or secondarily liable for\line +infringement under applicable copyright law, except executing it on a\line +computer or modifying a private copy. Propagation includes copying,\line +distribution (with or without modification), making available to the\line +public, and in some countries other activities as well.\line +\line + To "convey" a work means any kind of propagation that enables other\line +parties to make or receive copies. Mere interaction with a user through\line +a computer network, with no transfer of a copy, is not conveying.\line +\line + An interactive user interface displays "Appropriate Legal Notices"\line +to the extent that it includes a convenient and prominently visible\line +feature that (1) displays an appropriate copyright notice, and (2)\line +tells the user that there is no warranty for the work (except to the\line +extent that warranties are provided), that licensees may convey the\line +work under this License, and how to view a copy of this License. If\line +the interface presents a list of user commands or options, such as a\line +menu, a prominent item in the list meets this criterion.\line +\line + 1. Source Code.\line +\line + The "source code" for a work means the preferred form of the work\line +for making modifications to it. 
"Object code" means any non-source\line +form of a work.\line +\line + A "Standard Interface" means an interface that either is an official\line +standard defined by a recognized standards body, or, in the case of\line +interfaces specified for a particular programming language, one that\line +is widely used among developers working in that language.\line +\line + The "System Libraries" of an executable work include anything, other\line +than the work as a whole, that (a) is included in the normal form of\line +packaging a Major Component, but which is not part of that Major\line +Component, and (b) serves only to enable use of the work with that\line +Major Component, or to implement a Standard Interface for which an\line +implementation is available to the public in source code form. A\line +"Major Component", in this context, means a major essential component\line +(kernel, window system, and so on) of the specific operating system\line +(if any) on which the executable work runs, or a compiler used to\line +produce the work, or an object code interpreter used to run it.\line +\line + The "Corresponding Source" for a work in object code form means all\line +the source code needed to generate, install, and (for an executable\line +work) run the object code and to modify the work, including scripts to\line +control those activities. However, it does not include the work's\line +System Libraries, or general-purpose tools or generally available free\line +programs which are used unmodified in performing those activities but\line +which are not part of the work. For example, Corresponding Source\line +includes interface definition files associated with source files for\line +the work, and the source code for shared libraries and dynamically\line +linked subprograms that the work is specifically designed to require,\line +such as by intimate data communication or control flow between those\line +subprograms and other parts of the work.\line +\line + The Corresponding Source need not include anything that users\line +can regenerate automatically from other parts of the Corresponding\line +Source.\line +\line + The Corresponding Source for a work in source code form is that\line +same work.\line +\line + 2. Basic Permissions.\line +\line + All rights granted under this License are granted for the term of\line +copyright on the Program, and are irrevocable provided the stated\line +conditions are met. This License explicitly affirms your unlimited\line +permission to run the unmodified Program. The output from running a\line +covered work is covered by this License only if the output, given its\line +content, constitutes a covered work. This License acknowledges your\line +rights of fair use or other equivalent, as provided by copyright law.\line +\line + You may make, run and propagate covered works that you do not\line +convey, without conditions so long as your license otherwise remains\line +in force. You may convey covered works to others for the sole purpose\line +of having them make modifications exclusively for you, or provide you\line +with facilities for running those works, provided that you comply with\line +the terms of this License in conveying all material for which you do\line +not control copyright. 
Those thus making or running the covered works\line +for you must do so exclusively on your behalf, under your direction\line +and control, on terms that prohibit them from making any copies of\line +your copyrighted material outside their relationship with you.\line +\line + Conveying under any other circumstances is permitted solely under\line +the conditions stated below. Sublicensing is not allowed; section 10\line +makes it unnecessary.\line +\line + 3. Protecting Users' Legal Rights From Anti-Circumvention Law.\line +\line + No covered work shall be deemed part of an effective technological\line +measure under any applicable law fulfilling obligations under article\line +11 of the WIPO copyright treaty adopted on 20 December 1996, or\line +similar laws prohibiting or restricting circumvention of such\line +measures.\line +\line + When you convey a covered work, you waive any legal power to forbid\line +circumvention of technological measures to the extent such circumvention\line +is effected by exercising rights under this License with respect to\line +the covered work, and you disclaim any intention to limit operation or\line +modification of the work as a means of enforcing, against the work's\line +users, your or third parties' legal rights to forbid circumvention of\line +technological measures.\line +\line + 4. Conveying Verbatim Copies.\line +\line + You may convey verbatim copies of the Program's source code as you\line +receive it, in any medium, provided that you conspicuously and\line +appropriately publish on each copy an appropriate copyright notice;\line +keep intact all notices stating that this License and any\line +non-permissive terms added in accord with section 7 apply to the code;\line +keep intact all notices of the absence of any warranty; and give all\line +recipients a copy of this License along with the Program.\line +\line + You may charge any price or no price for each copy that you convey,\line +and you may offer support or warranty protection for a fee.\line +\line + 5. Conveying Modified Source Versions.\line +\line + You may convey a work based on the Program, or the modifications to\line +produce it from the Program, in the form of source code under the\line +terms of section 4, provided that you also meet all of these conditions:\line +\line + a) The work must carry prominent notices stating that you modified\line + it, and giving a relevant date.\line +\line + b) The work must carry prominent notices stating that it is\line + released under this License and any conditions added under section\line + 7. This requirement modifies the requirement in section 4 to\line + "keep intact all notices".\line +\line + c) You must license the entire work, as a whole, under this\line + License to anyone who comes into possession of a copy. This\line + License will therefore apply, along with any applicable section 7\line + additional terms, to the whole of the work, and all its parts,\line + regardless of how they are packaged. 
This License gives no\line + permission to license the work in any other way, but it does not\line + invalidate such permission if you have separately received it.\line +\line + d) If the work has interactive user interfaces, each must display\line + Appropriate Legal Notices; however, if the Program has interactive\line + interfaces that do not display Appropriate Legal Notices, your\line + work need not make them do so.\line +\line + A compilation of a covered work with other separate and independent\line +works, which are not by their nature extensions of the covered work,\line +and which are not combined with it such as to form a larger program,\line +in or on a volume of a storage or distribution medium, is called an\line +"aggregate" if the compilation and its resulting copyright are not\line +used to limit the access or legal rights of the compilation's users\line +beyond what the individual works permit. Inclusion of a covered work\line +in an aggregate does not cause this License to apply to the other\line +parts of the aggregate.\line +\line + 6. Conveying Non-Source Forms.\line +\line + You may convey a covered work in object code form under the terms\line +of sections 4 and 5, provided that you also convey the\line +machine-readable Corresponding Source under the terms of this License,\line +in one of these ways:\line +\line + a) Convey the object code in, or embodied in, a physical product\line + (including a physical distribution medium), accompanied by the\line + Corresponding Source fixed on a durable physical medium\line + customarily used for software interchange.\line +\line + b) Convey the object code in, or embodied in, a physical product\line + (including a physical distribution medium), accompanied by a\line + written offer, valid for at least three years and valid for as\line + long as you offer spare parts or customer support for that product\line + model, to give anyone who possesses the object code either (1) a\line + copy of the Corresponding Source for all the software in the\line + product that is covered by this License, on a durable physical\line + medium customarily used for software interchange, for a price no\line + more than your reasonable cost of physically performing this\line + conveying of source, or (2) access to copy the\line + Corresponding Source from a network server at no charge.\line +\line + c) Convey individual copies of the object code with a copy of the\line + written offer to provide the Corresponding Source. This\line + alternative is allowed only occasionally and noncommercially, and\line + only if you received the object code with such an offer, in accord\line + with subsection 6b.\line +\line + d) Convey the object code by offering access from a designated\line + place (gratis or for a charge), and offer equivalent access to the\line + Corresponding Source in the same way through the same place at no\line + further charge. You need not require recipients to copy the\line + Corresponding Source along with the object code. If the place to\line + copy the object code is a network server, the Corresponding Source\line + may be on a different server (operated by you or a third party)\line + that supports equivalent copying facilities, provided you maintain\line + clear directions next to the object code saying where to find the\line + Corresponding Source. 
Regardless of what server hosts the\line + Corresponding Source, you remain obligated to ensure that it is\line + available for as long as needed to satisfy these requirements.\line +\line + e) Convey the object code using peer-to-peer transmission, provided\line + you inform other peers where the object code and Corresponding\line + Source of the work are being offered to the general public at no\line + charge under subsection 6d.\line +\line + A separable portion of the object code, whose source code is excluded\line +from the Corresponding Source as a System Library, need not be\line +included in conveying the object code work.\line +\line + A "User Product" is either (1) a "consumer product", which means any\line +tangible personal property which is normally used for personal, family,\line +or household purposes, or (2) anything designed or sold for incorporation\line +into a dwelling. In determining whether a product is a consumer product,\line +doubtful cases shall be resolved in favor of coverage. For a particular\line +product received by a particular user, "normally used" refers to a\line +typical or common use of that class of product, regardless of the status\line +of the particular user or of the way in which the particular user\line +actually uses, or expects or is expected to use, the product. A product\line +is a consumer product regardless of whether the product has substantial\line +commercial, industrial or non-consumer uses, unless such uses represent\line +the only significant mode of use of the product.\line +\line + "Installation Information" for a User Product means any methods,\line +procedures, authorization keys, or other information required to install\line +and execute modified versions of a covered work in that User Product from\line +a modified version of its Corresponding Source. The information must\line +suffice to ensure that the continued functioning of the modified object\line +code is in no case prevented or interfered with solely because\line +modification has been made.\line +\line + If you convey an object code work under this section in, or with, or\line +specifically for use in, a User Product, and the conveying occurs as\line +part of a transaction in which the right of possession and use of the\line +User Product is transferred to the recipient in perpetuity or for a\line +fixed term (regardless of how the transaction is characterized), the\line +Corresponding Source conveyed under this section must be accompanied\line +by the Installation Information. But this requirement does not apply\line +if neither you nor any third party retains the ability to install\line +modified object code on the User Product (for example, the work has\line +been installed in ROM).\line +\line + The requirement to provide Installation Information does not include a\line +requirement to continue to provide support service, warranty, or updates\line +for a work that has been modified or installed by the recipient, or for\line +the User Product in which it has been modified or installed. 
Access to a\line +network may be denied when the modification itself materially and\line +adversely affects the operation of the network or violates the rules and\line +protocols for communication across the network.\line +\line + Corresponding Source conveyed, and Installation Information provided,\line +in accord with this section must be in a format that is publicly\line +documented (and with an implementation available to the public in\line +source code form), and must require no special password or key for\line +unpacking, reading or copying.\line +\line + 7. Additional Terms.\line +\line + "Additional permissions" are terms that supplement the terms of this\line +License by making exceptions from one or more of its conditions.\line +Additional permissions that are applicable to the entire Program shall\line +be treated as though they were included in this License, to the extent\line +that they are valid under applicable law. If additional permissions\line +apply only to part of the Program, that part may be used separately\line +under those permissions, but the entire Program remains governed by\line +this License without regard to the additional permissions.\line +\line + When you convey a copy of a covered work, you may at your option\line +remove any additional permissions from that copy, or from any part of\line +it. (Additional permissions may be written to require their own\line +removal in certain cases when you modify the work.) You may place\line +additional permissions on material, added by you to a covered work,\line +for which you have or can give appropriate copyright permission.\line +\line + Notwithstanding any other provision of this License, for material you\line +add to a covered work, you may (if authorized by the copyright holders of\line +that material) supplement the terms of this License with terms:\line +\line + a) Disclaiming warranty or limiting liability differently from the\line + terms of sections 15 and 16 of this License; or\line +\line + b) Requiring preservation of specified reasonable legal notices or\line + author attributions in that material or in the Appropriate Legal\line + Notices displayed by works containing it; or\line +\line + c) Prohibiting misrepresentation of the origin of that material, or\line + requiring that modified versions of such material be marked in\line + reasonable ways as different from the original version; or\line +\line + d) Limiting the use for publicity purposes of names of licensors or\line + authors of the material; or\line +\line + e) Declining to grant rights under trademark law for use of some\line + trade names, trademarks, or service marks; or\line +\line + f) Requiring indemnification of licensors and authors of that\line + material by anyone who conveys the material (or modified versions of\line + it) with contractual assumptions of liability to the recipient, for\line + any liability that these contractual assumptions directly impose on\line + those licensors and authors.\line +\line + All other non-permissive additional terms are considered "further\line +restrictions" within the meaning of section 10. If the Program as you\line +received it, or any part of it, contains a notice stating that it is\line +governed by this License along with a term that is a further\line +restriction, you may remove that term. 
If a license document contains\line +a further restriction but permits relicensing or conveying under this\line +License, you may add to a covered work material governed by the terms\line +of that license document, provided that the further restriction does\line +not survive such relicensing or conveying.\line +\line + If you add terms to a covered work in accord with this section, you\line +must place, in the relevant source files, a statement of the\line +additional terms that apply to those files, or a notice indicating\line +where to find the applicable terms.\line +\line + Additional terms, permissive or non-permissive, may be stated in the\line +form of a separately written license, or stated as exceptions;\line +the above requirements apply either way.\line +\line + 8. Termination.\line +\line + You may not propagate or modify a covered work except as expressly\line +provided under this License. Any attempt otherwise to propagate or\line +modify it is void, and will automatically terminate your rights under\line +this License (including any patent licenses granted under the third\line +paragraph of section 11).\line +\line + However, if you cease all violation of this License, then your\line +license from a particular copyright holder is reinstated (a)\line +provisionally, unless and until the copyright holder explicitly and\line +finally terminates your license, and (b) permanently, if the copyright\line +holder fails to notify you of the violation by some reasonable means\line +prior to 60 days after the cessation.\line +\line + Moreover, your license from a particular copyright holder is\line +reinstated permanently if the copyright holder notifies you of the\line +violation by some reasonable means, this is the first time you have\line +received notice of violation of this License (for any work) from that\line +copyright holder, and you cure the violation prior to 30 days after\line +your receipt of the notice.\line +\line + Termination of your rights under this section does not terminate the\line +licenses of parties who have received copies or rights from you under\line +this License. If your rights have been terminated and not permanently\line +reinstated, you do not qualify to receive new licenses for the same\line +material under section 10.\line +\line + 9. Acceptance Not Required for Having Copies.\line +\line + You are not required to accept this License in order to receive or\line +run a copy of the Program. Ancillary propagation of a covered work\line +occurring solely as a consequence of using peer-to-peer transmission\line +to receive a copy likewise does not require acceptance. However,\line +nothing other than this License grants you permission to propagate or\line +modify any covered work. These actions infringe copyright if you do\line +not accept this License. Therefore, by modifying or propagating a\line +covered work, you indicate your acceptance of this License to do so.\line +\line + 10. Automatic Licensing of Downstream Recipients.\line +\line + Each time you convey a covered work, the recipient automatically\line +receives a license from the original licensors, to run, modify and\line +propagate that work, subject to this License. You are not responsible\line +for enforcing compliance by third parties with this License.\line +\line + An "entity transaction" is a transaction transferring control of an\line +organization, or substantially all assets of one, or subdividing an\line +organization, or merging organizations. 
If propagation of a covered\line +work results from an entity transaction, each party to that\line +transaction who receives a copy of the work also receives whatever\line +licenses to the work the party's predecessor in interest had or could\line +give under the previous paragraph, plus a right to possession of the\line +Corresponding Source of the work from the predecessor in interest, if\line +the predecessor has it or can get it with reasonable efforts.\line +\line + You may not impose any further restrictions on the exercise of the\line +rights granted or affirmed under this License. For example, you may\line +not impose a license fee, royalty, or other charge for exercise of\line +rights granted under this License, and you may not initiate litigation\line +(including a cross-claim or counterclaim in a lawsuit) alleging that\line +any patent claim is infringed by making, using, selling, offering for\line +sale, or importing the Program or any portion of it.\line +\line + 11. Patents.\line +\line + A "contributor" is a copyright holder who authorizes use under this\line +License of the Program or a work on which the Program is based. The\line +work thus licensed is called the contributor's "contributor version".\line +\line + A contributor's "essential patent claims" are all patent claims\line +owned or controlled by the contributor, whether already acquired or\line +hereafter acquired, that would be infringed by some manner, permitted\line +by this License, of making, using, or selling its contributor version,\line +but do not include claims that would be infringed only as a\line +consequence of further modification of the contributor version. For\line +purposes of this definition, "control" includes the right to grant\line +patent sublicenses in a manner consistent with the requirements of\line +this License.\line +\line + Each contributor grants you a non-exclusive, worldwide, royalty-free\line +patent license under the contributor's essential patent claims, to\line +make, use, sell, offer for sale, import and otherwise run, modify and\line +propagate the contents of its contributor version.\line +\line + In the following three paragraphs, a "patent license" is any express\line +agreement or commitment, however denominated, not to enforce a patent\line +(such as an express permission to practice a patent or covenant not to\line +sue for patent infringement). To "grant" such a patent license to a\line +party means to make such an agreement or commitment not to enforce a\line +patent against the party.\line +\line + If you convey a covered work, knowingly relying on a patent license,\line +and the Corresponding Source of the work is not available for anyone\line +to copy, free of charge and under the terms of this License, through a\line +publicly available network server or other readily accessible means,\line +then you must either (1) cause the Corresponding Source to be so\line +available, or (2) arrange to deprive yourself of the benefit of the\line +patent license for this particular work, or (3) arrange, in a manner\line +consistent with the requirements of this License, to extend the patent\line +license to downstream recipients. 
"Knowingly relying" means you have\line +actual knowledge that, but for the patent license, your conveying the\line +covered work in a country, or your recipient's use of the covered work\line +in a country, would infringe one or more identifiable patents in that\line +country that you have reason to believe are valid.\line +\line + If, pursuant to or in connection with a single transaction or\line +arrangement, you convey, or propagate by procuring conveyance of, a\line +covered work, and grant a patent license to some of the parties\line +receiving the covered work authorizing them to use, propagate, modify\line +or convey a specific copy of the covered work, then the patent license\line +you grant is automatically extended to all recipients of the covered\line +work and works based on it.\line +\line + A patent license is "discriminatory" if it does not include within\line +the scope of its coverage, prohibits the exercise of, or is\line +conditioned on the non-exercise of one or more of the rights that are\line +specifically granted under this License. You may not convey a covered\line +work if you are a party to an arrangement with a third party that is\line +in the business of distributing software, under which you make payment\line +to the third party based on the extent of your activity of conveying\line +the work, and under which the third party grants, to any of the\line +parties who would receive the covered work from you, a discriminatory\line +patent license (a) in connection with copies of the covered work\line +conveyed by you (or copies made from those copies), or (b) primarily\line +for and in connection with specific products or compilations that\line +contain the covered work, unless you entered into that arrangement,\line +or that patent license was granted, prior to 28 March 2007.\line +\line + Nothing in this License shall be construed as excluding or limiting\line +any implied license or other defenses to infringement that may\line +otherwise be available to you under applicable patent law.\line +\line + 12. No Surrender of Others' Freedom.\line +\line + If conditions are imposed on you (whether by court order, agreement or\line +otherwise) that contradict the conditions of this License, they do not\line +excuse you from the conditions of this License. If you cannot convey a\line +covered work so as to satisfy simultaneously your obligations under this\line +License and any other pertinent obligations, then as a consequence you may\line +not convey it at all. For example, if you agree to terms that obligate you\line +to collect a royalty for further conveying from those to whom you convey\line +the Program, the only way you could satisfy both those terms and this\line +License would be to refrain entirely from conveying the Program.\line +\line + 13. Use with the GNU Affero General Public License.\line +\line + Notwithstanding any other provision of this License, you have\line +permission to link or combine any covered work with a work licensed\line +under version 3 of the GNU Affero General Public License into a single\line +combined work, and to convey the resulting work. The terms of this\line +License will continue to apply to the part which is the covered work,\line +but the special requirements of the GNU Affero General Public License,\line +section 13, concerning interaction through a network will apply to the\line +combination as such.\line +\line + 14. 
Revised Versions of this License.\line +\line + The Free Software Foundation may publish revised and/or new versions of\line +the GNU General Public License from time to time. Such new versions will\line +be similar in spirit to the present version, but may differ in detail to\line +address new problems or concerns.\line +\line + Each version is given a distinguishing version number. If the\line +Program specifies that a certain numbered version of the GNU General\line +Public License "or any later version" applies to it, you have the\line +option of following the terms and conditions either of that numbered\line +version or of any later version published by the Free Software\line +Foundation. If the Program does not specify a version number of the\line +GNU General Public License, you may choose any version ever published\line +by the Free Software Foundation.\line +\line + If the Program specifies that a proxy can decide which future\line +versions of the GNU General Public License can be used, that proxy's\line +public statement of acceptance of a version permanently authorizes you\line +to choose that version for the Program.\line +\line + Later license versions may give you additional or different\line +permissions. However, no additional obligations are imposed on any\line +author or copyright holder as a result of your choosing to follow a\line +later version.\line +\line + 15. Disclaimer of Warranty.\line +\line + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY\line +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT\line +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY\line +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,\line +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\line +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM\line +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF\line +ALL NECESSARY SERVICING, REPAIR OR CORRECTION.\line +\line + 16. Limitation of Liability.\line +\line + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING\line +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS\line +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY\line +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE\line +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF\line +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD\line +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),\line +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF\line +SUCH DAMAGES.\line +\line + 17. 
Interpretation of Sections 15 and 16.\line +\line + If the disclaimer of warranty and limitation of liability provided\line +above cannot be given local legal effect according to their terms,\line +reviewing courts shall apply local law that most closely approximates\line +an absolute waiver of all civil liability in connection with the\line +Program, unless a warranty or assumption of liability accompanies a\line +copy of the Program in return for a fee.\line +\line + END OF TERMS AND CONDITIONS\line +\line + How to Apply These Terms to Your New Programs\line +\line + If you develop a new program, and you want it to be of the greatest\line +possible use to the public, the best way to achieve this is to make it\line +free software which everyone can redistribute and change under these terms.\line +\line + To do so, attach the following notices to the program. It is safest\line +to attach them to the start of each source file to most effectively\line +state the exclusion of warranty; and each file should have at least\line +the "copyright" line and a pointer to where the full notice is found.\line +\line + \line + Copyright (C) \line +\line + This program is free software: you can redistribute it and/or modify\line + it under the terms of the GNU General Public License as published by\line + the Free Software Foundation, either version 3 of the License, or\line + (at your option) any later version.\line +\line + This program is distributed in the hope that it will be useful,\line + but WITHOUT ANY WARRANTY; without even the implied warranty of\line + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\line + GNU General Public License for more details.\line +\line + You should have received a copy of the GNU General Public License\line + along with this program. If not, see .\line +\line +Also add information on how to contact you by electronic and paper mail.\line +\line + If the program does terminal interaction, make it output a short\line +notice like this when it starts in an interactive mode:\line +\line + Copyright (C) \line + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.\line + This is free software, and you are welcome to redistribute it\line + under certain conditions; type `show c' for details.\line +\line +The hypothetical commands `show w' and `show c' should show the appropriate\line +parts of the General Public License. Of course, your program's commands\line +might be different; for a GUI interface, you would use an "about box".\line +\line + You should also get your employer (if you work as a programmer) or school,\line +if any, to sign a "copyright disclaimer" for the program, if necessary.\line +For more information on this, and how to apply and follow the GNU GPL, see\line +.\line +\line + The GNU General Public License does not permit incorporating your program\line +into proprietary programs. If your program is a subroutine library, you\line +may consider it more useful to permit linking proprietary applications with\line +the library. If this is what you want to do, use the GNU Lesser General\line +Public License instead of this License. 
But first, please read\line +.\line +} diff --git a/packaging/windows/install-dependencies.ps1 b/packaging/windows/install-dependencies.ps1 index 66ec73160..7250e2bcd 100644 --- a/packaging/windows/install-dependencies.ps1 +++ b/packaging/windows/install-dependencies.ps1 @@ -82,3 +82,24 @@ if ($LastExitcode -ne 0) { exit 1 } } + +Write-Host "Installing WiX toolset" +dotnet tool install -g wix + +if ($LastExitcode -ne 0) { + exit 1 +} + +Write-Host "Adding WiX extensions" + +wix extension -g add WixToolset.Util.wixext + +if ($LastExitcode -ne 0) { + exit 1 +} + +wix extension -g add WixToolset.UI.wixext + +if ($LastExitcode -ne 0) { + exit 1 +} diff --git a/packaging/windows/installer.nsi b/packaging/windows/installer.nsi index 88d160a1d..a462d4997 100644 --- a/packaging/windows/installer.nsi +++ b/packaging/windows/installer.nsi @@ -3,7 +3,7 @@ !include "FileFunc.nsh" Name "Netdata" -Outfile "netdata-installer.exe" +Outfile "netdata-installer-x64.exe" InstallDir "$PROGRAMFILES\Netdata" RequestExecutionLevel admin @@ -19,8 +19,8 @@ RequestExecutionLevel admin !insertmacro MUI_PAGE_LICENSE "C:\msys64\cloud.txt" !insertmacro MUI_PAGE_LICENSE "C:\msys64\gpl-3.0.txt" !insertmacro MUI_PAGE_DIRECTORY -!insertmacro MUI_PAGE_INSTFILES Page Custom NetdataConfigPage NetdataConfigLeave +!insertmacro MUI_PAGE_INSTFILES !insertmacro MUI_PAGE_FINISH !insertmacro MUI_UNPAGE_CONFIRM @@ -29,15 +29,60 @@ Page Custom NetdataConfigPage NetdataConfigLeave !insertmacro MUI_LANGUAGE "English" +!define INSTALLERLOCKFILEGUID "f787d5ef-5c41-4dc0-a115-a1fb654fad1c" + +# https://nsis.sourceforge.io/Allow_only_one_installer_instance +!macro SingleInstanceFile + !if "${NSIS_PTR_SIZE}" > 4 + !include "Util.nsh" + !else ifndef IntPtrCmp + !define IntPtrCmp IntCmp + !endif + + !ifndef NSIS_PTR_SIZE & SYSTYPE_PTR + !define SYSTYPE_PTR i ; NSIS v2.x + !else + !define /ifndef SYSTYPE_PTR p ; NSIS v3.0+ + !endif + + !if "${NSIS_CHAR_SIZE}" < 2 + Push "$TEMP\${INSTALLERLOCKFILEGUID}.lock" + !else + Push "$APPDATA\${INSTALLERLOCKFILEGUID}.lock" + !endif + + System::Call 'KERNEL32::CreateFile(ts,i0x40000000,i0,${SYSTYPE_PTR}0,i4,i0x04000000,${SYSTYPE_PTR}0)${SYSTYPE_PTR}.r0' + ${IntPtrCmp} $0 -1 "" launch launch + System::Call 'kernel32::AttachConsole(i -1)i.r0' + ${If} $0 != 0 + System::Call 'kernel32::GetStdHandle(i -11)i.r0' + FileWrite $0 "The installer is already running.$\r$\n" + ${EndIf} + Quit + launch: +!macroend + +var hCtrlButton var hStartMsys var startMsys +var hCloudURL +var cloudURL var hCloudToken var cloudToken -var hCloudRoom -var cloudRoom +var hCloudRooms +var cloudRooms +var hProxy +var proxy +var hInsecure +var insecure +var accepted + +var avoidClaim Function .onInit + !insertmacro SingleInstanceFile + nsExec::ExecToLog '$SYSDIR\sc.exe stop Netdata' pop $0 ${If} $0 == 0 @@ -46,10 +91,71 @@ Function .onInit ${EndIf} StrCpy $startMsys ${BST_UNCHECKED} + StrCpy $insecure ${BST_UNCHECKED} + StrCpy $avoidClaim ${BST_UNCHECKED} + StrCpy $accepted ${BST_UNCHECKED} + + ${GetParameters} $R0 + ${GetOptions} $R0 "/s" $0 + IfErrors +2 0 + SetSilent silent + ClearErrors + + ${GetOptions} $R0 "/t" $0 + IfErrors +2 0 + StrCpy $startMsys ${BST_CHECKED} + ClearErrors + + ${GetOptions} $R0 "/i" $0 + IfErrors +2 0 + StrCpy $insecure ${BST_CHECKED} + ClearErrors + + ${GetOptions} $R0 "/a" $0 + IfErrors +2 0 + StrCpy $accepted ${BST_CHECKED} + ClearErrors + + ${GetOptions} $R0 "/token=" $0 + IfErrors +2 0 + StrCpy $cloudToken $0 + ClearErrors + + ${GetOptions} $R0 "/rooms=" $0 + IfErrors +2 0 + StrCpy $cloudRooms $0 + 
ClearErrors + + ${GetOptions} $R0 "/proxy=" $0 + IfErrors +2 0 + StrCpy $proxy $0 + ClearErrors + + IfSilent checklicense goahead + checklicense: + ${If} $accepted == ${BST_UNCHECKED} + System::Call 'kernel32::AttachConsole(i -1)i.r0' + ${If} $0 != 0 + System::Call 'kernel32::GetStdHandle(i -11)i.r0' + FileWrite $0 "You must accept the licenses (/A) to continue.$\r$\n" + ${EndIf} + Quit + ${EndIf} + goahead: +FunctionEnd + +Function un.onInit +!insertmacro SingleInstanceFile +FunctionEnd + +Function ShowHelp +Pop $0 + MessageBox MB_ICONQUESTION|MB_OK "$\"Cloud URL$\" The Netdata Cloud base URL.$\n$\n$\"Proxy URL$\" set the proxy server address to use if your network requires one.$\n$\n$\"Insecure connection$\" disable verification of the server's certificate chain and host name.$\n$\n$\"Open Terminal$\" open MSYS2 terminal to run additional commands after installation." IDOK endHelp + endHelp: FunctionEnd Function NetdataConfigPage - !insertmacro MUI_HEADER_TEXT "Netdata configuration" "Claim your agent on Netdata Cloud" + !insertmacro MUI_HEADER_TEXT "Netdata configuration" "Connect your Agent to your Netdata Cloud Space" nsDialogs::Create 1018 Pop $0 @@ -57,40 +163,59 @@ Function NetdataConfigPage Abort ${EndIf} - ${NSD_CreateLabel} 0 0 100% 12u "Enter your Token and Cloud Room." - ${NSD_CreateLabel} 0 15% 100% 12u "Optionally, you can open a terminal to execute additional commands." + IfFileExists "$INSTDIR\etc\netdata\claim.conf" NotNeeded - ${NSD_CreateLabel} 0 35% 20% 10% "Token" + ${NSD_CreateLabel} 0 0 100% 12u "Enter your Space's Claim Token and the Room IDs where you want to add the Agent." + ${NSD_CreateLabel} 0 12% 100% 12u "If no Room IDs are specified, the Agent will be added to the $\"All nodes$\" Room." + + ${NSD_CreateLabel} 0 30% 20% 10% "Claim Token" Pop $0 - ${NSD_CreateText} 21% 35% 79% 10% "" + ${NSD_CreateText} 21% 30% 79% 10% "" Pop $hCloudToken - ${NSD_CreateLabel} 0 55% 20% 10% "Room" + ${NSD_CreateLabel} 0 45% 20% 10% "Room ID(s)" + Pop $0 + ${NSD_CreateText} 21% 45% 79% 10% "" + Pop $hCloudRooms + + ${NSD_CreateLabel} 0 60% 20% 10% "Proxy URL" Pop $0 - ${NSD_CreateText} 21% 55% 79% 10% "" - Pop $hCloudRoom + ${NSD_CreateText} 21% 60% 79% 10% "" + Pop $hProxy - ${NSD_CreateCheckbox} 0 70% 100% 10u "Open terminal" + ${NSD_CreateLabel} 0 75% 20% 10% "Cloud URL" + Pop $0 + ${NSD_CreateText} 21% 75% 79% 10% "https://app.netdata.cloud" + Pop $hCloudURL + + ${NSD_CreateCheckbox} 0 92% 25% 10u "Insecure connection" + Pop $hInsecure + + ${NSD_CreateCheckbox} 50% 92% 25% 10u "Open terminal" Pop $hStartMsys + + ${NSD_CreateButton} 90% 90% 30u 15u "&Help" + Pop $hCtrlButton + ${NSD_OnClick} $hCtrlButton ShowHelp + + Goto EndDialogDraw + + NotNeeded: + StrCpy $avoidClaim ${BST_CHECKED} + ${NSD_CreateLabel} 0 0 100% 12u "Your host has already been claimed. You can proceed with the update." 
+
+  EndDialogDraw:
 nsDialogs::Show
 FunctionEnd
 
 Function NetdataConfigLeave
-  ${NSD_GetText} $hCloudToken $cloudToken
-  ${NSD_GetText} $hCloudRoom $cloudRoom
-  ${NSD_GetState} $hStartMsys $startMsys
-
-  StrLen $0 $cloudToken
-  StrLen $1 $cloudRoom
-  ${If} $0 == 125
-  ${AndIf} $0 == 36
-  # We should start our new claiming software here
-  MessageBox MB_OK "$cloudToken | $cloudRoom | $startMsys"
-  ${EndIf}
-
-  ${If} $startMsys == 1
-    nsExec::ExecToLog '$INSTDIR\msys2.exe'
-    pop $0
+  ${If} $avoidClaim == ${BST_UNCHECKED}
+    ${NSD_GetText} $hCloudToken $cloudToken
+    ${NSD_GetText} $hCloudURL $cloudURL
+    ${NSD_GetText} $hCloudRooms $cloudRooms
+    ${NSD_GetText} $hProxy $proxy
+    ${NSD_GetState} $hStartMsys $startMsys
+    ${NSD_GetState} $hInsecure $insecure
 ${EndIf}
 FunctionEnd
 
@@ -132,36 +257,125 @@ Function NetdataUninstallRegistry
  end:
 FunctionEnd
 
+Function InstallDLL
+  ; Check if certutil is available
+  nsExec::ExecToStack 'where certutil'
+  Pop $R0
+  StrCmp $R0 "" NoCertUtil FoundCertUtil
+
+  NoCertUtil:
+    DetailPrint "certutil not found, assuming files are different."
+    Goto CopyDLL
+
+  FoundCertUtil:
+    ; Calculate hash of the existing DLL
+    nsExec::ExecToStack 'certutil -hashfile "$SYSDIR\wevt_netdata.dll" MD5'
+    Pop $R0
+
+    ; Calculate hash of the new DLL
+    nsExec::ExecToStack 'certutil -hashfile "$INSTDIR\usr\bin\wevt_netdata.dll" MD5'
+    Pop $R1
+
+    StrCmp $R0 $R1 SetPermissions
+
+  CopyDLL:
+    ClearErrors
+    CopyFiles /SILENT "$INSTDIR\usr\bin\wevt_netdata.dll" "$SYSDIR"
+    IfErrors RetryPrompt SetPermissions
+
+  RetryPrompt:
+    ; Branch directly on the MessageBox result; it is not left in $R0.
+    MessageBox MB_RETRYCANCEL|MB_ICONEXCLAMATION "Failed to copy wevt_netdata.dll, probably because it is in use. Please close the Event Viewer (or other Event Log applications) and press Retry." IDRETRY CopyDLL IDCANCEL ExitInstall
+    Goto End
+
+  SetPermissions:
+    nsExec::ExecToLog 'icacls "$SYSDIR\wevt_netdata.dll" /grant "NT SERVICE\EventLog":R'
+    Goto End
+
+  ExitInstall:
+    Abort
+
+  End:
+FunctionEnd
+
+Function InstallManifest
+  IfFileExists "$INSTDIR\usr\bin\wevt_netdata_manifest.xml" CopyManifest End
+
+  CopyManifest:
+    ClearErrors
+    CopyFiles /SILENT "$INSTDIR\usr\bin\wevt_netdata_manifest.xml" "$SYSDIR"
+    IfErrors RetryPrompt RegisterManifest
+
+  RetryPrompt:
+    ; Branch directly on the MessageBox result; it is not left in $R0.
+    MessageBox MB_RETRYCANCEL|MB_ICONEXCLAMATION "Failed to copy wevt_netdata_manifest.xml." IDRETRY CopyManifest IDCANCEL ExitInstall
+
+  RegisterManifest:
+    nsExec::ExecToLog 'wevtutil im "$SYSDIR\wevt_netdata_manifest.xml" "/mf:$SYSDIR\wevt_netdata.dll" "/rf:$SYSDIR\wevt_netdata.dll"'
+    Goto End
+
+  ExitInstall:
+    Abort
+
+  End:
+FunctionEnd
+
 Section "Install Netdata"
-  SetOutPath $INSTDIR
-  SetCompress off
+  SetOutPath $INSTDIR
+  SetCompress off
 
-  File /r "C:\msys64\opt\netdata\*.*"
+  File /r "C:\msys64\opt\netdata\*.*"
 
-  ClearErrors
+  ClearErrors
  nsExec::ExecToLog '$SYSDIR\sc.exe create Netdata binPath= "$INSTDIR\usr\bin\netdata.exe" start= delayed-auto'
  pop $0
  ${If} $0 != 0
-  DetailPrint "Warning: Failed to create Netdata service."
+    DetailPrint "Warning: Failed to create Netdata service."
  ${EndIf}
 
-  ClearErrors
+  ClearErrors
  nsExec::ExecToLog '$SYSDIR\sc.exe description Netdata "Real-time system monitoring service"'
  pop $0
  ${If} $0 != 0
-  DetailPrint "Warning: Failed to add Netdata service description."
+    DetailPrint "Warning: Failed to add Netdata service description."
 ${EndIf}
 
-  ClearErrors
+  WriteUninstaller "$INSTDIR\Uninstall.exe"
+
+  Call NetdataUninstallRegistry
+  Call InstallDLL
+  Call InstallManifest
+
+  StrLen $0 $cloudToken
+  StrLen $1 $cloudRooms
+  ${If} $0 == 0
+  ${OrIf} $1 == 0
+    Goto runCmds
+  ${EndIf}
+
+  ${If} $0 == 135
+  ${AndIf} $1 >= 36
+    nsExec::ExecToLog '$INSTDIR\usr\bin\NetdataClaim.exe /T $cloudToken /R $cloudRooms /P $proxy /I $insecure /U $cloudURL'
+    pop $0
+  ${Else}
+    MessageBox MB_OK "The Cloud information does not have the expected length."
+  ${EndIf}
+
+  runCmds:
+  ClearErrors
  nsExec::ExecToLog '$SYSDIR\sc.exe start Netdata'
  pop $0
  ${If} $0 != 0
-  DetailPrint "Warning: Failed to start Netdata service."
+    MessageBox MB_OK "Warning: Failed to start Netdata service."
  ${EndIf}
 
-  WriteUninstaller "$INSTDIR\Uninstall.exe"
-
-  Call NetdataUninstallRegistry
+  ${If} $startMsys == ${BST_CHECKED}
+    nsExec::ExecToLog '$INSTDIR\msys2.exe'
+    pop $0
+  ${EndIf}
 SectionEnd
 
 Section "Uninstall"
@@ -179,8 +393,27 @@ Section "Uninstall"
  DetailPrint "Warning: Failed to delete Netdata service."
  ${EndIf}
 
-  RMDir /r "$INSTDIR"
+  ; Check if the manifest exists before uninstalling it
+  IfFileExists "$SYSDIR\wevt_netdata_manifest.xml" ManifestExistsForUninstall ManifestNotExistsForUninstall
+
+ManifestExistsForUninstall:
+  nsExec::ExecToLog 'wevtutil um "$SYSDIR\wevt_netdata_manifest.xml"'
+  pop $0
+  ${If} $0 != 0
+    DetailPrint "Warning: Failed to uninstall the event manifest."
+  ${EndIf}
+  Delete "$SYSDIR\wevt_netdata_manifest.xml"
+  Delete "$SYSDIR\wevt_netdata.dll"
+  Goto DoneUninstall
+
+ManifestNotExistsForUninstall:
+  DetailPrint "Manifest not found, skipping manifest uninstall."
+
+DoneUninstall:
+
+  ; Remove files
+  SetOutPath "$PROGRAMFILES"
+  RMDir /r /REBOOTOK "$INSTDIR"
 
  DeleteRegKey HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Netdata"
 SectionEnd
-
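Taken together, the switches parsed in `.onInit` above make unattended deployment possible. A sketch of what a silent install could look like from an elevated MSYS2 or CMD prompt, on the assumption that the switches behave exactly as parsed above (the token, room, and proxy values are placeholders):

```bash
# Hypothetical unattended run; /s = silent, /a = accept the licenses
# (mandatory in silent mode), /t would additionally open an MSYS2 terminal.
./netdata-installer-x64.exe /s /a \
    /token=YOUR_CLAIM_TOKEN \
    /rooms=ROOM_ID_1,ROOM_ID_2 \
    /proxy=http://proxy.example:3128
```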
diff --git a/packaging/windows/msi-extension.bat b/packaging/windows/msi-extension.bat
new file mode 100644
index 000000000..73707f4e9
--- /dev/null
+++ b/packaging/windows/msi-extension.bat
@@ -0,0 +1,2 @@
+wix extension -g add WixToolset.Util.wixext/5.0.2
+wix extension -g add WixToolset.UI.wixext/5.0.2
diff --git a/packaging/windows/msys2-dependencies.sh b/packaging/windows/msys2-dependencies.sh
index 95a1952df..9a32ea4ec 100755
--- a/packaging/windows/msys2-dependencies.sh
+++ b/packaging/windows/msys2-dependencies.sh
@@ -15,11 +15,23 @@ pacman -S --noconfirm --needed \
   base-devel \
   cmake \
   git \
+  ninja \
+  python \
   liblz4-devel \
   libutil-linux \
   libutil-linux-devel \
   libyaml-devel \
   libzstd-devel \
+  msys2-devel \
+  msys/brotli-devel \
+  msys/libuv-devel \
+  msys/pcre2-devel \
+  msys/zlib-devel \
+  msys/libcurl-devel \
+  openssl-devel \
+  protobuf-devel \
+  mingw-w64-x86_64-toolchain \
+  mingw-w64-ucrt-x86_64-toolchain \
   mingw64/mingw-w64-x86_64-brotli \
   mingw64/mingw-w64-x86_64-go \
   mingw64/mingw-w64-x86_64-libuv \
@@ -29,16 +41,6 @@ pacman -S --noconfirm --needed \
   mingw64/mingw-w64-x86_64-pcre2 \
   mingw64/mingw-w64-x86_64-protobuf \
   mingw64/mingw-w64-x86_64-zlib \
-  mingw-w64-ucrt-x86_64-toolchain \
-  mingw-w64-x86_64-toolchain \
-  msys2-devel \
-  msys/brotli-devel \
-  msys/libuv-devel \
-  msys/pcre2-devel \
-  msys/zlib-devel \
-  openssl-devel \
-  protobuf-devel \
-  python \
   ucrt64/mingw-w64-ucrt-x86_64-brotli \
   ucrt64/mingw-w64-ucrt-x86_64-go \
   ucrt64/mingw-w64-ucrt-x86_64-libuv \
diff --git a/packaging/windows/ncul1.rtf b/packaging/windows/ncul1.rtf
new file mode 100644
index 000000000..983f0d84a
--- /dev/null
+++ b/packaging/windows/ncul1.rtf
@@ -0,0 +1,47 @@
+{\rtf1\ansi\deff0 {\fonttbl {\f0 Times New Roman;}} +\paperh15840 \paperw12240 +\margl720 \margr720 \margt720 \margb720 +\f0\fs24 +# Netdata Cloud UI License v1.0 (NCUL1)\line +\line +## Acceptance\line +By using the software, you agree to all of the terms and conditions below.\line +\line +## Copyright License\line +The licensor grants you a non-exclusive, royalty-free, worldwide, non-sublicensable, non-transferable license to use, copy, distribute, make available the software, in each case subject to the limitations, restrictions and conditions below.\line +\line +## Limitations\line +This license allows you to use the Software only to interface with the licensor's other software components, such as Netdata Agents and Netdata Cloud. Any use with replacements for these components is not permitted.\line +\line +## Restrictions\line +The Software is provided in a binary form for use by end-users. You may not reverse engineer, decompile, disassemble, or modify the Software. The Software is licensed as a single product and its component parts may not be separated.\line +\line +## Patents\line +If you or your company make any written claim that the software infringes or contributes to infringement of any patent, your license for the software granted under these terms ends immediately. If your company makes such a claim, your license ends immediately for work on behalf of your company.\line +\line +## Notices\line +You must ensure that anyone who gets a copy of the Software from you also gets a copy of these terms.\line +\line +## No Other Rights\line +These terms do not imply any licenses other than those expressly granted in these terms.\line +\line +## Termination\line +If you use the Software in violation of any of these terms, such use is not licensed, and your licenses will automatically terminate. If the licensor provides you with a notice of your violation, and you cease all violations of this license no later than 30 days after you receive that notice, your licenses will be reinstated retroactively. However, if you violate these terms after such reinstatement, any additional violation of these terms will cause your licenses to terminate automatically and permanently.\line +\line +## No Warranties and No Liability\line +The software comes "As Is", without any express or implied warranties of any kind, including but not limited to any warranties of merchantability, non-infringement, or fitness for a particular purpose. The licensor will not be liable to you for any damages arising out of these terms or the use or nature of the Software, under any kind of legal claim.\line +\line +## Open Source Components\line +The software includes certain third party open source components. Each of these components is subject to its own license. The list of open-source components used by Netdata Cloud UI is [here](https://app.netdata.cloud/3D_PARTY_LICENSES.txt).\line +\line +## Definitions\line +The "licensor" is Netdata Inc., the entity offering these terms, and the "**software**" is the Netdata Cloud UI software the licensor makes available under these terms, including any portion of it.\line +\line +"**you**" refers to the individual or entity agreeing to these terms.\line +\line +"**your company**" is any legal entity, sole proprietorship, or other kind of organization that you work for, plus all organizations that have control over, are under the control of, or are under common control with that organization. 
"**Control**" means ownership of substantially all the assets of an entity, or the power to direct its management and policies by vote, contract, or otherwise. Control can be direct or indirect.\line +\line +"**your licenses**" are all the licenses granted to you for the software under these terms.\line +\line +"**use**" means anything you do with the software requiring one of your licenses.\line +} diff --git a/packaging/windows/netdata.wxs.in b/packaging/windows/netdata.wxs.in new file mode 100644 index 000000000..59369ac86 --- /dev/null +++ b/packaging/windows/netdata.wxs.in @@ -0,0 +1,276 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/packaging/windows/package-windows.sh b/packaging/windows/package-windows.sh index 03f72a692..997bb7d4f 100755 --- a/packaging/windows/package-windows.sh +++ b/packaging/windows/package-windows.sh @@ -2,21 +2,20 @@ repo_root="$(dirname "$(dirname "$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null && pwd -P)")")" -if [ -n "${BUILD_DIR}" ]; then - build="${BUILD_DIR}" -elif [ -n "${OSTYPE}" ]; then - if [ -n "${MSYSTEM}" ]; then - build="${repo_root}/build-${OSTYPE}-${MSYSTEM}" - else - build="${repo_root}/build-${OSTYPE}" - fi -elif [ "$USER" = "vk" ]; then - build="${repo_root}/build" -else - build="${repo_root}/build" +# shellcheck source=./win-build-dir.sh +. "${repo_root}/packaging/windows/win-build-dir.sh" + +set -eu -o pipefail + +# Regenerate keys everytime there is an update +if [ -d /opt/netdata/etc/pki/ ]; then + rm -rf /opt/netdata/etc/pki/ fi -set -exu -o pipefail +# Remove previous installation of msys2 script +if [ -f /opt/netdata/usr/bin/bashbug ]; then + rm -rf /opt/netdata/usr/bin/bashbug +fi ${GITHUB_ACTIONS+echo "::group::Installing"} cmake --install "${build}" @@ -34,18 +33,12 @@ if [ ! -f "/gpl-3.0.txt" ]; then fi if [ ! 
-f "/cloud.txt" ]; then - curl -o /cloud.txt "https://raw.githubusercontent.com/netdata/netdata/master/src/web/gui/v2/LICENSE.md" + curl -o /cloud.txt "https://app.netdata.cloud/LICENSE.txt" fi ${GITHUB_ACTIONS+echo "::endgroup::"} -${GITHUB_ACTIONS+echo "::group::Packaging"} +${GITHUB_ACTIONS+echo "::group::Copy Files"} tar -xf /msys2-latest.tar.zst -C /opt/netdata/ || exit 1 cp -R /opt/netdata/msys64/* /opt/netdata/ || exit 1 rm -rf /opt/netdata/msys64/ -NDVERSION=$"$(grep 'CMAKE_PROJECT_VERSION:STATIC' "${build}/CMakeCache.txt"| cut -d= -f2)" -NDMAJORVERSION=$"$(grep 'CMAKE_PROJECT_VERSION_MAJOR:STATIC' "${build}/CMakeCache.txt"| cut -d= -f2)" -NDMINORVERSION=$"$(grep 'CMAKE_PROJECT_VERSION_MINOR:STATIC' "${build}/CMakeCache.txt"| cut -d= -f2)" - -/mingw64/bin/makensis.exe -DCURRVERSION="${NDVERSION}" -DMAJORVERSION="${NDMAJORVERSION}" -DMINORVERSION="${NDMINORVERSION}" "${repo_root}/packaging/windows/installer.nsi" ${GITHUB_ACTIONS+echo "::endgroup::"} - diff --git a/packaging/windows/package.ps1 b/packaging/windows/package.ps1 index 828e105f1..15ee29a12 100644 --- a/packaging/windows/package.ps1 +++ b/packaging/windows/package.ps1 @@ -14,3 +14,26 @@ $env:CHERE_INVOKING = 'yes' if ($LastExitcode -ne 0) { exit 1 } + +if ($null -eq $env:BUILD_DIR) { + $builddir = & $msysbash -l "$PSScriptRoot\get-win-build-path.sh" + + if ($LastExitcode -ne 0) { + exit 1 + } +} else { + $builddir = $env:BUILD_DIR +} + +Push-Location "$builddir" + +$wixarch = "x64" + +wix build -arch $wixarch -ext WixToolset.Util.wixext -ext WixToolset.UI.wixext -out "$PSScriptRoot\netdata-$wixarch.msi" netdata.wxs + +if ($LastExitcode -ne 0) { + Pop-Location + exit 1 +} + +Pop-Location diff --git a/packaging/windows/resources/netdata.manifest.in b/packaging/windows/resources/netdata.manifest.in new file mode 100644 index 000000000..15796e744 --- /dev/null +++ b/packaging/windows/resources/netdata.manifest.in @@ -0,0 +1,29 @@ + + + + Netdata is a high-performance, cloud-native, and on-premises observability platform designed to monitor metrics and logs with unparalleled efficiency. + + + + + + + + + + + + + + + + + + + + + + diff --git a/packaging/windows/resources/netdata.rc b/packaging/windows/resources/netdata.rc new file mode 100644 index 000000000..0bc75d99b --- /dev/null +++ b/packaging/windows/resources/netdata.rc @@ -0,0 +1,3 @@ +#include "winuser.h" +1 RT_MANIFEST "netdata.manifest" +11 ICON "../NetdataWhite.ico" diff --git a/packaging/windows/resources/netdata_claim.manifest.in b/packaging/windows/resources/netdata_claim.manifest.in new file mode 100644 index 000000000..ae4e040cb --- /dev/null +++ b/packaging/windows/resources/netdata_claim.manifest.in @@ -0,0 +1,16 @@ + + + + Netdata Claim! + + + + + + + + + diff --git a/packaging/windows/resources/netdata_claim.rc b/packaging/windows/resources/netdata_claim.rc new file mode 100644 index 000000000..7ba02833a --- /dev/null +++ b/packaging/windows/resources/netdata_claim.rc @@ -0,0 +1,3 @@ +#include "winuser.h" +1 RT_MANIFEST "netdata_claim.manifest" +11 ICON "../NetdataWhite.ico" diff --git a/packaging/windows/resources/netdatacli.manifest.in b/packaging/windows/resources/netdatacli.manifest.in new file mode 100644 index 000000000..ff2f48d31 --- /dev/null +++ b/packaging/windows/resources/netdatacli.manifest.in @@ -0,0 +1,16 @@ + + + + The netdatacli executable provides a simple way to control the Netdata agent's operation. 
diff --git a/packaging/windows/resources/netdatacli.rc b/packaging/windows/resources/netdatacli.rc new file mode 100644 index 000000000..baa9e8d62 --- /dev/null +++ b/packaging/windows/resources/netdatacli.rc @@ -0,0 +1,3 @@ +#include "winuser.h" +1 RT_MANIFEST "netdatacli.manifest" +11 ICON "../NetdataWhite.ico" diff --git a/packaging/windows/win-build-dir.sh b/packaging/windows/win-build-dir.sh index 09dd6b977..035ab7d9f 100644 --- a/packaging/windows/win-build-dir.sh +++ b/packaging/windows/win-build-dir.sh @@ -1,11 +1,7 @@ #!/bin/bash if [ -n "${BUILD_DIR}" ]; then - if (echo "${BUILD_DIR}" | grep -q -E "^[A-Z]:\\\\"); then - build="$(echo "${BUILD_DIR}" | sed -e 's/\\/\//g' -e 's/^\([A-Z]\):\//\/\1\//' -)" - else - build="${BUILD_DIR}" - fi + build="$(cygpath -u "${BUILD_DIR}")" elif [ -n "${OSTYPE}" ]; then if [ -n "${MSYSTEM}" ]; then build="${REPO_ROOT}/build-${OSTYPE}-${MSYSTEM}" diff --git a/src/aclk/README.md b/src/aclk/README.md index 0a260868c..450f9dced 100644 --- a/src/aclk/README.md +++ b/src/aclk/README.md @@ -4,13 +4,13 @@ The Agent-Cloud link (ACLK) is the mechanism responsible for securely connecting through Netdata Cloud. The ACLK establishes an outgoing secure WebSocket (WSS) connection to Netdata Cloud on port `443`. The ACLK is encrypted, safe, and _is only established if you connect your node_. -The Cloud App lives at app.netdata.cloud which currently resolves to the following list of IPs: +The Cloud App lives at app.netdata.cloud which currently resolves to the following list of IPs: - 54.198.178.11 - 44.207.131.212 -- 44.196.50.41 +- 44.196.50.41 -> ### Caution +> **Caution** > >This list of IPs can change without notice; we strongly advise you to whitelist the following domains: `app.netdata.cloud`, `mqtt.netdata.cloud`. If this is not an option in your case, always verify the current domain resolution (e.g. via the `host` command). @@ -28,106 +28,18 @@ However, to be able to offer the stunning visualizations and advanced functional ## Enable and configure the ACLK -The ACLK is enabled by default, with its settings automatically configured and stored in the Agent's memory. No file is -created at `/var/lib/netdata/cloud.d/cloud.conf` until you either connect a node or create it yourself. The default -configuration uses two settings: - -```conf -[global] - enabled = yes - cloud base url = https://app.netdata.cloud -``` +The ACLK is enabled by default, with its settings automatically configured and stored in the Agent's memory. If your Agent needs to use a proxy to access the internet, you must [set up a proxy for -connecting to cloud](/src/claim/README.md#connect-through-a-proxy). +connecting to cloud](/src/claim/README.md). You can configure the following keys in the `netdata.conf` section `[cloud]`: -``` + +```text [cloud] - statistics = yes - query thread count = 2 + statistics = yes + query thread count = 2 ``` - `statistics` enables/disables ACLK-related statistics and their charts. You can disable this to save some space in the database and slightly reduce memory usage of the Netdata Agent. - `query thread count` specifies the number of threads to process cloud queries. Increasing this setting is useful for nodes with many children (streaming), which can expect to handle more queries (and/or more complicated queries). - -## Disable the ACLK - -You have two options if you prefer to disable the ACLK and not use Netdata Cloud.
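As an upfront illustration of the first of the two options detailed below, here is a minimal install-time opt-out sketch; only the `--disable-cloud` flag comes from the removed text, while the kickstart download URL is an assumption:

```bash
# Hypothetical invocation: fetch the kickstart installer and pass --disable-cloud,
# so the ACLK thread is never started (see "Disable at installation" below).
curl -o /tmp/netdata-kickstart.sh https://my-netdata.io/kickstart.sh  # URL is an assumption
sh /tmp/netdata-kickstart.sh --disable-cloud
```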
- -### Disable at installation - -You can pass the `--disable-cloud` parameter to the Agent installation when using a kickstart script -([kickstart.sh](/packaging/installer/methods/kickstart.md), or a [manual installation from -Git](/packaging/installer/methods/manual.md). - -When you pass this parameter, the installer does not download or compile any extra libraries. Once running, the Agent -kills the thread responsible for the ACLK and connecting behavior, and behaves as though the ACLK, and thus Netdata Cloud, -does not exist. - -### Disable at runtime - -You can change a runtime setting in your `cloud.conf` file to disable the ACLK. This setting only stops the Agent from -attempting any connection via the ACLK, but does not prevent the installer from downloading and compiling the ACLK's -dependencies. - -The file typically exists at `/var/lib/netdata/cloud.d/cloud.conf`, but can change if you set a prefix during -installation. To disable the ACLK, open that file and change the `enabled` setting to `no`: - -```conf -[global] - enabled = no -``` - -If the file at `/var/lib/netdata/cloud.d/cloud.conf` doesn't exist, you need to create it. - -Copy and paste the first two lines from below, which will change your prompt to `cat`. - -```bash -cd /var/lib/netdata/cloud.d -cat > cloud.conf << EOF -``` - -Copy and paste in lines 3-6, and after the final `EOF`, hit **Enter**. The final line must contain only `EOF`. Hit **Enter** again to return to your normal prompt with the newly-created file. - -To get your normal prompt back, the final line -must contain only `EOF`. - -```bash -[global] - enabled = no - cloud base url = https://app.netdata.cloud -EOF -``` - -You also need to change the file's permissions. Use `grep "run as user" /etc/netdata/netdata.conf` to figure out which -user your Agent runs as (typically `netdata`), and replace `netdata:netdata` as shown below if necessary: - -```bash -sudo chmod 0770 cloud.conf -sudo chown netdata:netdata cloud.conf -``` - -Restart your Agent to disable the ACLK. - -### Re-enable the ACLK - -If you first disable the ACLK and any Cloud functionality and then decide you would like to use Cloud, you must either -[reinstall Netdata](/packaging/installer/REINSTALL.md) with Cloud enabled or change the runtime setting in your -`cloud.conf` file. - -If you passed `--disable-cloud` to `netdata-installer.sh` during installation, you must -[reinstall](/packaging/installer/REINSTALL.md) your Agent. Use the same method as before, but pass `--require-cloud` to -the installer. When installation finishes you can [connect your node](/src/claim/README.md#how-to-connect-a-node). - -If you changed the runtime setting in your `var/lib/netdata/cloud.d/cloud.conf` file, edit the file again and change -`enabled` to `yes`: - -```conf -[global] - enabled = yes -``` - -Restart your Agent and [connect your node](/src/claim/README.md#how-to-connect-a-node). 
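The runtime steps described in the removed text above can be condensed into one sketch (assuming the default `/var/lib/netdata` prefix, the `netdata` run-as user, and a systemd-managed service; adjust to your installation):

```bash
# Create cloud.conf with the ACLK disabled, per the instructions above.
cd /var/lib/netdata/cloud.d
cat > cloud.conf << EOF
[global]
  enabled = no
  cloud base url = https://app.netdata.cloud
EOF

# Match the permissions and ownership the text calls for.
sudo chmod 0770 cloud.conf
sudo chown netdata:netdata cloud.conf

# Restart the Agent for the change to take effect
# (the systemctl command is an assumption; use your init system's equivalent).
sudo systemctl restart netdata
```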
- - diff --git a/src/aclk/aclk-schemas/.gitignore b/src/aclk/aclk-schemas/.gitignore new file mode 100644 index 000000000..bd495e9a7 --- /dev/null +++ b/src/aclk/aclk-schemas/.gitignore @@ -0,0 +1,11 @@ +*.pb.go + +#Agent +*.pb.cc +*.pb.h +*.pb.o +*.pb.Po +.dirstamp + +#Jetbrains +.idea diff --git a/src/aclk/aclk-schemas/.travis.yml b/src/aclk/aclk-schemas/.travis.yml new file mode 100644 index 000000000..7c99550fe --- /dev/null +++ b/src/aclk/aclk-schemas/.travis.yml @@ -0,0 +1,4 @@ +--- +language: minimal +install: make deps +script: make CI diff --git a/src/aclk/aclk-schemas/LICENSE b/src/aclk/aclk-schemas/LICENSE new file mode 100644 index 000000000..f288702d2 --- /dev/null +++ b/src/aclk/aclk-schemas/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/> + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable.
Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. 
+ + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. 
+ + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+ + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + <program> Copyright (C) <year> <name of author> + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/philosophy/why-not-lgpl.html>. diff --git a/src/aclk/aclk-schemas/Makefile b/src/aclk/aclk-schemas/Makefile new file mode 100644 index 000000000..8f4003070 --- /dev/null +++ b/src/aclk/aclk-schemas/Makefile @@ -0,0 +1,74 @@ +SHELL := /usr/bin/env bash -o pipefail + +# This controls the location of the cache. +PROJECT := cloud-schemas +# This controls the remote HTTPS git location to compare against for breaking changes in CI. +# +# Most CI providers only clone the branch under test and to a certain depth, so when +# running buf check breaking in CI, it is generally preferable to compare against +# the remote repository directly. +# +# Basic authentication is available, see https://buf.build/docs/inputs#https for more details. +HTTPS_GIT := https://github.com/netdata/cloud-schemas.git + +# This controls the version of buf to install and use. +BUF_VERSION := 0.6.0 + +### Everything below this line is meant to be static, i.e. only adjust the above variables. ### + +UNAME_OS := $(shell uname -s) +UNAME_ARCH := $(shell uname -m) +# Buf will be cached to ~/.cache/buf-example.
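+# Illustrative usage (an editorial note, not part of the upstream file): with +# the cache variables below in place, a typical local run is +# +#   make deps    # download the pinned buf binary into this cache +#   make local   # lint ./proto and check for breaking changes vs master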
+CACHE_BASE := $(HOME)/.cache/$(PROJECT) +# This allows switching between e.g. a Docker container and your local setup without overwriting. +CACHE := $(CACHE_BASE)/$(UNAME_OS)/$(UNAME_ARCH) +# The location where buf will be installed. +CACHE_BIN := $(CACHE)/bin +# Marker files are put into this directory to denote the current version of binaries that are installed. +CACHE_VERSIONS := $(CACHE)/versions + +# Update the $PATH so we can use buf directly +export PATH := $(abspath $(CACHE_BIN)):$(PATH) + +# BUF points to the marker file for the installed version. +# +# If BUF_VERSION is changed, the binary will be re-downloaded. +BUF := $(CACHE_VERSIONS)/buf/$(BUF_VERSION) +$(BUF): + @rm -f $(CACHE_BIN)/buf + @mkdir -p $(CACHE_BIN) + curl -sSL \ + "https://github.com/bufbuild/buf/releases/download/v$(BUF_VERSION)/buf-$(UNAME_OS)-$(UNAME_ARCH)" \ + -o "$(CACHE_BIN)/buf" + chmod +x "$(CACHE_BIN)/buf" + @rm -rf $(dir $(BUF)) + @mkdir -p $(dir $(BUF)) + @touch $(BUF) + +.DEFAULT_GOAL := local + +# deps allows us to install deps without running any checks. + +.PHONY: deps +deps: $(BUF) + +# local is what we run when testing locally. +# This does breaking change detection against our local git repository. + +.PHONY: local +local: $(BUF) + buf check lint ./proto + buf check breaking --against '.git#branch=master' + +# CI is what we run when testing in most CI providers. +# This does breaking change detection against our git repository. + +.PHONY: CI +CI: $(BUF) + buf check lint ./proto + buf check breaking --against ".git#branch=master" + +.PHONY: clean +clean: + git clean -xdf + rm -rf $(CACHE_BASE) \ No newline at end of file diff --git a/src/aclk/aclk-schemas/README.md b/src/aclk/aclk-schemas/README.md new file mode 100644 index 000000000..aa6188977 --- /dev/null +++ b/src/aclk/aclk-schemas/README.md @@ -0,0 +1,2 @@ +# aclk-schemas +Protobuf schemas used in the ACLK connection diff --git a/src/aclk/aclk-schemas/buf.yml b/src/aclk/aclk-schemas/buf.yml new file mode 100644 index 000000000..532053d91 --- /dev/null +++ b/src/aclk/aclk-schemas/buf.yml @@ -0,0 +1,9 @@ +build: + roots: + - proto +lint: + use: + - DEFAULT +breaking: + use: + - FILE diff --git a/src/aclk/aclk-schemas/proto/aclk/v1/lib.proto b/src/aclk/aclk-schemas/proto/aclk/v1/lib.proto new file mode 100644 index 000000000..f32c32c6e --- /dev/null +++ b/src/aclk/aclk-schemas/proto/aclk/v1/lib.proto @@ -0,0 +1,22 @@ +syntax = "proto3"; + +package aclk_lib.v1; + +import "google/protobuf/timestamp.proto"; + +option go_package = "aclk_lib/v1;aclklib"; + +// ACLKMessagePosition is used by sequenced messages to define their exact position +message ACLKMessagePosition { + uint64 sequence_id = 1; + // auto generated in Agent's DB upon sequence_id creation + google.protobuf.Timestamp seq_id_created_at = 2; + uint64 previous_sequence_id = 3; +} + +message Capability { + string name = 1; + uint32 version = 2; + // version == 0 is equivalent to not having the capability at all + bool enabled = 3; +} diff --git a/src/aclk/aclk-schemas/proto/agent/v1/cmds.proto b/src/aclk/aclk-schemas/proto/agent/v1/cmds.proto new file mode 100644 index 000000000..c37c00c3a --- /dev/null +++ b/src/aclk/aclk-schemas/proto/agent/v1/cmds.proto @@ -0,0 +1,79 @@ +syntax = "proto3"; +option go_package = "agent/v1;agent"; + +package agent.v1; + +import "google/protobuf/timestamp.proto"; +import "proto/aclk/v1/lib.proto"; + +message CancelPendingRequest { + // must match the ID sent with the request originally made; + // other than this, the agent will not put conditions on it + // and
will treat it as an opaque string (it simply has to match). + // However, this doesn't mean there are no conditions on the id + // made on the request side + string request_id = 1; + + // time when the cancellation request was generated + google.protobuf.Timestamp timestamp = 2; + + // optional; might be useful for debugging purposes + string trace_id = 3; +} + +// AgentCommand is sent from the Cloud to the Agent at `/agent/{claim_id}/inbound/v1/cmd/AgentCommand`; +// the message includes the resource that the Cloud needs to GET from the Agent HTTP API along with related metadata +message AgentCommand { + // the topic on which the Cloud awaits the AgentCommandResponse. + // example: `/svc/agent-data-ctrl/2d7b7edd-561e-4aec-8ac1-466a585520f5/resp` + string callback_topic = 1; + // the topic on which the Cloud awaits the AgentCommandAck. + // example: `/svc/agent-data-ctrl/2d7b7edd-561e-4aec-8ac1-466a585520f5/resp` + string ack_topic = 2; + // unique identifier for the AgentCommand + // example: `617038b3-7c2a-4617-a78f-ab37bd820198` + string message_id = 3; + // defined in milliseconds, the time the Agent has to respond before the Cloud + // considers the request timed out + // example: `60000` + uint64 timeout = 4; + // defined in milliseconds, the time the Agent has to send back to the Cloud + // an AgentCommandAck message signaling that it is still working on the request + // example: `3000` + uint64 ack_timeout = 5; + // the requested Agent resource + // example: `/api/v2/data?query_params_go_here` + string resource = 6; +} + +// AgentCommandAck is sent from the Agent to the Cloud at predefined intervals (`AgentCommand.ack_timeout`) to the predefined topic (`AgentCommand.ack_topic`), +// signaling that the Agent is still working to serve an AgentCommand (referenced by the message_id) that the Cloud sent +message AgentCommandAck { + // unique identifier referencing the AgentCommand that the Agent is still serving + // example: `617038b3-7c2a-4617-a78f-ab37bd820198` + string message_id = 1; + // the timestamp when the Agent created this AgentCommandAck message + google.protobuf.Timestamp created_at = 2; + // integer revealing the completion progress of the AgentCommand with the given message_id + // example: `25` + uint32 progress_percent = 3; +} + +// AgentCommandResponse is sent from the Agent to the Cloud on the AgentCommand's `callback_topic`; +// the message includes the Agent's HTTP API response for the requested resource along with related metadata +message AgentCommandResponse { + // unique identifier for the AgentCommand + // example: `617038b3-7c2a-4617-a78f-ab37bd820198` + string message_id = 1; + // the (http) status code of the Agent's API response + // example: `200` + uint32 status_code = 2; + // the dumped raw (http) response the Agent's API returned + bytes response = 3; + // the Agent's timestamp (aka legacy `timestamp`) + google.protobuf.Timestamp timestamp = 4; + // the timestamp when the Agent received the AgentCommand for execution (aka legacy `t-rx`) + google.protobuf.Timestamp received_at = 5; + // the number of microseconds the Agent needed to execute the HTTP request of the AgentCommand (aka legacy `t-exec`) + uint64 exec_time = 6; +} diff --git a/src/aclk/aclk-schemas/proto/agent/v1/connection.proto b/src/aclk/aclk-schemas/proto/agent/v1/connection.proto new file mode 100644 index 000000000..a792214ff --- /dev/null +++ b/src/aclk/aclk-schemas/proto/agent/v1/connection.proto @@ -0,0 +1,71 @@ +syntax = "proto3"; +option go_package
= "agent/v1;agent"; + +package agent.v1; + +import "google/protobuf/timestamp.proto"; +import "google/protobuf/duration.proto"; +import "proto/aclk/v1/lib.proto"; + +message UpdateAgentConnection { + string claim_id = 1; + bool reachable = 2; + + int64 session_id = 3; + + ConnectionUpdateSource update_source = 4; + + // mqtt_broker_addr shard to use for reaching the agent + // cloud injects this information + string mqtt_broker_addr = 5; + + google.protobuf.Timestamp updated_at = 6; + + // vmq_instance_id broker shard to use for reaching the agent + // cloud injects this information + int32 vmq_instance_id = 7; + + // > 15 optional fields: + // How long the system was running until connection (only applicable when reachable=true) + google.protobuf.Duration system_uptime = 15; + + // How long the netdata agent was running until connection (only applicable when reachable=true) + google.protobuf.Duration agent_uptime = 16; + + repeated aclk_lib.v1.Capability capabilities = 17; +} + +message SendNodeInstances { + string claim_id = 1; + Config config = 2; + // The ID of the space where this agent is claimed. + string space_id = 3; +} + +// ConnectionUpdateSource is to determine whether the connection update was issued +enum ConnectionUpdateSource { + // CONNECTION_UPDATE_SOURCE_UNSPECIFIED acts as default value for protobuf and is never specified + CONNECTION_UPDATE_SOURCE_UNSPECIFIED = 0; + // CONNECTION_UPDATE_SOURCE_AGENT A direct message from an agent + CONNECTION_UPDATE_SOURCE_AGENT = 1; + // CONNECTION_UPDATE_SOURCE_LWT message delivered as the Last Will and Testiment from MQTT broker if an agent connection with the broker is lost + CONNECTION_UPDATE_SOURCE_LWT = 2; + // CONNECTION_UPDATE_SOURCE_HEURISTIC A cloud generated message to sanitize incorrect internal state + CONNECTION_UPDATE_SOURCE_HEURISTIC = 3; +} + +message AgentConfig { + string dashboards = 1; + string alert_notifications = 2; +} + +message CloudConfig { + string dashboards = 1; + string alert_notifications = 2; +} + +message Config { + reserved 1 to 3; + AgentConfig agent_config = 4; + CloudConfig cloud_config = 5; +} diff --git a/src/aclk/aclk-schemas/proto/agent/v1/disconnect.proto b/src/aclk/aclk-schemas/proto/agent/v1/disconnect.proto new file mode 100644 index 000000000..852ef702a --- /dev/null +++ b/src/aclk/aclk-schemas/proto/agent/v1/disconnect.proto @@ -0,0 +1,16 @@ +syntax = "proto3"; + +package agent.v1; + +import "google/protobuf/timestamp.proto"; + +option go_package = "agent/v1;agent"; + +// Sent by Cloud to instruct Agent to disconnect ASAP +message DisconnectReq { + uint64 reconnect_after_seconds = 1; + bool permaban = 2; + google.protobuf.Timestamp created_at = 3; + uint32 error_code = 4; + string error_description = 5; +} diff --git a/src/aclk/aclk-schemas/proto/alarm/v1/config.proto b/src/aclk/aclk-schemas/proto/alarm/v1/config.proto new file mode 100644 index 000000000..97050fecd --- /dev/null +++ b/src/aclk/aclk-schemas/proto/alarm/v1/config.proto @@ -0,0 +1,61 @@ +syntax = "proto3"; + +package alarms.v1; + +option go_package = "alarms/v1;alarms"; + +message SendAlarmConfiguration{ + string config_hash = 1; +} + +message ProvideAlarmConfiguration { + string config_hash = 1; + AlarmConfiguration config = 2; +} + +message AlarmConfiguration{ + string alarm = 1; + string template = 2; + string on_chart = 3; + + string classification = 4; + string type = 5; + string component = 6; + + string os = 7; + string hosts = 8; + string plugin = 9; + string module = 10; + string charts = 11; + string families = 
12 [deprecated=true]; + string lookup = 13; + string every = 14; + string units = 15; + + string green = 16; + string red = 17; + + string calculation_expr = 18; + string warning_expr = 19; + string critical_expr = 20; + + string recipient = 21; + string exec = 22; + string delay = 23; + string repeat = 24; + string info = 25; + string options = 26; + string host_labels = 27; + + //parsed values from above config values + //indicated by p_ + int32 p_db_lookup_after = 28; + int32 p_db_lookup_before = 29; + string p_db_lookup_dimensions = 30; + string p_db_lookup_method = 31; + string p_db_lookup_options = 32; + int32 p_update_every = 33; + + string chart_labels = 34; + string summary = 35; +} diff --git a/src/aclk/aclk-schemas/proto/alarm/v1/stream.proto b/src/aclk/aclk-schemas/proto/alarm/v1/stream.proto new file mode 100644 index 000000000..f782becdc --- /dev/null +++ b/src/aclk/aclk-schemas/proto/alarm/v1/stream.proto @@ -0,0 +1,151 @@ +syntax = "proto3"; + +package alarms.v1; + +import "google/protobuf/timestamp.proto"; + +option go_package = "alarms/v1;alarms"; + +message SendAlarmLogHealth { + string node_id = 1; +} + +message AlarmLogHealth { + string claim_id = 1; + string node_id = 2; + bool enabled = 3; + AlarmLogStatus status = 4; + LogEntries log_entries = 5; +} + +message LogEntries { + int64 first_sequence_id = 1; + google.protobuf.Timestamp first_when = 2; + + int64 last_sequence_id = 3; + google.protobuf.Timestamp last_when = 4; +} + +enum AlarmLogStatus { + ALARM_LOG_STATUS_UNSPECIFIED = 0; + ALARM_LOG_STATUS_RUNNING = 1; + ALARM_LOG_STATUS_IDLE = 2; +} + +message StartAlarmStreaming { + string node_id = 1; + uint64 batch_id = 2 [deprecated=true]; + uint64 start_sequnce_id = 3 [deprecated=true]; + // Instructs the agent to sync all configured alarms + bool resets = 4; + uint64 version = 5; +} + +message SendAlarmCheckpoint { + string node_id = 1; + string claim_id = 2; + uint64 version = 3; +} + +message AlarmCheckpoint { + string node_id = 1; + string claim_id = 2; + bytes checksum = 3; +} + +message AlarmLogEntry { + string node_id = 1; + string claim_id = 2; + + // The chart's id field + string chart = 3; + string name = 4; + string family = 5 [deprecated=true]; + uint64 batch_id = 6 [deprecated=true]; + uint64 sequence_id = 7 [deprecated=true]; + uint64 when = 8; + + string config_hash = 9; + + int32 utc_offset = 10; + string timezone = 11; + + // Paths that can be custom for the same alarm, but depend on installation path for each user. Should be here or in config ? + string exec_path = 12; + string conf_source = 13; + string command = 14; + + // In seconds, uint32 is safe ? + uint32 duration = 15; + uint32 non_clear_duration = 16; + + AlarmStatus status = 17; + AlarmStatus old_status = 18; + uint64 delay = 19; + uint64 delay_up_to_timestamp = 20; + // Todo: verify that we need these. sequence_id doesn't suffice? 
+ // uint64 updated_by_id = 12; + // uint64 updates_id = 13; + uint64 last_repeat = 21; + bool silenced = 22; + + // Check if string values are needed + string value_string = 23; + string old_value_string = 24; + + double value = 25; + double old_value = 26; + + // Updated alarm entry, when the status of the alarm has been updated by a later entry + bool updated = 27; + + // Rendered_info + string rendered_info = 28; + + // The chart's context field + string chart_context = 29; + + // Counter of alert transitions for this alert chain + uint64 event_id = 30; + + // A unique uuid for this alert event + string transition_id = 31; + + // The chart's name field + string chart_name = 32; + + // The rendered summary + string summary = 33; + uint64 alert_version = 34; +} + +enum AlarmStatus { + ALARM_STATUS_NULL = 0; + ALARM_STATUS_UNKNOWN = 1; + ALARM_STATUS_REMOVED = 2; + ALARM_STATUS_NOT_A_NUMBER = 3; + ALARM_STATUS_CLEAR = 4; + ALARM_STATUS_WARNING = 5; + ALARM_STATUS_CRITICAL = 6; +} + +// SendAlarmSnapshot: sent from the cloud to the agent to initiate an AlarmSnapshot of the current alarms back to the cloud +message SendAlarmSnapshot { + string node_id = 1; + string claim_id = 2; + uint64 snapshot_id = 3 [deprecated=true]; + uint64 sequence_id = 4 [deprecated=true]; + string snapshot_uuid = 5; +} + +// Agent responds with AlarmSnapshot to a SendAlarmSnapshot message +message AlarmSnapshot{ + string node_id = 1; + string claim_id = 2; + uint64 snapshot_id = 3 [deprecated=true]; // Same id from SendAlarmSnapshot message + uint32 chunks = 4; // In case the full snapshot cannot fit in a single message, indicates the total number of messages for this snapshot_id + uint32 chunk_size = 5; // How many alerts this chunk contains + uint32 chunk = 6; // Chunk index of this message + repeated AlarmLogEntry alarms = 7; // a list of AlarmLogEntry messages + string snapshot_uuid = 8; +} diff --git a/src/aclk/aclk-schemas/proto/chart/v1/config.proto b/src/aclk/aclk-schemas/proto/chart/v1/config.proto new file mode 100644 index 000000000..f0c5e3a35 --- /dev/null +++ b/src/aclk/aclk-schemas/proto/chart/v1/config.proto @@ -0,0 +1,37 @@ +syntax = "proto3"; + +package chart.v1; + +option go_package = "chart/config/v1;chartconfig"; + +// UpdateChartConfigs is a command sent from the cloud to the agent containing the list of chart configs missing on the cloud side +message UpdateChartConfigs { + // claim_id, node_id pair is used to identify the Node Instance + string claim_id = 1; + string node_id = 2; + // list of config hashes missing from cloud and requested from the agent + repeated string config_hashes = 3; +} + +message ChartConfigsUpdated { + repeated ChartConfigUpdated configs = 1; +} + +message ChartConfigUpdated { + string type = 1; + string family = 2; + string context = 3; + string title = 4; + uint64 priority = 5; + string plugin = 6; + string module = 7; + ChartType chart_type = 8; + string units = 9; + string config_hash = 10; +} + +enum ChartType { + LINE = 0; + AREA = 1; + STACKED = 2; +} diff --git a/src/aclk/aclk-schemas/proto/chart/v1/dimension.proto b/src/aclk/aclk-schemas/proto/chart/v1/dimension.proto new file mode 100644 index 000000000..8bcb564b8 --- /dev/null +++ b/src/aclk/aclk-schemas/proto/chart/v1/dimension.proto @@ -0,0 +1,24 @@ +syntax = "proto3"; + +package chart.v1; + +import "google/protobuf/timestamp.proto"; + +import "proto/aclk/v1/lib.proto"; + +option go_package = "chart/dimension/v1;chartdimension"; + +// ChartDimensionUpdated is a single event sent from the Agent to the Cloud containing chart dimension data.
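+// (Illustrative example, not part of the upstream schema: the "user" dimension
+// of a "system.cpu" chart would be announced with id="user",
+// chart_id="system.cpu", and an unset (null) last_timestamp for as long as it
+// is still being collected live.)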
diff --git a/src/aclk/aclk-schemas/proto/chart/v1/dimension.proto b/src/aclk/aclk-schemas/proto/chart/v1/dimension.proto
new file mode 100644
index 000000000..8bcb564b8
--- /dev/null
+++ b/src/aclk/aclk-schemas/proto/chart/v1/dimension.proto
@@ -0,0 +1,24 @@
+syntax = "proto3";
+
+package chart.v1;
+
+import "google/protobuf/timestamp.proto";
+
+import "proto/aclk/v1/lib.proto";
+
+option go_package = "chart/dimension/v1;chartdimension";
+
+// ChartDimensionUpdated is a single event sent from the Agent to the Cloud containing chart dimension data.
+//
+// ChartDimensionUpdated messages are dispatched in bulk to the Cloud, wrapped in ChartsAndDimensionsUpdated messages.
+message ChartDimensionUpdated {
+    string id = 1;
+    string chart_id = 2;
+    string node_id = 3;
+    string claim_id = 4;
+    string name = 5;
+    google.protobuf.Timestamp created_at = 6;
+    // a null value means that the dimension is currently being collected (live)
+    google.protobuf.Timestamp last_timestamp = 7;
+    aclk_lib.v1.ACLKMessagePosition position = 8;
+}
diff --git a/src/aclk/aclk-schemas/proto/chart/v1/instance.proto b/src/aclk/aclk-schemas/proto/chart/v1/instance.proto
new file mode 100644
index 000000000..25c99e7c7
--- /dev/null
+++ b/src/aclk/aclk-schemas/proto/chart/v1/instance.proto
@@ -0,0 +1,32 @@
+syntax = "proto3";
+
+package chart.v1;
+
+import "proto/aclk/v1/lib.proto";
+
+option go_package = "chart/instance/v1;chartinstance";
+
+// ChartInstanceUpdated is a single event sent from the Agent to the Cloud containing chart instance data.
+//
+// ChartInstanceUpdated messages are dispatched in bulk to the Cloud, wrapped in ChartsAndDimensionsUpdated messages.
+message ChartInstanceUpdated {
+    string id = 1;
+    string claim_id = 2;
+    string node_id = 3;
+    string name = 4;
+    map<string, string> chart_labels = 5;
+    MemoryMode memory_mode = 6;
+    // in seconds
+    uint32 update_every_interval = 7;
+    string config_hash = 8;
+    aclk_lib.v1.ACLKMessagePosition position = 9;
+}
+
+enum MemoryMode {
+    NONE = 0;
+    RAM = 1;
+    MAP = 2;
+    SAVE = 3;
+    ALLOC = 4;
+    DB_ENGINE = 5;
+}
diff --git a/src/aclk/aclk-schemas/proto/chart/v1/stream.proto b/src/aclk/aclk-schemas/proto/chart/v1/stream.proto
new file mode 100644
index 000000000..9473538f2
--- /dev/null
+++ b/src/aclk/aclk-schemas/proto/chart/v1/stream.proto
@@ -0,0 +1,86 @@
+syntax = "proto3";
+
+package chart.v1;
+
+import "google/protobuf/timestamp.proto";
+
+import "proto/chart/v1/instance.proto";
+import "proto/chart/v1/dimension.proto";
+
+option go_package = "chart/stream/v1;chartstream";
+
+// StreamChartsAndDimensions is a Command produced by the Cloud, consumed by the Agent.
+//
+// It instructs the Agent to start sending ChartsAndDimensionsUpdated messages for a NodeInstance
+// after the last sequence_id that the Cloud has successfully ingested.
+message StreamChartsAndDimensions {
+    // the claim_id, node_id pair is used to identify the Node Instance
+    string claim_id = 1;
+    string node_id = 2;
+
+    // sequence_id is the last verified sequence sent by the Agent
+    uint64 sequence_id = 3;
+    // batch_id identifies the stream_id and gets incremented every time the Cloud sends a new StreamChartsAndDimensions command
+    uint64 batch_id = 4;
+    // seq_id_created_at is the timestamp autogenerated in the Agent's DB upon sequence_id creation
+    google.protobuf.Timestamp seq_id_created_at = 5;
+}
+
+
+// ChartsAndDimensionsAck is an Event produced by the Cloud, consumed by the Agent.
+//
+// This Event is an acknowledgment from the Cloud side that Chart messages up to a specific last_sequence_id
+// have been successfully ingested, and could potentially be deleted from the Agent's DB.
+message ChartsAndDimensionsAck {
+    string claim_id = 1;
+    string node_id = 2;
+    // the last verified stored message's seq_id
+    uint64 last_sequence_id = 3;
+}
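Taken together, StreamChartsAndDimensions and ChartsAndDimensionsAck implement resumable delivery: the cloud names the last sequence_id it ingested, and the agent either resumes right after it or forces a reset (via the ResetChartMessages command defined next). A sketch of the agent-side decision; the storage helpers are hypothetical, only the ResetReason values come from the schema:

#include <stdbool.h>
#include <stdint.h>

// Hypothetical local-store queries and transport; not part of this patch.
extern bool     seq_id_exists(uint64_t sequence_id);
extern uint64_t oldest_stored_seq_id(void);   // 0 when the store is empty
extern void     resume_stream_from(uint64_t next_seq_id, uint64_t batch_id);
extern void     send_reset_chart_messages(int reason);

#define DB_EMPTY          0   // mirrors enum ResetReason below
#define SEQ_ID_NOT_EXISTS 1

static void handle_stream_charts(uint64_t last_acked_seq_id, uint64_t batch_id) {
    if (oldest_stored_seq_id() == 0)             // nothing stored yet
        send_reset_chart_messages(DB_EMPTY);
    else if (!seq_id_exists(last_acked_seq_id))  // cloud and agent diverged
        send_reset_chart_messages(SEQ_ID_NOT_EXISTS);
    else
        resume_stream_from(last_acked_seq_id + 1, batch_id);
}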
+// ResetChartMessages is a Command produced by the Agent, consumed by the Cloud.
+//
+// This Command instructs the Cloud to clear its Chart state for a specific NodeInstance and re-sync,
+// because of a ResetReason.
+message ResetChartMessages {
+    // the claim_id, node_id pair is used to identify the Node Instance
+    string claim_id = 1;
+    string node_id = 2;
+
+    ResetReason reason = 3;
+}
+
+enum ResetReason {
+    DB_EMPTY = 0;
+    SEQ_ID_NOT_EXISTS = 1;
+    TIMESTAMP_MISMATCH = 2;
+}
+
+// ChartsAndDimensionsUpdated is a wrapper Event (a `fat` message) produced by the Agent, consumed by the Cloud.
+//
+// It potentially includes a collection of ChartInstanceUpdated messages and/or a collection of ChartDimensionUpdated messages.
+message ChartsAndDimensionsUpdated {
+    repeated chart.v1.ChartInstanceUpdated charts = 1;
+    repeated chart.v1.ChartDimensionUpdated dimensions = 2;
+    uint64 batch_id = 3;
+}
+
+// RetentionUpdated includes the available retentions (in seconds) of the dimensions - of a specific node instance and memory mode -
+// on a per-update_every level.
+// This message is sent upon Agent database rotation events to inform the Cloud about the newly updated data retentions
+// of a node instance's dimensions.
+message RetentionUpdated {
+    // the claim_id, node_id pair is used to identify the Node Instance
+    string claim_id = 1;
+    string node_id = 2;
+    // the memory_mode used by the node instance's chart instances
+    chart.v1.MemoryMode memory_mode = 3;
+    // this mapping identifies the newly updated available retention (in seconds) of the node instance's dimensions:
+    // the keys are the update_every categories of the various dimensions (1, 2, 4, 10 etc.),
+    // and the values are the available retention (in seconds) of each dimension belonging to the update_every category
+    // denoted by the key
+    map<uint32, uint32> interval_durations = 4;
+    // the timestamp when the db rotation event took place. Can be used in conjunction with the interval_durations
+    // to compute the beginning of each `update_every` group's retention
+    google.protobuf.Timestamp rotation_timestamp = 5;
+}
diff --git a/src/aclk/aclk-schemas/proto/context/v1/context.proto b/src/aclk/aclk-schemas/proto/context/v1/context.proto
new file mode 100644
index 000000000..eb771f8eb
--- /dev/null
+++ b/src/aclk/aclk-schemas/proto/context/v1/context.proto
@@ -0,0 +1,57 @@
+syntax = "proto3";
+
+package context.v1;
+
+option go_package = "context/v1;context";
+
+// ContextsUpdated is an Event produced by the Agent, consumed by the Cloud.
+//
+// It contains a collection of ContextUpdated messages for a specific NodeInstance.
+message ContextsUpdated {
+    // contextUpdates contains the collection of context updates
+    repeated ContextUpdated contextUpdates = 1;
+    // the claim_id, node_id pair identifies the node instance
+    string claim_id = 2;
+    string node_id = 3;
+    // version_hash is the contexts version_hash result the cloud should
+    // get after applying the updates in this message.
+    uint64 version_hash = 4;
+    // created_at is an always-increasing number used to compare which
+    // version_hash is more recent between multiple ContextsUpdated
+    // messages. Bigger means more recent.
+    uint64 created_at = 5;
+}
+
+// ContextUpdated contains context data.
+message ContextUpdated {
+    // context id
+    string id = 1;
+    // context version is an epoch in seconds.
+    uint64 version = 2;
+    // first_entry, last_entry are epochs in seconds
+    uint64 first_entry = 3;
+    uint64 last_entry = 4;
+    // the deleted flag is used to signal a context deletion
+    bool deleted = 5;
+    // context configuration fields
+    string title = 6;
+    uint64 priority = 7;
+    string chart_type = 8;
+    string units = 9;
+    string family = 10;
+}
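Agent and cloud must arrive at the same version_hash from the same set of (context id, version) pairs, or the checkpoint in stream.proto below will force a full re-sync. The patch does not show the hash function itself; what follows is a plausible sketch only, chosen to be order-independent because the two sides may iterate their context sets in different orders:

#include <stdint.h>
#include <stddef.h>

struct ctx { const char *id; uint64_t version; };

// Scramble one (id, version) pair into a 64-bit value.
static uint64_t mix(const char *id, uint64_t version) {
    uint64_t h = version * 0x9e3779b97f4a7c15ULL;    // golden-ratio multiply
    for (const char *p = id; *p; p++)
        h = (h ^ (uint8_t)*p) * 0x100000001b3ULL;    // FNV-style fold
    return h;
}

// XOR is commutative, so the result is independent of iteration order.
static uint64_t contexts_version_hash(const struct ctx *ctxs, size_t n) {
    uint64_t h = 0;
    for (size_t i = 0; i < n; i++)
        h ^= mix(ctxs[i].id, ctxs[i].version);
    return h;
}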
+// ContextsSnapshot is an Event produced by the Agent, consumed by the Cloud.
+//
+// It contains a snapshot of the existing contexts on the Agent.
+// The snapshot version and the context versions are epochs in seconds, so we can
+// identify whether a context version was generated after a specific snapshot.
+message ContextsSnapshot {
+    // contexts contains the collection of existing contexts
+    repeated ContextUpdated contexts = 1;
+    // the claim_id, node_id pair identifies the node instance
+    string claim_id = 2;
+    string node_id = 3;
+    // version is an epoch in seconds
+    uint64 version = 4;
+}
diff --git a/src/aclk/aclk-schemas/proto/context/v1/stream.proto b/src/aclk/aclk-schemas/proto/context/v1/stream.proto
new file mode 100644
index 000000000..a6e7e3abf
--- /dev/null
+++ b/src/aclk/aclk-schemas/proto/context/v1/stream.proto
@@ -0,0 +1,34 @@
+syntax = "proto3";
+
+package context.v1;
+
+option go_package = "context/v1;context";
+
+// ContextsCheckpoint is a Command produced by the Cloud, consumed by the Agent.
+//
+// It informs the Agent of the contexts' version_hash that the Cloud has for a specific NodeInstance.
+message ContextsCheckpoint {
+    // the claim_id, node_id pair is used to identify the NodeInstance.
+    string claim_id = 1;
+    string node_id = 2;
+    // version_hash tells the Agent the current version hash for the contexts received;
+    // if the version hash calculated by the Agent is different, the Agent will request
+    // to re-sync all contexts.
+    uint64 version_hash = 3;
+}
+
+// StopStreamingContexts is a Command produced by the Cloud, consumed by the Agent.
+//
+// It instructs the Agent to stop sending ContextsUpdated messages for a NodeInstance
+// for a given reason.
+message StopStreamingContexts {
+    // the claim_id, node_id pair is used to identify the node instance
+    string claim_id = 1;
+    string node_id = 2;
+
+    StopStreamingContextsReason reason = 3;
+}
+
+enum StopStreamingContextsReason {
+    RATE_LIMIT_EXCEEDED = 0;
+}
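StopStreamingContexts currently has a single reason, RATE_LIMIT_EXCEEDED, which implies the agent should hold context updates for a while rather than stop forever. One way an agent might honor it is a monotonic pause deadline, sketched below; the cooldown length is an assumption for illustration, not something the patch defines:

#include <stdbool.h>
#include <time.h>

#define CTX_RATE_LIMIT_COOLDOWN_SEC 60   // assumed cooldown; not from the patch

static time_t ctx_paused_until;          // 0 means streaming is allowed

static time_t now_monotonic(void) {
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return ts.tv_sec;
}

// Called when a StopStreamingContexts command arrives.
static void on_stop_streaming_contexts(int reason) {
    (void)reason;                        // only RATE_LIMIT_EXCEEDED exists today
    ctx_paused_until = now_monotonic() + CTX_RATE_LIMIT_COOLDOWN_SEC;
}

// Checked before dispatching each ContextsUpdated message.
static bool may_send_contexts_updated(void) {
    return now_monotonic() >= ctx_paused_until;
}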
diff --git a/src/aclk/aclk-schemas/proto/nodeinstance/connection/v1/connection.proto b/src/aclk/aclk-schemas/proto/nodeinstance/connection/v1/connection.proto
new file mode 100644
index 000000000..f0c02461e
--- /dev/null
+++ b/src/aclk/aclk-schemas/proto/nodeinstance/connection/v1/connection.proto
@@ -0,0 +1,37 @@
+syntax = "proto3";
+option go_package = "nodeinstance/connection/v1;nodeinstanceconnection";
+
+package nodeinstance.v1;
+
+import "google/protobuf/timestamp.proto";
+import "proto/aclk/v1/lib.proto";
+
+message UpdateNodeInstanceConnection {
+    string claim_id = 1;
+    string node_id = 2;
+
+    // liveness: whether node data are actively streamed to the agent.
+    bool liveness = 3;
+
+    // queryable: whether the agent has data about the node.
+    bool queryable = 4;
+
+    int64 session_id = 5;
+
+    google.protobuf.Timestamp updated_at = 6;
+
+    // mqtt_broker_addr: broker shard to use for reaching the agent;
+    // the cloud injects this information.
+    string mqtt_broker_addr = 7;
+
+    // vmq_instance_id: broker shard to use for reaching the agent;
+    // the cloud injects this information.
+    int32 vmq_instance_id = 8;
+
+    // hops is the number of streaming hops between the collection of node data
+    // and the claimed agent. Zero if no streaming is involved.
+    int32 hops = 9;
+
+    // capabilities of the node instance, NOT of the node or the agent
+    repeated aclk_lib.v1.Capability capabilities = 10;
+}
diff --git a/src/aclk/aclk-schemas/proto/nodeinstance/create/v1/creation.proto b/src/aclk/aclk-schemas/proto/nodeinstance/create/v1/creation.proto
new file mode 100644
index 000000000..922337154
--- /dev/null
+++ b/src/aclk/aclk-schemas/proto/nodeinstance/create/v1/creation.proto
@@ -0,0 +1,32 @@
+syntax = "proto3";
+option go_package = "node_instance/creation/v1;node_instancecreation";
+
+package nodeinstance.create.v1;
+
+message CreateNodeInstance {
+    // Claim ID of the Agent the Node Instance belongs to.
+    // Eventually, the NodeInstance will be identified by the combination of
+    // this claim_id and the NodeID returned by `CreateNodeInstanceResult`
+    string claim_id = 1;
+    // Machine GUID of the Machine the request comes from.
+    // Used to look for an existing NodeID in the space claim_id belongs to
+    string machine_guid = 2;
+    string hostname = 3;
+
+    // vmq_instance_id: broker shard to use for reaching the agent;
+    // the cloud injects this information.
+    int32 vmq_instance_id = 4;
+    // mqtt_broker_addr: broker shard to use for reaching the agent;
+    // the cloud injects this information.
+    string mqtt_broker_addr = 5;
+
+    // hops is the number of streaming hops between the collection of node data
+    // and the claimed agent. Zero if no streaming is involved.
+    int32 hops = 6;
+}
+
+message CreateNodeInstanceResult {
+    string node_id = 1;
+    string machine_guid = 2;
+}
+
diff --git a/src/aclk/aclk-schemas/proto/nodeinstance/info/v1/info.proto b/src/aclk/aclk-schemas/proto/nodeinstance/info/v1/info.proto
new file mode 100644
index 000000000..7aa9d0448
--- /dev/null
+++ b/src/aclk/aclk-schemas/proto/nodeinstance/info/v1/info.proto
@@ -0,0 +1,148 @@
+syntax = "proto3";
+option go_package = "node_instance/info/v1;nodeinstanceinfo";
+
+package nodeinstance.info.v1;
+
+import "google/protobuf/timestamp.proto";
+import "proto/aclk/v1/lib.proto";
+
+// UpdateNodeInfo (Command)
+//
+// pulsar topic: `UpdateNodeInfo` (sharded)
+//
+// key: `claim_id,node_id`
+//
+// Publishers: `netdata/agent`
+// Subscribers: `cloud-node-mqtt-output-service`
+//
+// When:
+// On nodeinstance connect
+//
+message UpdateNodeInfo {
+    string node_id = 7;
+
+    string claim_id = 1;
+
+    NodeInfo data = 2;
+    // to be obsoleted in the future;
+    // all new fields should go into node_info
+    // or node_instance_info respectively
+
+    google.protobuf.Timestamp updated_at = 3;
+
+    int64 session_id = 4;
+
+    string machine_guid = 5;
+
+    bool child = 6;
+
+    MachineLearningInfo ml_info = 8;
+    // to be obsoleted in the far future
+
+    NodeInfo2 node_info = 9;
+    // node_info carries data about the actual node;
+    // for example, a feature (ml) for this node (child)
+    // might be available/enabled on the node (child) directly,
+    // but not available through the parent (node_instance)
+
+    NodeInstanceInfo node_instance_info = 10;
+    // info specific to the node_instance for this node, as available through the agent
+    // who sends this message.
+    // e.g. machine learning is enabled for this node and processing is done
+    // by the actual agent (parent)
+    // (the child itself might or might not be ml_capable by itself; see node_info)
+}
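The NodeInfo2 / NodeInstanceInfo split that follows exists because a child's own capabilities can differ from what its parent exposes on the child's behalf. A sketch of how an agent might fill both sets, mirroring the {name, version, enabled} capability shape used later in aclk_capas.c; the fill function and its inputs are hypothetical:

#include <stddef.h>

// Capability triple as used in the agent's capability tables.
struct capability { const char *name; unsigned version; int enabled; };

// Hypothetical: advertise what the CHILD node can do by itself versus
// what THIS agent (possibly a parent) provides on its behalf.
static void fill_ml_capas(int child_ml_capable, int parent_runs_ml,
                          struct capability *node_cap,        // -> NodeInfo2
                          struct capability *instance_cap) {  // -> NodeInstanceInfo
    *node_cap     = (struct capability){ "ml", child_ml_capable ? 1u : 0u,
                                         child_ml_capable };
    *instance_cap = (struct capability){ "ml", parent_runs_ml ? 1u : 0u,
                                         parent_runs_ml };
}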
+}
+
+message NodeInfo2 {
+    repeated aclk_lib.v1.Capability capabilities = 1;
+}
+
+message NodeInstanceInfo {
+    repeated aclk_lib.v1.Capability capabilities = 1;
+}
+
+// NodeInfo describes the metadata of a node
+message NodeInfo {
+    string name = 1;
+
+    string os = 2;
+    string os_name = 3;
+    string os_version = 4;
+
+    string kernel_name = 5;
+    string kernel_version = 6;
+
+    string architecture = 7;
+
+    // number of cpu cores in the node
+    uint32 cpus = 8;
+
+    // human-readable (value + unit) frequency of the cpu
+    string cpu_frequency = 9;
+
+    // human-readable (value + unit) size of the node's memory
+    string memory = 10;
+
+    // human-readable (value + unit) size of all the node's disks (sum)
+    string disk_space = 11;
+
+    // version of the netdata agent
+    string version = 12;
+
+    // release channel of the netdata agent (example: nightly)
+    string release_channel = 13;
+
+    string timezone = 14;
+
+    // virtualization_type example: kvm (optional)
+    string virtualization_type = 15;
+
+    // container_type example: docker (optional)
+    string container_type = 16;
+
+    string custom_info = 17;
+
+    // [Obsolete] repeated string services = 18;
+    reserved 18;
+
+    string machine_guid = 19;
+
+    // [Obsolete] repeated MirroredHostStatus mirrored_hosts_status = 20;
+    reserved 20;
+
+    map<string, string> host_labels = 21;
+
+    MachineLearningInfo ml_info = 22;
+
+    // [Obsolete] repeated string collectors = 23;
+    reserved 23;
+}
+
+message MachineLearningInfo {
+    // has the ML capability
+    bool ml_capable = 1;
+
+    // runs the ML functionality
+    bool ml_enabled = 2;
+}
+
+// UpdateNodeCollectors (Command)
+//
+// key: `claim_id,node_id`
+//
+// Publishers: `netdata/agent`
+//
+// When:
+// On nodeinstance connect (after the agent settles) and on detection of a change in collectors
+//
+
+message CollectorInfo {
+    string module = 1;
+    string plugin = 2;
+}
+
+message UpdateNodeCollectors {
+    string claim_id = 1;
+    string node_id = 2;
+    repeated CollectorInfo collectors = 3;
+}
diff --git a/src/aclk/aclk.c b/src/aclk/aclk.c
index 389d7455f..7bc620a61 100644
--- a/src/aclk/aclk.c
+++ b/src/aclk/aclk.c
@@ -2,8 +2,6 @@
 
 #include "aclk.h"
 
-#ifdef ENABLE_ACLK
-#include "aclk_stats.h"
 #include "mqtt_websockets/mqtt_wss_client.h"
 #include "aclk_otp.h"
 #include "aclk_tx_msgs.h"
@@ -14,7 +12,6 @@
 #include "https_client.h"
 #include "schema-wrappers/schema_wrappers.h"
 #include "aclk_capas.h"
-
 #include "aclk_proxy.h"
 
 #ifdef ACLK_LOG_CONVERSATION_DIR
@@ -23,20 +20,38 @@
 #include
 #endif
 
-#define ACLK_STABLE_TIMEOUT 3 // Minimum delay to mark AGENT as stable
-
-#endif /* ENABLE_ACLK */
-
 int aclk_pubacks_per_conn = 0; // How many PubAcks we got since MQTT conn est.
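The connection flag in the hunk that follows is converted from a plain extern int into a private bool behind relaxed atomics: the connection thread flips it, while any thread may poll aclk_online() without taking a lock. The same pattern in portable C11, as a standalone sketch of the idea rather than the patch's GCC-builtin code:

#include <stdatomic.h>
#include <stdbool.h>

// One writer thread toggles the flag, many readers poll it; relaxed
// ordering suffices because the flag carries no other data dependencies.
static atomic_bool connected;

static void set_connected(void) {
    atomic_store_explicit(&connected, true, memory_order_relaxed);
}
static void set_disconnected(void) {
    atomic_store_explicit(&connected, false, memory_order_relaxed);
}
static bool online(void) {
    return atomic_load_explicit(&connected, memory_order_relaxed);
}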
int aclk_rcvd_cloud_msgs = 0; int aclk_connection_counter = 0; -int disconnect_req = 0; -int aclk_connected = 0; +static bool aclk_connected = false; +static inline void aclk_set_connected(void) { + __atomic_store_n(&aclk_connected, true, __ATOMIC_RELAXED); +} +static inline void aclk_set_disconnected(void) { + __atomic_store_n(&aclk_connected, false, __ATOMIC_RELAXED); +} + +inline bool aclk_online(void) { + return __atomic_load_n(&aclk_connected, __ATOMIC_RELAXED); +} + +bool aclk_online_for_contexts(void) { + return aclk_online() && aclk_query_scope_has(HTTP_ACL_METRICS); +} + +bool aclk_online_for_alerts(void) { + return aclk_online() && aclk_query_scope_has(HTTP_ACL_ALERTS); +} + +bool aclk_online_for_nodes(void) { + return aclk_online() && aclk_query_scope_has(HTTP_ACL_NODES); +} + int aclk_ctx_based = 0; int aclk_disable_runtime = 0; -int aclk_stats_enabled; -int aclk_kill_link = 0; + +ACLK_DISCONNECT_ACTION disconnect_req = ACLK_NO_DISCONNECT; usec_t aclk_session_us = 0; time_t aclk_session_sec = 0; @@ -49,13 +64,8 @@ float last_backoff_value = 0; time_t aclk_block_until = 0; -#ifdef ENABLE_ACLK mqtt_wss_client mqttwss_client; -//netdata_mutex_t aclk_shared_state_mutex = NETDATA_MUTEX_INITIALIZER; -//#define ACLK_SHARED_STATE_LOCK netdata_mutex_lock(&aclk_shared_state_mutex) -//#define ACLK_SHARED_STATE_UNLOCK netdata_mutex_unlock(&aclk_shared_state_mutex) - struct aclk_shared_state aclk_shared_state = { .mqtt_shutdown_msg_id = -1, .mqtt_shutdown_msg_rcvd = 0 @@ -152,19 +162,6 @@ biofailed: return 1; } -static int wait_till_cloud_enabled() -{ - nd_log(NDLS_DAEMON, NDLP_INFO, - "Waiting for Cloud to be enabled"); - - while (!netdata_cloud_enabled) { - sleep_usec(USEC_PER_SEC * 1); - if (!service_running(SERVICE_ACLK)) - return 1; - } - return 0; -} - /** * Will block until agent is claimed. Returns only if agent claimed * or if agent needs to shutdown. @@ -174,15 +171,13 @@ static int wait_till_cloud_enabled() */ static int wait_till_agent_claimed(void) { - //TODO prevent malloc and freez - char *agent_id = get_agent_claimid(); - while (likely(!agent_id)) { + ND_UUID uuid = claim_id_get_uuid(); + while (likely(UUIDiszero(uuid))) { sleep_usec(USEC_PER_SEC * 1); if (!service_running(SERVICE_ACLK)) return 1; - agent_id = get_agent_claimid(); + uuid = claim_id_get_uuid(); } - freez(agent_id); return 0; } @@ -204,9 +199,9 @@ static int wait_till_agent_claim_ready() // The NULL return means the value was never initialised, but this value has been initialized in post_conf_load. // We trap the impossible NULL here to keep the linter happy without using a fatal() in the code. 
- char *cloud_base_url = appconfig_get(&cloud_config, CONFIG_SECTION_GLOBAL, "cloud base url", NULL); + const char *cloud_base_url = cloud_config_url_get(); if (cloud_base_url == NULL) { - netdata_log_error("Do not move the cloud base url out of post_conf_load!!"); + netdata_log_error("Do not move the \"url\" out of post_conf_load!!"); return 1; } @@ -214,7 +209,7 @@ static int wait_till_agent_claim_ready() // TODO make it without malloc/free memset(&url, 0, sizeof(url_t)); if (url_parse(cloud_base_url, &url)) { - netdata_log_error("Agent is claimed but the URL in configuration key \"cloud base url\" is invalid, please fix"); + netdata_log_error("Agent is claimed but the URL in configuration key \"url\" is invalid, please fix"); url_t_destroy(&url); sleep(5); continue; @@ -230,30 +225,6 @@ static int wait_till_agent_claim_ready() return 1; } -void aclk_mqtt_wss_log_cb(mqtt_wss_log_type_t log_type, const char* str) -{ - switch(log_type) { - case MQTT_WSS_LOG_ERROR: - case MQTT_WSS_LOG_FATAL: - nd_log(NDLS_DAEMON, NDLP_ERR, "%s", str); - return; - - case MQTT_WSS_LOG_WARN: - nd_log(NDLS_DAEMON, NDLP_WARNING, "%s", str); - return; - - case MQTT_WSS_LOG_INFO: - nd_log(NDLS_DAEMON, NDLP_INFO, "%s", str); - return; - - case MQTT_WSS_LOG_DEBUG: - return; - - default: - nd_log(NDLS_DAEMON, NDLP_ERR, "Unknown log type from mqtt_wss"); - } -} - static void msg_callback(const char *topic, const void *msg, size_t msglen, int qos) { UNUSED(qos); @@ -299,9 +270,9 @@ static void puback_callback(uint16_t packet_id) aclk_tbeb_reset(); } -#ifdef NETDATA_INTERNAL_CHECKS - aclk_stats_msg_puback(packet_id); -#endif +//#ifdef NETDATA_INTERNAL_CHECKS +// aclk_stats_msg_puback(packet_id); +//#endif if (aclk_shared_state.mqtt_shutdown_msg_id == (int)packet_id) { nd_log(NDLS_DAEMON, NDLP_DEBUG, @@ -311,21 +282,9 @@ static void puback_callback(uint16_t packet_id) } } -static int read_query_thread_count() -{ - int threads = MIN(get_netdata_cpus()/2, 6); - threads = MAX(threads, 2); - threads = config_get_number(CONFIG_SECTION_CLOUD, "query thread count", threads); - if(threads < 1) { - netdata_log_error("You need at least one query thread. Overriding configured setting of \"%d\"", threads); - threads = 1; - config_set_number(CONFIG_SECTION_CLOUD, "query thread count", threads); - } - return threads; -} - void aclk_graceful_disconnect(mqtt_wss_client client); +bool schedule_node_update = false; /* Keeps connection alive and handles all network communications. * Returns on error or when netdata is shutting down. * @param client instance of mqtt_wss_client @@ -334,7 +293,6 @@ void aclk_graceful_disconnect(mqtt_wss_client client); */ static int handle_connection(mqtt_wss_client client) { - time_t last_periodic_query_wakeup = now_monotonic_sec(); while (service_running(SERVICE_ACLK)) { // timeout 1000 to check at least once a second // for netdata_exit @@ -343,30 +301,32 @@ static int handle_connection(mqtt_wss_client client) return 1; } - if (disconnect_req || aclk_kill_link) { - nd_log(NDLS_DAEMON, NDLP_NOTICE, - "Going to restart connection due to disconnect_req=%s (cloud req), aclk_kill_link=%s (reclaim)", - disconnect_req ? "true" : "false", - aclk_kill_link ? 
"true" : "false"); + if (disconnect_req != ACLK_NO_DISCONNECT) { + const char *reason; + switch (disconnect_req) { + case ACLK_CLOUD_DISCONNECT: + reason = "cloud request"; + break; + case ACLK_PING_TIMEOUT: + reason = "ping timeout"; + schedule_node_update = true; + break; + case ACLK_RELOAD_CONF: + reason = "reclaim"; + break; + default: + reason = "unknown"; + break; + } + + nd_log(NDLS_DAEMON, NDLP_NOTICE, "Going to restart connection due to \"%s\"", reason); - disconnect_req = 0; - aclk_kill_link = 0; + disconnect_req = ACLK_NO_DISCONNECT; aclk_graceful_disconnect(client); - aclk_queue_unlock(); aclk_shared_state.mqtt_shutdown_msg_id = -1; aclk_shared_state.mqtt_shutdown_msg_rcvd = 0; return 1; } - - // mqtt_wss_service will return faster than in one second - // if there is enough work to do - time_t now = now_monotonic_sec(); - if (last_periodic_query_wakeup < now) { - // wake up at least one Query Thread at least - // once per second - last_periodic_query_wakeup = now; - QUERY_THREAD_WAKEUP; - } } return 0; } @@ -386,13 +346,12 @@ static inline void mqtt_connected_actions(mqtt_wss_client client) else mqtt_wss_subscribe(client, topic, 1); - aclk_stats_upd_online(1); - aclk_connected = 1; + aclk_set_connected(); aclk_pubacks_per_conn = 0; aclk_rcvd_cloud_msgs = 0; aclk_connection_counter++; - aclk_topic_cache_iter_t iter = ACLK_TOPIC_CACHE_ITER_T_INITIALIZER; + size_t iter = 0; while ((topic = (char*)aclk_topic_cache_iterate(&iter)) != NULL) mqtt_wss_set_topic_alias(client, topic); @@ -404,9 +363,6 @@ void aclk_graceful_disconnect(mqtt_wss_client client) nd_log(NDLS_DAEMON, NDLP_DEBUG, "Preparing to gracefully shutdown ACLK connection"); - aclk_queue_lock(); - aclk_queue_flush(); - aclk_shared_state.mqtt_shutdown_msg_id = aclk_send_agent_connection_update(client, 0); time_t t = now_monotonic_sec(); @@ -425,9 +381,8 @@ void aclk_graceful_disconnect(mqtt_wss_client client) nd_log(NDLS_DAEMON, NDLP_WARNING, "ACLK link is down"); nd_log(NDLS_ACCESS, NDLP_WARNING, "ACLK DISCONNECTED"); - aclk_stats_upd_online(0); last_disconnect_time = now_realtime_sec(); - aclk_connected = 0; + aclk_set_disconnected(); nd_log(NDLS_DAEMON, NDLP_DEBUG, "Attempting to gracefully shutdown the MQTT/WSS connection"); @@ -602,9 +557,9 @@ static int aclk_attempt_to_connect(mqtt_wss_client client) bool fallback_ipv4 = false; while (service_running(SERVICE_ACLK)) { - aclk_cloud_base_url = appconfig_get(&cloud_config, CONFIG_SECTION_GLOBAL, "cloud base url", NULL); + aclk_cloud_base_url = cloud_config_url_get(); if (aclk_cloud_base_url == NULL) { - error_report("Do not move the cloud base url out of post_conf_load!!"); + error_report("Do not move the \"url\" out of post_conf_load!!"); aclk_status = ACLK_STATUS_NO_CLOUD_URL; return -1; } @@ -802,12 +757,7 @@ static int aclk_attempt_to_connect(mqtt_wss_client client) */ void *aclk_main(void *ptr) { - struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr; - - struct aclk_stats_thread *stats_thread = NULL; - - struct aclk_query_threads query_threads; - query_threads.thread_list = NULL; + struct netdata_static_thread *static_thread = ptr; ACLK_PROXY_TYPE proxy_type; aclk_get_proxy(&proxy_type); @@ -817,24 +767,12 @@ void *aclk_main(void *ptr) return NULL; } - unsigned int proto_hdl_cnt = aclk_init_rx_msg_handlers(); - -#if defined( DISABLE_CLOUD ) || !defined( ENABLE_ACLK ) - nd_log(NDLS_DAEMON, NDLP_INFO, - "Killing ACLK thread -> cloud functionality has been disabled"); - - static_thread->enabled = NETDATA_MAIN_THREAD_EXITED; - return NULL; 
-#endif - query_threads.count = read_query_thread_count(); - - if (wait_till_cloud_enabled()) - goto exit; + aclk_init_rx_msg_handlers(); if (wait_till_agent_claim_ready()) goto exit; - if (!(mqttwss_client = mqtt_wss_new("mqtt_wss", aclk_mqtt_wss_log_cb, msg_callback, puback_callback))) { + if (!((mqttwss_client = mqtt_wss_new(msg_callback, puback_callback)))) { netdata_log_error("Couldn't initialize MQTT_WSS network library"); goto exit; } @@ -856,28 +794,22 @@ void *aclk_main(void *ptr) // that send JSON payloads of 10 MB as single messages mqtt_wss_set_max_buf_size(mqttwss_client, 25*1024*1024); - aclk_stats_enabled = config_get_boolean(CONFIG_SECTION_CLOUD, "statistics", global_statistics_enabled); - if (aclk_stats_enabled) { - stats_thread = callocz(1, sizeof(struct aclk_stats_thread)); - stats_thread->query_thread_count = query_threads.count; - stats_thread->client = mqttwss_client; - aclk_stats_thread_prepare(query_threads.count, proto_hdl_cnt); - stats_thread->thread = nd_thread_create("ACLK_STATS", NETDATA_THREAD_OPTION_JOINABLE, aclk_stats_main_thread, stats_thread); - } - // Keep reconnecting and talking until our time has come // and the Grim Reaper (netdata_exit) calls + netdata_log_info("Starting ACLK query event loop"); + aclk_query_init(mqttwss_client); do { if (aclk_attempt_to_connect(mqttwss_client)) goto exit_full; - if (unlikely(!query_threads.thread_list)) - aclk_query_threads_start(&query_threads, mqttwss_client); + if (schedule_node_update) { + schedule_node_state_update(localhost, 0); + schedule_node_update = false; + } if (handle_connection(mqttwss_client)) { - aclk_stats_upd_online(0); last_disconnect_time = now_realtime_sec(); - aclk_connected = 0; + aclk_set_disconnected(); nd_log(NDLS_ACCESS, NDLP_WARNING, "ACLK DISCONNECTED"); } } while (service_running(SERVICE_ACLK)); @@ -890,16 +822,6 @@ void *aclk_main(void *ptr) #endif exit_full: -// Tear Down - QUERY_THREAD_WAKEUP_ALL; - - aclk_query_threads_cleanup(&query_threads); - - if (aclk_stats_enabled) { - nd_thread_join(stats_thread->thread); - aclk_stats_thread_cleanup(); - freez(stats_thread); - } free_topic_cache(); mqtt_wss_destroy(mqttwss_client); exit: @@ -913,17 +835,16 @@ exit: void aclk_host_state_update(RRDHOST *host, int cmd, int queryable) { - nd_uuid_t node_id; - int ret = 0; + ND_UUID node_id; - if (!aclk_connected) + if (!aclk_online()) return; - if (host->node_id && !uuid_is_null(*host->node_id)) { - uuid_copy(node_id, *host->node_id); + if (!UUIDiszero(host->node_id)) { + node_id = host->node_id; } else { - ret = get_node_id(&host->host_uuid, &node_id); + int ret = get_node_id(&host->host_id.uuid, &node_id.uuid); if (ret > 0) { // this means we were not able to check if node_id already present netdata_log_error("Unable to check for node_id. Ignoring the host state update."); @@ -933,21 +854,23 @@ void aclk_host_state_update(RRDHOST *host, int cmd, int queryable) // node_id not found aclk_query_t create_query; create_query = aclk_query_new(REGISTER_NODE); - rrdhost_aclk_state_lock(localhost); + CLAIM_ID claim_id = claim_id_get(); + node_instance_creation_t node_instance_creation = { - .claim_id = localhost->aclk_state.claimed_id, + .claim_id = claim_id_is_set(claim_id) ? 
claim_id.str : NULL, .hops = host->system_info->hops, .hostname = rrdhost_hostname(host), .machine_guid = host->machine_guid}; + create_query->data.bin_payload.payload = generate_node_instance_creation(&create_query->data.bin_payload.size, &node_instance_creation); - rrdhost_aclk_state_unlock(localhost); + create_query->data.bin_payload.topic = ACLK_TOPICID_CREATE_NODE; create_query->data.bin_payload.msg_name = "CreateNodeInstance"; nd_log(NDLS_DAEMON, NDLP_DEBUG, "Registering host=%s, hops=%u", host->machine_guid, host->system_info->hops); - aclk_queue_query(create_query); + aclk_execute_query(create_query); return; } } @@ -960,14 +883,13 @@ void aclk_host_state_update(RRDHOST *host, int cmd, int queryable) .session_id = aclk_session_newarch }; node_state_update.node_id = mallocz(UUID_STR_LEN); - uuid_unparse_lower(node_id, (char*)node_state_update.node_id); + uuid_unparse_lower(node_id.uuid, (char*)node_state_update.node_id); node_state_update.capabilities = aclk_get_agent_capas(); - rrdhost_aclk_state_lock(localhost); - node_state_update.claim_id = localhost->aclk_state.claimed_id; + CLAIM_ID claim_id = claim_id_get(); + node_state_update.claim_id = claim_id_is_set(claim_id) ? claim_id.str : NULL; query->data.bin_payload.payload = generate_node_instance_connection(&query->data.bin_payload.size, &node_state_update); - rrdhost_aclk_state_unlock(localhost); nd_log(NDLS_DAEMON, NDLP_DEBUG, "Queuing status update for node=%s, live=%d, hops=%u, queryable=%d", @@ -975,7 +897,7 @@ void aclk_host_state_update(RRDHOST *host, int cmd, int queryable) freez((void*)node_state_update.node_id); query->data.bin_payload.msg_name = "UpdateNodeInstanceConnection"; query->data.bin_payload.topic = ACLK_TOPICID_NODE_CONN; - aclk_queue_query(query); + aclk_execute_query(query); } void aclk_send_node_instances() @@ -1009,10 +931,9 @@ void aclk_send_node_instances() } node_state_update.capabilities = aclk_get_node_instance_capas(host); - rrdhost_aclk_state_lock(localhost); - node_state_update.claim_id = localhost->aclk_state.claimed_id; + CLAIM_ID claim_id = claim_id_get(); + node_state_update.claim_id = claim_id_is_set(claim_id) ? claim_id.str : NULL; query->data.bin_payload.payload = generate_node_instance_connection(&query->data.bin_payload.size, &node_state_update); - rrdhost_aclk_state_unlock(localhost); nd_log(NDLS_DAEMON, NDLP_DEBUG, "Queuing status update for node=%s, live=%d, hops=%d, queryable=1", @@ -1022,7 +943,7 @@ void aclk_send_node_instances() freez((void*)node_state_update.node_id); query->data.bin_payload.msg_name = "UpdateNodeInstanceConnection"; query->data.bin_payload.topic = ACLK_TOPICID_NODE_CONN; - aclk_queue_query(query); + aclk_execute_query(query); } else { aclk_query_t create_query; create_query = aclk_query_new(REGISTER_NODE); @@ -1034,17 +955,17 @@ void aclk_send_node_instances() uuid_unparse_lower(list->host_id, (char*)node_instance_creation.machine_guid); create_query->data.bin_payload.topic = ACLK_TOPICID_CREATE_NODE; create_query->data.bin_payload.msg_name = "CreateNodeInstance"; - rrdhost_aclk_state_lock(localhost); - node_instance_creation.claim_id = localhost->aclk_state.claimed_id, + + CLAIM_ID claim_id = claim_id_get(); + node_instance_creation.claim_id = claim_id_is_set(claim_id) ? 
claim_id.str : NULL, create_query->data.bin_payload.payload = generate_node_instance_creation(&create_query->data.bin_payload.size, &node_instance_creation); - rrdhost_aclk_state_unlock(localhost); nd_log(NDLS_DAEMON, NDLP_DEBUG, "Queuing registration for host=%s, hops=%d", (char*)node_instance_creation.machine_guid, list->hops); freez((void *)node_instance_creation.machine_guid); - aclk_queue_query(create_query); + aclk_execute_query(create_query); } freez(list->hostname); @@ -1089,38 +1010,37 @@ char *aclk_state(void) ); buffer_sprintf(wb, "Protocol Used: Protobuf\nMQTT Version: %d\nClaimed: ", 5); - char *agent_id = get_agent_claimid(); - if (agent_id == NULL) + CLAIM_ID claim_id = claim_id_get(); + if (!claim_id_is_set(claim_id)) buffer_strcat(wb, "No\n"); else { - char *cloud_base_url = appconfig_get(&cloud_config, CONFIG_SECTION_GLOBAL, "cloud base url", NULL); - buffer_sprintf(wb, "Yes\nClaimed Id: %s\nCloud URL: %s\n", agent_id, cloud_base_url ? cloud_base_url : "null"); - freez(agent_id); + const char *cloud_base_url = cloud_config_url_get(); + buffer_sprintf(wb, "Yes\nClaimed Id: %s\nCloud URL: %s\n", claim_id.str, cloud_base_url ? cloud_base_url : "null"); } - buffer_sprintf(wb, "Online: %s\nReconnect count: %d\nBanned By Cloud: %s\n", aclk_connected ? "Yes" : "No", aclk_connection_counter > 0 ? (aclk_connection_counter - 1) : 0, aclk_disable_runtime ? "Yes" : "No"); - if (last_conn_time_mqtt && (tmptr = localtime_r(&last_conn_time_mqtt, &tmbuf)) ) { + buffer_sprintf(wb, "Online: %s\nReconnect count: %d\nBanned By Cloud: %s\n", aclk_online() ? "Yes" : "No", aclk_connection_counter > 0 ? (aclk_connection_counter - 1) : 0, aclk_disable_runtime ? "Yes" : "No"); + if (last_conn_time_mqtt && ((tmptr = localtime_r(&last_conn_time_mqtt, &tmbuf))) ) { char timebuf[26]; strftime(timebuf, 26, "%Y-%m-%d %H:%M:%S", tmptr); buffer_sprintf(wb, "Last Connection Time: %s\n", timebuf); } - if (last_conn_time_appl && (tmptr = localtime_r(&last_conn_time_appl, &tmbuf)) ) { + if (last_conn_time_appl && ((tmptr = localtime_r(&last_conn_time_appl, &tmbuf))) ) { char timebuf[26]; strftime(timebuf, 26, "%Y-%m-%d %H:%M:%S", tmptr); buffer_sprintf(wb, "Last Connection Time + %d PUBACKs received: %s\n", ACLK_PUBACKS_CONN_STABLE, timebuf); } - if (last_disconnect_time && (tmptr = localtime_r(&last_disconnect_time, &tmbuf)) ) { + if (last_disconnect_time && ((tmptr = localtime_r(&last_disconnect_time, &tmbuf))) ) { char timebuf[26]; strftime(timebuf, 26, "%Y-%m-%d %H:%M:%S", tmptr); buffer_sprintf(wb, "Last Disconnect Time: %s\n", timebuf); } - if (!aclk_connected && next_connection_attempt && (tmptr = localtime_r(&next_connection_attempt, &tmbuf)) ) { + if (!aclk_connected && next_connection_attempt && ((tmptr = localtime_r(&next_connection_attempt, &tmbuf))) ) { char timebuf[26]; strftime(timebuf, 26, "%Y-%m-%d %H:%M:%S", tmptr); buffer_sprintf(wb, "Next Connection Attempt At: %s\nLast Backoff: %.3f", timebuf, last_backoff_value); } - if (aclk_connected) { + if (aclk_online()) { buffer_sprintf(wb, "Received Cloud MQTT Messages: %d\nMQTT Messages Confirmed by Remote Broker (PUBACKs): %d", aclk_rcvd_cloud_msgs, aclk_pubacks_per_conn); RRDHOST *host; @@ -1129,20 +1049,18 @@ char *aclk_state(void) buffer_sprintf(wb, "\n\n> Node Instance for mGUID: \"%s\" hostname \"%s\"\n", host->machine_guid, rrdhost_hostname(host)); buffer_strcat(wb, "\tClaimed ID: "); - rrdhost_aclk_state_lock(host); - if (host->aclk_state.claimed_id) - buffer_strcat(wb, host->aclk_state.claimed_id); + claim_id = 
rrdhost_claim_id_get(host); + if(claim_id_is_set(claim_id)) + buffer_strcat(wb, claim_id.str); else buffer_strcat(wb, "null"); - rrdhost_aclk_state_unlock(host); - - if (host->node_id == NULL || uuid_is_null(*host->node_id)) { + if (UUIDiszero(host->node_id)) buffer_strcat(wb, "\n\tNode ID: null\n"); - } else { - char node_id[GUID_LEN + 1]; - uuid_unparse_lower(*host->node_id, node_id); - buffer_sprintf(wb, "\n\tNode ID: %s\n", node_id); + else { + char node_id_str[UUID_STR_LEN]; + uuid_unparse_lower(host->node_id.uuid, node_id_str); + buffer_sprintf(wb, "\n\tNode ID: %s\n", node_id_str); } buffer_sprintf(wb, "\tStreaming Hops: %d\n\tRelationship: %s", host->system_info->hops, host == localhost ? "self" : "child"); @@ -1183,7 +1101,7 @@ static void fill_alert_status_for_host_json(json_object *obj, RRDHOST *host) static json_object *timestamp_to_json(const time_t *t) { struct tm *tmptr, tmbuf; - if (*t && (tmptr = gmtime_r(t, &tmbuf)) ) { + if (*t && ((tmptr = gmtime_r(t, &tmbuf))) ) { char timebuf[26]; strftime(timebuf, 26, "%Y-%m-%d %H:%M:%S", tmptr); return json_object_new_string(timebuf); @@ -1206,22 +1124,21 @@ char *aclk_state_json(void) json_object_array_add(grp, tmp); json_object_object_add(msg, "protocols-supported", grp); - char *agent_id = get_agent_claimid(); - tmp = json_object_new_boolean(agent_id != NULL); + CLAIM_ID claim_id = claim_id_get(); + tmp = json_object_new_boolean(claim_id_is_set(claim_id)); json_object_object_add(msg, "agent-claimed", tmp); - if (agent_id) { - tmp = json_object_new_string(agent_id); - freez(agent_id); - } else + if (claim_id_is_set(claim_id)) + tmp = json_object_new_string(claim_id.str); + else tmp = NULL; json_object_object_add(msg, "claimed-id", tmp); - char *cloud_base_url = appconfig_get(&cloud_config, CONFIG_SECTION_GLOBAL, "cloud base url", NULL); + const char *cloud_base_url = cloud_config_url_get(); tmp = cloud_base_url ? json_object_new_string(cloud_base_url) : NULL; json_object_object_add(msg, "cloud-url", tmp); - tmp = json_object_new_boolean(aclk_connected); + tmp = json_object_new_boolean(aclk_online()); json_object_object_add(msg, "online", tmp); tmp = json_object_new_string("Protobuf"); @@ -1242,9 +1159,9 @@ char *aclk_state_json(void) json_object_object_add(msg, "last-connect-time-utc", timestamp_to_json(&last_conn_time_mqtt)); json_object_object_add(msg, "last-connect-time-puback-utc", timestamp_to_json(&last_conn_time_appl)); json_object_object_add(msg, "last-disconnect-time-utc", timestamp_to_json(&last_disconnect_time)); - json_object_object_add(msg, "next-connection-attempt-utc", !aclk_connected ? timestamp_to_json(&next_connection_attempt) : NULL); + json_object_object_add(msg, "next-connection-attempt-utc", !aclk_online() ? 
timestamp_to_json(&next_connection_attempt) : NULL); tmp = NULL; - if (!aclk_connected && last_backoff_value) + if (!aclk_online() && last_backoff_value) tmp = json_object_new_double(last_backoff_value); json_object_object_add(msg, "last-backoff-value", tmp); @@ -1264,20 +1181,19 @@ char *aclk_state_json(void) tmp = json_object_new_string(host->machine_guid); json_object_object_add(nodeinstance, "mguid", tmp); - rrdhost_aclk_state_lock(host); - if (host->aclk_state.claimed_id) { - tmp = json_object_new_string(host->aclk_state.claimed_id); + claim_id = rrdhost_claim_id_get(host); + if(claim_id_is_set(claim_id)) { + tmp = json_object_new_string(claim_id.str); json_object_object_add(nodeinstance, "claimed_id", tmp); } else json_object_object_add(nodeinstance, "claimed_id", NULL); - rrdhost_aclk_state_unlock(host); - if (host->node_id == NULL || uuid_is_null(*host->node_id)) { + if (UUIDiszero(host->node_id)) { json_object_object_add(nodeinstance, "node-id", NULL); } else { - char node_id[GUID_LEN + 1]; - uuid_unparse_lower(*host->node_id, node_id); - tmp = json_object_new_string(node_id); + char node_id_str[UUID_STR_LEN]; + uuid_unparse_lower(host->node_id.uuid, node_id_str); + tmp = json_object_new_string(node_id_str); json_object_object_add(nodeinstance, "node-id", tmp); } @@ -1303,12 +1219,10 @@ char *aclk_state_json(void) json_object_put(msg); return str; } -#endif /* ENABLE_ACLK */ void add_aclk_host_labels(void) { RRDLABELS *labels = localhost->rrdlabels; -#ifdef ENABLE_ACLK rrdlabels_add(labels, "_aclk_available", "true", RRDLABEL_SRC_AUTO|RRDLABEL_SRC_ACLK); ACLK_PROXY_TYPE aclk_proxy; char *proxy_str; @@ -1329,9 +1243,6 @@ void add_aclk_host_labels(void) { rrdlabels_add(labels, "_mqtt_version", "5", RRDLABEL_SRC_AUTO); rrdlabels_add(labels, "_aclk_proxy", proxy_str, RRDLABEL_SRC_AUTO); rrdlabels_add(labels, "_aclk_ng_new_cloud_protocol", "true", RRDLABEL_SRC_AUTO|RRDLABEL_SRC_ACLK); -#else - rrdlabels_add(labels, "_aclk_available", "false", RRDLABEL_SRC_AUTO|RRDLABEL_SRC_ACLK); -#endif } void aclk_queue_node_info(RRDHOST *host, bool immediate) diff --git a/src/aclk/aclk.h b/src/aclk/aclk.h index 72d1a2e11..45a2eac85 100644 --- a/src/aclk/aclk.h +++ b/src/aclk/aclk.h @@ -4,14 +4,19 @@ #include "daemon/common.h" -#ifdef ENABLE_ACLK #include "aclk_util.h" -#include "aclk_rrdhost_state.h" +//#include "aclk_rrdhost_state.h" // How many MQTT PUBACKs we need to get to consider connection // stable for the purposes of TBEB (truncated binary exponential backoff) #define ACLK_PUBACKS_CONN_STABLE 3 -#endif /* ENABLE_ACLK */ + +typedef enum { + ACLK_NO_DISCONNECT = 0, + ACLK_CLOUD_DISCONNECT = 1, + ACLK_RELOAD_CONF = 2, + ACLK_PING_TIMEOUT = 3 +} ACLK_DISCONNECT_ACTION; typedef enum __attribute__((packed)) { ACLK_STATUS_CONNECTED = 0, @@ -39,12 +44,19 @@ extern ACLK_STATUS aclk_status; extern const char *aclk_cloud_base_url; const char *aclk_status_to_string(void); -extern int aclk_connected; extern int aclk_ctx_based; extern int aclk_disable_runtime; -extern int aclk_stats_enabled; +//extern int aclk_stats_enabled; extern int aclk_kill_link; +bool aclk_online(void); +bool aclk_online_for_contexts(void); +bool aclk_online_for_alerts(void); +bool aclk_online_for_nodes(void); + +void aclk_config_get_query_scope(void); +bool aclk_query_scope_has(HTTP_ACL acl); + extern time_t last_conn_time_mqtt; extern time_t last_conn_time_appl; extern time_t last_disconnect_time; @@ -57,15 +69,10 @@ extern time_t aclk_session_sec; extern time_t aclk_block_until; extern int aclk_connection_counter; -extern int 
disconnect_req; +extern ACLK_DISCONNECT_ACTION disconnect_req; -#ifdef ENABLE_ACLK void *aclk_main(void *ptr); -extern netdata_mutex_t aclk_shared_state_mutex; -#define ACLK_SHARED_STATE_LOCK netdata_mutex_lock(&aclk_shared_state_mutex) -#define ACLK_SHARED_STATE_UNLOCK netdata_mutex_unlock(&aclk_shared_state_mutex) - extern struct aclk_shared_state { // To wait for `disconnect` message PUBACK // when shutting down @@ -80,8 +87,6 @@ void aclk_send_node_instances(void); void aclk_send_bin_msg(char *msg, size_t msg_len, enum aclk_topics subtopic, const char *msgname); -#endif /* ENABLE_ACLK */ - char *aclk_state(void); char *aclk_state_json(void); void add_aclk_host_labels(void); diff --git a/src/aclk/aclk_alarm_api.c b/src/aclk/aclk_alarm_api.c index 664671f70..a23ad0ff7 100644 --- a/src/aclk/aclk_alarm_api.c +++ b/src/aclk/aclk_alarm_api.c @@ -8,15 +8,6 @@ #include "aclk.h" -void aclk_send_provide_alarm_checkpoint(struct alarm_checkpoint *checkpoint) -{ - aclk_query_t query = aclk_query_new(ALARM_PROVIDE_CHECKPOINT); - query->data.bin_payload.payload = generate_alarm_checkpoint(&query->data.bin_payload.size, checkpoint); - query->data.bin_payload.topic = ACLK_TOPICID_ALARM_CHECKPOINT; - query->data.bin_payload.msg_name = "AlarmCheckpoint"; - QUEUE_IF_PAYLOAD_PRESENT(query); -} - void aclk_send_alarm_log_entry(struct alarm_log_entry *log_entry) { size_t payload_size; diff --git a/src/aclk/aclk_alarm_api.h b/src/aclk/aclk_alarm_api.h index 4d9d9447a..952d55007 100644 --- a/src/aclk/aclk_alarm_api.h +++ b/src/aclk/aclk_alarm_api.h @@ -6,7 +6,6 @@ #include "../daemon/common.h" #include "schema-wrappers/schema_wrappers.h" -void aclk_send_provide_alarm_checkpoint(struct alarm_checkpoint *checkpoint); void aclk_send_alarm_log_entry(struct alarm_log_entry *log_entry); void aclk_send_provide_alarm_cfg(struct provide_alarm_configuration *cfg); void aclk_send_alarm_snapshot(alarm_snapshot_proto_ptr_t snapshot); diff --git a/src/aclk/aclk_capas.c b/src/aclk/aclk_capas.c index 0f7870fdd..dee6bf0c5 100644 --- a/src/aclk/aclk_capas.c +++ b/src/aclk/aclk_capas.c @@ -4,7 +4,11 @@ #include "ml/ml.h" -#define HTTP_API_V2_VERSION 6 +#define HTTP_API_V2_VERSION 7 + +size_t aclk_get_http_api_version(void) { + return HTTP_API_V2_VERSION; +} const struct capability *aclk_get_agent_capas() { @@ -24,8 +28,8 @@ const struct capability *aclk_get_agent_capas() agent_capabilities[2].version = ml_capable() ? 1 : 0; agent_capabilities[2].enabled = ml_enabled(localhost); - agent_capabilities[3].version = enable_metric_correlations ? metric_correlations_version : 0; - agent_capabilities[3].enabled = enable_metric_correlations; + agent_capabilities[3].version = metric_correlations_version; + agent_capabilities[3].enabled = 1; agent_capabilities[7].enabled = localhost->health.health_enabled; @@ -40,9 +44,7 @@ struct capability *aclk_get_node_instance_capas(RRDHOST *host) struct capability ni_caps[] = { { .name = "proto", .version = 1, .enabled = 1 }, { .name = "ml", .version = ml_capable(), .enabled = ml_enabled(host) }, - { .name = "mc", - .version = enable_metric_correlations ? metric_correlations_version : 0, - .enabled = enable_metric_correlations }, + { .name = "mc", .version = metric_correlations_version, .enabled = 1 }, { .name = "ctx", .version = 1, .enabled = 1 }, { .name = "funcs", .version = functions ? 1 : 0, .enabled = functions ? 
1 : 0 }, { .name = "http_api_v2", .version = HTTP_API_V2_VERSION, .enabled = 1 }, diff --git a/src/aclk/aclk_capas.h b/src/aclk/aclk_capas.h index c39a197b8..d3808e640 100644 --- a/src/aclk/aclk_capas.h +++ b/src/aclk/aclk_capas.h @@ -8,6 +8,7 @@ #include "schema-wrappers/capability.h" +size_t aclk_get_http_api_version(void); const struct capability *aclk_get_agent_capas(); struct capability *aclk_get_node_instance_capas(RRDHOST *host); diff --git a/src/aclk/aclk_otp.c b/src/aclk/aclk_otp.c index 3b8222931..3e4f7835a 100644 --- a/src/aclk/aclk_otp.c +++ b/src/aclk/aclk_otp.c @@ -4,10 +4,6 @@ #include "aclk_util.h" #include "aclk.h" -#include "daemon/common.h" - -#include "mqtt_websockets/c-rbuf/cringbuffer.h" - static int aclk_https_request(https_req_t *request, https_req_response_t *response, bool *fallback_ipv4) { int rc; // wrapper for ACLK only which loads ACLK specific proxy settings @@ -271,40 +267,8 @@ exit: } #endif -#if defined(OPENSSL_VERSION_NUMBER) && OPENSSL_VERSION_NUMBER < OPENSSL_VERSION_110 -static EVP_ENCODE_CTX *EVP_ENCODE_CTX_new(void) -{ - EVP_ENCODE_CTX *ctx = OPENSSL_malloc(sizeof(*ctx)); - - if (ctx != NULL) { - memset(ctx, 0, sizeof(*ctx)); - } - return ctx; -} -static void EVP_ENCODE_CTX_free(EVP_ENCODE_CTX *ctx) -{ - OPENSSL_free(ctx); - return; -} -#endif - #define CHALLENGE_LEN 256 #define CHALLENGE_LEN_BASE64 344 -inline static int base64_decode_helper(unsigned char *out, int *outl, const unsigned char *in, int in_len) -{ - unsigned char remaining_data[CHALLENGE_LEN]; - EVP_ENCODE_CTX *ctx = EVP_ENCODE_CTX_new(); - EVP_DecodeInit(ctx); - EVP_DecodeUpdate(ctx, out, outl, in, in_len); - int remainder = 0; - EVP_DecodeFinal(ctx, remaining_data, &remainder); - EVP_ENCODE_CTX_free(ctx); - if (remainder) { - netdata_log_error("Unexpected data at EVP_DecodeFinal"); - return 1; - } - return 0; -} #define OTP_URL_PREFIX "/api/v1/auth/node/" int aclk_get_otp_challenge(url_t *target, const char *agent_id, unsigned char **challenge, int *challenge_bytes, bool *fallback_ipv4) @@ -351,7 +315,7 @@ int aclk_get_otp_challenge(url_t *target, const char *agent_id, unsigned char ** goto cleanup_json; } const char *challenge_base64; - if (!(challenge_base64 = json_object_get_string(challenge_json))) { + if (!((challenge_base64 = json_object_get_string(challenge_json)))) { netdata_log_error("Failed to extract challenge from JSON object"); goto cleanup_json; } @@ -360,8 +324,9 @@ int aclk_get_otp_challenge(url_t *target, const char *agent_id, unsigned char ** goto cleanup_json; } - *challenge = mallocz((CHALLENGE_LEN_BASE64 / 4) * 3); - base64_decode_helper(*challenge, challenge_bytes, (const unsigned char*)challenge_base64, strlen(challenge_base64)); + *challenge = mallocz((CHALLENGE_LEN_BASE64 / 4) * 3 + 1); + *challenge_bytes = netdata_base64_decode(*challenge, (const unsigned char *) challenge_base64, CHALLENGE_LEN_BASE64); + if (*challenge_bytes != CHALLENGE_LEN) { netdata_log_error("Unexpected challenge length of %d instead of %d", *challenge_bytes, CHALLENGE_LEN); freez(*challenge); @@ -379,7 +344,6 @@ cleanup_resp: int aclk_send_otp_response(const char *agent_id, const unsigned char *response, int response_bytes, url_t *target, struct auth_data *mqtt_auth, bool *fallback_ipv4) { - int len; int rc = 1; https_req_t req = HTTPS_REQ_T_INITIALIZER; https_req_response_t resp = HTTPS_REQ_RESPONSE_T_INITIALIZER; @@ -391,7 +355,7 @@ int aclk_send_otp_response(const char *agent_id, const unsigned char *response, unsigned char base64[CHALLENGE_LEN_BASE64 + 1]; memset(base64, 0, 
CHALLENGE_LEN_BASE64 + 1); - base64_encode_helper(base64, &len, response, response_bytes); + (void) netdata_base64_encode(base64, response, response_bytes); BUFFER *url = buffer_create(strlen(OTP_URL_PREFIX) + UUID_STR_LEN + 20, &netdata_buffers_statistics.buffers_aclk); BUFFER *resp_json = buffer_create(strlen(OTP_URL_PREFIX) + UUID_STR_LEN + 20, &netdata_buffers_statistics.buffers_aclk); @@ -487,16 +451,15 @@ int aclk_get_mqtt_otp(RSA *p_key, char **mqtt_id, char **mqtt_usr, char **mqtt_p unsigned char *challenge = NULL; int challenge_bytes; - char *agent_id = get_agent_claimid(); - if (agent_id == NULL) { + CLAIM_ID claim_id = claim_id_get(); + if (!claim_id_is_set(claim_id)) { netdata_log_error("Agent was not claimed - cannot perform challenge/response"); return 1; } // Get Challenge - if (aclk_get_otp_challenge(target, agent_id, &challenge, &challenge_bytes, fallback_ipv4)) { + if (aclk_get_otp_challenge(target, claim_id.str, &challenge, &challenge_bytes, fallback_ipv4)) { netdata_log_error("Error getting challenge"); - freez(agent_id); return 1; } @@ -507,17 +470,15 @@ int aclk_get_mqtt_otp(RSA *p_key, char **mqtt_id, char **mqtt_usr, char **mqtt_p netdata_log_error("Couldn't decrypt the challenge received"); freez(response_plaintext); freez(challenge); - freez(agent_id); return 1; } freez(challenge); // Encode and Send Challenge struct auth_data data = { .client_id = NULL, .passwd = NULL, .username = NULL }; - if (aclk_send_otp_response(agent_id, response_plaintext, response_plaintext_bytes, target, &data, fallback_ipv4)) { + if (aclk_send_otp_response(claim_id.str, response_plaintext, response_plaintext_bytes, target, &data, fallback_ipv4)) { netdata_log_error("Error getting response"); freez(response_plaintext); - freez(agent_id); return 1; } @@ -526,7 +487,6 @@ int aclk_get_mqtt_otp(RSA *p_key, char **mqtt_id, char **mqtt_usr, char **mqtt_p *mqtt_id = data.client_id; freez(response_plaintext); - freez(agent_id); return 0; } @@ -830,17 +790,14 @@ int aclk_get_env(aclk_env_t *env, const char* aclk_hostname, int aclk_port, bool req.request_type = HTTP_REQ_GET; - char *agent_id = get_agent_claimid(); - if (agent_id == NULL) - { + CLAIM_ID claim_id = claim_id_get(); + if (!claim_id_is_set(claim_id)) { netdata_log_error("Agent was not claimed - cannot perform challenge/response"); buffer_free(buf); return 1; } - buffer_sprintf(buf, "/api/v1/env?v=%s&cap=proto,ctx&claim_id=%s", &(NETDATA_VERSION[1]) /* skip 'v' at beginning */, agent_id); - - freez(agent_id); + buffer_sprintf(buf, "/api/v1/env?v=%s&cap=proto,ctx&claim_id=%s", &(NETDATA_VERSION[1]) /* skip 'v' at beginning */, claim_id.str); req.host = (char*)aclk_hostname; req.port = aclk_port; diff --git a/src/aclk/aclk_proxy.c b/src/aclk/aclk_proxy.c index 8d0e2d657..a6185db7c 100644 --- a/src/aclk/aclk_proxy.c +++ b/src/aclk/aclk_proxy.c @@ -79,7 +79,7 @@ static inline int check_socks_enviroment(const char **proxy) { char *tmp = getenv("socks_proxy"); - if (!tmp) + if (!tmp || !*tmp) return 1; if (aclk_verify_proxy(tmp) == PROXY_TYPE_SOCKS5) { @@ -97,7 +97,7 @@ static inline int check_http_enviroment(const char **proxy) { char *tmp = getenv("http_proxy"); - if (!tmp) + if (!tmp || !*tmp) return 1; if (aclk_verify_proxy(tmp) == PROXY_TYPE_HTTP) { @@ -113,15 +113,11 @@ static inline int check_http_enviroment(const char **proxy) const char *aclk_lws_wss_get_proxy_setting(ACLK_PROXY_TYPE *type) { - const char *proxy = appconfig_get(&cloud_config, CONFIG_SECTION_GLOBAL, ACLK_PROXY_CONFIG_VAR, ACLK_PROXY_ENV); - - // backward 
compatibility: "proxy" was in "netdata.conf" - if (config_exists(CONFIG_SECTION_CLOUD, ACLK_PROXY_CONFIG_VAR)) - proxy = config_get(CONFIG_SECTION_CLOUD, ACLK_PROXY_CONFIG_VAR, ACLK_PROXY_ENV); + const char *proxy = cloud_config_proxy_get(); *type = PROXY_DISABLED; - if (strcmp(proxy, "none") == 0) + if (!proxy || !*proxy || strcmp(proxy, "none") == 0) return proxy; if (strcmp(proxy, ACLK_PROXY_ENV) == 0) { diff --git a/src/aclk/aclk_query.c b/src/aclk/aclk_query.c index 08bc2acf3..1d93a5e2d 100644 --- a/src/aclk/aclk_query.c +++ b/src/aclk/aclk_query.c @@ -1,16 +1,10 @@ // SPDX-License-Identifier: GPL-3.0-or-later #include "aclk_query.h" -#include "aclk_stats.h" #include "aclk_tx_msgs.h" #include "../../web/server/web_client_cache.h" -#define WEB_HDR_ACCEPT_ENC "Accept-Encoding:" - -pthread_cond_t query_cond_wait = PTHREAD_COND_INITIALIZER; -pthread_mutex_t query_lock_wait = PTHREAD_MUTEX_INITIALIZER; -#define QUERY_THREAD_LOCK pthread_mutex_lock(&query_lock_wait) -#define QUERY_THREAD_UNLOCK pthread_mutex_unlock(&query_lock_wait) +static HTTP_ACL default_aclk_http_acl = HTTP_ACL_ALL_FEATURES; struct pending_req_list { const char *msg_id; @@ -22,7 +16,17 @@ struct pending_req_list { }; static struct pending_req_list *pending_req_list_head = NULL; -static pthread_mutex_t pending_req_list_lock = PTHREAD_MUTEX_INITIALIZER; +static SPINLOCK pending_req_list_lock = NETDATA_SPINLOCK_INITIALIZER; + +void aclk_config_get_query_scope(void) { + const char *s = config_get(CONFIG_SECTION_CLOUD, "scope", "full"); + if(strcmp(s, "license manager") == 0) + default_aclk_http_acl = HTTP_ACL_ACLK_LICENSE_MANAGER; +} + +bool aclk_query_scope_has(HTTP_ACL acl) { + return (default_aclk_http_acl & acl) == acl; +} static struct pending_req_list *pending_req_list_add(const char *msg_id) { @@ -30,10 +34,10 @@ static struct pending_req_list *pending_req_list_add(const char *msg_id) new->msg_id = msg_id; new->hash = simple_hash(msg_id); - pthread_mutex_lock(&pending_req_list_lock); + spinlock_lock(&pending_req_list_lock); new->next = pending_req_list_head; pending_req_list_head = new; - pthread_mutex_unlock(&pending_req_list_lock); + spinlock_unlock(&pending_req_list_lock); return new; } @@ -42,7 +46,7 @@ void pending_req_list_rm(const char *msg_id) uint32_t hash = simple_hash(msg_id); struct pending_req_list *prev = NULL; - pthread_mutex_lock(&pending_req_list_lock); + spinlock_lock(&pending_req_list_lock); struct pending_req_list *curr = pending_req_list_head; while (curr) { @@ -59,26 +63,26 @@ void pending_req_list_rm(const char *msg_id) prev = curr; curr = curr->next; } - pthread_mutex_unlock(&pending_req_list_lock); + spinlock_unlock(&pending_req_list_lock); } int mark_pending_req_cancelled(const char *msg_id) { uint32_t hash = simple_hash(msg_id); - pthread_mutex_lock(&pending_req_list_lock); + spinlock_lock(&pending_req_list_lock); struct pending_req_list *curr = pending_req_list_head; while (curr) { if (curr->hash == hash && strcmp(curr->msg_id, msg_id) == 0) { curr->canceled = 1; - pthread_mutex_unlock(&pending_req_list_lock); + spinlock_unlock(&pending_req_list_lock); return 0; } curr = curr->next; } - pthread_mutex_unlock(&pending_req_list_lock); + spinlock_unlock(&pending_req_list_lock); return 1; } @@ -88,7 +92,8 @@ static bool aclk_web_client_interrupt_cb(struct web_client *w __maybe_unused, vo return req->canceled; } -static int http_api_v2(struct aclk_query_thread *query_thr, aclk_query_t query) { +int http_api_v2(mqtt_wss_client client, aclk_query_t query) +{ ND_LOG_STACK lgs[] = { 
ND_LOG_FIELD_TXT(NDF_SRC_TRANSPORT, "aclk"), ND_LOG_FIELD_END(), @@ -97,8 +102,6 @@ static int http_api_v2(struct aclk_query_thread *query_thr, aclk_query_t query) int retval = 0; BUFFER *local_buffer = NULL; - size_t size = 0; - size_t sent = 0; usec_t dt_ut = 0; int z_ret; @@ -106,7 +109,7 @@ static int http_api_v2(struct aclk_query_thread *query_thr, aclk_query_t query) struct web_client *w = web_client_get_from_cache(); web_client_set_conn_cloud(w); - w->port_acl = HTTP_ACL_ACLK | HTTP_ACL_ALL_FEATURES; + w->port_acl = HTTP_ACL_ACLK | default_aclk_http_acl; w->acl = w->port_acl; web_client_set_permissions(w, HTTP_ACCESS_MAP_OLD_MEMBER, HTTP_USER_ROLE_MEMBER, WEB_CLIENT_FLAG_AUTH_CLOUD); @@ -124,7 +127,7 @@ static int http_api_v2(struct aclk_query_thread *query_thr, aclk_query_t query) nd_log(NDLS_ACCESS, NDLP_ERR, "ACLK received request is not valid, code %d", validation); retval = 1; w->response.code = HTTP_RESP_BAD_REQUEST; - w->response.code = (short)aclk_http_msg_v2(query_thr->client, query->callback_topic, query->msg_id, + w->response.code = (short)aclk_http_msg_v2(client, query->callback_topic, query->msg_id, dt_ut, query->created, w->response.code, NULL, 0); goto cleanup; @@ -137,39 +140,18 @@ static int http_api_v2(struct aclk_query_thread *query_thr, aclk_query_t query) dt_ut / USEC_PER_MS, query->timeout); retval = 1; w->response.code = HTTP_RESP_SERVICE_UNAVAILABLE; - aclk_http_msg_v2_err(query_thr->client, query->callback_topic, query->msg_id, w->response.code, CLOUD_EC_SND_TIMEOUT, CLOUD_EMSG_SND_TIMEOUT, NULL, 0); + aclk_http_msg_v2_err(client, query->callback_topic, query->msg_id, w->response.code, CLOUD_EC_SND_TIMEOUT, CLOUD_EMSG_SND_TIMEOUT, NULL, 0); goto cleanup; } char *path = (char *)buffer_tostring(w->url_path_decoded); - if (aclk_stats_enabled) { - char *url_path_endpoint = strrchr(path, '/'); - ACLK_STATS_LOCK; - int stat_idx = aclk_cloud_req_http_type_to_idx(url_path_endpoint ? 
url_path_endpoint + 1 : "other"); - aclk_metrics_per_sample.cloud_req_http_by_type[stat_idx]++; - ACLK_STATS_UNLOCK; - } - w->response.code = (short)web_client_api_request_with_node_selection(localhost, w, path); web_client_timeout_checkpoint_response_ready(w, &dt_ut); - if (aclk_stats_enabled) { - ACLK_STATS_LOCK; - aclk_metrics_per_sample.cloud_q_process_total += dt_ut; - aclk_metrics_per_sample.cloud_q_process_count++; - if (aclk_metrics_per_sample.cloud_q_process_max < dt_ut) - aclk_metrics_per_sample.cloud_q_process_max = dt_ut; - ACLK_STATS_UNLOCK; - } - - size = w->response.data->len; - sent = size; - if (w->response.data->len && w->response.zinitialized) { w->response.zstream.next_in = (Bytef *)w->response.data->buffer; w->response.zstream.avail_in = w->response.data->len; - do { w->response.zstream.avail_out = NETDATA_WEB_RESPONSE_ZLIB_CHUNK_SIZE; w->response.zstream.next_out = w->response.zbuffer; @@ -181,7 +163,7 @@ static int http_api_v2(struct aclk_query_thread *query_thr, aclk_query_t query) netdata_log_error("Unknown error during zlib compression."); retval = 1; w->response.code = 500; - aclk_http_msg_v2_err(query_thr->client, query->callback_topic, query->msg_id, w->response.code, CLOUD_EC_ZLIB_ERROR, CLOUD_EMSG_ZLIB_ERROR, NULL, 0); + aclk_http_msg_v2_err(client, query->callback_topic, query->msg_id, w->response.code, CLOUD_EC_ZLIB_ERROR, CLOUD_EMSG_ZLIB_ERROR, NULL, 0); goto cleanup; } int bytes_to_cpy = NETDATA_WEB_RESPONSE_ZLIB_CHUNK_SIZE - w->response.zstream.avail_out; @@ -208,16 +190,20 @@ static int http_api_v2(struct aclk_query_thread *query_thr, aclk_query_t query) buffer_need_bytes(local_buffer, w->response.data->len); memcpy(&local_buffer->buffer[local_buffer->len], w->response.data->buffer, w->response.data->len); local_buffer->len += w->response.data->len; - sent = sent - size + w->response.data->len; - } else { + } else buffer_strcat(local_buffer, w->response.data->buffer); - } } // send msg. 
- w->response.code = (short)aclk_http_msg_v2(query_thr->client, query->callback_topic, query->msg_id, - dt_ut, query->created, w->response.code, - local_buffer->buffer, local_buffer->len); + w->response.code = (short)aclk_http_msg_v2( + client, + query->callback_topic, + query->msg_id, + dt_ut, + query->created, + w->response.code, + local_buffer->buffer, + local_buffer->len); cleanup: web_client_log_completed_request(w, false); @@ -230,144 +216,14 @@ cleanup: return retval; } -static int send_bin_msg(struct aclk_query_thread *query_thr, aclk_query_t query) +int send_bin_msg(mqtt_wss_client client, aclk_query_t query) { // this will be simplified when legacy support is removed - aclk_send_bin_message_subtopic_pid(query_thr->client, query->data.bin_payload.payload, query->data.bin_payload.size, query->data.bin_payload.topic, query->data.bin_payload.msg_name); + aclk_send_bin_message_subtopic_pid( + client, + query->data.bin_payload.payload, + query->data.bin_payload.size, + query->data.bin_payload.topic, + query->data.bin_payload.msg_name); return 0; } - -const char *aclk_query_get_name(aclk_query_type_t qt, int unknown_ok) -{ - switch (qt) { - case HTTP_API_V2: return "http_api_request_v2"; - case REGISTER_NODE: return "register_node"; - case NODE_STATE_UPDATE: return "node_state_update"; - case CHART_DIMS_UPDATE: return "chart_and_dim_update"; - case CHART_CONFIG_UPDATED: return "chart_config_updated"; - case CHART_RESET: return "reset_chart_messages"; - case RETENTION_UPDATED: return "update_retention_info"; - case UPDATE_NODE_INFO: return "update_node_info"; - case ALARM_PROVIDE_CHECKPOINT: return "alarm_checkpoint"; - case ALARM_PROVIDE_CFG: return "provide_alarm_config"; - case ALARM_SNAPSHOT: return "alarm_snapshot"; - case UPDATE_NODE_COLLECTORS: return "update_node_collectors"; - case PROTO_BIN_MESSAGE: return "generic_binary_proto_message"; - default: - if (!unknown_ok) - error_report("Unknown query type used %d", (int) qt); - return "unknown"; - } -} - -static void aclk_query_process_msg(struct aclk_query_thread *query_thr, aclk_query_t query) -{ - if (query->type == UNKNOWN || query->type >= ACLK_QUERY_TYPE_COUNT) { - error_report("Unknown query in query queue. %u", query->type); - aclk_query_free(query); - return; - } - - worker_is_busy(query->type); - if (query->type == HTTP_API_V2) { - netdata_log_debug(D_ACLK, "Processing Queued Message of type: \"http_api_request_v2\""); - http_api_v2(query_thr, query); - } else { - netdata_log_debug(D_ACLK, "Processing Queued Message of type: \"%s\"", query->data.bin_payload.msg_name); - send_bin_msg(query_thr, query); - } - - if (aclk_stats_enabled) { - ACLK_STATS_LOCK; - aclk_metrics_per_sample.queries_dispatched++; - aclk_queries_per_thread[query_thr->idx]++; - aclk_metrics_per_sample.queries_per_type[query->type]++; - ACLK_STATS_UNLOCK; - } - - aclk_query_free(query); - - worker_is_idle(); -} - -/* Processes messages from queue. 
Compete for work with other threads - */ -int aclk_query_process_msgs(struct aclk_query_thread *query_thr) -{ - aclk_query_t query; - while ((query = aclk_queue_pop())) - aclk_query_process_msg(query_thr, query); - - return 0; -} - -static void worker_aclk_register(void) { - worker_register("ACLKQUERY"); - for (int i = 1; i < ACLK_QUERY_TYPE_COUNT; i++) { - worker_register_job_name(i, aclk_query_get_name(i, 0)); - } -} - -static void aclk_query_request_cancel(void *data) -{ - pthread_cond_broadcast((pthread_cond_t *) data); -} - -/** - * Main query processing thread - */ -void *aclk_query_main_thread(void *ptr) -{ - worker_aclk_register(); - - struct aclk_query_thread *query_thr = ptr; - - service_register(SERVICE_THREAD_TYPE_NETDATA, aclk_query_request_cancel, NULL, &query_cond_wait, false); - - while (service_running(SERVICE_ACLK | ABILITY_DATA_QUERIES)) { - aclk_query_process_msgs(query_thr); - - worker_is_idle(); - QUERY_THREAD_LOCK; - if (unlikely(pthread_cond_wait(&query_cond_wait, &query_lock_wait))) - sleep_usec(USEC_PER_SEC * 1); - QUERY_THREAD_UNLOCK; - } - - worker_unregister(); - return NULL; -} - -#define TASK_LEN_MAX 22 -void aclk_query_threads_start(struct aclk_query_threads *query_threads, mqtt_wss_client client) -{ - netdata_log_info("Starting %d query threads.", query_threads->count); - - char thread_name[TASK_LEN_MAX]; - query_threads->thread_list = callocz(query_threads->count, sizeof(struct aclk_query_thread)); - for (int i = 0; i < query_threads->count; i++) { - query_threads->thread_list[i].idx = i; //thread needs to know its index for statistics - query_threads->thread_list[i].client = client; - - if(unlikely(snprintfz(thread_name, TASK_LEN_MAX, "ACLK_QRY[%d]", i) < 0)) - netdata_log_error("snprintf encoding error"); - - query_threads->thread_list[i].thread = nd_thread_create( - thread_name, - NETDATA_THREAD_OPTION_JOINABLE, - aclk_query_main_thread, - &query_threads->thread_list[i]); - } -} - -void aclk_query_threads_cleanup(struct aclk_query_threads *query_threads) -{ - if (query_threads && query_threads->thread_list) { - for (int i = 0; i < query_threads->count; i++) { - nd_thread_join(query_threads->thread_list[i].thread); - } - freez(query_threads->thread_list); - } - aclk_queue_lock(); - aclk_queue_flush(); -} diff --git a/src/aclk/aclk_query.h b/src/aclk/aclk_query.h index 900583237..04cb460d0 100644 --- a/src/aclk/aclk_query.h +++ b/src/aclk/aclk_query.h @@ -9,30 +9,11 @@ #include "aclk_query_queue.h" -extern pthread_cond_t query_cond_wait; -extern pthread_mutex_t query_lock_wait; -#define QUERY_THREAD_WAKEUP pthread_cond_signal(&query_cond_wait) -#define QUERY_THREAD_WAKEUP_ALL pthread_cond_broadcast(&query_cond_wait) - -// TODO -//extern volatile int aclk_connected; - -struct aclk_query_thread { - ND_THREAD *thread; - int idx; - mqtt_wss_client client; -}; - -struct aclk_query_threads { - struct aclk_query_thread *thread_list; - int count; -}; - -void aclk_query_threads_start(struct aclk_query_threads *query_threads, mqtt_wss_client client); -void aclk_query_threads_cleanup(struct aclk_query_threads *query_threads); - -const char *aclk_query_get_name(aclk_query_type_t qt, int unknown_ok); - int mark_pending_req_cancelled(const char *msg_id); +void aclk_execute_query(aclk_query_t query); +void aclk_query_init(mqtt_wss_client client); +int http_api_v2(mqtt_wss_client client, aclk_query_t query); +int send_bin_msg(mqtt_wss_client client, aclk_query_t query); + #endif //NETDATA_AGENT_CLOUD_LINK_H diff --git a/src/aclk/aclk_query_queue.c 
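/*
 * The query threads deleted above used the classic condition-variable worker
 * pattern: sleep on a condvar, wake to drain a queue, repeat. A minimal
 * standalone version of that pattern (names here are demo-only; the real
 * loop also checked service_running(), and a real shutdown would set the
 * flag and broadcast the condvar):
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int pending = 0;
static bool shutting_down = false;

static void *worker(void *arg) {
    (void)arg;
    pthread_mutex_lock(&lock);
    while (!shutting_down) {
        while (pending > 0) {                /* drain all queued work */
            pending--;
            pthread_mutex_unlock(&lock);
            printf("processing one item\n"); /* work happens unlocked */
            pthread_mutex_lock(&lock);
        }
        pthread_cond_wait(&cond, &lock);     /* sleep until a producer signals */
    }
    pthread_mutex_unlock(&lock);
    return NULL;
}

static void enqueue(void) {
    pthread_mutex_lock(&lock);
    pending++;
    pthread_cond_signal(&cond);              /* the old QUERY_THREAD_WAKEUP */
    pthread_mutex_unlock(&lock);
}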
b/src/aclk/aclk_query_queue.c index 3edadc002..acaa2d9c6 100644 --- a/src/aclk/aclk_query_queue.c +++ b/src/aclk/aclk_query_queue.c @@ -1,87 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later #include "aclk_query_queue.h" -#include "aclk_query.h" -#include "aclk_stats.h" - -static netdata_mutex_t aclk_query_queue_mutex = NETDATA_MUTEX_INITIALIZER; -#define ACLK_QUEUE_LOCK netdata_mutex_lock(&aclk_query_queue_mutex) -#define ACLK_QUEUE_UNLOCK netdata_mutex_unlock(&aclk_query_queue_mutex) - -static struct aclk_query_queue { - aclk_query_t head; - int block_push; -} aclk_query_queue = { - .head = NULL, - .block_push = 0 -}; - -static inline int _aclk_queue_query(aclk_query_t query) -{ - now_monotonic_high_precision_timeval(&query->created_tv); - query->created = now_realtime_usec(); - - ACLK_QUEUE_LOCK; - if (aclk_query_queue.block_push) { - ACLK_QUEUE_UNLOCK; - if(service_running(SERVICE_ACLK | ABILITY_DATA_QUERIES)) - netdata_log_error("Query Queue is blocked from accepting new requests. This is normally the case when ACLK prepares to shutdown."); - aclk_query_free(query); - return 1; - } - DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(aclk_query_queue.head, query, prev, next); - ACLK_QUEUE_UNLOCK; - return 0; - -} - -int aclk_queue_query(aclk_query_t query) -{ - int ret = _aclk_queue_query(query); - if (!ret) { - QUERY_THREAD_WAKEUP; - if (aclk_stats_enabled) { - ACLK_STATS_LOCK; - aclk_metrics_per_sample.queries_queued++; - ACLK_STATS_UNLOCK; - } - } - return ret; -} - -aclk_query_t aclk_queue_pop(void) -{ - aclk_query_t ret; - - ACLK_QUEUE_LOCK; - if (aclk_query_queue.block_push) { - ACLK_QUEUE_UNLOCK; - if(service_running(SERVICE_ACLK | ABILITY_DATA_QUERIES)) - netdata_log_error("POP Query Queue is blocked from accepting new requests. This is normally the case when ACLK prepares to shutdown."); - return NULL; - } - - ret = aclk_query_queue.head; - if (!ret) { - ACLK_QUEUE_UNLOCK; - return ret; - } - - DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(aclk_query_queue.head, ret, prev, next); - ACLK_QUEUE_UNLOCK; - - ret->next = NULL; - return ret; -} - -void aclk_queue_flush(void) -{ - aclk_query_t query = aclk_queue_pop(); - while (query) { - aclk_query_free(query); - query = aclk_queue_pop(); - } -} aclk_query_t aclk_query_new(aclk_query_type_t type) { @@ -93,14 +12,14 @@ aclk_query_t aclk_query_new(aclk_query_type_t type) void aclk_query_free(aclk_query_t query) { switch (query->type) { - case HTTP_API_V2: - freez(query->data.http_api_v2.payload); - if (query->data.http_api_v2.query != query->dedup_id) - freez(query->data.http_api_v2.query); - break; - - default: - break; + case HTTP_API_V2: + freez(query->data.http_api_v2.payload); + if (query->data.http_api_v2.query != query->dedup_id) + freez(query->data.http_api_v2.query); + break; + + default: + break; } freez(query->dedup_id); @@ -108,17 +27,3 @@ void aclk_query_free(aclk_query_t query) freez(query->msg_id); freez(query); } - -void aclk_queue_lock(void) -{ - ACLK_QUEUE_LOCK; - aclk_query_queue.block_push = 1; - ACLK_QUEUE_UNLOCK; -} - -void aclk_queue_unlock(void) -{ - ACLK_QUEUE_LOCK; - aclk_query_queue.block_push = 0; - ACLK_QUEUE_UNLOCK; -} diff --git a/src/aclk/aclk_query_queue.h b/src/aclk/aclk_query_queue.h index 4a4a36a3f..8b7e3a10c 100644 --- a/src/aclk/aclk_query_queue.h +++ b/src/aclk/aclk_query_queue.h @@ -14,12 +14,7 @@ typedef enum { HTTP_API_V2, REGISTER_NODE, NODE_STATE_UPDATE, - CHART_DIMS_UPDATE, - CHART_CONFIG_UPDATED, - CHART_RESET, - RETENTION_UPDATED, UPDATE_NODE_INFO, - ALARM_PROVIDE_CHECKPOINT, ALARM_PROVIDE_CFG, 
ALARM_SNAPSHOT, UPDATE_NODE_COLLECTORS, @@ -32,7 +27,7 @@ struct aclk_query_http_api_v2 { char *query; }; -struct aclk_bin_payload { +struct aclk_bin_payload { char *payload; size_t size; enum aclk_topics topic; @@ -55,7 +50,6 @@ struct aclk_query { struct timeval created_tv; usec_t created; int timeout; - aclk_query_t prev, next; // TODO maybe remove? int version; @@ -68,20 +62,16 @@ struct aclk_query { aclk_query_t aclk_query_new(aclk_query_type_t type); void aclk_query_free(aclk_query_t query); -int aclk_queue_query(aclk_query_t query); -aclk_query_t aclk_queue_pop(void); -void aclk_queue_flush(void); +void aclk_execute_query(aclk_query_t query); -void aclk_queue_lock(void); -void aclk_queue_unlock(void); - -#define QUEUE_IF_PAYLOAD_PRESENT(query) do { \ - if (likely(query->data.bin_payload.payload)) { \ - aclk_queue_query(query); \ - } else { \ - nd_log(NDLS_DAEMON, NDLP_ERR, "Failed to generate payload"); \ - aclk_query_free(query); \ - } \ -} while(0) +#define QUEUE_IF_PAYLOAD_PRESENT(query) \ + do { \ + if (likely((query)->data.bin_payload.payload)) { \ + aclk_execute_query(query); \ + } else { \ + nd_log(NDLS_DAEMON, NDLP_ERR, "Failed to generate payload"); \ + aclk_query_free(query); \ + } \ + } while (0) #endif /* NETDATA_ACLK_QUERY_QUEUE_H */ diff --git a/src/aclk/aclk_rrdhost_state.h b/src/aclk/aclk_rrdhost_state.h deleted file mode 100644 index 5c8a2ddc9..000000000 --- a/src/aclk/aclk_rrdhost_state.h +++ /dev/null @@ -1,11 +0,0 @@ -#ifndef ACLK_RRDHOST_STATE_H -#define ACLK_RRDHOST_STATE_H - -#include "libnetdata/libnetdata.h" - -typedef struct aclk_rrdhost_state { - char *claimed_id; // Claimed ID if host has one otherwise NULL - char *prev_claimed_id; // Claimed ID if changed (reclaimed) during runtime -} aclk_rrdhost_state; - -#endif /* ACLK_RRDHOST_STATE_H */ diff --git a/src/aclk/aclk_rx_msgs.c b/src/aclk/aclk_rx_msgs.c index 8db8e3f1e..36bd3599d 100644 --- a/src/aclk/aclk_rx_msgs.c +++ b/src/aclk/aclk_rx_msgs.c @@ -2,7 +2,6 @@ #include "aclk_rx_msgs.h" -#include "aclk_stats.h" #include "aclk_query_queue.h" #include "aclk.h" #include "aclk_capas.h" @@ -165,7 +164,7 @@ static int aclk_handle_cloud_http_request_v2(struct aclk_request *cloud_to_agent // it would be strange to get URL from `dedup_id` query->data.http_api_v2.query = query->dedup_id; query->msg_id = cloud_to_agent->msg_id; - aclk_queue_query(query); + aclk_execute_query(query); return 0; error: @@ -268,7 +267,7 @@ int create_node_instance_result(const char *msg, size_t msg_len) freez(res.node_id); return 1; } - update_node_id(&host_id, &node_id); + sql_update_node_id(&host_id, &node_id); aclk_query_t query = aclk_query_new(NODE_STATE_UPDATE); node_instance_connection_t node_state_update = { @@ -292,17 +291,16 @@ int create_node_instance_result(const char *msg, size_t msg_len) node_state_update.capabilities = aclk_get_node_instance_capas(host); } - rrdhost_aclk_state_lock(localhost); - node_state_update.claim_id = localhost->aclk_state.claimed_id; + CLAIM_ID claim_id = claim_id_get(); + node_state_update.claim_id = claim_id_is_set(claim_id) ? 
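/*
 * Why QUEUE_IF_PAYLOAD_PRESENT above is wrapped in do { ... } while (0), and
 * why the rewrite parenthesizes (query): the do/while makes the macro behave
 * as a single statement after if/else, and the parentheses keep a complex
 * argument from binding wrongly inside the expansion. The macro below is
 * purely illustrative, not from netdata:
 */
#include <stdio.h>

#define LOG_IF_POSITIVE(x)            \
    do {                              \
        if ((x) > 0)                  \
            printf("%d > 0\n", (x));  \
        else                          \
            printf("%d <= 0\n", (x)); \
    } while (0)

int main(void) {
    int v = 3;
    if (v)
        LOG_IF_POSITIVE(v - 5); /* expands safely as one statement */
    else
        printf("zero\n");       /* still pairs with the outer if */
    return 0;
}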
claim_id.str : NULL; query->data.bin_payload.payload = generate_node_instance_connection(&query->data.bin_payload.size, &node_state_update); - rrdhost_aclk_state_unlock(localhost); freez((void *)node_state_update.capabilities); query->data.bin_payload.msg_name = "UpdateNodeInstanceConnection"; query->data.bin_payload.topic = ACLK_TOPICID_NODE_CONN; - aclk_queue_query(query); + aclk_execute_query(query); freez(res.node_id); freez(res.machine_guid); return 0; @@ -409,7 +407,7 @@ int handle_disconnect_req(const char *msg, size_t msg_len) "Cloud asks not to reconnect for %u seconds. We shall honor that request", (unsigned int)cmd->reconnect_after_s); } - disconnect_req = 1; + disconnect_req = ACLK_CLOUD_DISCONNECT; freez(cmd->error_description); freez(cmd); return 0; @@ -503,12 +501,7 @@ new_cloud_rx_msg_t *find_rx_handler_by_hash(simple_hash_t hash) return NULL; } -const char *rx_handler_get_name(size_t i) -{ - return rx_msgs[i].name; -} - -unsigned int aclk_init_rx_msg_handlers(void) +void aclk_init_rx_msg_handlers(void) { int i; for (i = 0; rx_msgs[i].fnc; i++) { @@ -521,29 +514,17 @@ unsigned int aclk_init_rx_msg_handlers(void) } rx_msgs[i].name_hash = hash; } - return i; } void aclk_handle_new_cloud_msg(const char *message_type, const char *msg, size_t msg_len, const char *topic __maybe_unused) { - if (aclk_stats_enabled) { - ACLK_STATS_LOCK; - aclk_metrics_per_sample.cloud_req_recvd++; - ACLK_STATS_UNLOCK; - } new_cloud_rx_msg_t *msg_descriptor = find_rx_handler_by_hash(simple_hash(message_type)); netdata_log_debug(D_ACLK, "Got message named '%s' from cloud", message_type); if (unlikely(!msg_descriptor)) { netdata_log_error("Do not know how to handle message of type '%s'. Ignoring", message_type); - if (aclk_stats_enabled) { - ACLK_STATS_LOCK; - aclk_metrics_per_sample.cloud_req_err++; - ACLK_STATS_UNLOCK; - } return; } - if (aclklog_enabled) { if (!strncmp(message_type, "cmd", strlen("cmd"))) { log_aclk_message_bin(msg, msg_len, 0, topic, msg_descriptor->name); @@ -554,18 +535,8 @@ void aclk_handle_new_cloud_msg(const char *message_type, const char *msg, size_t } } - if (aclk_stats_enabled) { - ACLK_STATS_LOCK; - aclk_proto_rx_msgs_sample[msg_descriptor-rx_msgs]++; - ACLK_STATS_UNLOCK; - } if (msg_descriptor->fnc(msg, msg_len)) { netdata_log_error("Error processing message of type '%s'", message_type); - if (aclk_stats_enabled) { - ACLK_STATS_LOCK; - aclk_metrics_per_sample.cloud_req_err++; - ACLK_STATS_UNLOCK; - } return; } } diff --git a/src/aclk/aclk_rx_msgs.h b/src/aclk/aclk_rx_msgs.h index 61921faec..ae5dc18b8 100644 --- a/src/aclk/aclk_rx_msgs.h +++ b/src/aclk/aclk_rx_msgs.h @@ -1,17 +1,12 @@ - - // SPDX-License-Identifier: GPL-3.0-or-later #ifndef ACLK_RX_MSGS_H #define ACLK_RX_MSGS_H -#include "daemon/common.h" #include "libnetdata/libnetdata.h" int aclk_handle_cloud_cmd_message(char *payload); - -const char *rx_handler_get_name(size_t i); -unsigned int aclk_init_rx_msg_handlers(void); +void aclk_init_rx_msg_handlers(void); void aclk_handle_new_cloud_msg(const char *message_type, const char *msg, size_t msg_len, const char *topic); #endif /* ACLK_RX_MSGS_H */ diff --git a/src/aclk/aclk_stats.c b/src/aclk/aclk_stats.c deleted file mode 100644 index 47a48c366..000000000 --- a/src/aclk/aclk_stats.c +++ /dev/null @@ -1,483 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#ifndef MQTT_WSS_CPUSTATS -#define MQTT_WSS_CPUSTATS -#endif - -#include "aclk_stats.h" - -#include "aclk_query.h" - -netdata_mutex_t aclk_stats_mutex = NETDATA_MUTEX_INITIALIZER; - -struct { - int 
query_thread_count; - unsigned int proto_hdl_cnt; - uint32_t *aclk_proto_rx_msgs_sample; - RRDDIM **rx_msg_dims; -} aclk_stats_cfg; // there is only 1 stats thread at a time - -// data ACLK stats need per query thread -struct aclk_qt_data { - RRDDIM *dim; -} *aclk_qt_data = NULL; - -uint32_t *aclk_queries_per_thread = NULL; -uint32_t *aclk_queries_per_thread_sample = NULL; -uint32_t *aclk_proto_rx_msgs_sample = NULL; - -struct aclk_metrics aclk_metrics = { - .online = 0, -}; - -struct aclk_metrics_per_sample aclk_metrics_per_sample; - -static void aclk_stats_collect(struct aclk_metrics_per_sample *per_sample, struct aclk_metrics *permanent) -{ - static RRDSET *st_aclkstats = NULL; - static RRDDIM *rd_online_status = NULL; - - if (unlikely(!st_aclkstats)) { - st_aclkstats = rrdset_create_localhost( - "netdata", "aclk_status", NULL, "aclk", NULL, "ACLK/Cloud connection status", - "connected", "netdata", "stats", 200000, localhost->rrd_update_every, RRDSET_TYPE_LINE); - - rd_online_status = rrddim_add(st_aclkstats, "online", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - } - - rrddim_set_by_pointer(st_aclkstats, rd_online_status, per_sample->offline_during_sample ? 0 : permanent->online); - - rrdset_done(st_aclkstats); -} - -static void aclk_stats_query_queue(struct aclk_metrics_per_sample *per_sample) -{ - static RRDSET *st_query_thread = NULL; - static RRDDIM *rd_queued = NULL; - static RRDDIM *rd_dispatched = NULL; - - if (unlikely(!st_query_thread)) { - st_query_thread = rrdset_create_localhost( - "netdata", "aclk_query_per_second", NULL, "aclk", NULL, "ACLK Queries per second", "queries/s", - "netdata", "stats", 200001, localhost->rrd_update_every, RRDSET_TYPE_AREA); - - rd_queued = rrddim_add(st_query_thread, "added", NULL, 1, localhost->rrd_update_every, RRD_ALGORITHM_ABSOLUTE); - rd_dispatched = rrddim_add(st_query_thread, "dispatched", NULL, -1, localhost->rrd_update_every, RRD_ALGORITHM_ABSOLUTE); - } - - rrddim_set_by_pointer(st_query_thread, rd_queued, per_sample->queries_queued); - rrddim_set_by_pointer(st_query_thread, rd_dispatched, per_sample->queries_dispatched); - - rrdset_done(st_query_thread); -} - -#ifdef NETDATA_INTERNAL_CHECKS -static void aclk_stats_latency(struct aclk_metrics_per_sample *per_sample) -{ - static RRDSET *st = NULL; - static RRDDIM *rd_avg = NULL; - static RRDDIM *rd_max = NULL; - - if (unlikely(!st)) { - st = rrdset_create_localhost( - "netdata", "aclk_latency_mqtt", NULL, "aclk", NULL, "ACLK Message Publish Latency", "ms", - "netdata", "stats", 200002, localhost->rrd_update_every, RRDSET_TYPE_LINE); - - rd_avg = rrddim_add(st, "avg", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - rd_max = rrddim_add(st, "max", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - } - - if(per_sample->latency_count) - rrddim_set_by_pointer(st, rd_avg, roundf((float)per_sample->latency_total / per_sample->latency_count)); - else - rrddim_set_by_pointer(st, rd_avg, 0); - - rrddim_set_by_pointer(st, rd_max, per_sample->latency_max); - - rrdset_done(st); -} -#endif - -static void aclk_stats_cloud_req(struct aclk_metrics_per_sample *per_sample) -{ - static RRDSET *st = NULL; - static RRDDIM *rd_rq_rcvd = NULL; - static RRDDIM *rd_rq_err = NULL; - - if (unlikely(!st)) { - st = rrdset_create_localhost( - "netdata", "aclk_cloud_req", NULL, "aclk", NULL, "Requests received from cloud", "req/s", - "netdata", "stats", 200005, localhost->rrd_update_every, RRDSET_TYPE_STACKED); - - rd_rq_rcvd = rrddim_add(st, "received", NULL, 1, localhost->rrd_update_every, RRD_ALGORITHM_ABSOLUTE); - rd_rq_err = rrddim_add(st, 
"malformed", NULL, 1, localhost->rrd_update_every, RRD_ALGORITHM_ABSOLUTE); - } - - rrddim_set_by_pointer(st, rd_rq_rcvd, per_sample->cloud_req_recvd - per_sample->cloud_req_err); - rrddim_set_by_pointer(st, rd_rq_err, per_sample->cloud_req_err); - - rrdset_done(st); -} - -static void aclk_stats_cloud_req_type(struct aclk_metrics_per_sample *per_sample) -{ - static RRDSET *st = NULL; - static RRDDIM *dims[ACLK_QUERY_TYPE_COUNT]; - - if (unlikely(!st)) { - st = rrdset_create_localhost( - "netdata", "aclk_processed_query_type", NULL, "aclk", NULL, "Query thread commands processed by their type", "cmd/s", - "netdata", "stats", 200006, localhost->rrd_update_every, RRDSET_TYPE_STACKED); - - for (int i = 0; i < ACLK_QUERY_TYPE_COUNT; i++) - dims[i] = rrddim_add(st, aclk_query_get_name(i, 1), NULL, 1, localhost->rrd_update_every, RRD_ALGORITHM_ABSOLUTE); - - } - - for (int i = 0; i < ACLK_QUERY_TYPE_COUNT; i++) - rrddim_set_by_pointer(st, dims[i], per_sample->queries_per_type[i]); - - rrdset_done(st); -} - -static char *cloud_req_http_type_names[ACLK_STATS_CLOUD_HTTP_REQ_TYPE_CNT] = { - "other", - "info", - "data", - "alarms", - "alarm_log", - "chart", - "charts", - "function", - "functions" - // if you change then update `ACLK_STATS_CLOUD_HTTP_REQ_TYPE_CNT`. -}; - -int aclk_cloud_req_http_type_to_idx(const char *name) -{ - for (int i = 1; i < ACLK_STATS_CLOUD_HTTP_REQ_TYPE_CNT; i++) - if (!strcmp(cloud_req_http_type_names[i], name)) - return i; - return 0; -} - -static void aclk_stats_cloud_req_http_type(struct aclk_metrics_per_sample *per_sample) -{ - static RRDSET *st = NULL; - static RRDDIM *rd_rq_types[ACLK_STATS_CLOUD_HTTP_REQ_TYPE_CNT]; - - if (unlikely(!st)) { - st = rrdset_create_localhost( - "netdata", "aclk_cloud_req_http_type", NULL, "aclk", NULL, "Requests received from cloud via HTTP by their type", "req/s", - "netdata", "stats", 200007, localhost->rrd_update_every, RRDSET_TYPE_STACKED); - - for (int i = 0; i < ACLK_STATS_CLOUD_HTTP_REQ_TYPE_CNT; i++) - rd_rq_types[i] = rrddim_add(st, cloud_req_http_type_names[i], NULL, 1, localhost->rrd_update_every, RRD_ALGORITHM_ABSOLUTE); - } - - for (int i = 0; i < ACLK_STATS_CLOUD_HTTP_REQ_TYPE_CNT; i++) - rrddim_set_by_pointer(st, rd_rq_types[i], per_sample->cloud_req_http_by_type[i]); - - rrdset_done(st); -} - -#define MAX_DIM_NAME 22 -static void aclk_stats_query_threads(uint32_t *queries_per_thread) -{ - static RRDSET *st = NULL; - - char dim_name[MAX_DIM_NAME]; - - if (unlikely(!st)) { - st = rrdset_create_localhost( - "netdata", "aclk_query_threads", NULL, "aclk", NULL, "Queries Processed Per Thread", "req/s", - "netdata", "stats", 200009, localhost->rrd_update_every, RRDSET_TYPE_STACKED); - - for (int i = 0; i < aclk_stats_cfg.query_thread_count; i++) { - if (snprintfz(dim_name, MAX_DIM_NAME, "Query %d", i) < 0) - netdata_log_error("snprintf encoding error"); - aclk_qt_data[i].dim = rrddim_add(st, dim_name, NULL, 1, localhost->rrd_update_every, RRD_ALGORITHM_ABSOLUTE); - } - } - - for (int i = 0; i < aclk_stats_cfg.query_thread_count; i++) { - rrddim_set_by_pointer(st, aclk_qt_data[i].dim, queries_per_thread[i]); - } - - rrdset_done(st); -} - -static void aclk_stats_query_time(struct aclk_metrics_per_sample *per_sample) -{ - static RRDSET *st = NULL; - static RRDDIM *rd_rq_avg = NULL; - static RRDDIM *rd_rq_max = NULL; - static RRDDIM *rd_rq_total = NULL; - - if (unlikely(!st)) { - st = rrdset_create_localhost( - "netdata", "aclk_query_time", NULL, "aclk", NULL, "Time it took to process cloud requested DB queries", "us", - "netdata", 
"stats", 200008, localhost->rrd_update_every, RRDSET_TYPE_LINE); - - rd_rq_avg = rrddim_add(st, "avg", NULL, 1, localhost->rrd_update_every, RRD_ALGORITHM_ABSOLUTE); - rd_rq_max = rrddim_add(st, "max", NULL, 1, localhost->rrd_update_every, RRD_ALGORITHM_ABSOLUTE); - rd_rq_total = rrddim_add(st, "total", NULL, 1, localhost->rrd_update_every, RRD_ALGORITHM_ABSOLUTE); - } - - if(per_sample->cloud_q_process_count) - rrddim_set_by_pointer(st, rd_rq_avg, roundf((float)per_sample->cloud_q_process_total / per_sample->cloud_q_process_count)); - else - rrddim_set_by_pointer(st, rd_rq_avg, 0); - rrddim_set_by_pointer(st, rd_rq_max, per_sample->cloud_q_process_max); - rrddim_set_by_pointer(st, rd_rq_total, per_sample->cloud_q_process_total); - - rrdset_done(st); -} - -const char *rx_handler_get_name(size_t i); -static void aclk_stats_newproto_rx(uint32_t *rx_msgs_sample) -{ - static RRDSET *st = NULL; - - if (unlikely(!st)) { - st = rrdset_create_localhost( - "netdata", "aclk_protobuf_rx_types", NULL, "aclk", NULL, "Received new cloud architecture messages by their type.", "msg/s", - "netdata", "stats", 200010, localhost->rrd_update_every, RRDSET_TYPE_STACKED); - - for (unsigned int i = 0; i < aclk_stats_cfg.proto_hdl_cnt; i++) { - aclk_stats_cfg.rx_msg_dims[i] = rrddim_add(st, rx_handler_get_name(i), NULL, 1, localhost->rrd_update_every, RRD_ALGORITHM_ABSOLUTE); - } - } - - for (unsigned int i = 0; i < aclk_stats_cfg.proto_hdl_cnt; i++) - rrddim_set_by_pointer(st, aclk_stats_cfg.rx_msg_dims[i], rx_msgs_sample[i]); - - rrdset_done(st); -} - -static void aclk_stats_mqtt_wss(struct mqtt_wss_stats *stats) -{ - static RRDSET *st = NULL; - static RRDDIM *rd_sent = NULL; - static RRDDIM *rd_recvd = NULL; - static uint64_t sent = 0; - static uint64_t recvd = 0; - - static RRDSET *st_txbuf_perc = NULL; - static RRDDIM *rd_txbuf_perc = NULL; - - static RRDSET *st_txbuf = NULL; - static RRDDIM *rd_tx_buffer_usable = NULL; - static RRDDIM *rd_tx_buffer_reclaimable = NULL; - static RRDDIM *rd_tx_buffer_used = NULL; - static RRDDIM *rd_tx_buffer_free = NULL; - static RRDDIM *rd_tx_buffer_size = NULL; - - static RRDSET *st_timing = NULL; - static RRDDIM *rd_keepalive = NULL; - static RRDDIM *rd_read_socket = NULL; - static RRDDIM *rd_write_socket = NULL; - static RRDDIM *rd_process_websocket = NULL; - static RRDDIM *rd_process_mqtt = NULL; - - sent += stats->bytes_tx; - recvd += stats->bytes_rx; - - if (unlikely(!st)) { - st = rrdset_create_localhost( - "netdata", "aclk_openssl_bytes", NULL, "aclk", NULL, "Received and Sent bytes.", "B/s", - "netdata", "stats", 200011, localhost->rrd_update_every, RRDSET_TYPE_STACKED); - - rd_sent = rrddim_add(st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_recvd = rrddim_add(st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - - if (unlikely(!st_txbuf_perc)) { - st_txbuf_perc = rrdset_create_localhost( - "netdata", "aclk_mqtt_tx_perc", NULL, "aclk", NULL, "Actively used percentage of MQTT Tx Buffer,", "%", - "netdata", "stats", 200012, localhost->rrd_update_every, RRDSET_TYPE_LINE); - - rd_txbuf_perc = rrddim_add(st_txbuf_perc, "used", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE); - } - - if (unlikely(!st_txbuf)) { - st_txbuf = rrdset_create_localhost( - "netdata", "aclk_mqtt_tx_queue", NULL, "aclk", NULL, "State of transmit MQTT queue.", "B", - "netdata", "stats", 200013, localhost->rrd_update_every, RRDSET_TYPE_LINE); - - rd_tx_buffer_usable = rrddim_add(st_txbuf, "usable", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - rd_tx_buffer_reclaimable = rrddim_add(st_txbuf, 
"reclaimable", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - rd_tx_buffer_used = rrddim_add(st_txbuf, "used", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - rd_tx_buffer_free = rrddim_add(st_txbuf, "free", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - rd_tx_buffer_size = rrddim_add(st_txbuf, "size", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - } - - if (unlikely(!st_timing)) { - st_timing = rrdset_create_localhost( - "netdata", "aclk_mqtt_wss_time", NULL, "aclk", NULL, "Time spent handling MQTT, WSS, SSL and network communication.", "us", - "netdata", "stats", 200014, localhost->rrd_update_every, RRDSET_TYPE_STACKED); - - rd_keepalive = rrddim_add(st_timing, "keep-alive", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - rd_read_socket = rrddim_add(st_timing, "socket_read_ssl", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - rd_write_socket = rrddim_add(st_timing, "socket_write_ssl", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - rd_process_websocket = rrddim_add(st_timing, "process_websocket", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - rd_process_mqtt = rrddim_add(st_timing, "process_mqtt", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - } - - rrddim_set_by_pointer(st, rd_sent, sent); - rrddim_set_by_pointer(st, rd_recvd, recvd); - - float usage = ((float)stats->mqtt.tx_buffer_free + stats->mqtt.tx_buffer_reclaimable) / stats->mqtt.tx_buffer_size; - usage = (1 - usage) * 10000; - rrddim_set_by_pointer(st_txbuf_perc, rd_txbuf_perc, usage); - - rrddim_set_by_pointer(st_txbuf, rd_tx_buffer_usable, stats->mqtt.tx_buffer_reclaimable + stats->mqtt.tx_buffer_free); - rrddim_set_by_pointer(st_txbuf, rd_tx_buffer_reclaimable, stats->mqtt.tx_buffer_reclaimable); - rrddim_set_by_pointer(st_txbuf, rd_tx_buffer_used, stats->mqtt.tx_buffer_used); - rrddim_set_by_pointer(st_txbuf, rd_tx_buffer_free, stats->mqtt.tx_buffer_free); - rrddim_set_by_pointer(st_txbuf, rd_tx_buffer_size, stats->mqtt.tx_buffer_size); - - rrddim_set_by_pointer(st_timing, rd_keepalive, stats->time_keepalive); - rrddim_set_by_pointer(st_timing, rd_read_socket, stats->time_read_socket); - rrddim_set_by_pointer(st_timing, rd_write_socket, stats->time_write_socket); - rrddim_set_by_pointer(st_timing, rd_process_websocket, stats->time_process_websocket); - rrddim_set_by_pointer(st_timing, rd_process_mqtt, stats->time_process_mqtt); - - rrdset_done(st); - rrdset_done(st_txbuf_perc); - rrdset_done(st_txbuf); - rrdset_done(st_timing); -} - -void aclk_stats_thread_prepare(int query_thread_count, unsigned int proto_hdl_cnt) -{ - aclk_qt_data = callocz(query_thread_count, sizeof(struct aclk_qt_data)); - aclk_queries_per_thread = callocz(query_thread_count, sizeof(uint32_t)); - aclk_queries_per_thread_sample = callocz(query_thread_count, sizeof(uint32_t)); - - memset(&aclk_metrics_per_sample, 0, sizeof(struct aclk_metrics_per_sample)); - - aclk_stats_cfg.proto_hdl_cnt = proto_hdl_cnt; - aclk_stats_cfg.aclk_proto_rx_msgs_sample = callocz(proto_hdl_cnt, sizeof(*aclk_proto_rx_msgs_sample)); - aclk_proto_rx_msgs_sample = callocz(proto_hdl_cnt, sizeof(*aclk_proto_rx_msgs_sample)); - aclk_stats_cfg.rx_msg_dims = callocz(proto_hdl_cnt, sizeof(RRDDIM*)); -} - -void aclk_stats_thread_cleanup() -{ - freez(aclk_stats_cfg.rx_msg_dims); - freez(aclk_proto_rx_msgs_sample); - freez(aclk_stats_cfg.aclk_proto_rx_msgs_sample); - freez(aclk_qt_data); - freez(aclk_queries_per_thread); - freez(aclk_queries_per_thread_sample); -} - -void *aclk_stats_main_thread(void *ptr) -{ - struct aclk_stats_thread *args = ptr; - - aclk_stats_cfg.query_thread_count = args->query_thread_count; - - heartbeat_t hb; - heartbeat_init(&hb); - usec_t 
step_ut = localhost->rrd_update_every * USEC_PER_SEC; - - struct aclk_metrics_per_sample per_sample; - struct aclk_metrics permanent; - - while (service_running(SERVICE_ACLK | SERVICE_COLLECTORS)) { - - // ------------------------------------------------------------------------ - // Wait for the next iteration point. - - heartbeat_next(&hb, step_ut); - - if (!service_running(SERVICE_ACLK | SERVICE_COLLECTORS)) break; - - ACLK_STATS_LOCK; - // to not hold lock longer than necessary, especially not to hold it - // during database rrd* operations - memcpy(&per_sample, &aclk_metrics_per_sample, sizeof(struct aclk_metrics_per_sample)); - - memcpy(aclk_stats_cfg.aclk_proto_rx_msgs_sample, aclk_proto_rx_msgs_sample, sizeof(*aclk_proto_rx_msgs_sample) * aclk_stats_cfg.proto_hdl_cnt); - memset(aclk_proto_rx_msgs_sample, 0, sizeof(*aclk_proto_rx_msgs_sample) * aclk_stats_cfg.proto_hdl_cnt); - - memcpy(&permanent, &aclk_metrics, sizeof(struct aclk_metrics)); - memset(&aclk_metrics_per_sample, 0, sizeof(struct aclk_metrics_per_sample)); - - memcpy(aclk_queries_per_thread_sample, aclk_queries_per_thread, sizeof(uint32_t) * aclk_stats_cfg.query_thread_count); - memset(aclk_queries_per_thread, 0, sizeof(uint32_t) * aclk_stats_cfg.query_thread_count); - ACLK_STATS_UNLOCK; - - aclk_stats_collect(&per_sample, &permanent); - aclk_stats_query_queue(&per_sample); -#ifdef NETDATA_INTERNAL_CHECKS - aclk_stats_latency(&per_sample); -#endif - - aclk_stats_cloud_req(&per_sample); - aclk_stats_cloud_req_type(&per_sample); - aclk_stats_cloud_req_http_type(&per_sample); - - aclk_stats_query_threads(aclk_queries_per_thread_sample); - - aclk_stats_query_time(&per_sample); - - struct mqtt_wss_stats mqtt_wss_stats = mqtt_wss_get_stats(args->client); - aclk_stats_mqtt_wss(&mqtt_wss_stats); - - aclk_stats_newproto_rx(aclk_stats_cfg.aclk_proto_rx_msgs_sample); - } - - return 0; -} - -void aclk_stats_upd_online(int online) { - if(!aclk_stats_enabled) - return; - - ACLK_STATS_LOCK; - aclk_metrics.online = online; - - if(!online) - aclk_metrics_per_sample.offline_during_sample = 1; - ACLK_STATS_UNLOCK; -} - -#ifdef NETDATA_INTERNAL_CHECKS -static usec_t pub_time[UINT16_MAX + 1] = {0}; -void aclk_stats_msg_published(uint16_t id) -{ - ACLK_STATS_LOCK; - pub_time[id] = now_boottime_usec(); - ACLK_STATS_UNLOCK; -} - -void aclk_stats_msg_puback(uint16_t id) -{ - ACLK_STATS_LOCK; - usec_t t; - - if (!aclk_stats_enabled) { - ACLK_STATS_UNLOCK; - return; - } - - if (unlikely(!pub_time[id])) { - ACLK_STATS_UNLOCK; - netdata_log_error("Received PUBACK for unknown message?!"); - return; - } - - t = now_boottime_usec() - pub_time[id]; - t /= USEC_PER_MS; - pub_time[id] = 0; - if (aclk_metrics_per_sample.latency_max < t) - aclk_metrics_per_sample.latency_max = t; - - aclk_metrics_per_sample.latency_total += t; - aclk_metrics_per_sample.latency_count++; - ACLK_STATS_UNLOCK; -} -#endif /* NETDATA_INTERNAL_CHECKS */ diff --git a/src/aclk/aclk_stats.h b/src/aclk/aclk_stats.h deleted file mode 100644 index e13269557..000000000 --- a/src/aclk/aclk_stats.h +++ /dev/null @@ -1,77 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#ifndef NETDATA_ACLK_STATS_H -#define NETDATA_ACLK_STATS_H - -#include "daemon/common.h" -#include "libnetdata/libnetdata.h" -#include "aclk_query_queue.h" -#include "mqtt_websockets/mqtt_wss_client.h" - -extern netdata_mutex_t aclk_stats_mutex; - -#define ACLK_STATS_LOCK netdata_mutex_lock(&aclk_stats_mutex) -#define ACLK_STATS_UNLOCK netdata_mutex_unlock(&aclk_stats_mutex) - -// if you change update 
`cloud_req_http_type_names`. -#define ACLK_STATS_CLOUD_HTTP_REQ_TYPE_CNT 9 - -int aclk_cloud_req_http_type_to_idx(const char *name); - -struct aclk_stats_thread { - ND_THREAD *thread; - int query_thread_count; - mqtt_wss_client client; -}; - -// preserve between samples -struct aclk_metrics { - volatile uint8_t online; -}; - -// reset to 0 on every sample -extern struct aclk_metrics_per_sample { - /* in the unlikely event of ACLK disconnecting - and reconnecting under 1 sampling rate - we want to make sure we record the disconnection - despite it being then seemingly longer in graph */ - volatile uint8_t offline_during_sample; - - volatile uint32_t queries_queued; - volatile uint32_t queries_dispatched; - -#ifdef NETDATA_INTERNAL_CHECKS - volatile uint32_t latency_max; - volatile uint32_t latency_total; - volatile uint32_t latency_count; -#endif - - volatile uint32_t cloud_req_recvd; - volatile uint32_t cloud_req_err; - - // query types. - volatile uint32_t queries_per_type[ACLK_QUERY_TYPE_COUNT]; - - // HTTP-specific request types. - volatile uint32_t cloud_req_http_by_type[ACLK_STATS_CLOUD_HTTP_REQ_TYPE_CNT]; - - volatile uint32_t cloud_q_process_total; - volatile uint32_t cloud_q_process_count; - volatile uint32_t cloud_q_process_max; -} aclk_metrics_per_sample; - -extern uint32_t *aclk_proto_rx_msgs_sample; - -extern uint32_t *aclk_queries_per_thread; - -void *aclk_stats_main_thread(void *ptr); -void aclk_stats_thread_prepare(int query_thread_count, unsigned int proto_hdl_cnt); -void aclk_stats_thread_cleanup(); -void aclk_stats_upd_online(int online); - -#ifdef NETDATA_INTERNAL_CHECKS -void aclk_stats_msg_published(uint16_t id); -void aclk_stats_msg_puback(uint16_t id); -#endif /* NETDATA_INTERNAL_CHECKS */ - -#endif /* NETDATA_ACLK_STATS_H */ diff --git a/src/aclk/aclk_tx_msgs.c b/src/aclk/aclk_tx_msgs.c index c1ed68052..2d256279e 100644 --- a/src/aclk/aclk_tx_msgs.c +++ b/src/aclk/aclk_tx_msgs.c @@ -1,9 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later #include "aclk_tx_msgs.h" -#include "daemon/common.h" #include "aclk_util.h" -#include "aclk_stats.h" #include "aclk.h" #include "aclk_capas.h" @@ -13,9 +11,6 @@ #pragma region aclk_tx_msgs helper functions #endif -// version for aclk legacy (old cloud arch) -#define ACLK_VERSION 2 - static void freez_aclk_publish5a(void *ptr) { freez(ptr); } @@ -23,6 +18,8 @@ static void freez_aclk_publish5b(void *ptr) { freez(ptr); } +#define ACLK_HEADER_VERSION (2) + uint16_t aclk_send_bin_message_subtopic_pid(mqtt_wss_client client, char *msg, size_t msg_len, enum aclk_topics subtopic, const char *msgname) { #ifndef ACLK_LOG_CONVERSATION_DIR @@ -38,10 +35,6 @@ uint16_t aclk_send_bin_message_subtopic_pid(mqtt_wss_client client, char *msg, s mqtt_wss_publish5(client, (char*)topic, NULL, msg, &freez_aclk_publish5a, msg_len, MQTT_WSS_PUB_QOS1, &packet_id); -#ifdef NETDATA_INTERNAL_CHECKS - aclk_stats_msg_published(packet_id); -#endif - if (aclklog_enabled) { char *json = protomsg_to_json(msg, msg_len, msgname); log_aclk_message_bin(json, strlen(json), 1, topic, msgname); @@ -51,14 +44,13 @@ uint16_t aclk_send_bin_message_subtopic_pid(mqtt_wss_client client, char *msg, s return packet_id; } -#define TOPIC_MAX_LEN 512 #define V2_BIN_PAYLOAD_SEPARATOR "\x0D\x0A\x0D\x0A" -static int aclk_send_message_with_bin_payload(mqtt_wss_client client, json_object *msg, const char *topic, const void *payload, size_t payload_len) +static short aclk_send_message_with_bin_payload(mqtt_wss_client client, json_object *msg, const char *topic, const void *payload, size_t 
payload_len) { uint16_t packet_id; const char *str; char *full_msg = NULL; - int len; + size_t len; if (unlikely(!topic || topic[0] != '/')) { netdata_log_error("Full topic required!"); @@ -78,20 +70,16 @@ static int aclk_send_message_with_bin_payload(mqtt_wss_client client, json_objec json_object_put(msg); if (payload_len) { - memcpy(&full_msg[len], V2_BIN_PAYLOAD_SEPARATOR, strlen(V2_BIN_PAYLOAD_SEPARATOR)); + memcpy(&full_msg[len], V2_BIN_PAYLOAD_SEPARATOR, sizeof(V2_BIN_PAYLOAD_SEPARATOR) - 1); len += strlen(V2_BIN_PAYLOAD_SEPARATOR); memcpy(&full_msg[len], payload, payload_len); } int rc = mqtt_wss_publish5(client, (char*)topic, NULL, full_msg, &freez_aclk_publish5b, full_msg_len, MQTT_WSS_PUB_QOS1, &packet_id); - if (rc == MQTT_WSS_ERR_TOO_BIG_FOR_SERVER) + if (rc == MQTT_WSS_ERR_MSG_TOO_BIG) return HTTP_RESP_CONTENT_TOO_LONG; -#ifdef NETDATA_INTERNAL_CHECKS - aclk_stats_msg_published(packet_id); -#endif - return 0; } @@ -99,12 +87,14 @@ static int aclk_send_message_with_bin_payload(mqtt_wss_client client, json_objec * Creates universal header common for all ACLK messages. User gets ownership of json object created. * Usually this is freed by send function after message has been sent. */ -static struct json_object *create_hdr(const char *type, const char *msg_id, time_t ts_secs, usec_t ts_us, int version) +static struct json_object *create_hdr(const char *type, const char *msg_id) { nd_uuid_t uuid; - char uuid_str[36 + 1]; + char uuid_str[UUID_STR_LEN]; json_object *tmp; json_object *obj = json_object_new_object(); + time_t ts_secs; + usec_t ts_us; tmp = json_object_new_string(type); json_object_object_add(obj, "type", tmp); @@ -115,11 +105,9 @@ static struct json_object *create_hdr(const char *type, const char *msg_id, time msg_id = uuid_str; } - if (ts_secs == 0) { - ts_us = now_realtime_usec(); - ts_secs = ts_us / USEC_PER_SEC; - ts_us = ts_us % USEC_PER_SEC; - } + ts_us = now_realtime_usec(); + ts_secs = ts_us / USEC_PER_SEC; + ts_us = ts_us % USEC_PER_SEC; tmp = json_object_new_string(msg_id); json_object_object_add(obj, "msg-id", tmp); @@ -144,7 +132,7 @@ static struct json_object *create_hdr(const char *type, const char *msg_id, time tmp = json_object_new_int64(aclk_session_us); json_object_object_add(obj, "connect-offset-usec", tmp); - tmp = json_object_new_int(version); + tmp = json_object_new_int(ACLK_HEADER_VERSION); json_object_object_add(obj, "version", tmp); return obj; @@ -161,7 +149,7 @@ static struct json_object *create_hdr(const char *type, const char *msg_id, time void aclk_http_msg_v2_err(mqtt_wss_client client, const char *topic, const char *msg_id, int http_code, int ec, const char* emsg, const char *payload, size_t payload_len) { json_object *tmp, *msg; - msg = create_hdr("http", msg_id, 0, 0, 2); + msg = create_hdr("http", msg_id); tmp = json_object_new_int(http_code); json_object_object_add(msg, "http-code", tmp); @@ -176,11 +164,12 @@ void aclk_http_msg_v2_err(mqtt_wss_client client, const char *topic, const char } } -int aclk_http_msg_v2(mqtt_wss_client client, const char *topic, const char *msg_id, usec_t t_exec, usec_t created, int http_code, const char *payload, size_t payload_len) +short aclk_http_msg_v2(mqtt_wss_client client, const char *topic, const char *msg_id, usec_t t_exec, usec_t created, + short http_code, const char *payload, size_t payload_len) { json_object *tmp, *msg; - msg = create_hdr("http", msg_id, 0, 0, 2); + msg = create_hdr("http", msg_id); tmp = json_object_new_int64(t_exec); json_object_object_add(msg, "t-exec", tmp); @@ -191,7 
+180,7 @@ int aclk_http_msg_v2(mqtt_wss_client client, const char *topic, const char *msg_ tmp = json_object_new_int(http_code); json_object_object_add(msg, "http-code", tmp); - int rc = aclk_send_message_with_bin_payload(client, msg, topic, payload, payload_len); + short rc = aclk_send_message_with_bin_payload(client, msg, topic, payload, payload_len); switch (rc) { case HTTP_RESP_CONTENT_TOO_LONG: @@ -200,12 +189,11 @@ int aclk_http_msg_v2(mqtt_wss_client client, const char *topic, const char *msg_ case HTTP_RESP_INTERNAL_SERVER_ERROR: aclk_http_msg_v2_err(client, topic, msg_id, rc, CLOUD_EC_FAIL_TOPIC, CLOUD_EMSG_FAIL_TOPIC, payload, payload_len); break; - case HTTP_RESP_GATEWAY_TIMEOUT: - case HTTP_RESP_SERVICE_UNAVAILABLE: - aclk_http_msg_v2_err(client, topic, msg_id, rc, CLOUD_EC_SND_TIMEOUT, CLOUD_EMSG_SND_TIMEOUT, payload, payload_len); + default: + rc = http_code; break; } - return rc ? rc : http_code; + return rc; } uint16_t aclk_send_agent_connection_update(mqtt_wss_client client, int reachable) { @@ -219,19 +207,19 @@ uint16_t aclk_send_agent_connection_update(mqtt_wss_client client, int reachable .capabilities = aclk_get_agent_capas() }; - rrdhost_aclk_state_lock(localhost); - if (unlikely(!localhost->aclk_state.claimed_id)) { + CLAIM_ID claim_id = claim_id_get(); + if (unlikely(!claim_id_is_set(claim_id))) { netdata_log_error("Internal error. Should not come here if not claimed"); - rrdhost_aclk_state_unlock(localhost); return 0; } - if (localhost->aclk_state.prev_claimed_id) - conn.claim_id = localhost->aclk_state.prev_claimed_id; + + CLAIM_ID previous_claim_id = claim_id_get_last_working(); + if (claim_id_is_set(previous_claim_id)) + conn.claim_id = previous_claim_id.str; else - conn.claim_id = localhost->aclk_state.claimed_id; + conn.claim_id = claim_id.str; char *msg = generate_update_agent_connection(&len, &conn); - rrdhost_aclk_state_unlock(localhost); if (!msg) { netdata_log_error("Error generating agent::v1::UpdateAgentConnection payload"); @@ -239,10 +227,9 @@ uint16_t aclk_send_agent_connection_update(mqtt_wss_client client, int reachable } pid = aclk_send_bin_message_subtopic_pid(client, msg, len, ACLK_TOPICID_AGENT_CONN, "UpdateAgentConnection"); - if (localhost->aclk_state.prev_claimed_id) { - freez(localhost->aclk_state.prev_claimed_id); - localhost->aclk_state.prev_claimed_id = NULL; - } + if (claim_id_is_set(previous_claim_id)) + claim_id_clear_previous_working(); + return pid; } @@ -254,16 +241,14 @@ char *aclk_generate_lwt(size_t *size) { .capabilities = NULL }; - rrdhost_aclk_state_lock(localhost); - if (unlikely(!localhost->aclk_state.claimed_id)) { + CLAIM_ID claim_id = claim_id_get(); + if(!claim_id_is_set(claim_id)) { netdata_log_error("Internal error. 
Should not come here if not claimed"); - rrdhost_aclk_state_unlock(localhost); return NULL; } - conn.claim_id = localhost->aclk_state.claimed_id; + conn.claim_id = claim_id.str; char *msg = generate_update_agent_connection(size, &conn); - rrdhost_aclk_state_unlock(localhost); if (!msg) netdata_log_error("Error generating agent::v1::UpdateAgentConnection payload for LWT"); diff --git a/src/aclk/aclk_tx_msgs.h b/src/aclk/aclk_tx_msgs.h index 86ed20c38..6b11996d1 100644 --- a/src/aclk/aclk_tx_msgs.h +++ b/src/aclk/aclk_tx_msgs.h @@ -4,7 +4,6 @@ #include #include "libnetdata/libnetdata.h" -#include "daemon/common.h" #include "mqtt_websockets/mqtt_wss_client.h" #include "schema-wrappers/schema_wrappers.h" #include "aclk_util.h" @@ -12,7 +11,8 @@ uint16_t aclk_send_bin_message_subtopic_pid(mqtt_wss_client client, char *msg, size_t msg_len, enum aclk_topics subtopic, const char *msgname); void aclk_http_msg_v2_err(mqtt_wss_client client, const char *topic, const char *msg_id, int http_code, int ec, const char* emsg, const char *payload, size_t payload_len); -int aclk_http_msg_v2(mqtt_wss_client client, const char *topic, const char *msg_id, usec_t t_exec, usec_t created, int http_code, const char *payload, size_t payload_len); +short aclk_http_msg_v2(mqtt_wss_client client, const char *topic, const char *msg_id, usec_t t_exec, usec_t created, + short http_code, const char *payload, size_t payload_len); uint16_t aclk_send_agent_connection_update(mqtt_wss_client client, int reachable); char *aclk_generate_lwt(size_t *size); diff --git a/src/aclk/aclk_util.c b/src/aclk/aclk_util.c index 3bf2e3f18..d01fa8f2c 100644 --- a/src/aclk/aclk_util.c +++ b/src/aclk/aclk_util.c @@ -2,8 +2,6 @@ #include "aclk_util.h" -#ifdef ENABLE_ACLK - #include "aclk_proxy.h" #include "daemon/common.h" @@ -12,8 +10,6 @@ usec_t aclk_session_newarch = 0; aclk_env_t *aclk_env = NULL; -int chart_batch_id; - aclk_encoding_type_t aclk_encoding_type_t_from_str(const char *str) { if (!strcmp(str, "json")) { return ACLK_ENC_JSON; @@ -186,20 +182,18 @@ static void topic_generate_final(struct aclk_topic *t) { if (!replace_tag) return; - rrdhost_aclk_state_lock(localhost); - if (unlikely(!localhost->aclk_state.claimed_id)) { + CLAIM_ID claim_id = claim_id_get(); + if (unlikely(!claim_id_is_set(claim_id))) { netdata_log_error("This should never be called if agent not claimed"); - rrdhost_aclk_state_unlock(localhost); return; } - t->topic = mallocz(strlen(t->topic_recvd) + 1 - strlen(CLAIM_ID_REPLACE_TAG) + strlen(localhost->aclk_state.claimed_id)); + t->topic = mallocz(strlen(t->topic_recvd) + 1 - strlen(CLAIM_ID_REPLACE_TAG) + strlen(claim_id.str)); memcpy(t->topic, t->topic_recvd, replace_tag - t->topic_recvd); dest = t->topic + (replace_tag - t->topic_recvd); - memcpy(dest, localhost->aclk_state.claimed_id, strlen(localhost->aclk_state.claimed_id)); - dest += strlen(localhost->aclk_state.claimed_id); - rrdhost_aclk_state_unlock(localhost); + memcpy(dest, claim_id.str, strlen(claim_id.str)); + dest += strlen(claim_id.str); replace_tag += strlen(CLAIM_ID_REPLACE_TAG); strcpy(dest, replace_tag); dest += strlen(replace_tag); @@ -315,7 +309,7 @@ const char *aclk_get_topic(enum aclk_topics topic) * having to resort to callbacks. 
*/ -const char *aclk_topic_cache_iterate(aclk_topic_cache_iter_t *iter) +const char *aclk_topic_cache_iterate(size_t *iter) { if (!aclk_topic_cache) { netdata_log_error("Topic cache not initialized when %s was called.", __FUNCTION__); @@ -348,15 +342,13 @@ unsigned long int aclk_tbeb_delay(int reset, int base, unsigned long int min, un attempt++; - if (attempt == 0) { - srandom(time(NULL)); + if (attempt == 0) return 0; - } unsigned long int delay = pow(base, attempt - 1); delay *= MSEC_PER_SEC; - delay += (random() % (MAX(1000, delay/2))); + delay += (os_random32() % (MAX(1000, delay/2))); if (delay <= min * MSEC_PER_SEC) return min; @@ -440,45 +432,3 @@ void aclk_set_proxy(char **ohost, int *port, char **uname, char **pwd, enum mqtt freez(proxy); } -#endif /* ENABLE_ACLK */ - -#if defined(OPENSSL_VERSION_NUMBER) && OPENSSL_VERSION_NUMBER < OPENSSL_VERSION_110 -static EVP_ENCODE_CTX *EVP_ENCODE_CTX_new(void) -{ - EVP_ENCODE_CTX *ctx = OPENSSL_malloc(sizeof(*ctx)); - - if (ctx != NULL) { - memset(ctx, 0, sizeof(*ctx)); - } - return ctx; -} -static void EVP_ENCODE_CTX_free(EVP_ENCODE_CTX *ctx) -{ - OPENSSL_free(ctx); - return; -} -#endif - -int base64_encode_helper(unsigned char *out, int *outl, const unsigned char *in, int in_len) -{ - int len; - unsigned char *str = out; - EVP_ENCODE_CTX *ctx = EVP_ENCODE_CTX_new(); - EVP_EncodeInit(ctx); - EVP_EncodeUpdate(ctx, str, outl, in, in_len); - str += *outl; - EVP_EncodeFinal(ctx, str, &len); - *outl += len; - - str = out; - while(*str) { - if (*str != 0x0D && *str != 0x0A) - *out++ = *str++; - else - str++; - } - *out = 0; - - EVP_ENCODE_CTX_free(ctx); - return 0; -} diff --git a/src/aclk/aclk_util.h b/src/aclk/aclk_util.h index 6c0239cc3..24e179964 100644 --- a/src/aclk/aclk_util.h +++ b/src/aclk/aclk_util.h @@ -3,8 +3,6 @@ #define ACLK_UTIL_H #include "libnetdata/libnetdata.h" - -#ifdef ENABLE_ACLK #include "mqtt_websockets/mqtt_wss_client.h" #define CLOUD_EC_MALFORMED_NODE_ID 1 @@ -95,15 +93,10 @@ enum aclk_topics { ACLK_TOPICID_CTXS_UPDATED = 20 }; -typedef size_t aclk_topic_cache_iter_t; -#define ACLK_TOPIC_CACHE_ITER_T_INITIALIZER (0) - const char *aclk_get_topic(enum aclk_topics topic); -int aclk_generate_topic_cache(struct json_object *json); +int aclk_generate_topic_cache(json_object *json); void free_topic_cache(void); -const char *aclk_topic_cache_iterate(aclk_topic_cache_iter_t *iter); -// TODO -// aclk_topics_reload //when claim id changes +const char *aclk_topic_cache_iterate(size_t *iter); #ifdef ACLK_LOG_CONVERSATION_DIR extern volatile int aclk_conversation_log_counter; @@ -114,8 +107,5 @@ unsigned long int aclk_tbeb_delay(int reset, int base, unsigned long int min, un #define aclk_tbeb_reset(x) aclk_tbeb_delay(1, 0, 0, 0) void aclk_set_proxy(char **ohost, int *port, char **uname, char **pwd, enum mqtt_wss_proxy_type *type); -#endif /* ENABLE_ACLK */ - -int base64_encode_helper(unsigned char *out, int *outl, const unsigned char *in, int in_len); #endif /* ACLK_UTIL_H */ diff --git a/src/aclk/helpers/mqtt_wss_pal.h b/src/aclk/helpers/mqtt_wss_pal.h deleted file mode 100644 index fe1aacf49..000000000 --- a/src/aclk/helpers/mqtt_wss_pal.h +++ /dev/null @@ -1,13 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#ifndef MQTT_WSS_PAL_H -#define MQTT_WSS_PAL_H - -#include "libnetdata/libnetdata.h" - -#undef OPENSSL_VERSION_095 -#undef OPENSSL_VERSION_097 -#undef OPENSSL_VERSION_110 -#undef OPENSSL_VERSION_111 - -#endif /* MQTT_WSS_PAL_H */ diff --git a/src/aclk/helpers/ringbuffer_pal.h b/src/aclk/helpers/ringbuffer_pal.h 
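/*
 * A minimal sketch of the truncated exponential backoff computed by
 * aclk_tbeb_delay() above: the delay grows as base^(attempt-1) seconds,
 * gets a random jitter of up to half the delay (at least one second), and
 * is clamped to [min, max]. Assumptions: rand() stands in for netdata's
 * os_random32(), attempt tracking is passed in rather than kept statically,
 * and all values are normalized to milliseconds.
 */
#include <math.h>
#include <stdlib.h>
#include <stdio.h>

#define MSEC_PER_SEC 1000UL
#define MAXV(a, b) ((a) > (b) ? (a) : (b))

static unsigned long tbeb_delay_ms(int attempt, int base,
                                   unsigned long min_s, unsigned long max_s) {
    if (attempt == 0)
        return 0;

    unsigned long delay = (unsigned long)pow(base, attempt - 1) * MSEC_PER_SEC;
    delay += (unsigned long)rand() % MAXV(1000UL, delay / 2); /* jitter */

    if (delay <= min_s * MSEC_PER_SEC)
        return min_s * MSEC_PER_SEC;
    if (delay >= max_s * MSEC_PER_SEC)
        return max_s * MSEC_PER_SEC;
    return delay;
}

int main(void) {
    for (int a = 1; a <= 6; a++)
        printf("attempt %d -> %lu ms\n", a, tbeb_delay_ms(a, 2, 1, 60));
    return 0;
}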
deleted file mode 100644 index 2f7e1cb93..000000000 --- a/src/aclk/helpers/ringbuffer_pal.h +++ /dev/null @@ -1,11 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#ifndef RINGBUFFER_PAL_H -#define RINGBUFFER_PAL_H - -#include "libnetdata/libnetdata.h" - -#define crbuf_malloc(...) mallocz(__VA_ARGS__) -#define crbuf_free(...) freez(__VA_ARGS__) - -#endif /* RINGBUFFER_PAL_H */ diff --git a/src/aclk/https_client.c b/src/aclk/https_client.c index 4a0362992..f144eaf15 100644 --- a/src/aclk/https_client.c +++ b/src/aclk/https_client.c @@ -105,7 +105,8 @@ static int parse_http_hdr(rbuf_t buf, http_parse_ctx *parse_ctx) int idx, idx_end; char buf_key[HTTP_HDR_BUFFER_SIZE]; char buf_val[HTTP_HDR_BUFFER_SIZE]; - char *ptr = buf_key; + char *ptr; + if (!rbuf_find_bytes(buf, HTTP_LINE_TERM, strlen(HTTP_LINE_TERM), &idx_end)) { netdata_log_error("CRLF expected"); return 1; @@ -555,7 +556,7 @@ static int handle_http_request(https_req_ctx_t *ctx) { // we remove those but during encoding we need that space in the buffer creds_base64_len += (1+(creds_base64_len/64)) * strlen("\n"); char *creds_base64 = callocz(1, creds_base64_len + 1); - base64_encode_helper((unsigned char*)creds_base64, &creds_base64_len, (unsigned char*)creds_plain, creds_plain_len); + (void) netdata_base64_encode((unsigned char *)creds_base64, (unsigned char *)creds_plain, creds_plain_len); buffer_sprintf(hdr, "Proxy-Authorization: Basic %s\x0D\x0A", creds_base64); freez(creds_plain); } @@ -583,7 +584,6 @@ static int handle_http_request(https_req_ctx_t *ctx) { if (ctx->parse_ctx.chunked_response) freez(ctx->parse_ctx.chunked_response); rc = 4; - goto err_exit; } err_exit: diff --git a/src/aclk/https_client.h b/src/aclk/https_client.h index cf14ffd87..b1445a5b7 100644 --- a/src/aclk/https_client.h +++ b/src/aclk/https_client.h @@ -5,9 +5,6 @@ #include "libnetdata/libnetdata.h" -#include "mqtt_websockets/c-rbuf/cringbuffer.h" -#include "mqtt_websockets/c_rhash/c_rhash.h" - typedef enum http_req_type { HTTP_REQ_GET = 0, HTTP_REQ_POST, diff --git a/src/aclk/mqtt_websockets/.github/workflows/run-tests.yaml b/src/aclk/mqtt_websockets/.github/workflows/run-tests.yaml deleted file mode 100644 index da5dde821..000000000 --- a/src/aclk/mqtt_websockets/.github/workflows/run-tests.yaml +++ /dev/null @@ -1,14 +0,0 @@ -name: run-tests -on: - push: - schedule: - - cron: '5 3 * * 0' - pull_request: -jobs: - run-tests: - runs-on: ubuntu-latest - steps: - - name: Install ruby and deps - run: sudo apt-get install ruby ruby-dev mosquitto - - name: Checkout - uses: actions/checkout@v2 diff --git a/src/aclk/mqtt_websockets/.gitignore b/src/aclk/mqtt_websockets/.gitignore deleted file mode 100644 index 9f1a0d89a..000000000 --- a/src/aclk/mqtt_websockets/.gitignore +++ /dev/null @@ -1,10 +0,0 @@ -build/* -!build/.keep -test -.vscode -mqtt/mqtt.c -mqtt/include/mqtt.h -libmqttwebsockets.* -*.o -.dirstamp -.deps diff --git a/src/aclk/mqtt_websockets/README.md b/src/aclk/mqtt_websockets/README.md index b159686df..9507fedb5 100644 --- a/src/aclk/mqtt_websockets/README.md +++ b/src/aclk/mqtt_websockets/README.md @@ -4,4 +4,4 @@ Library to connect MQTT client over Websockets Secure (WSS). ## License -The Project is released under GPL v3 license. See [License](LICENSE) +The Project is released under GPL v3 license. 
See [License](/LICENSE) diff --git a/src/aclk/mqtt_websockets/c-rbuf/cringbuffer.c b/src/aclk/mqtt_websockets/c-rbuf/cringbuffer.c deleted file mode 100644 index 8950c6906..000000000 --- a/src/aclk/mqtt_websockets/c-rbuf/cringbuffer.c +++ /dev/null @@ -1,203 +0,0 @@ -// Copyright: SPDX-License-Identifier: GPL-3.0-only - -#include "cringbuffer.h" -#include "cringbuffer_internal.h" - -#include -#include -#include - -#define MIN(a,b) (((a)<(b))?(a):(b)) -#define MAX(a,b) (((a)>(b))?(a):(b)) - -// this allows user to use their own -// custom memory allocation functions -#ifdef RBUF_CUSTOM_MALLOC -#include "../../helpers/ringbuffer_pal.h" -#else -#define crbuf_malloc(...) malloc(__VA_ARGS__) -#define crbuf_free(...) free(__VA_ARGS__) -#endif - -rbuf_t rbuf_create(size_t size) -{ - rbuf_t buffer = crbuf_malloc(sizeof(struct rbuf_t) + size); - if (!buffer) - return NULL; - - memset(buffer, 0, sizeof(struct rbuf_t)); - - buffer->data = ((char*)buffer) + sizeof(struct rbuf_t); - - buffer->head = buffer->data; - buffer->tail = buffer->data; - buffer->size = size; - buffer->end = buffer->data + size; - - return buffer; -} - -void rbuf_free(rbuf_t buffer) -{ - crbuf_free(buffer); -} - -void rbuf_flush(rbuf_t buffer) -{ - buffer->head = buffer->data; - buffer->tail = buffer->data; - buffer->size_data = 0; -} - -char *rbuf_get_linear_insert_range(rbuf_t buffer, size_t *bytes) -{ - *bytes = 0; - if (buffer->head == buffer->tail && buffer->size_data) - return NULL; - - *bytes = ((buffer->head >= buffer->tail) ? buffer->end : buffer->tail) - buffer->head; - return buffer->head; -} - -char *rbuf_get_linear_read_range(rbuf_t buffer, size_t *bytes) -{ - *bytes = 0; - if(buffer->head == buffer->tail && !buffer->size_data) - return NULL; - - *bytes = ((buffer->tail >= buffer->head) ? 
buffer->end : buffer->head) - buffer->tail; - - return buffer->tail; -} - -int rbuf_bump_head(rbuf_t buffer, size_t bytes) -{ - size_t free_bytes = rbuf_bytes_free(buffer); - if (bytes > free_bytes) - return 0; - int i = buffer->head - buffer->data; - buffer->head = &buffer->data[(i + bytes) % buffer->size]; - buffer->size_data += bytes; - return 1; -} - -int rbuf_bump_tail(rbuf_t buffer, size_t bytes) -{ - if(!rbuf_bump_tail_noopt(buffer, bytes)) - return 0; - - // if tail catched up with head - // start writing buffer from beggining - // this is not necessary (rbuf must work well without it) - // but helps to optimize big writes as rbuf_get_linear_insert_range - // will return bigger continuous region - if(buffer->tail == buffer->head) { - assert(buffer->size_data == 0); - rbuf_flush(buffer); - } - - return 1; -} - -size_t rbuf_get_capacity(rbuf_t buffer) -{ - return buffer->size; -} - -size_t rbuf_bytes_available(rbuf_t buffer) -{ - return buffer->size_data; -} - -size_t rbuf_bytes_free(rbuf_t buffer) -{ - return buffer->size - buffer->size_data; -} - -size_t rbuf_push(rbuf_t buffer, const char *data, size_t len) -{ - size_t to_cpy; - char *w_ptr = rbuf_get_linear_insert_range(buffer, &to_cpy); - if(!to_cpy) - return to_cpy; - - to_cpy = MIN(to_cpy, len); - memcpy(w_ptr, data, to_cpy); - rbuf_bump_head(buffer, to_cpy); - if(to_cpy < len) - to_cpy += rbuf_push(buffer, &data[to_cpy], len - to_cpy); - return to_cpy; -} - -size_t rbuf_pop(rbuf_t buffer, char *data, size_t len) -{ - size_t to_cpy; - const char *r_ptr = rbuf_get_linear_read_range(buffer, &to_cpy); - if(!to_cpy) - return to_cpy; - - to_cpy = MIN(to_cpy, len); - memcpy(data, r_ptr, to_cpy); - rbuf_bump_tail(buffer, to_cpy); - if(to_cpy < len) - to_cpy += rbuf_pop(buffer, &data[to_cpy], len - to_cpy); - return to_cpy; -} - -static inline void rbuf_ptr_inc(rbuf_t buffer, const char **ptr) -{ - (*ptr)++; - if(*ptr >= buffer->end) - *ptr = buffer->data; -} - -int rbuf_memcmp(rbuf_t buffer, const char *haystack, const char *needle, size_t needle_bytes) -{ - const char *end = needle + needle_bytes; - - // as head==tail can mean 2 things here - if (haystack == buffer->head && buffer->size_data) { - if (*haystack != *needle) - return (*haystack - *needle); - rbuf_ptr_inc(buffer, &haystack); - needle++; - } - - while (haystack != buffer->head && needle != end) { - if (*haystack != *needle) - return (*haystack - *needle); - rbuf_ptr_inc(buffer, &haystack); - needle++; - } - return 0; -} - -int rbuf_memcmp_n(rbuf_t buffer, const char *to_cmp, size_t to_cmp_bytes) -{ - return rbuf_memcmp(buffer, buffer->tail, to_cmp, to_cmp_bytes); -} - -char *rbuf_find_bytes(rbuf_t buffer, const char *needle, size_t needle_bytes, int *found_idx) -{ - const char *ptr = buffer->tail; - *found_idx = 0; - - if (!rbuf_bytes_available(buffer)) - return NULL; - - if (buffer->head == buffer->tail && buffer->size_data) { - if(!rbuf_memcmp(buffer, ptr, needle, needle_bytes)) - return (char *)ptr; - rbuf_ptr_inc(buffer, &ptr); - (*found_idx)++; - } - - while (ptr != buffer->head) - { - if(!rbuf_memcmp(buffer, ptr, needle, needle_bytes)) - return (char *)ptr; - rbuf_ptr_inc(buffer, &ptr); - (*found_idx)++; - } - return NULL; -} diff --git a/src/aclk/mqtt_websockets/c-rbuf/cringbuffer.h b/src/aclk/mqtt_websockets/c-rbuf/cringbuffer.h deleted file mode 100644 index eb98035a9..000000000 --- a/src/aclk/mqtt_websockets/c-rbuf/cringbuffer.h +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright: SPDX-License-Identifier: GPL-3.0-only - -#ifndef CRINGBUFFER_H -#define CRINGBUFFER_H - 
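The cringbuffer.c deleted above implements the classic bounded ring buffer: head and tail indices advance modulo the capacity, a separate fill counter (size_data) disambiguates the head == tail case, and — as the comment in rbuf_bump_tail() notes — both pointers restart from the beginning whenever the buffer drains empty, so the next writer is handed one large linear region. A minimal self-contained sketch of that scheme (not the deleted code itself):

#include <stdio.h>

#define CAP 8

struct ring {
    char data[CAP];
    size_t head, tail, used;   // head: next write slot, tail: next read slot
};

static size_t ring_push(struct ring *r, const char *src, size_t len)
{
    size_t n = 0;
    while (n < len && r->used < CAP) {
        r->data[r->head] = src[n++];
        r->head = (r->head + 1) % CAP;   // wrap around, as rbuf_bump_head() does
        r->used++;
    }
    return n;
}

static size_t ring_pop(struct ring *r, char *dst, size_t len)
{
    size_t n = 0;
    while (n < len && r->used > 0) {
        dst[n++] = r->data[r->tail];
        r->tail = (r->tail + 1) % CAP;   // wrap around, as rbuf_bump_tail() does
        r->used--;
    }
    // the rbuf_bump_tail() optimization: once empty, restart at the beginning
    // so a linear-insert-range query can hand out one big region
    if (r->used == 0)
        r->head = r->tail = 0;
    return n;
}

int main(void)
{
    struct ring r = {0};
    char out[CAP + 1] = {0};
    ring_push(&r, "abcdefgh", 8);            // fill completely
    ring_pop(&r, out, 3);                    // tail advances past 'c'
    ring_push(&r, "XYZ", 3);                 // wraps into the freed space
    printf("%zu bytes buffered\n", r.used);  // prints "8 bytes buffered"
    return 0;
}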
-#include - -typedef struct rbuf_t *rbuf_t; - -rbuf_t rbuf_create(size_t size); -void rbuf_free(rbuf_t buffer); -void rbuf_flush(rbuf_t buffer); - -/* /param bytes how much bytes can be copied into pointer returned - * /return pointer where data can be copied to or NULL if buffer full - */ -char *rbuf_get_linear_insert_range(rbuf_t buffer, size_t *bytes); -char *rbuf_get_linear_read_range(rbuf_t buffer, size_t *bytes); - -int rbuf_bump_head(rbuf_t buffer, size_t bytes); -int rbuf_bump_tail(rbuf_t buffer, size_t bytes); - -/* @param buffer related buffer instance - * @returns total capacity of buffer in bytes (not free/used) - */ -size_t rbuf_get_capacity(rbuf_t buffer); - -/* @param buffer related buffer instance - * @returns count of bytes stored in the buffer - */ -size_t rbuf_bytes_available(rbuf_t buffer); - -/* @param buffer related buffer instance - * @returns count of bytes available/free in the buffer (how many more bytes you can store in this buffer) - */ -size_t rbuf_bytes_free(rbuf_t buffer); - -/* writes as many bytes from `data` into the `buffer` as possible - * but maximum `len` bytes - */ -size_t rbuf_push(rbuf_t buffer, const char *data, size_t len); -size_t rbuf_pop(rbuf_t buffer, char *data, size_t len); - -char *rbuf_find_bytes(rbuf_t buffer, const char *needle, size_t needle_bytes, int *found_idx); -int rbuf_memcmp_n(rbuf_t buffer, const char *to_cmp, size_t to_cmp_bytes); - -#endif diff --git a/src/aclk/mqtt_websockets/c-rbuf/cringbuffer_internal.h b/src/aclk/mqtt_websockets/c-rbuf/cringbuffer_internal.h deleted file mode 100644 index d32de187c..000000000 --- a/src/aclk/mqtt_websockets/c-rbuf/cringbuffer_internal.h +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright: SPDX-License-Identifier: GPL-3.0-only - -#ifndef CRINGBUFFER_INTERNAL_H -#define CRINGBUFFER_INTERNAL_H - -struct rbuf_t { - char *data; - - // points to next byte where we can write - char *head; - // points to oldest (next to be poped) readable byte - char *tail; - - // to avoid calculating data + size - // all the time - char *end; - - size_t size; - size_t size_data; -}; - -/* this exists so that it can be tested by unit tests - * without optimization that resets head and tail to - * beginning if buffer empty - */ -inline static int rbuf_bump_tail_noopt(rbuf_t buffer, size_t bytes) -{ - if (bytes > buffer->size_data) - return 0; - int i = buffer->tail - buffer->data; - buffer->tail = &buffer->data[(i + bytes) % buffer->size]; - buffer->size_data -= bytes; - - return 1; -} - -#endif diff --git a/src/aclk/mqtt_websockets/c-rbuf/ringbuffer_test.c b/src/aclk/mqtt_websockets/c-rbuf/ringbuffer_test.c deleted file mode 100644 index 6a17c9956..000000000 --- a/src/aclk/mqtt_websockets/c-rbuf/ringbuffer_test.c +++ /dev/null @@ -1,485 +0,0 @@ -// Copyright: SPDX-License-Identifier: GPL-3.0-only - -#include "ringbuffer.h" - -// to be able to access internals -// never do this from app -#include "../src/ringbuffer_internal.h" - -#include -#include - -#define KNRM "\x1B[0m" -#define KRED "\x1B[31m" -#define KGRN "\x1B[32m" -#define KYEL "\x1B[33m" -#define KBLU "\x1B[34m" -#define KMAG "\x1B[35m" -#define KCYN "\x1B[36m" -#define KWHT "\x1B[37m" - -#define UNUSED(x) (void)(x) - -int total_fails = 0; -int total_tests = 0; -int total_checks = 0; - -#define CHECK_EQ_RESULT(x, y) \ - while (s_len--) \ - putchar('.'); \ - printf("%s%s " KNRM "\n", (((x) == (y)) ? KGRN : KRED), (((x) == (y)) ? 
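The public interface deleted here is the one https_client.c consumes — its parse_http_hdr() above still locates the CRLF terminator with rbuf_find_bytes(). A minimal sketch of that usage pattern against the removed header (pre-patch tree assumed):

#include <stdio.h>
#include <string.h>
#include "cringbuffer.h"   // the header removed by this patch

int main(void)
{
    rbuf_t buf = rbuf_create(1024);

    // producer side: copy into the linear region, then commit with bump_head
    // (rbuf_push() does the same thing, including the wrap, in one call)
    size_t avail;
    char *w = rbuf_get_linear_insert_range(buf, &avail);
    const char *input = "HTTP/1.1 200 OK\r\n";
    size_t n = strlen(input) < avail ? strlen(input) : avail;
    memcpy(w, input, n);
    rbuf_bump_head(buf, n);

    // consumer side: find the CRLF, then consume everything before it
    int idx;
    if (rbuf_find_bytes(buf, "\r\n", 2, &idx)) {
        char line[64];
        rbuf_pop(buf, line, (size_t)idx);   // idx is the offset from tail
        line[idx] = '\0';
        printf("status line: %s\n", line);  // prints "status line: HTTP/1.1 200 OK"
    }

    rbuf_free(buf);
    return 0;
}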
" PASS " : " FAIL ")); \ - if ((x) != (y)) \ - total_fails++; \ - total_checks++; - -#define CHECK_EQ_PREFIX(x, y, prefix, subtest_name, ...) \ - { \ - int s_len = \ - 100 - \ - printf(("Checking: " KWHT "%s %s%2d " subtest_name " " KNRM), __func__, prefix, subtest_no, ##__VA_ARGS__); \ - CHECK_EQ_RESULT(x, y) \ - } - -#define CHECK_EQ(x, y, subtest_name, ...) \ - { \ - int s_len = \ - 100 - printf(("Checking: " KWHT "%s %2d " subtest_name " " KNRM), __func__, subtest_no, ##__VA_ARGS__); \ - CHECK_EQ_RESULT(x, y) \ - } - -#define TEST_DECL() \ - int subtest_no = 0; \ - printf(KYEL "TEST SUITE: %s\n" KNRM, __func__); \ - total_tests++; - -static void test_rbuf_get_linear_insert_range() -{ - TEST_DECL(); - - // check empty buffer behaviour - rbuf_t buff = rbuf_create(5); - char *to_write; - size_t ret; - to_write = rbuf_get_linear_insert_range(buff, &ret); - CHECK_EQ(ret, 5, "empty size"); - CHECK_EQ(to_write, buff->head, "empty write ptr"); - rbuf_free(buff); - - // check full buffer behaviour - subtest_no++; - buff = rbuf_create(5); - ret = rbuf_bump_head(buff, 5); - CHECK_EQ(ret, 1, "ret"); - to_write = rbuf_get_linear_insert_range(buff, &ret); - CHECK_EQ(to_write, NULL, "writable NULL"); - CHECK_EQ(ret, 0, "writable count = 0"); - - // check buffer flush - subtest_no++; - rbuf_flush(buff); - CHECK_EQ(rbuf_bytes_free(buff), 5, "size_free"); - CHECK_EQ(rbuf_bytes_available(buff), 0, "size_avail"); - CHECK_EQ(buff->head, buff->data, "head_ptr"); - CHECK_EQ(buff->tail, buff->data, "tail_ptr"); - - // check behaviour head > tail - subtest_no++; - rbuf_flush(buff); - rbuf_bump_head(buff, 3); - to_write = rbuf_get_linear_insert_range(buff, &ret); - CHECK_EQ(to_write, buff->head, "write location"); - CHECK_EQ(ret, 2, "availible to linear write"); - - // check behaviour tail > head - subtest_no++; - rbuf_flush(buff); - rbuf_bump_head(buff, 5); - rbuf_bump_tail(buff, 3); - CHECK_EQ(buff->head, buff->data, "head_ptr"); - CHECK_EQ(buff->tail, buff->data + 3, "tail_ptr"); - to_write = rbuf_get_linear_insert_range(buff, &ret); - CHECK_EQ(to_write, buff->head, "write location"); - CHECK_EQ(ret, 3, "availible to linear write"); - -/* // check behaviour tail and head at last element - subtest_no++; - rbuf_flush(buff); - rbuf_bump_head(buff, 4); - rbuf_bump_tail(buff, 4); - CHECK_EQ(buff->head, buff->end - 1, "head_ptr"); - CHECK_EQ(buff->tail, buff->end - 1, "tail_ptr"); - to_write = rbuf_get_linear_insert_range(buff, &ret); - CHECK_EQ(to_write, buff->head, "write location"); - CHECK_EQ(ret, 1, "availible to linear write");*/ - - // check behaviour tail and head at last element - // after rbuf_bump_tail optimisation that restarts buffer - // in case tail catches up with head - subtest_no++; - rbuf_flush(buff); - rbuf_bump_head(buff, 4); - rbuf_bump_tail(buff, 4); - CHECK_EQ(buff->head, buff->data, "head_ptr"); - CHECK_EQ(buff->tail, buff->data, "tail_ptr"); - to_write = rbuf_get_linear_insert_range(buff, &ret); - CHECK_EQ(to_write, buff->head, "write location"); - CHECK_EQ(ret, 5, "availible to linear write"); -} - -#define _CHECK_EQ(x, y, subtest_name, ...) 
CHECK_EQ_PREFIX(x, y, prefix, subtest_name, ##__VA_ARGS__) -#define _PREFX "(size = %5zu) " -static void test_rbuf_bump_head_bsize(size_t size) -{ - char prefix[16]; - snprintf(prefix, 16, _PREFX, size); - int subtest_no = 0; - rbuf_t buff = rbuf_create(size); - _CHECK_EQ(rbuf_bytes_free(buff), size, "size_free"); - - subtest_no++; - int ret = rbuf_bump_head(buff, size); - _CHECK_EQ(buff->data, buff->head, "loc"); - _CHECK_EQ(ret, 1, "ret"); - _CHECK_EQ(buff->size_data, buff->size, "size"); - _CHECK_EQ(rbuf_bytes_free(buff), 0, "size_free"); - - subtest_no++; - ret = rbuf_bump_head(buff, 1); - _CHECK_EQ(buff->data, buff->head, "loc no move"); - _CHECK_EQ(ret, 0, "ret error"); - _CHECK_EQ(buff->size_data, buff->size, "size"); - _CHECK_EQ(rbuf_bytes_free(buff), 0, "size_free"); - rbuf_free(buff); - - subtest_no++; - buff = rbuf_create(size); - ret = rbuf_bump_head(buff, size - 1); - _CHECK_EQ(buff->head, buff->end-1, "loc end"); - rbuf_free(buff); -} -#undef _CHECK_EQ - -static void test_rbuf_bump_head() -{ - TEST_DECL(); - UNUSED(subtest_no); - - size_t test_sizes[] = { 1, 2, 3, 5, 6, 7, 8, 100, 99999, 0 }; - for (int i = 0; test_sizes[i]; i++) - test_rbuf_bump_head_bsize(test_sizes[i]); -} - -static void test_rbuf_bump_tail_noopt(int subtest_no) -{ - rbuf_t buff = rbuf_create(10); - CHECK_EQ(rbuf_bytes_free(buff), 10, "size_free"); - CHECK_EQ(rbuf_bytes_available(buff), 0, "size_avail"); - - subtest_no++; - int ret = rbuf_bump_head(buff, 5); - CHECK_EQ(ret, 1, "ret"); - CHECK_EQ(rbuf_bytes_free(buff), 5, "size_free"); - CHECK_EQ(rbuf_bytes_available(buff), 5, "size_avail"); - CHECK_EQ(buff->head, buff->data + 5, "head_ptr"); - CHECK_EQ(buff->tail, buff->data, "tail_ptr"); - - subtest_no++; - ret = rbuf_bump_tail_noopt(buff, 2); - CHECK_EQ(ret, 1, "ret"); - CHECK_EQ(rbuf_bytes_available(buff), 3, "size_avail"); - CHECK_EQ(rbuf_bytes_free(buff), 7, "size_free"); - CHECK_EQ(buff->head, buff->data + 5, "head_ptr"); - CHECK_EQ(buff->tail, buff->data + 2, "tail_ptr"); - - subtest_no++; - ret = rbuf_bump_tail_noopt(buff, 3); - CHECK_EQ(ret, 1, "ret"); - CHECK_EQ(rbuf_bytes_available(buff), 0, "size_avail"); - CHECK_EQ(rbuf_bytes_free(buff), 10, "size_free"); - CHECK_EQ(buff->head, buff->data + 5, "head_ptr"); - CHECK_EQ(buff->tail, buff->data + 5, "tail_ptr"); - - subtest_no++; - ret = rbuf_bump_tail_noopt(buff, 1); - CHECK_EQ(ret, 0, "ret"); - CHECK_EQ(rbuf_bytes_available(buff), 0, "size_avail"); - CHECK_EQ(rbuf_bytes_free(buff), 10, "size_free"); - CHECK_EQ(buff->head, buff->data + 5, "head_ptr"); - CHECK_EQ(buff->tail, buff->data + 5, "tail_ptr"); - - subtest_no++; - ret = rbuf_bump_head(buff, 7); - CHECK_EQ(ret, 1, "ret"); - CHECK_EQ(rbuf_bytes_available(buff), 7, "size_avail"); - CHECK_EQ(rbuf_bytes_free(buff), 3, "size_free"); - CHECK_EQ(buff->head, buff->data + 2, "head_ptr"); - CHECK_EQ(buff->tail, buff->data + 5, "tail_ptr"); - - subtest_no++; - ret = rbuf_bump_tail_noopt(buff, 5); - CHECK_EQ(ret, 1, "ret"); - CHECK_EQ(rbuf_bytes_available(buff), 2, "size_avail"); - CHECK_EQ(rbuf_bytes_free(buff), 8, "size_free"); - CHECK_EQ(buff->head, buff->data + 2, "head_ptr"); - CHECK_EQ(buff->tail, buff->data, "tail_ptr"); - - // check tail can't overrun head - subtest_no++; - ret = rbuf_bump_tail_noopt(buff, 3); - CHECK_EQ(ret, 0, "ret"); - CHECK_EQ(rbuf_bytes_available(buff), 2, "size_avail"); - CHECK_EQ(rbuf_bytes_free(buff), 8, "size_free"); - CHECK_EQ(buff->head, buff->data + 2, "head_ptr"); - CHECK_EQ(buff->tail, buff->data, "tail_ptr"); - - // check head can't overrun tail - subtest_no++; - 
ret = rbuf_bump_head(buff, 9); - CHECK_EQ(ret, 0, "ret"); - CHECK_EQ(rbuf_bytes_available(buff), 2, "size_avail"); - CHECK_EQ(rbuf_bytes_free(buff), 8, "size_free"); - CHECK_EQ(buff->head, buff->data + 2, "head_ptr"); - CHECK_EQ(buff->tail, buff->data, "tail_ptr"); - - // check head can fill the buffer - subtest_no++; - ret = rbuf_bump_head(buff, 8); - CHECK_EQ(ret, 1, "ret"); - CHECK_EQ(rbuf_bytes_available(buff), 10, "size_avail"); - CHECK_EQ(rbuf_bytes_free(buff), 0, "size_free"); - CHECK_EQ(buff->head, buff->data, "head_ptr"); - CHECK_EQ(buff->tail, buff->data, "tail_ptr"); - - // check can empty the buffer - subtest_no++; - ret = rbuf_bump_tail_noopt(buff, 10); - CHECK_EQ(ret, 1, "ret"); - CHECK_EQ(rbuf_bytes_available(buff), 0, "size_avail"); - CHECK_EQ(rbuf_bytes_free(buff), 10, "size_free"); - CHECK_EQ(buff->head, buff->data, "head_ptr"); - CHECK_EQ(buff->tail, buff->data, "tail_ptr"); -} - -static void test_rbuf_bump_tail_opt(int subtest_no) -{ - subtest_no++; - rbuf_t buff = rbuf_create(10); - CHECK_EQ(rbuf_bytes_free(buff), 10, "size_free"); - CHECK_EQ(rbuf_bytes_available(buff), 0, "size_avail"); - - subtest_no++; - int ret = rbuf_bump_head(buff, 5); - CHECK_EQ(ret, 1, "ret"); - CHECK_EQ(rbuf_bytes_free(buff), 5, "size_free"); - CHECK_EQ(rbuf_bytes_available(buff), 5, "size_avail"); - CHECK_EQ(buff->head, buff->data + 5, "head_ptr"); - CHECK_EQ(buff->tail, buff->data, "tail_ptr"); - - subtest_no++; - ret = rbuf_bump_tail(buff, 2); - CHECK_EQ(ret, 1, "ret"); - CHECK_EQ(rbuf_bytes_available(buff), 3, "size_avail"); - CHECK_EQ(rbuf_bytes_free(buff), 7, "size_free"); - CHECK_EQ(buff->head, buff->data + 5, "head_ptr"); - CHECK_EQ(buff->tail, buff->data + 2, "tail_ptr"); - - subtest_no++; - ret = rbuf_bump_tail(buff, 3); - CHECK_EQ(ret, 1, "ret"); - CHECK_EQ(rbuf_bytes_available(buff), 0, "size_avail"); - CHECK_EQ(rbuf_bytes_free(buff), 10, "size_free"); - CHECK_EQ(buff->head, buff->data, "head_ptr"); - CHECK_EQ(buff->tail, buff->data, "tail_ptr"); - - subtest_no++; - ret = rbuf_bump_tail_noopt(buff, 1); - CHECK_EQ(ret, 0, "ret"); - CHECK_EQ(rbuf_bytes_available(buff), 0, "size_avail"); - CHECK_EQ(rbuf_bytes_free(buff), 10, "size_free"); - CHECK_EQ(buff->head, buff->data, "head_ptr"); - CHECK_EQ(buff->tail, buff->data, "tail_ptr"); - - subtest_no++; - ret = rbuf_bump_head(buff, 6); - ret = rbuf_bump_tail(buff, 5); - ret = rbuf_bump_head(buff, 6); - CHECK_EQ(ret, 1, "ret"); - CHECK_EQ(rbuf_bytes_available(buff), 7, "size_avail"); - CHECK_EQ(rbuf_bytes_free(buff), 3, "size_free"); - CHECK_EQ(buff->head, buff->data + 2, "head_ptr"); - CHECK_EQ(buff->tail, buff->data + 5, "tail_ptr"); - - subtest_no++; - ret = rbuf_bump_tail(buff, 5); - CHECK_EQ(ret, 1, "ret"); - CHECK_EQ(rbuf_bytes_available(buff), 2, "size_avail"); - CHECK_EQ(rbuf_bytes_free(buff), 8, "size_free"); - CHECK_EQ(buff->head, buff->data + 2, "head_ptr"); - CHECK_EQ(buff->tail, buff->data, "tail_ptr"); - - // check tail can't overrun head - subtest_no++; - ret = rbuf_bump_tail(buff, 3); - CHECK_EQ(ret, 0, "ret"); - CHECK_EQ(rbuf_bytes_available(buff), 2, "size_avail"); - CHECK_EQ(rbuf_bytes_free(buff), 8, "size_free"); - CHECK_EQ(buff->head, buff->data + 2, "head_ptr"); - CHECK_EQ(buff->tail, buff->data, "tail_ptr"); - - // check head can't overrun tail - subtest_no++; - ret = rbuf_bump_head(buff, 9); - CHECK_EQ(ret, 0, "ret"); - CHECK_EQ(rbuf_bytes_available(buff), 2, "size_avail"); - CHECK_EQ(rbuf_bytes_free(buff), 8, "size_free"); - CHECK_EQ(buff->head, buff->data + 2, "head_ptr"); - CHECK_EQ(buff->tail, buff->data, 
"tail_ptr"); - - // check head can fill the buffer - subtest_no++; - ret = rbuf_bump_head(buff, 8); - CHECK_EQ(ret, 1, "ret"); - CHECK_EQ(rbuf_bytes_available(buff), 10, "size_avail"); - CHECK_EQ(rbuf_bytes_free(buff), 0, "size_free"); - CHECK_EQ(buff->head, buff->data, "head_ptr"); - CHECK_EQ(buff->tail, buff->data, "tail_ptr"); - - // check can empty the buffer - subtest_no++; - ret = rbuf_bump_tail(buff, 10); - CHECK_EQ(ret, 1, "ret"); - CHECK_EQ(rbuf_bytes_available(buff), 0, "size_avail"); - CHECK_EQ(rbuf_bytes_free(buff), 10, "size_free"); - CHECK_EQ(buff->head, buff->data, "head_ptr"); - CHECK_EQ(buff->tail, buff->data, "tail_ptr"); -} - -static void test_rbuf_bump_tail() -{ - TEST_DECL(); - test_rbuf_bump_tail_noopt(subtest_no); - test_rbuf_bump_tail_opt(subtest_no); -} - -#define ASCII_A 0x61 -#define ASCII_Z 0x7A -#define TEST_DATA_SIZE ASCII_Z-ASCII_A+1 -static void test_rbuf_push() -{ - TEST_DECL(); - rbuf_t buff = rbuf_create(10); - int i; - char test_data[TEST_DATA_SIZE]; - - for (int i = 0; i <= TEST_DATA_SIZE; i++) - test_data[i] = i + ASCII_A; - - int ret = rbuf_push(buff, test_data, 10); - CHECK_EQ(ret, 10, "written 10 bytes"); - CHECK_EQ(rbuf_bytes_free(buff), 0, "empty size == 0"); - for (i = 0; i < 10; i++) - CHECK_EQ(buff->data[i], i + ASCII_A, "Check data"); - - subtest_no++; - rbuf_flush(buff); - rbuf_bump_head(buff, 5); - rbuf_bump_tail_noopt(buff, 5); //to not reset both pointers to beginning - ret = rbuf_push(buff, test_data, 10); - CHECK_EQ(ret, 10, "written 10 bytes"); - for (i = 0; i < 10; i++) - CHECK_EQ(buff->data[i], ((i+5)%10) + ASCII_A, "Check Data"); - - subtest_no++; - rbuf_flush(buff); - rbuf_bump_head(buff, 9); - rbuf_bump_tail_noopt(buff, 9); - ret = rbuf_push(buff, test_data, 10); - CHECK_EQ(ret, 10, "written 10 bytes"); - for (i = 0; i < 10; i++) - CHECK_EQ(buff->data[i], ((i + 1) % 10) + ASCII_A, "Check data"); - - // let tail > head - subtest_no++; - rbuf_flush(buff); - rbuf_bump_head(buff, 9); - rbuf_bump_tail_noopt(buff, 9); - rbuf_bump_head(buff, 1); - ret = rbuf_push(buff, test_data, 9); - CHECK_EQ(ret, 9, "written 9 bytes"); - CHECK_EQ(buff->head, buff->end - 1, "head_ptr"); - CHECK_EQ(buff->tail, buff->head, "tail_ptr"); - rbuf_bump_tail(buff, 1); - //TODO push byte can be usefull optimisation - ret = rbuf_push(buff, &test_data[9], 1); - CHECK_EQ(ret, 1, "written 1 byte"); - CHECK_EQ(rbuf_bytes_free(buff), 0, "empty size == 0"); - for (i = 0; i < 10; i++) - CHECK_EQ(buff->data[i], i + ASCII_A, "Check data"); - - subtest_no++; - rbuf_flush(buff); - rbuf_bump_head(buff, 9); - rbuf_bump_tail_noopt(buff, 7); - rbuf_bump_head(buff, 1); - ret = rbuf_push(buff, test_data, 7); - CHECK_EQ(ret, 7, "written 7 bytes"); - CHECK_EQ(buff->head, buff->data + 7, "head_ptr"); - CHECK_EQ(buff->tail, buff->head, "tail_ptr"); - rbuf_bump_tail(buff, 3); - CHECK_EQ(buff->tail, buff->data, "tail_ptr"); - //TODO push byte can be usefull optimisation - ret = rbuf_push(buff, &test_data[7], 3); - CHECK_EQ(ret, 3, "written 3 bytes"); - CHECK_EQ(rbuf_bytes_free(buff), 0, "empty size == 0"); - for (i = 0; i < 10; i++) - CHECK_EQ(buff->data[i], i + ASCII_A, "Check data"); - - // test can't overfill the buffer - subtest_no++; - rbuf_flush(buff); - rbuf_push(buff, test_data, TEST_DATA_SIZE); - CHECK_EQ(ret, 3, "written 10 bytes"); - for (i = 0; i < 10; i++) - CHECK_EQ(buff->data[i], i + ASCII_A, "Check data"); -} - -#define TEST_RBUF_FIND_BYTES_SIZE 10 -void test_rbuf_find_bytes() -{ - TEST_DECL(); - rbuf_t buff = rbuf_create(TEST_RBUF_FIND_BYTES_SIZE); - char *filler_3 = " 
"; - char *needle = "needle"; - int idx; - char *ptr; - - // make sure needle is wrapped aroung in the buffer - // to test we still can find it - // target "edle ne" - rbuf_bump_head(buff, TEST_RBUF_FIND_BYTES_SIZE / 2); - rbuf_push(buff, filler_3, strlen(filler_3)); - rbuf_bump_tail(buff, TEST_RBUF_FIND_BYTES_SIZE / 2); - rbuf_push(buff, needle, strlen(needle)); - ptr = rbuf_find_bytes(buff, needle, strlen(needle), &idx); - CHECK_EQ(ptr, buff->data + (TEST_RBUF_FIND_BYTES_SIZE / 2) + strlen(filler_3), "Pointer to needle correct"); - CHECK_EQ(idx, ptr - buff->tail, "Check needle index"); -} - -int main() -{ - test_rbuf_bump_head(); - test_rbuf_bump_tail(); - test_rbuf_get_linear_insert_range(); - test_rbuf_push(); - test_rbuf_find_bytes(); - - printf( - KNRM "Total Tests %d, Total Checks %d, Successful Checks %d, Failed Checks %d\n", - total_tests, total_checks, total_checks - total_fails, total_fails); - if (total_fails) - printf(KRED "!!!Some test(s) Failed!!!\n"); - else - printf(KGRN "ALL TESTS PASSED\n"); - - return total_fails; -} diff --git a/src/aclk/mqtt_websockets/c_rhash/c_rhash.c b/src/aclk/mqtt_websockets/c_rhash/c_rhash.c deleted file mode 100644 index a71b500e2..000000000 --- a/src/aclk/mqtt_websockets/c_rhash/c_rhash.c +++ /dev/null @@ -1,264 +0,0 @@ -// Copyright: SPDX-License-Identifier: GPL-3.0-only - -#include "c_rhash_internal.h" - -#include -#include - -#ifdef DEBUG_VERBOSE -#include -#endif - -#define c_rmalloc(...) malloc(__VA_ARGS__) -#define c_rcalloc(...) calloc(__VA_ARGS__) -#define c_rfree(...) free(__VA_ARGS__) - -static inline uint32_t simple_hash(const char *name) { - unsigned char *s = (unsigned char *) name; - uint32_t hval = 0x811c9dc5; - while (*s) { - hval *= 16777619; - hval ^= (uint32_t) *s++; - } - return hval; -} - -c_rhash c_rhash_new(size_t bin_count) { - if (!bin_count) - bin_count = 1000; - - c_rhash hash = c_rcalloc(1, sizeof(struct c_rhash_s) + (bin_count * sizeof(struct bin_ll*)) ); - if (hash == NULL) - return NULL; - - hash->bin_count = bin_count; - hash->bins = (c_rhash_bin *)((char*)hash + sizeof(struct c_rhash_s)); - - return hash; -} - -static size_t get_itemtype_len(uint8_t item_type, const void* item_data) { - switch (item_type) { - case ITEMTYPE_STRING: - return strlen(item_data) + 1; - case ITEMTYPE_UINT64: - return sizeof(uint64_t); - case ITEMTYPE_UINT8: - return 1; - case ITEMTYPE_OPAQUE_PTR: - return sizeof(void*); - default: - return 0; - } -} - -static int compare_bin_item(struct bin_item *item, uint8_t key_type, const void *key) { - if (item->key_type != key_type) - return 1; - - size_t key_value_len = get_itemtype_len(key_type, key); - - if(key_type == ITEMTYPE_STRING) { - size_t new_key_value_len = get_itemtype_len(item->key_type, item->key); - if (new_key_value_len != key_value_len) - return 1; - } - - if(memcmp(item->key, key, key_value_len) == 0) { - return 0; - } - - return 1; -} - -static int insert_into_bin(c_rhash_bin *bin, uint8_t key_type, const void *key, uint8_t value_type, const void *value) { - struct bin_item *prev = NULL; - while (*bin != NULL) { - if (!compare_bin_item(*bin, key_type, key)) { -#ifdef DEBUG_VERBOSE - printf("Key already present! 
Updating value!\n"); -#endif -// TODO: optimize here if the new value is of different kind compared to the old one -// in case it is not crazily bigger we can reuse the memory and avoid malloc and free - c_rfree((*bin)->value); - (*bin)->value_type = value_type; - (*bin)->value = c_rmalloc(get_itemtype_len(value_type, value)); - if ((*bin)->value == NULL) - return 1; - memcpy((*bin)->value, value, get_itemtype_len(value_type, value)); - return 0; - } - prev = *bin; - bin = &(*bin)->next; - } - - if (*bin == NULL) - *bin = c_rcalloc(1, sizeof(struct bin_item)); - if (prev != NULL) - prev->next = *bin; - - (*bin)->key_type = key_type; - size_t len = get_itemtype_len(key_type, key); - (*bin)->key = c_rmalloc(len); - memcpy((*bin)->key, key, len); - - (*bin)->value_type = value_type; - len = get_itemtype_len(value_type, value); - (*bin)->value = c_rmalloc(len); - memcpy((*bin)->value, value, len); - return 0; -} - -static inline uint32_t get_bin_idx_str(c_rhash hash, const char *key) { - uint32_t nhash = simple_hash(key); - return nhash % hash->bin_count; -} - -static inline c_rhash_bin *get_binptr_by_str(c_rhash hash, const char *key) { - return &hash->bins[get_bin_idx_str(hash, key)]; -} - -int c_rhash_insert_str_ptr(c_rhash hash, const char *key, void *value) { - c_rhash_bin *bin = get_binptr_by_str(hash, key); - -#ifdef DEBUG_VERBOSE - if (bin != NULL) - printf("COLLISION. There will be more than one item in bin idx=%d\n", nhash); -#endif - - return insert_into_bin(bin, ITEMTYPE_STRING, key, ITEMTYPE_OPAQUE_PTR, &value); -} - -int c_rhash_insert_str_uint8(c_rhash hash, const char *key, uint8_t value) { - c_rhash_bin *bin = get_binptr_by_str(hash, key); - -#ifdef DEBUG_VERBOSE - if (bin != NULL) - printf("COLLISION. There will be more than one item in bin idx=%d\n", nhash); -#endif - - return insert_into_bin(bin, ITEMTYPE_STRING, key, ITEMTYPE_UINT8, &value); -} - -int c_rhash_insert_uint64_ptr(c_rhash hash, uint64_t key, void *value) { - c_rhash_bin *bin = &hash->bins[key % hash->bin_count]; - -#ifdef DEBUG_VERBOSE - if (bin != NULL) - printf("COLLISION. 
There will be more than one item in bin idx=%d\n", nhash); -#endif - - return insert_into_bin(bin, ITEMTYPE_UINT64, &key, ITEMTYPE_OPAQUE_PTR, &value); -} - -int c_rhash_get_uint8_by_str(c_rhash hash, const char *key, uint8_t *ret_val) { - uint32_t nhash = get_bin_idx_str(hash, key); - - struct bin_item *bin = hash->bins[nhash]; - - while (bin) { - if (bin->key_type == ITEMTYPE_STRING) { - if (!strcmp(bin->key, key)) { - *ret_val = *(uint8_t*)bin->value; - return 0; - } - } - bin = bin->next; - } - return 1; -} - -int c_rhash_get_ptr_by_str(c_rhash hash, const char *key, void **ret_val) { - uint32_t nhash = get_bin_idx_str(hash, key); - - struct bin_item *bin = hash->bins[nhash]; - - while (bin) { - if (bin->key_type == ITEMTYPE_STRING) { - if (!strcmp(bin->key, key)) { - *ret_val = *((void**)bin->value); - return 0; - } - } - bin = bin->next; - } - *ret_val = NULL; - return 1; -} - -int c_rhash_get_ptr_by_uint64(c_rhash hash, uint64_t key, void **ret_val) { - uint32_t nhash = key % hash->bin_count; - - struct bin_item *bin = hash->bins[nhash]; - - while (bin) { - if (bin->key_type == ITEMTYPE_UINT64) { - if (*((uint64_t *)bin->key) == key) { - *ret_val = *((void**)bin->value); - return 0; - } - } - bin = bin->next; - } - *ret_val = NULL; - return 1; -} - -static void c_rhash_destroy_bin(c_rhash_bin bin) { - struct bin_item *next; - do { - next = bin->next; - c_rfree(bin->key); - c_rfree(bin->value); - c_rfree(bin); - bin = next; - } while (bin != NULL); -} - -int c_rhash_iter_uint64_keys(c_rhash hash, c_rhash_iter_t *iter, uint64_t *key) { - while (iter->bin < hash->bin_count) { - if (iter->item != NULL) - iter->item = iter->item->next; - if (iter->item == NULL) { - if (iter->initialized) - iter->bin++; - else - iter->initialized = 1; - if (iter->bin < hash->bin_count) - iter->item = hash->bins[iter->bin]; - } - if (iter->item != NULL && iter->item->key_type == ITEMTYPE_UINT64) { - *key = *(uint64_t*)iter->item->key; - return 0; - } - } - return 1; -} - -int c_rhash_iter_str_keys(c_rhash hash, c_rhash_iter_t *iter, const char **key) { - while (iter->bin < hash->bin_count) { - if (iter->item != NULL) - iter->item = iter->item->next; - if (iter->item == NULL) { - if (iter->initialized) - iter->bin++; - else - iter->initialized = 1; - if (iter->bin < hash->bin_count) - iter->item = hash->bins[iter->bin]; - } - if (iter->item != NULL && iter->item->key_type == ITEMTYPE_STRING) { - *key = (const char*)iter->item->key; - return 0; - } - } - return 1; -} - -void c_rhash_destroy(c_rhash hash) { - for (size_t i = 0; i < hash->bin_count; i++) { - if (hash->bins[i] != NULL) - c_rhash_destroy_bin(hash->bins[i]); - } - c_rfree(hash); -} diff --git a/src/aclk/mqtt_websockets/c_rhash/c_rhash.h b/src/aclk/mqtt_websockets/c_rhash/c_rhash.h deleted file mode 100644 index 37addd161..000000000 --- a/src/aclk/mqtt_websockets/c_rhash/c_rhash.h +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright: SPDX-License-Identifier: GPL-3.0-only - -#include -#include -#include - -#ifndef DEFAULT_BIN_COUNT - #define DEFAULT_BIN_COUNT 1000 -#endif - -#define ITEMTYPE_UNSET (0x0) -#define ITEMTYPE_STRING (0x1) -#define ITEMTYPE_UINT8 (0x2) -#define ITEMTYPE_UINT64 (0x3) -#define ITEMTYPE_OPAQUE_PTR (0x4) - -typedef struct c_rhash_s *c_rhash; - -c_rhash c_rhash_new(size_t bin_count); - -void c_rhash_destroy(c_rhash hash); - -// # Insert -// ## Insert where key is string -int c_rhash_insert_str_ptr(c_rhash hash, const char *key, void *value); -int c_rhash_insert_str_uint8(c_rhash hash, const char *key, uint8_t value); -// ## Insert 
where key is uint64 -int c_rhash_insert_uint64_ptr(c_rhash hash, uint64_t key, void *value); - -// # Get -// ## Get where key is string -int c_rhash_get_ptr_by_str(c_rhash hash, const char *key, void **ret_val); -int c_rhash_get_uint8_by_str(c_rhash hash, const char *key, uint8_t *ret_val); -// ## Get where key is uint64 -int c_rhash_get_ptr_by_uint64(c_rhash hash, uint64_t key, void **ret_val); - -typedef struct { - size_t bin; - struct bin_item *item; - int initialized; -} c_rhash_iter_t; - -#define C_RHASH_ITER_T_INITIALIZER { .bin = 0, .item = NULL, .initialized = 0 } - -#define c_rhash_iter_t_initialize(p_iter) memset(p_iter, 0, sizeof(c_rhash_iter_t)) - -/* - * goes trough whole hash map and returns every - * type uint64 key present/stored - * - * it is not necessary to finish iterating and iterator can be reinitialized - * there are no guarantees on the order in which the keys will come - * behavior here is implementation dependent and can change any time - * - * returns: - * 0 for every key and stores the key in *key - * 1 on error or when all keys of this type has been already iterated over - */ -int c_rhash_iter_uint64_keys(c_rhash hash, c_rhash_iter_t *iter, uint64_t *key); - -int c_rhash_iter_str_keys(c_rhash hash, c_rhash_iter_t *iter, const char **key); diff --git a/src/aclk/mqtt_websockets/c_rhash/c_rhash_internal.h b/src/aclk/mqtt_websockets/c_rhash/c_rhash_internal.h deleted file mode 100644 index 20f741076..000000000 --- a/src/aclk/mqtt_websockets/c_rhash/c_rhash_internal.h +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright: SPDX-License-Identifier: GPL-3.0-only - -#include "c_rhash.h" - -struct bin_item { - uint8_t key_type:4; - void *key; - uint8_t value_type:4; - void *value; - - struct bin_item *next; -}; - -typedef struct bin_item *c_rhash_bin; - -struct c_rhash_s { - size_t bin_count; - c_rhash_bin *bins; -}; diff --git a/src/aclk/mqtt_websockets/c_rhash/tests.c b/src/aclk/mqtt_websockets/c_rhash/tests.c deleted file mode 100644 index 909c5562d..000000000 --- a/src/aclk/mqtt_websockets/c_rhash/tests.c +++ /dev/null @@ -1,273 +0,0 @@ -// Copyright: SPDX-License-Identifier: GPL-3.0-only - -#include -#include - -#include "c_rhash.h" - -// terminal color codes -#define KNRM "\x1B[0m" -#define KRED "\x1B[31m" -#define KGRN "\x1B[32m" -#define KYEL "\x1B[33m" -#define KBLU "\x1B[34m" -#define KMAG "\x1B[35m" -#define KCYN "\x1B[36m" -#define KWHT "\x1B[37m" - -#define KEY_1 "key1" -#define KEY_2 "keya" - -#define PRINT_ERR(str, ...) fprintf(stderr, "└─╼ ❌ " KRED str KNRM "\n" __VA_OPT__(,) __VA_ARGS__) - -#define ASSERT_RETVAL(fnc, comparator, expected_retval, ...) \ -{ int rval; \ -if(!((rval = fnc(__VA_ARGS__)) comparator expected_retval)) { \ - PRINT_ERR("Failed test. Value returned by \"%s\" in fnc:\"%s\",line:%d is not equal to expected value. Expected:%d, Got:%d", #fnc, __FUNCTION__, __LINE__, expected_retval, rval); \ - rc = 1; \ - goto test_cleanup; \ -} passed_subtest_count++;}; - -#define ASSERT_VAL_UINT8(returned, expected) \ -if(returned != expected) { \ - PRINT_ERR("Failed test. Value returned (%d) doesn't match expected (%d)! fnc:\"%s\",line:%d", returned, expected, __FUNCTION__, __LINE__); \ - rc = 1; \ - goto test_cleanup; \ -} passed_subtest_count++; - -#define ASSERT_VAL_PTR(returned, expected) \ -if((void*)returned != (void*)expected) { \ - PRINT_ERR("Failed test. Value returned(%p) doesn't match expected(%p)! 
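Deleted alongside the implementation, the c_rhash header above documents a small typed hash map: insert and lookup functions are paired by key/value type, getters return 0 on a hit and non-zero on a miss, and iterators visit only keys of the requested type in no guaranteed order, skipping keys of other types. A usage sketch against the removed API (pre-patch tree assumed):

#include <stdint.h>
#include <stdio.h>
#include "c_rhash.h"   // the header removed by this patch

int main(void)
{
    c_rhash h = c_rhash_new(100);   // 100 bins; collisions chain within a bin

    c_rhash_insert_str_uint8(h, "retries", 3);
    c_rhash_insert_str_uint8(h, "enabled", 1);
    c_rhash_insert_uint64_ptr(h, 42, NULL);   // different key type, skipped below

    uint8_t v;
    if (!c_rhash_get_uint8_by_str(h, "retries", &v))   // 0 means found
        printf("retries = %u\n", v);

    const char *key;
    c_rhash_iter_t it = C_RHASH_ITER_T_INITIALIZER;
    while (!c_rhash_iter_str_keys(h, &it, &key))       // string keys only,
        printf("key: %s\n", key);                      // unspecified order

    c_rhash_destroy(h);
    return 0;
}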
fnc:\"%s\",line:%d", (void*)returned, (void*)expected, __FUNCTION__, __LINE__); \ - rc = 1; \ - goto test_cleanup; \ -} passed_subtest_count++; - -#define ALL_SUBTESTS_PASS() printf("└─╼ ✅" KGRN " Test \"%s\" DONE. All of %zu subtests PASS. (line:%d)\n" KNRM, __FUNCTION__, passed_subtest_count, __LINE__); - -#define TEST_START() size_t passed_subtest_count = 0; int rc = 0; printf("╒═ Starting test \"%s\"\n", __FUNCTION__); - -int test_str_uint8() { - c_rhash hash = c_rhash_new(100); - uint8_t val; - - TEST_START(); - // function should fail on empty hash - ASSERT_RETVAL(c_rhash_get_uint8_by_str, !=, 0, hash, KEY_1, &val); - - ASSERT_RETVAL(c_rhash_insert_str_uint8, ==, 0, hash, KEY_1, 5); - ASSERT_RETVAL(c_rhash_get_uint8_by_str, ==, 0, hash, KEY_1, &val); - ASSERT_VAL_UINT8(5, val); - - ASSERT_RETVAL(c_rhash_insert_str_uint8, ==, 0, hash, KEY_2, 8); - ASSERT_RETVAL(c_rhash_get_uint8_by_str, ==, 0, hash, KEY_1, &val); - ASSERT_VAL_UINT8(5, val); - ASSERT_RETVAL(c_rhash_get_uint8_by_str, ==, 0, hash, KEY_2, &val); - ASSERT_VAL_UINT8(8, val); - ASSERT_RETVAL(c_rhash_get_uint8_by_str, !=, 0, hash, "sndnskjdf", &val); - - // test update of key - ASSERT_RETVAL(c_rhash_insert_str_uint8, ==, 0, hash, KEY_1, 100); - ASSERT_RETVAL(c_rhash_get_uint8_by_str, ==, 0, hash, KEY_1, &val); - ASSERT_VAL_UINT8(100, val); - - ALL_SUBTESTS_PASS(); -test_cleanup: - c_rhash_destroy(hash); - return rc; -} - -int test_uint64_ptr() { - c_rhash hash = c_rhash_new(100); - void *val; - - TEST_START(); - - // function should fail on empty hash - ASSERT_RETVAL(c_rhash_get_ptr_by_uint64, !=, 0, hash, 0, &val); - - ASSERT_RETVAL(c_rhash_insert_uint64_ptr, ==, 0, hash, 0, &hash); - ASSERT_RETVAL(c_rhash_get_ptr_by_uint64, ==, 0, hash, 0, &val); - ASSERT_VAL_PTR(&hash, val); - - ASSERT_RETVAL(c_rhash_insert_uint64_ptr, ==, 0, hash, 1, &val); - ASSERT_RETVAL(c_rhash_get_ptr_by_uint64, ==, 0, hash, 0, &val); - ASSERT_VAL_PTR(&hash, val); - ASSERT_RETVAL(c_rhash_get_ptr_by_uint64, ==, 0, hash, 1, &val); - ASSERT_VAL_PTR(&val, val); - ASSERT_RETVAL(c_rhash_get_ptr_by_uint64, !=, 0, hash, 2, &val); - - ALL_SUBTESTS_PASS(); -test_cleanup: - c_rhash_destroy(hash); - return rc; -} - -#define UINT64_PTR_INC_ITERATION_COUNT 5000 -int test_uint64_ptr_incremental() { - c_rhash hash = c_rhash_new(100); - void *val; - - TEST_START(); - - char a = 0x20; - char *ptr = &a; - while(ptr < &a + UINT64_PTR_INC_ITERATION_COUNT) { - ASSERT_RETVAL(c_rhash_insert_uint64_ptr, ==, 0, hash, (ptr-&a), ptr); - ptr++; - } - - ptr = &a; - char *retptr; - for(int i = 0; i < UINT64_PTR_INC_ITERATION_COUNT; i++) { - ASSERT_RETVAL(c_rhash_get_ptr_by_uint64, ==, 0, hash, i, (void**)&retptr); - ASSERT_VAL_PTR(retptr, (&a+i)); - } - - ALL_SUBTESTS_PASS(); -test_cleanup: - c_rhash_destroy(hash); - return rc; -} - -struct test_string { - const char *str; - int counter; -}; - -struct test_string test_strings[] = { - { .str = "Cillum reprehenderit eiusmod elit nisi aliquip esse exercitation commodo Lorem voluptate esse.", .counter = 0 }, - { .str = "Ullamco eiusmod tempor occaecat ad.", .counter = 0 }, - { .str = "Esse aliquip tempor sint tempor ullamco duis aute incididunt ad.", .counter = 0 }, - { .str = "Cillum Lorem labore cupidatat commodo proident adipisicing.", .counter = 0 }, - { .str = "Quis ad cillum officia exercitation.", .counter = 0 }, - { .str = "Ipsum enim dolor ullamco amet sint nisi ut occaecat sint non.", .counter = 0 }, - { .str = "Id duis officia ipsum cupidatat velit fugiat.", .counter = 0 }, - { .str = "Aliqua non occaecat voluptate 
reprehenderit reprehenderit veniam minim exercitation ea aliquip enim aliqua deserunt qui.", .counter = 0 }, - { .str = "Ullamco elit tempor laboris reprehenderit quis deserunt duis quis tempor reprehenderit magna dolore reprehenderit exercitation.", .counter = 0 }, - { .str = "Culpa do dolor quis incididunt et labore in ex.", .counter = 0 }, - { .str = "Aliquip velit cupidatat qui incididunt ipsum nostrud eiusmod ut proident nisi magna fugiat excepteur.", .counter = 0 }, - { .str = "Aliqua qui dolore tempor id proident ullamco sunt magna.", .counter = 0 }, - { .str = "Labore eiusmod ut fugiat dolore reprehenderit mollit magna.", .counter = 0 }, - { .str = "Veniam aliquip dolor excepteur minim nulla esse cupidatat esse.", .counter = 0 }, - { .str = "Do quis dolor irure nostrud occaecat aute proident anim.", .counter = 0 }, - { .str = "Enim veniam non nulla ad quis sit amet.", .counter = 0 }, - { .str = "Cillum reprehenderit do enim esse do ullamco consectetur ea.", .counter = 0 }, - { .str = "Sit et duis sint anim qui ad anim labore exercitation sunt cupidatat.", .counter = 0 }, - { .str = "Dolor officia adipisicing sint pariatur in dolor occaecat officia reprehenderit magna.", .counter = 0 }, - { .str = "Aliquip dolore qui occaecat eiusmod sunt incididunt reprehenderit minim et.", .counter = 0 }, - { .str = "Aute fugiat laboris cillum tempor consequat tempor do non laboris culpa officia nisi.", .counter = 0 }, - { .str = "Et excepteur do aliquip fugiat nisi velit tempor officia enim quis elit incididunt.", .counter = 0 }, - { .str = "Eu officia adipisicing incididunt occaecat officia cupidatat enim sit sit officia.", .counter = 0 }, - { .str = "Do amet cillum duis pariatur commodo nulla cillum magna nulla Lorem veniam cupidatat.", .counter = 0 }, - { .str = "Dolor adipisicing voluptate laboris occaecat culpa aliquip ipsum ut consequat aliqua aliquip commodo sunt velit.", .counter = 0 }, - { .str = "Nulla proident ipsum quis nulla.", .counter = 0 }, - { .str = "Laborum adipisicing nulla do aute aliqua est quis sint culpa pariatur laborum voluptate qui.", .counter = 0 }, - { .str = "Proident eiusmod sunt et nulla elit pariatur dolore irure ex voluptate excepteur adipisicing consectetur.", .counter = 0 }, - { .str = "Consequat ex voluptate officia excepteur aute deserunt proident commodo et.", .counter = 0 }, - { .str = "Velit sit cupidatat dolor dolore.", .counter = 0 }, - { .str = "Sunt enim do non anim nostrud exercitation ullamco ex proident commodo.", .counter = 0 }, - { .str = "Id ex officia cillum ad.", .counter = 0 }, - { .str = "Laboris in sunt eiusmod veniam laboris nostrud.", .counter = 0 }, - { .str = "Ex magna occaecat ea ea incididunt aliquip.", .counter = 0 }, - { .str = "Sunt eiusmod ex nostrud eu pariatur sit cupidatat ea adipisicing cillum culpa esse consequat aliquip.", .counter = 0 }, - { .str = "Excepteur commodo qui incididunt enim culpa sunt non excepteur Lorem adipisicing.", .counter = 0 }, - { .str = "Quis officia est ullamco reprehenderit incididunt occaecat pariatur ex reprehenderit nisi.", .counter = 0 }, - { .str = "Culpa irure proident proident et eiusmod irure aliqua ipsum cupidatat minim sit.", .counter = 0 }, - { .str = "Qui cupidatat aliquip est velit magna veniam.", .counter = 0 }, - { .str = "Pariatur ad ad mollit nostrud non irure minim veniam anim aliquip quis eu.", .counter = 0 }, - { .str = "Nisi ex minim eu adipisicing tempor Lorem nisi do ad exercitation est non eu.", .counter = 0 }, - { .str = "Cupidatat do mollit ad commodo cupidatat ut.", .counter 
= 0 }, - { .str = "Est non excepteur eiusmod nostrud et eu.", .counter = 0 }, - { .str = "Cupidatat mollit nisi magna officia ut elit eiusmod.", .counter = 0 }, - { .str = "Est aliqua consectetur laboris ex consequat est ut dolor.", .counter = 0 }, - { .str = "Duis eu laboris laborum ut id Lorem nostrud qui ad velit proident fugiat minim ullamco.", .counter = 0 }, - { .str = "Pariatur esse excepteur anim amet excepteur irure sint quis esse ex cupidatat ut.", .counter = 0 }, - { .str = "Esse reprehenderit amet qui excepteur aliquip amet.", .counter = 0 }, - { .str = "Ullamco laboris elit labore adipisicing aute nulla qui laborum tempor officia ut dolor aute.", .counter = 0 }, - { .str = "Commodo sunt cillum velit minim laborum Lorem aliqua tempor ad id eu.", .counter = 0 }, - { .str = NULL, .counter = 0 } -}; - -uint32_t test_strings_contain_element(const char *str) { - struct test_string *str_desc = test_strings; - while(str_desc->str) { - if (!strcmp(str, str_desc->str)) - return str_desc - test_strings; - str_desc++; - } - return -1; -} - -#define TEST_INCREMENT_STR_KEYS_HASH_SIZE 20 -int test_increment_str_keys() { - c_rhash hash; - const char *key; - - TEST_START(); - - hash = c_rhash_new(TEST_INCREMENT_STR_KEYS_HASH_SIZE); // less than element count of test_strings - - c_rhash_iter_t iter = C_RHASH_ITER_T_INITIALIZER; - - // check iter on empty hash - ASSERT_RETVAL(c_rhash_iter_str_keys, !=, 0, hash, &iter, &key); - - int32_t element_count = 0; - while (test_strings[element_count].str) { - ASSERT_RETVAL(c_rhash_insert_str_ptr, ==, 0, hash, test_strings[element_count].str, NULL); - test_strings[element_count].counter++; // we want to test we got each key exactly once - element_count++; - } - - if (element_count <= TEST_INCREMENT_STR_KEYS_HASH_SIZE * 2) { - // verify we are actually test also iteration trough single bin (when 2 keys have same hash pointing them to same bin) - PRINT_ERR("For this test to properly test all the hash size needs to be much smaller than all test key count."); - rc = 1; - goto test_cleanup; - } - - // we insert another type of key as iterator should skip it - // in case is another type - ASSERT_RETVAL(c_rhash_insert_uint64_ptr, ==, 0, hash, 5, NULL); - - c_rhash_iter_t_initialize(&iter); - while(!c_rhash_iter_str_keys(hash, &iter, &key)) { - element_count--; - int i; - if ( (i = test_strings_contain_element(key)) < 0) { - PRINT_ERR("Key \"%s\" is not present in test_strings array! 
(Fnc: %s, Line: %d)", key, __FUNCTION__, __LINE__); - rc = 1; - goto test_cleanup; - } - passed_subtest_count++; - - test_strings[i].counter--; - } - ASSERT_VAL_UINT8(element_count, 0); // we added also same non string keys - - // check each key was present exactly once - struct test_string *str_desc = test_strings; - while (str_desc->str) { - ASSERT_VAL_UINT8(str_desc->counter, 0); - str_desc++; - } - - ALL_SUBTESTS_PASS(); -test_cleanup: - c_rhash_destroy(hash); - return rc; -} - -#define RUN_TEST(fnc) \ -if(fnc()) \ - return 1; - -int main(int argc, char *argv[]) { - RUN_TEST(test_str_uint8); - RUN_TEST(test_uint64_ptr); - RUN_TEST(test_uint64_ptr_incremental); - RUN_TEST(test_increment_str_keys); - // TODO hash with mixed key tests - // TODO iterator test - return 0; -} diff --git a/src/aclk/mqtt_websockets/common_internal.h b/src/aclk/mqtt_websockets/common_internal.h index 2be1c45b8..d79dbb3f3 100644 --- a/src/aclk/mqtt_websockets/common_internal.h +++ b/src/aclk/mqtt_websockets/common_internal.h @@ -1,27 +1,12 @@ -// SPDX-License-Identifier: GPL-3.0-only +// SPDX-License-Identifier: GPL-3.0-or-later #ifndef COMMON_INTERNAL_H #define COMMON_INTERNAL_H #include "endian_compat.h" -#ifdef MQTT_WSS_CUSTOM_ALLOC -#include "../helpers/mqtt_wss_pal.h" -#else -#define mw_malloc(...) malloc(__VA_ARGS__) -#define mw_calloc(...) calloc(__VA_ARGS__) -#define mw_free(...) free(__VA_ARGS__) -#define mw_strdup(...) strdup(__VA_ARGS__) -#define mw_realloc(...) realloc(__VA_ARGS__) -#endif - #ifndef MQTT_WSS_FRAG_MEMALIGN #define MQTT_WSS_FRAG_MEMALIGN (8) #endif -#define OPENSSL_VERSION_095 0x00905100L -#define OPENSSL_VERSION_097 0x00907000L -#define OPENSSL_VERSION_110 0x10100000L -#define OPENSSL_VERSION_111 0x10101000L - #endif /* COMMON_INTERNAL_H */ diff --git a/src/aclk/mqtt_websockets/common_public.h b/src/aclk/mqtt_websockets/common_public.h index a855737f9..8f3b4f7d1 100644 --- a/src/aclk/mqtt_websockets/common_public.h +++ b/src/aclk/mqtt_websockets/common_public.h @@ -1,3 +1,5 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + #ifndef MQTT_WEBSOCKETS_COMMON_PUBLIC_H #define MQTT_WEBSOCKETS_COMMON_PUBLIC_H diff --git a/src/aclk/mqtt_websockets/mqtt_ng.c b/src/aclk/mqtt_websockets/mqtt_ng.c index 8ad6bd5c9..9abe77b5f 100644 --- a/src/aclk/mqtt_websockets/mqtt_ng.c +++ b/src/aclk/mqtt_websockets/mqtt_ng.c @@ -1,35 +1,19 @@ -// Copyright: SPDX-License-Identifier: GPL-3.0-only +// SPDX-License-Identifier: GPL-3.0-or-later #ifndef _GNU_SOURCE #define _GNU_SOURCE #endif -#include -#include -#include -#include -#include - -#include "c_rhash/c_rhash.h" +#include "libnetdata/libnetdata.h" #include "common_internal.h" #include "mqtt_constants.h" -#include "mqtt_wss_log.h" #include "mqtt_ng.h" -#define UNIT_LOG_PREFIX "mqtt_client: " -#define FATAL(fmt, ...) mws_fatal(client->log, UNIT_LOG_PREFIX fmt, ##__VA_ARGS__) -#define ERROR(fmt, ...) mws_error(client->log, UNIT_LOG_PREFIX fmt, ##__VA_ARGS__) -#define WARN(fmt, ...) mws_warn (client->log, UNIT_LOG_PREFIX fmt, ##__VA_ARGS__) -#define INFO(fmt, ...) mws_info (client->log, UNIT_LOG_PREFIX fmt, ##__VA_ARGS__) -#define DEBUG(fmt, ...) 
mws_debug(client->log, UNIT_LOG_PREFIX fmt, ##__VA_ARGS__) - #define SMALL_STRING_DONT_FRAGMENT_LIMIT 128 -#define MIN(a,b) (((a)<(b))?(a):(b)) - -#define LOCK_HDR_BUFFER(buffer) pthread_mutex_lock(&((buffer)->mutex)) -#define UNLOCK_HDR_BUFFER(buffer) pthread_mutex_unlock(&((buffer)->mutex)) +#define LOCK_HDR_BUFFER(buffer) spinlock_lock(&((buffer)->spinlock)) +#define UNLOCK_HDR_BUFFER(buffer) spinlock_unlock(&((buffer)->spinlock)) #define BUFFER_FRAG_GARBAGE_COLLECT 0x01 // some packets can be marked for garbage collection @@ -75,17 +59,17 @@ struct transaction_buffer { // to be able to revert state easily // in case of error mid processing struct header_buffer state_backup; - pthread_mutex_t mutex; + SPINLOCK spinlock; struct buffer_fragment *sending_frag; }; enum mqtt_client_state { - RAW = 0, - CONNECT_PENDING, - CONNECTING, - CONNECTED, - ERROR, - DISCONNECTED + MQTT_STATE_RAW = 0, + MQTT_STATE_CONNECT_PENDING, + MQTT_STATE_CONNECTING, + MQTT_STATE_CONNECTED, + MQTT_STATE_ERROR, + MQTT_STATE_DISCONNECTED }; enum parser_state { @@ -224,7 +208,7 @@ struct topic_aliases_data { c_rhash stoi_dict; uint32_t idx_max; uint32_t idx_assigned; - pthread_rwlock_t rwlock; + SPINLOCK spinlock; }; struct mqtt_ng_client { @@ -234,8 +218,6 @@ struct mqtt_ng_client { mqtt_msg_data connect_msg; - mqtt_wss_log_ctx_t log; - mqtt_ng_send_fnc_t send_fnc_ptr; void *user_ctx; @@ -253,7 +235,7 @@ struct mqtt_ng_client { unsigned int ping_pending:1; struct mqtt_ng_stats stats; - pthread_mutex_t stats_mutex; + SPINLOCK stats_spinlock; struct topic_aliases_data tx_topic_aliases; c_rhash rx_aliases; @@ -407,7 +389,7 @@ enum memory_mode { CALLER_RESPONSIBLE }; -static inline enum memory_mode ptr2memory_mode(void * ptr) { +static enum memory_mode ptr2memory_mode(void * ptr) { if (ptr == NULL) return MEMCPY; if (ptr == CALLER_RESPONSIBILITY) @@ -492,15 +474,8 @@ static void buffer_rebuild(struct header_buffer *buf) } while(frag); } -static void buffer_garbage_collect(struct header_buffer *buf, mqtt_wss_log_ctx_t log_ctx) +static void buffer_garbage_collect(struct header_buffer *buf) { -#if !defined(MQTT_DEBUG_VERBOSE) && !defined(ADDITIONAL_CHECKS) - (void) log_ctx; -#endif -#ifdef MQTT_DEBUG_VERBOSE - mws_debug(log_ctx, "Buffer Garbage Collection!"); -#endif - struct buffer_fragment *frag = BUFFER_FIRST_FRAG(buf); while (frag) { if (!frag_is_marked_for_gc(frag)) @@ -511,12 +486,8 @@ static void buffer_garbage_collect(struct header_buffer *buf, mqtt_wss_log_ctx_t frag = frag->next; } - if (frag == BUFFER_FIRST_FRAG(buf)) { -#ifdef MQTT_DEBUG_VERBOSE - mws_debug(log_ctx, "Buffer Garbage Collection! No Space Reclaimed!"); -#endif + if (frag == BUFFER_FIRST_FRAG(buf)) return; - } if (!frag) { buf->tail_frag = NULL; @@ -535,21 +506,17 @@ static void buffer_garbage_collect(struct header_buffer *buf, mqtt_wss_log_ctx_t buffer_rebuild(buf); } -static void transaction_buffer_garbage_collect(struct transaction_buffer *buf, mqtt_wss_log_ctx_t log_ctx) +static void transaction_buffer_garbage_collect(struct transaction_buffer *buf) { -#ifdef MQTT_DEBUG_VERBOSE - mws_debug(log_ctx, "Transaction Buffer Garbage Collection! %s", buf->sending_frag == NULL ? 
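The hunks in this file replace pthread_mutex_t and pthread_rwlock_t with Netdata's SPINLOCK. spinlock_init()/spinlock_lock()/spinlock_unlock() are libnetdata internals; the C11 sketch below shows the equivalent pattern they stand in for, using atomic_flag as a substitute — a reasonable trade here because the guarded sections, such as bumping statistics counters, are only a few instructions long:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
    atomic_flag locked;
    uint64_t tx_messages_queued;
} stats_t;

static void stats_init(stats_t *s)
{
    atomic_flag_clear(&s->locked);           // plays the role of spinlock_init()
    s->tx_messages_queued = 0;
}

static void stats_message_queued(stats_t *s)
{
    while (atomic_flag_test_and_set_explicit(&s->locked, memory_order_acquire))
        ;                                    // busy-wait: spinlock_lock()
    s->tx_messages_queued++;                 // tiny critical section
    atomic_flag_clear_explicit(&s->locked, memory_order_release);  // spinlock_unlock()
}

int main(void)
{
    stats_t s;
    stats_init(&s);
    stats_message_queued(&s);
    printf("queued: %llu\n", (unsigned long long)s.tx_messages_queued);
    return 0;
}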
"NULL" : "in flight message"); -#endif - // Invalidate the cached sending fragment // as we will move data around if (buf->sending_frag != &ping_frag) buf->sending_frag = NULL; - buffer_garbage_collect(&buf->hdr_buffer, log_ctx); + buffer_garbage_collect(&buf->hdr_buffer); } -static int transaction_buffer_grow(struct transaction_buffer *buf, mqtt_wss_log_ctx_t log_ctx, float rate, size_t max) +static int transaction_buffer_grow(struct transaction_buffer *buf, float rate, size_t max) { if (buf->hdr_buffer.size >= max) return 0; @@ -565,35 +532,30 @@ static int transaction_buffer_grow(struct transaction_buffer *buf, mqtt_wss_log_ void *ret = reallocz(buf->hdr_buffer.data, buf->hdr_buffer.size); if (ret == NULL) { - mws_warn(log_ctx, "Buffer growth failed (realloc)"); + nd_log(NDLS_DAEMON, NDLP_WARNING, "Buffer growth failed (realloc)"); return 1; } - mws_debug(log_ctx, "Message metadata buffer was grown"); + nd_log(NDLS_DAEMON, NDLP_DEBUG, "Message metadata buffer was grown"); buf->hdr_buffer.data = ret; buffer_rebuild(&buf->hdr_buffer); return 0; } -inline static int transaction_buffer_init(struct transaction_buffer *to_init, size_t size) +inline static void transaction_buffer_init(struct transaction_buffer *to_init, size_t size) { - pthread_mutex_init(&to_init->mutex, NULL); + spinlock_init(&to_init->spinlock); to_init->hdr_buffer.size = size; to_init->hdr_buffer.data = mallocz(size); - if (to_init->hdr_buffer.data == NULL) - return 1; - to_init->hdr_buffer.tail = to_init->hdr_buffer.data; to_init->hdr_buffer.tail_frag = NULL; - return 0; } static void transaction_buffer_destroy(struct transaction_buffer *to_init) { buffer_purge(&to_init->hdr_buffer); - pthread_mutex_destroy(&to_init->mutex); freez(to_init->hdr_buffer.data); } @@ -629,54 +591,30 @@ void transaction_buffer_transaction_rollback(struct transaction_buffer *buf, str struct mqtt_ng_client *mqtt_ng_init(struct mqtt_ng_init *settings) { struct mqtt_ng_client *client = callocz(1, sizeof(struct mqtt_ng_client)); - if (client == NULL) - return NULL; - if (transaction_buffer_init(&client->main_buffer, HEADER_BUFFER_SIZE)) - goto err_free_client; + transaction_buffer_init(&client->main_buffer, HEADER_BUFFER_SIZE); client->rx_aliases = RX_ALIASES_INITIALIZE(); - if (client->rx_aliases == NULL) - goto err_free_trx_buf; - if (pthread_mutex_init(&client->stats_mutex, NULL)) - goto err_free_rx_alias; + spinlock_init(&client->stats_spinlock); + spinlock_init(&client->tx_topic_aliases.spinlock); client->tx_topic_aliases.stoi_dict = TX_ALIASES_INITIALIZE(); - if (client->tx_topic_aliases.stoi_dict == NULL) - goto err_free_stats_mutex; client->tx_topic_aliases.idx_max = UINT16_MAX; - if (pthread_rwlock_init(&client->tx_topic_aliases.rwlock, NULL)) - goto err_free_tx_alias; - // TODO just embed the struct into mqtt_ng_client client->parser.received_data = settings->data_in; client->send_fnc_ptr = settings->data_out_fnc; client->user_ctx = settings->user_ctx; - client->log = settings->log; - client->puback_callback = settings->puback_callback; client->connack_callback = settings->connack_callback; client->msg_callback = settings->msg_callback; return client; - -err_free_tx_alias: - c_rhash_destroy(client->tx_topic_aliases.stoi_dict); -err_free_stats_mutex: - pthread_mutex_destroy(&client->stats_mutex); -err_free_rx_alias: - c_rhash_destroy(client->rx_aliases); -err_free_trx_buf: - transaction_buffer_destroy(&client->main_buffer); -err_free_client: - freez(client); - return NULL; } -static inline uint8_t get_control_packet_type(uint8_t 
first_hdr_byte) +static uint8_t get_control_packet_type(uint8_t first_hdr_byte) { return first_hdr_byte >> 4; } @@ -708,33 +646,27 @@ static void mqtt_ng_destroy_tx_alias_hash(c_rhash hash) void mqtt_ng_destroy(struct mqtt_ng_client *client) { transaction_buffer_destroy(&client->main_buffer); - pthread_mutex_destroy(&client->stats_mutex); mqtt_ng_destroy_tx_alias_hash(client->tx_topic_aliases.stoi_dict); - pthread_rwlock_destroy(&client->tx_topic_aliases.rwlock); mqtt_ng_destroy_rx_alias_hash(client->rx_aliases); freez(client); } -int frag_set_external_data(mqtt_wss_log_ctx_t log, struct buffer_fragment *frag, void *data, size_t data_len, free_fnc_t data_free_fnc) +int frag_set_external_data(struct buffer_fragment *frag, void *data, size_t data_len, free_fnc_t data_free_fnc) { if (frag->len) { // TODO?: This could potentially be done in future if we set rule // external data always follows in buffer data // could help reduce fragmentation in some messages but // currently not worth it considering time is tight - mws_fatal(log, UNIT_LOG_PREFIX "INTERNAL ERROR: Cannot set external data to fragment already containing in buffer data!"); + nd_log(NDLS_DAEMON, NDLP_ERR, "INTERNAL ERROR: Cannot set external data to fragment already containing in buffer data!"); return 1; } switch (ptr2memory_mode(data_free_fnc)) { case MEMCPY: frag->data = mallocz(data_len); - if (frag->data == NULL) { - mws_error(log, UNIT_LOG_PREFIX "OOM while malloc @_optimized_add"); - return 1; - } memcpy(frag->data, data, data_len); break; case EXTERNAL_FREE_AFTER_USE: @@ -816,18 +748,18 @@ static size_t mqtt_ng_connect_size(struct mqtt_auth_properties *auth, #define PACK_2B_INT(buffer, integer, frag) { *(uint16_t *)WRITE_POS(frag) = htobe16((integer)); \ DATA_ADVANCE(buffer, sizeof(uint16_t), frag); } -static int _optimized_add(struct header_buffer *buf, mqtt_wss_log_ctx_t log_ctx, void *data, size_t data_len, free_fnc_t data_free_fnc, struct buffer_fragment **frag) +static int _optimized_add(struct header_buffer *buf, void *data, size_t data_len, free_fnc_t data_free_fnc, struct buffer_fragment **frag) { if (data_len > SMALL_STRING_DONT_FRAGMENT_LIMIT) { buffer_frag_flag_t flags = BUFFER_FRAG_DATA_EXTERNAL; if ((*frag)->flags & BUFFER_FRAG_GARBAGE_COLLECT_ON_SEND) flags |= BUFFER_FRAG_GARBAGE_COLLECT_ON_SEND; if( (*frag = buffer_new_frag(buf, flags)) == NULL ) { - mws_error(log_ctx, "Out of buffer space while generating the message"); + nd_log(NDLS_DAEMON, NDLP_ERR, "Out of buffer space while generating the message"); return 1; } - if (frag_set_external_data(log_ctx, *frag, data, data_len, data_free_fnc)) { - mws_error(log_ctx, "Error adding external data to newly created fragment"); + if (frag_set_external_data(*frag, data, data_len, data_free_fnc)) { + nd_log(NDLS_DAEMON, NDLP_ERR, "Error adding external data to newly created fragment"); return 1; } // we dont want to write to this fragment anymore @@ -842,31 +774,30 @@ static int _optimized_add(struct header_buffer *buf, mqtt_wss_log_ctx_t log_ctx, return 0; } -#define TRY_GENERATE_MESSAGE(generator_function, client, ...) \ - int rc = generator_function(&client->main_buffer, client->log, ##__VA_ARGS__); \ +#define TRY_GENERATE_MESSAGE(generator_function, ...) 
\ + int rc = generator_function(&client->main_buffer, ##__VA_ARGS__); \ if (rc == MQTT_NG_MSGGEN_BUFFER_OOM) { \ LOCK_HDR_BUFFER(&client->main_buffer); \ - transaction_buffer_garbage_collect((&client->main_buffer), client->log); \ + transaction_buffer_garbage_collect((&client->main_buffer)); \ UNLOCK_HDR_BUFFER(&client->main_buffer); \ - rc = generator_function(&client->main_buffer, client->log, ##__VA_ARGS__); \ + rc = generator_function(&client->main_buffer, ##__VA_ARGS__); \ if (rc == MQTT_NG_MSGGEN_BUFFER_OOM && client->max_mem_bytes) { \ LOCK_HDR_BUFFER(&client->main_buffer); \ - transaction_buffer_grow((&client->main_buffer), client->log, GROWTH_FACTOR, client->max_mem_bytes); \ + transaction_buffer_grow((&client->main_buffer),GROWTH_FACTOR, client->max_mem_bytes); \ UNLOCK_HDR_BUFFER(&client->main_buffer); \ - rc = generator_function(&client->main_buffer, client->log, ##__VA_ARGS__); \ + rc = generator_function(&client->main_buffer, ##__VA_ARGS__); \ } \ if (rc == MQTT_NG_MSGGEN_BUFFER_OOM) \ - mws_error(client->log, "%s failed to generate message due to insufficient buffer space (line %d)", __FUNCTION__, __LINE__); \ + nd_log(NDLS_DAEMON, NDLP_ERR, "%s failed to generate message due to insufficient buffer space (line %d)", __FUNCTION__, __LINE__); \ } \ if (rc == MQTT_NG_MSGGEN_OK) { \ - pthread_mutex_lock(&client->stats_mutex); \ + spinlock_lock(&client->stats_spinlock); \ client->stats.tx_messages_queued++; \ - pthread_mutex_unlock(&client->stats_mutex); \ + spinlock_unlock(&client->stats_spinlock); \ } \ return rc; mqtt_msg_data mqtt_ng_generate_connect(struct transaction_buffer *trx_buf, - mqtt_wss_log_ctx_t log_ctx, struct mqtt_auth_properties *auth, struct mqtt_lwt_properties *lwt, uint8_t clean_start, @@ -874,7 +805,7 @@ mqtt_msg_data mqtt_ng_generate_connect(struct transaction_buffer *trx_buf, { // Sanity Checks First (are given parameters correct and up to MQTT spec) if (!auth->client_id) { - mws_error(log_ctx, "ClientID must be set. [MQTT-3.1.3-3]"); + nd_log(NDLS_DAEMON, NDLP_ERR, "ClientID must be set. [MQTT-3.1.3-3]"); return NULL; } @@ -885,29 +816,29 @@ mqtt_msg_data mqtt_ng_generate_connect(struct transaction_buffer *trx_buf, // however server MUST allow ClientIDs between 1-23 bytes [MQTT-3.1.3-5] // so we will warn client server might not like this and he is using it // at his own risk! - mws_warn(log_ctx, "client_id provided is empty string. This might not be allowed by server [MQTT-3.1.3-6]"); + nd_log(NDLS_DAEMON, NDLP_WARNING, "client_id provided is empty string. This might not be allowed by server [MQTT-3.1.3-6]"); } if(len > MQTT_MAX_CLIENT_ID) { // [MQTT-3.1.3-5] server MUST allow client_id length 1-32 // server MAY allow longer client_id, if user provides longer client_id // warn them he is doing so at his own risk! - mws_warn(log_ctx, "client_id provided is longer than 23 bytes, server might not allow that [MQTT-3.1.3-5]"); + nd_log(NDLS_DAEMON, NDLP_WARNING, "client_id provided is longer than 23 bytes, server might not allow that [MQTT-3.1.3-5]"); } if (lwt) { if (lwt->will_message && lwt->will_message_size > 65535) { - mws_error(log_ctx, "Will message cannot be longer than 65535 bytes due to MQTT protocol limitations [MQTT-3.1.3-4] and [MQTT-1.5.6]"); + nd_log(NDLS_DAEMON, NDLP_ERR, "Will message cannot be longer than 65535 bytes due to MQTT protocol limitations [MQTT-3.1.3-4] and [MQTT-1.5.6]"); return NULL; } if (!lwt->will_topic) { //TODO topic given with strlen==0 ? 
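The TRY_GENERATE_MESSAGE macro above escalates in three steps when the header buffer cannot fit a new message: retry after garbage-collecting already-sent fragments, then retry after growing the buffer (capped at max_mem_bytes), then report failure. Unrolled as a plain function to make the control flow explicit — this is a readability paraphrase of the macro, not standalone code: real generators take extra arguments, and the LOCK_HDR_BUFFER and stats-spinlock sections are elided here:

static int try_generate(struct mqtt_ng_client *client,
                        int (*generate)(struct transaction_buffer *buf))
{
    int rc = generate(&client->main_buffer);

    if (rc == MQTT_NG_MSGGEN_BUFFER_OOM) {
        // first fallback: reclaim space held by messages already sent
        transaction_buffer_garbage_collect(&client->main_buffer);
        rc = generate(&client->main_buffer);
    }

    if (rc == MQTT_NG_MSGGEN_BUFFER_OOM && client->max_mem_bytes) {
        // second fallback: enlarge the buffer by GROWTH_FACTOR, up to the cap
        transaction_buffer_grow(&client->main_buffer, GROWTH_FACTOR, client->max_mem_bytes);
        rc = generate(&client->main_buffer);
    }

    if (rc == MQTT_NG_MSGGEN_OK)
        client->stats.tx_messages_queued++;   // done under the stats spinlock in the macro

    return rc;                                // still OOM here -> the macro logs an error
}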
check specs - mws_error(log_ctx, "If will message is given will topic must also be given [MQTT-3.1.3.3]"); + nd_log(NDLS_DAEMON, NDLP_ERR, "If will message is given will topic must also be given [MQTT-3.1.3.3]"); return NULL; } if (lwt->will_qos > MQTT_MAX_QOS) { // refer to [MQTT-3-1.2-12] - mws_error(log_ctx, "QOS for LWT message is bigger than max"); + nd_log(NDLS_DAEMON, NDLP_ERR, "QOS for LWT message is bigger than max"); return NULL; } } @@ -941,8 +872,10 @@ mqtt_msg_data mqtt_ng_generate_connect(struct transaction_buffer *trx_buf, *connect_flags = 0; if (auth->username) *connect_flags |= MQTT_CONNECT_FLAG_USERNAME; + if (auth->password) *connect_flags |= MQTT_CONNECT_FLAG_PASSWORD; + if (lwt) { *connect_flags |= MQTT_CONNECT_FLAG_LWT; *connect_flags |= lwt->will_qos << MQTT_CONNECT_FLAG_QOS_BITSHIFT; @@ -966,7 +899,7 @@ mqtt_msg_data mqtt_ng_generate_connect(struct transaction_buffer *trx_buf, // [MQTT-3.1.3.1] Client identifier CHECK_BYTES_AVAILABLE(&trx_buf->hdr_buffer, 2, goto fail_rollback); PACK_2B_INT(&trx_buf->hdr_buffer, strlen(auth->client_id), frag); - if (_optimized_add(&trx_buf->hdr_buffer, log_ctx, auth->client_id, strlen(auth->client_id), auth->client_id_free, &frag)) + if (_optimized_add(&trx_buf->hdr_buffer, auth->client_id, strlen(auth->client_id), auth->client_id_free, &frag)) goto fail_rollback; if (lwt != NULL) { @@ -980,7 +913,7 @@ mqtt_msg_data mqtt_ng_generate_connect(struct transaction_buffer *trx_buf, // Will Topic [MQTT-3.1.3.3] CHECK_BYTES_AVAILABLE(&trx_buf->hdr_buffer, 2, goto fail_rollback); PACK_2B_INT(&trx_buf->hdr_buffer, strlen(lwt->will_topic), frag); - if (_optimized_add(&trx_buf->hdr_buffer, log_ctx, lwt->will_topic, strlen(lwt->will_topic), lwt->will_topic_free, &frag)) + if (_optimized_add(&trx_buf->hdr_buffer, lwt->will_topic, strlen(lwt->will_topic), lwt->will_topic_free, &frag)) goto fail_rollback; // Will Payload [MQTT-3.1.3.4] @@ -988,7 +921,7 @@ mqtt_msg_data mqtt_ng_generate_connect(struct transaction_buffer *trx_buf, BUFFER_TRANSACTION_NEW_FRAG(&trx_buf->hdr_buffer, 0, frag, goto fail_rollback); CHECK_BYTES_AVAILABLE(&trx_buf->hdr_buffer, 2, goto fail_rollback); PACK_2B_INT(&trx_buf->hdr_buffer, lwt->will_message_size, frag); - if (_optimized_add(&trx_buf->hdr_buffer, log_ctx, lwt->will_message, lwt->will_message_size, lwt->will_topic_free, &frag)) + if (_optimized_add(&trx_buf->hdr_buffer, lwt->will_message, lwt->will_message_size, lwt->will_topic_free, &frag)) goto fail_rollback; } } @@ -998,7 +931,7 @@ mqtt_msg_data mqtt_ng_generate_connect(struct transaction_buffer *trx_buf, BUFFER_TRANSACTION_NEW_FRAG(&trx_buf->hdr_buffer, 0, frag, goto fail_rollback); CHECK_BYTES_AVAILABLE(&trx_buf->hdr_buffer, 2, goto fail_rollback); PACK_2B_INT(&trx_buf->hdr_buffer, strlen(auth->username), frag); - if (_optimized_add(&trx_buf->hdr_buffer, log_ctx, auth->username, strlen(auth->username), auth->username_free, &frag)) + if (_optimized_add(&trx_buf->hdr_buffer, auth->username, strlen(auth->username), auth->username_free, &frag)) goto fail_rollback; } @@ -1007,7 +940,7 @@ mqtt_msg_data mqtt_ng_generate_connect(struct transaction_buffer *trx_buf, BUFFER_TRANSACTION_NEW_FRAG(&trx_buf->hdr_buffer, 0, frag, goto fail_rollback); CHECK_BYTES_AVAILABLE(&trx_buf->hdr_buffer, 2, goto fail_rollback); PACK_2B_INT(&trx_buf->hdr_buffer, strlen(auth->password), frag); - if (_optimized_add(&trx_buf->hdr_buffer, log_ctx, auth->password, strlen(auth->password), auth->password_free, &frag)) + if (_optimized_add(&trx_buf->hdr_buffer, auth->password, 
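
The connect-flags assignments above set one bit per optional CONNECT feature. The layout is fixed by the MQTT 5 specification ([MQTT-3.1.2.3]): bit 1 is Clean Start, bit 2 the Will flag, bits 3-4 the Will QoS, bit 5 Will Retain, bit 6 Password and bit 7 Username. A self-contained sketch of the same assembly; the FLAG_* constants are invented here, while the patch uses the library's MQTT_CONNECT_FLAG_* macros:

    #include <stdbool.h>
    #include <stdint.h>

    #define FLAG_CLEAN_START  (1u << 1)
    #define FLAG_WILL         (1u << 2)
    #define WILL_QOS_SHIFT    3            /* bits 3-4 carry the Will QoS */
    #define FLAG_WILL_RETAIN  (1u << 5)
    #define FLAG_PASSWORD     (1u << 6)
    #define FLAG_USERNAME     (1u << 7)

    static uint8_t connect_flags(bool clean, bool has_user, bool has_pass,
                                 bool has_will, uint8_t will_qos, bool will_retain)
    {
        uint8_t f = 0;
        if (clean)    f |= FLAG_CLEAN_START;
        if (has_user) f |= FLAG_USERNAME;
        if (has_pass) f |= FLAG_PASSWORD;
        if (has_will) {
            f |= FLAG_WILL | (uint8_t)(will_qos << WILL_QOS_SHIFT);
            if (will_retain)
                f |= FLAG_WILL_RETAIN;
        }
        return f;
    }
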
strlen(auth->password), auth->password_free, &frag)) goto fail_rollback; } trx_buf->hdr_buffer.tail_frag->flags |= BUFFER_FRAG_MQTT_PACKET_TAIL; @@ -1024,7 +957,7 @@ int mqtt_ng_connect(struct mqtt_ng_client *client, uint8_t clean_start, uint16_t keep_alive) { - client->client_state = RAW; + client->client_state = MQTT_STATE_RAW; client->parser.state = MQTT_PARSE_FIXED_HEADER_PACKET_TYPE; LOCK_HDR_BUFFER(&client->main_buffer); @@ -1033,28 +966,23 @@ int mqtt_ng_connect(struct mqtt_ng_client *client, buffer_purge(&client->main_buffer.hdr_buffer); UNLOCK_HDR_BUFFER(&client->main_buffer); - pthread_rwlock_wrlock(&client->tx_topic_aliases.rwlock); + spinlock_lock(&client->tx_topic_aliases.spinlock); // according to MQTT spec topic aliases should not be persisted // even if clean session is true mqtt_ng_destroy_tx_alias_hash(client->tx_topic_aliases.stoi_dict); + client->tx_topic_aliases.stoi_dict = TX_ALIASES_INITIALIZE(); - if (client->tx_topic_aliases.stoi_dict == NULL) { - pthread_rwlock_unlock(&client->tx_topic_aliases.rwlock); - return 1; - } client->tx_topic_aliases.idx_assigned = 0; - pthread_rwlock_unlock(&client->tx_topic_aliases.rwlock); + spinlock_unlock(&client->tx_topic_aliases.spinlock); mqtt_ng_destroy_rx_alias_hash(client->rx_aliases); client->rx_aliases = RX_ALIASES_INITIALIZE(); - if (client->rx_aliases == NULL) - return 1; - client->connect_msg = mqtt_ng_generate_connect(&client->main_buffer, client->log, auth, lwt, clean_start, keep_alive); + client->connect_msg = mqtt_ng_generate_connect(&client->main_buffer, auth, lwt, clean_start, keep_alive); if (client->connect_msg == NULL) return 1; - pthread_mutex_lock(&client->stats_mutex); + spinlock_lock(&client->stats_spinlock); if (clean_start) client->stats.tx_messages_queued = 1; else @@ -1062,9 +990,9 @@ int mqtt_ng_connect(struct mqtt_ng_client *client, client->stats.tx_messages_sent = 0; client->stats.rx_messages_rcvd = 0; - pthread_mutex_unlock(&client->stats_mutex); + spinlock_unlock(&client->stats_spinlock); - client->client_state = CONNECT_PENDING; + client->client_state = MQTT_STATE_CONNECT_PENDING; return 0; } @@ -1074,15 +1002,16 @@ uint16_t get_unused_packet_id() { return packet_id ? packet_id : ++packet_id; } -static inline size_t mqtt_ng_publish_size(const char *topic, - size_t msg_len, - uint16_t topic_id) +static size_t mqtt_ng_publish_size( + const char *topic, + size_t msg_len, + uint16_t topic_id) { - size_t retval = 2 /* Topic Name Length */ - + (topic == NULL ? 0 : strlen(topic)) - + 2 /* Packet identifier */ - + 1 /* Properties Length TODO for now fixed to 1 property */ - + msg_len; + size_t retval = 2 + + (topic == NULL ? 0 : strlen(topic)) /* Topic Name Length */ + + 2 /* Packet identifier */ + + 1 /* Properties Length for now fixed to 1 property */ + + msg_len; if (topic_id) retval += 3; @@ -1091,7 +1020,6 @@ static inline size_t mqtt_ng_publish_size(const char *topic, } int mqtt_ng_generate_publish(struct transaction_buffer *trx_buf, - mqtt_wss_log_ctx_t log_ctx, char *topic, free_fnc_t topic_free, void *msg, @@ -1130,7 +1058,7 @@ int mqtt_ng_generate_publish(struct transaction_buffer *trx_buf, // [MQTT-3.3.2.1] PACK_2B_INT(&trx_buf->hdr_buffer, topic == NULL ? 
0 : strlen(topic), frag); if (topic != NULL) { - if (_optimized_add(&trx_buf->hdr_buffer, log_ctx, topic, strlen(topic), topic_free, &frag)) + if (_optimized_add(&trx_buf->hdr_buffer, topic, strlen(topic), topic_free, &frag)) goto fail_rollback; BUFFER_TRANSACTION_NEW_FRAG(&trx_buf->hdr_buffer, 0, frag, goto fail_rollback); } @@ -1154,7 +1082,7 @@ int mqtt_ng_generate_publish(struct transaction_buffer *trx_buf, if( (frag = buffer_new_frag(&trx_buf->hdr_buffer, BUFFER_FRAG_DATA_EXTERNAL)) == NULL ) goto fail_rollback; - if (frag_set_external_data(log_ctx, frag, msg, msg_len, msg_free)) + if (frag_set_external_data(frag, msg, msg_len, msg_free)) goto fail_rollback; trx_buf->hdr_buffer.tail_frag->flags |= BUFFER_FRAG_MQTT_PACKET_TAIL; @@ -1178,9 +1106,9 @@ int mqtt_ng_publish(struct mqtt_ng_client *client, uint16_t *packet_id) { struct topic_alias_data *alias = NULL; - pthread_rwlock_rdlock(&client->tx_topic_aliases.rwlock); + spinlock_lock(&client->tx_topic_aliases.spinlock); c_rhash_get_ptr_by_str(client->tx_topic_aliases.stoi_dict, topic, (void**)&alias); - pthread_rwlock_unlock(&client->tx_topic_aliases.rwlock); + spinlock_unlock(&client->tx_topic_aliases.spinlock); uint16_t topic_id = 0; @@ -1194,14 +1122,14 @@ int mqtt_ng_publish(struct mqtt_ng_client *client, } if (client->max_msg_size && PUBLISH_SP_SIZE + mqtt_ng_publish_size(topic, msg_len, topic_id) > client->max_msg_size) { - mws_error(client->log, "Message too big for server: %zu", msg_len); + nd_log(NDLS_DAEMON, NDLP_ERR, "Message too big for server: %zu", msg_len); return MQTT_NG_MSGGEN_MSG_TOO_BIG; } - TRY_GENERATE_MESSAGE(mqtt_ng_generate_publish, client, topic, topic_free, msg, msg_free, msg_len, publish_flags, packet_id, topic_id); + TRY_GENERATE_MESSAGE(mqtt_ng_generate_publish, topic, topic_free, msg, msg_free, msg_len, publish_flags, packet_id, topic_id); } -static inline size_t mqtt_ng_subscribe_size(struct mqtt_sub *subs, size_t sub_count) +static size_t mqtt_ng_subscribe_size(struct mqtt_sub *subs, size_t sub_count) { size_t len = 2 /* Packet Identifier */ + 1 /* Properties Length TODO for now fixed 0 */; len += sub_count * (2 /* topic filter string length */ + 1 /* [MQTT-3.8.3.1] Subscription Options Byte */); @@ -1212,7 +1140,7 @@ static inline size_t mqtt_ng_subscribe_size(struct mqtt_sub *subs, size_t sub_co return len; } -int mqtt_ng_generate_subscribe(struct transaction_buffer *trx_buf, mqtt_wss_log_ctx_t log_ctx, struct mqtt_sub *subs, size_t sub_count) +int mqtt_ng_generate_subscribe(struct transaction_buffer *trx_buf, struct mqtt_sub *subs, size_t sub_count) { // >> START THE RODEO << transaction_buffer_transaction_start(trx_buf); @@ -1247,7 +1175,7 @@ int mqtt_ng_generate_subscribe(struct transaction_buffer *trx_buf, mqtt_wss_log_ for (size_t i = 0; i < sub_count; i++) { BUFFER_TRANSACTION_NEW_FRAG(&trx_buf->hdr_buffer, 0, frag, goto fail_rollback); PACK_2B_INT(&trx_buf->hdr_buffer, strlen(subs[i].topic), frag); - if (_optimized_add(&trx_buf->hdr_buffer, log_ctx, subs[i].topic, strlen(subs[i].topic), subs[i].topic_free, &frag)) + if (_optimized_add(&trx_buf->hdr_buffer, subs[i].topic, strlen(subs[i].topic), subs[i].topic_free, &frag)) goto fail_rollback; BUFFER_TRANSACTION_NEW_FRAG(&trx_buf->hdr_buffer, 0, frag, goto fail_rollback); *WRITE_POS(frag) = subs[i].options; @@ -1264,12 +1192,11 @@ fail_rollback: int mqtt_ng_subscribe(struct mqtt_ng_client *client, struct mqtt_sub *subs, size_t sub_count) { - TRY_GENERATE_MESSAGE(mqtt_ng_generate_subscribe, client, subs, sub_count); + 
TRY_GENERATE_MESSAGE(mqtt_ng_generate_subscribe, subs, sub_count); } -int mqtt_ng_generate_disconnect(struct transaction_buffer *trx_buf, mqtt_wss_log_ctx_t log_ctx, uint8_t reason_code) +int mqtt_ng_generate_disconnect(struct transaction_buffer *trx_buf, uint8_t reason_code) { - (void) log_ctx; // >> START THE RODEO << transaction_buffer_transaction_start(trx_buf); @@ -1308,12 +1235,11 @@ fail_rollback: int mqtt_ng_disconnect(struct mqtt_ng_client *client, uint8_t reason_code) { - TRY_GENERATE_MESSAGE(mqtt_ng_generate_disconnect, client, reason_code); + TRY_GENERATE_MESSAGE(mqtt_ng_generate_disconnect, reason_code); } -static int mqtt_generate_puback(struct transaction_buffer *trx_buf, mqtt_wss_log_ctx_t log_ctx, uint16_t packet_id, uint8_t reason_code) +static int mqtt_generate_puback(struct transaction_buffer *trx_buf, uint16_t packet_id, uint8_t reason_code) { - (void) log_ctx; // >> START THE RODEO << transaction_buffer_transaction_start(trx_buf); @@ -1353,7 +1279,7 @@ fail_rollback: static int mqtt_ng_puback(struct mqtt_ng_client *client, uint16_t packet_id, uint8_t reason_code) { - TRY_GENERATE_MESSAGE(mqtt_generate_puback, client, packet_id, reason_code); + TRY_GENERATE_MESSAGE(mqtt_generate_puback, packet_id, reason_code); } int mqtt_ng_ping(struct mqtt_ng_client *client) @@ -1370,7 +1296,6 @@ int mqtt_ng_ping(struct mqtt_ng_client *client) #define MQTT_NG_CLIENT_PROTOCOL_ERROR -1 #define MQTT_NG_CLIENT_SERVER_RETURNED_ERROR -2 #define MQTT_NG_CLIENT_NOT_IMPL_YET -3 -#define MQTT_NG_CLIENT_OOM -4 #define MQTT_NG_CLIENT_INTERNAL_ERROR -5 #define BUF_READ_CHECK_AT_LEAST(buf, x) \ @@ -1379,10 +1304,10 @@ int mqtt_ng_ping(struct mqtt_ng_client *client) #define vbi_parser_reset_ctx(ctx) memset(ctx, 0, sizeof(struct mqtt_vbi_parser_ctx)) -static int vbi_parser_parse(struct mqtt_vbi_parser_ctx *ctx, rbuf_t data, mqtt_wss_log_ctx_t log) +static int vbi_parser_parse(struct mqtt_vbi_parser_ctx *ctx, rbuf_t data) { if (ctx->bytes > MQTT_VBI_MAXBYTES - 1) { - mws_error(log, "MQTT Variable Byte Integer can't be longer than %d bytes", MQTT_VBI_MAXBYTES); + nd_log(NDLS_DAEMON, NDLP_ERR, "MQTT Variable Byte Integer can't be longer than %d bytes", MQTT_VBI_MAXBYTES); return MQTT_NG_CLIENT_PROTOCOL_ERROR; } if (!ctx->bytes || ctx->data[ctx->bytes-1] & MQTT_VBI_CONTINUATION_FLAG) { @@ -1394,7 +1319,7 @@ static int vbi_parser_parse(struct mqtt_vbi_parser_ctx *ctx, rbuf_t data, mqtt_w } if (mqtt_vbi_to_uint32(ctx->data, &ctx->result)) { - mws_error(log, "MQTT Variable Byte Integer failed to be parsed."); + nd_log(NDLS_DAEMON, NDLP_ERR, "MQTT Variable Byte Integer failed to be parsed."); return MQTT_NG_CLIENT_PROTOCOL_ERROR; } @@ -1480,12 +1405,12 @@ struct mqtt_property *get_property_by_id(struct mqtt_property *props, uint8_t pr } // Parses [MQTT-2.2.2] -static int parse_properties_array(struct mqtt_properties_parser_ctx *ctx, rbuf_t data, mqtt_wss_log_ctx_t log) +static int parse_properties_array(struct mqtt_properties_parser_ctx *ctx, rbuf_t data) { int rc; switch (ctx->state) { case PROPERTIES_LENGTH: - rc = vbi_parser_parse(&ctx->vbi_parser_ctx, data, log); + rc = vbi_parser_parse(&ctx->vbi_parser_ctx, data); if (rc == MQTT_NG_CLIENT_PARSE_DONE) { ctx->properties_length = ctx->vbi_parser_ctx.result; ctx->bytes_consumed += ctx->vbi_parser_ctx.bytes; @@ -1534,7 +1459,7 @@ static int parse_properties_array(struct mqtt_properties_parser_ctx *ctx, rbuf_t ctx->state = PROPERTY_TYPE_STR_BIN_LEN; break; default: - mws_error(log, "Unsupported property type %d for property id %d.", (int)ctx->tail->type, 
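
vbi_parser_parse() above consumes an MQTT Variable Byte Integer incrementally as bytes arrive on the ring buffer. The encoding (section 1.5.5 of the MQTT 5 specification) stores seven bits per byte, least significant group first, uses bit 7 as a continuation flag, and caps the field at four bytes, for a maximum value of 268,435,455. A one-shot decoder over a complete buffer shows the arithmetic without the parser-state bookkeeping:

    #include <stddef.h>
    #include <stdint.h>

    #define VBI_MAXBYTES      4
    #define VBI_CONTINUATION  0x80

    /* Returns bytes consumed, or 0 if the input is truncated or overlong. */
    static size_t vbi_decode(const uint8_t *buf, size_t len, uint32_t *out)
    {
        uint32_t value = 0, multiplier = 1;
        for (size_t i = 0; i < len && i < VBI_MAXBYTES; i++) {
            value += (uint32_t)(buf[i] & 0x7F) * multiplier;
            if (!(buf[i] & VBI_CONTINUATION)) {
                *out = value;
                return i + 1;
            }
            multiplier *= 128;
        }
        return 0;   /* ran out of input, or 4 bytes all had the continuation bit set */
    }

For example, the bytes 0xC1 0x02 decode to 321: (0xC1 & 0x7F) + 0x02 * 128.
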
(int)ctx->tail->id); + nd_log(NDLS_DAEMON, NDLP_ERR, "Unsupported property type %d for property id %d.", (int)ctx->tail->type, (int)ctx->tail->id); return MQTT_NG_CLIENT_PROTOCOL_ERROR; } break; @@ -1552,7 +1477,7 @@ static int parse_properties_array(struct mqtt_properties_parser_ctx *ctx, rbuf_t ctx->state = PROPERTY_TYPE_STR; break; default: - mws_error(log, "Unexpected datatype in PROPERTY_TYPE_STR_BIN_LEN %d", (int)ctx->tail->type); + nd_log(NDLS_DAEMON, NDLP_ERR, "Unexpected datatype in PROPERTY_TYPE_STR_BIN_LEN %d", (int)ctx->tail->type); return MQTT_NG_CLIENT_INTERNAL_ERROR; } break; @@ -1577,7 +1502,7 @@ static int parse_properties_array(struct mqtt_properties_parser_ctx *ctx, rbuf_t ctx->state = PROPERTY_NEXT; break; case PROPERTY_TYPE_VBI: - rc = vbi_parser_parse(&ctx->vbi_parser_ctx, data, log); + rc = vbi_parser_parse(&ctx->vbi_parser_ctx, data); if (rc == MQTT_NG_CLIENT_PARSE_DONE) { ctx->tail->data.uint32 = ctx->vbi_parser_ctx.result; ctx->bytes_consumed += ctx->vbi_parser_ctx.bytes; @@ -1627,9 +1552,9 @@ static int parse_connack_varhdr(struct mqtt_ng_client *client) mqtt_properties_parser_ctx_reset(&parser->properties_parser); break; case MQTT_PARSE_VARHDR_PROPS: - return parse_properties_array(&parser->properties_parser, parser->received_data, client->log); + return parse_properties_array(&parser->properties_parser, parser->received_data); default: - ERROR("invalid state for connack varhdr parser"); + nd_log(NDLS_DAEMON, NDLP_ERR, "invalid state for connack varhdr parser"); return MQTT_NG_CLIENT_INTERNAL_ERROR; } return MQTT_NG_CLIENT_OK_CALL_AGAIN; @@ -1653,9 +1578,9 @@ static int parse_disconnect_varhdr(struct mqtt_ng_client *client) mqtt_properties_parser_ctx_reset(&parser->properties_parser); break; case MQTT_PARSE_VARHDR_PROPS: - return parse_properties_array(&parser->properties_parser, parser->received_data, client->log); + return parse_properties_array(&parser->properties_parser, parser->received_data); default: - ERROR("invalid state for connack varhdr parser"); + nd_log(NDLS_DAEMON, NDLP_ERR, "invalid state for connack varhdr parser"); return MQTT_NG_CLIENT_INTERNAL_ERROR; } return MQTT_NG_CLIENT_OK_CALL_AGAIN; @@ -1691,9 +1616,9 @@ static int parse_puback_varhdr(struct mqtt_ng_client *client) mqtt_properties_parser_ctx_reset(&parser->properties_parser); /* FALLTHROUGH */ case MQTT_PARSE_VARHDR_PROPS: - return parse_properties_array(&parser->properties_parser, parser->received_data, client->log); + return parse_properties_array(&parser->properties_parser, parser->received_data); default: - ERROR("invalid state for puback varhdr parser"); + nd_log(NDLS_DAEMON, NDLP_ERR, "invalid state for puback varhdr parser"); return MQTT_NG_CLIENT_INTERNAL_ERROR; } return MQTT_NG_CLIENT_OK_CALL_AGAIN; @@ -1716,7 +1641,7 @@ static int parse_suback_varhdr(struct mqtt_ng_client *client) mqtt_properties_parser_ctx_reset(&parser->properties_parser); /* FALLTHROUGH */ case MQTT_PARSE_VARHDR_PROPS: - rc = parse_properties_array(&parser->properties_parser, parser->received_data, client->log); + rc = parse_properties_array(&parser->properties_parser, parser->received_data); if (rc != MQTT_NG_CLIENT_PARSE_DONE) return rc; parser->mqtt_parsed_len += parser->properties_parser.bytes_consumed; @@ -1737,7 +1662,7 @@ static int parse_suback_varhdr(struct mqtt_ng_client *client) return MQTT_NG_CLIENT_NEED_MORE_BYTES; default: - ERROR("invalid state for suback varhdr parser"); + nd_log(NDLS_DAEMON, NDLP_ERR, "invalid state for suback varhdr parser"); return MQTT_NG_CLIENT_INTERNAL_ERROR; } return 
MQTT_NG_CLIENT_OK_CALL_AGAIN; @@ -1761,8 +1686,6 @@ static int parse_publish_varhdr(struct mqtt_ng_client *client) break; } publish->topic = callocz(1, publish->topic_len + 1 /* add 0x00 */); - if (publish->topic == NULL) - return MQTT_NG_CLIENT_OOM; parser->varhdr_state = MQTT_PARSE_VARHDR_TOPICNAME; /* FALLTHROUGH */ case MQTT_PARSE_VARHDR_TOPICNAME: @@ -1788,7 +1711,7 @@ static int parse_publish_varhdr(struct mqtt_ng_client *client) parser->mqtt_parsed_len += 2; /* FALLTHROUGH */ case MQTT_PARSE_VARHDR_PROPS: - rc = parse_properties_array(&parser->properties_parser, parser->received_data, client->log); + rc = parse_properties_array(&parser->properties_parser, parser->received_data); if (rc != MQTT_NG_CLIENT_PARSE_DONE) return rc; parser->mqtt_parsed_len += parser->properties_parser.bytes_consumed; @@ -1798,7 +1721,7 @@ static int parse_publish_varhdr(struct mqtt_ng_client *client) if (parser->mqtt_fixed_hdr_remaining_length < parser->mqtt_parsed_len) { freez(publish->topic); publish->topic = NULL; - ERROR("Error parsing PUBLISH message"); + nd_log(NDLS_DAEMON, NDLP_ERR, "Error parsing PUBLISH message"); return MQTT_NG_CLIENT_PROTOCOL_ERROR; } publish->data_len = parser->mqtt_fixed_hdr_remaining_length - parser->mqtt_parsed_len; @@ -1809,18 +1732,12 @@ static int parse_publish_varhdr(struct mqtt_ng_client *client) BUF_READ_CHECK_AT_LEAST(parser->received_data, publish->data_len); publish->data = mallocz(publish->data_len); - if (publish->data == NULL) { - freez(publish->topic); - publish->topic = NULL; - return MQTT_NG_CLIENT_OOM; - } - rbuf_pop(parser->received_data, publish->data, publish->data_len); parser->mqtt_parsed_len += publish->data_len; return MQTT_NG_CLIENT_PARSE_DONE; default: - ERROR("invalid state for publish varhdr parser"); + nd_log(NDLS_DAEMON, NDLP_ERR, "invalid state for publish varhdr parser"); return MQTT_NG_CLIENT_INTERNAL_ERROR; } return MQTT_NG_CLIENT_OK_CALL_AGAIN; @@ -1840,7 +1757,7 @@ static int parse_data(struct mqtt_ng_client *client) parser->state = MQTT_PARSE_FIXED_HEADER_LEN; break; case MQTT_PARSE_FIXED_HEADER_LEN: - rc = vbi_parser_parse(&parser->vbi_parser, parser->received_data, client->log); + rc = vbi_parser_parse(&parser->vbi_parser, parser->received_data); if (rc == MQTT_NG_CLIENT_PARSE_DONE) { parser->mqtt_fixed_hdr_remaining_length = parser->vbi_parser.result; parser->state = MQTT_PARSE_VARIABLE_HEADER; @@ -1883,10 +1800,11 @@ static int parse_data(struct mqtt_ng_client *client) return rc; case MQTT_CPT_PINGRESP: if (parser->mqtt_fixed_hdr_remaining_length) { - ERROR ("PINGRESP has to be 0 Remaining Length."); // [MQTT-3.13.1] + nd_log(NDLS_DAEMON, NDLP_ERR, "PINGRESP has to be 0 Remaining Length."); // [MQTT-3.13.1] return MQTT_NG_CLIENT_PROTOCOL_ERROR; } parser->state = MQTT_PARSE_MQTT_PACKET_DONE; + ping_timeout = 0; break; case MQTT_CPT_DISCONNECT: rc = parse_disconnect_varhdr(client); @@ -1896,7 +1814,7 @@ static int parse_data(struct mqtt_ng_client *client) } return rc; default: - ERROR("Parsing Control Packet Type %" PRIu8 " not implemented yet.", get_control_packet_type(parser->mqtt_control_packet_type)); + nd_log(NDLS_DAEMON, NDLP_ERR, "Parsing Control Packet Type %" PRIu8 " not implemented yet.", get_control_packet_type(parser->mqtt_control_packet_type)); rbuf_bump_tail(parser->received_data, parser->mqtt_fixed_hdr_remaining_length); parser->state = MQTT_PARSE_MQTT_PACKET_DONE; return MQTT_NG_CLIENT_NOT_IMPL_YET; @@ -1916,12 +1834,12 @@ static int parse_data(struct mqtt_ng_client *client) // return -1 on error // return 0 if there is 
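
parse_publish_varhdr() above ends with the only length arithmetic PUBLISH allows: the payload has no length field of its own, so it is whatever remains of the fixed header's Remaining Length once the variable header (topic, optional packet identifier, properties) has been consumed, and a variable header that overruns the declared length is a protocol error. The check and subtraction in isolation, with shortened names:

    #include <stddef.h>
    #include <stdint.h>

    /* Returns 0 and sets *payload_len, or -1 if the variable header overran the frame. */
    static int publish_payload_len(uint32_t remaining_length,  /* from the fixed header */
                                   size_t parsed_len,          /* varhdr bytes consumed */
                                   size_t *payload_len)
    {
        if (remaining_length < parsed_len)
            return -1;                 /* malformed: parser read past the packet end */
        *payload_len = remaining_length - parsed_len;
        return 0;
    }
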
fragment set static int mqtt_ng_next_to_send(struct mqtt_ng_client *client) { - if (client->client_state == CONNECT_PENDING) { + if (client->client_state == MQTT_STATE_CONNECT_PENDING) { client->main_buffer.sending_frag = client->connect_msg; - client->client_state = CONNECTING; + client->client_state = MQTT_STATE_CONNECTING; return 0; } - if (client->client_state != CONNECTED) + if (client->client_state != MQTT_STATE_CONNECTED) return -1; struct buffer_fragment *frag = BUFFER_FIRST_FRAG(&client->main_buffer.hdr_buffer); @@ -1959,7 +1877,7 @@ static int send_fragment(struct mqtt_ng_client *client) { if (bytes) processed = client->send_fnc_ptr(client->user_ctx, ptr, bytes); else - WARN("This fragment was fully sent already. This should not happen!"); + nd_log(NDLS_DAEMON, NDLP_WARNING, "This fragment was fully sent already. This should not happen!"); frag->sent += processed; if (frag->sent != frag->len) @@ -1967,11 +1885,11 @@ static int send_fragment(struct mqtt_ng_client *client) { if (frag->flags & BUFFER_FRAG_MQTT_PACKET_TAIL) { client->time_of_last_send = time(NULL); - pthread_mutex_lock(&client->stats_mutex); + spinlock_lock(&client->stats_spinlock); if (client->main_buffer.sending_frag != &ping_frag) client->stats.tx_messages_queued--; client->stats.tx_messages_sent++; - pthread_mutex_unlock(&client->stats_mutex); + spinlock_unlock(&client->stats_spinlock); client->main_buffer.sending_frag = NULL; return 1; } @@ -1995,7 +1913,7 @@ static void try_send_all(struct mqtt_ng_client *client) { } while(send_all_message_fragments(client) >= 0); } -static inline void mark_message_for_gc(struct buffer_fragment *frag) +static void mark_message_for_gc(struct buffer_fragment *frag) { while (frag) { frag->flags |= BUFFER_FRAG_GARBAGE_COLLECT; @@ -2013,7 +1931,7 @@ static int mark_packet_acked(struct mqtt_ng_client *client, uint16_t packet_id) while (frag) { if ( (frag->flags & BUFFER_FRAG_MQTT_PACKET_HEAD) && frag->packet_id == packet_id) { if (!frag->sent) { - ERROR("Received packet_id (%" PRIu16 ") belongs to MQTT packet which was not yet sent!", packet_id); + nd_log(NDLS_DAEMON, NDLP_ERR, "Received packet_id (%" PRIu16 ") belongs to MQTT packet which was not yet sent!", packet_id); UNLOCK_HDR_BUFFER(&client->main_buffer); return 1; } @@ -2023,7 +1941,7 @@ static int mark_packet_acked(struct mqtt_ng_client *client, uint16_t packet_id) } frag = frag->next; } - ERROR("Received packet_id (%" PRIu16 ") is unknown!", packet_id); + nd_log(NDLS_DAEMON, NDLP_ERR, "Received packet_id (%" PRIu16 ") is unknown!", packet_id); UNLOCK_HDR_BUFFER(&client->main_buffer); return 1; } @@ -2031,110 +1949,113 @@ static int mark_packet_acked(struct mqtt_ng_client *client, uint16_t packet_id) int handle_incoming_traffic(struct mqtt_ng_client *client) { int rc; + while ((rc = parse_data(client)) == MQTT_NG_CLIENT_OK_CALL_AGAIN) { + ; + } + if (rc != MQTT_NG_CLIENT_MQTT_PACKET_DONE) + return rc; + struct mqtt_publish *pub; - while( (rc = parse_data(client)) == MQTT_NG_CLIENT_OK_CALL_AGAIN ); - if ( rc == MQTT_NG_CLIENT_MQTT_PACKET_DONE ) { - struct mqtt_property *prop; -#ifdef MQTT_DEBUG_VERBOSE - DEBUG("MQTT Packet Parsed Successfully!"); -#endif - pthread_mutex_lock(&client->stats_mutex); - client->stats.rx_messages_rcvd++; - pthread_mutex_unlock(&client->stats_mutex); - - switch (get_control_packet_type(client->parser.mqtt_control_packet_type)) { - case MQTT_CPT_CONNACK: -#ifdef MQTT_DEBUG_VERBOSE - DEBUG("Received CONNACK"); -#endif - LOCK_HDR_BUFFER(&client->main_buffer); - mark_message_for_gc(client->connect_msg); 
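
mqtt_ng_next_to_send() above is the gatekeeper of the renamed connection states: from MQTT_STATE_CONNECT_PENDING only the stored CONNECT message may go out (moving the client to CONNECTING), and ordinary queued traffic flows only once CONNACK has promoted the client to CONNECTED. The lifecycle reduced to a sketch; the enum values stand in for the patch's MQTT_STATE_* constants:

    /* Stand-ins for the MQTT_STATE_* values introduced by the patch. */
    enum client_state {
        STATE_RAW,               /* fresh or reset, nothing may be sent        */
        STATE_CONNECT_PENDING,   /* CONNECT generated but not yet on the wire  */
        STATE_CONNECTING,        /* CONNECT sent, waiting for CONNACK          */
        STATE_CONNECTED,         /* CONNACK accepted, normal operation         */
        STATE_ERROR,             /* protocol or server error, sync returns 1   */
        STATE_DISCONNECTED       /* server sent DISCONNECT                     */
    };

    /* Ordinary queued traffic may only leave the buffer in CONNECTED. */
    static int may_send_regular_traffic(enum client_state s)
    {
        return s == STATE_CONNECTED;
    }
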
- UNLOCK_HDR_BUFFER(&client->main_buffer); - client->connect_msg = NULL; - if (client->client_state != CONNECTING) { - ERROR("Received unexpected CONNACK"); - client->client_state = ERROR; - return MQTT_NG_CLIENT_PROTOCOL_ERROR; - } - if ((prop = get_property_by_id(client->parser.properties_parser.head, MQTT_PROP_MAX_PKT_SIZE)) != NULL) { - INFO("MQTT server limits message size to %" PRIu32, prop->data.uint32); - client->max_msg_size = prop->data.uint32; - } - if (client->connack_callback) - client->connack_callback(client->user_ctx, client->parser.mqtt_packet.connack.reason_code); - if (!client->parser.mqtt_packet.connack.reason_code) { - INFO("MQTT Connection Accepted By Server"); - client->client_state = CONNECTED; - break; - } - client->client_state = ERROR; - return MQTT_NG_CLIENT_SERVER_RETURNED_ERROR; - case MQTT_CPT_PUBACK: -#ifdef MQTT_DEBUG_VERBOSE - DEBUG("Received PUBACK %" PRIu16, client->parser.mqtt_packet.puback.packet_id); -#endif - if (mark_packet_acked(client, client->parser.mqtt_packet.puback.packet_id)) - return MQTT_NG_CLIENT_PROTOCOL_ERROR; - if (client->puback_callback) - client->puback_callback(client->parser.mqtt_packet.puback.packet_id); - break; - case MQTT_CPT_PINGRESP: -#ifdef MQTT_DEBUG_VERBOSE - DEBUG("Received PINGRESP"); -#endif - break; - case MQTT_CPT_SUBACK: -#ifdef MQTT_DEBUG_VERBOSE - DEBUG("Received SUBACK %" PRIu16, client->parser.mqtt_packet.suback.packet_id); -#endif - if (mark_packet_acked(client, client->parser.mqtt_packet.suback.packet_id)) - return MQTT_NG_CLIENT_PROTOCOL_ERROR; + struct mqtt_property *prop; + spinlock_lock(&client->stats_spinlock); + client->stats.rx_messages_rcvd++; + spinlock_unlock(&client->stats_spinlock); + + uint8_t ctrl_packet_type = get_control_packet_type(client->parser.mqtt_control_packet_type); + switch (ctrl_packet_type) { + case MQTT_CPT_CONNACK: + LOCK_HDR_BUFFER(&client->main_buffer); + mark_message_for_gc(client->connect_msg); + UNLOCK_HDR_BUFFER(&client->main_buffer); + + client->connect_msg = NULL; + + if (client->client_state != MQTT_STATE_CONNECTING) { + nd_log(NDLS_DAEMON, NDLP_ERR, "Received unexpected CONNACK"); + client->client_state = MQTT_STATE_ERROR; + return MQTT_NG_CLIENT_PROTOCOL_ERROR; + } + + if ((prop = get_property_by_id(client->parser.properties_parser.head, MQTT_PROP_MAX_PKT_SIZE)) != NULL) { + nd_log(NDLS_DAEMON, NDLP_INFO, "MQTT server limits message size to %" PRIu32, prop->data.uint32); + client->max_msg_size = prop->data.uint32; + } + + if (client->connack_callback) + client->connack_callback(client->user_ctx, client->parser.mqtt_packet.connack.reason_code); + if (!client->parser.mqtt_packet.connack.reason_code) { + nd_log(NDLS_DAEMON, NDLP_INFO, "MQTT Connection Accepted By Server"); + client->client_state = MQTT_STATE_CONNECTED; break; - case MQTT_CPT_PUBLISH: -#ifdef MQTT_DEBUG_VERBOSE - DEBUG("Recevied PUBLISH"); -#endif - pub = &client->parser.mqtt_packet.publish; - if (pub->qos > 1) { - freez(pub->topic); - freez(pub->data); - return MQTT_NG_CLIENT_NOT_IMPL_YET; - } - if ( pub->qos == 1 && (rc = mqtt_ng_puback(client, pub->packet_id, 0)) ) { - client->client_state = ERROR; - ERROR("Error generating PUBACK reply for PUBLISH"); - return rc; - } - if ( (prop = get_property_by_id(client->parser.properties_parser.head, MQTT_PROP_TOPIC_ALIAS)) != NULL ) { - // Topic Alias property was sent from server - void *topic_ptr; - if (!c_rhash_get_ptr_by_uint64(client->rx_aliases, prop->data.uint8, &topic_ptr)) { - if (pub->topic != NULL) { - ERROR("We do not yet support topic alias 
reassignment"); - return MQTT_NG_CLIENT_NOT_IMPL_YET; - } - pub->topic = topic_ptr; - } else { - if (pub->topic == NULL) { - ERROR("Topic alias with id %d unknown and topic not set by server!", prop->data.uint8); - return MQTT_NG_CLIENT_PROTOCOL_ERROR; - } - c_rhash_insert_uint64_ptr(client->rx_aliases, prop->data.uint8, pub->topic); + } + client->client_state = MQTT_STATE_ERROR; + return MQTT_NG_CLIENT_SERVER_RETURNED_ERROR; + + case MQTT_CPT_PUBACK: + if (mark_packet_acked(client, client->parser.mqtt_packet.puback.packet_id)) + return MQTT_NG_CLIENT_PROTOCOL_ERROR; + if (client->puback_callback) + client->puback_callback(client->parser.mqtt_packet.puback.packet_id); + break; + + case MQTT_CPT_PINGRESP: + break; + + case MQTT_CPT_SUBACK: + if (mark_packet_acked(client, client->parser.mqtt_packet.suback.packet_id)) + return MQTT_NG_CLIENT_PROTOCOL_ERROR; + break; + + case MQTT_CPT_PUBLISH: + pub = &client->parser.mqtt_packet.publish; + + if (pub->qos > 1) { + freez(pub->topic); + freez(pub->data); + return MQTT_NG_CLIENT_NOT_IMPL_YET; + } + + if ( pub->qos == 1 && ((rc = mqtt_ng_puback(client, pub->packet_id, 0))) ) { + client->client_state = MQTT_STATE_ERROR; + nd_log(NDLS_DAEMON, NDLP_ERR, "Error generating PUBACK reply for PUBLISH"); + return rc; + } + + if ( (prop = get_property_by_id(client->parser.properties_parser.head, MQTT_PROP_TOPIC_ALIAS)) != NULL ) { + // Topic Alias property was sent from server + void *topic_ptr; + if (!c_rhash_get_ptr_by_uint64(client->rx_aliases, prop->data.uint8, &topic_ptr)) { + if (pub->topic != NULL) { + nd_log(NDLS_DAEMON, NDLP_ERR, "We do not yet support topic alias reassignment"); + return MQTT_NG_CLIENT_NOT_IMPL_YET; } + pub->topic = topic_ptr; + } else { + if (pub->topic == NULL) { + nd_log(NDLS_DAEMON, NDLP_ERR, "Topic alias with id %d unknown and topic not set by server!", prop->data.uint8); + return MQTT_NG_CLIENT_PROTOCOL_ERROR; + } + c_rhash_insert_uint64_ptr(client->rx_aliases, prop->data.uint8, pub->topic); } - if (client->msg_callback) - client->msg_callback(pub->topic, pub->data, pub->data_len, pub->qos); - // in case we have property topic alias and we have topic we take over the string - // and add pointer to it into topic alias list - if (prop == NULL) - freez(pub->topic); - freez(pub->data); - return MQTT_NG_CLIENT_WANT_WRITE; - case MQTT_CPT_DISCONNECT: - INFO ("Got MQTT DISCONNECT control packet from server. Reason code: %d", (int)client->parser.mqtt_packet.disconnect.reason_code); - client->client_state = DISCONNECTED; - break; - } + } + + if (client->msg_callback) + client->msg_callback(pub->topic, pub->data, pub->data_len, pub->qos); + // in case we have property topic alias and we have topic we take over the string + // and add pointer to it into topic alias list + if (prop == NULL) + freez(pub->topic); + freez(pub->data); + return MQTT_NG_CLIENT_WANT_WRITE; + + case MQTT_CPT_DISCONNECT: + nd_log(NDLS_DAEMON, NDLP_INFO, "Got MQTT DISCONNECT control packet from server. 
Reason code: %d", (int)client->parser.mqtt_packet.disconnect.reason_code); + client->client_state = MQTT_STATE_DISCONNECTED; + break; + + default: + nd_log(NDLS_DAEMON, NDLP_INFO, "Got unknown control packet %u from server", ctrl_packet_type); + break; } return rc; @@ -2142,10 +2063,10 @@ int handle_incoming_traffic(struct mqtt_ng_client *client) int mqtt_ng_sync(struct mqtt_ng_client *client) { - if (client->client_state == RAW || client->client_state == DISCONNECTED) + if (client->client_state == MQTT_STATE_RAW || client->client_state == MQTT_STATE_DISCONNECTED) return 0; - if (client->client_state == ERROR) + if (client->client_state == MQTT_STATE_ERROR) return 1; LOCK_HDR_BUFFER(&client->main_buffer); @@ -2182,9 +2103,9 @@ void mqtt_ng_set_max_mem(struct mqtt_ng_client *client, size_t bytes) void mqtt_ng_get_stats(struct mqtt_ng_client *client, struct mqtt_ng_stats *stats) { - pthread_mutex_lock(&client->stats_mutex); + spinlock_lock(&client->stats_spinlock); memcpy(stats, &client->stats, sizeof(struct mqtt_ng_stats)); - pthread_mutex_unlock(&client->stats_mutex); + spinlock_unlock(&client->stats_spinlock); stats->tx_bytes_queued = 0; stats->tx_buffer_reclaimable = 0; @@ -2207,11 +2128,11 @@ void mqtt_ng_get_stats(struct mqtt_ng_client *client, struct mqtt_ng_stats *stat int mqtt_ng_set_topic_alias(struct mqtt_ng_client *client, const char *topic) { uint16_t idx; - pthread_rwlock_wrlock(&client->tx_topic_aliases.rwlock); + spinlock_lock(&client->tx_topic_aliases.spinlock); if (client->tx_topic_aliases.idx_assigned >= client->tx_topic_aliases.idx_max) { - pthread_rwlock_unlock(&client->tx_topic_aliases.rwlock); - mws_error(client->log, "Tx topic alias indexes were exhausted (current version of the library doesn't support reassigning yet. Feel free to contribute."); + spinlock_unlock(&client->tx_topic_aliases.spinlock); + nd_log(NDLS_DAEMON, NDLP_ERR, "Tx topic alias indexes were exhausted (current version of the library doesn't support reassigning yet. Feel free to contribute."); return 0; //0 is not a valid topic alias } @@ -2220,8 +2141,8 @@ int mqtt_ng_set_topic_alias(struct mqtt_ng_client *client, const char *topic) // this is not a problem for library but might be helpful to warn user // as it might indicate bug in their program (but also might be expected) idx = alias->idx; - pthread_rwlock_unlock(&client->tx_topic_aliases.rwlock); - mws_debug(client->log, "%s topic \"%s\" already has alias set. Ignoring.", __FUNCTION__, topic); + spinlock_unlock(&client->tx_topic_aliases.spinlock); + nd_log(NDLS_DAEMON, NDLP_DEBUG, "%s topic \"%s\" already has alias set. 
Ignoring.", __FUNCTION__, topic); return idx; } @@ -2232,6 +2153,6 @@ int mqtt_ng_set_topic_alias(struct mqtt_ng_client *client, const char *topic) c_rhash_insert_str_ptr(client->tx_topic_aliases.stoi_dict, topic, (void*)alias); - pthread_rwlock_unlock(&client->tx_topic_aliases.rwlock); + spinlock_unlock(&client->tx_topic_aliases.spinlock); return idx; } diff --git a/src/aclk/mqtt_websockets/mqtt_ng.h b/src/aclk/mqtt_websockets/mqtt_ng.h index 4b0584d58..c5f6d94cc 100644 --- a/src/aclk/mqtt_websockets/mqtt_ng.h +++ b/src/aclk/mqtt_websockets/mqtt_ng.h @@ -1,10 +1,5 @@ -// Copyright: SPDX-License-Identifier: GPL-3.0-only +// SPDX-License-Identifier: GPL-3.0-or-later -#include -#include -#include - -#include "c-rbuf/cringbuffer.h" #include "common_public.h" #define MQTT_NG_MSGGEN_OK 0 @@ -15,7 +10,7 @@ #define MQTT_NG_MSGGEN_MSG_TOO_BIG 3 struct mqtt_ng_client; - +extern time_t ping_timeout; /* Converts integer to MQTT Variable Byte Integer as per 1.5.5 of MQTT 5 specs * @param input value to be converted * @param output pointer to memory where output will be written to. Must allow up to 4 bytes to be written. @@ -72,7 +67,6 @@ int mqtt_ng_ping(struct mqtt_ng_client *client); typedef ssize_t (*mqtt_ng_send_fnc_t)(void *user_ctx, const void* buf, size_t len); struct mqtt_ng_init { - mqtt_wss_log_ctx_t log; rbuf_t data_in; mqtt_ng_send_fnc_t data_out_fnc; void *user_ctx; diff --git a/src/aclk/mqtt_websockets/mqtt_wss_client.c b/src/aclk/mqtt_websockets/mqtt_wss_client.c index bb0e17262..5c576ced5 100644 --- a/src/aclk/mqtt_websockets/mqtt_wss_client.c +++ b/src/aclk/mqtt_websockets/mqtt_wss_client.c @@ -1,32 +1,24 @@ -// SPDX-License-Identifier: GPL-3.0-only +// SPDX-License-Identifier: GPL-3.0-or-later #ifndef _GNU_SOURCE #define _GNU_SOURCE #endif +#include "libnetdata/libnetdata.h" #include "mqtt_wss_client.h" #include "mqtt_ng.h" #include "ws_client.h" #include "common_internal.h" - -#include -#include -#include -#include -#include -#include - -#include -#include - -#include -#include +#include "../aclk.h" #define PIPE_READ_END 0 #define PIPE_WRITE_END 1 #define POLLFD_SOCKET 0 #define POLLFD_PIPE 1 +#define PING_TIMEOUT (60) //Expect a ping response within this time (seconds) +time_t ping_timeout = 0; + #if (OPENSSL_VERSION_NUMBER < OPENSSL_VERSION_110) && (SSLEAY_VERSION_NUMBER >= OPENSSL_VERSION_097) #include #endif @@ -69,6 +61,8 @@ char *util_openssl_ret_err(int err) return "SSL_ERROR_SYSCALL"; case SSL_ERROR_SSL: return "SSL_ERROR_SSL"; + default: + break; } return "UNKNOWN"; } @@ -76,8 +70,6 @@ char *util_openssl_ret_err(int err) struct mqtt_wss_client_struct { ws_client *ws_client; - mqtt_wss_log_ctx_t log; - // immediate connection (e.g. proxy server) char *host; int port; @@ -129,69 +121,49 @@ static void mws_connack_callback_ng(void *user_ctx, int code) switch(code) { case 0: client->mqtt_connected = 1; - return; + break; //TODO manual labor: all the CONNACK error codes with some nice error message default: - mws_error(client->log, "MQTT CONNACK returned error %d", code); - return; + nd_log(NDLS_DAEMON, NDLP_ERR, "MQTT CONNACK returned error %d", code); + break; } } static ssize_t mqtt_send_cb(void *user_ctx, const void* buf, size_t len) { mqtt_wss_client client = user_ctx; -#ifdef DEBUG_ULTRA_VERBOSE - mws_debug(client->log, "mqtt_pal_sendall(len=%d)", len); -#endif int ret = ws_client_send(client->ws_client, WS_OP_BINARY_FRAME, buf, len); - if (ret >= 0 && (size_t)ret != len) { -#ifdef DEBUG_ULTRA_VERBOSE - mws_debug(client->log, "Not complete message sent (Msg=%d,Sent=%d). 
Need to arm POLLOUT!", len, ret); -#endif + if (ret >= 0 && (size_t)ret != len) client->mqtt_didnt_finish_write = 1; - } return ret; } -mqtt_wss_client mqtt_wss_new(const char *log_prefix, - mqtt_wss_log_callback_t log_callback, - msg_callback_fnc_t msg_callback, - void (*puback_callback)(uint16_t packet_id)) +mqtt_wss_client mqtt_wss_new( + msg_callback_fnc_t msg_callback, + void (*puback_callback)(uint16_t packet_id)) { - mqtt_wss_log_ctx_t log; - - log = mqtt_wss_log_ctx_create(log_prefix, log_callback); - if(!log) - return NULL; - SSL_library_init(); SSL_load_error_strings(); mqtt_wss_client client = callocz(1, sizeof(struct mqtt_wss_client_struct)); - if (!client) { - mws_error(log, "OOM alocating mqtt_wss_client"); - goto fail; - } spinlock_init(&client->stat_lock); client->msg_callback = msg_callback; client->puback_callback = puback_callback; - client->ws_client = ws_client_new(0, &client->target_host, log); + client->ws_client = ws_client_new(0, &client->target_host); if (!client->ws_client) { - mws_error(log, "Error creating ws_client"); + nd_log(NDLS_DAEMON, NDLP_ERR, "Error creating ws_client"); goto fail_1; } - client->log = log; - #ifdef __APPLE__ if (pipe(client->write_notif_pipe)) { #else if (pipe2(client->write_notif_pipe, O_CLOEXEC /*| O_DIRECT*/)) { #endif - mws_error(log, "Couldn't create pipe"); + nd_log(NDLS_DAEMON, NDLP_ERR, "Couldn't create pipe"); goto fail_2; } @@ -201,7 +173,6 @@ mqtt_wss_client mqtt_wss_new(const char *log_prefix, client->poll_fds[POLLFD_SOCKET].events = POLLIN; struct mqtt_ng_init settings = { - .log = log, .data_in = client->ws_client->buf_to_mqtt, .data_out_fnc = &mqtt_send_cb, .user_ctx = client, @@ -209,22 +180,14 @@ mqtt_wss_client mqtt_wss_new(const char *log_prefix, .puback_callback = puback_callback, .msg_callback = msg_callback }; - if ( (client->mqtt = mqtt_ng_init(&settings)) == NULL ) { - mws_error(log, "Error initializing internal MQTT client"); - goto fail_3; - } + client->mqtt = mqtt_ng_init(&settings); return client; -fail_3: - close(client->write_notif_pipe[PIPE_WRITE_END]); - close(client->write_notif_pipe[PIPE_READ_END]); fail_2: ws_client_destroy(client->ws_client); fail_1: freez(client); -fail: - mqtt_wss_log_ctx_destroy(log); return NULL; } @@ -265,30 +228,25 @@ void mqtt_wss_destroy(mqtt_wss_client client) if (client->sockfd > 0) close(client->sockfd); - mqtt_wss_log_ctx_destroy(client->log); freez(client); } static int cert_verify_callback(int preverify_ok, X509_STORE_CTX *ctx) { - SSL *ssl; - X509 *err_cert; - mqtt_wss_client client; - int err = 0, depth; - char *err_str; + int err = 0; - ssl = X509_STORE_CTX_get_ex_data(ctx, SSL_get_ex_data_X509_STORE_CTX_idx()); - client = SSL_get_ex_data(ssl, 0); + SSL* ssl = X509_STORE_CTX_get_ex_data(ctx, SSL_get_ex_data_X509_STORE_CTX_idx()); + mqtt_wss_client client = SSL_get_ex_data(ssl, 0); // TODO handle depth as per https://www.openssl.org/docs/man1.0.2/man3/SSL_CTX_set_verify.html if (!preverify_ok) { err = X509_STORE_CTX_get_error(ctx); - depth = X509_STORE_CTX_get_error_depth(ctx); - err_cert = X509_STORE_CTX_get_current_cert(ctx); - err_str = X509_NAME_oneline(X509_get_subject_name(err_cert), NULL, 0); + int depth = X509_STORE_CTX_get_error_depth(ctx); + X509* err_cert = X509_STORE_CTX_get_current_cert(ctx); + char* err_str = X509_NAME_oneline(X509_get_subject_name(err_cert), NULL, 0); - mws_error(client->log, "verify error:num=%d:%s:depth=%d:%s", err, + nd_log(NDLS_DAEMON, NDLP_ERR, "verify error:num=%d:%s:depth=%d:%s", err, X509_verify_cert_error_string(err), depth, 
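
mqtt_wss_new() above wires up the classic self-pipe pattern: the read end of write_notif_pipe is polled alongside the TCP socket, so any thread can interrupt a blocking poll() in the service loop by writing a single byte, which is exactly what mqtt_wss_wakeup() does further down. A standalone demonstration, with error handling trimmed:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <poll.h>
    #include <unistd.h>

    int main(void)
    {
        int pipefd[2];
        if (pipe2(pipefd, O_CLOEXEC))               /* as in mqtt_wss_new() on Linux */
            return 1;

        struct pollfd pfd = { .fd = pipefd[0], .events = POLLIN };

        (void)write(pipefd[1], " ", 1);             /* what mqtt_wss_wakeup() does */
        (void)poll(&pfd, 1, -1);                    /* returns at once: pipe readable */

        char throwaway[32];                         /* drain it, like util_clear_pipe() */
        (void)read(pipefd[0], throwaway, sizeof(throwaway));
        return 0;
    }
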
err_str); freez(err_str); @@ -298,7 +256,7 @@ static int cert_verify_callback(int preverify_ok, X509_STORE_CTX *ctx) client->ssl_flags & MQTT_WSS_SSL_ALLOW_SELF_SIGNED) { preverify_ok = 1; - mws_error(client->log, "Self Signed Certificate Accepted as the connection was " + nd_log(NDLS_DAEMON, NDLP_ERR, "Self Signed Certificate Accepted as the connection was " "requested with MQTT_WSS_SSL_ALLOW_SELF_SIGNED"); } @@ -312,16 +270,14 @@ static int cert_verify_callback(int preverify_ok, X509_STORE_CTX *ctx) #define HTTP_HDR_TERMINATOR "\x0D\x0A\x0D\x0A" #define HTTP_CODE_LEN 4 #define HTTP_REASON_MAX_LEN 512 -static int http_parse_reply(mqtt_wss_client client, rbuf_t buf) +static int http_parse_reply(rbuf_t buf) { - char *ptr; char http_code_s[4]; - int http_code; int idx; if (rbuf_memcmp_n(buf, PROXY_HTTP, strlen(PROXY_HTTP))) { if (rbuf_memcmp_n(buf, PROXY_HTTP10, strlen(PROXY_HTTP10))) { - mws_error(client->log, "http_proxy expected reply with \"" PROXY_HTTP "\" or \"" PROXY_HTTP10 "\""); + nd_log(NDLS_DAEMON, NDLP_ERR, "http_proxy expected reply with \"" PROXY_HTTP "\" or \"" PROXY_HTTP10 "\""); return 1; } } @@ -329,39 +285,37 @@ static int http_parse_reply(mqtt_wss_client client, rbuf_t buf) rbuf_bump_tail(buf, strlen(PROXY_HTTP)); if (!rbuf_pop(buf, http_code_s, 1) || http_code_s[0] != 0x20) { - mws_error(client->log, "http_proxy missing space after \"" PROXY_HTTP "\" or \"" PROXY_HTTP10 "\""); + nd_log(NDLS_DAEMON, NDLP_ERR, "http_proxy missing space after \"" PROXY_HTTP "\" or \"" PROXY_HTTP10 "\""); return 2; } if (!rbuf_pop(buf, http_code_s, HTTP_CODE_LEN)) { - mws_error(client->log, "http_proxy missing HTTP code"); + nd_log(NDLS_DAEMON, NDLP_ERR, "http_proxy missing HTTP code"); return 3; } for (int i = 0; i < HTTP_CODE_LEN - 1; i++) if (http_code_s[i] > 0x39 || http_code_s[i] < 0x30) { - mws_error(client->log, "http_proxy HTTP code non numeric"); + nd_log(NDLS_DAEMON, NDLP_ERR, "http_proxy HTTP code non numeric"); return 4; } http_code_s[HTTP_CODE_LEN - 1] = 0; - http_code = atoi(http_code_s); + int http_code = str2i(http_code_s); // TODO check if we ever have more headers here rbuf_find_bytes(buf, HTTP_ENDLINE, strlen(HTTP_ENDLINE), &idx); if (idx >= HTTP_REASON_MAX_LEN) { - mws_error(client->log, "http_proxy returned reason that is too long"); + nd_log(NDLS_DAEMON, NDLP_ERR, "http_proxy returned reason that is too long"); return 5; } if (http_code != 200) { - ptr = mallocz(idx + 1); - if (!ptr) - return 6; + char *ptr = mallocz(idx + 1); rbuf_pop(buf, ptr, idx); ptr[idx] = 0; - mws_error(client->log, "http_proxy returned error code %d \"%s\"", http_code, ptr); + nd_log(NDLS_DAEMON, NDLP_ERR, "http_proxy returned error code %d \"%s\"", http_code, ptr); freez(ptr); return 7; }/* else @@ -374,52 +328,11 @@ static int http_parse_reply(mqtt_wss_client client, rbuf_t buf) rbuf_bump_tail(buf, strlen(HTTP_HDR_TERMINATOR)); if (rbuf_bytes_available(buf)) { - mws_error(client->log, "http_proxy unexpected trailing bytes after end of HTTP hdr"); + nd_log(NDLS_DAEMON, NDLP_ERR, "http_proxy unexpected trailing bytes after end of HTTP hdr"); return 8; } - mws_debug(client->log, "http_proxy CONNECT succeeded"); - return 0; -} - -#if defined(OPENSSL_VERSION_NUMBER) && OPENSSL_VERSION_NUMBER < OPENSSL_VERSION_110 -static EVP_ENCODE_CTX *EVP_ENCODE_CTX_new(void) -{ - EVP_ENCODE_CTX *ctx = OPENSSL_malloc(sizeof(*ctx)); - - if (ctx != NULL) { - memset(ctx, 0, sizeof(*ctx)); - } - return ctx; -} -static void EVP_ENCODE_CTX_free(EVP_ENCODE_CTX *ctx) -{ - OPENSSL_free(ctx); - return; -} -#endif - -inline 
static int base64_encode_helper(unsigned char *out, int *outl, const unsigned char *in, int in_len) -{ - int len; - unsigned char *str = out; - EVP_ENCODE_CTX *ctx = EVP_ENCODE_CTX_new(); - EVP_EncodeInit(ctx); - EVP_EncodeUpdate(ctx, str, outl, in, in_len); - str += *outl; - EVP_EncodeFinal(ctx, str, &len); - *outl += len; - - str = out; - while(*str) { - if (*str != 0x0D && *str != 0x0A) - *out++ = *str++; - else - str++; - } - *out = 0; - - EVP_ENCODE_CTX_free(ctx); + nd_log(NDLS_DAEMON, NDLP_DEBUG, "http_proxy CONNECT succeeded"); return 0; } @@ -430,13 +343,12 @@ static int http_proxy_connect(mqtt_wss_client client) rbuf_t r_buf = rbuf_create(4096); if (!r_buf) return 1; - char *r_buf_ptr; size_t r_buf_linear_insert_capacity; poll_fd.fd = client->sockfd; poll_fd.events = POLLIN; - r_buf_ptr = rbuf_get_linear_insert_range(r_buf, &r_buf_linear_insert_capacity); + char *r_buf_ptr = rbuf_get_linear_insert_range(r_buf, &r_buf_linear_insert_capacity); snprintf(r_buf_ptr, r_buf_linear_insert_capacity,"%s %s:%d %s" HTTP_ENDLINE "Host: %s" HTTP_ENDLINE, PROXY_CONNECT, client->target_host, client->target_port, PROXY_HTTP, client->target_host); write(client->sockfd, r_buf_ptr, strlen(r_buf_ptr)); @@ -445,7 +357,7 @@ static int http_proxy_connect(mqtt_wss_client client) size_t creds_plain_len = strlen(client->proxy_uname) + strlen(client->proxy_passwd) + 2; char *creds_plain = mallocz(creds_plain_len); if (!creds_plain) { - mws_error(client->log, "OOM creds_plain"); + nd_log(NDLS_DAEMON, NDLP_ERR, "OOM creds_plain"); rc = 6; goto cleanup; } @@ -456,7 +368,7 @@ static int http_proxy_connect(mqtt_wss_client client) char *creds_base64 = mallocz(creds_base64_len + 1); if (!creds_base64) { freez(creds_plain); - mws_error(client->log, "OOM creds_base64"); + nd_log(NDLS_DAEMON, NDLP_ERR, "OOM creds_base64"); rc = 6; goto cleanup; } @@ -466,8 +378,7 @@ static int http_proxy_connect(mqtt_wss_client client) *ptr++ = ':'; strcpy(ptr, client->proxy_passwd); - int b64_len; - base64_encode_helper((unsigned char*)creds_base64, &b64_len, (unsigned char*)creds_plain, strlen(creds_plain)); + (void) netdata_base64_encode((unsigned char*)creds_base64, (unsigned char*)creds_plain, strlen(creds_plain)); freez(creds_plain); r_buf_ptr = rbuf_get_linear_insert_range(r_buf, &r_buf_linear_insert_capacity); @@ -482,13 +393,13 @@ static int http_proxy_connect(mqtt_wss_client client) // or timeout while ((rc = poll(&poll_fd, 1, 1000)) >= 0) { if (!rc) { - mws_error(client->log, "http_proxy timeout waiting reply from proxy server"); + nd_log(NDLS_DAEMON, NDLP_ERR, "http_proxy timeout waiting reply from proxy server"); rc = 2; goto cleanup; } r_buf_ptr = rbuf_get_linear_insert_range(r_buf, &r_buf_linear_insert_capacity); if (!r_buf_ptr) { - mws_error(client->log, "http_proxy read ring buffer full"); + nd_log(NDLS_DAEMON, NDLP_ERR, "http_proxy read ring buffer full"); rc = 3; goto cleanup; } @@ -496,20 +407,20 @@ static int http_proxy_connect(mqtt_wss_client client) if (errno == EWOULDBLOCK || errno == EAGAIN) { continue; } - mws_error(client->log, "http_proxy error reading from socket \"%s\"", strerror(errno)); + nd_log(NDLS_DAEMON, NDLP_ERR, "http_proxy error reading from socket \"%s\"", strerror(errno)); rc = 4; goto cleanup; } rbuf_bump_head(r_buf, rc); if (rbuf_find_bytes(r_buf, HTTP_HDR_TERMINATOR, strlen(HTTP_HDR_TERMINATOR), &rc)) { rc = 0; - if (http_parse_reply(client, r_buf)) + if (http_parse_reply(r_buf)) rc = 5; goto cleanup; } } - mws_error(client->log, "proxy negotiation poll error \"%s\"", strerror(errno)); + 
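
The credential handling above builds the standard HTTP Basic authentication token (RFC 7617): the literal string user:password, base64-encoded, sent as Proxy-Authorization: Basic <token> on the CONNECT request. The patch switches from a local OpenSSL EVP helper to netdata's netdata_base64_encode(); this sketch uses OpenSSL's one-shot EVP_EncodeBlock() instead so it stands alone:

    #include <stdio.h>
    #include <openssl/evp.h>

    static void print_proxy_auth(const char *user, const char *pass)
    {
        char plain[256];
        int n = snprintf(plain, sizeof(plain), "%s:%s", user, pass);
        if (n < 0 || n >= (int)sizeof(plain))
            return;                                   /* credentials too long for this sketch */

        unsigned char b64[512];                       /* 4/3 expansion plus NUL fits easily */
        EVP_EncodeBlock(b64, (const unsigned char *)plain, n);

        printf("Proxy-Authorization: Basic %s\r\n", (char *)b64);
    }

Unlike the removed EVP_EncodeUpdate()/EVP_EncodeFinal() pair, EVP_EncodeBlock() emits no newlines, so no stripping pass is needed afterwards.
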
nd_log(NDLS_DAEMON, NDLP_ERR, "proxy negotiation poll error \"%s\"", strerror(errno)); rc = 5; cleanup: rbuf_free(r_buf); @@ -522,11 +433,11 @@ int mqtt_wss_connect( int port, struct mqtt_connect_params *mqtt_params, int ssl_flags, - struct mqtt_wss_proxy *proxy, + const struct mqtt_wss_proxy *proxy, bool *fallback_ipv4) { if (!mqtt_params) { - mws_error(client->log, "mqtt_params can't be null!"); + nd_log(NDLS_DAEMON, NDLP_ERR, "mqtt_params can't be null!"); return -1; } @@ -583,7 +494,7 @@ int mqtt_wss_connect( struct timeval timeout = { .tv_sec = 10, .tv_usec = 0 }; int fd = connect_to_this_ip46(IPPROTO_TCP, SOCK_STREAM, client->host, 0, port_str, &timeout, fallback_ipv4); if (fd < 0) { - mws_error(client->log, "Could not connect to remote endpoint \"%s\", port %d.\n", client->host, port); + nd_log(NDLS_DAEMON, NDLP_ERR, "Could not connect to remote endpoint \"%s\", port %d.\n", client->host, port); return -3; } @@ -598,12 +509,12 @@ int mqtt_wss_connect( int flag = 1; int result = setsockopt(client->sockfd, IPPROTO_TCP, TCP_NODELAY, &flag, sizeof(int)); if (result < 0) - mws_error(client->log, "Could not dissable NAGLE"); + nd_log(NDLS_DAEMON, NDLP_ERR, "Could not dissable NAGLE"); client->poll_fds[POLLFD_SOCKET].fd = client->sockfd; if (fcntl(client->sockfd, F_SETFL, fcntl(client->sockfd, F_GETFL, 0) | O_NONBLOCK) == -1) { - mws_error(client->log, "Error setting O_NONBLOCK to TCP socket. \"%s\"", strerror(errno)); + nd_log(NDLS_DAEMON, NDLP_ERR, "Error setting O_NONBLOCK to TCP socket. \"%s\"", strerror(errno)); return -8; } @@ -619,7 +530,7 @@ int mqtt_wss_connect( SSL_library_init(); #else if (OPENSSL_init_ssl(OPENSSL_INIT_LOAD_CONFIG, NULL) != 1) { - mws_error(client->log, "Failed to initialize SSL"); + nd_log(NDLS_DAEMON, NDLP_ERR, "Failed to initialize SSL"); return -1; }; #endif @@ -636,7 +547,7 @@ int mqtt_wss_connect( SSL_CTX_set_default_verify_paths(client->ssl_ctx); SSL_CTX_set_verify(client->ssl_ctx, SSL_VERIFY_PEER | SSL_VERIFY_CLIENT_ONCE, cert_verify_callback); } else - mws_error(client->log, "SSL Certificate checking completely disabled!!!"); + nd_log(NDLS_DAEMON, NDLP_ERR, "SSL Certificate checking completely disabled!!!"); #ifdef MQTT_WSS_DEBUG if(client->ssl_ctx_keylog_cb) @@ -646,7 +557,7 @@ int mqtt_wss_connect( client->ssl = SSL_new(client->ssl_ctx); if (!(client->ssl_flags & MQTT_WSS_SSL_DONT_CHECK_CERTS)) { if (!SSL_set_ex_data(client->ssl, 0, client)) { - mws_error(client->log, "Could not SSL_set_ex_data"); + nd_log(NDLS_DAEMON, NDLP_ERR, "Could not SSL_set_ex_data"); return -4; } } @@ -654,27 +565,27 @@ int mqtt_wss_connect( SSL_set_connect_state(client->ssl); if (!SSL_set_tlsext_host_name(client->ssl, client->target_host)) { - mws_error(client->log, "Error setting TLS SNI host"); + nd_log(NDLS_DAEMON, NDLP_ERR, "Error setting TLS SNI host"); return -7; } result = SSL_connect(client->ssl); if (result != -1 && result != 1) { - mws_error(client->log, "SSL could not connect"); + nd_log(NDLS_DAEMON, NDLP_ERR, "SSL could not connect"); return -5; } if (result == -1) { int ec = SSL_get_error(client->ssl, result); if (ec != SSL_ERROR_WANT_READ && ec != SSL_ERROR_WANT_WRITE) { - mws_error(client->log, "Failed to start SSL connection"); + nd_log(NDLS_DAEMON, NDLP_ERR, "Failed to start SSL connection"); return -6; } } client->mqtt_keepalive = (mqtt_params->keep_alive ? 
mqtt_params->keep_alive : 400); - mws_info(client->log, "Going to connect using internal MQTT 5 implementation"); + nd_log(NDLS_DAEMON, NDLP_INFO, "Going to connect using internal MQTT 5 implementation"); struct mqtt_auth_properties auth; auth.client_id = (char*)mqtt_params->clientid; auth.client_id_free = NULL; @@ -694,7 +605,7 @@ int mqtt_wss_connect( int ret = mqtt_ng_connect(client->mqtt, &auth, mqtt_params->will_msg ? &lwt : NULL, 1, client->mqtt_keepalive); if (ret) { - mws_error(client->log, "Error generating MQTT connect"); + nd_log(NDLS_DAEMON, NDLP_ERR, "Error generating MQTT connect"); return 1; } @@ -703,7 +614,7 @@ int mqtt_wss_connect( // wait till MQTT connection is established while (!client->mqtt_connected) { if(mqtt_wss_service(client, -1)) { - mws_error(client->log, "Error connecting to MQTT WSS server \"%s\", port %d.", host, port); + nd_log(NDLS_DAEMON, NDLP_ERR, "Error connecting to MQTT WSS server \"%s\", port %d.", host, port); return 2; } } @@ -716,14 +627,14 @@ int mqtt_wss_connect( #define NSEC_PER_MSEC 1000000ULL #define NSEC_PER_SEC 1000000000ULL -static inline uint64_t boottime_usec(mqtt_wss_client client) { +static uint64_t boottime_usec(void) { struct timespec ts; #if defined(__APPLE__) || defined(__FreeBSD__) if (clock_gettime(CLOCK_MONOTONIC, &ts) == -1) { #else if (clock_gettime(CLOCK_BOOTTIME, &ts) == -1) { #endif - mws_error(client->log, "clock_gettimte failed"); + nd_log(NDLS_DAEMON, NDLP_ERR, "clock_gettimte failed"); return 0; } return (uint64_t)ts.tv_sec * USEC_PER_SEC + (ts.tv_nsec % NSEC_PER_SEC) / NSEC_PER_USEC; @@ -732,7 +643,7 @@ static inline uint64_t boottime_usec(mqtt_wss_client client) { #define MWS_TIMED_OUT 1 #define MWS_ERROR 2 #define MWS_OK 0 -static inline const char *mqtt_wss_error_tos(int ec) +static const char *mqtt_wss_error_tos(int ec) { switch(ec) { case MWS_TIMED_OUT: @@ -745,13 +656,12 @@ static inline const char *mqtt_wss_error_tos(int ec) } -static inline int mqtt_wss_service_all(mqtt_wss_client client, int timeout_ms) +static int mqtt_wss_service_all(mqtt_wss_client client, int timeout_ms) { - uint64_t exit_by = boottime_usec(client) + (timeout_ms * NSEC_PER_MSEC); - uint64_t now; + uint64_t exit_by = boottime_usec() + (timeout_ms * NSEC_PER_MSEC); client->poll_fds[POLLFD_SOCKET].events |= POLLOUT; // TODO when entering mwtt_wss_service use out buffer size to arm POLLOUT while (rbuf_bytes_available(client->ws_client->buf_write)) { - now = boottime_usec(client); + const uint64_t now = boottime_usec(); if (now >= exit_by) return MWS_TIMED_OUT; if (mqtt_wss_service(client, exit_by - now)) @@ -762,15 +672,13 @@ static inline int mqtt_wss_service_all(mqtt_wss_client client, int timeout_ms) void mqtt_wss_disconnect(mqtt_wss_client client, int timeout_ms) { - int ret; - // block application from sending more MQTT messages client->mqtt_disconnecting = 1; // send whatever was left at the time of calling this function - ret = mqtt_wss_service_all(client, timeout_ms / 4); + int ret = mqtt_wss_service_all(client, timeout_ms / 4); if(ret) - mws_error(client->log, + nd_log(NDLS_DAEMON, NDLP_ERR, "Error while trying to send all remaining data in an attempt " "to gracefully disconnect! EC=%d Desc:\"%s\"", ret, @@ -782,7 +690,7 @@ void mqtt_wss_disconnect(mqtt_wss_client client, int timeout_ms) ret = mqtt_wss_service_all(client, timeout_ms / 4); if(ret) - mws_error(client->log, + nd_log(NDLS_DAEMON, NDLP_ERR, "Error while trying to send MQTT disconnect message in an attempt " "to gracefully disconnect! 
EC=%d Desc:\"%s\"", ret, @@ -795,7 +703,7 @@ void mqtt_wss_disconnect(mqtt_wss_client client, int timeout_ms) if(ret) { // Some MQTT/WSS servers will close socket on receipt of MQTT disconnect and // do not wait for WebSocket to be closed properly - mws_warn(client->log, + nd_log(NDLS_DAEMON, NDLP_WARNING, "Error while trying to send WebSocket disconnect message in an attempt " "to gracefully disconnect! EC=%d Desc:\"%s\".", ret, @@ -810,22 +718,19 @@ void mqtt_wss_disconnect(mqtt_wss_client client, int timeout_ms) client->sockfd = -1; } -static inline void mqtt_wss_wakeup(mqtt_wss_client client) +static void mqtt_wss_wakeup(mqtt_wss_client client) { -#ifdef DEBUG_ULTRA_VERBOSE - mws_debug(client->log, "mqtt_wss_wakup - forcing wake up of main loop"); -#endif write(client->write_notif_pipe[PIPE_WRITE_END], " ", 1); } #define THROWAWAY_BUF_SIZE 32 char throwaway[THROWAWAY_BUF_SIZE]; -static inline void util_clear_pipe(int fd) +static void util_clear_pipe(int fd) { (void)read(fd, throwaway, THROWAWAY_BUF_SIZE); } -static inline void set_socket_pollfds(mqtt_wss_client client, int ssl_ret) { +static void set_socket_pollfds(mqtt_wss_client client, int ssl_ret) { if (ssl_ret == SSL_ERROR_WANT_WRITE) client->poll_fds[POLLFD_SOCKET].events |= POLLOUT; if (ssl_ret == SSL_ERROR_WANT_READ) @@ -836,27 +741,25 @@ static int handle_mqtt_internal(mqtt_wss_client client) { int rc = mqtt_ng_sync(client->mqtt); if (rc) { - mws_error(client->log, "mqtt_ng_sync returned %d != 0", rc); + nd_log(NDLS_DAEMON, NDLP_ERR, "mqtt_ng_sync returned %d != 0", rc); client->mqtt_connected = 0; return 1; } return 0; } -#define SEC_TO_MSEC 1000 -static inline long long int t_till_next_keepalive_ms(mqtt_wss_client client) +static int t_till_next_keepalive_ms(mqtt_wss_client client) { time_t last_send = mqtt_ng_last_send_time(client->mqtt); - long long int next_mqtt_keep_alive = (last_send * SEC_TO_MSEC) - + (client->mqtt_keepalive * (SEC_TO_MSEC * 0.75 /* SEND IN ADVANCE */)); - return(next_mqtt_keep_alive - (time(NULL) * SEC_TO_MSEC)); + time_t next_mqtt_keep_alive = last_send + client->mqtt_keepalive * 0.75; + return ((next_mqtt_keep_alive - now_realtime_sec()) * MSEC_PER_SEC); } #ifdef MQTT_WSS_CPUSTATS -static inline uint64_t mqtt_wss_now_usec(mqtt_wss_client client) { +static uint64_t mqtt_wss_now_usec(void) { struct timespec ts; if(clock_gettime(CLOCK_MONOTONIC, &ts) == -1) { - mws_error(client->log, "clock_gettime(CLOCK_MONOTONIC, &timespec) failed."); + nd_log(NDLS_DAEMON, NDLP_ERR, "clock_gettime(CLOCK_MONOTONIC, &timespec) failed."); return 0; } return (uint64_t)ts.tv_sec * USEC_PER_SEC + (ts.tv_nsec % NSEC_PER_SEC) / NSEC_PER_USEC; @@ -871,63 +774,51 @@ int mqtt_wss_service(mqtt_wss_client client, int timeout_ms) int send_keepalive = 0; #ifdef MQTT_WSS_CPUSTATS - uint64_t t1,t2; - t1 = mqtt_wss_now_usec(client); -#endif - -#ifdef DEBUG_ULTRA_VERBOSE - mws_debug(client->log, ">>>>> mqtt_wss_service <<<<<"); - mws_debug(client->log, "Waiting for events: %s%s%s", - (client->poll_fds[POLLFD_SOCKET].events & POLLIN) ? "SOCKET_POLLIN " : "", - (client->poll_fds[POLLFD_SOCKET].events & POLLOUT) ? "SOCKET_POLLOUT " : "", - (client->poll_fds[POLLFD_PIPE].events & POLLIN) ? 
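
The rewritten t_till_next_keepalive_ms() above schedules PINGREQ at 75% of the negotiated keep-alive interval, keeping the client well inside the grace period (a broker may drop a client that stays silent for 1.5x the keep-alive). The arithmetic with a worked example:

    #include <stdio.h>
    #include <time.h>

    static long ms_until_next_ping(time_t last_send, time_t now, int keepalive_s)
    {
        time_t next = last_send + (time_t)(keepalive_s * 0.75);  /* send in advance */
        return (long)(next - now) * 1000;
    }

    int main(void)
    {
        /* keep-alive 60 s, last packet sent 30 s ago: ping is due at the 45 s mark */
        time_t now = time(NULL);
        printf("%ld ms\n", ms_until_next_ping(now - 30, now, 60));   /* prints 15000 */
        return 0;
    }
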
"PIPE_POLLIN" : "" ); + uint64_t t2; + uint64_t t1 = mqtt_wss_now_usec(); #endif // Check user requested TO doesn't interfere with MQTT keep alives - long long int till_next_keep_alive = t_till_next_keepalive_ms(client); - if (client->mqtt_connected && (timeout_ms < 0 || timeout_ms >= till_next_keep_alive)) { - #ifdef DEBUG_ULTRA_VERBOSE - mws_debug(client->log, "Shortening Timeout requested %d to %lld to ensure keep-alive can be sent", timeout_ms, till_next_keep_alive); - #endif - timeout_ms = till_next_keep_alive; - send_keepalive = 1; + if (!ping_timeout) { + int till_next_keep_alive = t_till_next_keepalive_ms(client); + if (till_next_keep_alive < 0) + till_next_keep_alive = 0; + if (client->mqtt_connected && (timeout_ms < 0 || timeout_ms >= till_next_keep_alive)) { + timeout_ms = till_next_keep_alive; + send_keepalive = 1; + } } #ifdef MQTT_WSS_CPUSTATS - t2 = mqtt_wss_now_usec(client); + t2 = mqtt_wss_now_usec(); client->stats.time_keepalive += t2 - t1; #endif if ((ret = poll(client->poll_fds, 2, timeout_ms >= 0 ? timeout_ms : -1)) < 0) { if (errno == EINTR) { - mws_warn(client->log, "poll interrupted by EINTR"); + nd_log(NDLS_DAEMON, NDLP_WARNING, "poll interrupted by EINTR"); return 0; } - mws_error(client->log, "poll error \"%s\"", strerror(errno)); + nd_log(NDLS_DAEMON, NDLP_ERR, "poll error \"%s\"", strerror(errno)); return -2; } -#ifdef DEBUG_ULTRA_VERBOSE - mws_debug(client->log, "Poll events happened: %s%s%s%s", - (client->poll_fds[POLLFD_SOCKET].revents & POLLIN) ? "SOCKET_POLLIN " : "", - (client->poll_fds[POLLFD_SOCKET].revents & POLLOUT) ? "SOCKET_POLLOUT " : "", - (client->poll_fds[POLLFD_PIPE].revents & POLLIN) ? "PIPE_POLLIN " : "", - (!ret) ? "POLL_TIMEOUT" : ""); -#endif - #ifdef MQTT_WSS_CPUSTATS - t1 = mqtt_wss_now_usec(client); + t1 = mqtt_wss_now_usec(); #endif if (ret == 0) { + time_t now = now_realtime_sec(); if (send_keepalive) { // otherwise we shortened the timeout ourselves to take care of // MQTT keep alives -#ifdef DEBUG_ULTRA_VERBOSE - mws_debug(client->log, "Forcing MQTT Ping/keep-alive"); -#endif mqtt_ng_ping(client->mqtt); + ping_timeout = now + PING_TIMEOUT; } else { + if (ping_timeout && ping_timeout < now) { + disconnect_req = ACLK_PING_TIMEOUT; + ping_timeout = 0; + } // if poll timed out and user requested timeout was being used // return here let user do his work and he will call us back soon return 0; @@ -935,7 +826,7 @@ int mqtt_wss_service(mqtt_wss_client client, int timeout_ms) } #ifdef MQTT_WSS_CPUSTATS - t2 = mqtt_wss_now_usec(client); + t2 = mqtt_wss_now_usec(); client->stats.time_keepalive += t2 - t1; #endif @@ -943,9 +834,6 @@ int mqtt_wss_service(mqtt_wss_client client, int timeout_ms) if ((ptr = rbuf_get_linear_insert_range(client->ws_client->buf_read, &size))) { if((ret = SSL_read(client->ssl, ptr, size)) > 0) { -#ifdef DEBUG_ULTRA_VERBOSE - mws_debug(client->log, "SSL_Read: Read %d.", ret); -#endif spinlock_lock(&client->stat_lock); client->stats.bytes_rx += ret; spinlock_unlock(&client->stat_lock); @@ -953,22 +841,19 @@ int mqtt_wss_service(mqtt_wss_client client, int timeout_ms) } else { int errnobkp = errno; ret = SSL_get_error(client->ssl, ret); -#ifdef DEBUG_ULTRA_VERBOSE - mws_debug(client->log, "Read Err: %s", util_openssl_ret_err(ret)); -#endif set_socket_pollfds(client, ret); if (ret != SSL_ERROR_WANT_READ && ret != SSL_ERROR_WANT_WRITE) { - mws_error(client->log, "SSL_read error: %d %s", ret, util_openssl_ret_err(ret)); + nd_log(NDLS_DAEMON, NDLP_ERR, "SSL_read error: %d %s", ret, util_openssl_ret_err(ret)); if (ret == 
SSL_ERROR_SYSCALL) - mws_error(client->log, "SSL_read SYSCALL errno: %d %s", errnobkp, strerror(errnobkp)); + nd_log(NDLS_DAEMON, NDLP_ERR, "SSL_read SYSCALL errno: %d %s", errnobkp, strerror(errnobkp)); return MQTT_WSS_ERR_CONN_DROP; } } } #ifdef MQTT_WSS_CPUSTATS - t1 = mqtt_wss_now_usec(client); + t1 = mqtt_wss_now_usec(); client->stats.time_read_socket += t1 - t2; #endif @@ -976,18 +861,20 @@ int mqtt_wss_service(mqtt_wss_client client, int timeout_ms) switch(ret) { case WS_CLIENT_PROTOCOL_ERROR: return MQTT_WSS_ERR_PROTO_WS; + case WS_CLIENT_NEED_MORE_BYTES: -#ifdef DEBUG_ULTRA_VERBOSE - mws_debug(client->log, "WSCLIENT WANT READ"); -#endif client->poll_fds[POLLFD_SOCKET].events |= POLLIN; break; + case WS_CLIENT_CONNECTION_CLOSED: return MQTT_WSS_ERR_CONN_DROP; + + default: + return MQTT_WSS_ERR_PROTO_WS; } #ifdef MQTT_WSS_CPUSTATS - t2 = mqtt_wss_now_usec(client); + t2 = mqtt_wss_now_usec(); client->stats.time_process_websocket += t2 - t1; #endif @@ -1002,18 +889,12 @@ int mqtt_wss_service(mqtt_wss_client client, int timeout_ms) } #ifdef MQTT_WSS_CPUSTATS - t1 = mqtt_wss_now_usec(client); + t1 = mqtt_wss_now_usec(); client->stats.time_process_mqtt += t1 - t2; #endif if ((ptr = rbuf_get_linear_read_range(client->ws_client->buf_write, &size))) { -#ifdef DEBUG_ULTRA_VERBOSE - mws_debug(client->log, "Have data to write to SSL"); -#endif if ((ret = SSL_write(client->ssl, ptr, size)) > 0) { -#ifdef DEBUG_ULTRA_VERBOSE - mws_debug(client->log, "SSL_Write: Written %d of avail %d.", ret, size); -#endif spinlock_lock(&client->stat_lock); client->stats.bytes_tx += ret; spinlock_unlock(&client->stat_lock); @@ -1021,15 +902,12 @@ int mqtt_wss_service(mqtt_wss_client client, int timeout_ms) } else { int errnobkp = errno; ret = SSL_get_error(client->ssl, ret); -#ifdef DEBUG_ULTRA_VERBOSE - mws_debug(client->log, "Write Err: %s", util_openssl_ret_err(ret)); -#endif set_socket_pollfds(client, ret); if (ret != SSL_ERROR_WANT_READ && ret != SSL_ERROR_WANT_WRITE) { - mws_error(client->log, "SSL_write error: %d %s", ret, util_openssl_ret_err(ret)); + nd_log(NDLS_DAEMON, NDLP_ERR, "SSL_write error: %d %s", ret, util_openssl_ret_err(ret)); if (ret == SSL_ERROR_SYSCALL) - mws_error(client->log, "SSL_write SYSCALL errno: %d %s", errnobkp, strerror(errnobkp)); + nd_log(NDLS_DAEMON, NDLP_ERR, "SSL_write SYSCALL errno: %d %s", errnobkp, strerror(errnobkp)); return MQTT_WSS_ERR_CONN_DROP; } } @@ -1039,7 +917,7 @@ int mqtt_wss_service(mqtt_wss_client client, int timeout_ms) util_clear_pipe(client->write_notif_pipe[PIPE_READ_END]); #ifdef MQTT_WSS_CPUSTATS - t2 = mqtt_wss_now_usec(client); + t2 = mqtt_wss_now_usec(); client->stats.time_write_socket += t2 - t1; #endif @@ -1056,12 +934,12 @@ int mqtt_wss_publish5(mqtt_wss_client client, uint16_t *packet_id) { if (client->mqtt_disconnecting) { - mws_error(client->log, "mqtt_wss is disconnecting can't publish"); + nd_log(NDLS_DAEMON, NDLP_ERR, "mqtt_wss is disconnecting can't publish"); return 1; } if (!client->mqtt_connected) { - mws_error(client->log, "MQTT is offline. Can't send message."); + nd_log(NDLS_DAEMON, NDLP_ERR, "MQTT is offline. 
Can't send message."); return 1; } uint8_t mqtt_flags = 0; @@ -1072,7 +950,7 @@ int mqtt_wss_publish5(mqtt_wss_client client, int rc = mqtt_ng_publish(client->mqtt, topic, topic_free, msg, msg_free, msg_len, mqtt_flags, packet_id); if (rc == MQTT_NG_MSGGEN_MSG_TOO_BIG) - return MQTT_WSS_ERR_TOO_BIG_FOR_SERVER; + return MQTT_WSS_ERR_MSG_TOO_BIG; mqtt_wss_wakeup(client); @@ -1083,12 +961,12 @@ int mqtt_wss_subscribe(mqtt_wss_client client, char *topic, int max_qos_level) { (void)max_qos_level; //TODO now hardcoded if (!client->mqtt_connected) { - mws_error(client->log, "MQTT is offline. Can't subscribe."); + nd_log(NDLS_DAEMON, NDLP_ERR, "MQTT is offline. Can't subscribe."); return 1; } if (client->mqtt_disconnecting) { - mws_error(client->log, "mqtt_wss is disconnecting can't subscribe"); + nd_log(NDLS_DAEMON, NDLP_ERR, "mqtt_wss is disconnecting, can't subscribe"); return 1; } diff --git a/src/aclk/mqtt_websockets/mqtt_wss_client.h b/src/aclk/mqtt_websockets/mqtt_wss_client.h index f0bdce98b..2fd94075d 100644 --- a/src/aclk/mqtt_websockets/mqtt_wss_client.h +++ b/src/aclk/mqtt_websockets/mqtt_wss_client.h @@ -1,56 +1,36 @@ -// SPDX-License-Identifier: GPL-3.0-only -// Copyright (C) 2020 Timotej Šiškovič +// SPDX-License-Identifier: GPL-3.0-or-later #ifndef MQTT_WSS_CLIENT_H #define MQTT_WSS_CLIENT_H -#include -#include -#include //size_t - -#include "mqtt_wss_log.h" #include "common_public.h" -// All OK call me at your earliest convinience -#define MQTT_WSS_OK 0 -/* All OK, poll timeout you requested when calling mqtt_wss_service expired - you might want to know if timeout * happened or we got some data or handle same as MQTT_WSS_OK - */ -#define MQTT_WSS_OK_TO 1 -// Connection was closed by remote -#define MQTT_WSS_ERR_CONN_DROP -1 -// Error in MQTT protocol (e.g. malformed packet) -#define MQTT_WSS_ERR_PROTO_MQTT -2 -// Error in WebSocket protocol (e.g. malformed packet) -#define MQTT_WSS_ERR_PROTO_WS -3 - -#define MQTT_WSS_ERR_TX_BUF_TOO_SMALL -4 -#define MQTT_WSS_ERR_RX_BUF_TOO_SMALL -5 - -#define MQTT_WSS_ERR_TOO_BIG_FOR_SERVER -6 -// if client was initialized with MQTT 3 but MQTT 5 feature -// was requested by user of library -#define MQTT_WSS_ERR_CANT_DO -8 + +#define MQTT_WSS_OK 0 // All OK, call me at your earliest convenience +#define MQTT_WSS_OK_TO 1 // All OK, poll timeout you requested when calling mqtt_wss_service expired + // you might want to know if timeout + // happened or we got some data, or handle same as MQTT_WSS_OK +#define MQTT_WSS_ERR_CONN_DROP -1 // Connection was closed by remote +#define MQTT_WSS_ERR_PROTO_MQTT -2 // Error in MQTT protocol (e.g. malformed packet) +#define MQTT_WSS_ERR_PROTO_WS -3 // Error in WebSocket protocol (e.g. malformed packet) +#define MQTT_WSS_ERR_MSG_TOO_BIG -6 // Message size too big for server +#define MQTT_WSS_ERR_CANT_DO -8 // if client was initialized with MQTT 3 but MQTT 5 feature + // was requested by user of library typedef struct mqtt_wss_client_struct *mqtt_wss_client; typedef void (*msg_callback_fnc_t)(const char *topic, const void *msg, size_t msglen, int qos); + /* Creates new instance of MQTT over WSS. Doesn't start connection. - * @param log_prefix this is prefix to be used when logging to discern between multiple - * mqtt_wss instances. Can be NULL. - * @param log_callback is function pointer to fnc to be called when mqtt_wss wants - * to log. This allows plugging this library into your own logging system/solution. - * If NULL STDOUT/STDERR will be used. 
* @param msg_callback is function pointer to function which will be called * when application level message arrives from broker (for subscribed topics). * Can be NULL if you are not interested about incoming messages. * @param puback_callback is function pointer to function to be called when QOS1 Publish * is acknowledged by server */ -mqtt_wss_client mqtt_wss_new(const char *log_prefix, - mqtt_wss_log_callback_t log_callback, - msg_callback_fnc_t msg_callback, - void (*puback_callback)(uint16_t packet_id)); +mqtt_wss_client mqtt_wss_new( + msg_callback_fnc_t msg_callback, + void (*puback_callback)(uint16_t packet_id)); void mqtt_wss_set_max_buf_size(mqtt_wss_client client, size_t size); @@ -76,7 +56,7 @@ int mqtt_wss_connect( int port, struct mqtt_connect_params *mqtt_params, int ssl_flags, - struct mqtt_wss_proxy *proxy, + const struct mqtt_wss_proxy *proxy, bool *fallback_ipv4); int mqtt_wss_service(mqtt_wss_client client, int timeout_ms); void mqtt_wss_disconnect(mqtt_wss_client client, int timeout_ms); diff --git a/src/aclk/mqtt_websockets/mqtt_wss_log.c b/src/aclk/mqtt_websockets/mqtt_wss_log.c deleted file mode 100644 index e5da76fcf..000000000 --- a/src/aclk/mqtt_websockets/mqtt_wss_log.c +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright: SPDX-License-Identifier: GPL-3.0-only - -#include -#include -#include -#include - -#include "mqtt_wss_log.h" -#include "common_internal.h" - -struct mqtt_wss_log_ctx { - mqtt_wss_log_callback_t extern_log_fnc; - char *ctx_prefix; - char *buffer; - char *buffer_w_ptr; - size_t buffer_bytes_avail; -}; - -#define LOG_BUFFER_SIZE 1024 * 4 -#define LOG_CTX_PREFIX_SEV_STR " : " -#define LOG_CTX_PREFIX_LIMIT 15 -#define LOG_CTX_PREFIX_LIMIT_STR (LOG_CTX_PREFIX_LIMIT - (2 + strlen(LOG_CTX_PREFIX_SEV_STR))) // with [] characters and affixed ' ' it is total 15 chars -#if (LOG_CTX_PREFIX_LIMIT * 10) > LOG_BUFFER_SIZE -#error "LOG_BUFFER_SIZE too small" -#endif -mqtt_wss_log_ctx_t mqtt_wss_log_ctx_create(const char *ctx_prefix, mqtt_wss_log_callback_t log_callback) -{ - mqtt_wss_log_ctx_t ctx = callocz(1, sizeof(struct mqtt_wss_log_ctx)); - if(!ctx) - return NULL; - - if(log_callback) { - ctx->extern_log_fnc = log_callback; - ctx->buffer = callocz(1, LOG_BUFFER_SIZE); - if(!ctx->buffer) - goto cleanup; - - ctx->buffer_w_ptr = ctx->buffer; - if(ctx_prefix) { - *(ctx->buffer_w_ptr++) = '['; - strncpy(ctx->buffer_w_ptr, ctx_prefix, LOG_CTX_PREFIX_LIMIT_STR); - ctx->buffer_w_ptr += strnlen(ctx_prefix, LOG_CTX_PREFIX_LIMIT_STR); - *(ctx->buffer_w_ptr++) = ']'; - } - strcpy(ctx->buffer_w_ptr, LOG_CTX_PREFIX_SEV_STR); - ctx->buffer_w_ptr += strlen(LOG_CTX_PREFIX_SEV_STR); - // no term '\0' -> calloc is used - - ctx->buffer_bytes_avail = LOG_BUFFER_SIZE - strlen(ctx->buffer); - - return ctx; - } - - if(ctx_prefix) { - ctx->ctx_prefix = strndup(ctx_prefix, LOG_CTX_PREFIX_LIMIT_STR); - if(!ctx->ctx_prefix) - goto cleanup; - } - - return ctx; - -cleanup: - freez(ctx); - return NULL; -} - -void mqtt_wss_log_ctx_destroy(mqtt_wss_log_ctx_t ctx) -{ - freez(ctx->ctx_prefix); - freez(ctx->buffer); - freez(ctx); -} - -static inline char severity_to_c(int severity) -{ - switch (severity) { - case MQTT_WSS_LOG_FATAL: - return 'F'; - case MQTT_WSS_LOG_ERROR: - return 'E'; - case MQTT_WSS_LOG_WARN: - return 'W'; - case MQTT_WSS_LOG_INFO: - return 'I'; - case MQTT_WSS_LOG_DEBUG: - return 'D'; - default: - return '?'; - } -} - -void mws_log(int severity, mqtt_wss_log_ctx_t ctx, const char *fmt, va_list args) -{ - size_t size; - - if(ctx->extern_log_fnc) { - size = 
vsnprintf(ctx->buffer_w_ptr, ctx->buffer_bytes_avail, fmt, args); - *(ctx->buffer_w_ptr - 3) = severity_to_c(severity); - - ctx->extern_log_fnc(severity, ctx->buffer); - - if(size >= ctx->buffer_bytes_avail) - mws_error(ctx, "Last message of this type was truncated! Consider what you log or increase LOG_BUFFER_SIZE if really needed."); - - return; - } - - if(ctx->ctx_prefix) - printf("[%s] ", ctx->ctx_prefix); - - printf("%c: ", severity_to_c(severity)); - - vprintf(fmt, args); - putchar('\n'); -} - -#define DEFINE_MWS_SEV_FNC(severity_fncname, severity) \ -void mws_ ## severity_fncname(mqtt_wss_log_ctx_t ctx, const char *fmt, ...) \ -{ \ - va_list args; \ - va_start(args, fmt); \ - mws_log(severity, ctx, fmt, args); \ - va_end(args); \ -} - -DEFINE_MWS_SEV_FNC(fatal, MQTT_WSS_LOG_FATAL) -DEFINE_MWS_SEV_FNC(error, MQTT_WSS_LOG_ERROR) -DEFINE_MWS_SEV_FNC(warn, MQTT_WSS_LOG_WARN ) -DEFINE_MWS_SEV_FNC(info, MQTT_WSS_LOG_INFO ) -DEFINE_MWS_SEV_FNC(debug, MQTT_WSS_LOG_DEBUG) diff --git a/src/aclk/mqtt_websockets/mqtt_wss_log.h b/src/aclk/mqtt_websockets/mqtt_wss_log.h deleted file mode 100644 index 6ae60d870..000000000 --- a/src/aclk/mqtt_websockets/mqtt_wss_log.h +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright: SPDX-License-Identifier: GPL-3.0-only - -#ifndef MQTT_WSS_LOG_H -#define MQTT_WSS_LOG_H - -typedef enum mqtt_wss_log_type { - MQTT_WSS_LOG_DEBUG = 0x01, - MQTT_WSS_LOG_INFO = 0x02, - MQTT_WSS_LOG_WARN = 0x03, - MQTT_WSS_LOG_ERROR = 0x81, - MQTT_WSS_LOG_FATAL = 0x88 -} mqtt_wss_log_type_t; - -typedef void (*mqtt_wss_log_callback_t)(mqtt_wss_log_type_t, const char*); - -typedef struct mqtt_wss_log_ctx *mqtt_wss_log_ctx_t; - -/** Creates logging context with optional prefix and optional callback - * @param ctx_prefix String to be prefixed to every log message. - * This is useful if multiple clients are instantiated to be able to - * know which one this message belongs to. Can be `NULL` for no prefix. - * @param log_callback Callback to be called instead of logging to - * `STDOUT` or `STDERR` (if debug enabled otherwise silent). Callback has to be - * pointer to function of `void function(mqtt_wss_log_type_t, const char*)` type. - * If `NULL` default will be used (silent or STDERR/STDOUT). - * @return mqtt_wss_log_ctx_t or `NULL` on error */ -mqtt_wss_log_ctx_t mqtt_wss_log_ctx_create(const char *ctx_prefix, mqtt_wss_log_callback_t log_callback); - -/** Destroys logging context and cleans up the memory - * @param ctx Context to destroy */ -void mqtt_wss_log_ctx_destroy(mqtt_wss_log_ctx_t ctx); - -void mws_fatal(mqtt_wss_log_ctx_t ctx, const char *fmt, ...); -void mws_error(mqtt_wss_log_ctx_t ctx, const char *fmt, ...); -void mws_warn (mqtt_wss_log_ctx_t ctx, const char *fmt, ...); -void mws_info (mqtt_wss_log_ctx_t ctx, const char *fmt, ...); -void mws_debug(mqtt_wss_log_ctx_t ctx, const char *fmt, ...); - -#endif /* MQTT_WSS_LOG_H */ diff --git a/src/aclk/mqtt_websockets/ws_client.c b/src/aclk/mqtt_websockets/ws_client.c index a6b9b23f3..99ea266c8 100644 --- a/src/aclk/mqtt_websockets/ws_client.c +++ b/src/aclk/mqtt_websockets/ws_client.c @@ -1,103 +1,43 @@ -// Copyright (C) 2020 Timotej Šiškovič -// SPDX-License-Identifier: GPL-3.0-only -// -// This program is free software: you can redistribute it and/or modify it -// under the terms of the GNU General Public License as published by the Free Software Foundation, version 3. 
-// -// This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; -// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -// See the GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License along with this program. -// If not, see . - -#include -#include -#include -#include -#include - -#include +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "libnetdata/libnetdata.h" #include "ws_client.h" #include "common_internal.h" -#ifdef MQTT_WEBSOCKETS_DEBUG -#include "../c-rbuf/src/ringbuffer_internal.h" -#endif - -#define UNIT_LOG_PREFIX "ws_client: " -#define FATAL(fmt, ...) mws_fatal(client->log, UNIT_LOG_PREFIX fmt, ##__VA_ARGS__) -#define ERROR(fmt, ...) mws_error(client->log, UNIT_LOG_PREFIX fmt, ##__VA_ARGS__) -#define WARN(fmt, ...) mws_warn (client->log, UNIT_LOG_PREFIX fmt, ##__VA_ARGS__) -#define INFO(fmt, ...) mws_info (client->log, UNIT_LOG_PREFIX fmt, ##__VA_ARGS__) -#define DEBUG(fmt, ...) mws_debug(client->log, UNIT_LOG_PREFIX fmt, ##__VA_ARGS__) - const char *websocket_upgrage_hdr = "GET /mqtt HTTP/1.1\x0D\x0A" "Host: %s\x0D\x0A" "Upgrade: websocket\x0D\x0A" "Connection: Upgrade\x0D\x0A" "Sec-WebSocket-Key: %s\x0D\x0A" - "Origin: http://example.com\x0D\x0A" + "Origin: \x0D\x0A" "Sec-WebSocket-Protocol: mqtt\x0D\x0A" "Sec-WebSocket-Version: 13\x0D\x0A\x0D\x0A"; const char *mqtt_protoid = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"; #define DEFAULT_RINGBUFFER_SIZE (1024*128) -#define ENTROPY_SOURCE "/dev/urandom" -ws_client *ws_client_new(size_t buf_size, char **host, mqtt_wss_log_ctx_t log) -{ - ws_client *client; +ws_client *ws_client_new(size_t buf_size, char **host) +{ if(!host) return NULL; - client = callocz(1, sizeof(ws_client)); - if (!client) - return NULL; - + ws_client *client = callocz(1, sizeof(ws_client)); client->host = host; - client->log = log; - client->buf_read = rbuf_create(buf_size ? buf_size : DEFAULT_RINGBUFFER_SIZE); - if (!client->buf_read) - goto cleanup; - client->buf_write = rbuf_create(buf_size ? buf_size : DEFAULT_RINGBUFFER_SIZE); - if (!client->buf_write) - goto cleanup_1; - client->buf_to_mqtt = rbuf_create(buf_size ? buf_size : DEFAULT_RINGBUFFER_SIZE); - if (!client->buf_to_mqtt) - goto cleanup_2; - - client->entropy_fd = open(ENTROPY_SOURCE, O_RDONLY | O_CLOEXEC); - if (client->entropy_fd < 1) { - ERROR("Error opening entropy source \"" ENTROPY_SOURCE "\". 
Reason: \"%s\"", strerror(errno)); - goto cleanup_3; - } return client; - -cleanup_3: - rbuf_free(client->buf_to_mqtt); -cleanup_2: - rbuf_free(client->buf_write); -cleanup_1: - rbuf_free(client->buf_read); -cleanup: - freez(client); - return NULL; } void ws_client_free_headers(ws_client *client) { struct http_header *ptr = client->hs.headers; - struct http_header *tmp; while (ptr) { - tmp = ptr; + struct http_header *tmp = ptr; ptr = ptr->next; freez(tmp); } @@ -112,7 +52,6 @@ void ws_client_destroy(ws_client *client) ws_client_free_headers(client); freez(client->hs.nonce_reply); freez(client->hs.http_reply_msg); - close(client->entropy_fd); rbuf_free(client->buf_read); rbuf_free(client->buf_write); rbuf_free(client->buf_to_mqtt); @@ -141,7 +80,7 @@ void ws_client_reset(ws_client *client) int ws_client_add_http_header(ws_client *client, struct http_header *hdr) { if (client->hs.hdr_count > MAX_HTTP_HDR_COUNT) { - ERROR("Too many HTTP response header fields"); + nd_log(NDLS_DAEMON, NDLP_ERR, "Too many HTTP response header fields"); return -1; } @@ -156,7 +95,7 @@ int ws_client_add_http_header(ws_client *client, struct http_header *hdr) return 0; } -int ws_client_want_write(ws_client *client) +int ws_client_want_write(const ws_client *client) { return rbuf_bytes_available(client->buf_write); } @@ -165,78 +104,89 @@ int ws_client_want_write(ws_client *client) #define TEMP_BUF_SIZE 4096 int ws_client_start_handshake(ws_client *client) { - nd_uuid_t nonce; + unsigned char nonce[WEBSOCKET_NONCE_SIZE]; char nonce_b64[256]; char second[TEMP_BUF_SIZE]; unsigned int md_len; - unsigned char *digest; + unsigned char digest[EVP_MAX_MD_SIZE]; // EVP_MAX_MD_SIZE ensures enough space EVP_MD_CTX *md_ctx; const EVP_MD *md; + int rc = 1; if(!client->host || !*client->host) { - ERROR("Hostname has not been set. We should not be able to come here!"); - return 1; - } - - uuid_generate_random(nonce); - EVP_EncodeBlock((unsigned char *)nonce_b64, (const unsigned char *)nonce, WEBSOCKET_NONCE_SIZE); - snprintf(second, TEMP_BUF_SIZE, websocket_upgrage_hdr, *client->host, nonce_b64); - - if(rbuf_bytes_free(client->buf_write) < strlen(second)) { - ERROR("Write buffer capacity too low."); + nd_log(NDLS_DAEMON, NDLP_ERR, "Hostname has not been set. 
We should not be able to come here!"); return 1; } - rbuf_push(client->buf_write, second, strlen(second)); - client->state = WS_HANDSHAKE; - - //Calculating expected Sec-WebSocket-Accept reply - snprintf(second, TEMP_BUF_SIZE, "%s%s", nonce_b64, mqtt_protoid); + // Generate a random 16-byte nonce + os_random_bytes(nonce, sizeof(nonce)); + // Initialize the digest context #if (OPENSSL_VERSION_NUMBER < OPENSSL_VERSION_110) md_ctx = EVP_MD_CTX_create(); #else md_ctx = EVP_MD_CTX_new(); #endif if (md_ctx == NULL) { - ERROR("Cant create EVP_MD Context"); + nd_log(NDLS_DAEMON, NDLP_ERR, "Can't create EVP_MD context"); return 1; } - md = EVP_get_digestbyname("sha1"); + md = EVP_sha1(); // Use SHA-1 for WebSocket handshake if (!md) { - ERROR("Unknown message digest"); - return 1; + nd_log(NDLS_DAEMON, NDLP_ERR, "Unknown message digest SHA-1"); + goto exit_with_error; } - if ((digest = (unsigned char *)OPENSSL_malloc(EVP_MD_size(EVP_sha256()))) == NULL) { - ERROR("Cant alloc digest"); - return 1; + (void) netdata_base64_encode((unsigned char *) nonce_b64, nonce, WEBSOCKET_NONCE_SIZE); + + // Format and push the upgrade header to the write buffer + size_t bytes = snprintf(second, TEMP_BUF_SIZE, websocket_upgrage_hdr, *client->host, nonce_b64); + if(rbuf_bytes_free(client->buf_write) < bytes) { + nd_log(NDLS_DAEMON, NDLP_ERR, "Write buffer capacity too low."); + goto exit_with_error; } + rbuf_push(client->buf_write, second, bytes); + + client->state = WS_HANDSHAKE; - EVP_DigestInit_ex(md_ctx, md, NULL); - EVP_DigestUpdate(md_ctx, second, strlen(second)); - EVP_DigestFinal_ex(md_ctx, digest, &md_len); + // Create the expected Sec-WebSocket-Accept value + bytes = snprintf(second, TEMP_BUF_SIZE, "%s%s", nonce_b64, mqtt_protoid); - EVP_EncodeBlock((unsigned char *)nonce_b64, digest, (int) md_len); + if (!EVP_DigestInit_ex(md_ctx, md, NULL)) { + nd_log(NDLS_DAEMON, NDLP_ERR, "Failed to initialize digest context"); + goto exit_with_error; + } + + if (!EVP_DigestUpdate(md_ctx, second, bytes)) { + nd_log(NDLS_DAEMON, NDLP_ERR, "Failed to update digest"); + goto exit_with_error; + } + + if (!EVP_DigestFinal_ex(md_ctx, digest, &md_len)) { + nd_log(NDLS_DAEMON, NDLP_ERR, "Failed to finalize digest"); + goto exit_with_error; + } + + (void) netdata_base64_encode((unsigned char *) nonce_b64, digest, md_len); freez(client->hs.nonce_reply); client->hs.nonce_reply = strdupz(nonce_b64); + rc = 0; - OPENSSL_free(digest); - +exit_with_error: #if (OPENSSL_VERSION_NUMBER < OPENSSL_VERSION_110) EVP_MD_CTX_destroy(md_ctx); #else EVP_MD_CTX_free(md_ctx); #endif - return 0; + return rc; } #define BUF_READ_MEMCMP_CONST(const, err) \ if (rbuf_memcmp_n(client->buf_read, const, strlen(const))) { \ - ERROR(err); \ + nd_log(NDLS_DAEMON, NDLP_ERR, err); \ rbuf_flush(client->buf_read); \ return WS_CLIENT_PROTOCOL_ERROR; \ } @@ -262,7 +212,7 @@ int ws_client_start_handshake(ws_client *client) #define HTTP_HDR_LINE_CHECK_LIMIT(x) \ if ((x) >= MAX_HTTP_LINE_LENGTH) { \ - ERROR("HTTP line received is too long. Maximum is %d", MAX_HTTP_LINE_LENGTH); \ + nd_log(NDLS_DAEMON, NDLP_ERR, "HTTP line received is too long. 
Maximum is %d", MAX_HTTP_LINE_LENGTH); \ return WS_CLIENT_PROTOCOL_ERROR; \ } @@ -285,13 +235,13 @@ int ws_client_parse_handshake_resp(ws_client *client) BUF_READ_CHECK_AT_LEAST(HTTP_SC_LENGTH); // "XXX " http return code rbuf_pop(client->buf_read, buf, HTTP_SC_LENGTH); if (buf[HTTP_SC_LENGTH - 1] != 0x20) { - ERROR("HTTP status code received is not terminated by space (0x20)"); + nd_log(NDLS_DAEMON, NDLP_ERR, "HTTP status code received is not terminated by space (0x20)"); return WS_CLIENT_PROTOCOL_ERROR; } buf[HTTP_SC_LENGTH - 1] = 0; client->hs.http_code = atoi(buf); if (client->hs.http_code < 100 || client->hs.http_code >= 600) { - ERROR("HTTP status code received not in valid range 100-600"); + nd_log(NDLS_DAEMON, NDLP_ERR, "HTTP status code received not in valid range 100-600"); return WS_CLIENT_PROTOCOL_ERROR; } client->hs.hdr_state = WS_HDR_ENDLINE; @@ -330,16 +280,16 @@ int ws_client_parse_handshake_resp(ws_client *client) ptr = rbuf_find_bytes(client->buf_read, HTTP_HDR_SEPARATOR, strlen(HTTP_HDR_SEPARATOR), &idx_sep); if (!ptr || idx_sep > idx_crlf) { - ERROR("Expected HTTP hdr field key/value separator \": \" before endline in non empty HTTP header line"); + nd_log(NDLS_DAEMON, NDLP_ERR, "Expected HTTP hdr field key/value separator \": \" before endline in non empty HTTP header line"); return WS_CLIENT_PROTOCOL_ERROR; } if (idx_crlf == idx_sep + (int)strlen(HTTP_HDR_SEPARATOR)) { - ERROR("HTTP Header value cannot be empty"); + nd_log(NDLS_DAEMON, NDLP_ERR, "HTTP Header value cannot be empty"); return WS_CLIENT_PROTOCOL_ERROR; } if (idx_sep > HTTP_HEADER_NAME_MAX_LEN) { - ERROR("HTTP header too long (%d)", idx_sep); + nd_log(NDLS_DAEMON, NDLP_ERR, "HTTP header too long (%d)", idx_sep); return WS_CLIENT_PROTOCOL_ERROR; } @@ -347,23 +297,21 @@ int ws_client_parse_handshake_resp(ws_client *client) hdr->key = ((char*)hdr) + sizeof(struct http_header); hdr->value = hdr->key + idx_sep + 1; - bytes = rbuf_pop(client->buf_read, hdr->key, idx_sep); + rbuf_pop(client->buf_read, hdr->key, idx_sep); rbuf_bump_tail(client->buf_read, strlen(HTTP_HDR_SEPARATOR)); - bytes = rbuf_pop(client->buf_read, hdr->value, idx_crlf - idx_sep - strlen(HTTP_HDR_SEPARATOR)); + rbuf_pop(client->buf_read, hdr->value, idx_crlf - idx_sep - strlen(HTTP_HDR_SEPARATOR)); rbuf_bump_tail(client->buf_read, strlen(WS_HTTP_NEWLINE)); for (int i = 0; hdr->key[i]; i++) hdr->key[i] = tolower(hdr->key[i]); -// DEBUG("HTTP header \"%s\" received. Value \"%s\"", hdr->key, hdr->value); - if (ws_client_add_http_header(client, hdr)) return WS_CLIENT_PROTOCOL_ERROR; if (!strcmp(hdr->key, WS_CONN_ACCEPT)) { if (strcmp(client->hs.nonce_reply, hdr->value)) { - ERROR("Received NONCE \"%s\" does not match expected nonce of \"%s\"", hdr->value, client->hs.nonce_reply); + nd_log(NDLS_DAEMON, NDLP_ERR, "Received NONCE \"%s\" does not match expected nonce of \"%s\"", hdr->value, client->hs.nonce_reply); return WS_CLIENT_PROTOCOL_ERROR; } client->hs.nonce_matched = 1; @@ -373,21 +321,21 @@ int ws_client_parse_handshake_resp(ws_client *client) case WS_HDR_PARSE_DONE: if (!client->hs.nonce_matched) { - ERROR("Missing " WS_CONN_ACCEPT " header"); + nd_log(NDLS_DAEMON, NDLP_ERR, "Missing " WS_CONN_ACCEPT " header"); return WS_CLIENT_PROTOCOL_ERROR; } if (client->hs.http_code != 101) { - ERROR("HTTP return code not 101. Received %d with msg \"%s\".", client->hs.http_code, client->hs.http_reply_msg); + nd_log(NDLS_DAEMON, NDLP_ERR, "HTTP return code not 101. 
Received %d with msg \"%s\".", client->hs.http_code, client->hs.http_reply_msg); return WS_CLIENT_PROTOCOL_ERROR; } client->state = WS_ESTABLISHED; client->hs.hdr_state = WS_HDR_ALL_DONE; - INFO("Websocket Connection Accepted By Server"); + nd_log(NDLS_DAEMON, NDLP_INFO, "Websocket Connection Accepted By Server"); return WS_CLIENT_PARSING_DONE; case WS_HDR_ALL_DONE: - FATAL("This is error we should never come here!"); + nd_log(NDLS_DAEMON, NDLP_CRIT, "This is error we should never come here!"); return WS_CLIENT_PROTOCOL_ERROR; } return 0; @@ -397,7 +345,7 @@ int ws_client_parse_handshake_resp(ws_client *client) #define WS_FINAL_FRAG BYTE_MSB #define WS_PAYLOAD_MASKED BYTE_MSB -static inline size_t get_ws_hdr_size(size_t payload_size) +static size_t get_ws_hdr_size(size_t payload_size) { size_t hdr_len = 2 + 4 /*mask*/; if(payload_size > 125) @@ -408,7 +356,7 @@ static inline size_t get_ws_hdr_size(size_t payload_size) } #define MAX_POSSIBLE_HDR_LEN 14 -int ws_client_send(ws_client *client, enum websocket_opcode frame_type, const char *data, size_t size) +int ws_client_send(const ws_client *client, enum websocket_opcode frame_type, const char *data, size_t size) { // TODO maybe? implement fragmenting, it is not necessary though // as both tested MQTT brokers have no reuirement of one MQTT envelope @@ -416,24 +364,16 @@ int ws_client_send(ws_client *client, enum websocket_opcode frame_type, const ch // one big MQTT message as single fragmented WebSocket envelope char hdr[MAX_POSSIBLE_HDR_LEN]; char *ptr = hdr; - char *mask; int size_written = 0; size_t j = 0; size_t w_buff_free = rbuf_bytes_free(client->buf_write); size_t hdr_len = get_ws_hdr_size(size); - if (w_buff_free < hdr_len * 2) { -#ifdef DEBUG_ULTRA_VERBOSE - DEBUG("Write buffer full. Can't write requested %d size.", size); -#endif + if (w_buff_free < hdr_len * 2) return 0; - } if (w_buff_free < (hdr_len + size)) { -#ifdef DEBUG_ULTRA_VERBOSE - DEBUG("Can't write whole MQTT packet of %d bytes into the buffer. 
Will do partial send of %d.", size, w_buff_free - hdr_len); -#endif size = w_buff_free - hdr_len; hdr_len = get_ws_hdr_size(size); // the actual needed header size might decrease if we cut number of bytes @@ -459,12 +399,10 @@ int ws_client_send(ws_client *client, enum websocket_opcode frame_type, const ch ptr += sizeof(be); } else *ptr++ |= size; - - mask = ptr; - if (read(client->entropy_fd, mask, sizeof(uint32_t)) < (ssize_t)sizeof(uint32_t)) { - ERROR("Unable to get mask from \"" ENTROPY_SOURCE "\""); - return -2; - } + + char *mask = ptr; + uint32_t mask32 = os_random32() + 1; + memcpy(mask, &mask32, sizeof(mask32)); rbuf_push(client->buf_write, hdr, hdr_len); @@ -490,7 +428,7 @@ int ws_client_send(ws_client *client, enum websocket_opcode frame_type, const ch return size_written; } -static int check_opcode(ws_client *client,enum websocket_opcode oc) +static int check_opcode(enum websocket_opcode oc) { switch(oc) { case WS_OP_BINARY_FRAME: @@ -498,34 +436,34 @@ static int check_opcode(ws_client *client,enum websocket_opcode oc) case WS_OP_PING: return 0; case WS_OP_CONTINUATION_FRAME: - FATAL("WS_OP_CONTINUATION_FRAME NOT IMPLEMENTED YET!!!!"); + nd_log(NDLS_DAEMON, NDLP_ERR, "WS_OP_CONTINUATION_FRAME NOT IMPLEMENTED YET!!!!"); return 0; case WS_OP_TEXT_FRAME: - FATAL("WS_OP_TEXT_FRAME NOT IMPLEMENTED YET!!!!"); + nd_log(NDLS_DAEMON, NDLP_ERR, "WS_OP_TEXT_FRAME NOT IMPLEMENTED YET!!!!"); return 0; case WS_OP_PONG: - FATAL("WS_OP_PONG NOT IMPLEMENTED YET!!!!"); + nd_log(NDLS_DAEMON, NDLP_ERR, "WS_OP_PONG NOT IMPLEMENTED YET!!!!"); return 0; default: return WS_CLIENT_PROTOCOL_ERROR; } } -static inline void ws_client_rx_post_hdr_state(ws_client *client) +static void ws_client_rx_post_hdr_state(ws_client *client) { switch(client->rx.opcode) { case WS_OP_BINARY_FRAME: client->rx.parse_state = WS_PAYLOAD_DATA; - return; + break; case WS_OP_CONNECTION_CLOSE: client->rx.parse_state = WS_PAYLOAD_CONNECTION_CLOSE; - return; + break; case WS_OP_PING: client->rx.parse_state = WS_PAYLOAD_PING_REQ_PAYLOAD; - return; + break; default: client->rx.parse_state = WS_PAYLOAD_SKIP_UNKNOWN_PAYLOAD; - return; + break; } } @@ -541,15 +479,15 @@ int ws_client_process_rx_ws(ws_client *client) client->rx.opcode = buf[0] & (char)~BYTE_MSB; if (!(buf[0] & (char)~WS_FINAL_FRAG)) { - ERROR("Not supporting fragmented messages yet!"); + nd_log(NDLS_DAEMON, NDLP_ERR, "Not supporting fragmented messages yet!"); return WS_CLIENT_PROTOCOL_ERROR; } - if (check_opcode(client, client->rx.opcode) == WS_CLIENT_PROTOCOL_ERROR) + if (check_opcode(client->rx.opcode) == WS_CLIENT_PROTOCOL_ERROR) return WS_CLIENT_PROTOCOL_ERROR; if (buf[1] & (char)WS_PAYLOAD_MASKED) { - ERROR("Mask is not allowed in Server->Client Websocket direction."); + nd_log(NDLS_DAEMON, NDLP_ERR, "Mask is not allowed in Server->Client Websocket direction."); return WS_CLIENT_PROTOCOL_ERROR; } @@ -584,12 +522,8 @@ int ws_client_process_rx_ws(ws_client *client) if (!rbuf_bytes_available(client->buf_read)) return WS_CLIENT_NEED_MORE_BYTES; char *insert = rbuf_get_linear_insert_range(client->buf_to_mqtt, &size); - if (!insert) { -#ifdef DEBUG_ULTRA_VERBOSE - DEBUG("BUFFER TOO FULL. Avail %d req %d", (int)size, (int)remaining); -#endif + if (!insert) return WS_CLIENT_BUFFER_FULL; - } size = (size > remaining) ? 
remaining : size; size = rbuf_pop(client->buf_read, insert, size); rbuf_bump_head(client->buf_to_mqtt, size); @@ -603,11 +537,11 @@ int ws_client_process_rx_ws(ws_client *client) // b) 2byte reason code // c) 2byte reason code followed by message if (client->rx.payload_length == 1) { - ERROR("WebScoket CONNECTION_CLOSE can't have payload of size 1"); + nd_log(NDLS_DAEMON, NDLP_ERR, "WebSocket CONNECTION_CLOSE can't have payload of size 1"); return WS_CLIENT_PROTOCOL_ERROR; } if (!client->rx.payload_length) { - INFO("WebSocket server closed the connection without giving reason."); + nd_log(NDLS_DAEMON, NDLP_INFO, "WebSocket server closed the connection without giving reason."); client->rx.parse_state = WS_PACKET_DONE; break; } @@ -621,7 +555,7 @@ int ws_client_process_rx_ws(ws_client *client) client->rx.payload_processed += sizeof(uint16_t); if(client->rx.payload_processed == client->rx.payload_length) { - INFO("WebSocket server closed the connection with EC=%d. Without message.", + nd_log(NDLS_DAEMON, NDLP_INFO, "WebSocket server closed the connection with EC=%d. Without message.", client->rx.specific_data.op_close.ec); client->rx.parse_state = WS_PACKET_DONE; break; @@ -640,7 +574,7 @@ int ws_client_process_rx_ws(ws_client *client) client->rx.payload_length - client->rx.payload_processed); } client->rx.specific_data.op_close.reason[client->rx.payload_length] = 0; - INFO("WebSocket server closed the connection with EC=%d and reason \"%s\"", + nd_log(NDLS_DAEMON, NDLP_INFO, "WebSocket server closed the connection with EC=%d and reason \"%s\"", client->rx.specific_data.op_close.ec, client->rx.specific_data.op_close.reason); freez(client->rx.specific_data.op_close.reason); @@ -649,14 +583,14 @@ int ws_client_process_rx_ws(ws_client *client) break; case WS_PAYLOAD_SKIP_UNKNOWN_PAYLOAD: BUF_READ_CHECK_AT_LEAST(client->rx.payload_length); - WARN("Skipping Websocket Packet of unsupported/unknown type"); + nd_log(NDLS_DAEMON, NDLP_WARNING, "Skipping Websocket Packet of unsupported/unknown type"); if (client->rx.payload_length) rbuf_bump_tail(client->buf_read, client->rx.payload_length); client->rx.parse_state = WS_PACKET_DONE; return WS_CLIENT_PARSING_DONE; case WS_PAYLOAD_PING_REQ_PAYLOAD: if (client->rx.payload_length > rbuf_get_capacity(client->buf_read) / 2) { - ERROR("Ping arrived with payload which is too big!"); + nd_log(NDLS_DAEMON, NDLP_ERR, "Ping arrived with payload which is too big!"); return WS_CLIENT_INTERNAL_ERROR; } BUF_READ_CHECK_AT_LEAST(client->rx.payload_length); @@ -666,7 +600,7 @@ int ws_client_process_rx_ws(ws_client *client) // then attempt to send as soon as buffer space clears up size = ws_client_send(client, WS_OP_PONG, client->rx.specific_data.ping_msg, client->rx.payload_length); if (size != client->rx.payload_length) { - ERROR("Unable to send the PONG as one packet back. Closing connection."); + nd_log(NDLS_DAEMON, NDLP_ERR, "Unable to send the PONG as one packet back. 
Closing connection."); return WS_CLIENT_PROTOCOL_ERROR; } client->rx.parse_state = WS_PACKET_DONE; @@ -678,7 +612,7 @@ int ws_client_process_rx_ws(ws_client *client) return WS_CLIENT_CONNECTION_CLOSED; return WS_CLIENT_PARSING_DONE; default: - FATAL("Unknown parse state"); + nd_log(NDLS_DAEMON, NDLP_ERR, "Unknown parse state"); return WS_CLIENT_INTERNAL_ERROR; } return 0; @@ -711,6 +645,8 @@ int ws_client_process(ws_client *client) case WS_CLIENT_CONNECTION_CLOSED: client->state = WS_CONN_CLOSED_GRACEFUL; break; + default: + break; } // if ret == 0 we can continue parsing // if ret == WS_CLIENT_PARSING_DONE we processed @@ -719,13 +655,13 @@ int ws_client_process(ws_client *client) } while (!ret || ret == WS_CLIENT_PARSING_DONE); break; case WS_ERROR: - ERROR("ws_client is in error state. Restart the connection!"); + nd_log(NDLS_DAEMON, NDLP_ERR, "ws_client is in error state. Restart the connection!"); return WS_CLIENT_PROTOCOL_ERROR; case WS_CONN_CLOSED_GRACEFUL: - ERROR("Connection has been gracefully closed. Calling this is useless (and probably bug) until you reconnect again."); + nd_log(NDLS_DAEMON, NDLP_ERR, "Connection has been gracefully closed. Calling this is useless (and probably bug) until you reconnect again."); return WS_CLIENT_CONNECTION_CLOSED; default: - FATAL("Unknown connection state! Probably memory corruption."); + nd_log(NDLS_DAEMON, NDLP_CRIT, "Unknown connection state! Probably memory corruption."); return WS_CLIENT_INTERNAL_ERROR; } return ret; diff --git a/src/aclk/mqtt_websockets/ws_client.h b/src/aclk/mqtt_websockets/ws_client.h index 0ccbd29a8..67e5835a2 100644 --- a/src/aclk/mqtt_websockets/ws_client.h +++ b/src/aclk/mqtt_websockets/ws_client.h @@ -1,14 +1,8 @@ -// SPDX-License-Identifier: GPL-3.0-only -// Copyright (C) 2020 Timotej Šiškovič +// SPDX-License-Identifier: GPL-3.0-or-later #ifndef WS_CLIENT_H #define WS_CLIENT_H -#include "c-rbuf/cringbuffer.h" -#include "mqtt_wss_log.h" - -#include - #define WS_CLIENT_NEED_MORE_BYTES 0x10 #define WS_CLIENT_PARSING_DONE 0x11 #define WS_CLIENT_CONNECTION_CLOSED 0x12 @@ -98,23 +92,20 @@ typedef struct websocket_client { // memory usage and remove one more memcpy buf_read->buf_to_mqtt rbuf_t buf_to_mqtt; // RAW data for MQTT lib - int entropy_fd; - // careful host is borrowed, don't free char **host; - mqtt_wss_log_ctx_t log; } ws_client; -ws_client *ws_client_new(size_t buf_size, char **host, mqtt_wss_log_ctx_t log); +ws_client *ws_client_new(size_t buf_size, char **host); void ws_client_destroy(ws_client *client); void ws_client_reset(ws_client *client); int ws_client_start_handshake(ws_client *client); -int ws_client_want_write(ws_client *client); +int ws_client_want_write(const ws_client *client); int ws_client_process(ws_client *client); -int ws_client_send(ws_client *client, enum websocket_opcode frame_type, const char *data, size_t size); +int ws_client_send(const ws_client *client, enum websocket_opcode frame_type, const char *data, size_t size); #endif /* WS_CLIENT_H */ diff --git a/src/claim/README.md b/src/claim/README.md index 51e2a9ebe..a0af190b9 100644 --- a/src/claim/README.md +++ b/src/claim/README.md @@ -11,8 +11,7 @@ features like centralized monitoring and easier collaboration. There are two places in the UI where you can add/connect your Node: - **Space/Room settings**: Click the cogwheel (the bottom-left corner or next to the Room name at the top) and - select "Nodes." Click the "+" button to add - a new node. + select "Nodes." Click the "+" button to add a new node. 
- [**Nodes tab**](/docs/dashboards-and-charts/nodes-tab.md): Click on the "Add nodes" button. Netdata Cloud will generate a command that you can execute on your Node to install and claim the Agent. The command is @@ -28,12 +27,13 @@ Once you've chosen your installation method, follow the provided instructions to ### Connect an Existing Agent -There are two methods to connect an already installed Netdata Agent to your Netdata Cloud Space: +There are three methods to connect an already installed Netdata Agent to your Netdata Cloud Space: -- using the Netdata Cloud user interface (UI). -- using the claiming script. +- Manually, via the UI +- Automatically, via a provisioning system (or the command line) +- Automatically, via environment variables (e.g. kubernetes, docker, etc) -#### Using the UI (recommended) +#### Manually, via the UI The UI method is the easiest and recommended way to connect your Agent. Here's how: @@ -42,36 +42,52 @@ The UI method is the easiest and recommended way to connect your Agent. Here's h 3. Click the "Connect" button. 4. Follow the on-screen instructions to connect your Agent. -#### Using claiming script +#### Automatically, via a provisioning system or the command line -You can connect an Agent by running -the [netdata-claim.sh](https://github.com/netdata/netdata/blob/master/src/claim/netdata-claim.sh.in) script directly. -You can either run it with root privileges using `sudo` or as the user running the Agent (typically `netdata`). - -The claiming script accepts options that control the connection process. You can specify these options using the -following format: +Netdata Agents can be connected to Netdata Cloud by creating the file `/etc/netdata/claim.conf` +(or `/opt/netdata/etc/netdata/claim.conf` depending on your installation), like this: ```bash -netdata-claim.sh -OPTION=VALUE ... +[global] + url = The Netdata Cloud base URL (optional, defaults to `https://app.netdata.cloud`) + token = The claiming token for your Netdata Cloud Space (required) + rooms = A comma-separated list of Rooms to add the Agent to (optional) + proxy = The URL of a proxy server to use for the connection, or none, or env (optional, defaults to env) + insecure = Either yes or no (optional) ``` -Claiming script options: +- `proxy` can get anything libcurl accepts as a proxy, or the `none` and `env` keywords. `none` (or just an empty value) disables proxy configuration, while `env` tells libcurl to use the environment to determine the proxy configuration (usually the `https_proxy` environment variable). +- `insecure` is a boolean (either `yes`, or `no`) and when set to `yes` it instructs libcurl to disable host verification. -| Option | Description | Required | Default value | -|--------|--------------------------------------------------------------------|:--------:|:------------------------------------------------------| -| token | The claiming token for your Netdata Cloud Space. | yes | | -| rooms | A comma-separated list of Rooms to add the Agent to. | no | The Agent will be added to the "All nodes" Room only. | -| id | The unique identifier of the Agent. | no | The Agent's MACHINE_GUID. | -| proxy | The URL of a proxy server to use for the connection, if necessary. 
| no | | - -Example: +Example: ```bash -netdata-claim.sh -token=MYTOKEN1234567 -rooms=room1,room2 +[global] + url = https://app.netdata.cloud + token = NETDATA_CLOUD_SPACE_TOKEN + rooms = ROOM_KEY1,ROOM_KEY2,ROOM_KEY3 + proxy = http://username:password@myproxy:8080 + insecure = no ``` -This command connects the Agent and adds it to the "room1" and "room2" Rooms using your claiming token -MYTOKEN1234567. +If the agent is already running, you can either run `netdatacli reload-claiming-state` or restart the agent. +Otherwise, the agent will be claimed when it starts. + +If the claiming process fails, the reason will be logged in daemon.log (search for "CLAIM") and the `cloud` section of `http://ip:19999/api/v2/info`. + +#### Automatically, via environment variables + +Netdata will use the following environment variables: + +- `NETDATA_CLAIM_URL`: The Netdata Cloud base URL (optional, defaults to `https://app.netdata.cloud`) +- `NETDATA_CLAIM_TOKEN`: The claiming token for your Netdata Cloud Space (required) +- `NETDATA_CLAIM_ROOMS`: A comma-separated list of Rooms to add the Agent to (optional) +- `NETDATA_CLAIM_PROXY`: The URL of a proxy server to use for the connection (optional) +- `NETDATA_EXTRA_CLAIM_OPTS`: may contain a space-separated list of options; the only option currently used is `-insecure`. + +The `NETDATA_CLAIM_TOKEN` alone is enough for triggering the claiming process. + +If the claiming process fails, the reason will be logged in daemon.log (search for "CLAIM") and the `cloud` section of `http://ip:19999/api/v2/info`. ## Reconnect @@ -84,22 +100,16 @@ cd /var/lib/netdata # Replace with your Netdata library directory, if not /var sudo rm -rf cloud.d/ ``` +> **IMPORTANT** +> +> Keep in mind that the Agent will be **re-claimed automatically** if the environment variables or `claim.conf` exist when the agent is restarted. + This node no longer has access to the credentials it was used when connecting to Netdata Cloud via the ACLK. You will still be able to see this node in your Rooms in an **unreachable** state. -If you want to reconnect this node, you need to: - -1. Ensure that the `/var/lib/netdata/cloud.d` directory doesn't exist. In some installations, the path - is `/opt/netdata/var/lib/netdata/cloud.d` -2. Stop the Agent -3. Ensure that the `uuidgen-runtime` package is installed. Run ```echo "$(uuidgen)"``` and validate you get back a UUID -4. Copy the kickstart.sh command to add a node from your space and add to the end of it `--claim-id "$(uuidgen)"`. Run - the command and look for the message `Node was successfully claimed.` -5. Start the Agent - ### Docker based installations -To remove a node from you Space in Netdata Cloud, and connect it to another Space, follow these steps: +To remove a node from your Space in Netdata Cloud and connect it to another Space, follow these steps: 1. Enter the running container you wish to remove from your Space @@ -113,7 +123,6 @@ To remove a node from you Space in Netdata Cloud, and connect it to another Spac ```bash rm -rf /var/lib/netdata/cloud.d/ - rm /var/lib/netdata/registry/netdata.public.unique.id ``` @@ -123,7 +132,6 @@ To remove a node from you Space in Netdata Cloud, and connect it to another Spac ```bash docker stop CONTAINER_NAME - docker rm CONTAINER_NAME ``` @@ -144,35 +152,31 @@ To remove a node from you Space in Netdata Cloud, and connect it to another Spac ``` 4. Finally, go to your new Space, copy the installation command with the new claim token and run it. 
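To make the last step concrete for containerized nodes, the new claiming token can also be passed entirely through the environment variables documented above; the placeholder values in this sketch are illustrative, not part of this change:

```bash
# Illustrative only: claim a containerized Agent via the documented
# NETDATA_CLAIM_* environment variables (replace the placeholder values)
docker run -d --name netdata \
  -e NETDATA_CLAIM_TOKEN=NETDATA_CLOUD_SPACE_TOKEN \
  -e NETDATA_CLAIM_ROOMS=ROOM_KEY1,ROOM_KEY2 \
  -e NETDATA_CLAIM_URL=https://app.netdata.cloud \
  netdata/netdata
```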
- If you are using a `docker-compose.yml` file, you will have to overwrite it with the new claiming token. + If you’re using a `docker-compose.yml` file, you will have to overwrite it with the new claiming token. The node should now appear online in that Space. ## Regenerate Claiming Token -If in case of some security reason, or other, you need to revoke your previous claiming token and generate a new one you -can achieve that from the Netdata Cloud UI. +There may be situations where you need to revoke your previous Netdata Cloud claiming token and generate a new one for security reasons. Here's how to do it: + +**Requirements**: + +- Only administrators of Space in Netdata Cloud can regenerate tokens. -On any screen where you see the connect the node to Netdata Cloud command you'll see above it, next to -the [updates channel](/docs/netdata-agent/versions-and-platforms.md), a -button to **Regenerate token**. This action will invalidate your previous token and generate a fresh new one. +**Steps**: -Only the administrators of a Space in Netdata Cloud can trigger this action. +1. Navigate to any screen within the Netdata Cloud UI where you see the "Connect the node to Netdata Cloud" command. +2. Look above this command, near the [Updates channel](/docs/netdata-agent/versions-and-platforms.md). You should see a button that says "Regenerate token." +3. Click the "Regenerate token" button. This action will invalidate your previous token and generate a new one. ## Troubleshoot If you're having trouble connecting a node, this may be because the [ACLK](/src/aclk/README.md) cannot connect to Cloud. -With the Netdata Agent running, visit `http://NODE:19999/api/v1/info` in your browser, replacing `NODE` with the IP -address or hostname of your Agent. The returned JSON contains four keys that will be helpful to diagnose any issues you -might be having with the ACLK or connection process. - -``` -"cloud-enabled" -"cloud-available" -"agent-claimed" -"aclk-available" -``` +With the Netdata Agent running, visit `http://NODE:19999/api/v2/info` in your browser, replacing `NODE` with the IP +address or hostname of your Agent. The returned JSON contains a section called `cloud` with helpful information to +diagnose any issues you might be having with the ACLK or connection process. > **Note** > @@ -199,149 +203,33 @@ installed Netdata using an unsupported package. > **Note** > -> If you are using an unsupported package, such as a third-party `.deb`/`.rpm` package provided by your distribution, +> If you’re using an unsupported package, such as a third-party `.deb`/`.rpm` package provided by your distribution, > please remove that package and reinstall using > our [recommended kickstart script](/packaging/installer/methods/kickstart.md). ### kickstart: Failed to write new machine GUID -If you run the kickstart script but don't have privileges required for the actions done on the connecting to Netdata -Cloud process you will get the following error: +You might encounter this error if you run the Netdata kickstart script without sufficient permissions: ```bash Failed to write new machine GUID. Please make sure you have rights to write to /var/lib/netdata/registry/netdata.public.unique.id. ``` -For a successful execution you will need to run the script with root privileges or run it with the user that is running -the Agent. 
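Both remedies amount to re-running the script with enough rights to write that file; for example (the download URL and token below are placeholders following the kickstart instructions, a sketch rather than commands introduced by this patch):

```bash
# Illustrative only: fetch the kickstart script, then run it either as root
# or as the user the Agent runs as (typically `netdata`)
wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh
sudo sh /tmp/netdata-kickstart.sh --claim-token YOUR_CLAIM_TOKEN             # option 1
sudo -u netdata sh /tmp/netdata-kickstart.sh --claim-token YOUR_CLAIM_TOKEN  # option 2
```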
- -### bash: netdata-claim.sh: command not found +To resolve this issue, you have two options: -If you run the claiming script and see a `command not found` error, you either installed Netdata in a non-standard -location or are using an unsupported package. If you installed Netdata in a non-standard path using -the `--install-prefix` option, you need to update your `$PATH` or run `netdata-claim.sh` using the full path. +1. Run the script with root privileges. +2. Run the script with the user that runs the Netdata Agent. -For example, if you installed Netdata to `/opt/netdata`, use `/opt/netdata/bin/netdata-claim.sh` to run the claiming -script. - -> **Note** -> -> If you are using an unsupported package, such as a third-party `.deb`/`.rpm` package provided by your distribution, -> please remove that package and reinstall using -> -our [recommended kickstart script](/packaging/installer/methods/kickstart.md). - -### Connecting on older distributions (Ubuntu 14.04, Debian 8, CentOS 6) +### Connecting to Cloud on older distributions (Ubuntu 14.04, Debian 8, CentOS 6) If you're running an older Linux distribution or one that has reached EOL, such as Ubuntu 14.04 LTS, Debian 8, or CentOS 6, your Agent may not be able to securely connect to Netdata Cloud due to an outdated version of OpenSSL. These old -versions of OpenSSL cannot perform [hostname validation](https://wiki.openssl.org/index.php/Hostname_validation), which -helps securely encrypt SSL connections. +versions of OpenSSL cannot perform [hostname validation](https://wiki.openssl.org/index.php/Hostname_validation), +which helps securely encrypt SSL connections. -We recommend you reinstall Netdata with -a [static build](/packaging/installer/methods/kickstart.md#static-builds), -which uses an up-to-date version of OpenSSL with hostname validation enabled. +We recommend you reinstall Netdata with a [static build](/packaging/installer/methods/kickstart.md#install-type), which uses an up-to-date version of OpenSSL with hostname validation enabled. If you choose to continue using the outdated version of OpenSSL, your node will still connect to Netdata Cloud, albeit with hostname verification disabled. Without verification, your Netdata Cloud connection could be vulnerable to man-in-the-middle attacks. - -### cloud-enabled is false - -If `cloud-enabled` is `false`, you probably ran the installer with `--disable-cloud` option. - -Additionally, check that the `enabled` setting in `var/lib/netdata/cloud.d/cloud.conf` is set to `true`: - -```conf -[global] - enabled = true -``` - -To fix this issue, reinstall Netdata using -your [preferred method](/packaging/installer/README.md) and do not add -the `--disable-cloud` option. - -### cloud-available is false / ACLK Available: No - -If `cloud-available` is `false` after you verified Cloud is enabled in the previous step, the most likely issue is that -Cloud features failed to build during installation. - -If Cloud features fail to build, the installer continues and finishes the process without Cloud functionality as opposed -to failing the installation altogether. - -We do this to ensure the Agent will always finish installing. - -If you can't see an explicit error in the installer's output, you can run the installer with the `--require-cloud` -option. This option causes the installation to fail if Cloud functionality can't be built and enabled, and the -installer's output should give you more error details. 
- -You may see one of the following error messages during installation: - -- `Failed to build libmosquitto. The install process will continue, but you will not be able to connect this node to Netdata Cloud.` -- `Unable to fetch sources for libmosquitto. The install process will continue, but you will not be able to connect this node to Netdata Cloud.` -- `Failed to build libwebsockets. The install process will continue, but you may not be able to connect this node to Netdata Cloud.` -- `Unable to fetch sources for libwebsockets. The install process will continue, but you may not be able to connect this node to Netdata Cloud.` -- `Could not find cmake, which is required to build libwebsockets. The install process will continue, but you may not be able to connect this node to Netdata Cloud.` -- `Could not find cmake, which is required to build JSON-C. The install process will continue, but Netdata Cloud support will be disabled.` -- `Failed to build JSON-C. Netdata Cloud support will be disabled.` -- `Unable to fetch sources for JSON-C. Netdata Cloud support will be disabled.` - -One common cause of the installer failing to build Cloud features is not having one of the following dependencies on -your system: `cmake`, `json-c` and `OpenSSL`, including corresponding `devel` packages. - -You can also look for error messages in `/var/log/netdata/error.log`. Try one of the following two commands to search -for ACLK-related errors. - -```bash -less /var/log/netdata/error.log -grep -i ACLK /var/log/netdata/error.log -``` - -If the installer's output does not help you enable Cloud features, contact us -by [creating an issue on GitHub](https://github.com/netdata/netdata/issues/new?assignees=&labels=bug%2Cneeds+triage&template=BUG_REPORT.yml&title=The+installer+failed+to+prepare+the+required+dependencies+for+Netdata+Cloud+functionality) -with details about your system and relevant output from `error.log`. - -### agent-claimed is false / Claimed: No - -You must [connect your node](#connect). - -### aclk-available is false / Online: No - -If `aclk-available` is `false` and all other keys are `true`, your Agent is having trouble connecting to the Cloud -through the ACLK. Please check your system's firewall. - -If your Agent needs to use a proxy to access the internet, you must set up a proxy for connecting. - -If you are certain firewall and proxy settings are not the issue, you should consult the Agent's `error.log` -at `/var/log/netdata/error.log` and contact us -by [creating an issue on GitHub](https://github.com/netdata/netdata/issues/new?assignees=&labels=bug%2Cneeds+triage&template=BUG_REPORT.yml&title=ACLK-available-is-false) -with details about your system and relevant output from `error.log`. - -## Connecting reference - -In the sections below, you can find reference material for the kickstart script, claiming script, connecting via the -Agent's command line tool, and details about the files found in `cloud.d`. - -### The `cloud.conf` file - -This section defines how and whether your Agent connects to Netdata Cloud using -the [Agent-Cloud link](/src/aclk/README.md)(ACLK). - -| setting | default | info | -|:---------------|:----------------------------|:-----------------------------------------------------------------------------------------------------------------------------------------------------| -| enabled | yes | Controls whether the ACLK is active. Set to no to prevent the Agent from connecting to Netdata Cloud. | -| cloud base url | | The URL for the Netdata Cloud web application. 
Typically, this should not be changed. |
-| proxy | env | Specifies the proxy setting for the ACLK. Options: none (no proxy), env (use environment's proxy), or a URL (e.g., `http://proxy.example.com:1080`). |
-
-### Connection directory
-
-Netdata stores the Agent's connection-related state in the Netdata library directory under `cloud.d`. For a default
-installation, this directory exists at `/var/lib/netdata/cloud.d`. The directory and its files should be owned by the
-user that runs the Agent, which is typically the `netdata` user.
-
-The `cloud.d/token` file should contain the claiming-token and the `cloud.d/rooms` file should contain the list of War
-Rooms you added that node to.
-
-The user can also put the Cloud endpoint's full certificate chain in `cloud.d/cloud_fullchain.pem` so that the Agent
-can trust the endpoint if necessary.
diff --git a/src/claim/claim-with-api.c b/src/claim/claim-with-api.c
new file mode 100644
index 000000000..534d4511a
--- /dev/null
+++ b/src/claim/claim-with-api.c
@@ -0,0 +1,486 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "claim.h"
+
+#include "registry/registry.h"
+
+#include <openssl/evp.h>
+#include <openssl/pem.h>
+#include <curl/curl.h>
+#include <json-c/json.h>
+
+static bool check_and_generate_certificates() {
+    FILE *fp;
+    EVP_PKEY *pkey = NULL;
+    EVP_PKEY_CTX *pctx = NULL;
+
+    CLEAN_CHAR_P *private_key_file = filename_from_path_entry_strdupz(netdata_configured_cloud_dir, "private.pem");
+    CLEAN_CHAR_P *public_key_file = filename_from_path_entry_strdupz(netdata_configured_cloud_dir, "public.pem");
+
+    // If the public key already exists, assume the key pair was generated on a previous run
+    fp = fopen(public_key_file, "r");
+    if (fp) {
+        fclose(fp);
+        return true;
+    }
+
+    // Generate the RSA key
+    pctx = EVP_PKEY_CTX_new_id(EVP_PKEY_RSA, NULL);
+    if (!pctx) {
+        claim_agent_failure_reason_set("Cannot generate RSA key, EVP_PKEY_CTX_new_id() failed");
+        return false;
+    }
+
+    if (EVP_PKEY_keygen_init(pctx) <= 0) {
+        claim_agent_failure_reason_set("Cannot generate RSA key, EVP_PKEY_keygen_init() failed");
+        EVP_PKEY_CTX_free(pctx);
+        return false;
+    }
+
+    if (EVP_PKEY_CTX_set_rsa_keygen_bits(pctx, 2048) <= 0) {
+        claim_agent_failure_reason_set("Cannot generate RSA key, EVP_PKEY_CTX_set_rsa_keygen_bits() failed");
+        EVP_PKEY_CTX_free(pctx);
+        return false;
+    }
+
+    if (EVP_PKEY_keygen(pctx, &pkey) <= 0) {
+        claim_agent_failure_reason_set("Cannot generate RSA key, EVP_PKEY_keygen() failed");
+        EVP_PKEY_CTX_free(pctx);
+        return false;
+    }
+
+    EVP_PKEY_CTX_free(pctx);
+
+    // Save the private key
+    fp = fopen(private_key_file, "wb");
+    if (!fp || !PEM_write_PrivateKey(fp, pkey, NULL, NULL, 0, NULL, NULL)) {
+        claim_agent_failure_reason_set("Cannot write private key file: %s", private_key_file);
+        if (fp) fclose(fp);
+        EVP_PKEY_free(pkey);
+        return false;
+    }
+    fclose(fp);
+
+    // Save the public key
+    fp = fopen(public_key_file, "wb");
+    if (!fp || !PEM_write_PUBKEY(fp, pkey)) {
+        claim_agent_failure_reason_set("Cannot write public key file: %s", public_key_file);
+        if (fp) fclose(fp);
+        EVP_PKEY_free(pkey);
+        return false;
+    }
+    fclose(fp);
+
+    EVP_PKEY_free(pkey);
+    return true;
+}
+
+static size_t response_write_callback(void *ptr, size_t size, size_t nmemb, void *stream) {
+    BUFFER *wb = stream;
+    size_t real_size = size * nmemb;
+
+    buffer_memcat(wb, ptr, real_size);
+
+    return real_size;
+}
+
+static const char *curl_add_json_room(BUFFER *wb, const char *start, const char *end) {
+    size_t len = end - start;
+
+    // copy the item to a new buffer and terminate it
+    char buf[len + 1];
+    memcpy(buf, start, len);
+    buf[len] = '\0';
+
+    // add it to the json array
+
const char *trimmed = trim(buf); // remove leading and trailing spaces + if(trimmed) + buffer_json_add_array_item_string(wb, trimmed); + + // prepare for the next item + start = end + 1; + + // skip multiple separators or spaces + while(*start == ',' || *start == ' ') start++; + + return start; +} + +void curl_add_rooms_json_array(BUFFER *wb, const char *rooms) { + buffer_json_member_add_array(wb, "rooms"); + if(rooms && *rooms) { + const char *start = rooms, *end = NULL; + + // Skip initial separators or spaces + while (*start == ',' || *start == ' ') + start++; + + // Process each item in the comma-separated list + while ((end = strchr(start, ',')) != NULL) + start = curl_add_json_room(wb, start, end); + + // Process the last item if any + if (*start) + curl_add_json_room(wb, start, &start[strlen(start)]); + } + buffer_json_array_close(wb); +} + +static int debug_callback(CURL *handle, curl_infotype type, char *data, size_t size, void *userptr) { + (void)handle; // Unused + (void)userptr; // Unused + + if (type == CURLINFO_TEXT) + nd_log(NDLS_DAEMON, NDLP_INFO, "CLAIM: Info: %s", data); + else if (type == CURLINFO_HEADER_OUT) + nd_log(NDLS_DAEMON, NDLP_INFO, "CLAIM: Send header: %.*s", (int)size, data); + else if (type == CURLINFO_DATA_OUT) + nd_log(NDLS_DAEMON, NDLP_INFO, "CLAIM: Send data: %.*s", (int)size, data); + else if (type == CURLINFO_SSL_DATA_OUT) + nd_log(NDLS_DAEMON, NDLP_INFO, "CLAIM: Send SSL data: %.*s", (int)size, data); + else if (type == CURLINFO_HEADER_IN) + nd_log(NDLS_DAEMON, NDLP_INFO, "CLAIM: Receive header: %.*s", (int)size, data); + else if (type == CURLINFO_DATA_IN) + nd_log(NDLS_DAEMON, NDLP_INFO, "CLAIM: Receive data: %.*s", (int)size, data); + else if (type == CURLINFO_SSL_DATA_IN) + nd_log(NDLS_DAEMON, NDLP_INFO, "CLAIM: Receive SSL data: %.*s", (int)size, data); + + return 0; +} + +static bool send_curl_request(const char *machine_guid, const char *hostname, const char *token, const char *rooms, const char *url, const char *proxy, int insecure, bool *can_retry) { + CURL *curl; + CURLcode res; + char target_url[2048]; + char public_key[2048] = ""; + FILE *fp; + struct curl_slist *headers = NULL; + + // create a new random claim id + nd_uuid_t claimed_id; + uuid_generate_random(claimed_id); + char claimed_id_str[UUID_STR_LEN]; + uuid_unparse_lower(claimed_id, claimed_id_str); + + // generate the URL to post + snprintf(target_url, sizeof(target_url), "%s%sapi/v1/spaces/nodes/%s", + url, strendswith(url, "/") ? 
"" : "/", claimed_id_str); + + // Read the public key + CLEAN_CHAR_P *public_key_file = filename_from_path_entry_strdupz(netdata_configured_cloud_dir, "public.pem"); + fp = fopen(public_key_file, "r"); + if (!fp || fread(public_key, 1, sizeof(public_key), fp) == 0) { + claim_agent_failure_reason_set("cannot read public key file '%s'", public_key_file); + if (fp) fclose(fp); + *can_retry = false; + return false; + } + fclose(fp); + + // check if we have trusted.pem + // or cloud_fullchain.pem, for backwards compatibility + CLEAN_CHAR_P *trusted_key_file = filename_from_path_entry_strdupz(netdata_configured_cloud_dir, "trusted.pem"); + fp = fopen(trusted_key_file, "r"); + if(fp) + fclose(fp); + else { + freez(trusted_key_file); + trusted_key_file = filename_from_path_entry_strdupz(netdata_configured_cloud_dir, "cloud_fullchain.pem"); + fp = fopen(trusted_key_file, "r"); + if(fp) + fclose(fp); + else { + freez(trusted_key_file); + trusted_key_file = NULL; + } + } + + // generate the JSON request message + CLEAN_BUFFER *wb = buffer_create(0, NULL); + buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_MINIFY); + + buffer_json_member_add_object(wb, "node"); + { + buffer_json_member_add_string(wb, "id", claimed_id_str); + buffer_json_member_add_string(wb, "hostname", hostname); + } + buffer_json_object_close(wb); // node + + buffer_json_member_add_string(wb, "token", token); + curl_add_rooms_json_array(wb, rooms); + buffer_json_member_add_string(wb, "publicKey", public_key); + buffer_json_member_add_string(wb, "mGUID", machine_guid); + buffer_json_finalize(wb); + + // initialize libcurl + curl = curl_easy_init(); + if(!curl) { + claim_agent_failure_reason_set("Cannot initialize request (curl_easy_init() failed)"); + *can_retry = true; + return false; + } + + // curl_easy_setopt(curl, CURLOPT_VERBOSE, 1L); + curl_easy_setopt(curl, CURLOPT_DEBUGFUNCTION, debug_callback); + + // we will receive the response in this + CLEAN_BUFFER *response = buffer_create(0, NULL); + + // configure the request + headers = curl_slist_append(headers, "Content-Type: application/json"); + curl_easy_setopt(curl, CURLOPT_URL, target_url); + curl_easy_setopt(curl, CURLOPT_CUSTOMREQUEST, "PUT"); + curl_easy_setopt(curl, CURLOPT_POSTFIELDS, buffer_tostring(wb)); + curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers); + curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, response_write_callback); + curl_easy_setopt(curl, CURLOPT_WRITEDATA, response); + + if(trusted_key_file) + curl_easy_setopt(curl, CURLOPT_CAINFO, trusted_key_file); + + // Proxy configuration + if (proxy) { + if (!*proxy || strcmp(proxy, "none") == 0) + // disable proxy configuration in libcurl + curl_easy_setopt(curl, CURLOPT_PROXY, ""); + + else if (strcmp(proxy, "env") != 0) + // set the custom proxy for libcurl + curl_easy_setopt(curl, CURLOPT_PROXY, proxy); + + // otherwise, libcurl will use its own proxy environment variables + } + + // Insecure option + if (insecure) { + curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, 0L); + curl_easy_setopt(curl, CURLOPT_SSL_VERIFYHOST, 0L); + } + + // Set timeout options + curl_easy_setopt(curl, CURLOPT_TIMEOUT, 10); + curl_easy_setopt(curl, CURLOPT_CONNECTTIMEOUT, 5); + + // execute the request + res = curl_easy_perform(curl); + if (res != CURLE_OK) { + claim_agent_failure_reason_set("Request failed with error: %s", curl_easy_strerror(res)); + curl_easy_cleanup(curl); + curl_slist_free_all(headers); + *can_retry = true; + return false; + } + + // Get HTTP response code + long http_status_code; + 
+    curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &http_status_code);
+
+    bool ret = false;
+    if(http_status_code == 204) {
+        if(!cloud_conf_regenerate(claimed_id_str, machine_guid, hostname, token, rooms, url, proxy, insecure)) {
+            claim_agent_failure_reason_set("Failed to save claiming info to disk");
+        }
+        else {
+            claim_agent_failure_reason_set(NULL);
+            ret = true;
+        }
+
+        *can_retry = false;
+    }
+    else if (http_status_code == 422) {
+        if(buffer_strlen(response)) {
+            struct json_object *parsed_json;
+            struct json_object *error_key_obj;
+            const char *error_key = NULL;
+
+            parsed_json = json_tokener_parse(buffer_tostring(response));
+            if(parsed_json) {
+                if (json_object_object_get_ex(parsed_json, "errorMsgKey", &error_key_obj))
+                    error_key = json_object_get_string(error_key_obj);
+
+                if (!error_key)
+                    claim_agent_failure_reason_set("Failed with a response code %ld", http_status_code);
+                else if (strcmp(error_key, "ErrInvalidNodeID") == 0)
+                    claim_agent_failure_reason_set("Failed: the node id is invalid");
+                else if (strcmp(error_key, "ErrInvalidNodeName") == 0)
+                    claim_agent_failure_reason_set("Failed: the node name is invalid");
+                else if (strcmp(error_key, "ErrInvalidRoomID") == 0)
+                    claim_agent_failure_reason_set("Failed: one or more room ids are invalid");
+                else if (strcmp(error_key, "ErrInvalidPublicKey") == 0)
+                    claim_agent_failure_reason_set("Failed: the public key is invalid");
+                else
+                    claim_agent_failure_reason_set("Failed with description '%s'", error_key);
+
+                json_object_put(parsed_json);
+            }
+            else
+                claim_agent_failure_reason_set("Failed with a response code %ld", http_status_code);
+        }
+        else
+            claim_agent_failure_reason_set("Failed with an empty response, code %ld", http_status_code);
+
+        *can_retry = false;
+    }
+    else if(http_status_code == 102) {
+        claim_agent_failure_reason_set("Claiming is in progress");
+        *can_retry = false;
+    }
+    else if(http_status_code == 403) {
+        claim_agent_failure_reason_set("Failed: token is expired, not found, or invalid");
+        *can_retry = false;
+    }
+    else if(http_status_code == 409) {
+        claim_agent_failure_reason_set("Failed: agent is already claimed");
+        *can_retry = false;
+    }
+    else if(http_status_code == 500) {
+        claim_agent_failure_reason_set("Failed: received Internal Server Error");
+        *can_retry = true;
+    }
+    else if(http_status_code == 503) {
+        claim_agent_failure_reason_set("Failed: Netdata Cloud is unavailable");
+        *can_retry = true;
+    }
+    else if(http_status_code == 504) {
+        claim_agent_failure_reason_set("Failed: Gateway Timeout");
+        *can_retry = true;
+    }
+    else {
+        claim_agent_failure_reason_set("Failed with response code %ld", http_status_code);
+        *can_retry = true;
+    }
+
+    curl_easy_cleanup(curl);
+    curl_slist_free_all(headers);
+    return ret;
+}
+
+bool claim_agent(const char *url, const char *token, const char *rooms, const char *proxy, bool insecure) {
+    static SPINLOCK spinlock = NETDATA_SPINLOCK_INITIALIZER;
+    spinlock_lock(&spinlock);
+
+    if (!check_and_generate_certificates()) {
+        spinlock_unlock(&spinlock);
+        return false;
+    }
+
+    bool done = false, can_retry = true;
+    size_t retries = 0;
+    do {
+        done = send_curl_request(registry_get_this_machine_guid(), registry_get_this_machine_hostname(), token, rooms, url, proxy, insecure, &can_retry);
+        if (done) break;
+        sleep_usec(300 * USEC_PER_MS + 100 * retries * USEC_PER_MS);
+        retries++;
+    } while(can_retry && retries < 5);
+
+    spinlock_unlock(&spinlock);
+    return done;
+}
+
+bool claim_agent_from_environment(void) {
+    const char *url = getenv("NETDATA_CLAIM_URL");
+    if(!url || !*url) {
+        url = appconfig_get(&cloud_config, CONFIG_SECTION_GLOBAL, "url", DEFAULT_CLOUD_BASE_URL);
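+        // defensive: appconfig_get() should have returned the compiled-in default
+        // (DEFAULT_CLOUD_BASE_URL), but bail out if the URL is still empty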
+        if(!url || !*url) return false;
+    }
+
+    const char *token = getenv("NETDATA_CLAIM_TOKEN");
+    if(!token || !*token)
+        return false;
+
+    const char *rooms = getenv("NETDATA_CLAIM_ROOMS");
+    if(!rooms)
+        rooms = "";
+
+    const char *proxy = getenv("NETDATA_CLAIM_PROXY");
+    if(!proxy || !*proxy)
+        proxy = "";
+
+    bool insecure = CONFIG_BOOLEAN_NO;
+    const char *from_env = getenv("NETDATA_EXTRA_CLAIM_OPTS");
+    if(from_env && *from_env && strstr(from_env, "-insecure") != NULL)
+        insecure = CONFIG_BOOLEAN_YES;
+
+    return claim_agent(url, token, rooms, proxy, insecure);
+}
+
+bool claim_agent_from_claim_conf(void) {
+    static struct config claim_config = APPCONFIG_INITIALIZER;
+    static SPINLOCK spinlock = NETDATA_SPINLOCK_INITIALIZER;
+    bool ret = false;
+
+    spinlock_lock(&spinlock);
+
+    errno_clear();
+    char *filename = filename_from_path_entry_strdupz(netdata_configured_user_config_dir, "claim.conf");
+    bool loaded = appconfig_load(&claim_config, filename, 1, NULL);
+    freez(filename);
+
+    if(loaded) {
+        const char *url = appconfig_get(&claim_config, CONFIG_SECTION_GLOBAL, "url", DEFAULT_CLOUD_BASE_URL);
+        const char *token = appconfig_get(&claim_config, CONFIG_SECTION_GLOBAL, "token", "");
+        const char *rooms = appconfig_get(&claim_config, CONFIG_SECTION_GLOBAL, "rooms", "");
+        const char *proxy = appconfig_get(&claim_config, CONFIG_SECTION_GLOBAL, "proxy", "");
+        bool insecure = appconfig_get_boolean(&claim_config, CONFIG_SECTION_GLOBAL, "insecure", CONFIG_BOOLEAN_NO);
+
+        if(token && *token && url && *url)
+            ret = claim_agent(url, token, rooms, proxy, insecure);
+    }
+
+    spinlock_unlock(&spinlock);
+
+    return ret;
+}
+
+bool claim_agent_from_split_files(void) {
+    char filename[FILENAME_MAX + 1];
+
+    snprintfz(filename, sizeof(filename), "%s/token", netdata_configured_cloud_dir);
+    long token_len = 0;
+    char *token = read_by_filename(filename, &token_len);
+    if(!token || !*token) {
+        freez(token);
+        return false;
+    }
+
+    snprintfz(filename, sizeof(filename), "%s/rooms", netdata_configured_cloud_dir);
+    long rooms_len = 0;
+    char *rooms = read_by_filename(filename, &rooms_len);
+    if(!rooms || !*rooms) {
+        freez(rooms);
+        rooms = NULL;
+    }
+
+    bool ret = claim_agent(cloud_config_url_get(), token, rooms, cloud_config_proxy_get(), cloud_config_insecure_get());
+
+    if(ret) {
+        snprintfz(filename, sizeof(filename), "%s/token", netdata_configured_cloud_dir);
+        unlink(filename);
+
+        snprintfz(filename, sizeof(filename), "%s/rooms", netdata_configured_cloud_dir);
+        unlink(filename);
+    }
+
+    freez(token);
+    freez(rooms);
+
+    return ret;
+}
+
+bool claim_agent_automatically(void) {
+    // Use /etc/netdata/claim.conf
+
+    if(claim_agent_from_claim_conf())
+        return true;
+
+    // Users may set NETDATA_CLAIM_TOKEN and NETDATA_CLAIM_ROOMS.
+    // A good choice for docker container users.
+
+    if(claim_agent_from_environment())
+        return true;
+
+    // Users may store the token and rooms in /var/lib/netdata/cloud.d.
+    // This was a bad choice, since users may have to create this directory,
+    // which may end up with the wrong permissions, preventing netdata from storing
+    // the required information there.
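+    // illustrative layout, assuming the default cloud directory:
+    //   /var/lib/netdata/cloud.d/token  - the claim token
+    //   /var/lib/netdata/cloud.d/rooms  - comma-separated list of room ids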
+ + if(claim_agent_from_split_files()) + return true; + + return false; +} diff --git a/src/claim/claim.c b/src/claim/claim.c index 5383aac37..24e4e1c3c 100644 --- a/src/claim/claim.c +++ b/src/claim/claim.c @@ -1,470 +1,209 @@ // SPDX-License-Identifier: GPL-3.0-or-later #include "claim.h" -#include "registry/registry_internals.h" -#include "aclk/aclk.h" -#include "aclk/aclk_proxy.h" - -char *claiming_pending_arguments = NULL; - -static char *claiming_errors[] = { - "Agent claimed successfully", // 0 - "Unknown argument", // 1 - "Problems with claiming working directory", // 2 - "Missing dependencies", // 3 - "Failure to connect to endpoint", // 4 - "The CLI didn't work", // 5 - "Wrong user", // 6 - "Unknown HTTP error message", // 7 - "invalid node id", // 8 - "invalid node name", // 9 - "invalid room id", // 10 - "invalid public key", // 11 - "token expired/token not found/invalid token", // 12 - "already claimed", // 13 - "processing claiming", // 14 - "Internal Server Error", // 15 - "Gateway Timeout", // 16 - "Service Unavailable", // 17 - "Agent Unique Id Not Readable" // 18 -}; - -/* Retrieve the claim id for the agent. - * Caller owns the string. -*/ -char *get_agent_claimid() -{ - char *result; - rrdhost_aclk_state_lock(localhost); - result = (localhost->aclk_state.claimed_id == NULL) ? NULL : strdupz(localhost->aclk_state.claimed_id); - rrdhost_aclk_state_unlock(localhost); - return result; -} - -#define CLAIMING_COMMAND_LENGTH 16384 -#define CLAIMING_PROXY_LENGTH (CLAIMING_COMMAND_LENGTH/4) -/* rrd_init() and post_conf_load() must have been called before this function */ -CLAIM_AGENT_RESPONSE claim_agent(const char *claiming_arguments, bool force, const char **msg __maybe_unused) -{ - if (!force || !netdata_cloud_enabled) { - netdata_log_error("Refusing to claim agent -> cloud functionality has been disabled"); - return CLAIM_AGENT_CLOUD_DISABLED; - } +// -------------------------------------------------------------------------------------------------------------------- +// keep track of the last claiming failure reason -#ifndef DISABLE_CLOUD - char command_exec_buffer[CLAIMING_COMMAND_LENGTH + 1]; - char command_line_buffer[CLAIMING_COMMAND_LENGTH + 1]; +static char cloud_claim_failure_reason[4096] = ""; - // This is guaranteed to be set early in main via post_conf_load() - char *cloud_base_url = appconfig_get(&cloud_config, CONFIG_SECTION_GLOBAL, "cloud base url", NULL); - if (cloud_base_url == NULL) { - internal_fatal(true, "Do not move the cloud base url out of post_conf_load!!"); - return CLAIM_AGENT_NO_CLOUD_URL; +void claim_agent_failure_reason_set(const char *format, ...) { + if(!format || !*format) { + cloud_claim_failure_reason[0] = '\0'; + return; } - const char *proxy_str; - ACLK_PROXY_TYPE proxy_type; - char proxy_flag[CLAIMING_PROXY_LENGTH] = "-noproxy"; - - proxy_str = aclk_get_proxy(&proxy_type); - - if (proxy_type == PROXY_TYPE_SOCKS5 || proxy_type == PROXY_TYPE_HTTP) - snprintf(proxy_flag, CLAIMING_PROXY_LENGTH, "-proxy=\"%s\"", proxy_str); - - snprintfz(command_exec_buffer, CLAIMING_COMMAND_LENGTH, - "exec \"%s%snetdata-claim.sh\"", - netdata_exe_path[0] ? netdata_exe_path : "", - netdata_exe_path[0] ? 
"/" : "" - ); - - snprintfz(command_line_buffer, - CLAIMING_COMMAND_LENGTH, - "%s %s -hostname=%s -id=%s -url=%s -noreload %s", - command_exec_buffer, - proxy_flag, - netdata_configured_hostname, - localhost->machine_guid, - cloud_base_url, - claiming_arguments); - - netdata_log_info("Executing agent claiming command: %s", command_exec_buffer); - POPEN_INSTANCE *instance = spawn_popen_run(command_line_buffer); - if(!instance) { - netdata_log_error("Cannot popen(\"%s\").", command_exec_buffer); - return CLAIM_AGENT_CANNOT_EXECUTE_CLAIM_SCRIPT; - } + va_list args; + va_start(args, format); + vsnprintf(cloud_claim_failure_reason, sizeof(cloud_claim_failure_reason), format, args); + va_end(args); + + nd_log(NDLS_DAEMON, NDLP_ERR, + "CLAIM: %s", cloud_claim_failure_reason); +} - netdata_log_info("Waiting for claiming command '%s' to finish.", command_exec_buffer); - char read_buffer[100 + 1]; - while (fgets(read_buffer, 100, instance->child_stdout_fp) != NULL) ; +const char *claim_agent_failure_reason_get(void) { + if(!cloud_claim_failure_reason[0]) + return "Agent is not claimed yet"; + else + return cloud_claim_failure_reason; +} - int exit_code = spawn_popen_wait(instance); +// -------------------------------------------------------------------------------------------------------------------- +// claimed_id load/save - netdata_log_info("Agent claiming command '%s' returned with code %d", command_exec_buffer, exit_code); - if (0 == exit_code) { - load_claiming_state(); - return CLAIM_AGENT_OK; - } - if (exit_code < 0) { - netdata_log_error("Agent claiming command '%s' failed to complete its run", command_exec_buffer); - return CLAIM_AGENT_CLAIM_SCRIPT_FAILED; +bool claimed_id_save_to_file(const char *claimed_id_str) { + bool ret; + const char *filename = filename_from_path_entry_strdupz(netdata_configured_cloud_dir, "claimed_id"); + FILE *fp = fopen(filename, "w"); + if(fp) { + fprintf(fp, "%s", claimed_id_str); + fclose(fp); + ret = true; } - errno_clear(); - unsigned maximum_known_exit_code = sizeof(claiming_errors) / sizeof(claiming_errors[0]) - 1; - - if ((unsigned)exit_code > maximum_known_exit_code) { - netdata_log_error("Agent failed to be claimed with an unknown error. Cmd: '%s'", command_exec_buffer); - return CLAIM_AGENT_CLAIM_SCRIPT_RETURNED_INVALID_CODE; + else { + nd_log(NDLS_DAEMON, NDLP_ERR, + "CLAIM: cannot open file '%s' for writing.", filename); + ret = false; } - netdata_log_error("Agent failed to be claimed using the command '%s' with the following error message: %s", - command_exec_buffer, claiming_errors[exit_code]); + freez((void *)filename); + return ret; +} - if(msg) *msg = claiming_errors[exit_code]; +static ND_UUID claimed_id_parse(const char *claimed_id, const char *source) { + ND_UUID uuid; -#else - UNUSED(claiming_arguments); - UNUSED(claiming_errors); -#endif + if(uuid_parse_flexi(claimed_id, uuid.uuid) != 0) { + uuid = UUID_ZERO; + nd_log(NDLS_DAEMON, NDLP_ERR, + "CLAIM: claimed_id '%s' (loaded from '%s'), is not a valid UUID.", + claimed_id, source); + } - return CLAIM_AGENT_FAILED_WITH_MESSAGE; + return uuid; } -/* Change the claimed state of the agent. 
- * - * This only happens when the user has explicitly requested it: - * - via the cli tool by reloading the claiming state - * - after spawning the claim because of a command-line argument - * If this happens with the ACLK active under an old claim then we MUST KILL THE LINK - */ -void load_claiming_state(void) -{ - // -------------------------------------------------------------------- - // Check if the cloud is enabled -#if defined( DISABLE_CLOUD ) || !defined( ENABLE_ACLK ) - netdata_cloud_enabled = false; -#else - nd_uuid_t uuid; - - // Propagate into aclk and registry. Be kind of atomic... - appconfig_get(&cloud_config, CONFIG_SECTION_GLOBAL, "cloud base url", DEFAULT_CLOUD_BASE_URL); - - rrdhost_aclk_state_lock(localhost); - if (localhost->aclk_state.claimed_id) { - if (aclk_connected) - localhost->aclk_state.prev_claimed_id = strdupz(localhost->aclk_state.claimed_id); - freez(localhost->aclk_state.claimed_id); - localhost->aclk_state.claimed_id = NULL; - } - if (aclk_connected) - { - netdata_log_info("Agent was already connected to Cloud - forcing reconnection under new credentials"); - aclk_kill_link = 1; - } - aclk_disable_runtime = 0; - - char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, "%s/cloud.d/claimed_id", netdata_configured_varlib_dir); +static ND_UUID claimed_id_load_from_file(void) { + ND_UUID uuid; long bytes_read; + const char *filename = filename_from_path_entry_strdupz(netdata_configured_cloud_dir, "claimed_id"); char *claimed_id = read_by_filename(filename, &bytes_read); - if(claimed_id && uuid_parse(claimed_id, uuid)) { - netdata_log_error("claimed_id \"%s\" doesn't look like valid UUID", claimed_id); - freez(claimed_id); - claimed_id = NULL; - } - - if(claimed_id) { - localhost->aclk_state.claimed_id = mallocz(UUID_STR_LEN); - uuid_unparse_lower(uuid, localhost->aclk_state.claimed_id); - } - rrdhost_aclk_state_unlock(localhost); - invalidate_node_instances(&localhost->host_uuid, claimed_id ? &uuid : NULL); - metaqueue_store_claim_id(&localhost->host_uuid, claimed_id ? &uuid : NULL); - - if (!claimed_id) { - netdata_log_info("Unable to load '%s', setting state to AGENT_UNCLAIMED", filename); - return; - } + if(!claimed_id) + uuid = UUID_ZERO; + else + uuid = claimed_id_parse(claimed_id, filename); freez(claimed_id); - - netdata_log_info("File '%s' was found. Setting state to AGENT_CLAIMED.", filename); - netdata_cloud_enabled = appconfig_get_boolean_ondemand(&cloud_config, CONFIG_SECTION_GLOBAL, "enabled", netdata_cloud_enabled); -#endif + freez((void *)filename); + return uuid; } -struct config cloud_config = { .first_section = NULL, - .last_section = NULL, - .mutex = NETDATA_MUTEX_INITIALIZER, - .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare }, - .rwlock = AVL_LOCK_INITIALIZER } }; - -void load_cloud_conf(int silent) -{ - char *nd_disable_cloud = getenv("NETDATA_DISABLE_CLOUD"); - if (nd_disable_cloud && !strncmp(nd_disable_cloud, "1", 1)) - netdata_cloud_enabled = CONFIG_BOOLEAN_NO; - - char *filename; - errno_clear(); - - int ret = 0; - - filename = strdupz_path_subpath(netdata_configured_varlib_dir, "cloud.d/cloud.conf"); - - ret = appconfig_load(&cloud_config, filename, 1, NULL); - if(!ret && !silent) - netdata_log_info("CONFIG: cannot load cloud config '%s'. 
Running with internal defaults.", filename); - - freez(filename); - - // -------------------------------------------------------------------- - // Check if the cloud is enabled - -#if defined( DISABLE_CLOUD ) || !defined( ENABLE_ACLK ) - netdata_cloud_enabled = CONFIG_BOOLEAN_NO; -#else - netdata_cloud_enabled = appconfig_get_boolean_ondemand(&cloud_config, CONFIG_SECTION_GLOBAL, "enabled", netdata_cloud_enabled); -#endif - - // This must be set before any point in the code that accesses it. Do not move it from this function. - appconfig_get(&cloud_config, CONFIG_SECTION_GLOBAL, "cloud base url", DEFAULT_CLOUD_BASE_URL); -} - -static char *netdata_random_session_id_filename = NULL; -static nd_uuid_t netdata_random_session_id = { 0 }; - -bool netdata_random_session_id_generate(void) { - static char guid[UUID_STR_LEN] = ""; - - uuid_generate_random(netdata_random_session_id); - uuid_unparse_lower(netdata_random_session_id, guid); - - char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, "%s/netdata_random_session_id", netdata_configured_varlib_dir); - - bool ret = true; - - (void)unlink(filename); - - // save it - int fd = open(filename, O_WRONLY|O_CREAT|O_TRUNC|O_CLOEXEC, 640); - if(fd == -1) { - netdata_log_error("Cannot create random session id file '%s'.", filename); - ret = false; +static ND_UUID claimed_id_get_from_cloud_conf(void) { + if(appconfig_exists(&cloud_config, CONFIG_SECTION_GLOBAL, "claimed_id")) { + const char *claimed_id = appconfig_get(&cloud_config, CONFIG_SECTION_GLOBAL, "claimed_id", ""); + if(claimed_id && *claimed_id) + return claimed_id_parse(claimed_id, "cloud.conf"); } - else { - if (write(fd, guid, UUID_STR_LEN - 1) != UUID_STR_LEN - 1) { - netdata_log_error("Cannot write the random session id file '%s'.", filename); - ret = false; - } else { - ssize_t bytes = write(fd, "\n", 1); - UNUSED(bytes); - } - close(fd); - } - - if(ret && (!netdata_random_session_id_filename || strcmp(netdata_random_session_id_filename, filename) != 0)) { - freez(netdata_random_session_id_filename); - netdata_random_session_id_filename = strdupz(filename); - } - - return ret; + return UUID_ZERO; } -const char *netdata_random_session_id_get_filename(void) { - if(!netdata_random_session_id_filename) - netdata_random_session_id_generate(); +static ND_UUID claimed_id_load(void) { + ND_UUID uuid = claimed_id_get_from_cloud_conf(); + if(UUIDiszero(uuid)) + uuid = claimed_id_load_from_file(); - return netdata_random_session_id_filename; + return uuid; } -bool netdata_random_session_id_matches(const char *guid) { - if(uuid_is_null(netdata_random_session_id)) - return false; +bool is_agent_claimed(void) { + ND_UUID uuid = claim_id_get_uuid(); + return !UUIDiszero(uuid); +} - nd_uuid_t uuid; +// -------------------------------------------------------------------------------------------------------------------- - if(uuid_parse(guid, uuid)) +bool claim_id_matches(const char *claim_id) { + ND_UUID this_one = UUID_ZERO; + if(uuid_parse_flexi(claim_id, this_one.uuid) != 0 || UUIDiszero(this_one)) return false; - if(uuid_compare(netdata_random_session_id, uuid) == 0) + ND_UUID having = claim_id_get_uuid(); + if(!UUIDiszero(having) && UUIDeq(having, this_one)) return true; return false; } -static bool check_claim_param(const char *s) { - if(!s || !*s) return true; +bool claim_id_matches_any(const char *claim_id) { + ND_UUID this_one = UUID_ZERO; + if(uuid_parse_flexi(claim_id, this_one.uuid) != 0 || UUIDiszero(this_one)) + return false; - do { - if(isalnum((uint8_t)*s) || *s == '.' 
|| *s == ',' || *s == '-' || *s == ':' || *s == '/' || *s == '_')
-            ;
-        else
-            return false;
+    ND_UUID having = claim_id_get_uuid();
+    if(!UUIDiszero(having) && UUIDeq(having, this_one))
+        return true;
-    } while(*++s);
+    having = localhost->aclk.claim_id_of_parent;
+    if(!UUIDiszero(having) && UUIDeq(having, this_one))
+        return true;
-    return true;
-}
+    having = localhost->aclk.claim_id_of_origin;
+    if(!UUIDiszero(having) && UUIDeq(having, this_one))
+        return true;
-void claim_reload_all(void) {
-    nd_log_limits_unlimited();
-    load_claiming_state();
-    registry_update_cloud_base_url();
-    rrdpush_send_claimed_id(localhost);
-    nd_log_limits_reset();
+    return false;
 }
-int api_v2_claim(struct web_client *w, char *url) {
-    char *key = NULL;
-    char *token = NULL;
-    char *rooms = NULL;
-    char *base_url = NULL;
-
-    while (url) {
-        char *value = strsep_skip_consecutive_separators(&url, "&");
-        if (!value || !*value) continue;
-
-        char *name = strsep_skip_consecutive_separators(&value, "=");
-        if (!name || !*name) continue;
-        if (!value || !*value) continue;
-
-        if(!strcmp(name, "key"))
-            key = value;
-        else if(!strcmp(name, "token"))
-            token = value;
-        else if(!strcmp(name, "rooms"))
-            rooms = value;
-        else if(!strcmp(name, "url"))
-            base_url = value;
 }
+/* Change the claimed state of the agent.
+ *
+ * This only happens when the user has explicitly requested it:
+ * - via the cli tool by reloading the claiming state
+ * - after spawning the claim because of a command-line argument
+ * If this happens with the ACLK active under an old claim then we MUST KILL THE LINK
+ */
+bool load_claiming_state(void) {
+    if (aclk_online()) {
+        nd_log(NDLS_DAEMON, NDLP_ERR,
+               "CLAIM: agent was already connected to NC - forcing reconnection under new credentials");
+        disconnect_req = ACLK_RELOAD_CONF;
+    }
+    aclk_disable_runtime = 0;
-    BUFFER *wb = w->response.data;
-    buffer_flush(wb);
-    buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT);
-
-    time_t now_s = now_realtime_sec();
-    CLOUD_STATUS status = buffer_json_cloud_status(wb, now_s);
-
-    bool can_be_claimed = false;
-    switch(status) {
-        case CLOUD_STATUS_AVAILABLE:
-        case CLOUD_STATUS_DISABLED:
-        case CLOUD_STATUS_OFFLINE:
-            can_be_claimed = true;
-            break;
-
-        case CLOUD_STATUS_UNAVAILABLE:
-        case CLOUD_STATUS_BANNED:
-        case CLOUD_STATUS_ONLINE:
-            can_be_claimed = false;
-            break;
+    ND_UUID uuid = claimed_id_load();
+    if(UUIDiszero(uuid)) {
+        // not found - try to claim the agent automatically
+        if(claim_agent_automatically())
+            uuid = claimed_id_load();
     }
-    buffer_json_member_add_boolean(wb, "can_be_claimed", can_be_claimed);
-
-    if(can_be_claimed && key) {
-        if(!netdata_random_session_id_matches(key)) {
-            buffer_reset(wb);
-            buffer_strcat(wb, "invalid key");
-            netdata_random_session_id_generate(); // generate a new key, to avoid an attack to find it
-            return HTTP_RESP_FORBIDDEN;
-        }
-
-        if(!token || !base_url || !check_claim_param(token) || !check_claim_param(base_url) || (rooms && !check_claim_param(rooms))) {
-            buffer_reset(wb);
-            buffer_strcat(wb, "invalid parameters");
-            netdata_random_session_id_generate(); // generate a new key, to avoid an attack to find it
-            return HTTP_RESP_BAD_REQUEST;
-        }
-
-        netdata_random_session_id_generate(); // generate a new key, to avoid an attack to find it
-
-        netdata_cloud_enabled = CONFIG_BOOLEAN_AUTO;
-        appconfig_set_boolean(&cloud_config, CONFIG_SECTION_GLOBAL, "enabled", CONFIG_BOOLEAN_AUTO);
-        appconfig_set(&cloud_config, CONFIG_SECTION_GLOBAL, "cloud base url", base_url);
+    bool have_claimed_id = false;
+    if(!UUIDiszero(uuid)) {
+        // we got it somehow
+        claim_id_set(uuid);
+        have_claimed_id = true;
+    }
-        nd_uuid_t claimed_id;
-        uuid_generate_random(claimed_id);
-
char claimed_id_str[UUID_STR_LEN]; - uuid_unparse_lower(claimed_id, claimed_id_str); - - BUFFER *t = buffer_create(1024, NULL); - if(rooms) - buffer_sprintf(t, "-id=%s -token=%s -rooms=%s", claimed_id_str, token, rooms); - else - buffer_sprintf(t, "-id=%s -token=%s", claimed_id_str, token); - - bool success = false; - const char *msg = NULL; - CLAIM_AGENT_RESPONSE rc = claim_agent(buffer_tostring(t), true, &msg); - switch(rc) { - case CLAIM_AGENT_OK: - msg = "ok"; - success = true; - can_be_claimed = false; - claim_reload_all(); - { - int ms = 0; - do { - status = cloud_status(); - if (status == CLOUD_STATUS_ONLINE && __atomic_load_n(&localhost->node_id, __ATOMIC_RELAXED)) - break; - - sleep_usec(50 * USEC_PER_MS); - ms += 50; - } while (ms < 10000); - } - break; + bool have_claimed_id = false; + if(!UUIDiszero(uuid)) { + // we go it somehow + claim_id_set(uuid); + have_claimed_id = true; + } - case CLAIM_AGENT_NO_CLOUD_URL: - msg = "No Netdata Cloud URL."; - break; + invalidate_node_instances(&localhost->host_id.uuid, have_claimed_id ? &uuid.uuid : NULL); + metaqueue_store_claim_id(&localhost->host_id.uuid, have_claimed_id ? &uuid.uuid : NULL); - case CLAIM_AGENT_CLAIM_SCRIPT_FAILED: - msg = "Claiming script failed."; - break; + errno_clear(); - case CLAIM_AGENT_CLOUD_DISABLED: - msg = "Netdata Cloud is disabled on this agent."; - break; + if (!have_claimed_id) + nd_log(NDLS_DAEMON, NDLP_ERR, + "CLAIM: Unable to find our claimed_id, setting state to AGENT_UNCLAIMED"); + else + nd_log(NDLS_DAEMON, NDLP_INFO, + "CLAIM: Found a valid claimed_id, setting state to AGENT_CLAIMED"); - case CLAIM_AGENT_CANNOT_EXECUTE_CLAIM_SCRIPT: - msg = "Failed to execute claiming script."; - break; + return have_claimed_id; +} - case CLAIM_AGENT_CLAIM_SCRIPT_RETURNED_INVALID_CODE: - msg = "Claiming script returned invalid code."; - break; +CLOUD_STATUS claim_reload_and_wait_online(void) { + nd_log(NDLS_DAEMON, NDLP_INFO, + "CLAIM: Reloading Agent Claiming configuration."); - default: - case CLAIM_AGENT_FAILED_WITH_MESSAGE: - if(!msg) - msg = "Unknown error"; - break; - } - - // our status may have changed - // refresh the status in our output - buffer_flush(wb); - buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT); - now_s = now_realtime_sec(); - buffer_json_cloud_status(wb, now_s); - - // and this is the status of the claiming command we run - buffer_json_member_add_boolean(wb, "success", success); - buffer_json_member_add_string(wb, "message", msg); - } + nd_log_limits_unlimited(); + cloud_conf_load(0); + bool claimed = load_claiming_state(); + registry_update_cloud_base_url(); + rrdpush_sender_send_claimed_id(localhost); + nd_log_limits_reset(); - if(can_be_claimed) - buffer_json_member_add_string(wb, "key_filename", netdata_random_session_id_get_filename()); + CLOUD_STATUS status = cloud_status(); + if(claimed) { + int ms = 0; + do { + status = cloud_status(); + if ((status == CLOUD_STATUS_ONLINE) && !UUIDiszero(localhost->node_id)) + break; - buffer_json_agents_v2(wb, NULL, now_s, false, false); - buffer_json_finalize(wb); + sleep_usec(50 * USEC_PER_MS); + ms += 50; + } while (ms < 10000); + } - return HTTP_RESP_OK; + return status; } diff --git a/src/claim/claim.h b/src/claim/claim.h index ccab8aaa1..073771d1c 100644 --- a/src/claim/claim.h +++ b/src/claim/claim.h @@ -4,29 +4,32 @@ #define NETDATA_CLAIM_H 1 #include "daemon/common.h" +#include "cloud-status.h" +#include "claim_id.h" + +const char *claim_agent_failure_reason_get(void); +void claim_agent_failure_reason_set(const 
char *format, ...) PRINTFLIKE(1, 2); -extern char *claiming_pending_arguments; extern struct config cloud_config; -typedef enum __attribute__((packed)) { - CLAIM_AGENT_OK, - CLAIM_AGENT_CLOUD_DISABLED, - CLAIM_AGENT_NO_CLOUD_URL, - CLAIM_AGENT_CANNOT_EXECUTE_CLAIM_SCRIPT, - CLAIM_AGENT_CLAIM_SCRIPT_FAILED, - CLAIM_AGENT_CLAIM_SCRIPT_RETURNED_INVALID_CODE, - CLAIM_AGENT_FAILED_WITH_MESSAGE, -} CLAIM_AGENT_RESPONSE; - -CLAIM_AGENT_RESPONSE claim_agent(const char *claiming_arguments, bool force, const char **msg); -char *get_agent_claimid(void); -void load_claiming_state(void); -void load_cloud_conf(int silent); -void claim_reload_all(void); - -bool netdata_random_session_id_generate(void); -const char *netdata_random_session_id_get_filename(void); -bool netdata_random_session_id_matches(const char *guid); -int api_v2_claim(struct web_client *w, char *url); +bool claim_agent(const char *url, const char *token, const char *rooms, const char *proxy, bool insecure); +bool claim_agent_automatically(void); + +bool claimed_id_save_to_file(const char *claimed_id_str); + +bool is_agent_claimed(void); +bool claim_id_matches(const char *claim_id); +bool claim_id_matches_any(const char *claim_id); +bool load_claiming_state(void); +void cloud_conf_load(int silent); +void cloud_conf_init_after_registry(void); +bool cloud_conf_save(void); +bool cloud_conf_regenerate(const char *claimed_id_str, const char *machine_guid, const char *hostname, const char *token, const char *rooms, const char *url, const char *proxy, int insecure); +CLOUD_STATUS claim_reload_and_wait_online(void); + +const char *cloud_config_url_get(void); +void cloud_config_url_set(const char *url); +const char *cloud_config_proxy_get(void); +bool cloud_config_insecure_get(void); #endif //NETDATA_CLAIM_H diff --git a/src/claim/claim_id.c b/src/claim/claim_id.c new file mode 100644 index 000000000..dd79eb640 --- /dev/null +++ b/src/claim/claim_id.c @@ -0,0 +1,123 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "claim_id.h" + +static struct { + SPINLOCK spinlock; + ND_UUID claim_uuid; + ND_UUID claim_uuid_saved; +} claim = { + .spinlock = NETDATA_SPINLOCK_INITIALIZER, +}; + +void claim_id_clear_previous_working(void) { + spinlock_lock(&claim.spinlock); + claim.claim_uuid_saved = UUID_ZERO; + spinlock_unlock(&claim.spinlock); +} + +void claim_id_set(ND_UUID new_claim_id) { + spinlock_lock(&claim.spinlock); + + if(!UUIDiszero(claim.claim_uuid)) { + if(aclk_online()) + claim.claim_uuid_saved = claim.claim_uuid; + claim.claim_uuid = UUID_ZERO; + } + + claim.claim_uuid = new_claim_id; + if(localhost) + localhost->aclk.claim_id_of_origin = claim.claim_uuid; + + spinlock_unlock(&claim.spinlock); +} + +// returns true when the supplied str is a valid UUID. +// giving NULL, an empty string, or "NULL" is valid. 
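+// e.g. claim_id_set_str("NULL") stores UUID_ZERO (clearing the claim id) and still returns true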
+bool claim_id_set_str(const char *claim_id_str) {
+    bool rc;
+
+    ND_UUID uuid;
+    if(!claim_id_str || !*claim_id_str || strcmp(claim_id_str, "NULL") == 0) {
+        uuid = UUID_ZERO;
+        rc = true;
+    }
+    else
+        rc = uuid_parse(claim_id_str, uuid.uuid) == 0;
+
+    claim_id_set(uuid);
+
+    return rc;
+}
+
+ND_UUID claim_id_get_uuid(void) {
+    ND_UUID uuid;
+    spinlock_lock(&claim.spinlock);
+    uuid = claim.claim_uuid;
+    spinlock_unlock(&claim.spinlock);
+    return uuid;
+}
+
+void claim_id_get_str(char str[UUID_STR_LEN]) {
+    ND_UUID uuid = claim_id_get_uuid();
+
+    if(UUIDiszero(uuid))
+        memset(str, 0, UUID_STR_LEN);
+    else
+        uuid_unparse_lower(uuid.uuid, str);
+}
+
+const char *claim_id_get_str_mallocz(void) {
+    char *str = mallocz(UUID_STR_LEN);
+    claim_id_get_str(str);
+    return str;
+}
+
+CLAIM_ID claim_id_get(void) {
+    CLAIM_ID ret = {
+        .uuid = claim_id_get_uuid(),
+    };
+
+    if(claim_id_is_set(ret))
+        uuid_unparse_lower(ret.uuid.uuid, ret.str);
+    else
+        ret.str[0] = '\0';
+
+    return ret;
+}
+
+CLAIM_ID claim_id_get_last_working(void) {
+    CLAIM_ID ret = { 0 };
+
+    spinlock_lock(&claim.spinlock);
+    ret.uuid = claim.claim_uuid_saved;
+    spinlock_unlock(&claim.spinlock);
+
+    if(claim_id_is_set(ret))
+        uuid_unparse_lower(ret.uuid.uuid, ret.str);
+    else
+        ret.str[0] = '\0';
+
+    return ret;
+}
+
+CLAIM_ID rrdhost_claim_id_get(RRDHOST *host) {
+    CLAIM_ID ret = { 0 };
+
+    if(host == localhost) {
+        ret.uuid = claim_id_get_uuid();
+        if(UUIDiszero(ret.uuid))
+            ret.uuid = host->aclk.claim_id_of_parent;
+    }
+    else {
+        if (!UUIDiszero(host->aclk.claim_id_of_origin))
+            ret.uuid = host->aclk.claim_id_of_origin;
+        else
+            ret.uuid = host->aclk.claim_id_of_parent;
+    }
+
+    if(claim_id_is_set(ret))
+        uuid_unparse_lower(ret.uuid.uuid, ret.str);
+
+    return ret;
+}
diff --git a/src/claim/claim_id.h b/src/claim/claim_id.h
new file mode 100644
index 000000000..95958d430
--- /dev/null
+++ b/src/claim/claim_id.h
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_CLAIM_ID_H
+#define NETDATA_CLAIM_ID_H
+
+#include "claim.h"
+
+void claim_id_keep_current(void);
+
+bool claim_id_set_str(const char *claim_id_str);
+void claim_id_set(ND_UUID new_claim_id);
+void claim_id_clear_previous_working(void);
+ND_UUID claim_id_get_uuid(void);
+void claim_id_get_str(char str[UUID_STR_LEN]);
+const char *claim_id_get_str_mallocz(void);
+
+typedef struct {
+    ND_UUID uuid;
+    char str[UUID_STR_LEN];
+} CLAIM_ID;
+
+#define claim_id_is_set(claim_id) (!UUIDiszero(claim_id.uuid))
+
+CLAIM_ID claim_id_get(void);
+CLAIM_ID claim_id_get_last_working(void);
+CLAIM_ID rrdhost_claim_id_get(RRDHOST *host);
+
+#endif //NETDATA_CLAIM_ID_H
diff --git a/src/claim/cloud-conf.c b/src/claim/cloud-conf.c
new file mode 100644
index 000000000..bfa971b99
--- /dev/null
+++ b/src/claim/cloud-conf.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "claim.h"
+
+struct config cloud_config = APPCONFIG_INITIALIZER;
+
+const char *cloud_config_url_get(void) {
+    return appconfig_get(&cloud_config, CONFIG_SECTION_GLOBAL, "url", DEFAULT_CLOUD_BASE_URL);
+}
+
+void cloud_config_url_set(const char *url) {
+    if(!url || !*url) return;
+
+    const char *existing = cloud_config_url_get();
+    if(strcmp(existing, url) != 0)
+        appconfig_set(&cloud_config, CONFIG_SECTION_GLOBAL, "url", url);
+}
+
+const char *cloud_config_proxy_get(void) {
+    // load cloud.conf or internal default
+    const char *proxy = appconfig_get(&cloud_config, CONFIG_SECTION_GLOBAL, "proxy", "env");
+
+    // backwards compatibility, from when the proxy was in netdata.conf
+    // netdata.conf takes precedence
+    if (config_exists(CONFIG_SECTION_CLOUD, "proxy")) {
+        // get it from netdata.conf
+        proxy = config_get(CONFIG_SECTION_CLOUD, "proxy", proxy);
+
+        // update cloud.conf
+        proxy = appconfig_set(&cloud_config, CONFIG_SECTION_GLOBAL, "proxy", proxy);
+    }
+    else {
+        // set in netdata.conf the proxy of cloud.conf
+        config_set(CONFIG_SECTION_CLOUD, "proxy", proxy);
+    }
+
+    return proxy;
+}
+
+bool cloud_config_insecure_get(void) {
+    // load it from cloud.conf or use the internal default
+    return appconfig_get_boolean(&cloud_config, CONFIG_SECTION_GLOBAL, "insecure", CONFIG_BOOLEAN_NO);
+}
+
+static void cloud_conf_load_defaults(void) {
+    appconfig_get(&cloud_config, CONFIG_SECTION_GLOBAL, "url", DEFAULT_CLOUD_BASE_URL);
+    appconfig_get(&cloud_config, CONFIG_SECTION_GLOBAL, "proxy", "env");
+    appconfig_get(&cloud_config, CONFIG_SECTION_GLOBAL, "token", "");
+    appconfig_get(&cloud_config, CONFIG_SECTION_GLOBAL, "rooms", "");
+    appconfig_get_boolean(&cloud_config, CONFIG_SECTION_GLOBAL, "insecure", CONFIG_BOOLEAN_NO);
+    appconfig_get(&cloud_config, CONFIG_SECTION_GLOBAL, "machine_guid", "");
+    appconfig_get(&cloud_config, CONFIG_SECTION_GLOBAL, "claimed_id", "");
+    appconfig_get(&cloud_config, CONFIG_SECTION_GLOBAL, "hostname", "");
+}
+
+void cloud_conf_load(int silent) {
+    errno_clear();
+    char *filename = filename_from_path_entry_strdupz(netdata_configured_cloud_dir, "cloud.conf");
+    int ret = appconfig_load(&cloud_config, filename, 1, NULL);
+
+    if(!ret && !silent)
+        nd_log(NDLS_DAEMON, NDLP_ERR,
+               "CLAIM: cannot load cloud config '%s'. Running with internal defaults.", filename);
+
+    freez(filename);
+
+    appconfig_move(&cloud_config,
+                   CONFIG_SECTION_GLOBAL, "cloud base url",
+                   CONFIG_SECTION_GLOBAL, "url");
+
+    cloud_conf_load_defaults();
+}
+
+void cloud_conf_init_after_registry(void) {
+    const char *machine_guid = appconfig_get(&cloud_config, CONFIG_SECTION_GLOBAL, "machine_guid", "");
+    const char *hostname = appconfig_get(&cloud_config, CONFIG_SECTION_GLOBAL, "hostname", "");
+
+    // for the machine guid and the hostname we have to use appconfig_set(), so that they are saved uncommented
+    if(!machine_guid || !*machine_guid)
+        appconfig_set(&cloud_config, CONFIG_SECTION_GLOBAL, "machine_guid", registry_get_this_machine_guid());
+
+    if(!hostname || !*hostname)
+        appconfig_set(&cloud_config, CONFIG_SECTION_GLOBAL, "hostname", registry_get_this_machine_hostname());
+}
+
+bool cloud_conf_save(void) {
+    char filename[FILENAME_MAX + 1];
+
+    CLEAN_BUFFER *wb = buffer_create(0, NULL);
+    appconfig_generate(&cloud_config, wb, false, false);
+    snprintfz(filename, sizeof(filename), "%s/cloud.conf", netdata_configured_cloud_dir);
+    FILE *fp = fopen(filename, "w");
+    if(!fp) {
+        nd_log(NDLS_DAEMON, NDLP_ERR, "Cannot open file '%s' for writing.", filename);
+        return false;
+    }
+
+    fprintf(fp, "%s", buffer_tostring(wb));
+    fclose(fp);
+    return true;
+}
+
+bool cloud_conf_regenerate(const char *claimed_id_str, const char *machine_guid, const char *hostname, const char *token, const char *rooms, const char *url, const char *proxy, int insecure) {
+    // for backwards compatibility (older agents), save the claimed_id to its own file
+    claimed_id_save_to_file(claimed_id_str);
+
+    appconfig_set(&cloud_config, CONFIG_SECTION_GLOBAL, "url", url);
+    appconfig_set(&cloud_config, CONFIG_SECTION_GLOBAL, "proxy", proxy ? proxy : "");
+    appconfig_set(&cloud_config, CONFIG_SECTION_GLOBAL, "token", token ? token : "");
token : ""); + appconfig_set(&cloud_config, CONFIG_SECTION_GLOBAL, "rooms", rooms ? rooms : ""); + appconfig_set_boolean(&cloud_config, CONFIG_SECTION_GLOBAL, "insecure", insecure); + appconfig_set(&cloud_config, CONFIG_SECTION_GLOBAL, "machine_guid", machine_guid ? machine_guid : ""); + appconfig_set(&cloud_config, CONFIG_SECTION_GLOBAL, "claimed_id", claimed_id_str ? claimed_id_str : ""); + appconfig_set(&cloud_config, CONFIG_SECTION_GLOBAL, "hostname", hostname ? hostname : ""); + + return cloud_conf_save(); +} diff --git a/src/claim/cloud-status.c b/src/claim/cloud-status.c new file mode 100644 index 000000000..45db177e9 --- /dev/null +++ b/src/claim/cloud-status.c @@ -0,0 +1,134 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "claim.h" + +const char *cloud_status_to_string(CLOUD_STATUS status) { + switch(status) { + default: + case CLOUD_STATUS_AVAILABLE: + return "available"; + + case CLOUD_STATUS_BANNED: + return "banned"; + + case CLOUD_STATUS_OFFLINE: + return "offline"; + + case CLOUD_STATUS_ONLINE: + return "online"; + + case CLOUD_STATUS_INDIRECT: + return "indirect"; + } +} + +CLOUD_STATUS cloud_status(void) { + if(unlikely(aclk_disable_runtime)) + return CLOUD_STATUS_BANNED; + + if(likely(aclk_online())) + return CLOUD_STATUS_ONLINE; + + if(localhost->sender && + rrdhost_flag_check(localhost, RRDHOST_FLAG_RRDPUSH_SENDER_READY_4_METRICS) && + stream_has_capability(localhost->sender, STREAM_CAP_NODE_ID) && + !UUIDiszero(localhost->node_id) && + !UUIDiszero(localhost->aclk.claim_id_of_parent)) + return CLOUD_STATUS_INDIRECT; + + if(is_agent_claimed()) + return CLOUD_STATUS_OFFLINE; + + return CLOUD_STATUS_AVAILABLE; +} + +time_t cloud_last_change(void) { + time_t ret = MAX(last_conn_time_mqtt, last_disconnect_time); + if(!ret) ret = netdata_start_time; + return ret; +} + +time_t cloud_next_connection_attempt(void) { + return next_connection_attempt; +} + +size_t cloud_connection_id(void) { + return aclk_connection_counter; +} + +const char *cloud_status_aclk_offline_reason() { + if(aclk_disable_runtime) + return "banned"; + + return aclk_status_to_string(); +} + +const char *cloud_status_aclk_base_url() { + return aclk_cloud_base_url; +} + +CLOUD_STATUS buffer_json_cloud_status(BUFFER *wb, time_t now_s) { + CLOUD_STATUS status = cloud_status(); + + buffer_json_member_add_object(wb, "cloud"); + { + size_t id = cloud_connection_id(); + time_t last_change = cloud_last_change(); + time_t next_connect = cloud_next_connection_attempt(); + buffer_json_member_add_uint64(wb, "id", id); + buffer_json_member_add_string(wb, "status", cloud_status_to_string(status)); + buffer_json_member_add_time_t(wb, "since", last_change); + buffer_json_member_add_time_t(wb, "age", now_s - last_change); + + switch(status) { + default: + case CLOUD_STATUS_AVAILABLE: + // the agent is not claimed + buffer_json_member_add_string(wb, "url", cloud_config_url_get()); + buffer_json_member_add_string(wb, "reason", claim_agent_failure_reason_get()); + break; + + case CLOUD_STATUS_BANNED: { + // the agent is claimed, but has been banned from NC + CLAIM_ID claim_id = claim_id_get(); + buffer_json_member_add_string(wb, "claim_id", claim_id.str); + buffer_json_member_add_string(wb, "url", cloud_status_aclk_base_url()); + buffer_json_member_add_string(wb, "reason", "Agent is banned from Netdata Cloud"); + buffer_json_member_add_string(wb, "url", cloud_config_url_get()); + break; + } + + case CLOUD_STATUS_OFFLINE: { + // the agent is claimed, but cannot get online + CLAIM_ID claim_id = claim_id_get(); + 
+                buffer_json_member_add_string(wb, "claim_id", claim_id.str);
+                buffer_json_member_add_string(wb, "url", cloud_status_aclk_base_url());
+                buffer_json_member_add_string(wb, "reason", cloud_status_aclk_offline_reason());
+                if (next_connect > now_s) {
+                    buffer_json_member_add_time_t(wb, "next_check", next_connect);
+                    buffer_json_member_add_time_t(wb, "next_in", next_connect - now_s);
+                }
+                break;
+            }
+
+            case CLOUD_STATUS_ONLINE: {
+                // the agent is claimed and online
+                CLAIM_ID claim_id = claim_id_get();
+                buffer_json_member_add_string(wb, "claim_id", claim_id.str);
+                buffer_json_member_add_string(wb, "url", cloud_status_aclk_base_url());
+                buffer_json_member_add_string(wb, "reason", "");
+                break;
+            }
+
+            case CLOUD_STATUS_INDIRECT: {
+                CLAIM_ID claim_id = rrdhost_claim_id_get(localhost);
+                buffer_json_member_add_string(wb, "claim_id", claim_id.str);
+                buffer_json_member_add_string(wb, "url", cloud_config_url_get());
+                break;
+            }
+        }
+    }
+    buffer_json_object_close(wb); // cloud
+
+    return status;
+}
diff --git a/src/claim/cloud-status.h b/src/claim/cloud-status.h
new file mode 100644
index 000000000..648c114f9
--- /dev/null
+++ b/src/claim/cloud-status.h
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_CLOUD_STATUS_H
+#define NETDATA_CLOUD_STATUS_H
+
+#include "daemon/common.h"
+
+typedef enum __attribute__((packed)) {
+    CLOUD_STATUS_AVAILABLE = 1, // cloud and aclk functionality is available, but the agent is not claimed
+    CLOUD_STATUS_BANNED,        // the agent has been banned from cloud
+    CLOUD_STATUS_OFFLINE,       // the agent tries to connect to cloud, but cannot do it
+    CLOUD_STATUS_INDIRECT,      // the agent is connected to cloud via a parent
+    CLOUD_STATUS_ONLINE,        // the agent is connected to cloud
+} CLOUD_STATUS;
+
+const char *cloud_status_to_string(CLOUD_STATUS status);
+CLOUD_STATUS cloud_status(void);
+
+time_t cloud_last_change(void);
+time_t cloud_next_connection_attempt(void);
+size_t cloud_connection_id(void);
+const char *cloud_status_aclk_offline_reason(void);
+const char *cloud_status_aclk_base_url(void);
+CLOUD_STATUS buffer_json_cloud_status(BUFFER *wb, time_t now_s);
+
+#endif //NETDATA_CLOUD_STATUS_H
diff --git a/src/claim/main.c b/src/claim/main.c
new file mode 100644
index 000000000..8e3c4402c
--- /dev/null
+++ b/src/claim/main.c
@@ -0,0 +1,305 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#define UNICODE
+#define _UNICODE
+#include <windows.h>
+#include <shellapi.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <wchar.h>
+
+#include "main.h"
+
+LPWSTR token = NULL;
+LPWSTR room = NULL;
+LPWSTR proxy = NULL;
+LPWSTR url = NULL;
+LPWSTR extPath = NULL;
+LPWSTR *argv = NULL;
+
+char *aToken = NULL;
+char *aRoom = NULL;
+char *aProxy = NULL;
+char *aURL = NULL;
+int insecure = 0;
+
+LPWSTR netdata_claim_get_formatted_message(LPWSTR pMessage, ...)
+{
+    LPWSTR pBuffer = NULL;
+
+    va_list args;
+    va_start(args, pMessage);
+
+    FormatMessage(FORMAT_MESSAGE_FROM_STRING | FORMAT_MESSAGE_ALLOCATE_BUFFER, pMessage, 0, 0, (LPWSTR)&pBuffer,
+                  0, &args);
+    va_end(args);
+
+    return pBuffer;
+}
+
+// Common Functions
+void netdata_claim_error_exit(wchar_t *function)
+{
+    DWORD error = GetLastError();
+    LPWSTR pMessage = L"The function %1 failed with error %2.";
+    LPWSTR pBuffer = netdata_claim_get_formatted_message(pMessage, function, error);
+
+    if (pBuffer) {
+        MessageBoxW(NULL, pBuffer, L"Error", MB_OK|MB_ICONERROR);
+        LocalFree(pBuffer);
+    }
+
+    ExitProcess(error);
+}
+
+/**
+ * Parse Args
+ *
+ * Parse arguments identifying necessity to make a window
+ *
+ * @param argc number of arguments
+ * @param argv A pointer for all arguments given
+ *
+ * @return it returns the number of arguments parsed.
+ */
+int nd_claim_parse_args(int argc, LPWSTR *argv)
+{
+    int i;
+    for (i = 1 ; i < argc; i++) {
+        // Accept the switches case-insensitively; there is no reason to require lowercase only
+        if(wcscasecmp(L"/T", argv[i]) == 0) {
+            if (argc <= i + 1)
+                continue;
+            i++;
+            token = argv[i];
+        }
+
+        if(wcscasecmp(L"/R", argv[i]) == 0) {
+            if (argc <= i + 1)
+                continue;
+            i++;
+            room = argv[i];
+        }
+
+        if(wcscasecmp(L"/P", argv[i]) == 0) {
+            if (argc <= i + 1)
+                continue;
+            i++;
+            // Minimum IPV4
+            if(wcslen(argv[i]) >= 8) {
+                proxy = argv[i];
+            }
+        }
+
+        if(wcscasecmp(L"/F", argv[i]) == 0) {
+            if (argc <= i + 1)
+                continue;
+            i++;
+            extPath = argv[i];
+        }
+
+        if(wcscasecmp(L"/U", argv[i]) == 0) {
+            if (argc <= i + 1)
+                continue;
+            i++;
+            url = argv[i];
+        }
+
+        if(wcscasecmp(L"/I", argv[i]) == 0) {
+            if (argc <= i + 1)
+                continue;
+
+            i++;
+            // allocate one extra byte, so the converted value is not truncated
+            size_t length = wcslen(argv[i]) + 1;
+            char *tmp = calloc(sizeof(char), length);
+            if (!tmp)
+                ExitProcess(1);
+
+            netdata_claim_convert_str(tmp, argv[i], length - 1);
+            if (i < argc)
+                insecure = atoi(tmp);
+            else
+                insecure = 1;
+
+            free(tmp);
+        }
+    }
+
+    if (!token || !room)
+        return 0;
+
+    return argc;
+}
+
+static int netdata_claim_prepare_strings()
+{
+    if (!token || !room)
+        return -1;
+
+    // every buffer is allocated with room for the terminating NUL written by
+    // netdata_claim_convert_str()
+    size_t length = wcslen(token) + 1;
+    aToken = calloc(sizeof(char), length);
+    if (!aToken)
+        return -1;
+
+    netdata_claim_convert_str(aToken, token, length - 1);
+
+    length = wcslen(room) + 1;
+    aRoom = calloc(sizeof(char), length);
+    if (!aRoom)
+        return -1;
+
+    netdata_claim_convert_str(aRoom, room, length - 1);
+
+    if (proxy) {
+        length = wcslen(proxy) + 1;
+        aProxy = calloc(sizeof(char), length);
+        if (!aProxy)
+            return -1;
+
+        netdata_claim_convert_str(aProxy, proxy, length - 1);
+    }
+
+    if (url) {
+        length = wcslen(url) + 1;
+        aURL = calloc(sizeof(char), length);
+        if (!aURL)
+            return -1;
+
+        netdata_claim_convert_str(aURL, url, length - 1);
+    }
+    return 0;
+}
+
+static void netdata_claim_exit_callback(int signal)
+{
+    (void)signal;
+    if (aToken)
+        free(aToken);
+
+    if (aRoom)
+        free(aRoom);
+
+    if (aProxy)
+        free(aProxy);
+
+    if (aURL)
+        free(aURL);
+
+    if (argv)
+        LocalFree(argv);
+
+    if (extPath)
+        LocalFree(extPath);
+}
+
+static inline int netdata_claim_prepare_data(char *out, size_t length)
+{
+    char *proxyLabel = (aProxy) ? "proxy = " : "# proxy = ";
+    char *proxyValue = (aProxy) ? aProxy : "";
+
+    char *urlValue = (aURL) ? aURL : "https://app.netdata.cloud";
+    return snprintf(out,
+                    length,
+                    "[global]\n url = %s\n token = %s\n rooms = %s\n %s%s\n insecure = %s",
+                    urlValue,
+                    aToken,
+                    aRoom,
+                    proxyLabel,
+                    proxyValue,
+                    (insecure) ? "yes" : "no"
+    );
+}
"yes" : "no" + ); +} + +static int netdata_claim_get_path(char *path) +{ + if (extPath) { + size_t length = wcslen(extPath) + 1; + if (length >= WINDOWS_MAX_PATH) + return -1; + + netdata_claim_convert_str(path, extPath, length - 1); + return 0; + } + + char *usrPath = { "\\usr\\bin" }; + DWORD length = GetCurrentDirectoryA(WINDOWS_MAX_PATH, path); + if (!length) { + return -1; + } + + if (strstr(path, usrPath)) { + length -= 7; + path[length] = '\0'; + } + + return 0; +} + +static void netdata_claim_write_config(char *path) +{ +#define NETDATA_MIN_CLOUD_LENGTH 135 +#define NETDATA_MIN_ROOM_LENGTH 36 + if (strlen(aToken) != NETDATA_MIN_CLOUD_LENGTH || strlen(aRoom) < NETDATA_MIN_ROOM_LENGTH) + return; + + char configPath[WINDOWS_MAX_PATH + 1]; + char data[WINDOWS_MAX_PATH + 1]; + char *filename; + if (!extPath) { + snprintf(configPath, WINDOWS_MAX_PATH - 1, "%s\\etc\\netdata\\claim.conf", path); + filename = configPath; + } else { + filename = path; + } + + HANDLE hf = CreateFileA(filename, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL); + if (hf == INVALID_HANDLE_VALUE) + netdata_claim_error_exit(L"CreateFileA"); + + DWORD length = netdata_claim_prepare_data(data, WINDOWS_MAX_PATH); + DWORD written = 0; + + BOOL ret = WriteFile(hf, data, length, &written, NULL); + if (!ret) { + CloseHandle(hf); + netdata_claim_error_exit(L"WriteFileA"); + } + + if (length != written) + MessageBoxW(NULL, L"Cannot write claim.conf.", L"Error", MB_OK|MB_ICONERROR); + + CloseHandle(hf); +} + +int WINAPI WinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, LPSTR lpCmdLine, int nCmdShow) +{ + signal(SIGABRT, netdata_claim_exit_callback); + signal(SIGINT, netdata_claim_exit_callback); + signal(SIGTERM, netdata_claim_exit_callback); + + int argc; + LPWSTR *argv = CommandLineToArgvW(GetCommandLineW(), &argc); + if (argc) + argc = nd_claim_parse_args(argc, argv); + + // When no data is given, user must to use graphic mode + int ret = 0; + if (!argc) { + ret = netdata_claim_window_loop(hInstance, nCmdShow); + } else { + if (netdata_claim_prepare_strings()) { + goto exit_claim; + } + + char basePath[WINDOWS_MAX_PATH]; + if (!netdata_claim_get_path(basePath)) { + netdata_claim_write_config(basePath); + } + } + +exit_claim: + netdata_claim_exit_callback(0); + + return ret; +} diff --git a/src/claim/main.h b/src/claim/main.h new file mode 100644 index 000000000..b5d9e3f89 --- /dev/null +++ b/src/claim/main.h @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_CLAIM_H_ +# define NETDATA_CLAIM_H_ 1 + +#include +#include "ui.h" + +extern LPWSTR token; +extern LPWSTR room; +extern LPWSTR proxy; + +void netdata_claim_error_exit(wchar_t *function); +static inline void netdata_claim_convert_str(char *dst, wchar_t *src, size_t len) { + size_t copied = wcstombs(dst, src, len); + dst[copied] = '\0'; +} + +#endif //NETDATA_CLAIM_H_ diff --git a/src/claim/netdata-claim.sh.in b/src/claim/netdata-claim.sh.in index f4fa382b6..15c166e3f 100755 --- a/src/claim/netdata-claim.sh.in +++ b/src/claim/netdata-claim.sh.in @@ -1,451 +1,111 @@ -#!/usr/bin/env bash -# netdata -# real-time performance and health monitoring, done right! -# (C) 2023 Netdata Inc. 
-# SPDX-License-Identifier: GPL-3.0-or-later - -# Exit code: 0 - Success -# Exit code: 1 - Unknown argument -# Exit code: 2 - Problems with claiming working directory -# Exit code: 3 - Missing dependencies -# Exit code: 4 - Failure to connect to endpoint -# Exit code: 5 - The CLI didn't work -# Exit code: 6 - Wrong user -# Exit code: 7 - Unknown HTTP error message -# -# OK: Agent claimed successfully -# HTTP Status code: 204 -# Exit code: 0 +#!/bin/sh # -# Unknown HTTP error message -# HTTP Status code: 422 -# Exit code: 7 -ERROR_KEYS[7]="None" -ERROR_MESSAGES[7]="Unknown HTTP error message" - -# Error: The agent id is invalid; it does not fulfill the constraints -# HTTP Status code: 422 -# Exit code: 8 -ERROR_KEYS[8]="ErrInvalidNodeID" -ERROR_MESSAGES[8]="invalid node id" - -# Error: The agent hostname is invalid; it does not fulfill the constraints -# HTTP Status code: 422 -# Exit code: 9 -ERROR_KEYS[9]="ErrInvalidNodeName" -ERROR_MESSAGES[9]="invalid node name" - -# Error: At least one of the given rooms ids is invalid; it does not fulfill the constraints -# HTTP Status code: 422 -# Exit code: 10 -ERROR_KEYS[10]="ErrInvalidRoomID" -ERROR_MESSAGES[10]="invalid room id" - -# Error: Invalid public key; the public key is empty or not present -# HTTP Status code: 422 -# Exit code: 11 -ERROR_KEYS[11]="ErrInvalidPublicKey" -ERROR_MESSAGES[11]="invalid public key" +# Copyright (c) 2024 Netdata Inc. +# SPDX-License-Identifier: GPL-3.0-or-later # -# Error: Expired, missing or invalid token -# HTTP Status code: 403 -# Exit code: 12 -ERROR_KEYS[12]="ErrForbidden" -ERROR_MESSAGES[12]="token expired/token not found/invalid token" - -# Error: Duplicate agent id; an agent with the same id is already registered in the cloud -# HTTP Status code: 409 -# Exit code: 13 -ERROR_KEYS[13]="ErrAlreadyClaimed" -ERROR_MESSAGES[13]="already claimed" - -# Error: The node claiming process is still in progress. -# HTTP Status code: 102 -# Exit code: 14 -ERROR_KEYS[14]="ErrProcessingClaim" -ERROR_MESSAGES[14]="processing claiming" +# %%NEW_CLAIMING_METHOD%% -# Error: Internal server error. Any other unexpected error (DB problems, etc.) -# HTTP Status code: 500 -# Exit code: 15 -ERROR_KEYS[15]="ErrInternalServerError" -ERROR_MESSAGES[15]="Internal Server Error" +set -e -# Error: There was a timeout processing the claim. -# HTTP Status code: 504 -# Exit code: 16 -ERROR_KEYS[16]="ErrGatewayTimeout" -ERROR_MESSAGES[16]="Gateway Timeout" - -# Error: The service cannot handle the claiming request at this time. -# HTTP Status code: 503 -# Exit code: 17 -ERROR_KEYS[17]="ErrServiceUnavailable" -ERROR_MESSAGES[17]="Service Unavailable" - -# Exit code: 18 - Agent unique id is not generated yet. - -NETDATA_RUNNING=1 - -get_config_value() { - conf_file="${1}" - section="${2}" - key_name="${3}" - if [ "${NETDATA_RUNNING}" -eq 1 ]; then - config_result=$(@sbindir_POST@/netdatacli 2>/dev/null read-config "$conf_file|$section|$key_name"; exit $?) - result="$?" - if [ "${result}" -ne 0 ]; then - echo >&2 "Unable to communicate with Netdata daemon, querying config from disk instead." 
-      NETDATA_RUNNING=0
-    fi
-  fi
-  if [ "${NETDATA_RUNNING}" -eq 0 ]; then
-    config_result=$(@sbindir_POST@/netdata 2>/dev/null -W get2 "$conf_file" "$section" "$key_name" unknown_default)
-  fi
-  echo "$config_result"
+warning() {
+  printf "WARNING: %s\n" "${1}" 1>&2
 }
 
-if command -v curl >/dev/null 2>&1 ; then
-  URLTOOL="curl"
-elif command -v wget >/dev/null 2>&1 ; then
-  URLTOOL="wget"
-else
-  echo >&2 "I need curl or wget to proceed, but neither is available on this system."
-  exit 3
-fi
-if ! command -v openssl >/dev/null 2>&1 ; then
-  echo >&2 "I need openssl to proceed, but it is not available on this system."
-  exit 3
-fi
-
-# shellcheck disable=SC2050
-if [ "@enable_cloud_POST@" = "no" ]; then
-  echo >&2 "This agent was built with --disable-cloud and cannot be claimed"
-  exit 3
-fi
-# shellcheck disable=SC2050
-if [ "@enable_aclk_POST@" != "yes" ]; then
-  echo >&2 "This agent was built without the dependencies for Cloud and cannot be claimed"
-  exit 3
-fi
-
-# -----------------------------------------------------------------------------
-# defaults to allow running this script by hand
-
-[ -z "${NETDATA_VARLIB_DIR}" ] && NETDATA_VARLIB_DIR="@varlibdir_POST@"
-MACHINE_GUID_FILE="@registrydir_POST@/netdata.public.unique.id"
-CLAIMING_DIR="${NETDATA_VARLIB_DIR}/cloud.d"
-TOKEN="unknown"
-URL_BASE=$(get_config_value cloud global "cloud base url")
-[ -z "$URL_BASE" ] && URL_BASE="https://app.netdata.cloud" # Cover post-install with --dont-start
-ID="unknown"
-ROOMS=""
-[ -z "$HOSTNAME" ] && HOSTNAME=$(hostname)
-CLOUD_CERTIFICATE_FILE="${CLAIMING_DIR}/cloud_fullchain.pem"
-VERBOSE=0
-INSECURE=0
-RELOAD=1
-NETDATA_USER=$(get_config_value netdata global "run as user")
-[ -z "$EUID" ] && EUID="$(id -u)"
+error() {
+  printf "ERROR: %s\n" "${1}" 1>&2
+  exit "${2}"
+}
 
-gen_id() {
-  local id
-
-  if command -v uuidgen > /dev/null 2>&1; then
-    id="$(uuidgen | tr '[:upper:]' '[:lower:]')"
-  elif [ -r /proc/sys/kernel/random/uuid ]; then
-    id="$(cat /proc/sys/kernel/random/uuid)"
-  else
-    echo >&2 "Unable to generate machine ID."
-    exit 18
-  fi
-
-  if [ "${id}" = "8a795b0c-2311-11e6-8563-000c295076a6" ] || [ "${id}" = "4aed1458-1c3e-11e6-a53f-000c290fc8f5" ]; then
-    gen_id
+get_templated_value() {
+  value="$1"
+  default="$2"
+  override="$3"
+
+  if [ -n "${override}" ]; then
+    echo "${override}"
+  elif [ -z "${value}" ]; then
+    error "Expected templated value not present" 1
+  elif (echo "${value}" | grep -q '@'); then
+    echo "${default}"
+  else
-    echo "${id}"
+    echo "${value}"
+  fi
-  fi
-}
+}
 
-# get the MACHINE_GUID by default
-if [ -r "${MACHINE_GUID_FILE}" ]; then
-  ID="$(cat "${MACHINE_GUID_FILE}")"
-  MGUID=$ID
-elif [ -f "${MACHINE_GUID_FILE}" ]; then
-  echo >&2 "netdata.public.unique.id is not readable. Please make sure you have rights to read it (Filename: ${MACHINE_GUID_FILE})."
-  exit 18
-else
-  if mkdir -p "${MACHINE_GUID_FILE%/*}" && echo -n "$(gen_id)" > "${MACHINE_GUID_FILE}"; then
-    ID="$(cat "${MACHINE_GUID_FILE}")"
-    MGUID=$ID
-  else
-    echo >&2 "Failed to write new machine GUID. Please make sure you have rights to write to ${MACHINE_GUID_FILE}."
- exit 18 - fi -fi - -# get token from file -if [ -r "${CLAIMING_DIR}/token" ]; then - TOKEN="$(cat "${CLAIMING_DIR}/token")" -fi - -# get rooms from file -if [ -r "${CLAIMING_DIR}/rooms" ]; then - ROOMS="$(cat "${CLAIMING_DIR}/rooms")" -fi - -variable_to_set= -for arg in "$@" -do - if [ -z "$variable_to_set" ]; then - case $arg in - --claim-token) variable_to_set="TOKEN" ;; - --claim-rooms) variable_to_set="ROOMS" ;; - --claim-url) variable_to_set="URL_BASE" ;; - -token=*) TOKEN=${arg:7} ;; - -url=*) [ -n "${arg:5}" ] && URL_BASE=${arg:5} ;; - -id=*) ID=$(echo "${arg:4}" | tr '[:upper:]' '[:lower:]');; - -rooms=*) ROOMS=${arg:7} ;; - -hostname=*) HOSTNAME=${arg:10} ;; - -verbose) VERBOSE=1 ;; - -insecure) INSECURE=1 ;; - -proxy=*) PROXY=${arg:7} ;; - -noproxy) NOPROXY=yes ;; - -noreload) RELOAD=0 ;; - -user=*) NETDATA_USER=${arg:6} ;; - -daemon-not-running) NETDATA_RUNNING=0 ;; - *) echo >&2 "Unknown argument ${arg}" - exit 1 ;; - esac - else - case "$variable_to_set" in - TOKEN) TOKEN="$arg" ;; - ROOMS) ROOMS="$arg" ;; - URL_BASE) URL_BASE="$arg" ;; - esac - variable_to_set= - fi - shift 1 -done - -if [ "$EUID" != "0" ] && [ "$(whoami)" != "$NETDATA_USER" ]; then - echo >&2 "This script must be run by the $NETDATA_USER user account" - exit 6 -fi - -# if curl not installed give warning SOCKS can't be used -if [[ "${URLTOOL}" != "curl" && "${PROXY:0:5}" = socks ]] ; then - echo >&2 "wget doesn't support SOCKS. Please install curl or disable SOCKS proxy." - exit 1 -fi - -echo >&2 "Token: ****************" -echo >&2 "Base URL: $URL_BASE" -echo >&2 "Id: $ID" -echo >&2 "Rooms: $ROOMS" -echo >&2 "Hostname: $HOSTNAME" -echo >&2 "Proxy: $PROXY" -echo >&2 "Netdata user: $NETDATA_USER" - -# create the claiming directory for this user -if [ ! -d "${CLAIMING_DIR}" ] ; then - mkdir -p "${CLAIMING_DIR}" && chmod 0770 "${CLAIMING_DIR}" -# shellcheck disable=SC2181 - if [ $? -ne 0 ] ; then - echo >&2 "Failed to create claiming working directory ${CLAIMING_DIR}" - exit 2 - fi -fi -if [ ! -w "${CLAIMING_DIR}" ] ; then - echo >&2 "No write permission in claiming working directory ${CLAIMING_DIR}" - exit 2 -fi - -if [ ! -f "${CLAIMING_DIR}/private.pem" ] ; then - echo >&2 "Generating private/public key for the first time." - if ! openssl genrsa -out "${CLAIMING_DIR}/private.pem" 2048 ; then - echo >&2 "Failed to generate private/public key pair." - exit 2 - fi -fi -if [ ! -f "${CLAIMING_DIR}/public.pem" ] ; then - echo >&2 "Extracting public key from private key." - if ! openssl rsa -in "${CLAIMING_DIR}/private.pem" -outform PEM -pubout -out "${CLAIMING_DIR}/public.pem" ; then - echo >&2 "Failed to extract public key." - exit 2 - fi -fi - -TARGET_URL="${URL_BASE%/}/api/v1/spaces/nodes/${ID}" -# shellcheck disable=SC2002 -KEY=$(cat "${CLAIMING_DIR}/public.pem" | tr '\n' '!' 
| sed -e 's/!/\\n/g')
-# shellcheck disable=SC2001
-[ -n "$ROOMS" ] && ROOMS=\"$(echo "$ROOMS" | sed s'/,/", "/g')\"
+config_dir="$(get_templated_value "@configdir_POST@" "/etc/netdata" "${NETDATA_CLAIM_CONFIG_DIR}")"
+claim_config="${config_dir}/claim.conf"
+netdatacli="$(get_templated_value "@sbindir_POST@/netdatacli" "$(command -v netdatacli 2>/dev/null)" "${NETDATA_CLAIM_NETDATACLI_PATH}")"
+netdata_group="$(get_templated_value "@netdata_group_POST@" "netdata" "${NETDATA_CLAIM_CONFIG_GROUP}")"
+
+write_config() {
+  config="[global]"
+  config="${config}\n url = ${NETDATA_CLAIM_URL}"
+  config="${config}\n token = ${NETDATA_CLAIM_TOKEN}"
+  if [ -n "${NETDATA_CLAIM_ROOMS}" ]; then
+    config="${config}\n rooms = ${NETDATA_CLAIM_ROOMS}"
+  fi
+  if [ -n "${NETDATA_CLAIM_PROXY}" ]; then
+    config="${config}\n proxy = ${NETDATA_CLAIM_PROXY}"
+  fi
+  if [ -n "${NETDATA_CLAIM_INSECURE}" ]; then
+    config="${config}\n insecure = ${NETDATA_CLAIM_INSECURE}"
+  fi
 
-cat > "${CLAIMING_DIR}/tmpin.txt" <<EMBED_JSON
+  printf "%b\n" "${config}" > "${claim_config}.tmp"
+  chmod 0640 "${claim_config}.tmp"
+  mv -f "${claim_config}.tmp" "${claim_config}"
+}
-EMBED_JSON
-
-if [ "${VERBOSE}" == 1 ] ; then
-  echo "Request to server:"
-  cat "${CLAIMING_DIR}/tmpin.txt"
-fi
-
-if [ "${URLTOOL}" = "curl" ] ; then
-  URLCOMMAND="curl --connect-timeout 30 --retry 0 -s -i -X PUT -d \"@${CLAIMING_DIR}/tmpin.txt\""
-  if [ "${NOPROXY}" = "yes" ] ; then
-    URLCOMMAND="${URLCOMMAND} -x \"\""
-  elif [ -n "${PROXY}" ] ; then
-    URLCOMMAND="${URLCOMMAND} -x \"${PROXY}\""
-  fi
-else
-  URLCOMMAND="wget -T 15 -O - -q --server-response --content-on-error=on --method=PUT \
-    --body-file=\"${CLAIMING_DIR}/tmpin.txt\""
-  if [ "${NOPROXY}" = "yes" ] ; then
-    URLCOMMAND="${URLCOMMAND} --no-proxy"
-  elif [ "${PROXY:0:4}" = http ] ; then
-    URLCOMMAND="export http_proxy=${PROXY}; ${URLCOMMAND}"
-  fi
-fi
-
-if [ "${INSECURE}" == 1 ] ; then
-  if [ "${URLTOOL}" = "curl" ] ; then
-    URLCOMMAND="${URLCOMMAND} --insecure"
-  else
-    URLCOMMAND="${URLCOMMAND} --no-check-certificate"
+reload_claiming() {
+  if [ -z "${NORELOAD}" ]; then
+    "${netdatacli}" reload-claiming-state
+  fi
-  fi
-fi
-
-if [ -r "${CLOUD_CERTIFICATE_FILE}" ] ; then
-  if [ "${URLTOOL}" = "curl" ] ; then
-    URLCOMMAND="${URLCOMMAND} --cacert \"${CLOUD_CERTIFICATE_FILE}\""
-  else
-    URLCOMMAND="${URLCOMMAND} --ca-certificate \"${CLOUD_CERTIFICATE_FILE}\""
-  fi
-fi
-
-if [ "${VERBOSE}" == 1 ]; then
-  echo "${URLCOMMAND} \"${TARGET_URL}\""
-fi
+}
 
-attempt_contact () {
-  if [ "${URLTOOL}" = "curl" ] ; then
-    eval "${URLCOMMAND} \"${TARGET_URL}\"" >"${CLAIMING_DIR}/tmpout.txt"
-  else
-    eval "${URLCOMMAND} \"${TARGET_URL}\"" >"${CLAIMING_DIR}/tmpout.txt" 2>&1
-  fi
-  URLCOMMAND_EXIT_CODE=$?
-  if [ "${URLTOOL}" = "wget" ] && [ "${URLCOMMAND_EXIT_CODE}" -eq 8 ] ; then
-    # We consider the server issuing an error response a successful attempt at communicating
-    URLCOMMAND_EXIT_CODE=0
-  fi
-
-  # Check if URLCOMMAND connected and received reply
-  if [ "${URLCOMMAND_EXIT_CODE}" -ne 0 ] ; then
-    echo >&2 "Failed to connect to ${URL_BASE}, return code ${URLCOMMAND_EXIT_CODE}"
-    rm -f "${CLAIMING_DIR}/tmpout.txt"
-    return 4
-  fi
-
-  if [ "${VERBOSE}" == 1 ] ; then
-    echo "Response from server:"
-    cat "${CLAIMING_DIR}/tmpout.txt"
-  fi
-
-  return 0
-}
 
-for i in {1..3}
-do
-  if attempt_contact ; then
-    echo "Connection attempt $i successful"
-    break
-  fi
-  echo "Connection attempt $i failed. Retry in ${i}s."
-  if [ "$i" -eq 5 ] ; then
-    rm -f "${CLAIMING_DIR}/tmpin.txt"
-    exit 4
-  fi
-  sleep "$i"
-done
+parse_args() {
+  while [ -n "${1}" ]; do
+    case "${1}" in
+      --claim-token) NETDATA_CLAIM_TOKEN="${2}"; shift 1 ;;
+      -token=*) NETDATA_CLAIM_TOKEN="$(echo "${1}" | sed 's/^-token=//')" ;;
+      --claim-rooms) NETDATA_CLAIM_ROOMS="${2}"; shift 1 ;;
+      -rooms=*) NETDATA_CLAIM_ROOMS="$(echo "${1}" | sed 's/^-rooms=//')" ;;
+      --claim-url) NETDATA_CLAIM_URL="${2}"; shift 1 ;;
+      -url=*) NETDATA_CLAIM_URL="$(echo "${1}" | sed 's/^-url=//')" ;;
+      --claim-proxy) NETDATA_CLAIM_PROXY="${2}"; shift 1 ;;
+      -proxy=*) NETDATA_CLAIM_PROXY="$(echo "${1}" | sed 's/^-proxy=//')" ;;
+      -noproxy|--noproxy) NETDATA_CLAIM_PROXY="none" ;;
+      -noreload|--noreload) NORELOAD=1 ;;
+      -insecure|--insecure) NETDATA_CLAIM_INSECURE=yes ;;
+      -verbose) true ;;
+      -daemon-not-running) true ;;
+      -id=*) warning "-id option is no longer supported. Remove the node ID file instead." ;;
+      -hostname=*) warning "-hostname option is no longer supported. Update the main netdata configuration manually instead." ;;
+      -user=*) warning "-user option is no longer supported." ;;
+      *) warning "Ignoring unrecognized option ${1}";;
+    esac
+
+    shift 1
+  done
 
-rm -f "${CLAIMING_DIR}/tmpin.txt"
-
-ERROR_KEY=$(grep "\"errorMsgKey\":" "${CLAIMING_DIR}/tmpout.txt" | awk -F "errorMsgKey\":\"" '{print $2}' | awk -F "\"" '{print $1}')
-case ${ERROR_KEY} in
-  "ErrInvalidNodeID") EXIT_CODE=8 ;;
-  "ErrInvalidNodeName") EXIT_CODE=9 ;;
-  "ErrInvalidRoomID") EXIT_CODE=10 ;;
-  "ErrInvalidPublicKey") EXIT_CODE=11 ;;
-  "ErrForbidden") EXIT_CODE=12 ;;
-  "ErrAlreadyClaimed") EXIT_CODE=13 ;;
-  "ErrProcessingClaim") EXIT_CODE=14 ;;
-  "ErrInternalServerError") EXIT_CODE=15 ;;
-  "ErrGatewayTimeout") EXIT_CODE=16 ;;
-  "ErrServiceUnavailable") EXIT_CODE=17 ;;
-  *) EXIT_CODE=7 ;;
-esac
-
-HTTP_STATUS_CODE=$(grep "HTTP" "${CLAIMING_DIR}/tmpout.txt" | tail -1 | awk -F " " '{print $2}')
-if [ "${HTTP_STATUS_CODE}" = "204" ] ; then
-  EXIT_CODE=0
-fi
+
+  if [ -z "${NETDATA_CLAIM_TOKEN}" ]; then
+    error "Claim token must be specified" 1
+  fi
+
+  if [ -z "${NETDATA_CLAIM_URL}" ]; then
+    NETDATA_CLAIM_URL="https://app.netdata.cloud/"
+  fi
+}
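+# Illustrative invocation (placeholder values): write claim.conf and then
+# reload the agent's claiming state, e.g.:
+#
+#   netdata-claim.sh --claim-token TOKEN --claim-rooms ROOM1,ROOM2 --claim-url https://app.netdata.cloud
+#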
-if [ "${HTTP_STATUS_CODE}" = "204" ] || [ "${ERROR_KEY}" = "ErrAlreadyClaimed" ] ; then
-  rm -f "${CLAIMING_DIR}/tmpout.txt"
-  if [ "${HTTP_STATUS_CODE}" = "204" ] ; then
-    echo -n "${ID}" >"${CLAIMING_DIR}/claimed_id" || (echo >&2 "Claiming failed"; set -e; exit 2)
-  fi
-  rm -f "${CLAIMING_DIR}/token" || (echo >&2 "Claiming failed"; set -e; exit 2)
-
-  # Rewrite the cloud.conf on the disk
-  cat > "$CLAIMING_DIR/cloud.conf" <&2 "Claiming failed"; set -e; exit 2)
-  fi
-  if [ "${RELOAD}" == "0" ] ; then
-    exit $EXIT_CODE
-  fi
-
-  # Update cloud.conf in the agent memory
-  @sbindir_POST@/netdatacli write-config 'cloud|global|enabled|yes' && \
-  @sbindir_POST@/netdatacli write-config "cloud|global|cloud base url|$URL_BASE" && \
-  @sbindir_POST@/netdatacli reload-claiming-state && \
-  if [ "${HTTP_STATUS_CODE}" = "204" ] ; then
-    echo >&2 "Node was successfully claimed."
-  else
-    echo >&2 "The agent cloud base url is set to the url provided."
-    echo >&2 "The cloud may have different credentials already registered for this agent ID and it cannot be reclaimed under different credentials for security reasons. If you are unable to connect use -id=\$(uuidgen) to overwrite this agent ID with a fresh value if the original credentials cannot be restored."
-    echo >&2 "Failed to claim node with the following error message:\"${ERROR_MESSAGES[$EXIT_CODE]}\""
-  fi && exit $EXIT_CODE
-  if [ "${ERROR_KEY}" = "ErrAlreadyClaimed" ] ; then
-    echo >&2 "The cloud may have different credentials already registered for this agent ID and it cannot be reclaimed under different credentials for security reasons. If you are unable to connect use -id=\$(uuidgen) to overwrite this agent ID with a fresh value if the original credentials cannot be restored."
-    echo >&2 "Failed to claim node with the following error message:\"${ERROR_MESSAGES[$EXIT_CODE]}\""
-    exit $EXIT_CODE
-  fi
-  echo >&2 "The claim was successful but the agent could not be notified ($?)- it requires a restart to connect to the cloud."
-  [ "$NETDATA_RUNNING" -eq 0 ] && exit 0 || exit 5
-fi
+[ -z "$EUID" ] && EUID="$(id -u)"
+if [ "${EUID}" != "0" ] && [ ! -w "${config_dir}" ]; then
+  error "Script must be run by a user with write access to ${config_dir}." 32
+fi
 
-echo >&2 "Failed to claim node with the following error message:\"${ERROR_MESSAGES[$EXIT_CODE]}\""
-if [ "${VERBOSE}" == 1 ]; then
-  echo >&2 "Error key was:\"${ERROR_KEYS[$EXIT_CODE]}\""
-fi
-rm -f "${CLAIMING_DIR}/tmpout.txt"
-exit $EXIT_CODE
+warning "This script is deprecated and will be officially unsupported in the near future. Please either use the kickstart script with the appropriate '--claim-*' options, or directly write out the claiming configuration instead."
+
+parse_args "${@}"
+write_config
+reload_claiming
diff --git a/src/claim/ui.c b/src/claim/ui.c
new file mode 100644
index 000000000..30a001d0b
--- /dev/null
+++ b/src/claim/ui.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#define UNICODE
+#define _UNICODE
+#include <windows.h>
+#include "richedit.h"
+#include "tchar.h"
+#include "main.h"
+
+static LPCTSTR szWindowClass = _T("DesktopApp");
+
+static HINSTANCE hInst;
+static HWND hToken;
+static HWND hRoom;
+
+LRESULT CALLBACK WndProc(HWND hNetdatawnd, UINT message, WPARAM wParam, LPARAM lParam)
+{
+    PAINTSTRUCT ps;
+    HDC hdc;
+    LPCTSTR topMsg[] = { L" Help",
+                         L" ",
+                         L"In this initial version of the software, there are no fields for data",
+                         L" entry. To claim your agent, you must use the following options:",
+                         L" ",
+                         L"/T TOKEN: The cloud token;",
+                         L"/R ROOMS: A list of rooms to claim;",
+                         L"/P PROXY: The proxy information;",
+                         L"/U URL  : The cloud URL;",
+                         L"/I      : Use insecure connection;",
+                         L"/F File : file to store cloud info;"
+                       };
+
+    switch (message)
+    {
+        case WM_PAINT: {
+            hdc = BeginPaint(hNetdatawnd, &ps);
+
+            int i;
+            for (i = 0; i < sizeof(topMsg) / sizeof(LPCTSTR); i++) {
+                TextOut(hdc, 5, 5 + 15*i, topMsg[i], (int)wcslen(topMsg[i]));
+            }
+            EndPaint(hNetdatawnd, &ps);
+            break;
+        }
+        // Any command, like any close request, ends the tool.
+        case WM_COMMAND:
+        case WM_DESTROY: {
+            PostQuitMessage(0);
+            break;
+        }
+        default: {
+            return DefWindowProc(hNetdatawnd, message, wParam, lParam);
+        }
+    }
+
+    return 0;
+}
+
+int netdata_claim_window_loop(HINSTANCE hInstance, int nCmdShow)
+{
+    WNDCLASSEX wcex;
+
+    wcex.cbSize = sizeof(WNDCLASSEX);
+    wcex.style = CS_HREDRAW | CS_VREDRAW;
+    wcex.lpfnWndProc = WndProc;
+    wcex.cbClsExtra = 0;
+    wcex.cbWndExtra = 0;
+    wcex.hInstance = hInstance;
+    wcex.hIcon = LoadIcon(wcex.hInstance, MAKEINTRESOURCEW(11));
+    wcex.hCursor = LoadCursor(NULL, IDC_ARROW);
+    wcex.hbrBackground = (HBRUSH)(COLOR_WINDOW+1);
+    wcex.lpszMenuName = NULL;
+    wcex.lpszClassName = szWindowClass;
+    wcex.hIconSm = LoadIcon(NULL, IDI_APPLICATION); // stock icons require a NULL instance handle
+
+    if (!RegisterClassEx(&wcex)) {
+        MessageBoxW(NULL, L"Call to RegisterClassEx failed!", L"Error", 0);
+        return 1;
+    }
+
+    hInst = hInstance;
+
+    HWND hNetdatawnd = CreateWindowExW(WS_EX_OVERLAPPEDWINDOW,
+                                       szWindowClass,
+                                       L"Netdata Claim",
+                                       WS_OVERLAPPEDWINDOW,
+                                       CW_USEDEFAULT, CW_USEDEFAULT,
+                                       460, 240,
+                                       NULL,
+                                       NULL,
+                                       hInstance,
+                                       NULL
+                                       );
+
+    if (!hNetdatawnd) {
+        MessageBoxW(NULL, L"Call to CreateWindow failed!", L"Error", 0);
+        return 1;
+    }
+
+    ShowWindow(hNetdatawnd, nCmdShow);
+    UpdateWindow(hNetdatawnd);
+
+    MSG msg;
+    while (GetMessage(&msg, NULL, 0, 0)) {
+        TranslateMessage(&msg);
+        DispatchMessage(&msg);
+    }
+
+    return (int) msg.wParam;
+}
diff --git a/src/claim/ui.h b/src/claim/ui.h
new file mode 100644
index 000000000..583ed1cda
--- /dev/null
+++ b/src/claim/ui.h
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_CLAIM_WINDOW_H_
+# define NETDATA_CLAIM_WINDOW_H_ 1
+
+// https://learn.microsoft.com/en-us/troubleshoot/windows-client/shell-experience/command-line-string-limitation
+// https://sourceforge.net/p/mingw/mailman/mingw-users/thread/4C8FD4EB.4050503@xs4all.nl/
+#define WINDOWS_MAX_PATH 8191
+
+int netdata_claim_window_loop(HINSTANCE hInstance, int nCmdShow);
+
+#endif //NETDATA_CLAIM_WINDOW_H_
diff --git a/src/cli/README.md b/src/cli/README.md
index 29c75d450..b990f4775 100644
--- a/src/cli/README.md
+++ b/src/cli/README.md
@@ -1,38 +1,24 @@
 # Netdata Agent CLI
 
-The `netdatacli` executable provides a simple way to control the Netdata agent's operation.
+The `netdatacli` executable offers a straightforward way to manage the Netdata Agent's operations.
 
-You can see the commands `netdatacli` supports by executing it with `netdatacli` and entering `help` in
-standard input. All commands are given as standard input to `netdatacli`.
+It is located in the same directory as the `netdata` binary.
 
-The commands that a running netdata agent can execute are the following:
+Available commands:
 
-```sh
-The commands are (arguments are in brackets):
-help
-    Show this help menu.
-reload-health
-    Reload health configuration.
-reload-labels
-    Reload all labels.
-save-database
-    Save internal DB to disk for database mode save.
-reopen-logs
-    Close and reopen log files.
-shutdown-agent
-    Cleanup and exit the netdata agent.
-fatal-agent
-    Log the state and halt the netdata agent.
-reload-claiming-state
-    Reload agent claiming state from disk.
-ping
-    Return with 'pong' if agent is alive.
-aclk-state [json]
-    Returns current state of ACLK and Cloud connection. (optionally in json)
-dumpconfig
-    Returns the current netdata.conf on stdout.
-```
+| Command                 | Description                                                                                                                                                                        |
+|-------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `help`                  | Display usage information and exit.                                                                                                                                                |
+| `reload-health`         | Reloads the Netdata health configuration, updating alerts based on changes made to configuration files.                                                                            |
+| `reload-labels`         | Reloads [host labels](/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts.md#custom-labels) from netdata.conf.                                                   |
+| `reopen-logs`           | Close and reopen log files.                                                                                                                                                        |
+| `shutdown-agent`        | Gracefully shut down the Netdata Agent.                                                                                                                                            |
+| `fatal-agent`           | Log the current state and forcefully halt the Netdata Agent.                                                                                                                       |
+| `reload-claiming-state` | Reload the Agent's claiming state from disk.                                                                                                                                       |
+| `ping`                  | Checks the Agent's status. If the Agent is alive, it exits with status code 0 and prints 'pong' to standard output. Exits with status code 255 otherwise.                          |
+| `aclk-state [json]`     | Return the current state of ACLK and Cloud connection. Optionally in JSON.                                                                                                         |
+| `dumpconfig`            | Display the current netdata.conf configuration.                                                                                                                                    |
+| `remove-stale-node `    | Unregisters a stale child Node, removing it from the parent Node's UI and Netdata Cloud. This is useful for ephemeral Nodes that may stop streaming and remain visible as stale.   |
+| `version`               | Display the Netdata Agent version.                                                                                                                                                 |
 
 See also the Netdata daemon [command line options](/src/daemon/README.md#command-line-options).
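+For example (illustrative commands; they assume `netdatacli` is on your `PATH` and the Agent is running):
+
+```bash
+# Liveness check: prints 'pong' and exits with status code 0 when the Agent is alive
+netdatacli ping
+
+# Apply edited health configuration without restarting the Agent
+netdatacli reload-health
+```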
diff --git a/src/cli/cli.c b/src/cli/cli.c
index 366f6e9e3..2a6e570e4 100644
--- a/src/cli/cli.c
+++ b/src/cli/cli.c
@@ -155,7 +155,6 @@ static void connect_cb(uv_connect_t* req, int status)
 
 int main(int argc, char **argv)
 {
-    clocks_init();
     nd_log_initialize_for_external_plugins("netdatacli");
 
     int ret, i;
diff --git a/src/collectors/COLLECTORS.md b/src/collectors/COLLECTORS.md
index 608649a38..f5aa095e7 100644
--- a/src/collectors/COLLECTORS.md
+++ b/src/collectors/COLLECTORS.md
@@ -23,8 +23,6 @@ If you don't see the app/service you'd like to monitor in this list:
 
 - If you don't see the collector there, you can make a [feature request](https://github.com/netdata/netdata/issues/new/choose) on GitHub.
 
-- If you have basic software development skills, you can add your own plugin in [Go](/src/go/plugin/go.d/README.md#how-to-develop-a-collector) or [Python](/docs/developer-and-contributor-corner/python-collector.md)
-
 ## Available Data Collection Integrations
 ### APM
@@ -77,7 +75,7 @@ If you don't see the app/service you'd like to monitor in this list:
 
 - [OpenLDAP (community)](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/prometheus/integrations/openldap_community.md)
 
-- [OpenLDAP](https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/openldap/integrations/openldap.md)
+- [OpenLDAP](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/openldap/integrations/openldap.md)
 
 - [RADIUS](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/prometheus/integrations/radius.md)
 
@@ -215,6 +213,8 @@ If you don't see the app/service you'd like to monitor in this list:
 
 - [AWS RDS](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/prometheus/integrations/aws_rds.md)
 
+- [BOINC](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/boinc/integrations/boinc.md)
+
 - [Cassandra](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/cassandra/integrations/cassandra.md)
 
 - [ClickHouse](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/clickhouse/integrations/clickhouse.md)
 
@@ -237,6 +237,8 @@ If you don't see the app/service you'd like to monitor in this list:
 
 - [MariaDB](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/mysql/integrations/mariadb.md)
 
+- [MaxScale](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/maxscale/integrations/maxscale.md)
+
 - [Memcached (community)](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/prometheus/integrations/memcached_community.md)
 
 - [Memcached](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/memcached/integrations/memcached.md)
 
@@ -249,7 +251,7 @@ If you don't see the app/service you'd like to monitor in this list:
 
 - [Oracle DB (community)](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/prometheus/integrations/oracle_db_community.md)
 
-- [Oracle DB](https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/oracledb/integrations/oracle_db.md)
+- [Oracle DB](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/oracledb/integrations/oracle_db.md)
 
 - [Patroni](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/prometheus/integrations/patroni.md)
 
@@ -281,8 +283,6 @@ If you don't see the app/service you'd like to monitor in this list:
 
 ### Distributed Computing Systems
 
-- [BOINC](https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/boinc/integrations/boinc.md)
-
 - [Gearman](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/gearman/integrations/gearman.md)
 
 ### DNS and DHCP Servers
@@ -429,7 +429,7 @@ If you don't see the app/service you'd like to monitor in this list:
 
 - [OpenRCT2](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/prometheus/integrations/openrct2.md)
 
-- [SpigotMC](https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/spigotmc/integrations/spigotmc.md)
+- [SpigotMC](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/spigotmc/integrations/spigotmc.md)
 
 - [Steam](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/prometheus/integrations/steam.md)
 
@@ -459,7 +459,7 @@ If you don't see the app/service you'd like to monitor in this list:
 
 ### Hardware Devices and Sensors
 
-- [1-Wire Sensors](https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/w1sensor/integrations/1-wire_sensors.md)
+- [1-Wire Sensors](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/w1sensor/integrations/1-wire_sensors.md)
 
 - [AM2320](https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/am2320/integrations/am2320.md)
 
@@ -485,9 +485,7 @@ If you don't see the app/service you'd like to monitor in this list:
 
 - [Intelligent Platform Management Interface (IPMI)](https://github.com/netdata/netdata/blob/master/src/collectors/freeipmi.plugin/integrations/intelligent_platform_management_interface_ipmi.md)
 
-- [Linux Sensors (lm-sensors)](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/sensors/integrations/linux_sensors_lm-sensors.md)
-
-- [Linux Sensors (sysfs)](https://github.com/netdata/netdata/blob/master/src/collectors/charts.d.plugin/sensors/integrations/linux_sensors_sysfs.md)
+- [Linux Sensors](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/sensors/integrations/linux_sensors.md)
 
 - [NVML](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/prometheus/integrations/nvml.md)
 
@@ -891,8 +889,6 @@ If you don't see the app/service you'd like to monitor in this list:
 
 - [GitHub repository](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/prometheus/integrations/github_repository.md)
 
-- [python.d zscores](https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/zscores/integrations/python.d_zscores.md)
-
 ### Processes and System Services
 
 - [Applications](https://github.com/netdata/netdata/blob/master/src/collectors/apps.plugin/integrations/applications.md)
 
@@ -923,6 +919,8 @@ If you don't see the app/service you'd like to monitor in this list:
 
 - [Sphinx](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/prometheus/integrations/sphinx.md)
 
+- [Typesense](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/typesense/integrations/typesense.md)
+
 ### Security Systems
 
 - [Certificate Transparency](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/prometheus/integrations/certificate_transparency.md)
 
@@ -967,7 +965,7 @@ If you don't see the app/service you'd like to monitor in this list:
 
 - [CVMFS clients](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/prometheus/integrations/cvmfs_clients.md)
 
-- [Ceph](https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/ceph/integrations/ceph.md)
+- [Ceph](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/ceph/integrations/ceph.md)
 
 - [DMCache devices](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/dmcache/integrations/dmcache_devices.md)
 
@@ -1009,7 +1007,7 @@ If you don't see the app/service you'd like to monitor in this list:
 
 - [Netapp ONTAP API](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/prometheus/integrations/netapp_ontap_api.md)
 
-- [Samba](https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/samba/integrations/samba.md)
+- [Samba](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/samba/integrations/samba.md)
 
 - [Starwind VSAN VSphere Edition](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/prometheus/integrations/starwind_vsan_vsphere_edition.md)
 
@@ -1041,7 +1039,7 @@ If you don't see the app/service you'd like to monitor in this list:
 
 - [Site 24x7](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/prometheus/integrations/site_24x7.md)
 
-- [TCP Endpoints](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/portcheck/integrations/tcp_endpoints.md)
+- [TCP/UDP Endpoints](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/portcheck/integrations/tcp-udp_endpoints.md)
 
 - [Uptimerobot](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/prometheus/integrations/uptimerobot.md)
 
@@ -1081,7 +1079,7 @@ If you don't see the app/service you'd like to monitor in this list:
 
 ### UPS
 
-- [APC UPS](https://github.com/netdata/netdata/blob/master/src/collectors/charts.d.plugin/apcupsd/integrations/apc_ups.md)
+- [APC UPS](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/apcupsd/integrations/apc_ups.md)
 
 - [Eaton UPS](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/prometheus/integrations/eaton_ups.md)
 
@@ -1133,6 +1131,8 @@ If you don't see the app/service you'd like to monitor in this list:
 
 - [NGINX Plus](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/nginxplus/integrations/nginx_plus.md)
 
+- [NGINX Unit](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/nginxunit/integrations/nginx_unit.md)
+
 - [NGINX VTS](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/nginxvts/integrations/nginx_vts.md)
 
 - [NGINX](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/nginx/integrations/nginx.md)
 
@@ -1149,7 +1149,7 @@ If you don't see the app/service you'd like to monitor in this list:
 
 - [Traefik](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/traefik/integrations/traefik.md)
 
-- [Varnish](https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/varnish/integrations/varnish.md)
+- [Varnish](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/varnish/integrations/varnish.md)
 
 - [Web server log files](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/weblog/integrations/web_server_log_files.md)
 
@@ -1165,6 +1165,12 @@ If you don't see the app/service you'd like to monitor in this list:
 
 - [MS SQL Server](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/windows/integrations/ms_sql_server.md)
 
+- [Memory statistics](https://github.com/netdata/netdata/blob/master/src/collectors/windows.plugin/integrations/memory_statistics.md)
+
 - [NET Framework](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/windows/integrations/net_framework.md)
 
+- [System statistics](https://github.com/netdata/netdata/blob/master/src/collectors/windows.plugin/integrations/system_statistics.md)
+
+- [System thermal zone](https://github.com/netdata/netdata/blob/master/src/collectors/windows.plugin/integrations/system_thermal_zone.md)
+
 - [Windows](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/windows/integrations/windows.md)
diff --git a/src/collectors/README.md b/src/collectors/README.md
index 0fd5983b7..e7b9c1552 100644
--- a/src/collectors/README.md
+++ b/src/collectors/README.md
@@ -1,62 +1,58 @@
 # Collectors
 
-When Netdata starts, and with zero configuration, it auto-detects thousands of data sources and immediately collects
-per-second metrics.
+Netdata automatically collects per-second metrics from thousands of data sources without any configuration:
 
-Netdata can immediately collect metrics from these endpoints thanks to 300+ **collectors**, which all come pre-installed
-when you [install Netdata](/packaging/installer/README.md).
+- **Zero-touch setup**: All collectors are pre-installed, allowing you to start collecting detailed metrics right after Netdata starts.
+- **Universal Monitoring**: Monitor virtually anything with Netdata's extensive collector library.
 
-All collectors are **installed by default** with every installation of Netdata. You do not need to install
-collectors manually to collect metrics from new sources.
-See how you can [monitor anything with Netdata](/src/collectors/COLLECTORS.md).
+If you don't see charts for your application, check our collectors' [configuration reference](/src/collectors/REFERENCE.md) to ensure both the collector and your application are properly configured.
 
-Upon startup, Netdata will **auto-detect** any application or service that has a collector, as long as both the collector
-and the app/service are configured correctly. If you don't see charts for your application, see
-our [collectors' configuration reference](/src/collectors/REFERENCE.md).
+## Collector Types
 
-## How Netdata's metrics collectors work
+Netdata's collectors are specialized data collection plugins that gather metrics from various sources. They are divided into two main categories:
 
-Every collector has two primary jobs:
+| Type     | Description                                                            | Key Features                                                                                                                                                                                                             |
+|----------|------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| Internal | Native collectors that gather system-level metrics                    | • Written in `C` for optimal performance<br/>• Run as threads within Netdata daemon<br/>• Zero external dependencies<br/>• Minimal system overhead                                                                      |
+| External | Modular collectors that gather metrics from applications and services | • Support multiple programming languages<br/>• Run as independent processes<br/>• Communicate via pipes with Netdata<br/>• Managed by [plugins.d](/src/plugins.d/README.md)<br/>• Examples: MySQL, Nginx, Redis collectors |
+
-- Look for exposed metrics at a pre- or user-defined endpoint.
-- Gather exposed metrics and use additional logic to build meaningful, interactive visualizations.
-
-If the collector finds compatible metrics exposed on the configured endpoint, it begins a per-second collection job. The
-Netdata Agent gathers these metrics, sends them to the
-[database engine for storage](/docs/netdata-agent/configuration/optimizing-metrics-database/change-metrics-storage.md)
-, and immediately
-[visualizes them meaningfully](/docs/dashboards-and-charts/netdata-charts.md)
-on dashboards.
+## Collector Privileges
 
-Each collector comes with a pre-defined configuration that matches the default setup for that application. This endpoint
-can be a URL and port, a socket, a file, a web page, and more. The endpoint is user-configurable, as are many other
-specifics of what a given collector does.
+Netdata uses various plugins and helper binaries that require elevated privileges to collect system metrics.
+This section outlines the required privileges and how they are configured in different environments.
 
-## Collector architecture and terminology
+### Privileges
 
-- **Collectors** are the processes/programs that actually gather metrics from various sources.
+| Plugin/Binary          | Privileges (Linux)                              | Privileges (Non-Linux or Containerized Environment) |
+|------------------------|-------------------------------------------------|-----------------------------------------------------|
+| apps.plugin            | CAP_DAC_READ_SEARCH, CAP_SYS_PTRACE             | setuid root                                         |
+| debugfs.plugin         | CAP_DAC_READ_SEARCH                             | setuid root                                         |
+| systemd-journal.plugin | CAP_DAC_READ_SEARCH                             | setuid root                                         |
+| perf.plugin            | CAP_PERFMON                                     | setuid root                                         |
+| slabinfo.plugin        | CAP_DAC_READ_SEARCH                             | setuid root                                         |
+| go.d.plugin            | CAP_DAC_READ_SEARCH, CAP_NET_ADMIN, CAP_NET_RAW | setuid root                                         |
+| freeipmi.plugin        | setuid root                                     | setuid root                                         |
+| nfacct.plugin          | setuid root                                     | setuid root                                         |
+| xenstat.plugin         | setuid root                                     | setuid root                                         |
+| ioping                 | setuid root                                     | setuid root                                         |
+| ebpf.plugin            | setuid root                                     | setuid root                                         |
+| cgroup-network         | setuid root                                     | setuid root                                         |
+| local-listeners        | setuid root                                     | setuid root                                         |
+| network-viewer.plugin  | setuid root                                     | setuid root                                         |
+| ndsudo                 | setuid root                                     | setuid root                                         |
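+For illustration only: on Linux, file capabilities like those listed above could be granted manually with `setcap` (the Netdata installer normally configures this for you; the plugin path below assumes a default install location):
+
+```bash
+# Hypothetical example: grant apps.plugin its documented capabilities
+sudo setcap 'cap_dac_read_search,cap_sys_ptrace+ep' /usr/libexec/netdata/plugins.d/apps.plugin
+```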
-- **Plugins** help manage all the independent data collection processes in a variety of programming languages, based on
-  their purpose and performance requirements. There are three types of plugins:
+**About ndsudo**:
 
-  - **Internal** plugins organize collectors that gather metrics from `/proc`, `/sys` and other Linux kernel sources.
-    They are written in `C`, and run as threads within the Netdata daemon.
+`ndsudo` is a purpose-built privilege escalation utility for Netdata that executes a predefined set of commands with root privileges. Unlike traditional `sudo`, it operates with a [hard-coded list of allowed commands](https://github.com/netdata/netdata/blob/master/src/collectors/utils/ndsudo.c), providing better security through reduced scope and eliminating the need for `sudo` configuration.
 
-  - **External** plugins organize collectors that gather metrics from external processes, such as a MySQL database or
-    Nginx web server. They can be written in any language, and the `netdata` daemon spawns them as long-running
-    independent processes. They communicate with the daemon via pipes. All external plugins are managed by
-    [plugins.d](/src/collectors/plugins.d/README.md), which provides additional management options.
+It’s used by the `go.d.plugin` to collect data by executing certain binaries that require root access.
 
-- **Orchestrators** are external plugins that run and manage one or more modules. They run as independent processes.
-  The Go orchestrator is in active development.
+### File Permissions and Ownership
 
-  - [go.d.plugin](/src/go/plugin/go.d/README.md): An orchestrator for data
-    collection modules written in `go`.
+To ensure security, all plugin and helper binary files have the following permissions and ownership:
 
-  - [python.d.plugin](/src/collectors/python.d.plugin/README.md):
-    An orchestrator for data collection modules written in `python` v2/v3.
+- **Ownership**: `root:netdata`.
+- **Permissions**: `0750` (for non-setuid binaries) or `4750` (for setuid binaries).
 
-  - [charts.d.plugin](/src/collectors/charts.d.plugin/README.md):
-    An orchestrator for data collection modules written in `bash` v4+.
-
-- **Modules** are the individual programs controlled by an orchestrator to collect data from a specific application, or type of endpoint.
+This configuration limits access to the files to the `netdata` user and the `root` user, while allowing execution by the `netdata` user.
diff --git a/src/collectors/REFERENCE.md b/src/collectors/REFERENCE.md
index e480a16d8..af745013c 100644
--- a/src/collectors/REFERENCE.md
+++ b/src/collectors/REFERENCE.md
@@ -1,32 +1,23 @@
-
-
 # Collectors configuration reference
 
-The list of supported collectors can be found in [the documentation](/src/collectors/COLLECTORS.md),
-and on [our website](https://www.netdata.cloud/integrations). The documentation of each collector provides all the
-necessary configuration options and prerequisites for that collector. In most cases, either the charts are automatically generated
+The list of supported collectors can be found in [the documentation](/src/collectors/COLLECTORS.md),
+and on [our website](https://www.netdata.cloud/integrations). The documentation of each collector provides all the
+necessary configuration options and prerequisites for that collector. In most cases, either the charts are automatically generated
 without any configuration, or you just fulfil those prerequisites and [configure the collector](#configure-a-collector).
 
-If the application you are interested in monitoring is not listed in our integrations, the collectors list includes
-the available options to
+If the application you are interested in monitoring is not listed in our integrations, the collectors list includes
+the available options to
 [add your application to Netdata](https://github.com/netdata/netdata/edit/master/src/collectors/COLLECTORS.md#add-your-application-to-netdata).
 
-If we do support your collector but the charts described in the documentation don't appear on your dashboard, the reason will
+If we do support your collector but the charts described in the documentation don't appear on your dashboard, the reason will
 be one of the following:
 
-- The entire data collection plugin is disabled by default. Read how to [enable and disable plugins](#enable-and-disable-plugins)
+- The entire data collection plugin is disabled by default. Read how to [enable and disable plugins](#enable-and-disable-plugins)
 
-- The data collection plugin is enabled, but a specific data collection module is disabled. Read how to
+- The data collection plugin is enabled, but a specific data collection module is disabled. Read how to
-  [enable and disable a specific collection module](#enable-and-disable-a-specific-collection-module).
+  [enable and disable a specific collection module](#enable-and-disable-a-specific-collection-module).
 
-- Autodetection failed. Read how to [configure](#configure-a-collector) and [troubleshoot](#troubleshoot-a-collector) a collector.
+- Autodetection failed. Read how to [configure](#configure-a-collector) and [troubleshoot](#troubleshoot-a-collector) a collector.
 
 ## Enable and disable plugins
 
@@ -34,28 +25,28 @@ You can enable or disable individual plugins by opening `netdata.conf` and scrol
 This section features a list of Netdata's plugins, with a boolean setting to enable or disable them. The exception is
 `statsd.plugin`, which has its own `[statsd]` section. Your `[plugins]` section should look similar to this:
 
-```conf
+```text
 [plugins]
-	# timex = yes
-	# idlejitter = yes
-	# netdata monitoring = yes
-	# tc = yes
-	# diskspace = yes
-	# proc = yes
-	# cgroups = yes
-	# enable running new plugins = yes
-	# check for new plugins every = 60
-	# slabinfo = no
-	# python.d = yes
-	# perf = yes
-	# ioping = yes
-	# fping = yes
-	# nfacct = yes
-	# go.d = yes
-	# apps = yes
-	# ebpf = yes
-	# charts.d = yes
-	# statsd = yes
+    # timex = yes
+    # idlejitter = yes
+    # netdata monitoring = yes
+    # tc = yes
+    # diskspace = yes
+    # proc = yes
+    # cgroups = yes
+    # enable running new plugins = yes
+    # check for new plugins every = 60
+    # slabinfo = no
+    # python.d = yes
+    # perf = yes
+    # ioping = yes
+    # fping = yes
+    # nfacct = yes
+    # go.d = yes
+    # apps = yes
+    # ebpf = yes
+    # charts.d = yes
+    # statsd = yes
 ```
 
 By default, most plugins are enabled, so you don't need to enable them explicitly to use their collectors. To enable or
@@ -63,11 +54,11 @@ disable any specific plugin, remove the comment (`#`) and change the boolean set
 
 ## Enable and disable a specific collection module
 
-You can enable/disable of the collection modules supported by `go.d`, `python.d` or `charts.d` individually, using the
-configuration file of that orchestrator. For example, you can change the behavior of the Go orchestrator, or any of its
+You can enable/disable any of the collection modules supported by `go.d`, `python.d` or `charts.d` individually, using the
+configuration file of that orchestrator. For example, you can change the behavior of the Go orchestrator, or any of its
 collectors, by editing `go.d.conf`.
 
-Use `edit-config` from your [Netdata config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory)
+Use `edit-config` from your [Netdata config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory)
 to open the orchestrator's primary configuration file:
 
 ```bash
@@ -79,20 +70,19 @@ Within this file, you can either disable the orchestrator entirely (`enabled: ye
 enable/disable it with `yes` and `no` settings. Uncomment any line you change to ensure the Netdata daemon reads it on
 start.
 
-After you make your changes, restart the Agent with `sudo systemctl restart netdata`, or the [appropriate
-method](/packaging/installer/README.md#maintaining-a-netdata-agent-installation) for your system.
+After you make your changes, restart the Agent with the [appropriate method](/docs/netdata-agent/start-stop-restart.md) for your system.
 
 ## Configure a collector
 
 Most collector modules come with **auto-detection**, configured to work out-of-the-box on popular operating systems with
-the default settings.
+the default settings. However, there are cases where auto-detection fails.
Usually, the reason is that the applications to be monitored do not allow Netdata to connect. In most cases, allowing the user `netdata` to connect from `localhost` and collect metrics will automatically enable data collection for the application in question (it will require a Netdata restart).
 
 When Netdata starts up, each collector searches for exposed metrics on the default endpoint established by that service
-or application's standard installation procedure. For example,
+or application's standard installation procedure. For example, the [Nginx collector](/src/go/plugin/go.d/modules/nginx/README.md)
 searches at `http://127.0.0.1/stub_status` for exposed metrics in the correct format. If an Nginx web server is running and exposes
 metrics on that endpoint, the collector begins gathering them.
 
 However, not every node or infrastructure uses standard ports, paths, files, or naming conventions. You may need to
 enable or configure a collector to gather all available metrics from your systems, containers, or applications.
 
-First, [find the collector](/src/collectors/COLLECTORS.md) you want to edit
-and open its documentation. Some software has collectors written in multiple languages. In these cases, you should always
+First, [find the collector](/src/collectors/COLLECTORS.md) you want to edit
+and open its documentation. Some software has collectors written in multiple languages. In these cases, you should always
 pick the collector written in Go.
 
-Use `edit-config` from your
-[Netdata config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory)
+Use `edit-config` from your
+[Netdata config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory)
 to open a collector's configuration file. For example, edit the Nginx collector with the following:
 
 ```bash
@@ -117,8 +107,7 @@ according to your needs. In addition, every collector's documentation shows the
 configure that collector. Uncomment any line you change to ensure the collector's orchestrator or the Netdata daemon
 reads it on start.
 
-After you make your changes, restart the Agent with `sudo systemctl restart netdata`, or the [appropriate
-method](/packaging/installer/README.md#maintaining-a-netdata-agent-installation) for your system.
+After you make your changes, restart the Agent with the [appropriate method](/docs/netdata-agent/start-stop-restart.md) for your system.
 
 ## Troubleshoot a collector
 
 cd /usr/libexec/netdata/plugins.d/
 sudo su -s /bin/bash netdata
 ```
 
-The next step is based on the collector's orchestrator.
+The next step is based on the collector's orchestrator.
 
 ```bash
 # Go orchestrator (go.d.plugin)
@@ -145,5 +134,5 @@ The next step is based on the collector's orchestrator.
 ```
 
 The output from the relevant command will provide valuable troubleshooting information. If you can't figure out how to
-enable the collector using the details from this output, feel free to [join our Discord server](https://discord.com/invite/2mEmfW735j),
+enable the collector using the details from this output, feel free to [join our Discord server](https://discord.com/invite/2mEmfW735j),
diff --git a/src/collectors/all.h b/src/collectors/all.h index 3b96faa10..6892d131e 100644 --- a/src/collectors/all.h +++ b/src/collectors/all.h @@ -55,7 +55,54 @@ #define NETDATA_CHART_PRIO_SYSTEM_IPC_SHARED_MEM_CALLS 1207 #define NETDATA_CHART_PRIO_SYSTEM_PACKETS 7001 // freebsd only #define NETDATA_CHART_PRIO_WINDOWS_THREADS 8001 // Windows only +#define NETDATA_CHART_PRIO_WINDOWS_THERMAL_ZONES 8002 // Windows only +// ---------------------------------------------------------------------------- +// Hyper-V + +#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_CPU_USAGE 20000 +#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_CPU_USAGE_BY_RUN_CONTEXT 20010 +#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_MEMORY_PHYSICAL 20020 +#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_MEMORY_PHYSICAL_GUEST_VISIBLE 20030 +#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_MEMORY_PRESSURE_CURRENT 20040 +#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_VID_PHYSICAL_PAGES_ALLOCATED 20050 +#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_VID_REMOTE_PHYSICAL_PAGES 20060 +#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_NET_INTERFACE_TRAFFIC 20070 +#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_NET_INTERFACE_IPSEC_TRAFFIC 20080 +#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_NET_INTERFACE_PACKETS 20090 +#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_NET_INTERFACE_BROADCAST_PACKETS 20100 +#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_NET_INTERFACE_MULTICAST_PACKETS 20110 +#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_NET_INTERFACE_DIRECTED_PACKETS 20120 +#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_NET_INTERFACE_PACKETS_DROPPED 20130 +#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_STORAGE_DEVICE_BYTES 20140 +#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_STORAGE_DEVICE_ERRORS 20150 +#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_STORAGE_DEVICE_OPERATIONS 20160 +#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VMS_HEALTH 20170 + +#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VSWITCH_TRAFFIC 20400 +#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VSWITCH_PACKETS 20410 +#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VSWITCH_BROADCAST_PACKETS 20420 +#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VSWITCH_MULTICAST_PACKETS 20430 +#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VSWITCH_DIRECTED_PACKETS 20440 +#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VSWITCH_PACKETS_FLOODED 20450 +#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VSWITCH_DROPPED_PACKETS 20460 +#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VSWITCH_EXTENSIONS_DROPPED_PACKETS 20470 +#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VSWITCH_LEARNED_MAC_ADDRESSES 20470 +#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VSWITCH_PURGED_MAC_ADDRESSES 20480 + +#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_ROOT_PARTITION_IO_TLB_FLUSH 20600 +#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_ROOT_PARTITION_VIRTUAL_TLB_FLUSH_ENTRIES 20610 +#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_ROOT_PARTITION_VIRTUAL_TLB_PAGES 20620 +#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_ROOT_PARTITION_ADDRESS_SPACE 20630 +#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_ROOT_PARTITION_ATTACHED_DEVICES 20640 +#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_ROOT_PARTITION_DEVICE_DMA_ERRORS 20650 +#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_ROOT_PARTITION_DEVICE_INTERRUPT_ERRORS 20660 +#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_ROOT_PARTITION_DEVICE_INTERRUPT_THROTTLE_EVENTS 20670 +#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_ROOT_PARTITION_DEPOSITED_PAGES 20680 +#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_ROOT_PARTITION_GPA_SPACE_PAGES 20690 +#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_ROOT_PARTITION_DEVICE_SPACE_PAGES 20700 
+#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_ROOT_PARTITION_GPA_SPACE_MODIFICATIONS 20710
+
+// ----------------------------------------------------------------------------
 // CPU per core
 
@@ -76,7 +123,9 @@
 #define NETDATA_CHART_PRIO_MEM_SYSTEM_COMMITTED 1030
 #define NETDATA_CHART_PRIO_MEM_SWAP 1035
 #define NETDATA_CHART_PRIO_MEM_SWAP_CALLS 1037
+#define NETDATA_CHART_PRIO_MEM_SWAP_PAGES 1037 // Windows only
 #define NETDATA_CHART_PRIO_MEM_SWAPIO 1038
+#define NETDATA_CHART_PRIO_MEM_SYSTEM_POOL 1039 // Windows only
 #define NETDATA_CHART_PRIO_MEM_ZSWAP 1036
 #define NETDATA_CHART_PRIO_MEM_ZSWAPIO 1037
 #define NETDATA_CHART_PRIO_MEM_ZSWAP_COMPRESS_RATIO 1038
@@ -109,7 +158,9 @@
 #define NETDATA_CHART_PRIO_MEM_KSM_COW 1303
 #define NETDATA_CHART_PRIO_MEM_BALLOON 1350
 #define NETDATA_CHART_PRIO_MEM_NUMA 1400
-#define NETDATA_CHART_PRIO_MEM_NUMA_NODES 1410
+#define NETDATA_CHART_PRIO_MEM_NUMA_NODES_NUMASTAT 1410
+#define NETDATA_CHART_PRIO_MEM_NUMA_NODES_MEMINFO 1411
+#define NETDATA_CHART_PRIO_MEM_NUMA_NODES_ACTIVITY 1412
 #define NETDATA_CHART_PRIO_MEM_PAGEFRAG 1450
 #define NETDATA_CHART_PRIO_MEM_HW 1500
 #define NETDATA_CHART_PRIO_MEM_HW_ECC_CE 1550
@@ -134,6 +185,7 @@
 #define NETDATA_CHART_PRIO_DISK_MOPS 2080
 #define NETDATA_CHART_PRIO_DISK_IOTIME 2090
 #define NETDATA_CHART_PRIO_DISK_LATENCY 2095
+#define NETDATA_CHART_PRIO_DISK_SPLIT 2096
 #define NETDATA_CHART_PRIO_BCACHE_CACHE_ALLOC 2120
 #define NETDATA_CHART_PRIO_BCACHE_HIT_RATIO 2120
 #define NETDATA_CHART_PRIO_BCACHE_RATES 2121
@@ -316,6 +368,7 @@
 #define NETDATA_CHART_PRIO_IPV6_TCP_PACKETS 6130
 #define NETDATA_CHART_PRIO_IPV6_TCP_SOCKETS 6140
 #define NETDATA_CHART_PRIO_IPV6_ICMP_PACKETS 6145
+#define NETDATA_CHART_PRIO_IPV6_ICMP_MESSAGES 6146
 #define NETDATA_CHART_PRIO_IPV6_ICMP 6150
 #define NETDATA_CHART_PRIO_IPV6_ICMP_REDIR 6155
 #define NETDATA_CHART_PRIO_IPV6_ICMP_ERRORS 6160
diff --git a/src/collectors/apps.plugin/README.md b/src/collectors/apps.plugin/README.md
index ced91d8ae..f3822e466 100644
--- a/src/collectors/apps.plugin/README.md
+++ b/src/collectors/apps.plugin/README.md
@@ -1,402 +1,180 @@
-
+# Applications monitoring (apps.plugin)
 
-# Application monitoring (apps.plugin)
+`apps.plugin` monitors the resource utilization of all running processes.
 
-`apps.plugin` breaks down system resource usage to **processes**, **users** and **user groups**.
-It is enabled by default on every Netdata installation.
+## Process Aggregation and Grouping
 
-To achieve this task, it iterates through the whole process tree, collecting resource usage information
-for every process found running.
+`apps.plugin` aggregates processes in three distinct ways to provide a more insightful breakdown of resource utilization:
 
-Since Netdata needs to present this information in charts and track them through time,
-instead of presenting a `top` like list, `apps.plugin` uses a pre-defined list of **process groups**
-to which it assigns all running processes. This list is customizable via `apps_groups.conf`, and Netdata
-ships with a good default for most cases (to edit it on your system run `/etc/netdata/edit-config apps_groups.conf`).
+| Grouping   | Description                                                                                                                                         |
+|------------|-------------------------------------------------------------------------------------------------------------------------------------------------------|
+| App        | Grouped by the position in the process tree. This is customizable and allows aggregation by process managers and individual processes of interest. |
+| User       | Grouped by the effective user (UID) under which the processes run.                                                                                  |
| +| User Group | Grouped by the effective group (GID) under which the processes run. | -So, `apps.plugin` builds a process tree (much like `ps fax` does in Linux), and groups -processes together (evaluating both child and parent processes) so that the result is always a list with -a predefined set of members (of course, only process groups found running are reported). +## Short-Lived Process Handling -> If you find that `apps.plugin` categorizes standard applications as `other`, we would be -> glad to accept pull requests improving the defaults shipped with Netdata in `apps_groups.conf`. - -Unlike traditional process monitoring tools (like `top`), `apps.plugin` is able to account the resource -utilization of exit processes. Their utilization is accounted at their currently running parents. -So, `apps.plugin` is perfectly able to measure the resources used by shell scripts and other processes -that fork/spawn other short-lived processes hundreds of times per second. +`apps.plugin` accurately captures resource utilization for both running and exited processes, ensuring that the impact of short-lived subprocesses is fully accounted for. +This is particularly valuable for scenarios where processes spawn numerous short-lived subprocesses, such as shell scripts that fork hundreds or thousands of times per second. +Even though these subprocesses may have a brief lifespan, `apps.plugin` effectively aggregates their resource utilization, providing a comprehensive overview of how resources are shared among all processes within the system. ## Charts -`apps.plugin` provides charts for 3 sections: - -1. Per application charts as **Applications** at Netdata dashboards -2. Per user charts as **Users** at Netdata dashboards -3. Per user group charts as **User Groups** at Netdata dashboards - -Each of these sections provides the same number of charts: - -- CPU utilization (`apps.cpu`) - - Total CPU usage - - User/system CPU usage (`apps.cpu_user`/`apps.cpu_system`) -- Disk I/O - - Physical reads/writes (`apps.preads`/`apps.pwrites`) - - Logical reads/writes (`apps.lreads`/`apps.lwrites`) - - Open unique files (if a file is found open multiple times, it is counted just once, `apps.files`) -- Memory - - Real Memory Used (non-shared, `apps.mem`) - - Virtual Memory Allocated (`apps.vmem`) - - Minor page faults (i.e. memory activity, `apps.minor_faults`) -- Processes - - Threads running (`apps.threads`) - - Processes running (`apps.processes`) - - Carried over uptime (since the last Netdata Agent restart, `apps.uptime`) - - Minimum uptime (`apps.uptime_min`) - - Average uptime (`apps.uptime_average`) - - Maximum uptime (`apps.uptime_max`) - - Pipes open (`apps.pipes`) -- Swap memory - - Swap memory used (`apps.swap`) - - Major page faults (i.e. swap activity, `apps.major_faults`) -- Network - - Sockets open (`apps.sockets`) - +`apps.plugin` offers a set of charts for three groups within the **System->Processes** section of the Netdata dashboard: **Apps**, **Users**, and **Groups**. + +Each of these sections presents the same number of charts: + +- CPU utilization + - Total CPU usage + - User/system CPU usage +- Memory + - Real Memory Used (non-shared) + - Virtual Memory Allocated + - Minor page faults (i.e. memory activity) +- Swap memory + - Swap memory used + - Major page faults (i.e. 
swap activity)
+- Disk
+  - Physical reads/writes
+  - Logical reads/writes
+- Tasks
+  - Threads
+  - Processes
+- FDs
+  - Open file descriptors limit %
+  - Open file descriptors
+- Uptime
+  - Carried over uptime (since the last Netdata Agent restart)
+
 In addition, if the [eBPF collector](/src/collectors/ebpf.plugin/README.md) is running, your dashboard will also show an additional [list of charts](/src/collectors/ebpf.plugin/README.md#integration-with-appsplugin) using low-level Linux metrics.

-The above are reported:
-
-- For **Applications** per target configured.
-- For **Users** per username or UID (when the username is not available).
-- For **User Groups** per group name or GID (when group name is not available).
-
 ## Performance

-`apps.plugin` is a complex piece of software and has a lot of work to do
-We are proud that `apps.plugin` is a lot faster compared to any other similar tool,
-while collecting a lot more information for the processes, however the fact is that
-this plugin requires more CPU resources than the `netdata` daemon itself.
+`apps.plugin` is designed to be highly efficient, collecting significantly more process information than other similar tools while maintaining exceptional speed.
+However, due to its comprehensive approach of traversing the entire process tree on each iteration, its resource usage may become noticeable, especially on systems with a large number of processes.

-Under Linux, for each process running, `apps.plugin` reads several `/proc` files
-per process. Doing this work per-second, especially on hosts with several thousands
-of processes, may increase the CPU resources consumed by the plugin.
+Under Linux, `apps.plugin` reads multiple `/proc` files for each running process, performing this operation on a per-second basis.
+This can lead to increased CPU consumption on hosts with several thousand processes.

-In such cases, you may need to lower its data collection frequency.
+In such cases, you may need to adjust the data collection frequency to reduce the plugin's resource usage.
 To do this, edit `/etc/netdata/netdata.conf` and find this section:

-```
+```text
 [plugin:apps]
-	# update every = 1
-	# command options =
+  # update every = 1
+  # command options =
 ```

-Uncomment the line `update every` and set it to a higher number. If you just set it to `2`,
-its CPU resources will be cut in half, and data collection will be once every 2 seconds.
+Uncomment the `update every` line and set it to a higher value.
+For example, setting it to `2` will halve the plugin's CPU usage and collect data once every 2 seconds.

 ## Configuration

-The configuration file is `/etc/netdata/apps_groups.conf`. To edit it on your system, run `/etc/netdata/edit-config apps_groups.conf`.
-
-The configuration file works accepts multiple lines, each having this format:
-
-```txt
-group: process1 process2 ...
-```
-
-Each group can be given multiple times, to add more processes to it.
-
-For the **Applications** section, only groups configured in this file are reported.
-All other processes will be reported as `other`.
-
-For each process given, its whole process tree will be grouped, not just the process matched.
-The plugin will include both parents and children. If including the parents into the group is
-undesirable, the line `other: *` should be appended to the `apps_groups.conf`.
- -The process names are the ones returned by: - -- `ps -e` or `cat /proc/PID/stat` -- in case of substring mode (see below): `/proc/PID/cmdline` - -To add process names with spaces, enclose them in quotes (single or double) -example: `'Plex Media Serv'` or `"my other process"`. - -You can add an asterisk `*` at the beginning and/or the end of a process: - -- `*name` _suffix_ mode: will search for processes ending with `name` (at `/proc/PID/stat`) -- `name*` _prefix_ mode: will search for processes beginning with `name` (at `/proc/PID/stat`) -- `*name*` _substring_ mode: will search for `name` in the whole command line (at `/proc/PID/cmdline`) - -If you enter even just one _name_ (substring), `apps.plugin` will process -`/proc/PID/cmdline` for all processes (of course only once per process: when they are first seen). - -To add processes with single quotes, enclose them in double quotes: `"process with this ' single quote"` - -To add processes with double quotes, enclose them in single quotes: `'process with this " double quote'` - -If a group or process name starts with a `-`, the dimension will be hidden from the chart (cpu chart only). - -If a process starts with a `+`, debugging will be enabled for it (debugging produces a lot of output - do not enable it in production systems). - -You can add any number of groups. Only the ones found running will affect the charts generated. -However, producing charts with hundreds of dimensions may slow down your web browser. - -The order of the entries in this list is important: the first that matches a process is used, so put important -ones at the top. Processes not matched by any row, will inherit it from their parents or children. - -The order also controls the order of the dimensions on the generated charts (although applications started -after apps.plugin is started, will be appended to the existing list of dimensions the `netdata` daemon maintains). - -There are a few command line options you can pass to `apps.plugin`. The list of available options can be acquired with the `--help` flag. The options can be set in the `netdata.conf` file. For example, to disable user and user group charts you should set - -``` -[plugin:apps] - command options = without-users without-groups -``` - -### Integration with eBPF - -If you don't see charts under the **eBPF syscall** or **eBPF net** sections, you should edit your -[`ebpf.d.conf`](/src/collectors/ebpf.plugin/README.md#configure-the-ebpf-collector) file to ensure the eBPF program is enabled. - -Also see our [guide on troubleshooting apps with eBPF -metrics](/docs/developer-and-contributor-corner/monitor-debug-applications-ebpf.md) for ideas on how to interpret these charts in a -few scenarios. - -## Permissions - -`apps.plugin` requires additional privileges to collect all the information it needs. -The problem is described in issue #157. - -When Netdata is installed, `apps.plugin` is given the capabilities `cap_dac_read_search,cap_sys_ptrace+ep`. -If this fails (i.e. `setcap` fails), `apps.plugin` is setuid to `root`. +The configuration file is `/etc/netdata/apps_groups.conf`. You can edit this +file using our [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script. -### linux capabilities in containers +### Configuring process managers -There are a few cases, like `docker` and `virtuozzo` containers, where `setcap` succeeds, but the capabilities -are silently ignored (in `lxc` containers `setcap` fails). 
+`apps.plugin` needs to know the common process managers, which are the processes that spawn other processes.
+These process managers allow `apps.plugin` to automatically include their subprocesses in the monitoring, ensuring that important processes are not overlooked.

-In this case, you will have to setuid to root `apps.plugin` by running these commands:
+- Process managers are configured in the `apps_groups.conf` file using the `managers:` prefix, as follows:

-```sh
-chown root:netdata /usr/libexec/netdata/plugins.d/apps.plugin
-chmod 4750 /usr/libexec/netdata/plugins.d/apps.plugin
-```
+  ```text
+  managers: process1 process2 process3
+  ```

-You will have to run these, every time you update Netdata.
-
-## Security
+- Multiple lines can be used to define additional process managers, all starting with `managers:`.

-`apps.plugin` performs a hard-coded function of building the process tree in memory,
-iterating forever, collecting metrics for each running process and sending them to Netdata.
-This is a one-way communication, from `apps.plugin` to Netdata.
+- If you want to clear all existing process managers, you can use the line `managers: clear`. This will remove all previously configured managers, allowing you to provide a new list.

-So, since `apps.plugin` cannot be instructed by Netdata for the actions it performs,
-we think it is pretty safe to allow it to have these increased privileges.
+### Configuring interpreters

-Keep in mind that `apps.plugin` will still run without escalated permissions,
-but it will not be able to collect all the information.
+Interpreted languages like `python`, `bash`, `sh`, `node`, and others may obfuscate the actual name of a process.

-## Application Badges
+To address this, `apps.plugin` allows you to configure interpreters and specify that the actual process name can be found in one of the command-line parameters of the interpreter.
+When a process matches a configured interpreter, `apps.plugin` will examine all the parameters of the interpreter and locate the first parameter that is an absolute filename existing on disk. If such a filename is found, `apps.plugin` will name the process after that filename.

-You can create badges that you can embed anywhere you like, with URLs like this:
+- Interpreters are configured in the `apps_groups.conf` file using the `interpreters:` prefix, as follows (see the example after this list):

+  ```text
+  interpreters: process1 process2 process3
+  ```
-```
-https://your.netdata.ip:19999/api/v1/badge.svg?chart=apps.processes&dimensions=myapp&value_color=green%3E0%7Cred
-```
-
-The color expression unescaped is this: `value_color=green>0|red`.
-
-Here is an example for the process group `sql` at `https://registry.my-netdata.io`:
-![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.processes&dimensions=sql&value_color=green%3E0%7Cred)
+- Multiple lines can be used to define additional interpreters, all starting with `interpreters:`.

-Netdata is able to give you a lot more badges for your app.
-Examples below for process group `sql`:
+- If you want to clear all existing interpreters, you can use the line `interpreters: clear`. This will remove all previously configured interpreters, allowing you to provide a new list.
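+For example, a minimal sketch (the interpreter list and the script path below are illustrative, not the defaults shipped with Netdata):
+
+```text
+interpreters: python python3 sh bash node
+```
+
+With this in place, a process started as `/usr/bin/python3 /opt/acme/collector.py` would be named `collector.py` instead of `python3`, because `/opt/acme/collector.py` is the first interpreter parameter that is an absolute filename existing on disk.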
-- CPU usage: ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.cpu&dimensions=sql&value_color=green=0%7Corange%3C50%7Cred) -- Disk Physical Reads ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.preads&dimensions=sql&value_color=green%3C100%7Corange%3C1000%7Cred) -- Disk Physical Writes ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.pwrites&dimensions=sql&value_color=green%3C100%7Corange%3C1000%7Cred) -- Disk Logical Reads ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.lreads&dimensions=sql&value_color=green%3C100%7Corange%3C1000%7Cred) -- Disk Logical Writes ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.lwrites&dimensions=sql&value_color=green%3C100%7Corange%3C1000%7Cred) -- Open Files ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.fds_files&dimensions=sql&value_color=green%3E30%7Cred) -- Real Memory ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.mem&dimensions=sql&value_color=green%3C100%7Corange%3C200%7Cred) -- Virtual Memory ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.vmem&dimensions=sql&value_color=green%3C100%7Corange%3C1000%7Cred) -- Swap Memory ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.swap&dimensions=sql&value_color=green=0%7Cred) -- Minor Page Faults ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.minor_faults&dimensions=sql&value_color=green%3C100%7Corange%3C1000%7Cred) -- Processes ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.processes&dimensions=sql&value_color=green%3E0%7Cred) -- Threads ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.threads&dimensions=sql&value_color=green%3E=28%7Cred) -- Major Faults (swap activity) ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.major_faults&dimensions=sql&value_color=green=0%7Cred) -- Open Pipes ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.fds_pipes&dimensions=sql&value_color=green=0%7Cred) -- Open Sockets ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.fds_sockets&dimensions=sql&value_color=green%3E=3%7Cred) +### Configuring process groups and renaming processes -For more information about badges check [Generating Badges](/src/web/api/badges/README.md) +- The configuration file supports multiple lines, each following this format: -## Comparison with console tools + ```text + group: process1 process2 ... + ``` -SSH to a server running Netdata and execute this: - -```sh -while true; do ls -l /var/run >/dev/null; done -``` +- You can define a group multiple times to include additional processes within it. -In most systems `/var/run` is a `tmpfs` device, so there is nothing that can stop this command -from consuming entirely one of the CPU cores of the machine. +- For each process specified, all of its subprocesses will be automatically grouped, not just the matched process itself. -As we will see below, **none** of the console performance monitoring tools can report that this -command is using 100% CPU. They do report of course that the CPU is busy, but **they fail to -identify the process that consumes so much CPU**. +### Matching processes -Here is what common Linux console monitoring tools report: +The following methods are used for matching against the specified patterns: -### top - -`top` reports that `bash` is using just 14%. 
- -If you check the total system CPU utilization, it says there is no idle CPU at all, but `top` -fails to provide a breakdown of the CPU consumption in the system. The sum of the CPU utilization -of all processes reported by `top`, is 15.6%. - -``` -top - 18:46:28 up 3 days, 20:14, 2 users, load average: 0.22, 0.05, 0.02 -Tasks: 76 total, 2 running, 74 sleeping, 0 stopped, 0 zombie -%Cpu(s): 32.8 us, 65.6 sy, 0.0 ni, 0.0 id, 0.0 wa, 1.3 hi, 0.3 si, 0.0 st -KiB Mem : 1016576 total, 244112 free, 52012 used, 720452 buff/cache -KiB Swap: 0 total, 0 free, 0 used. 753712 avail Mem - - PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND -12789 root 20 0 14980 4180 3020 S 14.0 0.4 0:02.82 bash - 9 root 20 0 0 0 0 S 1.0 0.0 0:22.36 rcuos/0 - 642 netdata 20 0 132024 20112 2660 S 0.3 2.0 14:26.29 netdata -12522 netdata 20 0 9508 2476 1828 S 0.3 0.2 0:02.26 apps.plugin - 1 root 20 0 67196 10216 7500 S 0.0 1.0 0:04.83 systemd - 2 root 20 0 0 0 0 S 0.0 0.0 0:00.00 kthreadd -``` - -### htop - -Exactly like `top`, `htop` is providing an incomplete breakdown of the system CPU utilization. - -``` - CPU[||||||||||||||||||||||||100.0%] Tasks: 27, 11 thr; 2 running - Mem[||||||||||||||||||||85.4M/993M] Load average: 1.16 0.88 0.90 - Swp[ 0K/0K] Uptime: 3 days, 21:37:03 - - PID USER PRI NI VIRT RES SHR S CPU% MEM% TIME+ Command -12789 root 20 0 15104 4484 3208 S 14.0 0.4 10:57.15 -bash - 7024 netdata 20 0 9544 2480 1744 S 0.7 0.2 0:00.88 /usr/libexec/netd - 7009 netdata 20 0 138M 21016 2712 S 0.7 2.1 0:00.89 /usr/sbin/netdata - 7012 netdata 20 0 138M 21016 2712 S 0.0 2.1 0:00.31 /usr/sbin/netdata - 563 root 20 0 308M 202M 202M S 0.0 20.4 1:00.81 /usr/lib/systemd/ - 7019 netdata 20 0 138M 21016 2712 S 0.0 2.1 0:00.14 /usr/sbin/netdata -``` +| Method | Description | +|---------|----------------------------------------------------------------------| +| comm | Process name as reported by `ps -e` or `cat /proc/{PID}/comm` | +| cmdline | The complete command line (`cat /proc/{PID}/cmdline \| tr '\0' ' '`) | -### atop +> On Linux, the **comm** field is limited to 15 characters. +> `apps.plugin` attempts to obtain the full process name by searching for it in the **cmdline**. +> If successful, the entire process name is used; otherwise, the shortened version is used. -`atop` also fails to break down CPU usage. 
+You can use asterisks (`*`) to create patterns: -``` -ATOP - localhost 2016/12/10 20:11:27 ----------- 10s elapsed -PRC | sys 1.13s | user 0.43s | #proc 75 | #zombie 0 | #exit 5383 | -CPU | sys 67% | user 31% | irq 2% | idle 0% | wait 0% | -CPL | avg1 1.34 | avg5 1.05 | avg15 0.96 | csw 51346 | intr 10508 | -MEM | tot 992.8M | free 211.5M | cache 470.0M | buff 87.2M | slab 164.7M | -SWP | tot 0.0M | free 0.0M | | vmcom 207.6M | vmlim 496.4M | -DSK | vda | busy 0% | read 0 | write 4 | avio 1.50 ms | -NET | transport | tcpi 16 | tcpo 15 | udpi 0 | udpo 0 | -NET | network | ipi 16 | ipo 15 | ipfrw 0 | deliv 16 | -NET | eth0 ---- | pcki 16 | pcko 15 | si 1 Kbps | so 4 Kbps | - - PID SYSCPU USRCPU VGROW RGROW RDDSK WRDSK ST EXC S CPU CMD 1/600 -12789 0.98s 0.40s 0K 0K 0K 336K -- - S 14% bash - 9 0.08s 0.00s 0K 0K 0K 0K -- - S 1% rcuos/0 - 7024 0.03s 0.00s 0K 0K 0K 0K -- - S 0% apps.plugin - 7009 0.01s 0.01s 0K 0K 0K 4K -- - S 0% netdata -``` +| Mode | Pattern | Description | +|-----------|----------|------------------------------------------| +| prefix | `name*` | Matches a **comm** that begins with name | +| suffix | `*name` | Matches a **comm** that ends with name | +| substring | `*name*` | Searches for name within the **cmdline** | -### glances +- Asterisks can be placed anywhere within name (e.g., `na*me`) without affecting the matching criteria (**comm** or **cmdline**). +- To include process names with spaces, enclose them in quotes (single or double), like this: `'Plex Media Serv'` or `"my other process"`. +- To include processes with single quotes, enclose them in double quotes: `"process with this ' single quote"`. +- To include processes with double quotes, enclose them in single quotes: `'process with this " double quote'`. +- The order of the entries in the configuration list is crucial. The first matching entry will be used, so it's important to follow a top-down hierarchy. Processes that don't match any entry will inherit the group from their parent processes. -And the same is true for `glances`. The system runs at 100%, but `glances` reports only 17% -per process utilization. +There are a few command line options you can pass to `apps.plugin`. The list of available options can be acquired with the `--help` flag. +The options can be set in the `netdata.conf` using the [`edit-config` script](/docs/netdata-agent/configuration/README.md). -Note also, that being a `python` program, `glances` uses 1.6% CPU while it runs. +For example, to disable user and user group charts you would set: +```text +[plugin:apps] + command options = without-users without-groups ``` -localhost Uptime: 3 days, 21:42:00 - -CPU [100.0%] CPU 100.0% MEM 23.7% SWAP 0.0% LOAD 1-core -MEM [ 23.7%] user: 30.9% total: 993M total: 0 1 min: 1.18 -SWAP [ 0.0%] system: 67.8% used: 236M used: 0 5 min: 1.08 - idle: 0.0% free: 757M free: 0 15 min: 1.00 - -NETWORK Rx/s Tx/s TASKS 75 (90 thr), 1 run, 74 slp, 0 oth -eth0 168b 2Kb -eth1 0b 0b CPU% MEM% PID USER NI S Command -lo 0b 0b 13.5 0.4 12789 root 0 S -bash - 1.6 2.2 7025 root 0 R /usr/bin/python /u -DISK I/O R/s W/s 1.0 0.0 9 root 0 S rcuos/0 -vda1 0 4K 0.3 0.2 7024 netdata 0 S /usr/libexec/netda - 0.3 0.0 7 root 0 S rcu_sched -FILE SYS Used Total 0.3 2.1 7009 netdata 0 S /usr/sbin/netdata -/ (vda1) 1.56G 29.5G 0.0 0.0 17 root 0 S oom_reaper -``` - -### why does this happen? - -All the console tools report usage based on the processes found running *at the moment they -examine the process tree*. 
So, they see just one `ls` command, which is actually very quick -with minor CPU utilization. But the shell, is spawning hundreds of them, one after another -(much like shell scripts do). - -### What does Netdata report? - -The total CPU utilization of the system: -![image](https://cloud.githubusercontent.com/assets/2662304/21076212/9198e5a6-bf2e-11e6-9bc0-6bdea25befb2.png) -
***Figure 1**: The system overview section at Netdata, just a few seconds after the command was run* - -And at the applications `apps.plugin` breaks down CPU usage per application: - -![image](https://cloud.githubusercontent.com/assets/2662304/21076220/c9687848-bf2e-11e6-8d81-348592c5aca2.png) -
***Figure 2**: The Applications section at Netdata, just a few seconds after the command was run*
-
-So, the `ssh` session is using 95% CPU time.
-
-Why `ssh`?
+### Integration with eBPF
-`apps.plugin` groups all processes based on its configuration file.
-The default configuration has nothing for `bash`, but it has for `sshd`, so Netdata accumulates
-all ssh sessions to a dimension on the charts, called `ssh`. This includes all the processes in
-the process tree of `sshd`, **including the exited children**.
+If you don't see charts under the **eBPF syscall** or **eBPF net** sections, you should edit your
+[`ebpf.d.conf`](/src/collectors/ebpf.plugin/README.md#configure-the-ebpf-collector) file to ensure the eBPF program is enabled.
-> Distributions based on `systemd`, provide another way to get cpu utilization per user session
-> or service running: control groups, or cgroups, commonly used as part of containers
-> `apps.plugin` does not use these mechanisms. The process grouping made by `apps.plugin` works
-> on any Linux, `systemd` based or not.
+Also see our [guide on troubleshooting apps with eBPF metrics](/docs/developer-and-contributor-corner/monitor-debug-applications-ebpf.md) for ideas on how to interpret these charts in a few scenarios.
-#### a more technical description of how Netdata works
+## Permissions
-Netdata reads `/proc/<pid>/stat` for all processes, once per second and extracts `utime` and
-`stime` (user and system cpu utilization), much like all the console tools do.
+`apps.plugin` requires additional privileges to collect all the necessary information.
-But it also extracts `cutime` and `cstime` that account the user and system time of the exit children of each process.
-By keeping a map in memory of the whole process tree, it is capable of assigning the right time to every process, taking
-into account all its exited children.
+During Netdata installation, `apps.plugin` is granted the `cap_dac_read_search` and `cap_sys_ptrace` capabilities (applied as `cap_dac_read_search,cap_sys_ptrace+ep`).
+If this fails (i.e., `setcap` fails), `apps.plugin` is setuid to `root`.
-It is tricky, since a process may be running for 1 hour and once it exits, its parent should not
-receive the whole 1 hour of cpu time in just 1 second - you have to subtract the cpu time that has
-been reported for it prior to this iteration.
+## Security
-It is even trickier, because walking through the entire process tree takes some time itself. So,
-if you sum the CPU utilization of all processes, you might have more CPU time than the reported
-total cpu time of the system. Netdata solves this, by adapting the per process cpu utilization to
-the total of the system. [Netdata adds charts that document this normalization](https://london.my-netdata.io/default.html#menu_netdata_submenu_apps_plugin).
+`apps.plugin` operates on a one-way communication model, sending metrics to Netdata without receiving instructions. This design minimizes potential security risks.
+Although `apps.plugin` can function without escalated privileges, it may not be able to collect all the necessary information. To ensure comprehensive data collection, it's recommended to grant the required privileges.
+The increased privileges are primarily used for building the process tree in memory, iterating over running processes, collecting metrics, and sending them to Netdata. This process does not involve any external communication or user interaction, further reducing security concerns.
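+
+To check which of the two privilege modes is active on your system, a quick sketch (assuming the default plugin path `/usr/libexec/netdata/plugins.d`; adjust it to match your installation):
+
+```sh
+# show the file capabilities applied at installation time
+getcap /usr/libexec/netdata/plugins.d/apps.plugin
+# expect output similar to:
+#   /usr/libexec/netdata/plugins.d/apps.plugin cap_dac_read_search,cap_sys_ptrace=ep
+
+# if no capabilities are reported, look for the setuid-to-root fallback instead
+ls -l /usr/libexec/netdata/plugins.d/apps.plugin
+```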
diff --git a/src/collectors/apps.plugin/apps_aggregations.c b/src/collectors/apps.plugin/apps_aggregations.c new file mode 100644 index 000000000..d8846d6e7 --- /dev/null +++ b/src/collectors/apps.plugin/apps_aggregations.c @@ -0,0 +1,250 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "apps_plugin.h" + +// ---------------------------------------------------------------------------- +// update statistics on the targets + +static size_t zero_all_targets(struct target *root) { + struct target *w; + size_t count = 0; + + for (w = root; w ; w = w->next) { + count++; + + for(size_t f = 0; f < PDF_MAX ;f++) + w->values[f] = 0; + + w->uptime_min = 0; + w->uptime_max = 0; + +#if (PROCESSES_HAVE_FDS == 1) + // zero file counters + if(w->target_fds) { + memset(w->target_fds, 0, sizeof(int) * w->target_fds_size); + w->openfds.files = 0; + w->openfds.pipes = 0; + w->openfds.sockets = 0; + w->openfds.inotifies = 0; + w->openfds.eventfds = 0; + w->openfds.timerfds = 0; + w->openfds.signalfds = 0; + w->openfds.eventpolls = 0; + w->openfds.other = 0; + + w->max_open_files_percent = 0.0; + } +#endif + + if(unlikely(w->root_pid)) { + struct pid_on_target *pid_on_target = w->root_pid; + + while(pid_on_target) { + struct pid_on_target *pid_on_target_to_free = pid_on_target; + pid_on_target = pid_on_target->next; + freez(pid_on_target_to_free); + } + + w->root_pid = NULL; + } + } + + return count; +} + +static inline void aggregate_pid_on_target(struct target *w, struct pid_stat *p, struct target *o __maybe_unused) { + if(unlikely(!p->updated)) { + // the process is not running + return; + } + + if(unlikely(!w)) { + netdata_log_error("pid %d %s was left without a target!", p->pid, pid_stat_comm(p)); + return; + } + +#if (PROCESSES_HAVE_FDS == 1) && (PROCESSES_HAVE_PID_LIMITS == 1) + if(p->openfds_limits_percent > w->max_open_files_percent) + w->max_open_files_percent = p->openfds_limits_percent; +#endif + + for(size_t f = 0; f < PDF_MAX ;f++) + w->values[f] += p->values[f]; + + if(!w->uptime_min || p->values[PDF_UPTIME] < w->uptime_min) w->uptime_min = p->values[PDF_UPTIME]; + if(!w->uptime_max || w->uptime_max < p->values[PDF_UPTIME]) w->uptime_max = p->values[PDF_UPTIME]; + + if(unlikely(debug_enabled)) { + struct pid_on_target *pid_on_target = mallocz(sizeof(struct pid_on_target)); + pid_on_target->pid = p->pid; + pid_on_target->next = w->root_pid; + w->root_pid = pid_on_target; + } +} + +static inline void cleanup_exited_pids(void) { + struct pid_stat *p = NULL; + + for(p = root_of_pids(); p ;) { + if(!p->updated && (!p->keep || p->keeploops > 0)) { + if(unlikely(debug_enabled && (p->keep || p->keeploops))) + debug_log(" > CLEANUP cannot keep exited process %d (%s) anymore - removing it.", p->pid, pid_stat_comm(p)); + +#if (PROCESSES_HAVE_FDS == 1) + for(size_t c = 0; c < p->fds_size; c++) + if(p->fds[c].fd > 0) { + file_descriptor_not_used(p->fds[c].fd); + clear_pid_fd(&p->fds[c]); + } +#endif + + const pid_t r = p->pid; + p = p->next; + del_pid_entry(r); + } + else { + if(unlikely(p->keep)) p->keeploops++; + p->keep = false; + p = p->next; + } + } +} + +static struct target *get_apps_groups_target_for_pid(struct pid_stat *p) { + targets_assignment_counter++; + + for(struct target *w = apps_groups_root_target; w ; w = w->next) { + if(w->type != TARGET_TYPE_APP_GROUP) continue; + + if(pid_match_check(p, &w->match)) { + if(p->is_manager) + return NULL; + + p->matched_by_config = true; + return w->target ? 
w->target : w; + } + } + + return NULL; +} + +static void assign_a_target_to_all_processes(void) { + // assign targets from app_groups.conf + for(struct pid_stat *p = root_of_pids(); p ; p = p->next) { + if(!p->target) + p->target = get_apps_groups_target_for_pid(p); + } + + // assign targets from their parents, if they have + for(struct pid_stat *p = root_of_pids(); p ; p = p->next) { + if(!p->target) { + if(!p->is_manager) { + for (struct pid_stat *pp = p->parent; pp; pp = pp->parent) { + if(pp->is_manager) break; + + if (pp->target) { + p->target = pp->target; + break; + } + } + } + + if(!p->target) { + // there is no target, get it from the tree + p->target = get_tree_target(p); + } + } + + fatal_assert(p->target != NULL); + } +} + +void aggregate_processes_to_targets(void) { + assign_a_target_to_all_processes(); + apps_groups_targets_count = zero_all_targets(apps_groups_root_target); + +#if (PROCESSES_HAVE_UID == 1) + zero_all_targets(users_root_target); +#endif +#if (PROCESSES_HAVE_GID == 1) + zero_all_targets(groups_root_target); +#endif +#if (PROCESSES_HAVE_SID == 1) + zero_all_targets(sids_root_target); +#endif + + // this has to be done, before the cleanup + struct target *w = NULL, *o = NULL; + (void)w; (void)o; + + // concentrate everything on the targets + for(struct pid_stat *p = root_of_pids(); p ; p = p->next) { + + // -------------------------------------------------------------------- + // apps_groups and tree target + + aggregate_pid_on_target(p->target, p, NULL); + + + // -------------------------------------------------------------------- + // user target + +#if (PROCESSES_HAVE_UID == 1) + update_cached_host_users(); + + o = p->uid_target; + if(likely(p->uid_target && p->uid_target->uid == p->uid)) + w = p->uid_target; + else { + if(unlikely(debug_enabled && p->uid_target)) + debug_log("pid %d (%s) switched user from %u (%s) to %u.", p->pid, pid_stat_comm(p), p->uid_target->uid, p->uid_target->name, p->uid); + + w = p->uid_target = get_uid_target(p->uid); + } + + aggregate_pid_on_target(w, p, o); +#endif + + // -------------------------------------------------------------------- + // user group target + +#if (PROCESSES_HAVE_GID == 1) + update_cached_host_users(); + + o = p->gid_target; + if(likely(p->gid_target && p->gid_target->gid == p->gid)) + w = p->gid_target; + else { + if(unlikely(debug_enabled && p->gid_target)) + debug_log("pid %d (%s) switched group from %u (%s) to %u.", p->pid, pid_stat_comm(p), p->gid_target->gid, p->gid_target->name, p->gid); + + w = p->gid_target = get_gid_target(p->gid); + } + + aggregate_pid_on_target(w, p, o); +#endif + + // -------------------------------------------------------------------- + // sid target + +#if (PROCESSES_HAVE_SID == 1) + o = p->sid_target; + if(likely(p->sid_target && p->sid_target->sid_name == p->sid_name)) + w = p->sid_target; + else + w = p->sid_target = get_sid_target(p->sid_name); + + aggregate_pid_on_target(w, p, o); +#endif + + // -------------------------------------------------------------------- + // aggregate all file descriptors + +#if (PROCESSES_HAVE_FDS == 1) + if(enable_file_charts) + aggregate_pid_fds_on_targets(p); +#endif + } + + cleanup_exited_pids(); +} diff --git a/src/collectors/apps.plugin/apps_functions.c b/src/collectors/apps.plugin/apps_functions.c index 54eaeeb90..6f8d1dc38 100644 --- a/src/collectors/apps.plugin/apps_functions.c +++ b/src/collectors/apps.plugin/apps_functions.c @@ -24,28 +24,42 @@ static void apps_plugin_function_processes_help(const char *transaction) { " 
category:NAME\n" " Shows only processes that are assigned the category `NAME` in apps_groups.conf\n" "\n" + " parent:NAME\n" + " Shows only processes that are aggregated under parent `NAME`\n" + "\n" +#if (PROCESSES_HAVE_UID == 1) || (PROCESSES_HAVE_SID == 1) " user:NAME\n" " Shows only processes that are running as user name `NAME`.\n" "\n" +#endif +#if (PROCESSES_HAVE_GID == 1) " group:NAME\n" " Shows only processes that are running as group name `NAME`.\n" "\n" +#endif " process:NAME\n" " Shows only processes that their Command is `NAME` or their parent's Command is `NAME`.\n" "\n" " pid:NUMBER\n" " Shows only processes that their PID is `NUMBER` or their parent's PID is `NUMBER`\n" "\n" +#if (PROCESSES_HAVE_UID == 1) " uid:NUMBER\n" " Shows only processes that their UID is `NUMBER`\n" "\n" +#endif +#if (PROCESSES_HAVE_GID == 1) " gid:NUMBER\n" " Shows only processes that their GID is `NUMBER`\n" "\n" +#endif "Filters can be combined. Each filter can be given only one time.\n" ); - pluginsd_function_result_to_stdout(transaction, HTTP_RESP_OK, "text/plain", now_realtime_sec() + 3600, wb); + wb->response_code = HTTP_RESP_OK; + wb->content_type = CT_TEXT_PLAIN; + wb->expires = now_realtime_sec() + 3600; + pluginsd_function_result_to_stdout(transaction, wb); buffer_free(wb); } @@ -69,21 +83,26 @@ void function_processes(const char *transaction, char *function, struct pid_stat *p; bool show_cmdline = http_access_user_has_enough_access_level_for_endpoint( - access, - HTTP_ACCESS_SIGNED_ID | HTTP_ACCESS_SAME_SPACE | HTTP_ACCESS_SENSITIVE_DATA | - HTTP_ACCESS_VIEW_AGENT_CONFIG) || enable_function_cmdline; + access, HTTP_ACCESS_SIGNED_ID | HTTP_ACCESS_SAME_SPACE | HTTP_ACCESS_SENSITIVE_DATA | HTTP_ACCESS_VIEW_AGENT_CONFIG) || enable_function_cmdline; char *words[PLUGINSD_MAX_WORDS] = { NULL }; - size_t num_words = quoted_strings_splitter_pluginsd(function, words, PLUGINSD_MAX_WORDS); + size_t num_words = quoted_strings_splitter_whitespace(function, words, PLUGINSD_MAX_WORDS); - struct target *category = NULL, *user = NULL, *group = NULL; + struct target *category = NULL, *user = NULL, *group = NULL; (void)category; (void)user; (void)group; +#if (PROCESSES_HAVE_UID == 1) + struct target *users_sid_root = users_root_target; +#endif +#if (PROCESSES_HAVE_SID == 1) + struct target *users_sid_root = sids_root_target; +#endif const char *process_name = NULL; pid_t pid = 0; - uid_t uid = 0; - gid_t gid = 0; + uid_t uid = 0; (void)uid; + gid_t gid = 0; (void)gid; bool info = false; bool filter_pid = false, filter_uid = false, filter_gid = false; + (void)filter_uid; (void)filter_gid; for(int i = 1; i < PLUGINSD_MAX_WORDS ;i++) { const char *keyword = get_word(words, num_words, i); @@ -97,14 +116,17 @@ void function_processes(const char *transaction, char *function, return; } } +#if (PROCESSES_HAVE_UID == 1) || (PROCESSES_HAVE_SID == 1) else if(!user && strncmp(keyword, PROCESS_FILTER_USER, strlen(PROCESS_FILTER_USER)) == 0) { - user = find_target_by_name(users_root_target, &keyword[strlen(PROCESS_FILTER_USER)]); + user = find_target_by_name(users_sid_root, &keyword[strlen(PROCESS_FILTER_USER)]); if(!user) { pluginsd_function_json_error_to_stdout(transaction, HTTP_RESP_BAD_REQUEST, "No user with that name found."); return; } } +#endif +#if (PROCESSES_HAVE_GID == 1) else if(strncmp(keyword, PROCESS_FILTER_GROUP, strlen(PROCESS_FILTER_GROUP)) == 0) { group = find_target_by_name(groups_root_target, &keyword[strlen(PROCESS_FILTER_GROUP)]); if(!group) { @@ -113,6 +135,7 @@ void function_processes(const char 
*transaction, char *function, return; } } +#endif else if(!process_name && strncmp(keyword, PROCESS_FILTER_PROCESS, strlen(PROCESS_FILTER_PROCESS)) == 0) { process_name = &keyword[strlen(PROCESS_FILTER_PROCESS)]; } @@ -120,14 +143,18 @@ void function_processes(const char *transaction, char *function, pid = str2i(&keyword[strlen(PROCESS_FILTER_PID)]); filter_pid = true; } +#if (PROCESSES_HAVE_UID == 1) else if(!uid && strncmp(keyword, PROCESS_FILTER_UID, strlen(PROCESS_FILTER_UID)) == 0) { uid = str2i(&keyword[strlen(PROCESS_FILTER_UID)]); filter_uid = true; } +#endif +#if (PROCESSES_HAVE_GID == 1) else if(!gid && strncmp(keyword, PROCESS_FILTER_GID, strlen(PROCESS_FILTER_GID)) == 0) { gid = str2i(&keyword[strlen(PROCESS_FILTER_GID)]); filter_gid = true; } +#endif else if(strcmp(keyword, "help") == 0) { apps_plugin_function_processes_help(transaction); return; @@ -137,10 +164,6 @@ void function_processes(const char *transaction, char *function, } } - unsigned int cpu_divisor = time_factor * RATES_DETAIL / 100; - unsigned int memory_divisor = 1024; - unsigned int io_divisor = 1024 * RATES_DETAIL; - BUFFER *wb = buffer_create(4096, NULL); buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_MINIFY); buffer_json_member_add_uint64(wb, "status", HTTP_RESP_OK); @@ -153,38 +176,71 @@ void function_processes(const char *transaction, char *function, if(info) goto close_and_send; + uint64_t cpu_divisor = NSEC_PER_SEC / 100; + unsigned int memory_divisor = 1024 * 1024; + unsigned int io_divisor = 1024 * RATES_DETAIL; + + uint64_t total_memory_bytes = OS_FUNCTION(apps_os_get_total_memory)(); + NETDATA_DOUBLE - UserCPU_max = 0.0 + UserCPU_max = 0.0 , SysCPU_max = 0.0 +#if (PROCESSES_HAVE_CPU_GUEST_TIME == 1) , GuestCPU_max = 0.0 +#endif +#if (PROCESSES_HAVE_CPU_CHILDREN_TIME == 1) , CUserCPU_max = 0.0 , CSysCPU_max = 0.0 +#if (PROCESSES_HAVE_CPU_GUEST_TIME == 1) , CGuestCPU_max = 0.0 +#endif +#endif , CPU_max = 0.0 , VMSize_max = 0.0 , RSS_max = 0.0 +#if (PROCESSES_HAVE_VMSHARED == 1) , Shared_max = 0.0 +#endif , Swap_max = 0.0 , Memory_max = 0.0 +#if (PROCESSES_HAVE_FDS == 1) && (PROCESSES_HAVE_PID_LIMITS == 1) , FDsLimitPercent_max = 0.0 +#endif ; unsigned long long Processes_max = 0 , Threads_max = 0 +#if (PROCESSES_HAVE_VOLCTX == 1) , VoluntaryCtxtSwitches_max = 0 +#endif +#if (PROCESSES_HAVE_NVOLCTX == 1) , NonVoluntaryCtxtSwitches_max = 0 +#endif , Uptime_max = 0 , MinFlt_max = 0 - , CMinFlt_max = 0 - , TMinFlt_max = 0 +#if (PROCESSES_HAVE_MAJFLT == 1) , MajFlt_max = 0 +#endif +#if (PROCESSES_HAVE_CHILDREN_FLTS == 1) + , CMinFlt_max = 0 , CMajFlt_max = 0 + , TMinFlt_max = 0 , TMajFlt_max = 0 +#endif +#if (PROCESSES_HAVE_LOGICAL_IO == 1) + , LReads_max = 0 + , LWrites_max = 0 +#endif +#if (PROCESSES_HAVE_PHYSICAL_IO == 1) , PReads_max = 0 , PWrites_max = 0 - , RCalls_max = 0 - , WCalls_max = 0 +#endif +#if (PROCESSES_HAVE_IO_CALLS == 1) + , ROps_max = 0 + , WOps_max = 0 +#endif +#if (PROCESSES_HAVE_FDS == 1) , Files_max = 0 , Pipes_max = 0 , Sockets_max = 0 @@ -195,40 +251,52 @@ void function_processes(const char *transaction, char *function, , EvPollFDs_max = 0 , OtherFDs_max = 0 , FDs_max = 0 +#endif +#if (PROCESSES_HAVE_HANDLES == 1) + , Handles_max = 0 +#endif ; -#if !defined(__FreeBSD__) && !defined(__APPLE__) - unsigned long long - LReads_max = 0 - , LWrites_max = 0 - ; -#endif // !__FreeBSD__ !__APPLE_ + netdata_mutex_lock(&apps_and_stdout_mutex); int rows= 0; - for(p = root_of_pids; p ; p = p->next) { + for(p = root_of_pids(); p ; p = p->next) { if(!p->updated) continue; 
if(category && p->target != category) continue; - if(user && p->user_target != user) +#if (PROCESSES_HAVE_UID == 1) + if(user && p->uid_target != user) continue; +#endif - if(group && p->group_target != group) +#if (PROCESSES_HAVE_GID == 1) + if(group && p->gid_target != group) continue; +#endif - if(process_name && ((strcmp(p->comm, process_name) != 0 && !p->parent) || (p->parent && strcmp(p->comm, process_name) != 0 && strcmp(p->parent->comm, process_name) != 0))) +#if (PROCESSES_HAVE_SID == 1) + if(user && p->sid_target != user) + continue; +#endif + + if(process_name && ((strcmp(pid_stat_comm(p), process_name) != 0 && !p->parent) || (p->parent && strcmp(pid_stat_comm(p), process_name) != 0 && strcmp(pid_stat_comm(p->parent), process_name) != 0))) continue; if(filter_pid && p->pid != pid && p->ppid != pid) continue; +#if (PROCESSES_HAVE_UID == 1) if(filter_uid && p->uid != uid) continue; +#endif +#if (PROCESSES_HAVE_GID == 1) if(filter_gid && p->gid != gid) continue; +#endif rows++; @@ -241,80 +309,130 @@ void function_processes(const char *transaction, char *function, buffer_json_add_array_item_uint64(wb, p->pid); // cmd - buffer_json_add_array_item_string(wb, p->comm); + buffer_json_add_array_item_string(wb, string2str(p->comm)); + +#if (PROCESSES_HAVE_COMM_AND_NAME == 1) + // name + buffer_json_add_array_item_string(wb, string2str(p->name ? p->name : p->comm)); +#endif // cmdline if (show_cmdline) { - buffer_json_add_array_item_string(wb, (p->cmdline && *p->cmdline) ? p->cmdline : p->comm); + buffer_json_add_array_item_string(wb, (string_strlen(p->cmdline)) ? pid_stat_cmdline(p) : pid_stat_comm(p)); } // ppid buffer_json_add_array_item_uint64(wb, p->ppid); // category - buffer_json_add_array_item_string(wb, p->target ? p->target->name : "-"); + buffer_json_add_array_item_string(wb, p->target ? string2str(p->target->name) : "-"); +#if (PROCESSES_HAVE_UID == 1) // user - buffer_json_add_array_item_string(wb, p->user_target ? p->user_target->name : "-"); + buffer_json_add_array_item_string(wb, p->uid_target ? string2str(p->uid_target->name) : "-"); // uid buffer_json_add_array_item_uint64(wb, p->uid); +#endif +#if (PROCESSES_HAVE_SID == 1) + // account + buffer_json_add_array_item_string(wb, p->sid_target ? string2str(p->sid_target->name) : "-"); +#endif +#if (PROCESSES_HAVE_GID == 1) // group - buffer_json_add_array_item_string(wb, p->group_target ? p->group_target->name : "-"); + buffer_json_add_array_item_string(wb, p->gid_target ? 
string2str(p->gid_target->name) : "-"); // gid buffer_json_add_array_item_uint64(wb, p->gid); +#endif // CPU utilization % - add_value_field_ndd_with_max(wb, CPU, (NETDATA_DOUBLE)(p->utime + p->stime + p->gtime + p->cutime + p->cstime + p->cgtime) / cpu_divisor); - add_value_field_ndd_with_max(wb, UserCPU, (NETDATA_DOUBLE)(p->utime) / cpu_divisor); - add_value_field_ndd_with_max(wb, SysCPU, (NETDATA_DOUBLE)(p->stime) / cpu_divisor); - add_value_field_ndd_with_max(wb, GuestCPU, (NETDATA_DOUBLE)(p->gtime) / cpu_divisor); - add_value_field_ndd_with_max(wb, CUserCPU, (NETDATA_DOUBLE)(p->cutime) / cpu_divisor); - add_value_field_ndd_with_max(wb, CSysCPU, (NETDATA_DOUBLE)(p->cstime) / cpu_divisor); - add_value_field_ndd_with_max(wb, CGuestCPU, (NETDATA_DOUBLE)(p->cgtime) / cpu_divisor); + kernel_uint_t total_cpu = p->values[PDF_UTIME] + p->values[PDF_STIME]; + +#if (PROCESSES_HAVE_CPU_GUEST_TIME) + total_cpu += p->values[PDF_GTIME]; +#endif +#if (PROCESSES_HAVE_CPU_CHILDREN_TIME) + total_cpu += p->values[PDF_CUTIME] + p->values[PDF_CSTIME]; +#if (PROCESSES_HAVE_CPU_GUEST_TIME) + total_cpu += p->values[PDF_CGTIME]; +#endif +#endif + add_value_field_ndd_with_max(wb, CPU, (NETDATA_DOUBLE)(total_cpu) / cpu_divisor); + add_value_field_ndd_with_max(wb, UserCPU, (NETDATA_DOUBLE)(p->values[PDF_UTIME]) / cpu_divisor); + add_value_field_ndd_with_max(wb, SysCPU, (NETDATA_DOUBLE)(p->values[PDF_STIME]) / cpu_divisor); +#if (PROCESSES_HAVE_CPU_GUEST_TIME) + add_value_field_ndd_with_max(wb, GuestCPU, (NETDATA_DOUBLE)(p->values[PDF_GTIME]) / cpu_divisor); +#endif +#if (PROCESSES_HAVE_CPU_CHILDREN_TIME) + add_value_field_ndd_with_max(wb, CUserCPU, (NETDATA_DOUBLE)(p->values[PDF_CUTIME]) / cpu_divisor); + add_value_field_ndd_with_max(wb, CSysCPU, (NETDATA_DOUBLE)(p->values[PDF_CSTIME]) / cpu_divisor); +#if (PROCESSES_HAVE_CPU_GUEST_TIME) + add_value_field_ndd_with_max(wb, CGuestCPU, (NETDATA_DOUBLE)(p->values[PDF_CGTIME]) / cpu_divisor); +#endif +#endif - add_value_field_llu_with_max(wb, VoluntaryCtxtSwitches, p->status_voluntary_ctxt_switches / RATES_DETAIL); - add_value_field_llu_with_max(wb, NonVoluntaryCtxtSwitches, p->status_nonvoluntary_ctxt_switches / RATES_DETAIL); +#if (PROCESSES_HAVE_VOLCTX == 1) + add_value_field_llu_with_max(wb, VoluntaryCtxtSwitches, p->values[PDF_VOLCTX] / RATES_DETAIL); +#endif +#if (PROCESSES_HAVE_NVOLCTX == 1) + add_value_field_llu_with_max(wb, NonVoluntaryCtxtSwitches, p->values[PDF_NVOLCTX] / RATES_DETAIL); +#endif // memory MiB - if(MemTotal) - add_value_field_ndd_with_max(wb, Memory, (NETDATA_DOUBLE)p->status_vmrss * 100.0 / (NETDATA_DOUBLE)MemTotal); + if(total_memory_bytes) + add_value_field_ndd_with_max(wb, Memory, (NETDATA_DOUBLE)p->values[PDF_VMRSS] * 100.0 / (NETDATA_DOUBLE)total_memory_bytes); + + add_value_field_ndd_with_max(wb, RSS, (NETDATA_DOUBLE)p->values[PDF_VMRSS] / memory_divisor); - add_value_field_ndd_with_max(wb, RSS, (NETDATA_DOUBLE)p->status_vmrss / memory_divisor); - add_value_field_ndd_with_max(wb, Shared, (NETDATA_DOUBLE)p->status_vmshared / memory_divisor); -#if !defined(__APPLE__) - add_value_field_ndd_with_max(wb, VMSize, (NETDATA_DOUBLE)p->status_vmsize / memory_divisor); +#if (PROCESSES_HAVE_VMSHARED == 1) + add_value_field_ndd_with_max(wb, Shared, (NETDATA_DOUBLE)p->values[PDF_VMSHARED] / memory_divisor); #endif - add_value_field_ndd_with_max(wb, Swap, (NETDATA_DOUBLE)p->status_vmswap / memory_divisor); + add_value_field_ndd_with_max(wb, VMSize, (NETDATA_DOUBLE)p->values[PDF_VMSIZE] / memory_divisor); +#if (PROCESSES_HAVE_VMSWAP == 1) + 
add_value_field_ndd_with_max(wb, Swap, (NETDATA_DOUBLE)p->values[PDF_VMSWAP] / memory_divisor); +#endif + +#if (PROCESSES_HAVE_PHYSICAL_IO == 1) // Physical I/O - add_value_field_llu_with_max(wb, PReads, p->io_storage_bytes_read / io_divisor); - add_value_field_llu_with_max(wb, PWrites, p->io_storage_bytes_written / io_divisor); + add_value_field_llu_with_max(wb, PReads, p->values[PDF_PREAD] / io_divisor); + add_value_field_llu_with_max(wb, PWrites, p->values[PDF_PWRITE] / io_divisor); +#endif +#if (PROCESSES_HAVE_LOGICAL_IO == 1) // Logical I/O -#if !defined(__FreeBSD__) && !defined(__APPLE__) - add_value_field_llu_with_max(wb, LReads, p->io_logical_bytes_read / io_divisor); - add_value_field_llu_with_max(wb, LWrites, p->io_logical_bytes_written / io_divisor); + add_value_field_llu_with_max(wb, LReads, p->values[PDF_LREAD] / io_divisor); + add_value_field_llu_with_max(wb, LWrites, p->values[PDF_LWRITE] / io_divisor); #endif +#if (PROCESSES_HAVE_IO_CALLS == 1) // I/O calls - add_value_field_llu_with_max(wb, RCalls, p->io_read_calls / RATES_DETAIL); - add_value_field_llu_with_max(wb, WCalls, p->io_write_calls / RATES_DETAIL); + add_value_field_llu_with_max(wb, ROps, p->values[PDF_OREAD] / RATES_DETAIL); + add_value_field_llu_with_max(wb, WOps, p->values[PDF_OWRITE] / RATES_DETAIL); +#endif // minor page faults - add_value_field_llu_with_max(wb, MinFlt, p->minflt / RATES_DETAIL); - add_value_field_llu_with_max(wb, CMinFlt, p->cminflt / RATES_DETAIL); - add_value_field_llu_with_max(wb, TMinFlt, (p->minflt + p->cminflt) / RATES_DETAIL); + add_value_field_llu_with_max(wb, MinFlt, p->values[PDF_MINFLT] / RATES_DETAIL); +#if (PROCESSES_HAVE_MAJFLT == 1) // major page faults - add_value_field_llu_with_max(wb, MajFlt, p->majflt / RATES_DETAIL); - add_value_field_llu_with_max(wb, CMajFlt, p->cmajflt / RATES_DETAIL); - add_value_field_llu_with_max(wb, TMajFlt, (p->majflt + p->cmajflt) / RATES_DETAIL); + add_value_field_llu_with_max(wb, MajFlt, p->values[PDF_MAJFLT] / RATES_DETAIL); +#endif + +#if (PROCESSES_HAVE_CHILDREN_FLTS == 1) + add_value_field_llu_with_max(wb, CMinFlt, p->values[PDF_CMINFLT] / RATES_DETAIL); + add_value_field_llu_with_max(wb, CMajFlt, p->values[PDF_CMAJFLT] / RATES_DETAIL); + add_value_field_llu_with_max(wb, TMinFlt, (p->values[PDF_MINFLT] + p->values[PDF_CMINFLT]) / RATES_DETAIL); + add_value_field_llu_with_max(wb, TMajFlt, (p->values[PDF_MAJFLT] + p->values[PDF_CMAJFLT]) / RATES_DETAIL); +#endif +#if (PROCESSES_HAVE_FDS == 1) // open file descriptors +#if (PROCESSES_HAVE_PID_LIMITS == 1) add_value_field_ndd_with_max(wb, FDsLimitPercent, p->openfds_limits_percent); +#endif add_value_field_llu_with_max(wb, FDs, pid_openfds_sum(p)); add_value_field_llu_with_max(wb, Files, p->openfds.files); add_value_field_llu_with_max(wb, Pipes, p->openfds.pipes); @@ -325,12 +443,16 @@ void function_processes(const char *transaction, char *function, add_value_field_llu_with_max(wb, SigFDs, p->openfds.signalfds); add_value_field_llu_with_max(wb, EvPollFDs, p->openfds.eventpolls); add_value_field_llu_with_max(wb, OtherFDs, p->openfds.other); +#endif +#if (PROCESSES_HAVE_HANDLES == 1) + add_value_field_llu_with_max(wb, Handles, p->values[PDF_HANDLES]); +#endif // processes, threads, uptime - add_value_field_llu_with_max(wb, Processes, p->children_count); - add_value_field_llu_with_max(wb, Threads, p->num_threads); - add_value_field_llu_with_max(wb, Uptime, p->uptime); + add_value_field_llu_with_max(wb, Processes, p->values[PDF_PROCESSES]); + add_value_field_llu_with_max(wb, Threads, 
p->values[PDF_THREADS]); + add_value_field_llu_with_max(wb, Uptime, p->values[PDF_UPTIME]); buffer_json_array_close(wb); // for each pid } @@ -357,6 +479,14 @@ void function_processes(const char *transaction, char *function, RRDF_FIELD_FILTER_MULTISELECT, RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_STICKY, NULL); +#if (PROCESSES_HAVE_COMM_AND_NAME == 1) + buffer_rrdf_table_add_field(wb, field_id++, "Name", "Process Friendly Name", RRDF_FIELD_TYPE_STRING, + RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, 0, NULL, NAN, + RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT, + RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_STICKY, NULL); +#endif + if (show_cmdline) { buffer_rrdf_table_add_field(wb, field_id++, "CmdLine", "Command Line", RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, 0, @@ -370,22 +500,30 @@ void function_processes(const char *transaction, char *function, NAN, RRDF_FIELD_SORT_ASCENDING, "PID", RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, RRDF_FIELD_OPTS_NONE, NULL); + buffer_rrdf_table_add_field(wb, field_id++, "Category", "Category (apps_groups.conf)", RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_STICKY, NULL); + +#if (PROCESSES_HAVE_UID == 1) || (PROCESSES_HAVE_SID == 1) buffer_rrdf_table_add_field(wb, field_id++, "User", "User Owner", RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, RRDF_FIELD_OPTS_VISIBLE, NULL); +#endif +#if (PROCESSES_HAVE_UID == 1) buffer_rrdf_table_add_field(wb, field_id++, "Uid", "User ID", RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, RRDF_FIELD_OPTS_NONE, NULL); +#endif + +#if (PROCESSES_HAVE_GID == 1) buffer_rrdf_table_add_field(wb, field_id++, "Group", "Group Owner", RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT, @@ -396,6 +534,7 @@ void function_processes(const char *transaction, char *function, RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, RRDF_FIELD_OPTS_NONE, NULL); +#endif // CPU utilization buffer_rrdf_table_add_field(wb, field_id++, "CPU", "Total CPU Time (100% = 1 core)", @@ -413,11 +552,14 @@ void function_processes(const char *transaction, char *function, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, 2, "%", SysCPU_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, RRDF_FIELD_OPTS_NONE, NULL); +#if (PROCESSES_HAVE_CPU_GUEST_TIME == 1) buffer_rrdf_table_add_field(wb, field_id++, "GuestCPU", "Guest CPU Time (100% = 1 core)", RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, 2, "%", GuestCPU_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, RRDF_FIELD_OPTS_NONE, NULL); +#endif +#if (PROCESSES_HAVE_CPU_CHILDREN_TIME == 1) buffer_rrdf_table_add_field(wb, field_id++, "CUserCPU", "Children User CPU Time (100% = 1 core)", RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, 2, "%", CUserCPU_max, RRDF_FIELD_SORT_DESCENDING, NULL, @@ -428,26 
+570,33 @@ void function_processes(const char *transaction, char *function, RRDF_FIELD_TRANSFORM_NUMBER, 2, "%", CSysCPU_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, RRDF_FIELD_OPTS_NONE, NULL); +#if (PROCESSES_HAVE_CPU_GUEST_TIME == 1) buffer_rrdf_table_add_field(wb, field_id++, "CGuestCPU", "Children Guest CPU Time (100% = 1 core)", RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, 2, "%", CGuestCPU_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, RRDF_FIELD_OPTS_NONE, NULL); +#endif +#endif +#if (PROCESSES_HAVE_VOLCTX == 1) // CPU context switches buffer_rrdf_table_add_field(wb, field_id++, "vCtxSwitch", "Voluntary Context Switches", RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, 2, "switches/s", VoluntaryCtxtSwitches_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, RRDF_FIELD_OPTS_NONE, NULL); +#endif +#if (PROCESSES_HAVE_NVOLCTX == 1) buffer_rrdf_table_add_field(wb, field_id++, "iCtxSwitch", "Involuntary Context Switches", RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, 2, "switches/s", NonVoluntaryCtxtSwitches_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, RRDF_FIELD_OPTS_NONE, NULL); +#endif // memory - if (MemTotal) + if (total_memory_bytes) buffer_rrdf_table_add_field(wb, field_id++, "Memory", "Memory Percentage", RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, 2, "%", 100.0, RRDF_FIELD_SORT_DESCENDING, NULL, @@ -460,25 +609,30 @@ void function_processes(const char *transaction, char *function, 2, "MiB", RSS_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, RRDF_FIELD_OPTS_VISIBLE, NULL); +#if (PROCESSES_HAVE_VMSHARED == 1) buffer_rrdf_table_add_field(wb, field_id++, "Shared", "Shared Pages", RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, 2, "MiB", Shared_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, RRDF_FIELD_OPTS_VISIBLE, NULL); -#if !defined(__APPLE__) +#endif + buffer_rrdf_table_add_field(wb, field_id++, "Virtual", "Virtual Memory Size", RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, 2, "MiB", VMSize_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, RRDF_FIELD_OPTS_VISIBLE, NULL); -#endif + +#if (PROCESSES_HAVE_VMSWAP == 1) buffer_rrdf_table_add_field(wb, field_id++, "Swap", "Swap Memory", RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, 2, "MiB", Swap_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, RRDF_FIELD_OPTS_NONE, NULL); +#endif +#if (PROCESSES_HAVE_PHYSICAL_IO == 1) // Physical I/O buffer_rrdf_table_add_field(wb, field_id++, "PReads", "Physical I/O Reads", RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, @@ -490,33 +644,41 @@ void function_processes(const char *transaction, char *function, RRDF_FIELD_TRANSFORM_NUMBER, 2, "KiB/s", PWrites_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, RRDF_FIELD_OPTS_VISIBLE, NULL); +#endif +#if (PROCESSES_HAVE_LOGICAL_IO == 1) +#if (PROCESSES_HAVE_PHYSICAL_IO == 1) + RRDF_FIELD_OPTIONS logical_io_options = RRDF_FIELD_OPTS_NONE; +#else + RRDF_FIELD_OPTIONS logical_io_options = 
RRDF_FIELD_OPTS_VISIBLE; +#endif // Logical I/O -#if !defined(__FreeBSD__) && !defined(__APPLE__) buffer_rrdf_table_add_field(wb, field_id++, "LReads", "Logical I/O Reads", RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, 2, "KiB/s", LReads_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, - RRDF_FIELD_OPTS_NONE, NULL); + logical_io_options, NULL); buffer_rrdf_table_add_field(wb, field_id++, "LWrites", "Logical I/O Writes", RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, 2, "KiB/s", LWrites_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, - RRDF_FIELD_OPTS_NONE, NULL); + logical_io_options, NULL); #endif +#if (PROCESSES_HAVE_IO_CALLS == 1) // I/O calls - buffer_rrdf_table_add_field(wb, field_id++, "RCalls", "I/O Read Calls", RRDF_FIELD_TYPE_BAR_WITH_INTEGER, + buffer_rrdf_table_add_field(wb, field_id++, "ROps", "I/O Read Operations", RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, 2, - "calls/s", RCalls_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM, + "ops/s", ROps_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, RRDF_FIELD_OPTS_NONE, NULL); - buffer_rrdf_table_add_field(wb, field_id++, "WCalls", "I/O Write Calls", RRDF_FIELD_TYPE_BAR_WITH_INTEGER, + buffer_rrdf_table_add_field(wb, field_id++, "WOps", "I/O Write Operations", RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, 2, - "calls/s", WCalls_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM, + "ops/s", WOps_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, RRDF_FIELD_OPTS_NONE, NULL); +#endif // minor page faults buffer_rrdf_table_add_field(wb, field_id++, "MinFlt", "Minor Page Faults/s", RRDF_FIELD_TYPE_BAR_WITH_INTEGER, @@ -525,18 +687,8 @@ void function_processes(const char *transaction, char *function, 2, "pgflts/s", MinFlt_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, RRDF_FIELD_OPTS_NONE, NULL); - buffer_rrdf_table_add_field(wb, field_id++, "CMinFlt", "Children Minor Page Faults/s", - RRDF_FIELD_TYPE_BAR_WITH_INTEGER, - RRDF_FIELD_VISUAL_BAR, - RRDF_FIELD_TRANSFORM_NUMBER, 2, "pgflts/s", CMinFlt_max, RRDF_FIELD_SORT_DESCENDING, - NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, - RRDF_FIELD_OPTS_NONE, NULL); - buffer_rrdf_table_add_field(wb, field_id++, "TMinFlt", "Total Minor Page Faults/s", - RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, - RRDF_FIELD_TRANSFORM_NUMBER, 2, "pgflts/s", TMinFlt_max, RRDF_FIELD_SORT_DESCENDING, - NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, - RRDF_FIELD_OPTS_NONE, NULL); +#if (PROCESSES_HAVE_MAJFLT == 1) // major page faults buffer_rrdf_table_add_field(wb, field_id++, "MajFlt", "Major Page Faults/s", RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, @@ -544,24 +696,42 @@ void function_processes(const char *transaction, char *function, 2, "pgflts/s", MajFlt_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, RRDF_FIELD_OPTS_NONE, NULL); +#endif + +#if (PROCESSES_HAVE_CHILDREN_FLTS == 1) + buffer_rrdf_table_add_field(wb, field_id++, "CMinFlt", "Children Minor Page Faults/s", + RRDF_FIELD_TYPE_BAR_WITH_INTEGER, + RRDF_FIELD_VISUAL_BAR, + RRDF_FIELD_TRANSFORM_NUMBER, 2, "pgflts/s", CMinFlt_max, RRDF_FIELD_SORT_DESCENDING, + NULL, RRDF_FIELD_SUMMARY_SUM, 
RRDF_FIELD_FILTER_RANGE, + RRDF_FIELD_OPTS_NONE, NULL); buffer_rrdf_table_add_field(wb, field_id++, "CMajFlt", "Children Major Page Faults/s", RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, 2, "pgflts/s", CMajFlt_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, RRDF_FIELD_OPTS_NONE, NULL); + buffer_rrdf_table_add_field(wb, field_id++, "TMinFlt", "Total Minor Page Faults/s", + RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, + RRDF_FIELD_TRANSFORM_NUMBER, 2, "pgflts/s", TMinFlt_max, RRDF_FIELD_SORT_DESCENDING, + NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, + RRDF_FIELD_OPTS_NONE, NULL); buffer_rrdf_table_add_field(wb, field_id++, "TMajFlt", "Total Major Page Faults/s", RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, 2, "pgflts/s", TMajFlt_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, RRDF_FIELD_OPTS_NONE, NULL); +#endif +#if (PROCESSES_HAVE_FDS == 1) // open file descriptors +#if (PROCESSES_HAVE_PID_LIMITS == 1) buffer_rrdf_table_add_field(wb, field_id++, "FDsLimitPercent", "Percentage of Open Descriptors vs Limits", RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, 2, "%", FDsLimitPercent_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_MAX, RRDF_FIELD_FILTER_RANGE, RRDF_FIELD_OPTS_NONE, NULL); +#endif buffer_rrdf_table_add_field(wb, field_id++, "FDs", "All Open File Descriptors", RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, 0, "fds", FDs_max, RRDF_FIELD_SORT_DESCENDING, NULL, @@ -614,6 +784,16 @@ void function_processes(const char *transaction, char *function, RRDF_FIELD_TRANSFORM_NUMBER, 0, "fds", OtherFDs_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, RRDF_FIELD_OPTS_NONE, NULL); +#endif + +#if (PROCESSES_HAVE_HANDLES == 1) + buffer_rrdf_table_add_field(wb, field_id++, "Handles", "Open Handles", RRDF_FIELD_TYPE_BAR_WITH_INTEGER, + RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, 0, + "handles", + Handles_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM, + RRDF_FIELD_FILTER_RANGE, + RRDF_FIELD_OPTS_VISIBLE, NULL); +#endif // processes, threads, uptime buffer_rrdf_table_add_field(wb, field_id++, "Processes", "Processes", RRDF_FIELD_TYPE_BAR_WITH_INTEGER, @@ -647,27 +827,39 @@ void function_processes(const char *transaction, char *function, { buffer_json_add_array_item_string(wb, "UserCPU"); buffer_json_add_array_item_string(wb, "SysCPU"); +#if (PROCESSES_HAVE_CPU_GUEST_TIME == 1) buffer_json_add_array_item_string(wb, "GuestCPU"); +#endif +#if (PROCESSES_HAVE_CPU_CHILDREN_TIME == 1) buffer_json_add_array_item_string(wb, "CUserCPU"); buffer_json_add_array_item_string(wb, "CSysCPU"); +#if (PROCESSES_HAVE_CPU_GUEST_TIME == 1) buffer_json_add_array_item_string(wb, "CGuestCPU"); +#endif +#endif } buffer_json_array_close(wb); } buffer_json_object_close(wb); +#if (PROCESSES_HAVE_VOLCTX == 1) || (PROCESSES_HAVE_NVOLCTX == 1) buffer_json_member_add_object(wb, "CPUCtxSwitches"); { buffer_json_member_add_string(wb, "name", "CPU Context Switches"); buffer_json_member_add_string(wb, "type", "stacked-bar"); buffer_json_member_add_array(wb, "columns"); { +#if (PROCESSES_HAVE_VOLCTX == 1) buffer_json_add_array_item_string(wb, "vCtxSwitch"); +#endif +#if (PROCESSES_HAVE_NVOLCTX == 1) buffer_json_add_array_item_string(wb, "iCtxSwitch"); +#endif } buffer_json_array_close(wb); } 
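+ // note: every optional column above is guarded by the same PROCESSES_HAVE_* capability macro that controls the matching table field, so the chart definitions never reference a column that was not added to the table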
buffer_json_object_close(wb); +#endif // Memory chart buffer_json_member_add_object(wb, "Memory"); @@ -685,7 +877,7 @@ void function_processes(const char *transaction, char *function, } buffer_json_object_close(wb); - if(MemTotal) { + if(total_memory_bytes) { // Memory chart buffer_json_member_add_object(wb, "MemoryPercent"); { @@ -700,7 +892,7 @@ void function_processes(const char *transaction, char *function, buffer_json_object_close(wb); } -#if !defined(__FreeBSD__) && !defined(__APPLE__) +#if (PROCESSES_HAVE_LOGICAL_IO == 1) || (PROCESSES_HAVE_PHYSICAL_IO == 1) // I/O Reads chart buffer_json_member_add_object(wb, "Reads"); { @@ -708,8 +900,12 @@ void function_processes(const char *transaction, char *function, buffer_json_member_add_string(wb, "type", "stacked-bar"); buffer_json_member_add_array(wb, "columns"); { +#if (PROCESSES_HAVE_LOGICAL_IO == 1) buffer_json_add_array_item_string(wb, "LReads"); +#endif +#if (PROCESSES_HAVE_PHYSICAL_IO == 1) buffer_json_add_array_item_string(wb, "PReads"); +#endif } buffer_json_array_close(wb); } @@ -722,13 +918,19 @@ void function_processes(const char *transaction, char *function, buffer_json_member_add_string(wb, "type", "stacked-bar"); buffer_json_member_add_array(wb, "columns"); { +#if (PROCESSES_HAVE_LOGICAL_IO == 1) buffer_json_add_array_item_string(wb, "LWrites"); +#endif +#if (PROCESSES_HAVE_PHYSICAL_IO == 1) buffer_json_add_array_item_string(wb, "PWrites"); +#endif } buffer_json_array_close(wb); } buffer_json_object_close(wb); +#endif +#if (PROCESSES_HAVE_LOGICAL_IO == 1) // Logical I/O chart buffer_json_member_add_object(wb, "LogicalIO"); { @@ -744,6 +946,7 @@ void function_processes(const char *transaction, char *function, buffer_json_object_close(wb); #endif +#if (PROCESSES_HAVE_PHYSICAL_IO == 1) // Physical I/O chart buffer_json_member_add_object(wb, "PhysicalIO"); { @@ -757,7 +960,9 @@ void function_processes(const char *transaction, char *function, buffer_json_array_close(wb); } buffer_json_object_close(wb); +#endif +#if (PROCESSES_HAVE_IO_CALLS == 1) // I/O Calls chart buffer_json_member_add_object(wb, "IOCalls"); { @@ -765,12 +970,13 @@ void function_processes(const char *transaction, char *function, buffer_json_member_add_string(wb, "type", "stacked-bar"); buffer_json_member_add_array(wb, "columns"); { - buffer_json_add_array_item_string(wb, "RCalls"); + buffer_json_add_array_item_string(wb, "ROps"); - buffer_json_add_array_item_string(wb, "WCalls"); + buffer_json_add_array_item_string(wb, "WOps"); } buffer_json_array_close(wb); } buffer_json_object_close(wb); +#endif // Minor Page Faults chart buffer_json_member_add_object(wb, "MinFlt"); @@ -890,6 +1096,7 @@ void function_processes(const char *transaction, char *function, } buffer_json_object_close(wb); +#if (PROCESSES_HAVE_UID == 1) || (PROCESSES_HAVE_SID == 1) // group by User buffer_json_member_add_object(wb, "User"); { @@ -902,7 +1109,9 @@ void function_processes(const char *transaction, char *function, buffer_json_array_close(wb); } buffer_json_object_close(wb); +#endif +#if (PROCESSES_HAVE_GID == 1) // group by Group buffer_json_member_add_object(wb, "Group"); { @@ -915,14 +1124,20 @@ void function_processes(const char *transaction, char *function, buffer_json_array_close(wb); } buffer_json_object_close(wb); +#endif } buffer_json_object_close(wb); // group_by + netdata_mutex_unlock(&apps_and_stdout_mutex); + close_and_send: buffer_json_member_add_time_t(wb, "expires", now_s + update_every); buffer_json_finalize(wb); - pluginsd_function_result_to_stdout(transaction, HTTP_RESP_OK, "application/json", now_s + update_every, wb);
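+ // the HTTP response code, content type and expiration now travel inside the BUFFER itself, instead of being passed to pluginsd_function_result_to_stdout() as separate arguments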
+ wb->response_code = HTTP_RESP_OK; + wb->content_type = CT_APPLICATION_JSON; + wb->expires = now_s + update_every; + pluginsd_function_result_to_stdout(transaction, wb); buffer_free(wb); } diff --git a/src/collectors/apps.plugin/apps_groups.conf b/src/collectors/apps.plugin/apps_groups.conf index 724616c18..8d4b1722c 100644 --- a/src/collectors/apps.plugin/apps_groups.conf +++ b/src/collectors/apps.plugin/apps_groups.conf @@ -1,438 +1,247 @@ -# -# apps.plugin process grouping -# -# The apps.plugin displays charts with information about the processes running. -# This config allows grouping processes together, so that several processes -# will be reported as one. -# -# Only groups in this file are reported. All other processes will be reported -# as 'other'. -# -# For each process given, its whole process tree will be grouped, not just -# the process matched. The plugin will include both parents and childs. -# -# The format is: -# -# group: process1 process2 process3 ... -# -# Each group can be given multiple times, to add more processes to it. -# -# The process names are the ones returned by: -# -# - ps -e or /proc/PID/stat -# - in case of substring mode (see below): /proc/PID/cmdline -# -# To add process names with spaces, enclose them in quotes (single or double) -# example: 'Plex Media Serv' "my other process". -# -# Note that spaces are not supported for process groups. Use a dash "-" instead. -# example-process-group: process1 process2 -# -# Wildcard support: -# You can add an asterisk (*) at the beginning and/or the end of a process: -# -# *name suffix mode: will search for processes ending with 'name' -# (/proc/PID/stat) -# -# name* prefix mode: will search for processes beginning with 'name' -# (/proc/PID/stat) -# -# *name* substring mode: will search for 'name' in the whole command line -# (/proc/PID/cmdline) -# -# If you enter even just one *name* (substring), apps.plugin will process -# /proc/PID/cmdline for all processes, just once (when they are first seen). -# -# To add processes with single quotes, enclose them in double quotes -# example: "process with this ' single quote" -# -# To add processes with double quotes, enclose them in single quotes: -# example: 'process with this " double quote' -# -# If a group or process name starts with a -, the dimension will be hidden -# (cpu chart only). -# -# If a process starts with a +, debugging will be enabled for it -# (debugging produces a lot of output - do not enable it in production systems) -# -# You can add any number of groups you like. Only the ones found running will -# affect the charts generated. However, producing charts with hundreds of -# dimensions may slow down your web browser. -# -# The order of the entries in this list is important: the first that matches -# a process is used, so put important ones at the top. Processes not matched -# by any row, will inherit it from their parents or children. -# -# The order also controls the order of the dimensions on the generated charts -# (although applications started after apps.plugin is started, will be appended -# to the existing list of dimensions the netdata daemon maintains). 
- -# ----------------------------------------------------------------------------- -# NETDATA processes accounting - -# netdata main process -netdata: netdata - -# netdata known plugins -# plugins not defined here will be accumulated in netdata, above -apps.plugin: apps.plugin -freeipmi.plugin: freeipmi.plugin -nfacct.plugin: nfacct.plugin -cups.plugin: cups.plugin -xenstat.plugin: xenstat.plugin -perf.plugin: perf.plugin -charts.d.plugin: *charts.d.plugin* -python.d.plugin: *python.d.plugin* -systemd-journal.plugin: *systemd-journal.plugin* -network-viewer.plugin: *network-viewer.plugin* -tc-qos-helper: *tc-qos-helper.sh* -fping: fping -ioping: ioping -go.d.plugin: *go.d.plugin* -slabinfo.plugin: *slabinfo.plugin* -ebpf.plugin: *ebpf.plugin* -debugfs.plugin: *debugfs.plugin* - -# agent-service-discovery -agent_sd: agent_sd +## +## apps.plugin process grouping +## +## Documentation at: +## https://github.com/netdata/netdata/blob/master/src/collectors/apps.plugin/README.md +## +## ----------------------------------------------------------------------------- +## Subprocesses of process managers are monitored individually. +## (uncomment to add or edit - the default is also hardcoded into the plugin) -# ----------------------------------------------------------------------------- -# authentication/authorization related servers +## Clear all the managers, to set yours, otherwise append to the internal list. +#managers: clear -auth: radius* openldap* ldap* slapd authelia sssd saslauthd polkitd gssproxy -fail2ban: fail2ban* +## Linux process managers +#managers: init systemd containerd-shim-runc-v2 dumb-init gnome-shell docker-init tini +#managers: spawn-plugins openrc-run.sh crond plasmashell xfwm4 -# ----------------------------------------------------------------------------- -# web/ftp servers +## FreeBSD process managers +#managers: init spawn-plugins -httpd: apache* httpd nginx* lighttpd hiawatha caddy h2o -proxy: squid* c-icap squidGuard varnish* -php: php* lsphp* -ftpd: proftpd in.tftpd vsftpd -uwsgi: uwsgi -unicorn: *unicorn* -puma: *puma* +## MacOS process managers +#managers: launchd spawn-plugins -# ----------------------------------------------------------------------------- -# database servers +## Windows process managers +#managers: wininit services explorer System netdata -sql: mysqld* mariad* postgres* postmaster* oracle_* ora_* sqlservr -nosql: mongod redis* valkey* memcached *couchdb* -timedb: prometheus *carbon-cache.py* *carbon-aggregator.py* *graphite/manage.py* *net.opentsdb.tools.TSDMain* influxd* +## ----------------------------------------------------------------------------- +## Interpreters to search for the actual command name in command line. +## (uncomment to add or edit - the default is also hardcoded into the plugin) -clickhouse: clickhouse-serv* clickhouse-cli* clckhouse-watch +## Clear all the interpreters, to set yours, otherwise append to the internal list. +#interpreters: clear -# ----------------------------------------------------------------------------- -# email servers +#interpreters: python python2 python3 +#interpreters: sh bash zsh +#interpreters: node perl awk -mta: amavis* zmstat-* zmdiaglog zmmailboxdmgr opendkim postfwd2 smtp* lmtp* sendmail postfix master pickup qmgr showq tlsmgr postscreen oqmgr msmtp* nullmailer* -mda: dovecot *imapd *pop3d *popd +## ----------------------------------------------------------------------------- +## Processes of interest +## Group and/or rename individual processes.
+## (there is no internal default for this section) -# ----------------------------------------------------------------------------- -# network, routing, VPN +## NETDATA processes accounting +netdata: netdata -ppp: ppp* -vpn: openvpn pptp* cjdroute gvpe tincd wireguard tailscaled -wifi: hostapd wpa_supplicant -routing: ospfd* ospf6d* bgpd bfdd fabricd isisd eigrpd sharpd staticd ripd ripngd pimd pbrd nhrpd ldpd zebra vrrpd vtysh bird* -modem: ModemManager -netmanager: NetworkManager nm* systemd-networkd networkctl netplan connmand wicked* avahi-autoipd networkd-dispatcher -firewall: firewalld ufw nft -tor: tor -bluetooth: bluetooth bluetoothd bluez bluedevil obexd +## NETDATA agent-service-discovery (kubernetes) +agent_sd: agent_sd -# ----------------------------------------------------------------------------- -# high availability and balancers +## ----------------------------------------------------------------------------- +oracledb: ora_* oracle_* *runOracle.sh* +unicorn: *unicorn* +puma: *puma* +couchdb: *couchdb* +graphite: *carbon-cache.py* *carbon-aggregator.py* *graphite/manage.py* +opentsdb: *net.opentsdb.tools.TSDMain* +imapd: *imapd +pop3d: *pop3d +popd: *popd camo: *camo* -balancer: ipvs_* haproxy -ha: corosync hs_logd ha_logd stonithd pacemakerd lrmd crmd keepalived ucarp* - -# ----------------------------------------------------------------------------- -# telephony - -pbx: asterisk safe_asterisk *vicidial* -sip: opensips* stund - -# ----------------------------------------------------------------------------- -# chat - -chat: irssi *vines* *prosody* murmurd - -# ----------------------------------------------------------------------------- -# monitoring - -logs: ulogd* syslog* rsyslog* logrotate *systemd-journal* rotatelogs sysklogd metalog -nms: snmpd vnstatd smokeping zabbix* munin* mon openhpid tailon nrpe -monit: monit -splunk: splunkd +vicidial: *vicidial* +vines: *vines* +prosody: *prosody* azure: mdsd *waagent* *omiserver* *omiagent* hv_kvp_daemon hv_vss_daemon *auoms* *omsagent* datadog: *datadog* -edgedelta: edgedelta newrelic: newrelic* google-agent: *google_guest_agent* *google_osconfig_agent* -nvidia-smi: nvidia-smi -intel_gpu_top: intel_gpu_top -htop: htop -watchdog: watchdog -telegraf: telegraf -grafana: grafana* - -# ----------------------------------------------------------------------------- -# storage, file systems and file servers - -ceph: ceph-* ceph_* radosgw* rbd-* cephfs-* osdmaptool crushtool -samba: smbd nmbd winbindd ctdbd ctdb-* ctdb_* -nfs: rpcbind rpc.* nfs* -zfs: spl_* z_* txg_* zil_* arc_* l2arc* -btrfs: btrfs* -iscsi: iscsid iscsi_eh -afp: netatalk afpd cnid_dbd cnid_metad -ntfs-3g: ntfs-3g - -# ----------------------------------------------------------------------------- -# kubernetes - -kubelet: kubelet -kube-dns: kube-dns -kube-proxy: kube-proxy -metrics-server: metrics-server -heapster: heapster - -# ----------------------------------------------------------------------------- -# AWS - aws-s3: '*aws s3*' s3cmd s5cmd -aws: aws - -# ----------------------------------------------------------------------------- -# virtualization platform - proxmox-ve: pve* spiceproxy - -# ----------------------------------------------------------------------------- -# containers & virtual machines - -containers: lxc* docker* balena* containerd -VMs: vbox* VBox* qemu* kvm* libvirt: virtlogd virtqemud virtstoraged virtnetworkd virtlockd virtinterfaced libvirt: virtnodedevd virtproxyd virtsecretd libvirtd guest-agent: qemu-ga spice-vdagent cloud-init* - -# 
----------------------------------------------------------------------------- -# ssh servers and clients - -ssh: ssh* scp sftp* dropbear - -# ----------------------------------------------------------------------------- -# print servers and clients - -print: cups* lpd lpq - -# ----------------------------------------------------------------------------- -# time servers and clients - -time: ntp* systemd-timesyn* chronyd ptp* - -# ----------------------------------------------------------------------------- -# dhcp servers and clients - -dhcp: *dhcp* dhclient - -# ----------------------------------------------------------------------------- -# name servers and clients - -dns: named unbound nsd pdns_server knotd gdnsd yadifad dnsmasq *systemd-resolve* pihole* avahi-daemon avahi-dnsconfd -dnsdist: dnsdist - -# ----------------------------------------------------------------------------- -# installation / compilation / debugging +dhcp: dhcp* dhclient build: cc1 cc1plus as gcc* cppcheck ld make cmake automake autoconf autoreconf build: cargo rustc bazel buck git gdb valgrind* rpmbuild dpkg-buildpackage - -# ----------------------------------------------------------------------------- -# package management - -packagemanager: apt* dpkg* dselect dnf yum rpm zypp* yast* pacman xbps* swupd* emerge* -packagemanager: packagekitd pkgin pkg apk snapd slackpkg slapt-get - -# ----------------------------------------------------------------------------- -# antivirus - -antivirus: clam* *clam imunify360* - -# ----------------------------------------------------------------------------- -# torrent clients - -torrents: *deluge* transmission* *SickBeard* *CouchPotato* *rtorrent* - -# ----------------------------------------------------------------------------- -# backup servers and clients - +packagemanager: apt* dpkg* dselect dnf yum rpm zypp* yast* pacman xbps* swupd* +packagemanager: packagekitd pkgin pkg apk snapd slackpkg slapt-get emerge* +clam: clam* *clam backup: rsync lsyncd bacula* borg rclone - -# ----------------------------------------------------------------------------- -# cron - cron: cron* atd anacron *systemd-cron* incrond - -# ----------------------------------------------------------------------------- -# UPS - ups: upsmon upsd */nut/* apcupsd -# ----------------------------------------------------------------------------- -# media players, servers, clients - -media: mplayer vlc xine mediatomb omxplayer* kodi* xbmc* mediacenter eventlircd -media: mpd minidlnad mt-daapd Plex* jellyfin squeeze* jackett Ombi -media: strawberry* clementine* +rabbitmq: *rabbitmq* +sidekiq: *sidekiq* +vernemq: *beam.smp*vernemq* *start_vernemq* *run_erl*vernemq* *vernemq*epmd* +erlang: beam.smp +postfix: *postfix* -audio: pulse* pipewire wireplumber jack* +dagster: *dagster* -# ----------------------------------------------------------------------------- -# java applications +## ----------------------------------------------------------------------------- +## java applications hdfsdatanode: *org.apache.hadoop.hdfs.server.datanode.DataNode* hdfsnamenode: *org.apache.hadoop.hdfs.server.namenode.NameNode* hdfsjournalnode: *org.apache.hadoop.hdfs.qjournal.server.JournalNode* hdfszkfc: *org.apache.hadoop.hdfs.tools.DFSZKFailoverController* - yarnnode: *org.apache.hadoop.yarn.server.nodemanager.NodeManager* yarnmgr: *org.apache.hadoop.yarn.server.resourcemanager.ResourceManager* yarnproxy: *org.apache.hadoop.yarn.server.webproxy.WebAppProxyServer* - sparkworker: *org.apache.spark.deploy.worker.Worker* sparkmaster: 
*org.apache.spark.deploy.master.Master* - hbaseregion: *org.apache.hadoop.hbase.regionserver.HRegionServer* hbaserest: *org.apache.hadoop.hbase.rest.RESTServer* hbasethrift: *org.apache.hadoop.hbase.thrift.ThriftServer* hbasemaster: *org.apache.hadoop.hbase.master.HMaster* - zookeeper: *org.apache.zookeeper.server.quorum.QuorumPeerMain* - hive2: *org.apache.hive.service.server.HiveServer2* hivemetastore: *org.apache.hadoop.hive.metastore.HiveMetaStore* - solr: *solr.install.dir* - airflow: *airflow* +kafka: *kafka.Kafka* -# ----------------------------------------------------------------------------- -# GUI - -X: X Xorg xinit xdm Xwayland xsettingsd touchegg -wayland: swaylock swayidle waypipe wayvnc -kde: *kdeinit* kdm sddm plasmashell startplasma-* kwin* kwallet* krunner kactivitymanager* -gnome: gnome-* gdm gconf* mutter -mate: mate-* msd-* marco* -cinnamon: cinnamon* muffin -xfce: xfwm4 xfdesktop xfce* Thunar xfsettingsd xfconf* -lxde: lxde* startlxde lxdm lxappearance* lxlauncher* lxpanel* lxsession* lxsettings* -lxqt: lxqt* startlxqt -enlightenment: entrance enlightenment* -i3: i3* -awesome: awesome awesome-client -dwm: dwm.* -sway: sway -weston: weston -cage: cage -wayfire: wayfire -gui: lightdm colord seatd greetd gkrellm slim qingy dconf* *gvfs gvfs* -gui: '*systemd --user*' xdg-* at-spi-* -webbrowser: *chrome-sandbox* *google-chrome* *chromium* *firefox* vivaldi* opera* epiphany chrome* -webbrowser: lynx elinks w3m w3mmee links -mua: evolution-* thunderbird* mutt neomutt pine mailx alpine - -# ----------------------------------------------------------------------------- -# Kernel / System +## ----------------------------------------------------------------------------- +## Kernel / System +## The following are interesting kernel threads and related processes to +## monitor individually, mainly for their CPU utilization. +## These kernel threads switch tasks all the time, so they should never be +## categorized as anything specific. kernel: kworker/* + +## Kernel Samepage Merging (KSM) daemon that looks for identical memory pages +## across processes and merges them to save memory. ksmd: ksmd -khugepaged: khugepaged + +## Handles migration of processes between CPU cores to balance load. +kmigration: migration/* + +## Manages memory compaction, moving memory pages around to reduce +## fragmentation. +kcompactd: kcompactd* + +## Responsible for freeing up memory by swapping pages to disk when needed. +kswapd: kswapd* + +## DAMON is a mechanism designed to efficiently monitor the memory access +## patterns of running processes or the system itself. kdamond: kdamond -kswapd: kswapd -zswap: zswap -kcompactd: kcompactd -system: systemd* udisks* udevd* *udevd ipv6_addrconf dbus-* rtkit* -system: mdadm acpid uuidd upowerd elogind* eudev mdev lvmpolld dmeventd -system: accounts-daemon rngd haveged rasdaemon irqbalance start-stop-daemon -system: supervise-daemon openrc* init runit runsvdir runsv auditd lsmd -system: abrt* nscd rtkit-daemon gpg-agent usbguard* boltd geoclue +## Manages ballooning in virtualized environments. +vballoon: vballoon* -kernel: kworker kthreadd kauditd lockd khelper kdevtmpfs khungtaskd rpciod -kernel: fsnotify_mark kthrotld deferwq scsi_* kdmflush oom_reaper kdevtempfs -kernel: ksoftirqd +## virtio - Handles I/O (storage and network) on virtual machines.
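+## (vhost-* are kernel worker threads created by the host-side virtio backends, e.g. vhost-net, to service virtio queues)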
+kvirtio: virtio-* vhost-* -# ----------------------------------------------------------------------------- -# inetd +## Layer 4 (transport layer) load balancing +ipvs: ipvsd ipvs_* ip_vs_* -inetd: inetd xinetd +## Hugepages +## Scans memory regions and tries to promote regular-sized pages (4KB) into +## hugepages (2MB) where possible. It merges smaller contiguous 4KB pages into 2MB +## pages. Hugepages also use: kswapd, kcompactd, and migration. +khugepaged: khugepaged -# ----------------------------------------------------------------------------- -# other application servers +## Note about zswap: +## zswap does not introduce its own dedicated kernel threads. Instead, it +## operates within the existing memory management and swapping framework of the +## kernel: +## - kswapd: swaps pages in/out of memory, using compression in the process. +## - kcompactd: compacts memory when pages are compressed or moved around. -i2pd: i2pd +## ----------------------------------------------------------------------------- +## Block Devices -rethinkdb: rethinkdb +## Handles deferred block I/O operations for block devices. +kblockd: kblockd -beanstalkd: beanstalkd +## Device Mapper (DM) +device-mapper: kcopyd/* kcryptd/* kdmflush/* dm_bufio_cache +device-mapper: raid1/* raid5/* raid10/* multipathd bioset/* -rspamd: rspamd +## Software RAID (MD) +md-raid: md*_raid* md*_resync md*_reshape md*_recovery md_thread +md-raid: flush_md* raid*_sync -consul: consul +## iSCSI +iscsi: iscsid iscsiadm iscsi_eh/* iscsi_xmit/* iscsi_ttx/* iscsi_rx/* iscsi_trx/* -kafka: *kafka.Kafka* +## SCSI +scsi: scsi_eh/* scsi_tmf/* scsi_wq/* -rabbitmq: *rabbitmq* +## BCACHE +bcache: bcache* bch_btree_io bch_journal -sidekiq: *sidekiq* -java: java -ipfs: ipfs -erlang: beam.smp +## SAS +sas: sas_task/* mpt* -node: node -factorio: factorio +## Fibre Channel (FC) +fc: fc_transport qla2xxx* -p4: p4* +## loop devices +loop: loop* flush-loop* -git-services: gitea gitlab-runner +## ----------------------------------------------------------------------------- +## Filesystems -freeswitch: freeswitch* +## Ext4 +ext4: ext4-* jbd2/* -# -------- web3 / blockchains ---------- +## XFS +xfs: xfs* -go-ethereum: geth* -nethermind-ethereum: nethermind* -besu-ethereum: besu* -openEthereum: openethereum* -urbit: urbit* -bitcoin-node: *bitcoind* lnd* -filecoin: lotus* lotus-miner* lotus-worker* -solana: solana* -web3: *hardhat* *ganache* *truffle* *brownie* *waffle* -terra: terra* mantle* +## BTRFS +btrfs: btrfs* -# ----------------------------------------------------------------------------- -# chaos engineering tools +## NFS +nfs: rpcbind rpc.* nfs* rpciod -stress: stress stress-ng* -gremlin: gremlin* +## ZFS +zfs: spl_* z_* txg_* zil_* arc_* l2arc* zfs* zed zdb zpool* + +## CEPH +ceph: ceph-* ceph_* radosgw* rbd-* cephfs-* +ceph: ceph cephadm osdmaptool crushtool rados rbd + +## CIFS & Samba +cifs: smbd nmbd winbindd ctdbd ctdb-* ctdb_* +cifs: cifsd cifscreds cifs.upcall + +## Apple Filing Protocol (AFP) +afp: netatalk afpd cnid_dbd cnid_metad -# ----------------------------------------------------------------------------- -# load testing tools +## ----------------------------------------------------------------------------- +## Desktops -locust: locust +systemd-journald: *systemd-journal* +systemd: systemd systemd-* -# ----------------------------------------------------------------------------- -# data science and machine learning tools +## GNOME +desktop: gnome-* gsd-* gjs goa-* gcr-* gvfs-* *xdg-*-gnome* passimd gvfsd* +desktop: at-spi-* at-spi2-*
dconf-service gcr-* -jupyter: jupyter* +## KDE +desktop: plasmashell kwin-* kde* *-kde-* klauncher kactivitymanagerd krunner +desktop: kdeconnectd ksmserver kglobalaccel5 plasma-* *org.kde.* +desktop: sddm* kwalletd5 knotify5 kmix kscreen kwayland-* -# ----------------------------------------------------------------------------- -# File synchronization tools +## XFCE4 +desktop: xfce4-* xfwm4 xfdesktop xfce4-panel xfsettingsd xfconfd +desktop: lightdm lightdm-* -filesync: dropbox syncthing +## Generic tools related to desktop +desktop: gdm gdm-* dbus-* xdg-* ibus-* evolution-* accounts-daemon colord +desktop: geoclue pulse* pipewire* wireplumber jack* touchegg pulseaudio +desktop: Xwayland Xorg diff --git a/src/collectors/apps.plugin/apps_incremental_collection.c b/src/collectors/apps.plugin/apps_incremental_collection.c new file mode 100644 index 000000000..e2f0e3ab7 --- /dev/null +++ b/src/collectors/apps.plugin/apps_incremental_collection.c @@ -0,0 +1,187 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "apps_plugin.h" + +#if (INCREMENTAL_DATA_COLLECTION == 1) +bool managed_log(struct pid_stat *p, PID_LOG log, bool status) { + if(unlikely(!status)) { + // netdata_log_error("command failed log %u, errno %d", log, errno); + + if(unlikely(debug_enabled || errno != ENOENT)) { + if(unlikely(debug_enabled || !(p->log_thrown & log))) { + p->log_thrown |= log; + switch(log) { + case PID_LOG_IO: +#if !defined(OS_LINUX) + netdata_log_error("Cannot fetch process %d I/O info (command '%s')", p->pid, pid_stat_comm(p)); +#else + netdata_log_error("Cannot process %s/proc/%d/io (command '%s')", netdata_configured_host_prefix, p->pid, pid_stat_comm(p)); +#endif + break; + + case PID_LOG_STATUS: +#if !defined(OS_LINUX) + netdata_log_error("Cannot fetch process %d status info (command '%s')", p->pid, pid_stat_comm(p)); +#else + netdata_log_error("Cannot process %s/proc/%d/status (command '%s')", netdata_configured_host_prefix, p->pid, pid_stat_comm(p)); +#endif + break; + + case PID_LOG_CMDLINE: +#if !defined(OS_LINUX) + netdata_log_error("Cannot fetch process %d command line (command '%s')", p->pid, pid_stat_comm(p)); +#else + netdata_log_error("Cannot process %s/proc/%d/cmdline (command '%s')", netdata_configured_host_prefix, p->pid, pid_stat_comm(p)); +#endif + break; + + case PID_LOG_FDS: +#if !defined(OS_LINUX) + netdata_log_error("Cannot fetch process %d files (command '%s')", p->pid, pid_stat_comm(p)); +#else + netdata_log_error("Cannot process entries in %s/proc/%d/fd (command '%s')", netdata_configured_host_prefix, p->pid, pid_stat_comm(p)); +#endif + break; + + case PID_LOG_LIMITS: +#if !defined(OS_LINUX) + ; +#else + netdata_log_error("Cannot process %s/proc/%d/limits (command '%s')", netdata_configured_host_prefix, p->pid, pid_stat_comm(p)); +#endif + + case PID_LOG_STAT: + break; + + default: + netdata_log_error("unhandled error for pid %d, command '%s'", p->pid, pid_stat_comm(p)); + break; + } + } + } + errno_clear(); + } + else if(unlikely(p->log_thrown & log)) { + // netdata_log_error("unsetting log %u on pid %d", log, p->pid); + p->log_thrown &= ~log; + } + + return status; +} + +static inline bool incrementally_read_pid_stat(struct pid_stat *p, void *ptr) { + p->last_stat_collected_usec = p->stat_collected_usec; + p->stat_collected_usec = now_monotonic_usec(); + calls_counter++; + + if(!OS_FUNCTION(apps_os_read_pid_stat)(p, ptr)) + return 0; + + return 1; +} + +static inline int incrementally_read_pid_io(struct pid_stat *p, void *ptr) { + p->last_io_collected_usec = 
p->io_collected_usec; + p->io_collected_usec = now_monotonic_usec(); + calls_counter++; + + bool ret = OS_FUNCTION(apps_os_read_pid_io)(p, ptr); + + return ret ? 1 : 0; +} + +// -------------------------------------------------------------------------------------------------------------------- + +int incrementally_collect_data_for_pid_stat(struct pid_stat *p, void *ptr) { + if(unlikely(p->read)) return 0; + + pid_collection_started(p); + + // -------------------------------------------------------------------- + // /proc/<pid>/stat + + if(unlikely(!managed_log(p, PID_LOG_STAT, incrementally_read_pid_stat(p, ptr)))) { + // there is no reason to proceed if we cannot get its status + pid_collection_failed(p); + return 0; + } + + // check its parent pid + if(unlikely(p->ppid < INIT_PID)) + p->ppid = 0; + + // -------------------------------------------------------------------- + // /proc/<pid>/io + + managed_log(p, PID_LOG_IO, incrementally_read_pid_io(p, ptr)); + + // -------------------------------------------------------------------- + // /proc/<pid>/status + + if(unlikely(!managed_log(p, PID_LOG_STATUS, OS_FUNCTION(apps_os_read_pid_status)(p, ptr)))) { + // there is no reason to proceed if we cannot get its status + pid_collection_failed(p); + return 0; + } + + // -------------------------------------------------------------------- + // /proc/<pid>/fd + +#if (PROCESSES_HAVE_FDS == 1) + if (enable_file_charts) { + managed_log(p, PID_LOG_FDS, read_pid_file_descriptors(p, ptr)); +#if (PROCESSES_HAVE_PID_LIMITS == 1) + managed_log(p, PID_LOG_LIMITS, OS_FUNCTION(apps_os_read_pid_limits)(p, ptr)); +#endif + } +#endif + + // -------------------------------------------------------------------- + // done! + +#if defined(NETDATA_INTERNAL_CHECKS) && (ALL_PIDS_ARE_READ_INSTANTLY == 0) + struct pid_stat *pp = p->parent; + if(unlikely(include_exited_childs && pp && !pp->read)) + nd_log(NDLS_COLLECTORS, NDLP_WARNING, + "Read process %d (%s) sortlisted %"PRIu32", but its parent %d (%s) sortlisted %"PRIu32", is not read", + p->pid, pid_stat_comm(p), p->sortlist, pp->pid, pid_stat_comm(pp), pp->sortlist); +#endif + + pid_collection_completed(p); + + return 1; +} + +int incrementally_collect_data_for_pid(pid_t pid, void *ptr) { + if(unlikely(pid < INIT_PID)) { + netdata_log_error("Invalid pid %d read (expected >= %d).
Ignoring process.", pid, INIT_PID); + return 0; + } + + struct pid_stat *p = get_or_allocate_pid_entry(pid); + if(unlikely(!p)) return 0; + + return incrementally_collect_data_for_pid_stat(p, ptr); +} +#endif + +// -------------------------------------------------------------------------------------------------------------------- + +#if (PROCESSES_HAVE_CMDLINE == 1) +int read_proc_pid_cmdline(struct pid_stat *p) { + static char cmdline[MAX_CMDLINE]; + + if(unlikely(!OS_FUNCTION(apps_os_get_pid_cmdline)(p, cmdline, sizeof(cmdline)))) + goto cleanup; + + update_pid_cmdline(p, cmdline); + + return 1; + +cleanup: + // copy the command to the command line + string_freez(p->cmdline); + p->cmdline = NULL; + return 0; +} +#endif diff --git a/src/collectors/apps.plugin/apps_os_freebsd.c b/src/collectors/apps.plugin/apps_os_freebsd.c new file mode 100644 index 000000000..1877410d6 --- /dev/null +++ b/src/collectors/apps.plugin/apps_os_freebsd.c @@ -0,0 +1,368 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "apps_plugin.h" + +#if defined(OS_FREEBSD) + +usec_t system_current_time_ut; +long global_block_size = 512; + +static long get_fs_block_size(void) { + struct statvfs vfs; + static long block_size = 0; + + if (block_size == 0) { + if (statvfs("/", &vfs) == 0) { + block_size = vfs.f_frsize ? vfs.f_frsize : vfs.f_bsize; + } else { + // If statvfs fails, fall back to the typical block size + block_size = 512; + } + } + + return block_size; +} + +void apps_os_init_freebsd(void) { + global_block_size = get_fs_block_size(); +} + +static inline void get_current_time(void) { + struct timeval current_time; + gettimeofday(¤t_time, NULL); + system_current_time_ut = timeval_usec(¤t_time); +} + +uint64_t apps_os_get_total_memory_freebsd(void) { + uint64_t ret = 0; + + int mib[2] = {CTL_HW, HW_PHYSMEM}; + size_t size = sizeof(ret); + if (sysctl(mib, 2, &ret, &size, NULL, 0) == -1) { + netdata_log_error("Failed to get total memory using sysctl"); + return 0; + } + + return ret; +} + +bool apps_os_read_pid_fds_freebsd(struct pid_stat *p, void *ptr) { + int mib[4]; + size_t size; + struct kinfo_file *fds; + static char *fdsbuf; + char *bfdsbuf, *efdsbuf; + char fdsname[FILENAME_MAX + 1]; +#define SHM_FORMAT_LEN 31 // format: 21 + size: 10 + char shm_name[FILENAME_MAX - SHM_FORMAT_LEN + 1]; + + // we make all pid fds negative, so that + // we can detect unused file descriptors + // at the end, to free them + make_all_pid_fds_negative(p); + + mib[0] = CTL_KERN; + mib[1] = KERN_PROC; + mib[2] = KERN_PROC_FILEDESC; + mib[3] = p->pid; + + if (unlikely(sysctl(mib, 4, NULL, &size, NULL, 0))) { + netdata_log_error("sysctl error: Can't get file descriptors data size for pid %d", p->pid); + return false; + } + if (likely(size > 0)) + fdsbuf = reallocz(fdsbuf, size); + if (unlikely(sysctl(mib, 4, fdsbuf, &size, NULL, 0))) { + netdata_log_error("sysctl error: Can't get file descriptors data for pid %d", p->pid); + return false; + } + + bfdsbuf = fdsbuf; + efdsbuf = fdsbuf + size; + while (bfdsbuf < efdsbuf) { + fds = (struct kinfo_file *)(uintptr_t)bfdsbuf; + if (unlikely(fds->kf_structsize == 0)) + break; + + // do not process file descriptors for current working directory, root directory, + // jail directory, ktrace vnode, text vnode and controlling terminal + if (unlikely(fds->kf_fd < 0)) { + bfdsbuf += fds->kf_structsize; + continue; + } + + // get file descriptors array index + size_t fdid = fds->kf_fd; + + // check if the fds array is small + if (unlikely(fdid >= p->fds_size)) { + // it is small, extend it + 
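+ // fds_new_size() returns an allocation size large enough to hold index fdid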
+ uint32_t new_size = fds_new_size(p->fds_size, fdid); + + debug_log("extending fd memory slots for %s from %u to %u", + pid_stat_comm(p), p->fds_size, new_size); + + p->fds = reallocz(p->fds, new_size * sizeof(struct pid_fd)); + + // and initialize it + init_pid_fds(p, p->fds_size, new_size - p->fds_size); + p->fds_size = new_size; + } + + if (unlikely(p->fds[fdid].fd == 0)) { + // we don't know this fd, get it + + switch (fds->kf_type) { + case KF_TYPE_FIFO: + case KF_TYPE_VNODE: + if (unlikely(!fds->kf_path[0])) { + sprintf(fdsname, "other: inode: %lu", fds->kf_un.kf_file.kf_file_fileid); + break; + } + sprintf(fdsname, "%s", fds->kf_path); + break; + case KF_TYPE_SOCKET: + switch (fds->kf_sock_domain) { + case AF_INET: + case AF_INET6: +#if __FreeBSD_version < 1400074 + if (fds->kf_sock_protocol == IPPROTO_TCP) + sprintf(fdsname, "socket: %d %lx", fds->kf_sock_protocol, fds->kf_un.kf_sock.kf_sock_inpcb); + else +#endif + sprintf(fdsname, "socket: %d %lx", fds->kf_sock_protocol, fds->kf_un.kf_sock.kf_sock_pcb); + break; + case AF_UNIX: + /* print address of pcb and connected pcb */ + sprintf(fdsname, "socket: %lx %lx", fds->kf_un.kf_sock.kf_sock_pcb, fds->kf_un.kf_sock.kf_sock_unpconn); + break; + default: + /* print protocol number and socket address */ +#if __FreeBSD_version < 1200031 + sprintf(fdsname, "socket: other: %d %s %s", fds->kf_sock_protocol, fds->kf_sa_local.__ss_pad1, fds->kf_sa_local.__ss_pad2); +#else + sprintf(fdsname, "socket: other: %d %s %s", fds->kf_sock_protocol, fds->kf_un.kf_sock.kf_sa_local.__ss_pad1, fds->kf_un.kf_sock.kf_sa_local.__ss_pad2); +#endif + } + break; + case KF_TYPE_PIPE: + sprintf(fdsname, "pipe: %lu %lu", fds->kf_un.kf_pipe.kf_pipe_addr, fds->kf_un.kf_pipe.kf_pipe_peer); + break; + case KF_TYPE_PTS: +#if __FreeBSD_version < 1200031 + sprintf(fdsname, "other: pts: %u", fds->kf_un.kf_pts.kf_pts_dev); +#else + sprintf(fdsname, "other: pts: %lu", fds->kf_un.kf_pts.kf_pts_dev); +#endif + break; + case KF_TYPE_SHM: + strncpyz(shm_name, fds->kf_path, FILENAME_MAX - SHM_FORMAT_LEN); + sprintf(fdsname, "other: shm: %s size: %lu", shm_name, fds->kf_un.kf_file.kf_file_size); + break; + case KF_TYPE_SEM: + sprintf(fdsname, "other: sem: %u", fds->kf_un.kf_sem.kf_sem_value); + break; + default: + sprintf(fdsname, "other: pid: %d fd: %d", fds->kf_un.kf_proc.kf_pid, fds->kf_fd); + } + + // if another process already has this, we will get + // the same id + p->fds[fdid].fd = file_descriptor_find_or_add(fdsname, 0); + } + + // else make it positive again, we need it + // of course, the actual file may have changed + + else + p->fds[fdid].fd = -p->fds[fdid].fd; + + bfdsbuf += fds->kf_structsize; + } + + return true; +} + +bool apps_os_get_pid_cmdline_freebsd(struct pid_stat *p, char *cmdline, size_t bytes) { + size_t i, b = bytes - 1; + int mib[4]; + + mib[0] = CTL_KERN; + mib[1] = KERN_PROC; + mib[2] = KERN_PROC_ARGS; + mib[3] = p->pid; + if (unlikely(sysctl(mib, 4, cmdline, &b, NULL, 0))) + return false; + + cmdline[b] = '\0'; + for(i = 0; i < b ; i++) + if(unlikely(!cmdline[i])) cmdline[i] = ' '; + + return true; +} + +bool apps_os_read_pid_io_freebsd(struct pid_stat *p, void *ptr) { + struct kinfo_proc *proc_info = (struct kinfo_proc *)ptr; + + pid_incremental_rate(io, PDF_LREAD, proc_info->ki_rusage.ru_inblock * global_block_size); + pid_incremental_rate(io, PDF_LWRITE, proc_info->ki_rusage.ru_oublock * global_block_size); + + return true; +} + +bool apps_os_read_pid_limits_freebsd(struct pid_stat *p __maybe_unused, void *ptr __maybe_unused) { + return false; 
+} + +bool apps_os_read_pid_status_freebsd(struct pid_stat *p, void *ptr) { + struct kinfo_proc *proc_info = (struct kinfo_proc *)ptr; + + p->uid = proc_info->ki_uid; + p->gid = proc_info->ki_groups[0]; + p->values[PDF_VMSIZE] = proc_info->ki_size; + p->values[PDF_VMRSS] = proc_info->ki_rssize * pagesize; + // TODO: what about shared and swap memory on FreeBSD? + return true; +} + +//bool apps_os_read_global_cpu_utilization_freebsd(void) { +// static kernel_uint_t utime_raw = 0, stime_raw = 0, ntime_raw = 0; +// static usec_t collected_usec = 0, last_collected_usec = 0; +// long cp_time[CPUSTATES]; +// +// if (unlikely(CPUSTATES != 5)) { +// goto cleanup; +// } else { +// static int mib[2] = {0, 0}; +// +// if (unlikely(GETSYSCTL_SIMPLE("kern.cp_time", mib, cp_time))) { +// goto cleanup; +// } +// } +// +// last_collected_usec = collected_usec; +// collected_usec = now_monotonic_usec(); +// +// calls_counter++; +// +// // temporary - it is added global_ntime; +// kernel_uint_t global_ntime = 0; +// +// incremental_rate(global_utime, utime_raw, cp_time[0], collected_usec, last_collected_usec, (NSEC_PER_SEC / system_hz)); +// incremental_rate(global_ntime, ntime_raw, cp_time[1], collected_usec, last_collected_usec, (NSEC_PER_SEC / system_hz)); +// incremental_rate(global_stime, stime_raw, cp_time[2], collected_usec, last_collected_usec, (NSEC_PER_SEC / system_hz)); +// +// global_utime += global_ntime; +// +// if(unlikely(global_iterations_counter == 1)) { +// global_utime = 0; +// global_stime = 0; +// global_gtime = 0; +// } +// +// return 1; +// +//cleanup: +// global_utime = 0; +// global_stime = 0; +// global_gtime = 0; +// return 0; +//} + +bool apps_os_read_pid_stat_freebsd(struct pid_stat *p, void *ptr) { + struct kinfo_proc *proc_info = (struct kinfo_proc *)ptr; + if (unlikely(proc_info->ki_tdflags & TDF_IDLETD)) + goto cleanup; + + char *comm = proc_info->ki_comm; + p->ppid = proc_info->ki_ppid; + + update_pid_comm(p, comm); + + pid_incremental_rate(stat, PDF_MINFLT, (kernel_uint_t)proc_info->ki_rusage.ru_minflt); + pid_incremental_rate(stat, PDF_CMINFLT, (kernel_uint_t)proc_info->ki_rusage_ch.ru_minflt); + pid_incremental_rate(stat, PDF_MAJFLT, (kernel_uint_t)proc_info->ki_rusage.ru_majflt); + pid_incremental_rate(stat, PDF_CMAJFLT, (kernel_uint_t)proc_info->ki_rusage_ch.ru_majflt); + pid_incremental_cpu(stat, PDF_UTIME, (kernel_uint_t)proc_info->ki_rusage.ru_utime.tv_sec * NSEC_PER_SEC + proc_info->ki_rusage.ru_utime.tv_usec * NSEC_PER_USEC); + pid_incremental_cpu(stat, PDF_STIME, (kernel_uint_t)proc_info->ki_rusage.ru_stime.tv_sec * NSEC_PER_SEC + proc_info->ki_rusage.ru_stime.tv_usec * NSEC_PER_USEC); + pid_incremental_cpu(stat, PDF_CUTIME, (kernel_uint_t)proc_info->ki_rusage_ch.ru_utime.tv_sec * NSEC_PER_SEC + proc_info->ki_rusage_ch.ru_utime.tv_usec * NSEC_PER_USEC); + pid_incremental_cpu(stat, PDF_CSTIME, (kernel_uint_t)proc_info->ki_rusage_ch.ru_stime.tv_sec * NSEC_PER_SEC + proc_info->ki_rusage_ch.ru_stime.tv_usec * NSEC_PER_USEC); + + p->values[PDF_THREADS] = proc_info->ki_numthreads; + + usec_t started_ut = timeval_usec(&proc_info->ki_start); + p->values[PDF_UPTIME] = (system_current_time_ut > started_ut) ? 
(system_current_time_ut - started_ut) / USEC_PER_SEC : 0; + + if(unlikely(debug_enabled)) + debug_log_int("READ PROC/PID/STAT: %s/proc/%d/stat, process: '%s' on target '%s' (dt=%llu) VALUES: utime=" KERNEL_UINT_FORMAT ", stime=" KERNEL_UINT_FORMAT ", cutime=" KERNEL_UINT_FORMAT ", cstime=" KERNEL_UINT_FORMAT ", minflt=" KERNEL_UINT_FORMAT ", majflt=" KERNEL_UINT_FORMAT ", cminflt=" KERNEL_UINT_FORMAT ", cmajflt=" KERNEL_UINT_FORMAT ", threads=%d", + netdata_configured_host_prefix, p->pid, pid_stat_comm(p), (p->target)?string2str(p->target->name):"UNSET", + p->stat_collected_usec - p->last_stat_collected_usec, + p->values[PDF_UTIME], + p->values[PDF_STIME], + p->values[PDF_CUTIME], + p->values[PDF_CSTIME], + p->values[PDF_MINFLT], + p->values[PDF_MAJFLT], + p->values[PDF_CMINFLT], + p->values[PDF_CMAJFLT], + p->values[PDF_THREADS]); + + return true; + +cleanup: + return false; +} + +bool apps_os_collect_all_pids_freebsd(void) { + // Mark all processes as unread before collecting new data + struct pid_stat *p = NULL; + int i, procnum; + + static size_t procbase_size = 0; + static struct kinfo_proc *procbase = NULL; + + size_t new_procbase_size; + + int mib[3] = { CTL_KERN, KERN_PROC, KERN_PROC_PROC }; + if (unlikely(sysctl(mib, 3, NULL, &new_procbase_size, NULL, 0))) { + netdata_log_error("sysctl error: Can't get processes data size"); + return false; + } + + // give it some air for processes that may be started + // during this little time. + new_procbase_size += 100 * sizeof(struct kinfo_proc); + + // increase the buffer if needed + if(new_procbase_size > procbase_size) { + procbase_size = new_procbase_size; + procbase = reallocz(procbase, procbase_size); + } + + // sysctl() gets from new_procbase_size the buffer size + // and also returns to it the amount of data filled in + new_procbase_size = procbase_size; + + // get the processes from the system + if (unlikely(sysctl(mib, 3, procbase, &new_procbase_size, NULL, 0))) { + netdata_log_error("sysctl error: Can't get processes data"); + return false; + } + + // based on the amount of data filled in + // calculate the number of processes we got + procnum = new_procbase_size / sizeof(struct kinfo_proc); + + get_current_time(); + + for (i = 0 ; i < procnum ; ++i) { + pid_t pid = procbase[i].ki_pid; + if (pid <= 0) continue; + incrementally_collect_data_for_pid(pid, &procbase[i]); + } + + return true; +} + +#endif diff --git a/src/collectors/apps.plugin/apps_os_linux.c b/src/collectors/apps.plugin/apps_os_linux.c new file mode 100644 index 000000000..824addfd6 --- /dev/null +++ b/src/collectors/apps.plugin/apps_os_linux.c @@ -0,0 +1,770 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "apps_plugin.h" + +#if defined(OS_LINUX) + +#define MAX_PROC_PID_LIMITS 8192 +#define PROC_PID_LIMITS_MAX_OPEN_FILES_KEY "\nMax open files " + +int max_fds_cache_seconds = 60; +kernel_uint_t system_uptime_secs; + +void apps_os_init_linux(void) { + ; +} + +// -------------------------------------------------------------------------------------------------------------------- +// /proc/pid/fd + +struct arl_callback_ptr { + struct pid_stat *p; + procfile *ff; + size_t line; +}; + +bool apps_os_read_pid_fds_linux(struct pid_stat *p, void *ptr __maybe_unused) { + if(unlikely(!p->fds_dirname)) { + char dirname[FILENAME_MAX+1]; + snprintfz(dirname, FILENAME_MAX, "%s/proc/%d/fd", netdata_configured_host_prefix, p->pid); + p->fds_dirname = strdupz(dirname); + } + + DIR *fds = opendir(p->fds_dirname); + if(unlikely(!fds)) return false; + + struct dirent *de; + 
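// buffer for the readlink() target of each /proc/<pid>/fd entry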
char linkname[FILENAME_MAX + 1]; + + // we make all pid fds negative, so that + // we can detect unused file descriptors + // at the end, to free them + make_all_pid_fds_negative(p); + + while((de = readdir(fds))) { + // we need only files with numeric names + + if(unlikely(de->d_name[0] < '0' || de->d_name[0] > '9')) + continue; + + // get its number + int fdid = (int) str2l(de->d_name); + if(unlikely(fdid < 0)) continue; + + // check if the fds array is small + if(unlikely((size_t)fdid >= p->fds_size)) { + // it is small, extend it + + uint32_t new_size = fds_new_size(p->fds_size, fdid); + + debug_log("extending fd memory slots for %s from %u to %u", + pid_stat_comm(p), p->fds_size, new_size); + + p->fds = reallocz(p->fds, new_size * sizeof(struct pid_fd)); + + // and initialize it + init_pid_fds(p, p->fds_size, new_size - p->fds_size); + p->fds_size = new_size; + } + + if(unlikely(p->fds[fdid].fd < 0 && de->d_ino != p->fds[fdid].inode)) { + // inodes do not match, clear the previous entry + inodes_changed_counter++; + file_descriptor_not_used(-p->fds[fdid].fd); + clear_pid_fd(&p->fds[fdid]); + } + + if(p->fds[fdid].fd < 0 && p->fds[fdid].cache_iterations_counter > 0) { + p->fds[fdid].fd = -p->fds[fdid].fd; + p->fds[fdid].cache_iterations_counter--; + continue; + } + + if(unlikely(!p->fds[fdid].filename)) { + filenames_allocated_counter++; + char fdname[FILENAME_MAX + 1]; + snprintfz(fdname, FILENAME_MAX, "%s/proc/%d/fd/%s", netdata_configured_host_prefix, p->pid, de->d_name); + p->fds[fdid].filename = strdupz(fdname); + } + + file_counter++; + ssize_t l = readlink(p->fds[fdid].filename, linkname, FILENAME_MAX); + if(unlikely(l == -1)) { + // cannot read the link + + if(debug_enabled) + netdata_log_error("Cannot read link %s", p->fds[fdid].filename); + + if(unlikely(p->fds[fdid].fd < 0)) { + file_descriptor_not_used(-p->fds[fdid].fd); + clear_pid_fd(&p->fds[fdid]); + } + + continue; + } + else + linkname[l] = '\0'; + + uint32_t link_hash = simple_hash(linkname); + + if(unlikely(p->fds[fdid].fd < 0 && p->fds[fdid].link_hash != link_hash)) { + // the link changed + links_changed_counter++; + file_descriptor_not_used(-p->fds[fdid].fd); + clear_pid_fd(&p->fds[fdid]); + } + + if(unlikely(p->fds[fdid].fd == 0)) { + // we don't know this fd, get it + + // if another process already has this, we will get + // the same id + p->fds[fdid].fd = (int)file_descriptor_find_or_add(linkname, link_hash); + p->fds[fdid].inode = de->d_ino; + p->fds[fdid].link_hash = link_hash; + } + else { + // else make it positive again, we need it + p->fds[fdid].fd = -p->fds[fdid].fd; + } + + // caching control + // without this we read all the files on every iteration + if(max_fds_cache_seconds > 0) { + size_t spread = ((size_t)max_fds_cache_seconds > 10) ? 
10 : (size_t)max_fds_cache_seconds; + + // cache it for a few iterations + size_t max = ((size_t) max_fds_cache_seconds + (fdid % spread)) / (size_t) update_every; + p->fds[fdid].cache_iterations_reset++; + + if(unlikely(p->fds[fdid].cache_iterations_reset % spread == (size_t) fdid % spread)) + p->fds[fdid].cache_iterations_reset++; + + if(unlikely((fdid <= 2 && p->fds[fdid].cache_iterations_reset > 5) || + p->fds[fdid].cache_iterations_reset > max)) { + // for stdin, stdout, stderr (fdid <= 2) we have checked a few times, or if it goes above the max, goto max + p->fds[fdid].cache_iterations_reset = max; + } + + p->fds[fdid].cache_iterations_counter = p->fds[fdid].cache_iterations_reset; + } + } + + closedir(fds); + + return true; +} + +// -------------------------------------------------------------------------------------------------------------------- +// /proc/meminfo + +uint64_t apps_os_get_total_memory_linux(void) { + uint64_t ret = 0; + + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "%s/proc/meminfo", netdata_configured_host_prefix); + + procfile *ff = procfile_open(filename, ": \t", PROCFILE_FLAG_DEFAULT); + if(!ff) + return ret; + + ff = procfile_readall(ff); + if(!ff) + return ret; + + size_t line, lines = procfile_lines(ff); + + for(line = 0; line < lines ;line++) { + size_t words = procfile_linewords(ff, line); + if(words == 3 && strcmp(procfile_lineword(ff, line, 0), "MemTotal") == 0 && strcmp(procfile_lineword(ff, line, 2), "kB") == 0) { + ret = str2ull(procfile_lineword(ff, line, 1), NULL) * 1024; + break; + } + } + + procfile_close(ff); + + return ret; +} + +// -------------------------------------------------------------------------------------------------------------------- +// /proc/pid/cmdline + +bool apps_os_get_pid_cmdline_linux(struct pid_stat *p, char *cmdline, size_t bytes) { + if(unlikely(!p->cmdline_filename)) { + char filename[FILENAME_MAX]; + snprintfz(filename, FILENAME_MAX, "%s/proc/%d/cmdline", netdata_configured_host_prefix, p->pid); + p->cmdline_filename = strdupz(filename); + } + + int fd = open(p->cmdline_filename, procfile_open_flags, 0666); + if(unlikely(fd == -1)) + return false; + + ssize_t i, b = read(fd, cmdline, bytes - 1); + close(fd); + + if(unlikely(b < 0)) + return false; + + cmdline[b] = '\0'; + for(i = 0; i < b ; i++) + if(unlikely(!cmdline[i])) cmdline[i] = ' '; + + // remove trailing spaces + while(b > 0 && cmdline[b - 1] == ' ') + cmdline[--b] = '\0'; + + return true; +} + +// -------------------------------------------------------------------------------------------------------------------- +// /proc/pid/io + +bool apps_os_read_pid_io_linux(struct pid_stat *p, void *ptr __maybe_unused) { + static procfile *ff = NULL; + + if(unlikely(!p->io_filename)) { + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "%s/proc/%d/io", netdata_configured_host_prefix, p->pid); + p->io_filename = strdupz(filename); + } + + // open the file + ff = procfile_reopen(ff, p->io_filename, NULL, PROCFILE_FLAG_NO_ERROR_ON_FILE_IO); + if(unlikely(!ff)) goto cleanup; + + ff = procfile_readall(ff); + if(unlikely(!ff)) goto cleanup; + + pid_incremental_rate(io, PDF_LREAD, str2kernel_uint_t(procfile_lineword(ff, 0, 1))); + pid_incremental_rate(io, PDF_LWRITE, str2kernel_uint_t(procfile_lineword(ff, 1, 1))); + pid_incremental_rate(io, PDF_OREAD, str2kernel_uint_t(procfile_lineword(ff, 2, 1))); + pid_incremental_rate(io, PDF_OWRITE, str2kernel_uint_t(procfile_lineword(ff, 3, 1))); + pid_incremental_rate(io, PDF_PREAD, 
str2kernel_uint_t(procfile_lineword(ff, 4, 1))); + pid_incremental_rate(io, PDF_PWRITE, str2kernel_uint_t(procfile_lineword(ff, 5, 1))); + + return true; + +cleanup: + return false; +} + +// -------------------------------------------------------------------------------------------------------------------- +// /proc/pid/limits + +static inline kernel_uint_t get_proc_pid_limits_limit(char *buf, const char *key, size_t key_len, kernel_uint_t def) { + char *line = strstr(buf, key); + if(!line) + return def; + + char *v = &line[key_len]; + while(isspace((uint8_t)*v)) v++; + + if(strcmp(v, "unlimited") == 0) + return 0; + + return str2ull(v, NULL); +} + +bool apps_os_read_pid_limits_linux(struct pid_stat *p, void *ptr __maybe_unused) { + static char proc_pid_limits_buffer[MAX_PROC_PID_LIMITS + 1]; + bool ret = false; + bool read_limits = false; + + errno_clear(); + proc_pid_limits_buffer[0] = '\0'; + + kernel_uint_t all_fds = pid_openfds_sum(p); + if(all_fds < p->limits.max_open_files / 2 && p->io_collected_usec > p->last_limits_collected_usec && p->io_collected_usec - p->last_limits_collected_usec <= 60 * USEC_PER_SEC) { + // too frequent, we want to collect limits once per minute + ret = true; + goto cleanup; + } + + if(unlikely(!p->limits_filename)) { + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "%s/proc/%d/limits", netdata_configured_host_prefix, p->pid); + p->limits_filename = strdupz(filename); + } + + int fd = open(p->limits_filename, procfile_open_flags, 0666); + if(unlikely(fd == -1)) goto cleanup; + + ssize_t bytes = read(fd, proc_pid_limits_buffer, MAX_PROC_PID_LIMITS); + close(fd); + + if(bytes <= 0) + goto cleanup; + + // make it '\0' terminated + if(bytes < MAX_PROC_PID_LIMITS) + proc_pid_limits_buffer[bytes] = '\0'; + else + proc_pid_limits_buffer[MAX_PROC_PID_LIMITS - 1] = '\0'; + + p->limits.max_open_files = get_proc_pid_limits_limit(proc_pid_limits_buffer, PROC_PID_LIMITS_MAX_OPEN_FILES_KEY, sizeof(PROC_PID_LIMITS_MAX_OPEN_FILES_KEY) - 1, 0); + if(p->limits.max_open_files == 1) { + // it seems a bug in the kernel or something similar + // it sets max open files to 1 but the number of files + // the process has open are more than 1... 
+ // https://github.com/netdata/netdata/issues/15443 + p->limits.max_open_files = 0; + ret = true; + goto cleanup; + } + + p->last_limits_collected_usec = p->io_collected_usec; + read_limits = true; + + ret = true; + +cleanup: + if(p->limits.max_open_files) + p->openfds_limits_percent = (NETDATA_DOUBLE)all_fds * 100.0 / (NETDATA_DOUBLE)p->limits.max_open_files; + else + p->openfds_limits_percent = 0.0; + + if(p->openfds_limits_percent > 100.0) { + if(!(p->log_thrown & PID_LOG_LIMITS_DETAIL)) { + char *line; + + if(!read_limits) { + proc_pid_limits_buffer[0] = '\0'; + line = "NOT READ"; + } + else { + line = strstr(proc_pid_limits_buffer, PROC_PID_LIMITS_MAX_OPEN_FILES_KEY); + if (line) { + line++; // skip the initial newline + + char *end = strchr(line, '\n'); + if (end) + *end = '\0'; + } + } + + netdata_log_info( + "FDS_LIMITS: PID %d (%s) is using " + "%0.2f %% of its fds limits, " + "open fds = %"PRIu64 "(" + "files = %"PRIu64 ", " + "pipes = %"PRIu64 ", " + "sockets = %"PRIu64", " + "inotifies = %"PRIu64", " + "eventfds = %"PRIu64", " + "timerfds = %"PRIu64", " + "signalfds = %"PRIu64", " + "eventpolls = %"PRIu64" " + "other = %"PRIu64" " + "), open fds limit = %"PRIu64", " + "%s, " + "original line [%s]", + p->pid, pid_stat_comm(p), p->openfds_limits_percent, all_fds, + p->openfds.files, + p->openfds.pipes, + p->openfds.sockets, + p->openfds.inotifies, + p->openfds.eventfds, + p->openfds.timerfds, + p->openfds.signalfds, + p->openfds.eventpolls, + p->openfds.other, + p->limits.max_open_files, + read_limits ? "and we have read the limits AFTER counting the fds" + : "but we have read the limits BEFORE counting the fds", + line); + + p->log_thrown |= PID_LOG_LIMITS_DETAIL; + } + } + else + p->log_thrown &= ~PID_LOG_LIMITS_DETAIL; + + return ret; +} + +// -------------------------------------------------------------------------------------------------------------------- +// /proc/pid/status + +void arl_callback_status_uid(const char *name, uint32_t hash, const char *value, void *dst) { + (void)name; (void)hash; (void)value; + struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst; + if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 5)) return; + + //const char *real_uid = procfile_lineword(aptr->ff, aptr->line, 1); + const char *effective_uid = procfile_lineword(aptr->ff, aptr->line, 2); + //const char *saved_uid = procfile_lineword(aptr->ff, aptr->line, 3); + //const char *filesystem_uid = procfile_lineword(aptr->ff, aptr->line, 4); + + if(likely(effective_uid && *effective_uid)) + aptr->p->uid = (uid_t)str2l(effective_uid); +} + +void arl_callback_status_gid(const char *name, uint32_t hash, const char *value, void *dst) { + (void)name; (void)hash; (void)value; + struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst; + if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 5)) return; + + //const char *real_gid = procfile_lineword(aptr->ff, aptr->line, 1); + const char *effective_gid = procfile_lineword(aptr->ff, aptr->line, 2); + //const char *saved_gid = procfile_lineword(aptr->ff, aptr->line, 3); + //const char *filesystem_gid = procfile_lineword(aptr->ff, aptr->line, 4); + + if(likely(effective_gid && *effective_gid)) + aptr->p->gid = (uid_t)str2l(effective_gid); +} + +void arl_callback_status_vmsize(const char *name, uint32_t hash, const char *value, void *dst) { + (void)name; (void)hash; (void)value; + struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst; + if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 3)) return; + + 
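+/*
+ * The callbacks below parse /proc/<pid>/status lines of the form
+ * "VmSize:    123456 kB": word 0 is the key, word 1 the value in kB,
+ * hence the "* 1024" to store bytes. Worked by hand: "VmRSS: 2048 kB"
+ * -> str2kernel_uint_t("2048") * 1024 = 2,097,152 bytes.
+ */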
aptr->p->values[PDF_VMSIZE] = str2kernel_uint_t(procfile_lineword(aptr->ff, aptr->line, 1)) * 1024; +} + +void arl_callback_status_vmswap(const char *name, uint32_t hash, const char *value, void *dst) { + (void)name; (void)hash; (void)value; + struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst; + if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 3)) return; + + aptr->p->values[PDF_VMSWAP] = str2kernel_uint_t(procfile_lineword(aptr->ff, aptr->line, 1)) * 1024; +} + +void arl_callback_status_vmrss(const char *name, uint32_t hash, const char *value, void *dst) { + (void)name; (void)hash; (void)value; + struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst; + if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 3)) return; + + aptr->p->values[PDF_VMRSS] = str2kernel_uint_t(procfile_lineword(aptr->ff, aptr->line, 1)) * 1024; +} + +void arl_callback_status_rssfile(const char *name, uint32_t hash, const char *value, void *dst) { + (void)name; (void)hash; (void)value; + struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst; + if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 3)) return; + + aptr->p->values[PDF_RSSFILE] = str2kernel_uint_t(procfile_lineword(aptr->ff, aptr->line, 1)) * 1024; +} + +void arl_callback_status_rssshmem(const char *name, uint32_t hash, const char *value, void *dst) { + (void)name; (void)hash; (void)value; + struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst; + if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 3)) return; + + aptr->p->values[PDF_RSSSHMEM] = str2kernel_uint_t(procfile_lineword(aptr->ff, aptr->line, 1)) * 1024; +} + +void arl_callback_status_voluntary_ctxt_switches(const char *name, uint32_t hash, const char *value, void *dst) { + (void)name; (void)hash; (void)value; + struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst; + if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 2)) return; + + struct pid_stat *p = aptr->p; + pid_incremental_rate(stat, PDF_VOLCTX, str2kernel_uint_t(procfile_lineword(aptr->ff, aptr->line, 1))); +} + +void arl_callback_status_nonvoluntary_ctxt_switches(const char *name, uint32_t hash, const char *value, void *dst) { + (void)name; (void)hash; (void)value; + struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst; + if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 2)) return; + + struct pid_stat *p = aptr->p; + pid_incremental_rate(stat, PDF_NVOLCTX, str2kernel_uint_t(procfile_lineword(aptr->ff, aptr->line, 1))); +} + +bool apps_os_read_pid_status_linux(struct pid_stat *p, void *ptr __maybe_unused) { + static struct arl_callback_ptr arl_ptr; + static procfile *ff = NULL; + + if(unlikely(!p->status_arl)) { + p->status_arl = arl_create("/proc/pid/status", NULL, 60); + arl_expect_custom(p->status_arl, "Uid", arl_callback_status_uid, &arl_ptr); + arl_expect_custom(p->status_arl, "Gid", arl_callback_status_gid, &arl_ptr); + arl_expect_custom(p->status_arl, "VmSize", arl_callback_status_vmsize, &arl_ptr); + arl_expect_custom(p->status_arl, "VmRSS", arl_callback_status_vmrss, &arl_ptr); + arl_expect_custom(p->status_arl, "RssFile", arl_callback_status_rssfile, &arl_ptr); + arl_expect_custom(p->status_arl, "RssShmem", arl_callback_status_rssshmem, &arl_ptr); + arl_expect_custom(p->status_arl, "VmSwap", arl_callback_status_vmswap, &arl_ptr); + arl_expect_custom(p->status_arl, "voluntary_ctxt_switches", arl_callback_status_voluntary_ctxt_switches, &arl_ptr); + arl_expect_custom(p->status_arl, "nonvoluntary_ctxt_switches", 
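+/*
+ * The ARL ("adaptive re-sortable list") pattern used here: each
+ * arl_expect_custom() call registers a /proc/<pid>/status keyword with a
+ * callback, once per pid. On every sample, the reader calls arl_begin()
+ * and feeds each line to arl_check(list, key, value), which invokes the
+ * matching callback; its non-zero return (once all expected keys have
+ * been seen) lets the parsing loop below break out early.
+ */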
arl_callback_status_nonvoluntary_ctxt_switches, &arl_ptr); + } + + if(unlikely(!p->status_filename)) { + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "%s/proc/%d/status", netdata_configured_host_prefix, p->pid); + p->status_filename = strdupz(filename); + } + + ff = procfile_reopen(ff, p->status_filename, (!ff)?" \t:,-()/":NULL, PROCFILE_FLAG_NO_ERROR_ON_FILE_IO); + if(unlikely(!ff)) return false; + + ff = procfile_readall(ff); + if(unlikely(!ff)) return false; + + calls_counter++; + + // let ARL use this pid + arl_ptr.p = p; + arl_ptr.ff = ff; + + size_t lines = procfile_lines(ff), l; + arl_begin(p->status_arl); + + for(l = 0; l < lines ;l++) { + // debug_log("CHECK: line %zu of %zu, key '%s' = '%s'", l, lines, procfile_lineword(ff, l, 0), procfile_lineword(ff, l, 1)); + arl_ptr.line = l; + if(unlikely(arl_check(p->status_arl, + procfile_lineword(ff, l, 0), + procfile_lineword(ff, l, 1)))) break; + } + + p->values[PDF_VMSHARED] = p->values[PDF_RSSFILE] + p->values[PDF_RSSSHMEM]; + return true; +} + +// -------------------------------------------------------------------------------------------------------------------- +// global CPU utilization + +bool apps_os_read_global_cpu_utilization_linux(void) { + static char filename[FILENAME_MAX + 1] = ""; + static procfile *ff = NULL; + static kernel_uint_t utime_raw = 0, stime_raw = 0, gtime_raw = 0, gntime_raw = 0, ntime_raw = 0; + static usec_t collected_usec = 0, last_collected_usec = 0; + + if(unlikely(!ff)) { + snprintfz(filename, FILENAME_MAX, "%s/proc/stat", netdata_configured_host_prefix); + ff = procfile_open(filename, " \t:", PROCFILE_FLAG_DEFAULT); + if(unlikely(!ff)) goto cleanup; + } + + ff = procfile_readall(ff); + if(unlikely(!ff)) goto cleanup; + + last_collected_usec = collected_usec; + collected_usec = now_monotonic_usec(); + + calls_counter++; + + // temporary accumulator - global_ntime is added to global_utime below + kernel_uint_t global_ntime = 0; + + incremental_rate(global_utime, utime_raw, str2kernel_uint_t(procfile_lineword(ff, 0, 1)), collected_usec, last_collected_usec, CPU_TO_NANOSECONDCORES); + incremental_rate(global_ntime, ntime_raw, str2kernel_uint_t(procfile_lineword(ff, 0, 2)), collected_usec, last_collected_usec, CPU_TO_NANOSECONDCORES); + incremental_rate(global_stime, stime_raw, str2kernel_uint_t(procfile_lineword(ff, 0, 3)), collected_usec, last_collected_usec, CPU_TO_NANOSECONDCORES); + incremental_rate(global_gtime, gtime_raw, str2kernel_uint_t(procfile_lineword(ff, 0, 10)), collected_usec, last_collected_usec, CPU_TO_NANOSECONDCORES); + + global_utime += global_ntime; + + if(enable_guest_charts) { + // temporary accumulator - global_gntime is added to global_gtime below + kernel_uint_t global_gntime = 0; + + // guest nice time, accounted within guest time + incremental_rate(global_gntime, gntime_raw, str2kernel_uint_t(procfile_lineword(ff, 0, 11)), collected_usec, last_collected_usec, 1); + + global_gtime += global_gntime; + + // remove guest time from user time + global_utime -= (global_utime > global_gtime) ?
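+/*
+ * Field map of the aggregate "cpu" row of /proc/stat, as the raw-counter
+ * names above indicate: word 1 = user, 2 = nice, 3 = system, and the
+ * guest/guest_nice ticks read into gtime_raw/gntime_raw. The kernel
+ * already accounts guest time inside user time, so guest is subtracted
+ * from user here to avoid double counting; the ternary clamps the
+ * subtraction at zero so the unsigned counter cannot underflow.
+ */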
global_gtime : global_utime; + } + + if(unlikely(global_iterations_counter == 1)) { + global_utime = 0; + global_stime = 0; + global_gtime = 0; + } + + return true; + +cleanup: + global_utime = 0; + global_stime = 0; + global_gtime = 0; + return false; +} + +// -------------------------------------------------------------------------------------------------------------------- +// /proc/pid/stat + +static inline void update_proc_state_count(char proc_stt) { + switch (proc_stt) { + case 'S': + proc_state_count[PROC_STATUS_SLEEPING] += 1; + break; + case 'R': + proc_state_count[PROC_STATUS_RUNNING] += 1; + break; + case 'D': + proc_state_count[PROC_STATUS_SLEEPING_D] += 1; + break; + case 'Z': + proc_state_count[PROC_STATUS_ZOMBIE] += 1; + break; + case 'T': + proc_state_count[PROC_STATUS_STOPPED] += 1; + break; + default: + break; + } +} + +bool apps_os_read_pid_stat_linux(struct pid_stat *p, void *ptr __maybe_unused) { + static procfile *ff = NULL; + + if(unlikely(!p->stat_filename)) { + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "%s/proc/%d/stat", netdata_configured_host_prefix, p->pid); + p->stat_filename = strdupz(filename); + } + + bool set_quotes = (!ff) ? true : false; + + ff = procfile_reopen(ff, p->stat_filename, NULL, PROCFILE_FLAG_NO_ERROR_ON_FILE_IO); + if(unlikely(!ff)) goto cleanup; + + // if(set_quotes) procfile_set_quotes(ff, "()"); + if(unlikely(set_quotes)) + procfile_set_open_close(ff, "(", ")"); + + ff = procfile_readall(ff); + if(unlikely(!ff)) goto cleanup; + + // p->pid = str2pid_t(procfile_lineword(ff, 0, 0)); + char *comm = procfile_lineword(ff, 0, 1); + p->state = *(procfile_lineword(ff, 0, 2)); + p->ppid = (int32_t)str2pid_t(procfile_lineword(ff, 0, 3)); + // p->pgrp = (int32_t)str2pid_t(procfile_lineword(ff, 0, 4)); + // p->session = (int32_t)str2pid_t(procfile_lineword(ff, 0, 5)); + // p->tty_nr = (int32_t)str2pid_t(procfile_lineword(ff, 0, 6)); + // p->tpgid = (int32_t)str2pid_t(procfile_lineword(ff, 0, 7)); + // p->flags = str2uint64_t(procfile_lineword(ff, 0, 8)); + + update_pid_comm(p, comm); + + pid_incremental_rate(stat, PDF_MINFLT, str2kernel_uint_t(procfile_lineword(ff, 0, 9))); + pid_incremental_rate(stat, PDF_CMINFLT, str2kernel_uint_t(procfile_lineword(ff, 0, 10))); + pid_incremental_rate(stat, PDF_MAJFLT, str2kernel_uint_t(procfile_lineword(ff, 0, 11))); + pid_incremental_rate(stat, PDF_CMAJFLT, str2kernel_uint_t(procfile_lineword(ff, 0, 12))); + pid_incremental_cpu(stat, PDF_UTIME, str2kernel_uint_t(procfile_lineword(ff, 0, 13))); + pid_incremental_cpu(stat, PDF_STIME, str2kernel_uint_t(procfile_lineword(ff, 0, 14))); + pid_incremental_cpu(stat, PDF_CUTIME, str2kernel_uint_t(procfile_lineword(ff, 0, 15))); + pid_incremental_cpu(stat, PDF_CSTIME, str2kernel_uint_t(procfile_lineword(ff, 0, 16))); + // p->priority = str2kernel_uint_t(procfile_lineword(ff, 0, 17)); + // p->nice = str2kernel_uint_t(procfile_lineword(ff, 0, 18)); + p->values[PDF_THREADS] = (int32_t) str2uint32_t(procfile_lineword(ff, 0, 19), NULL); + // p->itrealvalue = str2kernel_uint_t(procfile_lineword(ff, 0, 20)); + kernel_uint_t collected_starttime = str2kernel_uint_t(procfile_lineword(ff, 0, 21)) / system_hz; + p->values[PDF_UPTIME] = (system_uptime_secs > collected_starttime)?(system_uptime_secs - collected_starttime):0; + // p->vsize = str2kernel_uint_t(procfile_lineword(ff, 0, 22)); + // p->rss = str2kernel_uint_t(procfile_lineword(ff, 0, 23)); + // p->rsslim = str2kernel_uint_t(procfile_lineword(ff, 0, 24)); + // p->starcode = 
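+/*
+ * Why procfile_set_open_close(ff, "(", ")") above: word 1 of the
+ * /proc/<pid>/stat line is the comm wrapped in parentheses, and it may
+ * contain spaces, e.g.
+ *
+ *   1234 (tmux: server) S 1 ...
+ *
+ * Treating "(" and ")" as open/close markers keeps "tmux: server" a
+ * single word, so the numeric fields that follow keep stable indices.
+ */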
str2kernel_uint_t(procfile_lineword(ff, 0, 25)); + // p->endcode = str2kernel_uint_t(procfile_lineword(ff, 0, 26)); + // p->startstack = str2kernel_uint_t(procfile_lineword(ff, 0, 27)); + // p->kstkesp = str2kernel_uint_t(procfile_lineword(ff, 0, 28)); + // p->kstkeip = str2kernel_uint_t(procfile_lineword(ff, 0, 29)); + // p->signal = str2kernel_uint_t(procfile_lineword(ff, 0, 30)); + // p->blocked = str2kernel_uint_t(procfile_lineword(ff, 0, 31)); + // p->sigignore = str2kernel_uint_t(procfile_lineword(ff, 0, 32)); + // p->sigcatch = str2kernel_uint_t(procfile_lineword(ff, 0, 33)); + // p->wchan = str2kernel_uint_t(procfile_lineword(ff, 0, 34)); + // p->nswap = str2kernel_uint_t(procfile_lineword(ff, 0, 35)); + // p->cnswap = str2kernel_uint_t(procfile_lineword(ff, 0, 36)); + // p->exit_signal = str2kernel_uint_t(procfile_lineword(ff, 0, 37)); + // p->processor = str2kernel_uint_t(procfile_lineword(ff, 0, 38)); + // p->rt_priority = str2kernel_uint_t(procfile_lineword(ff, 0, 39)); + // p->policy = str2kernel_uint_t(procfile_lineword(ff, 0, 40)); + // p->delayacct_blkio_ticks = str2kernel_uint_t(procfile_lineword(ff, 0, 41)); + + if(enable_guest_charts) { + pid_incremental_cpu(stat, PDF_GTIME, str2kernel_uint_t(procfile_lineword(ff, 0, 42))); + pid_incremental_cpu(stat, PDF_CGTIME, str2kernel_uint_t(procfile_lineword(ff, 0, 43))); + + if (show_guest_time || p->values[PDF_GTIME] || p->values[PDF_CGTIME]) { + p->values[PDF_UTIME] -= (p->values[PDF_UTIME] >= p->values[PDF_GTIME]) ? p->values[PDF_GTIME] : p->values[PDF_UTIME]; + p->values[PDF_CUTIME] -= (p->values[PDF_CUTIME] >= p->values[PDF_CGTIME]) ? p->values[PDF_CGTIME] : p->values[PDF_CUTIME]; + show_guest_time = true; + } + } + + if(unlikely(debug_enabled)) + debug_log_int("READ PROC/PID/STAT: %s/proc/%d/stat, process: '%s' on target '%s' (dt=%llu) VALUES: utime=" KERNEL_UINT_FORMAT ", stime=" KERNEL_UINT_FORMAT ", cutime=" KERNEL_UINT_FORMAT ", cstime=" KERNEL_UINT_FORMAT ", minflt=" KERNEL_UINT_FORMAT ", majflt=" KERNEL_UINT_FORMAT ", cminflt=" KERNEL_UINT_FORMAT ", cmajflt=" KERNEL_UINT_FORMAT ", threads=" KERNEL_UINT_FORMAT, + netdata_configured_host_prefix, p->pid, pid_stat_comm(p), (p->target)?string2str(p->target->name):"UNSET", p->stat_collected_usec - p->last_stat_collected_usec, + p->values[PDF_UTIME], + p->values[PDF_STIME], + p->values[PDF_CUTIME], + p->values[PDF_CSTIME], + p->values[PDF_MINFLT], + p->values[PDF_MAJFLT], + p->values[PDF_CMINFLT], + p->values[PDF_CMAJFLT], + p->values[PDF_THREADS]); + + update_proc_state_count(p->state); + return true; + +cleanup: + return false; +} + +// ---------------------------------------------------------------------------- + +// 1. read all files in /proc +// 2. for each numeric directory: +// i. read /proc/pid/stat +// ii. read /proc/pid/status +// iii. read /proc/pid/io (requires root access) +// iii. read the entries in directory /proc/pid/fd (requires root access) +// for each entry: +// a. find or create a struct file_descriptor +// b. 
cleanup any old/unused file_descriptors + +// after all these, some pids may be linked to targets, while others may not + +// in case of errors, only 1 every 1000 errors is printed +// to avoid filling up all disk space +// if debug is enabled, all errors are printed + +bool apps_os_collect_all_pids_linux(void) { +#if (PROCESSES_HAVE_STATE == 1) + // clear process state counter + memset(proc_state_count, 0, sizeof proc_state_count); +#endif + + // preload the parents and then their children + collect_parents_before_children(); + + static char uptime_filename[FILENAME_MAX + 1] = ""; + if(*uptime_filename == '\0') + snprintfz(uptime_filename, FILENAME_MAX, "%s/proc/uptime", netdata_configured_host_prefix); + + system_uptime_secs = (kernel_uint_t)(uptime_msec(uptime_filename) / MSEC_PER_SEC); + + char dirname[FILENAME_MAX + 1]; + + snprintfz(dirname, FILENAME_MAX, "%s/proc", netdata_configured_host_prefix); + DIR *dir = opendir(dirname); + if(!dir) return false; + + struct dirent *de = NULL; + + while((de = readdir(dir))) { + char *endptr = de->d_name; + + if(unlikely(de->d_type != DT_DIR || de->d_name[0] < '0' || de->d_name[0] > '9')) + continue; + + pid_t pid = (pid_t) strtoul(de->d_name, &endptr, 10); + + // make sure we read a valid number + if(unlikely(endptr == de->d_name || *endptr != '\0')) + continue; + + incrementally_collect_data_for_pid(pid, NULL); + } + closedir(dir); + + return true; +} +#endif diff --git a/src/collectors/apps.plugin/apps_os_macos.c b/src/collectors/apps.plugin/apps_os_macos.c new file mode 100644 index 000000000..27fb0ca7f --- /dev/null +++ b/src/collectors/apps.plugin/apps_os_macos.c @@ -0,0 +1,334 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "apps_plugin.h" + +#if defined(OS_MACOS) + +usec_t system_current_time_ut; +mach_timebase_info_data_t mach_info; + +void apps_os_init_macos(void) { + mach_timebase_info(&mach_info); +} + +uint64_t apps_os_get_total_memory_macos(void) { + uint64_t ret = 0; + int mib[2] = {CTL_HW, HW_MEMSIZE}; + size_t size = sizeof(ret); + if (sysctl(mib, 2, &ret, &size, NULL, 0) == -1) { + netdata_log_error("Failed to get total memory using sysctl"); + return 0; + } + + return ret; +} + +bool apps_os_read_pid_fds_macos(struct pid_stat *p, void *ptr __maybe_unused) { + static struct proc_fdinfo *fds = NULL; + static int fdsCapacity = 0; + + int bufferSize = proc_pidinfo(p->pid, PROC_PIDLISTFDS, 0, NULL, 0); + if (bufferSize <= 0) { + netdata_log_error("Failed to get the size of file descriptors for PID %d", p->pid); + return false; + } + + // Resize buffer if necessary + if (bufferSize > fdsCapacity) { + if(fds) + freez(fds); + + fds = mallocz(bufferSize); + fdsCapacity = bufferSize; + } + + int num_fds = proc_pidinfo(p->pid, PROC_PIDLISTFDS, 0, fds, bufferSize) / PROC_PIDLISTFD_SIZE; + if (num_fds <= 0) { + netdata_log_error("Failed to get the file descriptors for PID %d", p->pid); + return false; + } + + for (int i = 0; i < num_fds; i++) { + switch (fds[i].proc_fdtype) { + case PROX_FDTYPE_VNODE: { + struct vnode_fdinfowithpath vi; + if (proc_pidfdinfo(p->pid, fds[i].proc_fd, PROC_PIDFDVNODEPATHINFO, &vi, sizeof(vi)) > 0) + p->openfds.files++; + else + p->openfds.other++; + + break; + } + case PROX_FDTYPE_SOCKET: { + p->openfds.sockets++; + break; + } + case PROX_FDTYPE_PIPE: { + p->openfds.pipes++; + break; + } + + default: + p->openfds.other++; + break; + } + } + + return true; +} + +bool apps_os_get_pid_cmdline_macos(struct pid_stat *p, char *cmdline, size_t maxBytes) { + int mib[3] = {CTL_KERN, KERN_PROCARGS2, 
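+/*
+ * For reference, the layout of the sysctl(KERN_PROCARGS2) buffer parsed
+ * below, as commonly documented by other open source tools (Apple does
+ * not formally specify it):
+ *
+ *   [int argc][exec_path \0 padding \0s][argv[0] \0 argv[1] \0 ...][environment ...]
+ *
+ * The code copies argc first, skips the executable path up to its first
+ * NUL, then copies argc NUL-separated arguments while turning the
+ * separators into spaces; the inArg flag collapses the padding NUL runs,
+ * and the copied_args counter stops the copy before the environment
+ * strings.
+ */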
p->pid}; + static char *args = NULL; + static size_t size = 0; + + size_t new_size; + if (sysctl(mib, 3, NULL, &new_size, NULL, 0) == -1) { + return false; + } + + if (new_size > size) { + if (args) + freez(args); + + args = (char *)mallocz(new_size); + size = new_size; + } + + memset(cmdline, 0, new_size < maxBytes ? new_size : maxBytes); + + size_t used_size = size; + if (sysctl(mib, 3, args, &used_size, NULL, 0) == -1) + return false; + + int argc; + memcpy(&argc, args, sizeof(argc)); + char *ptr = args + sizeof(argc); + used_size -= sizeof(argc); + + // Skip the executable path + while (*ptr && used_size > 0) { + ptr++; + used_size--; + } + + // Copy only the arguments to the cmdline buffer, skipping the environment variables + size_t i = 0, copied_args = 0; + bool inArg = false; + for (; used_size > 0 && i < maxBytes - 1 && copied_args < argc; --used_size, ++ptr) { + if (*ptr == '\0') { + if (inArg) { + cmdline[i++] = ' '; // Replace nulls between arguments with spaces + inArg = false; + copied_args++; + } + } else { + cmdline[i++] = *ptr; + inArg = true; + } + } + + if (i > 0 && cmdline[i - 1] == ' ') + i--; // Remove the trailing space if present + + cmdline[i] = '\0'; // Null-terminate the string + + return true; +} + +bool apps_os_read_pid_io_macos(struct pid_stat *p, void *ptr) { + struct pid_info *pi = ptr; + + // On MacOS, proc_pid_rusage() provides disk I/O statistics (bytes read and written), + // but not at the same level of detail as Linux, which separates logical from physical I/O bytes. + pid_incremental_rate(io, PDF_LREAD, pi->rusageinfo.ri_diskio_bytesread); + pid_incremental_rate(io, PDF_LWRITE, pi->rusageinfo.ri_diskio_byteswritten); + + return true; +} + +bool apps_os_read_pid_limits_macos(struct pid_stat *p __maybe_unused, void *ptr __maybe_unused) { + return false; +} + +bool apps_os_read_pid_status_macos(struct pid_stat *p, void *ptr) { + struct pid_info *pi = ptr; + + p->uid = pi->bsdinfo.pbi_uid; + p->gid = pi->bsdinfo.pbi_gid; + p->values[PDF_VMSIZE] = pi->taskinfo.pti_virtual_size; + p->values[PDF_VMRSS] = pi->taskinfo.pti_resident_size; + // p->values[PDF_VMSWAP] = rusageinfo.ri_swapins + rusageinfo.ri_swapouts; // This is not directly available, consider an alternative representation + p->values[PDF_VOLCTX] = pi->taskinfo.pti_csw; + // p->values[PDF_NVOLCTX] = taskinfo.pti_nivcsw; + + return true; +} + +static inline void get_current_time(void) { + struct timeval current_time; + gettimeofday(&current_time, NULL); + system_current_time_ut = timeval_usec(&current_time); +} + +// bool apps_os_read_global_cpu_utilization_macos(void) { +// static kernel_uint_t utime_raw = 0, stime_raw = 0, ntime_raw = 0; +// static usec_t collected_usec = 0, last_collected_usec = 0; +// +// host_cpu_load_info_data_t cpuinfo; +// mach_msg_type_number_t count = HOST_CPU_LOAD_INFO_COUNT; +// +// if (host_statistics(mach_host_self(), HOST_CPU_LOAD_INFO, (host_info_t)&cpuinfo, &count) != KERN_SUCCESS) { +// // Handle error +// goto cleanup; +// } +// +// last_collected_usec = collected_usec; +// collected_usec = now_monotonic_usec(); +// +// calls_counter++; +// +// // Convert ticks to time +// // Note: MacOS does not separate nice time from user time in the CPU stats, so you might need to adjust this logic +// kernel_uint_t global_ntime = 0; // Assuming you want to keep track of nice time separately +// +// incremental_rate(global_utime, utime_raw, cpuinfo.cpu_ticks[CPU_STATE_USER] + cpuinfo.cpu_ticks[CPU_STATE_NICE], collected_usec, last_collected_usec,
CPU_TO_NANOSECONDCORES); +// incremental_rate(global_ntime, ntime_raw, cpuinfo.cpu_ticks[CPU_STATE_NICE], collected_usec, last_collected_usec, CPU_TO_NANOSECONDCORES); +// incremental_rate(global_stime, stime_raw, cpuinfo.cpu_ticks[CPU_STATE_SYSTEM], collected_usec, last_collected_usec, CPU_TO_NANOSECONDCORES); +// +// global_utime += global_ntime; +// +// if(unlikely(global_iterations_counter == 1)) { +// global_utime = 0; +// global_stime = 0; +// global_gtime = 0; +// } +// +// return 1; +// +// cleanup: +// global_utime = 0; +// global_stime = 0; +// global_gtime = 0; +// return 0; +// } + +bool apps_os_read_pid_stat_macos(struct pid_stat *p, void *ptr) { + struct pid_info *pi = ptr; + + p->ppid = pi->proc.kp_eproc.e_ppid; + + // Update command name and target if changed + char comm[PROC_PIDPATHINFO_MAXSIZE]; + int ret = proc_name(p->pid, comm, sizeof(comm)); + if (ret <= 0) + strncpyz(comm, "unknown", sizeof(comm) - 1); + + update_pid_comm(p, comm); + + kernel_uint_t userCPU = (pi->taskinfo.pti_total_user * mach_info.numer) / mach_info.denom; + kernel_uint_t systemCPU = (pi->taskinfo.pti_total_system * mach_info.numer) / mach_info.denom; + + // Map the values from taskinfo to the pid_stat structure + pid_incremental_rate(stat, PDF_MINFLT, pi->taskinfo.pti_faults); + pid_incremental_rate(stat, PDF_MAJFLT, pi->taskinfo.pti_pageins); + pid_incremental_cpu(stat, PDF_UTIME, userCPU); + pid_incremental_cpu(stat, PDF_STIME, systemCPU); + p->values[PDF_THREADS] = pi->taskinfo.pti_threadnum; + + usec_t started_ut = timeval_usec(&pi->proc.kp_proc.p_starttime); + p->values[PDF_UPTIME] = (system_current_time_ut > started_ut) ? (system_current_time_ut - started_ut) / USEC_PER_SEC : 0; + + // Note: Some values such as guest time, cutime, cstime, etc., are not directly available in MacOS. + // You might need to approximate or leave them unset depending on your needs. + + if(unlikely(debug_enabled)) { + debug_log_int("READ PROC/PID/STAT for MacOS: process: '%s' on target '%s' VALUES: utime=" KERNEL_UINT_FORMAT ", stime=" KERNEL_UINT_FORMAT ", minflt=" KERNEL_UINT_FORMAT ", majflt=" KERNEL_UINT_FORMAT ", threads=%d", + pid_stat_comm(p), (p->target) ? string2str(p->target->name) : "UNSET", + p->values[PDF_UTIME], + p->values[PDF_STIME], + p->values[PDF_MINFLT], + p->values[PDF_MAJFLT], + p->values[PDF_THREADS]); + } + + // MacOS doesn't have a direct concept of process state like Linux, + // so updating process state count might need a different approach. 
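+/*
+ * pti_total_user / pti_total_system above are expressed in Mach time
+ * units; multiplying by mach_timebase_info's numer/denom converts them
+ * to nanoseconds. On Intel Macs the ratio is typically 1/1 (ticks are
+ * already nanoseconds); on Apple Silicon it is commonly 125/3, so for
+ * example 24,000,000 ticks * 125 / 3 = 1,000,000,000 ns, i.e. one
+ * second of CPU time.
+ */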
+ + return true; +} + +bool apps_os_collect_all_pids_macos(void) { + // Mark all processes as unread before collecting new data + struct pid_stat *p; + static pid_t *pids = NULL; + static int allocatedProcessCount = 0; + + // Get the number of processes + int numberOfProcesses = proc_listpids(PROC_ALL_PIDS, 0, NULL, 0); + if (numberOfProcesses <= 0) { + netdata_log_error("Failed to retrieve the process count"); + return false; + } + + // Allocate or reallocate space to hold all the process IDs if necessary + if (numberOfProcesses > allocatedProcessCount) { + // Allocate additional space to avoid frequent reallocations + allocatedProcessCount = numberOfProcesses + 100; + pids = reallocz(pids, allocatedProcessCount * sizeof(pid_t)); + } + + // this is required, otherwise the PIDs become totally random + memset(pids, 0, allocatedProcessCount * sizeof(pid_t)); + + // get the list of PIDs + numberOfProcesses = proc_listpids(PROC_ALL_PIDS, 0, pids, allocatedProcessCount * sizeof(pid_t)); + if (numberOfProcesses <= 0) { + netdata_log_error("Failed to retrieve the process IDs"); + return false; + } + + get_current_time(); + + // Collect data for each process + for (int i = 0; i < numberOfProcesses; ++i) { + pid_t pid = pids[i]; + if (pid <= 0) continue; + + struct pid_info pi = { 0 }; + + int mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_PID, pid}; + + size_t procSize = sizeof(pi.proc); + if(sysctl(mib, 4, &pi.proc, &procSize, NULL, 0) == -1) { + netdata_log_error("Failed to get proc for PID %d", pid); + continue; + } + if(procSize == 0) // no such process + continue; + + int st = proc_pidinfo(pid, PROC_PIDTASKINFO, 0, &pi.taskinfo, sizeof(pi.taskinfo)); + if (st <= 0) { + netdata_log_error("Failed to get task info for PID %d", pid); + continue; + } + + st = proc_pidinfo(pid, PROC_PIDTBSDINFO, 0, &pi.bsdinfo, sizeof(pi.bsdinfo)); + if (st <= 0) { + netdata_log_error("Failed to get BSD info for PID %d", pid); + continue; + } + + st = proc_pid_rusage(pid, RUSAGE_INFO_V4, (rusage_info_t *)&pi.rusageinfo); + if (st < 0) { + netdata_log_error("Failed to get resource usage info for PID %d", pid); + continue; + } + + incrementally_collect_data_for_pid(pid, &pi); + } + + return true; +} + +#endif diff --git a/src/collectors/apps.plugin/apps_os_windows.c b/src/collectors/apps.plugin/apps_os_windows.c new file mode 100644 index 000000000..6c2cabc50 --- /dev/null +++ b/src/collectors/apps.plugin/apps_os_windows.c @@ -0,0 +1,1011 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "apps_plugin.h" +/* +{ + "SystemName": "WIN11", + "NumObjectTypes": 1, + "LittleEndian": 1, + "Version": 1, + "Revision": 1, + "DefaultObject": 238, + "PerfFreq": 10000000, + "PerfTime": 9242655165203, + "PerfTime100nSec": 133716612800215149, + "SystemTime": { + "Year": 2024, + "Month": 9, + "DayOfWeek": 2, + "Day": 24, + "Hour": 14, + "Minute": 21, + "Second": 20, + "Milliseconds": 21 + }, + "Objects": [ + { + "NameId": 230, + "Name": "Process", + "HelpId": 231, + "Help": "The Process performance object consists of counters that monitor running application program and system processes. 
All the threads in a process share the same address space and have access to the same data.", + "NumInstances": 274, + "NumCounters": 28, + "PerfTime": 133716612800215149, + "PerfFreq": 10000000, + "CodePage": 0, + "DefaultCounter": 0, + "DetailLevel": "Novice (100)", + "Instances": [ + { + "Instance": "Idle", + "UniqueID": -1, + "Labels": [ + { + "key": "Process", + "value": "Idle" + } + ], + "Counters": [ + { + "Counter": "% Processor Time", + "Value": { + "data": 106881107812500, + "time": 133716612800215149, + "type": 542180608, + "multi": 0, + "frequency": 0 + }, + "Help": "% Processor Time is the percentage of elapsed time that all of process threads used the processor to execution instructions. An instruction is the basic unit of execution in a computer, a thread is the object that executes instructions, and a process is the object created when a program is run. Code executed to handle some hardware interrupts and trap conditions are included in this count.", + "Type": "PERF_100NSEC_TIMER", + "Algorithm": "100 * (data1 - data0) / (time1 - time0)", + "Description": "64-bit Timer in 100 nsec units. Display delta divided by delta time. Display suffix: \"%\"" + }, + { + "Counter": "% User Time", + "Value": { + "data": 0, + "time": 133716612800215149, + "type": 542180608, + "multi": 0, + "frequency": 0 + }, + "Help": "% User Time is the percentage of elapsed time that the process threads spent executing code in user mode. Applications, environment subsystems, and integral subsystems execute in user mode. Code executing in user mode cannot damage the integrity of the Windows executive, kernel, and device drivers. Unlike some early operating systems, Windows uses process boundaries for subsystem protection in addition to the traditional protection of user and privileged modes. Some work done by Windows on behalf of the application might appear in other subsystem processes in addition to the privileged time in the process.", + "Type": "PERF_100NSEC_TIMER", + "Algorithm": "100 * (data1 - data0) / (time1 - time0)", + "Description": "64-bit Timer in 100 nsec units. Display delta divided by delta time. Display suffix: \"%\"" + }, + { + "Counter": "% Privileged Time", + "Value": { + "data": 106881107812500, + "time": 133716612800215149, + "type": 542180608, + "multi": 0, + "frequency": 0 + }, + "Help": "% Privileged Time is the percentage of elapsed time that the process threads spent executing code in privileged mode. When a Windows system service is called, the service will often run in privileged mode to gain access to system-private data. Such data is protected from access by threads executing in user mode. Calls to the system can be explicit or implicit, such as page faults or interrupts. Unlike some early operating systems, Windows uses process boundaries for subsystem protection in addition to the traditional protection of user and privileged modes. Some work done by Windows on behalf of the application might appear in other subsystem processes in addition to the privileged time in the process.", + "Type": "PERF_100NSEC_TIMER", + "Algorithm": "100 * (data1 - data0) / (time1 - time0)", + "Description": "64-bit Timer in 100 nsec units. Display delta divided by delta time. Display suffix: \"%\"" + }, + { + "Counter": "Virtual Bytes Peak", + "Value": { + "data": 8192, + "time": 0, + "type": 65792, + "multi": 0, + "frequency": 0 + }, + "Help": "Virtual Bytes Peak is the maximum size, in bytes, of virtual address space the process has used at any one time. 
Use of virtual address space does not necessarily imply corresponding use of either disk or main memory pages. However, virtual space is finite, and the process might limit its ability to load libraries.", + "Type": "PERF_COUNTER_LARGE_RAWCOUNT", + "Algorithm": "data0", + "Description": "A counter which should not be time averaged on display (such as an error counter on a serial line). Display as is. No Display Suffix." + }, + { + "Counter": "Virtual Bytes", + "Value": { + "data": 8192, + "time": 0, + "type": 65792, + "multi": 0, + "frequency": 0 + }, + "Help": "Virtual Bytes is the current size, in bytes, of the virtual address space the process is using. Use of virtual address space does not necessarily imply corresponding use of either disk or main memory pages. Virtual space is finite, and the process can limit its ability to load libraries.", + "Type": "PERF_COUNTER_LARGE_RAWCOUNT", + "Algorithm": "data0", + "Description": "A counter which should not be time averaged on display (such as an error counter on a serial line). Display as is. No Display Suffix." + }, + { + "Counter": "Page Faults/sec", + "Value": { + "data": 9, + "time": 9242655165203, + "type": 272696320, + "multi": 0, + "frequency": 10000000 + }, + "Help": "Page Faults/sec is the rate at which page faults by the threads executing in this process are occurring. A page fault occurs when a thread refers to a virtual memory page that is not in its working set in main memory. This may not cause the page to be fetched from disk if it is on the standby list and hence already in main memory, or if it is in use by another process with whom the page is shared.", + "Type": "PERF_COUNTER_COUNTER", + "Algorithm": "(data1 - data0) / ((time1 - time0) / frequency)", + "Description": "32-bit Counter. Divide delta by delta time. Display suffix: \"/sec\"" + }, + { + "Counter": "Working Set Peak", + "Value": { + "data": 8192, + "time": 0, + "type": 65792, + "multi": 0, + "frequency": 0 + }, + "Help": "Working Set Peak is the maximum size, in bytes, of the Working Set of this process at any point in time. The Working Set is the set of memory pages touched recently by the threads in the process. If free memory in the computer is above a threshold, pages are left in the Working Set of a process even if they are not in use. When free memory falls below a threshold, pages are trimmed from Working Sets. If they are needed they will then be soft-faulted back into the Working Set before they leave main memory.", + "Type": "PERF_COUNTER_LARGE_RAWCOUNT", + "Algorithm": "data0", + "Description": "A counter which should not be time averaged on display (such as an error counter on a serial line). Display as is. No Display Suffix." + }, + { + "Counter": "Working Set", + "Value": { + "data": 8192, + "time": 0, + "type": 65792, + "multi": 0, + "frequency": 0 + }, + "Help": "Working Set is the current size, in bytes, of the Working Set of this process. The Working Set is the set of memory pages touched recently by the threads in the process. If free memory in the computer is above a threshold, pages are left in the Working Set of a process even if they are not in use. When free memory falls below a threshold, pages are trimmed from Working Sets. If they are needed they will then be soft-faulted back into the Working Set before leaving main memory.", + "Type": "PERF_COUNTER_LARGE_RAWCOUNT", + "Algorithm": "data0", + "Description": "A counter which should not be time averaged on display (such as an error counter on a serial line). Display as is. 
No Display Suffix." + }, + { + "Counter": "Page File Bytes Peak", + "Value": { + "data": 61440, + "time": 0, + "type": 65792, + "multi": 0, + "frequency": 0 + }, + "Help": "Page File Bytes Peak is the maximum amount of virtual memory, in bytes, that this process has reserved for use in the paging file(s). Paging files are used to store pages of memory used by the process that are not contained in other files. Paging files are shared by all processes, and the lack of space in paging files can prevent other processes from allocating memory. If there is no paging file, this counter reflects the maximum amount of virtual memory that the process has reserved for use in physical memory.", + "Type": "PERF_COUNTER_LARGE_RAWCOUNT", + "Algorithm": "data0", + "Description": "A counter which should not be time averaged on display (such as an error counter on a serial line). Display as is. No Display Suffix." + }, + { + "Counter": "Page File Bytes", + "Value": { + "data": 61440, + "time": 0, + "type": 65792, + "multi": 0, + "frequency": 0 + }, + "Help": "Page File Bytes is the current amount of virtual memory, in bytes, that this process has reserved for use in the paging file(s). Paging files are used to store pages of memory used by the process that are not contained in other files. Paging files are shared by all processes, and the lack of space in paging files can prevent other processes from allocating memory. If there is no paging file, this counter reflects the current amount of virtual memory that the process has reserved for use in physical memory.", + "Type": "PERF_COUNTER_LARGE_RAWCOUNT", + "Algorithm": "data0", + "Description": "A counter which should not be time averaged on display (such as an error counter on a serial line). Display as is. No Display Suffix." + }, + { + "Counter": "Private Bytes", + "Value": { + "data": 61440, + "time": 0, + "type": 65792, + "multi": 0, + "frequency": 0 + }, + "Help": "Private Bytes is the current size, in bytes, of memory that this process has allocated that cannot be shared with other processes.", + "Type": "PERF_COUNTER_LARGE_RAWCOUNT", + "Algorithm": "data0", + "Description": "A counter which should not be time averaged on display (such as an error counter on a serial line). Display as is. No Display Suffix." + }, + { + "Counter": "Thread Count", + "Value": { + "data": 24, + "time": 0, + "type": 65536, + "multi": 0, + "frequency": 0 + }, + "Help": "The number of threads currently active in this process. An instruction is the basic unit of execution in a processor, and a thread is the object that executes instructions. Every running process has at least one thread.", + "Type": "PERF_COUNTER_RAWCOUNT", + "Algorithm": "data0", + "Description": "A counter which should not be time averaged on display (such as an error counter on a serial line). Display as is. No Display Suffix." + }, + { + "Counter": "Priority Base", + "Value": { + "data": 0, + "time": 0, + "type": 65536, + "multi": 0, + "frequency": 0 + }, + "Help": "The current base priority of this process. Threads within a process can raise and lower their own base priority relative to the process' base priority.", + "Type": "PERF_COUNTER_RAWCOUNT", + "Algorithm": "data0", + "Description": "A counter which should not be time averaged on display (such as an error counter on a serial line). Display as is. No Display Suffix." 
+ }, + { + "Counter": "Elapsed Time", + "Value": { + "data": 133707369666486855, + "time": 133716612800215149, + "type": 807666944, + "multi": 0, + "frequency": 10000000 + }, + "Help": "The total elapsed time, in seconds, that this process has been running.", + "Type": "PERF_ELAPSED_TIME", + "Algorithm": "(time0 - data0) / frequency0", + "Description": "The data collected in this counter is actually the start time of the item being measured. For display, this data is subtracted from the sample time to yield the elapsed time as the difference between the two. In the definition below, the PerfTime field of the Object contains the sample time as indicated by the PERF_OBJECT_TIMER bit and the difference is scaled by the PerfFreq of the Object to convert the time units into seconds." + }, + { + "Counter": "ID Process", + "Value": { + "data": 0, + "time": 0, + "type": 65536, + "multi": 0, + "frequency": 0 + }, + "Help": "ID Process is the unique identifier of this process. ID Process numbers are reused, so they only identify a process for the lifetime of that process.", + "Type": "PERF_COUNTER_RAWCOUNT", + "Algorithm": "data0", + "Description": "A counter which should not be time averaged on display (such as an error counter on a serial line). Display as is. No Display Suffix." + }, + { + "Counter": "Creating Process ID", + "Value": { + "data": 0, + "time": 0, + "type": 65536, + "multi": 0, + "frequency": 0 + }, + "Help": "The Creating Process ID value is the Process ID of the process that created the process. The creating process may have terminated, so this value may no longer identify a running process.", + "Type": "PERF_COUNTER_RAWCOUNT", + "Algorithm": "data0", + "Description": "A counter which should not be time averaged on display (such as an error counter on a serial line). Display as is. No Display Suffix." + }, + { + "Counter": "Pool Paged Bytes", + "Value": { + "data": 0, + "time": 0, + "type": 65536, + "multi": 0, + "frequency": 0 + }, + "Help": "Pool Paged Bytes is the size, in bytes, of the paged pool, an area of the system virtual memory that is used for objects that can be written to disk when they are not being used. Memory\\\\Pool Paged Bytes is calculated differently than Process\\\\Pool Paged Bytes, so it might not equal Process(_Total)\\\\Pool Paged Bytes. This counter displays the last observed value only; it is not an average.", + "Type": "PERF_COUNTER_RAWCOUNT", + "Algorithm": "data0", + "Description": "A counter which should not be time averaged on display (such as an error counter on a serial line). Display as is. No Display Suffix." + }, + { + "Counter": "Pool Nonpaged Bytes", + "Value": { + "data": 272, + "time": 0, + "type": 65536, + "multi": 0, + "frequency": 0 + }, + "Help": "Pool Nonpaged Bytes is the size, in bytes, of the nonpaged pool, an area of the system virtual memory that is used for objects that cannot be written to disk, but must remain in physical memory as long as they are allocated. Memory\\\\Pool Nonpaged Bytes is calculated differently than Process\\\\Pool Nonpaged Bytes, so it might not equal Process(_Total)\\\\Pool Nonpaged Bytes. This counter displays the last observed value only; it is not an average.", + "Type": "PERF_COUNTER_RAWCOUNT", + "Algorithm": "data0", + "Description": "A counter which should not be time averaged on display (such as an error counter on a serial line). Display as is. No Display Suffix." 
+ }, + { + "Counter": "Handle Count", + "Value": { + "data": 0, + "time": 0, + "type": 65536, + "multi": 0, + "frequency": 0 + }, + "Help": "The total number of handles currently open by this process. This number is equal to the sum of the handles currently open by each thread in this process.", + "Type": "PERF_COUNTER_RAWCOUNT", + "Algorithm": "data0", + "Description": "A counter which should not be time averaged on display (such as an error counter on a serial line). Display as is. No Display Suffix." + }, + { + "Counter": "IO Read Operations/sec", + "Value": { + "data": 0, + "time": 9242655165203, + "type": 272696576, + "multi": 0, + "frequency": 10000000 + }, + "Help": "The rate at which the process is issuing read I/O operations. This counter counts all I/O activity generated by the process to include file, network and device I/Os.", + "Type": "PERF_COUNTER_BULK_COUNT", + "Algorithm": "(data1 - data0) / ((time1 - time0) / frequency)", + "Description": "64-bit Counter. Divide delta by delta time. Display Suffix: \"/sec\"" + }, + { + "Counter": "IO Write Operations/sec", + "Value": { + "data": 0, + "time": 9242655165203, + "type": 272696576, + "multi": 0, + "frequency": 10000000 + }, + "Help": "The rate at which the process is issuing write I/O operations. This counter counts all I/O activity generated by the process to include file, network and device I/Os.", + "Type": "PERF_COUNTER_BULK_COUNT", + "Algorithm": "(data1 - data0) / ((time1 - time0) / frequency)", + "Description": "64-bit Counter. Divide delta by delta time. Display Suffix: \"/sec\"" + }, + { + "Counter": "IO Data Operations/sec", + "Value": { + "data": 0, + "time": 9242655165203, + "type": 272696576, + "multi": 0, + "frequency": 10000000 + }, + "Help": "The rate at which the process is issuing read and write I/O operations. This counter counts all I/O activity generated by the process to include file, network and device I/Os.", + "Type": "PERF_COUNTER_BULK_COUNT", + "Algorithm": "(data1 - data0) / ((time1 - time0) / frequency)", + "Description": "64-bit Counter. Divide delta by delta time. Display Suffix: \"/sec\"" + }, + { + "Counter": "IO Other Operations/sec", + "Value": { + "data": 0, + "time": 9242655165203, + "type": 272696576, + "multi": 0, + "frequency": 10000000 + }, + "Help": "The rate at which the process is issuing I/O operations that are neither read nor write operations (for example, a control function). This counter counts all I/O activity generated by the process to include file, network and device I/Os.", + "Type": "PERF_COUNTER_BULK_COUNT", + "Algorithm": "(data1 - data0) / ((time1 - time0) / frequency)", + "Description": "64-bit Counter. Divide delta by delta time. Display Suffix: \"/sec\"" + }, + { + "Counter": "IO Read Bytes/sec", + "Value": { + "data": 0, + "time": 9242655165203, + "type": 272696576, + "multi": 0, + "frequency": 10000000 + }, + "Help": "The rate at which the process is reading bytes from I/O operations. This counter counts all I/O activity generated by the process to include file, network and device I/Os.", + "Type": "PERF_COUNTER_BULK_COUNT", + "Algorithm": "(data1 - data0) / ((time1 - time0) / frequency)", + "Description": "64-bit Counter. Divide delta by delta time. Display Suffix: \"/sec\"" + }, + { + "Counter": "IO Write Bytes/sec", + "Value": { + "data": 0, + "time": 9242655165203, + "type": 272696576, + "multi": 0, + "frequency": 10000000 + }, + "Help": "The rate at which the process is writing bytes to I/O operations. 
This counter counts all I/O activity generated by the process to include file, network and device I/Os.", + "Type": "PERF_COUNTER_BULK_COUNT", + "Algorithm": "(data1 - data0) / ((time1 - time0) / frequency)", + "Description": "64-bit Counter. Divide delta by delta time. Display Suffix: \"/sec\"" + }, + { + "Counter": "IO Data Bytes/sec", + "Value": { + "data": 0, + "time": 9242655165203, + "type": 272696576, + "multi": 0, + "frequency": 10000000 + }, + "Help": "The rate at which the process is reading and writing bytes in I/O operations. This counter counts all I/O activity generated by the process to include file, network and device I/Os.", + "Type": "PERF_COUNTER_BULK_COUNT", + "Algorithm": "(data1 - data0) / ((time1 - time0) / frequency)", + "Description": "64-bit Counter. Divide delta by delta time. Display Suffix: \"/sec\"" + }, + { + "Counter": "IO Other Bytes/sec", + "Value": { + "data": 0, + "time": 9242655165203, + "type": 272696576, + "multi": 0, + "frequency": 10000000 + }, + "Help": "The rate at which the process is issuing bytes to I/O operations that do not involve data such as control operations. This counter counts all I/O activity generated by the process to include file, network and device I/Os.", + "Type": "PERF_COUNTER_BULK_COUNT", + "Algorithm": "(data1 - data0) / ((time1 - time0) / frequency)", + "Description": "64-bit Counter. Divide delta by delta time. Display Suffix: \"/sec\"" + }, + { + "Counter": "Working Set - Private", + "Value": { + "data": 8192, + "time": 0, + "type": 65792, + "multi": 0, + "frequency": 0 + }, + "Help": "Working Set - Private displays the size of the working set, in bytes, that is use for this process only and not shared nor sharable by other processes.", + "Type": "PERF_COUNTER_LARGE_RAWCOUNT", + "Algorithm": "data0", + "Description": "A counter which should not be time averaged on display (such as an error counter on a serial line). Display as is. No Display Suffix." 
+ } + ] + }, + */ + + +#if defined(OS_WINDOWS) + +#include <windows.h> +#include <tlhelp32.h> +#include <winver.h> +#include <winsvc.h> +#include <wchar.h> + +WCHAR* GetProcessCommandLine(HANDLE hProcess); + +struct perflib_data { + PERF_DATA_BLOCK *pDataBlock; + PERF_OBJECT_TYPE *pObjectType; + PERF_INSTANCE_DEFINITION *pi; + DWORD pid; +}; + +void apps_os_init_windows(void) { + PerflibNamesRegistryInitialize(); + + if(!EnableWindowsPrivilege(SE_DEBUG_NAME)) + nd_log(NDLS_COLLECTORS, NDLP_WARNING, "Failed to enable %s privilege", SE_DEBUG_NAME); + + if(!EnableWindowsPrivilege(SE_SYSTEM_PROFILE_NAME)) + nd_log(NDLS_COLLECTORS, NDLP_WARNING, "Failed to enable %s privilege", SE_SYSTEM_PROFILE_NAME); + + if(!EnableWindowsPrivilege(SE_PROF_SINGLE_PROCESS_NAME)) + nd_log(NDLS_COLLECTORS, NDLP_WARNING, "Failed to enable %s privilege", SE_PROF_SINGLE_PROCESS_NAME); +} + +uint64_t apps_os_get_total_memory_windows(void) { + MEMORYSTATUSEX memStat = { 0 }; + memStat.dwLength = sizeof(memStat); + + if (!GlobalMemoryStatusEx(&memStat)) { + netdata_log_error("GlobalMemoryStatusEx() failed."); + return 0; + } + + return memStat.ullTotalPhys; +} + +// remove the PID suffix and .exe suffix, if any +static void fix_windows_comm(struct pid_stat *p, char *comm) { + char pid[UINT64_MAX_LENGTH + 1]; // +1 for the underscore + pid[0] = '_'; + print_uint64(&pid[1], p->pid); + size_t pid_len = strlen(pid); + size_t comm_len = strlen(comm); + if (pid_len < comm_len) { + char *compare = &comm[comm_len - pid_len]; + if (strcmp(pid, compare) == 0) + *compare = '\0'; + } + + // remove the .exe suffix, if any + comm_len = strlen(comm); + size_t exe_len = strlen(".exe"); + if(exe_len < comm_len) { + char *compare = &comm[comm_len - exe_len]; + if (strcmp(".exe", compare) == 0) + *compare = '\0'; + } +} + +// Convert wide string to UTF-8 +static char *wchar_to_utf8(WCHAR *s) { + static __thread char utf8[PATH_MAX]; + static __thread int utf8_size = sizeof(utf8); + + int len = WideCharToMultiByte(CP_UTF8, 0, s, -1, NULL, 0, NULL, NULL); + if (len <= 0 || len >= utf8_size) + return NULL; + + WideCharToMultiByte(CP_UTF8, 0, s, -1, utf8, utf8_size, NULL, NULL); + return utf8; +} + +static char *ansi_to_utf8(LPCSTR str) { + static __thread WCHAR unicode[PATH_MAX]; + + // First convert the ANSI string (LPSTR) to a wide string (UTF-16); wchar_to_utf8() then converts it to UTF-8 + size_t count = any_to_utf16(CP_ACP, unicode, _countof(unicode), str, -1, NULL); + if (!count) return NULL; + + return wchar_to_utf8(unicode); +} + +// -------------------------------------------------------------------------------------------------------------------- + +// return a sanitized name for the process +STRING *GetProcessFriendlyNameFromPathSanitized(WCHAR *path) { + static __thread uint8_t void_buf[1024 * 1024]; + static __thread DWORD void_buf_size = sizeof(void_buf); + static __thread wchar_t unicode[PATH_MAX]; + static __thread DWORD unicode_size = sizeof(unicode) / sizeof(*unicode); + + DWORD handle; + DWORD size = GetFileVersionInfoSizeW(path, &handle); + if (size == 0 || size > void_buf_size) + return NULL; + + if (GetFileVersionInfoW(path, handle, size, void_buf)) { + LPWSTR value = NULL; + UINT len = 0; + if (VerQueryValueW(void_buf, L"\\StringFileInfo\\040904B0\\FileDescription", (LPVOID*)&value, &len) && + len > 0 && len < unicode_size) { + wcsncpy(unicode, value, unicode_size - 1); + unicode[unicode_size - 1] = L'\0'; + char *name = wchar_to_utf8(unicode); + sanitize_apps_plugin_chart_meta(name); + return string_strdupz(name); + } + } + + return NULL; +} + +#define SERVICE_PREFIX "Service " +// return a sanitized name for the
process +static STRING *GetNameFromCmdlineSanitized(struct pid_stat *p) { + if(!p->cmdline) return NULL; + + char buf[string_strlen(p->cmdline) + 1]; + memcpy(buf, string2str(p->cmdline), sizeof(buf)); + char *words[100]; + size_t num_words = quoted_strings_splitter(buf, words, 100, isspace_map_pluginsd); + + if(string_strcmp(p->comm, "svchost") == 0) { + // find -s SERVICE in the command line + for(size_t i = 0; i < num_words ;i++) { + if(strcmp(words[i], "-s") == 0 && i + 1 < num_words) { + char service[strlen(words[i + 1]) + sizeof(SERVICE_PREFIX)]; // sizeof() includes a null + strcpy(service, SERVICE_PREFIX); + strcpy(&service[sizeof(SERVICE_PREFIX) - 1], words[i + 1]); + sanitize_apps_plugin_chart_meta(service); + return string_strdupz(service); + } + } + } + + return NULL; +} + +static void GetServiceNames(void) { + SC_HANDLE hSCManager = OpenSCManager(NULL, NULL, SC_MANAGER_ENUMERATE_SERVICE); + if (hSCManager == NULL) return; + + DWORD dwBytesNeeded = 0, dwServicesReturned = 0, dwResumeHandle = 0; + ENUM_SERVICE_STATUS_PROCESS *pServiceStatus = NULL; + + // First, query the required buffer size + EnumServicesStatusEx( + hSCManager, SC_ENUM_PROCESS_INFO, SERVICE_WIN32, SERVICE_STATE_ALL, + NULL, 0, &dwBytesNeeded, &dwServicesReturned, &dwResumeHandle, NULL); + + if (dwBytesNeeded == 0) { + CloseServiceHandle(hSCManager); + return; + } + + // Allocate memory to hold the services + pServiceStatus = mallocz(dwBytesNeeded); + + // Now, retrieve the list of services + if (!EnumServicesStatusEx( + hSCManager, SC_ENUM_PROCESS_INFO, SERVICE_WIN32, SERVICE_STATE_ALL, + (LPBYTE)pServiceStatus, dwBytesNeeded, &dwBytesNeeded, &dwServicesReturned, + &dwResumeHandle, NULL)) { + freez(pServiceStatus); + CloseServiceHandle(hSCManager); + return; + } + + // Loop through the services + for (DWORD i = 0; i < dwServicesReturned; i++) { + if(!pServiceStatus[i].lpDisplayName || !*pServiceStatus[i].lpDisplayName) + continue; + + struct pid_stat *p = find_pid_entry((pid_t)pServiceStatus[i].ServiceStatusProcess.dwProcessId); + if(p && !p->got_service) { + p->got_service = true; + + char *name = ansi_to_utf8(pServiceStatus[i].lpDisplayName); + if(name) { + sanitize_apps_plugin_chart_meta(name); + string_freez(p->name); + p->name = string_strdupz(name); + } + } + } + + free(pServiceStatus); + CloseServiceHandle(hSCManager); +} + +static WCHAR *executable_path_from_cmdline(WCHAR *cmdline) { + if (!cmdline || !*cmdline) return NULL; + + WCHAR *exe_path_start = cmdline; + WCHAR *exe_path_end = NULL; + + if (cmdline[0] == L'"') { + // Command line starts with a double quote + exe_path_start++; // Move past the first double quote + exe_path_end = wcschr(exe_path_start, L'"'); // Find the next quote + } + else { + // Command line does not start with a double quote + exe_path_end = wcschr(exe_path_start, L' '); // Find the first space + } + + if (exe_path_end) { + // Null-terminate the string at the end of the executable path + *exe_path_end = L'\0'; + return exe_path_start; + } + + return NULL; +} + +static BOOL GetProcessUserSID(HANDLE hProcess, PSID *ppSid) { + HANDLE hToken; + BOOL result = FALSE; + DWORD dwSize = 0; + PTOKEN_USER pTokenUser = NULL; + + if (!OpenProcessToken(hProcess, TOKEN_QUERY, &hToken)) + return FALSE; + + GetTokenInformation(hToken, TokenUser, NULL, 0, &dwSize); + if (dwSize == 0) { + CloseHandle(hToken); + return FALSE; + } + + pTokenUser = (PTOKEN_USER)LocalAlloc(LPTR, dwSize); + if (pTokenUser == NULL) { + CloseHandle(hToken); + return FALSE; + } + + if (GetTokenInformation(hToken, 
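+/*
+ * GetProcessUserSID() follows the usual Win32 two-call pattern: the
+ * first GetTokenInformation(hToken, TokenUser, NULL, 0, &dwSize) call is
+ * expected to fail and only reports the required buffer size; the second
+ * call fills the allocated TOKEN_USER, whose SID is then deep-copied
+ * with CopySid() so that it remains valid after the token buffer is
+ * freed.
+ */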
TokenUser, pTokenUser, dwSize, &dwSize)) { + DWORD sidSize = GetLengthSid(pTokenUser->User.Sid); + *ppSid = (PSID)LocalAlloc(LPTR, sidSize); + if (*ppSid) { + if (CopySid(sidSize, *ppSid, pTokenUser->User.Sid)) { + result = TRUE; + } else { + LocalFree(*ppSid); + *ppSid = NULL; + } + } + } + + LocalFree(pTokenUser); + CloseHandle(hToken); + return result; +} + +void GetAllProcessesInfo(void) { + static __thread wchar_t unicode[PATH_MAX]; + static __thread DWORD unicode_size = sizeof(unicode) / sizeof(*unicode); + + calls_counter++; + + HANDLE hSnapshot = CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0); + if (hSnapshot == INVALID_HANDLE_VALUE) return; + + PROCESSENTRY32W pe32; + pe32.dwSize = sizeof(PROCESSENTRY32W); + + if (!Process32FirstW(hSnapshot, &pe32)) { + CloseHandle(hSnapshot); + return; + } + + bool need_service_names = false; + + do { + if(!pe32.th32ProcessID) continue; + + struct pid_stat *p = get_or_allocate_pid_entry((pid_t)pe32.th32ProcessID); + p->ppid = (pid_t)pe32.th32ParentProcessID; + if(p->got_info) continue; + p->got_info = true; + + HANDLE hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ, FALSE, p->pid); + if (hProcess == NULL) + continue; + + // Get the full command line, if possible + { + WCHAR *cmdline = GetProcessCommandLine(hProcess); // returns malloc'd buffer + if (cmdline) { + update_pid_cmdline(p, wchar_to_utf8(cmdline)); + + // extract the process full path from the command line + WCHAR *path = executable_path_from_cmdline(cmdline); + if(path) { + string_freez(p->name); + p->name = GetProcessFriendlyNameFromPathSanitized(path); + } + + free(cmdline); // free(), not freez() + } + } + + if(!p->cmdline || !p->name) { + DWORD image_size = unicode_size; // QueryFullProcessImageNameW() shrinks the size in place, so pass a copy + if (QueryFullProcessImageNameW(hProcess, 0, unicode, &image_size)) { + // put the full path name to the command into cmdline + if(!p->cmdline) + update_pid_cmdline(p, wchar_to_utf8(unicode)); + + if(!p->name) + p->name = GetProcessFriendlyNameFromPathSanitized(unicode); + } + } + + if(!p->sid_name) { + PSID pSid = NULL; + if (GetProcessUserSID(hProcess, &pSid)) + p->sid_name = cached_sid_fullname_or_sid_str(pSid); + else + p->sid_name = string_strdupz("Unknown"); + } + + CloseHandle(hProcess); + + char *comm = wchar_to_utf8(pe32.szExeFile); + fix_windows_comm(p, comm); + update_pid_comm(p, comm); // will sanitize p->comm + + if(!need_service_names && string_strcmp(p->comm, "svchost") == 0) + need_service_names = true; + + STRING *better_name = GetNameFromCmdlineSanitized(p); + if(better_name) { + string_freez(p->name); + p->name = better_name; + } + + } while (Process32NextW(hSnapshot, &pe32)); + + CloseHandle(hSnapshot); + + if(need_service_names) + GetServiceNames(); +} + +static inline kernel_uint_t perflib_cpu_utilization(COUNTER_DATA *d) { + internal_fatal(d->current.CounterType != PERF_100NSEC_TIMER, + "Wrong timer type"); + + ULONGLONG data1 = d->current.Data; + ULONGLONG data0 = d->previous.Data; + LONGLONG time1 = d->current.Time; + LONGLONG time0 = d->previous.Time; + + /* + * The Windows documentation provides the formula for percentage: + * + * 100 * (data1 - data0) / (time1 - time0) + * + * To get a fraction (0.0 to 1.0) instead of a percentage, we + * simply remove the 100 multiplier: + * + * (data1 - data0) / (time1 - time0) + * + * This fraction represents the portion of a single CPU core used + * over the time period. Multiplying this fraction by NSEC_PER_SEC + * converts it to nanosecond-cores: + * + * NSEC_PER_SEC * (data1 - data0) / (time1 - time0) + */ + + LONGLONG dt = time1 - time0; + if(dt > 0) + return NSEC_PER_SEC * (data1 - data0) / dt; + else + return 0; +}
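+// Worked example (hypothetical numbers): if the counter advanced by
+// data1 - data0 = 2,500,000 hundred-nanosecond units while the sampling window
+// advanced by time1 - time0 = 10,000,000 units, the process used 0.25 of one
+// core, and the function returns 0.25 * NSEC_PER_SEC = 250,000,000 nanosecond-cores.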
+static inline kernel_uint_t perflib_rate(COUNTER_DATA *d) { + ULONGLONG data1 = d->current.Data; + ULONGLONG data0 = d->previous.Data; + LONGLONG time1 = d->current.Time; + LONGLONG time0 = d->previous.Time; + + LONGLONG dt = (time1 - time0); + if(dt > 0) + return (RATES_DETAIL * (data1 - data0)) / dt; + else + return 0; +} + +static inline kernel_uint_t perflib_value(COUNTER_DATA *d) { + internal_fatal(d->current.CounterType != PERF_COUNTER_LARGE_RAWCOUNT && + d->current.CounterType != PERF_COUNTER_RAWCOUNT, + "Wrong gauge type"); + + return d->current.Data; +} + +static inline kernel_uint_t perflib_elapsed(COUNTER_DATA *d) { + ULONGLONG data1 = d->current.Data; + LONGLONG time1 = d->current.Time; + LONGLONG freq1 = d->current.Frequency; + + internal_fatal(d->current.CounterType != PERF_ELAPSED_TIME || !freq1, + "Wrong gauge type"); + + if(!data1 || !time1 || !freq1 || data1 > (ULONGLONG)time1) + return 0; + + return (time1 - data1) / freq1; +} + +bool apps_os_collect_all_pids_windows(void) { + calls_counter++; + + struct perflib_data d = { 0 }; + d.pDataBlock = perflibGetPerformanceData(RegistryFindIDByName("Process")); + if(!d.pDataBlock) return false; + + d.pObjectType = perflibFindObjectTypeByName(d.pDataBlock, "Process"); + if(!d.pObjectType) { + perflibFreePerformanceData(); + return false; + } + + // we need these outside the loop to avoid searching by name all the time + // (our perflib library caches the id inside the COUNTER_DATA). + COUNTER_DATA processId = {.key = "ID Process"}; + + d.pi = NULL; + size_t added = 0; + for(LONG i = 0; i < d.pObjectType->NumInstances; i++) { + d.pi = perflibForEachInstance(d.pDataBlock, d.pObjectType, d.pi); + if (!d.pi) break; + + perflibGetInstanceCounter(d.pDataBlock, d.pObjectType, d.pi, &processId); + d.pid = (DWORD) processId.current.Data; + if (d.pid == 0) continue; // 0 = Idle (this takes all the spare resources) + + // Get or create pid_stat structure + struct pid_stat *p = get_or_allocate_pid_entry((pid_t) d.pid); + + if (unlikely(!p->initialized)) { + // a new pid + p->initialized = true; + + static __thread char comm[MAX_PATH]; + + if (getInstanceName(d.pDataBlock, d.pObjectType, d.pi, comm, sizeof(comm))) + fix_windows_comm(p, comm); + else + strncpyz(comm, "unknown", sizeof(comm) - 1); + + if(strcmp(comm, "wininit") == 0) + INIT_PID = p->pid; + + update_pid_comm(p, comm); // will sanitize p->comm + added++; + + COUNTER_DATA ppid = {.key = "Creating Process ID"}; + perflibGetInstanceCounter(d.pDataBlock, d.pObjectType, d.pi, &ppid); + p->ppid = (pid_t) ppid.current.Data; + + p->perflib[PDF_UTIME].key = "% User Time"; + p->perflib[PDF_STIME].key = "% Privileged Time"; + p->perflib[PDF_VMSIZE].key = "Virtual Bytes"; + p->perflib[PDF_VMRSS].key = "Working Set"; + p->perflib[PDF_VMSWAP].key = "Page File Bytes"; + p->perflib[PDF_LREAD].key = "IO Read Bytes/sec"; + p->perflib[PDF_LWRITE].key = "IO Write Bytes/sec"; + p->perflib[PDF_OREAD].key = "IO Read Operations/sec"; + p->perflib[PDF_OWRITE].key = "IO Write Operations/sec"; + p->perflib[PDF_THREADS].key = "Thread Count"; + p->perflib[PDF_HANDLES].key = "Handle Count"; + p->perflib[PDF_MINFLT].key = "Page Faults/sec"; + p->perflib[PDF_UPTIME].key = "Elapsed Time"; + } + + pid_collection_started(p); + + // get all data from perflib
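+ // Note: if any of the counters registered above is missing for this instance,
+ // the sample is abandoned and pid_collection_failed() restores the previous
+ // values, so a partial read never produces bogus rates.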
+ size_t ok = 0, failed = 0, invalid = 0; + for (PID_FIELD f = 0; f < PDF_MAX; f++) { + if (p->perflib[f].key) { + if (!perflibGetInstanceCounter(d.pDataBlock, d.pObjectType, d.pi, &p->perflib[f])) { + failed++; + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "Cannot find field '%s' in processes data", p->perflib[f].key); + } else + ok++; + } else + invalid++; + } + + if(failed) { + pid_collection_failed(p); + continue; + } + + // CPU time + p->values[PDF_UTIME] = perflib_cpu_utilization(&p->perflib[PDF_UTIME]); + p->values[PDF_STIME] = perflib_cpu_utilization(&p->perflib[PDF_STIME]); + + // Memory + p->values[PDF_VMRSS] = perflib_value(&p->perflib[PDF_VMRSS]); + p->values[PDF_VMSIZE] = perflib_value(&p->perflib[PDF_VMSIZE]); + p->values[PDF_VMSWAP] = perflib_value(&p->perflib[PDF_VMSWAP]); + + // I/O + p->values[PDF_LREAD] = perflib_rate(&p->perflib[PDF_LREAD]); + p->values[PDF_LWRITE] = perflib_rate(&p->perflib[PDF_LWRITE]); + p->values[PDF_OREAD] = perflib_rate(&p->perflib[PDF_OREAD]); + p->values[PDF_OWRITE] = perflib_rate(&p->perflib[PDF_OWRITE]); + + // Threads + p->values[PDF_THREADS] = perflib_value(&p->perflib[PDF_THREADS]); + + // Handle count + p->values[PDF_HANDLES] = perflib_value(&p->perflib[PDF_HANDLES]); + + // Page faults + // Windows doesn't distinguish between minor and major page faults + p->values[PDF_MINFLT] = perflib_rate(&p->perflib[PDF_MINFLT]); + + // Process uptime + // Convert 100-nanosecond units to seconds + p->values[PDF_UPTIME] = perflib_elapsed(&p->perflib[PDF_UPTIME]); + + pid_collection_completed(p); + +// if(p->perflib[PDF_UTIME].current.Data != p->perflib[PDF_UTIME].previous.Data && +// p->perflib[PDF_UTIME].current.Data && p->perflib[PDF_UTIME].previous.Data && +// p->pid == 61812) { +// const char *cmd = string2str(p->comm); +// uint64_t cpu_divisor = NSEC_PER_SEC / 100ULL; +// uint64_t cpus = os_get_system_cpus(); +// double u = (double)p->values[PDF_UTIME] / cpu_divisor; +// double s = (double)p->values[PDF_STIME] / cpu_divisor; +// int x = 0; +// x++; +// } + } + + perflibFreePerformanceData(); + + if(added) { + GetAllProcessesInfo(); + +#if (USE_APPS_GROUPS_CONF == 1) + for(struct pid_stat *p = root_of_pids(); p ;p = p->next) { + if(!p->assigned_to_target) + assign_app_group_target_to_pid(p); + } +#endif + } + + return true; +} + +#endif diff --git a/src/collectors/apps.plugin/apps_os_windows_nt.c b/src/collectors/apps.plugin/apps_os_windows_nt.c new file mode 100644 index 000000000..ff47cbcab --- /dev/null +++ b/src/collectors/apps.plugin/apps_os_windows_nt.c @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +// this must not include libnetdata.h because STRING is defined in winternl.h + +#include "libnetdata/common.h" + +#if defined(OS_WINDOWS) +#include <winternl.h> + +// -------------------------------------------------------------------------------------------------------------------- +// Get the full windows command line + +WCHAR* GetProcessCommandLine(HANDLE hProcess) { + PROCESS_BASIC_INFORMATION pbi; + ULONG len; + NTSTATUS status = NtQueryInformationProcess(hProcess, ProcessBasicInformation, &pbi, sizeof(pbi), &len); + if (status != 0) + return NULL; + + // read the process parameters through the PEB of the target process + PEB peb; + if (!ReadProcessMemory(hProcess, pbi.PebBaseAddress, &peb, sizeof(peb), NULL)) + return NULL; + + RTL_USER_PROCESS_PARAMETERS procParams; + if (!ReadProcessMemory(hProcess, peb.ProcessParameters, &procParams, sizeof(procParams), NULL)) + return NULL; + + WCHAR* commandLine = (WCHAR*)malloc(procParams.CommandLine.MaximumLength); + if (!commandLine) +
return NULL; + + if (!ReadProcessMemory(hProcess, procParams.CommandLine.Buffer, commandLine, procParams.CommandLine.MaximumLength, NULL)) { + free(commandLine); + return NULL; + } + + return commandLine; +} + +#endif diff --git a/src/collectors/apps.plugin/apps_output.c b/src/collectors/apps.plugin/apps_output.c index 84928e641..b9ee5252a 100644 --- a/src/collectors/apps.plugin/apps_output.c +++ b/src/collectors/apps.plugin/apps_output.c @@ -76,7 +76,7 @@ void send_resource_usage_to_netdata(usec_t dt) { "SET inode_changes = %zu\n" "SET link_changes = %zu\n" "SET pids = %zu\n" - "SET fds = %d\n" + "SET fds = %"PRIu32"\n" "SET targets = %zu\n" "SET new_pids = %zu\n" "END\n" @@ -89,8 +89,8 @@ void send_resource_usage_to_netdata(usec_t dt) { , filenames_allocated_counter , inodes_changed_counter , links_changed_counter - , all_pids_count - , all_files_len + , all_pids_count() + , all_files_len_get() , apps_groups_targets_count , targets_assignment_counter ); @@ -103,103 +103,124 @@ void send_collected_data_to_netdata(struct target *root, const char *type, usec_ if (unlikely(!w->exposed)) continue; - send_BEGIN(type, w->clean_name, "processes", dt); - send_SET("processes", w->processes); + send_BEGIN(type, string2str(w->clean_name), "processes", dt); + send_SET("processes", w->values[PDF_PROCESSES]); send_END(); - send_BEGIN(type, w->clean_name, "threads", dt); - send_SET("threads", w->num_threads); + send_BEGIN(type, string2str(w->clean_name), "threads", dt); + send_SET("threads", w->values[PDF_THREADS]); send_END(); - if (unlikely(!w->processes && !w->is_other)) + if (unlikely(!w->values[PDF_PROCESSES])) continue; - send_BEGIN(type, w->clean_name, "cpu_utilization", dt); - send_SET("user", (kernel_uint_t)(w->utime * utime_fix_ratio) + (include_exited_childs ? ((kernel_uint_t)(w->cutime * cutime_fix_ratio)) : 0ULL)); - send_SET("system", (kernel_uint_t)(w->stime * stime_fix_ratio) + (include_exited_childs ? ((kernel_uint_t)(w->cstime * cstime_fix_ratio)) : 0ULL)); +#if (PROCESSES_HAVE_CPU_CHILDREN_TIME) + send_BEGIN(type, string2str(w->clean_name), "cpu_utilization", dt); + send_SET("user", (kernel_uint_t)(w->values[PDF_UTIME] * utime_fix_ratio) + (include_exited_childs ? ((kernel_uint_t)(w->values[PDF_CUTIME] * cutime_fix_ratio)) : 0ULL)); + send_SET("system", (kernel_uint_t)(w->values[PDF_STIME] * stime_fix_ratio) + (include_exited_childs ? ((kernel_uint_t)(w->values[PDF_CSTIME] * cstime_fix_ratio)) : 0ULL)); send_END(); +#else + send_BEGIN(type, string2str(w->clean_name), "cpu_utilization", dt); + send_SET("user", (kernel_uint_t)(w->values[PDF_UTIME] * utime_fix_ratio)); + send_SET("system", (kernel_uint_t)(w->values[PDF_STIME] * stime_fix_ratio)); + send_END(); +#endif -#if !defined(__FreeBSD__) && !defined(__APPLE__) +#if (PROCESSES_HAVE_CPU_GUEST_TIME == 1) if (enable_guest_charts) { - send_BEGIN(type, w->clean_name, "cpu_guest_utilization", dt); - send_SET("guest", (kernel_uint_t)(w->gtime * gtime_fix_ratio) + (include_exited_childs ? ((kernel_uint_t)(w->cgtime * cgtime_fix_ratio)) : 0ULL)); + send_BEGIN(type, string2str(w->clean_name), "cpu_guest_utilization", dt); + send_SET("guest", (kernel_uint_t)(w->values[PDF_GTIME] * gtime_fix_ratio) +#if (PROCESSES_HAVE_CPU_CHILDREN_TIME == 1) + + (include_exited_childs ? 
((kernel_uint_t)(w->values[PDF_CGTIME] * cgtime_fix_ratio)) : 0ULL) +#endif + ); send_END(); } +#endif - send_BEGIN(type, w->clean_name, "cpu_context_switches", dt); - send_SET("voluntary", w->status_voluntary_ctxt_switches); - send_SET("involuntary", w->status_nonvoluntary_ctxt_switches); + send_BEGIN(type, string2str(w->clean_name), "mem_private_usage", dt); +#if (PROCESSES_HAVE_VMSHARED == 1) + send_SET("mem", (w->values[PDF_VMRSS] > w->values[PDF_VMSHARED])?(w->values[PDF_VMRSS] - w->values[PDF_VMSHARED]) : 0ULL); +#else + send_SET("mem", w->values[PDF_VMRSS]); +#endif send_END(); - send_BEGIN(type, w->clean_name, "mem_private_usage", dt); - send_SET("mem", (w->status_vmrss > w->status_vmshared)?(w->status_vmrss - w->status_vmshared) : 0ULL); +#if (PROCESSES_HAVE_VOLCTX == 1) || (PROCESSES_HAVE_NVOLCTX == 1) + send_BEGIN(type, string2str(w->clean_name), "cpu_context_switches", dt); +#if (PROCESSES_HAVE_VOLCTX == 1) + send_SET("voluntary", w->values[PDF_VOLCTX]); +#endif +#if (PROCESSES_HAVE_NVOLCTX == 1) + send_SET("involuntary", w->values[PDF_NVOLCTX]); +#endif send_END(); #endif - send_BEGIN(type, w->clean_name, "mem_usage", dt); - send_SET("rss", w->status_vmrss); + send_BEGIN(type, string2str(w->clean_name), "mem_usage", dt); + send_SET("rss", w->values[PDF_VMRSS]); send_END(); -#if !defined(__APPLE__) - send_BEGIN(type, w->clean_name, "vmem_usage", dt); - send_SET("vmem", w->status_vmsize); + send_BEGIN(type, string2str(w->clean_name), "vmem_usage", dt); + send_SET("vmem", w->values[PDF_VMSIZE]); send_END(); -#endif - send_BEGIN(type, w->clean_name, "mem_page_faults", dt); - send_SET("minor", (kernel_uint_t)(w->minflt * minflt_fix_ratio) + (include_exited_childs ? ((kernel_uint_t)(w->cminflt * cminflt_fix_ratio)) : 0ULL)); - send_SET("major", (kernel_uint_t)(w->majflt * majflt_fix_ratio) + (include_exited_childs ? ((kernel_uint_t)(w->cmajflt * cmajflt_fix_ratio)) : 0ULL)); + send_BEGIN(type, string2str(w->clean_name), "mem_page_faults", dt); + send_SET("minor", (kernel_uint_t)(w->values[PDF_MINFLT] * minflt_fix_ratio) +#if (PROCESSES_HAVE_CHILDREN_FLTS == 1) + + (include_exited_childs ? ((kernel_uint_t)(w->values[PDF_CMINFLT] * cminflt_fix_ratio)) : 0ULL) +#endif + ); +#if (PROCESSES_HAVE_MAJFLT == 1) + send_SET("major", (kernel_uint_t)(w->values[PDF_MAJFLT] * majflt_fix_ratio) +#if (PROCESSES_HAVE_CHILDREN_FLTS == 1) + + (include_exited_childs ? ((kernel_uint_t)(w->values[PDF_CMAJFLT] * cmajflt_fix_ratio)) : 0ULL) +#endif + ); +#endif send_END(); -#if !defined(__FreeBSD__) && !defined(__APPLE__) - send_BEGIN(type, w->clean_name, "swap_usage", dt); - send_SET("swap", w->status_vmswap); +#if (PROCESSES_HAVE_VMSWAP == 1) + send_BEGIN(type, string2str(w->clean_name), "swap_usage", dt); + send_SET("swap", w->values[PDF_VMSWAP]); send_END(); #endif - if (w->processes == 0) { - send_BEGIN(type, w->clean_name, "uptime", dt); - send_SET("uptime", 0); - send_END(); + send_BEGIN(type, string2str(w->clean_name), "uptime", dt); + send_SET("uptime", w->uptime_max); + send_END(); - if (enable_detailed_uptime_charts) { - send_BEGIN(type, w->clean_name, "uptime_summary", dt); - send_SET("min", 0); - send_SET("avg", 0); - send_SET("max", 0); - send_END(); - } - } else { - send_BEGIN(type, w->clean_name, "uptime", dt); - send_SET("uptime", w->uptime_max); + if (enable_detailed_uptime_charts) { + send_BEGIN(type, string2str(w->clean_name), "uptime_summary", dt); + send_SET("min", w->uptime_min); + send_SET("avg", w->values[PDF_PROCESSES] > 0 ? 
w->values[PDF_UPTIME] / w->values[PDF_PROCESSES] : 0); + send_SET("max", w->uptime_max); send_END(); - - if (enable_detailed_uptime_charts) { - send_BEGIN(type, w->clean_name, "uptime_summary", dt); - send_SET("min", w->uptime_min); - send_SET("avg", w->processes > 0 ? w->uptime_sum / w->processes : 0); - send_SET("max", w->uptime_max); - send_END(); - } } - send_BEGIN(type, w->clean_name, "disk_physical_io", dt); - send_SET("reads", w->io_storage_bytes_read); - send_SET("writes", w->io_storage_bytes_written); +#if (PROCESSES_HAVE_PHYSICAL_IO == 1) + send_BEGIN(type, string2str(w->clean_name), "disk_physical_io", dt); + send_SET("reads", w->values[PDF_PREAD]); + send_SET("writes", w->values[PDF_PWRITE]); send_END(); +#endif -#if !defined(__FreeBSD__) && !defined(__APPLE__) - send_BEGIN(type, w->clean_name, "disk_logical_io", dt); - send_SET("reads", w->io_logical_bytes_read); - send_SET("writes", w->io_logical_bytes_written); +#if (PROCESSES_HAVE_LOGICAL_IO == 1) + send_BEGIN(type, string2str(w->clean_name), "disk_logical_io", dt); + send_SET("reads", w->values[PDF_LREAD]); + send_SET("writes", w->values[PDF_LWRITE]); send_END(); #endif + if (enable_file_charts) { - send_BEGIN(type, w->clean_name, "fds_open_limit", dt); +#if (PROCESSES_HAVE_FDS == 1) + send_BEGIN(type, string2str(w->clean_name), "fds_open_limit", dt); send_SET("limit", w->max_open_files_percent * 100.0); send_END(); +#endif - send_BEGIN(type, w->clean_name, "fds_open", dt); + send_BEGIN(type, string2str(w->clean_name), "fds_open", dt); +#if (PROCESSES_HAVE_FDS == 1) send_SET("files", w->openfds.files); send_SET("sockets", w->openfds.sockets); send_SET("pipes", w->openfds.pipes); @@ -209,6 +230,10 @@ void send_collected_data_to_netdata(struct target *root, const char *type, usec_ send_SET("signal", w->openfds.signalfds); send_SET("eventpolls", w->openfds.eventpolls); send_SET("other", w->openfds.other); +#endif +#if (PROCESSES_HAVE_HANDLES == 1) + send_SET("handles", w->values[PDF_HANDLES]); +#endif send_END(); } } @@ -218,137 +243,166 @@ void send_collected_data_to_netdata(struct target *root, const char *type, usec_ // ---------------------------------------------------------------------------- // generate the charts +static void send_file_charts_to_netdata(struct target *w, const char *type, const char *lbl_name, const char *title, bool obsolete) { +#if (PROCESSES_HAVE_FDS == 1) + fprintf(stdout, "CHART %s.%s_fds_open_limit '' '%s open file descriptors limit' '%%' fds %s.fds_open_limit line 20200 %d %s\n", + type, string2str(w->clean_name), title, type, update_every, obsolete ? "obsolete" : ""); + + if(!obsolete) { + fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, string2str(w->name)); + fprintf(stdout, "CLABEL_COMMIT\n"); + fprintf(stdout, "DIMENSION limit '' absolute 1 100\n"); + } +#endif +
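+// Note on the "obsolete" flag: appending the 'obsolete' option to a CHART line
+// asks the netdata daemon to retire the chart; re-sending the same CHART line
+// without it makes the chart active again. This is how the file-descriptor
+// charts are withdrawn when file collection is disabled at runtime.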
"obsolete" : ""); + + if(!obsolete) { + fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, string2str(w->name)); + fprintf(stdout, "CLABEL_COMMIT\n"); +#if (PROCESSES_HAVE_FDS == 1) + fprintf(stdout, "DIMENSION files '' absolute 1 1\n"); + fprintf(stdout, "DIMENSION sockets '' absolute 1 1\n"); + fprintf(stdout, "DIMENSION pipes '' absolute 1 1\n"); + fprintf(stdout, "DIMENSION inotifies '' absolute 1 1\n"); + fprintf(stdout, "DIMENSION event '' absolute 1 1\n"); + fprintf(stdout, "DIMENSION timer '' absolute 1 1\n"); + fprintf(stdout, "DIMENSION signal '' absolute 1 1\n"); + fprintf(stdout, "DIMENSION eventpolls '' absolute 1 1\n"); + fprintf(stdout, "DIMENSION other '' absolute 1 1\n"); +#endif // PROCESSES_HAVE_FDS +#if (PROCESSES_HAVE_HANDLES == 1) + fprintf(stdout, "DIMENSION handles '' absolute 1 1\n"); +#endif // PROCESSES_HAVE_HANDLES + } +#endif // PROCESSES_HAVE_FDS || PROCESSES_HAVE_HANDLES +} + void send_charts_updates_to_netdata(struct target *root, const char *type, const char *lbl_name, const char *title) { struct target *w; - if (debug_enabled) { - for (w = root; w; w = w->next) { - if (unlikely(!w->target && w->processes)) { - struct pid_on_target *pid_on_target; - fprintf(stderr, "apps.plugin: target '%s' has aggregated %u process(es):", w->name, w->processes); - for (pid_on_target = w->root_pid; pid_on_target; pid_on_target = pid_on_target->next) { - fprintf(stderr, " %d", pid_on_target->pid); - } - fputc('\n', stderr); - } - } - } + bool disable_file_charts_on_this_run = obsolete_file_charts; + obsolete_file_charts = false; for (w = root; w; w = w->next) { - if (likely(w->exposed || (!w->processes && !w->is_other))) + if (likely(w->exposed || (!w->values[PDF_PROCESSES]))) { + if(w->exposed && disable_file_charts_on_this_run) + send_file_charts_to_netdata(w, type, lbl_name, title, true); continue; + } - w->exposed = 1; + w->exposed = true; - fprintf(stdout, "CHART %s.%s_cpu_utilization '' '%s CPU utilization (100%% = 1 core)' 'percentage' cpu %s.cpu_utilization stacked 20001 %d\n", type, w->clean_name, title, type, update_every); - fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name); + fprintf(stdout, "CHART %s.%s_cpu_utilization '' '%s CPU utilization (100%% = 1 core)' 'percentage' cpu %s.cpu_utilization stacked 20001 %d\n", + type, string2str(w->clean_name), title, type, update_every); + fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, string2str(w->name)); fprintf(stdout, "CLABEL_COMMIT\n"); - fprintf(stdout, "DIMENSION user '' absolute 1 %llu\n", time_factor * RATES_DETAIL / 100LLU); - fprintf(stdout, "DIMENSION system '' absolute 1 %llu\n", time_factor * RATES_DETAIL / 100LLU); + fprintf(stdout, "DIMENSION user '' absolute 1 %llu\n", NSEC_PER_SEC / 100ULL); + fprintf(stdout, "DIMENSION system '' absolute 1 %llu\n", NSEC_PER_SEC / 100ULL); -#if !defined(__FreeBSD__) && !defined(__APPLE__) +#if (PROCESSES_HAVE_CPU_GUEST_TIME == 1) if (enable_guest_charts) { - fprintf(stdout, "CHART %s.%s_cpu_guest_utilization '' '%s CPU guest utlization (100%% = 1 core)' 'percentage' cpu %s.cpu_guest_utilization line 20005 %d\n", type, w->clean_name, title, type, update_every); - fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name); + fprintf(stdout, "CHART %s.%s_cpu_guest_utilization '' '%s CPU guest utlization (100%% = 1 core)' 'percentage' cpu %s.cpu_guest_utilization line 20005 %d\n", + type, string2str(w->clean_name), title, type, update_every); + fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, string2str(w->name)); fprintf(stdout, "CLABEL_COMMIT\n"); - 
fprintf(stdout, "DIMENSION guest '' absolute 1 %llu\n", time_factor * RATES_DETAIL / 100LLU); + fprintf(stdout, "DIMENSION guest '' absolute 1 %llu\n", NSEC_PER_SEC / 100ULL); } +#endif - fprintf(stdout, "CHART %s.%s_cpu_context_switches '' '%s CPU context switches' 'switches/s' cpu %s.cpu_context_switches stacked 20010 %d\n", type, w->clean_name, title, type, update_every); - fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name); + fprintf(stdout, "CHART %s.%s_mem_private_usage '' '%s memory usage without shared' 'MiB' mem %s.mem_private_usage area 20050 %d\n", + type, string2str(w->clean_name), title, type, update_every); + fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, string2str(w->name)); fprintf(stdout, "CLABEL_COMMIT\n"); - fprintf(stdout, "DIMENSION voluntary '' absolute 1 %llu\n", RATES_DETAIL); - fprintf(stdout, "DIMENSION involuntary '' absolute 1 %llu\n", RATES_DETAIL); + fprintf(stdout, "DIMENSION mem '' absolute %ld %ld\n", 1L, 1024L * 1024L); - fprintf(stdout, "CHART %s.%s_mem_private_usage '' '%s memory usage without shared' 'MiB' mem %s.mem_private_usage area 20050 %d\n", type, w->clean_name, title, type, update_every); - fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name); +#if (PROCESSES_HAVE_VOLCTX == 1) || (PROCESSES_HAVE_NVOLCTX == 1) + fprintf(stdout, "CHART %s.%s_cpu_context_switches '' '%s CPU context switches' 'switches/s' cpu %s.cpu_context_switches stacked 20010 %d\n", + type, string2str(w->clean_name), title, type, update_every); + fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, string2str(w->name)); fprintf(stdout, "CLABEL_COMMIT\n"); - fprintf(stdout, "DIMENSION mem '' absolute %ld %ld\n", 1L, 1024L); +#if (PROCESSES_HAVE_VOLCTX == 1) + fprintf(stdout, "DIMENSION voluntary '' absolute 1 %llu\n", RATES_DETAIL); +#endif +#if (PROCESSES_HAVE_NVOLCTX == 1) + fprintf(stdout, "DIMENSION involuntary '' absolute 1 %llu\n", RATES_DETAIL); +#endif #endif - fprintf(stdout, "CHART %s.%s_mem_usage '' '%s memory RSS usage' 'MiB' mem %s.mem_usage area 20055 %d\n", type, w->clean_name, title, type, update_every); - fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name); + fprintf(stdout, "CHART %s.%s_mem_usage '' '%s memory RSS usage' 'MiB' mem %s.mem_usage area 20055 %d\n", + type, string2str(w->clean_name), title, type, update_every); + fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, string2str(w->name)); fprintf(stdout, "CLABEL_COMMIT\n"); - fprintf(stdout, "DIMENSION rss '' absolute %ld %ld\n", 1L, 1024L); + fprintf(stdout, "DIMENSION rss '' absolute %ld %ld\n", 1L, 1024L * 1024L); -#if !defined(__APPLE__) - fprintf(stdout, "CHART %s.%s_vmem_usage '' '%s virtual memory size' 'MiB' mem %s.vmem_usage line 20065 %d\n", type, w->clean_name, title, type, update_every); - fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name); + fprintf(stdout, "CHART %s.%s_vmem_usage '' '%s virtual memory size' 'MiB' mem %s.vmem_usage line 20065 %d\n", + type, string2str(w->clean_name), title, type, update_every); + fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, string2str(w->name)); fprintf(stdout, "CLABEL_COMMIT\n"); - fprintf(stdout, "DIMENSION vmem '' absolute %ld %ld\n", 1L, 1024L); -#endif + fprintf(stdout, "DIMENSION vmem '' absolute %ld %ld\n", 1L, 1024L * 1024L); - fprintf(stdout, "CHART %s.%s_mem_page_faults '' '%s memory page faults' 'pgfaults/s' mem %s.mem_page_faults stacked 20060 %d\n", type, w->clean_name, title, type, update_every); - fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name); + fprintf(stdout, "CHART %s.%s_mem_page_faults '' '%s 
memory page faults' 'pgfaults/s' mem %s.mem_page_faults stacked 20060 %d\n", + type, string2str(w->clean_name), title, type, update_every); + fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, string2str(w->name)); fprintf(stdout, "CLABEL_COMMIT\n"); - fprintf(stdout, "DIMENSION major '' absolute 1 %llu\n", RATES_DETAIL); fprintf(stdout, "DIMENSION minor '' absolute 1 %llu\n", RATES_DETAIL); +#if (PROCESSES_HAVE_MAJFLT == 1) + fprintf(stdout, "DIMENSION major '' absolute 1 %llu\n", RATES_DETAIL); +#endif -#if !defined(__FreeBSD__) && !defined(__APPLE__) - fprintf(stdout, "CHART %s.%s_swap_usage '' '%s swap usage' 'MiB' mem %s.swap_usage area 20065 %d\n", type, w->clean_name, title, type, update_every); - fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name); +#if (PROCESSES_HAVE_VMSWAP == 1) + fprintf(stdout, "CHART %s.%s_swap_usage '' '%s swap usage' 'MiB' mem %s.swap_usage area 20065 %d\n", + type, string2str(w->clean_name), title, type, update_every); + fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, string2str(w->name)); fprintf(stdout, "CLABEL_COMMIT\n"); - fprintf(stdout, "DIMENSION swap '' absolute %ld %ld\n", 1L, 1024L); + fprintf(stdout, "DIMENSION swap '' absolute %ld %ld\n", 1L, 1024L * 1024L); #endif -#if !defined(__FreeBSD__) && !defined(__APPLE__) - fprintf(stdout, "CHART %s.%s_disk_physical_io '' '%s disk physical IO' 'KiB/s' disk %s.disk_physical_io area 20100 %d\n", type, w->clean_name, title, type, update_every); - fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name); +#if (PROCESSES_HAVE_PHYSICAL_IO == 1) + fprintf(stdout, "CHART %s.%s_disk_physical_io '' '%s disk physical IO' 'KiB/s' disk %s.disk_physical_io area 20100 %d\n", + type, string2str(w->clean_name), title, type, update_every); + fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, string2str(w->name)); fprintf(stdout, "CLABEL_COMMIT\n"); fprintf(stdout, "DIMENSION reads '' absolute 1 %llu\n", 1024LLU * RATES_DETAIL); fprintf(stdout, "DIMENSION writes '' absolute -1 %llu\n", 1024LLU * RATES_DETAIL); +#endif - fprintf(stdout, "CHART %s.%s_disk_logical_io '' '%s disk logical IO' 'KiB/s' disk %s.disk_logical_io area 20105 %d\n", type, w->clean_name, title, type, update_every); - fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name); +#if (PROCESSES_HAVE_LOGICAL_IO == 1) + fprintf(stdout, "CHART %s.%s_disk_logical_io '' '%s disk logical IO' 'KiB/s' disk %s.disk_logical_io area 20105 %d\n", + type, string2str(w->clean_name), title, type, update_every); + fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, string2str(w->name)); fprintf(stdout, "CLABEL_COMMIT\n"); fprintf(stdout, "DIMENSION reads '' absolute 1 %llu\n", 1024LLU * RATES_DETAIL); fprintf(stdout, "DIMENSION writes '' absolute -1 %llu\n", 1024LLU * RATES_DETAIL); -#else - fprintf(stdout, "CHART %s.%s_disk_physical_io '' '%s disk physical IO' 'blocks/s' disk %s.disk_physical_block_io area 20100 %d\n", type, w->clean_name, title, type, update_every); - fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name); - fprintf(stdout, "CLABEL_COMMIT\n"); - fprintf(stdout, "DIMENSION reads '' absolute 1 %llu\n", RATES_DETAIL); - fprintf(stdout, "DIMENSION writes '' absolute -1 %llu\n", RATES_DETAIL); #endif - fprintf(stdout, "CHART %s.%s_processes '' '%s processes' 'processes' processes %s.processes line 20150 %d\n", type, w->clean_name, title, type, update_every); - fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name); + fprintf(stdout, "CHART %s.%s_processes '' '%s processes' 'processes' processes %s.processes line 20150 %d\n", + type, 
string2str(w->clean_name), title, type, update_every); + fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, string2str(w->name)); fprintf(stdout, "CLABEL_COMMIT\n"); fprintf(stdout, "DIMENSION processes '' absolute 1 1\n"); - fprintf(stdout, "CHART %s.%s_threads '' '%s threads' 'threads' processes %s.threads line 20155 %d\n", type, w->clean_name, title, type, update_every); - fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name); + fprintf(stdout, "CHART %s.%s_threads '' '%s threads' 'threads' processes %s.threads line 20155 %d\n", + type, string2str(w->clean_name), title, type, update_every); + fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, string2str(w->name)); fprintf(stdout, "CLABEL_COMMIT\n"); fprintf(stdout, "DIMENSION threads '' absolute 1 1\n"); - if (enable_file_charts) { - fprintf(stdout, "CHART %s.%s_fds_open_limit '' '%s open file descriptors limit' '%%' fds %s.fds_open_limit line 20200 %d\n", type, w->clean_name, title, type, update_every); - fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name); - fprintf(stdout, "CLABEL_COMMIT\n"); - fprintf(stdout, "DIMENSION limit '' absolute 1 100\n"); + if (enable_file_charts) + send_file_charts_to_netdata(w, type, lbl_name, title, false); - fprintf(stdout, "CHART %s.%s_fds_open '' '%s open files descriptors' 'fds' fds %s.fds_open stacked 20210 %d\n", type, w->clean_name, title, type, update_every); - fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name); - fprintf(stdout, "CLABEL_COMMIT\n"); - fprintf(stdout, "DIMENSION files '' absolute 1 1\n"); - fprintf(stdout, "DIMENSION sockets '' absolute 1 1\n"); - fprintf(stdout, "DIMENSION pipes '' absolute 1 1\n"); - fprintf(stdout, "DIMENSION inotifies '' absolute 1 1\n"); - fprintf(stdout, "DIMENSION event '' absolute 1 1\n"); - fprintf(stdout, "DIMENSION timer '' absolute 1 1\n"); - fprintf(stdout, "DIMENSION signal '' absolute 1 1\n"); - fprintf(stdout, "DIMENSION eventpolls '' absolute 1 1\n"); - fprintf(stdout, "DIMENSION other '' absolute 1 1\n"); - } - - fprintf(stdout, "CHART %s.%s_uptime '' '%s uptime' 'seconds' uptime %s.uptime line 20250 %d\n", type, w->clean_name, title, type, update_every); - fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name); + fprintf(stdout, "CHART %s.%s_uptime '' '%s uptime' 'seconds' uptime %s.uptime line 20250 %d\n", + type, string2str(w->clean_name), title, type, update_every); + fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, string2str(w->name)); fprintf(stdout, "CLABEL_COMMIT\n"); fprintf(stdout, "DIMENSION uptime '' absolute 1 1\n"); if (enable_detailed_uptime_charts) { - fprintf(stdout, "CHART %s.%s_uptime_summary '' '%s uptime summary' 'seconds' uptime %s.uptime_summary area 20255 %d\n", type, w->clean_name, title, type, update_every); - fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name); + fprintf(stdout, "CHART %s.%s_uptime_summary '' '%s uptime summary' 'seconds' uptime %s.uptime_summary area 20255 %d\n", + type, string2str(w->clean_name), title, type, update_every); + fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, string2str(w->name)); fprintf(stdout, "CLABEL_COMMIT\n"); fprintf(stdout, "DIMENSION min '' absolute 1 1\n"); fprintf(stdout, "DIMENSION avg '' absolute 1 1\n"); @@ -357,8 +411,8 @@ void send_charts_updates_to_netdata(struct target *root, const char *type, const } } +#if (PROCESSES_HAVE_STATE == 1) void send_proc_states_count(usec_t dt __maybe_unused) { -#if !defined(__FreeBSD__) && !defined(__APPLE__) static bool chart_added = false; // create chart for count of processes in different states if 
(!chart_added) { @@ -379,6 +433,6 @@ void send_proc_states_count(usec_t dt __maybe_unused) { send_SET(proc_states[i], proc_state_count[i]); } send_END(); -#endif } +#endif diff --git a/src/collectors/apps.plugin/apps_pid.c b/src/collectors/apps.plugin/apps_pid.c new file mode 100644 index 000000000..0dcee7cce --- /dev/null +++ b/src/collectors/apps.plugin/apps_pid.c @@ -0,0 +1,927 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "apps_plugin.h" + +static inline void link_pid_to_its_parent(struct pid_stat *p); + +// -------------------------------------------------------------------------------------------------------------------- +// The index of all pids + +#define SIMPLE_HASHTABLE_NAME _PID +#define SIMPLE_HASHTABLE_VALUE_TYPE struct pid_stat +#define SIMPLE_HASHTABLE_KEY_TYPE int32_t +#define SIMPLE_HASHTABLE_VALUE2KEY_FUNCTION pid_stat_to_pid_ptr +#define SIMPLE_HASHTABLE_COMPARE_KEYS_FUNCTION pid_ptr_eq +#define SIMPLE_HASHTABLE_SAMPLE_IMPLEMENTATION 0 +#include "libnetdata/simple_hashtable/simple_hashtable.h" + +static inline int32_t *pid_stat_to_pid_ptr(struct pid_stat *p) { + return &p->pid; +} + +static inline bool pid_ptr_eq(int32_t *a, int32_t *b) { + return *a == *b; +} + +struct { +#if (ALL_PIDS_ARE_READ_INSTANTLY == 0) + // Another pre-allocated list of all possible pids. + // We need it to assign them a unique sortlist id, so that we + // read parents before children. This is needed to prevent a situation where + // a child is found running, but until we read its parent, it has exited and + // its parent has accumulated its resources. + struct { + size_t size; + struct pid_stat **array; + } sorted; +#endif + + struct { + size_t count; // the number of processes running + struct pid_stat *root; + SIMPLE_HASHTABLE_PID ht; + ARAL *aral; + } all_pids; +} pids = { 0 }; + +struct pid_stat *root_of_pids(void) { + return pids.all_pids.root; +} + +size_t all_pids_count(void) { + return pids.all_pids.count; +} + +void apps_pids_init(void) { + pids.all_pids.aral = aral_create("pid_stat", sizeof(struct pid_stat), 1, 65536, NULL, NULL, NULL, false, true); + simple_hashtable_init_PID(&pids.all_pids.ht, 1024); +} + +static inline uint64_t pid_hash(pid_t pid) { + return XXH3_64bits(&pid, sizeof(pid)); +} + +inline struct pid_stat *find_pid_entry(pid_t pid) { + if(pid < INIT_PID) return NULL; + + uint64_t hash = pid_hash(pid); + int32_t key = pid; + SIMPLE_HASHTABLE_SLOT_PID *sl = simple_hashtable_get_slot_PID(&pids.all_pids.ht, hash, &key, true); + return(SIMPLE_HASHTABLE_SLOT_DATA(sl)); +} + +struct pid_stat *get_or_allocate_pid_entry(pid_t pid) { + uint64_t hash = pid_hash(pid); + int32_t key = pid; + SIMPLE_HASHTABLE_SLOT_PID *sl = simple_hashtable_get_slot_PID(&pids.all_pids.ht, hash, &key, true); + struct pid_stat *p = SIMPLE_HASHTABLE_SLOT_DATA(sl); + if(likely(p)) + return p; + + p = aral_callocz(pids.all_pids.aral); + +#if (PROCESSES_HAVE_FDS == 1) + p->fds = mallocz(sizeof(struct pid_fd) * 3); // stdin, stdout, stderr + p->fds_size = 3; + init_pid_fds(p, 0, p->fds_size); +#endif + + p->pid = pid; + p->values[PDF_PROCESSES] = 1; + + DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(pids.all_pids.root, p, prev, next); + simple_hashtable_set_slot_PID(&pids.all_pids.ht, sl, hash, p); + pids.all_pids.count++; + + return p; +} + +void del_pid_entry(pid_t pid) { + uint64_t hash = pid_hash(pid); + int32_t key = pid; + SIMPLE_HASHTABLE_SLOT_PID *sl = simple_hashtable_get_slot_PID(&pids.all_pids.ht, hash, &key, true); + struct pid_stat *p = SIMPLE_HASHTABLE_SLOT_DATA(sl); + + 
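// defensive check (hypothetical failure mode): calling del_pid_entry() twice for
+ // the same exiting pid, or for a pid that was never allocated, must not crash
+ // the plugin - so we log it and return instead of freeing an empty slot
+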
if(unlikely(!p)) { + netdata_log_error("attempted to free pid %d that is not allocated.", pid); + return; + } + + debug_log("process %d %s exited, deleting it.", pid, pid_stat_comm(p)); + + DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(pids.all_pids.root, p, prev, next); + simple_hashtable_del_slot_PID(&pids.all_pids.ht, sl); + +#if defined(OS_LINUX) + { + size_t i; + for(i = 0; i < p->fds_size; i++) + if(p->fds[i].filename) + freez(p->fds[i].filename); + } + + arl_free(p->status_arl); + + freez(p->fds_dirname); + freez(p->stat_filename); + freez(p->status_filename); + freez(p->limits_filename); + freez(p->io_filename); + freez(p->cmdline_filename); +#endif + +#if (PROCESSES_HAVE_FDS == 1) + freez(p->fds); +#endif + +#if (PROCESSES_HAVE_SID == 1) + string_freez(p->sid_name); +#endif + + string_freez(p->comm_orig); + string_freez(p->comm); + string_freez(p->cmdline); + aral_freez(pids.all_pids.aral, p); + + pids.all_pids.count--; +} + +// -------------------------------------------------------------------------------------------------------------------- + +static __thread pid_t current_pid; +static __thread kernel_uint_t current_pid_values[PDF_MAX]; + +void pid_collection_started(struct pid_stat *p) { + fatal_assert(sizeof(current_pid_values) == sizeof(p->values)); + current_pid = p->pid; + memcpy(current_pid_values, p->values, sizeof(current_pid_values)); + memset(p->values, 0, sizeof(p->values)); + p->values[PDF_PROCESSES] = 1; + p->read = true; +} + +void pid_collection_failed(struct pid_stat *p) { + fatal_assert(current_pid == p->pid); + fatal_assert(sizeof(current_pid_values) == sizeof(p->values)); + memcpy(p->values, current_pid_values, sizeof(p->values)); +} + +void pid_collection_completed(struct pid_stat *p) { + p->updated = true; + p->keep = false; + p->keeploops = 0; +} + +// -------------------------------------------------------------------------------------------------------------------- +// preloading of parents before their children + +#if (ALL_PIDS_ARE_READ_INSTANTLY == 0) +static inline size_t compute_new_sorted_size(size_t old_size, size_t required_size) { + size_t size = (required_size % 1024 == 0) ? 
required_size : required_size + 1024; + size = (size / 1024) * 1024; + + if(size < old_size * 2) + size = old_size * 2; + + return size; +} + +static int compar_pid_sortlist(const void *a, const void *b) { + const struct pid_stat *p1 = *(struct pid_stat **)a; + const struct pid_stat *p2 = *(struct pid_stat **)b; + + if(p1->sortlist > p2->sortlist) + return -1; + else + return 1; +} + +bool collect_parents_before_children(void) { + if (!pids.all_pids.count) return false; + + if (pids.all_pids.count > pids.sorted.size) { + size_t new_size = compute_new_sorted_size(pids.sorted.size, pids.all_pids.count); + freez(pids.sorted.array); + pids.sorted.array = mallocz(new_size * sizeof(struct pid_stat *)); + pids.sorted.size = new_size; + } + + size_t slc = 0; + struct pid_stat *p = NULL; + uint32_t sortlist = 1; + for (p = root_of_pids(); p && slc < pids.sorted.size; p = p->next) { + pids.sorted.array[slc++] = p; + + // assign a sortlist id to it and all its parents + for (struct pid_stat *pp = p; pp ; pp = pp->parent) + pp->sortlist = sortlist++; + } + size_t sorted = slc; + + static bool logged = false; + if (unlikely(p && !logged)) { + nd_log( + NDLS_COLLECTORS, + NDLP_ERR, + "Internal error: I was thinking I had %zu processes in my arrays, but it seems there are more.", + pids.all_pids.count); + logged = true; + } + + if (include_exited_childs && sorted) { + // Read parents before children. + // This is needed to prevent a situation where + // a child is found running, but until we read + // its parent, it has exited and its parent + // has accumulated its resources. + + qsort((void *)pids.sorted.array, sorted, sizeof(struct pid_stat *), compar_pid_sortlist); + + // we forward read all running processes + // incrementally_collect_data_for_pid() is smart enough + // not to read the same pid twice per iteration + for (slc = 0; slc < sorted; slc++) { + p = pids.sorted.array[slc]; + incrementally_collect_data_for_pid_stat(p, NULL); + } + } + + return true; +} +#endif
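+// Illustrative ordering (made-up pids): for the chain 1 (init) -> 2000 (sshd) -> 2100 (bash),
+// the walk in collect_parents_before_children() assigns bash=1, sshd=2, init=3 - ancestors
+// always end up with larger ids - so the descending qsort() visits init, then sshd, then
+// bash: every parent is read before its children.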
+ +// -------------------------------------------------------------------------------------------------------------------- + +static void log_parent_loop(struct pid_stat *p) { + CLEAN_BUFFER *wb = buffer_create(0, NULL); + buffer_sprintf(wb, "original pid %d (%s)", p->pid, string2str(p->comm)); + + size_t loops = 0; + for(struct pid_stat *t = p->parent; t && loops < 2 ;t = t->parent) { + buffer_sprintf(wb, " => %d (%s)", t->pid, string2str(t->comm)); + if(t == p->parent) loops++; + } + + buffer_sprintf(wb, " : broke loop at %d (%s)", p->pid, string2str(p->comm)); + + errno_clear(); + nd_log(NDLS_COLLECTORS, NDLP_WARNING, "Parents loop detected: %s", buffer_tostring(wb)); +} + +static inline bool is_already_a_parent(struct pid_stat *p, struct pid_stat *pp) { + for(struct pid_stat *t = pp; t ;t = t->parent) + if(t == p) return true; + + return false; +} + +static inline void link_pid_to_its_parent(struct pid_stat *p) { + p->parent = NULL; + if(unlikely(!p->ppid)) + return; + + if(unlikely(p->ppid == p->pid)) { + nd_log(NDLS_COLLECTORS, NDLP_WARNING, + "Process %d (%s) states parent %d, which is the same PID. Ignoring it.", + p->pid, string2str(p->comm), p->ppid); + p->ppid = 0; + return; + } + + struct pid_stat *pp = find_pid_entry(p->ppid); + if(likely(pp)) { + fatal_assert(pp->pid == p->ppid); + + if(!is_already_a_parent(p, pp)) { + p->parent = pp; + pp->children_count++; + } + else { + p->parent = pp; + log_parent_loop(p); + p->parent = NULL; + p->ppid = 0; + } + } +#if (PPID_SHOULD_BE_RUNNING == 1) + else { + nd_log(NDLS_COLLECTORS, NDLP_WARNING, + "pid %d %s states parent %d, but the latter does not exist.", + p->pid, pid_stat_comm(p), p->ppid); + } +#endif +} + +static inline void link_all_processes_to_their_parents(void) { + // link all children to their parents + // and update children count on parents + for(struct pid_stat *p = root_of_pids(); p ; p = p->next) + link_pid_to_its_parent(p); +} + +// -------------------------------------------------------------------------------------------------------------------- + +static bool is_filename(const char *s) { + if(!s || !*s) return false; + +#if defined(OS_WINDOWS) + if( (isalpha((uint8_t)*s) && s[1] == ':' && s[2] == '\\') || // windows native "x:\" + (isalpha((uint8_t)*s) && s[1] == ':' && s[2] == '/') || // windows native "x:/" + (*s == '\\' && s[1] == '\\' && isalpha((uint8_t)s[2]) && s[3] == '\\') || // windows native "\\x\" + (*s == '/' && s[1] == '/' && isalpha((uint8_t)s[2]) && s[3] == '/')) { // windows native "//x/" + + WCHAR ws[FILENAME_MAX]; + if(utf8_to_utf16(ws, _countof(ws), s, -1) > 0) { + DWORD attributes = GetFileAttributesW(ws); + if (attributes != INVALID_FILE_ATTRIBUTES) + return true; + } + } +#endif + + // for: sh -c "exec /path/to/command parameters" + if(strncmp(s, "exec ", 5) == 0 && s[5]) { + s += 5; + char look_for = ' '; + if(*s == '\'') { look_for = '\''; s++; } + if(*s == '"') { look_for = '"'; s++; } + char *end = strchr(s, look_for); + if(end) *end = '\0'; + } + + // linux, freebsd, macos, msys, cygwin + if(*s == '/') { + struct statvfs stat; + return statvfs(s, &stat) == 0; + } + + return false; +} + +static const char *extensions_to_strip[] = { + ".sh", // shell scripts + ".py", // python scripts + ".pl", // perl scripts + ".js", // node.js +#if defined(OS_WINDOWS) + ".exe", +#endif + NULL, +}; + +// strip extensions we don't want to show +static void remove_extension(char *name) { + size_t name_len = strlen(name); + for(size_t i = 0; extensions_to_strip[i] != NULL; i++) { + const char *ext = extensions_to_strip[i]; + size_t ext_len = strlen(ext); + if(name_len > ext_len) { + char *check = &name[name_len - ext_len]; + if(strcmp(check, ext) == 0) { + *check = '\0'; + break; + } + } + } +} + +static inline STRING *comm_from_cmdline_param_sanitized(STRING *cmdline) { + if(!cmdline) return NULL; + + char buf[string_strlen(cmdline) + 1]; + memcpy(buf, string2str(cmdline), sizeof(buf)); + + char *words[100]; + size_t num_words = quoted_strings_splitter_whitespace(buf, words, 100); + for(size_t word = 1; word < num_words ;word++) { + char *s = words[word]; + if(is_filename(s)) { + char *name = strrchr(s, '/'); + +#if defined(OS_WINDOWS) + if(!name) + name = strrchr(s, '\\'); +#endif + + if(name && *name) { + name++; + remove_extension(name); + sanitize_apps_plugin_chart_meta(name); + return string_strdupz(name); + } + } + } + + return NULL; +}
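+// Illustrative example (hypothetical path): for an interpreter started as
+// "python3 /opt/acme/jobs.py --once", the first word that names an existing
+// file is "/opt/acme/jobs.py", so the comm becomes "jobs" after the ".py"
+// extension is stripped and the remaining name is sanitized.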
+static inline STRING *comm_from_cmdline_sanitized(STRING *comm, STRING *cmdline) { + if(!cmdline) return NULL; + + char buf[string_strlen(cmdline) + 1]; + memcpy(buf, string2str(cmdline), sizeof(buf)); + + size_t comm_len = string_strlen(comm); + char *start = strstr(buf, string2str(comm)); + if (start) { + char *end = start + comm_len; + while (*end && + !isspace((uint8_t) *end) && + *end != '/' && // path separator - linux + *end != '\\' && // path separator - windows + *end != '"' && // closing double quotes + *end != '\'' && // closing single quotes + *end != ')' && // sometimes processes add ) at their end + *end != ':') // sometimes processes add : at their end + end++; + + *end = '\0'; + + remove_extension(start); + sanitize_apps_plugin_chart_meta(start); + return string_strdupz(start); + } + + return NULL; +} + +static void update_pid_comm_from_cmdline(struct pid_stat *p) { + bool updated = false; + + STRING *new_comm = comm_from_cmdline_sanitized(p->comm, p->cmdline); + if(new_comm) { + string_freez(p->comm); + p->comm = new_comm; + updated = true; + } + + if(is_process_an_interpreter(p)) { + new_comm = comm_from_cmdline_param_sanitized(p->cmdline); + if(new_comm) { + string_freez(p->comm); + p->comm = new_comm; + updated = true; + } + } + + if(updated) { + p->is_manager = is_process_a_manager(p); + p->is_aggregator = is_process_an_aggregator(p); + } +} + +void update_pid_cmdline(struct pid_stat *p, const char *cmdline) { + string_freez(p->cmdline); + p->cmdline = cmdline ? string_strdupz(cmdline) : NULL; + + if(p->cmdline) + update_pid_comm_from_cmdline(p); +} + +void update_pid_comm(struct pid_stat *p, const char *comm) { + if(p->comm_orig && string_strcmp(p->comm_orig, comm) == 0) + // no change + return; + + string_freez(p->comm_orig); + p->comm_orig = string_strdupz(comm); + + // some process names have ( and ), remove the parentheses + size_t len = strlen(comm); + char buf[len + 1]; + if(comm[0] == '(' && comm[len - 1] == ')') { + memcpy(buf, &comm[1], len - 2); + buf[len - 2] = '\0'; + } + else + memcpy(buf, comm, sizeof(buf)); + + sanitize_apps_plugin_chart_meta(buf); + string_freez(p->comm); + p->comm = string_strdupz(buf); + p->is_manager = is_process_a_manager(p); + p->is_aggregator = is_process_an_aggregator(p); + +#if (PROCESSES_HAVE_CMDLINE == 1) + if(likely(proc_pid_cmdline_is_needed && !p->cmdline)) + managed_log(p, PID_LOG_CMDLINE, read_proc_pid_cmdline(p)); +#else + update_pid_comm_from_cmdline(p); +#endif + + // the process changed comm, we may have to reassign it to + // an apps_groups.conf target.
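+ // (e.g. a wrapper that exec()s into another binary flips its comm, so the
+ // NULL below forces a fresh match against apps_groups.conf on the next pass)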
+ p->target = NULL; +} + +// -------------------------------------------------------------------------------------------------------------------- + +#if (PROCESSES_HAVE_CPU_CHILDREN_TIME == 1) || (PROCESSES_HAVE_CHILDREN_FLTS == 1) +//static inline int debug_print_process_and_parents(struct pid_stat *p, usec_t time) { +// char *prefix = "\\_ "; +// int indent = 0; +// +// if(p->parent) +// indent = debug_print_process_and_parents(p->parent, p->stat_collected_usec); +// else +// prefix = " > "; +// +// char buffer[indent + 1]; +// int i; +// +// for(i = 0; i < indent ;i++) buffer[i] = ' '; +// buffer[i] = '\0'; +// +// fprintf(stderr, " %s %s%s (%d %s %"PRIu64"" +// , buffer +// , prefix +// , pid_stat_comm(p) +// , p->pid +// , p->updated?"running":"exited" +// , p->stat_collected_usec - time +// ); +// +// if(p->values[PDF_UTIME]) fprintf(stderr, " utime=" KERNEL_UINT_FORMAT, p->values[PDF_UTIME]); +// if(p->values[PDF_STIME]) fprintf(stderr, " stime=" KERNEL_UINT_FORMAT, p->values[PDF_STIME]); +//#if (PROCESSES_HAVE_CPU_GUEST_TIME == 1) +// if(p->values[PDF_GTIME]) fprintf(stderr, " gtime=" KERNEL_UINT_FORMAT, p->values[PDF_GTIME]); +//#endif +//#if (PROCESSES_HAVE_CPU_CHILDREN_TIME == 1) +// if(p->values[PDF_CUTIME]) fprintf(stderr, " cutime=" KERNEL_UINT_FORMAT, p->values[PDF_CUTIME]); +// if(p->values[PDF_CSTIME]) fprintf(stderr, " cstime=" KERNEL_UINT_FORMAT, p->values[PDF_CSTIME]); +//#if (PROCESSES_HAVE_CPU_GUEST_TIME == 1) +// if(p->values[PDF_CGTIME]) fprintf(stderr, " cgtime=" KERNEL_UINT_FORMAT, p->values[PDF_CGTIME]); +//#endif +//#endif +// if(p->values[PDF_MINFLT]) fprintf(stderr, " minflt=" KERNEL_UINT_FORMAT, p->values[PDF_MINFLT]); +//#if (PROCESSES_HAVE_MAJFLT == 1) +// if(p->values[PDF_MAJFLT]) fprintf(stderr, " majflt=" KERNEL_UINT_FORMAT, p->values[PDF_MAJFLT]); +//#endif +//#if (PROCESSES_HAVE_CHILDREN_FLTS == 1) +// if(p->values[PDF_CMINFLT]) fprintf(stderr, " cminflt=" KERNEL_UINT_FORMAT, p->values[PDF_CMINFLT]); +// if(p->values[PDF_CMAJFLT]) fprintf(stderr, " cmajflt=" KERNEL_UINT_FORMAT, p->values[PDF_CMAJFLT]); +//#endif +// fprintf(stderr, ")\n"); +// +// return indent + 1; +//} +// +//static inline void debug_print_process_tree(struct pid_stat *p, char *msg __maybe_unused) { +// debug_log("%s: process %s (%d, %s) with parents:", msg, pid_stat_comm(p), p->pid, p->updated?"running":"exited"); +// debug_print_process_and_parents(p, p->stat_collected_usec); +//} +// +//static inline void debug_find_lost_child(struct pid_stat *pe, kernel_uint_t lost, int type) { +// int found = 0; +// struct pid_stat *p = NULL; +// +// for(p = root_of_pids(); p ; p = p->next) { +// if(p == pe) continue; +// +// switch(type) { +// case 1: +//#if (PROCESSES_HAVE_CHILDREN_FLTS == 1) +// if(p->values[PDF_CMINFLT] > lost) { +// fprintf(stderr, " > process %d (%s) could use the lost exited child minflt " KERNEL_UINT_FORMAT " of process %d (%s)\n", +// p->pid, pid_stat_comm(p), lost, pe->pid, pid_stat_comm(pe)); +// found++; +// } +//#endif +// break; +// +// case 2: +//#if (PROCESSES_HAVE_CHILDREN_FLTS == 1) +// if(p->values[PDF_CMAJFLT] > lost) { +// fprintf(stderr, " > process %d (%s) could use the lost exited child majflt " KERNEL_UINT_FORMAT " of process %d (%s)\n", +// p->pid, pid_stat_comm(p), lost, pe->pid, pid_stat_comm(pe)); +// found++; +// } +//#endif +// break; +// +// case 3: +//#if (PROCESSES_HAVE_CPU_CHILDREN_TIME == 1) +// if(p->values[PDF_CUTIME] > lost) { +// fprintf(stderr, " > process %d (%s) could use the lost exited child utime " KERNEL_UINT_FORMAT " of process %d 
(%s)\n", +// p->pid, pid_stat_comm(p), lost, pe->pid, pid_stat_comm(pe)); +// found++; +// } +//#endif +// break; +// +// case 4: +//#if (PROCESSES_HAVE_CPU_CHILDREN_TIME == 1) +// if(p->values[PDF_CSTIME] > lost) { +// fprintf(stderr, " > process %d (%s) could use the lost exited child stime " KERNEL_UINT_FORMAT " of process %d (%s)\n", +// p->pid, pid_stat_comm(p), lost, pe->pid, pid_stat_comm(pe)); +// found++; +// } +//#endif +// break; +// +// case 5: +//#if (PROCESSES_HAVE_CPU_CHILDREN_TIME == 1) && (PROCESSES_HAVE_CPU_GUEST_TIME == 1) +// if(p->values[PDF_CGTIME] > lost) { +// fprintf(stderr, " > process %d (%s) could use the lost exited child gtime " KERNEL_UINT_FORMAT " of process %d (%s)\n", +// p->pid, pid_stat_comm(p), lost, pe->pid, pid_stat_comm(pe)); +// found++; +// } +//#endif +// break; +// } +// } +// +// if(!found) { +// switch(type) { +// case 1: +// fprintf(stderr, " > cannot find any process to use the lost exited child minflt " KERNEL_UINT_FORMAT " of process %d (%s)\n", +// lost, pe->pid, pid_stat_comm(pe)); +// break; +// +// case 2: +// fprintf(stderr, " > cannot find any process to use the lost exited child majflt " KERNEL_UINT_FORMAT " of process %d (%s)\n", +// lost, pe->pid, pid_stat_comm(pe)); +// break; +// +// case 3: +// fprintf(stderr, " > cannot find any process to use the lost exited child utime " KERNEL_UINT_FORMAT " of process %d (%s)\n", +// lost, pe->pid, pid_stat_comm(pe)); +// break; +// +// case 4: +// fprintf(stderr, " > cannot find any process to use the lost exited child stime " KERNEL_UINT_FORMAT " of process %d (%s)\n", +// lost, pe->pid, pid_stat_comm(pe)); +// break; +// +// case 5: +// fprintf(stderr, " > cannot find any process to use the lost exited child gtime " KERNEL_UINT_FORMAT " of process %d (%s)\n", +// lost, pe->pid, pid_stat_comm(pe)); +// break; +// } +// } +//} + +static inline kernel_uint_t remove_exited_child_from_parent(kernel_uint_t *field, kernel_uint_t *pfield) { + kernel_uint_t absorbed = 0; + + if(*field > *pfield) { + absorbed += *pfield; + *field -= *pfield; + *pfield = 0; + } + else { + absorbed += *field; + *pfield -= *field; + *field = 0; + } + + return absorbed; +} + +static inline void process_exited_pids(void) { + /* + * WHY WE NEED THIS? + * + * When a child process exits in Linux, its accumulated user time (utime) and its children's accumulated + * user time (cutime) are added to the parent's cutime. This means the parent process's cutime reflects + * the total user time spent by its exited children and their descendants + * + * This results in spikes in the charts. + * In this function we remove the exited children resources from the parent's cutime, but only for the + * children we have been monitoring and to the degree we have data for them. Since previously running + * children have already been reported by us, removing them is the right thing to do. 
+ + for(struct pid_stat *p = root_of_pids(); p ; p = p->next) { + if(p->updated || !p->stat_collected_usec) + continue; + + bool have_work = false; + +#if (PROCESSES_HAVE_CPU_CHILDREN_TIME == 1) + kernel_uint_t utime = (p->raw[PDF_UTIME] + p->raw[PDF_CUTIME]) * CPU_TO_NANOSECONDCORES; + kernel_uint_t stime = (p->raw[PDF_STIME] + p->raw[PDF_CSTIME]) * CPU_TO_NANOSECONDCORES; + if(utime + stime) have_work = true; +#if (PROCESSES_HAVE_CPU_GUEST_TIME == 1) + kernel_uint_t gtime = (p->raw[PDF_GTIME] + p->raw[PDF_CGTIME]) * CPU_TO_NANOSECONDCORES; + if(gtime) have_work = true; +#endif +#endif + +#if (PROCESSES_HAVE_CHILDREN_FLTS == 1) + kernel_uint_t minflt = (p->raw[PDF_MINFLT] + p->raw[PDF_CMINFLT]) * RATES_DETAIL; + if(minflt) have_work = true; +#if (PROCESSES_HAVE_MAJFLT == 1) + kernel_uint_t majflt = (p->raw[PDF_MAJFLT] + p->raw[PDF_CMAJFLT]) * RATES_DETAIL; + if(majflt) have_work = true; +#endif +#endif + + if(!have_work) + continue; + +// if(unlikely(debug_enabled)) { +// debug_log("Absorb %s (%d %s total resources: utime=" KERNEL_UINT_FORMAT " stime=" KERNEL_UINT_FORMAT " gtime=" KERNEL_UINT_FORMAT " minflt=" KERNEL_UINT_FORMAT " majflt=" KERNEL_UINT_FORMAT ")" +// , pid_stat_comm(p) +// , p->pid +// , p->updated?"running":"exited" +// , utime +// , stime +// , gtime +// , minflt +// , majflt +// ); +// debug_print_process_tree(p, "Searching parents"); +// } + + for(struct pid_stat *pp = p->parent; pp ; pp = pp->parent) { + if(!pp->updated) continue; + + kernel_uint_t absorbed; +#if (PROCESSES_HAVE_CPU_CHILDREN_TIME == 1) + absorbed = remove_exited_child_from_parent(&utime, &pp->values[PDF_CUTIME]); +// if(unlikely(debug_enabled && absorbed)) +// debug_log(" > process %s (%d %s) absorbed " KERNEL_UINT_FORMAT " utime (remaining: " KERNEL_UINT_FORMAT ")", +// pid_stat_comm(pp), pp->pid, pp->updated?"running":"exited", absorbed, utime); + + absorbed = remove_exited_child_from_parent(&stime, &pp->values[PDF_CSTIME]); +// if(unlikely(debug_enabled && absorbed)) +// debug_log(" > process %s (%d %s) absorbed " KERNEL_UINT_FORMAT " stime (remaining: " KERNEL_UINT_FORMAT ")", +// pid_stat_comm(pp), pp->pid, pp->updated?"running":"exited", absorbed, stime); + +#if (PROCESSES_HAVE_CPU_GUEST_TIME == 1) + absorbed = remove_exited_child_from_parent(&gtime, &pp->values[PDF_CGTIME]); +// if(unlikely(debug_enabled && absorbed)) +// debug_log(" > process %s (%d %s) absorbed " KERNEL_UINT_FORMAT " gtime (remaining: " KERNEL_UINT_FORMAT ")", +// pid_stat_comm(pp), pp->pid, pp->updated?"running":"exited", absorbed, gtime); +#endif +#endif + +#if (PROCESSES_HAVE_CHILDREN_FLTS == 1) + absorbed = remove_exited_child_from_parent(&minflt, &pp->values[PDF_CMINFLT]); +// if(unlikely(debug_enabled && absorbed)) +// debug_log(" > process %s (%d %s) absorbed " KERNEL_UINT_FORMAT " minflt (remaining: " KERNEL_UINT_FORMAT ")", +// pid_stat_comm(pp), pp->pid, pp->updated?"running":"exited", absorbed, minflt); + +#if (PROCESSES_HAVE_MAJFLT == 1) + absorbed = remove_exited_child_from_parent(&majflt, &pp->values[PDF_CMAJFLT]); +// if(unlikely(debug_enabled && absorbed)) +// debug_log(" > process %s (%d %s) absorbed " KERNEL_UINT_FORMAT " majflt (remaining: " KERNEL_UINT_FORMAT ")", +// pid_stat_comm(pp), pp->pid, pp->updated?"running":"exited", absorbed, majflt); +#endif +#endif + + (void)absorbed; + break; + } + +// if(unlikely(debug_enabled)) { +// if(utime) debug_find_lost_child(p, utime, 3); +// if(stime) debug_find_lost_child(p, stime, 4); +// if(gtime) debug_find_lost_child(p, gtime, 5); +// if(minflt)
debug_find_lost_child(p, minflt, 1); +// if(majflt) debug_find_lost_child(p, majflt, 2); +// } + +// debug_log(" > remaining resources - KEEP - for another loop: %s (%d %s total resources: utime=" KERNEL_UINT_FORMAT " stime=" KERNEL_UINT_FORMAT " gtime=" KERNEL_UINT_FORMAT " minflt=" KERNEL_UINT_FORMAT " majflt=" KERNEL_UINT_FORMAT ")" +// , pid_stat_comm(p) +// , p->pid +// , p->updated?"running":"exited" +// , utime +// , stime +// , gtime +// , minflt +// , majflt +// ); + + bool done = true; + +#if (PROCESSES_HAVE_CPU_CHILDREN_TIME == 1) + p->values[PDF_UTIME] = utime / CPU_TO_NANOSECONDCORES; + p->values[PDF_STIME] = stime / CPU_TO_NANOSECONDCORES; + p->values[PDF_CUTIME] = 0; + p->values[PDF_CSTIME] = 0; + if(utime + stime) done = false; +#if (PROCESSES_HAVE_CPU_GUEST_TIME == 1) + p->values[PDF_GTIME] = gtime / CPU_TO_NANOSECONDCORES; + p->values[PDF_CGTIME] = 0; + if(gtime) done = false; +#endif +#endif + +#if (PROCESSES_HAVE_CHILDREN_FLTS == 1) + p->values[PDF_MINFLT] = minflt / RATES_DETAIL; + p->values[PDF_CMINFLT] = 0; + if(minflt) done = false; +#if (PROCESSES_HAVE_MAJFLT == 1) + p->values[PDF_MAJFLT] = majflt / RATES_DETAIL; + p->values[PDF_CMAJFLT] = 0; + if(majflt) done = false; +#endif +#endif + + p->keep = !done; + + if(p->keep) { + // we need to keep its exited parents too, to ensure we will have + // the information to reach the running parent at the next iteration + for (struct pid_stat *pp = p->parent; pp; pp = pp->parent) { + if (pp->updated) break; + pp->keep = true; + } + } + } +} +#endif + +// -------------------------------------------------------------------------------------------------------------------- +// the main loop for collecting process data + +static inline void clear_pid_rates(struct pid_stat *p) { + p->values[PDF_UTIME] = 0; + p->values[PDF_STIME] = 0; + +#if (PROCESSES_HAVE_CPU_GUEST_TIME == 1) + p->values[PDF_GTIME] = 0; +#endif + +#if (PROCESSES_HAVE_CPU_CHILDREN_TIME == 1) + p->values[PDF_CUTIME] = 0; + p->values[PDF_CSTIME] = 0; +#if (PROCESSES_HAVE_CPU_GUEST_TIME == 1) + p->values[PDF_CGTIME] = 0; +#endif +#endif + + p->values[PDF_MINFLT] = 0; +#if (PROCESSES_HAVE_MAJFLT == 1) + p->values[PDF_MAJFLT] = 0; +#endif + +#if (PROCESSES_HAVE_CHILDREN_FLTS == 1) + p->values[PDF_CMINFLT] = 0; + p->values[PDF_CMAJFLT] = 0; +#endif + +#if (PROCESSES_HAVE_LOGICAL_IO == 1) + p->values[PDF_LREAD] = 0; + p->values[PDF_LWRITE] = 0; +#endif + +#if (PROCESSES_HAVE_PHYSICAL_IO == 1) + p->values[PDF_PREAD] = 0; + p->values[PDF_PWRITE] = 0; +#endif + +#if (PROCESSES_HAVE_IO_CALLS == 1) + p->values[PDF_OREAD] = 0; + p->values[PDF_OWRITE] = 0; +#endif + +#if (PROCESSES_HAVE_VOLCTX == 1) + p->values[PDF_VOLCTX] = 0; +#endif + +#if (PROCESSES_HAVE_NVOLCTX == 1) + p->values[PDF_NVOLCTX] = 0; +#endif +} + +bool collect_data_for_all_pids(void) { + // mark all pids as unread +#if (INCREMENTAL_DATA_COLLECTION == 0) + usec_t now_mon_ut = now_monotonic_usec(); +#endif + + for(struct pid_stat *p = root_of_pids(); p ; p = p->next) { + p->read = p->updated = p->merged = false; + p->children_count = 0; + +#if (INCREMENTAL_DATA_COLLECTION == 0) + p->last_stat_collected_usec = p->stat_collected_usec; + p->last_io_collected_usec = p->io_collected_usec; + p->stat_collected_usec = p->io_collected_usec = now_mon_ut; +#endif + } + + // collect data for all pids + if(!OS_FUNCTION(apps_os_collect_all_pids)()) + return false; + + // build the process tree + link_all_processes_to_their_parents(); + +#if (PROCESSES_HAVE_CPU_CHILDREN_TIME == 1) || (PROCESSES_HAVE_CHILDREN_FLTS == 1) + // 
merge exited pids to their parents
+ process_exited_pids();
+#endif
+
+ // the first iteration needs to be eliminated
+ // since we are looking for rates
+ if(unlikely(global_iterations_counter == 1)) {
+ for(struct pid_stat *p = root_of_pids(); p ; p = p->next)
+ if(p->read) clear_pid_rates(p);
+ }
+
+ return true;
+}
diff --git a/src/collectors/apps.plugin/apps_pid_files.c b/src/collectors/apps.plugin/apps_pid_files.c
new file mode 100644
index 000000000..53e53899c
--- /dev/null
+++ b/src/collectors/apps.plugin/apps_pid_files.c
@@ -0,0 +1,450 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "apps_plugin.h"
+
+static uint32_t
+ all_files_len = 0,
+ all_files_size = 0;
+
+uint32_t all_files_len_get(void) {
+ (void)all_files_size;
+ return all_files_len;
+}
+
+#if (PROCESSES_HAVE_FDS == 1)
+// ----------------------------------------------------------------------------
+// file descriptor
+//
+// this is used to keep a global list of all open files of the system.
+// it is needed in order to calculate the unique files processes have open.
+
+#define FILE_DESCRIPTORS_INCREASE_STEP 100
+
+// types for struct file_descriptor->type
+typedef enum __attribute__((packed)) fd_filetype {
+ FILETYPE_OTHER,
+ FILETYPE_FILE,
+ FILETYPE_PIPE,
+ FILETYPE_SOCKET,
+ FILETYPE_INOTIFY,
+ FILETYPE_EVENTFD,
+ FILETYPE_EVENTPOLL,
+ FILETYPE_TIMERFD,
+ FILETYPE_SIGNALFD
+} FD_FILETYPE;
+
+struct file_descriptor {
+ avl_t avl;
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ uint32_t magic;
+#endif /* NETDATA_INTERNAL_CHECKS */
+
+ const char *name;
+ uint32_t hash;
+ uint32_t count;
+ uint32_t pos;
+ FD_FILETYPE type;
+} *all_files = NULL;
+
+// ----------------------------------------------------------------------------
+
+static inline void reallocate_target_fds(struct target *w) {
+ if(unlikely(!w))
+ return;
+
+ if(unlikely(!w->target_fds || w->target_fds_size < all_files_size)) {
+ w->target_fds = reallocz(w->target_fds, sizeof(int) * all_files_size);
+ memset(&w->target_fds[w->target_fds_size], 0, sizeof(int) * (all_files_size - w->target_fds_size));
+ w->target_fds_size = all_files_size;
+ }
+}
+
+static void aggregate_fd_type_on_openfds(FD_FILETYPE type, struct openfds *openfds) {
+ switch(type) {
+ case FILETYPE_SOCKET:
+ openfds->sockets++;
+ break;
+
+ case FILETYPE_FILE:
+ openfds->files++;
+ break;
+
+ case FILETYPE_PIPE:
+ openfds->pipes++;
+ break;
+
+ case FILETYPE_INOTIFY:
+ openfds->inotifies++;
+ break;
+
+ case FILETYPE_EVENTFD:
+ openfds->eventfds++;
+ break;
+
+ case FILETYPE_TIMERFD:
+ openfds->timerfds++;
+ break;
+
+ case FILETYPE_SIGNALFD:
+ openfds->signalfds++;
+ break;
+
+ case FILETYPE_EVENTPOLL:
+ openfds->eventpolls++;
+ break;
+
+ case FILETYPE_OTHER:
+ openfds->other++;
+ break;
+ }
+}
+
+static inline void aggregate_fd_on_target(int fd, struct target *w) {
+ if(unlikely(!w))
+ return;
+
+ if(unlikely(w->target_fds[fd])) {
+ // it is already aggregated
+ // just increase its usage counter
+ w->target_fds[fd]++;
+ return;
+ }
+
+ // increase its usage counter
+ // so that we will not add it again
+ w->target_fds[fd]++;
+
+ aggregate_fd_type_on_openfds(all_files[fd].type, &w->openfds);
+}
+
+void aggregate_pid_fds_on_targets(struct pid_stat *p) {
+ if(enable_file_charts == CONFIG_BOOLEAN_AUTO && all_files_len > MAX_SYSTEM_FD_TO_ALLOW_FILES_PROCESSING) {
+ nd_log(NDLS_COLLECTORS, NDLP_NOTICE, "apps.plugin: the number of system file descriptors is too high (%u), "
+ "disabling file charts. If you want this enabled, add the 'with-files' "
+ "parameter to the [plugin:apps] section of netdata.conf", all_files_len);
+
+ enable_file_charts = CONFIG_BOOLEAN_NO;
+ obsolete_file_charts = true;
+ return;
+ }
+
+ if(unlikely(!p->updated)) {
+ // the process is not running
+ return;
+ }
+
+ struct target
+#if (PROCESSES_HAVE_UID == 1)
+ *u = p->uid_target,
+#endif
+#if (PROCESSES_HAVE_GID == 1)
+ *g = p->gid_target,
+#endif
+ *w = p->target;
+
+ reallocate_target_fds(w);
+#if (PROCESSES_HAVE_UID == 1)
+ reallocate_target_fds(u);
+#endif
+#if (PROCESSES_HAVE_GID == 1)
+ reallocate_target_fds(g);
+#endif
+
+#if (PROCESSES_HAVE_FDS == 1)
+ p->openfds.files = 0;
+ p->openfds.pipes = 0;
+ p->openfds.sockets = 0;
+ p->openfds.inotifies = 0;
+ p->openfds.eventfds = 0;
+ p->openfds.timerfds = 0;
+ p->openfds.signalfds = 0;
+ p->openfds.eventpolls = 0;
+ p->openfds.other = 0;
+
+ uint32_t c, size = p->fds_size;
+ struct pid_fd *fds = p->fds;
+ for(c = 0; c < size ;c++) {
+ int fd = fds[c].fd;
+
+ if(likely(fd <= 0 || (uint32_t)fd >= all_files_size))
+ continue;
+
+ aggregate_fd_type_on_openfds(all_files[fd].type, &p->openfds);
+
+ aggregate_fd_on_target(fd, w);
+#if (PROCESSES_HAVE_UID == 1)
+ aggregate_fd_on_target(fd, u);
+#endif
+#if (PROCESSES_HAVE_GID == 1)
+ aggregate_fd_on_target(fd, g);
+#endif
+ }
+#endif
+}
+
+// ----------------------------------------------------------------------------
+
+int file_descriptor_compare(void* a, void* b) {
+#ifdef NETDATA_INTERNAL_CHECKS
+ if(((struct file_descriptor *)a)->magic != 0x0BADCAFE || ((struct file_descriptor *)b)->magic != 0x0BADCAFE)
+ netdata_log_error("Corrupted index data detected. Please report this.");
+#endif /* NETDATA_INTERNAL_CHECKS */
+
+ if(((struct file_descriptor *)a)->hash < ((struct file_descriptor *)b)->hash)
+ return -1;
+
+ else if(((struct file_descriptor *)a)->hash > ((struct file_descriptor *)b)->hash)
+ return 1;
+
+ else
+ return strcmp(((struct file_descriptor *)a)->name, ((struct file_descriptor *)b)->name);
+}
+
+// int file_descriptor_iterator(avl_t *a) { if(a) {}; return 0; }
+
+avl_tree_type all_files_index = {
+ NULL,
+ file_descriptor_compare
+};
+
+static struct file_descriptor *file_descriptor_find(const char *name, uint32_t hash) {
+ struct file_descriptor tmp;
+ tmp.hash = (hash)?hash:simple_hash(name);
+ tmp.name = name;
+ tmp.count = 0;
+ tmp.pos = 0;
+#ifdef NETDATA_INTERNAL_CHECKS
+ tmp.magic = 0x0BADCAFE;
+#endif /* NETDATA_INTERNAL_CHECKS */
+
+ return (struct file_descriptor *)avl_search(&all_files_index, (avl_t *) &tmp);
+}
+
+#define file_descriptor_add(fd) avl_insert(&all_files_index, (avl_t *)(fd))
+#define file_descriptor_remove(fd) avl_remove(&all_files_index, (avl_t *)(fd))
+
+// ----------------------------------------------------------------------------
+
+void file_descriptor_not_used(int id) {
+ if(id > 0 && (uint32_t)id < all_files_size) {
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ if(all_files[id].magic != 0x0BADCAFE) {
+ netdata_log_error("Ignoring request to remove empty file id %d.", id);
+ return;
+ }
+#endif /* NETDATA_INTERNAL_CHECKS */
+
+ debug_log("decreasing slot %d (count = %d).", id, all_files[id].count);
+
+ if(all_files[id].count > 0) {
+ all_files[id].count--;
+
+ if(!all_files[id].count) {
+ debug_log(" >> slot %d is empty.", id);
+
+ if(unlikely(file_descriptor_remove(&all_files[id]) != (void *)&all_files[id]))
+ netdata_log_error("INTERNAL ERROR: removal of unused fd from index, removed a different fd");
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ all_files[id].magic = 0x00000000;
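+ // Slot lifecycle (illustrative, hypothetical numbers): if two processes
+ // had "socket:[12345]" open, the slot was registered once with count=2;
+ // each file_descriptor_not_used() call decrements the count, and only
+ // when it reaches zero (as it just did here) is the slot removed from
+ // the index and made available for reuse.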
+#endif /* NETDATA_INTERNAL_CHECKS */ + all_files_len--; + } + } + else + netdata_log_error("Request to decrease counter of fd %d (%s), while the use counter is 0", + id, all_files[id].name); + } + else + netdata_log_error("Request to decrease counter of fd %d, which is outside the array size (1 to %"PRIu32")", + id, all_files_size); +} + +static inline void all_files_grow() { + void *old = all_files; + + uint32_t new_size = (all_files_size > 0) ? all_files_size * 2 : 2048; + + // there is no empty slot + all_files = reallocz(all_files, new_size * sizeof(struct file_descriptor)); + + // if the address changed, we have to rebuild the index + // since all pointers are now invalid + + if(unlikely(old && old != (void *)all_files)) { + all_files_index.root = NULL; + for(uint32_t i = 0; i < all_files_size; i++) { + if(!all_files[i].count) continue; + if(unlikely(file_descriptor_add(&all_files[i]) != (void *)&all_files[i])) + netdata_log_error("INTERNAL ERROR: duplicate indexing of fd during realloc."); + } + } + + // initialize the newly added entries + + for(uint32_t i = all_files_size; i < new_size; i++) { + all_files[i].count = 0; + all_files[i].name = NULL; +#ifdef NETDATA_INTERNAL_CHECKS + all_files[i].magic = 0x00000000; +#endif /* NETDATA_INTERNAL_CHECKS */ + all_files[i].pos = i; + } + + if(unlikely(!all_files_size)) all_files_len = 1; + all_files_size = new_size; +} + +static inline uint32_t file_descriptor_set_on_empty_slot(const char *name, uint32_t hash, FD_FILETYPE type) { + // check we have enough memory to add it + if(!all_files || all_files_len == all_files_size) + all_files_grow(); + + debug_log(" >> searching for empty slot."); + + // search for an empty slot + + static int last_pos = 0; + uint32_t i, c; + for(i = 0, c = last_pos ; i < all_files_size ; i++, c++) { + if(c >= all_files_size) c = 0; + if(c == 0) continue; + + if(!all_files[c].count) { + debug_log(" >> Examining slot %d.", c); + +#ifdef NETDATA_INTERNAL_CHECKS + if(all_files[c].magic == 0x0BADCAFE && all_files[c].name && file_descriptor_find(all_files[c].name, all_files[c].hash)) + netdata_log_error("fd on position %"PRIu32" is not cleared properly. 
It still has %s in it.", c, all_files[c].name); +#endif /* NETDATA_INTERNAL_CHECKS */ + + debug_log(" >> %s fd position %d for %s (last name: %s)", all_files[c].name?"re-using":"using", c, name, all_files[c].name); + + freez((void *)all_files[c].name); + all_files[c].name = NULL; + last_pos = c; + break; + } + } + + all_files_len++; + + if(i == all_files_size) { + fatal("We should find an empty slot, but there isn't any"); + exit(1); + } + // else we have an empty slot in 'c' + + debug_log(" >> updating slot %d.", c); + + all_files[c].name = strdupz(name); + all_files[c].hash = hash; + all_files[c].type = type; + all_files[c].pos = c; + all_files[c].count = 1; +#ifdef NETDATA_INTERNAL_CHECKS + all_files[c].magic = 0x0BADCAFE; +#endif /* NETDATA_INTERNAL_CHECKS */ + if(unlikely(file_descriptor_add(&all_files[c]) != (void *)&all_files[c])) + netdata_log_error("INTERNAL ERROR: duplicate indexing of fd."); + + return c; +} + +uint32_t file_descriptor_find_or_add(const char *name, uint32_t hash) { + if(unlikely(!hash)) + hash = simple_hash(name); + + debug_log("adding or finding name '%s' with hash %u", name, hash); + + struct file_descriptor *fd = file_descriptor_find(name, hash); + if(fd) { + // found + debug_log(" >> found on slot %d", fd->pos); + + fd->count++; + return fd->pos; + } + // not found + + FD_FILETYPE type; + if(likely(name[0] == '/')) type = FILETYPE_FILE; + else if(likely(strncmp(name, "pipe:", 5) == 0)) type = FILETYPE_PIPE; + else if(likely(strncmp(name, "socket:", 7) == 0)) type = FILETYPE_SOCKET; + else if(likely(strncmp(name, "anon_inode:", 11) == 0)) { + const char *t = &name[11]; + + if(strcmp(t, "inotify") == 0) type = FILETYPE_INOTIFY; + else if(strcmp(t, "[eventfd]") == 0) type = FILETYPE_EVENTFD; + else if(strcmp(t, "[eventpoll]") == 0) type = FILETYPE_EVENTPOLL; + else if(strcmp(t, "[timerfd]") == 0) type = FILETYPE_TIMERFD; + else if(strcmp(t, "[signalfd]") == 0) type = FILETYPE_SIGNALFD; + else { + debug_log("UNKNOWN anonymous inode: %s", name); + type = FILETYPE_OTHER; + } + } + else if(likely(strcmp(name, "inotify") == 0)) type = FILETYPE_INOTIFY; + else { + debug_log("UNKNOWN linkname: %s", name); + type = FILETYPE_OTHER; + } + + return file_descriptor_set_on_empty_slot(name, hash, type); +} + +void clear_pid_fd(struct pid_fd *pfd) { + pfd->fd = 0; + +#if defined(OS_LINUX) + pfd->link_hash = 0; + pfd->inode = 0; + pfd->cache_iterations_counter = 0; + pfd->cache_iterations_reset = 0; +#endif +} + +void make_all_pid_fds_negative(struct pid_stat *p) { + struct pid_fd *pfd = p->fds, *pfdend = &p->fds[p->fds_size]; + while(pfd < pfdend) { + pfd->fd = -(pfd->fd); + pfd++; + } +} + +static inline void cleanup_negative_pid_fds(struct pid_stat *p) { + struct pid_fd *pfd = p->fds, *pfdend = &p->fds[p->fds_size]; + + while(pfd < pfdend) { + int fd = pfd->fd; + + if(unlikely(fd < 0)) { + file_descriptor_not_used(-(fd)); + clear_pid_fd(pfd); + } + + pfd++; + } +} + +void init_pid_fds(struct pid_stat *p, size_t first, size_t size) { + struct pid_fd *pfd = &p->fds[first], *pfdend = &p->fds[first + size]; + + while(pfd < pfdend) { +#if defined(OS_LINUX) + pfd->filename = NULL; +#endif + clear_pid_fd(pfd); + pfd++; + } +} + +int read_pid_file_descriptors(struct pid_stat *p, void *ptr) { + bool ret = OS_FUNCTION(apps_os_read_pid_fds)(p, ptr); + cleanup_negative_pid_fds(p); + + return ret ? 
1 : 0;
+}
+#endif
\ No newline at end of file
diff --git a/src/collectors/apps.plugin/apps_pid_match.c b/src/collectors/apps.plugin/apps_pid_match.c
new file mode 100644
index 000000000..121899b09
--- /dev/null
+++ b/src/collectors/apps.plugin/apps_pid_match.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "apps_plugin.h"
+
+bool pid_match_check(struct pid_stat *p, APPS_MATCH *match) {
+ if(!match->starts_with && !match->ends_with) {
+ if(match->pattern) {
+ if(simple_pattern_matches_string(match->pattern, p->comm))
+ return true;
+ }
+ else {
+ if(match->compare == p->comm || match->compare == p->comm_orig)
+ return true;
+ }
+ }
+ else if(match->starts_with && !match->ends_with) {
+ if(match->pattern) {
+ if(simple_pattern_matches_string(match->pattern, p->comm))
+ return true;
+ }
+ else {
+ if(string_starts_with_string(p->comm, match->compare) ||
+ (p->comm != p->comm_orig && string_starts_with_string(p->comm_orig, match->compare)))
+ return true;
+ }
+ }
+ else if(!match->starts_with && match->ends_with) {
+ if(match->pattern) {
+ if(simple_pattern_matches_string(match->pattern, p->comm))
+ return true;
+ }
+ else {
+ if(string_ends_with_string(p->comm, match->compare) ||
+ (p->comm != p->comm_orig && string_ends_with_string(p->comm_orig, match->compare)))
+ return true;
+ }
+ }
+ else if(match->starts_with && match->ends_with && p->cmdline) {
+ if(match->pattern) {
+ if(simple_pattern_matches_string(match->pattern, p->cmdline))
+ return true;
+ }
+ else {
+ if(strstr(string2str(p->cmdline), string2str(match->compare)))
+ return true;
+ }
+ }
+
+ return false;
+}
+
+APPS_MATCH pid_match_create(const char *comm) {
+ APPS_MATCH m = {
+ .starts_with = false,
+ .ends_with = false,
+ .compare = NULL,
+ .pattern = NULL,
+ };
+
+ // copy comm to make changes to it
+ size_t len = strlen(comm);
+ char buf[len + 1];
+ memcpy(buf, comm, sizeof(buf));
+
+ trim_all(buf);
+ len = strlen(buf); // trim_all() may have shortened the string
+
+ if(len && buf[len - 1] == '*') {
+ buf[--len] = '\0';
+ m.starts_with = true;
+ }
+
+ const char *nid = buf;
+ if (nid[0] == '*') {
+ m.ends_with = true;
+ nid++;
+ }
+
+ m.compare = string_strdupz(nid);
+
+ if(strchr(nid, '*'))
+ m.pattern = simple_pattern_create(comm, SIMPLE_PATTERN_NO_SEPARATORS, SIMPLE_PATTERN_EXACT, true);
+
+ return m;
+}
+
+void pid_match_cleanup(APPS_MATCH *m) {
+ string_freez(m->compare);
+ simple_pattern_free(m->pattern);
+}
+
diff --git a/src/collectors/apps.plugin/apps_plugin.c b/src/collectors/apps.plugin/apps_plugin.c
index 8fe1ff008..b8ea0e797 100644
--- a/src/collectors/apps.plugin/apps_plugin.c
+++ b/src/collectors/apps.plugin/apps_plugin.c
@@ -27,18 +27,21 @@
 // options
 
 bool debug_enabled = false;
-bool enable_guest_charts = false;
+
 bool enable_detailed_uptime_charts = false;
 bool enable_users_charts = true;
 bool enable_groups_charts = true;
 bool include_exited_childs = true;
-bool proc_pid_cmdline_is_needed = false; // true when we need to read /proc/cmdline
-
-#if defined(__FreeBSD__) || defined(__APPLE__)
-bool enable_file_charts = false;
-#else
-bool enable_file_charts = true;
+bool proc_pid_cmdline_is_needed = true; // true when we need to read /proc/cmdline
+
+#if defined(OS_FREEBSD) || defined(OS_MACOS)
+int enable_file_charts = CONFIG_BOOLEAN_NO;
+#elif defined(OS_LINUX)
+int enable_file_charts = CONFIG_BOOLEAN_AUTO;
+#elif defined(OS_WINDOWS)
+int enable_file_charts = CONFIG_BOOLEAN_YES;
 #endif
+bool obsolete_file_charts = false;
 
 // ----------------------------------------------------------------------------
 // internal counters
 
@@ -53,19 +56,16 @@ size_t
 targets_assignment_counter = 0,
 apps_groups_targets_count = 0; // # of apps_groups.conf targets
 
-int
- all_files_len = 0,
- all_files_size = 0,
- show_guest_time = 0, // 1 when guest values are collected
- show_guest_time_old = 0;
-
-#if defined(__FreeBSD__) || defined(__APPLE__)
-usec_t system_current_time_ut;
-#else
-kernel_uint_t system_uptime_secs;
+#if (PROCESSES_HAVE_CPU_GUEST_TIME == 1)
+bool enable_guest_charts = false;
+bool show_guest_time = false; // set when guest values are collected
 #endif
 
-// ----------------------------------------------------------------------------
+uint32_t
+ all_files_len = 0,
+ all_files_size = 0;
+
+// --------------------------------------------------------------------------------------------------------------------
 // Normalization
 //
 // With normalization we lower the collected metrics by a factor to make them
@@ -74,16 +74,18 @@ kernel_uint_t system_uptime_secs;
 // the metrics. This results in utilization that exceeds the total utilization
 // of the system.
 //
-// During normalization, we align the per-process utilization, to the total of
-// the system. We first consume the exited children utilization and it the
-// collected values is above the total, we proportionally scale each reported
-// metric.
+// During normalization, we align the per-process utilization to the global
+// utilization of the system. We first consume the exited children utilization
+// and if the collected values are above the total, we proportionally scale each
+// reported metric.
 
 // the total system time, as reported by /proc/stat
+#if (ALL_PIDS_ARE_READ_INSTANTLY == 0)
 kernel_uint_t
 global_utime = 0,
 global_stime = 0,
 global_gtime = 0;
+#endif
 
 // the normalization ratios, as calculated by normalize_utilization()
 NETDATA_DOUBLE
@@ -98,21 +100,11 @@ NETDATA_DOUBLE
 cminflt_fix_ratio = 1.0,
 cmajflt_fix_ratio = 1.0;
 
-// ----------------------------------------------------------------------------
-// factor for calculating correct CPU time values depending on units of raw data
-unsigned int time_factor = 0;
-
-// ----------------------------------------------------------------------------
-// command line options
+// --------------------------------------------------------------------------------------------------------------------
 
 int update_every = 1;
 
-#if defined(__APPLE__)
-mach_timebase_info_data_t mach_info;
-#endif
-
-#if !defined(__FreeBSD__) && !defined(__APPLE__)
-int max_fds_cache_seconds = 60;
+#if defined(OS_LINUX)
 proc_state proc_state_count[PROC_STATUS_END];
 const char *proc_states[] = {
 [PROC_STATUS_RUNNING] = "running",
@@ -127,420 +119,104 @@ const char *proc_states[] = {
 
 static char *user_config_dir = CONFIG_DIR;
 static char *stock_config_dir = LIBCONFIG_DIR;
 
-struct target
- *apps_groups_default_target = NULL, // the default target
- *apps_groups_root_target = NULL, // apps_groups.conf defined
- *users_root_target = NULL, // users
- *groups_root_target = NULL; // user groups
-
 size_t pagesize;
 
-// ----------------------------------------------------------------------------
-
-int managed_log(struct pid_stat *p, PID_LOG log, int status) {
- if(unlikely(!status)) {
- // netdata_log_error("command failed log %u, errno %d", log, errno);
-
- if(unlikely(debug_enabled || errno != ENOENT)) {
- if(unlikely(debug_enabled || !(p->log_thrown & log))) {
- p->log_thrown |= log;
- switch(log) {
- case PID_LOG_IO:
- #if defined(__FreeBSD__) || defined(__APPLE__)
- netdata_log_error("Cannot fetch process %d I/O info (command '%s')", p->pid, p->comm);
- #else
-
netdata_log_error("Cannot process %s/proc/%d/io (command '%s')", netdata_configured_host_prefix, p->pid, p->comm); - #endif - break; - - case PID_LOG_STATUS: - #if defined(__FreeBSD__) || defined(__APPLE__) - netdata_log_error("Cannot fetch process %d status info (command '%s')", p->pid, p->comm); - #else - netdata_log_error("Cannot process %s/proc/%d/status (command '%s')", netdata_configured_host_prefix, p->pid, p->comm); - #endif - break; - - case PID_LOG_CMDLINE: - #if defined(__FreeBSD__) || defined(__APPLE__) - netdata_log_error("Cannot fetch process %d command line (command '%s')", p->pid, p->comm); - #else - netdata_log_error("Cannot process %s/proc/%d/cmdline (command '%s')", netdata_configured_host_prefix, p->pid, p->comm); - #endif - break; - - case PID_LOG_FDS: - #if defined(__FreeBSD__) || defined(__APPLE__) - netdata_log_error("Cannot fetch process %d files (command '%s')", p->pid, p->comm); - #else - netdata_log_error("Cannot process entries in %s/proc/%d/fd (command '%s')", netdata_configured_host_prefix, p->pid, p->comm); - #endif - break; - - case PID_LOG_LIMITS: - #if defined(__FreeBSD__) || defined(__APPLE__) - ; - #else - netdata_log_error("Cannot process %s/proc/%d/limits (command '%s')", netdata_configured_host_prefix, p->pid, p->comm); - #endif - - case PID_LOG_STAT: - break; - - default: - netdata_log_error("unhandled error for pid %d, command '%s'", p->pid, p->comm); - break; - } - } - } - errno_clear(); - } - else if(unlikely(p->log_thrown & log)) { - // netdata_log_error("unsetting log %u on pid %d", log, p->pid); - p->log_thrown &= ~log; - } - - return status; +void sanitize_apps_plugin_chart_meta(char *buf) { + external_plugins_sanitize(buf, buf, strlen(buf) + 1); } // ---------------------------------------------------------------------------- -// update statistics on the targets - -// 1. link all childs to their parents -// 2. go from bottom to top, marking as merged all children to their parents, -// this step links all parents without a target to the child target, if any -// 3. link all top level processes (the ones not merged) to default target -// 4. go from top to bottom, linking all children without a target to their parent target -// after this step all processes have a target. -// [5. for each killed pid (updated = 0), remove its usage from its target] -// 6. zero all apps_groups_targets -// 7. concentrate all values on the apps_groups_targets -// 8. remove all killed processes -// 9. find the unique file count for each target -// check: update_apps_groups_statistics() - -static void apply_apps_groups_targets_inheritance(void) { - struct pid_stat *p = NULL; - - // children that do not have a target - // inherit their target from their parent - int found = 1, loops = 0; - while(found) { - if(unlikely(debug_enabled)) loops++; - found = 0; - for(p = root_of_pids; p ; p = p->next) { - // if this process does not have a target, - // and it has a parent - // and its parent has a target - // then, set the parent's target to this process - if(unlikely(!p->target && p->parent && p->parent->target)) { - p->target = p->parent->target; - found++; - - if(debug_enabled || (p->target && p->target->debug_enabled)) - debug_log_int("TARGET INHERITANCE: %s is inherited by %d (%s) from its parent %d (%s).", p->target->name, p->pid, p->comm, p->parent->pid, p->parent->comm); - } - } - } - - // find all the procs with 0 childs and merge them to their parents - // repeat, until nothing more can be done. 
- int sortlist = 1; - found = 1; - while(found) { - if(unlikely(debug_enabled)) loops++; - found = 0; - - for(p = root_of_pids; p ; p = p->next) { - if(unlikely(!p->sortlist && !p->children_count)) - p->sortlist = sortlist++; - - if(unlikely( - !p->children_count // if this process does not have any children - && !p->merged // and is not already merged - && p->parent // and has a parent - && p->parent->children_count // and its parent has children - // and the target of this process and its parent is the same, - // or the parent does not have a target - && (p->target == p->parent->target || !p->parent->target) - && p->ppid != INIT_PID // and its parent is not init - )) { - // mark it as merged - p->parent->children_count--; - p->merged = true; - - // the parent inherits the child's target, if it does not have a target itself - if(unlikely(p->target && !p->parent->target)) { - p->parent->target = p->target; - - if(debug_enabled || (p->target && p->target->debug_enabled)) - debug_log_int("TARGET INHERITANCE: %s is inherited by %d (%s) from its child %d (%s).", p->target->name, p->parent->pid, p->parent->comm, p->pid, p->comm); - } - - found++; - } - } - - debug_log("TARGET INHERITANCE: merged %d processes", found); - } - - // init goes always to default target - struct pid_stat *pi = find_pid_entry(INIT_PID); - if(pi && !pi->matched_by_config) - pi->target = apps_groups_default_target; - - // pid 0 goes always to default target - pi = find_pid_entry(0); - if(pi && !pi->matched_by_config) - pi->target = apps_groups_default_target; - - // give a default target on all top level processes - if(unlikely(debug_enabled)) loops++; - for(p = root_of_pids; p ; p = p->next) { - // if the process is not merged itself - // then it is a top level process - if(unlikely(!p->merged && !p->target)) - p->target = apps_groups_default_target; - - // make sure all processes have a sortlist - if(unlikely(!p->sortlist)) - p->sortlist = sortlist++; - } - - pi = find_pid_entry(1); - if(pi) - pi->sortlist = sortlist++; - - // give a target to all merged child processes - found = 1; - while(found) { - if(unlikely(debug_enabled)) loops++; - found = 0; - for(p = root_of_pids; p ; p = p->next) { - if(unlikely(!p->target && p->merged && p->parent && p->parent->target)) { - p->target = p->parent->target; - found++; - - if(debug_enabled || (p->target && p->target->debug_enabled)) - debug_log_int("TARGET INHERITANCE: %s is inherited by %d (%s) from its parent %d (%s) at phase 2.", p->target->name, p->pid, p->comm, p->parent->pid, p->parent->comm); - } - } - } - - debug_log("apply_apps_groups_targets_inheritance() made %d loops on the process tree", loops); -} - -static size_t zero_all_targets(struct target *root) { - struct target *w; - size_t count = 0; - - for (w = root; w ; w = w->next) { - count++; - - w->minflt = 0; - w->majflt = 0; - w->utime = 0; - w->stime = 0; - w->gtime = 0; - w->cminflt = 0; - w->cmajflt = 0; - w->cutime = 0; - w->cstime = 0; - w->cgtime = 0; - w->num_threads = 0; - // w->rss = 0; - w->processes = 0; - - w->status_vmsize = 0; - w->status_vmrss = 0; - w->status_vmshared = 0; - w->status_rssfile = 0; - w->status_rssshmem = 0; - w->status_vmswap = 0; - w->status_voluntary_ctxt_switches = 0; - w->status_nonvoluntary_ctxt_switches = 0; - - w->io_logical_bytes_read = 0; - w->io_logical_bytes_written = 0; - w->io_read_calls = 0; - w->io_write_calls = 0; - w->io_storage_bytes_read = 0; - w->io_storage_bytes_written = 0; - w->io_cancelled_write_bytes = 0; - - // zero file counters - if(w->target_fds) { - 
memset(w->target_fds, 0, sizeof(int) * w->target_fds_size); - w->openfds.files = 0; - w->openfds.pipes = 0; - w->openfds.sockets = 0; - w->openfds.inotifies = 0; - w->openfds.eventfds = 0; - w->openfds.timerfds = 0; - w->openfds.signalfds = 0; - w->openfds.eventpolls = 0; - w->openfds.other = 0; - - w->max_open_files_percent = 0.0; - } - - w->uptime_min = 0; - w->uptime_sum = 0; - w->uptime_max = 0; - - if(unlikely(w->root_pid)) { - struct pid_on_target *pid_on_target = w->root_pid; +// update chart dimensions - while(pid_on_target) { - struct pid_on_target *pid_on_target_to_free = pid_on_target; - pid_on_target = pid_on_target->next; - freez(pid_on_target_to_free); - } +// Helper function to count the number of processes in the linked list +int count_processes(struct pid_stat *root) { + int count = 0; - w->root_pid = NULL; - } - } + for(struct pid_stat *p = root; p ; p = p->next) + if(p->updated) count++; return count; } -static inline void aggregate_pid_on_target(struct target *w, struct pid_stat *p, struct target *o) { - (void)o; - - if(unlikely(!p->updated)) { - // the process is not running - return; - } - - if(unlikely(!w)) { - netdata_log_error("pid %d %s was left without a target!", p->pid, p->comm); - return; - } - - if(p->openfds_limits_percent > w->max_open_files_percent) - w->max_open_files_percent = p->openfds_limits_percent; - - w->cutime += p->cutime; - w->cstime += p->cstime; - w->cgtime += p->cgtime; - w->cminflt += p->cminflt; - w->cmajflt += p->cmajflt; - - w->utime += p->utime; - w->stime += p->stime; - w->gtime += p->gtime; - w->minflt += p->minflt; - w->majflt += p->majflt; - - // w->rss += p->rss; - - w->status_vmsize += p->status_vmsize; - w->status_vmrss += p->status_vmrss; - w->status_vmshared += p->status_vmshared; - w->status_rssfile += p->status_rssfile; - w->status_rssshmem += p->status_rssshmem; - w->status_vmswap += p->status_vmswap; - w->status_voluntary_ctxt_switches += p->status_voluntary_ctxt_switches; - w->status_nonvoluntary_ctxt_switches += p->status_nonvoluntary_ctxt_switches; - - w->io_logical_bytes_read += p->io_logical_bytes_read; - w->io_logical_bytes_written += p->io_logical_bytes_written; - w->io_read_calls += p->io_read_calls; - w->io_write_calls += p->io_write_calls; - w->io_storage_bytes_read += p->io_storage_bytes_read; - w->io_storage_bytes_written += p->io_storage_bytes_written; - w->io_cancelled_write_bytes += p->io_cancelled_write_bytes; - - w->processes++; - w->num_threads += p->num_threads; - - if(!w->uptime_min || p->uptime < w->uptime_min) w->uptime_min = p->uptime; - if(!w->uptime_max || w->uptime_max < p->uptime) w->uptime_max = p->uptime; - w->uptime_sum += p->uptime; - - if(unlikely(debug_enabled || w->debug_enabled)) { - debug_log_int("aggregating '%s' pid %d on target '%s' utime=" KERNEL_UINT_FORMAT ", stime=" KERNEL_UINT_FORMAT ", gtime=" KERNEL_UINT_FORMAT ", cutime=" KERNEL_UINT_FORMAT ", cstime=" KERNEL_UINT_FORMAT ", cgtime=" KERNEL_UINT_FORMAT ", minflt=" KERNEL_UINT_FORMAT ", majflt=" KERNEL_UINT_FORMAT ", cminflt=" KERNEL_UINT_FORMAT ", cmajflt=" KERNEL_UINT_FORMAT "", p->comm, p->pid, w->name, p->utime, p->stime, p->gtime, p->cutime, p->cstime, p->cgtime, p->minflt, p->majflt, p->cminflt, p->cmajflt); - - struct pid_on_target *pid_on_target = mallocz(sizeof(struct pid_on_target)); - pid_on_target->pid = p->pid; - pid_on_target->next = w->root_pid; - w->root_pid = pid_on_target; - } +// Comparator function to sort by pid +int compare_by_pid(const void *a, const void *b) { + struct pid_stat *pa = *(struct pid_stat **)a; 
+ struct pid_stat *pb = *(struct pid_stat **)b; + return ((int)pa->pid - (int)pb->pid); } -static void calculate_netdata_statistics(void) { - apply_apps_groups_targets_inheritance(); - - zero_all_targets(users_root_target); - zero_all_targets(groups_root_target); - apps_groups_targets_count = zero_all_targets(apps_groups_root_target); - - // this has to be done, before the cleanup - struct pid_stat *p = NULL; - struct target *w = NULL, *o = NULL; - - // concentrate everything on the targets - for(p = root_of_pids; p ; p = p->next) { - - // -------------------------------------------------------------------- - // apps_groups target - - aggregate_pid_on_target(p->target, p, NULL); +// Function to print a process and its children recursively +void print_process_tree(struct pid_stat *root, struct pid_stat *parent, int depth, int total_processes) { + // Allocate an array of pointers for processes with the given parent + struct pid_stat **children = (struct pid_stat **)malloc(total_processes * sizeof(struct pid_stat *)); + int children_count = 0; - - // -------------------------------------------------------------------- - // user target - - o = p->user_target; - if(likely(p->user_target && p->user_target->uid == p->uid)) - w = p->user_target; - else { - if(unlikely(debug_enabled && p->user_target)) - debug_log("pid %d (%s) switched user from %u (%s) to %u.", p->pid, p->comm, p->user_target->uid, p->user_target->name, p->uid); - - w = p->user_target = get_users_target(p->uid); + // Populate the array with processes that have the given parent + struct pid_stat *p = root; + while (p != NULL) { + if (p->updated && p->parent == parent) { + children[children_count++] = p; } + p = p->next; + } - aggregate_pid_on_target(w, p, o); - - - // -------------------------------------------------------------------- - // user group target - - o = p->group_target; - if(likely(p->group_target && p->group_target->gid == p->gid)) - w = p->group_target; - else { - if(unlikely(debug_enabled && p->group_target)) - debug_log("pid %d (%s) switched group from %u (%s) to %u.", p->pid, p->comm, p->group_target->gid, p->group_target->name, p->gid); + // Sort the children array by pid + qsort(children, children_count, sizeof(struct pid_stat *), compare_by_pid); - w = p->group_target = get_groups_target(p->gid); + // Print each child and recurse + for (int i = 0; i < children_count; i++) { + // Print the current process with indentation based on depth + if (depth > 0) { + for (int j = 0; j < (depth - 1) * 4; j++) { + printf(" "); + } + printf(" \\_ "); } - aggregate_pid_on_target(w, p, o); +#if (PROCESSES_HAVE_COMM_AND_NAME == 1) + printf("[%d] %s (name: %s) [%s]: %s\n", children[i]->pid, + string2str(children[i]->comm), + string2str(children[i]->name), + string2str(children[i]->target->name), + string2str(children[i]->cmdline)); +#else + printf("[%d] orig: '%s' new: '%s' [target: %s]: cmdline: %s\n", children[i]->pid, + string2str(children[i]->comm_orig), + string2str(children[i]->comm), + string2str(children[i]->target->name), + string2str(children[i]->cmdline)); +#endif + // Recurse to print this child's children + print_process_tree(root, children[i], depth + 1, total_processes); + } - // -------------------------------------------------------------------- - // aggregate all file descriptors + // Free the allocated array + free(children); +} - if(enable_file_charts) - aggregate_pid_fds_on_targets(p); - } +// Function to print the full hierarchy +void print_hierarchy(struct pid_stat *root) { + // Count the total number of 
processes + int total_processes = count_processes(root); - cleanup_exited_pids(); + // Start printing from processes with parent = NULL (i.e., root processes) + print_process_tree(root, NULL, 0, total_processes); } // ---------------------------------------------------------------------------- // update chart dimensions +#if (ALL_PIDS_ARE_READ_INSTANTLY == 0) static void normalize_utilization(struct target *root) { struct target *w; - // childs processing introduces spikes - // here we try to eliminate them by disabling childs processing either for specific dimensions - // or entirely. Of course, either way, we disable it just a single iteration. + // children processing introduces spikes, + // here we try to eliminate them by disabling children processing either + // for specific dimensions or entirely. + // of course, either way, we disable it just for a single iteration. - kernel_uint_t max_time = os_get_system_cpus() * time_factor * RATES_DETAIL; + kernel_uint_t max_time = os_get_system_cpus() * NSEC_PER_SEC; kernel_uint_t utime = 0, cutime = 0, stime = 0, cstime = 0, gtime = 0, cgtime = 0, minflt = 0, cminflt = 0, majflt = 0, cmajflt = 0; if(global_utime > max_time) global_utime = max_time; @@ -548,19 +224,19 @@ static void normalize_utilization(struct target *root) { if(global_gtime > max_time) global_gtime = max_time; for(w = root; w ; w = w->next) { - if(w->target || (!w->processes && !w->exposed)) continue; - - utime += w->utime; - stime += w->stime; - gtime += w->gtime; - cutime += w->cutime; - cstime += w->cstime; - cgtime += w->cgtime; - - minflt += w->minflt; - majflt += w->majflt; - cminflt += w->cminflt; - cmajflt += w->cmajflt; + if(w->target || (!w->values[PDF_PROCESSES] && !w->exposed)) continue; + + utime += w->values[PDF_UTIME]; + stime += w->values[PDF_STIME]; + gtime += w->values[PDF_GTIME]; + cutime += w->values[PDF_CUTIME]; + cstime += w->values[PDF_CSTIME]; + cgtime += w->values[PDF_CGTIME]; + + minflt += w->values[PDF_MINFLT]; + majflt += w->values[PDF_MAJFLT]; + cminflt += w->values[PDF_CMINFLT]; + cmajflt += w->values[PDF_CMAJFLT]; } if(global_utime || global_stime || global_gtime) { @@ -574,7 +250,7 @@ static void normalize_utilization(struct target *root) { cgtime_fix_ratio = 1.0; //(NETDATA_DOUBLE)(global_utime + global_stime) / (NETDATA_DOUBLE)(utime + cutime + stime + cstime); } else if((global_utime + global_stime > utime + stime) && (cutime || cstime)) { - // children resources are too high + // children resources are too high, // lower only the children resources utime_fix_ratio = stime_fix_ratio = @@ -683,6 +359,7 @@ static void normalize_utilization(struct target *root) { , (kernel_uint_t)(cgtime * cgtime_fix_ratio) ); } +#endif // ---------------------------------------------------------------------------- // parse command line arguments @@ -690,6 +367,7 @@ static void normalize_utilization(struct target *root) { int check_proc_1_io() { int ret = 0; +#if defined(OS_LINUX) procfile *ff = procfile_open("/proc/1/io", NULL, PROCFILE_FLAG_NO_ERROR_ON_FILE_IO); if(!ff) goto cleanup; @@ -700,9 +378,14 @@ int check_proc_1_io() { cleanup: procfile_close(ff); +#endif + return ret; } +static bool profile_speed = false; +static bool print_tree_and_exit = false; + static void parse_args(int argc, char **argv) { int i, freq = 0; @@ -721,6 +404,12 @@ static void parse_args(int argc, char **argv) exit(0); } + if(strcmp("print", argv[i]) == 0 || strcmp("-print", argv[i]) == 0 || strcmp("--print", argv[i]) == 0) { + print_tree_and_exit = true; + continue; + } + +#if 
defined(OS_LINUX) if(strcmp("test-permissions", argv[i]) == 0 || strcmp("-t", argv[i]) == 0) { if(!check_proc_1_io()) { perror("Tried to read /proc/1/io and it failed"); @@ -729,6 +418,7 @@ static void parse_args(int argc, char **argv) printf("OK\n"); exit(0); } +#endif if(strcmp("debug", argv[i]) == 0) { debug_enabled = true; @@ -738,7 +428,12 @@ static void parse_args(int argc, char **argv) continue; } -#if !defined(__FreeBSD__) && !defined(__APPLE__) + if(strcmp("profile-speed", argv[i]) == 0) { + profile_speed = true; + continue; + } + +#if defined(OS_LINUX) if(strcmp("fds-cache-secs", argv[i]) == 0) { if(argc <= i + 1) { fprintf(stderr, "Parameter 'fds-cache-secs' requires a number as argument.\n"); @@ -751,6 +446,7 @@ static void parse_args(int argc, char **argv) } #endif +#if (PROCESSES_HAVE_CPU_CHILDREN_TIME == 1) || (PROCESSES_HAVE_CHILDREN_FLTS == 1) if(strcmp("no-childs", argv[i]) == 0 || strcmp("without-childs", argv[i]) == 0) { include_exited_childs = 0; continue; @@ -760,7 +456,9 @@ static void parse_args(int argc, char **argv) include_exited_childs = 1; continue; } +#endif +#if (PROCESSES_HAVE_CPU_GUEST_TIME == 1) if(strcmp("with-guest", argv[i]) == 0) { enable_guest_charts = true; continue; @@ -770,26 +468,33 @@ static void parse_args(int argc, char **argv) enable_guest_charts = false; continue; } +#endif +#if (PROCESSES_HAVE_FDS == 1) if(strcmp("with-files", argv[i]) == 0) { - enable_file_charts = 1; + enable_file_charts = CONFIG_BOOLEAN_YES; continue; } if(strcmp("no-files", argv[i]) == 0 || strcmp("without-files", argv[i]) == 0) { - enable_file_charts = 0; + enable_file_charts = CONFIG_BOOLEAN_NO; continue; } +#endif +#if (PROCESSES_HAVE_UID == 1) || (PROCESSES_HAVE_SID == 1) if(strcmp("no-users", argv[i]) == 0 || strcmp("without-users", argv[i]) == 0) { enable_users_charts = 0; continue; } +#endif +#if (PROCESSES_HAVE_GID == 1) if(strcmp("no-groups", argv[i]) == 0 || strcmp("without-groups", argv[i]) == 0) { enable_groups_charts = 0; continue; } +#endif if(strcmp("with-detailed-uptime", argv[i]) == 0) { enable_detailed_uptime_charts = 1; @@ -821,26 +526,36 @@ static void parse_args(int argc, char **argv) " it may include sensitive data such as passwords and tokens\n" " enabling this could be a security risk\n" "\n" +#if (PROCESSES_HAVE_CPU_CHILDREN_TIME == 1) || (PROCESSES_HAVE_CHILDREN_FLTS == 1) " with-childs\n" " without-childs enable / disable aggregating exited\n" " children resources into parents\n" " (default is enabled)\n" "\n" +#endif +#if (PROCESSES_HAVE_CPU_GUEST_TIME == 1) " with-guest\n" " without-guest enable / disable reporting guest charts\n" " (default is disabled)\n" "\n" +#endif +#if (PROCESSES_HAVE_FDS == 1) " with-files\n" " without-files enable / disable reporting files, sockets, pipes\n" " (default is enabled)\n" "\n" +#endif +#if (PROCESSES_HAVE_UID == 1) || (PROCESSES_HAVE_SID == 1) " without-users disable reporting per user charts\n" "\n" +#endif +#if (PROCESSES_HAVE_GID == 1) " without-groups disable reporting per user group charts\n" "\n" +#endif " with-detailed-uptime enable reporting min/avg/max uptime charts\n" "\n" -#if !defined(__FreeBSD__) && !defined(__APPLE__) +#if defined(OS_LINUX) " fds-cache-secs N cache the files of processed for N seconds\n" " caching is adaptive per file (when a file\n" " is found, it starts at 0 and while the file\n" @@ -852,15 +567,17 @@ static void parse_args(int argc, char **argv) " version or -v or -V print program version and exit\n" "\n" , NETDATA_VERSION -#if !defined(__FreeBSD__) && !defined(__APPLE__) 
+#if defined(OS_LINUX) , max_fds_cache_seconds #endif ); - exit(1); + exit(0); } +#if !defined(OS_WINDOWS) || !defined(RUN_UNDER_CLION) netdata_log_error("Cannot understand option %s", argv[i]); exit(1); +#endif } if(freq > 0) update_every = freq; @@ -879,7 +596,8 @@ static void parse_args(int argc, char **argv) netdata_log_info("Loaded config file '%s/apps_groups.conf'", user_config_dir); } -static int am_i_running_as_root() { +#if !defined(OS_WINDOWS) +static inline int am_i_running_as_root() { uid_t uid = getuid(), euid = geteuid(); if(uid == 0 || euid == 0) { @@ -892,7 +610,7 @@ static int am_i_running_as_root() { } #ifdef HAVE_SYS_CAPABILITY_H -static int check_capabilities() { +static inline int check_capabilities() { cap_t caps = cap_get_proc(); if(!caps) { netdata_log_error("Cannot get current capabilities."); @@ -936,27 +654,17 @@ static int check_capabilities() { return ret; } #else -static int check_capabilities() { +static inline int check_capabilities() { return 0; } #endif +#endif -static netdata_mutex_t apps_and_stdout_mutex = NETDATA_MUTEX_INITIALIZER; - -struct target *find_target_by_name(struct target *base, const char *name) { - struct target *t; - for(t = base; t ; t = t->next) { - if (strcmp(t->name, name) == 0) - return t; - } - - return NULL; -} +netdata_mutex_t apps_and_stdout_mutex = NETDATA_MUTEX_INITIALIZER; static bool apps_plugin_exit = false; int main(int argc, char **argv) { - clocks_init(); nd_log_initialize_for_external_plugins("apps.plugin"); pagesize = (size_t)sysconf(_SC_PAGESIZE); @@ -999,48 +707,46 @@ int main(int argc, char **argv) { } #endif /* NETDATA_INTERNAL_CHECKS */ - procfile_adaptive_initial_allocation = 1; - - os_get_system_HZ(); -#if defined(__FreeBSD__) - time_factor = 1000000ULL / RATES_DETAIL; // FreeBSD uses usecs -#endif -#if defined(__APPLE__) - mach_timebase_info(&mach_info); - time_factor = 1000000ULL / RATES_DETAIL; -#endif -#if !defined(__FreeBSD__) && !defined(__APPLE__) - time_factor = system_hz; // Linux uses clock ticks -#endif - - os_get_system_pid_max(); + procfile_set_adaptive_allocation(true, 0, 0, 0); os_get_system_cpus_uncached(); - + apps_managers_and_aggregators_init(); // before parsing args! parse_args(argc, argv); +#if !defined(OS_WINDOWS) if(!check_capabilities() && !am_i_running_as_root() && !check_proc_1_io()) { uid_t uid = getuid(), euid = geteuid(); #ifdef HAVE_SYS_CAPABILITY_H netdata_log_error("apps.plugin should either run as root (now running with uid %u, euid %u) or have special capabilities. " - "Without these, apps.plugin cannot report disk I/O utilization of other processes. " - "To enable capabilities run: sudo setcap cap_dac_read_search,cap_sys_ptrace+ep %s; " - "To enable setuid to root run: sudo chown root:netdata %s; sudo chmod 4750 %s; " - , uid, euid, argv[0], argv[0], argv[0] - ); + "Without these, apps.plugin cannot report disk I/O utilization of other processes. " + "To enable capabilities run: sudo setcap cap_dac_read_search,cap_sys_ptrace+ep %s; " + "To enable setuid to root run: sudo chown root:netdata %s; sudo chmod 4750 %s; " + , uid, euid, argv[0], argv[0], argv[0]); #else netdata_log_error("apps.plugin should either run as root (now running with uid %u, euid %u) or have special capabilities. " - "Without these, apps.plugin cannot report disk I/O utilization of other processes. " - "Your system does not support capabilities. 
" - "To enable setuid to root run: sudo chown root:netdata %s; sudo chmod 4750 %s; " - , uid, euid, argv[0], argv[0] - ); + "Without these, apps.plugin cannot report disk I/O utilization of other processes. " + "Your system does not support capabilities. " + "To enable setuid to root run: sudo chown root:netdata %s; sudo chmod 4750 %s; " + , uid, euid, argv[0], argv[0]); #endif } +#endif netdata_log_info("started on pid %d", getpid()); - users_and_groups_init(); - pids_init(); +#if (PROCESSES_HAVE_UID == 1) + cached_usernames_init(); +#endif + +#if (PROCESSES_HAVE_GID == 1) + cached_groupnames_init(); +#endif + +#if (PROCESSES_HAVE_SID == 1) + cached_sid_username_init(); +#endif + + apps_pids_init(); + OS_FUNCTION(apps_os_init)(); // ------------------------------------------------------------------------ // the event loop for functions @@ -1055,22 +761,22 @@ int main(int argc, char **argv) { netdata_mutex_lock(&apps_and_stdout_mutex); APPS_PLUGIN_GLOBAL_FUNCTIONS(); - usec_t step = update_every * USEC_PER_SEC; global_iterations_counter = 1; heartbeat_t hb; - heartbeat_init(&hb); + heartbeat_init(&hb, update_every * USEC_PER_SEC); for(; !apps_plugin_exit ; global_iterations_counter++) { netdata_mutex_unlock(&apps_and_stdout_mutex); -#ifdef NETDATA_PROFILING -#warning "compiling for profiling" - static int profiling_count=0; - profiling_count++; - if(unlikely(profiling_count > 2000)) exit(0); - usec_t dt = update_every * USEC_PER_SEC; -#else - usec_t dt = heartbeat_next(&hb, step); -#endif + usec_t dt; + if(profile_speed) { + static int profiling_count=0; + profiling_count++; + if(unlikely(profiling_count > 500)) exit(0); + dt = update_every * USEC_PER_SEC; + } + else + dt = heartbeat_next(&hb); + netdata_mutex_lock(&apps_and_stdout_mutex); struct pollfd pollfd = { .fd = fileno(stdout), .events = POLLERR }; @@ -1083,9 +789,6 @@ int main(int argc, char **argv) { fatal("Received error on read pipe."); } - if(global_iterations_counter % 10 == 0) - get_MemTotal(); - if(!collect_data_for_all_pids()) { netdata_log_error("Cannot collect /proc data for running processes. 
Disabling apps.plugin..."); printf("DISABLE\n"); @@ -1093,29 +796,50 @@ int main(int argc, char **argv) { exit(1); } - calculate_netdata_statistics(); + aggregate_processes_to_targets(); + +#if (ALL_PIDS_ARE_READ_INSTANTLY == 0) + OS_FUNCTION(apps_os_read_global_cpu_utilization)(); normalize_utilization(apps_groups_root_target); +#endif + + if(unlikely(print_tree_and_exit)) { + print_hierarchy(root_of_pids()); + exit(0); + } if(send_resource_usage) send_resource_usage_to_netdata(dt); +#if (PROCESSES_HAVE_STATE == 1) send_proc_states_count(dt); - send_charts_updates_to_netdata(apps_groups_root_target, "app", "app_group", "Apps"); +#endif + + send_charts_updates_to_netdata(apps_groups_root_target, "app", "app_group", "Applications Groups"); send_collected_data_to_netdata(apps_groups_root_target, "app", dt); +#if (PROCESSES_HAVE_UID == 1) if (enable_users_charts) { - send_charts_updates_to_netdata(users_root_target, "user", "user", "Users"); + send_charts_updates_to_netdata(users_root_target, "user", "user", "User Processes"); send_collected_data_to_netdata(users_root_target, "user", dt); } +#endif +#if (PROCESSES_HAVE_GID == 1) if (enable_groups_charts) { - send_charts_updates_to_netdata(groups_root_target, "usergroup", "user_group", "User Groups"); + send_charts_updates_to_netdata(groups_root_target, "usergroup", "user_group", "User Group Processes"); send_collected_data_to_netdata(groups_root_target, "usergroup", dt); } +#endif - fflush(stdout); +#if (PROCESSES_HAVE_SID == 1) + if (enable_users_charts) { + send_charts_updates_to_netdata(sids_root_target, "user", "user", "User Processes"); + send_collected_data_to_netdata(sids_root_target, "user", dt); + } +#endif - show_guest_time_old = show_guest_time; + fflush(stdout); debug_log("done Loop No %zu", global_iterations_counter); } diff --git a/src/collectors/apps.plugin/apps_plugin.h b/src/collectors/apps.plugin/apps_plugin.h index a085872d9..1abd07f22 100644 --- a/src/collectors/apps.plugin/apps_plugin.h +++ b/src/collectors/apps.plugin/apps_plugin.h @@ -6,11 +6,41 @@ #include "collectors/all.h" #include "libnetdata/libnetdata.h" -#ifdef __FreeBSD__ +#define OS_FUNC_CONCAT(a, b) a##b + +#if defined(OS_FREEBSD) #include -#endif -#ifdef __APPLE__ +#define OS_INIT_PID 1 +#define ALL_PIDS_ARE_READ_INSTANTLY 1 +#define PROCESSES_HAVE_CPU_GUEST_TIME 0 +#define PROCESSES_HAVE_CPU_CHILDREN_TIME 1 +#define PROCESSES_HAVE_VOLCTX 0 +#define PROCESSES_HAVE_NVOLCTX 0 +#define PROCESSES_HAVE_PHYSICAL_IO 0 +#define PROCESSES_HAVE_LOGICAL_IO 1 +#define PROCESSES_HAVE_IO_CALLS 0 +#define PROCESSES_HAVE_UID 1 +#define PROCESSES_HAVE_GID 1 +#define PROCESSES_HAVE_SID 0 +#define PROCESSES_HAVE_MAJFLT 1 +#define PROCESSES_HAVE_CHILDREN_FLTS 1 +#define PROCESSES_HAVE_VMSWAP 0 +#define PROCESSES_HAVE_VMSHARED 0 +#define PROCESSES_HAVE_RSSFILE 0 +#define PROCESSES_HAVE_RSSSHMEM 0 +#define PROCESSES_HAVE_FDS 1 +#define PROCESSES_HAVE_HANDLES 0 +#define PROCESSES_HAVE_CMDLINE 1 +#define PROCESSES_HAVE_PID_LIMITS 0 +#define PROCESSES_HAVE_COMM_AND_NAME 0 +#define PROCESSES_HAVE_STATE 0 +#define PPID_SHOULD_BE_RUNNING 1 +#define INCREMENTAL_DATA_COLLECTION 1 +#define CPU_TO_NANOSECONDCORES (1) +#define OS_FUNCTION(func) OS_FUNC_CONCAT(func, _freebsd) + +#elif defined(OS_MACOS) #include #include #include @@ -18,47 +48,129 @@ #include #include // For mach_timebase_info_data_t and mach_timebase_info -extern mach_timebase_info_data_t mach_info; -#endif - -// ---------------------------------------------------------------------------- -// per O/S configuration - -// the 
minimum PID of the system -// this is also the pid of the init process -#define INIT_PID 1 - -// if the way apps.plugin will work, will read the entire process list, -// including the resource utilization of each process, instantly -// set this to 1 -// when set to 0, apps.plugin builds a sort list of processes, in order -// to process children processes, before parent processes -#if defined(__FreeBSD__) || defined(__APPLE__) -#define ALL_PIDS_ARE_READ_INSTANTLY 1 -#else -#define ALL_PIDS_ARE_READ_INSTANTLY 0 -#endif - -#if defined(__APPLE__) struct pid_info { struct kinfo_proc proc; struct proc_taskinfo taskinfo; struct proc_bsdinfo bsdinfo; struct rusage_info_v4 rusageinfo; }; + +#define OS_INIT_PID 1 +#define ALL_PIDS_ARE_READ_INSTANTLY 1 +#define PROCESSES_HAVE_CPU_GUEST_TIME 0 +#define PROCESSES_HAVE_CPU_CHILDREN_TIME 0 +#define PROCESSES_HAVE_VOLCTX 1 +#define PROCESSES_HAVE_NVOLCTX 0 +#define PROCESSES_HAVE_PHYSICAL_IO 0 +#define PROCESSES_HAVE_LOGICAL_IO 1 +#define PROCESSES_HAVE_IO_CALLS 0 +#define PROCESSES_HAVE_UID 1 +#define PROCESSES_HAVE_GID 1 +#define PROCESSES_HAVE_SID 0 +#define PROCESSES_HAVE_MAJFLT 1 +#define PROCESSES_HAVE_CHILDREN_FLTS 0 +#define PROCESSES_HAVE_VMSWAP 0 +#define PROCESSES_HAVE_VMSHARED 0 +#define PROCESSES_HAVE_RSSFILE 0 +#define PROCESSES_HAVE_RSSSHMEM 0 +#define PROCESSES_HAVE_FDS 1 +#define PROCESSES_HAVE_HANDLES 0 +#define PROCESSES_HAVE_CMDLINE 1 +#define PROCESSES_HAVE_PID_LIMITS 0 +#define PROCESSES_HAVE_COMM_AND_NAME 0 +#define PROCESSES_HAVE_STATE 0 +#define PPID_SHOULD_BE_RUNNING 1 +#define INCREMENTAL_DATA_COLLECTION 1 +#define CPU_TO_NANOSECONDCORES (1) // already in nanoseconds +#define OS_FUNCTION(func) OS_FUNC_CONCAT(func, _macos) + +#elif defined(OS_WINDOWS) +#define OS_INIT_PID 0 // dynamic, is set during data collection +#define ALL_PIDS_ARE_READ_INSTANTLY 1 +#define PROCESSES_HAVE_CPU_GUEST_TIME 0 +#define PROCESSES_HAVE_CPU_CHILDREN_TIME 0 +#define PROCESSES_HAVE_VOLCTX 0 +#define PROCESSES_HAVE_NVOLCTX 0 +#define PROCESSES_HAVE_PHYSICAL_IO 0 +#define PROCESSES_HAVE_LOGICAL_IO 1 +#define PROCESSES_HAVE_IO_CALLS 1 +#define PROCESSES_HAVE_UID 0 +#define PROCESSES_HAVE_GID 0 +#define PROCESSES_HAVE_SID 1 +#define PROCESSES_HAVE_MAJFLT 0 +#define PROCESSES_HAVE_CHILDREN_FLTS 0 +#define PROCESSES_HAVE_VMSWAP 1 +#define PROCESSES_HAVE_VMSHARED 0 +#define PROCESSES_HAVE_RSSFILE 0 +#define PROCESSES_HAVE_RSSSHMEM 0 +#define PROCESSES_HAVE_FDS 0 +#define PROCESSES_HAVE_HANDLES 1 +#define PROCESSES_HAVE_CMDLINE 0 +#define PROCESSES_HAVE_PID_LIMITS 0 +#define PROCESSES_HAVE_COMM_AND_NAME 1 +#define PROCESSES_HAVE_STATE 0 +#define PPID_SHOULD_BE_RUNNING 0 +#define INCREMENTAL_DATA_COLLECTION 0 +#define CPU_TO_NANOSECONDCORES (100) // convert 100ns to ns +#define OS_FUNCTION(func) OS_FUNC_CONCAT(func, _windows) + +#elif defined(OS_LINUX) +#define OS_INIT_PID 1 +#define ALL_PIDS_ARE_READ_INSTANTLY 0 +#define PROCESSES_HAVE_CPU_GUEST_TIME 1 +#define PROCESSES_HAVE_CPU_CHILDREN_TIME 1 +#define PROCESSES_HAVE_VOLCTX 1 +#define PROCESSES_HAVE_NVOLCTX 1 +#define PROCESSES_HAVE_PHYSICAL_IO 1 +#define PROCESSES_HAVE_LOGICAL_IO 1 +#define PROCESSES_HAVE_IO_CALLS 1 +#define PROCESSES_HAVE_UID 1 +#define PROCESSES_HAVE_GID 1 +#define PROCESSES_HAVE_SID 0 +#define PROCESSES_HAVE_MAJFLT 1 +#define PROCESSES_HAVE_CHILDREN_FLTS 1 +#define PROCESSES_HAVE_VMSWAP 1 +#define PROCESSES_HAVE_VMSHARED 1 +#define PROCESSES_HAVE_RSSFILE 1 +#define PROCESSES_HAVE_RSSSHMEM 1 +#define PROCESSES_HAVE_FDS 1 +#define PROCESSES_HAVE_HANDLES 0 +#define PROCESSES_HAVE_CMDLINE 1 
+#define PROCESSES_HAVE_PID_LIMITS 1
+#define PROCESSES_HAVE_COMM_AND_NAME 0
+#define PROCESSES_HAVE_STATE 1
+#define PPID_SHOULD_BE_RUNNING 1
+#define USE_APPS_GROUPS_CONF 1
+#define INCREMENTAL_DATA_COLLECTION 1
+#define CPU_TO_NANOSECONDCORES (NSEC_PER_SEC / system_hz)
+#define OS_FUNCTION(func) OS_FUNC_CONCAT(func, _linux)
+
+extern int max_fds_cache_seconds;
+
+#else
+#error "Unsupported operating system"
+#endif
+
+#if (PROCESSES_HAVE_UID == 1) && (PROCESSES_HAVE_SID == 1)
+#error "Do not enable SID and UID at the same time"
#endif
-// ----------------------------------------------------------------------------
+// --------------------------------------------------------------------------------------------------------------------
+
+#define MAX_SYSTEM_FD_TO_ALLOW_FILES_PROCESSING 100000
+
+extern pid_t INIT_PID;
extern bool debug_enabled;
-extern bool enable_guest_charts;
+
extern bool enable_detailed_uptime_charts;
extern bool enable_users_charts;
extern bool enable_groups_charts;
extern bool include_exited_childs;
extern bool enable_function_cmdline;
extern bool proc_pid_cmdline_is_needed;
-extern bool enable_file_charts;
+extern int enable_file_charts;
+extern bool obsolete_file_charts;
extern size_t
    global_iterations_counter,
@@ -68,19 +180,19 @@ extern size_t
    inodes_changed_counter,
    links_changed_counter,
    targets_assignment_counter,
-    all_pids_count,
    apps_groups_targets_count;
-extern int
-    all_files_len,
-    all_files_size,
-    show_guest_time,
-    show_guest_time_old;
+#if (PROCESSES_HAVE_CPU_GUEST_TIME == 1)
+extern bool enable_guest_charts;
+extern bool show_guest_time;
+#endif
+#if (ALL_PIDS_ARE_READ_INSTANTLY == 0)
extern kernel_uint_t
    global_utime,
    global_stime,
    global_gtime;
+#endif
// the normalization ratios, as calculated by normalize_utilization()
extern NETDATA_DOUBLE
@@ -95,35 +207,26 @@ extern NETDATA_DOUBLE
    cminflt_fix_ratio,
    cmajflt_fix_ratio;
-#if defined(__FreeBSD__) || defined(__APPLE__)
-extern usec_t system_current_time_ut;
-#else
-extern kernel_uint_t system_uptime_secs;
-#endif
-
extern size_t pagesize;
-// ----------------------------------------------------------------------------
+extern netdata_mutex_t apps_and_stdout_mutex;
+
+// --------------------------------------------------------------------------------------------------------------------
// string lengths
-#define MAX_COMPARE_NAME 100
-#define MAX_NAME 100
#define MAX_CMDLINE 65536
-// ----------------------------------------------------------------------------
-// to avoid reallocating too frequently, we can increase the number of spare
-// file descriptors used by processes.
-// IMPORTANT:
-// having a lot of spares, increases the CPU utilization of the plugin.
-#define MAX_SPARE_FDS 1
+// --------------------------------------------------------------------------------------------------------------------
+// to avoid reallocating too frequently when we add file descriptors,
+// we double the allocation at every increase request.
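// [editorial note] a sketch of the expected growth values for the
// fds_new_size() helper below, assuming MAX() is the usual larger-of-two macro:
//   fds_new_size(0, 3)  -> MAX(0,  4)  = 4    first allocation
//   fds_new_size(4, 5)  -> MAX(8,  6)  = 8    doubling dominates
//   fds_new_size(8, 40) -> MAX(16, 41) = 41   a large fd grows the array straight to fit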
-#if !defined(__FreeBSD__) && !defined(__APPLE__)
-extern int max_fds_cache_seconds;
-#endif
+static inline uint32_t fds_new_size(uint32_t old_size, uint32_t new_fd) {
+    return MAX(old_size * 2, new_fd + 1); // +1 so there is always room for new_fd
+}
-// ----------------------------------------------------------------------------
+// --------------------------------------------------------------------------------------------------------------------
// some variables for keeping track of processes count by states
-
+#if (PROCESSES_HAVE_STATE == 1)
typedef enum {
    PROC_STATUS_RUNNING = 0,
    PROC_STATUS_SLEEPING_D, // uninterruptible sleep
@@ -135,8 +238,9 @@ typedef enum {
extern proc_state proc_state_count[PROC_STATUS_END];
extern const char *proc_states[];
+#endif
-// ----------------------------------------------------------------------------
+// --------------------------------------------------------------------------------------------------------------------
// the rates we are going to send to netdata will have this detail a value of:
// - 1 will send just integer parts to netdata
// - 100 will send 2 decimal points
//
@@ -144,6 +248,7 @@ extern const char *proc_states[];
// etc.
#define RATES_DETAIL 10000ULL
+#if (PROCESSES_HAVE_FDS == 1)
struct openfds {
    kernel_uint_t files;
    kernel_uint_t pipes;
@@ -155,10 +260,10 @@ struct openfds {
    kernel_uint_t eventpolls;
    kernel_uint_t other;
};
-
#define pid_openfds_sum(p) ((p)->openfds.files + (p)->openfds.pipes + (p)->openfds.sockets + (p)->openfds.inotifies + (p)->openfds.eventfds + (p)->openfds.timerfds + (p)->openfds.signalfds + (p)->openfds.eventpolls + (p)->openfds.other)
+#endif
-// ----------------------------------------------------------------------------
+// --------------------------------------------------------------------------------------------------------------------
// target
//
// target is the structure that processes are aggregated to be reported
// to netdata
@@ -172,69 +277,139 @@ struct pid_on_target {
    struct pid_on_target *next;
};
-struct target {
-    char compare[MAX_COMPARE_NAME + 1];
-    uint32_t comparehash;
-    size_t comparelen;
+typedef enum __attribute__((packed)) {
+    TARGET_TYPE_APP_GROUP = 1,
+#if (PROCESSES_HAVE_UID == 1)
+    TARGET_TYPE_UID,
+#endif
+#if (PROCESSES_HAVE_GID == 1)
+    TARGET_TYPE_GID,
+#endif
+#if (PROCESSES_HAVE_SID == 1)
+    TARGET_TYPE_SID,
+#endif
+    TARGET_TYPE_TREE,
+} TARGET_TYPE;
-    char id[MAX_NAME + 1];
-    uint32_t idhash;
+typedef enum __attribute__((packed)) {
+    // CPU utilization time
+    // The values are expressed in "NANOSECONDCORES".
+    // 1 x "NANOSECONDCORE" = 1 x NSEC_PER_SEC (1 billion).
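    // [editorial note] e.g. a process that consumed 250ms of CPU over the last
    // second is stored as 250,000,000 nanosecondcores (0.25 of one core); the
    // per-OS CPU_TO_NANOSECONDCORES multiplier above normalizes clock ticks
    // (Linux), 100ns units (Windows) and plain ns (macOS) to this common unit.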
+ PDF_UTIME, // CPU user time + PDF_STIME, // CPU system time +#if (PROCESSES_HAVE_CPU_GUEST_TIME == 1) + PDF_GTIME, // CPU guest time +#endif +#if (PROCESSES_HAVE_CPU_CHILDREN_TIME == 1) + PDF_CUTIME, // exited children CPU user time + PDF_CSTIME, // exited children CPU system time +#if (PROCESSES_HAVE_CPU_GUEST_TIME == 1) + PDF_CGTIME, // exited children CPU guest time +#endif +#endif - char name[MAX_NAME + 1]; - char clean_name[MAX_NAME + 1]; // sanitized name used in chart id (need to replace at least dots) - uid_t uid; - gid_t gid; + PDF_MINFLT, // rate, unit: faults * RATES_DETAIL - bool is_other; - - kernel_uint_t minflt; - kernel_uint_t cminflt; - kernel_uint_t majflt; - kernel_uint_t cmajflt; - kernel_uint_t utime; - kernel_uint_t stime; - kernel_uint_t gtime; - kernel_uint_t cutime; - kernel_uint_t cstime; - kernel_uint_t cgtime; - kernel_uint_t num_threads; - // kernel_uint_t rss; - - kernel_uint_t status_vmsize; - kernel_uint_t status_vmrss; - kernel_uint_t status_vmshared; - kernel_uint_t status_rssfile; - kernel_uint_t status_rssshmem; - kernel_uint_t status_vmswap; - kernel_uint_t status_voluntary_ctxt_switches; - kernel_uint_t status_nonvoluntary_ctxt_switches; - - kernel_uint_t io_logical_bytes_read; - kernel_uint_t io_logical_bytes_written; - kernel_uint_t io_read_calls; - kernel_uint_t io_write_calls; - kernel_uint_t io_storage_bytes_read; - kernel_uint_t io_storage_bytes_written; - kernel_uint_t io_cancelled_write_bytes; +#if (PROCESSES_HAVE_MAJFLT == 1) + PDF_MAJFLT, // rate, unit: faults * RATES_DETAIL +#endif - int *target_fds; - int target_fds_size; +#if (PROCESSES_HAVE_CHILDREN_FLTS == 1) + PDF_CMINFLT, // rate, unit: faults * RATES_DETAIL + PDF_CMAJFLT, // rate, unit: faults * RATES_DETAIL +#endif - struct openfds openfds; + PDF_VMSIZE, // the current virtual memory used by the process, in bytes + PDF_VMRSS, // the resident memory used by the process, in bytes - NETDATA_DOUBLE max_open_files_percent; +#if (PROCESSES_HAVE_VMSHARED == 1) + PDF_VMSHARED, // the shared memory used by the process, in bytes +#endif + +#if (PROCESSES_HAVE_RSSFILE == 1) + PDF_RSSFILE, // unit: bytes +#endif + +#if (PROCESSES_HAVE_RSSSHMEM == 1) + PDF_RSSSHMEM, // unit: bytes +#endif + +#if (PROCESSES_HAVE_VMSWAP == 1) + PDF_VMSWAP, // the swap memory used by the process, in bytes +#endif + +#if (PROCESSES_HAVE_VOLCTX == 1) + PDF_VOLCTX, // rate, unit: switches * RATES_DETAIL +#endif + +#if (PROCESSES_HAVE_NVOLCTX == 1) + PDF_NVOLCTX, // rate, unit: switches * RATES_DETAIL +#endif + +#if (PROCESSES_HAVE_LOGICAL_IO == 1) + PDF_LREAD, // rate, logical reads in bytes/sec * RATES_DETAIL + PDF_LWRITE, // rate, logical writes in bytes/sec * RATES_DETAIL +#endif + +#if (PROCESSES_HAVE_PHYSICAL_IO == 1) + PDF_PREAD, // rate, physical reads in bytes/sec * RATES_DETAIL + PDF_PWRITE, // rate, physical writes in bytes/sec * RATES_DETAIL +#endif + +#if (PROCESSES_HAVE_IO_CALLS == 1) + PDF_OREAD, // rate, read ops/sec * RATES_DETAIL + PDF_OWRITE, // rate, write ops/sec * RATES_DETAIL +#endif + + PDF_UPTIME, // the process uptime in seconds + PDF_THREADS, // the number of threads + PDF_PROCESSES, // the number of processes + +#if (PROCESSES_HAVE_HANDLES == 1) + PDF_HANDLES, // the number of handles the process maintains +#endif + + // terminator + PDF_MAX +} PID_FIELD; + +typedef struct apps_match { + bool starts_with:1; + bool ends_with:1; + STRING *compare; + SIMPLE_PATTERN *pattern; +} APPS_MATCH; + +struct target { + STRING *id; + STRING *name; + STRING *clean_name; + + TARGET_TYPE type; + APPS_MATCH 
match; +#if (PROCESSES_HAVE_UID == 1) + uid_t uid; +#endif +#if (PROCESSES_HAVE_GID == 1) + gid_t gid; +#endif +#if (PROCESSES_HAVE_SID == 1) + STRING *sid_name; +#endif + + kernel_uint_t values[PDF_MAX]; kernel_uint_t uptime_min; - kernel_uint_t uptime_sum; kernel_uint_t uptime_max; - unsigned int processes; // how many processes have been merged to this - int exposed; // if set, we have sent this to netdata - int hidden; // if set, we set the hidden flag on the dimension - int debug_enabled; - int ends_with; - int starts_with; // if set, the compare string matches only the - // beginning of the command +#if (PROCESSES_HAVE_FDS == 1) + struct openfds openfds; + NETDATA_DOUBLE max_open_files_percent; + int *target_fds; + uint32_t target_fds_size; +#endif + + bool exposed:1; // if set, we have sent this to netdata struct pid_on_target *root_pid; // list of aggregated pids for target debugging @@ -242,7 +417,7 @@ struct target { struct target *next; }; -// ---------------------------------------------------------------------------- +// -------------------------------------------------------------------------------------------------------------------- // internal flags // handled in code (automatically set) @@ -258,12 +433,13 @@ typedef enum __attribute__((packed)) { PID_LOG_LIMITS_DETAIL = (1 << 6), } PID_LOG; -// ---------------------------------------------------------------------------- +// -------------------------------------------------------------------------------------------------------------------- // pid_stat // // structure to store data for each process running // see: man proc for the description of the fields +#if (PROCESSES_HAVE_PID_LIMITS == 1) struct pid_limits { // kernel_uint_t max_cpu_time; // kernel_uint_t max_file_size; @@ -282,11 +458,12 @@ struct pid_limits { // kernel_uint_t max_realtime_priority; // kernel_uint_t max_realtime_timeout; }; +#endif struct pid_fd { int fd; -#if !defined(__FreeBSD__) && !defined(__APPLE__) +#if defined(OS_LINUX) ino_t inode; char *filename; uint32_t link_hash; @@ -295,6 +472,10 @@ struct pid_fd { #endif }; +#define pid_stat_comm(p) (string2str((p)->comm)) +#define pid_stat_cmdline(p) (string2str((p)->cmdline)) +uint32_t all_files_len_get(void); + struct pid_stat { int32_t pid; int32_t ppid; @@ -304,122 +485,86 @@ struct pid_stat { // int32_t tpgid; // uint64_t flags; - char state; - - char comm[MAX_COMPARE_NAME + 1]; - char *cmdline; - - // these are raw values collected - kernel_uint_t minflt_raw; - kernel_uint_t cminflt_raw; - kernel_uint_t majflt_raw; - kernel_uint_t cmajflt_raw; - kernel_uint_t utime_raw; - kernel_uint_t stime_raw; - kernel_uint_t gtime_raw; // guest_time - kernel_uint_t cutime_raw; - kernel_uint_t cstime_raw; - kernel_uint_t cgtime_raw; // cguest_time - - // these are rates - kernel_uint_t minflt; - kernel_uint_t cminflt; - kernel_uint_t majflt; - kernel_uint_t cmajflt; - kernel_uint_t utime; - kernel_uint_t stime; - kernel_uint_t gtime; - kernel_uint_t cutime; - kernel_uint_t cstime; - kernel_uint_t cgtime; - - // int64_t priority; - // int64_t nice; - int32_t num_threads; - // int64_t itrealvalue; - // kernel_uint_t collected_starttime; - // kernel_uint_t vsize; - // kernel_uint_t rss; - // kernel_uint_t rsslim; - // kernel_uint_t starcode; - // kernel_uint_t endcode; - // kernel_uint_t startstack; - // kernel_uint_t kstkesp; - // kernel_uint_t kstkeip; - // uint64_t signal; - // uint64_t blocked; - // uint64_t sigignore; - // uint64_t sigcatch; - // uint64_t wchan; - // uint64_t nswap; - // uint64_t cnswap; - // 
int32_t exit_signal; - // int32_t processor; - // uint32_t rt_priority; - // uint32_t policy; - // kernel_uint_t delayacct_blkio_ticks; + struct pid_stat *parent; + struct pid_stat *next; + struct pid_stat *prev; - uid_t uid; - gid_t gid; + struct target *target; // app_groups.conf/tree targets - kernel_uint_t status_voluntary_ctxt_switches_raw; - kernel_uint_t status_nonvoluntary_ctxt_switches_raw; - - kernel_uint_t status_vmsize; - kernel_uint_t status_vmrss; - kernel_uint_t status_vmshared; - kernel_uint_t status_rssfile; - kernel_uint_t status_rssshmem; - kernel_uint_t status_vmswap; - kernel_uint_t status_voluntary_ctxt_switches; - kernel_uint_t status_nonvoluntary_ctxt_switches; -#ifndef __FreeBSD__ - ARL_BASE *status_arl; +#if (PROCESSES_HAVE_UID == 1) + struct target *uid_target; // uid based targets +#endif +#if (PROCESSES_HAVE_GID == 1) + struct target *gid_target; // gid based targets +#endif +#if (PROCESSES_HAVE_SID == 1) + struct target *sid_target; // sid based targets #endif - kernel_uint_t io_logical_bytes_read_raw; - kernel_uint_t io_logical_bytes_written_raw; - kernel_uint_t io_read_calls_raw; - kernel_uint_t io_write_calls_raw; - kernel_uint_t io_storage_bytes_read_raw; - kernel_uint_t io_storage_bytes_written_raw; - kernel_uint_t io_cancelled_write_bytes_raw; + STRING *comm_orig; // the command, as-collected + STRING *comm; // the command, sanitized + STRING *name; // the command name, if any, sanitized + STRING *cmdline; // the full command line of the program - kernel_uint_t io_logical_bytes_read; - kernel_uint_t io_logical_bytes_written; - kernel_uint_t io_read_calls; - kernel_uint_t io_write_calls; - kernel_uint_t io_storage_bytes_read; - kernel_uint_t io_storage_bytes_written; - kernel_uint_t io_cancelled_write_bytes; +#if defined(OS_WINDOWS) + COUNTER_DATA perflib[PDF_MAX]; +#else + kernel_uint_t raw[PDF_MAX]; +#endif - kernel_uint_t uptime; + kernel_uint_t values[PDF_MAX]; - struct pid_fd *fds; // array of fds it uses - size_t fds_size; // the size of the fds array +#if (PROCESSES_HAVE_UID == 1) + uid_t uid; +#endif +#if (PROCESSES_HAVE_GID == 1) + gid_t gid; +#endif +#if (PROCESSES_HAVE_SID == 1) + STRING *sid_name; +#endif + +#if (ALL_PIDS_ARE_READ_INSTANTLY == 0) + uint32_t sortlist; // higher numbers = top on the process tree + // each process gets a unique number (non-sequential though) +#endif +#if (PROCESSES_HAVE_FDS == 1) struct openfds openfds; +#if (PROCESSES_HAVE_PID_LIMITS == 1) struct pid_limits limits; - NETDATA_DOUBLE openfds_limits_percent; +#endif + struct pid_fd *fds; // array of fds it uses + uint32_t fds_size; // the size of the fds array +#endif - int sortlist; // higher numbers = top on the process tree - // each process gets a unique number + uint32_t children_count; // the number of processes directly referencing this. + // used internally for apps_groups.conf inheritance. + // don't rely on it for anything else. 
- int children_count; // number of processes directly referencing this - int keeploops; // increases by 1 every time keep is 1 and updated 0 + uint32_t keeploops; // increases by 1 every time keep is 1 and updated 0 PID_LOG log_thrown; - bool keep; // true when we need to keep this process in memory even after it exited - bool updated; // true when the process is currently running - bool merged; // true when it has been merged to its parent - bool read; // true when we have already read this process for this iteration - bool matched_by_config; + bool read:1; // true when we have already read this process for this iteration + bool updated:1; // true when the process is currently running + bool merged:1; // true when it has been merged to its parent + bool keep:1; // true when we need to keep this process in memory even after it exited + bool is_manager:1; // true when this pid is a process manager + bool is_aggregator:1; // true when this pid is a process aggregator + + bool matched_by_config:1; + +#if (PROCESSES_HAVE_STATE == 1) + char state; +#endif - struct target *target; // app_groups.conf targets - struct target *user_target; // uid based targets - struct target *group_target; // gid based targets +#if defined(OS_WINDOWS) + bool got_info:1; + bool got_service:1; + bool initialized:1; +#endif usec_t stat_collected_usec; usec_t last_stat_collected_usec; @@ -428,70 +573,22 @@ struct pid_stat { usec_t last_io_collected_usec; usec_t last_limits_collected_usec; +#if defined(OS_LINUX) + ARL_BASE *status_arl; char *fds_dirname; // the full directory name in /proc/PID/fd - char *stat_filename; char *status_filename; char *io_filename; char *cmdline_filename; char *limits_filename; - - struct pid_stat *parent; - struct pid_stat *prev; - struct pid_stat *next; -}; - -// ---------------------------------------------------------------------------- - -struct user_or_group_id { - avl_t avl; - - union { - uid_t uid; - gid_t gid; - } id; - - char *name; - - int updated; - - struct user_or_group_id * next; +#endif }; -extern struct target - *apps_groups_default_target, - *apps_groups_root_target, - *users_root_target, - *groups_root_target; - -extern struct pid_stat *root_of_pids; +// -------------------------------------------------------------------------------------------------------------------- extern int update_every; -extern unsigned int time_factor; -extern kernel_uint_t MemTotal; - -#if (ALL_PIDS_ARE_READ_INSTANTLY == 0) -extern pid_t *all_pids_sortlist; -#endif - -#define APPS_PLUGIN_PROCESSES_FUNCTION_DESCRIPTION "Detailed information on the currently running processes." 
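// [editorial sketch] with the refactor above, per-process metrics are read by
// PID_FIELD index instead of a named struct member; a hypothetical helper
// (the name pid_total_cpu is illustrative only, not part of this patch):
//
//     static inline kernel_uint_t pid_total_cpu(struct pid_stat *p) {
//         kernel_uint_t t = p->values[PDF_UTIME] + p->values[PDF_STIME];
//     #if (PROCESSES_HAVE_CPU_GUEST_TIME == 1)
//         t += p->values[PDF_GTIME]; // guest time only where the OS reports it
//     #endif
//         return t;                  // in nanosecondcores
//     }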
- -void function_processes(const char *transaction, char *function, - usec_t *stop_monotonic_ut __maybe_unused, bool *cancelled __maybe_unused, - BUFFER *payload __maybe_unused, HTTP_ACCESS access, - const char *source __maybe_unused, void *data __maybe_unused); - -struct target *find_target_by_name(struct target *base, const char *name); - -struct target *get_users_target(uid_t uid); -struct target *get_groups_target(gid_t gid); -int read_apps_groups_conf(const char *path, const char *file); - -void users_and_groups_init(void); -struct user_or_group_id *user_id_find(struct user_or_group_id *user_id_to_find); -struct user_or_group_id *group_id_find(struct user_or_group_id *group_id_to_find); -// ---------------------------------------------------------------------------- +// -------------------------------------------------------------------------------------------------------------------- // debugging static inline void debug_log_int(const char *fmt, ... ) { @@ -515,46 +612,152 @@ static inline void debug_log_dummy(void) {} #define debug_log(fmt, args...) debug_log_dummy() #endif -int managed_log(struct pid_stat *p, PID_LOG log, int status); +bool managed_log(struct pid_stat *p, PID_LOG log, bool status); +void sanitize_apps_plugin_chart_meta(char *buf); -// ---------------------------------------------------------------------------- +// -------------------------------------------------------------------------------------------------------------------- // macro to calculate the incremental rate of a value // each parameter is accessed only ONCE - so it is safe to pass function calls // or other macros as parameters -#define incremental_rate(rate_variable, last_kernel_variable, new_kernel_value, collected_usec, last_collected_usec) do { \ +#define incremental_rate(rate_variable, last_kernel_variable, new_kernel_value, collected_usec, last_collected_usec, multiplier) do { \ kernel_uint_t _new_tmp = new_kernel_value; \ - (rate_variable) = (_new_tmp - (last_kernel_variable)) * (USEC_PER_SEC * RATES_DETAIL) / ((collected_usec) - (last_collected_usec)); \ + (rate_variable) = (_new_tmp - (last_kernel_variable)) * (USEC_PER_SEC * (multiplier)) / ((collected_usec) - (last_collected_usec)); \ (last_kernel_variable) = _new_tmp; \ } while(0) // the same macro for struct pid members -#define pid_incremental_rate(type, var, value) \ - incremental_rate(var, var##_raw, value, p->type##_collected_usec, p->last_##type##_collected_usec) +#define pid_incremental_rate(type, idx, value) \ + incremental_rate(p->values[idx], p->raw[idx], value, p->type##_collected_usec, p->last_##type##_collected_usec, RATES_DETAIL) -int read_proc_pid_stat(struct pid_stat *p, void *ptr); -int read_proc_pid_limits(struct pid_stat *p, void *ptr); -int read_proc_pid_status(struct pid_stat *p, void *ptr); -int read_proc_pid_cmdline(struct pid_stat *p); -int read_proc_pid_io(struct pid_stat *p, void *ptr); -int read_pid_file_descriptors(struct pid_stat *p, void *ptr); -int read_global_time(void); -void get_MemTotal(void); +#define pid_incremental_cpu(type, idx, value) \ + incremental_rate(p->values[idx], p->raw[idx], value, p->type##_collected_usec, p->last_##type##_collected_usec, CPU_TO_NANOSECONDCORES) -bool collect_data_for_all_pids(void); -void cleanup_exited_pids(void); +void apps_managers_and_aggregators_init(void); +void apps_pids_init(void); + +#if (PROCESSES_HAVE_CMDLINE == 1) +int read_proc_pid_cmdline(struct pid_stat *p); +#endif +#if (PROCESSES_HAVE_FDS == 1) void clear_pid_fd(struct pid_fd *pfd); void 
file_descriptor_not_used(int id); void init_pid_fds(struct pid_stat *p, size_t first, size_t size); void aggregate_pid_fds_on_targets(struct pid_stat *p); +int read_pid_file_descriptors(struct pid_stat *p, void *ptr); +void make_all_pid_fds_negative(struct pid_stat *p); +uint32_t file_descriptor_find_or_add(const char *name, uint32_t hash); +#endif + +// -------------------------------------------------------------------------------------------------------------------- +// data collection management + +bool pid_match_check(struct pid_stat *p, APPS_MATCH *match); +APPS_MATCH pid_match_create(const char *comm); +void pid_match_cleanup(APPS_MATCH *m); + +bool collect_data_for_all_pids(void); + +void pid_collection_started(struct pid_stat *p); +void pid_collection_failed(struct pid_stat *p); +void pid_collection_completed(struct pid_stat *p); + +#if (INCREMENTAL_DATA_COLLECTION == 1) +bool collect_parents_before_children(void); +int incrementally_collect_data_for_pid(pid_t pid, void *ptr); +int incrementally_collect_data_for_pid_stat(struct pid_stat *p, void *ptr); +#endif + +// -------------------------------------------------------------------------------------------------------------------- +// pid management + +struct pid_stat *root_of_pids(void); +size_t all_pids_count(void); + +struct pid_stat *get_or_allocate_pid_entry(pid_t pid); +struct pid_stat *find_pid_entry(pid_t pid); +void del_pid_entry(pid_t pid); +void update_pid_comm(struct pid_stat *p, const char *comm); +void update_pid_cmdline(struct pid_stat *p, const char *cmdline); + +bool is_process_a_manager(struct pid_stat *p); +bool is_process_an_aggregator(struct pid_stat *p); +bool is_process_an_interpreter(struct pid_stat *p); + +// -------------------------------------------------------------------------------------------------------------------- +// targets management + +struct target *find_target_by_name(struct target *base, const char *name); +struct target *get_tree_target(struct pid_stat *p); + +void aggregate_processes_to_targets(void); + +#if (PROCESSES_HAVE_UID == 1) +extern struct target *users_root_target; +struct target *get_uid_target(uid_t uid); +#endif + +#if (PROCESSES_HAVE_GID == 1) +extern struct target *groups_root_target; +struct target *get_gid_target(gid_t gid); +#endif + +#if (PROCESSES_HAVE_SID == 1) +extern struct target *sids_root_target; +struct target *get_sid_target(STRING *sid_name); +#endif + +extern struct target *apps_groups_root_target; +int read_apps_groups_conf(const char *path, const char *file); + +// -------------------------------------------------------------------------------------------------------------------- +// output -void send_proc_states_count(usec_t dt); void send_charts_updates_to_netdata(struct target *root, const char *type, const char *lbl_name, const char *title); void send_collected_data_to_netdata(struct target *root, const char *type, usec_t dt); void send_resource_usage_to_netdata(usec_t dt); -void pids_init(void); -struct pid_stat *find_pid_entry(pid_t pid); +#if (PROCESSES_HAVE_STATE == 1) +void send_proc_states_count(usec_t dt); +#endif + +#define APPS_PLUGIN_PROCESSES_FUNCTION_DESCRIPTION "Detailed information on the currently running processes." 
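// [editorial sketch] a worked example of the incremental_rate() macro defined
// earlier in this header (the variable names below are illustrative only):
//
//     kernel_uint_t rate, last = 1000;
//     usec_t prev_ut = 0, now_ut = 2 * USEC_PER_SEC;  // two seconds apart
//     incremental_rate(rate, last, 1600, now_ut, prev_ut, RATES_DETAIL);
//     // rate == (1600 - 1000) * USEC_PER_SEC * 10000 / 2000000 == 3000000,
//     // i.e. 300 events/s scaled by RATES_DETAIL; 'last' is updated to 1600.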
+void function_processes(const char *transaction, char *function, + usec_t *stop_monotonic_ut __maybe_unused, bool *cancelled __maybe_unused, + BUFFER *payload __maybe_unused, HTTP_ACCESS access, + const char *source __maybe_unused, void *data __maybe_unused); + +// -------------------------------------------------------------------------------------------------------------------- +// operating system functions + +// one time initialization per operating system +void OS_FUNCTION(apps_os_init)(void); + +// collect all the available information for all processes running +bool OS_FUNCTION(apps_os_collect_all_pids)(void); + +bool OS_FUNCTION(apps_os_read_pid_status)(struct pid_stat *p, void *ptr); +bool OS_FUNCTION(apps_os_read_pid_stat)(struct pid_stat *p, void *ptr); +bool OS_FUNCTION(apps_os_read_pid_io)(struct pid_stat *p, void *ptr); + +#if (PROCESSES_HAVE_PID_LIMITS == 1) +bool OS_FUNCTION(apps_os_read_pid_limits)(struct pid_stat *p, void *ptr); +#endif + +#if (PROCESSES_HAVE_CMDLINE == 1) +bool OS_FUNCTION(apps_os_get_pid_cmdline)(struct pid_stat *p, char *cmdline, size_t bytes); +#endif + +#if (PROCESSES_HAVE_FDS == 1) +bool OS_FUNCTION(apps_os_read_pid_fds)(struct pid_stat *p, void *ptr); +#endif + +#if (ALL_PIDS_ARE_READ_INSTANTLY == 0) +bool OS_FUNCTION(apps_os_read_global_cpu_utilization)(void); +#endif + +// return the total physical memory of the system, in bytes +uint64_t OS_FUNCTION(apps_os_get_total_memory)(void); #endif //NETDATA_APPS_PLUGIN_H diff --git a/src/collectors/apps.plugin/apps_proc_meminfo.c b/src/collectors/apps.plugin/apps_proc_meminfo.c deleted file mode 100644 index a7227c213..000000000 --- a/src/collectors/apps.plugin/apps_proc_meminfo.c +++ /dev/null @@ -1,68 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include "apps_plugin.h" - -kernel_uint_t MemTotal = 0; - -#ifdef __FreeBSD__ -static inline bool get_MemTotal_per_os(void) { - int mib[2] = {CTL_HW, HW_PHYSMEM}; - size_t size = sizeof(MemTotal); - if (sysctl(mib, 2, &MemTotal, &size, NULL, 0) == -1) { - netdata_log_error("Failed to get total memory using sysctl"); - return false; - } - // FreeBSD returns bytes; convert to kB - MemTotal /= 1024; - return true; -} -#endif // __FreeBSD__ - -#ifdef __APPLE__ -static inline bool get_MemTotal_per_os(void) { - int mib[2] = {CTL_HW, HW_MEMSIZE}; - size_t size = sizeof(MemTotal); - if (sysctl(mib, 2, &MemTotal, &size, NULL, 0) == -1) { - netdata_log_error("Failed to get total memory using sysctl"); - return false; - } - // MacOS returns bytes; convert to kB - MemTotal /= 1024; - return true; -} -#endif // __APPLE__ - -#if !defined(__FreeBSD__) && !defined(__APPLE__) -static inline bool get_MemTotal_per_os(void) { - char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, "%s/proc/meminfo", netdata_configured_host_prefix); - - procfile *ff = procfile_open(filename, ": \t", PROCFILE_FLAG_DEFAULT); - if(!ff) - return false; - - ff = procfile_readall(ff); - if(!ff) - return false; - - size_t line, lines = procfile_lines(ff); - - for(line = 0; line < lines ;line++) { - size_t words = procfile_linewords(ff, line); - if(words == 3 && strcmp(procfile_lineword(ff, line, 0), "MemTotal") == 0 && strcmp(procfile_lineword(ff, line, 2), "kB") == 0) { - kernel_uint_t n = str2ull(procfile_lineword(ff, line, 1), NULL); - if(n) MemTotal = n; - break; - } - } - - procfile_close(ff); - - return true; -} -#endif - -void get_MemTotal(void) { - if(!get_MemTotal_per_os()) - MemTotal = 0; -} diff --git a/src/collectors/apps.plugin/apps_proc_pid_cmdline.c 
b/src/collectors/apps.plugin/apps_proc_pid_cmdline.c deleted file mode 100644 index 75a60fa3a..000000000 --- a/src/collectors/apps.plugin/apps_proc_pid_cmdline.c +++ /dev/null @@ -1,130 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include "apps_plugin.h" - -#ifdef __APPLE__ -bool get_cmdline_per_os(struct pid_stat *p, char *cmdline, size_t maxBytes) { - int mib[3] = {CTL_KERN, KERN_PROCARGS2, p->pid}; - static char *args = NULL; - static size_t size = 0; - - size_t new_size; - if (sysctl(mib, 3, NULL, &new_size, NULL, 0) == -1) { - return false; - } - - if (new_size > size) { - if (args) - freez(args); - - args = (char *)mallocz(new_size); - size = new_size; - } - - memset(cmdline, 0, new_size < maxBytes ? new_size : maxBytes); - - size_t used_size = size; - if (sysctl(mib, 3, args, &used_size, NULL, 0) == -1) - return false; - - int argc; - memcpy(&argc, args, sizeof(argc)); - char *ptr = args + sizeof(argc); - used_size -= sizeof(argc); - - // Skip the executable path - while (*ptr && used_size > 0) { - ptr++; - used_size--; - } - - // Copy only the arguments to the cmdline buffer, skipping the environment variables - size_t i = 0, copied_args = 0; - bool inArg = false; - for (; used_size > 0 && i < maxBytes - 1 && copied_args < argc; --used_size, ++ptr) { - if (*ptr == '\0') { - if (inArg) { - cmdline[i++] = ' '; // Replace nulls between arguments with spaces - inArg = false; - copied_args++; - } - } else { - cmdline[i++] = *ptr; - inArg = true; - } - } - - if (i > 0 && cmdline[i - 1] == ' ') - i--; // Remove the trailing space if present - - cmdline[i] = '\0'; // Null-terminate the string - - return true; -} -#endif // __APPLE__ - -#if defined(__FreeBSD__) -static inline bool get_cmdline_per_os(struct pid_stat *p, char *cmdline, size_t bytes) { - size_t i, b = bytes - 1; - int mib[4]; - - mib[0] = CTL_KERN; - mib[1] = KERN_PROC; - mib[2] = KERN_PROC_ARGS; - mib[3] = p->pid; - if (unlikely(sysctl(mib, 4, cmdline, &b, NULL, 0))) - return false; - - cmdline[b] = '\0'; - for(i = 0; i < b ; i++) - if(unlikely(!cmdline[i])) cmdline[i] = ' '; - - return true; -} -#endif // __FreeBSD__ - -#if !defined(__FreeBSD__) && !defined(__APPLE__) -static inline bool get_cmdline_per_os(struct pid_stat *p, char *cmdline, size_t bytes) { - if(unlikely(!p->cmdline_filename)) { - char filename[FILENAME_MAX]; - snprintfz(filename, FILENAME_MAX, "%s/proc/%d/cmdline", netdata_configured_host_prefix, p->pid); - p->cmdline_filename = strdupz(filename); - } - - int fd = open(p->cmdline_filename, procfile_open_flags, 0666); - if(unlikely(fd == -1)) - return false; - - ssize_t i, b = read(fd, cmdline, bytes - 1); - close(fd); - - if(unlikely(b < 0)) - return false; - - cmdline[b] = '\0'; - for(i = 0; i < b ; i++) - if(unlikely(!cmdline[i])) cmdline[i] = ' '; - - return true; -} -#endif // !__FreeBSD__ !__APPLE__ - -int read_proc_pid_cmdline(struct pid_stat *p) { - static char cmdline[MAX_CMDLINE]; - - if(unlikely(!get_cmdline_per_os(p, cmdline, sizeof(cmdline)))) - goto cleanup; - - if(p->cmdline) freez(p->cmdline); - p->cmdline = strdupz(cmdline); - - debug_log("Read file '%s' contents: %s", p->cmdline_filename, p->cmdline); - - return 1; - -cleanup: - // copy the command to the command line - if(p->cmdline) freez(p->cmdline); - p->cmdline = strdupz(p->comm); - return 0; -} diff --git a/src/collectors/apps.plugin/apps_proc_pid_fd.c b/src/collectors/apps.plugin/apps_proc_pid_fd.c deleted file mode 100644 index 519b0794d..000000000 --- a/src/collectors/apps.plugin/apps_proc_pid_fd.c +++ /dev/null @@ 
-1,753 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include "apps_plugin.h" - -// ---------------------------------------------------------------------------- -// file descriptor -// -// this is used to keep a global list of all open files of the system. -// it is needed in order to calculate the unique files processes have open. - -#define FILE_DESCRIPTORS_INCREASE_STEP 100 - -// types for struct file_descriptor->type -typedef enum fd_filetype { - FILETYPE_OTHER, - FILETYPE_FILE, - FILETYPE_PIPE, - FILETYPE_SOCKET, - FILETYPE_INOTIFY, - FILETYPE_EVENTFD, - FILETYPE_EVENTPOLL, - FILETYPE_TIMERFD, - FILETYPE_SIGNALFD -} FD_FILETYPE; - -struct file_descriptor { - avl_t avl; - -#ifdef NETDATA_INTERNAL_CHECKS - uint32_t magic; -#endif /* NETDATA_INTERNAL_CHECKS */ - - const char *name; - uint32_t hash; - - FD_FILETYPE type; - int count; - int pos; -} *all_files = NULL; - -// ---------------------------------------------------------------------------- - -static inline void reallocate_target_fds(struct target *w) { - if(unlikely(!w)) - return; - - if(unlikely(!w->target_fds || w->target_fds_size < all_files_size)) { - w->target_fds = reallocz(w->target_fds, sizeof(int) * all_files_size); - memset(&w->target_fds[w->target_fds_size], 0, sizeof(int) * (all_files_size - w->target_fds_size)); - w->target_fds_size = all_files_size; - } -} - -static void aggregage_fd_type_on_openfds(FD_FILETYPE type, struct openfds *openfds) { - switch(type) { - case FILETYPE_FILE: - openfds->files++; - break; - - case FILETYPE_PIPE: - openfds->pipes++; - break; - - case FILETYPE_SOCKET: - openfds->sockets++; - break; - - case FILETYPE_INOTIFY: - openfds->inotifies++; - break; - - case FILETYPE_EVENTFD: - openfds->eventfds++; - break; - - case FILETYPE_TIMERFD: - openfds->timerfds++; - break; - - case FILETYPE_SIGNALFD: - openfds->signalfds++; - break; - - case FILETYPE_EVENTPOLL: - openfds->eventpolls++; - break; - - case FILETYPE_OTHER: - openfds->other++; - break; - } -} - -static inline void aggregate_fd_on_target(int fd, struct target *w) { - if(unlikely(!w)) - return; - - if(unlikely(w->target_fds[fd])) { - // it is already aggregated - // just increase its usage counter - w->target_fds[fd]++; - return; - } - - // increase its usage counter - // so that we will not add it again - w->target_fds[fd]++; - - aggregage_fd_type_on_openfds(all_files[fd].type, &w->openfds); -} - -void aggregate_pid_fds_on_targets(struct pid_stat *p) { - - if(unlikely(!p->updated)) { - // the process is not running - return; - } - - struct target *w = p->target, *u = p->user_target, *g = p->group_target; - - reallocate_target_fds(w); - reallocate_target_fds(u); - reallocate_target_fds(g); - - p->openfds.files = 0; - p->openfds.pipes = 0; - p->openfds.sockets = 0; - p->openfds.inotifies = 0; - p->openfds.eventfds = 0; - p->openfds.timerfds = 0; - p->openfds.signalfds = 0; - p->openfds.eventpolls = 0; - p->openfds.other = 0; - - long currentfds = 0; - size_t c, size = p->fds_size; - struct pid_fd *fds = p->fds; - for(c = 0; c < size ;c++) { - int fd = fds[c].fd; - - if(likely(fd <= 0 || fd >= all_files_size)) - continue; - - currentfds++; - aggregage_fd_type_on_openfds(all_files[fd].type, &p->openfds); - - aggregate_fd_on_target(fd, w); - aggregate_fd_on_target(fd, u); - aggregate_fd_on_target(fd, g); - } -} - -// ---------------------------------------------------------------------------- - -int file_descriptor_compare(void* a, void* b) { -#ifdef NETDATA_INTERNAL_CHECKS - if(((struct file_descriptor *)a)->magic != 0x0BADCAFE 
|| ((struct file_descriptor *)b)->magic != 0x0BADCAFE) - netdata_log_error("Corrupted index data detected. Please report this."); -#endif /* NETDATA_INTERNAL_CHECKS */ - - if(((struct file_descriptor *)a)->hash < ((struct file_descriptor *)b)->hash) - return -1; - - else if(((struct file_descriptor *)a)->hash > ((struct file_descriptor *)b)->hash) - return 1; - - else - return strcmp(((struct file_descriptor *)a)->name, ((struct file_descriptor *)b)->name); -} - -// int file_descriptor_iterator(avl_t *a) { if(a) {}; return 0; } - -avl_tree_type all_files_index = { - NULL, - file_descriptor_compare -}; - -static struct file_descriptor *file_descriptor_find(const char *name, uint32_t hash) { - struct file_descriptor tmp; - tmp.hash = (hash)?hash:simple_hash(name); - tmp.name = name; - tmp.count = 0; - tmp.pos = 0; -#ifdef NETDATA_INTERNAL_CHECKS - tmp.magic = 0x0BADCAFE; -#endif /* NETDATA_INTERNAL_CHECKS */ - - return (struct file_descriptor *)avl_search(&all_files_index, (avl_t *) &tmp); -} - -#define file_descriptor_add(fd) avl_insert(&all_files_index, (avl_t *)(fd)) -#define file_descriptor_remove(fd) avl_remove(&all_files_index, (avl_t *)(fd)) - -// ---------------------------------------------------------------------------- - -void file_descriptor_not_used(int id) { - if(id > 0 && id < all_files_size) { - -#ifdef NETDATA_INTERNAL_CHECKS - if(all_files[id].magic != 0x0BADCAFE) { - netdata_log_error("Ignoring request to remove empty file id %d.", id); - return; - } -#endif /* NETDATA_INTERNAL_CHECKS */ - - debug_log("decreasing slot %d (count = %d).", id, all_files[id].count); - - if(all_files[id].count > 0) { - all_files[id].count--; - - if(!all_files[id].count) { - debug_log(" >> slot %d is empty.", id); - - if(unlikely(file_descriptor_remove(&all_files[id]) != (void *)&all_files[id])) - netdata_log_error("INTERNAL ERROR: removal of unused fd from index, removed a different fd"); - -#ifdef NETDATA_INTERNAL_CHECKS - all_files[id].magic = 0x00000000; -#endif /* NETDATA_INTERNAL_CHECKS */ - all_files_len--; - } - } - else - netdata_log_error("Request to decrease counter of fd %d (%s), while the use counter is 0", - id, - all_files[id].name); - } - else - netdata_log_error("Request to decrease counter of fd %d, which is outside the array size (1 to %d)", - id, - all_files_size); -} - -static inline void all_files_grow() { - void *old = all_files; - int i; - - // there is no empty slot - debug_log("extending fd array to %d entries", all_files_size + FILE_DESCRIPTORS_INCREASE_STEP); - - all_files = reallocz(all_files, (all_files_size + FILE_DESCRIPTORS_INCREASE_STEP) * sizeof(struct file_descriptor)); - - // if the address changed, we have to rebuild the index - // since all pointers are now invalid - - if(unlikely(old && old != (void *)all_files)) { - debug_log(" >> re-indexing."); - - all_files_index.root = NULL; - for(i = 0; i < all_files_size; i++) { - if(!all_files[i].count) continue; - if(unlikely(file_descriptor_add(&all_files[i]) != (void *)&all_files[i])) - netdata_log_error("INTERNAL ERROR: duplicate indexing of fd during realloc."); - } - - debug_log(" >> re-indexing done."); - } - - // initialize the newly added entries - - for(i = all_files_size; i < (all_files_size + FILE_DESCRIPTORS_INCREASE_STEP); i++) { - all_files[i].count = 0; - all_files[i].name = NULL; -#ifdef NETDATA_INTERNAL_CHECKS - all_files[i].magic = 0x00000000; -#endif /* NETDATA_INTERNAL_CHECKS */ - all_files[i].pos = i; - } - - if(unlikely(!all_files_size)) all_files_len = 1; - all_files_size += 
FILE_DESCRIPTORS_INCREASE_STEP; -} - -static inline int file_descriptor_set_on_empty_slot(const char *name, uint32_t hash, FD_FILETYPE type) { - // check we have enough memory to add it - if(!all_files || all_files_len == all_files_size) - all_files_grow(); - - debug_log(" >> searching for empty slot."); - - // search for an empty slot - - static int last_pos = 0; - int i, c; - for(i = 0, c = last_pos ; i < all_files_size ; i++, c++) { - if(c >= all_files_size) c = 0; - if(c == 0) continue; - - if(!all_files[c].count) { - debug_log(" >> Examining slot %d.", c); - -#ifdef NETDATA_INTERNAL_CHECKS - if(all_files[c].magic == 0x0BADCAFE && all_files[c].name && file_descriptor_find(all_files[c].name, all_files[c].hash)) - netdata_log_error("fd on position %d is not cleared properly. It still has %s in it.", c, all_files[c].name); -#endif /* NETDATA_INTERNAL_CHECKS */ - - debug_log(" >> %s fd position %d for %s (last name: %s)", all_files[c].name?"re-using":"using", c, name, all_files[c].name); - - freez((void *)all_files[c].name); - all_files[c].name = NULL; - last_pos = c; - break; - } - } - - all_files_len++; - - if(i == all_files_size) { - fatal("We should find an empty slot, but there isn't any"); - exit(1); - } - // else we have an empty slot in 'c' - - debug_log(" >> updating slot %d.", c); - - all_files[c].name = strdupz(name); - all_files[c].hash = hash; - all_files[c].type = type; - all_files[c].pos = c; - all_files[c].count = 1; -#ifdef NETDATA_INTERNAL_CHECKS - all_files[c].magic = 0x0BADCAFE; -#endif /* NETDATA_INTERNAL_CHECKS */ - if(unlikely(file_descriptor_add(&all_files[c]) != (void *)&all_files[c])) - netdata_log_error("INTERNAL ERROR: duplicate indexing of fd."); - - debug_log("using fd position %d (name: %s)", c, all_files[c].name); - - return c; -} - -static inline int file_descriptor_find_or_add(const char *name, uint32_t hash) { - if(unlikely(!hash)) - hash = simple_hash(name); - - debug_log("adding or finding name '%s' with hash %u", name, hash); - - struct file_descriptor *fd = file_descriptor_find(name, hash); - if(fd) { - // found - debug_log(" >> found on slot %d", fd->pos); - - fd->count++; - return fd->pos; - } - // not found - - FD_FILETYPE type; - if(likely(name[0] == '/')) type = FILETYPE_FILE; - else if(likely(strncmp(name, "pipe:", 5) == 0)) type = FILETYPE_PIPE; - else if(likely(strncmp(name, "socket:", 7) == 0)) type = FILETYPE_SOCKET; - else if(likely(strncmp(name, "anon_inode:", 11) == 0)) { - const char *t = &name[11]; - - if(strcmp(t, "inotify") == 0) type = FILETYPE_INOTIFY; - else if(strcmp(t, "[eventfd]") == 0) type = FILETYPE_EVENTFD; - else if(strcmp(t, "[eventpoll]") == 0) type = FILETYPE_EVENTPOLL; - else if(strcmp(t, "[timerfd]") == 0) type = FILETYPE_TIMERFD; - else if(strcmp(t, "[signalfd]") == 0) type = FILETYPE_SIGNALFD; - else { - debug_log("UNKNOWN anonymous inode: %s", name); - type = FILETYPE_OTHER; - } - } - else if(likely(strcmp(name, "inotify") == 0)) type = FILETYPE_INOTIFY; - else { - debug_log("UNKNOWN linkname: %s", name); - type = FILETYPE_OTHER; - } - - return file_descriptor_set_on_empty_slot(name, hash, type); -} - -void clear_pid_fd(struct pid_fd *pfd) { - pfd->fd = 0; - -#if !defined(__FreeBSD__) && !defined(__APPLE__) - pfd->link_hash = 0; - pfd->inode = 0; - pfd->cache_iterations_counter = 0; - pfd->cache_iterations_reset = 0; -#endif -} - -static inline void make_all_pid_fds_negative(struct pid_stat *p) { - struct pid_fd *pfd = p->fds, *pfdend = &p->fds[p->fds_size]; - while(pfd < pfdend) { - pfd->fd = -(pfd->fd); - pfd++; - 
} -} - -static inline void cleanup_negative_pid_fds(struct pid_stat *p) { - struct pid_fd *pfd = p->fds, *pfdend = &p->fds[p->fds_size]; - - while(pfd < pfdend) { - int fd = pfd->fd; - - if(unlikely(fd < 0)) { - file_descriptor_not_used(-(fd)); - clear_pid_fd(pfd); - } - - pfd++; - } -} - -void init_pid_fds(struct pid_stat *p, size_t first, size_t size) { - struct pid_fd *pfd = &p->fds[first], *pfdend = &p->fds[first + size]; - - while(pfd < pfdend) { -#if !defined(__FreeBSD__) && !defined(__APPLE__) - pfd->filename = NULL; -#endif - clear_pid_fd(pfd); - pfd++; - } -} - -#ifdef __APPLE__ -static bool read_pid_file_descriptors_per_os(struct pid_stat *p, void *ptr __maybe_unused) { - static struct proc_fdinfo *fds = NULL; - static int fdsCapacity = 0; - - int bufferSize = proc_pidinfo(p->pid, PROC_PIDLISTFDS, 0, NULL, 0); - if (bufferSize <= 0) { - netdata_log_error("Failed to get the size of file descriptors for PID %d", p->pid); - return false; - } - - // Resize buffer if necessary - if (bufferSize > fdsCapacity) { - if(fds) - freez(fds); - - fds = mallocz(bufferSize); - fdsCapacity = bufferSize; - } - - int num_fds = proc_pidinfo(p->pid, PROC_PIDLISTFDS, 0, fds, bufferSize) / PROC_PIDLISTFD_SIZE; - if (num_fds <= 0) { - netdata_log_error("Failed to get the file descriptors for PID %d", p->pid); - return false; - } - - for (int i = 0; i < num_fds; i++) { - switch (fds[i].proc_fdtype) { - case PROX_FDTYPE_VNODE: { - struct vnode_fdinfowithpath vi; - if (proc_pidfdinfo(p->pid, fds[i].proc_fd, PROC_PIDFDVNODEPATHINFO, &vi, sizeof(vi)) > 0) - p->openfds.files++; - else - p->openfds.other++; - - break; - } - case PROX_FDTYPE_SOCKET: { - p->openfds.sockets++; - break; - } - case PROX_FDTYPE_PIPE: { - p->openfds.pipes++; - break; - } - - default: - p->openfds.other++; - break; - } - } - - return true; -} -#endif // __APPLE__ - -#if defined(__FreeBSD__) -static bool read_pid_file_descriptors_per_os(struct pid_stat *p, void *ptr) { - int mib[4]; - size_t size; - struct kinfo_file *fds; - static char *fdsbuf; - char *bfdsbuf, *efdsbuf; - char fdsname[FILENAME_MAX + 1]; -#define SHM_FORMAT_LEN 31 // format: 21 + size: 10 - char shm_name[FILENAME_MAX - SHM_FORMAT_LEN + 1]; - - // we make all pid fds negative, so that - // we can detect unused file descriptors - // at the end, to free them - make_all_pid_fds_negative(p); - - mib[0] = CTL_KERN; - mib[1] = KERN_PROC; - mib[2] = KERN_PROC_FILEDESC; - mib[3] = p->pid; - - if (unlikely(sysctl(mib, 4, NULL, &size, NULL, 0))) { - netdata_log_error("sysctl error: Can't get file descriptors data size for pid %d", p->pid); - return false; - } - if (likely(size > 0)) - fdsbuf = reallocz(fdsbuf, size); - if (unlikely(sysctl(mib, 4, fdsbuf, &size, NULL, 0))) { - netdata_log_error("sysctl error: Can't get file descriptors data for pid %d", p->pid); - return false; - } - - bfdsbuf = fdsbuf; - efdsbuf = fdsbuf + size; - while (bfdsbuf < efdsbuf) { - fds = (struct kinfo_file *)(uintptr_t)bfdsbuf; - if (unlikely(fds->kf_structsize == 0)) - break; - - // do not process file descriptors for current working directory, root directory, - // jail directory, ktrace vnode, text vnode and controlling terminal - if (unlikely(fds->kf_fd < 0)) { - bfdsbuf += fds->kf_structsize; - continue; - } - - // get file descriptors array index - size_t fdid = fds->kf_fd; - - // check if the fds array is small - if (unlikely(fdid >= p->fds_size)) { - // it is small, extend it - - debug_log("extending fd memory slots for %s from %d to %d", p->comm, p->fds_size, fdid + MAX_SPARE_FDS); - - p->fds 
= reallocz(p->fds, (fdid + MAX_SPARE_FDS) * sizeof(struct pid_fd)); - - // and initialize it - init_pid_fds(p, p->fds_size, (fdid + MAX_SPARE_FDS) - p->fds_size); - p->fds_size = fdid + MAX_SPARE_FDS; - } - - if (unlikely(p->fds[fdid].fd == 0)) { - // we don't know this fd, get it - - switch (fds->kf_type) { - case KF_TYPE_FIFO: - case KF_TYPE_VNODE: - if (unlikely(!fds->kf_path[0])) { - sprintf(fdsname, "other: inode: %lu", fds->kf_un.kf_file.kf_file_fileid); - break; - } - sprintf(fdsname, "%s", fds->kf_path); - break; - case KF_TYPE_SOCKET: - switch (fds->kf_sock_domain) { - case AF_INET: - case AF_INET6: -#if __FreeBSD_version < 1400074 - if (fds->kf_sock_protocol == IPPROTO_TCP) - sprintf(fdsname, "socket: %d %lx", fds->kf_sock_protocol, fds->kf_un.kf_sock.kf_sock_inpcb); - else -#endif - sprintf(fdsname, "socket: %d %lx", fds->kf_sock_protocol, fds->kf_un.kf_sock.kf_sock_pcb); - break; - case AF_UNIX: - /* print address of pcb and connected pcb */ - sprintf(fdsname, "socket: %lx %lx", fds->kf_un.kf_sock.kf_sock_pcb, fds->kf_un.kf_sock.kf_sock_unpconn); - break; - default: - /* print protocol number and socket address */ -#if __FreeBSD_version < 1200031 - sprintf(fdsname, "socket: other: %d %s %s", fds->kf_sock_protocol, fds->kf_sa_local.__ss_pad1, fds->kf_sa_local.__ss_pad2); -#else - sprintf(fdsname, "socket: other: %d %s %s", fds->kf_sock_protocol, fds->kf_un.kf_sock.kf_sa_local.__ss_pad1, fds->kf_un.kf_sock.kf_sa_local.__ss_pad2); -#endif - } - break; - case KF_TYPE_PIPE: - sprintf(fdsname, "pipe: %lu %lu", fds->kf_un.kf_pipe.kf_pipe_addr, fds->kf_un.kf_pipe.kf_pipe_peer); - break; - case KF_TYPE_PTS: -#if __FreeBSD_version < 1200031 - sprintf(fdsname, "other: pts: %u", fds->kf_un.kf_pts.kf_pts_dev); -#else - sprintf(fdsname, "other: pts: %lu", fds->kf_un.kf_pts.kf_pts_dev); -#endif - break; - case KF_TYPE_SHM: - strncpyz(shm_name, fds->kf_path, FILENAME_MAX - SHM_FORMAT_LEN); - sprintf(fdsname, "other: shm: %s size: %lu", shm_name, fds->kf_un.kf_file.kf_file_size); - break; - case KF_TYPE_SEM: - sprintf(fdsname, "other: sem: %u", fds->kf_un.kf_sem.kf_sem_value); - break; - default: - sprintf(fdsname, "other: pid: %d fd: %d", fds->kf_un.kf_proc.kf_pid, fds->kf_fd); - } - - // if another process already has this, we will get - // the same id - p->fds[fdid].fd = file_descriptor_find_or_add(fdsname, 0); - } - - // else make it positive again, we need it - // of course, the actual file may have changed - - else - p->fds[fdid].fd = -p->fds[fdid].fd; - - bfdsbuf += fds->kf_structsize; - } - - return true; -} -#endif // __FreeBSD__ - -#if !defined(__FreeBSD__) && !defined(__APPLE__) -static bool read_pid_file_descriptors_per_os(struct pid_stat *p, void *ptr __maybe_unused) { - if(unlikely(!p->fds_dirname)) { - char dirname[FILENAME_MAX+1]; - snprintfz(dirname, FILENAME_MAX, "%s/proc/%d/fd", netdata_configured_host_prefix, p->pid); - p->fds_dirname = strdupz(dirname); - } - - DIR *fds = opendir(p->fds_dirname); - if(unlikely(!fds)) return false; - - struct dirent *de; - char linkname[FILENAME_MAX + 1]; - - // we make all pid fds negative, so that - // we can detect unused file descriptors - // at the end, to free them - make_all_pid_fds_negative(p); - - while((de = readdir(fds))) { - // we need only files with numeric names - - if(unlikely(de->d_name[0] < '0' || de->d_name[0] > '9')) - continue; - - // get its number - int fdid = (int) str2l(de->d_name); - if(unlikely(fdid < 0)) continue; - - // check if the fds array is small - if(unlikely((size_t)fdid >= p->fds_size)) { - // it is 
small, extend it - - debug_log("extending fd memory slots for %s from %d to %d" - , p->comm - , p->fds_size - , fdid + MAX_SPARE_FDS - ); - - p->fds = reallocz(p->fds, (fdid + MAX_SPARE_FDS) * sizeof(struct pid_fd)); - - // and initialize it - init_pid_fds(p, p->fds_size, (fdid + MAX_SPARE_FDS) - p->fds_size); - p->fds_size = (size_t)fdid + MAX_SPARE_FDS; - } - - if(unlikely(p->fds[fdid].fd < 0 && de->d_ino != p->fds[fdid].inode)) { - // inodes do not match, clear the previous entry - inodes_changed_counter++; - file_descriptor_not_used(-p->fds[fdid].fd); - clear_pid_fd(&p->fds[fdid]); - } - - if(p->fds[fdid].fd < 0 && p->fds[fdid].cache_iterations_counter > 0) { - p->fds[fdid].fd = -p->fds[fdid].fd; - p->fds[fdid].cache_iterations_counter--; - continue; - } - - if(unlikely(!p->fds[fdid].filename)) { - filenames_allocated_counter++; - char fdname[FILENAME_MAX + 1]; - snprintfz(fdname, FILENAME_MAX, "%s/proc/%d/fd/%s", netdata_configured_host_prefix, p->pid, de->d_name); - p->fds[fdid].filename = strdupz(fdname); - } - - file_counter++; - ssize_t l = readlink(p->fds[fdid].filename, linkname, FILENAME_MAX); - if(unlikely(l == -1)) { - // cannot read the link - - if(debug_enabled || (p->target && p->target->debug_enabled)) - netdata_log_error("Cannot read link %s", p->fds[fdid].filename); - - if(unlikely(p->fds[fdid].fd < 0)) { - file_descriptor_not_used(-p->fds[fdid].fd); - clear_pid_fd(&p->fds[fdid]); - } - - continue; - } - else - linkname[l] = '\0'; - - uint32_t link_hash = simple_hash(linkname); - - if(unlikely(p->fds[fdid].fd < 0 && p->fds[fdid].link_hash != link_hash)) { - // the link changed - links_changed_counter++; - file_descriptor_not_used(-p->fds[fdid].fd); - clear_pid_fd(&p->fds[fdid]); - } - - if(unlikely(p->fds[fdid].fd == 0)) { - // we don't know this fd, get it - - // if another process already has this, we will get - // the same id - p->fds[fdid].fd = file_descriptor_find_or_add(linkname, link_hash); - p->fds[fdid].inode = de->d_ino; - p->fds[fdid].link_hash = link_hash; - } - else { - // else make it positive again, we need it - p->fds[fdid].fd = -p->fds[fdid].fd; - } - - // caching control - // without this we read all the files on every iteration - if(max_fds_cache_seconds > 0) { - size_t spread = ((size_t)max_fds_cache_seconds > 10) ? 10 : (size_t)max_fds_cache_seconds; - - // cache it for a few iterations - size_t max = ((size_t) max_fds_cache_seconds + (fdid % spread)) / (size_t) update_every; - p->fds[fdid].cache_iterations_reset++; - - if(unlikely(p->fds[fdid].cache_iterations_reset % spread == (size_t) fdid % spread)) - p->fds[fdid].cache_iterations_reset++; - - if(unlikely((fdid <= 2 && p->fds[fdid].cache_iterations_reset > 5) || - p->fds[fdid].cache_iterations_reset > max)) { - // for stdin, stdout, stderr (fdid <= 2) we have checked a few times, or if it goes above the max, goto max - p->fds[fdid].cache_iterations_reset = max; - } - - p->fds[fdid].cache_iterations_counter = p->fds[fdid].cache_iterations_reset; - } - } - - closedir(fds); - - return true; -} -#endif // !__FreeBSD__ !__APPLE - -int read_pid_file_descriptors(struct pid_stat *p, void *ptr) { - bool ret = read_pid_file_descriptors_per_os(p, ptr); - cleanup_negative_pid_fds(p); - - return ret ? 
1 : 0; -} diff --git a/src/collectors/apps.plugin/apps_proc_pid_io.c b/src/collectors/apps.plugin/apps_proc_pid_io.c deleted file mode 100644 index 0fef3fc24..000000000 --- a/src/collectors/apps.plugin/apps_proc_pid_io.c +++ /dev/null @@ -1,95 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include "apps_plugin.h" - -static inline void clear_pid_io(struct pid_stat *p) { - p->io_logical_bytes_read = 0; - p->io_logical_bytes_written = 0; - p->io_read_calls = 0; - p->io_write_calls = 0; - p->io_storage_bytes_read = 0; - p->io_storage_bytes_written = 0; - p->io_cancelled_write_bytes = 0; -} - -#if defined(__FreeBSD__) -static inline bool read_proc_pid_io_per_os(struct pid_stat *p, void *ptr) { - struct kinfo_proc *proc_info = (struct kinfo_proc *)ptr; - - pid_incremental_rate(io, p->io_storage_bytes_read, proc_info->ki_rusage.ru_inblock); - pid_incremental_rate(io, p->io_storage_bytes_written, proc_info->ki_rusage.ru_oublock); - - p->io_logical_bytes_read = 0; - p->io_logical_bytes_written = 0; - p->io_read_calls = 0; - p->io_write_calls = 0; - p->io_cancelled_write_bytes = 0; - - return true; -} -#endif - -#ifdef __APPLE__ -static inline bool read_proc_pid_io_per_os(struct pid_stat *p, void *ptr) { - struct pid_info *pi = ptr; - - // On MacOS, the proc_pid_rusage provides disk_io_statistics which includes io bytes read and written - // but does not provide the same level of detail as Linux, like separating logical and physical I/O bytes. - pid_incremental_rate(io, p->io_storage_bytes_read, pi->rusageinfo.ri_diskio_bytesread); - pid_incremental_rate(io, p->io_storage_bytes_written, pi->rusageinfo.ri_diskio_byteswritten); - - p->io_logical_bytes_read = 0; - p->io_logical_bytes_written = 0; - p->io_read_calls = 0; - p->io_write_calls = 0; - p->io_cancelled_write_bytes = 0; - - return true; -} -#endif // __APPLE__ - -#if !defined(__FreeBSD__) && !defined(__APPLE__) -static inline int read_proc_pid_io_per_os(struct pid_stat *p, void *ptr __maybe_unused) { - static procfile *ff = NULL; - - if(unlikely(!p->io_filename)) { - char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, "%s/proc/%d/io", netdata_configured_host_prefix, p->pid); - p->io_filename = strdupz(filename); - } - - // open the file - ff = procfile_reopen(ff, p->io_filename, NULL, PROCFILE_FLAG_NO_ERROR_ON_FILE_IO); - if(unlikely(!ff)) goto cleanup; - - ff = procfile_readall(ff); - if(unlikely(!ff)) goto cleanup; - - pid_incremental_rate(io, p->io_logical_bytes_read, str2kernel_uint_t(procfile_lineword(ff, 0, 1))); - pid_incremental_rate(io, p->io_logical_bytes_written, str2kernel_uint_t(procfile_lineword(ff, 1, 1))); - pid_incremental_rate(io, p->io_read_calls, str2kernel_uint_t(procfile_lineword(ff, 2, 1))); - pid_incremental_rate(io, p->io_write_calls, str2kernel_uint_t(procfile_lineword(ff, 3, 1))); - pid_incremental_rate(io, p->io_storage_bytes_read, str2kernel_uint_t(procfile_lineword(ff, 4, 1))); - pid_incremental_rate(io, p->io_storage_bytes_written, str2kernel_uint_t(procfile_lineword(ff, 5, 1))); - pid_incremental_rate(io, p->io_cancelled_write_bytes, str2kernel_uint_t(procfile_lineword(ff, 6, 1))); - - return true; - -cleanup: - clear_pid_io(p); - return false; -} -#endif // !__FreeBSD__ !__APPLE__ - -int read_proc_pid_io(struct pid_stat *p, void *ptr) { - p->last_io_collected_usec = p->io_collected_usec; - p->io_collected_usec = now_monotonic_usec(); - calls_counter++; - - bool ret = read_proc_pid_io_per_os(p, ptr); - - if(unlikely(global_iterations_counter == 1)) - clear_pid_io(p); - - return ret ? 
1 : 0; -} diff --git a/src/collectors/apps.plugin/apps_proc_pid_limits.c b/src/collectors/apps.plugin/apps_proc_pid_limits.c deleted file mode 100644 index 7485086ba..000000000 --- a/src/collectors/apps.plugin/apps_proc_pid_limits.c +++ /dev/null @@ -1,151 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include "apps_plugin.h" - -// ---------------------------------------------------------------------------- - -#define MAX_PROC_PID_LIMITS 8192 -#define PROC_PID_LIMITS_MAX_OPEN_FILES_KEY "\nMax open files " - -static inline kernel_uint_t get_proc_pid_limits_limit(char *buf, const char *key, size_t key_len, kernel_uint_t def) { - char *line = strstr(buf, key); - if(!line) - return def; - - char *v = &line[key_len]; - while(isspace(*v)) v++; - - if(strcmp(v, "unlimited") == 0) - return 0; - - return str2ull(v, NULL); -} - -#if defined(__FreeBSD__) || defined(__APPLE__) -int read_proc_pid_limits_per_os(struct pid_stat *p, void *ptr __maybe_unused) { - return false; -} -#endif - -#if !defined(__FreeBSD__) && !defined(__APPLE__) -static inline bool read_proc_pid_limits_per_os(struct pid_stat *p, void *ptr __maybe_unused) { - static char proc_pid_limits_buffer[MAX_PROC_PID_LIMITS + 1]; - bool ret = false; - bool read_limits = false; - - errno_clear(); - proc_pid_limits_buffer[0] = '\0'; - - kernel_uint_t all_fds = pid_openfds_sum(p); - if(all_fds < p->limits.max_open_files / 2 && p->io_collected_usec > p->last_limits_collected_usec && p->io_collected_usec - p->last_limits_collected_usec <= 60 * USEC_PER_SEC) { - // too frequent, we want to collect limits once per minute - ret = true; - goto cleanup; - } - - if(unlikely(!p->limits_filename)) { - char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, "%s/proc/%d/limits", netdata_configured_host_prefix, p->pid); - p->limits_filename = strdupz(filename); - } - - int fd = open(p->limits_filename, procfile_open_flags, 0666); - if(unlikely(fd == -1)) goto cleanup; - - ssize_t bytes = read(fd, proc_pid_limits_buffer, MAX_PROC_PID_LIMITS); - close(fd); - - if(bytes <= 0) - goto cleanup; - - // make it '\0' terminated - if(bytes < MAX_PROC_PID_LIMITS) - proc_pid_limits_buffer[bytes] = '\0'; - else - proc_pid_limits_buffer[MAX_PROC_PID_LIMITS - 1] = '\0'; - - p->limits.max_open_files = get_proc_pid_limits_limit(proc_pid_limits_buffer, PROC_PID_LIMITS_MAX_OPEN_FILES_KEY, sizeof(PROC_PID_LIMITS_MAX_OPEN_FILES_KEY) - 1, 0); - if(p->limits.max_open_files == 1) { - // it seems a bug in the kernel or something similar - // it sets max open files to 1 but the number of files - // the process has open are more than 1... 
- // https://github.com/netdata/netdata/issues/15443 - p->limits.max_open_files = 0; - ret = true; - goto cleanup; - } - - p->last_limits_collected_usec = p->io_collected_usec; - read_limits = true; - - ret = true; - -cleanup: - if(p->limits.max_open_files) - p->openfds_limits_percent = (NETDATA_DOUBLE)all_fds * 100.0 / (NETDATA_DOUBLE)p->limits.max_open_files; - else - p->openfds_limits_percent = 0.0; - - if(p->openfds_limits_percent > 100.0) { - if(!(p->log_thrown & PID_LOG_LIMITS_DETAIL)) { - char *line; - - if(!read_limits) { - proc_pid_limits_buffer[0] = '\0'; - line = "NOT READ"; - } - else { - line = strstr(proc_pid_limits_buffer, PROC_PID_LIMITS_MAX_OPEN_FILES_KEY); - if (line) { - line++; // skip the initial newline - - char *end = strchr(line, '\n'); - if (end) - *end = '\0'; - } - } - - netdata_log_info( - "FDS_LIMITS: PID %d (%s) is using " - "%0.2f %% of its fds limits, " - "open fds = %"PRIu64 "(" - "files = %"PRIu64 ", " - "pipes = %"PRIu64 ", " - "sockets = %"PRIu64", " - "inotifies = %"PRIu64", " - "eventfds = %"PRIu64", " - "timerfds = %"PRIu64", " - "signalfds = %"PRIu64", " - "eventpolls = %"PRIu64" " - "other = %"PRIu64" " - "), open fds limit = %"PRIu64", " - "%s, " - "original line [%s]", - p->pid, p->comm, p->openfds_limits_percent, all_fds, - p->openfds.files, - p->openfds.pipes, - p->openfds.sockets, - p->openfds.inotifies, - p->openfds.eventfds, - p->openfds.timerfds, - p->openfds.signalfds, - p->openfds.eventpolls, - p->openfds.other, - p->limits.max_open_files, - read_limits ? "and we have read the limits AFTER counting the fds" - : "but we have read the limits BEFORE counting the fds", - line); - - p->log_thrown |= PID_LOG_LIMITS_DETAIL; - } - } - else - p->log_thrown &= ~PID_LOG_LIMITS_DETAIL; - - return ret; -} -#endif // !__FreeBSD__ !__APPLE__ - -int read_proc_pid_limits(struct pid_stat *p, void *ptr) { - return read_proc_pid_limits_per_os(p, ptr) ? 1 : 0; -} diff --git a/src/collectors/apps.plugin/apps_proc_pid_stat.c b/src/collectors/apps.plugin/apps_proc_pid_stat.c deleted file mode 100644 index 8767f7831..000000000 --- a/src/collectors/apps.plugin/apps_proc_pid_stat.c +++ /dev/null @@ -1,293 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include "apps_plugin.h" - -// ---------------------------------------------------------------------------- - -static inline void assign_target_to_pid(struct pid_stat *p) { - targets_assignment_counter++; - - uint32_t hash = simple_hash(p->comm); - size_t pclen = strlen(p->comm); - - struct target *w; - for(w = apps_groups_root_target; w ; w = w->next) { - // if(debug_enabled || (p->target && p->target->debug_enabled)) debug_log_int("\t\tcomparing '%s' with '%s'", w->compare, p->comm); - - // find it - 4 cases: - // 1. the target is not a pattern - // 2. the target has the prefix - // 3. the target has the suffix - // 4. 
the target is something inside cmdline - - if(unlikely(( (!w->starts_with && !w->ends_with && w->comparehash == hash && !strcmp(w->compare, p->comm)) - || (w->starts_with && !w->ends_with && !strncmp(w->compare, p->comm, w->comparelen)) - || (!w->starts_with && w->ends_with && pclen >= w->comparelen && !strcmp(w->compare, &p->comm[pclen - w->comparelen])) - || (proc_pid_cmdline_is_needed && w->starts_with && w->ends_with && p->cmdline && strstr(p->cmdline, w->compare)) - ))) { - - p->matched_by_config = true; - if(w->target) p->target = w->target; - else p->target = w; - - if(debug_enabled || (p->target && p->target->debug_enabled)) - debug_log_int("%s linked to target %s", p->comm, p->target->name); - - break; - } - } -} - -static inline void update_pid_comm(struct pid_stat *p, const char *comm) { - if(strcmp(p->comm, comm) != 0) { - if(unlikely(debug_enabled)) { - if(p->comm[0]) - debug_log("\tpid %d (%s) changed name to '%s'", p->pid, p->comm, comm); - else - debug_log("\tJust added %d (%s)", p->pid, comm); - } - - strncpyz(p->comm, comm, MAX_COMPARE_NAME); - - // /proc//cmdline - if(likely(proc_pid_cmdline_is_needed)) - managed_log(p, PID_LOG_CMDLINE, read_proc_pid_cmdline(p)); - - assign_target_to_pid(p); - } -} - -static inline void clear_pid_stat(struct pid_stat *p, bool threads) { - p->minflt = 0; - p->cminflt = 0; - p->majflt = 0; - p->cmajflt = 0; - p->utime = 0; - p->stime = 0; - p->gtime = 0; - p->cutime = 0; - p->cstime = 0; - p->cgtime = 0; - - if(threads) - p->num_threads = 0; - - // p->rss = 0; -} - -#if defined(__FreeBSD__) -static inline bool read_proc_pid_stat_per_os(struct pid_stat *p, void *ptr) { - struct kinfo_proc *proc_info = (struct kinfo_proc *)ptr; - if (unlikely(proc_info->ki_tdflags & TDF_IDLETD)) - goto cleanup; - - char *comm = proc_info->ki_comm; - p->ppid = proc_info->ki_ppid; - - update_pid_comm(p, comm); - - pid_incremental_rate(stat, p->minflt, (kernel_uint_t)proc_info->ki_rusage.ru_minflt); - pid_incremental_rate(stat, p->cminflt, (kernel_uint_t)proc_info->ki_rusage_ch.ru_minflt); - pid_incremental_rate(stat, p->majflt, (kernel_uint_t)proc_info->ki_rusage.ru_majflt); - pid_incremental_rate(stat, p->cmajflt, (kernel_uint_t)proc_info->ki_rusage_ch.ru_majflt); - pid_incremental_rate(stat, p->utime, (kernel_uint_t)proc_info->ki_rusage.ru_utime.tv_sec * 100 + proc_info->ki_rusage.ru_utime.tv_usec / 10000); - pid_incremental_rate(stat, p->stime, (kernel_uint_t)proc_info->ki_rusage.ru_stime.tv_sec * 100 + proc_info->ki_rusage.ru_stime.tv_usec / 10000); - pid_incremental_rate(stat, p->cutime, (kernel_uint_t)proc_info->ki_rusage_ch.ru_utime.tv_sec * 100 + proc_info->ki_rusage_ch.ru_utime.tv_usec / 10000); - pid_incremental_rate(stat, p->cstime, (kernel_uint_t)proc_info->ki_rusage_ch.ru_stime.tv_sec * 100 + proc_info->ki_rusage_ch.ru_stime.tv_usec / 10000); - - p->num_threads = proc_info->ki_numthreads; - - usec_t started_ut = timeval_usec(&proc_info->ki_start); - p->uptime = (system_current_time_ut > started_ut) ? 
(system_current_time_ut - started_ut) / USEC_PER_SEC : 0; - - if(enable_guest_charts) { - enable_guest_charts = false; - netdata_log_info("Guest charts aren't supported by FreeBSD"); - } - - if(unlikely(debug_enabled || (p->target && p->target->debug_enabled))) - debug_log_int("READ PROC/PID/STAT: %s/proc/%d/stat, process: '%s' on target '%s' (dt=%llu) VALUES: utime=" KERNEL_UINT_FORMAT ", stime=" KERNEL_UINT_FORMAT ", cutime=" KERNEL_UINT_FORMAT ", cstime=" KERNEL_UINT_FORMAT ", minflt=" KERNEL_UINT_FORMAT ", majflt=" KERNEL_UINT_FORMAT ", cminflt=" KERNEL_UINT_FORMAT ", cmajflt=" KERNEL_UINT_FORMAT ", threads=%d", netdata_configured_host_prefix, p->pid, p->comm, (p->target)?p->target->name:"UNSET", p->stat_collected_usec - p->last_stat_collected_usec, p->utime, p->stime, p->cutime, p->cstime, p->minflt, p->majflt, p->cminflt, p->cmajflt, p->num_threads); - - if(unlikely(global_iterations_counter == 1)) - clear_pid_stat(p, false); - - return true; - -cleanup: - clear_pid_stat(p, true); - return false; -} -#endif // __FreeBSD__ - -#ifdef __APPLE__ -static inline bool read_proc_pid_stat_per_os(struct pid_stat *p, void *ptr) { - struct pid_info *pi = ptr; - - p->ppid = pi->proc.kp_eproc.e_ppid; - - // Update command name and target if changed - char comm[PROC_PIDPATHINFO_MAXSIZE]; - int ret = proc_name(p->pid, comm, sizeof(comm)); - if (ret <= 0) - strncpyz(comm, "unknown", sizeof(comm) - 1); - - update_pid_comm(p, comm); - - kernel_uint_t userCPU = (pi->taskinfo.pti_total_user * mach_info.numer) / mach_info.denom / NSEC_PER_USEC / 10000; - kernel_uint_t systemCPU = (pi->taskinfo.pti_total_system * mach_info.numer) / mach_info.denom / NSEC_PER_USEC / 10000; - - // Map the values from taskinfo to the pid_stat structure - pid_incremental_rate(stat, p->minflt, pi->taskinfo.pti_faults); - pid_incremental_rate(stat, p->majflt, pi->taskinfo.pti_pageins); - pid_incremental_rate(stat, p->utime, userCPU); - pid_incremental_rate(stat, p->stime, systemCPU); - p->num_threads = pi->taskinfo.pti_threadnum; - - usec_t started_ut = timeval_usec(&pi->proc.kp_proc.p_starttime); - p->uptime = (system_current_time_ut > started_ut) ? (system_current_time_ut - started_ut) / USEC_PER_SEC : 0; - - // Note: Some values such as guest time, cutime, cstime, etc., are not directly available in MacOS. - // You might need to approximate or leave them unset depending on your needs. - - if(unlikely(debug_enabled || (p->target && p->target->debug_enabled))) { - debug_log_int("READ PROC/PID/STAT for MacOS: process: '%s' on target '%s' VALUES: utime=" KERNEL_UINT_FORMAT ", stime=" KERNEL_UINT_FORMAT ", minflt=" KERNEL_UINT_FORMAT ", majflt=" KERNEL_UINT_FORMAT ", threads=%d", - p->comm, (p->target) ? p->target->name : "UNSET", p->utime, p->stime, p->minflt, p->majflt, p->num_threads); - } - - if(unlikely(global_iterations_counter == 1)) - clear_pid_stat(p, false); - - // MacOS doesn't have a direct concept of process state like Linux, - // so updating process state count might need a different approach. 
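/*
 * Editorial sketch, not part of the original file: the userCPU/systemCPU
 * conversion above, worked through with concrete numbers. proc_pidinfo()
 * reports pti_total_user and pti_total_system in Mach absolute-time units;
 * scaling by the mach_timebase_info numer/denom ratio yields nanoseconds,
 * dividing by NSEC_PER_USEC yields microseconds, and dividing by 10000
 * yields 1/100ths of a second, the same unit Linux reports CPU ticks in
 * (USER_HZ = 100), so pid_incremental_rate() can treat all platforms alike.
 * Assuming the timebase commonly seen on Apple Silicon (numer=125, denom=3):
 *
 *   3600000000 units * 125 / 3 = 150000000000 ns
 *   150000000000 ns / 1000     = 150000000 us
 *   150000000 us / 10000       = 15000 ticks, i.e. 150 seconds of CPU time
 */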
- - return true; -} -#endif // __APPLE__ - -#if !defined(__FreeBSD__) && !defined(__APPLE__) -static inline void update_proc_state_count(char proc_stt) { - switch (proc_stt) { - case 'S': - proc_state_count[PROC_STATUS_SLEEPING] += 1; - break; - case 'R': - proc_state_count[PROC_STATUS_RUNNING] += 1; - break; - case 'D': - proc_state_count[PROC_STATUS_SLEEPING_D] += 1; - break; - case 'Z': - proc_state_count[PROC_STATUS_ZOMBIE] += 1; - break; - case 'T': - proc_state_count[PROC_STATUS_STOPPED] += 1; - break; - default: - break; - } -} - -static inline bool read_proc_pid_stat_per_os(struct pid_stat *p, void *ptr __maybe_unused) { - static procfile *ff = NULL; - - if(unlikely(!p->stat_filename)) { - char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, "%s/proc/%d/stat", netdata_configured_host_prefix, p->pid); - p->stat_filename = strdupz(filename); - } - - int set_quotes = (!ff)?1:0; - - ff = procfile_reopen(ff, p->stat_filename, NULL, PROCFILE_FLAG_NO_ERROR_ON_FILE_IO); - if(unlikely(!ff)) goto cleanup; - - // if(set_quotes) procfile_set_quotes(ff, "()"); - if(unlikely(set_quotes)) - procfile_set_open_close(ff, "(", ")"); - - ff = procfile_readall(ff); - if(unlikely(!ff)) goto cleanup; - - // p->pid = str2pid_t(procfile_lineword(ff, 0, 0)); - char *comm = procfile_lineword(ff, 0, 1); - p->state = *(procfile_lineword(ff, 0, 2)); - p->ppid = (int32_t)str2pid_t(procfile_lineword(ff, 0, 3)); - // p->pgrp = (int32_t)str2pid_t(procfile_lineword(ff, 0, 4)); - // p->session = (int32_t)str2pid_t(procfile_lineword(ff, 0, 5)); - // p->tty_nr = (int32_t)str2pid_t(procfile_lineword(ff, 0, 6)); - // p->tpgid = (int32_t)str2pid_t(procfile_lineword(ff, 0, 7)); - // p->flags = str2uint64_t(procfile_lineword(ff, 0, 8)); - - update_pid_comm(p, comm); - - pid_incremental_rate(stat, p->minflt, str2kernel_uint_t(procfile_lineword(ff, 0, 9))); - pid_incremental_rate(stat, p->cminflt, str2kernel_uint_t(procfile_lineword(ff, 0, 10))); - pid_incremental_rate(stat, p->majflt, str2kernel_uint_t(procfile_lineword(ff, 0, 11))); - pid_incremental_rate(stat, p->cmajflt, str2kernel_uint_t(procfile_lineword(ff, 0, 12))); - pid_incremental_rate(stat, p->utime, str2kernel_uint_t(procfile_lineword(ff, 0, 13))); - pid_incremental_rate(stat, p->stime, str2kernel_uint_t(procfile_lineword(ff, 0, 14))); - pid_incremental_rate(stat, p->cutime, str2kernel_uint_t(procfile_lineword(ff, 0, 15))); - pid_incremental_rate(stat, p->cstime, str2kernel_uint_t(procfile_lineword(ff, 0, 16))); - // p->priority = str2kernel_uint_t(procfile_lineword(ff, 0, 17)); - // p->nice = str2kernel_uint_t(procfile_lineword(ff, 0, 18)); - p->num_threads = (int32_t) str2uint32_t(procfile_lineword(ff, 0, 19), NULL); - // p->itrealvalue = str2kernel_uint_t(procfile_lineword(ff, 0, 20)); - kernel_uint_t collected_starttime = str2kernel_uint_t(procfile_lineword(ff, 0, 21)) / system_hz; - p->uptime = (system_uptime_secs > collected_starttime)?(system_uptime_secs - collected_starttime):0; - // p->vsize = str2kernel_uint_t(procfile_lineword(ff, 0, 22)); - // p->rss = str2kernel_uint_t(procfile_lineword(ff, 0, 23)); - // p->rsslim = str2kernel_uint_t(procfile_lineword(ff, 0, 24)); - // p->starcode = str2kernel_uint_t(procfile_lineword(ff, 0, 25)); - // p->endcode = str2kernel_uint_t(procfile_lineword(ff, 0, 26)); - // p->startstack = str2kernel_uint_t(procfile_lineword(ff, 0, 27)); - // p->kstkesp = str2kernel_uint_t(procfile_lineword(ff, 0, 28)); - // p->kstkeip = str2kernel_uint_t(procfile_lineword(ff, 0, 29)); - // p->signal = 
str2kernel_uint_t(procfile_lineword(ff, 0, 30)); - // p->blocked = str2kernel_uint_t(procfile_lineword(ff, 0, 31)); - // p->sigignore = str2kernel_uint_t(procfile_lineword(ff, 0, 32)); - // p->sigcatch = str2kernel_uint_t(procfile_lineword(ff, 0, 33)); - // p->wchan = str2kernel_uint_t(procfile_lineword(ff, 0, 34)); - // p->nswap = str2kernel_uint_t(procfile_lineword(ff, 0, 35)); - // p->cnswap = str2kernel_uint_t(procfile_lineword(ff, 0, 36)); - // p->exit_signal = str2kernel_uint_t(procfile_lineword(ff, 0, 37)); - // p->processor = str2kernel_uint_t(procfile_lineword(ff, 0, 38)); - // p->rt_priority = str2kernel_uint_t(procfile_lineword(ff, 0, 39)); - // p->policy = str2kernel_uint_t(procfile_lineword(ff, 0, 40)); - // p->delayacct_blkio_ticks = str2kernel_uint_t(procfile_lineword(ff, 0, 41)); - - if(enable_guest_charts) { - pid_incremental_rate(stat, p->gtime, str2kernel_uint_t(procfile_lineword(ff, 0, 42))); - pid_incremental_rate(stat, p->cgtime, str2kernel_uint_t(procfile_lineword(ff, 0, 43))); - - if (show_guest_time || p->gtime || p->cgtime) { - p->utime -= (p->utime >= p->gtime) ? p->gtime : p->utime; - p->cutime -= (p->cutime >= p->cgtime) ? p->cgtime : p->cutime; - show_guest_time = 1; - } - } - - if(unlikely(debug_enabled || (p->target && p->target->debug_enabled))) - debug_log_int("READ PROC/PID/STAT: %s/proc/%d/stat, process: '%s' on target '%s' (dt=%llu) VALUES: utime=" KERNEL_UINT_FORMAT ", stime=" KERNEL_UINT_FORMAT ", cutime=" KERNEL_UINT_FORMAT ", cstime=" KERNEL_UINT_FORMAT ", minflt=" KERNEL_UINT_FORMAT ", majflt=" KERNEL_UINT_FORMAT ", cminflt=" KERNEL_UINT_FORMAT ", cmajflt=" KERNEL_UINT_FORMAT ", threads=%d", netdata_configured_host_prefix, p->pid, p->comm, (p->target)?p->target->name:"UNSET", p->stat_collected_usec - p->last_stat_collected_usec, p->utime, p->stime, p->cutime, p->cstime, p->minflt, p->majflt, p->cminflt, p->cmajflt, p->num_threads); - - if(unlikely(global_iterations_counter == 1)) - clear_pid_stat(p, false); - - update_proc_state_count(p->state); - return true; - -cleanup: - clear_pid_stat(p, true); - return false; -} -#endif // !__FreeBSD__ !__APPLE__ - -int read_proc_pid_stat(struct pid_stat *p, void *ptr) { - p->last_stat_collected_usec = p->stat_collected_usec; - p->stat_collected_usec = now_monotonic_usec(); - calls_counter++; - - if(!read_proc_pid_stat_per_os(p, ptr)) - return 0; - - return 1; -} diff --git a/src/collectors/apps.plugin/apps_proc_pid_status.c b/src/collectors/apps.plugin/apps_proc_pid_status.c deleted file mode 100644 index 364d48047..000000000 --- a/src/collectors/apps.plugin/apps_proc_pid_status.c +++ /dev/null @@ -1,192 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include "apps_plugin.h" - -#if defined(__FreeBSD__) -static inline bool read_proc_pid_status_per_os(struct pid_stat *p, void *ptr) { - struct kinfo_proc *proc_info = (struct kinfo_proc *)ptr; - - p->uid = proc_info->ki_uid; - p->gid = proc_info->ki_groups[0]; - p->status_vmsize = proc_info->ki_size / 1024; // in KiB - p->status_vmrss = proc_info->ki_rssize * pagesize / 1024; // in KiB - // TODO: what about shared and swap memory on FreeBSD? 
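/*
 * Editorial sketch, not part of the original file: the two conversions
 * above with concrete numbers. ki_size is the virtual size in bytes, so a
 * single division by 1024 yields KiB; ki_rssize counts resident pages, so
 * it must first be scaled by the runtime page size. Assuming pagesize = 4096:
 *
 *   ki_size   = 123456789 bytes -> 123456789 / 1024    = 120563 KiB
 *   ki_rssize = 25000 pages     -> 25000 * 4096 / 1024 = 100000 KiB
 */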
- return true; -} -#endif - -#ifdef __APPLE__ -static inline bool read_proc_pid_status_per_os(struct pid_stat *p, void *ptr) { - struct pid_info *pi = ptr; - - p->uid = pi->bsdinfo.pbi_uid; - p->gid = pi->bsdinfo.pbi_gid; - p->status_vmsize = pi->taskinfo.pti_virtual_size / 1024; // Convert bytes to KiB - p->status_vmrss = pi->taskinfo.pti_resident_size / 1024; // Convert bytes to KiB - // p->status_vmswap = rusageinfo.ri_swapins + rusageinfo.ri_swapouts; // This is not directly available, consider an alternative representation - p->status_voluntary_ctxt_switches = pi->taskinfo.pti_csw; - // p->status_nonvoluntary_ctxt_switches = taskinfo.pti_nivcsw; - - return true; -} -#endif // __APPLE__ - -#if !defined(__FreeBSD__) && !defined(__APPLE__) -struct arl_callback_ptr { - struct pid_stat *p; - procfile *ff; - size_t line; -}; - -void arl_callback_status_uid(const char *name, uint32_t hash, const char *value, void *dst) { - (void)name; (void)hash; (void)value; - struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst; - if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 5)) return; - - //const char *real_uid = procfile_lineword(aptr->ff, aptr->line, 1); - const char *effective_uid = procfile_lineword(aptr->ff, aptr->line, 2); - //const char *saved_uid = procfile_lineword(aptr->ff, aptr->line, 3); - //const char *filesystem_uid = procfile_lineword(aptr->ff, aptr->line, 4); - - if(likely(effective_uid && *effective_uid)) - aptr->p->uid = (uid_t)str2l(effective_uid); -} - -void arl_callback_status_gid(const char *name, uint32_t hash, const char *value, void *dst) { - (void)name; (void)hash; (void)value; - struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst; - if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 5)) return; - - //const char *real_gid = procfile_lineword(aptr->ff, aptr->line, 1); - const char *effective_gid = procfile_lineword(aptr->ff, aptr->line, 2); - //const char *saved_gid = procfile_lineword(aptr->ff, aptr->line, 3); - //const char *filesystem_gid = procfile_lineword(aptr->ff, aptr->line, 4); - - if(likely(effective_gid && *effective_gid)) - aptr->p->gid = (uid_t)str2l(effective_gid); -} - -void arl_callback_status_vmsize(const char *name, uint32_t hash, const char *value, void *dst) { - (void)name; (void)hash; (void)value; - struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst; - if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 3)) return; - - aptr->p->status_vmsize = str2kernel_uint_t(procfile_lineword(aptr->ff, aptr->line, 1)); -} - -void arl_callback_status_vmswap(const char *name, uint32_t hash, const char *value, void *dst) { - (void)name; (void)hash; (void)value; - struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst; - if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 3)) return; - - aptr->p->status_vmswap = str2kernel_uint_t(procfile_lineword(aptr->ff, aptr->line, 1)); -} - -void arl_callback_status_vmrss(const char *name, uint32_t hash, const char *value, void *dst) { - (void)name; (void)hash; (void)value; - struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst; - if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 3)) return; - - aptr->p->status_vmrss = str2kernel_uint_t(procfile_lineword(aptr->ff, aptr->line, 1)); -} - -void arl_callback_status_rssfile(const char *name, uint32_t hash, const char *value, void *dst) { - (void)name; (void)hash; (void)value; - struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst; - if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 3)) return; - - 
aptr->p->status_rssfile = str2kernel_uint_t(procfile_lineword(aptr->ff, aptr->line, 1)); -} - -void arl_callback_status_rssshmem(const char *name, uint32_t hash, const char *value, void *dst) { - (void)name; (void)hash; (void)value; - struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst; - if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 3)) return; - - aptr->p->status_rssshmem = str2kernel_uint_t(procfile_lineword(aptr->ff, aptr->line, 1)); -} - -void arl_callback_status_voluntary_ctxt_switches(const char *name, uint32_t hash, const char *value, void *dst) { - (void)name; (void)hash; (void)value; - struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst; - if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 2)) return; - - struct pid_stat *p = aptr->p; - pid_incremental_rate(stat, p->status_voluntary_ctxt_switches, str2kernel_uint_t(procfile_lineword(aptr->ff, aptr->line, 1))); -} - -void arl_callback_status_nonvoluntary_ctxt_switches(const char *name, uint32_t hash, const char *value, void *dst) { - (void)name; (void)hash; (void)value; - struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst; - if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 2)) return; - - struct pid_stat *p = aptr->p; - pid_incremental_rate(stat, p->status_nonvoluntary_ctxt_switches, str2kernel_uint_t(procfile_lineword(aptr->ff, aptr->line, 1))); -} - -static inline bool read_proc_pid_status_per_os(struct pid_stat *p, void *ptr __maybe_unused) { - static struct arl_callback_ptr arl_ptr; - static procfile *ff = NULL; - - if(unlikely(!p->status_arl)) { - p->status_arl = arl_create("/proc/pid/status", NULL, 60); - arl_expect_custom(p->status_arl, "Uid", arl_callback_status_uid, &arl_ptr); - arl_expect_custom(p->status_arl, "Gid", arl_callback_status_gid, &arl_ptr); - arl_expect_custom(p->status_arl, "VmSize", arl_callback_status_vmsize, &arl_ptr); - arl_expect_custom(p->status_arl, "VmRSS", arl_callback_status_vmrss, &arl_ptr); - arl_expect_custom(p->status_arl, "RssFile", arl_callback_status_rssfile, &arl_ptr); - arl_expect_custom(p->status_arl, "RssShmem", arl_callback_status_rssshmem, &arl_ptr); - arl_expect_custom(p->status_arl, "VmSwap", arl_callback_status_vmswap, &arl_ptr); - arl_expect_custom(p->status_arl, "voluntary_ctxt_switches", arl_callback_status_voluntary_ctxt_switches, &arl_ptr); - arl_expect_custom(p->status_arl, "nonvoluntary_ctxt_switches", arl_callback_status_nonvoluntary_ctxt_switches, &arl_ptr); - } - - if(unlikely(!p->status_filename)) { - char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, "%s/proc/%d/status", netdata_configured_host_prefix, p->pid); - p->status_filename = strdupz(filename); - } - - ff = procfile_reopen(ff, p->status_filename, (!ff)?" 
\t:,-()/":NULL, PROCFILE_FLAG_NO_ERROR_ON_FILE_IO); - if(unlikely(!ff)) return false; - - ff = procfile_readall(ff); - if(unlikely(!ff)) return false; - - calls_counter++; - - // let ARL use this pid - arl_ptr.p = p; - arl_ptr.ff = ff; - - size_t lines = procfile_lines(ff), l; - arl_begin(p->status_arl); - - for(l = 0; l < lines ;l++) { - // debug_log("CHECK: line %zu of %zu, key '%s' = '%s'", l, lines, procfile_lineword(ff, l, 0), procfile_lineword(ff, l, 1)); - arl_ptr.line = l; - if(unlikely(arl_check(p->status_arl, - procfile_lineword(ff, l, 0), - procfile_lineword(ff, l, 1)))) break; - } - - p->status_vmshared = p->status_rssfile + p->status_rssshmem; - - // debug_log("%s uid %d, gid %d, VmSize %zu, VmRSS %zu, RssFile %zu, RssShmem %zu, shared %zu", p->comm, (int)p->uid, (int)p->gid, p->status_vmsize, p->status_vmrss, p->status_rssfile, p->status_rssshmem, p->status_vmshared); - - return true; -} -#endif // !__FreeBSD__ !__APPLE__ - -int read_proc_pid_status(struct pid_stat *p, void *ptr) { - p->status_vmsize = 0; - p->status_vmrss = 0; - p->status_vmshared = 0; - p->status_rssfile = 0; - p->status_rssshmem = 0; - p->status_vmswap = 0; - p->status_voluntary_ctxt_switches = 0; - p->status_nonvoluntary_ctxt_switches = 0; - - return read_proc_pid_status_per_os(p, ptr) ? 1 : 0; -} diff --git a/src/collectors/apps.plugin/apps_proc_pids.c b/src/collectors/apps.plugin/apps_proc_pids.c deleted file mode 100644 index b53060d60..000000000 --- a/src/collectors/apps.plugin/apps_proc_pids.c +++ /dev/null @@ -1,720 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include "apps_plugin.h" - -static struct pid_stat **all_pids = NULL; -size_t all_pids_count = 0; // the number of processes running - -struct pid_stat *root_of_pids = NULL; // global linked list of all processes running - -#if (ALL_PIDS_ARE_READ_INSTANTLY == 0) -// Another pre-allocated list of all possible pids. -// We need it to assign them a unique sortlist id, so that we -// read parents before children. This is needed to prevent a situation where -// a child is found running, but until we read its parent, it has exited and -// its parent has accumulated its resources. 
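/*
 * Editorial sketch, not part of the original file: a concrete timeline of
 * the race described above. Suppose bash (pid 900) is a child of sshd
 * (pid 800) and an iteration reads the child first:
 *
 *   t0: bash is read  -> its utime is counted for bash
 *   t1: bash exits    -> the kernel folds bash's utime into sshd's cutime
 *   t2: sshd is read  -> sshd's cutime now includes bash's utime again,
 *                        so the same CPU time would be counted twice
 *
 * Reading sshd before bash keeps the parent's snapshot older than the
 * child's, which is exactly the ordering the sortlist ids enforce.
 */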
-pid_t *all_pids_sortlist = NULL; -#endif - -void pids_init(void) { -#if (ALL_PIDS_ARE_READ_INSTANTLY == 0) - all_pids_sortlist = callocz(sizeof(pid_t), (size_t)pid_max + 1); -#endif - - all_pids = callocz(sizeof(struct pid_stat *), (size_t) pid_max + 1); -} - -inline struct pid_stat *find_pid_entry(pid_t pid) { - return all_pids[pid]; -} - -static inline struct pid_stat *get_or_allocate_pid_entry(pid_t pid) { - struct pid_stat *p = find_pid_entry(pid); - if(likely(p)) - return p; - - p = callocz(sizeof(struct pid_stat), 1); - p->fds = mallocz(sizeof(struct pid_fd) * MAX_SPARE_FDS); - p->fds_size = MAX_SPARE_FDS; - init_pid_fds(p, 0, p->fds_size); - p->pid = pid; - - DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(root_of_pids, p, prev, next); - all_pids[pid] = p; - all_pids_count++; - - return p; -} - -static inline void del_pid_entry(pid_t pid) { - struct pid_stat *p = find_pid_entry(pid); - - if(unlikely(!p)) { - netdata_log_error("attempted to free pid %d that is not allocated.", pid); - return; - } - - debug_log("process %d %s exited, deleting it.", pid, p->comm); - - DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(root_of_pids, p, prev, next); - -#if !defined(__FreeBSD__) && !defined(__APPLE__) - { - size_t i; - for(i = 0; i < p->fds_size; i++) - if(p->fds[i].filename) - freez(p->fds[i].filename); - } - arl_free(p->status_arl); -#endif - - freez(p->fds); - freez(p->fds_dirname); - freez(p->stat_filename); - freez(p->status_filename); - freez(p->limits_filename); - freez(p->io_filename); - freez(p->cmdline_filename); - freez(p->cmdline); - freez(p); - - all_pids[pid] = NULL; - all_pids_count--; -} - -static inline int collect_data_for_pid(pid_t pid, void *ptr) { - if(unlikely(pid < 0 || pid > pid_max)) { - netdata_log_error("Invalid pid %d read (expected %d to %d). Ignoring process.", pid, 0, pid_max); - return 0; - } - - struct pid_stat *p = get_or_allocate_pid_entry(pid); - if(unlikely(!p || p->read)) return 0; - p->read = true; - - // debug_log("Reading process %d (%s), sortlist %d", p->pid, p->comm, p->sortlist); - - // -------------------------------------------------------------------- - // /proc//stat - - if(unlikely(!managed_log(p, PID_LOG_STAT, read_proc_pid_stat(p, ptr)))) - // there is no reason to proceed if we cannot get its status - return 0; - - // check its parent pid - if(unlikely(p->ppid < 0 || p->ppid > pid_max)) { - netdata_log_error("Pid %d (command '%s') states invalid parent pid %d. Using 0.", pid, p->comm, p->ppid); - p->ppid = 0; - } - - // -------------------------------------------------------------------- - // /proc//io - - managed_log(p, PID_LOG_IO, read_proc_pid_io(p, ptr)); - - // -------------------------------------------------------------------- - // /proc//status - - if(unlikely(!managed_log(p, PID_LOG_STATUS, read_proc_pid_status(p, ptr)))) - // there is no reason to proceed if we cannot get its status - return 0; - - // -------------------------------------------------------------------- - // /proc//fd - - if(enable_file_charts) { - managed_log(p, PID_LOG_FDS, read_pid_file_descriptors(p, ptr)); - managed_log(p, PID_LOG_LIMITS, read_proc_pid_limits(p, ptr)); - } - - // -------------------------------------------------------------------- - // done! 
- - if(unlikely(debug_enabled && include_exited_childs && all_pids_count && p->ppid && all_pids[p->ppid] && !all_pids[p->ppid]->read)) - debug_log("Read process %d (%s) sortlisted %d, but its parent %d (%s) sortlisted %d, is not read", p->pid, p->comm, p->sortlist, all_pids[p->ppid]->pid, all_pids[p->ppid]->comm, all_pids[p->ppid]->sortlist); - - // mark it as updated - p->updated = true; - p->keep = false; - p->keeploops = 0; - - return 1; -} - -void cleanup_exited_pids(void) { - size_t c; - struct pid_stat *p = NULL; - - for(p = root_of_pids; p ;) { - if(!p->updated && (!p->keep || p->keeploops > 0)) { - if(unlikely(debug_enabled && (p->keep || p->keeploops))) - debug_log(" > CLEANUP cannot keep exited process %d (%s) anymore - removing it.", p->pid, p->comm); - - for(c = 0; c < p->fds_size; c++) - if(p->fds[c].fd > 0) { - file_descriptor_not_used(p->fds[c].fd); - clear_pid_fd(&p->fds[c]); - } - - pid_t r = p->pid; - p = p->next; - del_pid_entry(r); - } - else { - if(unlikely(p->keep)) p->keeploops++; - p->keep = false; - p = p->next; - } - } -} - -// ---------------------------------------------------------------------------- - -static inline void link_all_processes_to_their_parents(void) { - struct pid_stat *p, *pp; - - // link all children to their parents - // and update children count on parents - for(p = root_of_pids; p ; p = p->next) { - // for each process found - - p->sortlist = 0; - p->parent = NULL; - - if(unlikely(!p->ppid)) { - //unnecessary code from apps_plugin.c - //p->parent = NULL; - continue; - } - - pp = all_pids[p->ppid]; - if(likely(pp)) { - p->parent = pp; - pp->children_count++; - - if(unlikely(debug_enabled || (p->target && p->target->debug_enabled))) - debug_log_int("child %d (%s, %s) on target '%s' has parent %d (%s, %s). 
Parent: utime=" KERNEL_UINT_FORMAT ", stime=" KERNEL_UINT_FORMAT ", gtime=" KERNEL_UINT_FORMAT ", minflt=" KERNEL_UINT_FORMAT ", majflt=" KERNEL_UINT_FORMAT ", cutime=" KERNEL_UINT_FORMAT ", cstime=" KERNEL_UINT_FORMAT ", cgtime=" KERNEL_UINT_FORMAT ", cminflt=" KERNEL_UINT_FORMAT ", cmajflt=" KERNEL_UINT_FORMAT "", p->pid, p->comm, p->updated?"running":"exited", (p->target)?p->target->name:"UNSET", pp->pid, pp->comm, pp->updated?"running":"exited", pp->utime, pp->stime, pp->gtime, pp->minflt, pp->majflt, pp->cutime, pp->cstime, pp->cgtime, pp->cminflt, pp->cmajflt); - } - else { - p->parent = NULL; - netdata_log_error("pid %d %s states parent %d, but the latter does not exist.", p->pid, p->comm, p->ppid); - } - } -} - -// ---------------------------------------------------------------------------- - -static inline int debug_print_process_and_parents(struct pid_stat *p, usec_t time) { - char *prefix = "\\_ "; - int indent = 0; - - if(p->parent) - indent = debug_print_process_and_parents(p->parent, p->stat_collected_usec); - else - prefix = " > "; - - char buffer[indent + 1]; - int i; - - for(i = 0; i < indent ;i++) buffer[i] = ' '; - buffer[i] = '\0'; - - fprintf(stderr, " %s %s%s (%d %s %"PRIu64"" - , buffer - , prefix - , p->comm - , p->pid - , p->updated?"running":"exited" - , p->stat_collected_usec - time - ); - - if(p->utime) fprintf(stderr, " utime=" KERNEL_UINT_FORMAT, p->utime); - if(p->stime) fprintf(stderr, " stime=" KERNEL_UINT_FORMAT, p->stime); - if(p->gtime) fprintf(stderr, " gtime=" KERNEL_UINT_FORMAT, p->gtime); - if(p->cutime) fprintf(stderr, " cutime=" KERNEL_UINT_FORMAT, p->cutime); - if(p->cstime) fprintf(stderr, " cstime=" KERNEL_UINT_FORMAT, p->cstime); - if(p->cgtime) fprintf(stderr, " cgtime=" KERNEL_UINT_FORMAT, p->cgtime); - if(p->minflt) fprintf(stderr, " minflt=" KERNEL_UINT_FORMAT, p->minflt); - if(p->cminflt) fprintf(stderr, " cminflt=" KERNEL_UINT_FORMAT, p->cminflt); - if(p->majflt) fprintf(stderr, " majflt=" KERNEL_UINT_FORMAT, p->majflt); - if(p->cmajflt) fprintf(stderr, " cmajflt=" KERNEL_UINT_FORMAT, p->cmajflt); - fprintf(stderr, ")\n"); - - return indent + 1; -} - -static inline void debug_print_process_tree(struct pid_stat *p, char *msg __maybe_unused) { - debug_log("%s: process %s (%d, %s) with parents:", msg, p->comm, p->pid, p->updated?"running":"exited"); - debug_print_process_and_parents(p, p->stat_collected_usec); -} - -static inline void debug_find_lost_child(struct pid_stat *pe, kernel_uint_t lost, int type) { - int found = 0; - struct pid_stat *p = NULL; - - for(p = root_of_pids; p ; p = p->next) { - if(p == pe) continue; - - switch(type) { - case 1: - if(p->cminflt > lost) { - fprintf(stderr, " > process %d (%s) could use the lost exited child minflt " KERNEL_UINT_FORMAT " of process %d (%s)\n", p->pid, p->comm, lost, pe->pid, pe->comm); - found++; - } - break; - - case 2: - if(p->cmajflt > lost) { - fprintf(stderr, " > process %d (%s) could use the lost exited child majflt " KERNEL_UINT_FORMAT " of process %d (%s)\n", p->pid, p->comm, lost, pe->pid, pe->comm); - found++; - } - break; - - case 3: - if(p->cutime > lost) { - fprintf(stderr, " > process %d (%s) could use the lost exited child utime " KERNEL_UINT_FORMAT " of process %d (%s)\n", p->pid, p->comm, lost, pe->pid, pe->comm); - found++; - } - break; - - case 4: - if(p->cstime > lost) { - fprintf(stderr, " > process %d (%s) could use the lost exited child stime " KERNEL_UINT_FORMAT " of process %d (%s)\n", p->pid, p->comm, lost, pe->pid, pe->comm); - found++; - } - break; - - case 5:
if(p->cgtime > lost) { - fprintf(stderr, " > process %d (%s) could use the lost exited child gtime " KERNEL_UINT_FORMAT " of process %d (%s)\n", p->pid, p->comm, lost, pe->pid, pe->comm); - found++; - } - break; - } - } - - if(!found) { - switch(type) { - case 1: - fprintf(stderr, " > cannot find any process to use the lost exited child minflt " KERNEL_UINT_FORMAT " of process %d (%s)\n", lost, pe->pid, pe->comm); - break; - - case 2: - fprintf(stderr, " > cannot find any process to use the lost exited child majflt " KERNEL_UINT_FORMAT " of process %d (%s)\n", lost, pe->pid, pe->comm); - break; - - case 3: - fprintf(stderr, " > cannot find any process to use the lost exited child utime " KERNEL_UINT_FORMAT " of process %d (%s)\n", lost, pe->pid, pe->comm); - break; - - case 4: - fprintf(stderr, " > cannot find any process to use the lost exited child stime " KERNEL_UINT_FORMAT " of process %d (%s)\n", lost, pe->pid, pe->comm); - break; - - case 5: - fprintf(stderr, " > cannot find any process to use the lost exited child gtime " KERNEL_UINT_FORMAT " of process %d (%s)\n", lost, pe->pid, pe->comm); - break; - } - } -} - -static inline kernel_uint_t remove_exited_child_from_parent(kernel_uint_t *field, kernel_uint_t *pfield) { - kernel_uint_t absorbed = 0; - - if(*field > *pfield) { - absorbed += *pfield; - *field -= *pfield; - *pfield = 0; - } - else { - absorbed += *field; - *pfield -= *field; - *field = 0; - } - - return absorbed; -} - -static inline void process_exited_pids() { - struct pid_stat *p; - - for(p = root_of_pids; p ; p = p->next) { - if(p->updated || !p->stat_collected_usec) - continue; - - kernel_uint_t utime = (p->utime_raw + p->cutime_raw) * (USEC_PER_SEC * RATES_DETAIL) / (p->stat_collected_usec - p->last_stat_collected_usec); - kernel_uint_t stime = (p->stime_raw + p->cstime_raw) * (USEC_PER_SEC * RATES_DETAIL) / (p->stat_collected_usec - p->last_stat_collected_usec); - kernel_uint_t gtime = (p->gtime_raw + p->cgtime_raw) * (USEC_PER_SEC * RATES_DETAIL) / (p->stat_collected_usec - p->last_stat_collected_usec); - kernel_uint_t minflt = (p->minflt_raw + p->cminflt_raw) * (USEC_PER_SEC * RATES_DETAIL) / (p->stat_collected_usec - p->last_stat_collected_usec); - kernel_uint_t majflt = (p->majflt_raw + p->cmajflt_raw) * (USEC_PER_SEC * RATES_DETAIL) / (p->stat_collected_usec - p->last_stat_collected_usec); - - if(utime + stime + gtime + minflt + majflt == 0) - continue; - - if(unlikely(debug_enabled)) { - debug_log("Absorb %s (%d %s total resources: utime=" KERNEL_UINT_FORMAT " stime=" KERNEL_UINT_FORMAT " gtime=" KERNEL_UINT_FORMAT " minflt=" KERNEL_UINT_FORMAT " majflt=" KERNEL_UINT_FORMAT ")" - , p->comm - , p->pid - , p->updated?"running":"exited" - , utime - , stime - , gtime - , minflt - , majflt - ); - debug_print_process_tree(p, "Searching parents"); - } - - struct pid_stat *pp; - for(pp = p->parent; pp ; pp = pp->parent) { - if(!pp->updated) continue; - - kernel_uint_t absorbed; - absorbed = remove_exited_child_from_parent(&utime, &pp->cutime); - if(unlikely(debug_enabled && absorbed)) - debug_log(" > process %s (%d %s) absorbed " KERNEL_UINT_FORMAT " utime (remaining: " KERNEL_UINT_FORMAT ")", pp->comm, pp->pid, pp->updated?"running":"exited", absorbed, utime); - - absorbed = remove_exited_child_from_parent(&stime, &pp->cstime); - if(unlikely(debug_enabled && absorbed)) - debug_log(" > process %s (%d %s) absorbed " KERNEL_UINT_FORMAT " stime (remaining: " KERNEL_UINT_FORMAT ")", pp->comm, pp->pid, pp->updated?"running":"exited", absorbed, stime); - - absorbed = 
remove_exited_child_from_parent(&gtime, &pp->cgtime); - if(unlikely(debug_enabled && absorbed)) - debug_log(" > process %s (%d %s) absorbed " KERNEL_UINT_FORMAT " gtime (remaining: " KERNEL_UINT_FORMAT ")", pp->comm, pp->pid, pp->updated?"running":"exited", absorbed, gtime); - - absorbed = remove_exited_child_from_parent(&minflt, &pp->cminflt); - if(unlikely(debug_enabled && absorbed)) - debug_log(" > process %s (%d %s) absorbed " KERNEL_UINT_FORMAT " minflt (remaining: " KERNEL_UINT_FORMAT ")", pp->comm, pp->pid, pp->updated?"running":"exited", absorbed, minflt); - - absorbed = remove_exited_child_from_parent(&majflt, &pp->cmajflt); - if(unlikely(debug_enabled && absorbed)) - debug_log(" > process %s (%d %s) absorbed " KERNEL_UINT_FORMAT " majflt (remaining: " KERNEL_UINT_FORMAT ")", pp->comm, pp->pid, pp->updated?"running":"exited", absorbed, majflt); - } - - if(unlikely(utime + stime + gtime + minflt + majflt > 0)) { - if(unlikely(debug_enabled)) { - if(utime) debug_find_lost_child(p, utime, 3); - if(stime) debug_find_lost_child(p, stime, 4); - if(gtime) debug_find_lost_child(p, gtime, 5); - if(minflt) debug_find_lost_child(p, minflt, 1); - if(majflt) debug_find_lost_child(p, majflt, 2); - } - - p->keep = true; - - debug_log(" > remaining resources - KEEP - for another loop: %s (%d %s total resources: utime=" KERNEL_UINT_FORMAT " stime=" KERNEL_UINT_FORMAT " gtime=" KERNEL_UINT_FORMAT " minflt=" KERNEL_UINT_FORMAT " majflt=" KERNEL_UINT_FORMAT ")" - , p->comm - , p->pid - , p->updated?"running":"exited" - , utime - , stime - , gtime - , minflt - , majflt - ); - - for(pp = p->parent; pp ; pp = pp->parent) { - if(pp->updated) break; - pp->keep = true; - - debug_log(" > - KEEP - parent for another loop: %s (%d %s)" - , pp->comm - , pp->pid - , pp->updated?"running":"exited" - ); - } - - p->utime_raw = utime * (p->stat_collected_usec - p->last_stat_collected_usec) / (USEC_PER_SEC * RATES_DETAIL); - p->stime_raw = stime * (p->stat_collected_usec - p->last_stat_collected_usec) / (USEC_PER_SEC * RATES_DETAIL); - p->gtime_raw = gtime * (p->stat_collected_usec - p->last_stat_collected_usec) / (USEC_PER_SEC * RATES_DETAIL); - p->minflt_raw = minflt * (p->stat_collected_usec - p->last_stat_collected_usec) / (USEC_PER_SEC * RATES_DETAIL); - p->majflt_raw = majflt * (p->stat_collected_usec - p->last_stat_collected_usec) / (USEC_PER_SEC * RATES_DETAIL); - p->cutime_raw = p->cstime_raw = p->cgtime_raw = p->cminflt_raw = p->cmajflt_raw = 0; - - debug_log(" "); - } - else - debug_log(" > totally absorbed - DONE - %s (%d %s)" - , p->comm - , p->pid - , p->updated?"running":"exited" - ); - } -} - -// ---------------------------------------------------------------------------- - -// 1. read all files in /proc -// 2. for each numeric directory: -// i. read /proc/pid/stat -// ii. read /proc/pid/status -// iii. read /proc/pid/io (requires root access) -// iv. read the entries in directory /proc/pid/fd (requires root access) -// for each entry: -// a. find or create a struct file_descriptor -// b.
cleanup any old/unused file_descriptors - -// after all these, some pids may be linked to targets, while others may not - -// in case of errors, only 1 in every 1000 errors is printed -// to avoid filling up all disk space -// if debug is enabled, all errors are printed - -static inline void mark_pid_as_unread(struct pid_stat *p) { - p->read = false; // mark it as not read, so that collect_data_for_pid() will read it - p->updated = false; - p->merged = false; - p->children_count = 0; - p->parent = NULL; -} - -#if defined(__FreeBSD__) || defined(__APPLE__) -static inline void get_current_time(void) { - struct timeval current_time; - gettimeofday(&current_time, NULL); - system_current_time_ut = timeval_usec(&current_time); -} -#endif - -#if defined(__FreeBSD__) -static inline bool collect_data_for_all_pids_per_os(void) { - // Mark all processes as unread before collecting new data - struct pid_stat *p = NULL; - if(all_pids_count) { - for(p = root_of_pids; p ; p = p->next) - mark_pid_as_unread(p); - } - - int i, procnum; - - static size_t procbase_size = 0; - static struct kinfo_proc *procbase = NULL; - - size_t new_procbase_size; - - int mib[3] = { CTL_KERN, KERN_PROC, KERN_PROC_PROC }; - if (unlikely(sysctl(mib, 3, NULL, &new_procbase_size, NULL, 0))) { - netdata_log_error("sysctl error: Can't get processes data size"); - return false; - } - - // give it some air for processes that may be started - // during this little time. - new_procbase_size += 100 * sizeof(struct kinfo_proc); - - // increase the buffer if needed - if(new_procbase_size > procbase_size) { - procbase_size = new_procbase_size; - procbase = reallocz(procbase, procbase_size); - } - - // sysctl() gets from new_procbase_size the buffer size - // and also returns to it the amount of data filled in - new_procbase_size = procbase_size; - - // get the processes from the system - if (unlikely(sysctl(mib, 3, procbase, &new_procbase_size, NULL, 0))) { - netdata_log_error("sysctl error: Can't get processes data"); - return false; - } - - // based on the amount of data filled in - // calculate the number of processes we got - procnum = new_procbase_size / sizeof(struct kinfo_proc); - - get_current_time(); - - for (i = 0 ; i < procnum ; ++i) { - pid_t pid = procbase[i].ki_pid; - if (pid <= 0) continue; - collect_data_for_pid(pid, &procbase[i]); - } - - return true; -} -#endif // __FreeBSD__ - -#if defined(__APPLE__) -static inline bool collect_data_for_all_pids_per_os(void) { - // Mark all processes as unread before collecting new data - struct pid_stat *p; - if(all_pids_count) { - for(p = root_of_pids; p; p = p->next) - mark_pid_as_unread(p); - } - - static pid_t *pids = NULL; - static int allocatedProcessCount = 0; - - // Get the number of processes - int numberOfProcesses = proc_listpids(PROC_ALL_PIDS, 0, NULL, 0); - if (numberOfProcesses <= 0) { - netdata_log_error("Failed to retrieve the process count"); - return false; - } - - // Allocate or reallocate space to hold all the process IDs if necessary - if (numberOfProcesses > allocatedProcessCount) { - // Allocate additional space to avoid frequent reallocations - allocatedProcessCount = numberOfProcesses + 100; - pids = reallocz(pids, allocatedProcessCount * sizeof(pid_t)); - } - - // this is required, otherwise the PIDs become totally random - memset(pids, 0, allocatedProcessCount * sizeof(pid_t)); - - // get the list of PIDs - numberOfProcesses = proc_listpids(PROC_ALL_PIDS, 0, pids, allocatedProcessCount * sizeof(pid_t)); - if (numberOfProcesses <= 0) { - netdata_log_error("Failed to retrieve
the process IDs"); - return false; - } - - get_current_time(); - - // Collect data for each process - for (int i = 0; i < numberOfProcesses; ++i) { - pid_t pid = pids[i]; - if (pid <= 0) continue; - - struct pid_info pi = { 0 }; - - int mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_PID, pid}; - - size_t procSize = sizeof(pi.proc); - if(sysctl(mib, 4, &pi.proc, &procSize, NULL, 0) == -1) { - netdata_log_error("Failed to get proc for PID %d", pid); - continue; - } - if(procSize == 0) // no such process - continue; - - int st = proc_pidinfo(pid, PROC_PIDTASKINFO, 0, &pi.taskinfo, sizeof(pi.taskinfo)); - if (st <= 0) { - netdata_log_error("Failed to get task info for PID %d", pid); - continue; - } - - st = proc_pidinfo(pid, PROC_PIDTBSDINFO, 0, &pi.bsdinfo, sizeof(pi.bsdinfo)); - if (st <= 0) { - netdata_log_error("Failed to get BSD info for PID %d", pid); - continue; - } - - st = proc_pid_rusage(pid, RUSAGE_INFO_V4, (rusage_info_t *)&pi.rusageinfo); - if (st < 0) { - netdata_log_error("Failed to get resource usage info for PID %d", pid); - continue; - } - - collect_data_for_pid(pid, &pi); - } - - return true; -} -#endif // __APPLE__ - -#if !defined(__FreeBSD__) && !defined(__APPLE__) -static int compar_pid(const void *pid1, const void *pid2) { - - struct pid_stat *p1 = all_pids[*((pid_t *)pid1)]; - struct pid_stat *p2 = all_pids[*((pid_t *)pid2)]; - - if(p1->sortlist > p2->sortlist) - return -1; - else - return 1; -} - -static inline bool collect_data_for_all_pids_per_os(void) { - struct pid_stat *p = NULL; - - // clear process state counter - memset(proc_state_count, 0, sizeof proc_state_count); - - if(all_pids_count) { - size_t slc = 0; - for(p = root_of_pids; p ; p = p->next) { - mark_pid_as_unread(p); - all_pids_sortlist[slc++] = p->pid; - } - - if(unlikely(slc != all_pids_count)) { - netdata_log_error("Internal error: I was thinking I had %zu processes in my arrays, but it seems there are %zu.", all_pids_count, slc); - all_pids_count = slc; - } - - if(include_exited_childs) { - // Read parents before childs - // This is needed to prevent a situation where - // a child is found running, but until we read - // its parent, it has exited and its parent - // has accumulated its resources. 
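/*
 * Editorial note, not part of the original file: compar_pid() above never
 * returns 0, so two entries with equal sortlist values each claim to be
 * greater than the other. A strictly conforming comparator would also
 * report equality, e.g.:
 *
 *   if(p1->sortlist > p2->sortlist) return -1;
 *   if(p1->sortlist < p2->sortlist) return  1;
 *   return 0;
 *
 * In practice only the parent-before-child ordering matters here, since
 * collect_data_for_pid() skips pids already read within the iteration.
 */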
- - qsort((void *)all_pids_sortlist, (size_t)all_pids_count, sizeof(pid_t), compar_pid); - - // we forward read all running processes - // collect_data_for_pid() is smart enough, - // not to read the same pid twice per iteration - for(slc = 0; slc < all_pids_count; slc++) { - collect_data_for_pid(all_pids_sortlist[slc], NULL); - } - } - } - - static char uptime_filename[FILENAME_MAX + 1] = ""; - if(*uptime_filename == '\0') - snprintfz(uptime_filename, FILENAME_MAX, "%s/proc/uptime", netdata_configured_host_prefix); - - system_uptime_secs = (kernel_uint_t)(uptime_msec(uptime_filename) / MSEC_PER_SEC); - - char dirname[FILENAME_MAX + 1]; - - snprintfz(dirname, FILENAME_MAX, "%s/proc", netdata_configured_host_prefix); - DIR *dir = opendir(dirname); - if(!dir) return false; - - struct dirent *de = NULL; - - while((de = readdir(dir))) { - char *endptr = de->d_name; - - if(unlikely(de->d_type != DT_DIR || de->d_name[0] < '0' || de->d_name[0] > '9')) - continue; - - pid_t pid = (pid_t) strtoul(de->d_name, &endptr, 10); - - // make sure we read a valid number - if(unlikely(endptr == de->d_name || *endptr != '\0')) - continue; - - collect_data_for_pid(pid, NULL); - } - closedir(dir); - - return true; -} -#endif // !__FreeBSD__ && !__APPLE__ - -bool collect_data_for_all_pids(void) { - if(!collect_data_for_all_pids_per_os()) - return false; - - if(!all_pids_count) - return false; - - // we need /proc/stat to normalize the cpu consumption of the exited childs - read_global_time(); - - // build the process tree - link_all_processes_to_their_parents(); - - // normally this is done - // however we may have processes exited while we collected values - // so let's find the exited ones - // we do this by collecting the ownership of process - // if we manage to get the ownership, the process still runs - process_exited_pids(); - - return true; -} diff --git a/src/collectors/apps.plugin/apps_proc_stat.c b/src/collectors/apps.plugin/apps_proc_stat.c deleted file mode 100644 index 8564ddd55..000000000 --- a/src/collectors/apps.plugin/apps_proc_stat.c +++ /dev/null @@ -1,154 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include "apps_plugin.h" - -#if defined(__APPLE__) -int read_global_time(void) { - static kernel_uint_t utime_raw = 0, stime_raw = 0, ntime_raw = 0; - static usec_t collected_usec = 0, last_collected_usec = 0; - - host_cpu_load_info_data_t cpuinfo; - mach_msg_type_number_t count = HOST_CPU_LOAD_INFO_COUNT; - - if (host_statistics(mach_host_self(), HOST_CPU_LOAD_INFO, (host_info_t)&cpuinfo, &count) != KERN_SUCCESS) { - // Handle error - goto cleanup; - } - - last_collected_usec = collected_usec; - collected_usec = now_monotonic_usec(); - - calls_counter++; - - // Convert ticks to time - // Note: MacOS does not separate nice time from user time in the CPU stats, so you might need to adjust this logic - kernel_uint_t global_ntime = 0; // Assuming you want to keep track of nice time separately - - incremental_rate(global_utime, utime_raw, cpuinfo.cpu_ticks[CPU_STATE_USER] + cpuinfo.cpu_ticks[CPU_STATE_NICE], collected_usec, last_collected_usec); - incremental_rate(global_ntime, ntime_raw, cpuinfo.cpu_ticks[CPU_STATE_NICE], collected_usec, last_collected_usec); - incremental_rate(global_stime, stime_raw, cpuinfo.cpu_ticks[CPU_STATE_SYSTEM], collected_usec, last_collected_usec); - - global_utime += global_ntime; - - if(unlikely(global_iterations_counter == 1)) { - global_utime = 0; - global_stime = 0; - global_gtime = 0; - } - - return 1; - -cleanup: - global_utime = 0; - global_stime = 0; 
global_gtime = 0; - return 0; -} -#endif // __APPLE__ - - -#if defined(__FreeBSD__) -int read_global_time(void) { - static kernel_uint_t utime_raw = 0, stime_raw = 0, ntime_raw = 0; - static usec_t collected_usec = 0, last_collected_usec = 0; - long cp_time[CPUSTATES]; - - if (unlikely(CPUSTATES != 5)) { - goto cleanup; - } else { - static int mib[2] = {0, 0}; - - if (unlikely(GETSYSCTL_SIMPLE("kern.cp_time", mib, cp_time))) { - goto cleanup; - } - } - - last_collected_usec = collected_usec; - collected_usec = now_monotonic_usec(); - - calls_counter++; - - // temporary - it is added to global_utime below; - kernel_uint_t global_ntime = 0; - - incremental_rate(global_utime, utime_raw, cp_time[0] * 100LLU / system_hz, collected_usec, last_collected_usec); - incremental_rate(global_ntime, ntime_raw, cp_time[1] * 100LLU / system_hz, collected_usec, last_collected_usec); - incremental_rate(global_stime, stime_raw, cp_time[2] * 100LLU / system_hz, collected_usec, last_collected_usec); - - global_utime += global_ntime; - - if(unlikely(global_iterations_counter == 1)) { - global_utime = 0; - global_stime = 0; - global_gtime = 0; - } - - return 1; - -cleanup: - global_utime = 0; - global_stime = 0; - global_gtime = 0; - return 0; -} -#endif // __FreeBSD__ - -#if !defined(__FreeBSD__) && !defined(__APPLE__) -int read_global_time(void) { - static char filename[FILENAME_MAX + 1] = ""; - static procfile *ff = NULL; - static kernel_uint_t utime_raw = 0, stime_raw = 0, gtime_raw = 0, gntime_raw = 0, ntime_raw = 0; - static usec_t collected_usec = 0, last_collected_usec = 0; - - if(unlikely(!ff)) { - snprintfz(filename, FILENAME_MAX, "%s/proc/stat", netdata_configured_host_prefix); - ff = procfile_open(filename, " \t:", PROCFILE_FLAG_DEFAULT); - if(unlikely(!ff)) goto cleanup; - } - - ff = procfile_readall(ff); - if(unlikely(!ff)) goto cleanup; - - last_collected_usec = collected_usec; - collected_usec = now_monotonic_usec(); - - calls_counter++; - - // temporary - it is added to global_utime below; - kernel_uint_t global_ntime = 0; - - incremental_rate(global_utime, utime_raw, str2kernel_uint_t(procfile_lineword(ff, 0, 1)), collected_usec, last_collected_usec); - incremental_rate(global_ntime, ntime_raw, str2kernel_uint_t(procfile_lineword(ff, 0, 2)), collected_usec, last_collected_usec); - incremental_rate(global_stime, stime_raw, str2kernel_uint_t(procfile_lineword(ff, 0, 3)), collected_usec, last_collected_usec); - incremental_rate(global_gtime, gtime_raw, str2kernel_uint_t(procfile_lineword(ff, 0, 10)), collected_usec, last_collected_usec); - - global_utime += global_ntime; - - if(enable_guest_charts) { - // temporary - it is added to global_gtime below; - kernel_uint_t global_gntime = 0; - - // guest nice time, on guest time - incremental_rate(global_gntime, gntime_raw, str2kernel_uint_t(procfile_lineword(ff, 0, 11)), collected_usec, last_collected_usec); - - global_gtime += global_gntime; - - // remove guest time from user time - global_utime -= (global_utime > global_gtime) ?
global_gtime : global_utime; - } - - if(unlikely(global_iterations_counter == 1)) { - global_utime = 0; - global_stime = 0; - global_gtime = 0; - } - - return 1; - -cleanup: - global_utime = 0; - global_stime = 0; - global_gtime = 0; - return 0; -} -#endif // !__FreeBSD__ !__APPLE__ diff --git a/src/collectors/apps.plugin/apps_targets.c b/src/collectors/apps.plugin/apps_targets.c index 7deaa798c..46db128cc 100644 --- a/src/collectors/apps.plugin/apps_targets.c +++ b/src/collectors/apps.plugin/apps_targets.c @@ -2,199 +2,370 @@ #include "apps_plugin.h" -// ---------------------------------------------------------------------------- -// apps_groups.conf -// aggregate all processes in groups, to have a limited number of dimensions +pid_t INIT_PID = OS_INIT_PID; -struct target *get_users_target(uid_t uid) { - struct target *w; - for(w = users_root_target ; w ; w = w->next) - if(w->uid == uid) return w; +static STRING *get_clean_name(STRING *name) { + char buf[string_strlen(name) + 1]; + memcpy(buf, string2str(name), string_strlen(name) + 1); + netdata_fix_chart_name(buf); - w = callocz(sizeof(struct target), 1); - snprintfz(w->compare, MAX_COMPARE_NAME, "%u", uid); - w->comparehash = simple_hash(w->compare); - w->comparelen = strlen(w->compare); + for (char *d = buf; *d ; d++) + if (*d == '.') *d = '_'; - snprintfz(w->id, MAX_NAME, "%u", uid); - w->idhash = simple_hash(w->id); + return string_strdupz(buf); +} - struct user_or_group_id user_id_to_find = { - .id = { - .uid = uid, - } - }; - struct user_or_group_id *user_or_group_id = user_id_find(&user_id_to_find); +static inline STRING *get_numeric_string(uint64_t n) { + char buf[UINT64_MAX_LENGTH]; + print_uint64(buf, n); + return string_strdupz(buf); +} - if(user_or_group_id && user_or_group_id->name && *user_or_group_id->name) - snprintfz(w->name, MAX_NAME, "%s", user_or_group_id->name); +struct target *find_target_by_name(struct target *base, const char *name) { + struct target *t; + for(t = base; t ; t = t->next) { + if (string_strcmp(t->name, name) == 0) + return t; + } - else { - struct passwd *pw = getpwuid(uid); - if(!pw || !pw->pw_name || !*pw->pw_name) - snprintfz(w->name, MAX_NAME, "%u", uid); + return NULL; +} + +// -------------------------------------------------------------------------------------------------------------------- +// Process managers and aggregators + +struct comm_list { + APPS_MATCH match; +}; + +struct managed_list { + size_t used; + size_t size; + struct comm_list *array; +}; + +static struct { + struct managed_list managers; + struct managed_list aggregators; + struct managed_list interpreters; +} tree = { + .managers = { + .array = NULL, + .size = 0, + .used = 0, + }, + .aggregators = { + .array = NULL, + .size = 0, + .used = 0, + } +}; + +static void managed_list_clear(struct managed_list *list) { + for(size_t c = 0; c < list->used ; c++) + pid_match_cleanup(&list->array[c].match); + + freez(list->array); + list->array = NULL; + list->used = 0; + list->size = 0; +} + +static void managed_list_add(struct managed_list *list, const char *s) { + if(list->used >= list->size) { + if(!list->size) + list->size = 16; else - snprintfz(w->name, MAX_NAME, "%s", pw->pw_name); + list->size *= 2; + list->array = reallocz(list->array, sizeof(*list->array) * list->size); + } + + list->array[list->used++].match = pid_match_create(s); +} + +static STRING *KernelAggregator = NULL; + +void apps_managers_and_aggregators_init(void) { + KernelAggregator = string_strdupz("kernel"); + + managed_list_clear(&tree.managers); +#if 
defined(OS_LINUX) + managed_list_add(&tree.managers, "init"); // linux systems + managed_list_add(&tree.managers, "systemd"); // lxc containers and host systems (this also catches "systemd --user") + managed_list_add(&tree.managers, "containerd-shim-runc-v2"); // docker containers + managed_list_add(&tree.managers, "docker-init"); // docker containers + managed_list_add(&tree.managers, "tini"); // docker containers (https://github.com/krallin/tini) + managed_list_add(&tree.managers, "dumb-init"); // some docker containers use this + managed_list_add(&tree.managers, "openrc-run.sh"); // openrc + managed_list_add(&tree.managers, "crond"); // linux crond + managed_list_add(&tree.managers, "gnome-shell"); // gnome user applications + managed_list_add(&tree.managers, "plasmashell"); // kde user applications + managed_list_add(&tree.managers, "xfwm4"); // xfce4 user applications +#elif defined(OS_WINDOWS) + managed_list_add(&tree.managers, "wininit"); + managed_list_add(&tree.managers, "services"); + managed_list_add(&tree.managers, "explorer"); + managed_list_add(&tree.managers, "System"); +#elif defined(OS_FREEBSD) + managed_list_add(&tree.managers, "init"); +#elif defined(OS_MACOS) + managed_list_add(&tree.managers, "launchd"); +#endif + +#if defined(OS_WINDOWS) + managed_list_add(&tree.managers, "netdata"); +#else + managed_list_add(&tree.managers, "spawn-plugins"); +#endif + + managed_list_clear(&tree.aggregators); +#if defined(OS_LINUX) + managed_list_add(&tree.aggregators, "kthread"); +#elif defined(OS_WINDOWS) +#elif defined(OS_FREEBSD) + managed_list_add(&tree.aggregators, "kernel"); +#elif defined(OS_MACOS) +#endif + + managed_list_clear(&tree.interpreters); + managed_list_add(&tree.interpreters, "python"); + managed_list_add(&tree.interpreters, "python2"); + managed_list_add(&tree.interpreters, "python3"); + managed_list_add(&tree.interpreters, "sh"); + managed_list_add(&tree.interpreters, "bash"); + managed_list_add(&tree.interpreters, "node"); + managed_list_add(&tree.interpreters, "perl"); +} + +bool is_process_a_manager(struct pid_stat *p) { + for(size_t c = 0; c < tree.managers.used ; c++) { + if(pid_match_check(p, &tree.managers.array[c].match)) + return true; + } + + return false; +} + +bool is_process_an_aggregator(struct pid_stat *p) { + for(size_t c = 0; c < tree.aggregators.used ; c++) { + if(pid_match_check(p, &tree.aggregators.array[c].match)) + return true; + } + + return false; +} + +bool is_process_an_interpreter(struct pid_stat *p) { + for(size_t c = 0; c < tree.interpreters.used ; c++) { + if(pid_match_check(p, &tree.interpreters.array[c].match)) + return true; + } + + return false; +} + +// -------------------------------------------------------------------------------------------------------------------- +// Tree + +struct target *get_tree_target(struct pid_stat *p) { +// // skip fast all the children that are more than 3 levels down +// while(p->parent && p->parent->pid != INIT_PID && p->parent->parent && p->parent->parent->parent) +// p = p->parent; + + // keep the children of INIT_PID, and process orchestrators + while(p->parent && p->parent->pid != INIT_PID && p->parent->pid != 0 && !p->parent->is_manager) + p = p->parent; + + // merge all processes into process aggregators + STRING *search_for = NULL; + if((p->ppid == 0 && p->pid != INIT_PID) || (p->parent && p->parent->is_aggregator)) { + search_for = string_dup(KernelAggregator); } + else { +#if (PROCESSES_HAVE_COMM_AND_NAME == 1) + search_for = string_dup(p->name ? 
p->name : p->comm); +#else + search_for = string_dup(p->comm); +#endif + } + + // find an existing target with the required name + struct target *w; + for(w = apps_groups_root_target; w ; w = w->next) { + if (w->name == search_for) { + string_freez(search_for); + return w; + } + } + + w = callocz(sizeof(struct target), 1); + w->type = TARGET_TYPE_TREE; + w->match.starts_with = w->match.ends_with = false; + w->match.compare = string_dup(search_for); + w->match.pattern = NULL; + w->id = search_for; + w->name = string_dup(search_for); + w->clean_name = get_clean_name(w->name); + + w->next = apps_groups_root_target; + apps_groups_root_target = w; + + return w; +} + +// -------------------------------------------------------------------------------------------------------------------- +// Users + +#if (PROCESSES_HAVE_UID == 1) +struct target *users_root_target = NULL; - strncpyz(w->clean_name, w->name, MAX_NAME); - netdata_fix_chart_name(w->clean_name); +struct target *get_uid_target(uid_t uid) { + struct target *w; + for(w = users_root_target ; w ; w = w->next) + if(w->uid == uid) return w; + w = callocz(sizeof(struct target), 1); + w->type = TARGET_TYPE_UID; w->uid = uid; + w->id = get_numeric_string(uid); + + CACHED_USERNAME cu = cached_username_get_by_uid(uid); + w->name = string_dup(cu.username); + w->clean_name = get_clean_name(w->name); + cached_username_release(cu); w->next = users_root_target; users_root_target = w; - debug_log("added uid %u ('%s') target", w->uid, w->name); + debug_log("added uid %u ('%s') target", w->uid, string2str(w->name)); return w; } +#endif + +// -------------------------------------------------------------------------------------------------------------------- +// Groups + +#if (PROCESSES_HAVE_GID == 1) +struct target *groups_root_target = NULL; -struct target *get_groups_target(gid_t gid) { +struct target *get_gid_target(gid_t gid) { struct target *w; for(w = groups_root_target ; w ; w = w->next) if(w->gid == gid) return w; w = callocz(sizeof(struct target), 1); - snprintfz(w->compare, MAX_COMPARE_NAME, "%u", gid); - w->comparehash = simple_hash(w->compare); - w->comparelen = strlen(w->compare); + w->type = TARGET_TYPE_GID; + w->gid = gid; + w->id = get_numeric_string(gid); - snprintfz(w->id, MAX_NAME, "%u", gid); - w->idhash = simple_hash(w->id); + CACHED_GROUPNAME cg = cached_groupname_get_by_gid(gid); + w->name = string_dup(cg.groupname); + w->clean_name = get_clean_name(w->name); + cached_groupname_release(cg); - struct user_or_group_id group_id_to_find = { - .id = { - .gid = gid, - } - }; - struct user_or_group_id *group_id = group_id_find(&group_id_to_find); + w->next = groups_root_target; + groups_root_target = w; - if(group_id && group_id->name && *group_id->name) { - snprintfz(w->name, MAX_NAME, "%s", group_id->name); - } - else { - struct group *gr = getgrgid(gid); - if(!gr || !gr->gr_name || !*gr->gr_name) - snprintfz(w->name, MAX_NAME, "%u", gid); - else - snprintfz(w->name, MAX_NAME, "%s", gr->gr_name); - } + debug_log("added gid %u ('%s') target", w->gid, w->name); - strncpyz(w->clean_name, w->name, MAX_NAME); - netdata_fix_chart_name(w->clean_name); + return w; +} +#endif - w->gid = gid; +// -------------------------------------------------------------------------------------------------------------------- +// SID - w->next = groups_root_target; - groups_root_target = w; +#if (PROCESSES_HAVE_SID == 1) +struct target *sids_root_target = NULL; - debug_log("added gid %u ('%s') target", w->gid, w->name); +struct target *get_sid_target(STRING 
*sid_name) { + struct target *w; + for(w = sids_root_target ; w ; w = w->next) + if(w->sid_name == sid_name) return w; + + w = callocz(sizeof(struct target), 1); + w->type = TARGET_TYPE_SID; + w->sid_name = string_dup(sid_name); + w->id = string_dup(sid_name); + w->name = string_dup(sid_name); + w->clean_name = get_clean_name(w->name); + + w->next = sids_root_target; + sids_root_target = w; + + debug_log("added uid %s ('%s') target", string2str(w->sid_name), string2str(w->name)); return w; } +#endif + +// -------------------------------------------------------------------------------------------------------------------- +// apps_groups.conf + +struct target *apps_groups_root_target = NULL; // find or create a new target // there are targets that are just aggregated to other target (the second argument) -static struct target *get_apps_groups_target(const char *id, struct target *target, const char *name) { - int tdebug = 0, thidden = target?target->hidden:0, ends_with = 0; - const char *nid = id; - - // extract the options - while(nid[0] == '-' || nid[0] == '+' || nid[0] == '*') { - if(nid[0] == '-') thidden = 1; - if(nid[0] == '+') tdebug = 1; - if(nid[0] == '*') ends_with = 1; - nid++; - } - uint32_t hash = simple_hash(id); +static struct target *get_apps_groups_target(const char *comm, struct target *target, const char *name) { + APPS_MATCH match = pid_match_create(comm); + STRING *name_lookup = string_strdupz(name); // find if it already exists struct target *w, *last = apps_groups_root_target; for(w = apps_groups_root_target ; w ; w = w->next) { - if(w->idhash == hash && strncmp(nid, w->id, MAX_NAME) == 0) + if(w->id == match.compare) { + pid_match_cleanup(&match); + string_freez(name_lookup); return w; + } last = w; } // find an existing target if(unlikely(!target)) { - while(*name == '-') { - if(*name == '-') thidden = 1; - name++; - } - - for(target = apps_groups_root_target ; target != NULL ; target = target->next) { - if(!target->target && strcmp(name, target->name) == 0) + for(target = apps_groups_root_target ; target ; target = target->next) { + if(!target->target && name_lookup == target->name) break; } - - if(unlikely(debug_enabled)) { - if(unlikely(target)) - debug_log("REUSING TARGET NAME '%s' on ID '%s'", target->name, target->id); - else - debug_log("NEW TARGET NAME '%s' on ID '%s'", name, id); - } } if(target && target->target) - fatal("Internal Error: request to link process '%s' to target '%s' which is linked to target '%s'", id, target->id, target->target->id); + fatal("Internal Error: request to link process '%s' to target '%s' which is linked to target '%s'", + comm, string2str(target->id), string2str(target->target->id)); w = callocz(sizeof(struct target), 1); - strncpyz(w->id, nid, MAX_NAME); - w->idhash = simple_hash(w->id); + w->type = TARGET_TYPE_APP_GROUP; + w->match = match; + w->id = string_dup(w->match.compare); if(unlikely(!target)) - // copy the name - strncpyz(w->name, name, MAX_NAME); + w->name = string_dup(name_lookup); // copy the name else - // copy the id - strncpyz(w->name, nid, MAX_NAME); + w->name = string_dup(w->id); // copy the id // dots are used to distinguish chart type and id in streaming, so we should replace them - strncpyz(w->clean_name, w->name, MAX_NAME); - netdata_fix_chart_name(w->clean_name); - for (char *d = w->clean_name; *d; d++) { - if (*d == '.') - *d = '_'; - } - - strncpyz(w->compare, nid, MAX_COMPARE_NAME); - size_t len = strlen(w->compare); - if(w->compare[len - 1] == '*') { - w->compare[len - 1] = '\0'; - w->starts_with = 
1; - } - w->ends_with = ends_with; + w->clean_name = get_clean_name(w->name); - if(w->starts_with && w->ends_with) + if(w->match.starts_with && w->match.ends_with) proc_pid_cmdline_is_needed = true; - w->comparehash = simple_hash(w->compare); - w->comparelen = strlen(w->compare); - - w->hidden = thidden; -#ifdef NETDATA_INTERNAL_CHECKS - w->debug_enabled = tdebug; -#else - if(tdebug) - fprintf(stderr, "apps.plugin has been compiled without debugging\n"); -#endif w->target = target; // append it, to maintain the order in apps_groups.conf if(last) last->next = w; else apps_groups_root_target = w; - debug_log("ADDING TARGET ID '%s', process name '%s' (%s), aggregated on target '%s', options: %s %s" - , w->id - , w->compare, (w->starts_with && w->ends_with)?"substring":((w->starts_with)?"prefix":((w->ends_with)?"suffix":"exact")) - , w->target?w->target->name:w->name - , (w->hidden)?"hidden":"-" - , (w->debug_enabled)?"debug":"-" + debug_log("ADDING TARGET ID '%s', process name '%s' (%s), aggregated on target '%s'" + , string2str(w->id) + , string2str(w->match.compare) + , (w->match.starts_with && w->match.ends_with) ? "substring" : ((w->match.starts_with) ? "prefix" : ((w->match.ends_with) ? "suffix" : "exact")) + , w->target?w->target->name:w->name ); + string_freez(name_lookup); + return w; } @@ -224,20 +395,49 @@ int read_apps_groups_conf(const char *path, const char *file) { if(!words) continue; char *name = procfile_lineword(ff, line, 0); - if(!name || !*name) continue; + if(!name || !*name || *name == '#') continue; + + if(strcmp(name, "managers") == 0) { + if(words == 2 && strcmp(procfile_lineword(ff, line, 1), "clear") == 0) + managed_list_clear(&tree.managers); + + for(word = 1; word < words ;word++) { + char *s = procfile_lineword(ff, line, word); + if (!s || !*s) continue; + if (*s == '#') break; + + managed_list_add(&tree.managers, s); + } + + // done with managers, proceed to next line + continue; + } + + if(strcmp(name, "interpreters") == 0) { + if(words == 2 && strcmp(procfile_lineword(ff, line, 1), "clear") == 0) + managed_list_clear(&tree.interpreters); + + for(word = 1; word < words ;word++) { + char *s = procfile_lineword(ff, line, word); + if (!s || !*s) continue; + if (*s == '#') break; + + managed_list_add(&tree.interpreters, s); + } + + // done with interpreters, proceed to the next line + continue; + } // find a possibly existing target struct target *w = NULL; // loop through all words, skipping the first one (the name) - for(word = 0; word < words ;word++) { + for(word = 1; word < words ;word++) { char *s = procfile_lineword(ff, line, word); if(!s || !*s) continue; if(*s == '#') break; - // is this the first word?
skip it - if(s == name) continue; - // add this target struct target *n = get_apps_groups_target(s, w, name); if(!n) { @@ -252,15 +452,5 @@ int read_apps_groups_conf(const char *path, const char *file) { } procfile_close(ff); - - apps_groups_default_target = get_apps_groups_target("p+!o@w#e$i^r&7*5(-i)l-o_", NULL, "other"); // match nothing - if(!apps_groups_default_target) - fatal("Cannot create default target"); - apps_groups_default_target->is_other = true; - - // allow the user to override group 'other' - if(apps_groups_default_target->target) - apps_groups_default_target = apps_groups_default_target->target; - return 0; } diff --git a/src/collectors/apps.plugin/apps_users_and_groups.c b/src/collectors/apps.plugin/apps_users_and_groups.c deleted file mode 100644 index d28b39e79..000000000 --- a/src/collectors/apps.plugin/apps_users_and_groups.c +++ /dev/null @@ -1,206 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include "apps_plugin.h" - -// ---------------------------------------------------------------------------- -// read users and groups from files - -enum user_or_group_id_type { - USER_ID, - GROUP_ID -}; - -struct user_or_group_ids { - enum user_or_group_id_type type; - - avl_tree_type index; - struct user_or_group_id *root; - - char filename[FILENAME_MAX + 1]; -}; - -int user_id_compare(void* a, void* b) { - if(((struct user_or_group_id *)a)->id.uid < ((struct user_or_group_id *)b)->id.uid) - return -1; - - else if(((struct user_or_group_id *)a)->id.uid > ((struct user_or_group_id *)b)->id.uid) - return 1; - - else - return 0; -} - -struct user_or_group_ids all_user_ids = { - .type = USER_ID, - - .index = { - NULL, - user_id_compare - }, - - .root = NULL, - - .filename = "", -}; - -int group_id_compare(void* a, void* b) { - if(((struct user_or_group_id *)a)->id.gid < ((struct user_or_group_id *)b)->id.gid) - return -1; - - else if(((struct user_or_group_id *)a)->id.gid > ((struct user_or_group_id *)b)->id.gid) - return 1; - - else - return 0; -} - -struct user_or_group_ids all_group_ids = { - .type = GROUP_ID, - - .index = { - NULL, - group_id_compare - }, - - .root = NULL, - - .filename = "", -}; - -int file_changed(const struct stat *statbuf __maybe_unused, struct timespec *last_modification_time __maybe_unused) { -#if defined(__APPLE__) - return 0; -#else - if(likely(statbuf->st_mtim.tv_sec == last_modification_time->tv_sec && - statbuf->st_mtim.tv_nsec == last_modification_time->tv_nsec)) return 0; - - last_modification_time->tv_sec = statbuf->st_mtim.tv_sec; - last_modification_time->tv_nsec = statbuf->st_mtim.tv_nsec; - - return 1; -#endif -} - -int read_user_or_group_ids(struct user_or_group_ids *ids, struct timespec *last_modification_time) { - struct stat statbuf; - if(unlikely(stat(ids->filename, &statbuf))) - return 1; - else - if(likely(!file_changed(&statbuf, last_modification_time))) return 0; - - procfile *ff = procfile_open(ids->filename, " :\t", PROCFILE_FLAG_DEFAULT); - if(unlikely(!ff)) return 1; - - ff = procfile_readall(ff); - if(unlikely(!ff)) return 1; - - size_t line, lines = procfile_lines(ff); - - for(line = 0; line < lines ;line++) { - size_t words = procfile_linewords(ff, line); - if(unlikely(words < 3)) continue; - - char *name = procfile_lineword(ff, line, 0); - if(unlikely(!name || !*name)) continue; - - char *id_string = procfile_lineword(ff, line, 2); - if(unlikely(!id_string || !*id_string)) continue; - - - struct user_or_group_id *user_or_group_id = callocz(1, sizeof(struct user_or_group_id)); - - if(ids->type == USER_ID) - 
user_or_group_id->id.uid = (uid_t) str2ull(id_string, NULL); - else - user_or_group_id->id.gid = (uid_t) str2ull(id_string, NULL); - - user_or_group_id->name = strdupz(name); - user_or_group_id->updated = 1; - - struct user_or_group_id *existing_user_id = NULL; - - if(likely(ids->root)) - existing_user_id = (struct user_or_group_id *)avl_search(&ids->index, (avl_t *) user_or_group_id); - - if(unlikely(existing_user_id)) { - freez(existing_user_id->name); - existing_user_id->name = user_or_group_id->name; - existing_user_id->updated = 1; - freez(user_or_group_id); - } - else { - if(unlikely(avl_insert(&ids->index, (avl_t *) user_or_group_id) != (void *) user_or_group_id)) { - netdata_log_error("INTERNAL ERROR: duplicate indexing of id during realloc"); - } - - user_or_group_id->next = ids->root; - ids->root = user_or_group_id; - } - } - - procfile_close(ff); - - // remove unused ids - struct user_or_group_id *user_or_group_id = ids->root, *prev_user_id = NULL; - - while(user_or_group_id) { - if(unlikely(!user_or_group_id->updated)) { - if(unlikely((struct user_or_group_id *)avl_remove(&ids->index, (avl_t *) user_or_group_id) != user_or_group_id)) - netdata_log_error("INTERNAL ERROR: removal of unused id from index, removed a different id"); - - if(prev_user_id) - prev_user_id->next = user_or_group_id->next; - else - ids->root = user_or_group_id->next; - - freez(user_or_group_id->name); - freez(user_or_group_id); - - if(prev_user_id) - user_or_group_id = prev_user_id->next; - else - user_or_group_id = ids->root; - } - else { - user_or_group_id->updated = 0; - - prev_user_id = user_or_group_id; - user_or_group_id = user_or_group_id->next; - } - } - - return 0; -} - -struct user_or_group_id *user_id_find(struct user_or_group_id *user_id_to_find) { - if(*netdata_configured_host_prefix) { - static struct timespec last_passwd_modification_time; - int ret = read_user_or_group_ids(&all_user_ids, &last_passwd_modification_time); - - if(likely(!ret && all_user_ids.index.root)) - return (struct user_or_group_id *)avl_search(&all_user_ids.index, (avl_t *)user_id_to_find); - } - - return NULL; -} - -struct user_or_group_id *group_id_find(struct user_or_group_id *group_id_to_find) { - if(*netdata_configured_host_prefix) { - static struct timespec last_group_modification_time; - int ret = read_user_or_group_ids(&all_group_ids, &last_group_modification_time); - - if(likely(!ret && all_group_ids.index.root)) - return (struct user_or_group_id *)avl_search(&all_group_ids.index, (avl_t *) &group_id_to_find); - } - - return NULL; -} - -void users_and_groups_init(void) { - snprintfz(all_user_ids.filename, FILENAME_MAX, "%s/etc/passwd", netdata_configured_host_prefix); - debug_log("passwd file: '%s'", all_user_ids.filename); - - snprintfz(all_group_ids.filename, FILENAME_MAX, "%s/etc/group", netdata_configured_host_prefix); - debug_log("group file: '%s'", all_group_ids.filename); -} - diff --git a/src/collectors/apps.plugin/busy_threads.c b/src/collectors/apps.plugin/busy_threads.c new file mode 100644 index 000000000..490c66148 --- /dev/null +++ b/src/collectors/apps.plugin/busy_threads.c @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +/* + * A very simple pthreads program to spawn N busy threads. + * It is just used for validating apps.plugin CPU utilization + * calculations per operating system. + * + * Compile with: + * + * gcc -O2 -ggdb -o busy_threads busy_threads.c -pthread + * + * Run as: + * + * busy_threads 2 + * + * The above will create 2 busy threads, each using 1 core in user time. 
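+ *
+ * A quick, optional sanity check (assuming `top` from procps is available):
+ *
+ *   ./busy_threads 2 &
+ *   top -H -p $!
+ *
+ * With -H, top lists individual threads; both busy threads should be
+ * reported at ~100% user CPU, which is what apps.plugin is expected
+ * to show for the matching target.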
+ * + */ + +#include <stdio.h> +#include <stdlib.h> +#include <pthread.h> +#include <signal.h> +#include <unistd.h> + +volatile int keep_running = 1; + +void handle_signal(int signal) { + keep_running = 0; +} + +void *busy_loop(void *arg) { + while (keep_running) { + // Busy loop to keep CPU at 100% + } + return NULL; +} + +int main(int argc, char *argv[]) { + if (argc != 2) { + fprintf(stderr, "Usage: %s <num_threads>\n", argv[0]); + exit(EXIT_FAILURE); + } + + int num_threads = atoi(argv[1]); + if (num_threads <= 0) { + fprintf(stderr, "Number of threads must be a positive integer.\n"); + exit(EXIT_FAILURE); + } + + // Register the signal handler to gracefully exit on Ctrl-C + signal(SIGINT, handle_signal); + + pthread_t *threads = malloc(sizeof(pthread_t) * num_threads); + if (threads == NULL) { + perror("malloc"); + exit(EXIT_FAILURE); + } + + // Create threads + for (int i = 0; i < num_threads; i++) { + if (pthread_create(&threads[i], NULL, busy_loop, NULL) != 0) { + perror("pthread_create"); + free(threads); + exit(EXIT_FAILURE); + } + } + + // Wait for threads to finish (they never will unless interrupted) + for (int i = 0; i < num_threads; i++) { + pthread_join(threads[i], NULL); + } + + free(threads); + return 0; +} diff --git a/src/collectors/cgroups.plugin/README.md b/src/collectors/cgroups.plugin/README.md index efa868bfb..dc58973af 100644 --- a/src/collectors/cgroups.plugin/README.md +++ b/src/collectors/cgroups.plugin/README.md @@ -1,12 +1,3 @@ - - # Monitor Cgroups (cgroups.plugin) You can monitor containers and virtual machines using **cgroups**. diff --git a/src/collectors/cgroups.plugin/cgroup-discovery.c b/src/collectors/cgroups.plugin/cgroup-discovery.c index d880f8a71..5d3027a47 100644 --- a/src/collectors/cgroups.plugin/cgroup-discovery.c +++ b/src/collectors/cgroups.plugin/cgroup-discovery.c @@ -23,7 +23,7 @@ struct cgroup *discovered_cgroup_root = NULL; char cgroup_chart_id_prefix[] = "cgroup_"; char services_chart_id_prefix[] = "systemd_"; -char *cgroups_rename_script = NULL; +const char *cgroups_rename_script = NULL; // Shared memory with information from detected cgroups netdata_ebpf_cgroup_shm_t shm_cgroup_ebpf = {NULL, NULL}; @@ -188,7 +188,7 @@ static inline void discovery_rename_cgroup(struct cgroup *cg) { } char buffer[CGROUP_CHARTID_LINE_MAX + 1]; - char *new_name = fgets(buffer, CGROUP_CHARTID_LINE_MAX, instance->child_stdout_fp); + char *new_name = fgets(buffer, CGROUP_CHARTID_LINE_MAX, spawn_popen_stdout(instance)); int exit_code = spawn_popen_wait(instance); switch (exit_code) { @@ -1101,7 +1101,7 @@ static inline void read_cgroup_network_interfaces(struct cgroup *cg) { char *s; char buffer[CGROUP_NETWORK_INTERFACE_MAX_LINE + 1]; - while((s = fgets(buffer, CGROUP_NETWORK_INTERFACE_MAX_LINE, instance->child_stdout_fp))) { + while((s = fgets(buffer, CGROUP_NETWORK_INTERFACE_MAX_LINE, spawn_popen_stdout(instance)))) { trim(s); if(*s && *s != '\n') { diff --git a/src/collectors/cgroups.plugin/cgroup-internals.h b/src/collectors/cgroups.plugin/cgroup-internals.h index e0d53dc93..cdb5837bd 100644 --- a/src/collectors/cgroups.plugin/cgroup-internals.h +++ b/src/collectors/cgroups.plugin/cgroup-internals.h @@ -273,7 +273,7 @@ struct discovery_thread { extern struct discovery_thread discovery_thread; -extern char *cgroups_rename_script; +extern const char *cgroups_rename_script; extern char cgroup_chart_id_prefix[]; extern char services_chart_id_prefix[]; extern uv_mutex_t cgroup_root_mutex; @@ -313,7 +313,7 @@ extern SIMPLE_PATTERN *enabled_cgroup_renames; extern SIMPLE_PATTERN *systemd_services_cgroups; extern SIMPLE_PATTERN
*entrypoint_parent_process_comm; -extern char *cgroups_network_interface_script; +extern const char *cgroups_network_interface_script; extern int cgroups_check; @@ -394,8 +394,8 @@ static inline char *cgroup_chart_type(char *buffer, struct cgroup *cg) { #define RRDFUNCTIONS_CGTOP_HELP "View running containers" #define RRDFUNCTIONS_SYSTEMD_SERVICES_HELP "View systemd services" -int cgroup_function_cgroup_top(BUFFER *wb, const char *function); -int cgroup_function_systemd_top(BUFFER *wb, const char *function); +int cgroup_function_cgroup_top(BUFFER *wb, const char *function, BUFFER *payload, const char *source); +int cgroup_function_systemd_top(BUFFER *wb, const char *function, BUFFER *payload, const char *source); void cgroup_netdev_link_init(void); const DICTIONARY_ITEM *cgroup_netdev_get(struct cgroup *cg); diff --git a/src/collectors/cgroups.plugin/cgroup-name.sh.in b/src/collectors/cgroups.plugin/cgroup-name.sh.in index 0f8b63256..18755b622 100755 --- a/src/collectors/cgroups.plugin/cgroup-name.sh.in +++ b/src/collectors/cgroups.plugin/cgroup-name.sh.in @@ -155,7 +155,7 @@ function docker_like_get_name_api() { info "Running API command: curl \"${host}${path}\"" JSON=$(curl -sS "${host}${path}") fi - if OUTPUT=$(echo "${JSON}" | jq -r '.Config.Env[],"CONT_NAME=\(.Name)","IMAGE_NAME=\(.Config.Image)"') && [ -n "$OUTPUT" ]; then + if OUTPUT=$(echo "${JSON}" | jq -r '.Config.Env[]?,"CONT_NAME=\(.Name)","IMAGE_NAME=\(.Config.Image)"') && [ -n "$OUTPUT" ]; then parse_docker_like_inspect_output "$OUTPUT" fi return 0 @@ -610,7 +610,7 @@ function podman_validate_id() { DOCKER_HOST="${DOCKER_HOST:=/var/run/docker.sock}" PODMAN_HOST="${PODMAN_HOST:=/run/podman/podman.sock}" CGROUP_PATH="${1}" # the path as it is (e.g. '/docker/efcf4c409') -CGROUP="${2}" # the modified path (e.g. 'docker_efcf4c409') +CGROUP="${2//\//_}" # the modified path (e.g. 
'docker_efcf4c409') EXIT_SUCCESS=0 EXIT_RETRY=2 EXIT_DISABLE=3 diff --git a/src/collectors/cgroups.plugin/cgroup-network.c b/src/collectors/cgroups.plugin/cgroup-network.c index 4cb5cbabe..d64b31288 100644 --- a/src/collectors/cgroups.plugin/cgroup-network.c +++ b/src/collectors/cgroups.plugin/cgroup-network.c @@ -3,6 +3,8 @@ #include "libnetdata/libnetdata.h" #include "libnetdata/required_dummies.h" +SPAWN_SERVER *spawn_server = NULL; + char env_netdata_host_prefix[FILENAME_MAX + 50] = ""; char env_netdata_log_method[FILENAME_MAX + 50] = ""; char env_netdata_log_format[FILENAME_MAX + 50] = ""; @@ -42,7 +44,7 @@ unsigned int read_iface_iflink(const char *prefix, const char *iface) { unsigned long long iflink = 0; int ret = read_single_number_file(filename, &iflink); - if(ret) collector_error("Cannot read '%s'.", filename); + if(ret) nd_log(NDLS_COLLECTORS, NDLP_ERR, "Cannot read '%s'.", filename); return (unsigned int)iflink; } @@ -55,7 +57,7 @@ unsigned int read_iface_ifindex(const char *prefix, const char *iface) { unsigned long long ifindex = 0; int ret = read_single_number_file(filename, &ifindex); - if(ret) collector_error("Cannot read '%s'.", filename); + if(ret) nd_log(NDLS_COLLECTORS, NDLP_ERR, "Cannot read '%s'.", filename); return (unsigned int)ifindex; } @@ -68,19 +70,15 @@ struct iface *read_proc_net_dev(const char *scope __maybe_unused, const char *pr snprintfz(filename, FILENAME_MAX, "%s%s", prefix, (*prefix)?"/proc/1/net/dev":"/proc/net/dev"); -#ifdef NETDATA_INTERNAL_CHECKS - collector_info("parsing '%s'", filename); -#endif - ff = procfile_open(filename, " \t,:|", PROCFILE_FLAG_DEFAULT); if(unlikely(!ff)) { - collector_error("Cannot open file '%s'", filename); + nd_log(NDLS_COLLECTORS, NDLP_ERR, "Cannot open file '%s'", filename); return NULL; } ff = procfile_readall(ff); if(unlikely(!ff)) { - collector_error("Cannot read file '%s'", filename); + nd_log(NDLS_COLLECTORS, NDLP_ERR, "Cannot read file '%s'", filename); return NULL; } @@ -97,9 +95,7 @@ struct iface *read_proc_net_dev(const char *scope __maybe_unused, const char *pr t->next = root; root = t; -#ifdef NETDATA_INTERNAL_CHECKS - collector_info("added %s interface '%s', ifindex %u, iflink %u", scope, t->device, t->ifindex, t->iflink); -#endif + nd_log(NDLS_COLLECTORS, NDLP_DEBUG, "added %s interface '%s', ifindex %u, iflink %u", scope, t->device, t->ifindex, t->iflink); } procfile_close(ff); @@ -143,13 +139,18 @@ static void continue_as_child(void) { int status; pid_t ret; - if (child < 0) - collector_error("fork() failed"); + if (child < 0) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "fork() failed"); + exit(1); + } - /* Only the child returns */ - if (child == 0) + if (child == 0) { + // the child returns + gettid_uncached(); return; + } + // here is the parent for (;;) { ret = waitpid(child, &status, WUNTRACED); if ((ret == child) && (WIFSTOPPED(status))) { @@ -159,9 +160,36 @@ static void continue_as_child(void) { } else { break; } + tinysleep(); } /* Return the child's exit code if possible */ + +#ifdef __SANITIZE_ADDRESS__ + /* + * With sanitization, exiting leads to an infinite loop (100% cpu) here: + * + * #0 0x00007ffff690ea8b in sched_yield () from /usr/lib/libc.so.6 + * #1 0x00007ffff792c4a6 in __sanitizer::StopTheWorld (callback=, argument=) at /usr/src/debug/gcc/gcc/libsanitizer/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cpp:457 + * #2 0x00007ffff793f6f9 in __lsan::LockStuffAndStopTheWorldCallback (info=, size=, data=0x7fffffffde20) at 
/usr/src/debug/gcc/gcc/libsanitizer/lsan/lsan_common_linux.cpp:127 + * #3 0x00007ffff6977909 in dl_iterate_phdr () from /usr/lib/libc.so.6 + * #4 0x00007ffff793fb24 in __lsan::LockStuffAndStopTheWorld (callback=callback@entry=0x7ffff793d9d0 <__lsan::CheckForLeaksCallback(__sanitizer::SuspendedThreadsList const&, void*)>, argument=argument@entry=0x7fffffffdea0) + * at /usr/src/debug/gcc/gcc/libsanitizer/lsan/lsan_common_linux.cpp:142 + * #5 0x00007ffff793c965 in __lsan::CheckForLeaks () at /usr/src/debug/gcc/gcc/libsanitizer/lsan/lsan_common.cpp:778 + * #6 0x00007ffff793cc68 in __lsan::DoLeakCheck () at /usr/src/debug/gcc/gcc/libsanitizer/lsan/lsan_common.cpp:821 + * #7 0x00007ffff684e340 in __cxa_finalize () from /usr/lib/libc.so.6 + * #8 0x00007ffff7838c58 in __do_global_dtors_aux () from /usr/lib/libasan.so.8 + * #9 0x00007fffffffdfe0 in ?? () + * + * Probably is something related to switching name spaces. + * So, we kill -9 self. + * + */ + + nd_log(NDLS_COLLECTORS, NDLP_DEBUG, "sanitizers detected, killing myself to avoid lockup"); + kill(getpid(), SIGKILL); +#endif + if (WIFEXITED(status)) { exit(WEXITSTATUS(status)); } else if (WIFSIGNALED(status)) { @@ -179,7 +207,7 @@ int proc_pid_fd(const char *prefix, const char *ns, pid_t pid) { int fd = open(filename, O_RDONLY | O_CLOEXEC); if(fd == -1) - collector_error("Cannot open proc_pid_fd() file '%s'", filename); + nd_log(NDLS_COLLECTORS, NDLP_ERR, "Cannot open proc_pid_fd() file '%s'", filename); return fd; } @@ -203,10 +231,8 @@ static struct ns { { .nstype = 0, .fd = -1, .status = -1, .name = NULL, .path = NULL } }; -int switch_namespace(const char *prefix, pid_t pid) { - +static int switch_namespace(const char *prefix, pid_t pid) { #ifdef HAVE_SETNS - int i; for(i = 0; all_ns[i].name ; i++) all_ns[i].fd = proc_pid_fd(prefix, all_ns[i].path, pid); @@ -229,7 +255,9 @@ int switch_namespace(const char *prefix, pid_t pid) { if(setns(all_ns[i].fd, all_ns[i].nstype) == -1) { if(pass == 1) { all_ns[i].status = 0; - collector_error("Cannot switch to %s namespace of pid %d", all_ns[i].name, (int) pid); + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "Cannot switch to %s namespace of pid %d", + all_ns[i].name, (int) pid); } } else @@ -238,21 +266,22 @@ int switch_namespace(const char *prefix, pid_t pid) { } } + gettid_uncached(); setgroups(0, NULL); if(root_fd != -1) { if(fchdir(root_fd) < 0) - collector_error("Cannot fchdir() to pid %d root directory", (int)pid); + nd_log(NDLS_COLLECTORS, NDLP_ERR, "Cannot fchdir() to pid %d root directory", (int)pid); if(chroot(".") < 0) - collector_error("Cannot chroot() to pid %d root directory", (int)pid); + nd_log(NDLS_COLLECTORS, NDLP_ERR, "Cannot chroot() to pid %d root directory", (int)pid); close(root_fd); } if(cwd_fd != -1) { if(fchdir(cwd_fd) < 0) - collector_error("Cannot fchdir() to pid %d current working directory", (int)pid); + nd_log(NDLS_COLLECTORS, NDLP_ERR, "Cannot fchdir() to pid %d current working directory", (int)pid); close(cwd_fd); } @@ -276,9 +305,8 @@ int switch_namespace(const char *prefix, pid_t pid) { #else errno = ENOSYS; - collector_error("setns() is missing on this system."); + nd_log(NDLS_COLLECTORS, NDLP_ERR, "setns() is missing on this system."); return 1; - #endif } @@ -286,13 +314,13 @@ pid_t read_pid_from_cgroup_file(const char *filename) { int fd = open(filename, procfile_open_flags); if(fd == -1) { if (errno != ENOENT) - collector_error("Cannot open pid_from_cgroup() file '%s'.", filename); + nd_log(NDLS_COLLECTORS, NDLP_ERR, "Cannot open pid_from_cgroup() file '%s'.", filename); 
return 0; } FILE *fp = fdopen(fd, "r"); if(!fp) { - collector_error("Cannot upgrade fd to fp for file '%s'.", filename); + nd_log(NDLS_COLLECTORS, NDLP_ERR, "Cannot upgrade fd to fp for file '%s'.", filename); return 0; } @@ -307,9 +335,8 @@ pid_t read_pid_from_cgroup_file(const char *filename) { fclose(fp); -#ifdef NETDATA_INTERNAL_CHECKS - if(pid > 0) collector_info("found pid %d on file '%s'", pid, filename); -#endif + if(pid > 0) + nd_log(NDLS_COLLECTORS, NDLP_DEBUG, "found pid %d on file '%s'", pid, filename); return pid; } @@ -331,7 +358,7 @@ pid_t read_pid_from_cgroup(const char *path) { DIR *dir = opendir(path); if (!dir) { - collector_error("cannot read directory '%s'", path); + nd_log(NDLS_COLLECTORS, NDLP_ERR, "cannot read directory '%s'", path); return 0; } @@ -368,9 +395,8 @@ struct found_device { } *detected_devices = NULL; void add_device(const char *host, const char *guest) { -#ifdef NETDATA_INTERNAL_CHECKS - collector_info("adding device with host '%s', guest '%s'", host, guest); -#endif + errno_clear(); + nd_log(NDLS_COLLECTORS, NDLP_DEBUG, "adding device with host '%s', guest '%s'", host, guest); uint32_t hash = simple_hash(host); @@ -422,36 +448,34 @@ void detect_veth_interfaces(pid_t pid) { host = read_proc_net_dev("host", netdata_configured_host_prefix); if(!host) { errno_clear(); - collector_error("cannot read host interface list."); + nd_log(NDLS_COLLECTORS, NDLP_WARNING, "no host interface list."); goto cleanup; } if(!eligible_ifaces(host)) { errno_clear(); - collector_info("there are no double-linked host interfaces available."); + nd_log(NDLS_COLLECTORS, NDLP_WARNING, "no double-linked host interfaces available."); goto cleanup; } if(switch_namespace(netdata_configured_host_prefix, pid)) { errno_clear(); - collector_error("cannot switch to the namespace of pid %u", (unsigned int) pid); + nd_log(NDLS_COLLECTORS, NDLP_ERR, "cannot switch to the namespace of pid %u", (unsigned int) pid); goto cleanup; } -#ifdef NETDATA_INTERNAL_CHECKS - collector_info("switched to namespaces of pid %d", pid); -#endif + nd_log(NDLS_COLLECTORS, NDLP_DEBUG, "switched to namespaces of pid %d", pid); cgroup = read_proc_net_dev("cgroup", NULL); if(!cgroup) { errno_clear(); - collector_error("cannot read cgroup interface list."); + nd_log(NDLS_COLLECTORS, NDLP_ERR, "cannot read cgroup interface list."); goto cleanup; } if(!eligible_ifaces(cgroup)) { errno_clear(); - collector_error("there are not double-linked cgroup interfaces available."); + nd_log(NDLS_COLLECTORS, NDLP_ERR, "there are not double-linked cgroup interfaces available."); goto cleanup; } @@ -478,66 +502,113 @@ void detect_veth_interfaces(pid_t pid) { if(iface_is_eligible(h)) { for (c = cgroup; c; c = c->next) { if(iface_is_eligible(c) && h->ifindex == c->iflink && h->iflink == c->ifindex) { - add_device(h->device, c->device); + printf("%s %s\n", h->device, c->device); + // add_device(h->device, c->device); } } } } + printf("EXIT DONE\n"); + fflush(stdout); + cleanup: free_host_ifaces(cgroup); free_host_ifaces(host); } +struct send_to_spawned_process { + pid_t pid; + char host_prefix[FILENAME_MAX]; +}; + + +static int spawn_callback(SPAWN_REQUEST *request) { + const struct send_to_spawned_process *d = request->data; + detect_veth_interfaces(d->pid); + return 0; +} + +#define CGROUP_NETWORK_INTERFACE_MAX_LINE 2048 +static void read_from_spawned(SPAWN_INSTANCE *si, const char *name __maybe_unused) { + char buffer[CGROUP_NETWORK_INTERFACE_MAX_LINE + 1]; + char *s; + FILE *fp = fdopen(spawn_server_instance_read_fd(si), "r"); + 
while((s = fgets(buffer, CGROUP_NETWORK_INTERFACE_MAX_LINE, fp))) { + trim(s); + + if(*s && *s != '\n') { + char *t = s; + while(*t && *t != ' ') t++; + if(*t == ' ') { + *t = '\0'; + t++; + } + + if(strcmp(s, "EXIT") == 0) + break; + + if(!*s || !*t) continue; + add_device(s, t); + } + } + fclose(fp); + spawn_server_instance_read_fd_unset(si); + spawn_server_exec_kill(spawn_server, si); +} + +void detect_veth_interfaces_spawn(pid_t pid) { + struct send_to_spawned_process d = { + .pid = pid, + }; + strncpyz(d.host_prefix, netdata_configured_host_prefix, sizeof(d.host_prefix) - 1); + SPAWN_INSTANCE *si = spawn_server_exec(spawn_server, STDERR_FILENO, 0, NULL, &d, sizeof(d), SPAWN_INSTANCE_TYPE_CALLBACK); + if(si) + read_from_spawned(si, "switch namespace callback"); + else + nd_log(NDLS_COLLECTORS, NDLP_ERR, "cgroup-network cannot spawn switch namespace callback"); +} + // ---------------------------------------------------------------------------- // call the external helper #define CGROUP_NETWORK_INTERFACE_MAX_LINE 2048 void call_the_helper(pid_t pid, const char *cgroup) { - if(setresuid(0, 0, 0) == -1) - collector_error("setresuid(0, 0, 0) failed."); - char command[CGROUP_NETWORK_INTERFACE_MAX_LINE + 1]; if(cgroup) snprintfz(command, CGROUP_NETWORK_INTERFACE_MAX_LINE, "exec " PLUGINS_DIR "/cgroup-network-helper.sh --cgroup '%s'", cgroup); else snprintfz(command, CGROUP_NETWORK_INTERFACE_MAX_LINE, "exec " PLUGINS_DIR "/cgroup-network-helper.sh --pid %d", pid); - collector_info("running: %s", command); + nd_log(NDLS_COLLECTORS, NDLP_DEBUG, "running: %s", command); - POPEN_INSTANCE *pi; + SPAWN_INSTANCE *si; - if(cgroup) - pi = spawn_popen_run_variadic(PLUGINS_DIR "/cgroup-network-helper.sh", "--cgroup", cgroup, NULL); + if(cgroup) { + const char *argv[] = { + PLUGINS_DIR "/cgroup-network-helper.sh", + "--cgroup", + cgroup, + NULL, + }; + si = spawn_server_exec(spawn_server, nd_log_collectors_fd(), 0, argv, NULL, 0, SPAWN_INSTANCE_TYPE_EXEC); + } else { char buffer[100]; snprintfz(buffer, sizeof(buffer) - 1, "%d", pid); - pi = spawn_popen_run_variadic(PLUGINS_DIR "/cgroup-network-helper.sh", "--pid", buffer, NULL); + const char *argv[] = { + PLUGINS_DIR "/cgroup-network-helper.sh", + "--pid", + buffer, + NULL, + }; + si = spawn_server_exec(spawn_server, nd_log_collectors_fd(), 0, argv, NULL, 0, SPAWN_INSTANCE_TYPE_EXEC); } - if(pi) { - char buffer[CGROUP_NETWORK_INTERFACE_MAX_LINE + 1]; - char *s; - while((s = fgets(buffer, CGROUP_NETWORK_INTERFACE_MAX_LINE, pi->child_stdout_fp))) { - trim(s); - - if(*s && *s != '\n') { - char *t = s; - while(*t && *t != ' ') t++; - if(*t == ' ') { - *t = '\0'; - t++; - } - - if(!*s || !*t) continue; - add_device(s, t); - } - } - - spawn_popen_kill(pi); - } + if(si) + read_from_spawned(si, command); else - collector_error("cannot execute cgroup-network helper script: %s", command); + nd_log(NDLS_COLLECTORS, NDLP_ERR, "cannot execute cgroup-network helper script: %s", command); } int is_valid_path_symbol(char c) { @@ -568,33 +639,33 @@ int verify_path(const char *path) { const char *s = path; while((c = *s++)) { if(!( isalnum(c) || is_valid_path_symbol(c) )) { - collector_error("invalid character in path '%s'", path); + nd_log(NDLS_COLLECTORS, NDLP_ERR, "invalid character in path '%s'", path); return -1; } } if(strstr(path, "\\") && !strstr(path, "\\x")) { - collector_error("invalid escape sequence in path '%s'", path); + nd_log(NDLS_COLLECTORS, NDLP_ERR, "invalid escape sequence in path '%s'", path); return 1; } if(strstr(path, "/../")) { - 
collector_error("invalid parent path sequence detected in '%s'", path); + nd_log(NDLS_COLLECTORS, NDLP_ERR, "invalid parent path sequence detected in '%s'", path); return 1; } if(path[0] != '/') { - collector_error("only absolute path names are supported - invalid path '%s'", path); + nd_log(NDLS_COLLECTORS, NDLP_ERR, "only absolute path names are supported - invalid path '%s'", path); return -1; } if (stat(path, &sb) == -1) { - collector_error("cannot stat() path '%s'", path); + nd_log(NDLS_COLLECTORS, NDLP_ERR, "cannot stat() path '%s'", path); return -1; } if((sb.st_mode & S_IFMT) != S_IFDIR) { - collector_error("path '%s' is not a directory", path); + nd_log(NDLS_COLLECTORS, NDLP_ERR, "path '%s' is not a directory", path); return -1; } @@ -616,10 +687,10 @@ char *fix_path_variable(void) { char *s = strsep(&ptr, ":"); if(s && *s) { if(verify_path(s) == -1) { - collector_error("the PATH variable includes an invalid path '%s' - removed it.", s); + nd_log(NDLS_COLLECTORS, NDLP_ERR, "the PATH variable includes an invalid path '%s' - removed it.", s); } else { - collector_info("the PATH variable includes a valid path '%s'.", s); + nd_log(NDLS_COLLECTORS, NDLP_DEBUG, "the PATH variable includes a valid path '%s'.", s); if(added) strcat(safe_path, ":"); strcat(safe_path, s); added++; @@ -627,8 +698,8 @@ char *fix_path_variable(void) { } } - collector_info("unsafe PATH: '%s'.", path); - collector_info(" safe PATH: '%s'.", safe_path); + nd_log(NDLS_COLLECTORS, NDLP_DEBUG, "unsafe PATH: '%s'.", path); + nd_log(NDLS_COLLECTORS, NDLP_DEBUG, " safe PATH: '%s'.", safe_path); freez(p); return safe_path; @@ -643,11 +714,14 @@ void usage(void) { exit(1); } -int main(int argc, char **argv) { +int main(int argc, const char **argv) { pid_t pid = 0; - clocks_init(); + if (setresuid(0, 0, 0) == -1) + collector_error("setresuid(0, 0, 0) failed."); + nd_log_initialize_for_external_plugins("cgroup-network"); + spawn_server = spawn_server_create(SPAWN_SERVER_OPTION_EXEC | SPAWN_SERVER_OPTION_CALLBACK, NULL, spawn_callback, argc, argv); // since cgroup-network runs as root, prevent it from opening symbolic links procfile_open_flags = O_RDONLY|O_NOFOLLOW; @@ -700,16 +774,16 @@ int main(int argc, char **argv) { if(pid <= 0) { errno_clear(); - collector_error("Invalid pid %d given", (int) pid); + nd_log(NDLS_COLLECTORS, NDLP_ERR, "Invalid pid %d given", (int) pid); return 2; } if(helper) call_the_helper(pid, NULL); } else if(!strcmp(argv[arg], "--cgroup")) { - char *cgroup = argv[arg+1]; + const char *cgroup = argv[arg+1]; if(verify_path(cgroup) == -1) { - collector_error("cgroup '%s' does not exist or is not valid.", cgroup); + nd_log(NDLS_COLLECTORS, NDLP_ERR, "cgroup '%s' does not exist or is not valid.", cgroup); return 1; } @@ -718,16 +792,19 @@ int main(int argc, char **argv) { if(pid <= 0 && !detected_devices) { errno_clear(); - collector_error("Cannot find a cgroup PID from cgroup '%s'", cgroup); + nd_log(NDLS_COLLECTORS, NDLP_ERR, "Cannot find a cgroup PID from cgroup '%s'", cgroup); } } else usage(); if(pid > 0) - detect_veth_interfaces(pid); + detect_veth_interfaces_spawn(pid); int found = send_devices(); + + spawn_server_destroy(spawn_server); + if(found <= 0) return 1; return 0; } diff --git a/src/collectors/cgroups.plugin/cgroup-top.c b/src/collectors/cgroups.plugin/cgroup-top.c index aa413dad1..7b98502b5 100644 --- a/src/collectors/cgroups.plugin/cgroup-top.c +++ b/src/collectors/cgroups.plugin/cgroup-top.c @@ -98,7 +98,7 @@ void cgroup_netdev_get_bandwidth(struct cgroup *cg, NETDATA_DOUBLE *received, 
NE *sent = t->sent[slot]; } -int cgroup_function_cgroup_top(BUFFER *wb, const char *function __maybe_unused) { +int cgroup_function_cgroup_top(BUFFER *wb, const char *function __maybe_unused, BUFFER *payload __maybe_unused, const char *source __maybe_unused) { buffer_flush(wb); wb->content_type = CT_APPLICATION_JSON; buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT); @@ -341,7 +341,7 @@ int cgroup_function_cgroup_top(BUFFER *wb, const char *function __maybe_unused) return HTTP_RESP_OK; } -int cgroup_function_systemd_top(BUFFER *wb, const char *function __maybe_unused) { +int cgroup_function_systemd_top(BUFFER *wb, const char *function __maybe_unused, BUFFER *payload __maybe_unused, const char *source __maybe_unused) { buffer_flush(wb); wb->content_type = CT_APPLICATION_JSON; buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT); diff --git a/src/collectors/cgroups.plugin/sys_fs_cgroup.c b/src/collectors/cgroups.plugin/sys_fs_cgroup.c index 5fdefa863..d41575fa6 100644 --- a/src/collectors/cgroups.plugin/sys_fs_cgroup.c +++ b/src/collectors/cgroups.plugin/sys_fs_cgroup.c @@ -39,7 +39,7 @@ SIMPLE_PATTERN *search_cgroup_paths = NULL; SIMPLE_PATTERN *enabled_cgroup_renames = NULL; SIMPLE_PATTERN *systemd_services_cgroups = NULL; SIMPLE_PATTERN *entrypoint_parent_process_comm = NULL; -char *cgroups_network_interface_script = NULL; +const char *cgroups_network_interface_script = NULL; int cgroups_check = 0; uint32_t Read_hash = 0; uint32_t Write_hash = 0; @@ -82,7 +82,7 @@ static enum cgroups_systemd_setting cgroups_detect_systemd(const char *exec) return retval; struct pollfd pfd; - pfd.fd = spawn_server_instance_read_fd(pi->si); + pfd.fd = spawn_popen_read_fd(pi); pfd.events = POLLIN; int timeout = 3000; // milliseconds @@ -93,7 +93,7 @@ static enum cgroups_systemd_setting cgroups_detect_systemd(const char *exec) } else if (ret == 0) { collector_info("Cannot get the output of \"%s\" within timeout (%d ms)", exec, timeout); } else { - while (fgets(buf, MAXSIZE_PROC_CMDLINE, pi->child_stdout_fp) != NULL) { + while (fgets(buf, MAXSIZE_PROC_CMDLINE, spawn_popen_stdout(pi)) != NULL) { if ((begin = strstr(buf, SYSTEMD_HIERARCHY_STRING))) { end = begin = begin + strlen(SYSTEMD_HIERARCHY_STRING); if (!*begin) @@ -153,18 +153,18 @@ static enum cgroups_type cgroups_try_detect_version() int cgroups2_available = 0; // 1. 
check if cgroups2 available on system at all - POPEN_INSTANCE *instance = spawn_popen_run("grep cgroup /proc/filesystems"); - if(!instance) { + POPEN_INSTANCE *pi = spawn_popen_run("grep cgroup /proc/filesystems"); + if(!pi) { collector_error("cannot run 'grep cgroup /proc/filesystems'"); return CGROUPS_AUTODETECT_FAIL; } - while (fgets(buf, MAXSIZE_PROC_CMDLINE, instance->child_stdout_fp) != NULL) { + while (fgets(buf, MAXSIZE_PROC_CMDLINE, spawn_popen_stdout(pi)) != NULL) { if (strstr(buf, "cgroup2")) { cgroups2_available = 1; break; } } - if(spawn_popen_wait(instance) != 0) + if(spawn_popen_wait(pi) != 0) return CGROUPS_AUTODETECT_FAIL; if(!cgroups2_available) @@ -229,13 +229,17 @@ void read_cgroup_plugin_configuration() { throttled_time_hash = simple_hash("throttled_time"); throttled_usec_hash = simple_hash("throttled_usec"); - cgroup_update_every = (int)config_get_number("plugin:cgroups", "update every", localhost->rrd_update_every); - if(cgroup_update_every < localhost->rrd_update_every) + cgroup_update_every = (int)config_get_duration_seconds("plugin:cgroups", "update every", localhost->rrd_update_every); + if(cgroup_update_every < localhost->rrd_update_every) { cgroup_update_every = localhost->rrd_update_every; + config_set_duration_seconds("plugin:cgroups", "update every", localhost->rrd_update_every); + } - cgroup_check_for_new_every = (int)config_get_number("plugin:cgroups", "check for new cgroups every", cgroup_check_for_new_every); - if(cgroup_check_for_new_every < cgroup_update_every) + cgroup_check_for_new_every = (int)config_get_duration_seconds("plugin:cgroups", "check for new cgroups every", cgroup_check_for_new_every); + if(cgroup_check_for_new_every < cgroup_update_every) { cgroup_check_for_new_every = cgroup_update_every; + config_set_duration_seconds("plugin:cgroups", "check for new cgroups every", cgroup_check_for_new_every); + } cgroup_use_unified_cgroups = config_get_boolean_ondemand("plugin:cgroups", "use unified cgroups", CONFIG_BOOLEAN_AUTO); if (cgroup_use_unified_cgroups == CONFIG_BOOLEAN_AUTO) @@ -1401,24 +1405,25 @@ void *cgroups_main(void *ptr) { cgroup_netdev_link_init(); rrd_function_add_inline(localhost, NULL, "containers-vms", 10, - RRDFUNCTIONS_PRIORITY_DEFAULT / 2, RRDFUNCTIONS_CGTOP_HELP, + RRDFUNCTIONS_PRIORITY_DEFAULT / 2, RRDFUNCTIONS_VERSION_DEFAULT, + RRDFUNCTIONS_CGTOP_HELP, "top", HTTP_ACCESS_ANONYMOUS_DATA, cgroup_function_cgroup_top); rrd_function_add_inline(localhost, NULL, "systemd-services", 10, - RRDFUNCTIONS_PRIORITY_DEFAULT / 3, RRDFUNCTIONS_SYSTEMD_SERVICES_HELP, + RRDFUNCTIONS_PRIORITY_DEFAULT / 3, RRDFUNCTIONS_VERSION_DEFAULT, + RRDFUNCTIONS_SYSTEMD_SERVICES_HELP, "top", HTTP_ACCESS_ANONYMOUS_DATA, cgroup_function_systemd_top); heartbeat_t hb; - heartbeat_init(&hb); - usec_t step = cgroup_update_every * USEC_PER_SEC; + heartbeat_init(&hb, cgroup_update_every * USEC_PER_SEC); usec_t find_every = cgroup_check_for_new_every * USEC_PER_SEC, find_dt = 0; while(service_running(SERVICE_COLLECTORS)) { worker_is_idle(); - usec_t hb_dt = heartbeat_next(&hb, step); + usec_t hb_dt = heartbeat_next(&hb); if (unlikely(!service_running(SERVICE_COLLECTORS))) break; diff --git a/src/collectors/charts.d.plugin/README.md b/src/collectors/charts.d.plugin/README.md index 3558985db..309f60e63 100644 --- a/src/collectors/charts.d.plugin/README.md +++ b/src/collectors/charts.d.plugin/README.md @@ -7,8 +7,7 @@ 3. It communicates with Netdata via a unidirectional pipe (sending data to the `netdata` daemon) 4. 
Supports any number of data collection **modules** -To better understand the guidelines and the API behind our External plugins, please have a look at the [Introduction to External plugins](/src/collectors/plugins.d/README.md) prior to reading this page. - +To better understand the guidelines and the API behind our External plugins, please have a look at the [Introduction to External plugins](/src/plugins.d/README.md) prior to reading this page. `charts.d.plugin` has been designed so that the actual script that will do data collection will be permanently in memory, collecting data with as little overheads as possible @@ -21,11 +20,11 @@ By default, `charts.d.plugin` is not included as part of the install when using ## Configuration -`charts.d.plugin` itself can be [configured](/docs/netdata-agent/configuration/README.md#edit-netdataconf)using the configuration file `/etc/netdata/charts.d.conf`. This file is also a BASH script. +`charts.d.plugin` itself can be [configured](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) using the configuration file `/etc/netdata/charts.d.conf`. This file is also a BASH script. In this file, you can place statements like this: -```conf +```text enable_all_charts="yes" X="yes" Y="no" @@ -121,7 +120,7 @@ Using the above, if the command `mysql` is not available in the system, the `mys `fixid()` will get a string and return a properly formatted id for a chart or dimension. This is an expensive function that should not be used in `X_update()`. -You can keep the generated id in a BASH associative array to have the values availables in `X_update()`, like this: +You can keep the generated id in a BASH associative array to have the values available in `X_update()`, like this: ```sh declare -A X_ids=() diff --git a/src/collectors/charts.d.plugin/apcupsd/README.md b/src/collectors/charts.d.plugin/apcupsd/README.md deleted file mode 120000 index fc6681fe6..000000000 --- a/src/collectors/charts.d.plugin/apcupsd/README.md +++ /dev/null @@ -1 +0,0 @@ -integrations/apc_ups.md \ No newline at end of file diff --git a/src/collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh b/src/collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh deleted file mode 100644 index 58132024b..000000000 --- a/src/collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh +++ /dev/null @@ -1,306 +0,0 @@ -# shellcheck shell=bash -# no need for shebang - this file is loaded from charts.d.plugin -# SPDX-License-Identifier: GPL-3.0-or-later - -# netdata -# real-time performance and health monitoring, done right!
-# (C) 2016 Costa Tsaousis -# - -apcupsd_ip= -apcupsd_port= - -declare -A apcupsd_sources=( - ["local"]="127.0.0.1:3551" -) - -# how frequently to collect UPS data -apcupsd_update_every=10 - -apcupsd_timeout=3 - -# the priority of apcupsd related to other charts -apcupsd_priority=90000 - -apcupsd_get() { - run -t $apcupsd_timeout apcaccess status "$1" -} - -is_ups_alive() { - local status - status="$(apcupsd_get "$1" | sed -e 's/STATUS.*: //' -e 't' -e 'd')" - case "$status" in - "" | "COMMLOST" | "SHUTTING DOWN") return 1 ;; - *) return 0 ;; - esac -} - -apcupsd_check() { - - # this should return: - # - 0 to enable the chart - # - 1 to disable the chart - - require_cmd apcaccess || return 1 - - # backwards compatibility - if [ "${apcupsd_ip}:${apcupsd_port}" != ":" ]; then - apcupsd_sources["local"]="${apcupsd_ip}:${apcupsd_port}" - fi - - local host working=0 failed=0 - for host in "${!apcupsd_sources[@]}"; do - apcupsd_get "${apcupsd_sources[${host}]}" >/dev/null - # shellcheck disable=2181 - if [ $? -ne 0 ]; then - error "cannot get information for apcupsd server ${host} on ${apcupsd_sources[${host}]}." - failed=$((failed + 1)) - else - if ! is_ups_alive ${apcupsd_sources[${host}]}; then - error "APC UPS ${host} on ${apcupsd_sources[${host}]} is not online." - failed=$((failed + 1)) - else - working=$((working + 1)) - fi - fi - done - - if [ ${working} -eq 0 ]; then - error "No APC UPSes found available." - return 1 - fi - - return 0 -} - -apcupsd_create() { - local host - for host in "${!apcupsd_sources[@]}"; do - # create the charts - cat < -# GPL v3+ - -# add all your APC UPSes in this array - uncomment it too -#declare -A apcupsd_sources=( -# ["local"]="127.0.0.1:3551" -#) - -# how long to wait for apcupsd to respond -#apcupsd_timeout=3 - -# the data collection frequency -# if unset, will inherit the netdata update frequency -#apcupsd_update_every=10 - -# the charts priority on the dashboard -#apcupsd_priority=90000 - -# the number of retries to do in case of failure -# before disabling the module -#apcupsd_retries=10 diff --git a/src/collectors/charts.d.plugin/apcupsd/integrations/apc_ups.md b/src/collectors/charts.d.plugin/apcupsd/integrations/apc_ups.md deleted file mode 100644 index fdf1ccc9e..000000000 --- a/src/collectors/charts.d.plugin/apcupsd/integrations/apc_ups.md +++ /dev/null @@ -1,237 +0,0 @@ - - -# APC UPS - - - - - -Plugin: charts.d.plugin -Module: apcupsd - - - -## Overview - -Monitor APC UPS performance with Netdata for optimal uninterruptible power supply operations. Enhance your power supply reliability with real-time APC UPS metrics. - -The collector uses the `apcaccess` tool to contact the `apcupsd` daemon and get the APC UPS statistics. - -This collector is supported on all platforms. - -This collector only supports collecting metrics from a single instance of this integration. - - -### Default Behavior - -#### Auto-Detection - -By default, with no configuration provided, the collector will try to contact 127.0.0.1:3551 with using the `apcaccess` utility. - -#### Limits - -The default configuration for this integration does not impose any limits on data collection. - -#### Performance Impact - -The default configuration for this integration is not expected to impose a significant performance impact on the system. - - -## Metrics - -Metrics grouped by *scope*. - -The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. - - - -### Per ups - -Metrics related to UPS. 
Each UPS provides its own set of the following metrics. - -This scope has no labels. - -Metrics: - -| Metric | Dimensions | Unit | -|:------|:----------|:----| -| apcupsd.charge | charge | percentage | -| apcupsd.battery.voltage | voltage, nominal | Volts | -| apcupsd.input.voltage | voltage, min, max | Volts | -| apcupsd.output.voltage | absolute, nominal | Volts | -| apcupsd.input.frequency | frequency | Hz | -| apcupsd.load | load | percentage | -| apcupsd.load_usage | load | Watts | -| apcupsd.temperature | temp | Celsius | -| apcupsd.time | time | Minutes | -| apcupsd.online | online | boolean | -| apcupsd.selftest | OK, NO, BT, NG | status | -| apcupsd.status | ONLINE, ONBATT, OVERLOAD, LOWBATT, REPLACEBATT, NOBATT, SLAVE, SLAVEDOWN, COMMLOST, CAL, TRIM, BOOST, SHUTTING_DOWN | status | - - - -## Alerts - - -The following alerts are available: - -| Alert name | On metric | Description | -|:------------|:----------|:------------| -| [ apcupsd_ups_charge ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.charge | average UPS charge over the last minute | -| [ apcupsd_10min_ups_load ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.load | average UPS load over the last 10 minutes | -| [ apcupsd_last_collected_secs ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.load | number of seconds since the last successful data collection | -| [ apcupsd_selftest_warning ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.selftest | self-test failed due to insufficient battery capacity or due to overload. | -| [ apcupsd_status_onbatt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS has switched to battery power because the input power has failed | -| [ apcupsd_status_overload ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS is overloaded and cannot supply enough power to the load | -| [ apcupsd_status_lowbatt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS battery is low and needs to be recharged | -| [ apcupsd_status_replacebatt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS battery has reached the end of its lifespan and needs to be replaced | -| [ apcupsd_status_nobatt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS has no battery | -| [ apcupsd_status_commlost ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS communication link is lost | - - -## Setup - -### Prerequisites - -#### Install charts.d plugin - -If [using our official native DEB/RPM packages](/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed. - - -#### Required software - -Make sure the `apcaccess` and `apcupsd` are installed and running. - - -### Configuration - -#### File - -The configuration file name for this integration is `charts.d/apcupsd.conf`. - - -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
- -```bash -cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata -sudo ./edit-config charts.d/apcupsd.conf -``` -#### Options - -The config file is sourced by the charts.d plugin. It's a standard bash file. - -The following collapsed table contains all the options that can be configured for the apcupsd collector. - - -
-<details><summary>Config options</summary> - -| Name | Description | Default | Required | -|:----|:-----------|:-------|:--------:| -| apcupsd_sources | This is an array of apcupsd sources. You can have multiple entries there. Please refer to the example below on how to set it. | 127.0.0.1:3551 | no | -| apcupsd_timeout | How long to wait for apcupsd to respond. | 3 | no | -| apcupsd_update_every | The data collection frequency. If unset, will inherit the netdata update frequency. | 1 | no | -| apcupsd_priority | The charts priority on the dashboard. | 90000 | no | -| apcupsd_retries | The number of retries to do in case of failure before disabling the collector. | 10 | no | - -</details>
- -#### Examples - -##### Multiple apcupsd sources - -Specify a multiple apcupsd sources along with a custom update interval - -```yaml -# add all your APC UPSes in this array - uncomment it too -declare -A apcupsd_sources=( - ["local"]="127.0.0.1:3551", - ["remote"]="1.2.3.4:3551" -) - -# how long to wait for apcupsd to respond -#apcupsd_timeout=3 - -# the data collection frequency -# if unset, will inherit the netdata update frequency -apcupsd_update_every=5 - -# the charts priority on the dashboard -#apcupsd_priority=90000 - -# the number of retries to do in case of failure -# before disabling the module -#apcupsd_retries=10 - -``` - - -## Troubleshooting - -### Debug Mode - - -To troubleshoot issues with the `apcupsd` collector, run the `charts.d.plugin` with the debug option enabled. The output -should give you clues as to why the collector isn't working. - -- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on - your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. - - ```bash - cd /usr/libexec/netdata/plugins.d/ - ``` - -- Switch to the `netdata` user. - - ```bash - sudo -u netdata -s - ``` - -- Run the `charts.d.plugin` to debug the collector: - - ```bash - ./charts.d.plugin debug 1 apcupsd - ``` - -### Getting Logs - -If you're encountering problems with the `apcupsd` collector, follow these steps to retrieve logs and identify potential issues: - -- **Run the command** specific to your system (systemd, non-systemd, or Docker container). -- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem. - -#### System with systemd - -Use the following command to view logs generated since the last Netdata service restart: - -```bash -journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep apcupsd -``` - -#### System without systemd - -Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name: - -```bash -grep apcupsd /var/log/netdata/collector.log -``` - -**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues. - -#### Docker Container - -If your Netdata runs in a Docker container named "netdata" (replace if different), use this command: - -```bash -docker logs netdata 2>&1 | grep apcupsd -``` - - diff --git a/src/collectors/charts.d.plugin/apcupsd/metadata.yaml b/src/collectors/charts.d.plugin/apcupsd/metadata.yaml deleted file mode 100644 index 01d86e795..000000000 --- a/src/collectors/charts.d.plugin/apcupsd/metadata.yaml +++ /dev/null @@ -1,256 +0,0 @@ -plugin_name: charts.d.plugin -modules: - - meta: - plugin_name: charts.d.plugin - module_name: apcupsd - monitored_instance: - name: APC UPS - link: "https://www.apc.com" - categories: - - data-collection.ups - icon_filename: "apc.svg" - related_resources: - integrations: - list: [] - info_provided_to_referring_integrations: - description: "" - keywords: - - ups - - apc - - power - - supply - - battery - - apcupsd - most_popular: false - overview: - data_collection: - metrics_description: "Monitor APC UPS performance with Netdata for optimal uninterruptible power supply operations. Enhance your power supply reliability with real-time APC UPS metrics." 
- method_description: "The collector uses the `apcaccess` tool to contact the `apcupsd` daemon and get the APC UPS statistics." - supported_platforms: - include: [] - exclude: [] - multi_instance: false - additional_permissions: - description: "" - default_behavior: - auto_detection: - description: "By default, with no configuration provided, the collector will try to contact 127.0.0.1:3551 using the `apcaccess` utility." - limits: - description: "" - performance_impact: - description: "" - setup: - prerequisites: - list: - - title: "Install charts.d plugin" - description: | - If [using our official native DEB/RPM packages](/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed. - - title: "Required software" - description: "Make sure `apcaccess` and `apcupsd` are installed and running." - configuration: - file: - name: charts.d/apcupsd.conf - options: - description: | - The config file is sourced by the charts.d plugin. It's a standard bash file. - - The following collapsed table contains all the options that can be configured for the apcupsd collector. - folding: - title: "Config options" - enabled: true - list: - - name: apcupsd_sources - description: An array of apcupsd sources. You can define multiple entries; see the example below for how to set it. - default_value: "127.0.0.1:3551" - required: false - - name: apcupsd_timeout - description: How long to wait for apcupsd to respond. - default_value: 3 - required: false - - name: apcupsd_update_every - description: The data collection frequency. If unset, it inherits the netdata update frequency. - default_value: 1 - required: false - - name: apcupsd_priority - description: The charts priority on the dashboard. - default_value: 90000 - required: false - - name: apcupsd_retries - description: The number of retries on failure before disabling the collector.
- default_value: 10 - required: false - examples: - folding: - enabled: false - title: "Config" - list: - - name: Multiple apcupsd sources - description: Specify multiple apcupsd sources along with a custom update interval - config: | - # add all your APC UPSes in this array - uncomment it too - declare -A apcupsd_sources=( - ["local"]="127.0.0.1:3551" - ["remote"]="1.2.3.4:3551" - ) - - # how long to wait for apcupsd to respond - #apcupsd_timeout=3 - - # the data collection frequency - # if unset, will inherit the netdata update frequency - apcupsd_update_every=5 - - # the charts priority on the dashboard - #apcupsd_priority=90000 - - # the number of retries to do in case of failure - # before disabling the module - #apcupsd_retries=10 - troubleshooting: - problems: - list: [] - alerts: - - name: apcupsd_ups_charge - link: https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf - metric: apcupsd.charge - info: average UPS charge over the last minute - os: "*" - - name: apcupsd_10min_ups_load - link: https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf - metric: apcupsd.load - info: average UPS load over the last 10 minutes - os: "*" - - name: apcupsd_last_collected_secs - link: https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf - metric: apcupsd.load - info: number of seconds since the last successful data collection - - name: apcupsd_selftest_warning - link: https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf - metric: apcupsd.selftest - info: self-test failed due to insufficient battery capacity or due to overload. - - name: apcupsd_status_onbatt - link: https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf - metric: apcupsd.status - info: APC UPS has switched to battery power because the input power has failed - - name: apcupsd_status_overload - link: https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf - metric: apcupsd.status - info: APC UPS is overloaded and cannot supply enough power to the load - - name: apcupsd_status_lowbatt - link: https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf - metric: apcupsd.status - info: APC UPS battery is low and needs to be recharged - - name: apcupsd_status_replacebatt - link: https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf - metric: apcupsd.status - info: APC UPS battery has reached the end of its lifespan and needs to be replaced - - name: apcupsd_status_nobatt - link: https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf - metric: apcupsd.status - info: APC UPS has no battery - - name: apcupsd_status_commlost - link: https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf - metric: apcupsd.status - info: APC UPS communication link is lost - metrics: - folding: - title: Metrics - enabled: false - description: "" - availability: [] - scopes: - - name: ups - description: "Metrics related to UPS. Each UPS provides its own set of the following metrics."
- labels: [] - metrics: - - name: apcupsd.charge - description: UPS Charge - unit: "percentage" - chart_type: area - dimensions: - - name: charge - - name: apcupsd.battery.voltage - description: UPS Battery Voltage - unit: "Volts" - chart_type: line - dimensions: - - name: voltage - - name: nominal - - name: apcupsd.input.voltage - description: UPS Input Voltage - unit: "Volts" - chart_type: line - dimensions: - - name: voltage - - name: min - - name: max - - name: apcupsd.output.voltage - description: UPS Output Voltage - unit: "Volts" - chart_type: line - dimensions: - - name: absolute - - name: nominal - - name: apcupsd.input.frequency - description: UPS Input Frequency - unit: "Hz" - chart_type: line - dimensions: - - name: frequency - - name: apcupsd.load - description: UPS Load - unit: "percentage" - chart_type: area - dimensions: - - name: load - - name: apcupsd.load_usage - description: UPS Load Usage - unit: "Watts" - chart_type: area - dimensions: - - name: load - - name: apcupsd.temperature - description: UPS Temperature - unit: "Celsius" - chart_type: line - dimensions: - - name: temp - - name: apcupsd.time - description: UPS Time Remaining - unit: "Minutes" - chart_type: area - dimensions: - - name: time - - name: apcupsd.online - description: UPS ONLINE flag - unit: "boolean" - chart_type: line - dimensions: - - name: online - - name: apcupsd.selftest - description: UPS Self-Test status - unit: status - chart_type: line - dimensions: - - name: OK - - name: NO - - name: BT - - name: NG - - name: apcupsd.status - description: UPS Status - unit: status - chart_type: line - dimensions: - - name: ONLINE - - name: ONBATT - - name: OVERLOAD - - name: LOWBATT - - name: REPLACEBATT - - name: NOBATT - - name: SLAVE - - name: SLAVEDOWN - - name: COMMLOST - - name: CAL - - name: TRIM - - name: BOOST - - name: SHUTTING_DOWN diff --git a/src/collectors/charts.d.plugin/example/README.md b/src/collectors/charts.d.plugin/example/README.md index a16180581..d676cea77 100644 --- a/src/collectors/charts.d.plugin/example/README.md +++ b/src/collectors/charts.d.plugin/example/README.md @@ -1,12 +1,3 @@ - - # Example  If you want to understand how the charts.d data collector functions, check out the [charts.d example](https://raw.githubusercontent.com/netdata/netdata/master/src/collectors/charts.d.plugin/example/example.chart.sh). diff --git a/src/collectors/charts.d.plugin/libreswan/integrations/libreswan.md b/src/collectors/charts.d.plugin/libreswan/integrations/libreswan.md index fa8eb7a97..96691443b 100644 --- a/src/collectors/charts.d.plugin/libreswan/integrations/libreswan.md +++ b/src/collectors/charts.d.plugin/libreswan/integrations/libreswan.md @@ -79,7 +79,7 @@ There are no alerts configured by default for this integration. #### Install charts.d plugin -If [using our official native DEB/RPM packages](/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed. +If [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed. #### Permissions to execute `ipsec` @@ -115,8 +115,8 @@ Make sure the path `/sbin/ipsec` matches your setup (execute `which ipsec` to fi The configuration file name for this integration is `charts.d/libreswan.conf`.
-You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/collectors/charts.d.plugin/opensips/integrations/opensips.md b/src/collectors/charts.d.plugin/opensips/integrations/opensips.md index 7fa610eb4..13b573627 100644 --- a/src/collectors/charts.d.plugin/opensips/integrations/opensips.md +++ b/src/collectors/charts.d.plugin/opensips/integrations/opensips.md @@ -96,7 +96,7 @@ There are no alerts configured by default for this integration. #### Install charts.d plugin -If [using our official native DEB/RPM packages](/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed. +If [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed. #### Required software @@ -111,8 +111,8 @@ The collector requires the `opensipsctl` to be installed. The configuration file name for this integration is `charts.d/opensips.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/collectors/charts.d.plugin/sensors/README.md b/src/collectors/charts.d.plugin/sensors/README.md deleted file mode 120000 index 7e5a416c4..000000000 --- a/src/collectors/charts.d.plugin/sensors/README.md +++ /dev/null @@ -1 +0,0 @@ -integrations/linux_sensors_sysfs.md \ No newline at end of file diff --git a/src/collectors/charts.d.plugin/sensors/integrations/linux_sensors_sysfs.md b/src/collectors/charts.d.plugin/sensors/integrations/linux_sensors_sysfs.md deleted file mode 100644 index f9221caa1..000000000 --- a/src/collectors/charts.d.plugin/sensors/integrations/linux_sensors_sysfs.md +++ /dev/null @@ -1,235 +0,0 @@ - - -# Linux Sensors (sysfs) - - - - - -Plugin: charts.d.plugin -Module: sensors - - - -## Overview - -Use this collector when `lm-sensors` doesn't work on your device (e.g. for RPi temperatures). -For all other cases use the [Go collector](/src/go/plugin/go.d/modules/sensors/README.md), which supports multiple jobs, is more efficient and performs calculations on top of the kernel-provided values. - - -It will provide charts for all configured system sensors by reading sensors directly from the kernel. -The values graphed are the raw hardware values of the sensors.
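-To see what "raw hardware values" means in practice, you can read a sensor file straight from sysfs yourself. The path below is only illustrative, since hwmon device numbering varies between systems:
-
-```bash
-# hwmon temp*_input files report millidegrees Celsius,
-# so a reading of 42000 means 42.0 degrees
-cat /sys/class/hwmon/hwmon0/temp1_input
-```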
- - -This collector is only supported on the following platforms: - -- Linux - -This collector supports collecting metrics from multiple instances of this integration, including remote instances. - - -### Default Behavior - -#### Auto-Detection - -By default, the collector will try to read entries under `/sys/devices` - -#### Limits - -The default configuration for this integration does not impose any limits on data collection. - -#### Performance Impact - -The default configuration for this integration is not expected to impose a significant performance impact on the system. - - -## Metrics - -Metrics grouped by *scope*. - -The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. - - - -### Per sensor chip - -Metrics related to sensor chips. Each chip provides its own set of the following metrics. - -This scope has no labels. - -Metrics: - -| Metric | Dimensions | Unit | -|:------|:----------|:----| -| sensors.temp | {filename} | Celsius | -| sensors.volt | {filename} | Volts | -| sensors.curr | {filename} | Ampere | -| sensors.power | {filename} | Watt | -| sensors.fans | {filename} | Rotations / Minute | -| sensors.energy | {filename} | Joule | -| sensors.humidity | {filename} | Percent | - - - -## Alerts - -There are no alerts configured by default for this integration. - - -## Setup - -### Prerequisites - -#### Install charts.d plugin - -If [using our official native DEB/RPM packages](/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed. - - -#### Enable the sensors collector - -The `sensors` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config directory](/docs/netdata-agent/configuration/README.md), which is typically at `/etc/netdata`, to edit the `charts.d.conf` file. - -```bash -cd /etc/netdata # Replace this path with your Netdata config directory, if different -sudo ./edit-config charts.d.conf -``` - -Change the value of the `sensors` setting to `force` and uncomment the line. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](/packaging/installer/README.md#maintaining-a-netdata-agent-installation) for your system. - - - -### Configuration - -#### File - -The configuration file name for this integration is `charts.d/sensors.conf`. - - -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). - -```bash -cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata -sudo ./edit-config charts.d/sensors.conf -``` -#### Options - -The config file is sourced by the charts.d plugin. It's a standard bash file. - -The following collapsed table contains all the options that can be configured for the sensors collector. - - -
Config options - -| Name | Description | Default | Required | -|:----|:-----------|:-------|:--------:| -| sensors_sys_dir | The directory where the kernel exposes sensor data. | /sys/devices | no | -| sensors_sys_depth | How deep in the tree to check for sensor data. | 10 | no | -| sensors_source_update | If set to 1, the script will overwrite internal script functions with code-generated ones. | 1 | no | -| sensors_update_every | The data collection frequency. If unset, it inherits the netdata update frequency. | 1 | no | -| sensors_priority | The charts priority on the dashboard. | 90000 | no | -| sensors_retries | The number of retries on failure before disabling the collector. | 10 | no | - -
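-The `sensors_sys_dir` and `sensors_sys_depth` options control where and how deep the collector searches for sensor files. To preview what a given depth would discover, you can mirror the collector's own `find` invocation (a sketch; substitute the depth you intend to configure):
-
-```bash
-# list candidate sensor files up to 5 levels below the sensors directory
-find /sys/devices -maxdepth 5 -name '*_input' -o -name temp 2>/dev/null
-```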
- -#### Examples - -##### Set sensors path depth - -Set a different sensors path depth - -```bash -# the directory the kernel keeps sensor data -#sensors_sys_dir="/sys/devices" - -# how deep in the tree to check for sensor data -sensors_sys_depth=5 - -# if set to 1, the script will overwrite internal -# script functions with code generated ones -# leave to 1, is faster -#sensors_source_update=1 - -# the data collection frequency -# if unset, will inherit the netdata update frequency -#sensors_update_every= - -# the charts priority on the dashboard -#sensors_priority=90000 - -# the number of retries to do in case of failure -# before disabling the module -#sensors_retries=10 - -``` - - -## Troubleshooting - -### Debug Mode - - -To troubleshoot issues with the `sensors` collector, run the `charts.d.plugin` with the debug option enabled. The output -should give you clues as to why the collector isn't working. - -- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on - your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. - - ```bash - cd /usr/libexec/netdata/plugins.d/ - ``` - -- Switch to the `netdata` user. - - ```bash - sudo -u netdata -s - ``` - -- Run the `charts.d.plugin` to debug the collector: - - ```bash - ./charts.d.plugin debug 1 sensors - ``` - -### Getting Logs - -If you're encountering problems with the `sensors` collector, follow these steps to retrieve logs and identify potential issues: - -- **Run the command** specific to your system (systemd, non-systemd, or Docker container). -- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem. - -#### System with systemd - -Use the following command to view logs generated since the last Netdata service restart: - -```bash -journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep sensors -``` - -#### System without systemd - -Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name: - -```bash -grep sensors /var/log/netdata/collector.log -``` - -**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues. - -#### Docker Container - -If your Netdata runs in a Docker container named "netdata" (replace if different), use this command: - -```bash -docker logs netdata 2>&1 | grep sensors -``` - - diff --git a/src/collectors/charts.d.plugin/sensors/metadata.yaml b/src/collectors/charts.d.plugin/sensors/metadata.yaml deleted file mode 100644 index 9aacdd353..000000000 --- a/src/collectors/charts.d.plugin/sensors/metadata.yaml +++ /dev/null @@ -1,182 +0,0 @@ -plugin_name: charts.d.plugin -modules: - - meta: - plugin_name: charts.d.plugin - module_name: sensors - monitored_instance: - name: Linux Sensors (sysfs) - link: "https://www.kernel.org/doc/Documentation/hwmon/sysfs-interface" - categories: - - data-collection.hardware-devices-and-sensors - icon_filename: "microchip.svg" - related_resources: - integrations: - list: [] - info_provided_to_referring_integrations: - description: "" - keywords: - - sensors - - sysfs - - hwmon - - rpi - - raspberry pi - most_popular: false - overview: - data_collection: - metrics_description: | - Use this collector when `lm-sensors` doesn't work on your device (e.g. for RPi temperatures).
- For all other cases use the [Go collector](/src/go/plugin/go.d/modules/sensors/README.md), which supports multiple jobs, is more efficient and performs calculations on top of the kernel-provided values. - method_description: | - It will provide charts for all configured system sensors by reading sensors directly from the kernel. - The values graphed are the raw hardware values of the sensors. - supported_platforms: - include: [Linux] - exclude: [] - multi_instance: true - additional_permissions: - description: "" - default_behavior: - auto_detection: - description: "By default, the collector will try to read entries under `/sys/devices`" - limits: - description: "" - performance_impact: - description: "" - setup: - prerequisites: - list: - - title: "Install charts.d plugin" - description: | - If [using our official native DEB/RPM packages](/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed. - - title: "Enable the sensors collector" - description: | - The `sensors` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config directory](/docs/netdata-agent/configuration/README.md), which is typically at `/etc/netdata`, to edit the `charts.d.conf` file. - - ```bash - cd /etc/netdata # Replace this path with your Netdata config directory, if different - sudo ./edit-config charts.d.conf - ``` - - Change the value of the `sensors` setting to `force` and uncomment the line. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](/packaging/installer/README.md#maintaining-a-netdata-agent-installation) for your system. - configuration: - file: - name: charts.d/sensors.conf - options: - description: | - The config file is sourced by the charts.d plugin. It's a standard bash file. - - The following collapsed table contains all the options that can be configured for the sensors collector. - folding: - title: "Config options" - enabled: true - list: - - name: sensors_sys_dir - description: The directory where the kernel exposes sensor data. - default_value: "/sys/devices" - required: false - - name: sensors_sys_depth - description: How deep in the tree to check for sensor data. - default_value: 10 - required: false - - name: sensors_source_update - description: If set to 1, the script will overwrite internal script functions with code-generated ones. - default_value: 1 - required: false - - name: sensors_update_every - description: The data collection frequency. If unset, it inherits the netdata update frequency. - default_value: 1 - required: false - - name: sensors_priority - description: The charts priority on the dashboard. - default_value: 90000 - required: false - - name: sensors_retries - description: The number of retries on failure before disabling the collector.
- default_value: 10 - required: false - examples: - folding: - enabled: false - title: "Config" - list: - - name: Set sensors path depth - description: Set a different sensors path depth - config: | - # the directory the kernel keeps sensor data - #sensors_sys_dir="/sys/devices" - - # how deep in the tree to check for sensor data - sensors_sys_depth=5 - - # if set to 1, the script will overwrite internal - # script functions with code generated ones - # leave to 1, is faster - #sensors_source_update=1 - - # the data collection frequency - # if unset, will inherit the netdata update frequency - #sensors_update_every= - - # the charts priority on the dashboard - #sensors_priority=90000 - - # the number of retries to do in case of failure - # before disabling the module - #sensors_retries=10 - troubleshooting: - problems: - list: [] - alerts: [] - metrics: - folding: - title: Metrics - enabled: false - description: "" - availability: [] - scopes: - - name: sensor chip - description: "Metrics related to sensor chips. Each chip provides its own set of the following metrics." - labels: [] - metrics: - - name: sensors.temp - description: Temperature - unit: "Celsius" - chart_type: line - dimensions: - - name: "{filename}" - - name: sensors.volt - description: Voltage - unit: "Volts" - chart_type: line - dimensions: - - name: "{filename}" - - name: sensors.curr - description: Current - unit: "Ampere" - chart_type: line - dimensions: - - name: "{filename}" - - name: sensors.power - description: Power - unit: "Watt" - chart_type: line - dimensions: - - name: "{filename}" - - name: sensors.fans - description: Fans Speed - unit: "Rotations / Minute" - chart_type: line - dimensions: - - name: "{filename}" - - name: sensors.energy - description: Energy - unit: "Joule" - chart_type: area - dimensions: - - name: "{filename}" - - name: sensors.humidity - description: Humidity - unit: "Percent" - chart_type: line - dimensions: - - name: "{filename}" diff --git a/src/collectors/charts.d.plugin/sensors/sensors.chart.sh b/src/collectors/charts.d.plugin/sensors/sensors.chart.sh deleted file mode 100644 index 9576e2ab2..000000000 --- a/src/collectors/charts.d.plugin/sensors/sensors.chart.sh +++ /dev/null @@ -1,250 +0,0 @@ -# shellcheck shell=bash -# no need for shebang - this file is loaded from charts.d.plugin -# SPDX-License-Identifier: GPL-3.0-or-later - -# netdata -# real-time performance and health monitoring, done right! 
-# (C) 2016 Costa Tsaousis -# - -# sensors docs -# https://www.kernel.org/doc/Documentation/hwmon/sysfs-interface - -# if this chart is called X.chart.sh, then all functions and global variables -# must start with X_ - -# the directory the kernel keeps sensor data -sensors_sys_dir="${NETDATA_HOST_PREFIX}/sys/devices" - -# how deep in the tree to check for sensor data -sensors_sys_depth=10 - -# if set to 1, the script will overwrite internal -# script functions with code generated ones -# leave to 1, is faster -sensors_source_update=1 - -# how frequently to collect sensor data -# the default is to collect it at every iteration of charts.d -sensors_update_every= - -sensors_priority=90000 - -declare -A sensors_excluded=() - -sensors_find_all_files() { - find "$1" -maxdepth $sensors_sys_depth -name \*_input -o -name temp 2>/dev/null -} - -sensors_find_all_dirs() { - # shellcheck disable=SC2162 - sensors_find_all_files "$1" | while read; do - dirname "$REPLY" - done | sort -u -} - -# _check is called once, to find out if this chart should be enabled or not -sensors_check() { - - # this should return: - # - 0 to enable the chart - # - 1 to disable the chart - - [ -z "$(sensors_find_all_files "$sensors_sys_dir")" ] && error "no sensors found in '$sensors_sys_dir'." && return 1 - return 0 -} - -sensors_check_files() { - # we only need sensors that report a non-zero value - # also remove not needed sensors - - local f v excluded - for f in "$@"; do - [ ! -f "$f" ] && continue - for ex in "${sensors_excluded[@]}"; do - [[ $f =~ .*$ex$ ]] && excluded='1' && break - done - - [ "$excluded" != "1" ] && v="$(cat "$f")" || v=0 - v=$((v + 1 - 1)) - [ $v -ne 0 ] && echo "$f" && continue - excluded= - - error "$f gives zero values" - done -} - -sensors_check_temp_type() { - # valid temp types are 1 to 6 - # disabled sensors have the value 0 - - local f t v - for f in "$@"; do - # shellcheck disable=SC2001 - t=$(echo "$f" | sed "s|_input$|_type|g") - [ "$f" = "$t" ] && echo "$f" && continue - [ ! -f "$t" ] && echo "$f" && continue - - v="$(cat "$t")" - v=$((v + 1 - 1)) - [ $v -ne 0 ] && echo "$f" && continue - - error "$f is disabled" - done -} - -# _create is called once, to create the charts -sensors_create() { - local path dir name x file lfile labelname device subsystem id type mode files multiplier divisor - - # we create a script with the source of the - # sensors_update() function - # - the highest speed we can achieve - - [ $sensors_source_update -eq 1 ] && echo >"$TMP_DIR/sensors.sh" "sensors_update() {" - - for path in $(sensors_find_all_dirs "$sensors_sys_dir" | sort -u); do - dir=$(basename "$path") - device= - subsystem= - id= - type= - name= - - [ -h "$path/device" ] && device=$(readlink -f "$path/device") - [ ! -z "$device" ] && device=$(basename "$device") - [ -z "$device" ] && device="$dir" - - [ -h "$path/subsystem" ] && subsystem=$(readlink -f "$path/subsystem") - [ ! 
-z "$subsystem" ] && subsystem=$(basename "$subsystem") - [ -z "$subsystem" ] && subsystem="$dir" - - [ -f "$path/name" ] && name=$(cat "$path/name") - [ -z "$name" ] && name="$dir" - - [ -f "$path/type" ] && type=$(cat "$path/type") - [ -z "$type" ] && type="$dir" - - id="$(fixid "$device.$subsystem.$dir")" - - debug "path='$path', dir='$dir', device='$device', subsystem='$subsystem', id='$id', name='$name'" - - for mode in temperature voltage fans power current energy humidity; do - files= - multiplier=1 - divisor=1 - algorithm="absolute" - - case $mode in - temperature) - files="$( - ls "$path"/temp*_input 2>/dev/null - ls "$path/temp" 2>/dev/null - )" - files="$(sensors_check_files "$files")" - files="$(sensors_check_temp_type "$files")" - [ -z "$files" ] && continue - echo "CHART 'sensors.temp_${id}_${name}' '' 'Temperature' 'Celsius' 'temperature' 'sensors.temp' line $((sensors_priority + 1)) $sensors_update_every '' '' 'sensors'" - echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN 'sensors.temp_${id}_${name}' \$1\"" - divisor=1000 - ;; - - voltage) - files="$(ls "$path"/in*_input 2>/dev/null)" - files="$(sensors_check_files "$files")" - [ -z "$files" ] && continue - echo "CHART 'sensors.volt_${id}_${name}' '' 'Voltage' 'Volts' 'voltage' 'sensors.volt' line $((sensors_priority + 2)) $sensors_update_every '' '' 'sensors'" - echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN 'sensors.volt_${id}_${name}' \$1\"" - divisor=1000 - ;; - - current) - files="$(ls "$path"/curr*_input 2>/dev/null)" - files="$(sensors_check_files "$files")" - [ -z "$files" ] && continue - echo "CHART 'sensors.curr_${id}_${name}' '' 'Current' 'Ampere' 'current' 'sensors.curr' line $((sensors_priority + 3)) $sensors_update_every '' '' 'sensors'" - echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN 'sensors.curr_${id}_${name}' \$1\"" - divisor=1000 - ;; - - power) - files="$(ls "$path"/power*_input 2>/dev/null)" - files="$(sensors_check_files "$files")" - [ -z "$files" ] && continue - echo "CHART 'sensors.power_${id}_${name}' '' 'Power' 'Watt' 'power' 'sensors.power' line $((sensors_priority + 4)) $sensors_update_every '' '' 'sensors'" - echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN 'sensors.power_${id}_${name}' \$1\"" - divisor=1000000 - ;; - - fans) - files="$(ls "$path"/fan*_input 2>/dev/null)" - files="$(sensors_check_files "$files")" - [ -z "$files" ] && continue - echo "CHART 'sensors.fan_${id}_${name}' '' 'Fans Speed' 'Rotations / Minute' 'fans' 'sensors.fans' line $((sensors_priority + 5)) $sensors_update_every '' '' 'sensors'" - echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN 'sensors.fan_${id}_${name}' \$1\"" - ;; - - energy) - files="$(ls "$path"/energy*_input 2>/dev/null)" - files="$(sensors_check_files "$files")" - [ -z "$files" ] && continue - echo "CHART 'sensors.energy_${id}_${name}' '' 'Energy' 'Joule' 'energy' 'sensors.energy' area $((sensors_priority + 6)) $sensors_update_every '' '' 'sensors'" - echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN 'sensors.energy_${id}_${name}' \$1\"" - algorithm="incremental" - divisor=1000000 - ;; - - humidity) - files="$(ls "$path"/humidity*_input 2>/dev/null)" - files="$(sensors_check_files "$files")" - [ -z "$files" ] && continue - echo "CHART 'sensors.humidity_${id}_${name}' '' 'Humidity' 'Percent' 'humidity' 'sensors.humidity' line $((sensors_priority + 7)) $sensors_update_every '' '' 'sensors'" - echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN 'sensors.humidity_${id}_${name}' \$1\"" - divisor=1000 - ;; - - *) - continue - ;; - esac - - for x in $files; do - file="$x" - fid="$(fixid "$file")" - 
lfile="$(basename "$file" | sed "s|_input$|_label|g")" - labelname="$(basename "$file" | sed "s|_input$||g")" - - if [ ! "$path/$lfile" = "$file" ] && [ -f "$path/$lfile" ]; then - labelname="$(cat "$path/$lfile")" - fi - - echo "DIMENSION $fid '$labelname' $algorithm $multiplier $divisor" - echo >>"$TMP_DIR/sensors.sh" "echo \"SET $fid = \"\$(< $file )" - done - - echo >>"$TMP_DIR/sensors.sh" "echo END" - done - done - - [ $sensors_source_update -eq 1 ] && echo >>"$TMP_DIR/sensors.sh" "}" - - # ok, load the function sensors_update() we created - # shellcheck source=/dev/null - [ $sensors_source_update -eq 1 ] && . "$TMP_DIR/sensors.sh" - - return 0 -} - -# _update is called continuously, to collect the values -sensors_update() { - # the first argument to this function is the microseconds since last update - # pass this parameter to the BEGIN statement (see below). - - # do all the work to collect / calculate the values - # for each dimension - # remember: KEEP IT SIMPLE AND SHORT - - # shellcheck source=/dev/null - [ $sensors_source_update -eq 0 ] && . "$TMP_DIR/sensors.sh" "$1" - - return 0 -} diff --git a/src/collectors/charts.d.plugin/sensors/sensors.conf b/src/collectors/charts.d.plugin/sensors/sensors.conf deleted file mode 100644 index bcb28807d..000000000 --- a/src/collectors/charts.d.plugin/sensors/sensors.conf +++ /dev/null @@ -1,32 +0,0 @@ -# no need for shebang - this file is loaded from charts.d.plugin - -# netdata -# real-time performance and health monitoring, done right! -# (C) 2018 Costa Tsaousis -# GPL v3+ - -# THIS PLUGIN IS DEPRECATED -# USE THE PYTHON.D ONE - -# the directory the kernel keeps sensor data -#sensors_sys_dir="/sys/devices" - -# how deep in the tree to check for sensor data -#sensors_sys_depth=10 - -# if set to 1, the script will overwrite internal -# script functions with code generated ones -# leave to 1, is faster -#sensors_source_update=1 - -# the data collection frequency -# if unset, will inherit the netdata update frequency -#sensors_update_every= - -# the charts priority on the dashboard -#sensors_priority=90000 - -# the number of retries to do in case of failure -# before disabling the module -#sensors_retries=10 - diff --git a/src/collectors/checks.plugin/README.md b/src/collectors/checks.plugin/README.md deleted file mode 100644 index 806b1e66c..000000000 --- a/src/collectors/checks.plugin/README.md +++ /dev/null @@ -1,12 +0,0 @@ - - -# checks.plugin - -A debugging plugin (by default it is disabled) - - diff --git a/src/collectors/common-contexts/common-contexts.h b/src/collectors/common-contexts/common-contexts.h index 1938230dc..4c7e58e7f 100644 --- a/src/collectors/common-contexts/common-contexts.h +++ b/src/collectors/common-contexts/common-contexts.h @@ -18,14 +18,22 @@ typedef void (*instance_labels_cb_t)(RRDSET *st, void *data); -#include "system.io.h" -#include "system.ram.h" -#include "system.interrupts.h" -#include "system.processes.h" -#include "system.ipc.h" -#include "mem.swap.h" -#include "mem.pgfaults.h" -#include "mem.available.h" -#include "disk.io.h" +#include "system-io.h" +#include "system-ram.h" +#include "system-interrupts.h" +#include "system-processes.h" +#include "system-ipc.h" +#include "mem-swap.h" +#include "mem-pgfaults.h" +#include "mem-available.h" +#include "disk-io.h" +#include "disk-ops.h" +#include "disk-qops.h" +#include "disk-util.h" +#include "disk-busy.h" +#include "disk-iotime.h" +#include "disk-await.h" +#include "disk-svctm.h" +#include "disk-avgsz.h" #endif //NETDATA_COMMON_CONTEXTS_H diff --git 
a/src/collectors/common-contexts/disk-avgsz.h b/src/collectors/common-contexts/disk-avgsz.h new file mode 100644 index 000000000..16cca247a --- /dev/null +++ b/src/collectors/common-contexts/disk-avgsz.h @@ -0,0 +1,44 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_DISK_AVGSZ_H +#define NETDATA_DISK_AVGSZ_H + +#include "common-contexts.h" + +typedef struct { + RRDSET *st_avgsz; + RRDDIM *rd_avgsz_reads; + RRDDIM *rd_avgsz_writes; +} ND_DISK_AVGSZ; + +static inline void common_disk_avgsz(ND_DISK_AVGSZ *d, const char *id, const char *name, uint64_t avg_bytes_read, uint64_t avg_bytes_write, int update_every, instance_labels_cb_t cb, void *data) { + if(unlikely(!d->st_avgsz)) { + d->st_avgsz = rrdset_create_localhost( + "disk_avgsz" + , id + , name + , "io" + , "disk.avgsz" + , "Average Completed I/O Operation Bandwidth" + , "KiB/operation" + , _COMMON_PLUGIN_NAME + , _COMMON_PLUGIN_MODULE_NAME + , NETDATA_CHART_PRIO_DISK_AVGSZ + , update_every + , RRDSET_TYPE_AREA + ); + + d->rd_avgsz_reads = rrddim_add(d->st_avgsz, "reads", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); + d->rd_avgsz_writes = rrddim_add(d->st_avgsz, "writes", NULL, -1, 1024, RRD_ALGORITHM_ABSOLUTE); + + if(cb) + cb(d->st_avgsz, data); + } + + // this always have to be in base units, so that exporting sends base units to other time-series db + rrddim_set_by_pointer(d->st_avgsz, d->rd_avgsz_reads, (collected_number)avg_bytes_read); + rrddim_set_by_pointer(d->st_avgsz, d->rd_avgsz_writes, (collected_number)avg_bytes_write); + rrdset_done(d->st_avgsz); +} + +#endif //NETDATA_DISK_AVGSZ_H diff --git a/src/collectors/common-contexts/disk-await.h b/src/collectors/common-contexts/disk-await.h new file mode 100644 index 000000000..b4142569e --- /dev/null +++ b/src/collectors/common-contexts/disk-await.h @@ -0,0 +1,44 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_DISK_AWAIT_H +#define NETDATA_DISK_AWAIT_H + +#include "common-contexts.h" + +typedef struct { + RRDSET *st_await; + RRDDIM *rd_await_reads; + RRDDIM *rd_await_writes; +} ND_DISK_AWAIT; + +static inline void common_disk_await(ND_DISK_AWAIT *d, const char *id, const char *name, double read_avg_ms, double write_avg_ms, int update_every, instance_labels_cb_t cb, void *data) { + if(unlikely(!d->st_await)) { + d->st_await = rrdset_create_localhost( + "disk_await" + , id + , name + , "latency" + , "disk.await" + , "Average Completed I/O Operation Time" + , "milliseconds/operation" + , _COMMON_PLUGIN_NAME + , _COMMON_PLUGIN_MODULE_NAME + , NETDATA_CHART_PRIO_DISK_AWAIT + , update_every + , RRDSET_TYPE_LINE + ); + + d->rd_await_reads = rrddim_add(d->st_await, "reads", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); + d->rd_await_writes = rrddim_add(d->st_await, "writes", NULL, -1, 1000, RRD_ALGORITHM_ABSOLUTE); + + if(cb) + cb(d->st_await, data); + } + + // this always have to be in base units, so that exporting sends base units to other time-series db + rrddim_set_by_pointer(d->st_await, d->rd_await_reads, (collected_number)(read_avg_ms * 1000.0)); + rrddim_set_by_pointer(d->st_await, d->rd_await_writes, (collected_number)(write_avg_ms * 1000.0)); + rrdset_done(d->st_await); +} + +#endif //NETDATA_DISK_AWAIT_H diff --git a/src/collectors/common-contexts/disk-busy.h b/src/collectors/common-contexts/disk-busy.h new file mode 100644 index 000000000..92679d9ef --- /dev/null +++ b/src/collectors/common-contexts/disk-busy.h @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_DISK_BUSY_H +#define NETDATA_DISK_BUSY_H + +#include 
"common-contexts.h" + +typedef struct { + RRDSET *st_busy; + RRDDIM *rd_busy; +} ND_DISK_BUSY; + +static inline void common_disk_busy(ND_DISK_BUSY *d, const char *id, const char *name, uint64_t busy_ms, int update_every, instance_labels_cb_t cb, void *data) { + if(unlikely(!d->st_busy)) { + d->st_busy = rrdset_create_localhost( + "disk_busy" + , id + , name + , "utilization" + , "disk.busy" + , "Disk Busy Time" + , "milliseconds" + , _COMMON_PLUGIN_NAME + , _COMMON_PLUGIN_MODULE_NAME + , NETDATA_CHART_PRIO_DISK_BUSY + , update_every + , RRDSET_TYPE_AREA + ); + + d->rd_busy = rrddim_add(d->st_busy, "busy", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + if(cb) + cb(d->st_busy, data); + } + + // this always have to be in base units, so that exporting sends base units to other time-series db + rrddim_set_by_pointer(d->st_busy, d->rd_busy, (collected_number)busy_ms); + rrdset_done(d->st_busy); +} + +#endif //NETDATA_DISK_BUSY_H diff --git a/src/collectors/common-contexts/disk-io.h b/src/collectors/common-contexts/disk-io.h new file mode 100644 index 000000000..26f98b9be --- /dev/null +++ b/src/collectors/common-contexts/disk-io.h @@ -0,0 +1,44 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_DISK_IO_H +#define NETDATA_DISK_IO_H + +#include "common-contexts.h" + +typedef struct { + RRDSET *st_io; + RRDDIM *rd_io_reads; + RRDDIM *rd_io_writes; +} ND_DISK_IO; + +static inline void common_disk_io(ND_DISK_IO *d, const char *id, const char *name, uint64_t bytes_read, uint64_t bytes_write, int update_every, instance_labels_cb_t cb, void *data) { + if(unlikely(!d->st_io)) { + d->st_io = rrdset_create_localhost( + "disk" + , id + , name + , "io" + , "disk.io" + , "Disk I/O Bandwidth" + , "KiB/s" + , _COMMON_PLUGIN_NAME + , _COMMON_PLUGIN_MODULE_NAME + , NETDATA_CHART_PRIO_DISK_IO + , update_every + , RRDSET_TYPE_AREA + ); + + d->rd_io_reads = rrddim_add(d->st_io, "reads", NULL, 1, 1024, RRD_ALGORITHM_INCREMENTAL); + d->rd_io_writes = rrddim_add(d->st_io, "writes", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL); + + if(cb) + cb(d->st_io, data); + } + + // this always have to be in base units, so that exporting sends base units to other time-series db + rrddim_set_by_pointer(d->st_io, d->rd_io_reads, (collected_number)bytes_read); + rrddim_set_by_pointer(d->st_io, d->rd_io_writes, (collected_number)bytes_write); + rrdset_done(d->st_io); +} + +#endif //NETDATA_DISK_IO_H diff --git a/src/collectors/common-contexts/disk-iotime.h b/src/collectors/common-contexts/disk-iotime.h new file mode 100644 index 000000000..29707287a --- /dev/null +++ b/src/collectors/common-contexts/disk-iotime.h @@ -0,0 +1,44 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_DISK_IOTIME_H +#define NETDATA_DISK_IOTIME_H + +#include "common-contexts.h" + +typedef struct { + RRDSET *st_iotime; + RRDDIM *rd_reads_ms; + RRDDIM *rd_writes_ms; +} ND_DISK_IOTIME; + +static inline void common_disk_iotime(ND_DISK_IOTIME *d, const char *id, const char *name, uint64_t reads_ms, uint64_t writes_ms, int update_every, instance_labels_cb_t cb, void *data) { + if(unlikely(!d->st_iotime)) { + d->st_iotime = rrdset_create_localhost( + "disk_iotime" + , id + , name + , "utilization" + , "disk.iotime" + , "Disk Total I/O Time" + , "milliseconds/s" + , _COMMON_PLUGIN_NAME + , _COMMON_PLUGIN_MODULE_NAME + , NETDATA_CHART_PRIO_DISK_IOTIME + , update_every + , RRDSET_TYPE_AREA + ); + + d->rd_reads_ms = rrddim_add(d->st_iotime, "reads", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + d->rd_writes_ms = rrddim_add(d->st_iotime, "writes", 
NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + + if(cb) + cb(d->st_iotime, data); + } + + // this always have to be in base units, so that exporting sends base units to other time-series db + rrddim_set_by_pointer(d->st_iotime, d->rd_reads_ms, (collected_number)reads_ms); + rrddim_set_by_pointer(d->st_iotime, d->rd_writes_ms, (collected_number)writes_ms); + rrdset_done(d->st_iotime); +} + +#endif //NETDATA_DISK_IOTIME_H diff --git a/src/collectors/common-contexts/disk-ops.h b/src/collectors/common-contexts/disk-ops.h new file mode 100644 index 000000000..6e1ac4690 --- /dev/null +++ b/src/collectors/common-contexts/disk-ops.h @@ -0,0 +1,44 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_DISK_OPS_H +#define NETDATA_DISK_OPS_H + +#include "common-contexts.h" + +typedef struct { + RRDSET *st_ops; + RRDDIM *rd_ops_reads; + RRDDIM *rd_ops_writes; +} ND_DISK_OPS; + +static inline void common_disk_ops(ND_DISK_OPS *d, const char *id, const char *name, uint64_t ops_read, uint64_t ops_write, int update_every, instance_labels_cb_t cb, void *data) { + if(unlikely(!d->st_ops)) { + d->st_ops = rrdset_create_localhost( + "disk_ops" + , id + , name + , "ops" + , "disk.ops" + , "Disk Completed I/O Operations" + , "operations/s" + , _COMMON_PLUGIN_NAME + , _COMMON_PLUGIN_MODULE_NAME + , NETDATA_CHART_PRIO_DISK_OPS + , update_every + , RRDSET_TYPE_LINE + ); + + d->rd_ops_reads = rrddim_add(d->st_ops, "reads", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + d->rd_ops_writes = rrddim_add(d->st_ops, "writes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + + if(cb) + cb(d->st_ops, data); + } + + // this always have to be in base units, so that exporting sends base units to other time-series db + rrddim_set_by_pointer(d->st_ops, d->rd_ops_reads, (collected_number)ops_read); + rrddim_set_by_pointer(d->st_ops, d->rd_ops_writes, (collected_number)ops_write); + rrdset_done(d->st_ops); +} + +#endif //NETDATA_DISK_OPS_H diff --git a/src/collectors/common-contexts/disk-qops.h b/src/collectors/common-contexts/disk-qops.h new file mode 100644 index 000000000..89f38cb27 --- /dev/null +++ b/src/collectors/common-contexts/disk-qops.h @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_DISK_QOPS_H +#define NETDATA_DISK_QOPS_H + +#include "common-contexts.h" + +typedef struct { + RRDSET *st_qops; + RRDDIM *rd_qops; +} ND_DISK_QOPS; + +static inline void common_disk_qops(ND_DISK_QOPS *d, const char *id, const char *name, uint64_t queued_ops, int update_every, instance_labels_cb_t cb, void *data) { + if(unlikely(!d->st_qops)) { + d->st_qops = rrdset_create_localhost( + "disk_qops" + , id + , name + , "ops" + , "disk.qops" + , "Disk Current I/O Operations" + , "operations" + , _COMMON_PLUGIN_NAME + , _COMMON_PLUGIN_MODULE_NAME + , NETDATA_CHART_PRIO_DISK_QOPS + , update_every + , RRDSET_TYPE_LINE + ); + + d->rd_qops = rrddim_add(d->st_qops, "operations", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + + if(cb) + cb(d->st_qops, data); + } + + // this always have to be in base units, so that exporting sends base units to other time-series db + rrddim_set_by_pointer(d->st_qops, d->rd_qops, (collected_number)queued_ops); + rrdset_done(d->st_qops); +} + +#endif //NETDATA_DISK_QOPS_H diff --git a/src/collectors/common-contexts/disk-svctm.h b/src/collectors/common-contexts/disk-svctm.h new file mode 100644 index 000000000..f1d07c150 --- /dev/null +++ b/src/collectors/common-contexts/disk-svctm.h @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_DISK_SVCTM_H +#define 
NETDATA_DISK_SVCTM_H + +#include "common-contexts.h" + +typedef struct { + RRDSET *st_svctm; + RRDDIM *rd_svctm; +} ND_DISK_SVCTM; + +static inline void common_disk_svctm(ND_DISK_SVCTM *d, const char *id, const char *name, double svctm_ms, int update_every, instance_labels_cb_t cb, void *data) { + if(unlikely(!d->st_svctm)) { + d->st_svctm = rrdset_create_localhost( + "disk_svctm" + , id + , name + , "latency" + , "disk.svctm" + , "Average Service Time" + , "milliseconds/operation" + , _COMMON_PLUGIN_NAME + , _COMMON_PLUGIN_MODULE_NAME + , NETDATA_CHART_PRIO_DISK_SVCTM + , update_every + , RRDSET_TYPE_LINE + ); + + d->rd_svctm = rrddim_add(d->st_svctm, "svctm", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); + + if(cb) + cb(d->st_svctm, data); + } + + // this always have to be in base units, so that exporting sends base units to other time-series db + rrddim_set_by_pointer(d->st_svctm, d->rd_svctm, (collected_number)(svctm_ms * 1000.0)); + rrdset_done(d->st_svctm); +} + +#endif //NETDATA_DISK_SVCTM_H diff --git a/src/collectors/common-contexts/disk-util.h b/src/collectors/common-contexts/disk-util.h new file mode 100644 index 000000000..8733975f6 --- /dev/null +++ b/src/collectors/common-contexts/disk-util.h @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_DISK_UTIL_H +#define NETDATA_DISK_UTIL_H + +#include "common-contexts.h" + +typedef struct { + RRDSET *st_util; + RRDDIM *rd_util; +} ND_DISK_UTIL; + +static inline void common_disk_util(ND_DISK_UTIL *d, const char *id, const char *name, uint64_t percent, int update_every, instance_labels_cb_t cb, void *data) { + if(unlikely(!d->st_util)) { + d->st_util = rrdset_create_localhost( + "disk_util" + , id + , name + , "utilization" + , "disk.util" + , "Disk Utilization Time" + , "% of time working" + , _COMMON_PLUGIN_NAME + , _COMMON_PLUGIN_MODULE_NAME + , NETDATA_CHART_PRIO_DISK_UTIL + , update_every + , RRDSET_TYPE_AREA + ); + + d->rd_util = rrddim_add(d->st_util, "utilization", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + + if(cb) + cb(d->st_util, data); + } + + // this always have to be in base units, so that exporting sends base units to other time-series db + rrddim_set_by_pointer(d->st_util, d->rd_util, (collected_number)percent); + rrdset_done(d->st_util); +} + +#endif //NETDATA_DISK_UTIL_H diff --git a/src/collectors/common-contexts/disk.io.h b/src/collectors/common-contexts/disk.io.h deleted file mode 100644 index 26f98b9be..000000000 --- a/src/collectors/common-contexts/disk.io.h +++ /dev/null @@ -1,44 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#ifndef NETDATA_DISK_IO_H -#define NETDATA_DISK_IO_H - -#include "common-contexts.h" - -typedef struct { - RRDSET *st_io; - RRDDIM *rd_io_reads; - RRDDIM *rd_io_writes; -} ND_DISK_IO; - -static inline void common_disk_io(ND_DISK_IO *d, const char *id, const char *name, uint64_t bytes_read, uint64_t bytes_write, int update_every, instance_labels_cb_t cb, void *data) { - if(unlikely(!d->st_io)) { - d->st_io = rrdset_create_localhost( - "disk" - , id - , name - , "io" - , "disk.io" - , "Disk I/O Bandwidth" - , "KiB/s" - , _COMMON_PLUGIN_NAME - , _COMMON_PLUGIN_MODULE_NAME - , NETDATA_CHART_PRIO_DISK_IO - , update_every - , RRDSET_TYPE_AREA - ); - - d->rd_io_reads = rrddim_add(d->st_io, "reads", NULL, 1, 1024, RRD_ALGORITHM_INCREMENTAL); - d->rd_io_writes = rrddim_add(d->st_io, "writes", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL); - - if(cb) - cb(d->st_io, data); - } - - // this always have to be in base units, so that exporting sends base units to other 
time-series db - rrddim_set_by_pointer(d->st_io, d->rd_io_reads, (collected_number)bytes_read); - rrddim_set_by_pointer(d->st_io, d->rd_io_writes, (collected_number)bytes_write); - rrdset_done(d->st_io); -} - -#endif //NETDATA_DISK_IO_H diff --git a/src/collectors/common-contexts/mem-available.h b/src/collectors/common-contexts/mem-available.h new file mode 100644 index 000000000..3f763fe18 --- /dev/null +++ b/src/collectors/common-contexts/mem-available.h @@ -0,0 +1,35 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_MEM_AVAILABLE_H +#define NETDATA_MEM_AVAILABLE_H +#include "common-contexts.h" + +static inline void common_mem_available(uint64_t available_bytes, int update_every) { + static RRDSET *st_mem_available = NULL; + static RRDDIM *rd_avail = NULL; + + if(unlikely(!st_mem_available)) { + st_mem_available = rrdset_create_localhost( + "mem" + , "available" + , NULL + , "overview" + , NULL + , "Available RAM for applications" + , "MiB" + , _COMMON_PLUGIN_NAME + , _COMMON_PLUGIN_MODULE_NAME + , NETDATA_CHART_PRIO_MEM_SYSTEM_AVAILABLE + , update_every + , RRDSET_TYPE_AREA + ); + + rd_avail = rrddim_add(st_mem_available, "avail", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + } + + // this always have to be in base units, so that exporting sends base units to other time-series db + rrddim_set_by_pointer(st_mem_available, rd_avail, (collected_number)available_bytes); + rrdset_done(st_mem_available); +} + +#endif //NETDATA_MEM_AVAILABLE_H diff --git a/src/collectors/common-contexts/mem-pgfaults.h b/src/collectors/common-contexts/mem-pgfaults.h new file mode 100644 index 000000000..8a10449e6 --- /dev/null +++ b/src/collectors/common-contexts/mem-pgfaults.h @@ -0,0 +1,38 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_MEM_PGFAULTS_H +#define NETDATA_MEM_PGFAULTS_H + +#include "common-contexts.h" + +static inline void common_mem_pgfaults(uint64_t minor, uint64_t major, int update_every) { + static RRDSET *st_pgfaults = NULL; + static RRDDIM *rd_minor = NULL, *rd_major = NULL; + + if(unlikely(!st_pgfaults)) { + st_pgfaults = rrdset_create_localhost( + "mem" + , "pgfaults" + , NULL + , "page faults" + , NULL + , "Memory Page Faults" + , "faults/s" + , _COMMON_PLUGIN_NAME + , _COMMON_PLUGIN_MODULE_NAME + , NETDATA_CHART_PRIO_MEM_SYSTEM_PGFAULTS + , update_every + , RRDSET_TYPE_LINE + ); + + rd_minor = rrddim_add(st_pgfaults, "minor", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rd_major = rrddim_add(st_pgfaults, "major", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + + // this always have to be in base units, so that exporting sends base units to other time-series db + rrddim_set_by_pointer(st_pgfaults, rd_minor, minor); + rrddim_set_by_pointer(st_pgfaults, rd_major, major); + rrdset_done(st_pgfaults); +} + +#endif //NETDATA_MEM_PGFAULTS_H diff --git a/src/collectors/common-contexts/mem-swap.h b/src/collectors/common-contexts/mem-swap.h new file mode 100644 index 000000000..d4c0cfc89 --- /dev/null +++ b/src/collectors/common-contexts/mem-swap.h @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "common-contexts.h" + +static inline void common_mem_swap(uint64_t free_bytes, uint64_t used_bytes, int update_every) { + static RRDSET *st_system_swap = NULL; + static RRDDIM *rd_free = NULL, *rd_used = NULL; + + if (free_bytes == 0 && used_bytes == 0 && st_system_swap) { + rrdset_is_obsolete___safe_from_collector_thread(st_system_swap); + st_system_swap = NULL; + rd_free = NULL; + rd_used = NULL; + return; + } + + 
if(unlikely(!st_system_swap)) { + st_system_swap = rrdset_create_localhost( + "mem" + , "swap" + , NULL + , "swap" + , NULL + , "System Swap" + , "MiB" + , _COMMON_PLUGIN_NAME + , _COMMON_PLUGIN_MODULE_NAME + , NETDATA_CHART_PRIO_MEM_SWAP + , update_every + , RRDSET_TYPE_STACKED + ); + + rd_free = rrddim_add(st_system_swap, "free", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + rd_used = rrddim_add(st_system_swap, "used", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + } + + // this always have to be in base units, so that exporting sends base units to other time-series db + rrddim_set_by_pointer(st_system_swap, rd_used, (collected_number)used_bytes); + rrddim_set_by_pointer(st_system_swap, rd_free, (collected_number)free_bytes); + rrdset_done(st_system_swap); +} diff --git a/src/collectors/common-contexts/mem.available.h b/src/collectors/common-contexts/mem.available.h deleted file mode 100644 index 3f763fe18..000000000 --- a/src/collectors/common-contexts/mem.available.h +++ /dev/null @@ -1,35 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#ifndef NETDATA_MEM_AVAILABLE_H -#define NETDATA_MEM_AVAILABLE_H -#include "common-contexts.h" - -static inline void common_mem_available(uint64_t available_bytes, int update_every) { - static RRDSET *st_mem_available = NULL; - static RRDDIM *rd_avail = NULL; - - if(unlikely(!st_mem_available)) { - st_mem_available = rrdset_create_localhost( - "mem" - , "available" - , NULL - , "overview" - , NULL - , "Available RAM for applications" - , "MiB" - , _COMMON_PLUGIN_NAME - , _COMMON_PLUGIN_MODULE_NAME - , NETDATA_CHART_PRIO_MEM_SYSTEM_AVAILABLE - , update_every - , RRDSET_TYPE_AREA - ); - - rd_avail = rrddim_add(st_mem_available, "avail", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - } - - // this always have to be in base units, so that exporting sends base units to other time-series db - rrddim_set_by_pointer(st_mem_available, rd_avail, (collected_number)available_bytes); - rrdset_done(st_mem_available); -} - -#endif //NETDATA_MEM_AVAILABLE_H diff --git a/src/collectors/common-contexts/mem.pgfaults.h b/src/collectors/common-contexts/mem.pgfaults.h deleted file mode 100644 index 503b9f7e8..000000000 --- a/src/collectors/common-contexts/mem.pgfaults.h +++ /dev/null @@ -1,40 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#ifndef NETDATA_MEM_PGFAULTS_H -#define NETDATA_MEM_PGFAULTS_H - -#include "common-contexts.h" - -static inline void common_mem_pgfaults(uint64_t minor, uint64_t major, int update_every) { - static RRDSET *st_pgfaults = NULL; - static RRDDIM *rd_minor = NULL, *rd_major = NULL; - - if(unlikely(!st_pgfaults)) { - st_pgfaults = rrdset_create_localhost( - "mem" - , "pgfaults" - , NULL - , "page faults" - , NULL - , "Memory Page Faults" - , "faults/s" - , _COMMON_PLUGIN_NAME - , _COMMON_PLUGIN_MODULE_NAME - , NETDATA_CHART_PRIO_MEM_SYSTEM_PGFAULTS - , update_every - , RRDSET_TYPE_LINE - ); - - rrdset_flag_set(st_pgfaults, RRDSET_FLAG_DETAIL); - - rd_minor = rrddim_add(st_pgfaults, "minor", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_major = rrddim_add(st_pgfaults, "major", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - } - - // this always have to be in base units, so that exporting sends base units to other time-series db - rrddim_set_by_pointer(st_pgfaults, rd_minor, minor); - rrddim_set_by_pointer(st_pgfaults, rd_major, major); - rrdset_done(st_pgfaults); -} - -#endif //NETDATA_MEM_PGFAULTS_H diff --git a/src/collectors/common-contexts/mem.swap.h b/src/collectors/common-contexts/mem.swap.h deleted file mode 100644 index 
1c1b053d7..000000000 --- a/src/collectors/common-contexts/mem.swap.h +++ /dev/null @@ -1,43 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include "common-contexts.h" - -static inline void common_mem_swap(uint64_t free_bytes, uint64_t used_bytes, int update_every) { - static RRDSET *st_system_swap = NULL; - static RRDDIM *rd_free = NULL, *rd_used = NULL; - - if (free_bytes == 0 && used_bytes == 0 && st_system_swap) { - rrdset_is_obsolete___safe_from_collector_thread(st_system_swap); - st_system_swap = NULL; - rd_free = NULL; - rd_used = NULL; - return; - } - - if(unlikely(!st_system_swap)) { - st_system_swap = rrdset_create_localhost( - "mem" - , "swap" - , NULL - , "swap" - , NULL - , "System Swap" - , "MiB" - , _COMMON_PLUGIN_NAME - , _COMMON_PLUGIN_MODULE_NAME - , NETDATA_CHART_PRIO_MEM_SWAP - , update_every - , RRDSET_TYPE_STACKED - ); - - rrdset_flag_set(st_system_swap, RRDSET_FLAG_DETAIL); - - rd_free = rrddim_add(st_system_swap, "free", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - rd_used = rrddim_add(st_system_swap, "used", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - } - - // this always have to be in base units, so that exporting sends base units to other time-series db - rrddim_set_by_pointer(st_system_swap, rd_used, (collected_number)used_bytes); - rrddim_set_by_pointer(st_system_swap, rd_free, (collected_number)free_bytes); - rrdset_done(st_system_swap); -} diff --git a/src/collectors/common-contexts/system-interrupts.h b/src/collectors/common-contexts/system-interrupts.h new file mode 100644 index 000000000..4b78e9469 --- /dev/null +++ b/src/collectors/common-contexts/system-interrupts.h @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_SYSTEM_INTERRUPTS_H +#define NETDATA_SYSTEM_INTERRUPTS_H + +#include "common-contexts.h" + +#define _ + +static inline void common_interrupts(uint64_t interrupts, int update_every, char *ext_module) { + static RRDSET *st_intr = NULL; + static RRDDIM *rd_interrupts = NULL; + + char *module = (!ext_module) ? 
_COMMON_PLUGIN_MODULE_NAME: ext_module; + + if(unlikely(!st_intr)) { + st_intr = rrdset_create_localhost( "system" + , "intr" + , NULL + , "interrupts" + , NULL + , "CPU Interrupts" + , "interrupts/s" + , _COMMON_PLUGIN_NAME + , module + , NETDATA_CHART_PRIO_SYSTEM_INTR + , update_every + , RRDSET_TYPE_LINE); + + rd_interrupts = rrddim_add(st_intr, "interrupts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + + rrddim_set_by_pointer(st_intr, rd_interrupts, (collected_number)interrupts); + rrdset_done(st_intr); +} + +#endif //NETDATA_SYSTEM_INTERRUPTS_H diff --git a/src/collectors/common-contexts/system-io.h b/src/collectors/common-contexts/system-io.h new file mode 100644 index 000000000..84440c9b8 --- /dev/null +++ b/src/collectors/common-contexts/system-io.h @@ -0,0 +1,38 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_SYSTEM_IO_H +#define NETDATA_SYSTEM_IO_H + +#include "common-contexts.h" + +static inline void common_system_io(uint64_t read_bytes, uint64_t write_bytes, int update_every) { + static RRDSET *st_io = NULL; + static RRDDIM *rd_in = NULL, *rd_out = NULL; + + if(unlikely(!st_io)) { + st_io = rrdset_create_localhost( + "system" + , "io" + , NULL + , "disk" + , NULL + , "Disk I/O" + , "KiB/s" + , _COMMON_PLUGIN_NAME + , _COMMON_PLUGIN_MODULE_NAME + , NETDATA_CHART_PRIO_SYSTEM_IO + , update_every + , RRDSET_TYPE_AREA + ); + + rd_in = rrddim_add(st_io, "in", "reads", 1, 1024, RRD_ALGORITHM_INCREMENTAL); + rd_out = rrddim_add(st_io, "out", "writes", -1, 1024, RRD_ALGORITHM_INCREMENTAL); + } + + // this always have to be in base units, so that exporting sends base units to other time-series db + rrddim_set_by_pointer(st_io, rd_in, (collected_number)read_bytes); + rrddim_set_by_pointer(st_io, rd_out, (collected_number)write_bytes); + rrdset_done(st_io); +} + +#endif //NETDATA_SYSTEM_IO_H diff --git a/src/collectors/common-contexts/system-ipc.h b/src/collectors/common-contexts/system-ipc.h new file mode 100644 index 000000000..129ce6dfa --- /dev/null +++ b/src/collectors/common-contexts/system-ipc.h @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_SYSTEM_IPC_H +#define NETDATA_SYSTEM_IPC_H + +#include "common-contexts.h" + +static inline void common_semaphore_ipc(uint64_t semaphore, NETDATA_DOUBLE red, char *module, int update_every) { + static RRDSET *st_semaphores = NULL; + static RRDDIM *rd_semaphores = NULL; + if(unlikely(!st_semaphores)) { + st_semaphores = rrdset_create_localhost("system" + , "ipc_semaphores" + , NULL + , "ipc semaphores" + , NULL + , "IPC Semaphores" + , "semaphores" + , _COMMON_PLUGIN_NAME + , module + , NETDATA_CHART_PRIO_SYSTEM_IPC_SEMAPHORES + , update_every + , RRDSET_TYPE_AREA + ); + rd_semaphores = rrddim_add(st_semaphores, "semaphores", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + + rrddim_set_by_pointer(st_semaphores, rd_semaphores, semaphore); + rrdset_done(st_semaphores); + if (!strcmp(module, "ipc")) + st_semaphores->red = red; +} + +#endif //NETDATA_SYSTEM_IPC_H diff --git a/src/collectors/common-contexts/system-processes.h b/src/collectors/common-contexts/system-processes.h new file mode 100644 index 000000000..1b886d65f --- /dev/null +++ b/src/collectors/common-contexts/system-processes.h @@ -0,0 +1,115 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_SYSTEM_PROCESSES_H +#define NETDATA_SYSTEM_PROCESSES_H + +#include "common-contexts.h" + +#define _system_process_chart() \ + rrdset_create_localhost( \ + "system" \ + , "processes" \ + , NULL \ + , "processes" \ + , NULL \ + , 
"System Processes" \ + , "processes" \ + , _COMMON_PLUGIN_NAME \ + , _COMMON_PLUGIN_MODULE_NAME \ + , NETDATA_CHART_PRIO_SYSTEM_PROCESSES \ + , update_every \ + , RRDSET_TYPE_LINE \ + ) + +#if defined(OS_WINDOWS) +static inline void common_system_processes(uint64_t running, int update_every) { + static RRDSET *st_processes = NULL; + static RRDDIM *rd_running = NULL; + + if(unlikely(!st_processes)) { + st_processes = _system_process_chart(); + + rd_running = rrddim_add(st_processes, "running", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + + rrddim_set_by_pointer(st_processes, rd_running, running); + rrdset_done(st_processes); +} + +// EBPF COUNTER PART +static inline void common_system_threads(uint64_t threads, int update_every) { + static RRDSET *st_threads = NULL; + static RRDDIM *rd_threads = NULL; + + if(unlikely(!st_threads)) { + st_threads = rrdset_create_localhost( + "system" + , "threads" + , NULL + , "processes" + , NULL + , "Threads" + , "threads" + , _COMMON_PLUGIN_NAME + , _COMMON_PLUGIN_MODULE_NAME + , NETDATA_CHART_PRIO_WINDOWS_THREADS + , update_every + , RRDSET_TYPE_LINE + ); + + rd_threads = rrddim_add(st_threads, "threads", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + + rrddim_set_by_pointer(st_threads, rd_threads, threads); + rrdset_done(st_threads); +} +#endif + +#if defined(OS_LINUX) +static inline void common_system_processes(uint64_t running, uint64_t blocked, int update_every) { + static RRDSET *st_processes = NULL; + static RRDDIM *rd_running = NULL; + static RRDDIM *rd_blocked = NULL; + + if(unlikely(!st_processes)) { + st_processes = _system_process_chart(); + + rd_running = rrddim_add(st_processes, "running", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + rd_blocked = rrddim_add(st_processes, "blocked", NULL, -1, 1, RRD_ALGORITHM_ABSOLUTE); + } + + rrddim_set_by_pointer(st_processes, rd_running, (collected_number)running); + rrddim_set_by_pointer(st_processes, rd_blocked, (collected_number)blocked); + rrdset_done(st_processes); +} +#endif + +static inline void common_system_context_switch(uint64_t value, int update_every) { + static RRDSET *st_ctxt = NULL; + static RRDDIM *rd_switches = NULL; + + if(unlikely(!st_ctxt)) { + st_ctxt = rrdset_create_localhost( + "system" + , "ctxt" + , NULL + , "processes" + , NULL + , "CPU Context Switches" + , "context switches/s" + , _COMMON_PLUGIN_NAME + , _COMMON_PLUGIN_MODULE_NAME + , NETDATA_CHART_PRIO_SYSTEM_CTXT + , update_every + , RRDSET_TYPE_LINE + ); + + rd_switches = rrddim_add(st_ctxt, "switches", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + + rrddim_set_by_pointer(st_ctxt, rd_switches, (collected_number)value); + rrdset_done(st_ctxt); +} + + +#endif //NETDATA_SYSTEM_PROCESSES_H diff --git a/src/collectors/common-contexts/system-ram.h b/src/collectors/common-contexts/system-ram.h new file mode 100644 index 000000000..6b108405c --- /dev/null +++ b/src/collectors/common-contexts/system-ram.h @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_SYSTEM_RAM_H +#define NETDATA_SYSTEM_RAM_H + +#include "common-contexts.h" + +#define _system_ram_chart() \ + rrdset_create_localhost( \ + "system" \ + , "ram" \ + , NULL \ + , "ram" \ + , NULL \ + , "System RAM" \ + , "MiB" \ + , _COMMON_PLUGIN_NAME \ + , _COMMON_PLUGIN_MODULE_NAME \ + , NETDATA_CHART_PRIO_SYSTEM_RAM \ + , update_every \ + , RRDSET_TYPE_STACKED \ + ) + +#ifdef OS_WINDOWS +static inline void common_system_ram(uint64_t free_bytes, uint64_t used_bytes, int update_every) { + static RRDSET *st_system_ram = NULL; + static RRDDIM *rd_free = NULL; + 
static RRDDIM *rd_used = NULL; + + if(unlikely(!st_system_ram)) { + st_system_ram = _system_ram_chart(); + rd_free = rrddim_add(st_system_ram, "free", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + rd_used = rrddim_add(st_system_ram, "used", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + } + + // this always have to be in base units, so that exporting sends base units to other time-series db + rrddim_set_by_pointer(st_system_ram, rd_free, (collected_number)free_bytes); + rrddim_set_by_pointer(st_system_ram, rd_used, (collected_number)used_bytes); + rrdset_done(st_system_ram); +} +#endif + +#ifdef OS_LINUX +static inline void common_system_ram(uint64_t free_bytes, uint64_t used_bytes, uint64_t cached_bytes, uint64_t buffers_bytes, int update_every) { + static RRDSET *st_system_ram = NULL; + static RRDDIM *rd_free = NULL; + static RRDDIM *rd_used = NULL; + static RRDDIM *rd_cached = NULL; + static RRDDIM *rd_buffers = NULL; + + if(unlikely(!st_system_ram)) { + st_system_ram = _system_ram_chart(); + rd_free = rrddim_add(st_system_ram, "free", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + rd_used = rrddim_add(st_system_ram, "used", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + rd_cached = rrddim_add(st_system_ram, "cached", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + rd_buffers = rrddim_add(st_system_ram, "buffers", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); + } + + // this always have to be in base units, so that exporting sends base units to other time-series db + rrddim_set_by_pointer(st_system_ram, rd_free, (collected_number)free_bytes); + rrddim_set_by_pointer(st_system_ram, rd_used, (collected_number)used_bytes); + rrddim_set_by_pointer(st_system_ram, rd_cached, (collected_number)cached_bytes); + rrddim_set_by_pointer(st_system_ram, rd_buffers, (collected_number)buffers_bytes); + rrdset_done(st_system_ram); +} +#endif + +#endif //NETDATA_SYSTEM_RAM_H diff --git a/src/collectors/common-contexts/system.interrupts.h b/src/collectors/common-contexts/system.interrupts.h deleted file mode 100644 index dffd70572..000000000 --- a/src/collectors/common-contexts/system.interrupts.h +++ /dev/null @@ -1,39 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#ifndef NETDATA_SYSTEM_INTERRUPTS_H -#define NETDATA_SYSTEM_INTERRUPTS_H - -#include "common-contexts.h" - -#define _ - -static inline void common_interrupts(uint64_t interrupts, int update_every, char *ext_module) { - static RRDSET *st_intr = NULL; - static RRDDIM *rd_interrupts = NULL; - - char *module = (!ext_module) ? 
_COMMON_PLUGIN_MODULE_NAME: ext_module; - - if(unlikely(!st_intr)) { - st_intr = rrdset_create_localhost( "system" - , "intr" - , NULL - , "interrupts" - , NULL - , "CPU Interrupts" - , "interrupts/s" - , _COMMON_PLUGIN_NAME - , module - , NETDATA_CHART_PRIO_SYSTEM_INTR - , update_every - , RRDSET_TYPE_LINE); - - rrdset_flag_set(st_intr, RRDSET_FLAG_DETAIL); - - rd_interrupts = rrddim_add(st_intr, "interrupts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - - rrddim_set_by_pointer(st_intr, rd_interrupts, (collected_number)interrupts); - rrdset_done(st_intr); -} - -#endif //NETDATA_SYSTEM_INTERRUPTS_H diff --git a/src/collectors/common-contexts/system.io.h b/src/collectors/common-contexts/system.io.h deleted file mode 100644 index 84440c9b8..000000000 --- a/src/collectors/common-contexts/system.io.h +++ /dev/null @@ -1,38 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#ifndef NETDATA_SYSTEM_IO_H -#define NETDATA_SYSTEM_IO_H - -#include "common-contexts.h" - -static inline void common_system_io(uint64_t read_bytes, uint64_t write_bytes, int update_every) { - static RRDSET *st_io = NULL; - static RRDDIM *rd_in = NULL, *rd_out = NULL; - - if(unlikely(!st_io)) { - st_io = rrdset_create_localhost( - "system" - , "io" - , NULL - , "disk" - , NULL - , "Disk I/O" - , "KiB/s" - , _COMMON_PLUGIN_NAME - , _COMMON_PLUGIN_MODULE_NAME - , NETDATA_CHART_PRIO_SYSTEM_IO - , update_every - , RRDSET_TYPE_AREA - ); - - rd_in = rrddim_add(st_io, "in", "reads", 1, 1024, RRD_ALGORITHM_INCREMENTAL); - rd_out = rrddim_add(st_io, "out", "writes", -1, 1024, RRD_ALGORITHM_INCREMENTAL); - } - - // this always have to be in base units, so that exporting sends base units to other time-series db - rrddim_set_by_pointer(st_io, rd_in, (collected_number)read_bytes); - rrddim_set_by_pointer(st_io, rd_out, (collected_number)write_bytes); - rrdset_done(st_io); -} - -#endif //NETDATA_SYSTEM_IO_H diff --git a/src/collectors/common-contexts/system.ipc.h b/src/collectors/common-contexts/system.ipc.h deleted file mode 100644 index 129ce6dfa..000000000 --- a/src/collectors/common-contexts/system.ipc.h +++ /dev/null @@ -1,34 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#ifndef NETDATA_SYSTEM_IPC_H -#define NETDATA_SYSTEM_IPC_H - -#include "common-contexts.h" - -static inline void common_semaphore_ipc(uint64_t semaphore, NETDATA_DOUBLE red, char *module, int update_every) { - static RRDSET *st_semaphores = NULL; - static RRDDIM *rd_semaphores = NULL; - if(unlikely(!st_semaphores)) { - st_semaphores = rrdset_create_localhost("system" - , "ipc_semaphores" - , NULL - , "ipc semaphores" - , NULL - , "IPC Semaphores" - , "semaphores" - , _COMMON_PLUGIN_NAME - , module - , NETDATA_CHART_PRIO_SYSTEM_IPC_SEMAPHORES - , update_every - , RRDSET_TYPE_AREA - ); - rd_semaphores = rrddim_add(st_semaphores, "semaphores", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - } - - rrddim_set_by_pointer(st_semaphores, rd_semaphores, semaphore); - rrdset_done(st_semaphores); - if (!strcmp(module, "ipc")) - st_semaphores->red = red; -} - -#endif //NETDATA_SYSTEM_IPC_H diff --git a/src/collectors/common-contexts/system.processes.h b/src/collectors/common-contexts/system.processes.h deleted file mode 100644 index 1b886d65f..000000000 --- a/src/collectors/common-contexts/system.processes.h +++ /dev/null @@ -1,115 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#ifndef NETDATA_SYSTEM_PROCESSES_H -#define NETDATA_SYSTEM_PROCESSES_H - -#include "common-contexts.h" - -#define _system_process_chart() \ - rrdset_create_localhost( \ - "system" \ - , 
"processes" \ - , NULL \ - , "processes" \ - , NULL \ - , "System Processes" \ - , "processes" \ - , _COMMON_PLUGIN_NAME \ - , _COMMON_PLUGIN_MODULE_NAME \ - , NETDATA_CHART_PRIO_SYSTEM_PROCESSES \ - , update_every \ - , RRDSET_TYPE_LINE \ - ) - -#if defined(OS_WINDOWS) -static inline void common_system_processes(uint64_t running, int update_every) { - static RRDSET *st_processes = NULL; - static RRDDIM *rd_running = NULL; - - if(unlikely(!st_processes)) { - st_processes = _system_process_chart(); - - rd_running = rrddim_add(st_processes, "running", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - } - - rrddim_set_by_pointer(st_processes, rd_running, running); - rrdset_done(st_processes); -} - -// EBPF COUNTER PART -static inline void common_system_threads(uint64_t threads, int update_every) { - static RRDSET *st_threads = NULL; - static RRDDIM *rd_threads = NULL; - - if(unlikely(!st_threads)) { - st_threads = rrdset_create_localhost( - "system" - , "threads" - , NULL - , "processes" - , NULL - , "Threads" - , "threads" - , _COMMON_PLUGIN_NAME - , _COMMON_PLUGIN_MODULE_NAME - , NETDATA_CHART_PRIO_WINDOWS_THREADS - , update_every - , RRDSET_TYPE_LINE - ); - - rd_threads = rrddim_add(st_threads, "threads", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - } - - rrddim_set_by_pointer(st_threads, rd_threads, threads); - rrdset_done(st_threads); -} -#endif - -#if defined(OS_LINUX) -static inline void common_system_processes(uint64_t running, uint64_t blocked, int update_every) { - static RRDSET *st_processes = NULL; - static RRDDIM *rd_running = NULL; - static RRDDIM *rd_blocked = NULL; - - if(unlikely(!st_processes)) { - st_processes = _system_process_chart(); - - rd_running = rrddim_add(st_processes, "running", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - rd_blocked = rrddim_add(st_processes, "blocked", NULL, -1, 1, RRD_ALGORITHM_ABSOLUTE); - } - - rrddim_set_by_pointer(st_processes, rd_running, (collected_number)running); - rrddim_set_by_pointer(st_processes, rd_blocked, (collected_number)blocked); - rrdset_done(st_processes); -} -#endif - -static inline void common_system_context_switch(uint64_t value, int update_every) { - static RRDSET *st_ctxt = NULL; - static RRDDIM *rd_switches = NULL; - - if(unlikely(!st_ctxt)) { - st_ctxt = rrdset_create_localhost( - "system" - , "ctxt" - , NULL - , "processes" - , NULL - , "CPU Context Switches" - , "context switches/s" - , _COMMON_PLUGIN_NAME - , _COMMON_PLUGIN_MODULE_NAME - , NETDATA_CHART_PRIO_SYSTEM_CTXT - , update_every - , RRDSET_TYPE_LINE - ); - - rd_switches = rrddim_add(st_ctxt, "switches", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - - rrddim_set_by_pointer(st_ctxt, rd_switches, (collected_number)value); - rrdset_done(st_ctxt); -} - - -#endif //NETDATA_SYSTEM_PROCESSES_H diff --git a/src/collectors/common-contexts/system.ram.h b/src/collectors/common-contexts/system.ram.h deleted file mode 100644 index 6b108405c..000000000 --- a/src/collectors/common-contexts/system.ram.h +++ /dev/null @@ -1,68 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#ifndef NETDATA_SYSTEM_RAM_H -#define NETDATA_SYSTEM_RAM_H - -#include "common-contexts.h" - -#define _system_ram_chart() \ - rrdset_create_localhost( \ - "system" \ - , "ram" \ - , NULL \ - , "ram" \ - , NULL \ - , "System RAM" \ - , "MiB" \ - , _COMMON_PLUGIN_NAME \ - , _COMMON_PLUGIN_MODULE_NAME \ - , NETDATA_CHART_PRIO_SYSTEM_RAM \ - , update_every \ - , RRDSET_TYPE_STACKED \ - ) - -#ifdef OS_WINDOWS -static inline void common_system_ram(uint64_t free_bytes, uint64_t used_bytes, int update_every) { - static RRDSET 
*st_system_ram = NULL; - static RRDDIM *rd_free = NULL; - static RRDDIM *rd_used = NULL; - - if(unlikely(!st_system_ram)) { - st_system_ram = _system_ram_chart(); - rd_free = rrddim_add(st_system_ram, "free", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - rd_used = rrddim_add(st_system_ram, "used", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - } - - // this always have to be in base units, so that exporting sends base units to other time-series db - rrddim_set_by_pointer(st_system_ram, rd_free, (collected_number)free_bytes); - rrddim_set_by_pointer(st_system_ram, rd_used, (collected_number)used_bytes); - rrdset_done(st_system_ram); -} -#endif - -#ifdef OS_LINUX -static inline void common_system_ram(uint64_t free_bytes, uint64_t used_bytes, uint64_t cached_bytes, uint64_t buffers_bytes, int update_every) { - static RRDSET *st_system_ram = NULL; - static RRDDIM *rd_free = NULL; - static RRDDIM *rd_used = NULL; - static RRDDIM *rd_cached = NULL; - static RRDDIM *rd_buffers = NULL; - - if(unlikely(!st_system_ram)) { - st_system_ram = _system_ram_chart(); - rd_free = rrddim_add(st_system_ram, "free", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - rd_used = rrddim_add(st_system_ram, "used", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - rd_cached = rrddim_add(st_system_ram, "cached", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - rd_buffers = rrddim_add(st_system_ram, "buffers", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - } - - // this always have to be in base units, so that exporting sends base units to other time-series db - rrddim_set_by_pointer(st_system_ram, rd_free, (collected_number)free_bytes); - rrddim_set_by_pointer(st_system_ram, rd_used, (collected_number)used_bytes); - rrddim_set_by_pointer(st_system_ram, rd_cached, (collected_number)cached_bytes); - rrddim_set_by_pointer(st_system_ram, rd_buffers, (collected_number)buffers_bytes); - rrdset_done(st_system_ram); -} -#endif - -#endif //NETDATA_SYSTEM_RAM_H diff --git a/src/collectors/cups.plugin/cups_plugin.c b/src/collectors/cups.plugin/cups_plugin.c index 20b155e14..8d9e46cb1 100644 --- a/src/collectors/cups.plugin/cups_plugin.c +++ b/src/collectors/cups.plugin/cups_plugin.c @@ -226,7 +226,6 @@ void reset_metrics() { } int main(int argc, char **argv) { - clocks_init(); nd_log_initialize_for_external_plugins("cups.plugin"); parse_command_line(argc, argv); @@ -243,12 +242,11 @@ int main(int argc, char **argv) { time_t started_t = now_monotonic_sec(); size_t iteration = 0; - usec_t step = netdata_update_every * USEC_PER_SEC; heartbeat_t hb; - heartbeat_init(&hb); + heartbeat_init(&hb, netdata_update_every * USEC_PER_SEC); for (iteration = 0; 1; iteration++) { - heartbeat_next(&hb, step); + heartbeat_next(&hb); if (unlikely(netdata_exit)) break; diff --git a/src/collectors/cups.plugin/integrations/cups.md b/src/collectors/cups.plugin/integrations/cups.md index 828a7717e..1fec7135f 100644 --- a/src/collectors/cups.plugin/integrations/cups.md +++ b/src/collectors/cups.plugin/integrations/cups.md @@ -115,8 +115,8 @@ The file format is a modified INI syntax. The general structure is: [section2] option3 = some third value ``` -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/collectors/debugfs.plugin/debugfs_extfrag.c b/src/collectors/debugfs.plugin/debugfs_extfrag.c index 75da4deca..eb0946125 100644 --- a/src/collectors/debugfs.plugin/debugfs_extfrag.c +++ b/src/collectors/debugfs.plugin/debugfs_extfrag.c @@ -60,23 +60,20 @@ static void extfrag_send_chart(char *chart_id, collected_number *values) } int do_debugfs_extfrag(int update_every, const char *name) { - static procfile *ff = NULL; - static int chart_order = NETDATA_CHART_PRIO_MEM_FRAGMENTATION; + static procfile *ff = NULL;; if (unlikely(!ff)) { char filename[FILENAME_MAX + 1]; - snprintfz(filename, - FILENAME_MAX, - "%s%s", - netdata_configured_host_prefix, - "/sys/kernel/debug/extfrag/extfrag_index"); + snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/kernel/debug/extfrag/extfrag_index"); ff = procfile_open(filename, " \t,", PROCFILE_FLAG_DEFAULT); - if (unlikely(!ff)) return 1; + if (unlikely(!ff)) + return 1; } ff = procfile_readall(ff); - if (unlikely(!ff)) return 1; + if (unlikely(!ff)) + return 1; size_t l, i, j, lines = procfile_lines(ff); for (l = 0; l < lines; l++) { @@ -102,19 +99,21 @@ int do_debugfs_extfrag(int update_every, const char *name) { extrafrag->id = extrafrag->node_zone; fprintf( stdout, - "CHART mem.fragmentation_index_%s '' 'Memory fragmentation index for each order' 'index' 'fragmentation' 'mem.fragmentation_index_%s' 'line' %d %d '' 'debugfs.plugin' '%s'\n", + "CHART mem.fragmentation_index_%s '' 'Memory fragmentation index for each order' 'index' 'fragmentation' 'mem.numa_node_zone_fragmentation_index' 'line' %d %d '' 'debugfs.plugin' '%s'\n", extrafrag->node_zone, - zone_lowercase, - chart_order++, // FIXME: the same zones must have the same order + NETDATA_CHART_PRIO_MEM_FRAGMENTATION, update_every, name); for (i = 0; i < NETDATA_ORDER_FRAGMENTATION; i++) { fprintf(stdout, "DIMENSION '%s' '%s' absolute 1 1000 ''\n", orders[i], orders[i]); } - fprintf(stdout, - "CLABEL 'numa_node' 'node%s' 1\n" - "CLABEL_COMMIT\n", - id); + fprintf( + stdout, + "CLABEL 'numa_node' 'node%s' 1\n" + "CLABEL 'zone' '%s' 1\n" + "CLABEL_COMMIT\n", + id, + zone); } extfrag_send_chart(chart_id, line_orders); } diff --git a/src/collectors/debugfs.plugin/debugfs_plugin.c b/src/collectors/debugfs.plugin/debugfs_plugin.c index 94e3db631..37b4c83d8 100644 --- a/src/collectors/debugfs.plugin/debugfs_plugin.c +++ b/src/collectors/debugfs.plugin/debugfs_plugin.c @@ -159,7 +159,6 @@ static void debugfs_parse_args(int argc, char **argv) int main(int argc, char **argv) { - clocks_init(); nd_log_initialize_for_external_plugins("debugfs.plugin"); netdata_configured_host_prefix = getenv("NETDATA_HOST_PREFIX"); @@ -214,12 +213,11 @@ int main(int argc, char **argv) debugfs_parse_args(argc, argv); size_t iteration; - usec_t step = update_every * USEC_PER_SEC; heartbeat_t hb; - heartbeat_init(&hb); + heartbeat_init(&hb, update_every * USEC_PER_SEC); for (iteration = 0; iteration < 86400; iteration++) { - heartbeat_next(&hb, step); + heartbeat_next(&hb); int enabled = 0; for (int i = 0; debugfs_modules[i].name; i++) { diff --git 
a/src/collectors/debugfs.plugin/integrations/linux_zswap.md b/src/collectors/debugfs.plugin/integrations/linux_zswap.md index b41a480f9..7c15dd50e 100644 --- a/src/collectors/debugfs.plugin/integrations/linux_zswap.md +++ b/src/collectors/debugfs.plugin/integrations/linux_zswap.md @@ -112,8 +112,8 @@ The file format is a modified INI syntax. The general structure is: [section2] option3 = some third value ``` -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/collectors/debugfs.plugin/integrations/power_capping.md b/src/collectors/debugfs.plugin/integrations/power_capping.md index 5acb6bed6..5e9775fd7 100644 --- a/src/collectors/debugfs.plugin/integrations/power_capping.md +++ b/src/collectors/debugfs.plugin/integrations/power_capping.md @@ -106,8 +106,8 @@ The file format is a modified INI syntax. The general structure is: [section2] option3 = some third value ``` -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/collectors/debugfs.plugin/integrations/system_memory_fragmentation.md b/src/collectors/debugfs.plugin/integrations/system_memory_fragmentation.md index 3c43a592a..7ff57309f 100644 --- a/src/collectors/debugfs.plugin/integrations/system_memory_fragmentation.md +++ b/src/collectors/debugfs.plugin/integrations/system_memory_fragmentation.md @@ -110,8 +110,8 @@ The file format is a modified INI syntax. The general structure is: [section2] option3 = some third value ``` -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/collectors/diskspace.plugin/integrations/disk_space.md b/src/collectors/diskspace.plugin/integrations/disk_space.md index 61015120d..d27e7da25 100644 --- a/src/collectors/diskspace.plugin/integrations/disk_space.md +++ b/src/collectors/diskspace.plugin/integrations/disk_space.md @@ -108,8 +108,8 @@ The file format is a modified INI syntax. 
The general structure is: [section2] option3 = some third value ``` -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -125,7 +125,7 @@ You can also specify per mount point `[plugin:proc:diskspace:mountpoint]` |:----|:-----------|:-------|:--------:| | update every | Data collection frequency. | 1 | no | | remove charts of unmounted disks | Remove chart when a device is unmounted on host. | yes | no | -| check for new mount points every | Parse proc files frequency. | 15 | no | +| check for new mount points every | Parse proc files frequency. | 15s | no | | exclude space metrics on paths | Do not show metrics (charts) for listed paths. This option accepts netdata simple pattern. | /proc/* /sys/* /var/run/user/* /run/user/* /snap/* /var/lib/docker/* | no | | exclude space metrics on filesystems | Do not show metrics (charts) for listed filesystems. This option accepts netdata simple pattern. | *gvfs *gluster* *s3fs *ipfs *davfs2 *httpfs *sshfs *gdfs *moosefs fusectl autofs | no | | exclude inode metrics on filesystems | Do not show metrics (charts) for listed filesystems. This option accepts netdata simple pattern. | msdosfs msdos vfat overlayfs aufs* *unionfs | no | diff --git a/src/collectors/diskspace.plugin/metadata.yaml b/src/collectors/diskspace.plugin/metadata.yaml index 578f56bd0..a00a9e91d 100644 --- a/src/collectors/diskspace.plugin/metadata.yaml +++ b/src/collectors/diskspace.plugin/metadata.yaml @@ -63,7 +63,7 @@ modules: required: false - name: check for new mount points every description: Parse proc files frequency. - default_value: 15 + default_value: 15s required: false - name: exclude space metrics on paths description: Do not show metrics (charts) for listed paths. This option accepts netdata simple pattern. 
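Both hunks above retype `check for new mount points every` as a duration option, so the default is now written `15s` rather than a bare `15`. As a minimal sketch of how such suffixed values can map to seconds — the `parse_duration_seconds()` helper below is hypothetical, not the Agent's actual `config_get_duration_seconds()` implementation, which may accept more forms:

```c
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical illustration: map "15", "15s", "2m", "1h" or "1d" to seconds.
 * The Agent's real duration parser may accept additional units and syntax. */
static long parse_duration_seconds(const char *value, long fallback) {
    char *end = NULL;
    long n = strtol(value, &end, 10);
    if (end == value)
        return fallback;            /* no leading digits */
    switch (*end) {
        case '\0':                  /* bare number: already seconds */
        case 's': return n;
        case 'm': return n * 60;
        case 'h': return n * 3600;
        case 'd': return n * 86400;
        default:  return fallback;  /* unknown suffix */
    }
}

int main(void) {
    printf("%ld\n", parse_duration_seconds("15s", 15)); /* 15 */
    printf("%ld\n", parse_duration_seconds("2m", 15));  /* 120 */
    return 0;
}
```

Under this scheme a bare number keeps its old meaning of seconds, so existing configurations remain valid after the retyping.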
diff --git a/src/collectors/diskspace.plugin/plugin_diskspace.c b/src/collectors/diskspace.plugin/plugin_diskspace.c index f1d8909b2..c9f6fe599 100644 --- a/src/collectors/diskspace.plugin/plugin_diskspace.c +++ b/src/collectors/diskspace.plugin/plugin_diskspace.c @@ -544,11 +544,11 @@ void *diskspace_slow_worker(void *ptr) usec_t step = slow_update_every * USEC_PER_SEC; usec_t real_step = USEC_PER_SEC; heartbeat_t hb; - heartbeat_init(&hb); + heartbeat_init(&hb, USEC_PER_SEC); while(service_running(SERVICE_COLLECTORS)) { worker_is_idle(); - heartbeat_next(&hb, USEC_PER_SEC); + heartbeat_next(&hb); if (real_step < step) { real_step += USEC_PER_SEC; @@ -629,7 +629,7 @@ static void diskspace_main_cleanup(void *pptr) { #error WORKER_UTILIZATION_MAX_JOB_TYPES has to be at least 3 #endif -int diskspace_function_mount_points(BUFFER *wb, const char *function __maybe_unused) { +static int diskspace_function_mount_points(BUFFER *wb, const char *function __maybe_unused, BUFFER *payload __maybe_unused, const char *source __maybe_unused) { netdata_mutex_lock(&slow_mountinfo_mutex); buffer_flush(wb); @@ -849,17 +849,20 @@ void *diskspace_main(void *ptr) { worker_register_job_name(WORKER_JOB_CLEANUP, "cleanup"); rrd_function_add_inline(localhost, NULL, "mount-points", 10, - RRDFUNCTIONS_PRIORITY_DEFAULT, RRDFUNCTIONS_DISKSPACE_HELP, + RRDFUNCTIONS_PRIORITY_DEFAULT, RRDFUNCTIONS_VERSION_DEFAULT, + RRDFUNCTIONS_DISKSPACE_HELP, "top", HTTP_ACCESS_ANONYMOUS_DATA, diskspace_function_mount_points); cleanup_mount_points = config_get_boolean(CONFIG_SECTION_DISKSPACE, "remove charts of unmounted disks" , cleanup_mount_points); - int update_every = (int)config_get_number(CONFIG_SECTION_DISKSPACE, "update every", localhost->rrd_update_every); - if(update_every < localhost->rrd_update_every) + int update_every = (int)config_get_duration_seconds(CONFIG_SECTION_DISKSPACE, "update every", localhost->rrd_update_every); + if(update_every < localhost->rrd_update_every) { update_every = localhost->rrd_update_every; + config_set_duration_seconds(CONFIG_SECTION_DISKSPACE, "update every", update_every); + } - check_for_new_mountpoints_every = (int)config_get_number(CONFIG_SECTION_DISKSPACE, "check for new mount points every", check_for_new_mountpoints_every); + check_for_new_mountpoints_every = (int)config_get_duration_seconds(CONFIG_SECTION_DISKSPACE, "check for new mount points every", check_for_new_mountpoints_every); if(check_for_new_mountpoints_every < update_every) check_for_new_mountpoints_every = update_every; @@ -873,12 +876,11 @@ void *diskspace_main(void *ptr) { diskspace_slow_worker, &slow_worker_data); - usec_t step = update_every * USEC_PER_SEC; heartbeat_t hb; - heartbeat_init(&hb); + heartbeat_init(&hb, update_every * USEC_PER_SEC); while(service_running(SERVICE_COLLECTORS)) { worker_is_idle(); - /* usec_t hb_dt = */ heartbeat_next(&hb, step); + /* usec_t hb_dt = */ heartbeat_next(&hb); if(unlikely(!service_running(SERVICE_COLLECTORS))) break; diff --git a/src/collectors/ebpf.plugin/README.md b/src/collectors/ebpf.plugin/README.md index e9243966b..1246fec04 100644 --- a/src/collectors/ebpf.plugin/README.md +++ b/src/collectors/ebpf.plugin/README.md @@ -1,16 +1,6 @@ - - # Kernel traces/metrics (eBPF) collector -The Netdata Agent provides many [eBPF](https://ebpf.io/what-is-ebpf/) programs to help you troubleshoot and debug how applications interact with the Linux kernel. 
The `ebpf.plugin` uses [tracepoints, trampoline, and2 kprobes](#how-netdata-collects-data-using-probes-and-tracepoints) to collect a wide array of high value data about the host that would otherwise be impossible to capture. +The Netdata Agent provides many [eBPF](https://ebpf.io/what-is-ebpf/) programs to help you troubleshoot and debug how applications interact with the Linux kernel. The `ebpf.plugin` uses [tracepoints, trampoline, and kprobes](#how-netdata-collects-data-using-probes-and-tracepoints) to collect a wide array of high value data about the host that would otherwise be impossible to capture. > ❗ eBPF monitoring only works on Linux systems and with specific Linux kernels, including all kernels newer than `4.11.0`, and all kernels on CentOS 7.6 or later. For kernels older than `4.11.0`, improved support is in active development. @@ -26,10 +16,10 @@ For hands-on configuration and troubleshooting tips see our [tutorial on trouble Netdata uses the following features from the Linux kernel to run eBPF programs: -- Tracepoints are hooks to call specific functions. Tracepoints are more stable than `kprobes` and are preferred when +- Tracepoints are hooks to call specific functions. Tracepoints are more stable than `kprobes` and are preferred when both options are available. -- Trampolines are bridges between kernel functions, and BPF programs. Netdata uses them by default whenever available. -- Kprobes and return probes (`kretprobe`): Probes can insert virtually into any kernel instruction. When eBPF runs in `entry` mode, it attaches only `kprobes` for internal functions monitoring calls and some arguments every time a function is called. The user can also change configuration to use [`return`](#global-configuration-options) mode, and this will allow users to monitor return from these functions and detect possible failures. +- Trampolines are bridges between kernel functions and BPF programs. Netdata uses them by default whenever available. +- Kprobes and return probes (`kretprobe`): Probes can be inserted at virtually any kernel instruction. When eBPF runs in `entry` mode, it attaches only `kprobes` for internal functions, monitoring calls and some arguments every time a function is called. The user can also change the configuration to use [`return`](#global-configuration-options) mode, which allows users to monitor returns from these functions and detect possible failures. In each case, wherever a normal kprobe, kretprobe, or tracepoint would have run its hook function, an eBPF program is run instead, performing various collection logic before letting the kernel continue its normal control flow. There are more methods to trigger eBPF programs, such as uprobes, but they are currently not supported. ## Configuring ebpf.plugin The eBPF collector is installed and enabled by default on most new installations of the Agent. -If your Agent is v1.22 or older, you may to enable the collector yourself. +If your Agent is v1.22 or older, you may need to enable the collector yourself. ### Enable the eBPF collector -To enable or disable the entire eBPF collector: +To enable or disable the entire eBPF collector: + +1. Navigate to the [Netdata config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). -1. Navigate to the [Netdata config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata ``` -2. Use the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-netdataconf) script to edit `netdata.conf`. +2.
Use the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script to edit `netdata.conf`. ```bash ./edit-config netdata.conf ``` -3. Enable the collector by scrolling down to the `[plugins]` section. Uncomment the line `ebpf` (not +3. Enable the collector by scrolling down to the `[plugins]` section. Uncomment the line `ebpf` (not `ebpf_process`) and set it to `yes`. - ```conf + ```text [plugins] ebpf = yes ``` ### Configure the eBPF collector -You can configure the eBPF collector's behavior to fine-tune which metrics you receive and [optimize performance]\(#performance opimization). +You can configure the eBPF collector's behavior to fine-tune which metrics you receive and [optimize performance](#performance-opimization). To edit the `ebpf.d.conf`: -1. Navigate to the [Netdata config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +1. Navigate to the [Netdata config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). + ```bash cd /etc/netdata ``` -2. Use the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-netdataconf) script to edit [`ebpf.d.conf`](https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/ebpf.d.conf). + +2. Use the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script to edit [`ebpf.d.conf`](https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/ebpf.d.conf). ```bash ./edit-config ebpf.d.conf @@ -94,9 +87,9 @@ By default, this plugin uses the `entry` mode. Changing this mode can create sig system, but also offer valuable information if you are developing or debugging software. The `ebpf load mode` option accepts the following values: -- `entry`: This is the default mode. In this mode, the eBPF collector only monitors calls for the functions described in +- `entry`: This is the default mode. In this mode, the eBPF collector only monitors calls for the functions described in the sections above, and does not show charts related to errors. -- `return`: In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates new +- `return`: In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates new charts for the return of these functions, such as errors. Monitoring function returns can help in debugging software, such as failing to close file descriptors or creating zombie processes. @@ -108,7 +101,7 @@ interact with the Linux kernel. If you want to enable `apps.plugin` integration, change the "apps" setting to "yes". -```conf +```text [global] apps = yes ``` @@ -122,7 +115,7 @@ interacts with the Linux kernel. The integration with `cgroups.plugin` is disabled by default to avoid creating overhead on your system. If you want to _enable_ the integration with `cgroups.plugin`, change the `cgroups` setting to `yes`. -```conf +```text [global] cgroups = yes ``` @@ -133,10 +126,7 @@ If you do not need to monitor specific metrics for your `cgroups`, you can enabl #### Maps per Core -When netdata is running on kernels newer than `4.6` users are allowed to modify how the `ebpf.plugin` creates maps (hash or -array). When `maps per core` is defined as `yes`, plugin will create a map per core on host, on the other hand, -when the value is set as `no` only one hash table will be created, this option will use less memory, but it also can -increase overhead for processes. 
+When netdata is running on kernels newer than `4.6` users are allowed to modify how the `ebpf.plugin` creates maps (hash or array). When `maps per core` is defined as `yes`, plugin will create a map per core on host, on the other hand, when the value is set as `no` only one hash table will be created, this option will use less memory, but it also can increase overhead for processes. #### Collect PID @@ -146,10 +136,10 @@ process group for which it needs to plot data. There are different ways to collect PID, and you can select the way `ebpf.plugin` collects data with the following values: -- `real parent`: This is the default mode. Collection will aggregate data for the real parent, the thread that creates +- `real parent`: This is the default mode. Collection will aggregate data for the real parent, the thread that creates child threads. -- `parent`: Parent and real parent are the same when a process starts, but this value can be changed during run time. -- `all`: This option will store all PIDs that run on the host. Note, this method can be expensive for the host, +- `parent`: Parent and real parent are the same when a process starts, but this value can be changed during run time. +- `all`: This option will store all PIDs that run on the host. Note, this method can be expensive for the host, because more memory needs to be allocated and parsed. The threads that have integration with other collectors have an internal clean up wherein they attach either a @@ -174,97 +164,97 @@ Linux metrics: > Note: The parenthetical accompanying each bulleted item provides the chart name. -- mem - - Number of processes killed due out of memory. (`oomkills`) -- process - - Number of processes created with `do_fork`. (`process_create`) - - Number of threads created with `do_fork` or `clone (2)`, depending on your system's kernel +- mem + - Number of processes killed due out of memory. (`oomkills`) +- process + - Number of processes created with `do_fork`. (`process_create`) + - Number of threads created with `do_fork` or `clone (2)`, depending on your system's kernel version. (`thread_create`) - - Number of times that a process called `do_exit`. (`task_exit`) - - Number of times that a process called `release_task`. (`task_close`) - - Number of times that an error happened to create thread or process. (`task_error`) -- swap - - Number of calls to `swap_readpage`. (`swap_read_call`) - - Number of calls to `swap_writepage`. (`swap_write_call`) -- network - - Number of outbound connections using TCP/IPv4. (`outbound_conn_ipv4`) - - Number of outbound connections using TCP/IPv6. (`outbound_conn_ipv6`) - - Number of bytes sent. (`total_bandwidth_sent`) - - Number of bytes received. (`total_bandwidth_recv`) - - Number of calls to `tcp_sendmsg`. (`bandwidth_tcp_send`) - - Number of calls to `tcp_cleanup_rbuf`. (`bandwidth_tcp_recv`) - - Number of calls to `tcp_retransmit_skb`. (`bandwidth_tcp_retransmit`) - - Number of calls to `udp_sendmsg`. (`bandwidth_udp_send`) - - Number of calls to `udp_recvmsg`. (`bandwidth_udp_recv`) -- file access - - Number of calls to open files. (`file_open`) - - Number of calls to open files that returned errors. (`open_error`) - - Number of files closed. (`file_closed`) - - Number of calls to close files that returned errors. (`file_error_closed`) -- vfs - - Number of calls to `vfs_unlink`. (`file_deleted`) - - Number of calls to `vfs_write`. (`vfs_write_call`) - - Number of calls to write a file that returned errors. (`vfs_write_error`) - - Number of calls to `vfs_read`. 
(`vfs_read_call`) - - - Number of calls to read a file that returned errors. (`vfs_read_error`) - - Number of bytes written with `vfs_write`. (`vfs_write_bytes`) - - Number of bytes read with `vfs_read`. (`vfs_read_bytes`) - - Number of calls to `vfs_fsync`. (`vfs_fsync`) - - Number of calls to sync file that returned errors. (`vfs_fsync_error`) - - Number of calls to `vfs_open`. (`vfs_open`) - - Number of calls to open file that returned errors. (`vfs_open_error`) - - Number of calls to `vfs_create`. (`vfs_create`) - - Number of calls to open file that returned errors. (`vfs_create_error`) -- page cache - - Ratio of pages accessed. (`cachestat_ratio`) - - Number of modified pages ("dirty"). (`cachestat_dirties`) - - Number of accessed pages. (`cachestat_hits`) - - Number of pages brought from disk. (`cachestat_misses`) -- directory cache - - Ratio of files available in directory cache. (`dc_hit_ratio`) - - Number of files accessed. (`dc_reference`) - - Number of files accessed that were not in cache. (`dc_not_cache`) - - Number of files not found. (`dc_not_found`) -- ipc shm - - Number of calls to `shm_get`. (`shmget_call`) - - Number of calls to `shm_at`. (`shmat_call`) - - Number of calls to `shm_dt`. (`shmdt_call`) - - Number of calls to `shm_ctl`. (`shmctl_call`) + - Number of times that a process called `do_exit`. (`task_exit`) + - Number of times that a process called `release_task`. (`task_close`) + - Number of times that an error happened to create thread or process. (`task_error`) +- swap + - Number of calls to `swap_readpage`. (`swap_read_call`) + - Number of calls to `swap_writepage`. (`swap_write_call`) +- network + - Number of outbound connections using TCP/IPv4. (`outbound_conn_ipv4`) + - Number of outbound connections using TCP/IPv6. (`outbound_conn_ipv6`) + - Number of bytes sent. (`total_bandwidth_sent`) + - Number of bytes received. (`total_bandwidth_recv`) + - Number of calls to `tcp_sendmsg`. (`bandwidth_tcp_send`) + - Number of calls to `tcp_cleanup_rbuf`. (`bandwidth_tcp_recv`) + - Number of calls to `tcp_retransmit_skb`. (`bandwidth_tcp_retransmit`) + - Number of calls to `udp_sendmsg`. (`bandwidth_udp_send`) + - Number of calls to `udp_recvmsg`. (`bandwidth_udp_recv`) +- file access + - Number of calls to open files. (`file_open`) + - Number of calls to open files that returned errors. (`open_error`) + - Number of files closed. (`file_closed`) + - Number of calls to close files that returned errors. (`file_error_closed`) +- vfs + - Number of calls to `vfs_unlink`. (`file_deleted`) + - Number of calls to `vfs_write`. (`vfs_write_call`) + - Number of calls to write a file that returned errors. (`vfs_write_error`) + - Number of calls to `vfs_read`. (`vfs_read_call`) + - - Number of calls to read a file that returned errors. (`vfs_read_error`) + - Number of bytes written with `vfs_write`. (`vfs_write_bytes`) + - Number of bytes read with `vfs_read`. (`vfs_read_bytes`) + - Number of calls to `vfs_fsync`. (`vfs_fsync`) + - Number of calls to sync file that returned errors. (`vfs_fsync_error`) + - Number of calls to `vfs_open`. (`vfs_open`) + - Number of calls to open file that returned errors. (`vfs_open_error`) + - Number of calls to `vfs_create`. (`vfs_create`) + - Number of calls to open file that returned errors. (`vfs_create_error`) +- page cache + - Ratio of pages accessed. (`cachestat_ratio`) + - Number of modified pages ("dirty"). (`cachestat_dirties`) + - Number of accessed pages. (`cachestat_hits`) + - Number of pages brought from disk. 
(`cachestat_misses`) +- directory cache + - Ratio of files available in directory cache. (`dc_hit_ratio`) + - Number of files accessed. (`dc_reference`) + - Number of files accessed that were not in cache. (`dc_not_cache`) + - Number of files not found. (`dc_not_found`) +- ipc shm + - Number of calls to `shm_get`. (`shmget_call`) + - Number of calls to `shm_at`. (`shmat_call`) + - Number of calls to `shm_dt`. (`shmdt_call`) + - Number of calls to `shm_ctl`. (`shmctl_call`) ### `[ebpf programs]` configuration options The eBPF collector enables and runs the following eBPF programs by default: -- `cachestat`: Netdata's eBPF data collector creates charts about the memory page cache. When the integration with +- `cachestat`: Netdata's eBPF data collector creates charts about the memory page cache. When the integration with [`apps.plugin`](/src/collectors/apps.plugin/README.md) is enabled, this collector creates charts for the whole host _and_ for each application. -- `fd` : This eBPF program creates charts that show information about calls to open files. -- `mount`: This eBPF program creates charts that show calls to syscalls mount(2) and umount(2). -- `shm`: This eBPF program creates charts that show calls to syscalls shmget(2), shmat(2), shmdt(2) and shmctl(2). -- `process`: This eBPF program creates charts that show information about process life. When in `return` mode, it also +- `fd` : This eBPF program creates charts that show information about calls to open files. +- `mount`: This eBPF program creates charts that show calls to syscalls mount(2) and umount(2). +- `shm`: This eBPF program creates charts that show calls to syscalls shmget(2), shmat(2), shmdt(2) and shmctl(2). +- `process`: This eBPF program creates charts that show information about process life. When in `return` mode, it also creates charts showing errors when these operations are executed. -- `hardirq`: This eBPF program creates charts that show information about time spent servicing individual hardware +- `hardirq`: This eBPF program creates charts that show information about time spent servicing individual hardware interrupt requests (hard IRQs). -- `softirq`: This eBPF program creates charts that show information about time spent servicing individual software +- `softirq`: This eBPF program creates charts that show information about time spent servicing individual software interrupt requests (soft IRQs). -- `oomkill`: This eBPF program creates a chart that shows OOM kills for all applications recognized via +- `oomkill`: This eBPF program creates a chart that shows OOM kills for all applications recognized via the `apps.plugin` integration. Note that this program will show application charts regardless of whether apps integration is turned on or off. You can also enable the following eBPF programs: -- `dcstat` : This eBPF program creates charts that show information about file access using directory cache. It appends +- `dcstat` : This eBPF program creates charts that show information about file access using directory cache. It appends `kprobes` for `lookup_fast()` and `d_lookup()` to identify if files are inside directory cache, outside and files are not found. -- `disk` : This eBPF program creates charts that show information about disk latency independent of filesystem. -- `filesystem` : This eBPF program creates charts that show information about some filesystem latency. -- `swap` : This eBPF program creates charts that show information about swap access. 
-- `mdflush`: This eBPF program creates charts that show information about -- `sync`: Monitor calls to syscalls sync(2), fsync(2), fdatasync(2), syncfs(2), msync(2), and sync_file_range(2). -- `socket`: This eBPF program creates charts with information about `TCP` and `UDP` functions, including the +- `disk` : This eBPF program creates charts that show information about disk latency independent of filesystem. +- `filesystem` : This eBPF program creates charts that show information about some filesystem latency. +- `swap` : This eBPF program creates charts that show information about swap access. +- `mdflush`: This eBPF program creates charts that show information about multi-device software flushes. +- `sync`: Monitor calls to syscalls sync(2), fsync(2), fdatasync(2), syncfs(2), msync(2), and sync_file_range(2). +- `socket`: This eBPF program creates charts with information about `TCP` and `UDP` functions, including the bandwidth consumed by each. -- `vfs`: This eBPF program creates charts that show information about VFS (Virtual File System) functions. +- `vfs`: This eBPF program creates charts that show information about VFS (Virtual File System) functions. ### Configuring eBPF threads You can configure each thread of the eBPF data collector. This allows you to ove To configure an eBPF thread: -1. Navigate to the [Netdata config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +1. Navigate to the [Netdata config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). + ```bash cd /etc/netdata ``` -2. Use the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-netdataconf) script to edit a thread configuration file. The following configuration files are available: - - `network.conf`: Configuration for the [`network` thread](#network-configuration). This config file overwrites the global options and also lets you specify which network the eBPF collector monitors. - - `process.conf`: Configuration for the [`process` thread](#sync-configuration). - - `cachestat.conf`: Configuration for the `cachestat` thread(#filesystem-configuration). - - `dcstat.conf`: Configuration for the `dcstat` thread. - - `disk.conf`: Configuration for the `disk` thread. - - `fd.conf`: Configuration for the `file descriptor` thread. - - `filesystem.conf`: Configuration for the `filesystem` thread. - - `hardirq.conf`: Configuration for the `hardirq` thread. - - `softirq.conf`: Configuration for the `softirq` thread. - - `sync.conf`: Configuration for the `sync` thread. - - `vfs.conf`: Configuration for the `vfs` thread. +2. Use the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script to edit a thread configuration file. The following configuration files are available: + + - `network.conf`: Configuration for the [`network` thread](#network-configuration). This config file overwrites the global options and also lets you specify which network the eBPF collector monitors. + - `process.conf`: Configuration for the [`process` thread](#sync-configuration). + - `cachestat.conf`: Configuration for the [`cachestat` thread](#filesystem-configuration). + - `dcstat.conf`: Configuration for the `dcstat` thread. + - `disk.conf`: Configuration for the `disk` thread. + - `fd.conf`: Configuration for the `file descriptor` thread. + - `filesystem.conf`: Configuration for the `filesystem` thread. + - `hardirq.conf`: Configuration for the `hardirq` thread. + - `softirq.conf`: Configuration for the `softirq` thread.
+ - `sync.conf`: Configuration for the `sync` thread. + - `vfs.conf`: Configuration for the `vfs` thread. ```bash ./edit-config FILE.conf @@ -304,7 +296,7 @@ are divided in the following sections: You can configure the information shown with function `ebpf_socket` using the settings in this section. -```conf +```text [network connections] enabled = yes resolve hostname ips = no @@ -324,13 +316,13 @@ and `145`. The following options are available: -- `enabled`: Disable network connections monitoring. This can affect directly some funcion output. -- `resolve hostname ips`: Enable resolving IPs to hostnames. It is disabled by default because it can be too slow. -- `resolve service names`: Convert destination ports into service names, for example, port `53` protocol `UDP` becomes `domain`. +- `enabled`: Disable network connections monitoring. This can affect directly some funcion output. +- `resolve hostname ips`: Enable resolving IPs to hostnames. It is disabled by default because it can be too slow. +- `resolve service names`: Convert destination ports into service names, for example, port `53` protocol `UDP` becomes `domain`. all names are read from /etc/services. -- `ports`: Define the destination ports for Netdata to monitor. -- `hostnames`: The list of hostnames that can be resolved to an IP address. -- `ips`: The IP or range of IPs that you want to monitor. You can use IPv4 or IPv6 addresses, use dashes to define a +- `ports`: Define the destination ports for Netdata to monitor. +- `hostnames`: The list of hostnames that can be resolved to an IP address. +- `ips`: The IP or range of IPs that you want to monitor. You can use IPv4 or IPv6 addresses, use dashes to define a range of IPs, or use CIDR values. By default the traffic table is created using the destination IPs and ports of the sockets. This can be @@ -346,7 +338,7 @@ section. For example, Netdata's default port (`19999`) is not listed in `/etc/services`. To associate that port with the Netdata service in network connection charts, and thus see the name of the service instead of its port, define it: -```conf +```text [service name] 19999 = Netdata ``` @@ -355,7 +347,7 @@ service in network connection charts, and thus see the name of the service inste The sync configuration has specific options to disable monitoring for syscalls. All syscalls are monitored by default. -```conf +```text [syscalls] sync = yes msync = yes @@ -370,7 +362,7 @@ The sync configuration has specific options to disable monitoring for syscalls. The filesystem configuration has specific options to disable monitoring for filesystems; by default, all filesystems are monitored. -```conf +```text [filesystem] btrfsdist = yes ext4dist = yes @@ -408,19 +400,18 @@ You can run our helper script to determine whether your system can support eBPF curl -sSL https://raw.githubusercontent.com/netdata/kernel-collector/master/tools/check-kernel-config.sh | sudo bash ``` - If you see a warning about a missing kernel configuration (`KPROBES KPROBES_ON_FTRACE HAVE_KPROBES BPF BPF_SYSCALL BPF_JIT`), you will need to recompile your kernel to support this configuration. The process of recompiling Linux kernels varies based on your distribution and version. 
Read the documentation for your system's distribution to learn more about the specific workflow for recompiling the kernel, ensuring that you set all the necessary kernel configuration options:

-- [Ubuntu](https://wiki.ubuntu.com/Kernel/BuildYourOwnKernel)
-- [Debian](https://kernel-team.pages.debian.net/kernel-handbook/ch-common-tasks.html#s-common-official)
-- [Fedora](https://fedoraproject.org/wiki/Building_a_custom_kernel)
-- [CentOS](https://wiki.centos.org/HowTos/Custom_Kernel)
-- [Arch Linux](https://wiki.archlinux.org/index.php/Kernel/Traditional_compilation)
-- [Slackware](https://docs.slackware.com/howtos:slackware_admin:kernelbuilding)
+- [Ubuntu](https://wiki.ubuntu.com/Kernel/BuildYourOwnKernel)
+- [Debian](https://kernel-team.pages.debian.net/kernel-handbook/ch-common-tasks.html#s-common-official)
+- [Fedora](https://fedoraproject.org/wiki/Building_a_custom_kernel)
+- [CentOS](https://wiki.centos.org/HowTos/Custom_Kernel)
+- [Arch Linux](https://wiki.archlinux.org/index.php/Kernel/Traditional_compilation)
+- [Slackware](https://docs.slackware.com/howtos:slackware_admin:kernelbuilding)

### Mount `debugfs` and `tracefs`

@@ -455,12 +446,12 @@ Internally, the Linux kernel treats both processes and threads as `tasks`. To cr
system calls: `fork(2)`, `vfork(2)`, and `clone(2)`. To generate this chart, the eBPF collector uses the following
`tracepoints` and `kprobe`:

-- `sched/sched_process_fork`: Tracepoint called after a call for `fork (2)`, `vfork (2)` and `clone (2)`.
-- `sched/sched_process_exec`: Tracepoint called after a exec-family syscall.
-- `kprobe/kernel_clone`: This is the main [`fork()`](https://elixir.bootlin.com/linux/v5.10/source/kernel/fork.c#L2415)
+- `sched/sched_process_fork`: Tracepoint called after a call to `fork(2)`, `vfork(2)`, or `clone(2)`.
+- `sched/sched_process_exec`: Tracepoint called after an exec-family syscall.
+- `kprobe/kernel_clone`: This is the main [`fork()`](https://elixir.bootlin.com/linux/v5.10/source/kernel/fork.c#L2415)
routine since kernel `5.10.0` was released.
-- `kprobe/_do_fork`: Like `kernel_clone`, but this was the main function between kernels `4.2.0` and `5.9.16`
-- `kprobe/do_fork`: This was the main function before kernel `4.2.0`.
+- `kprobe/_do_fork`: Like `kernel_clone`, but this was the main function between kernels `4.2.0` and `5.9.16`.
+- `kprobe/do_fork`: This was the main function before kernel `4.2.0`.

#### Process Exit

@@ -469,8 +460,8 @@ system that the task is finishing its work. The second step is to release the ke
function `release_task`. The difference between the two dimensions can help you discover
[zombie processes](https://en.wikipedia.org/wiki/Zombie_process). To get the metrics, the collector uses (a minimal attach sketch follows this list):

-- `sched/sched_process_exit`: Tracepoint called after a task exits.
-- `kprobe/release_task`: This function is called when a process exits, as the kernel still needs to remove the process
+- `sched/sched_process_exit`: Tracepoint called after a task exits.
+- `kprobe/release_task`: This function is called when a process exits, as the kernel still needs to remove the process
descriptor.
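For readers curious how such attachments look in code, here is a minimal libbpf sketch — not the plugin's actual code, and the program names are hypothetical — that attaches to the two collection points above:

```c
// Minimal libbpf attach sketch: assumes an already opened and loaded
// bpf_object whose BPF programs carry the (hypothetical) names below.
// With libbpf strict mode, the attach helpers return NULL on failure.
#include <bpf/libbpf.h>

int attach_exit_probes(struct bpf_object *obj)
{
    struct bpf_program *tp = bpf_object__find_program_by_name(obj, "netdata_exit_tp");
    struct bpf_program *kp = bpf_object__find_program_by_name(obj, "netdata_release_task_kp");
    if (!tp || !kp)
        return -1;

    // Tracepoint sched:sched_process_exit, fired when a task exits.
    if (!bpf_program__attach_tracepoint(tp, "sched", "sched_process_exit"))
        return -1;

    // kprobe on release_task (second argument: false = entry probe).
    if (!bpf_program__attach_kprobe(kp, false, "release_task"))
        return -1;

    return 0;
}
```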
#### Task error

@@ -489,9 +480,9 @@ the collector attaches `kprobes` for cited functions.

The following `tracepoints` are used to measure time usage for soft IRQs:

-- [`irq/softirq_entry`](https://www.kernel.org/doc/html/latest/core-api/tracepoint.html#c.trace_softirq_entry): Called
+- [`irq/softirq_entry`](https://www.kernel.org/doc/html/latest/core-api/tracepoint.html#c.trace_softirq_entry): Called
before the softirq handler runs.
-- [`irq/softirq_exit`](https://www.kernel.org/doc/html/latest/core-api/tracepoint.html#c.trace_softirq_exit): Called when
+- [`irq/softirq_exit`](https://www.kernel.org/doc/html/latest/core-api/tracepoint.html#c.trace_softirq_exit): Called when
the softirq handler returns.

#### Hard IRQ

@@ -499,60 +490,60 @@ The following `tracepoints` are used to measure time usage for soft IRQs:

The following tracepoints are used to measure the latency of servicing a hardware interrupt request (hard IRQ).

-- [`irq/irq_handler_entry`](https://www.kernel.org/doc/html/latest/core-api/tracepoint.html#c.trace_irq_handler_entry):
+- [`irq/irq_handler_entry`](https://www.kernel.org/doc/html/latest/core-api/tracepoint.html#c.trace_irq_handler_entry):
Called immediately before the IRQ action handler.
-- [`irq/irq_handler_exit`](https://www.kernel.org/doc/html/latest/core-api/tracepoint.html#c.trace_irq_handler_exit):
+- [`irq/irq_handler_exit`](https://www.kernel.org/doc/html/latest/core-api/tracepoint.html#c.trace_irq_handler_exit):
Called immediately after the IRQ action handler returns.
-- `irq_vectors`: These are traces from `irq_handler_entry` and
+- `irq_vectors`: These are traces from `irq_handler_entry` and
`irq_handler_exit` when an IRQ is handled. The following vector tracepoints can be triggered:
- - `irq_vectors/local_timer_entry`
- - `irq_vectors/local_timer_exit`
- - `irq_vectors/reschedule_entry`
- - `irq_vectors/reschedule_exit`
- - `irq_vectors/call_function_entry`
- - `irq_vectors/call_function_exit`
- - `irq_vectors/call_function_single_entry`
- - `irq_vectors/call_function_single_xit`
- - `irq_vectors/irq_work_entry`
- - `irq_vectors/irq_work_exit`
- - `irq_vectors/error_apic_entry`
- - `irq_vectors/error_apic_exit`
- - `irq_vectors/thermal_apic_entry`
- - `irq_vectors/thermal_apic_exit`
- - `irq_vectors/threshold_apic_entry`
- - `irq_vectors/threshold_apic_exit`
- - `irq_vectors/deferred_error_entry`
- - `irq_vectors/deferred_error_exit`
- - `irq_vectors/spurious_apic_entry`
- - `irq_vectors/spurious_apic_exit`
- - `irq_vectors/x86_platform_ipi_entry`
- - `irq_vectors/x86_platform_ipi_exit`
+ - `irq_vectors/local_timer_entry`
+ - `irq_vectors/local_timer_exit`
+ - `irq_vectors/reschedule_entry`
+ - `irq_vectors/reschedule_exit`
+ - `irq_vectors/call_function_entry`
+ - `irq_vectors/call_function_exit`
+ - `irq_vectors/call_function_single_entry`
+ - `irq_vectors/call_function_single_exit`
+ - `irq_vectors/irq_work_entry`
+ - `irq_vectors/irq_work_exit`
+ - `irq_vectors/error_apic_entry`
+ - `irq_vectors/error_apic_exit`
+ - `irq_vectors/thermal_apic_entry`
+ - `irq_vectors/thermal_apic_exit`
+ - `irq_vectors/threshold_apic_entry`
+ - `irq_vectors/threshold_apic_exit`
+ - `irq_vectors/deferred_error_entry`
+ - `irq_vectors/deferred_error_exit`
+ - `irq_vectors/spurious_apic_entry`
+ - `irq_vectors/spurious_apic_exit`
+ - `irq_vectors/x86_platform_ipi_entry`
+ - `irq_vectors/x86_platform_ipi_exit`

#### IPC shared memory

To monitor shared memory system call counts, Netdata attaches tracing to the following functions (a small demo program follows the list):

-- `shmget`: Runs when [`shmget`](https://man7.org/linux/man-pages/man2/shmget.2.html) is called.
-- `shmat`: Runs when [`shmat`](https://man7.org/linux/man-pages/man2/shmat.2.html) is called.
-- `shmdt`: Runs when [`shmdt`](https://man7.org/linux/man-pages/man2/shmat.2.html) is called.
-- `shmctl`: Runs when [`shmctl`](https://man7.org/linux/man-pages/man2/shmctl.2.html) is called.
+- `shmget`: Runs when [`shmget`](https://man7.org/linux/man-pages/man2/shmget.2.html) is called.
+- `shmat`: Runs when [`shmat`](https://man7.org/linux/man-pages/man2/shmat.2.html) is called.
+- `shmdt`: Runs when [`shmdt`](https://man7.org/linux/man-pages/man2/shmdt.2.html) is called.
+- `shmctl`: Runs when [`shmctl`](https://man7.org/linux/man-pages/man2/shmctl.2.html) is called.
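To see these counters move, you can exercise all four syscalls with a tiny standalone program (illustrative only):

```c
// Tiny demo: each call below increments the corresponding counter
// on the IPC shared memory chart while this collector is running.
#include <sys/ipc.h>
#include <sys/shm.h>
#include <stdio.h>

int main(void)
{
    int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600); // counted as shmget
    if (id < 0) { perror("shmget"); return 1; }

    void *p = shmat(id, NULL, 0);                         // counted as shmat
    if (p != (void *)-1)
        shmdt(p);                                         // counted as shmdt

    shmctl(id, IPC_RMID, NULL);                           // counted as shmctl
    return 0;
}
```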
### Memory

In the memory submenu, the eBPF plugin creates two submenus, **page cache** and **synchronization**, with the following organization:

-- Page Cache
- - Page cache ratio
- - Dirty pages
- - Page cache hits
- - Page cache misses
-- Synchronization
- - File sync
- - Memory map sync
- - File system sync
- - File range sync
+- Page Cache
+ - Page cache ratio
+ - Dirty pages
+ - Page cache hits
+ - Page cache misses
+- Synchronization
+ - File sync
+ - Memory map sync
+ - File system sync
+ - File range sync

#### Page cache hits

@@ -587,10 +578,10 @@ The chart `cachestat_ratio` shows how processes are accessing page cache. In a n
100%, which means that the majority of the work on the machine is processed in memory. To calculate the ratio, Netdata attaches `kprobes` for kernel functions:

-- `add_to_page_cache_lru`: Page addition.
-- `mark_page_accessed`: Access to cache.
-- `account_page_dirtied`: Dirty (modified) pages.
-- `mark_buffer_dirty`: Writes to page cache.
+- `add_to_page_cache_lru`: Page addition.
+- `mark_page_accessed`: Access to cache.
+- `account_page_dirtied`: Dirty (modified) pages.
+- `mark_buffer_dirty`: Writes to page cache.

#### Page cache misses

@@ -629,7 +620,7 @@ in [disk latency](#disk) charts.

By default, MD flush is disabled. To enable it, configure your
`/etc/netdata/ebpf.d.conf` file as:

-```conf
+```text
[global]
mdflush = yes
```

@@ -638,7 +629,7 @@ By default, MD flush is disabled. To enable it, configure your

To collect data related to Linux multi-device (MD) flushing, the following kprobe is used:

-- `kprobe/md_flush_request`: called whenever a request for flushing multi-device data is made.
+- `kprobe/md_flush_request`: Called whenever a request for flushing multi-device data is made.

### Disk

@@ -648,9 +639,9 @@ The eBPF plugin also shows a chart in the Disk section when the `disk` thread is

This will create the chart `disk_latency_io` for each disk on the host. The following tracepoints are used:

-- [`block/block_rq_issue`](https://www.kernel.org/doc/html/latest/core-api/tracepoint.html#c.trace_block_rq_issue):
+- [`block/block_rq_issue`](https://www.kernel.org/doc/html/latest/core-api/tracepoint.html#c.trace_block_rq_issue):
IO request operation to a device drive.
-- [`block/block_rq_complete`](https://www.kernel.org/doc/html/latest/core-api/tracepoint.html#c.trace_block_rq_complete):
+- [`block/block_rq_complete`](https://www.kernel.org/doc/html/latest/core-api/tracepoint.html#c.trace_block_rq_complete):
IO operation completed by device.

Disk latency is the single most important metric to focus on when it comes to storage performance, under most circumstances.

@@ -675,10 +666,10 @@ To measure the latency of executing some actions in an
collector needs to attach `kprobes` and `kretprobes` for each of the following
functions (a simplified sketch of this timing pattern follows the list below):

-- `ext4_file_read_iter`: Function used to measure read latency.
-- `ext4_file_write_iter`: Function used to measure write latency.
-- `ext4_file_open`: Function used to measure open latency.
-- `ext4_sync_file`: Function used to measure sync latency.
+- `ext4_file_read_iter`: Function used to measure read latency.
+- `ext4_file_write_iter`: Function used to measure write latency.
+- `ext4_file_open`: Function used to measure open latency.
+- `ext4_sync_file`: Function used to measure sync latency.
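All the filesystem threads below share the same pattern: a `kprobe` records a timestamp on function entry, and a `kretprobe` turns the difference into a latency sample. The fragment below is a stripped-down conceptual sketch of that pattern, not the plugin's actual program (which lives in the kernel-collector repository):

```c
// Conceptual kprobe/kretprobe timing pattern (simplified sketch).
#include <linux/bpf.h>
#include <linux/ptrace.h>
#include <bpf/bpf_helpers.h>

struct {
    __uint(type, BPF_MAP_TYPE_HASH);
    __uint(max_entries, 10240);
    __type(key, __u32);    /* thread id */
    __type(value, __u64);  /* entry timestamp (ns) */
} start SEC(".maps");

SEC("kprobe/ext4_file_read_iter")
int enter_read(struct pt_regs *ctx)
{
    __u32 tid = (__u32)bpf_get_current_pid_tgid();
    __u64 ts = bpf_ktime_get_ns();

    /* remember when this thread entered the function */
    bpf_map_update_elem(&start, &tid, &ts, BPF_ANY);
    return 0;
}

SEC("kretprobe/ext4_file_read_iter")
int exit_read(struct pt_regs *ctx)
{
    __u32 tid = (__u32)bpf_get_current_pid_tgid();
    __u64 *ts = bpf_map_lookup_elem(&start, &tid);

    if (!ts)
        return 0;

    __u64 latency = bpf_ktime_get_ns() - *ts;   /* read latency in ns */
    bpf_map_delete_elem(&start, &tid);
    /* a real program would bucket `latency` into a histogram map here */
    (void)latency;
    return 0;
}

char LICENSE[] SEC("license") = "GPL";
```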
#### ZFS

@@ -686,10 +677,10 @@ To measure the latency of executing some actions in a zfs filesystem, the
collector needs to attach `kprobes` and `kretprobes` for each of the following
functions:

-- `zpl_iter_read`: Function used to measure read latency.
-- `zpl_iter_write`: Function used to measure write latency.
-- `zpl_open`: Function used to measure open latency.
-- `zpl_fsync`: Function used to measure sync latency.
+- `zpl_iter_read`: Function used to measure read latency.
+- `zpl_iter_write`: Function used to measure write latency.
+- `zpl_open`: Function used to measure open latency.
+- `zpl_fsync`: Function used to measure sync latency.

#### XFS

@@ -698,10 +689,10 @@ To measure the latency of executing some actions in an
collector needs to attach `kprobes` and `kretprobes` for each of the following
functions:

-- `xfs_file_read_iter`: Function used to measure read latency.
-- `xfs_file_write_iter`: Function used to measure write latency.
-- `xfs_file_open`: Function used to measure open latency.
-- `xfs_file_fsync`: Function used to measure sync latency.
+- `xfs_file_read_iter`: Function used to measure read latency.
+- `xfs_file_write_iter`: Function used to measure write latency.
+- `xfs_file_open`: Function used to measure open latency.
+- `xfs_file_fsync`: Function used to measure sync latency.

#### NFS

@@ -710,11 +701,11 @@ To measure the latency of executing some actions in an
collector needs to attach `kprobes` and `kretprobes` for each of the following
functions:

-- `nfs_file_read`: Function used to measure read latency.
-- `nfs_file_write`: Function used to measure write latency.
-- `nfs_file_open`: Functions used to measure open latency.
-- `nfs4_file_open`: Functions used to measure open latency for NFS v4.
-- `nfs_getattr`: Function used to measure sync latency.
+- `nfs_file_read`: Function used to measure read latency.
+- `nfs_file_write`: Function used to measure write latency.
+- `nfs_file_open`: Function used to measure open latency.
+- `nfs4_file_open`: Function used to measure open latency for NFS v4.
+- `nfs_getattr`: Function used to measure sync latency.

#### btrfs

@@ -724,24 +715,24 @@ filesystem, the collector needs to attach `kprobes` and `kretprobes` for each of

> Note: We are listing two functions used to measure `read` latency, but we use either `btrfs_file_read_iter` or
> `generic_file_read_iter`, depending on kernel version; the helper sketched after this list shows one way to detect which symbol the running kernel exports.

-- `btrfs_file_read_iter`: Function used to measure read latency since kernel `5.10.0`.
-- `generic_file_read_iter`: Like `btrfs_file_read_iter`, but this function was used before kernel `5.10.0`.
-- `btrfs_file_write_iter`: Function used to write data.
-- `btrfs_file_open`: Function used to open files.
-- `btrfs_sync_file`: Function used to synchronize data to filesystem.
+- `btrfs_file_read_iter`: Function used to measure read latency since kernel `5.10.0`.
+- `generic_file_read_iter`: Like `btrfs_file_read_iter`, but this function was used before kernel `5.10.0`.
+- `btrfs_file_write_iter`: Function used to write data.
+- `btrfs_file_open`: Function used to open files.
+- `btrfs_sync_file`: Function used to synchronize data to the filesystem.
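One way to make that kernel-version-dependent choice at runtime is to look the symbol up in `/proc/kallsyms`. This is an illustrative userspace helper, not the plugin's exact code:

```c
// Illustrative helper: check /proc/kallsyms for a symbol name to decide
// which probe target exists on the running kernel.
#include <stdio.h>
#include <string.h>

static int symbol_exists(const char *sym)
{
    char line[512];
    FILE *f = fopen("/proc/kallsyms", "r");
    if (!f)
        return 0;

    int found = 0;
    while (fgets(line, sizeof(line), f)) {
        char name[256];
        /* each line reads: "<address> <type> <name> [module]" */
        if (sscanf(line, "%*s %*s %255s", name) == 1 && !strcmp(name, sym)) {
            found = 1;
            break;
        }
    }
    fclose(f);
    return found;
}

/* Usage: pick the read probe the way the note above describes.
 *   const char *target = symbol_exists("btrfs_file_read_iter") ?
 *                        "btrfs_file_read_iter" : "generic_file_read_iter";
 */
```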
#### File descriptor

To give metrics related to `open` and `close` events, instead of attaching kprobes for each syscall used to perform these events, the collector attaches `kprobes` to the common functions used by the syscalls:

-- [`do_sys_open`](https://0xax.gitbooks.io/linux-insides/content/SysCall/linux-syscall-5.html): Internal function used to
+- [`do_sys_open`](https://0xax.gitbooks.io/linux-insides/content/SysCall/linux-syscall-5.html): Internal function used to
open files.
-- [`do_sys_openat2`](https://elixir.bootlin.com/linux/v5.6/source/fs/open.c#L1162):
+- [`do_sys_openat2`](https://elixir.bootlin.com/linux/v5.6/source/fs/open.c#L1162):
Function called from `do_sys_open` since version `5.6.0`.
-- [`close_fd`](https://www.mail-archive.com/linux-kernel@vger.kernel.org/msg2271761.html): Function used to close file
+- [`close_fd`](https://www.mail-archive.com/linux-kernel@vger.kernel.org/msg2271761.html): Function used to close file
descriptor since kernel `5.11.0`.
-- `__close_fd`: Function used to close files before version `5.11.0`.
+- `__close_fd`: Function used to close files before version `5.11.0`.

#### File error

@@ -761,21 +752,21 @@ To measure the latency and total quantity of executing some VFS-level functions,
ebpf.plugin needs to attach kprobes and kretprobes for each of the following
functions:

-- `vfs_write`: Function used monitoring the number of successful & failed
+- `vfs_write`: Function used for monitoring the number of successful & failed
filesystem write calls, as well as the total number of written bytes.
-- `vfs_writev`: Same function as `vfs_write` but for vector writes (i.e. a
+- `vfs_writev`: Same function as `vfs_write` but for vector writes (i.e. a
single write operation using a group of buffers rather than 1).
-- `vfs_read`: Function used for monitoring the number of successful & failed
+- `vfs_read`: Function used for monitoring the number of successful & failed
filesystem read calls, as well as the total number of read bytes.
-- `vfs_readv` Same function as `vfs_read` but for vector reads (i.e. a single
+- `vfs_readv`: Same function as `vfs_read` but for vector reads (i.e. a single
read operation using a group of buffers rather than 1).
-- `vfs_unlink`: Function used for monitoring the number of successful & failed
+- `vfs_unlink`: Function used for monitoring the number of successful & failed
filesystem unlink calls.
-- `vfs_fsync`: Function used for monitoring the number of successful & failed
+- `vfs_fsync`: Function used for monitoring the number of successful & failed
filesystem fsync calls.
-- `vfs_open`: Function used for monitoring the number of successful & failed
+- `vfs_open`: Function used for monitoring the number of successful & failed
filesystem open calls.
-- `vfs_create`: Function used for monitoring the number of successful & failed
+- `vfs_create`: Function used for monitoring the number of successful & failed
filesystem create calls.

##### VFS Deleted objects

@@ -816,8 +807,8 @@ Metrics for directory cache are collected using kprobe for `lookup_fast`, becaus
times this function is accessed. On the other hand, for `d_lookup` we are not only interested in the number of times
it is accessed, but also in possible errors, so we need to attach a `kretprobe`. For this reason, the following is used (a sketch of the `kretprobe` side follows this list):

-- [`lookup_fast`](https://lwn.net/Articles/649115/): Called to look at data inside the directory cache.
-- [`d_lookup`](https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/fs/dcache.c?id=052b398a43a7de8c68c13e7fa05d6b3d16ce6801#n2223):
+- [`lookup_fast`](https://lwn.net/Articles/649115/): Called to look at data inside the directory cache.
+- [`d_lookup`](https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/fs/dcache.c?id=052b398a43a7de8c68c13e7fa05d6b3d16ce6801#n2223):
Called when the desired file is not inside the directory cache.
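The fragment below (a simplified sketch, x86_64 assumed) shows why the `kretprobe` matters here: the result of the call is only visible on function exit, so a `NULL` return can be counted as a miss or error:

```c
// Simplified kretprobe sketch (x86_64 assumed): the return value of
// d_lookup() is only visible on function exit, which is what makes
// error/miss accounting possible.
#define __TARGET_ARCH_x86
#include <linux/bpf.h>
#include <linux/ptrace.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("kretprobe/d_lookup")
int ret_d_lookup(struct pt_regs *ctx)
{
    void *dentry = (void *)PT_REGS_RC(ctx);

    if (!dentry) {
        /* entry not found in the directory cache: count a miss here */
    }
    return 0;
}

char LICENSE[] SEC("license") = "GPL";
```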
##### Directory Cache Interpretation

@@ -830,8 +821,8 @@ accessed before.

The following `tracing` points are used to collect `mount` & `unmount` call counts:

-- [`mount`](https://man7.org/linux/man-pages/man2/mount.2.html): mount filesystem on host.
-- [`umount`](https://man7.org/linux/man-pages/man2/umount.2.html): umount filesystem on host.
+- [`mount`](https://man7.org/linux/man-pages/man2/mount.2.html): Mount a filesystem on the host.
+- [`umount`](https://man7.org/linux/man-pages/man2/umount.2.html): Unmount a filesystem on the host.

### Networking Stack

@@ -855,10 +846,10 @@ to send & receive data and to close connections when `TCP` protocol is used.

This chart shows calls to the following functions:

-- `tcp_sendmsg`: Function responsible to send data for a specified destination.
-- `tcp_cleanup_rbuf`: We use this function instead of `tcp_recvmsg`, because the last one misses `tcp_read_sock` traffic
+- `tcp_sendmsg`: Function responsible for sending data to a specified destination.
+- `tcp_cleanup_rbuf`: We use this function instead of `tcp_recvmsg`, because the latter misses `tcp_read_sock` traffic
and we would also need to add more `tracing` to get the socket and packet size.
-- `tcp_close`: Function responsible to close connection.
+- `tcp_close`: Function responsible for closing the connection.

#### TCP retransmit

@@ -881,7 +872,7 @@ calls, it monitors the number of bytes sent and received.

These are tracepoints related to [OOM](https://en.wikipedia.org/wiki/Out_of_memory) killing processes.

-- `oom/mark_victim`: Monitors when an oomkill event happens.
+- `oom/mark_victim`: Monitors when an oomkill event happens.

## Known issues

@@ -897,15 +888,14 @@ node is experiencing high memory usage and there is no obvious culprit to be fou

- Disable [integration with apps](#integration-with-appsplugin).
- Disable [integration with cgroup](#integration-with-cgroupsplugin).

-If with these changes you still suspect eBPF using too much memory, and there is no obvious culprit to be found
+If you still suspect eBPF of using too much memory after these changes, and there is no obvious culprit to be found
in the `apps.mem` chart, consider testing for high kernel memory usage by [disabling eBPF monitoring](#configuring-ebpfplugin).
-Next, [restart Netdata](/packaging/installer/README.md#maintaining-a-netdata-agent-installation) with
-`sudo systemctl restart netdata` to see if system memory usage (see the `system.ram` chart) has dropped significantly.
+Next, [restart Netdata](/docs/netdata-agent/start-stop-restart.md) to see if system memory usage (see the `system.ram` chart) has dropped significantly.

Beginning with `v1.31`, kernel memory usage is configurable via the [`pid table size` setting](#pid-table-size) in `ebpf.conf` (see the example at the end of this section).

-The total memory usage is a well known [issue](https://lore.kernel.org/all/167821082315.1693.6957546778534183486.git-patchwork-notify@kernel.org/)
+The total memory usage is a well-known [issue](https://lore.kernel.org/all/167821082315.1693.6957546778534183486.git-patchwork-notify@kernel.org/)
for eBPF; it is not a bug in the plugin.
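For example, to lower the kernel-side memory footprint you could shrink the PID hash tables in `ebpf.conf`. The value below is only illustrative; check the `[global]` section of the shipped file for the default on your version:

```text
[global]
    pid table size = 16384
```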
### SELinux @@ -950,7 +940,7 @@ This will create two new files: `netdata_ebpf.te` and `netdata_ebpf.mod`. Edit the `netdata_ebpf.te` file to change the options `class` and `allow`. You should have the following at the end of the `netdata_ebpf.te` file. -```conf +```text module netdata_ebpf 1.0; require { type unconfined_service_t; @@ -981,7 +971,7 @@ a feature called "lockdown," which may affect `ebpf.plugin` depending how the ke shows how the lockdown module impacts `ebpf.plugin` based on the selected options: | Enforcing kernel lockdown | Enable lockdown LSM early in init | Default lockdown mode | Can `ebpf.plugin` run with this? | -| :------------------------ | :-------------------------------- | :-------------------- | :------------------------------- | +|:--------------------------|:----------------------------------|:----------------------|:---------------------------------| | YES | NO | NO | YES | | YES | Yes | None | YES | | YES | Yes | Integrity | YES | diff --git a/src/collectors/ebpf.plugin/ebpf.c b/src/collectors/ebpf.plugin/ebpf.c index 5424ea8f0..4cc263e73 100644 --- a/src/collectors/ebpf.plugin/ebpf.c +++ b/src/collectors/ebpf.plugin/ebpf.c @@ -19,11 +19,7 @@ char *ebpf_plugin_dir = PLUGINS_DIR; static char *ebpf_configured_log_dir = LOG_DIR; char *ebpf_algorithms[] = { EBPF_CHART_ALGORITHM_ABSOLUTE, EBPF_CHART_ALGORITHM_INCREMENTAL}; -struct config collector_config = { .first_section = NULL, - .last_section = NULL, - .mutex = NETDATA_MUTEX_INITIALIZER, - .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare }, - .rwlock = AVL_LOCK_INITIALIZER } }; +struct config collector_config = APPCONFIG_INITIALIZER; int running_on_kernel = 0; int ebpf_nprocs; @@ -661,7 +657,7 @@ struct vfs_bpf *vfs_bpf_obj = NULL; #else void *default_btf = NULL; #endif -char *btf_path = NULL; +const char *btf_path = NULL; /***************************************************************** * @@ -1415,7 +1411,7 @@ void ebpf_send_data_aral_chart(ARAL *memory, ebpf_module_t *em) char *mem = { NETDATA_EBPF_STAT_DIMENSION_MEMORY }; char *aral = { NETDATA_EBPF_STAT_DIMENSION_ARAL }; - struct aral_statistics *stats = aral_statistics(memory); + struct aral_statistics *stats = aral_get_statistics(memory); ebpf_write_begin_chart(NETDATA_MONITORING_FAMILY, em->memory_usage, ""); write_chart_dimension(mem, (long long)stats->structures.allocated_bytes); @@ -1608,7 +1604,7 @@ static void get_ipv6_last_addr(union netdata_ip_t *out, union netdata_ip_t *in, * * @return it returns 0 on success and -1 otherwise. 
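 *
 * Example: with @domain set to AF_INET, the text "1.2.3.4" fills @dst with
 * the bytes 0x01 0x02 0x03 0x04 (network byte order); any text rejected by
 * inet_pton() makes the function log an error and return -1.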
*/ -static inline int ebpf_ip2nl(uint8_t *dst, char *ip, int domain, char *source) +static inline int ebpf_ip2nl(uint8_t *dst, const char *ip, int domain, char *source) { if (inet_pton(domain, ip, dst) <= 0) { netdata_log_error("The address specified (%s) is invalid ", source); @@ -1666,14 +1662,14 @@ void ebpf_clean_ip_structure(ebpf_network_viewer_ip_list_t **clean) * @param out a pointer to store the link list * @param ip the value given as parameter */ -static void ebpf_parse_ip_list_unsafe(void **out, char *ip) +static void ebpf_parse_ip_list_unsafe(void **out, const char *ip) { ebpf_network_viewer_ip_list_t **list = (ebpf_network_viewer_ip_list_t **)out; char *ipdup = strdupz(ip); union netdata_ip_t first = { }; union netdata_ip_t last = { }; - char *is_ipv6; + const char *is_ipv6; if (*ip == '*' && *(ip+1) == '\0') { memset(first.addr8, 0, sizeof(first.addr8)); memset(last.addr8, 0xFF, sizeof(last.addr8)); @@ -1684,7 +1680,8 @@ static void ebpf_parse_ip_list_unsafe(void **out, char *ip) goto storethisip; } - char *end = ip; + char *enddup = strdupz(ip); + char *end = enddup; // Move while I cannot find a separator while (*end && *end != '/' && *end != '-') end++; @@ -1814,7 +1811,7 @@ static void ebpf_parse_ip_list_unsafe(void **out, char *ip) ebpf_network_viewer_ip_list_t *store; - storethisip: +storethisip: store = callocz(1, sizeof(ebpf_network_viewer_ip_list_t)); store->value = ipdup; store->hash = simple_hash(ipdup); @@ -1825,8 +1822,9 @@ static void ebpf_parse_ip_list_unsafe(void **out, char *ip) ebpf_fill_ip_list_unsafe(list, store, "socket"); return; - cleanipdup: +cleanipdup: freez(ipdup); + freez(enddup); } /** @@ -1836,7 +1834,7 @@ static void ebpf_parse_ip_list_unsafe(void **out, char *ip) * * @param ptr is a pointer with the text to parse. */ -void ebpf_parse_ips_unsafe(char *ptr) +void ebpf_parse_ips_unsafe(const char *ptr) { // No value if (unlikely(!ptr)) @@ -1927,7 +1925,7 @@ static inline void fill_port_list(ebpf_network_viewer_port_list_t **out, ebpf_ne * @param out a pointer to store the link list * @param service the service used to create the structure that will be linked. */ -static void ebpf_parse_service_list(void **out, char *service) +static void ebpf_parse_service_list(void **out, const char *service) { ebpf_network_viewer_port_list_t **list = (ebpf_network_viewer_port_list_t **)out; struct servent *serv = getservbyname((const char *)service, "tcp"); @@ -1956,8 +1954,10 @@ static void ebpf_parse_service_list(void **out, char *service) * @param out a pointer to store the link list * @param range the informed range for the user. */ -static void ebpf_parse_port_list(void **out, char *range) -{ +static void ebpf_parse_port_list(void **out, const char *range_param) { + char range[strlen(range_param) + 1]; + strncpyz(range, range_param, strlen(range_param)); + int first, last; ebpf_network_viewer_port_list_t **list = (ebpf_network_viewer_port_list_t **)out; @@ -2029,7 +2029,7 @@ static void ebpf_parse_port_list(void **out, char *range) * * @param ptr is a pointer with the text to parse. */ -void ebpf_parse_ports(char *ptr) +void ebpf_parse_ports(const char *ptr) { // No value if (unlikely(!ptr)) @@ -2480,7 +2480,7 @@ static void ebpf_link_hostname(ebpf_network_viewer_hostname_list_t **out, ebpf_n * @param out is the output link list * @param parse is a pointer with the text to parser. 
*/ -static void ebpf_link_hostnames(char *parse) +static void ebpf_link_hostnames(const char *parse) { // No value if (unlikely(!parse)) @@ -2536,7 +2536,7 @@ void parse_network_viewer_section(struct config *cfg) EBPF_CONFIG_RESOLVE_SERVICE, CONFIG_BOOLEAN_YES); - char *value = appconfig_get(cfg, EBPF_NETWORK_VIEWER_SECTION, EBPF_CONFIG_PORTS, NULL); + const char *value = appconfig_get(cfg, EBPF_NETWORK_VIEWER_SECTION, EBPF_CONFIG_PORTS, NULL); ebpf_parse_ports(value); if (network_viewer_opt.hostname_resolution_enabled) { @@ -2684,7 +2684,7 @@ static void ebpf_allocate_common_vectors() * * @param ptr the option given by users */ -static inline void ebpf_how_to_load(char *ptr) +static inline void ebpf_how_to_load(const char *ptr) { if (!strcasecmp(ptr, EBPF_CFG_LOAD_MODE_RETURN)) ebpf_set_thread_mode(MODE_RETURN); @@ -2775,7 +2775,7 @@ static inline void ebpf_set_load_mode(netdata_ebpf_load_mode_t load, netdata_ebp * @param str value read from configuration file. * @param origin specify the configuration file loaded */ -static inline void epbf_update_load_mode(char *str, netdata_ebpf_load_mode_t origin) +static inline void epbf_update_load_mode(const char *str, netdata_ebpf_load_mode_t origin) { netdata_ebpf_load_mode_t load = epbf_convert_string_to_load_mode(str); @@ -2808,7 +2808,7 @@ static void read_collector_values(int *disable_cgroups, int update_every, netdata_ebpf_load_mode_t origin) { // Read global section - char *value; + const char *value; if (appconfig_exists(&collector_config, EBPF_GLOBAL_SECTION, "load")) // Backward compatibility value = appconfig_get(&collector_config, EBPF_GLOBAL_SECTION, "load", EBPF_CFG_LOAD_MODE_DEFAULT); @@ -4005,7 +4005,6 @@ static void ebpf_manage_pid(pid_t pid) */ int main(int argc, char **argv) { - clocks_init(); nd_log_initialize_for_external_plugins(NETDATA_EBPF_PLUGIN_NAME); ebpf_set_global_variables(); @@ -4034,6 +4033,10 @@ int main(int argc, char **argv) #ifdef LIBBPF_MAJOR_VERSION libbpf_set_strict_mode(LIBBPF_STRICT_ALL); + +#ifndef NETDATA_INTERNAL_CHECKS + libbpf_set_print(netdata_silent_libbpf_vfprintf); +#endif #endif ebpf_read_local_addresses_unsafe(); @@ -4072,16 +4075,14 @@ int main(int argc, char **argv) } } - usec_t step = USEC_PER_SEC; heartbeat_t hb; - heartbeat_init(&hb); + heartbeat_init(&hb, USEC_PER_SEC); int update_apps_every = (int) EBPF_CFG_UPDATE_APPS_EVERY_DEFAULT; - uint32_t max_period = EBPF_CLEANUP_FACTOR; int update_apps_list = update_apps_every - 1; int process_maps_per_core = ebpf_modules[EBPF_MODULE_PROCESS_IDX].maps_per_core; //Plugin will be killed when it receives a signal for ( ; !ebpf_plugin_stop(); global_iterations_counter++) { - (void)heartbeat_next(&hb, step); + (void)heartbeat_next(&hb); if (global_iterations_counter % EBPF_DEFAULT_UPDATE_EVERY == 0) { pthread_mutex_lock(&lock); @@ -4099,7 +4100,7 @@ int main(int argc, char **argv) pthread_mutex_lock(&collect_data_mutex); ebpf_parse_proc_files(); if (collect_pids & (1<pid); + snprintfz(filename, FILENAME_MAX, "%s/proc/%u/cmdline", netdata_configured_host_prefix, p->pid); int ret = 0; @@ -490,7 +490,7 @@ static inline int read_proc_pid_stat(ebpf_pid_data_t *p) char *comm = procfile_lineword(ff, 0, 1); int32_t ppid = (int32_t)str2pid_t(procfile_lineword(ff, 0, 3)); - if (p->ppid == ppid && p->target) + if (p->ppid == (uint32_t)ppid && p->target) goto without_cmdline_target; p->ppid = ppid; @@ -546,7 +546,7 @@ static inline int ebpf_collect_data_for_pid(pid_t pid) read_proc_pid_stat(p); // check its parent pid - if (unlikely( p->ppid > pid_max)) { + if 
(unlikely( p->ppid > (uint32_t)pid_max)) { netdata_log_error("Pid %d (command '%s') states invalid parent pid %u. Using 0.", pid, p->comm, p->ppid); p->ppid = 0; } @@ -906,9 +906,8 @@ void ebpf_process_sum_values_for_pids(ebpf_process_stat_t *process, struct ebpf_ * * @param tbl_pid_stats_fd The mapped file descriptor for the hash table. * @param maps_per_core do I have hash maps per core? - * @param max_period max period to wait before remove from hash table. */ -void collect_data_for_all_processes(int tbl_pid_stats_fd, int maps_per_core, uint32_t max_period) +void collect_data_for_all_processes(int tbl_pid_stats_fd, int maps_per_core) { if (tbl_pid_stats_fd == -1) return; diff --git a/src/collectors/ebpf.plugin/ebpf_apps.h b/src/collectors/ebpf.plugin/ebpf_apps.h index 98c9995da..5bf8953ad 100644 --- a/src/collectors/ebpf.plugin/ebpf_apps.h +++ b/src/collectors/ebpf.plugin/ebpf_apps.h @@ -495,7 +495,7 @@ int ebpf_read_hash_table(void *ep, int fd, uint32_t pid); int get_pid_comm(pid_t pid, size_t n, char *dest); -void collect_data_for_all_processes(int tbl_pid_stats_fd, int maps_per_core, uint32_t max_period); +void collect_data_for_all_processes(int tbl_pid_stats_fd, int maps_per_core); void ebpf_process_apps_accumulator(ebpf_process_stat_t *out, int maps_per_core); // The default value is at least 32 times smaller than maximum number of PIDs allowed on system, diff --git a/src/collectors/ebpf.plugin/ebpf_cachestat.c b/src/collectors/ebpf.plugin/ebpf_cachestat.c index 8c0260d51..49a5d98a1 100644 --- a/src/collectors/ebpf.plugin/ebpf_cachestat.c +++ b/src/collectors/ebpf.plugin/ebpf_cachestat.c @@ -43,11 +43,7 @@ ebpf_local_maps_t cachestat_maps[] = {{.name = "cstat_global", .internal_input = #endif }}; -struct config cachestat_config = { .first_section = NULL, - .last_section = NULL, - .mutex = NETDATA_MUTEX_INITIALIZER, - .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare }, - .rwlock = AVL_LOCK_INITIALIZER } }; +struct config cachestat_config = APPCONFIG_INITIALIZER; netdata_ebpf_targets_t cachestat_targets[] = { {.name = "add_to_page_cache_lru", .mode = EBPF_LOAD_TRAMPOLINE}, {.name = "mark_page_accessed", .mode = EBPF_LOAD_TRAMPOLINE}, @@ -716,9 +712,8 @@ static inline void cachestat_save_pid_values(netdata_publish_cachestat_t *out, n * Read the apps table and store data inside the structure. * * @param maps_per_core do I need to read all cores? 
- * @param max_period limit of iterations without updates before remove data from hash table */ -static void ebpf_read_cachestat_apps_table(int maps_per_core, uint32_t max_period) +static void ebpf_read_cachestat_apps_table(int maps_per_core) { netdata_cachestat_pid_t *cv = cachestat_vector; int fd = cachestat_maps[NETDATA_CACHESTAT_PID_STATS].map_fd; @@ -842,28 +837,25 @@ void ebpf_resume_apps_data() */ void *ebpf_read_cachestat_thread(void *ptr) { - heartbeat_t hb; - heartbeat_init(&hb); - ebpf_module_t *em = (ebpf_module_t *)ptr; int maps_per_core = em->maps_per_core; int update_every = em->update_every; - uint32_t max_period = EBPF_CLEANUP_FACTOR; int counter = update_every - 1; uint32_t lifetime = em->lifetime; uint32_t running_time = 0; - usec_t period = update_every * USEC_PER_SEC; pids_fd[EBPF_PIDS_CACHESTAT_IDX] = cachestat_maps[NETDATA_CACHESTAT_PID_STATS].map_fd; + heartbeat_t hb; + heartbeat_init(&hb, update_every * USEC_PER_SEC); while (!ebpf_plugin_stop() && running_time < lifetime) { - (void)heartbeat_next(&hb, period); + (void)heartbeat_next(&hb); if (ebpf_plugin_stop() || ++counter != update_every) continue; pthread_mutex_lock(&collect_data_mutex); - ebpf_read_cachestat_apps_table(maps_per_core, max_period); + ebpf_read_cachestat_apps_table(maps_per_core); ebpf_resume_apps_data(); pthread_mutex_unlock(&collect_data_mutex); @@ -1407,7 +1399,7 @@ static void cachestat_collector(ebpf_module_t *em) int update_every = em->update_every; int maps_per_core = em->maps_per_core; heartbeat_t hb; - heartbeat_init(&hb); + heartbeat_init(&hb, USEC_PER_SEC); int counter = update_every - 1; //This will be cancelled by its parent uint32_t running_time = 0; @@ -1415,7 +1407,7 @@ static void cachestat_collector(ebpf_module_t *em) netdata_idx_t *stats = em->hash_table_stats; memset(stats, 0, sizeof(em->hash_table_stats)); while (!ebpf_plugin_stop() && running_time < lifetime) { - (void)heartbeat_next(&hb, USEC_PER_SEC); + (void)heartbeat_next(&hb); if (ebpf_plugin_stop() || ++counter != update_every) continue; diff --git a/src/collectors/ebpf.plugin/ebpf_cgroup.c b/src/collectors/ebpf.plugin/ebpf_cgroup.c index 9e1fa8231..0bc5989e1 100644 --- a/src/collectors/ebpf.plugin/ebpf_cgroup.c +++ b/src/collectors/ebpf.plugin/ebpf_cgroup.c @@ -373,13 +373,12 @@ void ebpf_create_charts_on_systemd(ebpf_systemd_args_t *chart) */ void *ebpf_cgroup_integration(void *ptr __maybe_unused) { - usec_t step = USEC_PER_SEC; int counter = NETDATA_EBPF_CGROUP_UPDATE - 1; heartbeat_t hb; - heartbeat_init(&hb); + heartbeat_init(&hb, USEC_PER_SEC); //Plugin will be killed when it receives a signal while (!ebpf_plugin_stop()) { - (void)heartbeat_next(&hb, step); + heartbeat_next(&hb); // We are using a small heartbeat time to wake up thread, // but we should not update so frequently the shared memory data diff --git a/src/collectors/ebpf.plugin/ebpf_dcstat.c b/src/collectors/ebpf.plugin/ebpf_dcstat.c index e6053cb4a..e84517686 100644 --- a/src/collectors/ebpf.plugin/ebpf_dcstat.c +++ b/src/collectors/ebpf.plugin/ebpf_dcstat.c @@ -12,11 +12,7 @@ netdata_dcstat_pid_t *dcstat_vector = NULL; static netdata_idx_t dcstat_hash_values[NETDATA_DCSTAT_IDX_END]; static netdata_idx_t *dcstat_values = NULL; -struct config dcstat_config = { .first_section = NULL, - .last_section = NULL, - .mutex = NETDATA_MUTEX_INITIALIZER, - .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare }, - .rwlock = AVL_LOCK_INITIALIZER } }; +struct config dcstat_config = APPCONFIG_INITIALIZER; ebpf_local_maps_t dcstat_maps[] = 
{{.name = "dcstat_global", .internal_input = NETDATA_DIRECTORY_CACHE_END, .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC, @@ -542,9 +538,8 @@ static void ebpf_dcstat_apps_accumulator(netdata_dcstat_pid_t *out, int maps_per * Read the apps table and store data inside the structure. * * @param maps_per_core do I need to read all cores? - * @param max_period limit of iterations without updates before remove data from hash table */ -static void ebpf_read_dc_apps_table(int maps_per_core, uint32_t max_period) +static void ebpf_read_dc_apps_table(int maps_per_core) { netdata_dcstat_pid_t *cv = dcstat_vector; int fd = dcstat_maps[NETDATA_DCSTAT_PID_STATS].map_fd; @@ -644,9 +639,6 @@ void ebpf_dc_resume_apps_data() */ void *ebpf_read_dcstat_thread(void *ptr) { - heartbeat_t hb; - heartbeat_init(&hb); - ebpf_module_t *em = (ebpf_module_t *)ptr; int maps_per_core = em->maps_per_core; @@ -659,16 +651,16 @@ void *ebpf_read_dcstat_thread(void *ptr) uint32_t lifetime = em->lifetime; uint32_t running_time = 0; - usec_t period = update_every * USEC_PER_SEC; - uint32_t max_period = EBPF_CLEANUP_FACTOR; pids_fd[EBPF_PIDS_DCSTAT_IDX] = dcstat_maps[NETDATA_DCSTAT_PID_STATS].map_fd; + heartbeat_t hb; + heartbeat_init(&hb, update_every * USEC_PER_SEC); while (!ebpf_plugin_stop() && running_time < lifetime) { - (void)heartbeat_next(&hb, period); + (void)heartbeat_next(&hb); if (ebpf_plugin_stop() || ++counter != update_every) continue; pthread_mutex_lock(&collect_data_mutex); - ebpf_read_dc_apps_table(maps_per_core, max_period); + ebpf_read_dc_apps_table(maps_per_core); ebpf_dc_resume_apps_data(); pthread_mutex_unlock(&collect_data_mutex); @@ -1271,7 +1263,7 @@ static void dcstat_collector(ebpf_module_t *em) int cgroups = em->cgroup_charts; int update_every = em->update_every; heartbeat_t hb; - heartbeat_init(&hb); + heartbeat_init(&hb, USEC_PER_SEC); int counter = update_every - 1; int maps_per_core = em->maps_per_core; uint32_t running_time = 0; @@ -1279,7 +1271,7 @@ static void dcstat_collector(ebpf_module_t *em) netdata_idx_t *stats = em->hash_table_stats; memset(stats, 0, sizeof(em->hash_table_stats)); while (!ebpf_plugin_stop() && running_time < lifetime) { - (void)heartbeat_next(&hb, USEC_PER_SEC); + heartbeat_next(&hb); if (ebpf_plugin_stop() || ++counter != update_every) continue; diff --git a/src/collectors/ebpf.plugin/ebpf_disk.c b/src/collectors/ebpf.plugin/ebpf_disk.c index 246f98702..3d9c5789c 100644 --- a/src/collectors/ebpf.plugin/ebpf_disk.c +++ b/src/collectors/ebpf.plugin/ebpf_disk.c @@ -6,11 +6,7 @@ #include "ebpf.h" #include "ebpf_disk.h" -struct config disk_config = { .first_section = NULL, - .last_section = NULL, - .mutex = NETDATA_MUTEX_INITIALIZER, - .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare }, - .rwlock = AVL_LOCK_INITIALIZER } }; +struct config disk_config = APPCONFIG_INITIALIZER; static ebpf_local_maps_t disk_maps[] = {{.name = "tbl_disk_iocall", .internal_input = NETDATA_DISK_HISTOGRAM_LENGTH, .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC, @@ -775,13 +771,13 @@ static void disk_collector(ebpf_module_t *em) int update_every = em->update_every; heartbeat_t hb; - heartbeat_init(&hb); + heartbeat_init(&hb, USEC_PER_SEC); int counter = update_every - 1; int maps_per_core = em->maps_per_core; uint32_t running_time = 0; uint32_t lifetime = em->lifetime; while (!ebpf_plugin_stop() && running_time < lifetime) { - (void)heartbeat_next(&hb, USEC_PER_SEC); + heartbeat_next(&hb); if (ebpf_plugin_stop() || ++counter != update_every) continue; diff --git 
a/src/collectors/ebpf.plugin/ebpf_fd.c b/src/collectors/ebpf.plugin/ebpf_fd.c index 61a9595cc..256efa4fe 100644 --- a/src/collectors/ebpf.plugin/ebpf_fd.c +++ b/src/collectors/ebpf.plugin/ebpf_fd.c @@ -46,9 +46,7 @@ static ebpf_local_maps_t fd_maps[] = {{.name = "tbl_fd_pid", .internal_input = N }}; -struct config fd_config = { .first_section = NULL, .last_section = NULL, .mutex = NETDATA_MUTEX_INITIALIZER, - .index = {.avl_tree = { .root = NULL, .compar = appconfig_section_compare }, - .rwlock = AVL_LOCK_INITIALIZER } }; +struct config fd_config = APPCONFIG_INITIALIZER; static netdata_idx_t fd_hash_values[NETDATA_FD_COUNTER]; static netdata_idx_t *fd_values = NULL; @@ -683,9 +681,8 @@ static void fd_apps_accumulator(netdata_fd_stat_t *out, int maps_per_core) * Read the apps table and store data inside the structure. * * @param maps_per_core do I need to read all cores? - * @param max_period limit of iterations without updates before remove data from hash table */ -static void ebpf_read_fd_apps_table(int maps_per_core, uint32_t max_period) +static void ebpf_read_fd_apps_table(int maps_per_core) { netdata_fd_stat_t *fv = fd_vector; int fd = fd_maps[NETDATA_FD_PID_STATS].map_fd; @@ -783,9 +780,6 @@ void ebpf_fd_resume_apps_data() */ void *ebpf_read_fd_thread(void *ptr) { - heartbeat_t hb; - heartbeat_init(&hb); - ebpf_module_t *em = (ebpf_module_t *)ptr; int maps_per_core = em->maps_per_core; @@ -798,16 +792,17 @@ void *ebpf_read_fd_thread(void *ptr) uint32_t lifetime = em->lifetime; uint32_t running_time = 0; - int period = USEC_PER_SEC; - uint32_t max_period = EBPF_CLEANUP_FACTOR; pids_fd[EBPF_PIDS_FD_IDX] = fd_maps[NETDATA_FD_PID_STATS].map_fd; + + heartbeat_t hb; + heartbeat_init(&hb, USEC_PER_SEC); while (!ebpf_plugin_stop() && running_time < lifetime) { - (void)heartbeat_next(&hb, period); + heartbeat_next(&hb); if (ebpf_plugin_stop() || ++counter != update_every) continue; pthread_mutex_lock(&collect_data_mutex); - ebpf_read_fd_apps_table(maps_per_core, max_period); + ebpf_read_fd_apps_table(maps_per_core); ebpf_fd_resume_apps_data(); pthread_mutex_unlock(&collect_data_mutex); @@ -1217,8 +1212,6 @@ static void ebpf_fd_send_cgroup_data(ebpf_module_t *em) static void fd_collector(ebpf_module_t *em) { int cgroups = em->cgroup_charts; - heartbeat_t hb; - heartbeat_init(&hb); int update_every = em->update_every; int counter = update_every - 1; int maps_per_core = em->maps_per_core; @@ -1226,8 +1219,10 @@ static void fd_collector(ebpf_module_t *em) uint32_t lifetime = em->lifetime; netdata_idx_t *stats = em->hash_table_stats; memset(stats, 0, sizeof(em->hash_table_stats)); + heartbeat_t hb; + heartbeat_init(&hb, USEC_PER_SEC); while (!ebpf_plugin_stop() && running_time < lifetime) { - (void)heartbeat_next(&hb, USEC_PER_SEC); + heartbeat_next(&hb); if (ebpf_plugin_stop() || ++counter != update_every) continue; diff --git a/src/collectors/ebpf.plugin/ebpf_filesystem.c b/src/collectors/ebpf.plugin/ebpf_filesystem.c index 1187b03e9..30f3c7460 100644 --- a/src/collectors/ebpf.plugin/ebpf_filesystem.c +++ b/src/collectors/ebpf.plugin/ebpf_filesystem.c @@ -2,11 +2,7 @@ #include "ebpf_filesystem.h" -struct config fs_config = { .first_section = NULL, - .last_section = NULL, - .mutex = NETDATA_MUTEX_INITIALIZER, - .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare }, - .rwlock = AVL_LOCK_INITIALIZER } }; +struct config fs_config = APPCONFIG_INITIALIZER; ebpf_local_maps_t ext4_maps[] = {{.name = "tbl_ext4", .internal_input = NETDATA_KEY_CALLS_SYNC, .user_input = 0, .type = 
NETDATA_EBPF_MAP_STATIC, @@ -984,13 +980,13 @@ static void ebpf_histogram_send_data() static void filesystem_collector(ebpf_module_t *em) { int update_every = em->update_every; - heartbeat_t hb; - heartbeat_init(&hb); int counter = update_every - 1; uint32_t running_time = 0; uint32_t lifetime = em->lifetime; + heartbeat_t hb; + heartbeat_init(&hb, USEC_PER_SEC); while (!ebpf_plugin_stop() && running_time < lifetime) { - (void)heartbeat_next(&hb, USEC_PER_SEC); + heartbeat_next(&hb); if (ebpf_plugin_stop() || ++counter != update_every) continue; diff --git a/src/collectors/ebpf.plugin/ebpf_functions.c b/src/collectors/ebpf.plugin/ebpf_functions.c index 8e9fb01ed..267159a40 100644 --- a/src/collectors/ebpf.plugin/ebpf_functions.c +++ b/src/collectors/ebpf.plugin/ebpf_functions.c @@ -287,7 +287,7 @@ static void ebpf_function_socket_manipulation(const char *transaction, ebpf_module_t *em = &ebpf_modules[EBPF_MODULE_SOCKET_IDX]; char *words[PLUGINSD_MAX_WORDS] = {NULL}; - size_t num_words = quoted_strings_splitter_pluginsd(function, words, PLUGINSD_MAX_WORDS); + size_t num_words = quoted_strings_splitter_whitespace(function, words, PLUGINSD_MAX_WORDS); const char *name; int period = -1; rw_spinlock_write_lock(&ebpf_judy_pid.index.rw_spinlock); @@ -712,9 +712,9 @@ void *ebpf_function_thread(void *ptr) pthread_mutex_unlock(&lock); heartbeat_t hb; - heartbeat_init(&hb); + heartbeat_init(&hb, USEC_PER_SEC); while(!ebpf_plugin_stop()) { - (void)heartbeat_next(&hb, USEC_PER_SEC); + heartbeat_next(&hb); if (ebpf_plugin_stop()) { break; diff --git a/src/collectors/ebpf.plugin/ebpf_hardirq.c b/src/collectors/ebpf.plugin/ebpf_hardirq.c index 911425e54..e7974ac05 100644 --- a/src/collectors/ebpf.plugin/ebpf_hardirq.c +++ b/src/collectors/ebpf.plugin/ebpf_hardirq.c @@ -3,11 +3,7 @@ #include "ebpf.h" #include "ebpf_hardirq.h" -struct config hardirq_config = { .first_section = NULL, - .last_section = NULL, - .mutex = NETDATA_MUTEX_INITIALIZER, - .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare }, - .rwlock = AVL_LOCK_INITIALIZER } }; +struct config hardirq_config = APPCONFIG_INITIALIZER; static ebpf_local_maps_t hardirq_maps[] = { { @@ -575,15 +571,15 @@ static void hardirq_collector(ebpf_module_t *em) pthread_mutex_unlock(&lock); // loop and read from published data until ebpf plugin is closed. 
- heartbeat_t hb; - heartbeat_init(&hb); int update_every = em->update_every; int counter = update_every - 1; //This will be cancelled by its parent uint32_t running_time = 0; uint32_t lifetime = em->lifetime; + heartbeat_t hb; + heartbeat_init(&hb, USEC_PER_SEC); while (!ebpf_plugin_stop() && running_time < lifetime) { - (void)heartbeat_next(&hb, USEC_PER_SEC); + heartbeat_next(&hb); if (ebpf_plugin_stop() || ++counter != update_every) continue; diff --git a/src/collectors/ebpf.plugin/ebpf_mdflush.c b/src/collectors/ebpf.plugin/ebpf_mdflush.c index 77c109bff..3d70b7792 100644 --- a/src/collectors/ebpf.plugin/ebpf_mdflush.c +++ b/src/collectors/ebpf.plugin/ebpf_mdflush.c @@ -3,11 +3,7 @@ #include "ebpf.h" #include "ebpf_mdflush.h" -struct config mdflush_config = { .first_section = NULL, - .last_section = NULL, - .mutex = NETDATA_MUTEX_INITIALIZER, - .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare }, - .rwlock = AVL_LOCK_INITIALIZER } }; +struct config mdflush_config = APPCONFIG_INITIALIZER; #define MDFLUSH_MAP_COUNT 0 static ebpf_local_maps_t mdflush_maps[] = { @@ -341,14 +337,14 @@ static void mdflush_collector(ebpf_module_t *em) pthread_mutex_unlock(&lock); // loop and read from published data until ebpf plugin is closed. - heartbeat_t hb; - heartbeat_init(&hb); int counter = update_every - 1; int maps_per_core = em->maps_per_core; uint32_t running_time = 0; uint32_t lifetime = em->lifetime; + heartbeat_t hb; + heartbeat_init(&hb, USEC_PER_SEC); while (!ebpf_plugin_stop() && running_time < lifetime) { - (void)heartbeat_next(&hb, USEC_PER_SEC); + heartbeat_next(&hb); if (ebpf_plugin_stop() || ++counter != update_every) continue; diff --git a/src/collectors/ebpf.plugin/ebpf_mount.c b/src/collectors/ebpf.plugin/ebpf_mount.c index 7441cc6e2..4e310c8a6 100644 --- a/src/collectors/ebpf.plugin/ebpf_mount.c +++ b/src/collectors/ebpf.plugin/ebpf_mount.c @@ -22,9 +22,7 @@ static char *mount_dimension_name[NETDATA_EBPF_MOUNT_SYSCALL] = { "mount", "umou static netdata_syscall_stat_t mount_aggregated_data[NETDATA_EBPF_MOUNT_SYSCALL]; static netdata_publish_syscall_t mount_publish_aggregated[NETDATA_EBPF_MOUNT_SYSCALL]; -struct config mount_config = { .first_section = NULL, .last_section = NULL, .mutex = NETDATA_MUTEX_INITIALIZER, - .index = {.avl_tree = { .root = NULL, .compar = appconfig_section_compare }, - .rwlock = AVL_LOCK_INITIALIZER } }; +struct config mount_config = APPCONFIG_INITIALIZER; static netdata_idx_t mount_hash_values[NETDATA_MOUNT_END]; @@ -363,15 +361,15 @@ static void mount_collector(ebpf_module_t *em) { memset(mount_hash_values, 0, sizeof(mount_hash_values)); - heartbeat_t hb; - heartbeat_init(&hb); int update_every = em->update_every; int counter = update_every - 1; int maps_per_core = em->maps_per_core; uint32_t running_time = 0; uint32_t lifetime = em->lifetime; + heartbeat_t hb; + heartbeat_init(&hb, USEC_PER_SEC); while (!ebpf_plugin_stop() && running_time < lifetime) { - (void)heartbeat_next(&hb, USEC_PER_SEC); + heartbeat_next(&hb); if (ebpf_plugin_stop() || ++counter != update_every) continue; diff --git a/src/collectors/ebpf.plugin/ebpf_oomkill.c b/src/collectors/ebpf.plugin/ebpf_oomkill.c index 34361550b..d32095abc 100644 --- a/src/collectors/ebpf.plugin/ebpf_oomkill.c +++ b/src/collectors/ebpf.plugin/ebpf_oomkill.c @@ -3,11 +3,7 @@ #include "ebpf.h" #include "ebpf_oomkill.h" -struct config oomkill_config = { .first_section = NULL, - .last_section = NULL, - .mutex = NETDATA_MUTEX_INITIALIZER, - .index = { .avl_tree = { .root = NULL, .compar = 
appconfig_section_compare }, - .rwlock = AVL_LOCK_INITIALIZER } }; +struct config oomkill_config = APPCONFIG_INITIALIZER; #define OOMKILL_MAP_KILLCNT 0 static ebpf_local_maps_t oomkill_maps[] = { @@ -463,14 +459,14 @@ static void oomkill_collector(ebpf_module_t *em) memset(keys, 0, sizeof(keys)); // loop and read until ebpf plugin is closed. - heartbeat_t hb; - heartbeat_init(&hb); int counter = update_every - 1; uint32_t running_time = 0; uint32_t lifetime = em->lifetime; netdata_idx_t *stats = em->hash_table_stats; + heartbeat_t hb; + heartbeat_init(&hb, USEC_PER_SEC); while (!ebpf_plugin_stop() && running_time < lifetime) { - (void)heartbeat_next(&hb, USEC_PER_SEC); + (void)heartbeat_next(&hb); if (ebpf_plugin_stop() || ++counter != update_every) continue; diff --git a/src/collectors/ebpf.plugin/ebpf_process.c b/src/collectors/ebpf.plugin/ebpf_process.c index d2810f899..d80f7a3e8 100644 --- a/src/collectors/ebpf.plugin/ebpf_process.c +++ b/src/collectors/ebpf.plugin/ebpf_process.c @@ -57,11 +57,7 @@ ebpf_process_stat_t *process_stat_vector = NULL; static netdata_syscall_stat_t process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_END]; static netdata_publish_syscall_t process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_END]; -struct config process_config = { .first_section = NULL, - .last_section = NULL, - .mutex = NETDATA_MUTEX_INITIALIZER, - .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare }, - .rwlock = AVL_LOCK_INITIALIZER } }; +struct config process_config = APPCONFIG_INITIALIZER; /***************************************************************** * @@ -1124,8 +1120,6 @@ void ebpf_process_update_cgroup_algorithm() */ static void process_collector(ebpf_module_t *em) { - heartbeat_t hb; - heartbeat_init(&hb); int publish_global = em->global_charts; int cgroups = em->cgroup_charts; pthread_mutex_lock(&ebpf_exit_cleanup); @@ -1141,9 +1135,11 @@ static void process_collector(ebpf_module_t *em) uint32_t lifetime = em->lifetime; netdata_idx_t *stats = em->hash_table_stats; memset(stats, 0, sizeof(em->hash_table_stats)); + heartbeat_t hb; + heartbeat_init(&hb, USEC_PER_SEC); while (!ebpf_plugin_stop() && running_time < lifetime) { - usec_t dt = heartbeat_next(&hb, USEC_PER_SEC); - (void)dt; + heartbeat_next(&hb); + if (ebpf_plugin_stop()) break; diff --git a/src/collectors/ebpf.plugin/ebpf_shm.c b/src/collectors/ebpf.plugin/ebpf_shm.c index ac44549b2..6282a2547 100644 --- a/src/collectors/ebpf.plugin/ebpf_shm.c +++ b/src/collectors/ebpf.plugin/ebpf_shm.c @@ -12,11 +12,7 @@ netdata_ebpf_shm_t *shm_vector = NULL; static netdata_idx_t shm_hash_values[NETDATA_SHM_END]; static netdata_idx_t *shm_values = NULL; -struct config shm_config = { .first_section = NULL, - .last_section = NULL, - .mutex = NETDATA_MUTEX_INITIALIZER, - .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare }, - .rwlock = AVL_LOCK_INITIALIZER } }; +struct config shm_config = APPCONFIG_INITIALIZER; static ebpf_local_maps_t shm_maps[] = {{.name = "tbl_pid_shm", .internal_input = ND_EBPF_DEFAULT_PID_SIZE, .user_input = 0, @@ -569,9 +565,8 @@ static void ebpf_update_shm_cgroup() * Read the apps table and store data inside the structure. * * @param maps_per_core do I need to read all cores? 
- * @param max_period limit of iterations without updates before remove data from hash table */ -static void ebpf_read_shm_apps_table(int maps_per_core, uint32_t max_period) +static void ebpf_read_shm_apps_table(int maps_per_core) { netdata_ebpf_shm_t *cv = shm_vector; int fd = shm_maps[NETDATA_PID_SHM_TABLE].map_fd; @@ -1063,9 +1058,6 @@ void ebpf_shm_resume_apps_data() { */ void *ebpf_read_shm_thread(void *ptr) { - heartbeat_t hb; - heartbeat_init(&hb); - ebpf_module_t *em = (ebpf_module_t *)ptr; int maps_per_core = em->maps_per_core; @@ -1078,16 +1070,16 @@ void *ebpf_read_shm_thread(void *ptr) uint32_t lifetime = em->lifetime; uint32_t running_time = 0; - usec_t period = update_every * USEC_PER_SEC; - uint32_t max_period = EBPF_CLEANUP_FACTOR; pids_fd[EBPF_PIDS_SHM_IDX] = shm_maps[NETDATA_PID_SHM_TABLE].map_fd; + heartbeat_t hb; + heartbeat_init(&hb, update_every * USEC_PER_SEC); while (!ebpf_plugin_stop() && running_time < lifetime) { - (void)heartbeat_next(&hb, period); + (void)heartbeat_next(&hb); if (ebpf_plugin_stop() || ++counter != update_every) continue; pthread_mutex_lock(&collect_data_mutex); - ebpf_read_shm_apps_table(maps_per_core, max_period); + ebpf_read_shm_apps_table(maps_per_core); ebpf_shm_resume_apps_data(); pthread_mutex_unlock(&collect_data_mutex); @@ -1113,16 +1105,17 @@ static void shm_collector(ebpf_module_t *em) { int cgroups = em->cgroup_charts; int update_every = em->update_every; - heartbeat_t hb; - heartbeat_init(&hb); int counter = update_every - 1; int maps_per_core = em->maps_per_core; uint32_t running_time = 0; uint32_t lifetime = em->lifetime; netdata_idx_t *stats = em->hash_table_stats; memset(stats, 0, sizeof(em->hash_table_stats)); + heartbeat_t hb; + heartbeat_init(&hb, USEC_PER_SEC); while (!ebpf_plugin_stop() && running_time < lifetime) { - (void)heartbeat_next(&hb, USEC_PER_SEC); + heartbeat_next(&hb); + if (ebpf_plugin_stop() || ++counter != update_every) continue; diff --git a/src/collectors/ebpf.plugin/ebpf_socket.c b/src/collectors/ebpf.plugin/ebpf_socket.c index 5b87a3256..f0d376f43 100644 --- a/src/collectors/ebpf.plugin/ebpf_socket.c +++ b/src/collectors/ebpf.plugin/ebpf_socket.c @@ -77,11 +77,7 @@ netdata_socket_t *socket_values; ebpf_network_viewer_port_list_t *listen_ports = NULL; ebpf_addresses_t tcp_v6_connect_address = {.function = "tcp_v6_connect", .hash = 0, .addr = 0, .type = 0}; -struct config socket_config = { .first_section = NULL, - .last_section = NULL, - .mutex = NETDATA_MUTEX_INITIALIZER, - .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare }, - .rwlock = AVL_LOCK_INITIALIZER } }; +struct config socket_config = APPCONFIG_INITIALIZER; netdata_ebpf_targets_t socket_targets[] = { {.name = "inet_csk_accept", .mode = EBPF_LOAD_PROBE}, {.name = "tcp_retransmit_skb", .mode = EBPF_LOAD_PROBE}, @@ -1815,9 +1811,6 @@ void ebpf_socket_resume_apps_data() */ void *ebpf_read_socket_thread(void *ptr) { - heartbeat_t hb; - heartbeat_init(&hb); - ebpf_module_t *em = (ebpf_module_t *)ptr; ebpf_update_array_vectors(em); @@ -1830,9 +1823,10 @@ void *ebpf_read_socket_thread(void *ptr) uint32_t running_time = 0; uint32_t lifetime = em->lifetime; - usec_t period = update_every * USEC_PER_SEC; + heartbeat_t hb; + heartbeat_init(&hb, update_every * USEC_PER_SEC); while (!ebpf_plugin_stop() && running_time < lifetime) { - (void)heartbeat_next(&hb, period); + heartbeat_next(&hb); if (ebpf_plugin_stop() || ++counter != update_every) continue; @@ -2612,9 +2606,6 @@ static void ebpf_socket_send_cgroup_data(int update_every) */ 
static void socket_collector(ebpf_module_t *em) { - heartbeat_t hb; - heartbeat_init(&hb); - int cgroups = em->cgroup_charts; if (cgroups) ebpf_socket_update_cgroup_algorithm(); @@ -2627,8 +2618,10 @@ static void socket_collector(ebpf_module_t *em) uint32_t lifetime = em->lifetime; netdata_idx_t *stats = em->hash_table_stats; memset(stats, 0, sizeof(em->hash_table_stats)); + heartbeat_t hb; + heartbeat_init(&hb, USEC_PER_SEC); while (!ebpf_plugin_stop() && running_time < lifetime) { - (void)heartbeat_next(&hb, USEC_PER_SEC); + heartbeat_next(&hb); if (ebpf_plugin_stop() || ++counter != update_every) continue; @@ -2708,7 +2701,7 @@ static void ebpf_socket_initialize_global_vectors() * @param hash the calculated hash for the dimension name. * @param name the dimension name. */ -static void ebpf_link_dimension_name(char *port, uint32_t hash, char *value) +static void ebpf_link_dimension_name(const char *port, uint32_t hash, const char *value) { int test = str2i(port); if (test < NETDATA_MINIMUM_PORT_VALUE || test > NETDATA_MAXIMUM_PORT_VALUE){ @@ -2753,15 +2746,15 @@ static void ebpf_link_dimension_name(char *port, uint32_t hash, char *value) * * @param cfg the configuration structure */ + +static bool config_service_value_cb(void *data __maybe_unused, const char *name, const char *value) { + ebpf_link_dimension_name(name, simple_hash(name), value); + return true; +} + void ebpf_parse_service_name_section(struct config *cfg) { - struct section *co = appconfig_get_section(cfg, EBPF_SERVICE_NAME_SECTION); - if (co) { - struct config_option *cv; - for (cv = co->values; cv ; cv = cv->next) { - ebpf_link_dimension_name(cv->name, cv->hash, cv->value); - } - } + appconfig_foreach_value_in_section(cfg, EBPF_SERVICE_NAME_SECTION, config_service_value_cb, NULL); // Always associated the default port to Netdata ebpf_network_viewer_dim_name_t *names = network_viewer_opt.names; diff --git a/src/collectors/ebpf.plugin/ebpf_socket.h b/src/collectors/ebpf.plugin/ebpf_socket.h index e01126035..a236985eb 100644 --- a/src/collectors/ebpf.plugin/ebpf_socket.h +++ b/src/collectors/ebpf.plugin/ebpf_socket.h @@ -339,8 +339,8 @@ extern ebpf_network_viewer_port_list_t *listen_ports; void update_listen_table(uint16_t value, uint16_t proto, netdata_passive_connection_t *values); void ebpf_fill_ip_list_unsafe(ebpf_network_viewer_ip_list_t **out, ebpf_network_viewer_ip_list_t *in, char *table); void ebpf_parse_service_name_section(struct config *cfg); -void ebpf_parse_ips_unsafe(char *ptr); -void ebpf_parse_ports(char *ptr); +void ebpf_parse_ips_unsafe(const char *ptr); +void ebpf_parse_ports(const char *ptr); void ebpf_socket_read_open_connections(BUFFER *buf, struct ebpf_module *em); void ebpf_socket_fill_publish_apps(ebpf_socket_publish_apps_t *curr, netdata_socket_t *ns); diff --git a/src/collectors/ebpf.plugin/ebpf_softirq.c b/src/collectors/ebpf.plugin/ebpf_softirq.c index 21bd83a3e..19c495eea 100644 --- a/src/collectors/ebpf.plugin/ebpf_softirq.c +++ b/src/collectors/ebpf.plugin/ebpf_softirq.c @@ -3,11 +3,7 @@ #include "ebpf.h" #include "ebpf_softirq.h" -struct config softirq_config = { .first_section = NULL, - .last_section = NULL, - .mutex = NETDATA_MUTEX_INITIALIZER, - .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare }, - .rwlock = AVL_LOCK_INITIALIZER } }; +struct config softirq_config = APPCONFIG_INITIALIZER; #define SOFTIRQ_MAP_LATENCY 0 static ebpf_local_maps_t softirq_maps[] = { @@ -213,7 +209,7 @@ static void softirq_collector(ebpf_module_t *em) // loop and read from published 
diff --git a/src/collectors/ebpf.plugin/ebpf_softirq.c b/src/collectors/ebpf.plugin/ebpf_softirq.c
index 21bd83a3e..19c495eea 100644
--- a/src/collectors/ebpf.plugin/ebpf_softirq.c
+++ b/src/collectors/ebpf.plugin/ebpf_softirq.c
@@ -3,11 +3,7 @@
 #include "ebpf.h"
 #include "ebpf_softirq.h"
 
-struct config softirq_config = { .first_section = NULL,
-                                 .last_section = NULL,
-                                 .mutex = NETDATA_MUTEX_INITIALIZER,
-                                 .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
-                                            .rwlock = AVL_LOCK_INITIALIZER } };
+struct config softirq_config = APPCONFIG_INITIALIZER;
 
 #define SOFTIRQ_MAP_LATENCY 0
 static ebpf_local_maps_t softirq_maps[] = {
@@ -213,7 +209,7 @@ static void softirq_collector(ebpf_module_t *em)
     // loop and read from published data until ebpf plugin is closed.
     heartbeat_t hb;
-    heartbeat_init(&hb);
+    heartbeat_init(&hb, USEC_PER_SEC);
     int update_every = em->update_every;
     int counter = update_every - 1;
     int maps_per_core = em->maps_per_core;
@@ -221,7 +217,7 @@ static void softirq_collector(ebpf_module_t *em)
     uint32_t running_time = 0;
     uint32_t lifetime = em->lifetime;
     while (!ebpf_plugin_stop() && running_time < lifetime) {
-        (void)heartbeat_next(&hb, USEC_PER_SEC);
+        heartbeat_next(&hb);
         if (ebpf_plugin_stop() || ++counter != update_every)
             continue;
 
diff --git a/src/collectors/ebpf.plugin/ebpf_swap.c b/src/collectors/ebpf.plugin/ebpf_swap.c
index 933353178..3be56cfa4 100644
--- a/src/collectors/ebpf.plugin/ebpf_swap.c
+++ b/src/collectors/ebpf.plugin/ebpf_swap.c
@@ -12,11 +12,7 @@ static netdata_idx_t *swap_values = NULL;
 
 netdata_ebpf_swap_t *swap_vector = NULL;
 
-struct config swap_config = { .first_section = NULL,
-                              .last_section = NULL,
-                              .mutex = NETDATA_MUTEX_INITIALIZER,
-                              .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
-                                         .rwlock = AVL_LOCK_INITIALIZER } };
+struct config swap_config = APPCONFIG_INITIALIZER;
 
 static ebpf_local_maps_t swap_maps[] = {{.name = "tbl_pid_swap",
                                          .internal_input = ND_EBPF_DEFAULT_PID_SIZE,
                                          .user_input = 0,
@@ -543,9 +539,8 @@ void ebpf_swap_resume_apps_data() {
  * Read the apps table and store data inside the structure.
  *
  * @param maps_per_core do I need to read all cores?
- * @param max_period limit of iterations without updates before remove data from hash table
  */
-static void ebpf_read_swap_apps_table(int maps_per_core, uint32_t max_period)
+static void ebpf_read_swap_apps_table(int maps_per_core)
 {
     netdata_ebpf_swap_t *cv = swap_vector;
     int fd = swap_maps[NETDATA_PID_SWAP_TABLE].map_fd;
@@ -597,9 +592,6 @@ end_swap_loop:
  */
 void *ebpf_read_swap_thread(void *ptr)
 {
-    heartbeat_t hb;
-    heartbeat_init(&hb);
-
     ebpf_module_t *em = (ebpf_module_t *)ptr;
 
     int maps_per_core = em->maps_per_core;
@@ -612,17 +604,17 @@ void *ebpf_read_swap_thread(void *ptr)
     uint32_t lifetime = em->lifetime;
     uint32_t running_time = 0;
-    usec_t period = update_every * USEC_PER_SEC;
-    uint32_t max_period = EBPF_CLEANUP_FACTOR;
     pids_fd[EBPF_PIDS_SWAP_IDX] = swap_maps[NETDATA_PID_SWAP_TABLE].map_fd;
+    heartbeat_t hb;
+    heartbeat_init(&hb, update_every * USEC_PER_SEC);
     while (!ebpf_plugin_stop() && running_time < lifetime) {
-        (void)heartbeat_next(&hb, period);
+        heartbeat_next(&hb);
         if (ebpf_plugin_stop() || ++counter != update_every)
             continue;
 
         pthread_mutex_lock(&collect_data_mutex);
-        ebpf_read_swap_apps_table(maps_per_core, max_period);
+        ebpf_read_swap_apps_table(maps_per_core);
         ebpf_swap_resume_apps_data();
         pthread_mutex_unlock(&collect_data_mutex);
 
@@ -930,16 +922,17 @@ static void swap_collector(ebpf_module_t *em)
 {
     int cgroup = em->cgroup_charts;
     int update_every = em->update_every;
-    heartbeat_t hb;
-    heartbeat_init(&hb);
     int counter = update_every - 1;
     int maps_per_core = em->maps_per_core;
     uint32_t running_time = 0;
     uint32_t lifetime = em->lifetime;
     netdata_idx_t *stats = em->hash_table_stats;
     memset(stats, 0, sizeof(em->hash_table_stats));
+
+    heartbeat_t hb;
+    heartbeat_init(&hb, USEC_PER_SEC);
     while (!ebpf_plugin_stop() && running_time < lifetime) {
-        (void)heartbeat_next(&hb, USEC_PER_SEC);
+        (void)heartbeat_next(&hb);
         if (ebpf_plugin_stop() || ++counter != update_every)
             continue;
 
diff --git a/src/collectors/ebpf.plugin/ebpf_sync.c b/src/collectors/ebpf.plugin/ebpf_sync.c
index 2be9192c5..094de7019 100644
--- a/src/collectors/ebpf.plugin/ebpf_sync.c
+++ b/src/collectors/ebpf.plugin/ebpf_sync.c
@@ -100,11 +100,7 @@ ebpf_local_maps_t sync_file_range_maps[] = {{.name = "tbl_syncfr", .internal_inp
 #endif
                                             }};
 
-struct config sync_config = { .first_section = NULL,
-                              .last_section = NULL,
-                              .mutex = NETDATA_MUTEX_INITIALIZER,
-                              .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
-                                         .rwlock = AVL_LOCK_INITIALIZER } };
+struct config sync_config = APPCONFIG_INITIALIZER;
 
 netdata_ebpf_targets_t sync_targets[] = { {.name = NETDATA_SYSCALLS_SYNC, .mode = EBPF_LOAD_TRAMPOLINE},
                                           {.name = NETDATA_SYSCALLS_SYNCFS, .mode = EBPF_LOAD_TRAMPOLINE},
@@ -558,15 +554,15 @@ static void sync_send_data()
  */
 static void sync_collector(ebpf_module_t *em)
 {
-    heartbeat_t hb;
-    heartbeat_init(&hb);
     int update_every = em->update_every;
     int counter = update_every - 1;
     int maps_per_core = em->maps_per_core;
     uint32_t running_time = 0;
     uint32_t lifetime = em->lifetime;
+    heartbeat_t hb;
+    heartbeat_init(&hb, USEC_PER_SEC);
     while (!ebpf_plugin_stop() && running_time < lifetime) {
-        (void)heartbeat_next(&hb, USEC_PER_SEC);
+        heartbeat_next(&hb);
         if (ebpf_plugin_stop() || ++counter != update_every)
             continue;
 
diff --git a/src/collectors/ebpf.plugin/ebpf_vfs.c b/src/collectors/ebpf.plugin/ebpf_vfs.c
index cf1f50e99..c0c1bee38 100644
--- a/src/collectors/ebpf.plugin/ebpf_vfs.c
+++ b/src/collectors/ebpf.plugin/ebpf_vfs.c
@@ -52,11 +52,7 @@ struct netdata_static_thread ebpf_read_vfs = {
     .start_routine = NULL
 };
 
-struct config vfs_config = { .first_section = NULL,
-                             .last_section = NULL,
-                             .mutex = NETDATA_MUTEX_INITIALIZER,
-                             .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
-                                        .rwlock = AVL_LOCK_INITIALIZER } };
+struct config vfs_config = APPCONFIG_INITIALIZER;
 
 netdata_ebpf_targets_t vfs_targets[] = { {.name = "vfs_write", .mode = EBPF_LOAD_TRAMPOLINE},
                                          {.name = "vfs_writev", .mode = EBPF_LOAD_TRAMPOLINE},
@@ -2064,9 +2060,6 @@ void ebpf_vfs_resume_apps_data() {
  */
 void *ebpf_read_vfs_thread(void *ptr)
 {
-    heartbeat_t hb;
-    heartbeat_init(&hb);
-
     ebpf_module_t *em = (ebpf_module_t *)ptr;
 
     int maps_per_core = em->maps_per_core;
@@ -2079,11 +2072,12 @@ void *ebpf_read_vfs_thread(void *ptr)
     uint32_t lifetime = em->lifetime;
     uint32_t running_time = 0;
-    usec_t period = update_every * USEC_PER_SEC;
     uint32_t max_period = EBPF_CLEANUP_FACTOR;
     pids_fd[EBPF_PIDS_VFS_IDX] = vfs_maps[NETDATA_VFS_PID].map_fd;
+    heartbeat_t hb;
+    heartbeat_init(&hb, update_every * USEC_PER_SEC);
     while (!ebpf_plugin_stop() && running_time < lifetime) {
-        (void)heartbeat_next(&hb, period);
+        heartbeat_next(&hb);
         if (ebpf_plugin_stop() || ++counter != update_every)
             continue;
 
@@ -2116,8 +2110,6 @@ void *ebpf_read_vfs_thread(void *ptr)
 static void vfs_collector(ebpf_module_t *em)
 {
     int cgroups = em->cgroup_charts;
-    heartbeat_t hb;
-    heartbeat_init(&hb);
     int update_every = em->update_every;
     int counter = update_every - 1;
     int maps_per_core = em->maps_per_core;
@@ -2125,8 +2117,10 @@ static void vfs_collector(ebpf_module_t *em)
     uint32_t lifetime = em->lifetime;
     netdata_idx_t *stats = em->hash_table_stats;
     memset(stats, 0, sizeof(em->hash_table_stats));
+    heartbeat_t hb;
+    heartbeat_init(&hb, USEC_PER_SEC);
     while (!ebpf_plugin_stop() && running_time < lifetime) {
-        (void)heartbeat_next(&hb, USEC_PER_SEC);
+        heartbeat_next(&hb);
         if (ebpf_plugin_stop() || ++counter != update_every)
             continue;
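Note on the struct config changes above (socket, softirq, swap, sync, vfs): every module previously spelled out the same five-line designated initializer; the patch folds it into one macro. Side by side, assuming APPCONFIG_INITIALIZER expands to exactly the initializer the removed lines contained:

    // Before: repeated verbatim in each collector.
    struct config example_config = { .first_section = NULL,
                                     .last_section = NULL,
                                     .mutex = NETDATA_MUTEX_INITIALIZER,
                                     .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
                                                .rwlock = AVL_LOCK_INITIALIZER } };

    // After: one definition of the layout, shared by every module, so a new
    // field in struct config no longer requires touching every collector.
    struct config example_config = APPCONFIG_INITIALIZER;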
diff --git a/src/collectors/ebpf.plugin/integrations/ebpf_cachestat.md b/src/collectors/ebpf.plugin/integrations/ebpf_cachestat.md
index 352bc0721..4bfb238ba 100644
--- a/src/collectors/ebpf.plugin/integrations/ebpf_cachestat.md
+++ b/src/collectors/ebpf.plugin/integrations/ebpf_cachestat.md
@@ -145,8 +145,8 @@ Now follow steps:
 The configuration file name for this integration is `ebpf.d/cachestat.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/ebpf.plugin/integrations/ebpf_dcstat.md b/src/collectors/ebpf.plugin/integrations/ebpf_dcstat.md
index 5ca7a6a68..9e6f8ef32 100644
--- a/src/collectors/ebpf.plugin/integrations/ebpf_dcstat.md
+++ b/src/collectors/ebpf.plugin/integrations/ebpf_dcstat.md
@@ -143,8 +143,8 @@ Now follow steps:
 The configuration file name for this integration is `ebpf.d/dcstat.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/ebpf.plugin/integrations/ebpf_disk.md b/src/collectors/ebpf.plugin/integrations/ebpf_disk.md
index 4fc3dc700..7dccc51c4 100644
--- a/src/collectors/ebpf.plugin/integrations/ebpf_disk.md
+++ b/src/collectors/ebpf.plugin/integrations/ebpf_disk.md
@@ -109,8 +109,8 @@ This thread needs to attach a tracepoint to monitor when a process schedule an e
 The configuration file name for this integration is `ebpf.d/disk.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/ebpf.plugin/integrations/ebpf_filedescriptor.md b/src/collectors/ebpf.plugin/integrations/ebpf_filedescriptor.md
index 2f917d183..f9c9aa1a6 100644
--- a/src/collectors/ebpf.plugin/integrations/ebpf_filedescriptor.md
+++ b/src/collectors/ebpf.plugin/integrations/ebpf_filedescriptor.md
@@ -143,8 +143,8 @@ Now follow steps:
 The configuration file name for this integration is `ebpf.d/fd.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/ebpf.plugin/integrations/ebpf_filesystem.md b/src/collectors/ebpf.plugin/integrations/ebpf_filesystem.md
index ea55a6c04..b4b8e490c 100644
--- a/src/collectors/ebpf.plugin/integrations/ebpf_filesystem.md
+++ b/src/collectors/ebpf.plugin/integrations/ebpf_filesystem.md
@@ -130,8 +130,8 @@ Now follow steps:
 The configuration file name for this integration is `ebpf.d/filesystem.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/ebpf.plugin/integrations/ebpf_hardirq.md b/src/collectors/ebpf.plugin/integrations/ebpf_hardirq.md
index d5f79353f..8d77f9ee3 100644
--- a/src/collectors/ebpf.plugin/integrations/ebpf_hardirq.md
+++ b/src/collectors/ebpf.plugin/integrations/ebpf_hardirq.md
@@ -109,8 +109,8 @@ This thread needs to attach a tracepoint to monitor when a process schedule an e
 The configuration file name for this integration is `ebpf.d/hardirq.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/ebpf.plugin/integrations/ebpf_mdflush.md b/src/collectors/ebpf.plugin/integrations/ebpf_mdflush.md
index 369e8958f..663557eca 100644
--- a/src/collectors/ebpf.plugin/integrations/ebpf_mdflush.md
+++ b/src/collectors/ebpf.plugin/integrations/ebpf_mdflush.md
@@ -104,8 +104,8 @@ Now follow steps:
 The configuration file name for this integration is `ebpf.d/mdflush.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/ebpf.plugin/integrations/ebpf_mount.md b/src/collectors/ebpf.plugin/integrations/ebpf_mount.md
index 5e6738e2c..64dcaeacd 100644
--- a/src/collectors/ebpf.plugin/integrations/ebpf_mount.md
+++ b/src/collectors/ebpf.plugin/integrations/ebpf_mount.md
@@ -110,8 +110,8 @@ This thread needs to attach a tracepoint to monitor when a process schedule an e
 The configuration file name for this integration is `ebpf.d/mount.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/ebpf.plugin/integrations/ebpf_oomkill.md b/src/collectors/ebpf.plugin/integrations/ebpf_oomkill.md
index d9e14f4fb..bc40c883b 100644
--- a/src/collectors/ebpf.plugin/integrations/ebpf_oomkill.md
+++ b/src/collectors/ebpf.plugin/integrations/ebpf_oomkill.md
@@ -126,8 +126,8 @@ This thread needs to attach a tracepoint to monitor when a process schedule an e
 The configuration file name for this integration is `ebpf.d/oomkill.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/ebpf.plugin/integrations/ebpf_processes.md b/src/collectors/ebpf.plugin/integrations/ebpf_processes.md
index 8ff091da0..f3bc209d0 100644
--- a/src/collectors/ebpf.plugin/integrations/ebpf_processes.md
+++ b/src/collectors/ebpf.plugin/integrations/ebpf_processes.md
@@ -153,8 +153,8 @@ This thread needs to attach a tracepoint to monitor when a process schedule an e
 The configuration file name for this integration is `ebpf.d/process.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/ebpf.plugin/integrations/ebpf_shm.md b/src/collectors/ebpf.plugin/integrations/ebpf_shm.md
index c65d3a85e..2e037ea30 100644
--- a/src/collectors/ebpf.plugin/integrations/ebpf_shm.md
+++ b/src/collectors/ebpf.plugin/integrations/ebpf_shm.md
@@ -147,8 +147,8 @@ This thread needs to attach a tracepoint to monitor when a process schedule an e
 The configuration file name for this integration is `ebpf.d/shm.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/ebpf.plugin/integrations/ebpf_socket.md b/src/collectors/ebpf.plugin/integrations/ebpf_socket.md
index 917dcaba6..441e72963 100644
--- a/src/collectors/ebpf.plugin/integrations/ebpf_socket.md
+++ b/src/collectors/ebpf.plugin/integrations/ebpf_socket.md
@@ -162,8 +162,8 @@ Now follow steps:
 The configuration file name for this integration is `ebpf.d/network.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/ebpf.plugin/integrations/ebpf_softirq.md b/src/collectors/ebpf.plugin/integrations/ebpf_softirq.md
index 1571dd4b5..e8214cff6 100644
--- a/src/collectors/ebpf.plugin/integrations/ebpf_softirq.md
+++ b/src/collectors/ebpf.plugin/integrations/ebpf_softirq.md
@@ -109,8 +109,8 @@ This thread needs to attach a tracepoint to monitor when a process schedule an e
 The configuration file name for this integration is `ebpf.d/softirq.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/ebpf.plugin/integrations/ebpf_swap.md b/src/collectors/ebpf.plugin/integrations/ebpf_swap.md
index 4358ac71b..0fe6cd6ca 100644
--- a/src/collectors/ebpf.plugin/integrations/ebpf_swap.md
+++ b/src/collectors/ebpf.plugin/integrations/ebpf_swap.md
@@ -136,8 +136,8 @@ Now follow steps:
 The configuration file name for this integration is `ebpf.d/swap.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/ebpf.plugin/integrations/ebpf_sync.md b/src/collectors/ebpf.plugin/integrations/ebpf_sync.md
index 08d69fada..237f340ed 100644
--- a/src/collectors/ebpf.plugin/integrations/ebpf_sync.md
+++ b/src/collectors/ebpf.plugin/integrations/ebpf_sync.md
@@ -117,8 +117,8 @@ This thread needs to attach a tracepoint to monitor when a process schedule an e
 The configuration file name for this integration is `ebpf.d/sync.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/ebpf.plugin/integrations/ebpf_vfs.md b/src/collectors/ebpf.plugin/integrations/ebpf_vfs.md
index 3adb00e9b..bf45d3858 100644
--- a/src/collectors/ebpf.plugin/integrations/ebpf_vfs.md
+++ b/src/collectors/ebpf.plugin/integrations/ebpf_vfs.md
@@ -178,8 +178,8 @@ Now follow steps:
 The configuration file name for this integration is `ebpf.d/vfs.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/README.md b/src/collectors/freebsd.plugin/README.md
index 9fae20aec..e8e7fd42e 100644
--- a/src/collectors/freebsd.plugin/README.md
+++ b/src/collectors/freebsd.plugin/README.md
@@ -1,16 +1,5 @@
-
-
 # FreeBSD system metrics (freebsd.plugin)
 
 Collects resource usage and performance data on FreeBSD systems
 
 By default, Netdata will enable monitoring metrics for disks, memory, and network only when they are not zero. If they are constantly zero they are ignored. Metrics that will start having values, after Netdata is started, will be detected and charts will be automatically added to the dashboard (a refresh of the dashboard is needed for them to appear though). Use `yes` instead of `auto` in plugin configuration sections to enable these charts permanently. You can also set the `enable zero metrics` option to `yes` in the `[global]` section which enables charts with zero metrics for all internal Netdata plugins.
-
-
diff --git a/src/collectors/freebsd.plugin/freebsd_devstat.c b/src/collectors/freebsd.plugin/freebsd_devstat.c
index e0e2e97b8..c3ee43961 100644
--- a/src/collectors/freebsd.plugin/freebsd_devstat.c
+++ b/src/collectors/freebsd.plugin/freebsd_devstat.c
@@ -393,8 +393,6 @@ int do_kern_devstat(int update_every, usec_t dt) {
                                                    RRDSET_TYPE_LINE
                     );
 
-                    rrdset_flag_set(dm->st_ops, RRDSET_FLAG_DETAIL);
-
                     dm->rd_ops_in = rrddim_add(dm->st_ops, "reads", NULL, 1, 1,
                                                RRD_ALGORITHM_INCREMENTAL);
                     dm->rd_ops_out = rrddim_add(dm->st_ops, "writes", NULL, -1, 1,
@@ -428,8 +426,6 @@ int do_kern_devstat(int update_every, usec_t dt) {
                                                    RRDSET_TYPE_LINE
                     );
 
-                    rrdset_flag_set(dm->st_qops, RRDSET_FLAG_DETAIL);
-
                     dm->rd_qops = rrddim_add(dm->st_qops, "operations", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
                 }
 
@@ -453,8 +449,6 @@ int do_kern_devstat(int update_every, usec_t dt) {
                                                    RRDSET_TYPE_AREA
                     );
 
-                    rrdset_flag_set(dm->st_util, RRDSET_FLAG_DETAIL);
-
                     dm->rd_util = rrddim_add(dm->st_util, "utilization", NULL, 1, 10,
                                              RRD_ALGORITHM_INCREMENTAL);
                 }
@@ -479,8 +473,6 @@ int do_kern_devstat(int update_every, usec_t dt) {
                                                    RRDSET_TYPE_LINE
                     );
 
-                    rrdset_flag_set(dm->st_iotime, RRDSET_FLAG_DETAIL);
-
                     dm->rd_iotime_in = rrddim_add(dm->st_iotime, "reads", NULL, 1, 1,
                                                   RRD_ALGORITHM_INCREMENTAL);
                     dm->rd_iotime_out = rrddim_add(dm->st_iotime, "writes", NULL, -1, 1,
@@ -518,8 +510,6 @@ int do_kern_devstat(int update_every, usec_t dt) {
                                                    RRDSET_TYPE_LINE
                     );
 
-                    rrdset_flag_set(dm->st_await, RRDSET_FLAG_DETAIL);
-
                     dm->rd_await_in = rrddim_add(dm->st_await, "reads", NULL, 1, 1,
                                                  RRD_ALGORITHM_ABSOLUTE);
                     dm->rd_await_out = rrddim_add(dm->st_await, "writes", NULL, -1, 1,
@@ -577,8 +567,6 @@ int do_kern_devstat(int update_every, usec_t dt) {
                                                    RRDSET_TYPE_AREA
                     );
 
-                    rrdset_flag_set(dm->st_avagsz, RRDSET_FLAG_DETAIL);
-
                     dm->rd_avagsz_in = rrddim_add(dm->st_avagsz, "reads", NULL, 1, KILO_FACTOR,
                                                   RRD_ALGORITHM_ABSOLUTE);
                     dm->rd_avagsz_out = rrddim_add(dm->st_avagsz, "writes", NULL, -1, KILO_FACTOR,
@@ -627,8 +615,6 @@ int do_kern_devstat(int update_every, usec_t dt) {
                                                    RRDSET_TYPE_LINE
                     );
 
-                    rrdset_flag_set(dm->st_svctm, RRDSET_FLAG_DETAIL);
-
                     dm->rd_svctm = rrddim_add(dm->st_svctm, "svctm", NULL, 1, 1,
                                               RRD_ALGORITHM_ABSOLUTE);
                 }
diff --git a/src/collectors/freebsd.plugin/freebsd_getifaddrs.c b/src/collectors/freebsd.plugin/freebsd_getifaddrs.c
index 153ab8b84..a33db85e7 100644
--- a/src/collectors/freebsd.plugin/freebsd_getifaddrs.c
+++ b/src/collectors/freebsd.plugin/freebsd_getifaddrs.c
@@ -297,8 +297,6 @@ int do_getifaddrs(int update_every, usec_t dt) {
                                        RRDSET_TYPE_LINE
             );
 
-            rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
            rd_packets_in = rrddim_add(st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
            rd_packets_out = rrddim_add(st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
            rd_packets_m_in = rrddim_add(st, "multicast_received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -464,8 +462,6 @@ int do_getifaddrs(int update_every, usec_t dt) {
                                            RRDSET_TYPE_LINE
                 );
 
-                rrdset_flag_set(ifm->st_packets, RRDSET_FLAG_DETAIL);
-
                 ifm->rd_packets_in = rrddim_add(ifm->st_packets, "received", NULL, 1, 1,
                                                 RRD_ALGORITHM_INCREMENTAL);
                 ifm->rd_packets_out = rrddim_add(ifm->st_packets, "sent", NULL, -1, 1,
@@ -499,8 +495,6 @@ int do_getifaddrs(int update_every, usec_t dt) {
                                            RRDSET_TYPE_LINE
                 );
 
-                rrdset_flag_set(ifm->st_errors, RRDSET_FLAG_DETAIL);
-
                 ifm->rd_errors_in = rrddim_add(ifm->st_errors, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
                 ifm->rd_errors_out = rrddim_add(ifm->st_errors, "outbound", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
             }
@@ -526,8 +520,6 @@ int do_getifaddrs(int update_every, usec_t dt) {
                                            RRDSET_TYPE_LINE
                 );
 
-                rrdset_flag_set(ifm->st_drops, RRDSET_FLAG_DETAIL);
-
                 ifm->rd_drops_in = rrddim_add(ifm->st_drops, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
 #if __FreeBSD__ >= 11
                 ifm->rd_drops_out = rrddim_add(ifm->st_drops, "outbound", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -557,8 +549,6 @@ int do_getifaddrs(int update_every, usec_t dt) {
                                            RRDSET_TYPE_LINE
                 );
 
-                rrdset_flag_set(ifm->st_events, RRDSET_FLAG_DETAIL);
-
                 ifm->rd_events_coll = rrddim_add(ifm->st_events, "collisions", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
             }
diff --git a/src/collectors/freebsd.plugin/freebsd_ipfw.c b/src/collectors/freebsd.plugin/freebsd_ipfw.c
index dcb771ce9..b94bf15c2 100644
--- a/src/collectors/freebsd.plugin/freebsd_ipfw.c
+++ b/src/collectors/freebsd.plugin/freebsd_ipfw.c
@@ -168,7 +168,6 @@ int do_ipfw(int update_every, usec_t dt) {
                                        update_every,
                                        RRDSET_TYPE_STACKED
             );
-            rrdset_flag_set(st_mem, RRDSET_FLAG_DETAIL);
 
             rd_dyn_mem = rrddim_add(st_mem, "dynamic", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
             rd_stat_mem = rrddim_add(st_mem, "static", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
diff --git a/src/collectors/freebsd.plugin/freebsd_sysctl.c b/src/collectors/freebsd.plugin/freebsd_sysctl.c
index 0fa710275..525170e47 100644
--- a/src/collectors/freebsd.plugin/freebsd_sysctl.c
+++ b/src/collectors/freebsd.plugin/freebsd_sysctl.c
@@ -271,7 +271,6 @@ int do_vm_vmtotal(int update_every, usec_t dt) {
                                        update_every,
                                        RRDSET_TYPE_AREA
             );
-            rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
 
             rd = rrddim_add(st, "used", NULL, system_pagesize, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
         }
@@ -798,8 +797,6 @@ int do_vm_stats_sys_v_forks(int update_every, usec_t dt) {
                                    RRDSET_TYPE_LINE
             );
 
-            rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
             rd = rrddim_add(st, "started", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
         }
 
@@ -871,8 +868,6 @@ int do_vm_swap_info(int update_every, usec_t dt) {
                                    RRDSET_TYPE_STACKED
             );
 
-            rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
             rd_free = rrddim_add(st, "free", NULL, system_pagesize, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
             rd_used = rrddim_add(st, "used", NULL, system_pagesize, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
         }
@@ -1081,8 +1076,6 @@ int do_vm_stats_sys_v_pgfaults(int update_every, usec_t dt) {
                                    RRDSET_TYPE_LINE
             );
 
-            rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
             rd_memory = rrddim_add(st, "memory", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
             rd_io_requiring = rrddim_add(st, "io_requiring", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
             rd_cow = rrddim_add(st, "cow", NULL, 1, 1,
                                 RRD_ALGORITHM_INCREMENTAL);
@@ -1756,8 +1749,6 @@ int do_net_inet_tcp_stats(int update_every, usec_t dt) {
                                        RRDSET_TYPE_LINE
             );
 
-            rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
             rd_in_errs = rrddim_add(st, "InErrs", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
             rd_in_csum_errs = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
             rd_retrans_segs = rrddim_add(st, "RetransSegs", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -1795,8 +1786,6 @@ int do_net_inet_tcp_stats(int update_every, usec_t dt) {
                                        RRDSET_TYPE_LINE
             );
 
-            rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
             rd_estab_resets = rrddim_add(st, "EstabResets", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
             rd_active_opens = rrddim_add(st, "ActiveOpens", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
             rd_passive_opens = rrddim_add(st, "PassiveOpens", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -1971,8 +1960,6 @@ int do_net_inet_tcp_stats(int update_every, usec_t dt) {
                                        RRDSET_TYPE_LINE
             );
 
-            rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
             rd_rcvce = rrddim_add(st, "InCEPkts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
 #if __FreeBSD_version < 1400074
             rd_ect0 = rrddim_add(st, "ECT0Pkts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -2082,8 +2069,6 @@ int do_net_inet_udp_stats(int update_every, usec_t dt) {
                                        RRDSET_TYPE_LINE
             );
 
-            rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
             rd_in_errors = rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
             rd_no_ports = rrddim_add(st, "NoPorts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
             rd_recv_buf_errors = rrddim_add(st, "RcvbufErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -2332,8 +2317,6 @@ int do_net_inet_ip_stats(int update_every, usec_t dt) {
                                        RRDSET_TYPE_LINE
             );
 
-            rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
             rd_ok = rrddim_add(st, "FragOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL);
             rd_fails = rrddim_add(st, "FragFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL);
             rd_created = rrddim_add(st, "FragCreates", "created", 1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -2365,8 +2348,6 @@ int do_net_inet_ip_stats(int update_every, usec_t dt) {
                                        RRDSET_TYPE_LINE
             );
 
-            rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
             rd_ok = rrddim_add(st, "ReasmOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL);
             rd_failed = rrddim_add(st, "ReasmFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL);
             rd_all = rrddim_add(st, "ReasmReqds", "all", 1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -2400,8 +2381,6 @@ int do_net_inet_ip_stats(int update_every, usec_t dt) {
                                        RRDSET_TYPE_LINE
             );
 
-            rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
             rd_in_discards = rrddim_add(st, "InDiscards", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
             rd_out_discards = rrddim_add(st, "OutDiscards", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
             rd_in_hdr_errors = rrddim_add(st, "InHdrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -2519,8 +2498,6 @@ int do_net_inet6_ip6_stats(int update_every, usec_t dt) {
                                        RRDSET_TYPE_LINE
             );
 
-            rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
             rd_ok = rrddim_add(st, "ok", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
             rd_failed = rrddim_add(st, "failed", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
             rd_all = rrddim_add(st, "all", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -2554,8 +2531,6 @@ int do_net_inet6_ip6_stats(int update_every, usec_t dt) {
                                        RRDSET_TYPE_LINE
             );
 
-            rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
             rd_ok = rrddim_add(st, "ok", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
             rd_failed = rrddim_add(st, "failed", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
             rd_timeout = rrddim_add(st, "timeout", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -2593,8 +2568,6 @@ int do_net_inet6_ip6_stats(int update_every, usec_t dt) {
                                        RRDSET_TYPE_LINE
             );
 
-            rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
             rd_in_discards = rrddim_add(st, "InDiscards", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
             rd_out_discards = rrddim_add(st, "OutDiscards", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
             rd_in_hdr_errors = rrddim_add(st, "InHdrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
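Note on the freebsd.plugin hunks above: they all delete the same one-liner after chart creation, so these charts are no longer flagged as detail charts; dimensions are added exactly as before. The shape of the change, using only calls that appear in these hunks (`st` stands for the chart handle returned by the creation call the hunks truncate):

    // removed everywhere in this patch:
    // rrdset_flag_set(st, RRDSET_FLAG_DETAIL);

    // kept unchanged:
    rd_in  = rrddim_add(st, "reads", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
    rd_out = rrddim_add(st, "writes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);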
diff --git a/src/collectors/freebsd.plugin/integrations/dev.cpu.0.freq.md b/src/collectors/freebsd.plugin/integrations/dev.cpu.0.freq.md
index 322b3fd5d..13415c7a6 100644
--- a/src/collectors/freebsd.plugin/integrations/dev.cpu.0.freq.md
+++ b/src/collectors/freebsd.plugin/integrations/dev.cpu.0.freq.md
@@ -86,8 +86,8 @@ The configuration file name for this integration is `Config options`.
 Configuration for this specific integration is located in the `[plugin:freebsd]` section within that file.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/dev.cpu.temperature.md b/src/collectors/freebsd.plugin/integrations/dev.cpu.temperature.md
index 38bbba341..326928bb7 100644
--- a/src/collectors/freebsd.plugin/integrations/dev.cpu.temperature.md
+++ b/src/collectors/freebsd.plugin/integrations/dev.cpu.temperature.md
@@ -95,8 +95,8 @@ The file format is a modified INI syntax. The general structure is:
 [section2]
   option3 = some third value
 ```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/devstat.md b/src/collectors/freebsd.plugin/integrations/devstat.md
index 1cc2795b4..56e69d382 100644
--- a/src/collectors/freebsd.plugin/integrations/devstat.md
+++ b/src/collectors/freebsd.plugin/integrations/devstat.md
@@ -119,8 +119,8 @@ The file format is a modified INI syntax. The general structure is:
 [section2]
   option3 = some third value
 ```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/getifaddrs.md b/src/collectors/freebsd.plugin/integrations/getifaddrs.md
index ce9d9e337..41845cf62 100644
--- a/src/collectors/freebsd.plugin/integrations/getifaddrs.md
+++ b/src/collectors/freebsd.plugin/integrations/getifaddrs.md
@@ -125,8 +125,8 @@ The file format is a modified INI syntax. The general structure is:
 [section2]
   option3 = some third value
 ```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/getmntinfo.md b/src/collectors/freebsd.plugin/integrations/getmntinfo.md
index 186487d11..1779cfd1b 100644
--- a/src/collectors/freebsd.plugin/integrations/getmntinfo.md
+++ b/src/collectors/freebsd.plugin/integrations/getmntinfo.md
@@ -102,8 +102,8 @@ The file format is a modified INI syntax. The general structure is:
 [section2]
   option3 = some third value
 ```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/hw.intrcnt.md b/src/collectors/freebsd.plugin/integrations/hw.intrcnt.md
index 713d388f9..b18e9afd0 100644
--- a/src/collectors/freebsd.plugin/integrations/hw.intrcnt.md
+++ b/src/collectors/freebsd.plugin/integrations/hw.intrcnt.md
@@ -96,8 +96,8 @@ The file format is a modified INI syntax. The general structure is:
 [section2]
   option3 = some third value
 ```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/ipfw.md b/src/collectors/freebsd.plugin/integrations/ipfw.md
index 33aa4a249..59b93f2a1 100644
--- a/src/collectors/freebsd.plugin/integrations/ipfw.md
+++ b/src/collectors/freebsd.plugin/integrations/ipfw.md
@@ -99,8 +99,8 @@ The file format is a modified INI syntax. The general structure is:
 [section2]
   option3 = some third value
 ```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/kern.cp_time.md b/src/collectors/freebsd.plugin/integrations/kern.cp_time.md
index 158e7fc1e..4bf39aa1b 100644
--- a/src/collectors/freebsd.plugin/integrations/kern.cp_time.md
+++ b/src/collectors/freebsd.plugin/integrations/kern.cp_time.md
@@ -114,8 +114,8 @@ The file format is a modified INI syntax. The general structure is:
 [section2]
   option3 = some third value
 ```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/kern.ipc.msq.md b/src/collectors/freebsd.plugin/integrations/kern.ipc.msq.md
index a0c6504f2..987d73029 100644
--- a/src/collectors/freebsd.plugin/integrations/kern.ipc.msq.md
+++ b/src/collectors/freebsd.plugin/integrations/kern.ipc.msq.md
@@ -97,8 +97,8 @@ The file format is a modified INI syntax. The general structure is:
 [section2]
   option3 = some third value
 ```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/kern.ipc.sem.md b/src/collectors/freebsd.plugin/integrations/kern.ipc.sem.md
index 71f5605e8..73f603690 100644
--- a/src/collectors/freebsd.plugin/integrations/kern.ipc.sem.md
+++ b/src/collectors/freebsd.plugin/integrations/kern.ipc.sem.md
@@ -102,8 +102,8 @@ The file format is a modified INI syntax. The general structure is:
 [section2]
   option3 = some third value
 ```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/kern.ipc.shm.md b/src/collectors/freebsd.plugin/integrations/kern.ipc.shm.md
index 278445e64..e9691013d 100644
--- a/src/collectors/freebsd.plugin/integrations/kern.ipc.shm.md
+++ b/src/collectors/freebsd.plugin/integrations/kern.ipc.shm.md
@@ -96,8 +96,8 @@ The file format is a modified INI syntax. The general structure is:
 [section2]
   option3 = some third value
 ```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/net.inet.icmp.stats.md b/src/collectors/freebsd.plugin/integrations/net.inet.icmp.stats.md
index 42ceb19ca..80b18fabd 100644
--- a/src/collectors/freebsd.plugin/integrations/net.inet.icmp.stats.md
+++ b/src/collectors/freebsd.plugin/integrations/net.inet.icmp.stats.md
@@ -97,8 +97,8 @@ The file format is a modified INI syntax. The general structure is:
 [section2]
   option3 = some third value
 ```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/net.inet.ip.stats.md b/src/collectors/freebsd.plugin/integrations/net.inet.ip.stats.md
index 8c5c4355d..538a6054a 100644
--- a/src/collectors/freebsd.plugin/integrations/net.inet.ip.stats.md
+++ b/src/collectors/freebsd.plugin/integrations/net.inet.ip.stats.md
@@ -98,8 +98,8 @@ The file format is a modified INI syntax. The general structure is:
 [section2]
   option3 = some third value
 ```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/net.inet.tcp.states.md b/src/collectors/freebsd.plugin/integrations/net.inet.tcp.states.md
index 41bacfedd..8f56f0150 100644
--- a/src/collectors/freebsd.plugin/integrations/net.inet.tcp.states.md
+++ b/src/collectors/freebsd.plugin/integrations/net.inet.tcp.states.md
@@ -100,8 +100,8 @@ The file format is a modified INI syntax. The general structure is:
 [section2]
   option3 = some third value
 ```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/net.inet.tcp.stats.md b/src/collectors/freebsd.plugin/integrations/net.inet.tcp.stats.md
index 259846ea1..b2beb3681 100644
--- a/src/collectors/freebsd.plugin/integrations/net.inet.tcp.stats.md
+++ b/src/collectors/freebsd.plugin/integrations/net.inet.tcp.stats.md
@@ -110,8 +110,8 @@ The file format is a modified INI syntax. The general structure is:
 [section2]
   option3 = some third value
 ```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/net.inet.udp.stats.md b/src/collectors/freebsd.plugin/integrations/net.inet.udp.stats.md
index ec672a686..2dd3fc06f 100644
--- a/src/collectors/freebsd.plugin/integrations/net.inet.udp.stats.md
+++ b/src/collectors/freebsd.plugin/integrations/net.inet.udp.stats.md
@@ -102,8 +102,8 @@ The file format is a modified INI syntax. The general structure is:
 [section2]
   option3 = some third value
 ```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/net.inet6.icmp6.stats.md b/src/collectors/freebsd.plugin/integrations/net.inet6.icmp6.stats.md
index fe23457f6..e0fe3bd90 100644
--- a/src/collectors/freebsd.plugin/integrations/net.inet6.icmp6.stats.md
+++ b/src/collectors/freebsd.plugin/integrations/net.inet6.icmp6.stats.md
@@ -101,8 +101,8 @@ The file format is a modified INI syntax. The general structure is:
 [section2]
   option3 = some third value
 ```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/net.inet6.ip6.stats.md b/src/collectors/freebsd.plugin/integrations/net.inet6.ip6.stats.md
index ac4015787..ecfac70e9 100644
--- a/src/collectors/freebsd.plugin/integrations/net.inet6.ip6.stats.md
+++ b/src/collectors/freebsd.plugin/integrations/net.inet6.ip6.stats.md
@@ -98,8 +98,8 @@ The file format is a modified INI syntax. The general structure is:
 [section2]
   option3 = some third value
 ```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/net.isr.md b/src/collectors/freebsd.plugin/integrations/net.isr.md
index f9819be80..962ffc3ee 100644
--- a/src/collectors/freebsd.plugin/integrations/net.isr.md
+++ b/src/collectors/freebsd.plugin/integrations/net.isr.md
@@ -114,8 +114,8 @@ The file format is a modified INI syntax. The general structure is:
 [section2]
   option3 = some third value
 ```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/system.ram.md b/src/collectors/freebsd.plugin/integrations/system.ram.md
index b11b39390..791bcf3f7 100644
--- a/src/collectors/freebsd.plugin/integrations/system.ram.md
+++ b/src/collectors/freebsd.plugin/integrations/system.ram.md
@@ -104,8 +104,8 @@ The file format is a modified INI syntax. The general structure is:
 [section2]
   option3 = some third value
 ```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/uptime.md b/src/collectors/freebsd.plugin/integrations/uptime.md
index 58ad767ec..cc2e7091e 100644
--- a/src/collectors/freebsd.plugin/integrations/uptime.md
+++ b/src/collectors/freebsd.plugin/integrations/uptime.md
@@ -95,8 +95,8 @@ The file format is a modified INI syntax. The general structure is:
 [section2]
   option3 = some third value
 ```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/vm.loadavg.md b/src/collectors/freebsd.plugin/integrations/vm.loadavg.md
index f6ae59e7e..4c97eda2e 100644
--- a/src/collectors/freebsd.plugin/integrations/vm.loadavg.md
+++ b/src/collectors/freebsd.plugin/integrations/vm.loadavg.md
@@ -103,8 +103,8 @@ The file format is a modified INI syntax. The general structure is:
 [section2]
   option3 = some third value
 ```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/vm.stats.sys.v_intr.md b/src/collectors/freebsd.plugin/integrations/vm.stats.sys.v_intr.md
index 7f1d88ed7..2d28bbf75 100644
--- a/src/collectors/freebsd.plugin/integrations/vm.stats.sys.v_intr.md
+++ b/src/collectors/freebsd.plugin/integrations/vm.stats.sys.v_intr.md
@@ -95,8 +95,8 @@ The file format is a modified INI syntax. The general structure is:
 [section2]
   option3 = some third value
 ```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/vm.stats.sys.v_soft.md b/src/collectors/freebsd.plugin/integrations/vm.stats.sys.v_soft.md
index baa102d2c..670ad88a0 100644
--- a/src/collectors/freebsd.plugin/integrations/vm.stats.sys.v_soft.md
+++ b/src/collectors/freebsd.plugin/integrations/vm.stats.sys.v_soft.md
@@ -95,8 +95,8 @@ The file format is a modified INI syntax. The general structure is:
 [section2]
   option3 = some third value
 ```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/vm.stats.sys.v_swtch.md b/src/collectors/freebsd.plugin/integrations/vm.stats.sys.v_swtch.md
index 569f50ed6..7cd30fea1 100644
--- a/src/collectors/freebsd.plugin/integrations/vm.stats.sys.v_swtch.md
+++ b/src/collectors/freebsd.plugin/integrations/vm.stats.sys.v_swtch.md
@@ -96,8 +96,8 @@ The file format is a modified INI syntax. The general structure is:
 [section2]
   option3 = some third value
 ```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/vm.stats.vm.v_pgfaults.md b/src/collectors/freebsd.plugin/integrations/vm.stats.vm.v_pgfaults.md
index a99e24df9..4fcf3433e 100644
--- a/src/collectors/freebsd.plugin/integrations/vm.stats.vm.v_pgfaults.md
+++ b/src/collectors/freebsd.plugin/integrations/vm.stats.vm.v_pgfaults.md
@@ -95,8 +95,8 @@ The file format is a modified INI syntax. The general structure is:
 [section2]
   option3 = some third value
 ```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/vm.stats.vm.v_swappgs.md b/src/collectors/freebsd.plugin/integrations/vm.stats.vm.v_swappgs.md
index fd595e2cc..8613fe30e 100644
--- a/src/collectors/freebsd.plugin/integrations/vm.stats.vm.v_swappgs.md
+++ b/src/collectors/freebsd.plugin/integrations/vm.stats.vm.v_swappgs.md
@@ -100,8 +100,8 @@ The file format is a modified INI syntax. The general structure is:
 [section2]
   option3 = some third value
 ```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/vm.swap_info.md b/src/collectors/freebsd.plugin/integrations/vm.swap_info.md
index a92689a15..978d067c3 100644
--- a/src/collectors/freebsd.plugin/integrations/vm.swap_info.md
+++ b/src/collectors/freebsd.plugin/integrations/vm.swap_info.md
@@ -100,8 +100,8 @@ The file format is a modified INI syntax. The general structure is:
 [section2]
   option3 = some third value
 ```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/vm.vmtotal.md b/src/collectors/freebsd.plugin/integrations/vm.vmtotal.md
index 3b3955de4..69c98a7f2 100644
--- a/src/collectors/freebsd.plugin/integrations/vm.vmtotal.md
+++ b/src/collectors/freebsd.plugin/integrations/vm.vmtotal.md
@@ -102,8 +102,8 @@ The file format is a modified INI syntax. The general structure is:
 [section2]
   option3 = some third value
 ```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/collectors/freebsd.plugin/integrations/zfs.md b/src/collectors/freebsd.plugin/integrations/zfs.md index d34a5c5ca..4f0538450 100644 --- a/src/collectors/freebsd.plugin/integrations/zfs.md +++ b/src/collectors/freebsd.plugin/integrations/zfs.md @@ -127,8 +127,8 @@ The file format is a modified INI syntax. The general structure is: [section2] option3 = some third value ``` -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/collectors/freebsd.plugin/plugin_freebsd.c b/src/collectors/freebsd.plugin/plugin_freebsd.c index 10f7e66b9..225534373 100644 --- a/src/collectors/freebsd.plugin/plugin_freebsd.c +++ b/src/collectors/freebsd.plugin/plugin_freebsd.c @@ -105,14 +105,13 @@ void *freebsd_main(void *ptr) worker_register_job_name(i, freebsd_modules[i].dim); } - usec_t step = localhost->rrd_update_every * USEC_PER_SEC; heartbeat_t hb; - heartbeat_init(&hb); + heartbeat_init(&hb, localhost->rrd_update_every * USEC_PER_SEC); while(service_running(SERVICE_COLLECTORS)) { worker_is_idle(); - usec_t hb_dt = heartbeat_next(&hb, step); + usec_t hb_dt = heartbeat_next(&hb); if (!service_running(SERVICE_COLLECTORS)) break; diff --git a/src/collectors/freeipmi.plugin/freeipmi_plugin.c b/src/collectors/freeipmi.plugin/freeipmi_plugin.c index 38fb1d19b..a0eb0783d 100644 --- a/src/collectors/freeipmi.plugin/freeipmi_plugin.c +++ b/src/collectors/freeipmi.plugin/freeipmi_plugin.c @@ -1240,9 +1240,9 @@ void *netdata_ipmi_collection_thread(void *ptr) { usec_t step = t->freq_s * USEC_PER_SEC; heartbeat_t hb; - heartbeat_init(&hb); + heartbeat_init(&hb, step); while(++iteration) { - heartbeat_next(&hb, step); + heartbeat_next(&hb); if(t->debug) fprintf(stderr, "%s: calling netdata_ipmi_collect_data() for %s\n", @@ -1488,7 +1488,7 @@ static void freeimi_function_sensors(const char *transaction, char *function __m char function_copy[strlen(function) + 1]; memcpy(function_copy, function, sizeof(function_copy)); char *words[1024]; - size_t num_words = quoted_strings_splitter_pluginsd(function_copy, words, 1024); + size_t num_words = quoted_strings_splitter_whitespace(function_copy, words, 1024); for(size_t i = 1; i < num_words ;i++) { char *param = get_word(words, num_words, i); if(strcmp(param, "info") == 0) { @@ -1629,7 +1629,10 @@ close_and_send: buffer_json_member_add_time_t(wb, "expires", now_s + update_every); buffer_json_finalize(wb); - pluginsd_function_result_to_stdout(transaction, HTTP_RESP_OK, "application/json", now_s + update_every, wb); + wb->response_code = HTTP_RESP_OK; + wb->content_type = CT_APPLICATION_JSON; + wb->expires = now_s + update_every; + pluginsd_function_result_to_stdout(transaction, wb); buffer_free(wb); } @@ -1637,14 +1640,13 @@ close_and_send: // ---------------------------------------------------------------------------- // main, command line arguments parsing -static NORETURN void plugin_exit(int code) { +static void plugin_exit(int 
code) { fflush(stdout); function_plugin_should_exit = true; exit(code); } int main (int argc, char **argv) { - clocks_init(); nd_log_initialize_for_external_plugins("freeipmi.plugin"); netdata_threads_init_for_external_plugins(0); // set the default threads stack size here @@ -1997,15 +1999,13 @@ int main (int argc, char **argv) { time_t started_t = now_monotonic_sec(); size_t iteration = 0; - usec_t step = 100 * USEC_PER_MS; bool global_chart_created = false; bool tty = isatty(fileno(stdout)) == 1; heartbeat_t hb; - heartbeat_init(&hb); - + heartbeat_init(&hb, update_every * USEC_PER_SEC); for(iteration = 0; 1 ; iteration++) { - usec_t dt = heartbeat_next(&hb, step); + usec_t dt = heartbeat_next(&hb); if (!tty) { netdata_mutex_lock(&stdout_mutex); @@ -2024,7 +2024,6 @@ int main (int argc, char **argv) { switch(state.sensors.status) { case ICS_RUNNING: - step = update_every * USEC_PER_SEC; if(state.sensors.last_iteration_ut < now_monotonic_usec() - IPMI_RESTART_IF_SENSORS_DONT_ITERATE_EVERY_SECONDS * USEC_PER_SEC) { collector_error("%s(): sensors have not be collected for %zu seconds. Exiting to restart.", __FUNCTION__, (size_t)((now_monotonic_usec() - state.sensors.last_iteration_ut) / USEC_PER_SEC)); @@ -2041,11 +2040,13 @@ int main (int argc, char **argv) { collector_error("%s(): sensors failed to initialize. Calling DISABLE.", __FUNCTION__); fprintf(stdout, "DISABLE\n"); plugin_exit(0); + break; case ICS_FAILED: collector_error("%s(): sensors fails repeatedly to collect metrics. Exiting to restart.", __FUNCTION__); fprintf(stdout, "EXIT\n"); plugin_exit(0); + break; } if(netdata_do_sel) { diff --git a/src/collectors/freeipmi.plugin/integrations/intelligent_platform_management_interface_ipmi.md b/src/collectors/freeipmi.plugin/integrations/intelligent_platform_management_interface_ipmi.md index 9bd75f975..284db5199 100644 --- a/src/collectors/freeipmi.plugin/integrations/intelligent_platform_management_interface_ipmi.md +++ b/src/collectors/freeipmi.plugin/integrations/intelligent_platform_management_interface_ipmi.md @@ -143,8 +143,8 @@ The file format is a modified INI syntax. The general structure is: [section2] option3 = some third value ``` -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/collectors/idlejitter.plugin/integrations/idle_os_jitter.md b/src/collectors/idlejitter.plugin/integrations/idle_os_jitter.md index d5baa094a..0cb3a6e0b 100644 --- a/src/collectors/idlejitter.plugin/integrations/idle_os_jitter.md +++ b/src/collectors/idlejitter.plugin/integrations/idle_os_jitter.md @@ -96,8 +96,8 @@ The file format is a modified INI syntax. The general structure is: [section2] option3 = some third value ``` -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -110,7 +110,7 @@ This integration only supports a single configuration option, and most users wil | Name | Description | Default | Required | |:----|:-----------|:-------|:--------:| -| loop time in ms | Specifies the target time for the data collection thread to sleep, measured in miliseconds. | 20 | no | +| loop time | Specifies the target time for the data collection thread to sleep, measured in milliseconds. | 20ms | no | #### Examples There are no configuration examples. diff --git a/src/collectors/idlejitter.plugin/metadata.yaml b/src/collectors/idlejitter.plugin/metadata.yaml index 0ad946994..7c49a6ec7 100644 --- a/src/collectors/idlejitter.plugin/metadata.yaml +++ b/src/collectors/idlejitter.plugin/metadata.yaml @@ -55,10 +55,10 @@ modules: title: '' enabled: false list: - - name: loop time in ms + - name: loop time description: > Specifies the target time for the data collection thread to sleep, measured in miliseconds. - default_value: 20 + default_value: 20ms required: false examples: folding: diff --git a/src/collectors/idlejitter.plugin/plugin_idlejitter.c b/src/collectors/idlejitter.plugin/plugin_idlejitter.c index 99645b1d2..2a212a669 100644 --- a/src/collectors/idlejitter.plugin/plugin_idlejitter.c +++ b/src/collectors/idlejitter.plugin/plugin_idlejitter.c @@ -22,9 +22,9 @@ void *cpuidlejitter_main(void *ptr) { worker_register("IDLEJITTER"); worker_register_job_name(0, "measurements"); - usec_t sleep_ut = config_get_number("plugin:idlejitter", "loop time in ms", CPU_IDLEJITTER_SLEEP_TIME_MS) * USEC_PER_MS; + usec_t sleep_ut = config_get_duration_ms("plugin:idlejitter", "loop time", CPU_IDLEJITTER_SLEEP_TIME_MS) * USEC_PER_MS; if(sleep_ut <= 0) { - config_set_number("plugin:idlejitter", "loop time in ms", CPU_IDLEJITTER_SLEEP_TIME_MS); + config_set_duration_ms("plugin:idlejitter", "loop time", CPU_IDLEJITTER_SLEEP_TIME_MS); sleep_ut = CPU_IDLEJITTER_SLEEP_TIME_MS * USEC_PER_MS; } diff --git a/src/collectors/ioping.plugin/integrations/ioping.md b/src/collectors/ioping.plugin/integrations/ioping.md index 24630ae39..c3a697a1a 100644 --- a/src/collectors/ioping.plugin/integrations/ioping.md +++ b/src/collectors/ioping.plugin/integrations/ioping.md @@ -94,8 +94,8 @@ You can install the command by passing the argument `install` to the plugin (`/u The configuration file name for this integration is `ioping.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
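A note on the heartbeat hunks earlier in this patch (`freebsd.plugin` and `freeipmi.plugin`): the tick interval moves from each `heartbeat_next(&hb, step)` call into `heartbeat_init()`. A minimal sketch of the new calling convention, using only the two signatures visible in these hunks; the loop body and `do_collection()` are hypothetical:

```c
// Sketch only: the step is now bound once, when the heartbeat is initialized.
heartbeat_t hb;
heartbeat_init(&hb, update_every * USEC_PER_SEC);

while (service_running(SERVICE_COLLECTORS)) {
    // sleeps until the next tick and returns the microseconds actually elapsed
    usec_t dt = heartbeat_next(&hb);
    do_collection(dt);   // hypothetical per-iteration work
}
```

This matches the pattern the diff applies uniformly: collectors that previously adjusted the step per call (as freeipmi did with its initial 100ms step) now re-initialize the heartbeat instead.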
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/collectors/log2journal/README.md b/src/collectors/log2journal/README.md index 9807b33ee..d9764d5d5 100644 --- a/src/collectors/log2journal/README.md +++ b/src/collectors/log2journal/README.md @@ -1,4 +1,3 @@ - # log2journal `log2journal` and `systemd-cat-native` can be used to convert a structured log file, such as the ones generated by web servers, into `systemd-journal` entries. @@ -11,7 +10,6 @@ The result is like this: nginx logs into systemd-journal: ![image](https://github.com/netdata/netdata/assets/2662304/16b471ff-c5a1-4fcc-bcd5-83551e089f6c) - The overall process looks like this: ```bash @@ -23,7 +21,8 @@ tail -F /var/log/nginx/*.log |\ # outputs log lines These are the steps: 1. `tail -F /var/log/nginx/*.log`
this command will tail all `*.log` files in `/var/log/nginx/`. We use `-F` instead of `-f` to ensure that files will still be tailed after log rotation. -2. `log2joural` is a Netdata program. It reads log entries and extracts fields, according to the PCRE2 pattern it accepts. It can also apply some basic operations on the fields, like injecting new fields or duplicating existing ones or rewriting their values. The output of `log2journal` is in Systemd Journal Export Format, and it looks like this: +2. `log2journal` is a Netdata program. It reads log entries and extracts fields, according to the PCRE2 pattern it accepts. It can also apply some basic operations on the fields, like injecting new fields or duplicating existing ones or rewriting their values. The output of `log2journal` is in Systemd Journal Export Format, and it looks like this: + ```bash KEY1=VALUE1 # << start of the first log line KEY2=VALUE2 @@ -31,8 +30,8 @@ These are the steps: KEY1=VALUE1 # << start of the second log line KEY2=VALUE2 ``` -3. `systemd-cat-native` is a Netdata program. I can send the logs to a local `systemd-journald` (journal namespaces supported), or to a remote `systemd-journal-remote`. +3. `systemd-cat-native` is a Netdata program. It can send the logs to a local `systemd-journald` (journal namespaces supported), or to a remote `systemd-journal-remote`. ## Processing pipeline @@ -44,19 +43,19 @@ The sequence of processing in Netdata's `log2journal` is designed to methodicall 2. **Extract Fields and Values**
Based on the input format (JSON, logfmt, or custom pattern), it extracts fields and their values from each log line. In the case of JSON and logfmt, it automatically extracts all fields. For custom patterns, it uses PCRE2 regular expressions, and fields are extracted based on sub-expressions defined in the pattern. -3. **Transliteration**
+3. **Transliteration** (see the sketch after this list)
Extracted fields are transliterated to the limited character set accepted by systemd-journal: capitals A-Z, digits 0-9, underscores. 4. **Apply Optional Prefix**
If a prefix is specified, it is added to all keys. This happens before any other processing so that all subsequent matches and manipulations take the prefix into account. -5. **Rename Fields**
+5. **Rename Fields**
Renames fields as specified in the configuration. This is used to change the names of the fields to match desired or required naming conventions. 6. **Inject New Fields**
New fields are injected into the log data. This can include constants or values derived from other fields, using variable substitution. -7. **Rewrite Field Values**
+7. **Rewrite Field Values**
Applies rewriting rules to alter the values of the fields. This can involve complex transformations, including regular expressions and variable substitutions. The rewrite rules can also inject new fields into the data. 8. **Filter Fields**
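As a concrete illustration of step 3 above, this is roughly what "transliterated to the limited character set accepted by systemd-journal" means. A hypothetical sketch only, not the actual `log2journal` code:

```c
#include <ctype.h>

// Illustrative transliteration per step 3: letters become capitals,
// digits are kept, and every other character becomes an underscore.
static void transliterate_key(char *key) {
    for (; *key; key++) {
        if (isalpha((unsigned char)*key))
            *key = (char)toupper((unsigned char)*key);
        else if (!isdigit((unsigned char)*key))
            *key = '_';
    }
}
// e.g. "remote_addr" -> "REMOTE_ADDR", "time-local" -> "TIME_LOCAL"
```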
@@ -81,7 +80,7 @@ We have an nginx server logging in this standard combined log format: First, let's find the right pattern for `log2journal`. We ask ChatGPT: -``` +```text My nginx log uses this log format: log_format access '$remote_addr - $remote_user [$time_local] ' @@ -122,11 +121,11 @@ ChatGPT replies with this: Let's see what the above says: 1. `(?x)`: enable PCRE2 extended mode. In this mode spaces and newlines in the pattern are ignored. To match a space you have to use `\s`. This mode allows us to split the pattern is multiple lines and add comments to it. -1. `^`: match the beginning of the line -2. `(?flags & HK_HASHTABLE_ALLOCATED) + txt_l2j_cleanup(&k->value); + else + k->hashtable_ptr = NULL; + + freez((void *)k->key); + k->key = NULL; + k->len = 0; + k->hash = 0; + k->flags = HK_NONE; +} + +static inline void hashed_key_set(HASHED_KEY *k, const char *name, int32_t len) { + hashed_key_cleanup(k); + + if(len == -1) { + k->key = strdupz(name); + k->len = strlen(k->key); + } + else { + k->key = strndupz(name, len); + k->len = len; + } + + k->hash = XXH3_64bits(k->key, k->len); + k->flags = HK_NONE; +} + +static inline bool hashed_keys_match(HASHED_KEY *k1, HASHED_KEY *k2) { + return ((k1 == k2) || (k1->hash == k2->hash && strcmp(k1->key, k2->key) == 0)); +} + +static inline int compare_keys(struct hashed_key *k1, struct hashed_key *k2) { + return strcmp(k1->key, k2->key); +} + +#endif //NETDATA_LOG2JOURNAL_HASHED_KEY_H diff --git a/src/collectors/log2journal/log2journal-help.c b/src/collectors/log2journal/log2journal-help.c index 23ff4c056..0cb35bb0f 100644 --- a/src/collectors/log2journal/log2journal-help.c +++ b/src/collectors/log2journal/log2journal-help.c @@ -10,7 +10,7 @@ static void config_dir_print_available(void) { dir = opendir(path); if (dir == NULL) { - log2stderr(" >>> Cannot open directory:\n %s", path); + l2j_log(" >>> Cannot open directory:\n %s", path); return; } diff --git a/src/collectors/log2journal/log2journal-inject.c b/src/collectors/log2journal/log2journal-inject.c index 45158066b..f1a70ac8b 100644 --- a/src/collectors/log2journal/log2journal-inject.c +++ b/src/collectors/log2journal/log2journal-inject.c @@ -9,12 +9,13 @@ void injection_cleanup(INJECTION *inj) { static inline bool log_job_injection_replace(INJECTION *inj, const char *key, size_t key_len, const char *value, size_t value_len) { if(key_len > JOURNAL_MAX_KEY_LEN) - log2stderr("WARNING: injection key '%.*s' is too long for journal. Will be truncated.", (int)key_len, key); + l2j_log("WARNING: injection key '%.*s' is too long for journal. Will be truncated.", (int)key_len, key); if(value_len > JOURNAL_MAX_VALUE_LEN) - log2stderr("WARNING: injection value of key '%.*s' is too long for journal. Will be truncated.", (int)key_len, key); + l2j_log( + "WARNING: injection value of key '%.*s' is too long for journal. Will be truncated.", (int)key_len, key); - hashed_key_len_set(&inj->key, key, key_len); + hashed_key_set(&inj->key, key, key_len); char *v = strndupz(value, value_len); bool ret = replace_pattern_set(&inj->value, v); freez(v); @@ -25,13 +26,13 @@ static inline bool log_job_injection_replace(INJECTION *inj, const char *key, si bool log_job_injection_add(LOG_JOB *jb, const char *key, size_t key_len, const char *value, size_t value_len, bool unmatched) { if (unmatched) { if (jb->unmatched.injections.used >= MAX_INJECTIONS) { - log2stderr("Error: too many unmatched injections. You can inject up to %d lines.", MAX_INJECTIONS); + l2j_log("Error: too many unmatched injections. 
You can inject up to %d lines.", MAX_INJECTIONS); return false; } } else { if (jb->injections.used >= MAX_INJECTIONS) { - log2stderr("Error: too many injections. You can inject up to %d lines.", MAX_INJECTIONS); + l2j_log("Error: too many injections. You can inject up to %d lines.", MAX_INJECTIONS); return false; } } diff --git a/src/collectors/log2journal/log2journal-params.c b/src/collectors/log2journal/log2journal-params.c index a7bb3e263..a56d1711e 100644 --- a/src/collectors/log2journal/log2journal-params.c +++ b/src/collectors/log2journal/log2journal-params.c @@ -7,7 +7,7 @@ void log_job_init(LOG_JOB *jb) { memset(jb, 0, sizeof(*jb)); simple_hashtable_init_KEY(&jb->hashtable, 32); - hashed_key_set(&jb->line.key, "LINE"); + hashed_key_set(&jb->line.key, "LINE", -1); } static void simple_hashtable_cleanup_allocated_keys(SIMPLE_HASHTABLE_KEY *ht) { @@ -47,8 +47,14 @@ void log_job_cleanup(LOG_JOB *jb) { for(size_t i = 0; i < jb->rewrites.used; i++) rewrite_cleanup(&jb->rewrites.array[i]); - txt_cleanup(&jb->rewrites.tmp); - txt_cleanup(&jb->filename.current); + search_pattern_cleanup(&jb->filter.include); + search_pattern_cleanup(&jb->filter.exclude); + + hashed_key_cleanup(&jb->filename.key); + hashed_key_cleanup(&jb->unmatched.key); + + txt_l2j_cleanup(&jb->rewrites.tmp); + txt_l2j_cleanup(&jb->filename.current); simple_hashtable_cleanup_allocated_keys(&jb->hashtable); simple_hashtable_destroy_KEY(&jb->hashtable); @@ -61,18 +67,18 @@ void log_job_cleanup(LOG_JOB *jb) { bool log_job_filename_key_set(LOG_JOB *jb, const char *key, size_t key_len) { if(!key || !*key) { - log2stderr("filename key cannot be empty."); + l2j_log("filename key cannot be empty."); return false; } - hashed_key_len_set(&jb->filename.key, key, key_len); + hashed_key_set(&jb->filename.key, key, key_len); return true; } bool log_job_key_prefix_set(LOG_JOB *jb, const char *prefix, size_t prefix_len) { if(!prefix || !*prefix) { - log2stderr("filename key cannot be empty."); + l2j_log("key prefix cannot be empty."); return false; } @@ -86,7 +92,7 @@ bool log_job_key_prefix_set(LOG_JOB *jb, const char *prefix, size_t prefix_len) bool log_job_pattern_set(LOG_JOB *jb, const char *pattern, size_t pattern_len) { if(!pattern || !*pattern) { - log2stderr("filename key cannot be empty."); + l2j_log("pattern cannot be empty."); return false; } @@ -100,12 +106,12 @@ bool log_job_pattern_set(LOG_JOB *jb, const char *pattern, size_t pattern_len) { bool log_job_include_pattern_set(LOG_JOB *jb, const char *pattern, size_t pattern_len) { if(jb->filter.include.re) { - log2stderr("FILTER INCLUDE: there is already an include filter set"); + l2j_log("FILTER INCLUDE: there is already an include filter set"); return false; } if(!search_pattern_set(&jb->filter.include, pattern, pattern_len)) { - log2stderr("FILTER INCLUDE: failed: %s", jb->filter.include.error.txt); + l2j_log("FILTER INCLUDE: failed: %s", jb->filter.include.error.txt); return false; } @@ -114,12 +120,12 @@ bool log_job_include_pattern_set(LOG_JOB *jb, const char *pattern, size_t patter bool log_job_exclude_pattern_set(LOG_JOB *jb, const char *pattern, size_t pattern_len) { if(jb->filter.exclude.re) { - log2stderr("FILTER INCLUDE: there is already an exclude filter set"); + l2j_log("FILTER EXCLUDE: there is already an exclude filter set"); return false; } if(!search_pattern_set(&jb->filter.exclude, pattern, pattern_len)) { - log2stderr("FILTER EXCLUDE: failed: %s", jb->filter.exclude.error.txt); + l2j_log("FILTER EXCLUDE: failed: %s", jb->filter.exclude.error.txt);
return false; } @@ -132,7 +138,7 @@ static bool parse_rename(LOG_JOB *jb, const char *param) { // Search for '=' in param const char *equal_sign = strchr(param, '='); if (!equal_sign || equal_sign == param) { - log2stderr("Error: Invalid rename format, '=' not found in %s", param); + l2j_log("Error: Invalid rename format, '=' not found in %s", param); return false; } @@ -210,7 +216,7 @@ RW_FLAGS parse_rewrite_flags(const char *options) { } if(!found) - log2stderr("Warning: rewrite options '%s' is not understood.", token); + l2j_log("Warning: rewrite options '%s' is not understood.", token); // Get the next token token = strtok(NULL, ","); @@ -226,33 +232,33 @@ static bool parse_rewrite(LOG_JOB *jb, const char *param) { // Search for '=' in param const char *equal_sign = strchr(param, '='); if (!equal_sign || equal_sign == param) { - log2stderr("Error: Invalid rewrite format, '=' not found in %s", param); + l2j_log("Error: Invalid rewrite format, '=' not found in %s", param); return false; } // Get the next character as the separator char separator = *(equal_sign + 1); if (!separator || !is_symbol(separator)) { - log2stderr("Error: rewrite separator not found after '=', or is not one of /\\|-# in: %s", param); + l2j_log("Error: rewrite separator not found after '=', or is not one of /\\|-# in: %s", param); return false; } // Find the next occurrence of the separator const char *second_separator = strchr(equal_sign + 2, separator); if (!second_separator) { - log2stderr("Error: rewrite second separator not found in: %s", param); + l2j_log("Error: rewrite second separator not found in: %s", param); return false; } // Check if the search pattern is empty if (equal_sign + 1 == second_separator) { - log2stderr("Error: rewrite search pattern is empty in: %s", param); + l2j_log("Error: rewrite search pattern is empty in: %s", param); return false; } // Check if the replacement pattern is empty if (*(second_separator + 1) == '\0') { - log2stderr("Error: rewrite replacement pattern is empty in: %s", param); + l2j_log("Error: rewrite replacement pattern is empty in: %s", param); return false; } @@ -281,7 +287,7 @@ static bool parse_rewrite(LOG_JOB *jb, const char *param) { static bool parse_inject(LOG_JOB *jb, const char *value, bool unmatched) { const char *equal = strchr(value, '='); if (!equal) { - log2stderr("Error: injection '%s' does not have an equal sign.", value); + l2j_log("Error: injection '%s' does not have an equal sign.", value); return false; } @@ -330,7 +336,10 @@ bool log_job_command_line_parse_parameters(LOG_JOB *jb, int argc, char **argv) { log_job_pattern_set(jb, arg, strlen(arg)); continue; } else { - log2stderr("Error: Multiple patterns detected. Specify only one pattern. The first is '%s', the second is '%s'", jb->pattern, arg); + l2j_log( + "Error: Multiple patterns detected. Specify only one pattern. The first is '%s', the second is '%s'", + jb->pattern, + arg); return false; } } @@ -355,7 +364,7 @@ bool log_job_command_line_parse_parameters(LOG_JOB *jb, int argc, char **argv) { } #endif else if (strcmp(param, "--unmatched-key") == 0) - hashed_key_set(&jb->unmatched.key, value); + hashed_key_set(&jb->unmatched.key, value, -1); else if (strcmp(param, "--inject") == 0) { if (!parse_inject(jb, value, false)) return false; @@ -386,7 +395,10 @@ bool log_job_command_line_parse_parameters(LOG_JOB *jb, int argc, char **argv) { log_job_pattern_set(jb, arg, strlen(arg)); continue; } else { - log2stderr("Error: Multiple patterns detected. Specify only one pattern. 
The first is '%s', the second is '%s'", jb->pattern, arg); + l2j_log( + "Error: Multiple patterns detected. Specify only one pattern. The first is '%s', the second is '%s'", + jb->pattern, + arg); return false; } } @@ -395,7 +407,7 @@ bool log_job_command_line_parse_parameters(LOG_JOB *jb, int argc, char **argv) { // Check if a pattern is set and exactly one pattern is specified if (!jb->pattern) { - log2stderr("Warning: pattern not specified. Try the default config with: -c default"); + l2j_log("Warning: pattern not specified. Try the default config with: -c default"); log_job_command_line_help(argv[0]); return false; } diff --git a/src/collectors/log2journal/log2journal-pattern.c b/src/collectors/log2journal/log2journal-pattern.c index 4b7e9026b..158ac1129 100644 --- a/src/collectors/log2journal/log2journal-pattern.c +++ b/src/collectors/log2journal/log2journal-pattern.c @@ -18,13 +18,13 @@ void search_pattern_cleanup(SEARCH_PATTERN *sp) { sp->match_data = NULL; } - txt_cleanup(&sp->error); + txt_l2j_cleanup(&sp->error); } static void pcre2_error_message(SEARCH_PATTERN *sp, int rc, int pos) { char msg[1024]; pcre2_get_error_in_buffer(msg, sizeof(msg), rc, pos); - txt_replace(&sp->error, msg, strlen(msg)); + txt_l2j_set(&sp->error, msg, strlen(msg)); } static inline bool compile_pcre2(SEARCH_PATTERN *sp) { diff --git a/src/collectors/log2journal/log2journal-pcre2.c b/src/collectors/log2journal/log2journal-pcre2.c index 185e69108..77f804cc8 100644 --- a/src/collectors/log2journal/log2journal-pcre2.c +++ b/src/collectors/log2journal/log2journal-pcre2.c @@ -102,8 +102,15 @@ PCRE2_STATE *pcre2_parser_create(LOG_JOB *jb) { } void pcre2_parser_destroy(PCRE2_STATE *pcre2) { - if(pcre2) + if(pcre2) { + if(pcre2->re) + pcre2_code_free(pcre2->re); + + if(pcre2->match_data) + pcre2_match_data_free(pcre2->match_data); + freez(pcre2); + } } const char *pcre2_parser_error(PCRE2_STATE *pcre2) { diff --git a/src/collectors/log2journal/log2journal-rename.c b/src/collectors/log2journal/log2journal-rename.c index c6975779f..11b3d2178 100644 --- a/src/collectors/log2journal/log2journal-rename.c +++ b/src/collectors/log2journal/log2journal-rename.c @@ -9,13 +9,13 @@ void rename_cleanup(RENAME *rn) { bool log_job_rename_add(LOG_JOB *jb, const char *new_key, size_t new_key_len, const char *old_key, size_t old_key_len) { if(jb->renames.used >= MAX_RENAMES) { - log2stderr("Error: too many renames. You can rename up to %d fields.", MAX_RENAMES); + l2j_log("Error: too many renames. 
You can rename up to %d fields.", MAX_RENAMES); return false; } RENAME *rn = &jb->renames.array[jb->renames.used++]; - hashed_key_len_set(&rn->new_key, new_key, new_key_len); - hashed_key_len_set(&rn->old_key, old_key, old_key_len); + hashed_key_set(&rn->new_key, new_key, new_key_len); + hashed_key_set(&rn->old_key, old_key, old_key_len); return true; } diff --git a/src/collectors/log2journal/log2journal-replace.c b/src/collectors/log2journal/log2journal-replace.c index 7075d109d..66ba48d9f 100644 --- a/src/collectors/log2journal/log2journal-replace.c +++ b/src/collectors/log2journal/log2journal-replace.c @@ -26,7 +26,7 @@ static REPLACE_NODE *replace_pattern_add_node(REPLACE_NODE **head, bool is_varia if (!new_node) return NULL; - hashed_key_set(&new_node->name, text); + hashed_key_set(&new_node->name, text, -1); new_node->is_variable = is_variable; new_node->next = NULL; @@ -57,21 +57,21 @@ bool replace_pattern_set(REPLACE_PATTERN *rp, const char *pattern) { // Start of a variable const char *end = strchr(current, '}'); if (!end) { - log2stderr("Error: Missing closing brace in replacement pattern: %s", rp->pattern); + l2j_log("Error: Missing closing brace in replacement pattern: %s", rp->pattern); return false; } size_t name_length = end - current - 2; // Length of the variable name char *variable_name = strndupz(current + 2, name_length); if (!variable_name) { - log2stderr("Error: Memory allocation failed for variable name."); + l2j_log("Error: Memory allocation failed for variable name."); return false; } REPLACE_NODE *node = replace_pattern_add_node(&(rp->nodes), true, variable_name); if (!node) { freez(variable_name); - log2stderr("Error: Failed to add replacement node for variable."); + l2j_log("Error: Failed to add replacement node for variable."); return false; } freez(variable_name); @@ -88,14 +88,14 @@ bool replace_pattern_set(REPLACE_PATTERN *rp, const char *pattern) { size_t text_length = current - start; char *text = strndupz(start, text_length); if (!text) { - log2stderr("Error: Memory allocation failed for literal text."); + l2j_log("Error: Memory allocation failed for literal text."); return false; } REPLACE_NODE *node = replace_pattern_add_node(&(rp->nodes), false, text); if (!node) { freez(text); - log2stderr("Error: Failed to add replacement node for text."); + l2j_log("Error: Failed to add replacement node for text."); return false; } freez(text); diff --git a/src/collectors/log2journal/log2journal-rewrite.c b/src/collectors/log2journal/log2journal-rewrite.c index 112391bf0..0c9a8ddea 100644 --- a/src/collectors/log2journal/log2journal-rewrite.c +++ b/src/collectors/log2journal/log2journal-rewrite.c @@ -7,6 +7,7 @@ void rewrite_cleanup(REWRITE *rw) { if(rw->flags & RW_MATCH_PCRE2) search_pattern_cleanup(&rw->match_pcre2); + else if(rw->flags & RW_MATCH_NON_EMPTY) replace_pattern_cleanup(&rw->match_non_empty); @@ -16,19 +17,19 @@ void rewrite_cleanup(REWRITE *rw) { bool log_job_rewrite_add(LOG_JOB *jb, const char *key, RW_FLAGS flags, const char *search_pattern, const char *replace_pattern) { if(jb->rewrites.used >= MAX_REWRITES) { - log2stderr("Error: too many rewrites. You can add up to %d rewrite rules.", MAX_REWRITES); + l2j_log("Error: too many rewrites. 
You can add up to %d rewrite rules.", MAX_REWRITES); return false; } if((flags & (RW_MATCH_PCRE2|RW_MATCH_NON_EMPTY)) && (!search_pattern || !*search_pattern)) { - log2stderr("Error: rewrite for key '%s' does not specify a search pattern.", key); + l2j_log("Error: rewrite for key '%s' does not specify a search pattern.", key); return false; } REWRITE *rw = &jb->rewrites.array[jb->rewrites.used++]; rw->flags = flags; - hashed_key_set(&rw->key, key); + hashed_key_set(&rw->key, key, -1); if((flags & RW_MATCH_PCRE2) && !search_pattern_set(&rw->match_pcre2, search_pattern, strlen(search_pattern))) { rewrite_cleanup(rw); diff --git a/src/collectors/log2journal/log2journal-txt.h b/src/collectors/log2journal/log2journal-txt.h new file mode 100644 index 000000000..f68b85a3d --- /dev/null +++ b/src/collectors/log2journal/log2journal-txt.h @@ -0,0 +1,90 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_LOG2JOURNAL_TXT_H +#define NETDATA_LOG2JOURNAL_TXT_H + +#include "log2journal.h" + +// ---------------------------------------------------------------------------- +// A dynamically sized, reusable text buffer, +// allowing us to be fast (no allocations during iterations) while having the +// smallest possible allocations. + +typedef struct txt_l2j { + char *txt; + uint32_t size; + uint32_t len; +} TXT_L2J; + +static inline void txt_l2j_cleanup(TXT_L2J *t) { + if(!t) + return; + + if(t->txt) + freez(t->txt); + + t->txt = NULL; + t->size = 0; + t->len = 0; +} + +#define TXT_L2J_ALLOC_ALIGN 1024 + +static inline size_t txt_l2j_compute_new_size(size_t old_size, size_t required_size) { + size_t size = (required_size % TXT_L2J_ALLOC_ALIGN == 0) ? required_size : required_size + TXT_L2J_ALLOC_ALIGN; + size = (size / TXT_L2J_ALLOC_ALIGN) * TXT_L2J_ALLOC_ALIGN; + + if(size < old_size * 2) + size = old_size * 2; + + return size; +} + +static inline void txt_l2j_resize(TXT_L2J *dst, size_t required_size, bool keep) { + if(required_size <= dst->size) + return; + + size_t new_size = txt_l2j_compute_new_size(dst->size, required_size); + + if(keep && dst->txt) + dst->txt = reallocz(dst->txt, new_size); + else { + txt_l2j_cleanup(dst); + dst->txt = mallocz(new_size); + dst->len = 0; + } + + dst->size = new_size; +} + +static inline void txt_l2j_set(TXT_L2J *dst, const char *s, int32_t len) { + if(!s || !*s || len == 0) { + s = ""; + len = 0; + } + + if(len == -1) + len = (int32_t)strlen(s); + + txt_l2j_resize(dst, len + 1, false); + memcpy(dst->txt, s, len); + dst->txt[len] = '\0'; + dst->len = len; +} + +static inline void txt_l2j_append(TXT_L2J *dst, const char *s, int32_t len) { + if(!dst->txt || !dst->len) + txt_l2j_set(dst, s, len); + + else { + if(len == -1) + len = (int32_t)strlen(s); + + txt_l2j_resize(dst, dst->len + len + 1, true); + memcpy(&dst->txt[dst->len], s, len); + dst->len += len; + dst->txt[dst->len] = '\0'; + } +} + +#endif //NETDATA_LOG2JOURNAL_TXT_H diff --git a/src/collectors/log2journal/log2journal-yaml.c b/src/collectors/log2journal/log2journal-yaml.c index e73a469f5..53f83d623 100644 --- a/src/collectors/log2journal/log2journal-yaml.c +++ b/src/collectors/log2journal/log2journal-yaml.c @@ -280,6 +280,8 @@ static bool yaml_parse_constant_field_injection(yaml_parser_t *parser, LOG_JOB * goto cleanup; } + yaml_event_delete(&event); + if (!yaml_parse(parser, &event) || event.type != YAML_SCALAR_EVENT) { yaml_error(parser, &event, "Expected scalar for constant field injection value"); goto cleanup; @@ -315,7 +317,7 @@ static bool yaml_parse_injection_mapping(yaml_parser_t 
*parser, LOG_JOB *jb, boo switch (event.type) { case YAML_SCALAR_EVENT: if (yaml_scalar_matches(&event, "key", strlen("key"))) { - errors += yaml_parse_constant_field_injection(parser, jb, unmatched); + errors += yaml_parse_constant_field_injection(parser, jb, unmatched) ? 1 : 0; } else { yaml_error(parser, &event, "Unexpected scalar in injection mapping"); errors++; @@ -396,7 +398,8 @@ static size_t yaml_parse_unmatched(yaml_parser_t *parser, LOG_JOB *jb) { errors++; } else { if (sub_event.type == YAML_SCALAR_EVENT) { - hashed_key_len_set(&jb->unmatched.key, (char *)sub_event.data.scalar.value, sub_event.data.scalar.length); + hashed_key_set( + &jb->unmatched.key, (char *)sub_event.data.scalar.value, sub_event.data.scalar.length); } else { yaml_error(parser, &sub_event, "expected a scalar value for 'key'"); errors++; @@ -427,6 +430,149 @@ static size_t yaml_parse_unmatched(yaml_parser_t *parser, LOG_JOB *jb) { return errors; } +static bool yaml_parse_scalar_boolean(yaml_parser_t *parser, bool def, const char *where, size_t *errors) { + bool rc = def; + + yaml_event_t value_event; + if (!yaml_parse(parser, &value_event)) { + (*errors)++; + return rc; + } + + if (value_event.type != YAML_SCALAR_EVENT) { + yaml_error(parser, &value_event, "Expected scalar for %s boolean", where); + (*errors)++; + } + else if(strncmp((char*)value_event.data.scalar.value, "yes", 3) == 0 || + strncmp((char*)value_event.data.scalar.value, "true", 4) == 0) + rc = true; + else if(strncmp((char*)value_event.data.scalar.value, "no", 2) == 0 || + strncmp((char*)value_event.data.scalar.value, "false", 5) == 0) + rc = false; + else { + yaml_error(parser, &value_event, "Expected scalar for %s boolean: invalid value %s", where, value_event.data.scalar.value); + rc = def; + } + + yaml_event_delete(&value_event); + return rc; +} + +static bool handle_rewrite_event(yaml_parser_t *parser, yaml_event_t *event, + char **key, char **search_pattern, char **replace_pattern, + RW_FLAGS *flags, bool *mapping_finished, + LOG_JOB *jb, size_t *errors) { + switch (event->type) { + case YAML_SCALAR_EVENT: + if (yaml_scalar_matches(event, "key", strlen("key"))) { + yaml_event_t value_event; + if (!yaml_parse(parser, &value_event)) { + (*errors)++; + return false; + } + + if (value_event.type != YAML_SCALAR_EVENT) { + yaml_error(parser, &value_event, "Expected scalar for rewrite key"); + (*errors)++; + } else { + freez(*key); + *key = strndupz((char *)value_event.data.scalar.value, value_event.data.scalar.length); + } + yaml_event_delete(&value_event); + } + else if (yaml_scalar_matches(event, "match", strlen("match"))) { + yaml_event_t value_event; + if (!yaml_parse(parser, &value_event)) { + (*errors)++; + return false; + } + + if (value_event.type != YAML_SCALAR_EVENT) { + yaml_error(parser, &value_event, "Expected scalar for rewrite match PCRE2 pattern"); + (*errors)++; + } + else { + freez(*search_pattern); + *flags |= RW_MATCH_PCRE2; + *flags &= ~RW_MATCH_NON_EMPTY; + *search_pattern = strndupz((char *)value_event.data.scalar.value, value_event.data.scalar.length); + } + yaml_event_delete(&value_event); + } + else if (yaml_scalar_matches(event, "not_empty", strlen("not_empty"))) { + yaml_event_t value_event; + if (!yaml_parse(parser, &value_event)) { + (*errors)++; + return false; + } + + if (value_event.type != YAML_SCALAR_EVENT) { + yaml_error(parser, &value_event, "Expected scalar for rewrite not empty condition"); + (*errors)++; + } + else { + freez(*search_pattern); + *flags |= RW_MATCH_NON_EMPTY; + *flags &= ~RW_MATCH_PCRE2; + 
*search_pattern = strndupz((char *)value_event.data.scalar.value, value_event.data.scalar.length); + } + yaml_event_delete(&value_event); + } + else if (yaml_scalar_matches(event, "value", strlen("value"))) { + yaml_event_t value_event; + if (!yaml_parse(parser, &value_event)) { + (*errors)++; + return false; + } + + if (value_event.type != YAML_SCALAR_EVENT) { + yaml_error(parser, &value_event, "Expected scalar for rewrite value"); + (*errors)++; + } else { + freez(*replace_pattern); + *replace_pattern = strndupz((char *)value_event.data.scalar.value, value_event.data.scalar.length); + } + yaml_event_delete(&value_event); + } + else if (yaml_scalar_matches(event, "stop", strlen("stop"))) { + if(yaml_parse_scalar_boolean(parser, true, "rewrite stop", errors)) + *flags &= ~RW_DONT_STOP; + else + *flags |= RW_DONT_STOP; + } + else if (yaml_scalar_matches(event, "inject", strlen("inject"))) { + if(yaml_parse_scalar_boolean(parser, false, "rewrite inject", errors)) + *flags |= RW_INJECT; + else + *flags &= ~RW_INJECT; + } + else { + yaml_error(parser, event, "Unexpected scalar in rewrite mapping"); + (*errors)++; + } + break; + + case YAML_MAPPING_END_EVENT: + if(*key) { + if (!log_job_rewrite_add(jb, *key, *flags, *search_pattern, *replace_pattern)) + (*errors)++; + } + + freez(*key); + freez(*search_pattern); + freez(*replace_pattern); + *mapping_finished = true; + break; + + default: + yaml_error(parser, event, "Unexpected event in rewrite mapping"); + (*errors)++; + break; + } + + return true; +} + static size_t yaml_parse_rewrites(yaml_parser_t *parser, LOG_JOB *jb) { size_t errors = 0; @@ -457,120 +603,14 @@ static size_t yaml_parse_rewrites(yaml_parser_t *parser, LOG_JOB *jb) { continue; } - switch (sub_event.type) { - case YAML_SCALAR_EVENT: - if (yaml_scalar_matches(&sub_event, "key", strlen("key"))) { - if (!yaml_parse(parser, &sub_event) || sub_event.type != YAML_SCALAR_EVENT) { - yaml_error(parser, &sub_event, "Expected scalar for rewrite key"); - errors++; - } else { - freez(key); - key = strndupz((char *)sub_event.data.scalar.value, sub_event.data.scalar.length); - yaml_event_delete(&sub_event); - } - } else if (yaml_scalar_matches(&sub_event, "match", strlen("match"))) { - if (!yaml_parse(parser, &sub_event) || sub_event.type != YAML_SCALAR_EVENT) { - yaml_error(parser, &sub_event, "Expected scalar for rewrite match PCRE2 pattern"); - errors++; - } - else { - if(search_pattern) - freez(search_pattern); - flags |= RW_MATCH_PCRE2; - flags &= ~RW_MATCH_NON_EMPTY; - search_pattern = strndupz((char *)sub_event.data.scalar.value, sub_event.data.scalar.length); - yaml_event_delete(&sub_event); - } - } else if (yaml_scalar_matches(&sub_event, "not_empty", strlen("not_empty"))) { - if (!yaml_parse(parser, &sub_event) || sub_event.type != YAML_SCALAR_EVENT) { - yaml_error(parser, &sub_event, "Expected scalar for rewrite not empty condition"); - errors++; - } - else { - if(search_pattern) - freez(search_pattern); - flags |= RW_MATCH_NON_EMPTY; - flags &= ~RW_MATCH_PCRE2; - search_pattern = strndupz((char *)sub_event.data.scalar.value, sub_event.data.scalar.length); - yaml_event_delete(&sub_event); - } - } else if (yaml_scalar_matches(&sub_event, "value", strlen("value"))) { - if (!yaml_parse(parser, &sub_event) || sub_event.type != YAML_SCALAR_EVENT) { - yaml_error(parser, &sub_event, "Expected scalar for rewrite value"); - errors++; - } else { - freez(replace_pattern); - replace_pattern = strndupz((char *)sub_event.data.scalar.value, sub_event.data.scalar.length); - 
yaml_event_delete(&sub_event); - } - } else if (yaml_scalar_matches(&sub_event, "stop", strlen("stop"))) { - if (!yaml_parse(parser, &sub_event) || sub_event.type != YAML_SCALAR_EVENT) { - yaml_error(parser, &sub_event, "Expected scalar for rewrite stop boolean"); - errors++; - } else { - if(strncmp((char*)sub_event.data.scalar.value, "no", 2) == 0 || - strncmp((char*)sub_event.data.scalar.value, "false", 5) == 0) - flags |= RW_DONT_STOP; - else - flags &= ~RW_DONT_STOP; - - yaml_event_delete(&sub_event); - } - } else if (yaml_scalar_matches(&sub_event, "inject", strlen("inject"))) { - if (!yaml_parse(parser, &sub_event) || sub_event.type != YAML_SCALAR_EVENT) { - yaml_error(parser, &sub_event, "Expected scalar for rewrite inject boolean"); - errors++; - } else { - if(strncmp((char*)sub_event.data.scalar.value, "yes", 3) == 0 || - strncmp((char*)sub_event.data.scalar.value, "true", 4) == 0) - flags |= RW_INJECT; - else - flags &= ~RW_INJECT; - - yaml_event_delete(&sub_event); - } - } else { - yaml_error(parser, &sub_event, "Unexpected scalar in rewrite mapping"); - errors++; - } - break; - - case YAML_MAPPING_END_EVENT: - if(key) { - if (!log_job_rewrite_add(jb, key, flags, search_pattern, replace_pattern)) - errors++; - } - - freez(key); - key = NULL; - - freez(search_pattern); - search_pattern = NULL; - - freez(replace_pattern); - replace_pattern = NULL; - - flags = RW_NONE; - - mapping_finished = true; - break; - - default: - yaml_error(parser, &sub_event, "Unexpected event in rewrite mapping"); - errors++; - break; - } + handle_rewrite_event(parser, &sub_event, &key, + &search_pattern, &replace_pattern, + &flags, &mapping_finished, jb, &errors); yaml_event_delete(&sub_event); } - freez(replace_pattern); - replace_pattern = NULL; - freez(search_pattern); - search_pattern = NULL; - freez(key); - key = NULL; - } break; + } case YAML_SEQUENCE_END_EVENT: finished = true; @@ -618,25 +658,36 @@ static size_t yaml_parse_renames(yaml_parser_t *parser, LOG_JOB *jb) { switch (sub_event.type) { case YAML_SCALAR_EVENT: if (yaml_scalar_matches(&sub_event, "new_key", strlen("new_key"))) { - if (!yaml_parse(parser, &sub_event) || sub_event.type != YAML_SCALAR_EVENT) { - yaml_error(parser, &sub_event, "Expected scalar for rename new_key"); + yaml_event_t value_event; + + if (!yaml_parse(parser, &value_event) || value_event.type != YAML_SCALAR_EVENT) { + yaml_error(parser, &value_event, "Expected scalar for rename new_key"); errors++; } else { - hashed_key_len_set(&rn.new_key, (char *)sub_event.data.scalar.value, sub_event.data.scalar.length); - yaml_event_delete(&sub_event); + hashed_key_set( + &rn.new_key, + (char *)value_event.data.scalar.value, + value_event.data.scalar.length); + yaml_event_delete(&value_event); } } else if (yaml_scalar_matches(&sub_event, "old_key", strlen("old_key"))) { - if (!yaml_parse(parser, &sub_event) || sub_event.type != YAML_SCALAR_EVENT) { - yaml_error(parser, &sub_event, "Expected scalar for rename old_key"); + yaml_event_t value_event; + + if (!yaml_parse(parser, &value_event) || value_event.type != YAML_SCALAR_EVENT) { + yaml_error(parser, &value_event, "Expected scalar for rename old_key"); errors++; } else { - hashed_key_len_set(&rn.old_key, (char *)sub_event.data.scalar.value, sub_event.data.scalar.length); - yaml_event_delete(&sub_event); + hashed_key_set( + &rn.old_key, + (char *)value_event.data.scalar.value, + value_event.data.scalar.length); + yaml_event_delete(&value_event); } } else { yaml_error(parser, &sub_event, "Unexpected scalar in rewrite mapping"); 
errors++; } + break; case YAML_MAPPING_END_EVENT: @@ -782,18 +833,22 @@ cleanup: bool yaml_parse_file(const char *config_file_path, LOG_JOB *jb) { if(!config_file_path || !*config_file_path) { - log2stderr("yaml configuration filename cannot be empty."); + l2j_log("yaml configuration filename cannot be empty."); return false; } FILE *fp = fopen(config_file_path, "r"); if (!fp) { - log2stderr("Error opening config file: %s", config_file_path); + l2j_log("Error opening config file: %s", config_file_path); return false; } yaml_parser_t parser; - yaml_parser_initialize(&parser); + if (!yaml_parser_initialize(&parser)) { + fclose(fp); + return false; + } + yaml_parser_set_input_file(&parser, fp); size_t errors = yaml_parse_initialized(&parser, jb); diff --git a/src/collectors/log2journal/log2journal.c b/src/collectors/log2journal/log2journal.c index 0fbba0b0c..769547bc1 100644 --- a/src/collectors/log2journal/log2journal.c +++ b/src/collectors/log2journal/log2journal.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later #include "log2journal.h" +#include "libnetdata/required_dummies.h" // ---------------------------------------------------------------------------- @@ -73,10 +74,13 @@ static inline HASHED_KEY *get_key_from_hashtable(LOG_JOB *jb, HASHED_KEY *k) { ht_key->flags |= HK_COLLISION_CHECKED; if(strcmp(ht_key->key, k->key) != 0) - log2stderr("Hashtable collision detected on key '%s' (hash %lx) and '%s' (hash %lx). " - "Please file a bug report.", ht_key->key, (unsigned long) ht_key->hash, k->key - , (unsigned long) k->hash - ); + l2j_log( + "Hashtable collision detected on key '%s' (hash %lx) and '%s' (hash %lx). " + "Please file a bug report.", + ht_key->key, + (unsigned long)ht_key->hash, + k->key, + (unsigned long)k->hash); } } else { @@ -97,8 +101,9 @@ static inline HASHED_KEY *get_key_from_hashtable(LOG_JOB *jb, HASHED_KEY *k) { static inline HASHED_KEY *get_key_from_hashtable_with_char_ptr(LOG_JOB *jb, const char *key) { HASHED_KEY find = { - .key = key, - .len = strlen(key), + .flags = HK_NONE, + .key = key, + .len = strlen(key), }; find.hash = XXH3_64bits(key, find.len); @@ -109,24 +114,29 @@ static inline HASHED_KEY *get_key_from_hashtable_with_char_ptr(LOG_JOB *jb, cons static inline void validate_key(LOG_JOB *jb __maybe_unused, HASHED_KEY *k) { if(k->len > JOURNAL_MAX_KEY_LEN) - log2stderr("WARNING: key '%s' has length %zu, which is more than %zu, the max systemd-journal allows", - k->key, (size_t)k->len, (size_t)JOURNAL_MAX_KEY_LEN); + l2j_log( + "WARNING: key '%s' has length %zu, which is more than %zu, the max systemd-journal allows", + k->key, + (size_t)k->len, + (size_t)JOURNAL_MAX_KEY_LEN); for(size_t i = 0; i < k->len ;i++) { char c = k->key[i]; if((c < 'A' || c > 'Z') && !isdigit(c) && c != '_') { - log2stderr("WARNING: key '%s' contains characters that are not allowed by systemd-journal.", k->key); + l2j_log("WARNING: key '%s' contains characters that are not allowed by systemd-journal.", k->key); break; } } if(isdigit(k->key[0])) - log2stderr("WARNING: key '%s' starts with a digit and may not be accepted by systemd-journal.", k->key); + l2j_log("WARNING: key '%s' starts with a digit and may not be accepted by systemd-journal.", k->key); if(k->key[0] == '_') - log2stderr("WARNING: key '%s' starts with an underscore, which makes it a systemd-journal trusted field. 
" - "Such fields are accepted by systemd-journal-remote, but not by systemd-journald.", k->key); + l2j_log( + "WARNING: key '%s' starts with an underscore, which makes it a systemd-journal trusted field. " + "Such fields are accepted by systemd-journal-remote, but not by systemd-journald.", + k->key); } // ---------------------------------------------------------------------------- @@ -170,16 +180,16 @@ static inline void replace_evaluate(LOG_JOB *jb, HASHED_KEY *k, REPLACE_PATTERN for(REPLACE_NODE *node = rp->nodes; node != NULL; node = node->next) { if(node->is_variable) { if(hashed_keys_match(&node->name, &jb->line.key)) - txt_expand_and_append(&ht_key->value, jb->line.trimmed, jb->line.trimmed_len); + txt_l2j_append(&ht_key->value, jb->line.trimmed, jb->line.trimmed_len); else { HASHED_KEY *ktmp = get_key_from_hashtable_with_char_ptr(jb, node->name.key); if(ktmp->value.len) - txt_expand_and_append(&ht_key->value, ktmp->value.txt, ktmp->value.len); + txt_l2j_append(&ht_key->value, ktmp->value.txt, ktmp->value.len); } } else - txt_expand_and_append(&ht_key->value, node->name.key, node->name.len); + txt_l2j_append(&ht_key->value, node->name.key, node->name.len); } } @@ -202,26 +212,26 @@ static inline void replace_evaluate_from_pcre2(LOG_JOB *jb, HASHED_KEY *k, REPLA PCRE2_SIZE end_offset = ovector[2 * group_number + 1]; PCRE2_SIZE length = end_offset - start_offset; - txt_expand_and_append(&jb->rewrites.tmp, k->value.txt + start_offset, length); + txt_l2j_append(&jb->rewrites.tmp, k->value.txt + start_offset, length); } else { if(hashed_keys_match(&node->name, &jb->line.key)) - txt_expand_and_append(&jb->rewrites.tmp, jb->line.trimmed, jb->line.trimmed_len); + txt_l2j_append(&jb->rewrites.tmp, jb->line.trimmed, jb->line.trimmed_len); else { HASHED_KEY *ktmp = get_key_from_hashtable_with_char_ptr(jb, node->name.key); if(ktmp->value.len) - txt_expand_and_append(&jb->rewrites.tmp, ktmp->value.txt, ktmp->value.len); + txt_l2j_append(&jb->rewrites.tmp, ktmp->value.txt, ktmp->value.len); } } } else { - txt_expand_and_append(&jb->rewrites.tmp, node->name.key, node->name.len); + txt_l2j_append(&jb->rewrites.tmp, node->name.key, node->name.len); } } // swap the values of the temporary TEXT and the key value - TEXT tmp = k->value; + TXT_L2J tmp = k->value; k->value = jb->rewrites.tmp; jb->rewrites.tmp = tmp; } @@ -271,7 +281,7 @@ static inline HASHED_KEY *rename_key(LOG_JOB *jb, HASHED_KEY *k) { static inline void send_key_value_constant(LOG_JOB *jb __maybe_unused, HASHED_KEY *key, const char *value, size_t len) { HASHED_KEY *ht_key = get_key_from_hashtable(jb, key); - txt_replace(&ht_key->value, value, len); + txt_l2j_set(&ht_key->value, value, len); ht_key->flags |= HK_VALUE_FROM_LOG; // fprintf(stderr, "SET %s=%.*s\n", ht_key->key, (int)ht_key->value.len, ht_key->value.txt); @@ -292,7 +302,7 @@ static inline void send_key_value_error(LOG_JOB *jb, HASHED_KEY *key, const char inline void log_job_send_extracted_key_value(LOG_JOB *jb, const char *key, const char *value, size_t len) { HASHED_KEY *ht_key = get_key_from_hashtable_with_char_ptr(jb, key); HASHED_KEY *nk = rename_key(jb, ht_key); - txt_replace(&nk->value, value, len); + txt_l2j_set(&nk->value, value, len); ht_key->flags |= HK_VALUE_FROM_LOG; // fprintf(stderr, "SET %s=%.*s\n", ht_key->key, (int)ht_key->value.len, ht_key->value.txt); @@ -417,7 +427,7 @@ static inline bool jb_switched_filename(LOG_JOB *jb, const char *line, size_t le const char *end = strstr(line, " <=="); while (*start == ' ') start++; if (*start != '\n' && *start != '\0' 
&& end) { - txt_replace(&jb->filename.current, start, end - start); + txt_l2j_set(&jb->filename.current, start, end - start); return true; } } @@ -486,7 +496,7 @@ int log_job_run(LOG_JOB *jb) { else if(strcmp(jb->pattern, "none") != 0) { pcre2 = pcre2_parser_create(jb); if(pcre2_has_error(pcre2)) { - log2stderr("%s", pcre2_parser_error(pcre2)); + l2j_log("%s", pcre2_parser_error(pcre2)); pcre2_parser_destroy(pcre2); return 1; } @@ -515,11 +525,11 @@ int log_job_run(LOG_JOB *jb) { if(!line_is_matched) { if(json) - log2stderr("%s", json_parser_error(json)); + l2j_log("%s", json_parser_error(json)); else if(logfmt) - log2stderr("%s", logfmt_parser_error(logfmt)); + l2j_log("%s", logfmt_parser_error(logfmt)); else if(pcre2) - log2stderr("%s", pcre2_parser_error(pcre2)); + l2j_log("%s", pcre2_parser_error(pcre2)); if(!jb_send_unmatched_line(jb, line)) // just logging to stderr, not sending unmatched lines diff --git a/src/collectors/log2journal/log2journal.h b/src/collectors/log2journal/log2journal.h index 5bdf7276b..480c0598c 100644 --- a/src/collectors/log2journal/log2journal.h +++ b/src/collectors/log2journal/log2journal.h @@ -3,49 +3,16 @@ #ifndef NETDATA_LOG2JOURNAL_H #define NETDATA_LOG2JOURNAL_H -// only for PACKAGE_VERSION -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -// ---------------------------------------------------------------------------- -// compatibility - -#ifndef HAVE_STRNDUP -// strndup() is not available on Windows -static inline char *os_strndup( const char *s1, size_t n) -{ - char *copy= (char*)malloc( n+1 ); - memcpy( copy, s1, n ); - copy[n] = 0; - return copy; -}; -#define strndup(s, n) os_strndup(s, n) -#endif - -#if defined(HAVE_FUNC_ATTRIBUTE_FORMAT_GNU_PRINTF) -#define PRINTFLIKE(f, a) __attribute__ ((format(gnu_printf, f, a))) -#elif defined(HAVE_FUNC_ATTRIBUTE_FORMAT_PRINTF) -#define PRINTFLIKE(f, a) __attribute__ ((format(printf, f, a))) -#else -#define PRINTFLIKE(f, a) -#endif +#include "libnetdata/libnetdata.h" +#include "log2journal-txt.h" +#include "log2journal-hashed-key.h" // ---------------------------------------------------------------------------- // logging // enable the compiler to check for printf like errors on our log2stderr() function -static inline void log2stderr(const char *format, ...) PRINTFLIKE(1, 2); -static inline void log2stderr(const char *format, ...) { +static inline void l2j_log(const char *format, ...) PRINTFLIKE(1, 2); +static inline void l2j_log(const char *format, ...) { va_list args; va_start(args, format); vfprintf(stderr, format, args); @@ -54,62 +21,6 @@ static inline void log2stderr(const char *format, ...) { } // ---------------------------------------------------------------------------- -// allocation functions abstraction - -static inline void *mallocz(size_t size) { - void *ptr = malloc(size); - if (!ptr) { - log2stderr("Fatal Error: Memory allocation failed. Requested size: %zu bytes.", size); - exit(EXIT_FAILURE); - } - return ptr; -} - -static inline void *callocz(size_t elements, size_t size) { - void *ptr = calloc(elements, size); - if (!ptr) { - log2stderr("Fatal Error: Memory allocation failed. Requested size: %zu bytes.", elements * size); - exit(EXIT_FAILURE); - } - return ptr; -} - -static inline void *reallocz(void *ptr, size_t size) { - void *new_ptr = realloc(ptr, size); - if (!new_ptr) { - log2stderr("Fatal Error: Memory reallocation failed. 
Requested size: %zu bytes.", size); - exit(EXIT_FAILURE); - } - return new_ptr; -} - -static inline char *strdupz(const char *s) { - char *ptr = strdup(s); - if (!ptr) { - log2stderr("Fatal Error: Memory allocation failed in strdup."); - exit(EXIT_FAILURE); - } - return ptr; -} - -static inline char *strndupz(const char *s, size_t n) { - char *ptr = strndup(s, n); - if (!ptr) { - log2stderr("Fatal Error: Memory allocation failed in strndup. Requested size: %zu bytes.", n); - exit(EXIT_FAILURE); - } - return ptr; -} - -static inline void freez(void *ptr) { - if (ptr) - free(ptr); -} - -// ---------------------------------------------------------------------------- - -#define XXH_INLINE_ALL -#include "libnetdata/xxhash.h" #define PCRE2_CODE_UNIT_WIDTH 8 #include @@ -121,15 +32,12 @@ static inline void freez(void *ptr) { // ---------------------------------------------------------------------------- // hashtable for HASHED_KEY -// cleanup hashtable defines -#include "libnetdata/simple_hashtable_undef.h" - struct hashed_key; static inline int compare_keys(struct hashed_key *k1, struct hashed_key *k2); #define SIMPLE_HASHTABLE_SORT_FUNCTION compare_keys -#define SIMPLE_HASHTABLE_VALUE_TYPE struct hashed_key +#define SIMPLE_HASHTABLE_VALUE_TYPE HASHED_KEY #define SIMPLE_HASHTABLE_NAME _KEY -#include "libnetdata/simple_hashtable.h" +#include "libnetdata/simple_hashtable/simple_hashtable.h" // ---------------------------------------------------------------------------- @@ -172,152 +80,13 @@ static inline size_t copy_to_buffer(char *dst, size_t dst_size, const char *src, } } -// ---------------------------------------------------------------------------- -// A dynamically sized, reusable text buffer, -// allowing us to be fast (no allocations during iterations) while having the -// smallest possible allocations. 
- -typedef struct txt { - char *txt; - uint32_t size; - uint32_t len; -} TEXT; - -static inline void txt_cleanup(TEXT *t) { - if(!t) - return; - - if(t->txt) - freez(t->txt); - - t->txt = NULL; - t->size = 0; - t->len = 0; -} - -static inline void txt_replace(TEXT *t, const char *s, size_t len) { - if(!s || !*s || len == 0) { - s = ""; - len = 0; - } - - if(len + 1 <= t->size) { - // the existing value allocation, fits our value - - memcpy(t->txt, s, len); - t->txt[len] = '\0'; - t->len = len; - } - else { - // no existing value allocation, or too small for our value - // cleanup and increase the buffer - - txt_cleanup(t); - - t->txt = strndupz(s, len); - t->size = len + 1; - t->len = len; - } -} - -static inline void txt_expand_and_append(TEXT *t, const char *s, size_t len) { - if(len + 1 > (t->size - t->len)) { - size_t new_size = t->len + len + 1; - if(new_size < t->size * 2) - new_size = t->size * 2; - - t->txt = reallocz(t->txt, new_size); - t->size = new_size; - } - - char *copy_to = &t->txt[t->len]; - memcpy(copy_to, s, len); - copy_to[len] = '\0'; - t->len += len; -} - -// ---------------------------------------------------------------------------- - -typedef enum __attribute__((__packed__)) { - HK_NONE = 0, - - // permanent flags - they are set once to optimize various decisions and lookups - - HK_HASHTABLE_ALLOCATED = (1 << 0), // this is key object allocated in the hashtable - // objects that do not have this, have a pointer to a key in the hashtable - // objects that have this, value a value allocated - - HK_FILTERED = (1 << 1), // we checked once if this key in filtered - HK_FILTERED_INCLUDED = (1 << 2), // the result of the filtering was to include it in the output - - HK_COLLISION_CHECKED = (1 << 3), // we checked once for collision check of this key - - HK_RENAMES_CHECKED = (1 << 4), // we checked once if there are renames on this key - HK_HAS_RENAMES = (1 << 5), // and we found there is a rename rule related to it - - // ephemeral flags - they are unset at the end of each log line - - HK_VALUE_FROM_LOG = (1 << 14), // the value of this key has been read from the log (or from injection, duplication) - HK_VALUE_REWRITTEN = (1 << 15), // the value of this key has been rewritten due to one of our rewrite rules - -} HASHED_KEY_FLAGS; - -typedef struct hashed_key { - const char *key; - uint32_t len; - HASHED_KEY_FLAGS flags; - XXH64_hash_t hash; - union { - struct hashed_key *hashtable_ptr; // HK_HASHTABLE_ALLOCATED is not set - TEXT value; // HK_HASHTABLE_ALLOCATED is set - }; -} HASHED_KEY; - -static inline void hashed_key_cleanup(HASHED_KEY *k) { - if(k->key) { - freez((void *)k->key); - k->key = NULL; - } - - if(k->flags & HK_HASHTABLE_ALLOCATED) - txt_cleanup(&k->value); - else - k->hashtable_ptr = NULL; -} - -static inline void hashed_key_set(HASHED_KEY *k, const char *name) { - hashed_key_cleanup(k); - - k->key = strdupz(name); - k->len = strlen(k->key); - k->hash = XXH3_64bits(k->key, k->len); - k->flags = HK_NONE; -} - -static inline void hashed_key_len_set(HASHED_KEY *k, const char *name, size_t len) { - hashed_key_cleanup(k); - - k->key = strndupz(name, len); - k->len = len; - k->hash = XXH3_64bits(k->key, k->len); - k->flags = HK_NONE; -} - -static inline bool hashed_keys_match(HASHED_KEY *k1, HASHED_KEY *k2) { - return ((k1 == k2) || (k1->hash == k2->hash && strcmp(k1->key, k2->key) == 0)); -} - -static inline int compare_keys(struct hashed_key *k1, struct hashed_key *k2) { - return strcmp(k1->key, k2->key); -} - // 
---------------------------------------------------------------------------- typedef struct search_pattern { const char *pattern; pcre2_code *re; pcre2_match_data *match_data; - TEXT error; + TXT_L2J error; } SEARCH_PATTERN; void search_pattern_cleanup(SEARCH_PATTERN *sp); @@ -416,7 +185,7 @@ typedef struct log_job { struct { bool last_line_was_empty; HASHED_KEY key; - TEXT current; + TXT_L2J current; } filename; struct { @@ -435,7 +204,7 @@ typedef struct log_job { struct { uint32_t used; REWRITE array[MAX_REWRITES]; - TEXT tmp; + TXT_L2J tmp; } rewrites; struct { diff --git a/src/collectors/macos.plugin/integrations/macos.md b/src/collectors/macos.plugin/integrations/macos.md index 9445b9a61..6b9e3f239 100644 --- a/src/collectors/macos.plugin/integrations/macos.md +++ b/src/collectors/macos.plugin/integrations/macos.md @@ -188,8 +188,8 @@ The file format is a modified INI syntax. The general structure is: [section2] option3 = some third value ``` -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/collectors/macos.plugin/macos_fw.c b/src/collectors/macos.plugin/macos_fw.c index 75ef386b9..a97c5bc25 100644 --- a/src/collectors/macos.plugin/macos_fw.c +++ b/src/collectors/macos.plugin/macos_fw.c @@ -199,7 +199,6 @@ int do_macos_iokit(int update_every, usec_t dt) { , update_every , RRDSET_TYPE_LINE ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); rrddim_add(st, "reads", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); rrddim_add(st, "writes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); @@ -235,7 +234,6 @@ int do_macos_iokit(int update_every, usec_t dt) { , update_every , RRDSET_TYPE_AREA ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); rrddim_add(st, "utilization", NULL, 1, 10000000, RRD_ALGORITHM_INCREMENTAL); } @@ -270,7 +268,6 @@ int do_macos_iokit(int update_every, usec_t dt) { , update_every , RRDSET_TYPE_LINE ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); rrddim_add(st, "reads", NULL, 1, 1000000, RRD_ALGORITHM_INCREMENTAL); rrddim_add(st, "writes", NULL, -1, 1000000, RRD_ALGORITHM_INCREMENTAL); @@ -302,7 +299,6 @@ int do_macos_iokit(int update_every, usec_t dt) { , update_every , RRDSET_TYPE_LINE ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); rrddim_add(st, "reads", NULL, 1, 1000000, RRD_ALGORITHM_ABSOLUTE); rrddim_add(st, "writes", NULL, -1, 1000000, RRD_ALGORITHM_ABSOLUTE); @@ -330,7 +326,6 @@ int do_macos_iokit(int update_every, usec_t dt) { , update_every , RRDSET_TYPE_AREA ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); rrddim_add(st, "reads", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); rrddim_add(st, "writes", NULL, -1, 1024, RRD_ALGORITHM_ABSOLUTE); @@ -358,7 +353,6 @@ int do_macos_iokit(int update_every, usec_t dt) { , update_every , RRDSET_TYPE_LINE ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); rrddim_add(st, "svctm", NULL, 1, 1000000, RRD_ALGORITHM_ABSOLUTE); } @@ -549,7 +543,6 @@ int do_macos_iokit(int update_every, usec_t dt) { , update_every , RRDSET_TYPE_LINE ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); rrddim_add(st, "received", 
NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); rrddim_add(st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); @@ -579,7 +572,6 @@ int do_macos_iokit(int update_every, usec_t dt) { , update_every , RRDSET_TYPE_LINE ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); rrddim_add(st, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); rrddim_add(st, "outbound", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); @@ -605,7 +597,6 @@ int do_macos_iokit(int update_every, usec_t dt) { , update_every , RRDSET_TYPE_LINE ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); rrddim_add(st, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); } @@ -629,7 +620,6 @@ int do_macos_iokit(int update_every, usec_t dt) { , update_every , RRDSET_TYPE_LINE ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); rrddim_add(st, "frames", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); rrddim_add(st, "collisions", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); diff --git a/src/collectors/macos.plugin/macos_mach_smi.c b/src/collectors/macos.plugin/macos_mach_smi.c index 30c957187..fa88a5b7c 100644 --- a/src/collectors/macos.plugin/macos_mach_smi.c +++ b/src/collectors/macos.plugin/macos_mach_smi.c @@ -192,7 +192,6 @@ int do_macos_mach_smi(int update_every, usec_t dt) { , update_every , RRDSET_TYPE_LINE ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); rrddim_add(st, "memory", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); rrddim_add(st, "cow", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); diff --git a/src/collectors/macos.plugin/macos_sysctl.c b/src/collectors/macos.plugin/macos_sysctl.c index 825125365..83e941df4 100644 --- a/src/collectors/macos.plugin/macos_sysctl.c +++ b/src/collectors/macos.plugin/macos_sysctl.c @@ -278,7 +278,6 @@ int do_macos_sysctl(int update_every, usec_t dt) { , update_every , RRDSET_TYPE_STACKED ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); rrddim_add(st, "free", NULL, 1, 1048576, RRD_ALGORITHM_ABSOLUTE); rrddim_add(st, "used", NULL, 1, 1048576, RRD_ALGORITHM_ABSOLUTE); @@ -411,7 +410,6 @@ int do_macos_sysctl(int update_every, usec_t dt) { , update_every , RRDSET_TYPE_LINE ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); rrddim_add(st, "InErrs", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); @@ -441,7 +439,6 @@ int do_macos_sysctl(int update_every, usec_t dt) { , update_every , RRDSET_TYPE_LINE ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); rrddim_add(st, "EstabResets", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); rrddim_add(st, "ActiveOpens", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); @@ -564,7 +561,6 @@ int do_macos_sysctl(int update_every, usec_t dt) { , update_every , RRDSET_TYPE_LINE ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); rrddim_add(st, "InCEPkts", "CEP", 1, 1, RRD_ALGORITHM_INCREMENTAL); rrddim_add(st, "InNoECTPkts", "NoECTP", -1, 1, RRD_ALGORITHM_INCREMENTAL); @@ -631,7 +627,6 @@ int do_macos_sysctl(int update_every, usec_t dt) { , update_every , RRDSET_TYPE_LINE ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); rrddim_add(st, "RcvbufErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); @@ -819,7 +814,6 @@ int do_macos_sysctl(int update_every, usec_t dt) { , update_every , RRDSET_TYPE_LINE ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); rrddim_add(st, "FragOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL); rrddim_add(st, "FragFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL); @@ -849,7 +843,6 @@ int do_macos_sysctl(int update_every, usec_t dt) { , update_every , RRDSET_TYPE_LINE ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); rrddim_add(st, 
"ReasmOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL); rrddim_add(st, "ReasmFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL); @@ -879,7 +872,6 @@ int do_macos_sysctl(int update_every, usec_t dt) { , update_every , RRDSET_TYPE_LINE ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); rrddim_add(st, "InDiscards", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); rrddim_add(st, "OutDiscards", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); @@ -963,7 +955,6 @@ int do_macos_sysctl(int update_every, usec_t dt) { , update_every , RRDSET_TYPE_LINE ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); rrddim_add(st, "ok", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); rrddim_add(st, "failed", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); @@ -994,7 +985,6 @@ int do_macos_sysctl(int update_every, usec_t dt) { , update_every , RRDSET_TYPE_LINE ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); rrddim_add(st, "ok", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); rrddim_add(st, "failed", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); @@ -1027,7 +1017,6 @@ int do_macos_sysctl(int update_every, usec_t dt) { , update_every , RRDSET_TYPE_LINE ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); rrddim_add(st, "InDiscards", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); rrddim_add(st, "OutDiscards", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); diff --git a/src/collectors/macos.plugin/plugin_macos.c b/src/collectors/macos.plugin/plugin_macos.c index 0d651ae69..6f5b892d8 100644 --- a/src/collectors/macos.plugin/plugin_macos.c +++ b/src/collectors/macos.plugin/plugin_macos.c @@ -54,13 +54,12 @@ void *macos_main(void *ptr) worker_register_job_name(i, macos_modules[i].dim); } - usec_t step = localhost->rrd_update_every * USEC_PER_SEC; heartbeat_t hb; - heartbeat_init(&hb); + heartbeat_init(&hb, localhost->rrd_update_every * USEC_PER_SEC); while(service_running(SERVICE_COLLECTORS)) { worker_is_idle(); - usec_t hb_dt = heartbeat_next(&hb, step); + usec_t hb_dt = heartbeat_next(&hb); if (!service_running(SERVICE_COLLECTORS)) break; diff --git a/src/collectors/network-viewer.plugin/network-viewer.c b/src/collectors/network-viewer.plugin/network-viewer.c index 06dde7382..c0ea8af5e 100644 --- a/src/collectors/network-viewer.plugin/network-viewer.c +++ b/src/collectors/network-viewer.plugin/network-viewer.c @@ -23,20 +23,18 @@ static SPAWN_SERVER *spawn_srv = NULL; } aggregated_key; \ } network_viewer; -#include "libnetdata/maps/local-sockets.h" -#include "libnetdata/maps/system-users.h" -#include "libnetdata/maps/system-services.h" +#include "libnetdata/local-sockets/local-sockets.h" +#include "libnetdata/os/system-maps/system-services.h" #define NETWORK_CONNECTIONS_VIEWER_FUNCTION "network-connections" #define NETWORK_CONNECTIONS_VIEWER_HELP "Network connections explorer" #define SIMPLE_HASHTABLE_VALUE_TYPE LOCAL_SOCKET #define SIMPLE_HASHTABLE_NAME _AGGREGATED_SOCKETS -#include "libnetdata/simple_hashtable.h" +#include "libnetdata/simple_hashtable/simple_hashtable.h" netdata_mutex_t stdout_mutex = NETDATA_MUTEX_INITIALIZER; static bool plugin_should_exit = false; -static USERNAMES_CACHE *uc; static SERVICENAMES_CACHE *sc; ENUM_STR_MAP_DEFINE(SOCKET_DIRECTION) = { @@ -80,7 +78,7 @@ struct sockets_stats { } max; }; -static void local_socket_to_json_array(struct sockets_stats *st, LOCAL_SOCKET *n, uint64_t proc_self_net_ns_inode, bool aggregated) { +static void local_socket_to_json_array(struct sockets_stats *st, const LOCAL_SOCKET *n, uint64_t proc_self_net_ns_inode, bool aggregated) { if(n->direction == SOCKET_DIRECTION_NONE) return; @@ -151,12 +149,12 @@ static void 
local_socket_to_json_array(struct sockets_stats *st, LOCAL_SOCKET *n } else { // buffer_json_add_array_item_uint64(wb, n->uid); - STRING *u = system_usernames_cache_lookup_uid(uc, n->uid); - buffer_json_add_array_item_string(wb, string2str(u)); - string_freez(u); + CACHED_USERNAME cu = cached_username_get_by_uid(n->uid); + buffer_json_add_array_item_string(wb, string2str(cu.username)); + cached_username_release(cu); } - struct socket_endpoint *server_endpoint; + const struct socket_endpoint *server_endpoint; const char *server_address; const char *client_address_space; const char *server_address_space; @@ -240,7 +238,9 @@ static void local_socket_to_json_array(struct sockets_stats *st, LOCAL_SOCKET *n buffer_json_array_close(wb); } -static void populate_aggregated_key(LOCAL_SOCKET *n) { +static void populate_aggregated_key(const LOCAL_SOCKET *nn) { + LOCAL_SOCKET *n = (LOCAL_SOCKET *)nn; + n->network_viewer.count = 1; n->network_viewer.aggregated_key.pid = n->pid; @@ -269,7 +269,7 @@ static void populate_aggregated_key(LOCAL_SOCKET *n) { n->network_viewer.aggregated_key.remote_address_space = local_sockets_address_space(&n->remote); } -static void local_sockets_cb_to_json(LS_STATE *ls, LOCAL_SOCKET *n, void *data) { +static void local_sockets_cb_to_json(LS_STATE *ls, const LOCAL_SOCKET *n, void *data) { struct sockets_stats *st = data; populate_aggregated_key(n); local_socket_to_json_array(st, n, ls->proc_self_net_ns_inode, false); @@ -280,12 +280,12 @@ static void local_sockets_cb_to_json(LS_STATE *ls, LOCAL_SOCKET *n, void *data) #define SUM_THEM_ALL(a, b) (a) += (b) #define OR_THEM_ALL(a, b) (a) |= (b) -static void local_sockets_cb_to_aggregation(LS_STATE *ls __maybe_unused, LOCAL_SOCKET *n, void *data) { +static void local_sockets_cb_to_aggregation(LS_STATE *ls __maybe_unused, const LOCAL_SOCKET *n, void *data) { SIMPLE_HASHTABLE_AGGREGATED_SOCKETS *ht = data; populate_aggregated_key(n); XXH64_hash_t hash = XXH3_64bits(&n->network_viewer.aggregated_key, sizeof(n->network_viewer.aggregated_key)); - SIMPLE_HASHTABLE_SLOT_AGGREGATED_SOCKETS *sl = simple_hashtable_get_slot_AGGREGATED_SOCKETS(ht, hash, n, true); + SIMPLE_HASHTABLE_SLOT_AGGREGATED_SOCKETS *sl = simple_hashtable_get_slot_AGGREGATED_SOCKETS(ht, hash, (LOCAL_SOCKET *)n, true); LOCAL_SOCKET *t = SIMPLE_HASHTABLE_SLOT_DATA(sl); if(t) { t->network_viewer.count++; @@ -464,7 +464,7 @@ void network_viewer_function(const char *transaction, char *function __maybe_unu char function_copy[strlen(function) + 1]; memcpy(function_copy, function, sizeof(function_copy)); char *words[1024]; - size_t num_words = quoted_strings_splitter_pluginsd(function_copy, words, 1024); + size_t num_words = quoted_strings_splitter_whitespace(function_copy, words, 1024); for(size_t i = 1; i < num_words ;i++) { char *param = get_word(words, num_words, i); if(strcmp(param, "sockets:aggregated") == 0) { @@ -511,7 +511,9 @@ void network_viewer_function(const char *transaction, char *function __maybe_unu .max_errors = 10, .max_concurrent_namespaces = 5, }, +#if defined(LOCAL_SOCKETS_USE_SETNS) .spawn_server = spawn_srv, +#endif .stats = { 0 }, .sockets_hashtable = { 0 }, .local_ips_hashtable = { 0 }, @@ -945,7 +947,10 @@ close_and_send: buffer_json_finalize(wb); netdata_mutex_lock(&stdout_mutex); - pluginsd_function_result_to_stdout(transaction, HTTP_RESP_OK, "application/json", now_s + 1, wb); + wb->response_code = HTTP_RESP_OK; + wb->content_type = CT_APPLICATION_JSON; + wb->expires = now_s + 1; + pluginsd_function_result_to_stdout(transaction, wb); 
netdata_mutex_unlock(&stdout_mutex); } @@ -953,20 +958,22 @@ close_and_send: // main int main(int argc __maybe_unused, char **argv __maybe_unused) { - clocks_init(); nd_thread_tag_set("NETWORK-VIEWER"); nd_log_initialize_for_external_plugins("network-viewer.plugin"); netdata_configured_host_prefix = getenv("NETDATA_HOST_PREFIX"); if(verify_netdata_host_prefix(true) == -1) exit(1); +#if defined(LOCAL_SOCKETS_USE_SETNS) spawn_srv = spawn_server_create(SPAWN_SERVER_OPTION_CALLBACK, "setns", local_sockets_spawn_server_callback, argc, (const char **)argv); if(spawn_srv == NULL) { fprintf(stderr, "Cannot create spawn server.\n"); exit(1); } +#endif - uc = system_usernames_cache_init(); + cached_usernames_init(); + update_cached_host_users(); sc = system_servicenames_cache_init(); // ---------------------------------------------------------------------------------------------------------------- @@ -1008,15 +1015,14 @@ int main(int argc __maybe_unused, char **argv __maybe_unused) { // ---------------------------------------------------------------------------------------------------------------- - usec_t step_ut = 100 * USEC_PER_MS; usec_t send_newline_ut = 0; bool tty = isatty(fileno(stdout)) == 1; heartbeat_t hb; - heartbeat_init(&hb); + heartbeat_init(&hb, USEC_PER_SEC); while(!plugin_should_exit) { - usec_t dt_ut = heartbeat_next(&hb, step_ut); + usec_t dt_ut = heartbeat_next(&hb); send_newline_ut += dt_ut; if(!tty && send_newline_ut > USEC_PER_SEC) { diff --git a/src/collectors/nfacct.plugin/integrations/netfilter.md b/src/collectors/nfacct.plugin/integrations/netfilter.md index b8dcb8520..f8ba4ef74 100644 --- a/src/collectors/nfacct.plugin/integrations/netfilter.md +++ b/src/collectors/nfacct.plugin/integrations/netfilter.md @@ -106,8 +106,8 @@ The file format is a modified INI syntax. The general structure is: [section2] option3 = some third value ``` -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/collectors/nfacct.plugin/plugin_nfacct.c b/src/collectors/nfacct.plugin/plugin_nfacct.c index 92c82351a..6225ec4a6 100644 --- a/src/collectors/nfacct.plugin/plugin_nfacct.c +++ b/src/collectors/nfacct.plugin/plugin_nfacct.c @@ -747,7 +747,6 @@ void nfacct_signals() } int main(int argc, char **argv) { - clocks_init(); nd_log_initialize_for_external_plugins("nfacct.plugin"); // ------------------------------------------------------------------------ @@ -832,12 +831,11 @@ int main(int argc, char **argv) { time_t started_t = now_monotonic_sec(); size_t iteration; - usec_t step = netdata_update_every * USEC_PER_SEC; heartbeat_t hb; - heartbeat_init(&hb); + heartbeat_init(&hb, netdata_update_every * USEC_PER_SEC); for(iteration = 0; 1; iteration++) { - usec_t dt = heartbeat_next(&hb, step); + usec_t dt = heartbeat_next(&hb); if(unlikely(netdata_exit)) break; diff --git a/src/collectors/perf.plugin/integrations/cpu_performance.md b/src/collectors/perf.plugin/integrations/cpu_performance.md index c24a14a99..0db211167 100644 --- a/src/collectors/perf.plugin/integrations/cpu_performance.md +++ b/src/collectors/perf.plugin/integrations/cpu_performance.md @@ -97,21 +97,21 @@ There are no alerts configured by default for this integration. #### Install perf plugin -If you are [using our official native DEB/RPM packages](/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure the `netdata-plugin-perf` package is installed. +If you are [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure the `netdata-plugin-perf` package is installed. #### Enable the perf plugin The plugin is disabled by default because the number of PMUs is usually quite limited and it is not desired to allow Netdata to struggle silently for PMUs, interfering with other performance monitoring software. -To enable it, use `edit-config` from the Netdata [config directory](/docs/netdata-agent/configuration/README.md), which is typically at `/etc/netdata`, to edit the `netdata.conf` file. +To enable it, use `edit-config` from the Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md), which is typically at `/etc/netdata`, to edit the `netdata.conf` file. ```bash cd /etc/netdata # Replace this path with your Netdata config directory, if different sudo ./edit-config netdata.conf ``` -Change the value of the `perf` setting to `yes` in the `[plugins]` section. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](/packaging/installer/README.md#maintaining-a-netdata-agent-installation) for your system. +Change the value of the `perf` setting to `yes` in the `[plugins]` section. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/start-stop-restart.md) for your system. @@ -132,8 +132,8 @@ The file format is a modified INI syntax. The general structure is: [section2] option3 = some third value ``` -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/collectors/perf.plugin/metadata.yaml b/src/collectors/perf.plugin/metadata.yaml index 18841d53a..d72be7f5d 100644 --- a/src/collectors/perf.plugin/metadata.yaml +++ b/src/collectors/perf.plugin/metadata.yaml @@ -55,7 +55,7 @@ modules: sudo ./edit-config netdata.conf ``` - Change the value of the `perf` setting to `yes` in the `[plugins]` section. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](/packaging/installer/README.md#maintaining-a-netdata-agent-installation) for your system. + Change the value of the `perf` setting to `yes` in the `[plugins]` section. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](/docs/netdata-agent/start-stop-restart.md) for your system. configuration: file: name: "netdata.conf" diff --git a/src/collectors/perf.plugin/perf_plugin.c b/src/collectors/perf.plugin/perf_plugin.c index 8fb4014e4..ccc7016e2 100644 --- a/src/collectors/perf.plugin/perf_plugin.c +++ b/src/collectors/perf.plugin/perf_plugin.c @@ -240,7 +240,7 @@ static struct perf_event { {EV_ID_END, 0, 0, NULL, NULL, 0, 0, 0, NULL, NULL, NULL} }; -static int perf_init() { +static bool perf_init() { int cpu, group; struct perf_event_attr perf_event_attr; struct perf_event *current_event = NULL; @@ -270,6 +270,8 @@ static int perf_init() { memset(&perf_event_attr, 0, sizeof(perf_event_attr)); + int enabled = 0; + for(cpu = 0; cpu < number_of_cpus; cpu++) { for(current_event = &perf_events[0]; current_event->id != EV_ID_END; current_event++) { if(unlikely(current_event->disabled)) continue; @@ -304,6 +306,8 @@ static int perf_init() { } collector_error("Disabling event %u", current_event->id); current_event->disabled = 1; + } else { + enabled++; } *(current_event->fd + cpu) = fd; @@ -313,7 +317,7 @@ static int perf_init() { } } - return 0; + return enabled > 0; } static void perf_free(void) { @@ -1283,7 +1287,6 @@ void parse_command_line(int argc, char **argv) { } int main(int argc, char **argv) { - clocks_init(); nd_log_initialize_for_external_plugins("perf.plugin"); parse_command_line(argc, argv); @@ -1295,8 +1298,16 @@ int main(int argc, char **argv) { else if(freq) collector_error("update frequency %d seconds is too small for PERF. 
Using %d.", freq, update_every); - if(unlikely(debug)) fprintf(stderr, "perf.plugin: calling perf_init()\n"); - int perf = !perf_init(); + if (unlikely(debug)) + fprintf(stderr, "perf.plugin: calling perf_init()\n"); + + if (!perf_init()) { + perf_free(); + collector_info("all perf counters are disabled"); + fprintf(stdout, "EXIT\n"); + fflush(stdout); + exit(1); + } // ------------------------------------------------------------------------ // the main loop @@ -1306,27 +1317,30 @@ int main(int argc, char **argv) { time_t started_t = now_monotonic_sec(); size_t iteration; - usec_t step = update_every * USEC_PER_SEC; + + int perf = 1; heartbeat_t hb; - heartbeat_init(&hb); + heartbeat_init(&hb, update_every * USEC_PER_SEC); for(iteration = 0; 1; iteration++) { - usec_t dt = heartbeat_next(&hb, step); + usec_t dt = heartbeat_next(&hb); - if(unlikely(netdata_exit)) break; + if (unlikely(netdata_exit)) + break; - if(unlikely(debug && iteration)) - fprintf(stderr, "perf.plugin: iteration %zu, dt %"PRIu64" usec\n" - , iteration - , dt - ); + if (unlikely(debug && iteration)) + fprintf(stderr, "perf.plugin: iteration %zu, dt %" PRIu64 " usec\n", iteration, dt); if(likely(perf)) { - if(unlikely(debug)) fprintf(stderr, "perf.plugin: calling perf_collect()\n"); + if (unlikely(debug)) + fprintf(stderr, "perf.plugin: calling perf_collect()\n"); + perf = !perf_collect(); if(likely(perf)) { - if(unlikely(debug)) fprintf(stderr, "perf.plugin: calling perf_send_metrics()\n"); + if (unlikely(debug)) + fprintf(stderr, "perf.plugin: calling perf_send_metrics()\n"); + perf_send_metrics(); } } @@ -1334,7 +1348,8 @@ int main(int argc, char **argv) { fflush(stdout); // restart check (14400 seconds) - if(now_monotonic_sec() - started_t > 14400) break; + if (now_monotonic_sec() - started_t > 14400) + break; } collector_info("process exiting"); diff --git a/src/collectors/plugins.d/README.md b/src/collectors/plugins.d/README.md deleted file mode 100644 index 6b53dbed6..000000000 --- a/src/collectors/plugins.d/README.md +++ /dev/null @@ -1,875 +0,0 @@ - - -# External plugins - -`plugins.d` is the Netdata internal plugin that collects metrics -from external processes, thus allowing Netdata to use **external plugins**. - -## Provided External Plugins - -| plugin | language | O/S | description | -|:------------------------------------------------------------------------------------------------------:|:--------:|:--------------:|:----------------------------------------------------------------------------------------------------------------------------------------| -| [apps.plugin](/src/collectors/apps.plugin/README.md) | `C` | linux, freebsd | monitors the whole process tree on Linux and FreeBSD and breaks down system resource usage by **process**, **user** and **user group**. | -| [charts.d.plugin](/src/collectors/charts.d.plugin/README.md) | `BASH` | all | a **plugin orchestrator** for data collection modules written in `BASH` v4+. | -| [cups.plugin](/src/collectors/cups.plugin/README.md) | `C` | all | monitors **CUPS** | -| [ebpf.plugin](/src/collectors/ebpf.plugin/README.md) | `C` | linux | monitors different metrics on environments using kernel internal functions. | -| [go.d.plugin](/src/go/plugin/go.d/README.md) | `GO` | all | collects metrics from the system, applications, or third-party APIs. | -| [ioping.plugin](/src/collectors/ioping.plugin/README.md) | `C` | all | measures disk latency. 
|
-| [freeipmi.plugin](/src/collectors/freeipmi.plugin/README.md) | `C` | linux | collects metrics from enterprise hardware sensors, on Linux servers. |
-| [nfacct.plugin](/src/collectors/nfacct.plugin/README.md) | `C` | linux | collects netfilter firewall, connection tracker and accounting metrics using `libmnl` and `libnetfilter_acct`. |
-| [xenstat.plugin](/src/collectors/xenstat.plugin/README.md) | `C` | linux | collects XenServer and XCP-ng metrics using `lxenstat`. |
-| [perf.plugin](/src/collectors/perf.plugin/README.md) | `C` | linux | collects CPU performance metrics using performance monitoring units (PMU). |
-| [python.d.plugin](/src/collectors/python.d.plugin/README.md) | `python` | all | a **plugin orchestrator** for data collection modules written in `python` v2 or v3 (both are supported). |
-| [slabinfo.plugin](/src/collectors/slabinfo.plugin/README.md) | `C` | linux | collects kernel internal cache objects (SLAB) metrics. |
-
-Plugin orchestrators may also be described as **modular plugins**. They are modular since they accept custom-made modules to be included. Writing modules for these plugins is easier than accessing the native Netdata API directly. You will find modules already available for each orchestrator under the directory of the particular modular plugin (e.g. under python.d.plugin for the python orchestrator).
-Each of these modular plugins has its own methods for defining modules. Please check the examples and their documentation.
-
-## Motivation
-
-This plugin allows Netdata to use **external plugins** for data collection:
-
-1. external data collection plugins may be written in any computer language.
-
-2. external data collection plugins may use O/S capabilities or `setuid` to
-   run with escalated privileges (compared to the `netdata` daemon).
-   The communication between the external plugin and Netdata is unidirectional
-   (from the plugin to Netdata), so that Netdata cannot manipulate an external
-   plugin running with escalated privileges.
-
-## Operation
-
-Each of the external plugins is expected to run forever.
-Netdata will start each plugin when Netdata starts, and stop it when Netdata exits.
-
-If the external plugin exits or crashes, Netdata will log an error.
-If the external plugin exits or crashes without pushing metrics to Netdata, Netdata will not start it again.
-
-- Plugins that exit with any value other than zero will be disabled. Plugins that exit with zero will be restarted after some time.
-- Plugins may also be disabled by Netdata if they output things that Netdata does not understand.
-
-The `stdout` of external plugins is connected to Netdata to receive metrics,
-with the API defined below.
-
-The `stderr` of external plugins is connected to Netdata's `error.log`.
-
-Plugins can create any number of charts with any number of dimensions each. Each chart can have its own characteristics independently of the others generated by the same plugin. For example, one chart may have an update frequency of 1 second, another 5 seconds, and a third 10 seconds.
-
-## Configuration
-
-Netdata will supply the environment variables `NETDATA_USER_CONFIG_DIR` (for user-supplied configuration) and `NETDATA_STOCK_CONFIG_DIR` (for Netdata-supplied configuration) to identify the directories where configuration files are stored. It is up to the plugin to read the configuration it needs.
-
-The `[plugins]` section of `netdata.conf` contains a list of all the plugins found on the system where Netdata runs, with a boolean setting to enable or disable each of them.
-
-Example:
-
-```
-[plugins]
-  # enable running new plugins = yes
-  # check for new plugins every = 60
-
-  # charts.d = yes
-  # ioping = yes
-  # python.d = yes
-```
-
-The setting `enable running new plugins` sets the default behavior for all external plugins. It can be
-overridden for individual plugins by setting the corresponding plugin value to either `yes` or `no`.
-
-The setting `check for new plugins every` sets the interval between scans of the directory
-`/usr/libexec/netdata/plugins.d`. New plugins can be added at any time, and Netdata will detect them in a timely manner.
-
-For each of the external plugins enabled, another `netdata.conf` section
-is created, in the form of `[plugin:NAME]`, where `NAME` is the name of the external plugin.
-This section allows controlling the update frequency of the plugin and providing
-additional command line arguments to it.
-
-For example, for `apps.plugin` the following section is available:
-
-```
-[plugin:apps]
-  # update every = 1
-  # command options =
-```
-
-- `update every` controls the granularity of the external plugin.
-- `command options` allows giving additional command line options to the plugin.
-
-Netdata will provide the environment variable `NETDATA_UPDATE_EVERY` to the external plugins, in seconds (the default is 1). This is the **minimum update frequency** for all charts. A plugin that is updating values more frequently than this is just wasting resources.
-
-Netdata will call the plugin with just one command line parameter: the number of seconds the user requested this plugin to update its data (by default, this is also 1).
-
-Other than the above, the plugin configuration is up to the plugin.
-
-Keep in mind that the user may use the Netdata configuration to overwrite chart and dimension parameters. This is transparent to the plugin.
-
-### Autoconfiguration
-
-Plugins should attempt to autoconfigure themselves when possible.
-
-For example, if your plugin wants to monitor `squid`, you can search for it on port `3128` or `8080`. If either succeeds, you can proceed. If both fail, you can output an error (on stderr) saying that you cannot find `squid` running and giving instructions about the plugin configuration. Then you can stop (exit with a non-zero value), so that Netdata will not attempt to start the plugin again.
-
-## External Plugins API
-
-Any program that can print a few values to its standard output can become a Netdata external plugin.
-
-Netdata parses lines starting with:
-
-- `CHART` - create or update a chart
-- `DIMENSION` - add or update a dimension to the chart just created
-- `VARIABLE` - define a variable (to be used in health calculations)
-- `CLABEL` - add a label to a chart
-- `CLABEL_COMMIT` - commit added labels to the chart
-- `FUNCTION` - define a function that can be called later to execute it
-- `BEGIN` - initialize data collection for a chart
-- `SET` - set the value of a dimension for the initialized chart
-- `END` - complete data collection for the initialized chart
-- `FLUSH` - ignore the last collected values
-- `DISABLE` - disable this plugin
-- `FUNCTION_PROGRESS` - report the progress of a function execution
-- `FUNCTION_RESULT_BEGIN` - to initiate the transmission of function results
-- `FUNCTION_RESULT_END` - to end the transmission of function results
-- `CONFIG` - to define dynamic configuration entities
-
-A single program can produce any number of charts with any number of dimensions each.
-
-Charts can be added at any time (not just at the beginning).
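-
-As a quick, minimal sketch (the chart, dimension and value shown here are hypothetical; the exact fields of `CHART` and `DIMENSION` are documented below), a plugin could print the chart definition once at startup and then one `BEGIN`/`SET`/`END` block per iteration:
-
-```
-CHART example.random '' "A random number" "value" random example.random line 90000 1
-DIMENSION number '' absolute 1 1
-
-BEGIN example.random
-SET number = 42
-END
-```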
-
-Netdata may send the following commands to the plugin's `stdin`:
-
-- `FUNCTION` - to call a specific function, with all parameters inline
-- `FUNCTION_PAYLOAD` - to call a specific function, with a payload of parameters
-- `FUNCTION_PAYLOAD_END` - to end the payload of parameters
-- `FUNCTION_CANCEL` - to cancel a running function transaction - no response is required
-- `FUNCTION_PROGRESS` - to report that a user asked for the progress of a running function call - no response is required
-
-### Command line parameters
-
-The plugin **MUST** accept just **one** parameter: **the number of seconds it is
-expected to update the values for its charts**. The value passed by Netdata
-to the plugin is controlled via its configuration file (so there is no need
-for the plugin to handle this configuration option).
-
-The external plugin can overwrite the update frequency. For example, the server may
-request per-second updates, but the plugin may ignore it and update its charts
-every 5 seconds.
-
-### Environment variables
-
-There are a few environment variables that are set by `netdata` and are
-available for the plugin to use.
-
-| variable | description |
-|:--------:|:------------|
-| `NETDATA_USER_CONFIG_DIR` | The directory where all Netdata-related user configuration should be stored. If the plugin requires custom user configuration, this is the place the user has saved it (normally under `/etc/netdata`). |
-| `NETDATA_STOCK_CONFIG_DIR` | The directory where all Netdata-related stock configuration should be stored. If the plugin is shipped with configuration files, this is the place they can be found (normally under `/usr/lib/netdata/conf.d`). |
-| `NETDATA_PLUGINS_DIR` | The directory where all Netdata plugins are stored. |
-| `NETDATA_USER_PLUGINS_DIRS` | The list of directories where custom plugins are stored. |
-| `NETDATA_WEB_DIR` | The directory where the web files of Netdata are saved. |
-| `NETDATA_CACHE_DIR` | The directory where the cache files of Netdata are stored. Use this directory if the plugin requires a place to store data. A new directory should be created for the plugin for this purpose, inside this directory. |
-| `NETDATA_LOG_DIR` | The directory where the log files are stored. By default, the `stderr` output of the plugin will be saved in the `error.log` file of Netdata. |
-| `NETDATA_HOST_PREFIX` | This is used in environments where system directories like `/sys` and `/proc` have to be accessed at a different path. |
-| `NETDATA_DEBUG_FLAGS` | This is a number (probably in hex, starting with `0x`) that enables certain Netdata debugging features. |
-| `NETDATA_UPDATE_EVERY` | The minimum number of seconds between chart refreshes. This is like the **internal clock** of Netdata (it is user-configurable, defaulting to `1`). There is no meaning for a plugin to update its values more frequently than this number of seconds. |
-| `NETDATA_INVOCATION_ID` | A random UUID in compact form, representing the unique invocation identifier of Netdata. When running under systemd, Netdata uses the `INVOCATION_ID` set by systemd. |
-| `NETDATA_LOG_METHOD` | One of `syslog`, `journal`, `stderr` or `none`, indicating the preferred log method of external plugins. |
-| `NETDATA_LOG_FORMAT` | One of `journal`, `logfmt` or `json`, indicating the format of the logs. Plugins can use the Netdata `systemd-cat-native` command to always log in `journal` format, and have it automatically converted to the format expected by Netdata. |
-| `NETDATA_LOG_LEVEL` | One of `emergency`, `alert`, `critical`, `error`, `warning`, `notice`, `info`, `debug`. Plugins are expected to log events of the given priority or higher. |
-| `NETDATA_SYSLOG_FACILITY` | Set only when `NETDATA_LOG_METHOD` is `syslog`. Possible values are `auth`, `authpriv`, `cron`, `daemon`, `ftp`, `kern`, `lpr`, `mail`, `news`, `syslog`, `user`, `uucp` and `local0` to `local7`. |
-| `NETDATA_ERRORS_THROTTLE_PERIOD` | The log throttling period in seconds. |
-| `NETDATA_ERRORS_PER_PERIOD` | The allowed number of log events per period. |
-| `NETDATA_SYSTEMD_JOURNAL_PATH` | When `NETDATA_LOG_METHOD` is set to `journal`, this is the systemd-journald socket path to use. |
-
-### The output of the plugin
-
-The plugin should output instructions for Netdata to its output (`stdout`). Since this uses pipes, please make sure you flush `stdout` after every iteration.
-
-#### DISABLE
-
-`DISABLE` will disable this plugin. This will prevent Netdata from restarting the plugin. You can also exit with the value `1` to have the same effect.
-
-#### HOST_DEFINE
-
-`HOST_DEFINE` defines a new (or updates an existing) virtual host.
-
-The template is:
-
-> HOST_DEFINE machine_guid hostname
-
-where:
-
-- `machine_guid`
-
-  uniquely identifies the host; this is what will be needed to add charts to the host.
-
-- `hostname`
-
-  is the hostname of the virtual host
-
-#### HOST_LABEL
-
-`HOST_LABEL` adds a key-value pair to the virtual host labels. It has to be given between `HOST_DEFINE` and `HOST_DEFINE_END`.
-
-The template is:
-
-> HOST_LABEL key value
-
-where:
-
-- `key`
-
-  uniquely identifies the key of the label
-
-- `value`
-
-  is the value associated with this key
-
-There are a few special keys that are used to define the system information of the monitored system:
-
-- `_cloud_provider_type`
-- `_cloud_instance_type`
-- `_cloud_instance_region`
-- `_os_name`
-- `_os_version`
-- `_kernel_version`
-- `_system_cores`
-- `_system_cpu_freq`
-- `_system_ram_total`
-- `_system_disk_space`
-- `_architecture`
-- `_virtualization`
-- `_container`
-- `_container_detection`
-- `_virt_detection`
-- `_is_k8s_node`
-- `_install_type`
-- `_prebuilt_arch`
-- `_prebuilt_dist`
-
-#### HOST_DEFINE_END
-
-`HOST_DEFINE_END` commits the host information, creating a new host entity, or updating an existing one with the same `machine_guid`.
-
-#### HOST
-
-`HOST` switches data collection between hosts.
-
-The template is:
-
-> HOST machine_guid
-
-where:
-
-- `machine_guid`
-
-  is the UUID of the host to switch to. After this command, every other command following it is assumed to be associated with this host.
-  Setting machine_guid to `localhost` switches data collection to the local host.
-
-#### CHART
-
-`CHART` defines a new chart.
-
-The template is:
-
-> CHART type.id name title units \[family \[context \[charttype \[priority \[update_every \[options \[plugin [module]]]]]]]]
-
-where:
-
-- `type.id`
-
-  uniquely identifies the chart;
-  this is what will be needed to add values to the chart
-
-  the `type` part controls the menu the charts will appear in
-
-- `name`
-
-  is the name that will be presented to the user instead of `id` in `type.id`. This means that only the `id` part of
-  `type.id` is changed.
  When a name has been given, the chart is indexed (and can be referred to) as both `type.id` and
-  `type.name`. You can set name to `''`, or `null`, or `(null)` to disable it. If a chart with the same name already
-  exists, a serial number is automatically attached to the name to avoid naming collisions.
-
-- `title`
-
-  the text above the chart
-
-- `units`
-
-  the label of the vertical axis of the chart;
-  all dimensions added to a chart should have the same units
-  of measurement
-
-- `family`
-
-  is used to group charts together
-  (for example all eth0 charts should say: eth0);
-  if empty or missing, the `id` part of `type.id` will be used
-
-  this controls the sub-menu on the dashboard
-
-- `context`
-
-  the context gives the template of the chart. For example, if multiple charts present the same information for a different family, they should have the same `context`
-
-  this is used for looking up rendering information for the chart (colors, sizes, informational texts) and also to apply alerts to it
-
-- `charttype`
-
-  one of `line`, `area` or `stacked`;
-  if empty or missing, `line` will be used
-
-- `priority`
-
-  is the relative priority of the charts as rendered on the web page;
-  lower numbers make the charts appear before the ones with higher numbers;
-  if empty or missing, `1000` will be used
-
-- `update_every`
-
-  overwrites the update frequency set by the server;
-  if empty or missing, the user-configured value will be used
-
-- `options`
-
-  a space-separated list of options, enclosed in quotes. 4 options are currently supported: `obsolete` to mark a chart as obsolete (Netdata will hide it and delete it after some time), `detail` to mark a chart as insignificant (this may be used by dashboards to make the charts smaller, or somehow visualize properly a less important chart), `store_first` to make Netdata store the first collected value, assuming there was an invisible previous value set to zero (this is used by statsd charts - if the first collected value of incremental dimensions is not zero-based, unrealistic spikes will appear with this option set) and `hidden` to perform all operations on a chart but not offer it on dashboards (the chart will be sent to external databases). `CHART` options have been added in Netdata v1.7 and the `hidden` option was added in 1.10.
-
-- `plugin` and `module`
-
-  both are just names that are used to let the user identify the plugin and the module that generated the chart. If `plugin` is unset or empty, Netdata will automatically set the filename of the plugin that generated the chart. `module` has no default.
-
-#### DIMENSION
-
-`DIMENSION` defines a new dimension for the chart.
-
-The template is:
-
-> DIMENSION id \[name \[algorithm \[multiplier \[divisor [options]]]]]
-
-where:
-
-- `id`
-
-  the `id` of this dimension (it is a text value, not numeric);
-  this will be needed later to add values to the dimension
-
-  We suggest avoiding the use of `.` in dimension ids. External databases expect metrics to be `.` separated, and people will get confused if a dimension id contains a dot.
-
-- `name`
-
-  the name of the dimension as it will appear in the legend of the chart;
-  if empty or missing, the `id` will be used
-
-- `algorithm`
-
-  one of:
-
-  - `absolute`
-
-    the value is drawn as-is (interpolated to second boundary);
-    if `algorithm` is empty, invalid or missing, `absolute` is used
-
-  - `incremental`
-
-    the value increases over time;
-    the difference from the last value is presented in the chart;
-    the server interpolates the value and calculates a per-second figure
-
-  - `percentage-of-absolute-row`
-
-    the % of this value compared to the total of all dimensions
-
-  - `percentage-of-incremental-row`
-
-    the % of this value compared to the incremental total of
-    all dimensions
-
-- `multiplier`
-
-  an integer value to multiply the collected value;
-  if empty or missing, `1` is used
-
-- `divisor`
-
-  an integer value to divide the collected value;
-  if empty or missing, `1` is used
-
-- `options`
-
-  a space-separated list of options, enclosed in quotes. Options supported: `obsolete` to mark a dimension as obsolete (Netdata will delete it after some time) and `hidden` to make this dimension hidden; it will take part in the calculations but will not be presented in the chart.
-
-#### VARIABLE
-
-> VARIABLE [SCOPE] name = value
-
-`VARIABLE` defines a variable that can be used in alerts. This is used for setting constants (like the max connections a server may accept).
-
-Variables support 2 scopes:
-
-- `GLOBAL` or `HOST` to define the variable at the host level.
-- `LOCAL` or `CHART` to define the variable at the chart level. Use chart-local variables when the same variable may exist for different charts (i.e. Netdata monitors 2 mysql servers, and you need to set the `max_connections` each server accepts). Using chart-local variables is ideal for building alert templates.
-
-The position of the `VARIABLE` line sets its default scope (in case you do not specify a scope). So, defining a `VARIABLE` before any `CHART`, or between `END` and `BEGIN` (outside any chart), sets `GLOBAL` scope, while defining a `VARIABLE` just after a `CHART` or a `DIMENSION`, or within the `BEGIN` - `END` block of a chart, sets `LOCAL` scope.
-
-These variables can be set and updated at any point.
-
-Variable names should use alphanumeric characters, the `.` and the `_`.
-
-The `value` is floating point (Netdata uses `long double`).
-
-Variables are transferred to upstream Netdata servers (streaming and database replication).
-
-#### CLABEL
-
-> CLABEL name value source
-
-`CLABEL` defines a label used to organize and identify a chart.
-
-Name and value accept characters according to the following table:
-
-| Character | Symbol | Label Name | Label Value |
-|---------------------|:------:|:----------:|:-----------:|
-| UTF-8 character | UTF-8 | _ | keep |
-| Lower case letter | [a-z] | keep | keep |
-| Upper case letter | [A-Z] | keep | [a-z] |
-| Digit | [0-9] | keep | keep |
-| Underscore | _ | keep | keep |
-| Minus | - | keep | keep |
-| Plus | + | _ | keep |
-| Colon | : | _ | keep |
-| Semicolon | ; | _ | : |
-| Equal | = | _ | : |
-| Period | . | keep | keep |
-| Comma | , | . | . |
-| Slash | / | keep | keep |
-| Backslash | \ | / | / |
-| At | @ | _ | keep |
-| Space | ' ' | _ | keep |
-| Opening parenthesis | ( | _ | keep |
-| Closing parenthesis | ) | _ | keep |
-| Anything else | | _ | _ |
-
-The `source` is an integer field that can have the following values:
-- `1`: The value was set automatically.
-- `2`: The value was set manually.
-- `4`: This is a Kubernetes (K8s) label.
-- `8`: This is a label defined using the `netdata` agent cloud link.
-
-#### CLABEL_COMMIT
-
-`CLABEL_COMMIT` indicates that all labels were defined and the chart can be updated.
-
-#### FUNCTION
-
-The plugin can register functions with Netdata, like this:
-
-> FUNCTION [GLOBAL] "name and parameters of the function" timeout "help string for users" "tags" "access"
-
-- Tags currently recognized are either `top` or `logs` (or both, space separated).
-- Access is one of `any`, `member`, or `admin`:
-  - `any` to offer the function to all users of Netdata, even if they are not authenticated.
-  - `member` to offer the function to all authenticated members of Netdata.
-  - `admin` to offer the function only to authenticated administrators.
-
-Users can use a function to ask for more information from the collector. Netdata maintains a registry of functions in 2 levels:
-
-- per node
-- per chart
-
-Both node and chart functions are exactly the same, but chart functions allow Netdata to relate functions with charts and therefore present a context-sensitive menu of functions related to the chart the user is using.
-
-Users can get a list of all the registered functions using the `/api/v1/functions` endpoint of Netdata and call functions using the `/api/v1/function` API call of Netdata.
-
-Once a function is called, the plugin will receive at its standard input a command that looks like this:
-
-```
-FUNCTION transaction_id timeout "name and parameters of the function as one quoted parameter" "user permissions value" "source of request"
-```
-
-When the function to be called is to receive a payload of parameters, the call looks like this:
-
-```
-FUNCTION_PAYLOAD transaction_id timeout "name and parameters of the function as one quoted parameter" "user permissions value" "source of request" "content/type"
-body of the payload, formatted according to content/type
-FUNCTION_PAYLOAD_END
-```
-
-In this case, Netdata will send:
-
-- A line starting with `FUNCTION_PAYLOAD` together with the required metadata for the function, like the transaction id, the function name and its parameters, the timeout and the content type. This line ends with a newline.
-- Then, the payload itself (which may or may not have newlines in it). The payload should be parsed according to the content type parameter.
-- Finally, a line starting with `FUNCTION_PAYLOAD_END`, so it is expected like `\nFUNCTION_PAYLOAD_END\n`.
-
-Note 1: The plugins.d protocol allows parameters without single or double quotes if they don't contain spaces. However, the plugin should be able to parse parameters even if they are enclosed in single or double quotes. If the first character of a parameter is a single quote, its last character should also be a single quote, and similarly for double quotes.
-
-Note 2: Netdata always sends the function and its parameters enclosed in double quotes. If the function command and its parameters contain quotes, they are converted to single quotes.
-
-The plugin is expected to parse and validate `name and parameters of the function as one quoted parameter`. Netdata allows the user interface to manipulate this string by appending more parameters.
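-
-For example, a registration and a matching invocation might look like the following sketch (the function name, timeout, tag, access level, transaction id and the trailing quoted fields are illustrative placeholders only). A plugin could register a function like this:
-
-```
-FUNCTION "processes" 10 "Show the currently running processes" "top" "member"
-```
-
-and, when a user calls it, read from its standard input something like:
-
-```
-FUNCTION tr_1234 10 "processes" "user permissions value" "source of request"
-```
-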
-
-If the plugin rejects the request, it should respond with this:
-
-```
-FUNCTION_RESULT_BEGIN transaction_id 400 application/json
-{
-   "status": 400,
-   "error_message": "description of the rejection reasons"
-}
-FUNCTION_RESULT_END
-```
-
-If the plugin prepares a response, it should send (via its standard output, together with the collected data, but not interleaved with them):
-
-```
-FUNCTION_RESULT_BEGIN transaction_id http_response_code content_type expiration
-```
-
-Where:
-
- - `transaction_id` is the transaction id that Netdata sent for this function execution
- - `http_response_code` is the http error code Netdata should respond with, 200 is the "ok" response
- - `content_type` is the content type of the response
- - `expiration` is the absolute timestamp (number, unix epoch) this response expires
-
-Immediately after this, all text is assumed to be the response content.
-The content is text and line oriented. The maximum line length accepted is 15kb. Longer lines will be truncated.
-The type of the content itself depends on the plugin and the UI.
-
-To terminate the message, Netdata seeks a line with just this:
-
-```
-FUNCTION_RESULT_END
-```
-
-This defines the end of the message. `FUNCTION_RESULT_END` should appear on a line alone, without any other text, so it is wise to add `\n` before and after it.
-
-After this line, Netdata resumes processing collected metrics from the plugin.
-
-The maximum uncompressed payload size Netdata will accept is 100MB.
-
-##### Functions cancellation
-
-Netdata is able to detect when a user made an API request, but abandoned it before it was completed. If this happens to an API call for a function served by the plugin, Netdata will generate a `FUNCTION_CANCEL` request to let the plugin know that it can stop processing the query.
-
-After receiving such a command, the plugin **must still send a response for the original function request**, to wake up any waiting threads before they timeout. The http response code is not important, since the response will be discarded, however for auditing reasons we suggest sending back a 499 http response code. This is not a standard response code according to the HTTP protocol, but web servers like `nginx` use it to indicate that a request was abandoned by the user.
-
-##### Functions progress
-
-When a request takes too long to be processed, Netdata allows the plugin to report progress to Netdata, which in turn will report progress to the caller.
-
-The plugin can send `FUNCTION_PROGRESS` like this:
-
-```
-FUNCTION_PROGRESS transaction_id done all
-```
-
-Where:
-
-- `transaction_id` is the transaction id of the function request
-- `done` is an integer value indicating the amount of work done
-- `all` is an integer value indicating the total amount of work to be done
-
-Netdata supports two kinds of progress:
-
-- progress as a percentage, which is calculated as `done * 100 / all`
-- progress without knowing the total amount of work to be done, which is enabled when the plugin reports `all` as zero.
-
-##### Functions timeout
-
-All function calls specify a timeout, at which all the intermediate routing nodes (parents, web server threads) will time out and abort the call.
-
-However, all intermediate routing nodes are configured to extend the timeout when the caller asks for progress. This works like this:
-
-When a progress request is received, if the expected timeout of the request is less than or equal to 10 seconds, the expected timeout is extended by 10 seconds.
-
-Usually, the user interface asks for progress every second. So, during the last 10 seconds of the timeout, every progress request made shifts the timeout 10 seconds into the future.
-
-To accomplish this, when Netdata receives a progress request from a user, it generates progress requests to the plugin, updating all the intermediate nodes to extend their timeout if necessary.
-
-The plugin will receive progress requests like this:
-
-```
-FUNCTION_PROGRESS transaction_id
-```
-
-There is no need to respond to this command. It is only there to let the plugin know that a user is still waiting for the query to finish.
-
-#### CONFIG
-
-`CONFIG` commands sent from the plugin to Netdata define dynamic configuration entities. These configurable entities are exposed to the user interface, allowing users to change configuration at runtime.
-
-Dynamic configurations made this way are saved to disk by Netdata and are replayed automatically when Netdata or the plugin restarts.
-
-`CONFIG` commands look like this:
-
-```
-CONFIG id action ...
-```
-
-Where:
-
-- `id` is a unique identifier for the configurable entity. This should by design be unique across Netdata. It should be something like `plugin:module:jobs`, e.g. `go.d:postgresql:jobs:masterdb`. This is assumed to be colon-separated, with the last part (`masterdb` in our example) being the one displayed to users when there are no conflicts under the same configuration path.
-- `action` can be:
-  - `create`, to declare the dynamic configuration entity
-  - `delete`, to delete the dynamic configuration entity - this does not delete user configuration, so if an entity with the same id is created in the future, the saved configuration will be given to it.
-  - `status`, to update the dynamic configuration entity status
-
-> IMPORTANT:
-> The plugin should blindly create, delete and update the status of its dynamic configuration entities, without any special logic applied to it. Netdata needs to be kept up to date with what is actually happening at the plugin. Keep in mind that creating dynamic configuration entities triggers responses from Netdata, depending on their type and status. Re-creating a job triggers the same responses every time, so make sure you create jobs only when you actually add jobs.
-
-When the `action` is `create`, the following additional parameters are expected:
-
-```
-CONFIG id action status type "path" source_type "source" "supported commands" "view permissions" "edit permissions"
-```
-
-Where:
-
-- `action` should be `create`
-- `status` can be:
-  - `accepted`, the plugin accepted the configuration, but it is not running yet.
-  - `running`, the plugin accepted and runs the configuration.
-  - `failed`, the plugin tries to run the configuration but it fails.
-  - `incomplete`, the plugin needs additional settings to run this configuration. This is usually used for cases where the plugin discovered a job, but important information is missing for it to work.
-  - `disabled`, the configuration has been disabled by a user.
-  - `orphan`, the configuration is not claimed by any plugin. This is used internally by Netdata to mark configuration nodes as available when there is no plugin related to them. Do not use it in plugins directly.
-- `type` can be `single`, `template` or `job`:
-  - `single` is used when the configurable entity is fixed and users should never be able to add or delete it.
-  - `template` is used to define a template based on which users can add multiple configurations, like adding data collection jobs. So, the plugin defines the template of the jobs and users are presented with a `[+]` button to add such configuration jobs. The plugin can define multiple templates by giving different `id`s to them.
-  - `job` is used to define a job of a template. The plugin should always add all its jobs, independently of the way they have been discovered. It is important to note the relation between `template` and `job` when it comes to the `id`: the `id` of the template should be the prefix of the `job`'s `id`. For example, if the template is `go.d:postgresql:jobs`, then all its jobs should be like `go.d:postgresql:jobs:jobname`.
-- `path` is the absolute path of the configurable entity inside the tree of Netdata configurations. Usually, this should be `/collectors`.
-- `source_type` can be `internal`, `stock`, `user`, `discovered` or `dyncfg`:
-  - `internal` is used for configurations that are based on internal code settings
-  - `stock` is used for default configurations
-  - `discovered` is used for dynamic configurations the plugin discovers on its own
-  - `user` is used for user configurations, usually via a configuration file
-  - `dyncfg` is used for configurations received via this dynamic configuration mechanism
-- `source` should provide more details about the exact source of the configuration, like `line@file`, or `user@ip`, etc.
-- `supported_commands` is a space separated list of the following keywords, enclosed in single or double quotes. These commands are used by the user interface to determine the actions the users can take:
-  - `schema`, to expose the JSON schema for the user interface. This is mandatory for all configurable entities. When `schema` requests are received, Netdata will first attempt to load the schema from `/etc/netdata/schema.d/` and `/var/lib/netdata/conf.d/schema.d`.
    For jobs, it will serve the schema of their template. If no schema is found for the required `id`, the `schema` request will be forwarded to the plugin, which is expected to send back the relevant schema.
-  - `get`, to expose the current configuration values, according to the schema defined. `templates` cannot support `get`, since they don't maintain any data.
-  - `update`, to receive configuration updates for this entity. `templates` cannot support `update`, since they don't maintain any data.
-  - `test`, like `update`, but only test the configuration and report success or failure.
-  - `add`, to receive job creation commands for templates. Only `templates` should support this command.
-  - `remove`, to remove a configuration. Only `jobs` should support this command.
-  - `enable` and `disable`, to receive user requests to enable and disable this entity. If only one of `enable` or `disable` is added to the supported commands, Netdata will add both of them. The plugin should expose these commands on `templates` only when it wants to receive `enable` and `disable` commands for all the `jobs` of this `template`.
-  - `restart`, to restart a job.
-- `view permissions` and `edit permissions` are bitmaps of the Netdata permission system to control access to the configuration. If set to zero, Netdata will require a signed-in user with view and edit permissions to Netdata's configuration system.
-
-The plugin receives commands as if it had exposed a `FUNCTION` named `config`. Netdata formats all these calls like this:
-
-```
-config id command
-```
-
-Where `id` is the unique id of the configurable entity and `command` is one of the supported commands the plugin sent to Netdata.
-
-The plugin will receive (for commands: `schema`, `get`, `remove`, `enable`, `disable` and `restart`):
-
-```
-FUNCTION transaction_id timeout "config id command" "user permissions value" "source string"
-```
-
-or (for commands: `update`, `add` and `test`):
-
-```
-FUNCTION_PAYLOAD transaction_id timeout "config id command" "user permissions value" "source string" "content/type"
-body of the payload formatted according to content/type
-FUNCTION_PAYLOAD_END
-```
-
-Once received, the plugin should process it and respond accordingly.
-
-Immediately after the plugin adds a configuration entity, if the commands `enable` and `disable` are supported by it, Netdata will send either `enable` or `disable` for it, based on the last user action, which has been persisted to disk.
-
-Plugin responses follow the same format as `FUNCTION` responses:
-
-```
-FUNCTION_RESULT_BEGIN transaction_id http_response_code content/type expiration
-body of the response formatted according to content/type
-FUNCTION_RESULT_END
-```
-
-Successful responses (HTTP response code 200) to `schema` and `get` should send back the relevant JSON object.
-All other responses should have the following response body:
-
-```json
-{
-    "status" : 404,
-    "message" : "some text"
-}
-```
-
-The user interface presents the message to users, even when the response is successful (HTTP code 200).
-
-When responding to additions and updates, Netdata uses the following success response codes to derive additional information:
-
-- `200`, responding with 200 means the configuration has been accepted and it is running.
-- `202`, responding with 202 means the configuration has been accepted but it is not yet running. A subsequent `status` action will update it.
-- `298`, responding with 298 means the configuration has been accepted but it is disabled for some reason (probably because it matches nothing or the contents are not useful - use the `message` to provide additional information).
-- `299`, responding with 299 means the configuration has been accepted but a restart is required to apply it.
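-
-Putting it together, a data collection plugin could declare a jobs template and one discovered job like this (the ids, the path and the zero permission bitmaps below are illustrative):
-
-```
-CONFIG go.d:example:jobs create accepted template /collectors internal 'internal' 'schema add enable disable' 0 0
-CONFIG go.d:example:jobs:localhost create running job /collectors discovered 'discovered' 'schema get update test remove enable disable restart' 0 0
-```
-
-Netdata would then send `enable` or `disable` for the job, based on the last persisted user action, as a `config` function call like this (transaction id, timeout, permissions and source are placeholders):
-
-```
-FUNCTION tx-0002 120 "config go.d:example:jobs:localhost enable" "0x0" "dyncfg"
-```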
-
-## Data collection
-
-Data collection is defined as a series of `BEGIN` -> `SET` -> `END` lines
-
-> BEGIN type.id [microseconds]
-
-- `type.id`
-
-  is the unique identification of the chart (as given in `CHART`)
-
-- `microseconds`
-
-  is the number of microseconds since the last update of the chart. It is optional.
-
-  Under heavy system load, the system may have some latency transferring
-  data from the plugins to Netdata via the pipe. This number improves
-  accuracy significantly, since the plugin is able to calculate the
-  duration between its iterations better than Netdata.
-
-  The first time the plugin is started, no microseconds should be given
-  to Netdata.
-
-> SET id = value
-
-- `id`
-
-  is the unique identification of the dimension (of the chart just begun)
-
-- `value`
-
-  is the collected value; only integer values are collected. If you want to push fractional values, multiply the value by a power of 10 (e.g. 100 or 1000) and set the `DIMENSION` divisor to the same factor.
-
-> END
-
-  `END` does not take any parameters; it commits the collected values for all dimensions to the chart. If a dimension was not `SET`, its value will be empty for this commit.
-
-More `SET` lines may appear, to update all the dimensions of the chart,
-all within one `BEGIN` -> `END` block.
-
-All `SET` lines within a single `BEGIN` -> `END` block have to refer to the
-same chart.
-
-If more charts need to be updated, each chart should have its own
-`BEGIN` -> `SET` -> `END` block.
-
-If, for any reason, a plugin has issued a `BEGIN` but wants to cancel it,
-it can issue a `FLUSH`. The `FLUSH` command will instruct Netdata to ignore
-all the values collected since the last `BEGIN` command.
-
-If a plugin does not behave properly (outputs invalid lines, or does not
-follow these guidelines), it will be disabled by Netdata.
-
-### collected values
-
-Netdata will collect any **signed** value in the 64-bit range:
-`-9,223,372,036,854,775,808` to `+9,223,372,036,854,775,807`
-
-If a value is not collected, leave it empty, like this:
-
-`SET id =`
-
-or do not output the line at all.
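-
-For example, a plugin updating two hypothetical charts in one iteration would send the following; the `failed` dimension is intentionally left empty for this commit:
-
-```
-BEGIN example.requests 1000000
-SET successful = 120
-SET failed =
-END
-BEGIN example.latency 1000000
-SET average = 33
-END
-```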
-
-## Modular Plugins
-
-1. **python**, use `python.d.plugin`, there are many examples in the [python.d
-   directory](/src/collectors/python.d.plugin/README.md)
-
-   Python is ideal for Netdata plugins. It is a simple yet powerful way to collect data, and it has a very small memory footprint, although it is not the most CPU-efficient option.
-
-2. **BASH**, use `charts.d.plugin`, there are many examples in the [charts.d
-   directory](/src/collectors/charts.d.plugin/README.md)
-
-   BASH is the simplest scripting language for collecting values. It is the least efficient though in terms of CPU resources. You can use it to collect data quickly, but extensive use of it might use a lot of system resources.
-
-3. **C**
-
-   Of course, C is the most efficient way of collecting data. This is why Netdata itself is written in C.
-
-## Writing Plugins Properly
-
-There are a few rules for writing plugins properly:
-
-1. Respect system resources
-
-   Pay special attention to efficiency:
-
-   - Initialize everything once, at the beginning. Initialization is not an expensive operation. Your plugin will most probably be started once and run forever. So, do whatever heavy operation is needed at the beginning, just once.
-   - Do the absolute minimum while iterating to collect values repeatedly.
-   - If you need to connect to another server to collect values, avoid re-connects if possible. Connect just once, with keep-alive (for HTTP) enabled, and collect values using the same connection.
-   - Avoid any CPU or memory heavy operation while collecting data. If you control memory allocation, avoid any memory allocation while iterating to collect values.
-   - Avoid running external commands when possible. If you are writing shell scripts, especially avoid pipes (each pipe is another fork, a very expensive operation).
-
-2. The best way to iterate at a constant pace is this pseudo code (a Python version follows the list below):
-
-```js
-    var update_every = argv[1] * 1000; /* seconds * 1000 = milliseconds */
-
-    readConfiguration();
-
-    if(!verifyWeCanCollectValues()) {
-        print("DISABLE");
-        exit(1);
-    }
-
-    createCharts(); /* print CHART and DIMENSION statements */
-
-    var loops = 0;
-    var last_run = 0;
-    var next_run = 0;
-    var dt_since_last_run = 0;
-    var now = 0;
-
-    while(true) {
-        /* find the current time in milliseconds */
-        now = currentTimeStampInMilliseconds();
-
-        /*
-         * find the time of the next loop
-         * this makes sure we are always aligned
-         * with the Netdata daemon
-         */
-        next_run = now - (now % update_every) + update_every;
-
-        /*
-         * wait until it is time
-         * it is important to do it in a loop
-         * since many wait functions can be interrupted
-         */
-        while( now < next_run ) {
-            sleepMilliseconds(next_run - now);
-            now = currentTimeStampInMilliseconds();
-        }
-
-        /* calculate the time passed since the last run */
-        if ( loops > 0 )
-            dt_since_last_run = (now - last_run) * 1000; /* in microseconds */
-
-        /* prepare for the next loop */
-        last_run = now;
-        loops++;
-
-        /* do your magic here to collect values */
-        collectValues();
-
-        /* send the collected data to Netdata */
-        printValues(dt_since_last_run); /* print BEGIN, SET, END statements */
-    }
-```
-
-   Using the above procedure, your plugin will be synchronized to start data collection on steps of `update_every`. There will be no need to keep track of latencies in data collection.
-
-   Netdata interpolates values to second boundaries, so even if your plugin is not perfectly aligned it does not matter. Netdata will find out. When your plugin works in increments of `update_every`, there will be no gaps in the charts due to the possible cumulative micro-delays in data collection. Gaps will only appear if the data collection is really delayed.
-
-3. If you are not sure your plugin is free of memory leaks, exit once every hour. Netdata will restart your process.
-
-4. If possible, try to autodetect if your plugin should be enabled, without any configuration.
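-
-As a concrete illustration of the pacing logic above, here is a minimal external plugin skeleton in Python. The chart name, dimension and collected value are hypothetical, and the `CHART`/`DIMENSION` parameters follow the formats described earlier in this document:
-
-```python
-#!/usr/bin/env python3
-# Minimal Netdata external plugin skeleton, following the pacing pseudo code above.
-# The chart "example.random" and its dimension are hypothetical.
-import random
-import sys
-import time
-
-update_every = int(sys.argv[1]) if len(sys.argv) > 1 else 1
-
-# Initialize once: define the chart and its dimension at startup.
-print("CHART example.random '' 'A random number' 'value' example example.random line 100000 %d" % update_every)
-print("DIMENSION random '' absolute 1 1")
-sys.stdout.flush()
-
-last_run = 0.0
-while True:
-    # Align the next iteration to multiples of update_every.
-    now = time.time()
-    next_run = now - (now % update_every) + update_every
-    while now < next_run:
-        time.sleep(next_run - now)
-        now = time.time()
-
-    # Report the duration since the last run, in microseconds (omitted on the first run).
-    usec = "" if not last_run else " %d" % int((now - last_run) * 1000000)
-    last_run = now
-
-    # Collect and send the values.
-    print("BEGIN example.random%s" % usec)
-    print("SET random = %d" % random.randint(0, 100))
-    print("END")
-    sys.stdout.flush()
-```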
-
-
diff --git a/src/collectors/plugins.d/functions-table.md b/src/collectors/plugins.d/functions-table.md
deleted file mode 100644
index f3a8bcf36..000000000
--- a/src/collectors/plugins.d/functions-table.md
+++ /dev/null
@@ -1,418 +0,0 @@
-
-> This document is a work in progress.
-
-Plugin functions can support any kind of responses. However, the UI of Netdata has defined some structures as responses it can parse, understand and visualize.
-
-One of these responses is the `table`. This is used in almost all functions implemented today.
-
-# Functions Tables
-
-Tables are defined when `"type": "table"` is set.
-The following is the standard header that should be available on all `table` responses:
-
-```json
-{
-    "type": "table",
-    "status": 200,
-    "update_every": 1,
-    "help": "help text",
-    "hostname": "the hostname of the server sending this response, to appear at the title of the UI.",
-    "expires": "UNIX epoch timestamp that the response expires",
-    "has_history": "boolean: true when the datetime picker plays a role in the result set",
-    // rest of the response
-}
-```
-
-## Preflight `info` request
-
-Before making the first call to a function, the UI does a preflight request to understand what the function supports. The plugin receives this request as a FUNCTION call specifying the `info` parameter (possibly among others).
-
-The response from the plugin is expected to have the following:
-
-```json
-{
-    // standard table header - as above
-    "accepted_params": [ "a", "b", "c", ... ],
-    "required_params": [
-        {
-            "id": "the keyword to use when sending / receiving this parameter",
-            "name": "the name to present to users for this parameter",
-            "help": "a help string to help users understand this parameter",
-            "type": "the type of the parameter, either: 'select' or 'multiselect'",
-            "options": [
-                {
-                    "id": "the keyword to use when sending / receiving this option",
-                    "name": "the name to present to users for this option",
-                    "pill": "a short text to show next to this option as a pill",
-                    "info": "a longer text to show on a tooltip when the user is hovering this option"
-                },
-                // more options for this required parameter
-            ]
-        },
-        // more required parameters
-    ]
-}
-```
-
-If there are no required parameters, `required_params` can be omitted.
-If there are no accepted parameters, `accepted_params` can be omitted. `accepted_params` can be sent during normal responses to update the UI with a new set of parameters available, between calls.
-
-For `logs`, the UI requires this set of `accepted_params`.
-
-Ref [Pagination](#pagination), [Deltas](#incremental-responses)
-
-```json
-[
-    "info",              // boolean: requests the preflight `info` request
-    "after",             // interval start timestamp
-    "before",            // interval end timestamp
-    "direction",         // sort direction [backward,forward]
-    "last",              // number of records to retrieve
-    "anchor",            // timestamp to divide records in pages
-    "facets",
-    "histogram",         // selects the facet to be used on the histogram
-    "if_modified_since", // used in PLAY mode, to indicate that the UI wants data newer than the specified timestamp
-    "data_only",         // boolean: requests data (logs) only
-    "delta",             // boolean: requests incremental responses
-    "tail",
-    "sampling",
-    "slice"
-]
-```
-
-If there are `required_params`, the UI by default selects the first option. [](VERIFY_WITH_UI)
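-
-For instance, a preflight request and a minimal response for a hypothetical function could look like this (the transaction id, timestamps and parameter names are illustrative):
-
-```
-FUNCTION tx-0003 10 "my-function info" "0x0" "method=api"
-```
-
-```
-FUNCTION_RESULT_BEGIN tx-0003 200 application/json 1700000000
-{
-    "type": "table",
-    "status": 200,
-    "update_every": 1,
-    "help": "help text",
-    "accepted_params": [ "info", "category" ],
-    "required_params": [
-        {
-            "id": "category",
-            "name": "Category",
-            "help": "Select the category to present",
-            "type": "select",
-            "options": [
-                { "id": "cpu", "name": "CPU" },
-                { "id": "memory", "name": "Memory" }
-            ]
-        }
-    ]
-}
-FUNCTION_RESULT_END
-```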
-
-## Table data
-
-To define table data, the UI expects this:
-
-```json
-{
-    // header
-    "columns": {
-        "id": {
-            "index": "number: the sort order for the columns, lower numbers are first",
-            "name": "string: the name of the column as it should be presented to users",
-            "unique_key": "boolean: true when the column uniquely identifies the row",
-            "visible": "boolean: true when the column should be visible by default",
-            "type": "enum: see column types",
-            "units": "string: the units of the value, if any - this item can be omitted if the column does not have units [](VERIFY_WITH_UI)",
-            "visualization": "enum: see visualization types",
-            "value_options": {
-                "units": "string: the units of the value [](VERIFY_WITH_UI)",
-                "transform": "enum: see transformation types",
-                "decimal_points": "number: the number of fractional digits for the number",
-                "default_value": "whatever the value is: when the value is null, show this instead"
-            },
-            "max": "number: when the column is numeric, this is the max value the data have - this is used when range filtering is set and value bars",
-            "pointer_to": "id of another field: this is used when detail-string is set, to point to the column this column is detail of",
-            "sort": "enum: sorting order",
-            "sortable": "boolean: whether the column is sortable by users",
-            "sticky": "boolean: whether the column should always be visible in the UI",
-            "summary": "string: ???",
-            "filter": "enum: the filtering type for this column",
-            "full_width": "boolean: the value is expected to get most of the available column space. When multiple columns are full_width, the available space is given to all of them.",
-            "wrap": "boolean: true when the entire value should be shown, even when it occupies a big space.",
-            "default_expanded_filter": "boolean: true when the filter of this column should be expanded by default.",
-            "dummy": "boolean: when set to true, the column is not to be presented to users."
-        },
-        // more IDs
-    },
-    "data": [ // array of rows
-        [ // array of columns
-            // values for each column linked to their "index" in the columns
-        ],
-        // next row
-    ],
-    "default_sort_column": "id: the id of the column that should be sorted by default"
-}
-```
-
-**IMPORTANT**
-
-On data values, the `timestamp` column value must be in UNIX epoch microseconds.
-
-### Sorting order
-
-- `ascending`
-- `descending`
-
-### Transformation types
-
-- `none`, just show the value, without any processing
-- `number`, just show a number with its units, respecting `decimal_points`
-- `duration`, makes the UI show a human readable duration of the seconds given
-- `datetime`, makes the UI show a human readable datetime of the timestamp in UNIX epoch
-- `datetime_usec`, makes the UI show a human readable datetime of the timestamp in USEC UNIX epoch
-
-### Visualization types
-
-- `value`
-- `bar`
-- `pill`
-- `richValue`, this is not used yet; it is supposed to be a structure that will provide a value and options for it
-- `rowOptions`, defines options for the entire row - this column is hidden from the UI
-
-### rowOptions
-
-TBD
-
-### Column types
-
-- `none`
-- `integer`
-- `boolean`
-- `string`
-- `detail-string`
-- `bar-with-integer`
-- `duration`
-- `timestamp`
-- `array`
-
-### Filter types
-
-- `none`, this facet is not selectable by users
-- `multiselect`, the user can select any number of the available options
-- `facet`, similar to `multiselect`, but it also indicates that the column has been indexed and has values with counters. Columns set to `facet` must appear in the `facets` list.
-- `range`, the user can select a range of values (numeric)
-
-The plugin may send non-visible columns with filter type `facet`. This means that the plugin can enable indexing on these columns, but has not done so. Then the UI may send `facets:{ID1},{ID2},{ID3},...` to enable indexing of the columns specified.
-
-What is the default?
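-
-Putting the pieces together, a minimal (illustrative) `table` response could look like this:
-
-```json
-{
-    "type": "table",
-    "status": 200,
-    "update_every": 1,
-    "help": "Top processes",
-    "has_history": false,
-    "columns": {
-        "pid": {
-            "index": 0, "name": "PID", "unique_key": true, "visible": true,
-            "type": "integer", "visualization": "value", "sort": "ascending",
-            "sortable": true, "filter": "none"
-        },
-        "name": {
-            "index": 1, "name": "Name", "unique_key": false, "visible": true,
-            "type": "string", "visualization": "value", "sort": "ascending",
-            "sortable": true, "filter": "multiselect"
-        },
-        "cpu": {
-            "index": 2, "name": "CPU", "units": "%", "unique_key": false, "visible": true,
-            "type": "bar-with-integer", "visualization": "bar", "max": 100,
-            "sort": "descending", "sortable": true, "filter": "range"
-        }
-    },
-    "data": [
-        [ 1234, "netdata", 2 ],
-        [ 5678, "go.d.plugin", 1 ]
-    ],
-    "default_sort_column": "cpu"
-}
-```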
-
-#### Facets
-
-Facets are a special case of `multiselect` fields. They are used to provide additional information about each possible value, including their relative sort order and the number of times each value appears in the result set. Facets are filters handled by the plugin. So, the plugin will receive the user-selected filters like: `{KEY}:{VALUE1},{VALUE2},...`, where `{KEY}` is the id of the column and `{VALUEX}` is the id of the facet option the user selected.
-
-```json
-{
-    // header,
-    "columns": ...,
-    "data": ...,
-    "facets": [
-        {
-            "id": "string: the unique id of the facet",
-            "name": "string: the human readable name of the facet",
-            "order": "integer: the sorting order of this facet - lower numbers move items above others",
-            "options": [
-                {
-                    "id": "string: the unique id of the facet value",
-                    "name": "string: the human readable version of the facet value",
-                    "count": "integer: the number of times this value appears in the result set",
-                    "order": "integer: the sorting order of this facet value - lower numbers move items above others"
-                },
-                // next option
-            ],
-        },
-        // next facet
-    ]
-}
-```
-
-## Charts
-
-```json
-{
-    // header,
-    "charts": {
-
-    },
-    "default_charts": [
-
-    ]
-}
-```
-
-## Histogram
-
-```json
-{
-    "available_histograms": [
-        {
-            "id": "string: the unique id of the histogram",
-            "name": "string: the human readable name of the histogram",
-            "order": "integer: the sorting order of available histograms - lower numbers move items above others"
-        }
-    ],
-    "histogram": {
-        "id": "string: the unique id of the histogram",
-        "name": "string: the human readable name of the histogram",
-        "chart": {
-            "summary": {
-                "nodes": [
-                    {
-                        "mg": "string",
-                        "nm": "string: node name",
-                        "ni": "integer: node index"
-                    }
-                ],
-                "contexts": [
-                    {
-                        "id": "string: context id"
-                    }
-                ],
-                "instances": [
-                    {
-                        "id": "string: instance id",
-                        "ni": "integer: instance index"
-                    }
-                ],
-                "dimensions": [
-                    {
-                        "id": "string: dimension id",
-                        "pri": "integer",
-                        "sts": {
-                            "min": "float: dimension min value",
-                            "max": "float: dimension max value",
-                            "avg": "float: dimension average value",
-                            "arp": "float",
-                            "con": "float"
-                        }
-                    }
-                ]
-            },
-            "result": {
-                "labels": [
-                    // histogram labels
-                ],
-                "point": {
-                    "value": "integer",
-                    "arp": "integer",
-                    "pa": "integer"
-                },
-                "data": [
-                    [
-                        "timestamp", // unix milli
-                        // one array per label
-                        [
-                            // values
-                        ],
-                    ]
-                ]
-            },
-            "view": {
-                "title": "string: histogram title",
-                "update_every": "integer",
-                "after": "timestamp: histogram window start",
-                "before": "timestamp: histogram window end",
-                "units": "string: histogram units",
-                "chart_type": "string: histogram chart type",
-                "min": "integer: histogram min value",
-                "max": "integer: histogram max value",
-                "dimensions": {
-                    "grouped_by": [
-                        // "string: histogram grouped by",
-                    ],
-                    "ids": [
-                        // "string: histogram label id",
-                    ],
-                    "names": [
-                        // "string: histogram human readable label name",
-                    ],
-                    "colors": [],
-                    "units": [
-                        // "string: histogram label unit",
-                    ],
-                    "sts": {
-                        "min": [
-                            // "float: label min value",
-                        ],
-                        "max": [
-                            // "float: label max value",
-                        ],
-                        "avg": [
-                            // "float: label average value",
-                        ],
-                        "arp": [
-                            // "float",
-                        ],
-                        "con": [
-                            // "float",
-                        ]
-                    }
-                }
-            },
-            "totals": {
-                "nodes": {
-                    "sl": "integer",
-                    "qr": "integer"
-                },
-                "contexts": {
-                    "sl": "integer",
-                    "qr": "integer"
-                },
-                "instances": {
-                    "sl": "integer",
-                    "qr": "integer"
-                },
-                "dimensions": {
-                    "sl": "integer",
-                    "qr": "integer"
-                }
-            },
-            "db": {
-                "update_every": "integer"
-            }
-        }
-    }
-}
-```
-
-**IMPORTANT**
-
-On result data, `timestamp` values must be in UNIX epoch milliseconds.
-
-## Grouping
-
-```json
-{
-    // header,
-    "group_by": {
-
-    }
-}
-```
-
-## Datetime picker
-
-When `has_history: true`, the plugin must accept `after:TIMESTAMP_IN_SECONDS` and `before:TIMESTAMP_IN_SECONDS` parameters.
-The plugin can also turn pagination on, so that only a small set of the data are sent to the UI at a time.
-
-## Pagination
-
-The UI supports paginating results when `has_history: true`. So, when the result depends on the datetime picker and it is too big to be sent to the UI in one response, the plugin can enable datetime pagination like this:
-
-```json
-{
-    // header,
-    "columns": ...,
-    "data": ...,
-    "has_history": true,
-    "pagination": {
-        "enabled": "boolean: true to enable it",
-        "column": "string: the column id that is used for pagination",
-        "key": "string: the accepted_param that is used as the pagination anchor",
-        "units": "enum: a transformation of the datetime picker to make it compatible with the anchor: timestamp, timestamp_usec"
-    }
-}
-```
-
-Once pagination is enabled, the plugin must support the following parameters:
-
-- `{ANCHOR}:{VALUE}`, `{ANCHOR}` is the `pagination.key`, `{VALUE}` is the point the user wants to see entries at, formatted according to `pagination.units`.
-- `direction:backward` or `direction:forward` to specify whether the data to be returned are before or after the anchor.
-- `last:NUMBER`, the number of entries the plugin should return in the table data.
-- `query:STRING`, the full text search string the user wants to search for.
-- `if_modified_since:TIMESTAMP_USEC` and `tail:true`, used in PLAY mode, to indicate that the UI wants data newer than the specified timestamp. If there are no new data, the plugin must respond with 304 (Not Modified).
-
-### Incremental Responses
-
-- `delta:true` or `delta:false`, when the plugin supports incremental queries, it can accept the parameter `delta`. When set to true, the response of the plugin will be "added" to the previous response already available. This is used in combination with `if_modified_since` to optimize the amount of work the plugin has to do to respond.
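-
-For example, with pagination enabled, a request for the next page of a hypothetical logs function could reach the plugin like this (all values below are placeholders):
-
-```
-FUNCTION tx-0004 60 "my-logs after:1700000000 before:1700003600 anchor:1700003599000000 direction:backward last:200 delta:false" "0x0" "method=api"
-```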
- - -### Other - -- `slice:BOOLEAN` [](VERIFY_WITH_UI) -- `sampling:NUMBER` - diff --git a/src/collectors/plugins.d/gperf-config.txt b/src/collectors/plugins.d/gperf-config.txt deleted file mode 100644 index 721b771b7..000000000 --- a/src/collectors/plugins.d/gperf-config.txt +++ /dev/null @@ -1,112 +0,0 @@ -%{ - -#define PLUGINSD_KEYWORD_ID_FLUSH 97 -#define PLUGINSD_KEYWORD_ID_DISABLE 98 -#define PLUGINSD_KEYWORD_ID_EXIT 99 -#define PLUGINSD_KEYWORD_ID_HOST 71 -#define PLUGINSD_KEYWORD_ID_HOST_DEFINE 72 -#define PLUGINSD_KEYWORD_ID_HOST_DEFINE_END 73 -#define PLUGINSD_KEYWORD_ID_HOST_LABEL 74 - -#define PLUGINSD_KEYWORD_ID_BEGIN 12 -#define PLUGINSD_KEYWORD_ID_CHART 32 -#define PLUGINSD_KEYWORD_ID_CLABEL 34 -#define PLUGINSD_KEYWORD_ID_CLABEL_COMMIT 35 -#define PLUGINSD_KEYWORD_ID_DIMENSION 31 -#define PLUGINSD_KEYWORD_ID_END 13 -#define PLUGINSD_KEYWORD_ID_FUNCTION 41 -#define PLUGINSD_KEYWORD_ID_FUNCTION_RESULT_BEGIN 42 -#define PLUGINSD_KEYWORD_ID_FUNCTION_PROGRESS 43 -#define PLUGINSD_KEYWORD_ID_LABEL 51 -#define PLUGINSD_KEYWORD_ID_OVERWRITE 52 -#define PLUGINSD_KEYWORD_ID_SET 11 -#define PLUGINSD_KEYWORD_ID_VARIABLE 53 -#define PLUGINSD_KEYWORD_ID_CONFIG 100 - -#define PLUGINSD_KEYWORD_ID_CLAIMED_ID 61 -#define PLUGINSD_KEYWORD_ID_BEGIN2 2 -#define PLUGINSD_KEYWORD_ID_SET2 1 -#define PLUGINSD_KEYWORD_ID_END2 3 - -#define PLUGINSD_KEYWORD_ID_CHART_DEFINITION_END 33 -#define PLUGINSD_KEYWORD_ID_RBEGIN 22 -#define PLUGINSD_KEYWORD_ID_RDSTATE 23 -#define PLUGINSD_KEYWORD_ID_REND 25 -#define PLUGINSD_KEYWORD_ID_RSET 21 -#define PLUGINSD_KEYWORD_ID_RSSTATE 24 - -#define PLUGINSD_KEYWORD_ID_DYNCFG_ENABLE 901 -#define PLUGINSD_KEYWORD_ID_DYNCFG_REGISTER_MODULE 902 -#define PLUGINSD_KEYWORD_ID_DYNCFG_REGISTER_JOB 903 -#define PLUGINSD_KEYWORD_ID_DYNCFG_RESET 904 -#define PLUGINSD_KEYWORD_ID_REPORT_JOB_STATUS 905 -#define PLUGINSD_KEYWORD_ID_DELETE_JOB 906 - -%} - -%struct-type -%omit-struct-type -%define hash-function-name gperf_keyword_hash_function -%define lookup-function-name gperf_lookup_keyword -%define word-array-name gperf_keywords -%define constants-prefix GPERF_PARSER_ -%define slot-name keyword -%define initializer-suffix ,0,PARSER_INIT_PLUGINSD,0 -%global-table -%readonly-tables -%null-strings -PARSER_KEYWORD; - -%% -# -# Plugins Only Keywords -# -FLUSH, PLUGINSD_KEYWORD_ID_FLUSH, PARSER_INIT_PLUGINSD, WORKER_PARSER_FIRST_JOB + 1 -DISABLE, PLUGINSD_KEYWORD_ID_DISABLE, PARSER_INIT_PLUGINSD, WORKER_PARSER_FIRST_JOB + 2 -EXIT, PLUGINSD_KEYWORD_ID_EXIT, PARSER_INIT_PLUGINSD, WORKER_PARSER_FIRST_JOB + 3 -HOST, PLUGINSD_KEYWORD_ID_HOST, PARSER_INIT_PLUGINSD|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 4 -HOST_DEFINE, PLUGINSD_KEYWORD_ID_HOST_DEFINE, PARSER_INIT_PLUGINSD|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 5 -HOST_DEFINE_END, PLUGINSD_KEYWORD_ID_HOST_DEFINE_END, PARSER_INIT_PLUGINSD|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 6 -HOST_LABEL, PLUGINSD_KEYWORD_ID_HOST_LABEL, PARSER_INIT_PLUGINSD|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 7 -# -# Common keywords -# -BEGIN, PLUGINSD_KEYWORD_ID_BEGIN, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 8 -CHART, PLUGINSD_KEYWORD_ID_CHART, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 9 -CLABEL, PLUGINSD_KEYWORD_ID_CLABEL, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 10 -CLABEL_COMMIT, PLUGINSD_KEYWORD_ID_CLABEL_COMMIT, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 11 
-DIMENSION, PLUGINSD_KEYWORD_ID_DIMENSION, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 12 -END, PLUGINSD_KEYWORD_ID_END, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 13 -FUNCTION, PLUGINSD_KEYWORD_ID_FUNCTION, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 14 -FUNCTION_RESULT_BEGIN, PLUGINSD_KEYWORD_ID_FUNCTION_RESULT_BEGIN, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 15 -FUNCTION_PROGRESS, PLUGINSD_KEYWORD_ID_FUNCTION_PROGRESS, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 16 -LABEL, PLUGINSD_KEYWORD_ID_LABEL, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 17 -OVERWRITE, PLUGINSD_KEYWORD_ID_OVERWRITE, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 18 -SET, PLUGINSD_KEYWORD_ID_SET, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 19 -VARIABLE, PLUGINSD_KEYWORD_ID_VARIABLE, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 20 -CONFIG, PLUGINSD_KEYWORD_ID_CONFIG, PARSER_INIT_PLUGINSD|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 21 -# -# Streaming only keywords -# -CLAIMED_ID, PLUGINSD_KEYWORD_ID_CLAIMED_ID, PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 22 -BEGIN2, PLUGINSD_KEYWORD_ID_BEGIN2, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 23 -SET2, PLUGINSD_KEYWORD_ID_SET2, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 24 -END2, PLUGINSD_KEYWORD_ID_END2, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 25 -# -# Streaming Replication keywords -# -CHART_DEFINITION_END, PLUGINSD_KEYWORD_ID_CHART_DEFINITION_END, PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 26 -RBEGIN, PLUGINSD_KEYWORD_ID_RBEGIN, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 27 -RDSTATE, PLUGINSD_KEYWORD_ID_RDSTATE, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 28 -REND, PLUGINSD_KEYWORD_ID_REND, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 29 -RSET, PLUGINSD_KEYWORD_ID_RSET, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 30 -RSSTATE, PLUGINSD_KEYWORD_ID_RSSTATE, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 31 -# -# obsolete - do nothing commands -# -DYNCFG_ENABLE, PLUGINSD_KEYWORD_ID_DYNCFG_ENABLE, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 32 -DYNCFG_REGISTER_MODULE, PLUGINSD_KEYWORD_ID_DYNCFG_REGISTER_MODULE, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 33 -DYNCFG_REGISTER_JOB, PLUGINSD_KEYWORD_ID_DYNCFG_REGISTER_JOB, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 34 -DYNCFG_RESET, PLUGINSD_KEYWORD_ID_DYNCFG_RESET, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 35 -REPORT_JOB_STATUS, PLUGINSD_KEYWORD_ID_REPORT_JOB_STATUS, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 36 -DELETE_JOB, PLUGINSD_KEYWORD_ID_DELETE_JOB, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 37 diff --git a/src/collectors/plugins.d/gperf-hashtable.h b/src/collectors/plugins.d/gperf-hashtable.h deleted file mode 100644 index 315e2f7c7..000000000 --- a/src/collectors/plugins.d/gperf-hashtable.h +++ /dev/null @@ -1,237 +0,0 @@ -/* ANSI-C code produced by gperf version 3.1 */ -/* Command-line: gperf --multiple-iterations=1000 --output-file=gperf-hashtable.h gperf-config.txt */ -/* Computed positions: -k'1-2' */ - -#if !((' ' == 32) && ('!' 
== 33) && ('"' == 34) && ('#' == 35) \ - && ('%' == 37) && ('&' == 38) && ('\'' == 39) && ('(' == 40) \ - && (')' == 41) && ('*' == 42) && ('+' == 43) && (',' == 44) \ - && ('-' == 45) && ('.' == 46) && ('/' == 47) && ('0' == 48) \ - && ('1' == 49) && ('2' == 50) && ('3' == 51) && ('4' == 52) \ - && ('5' == 53) && ('6' == 54) && ('7' == 55) && ('8' == 56) \ - && ('9' == 57) && (':' == 58) && (';' == 59) && ('<' == 60) \ - && ('=' == 61) && ('>' == 62) && ('?' == 63) && ('A' == 65) \ - && ('B' == 66) && ('C' == 67) && ('D' == 68) && ('E' == 69) \ - && ('F' == 70) && ('G' == 71) && ('H' == 72) && ('I' == 73) \ - && ('J' == 74) && ('K' == 75) && ('L' == 76) && ('M' == 77) \ - && ('N' == 78) && ('O' == 79) && ('P' == 80) && ('Q' == 81) \ - && ('R' == 82) && ('S' == 83) && ('T' == 84) && ('U' == 85) \ - && ('V' == 86) && ('W' == 87) && ('X' == 88) && ('Y' == 89) \ - && ('Z' == 90) && ('[' == 91) && ('\\' == 92) && (']' == 93) \ - && ('^' == 94) && ('_' == 95) && ('a' == 97) && ('b' == 98) \ - && ('c' == 99) && ('d' == 100) && ('e' == 101) && ('f' == 102) \ - && ('g' == 103) && ('h' == 104) && ('i' == 105) && ('j' == 106) \ - && ('k' == 107) && ('l' == 108) && ('m' == 109) && ('n' == 110) \ - && ('o' == 111) && ('p' == 112) && ('q' == 113) && ('r' == 114) \ - && ('s' == 115) && ('t' == 116) && ('u' == 117) && ('v' == 118) \ - && ('w' == 119) && ('x' == 120) && ('y' == 121) && ('z' == 122) \ - && ('{' == 123) && ('|' == 124) && ('}' == 125) && ('~' == 126)) -/* The character set is not based on ISO-646. */ -#error "gperf generated tables don't work with this execution character set. Please report a bug to ." -#endif - -#line 1 "gperf-config.txt" - - -#define PLUGINSD_KEYWORD_ID_FLUSH 97 -#define PLUGINSD_KEYWORD_ID_DISABLE 98 -#define PLUGINSD_KEYWORD_ID_EXIT 99 -#define PLUGINSD_KEYWORD_ID_HOST 71 -#define PLUGINSD_KEYWORD_ID_HOST_DEFINE 72 -#define PLUGINSD_KEYWORD_ID_HOST_DEFINE_END 73 -#define PLUGINSD_KEYWORD_ID_HOST_LABEL 74 - -#define PLUGINSD_KEYWORD_ID_BEGIN 12 -#define PLUGINSD_KEYWORD_ID_CHART 32 -#define PLUGINSD_KEYWORD_ID_CLABEL 34 -#define PLUGINSD_KEYWORD_ID_CLABEL_COMMIT 35 -#define PLUGINSD_KEYWORD_ID_DIMENSION 31 -#define PLUGINSD_KEYWORD_ID_END 13 -#define PLUGINSD_KEYWORD_ID_FUNCTION 41 -#define PLUGINSD_KEYWORD_ID_FUNCTION_RESULT_BEGIN 42 -#define PLUGINSD_KEYWORD_ID_FUNCTION_PROGRESS 43 -#define PLUGINSD_KEYWORD_ID_LABEL 51 -#define PLUGINSD_KEYWORD_ID_OVERWRITE 52 -#define PLUGINSD_KEYWORD_ID_SET 11 -#define PLUGINSD_KEYWORD_ID_VARIABLE 53 -#define PLUGINSD_KEYWORD_ID_CONFIG 100 - -#define PLUGINSD_KEYWORD_ID_CLAIMED_ID 61 -#define PLUGINSD_KEYWORD_ID_BEGIN2 2 -#define PLUGINSD_KEYWORD_ID_SET2 1 -#define PLUGINSD_KEYWORD_ID_END2 3 - -#define PLUGINSD_KEYWORD_ID_CHART_DEFINITION_END 33 -#define PLUGINSD_KEYWORD_ID_RBEGIN 22 -#define PLUGINSD_KEYWORD_ID_RDSTATE 23 -#define PLUGINSD_KEYWORD_ID_REND 25 -#define PLUGINSD_KEYWORD_ID_RSET 21 -#define PLUGINSD_KEYWORD_ID_RSSTATE 24 - -#define PLUGINSD_KEYWORD_ID_DYNCFG_ENABLE 901 -#define PLUGINSD_KEYWORD_ID_DYNCFG_REGISTER_MODULE 902 -#define PLUGINSD_KEYWORD_ID_DYNCFG_REGISTER_JOB 903 -#define PLUGINSD_KEYWORD_ID_DYNCFG_RESET 904 -#define PLUGINSD_KEYWORD_ID_REPORT_JOB_STATUS 905 -#define PLUGINSD_KEYWORD_ID_DELETE_JOB 906 - - -#define GPERF_PARSER_TOTAL_KEYWORDS 37 -#define GPERF_PARSER_MIN_WORD_LENGTH 3 -#define GPERF_PARSER_MAX_WORD_LENGTH 22 -#define GPERF_PARSER_MIN_HASH_VALUE 7 -#define GPERF_PARSER_MAX_HASH_VALUE 52 -/* maximum key range = 46, duplicates = 0 */ - -#ifdef __GNUC__ -__inline -#else -#ifdef __cplusplus 
-inline -#endif -#endif -static unsigned int -gperf_keyword_hash_function (register const char *str, register size_t len) -{ - static const unsigned char asso_values[] = - { - 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, - 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, - 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, - 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, - 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, - 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, - 53, 53, 53, 53, 53, 6, 24, 3, 9, 6, - 0, 53, 3, 27, 53, 53, 33, 53, 42, 0, - 53, 53, 0, 30, 53, 12, 3, 53, 9, 0, - 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, - 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, - 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, - 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, - 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, - 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, - 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, - 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, - 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, - 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, - 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, - 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, - 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, - 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, - 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, - 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, - 53, 53, 53, 53, 53, 53 - }; - return len + asso_values[(unsigned char)str[1]] + asso_values[(unsigned char)str[0]]; -} - -static const PARSER_KEYWORD gperf_keywords[] = - { - {(char*)0,0,PARSER_INIT_PLUGINSD,0}, - {(char*)0,0,PARSER_INIT_PLUGINSD,0}, - {(char*)0,0,PARSER_INIT_PLUGINSD,0}, - {(char*)0,0,PARSER_INIT_PLUGINSD,0}, - {(char*)0,0,PARSER_INIT_PLUGINSD,0}, - {(char*)0,0,PARSER_INIT_PLUGINSD,0}, - {(char*)0,0,PARSER_INIT_PLUGINSD,0}, -#line 67 "gperf-config.txt" - {"HOST", PLUGINSD_KEYWORD_ID_HOST, PARSER_INIT_PLUGINSD|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 4}, - {(char*)0,0,PARSER_INIT_PLUGINSD,0}, -#line 87 "gperf-config.txt" - {"CONFIG", PLUGINSD_KEYWORD_ID_CONFIG, PARSER_INIT_PLUGINSD|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 21}, -#line 101 "gperf-config.txt" - {"REND", PLUGINSD_KEYWORD_ID_REND, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 29}, -#line 75 "gperf-config.txt" - {"CHART", PLUGINSD_KEYWORD_ID_CHART, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 9}, -#line 84 "gperf-config.txt" - {"OVERWRITE", PLUGINSD_KEYWORD_ID_OVERWRITE, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 18}, -#line 70 "gperf-config.txt" - {"HOST_LABEL", PLUGINSD_KEYWORD_ID_HOST_LABEL, PARSER_INIT_PLUGINSD|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 7}, -#line 68 "gperf-config.txt" - {"HOST_DEFINE", PLUGINSD_KEYWORD_ID_HOST_DEFINE, PARSER_INIT_PLUGINSD|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 5}, - {(char*)0,0,PARSER_INIT_PLUGINSD,0}, -#line 100 "gperf-config.txt" - {"RDSTATE", PLUGINSD_KEYWORD_ID_RDSTATE, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 28}, -#line 86 "gperf-config.txt" - {"VARIABLE", PLUGINSD_KEYWORD_ID_VARIABLE, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 20}, -#line 69 "gperf-config.txt" - {"HOST_DEFINE_END", PLUGINSD_KEYWORD_ID_HOST_DEFINE_END, PARSER_INIT_PLUGINSD|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 6}, -#line 66 "gperf-config.txt" - {"EXIT", PLUGINSD_KEYWORD_ID_EXIT, PARSER_INIT_PLUGINSD, WORKER_PARSER_FIRST_JOB + 3}, -#line 80 "gperf-config.txt" - {"FUNCTION", PLUGINSD_KEYWORD_ID_FUNCTION, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 14}, -#line 110 "gperf-config.txt" - 
{"DYNCFG_RESET", PLUGINSD_KEYWORD_ID_DYNCFG_RESET, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 35}, -#line 107 "gperf-config.txt" - {"DYNCFG_ENABLE", PLUGINSD_KEYWORD_ID_DYNCFG_ENABLE, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 32}, -#line 111 "gperf-config.txt" - {"REPORT_JOB_STATUS", PLUGINSD_KEYWORD_ID_REPORT_JOB_STATUS, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 36}, - {(char*)0,0,PARSER_INIT_PLUGINSD,0}, -#line 112 "gperf-config.txt" - {"DELETE_JOB", PLUGINSD_KEYWORD_ID_DELETE_JOB, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 37}, -#line 98 "gperf-config.txt" - {"CHART_DEFINITION_END", PLUGINSD_KEYWORD_ID_CHART_DEFINITION_END, PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 26}, - {(char*)0,0,PARSER_INIT_PLUGINSD,0}, -#line 109 "gperf-config.txt" - {"DYNCFG_REGISTER_JOB", PLUGINSD_KEYWORD_ID_DYNCFG_REGISTER_JOB, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 34}, -#line 82 "gperf-config.txt" - {"FUNCTION_PROGRESS", PLUGINSD_KEYWORD_ID_FUNCTION_PROGRESS, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 16}, -#line 99 "gperf-config.txt" - {"RBEGIN", PLUGINSD_KEYWORD_ID_RBEGIN, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 27}, -#line 108 "gperf-config.txt" - {"DYNCFG_REGISTER_MODULE", PLUGINSD_KEYWORD_ID_DYNCFG_REGISTER_MODULE, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 33}, - {(char*)0,0,PARSER_INIT_PLUGINSD,0}, -#line 81 "gperf-config.txt" - {"FUNCTION_RESULT_BEGIN", PLUGINSD_KEYWORD_ID_FUNCTION_RESULT_BEGIN, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 15}, -#line 102 "gperf-config.txt" - {"RSET", PLUGINSD_KEYWORD_ID_RSET, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 30}, -#line 74 "gperf-config.txt" - {"BEGIN", PLUGINSD_KEYWORD_ID_BEGIN, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 8}, -#line 92 "gperf-config.txt" - {"BEGIN2", PLUGINSD_KEYWORD_ID_BEGIN2, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 23}, -#line 103 "gperf-config.txt" - {"RSSTATE", PLUGINSD_KEYWORD_ID_RSSTATE, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 31}, -#line 64 "gperf-config.txt" - {"FLUSH", PLUGINSD_KEYWORD_ID_FLUSH, PARSER_INIT_PLUGINSD, WORKER_PARSER_FIRST_JOB + 1}, -#line 85 "gperf-config.txt" - {"SET", PLUGINSD_KEYWORD_ID_SET, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 19}, -#line 93 "gperf-config.txt" - {"SET2", PLUGINSD_KEYWORD_ID_SET2, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 24}, - {(char*)0,0,PARSER_INIT_PLUGINSD,0}, -#line 76 "gperf-config.txt" - {"CLABEL", PLUGINSD_KEYWORD_ID_CLABEL, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 10}, -#line 65 "gperf-config.txt" - {"DISABLE", PLUGINSD_KEYWORD_ID_DISABLE, PARSER_INIT_PLUGINSD, WORKER_PARSER_FIRST_JOB + 2}, -#line 83 "gperf-config.txt" - {"LABEL", PLUGINSD_KEYWORD_ID_LABEL, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 17}, -#line 78 "gperf-config.txt" - {"DIMENSION", PLUGINSD_KEYWORD_ID_DIMENSION, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 12}, -#line 91 "gperf-config.txt" - {"CLAIMED_ID", PLUGINSD_KEYWORD_ID_CLAIMED_ID, PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 22}, - {(char*)0,0,PARSER_INIT_PLUGINSD,0}, - {(char*)0,0,PARSER_INIT_PLUGINSD,0}, -#line 77 "gperf-config.txt" - {"CLABEL_COMMIT", 
PLUGINSD_KEYWORD_ID_CLABEL_COMMIT, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 11}, - {(char*)0,0,PARSER_INIT_PLUGINSD,0}, -#line 79 "gperf-config.txt" - {"END", PLUGINSD_KEYWORD_ID_END, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 13}, -#line 94 "gperf-config.txt" - {"END2", PLUGINSD_KEYWORD_ID_END2, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 25} - }; - -const PARSER_KEYWORD * -gperf_lookup_keyword (register const char *str, register size_t len) -{ - if (len <= GPERF_PARSER_MAX_WORD_LENGTH && len >= GPERF_PARSER_MIN_WORD_LENGTH) - { - register unsigned int key = gperf_keyword_hash_function (str, len); - - if (key <= GPERF_PARSER_MAX_HASH_VALUE) - { - register const char *s = gperf_keywords[key].keyword; - - if (s && *str == *s && !strcmp (str + 1, s + 1)) - return &gperf_keywords[key]; - } - } - return 0; -} diff --git a/src/collectors/plugins.d/local_listeners.c b/src/collectors/plugins.d/local_listeners.c deleted file mode 100644 index 2a729b34d..000000000 --- a/src/collectors/plugins.d/local_listeners.c +++ /dev/null @@ -1,316 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include "libnetdata/libnetdata.h" -#include "libnetdata/maps/local-sockets.h" -#include "libnetdata/required_dummies.h" - -// -------------------------------------------------------------------------------------------------------------------- - -static const char *protocol_name(LOCAL_SOCKET *n) { - if(n->local.family == AF_INET) { - if(n->local.protocol == IPPROTO_TCP) - return "TCP"; - else if(n->local.protocol == IPPROTO_UDP) - return "UDP"; - else - return "UNKNOWN_IPV4"; - } - else if(is_local_socket_ipv46(n)) { - if (n->local.protocol == IPPROTO_TCP) - return "TCP46"; - else if(n->local.protocol == IPPROTO_UDP) - return "UDP46"; - else - return "UNKNOWN_IPV46"; - } - else if(n->local.family == AF_INET6) { - if (n->local.protocol == IPPROTO_TCP) - return "TCP6"; - else if(n->local.protocol == IPPROTO_UDP) - return "UDP6"; - else - return "UNKNOWN_IPV6"; - } - else - return "UNKNOWN"; -} - -static void print_local_listeners(LS_STATE *ls __maybe_unused, LOCAL_SOCKET *n, void *data __maybe_unused) { - char local_address[INET6_ADDRSTRLEN]; - char remote_address[INET6_ADDRSTRLEN]; - - if(n->local.family == AF_INET) { - ipv4_address_to_txt(n->local.ip.ipv4, local_address); - ipv4_address_to_txt(n->remote.ip.ipv4, remote_address); - } - else if(is_local_socket_ipv46(n)) { - strncpyz(local_address, "*", sizeof(local_address) - 1); - remote_address[0] = '\0'; - } - else if(n->local.family == AF_INET6) { - ipv6_address_to_txt(&n->local.ip.ipv6, local_address); - ipv6_address_to_txt(&n->remote.ip.ipv6, remote_address); - } - - printf("%s|%s|%u|%s\n", protocol_name(n), local_address, n->local.port, string2str(n->cmdline)); -} - -static void print_local_listeners_debug(LS_STATE *ls __maybe_unused, LOCAL_SOCKET *n, void *data __maybe_unused) { - char local_address[INET6_ADDRSTRLEN]; - char remote_address[INET6_ADDRSTRLEN]; - - if(n->local.family == AF_INET) { - ipv4_address_to_txt(n->local.ip.ipv4, local_address); - ipv4_address_to_txt(n->remote.ip.ipv4, remote_address); - } - else if(n->local.family == AF_INET6) { - ipv6_address_to_txt(&n->local.ip.ipv6, local_address); - ipv6_address_to_txt(&n->remote.ip.ipv6, remote_address); - } - - printf("%s, direction=%s%s%s%s%s pid=%d, state=0x%0x, ns=%"PRIu64", local=%s[:%u], remote=%s[:%u], uid=%u, comm=%s\n", - protocol_name(n), - (n->direction & SOCKET_DIRECTION_LISTEN) ? 
"LISTEN," : "", - (n->direction & SOCKET_DIRECTION_INBOUND) ? "INBOUND," : "", - (n->direction & SOCKET_DIRECTION_OUTBOUND) ? "OUTBOUND," : "", - (n->direction & (SOCKET_DIRECTION_LOCAL_INBOUND|SOCKET_DIRECTION_LOCAL_OUTBOUND)) ? "LOCAL," : "", - (n->direction == 0) ? "NONE," : "", - n->pid, - (unsigned int)n->state, - n->net_ns_inode, - local_address, n->local.port, - remote_address, n->remote.port, - n->uid, - n->comm); -} - -// -------------------------------------------------------------------------------------------------------------------- - -int main(int argc, char **argv) { - static struct rusage started, ended; - getrusage(RUSAGE_SELF, &started); - bool debug = false; - - LS_STATE ls = { - .config = { - .listening = true, - .inbound = false, - .outbound = false, - .local = false, - .tcp4 = true, - .tcp6 = true, - .udp4 = true, - .udp6 = true, - .pid = false, - .cmdline = true, - .comm = false, - .namespaces = true, - .tcp_info = false, - - .max_errors = 10, - .max_concurrent_namespaces = 10, - - .cb = print_local_listeners, - .data = NULL, - }, - .stats = { 0 }, - .sockets_hashtable = { 0 }, - .local_ips_hashtable = { 0 }, - .listening_ports_hashtable = { 0 }, - }; - - netdata_configured_host_prefix = getenv("NETDATA_HOST_PREFIX"); - if(!netdata_configured_host_prefix) netdata_configured_host_prefix = ""; - - for (int i = 1; i < argc; i++) { - char *s = argv[i]; - bool positive = true; - - if(strcmp(s, "-h") == 0 || strcmp(s, "--help") == 0) { - fprintf(stderr, - "\n" - " Netdata local-listeners\n" - " (C) 2024 Netdata Inc.\n" - "\n" - " This program prints a list of all the processes that have a listening socket.\n" - " It is used by Netdata to auto-detect the services running.\n" - "\n" - " Options:\n" - "\n" - " The options:\n" - "\n" - " udp, udp4, udp6, tcp, tcp4, tcp6, ipv4, ipv6\n" - "\n" - " select the sources to read currently available sockets.\n" - "\n" - " while:\n" - "\n" - " listening, local, inbound, outbound, namespaces\n" - "\n" - " filter the output based on the direction of the sockets.\n" - "\n" - " Prepending any option with 'no-', 'not-' or 'non-' will disable them.\n" - "\n" - " Current options:\n" - "\n" - " %s %s %s %s %s %s %s %s %s\n" - "\n" - " Option 'debug' enables all sources and all directions and provides\n" - " a full dump of current sockets.\n" - "\n" - " DIRECTION DETECTION\n" - " The program detects the direction of the sockets using these rules:\n" - "\n" - " - listening are all the TCP sockets that are in listen state\n" - " and all sockets that their remote IP is zero.\n" - "\n" - " - local are all the non-listening sockets that either their source IP\n" - " or their remote IP are loopback addresses. Loopback addresses are\n" - " those in 127.0.0.0/8 and ::1. When IPv4 addresses are mapped\n" - " into IPv6, the program extracts the IPv4 addresses to check them.\n" - "\n" - " Also, local are considered all the sockets that their remote\n" - " IP is one of the IPs that appear as local on another socket.\n" - "\n" - " - inbound are all the non-listening and non-local sockets that their local\n" - " port is a port of another socket that is marked as listening.\n" - "\n" - " - outbound are all the other sockets.\n" - "\n" - " Keep in mind that this kind of socket direction detection is not 100%% accurate,\n" - " and there may be cases (e.g. 
reusable sockets) that this code may incorrectly\n"
-                    " mark sockets as inbound or outbound.\n"
-                    "\n"
-                    " WARNING:\n"
-                    " This program reads the entire /proc/net/{tcp,udp,tcp6,udp6} files, builds\n"
-                    " multiple hash maps in memory and traverses the entire /proc filesystem to\n"
-                    " associate sockets with processes. We have done our best to make it as\n"
-                    " lightweight and fast as possible, but still this program has a lot of work\n"
-                    " to do and it may have some impact on very busy servers with millions of\n"
-                    " established connections."
-                    "\n"
-                    " Therefore, we suggest avoiding running it repeatedly for data collection.\n"
-                    "\n"
-                    " Netdata executes it only when it starts to auto-detect data collection sources\n"
-                    " and initialize the network dependencies explorer."
-                    "\n"
-                    , ls.config.udp4 ? "udp4" :"no-udp4"
-                    , ls.config.udp6 ? "udp6" :"no-udp6"
-                    , ls.config.tcp4 ? "tcp4" :"no-tcp4"
-                    , ls.config.tcp6 ? "tcp6" :"no-tcp6"
-                    , ls.config.listening ? "listening" : "no-listening"
-                    , ls.config.local ? "local" : "no-local"
-                    , ls.config.inbound ? "inbound" : "no-inbound"
-                    , ls.config.outbound ? "outbound" : "no-outbound"
-                    , ls.config.namespaces ? "namespaces" : "no-namespaces"
-            );
-            exit(1);
-        }
-
-        if(strncmp(s, "no-", 3) == 0) {
-            positive = false;
-            s += 3;
-        }
-        else if(strncmp(s, "not-", 4) == 0 || strncmp(s, "non-", 4) == 0) {
-            positive = false;
-            s += 4;
-        }
-
-        if(strcmp(s, "debug") == 0 || strcmp(s, "--debug") == 0) {
-            fprintf(stderr, "%s debugging\n", positive ? "enabling" : "disabling");
-            ls.config.listening = true;
-            ls.config.local = true;
-            ls.config.inbound = true;
-            ls.config.outbound = true;
-            ls.config.pid = true;
-            ls.config.comm = true;
-            ls.config.cmdline = true;
-            ls.config.namespaces = true;
-            ls.config.tcp_info = true;
-            ls.config.uid = true;
-            ls.config.max_errors = SIZE_MAX;
-            ls.config.cb = print_local_listeners_debug;
-
-            debug = true;
-        }
-        else if (strcmp("tcp", s) == 0) {
-            ls.config.tcp4 = ls.config.tcp6 = positive;
-            // fprintf(stderr, "%s tcp4 and tcp6\n", positive ? "enabling" : "disabling");
-        }
-        else if (strcmp("tcp4", s) == 0) {
-            ls.config.tcp4 = positive;
-            // fprintf(stderr, "%s tcp4\n", positive ? "enabling" : "disabling");
-        }
-        else if (strcmp("tcp6", s) == 0) {
-            ls.config.tcp6 = positive;
-            // fprintf(stderr, "%s tcp6\n", positive ? "enabling" : "disabling");
-        }
-        else if (strcmp("udp", s) == 0) {
-            ls.config.udp4 = ls.config.udp6 = positive;
-            // fprintf(stderr, "%s udp4 and udp6\n", positive ? "enabling" : "disabling");
-        }
-        else if (strcmp("udp4", s) == 0) {
-            ls.config.udp4 = positive;
-            // fprintf(stderr, "%s udp4\n", positive ? "enabling" : "disabling");
-        }
-        else if (strcmp("udp6", s) == 0) {
-            ls.config.udp6 = positive;
-            // fprintf(stderr, "%s udp6\n", positive ? "enabling" : "disabling");
-        }
-        else if (strcmp("ipv4", s) == 0) {
-            ls.config.tcp4 = ls.config.udp4 = positive;
-            // fprintf(stderr, "%s udp4 and tcp4\n", positive ? "enabling" : "disabling");
-        }
-        else if (strcmp("ipv6", s) == 0) {
-            ls.config.tcp6 = ls.config.udp6 = positive;
-            // fprintf(stderr, "%s udp6 and tcp6\n", positive ? "enabling" : "disabling");
-        }
-        else if (strcmp("listening", s) == 0) {
-            ls.config.listening = positive;
-            // fprintf(stderr, "%s listening\n", positive ? "enabling" : "disabling");
-        }
-        else if (strcmp("local", s) == 0) {
-            ls.config.local = positive;
-            // fprintf(stderr, "%s local\n", positive ? "enabling" : "disabling");
-        }
-        else if (strcmp("inbound", s) == 0) {
-            ls.config.inbound = positive;
-            // fprintf(stderr, "%s inbound\n", positive ? "enabling" : "disabling");
-        }
-        else if (strcmp("outbound", s) == 0) {
-            ls.config.outbound = positive;
-            // fprintf(stderr, "%s outbound\n", positive ? "enabling" : "disabling");
-        }
-        else if (strcmp("namespaces", s) == 0 || strcmp("ns", s) == 0) {
-            ls.config.namespaces = positive;
-            // fprintf(stderr, "%s namespaces\n", positive ? "enabling" : "disabling");
-        }
-        else {
-            fprintf(stderr, "Unknown parameter %s\n", s);
-            exit(1);
-        }
-    }
-
-    SPAWN_SERVER *spawn_server = spawn_server_create(SPAWN_SERVER_OPTION_CALLBACK, NULL, local_sockets_spawn_server_callback, argc, (const char **)argv);
-    if(spawn_server == NULL) {
-        fprintf(stderr, "Cannot create spawn server.\n");
-        exit(1);
-    }
-    ls.spawn_server = spawn_server;
-
-    local_sockets_process(&ls);
-
-    spawn_server_destroy(spawn_server);
-
-    getrusage(RUSAGE_SELF, &ended);
-
-    if(debug) {
-        // compute the user and system CPU time consumed, in microseconds
-        unsigned long long user = (ended.ru_utime.tv_sec - started.ru_utime.tv_sec) * 1000000ULL + (ended.ru_utime.tv_usec - started.ru_utime.tv_usec);
-        unsigned long long system = (ended.ru_stime.tv_sec - started.ru_stime.tv_sec) * 1000000ULL + (ended.ru_stime.tv_usec - started.ru_stime.tv_usec);
-        unsigned long long total = user + system;
-
-        fprintf(stderr, "CPU Usage %llu user, %llu system, %llu total, %zu namespaces, %zu nl requests (without namespaces)\n", user, system, total, ls.stats.namespaces_found, ls.stats.mnl_sends);
-    }
-
-    return 0;
-}
diff --git a/src/collectors/plugins.d/ndsudo.c b/src/collectors/plugins.d/ndsudo.c
deleted file mode 100644
index d2cf4fae1..000000000
--- a/src/collectors/plugins.d/ndsudo.c
+++ /dev/null
@@ -1,458 +0,0 @@
-#include
-#include
-#include
-#include
-#include
-
-#define MAX_SEARCH 2
-#define MAX_PARAMETERS 128
-#define ERROR_BUFFER_SIZE 1024
-
-struct command {
-    const char *name;
-    const char *params;
-    const char *search[MAX_SEARCH];
-} allowed_commands[] = {
-    {
-        .name = "exim-bpc",
-        .params = "-bpc",
-        .search =
-            {
-                [0] = "exim",
-                [1] = NULL,
-            },
-    },
-    {
-        .name = "nsd-control-stats",
-        .params = "stats_noreset",
-        .search = {
-            [0] = "nsd-control",
-            [1] = NULL,
-        },
-    },
-    {
-        .name = "chronyc-serverstats",
-        .params = "serverstats",
-        .search = {
-            [0] = "chronyc",
-            [1] = NULL,
-        },
-    },
-    {
-        .name = "dmsetup-status-cache",
-        .params = "status --target cache --noflush",
-        .search = {
-            [0] = "dmsetup",
-            [1] = NULL,
-        },
-    },
-    {
-        .name = "ssacli-controllers-info",
-        .params = "ctrl all show config detail",
-        .search = {
-            [0] = "ssacli",
-            [1] = NULL,
-        },
-    },
-    {
-        .name = "smartctl-json-scan",
-        .params = "--json --scan",
-        .search = {
-            [0] = "smartctl",
-            [1] = NULL,
-        },
-    },
-    {
-        .name = "smartctl-json-scan-open",
-        .params = "--json --scan-open",
-        .search = {
-            [0] = "smartctl",
-            [1] = NULL,
-        },
-    },
-    {
-        .name = "smartctl-json-device-info",
-        .params = "--json --all {{deviceName}} --device {{deviceType}} --nocheck {{powerMode}}",
-        .search = {
-            [0] = "smartctl",
-            [1] = NULL,
-        },
-    },
-    {
-        .name = "fail2ban-client-status",
-        .params = "status",
-        .search = {
-            [0] = "fail2ban-client",
-            [1] = NULL,
-        },
-    },
-    {
-        .name = "fail2ban-client-status-socket",
-        .params = "-s {{socket_path}} status",
-        .search = {
-            [0] = "fail2ban-client",
-            [1] = NULL,
-        },
-    },
-    {
-        .name = "fail2ban-client-status-jail",
-        .params = "status {{jail}}",
-        .search = {
-            [0] = "fail2ban-client",
-            [1] = NULL,
-        },
-    },
-    {
-        .name =
"fail2ban-client-status-jail-socket", - .params = "-s {{socket_path}} status {{jail}}", - .search = { - [0] = "fail2ban-client", - [1] = NULL, - }, - }, - { - .name = "storcli-controllers-info", - .params = "/cALL show all J nolog", - .search = { - [0] = "storcli", - [1] = NULL, - }, - }, - { - .name = "storcli-drives-info", - .params = "/cALL/eALL/sALL show all J nolog", - .search = { - [0] = "storcli", - [1] = NULL, - }, - }, - { - .name = "lvs-report-json", - .params = "--reportformat json --units b --nosuffix -o {{options}}", - .search = { - [0] = "lvs", - [1] = NULL, - }, - }, - { - .name = "igt-list-gpus", - .params = "-L", - .search = { - [0] = "intel_gpu_top", - [1] = NULL, - }, - }, - { - .name = "igt-device-json", - .params = "-d {{device}} -J -s {{interval}}", - .search = { - [0] = "intel_gpu_top", - [1] = NULL, - }, - }, - { - .name = "igt-json", - .params = "-J -s {{interval}}", - .search = { - [0] = "intel_gpu_top", - [1] = NULL, - }, - }, - { - .name = "nvme-list", - .params = "list --output-format=json", - .search = { - [0] = "nvme", - [1] = NULL, - }, - }, - { - .name = "nvme-smart-log", - .params = "smart-log {{device}} --output-format=json", - .search = { - [0] = "nvme", - [1] = NULL, - }, - }, - { - .name = "megacli-disk-info", - .params = "-LDPDInfo -aAll -NoLog", - .search = { - [0] = "megacli", - [1] = "MegaCli", - }, - }, - { - .name = "megacli-battery-info", - .params = "-AdpBbuCmd -aAll -NoLog", - .search = { - [0] = "megacli", - [1] = "MegaCli", - }, - }, - { - .name = "arcconf-ld-info", - .params = "GETCONFIG 1 LD", - .search = { - [0] = "arcconf", - [1] = NULL, - }, - }, - { - .name = "arcconf-pd-info", - .params = "GETCONFIG 1 PD", - .search = { - [0] = "arcconf", - [1] = NULL, - }, - } -}; - -bool command_exists_in_dir(const char *dir, const char *cmd, char *dst, size_t dst_size) { - snprintf(dst, dst_size, "%s/%s", dir, cmd); - return access(dst, X_OK) == 0; -} - -bool command_exists_in_PATH(const char *cmd, char *dst, size_t dst_size) { - if(!dst || !dst_size) - return false; - - char *path = getenv("PATH"); - if(!path) - return false; - - char *path_copy = strdup(path); - if (!path_copy) - return false; - - char *dir; - bool found = false; - dir = strtok(path_copy, ":"); - while(dir && !found) { - found = command_exists_in_dir(dir, cmd, dst, dst_size); - dir = strtok(NULL, ":"); - } - - free(path_copy); - return found; -} - -struct command *find_command(const char *cmd) { - size_t size = sizeof(allowed_commands) / sizeof(allowed_commands[0]); - for(size_t i = 0; i < size ;i++) { - if(strcmp(cmd, allowed_commands[i].name) == 0) - return &allowed_commands[i]; - } - - return NULL; -} - -bool check_string(const char *str, size_t index, char *err, size_t err_size) { - const char *s = str; - while(*s) { - char c = *s++; - if(!((c >= 'A' && c <= 'Z') || - (c >= 'a' && c <= 'z') || - (c >= '0' && c <= '9') || - c == ' ' || c == '_' || c == '-' || c == '/' || - c == '.' 
|| c == ',' || c == ':' || c == '=')) { - snprintf(err, err_size, "command line argument no. %zu includes the invalid character '%c'", index, c); - return false; - } - } - - return true; -} - -bool check_params(int argc, char **argv, char *err, size_t err_size) { - for(int i = 0 ; i < argc ;i++) - if(!check_string(argv[i], i, err, err_size)) - return false; - - return true; -} - -char *find_variable_in_argv(const char *variable, int argc, char **argv, char *err, size_t err_size) { - for (int i = 1; i < argc - 1; i++) { - if (strcmp(argv[i], variable) == 0) - return strdup(argv[i + 1]); - } - - snprintf(err, err_size, "variable '%s' is required, but was not provided in the command line parameters", variable); - - return NULL; -} - -bool search_and_replace_params(struct command *cmd, char **params, size_t max_params, const char *filename, int argc, char **argv, char *err, size_t err_size) { - if (!cmd || !params || !max_params) { - snprintf(err, err_size, "search_and_replace_params() internal error"); - return false; - } - - const char *delim = " "; - char *token; - char *temp_params = strdup(cmd->params); - if (!temp_params) { - snprintf(err, err_size, "search_and_replace_params() cannot allocate memory"); - return false; - } - - size_t param_count = 0; - params[param_count++] = strdup(filename); - - token = strtok(temp_params, delim); - while (token && param_count < max_params - 1) { - size_t len = strlen(token); - - char *value = NULL; - - if (strncmp(token, "{{", 2) == 0 && strncmp(token + len - 2, "}}", 2) == 0) { - token[0] = '-'; - token[1] = '-'; - token[len - 2] = '\0'; - - value = find_variable_in_argv(token, argc, argv, err, err_size); - } - else - value = strdup(token); - - if(!value) - goto cleanup; - - params[param_count++] = value; - token = strtok(NULL, delim); - } - - params[param_count] = NULL; // Null-terminate the params array - free(temp_params); - return true; - -cleanup: - if(!err[0]) - snprintf(err, err_size, "memory allocation failure"); - - free(temp_params); - for (size_t i = 0; i < param_count; ++i) { - free(params[i]); - params[i] = NULL; - } - return false; -} - -void show_help() { - fprintf(stdout, "\n"); - fprintf(stdout, "ndsudo\n"); - fprintf(stdout, "\n"); - fprintf(stdout, "(C) Netdata Inc.\n"); - fprintf(stdout, "\n"); - fprintf(stdout, "A helper that allows Netdata to run privileged commands.\n"); - fprintf(stdout, "\n"); - fprintf(stdout, " --test\n"); - fprintf(stdout, " print the generated command that will be run, without running it.\n"); - fprintf(stdout, "\n"); - fprintf(stdout, " --help\n"); - fprintf(stdout, " print this message.\n"); - fprintf(stdout, "\n"); - - fprintf(stdout, "The following commands are supported:\n\n"); - - size_t size = sizeof(allowed_commands) / sizeof(allowed_commands[0]); - for(size_t i = 0; i < size ;i++) { - fprintf(stdout, "- Command : %s\n", allowed_commands[i].name); - fprintf(stdout, " Executables: "); - for(size_t j = 0; j < MAX_SEARCH && allowed_commands[i].search[j] ;j++) { - fprintf(stdout, "%s ", allowed_commands[i].search[j]); - } - fprintf(stdout, "\n"); - fprintf(stdout, " Parameters : %s\n\n", allowed_commands[i].params); - } - - fprintf(stdout, "The program searches for executables in the system PATH.\n"); - fprintf(stdout, "\n"); - fprintf(stdout, "Variables given as {{variable}} are expected on the command line as:\n"); - fprintf(stdout, " --variable VALUE\n"); - fprintf(stdout, "\n"); - fprintf(stdout, "VALUE can include spaces and the characters A-Z, a-z, 0-9, _, -, /, ., ,, : and =.\n"); - fprintf(stdout, "\n"); -} - -int main(int argc, 
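/*
 * Template expansion above (search_and_replace_params()) rewrites a token
 * such as {{deviceName}} into the option name --deviceName and takes its
 * value from the following command line argument. For example, a
 * hypothetical invocation:
 *
 *   ndsudo smartctl-json-device-info --deviceName /dev/sda --deviceType sat --powerMode standby
 *
 * expands the template "--json --all {{deviceName}} --device {{deviceType}} --nocheck {{powerMode}}" into:
 *
 *   smartctl --json --all /dev/sda --device sat --nocheck standby
 */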
char *argv[]) { - char error_buffer[ERROR_BUFFER_SIZE] = ""; - - if (argc < 2) { - fprintf(stderr, "at least 2 parameters are needed, but %d were given.\n", argc); - return 1; - } - - if(!check_params(argc, argv, error_buffer, sizeof(error_buffer))) { - fprintf(stderr, "invalid characters in parameters: %s\n", error_buffer); - return 2; - } - - bool test = false; - const char *cmd = argv[1]; - if(strcmp(cmd, "--help") == 0 || strcmp(cmd, "-h") == 0) { - show_help(); - exit(0); - } - else if(strcmp(cmd, "--test") == 0) { - cmd = argv[2]; - test = true; - } - - struct command *command = find_command(cmd); - if(!command) { - fprintf(stderr, "command not recognized: %s\n", cmd); - return 3; - } - - char new_path[] = "PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin"; - putenv(new_path); - - setuid(0); - setgid(0); - setegid(0); - - bool found = false; - char filename[FILENAME_MAX]; - - for(size_t i = 0; i < MAX_SEARCH && !found ;i++) { - if(command->search[i]) { - found = command_exists_in_PATH(command->search[i], filename, sizeof(filename)); - if(!found) { - size_t len = strlen(error_buffer); - snprintf(&error_buffer[len], sizeof(error_buffer) - len, "%s ", command->search[i]); - } - } - } - - if(!found) { - fprintf(stderr, "%s: not available in PATH.\n", error_buffer); - return 4; - } - else - error_buffer[0] = '\0'; - - char *params[MAX_PARAMETERS]; - if(!search_and_replace_params(command, params, MAX_PARAMETERS, filename, argc, argv, error_buffer, sizeof(error_buffer))) { - fprintf(stderr, "command line parameters are not satisfied: %s\n", error_buffer); - return 5; - } - - if(test) { - fprintf(stderr, "Command to run: \n"); - - for(size_t i = 0; i < MAX_PARAMETERS && params[i] ;i++) - fprintf(stderr, "'%s' ", params[i]); - - fprintf(stderr, "\n"); - - exit(0); - } - else { - char *clean_env[] = {NULL}; - execve(filename, params, clean_env); - perror("execve"); // execve only returns on error - return 6; - } -} diff --git a/src/collectors/plugins.d/plugins_d.c b/src/collectors/plugins.d/plugins_d.c deleted file mode 100644 index 85f1563c3..000000000 --- a/src/collectors/plugins.d/plugins_d.c +++ /dev/null @@ -1,350 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include "plugins_d.h" -#include "pluginsd_parser.h" - -char *plugin_directories[PLUGINSD_MAX_DIRECTORIES] = { [0] = PLUGINS_DIR, }; -struct plugind *pluginsd_root = NULL; - -static inline void pluginsd_sleep(const int seconds) { - int timeout_ms = seconds * 1000; - int waited_ms = 0; - while(waited_ms < timeout_ms) { - if(!service_running(SERVICE_COLLECTORS)) break; - sleep_usec(ND_CHECK_CANCELLABILITY_WHILE_WAITING_EVERY_MS * USEC_PER_MS); - waited_ms += ND_CHECK_CANCELLABILITY_WHILE_WAITING_EVERY_MS; - } -} - -inline size_t pluginsd_initialize_plugin_directories() -{ - char plugins_dirs[(FILENAME_MAX * 2) + 1]; - static char *plugins_dir_list = NULL; - - // Get the configuration entry - if (likely(!plugins_dir_list)) { - snprintfz(plugins_dirs, FILENAME_MAX * 2, "\"%s\" \"%s/custom-plugins.d\"", PLUGINS_DIR, CONFIG_DIR); - plugins_dir_list = strdupz(config_get(CONFIG_SECTION_DIRECTORIES, "plugins", plugins_dirs)); - } - - // Parse it and store it to plugin directories - return quoted_strings_splitter_config(plugins_dir_list, plugin_directories, PLUGINSD_MAX_DIRECTORIES); -} - -static inline void plugin_set_disabled(struct plugind *cd) { - spinlock_lock(&cd->unsafe.spinlock); - cd->unsafe.enabled = false; - spinlock_unlock(&cd->unsafe.spinlock); -} - -bool plugin_is_enabled(struct plugind *cd) { - 
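/* All access to the cd->unsafe sub-structure goes through
 * cd->unsafe.spinlock: both the plugin worker thread and the directory
 * scanning thread in pluginsd_main() read and update these fields, so the
 * small accessors here never touch them without holding the lock. */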
spinlock_lock(&cd->unsafe.spinlock); - bool ret = cd->unsafe.enabled; - spinlock_unlock(&cd->unsafe.spinlock); - return ret; -} - -static inline void plugin_set_running(struct plugind *cd) { - spinlock_lock(&cd->unsafe.spinlock); - cd->unsafe.running = true; - spinlock_unlock(&cd->unsafe.spinlock); -} - -static inline bool plugin_is_running(struct plugind *cd) { - spinlock_lock(&cd->unsafe.spinlock); - bool ret = cd->unsafe.running; - spinlock_unlock(&cd->unsafe.spinlock); - return ret; -} - -static void pluginsd_worker_thread_cleanup(void *pptr) { - struct plugind *cd = CLEANUP_FUNCTION_GET_PTR(pptr); - if(!cd) return; - - worker_unregister(); - - spinlock_lock(&cd->unsafe.spinlock); - - cd->unsafe.running = false; - cd->unsafe.thread = 0; - - cd->unsafe.pid = 0; - - POPEN_INSTANCE *pi = cd->unsafe.pi; - cd->unsafe.pi = NULL; - - spinlock_unlock(&cd->unsafe.spinlock); - - if (pi) - spawn_popen_kill(pi); -} - -#define SERIAL_FAILURES_THRESHOLD 10 -static void pluginsd_worker_thread_handle_success(struct plugind *cd) { - if (likely(cd->successful_collections)) { - pluginsd_sleep(cd->update_every); - return; - } - - if (likely(cd->serial_failures <= SERIAL_FAILURES_THRESHOLD)) { - netdata_log_info("PLUGINSD: 'host:%s', '%s' (pid %d) does not generate useful output but it reports success (exits with 0). %s.", - rrdhost_hostname(cd->host), cd->fullfilename, cd->unsafe.pid, - plugin_is_enabled(cd) ? "Waiting a bit before starting it again." : "Will not start it again - it is now disabled."); - - pluginsd_sleep(cd->update_every * 10); - return; - } - - if (cd->serial_failures > SERIAL_FAILURES_THRESHOLD) { - netdata_log_error("PLUGINSD: 'host:%s', '%s' (pid %d) does not generate useful output, " - "although it reports success (exits with 0). " - "We have tried to collect something %zu times - unsuccessfully. Disabling it.", - rrdhost_hostname(cd->host), cd->fullfilename, cd->unsafe.pid, cd->serial_failures); - plugin_set_disabled(cd); - return; - } -} - -static void pluginsd_worker_thread_handle_error(struct plugind *cd, int worker_ret_code) { - if (worker_ret_code == -1) { - netdata_log_info("PLUGINSD: 'host:%s', '%s' (pid %d) was killed with SIGTERM. Disabling it.", - rrdhost_hostname(cd->host), cd->fullfilename, cd->unsafe.pid); - plugin_set_disabled(cd); - return; - } - - if (!cd->successful_collections) { - netdata_log_error("PLUGINSD: 'host:%s', '%s' (pid %d) exited with error code %d and hasn't collected any data. Disabling it.", - rrdhost_hostname(cd->host), cd->fullfilename, cd->unsafe.pid, worker_ret_code); - plugin_set_disabled(cd); - return; - } - - if (cd->serial_failures <= SERIAL_FAILURES_THRESHOLD) { - netdata_log_error("PLUGINSD: 'host:%s', '%s' (pid %d) exited with error code %d, but has given useful output in the past (%zu times). %s", - rrdhost_hostname(cd->host), cd->fullfilename, cd->unsafe.pid, worker_ret_code, cd->successful_collections, - plugin_is_enabled(cd) ? "Waiting a bit before starting it again." : "Will not start it again - it is disabled."); - - pluginsd_sleep(cd->update_every * 10); - return; - } - - if (cd->serial_failures > SERIAL_FAILURES_THRESHOLD) { - netdata_log_error("PLUGINSD: 'host:%s', '%s' (pid %d) exited with error code %d, but has given useful output in the past (%zu times). " - "We tried to restart it %zu times, but it failed to generate data. 
Disabling it.", - rrdhost_hostname(cd->host), cd->fullfilename, cd->unsafe.pid, worker_ret_code, - cd->successful_collections, cd->serial_failures); - plugin_set_disabled(cd); - return; - } -} - -#undef SERIAL_FAILURES_THRESHOLD - -static void *pluginsd_worker_thread(void *arg) { - struct plugind *cd = (struct plugind *) arg; - CLEANUP_FUNCTION_REGISTER(pluginsd_worker_thread_cleanup) cleanup_ptr = cd; - - worker_register("PLUGINSD"); - - plugin_set_running(cd); - - size_t count = 0; - - while(service_running(SERVICE_COLLECTORS)) { - cd->unsafe.pi = spawn_popen_run(cd->cmd); - if(!cd->unsafe.pi) { - netdata_log_error("PLUGINSD: 'host:%s', cannot popen(\"%s\", \"r\").", - rrdhost_hostname(cd->host), cd->cmd); - break; - } - cd->unsafe.pid = spawn_server_instance_pid(cd->unsafe.pi->si); - - nd_log(NDLS_DAEMON, NDLP_DEBUG, - "PLUGINSD: 'host:%s' connected to '%s' running on pid %d", - rrdhost_hostname(cd->host), - cd->fullfilename, cd->unsafe.pid); - - const char *plugin = strrchr(cd->fullfilename, '/'); - if(plugin) - plugin++; - else - plugin = cd->fullfilename; - - char module[100]; - snprintfz(module, sizeof(module), "plugins.d[%s]", plugin); - ND_LOG_STACK lgs[] = { - ND_LOG_FIELD_TXT(NDF_MODULE, module), - ND_LOG_FIELD_TXT(NDF_NIDL_NODE, rrdhost_hostname(cd->host)), - ND_LOG_FIELD_TXT(NDF_SRC_TRANSPORT, "pluginsd"), - ND_LOG_FIELD_END(), - }; - ND_LOG_STACK_PUSH(lgs); - - count = pluginsd_process(cd->host, cd, cd->unsafe.pi->child_stdin_fp, cd->unsafe.pi->child_stdout_fp, 0); - - nd_log(NDLS_DAEMON, NDLP_DEBUG, - "PLUGINSD: 'host:%s', '%s' (pid %d) disconnected after %zu successful data collections (ENDs).", - rrdhost_hostname(cd->host), cd->fullfilename, cd->unsafe.pid, count); - - int worker_ret_code = spawn_popen_kill(cd->unsafe.pi); - cd->unsafe.pi = NULL; - - if(likely(worker_ret_code == 0)) - pluginsd_worker_thread_handle_success(cd); - else - pluginsd_worker_thread_handle_error(cd, worker_ret_code); - - cd->unsafe.pid = 0; - - if(unlikely(!plugin_is_enabled(cd))) - break; - } - return NULL; -} - -static void pluginsd_main_cleanup(void *pptr) { - struct netdata_static_thread *static_thread = CLEANUP_FUNCTION_GET_PTR(pptr); - if(!static_thread) return; - - static_thread->enabled = NETDATA_MAIN_THREAD_EXITING; - netdata_log_info("PLUGINSD: cleaning up..."); - - struct plugind *cd; - for (cd = pluginsd_root; cd; cd = cd->next) { - spinlock_lock(&cd->unsafe.spinlock); - if (cd->unsafe.enabled && cd->unsafe.running && cd->unsafe.thread != 0) { - netdata_log_info("PLUGINSD: 'host:%s', stopping plugin thread: %s", - rrdhost_hostname(cd->host), cd->id); - - nd_thread_signal_cancel(cd->unsafe.thread); - } - spinlock_unlock(&cd->unsafe.spinlock); - } - - netdata_log_info("PLUGINSD: cleanup completed."); - static_thread->enabled = NETDATA_MAIN_THREAD_EXITED; - - worker_unregister(); -} - -void *pluginsd_main(void *ptr) { - CLEANUP_FUNCTION_REGISTER(pluginsd_main_cleanup) cleanup_ptr = ptr; - - int automatic_run = config_get_boolean(CONFIG_SECTION_PLUGINS, "enable running new plugins", 1); - int scan_frequency = (int)config_get_number(CONFIG_SECTION_PLUGINS, "check for new plugins every", 60); - if (scan_frequency < 1) - scan_frequency = 1; - - // disable some plugins by default - config_get_boolean(CONFIG_SECTION_PLUGINS, "slabinfo", CONFIG_BOOLEAN_NO); - // it crashes (both threads) on Alpine after we made it multi-threaded - // works with "--device /dev/ipmi0", but this is not default - // see https://github.com/netdata/netdata/pull/15564 for details - if (getenv("NETDATA_LISTENER_PORT")) 
- config_get_boolean(CONFIG_SECTION_PLUGINS, "freeipmi", CONFIG_BOOLEAN_NO); - - // store the errno for each plugins directory - // so that we don't log broken directories on each loop - int directory_errors[PLUGINSD_MAX_DIRECTORIES] = { 0 }; - - while (service_running(SERVICE_COLLECTORS)) { - int idx; - const char *directory_name; - - for (idx = 0; idx < PLUGINSD_MAX_DIRECTORIES && (directory_name = plugin_directories[idx]); idx++) { - if (unlikely(!service_running(SERVICE_COLLECTORS))) - break; - - errno_clear(); - DIR *dir = opendir(directory_name); - if (unlikely(!dir)) { - if (directory_errors[idx] != errno) { - directory_errors[idx] = errno; - netdata_log_error("cannot open plugins directory '%s'", directory_name); - } - continue; - } - - struct dirent *file = NULL; - while (likely((file = readdir(dir)))) { - if (unlikely(!service_running(SERVICE_COLLECTORS))) - break; - - netdata_log_debug(D_PLUGINSD, "examining file '%s'", file->d_name); - - if (unlikely(strcmp(file->d_name, ".") == 0 || strcmp(file->d_name, "..") == 0)) - continue; - - int len = (int)strlen(file->d_name); - if (unlikely(len <= (int)PLUGINSD_FILE_SUFFIX_LEN)) - continue; - if (unlikely(strcmp(PLUGINSD_FILE_SUFFIX, &file->d_name[len - (int)PLUGINSD_FILE_SUFFIX_LEN]) != 0)) { - netdata_log_debug(D_PLUGINSD, "file '%s' does not end in '%s'", file->d_name, PLUGINSD_FILE_SUFFIX); - continue; - } - - char pluginname[CONFIG_MAX_NAME + 1]; - snprintfz(pluginname, CONFIG_MAX_NAME, "%.*s", (int)(len - PLUGINSD_FILE_SUFFIX_LEN), file->d_name); - int enabled = config_get_boolean(CONFIG_SECTION_PLUGINS, pluginname, automatic_run); - - if (unlikely(!enabled)) { - netdata_log_debug(D_PLUGINSD, "plugin '%s' is not enabled", file->d_name); - continue; - } - - // check if it runs already - struct plugind *cd; - for (cd = pluginsd_root; cd; cd = cd->next) - if (unlikely(strcmp(cd->filename, file->d_name) == 0)) - break; - - if (likely(cd && plugin_is_running(cd))) { - netdata_log_debug(D_PLUGINSD, "plugin '%s' is already running", cd->filename); - continue; - } - - // it is not running - // allocate a new one, or use the obsolete one - if (unlikely(!cd)) { - cd = callocz(sizeof(struct plugind), 1); - - snprintfz(cd->id, CONFIG_MAX_NAME, "plugin:%s", pluginname); - - strncpyz(cd->filename, file->d_name, FILENAME_MAX); - snprintfz(cd->fullfilename, FILENAME_MAX, "%s/%s", directory_name, cd->filename); - - cd->host = localhost; - cd->unsafe.enabled = enabled; - cd->unsafe.running = false; - - cd->update_every = (int)config_get_number(cd->id, "update every", localhost->rrd_update_every); - cd->started_t = now_realtime_sec(); - - char *def = ""; - snprintfz( - cd->cmd, PLUGINSD_CMD_MAX, "exec %s %d %s", cd->fullfilename, cd->update_every, - config_get(cd->id, "command options", def)); - - // link it - DOUBLE_LINKED_LIST_PREPEND_ITEM_UNSAFE(pluginsd_root, cd, prev, next); - - if (plugin_is_enabled(cd)) { - char tag[NETDATA_THREAD_TAG_MAX + 1]; - snprintfz(tag, NETDATA_THREAD_TAG_MAX, "PD[%s]", pluginname); - - // spawn a new thread for it - cd->unsafe.thread = nd_thread_create(tag, NETDATA_THREAD_OPTION_DEFAULT, - pluginsd_worker_thread, cd); - } - } - } - - closedir(dir); - } - - pluginsd_sleep(scan_frequency); - } - - return NULL; -} diff --git a/src/collectors/plugins.d/plugins_d.h b/src/collectors/plugins.d/plugins_d.h deleted file mode 100644 index 51efa5a72..000000000 --- a/src/collectors/plugins.d/plugins_d.h +++ /dev/null @@ -1,54 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#ifndef NETDATA_PLUGINS_D_H -#define 
NETDATA_PLUGINS_D_H 1 - -#include "daemon/common.h" - -#define PLUGINSD_FILE_SUFFIX ".plugin" -#define PLUGINSD_FILE_SUFFIX_LEN strlen(PLUGINSD_FILE_SUFFIX) -#define PLUGINSD_CMD_MAX (FILENAME_MAX*2) -#define PLUGINSD_STOCK_PLUGINS_DIRECTORY_PATH 0 - -#define PLUGINSD_MAX_DIRECTORIES 20 -extern char *plugin_directories[PLUGINSD_MAX_DIRECTORIES]; - -struct plugind { - char id[CONFIG_MAX_NAME+1]; // config node id - - char filename[FILENAME_MAX+1]; // just the filename - char fullfilename[FILENAME_MAX+1]; // with path - char cmd[PLUGINSD_CMD_MAX+1]; // the command that it executes - - size_t successful_collections; // the number of times we have seen - // values collected from this plugin - - size_t serial_failures; // the number of times the plugin started - // without collecting values - - RRDHOST *host; // the host the plugin collects data for - int update_every; // the plugin default data collection frequency - - struct { - SPINLOCK spinlock; - bool running; // do not touch this structure after setting this to 1 - bool enabled; // if this is enabled or not - ND_THREAD *thread; - POPEN_INSTANCE *pi; - pid_t pid; - } unsafe; - - time_t started_t; - - struct plugind *prev; - struct plugind *next; -}; - -extern struct plugind *pluginsd_root; - -size_t pluginsd_process(RRDHOST *host, struct plugind *cd, FILE *fp_plugin_input, FILE *fp_plugin_output, int trust_durations); -void pluginsd_process_thread_cleanup(void *pptr); - -size_t pluginsd_initialize_plugin_directories(); - -#endif /* NETDATA_PLUGINS_D_H */ diff --git a/src/collectors/plugins.d/pluginsd_dyncfg.c b/src/collectors/plugins.d/pluginsd_dyncfg.c deleted file mode 100644 index c4dd42a73..000000000 --- a/src/collectors/plugins.d/pluginsd_dyncfg.c +++ /dev/null @@ -1,69 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include "pluginsd_dyncfg.h" - - -// ---------------------------------------------------------------------------- - -PARSER_RC pluginsd_config(char **words, size_t num_words, PARSER *parser) { - RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_CONFIG); - if(!host) return PARSER_RC_ERROR; - - size_t i = 1; - char *id = get_word(words, num_words, i++); - char *action = get_word(words, num_words, i++); - - if(strcmp(action, PLUGINSD_KEYWORD_CONFIG_ACTION_CREATE) == 0) { - char *status_str = get_word(words, num_words, i++); - char *type_str = get_word(words, num_words, i++); - char *path = get_word(words, num_words, i++); - char *source_type_str = get_word(words, num_words, i++); - char *source = get_word(words, num_words, i++); - char *supported_cmds_str = get_word(words, num_words, i++); - char *view_permissions_str = get_word(words, num_words, i++); - char *edit_permissions_str = get_word(words, num_words, i++); - - DYNCFG_STATUS status = dyncfg_status2id(status_str); - DYNCFG_TYPE type = dyncfg_type2id(type_str); - DYNCFG_SOURCE_TYPE source_type = dyncfg_source_type2id(source_type_str); - DYNCFG_CMDS cmds = dyncfg_cmds2id(supported_cmds_str); - HTTP_ACCESS view_access = http_access_from_hex(view_permissions_str); - HTTP_ACCESS edit_access = http_access_from_hex(edit_permissions_str); - - if(!dyncfg_add_low_level( - host, - id, - path, - status, - type, - source_type, - source, - cmds, - 0, - 0, - false, - view_access, - edit_access, - pluginsd_function_execute_cb, - parser)) - return PARSER_RC_ERROR; - } - else if(strcmp(action, PLUGINSD_KEYWORD_CONFIG_ACTION_DELETE) == 0) { - dyncfg_del_low_level(host, id); - } - else if(strcmp(action, PLUGINSD_KEYWORD_CONFIG_ACTION_STATUS) == 0) { - char 
*status_str = get_word(words, num_words, i++); - dyncfg_status_low_level(host, id, dyncfg_status2id(status_str)); - } - else - nd_log(NDLS_COLLECTORS, NDLP_WARNING, "DYNCFG: unknown action '%s' received from plugin", action); - - parser->user.data_collections_count++; - return PARSER_RC_OK; -} - -// ---------------------------------------------------------------------------- - -PARSER_RC pluginsd_dyncfg_noop(char **words __maybe_unused, size_t num_words __maybe_unused, PARSER *parser __maybe_unused) { - return PARSER_RC_OK; -} diff --git a/src/collectors/plugins.d/pluginsd_dyncfg.h b/src/collectors/plugins.d/pluginsd_dyncfg.h deleted file mode 100644 index fd35a3c36..000000000 --- a/src/collectors/plugins.d/pluginsd_dyncfg.h +++ /dev/null @@ -1,11 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#ifndef NETDATA_PLUGINSD_DYNCFG_H -#define NETDATA_PLUGINSD_DYNCFG_H - -#include "pluginsd_internals.h" - -PARSER_RC pluginsd_config(char **words, size_t num_words, PARSER *parser); -PARSER_RC pluginsd_dyncfg_noop(char **words, size_t num_words, PARSER *parser); - -#endif //NETDATA_PLUGINSD_DYNCFG_H diff --git a/src/collectors/plugins.d/pluginsd_functions.c b/src/collectors/plugins.d/pluginsd_functions.c deleted file mode 100644 index 4ea6d4812..000000000 --- a/src/collectors/plugins.d/pluginsd_functions.c +++ /dev/null @@ -1,412 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include "pluginsd_functions.h" - -#define LOG_FUNCTIONS false - -// ---------------------------------------------------------------------------- -// execution of functions - -static void inflight_functions_insert_callback(const DICTIONARY_ITEM *item, void *func, void *parser_ptr) { - struct inflight_function *pf = func; - - PARSER *parser = parser_ptr; - - // leave this code as default, so that when the dictionary is destroyed this will be sent back to the caller - pf->code = HTTP_RESP_SERVICE_UNAVAILABLE; - - const char *transaction = dictionary_acquired_item_name(item); - - int rc = uuid_parse_flexi(transaction, pf->transaction); - if(rc != 0) - netdata_log_error("FUNCTION: '%s': cannot parse transaction UUID", string2str(pf->function)); - - CLEAN_BUFFER *buffer = buffer_create(1024, NULL); - if(pf->payload && buffer_strlen(pf->payload)) { - buffer_sprintf( - buffer, - PLUGINSD_CALL_FUNCTION_PAYLOAD_BEGIN " %s %d \"%s\" \""HTTP_ACCESS_FORMAT"\" \"%s\" \"%s\"\n", - transaction, - pf->timeout_s, - string2str(pf->function), - (HTTP_ACCESS_FORMAT_CAST)pf->access, - pf->source ? pf->source : "", - content_type_id2string(pf->payload->content_type) - ); - - buffer_fast_strcat(buffer, buffer_tostring(pf->payload), buffer_strlen(pf->payload)); - buffer_strcat(buffer, "\nFUNCTION_PAYLOAD_END\n"); - } - else { - buffer_sprintf( - buffer, - PLUGINSD_CALL_FUNCTION " %s %d \"%s\" \""HTTP_ACCESS_FORMAT"\" \"%s\"\n", - transaction, - pf->timeout_s, - string2str(pf->function), - (HTTP_ACCESS_FORMAT_CAST)pf->access, - pf->source ? 
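/* Two wire formats are produced by this insert callback: when a payload is
 * present (the branch above), the request is framed between
 * PLUGINSD_CALL_FUNCTION_PAYLOAD_BEGIN and the FUNCTION_PAYLOAD_END
 * terminator and also carries the payload content type; otherwise (this
 * branch) a single PLUGINSD_CALL_FUNCTION line carries just the transaction
 * id, timeout, function name, access level and source. */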
pf->source : "" - ); - } - - // send the command to the plugin - // IMPORTANT: make sure all commands are sent in 1 call, because in streaming they may interfere with others - ssize_t ret = send_to_plugin(buffer_tostring(buffer), parser); - pf->sent_monotonic_ut = now_monotonic_usec(); - - if(ret < 0) { - pf->sent_successfully = false; - - pf->code = HTTP_RESP_SERVICE_UNAVAILABLE; - netdata_log_error("FUNCTION '%s': failed to send it to the plugin, error %zd", string2str(pf->function), ret); - rrd_call_function_error(pf->result_body_wb, "Failed to communicate with collector", pf->code); - } - else { - pf->sent_successfully = true; - - internal_error(LOG_FUNCTIONS, - "FUNCTION '%s' with transaction '%s' sent to collector (%zd bytes, in %"PRIu64" usec)", - string2str(pf->function), dictionary_acquired_item_name(item), ret, - pf->sent_monotonic_ut - pf->started_monotonic_ut); - } -} - -static bool inflight_functions_conflict_callback(const DICTIONARY_ITEM *item __maybe_unused, void *func __maybe_unused, void *new_func, void *parser_ptr __maybe_unused) { - struct inflight_function *pf = new_func; - - netdata_log_error("PLUGINSD_PARSER: duplicate UUID on pending function '%s' detected. Ignoring the second one.", string2str(pf->function)); - pf->code = rrd_call_function_error(pf->result_body_wb, "This request is already in progress", HTTP_RESP_BAD_REQUEST); - pf->result.cb(pf->result_body_wb, pf->code, pf->result.data); - string_freez(pf->function); - - return false; -} - -static void inflight_functions_delete_callback(const DICTIONARY_ITEM *item __maybe_unused, void *func, void *parser_ptr) { - struct inflight_function *pf = func; - struct parser *parser = (struct parser *)parser_ptr; (void)parser; - - internal_error(LOG_FUNCTIONS, - "FUNCTION '%s' result of transaction '%s' received from collector " - "(%zu bytes, request %"PRIu64" usec, response %"PRIu64" usec)", - string2str(pf->function), dictionary_acquired_item_name(item), - buffer_strlen(pf->result_body_wb), - pf->sent_monotonic_ut - pf->started_monotonic_ut, now_realtime_usec() - pf->sent_monotonic_ut); - - if(pf->code == HTTP_RESP_SERVICE_UNAVAILABLE && !buffer_strlen(pf->result_body_wb)) - rrd_call_function_error(pf->result_body_wb, "The plugin exited while servicing this call.", pf->code); - - pf->result.cb(pf->result_body_wb, pf->code, pf->result.data); - - string_freez(pf->function); - buffer_free((void *)pf->payload); - freez((void *)pf->source); -} - -void pluginsd_inflight_functions_init(PARSER *parser) { - parser->inflight.functions = dictionary_create_advanced(DICT_OPTION_DONT_OVERWRITE_VALUE, &dictionary_stats_category_functions, 0); - dictionary_register_insert_callback(parser->inflight.functions, inflight_functions_insert_callback, parser); - dictionary_register_delete_callback(parser->inflight.functions, inflight_functions_delete_callback, parser); - dictionary_register_conflict_callback(parser->inflight.functions, inflight_functions_conflict_callback, parser); -} - -void pluginsd_inflight_functions_cleanup(PARSER *parser) { - dictionary_destroy(parser->inflight.functions); -} - -// ---------------------------------------------------------------------------- - -void pluginsd_inflight_functions_garbage_collect(PARSER *parser, usec_t now_ut) { - parser->inflight.smaller_monotonic_timeout_ut = 0; - struct inflight_function *pf; - dfe_start_write(parser->inflight.functions, pf) { - if (*pf->stop_monotonic_ut + RRDFUNCTIONS_TIMEOUT_EXTENSION_UT < now_ut) { - internal_error(true, - "FUNCTION '%s' removing expired transaction 
'%s', after %"PRIu64" usec.", - string2str(pf->function), pf_dfe.name, now_ut - pf->started_monotonic_ut); - - if(!buffer_strlen(pf->result_body_wb) || pf->code == HTTP_RESP_OK) - pf->code = rrd_call_function_error(pf->result_body_wb, - "Timeout waiting for collector response.", - HTTP_RESP_GATEWAY_TIMEOUT); - - dictionary_del(parser->inflight.functions, pf_dfe.name); - } - - else if(!parser->inflight.smaller_monotonic_timeout_ut || *pf->stop_monotonic_ut + RRDFUNCTIONS_TIMEOUT_EXTENSION_UT < parser->inflight.smaller_monotonic_timeout_ut) - parser->inflight.smaller_monotonic_timeout_ut = *pf->stop_monotonic_ut + RRDFUNCTIONS_TIMEOUT_EXTENSION_UT; - } - dfe_done(pf); -} - -// ---------------------------------------------------------------------------- - -static void pluginsd_function_cancel(void *data) { - struct inflight_function *look_for = data, *t; - - bool sent = false; - dfe_start_read(look_for->parser->inflight.functions, t) { - if(look_for == t) { - const char *transaction = t_dfe.name; - - internal_error(true, "PLUGINSD: sending function cancellation to plugin for transaction '%s'", transaction); - - char buffer[2048]; - snprintfz(buffer, sizeof(buffer), PLUGINSD_CALL_FUNCTION_CANCEL " %s\n", transaction); - - // send the command to the plugin - ssize_t ret = send_to_plugin(buffer, t->parser); - if(ret < 0) - sent = true; - - break; - } - } - dfe_done(t); - - if(sent <= 0) - nd_log(NDLS_DAEMON, NDLP_DEBUG, - "PLUGINSD: FUNCTION_CANCEL request didn't match any pending function requests in pluginsd.d."); -} - -static void pluginsd_function_progress_to_plugin(void *data) { - struct inflight_function *look_for = data, *t; - - bool sent = false; - dfe_start_read(look_for->parser->inflight.functions, t) { - if(look_for == t) { - const char *transaction = t_dfe.name; - - internal_error(true, "PLUGINSD: sending function progress to plugin for transaction '%s'", transaction); - - char buffer[2048]; - snprintfz(buffer, sizeof(buffer), PLUGINSD_CALL_FUNCTION_PROGRESS " %s\n", transaction); - - // send the command to the plugin - ssize_t ret = send_to_plugin(buffer, t->parser); - if(ret < 0) - sent = true; - - break; - } - } - dfe_done(t); - - if(sent <= 0) - nd_log(NDLS_DAEMON, NDLP_DEBUG, - "PLUGINSD: FUNCTION_PROGRESS request didn't match any pending function requests in pluginsd.d."); -} - -// this is the function called from -// rrd_call_function_and_wait() and rrd_call_function_async() -int pluginsd_function_execute_cb(struct rrd_function_execute *rfe, void *data) { - - // IMPORTANT: this function MUST call the result_cb even on failures - - PARSER *parser = data; - - usec_t now_ut = now_monotonic_usec(); - - int timeout_s = (int)((*rfe->stop_monotonic_ut - now_ut + USEC_PER_SEC / 2) / USEC_PER_SEC); - - struct inflight_function tmp = { - .started_monotonic_ut = now_ut, - .stop_monotonic_ut = rfe->stop_monotonic_ut, - .result_body_wb = rfe->result.wb, - .timeout_s = timeout_s, - .function = string_strdupz(rfe->function), - .payload = buffer_dup(rfe->payload), - .access = rfe->user_access, - .source = rfe->source ? 
strdupz(rfe->source) : NULL, - .parser = parser, - - .result = { - .cb = rfe->result.cb, - .data = rfe->result.data, - }, - .progress = { - .cb = rfe->progress.cb, - .data = rfe->progress.data, - }, - }; - uuid_copy(tmp.transaction, *rfe->transaction); - - char transaction_str[UUID_COMPACT_STR_LEN]; - uuid_unparse_lower_compact(tmp.transaction, transaction_str); - - dictionary_write_lock(parser->inflight.functions); - - // if there is any error, our dictionary callbacks will call the caller callback to notify - // the caller about the error - no need for error handling here. - struct inflight_function *t = dictionary_set(parser->inflight.functions, transaction_str, &tmp, sizeof(struct inflight_function)); - if(!t->sent_successfully) { - int code = t->code; - dictionary_write_unlock(parser->inflight.functions); - dictionary_del(parser->inflight.functions, transaction_str); - pluginsd_inflight_functions_garbage_collect(parser, now_ut); - return code; - } - else { - if (rfe->register_canceller.cb) - rfe->register_canceller.cb(rfe->register_canceller.data, pluginsd_function_cancel, t); - - if (rfe->register_progresser.cb && - (parser->repertoire == PARSER_INIT_PLUGINSD || (parser->repertoire == PARSER_INIT_STREAMING && - stream_has_capability(&parser->user, STREAM_CAP_PROGRESS)))) - rfe->register_progresser.cb(rfe->register_progresser.data, pluginsd_function_progress_to_plugin, t); - - if (!parser->inflight.smaller_monotonic_timeout_ut || - *tmp.stop_monotonic_ut + RRDFUNCTIONS_TIMEOUT_EXTENSION_UT < parser->inflight.smaller_monotonic_timeout_ut) - parser->inflight.smaller_monotonic_timeout_ut = *tmp.stop_monotonic_ut + RRDFUNCTIONS_TIMEOUT_EXTENSION_UT; - - // garbage collect stale inflight functions - if (parser->inflight.smaller_monotonic_timeout_ut < now_ut) - pluginsd_inflight_functions_garbage_collect(parser, now_ut); - - dictionary_write_unlock(parser->inflight.functions); - - return HTTP_RESP_OK; - } -} - -PARSER_RC pluginsd_function(char **words, size_t num_words, PARSER *parser) { - // a plugin or a child is registering a function - - bool global = false; - size_t i = 1; - if(num_words >= 2 && strcmp(get_word(words, num_words, 1), "GLOBAL") == 0) { - i++; - global = true; - } - - char *name = get_word(words, num_words, i++); - char *timeout_str = get_word(words, num_words, i++); - char *help = get_word(words, num_words, i++); - char *tags = get_word(words, num_words, i++); - char *access_str = get_word(words, num_words, i++); - char *priority_str = get_word(words, num_words, i++); - - RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_FUNCTION); - if(!host) return PARSER_RC_ERROR; - - RRDSET *st = (global)? NULL: pluginsd_require_scope_chart(parser, PLUGINSD_KEYWORD_FUNCTION, PLUGINSD_KEYWORD_CHART); - if(!st) global = true; - - if (unlikely(!timeout_str || !name || !help || (!global && !st))) { - netdata_log_error("PLUGINSD: 'host:%s/chart:%s' got a FUNCTION, without providing the required data (global = '%s', name = '%s', timeout = '%s', help = '%s'). Ignoring it.", - rrdhost_hostname(host), - st?rrdset_id(st):"(unset)", - global?"yes":"no", - name?name:"(unset)", - timeout_str ? 
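/* The line being parsed here is:
 *   FUNCTION [GLOBAL] name timeout help tags access priority
 * GLOBAL functions are registered on the host; all others attach to the
 * chart currently in scope. */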
timeout_str : "(unset)", - help?help:"(unset)" - ); - return PARSER_RC_ERROR; - } - - int timeout_s = PLUGINS_FUNCTIONS_TIMEOUT_DEFAULT; - if (timeout_str && *timeout_str) { - timeout_s = str2i(timeout_str); - if (unlikely(timeout_s <= 0)) - timeout_s = PLUGINS_FUNCTIONS_TIMEOUT_DEFAULT; - } - - int priority = RRDFUNCTIONS_PRIORITY_DEFAULT; - if(priority_str && *priority_str) { - priority = str2i(priority_str); - if(priority <= 0) - priority = RRDFUNCTIONS_PRIORITY_DEFAULT; - } - - rrd_function_add(host, st, name, timeout_s, priority, help, tags, - http_access_from_hex_mapping_old_roles(access_str), false, - pluginsd_function_execute_cb, parser); - - parser->user.data_collections_count++; - - return PARSER_RC_OK; -} - -static void pluginsd_function_result_end(struct parser *parser, void *action_data) { - STRING *key = action_data; - if(key) - dictionary_del(parser->inflight.functions, string2str(key)); - string_freez(key); - - parser->user.data_collections_count++; -} - -static inline struct inflight_function *inflight_function_find(PARSER *parser, const char *transaction) { - struct inflight_function *pf = NULL; - - if(transaction && *transaction) - pf = (struct inflight_function *)dictionary_get(parser->inflight.functions, transaction); - - if(!pf) - netdata_log_error("got a " PLUGINSD_KEYWORD_FUNCTION_RESULT_BEGIN " for transaction '%s', but the transaction is not found.", transaction ? transaction : "(unset)"); - - return pf; -} - -PARSER_RC pluginsd_function_result_begin(char **words, size_t num_words, PARSER *parser) { - char *transaction = get_word(words, num_words, 1); - char *status = get_word(words, num_words, 2); - char *format = get_word(words, num_words, 3); - char *expires = get_word(words, num_words, 4); - - if (unlikely(!transaction || !*transaction || !status || !*status || !format || !*format || !expires || !*expires)) { - netdata_log_error("got a " PLUGINSD_KEYWORD_FUNCTION_RESULT_BEGIN " without providing the required data (key = '%s', status = '%s', format = '%s', expires = '%s')." - , transaction ? transaction : "(unset)" - , status ? status : "(unset)" - , format ? format : "(unset)" - , expires ? expires : "(unset)" - ); - } - - int code = (status && *status) ? str2i(status) : 0; - if (code <= 0) - code = HTTP_RESP_BACKEND_RESPONSE_INVALID; - - time_t expiration = (expires && *expires) ? str2l(expires) : 0; - - struct inflight_function *pf = inflight_function_find(parser, transaction); - if(pf) { - if(format && *format) - pf->result_body_wb->content_type = content_type_string2id(format); - - pf->code = code; - - pf->result_body_wb->expires = expiration; - if(expiration <= now_realtime_sec()) - buffer_no_cacheable(pf->result_body_wb); - else - buffer_cacheable(pf->result_body_wb); - } - - parser->defer.response = (pf) ? pf->result_body_wb : NULL; - parser->defer.end_keyword = PLUGINSD_KEYWORD_FUNCTION_RESULT_END; - parser->defer.action = pluginsd_function_result_end; - parser->defer.action_data = string_strdupz(transaction); // it is ok is key is NULL - parser->flags |= PARSER_DEFER_UNTIL_KEYWORD; - - return PARSER_RC_OK; -} - -PARSER_RC pluginsd_function_progress(char **words, size_t num_words, PARSER *parser) { - size_t i = 1; - - char *transaction = get_word(words, num_words, i++); - char *done_str = get_word(words, num_words, i++); - char *all_str = get_word(words, num_words, i++); - - struct inflight_function *pf = inflight_function_find(parser, transaction); - if(pf) { - size_t done = done_str && *done_str ? 
str2u(done_str) : 0; - size_t all = all_str && *all_str ? str2u(all_str) : 0; - - if(pf->progress.cb) - pf->progress.cb(pf->progress.data, done, all); - } - - return PARSER_RC_OK; -} diff --git a/src/collectors/plugins.d/pluginsd_functions.h b/src/collectors/plugins.d/pluginsd_functions.h deleted file mode 100644 index ad47dc23a..000000000 --- a/src/collectors/plugins.d/pluginsd_functions.h +++ /dev/null @@ -1,48 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#ifndef NETDATA_PLUGINSD_FUNCTIONS_H -#define NETDATA_PLUGINSD_FUNCTIONS_H - -#include "pluginsd_internals.h" - -struct inflight_function { - nd_uuid_t transaction; - - int code; - int timeout_s; - STRING *function; - BUFFER *payload; - HTTP_ACCESS access; - const char *source; - - BUFFER *result_body_wb; - - usec_t *stop_monotonic_ut; // pointer to caller data - usec_t started_monotonic_ut; - usec_t sent_monotonic_ut; - PARSER *parser; - - bool sent_successfully; - - struct { - rrd_function_result_callback_t cb; - void *data; - } result; - - struct { - rrd_function_progress_cb_t cb; - void *data; - } progress; -}; - -PARSER_RC pluginsd_function(char **words, size_t num_words, PARSER *parser); -PARSER_RC pluginsd_function_result_begin(char **words, size_t num_words, PARSER *parser); -PARSER_RC pluginsd_function_progress(char **words, size_t num_words, PARSER *parser); - -void pluginsd_inflight_functions_init(PARSER *parser); -void pluginsd_inflight_functions_cleanup(PARSER *parser); -void pluginsd_inflight_functions_garbage_collect(PARSER *parser, usec_t now_ut); - -int pluginsd_function_execute_cb(struct rrd_function_execute *rfe, void *data); - -#endif //NETDATA_PLUGINSD_FUNCTIONS_H diff --git a/src/collectors/plugins.d/pluginsd_internals.c b/src/collectors/plugins.d/pluginsd_internals.c deleted file mode 100644 index 31f0f7539..000000000 --- a/src/collectors/plugins.d/pluginsd_internals.c +++ /dev/null @@ -1,120 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include "pluginsd_internals.h" - -ssize_t send_to_plugin(const char *txt, void *data) { - PARSER *parser = data; - - if(!txt || !*txt) - return 0; - -#ifdef ENABLE_H2O - if(parser->h2o_ctx) - return h2o_stream_write(parser->h2o_ctx, txt, strlen(txt)); -#endif - - errno_clear(); - spinlock_lock(&parser->writer.spinlock); - ssize_t bytes = -1; - -#ifdef ENABLE_HTTPS - NETDATA_SSL *ssl = parser->ssl_output; - if(ssl) { - - if(SSL_connection(ssl)) - bytes = netdata_ssl_write(ssl, (void *) txt, strlen(txt)); - - else - netdata_log_error("PLUGINSD: cannot send command (SSL)"); - - spinlock_unlock(&parser->writer.spinlock); - return bytes; - } -#endif - - if(parser->fp_output) { - - bytes = fprintf(parser->fp_output, "%s", txt); - if(bytes <= 0) { - netdata_log_error("PLUGINSD: cannot send command (FILE)"); - bytes = -2; - } - else - fflush(parser->fp_output); - - spinlock_unlock(&parser->writer.spinlock); - return bytes; - } - - if(parser->fd != -1) { - bytes = 0; - ssize_t total = (ssize_t)strlen(txt); - ssize_t sent; - - do { - sent = write(parser->fd, &txt[bytes], total - bytes); - if(sent <= 0) { - netdata_log_error("PLUGINSD: cannot send command (fd)"); - spinlock_unlock(&parser->writer.spinlock); - return -3; - } - bytes += sent; - } - while(bytes < total); - - spinlock_unlock(&parser->writer.spinlock); - return (int)bytes; - } - - spinlock_unlock(&parser->writer.spinlock); - netdata_log_error("PLUGINSD: cannot send command (no output socket/pipe/file given to plugins.d parser)"); - return -4; -} - -PARSER_RC PLUGINSD_DISABLE_PLUGIN(PARSER *parser, const 
char *keyword, const char *msg) { - parser->user.enabled = 0; - - if(keyword && msg) { - nd_log_limit_static_global_var(erl, 1, 0); - nd_log_limit(&erl, NDLS_COLLECTORS, NDLP_INFO, - "PLUGINSD: keyword %s: %s", keyword, msg); - } - - return PARSER_RC_ERROR; -} - -void pluginsd_keywords_init(PARSER *parser, PARSER_REPERTOIRE repertoire) { - parser_init_repertoire(parser, repertoire); - - if (repertoire & (PARSER_INIT_PLUGINSD | PARSER_INIT_STREAMING)) - pluginsd_inflight_functions_init(parser); -} - -void parser_destroy(PARSER *parser) { - if (unlikely(!parser)) - return; - - pluginsd_inflight_functions_cleanup(parser); - - freez(parser); -} - - -PARSER *parser_init(struct parser_user_object *user, FILE *fp_input, FILE *fp_output, int fd, - PARSER_INPUT_TYPE flags, void *ssl __maybe_unused) { - PARSER *parser; - - parser = callocz(1, sizeof(*parser)); - if(user) - parser->user = *user; - parser->fd = fd; - parser->fp_input = fp_input; - parser->fp_output = fp_output; -#ifdef ENABLE_HTTPS - parser->ssl_output = ssl; -#endif - parser->flags = flags; - - spinlock_init(&parser->writer.spinlock); - return parser; -} diff --git a/src/collectors/plugins.d/pluginsd_internals.h b/src/collectors/plugins.d/pluginsd_internals.h deleted file mode 100644 index ae7e99427..000000000 --- a/src/collectors/plugins.d/pluginsd_internals.h +++ /dev/null @@ -1,355 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#ifndef NETDATA_PLUGINSD_INTERNALS_H -#define NETDATA_PLUGINSD_INTERNALS_H - -#include "pluginsd_parser.h" -#include "pluginsd_functions.h" -#include "pluginsd_dyncfg.h" -#include "pluginsd_replication.h" - -#define SERVING_STREAMING(parser) ((parser)->repertoire == PARSER_INIT_STREAMING) -#define SERVING_PLUGINSD(parser) ((parser)->repertoire == PARSER_INIT_PLUGINSD) - -PARSER_RC PLUGINSD_DISABLE_PLUGIN(PARSER *parser, const char *keyword, const char *msg); - -ssize_t send_to_plugin(const char *txt, void *data); - -static inline RRDHOST *pluginsd_require_scope_host(PARSER *parser, const char *cmd) { - RRDHOST *host = parser->user.host; - - if(unlikely(!host)) - netdata_log_error("PLUGINSD: command %s requires a host, but is not set.", cmd); - - return host; -} - -static inline RRDSET *pluginsd_require_scope_chart(PARSER *parser, const char *cmd, const char *parent_cmd) { - RRDSET *st = parser->user.st; - - if(unlikely(!st)) - netdata_log_error("PLUGINSD: command %s requires a chart defined via command %s, but is not set.", cmd, parent_cmd); - - return st; -} - -static inline RRDSET *pluginsd_get_scope_chart(PARSER *parser) { - return parser->user.st; -} - -static inline void pluginsd_lock_rrdset_data_collection(PARSER *parser) { - if(parser->user.st && !parser->user.v2.locked_data_collection) { - spinlock_lock(&parser->user.st->data_collection_lock); - parser->user.v2.locked_data_collection = true; - } -} - -static inline bool pluginsd_unlock_rrdset_data_collection(PARSER *parser) { - if(parser->user.st && parser->user.v2.locked_data_collection) { - spinlock_unlock(&parser->user.st->data_collection_lock); - parser->user.v2.locked_data_collection = false; - return true; - } - - return false; -} - -static inline void pluginsd_unlock_previous_scope_chart(PARSER *parser, const char *keyword, bool stale) { - if(unlikely(pluginsd_unlock_rrdset_data_collection(parser))) { - if(stale) - netdata_log_error("PLUGINSD: 'host:%s/chart:%s/' stale data collection lock found during %s; it has been unlocked", - rrdhost_hostname(parser->user.st->rrdhost), - rrdset_id(parser->user.st), - keyword); - } - - 
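/* A chart switch must release both per-chart locks: the data collection
 * lock handled above and the ML lock handled below. Finding either still
 * held ("stale") means the previous chart was abandoned mid-update, which
 * is why it is logged as an error. */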
if(unlikely(parser->user.v2.ml_locked)) { - ml_chart_update_end(parser->user.st); - parser->user.v2.ml_locked = false; - - if(stale) - netdata_log_error("PLUGINSD: 'host:%s/chart:%s/' stale ML lock found during %s, it has been unlocked", - rrdhost_hostname(parser->user.st->rrdhost), - rrdset_id(parser->user.st), - keyword); - } -} - -static inline void pluginsd_clear_scope_chart(PARSER *parser, const char *keyword) { - pluginsd_unlock_previous_scope_chart(parser, keyword, true); - - if(parser->user.cleanup_slots && parser->user.st) - rrdset_pluginsd_receive_unslot(parser->user.st); - - parser->user.st = NULL; - parser->user.cleanup_slots = false; -} - -static inline bool pluginsd_set_scope_chart(PARSER *parser, RRDSET *st, const char *keyword) { - RRDSET *old_st = parser->user.st; - pid_t old_collector_tid = (old_st) ? old_st->pluginsd.collector_tid : 0; - pid_t my_collector_tid = gettid_cached(); - - if(unlikely(old_collector_tid)) { - if(old_collector_tid != my_collector_tid) { - nd_log_limit_static_global_var(erl, 1, 0); - nd_log_limit(&erl, NDLS_COLLECTORS, NDLP_WARNING, - "PLUGINSD: keyword %s: 'host:%s/chart:%s' is collected twice (my tid %d, other collector tid %d)", - keyword ? keyword : "UNKNOWN", - rrdhost_hostname(st->rrdhost), rrdset_id(st), - my_collector_tid, old_collector_tid); - - return false; - } - - old_st->pluginsd.collector_tid = 0; - } - - st->pluginsd.collector_tid = my_collector_tid; - - pluginsd_clear_scope_chart(parser, keyword); - - st->pluginsd.pos = 0; - parser->user.st = st; - parser->user.cleanup_slots = false; - - return true; -} - -static inline void pluginsd_rrddim_put_to_slot(PARSER *parser, RRDSET *st, RRDDIM *rd, ssize_t slot, bool obsolete) { - size_t wanted_size = st->pluginsd.size; - - if(slot >= 1) { - st->pluginsd.dims_with_slots = true; - wanted_size = slot; - } - else { - st->pluginsd.dims_with_slots = false; - wanted_size = dictionary_entries(st->rrddim_root_index); - } - - if(wanted_size > st->pluginsd.size) { - st->pluginsd.prd_array = reallocz(st->pluginsd.prd_array, wanted_size * sizeof(struct pluginsd_rrddim)); - - // initialize the empty slots - for(ssize_t i = (ssize_t) wanted_size - 1; i >= (ssize_t) st->pluginsd.size; i--) { - st->pluginsd.prd_array[i].rda = NULL; - st->pluginsd.prd_array[i].rd = NULL; - st->pluginsd.prd_array[i].id = NULL; - } - - st->pluginsd.size = wanted_size; - } - - if(st->pluginsd.dims_with_slots) { - struct pluginsd_rrddim *prd = &st->pluginsd.prd_array[slot - 1]; - - if(prd->rd != rd) { - prd->rda = rrddim_find_and_acquire(st, string2str(rd->id)); - prd->rd = rrddim_acquired_to_rrddim(prd->rda); - prd->id = string2str(prd->rd->id); - } - - if(obsolete) - parser->user.cleanup_slots = true; - } -} - -static inline RRDDIM *pluginsd_acquire_dimension(RRDHOST *host, RRDSET *st, const char *dimension, ssize_t slot, const char *cmd) { - if (unlikely(!dimension || !*dimension)) { - netdata_log_error("PLUGINSD: 'host:%s/chart:%s' got a %s, without a dimension.", - rrdhost_hostname(host), rrdset_id(st), cmd); - return NULL; - } - - if (unlikely(!st->pluginsd.size)) { - netdata_log_error("PLUGINSD: 'host:%s/chart:%s' got a %s, but the chart has no dimensions.", - rrdhost_hostname(host), rrdset_id(st), cmd); - return NULL; - } - - struct pluginsd_rrddim *prd; - RRDDIM *rd; - - if(likely(st->pluginsd.dims_with_slots)) { - // caching with slots - - if(unlikely(slot < 1 || slot > st->pluginsd.size)) { - netdata_log_error("PLUGINSD: 'host:%s/chart:%s' got a %s with slot %zd, but slots in the range [1 - %u] are expected.", - 
rrdhost_hostname(host), rrdset_id(st), cmd, slot, st->pluginsd.size); - return NULL; - } - - prd = &st->pluginsd.prd_array[slot - 1]; - - rd = prd->rd; - if(likely(rd)) { -#ifdef NETDATA_INTERNAL_CHECKS - if(strcmp(prd->id, dimension) != 0) { - ssize_t t; - for(t = 0; t < st->pluginsd.size ;t++) { - if (strcmp(st->pluginsd.prd_array[t].id, dimension) == 0) - break; - } - if(t >= st->pluginsd.size) - t = -1; - - internal_fatal(true, - "PLUGINSD: expected to find dimension '%s' on slot %zd, but found '%s', " - "the right slot is %zd", - dimension, slot, prd->id, t); - } -#endif - return rd; - } - } - else { - // caching without slots - - if(unlikely(st->pluginsd.pos >= st->pluginsd.size)) - st->pluginsd.pos = 0; - - prd = &st->pluginsd.prd_array[st->pluginsd.pos++]; - - rd = prd->rd; - if(likely(rd)) { - const char *id = prd->id; - - if(strcmp(id, dimension) == 0) { - // we found it cached - return rd; - } - else { - // the cached one is not good for us - rrddim_acquired_release(prd->rda); - prd->rda = NULL; - prd->rd = NULL; - prd->id = NULL; - } - } - } - - // we need to find the dimension and set it to prd - - RRDDIM_ACQUIRED *rda = rrddim_find_and_acquire(st, dimension); - if (unlikely(!rda)) { - netdata_log_error("PLUGINSD: 'host:%s/chart:%s/dim:%s' got a %s but dimension does not exist.", - rrdhost_hostname(host), rrdset_id(st), dimension, cmd); - - return NULL; - } - - prd->rda = rda; - prd->rd = rd = rrddim_acquired_to_rrddim(rda); - prd->id = string2str(rd->id); - - return rd; -} - -static inline RRDSET *pluginsd_find_chart(RRDHOST *host, const char *chart, const char *cmd) { - if (unlikely(!chart || !*chart)) { - netdata_log_error("PLUGINSD: 'host:%s' got a %s without a chart id.", - rrdhost_hostname(host), cmd); - return NULL; - } - - RRDSET *st = rrdset_find(host, chart); - if (unlikely(!st)) - netdata_log_error("PLUGINSD: 'host:%s/chart:%s' got a %s but chart does not exist.", - rrdhost_hostname(host), chart, cmd); - - return st; -} - -static inline ssize_t pluginsd_parse_rrd_slot(char **words, size_t num_words) { - ssize_t slot = -1; - char *id = get_word(words, num_words, 1); - if(id && id[0] == PLUGINSD_KEYWORD_SLOT[0] && id[1] == PLUGINSD_KEYWORD_SLOT[1] && - id[2] == PLUGINSD_KEYWORD_SLOT[2] && id[3] == PLUGINSD_KEYWORD_SLOT[3] && id[4] == ':') { - slot = (ssize_t) str2ull_encoded(&id[5]); - if(slot < 0) slot = 0; // to make the caller increment its idx of the words - } - - return slot; -} - -static inline void pluginsd_rrdset_cache_put_to_slot(PARSER *parser, RRDSET *st, ssize_t slot, bool obsolete) { - // clean possible old cached data - rrdset_pluginsd_receive_unslot(st); - - if(unlikely(slot < 1 || slot >= INT32_MAX)) - return; - - RRDHOST *host = st->rrdhost; - - if(unlikely((size_t)slot > host->rrdpush.receive.pluginsd_chart_slots.size)) { - spinlock_lock(&host->rrdpush.receive.pluginsd_chart_slots.spinlock); - size_t old_slots = host->rrdpush.receive.pluginsd_chart_slots.size; - size_t new_slots = (old_slots < PLUGINSD_MIN_RRDSET_POINTERS_CACHE) ? 
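/* Growth policy for the per-host chart slot cache: start at
 * PLUGINSD_MIN_RRDSET_POINTERS_CACHE, double from then on, and in any case
 * grow at least up to the requested slot, so that BEGIN/SET commands can
 * address charts by small integer slots instead of lookups by id. */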
PLUGINSD_MIN_RRDSET_POINTERS_CACHE : old_slots * 2; - - if(new_slots < (size_t)slot) - new_slots = slot; - - host->rrdpush.receive.pluginsd_chart_slots.array = - reallocz(host->rrdpush.receive.pluginsd_chart_slots.array, new_slots * sizeof(RRDSET *)); - - for(size_t i = old_slots; i < new_slots ;i++) - host->rrdpush.receive.pluginsd_chart_slots.array[i] = NULL; - - host->rrdpush.receive.pluginsd_chart_slots.size = new_slots; - spinlock_unlock(&host->rrdpush.receive.pluginsd_chart_slots.spinlock); - } - - host->rrdpush.receive.pluginsd_chart_slots.array[slot - 1] = st; - st->pluginsd.last_slot = (int32_t)slot - 1; - parser->user.cleanup_slots = obsolete; -} - -static inline RRDSET *pluginsd_rrdset_cache_get_from_slot(PARSER *parser, RRDHOST *host, const char *id, ssize_t slot, const char *keyword) { - if(unlikely(slot < 1 || (size_t)slot > host->rrdpush.receive.pluginsd_chart_slots.size)) - return pluginsd_find_chart(host, id, keyword); - - RRDSET *st = host->rrdpush.receive.pluginsd_chart_slots.array[slot - 1]; - - if(!st) { - st = pluginsd_find_chart(host, id, keyword); - if(st) - pluginsd_rrdset_cache_put_to_slot(parser, st, slot, rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE)); - } - else { - internal_fatal(string_strcmp(st->id, id) != 0, - "PLUGINSD: wrong chart in slot %zd, expected '%s', found '%s'", - slot - 1, id, string2str(st->id)); - } - - return st; -} - -static inline SN_FLAGS pluginsd_parse_storage_number_flags(const char *flags_str) { - SN_FLAGS flags = SN_FLAG_NONE; - - char c; - while ((c = *flags_str++)) { - switch (c) { - case 'A': - flags |= SN_FLAG_NOT_ANOMALOUS; - break; - - case 'R': - flags |= SN_FLAG_RESET; - break; - - case 'E': - flags = SN_EMPTY_SLOT; - return flags; - - default: - internal_error(true, "Unknown SN_FLAGS flag '%c'", c); - break; - } - } - - return flags; -} - -#endif //NETDATA_PLUGINSD_INTERNALS_H diff --git a/src/collectors/plugins.d/pluginsd_parser.c b/src/collectors/plugins.d/pluginsd_parser.c deleted file mode 100644 index d15ecbe94..000000000 --- a/src/collectors/plugins.d/pluginsd_parser.c +++ /dev/null @@ -1,1402 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include "pluginsd_internals.h" - -static inline PARSER_RC pluginsd_set(char **words, size_t num_words, PARSER *parser) { - int idx = 1; - ssize_t slot = pluginsd_parse_rrd_slot(words, num_words); - if(slot >= 0) idx++; - - char *dimension = get_word(words, num_words, idx++); - char *value = get_word(words, num_words, idx++); - - RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_SET); - if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); - - RRDSET *st = pluginsd_require_scope_chart(parser, PLUGINSD_KEYWORD_SET, PLUGINSD_KEYWORD_CHART); - if(!st) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); - - RRDDIM *rd = pluginsd_acquire_dimension(host, st, dimension, slot, PLUGINSD_KEYWORD_SET); - if(!rd) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); - - st->pluginsd.set = true; - - if (unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG))) - netdata_log_debug(D_PLUGINSD, "PLUGINSD: 'host:%s/chart:%s/dim:%s' SET is setting value to '%s'", - rrdhost_hostname(host), rrdset_id(st), dimension, value && *value ? 
value : "UNSET"); - - if (value && *value) - rrddim_set_by_pointer(st, rd, str2ll_encoded(value)); - - return PARSER_RC_OK; -} - -static inline PARSER_RC pluginsd_begin(char **words, size_t num_words, PARSER *parser) { - int idx = 1; - ssize_t slot = pluginsd_parse_rrd_slot(words, num_words); - if(slot >= 0) idx++; - - char *id = get_word(words, num_words, idx++); - char *microseconds_txt = get_word(words, num_words, idx++); - - RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_BEGIN); - if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); - - RRDSET *st = pluginsd_rrdset_cache_get_from_slot(parser, host, id, slot, PLUGINSD_KEYWORD_BEGIN); - if(!st) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); - - if(!pluginsd_set_scope_chart(parser, st, PLUGINSD_KEYWORD_BEGIN)) - return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); - - usec_t microseconds = 0; - if (microseconds_txt && *microseconds_txt) { - long long t = str2ll(microseconds_txt, NULL); - if(t >= 0) - microseconds = t; - } - -#ifdef NETDATA_LOG_REPLICATION_REQUESTS - if(st->replay.log_next_data_collection) { - st->replay.log_next_data_collection = false; - - internal_error(true, - "REPLAY: 'host:%s/chart:%s' first BEGIN after replication, last collected %llu, last updated %llu, microseconds %llu", - rrdhost_hostname(host), rrdset_id(st), - st->last_collected_time.tv_sec * USEC_PER_SEC + st->last_collected_time.tv_usec, - st->last_updated.tv_sec * USEC_PER_SEC + st->last_updated.tv_usec, - microseconds - ); - } -#endif - - if (likely(st->counter_done)) { - if (likely(microseconds)) { - if (parser->user.trust_durations) - rrdset_next_usec_unfiltered(st, microseconds); - else - rrdset_next_usec(st, microseconds); - } - else - rrdset_next(st); - } - return PARSER_RC_OK; -} - -static inline PARSER_RC pluginsd_end(char **words, size_t num_words, PARSER *parser) { - char *tv_sec = get_word(words, num_words, 1); - char *tv_usec = get_word(words, num_words, 2); - char *pending_rrdset_next = get_word(words, num_words, 3); - - RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_END); - if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); - - RRDSET *st = pluginsd_require_scope_chart(parser, PLUGINSD_KEYWORD_END, PLUGINSD_KEYWORD_BEGIN); - if(!st) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); - - if (unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG))) - netdata_log_debug(D_PLUGINSD, "requested an END on chart '%s'", rrdset_id(st)); - - pluginsd_clear_scope_chart(parser, PLUGINSD_KEYWORD_END); - parser->user.data_collections_count++; - - struct timeval tv = { - .tv_sec = (tv_sec && *tv_sec) ? str2ll(tv_sec, NULL) : 0, - .tv_usec = (tv_usec && *tv_usec) ? str2ll(tv_usec, NULL) : 0 - }; - - if(!tv.tv_sec) - now_realtime_timeval(&tv); - - rrdset_timed_done(st, tv, pending_rrdset_next && *pending_rrdset_next ? 
true : false); - - return PARSER_RC_OK; -} - -static void pluginsd_host_define_cleanup(PARSER *parser) { - string_freez(parser->user.host_define.hostname); - rrdlabels_destroy(parser->user.host_define.rrdlabels); - - parser->user.host_define.hostname = NULL; - parser->user.host_define.rrdlabels = NULL; - parser->user.host_define.parsing_host = false; -} - -static inline bool pluginsd_validate_machine_guid(const char *guid, nd_uuid_t *uuid, char *output) { - if(uuid_parse(guid, *uuid)) - return false; - - uuid_unparse_lower(*uuid, output); - - return true; -} - -static inline PARSER_RC pluginsd_host_define(char **words, size_t num_words, PARSER *parser) { - char *guid = get_word(words, num_words, 1); - char *hostname = get_word(words, num_words, 2); - - if(unlikely(!guid || !*guid || !hostname || !*hostname)) - return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_HOST_DEFINE, "missing parameters"); - - if(unlikely(parser->user.host_define.parsing_host)) - return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_HOST_DEFINE, - "another host definition is already open - did you send " PLUGINSD_KEYWORD_HOST_DEFINE_END "?"); - - if(!pluginsd_validate_machine_guid(guid, &parser->user.host_define.machine_guid, parser->user.host_define.machine_guid_str)) - return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_HOST_DEFINE, "cannot parse MACHINE_GUID - is it a valid UUID?"); - - parser->user.host_define.hostname = string_strdupz(hostname); - parser->user.host_define.rrdlabels = rrdlabels_create(); - parser->user.host_define.parsing_host = true; - - return PARSER_RC_OK; -} - -static inline PARSER_RC pluginsd_host_dictionary(char **words, size_t num_words, PARSER *parser, RRDLABELS *labels, const char *keyword) { - char *name = get_word(words, num_words, 1); - char *value = get_word(words, num_words, 2); - - if(!name || !*name || !value) - return PLUGINSD_DISABLE_PLUGIN(parser, keyword, "missing parameters"); - - if(!parser->user.host_define.parsing_host || !labels) - return PLUGINSD_DISABLE_PLUGIN(parser, keyword, "host is not defined, send " PLUGINSD_KEYWORD_HOST_DEFINE " before this"); - - rrdlabels_add(labels, name, value, RRDLABEL_SRC_CONFIG); - - return PARSER_RC_OK; -} - -static inline PARSER_RC pluginsd_host_labels(char **words, size_t num_words, PARSER *parser) { - return pluginsd_host_dictionary(words, num_words, parser, - parser->user.host_define.rrdlabels, - PLUGINSD_KEYWORD_HOST_LABEL); -} - -static inline PARSER_RC pluginsd_host_define_end(char **words __maybe_unused, size_t num_words __maybe_unused, PARSER *parser) { - if(!parser->user.host_define.parsing_host) - return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_HOST_DEFINE_END, "missing initialization, send " PLUGINSD_KEYWORD_HOST_DEFINE " before this"); - - RRDHOST *host = rrdhost_find_or_create( - string2str(parser->user.host_define.hostname), - string2str(parser->user.host_define.hostname), - parser->user.host_define.machine_guid_str, - "Netdata Virtual Host 1.0", - netdata_configured_timezone, - netdata_configured_abbrev_timezone, - netdata_configured_utc_offset, - program_name, - NETDATA_VERSION, - default_rrd_update_every, - default_rrd_history_entries, - default_rrd_memory_mode, - health_plugin_enabled(), - default_rrdpush_enabled, - default_rrdpush_destination, - default_rrdpush_api_key, - default_rrdpush_send_charts_matching, - default_rrdpush_enable_replication, - default_rrdpush_seconds_to_replicate, - default_rrdpush_replication_step, - rrdhost_labels_to_system_info(parser->user.host_define.rrdlabels), - false); - 
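For context, the rrdhost_find_or_create() call above is the tail end of a short wire exchange. A minimal sketch of a plugin defining a virtual host this way, assuming only the HOST_DEFINE, HOST_LABEL, HOST_DEFINE_END and HOST keywords handled in this file (the GUID, hostname and label below are hypothetical examples):

    #include <stdio.h>

    int main(void) {
        // HOST_DEFINE opens the definition; each HOST_LABEL accumulates in
        // parser->user.host_define.rrdlabels; HOST_DEFINE_END creates the host
        // via the rrdhost_find_or_create() call shown above
        printf("HOST_DEFINE 11111111-2222-3333-4444-555555555555 my-virtual-host\n");
        printf("HOST_LABEL _is_ephemeral true\n");
        printf("HOST_DEFINE_END\n");
        printf("HOST 11111111-2222-3333-4444-555555555555\n"); // scope later data to the new host
        fflush(stdout);
        return 0;
    }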
- rrdhost_option_set(host, RRDHOST_OPTION_VIRTUAL_HOST); - dyncfg_host_init(host); - - if(host->rrdlabels) { - rrdlabels_migrate_to_these(host->rrdlabels, parser->user.host_define.rrdlabels); - } - else { - host->rrdlabels = parser->user.host_define.rrdlabels; - parser->user.host_define.rrdlabels = NULL; - } - - pluginsd_host_define_cleanup(parser); - - parser->user.host = host; - pluginsd_clear_scope_chart(parser, PLUGINSD_KEYWORD_HOST_DEFINE_END); - - rrdhost_flag_clear(host, RRDHOST_FLAG_ORPHAN); - rrdcontext_host_child_connected(host); - schedule_node_info_update(host); - - return PARSER_RC_OK; -} - -static inline PARSER_RC pluginsd_host(char **words, size_t num_words, PARSER *parser) { - char *guid = get_word(words, num_words, 1); - - if(!guid || !*guid || strcmp(guid, "localhost") == 0) { - parser->user.host = localhost; - return PARSER_RC_OK; - } - - nd_uuid_t uuid; - char uuid_str[UUID_STR_LEN]; - if(!pluginsd_validate_machine_guid(guid, &uuid, uuid_str)) - return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_HOST, "cannot parse MACHINE_GUID - is it a valid UUID?"); - - RRDHOST *host = rrdhost_find_by_guid(uuid_str); - if(unlikely(!host)) - return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_HOST, "cannot find a host with this machine guid - have you created it?"); - - parser->user.host = host; - - return PARSER_RC_OK; -} - -static inline PARSER_RC pluginsd_chart(char **words, size_t num_words, PARSER *parser) { - RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_CHART); - if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); - - int idx = 1; - ssize_t slot = pluginsd_parse_rrd_slot(words, num_words); - if(slot >= 0) idx++; - - char *type = get_word(words, num_words, idx++); - char *name = get_word(words, num_words, idx++); - char *title = get_word(words, num_words, idx++); - char *units = get_word(words, num_words, idx++); - char *family = get_word(words, num_words, idx++); - char *context = get_word(words, num_words, idx++); - char *chart = get_word(words, num_words, idx++); - char *priority_s = get_word(words, num_words, idx++); - char *update_every_s = get_word(words, num_words, idx++); - char *options = get_word(words, num_words, idx++); - char *plugin = get_word(words, num_words, idx++); - char *module = get_word(words, num_words, idx++); - - // parse the id from type - char *id = NULL; - if (likely(type && (id = strchr(type, '.')))) { - *id = '\0'; - id++; - } - - // make sure we have the required variables - if (unlikely((!type || !*type || !id || !*id))) - return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_CHART, "missing parameters"); - - // parse the name, and make sure it does not include 'type.' - if (unlikely(name && *name)) { - // when data are streamed from child nodes - // name will be type.name - // so, we have to remove 'type.' from name too - size_t len = strlen(type); - if (strncmp(type, name, len) == 0 && name[len] == '.') - name = &name[len + 1]; - - // if the name is the same with the id, - // or is just 'NULL', clear it. 
- if (unlikely(strcmp(name, id) == 0 || strcasecmp(name, "NULL") == 0 || strcasecmp(name, "(NULL)") == 0)) - name = NULL; - } - - int priority = 1000; - if (likely(priority_s && *priority_s)) - priority = str2i(priority_s); - - int update_every = parser->user.cd->update_every; - if (likely(update_every_s && *update_every_s)) - update_every = str2i(update_every_s); - if (unlikely(!update_every)) - update_every = parser->user.cd->update_every; - - RRDSET_TYPE chart_type = RRDSET_TYPE_LINE; - if (unlikely(chart)) - chart_type = rrdset_type_id(chart); - - if (unlikely(name && !*name)) - name = NULL; - if (unlikely(family && !*family)) - family = NULL; - if (unlikely(context && !*context)) - context = NULL; - if (unlikely(!title)) - title = ""; - if (unlikely(!units)) - units = "unknown"; - - netdata_log_debug( - D_PLUGINSD, - "creating chart type='%s', id='%s', name='%s', family='%s', context='%s', chart='%s', priority=%d, update_every=%d", - type, id, name ? name : "", family ? family : "", context ? context : "", rrdset_type_name(chart_type), - priority, update_every); - - RRDSET *st = NULL; - - st = rrdset_create( - host, type, id, name, family, context, title, units, - (plugin && *plugin) ? plugin : parser->user.cd->filename, - module, priority, update_every, - chart_type); - - bool obsolete = false; - if (likely(st)) { - if (options && *options) { - if (strstr(options, "obsolete")) { - rrdset_is_obsolete___safe_from_collector_thread(st); - obsolete = true; - } - else - rrdset_isnot_obsolete___safe_from_collector_thread(st); - - if (strstr(options, "detail")) - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); - else - rrdset_flag_clear(st, RRDSET_FLAG_DETAIL); - - if (strstr(options, "hidden")) - rrdset_flag_set(st, RRDSET_FLAG_HIDDEN); - else - rrdset_flag_clear(st, RRDSET_FLAG_HIDDEN); - - if (strstr(options, "store_first")) - rrdset_flag_set(st, RRDSET_FLAG_STORE_FIRST); - else - rrdset_flag_clear(st, RRDSET_FLAG_STORE_FIRST); - } - else { - rrdset_isnot_obsolete___safe_from_collector_thread(st); - rrdset_flag_clear(st, RRDSET_FLAG_DETAIL); - rrdset_flag_clear(st, RRDSET_FLAG_STORE_FIRST); - } - - if(!pluginsd_set_scope_chart(parser, st, PLUGINSD_KEYWORD_CHART)) - return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); - - pluginsd_rrdset_cache_put_to_slot(parser, st, slot, obsolete); - } - else - pluginsd_clear_scope_chart(parser, PLUGINSD_KEYWORD_CHART); - - return PARSER_RC_OK; -} - -static inline PARSER_RC pluginsd_chart_definition_end(char **words, size_t num_words, PARSER *parser) { - const char *first_entry_txt = get_word(words, num_words, 1); - const char *last_entry_txt = get_word(words, num_words, 2); - const char *wall_clock_time_txt = get_word(words, num_words, 3); - - RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_CHART_DEFINITION_END); - if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); - - RRDSET *st = pluginsd_require_scope_chart(parser, PLUGINSD_KEYWORD_CHART_DEFINITION_END, PLUGINSD_KEYWORD_CHART); - if(!st) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); - - time_t first_entry_child = (first_entry_txt && *first_entry_txt) ? (time_t)str2ul(first_entry_txt) : 0; - time_t last_entry_child = (last_entry_txt && *last_entry_txt) ? (time_t)str2ul(last_entry_txt) : 0; - time_t child_wall_clock_time = (wall_clock_time_txt && *wall_clock_time_txt) ? 
(time_t)str2ul(wall_clock_time_txt) : now_realtime_sec(); - - bool ok = true; - if(!rrdset_flag_check(st, RRDSET_FLAG_RECEIVER_REPLICATION_IN_PROGRESS)) { - -#ifdef NETDATA_LOG_REPLICATION_REQUESTS - st->replay.start_streaming = false; - st->replay.after = 0; - st->replay.before = 0; -#endif - - rrdset_flag_set(st, RRDSET_FLAG_RECEIVER_REPLICATION_IN_PROGRESS); - rrdset_flag_clear(st, RRDSET_FLAG_RECEIVER_REPLICATION_FINISHED); - rrdhost_receiver_replicating_charts_plus_one(st->rrdhost); - - ok = replicate_chart_request(send_to_plugin, parser, host, st, - first_entry_child, last_entry_child, child_wall_clock_time, - 0, 0); - } -#ifdef NETDATA_LOG_REPLICATION_REQUESTS - else { - internal_error(true, "REPLAY: 'host:%s/chart:%s' not sending duplicate replication request", - rrdhost_hostname(st->rrdhost), rrdset_id(st)); - } -#endif - - return ok ? PARSER_RC_OK : PARSER_RC_ERROR; -} - -static inline PARSER_RC pluginsd_dimension(char **words, size_t num_words, PARSER *parser) { - int idx = 1; - ssize_t slot = pluginsd_parse_rrd_slot(words, num_words); - if(slot >= 0) idx++; - - char *id = get_word(words, num_words, idx++); - char *name = get_word(words, num_words, idx++); - char *algorithm = get_word(words, num_words, idx++); - char *multiplier_s = get_word(words, num_words, idx++); - char *divisor_s = get_word(words, num_words, idx++); - char *options = get_word(words, num_words, idx++); - - RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_DIMENSION); - if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); - - RRDSET *st = pluginsd_require_scope_chart(parser, PLUGINSD_KEYWORD_DIMENSION, PLUGINSD_KEYWORD_CHART); - if(!st) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); - - if (unlikely(!id || !*id)) - return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_DIMENSION, "missing dimension id"); - - long multiplier = 1; - if (multiplier_s && *multiplier_s) { - multiplier = str2ll_encoded(multiplier_s); - if (unlikely(!multiplier)) - multiplier = 1; - } - - long divisor = 1; - if (likely(divisor_s && *divisor_s)) { - divisor = str2ll_encoded(divisor_s); - if (unlikely(!divisor)) - divisor = 1; - } - - if (unlikely(!algorithm || !*algorithm)) - algorithm = "absolute"; - - if (unlikely(st && rrdset_flag_check(st, RRDSET_FLAG_DEBUG))) - netdata_log_debug( - D_PLUGINSD, - "creating dimension in chart %s, id='%s', name='%s', algorithm='%s', multiplier=%ld, divisor=%ld, hidden='%s'", - rrdset_id(st), id, name ? name : "", rrd_algorithm_name(rrd_algorithm_id(algorithm)), multiplier, divisor, - options ? 
options : ""); - - RRDDIM *rd = rrddim_add(st, id, name, multiplier, divisor, rrd_algorithm_id(algorithm)); - if (unlikely(!rd)) - return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_DIMENSION, "failed to create dimension"); - - int unhide_dimension = 1; - - rrddim_option_clear(rd, RRDDIM_OPTION_DONT_DETECT_RESETS_OR_OVERFLOWS); - bool obsolete = false; - if (options && *options) { - if (strstr(options, "obsolete") != NULL) { - obsolete = true; - rrddim_is_obsolete___safe_from_collector_thread(st, rd); - } - else - rrddim_isnot_obsolete___safe_from_collector_thread(st, rd); - - unhide_dimension = !strstr(options, "hidden"); - - if (strstr(options, "noreset") != NULL) - rrddim_option_set(rd, RRDDIM_OPTION_DONT_DETECT_RESETS_OR_OVERFLOWS); - if (strstr(options, "nooverflow") != NULL) - rrddim_option_set(rd, RRDDIM_OPTION_DONT_DETECT_RESETS_OR_OVERFLOWS); - } - else - rrddim_isnot_obsolete___safe_from_collector_thread(st, rd); - - bool should_update_dimension = false; - - if (likely(unhide_dimension)) { - rrddim_option_clear(rd, RRDDIM_OPTION_HIDDEN); - should_update_dimension = rrddim_flag_check(rd, RRDDIM_FLAG_META_HIDDEN); - } - else { - rrddim_option_set(rd, RRDDIM_OPTION_HIDDEN); - should_update_dimension = !rrddim_flag_check(rd, RRDDIM_FLAG_META_HIDDEN); - } - - if (should_update_dimension) { - rrddim_flag_set(rd, RRDDIM_FLAG_METADATA_UPDATE); - rrdhost_flag_set(rd->rrdset->rrdhost, RRDHOST_FLAG_METADATA_UPDATE); - } - - pluginsd_rrddim_put_to_slot(parser, st, rd, slot, obsolete); - - return PARSER_RC_OK; -} - -// ---------------------------------------------------------------------------- - -static inline PARSER_RC pluginsd_variable(char **words, size_t num_words, PARSER *parser) { - char *name = get_word(words, num_words, 1); - char *value = get_word(words, num_words, 2); - NETDATA_DOUBLE v; - - RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_VARIABLE); - if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); - - RRDSET *st = pluginsd_get_scope_chart(parser); - - int global = (st) ? 0 : 1; - - if (name && *name) { - if ((strcmp(name, "GLOBAL") == 0 || strcmp(name, "HOST") == 0)) { - global = 1; - name = get_word(words, num_words, 2); - value = get_word(words, num_words, 3); - } else if ((strcmp(name, "LOCAL") == 0 || strcmp(name, "CHART") == 0)) { - global = 0; - name = get_word(words, num_words, 2); - value = get_word(words, num_words, 3); - } - } - - if (unlikely(!name || !*name)) - return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_VARIABLE, "missing variable name"); - - if (unlikely(!value || !*value)) - value = NULL; - - if (unlikely(!value)) { - netdata_log_error("PLUGINSD: 'host:%s/chart:%s' cannot set %s VARIABLE '%s' to an empty value", - rrdhost_hostname(host), - st ? rrdset_id(st):"UNSET", - (global) ? "HOST" : "CHART", - name); - return PARSER_RC_OK; - } - - if (!global && !st) - return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_VARIABLE, "no chart is defined and no GLOBAL is given"); - - char *endptr = NULL; - v = (NETDATA_DOUBLE) str2ndd_encoded(value, &endptr); - if (unlikely(endptr && *endptr)) { - if (endptr == value) - netdata_log_error("PLUGINSD: 'host:%s/chart:%s' the value '%s' of VARIABLE '%s' cannot be parsed as a number", - rrdhost_hostname(host), - st ? rrdset_id(st):"UNSET", - value, - name); - else - netdata_log_error("PLUGINSD: 'host:%s/chart:%s' the value '%s' of VARIABLE '%s' has leftovers: '%s'", - rrdhost_hostname(host), - st ? 
rrdset_id(st):"UNSET", - value, - name, - endptr); - } - - if (global) { - const RRDVAR_ACQUIRED *rva = rrdvar_host_variable_add_and_acquire(host, name); - if (rva) { - rrdvar_host_variable_set(host, rva, v); - rrdvar_host_variable_release(host, rva); - } - else - netdata_log_error("PLUGINSD: 'host:%s' cannot find/create HOST VARIABLE '%s'", - rrdhost_hostname(host), - name); - } else { - const RRDVAR_ACQUIRED *rsa = rrdvar_chart_variable_add_and_acquire(st, name); - if (rsa) { - rrdvar_chart_variable_set(st, rsa, v); - rrdvar_chart_variable_release(st, rsa); - } - else - netdata_log_error("PLUGINSD: 'host:%s/chart:%s' cannot find/create CHART VARIABLE '%s'", - rrdhost_hostname(host), rrdset_id(st), name); - } - - return PARSER_RC_OK; -} - -static inline PARSER_RC pluginsd_flush(char **words __maybe_unused, size_t num_words __maybe_unused, PARSER *parser) { - netdata_log_debug(D_PLUGINSD, "requested a " PLUGINSD_KEYWORD_FLUSH); - pluginsd_clear_scope_chart(parser, PLUGINSD_KEYWORD_FLUSH); - parser->user.replay.start_time = 0; - parser->user.replay.end_time = 0; - parser->user.replay.start_time_ut = 0; - parser->user.replay.end_time_ut = 0; - return PARSER_RC_OK; -} - -static inline PARSER_RC pluginsd_disable(char **words __maybe_unused, size_t num_words __maybe_unused, PARSER *parser) { - netdata_log_info("PLUGINSD: plugin called DISABLE. Disabling it."); - parser->user.enabled = 0; - return PARSER_RC_STOP; -} - -static inline PARSER_RC pluginsd_label(char **words, size_t num_words, PARSER *parser) { - const char *name = get_word(words, num_words, 1); - const char *label_source = get_word(words, num_words, 2); - const char *value = get_word(words, num_words, 3); - - if (!name || !label_source || !value) - return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_LABEL, "missing parameters"); - - char *store = (char *)value; - bool allocated_store = false; - - if(unlikely(num_words > 4)) { - allocated_store = true; - store = mallocz(PLUGINSD_LINE_MAX + 1); - size_t remaining = PLUGINSD_LINE_MAX; - char *move = store; - char *word; - for(size_t i = 3; i < num_words && remaining > 2 && (word = get_word(words, num_words, i)) ;i++) { - if(i > 3) { - *move++ = ' '; - *move = '\0'; - remaining--; - } - - size_t length = strlen(word); - if (length > remaining) - length = remaining; - - remaining -= length; - memcpy(move, word, length); - move += length; - *move = '\0'; - } - } - - if(unlikely(!(parser->user.new_host_labels))) - parser->user.new_host_labels = rrdlabels_create(); - - if (strcmp(name,HOST_LABEL_IS_EPHEMERAL) == 0) { - int is_ephemeral = appconfig_test_boolean_value((char *) value); - RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_LABEL); - if (host) { - if (is_ephemeral) - rrdhost_option_set(host, RRDHOST_OPTION_EPHEMERAL_HOST); - else - rrdhost_option_clear(host, RRDHOST_OPTION_EPHEMERAL_HOST); - } - } - - rrdlabels_add(parser->user.new_host_labels, name, store, str2l(label_source)); - - if (allocated_store) - freez(store); - - return PARSER_RC_OK; -} - -static inline PARSER_RC pluginsd_overwrite(char **words __maybe_unused, size_t num_words __maybe_unused, PARSER *parser) { - RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_OVERWRITE); - if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); - - netdata_log_debug(D_PLUGINSD, "requested to OVERWRITE host labels"); - - if(unlikely(!host->rrdlabels)) - host->rrdlabels = rrdlabels_create(); - - rrdlabels_migrate_to_these(host->rrdlabels, parser->user.new_host_labels); - if 
(rrdhost_option_check(host, RRDHOST_OPTION_EPHEMERAL_HOST))
-        rrdlabels_add(host->rrdlabels, HOST_LABEL_IS_EPHEMERAL, "true", RRDLABEL_SRC_CONFIG);
-
-    if(!rrdlabels_exist(host->rrdlabels, "_os"))
-        rrdlabels_add(host->rrdlabels, "_os", string2str(host->os), RRDLABEL_SRC_AUTO);
-
-    if(!rrdlabels_exist(host->rrdlabels, "_hostname"))
-        rrdlabels_add(host->rrdlabels, "_hostname", string2str(host->hostname), RRDLABEL_SRC_AUTO);
-
-    rrdhost_flag_set(host, RRDHOST_FLAG_METADATA_LABELS | RRDHOST_FLAG_METADATA_UPDATE);
-
-    rrdlabels_destroy(parser->user.new_host_labels);
-    parser->user.new_host_labels = NULL;
-    return PARSER_RC_OK;
-}
-
-static inline PARSER_RC pluginsd_clabel(char **words, size_t num_words, PARSER *parser) {
-    const char *name = get_word(words, num_words, 1);
-    const char *value = get_word(words, num_words, 2);
-    const char *label_source = get_word(words, num_words, 3);
-
-    if (!name || !value || !label_source) {
-        netdata_log_error("Ignoring malformed or empty CHART LABEL command.");
-        return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-    }
-
-    if(unlikely(!parser->user.chart_rrdlabels_linked_temporarily)) {
-        RRDSET *st = pluginsd_get_scope_chart(parser);
-        if(unlikely(!st)) {
-            // a CLABEL arriving before any CHART or BEGIN would dereference a
-            // NULL chart pointer below - reject it instead
-            netdata_log_error("Ignoring CLABEL, no chart is in scope.");
-            return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-        }
-        parser->user.chart_rrdlabels_linked_temporarily = st->rrdlabels;
-        rrdlabels_unmark_all(parser->user.chart_rrdlabels_linked_temporarily);
-    }
-
-    rrdlabels_add(parser->user.chart_rrdlabels_linked_temporarily, name, value, str2l(label_source));
-
-    return PARSER_RC_OK;
-}
-
-static inline PARSER_RC pluginsd_clabel_commit(char **words __maybe_unused, size_t num_words __maybe_unused, PARSER *parser) {
-    RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_CLABEL_COMMIT);
-    if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
-    RRDSET *st = pluginsd_require_scope_chart(parser, PLUGINSD_KEYWORD_CLABEL_COMMIT, PLUGINSD_KEYWORD_BEGIN);
-    if(!st) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
-    netdata_log_debug(D_PLUGINSD, "requested to commit chart labels");
-
-    if(!parser->user.chart_rrdlabels_linked_temporarily) {
-        netdata_log_error("PLUGINSD: 'host:%s' got CLABEL_COMMIT, without a CHART or BEGIN.
Ignoring it.", rrdhost_hostname(host)); - return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); - } - - rrdlabels_remove_all_unmarked(parser->user.chart_rrdlabels_linked_temporarily); - - rrdset_flag_set(st, RRDSET_FLAG_METADATA_UPDATE); - rrdhost_flag_set(st->rrdhost, RRDHOST_FLAG_METADATA_UPDATE); - rrdset_metadata_updated(st); - - parser->user.chart_rrdlabels_linked_temporarily = NULL; - return PARSER_RC_OK; -} - -static inline PARSER_RC pluginsd_begin_v2(char **words, size_t num_words, PARSER *parser) { - timing_init(); - - int idx = 1; - ssize_t slot = pluginsd_parse_rrd_slot(words, num_words); - if(slot >= 0) idx++; - - char *id = get_word(words, num_words, idx++); - char *update_every_str = get_word(words, num_words, idx++); - char *end_time_str = get_word(words, num_words, idx++); - char *wall_clock_time_str = get_word(words, num_words, idx++); - - if(unlikely(!id || !update_every_str || !end_time_str || !wall_clock_time_str)) - return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_BEGIN_V2, "missing parameters"); - - RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_BEGIN_V2); - if(unlikely(!host)) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); - - timing_step(TIMING_STEP_BEGIN2_PREPARE); - - RRDSET *st = pluginsd_rrdset_cache_get_from_slot(parser, host, id, slot, PLUGINSD_KEYWORD_BEGIN_V2); - - if(unlikely(!st)) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); - - if(!pluginsd_set_scope_chart(parser, st, PLUGINSD_KEYWORD_BEGIN_V2)) - return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); - - if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE))) - rrdset_isnot_obsolete___safe_from_collector_thread(st); - - timing_step(TIMING_STEP_BEGIN2_FIND_CHART); - - // ------------------------------------------------------------------------ - // parse the parameters - - time_t update_every = (time_t) str2ull_encoded(update_every_str); - time_t end_time = (time_t) str2ull_encoded(end_time_str); - - time_t wall_clock_time; - if(likely(*wall_clock_time_str == '#')) - wall_clock_time = end_time; - else - wall_clock_time = (time_t) str2ull_encoded(wall_clock_time_str); - - if (unlikely(update_every != st->update_every)) - rrdset_set_update_every_s(st, update_every); - - timing_step(TIMING_STEP_BEGIN2_PARSE); - - // ------------------------------------------------------------------------ - // prepare our state - - pluginsd_lock_rrdset_data_collection(parser); - - parser->user.v2.update_every = update_every; - parser->user.v2.end_time = end_time; - parser->user.v2.wall_clock_time = wall_clock_time; - parser->user.v2.ml_locked = ml_chart_update_begin(st); - - timing_step(TIMING_STEP_BEGIN2_ML); - - // ------------------------------------------------------------------------ - // propagate it forward in v2 - - if(!parser->user.v2.stream_buffer.wb && rrdhost_has_rrdpush_sender_enabled(st->rrdhost)) - parser->user.v2.stream_buffer = rrdset_push_metric_initialize(parser->user.st, wall_clock_time); - - if(parser->user.v2.stream_buffer.v2 && parser->user.v2.stream_buffer.wb) { - // check receiver capabilities - bool can_copy = stream_has_capability(&parser->user, STREAM_CAP_IEEE754) == stream_has_capability(&parser->user.v2.stream_buffer, STREAM_CAP_IEEE754); - - // check sender capabilities - bool with_slots = stream_has_capability(&parser->user.v2.stream_buffer, STREAM_CAP_SLOTS) ? true : false; - NUMBER_ENCODING integer_encoding = stream_has_capability(&parser->user.v2.stream_buffer, STREAM_CAP_IEEE754) ? 
NUMBER_ENCODING_BASE64 : NUMBER_ENCODING_HEX; - - BUFFER *wb = parser->user.v2.stream_buffer.wb; - - buffer_need_bytes(wb, 1024); - - if(unlikely(parser->user.v2.stream_buffer.begin_v2_added)) - buffer_fast_strcat(wb, PLUGINSD_KEYWORD_END_V2 "\n", sizeof(PLUGINSD_KEYWORD_END_V2) - 1 + 1); - - buffer_fast_strcat(wb, PLUGINSD_KEYWORD_BEGIN_V2, sizeof(PLUGINSD_KEYWORD_BEGIN_V2) - 1); - - if(with_slots) { - buffer_fast_strcat(wb, " "PLUGINSD_KEYWORD_SLOT":", sizeof(PLUGINSD_KEYWORD_SLOT) - 1 + 2); - buffer_print_uint64_encoded(wb, integer_encoding, st->rrdpush.sender.chart_slot); - } - - buffer_fast_strcat(wb, " '", 2); - buffer_fast_strcat(wb, rrdset_id(st), string_strlen(st->id)); - buffer_fast_strcat(wb, "' ", 2); - - if(can_copy) - buffer_strcat(wb, update_every_str); - else - buffer_print_uint64_encoded(wb, integer_encoding, update_every); - - buffer_fast_strcat(wb, " ", 1); - - if(can_copy) - buffer_strcat(wb, end_time_str); - else - buffer_print_uint64_encoded(wb, integer_encoding, end_time); - - buffer_fast_strcat(wb, " ", 1); - - if(can_copy) - buffer_strcat(wb, wall_clock_time_str); - else - buffer_print_uint64_encoded(wb, integer_encoding, wall_clock_time); - - buffer_fast_strcat(wb, "\n", 1); - - parser->user.v2.stream_buffer.last_point_end_time_s = end_time; - parser->user.v2.stream_buffer.begin_v2_added = true; - } - - timing_step(TIMING_STEP_BEGIN2_PROPAGATE); - - // ------------------------------------------------------------------------ - // store it - - st->last_collected_time.tv_sec = end_time; - st->last_collected_time.tv_usec = 0; - st->last_updated.tv_sec = end_time; - st->last_updated.tv_usec = 0; - st->counter++; - st->counter_done++; - - // these are only needed for db mode RAM, ALLOC - st->db.current_entry++; - if(st->db.current_entry >= st->db.entries) - st->db.current_entry -= st->db.entries; - - timing_step(TIMING_STEP_BEGIN2_STORE); - - return PARSER_RC_OK; -} - -static inline PARSER_RC pluginsd_set_v2(char **words, size_t num_words, PARSER *parser) { - timing_init(); - - int idx = 1; - ssize_t slot = pluginsd_parse_rrd_slot(words, num_words); - if(slot >= 0) idx++; - - char *dimension = get_word(words, num_words, idx++); - char *collected_str = get_word(words, num_words, idx++); - char *value_str = get_word(words, num_words, idx++); - char *flags_str = get_word(words, num_words, idx++); - - if(unlikely(!dimension || !collected_str || !value_str || !flags_str)) - return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_SET_V2, "missing parameters"); - - RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_SET_V2); - if(unlikely(!host)) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); - - RRDSET *st = pluginsd_require_scope_chart(parser, PLUGINSD_KEYWORD_SET_V2, PLUGINSD_KEYWORD_BEGIN_V2); - if(unlikely(!st)) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); - - timing_step(TIMING_STEP_SET2_PREPARE); - - RRDDIM *rd = pluginsd_acquire_dimension(host, st, dimension, slot, PLUGINSD_KEYWORD_SET_V2); - if(unlikely(!rd)) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); - - st->pluginsd.set = true; - - if(unlikely(rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE | RRDDIM_FLAG_ARCHIVED))) - rrddim_isnot_obsolete___safe_from_collector_thread(st, rd); - - timing_step(TIMING_STEP_SET2_LOOKUP_DIMENSION); - - // ------------------------------------------------------------------------ - // parse the parameters - - collected_number collected_value = (collected_number) str2ll_encoded(collected_str); - - NETDATA_DOUBLE value; - if(*value_str == '#') - value = 
(NETDATA_DOUBLE)collected_value; - else - value = str2ndd_encoded(value_str, NULL); - - SN_FLAGS flags = pluginsd_parse_storage_number_flags(flags_str); - - timing_step(TIMING_STEP_SET2_PARSE); - - // ------------------------------------------------------------------------ - // check value and ML - - if (unlikely(!netdata_double_isnumber(value) || (flags == SN_EMPTY_SLOT))) { - value = NAN; - flags = SN_EMPTY_SLOT; - - if(parser->user.v2.ml_locked) - ml_dimension_is_anomalous(rd, parser->user.v2.end_time, 0, false); - } - else if(parser->user.v2.ml_locked) { - if (ml_dimension_is_anomalous(rd, parser->user.v2.end_time, value, true)) { - // clear anomaly bit: 0 -> is anomalous, 1 -> not anomalous - flags &= ~((storage_number) SN_FLAG_NOT_ANOMALOUS); - } - else - flags |= SN_FLAG_NOT_ANOMALOUS; - } - - timing_step(TIMING_STEP_SET2_ML); - - // ------------------------------------------------------------------------ - // propagate it forward in v2 - - if(parser->user.v2.stream_buffer.v2 && parser->user.v2.stream_buffer.begin_v2_added && parser->user.v2.stream_buffer.wb) { - // check if receiver and sender have the same number parsing capabilities - bool can_copy = stream_has_capability(&parser->user, STREAM_CAP_IEEE754) == stream_has_capability(&parser->user.v2.stream_buffer, STREAM_CAP_IEEE754); - - // check the sender capabilities - bool with_slots = stream_has_capability(&parser->user.v2.stream_buffer, STREAM_CAP_SLOTS) ? true : false; - NUMBER_ENCODING integer_encoding = stream_has_capability(&parser->user.v2.stream_buffer, STREAM_CAP_IEEE754) ? NUMBER_ENCODING_BASE64 : NUMBER_ENCODING_HEX; - NUMBER_ENCODING doubles_encoding = stream_has_capability(&parser->user.v2.stream_buffer, STREAM_CAP_IEEE754) ? NUMBER_ENCODING_BASE64 : NUMBER_ENCODING_DECIMAL; - - BUFFER *wb = parser->user.v2.stream_buffer.wb; - buffer_need_bytes(wb, 1024); - buffer_fast_strcat(wb, PLUGINSD_KEYWORD_SET_V2, sizeof(PLUGINSD_KEYWORD_SET_V2) - 1); - - if(with_slots) { - buffer_fast_strcat(wb, " "PLUGINSD_KEYWORD_SLOT":", sizeof(PLUGINSD_KEYWORD_SLOT) - 1 + 2); - buffer_print_uint64_encoded(wb, integer_encoding, rd->rrdpush.sender.dim_slot); - } - - buffer_fast_strcat(wb, " '", 2); - buffer_fast_strcat(wb, rrddim_id(rd), string_strlen(rd->id)); - buffer_fast_strcat(wb, "' ", 2); - if(can_copy) - buffer_strcat(wb, collected_str); - else - buffer_print_int64_encoded(wb, integer_encoding, collected_value); // original v2 had hex - buffer_fast_strcat(wb, " ", 1); - if(can_copy) - buffer_strcat(wb, value_str); - else - buffer_print_netdata_double_encoded(wb, doubles_encoding, value); // original v2 had decimal - buffer_fast_strcat(wb, " ", 1); - buffer_print_sn_flags(wb, flags, true); - buffer_fast_strcat(wb, "\n", 1); - } - - timing_step(TIMING_STEP_SET2_PROPAGATE); - - // ------------------------------------------------------------------------ - // store it - - rrddim_store_metric(rd, parser->user.v2.end_time * USEC_PER_SEC, value, flags); - rd->collector.last_collected_time.tv_sec = parser->user.v2.end_time; - rd->collector.last_collected_time.tv_usec = 0; - rd->collector.last_collected_value = collected_value; - rd->collector.last_stored_value = value; - rd->collector.last_calculated_value = value; - rd->collector.counter++; - rrddim_set_updated(rd); - - timing_step(TIMING_STEP_SET2_STORE); - - return PARSER_RC_OK; -} - -static inline PARSER_RC pluginsd_end_v2(char **words __maybe_unused, size_t num_words __maybe_unused, PARSER *parser) { - timing_init(); - - RRDHOST *host = pluginsd_require_scope_host(parser, 
PLUGINSD_KEYWORD_END_V2);
-    if(unlikely(!host)) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
-    RRDSET *st = pluginsd_require_scope_chart(parser, PLUGINSD_KEYWORD_END_V2, PLUGINSD_KEYWORD_BEGIN_V2);
-    if(unlikely(!st)) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
-    parser->user.data_collections_count++;
-
-    timing_step(TIMING_STEP_END2_PREPARE);
-
-    // ------------------------------------------------------------------------
-    // propagate the whole chart update in v1
-
-    if(unlikely(!parser->user.v2.stream_buffer.v2 && !parser->user.v2.stream_buffer.begin_v2_added && parser->user.v2.stream_buffer.wb))
-        rrdset_push_metrics_v1(&parser->user.v2.stream_buffer, st);
-
-    timing_step(TIMING_STEP_END2_PUSH_V1);
-
-    // ------------------------------------------------------------------------
-    // unblock data collection
-
-    pluginsd_unlock_previous_scope_chart(parser, PLUGINSD_KEYWORD_END_V2, false);
-    rrdcontext_collected_rrdset(st);
-    store_metric_collection_completed();
-
-    timing_step(TIMING_STEP_END2_RRDSET);
-
-    // ------------------------------------------------------------------------
-    // propagate it forward
-
-    rrdset_push_metrics_finished(&parser->user.v2.stream_buffer, st);
-
-    timing_step(TIMING_STEP_END2_PROPAGATE);
-
-    // ------------------------------------------------------------------------
-    // cleanup RRDSET / RRDDIM
-
-    if(likely(st->pluginsd.dims_with_slots)) {
-        for(size_t i = 0; i < st->pluginsd.size ;i++) {
-            RRDDIM *rd = st->pluginsd.prd_array[i].rd;
-
-            if(!rd)
-                continue;
-
-            rd->collector.calculated_value = 0;
-            rd->collector.collected_value = 0;
-            rrddim_clear_updated(rd);
-        }
-    }
-    else {
-        RRDDIM *rd;
-        rrddim_foreach_read(rd, st){
-            rd->collector.calculated_value = 0;
-            rd->collector.collected_value = 0;
-            rrddim_clear_updated(rd);
-        }
-        rrddim_foreach_done(rd);
-    }
-
-    // ------------------------------------------------------------------------
-    // reset state
-
-    parser->user.v2 = (struct parser_user_object_v2){ 0 };
-
-    timing_step(TIMING_STEP_END2_STORE);
-    timing_report();
-
-    return PARSER_RC_OK;
-}
-
-static inline PARSER_RC pluginsd_exit(char **words __maybe_unused, size_t num_words __maybe_unused, PARSER *parser __maybe_unused) {
-    netdata_log_info("PLUGINSD: plugin called EXIT.");
-    return PARSER_RC_STOP;
-}
-
-static inline PARSER_RC streaming_claimed_id(char **words, size_t num_words, PARSER *parser)
-{
-    const char *host_uuid_str = get_word(words, num_words, 1);
-    const char *claim_id_str = get_word(words, num_words, 2);
-
-    if (!host_uuid_str || !claim_id_str) {
-        netdata_log_error("Received malformed CLAIMED_ID command, uuid = '%s', claim_id = '%s'",
-                          host_uuid_str ? host_uuid_str : "[unset]",
-                          claim_id_str ? claim_id_str : "[unset]");
-        return PARSER_RC_ERROR;
-    }
-
-    nd_uuid_t uuid;
-    RRDHOST *host = parser->user.host;
-
-    // we don't need the parsed UUID
-    // just do it to check the format
-    if(uuid_parse(host_uuid_str, uuid)) {
-        netdata_log_error("1st parameter (host GUID) to CLAIMED_ID command is not a valid GUID. Received: \"%s\".", host_uuid_str);
-        return PARSER_RC_ERROR;
-    }
-    if(uuid_parse(claim_id_str, uuid) && strcmp(claim_id_str, "NULL") != 0) {
-        netdata_log_error("2nd parameter (Claim ID) to CLAIMED_ID command is not a valid GUID. Received: \"%s\".", claim_id_str);
-        return PARSER_RC_ERROR;
-    }
-
-    if(strcmp(host_uuid_str, host->machine_guid) != 0) {
-        netdata_log_error("Claim ID is for host \"%s\" but it came over the connection for \"%s\"", host_uuid_str, host->machine_guid);
-        return PARSER_RC_OK; // the message is OK; the problem must be somewhere else
-    }
-
-    rrdhost_aclk_state_lock(host);
-
-    if (host->aclk_state.claimed_id)
-        freez(host->aclk_state.claimed_id);
-
-    host->aclk_state.claimed_id = strcmp(claim_id_str, "NULL") ? strdupz(claim_id_str) : NULL;
-
-    rrdhost_aclk_state_unlock(host);
-
-    rrdhost_flag_set(host, RRDHOST_FLAG_METADATA_CLAIMID | RRDHOST_FLAG_METADATA_UPDATE);
-
-    rrdpush_send_claimed_id(host);
-
-    return PARSER_RC_OK;
-}
-
-// ----------------------------------------------------------------------------
-
-void pluginsd_cleanup_v2(PARSER *parser) {
-    // this is called when the thread is stopped while processing
-    pluginsd_clear_scope_chart(parser, "THREAD CLEANUP");
-}
-
-void pluginsd_process_thread_cleanup(void *pptr) {
-    PARSER *parser = CLEANUP_FUNCTION_GET_PTR(pptr);
-    if(!parser) return;
-
-    pluginsd_cleanup_v2(parser);
-    pluginsd_host_define_cleanup(parser);
-
-    rrd_collector_finished();
-
-#ifdef NETDATA_LOG_STREAM_RECEIVE
-    if(parser->user.stream_log_fp) {
-        fclose(parser->user.stream_log_fp);
-        parser->user.stream_log_fp = NULL;
-    }
-#endif
-
-    parser_destroy(parser);
-}
-
-bool parser_reconstruct_node(BUFFER *wb, void *ptr) {
-    PARSER *parser = ptr;
-    if(!parser || !parser->user.host)
-        return false;
-
-    buffer_strcat(wb, rrdhost_hostname(parser->user.host));
-    return true;
-}
-
-bool parser_reconstruct_instance(BUFFER *wb, void *ptr) {
-    PARSER *parser = ptr;
-    if(!parser || !parser->user.st)
-        return false;
-
-    buffer_strcat(wb, rrdset_name(parser->user.st));
-    return true;
-}
-
-bool parser_reconstruct_context(BUFFER *wb, void *ptr) {
-    PARSER *parser = ptr;
-    if(!parser || !parser->user.st)
-        return false;
-
-    buffer_strcat(wb, string2str(parser->user.st->context));
-    return true;
-}
-
-inline size_t pluginsd_process(RRDHOST *host, struct plugind *cd, FILE *fp_plugin_input, FILE *fp_plugin_output, int trust_durations)
-{
-    int enabled = cd->unsafe.enabled;
-
-    if (!fp_plugin_input || !fp_plugin_output || !enabled) {
-        cd->unsafe.enabled = 0;
-        return 0;
-    }
-
-    if (unlikely(fileno(fp_plugin_input) == -1)) {
-        netdata_log_error("input file descriptor given is not a valid stream");
-        cd->serial_failures++;
-        return 0;
-    }
-
-    if (unlikely(fileno(fp_plugin_output) == -1)) {
-        netdata_log_error("output file descriptor given is not a valid stream");
-        cd->serial_failures++;
-        return 0;
-    }
-
-    clearerr(fp_plugin_input);
-    clearerr(fp_plugin_output);
-
-    PARSER *parser;
-    {
-        PARSER_USER_OBJECT user = {
-            .enabled = cd->unsafe.enabled,
-            .host = host,
-            .cd = cd,
-            .trust_durations = trust_durations
-        };
-
-        // fp_plugin_output = our input; fp_plugin_input = our output
-        parser = parser_init(&user, fp_plugin_output, fp_plugin_input, -1, PARSER_INPUT_SPLIT, NULL);
-    }
-
-    pluginsd_keywords_init(parser, PARSER_INIT_PLUGINSD);
-
-    rrd_collector_started();
-
-    size_t count = 0;
-
-    ND_LOG_STACK lgs[] = {
-        ND_LOG_FIELD_CB(NDF_REQUEST, line_splitter_reconstruct_line, &parser->line),
-        ND_LOG_FIELD_CB(NDF_NIDL_NODE, parser_reconstruct_node, parser),
-        ND_LOG_FIELD_CB(NDF_NIDL_INSTANCE, parser_reconstruct_instance, parser),
-        ND_LOG_FIELD_CB(NDF_NIDL_CONTEXT, parser_reconstruct_context, parser),
-        ND_LOG_FIELD_END(),
-    };
-    ND_LOG_STACK_PUSH(lgs);
-
CLEANUP_FUNCTION_REGISTER(pluginsd_process_thread_cleanup) cleanup_parser = parser; - buffered_reader_init(&parser->reader); - CLEAN_BUFFER *buffer = buffer_create(sizeof(parser->reader.read_buffer) + 2, NULL); - while(likely(service_running(SERVICE_COLLECTORS))) { - - if(unlikely(!buffered_reader_next_line(&parser->reader, buffer))) { - buffered_reader_ret_t ret = buffered_reader_read_timeout( - &parser->reader, - fileno((FILE *) parser->fp_input), - 2 * 60 * MSEC_PER_SEC, true - ); - - if(unlikely(ret != BUFFERED_READER_READ_OK)) - break; - - continue; - } - - if(unlikely(parser_action(parser, buffer->buffer))) - break; - - buffer->len = 0; - buffer->buffer[0] = '\0'; - } - - cd->unsafe.enabled = parser->user.enabled; - count = parser->user.data_collections_count; - - if(likely(count)) { - cd->successful_collections += count; - cd->serial_failures = 0; - } - else - cd->serial_failures++; - - return count; -} - -#include "gperf-hashtable.h" - -PARSER_RC parser_execute(PARSER *parser, const PARSER_KEYWORD *keyword, char **words, size_t num_words) { - switch(keyword->id) { - case PLUGINSD_KEYWORD_ID_SET2: - return pluginsd_set_v2(words, num_words, parser); - case PLUGINSD_KEYWORD_ID_BEGIN2: - return pluginsd_begin_v2(words, num_words, parser); - case PLUGINSD_KEYWORD_ID_END2: - return pluginsd_end_v2(words, num_words, parser); - case PLUGINSD_KEYWORD_ID_SET: - return pluginsd_set(words, num_words, parser); - case PLUGINSD_KEYWORD_ID_BEGIN: - return pluginsd_begin(words, num_words, parser); - case PLUGINSD_KEYWORD_ID_END: - return pluginsd_end(words, num_words, parser); - case PLUGINSD_KEYWORD_ID_RSET: - return pluginsd_replay_set(words, num_words, parser); - case PLUGINSD_KEYWORD_ID_RBEGIN: - return pluginsd_replay_begin(words, num_words, parser); - case PLUGINSD_KEYWORD_ID_RDSTATE: - return pluginsd_replay_rrddim_collection_state(words, num_words, parser); - case PLUGINSD_KEYWORD_ID_RSSTATE: - return pluginsd_replay_rrdset_collection_state(words, num_words, parser); - case PLUGINSD_KEYWORD_ID_REND: - return pluginsd_replay_end(words, num_words, parser); - case PLUGINSD_KEYWORD_ID_DIMENSION: - return pluginsd_dimension(words, num_words, parser); - case PLUGINSD_KEYWORD_ID_CHART: - return pluginsd_chart(words, num_words, parser); - case PLUGINSD_KEYWORD_ID_CHART_DEFINITION_END: - return pluginsd_chart_definition_end(words, num_words, parser); - case PLUGINSD_KEYWORD_ID_CLABEL: - return pluginsd_clabel(words, num_words, parser); - case PLUGINSD_KEYWORD_ID_CLABEL_COMMIT: - return pluginsd_clabel_commit(words, num_words, parser); - case PLUGINSD_KEYWORD_ID_FUNCTION: - return pluginsd_function(words, num_words, parser); - case PLUGINSD_KEYWORD_ID_FUNCTION_RESULT_BEGIN: - return pluginsd_function_result_begin(words, num_words, parser); - case PLUGINSD_KEYWORD_ID_FUNCTION_PROGRESS: - return pluginsd_function_progress(words, num_words, parser); - case PLUGINSD_KEYWORD_ID_LABEL: - return pluginsd_label(words, num_words, parser); - case PLUGINSD_KEYWORD_ID_OVERWRITE: - return pluginsd_overwrite(words, num_words, parser); - case PLUGINSD_KEYWORD_ID_VARIABLE: - return pluginsd_variable(words, num_words, parser); - case PLUGINSD_KEYWORD_ID_CLAIMED_ID: - return streaming_claimed_id(words, num_words, parser); - case PLUGINSD_KEYWORD_ID_HOST: - return pluginsd_host(words, num_words, parser); - case PLUGINSD_KEYWORD_ID_HOST_DEFINE: - return pluginsd_host_define(words, num_words, parser); - case PLUGINSD_KEYWORD_ID_HOST_DEFINE_END: - return pluginsd_host_define_end(words, num_words, parser); - case 
PLUGINSD_KEYWORD_ID_HOST_LABEL:
-            return pluginsd_host_labels(words, num_words, parser);
-        case PLUGINSD_KEYWORD_ID_FLUSH:
-            return pluginsd_flush(words, num_words, parser);
-        case PLUGINSD_KEYWORD_ID_DISABLE:
-            return pluginsd_disable(words, num_words, parser);
-        case PLUGINSD_KEYWORD_ID_EXIT:
-            return pluginsd_exit(words, num_words, parser);
-        case PLUGINSD_KEYWORD_ID_CONFIG:
-            return pluginsd_config(words, num_words, parser);
-
-        case PLUGINSD_KEYWORD_ID_DYNCFG_ENABLE:
-        case PLUGINSD_KEYWORD_ID_DYNCFG_REGISTER_MODULE:
-        case PLUGINSD_KEYWORD_ID_DYNCFG_REGISTER_JOB:
-        case PLUGINSD_KEYWORD_ID_DYNCFG_RESET:
-        case PLUGINSD_KEYWORD_ID_REPORT_JOB_STATUS:
-        case PLUGINSD_KEYWORD_ID_DELETE_JOB:
-            return pluginsd_dyncfg_noop(words, num_words, parser);
-
-        default:
-            netdata_log_error("Unknown keyword '%s' with id %zu", keyword->keyword, keyword->id);
-            return PARSER_RC_ERROR;
-    }
-}
-
-void parser_init_repertoire(PARSER *parser, PARSER_REPERTOIRE repertoire) {
-    parser->repertoire = repertoire;
-
-    for(size_t i = GPERF_PARSER_MIN_HASH_VALUE ; i <= GPERF_PARSER_MAX_HASH_VALUE ;i++) {
-        if(gperf_keywords[i].keyword && *gperf_keywords[i].keyword && (parser->repertoire & gperf_keywords[i].repertoire))
-            worker_register_job_name(gperf_keywords[i].worker_job_id, gperf_keywords[i].keyword);
-    }
-}
-
-int pluginsd_parser_unittest(void) {
-    PARSER *p = parser_init(NULL, NULL, NULL, -1, PARSER_INPUT_SPLIT, NULL);
-    pluginsd_keywords_init(p, PARSER_INIT_PLUGINSD | PARSER_INIT_STREAMING);
-
-    char *lines[] = {
-        "BEGIN2 abcdefghijklmnopqr 123",
-        "SET2 abcdefg 0x12345678 0 0",
-        "SET2 hijklmnoqr 0x12345678 0 0",
-        "SET2 stuvwxyz 0x12345678 0 0",
-        "END2",
-        NULL,
-    };
-
-    char *words[PLUGINSD_MAX_WORDS];
-    size_t iterations = 1000000;
-    size_t count = 0;
-    char input[PLUGINSD_LINE_MAX + 1];
-
-    usec_t started = now_realtime_usec();
-    while(--iterations) {
-        for(size_t line = 0; lines[line] ;line++) {
-            strncpyz(input, lines[line], PLUGINSD_LINE_MAX);
-            size_t num_words = quoted_strings_splitter_pluginsd(input, words, PLUGINSD_MAX_WORDS);
-            const char *command = get_word(words, num_words, 0);
-            const PARSER_KEYWORD *keyword = parser_find_keyword(p, command);
-            if(unlikely(!keyword))
-                fatal("Cannot parse the line '%s'", lines[line]);
-            count++;
-        }
-    }
-    usec_t ended = now_realtime_usec();
-
-    netdata_log_info("Parsed %zu lines in %0.2f secs, %0.2f klines/sec", count,
-                     (double)(ended - started) / (double)USEC_PER_SEC,
-                     (double)count / ((double)(ended - started) / (double)USEC_PER_SEC) / 1000.0);
-
-    parser_destroy(p);
-    return 0;
-}
diff --git a/src/collectors/plugins.d/pluginsd_parser.h b/src/collectors/plugins.d/pluginsd_parser.h
deleted file mode 100644
index 6c126964b..000000000
--- a/src/collectors/plugins.d/pluginsd_parser.h
+++ /dev/null
@@ -1,244 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_PLUGINSD_PARSER_H
-#define NETDATA_PLUGINSD_PARSER_H
-
-#include "daemon/common.h"
-
-#define WORKER_PARSER_FIRST_JOB 3
-
-// this has to be kept in sync with the same constant in receiver.c
-#define WORKER_RECEIVER_JOB_REPLICATION_COMPLETION (WORKER_PARSER_FIRST_JOB - 3)
-
-// this controls the max response size of a function
-#define PLUGINSD_MAX_DEFERRED_SIZE (100 * 1024 * 1024)
-
-#define PLUGINSD_MIN_RRDSET_POINTERS_CACHE 1024
-
-#define HOST_LABEL_IS_EPHEMERAL "_is_ephemeral"
-// PARSER return codes
-typedef enum __attribute__ ((__packed__)) parser_rc {
-    PARSER_RC_OK,       // Callback was successful, go on
-    PARSER_RC_STOP,     // Callback says STOP
-    PARSER_RC_ERROR     // Callback failed (abort
rest of callbacks) -} PARSER_RC; - -typedef enum __attribute__ ((__packed__)) parser_input_type { - PARSER_INPUT_SPLIT = (1 << 1), - PARSER_DEFER_UNTIL_KEYWORD = (1 << 2), -} PARSER_INPUT_TYPE; - -typedef enum __attribute__ ((__packed__)) { - PARSER_INIT_PLUGINSD = (1 << 1), - PARSER_INIT_STREAMING = (1 << 2), - PARSER_REP_METADATA = (1 << 3), -} PARSER_REPERTOIRE; - -struct parser; -typedef PARSER_RC (*keyword_function)(char **words, size_t num_words, struct parser *parser); - -typedef struct parser_keyword { - char *keyword; - size_t id; - PARSER_REPERTOIRE repertoire; - size_t worker_job_id; -} PARSER_KEYWORD; - -typedef struct parser_user_object { - bool cleanup_slots; - RRDSET *st; - RRDHOST *host; - void *opaque; - struct plugind *cd; - int trust_durations; - RRDLABELS *new_host_labels; - RRDLABELS *chart_rrdlabels_linked_temporarily; - size_t data_collections_count; - int enabled; - -#ifdef NETDATA_LOG_STREAM_RECEIVE - FILE *stream_log_fp; - PARSER_REPERTOIRE stream_log_repertoire; -#endif - - STREAM_CAPABILITIES capabilities; // receiver capabilities - - struct { - bool parsing_host; - nd_uuid_t machine_guid; - char machine_guid_str[UUID_STR_LEN]; - STRING *hostname; - RRDLABELS *rrdlabels; - } host_define; - - struct parser_user_object_replay { - time_t start_time; - time_t end_time; - - usec_t start_time_ut; - usec_t end_time_ut; - - time_t wall_clock_time; - - bool rset_enabled; - } replay; - - struct parser_user_object_v2 { - bool locked_data_collection; - RRDSET_STREAM_BUFFER stream_buffer; // sender capabilities in this - time_t update_every; - time_t end_time; - time_t wall_clock_time; - bool ml_locked; - } v2; -} PARSER_USER_OBJECT; - -typedef struct parser { - uint8_t version; // Parser version - PARSER_REPERTOIRE repertoire; - uint32_t flags; - int fd; // Socket - FILE *fp_input; // Input source e.g. stream - FILE *fp_output; // Stream to send commands to plugin - -#ifdef ENABLE_HTTPS - NETDATA_SSL *ssl_output; -#endif -#ifdef ENABLE_H2O - void *h2o_ctx; // if set we use h2o_stream functions to send data -#endif - - PARSER_USER_OBJECT user; // User defined structure to hold extra state between calls - - struct buffered_reader reader; - struct line_splitter line; - const PARSER_KEYWORD *keyword; - - struct { - const char *end_keyword; - BUFFER *response; - void (*action)(struct parser *parser, void *action_data); - void *action_data; - } defer; - - struct { - DICTIONARY *functions; - usec_t smaller_monotonic_timeout_ut; - } inflight; - - struct { - SPINLOCK spinlock; - } writer; - -} PARSER; - -PARSER *parser_init(struct parser_user_object *user, FILE *fp_input, FILE *fp_output, int fd, PARSER_INPUT_TYPE flags, void *ssl); -void parser_init_repertoire(PARSER *parser, PARSER_REPERTOIRE repertoire); -void parser_destroy(PARSER *working_parser); -void pluginsd_cleanup_v2(PARSER *parser); -void pluginsd_keywords_init(PARSER *parser, PARSER_REPERTOIRE repertoire); -PARSER_RC parser_execute(PARSER *parser, const PARSER_KEYWORD *keyword, char **words, size_t num_words); - -static inline int find_first_keyword(const char *src, char *dst, int dst_size, bool *isspace_map) { - const char *s = src, *keyword_start; - - while (unlikely(isspace_map[(uint8_t)*s])) s++; - keyword_start = s; - - while (likely(*s && !isspace_map[(uint8_t)*s]) && dst_size > 1) { - *dst++ = *s++; - dst_size--; - } - *dst = '\0'; - return dst_size == 0 ? 
0 : (int) (s - keyword_start); -} - -const PARSER_KEYWORD *gperf_lookup_keyword(register const char *str, register size_t len); - -static inline const PARSER_KEYWORD *parser_find_keyword(PARSER *parser, const char *command) { - const PARSER_KEYWORD *t = gperf_lookup_keyword(command, strlen(command)); - if(t && (t->repertoire & parser->repertoire)) - return t; - - return NULL; -} - -bool parser_reconstruct_node(BUFFER *wb, void *ptr); -bool parser_reconstruct_instance(BUFFER *wb, void *ptr); -bool parser_reconstruct_context(BUFFER *wb, void *ptr); - -static inline int parser_action(PARSER *parser, char *input) { -#ifdef NETDATA_LOG_STREAM_RECEIVE - static __thread char line[PLUGINSD_LINE_MAX + 1]; - strncpyz(line, input, sizeof(line) - 1); -#endif - - parser->line.count++; - - if(unlikely(parser->flags & PARSER_DEFER_UNTIL_KEYWORD)) { - char command[100 + 1]; - bool has_keyword = find_first_keyword(input, command, 100, isspace_map_pluginsd); - - if(!has_keyword || strcmp(command, parser->defer.end_keyword) != 0) { - if(parser->defer.response) { - buffer_strcat(parser->defer.response, input); - if(buffer_strlen(parser->defer.response) > PLUGINSD_MAX_DEFERRED_SIZE) { - // more than PLUGINSD_MAX_DEFERRED_SIZE of data, - // or a bad plugin that did not send the end_keyword - nd_log(NDLS_DAEMON, NDLP_ERR, "PLUGINSD: deferred response is too big (%zu bytes). Stopping this plugin.", buffer_strlen(parser->defer.response)); - return 1; - } - } - return 0; - } - else { - // call the action - parser->defer.action(parser, parser->defer.action_data); - - // empty everything - parser->defer.action = NULL; - parser->defer.action_data = NULL; - parser->defer.end_keyword = NULL; - parser->defer.response = NULL; - parser->flags &= ~PARSER_DEFER_UNTIL_KEYWORD; - } - return 0; - } - - parser->line.num_words = quoted_strings_splitter_pluginsd(input, parser->line.words, PLUGINSD_MAX_WORDS); - const char *command = get_word(parser->line.words, parser->line.num_words, 0); - - if(unlikely(!command)) { - line_splitter_reset(&parser->line); - return 0; - } - - PARSER_RC rc; - parser->keyword = parser_find_keyword(parser, command); - if(likely(parser->keyword)) { - worker_is_busy(parser->keyword->worker_job_id); - -#ifdef NETDATA_LOG_STREAM_RECEIVE - if(parser->user.stream_log_fp && parser->keyword->repertoire & parser->user.stream_log_repertoire) - fprintf(parser->user.stream_log_fp, "%s", line); -#endif - - rc = parser_execute(parser, parser->keyword, parser->line.words, parser->line.num_words); - // rc = (*t->func)(words, num_words, parser); - worker_is_idle(); - } - else - rc = PARSER_RC_ERROR; - - if(rc == PARSER_RC_ERROR) { - CLEAN_BUFFER *wb = buffer_create(1024, NULL); - line_splitter_reconstruct_line(wb, &parser->line); - netdata_log_error("PLUGINSD: parser_action('%s') failed on line %zu: { %s } (quotes added to show parsing)", - command, parser->line.count, buffer_tostring(wb)); - } - - line_splitter_reset(&parser->line); - return (rc == PARSER_RC_ERROR || rc == PARSER_RC_STOP); -} - -#endif //NETDATA_PLUGINSD_PARSER_H diff --git a/src/collectors/plugins.d/pluginsd_replication.c b/src/collectors/plugins.d/pluginsd_replication.c deleted file mode 100644 index 8d0975210..000000000 --- a/src/collectors/plugins.d/pluginsd_replication.c +++ /dev/null @@ -1,371 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include "pluginsd_replication.h" - -PARSER_RC pluginsd_replay_begin(char **words, size_t num_words, PARSER *parser) { - int idx = 1; - ssize_t slot = pluginsd_parse_rrd_slot(words, num_words); - 
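A note on the slot hint parsed above, since the same pattern repeats across the replication handlers: when the peer negotiated STREAM_CAP_SLOTS, the word right after the keyword may be a "SLOT:n" hint that lets the receiver index straight into its pointer caches instead of doing dictionary lookups. Assuming the RBEGIN/RSET keyword spellings that correspond to the PLUGINSD_KEYWORD_ID_RBEGIN and PLUGINSD_KEYWORD_ID_RSET cases (timestamps, slot numbers and names are hypothetical):

    RBEGIN SLOT:7 example.random 1700000000 1700000001 1700000002
    RSET SLOT:3 x 42 A

pluginsd_parse_rrd_slot() returns -1 when the hint is absent, so idx is advanced past the slot word only when one was actually consumed.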
if(slot >= 0) idx++; - - char *id = get_word(words, num_words, idx++); - char *start_time_str = get_word(words, num_words, idx++); - char *end_time_str = get_word(words, num_words, idx++); - char *child_now_str = get_word(words, num_words, idx++); - - RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_REPLAY_BEGIN); - if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); - - RRDSET *st; - if (likely(!id || !*id)) - st = pluginsd_require_scope_chart(parser, PLUGINSD_KEYWORD_REPLAY_BEGIN, PLUGINSD_KEYWORD_REPLAY_BEGIN); - else - st = pluginsd_rrdset_cache_get_from_slot(parser, host, id, slot, PLUGINSD_KEYWORD_REPLAY_BEGIN); - - if(!st) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); - - if(!pluginsd_set_scope_chart(parser, st, PLUGINSD_KEYWORD_REPLAY_BEGIN)) - return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); - - if(start_time_str && end_time_str) { - time_t start_time = (time_t) str2ull_encoded(start_time_str); - time_t end_time = (time_t) str2ull_encoded(end_time_str); - - time_t wall_clock_time = 0, tolerance; - bool wall_clock_comes_from_child; (void)wall_clock_comes_from_child; - if(child_now_str) { - wall_clock_time = (time_t) str2ull_encoded(child_now_str); - tolerance = st->update_every + 1; - wall_clock_comes_from_child = true; - } - - if(wall_clock_time <= 0) { - wall_clock_time = now_realtime_sec(); - tolerance = st->update_every + 5; - wall_clock_comes_from_child = false; - } - -#ifdef NETDATA_LOG_REPLICATION_REQUESTS - internal_error( - (!st->replay.start_streaming && (end_time < st->replay.after || start_time > st->replay.before)), - "REPLAY ERROR: 'host:%s/chart:%s' got a " PLUGINSD_KEYWORD_REPLAY_BEGIN " from %ld to %ld, which does not match our request (%ld to %ld).", - rrdhost_hostname(st->rrdhost), rrdset_id(st), start_time, end_time, st->replay.after, st->replay.before); - - internal_error( - true, - "REPLAY: 'host:%s/chart:%s' got a " PLUGINSD_KEYWORD_REPLAY_BEGIN " from %ld to %ld, child wall clock is %ld (%s), had requested %ld to %ld", - rrdhost_hostname(st->rrdhost), rrdset_id(st), - start_time, end_time, wall_clock_time, wall_clock_comes_from_child ? "from child" : "parent time", - st->replay.after, st->replay.before); -#endif - - if(start_time && end_time && start_time < wall_clock_time + tolerance && end_time < wall_clock_time + tolerance && start_time < end_time) { - if (unlikely(end_time - start_time != st->update_every)) - rrdset_set_update_every_s(st, end_time - start_time); - - st->last_collected_time.tv_sec = end_time; - st->last_collected_time.tv_usec = 0; - - st->last_updated.tv_sec = end_time; - st->last_updated.tv_usec = 0; - - st->counter++; - st->counter_done++; - - // these are only needed for db mode RAM, ALLOC - st->db.current_entry++; - if(st->db.current_entry >= st->db.entries) - st->db.current_entry -= st->db.entries; - - parser->user.replay.start_time = start_time; - parser->user.replay.end_time = end_time; - parser->user.replay.start_time_ut = (usec_t) start_time * USEC_PER_SEC; - parser->user.replay.end_time_ut = (usec_t) end_time * USEC_PER_SEC; - parser->user.replay.wall_clock_time = wall_clock_time; - parser->user.replay.rset_enabled = true; - - return PARSER_RC_OK; - } - - netdata_log_error("PLUGINSD REPLAY ERROR: 'host:%s/chart:%s' got a " PLUGINSD_KEYWORD_REPLAY_BEGIN - " from %ld to %ld, but timestamps are invalid " - "(now is %ld [%s], tolerance %ld). 
Ignoring " PLUGINSD_KEYWORD_REPLAY_SET, - rrdhost_hostname(st->rrdhost), rrdset_id(st), start_time, end_time, - wall_clock_time, wall_clock_comes_from_child ? "child wall clock" : "parent wall clock", - tolerance); - } - - // the child sends an RBEGIN without any parameters initially - // setting rset_enabled to false, means the RSET should not store any metrics - // to store metrics, the RBEGIN needs to have timestamps - parser->user.replay.start_time = 0; - parser->user.replay.end_time = 0; - parser->user.replay.start_time_ut = 0; - parser->user.replay.end_time_ut = 0; - parser->user.replay.wall_clock_time = 0; - parser->user.replay.rset_enabled = false; - return PARSER_RC_OK; -} - -PARSER_RC pluginsd_replay_set(char **words, size_t num_words, PARSER *parser) { - int idx = 1; - ssize_t slot = pluginsd_parse_rrd_slot(words, num_words); - if(slot >= 0) idx++; - - char *dimension = get_word(words, num_words, idx++); - char *value_str = get_word(words, num_words, idx++); - char *flags_str = get_word(words, num_words, idx++); - - RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_REPLAY_SET); - if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); - - RRDSET *st = pluginsd_require_scope_chart(parser, PLUGINSD_KEYWORD_REPLAY_SET, PLUGINSD_KEYWORD_REPLAY_BEGIN); - if(!st) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); - - if(!parser->user.replay.rset_enabled) { - nd_log_limit_static_thread_var(erl, 1, 0); - nd_log_limit(&erl, NDLS_COLLECTORS, NDLP_ERR, - "PLUGINSD: 'host:%s/chart:%s' got a %s but it is disabled by %s errors", - rrdhost_hostname(host), rrdset_id(st), PLUGINSD_KEYWORD_REPLAY_SET, PLUGINSD_KEYWORD_REPLAY_BEGIN); - - // we have to return OK here - return PARSER_RC_OK; - } - - RRDDIM *rd = pluginsd_acquire_dimension(host, st, dimension, slot, PLUGINSD_KEYWORD_REPLAY_SET); - if(!rd) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); - - st->pluginsd.set = true; - - if (unlikely(!parser->user.replay.start_time || !parser->user.replay.end_time)) { - netdata_log_error("PLUGINSD: 'host:%s/chart:%s/dim:%s' got a %s with invalid timestamps %ld to %ld from a %s. Disabling it.", - rrdhost_hostname(host), - rrdset_id(st), - dimension, - PLUGINSD_KEYWORD_REPLAY_SET, - parser->user.replay.start_time, - parser->user.replay.end_time, - PLUGINSD_KEYWORD_REPLAY_BEGIN); - return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); - } - - if (unlikely(!value_str || !*value_str)) - value_str = "NAN"; - - if(unlikely(!flags_str)) - flags_str = ""; - - if (likely(value_str)) { - RRDDIM_FLAGS rd_flags = rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE | RRDDIM_FLAG_ARCHIVED); - - if(!(rd_flags & RRDDIM_FLAG_ARCHIVED)) { - NETDATA_DOUBLE value = str2ndd_encoded(value_str, NULL); - SN_FLAGS flags = pluginsd_parse_storage_number_flags(flags_str); - - if (!netdata_double_isnumber(value) || (flags == SN_EMPTY_SLOT)) { - value = NAN; - flags = SN_EMPTY_SLOT; - } - - rrddim_store_metric(rd, parser->user.replay.end_time_ut, value, flags); - rd->collector.last_collected_time.tv_sec = parser->user.replay.end_time; - rd->collector.last_collected_time.tv_usec = 0; - rd->collector.counter++; - } - else { - nd_log_limit_static_global_var(erl, 1, 0); - nd_log_limit(&erl, NDLS_COLLECTORS, NDLP_WARNING, - "PLUGINSD: 'host:%s/chart:%s/dim:%s' has the ARCHIVED flag set, but it is replicated. 
" - "Ignoring data.", - rrdhost_hostname(st->rrdhost), rrdset_id(st), rrddim_name(rd)); - } - } - - return PARSER_RC_OK; -} - -PARSER_RC pluginsd_replay_rrddim_collection_state(char **words, size_t num_words, PARSER *parser) { - if(parser->user.replay.rset_enabled == false) - return PARSER_RC_OK; - - int idx = 1; - ssize_t slot = pluginsd_parse_rrd_slot(words, num_words); - if(slot >= 0) idx++; - - char *dimension = get_word(words, num_words, idx++); - char *last_collected_ut_str = get_word(words, num_words, idx++); - char *last_collected_value_str = get_word(words, num_words, idx++); - char *last_calculated_value_str = get_word(words, num_words, idx++); - char *last_stored_value_str = get_word(words, num_words, idx++); - - RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_REPLAY_RRDDIM_STATE); - if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); - - RRDSET *st = pluginsd_require_scope_chart(parser, PLUGINSD_KEYWORD_REPLAY_RRDDIM_STATE, PLUGINSD_KEYWORD_REPLAY_BEGIN); - if(!st) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); - - if(st->pluginsd.set) { - // reset pos to reuse the same RDAs - st->pluginsd.pos = 0; - st->pluginsd.set = false; - } - - RRDDIM *rd = pluginsd_acquire_dimension(host, st, dimension, slot, PLUGINSD_KEYWORD_REPLAY_RRDDIM_STATE); - if(!rd) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); - - usec_t dim_last_collected_ut = (usec_t)rd->collector.last_collected_time.tv_sec * USEC_PER_SEC + (usec_t)rd->collector.last_collected_time.tv_usec; - usec_t last_collected_ut = last_collected_ut_str ? str2ull_encoded(last_collected_ut_str) : 0; - if(last_collected_ut > dim_last_collected_ut) { - rd->collector.last_collected_time.tv_sec = (time_t)(last_collected_ut / USEC_PER_SEC); - rd->collector.last_collected_time.tv_usec = (last_collected_ut % USEC_PER_SEC); - } - - rd->collector.last_collected_value = last_collected_value_str ? str2ll_encoded(last_collected_value_str) : 0; - rd->collector.last_calculated_value = last_calculated_value_str ? str2ndd_encoded(last_calculated_value_str, NULL) : 0; - rd->collector.last_stored_value = last_stored_value_str ? str2ndd_encoded(last_stored_value_str, NULL) : 0.0; - - return PARSER_RC_OK; -} - -PARSER_RC pluginsd_replay_rrdset_collection_state(char **words, size_t num_words, PARSER *parser) { - if(parser->user.replay.rset_enabled == false) - return PARSER_RC_OK; - - char *last_collected_ut_str = get_word(words, num_words, 1); - char *last_updated_ut_str = get_word(words, num_words, 2); - - RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_REPLAY_RRDSET_STATE); - if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); - - RRDSET *st = pluginsd_require_scope_chart(parser, PLUGINSD_KEYWORD_REPLAY_RRDSET_STATE, - PLUGINSD_KEYWORD_REPLAY_BEGIN); - if(!st) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); - - usec_t chart_last_collected_ut = (usec_t)st->last_collected_time.tv_sec * USEC_PER_SEC + (usec_t)st->last_collected_time.tv_usec; - usec_t last_collected_ut = last_collected_ut_str ? str2ull_encoded(last_collected_ut_str) : 0; - if(last_collected_ut > chart_last_collected_ut) { - st->last_collected_time.tv_sec = (time_t)(last_collected_ut / USEC_PER_SEC); - st->last_collected_time.tv_usec = (last_collected_ut % USEC_PER_SEC); - } - - usec_t chart_last_updated_ut = (usec_t)st->last_updated.tv_sec * USEC_PER_SEC + (usec_t)st->last_updated.tv_usec; - usec_t last_updated_ut = last_updated_ut_str ? 
str2ull_encoded(last_updated_ut_str) : 0; - if(last_updated_ut > chart_last_updated_ut) { - st->last_updated.tv_sec = (time_t)(last_updated_ut / USEC_PER_SEC); - st->last_updated.tv_usec = (last_updated_ut % USEC_PER_SEC); - } - - st->counter++; - st->counter_done++; - - return PARSER_RC_OK; -} - -PARSER_RC pluginsd_replay_end(char **words, size_t num_words, PARSER *parser) { - if (num_words < 7) { // accepts 7, but the 7th is optional - netdata_log_error("REPLAY: malformed " PLUGINSD_KEYWORD_REPLAY_END " command"); - return PARSER_RC_ERROR; - } - - const char *update_every_child_txt = get_word(words, num_words, 1); - const char *first_entry_child_txt = get_word(words, num_words, 2); - const char *last_entry_child_txt = get_word(words, num_words, 3); - const char *start_streaming_txt = get_word(words, num_words, 4); - const char *first_entry_requested_txt = get_word(words, num_words, 5); - const char *last_entry_requested_txt = get_word(words, num_words, 6); - const char *child_world_time_txt = get_word(words, num_words, 7); // optional - - time_t update_every_child = (time_t) str2ull_encoded(update_every_child_txt); - time_t first_entry_child = (time_t) str2ull_encoded(first_entry_child_txt); - time_t last_entry_child = (time_t) str2ull_encoded(last_entry_child_txt); - - bool start_streaming = (strcmp(start_streaming_txt, "true") == 0); - time_t first_entry_requested = (time_t) str2ull_encoded(first_entry_requested_txt); - time_t last_entry_requested = (time_t) str2ull_encoded(last_entry_requested_txt); - - // the optional child world time - time_t child_world_time = (child_world_time_txt && *child_world_time_txt) ? (time_t) str2ull_encoded( - child_world_time_txt) : now_realtime_sec(); - - RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_REPLAY_END); - if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); - - RRDSET *st = pluginsd_require_scope_chart(parser, PLUGINSD_KEYWORD_REPLAY_END, PLUGINSD_KEYWORD_REPLAY_BEGIN); - if(!st) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); - -#ifdef NETDATA_LOG_REPLICATION_REQUESTS - internal_error(true, - "PLUGINSD REPLAY: 'host:%s/chart:%s': got a " PLUGINSD_KEYWORD_REPLAY_END " child db from %llu to %llu, start_streaming %s, had requested from %llu to %llu, wall clock %llu", - rrdhost_hostname(host), rrdset_id(st), - (unsigned long long)first_entry_child, (unsigned long long)last_entry_child, - start_streaming?"true":"false", - (unsigned long long)first_entry_requested, (unsigned long long)last_entry_requested, - (unsigned long long)child_world_time - ); -#endif - - parser->user.data_collections_count++; - - if(parser->user.replay.rset_enabled && st->rrdhost->receiver) { - time_t now = now_realtime_sec(); - time_t started = st->rrdhost->receiver->replication_first_time_t; - time_t current = parser->user.replay.end_time; - - if(started && current > started) { - host->rrdpush_receiver_replication_percent = (NETDATA_DOUBLE) (current - started) * 100.0 / (NETDATA_DOUBLE) (now - started); - worker_set_metric(WORKER_RECEIVER_JOB_REPLICATION_COMPLETION, - host->rrdpush_receiver_replication_percent); - } - } - - parser->user.replay.start_time = 0; - parser->user.replay.end_time = 0; - parser->user.replay.start_time_ut = 0; - parser->user.replay.end_time_ut = 0; - parser->user.replay.wall_clock_time = 0; - parser->user.replay.rset_enabled = false; - - st->counter++; - st->counter_done++; - store_metric_collection_completed(); - -#ifdef NETDATA_LOG_REPLICATION_REQUESTS - st->replay.start_streaming = false; - st->replay.after = 
0; - st->replay.before = 0; - if(start_streaming) - st->replay.log_next_data_collection = true; -#endif - - if (start_streaming) { - if (st->update_every != update_every_child) - rrdset_set_update_every_s(st, update_every_child); - - if(rrdset_flag_check(st, RRDSET_FLAG_RECEIVER_REPLICATION_IN_PROGRESS)) { - rrdset_flag_set(st, RRDSET_FLAG_RECEIVER_REPLICATION_FINISHED); - rrdset_flag_clear(st, RRDSET_FLAG_RECEIVER_REPLICATION_IN_PROGRESS); - rrdset_flag_clear(st, RRDSET_FLAG_SYNC_CLOCK); - rrdhost_receiver_replicating_charts_minus_one(st->rrdhost); - } -#ifdef NETDATA_LOG_REPLICATION_REQUESTS - else - internal_error(true, "REPLAY ERROR: 'host:%s/chart:%s' got a " PLUGINSD_KEYWORD_REPLAY_END " with enable_streaming = true, but there is no replication in progress for this chart.", - rrdhost_hostname(host), rrdset_id(st)); -#endif - - pluginsd_clear_scope_chart(parser, PLUGINSD_KEYWORD_REPLAY_END); - - host->rrdpush_receiver_replication_percent = 100.0; - worker_set_metric(WORKER_RECEIVER_JOB_REPLICATION_COMPLETION, host->rrdpush_receiver_replication_percent); - - return PARSER_RC_OK; - } - - pluginsd_clear_scope_chart(parser, PLUGINSD_KEYWORD_REPLAY_END); - - rrdcontext_updated_retention_rrdset(st); - - bool ok = replicate_chart_request(send_to_plugin, parser, host, st, - first_entry_child, last_entry_child, child_world_time, - first_entry_requested, last_entry_requested); - return ok ? PARSER_RC_OK : PARSER_RC_ERROR; -} diff --git a/src/collectors/plugins.d/pluginsd_replication.h b/src/collectors/plugins.d/pluginsd_replication.h deleted file mode 100644 index 1c6f617e6..000000000 --- a/src/collectors/plugins.d/pluginsd_replication.h +++ /dev/null @@ -1,14 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#ifndef NETDATA_PLUGINSD_REPLICATION_H -#define NETDATA_PLUGINSD_REPLICATION_H - -#include "pluginsd_internals.h" - -PARSER_RC pluginsd_replay_begin(char **words, size_t num_words, PARSER *parser); -PARSER_RC pluginsd_replay_set(char **words, size_t num_words, PARSER *parser); -PARSER_RC pluginsd_replay_rrddim_collection_state(char **words, size_t num_words, PARSER *parser); -PARSER_RC pluginsd_replay_rrdset_collection_state(char **words, size_t num_words, PARSER *parser); -PARSER_RC pluginsd_replay_end(char **words, size_t num_words, PARSER *parser); - -#endif //NETDATA_PLUGINSD_REPLICATION_H diff --git a/src/collectors/proc.plugin/README.md b/src/collectors/proc.plugin/README.md index 79bfd8645..8523309c7 100644 --- a/src/collectors/proc.plugin/README.md +++ b/src/collectors/proc.plugin/README.md @@ -6,35 +6,35 @@ This plugin is not an external plugin, but one of Netdata's threads. 
In detail, it collects metrics from: -- `/proc/net/dev` (all network interfaces for all their values) -- `/proc/diskstats` (all disks for all their values) -- `/proc/mdstat` (status of RAID arrays) -- `/proc/net/snmp` (total IPv4, TCP and UDP usage) -- `/proc/net/snmp6` (total IPv6 usage) -- `/proc/net/netstat` (more IPv4 usage) -- `/proc/net/wireless` (wireless extension) -- `/proc/net/stat/nf_conntrack` (connection tracking performance) -- `/proc/net/stat/synproxy` (synproxy performance) -- `/proc/net/ip_vs/stats` (IPVS connection statistics) -- `/proc/stat` (CPU utilization and attributes) -- `/proc/meminfo` (memory information) -- `/proc/vmstat` (system performance) -- `/proc/net/rpc/nfsd` (NFS server statistics for both v3 and v4 NFS servers) -- `/sys/fs/cgroup` (Control Groups - Linux Containers) -- `/proc/self/mountinfo` (mount points) -- `/proc/interrupts` (total and per core hardware interrupts) -- `/proc/softirqs` (total and per core software interrupts) -- `/proc/loadavg` (system load and total processes running) -- `/proc/pressure/{cpu,memory,io}` (pressure stall information) -- `/proc/sys/kernel/random/entropy_avail` (random numbers pool availability - used in cryptography) -- `/proc/spl/kstat/zfs/arcstats` (status of ZFS adaptive replacement cache) -- `/proc/spl/kstat/zfs/pool/state` (state of ZFS pools) -- `/sys/class/power_supply` (power supply properties) -- `/sys/class/infiniband` (infiniband interconnect) -- `/sys/class/drm` (AMD GPUs) -- `ipc` (IPC semaphores and message queues) -- `ksm` Kernel Same-Page Merging performance (several files under `/sys/kernel/mm/ksm`). -- `netdata` (internal Netdata resources utilization) +- `/proc/net/dev` (all network interfaces for all their values) +- `/proc/diskstats` (all disks for all their values) +- `/proc/mdstat` (status of RAID arrays) +- `/proc/net/snmp` (total IPv4, TCP and UDP usage) +- `/proc/net/snmp6` (total IPv6 usage) +- `/proc/net/netstat` (more IPv4 usage) +- `/proc/net/wireless` (wireless extension) +- `/proc/net/stat/nf_conntrack` (connection tracking performance) +- `/proc/net/stat/synproxy` (synproxy performance) +- `/proc/net/ip_vs/stats` (IPVS connection statistics) +- `/proc/stat` (CPU utilization and attributes) +- `/proc/meminfo` (memory information) +- `/proc/vmstat` (system performance) +- `/proc/net/rpc/nfsd` (NFS server statistics for both v3 and v4 NFS servers) +- `/sys/fs/cgroup` (Control Groups - Linux Containers) +- `/proc/self/mountinfo` (mount points) +- `/proc/interrupts` (total and per core hardware interrupts) +- `/proc/softirqs` (total and per core software interrupts) +- `/proc/loadavg` (system load and total processes running) +- `/proc/pressure/{cpu,memory,io}` (pressure stall information) +- `/proc/sys/kernel/random/entropy_avail` (random numbers pool availability - used in cryptography) +- `/proc/spl/kstat/zfs/arcstats` (status of ZFS adaptive replacement cache) +- `/proc/spl/kstat/zfs/pool/state` (state of ZFS pools) +- `/sys/class/power_supply` (power supply properties) +- `/sys/class/infiniband` (infiniband interconnect) +- `/sys/class/drm` (AMD GPUs) +- `ipc` (IPC semaphores and message queues) +- `ksm` Kernel Same-Page Merging performance (several files under `/sys/kernel/mm/ksm`). +- `netdata` (internal Netdata resources utilization) - - - @@ -48,47 +48,47 @@ Hopefully, the Linux kernel provides many metrics that can provide deep insights ### Monitored disk metrics -- **I/O bandwidth/s (kb/s)** +- **I/O bandwidth/s (kb/s)** The amount of data transferred from and to the disk. 
-- **Amount of discarded data (kb/s)** -- **I/O operations/s** +- **Amount of discarded data (kb/s)** +- **I/O operations/s** The number of I/O operations completed. -- **Extended I/O operations/s** +- **Extended I/O operations/s** The number of extended I/O operations completed. -- **Queued I/O operations** +- **Queued I/O operations** The number of currently queued I/O operations. For traditional disks that execute commands one after another, one of them is being run by the disk and the rest are just waiting in a queue. -- **Backlog size (time in ms)** +- **Backlog size (time in ms)** The expected duration of the currently queued I/O operations. -- **Utilization (time percentage)** +- **Utilization (time percentage)** The percentage of time the disk was busy with something. This is a very interesting metric, since for most disks, which execute commands sequentially, **this is the key indication of congestion**. A sequential disk that is busy 100% of the available time has no time to do anything more, so even if the bandwidth or the number of operations executed by the disk is low, its capacity has been reached. Of course, for newer disk technologies (like fusion cards) that are capable of executing multiple commands in parallel, this metric is meaningless. -- **Average I/O operation time (ms)** +- **Average I/O operation time (ms)** The average time for I/O requests issued to the device to be served. This includes the time spent by the requests in queue and the time spent servicing them. -- **Average I/O operation time for extended operations (ms)** +- **Average I/O operation time for extended operations (ms)** The average time for extended I/O requests issued to the device to be served. This includes the time spent by the requests in queue and the time spent servicing them. -- **Average I/O operation size (kb)** +- **Average I/O operation size (kb)** The average amount of data of the completed I/O operations. -- **Average amount of discarded data (kb)** +- **Average amount of discarded data (kb)** The average amount of data of the completed discard operations. -- **Average Service Time (ms)** +- **Average Service Time (ms)** The average service time for completed I/O operations. This metric is calculated using the total busy time of the disk and the number of completed operations. If the disk is able to execute multiple operations in parallel, the reported average service time will be misleading. -- **Average Service Time for extended I/O operations (ms)** +- **Average Service Time for extended I/O operations (ms)** The average service time for completed extended I/O operations. -- **Merged I/O operations/s** +- **Merged I/O operations/s** The Linux kernel is capable of merging I/O operations. So, if two requests to read data from the disk are adjacent, the Linux kernel may merge them into one before handing them to the disk. This metric measures the number of operations that have been merged by the Linux kernel. -- **Merged discard operations/s** -- **Total I/O time** +- **Merged discard operations/s** +- **Total I/O time** The sum of the duration of all completed I/O operations. This number can exceed the interval if the disk is able to execute multiple I/O operations in parallel. -- **Space usage** +- **Space usage** For mounted disks, Netdata will provide a chart for their space, with 3 dimensions: - 1. free - 2. used - 3. reserved for root -- **inode usage** + 1. free + 2. used + 3.
reserved for root +- **inode usage** For mounted disks, Netdata will provide a chart for their inodes (number of files and directories), with 3 dimensions: - 1. free - 2. used - 3. reserved for root + 1. free + 2. used + 3. reserved for root ### disk names @@ -100,9 +100,9 @@ By default, Netdata will enable monitoring metrics only when they are not zero. Netdata categorizes all block devices into 3 categories: -1. physical disks (i.e. block devices that do not have child devices and are not partitions) -2. virtual disks (i.e. block devices that have child devices - like RAID devices) -3. disk partitions (i.e. block devices that are part of a physical disk) +1. physical disks (i.e. block devices that do not have child devices and are not partitions) +2. virtual disks (i.e. block devices that have child devices - like RAID devices) +3. disk partitions (i.e. block devices that are part of a physical disk) Performance metrics are enabled by default for all disk devices, except partitions and unmounted virtual disks. Of course, you can enable or disable monitoring for any block device by editing the Netdata configuration file. @@ -118,7 +118,7 @@ mv netdata.conf.new netdata.conf Then edit `netdata.conf` and find the following section. This is the basic plugin configuration. -``` +```text [plugin:proc:/proc/diskstats] # enable new disks detected at runtime = yes # performance metrics for physical disks = auto @@ -133,7 +133,7 @@ Then edit `netdata.conf` and find the following section. This is the basic plugi # extended operations for all disks = auto # backlog for all disks = auto # bcache for all disks = auto - # bcache priority stats update every = 0 + # bcache priority stats update every = off # remove charts of removed disks = yes # path to get block device = /sys/block/%s # path to get block device bcache = /sys/block/%s/bcache @@ -152,25 +152,25 @@ Then edit `netdata.conf` and find the following section. This is the basic plugi For each virtual disk, physical disk and partition you will have a section like this: -``` +```text [plugin:proc:/proc/diskstats:sda] - # enable = yes - # enable performance metrics = auto - # bandwidth = auto - # operations = auto - # merged operations = auto - # i/o time = auto - # queued operations = auto - # utilization percentage = auto + # enable = yes + # enable performance metrics = auto + # bandwidth = auto + # operations = auto + # merged operations = auto + # i/o time = auto + # queued operations = auto + # utilization percentage = auto # extended operations = auto - # backlog = auto + # backlog = auto ``` For all configuration options: -- `auto` = enable monitoring if the collected values are not zero -- `yes` = enable monitoring -- `no` = disable monitoring +- `auto` = enable monitoring if the collected values are not zero +- `yes` = enable monitoring +- `no` = disable monitoring Of course, to set options, you will have to uncomment them. The comments show the internal defaults. @@ -180,14 +180,14 @@ After saving `/etc/netdata/netdata.conf`, restart your Netdata to apply them.
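For instance, here is a minimal sketch of that edit-and-restart cycle (the paths below cover the native and static install layouts used elsewhere in this document; a systemd service is assumed for the restart):

```bash
# Enter the Netdata config directory (native package or static install)
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
# Edit the main configuration with the bundled helper script
sudo ./edit-config netdata.conf
# Restart the agent so the new settings take effect (systemd assumed)
sudo systemctl restart netdata
```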
You can quite easily disable performance metrics for an individual device, for example: -``` +```text [plugin:proc:/proc/diskstats:sda] - enable performance metrics = no + enable performance metrics = no ``` But sometimes you need to disable performance metrics for all devices of the same type; to do that, you need to figure out the device type from `/proc/diskstats`, for example: -``` +```text 7 0 loop0 1651 0 3452 168 0 0 0 0 0 8 168 7 1 loop1 4955 0 11924 880 0 0 0 0 0 64 880 7 2 loop2 36 0 216 4 0 0 0 0 0 4 4 @@ -200,7 +200,7 @@ But sometimes you need disable performance metrics for all devices with the same All zram devices start with major number `251` and all loop devices start with major number `7`. So, to disable performance metrics for all loop devices you could add `performance metrics for disks with major 7 = no` to the `[plugin:proc:/proc/diskstats]` section. -``` +```text [plugin:proc:/proc/diskstats] performance metrics for disks with major 7 = no ``` @@ -209,34 +209,34 @@ So, to disable performance metrics for all loop devices you could add ### Monitored RAID array metrics -1. **Health** Number of failed disks in every array (aggregate chart). +1. **Health** Number of failed disks in every array (aggregate chart). -2. **Disks stats** +2. **Disks stats** -- total (number of devices array ideally would have) -- inuse (number of devices currently are in use) + - total (number of devices the array would ideally have) + - inuse (number of devices currently in use) -3. **Mismatch count** +3. **Mismatch count** -- unsynchronized blocks + - unsynchronized blocks -4. **Current status** +4. **Current status** -- resync in percent -- recovery in percent -- reshape in percent -- check in percent + - resync in percent + - recovery in percent + - reshape in percent + - check in percent -5. **Operation status** (if resync/recovery/reshape/check is active) +5. **Operation status** (if resync/recovery/reshape/check is active) -- finish in minutes -- speed in megabytes/s + - finish in minutes + - speed in megabytes/s -6. **Nonredundant array availability** +6. **Non-redundant array availability** #### configuration -``` +```text [plugin:proc:/proc/mdstat] # faulty devices = yes # nonredundant arrays availability = yes @@ -267,7 +267,7 @@ If your system has more than 50 processors and you would like to see the CPU the state charts that are automatically disabled, you can set the following boolean options in the `[plugin:proc:/proc/stat]` section. -```conf +```text keep per core files open = yes keep cpuidle files open = yes core_throttle_count = yes @@ -311,50 +311,50 @@ each state.
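As an illustrative sketch (not part of the original text), you can list the idle states the kernel exposes, which are the `/sys` files this module reads for its cpuidle charts; this assumes a Linux system with cpuidle support:

```bash
# Enumerate the idle (C-)states the kernel exposes for cpu0;
# the /proc/stat module reads these files to build its cpuidle charts
for state in /sys/devices/system/cpu/cpu0/cpuidle/state*; do
    printf '%s: %s\n' "${state##*/}" "$(cat "$state/name")"
done
```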
### Monitored memory metrics -- Amount of memory swapped in/out -- Amount of memory paged from/to disk -- Number of memory page faults -- Number of out of memory kills -- Number of NUMA events +- Amount of memory swapped in/out +- Amount of memory paged from/to disk +- Number of memory page faults +- Number of out of memory kills +- Number of NUMA events ### Configuration -```conf +```text [plugin:proc:/proc/vmstat] - filename to monitor = /proc/vmstat - swap i/o = auto - disk i/o = yes - memory page faults = yes - out of memory kills = yes - system-wide numa metric summary = auto + filename to monitor = /proc/vmstat + swap i/o = auto + disk i/o = yes + memory page faults = yes + out of memory kills = yes + system-wide numa metric summary = auto ``` ## Monitoring Network Interfaces ### Monitored network interface metrics -- **Physical Network Interfaces Aggregated Bandwidth (kilobits/s)** +- **Physical Network Interfaces Aggregated Bandwidth (kilobits/s)** The amount of data received and sent through all physical interfaces in the system. This is the source of data for the Net Inbound and Net Outbound dials in the System Overview section. -- **Bandwidth (kilobits/s)** +- **Bandwidth (kilobits/s)** The amount of data received and sent through the interface. -- **Packets (packets/s)** +- **Packets (packets/s)** The number of packets received, packets sent, and multicast packets transmitted through the interface. -- **Interface Errors (errors/s)** +- **Interface Errors (errors/s)** The number of errors for the inbound and outbound traffic on the interface. -- **Interface Drops (drops/s)** +- **Interface Drops (drops/s)** The number of packets dropped for the inbound and outbound traffic on the interface. -- **Interface FIFO Buffer Errors (errors/s)** +- **Interface FIFO Buffer Errors (errors/s)** The number of FIFO buffer errors encountered while receiving and transmitting data through the interface. -- **Compressed Packets (packets/s)** +- **Compressed Packets (packets/s)** The number of compressed packets transmitted or received by the device driver. -- **Network Interface Events (events/s)** +- **Network Interface Events (events/s)** The number of packet framing errors, collisions detected on the interface, and carrier losses detected by the device driver. By default, Netdata enables monitoring metrics only when they are not zero. If they are constantly zero, they are ignored. Metrics that start having values after Netdata is started will be detected, and charts will be automatically added to the dashboard (a refresh of the dashboard is needed for them to appear, though). @@ -363,7 +363,7 @@ By default Netdata will enable monitoring metrics only when they are not zero. I The settings for monitoring wireless interfaces are in the `[plugin:proc:/proc/net/wireless]` section of your `netdata.conf` file. -```conf +```text status for all interfaces = yes quality for all interfaces = yes discarded packets for all interfaces = yes @@ -372,62 +372,62 @@ The settings for monitoring wireless is in the `[plugin:proc:/proc/net/wireless] You can set the following values for each configuration option: -- `auto` = enable monitoring if the collected values are not zero -- `yes` = enable monitoring -- `no` = disable monitoring +- `auto` = enable monitoring if the collected values are not zero +- `yes` = enable monitoring +- `no` = disable monitoring #### Monitored wireless interface metrics -- **Status** +- **Status** The current state of the interface. This is a device-dependent option.
-- **Link** - Overall quality of the link. +- **Link** + Overall quality of the link. -- **Level** +- **Level** Received signal strength (RSSI), which indicates how strong the received signal is. - -- **Noise** - Background noise level. - -- **Discarded packets** - Discarded packets for: Number of packets received with a different NWID or ESSID (`nwid`), unable to decrypt (`crypt`), hardware was not able to properly re-assemble the link layer fragments (`frag`), packets failed to deliver (`retry`), and packets lost in relation with specific wireless operations (`misc`). - -- **Missed beacon** + +- **Noise** + Background noise level. + +- **Discarded packets** + Discarded packets for: number of packets received with a different NWID or ESSID (`nwid`), packets the hardware was unable to decrypt (`crypt`), link-layer fragments the hardware could not properly re-assemble (`frag`), packets that failed to be delivered (`retry`), and packets lost in relation to specific wireless operations (`misc`). + +- **Missed beacon** Number of periodic beacons from the cell or the access point the interface has missed. - -#### Wireless configuration + +#### Wireless configuration #### alerts There are several alerts defined in `health.d/net.conf`. -The tricky ones are `inbound packets dropped` and `inbound packets dropped ratio`. They have quite a strict policy so that they warn users about possible issues. These alerts can be annoying for some network configurations. It is especially true for some bonding configurations if an interface is a child or a bonding interface itself. If it is expected to have a certain number of drops on an interface for a certain network configuration, a separate alert with different triggering thresholds can be created or the existing one can be disabled for this specific interface. It can be done with the help of the [families](/src/health/REFERENCE.md#alert-line-families) line in the alert configuration. For example, if you want to disable the `inbound packets dropped` alert for `eth0`, set `families: !eth0 *` in the alert definition for `template: inbound_packets_dropped`. +The tricky ones are `inbound packets dropped` and `inbound packets dropped ratio`. They have quite a strict policy so that they warn users about possible issues. These alerts can be annoying for some network configurations. This is especially true for some bonding configurations, if an interface is a child or a bonding interface itself. If a certain number of drops is expected on an interface for a certain network configuration, a separate alert with different triggering thresholds can be created, or the existing one can be disabled for this specific interface. This can be done with the help of the families line in the alert configuration. For example, if you want to disable the `inbound packets dropped` alert for `eth0`, set `families: !eth0 *` in the alert definition for `template: inbound_packets_dropped`.
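A hedged sketch of that workflow follows (the paths and the `netdatacli` reload step are assumptions based on a standard install; adjust for yours):

```bash
# Open the health configuration that defines these alerts
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
sudo ./edit-config health.d/net.conf
# In the "template: inbound_packets_dropped" stanza, set:
#   families: !eth0 *
# then reload the health configuration without restarting the agent:
sudo netdatacli reload-health
```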
#### configuration Module configuration: -``` +```text [plugin:proc:/proc/net/dev] - # filename to monitor = /proc/net/dev - # path to get virtual interfaces = /sys/devices/virtual/net/%s - # path to get net device speed = /sys/class/net/%s/speed - # enable new interfaces detected at runtime = auto - # bandwidth for all interfaces = auto - # packets for all interfaces = auto - # errors for all interfaces = auto - # drops for all interfaces = auto - # fifo for all interfaces = auto - # compressed packets for all interfaces = auto - # frames, collisions, carrier counters for all interfaces = auto - # disable by default interfaces matching = lo fireqos* *-ifb - # refresh interface speed every seconds = 10 + # filename to monitor = /proc/net/dev + # path to get virtual interfaces = /sys/devices/virtual/net/%s + # path to get net device speed = /sys/class/net/%s/speed + # enable new interfaces detected at runtime = auto + # bandwidth for all interfaces = auto + # packets for all interfaces = auto + # errors for all interfaces = auto + # drops for all interfaces = auto + # fifo for all interfaces = auto + # compressed packets for all interfaces = auto + # frames, collisions, carrier counters for all interfaces = auto + # disable by default interfaces matching = lo fireqos* *-ifb + # refresh interface speed every seconds = 10 ``` Per interface configuration: -``` +```text [plugin:proc:/proc/net/dev:enp0s3] # enabled = yes # virtual = no @@ -444,8 +444,6 @@ Per interface configuration: ![image6](https://cloud.githubusercontent.com/assets/2662304/14253733/53550b16-fa95-11e5-8d9d-4ed171df4735.gif) ---- - SYNPROXY is a TCP SYN packet proxy. It can be used to protect any TCP server (like a web server) from SYN floods and similar DDoS attacks. SYNPROXY is a netfilter module in the Linux kernel (since version 3.12). It is optimized to handle millions of packets per second, utilizing all available CPUs without any concurrency locking between the connections. @@ -454,8 +452,8 @@ The net effect of this, is that the real servers will not notice any change duri Netdata does not enable SYNPROXY. It just uses the SYNPROXY metrics exposed by your kernel, so you will first need to configure it. The hard way is to run iptables SYNPROXY commands directly on the console. An easier way is to use [FireHOL](https://firehol.org/), which is a firewall manager for iptables. FireHOL can configure SYNPROXY using the following setup guides: -- **[Working with SYNPROXY](https://github.com/firehol/firehol/wiki/Working-with-SYNPROXY)** -- **[Working with SYNPROXY and traps](https://github.com/firehol/firehol/wiki/Working-with-SYNPROXY-and-traps)** +- **[Working with SYNPROXY](https://github.com/firehol/firehol/wiki/Working-with-SYNPROXY)** +- **[Working with SYNPROXY and traps](https://github.com/firehol/firehol/wiki/Working-with-SYNPROXY-and-traps)** ### Real-time monitoring of Linux Anti-DDoS @@ -463,10 +461,10 @@ Netdata is able to monitor in real-time (per second updates) the operation of th It visualizes 4 charts: -1. TCP SYN Packets received on ports operated by SYNPROXY -2. TCP Cookies (valid, invalid, retransmits) -3. Connections Reopened -4. Entries used +1. TCP SYN Packets received on ports operated by SYNPROXY +2. TCP Cookies (valid, invalid, retransmits) +3. Connections Reopened +4. Entries used Example image: @@ -483,37 +481,37 @@ battery capacity. Depending on the underlying driver, it may provide the following charts and metrics: -1.
Capacity: The power supply capacity expressed as a percentage. - - capacity_now + - capacity_now -2. Charge: The charge for the power supply, expressed as amphours. +2. Charge: The charge for the power supply, expressed as amp-hours. - - charge_full_design - - charge_full - - charge_now - - charge_empty - - charge_empty_design + - charge_full_design + - charge_full + - charge_now + - charge_empty + - charge_empty_design -3. Energy: The energy for the power supply, expressed as watthours. +3. Energy: The energy for the power supply, expressed as watthours. - - energy_full_design - - energy_full - - energy_now - - energy_empty - - energy_empty_design + - energy_full_design + - energy_full + - energy_now + - energy_empty + - energy_empty_design -4. Voltage: The voltage for the power supply, expressed as volts. +4. Voltage: The voltage for the power supply, expressed as volts. - - voltage_max_design - - voltage_max - - voltage_now - - voltage_min - - voltage_min_design + - voltage_max_design + - voltage_max + - voltage_now + - voltage_min + - voltage_min_design -#### configuration +### configuration -``` +```text [plugin:proc:/sys/class/power_supply] # battery capacity = yes # battery charge = no @@ -524,18 +522,18 @@ and metrics: # directory to monitor = /sys/class/power_supply ``` -#### notes +### notes -- Most drivers provide at least the first chart. Battery powered ACPI +- Most drivers provide at least the first chart. Battery powered ACPI compliant systems (like most laptops) provide all but the third, but do not provide all of the metrics for each chart. -- Current, energy, and voltages are reported with a *very* high precision +- Current, energy, and voltages are reported with a *very* high precision by the power_supply framework. Usually, this is far higher than the actual hardware supports reporting, so expect to see changes in these charts jump instead of scaling smoothly. -- If `max` or `full` attribute is defined by the driver, but not a +- If `max` or `full` attribute is defined by the driver, but not a corresponding `min` or `empty` attribute, then Netdata will still provide the corresponding `min` or `empty`, which will then always read as zero. This way, alerts which match on these will still work. @@ -548,17 +546,17 @@ This module monitors every active Infiniband port. It provides generic counters Each port will have its counters metrics monitored, grouped in the following charts: -- **Bandwidth usage** +- **Bandwidth usage** Sent/Received data, in KB/s -- **Packets Statistics** +- **Packets Statistics** Sent/Received packets, in 3 categories: total, unicast and multicast. -- **Errors Statistics** +- **Errors Statistics** Many errors counters are provided, presenting statistics for: - - Packets: malformed, sent/received discarded by card/switch, missing resource - - Link: downed, recovered, integrity error, minor error - - Other events: Tick Wait to send, buffer overrun + - Packets: malformed, sent/received discarded by card/switch, missing resource + - Link: downed, recovered, integrity error, minor error + - Other events: Tick Wait to send, buffer overrun If your vendor is supported, you'll also get HW-Counters statistics. These being vendor specific, please refer to their documentation. @@ -568,7 +566,7 @@ If your vendor is supported, you'll also get HW-Counters statistics. 
These being Default configuration will monitor only enabled infiniband ports, and refresh newly activated or created ports every 30 seconds. -``` +```text [plugin:proc:/sys/class/infiniband] # dirname to monitor = /sys/class/infiniband # bandwidth counters = yes @@ -578,7 +576,7 @@ Default configuration will monitor only enabled infiniband ports, and refresh ne # hardware errors counters = auto # monitor only ports being active = auto # disable by default interfaces matching = - # refresh ports state every seconds = 30 + # refresh ports state every = 30s ``` ## AMD GPUs @@ -589,45 +587,46 @@ This module monitors every AMD GPU card discovered at agent startup. The following charts will be provided: -- **GPU utilization** -- **GPU memory utilization** -- **GPU clock frequency** -- **GPU memory clock frequency** -- **VRAM memory usage percentage** -- **VRAM memory usage** -- **visible VRAM memory usage percentage** -- **visible VRAM memory usage** -- **GTT memory usage percentage** -- **GTT memory usage** +- **GPU utilization** +- **GPU memory utilization** +- **GPU clock frequency** +- **GPU memory clock frequency** +- **VRAM memory usage percentage** +- **VRAM memory usage** +- **visible VRAM memory usage percentage** +- **visible VRAM memory usage** +- **GTT memory usage percentage** +- **GTT memory usage** ### configuration The `drm` path can be configured if it differs from the default: -``` +```text [plugin:proc:/sys/class/drm] # directory to monitor = /sys/class/drm ``` -> [!NOTE] -> Temperature, fan speed, voltage and power metrics for AMD GPUs can be monitored using the [Sensors](/src/collectors/charts.d.plugin/sensors/README.md) plugin. +> **Note** + +> Temperature, fan speed, voltage and power metrics for AMD GPUs can be monitored using the [Sensors](/src/go/plugin/go.d/modules/sensors/README.md) plugin. ## IPC ### Monitored IPC metrics -- **number of messages in message queues** -- **amount of memory used by message queues** -- **number of semaphores** -- **number of semaphore arrays** -- **number of shared memory segments** -- **amount of memory used by shared memory segments** +- **number of messages in message queues** +- **amount of memory used by message queues** +- **number of semaphores** +- **number of semaphore arrays** +- **number of shared memory segments** +- **amount of memory used by shared memory segments** Since the message queue charts are dynamic, sane limits are applied for the number of dimensions per chart (the limit is configurable). ### configuration -``` +```text [plugin:proc:ipc] # message queues = yes # semaphore totals = yes @@ -636,5 +635,3 @@ As far as the message queue charts are dynamic, sane limits are applied for the # shm filename to monitor = /proc/sysvipc/shm # max dimensions in memory allowed = 50 ``` - - diff --git a/src/collectors/proc.plugin/integrations/system_statistics.md b/src/collectors/proc.plugin/integrations/system_statistics.md index 0b6e38820..1381bdb1d 100644 --- a/src/collectors/proc.plugin/integrations/system_statistics.md +++ b/src/collectors/proc.plugin/integrations/system_statistics.md @@ -150,8 +150,8 @@ The file format is a modified INI syntax. The general structure is: [section2] option3 = some third value ``` -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/collectors/proc.plugin/ipc.c b/src/collectors/proc.plugin/ipc.c index 5b47116b9..c280254ac 100644 --- a/src/collectors/proc.plugin/ipc.c +++ b/src/collectors/proc.plugin/ipc.c @@ -182,7 +182,7 @@ static inline int ipc_sem_get_status(struct ipc_status *st) { return 0; } -int ipc_msq_get_info(char *msg_filename, struct message_queue **message_queue_root) { +static int ipc_msq_get_info(const char *msg_filename, struct message_queue **message_queue_root) { static procfile *ff; struct message_queue *msq; @@ -238,7 +238,7 @@ int ipc_msq_get_info(char *msg_filename, struct message_queue **message_queue_ro return 0; } -int ipc_shm_get_info(char *shm_filename, struct shm_stats *shm) { +static int ipc_shm_get_info(const char *shm_filename, struct shm_stats *shm) { static procfile *ff; if(unlikely(!ff)) { @@ -287,10 +287,10 @@ int do_ipc(int update_every, usec_t dt) { static const RRDVAR_ACQUIRED *arrays_max = NULL, *semaphores_max = NULL; static RRDSET *st_arrays = NULL; static RRDDIM *rd_arrays = NULL; - static char *msg_filename = NULL; + static const char *msg_filename = NULL; static struct message_queue *message_queue_root = NULL; static long long dimensions_limit; - static char *shm_filename = NULL; + static const char *shm_filename = NULL; if(unlikely(do_sem == -1)) { do_msg = config_get_boolean("plugin:proc:ipc", "message queues", CONFIG_BOOLEAN_YES); diff --git a/src/collectors/proc.plugin/plugin_proc.c b/src/collectors/proc.plugin/plugin_proc.c index b4a856467..0a1903ca0 100644 --- a/src/collectors/proc.plugin/plugin_proc.c +++ b/src/collectors/proc.plugin/plugin_proc.c @@ -226,9 +226,8 @@ void *proc_main(void *ptr) worker_register_job_name(i, proc_modules[i].dim); } - usec_t step = localhost->rrd_update_every * USEC_PER_SEC; heartbeat_t hb; - heartbeat_init(&hb); + heartbeat_init(&hb, localhost->rrd_update_every * USEC_PER_SEC); inside_lxc_container = is_lxcfs_proc_mounted(); is_mem_swap_enabled = is_swap_enabled(); @@ -245,7 +244,7 @@ void *proc_main(void *ptr) while(service_running(SERVICE_COLLECTORS)) { worker_is_idle(); - usec_t hb_dt = heartbeat_next(&hb, step); + usec_t hb_dt = heartbeat_next(&hb); if(unlikely(!service_running(SERVICE_COLLECTORS))) break; @@ -279,7 +278,7 @@ int get_numa_node_count(void) char name[FILENAME_MAX + 1]; snprintfz(name, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/system/node"); - char *dirname = config_get("plugin:proc:/sys/devices/system/node", "directory to monitor", name); + const char *dirname = config_get("plugin:proc:/sys/devices/system/node", "directory to monitor", name); DIR *dir = opendir(dirname); if (dir) { diff --git a/src/collectors/proc.plugin/proc_diskstats.c b/src/collectors/proc.plugin/proc_diskstats.c index 015a985cc..2c7320aa1 100644 --- a/src/collectors/proc.plugin/proc_diskstats.c +++ b/src/collectors/proc.plugin/proc_diskstats.c @@ -81,30 +81,25 @@ static struct disk { usec_t bcache_priority_stats_elapsed_usec; ND_DISK_IO disk_io; + ND_DISK_OPS disk_ops; + ND_DISK_QOPS disk_qops; + ND_DISK_UTIL disk_util; + ND_DISK_BUSY disk_busy; + ND_DISK_IOTIME disk_iotime; + 
ND_DISK_AWAIT disk_await; + ND_DISK_SVCTM disk_svctm; + ND_DISK_AVGSZ disk_avgsz; RRDSET *st_ext_io; RRDDIM *rd_io_discards; - RRDSET *st_ops; - RRDDIM *rd_ops_reads; - RRDDIM *rd_ops_writes; - RRDSET *st_ext_ops; RRDDIM *rd_ops_discards; RRDDIM *rd_ops_flushes; - RRDSET *st_qops; - RRDDIM *rd_qops_operations; - RRDSET *st_backlog; RRDDIM *rd_backlog_backlog; - RRDSET *st_busy; - RRDDIM *rd_busy_busy; - - RRDSET *st_util; - RRDDIM *rd_util_utilization; - RRDSET *st_mops; RRDDIM *rd_mops_reads; RRDDIM *rd_mops_writes; @@ -112,32 +107,17 @@ static struct disk { RRDSET *st_ext_mops; RRDDIM *rd_mops_discards; - RRDSET *st_iotime; - RRDDIM *rd_iotime_reads; - RRDDIM *rd_iotime_writes; - RRDSET *st_ext_iotime; RRDDIM *rd_iotime_discards; RRDDIM *rd_iotime_flushes; - RRDSET *st_await; - RRDDIM *rd_await_reads; - RRDDIM *rd_await_writes; - RRDSET *st_ext_await; RRDDIM *rd_await_discards; RRDDIM *rd_await_flushes; - RRDSET *st_avgsz; - RRDDIM *rd_avgsz_reads; - RRDDIM *rd_avgsz_writes; - RRDSET *st_ext_avgsz; RRDDIM *rd_avgsz_discards; - RRDSET *st_svctm; - RRDDIM *rd_svctm_svctm; - RRDSET *st_bcache_size; RRDDIM *rd_bcache_dirty_size; @@ -180,16 +160,16 @@ static struct disk { #define rrdset_obsolete_and_pointer_null(st) do { if(st) { rrdset_is_obsolete___safe_from_collector_thread(st); (st) = NULL; } } while(st) -static char *path_to_sys_dev_block_major_minor_string = NULL; -static char *path_to_sys_block_device = NULL; -static char *path_to_sys_block_device_bcache = NULL; -static char *path_to_sys_devices_virtual_block_device = NULL; -static char *path_to_device_mapper = NULL; -static char *path_to_dev_disk = NULL; -static char *path_to_sys_block = NULL; -static char *path_to_device_label = NULL; -static char *path_to_device_id = NULL; -static char *path_to_veritas_volume_groups = NULL; +static const char *path_to_sys_dev_block_major_minor_string = NULL; +static const char *path_to_sys_block_device = NULL; +static const char *path_to_sys_block_device_bcache = NULL; +static const char *path_to_sys_devices_virtual_block_device = NULL; +static const char *path_to_device_mapper = NULL; +static const char *path_to_dev_disk = NULL; +static const char *path_to_sys_block = NULL; +static const char *path_to_device_label = NULL; +static const char *path_to_device_id = NULL; +static const char *path_to_veritas_volume_groups = NULL; static int name_disks_by_id = CONFIG_BOOLEAN_NO; static int global_bcache_priority_stats_update_every = 0; // disabled by default @@ -998,7 +978,7 @@ static void disk_labels_cb(RRDSET *st, void *data) { add_labels_to_disk(data, st); } -static int diskstats_function_block_devices(BUFFER *wb, const char *function __maybe_unused) { +static int diskstats_function_block_devices(BUFFER *wb, const char *function __maybe_unused, BUFFER *payload __maybe_unused, const char *source __maybe_unused) { buffer_flush(wb); wb->content_type = CT_APPLICATION_JSON; buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT); @@ -1049,21 +1029,21 @@ static int diskstats_function_block_devices(BUFFER *wb, const char *function __m max_io = MAX(max_io, io_total); } // Backlog and Busy Time - double busy_perc = rrddim_get_last_stored_value(d->rd_util_utilization, &max_busy_perc, 1); - double busy_time = rrddim_get_last_stored_value(d->rd_busy_busy, &max_busy_time, 1); + double busy_perc = rrddim_get_last_stored_value(d->disk_util.rd_util, &max_busy_perc, 1); + double busy_time = rrddim_get_last_stored_value(d->disk_busy.rd_busy, &max_busy_time, 1); double backlog_time = 
rrddim_get_last_stored_value(d->rd_backlog_backlog, &max_backlog_time, 1); // IOPS - double iops_reads = rrddim_get_last_stored_value(d->rd_ops_reads, &max_iops_reads, 1); - double iops_writes = rrddim_get_last_stored_value(d->rd_ops_writes, &max_iops_writes, 1); + double iops_reads = rrddim_get_last_stored_value(d->disk_ops.rd_ops_reads, &max_iops_reads, 1); + double iops_writes = rrddim_get_last_stored_value(d->disk_ops.rd_ops_writes, &max_iops_writes, 1); // IO Time - double iops_time_reads = rrddim_get_last_stored_value(d->rd_iotime_reads, &max_iops_time_reads, 1); - double iops_time_writes = rrddim_get_last_stored_value(d->rd_iotime_writes, &max_iops_time_writes, 1); + double iops_time_reads = rrddim_get_last_stored_value(d->disk_iotime.rd_reads_ms, &max_iops_time_reads, 1); + double iops_time_writes = rrddim_get_last_stored_value(d->disk_iotime.rd_writes_ms, &max_iops_time_writes, 1); // Avg IO Time - double iops_avg_time_read = rrddim_get_last_stored_value(d->rd_await_reads, &max_iops_avg_time_read, 1); - double iops_avg_time_write = rrddim_get_last_stored_value(d->rd_await_writes, &max_iops_avg_time_write, 1); + double iops_avg_time_read = rrddim_get_last_stored_value(d->disk_await.rd_await_reads, &max_iops_avg_time_read, 1); + double iops_avg_time_write = rrddim_get_last_stored_value(d->disk_await.rd_await_writes, &max_iops_avg_time_write, 1); // Avg IO Size - double iops_avg_size_read = rrddim_get_last_stored_value(d->rd_avgsz_reads, &max_iops_avg_size_read, 1); - double iops_avg_size_write = rrddim_get_last_stored_value(d->rd_avgsz_writes, &max_iops_avg_size_write, 1); + double iops_avg_size_read = rrddim_get_last_stored_value(d->disk_avgsz.rd_avgsz_reads, &max_iops_avg_size_read, 1); + double iops_avg_size_write = rrddim_get_last_stored_value(d->disk_avgsz.rd_avgsz_writes, &max_iops_avg_size_write, 1); buffer_json_add_array_item_double(wb, io_reads); @@ -1287,23 +1267,25 @@ static void diskstats_cleanup_disks() { if (unlikely(global_cleanup_removed_disks && !d->updated)) { struct disk *t = d; - rrdset_obsolete_and_pointer_null(d->st_avgsz); + rrdset_obsolete_and_pointer_null(d->disk_io.st_io); + rrdset_obsolete_and_pointer_null(d->disk_ops.st_ops); + rrdset_obsolete_and_pointer_null(d->disk_qops.st_qops); + rrdset_obsolete_and_pointer_null(d->disk_util.st_util); + rrdset_obsolete_and_pointer_null(d->disk_busy.st_busy); + rrdset_obsolete_and_pointer_null(d->disk_iotime.st_iotime); + rrdset_obsolete_and_pointer_null(d->disk_await.st_await); + rrdset_obsolete_and_pointer_null(d->disk_svctm.st_svctm); + + rrdset_obsolete_and_pointer_null(d->disk_avgsz.st_avgsz); rrdset_obsolete_and_pointer_null(d->st_ext_avgsz); - rrdset_obsolete_and_pointer_null(d->st_await); rrdset_obsolete_and_pointer_null(d->st_ext_await); rrdset_obsolete_and_pointer_null(d->st_backlog); - rrdset_obsolete_and_pointer_null(d->st_busy); rrdset_obsolete_and_pointer_null(d->disk_io.st_io); rrdset_obsolete_and_pointer_null(d->st_ext_io); - rrdset_obsolete_and_pointer_null(d->st_iotime); rrdset_obsolete_and_pointer_null(d->st_ext_iotime); rrdset_obsolete_and_pointer_null(d->st_mops); rrdset_obsolete_and_pointer_null(d->st_ext_mops); - rrdset_obsolete_and_pointer_null(d->st_ops); rrdset_obsolete_and_pointer_null(d->st_ext_ops); - rrdset_obsolete_and_pointer_null(d->st_qops); - rrdset_obsolete_and_pointer_null(d->st_svctm); - rrdset_obsolete_and_pointer_null(d->st_util); rrdset_obsolete_and_pointer_null(d->st_bcache); rrdset_obsolete_and_pointer_null(d->st_bcache_bypass); 
rrdset_obsolete_and_pointer_null(d->st_bcache_rates); @@ -1374,7 +1356,7 @@ int do_proc_diskstats(int update_every, usec_t dt) { global_do_ext = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "extended operations for all disks", global_do_ext); global_do_backlog = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "backlog for all disks", global_do_backlog); global_do_bcache = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "bcache for all disks", global_do_bcache); - global_bcache_priority_stats_update_every = (int)config_get_number(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "bcache priority stats update every", global_bcache_priority_stats_update_every); + global_bcache_priority_stats_update_every = (int)config_get_duration_seconds(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "bcache priority stats update every", global_bcache_priority_stats_update_every); global_cleanup_removed_disks = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "remove charts of removed disks" , global_cleanup_removed_disks); @@ -1421,7 +1403,8 @@ int do_proc_diskstats(int update_every, usec_t dt) { SIMPLE_PATTERN_EXACT, true); rrd_function_add_inline(localhost, NULL, "block-devices", 10, - RRDFUNCTIONS_PRIORITY_DEFAULT, RRDFUNCTIONS_DISKSTATS_HELP, + RRDFUNCTIONS_PRIORITY_DEFAULT, RRDFUNCTIONS_VERSION_DEFAULT, + RRDFUNCTIONS_DISKSTATS_HELP, "top", HTTP_ACCESS_ANONYMOUS_DATA, diskstats_function_block_devices); } @@ -1453,18 +1436,17 @@ int do_proc_diskstats(int update_every, usec_t dt) { char *disk; unsigned long major = 0, minor = 0; - collected_number reads = 0, mreads = 0, readsectors = 0, readms = 0, - writes = 0, mwrites = 0, writesectors = 0, writems = 0, + collected_number rd_ios = 0, mreads = 0, readsectors = 0, readms = 0, wr_ios = 0, mwrites = 0, writesectors = 0, writems = 0, queued_ios = 0, busy_ms = 0, backlog_ms = 0, discards = 0, mdiscards = 0, discardsectors = 0, discardms = 0, flushes = 0, flushms = 0; - collected_number last_reads = 0, last_readsectors = 0, last_readms = 0, - last_writes = 0, last_writesectors = 0, last_writems = 0, - last_busy_ms = 0, - last_discards = 0, last_discardsectors = 0, last_discardms = 0, - last_flushes = 0, last_flushms = 0; + collected_number last_rd_ios = 0, last_readsectors = 0, last_readms = 0, + last_wr_ios = 0, last_writesectors = 0, last_writems = 0, + last_busy_ms = 0, + last_discards = 0, last_discardsectors = 0, last_discardms = 0, + last_flushes = 0, last_flushms = 0; size_t words = procfile_linewords(ff, l); if(unlikely(words < 14)) continue; @@ -1475,8 +1457,8 @@ int do_proc_diskstats(int update_every, usec_t dt) { // # of reads completed # of writes completed // This is the total number of reads or writes completed successfully. 
- reads = str2ull(procfile_lineword(ff, l, 3), NULL); // rd_ios - writes = str2ull(procfile_lineword(ff, l, 7), NULL); // wr_ios + rd_ios = str2ull(procfile_lineword(ff, l, 3), NULL); // rd_ios + wr_ios = str2ull(procfile_lineword(ff, l, 7), NULL); // wr_ios // # of reads merged # of writes merged // Reads and writes which are adjacent to each other may be merged for @@ -1615,33 +1597,15 @@ int do_proc_diskstats(int update_every, usec_t dt) { if (d->do_ops == CONFIG_BOOLEAN_YES || d->do_ops == CONFIG_BOOLEAN_AUTO) { d->do_ops = CONFIG_BOOLEAN_YES; - if(unlikely(!d->st_ops)) { - d->st_ops = rrdset_create_localhost( - "disk_ops" - , d->chart_id - , d->disk - , family - , "disk.ops" - , "Disk Completed I/O Operations" - , "operations/s" - , PLUGIN_PROC_NAME - , PLUGIN_PROC_MODULE_DISKSTATS_NAME - , NETDATA_CHART_PRIO_DISK_OPS - , update_every - , RRDSET_TYPE_LINE - ); - - rrdset_flag_set(d->st_ops, RRDSET_FLAG_DETAIL); - - d->rd_ops_reads = rrddim_add(d->st_ops, "reads", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - d->rd_ops_writes = rrddim_add(d->st_ops, "writes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - - add_labels_to_disk(d, d->st_ops); - } + last_rd_ios = d->disk_ops.rd_ops_reads ? d->disk_ops.rd_ops_reads->collector.last_collected_value : 0; + last_wr_ios = d->disk_ops.rd_ops_writes ? d->disk_ops.rd_ops_writes->collector.last_collected_value : 0; - last_reads = rrddim_set_by_pointer(d->st_ops, d->rd_ops_reads, reads); - last_writes = rrddim_set_by_pointer(d->st_ops, d->rd_ops_writes, writes); - rrdset_done(d->st_ops); + common_disk_ops(&d->disk_ops, + d->chart_id, + d->disk, rd_ios, wr_ios, + update_every, + disk_labels_cb, + d); } if (do_dc_stats && d->do_ops == CONFIG_BOOLEAN_YES && d->do_ext != CONFIG_BOOLEAN_NO) { @@ -1661,8 +1625,6 @@ int do_proc_diskstats(int update_every, usec_t dt) { , RRDSET_TYPE_LINE ); - rrdset_flag_set(d->st_ext_ops, RRDSET_FLAG_DETAIL); - d->rd_ops_discards = rrddim_add(d->st_ext_ops, "discards", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); if (do_fl_stats) d->rd_ops_flushes = rrddim_add(d->st_ext_ops, "flushes", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); @@ -1679,31 +1641,14 @@ int do_proc_diskstats(int update_every, usec_t dt) { if (d->do_qops == CONFIG_BOOLEAN_YES || d->do_qops == CONFIG_BOOLEAN_AUTO) { d->do_qops = CONFIG_BOOLEAN_YES; - if(unlikely(!d->st_qops)) { - d->st_qops = rrdset_create_localhost( - "disk_qops" - , d->chart_id - , d->disk - , family - , "disk.qops" - , "Disk Current I/O Operations" - , "operations" - , PLUGIN_PROC_NAME - , PLUGIN_PROC_MODULE_DISKSTATS_NAME - , NETDATA_CHART_PRIO_DISK_QOPS - , update_every - , RRDSET_TYPE_LINE - ); - - rrdset_flag_set(d->st_qops, RRDSET_FLAG_DETAIL); - - d->rd_qops_operations = rrddim_add(d->st_qops, "operations", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - - add_labels_to_disk(d, d->st_qops); - } - - rrddim_set_by_pointer(d->st_qops, d->rd_qops_operations, queued_ios); - rrdset_done(d->st_qops); + common_disk_qops( + &d->disk_qops, + d->chart_id, + d->disk, + queued_ios, + update_every, + disk_labels_cb, + d); } if (d->do_backlog == CONFIG_BOOLEAN_YES || d->do_backlog == CONFIG_BOOLEAN_AUTO) { @@ -1725,8 +1670,6 @@ int do_proc_diskstats(int update_every, usec_t dt) { , RRDSET_TYPE_AREA ); - rrdset_flag_set(d->st_backlog, RRDSET_FLAG_DETAIL); - d->rd_backlog_backlog = rrddim_add(d->st_backlog, "backlog", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); add_labels_to_disk(d, d->st_backlog); @@ -1739,61 +1682,28 @@ int do_proc_diskstats(int update_every, usec_t dt) { if (d->do_util == CONFIG_BOOLEAN_YES || d->do_util == 
CONFIG_BOOLEAN_AUTO) { d->do_util = CONFIG_BOOLEAN_YES; - if(unlikely(!d->st_busy)) { - d->st_busy = rrdset_create_localhost( - "disk_busy" - , d->chart_id - , d->disk - , family - , "disk.busy" - , "Disk Busy Time" - , "milliseconds" - , PLUGIN_PROC_NAME - , PLUGIN_PROC_MODULE_DISKSTATS_NAME - , NETDATA_CHART_PRIO_DISK_BUSY - , update_every - , RRDSET_TYPE_AREA - ); - - rrdset_flag_set(d->st_busy, RRDSET_FLAG_DETAIL); - - d->rd_busy_busy = rrddim_add(d->st_busy, "busy", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - - add_labels_to_disk(d, d->st_busy); - } - - last_busy_ms = rrddim_set_by_pointer(d->st_busy, d->rd_busy_busy, busy_ms); - rrdset_done(d->st_busy); - - if(unlikely(!d->st_util)) { - d->st_util = rrdset_create_localhost( - "disk_util" - , d->chart_id - , d->disk - , family - , "disk.util" - , "Disk Utilization Time" - , "% of time working" - , PLUGIN_PROC_NAME - , PLUGIN_PROC_MODULE_DISKSTATS_NAME - , NETDATA_CHART_PRIO_DISK_UTIL - , update_every - , RRDSET_TYPE_AREA - ); - - rrdset_flag_set(d->st_util, RRDSET_FLAG_DETAIL); + last_busy_ms = d->disk_busy.rd_busy ? d->disk_busy.rd_busy->collector.last_collected_value : 0; - d->rd_util_utilization = rrddim_add(d->st_util, "utilization", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - - add_labels_to_disk(d, d->st_util); - } + common_disk_busy(&d->disk_busy, + d->chart_id, + d->disk, + busy_ms, + update_every, + disk_labels_cb, + d); collected_number disk_utilization = (busy_ms - last_busy_ms) / (10 * update_every); if (disk_utilization > 100) disk_utilization = 100; - rrddim_set_by_pointer(d->st_util, d->rd_util_utilization, disk_utilization); - rrdset_done(d->st_util); + common_disk_util(&d->disk_util, + d->chart_id, + d->disk, + disk_utilization, + update_every, + disk_labels_cb, + d); + } if (d->do_mops == CONFIG_BOOLEAN_YES || d->do_mops == CONFIG_BOOLEAN_AUTO) { @@ -1815,8 +1725,6 @@ int do_proc_diskstats(int update_every, usec_t dt) { , RRDSET_TYPE_LINE ); - rrdset_flag_set(d->st_mops, RRDSET_FLAG_DETAIL); - d->rd_mops_reads = rrddim_add(d->st_mops, "reads", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); d->rd_mops_writes = rrddim_add(d->st_mops, "writes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); @@ -1847,8 +1755,6 @@ int do_proc_diskstats(int update_every, usec_t dt) { , RRDSET_TYPE_LINE ); - rrdset_flag_set(d->st_ext_mops, RRDSET_FLAG_DETAIL); - d->rd_mops_discards = rrddim_add(d->st_ext_mops, "discards", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); add_labels_to_disk(d, d->st_ext_mops); @@ -1861,33 +1767,18 @@ int do_proc_diskstats(int update_every, usec_t dt) { if (d->do_iotime == CONFIG_BOOLEAN_YES || d->do_iotime == CONFIG_BOOLEAN_AUTO) { d->do_iotime = CONFIG_BOOLEAN_YES; - if(unlikely(!d->st_iotime)) { - d->st_iotime = rrdset_create_localhost( - "disk_iotime" - , d->chart_id - , d->disk - , family - , "disk.iotime" - , "Disk Total I/O Time" - , "milliseconds/s" - , PLUGIN_PROC_NAME - , PLUGIN_PROC_MODULE_DISKSTATS_NAME - , NETDATA_CHART_PRIO_DISK_IOTIME - , update_every - , RRDSET_TYPE_LINE - ); - - rrdset_flag_set(d->st_iotime, RRDSET_FLAG_DETAIL); - - d->rd_iotime_reads = rrddim_add(d->st_iotime, "reads", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - d->rd_iotime_writes = rrddim_add(d->st_iotime, "writes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - - add_labels_to_disk(d, d->st_iotime); - } - - last_readms = rrddim_set_by_pointer(d->st_iotime, d->rd_iotime_reads, readms); - last_writems = rrddim_set_by_pointer(d->st_iotime, d->rd_iotime_writes, writems); - rrdset_done(d->st_iotime); + last_readms = d->disk_iotime.rd_reads_ms ? 
d->disk_iotime.rd_reads_ms->collector.last_collected_value : 0; + last_writems = d->disk_iotime.rd_writes_ms ? d->disk_iotime.rd_writes_ms->collector.last_collected_value : 0; + + common_disk_iotime( + &d->disk_iotime, + d->chart_id, + d->disk, + readms, + writems, + update_every, + disk_labels_cb, + d); } if(do_dc_stats && d->do_iotime == CONFIG_BOOLEAN_YES && d->do_ext != CONFIG_BOOLEAN_NO) { @@ -1907,8 +1798,6 @@ int do_proc_diskstats(int update_every, usec_t dt) { , RRDSET_TYPE_LINE ); - rrdset_flag_set(d->st_ext_iotime, RRDSET_FLAG_DETAIL); - d->rd_iotime_discards = rrddim_add(d->st_ext_iotime, "discards", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); if (do_fl_stats) d->rd_iotime_flushes = rrddim_add(d->st_ext_iotime, "flushes", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); @@ -1928,36 +1817,19 @@ int do_proc_diskstats(int update_every, usec_t dt) { if(likely(dt)) { if ((d->do_iotime == CONFIG_BOOLEAN_YES || d->do_iotime == CONFIG_BOOLEAN_AUTO) && (d->do_ops == CONFIG_BOOLEAN_YES || d->do_ops == CONFIG_BOOLEAN_AUTO)) { - if(unlikely(!d->st_await)) { - d->st_await = rrdset_create_localhost( - "disk_await" - , d->chart_id - , d->disk - , family - , "disk.await" - , "Average Completed I/O Operation Time" - , "milliseconds/operation" - , PLUGIN_PROC_NAME - , PLUGIN_PROC_MODULE_DISKSTATS_NAME - , NETDATA_CHART_PRIO_DISK_AWAIT - , update_every - , RRDSET_TYPE_LINE - ); - - rrdset_flag_set(d->st_await, RRDSET_FLAG_DETAIL); - - d->rd_await_reads = rrddim_add(d->st_await, "reads", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); - d->rd_await_writes = rrddim_add(d->st_await, "writes", NULL, -1, 1000, RRD_ALGORITHM_ABSOLUTE); - - add_labels_to_disk(d, d->st_await); - } - double read_avg = (reads - last_reads) ? (double)(readms - last_readms) / (reads - last_reads) : 0; - double write_avg = (writes - last_writes) ? (double)(writems - last_writems) / (writes - last_writes) : 0; - - rrddim_set_by_pointer(d->st_await, d->rd_await_reads, (collected_number)(read_avg * 1000)); - rrddim_set_by_pointer(d->st_await, d->rd_await_writes, (collected_number)(write_avg * 1000)); - rrdset_done(d->st_await); + double read_ms_avg = (rd_ios - last_rd_ios) ? (double)(readms - last_readms) / (rd_ios - last_rd_ios) : 0; + double write_ms_avg = (wr_ios - last_wr_ios) ? 
(double)(writems - last_writems) / (wr_ios - last_wr_ios) : 0; + + common_disk_await( + &d->disk_await, + d->chart_id, + d->disk, + read_ms_avg, + write_ms_avg, + update_every, + disk_labels_cb, + d); } if (do_dc_stats && d->do_iotime == CONFIG_BOOLEAN_YES && d->do_ops == CONFIG_BOOLEAN_YES && d->do_ext != CONFIG_BOOLEAN_NO) { @@ -1977,8 +1849,6 @@ int do_proc_diskstats(int update_every, usec_t dt) { , RRDSET_TYPE_LINE ); - rrdset_flag_set(d->st_ext_await, RRDSET_FLAG_DETAIL); - d->rd_await_discards = rrddim_add(d->st_ext_await, "discards", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); if (do_fl_stats) d->rd_await_flushes = rrddim_add(d->st_ext_await, "flushes", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); @@ -2001,33 +1871,19 @@ int do_proc_diskstats(int update_every, usec_t dt) { if ((d->do_io == CONFIG_BOOLEAN_YES || d->do_io == CONFIG_BOOLEAN_AUTO) && (d->do_ops == CONFIG_BOOLEAN_YES || d->do_ops == CONFIG_BOOLEAN_AUTO)) { - if(unlikely(!d->st_avgsz)) { - d->st_avgsz = rrdset_create_localhost( - "disk_avgsz" - , d->chart_id - , d->disk - , family - , "disk.avgsz" - , "Average Completed I/O Operation Bandwidth" - , "KiB/operation" - , PLUGIN_PROC_NAME - , PLUGIN_PROC_MODULE_DISKSTATS_NAME - , NETDATA_CHART_PRIO_DISK_AVGSZ - , update_every - , RRDSET_TYPE_AREA - ); - - rrdset_flag_set(d->st_avgsz, RRDSET_FLAG_DETAIL); - - d->rd_avgsz_reads = rrddim_add(d->st_avgsz, "reads", NULL, SECTOR_SIZE, 1024, RRD_ALGORITHM_ABSOLUTE); - d->rd_avgsz_writes = rrddim_add(d->st_avgsz, "writes", NULL, SECTOR_SIZE * -1, 1024, RRD_ALGORITHM_ABSOLUTE); - - add_labels_to_disk(d, d->st_avgsz); - } - rrddim_set_by_pointer(d->st_avgsz, d->rd_avgsz_reads, (reads - last_reads) ? (readsectors - last_readsectors) / (reads - last_reads) : 0); - rrddim_set_by_pointer(d->st_avgsz, d->rd_avgsz_writes, (writes - last_writes) ? (writesectors - last_writesectors) / (writes - last_writes) : 0); - rrdset_done(d->st_avgsz); + kernel_uint_t avg_read_bytes = SECTOR_SIZE * ((rd_ios - last_rd_ios) ? (readsectors - last_readsectors) / (rd_ios - last_rd_ios) : 0); + kernel_uint_t avg_write_bytes = SECTOR_SIZE * ((wr_ios - last_wr_ios) ? 
(writesectors - last_writesectors) / (wr_ios - last_wr_ios) : 0); + + common_disk_avgsz( + &d->disk_avgsz, + d->chart_id, + d->disk, + avg_read_bytes, + avg_write_bytes, + update_every, + disk_labels_cb, + d); } if(do_dc_stats && d->do_io == CONFIG_BOOLEAN_YES && d->do_ops == CONFIG_BOOLEAN_YES && d->do_ext != CONFIG_BOOLEAN_NO) { @@ -2047,8 +1903,6 @@ int do_proc_diskstats(int update_every, usec_t dt) { , RRDSET_TYPE_AREA ); - rrdset_flag_set(d->st_ext_avgsz, RRDSET_FLAG_DETAIL); - d->rd_avgsz_discards = rrddim_add(d->st_ext_avgsz, "discards", NULL, SECTOR_SIZE, 1024, RRD_ALGORITHM_ABSOLUTE); add_labels_to_disk(d, d->st_ext_avgsz); @@ -2063,36 +1917,20 @@ int do_proc_diskstats(int update_every, usec_t dt) { if ((d->do_util == CONFIG_BOOLEAN_YES || d->do_util == CONFIG_BOOLEAN_AUTO) && (d->do_ops == CONFIG_BOOLEAN_YES || d->do_ops == CONFIG_BOOLEAN_AUTO)) { - if(unlikely(!d->st_svctm)) { - d->st_svctm = rrdset_create_localhost( - "disk_svctm" - , d->chart_id - , d->disk - , family - , "disk.svctm" - , "Average Service Time" - , "milliseconds/operation" - , PLUGIN_PROC_NAME - , PLUGIN_PROC_MODULE_DISKSTATS_NAME - , NETDATA_CHART_PRIO_DISK_SVCTM - , update_every - , RRDSET_TYPE_LINE - ); - - rrdset_flag_set(d->st_svctm, RRDSET_FLAG_DETAIL); - - d->rd_svctm_svctm = rrddim_add(d->st_svctm, "svctm", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); - - add_labels_to_disk(d, d->st_svctm); - } double svctm_avg = - ((reads - last_reads) + (writes - last_writes)) ? - (double)(busy_ms - last_busy_ms) / ((reads - last_reads) + (writes - last_writes)) : + ((rd_ios - last_rd_ios) + (wr_ios - last_wr_ios)) ? + (double) (busy_ms - last_busy_ms) / ((rd_ios - last_rd_ios) + (wr_ios - last_wr_ios)) : 0; - rrddim_set_by_pointer(d->st_svctm, d->rd_svctm_svctm, (collected_number)(svctm_avg * 1000)); - rrdset_done(d->st_svctm); + common_disk_svctm( + &d->disk_svctm, + d->chart_id, + d->disk, + svctm_avg, + update_every, + disk_labels_cb, + d); } } @@ -2331,8 +2169,6 @@ int do_proc_diskstats(int update_every, usec_t dt) { , RRDSET_TYPE_LINE ); - rrdset_flag_set(d->st_bcache, RRDSET_FLAG_DETAIL); - d->rd_bcache_hits = rrddim_add(d->st_bcache, "hits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); d->rd_bcache_misses = rrddim_add(d->st_bcache, "misses", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); d->rd_bcache_miss_collisions = rrddim_add(d->st_bcache, "collisions", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); @@ -2365,8 +2201,6 @@ int do_proc_diskstats(int update_every, usec_t dt) { , RRDSET_TYPE_LINE ); - rrdset_flag_set(d->st_bcache_bypass, RRDSET_FLAG_DETAIL); - d->rd_bcache_bypass_hits = rrddim_add(d->st_bcache_bypass, "hits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); d->rd_bcache_bypass_misses = rrddim_add(d->st_bcache_bypass, "misses", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); diff --git a/src/collectors/proc.plugin/proc_mdstat.c b/src/collectors/proc.plugin/proc_mdstat.c index 3857d9ec4..47c4f0d2b 100644 --- a/src/collectors/proc.plugin/proc_mdstat.c +++ b/src/collectors/proc.plugin/proc_mdstat.c @@ -89,7 +89,7 @@ int do_proc_mdstat(int update_every, usec_t dt) static int do_health = -1, do_nonredundant = -1, do_disks = -1, do_operations = -1, do_mismatch = -1, do_mismatch_config = -1; static int make_charts_obsolete = -1; - static char *mdstat_filename = NULL, *mismatch_cnt_filename = NULL; + static const char *mdstat_filename = NULL, *mismatch_cnt_filename = NULL; static struct raid *raids = NULL; static size_t raids_allocated = 0; size_t raids_num = 0, raid_idx = 0, redundant_num = 0; diff --git 
a/src/collectors/proc.plugin/proc_meminfo.c b/src/collectors/proc.plugin/proc_meminfo.c index 781329b59..de3f42329 100644 --- a/src/collectors/proc.plugin/proc_meminfo.c +++ b/src/collectors/proc.plugin/proc_meminfo.c @@ -347,8 +347,6 @@ int do_proc_meminfo(int update_every, usec_t dt) { , RRDSET_TYPE_LINE ); - rrdset_flag_set(st_mem_hwcorrupt, RRDSET_FLAG_DETAIL); - rd_corrupted = rrddim_add(st_mem_hwcorrupt, "HardwareCorrupted", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); } @@ -376,8 +374,6 @@ int do_proc_meminfo(int update_every, usec_t dt) { , RRDSET_TYPE_AREA ); - rrdset_flag_set(st_mem_committed, RRDSET_FLAG_DETAIL); - rd_committed = rrddim_add(st_mem_committed, "Committed_AS", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); } @@ -404,7 +400,6 @@ int do_proc_meminfo(int update_every, usec_t dt) { , update_every , RRDSET_TYPE_LINE ); - rrdset_flag_set(st_mem_writeback, RRDSET_FLAG_DETAIL); rd_dirty = rrddim_add(st_mem_writeback, "Dirty", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); rd_writeback = rrddim_add(st_mem_writeback, "Writeback", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); @@ -444,8 +439,6 @@ int do_proc_meminfo(int update_every, usec_t dt) { , RRDSET_TYPE_STACKED ); - rrdset_flag_set(st_mem_kernel, RRDSET_FLAG_DETAIL); - rd_slab = rrddim_add(st_mem_kernel, "Slab", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); rd_kernelstack = rrddim_add(st_mem_kernel, "KernelStack", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); rd_pagetables = rrddim_add(st_mem_kernel, "PageTables", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); @@ -484,8 +477,6 @@ int do_proc_meminfo(int update_every, usec_t dt) { , RRDSET_TYPE_STACKED ); - rrdset_flag_set(st_mem_slab, RRDSET_FLAG_DETAIL); - rd_reclaimable = rrddim_add(st_mem_slab, "reclaimable", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); rd_unreclaimable = rrddim_add(st_mem_slab, "unreclaimable", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); } @@ -518,8 +509,6 @@ int do_proc_meminfo(int update_every, usec_t dt) { , RRDSET_TYPE_STACKED ); - rrdset_flag_set(st_mem_hugepages, RRDSET_FLAG_DETAIL); - rd_free = rrddim_add(st_mem_hugepages, "free", NULL, Hugepagesize, 1024, RRD_ALGORITHM_ABSOLUTE); rd_used = rrddim_add(st_mem_hugepages, "used", NULL, Hugepagesize, 1024, RRD_ALGORITHM_ABSOLUTE); rd_surp = rrddim_add(st_mem_hugepages, "surplus", NULL, Hugepagesize, 1024, RRD_ALGORITHM_ABSOLUTE); @@ -555,8 +544,6 @@ int do_proc_meminfo(int update_every, usec_t dt) { , RRDSET_TYPE_STACKED ); - rrdset_flag_set(st_mem_transparent_hugepages, RRDSET_FLAG_DETAIL); - rd_anonymous = rrddim_add(st_mem_transparent_hugepages, "anonymous", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); rd_shared = rrddim_add(st_mem_transparent_hugepages, "shmem", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); } @@ -585,8 +572,6 @@ int do_proc_meminfo(int update_every, usec_t dt) { , RRDSET_TYPE_LINE ); - rrdset_flag_set(st_mem_thp_details, RRDSET_FLAG_DETAIL); - rd_shmem_pmd_mapped = rrddim_add(st_mem_thp_details, "shmem_pmd", "ShmemPmdMapped", 1, 1024, RRD_ALGORITHM_ABSOLUTE); rd_file_huge_pages = rrddim_add(st_mem_thp_details, "file", "FileHugePages", 1, 1024, RRD_ALGORITHM_ABSOLUTE); rd_file_pmd_mapped = rrddim_add(st_mem_thp_details, "file_pmd", "FilePmdMapped", 1, 1024, RRD_ALGORITHM_ABSOLUTE); @@ -622,8 +607,6 @@ int do_proc_meminfo(int update_every, usec_t dt) { , RRDSET_TYPE_LINE ); - rrdset_flag_set(st_mem_reclaiming, RRDSET_FLAG_DETAIL); - rd_active = rrddim_add(st_mem_reclaiming, "active", "Active", 1, 1024, RRD_ALGORITHM_ABSOLUTE); rd_inactive = rrddim_add(st_mem_reclaiming, "inactive", "Inactive", 1, 1024, RRD_ALGORITHM_ABSOLUTE); 
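The dimensions registered here, like most meminfo dimensions in this file, keep a multiplier/divisor pair of (1, 1024): as the rrddim_add parameters suggest, the value shown is collected * multiplier / divisor, which renders the KiB counters of /proc/meminfo in MiB. A toy restatement of that scaling, as a hypothetical helper rather than a Netdata API:

// Toy restatement of the dimension scaling implied by the rrddim_add calls
// around this point: displayed = collected * multiplier / divisor, so the
// (1, 1024) pair turns KiB counters from /proc/meminfo into MiB.
// Hypothetical helper, not a Netdata API.
static inline double scaled_value(long long collected, int multiplier, int divisor) {
    return (double)collected * (double)multiplier / (double)divisor;
}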
rd_active_anon = rrddim_add(st_mem_reclaiming, "active_anon", "Active(anon)", 1, 1024, RRD_ALGORITHM_ABSOLUTE); @@ -667,8 +650,6 @@ int do_proc_meminfo(int update_every, usec_t dt) { , RRDSET_TYPE_STACKED ); - rrdset_flag_set(st_mem_high_low, RRDSET_FLAG_DETAIL); - rd_high_used = rrddim_add(st_mem_high_low, "high_used", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); rd_low_used = rrddim_add(st_mem_high_low, "low_used", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); rd_high_free = rrddim_add(st_mem_high_low, "high_free", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); diff --git a/src/collectors/proc.plugin/proc_net_dev.c b/src/collectors/proc.plugin/proc_net_dev.c index 41c10ddbc..3af59aed1 100644 --- a/src/collectors/proc.plugin/proc_net_dev.c +++ b/src/collectors/proc.plugin/proc_net_dev.c @@ -251,6 +251,8 @@ static struct netdev { // ---------------------------------------------------------------------------- static void netdev_charts_release(struct netdev *d) { + rrdvar_chart_variable_release(d->st_bandwidth, d->chart_var_speed); + if(d->st_bandwidth) rrdset_is_obsolete___safe_from_collector_thread(d->st_bandwidth); if(d->st_packets) rrdset_is_obsolete___safe_from_collector_thread(d->st_packets); if(d->st_errors) rrdset_is_obsolete___safe_from_collector_thread(d->st_errors); @@ -472,7 +474,7 @@ static void netdev_rename_this_device(struct netdev *d) { // ---------------------------------------------------------------------------- -int netdev_function_net_interfaces(BUFFER *wb, const char *function __maybe_unused) { +static int netdev_function_net_interfaces(BUFFER *wb, const char *function __maybe_unused, BUFFER *payload __maybe_unused, const char *source __maybe_unused) { buffer_flush(wb); wb->content_type = CT_APPLICATION_JSON; buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT); @@ -1279,8 +1281,6 @@ int do_proc_net_dev(int update_every, usec_t dt) { , RRDSET_TYPE_LINE ); - rrdset_flag_set(d->st_speed, RRDSET_FLAG_DETAIL); - rrdset_update_rrdlabels(d->st_speed, d->chart_labels); d->rd_speed = rrddim_add(d->st_speed, "speed", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); @@ -1319,8 +1319,6 @@ int do_proc_net_dev(int update_every, usec_t dt) { , RRDSET_TYPE_LINE ); - rrdset_flag_set(d->st_duplex, RRDSET_FLAG_DETAIL); - rrdset_update_rrdlabels(d->st_duplex, d->chart_labels); d->rd_duplex_full = rrddim_add(d->st_duplex, "full", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); @@ -1351,8 +1349,6 @@ int do_proc_net_dev(int update_every, usec_t dt) { , RRDSET_TYPE_LINE ); - rrdset_flag_set(d->st_operstate, RRDSET_FLAG_DETAIL); - rrdset_update_rrdlabels(d->st_operstate, d->chart_labels); d->rd_operstate_up = rrddim_add(d->st_operstate, "up", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); @@ -1391,8 +1387,6 @@ int do_proc_net_dev(int update_every, usec_t dt) { , RRDSET_TYPE_LINE ); - rrdset_flag_set(d->st_carrier, RRDSET_FLAG_DETAIL); - rrdset_update_rrdlabels(d->st_carrier, d->chart_labels); d->rd_carrier_up = rrddim_add(d->st_carrier, "up", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); @@ -1421,8 +1415,6 @@ int do_proc_net_dev(int update_every, usec_t dt) { , RRDSET_TYPE_LINE ); - rrdset_flag_set(d->st_mtu, RRDSET_FLAG_DETAIL); - rrdset_update_rrdlabels(d->st_mtu, d->chart_labels); d->rd_mtu = rrddim_add(d->st_mtu, "mtu", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); @@ -1450,8 +1442,6 @@ int do_proc_net_dev(int update_every, usec_t dt) { , RRDSET_TYPE_LINE ); - rrdset_flag_set(d->st_packets, RRDSET_FLAG_DETAIL); - rrdset_update_rrdlabels(d->st_packets, d->chart_labels); d->rd_rpackets = rrddim_add(d->st_packets, "received", NULL, 1, 
1, RRD_ALGORITHM_INCREMENTAL); @@ -1491,8 +1481,6 @@ int do_proc_net_dev(int update_every, usec_t dt) { , RRDSET_TYPE_LINE ); - rrdset_flag_set(d->st_errors, RRDSET_FLAG_DETAIL); - rrdset_update_rrdlabels(d->st_errors, d->chart_labels); d->rd_rerrors = rrddim_add(d->st_errors, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); @@ -1530,8 +1518,6 @@ int do_proc_net_dev(int update_every, usec_t dt) { , RRDSET_TYPE_LINE ); - rrdset_flag_set(d->st_drops, RRDSET_FLAG_DETAIL); - rrdset_update_rrdlabels(d->st_drops, d->chart_labels); d->rd_rdrops = rrddim_add(d->st_drops, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); @@ -1569,8 +1555,6 @@ int do_proc_net_dev(int update_every, usec_t dt) { , RRDSET_TYPE_LINE ); - rrdset_flag_set(d->st_fifo, RRDSET_FLAG_DETAIL); - rrdset_update_rrdlabels(d->st_fifo, d->chart_labels); d->rd_rfifo = rrddim_add(d->st_fifo, "receive", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); @@ -1608,8 +1592,6 @@ int do_proc_net_dev(int update_every, usec_t dt) { , RRDSET_TYPE_LINE ); - rrdset_flag_set(d->st_compressed, RRDSET_FLAG_DETAIL); - rrdset_update_rrdlabels(d->st_compressed, d->chart_labels); d->rd_rcompressed = rrddim_add(d->st_compressed, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); @@ -1647,8 +1629,6 @@ int do_proc_net_dev(int update_every, usec_t dt) { , RRDSET_TYPE_LINE ); - rrdset_flag_set(d->st_events, RRDSET_FLAG_DETAIL); - rrdset_update_rrdlabels(d->st_events, d->chart_labels); d->rd_rframe = rrddim_add(d->st_events, "frames", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); @@ -1721,17 +1701,17 @@ void *netdev_main(void *ptr_is_null __maybe_unused) virtual_device_collect_delay_secs = 300; rrd_function_add_inline(localhost, NULL, "network-interfaces", 10, - RRDFUNCTIONS_PRIORITY_DEFAULT, RRDFUNCTIONS_NETDEV_HELP, + RRDFUNCTIONS_PRIORITY_DEFAULT, RRDFUNCTIONS_VERSION_DEFAULT, + RRDFUNCTIONS_NETDEV_HELP, "top", HTTP_ACCESS_ANONYMOUS_DATA, netdev_function_net_interfaces); - usec_t step = localhost->rrd_update_every * USEC_PER_SEC; heartbeat_t hb; - heartbeat_init(&hb); + heartbeat_init(&hb, localhost->rrd_update_every * USEC_PER_SEC); while (service_running(SERVICE_COLLECTORS)) { worker_is_idle(); - usec_t hb_dt = heartbeat_next(&hb, step); + usec_t hb_dt = heartbeat_next(&hb); if (unlikely(!service_running(SERVICE_COLLECTORS))) break; diff --git a/src/collectors/proc.plugin/proc_net_netstat.c b/src/collectors/proc.plugin/proc_net_netstat.c index da7a28fa7..6689a057f 100644 --- a/src/collectors/proc.plugin/proc_net_netstat.c +++ b/src/collectors/proc.plugin/proc_net_netstat.c @@ -465,7 +465,6 @@ static void do_proc_net_snmp6(int update_every) { , update_every , RRDSET_TYPE_LINE ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); rd_ok = rrddim_add(st, "FragOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL); rd_failed = rrddim_add(st, "FragFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL); @@ -501,7 +500,6 @@ static void do_proc_net_snmp6(int update_every) { , NETDATA_CHART_PRIO_IPV6_FRAGSIN , update_every , RRDSET_TYPE_LINE); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); rd_ok = rrddim_add(st, "ReasmOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL); rd_failed = rrddim_add(st, "ReasmFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL); @@ -544,7 +542,6 @@ static void do_proc_net_snmp6(int update_every) { , update_every , RRDSET_TYPE_LINE ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); rd_InDiscards = rrddim_add(st, "InDiscards", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); rd_OutDiscards = rrddim_add(st, "OutDiscards", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); @@ -624,7 +621,6 @@ static void 
do_proc_net_snmp6(int update_every) { , update_every , RRDSET_TYPE_LINE ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); rd_RcvbufErrors = rrddim_add(st, "RcvbufErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); rd_SndbufErrors = rrddim_add(st, "SndbufErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); @@ -697,7 +693,6 @@ static void do_proc_net_snmp6(int update_every) { , update_every , RRDSET_TYPE_LINE ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); rd_RcvbufErrors = rrddim_add(st, "RcvbufErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); rd_SndbufErrors = rrddim_add(st, "SndbufErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); @@ -735,7 +730,6 @@ static void do_proc_net_snmp6(int update_every) { , update_every , RRDSET_TYPE_AREA ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); rd_Ip6InMcastOctets = rrddim_add(st, "received", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); rd_Ip6OutMcastOctets = rrddim_add(st, "sent", NULL, -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); @@ -767,7 +761,6 @@ static void do_proc_net_snmp6(int update_every) { , update_every , RRDSET_TYPE_AREA ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); rd_Ip6InBcastOctets = rrddim_add(st, "received", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); rd_Ip6OutBcastOctets = rrddim_add(st, "sent", NULL, -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); @@ -799,7 +792,6 @@ static void do_proc_net_snmp6(int update_every) { , update_every , RRDSET_TYPE_LINE ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); rd_Ip6InMcastPkts = rrddim_add(st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); rd_Ip6OutMcastPkts = rrddim_add(st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); @@ -1781,8 +1773,6 @@ int do_proc_net_netstat(int update_every, usec_t dt) { , RRDSET_TYPE_AREA ); - rrdset_flag_set(st_ip_mcast, RRDSET_FLAG_DETAIL); - rd_in = rrddim_add(st_ip_mcast, "received", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); rd_out = rrddim_add(st_ip_mcast, "sent", NULL, -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); } @@ -1817,8 +1807,6 @@ int do_proc_net_netstat(int update_every, usec_t dt) { , RRDSET_TYPE_AREA ); - rrdset_flag_set(st_ip_bcast, RRDSET_FLAG_DETAIL); - rd_in = rrddim_add(st_ip_bcast, "received", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); rd_out = rrddim_add(st_ip_bcast, "sent", NULL, -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); } @@ -1853,8 +1841,6 @@ int do_proc_net_netstat(int update_every, usec_t dt) { , RRDSET_TYPE_LINE ); - rrdset_flag_set(st_ip_mcastpkts, RRDSET_FLAG_DETAIL); - rd_in = rrddim_add(st_ip_mcastpkts, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); rd_out = rrddim_add(st_ip_mcastpkts, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); } @@ -1886,8 +1872,6 @@ int do_proc_net_netstat(int update_every, usec_t dt) { , RRDSET_TYPE_LINE ); - rrdset_flag_set(st_ip_bcastpkts, RRDSET_FLAG_DETAIL); - rd_in = rrddim_add(st_ip_bcastpkts, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); rd_out = rrddim_add(st_ip_bcastpkts, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); } @@ -1919,8 +1903,6 @@ int do_proc_net_netstat(int update_every, usec_t dt) { , RRDSET_TYPE_LINE ); - rrdset_flag_set(st_ecnpkts, RRDSET_FLAG_DETAIL); - rd_cep = rrddim_add(st_ecnpkts, "InCEPkts", "CEP", 1, 1, RRD_ALGORITHM_INCREMENTAL); rd_noectp = rrddim_add(st_ecnpkts, "InNoECTPkts", "NoECTP", -1, 1, RRD_ALGORITHM_INCREMENTAL); rd_ectp0 = rrddim_add(st_ecnpkts, "InECT0Pkts", "ECTP0", 1, 1, RRD_ALGORITHM_INCREMENTAL); @@ -2241,7 +2223,6 @@ int do_proc_net_netstat(int update_every, usec_t dt) { , update_every , 
RRDSET_TYPE_LINE ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); rd_FragOKs = rrddim_add(st, "FragOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL); rd_FragFails = rrddim_add(st, "FragFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL); @@ -2277,7 +2258,6 @@ int do_proc_net_netstat(int update_every, usec_t dt) { , update_every , RRDSET_TYPE_LINE ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); rd_ReasmOKs = rrddim_add(st, "ReasmOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL); rd_ReasmFails = rrddim_add(st, "ReasmFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL); @@ -2319,7 +2299,6 @@ int do_proc_net_netstat(int update_every, usec_t dt) { , update_every , RRDSET_TYPE_LINE ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); rd_InDiscards = rrddim_add(st, "InDiscards", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); rd_OutDiscards = rrddim_add(st, "OutDiscards", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); @@ -2596,7 +2575,6 @@ int do_proc_net_netstat(int update_every, usec_t dt) { , update_every , RRDSET_TYPE_LINE ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); rd_InErrs = rrddim_add(st, "InErrs", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); rd_InCsumErrors = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); @@ -2631,7 +2609,6 @@ int do_proc_net_netstat(int update_every, usec_t dt) { , update_every , RRDSET_TYPE_LINE ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); rd_ActiveOpens = rrddim_add(st, "ActiveOpens", "active", 1, 1, RRD_ALGORITHM_INCREMENTAL); rd_PassiveOpens = rrddim_add(st, "PassiveOpens", "passive", 1, 1, RRD_ALGORITHM_INCREMENTAL); @@ -2666,7 +2643,6 @@ int do_proc_net_netstat(int update_every, usec_t dt) { , update_every , RRDSET_TYPE_LINE ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); rd_EstabResets = rrddim_add(st, "EstabResets", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); rd_OutRsts = rrddim_add(st, "OutRsts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); @@ -2744,7 +2720,6 @@ int do_proc_net_netstat(int update_every, usec_t dt) { , update_every , RRDSET_TYPE_LINE ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); rd_RcvbufErrors = rrddim_add(st, "RcvbufErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); rd_SndbufErrors = rrddim_add(st, "SndbufErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); diff --git a/src/collectors/proc.plugin/proc_net_rpc_nfs.c b/src/collectors/proc.plugin/proc_net_rpc_nfs.c index d6547636e..da3243bad 100644 --- a/src/collectors/proc.plugin/proc_net_rpc_nfs.c +++ b/src/collectors/proc.plugin/proc_net_rpc_nfs.c @@ -296,8 +296,6 @@ int do_proc_net_rpc_nfs(int update_every, usec_t dt) { , RRDSET_TYPE_STACKED ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); - rd_udp = rrddim_add(st, "udp", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); rd_tcp = rrddim_add(st, "tcp", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); } @@ -332,7 +330,6 @@ int do_proc_net_rpc_nfs(int update_every, usec_t dt) { , update_every , RRDSET_TYPE_LINE ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); rd_calls = rrddim_add(st, "calls", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); rd_retransmits = rrddim_add(st, "retransmits", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); diff --git a/src/collectors/proc.plugin/proc_net_rpc_nfsd.c b/src/collectors/proc.plugin/proc_net_rpc_nfsd.c index 1d9127a03..82d74b6a9 100644 --- a/src/collectors/proc.plugin/proc_net_rpc_nfsd.c +++ b/src/collectors/proc.plugin/proc_net_rpc_nfsd.c @@ -501,7 +501,6 @@ int do_proc_net_rpc_nfsd(int update_every, usec_t dt) { , update_every , RRDSET_TYPE_LINE ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); rd_stale = rrddim_add(st, "stale", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); } @@ -587,7 
+586,6 @@ int do_proc_net_rpc_nfsd(int update_every, usec_t dt) { , update_every , RRDSET_TYPE_STACKED ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); rd_udp = rrddim_add(st, "udp", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); rd_tcp = rrddim_add(st, "tcp", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); @@ -623,7 +621,6 @@ int do_proc_net_rpc_nfsd(int update_every, usec_t dt) { , update_every , RRDSET_TYPE_LINE ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); rd_calls = rrddim_add(st, "calls", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); rd_bad_format = rrddim_add(st, "bad_format", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); diff --git a/src/collectors/proc.plugin/proc_net_sctp_snmp.c b/src/collectors/proc.plugin/proc_net_sctp_snmp.c index 4a3d5c912..1987ff7a1 100644 --- a/src/collectors/proc.plugin/proc_net_sctp_snmp.c +++ b/src/collectors/proc.plugin/proc_net_sctp_snmp.c @@ -214,7 +214,6 @@ int do_proc_net_sctp_snmp(int update_every, usec_t dt) { , update_every , RRDSET_TYPE_LINE ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); rd_received = rrddim_add(st, "SctpInSCTPPacks", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL); rd_sent = rrddim_add(st, "SctpOutSCTPPacks", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL); @@ -248,7 +247,6 @@ int do_proc_net_sctp_snmp(int update_every, usec_t dt) { , update_every , RRDSET_TYPE_LINE ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); rd_invalid = rrddim_add(st, "SctpOutOfBlues", "invalid", 1, 1, RRD_ALGORITHM_INCREMENTAL); rd_csum = rrddim_add(st, "SctpChecksumErrors", "checksum", 1, 1, RRD_ALGORITHM_INCREMENTAL); @@ -282,7 +280,6 @@ int do_proc_net_sctp_snmp(int update_every, usec_t dt) { , NETDATA_CHART_PRIO_SCTP + 40 , update_every , RRDSET_TYPE_LINE); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); rd_reassembled = rrddim_add(st, "SctpReasmUsrMsgs", "reassembled", 1, 1, RRD_ALGORITHM_INCREMENTAL); rd_fragmented = rrddim_add(st, "SctpFragUsrMsgs", "fragmented", -1, 1, RRD_ALGORITHM_INCREMENTAL); @@ -321,7 +318,6 @@ int do_proc_net_sctp_snmp(int update_every, usec_t dt) { , update_every , RRDSET_TYPE_LINE ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); rd_InCtrl = rrddim_add(st, "SctpInCtrlChunks", "InCtrl", 1, 1, RRD_ALGORITHM_INCREMENTAL); rd_InOrder = rrddim_add(st, "SctpInOrderChunks", "InOrder", 1, 1, RRD_ALGORITHM_INCREMENTAL); diff --git a/src/collectors/proc.plugin/proc_net_sockstat.c b/src/collectors/proc.plugin/proc_net_sockstat.c index da8682b51..185eb4e5a 100644 --- a/src/collectors/proc.plugin/proc_net_sockstat.c +++ b/src/collectors/proc.plugin/proc_net_sockstat.c @@ -128,7 +128,7 @@ int do_proc_net_sockstat(int update_every, usec_t dt) { do_frag_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat", "ipv4 FRAG sockets", CONFIG_BOOLEAN_AUTO); do_frag_mem = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat", "ipv4 FRAG memory", CONFIG_BOOLEAN_AUTO); - update_constants_every = config_get_number("plugin:proc:/proc/net/sockstat", "update constants every", update_constants_every); + update_constants_every = config_get_duration_seconds("plugin:proc:/proc/net/sockstat", "update constants every", update_constants_every); update_constants_count = update_constants_every; arl_sockets = arl_create("sockstat/sockets", arl_callback_str2kernel_uint_t, 60); diff --git a/src/collectors/proc.plugin/proc_net_stat_conntrack.c b/src/collectors/proc.plugin/proc_net_stat_conntrack.c index 6951cba79..cb1c8837d 100644 --- a/src/collectors/proc.plugin/proc_net_stat_conntrack.c +++ b/src/collectors/proc.plugin/proc_net_stat_conntrack.c @@ -11,7 +11,7 @@ int 
do_proc_net_stat_conntrack(int update_every, usec_t dt) { static int do_sockets = -1, do_new = -1, do_changes = -1, do_expect = -1, do_search = -1, do_errors = -1; static usec_t get_max_every = 10 * USEC_PER_SEC, usec_since_last_max = 0; static int read_full = 1; - static char *nf_conntrack_filename, *nf_conntrack_count_filename, *nf_conntrack_max_filename; + static const char *nf_conntrack_filename, *nf_conntrack_count_filename, *nf_conntrack_max_filename; static const RRDVAR_ACQUIRED *rrdvar_max = NULL; unsigned long long aentries = 0, asearched = 0, afound = 0, anew = 0, ainvalid = 0, aignore = 0, adelete = 0, adelete_list = 0, @@ -217,7 +217,6 @@ int do_proc_net_stat_conntrack(int update_every, usec_t dt) { , update_every , RRDSET_TYPE_LINE ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); rd_inserted = rrddim_add(st, "inserted", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); rd_deleted = rrddim_add(st, "deleted", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); @@ -253,7 +252,6 @@ int do_proc_net_stat_conntrack(int update_every, usec_t dt) { , update_every , RRDSET_TYPE_LINE ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); rd_created = rrddim_add(st, "created", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); rd_deleted = rrddim_add(st, "deleted", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); @@ -289,7 +287,6 @@ int do_proc_net_stat_conntrack(int update_every, usec_t dt) { , update_every , RRDSET_TYPE_LINE ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); rd_searched = rrddim_add(st, "searched", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); rd_restarted = rrddim_add(st, "restarted", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); @@ -326,7 +323,6 @@ int do_proc_net_stat_conntrack(int update_every, usec_t dt) { , update_every , RRDSET_TYPE_LINE ); - rrdset_flag_set(st, RRDSET_FLAG_DETAIL); rd_icmp_error = rrddim_add(st, "icmp_error", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); rd_insert_failed = rrddim_add(st, "insert_failed", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); diff --git a/src/collectors/proc.plugin/proc_net_wireless.c b/src/collectors/proc.plugin/proc_net_wireless.c index c7efa3335..f5556a942 100644 --- a/src/collectors/proc.plugin/proc_net_wireless.c +++ b/src/collectors/proc.plugin/proc_net_wireless.c @@ -208,7 +208,7 @@ int do_proc_net_wireless(int update_every, usec_t dt) UNUSED(dt); static procfile *ff = NULL; static int do_status, do_quality = -1, do_discarded_packets, do_beacon; - static char *proc_net_wireless_filename = NULL; + static const char *proc_net_wireless_filename = NULL; if (unlikely(do_quality == -1)) { char filename[FILENAME_MAX + 1]; @@ -264,8 +264,6 @@ int do_proc_net_wireless(int update_every, usec_t dt) update_every, RRDSET_TYPE_LINE); - rrdset_flag_set(wireless_dev->st_status, RRDSET_FLAG_DETAIL); - wireless_dev->rd_status = rrddim_add(wireless_dev->st_status, "status", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); add_labels_to_wireless(wireless_dev, wireless_dev->st_status); @@ -295,7 +293,6 @@ int do_proc_net_wireless(int update_every, usec_t dt) NETDATA_CHART_PRIO_WIRELESS_IFACE + 1, update_every, RRDSET_TYPE_LINE); - rrdset_flag_set(wireless_dev->st_link, RRDSET_FLAG_DETAIL); wireless_dev->rd_link = rrddim_add(wireless_dev->st_link, "link_quality", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); @@ -316,7 +313,6 @@ int do_proc_net_wireless(int update_every, usec_t dt) NETDATA_CHART_PRIO_WIRELESS_IFACE + 2, update_every, RRDSET_TYPE_LINE); - rrdset_flag_set(wireless_dev->st_level, RRDSET_FLAG_DETAIL); wireless_dev->rd_level = rrddim_add(wireless_dev->st_level, "signal_level", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); @@ -337,7 
+333,6 @@ int do_proc_net_wireless(int update_every, usec_t dt) NETDATA_CHART_PRIO_WIRELESS_IFACE + 3, update_every, RRDSET_TYPE_LINE); - rrdset_flag_set(wireless_dev->st_noise, RRDSET_FLAG_DETAIL); wireless_dev->rd_noise = rrddim_add(wireless_dev->st_noise, "noise_level", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); @@ -376,8 +371,6 @@ int do_proc_net_wireless(int update_every, usec_t dt) update_every, RRDSET_TYPE_LINE); - rrdset_flag_set(wireless_dev->st_discarded_packets, RRDSET_FLAG_DETAIL); - wireless_dev->rd_nwid = rrddim_add(wireless_dev->st_discarded_packets, "nwid", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); wireless_dev->rd_crypt = rrddim_add(wireless_dev->st_discarded_packets, "crypt", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); wireless_dev->rd_frag = rrddim_add(wireless_dev->st_discarded_packets, "frag", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); @@ -414,8 +407,6 @@ int do_proc_net_wireless(int update_every, usec_t dt) update_every, RRDSET_TYPE_LINE); - rrdset_flag_set(wireless_dev->st_missed_beacon, RRDSET_FLAG_DETAIL); - wireless_dev->rd_missed_beacon = rrddim_add(wireless_dev->st_missed_beacon, "missed_beacons", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); add_labels_to_wireless(wireless_dev, wireless_dev->st_missed_beacon); diff --git a/src/collectors/proc.plugin/proc_pressure.c b/src/collectors/proc.plugin/proc_pressure.c index 4037e60ac..c4d4bc2b1 100644 --- a/src/collectors/proc.plugin/proc_pressure.c +++ b/src/collectors/proc.plugin/proc_pressure.c @@ -158,7 +158,7 @@ int do_proc_pressure(int update_every, usec_t dt) { int i; static usec_t next_pressure_dt = 0; - static char *base_path = NULL; + static const char *base_path = NULL; update_every = (update_every < MIN_PRESSURE_UPDATE_EVERY) ? MIN_PRESSURE_UPDATE_EVERY : update_every; pressure_update_every = update_every; @@ -170,9 +170,8 @@ int do_proc_pressure(int update_every, usec_t dt) { return 0; } - if (unlikely(!base_path)) { + if (unlikely(!base_path)) base_path = config_get(CONFIG_SECTION_PLUGIN_PROC_PRESSURE, "base path of pressure metrics", "/proc/pressure"); - } for (i = 0; i < PRESSURE_NUM_RESOURCES; i++) { procfile *ff = resource_info[i].pf; diff --git a/src/collectors/proc.plugin/proc_spl_kstat_zfs.c b/src/collectors/proc.plugin/proc_spl_kstat_zfs.c index 5a0f90951..9286479c9 100644 --- a/src/collectors/proc.plugin/proc_spl_kstat_zfs.c +++ b/src/collectors/proc.plugin/proc_spl_kstat_zfs.c @@ -18,7 +18,7 @@ int do_proc_spl_kstat_zfs_arcstats(int update_every, usec_t dt) { static int do_zfs_stats = 0; static procfile *ff = NULL; - static char *dirname = NULL; + static const char *dirname = NULL; static ARL_BASE *arl_base = NULL; arcstats.l2exist = -1; diff --git a/src/collectors/proc.plugin/proc_stat.c b/src/collectors/proc.plugin/proc_stat.c index c211ceee5..b36f6b867 100644 --- a/src/collectors/proc.plugin/proc_stat.c +++ b/src/collectors/proc.plugin/proc_stat.c @@ -293,7 +293,7 @@ static void* wake_cpu_thread(void* core) { return 0; } -static int read_schedstat(char *schedstat_filename, struct per_core_cpuidle_chart **cpuidle_charts_address, size_t *schedstat_cores_found) { +static int read_schedstat(const char *schedstat_filename, struct per_core_cpuidle_chart **cpuidle_charts_address, size_t *schedstat_cores_found) { static size_t cpuidle_charts_len = 0; static procfile *ff = NULL; struct per_core_cpuidle_chart *cpuidle_charts = *cpuidle_charts_address; @@ -373,7 +373,7 @@ static int read_one_state(char *buf, const char *filename, int *fd) { return 1; } -static int read_cpuidle_states(char *cpuidle_name_filename , char 
*cpuidle_time_filename, struct per_core_cpuidle_chart *cpuidle_charts, size_t core) { +static int read_cpuidle_states(const char *cpuidle_name_filename, const char *cpuidle_time_filename, struct per_core_cpuidle_chart *cpuidle_charts, size_t core) { char filename[FILENAME_MAX + 1]; static char next_state_filename[FILENAME_MAX + 1]; struct stat stbuf; @@ -484,7 +484,7 @@ int do_proc_stat(int update_every, usec_t dt) { static int do_cpu = -1, do_cpu_cores = -1, do_interrupts = -1, do_context = -1, do_forks = -1, do_processes = -1, do_core_throttle_count = -1, do_package_throttle_count = -1, do_cpu_freq = -1, do_cpuidle = -1; static uint32_t hash_intr, hash_ctxt, hash_processes, hash_procs_running, hash_procs_blocked; - static char *core_throttle_count_filename = NULL, *package_throttle_count_filename = NULL, *scaling_cur_freq_filename = NULL, + static const char *core_throttle_count_filename = NULL, *package_throttle_count_filename = NULL, *scaling_cur_freq_filename = NULL, *time_in_state_filename = NULL, *schedstat_filename = NULL, *cpuidle_name_filename = NULL, *cpuidle_time_filename = NULL; static const RRDVAR_ACQUIRED *cpus_var = NULL; static int accurate_freq_avail = 0, accurate_freq_is_used = 0; @@ -794,7 +794,6 @@ int do_proc_stat(int update_every, usec_t dt) { , update_every , RRDSET_TYPE_LINE ); - rrdset_flag_set(st_forks, RRDSET_FLAG_DETAIL); rd_started = rrddim_add(st_forks, "started", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); } diff --git a/src/collectors/proc.plugin/proc_uptime.c b/src/collectors/proc.plugin/proc_uptime.c index ddab7269b..7471171ed 100644 --- a/src/collectors/proc.plugin/proc_uptime.c +++ b/src/collectors/proc.plugin/proc_uptime.c @@ -5,7 +5,7 @@ int do_proc_uptime(int update_every, usec_t dt) { (void)dt; - static char *uptime_filename = NULL; + static const char *uptime_filename = NULL; if(!uptime_filename) { char filename[FILENAME_MAX + 1]; snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/uptime"); diff --git a/src/collectors/proc.plugin/proc_vmstat.c b/src/collectors/proc.plugin/proc_vmstat.c index 050086689..e38e9b678 100644 --- a/src/collectors/proc.plugin/proc_vmstat.c +++ b/src/collectors/proc.plugin/proc_vmstat.c @@ -355,8 +355,6 @@ int do_proc_vmstat(int update_every, usec_t dt) { , RRDSET_TYPE_LINE ); - rrdset_flag_set(st_oom_kill, RRDSET_FLAG_DETAIL); - rd_oom_kill = rrddim_add(st_oom_kill, "kills", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); } @@ -397,8 +395,6 @@ int do_proc_vmstat(int update_every, usec_t dt) { , RRDSET_TYPE_LINE ); - rrdset_flag_set(st_numa, RRDSET_FLAG_DETAIL); - // These depend on CONFIG_NUMA in the kernel. 
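Because the numa_* counters in /proc/vmstat only appear on CONFIG_NUMA kernels, a collector can probe for them before registering the dimensions that follow. A standalone sketch of such a probe — illustrative only; Netdata's own detection goes through its procfile/ARL machinery:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

// Standalone sketch: check whether this kernel exposes the NUMA counters
// at all before wiring up the chart. Illustrative only, not the code this
// patch uses.
static bool vmstat_has_numa(void) {
    FILE *fp = fopen("/proc/vmstat", "r");
    if (!fp)
        return false;

    char line[256];
    bool found = false;
    while (fgets(line, sizeof(line), fp)) {
        if (strncmp(line, "numa_hit ", 9) == 0) { // counter name + separator
            found = true;
            break;
        }
    }

    fclose(fp);
    return found;
}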
rd_local = rrddim_add(st_numa, "local", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); rd_foreign = rrddim_add(st_numa, "foreign", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); diff --git a/src/collectors/proc.plugin/sys_class_drm.c b/src/collectors/proc.plugin/sys_class_drm.c index ab4d98a72..0622274a0 100644 --- a/src/collectors/proc.plugin/sys_class_drm.c +++ b/src/collectors/proc.plugin/sys_class_drm.c @@ -837,7 +837,7 @@ int do_sys_class_drm(int update_every, usec_t dt) { if(unlikely(!drm_dir)) { char filename[FILENAME_MAX + 1]; snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/class/drm"); - char *drm_dir_name = config_get(CONFIG_SECTION_PLUGIN_PROC_DRM, "directory to monitor", filename); + const char *drm_dir_name = config_get(CONFIG_SECTION_PLUGIN_PROC_DRM, "directory to monitor", filename); if(unlikely(NULL == (drm_dir = opendir(drm_dir_name)))){ collector_error("Cannot read directory '%s'", drm_dir_name); return 1; diff --git a/src/collectors/proc.plugin/sys_class_infiniband.c b/src/collectors/proc.plugin/sys_class_infiniband.c index ff1652ddf..34a126c5e 100644 --- a/src/collectors/proc.plugin/sys_class_infiniband.c +++ b/src/collectors/proc.plugin/sys_class_infiniband.c @@ -302,7 +302,7 @@ int do_sys_class_infiniband(int update_every, usec_t dt) static int initialized = 0; static int enable_new_ports = -1, enable_only_active = CONFIG_BOOLEAN_YES; static int do_bytes = -1, do_packets = -1, do_errors = -1, do_hwpackets = -1, do_hwerrors = -1; - static char *sys_class_infiniband_dirname = NULL; + static const char *sys_class_infiniband_dirname = NULL; static long long int dt_to_refresh_ports = 0, last_refresh_ports_usec = 0; @@ -332,7 +332,7 @@ int do_sys_class_infiniband(int update_every, usec_t dt) SIMPLE_PATTERN_EXACT, true); dt_to_refresh_ports = - config_get_number(CONFIG_SECTION_PLUGIN_SYS_CLASS_INFINIBAND, "refresh ports state every seconds", 30) * + config_get_duration_seconds(CONFIG_SECTION_PLUGIN_SYS_CLASS_INFINIBAND, "refresh ports state every", 30) * USEC_PER_SEC; if (dt_to_refresh_ports < 0) dt_to_refresh_ports = 0; @@ -538,8 +538,7 @@ int do_sys_class_infiniband(int update_every, usec_t dt) port->priority + 1, update_every, RRDSET_TYPE_AREA); - // Create Dimensions - rrdset_flag_set(port->st_bytes, RRDSET_FLAG_DETAIL); + // On this chart, we want to have a KB/s so the dashboard will autoscale it // The reported values are also per-lane, so we must multiply it by the width // x4 lanes multiplier as per Documentation/ABI/stable/sysfs-class-infiniband @@ -576,8 +575,7 @@ int do_sys_class_infiniband(int update_every, usec_t dt) port->priority + 2, update_every, RRDSET_TYPE_AREA); - // Create Dimensions - rrdset_flag_set(port->st_packets, RRDSET_FLAG_DETAIL); + FOREACH_COUNTER_PACKETS(GEN_RRD_DIM_ADD, port) } @@ -605,8 +603,7 @@ int do_sys_class_infiniband(int update_every, usec_t dt) port->priority + 3, update_every, RRDSET_TYPE_LINE); - // Create Dimensions - rrdset_flag_set(port->st_errors, RRDSET_FLAG_DETAIL); + FOREACH_COUNTER_ERRORS(GEN_RRD_DIM_ADD, port) } @@ -641,8 +638,6 @@ int do_sys_class_infiniband(int update_every, usec_t dt) update_every, RRDSET_TYPE_LINE); - rrdset_flag_set(port->st_hwerrors, RRDSET_FLAG_DETAIL); - // VENDORS: Set your selection // VENDOR: Mellanox @@ -677,8 +672,6 @@ int do_sys_class_infiniband(int update_every, usec_t dt) update_every, RRDSET_TYPE_LINE); - rrdset_flag_set(port->st_hwpackets, RRDSET_FLAG_DETAIL); - // VENDORS: Set your selection // VENDOR: Mellanox diff --git 
a/src/collectors/proc.plugin/sys_class_power_supply.c b/src/collectors/proc.plugin/sys_class_power_supply.c index c6be72679..7e4dda777 100644 --- a/src/collectors/proc.plugin/sys_class_power_supply.c +++ b/src/collectors/proc.plugin/sys_class_power_supply.c @@ -199,7 +199,7 @@ int do_sys_class_power_supply(int update_every, usec_t dt) { (void)dt; static int do_capacity = -1, do_power = -1, do_property[3] = {-1}; static int keep_fds_open = CONFIG_BOOLEAN_NO, keep_fds_open_config = -1; - static char *dirname = NULL; + static const char *dirname = NULL; if(unlikely(do_capacity == -1)) { do_capacity = config_get_boolean("plugin:proc:/sys/class/power_supply", "battery capacity", CONFIG_BOOLEAN_YES); diff --git a/src/collectors/proc.plugin/sys_devices_pci_aer.c b/src/collectors/proc.plugin/sys_devices_pci_aer.c index 563ebf051..c39795ea1 100644 --- a/src/collectors/proc.plugin/sys_devices_pci_aer.c +++ b/src/collectors/proc.plugin/sys_devices_pci_aer.c @@ -2,7 +2,7 @@ #include "plugin_proc.h" -static char *pci_aer_dirname = NULL; +static const char *pci_aer_dirname = NULL; typedef enum __attribute__((packed)) { AER_DEV_NONFATAL = (1 << 0), diff --git a/src/collectors/proc.plugin/sys_devices_system_edac_mc.c b/src/collectors/proc.plugin/sys_devices_system_edac_mc.c index d3db8c044..93ee235cf 100644 --- a/src/collectors/proc.plugin/sys_devices_system_edac_mc.c +++ b/src/collectors/proc.plugin/sys_devices_system_edac_mc.c @@ -37,7 +37,7 @@ struct mc { }; static struct mc *mc_root = NULL; -static char *mc_dirname = NULL; +static const char *mc_dirname = NULL; static void find_all_mc() { char name[FILENAME_MAX + 1]; diff --git a/src/collectors/proc.plugin/sys_devices_system_node.c b/src/collectors/proc.plugin/sys_devices_system_node.c index 12f31a04e..9bc3703fa 100644 --- a/src/collectors/proc.plugin/sys_devices_system_node.c +++ b/src/collectors/proc.plugin/sys_devices_system_node.c @@ -4,22 +4,35 @@ struct node { char *name; - char *numastat_filename; - procfile *numastat_ff; - RRDSET *numastat_st; + + struct { + char *filename; + procfile *ff; + RRDSET *st; + } numastat; + + struct { + char *filename; + procfile *ff; + RRDSET *st_mem_usage; + RRDSET *st_mem_activity; + } meminfo; + struct node *next; }; static struct node *numa_root = NULL; +static int numa_node_count = 0; static int find_all_nodes() { int numa_node_count = 0; char name[FILENAME_MAX + 1]; snprintfz(name, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/system/node"); - char *dirname = config_get("plugin:proc:/sys/devices/system/node", "directory to monitor", name); + const char *dirname = config_get("plugin:proc:/sys/devices/system/node", "directory to monitor", name); DIR *dir = opendir(dirname); if(!dir) { - collector_error("Cannot read NUMA node directory '%s'", dirname); + nd_log( + NDLS_COLLECTORS, errno == ENOENT ? 
NDLP_INFO : NDLP_ERR, "Cannot read NUMA node directory '%s'", dirname); return 0; } @@ -47,8 +60,16 @@ static int find_all_nodes() { freez(m); continue; } + m->numastat.filename = strdupz(name); - m->numastat_filename = strdupz(name); + snprintfz(name, FILENAME_MAX, "%s/%s/meminfo", dirname, de->d_name); + if(stat(name, &st) == -1) { + freez(m->numastat.filename); + freez(m->name); + freez(m); + continue; + } + m->meminfo.filename = strdupz(name); m->next = numa_root; numa_root = m; @@ -59,22 +80,12 @@ static int find_all_nodes() { return numa_node_count; } -int do_proc_sys_devices_system_node(int update_every, usec_t dt) { - (void)dt; - +static void do_muma_numastat(struct node *m, int update_every) { static uint32_t hash_local_node = 0, hash_numa_foreign = 0, hash_interleave_hit = 0, hash_other_node = 0, hash_numa_hit = 0, hash_numa_miss = 0; - static int do_numastat = -1, numa_node_count = 0; - struct node *m; - - if(unlikely(numa_root == NULL)) { - numa_node_count = find_all_nodes(); - if(unlikely(numa_root == NULL)) - return 1; - } - - if(unlikely(do_numastat == -1)) { - do_numastat = config_get_boolean_ondemand("plugin:proc:/sys/devices/system/node", "enable per-node numa metrics", CONFIG_BOOLEAN_AUTO); + static bool initialized = false; + if(unlikely(!initialized)) { + initialized = true; hash_local_node = simple_hash("local_node"); hash_numa_foreign = simple_hash("numa_foreign"); hash_interleave_hit = simple_hash("interleave_hit"); @@ -83,82 +94,205 @@ int do_proc_sys_devices_system_node(int update_every, usec_t dt) { hash_numa_miss = simple_hash("numa_miss"); } - if (do_numastat == CONFIG_BOOLEAN_YES || (do_numastat == CONFIG_BOOLEAN_AUTO && numa_node_count >= 2)) { - for(m = numa_root; m; m = m->next) { - if(m->numastat_filename) { - - if(unlikely(!m->numastat_ff)) { - m->numastat_ff = procfile_open(m->numastat_filename, " ", PROCFILE_FLAG_DEFAULT); - - if(unlikely(!m->numastat_ff)) - continue; - } - - m->numastat_ff = procfile_readall(m->numastat_ff); - if(unlikely(!m->numastat_ff || procfile_lines(m->numastat_ff) < 1 || procfile_linewords(m->numastat_ff, 0) < 1)) - continue; - - if(unlikely(!m->numastat_st)) { - m->numastat_st = rrdset_create_localhost( - "mem" - , m->name - , NULL - , "numa" - , "mem.numa_nodes" - , "NUMA events" - , "events/s" - , PLUGIN_PROC_NAME - , "/sys/devices/system/node" - , NETDATA_CHART_PRIO_MEM_NUMA_NODES - , update_every - , RRDSET_TYPE_LINE - ); - - rrdlabels_add(m->numastat_st->rrdlabels, "numa_node", m->name, RRDLABEL_SRC_AUTO); - - rrdset_flag_set(m->numastat_st, RRDSET_FLAG_DETAIL); - - rrddim_add(m->numastat_st, "numa_hit", "hit", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(m->numastat_st, "numa_miss", "miss", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(m->numastat_st, "local_node", "local", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(m->numastat_st, "numa_foreign", "foreign", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(m->numastat_st, "interleave_hit", "interleave", 1, 1, RRD_ALGORITHM_INCREMENTAL); - rrddim_add(m->numastat_st, "other_node", "other", 1, 1, RRD_ALGORITHM_INCREMENTAL); - - } - - size_t lines = procfile_lines(m->numastat_ff), l; - for(l = 0; l < lines; l++) { - size_t words = procfile_linewords(m->numastat_ff, l); - - if(unlikely(words < 2)) { - if(unlikely(words)) - collector_error("Cannot read %s numastat line %zu. 
Expected 2 params, read %zu.", m->name, l, words); - continue; - } - - char *name = procfile_lineword(m->numastat_ff, l, 0); - char *value = procfile_lineword(m->numastat_ff, l, 1); - - if (unlikely(!name || !*name || !value || !*value)) - continue; - - uint32_t hash = simple_hash(name); - if(likely( - (hash == hash_numa_hit && !strcmp(name, "numa_hit")) - || (hash == hash_numa_miss && !strcmp(name, "numa_miss")) - || (hash == hash_local_node && !strcmp(name, "local_node")) - || (hash == hash_numa_foreign && !strcmp(name, "numa_foreign")) - || (hash == hash_interleave_hit && !strcmp(name, "interleave_hit")) - || (hash == hash_other_node && !strcmp(name, "other_node")) - )) - rrddim_set(m->numastat_st, name, (collected_number)str2kernel_uint_t(value)); - } - - rrdset_done(m->numastat_st); + if (m->numastat.filename) { + if(unlikely(!m->numastat.ff)) { + m->numastat.ff = procfile_open(m->numastat.filename, " ", PROCFILE_FLAG_DEFAULT); + + if(unlikely(!m->numastat.ff)) + return; + } + + m->numastat.ff = procfile_readall(m->numastat.ff); + if(unlikely(!m->numastat.ff || procfile_lines(m->numastat.ff) < 1 || procfile_linewords(m->numastat.ff, 0) < 1)) + return; + + if(unlikely(!m->numastat.st)) { + m->numastat.st = rrdset_create_localhost( + "numa_node_stat" + , m->name + , NULL + , "numa" + , "mem.numa_node_stat" + , "NUMA Node Memory Allocation Events" + , "events/s" + , PLUGIN_PROC_NAME + , "/sys/devices/system/node" + , NETDATA_CHART_PRIO_MEM_NUMA_NODES_NUMASTAT + , update_every + , RRDSET_TYPE_LINE + ); + + rrdlabels_add(m->numastat.st->rrdlabels, "numa_node", m->name, RRDLABEL_SRC_AUTO); + + rrddim_add(m->numastat.st, "numa_hit", "hit", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(m->numastat.st, "numa_miss", "miss", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(m->numastat.st, "local_node", "local", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(m->numastat.st, "numa_foreign", "foreign", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(m->numastat.st, "interleave_hit", "interleave", 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrddim_add(m->numastat.st, "other_node", "other", 1, 1, RRD_ALGORITHM_INCREMENTAL); + + } + + size_t lines = procfile_lines(m->numastat.ff), l; + for(l = 0; l < lines; l++) { + size_t words = procfile_linewords(m->numastat.ff, l); + + if(unlikely(words < 2)) { + if(unlikely(words)) + collector_error("Cannot read %s line %zu. 
Expected 2 params, read %zu.", m->numastat.filename, l, words); + continue; + } + + char *name = procfile_lineword(m->numastat.ff, l, 0); + char *value = procfile_lineword(m->numastat.ff, l, 1); + + if (unlikely(!name || !*name || !value || !*value)) + continue; + + uint32_t hash = simple_hash(name); + + if ((hash == hash_numa_hit && !strcmp(name, "numa_hit")) || + (hash == hash_numa_miss && !strcmp(name, "numa_miss")) || + (hash == hash_local_node && !strcmp(name, "local_node")) || + (hash == hash_numa_foreign && !strcmp(name, "numa_foreign")) || + (hash == hash_interleave_hit && !strcmp(name, "interleave_hit")) || + (hash == hash_other_node && !strcmp(name, "other_node"))) { + rrddim_set(m->numastat.st, name, (collected_number)str2kernel_uint_t(value)); + } + } + + rrdset_done(m->numastat.st); + } +} + +static void do_numa_meminfo(struct node *m, int update_every) { + static uint32_t hash_MemFree = 0, hash_MemUsed = 0, hash_ActiveAnon = 0, hash_InactiveAnon = 0, hash_ActiveFile = 0, + hash_InactiveFile = 0; + static bool initialized = false; + + if (unlikely(!initialized)) { + initialized = true; + hash_MemFree = simple_hash("MemFree"); + hash_MemUsed = simple_hash("MemUsed"); + hash_ActiveAnon = simple_hash("Active(anon)"); + hash_InactiveAnon = simple_hash("Inactive(anon)"); + hash_ActiveFile = simple_hash("Active(file)"); + hash_InactiveFile = simple_hash("Inactive(file)"); + } + + if (m->meminfo.filename) { + if (unlikely(!m->meminfo.ff)) { + m->meminfo.ff = procfile_open(m->meminfo.filename, " :", PROCFILE_FLAG_DEFAULT); + if (unlikely(!m->meminfo.ff)) + return; + } + + m->meminfo.ff = procfile_readall(m->meminfo.ff); + if (unlikely(!m->meminfo.ff || procfile_lines(m->meminfo.ff) < 1 || procfile_linewords(m->meminfo.ff, 0) < 1)) + return; + + if (unlikely(!m->meminfo.st_mem_usage)) { + m->meminfo.st_mem_usage = rrdset_create_localhost( + "numa_node_mem_usage", + m->name, + NULL, + "numa", + "mem.numa_node_mem_usage", + "NUMA Node Memory Usage", + "bytes", + PLUGIN_PROC_NAME, + "/sys/devices/system/node", + NETDATA_CHART_PRIO_MEM_NUMA_NODES_MEMINFO, + update_every, + RRDSET_TYPE_STACKED); + + rrdlabels_add(m->meminfo.st_mem_usage->rrdlabels, "numa_node", m->name, RRDLABEL_SRC_AUTO); + + rrddim_add(m->meminfo.st_mem_usage, "MemFree", "free", 1, 1, RRD_ALGORITHM_ABSOLUTE); + rrddim_add(m->meminfo.st_mem_usage, "MemUsed", "used", 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + if (unlikely(!m->meminfo.st_mem_activity)) { + m->meminfo.st_mem_activity = rrdset_create_localhost( + "numa_node_mem_activity", + m->name, + NULL, + "numa", + "mem.numa_node_mem_activity", + "NUMA Node Memory Activity", + "bytes", + PLUGIN_PROC_NAME, + "/sys/devices/system/node", + NETDATA_CHART_PRIO_MEM_NUMA_NODES_ACTIVITY, + update_every, + RRDSET_TYPE_STACKED); + + rrdlabels_add(m->meminfo.st_mem_activity->rrdlabels, "numa_node", m->name, RRDLABEL_SRC_AUTO); + + rrddim_add(m->meminfo.st_mem_activity, "Active(anon)", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + rrddim_add(m->meminfo.st_mem_activity, "Inactive(anon)", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + rrddim_add(m->meminfo.st_mem_activity, "Active(file)", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + rrddim_add(m->meminfo.st_mem_activity, "Inactive(file)", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + + size_t lines = procfile_lines(m->meminfo.ff), l; + for (l = 0; l < lines; l++) { + size_t words = procfile_linewords(m->meminfo.ff, l); + + if (unlikely(words < 4)) { + if (words) + collector_error( + "Cannot read %s line %zu. 
Expected 4 params, read %zu.", m->meminfo.filename, l, words); + continue; } + + char *name = procfile_lineword(m->meminfo.ff, l, 2); + char *value = procfile_lineword(m->meminfo.ff, l, 3); + + if (unlikely(!name || !*name || !value || !*value)) + continue; + + uint32_t hash = simple_hash(name); + + if ((hash == hash_MemFree && !strcmp(name, "MemFree")) || + (hash == hash_MemUsed && !strcmp(name, "MemUsed"))) { + rrddim_set(m->meminfo.st_mem_usage, name, (collected_number)str2kernel_uint_t(value) * 1024); + } else if ( + (hash == hash_ActiveAnon && !strcmp(name, "Active(anon)")) || + (hash == hash_InactiveAnon && !strcmp(name, "Inactive(anon)")) || + (hash == hash_ActiveFile && !strcmp(name, "Active(file)")) || + (hash == hash_InactiveFile && !strcmp(name, "Inactive(file)"))) { + rrddim_set(m->meminfo.st_mem_activity, name, (collected_number)str2kernel_uint_t(value) * 1024); + } + } + rrdset_done(m->meminfo.st_mem_usage); + rrdset_done(m->meminfo.st_mem_activity); + } +} + +int do_proc_sys_devices_system_node(int update_every, usec_t dt) { + (void)dt; + struct node *m; + + static int do_numastat = -1; + + if(unlikely(do_numastat == -1)) { + do_numastat = config_get_boolean_ondemand( + "plugin:proc:/sys/devices/system/node", "enable per-node numa metrics", CONFIG_BOOLEAN_AUTO); + } + + if(unlikely(numa_root == NULL)) { + numa_node_count = find_all_nodes(); + if(unlikely(numa_root == NULL)) + return 1; + } + + if (do_numastat == CONFIG_BOOLEAN_YES || (do_numastat == CONFIG_BOOLEAN_AUTO && numa_node_count >= 2)) { + for (m = numa_root; m; m = m->next) { + do_muma_numastat(m, update_every); + do_numa_meminfo(m, update_every); } + return 0; } - return 0; + return 1; } diff --git a/src/collectors/proc.plugin/sys_fs_btrfs.c b/src/collectors/proc.plugin/sys_fs_btrfs.c index bf9b002bc..f51461146 100644 --- a/src/collectors/proc.plugin/sys_fs_btrfs.c +++ b/src/collectors/proc.plugin/sys_fs_btrfs.c @@ -270,8 +270,8 @@ static inline int find_btrfs_disks(BTRFS_NODE *node, const char *path) { DIR *dir = opendir(path); if (!dir) { - if(!node->logged_error) { - collector_error("BTRFS: Cannot open directory '%s'.", path); + if (!node->logged_error) { + nd_log(NDLS_COLLECTORS, errno == ENOENT ? NDLP_INFO : NDLP_ERR, "BTRFS: Cannot open directory '%s'.", path); node->logged_error = 1; } return 1; @@ -374,8 +374,8 @@ static inline int find_btrfs_devices(BTRFS_NODE *node, const char *path) { DIR *dir = opendir(path); if (!dir) { - if(!node->logged_error) { - collector_error("BTRFS: Cannot open directory '%s'.", path); + if (!node->logged_error) { + nd_log(NDLS_COLLECTORS, errno == ENOENT ? NDLP_INFO : NDLP_ERR, "BTRFS: Cannot open directory '%s'.", path); node->logged_error = 1; } return 1; @@ -474,8 +474,8 @@ static inline int find_all_btrfs_pools(const char *path, int update_every) { DIR *dir = opendir(path); if (!dir) { - if(!logged_error) { - collector_error("BTRFS: Cannot open directory '%s'.", path); + if (!logged_error) { + nd_log(NDLS_COLLECTORS, errno == ENOENT ? 
NDLP_INFO : NDLP_ERR, "BTRFS: Cannot open directory '%s'.", path); logged_error = 1; } return 1; @@ -678,7 +678,7 @@ int do_sys_fs_btrfs(int update_every, usec_t dt) { , do_error_stats = CONFIG_BOOLEAN_AUTO; static usec_t refresh_delta = 0, refresh_every = 60 * USEC_PER_SEC; - static char *btrfs_path = NULL; + static const char *btrfs_path = NULL; (void)dt; @@ -689,7 +689,7 @@ int do_sys_fs_btrfs(int update_every, usec_t dt) { snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/fs/btrfs"); btrfs_path = config_get("plugin:proc:/sys/fs/btrfs", "path to monitor", filename); - refresh_every = config_get_number("plugin:proc:/sys/fs/btrfs", "check for btrfs changes every", refresh_every / USEC_PER_SEC) * USEC_PER_SEC; + refresh_every = config_get_duration_seconds("plugin:proc:/sys/fs/btrfs", "check for btrfs changes every", refresh_every / USEC_PER_SEC) * USEC_PER_SEC; refresh_delta = refresh_every; do_allocation_disks = config_get_boolean_ondemand("plugin:proc:/sys/fs/btrfs", "physical disks allocation", do_allocation_disks); diff --git a/src/collectors/profile.plugin/README.md b/src/collectors/profile.plugin/README.md index 7e3681208..992e6de99 100644 --- a/src/collectors/profile.plugin/README.md +++ b/src/collectors/profile.plugin/README.md @@ -4,25 +4,25 @@ This plugin allows someone to backfill an agent with random data. A user can specify: - - The number charts they want, - - the number of dimensions per chart, - - the desire update every collection frequency, - - the number of seconds to backfill. - - the number of collection threads. +- The number charts they want, +- the number of dimensions per chart, +- the desire update every collection frequency, +- the number of seconds to backfill. +- the number of collection threads. ## Configuration -Edit the `netdata.conf` configuration file using [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-netdataconf) from the [Netdata config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory), which is typically at `/etc/netdata`. +Edit the `netdata.conf` configuration file using [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) from the [Netdata config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory), which is typically at `/etc/netdata`. 
Scroll down to the `[plugin:profile]` section to find the available options: -``` +```text [plugin:profile] - update every = 5 - number of charts = 200 - number of dimensions per chart = 5 - seconds to backfill = 86400 - number of threads = 16 + update every = 5 + number of charts = 200 + number of dimensions per chart = 5 + seconds to backfill = 86400 + number of threads = 16 ``` The `number of threads` option will create the specified number of collection diff --git a/src/collectors/profile.plugin/plugin_profile.cc b/src/collectors/profile.plugin/plugin_profile.cc index 390bca29e..14de55db1 100644 --- a/src/collectors/profile.plugin/plugin_profile.cc +++ b/src/collectors/profile.plugin/plugin_profile.cc @@ -117,7 +117,7 @@ public: worker_register_job_custom_metric(WORKER_JOB_METRIC_POINTS_BACKFILLED, "points backfilled", "points", WORKER_METRIC_ABSOLUTE); heartbeat_t HB; - heartbeat_init(&HB); + heartbeat_init(&HB, UpdateEvery * USEC_PER_SEC); worker_is_busy(WORKER_JOB_CREATE_CHARTS); create(); @@ -157,7 +157,7 @@ public: if (CollectionTV.tv_sec >= NowTV.tv_sec) { worker_is_idle(); - heartbeat_next(&HB, UpdateEvery * USEC_PER_SEC); + heartbeat_next(&HB); } } } @@ -194,9 +194,11 @@ static void profile_main_cleanup(void *pptr) { extern "C" void *profile_main(void *ptr) { CLEANUP_FUNCTION_REGISTER(profile_main_cleanup) cleanup_ptr = ptr; - int UpdateEvery = (int) config_get_number(CONFIG_SECTION_PROFILE, "update every", 1); - if (UpdateEvery < localhost->rrd_update_every) + int UpdateEvery = (int) config_get_duration_seconds(CONFIG_SECTION_PROFILE, "update every", 1); + if (UpdateEvery < localhost->rrd_update_every) { UpdateEvery = localhost->rrd_update_every; + config_set_duration_seconds(CONFIG_SECTION_PROFILE, "update every", UpdateEvery); + } // pick low-default values, in case this plugin is ever enabled accidentaly. size_t NumThreads = config_get_number(CONFIG_SECTION_PROFILE, "number of threads", 2); diff --git a/src/collectors/python.d.plugin/README.md b/src/collectors/python.d.plugin/README.md index 299cebc03..f8d4184e8 100644 --- a/src/collectors/python.d.plugin/README.md +++ b/src/collectors/python.d.plugin/README.md @@ -1,22 +1,13 @@ - - # python.d.plugin `python.d.plugin` is a Netdata external plugin. It is an **orchestrator** for data collection modules written in `python`. -1. It runs as an independent process `ps fax` shows it -2. It is started and stopped automatically by Netdata -3. It communicates with Netdata via a unidirectional pipe (sending data to the `netdata` daemon) -4. Supports any number of data collection **modules** -5. Allows each **module** to have one or more data collection **jobs** -6. Each **job** is collecting one or more metrics from a single data source +1. It runs as an independent process `ps fax` shows it +2. It is started and stopped automatically by Netdata +3. It communicates with Netdata via a unidirectional pipe (sending data to the `netdata` daemon) +4. Supports any number of data collection **modules** +5. Allows each **module** to have one or more data collection **jobs** +6. Each **job** is collecting one or more metrics from a single data source ## Disclaimer @@ -25,7 +16,7 @@ Module configurations are written in YAML and **pyYAML is required**. Every configuration file must have one of two formats: -- Configuration for only one job: +- Configuration for only one job: ```yaml update_every : 2 # update frequency @@ -35,7 +26,7 @@ other_var1 : bla # variables passed to module other_var2 : alb ``` -- Configuration for many jobs (ex. 
mysql): +- Configuration for many jobs (ex. mysql): ```yaml # module defaults: @@ -55,23 +46,19 @@ other_job: ## How to debug a python module -``` +```bash # become user netdata sudo su -s /bin/bash netdata ``` Depending on where Netdata was installed, execute one of the following commands to trace the execution of a python module: -``` +```bash # execute the plugin in debug mode, for a specific module /opt/netdata/usr/libexec/netdata/plugins.d/python.d.plugin debug trace /usr/libexec/netdata/plugins.d/python.d.plugin debug trace ``` -Where `[module]` is the directory name under +Where `[module]` is the directory name under **Note**: If you would like execute a collector in debug mode while it is still running by Netdata, you can pass the `nolock` CLI option to the above commands. - -## How to write a new module - -See [develop a custom collector in Python](https://github.com/netdata/netdata/edit/master/docs/developer-and-contributor-corner/python-collector.md). diff --git a/src/collectors/python.d.plugin/am2320/integrations/am2320.md b/src/collectors/python.d.plugin/am2320/integrations/am2320.md index ea0e505c2..9aaa1153e 100644 --- a/src/collectors/python.d.plugin/am2320/integrations/am2320.md +++ b/src/collectors/python.d.plugin/am2320/integrations/am2320.md @@ -106,8 +106,8 @@ Install the Adafruit Circuit Python AM2320 library: The configuration file name for this integration is `python.d/am2320.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/collectors/python.d.plugin/anomalies/README.md b/src/collectors/python.d.plugin/anomalies/README.md deleted file mode 100644 index 1d7f8ba1b..000000000 --- a/src/collectors/python.d.plugin/anomalies/README.md +++ /dev/null @@ -1,248 +0,0 @@ - - -# Anomaly detection with Netdata - -**Note**: Check out the [Netdata Anomaly Advisor](/docs/dashboards-and-charts/anomaly-advisor-tab.md) for a more native anomaly detection experience within Netdata. - -This collector uses the Python [PyOD](https://pyod.readthedocs.io/en/latest/index.html) library to perform unsupervised [anomaly detection](https://en.wikipedia.org/wiki/Anomaly_detection) on your Netdata charts and/or dimensions. - -Instead of this collector just _collecting_ data, it also does some computation on the data it collects to return an anomaly probability and anomaly flag for each chart or custom model you define. This computation consists of a **train** function that runs every `train_n_secs` to train the ML models to learn what 'normal' typically looks like on your node. At each iteration there is also a **predict** function that uses the latest trained models and most recent metrics to produce an anomaly probability and anomaly flag for each chart or custom model you define. - -> As this is a somewhat unique collector and involves often subjective concepts like anomalies and anomaly probabilities, we would love to hear any feedback on it from the community. 
Please let us know on the [community forum](https://community.netdata.cloud/t/anomalies-collector-feedback-megathread/767) or drop us a note at [analytics-ml-team@netdata.cloud](mailto:analytics-ml-team@netdata.cloud) for any and all feedback, both positive and negative. This sort of feedback is priceless to help us make complex features more useful. - -## Charts - -Two charts are produced: - -- **Anomaly Probability** (`anomalies.probability`): This chart shows the probability that the latest observed data is anomalous based on the trained model for that chart (using the [`predict_proba()`](https://pyod.readthedocs.io/en/latest/api_cc.html#pyod.models.base.BaseDetector.predict_proba) method of the trained PyOD model). -- **Anomaly** (`anomalies.anomaly`): This chart shows `1` or `0` predictions of if the latest observed data is considered anomalous or not based on the trained model (using the [`predict()`](https://pyod.readthedocs.io/en/latest/api_cc.html#pyod.models.base.BaseDetector.predict) method of the trained PyOD model). - -Below is an example of the charts produced by this collector and how they might look when things are 'normal' on the node. The anomaly probabilities tend to bounce randomly around a typically low probability range, one or two might randomly jump or drift outside of this range every now and then and show up as anomalies on the anomaly chart. - -![netdata-anomalies-collector-normal](https://user-images.githubusercontent.com/2178292/100663699-99755000-334e-11eb-922f-0c41a0176484.jpg) - -If we then go onto the system and run a command like `stress-ng --all 2` to create some [stress](https://wiki.ubuntu.com/Kernel/Reference/stress-ng), we see some charts begin to have anomaly probabilities that jump outside the typical range. When the anomaly probabilities change enough, we will start seeing anomalies being flagged on the `anomalies.anomaly` chart. The idea is that these charts are the most anomalous right now so could be a good place to start your troubleshooting. - -![netdata-anomalies-collector-abnormal](https://user-images.githubusercontent.com/2178292/100663710-9bd7aa00-334e-11eb-9d14-76fda73bc309.jpg) - -Then, as the issue passes, the anomaly probabilities should settle back down into their 'normal' range again. - -![netdata-anomalies-collector-normal-again](https://user-images.githubusercontent.com/2178292/100666681-481a9000-3351-11eb-9979-64728ee2dfb6.jpg) - -## Requirements - -- This collector will only work with Python 3 and requires the packages below be installed. -- Typically you will not need to do this, but, if needed, to ensure Python 3 is used you can add the below line to the `[plugin:python.d]` section of `netdata.conf` - -```conf -[plugin:python.d] - # update every = 1 - command options = -ppython3 -``` - -Install the required python libraries. - -```bash -# become netdata user -sudo su -s /bin/bash netdata -# install required packages for the netdata user -pip3 install --user netdata-pandas==0.0.38 numba==0.50.1 scikit-learn==0.23.2 pyod==0.8.3 -``` - -## Configuration - -Install the Python requirements above, enable the collector and restart Netdata. - -```bash -cd /etc/netdata/ -sudo ./edit-config python.d.conf -# Set `anomalies: no` to `anomalies: yes` -sudo systemctl restart netdata -``` - -The configuration for the anomalies collector defines how it will behave on your system and might take some experimentation with over time to set it optimally for your node. 
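-
-Schematically, each collection step behaves as below (a simplified, self-contained sketch of the logic in the collector's `get_data()` method, which this patch removes further down; the stub functions stand in for the real PyOD training and scoring):
-
-```python
-import random
-
-train_every_n = 1800        # with update_every=1, retrain every 30 minutes
-train_no_prediction_n = 10  # reuse predictions for a few steps after training
-last_train_at = 0
-last_prob = 0.0
-
-def train(step):
-    """Stand-in for fitting the PyOD models on the last train_n_secs of data."""
-    global last_train_at
-    last_train_at = step
-
-def predict():
-    """Stand-in for scoring the newest feature vector with the trained models."""
-    return random.random()
-
-for runs_counter in range(1, 3601):
-    if runs_counter == 1 or runs_counter % train_every_n == 0:
-        train(runs_counter)
-    if runs_counter - last_train_at > train_no_prediction_n:
-        last_prob = predict()
-    # last_prob (and a 0/1 anomaly flag) would be sent to Netdata here
-```
-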
Out of the box, the config comes with some [sane defaults](https://www.netdata.cloud/blog/redefining-monitoring-with-netdata/) to get you started that try to balance the flexibility and power of the ML models with the goal of being as cheap as possible in term of cost on the node resources. - -_**Note**: If you are unsure about any of the below configuration options then it's best to just ignore all this and leave the `anomalies.conf` file alone to begin with. Then you can return to it later if you would like to tune things a bit more once the collector is running for a while and you have a feeling for its performance on your node._ - -Edit the `python.d/anomalies.conf` configuration file using `edit-config` from the your agent's [config -directory](/docs/netdata-agent/configuration/README.md), which is usually at `/etc/netdata`. - -```bash -cd /etc/netdata # Replace this path with your Netdata config directory, if different -sudo ./edit-config python.d/anomalies.conf -``` - -The default configuration should look something like this. Here you can see each parameter (with sane defaults) and some information about each one and what it does. - -```conf -# - -# JOBS (data collection sources) - -# Pull data from local Netdata node. -anomalies: - name: 'Anomalies' - - # Host to pull data from. - host: '127.0.0.1:19999' - - # Username and Password for Netdata if using basic auth. - # username: '???' - # password: '???' - - # Use http or https to pull data - protocol: 'http' - - # SSL verify parameter for requests.get() calls - tls_verify: true - - # What charts to pull data for - A regex like 'system\..*|' or 'system\..*|apps.cpu|apps.mem' etc. - charts_regex: 'system\..*' - - # Charts to exclude, useful if you would like to exclude some specific charts. - # Note: should be a ',' separated string like 'chart.name,chart.name'. - charts_to_exclude: 'system.uptime,system.entropy' - - # What model to use - can be one of 'pca', 'hbos', 'iforest', 'cblof', 'loda', 'copod' or 'feature_bagging'. - # More details here: https://pyod.readthedocs.io/en/latest/pyod.models.html. - model: 'pca' - - # Max number of observations to train on, to help cap compute cost of training model if you set a very large train_n_secs. - train_max_n: 100000 - - # How often to re-train the model (assuming update_every=1 then train_every_n=1800 represents (re)training every 30 minutes). - # Note: If you want to turn off re-training set train_every_n=0 and after initial training the models will not be retrained. - train_every_n: 1800 - - # The length of the window of data to train on (14400 = last 4 hours). - train_n_secs: 14400 - - # How many prediction steps after a train event to just use previous prediction value for. - # Used to reduce possibility of the training step itself appearing as an anomaly on the charts. - train_no_prediction_n: 10 - - # If you would like to train the model for the first time on a specific window then you can define it using the below two variables. - # Start of training data for initial model. - # initial_train_data_after: 1604578857 - - # End of training data for initial model. - # initial_train_data_before: 1604593257 - - # If you would like to ignore recent data in training then you can offset it by offset_n_secs. - offset_n_secs: 0 - - # How many lagged values of each dimension to include in the 'feature vector' each model is trained on. - lags_n: 5 - - # How much smoothing to apply to each dimension in the 'feature vector' each model is trained on. 
- smooth_n: 3 - - # How many differences to take in preprocessing your data. - # More info on differencing here: https://en.wikipedia.org/wiki/Autoregressive_integrated_moving_average#Differencing - # diffs_n=0 would mean training models on the raw values of each dimension. - # diffs_n=1 means everything is done in terms of differences. - diffs_n: 1 - - # What is the typical proportion of anomalies in your data on average? - # This parameter can control the sensitivity of your models to anomalies. - # Some discussion here: https://github.com/yzhao062/pyod/issues/144 - contamination: 0.001 - - # Set to true to include an "average_prob" dimension on anomalies probability chart which is - # just the average of all anomaly probabilities at each time step - include_average_prob: true - - # Define any custom models you would like to create anomaly probabilities for, some examples below to show how. - # For example below example creates two custom models, one to run anomaly detection user and system cpu for our demo servers - # and one on the cpu and mem apps metrics for the python.d.plugin. - # custom_models: - # - name: 'demos_cpu' - # dimensions: 'london.my-netdata.io::system.cpu|user,london.my-netdata.io::system.cpu|system,newyork.my-netdata.io::system.cpu|user,newyork.my-netdata.io::system.cpu|system' - # - name: 'apps_python_d_plugin' - # dimensions: 'apps.cpu|python.d.plugin,apps.mem|python.d.plugin' - - # Set to true to normalize, using min-max standardization, features used for the custom models. - # Useful if your custom models contain dimensions on very different scales an model you use does - # not internally do its own normalization. Usually best to leave as false. - # custom_models_normalize: false -``` - -## Custom models - -In the `anomalies.conf` file you can also define some "custom models" which you can use to group one or more metrics into a single model much like is done by default for the charts you specify. This is useful if you have a handful of metrics that exist in different charts but perhaps are related to the same underlying thing you would like to perform anomaly detection on, for example a specific app or user. - -To define a custom model you would include configuration like below in `anomalies.conf`. By default there should already be some commented out examples in there. - -`name` is a name you give your custom model, this is what will appear alongside any other specified charts in the `anomalies.probability` and `anomalies.anomaly` charts. `dimensions` is a string of metrics you want to include in your custom model. By default the [netdata-pandas](https://github.com/netdata/netdata-pandas) library used to pull the data from Netdata uses a "chart.a|dim.1" type of naming convention in the pandas columns it returns, hence the `dimensions` string should look like "chart.name|dimension.name,chart.name|dimension.name". The examples below hopefully make this clear. - -```yaml -custom_models: - # a model for anomaly detection on the netdata user in terms of cpu, mem, threads, processes and sockets. - - name: 'user_netdata' - dimensions: 'users.cpu|netdata,users.mem|netdata,users.threads|netdata,users.processes|netdata,users.sockets|netdata' - # a model for anomaly detection on the netdata python.d.plugin app in terms of cpu, mem, threads, processes and sockets. 
- - name: 'apps_python_d_plugin' - dimensions: 'apps.cpu|python.d.plugin,apps.mem|python.d.plugin,apps.threads|python.d.plugin,apps.processes|python.d.plugin,apps.sockets|python.d.plugin' - -custom_models_normalize: false -``` - -## Troubleshooting - -To see any relevant log messages you can use a command like below. - -```bash -`grep 'anomalies' /var/log/netdata/error.log` -``` - -If you would like to log in as `netdata` user and run the collector in debug mode to see more detail. - -```bash -# become netdata user -sudo su -s /bin/bash netdata -# run collector in debug using `nolock` option if netdata is already running the collector itself. -/usr/libexec/netdata/plugins.d/python.d.plugin anomalies debug trace nolock -``` - -## Deepdive tutorial - -If you would like to go deeper on what exactly the anomalies collector is doing under the hood then check out this [deepdive tutorial](https://github.com/netdata/community/blob/main/netdata-agent-api/netdata-pandas/anomalies_collector_deepdive.ipynb) in our community repo where you can play around with some data from our demo servers (or your own if its accessible to you) and work through the calculations step by step. - -(Note: as its a Jupyter Notebook it might render a little prettier on [nbviewer](https://nbviewer.jupyter.org/github/netdata/community/blob/main/netdata-agent-api/netdata-pandas/anomalies_collector_deepdive.ipynb)) - -## Notes - -- Python 3 is required as the [`netdata-pandas`](https://github.com/netdata/netdata-pandas) package uses Python async libraries ([asks](https://pypi.org/project/asks/) and [trio](https://pypi.org/project/trio/)) to make asynchronous calls to the [Netdata REST API](/src/web/api/README.md) to get the required data for each chart. -- Python 3 is also required for the underlying ML libraries of [numba](https://pypi.org/project/numba/), [scikit-learn](https://pypi.org/project/scikit-learn/), and [PyOD](https://pypi.org/project/pyod/). -- It may take a few hours or so (depending on your choice of `train_secs_n`) for the collector to 'settle' into it's typical behaviour in terms of the trained models and probabilities you will see in the normal running of your node. -- As this collector does most of the work in Python itself, with [PyOD](https://pyod.readthedocs.io/en/latest/) leveraging [numba](https://numba.pydata.org/) under the hood, you may want to try it out first on a test or development system to get a sense of its performance characteristics on a node similar to where you would like to use it. -- `lags_n`, `smooth_n`, and `diffs_n` together define the preprocessing done to the raw data before models are trained and before each prediction. This essentially creates a [feature vector](https://en.wikipedia.org/wiki/Feature_(machine_learning)#:~:text=In%20pattern%20recognition%20and%20machine,features%20that%20represent%20some%20object.&text=Feature%20vectors%20are%20often%20combined,score%20for%20making%20a%20prediction.) for each chart model (or each custom model). The default settings for these parameters aim to create a rolling matrix of recent smoothed [differenced](https://en.wikipedia.org/wiki/Autoregressive_integrated_moving_average#Differencing) values for each chart. The aim of the model then is to score how unusual this 'matrix' of features is for each chart based on what it has learned as 'normal' from the training data. 
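-
-A rough sketch of this preprocessing (simplified from the collector's `make_features()`, whose removal appears further down in this patch; assumes `numpy`):
-
-```python
-import numpy as np
-
-def make_features_sketch(arr, diffs_n=1, smooth_n=3, lags_n=5):
-    """Difference, smooth, then append lagged copies of each dimension."""
-    arr = np.nan_to_num(arr)
-    if diffs_n > 0:
-        # work on differences rather than raw values
-        arr = np.diff(arr, diffs_n, axis=0)
-    if smooth_n > 1:
-        # rolling mean over a window of smooth_n observations
-        arr = np.cumsum(arr, axis=0, dtype=float)
-        arr[smooth_n:] = arr[smooth_n:] - arr[:-smooth_n]
-        arr = arr[smooth_n - 1:] / smooth_n
-    # append lagged copies so each row carries a short window of history
-    arr_orig = np.copy(arr)
-    for n in range(1, lags_n + 1):
-        lagged = np.empty_like(arr_orig)
-        lagged[:n], lagged[n:] = np.nan, arr_orig[:-n]
-        arr = np.concatenate((arr, lagged), axis=1)
-    return arr[~np.isnan(arr).any(axis=1)]
-
-# e.g. 60 observations of a 4-dimension chart -> one feature row per window
-features = make_features_sketch(np.random.rand(60, 4))
-```
-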
So as opposed to just looking at the single most recent value of a dimension and considering how strange it is, this approach looks at a recent smoothed window of all dimensions for a chart (or dimensions in a custom model) and asks how unusual the data as a whole looks. This should be more flexible in capturing a wider range of [anomaly types](https://andrewm4894.com/2020/10/19/different-types-of-time-series-anomalies/) and be somewhat more robust to temporary 'spikes' in the data that tend to always be happening somewhere in your metrics but often are not the most important type of anomaly (this is all covered in a lot more detail in the [deepdive tutorial](https://nbviewer.jupyter.org/github/netdata/community/blob/main/netdata-agent-api/netdata-pandas/anomalies_collector_deepdive.ipynb)). -- You can see how long model training is taking by looking in the logs for the collector `grep 'anomalies' /var/log/netdata/error.log | grep 'training'` and you should see lines like `2020-12-01 22:02:14: python.d INFO: anomalies[local] : training complete in 2.81 seconds (runs_counter=2700, model=pca, train_n_secs=14400, models=26, n_fit_success=26, n_fit_fails=0, after=1606845731, before=1606860131).`. - - This also gives counts of the number of models, if any, that failed to fit and so had to default back to the DefaultModel (which is currently [HBOS](https://pyod.readthedocs.io/en/latest/_modules/pyod/models/hbos.html)). - - `after` and `before` here refer to the start and end of the training data used to train the models. -- On a development n1-standard-2 (2 vCPUs, 7.5 GB memory) vm running Ubuntu 18.04 LTS and not doing any work some of the typical performance characteristics we saw from running this collector (with defaults) were: - - A runtime (`netdata.runtime_anomalies`) of ~80ms when doing scoring and ~3 seconds when training or retraining the models. - - Typically ~3%-3.5% additional cpu usage from scoring, jumping to ~60% for a couple of seconds during model training. - - About ~150mb of ram (`apps.mem`) being continually used by the `python.d.plugin`. -- If you activate this collector on a fresh node, it might take a little while to build up enough data to calculate a realistic and useful model. -- Some models like `iforest` can be comparatively expensive (on same n1-standard-2 system above ~2s runtime during predict, ~40s training time, ~50% cpu on both train and predict) so if you would like to use it you might be advised to set a relatively high `update_every` maybe 10, 15 or 30 in `anomalies.conf`. -- Setting a higher `train_every_n` and `update_every` is an easy way to devote less resources on the node to anomaly detection. Specifying less charts and a lower `train_n_secs` will also help reduce resources at the expense of covering less charts and maybe a more noisy model if you set `train_n_secs` to be too small for how your node tends to behave. -- If you would like to enable this on a Raspberry Pi, then check out [this guide](/docs/developer-and-contributor-corner/raspberry-pi-anomaly-detection.md) which will guide you through first installing LLVM. - -## Useful links and further reading - -- [PyOD documentation](https://pyod.readthedocs.io/en/latest/), [PyOD Github](https://github.com/yzhao062/pyod). -- [Anomaly Detection](https://en.wikipedia.org/wiki/Anomaly_detection) wikipedia page. -- [Anomaly Detection YouTube playlist](https://www.youtube.com/playlist?list=PL6Zhl9mK2r0KxA6rB87oi4kWzoqGd5vp0) maintained by [andrewm4894](https://github.com/andrewm4894/) from Netdata. 
-- [awesome-TS-anomaly-detection](https://github.com/rob-med/awesome-TS-anomaly-detection) Github list of useful tools, libraries and resources. -- [Mendeley public group](https://www.mendeley.com/community/interesting-anomaly-detection-papers/) with some interesting anomaly detection papers we have been reading. -- Good [blog post](https://www.anodot.com/blog/what-is-anomaly-detection/) from Anodot on time series anomaly detection. Anodot also have some great whitepapers in this space too that some may find useful. -- Novelty and outlier detection in the [scikit-learn documentation](https://scikit-learn.org/stable/modules/outlier_detection.html). - diff --git a/src/collectors/python.d.plugin/anomalies/anomalies.chart.py b/src/collectors/python.d.plugin/anomalies/anomalies.chart.py deleted file mode 100644 index 24e84cc15..000000000 --- a/src/collectors/python.d.plugin/anomalies/anomalies.chart.py +++ /dev/null @@ -1,425 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: anomalies netdata python.d module -# Author: andrewm4894 -# SPDX-License-Identifier: GPL-3.0-or-later - -import sys -import time -from datetime import datetime -import re -import warnings - -import requests -import numpy as np -import pandas as pd -from netdata_pandas.data import get_data, get_allmetrics_async -from pyod.models.hbos import HBOS -from pyod.models.pca import PCA -from pyod.models.loda import LODA -from pyod.models.iforest import IForest -from pyod.models.cblof import CBLOF -from pyod.models.feature_bagging import FeatureBagging -from pyod.models.copod import COPOD -from sklearn.preprocessing import MinMaxScaler - -from bases.FrameworkServices.SimpleService import SimpleService - -# ignore some sklearn/numpy warnings that are ok -warnings.filterwarnings('ignore', r'All-NaN slice encountered') -warnings.filterwarnings('ignore', r'invalid value encountered in true_divide') -warnings.filterwarnings('ignore', r'divide by zero encountered in true_divide') -warnings.filterwarnings('ignore', r'invalid value encountered in subtract') - -disabled_by_default = True - -ORDER = ['probability', 'anomaly'] - -CHARTS = { - 'probability': { - 'options': ['probability', 'Anomaly Probability', 'probability', 'anomalies', 'anomalies.probability', 'line'], - 'lines': [] - }, - 'anomaly': { - 'options': ['anomaly', 'Anomaly', 'count', 'anomalies', 'anomalies.anomaly', 'stacked'], - 'lines': [] - }, -} - - -class Service(SimpleService): - def __init__(self, configuration=None, name=None): - SimpleService.__init__(self, configuration=configuration, name=name) - self.basic_init() - self.charts_init() - self.custom_models_init() - self.data_init() - self.model_params_init() - self.models_init() - self.collected_dims = {'probability': set(), 'anomaly': set()} - - def check(self): - if not (sys.version_info[0] >= 3 and sys.version_info[1] >= 6): - self.error("anomalies collector only works with Python>=3.6") - if len(self.host_charts_dict[self.host]) > 0: - _ = get_allmetrics_async(host_charts_dict=self.host_charts_dict, protocol=self.protocol, user=self.username, pwd=self.password) - return True - - def basic_init(self): - """Perform some basic initialization. 
- """ - self.order = ORDER - self.definitions = CHARTS - self.protocol = self.configuration.get('protocol', 'http') - self.host = self.configuration.get('host', '127.0.0.1:19999') - self.username = self.configuration.get('username', None) - self.password = self.configuration.get('password', None) - self.tls_verify = self.configuration.get('tls_verify', True) - self.fitted_at = {} - self.df_allmetrics = pd.DataFrame() - self.last_train_at = 0 - self.include_average_prob = bool(self.configuration.get('include_average_prob', True)) - self.reinitialize_at_every_step = bool(self.configuration.get('reinitialize_at_every_step', False)) - - def charts_init(self): - """Do some initialisation of charts in scope related variables. - """ - self.charts_regex = re.compile(self.configuration.get('charts_regex','None')) - self.charts_available = [c for c in list(requests.get(f'{self.protocol}://{self.host}/api/v1/charts', verify=self.tls_verify).json().get('charts', {}).keys())] - self.charts_in_scope = list(filter(self.charts_regex.match, self.charts_available)) - self.charts_to_exclude = self.configuration.get('charts_to_exclude', '').split(',') - if len(self.charts_to_exclude) > 0: - self.charts_in_scope = [c for c in self.charts_in_scope if c not in self.charts_to_exclude] - - def custom_models_init(self): - """Perform initialization steps related to custom models. - """ - self.custom_models = self.configuration.get('custom_models', None) - self.custom_models_normalize = bool(self.configuration.get('custom_models_normalize', False)) - if self.custom_models: - self.custom_models_names = [model['name'] for model in self.custom_models] - self.custom_models_dims = [i for s in [model['dimensions'].split(',') for model in self.custom_models] for i in s] - self.custom_models_dims = [dim if '::' in dim else f'{self.host}::{dim}' for dim in self.custom_models_dims] - self.custom_models_charts = list(set([dim.split('|')[0].split('::')[1] for dim in self.custom_models_dims])) - self.custom_models_hosts = list(set([dim.split('::')[0] for dim in self.custom_models_dims])) - self.custom_models_host_charts_dict = {} - for host in self.custom_models_hosts: - self.custom_models_host_charts_dict[host] = list(set([dim.split('::')[1].split('|')[0] for dim in self.custom_models_dims if dim.startswith(host)])) - self.custom_models_dims_renamed = [f"{model['name']}|{dim}" for model in self.custom_models for dim in model['dimensions'].split(',')] - self.models_in_scope = list(set([f'{self.host}::{c}' for c in self.charts_in_scope] + self.custom_models_names)) - self.charts_in_scope = list(set(self.charts_in_scope + self.custom_models_charts)) - self.host_charts_dict = {self.host: self.charts_in_scope} - for host in self.custom_models_host_charts_dict: - if host not in self.host_charts_dict: - self.host_charts_dict[host] = self.custom_models_host_charts_dict[host] - else: - for chart in self.custom_models_host_charts_dict[host]: - if chart not in self.host_charts_dict[host]: - self.host_charts_dict[host].extend(chart) - else: - self.models_in_scope = [f'{self.host}::{c}' for c in self.charts_in_scope] - self.host_charts_dict = {self.host: self.charts_in_scope} - self.model_display_names = {model: model.split('::')[1] if '::' in model else model for model in self.models_in_scope} - #self.info(f'self.host_charts_dict (len={len(self.host_charts_dict[self.host])}): {self.host_charts_dict}') - - def data_init(self): - """Initialize some empty data objects. 
- """ - self.data_probability_latest = {f'{m}_prob': 0 for m in self.charts_in_scope} - self.data_anomaly_latest = {f'{m}_anomaly': 0 for m in self.charts_in_scope} - self.data_latest = {**self.data_probability_latest, **self.data_anomaly_latest} - - def model_params_init(self): - """Model parameters initialisation. - """ - self.train_max_n = self.configuration.get('train_max_n', 100000) - self.train_n_secs = self.configuration.get('train_n_secs', 14400) - self.offset_n_secs = self.configuration.get('offset_n_secs', 0) - self.train_every_n = self.configuration.get('train_every_n', 1800) - self.train_no_prediction_n = self.configuration.get('train_no_prediction_n', 10) - self.initial_train_data_after = self.configuration.get('initial_train_data_after', 0) - self.initial_train_data_before = self.configuration.get('initial_train_data_before', 0) - self.contamination = self.configuration.get('contamination', 0.001) - self.lags_n = {model: self.configuration.get('lags_n', 5) for model in self.models_in_scope} - self.smooth_n = {model: self.configuration.get('smooth_n', 5) for model in self.models_in_scope} - self.diffs_n = {model: self.configuration.get('diffs_n', 5) for model in self.models_in_scope} - - def models_init(self): - """Models initialisation. - """ - self.model = self.configuration.get('model', 'pca') - if self.model == 'pca': - self.models = {model: PCA(contamination=self.contamination) for model in self.models_in_scope} - elif self.model == 'loda': - self.models = {model: LODA(contamination=self.contamination) for model in self.models_in_scope} - elif self.model == 'iforest': - self.models = {model: IForest(n_estimators=50, bootstrap=True, behaviour='new', contamination=self.contamination) for model in self.models_in_scope} - elif self.model == 'cblof': - self.models = {model: CBLOF(n_clusters=3, contamination=self.contamination) for model in self.models_in_scope} - elif self.model == 'feature_bagging': - self.models = {model: FeatureBagging(base_estimator=PCA(contamination=self.contamination), contamination=self.contamination) for model in self.models_in_scope} - elif self.model == 'copod': - self.models = {model: COPOD(contamination=self.contamination) for model in self.models_in_scope} - elif self.model == 'hbos': - self.models = {model: HBOS(contamination=self.contamination) for model in self.models_in_scope} - else: - self.models = {model: HBOS(contamination=self.contamination) for model in self.models_in_scope} - self.custom_model_scalers = {model: MinMaxScaler() for model in self.models_in_scope} - - def model_init(self, model): - """Model initialisation of a single model. 
- """ - if self.model == 'pca': - self.models[model] = PCA(contamination=self.contamination) - elif self.model == 'loda': - self.models[model] = LODA(contamination=self.contamination) - elif self.model == 'iforest': - self.models[model] = IForest(n_estimators=50, bootstrap=True, behaviour='new', contamination=self.contamination) - elif self.model == 'cblof': - self.models[model] = CBLOF(n_clusters=3, contamination=self.contamination) - elif self.model == 'feature_bagging': - self.models[model] = FeatureBagging(base_estimator=PCA(contamination=self.contamination), contamination=self.contamination) - elif self.model == 'copod': - self.models[model] = COPOD(contamination=self.contamination) - elif self.model == 'hbos': - self.models[model] = HBOS(contamination=self.contamination) - else: - self.models[model] = HBOS(contamination=self.contamination) - self.custom_model_scalers[model] = MinMaxScaler() - - def reinitialize(self): - """Reinitialize charts, models and data to a beginning state. - """ - self.charts_init() - self.custom_models_init() - self.data_init() - self.model_params_init() - self.models_init() - - def save_data_latest(self, data, data_probability, data_anomaly): - """Save the most recent data objects to be used if needed in the future. - """ - self.data_latest = data - self.data_probability_latest = data_probability - self.data_anomaly_latest = data_anomaly - - def validate_charts(self, chart, data, algorithm='absolute', multiplier=1, divisor=1): - """If dimension not in chart then add it. - """ - for dim in data: - if dim not in self.collected_dims[chart]: - self.collected_dims[chart].add(dim) - self.charts[chart].add_dimension([dim, dim, algorithm, multiplier, divisor]) - - for dim in list(self.collected_dims[chart]): - if dim not in data: - self.collected_dims[chart].remove(dim) - self.charts[chart].del_dimension(dim, hide=False) - - def add_custom_models_dims(self, df): - """Given a df, select columns used by custom models, add custom model name as prefix, and append to df. - - :param df : dataframe to append new renamed columns to. - :return: dataframe with additional columns added relating to the specified custom models. - """ - df_custom = df[self.custom_models_dims].copy() - df_custom.columns = self.custom_models_dims_renamed - df = df.join(df_custom) - - return df - - def make_features(self, arr, train=False, model=None): - """Take in numpy array and preprocess accordingly by taking diffs, smoothing and adding lags. - - :param arr : numpy array we want to make features from. - :param train : True if making features for training, in which case need to fit_transform scaler and maybe sample train_max_n. - :param model : model to make features for. - :return: transformed numpy array. 
- """ - - def lag(arr, n): - res = np.empty_like(arr) - res[:n] = np.nan - res[n:] = arr[:-n] - - return res - - arr = np.nan_to_num(arr) - - diffs_n = self.diffs_n[model] - smooth_n = self.smooth_n[model] - lags_n = self.lags_n[model] - - if self.custom_models_normalize and model in self.custom_models_names: - if train: - arr = self.custom_model_scalers[model].fit_transform(arr) - else: - arr = self.custom_model_scalers[model].transform(arr) - - if diffs_n > 0: - arr = np.diff(arr, diffs_n, axis=0) - arr = arr[~np.isnan(arr).any(axis=1)] - - if smooth_n > 1: - arr = np.cumsum(arr, axis=0, dtype=float) - arr[smooth_n:] = arr[smooth_n:] - arr[:-smooth_n] - arr = arr[smooth_n - 1:] / smooth_n - arr = arr[~np.isnan(arr).any(axis=1)] - - if lags_n > 0: - arr_orig = np.copy(arr) - for lag_n in range(1, lags_n + 1): - arr = np.concatenate((arr, lag(arr_orig, lag_n)), axis=1) - arr = arr[~np.isnan(arr).any(axis=1)] - - if train: - if len(arr) > self.train_max_n: - arr = arr[np.random.randint(arr.shape[0], size=self.train_max_n), :] - - arr = np.nan_to_num(arr) - - return arr - - def train(self, models_to_train=None, train_data_after=0, train_data_before=0): - """Pull required training data and train a model for each specified model. - - :param models_to_train : list of models to train on. - :param train_data_after : integer timestamp for start of train data. - :param train_data_before : integer timestamp for end of train data. - """ - now = datetime.now().timestamp() - if train_data_after > 0 and train_data_before > 0: - before = train_data_before - after = train_data_after - else: - before = int(now) - self.offset_n_secs - after = before - self.train_n_secs - - # get training data - df_train = get_data( - host_charts_dict=self.host_charts_dict, host_prefix=True, host_sep='::', after=after, before=before, - sort_cols=True, numeric_only=True, protocol=self.protocol, float_size='float32', user=self.username, pwd=self.password, - verify=self.tls_verify - ).ffill() - if self.custom_models: - df_train = self.add_custom_models_dims(df_train) - - # train model - self.try_fit(df_train, models_to_train=models_to_train) - self.info(f'training complete in {round(time.time() - now, 2)} seconds (runs_counter={self.runs_counter}, model={self.model}, train_n_secs={self.train_n_secs}, models={len(self.fitted_at)}, n_fit_success={self.n_fit_success}, n_fit_fails={self.n_fit_fail}, after={after}, before={before}).') - self.last_train_at = self.runs_counter - - def try_fit(self, df_train, models_to_train=None): - """Try fit each model and try to fallback to a default model if fit fails for any reason. - - :param df_train : data to train on. - :param models_to_train : list of models to train. 
- """ - if models_to_train is None: - models_to_train = list(self.models.keys()) - self.n_fit_fail, self.n_fit_success = 0, 0 - for model in models_to_train: - if model not in self.models: - self.model_init(model) - X_train = self.make_features( - df_train[df_train.columns[df_train.columns.str.startswith(f'{model}|')]].values, - train=True, model=model) - try: - self.models[model].fit(X_train) - self.n_fit_success += 1 - except Exception as e: - self.n_fit_fail += 1 - self.info(e) - self.info(f'training failed for {model} at run_counter {self.runs_counter}, defaulting to hbos model.') - self.models[model] = HBOS(contamination=self.contamination) - self.models[model].fit(X_train) - self.fitted_at[model] = self.runs_counter - - def predict(self): - """Get latest data, make it into a feature vector, and get predictions for each available model. - - :return: (,) tuple of dictionaries, one for probability scores and the other for anomaly predictions. - """ - # get recent data to predict on - df_allmetrics = get_allmetrics_async( - host_charts_dict=self.host_charts_dict, host_prefix=True, host_sep='::', wide=True, sort_cols=True, - protocol=self.protocol, numeric_only=True, float_size='float32', user=self.username, pwd=self.password - ) - if self.custom_models: - df_allmetrics = self.add_custom_models_dims(df_allmetrics) - self.df_allmetrics = self.df_allmetrics.append(df_allmetrics).ffill().tail((max(self.lags_n.values()) + max(self.smooth_n.values()) + max(self.diffs_n.values())) * 2) - - # get predictions - data_probability, data_anomaly = self.try_predict() - - return data_probability, data_anomaly - - def try_predict(self): - """Try make prediction and fall back to last known prediction if fails. - - :return: (,) tuple of dictionaries, one for probability scores and the other for anomaly predictions. 
- """ - data_probability, data_anomaly = {}, {} - for model in self.fitted_at.keys(): - model_display_name = self.model_display_names[model] - try: - X_model = np.nan_to_num( - self.make_features( - self.df_allmetrics[self.df_allmetrics.columns[self.df_allmetrics.columns.str.startswith(f'{model}|')]].values, - model=model - )[-1,:].reshape(1, -1) - ) - data_probability[model_display_name + '_prob'] = np.nan_to_num(self.models[model].predict_proba(X_model)[-1][1]) * 10000 - data_anomaly[model_display_name + '_anomaly'] = self.models[model].predict(X_model)[-1] - except Exception as _: - #self.info(e) - if model_display_name + '_prob' in self.data_latest: - #self.info(f'prediction failed for {model} at run_counter {self.runs_counter}, using last prediction instead.') - data_probability[model_display_name + '_prob'] = self.data_latest[model_display_name + '_prob'] - data_anomaly[model_display_name + '_anomaly'] = self.data_latest[model_display_name + '_anomaly'] - else: - #self.info(f'prediction failed for {model} at run_counter {self.runs_counter}, skipping as no previous prediction.') - continue - - return data_probability, data_anomaly - - def get_data(self): - - # initialize to what's available right now - if self.reinitialize_at_every_step or len(self.host_charts_dict[self.host]) == 0: - self.charts_init() - self.custom_models_init() - self.model_params_init() - - # if not all models have been trained then train those we need to - if len(self.fitted_at) < len(self.models_in_scope): - self.train( - models_to_train=[m for m in self.models_in_scope if m not in self.fitted_at], - train_data_after=self.initial_train_data_after, - train_data_before=self.initial_train_data_before - ) - # retrain all models as per schedule from config - elif self.train_every_n > 0 and self.runs_counter % self.train_every_n == 0: - self.reinitialize() - self.train() - - # roll forward previous predictions around a training step to avoid the possibility of having the training itself trigger an anomaly - if (self.runs_counter - self.last_train_at) <= self.train_no_prediction_n: - data_probability = self.data_probability_latest - data_anomaly = self.data_anomaly_latest - else: - data_probability, data_anomaly = self.predict() - if self.include_average_prob: - average_prob = np.mean(list(data_probability.values())) - data_probability['average_prob'] = 0 if np.isnan(average_prob) else average_prob - - data = {**data_probability, **data_anomaly} - - self.validate_charts('probability', data_probability, divisor=100) - self.validate_charts('anomaly', data_anomaly) - - self.save_data_latest(data, data_probability, data_anomaly) - - #self.info(f'len(data)={len(data)}') - #self.info(f'data') - - return data diff --git a/src/collectors/python.d.plugin/anomalies/anomalies.conf b/src/collectors/python.d.plugin/anomalies/anomalies.conf deleted file mode 100644 index ef867709a..000000000 --- a/src/collectors/python.d.plugin/anomalies/anomalies.conf +++ /dev/null @@ -1,184 +0,0 @@ -# netdata python.d.plugin configuration for anomalies -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). 
- -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 2 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) - -# Pull data from local Netdata node. -anomalies: - name: 'Anomalies' - - # Host to pull data from. - host: '127.0.0.1:19999' - - # Username and Password for Netdata if using basic auth. - # username: '???' - # password: '???' - - # Use http or https to pull data - protocol: 'http' - - # SSL verify parameter for requests.get() calls - tls_verify: true - - # What charts to pull data for - A regex like 'system\..*|' or 'system\..*|apps.cpu|apps.mem' etc. - charts_regex: 'system\..*' - - # Charts to exclude, useful if you would like to exclude some specific charts. - # Note: should be a ',' separated string like 'chart.name,chart.name'. - charts_to_exclude: 'system.uptime,system.entropy' - - # What model to use - can be one of 'pca', 'hbos', 'iforest', 'cblof', 'loda', 'copod' or 'feature_bagging'. - # More details here: https://pyod.readthedocs.io/en/latest/pyod.models.html. - model: 'pca' - - # Max number of observations to train on, to help cap compute cost of training model if you set a very large train_n_secs. - train_max_n: 100000 - - # How often to re-train the model (assuming update_every=1 then train_every_n=1800 represents (re)training every 30 minutes). - # Note: If you want to turn off re-training set train_every_n=0 and after initial training the models will not be retrained. - train_every_n: 1800 - - # The length of the window of data to train on (14400 = last 4 hours). - train_n_secs: 14400 - - # How many prediction steps after a train event to just use previous prediction value for. - # Used to reduce possibility of the training step itself appearing as an anomaly on the charts. - train_no_prediction_n: 10 - - # If you would like to train the model for the first time on a specific window then you can define it using the below two variables. - # Start of training data for initial model. - # initial_train_data_after: 1604578857 - - # End of training data for initial model. - # initial_train_data_before: 1604593257 - - # If you would like to ignore recent data in training then you can offset it by offset_n_secs. - offset_n_secs: 0 - - # How many lagged values of each dimension to include in the 'feature vector' each model is trained on. - lags_n: 5 - - # How much smoothing to apply to each dimension in the 'feature vector' each model is trained on. - smooth_n: 3 - - # How many differences to take in preprocessing your data. - # More info on differencing here: https://en.wikipedia.org/wiki/Autoregressive_integrated_moving_average#Differencing - # diffs_n=0 would mean training models on the raw values of each dimension. - # diffs_n=1 means everything is done in terms of differences. - diffs_n: 1 - - # What is the typical proportion of anomalies in your data on average? - # This parameter can control the sensitivity of your models to anomalies. 
- # Some discussion here: https://github.com/yzhao062/pyod/issues/144 - contamination: 0.001 - - # Set to true to include an "average_prob" dimension on anomalies probability chart which is - # just the average of all anomaly probabilities at each time step - include_average_prob: true - - # Define any custom models you would like to create anomaly probabilities for, some examples below to show how. - # For example below example creates two custom models, one to run anomaly detection user and system cpu for our demo servers - # and one on the cpu and mem apps metrics for the python.d.plugin. - # custom_models: - # - name: 'demos_cpu' - # dimensions: 'london.my-netdata.io::system.cpu|user,london.my-netdata.io::system.cpu|system,newyork.my-netdata.io::system.cpu|user,newyork.my-netdata.io::system.cpu|system' - # - name: 'apps_python_d_plugin' - # dimensions: 'apps.cpu|python.d.plugin,apps.mem|python.d.plugin' - - # Set to true to normalize, using min-max standardization, features used for the custom models. - # Useful if your custom models contain dimensions on very different scales an model you use does - # not internally do its own normalization. Usually best to leave as false. - # custom_models_normalize: false - -# Standalone Custom models example as an additional collector job. -# custom: -# name: 'custom' -# host: '127.0.0.1:19999' -# protocol: 'http' -# charts_regex: 'None' -# charts_to_exclude: 'None' -# model: 'pca' -# train_max_n: 100000 -# train_every_n: 1800 -# train_n_secs: 14400 -# offset_n_secs: 0 -# lags_n: 5 -# smooth_n: 3 -# diffs_n: 1 -# contamination: 0.001 -# custom_models: -# - name: 'user_netdata' -# dimensions: 'users.cpu|netdata,users.mem|netdata,users.threads|netdata,users.processes|netdata,users.sockets|netdata' -# - name: 'apps_python_d_plugin' -# dimensions: 'apps.cpu|python.d.plugin,apps.mem|python.d.plugin,apps.threads|python.d.plugin,apps.processes|python.d.plugin,apps.sockets|python.d.plugin' - -# Pull data from some demo nodes for cross node custom models. -# demos: -# name: 'demos' -# host: '127.0.0.1:19999' -# protocol: 'http' -# charts_regex: 'None' -# charts_to_exclude: 'None' -# model: 'pca' -# train_max_n: 100000 -# train_every_n: 1800 -# train_n_secs: 14400 -# offset_n_secs: 0 -# lags_n: 5 -# smooth_n: 3 -# diffs_n: 1 -# contamination: 0.001 -# custom_models: -# - name: 'system.cpu' -# dimensions: 'london.my-netdata.io::system.cpu|user,london.my-netdata.io::system.cpu|system,newyork.my-netdata.io::system.cpu|user,newyork.my-netdata.io::system.cpu|system' -# - name: 'system.ip' -# dimensions: 'london.my-netdata.io::system.ip|received,london.my-netdata.io::system.ip|sent,newyork.my-netdata.io::system.ip|received,newyork.my-netdata.io::system.ip|sent' -# - name: 'system.net' -# dimensions: 'london.my-netdata.io::system.net|received,london.my-netdata.io::system.net|sent,newyork.my-netdata.io::system.net|received,newyork.my-netdata.io::system.net|sent' -# - name: 'system.io' -# dimensions: 'london.my-netdata.io::system.io|in,london.my-netdata.io::system.io|out,newyork.my-netdata.io::system.io|in,newyork.my-netdata.io::system.io|out' - -# Example additional job if you want to also pull data from a child streaming to your -# local parent or even a remote node so long as the Netdata REST API is accessible. 
-# mychildnode1: -# name: 'mychildnode1' -# host: '127.0.0.1:19999/host/mychildnode1' -# protocol: 'http' -# charts_regex: 'system\..*' -# charts_to_exclude: 'None' -# model: 'pca' -# train_max_n: 100000 -# train_every_n: 1800 -# train_n_secs: 14400 -# offset_n_secs: 0 -# lags_n: 5 -# smooth_n: 3 -# diffs_n: 1 -# contamination: 0.001 diff --git a/src/collectors/python.d.plugin/anomalies/metadata.yaml b/src/collectors/python.d.plugin/anomalies/metadata.yaml deleted file mode 100644 index c14e47bf4..000000000 --- a/src/collectors/python.d.plugin/anomalies/metadata.yaml +++ /dev/null @@ -1,87 +0,0 @@ -# NOTE: this file is commented out as users are recommended to use the -# native anomaly detection capabilities on the agent instead. -# meta: -# plugin_name: python.d.plugin -# module_name: anomalies -# monitored_instance: -# name: python.d anomalies -# link: "" -# categories: [] -# icon_filename: "" -# related_resources: -# integrations: -# list: [] -# info_provided_to_referring_integrations: -# description: "" -# keywords: [] -# most_popular: false -# overview: -# data_collection: -# metrics_description: "" -# method_description: "" -# supported_platforms: -# include: [] -# exclude: [] -# multi_instance: true -# additional_permissions: -# description: "" -# default_behavior: -# auto_detection: -# description: "" -# limits: -# description: "" -# performance_impact: -# description: "" -# setup: -# prerequisites: -# list: [] -# configuration: -# file: -# name: "" -# description: "" -# options: -# description: "" -# folding: -# title: "" -# enabled: true -# list: [] -# examples: -# folding: -# enabled: true -# title: "" -# list: [] -# troubleshooting: -# problems: -# list: [] -# alerts: -# - name: anomalies_anomaly_probabilities -# link: https://github.com/netdata/netdata/blob/master/src/health/health.d/anomalies.conf -# metric: anomalies.probability -# info: average anomaly probability over the last 2 minutes -# - name: anomalies_anomaly_flags -# link: https://github.com/netdata/netdata/blob/master/src/health/health.d/anomalies.conf -# metric: anomalies.anomaly -# info: number of anomalies in the last 2 minutes -# metrics: -# folding: -# title: Metrics -# enabled: false -# description: "" -# availability: [] -# scopes: -# - name: global -# description: "" -# labels: [] -# metrics: -# - name: anomalies.probability -# description: Anomaly Probability -# unit: "probability" -# chart_type: line -# dimensions: -# - name: a dimension per probability -# - name: anomalies.anomaly -# description: Anomaly -# unit: "count" -# chart_type: stacked -# dimensions: -# - name: a dimension per anomaly diff --git a/src/collectors/python.d.plugin/boinc/README.md b/src/collectors/python.d.plugin/boinc/README.md deleted file mode 120000 index 22c10ca17..000000000 --- a/src/collectors/python.d.plugin/boinc/README.md +++ /dev/null @@ -1 +0,0 @@ -integrations/boinc.md \ No newline at end of file diff --git a/src/collectors/python.d.plugin/boinc/boinc.chart.py b/src/collectors/python.d.plugin/boinc/boinc.chart.py deleted file mode 100644 index a31eda1c2..000000000 --- a/src/collectors/python.d.plugin/boinc/boinc.chart.py +++ /dev/null @@ -1,168 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: BOINC netdata python.d module -# Author: Austin S.
Hemmelgarn (Ferroin) -# SPDX-License-Identifier: GPL-3.0-or-later - -import socket - -from bases.FrameworkServices.SimpleService import SimpleService -from third_party import boinc_client - -ORDER = [ - 'tasks', - 'states', - 'sched_states', - 'process_states', -] - -CHARTS = { - 'tasks': { - 'options': [None, 'Overall Tasks', 'tasks', 'boinc', 'boinc.tasks', 'line'], - 'lines': [ - ['total', 'Total', 'absolute', 1, 1], - ['active', 'Active', 'absolute', 1, 1] - ] - }, - 'states': { - 'options': [None, 'Tasks per State', 'tasks', 'boinc', 'boinc.states', 'line'], - 'lines': [ - ['new', 'New', 'absolute', 1, 1], - ['downloading', 'Downloading', 'absolute', 1, 1], - ['downloaded', 'Ready to Run', 'absolute', 1, 1], - ['comperror', 'Compute Errors', 'absolute', 1, 1], - ['uploading', 'Uploading', 'absolute', 1, 1], - ['uploaded', 'Uploaded', 'absolute', 1, 1], - ['aborted', 'Aborted', 'absolute', 1, 1], - ['upload_failed', 'Failed Uploads', 'absolute', 1, 1] - ] - }, - 'sched_states': { - 'options': [None, 'Tasks per Scheduler State', 'tasks', 'boinc', 'boinc.sched', 'line'], - 'lines': [ - ['uninit_sched', 'Uninitialized', 'absolute', 1, 1], - ['preempted', 'Preempted', 'absolute', 1, 1], - ['scheduled', 'Scheduled', 'absolute', 1, 1] - ] - }, - 'process_states': { - 'options': [None, 'Tasks per Process State', 'tasks', 'boinc', 'boinc.process', 'line'], - 'lines': [ - ['uninit_proc', 'Uninitialized', 'absolute', 1, 1], - ['executing', 'Executing', 'absolute', 1, 1], - ['suspended', 'Suspended', 'absolute', 1, 1], - ['aborting', 'Aborted', 'absolute', 1, 1], - ['quit', 'Quit', 'absolute', 1, 1], - ['copy_pending', 'Copy Pending', 'absolute', 1, 1] - ] - } -} - -# A simple template used for pre-loading the return dictionary to make -# the _get_data() method simpler. 
-_DATA_TEMPLATE = { - 'total': 0, - 'active': 0, - 'new': 0, - 'downloading': 0, - 'downloaded': 0, - 'comperror': 0, - 'uploading': 0, - 'uploaded': 0, - 'aborted': 0, - 'upload_failed': 0, - 'uninit_sched': 0, - 'preempted': 0, - 'scheduled': 0, - 'uninit_proc': 0, - 'executing': 0, - 'suspended': 0, - 'aborting': 0, - 'quit': 0, - 'copy_pending': 0 -} - -# Map task states to dimensions -_TASK_MAP = { - boinc_client.ResultState.NEW: 'new', - boinc_client.ResultState.FILES_DOWNLOADING: 'downloading', - boinc_client.ResultState.FILES_DOWNLOADED: 'downloaded', - boinc_client.ResultState.COMPUTE_ERROR: 'comperror', - boinc_client.ResultState.FILES_UPLOADING: 'uploading', - boinc_client.ResultState.FILES_UPLOADED: 'uploaded', - boinc_client.ResultState.ABORTED: 'aborted', - boinc_client.ResultState.UPLOAD_FAILED: 'upload_failed' -} - -# Map scheduler states to dimensions -_SCHED_MAP = { - boinc_client.CpuSched.UNINITIALIZED: 'uninit_sched', - boinc_client.CpuSched.PREEMPTED: 'preempted', - boinc_client.CpuSched.SCHEDULED: 'scheduled', -} - -# Maps process states to dimensions -_PROC_MAP = { - boinc_client.Process.UNINITIALIZED: 'uninit_proc', - boinc_client.Process.EXECUTING: 'executing', - boinc_client.Process.SUSPENDED: 'suspended', - boinc_client.Process.ABORT_PENDING: 'aborted', - boinc_client.Process.QUIT_PENDING: 'quit', - boinc_client.Process.COPY_PENDING: 'copy_pending' -} - - -class Service(SimpleService): - def __init__(self, configuration=None, name=None): - SimpleService.__init__(self, configuration=configuration, name=name) - self.order = ORDER - self.definitions = CHARTS - self.host = self.configuration.get('host', 'localhost') - self.port = self.configuration.get('port', 0) - self.password = self.configuration.get('password', '') - self.client = boinc_client.BoincClient(host=self.host, port=self.port, passwd=self.password) - self.alive = False - - def check(self): - return self.connect() - - def connect(self): - self.client.connect() - self.alive = self.client.connected and self.client.authorized - return self.alive - - def reconnect(self): - # The client class itself actually disconnects existing - # connections when it is told to connect, so we don't need to - # explicitly disconnect when we're just trying to reconnect. - return self.connect() - - def is_alive(self): - if not self.alive: - return self.reconnect() - return True - - def _get_data(self): - if not self.is_alive(): - return None - - data = dict(_DATA_TEMPLATE) - - try: - results = self.client.get_tasks() - except socket.error: - self.error('Connection is dead') - self.alive = False - return None - - for task in results: - data['total'] += 1 - data[_TASK_MAP[task.state]] += 1 - try: - if task.active_task: - data['active'] += 1 - data[_SCHED_MAP[task.scheduler_state]] += 1 - data[_PROC_MAP[task.active_task_state]] += 1 - except AttributeError: - pass - - return data or None diff --git a/src/collectors/python.d.plugin/boinc/boinc.conf b/src/collectors/python.d.plugin/boinc/boinc.conf deleted file mode 100644 index 16edf55c4..000000000 --- a/src/collectors/python.d.plugin/boinc/boinc.conf +++ /dev/null @@ -1,66 +0,0 @@ -# netdata python.d.plugin configuration for boinc -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). 
- -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs; however, each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# penalty indicates whether to apply penalty to update_every in case of failures. -# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. -# penalty: yes - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# penalty: yes # the JOB's penalty -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# In addition to the above, boinc also supports the following: -# -# hostname: localhost # The host running the BOINC client -# port: 31416 # The remote GUI RPC port for BOINC -# password: '' # The remote GUI RPC password diff --git a/src/collectors/python.d.plugin/boinc/integrations/boinc.md b/src/collectors/python.d.plugin/boinc/integrations/boinc.md deleted file mode 100644 index d5fcac215..000000000 --- a/src/collectors/python.d.plugin/boinc/integrations/boinc.md +++ /dev/null @@ -1,238 +0,0 @@ - - -# BOINC - - - - - -Plugin: python.d.plugin -Module: boinc - - - -## Overview - -This collector monitors task counts for the Berkeley Open Infrastructure for Network Computing (BOINC) distributed computing client. - -It uses the same RPC interface that the BOINC monitoring GUI does. - -This collector is supported on all platforms. - -This collector supports collecting metrics from multiple instances of this integration, including remote instances. - - -### Default Behavior - -#### Auto-Detection - -By default, the module will try to auto-detect the password to the RPC interface by looking in `/var/lib/boinc` for the `gui_rpc_auth.cfg` file (this is the location most Linux distributions use for a system-wide BOINC installation), so things may just work without needing configuration for a local system. - -#### Limits - -The default configuration for this integration does not impose any limits on data collection. - -#### Performance Impact - -The default configuration for this integration is not expected to impose a significant performance impact on the system. - - -## Metrics - -Metrics grouped by *scope*. - -The scope defines the instance that the metric belongs to.
An instance is uniquely identified by a set of labels. - - - -### Per BOINC instance - -These metrics refer to the entire monitored application. - -This scope has no labels. - -Metrics: - -| Metric | Dimensions | Unit | -|:------|:----------|:----| -| boinc.tasks | Total, Active | tasks | -| boinc.states | New, Downloading, Ready to Run, Compute Errors, Uploading, Uploaded, Aborted, Failed Uploads | tasks | -| boinc.sched | Uninitialized, Preempted, Scheduled | tasks | -| boinc.process | Uninitialized, Executing, Suspended, Aborted, Quit, Copy Pending | tasks | - - - -## Alerts - - -The following alerts are available: - -| Alert name | On metric | Description | -|:------------|:----------|:------------| -| [ boinc_total_tasks ](https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf) | boinc.tasks | average number of total tasks over the last 10 minutes | -| [ boinc_active_tasks ](https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf) | boinc.tasks | average number of active tasks over the last 10 minutes | -| [ boinc_compute_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf) | boinc.states | average number of compute errors over the last 10 minutes | -| [ boinc_upload_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf) | boinc.states | average number of failed uploads over the last 10 minutes | - - -## Setup - -### Prerequisites - -#### BOINC RPC interface - -BOINC requires use of a password to access its RPC interface. You can find this password in the `gui_rpc_auth.cfg` file in your BOINC directory. - - -### Configuration - -#### File - -The configuration file name for this integration is `python.d/boinc.conf`. - - -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). - -```bash -cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata -sudo ./edit-config python.d/boinc.conf -``` -#### Options - -There are 2 sections: - -* Global variables -* One or more JOBS that can define multiple different instances to monitor. - -The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. - -Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. - -Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. - -
Config options - -| Name | Description | Default | Required | -|:----|:-----------|:-------|:--------:| -| update_every | Sets the default data collection frequency. | 5 | no | -| priority | Controls the order of charts at the netdata dashboard. | 60000 | no | -| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no | -| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no | -| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no | -| hostname | Define a hostname where boinc is running. | localhost | no | -| port | The port of boinc RPC interface. | | no | -| password | Provide a password to connect to a boinc RPC interface. | | no | - -
- -#### Examples - -##### Configuration of a remote boinc instance - -A basic JOB configuration for a remote boinc instance - -```yaml -remote: - hostname: '1.2.3.4' - port: 1234 - password: 'some-password' - -``` -##### Multi-instance - -> **Note**: When you define multiple jobs, their names must be unique. - -Collecting metrics from local and remote instances. - - -
Config - -```yaml -localhost: - name: 'local' - host: '127.0.0.1' - port: 1234 - password: 'some-password' - -remote_job: - name: 'remote' - host: '192.0.2.1' - port: 1234 - password: some-other-password - -``` -
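If a job cannot connect, you can exercise the same GUI RPC handshake the collector performs with a few lines of Python. This is a hedged sketch, not part of Netdata: the host, port and password are the example values from above, and the framing (XML requests terminated by a 0x03 byte, with an md5(nonce + password) challenge) follows the published BOINC GUI RPC protocol:

```python
import hashlib
import socket

def rpc(sock, body):
    """Send one GUI RPC request and return the raw XML reply."""
    sock.sendall(f"<boinc_gui_rpc_request>\n{body}\n</boinc_gui_rpc_request>\n\x03".encode())
    buf = b""
    while not buf.endswith(b"\x03"):                 # replies are 0x03-terminated
        buf += sock.recv(4096)
    return buf[:-1].decode()

with socket.create_connection(("127.0.0.1", 31416), timeout=5) as s:
    nonce = rpc(s, "<auth1/>").split("<nonce>")[1].split("</nonce>")[0]
    digest = hashlib.md5((nonce + "some-password").encode()).hexdigest()
    reply = rpc(s, f"<auth2>\n<nonce_hash>{digest}</nonce_hash>\n</auth2>")
    print("authorized" if "<authorized/>" in reply else "unauthorized")
```

If this prints `unauthorized`, check the password in `gui_rpc_auth.cfg` before suspecting the collector configuration.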
- - - -## Troubleshooting - -### Debug Mode - - -To troubleshoot issues with the `boinc` collector, run the `python.d.plugin` with the debug option enabled. The output -should give you clues as to why the collector isn't working. - -- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on - your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. - - ```bash - cd /usr/libexec/netdata/plugins.d/ - ``` - -- Switch to the `netdata` user. - - ```bash - sudo -u netdata -s - ``` - -- Run the `python.d.plugin` to debug the collector: - - ```bash - ./python.d.plugin boinc debug trace - ``` - -### Getting Logs - -If you're encountering problems with the `boinc` collector, follow these steps to retrieve logs and identify potential issues: - -- **Run the command** specific to your system (systemd, non-systemd, or Docker container). -- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem. - -#### System with systemd - -Use the following command to view logs generated since the last Netdata service restart: - -```bash -journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep boinc -``` - -#### System without systemd - -Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name: - -```bash -grep boinc /var/log/netdata/collector.log -``` - -**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues. - -#### Docker Container - -If your Netdata runs in a Docker container named "netdata" (replace if different), use this command: - -```bash -docker logs netdata 2>&1 | grep boinc -``` - - diff --git a/src/collectors/python.d.plugin/boinc/metadata.yaml b/src/collectors/python.d.plugin/boinc/metadata.yaml deleted file mode 100644 index 9448cbe0f..000000000 --- a/src/collectors/python.d.plugin/boinc/metadata.yaml +++ /dev/null @@ -1,198 +0,0 @@ -plugin_name: python.d.plugin -modules: - - meta: - plugin_name: python.d.plugin - module_name: boinc - monitored_instance: - name: BOINC - link: "https://boinc.berkeley.edu/" - categories: - - data-collection.distributed-computing-systems - icon_filename: "bolt.svg" - related_resources: - integrations: - list: [] - info_provided_to_referring_integrations: - description: "" - keywords: - - boinc - - distributed - most_popular: false - overview: - data_collection: - metrics_description: "This collector monitors task counts for the Berkeley Open Infrastructure for Network Computing (BOINC) distributed computing client." - method_description: "It uses the same RPC interface that the BOINC monitoring GUI does." - supported_platforms: - include: [] - exclude: [] - multi_instance: true - additional_permissions: - description: "" - default_behavior: - auto_detection: - description: "By default, the module will try to auto-detect the password to the RPC interface by looking in `/var/lib/boinc` for the `gui_rpc_auth.cfg` file (this is the location most Linux distributions use for a system-wide BOINC installation), so things may just work without needing configuration for a local system." - limits: - description: "" - performance_impact: - description: "" - setup: - prerequisites: - list: - - title: "BOINC RPC interface" - description: BOINC requires use of a password to access its RPC interface.
You can find this password in the `gui_rpc_auth.cfg` file in your BOINC directory. - configuration: - file: - name: python.d/boinc.conf - options: - description: | - There are 2 sections: - - * Global variables - * One or more JOBS that can define multiple different instances to monitor. - - The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. - - Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. - - Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. - folding: - title: "Config options" - enabled: true - list: - - name: update_every - description: Sets the default data collection frequency. - default_value: 5 - required: false - - name: priority - description: Controls the order of charts at the netdata dashboard. - default_value: 60000 - required: false - - name: autodetection_retry - description: Sets the job re-check interval in seconds. - default_value: 0 - required: false - - name: penalty - description: Indicates whether to apply penalty to update_every in case of failures. - default_value: yes - required: false - - name: name - description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. - default_value: "" - required: false - - name: hostname - description: Define a hostname where boinc is running. - default_value: "localhost" - required: false - - name: port - description: The port of boinc RPC interface. - default_value: "" - required: false - - name: password - description: Provide a password to connect to a boinc RPC interface. - default_value: "" - required: false - examples: - folding: - enabled: true - title: "Config" - list: - - name: Configuration of a remote boinc instance - description: A basic JOB configuration for a remote boinc instance - folding: - enabled: false - config: | - remote: - hostname: '1.2.3.4' - port: 1234 - password: 'some-password' - - name: Multi-instance - description: | - > **Note**: When you define multiple jobs, their names must be unique. - - Collecting metrics from local and remote instances. 
- config: | - localhost: - name: 'local' - host: '127.0.0.1' - port: 1234 - password: 'some-password' - - remote_job: - name: 'remote' - host: '192.0.2.1' - port: 1234 - password: some-other-password - troubleshooting: - problems: - list: [] - alerts: - - name: boinc_total_tasks - link: https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf - metric: boinc.tasks - info: average number of total tasks over the last 10 minutes - os: "*" - - name: boinc_active_tasks - link: https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf - metric: boinc.tasks - info: average number of active tasks over the last 10 minutes - os: "*" - - name: boinc_compute_errors - link: https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf - metric: boinc.states - info: average number of compute errors over the last 10 minutes - os: "*" - - name: boinc_upload_errors - link: https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf - metric: boinc.states - info: average number of failed uploads over the last 10 minutes - os: "*" - metrics: - folding: - title: Metrics - enabled: false - description: "" - availability: [] - scopes: - - name: global - description: "These metrics refer to the entire monitored application." - labels: [] - metrics: - - name: boinc.tasks - description: Overall Tasks - unit: "tasks" - chart_type: line - dimensions: - - name: Total - - name: Active - - name: boinc.states - description: Tasks per State - unit: "tasks" - chart_type: line - dimensions: - - name: New - - name: Downloading - - name: Ready to Run - - name: Compute Errors - - name: Uploading - - name: Uploaded - - name: Aborted - - name: Failed Uploads - - name: boinc.sched - description: Tasks per Scheduler State - unit: "tasks" - chart_type: line - dimensions: - - name: Uninitialized - - name: Preempted - - name: Scheduled - - name: boinc.process - description: Tasks per Process State - unit: "tasks" - chart_type: line - dimensions: - - name: Uninitialized - - name: Executing - - name: Suspended - - name: Aborted - - name: Quit - - name: Copy Pending diff --git a/src/collectors/python.d.plugin/ceph/README.md b/src/collectors/python.d.plugin/ceph/README.md deleted file mode 120000 index 654248b70..000000000 --- a/src/collectors/python.d.plugin/ceph/README.md +++ /dev/null @@ -1 +0,0 @@ -integrations/ceph.md \ No newline at end of file diff --git a/src/collectors/python.d.plugin/ceph/ceph.chart.py b/src/collectors/python.d.plugin/ceph/ceph.chart.py deleted file mode 100644 index 4bcbe1979..000000000 --- a/src/collectors/python.d.plugin/ceph/ceph.chart.py +++ /dev/null @@ -1,374 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: ceph netdata python.d module -# Author: Luis Eduardo (lets00) -# SPDX-License-Identifier: GPL-3.0-or-later - -try: - import rados - - CEPH = True -except ImportError: - CEPH = False - -import json -import os - -from bases.FrameworkServices.SimpleService import SimpleService - -# default module values (can be overridden per job in `config`) -update_every = 10 - -ORDER = [ - 'general_usage', - 'general_objects', - 'general_bytes', - 'general_operations', - 'general_latency', - 'pool_usage', - 'pool_objects', - 'pool_read_bytes', - 'pool_write_bytes', - 'pool_read_operations', - 'pool_write_operations', - 'osd_usage', - 'osd_size', - 'osd_apply_latency', - 'osd_commit_latency' -] - -CHARTS = { - 'general_usage': { - 'options': [None, 'Ceph General Space', 'KiB', 'general', 'ceph.general_usage', 'stacked'], - 'lines': [ - 
['general_available', 'avail', 'absolute'], - ['general_usage', 'used', 'absolute'] - ] - }, - 'general_objects': { - 'options': [None, 'Ceph General Objects', 'objects', 'general', 'ceph.general_objects', 'area'], - 'lines': [ - ['general_objects', 'cluster', 'absolute'] - ] - }, - 'general_bytes': { - 'options': [None, 'Ceph General Read/Write Data/s', 'KiB/s', 'general', 'ceph.general_bytes', - 'area'], - 'lines': [ - ['general_read_bytes', 'read', 'absolute', 1, 1024], - ['general_write_bytes', 'write', 'absolute', -1, 1024] - ] - }, - 'general_operations': { - 'options': [None, 'Ceph General Read/Write Operations/s', 'operations', 'general', 'ceph.general_operations', - 'area'], - 'lines': [ - ['general_read_operations', 'read', 'absolute', 1], - ['general_write_operations', 'write', 'absolute', -1] - ] - }, - 'general_latency': { - 'options': [None, 'Ceph General Apply/Commit latency', 'milliseconds', 'general', 'ceph.general_latency', - 'area'], - 'lines': [ - ['general_apply_latency', 'apply', 'absolute'], - ['general_commit_latency', 'commit', 'absolute'] - ] - }, - 'pool_usage': { - 'options': [None, 'Ceph Pools', 'KiB', 'pool', 'ceph.pool_usage', 'line'], - 'lines': [] - }, - 'pool_objects': { - 'options': [None, 'Ceph Pools', 'objects', 'pool', 'ceph.pool_objects', 'line'], - 'lines': [] - }, - 'pool_read_bytes': { - 'options': [None, 'Ceph Read Pool Data/s', 'KiB/s', 'pool', 'ceph.pool_read_bytes', 'area'], - 'lines': [] - }, - 'pool_write_bytes': { - 'options': [None, 'Ceph Write Pool Data/s', 'KiB/s', 'pool', 'ceph.pool_write_bytes', 'area'], - 'lines': [] - }, - 'pool_read_operations': { - 'options': [None, 'Ceph Read Pool Operations/s', 'operations', 'pool', 'ceph.pool_read_operations', 'area'], - 'lines': [] - }, - 'pool_write_operations': { - 'options': [None, 'Ceph Write Pool Operations/s', 'operations', 'pool', 'ceph.pool_write_operations', 'area'], - 'lines': [] - }, - 'osd_usage': { - 'options': [None, 'Ceph OSDs', 'KiB', 'osd', 'ceph.osd_usage', 'line'], - 'lines': [] - }, - 'osd_size': { - 'options': [None, 'Ceph OSDs size', 'KiB', 'osd', 'ceph.osd_size', 'line'], - 'lines': [] - }, - 'osd_apply_latency': { - 'options': [None, 'Ceph OSDs apply latency', 'milliseconds', 'osd', 'ceph.apply_latency', 'line'], - 'lines': [] - }, - 'osd_commit_latency': { - 'options': [None, 'Ceph OSDs commit latency', 'milliseconds', 'osd', 'ceph.commit_latency', 'line'], - 'lines': [] - } - -} - - -class Service(SimpleService): - def __init__(self, configuration=None, name=None): - SimpleService.__init__(self, configuration=configuration, name=name) - self.order = ORDER - self.definitions = CHARTS - self.config_file = self.configuration.get('config_file') - self.keyring_file = self.configuration.get('keyring_file') - self.rados_id = self.configuration.get('rados_id', 'admin') - - def check(self): - """ - Checks module - :return: - """ - if not CEPH: - self.error('rados module is needed to use ceph.chart.py') - return False - if not (self.config_file and self.keyring_file): - self.error('config_file and/or keyring_file is not defined') - return False - - # Verify files and permissions - if not (os.access(self.config_file, os.F_OK)): - self.error('{0} does not exist'.format(self.config_file)) - return False - if not (os.access(self.keyring_file, os.F_OK)): - self.error('{0} does not exist'.format(self.keyring_file)) - return False - if not (os.access(self.config_file, os.R_OK)): - self.error('Ceph plugin does not read {0}, define read permission.'.format(self.config_file)) - return 
False - if not (os.access(self.keyring_file, os.R_OK)): - self.error('Ceph plugin does not read {0}, define read permission.'.format(self.keyring_file)) - return False - try: - self.cluster = rados.Rados(conffile=self.config_file, - conf=dict(keyring=self.keyring_file), - rados_id=self.rados_id) - self.cluster.connect() - except rados.Error as error: - self.error(error) - return False - self.create_definitions() - return True - - def create_definitions(self): - """ - Create dynamically charts options - :return: None - """ - # Pool lines - for pool in sorted(self._get_df()['pools'], key=lambda x: sorted(x.keys())): - self.definitions['pool_usage']['lines'].append([pool['name'], - pool['name'], - 'absolute']) - self.definitions['pool_objects']['lines'].append(["obj_{0}".format(pool['name']), - pool['name'], - 'absolute']) - self.definitions['pool_read_bytes']['lines'].append(['read_{0}'.format(pool['name']), - pool['name'], - 'absolute', 1, 1024]) - self.definitions['pool_write_bytes']['lines'].append(['write_{0}'.format(pool['name']), - pool['name'], - 'absolute', 1, 1024]) - self.definitions['pool_read_operations']['lines'].append(['read_operations_{0}'.format(pool['name']), - pool['name'], - 'absolute']) - self.definitions['pool_write_operations']['lines'].append(['write_operations_{0}'.format(pool['name']), - pool['name'], - 'absolute']) - - # OSD lines - for osd in sorted(self._get_osd_df()['nodes'], key=lambda x: sorted(x.keys())): - self.definitions['osd_usage']['lines'].append([osd['name'], - osd['name'], - 'absolute']) - self.definitions['osd_size']['lines'].append(['size_{0}'.format(osd['name']), - osd['name'], - 'absolute']) - self.definitions['osd_apply_latency']['lines'].append(['apply_latency_{0}'.format(osd['name']), - osd['name'], - 'absolute']) - self.definitions['osd_commit_latency']['lines'].append(['commit_latency_{0}'.format(osd['name']), - osd['name'], - 'absolute']) - - def get_data(self): - """ - Catch all ceph data - :return: dict - """ - try: - data = {} - df = self._get_df() - osd_df = self._get_osd_df() - osd_perf = self._get_osd_perf() - osd_perf_infos = get_osd_perf_infos(osd_perf) - pool_stats = self._get_osd_pool_stats() - - data.update(self._get_general(osd_perf_infos, pool_stats)) - for pool in df['pools']: - data.update(self._get_pool_usage(pool)) - data.update(self._get_pool_objects(pool)) - for pool_io in pool_stats: - data.update(self._get_pool_rw(pool_io)) - for osd in osd_df['nodes']: - data.update(self._get_osd_usage(osd)) - data.update(self._get_osd_size(osd)) - for osd_apply_commit in osd_perf_infos: - data.update(self._get_osd_latency(osd_apply_commit)) - return data - except (ValueError, AttributeError) as error: - self.error(error) - return None - - def _get_general(self, osd_perf_infos, pool_stats): - """ - Get ceph's general usage - :return: dict - """ - status = self.cluster.get_cluster_stats() - read_bytes_sec = 0 - write_bytes_sec = 0 - read_op_per_sec = 0 - write_op_per_sec = 0 - apply_latency = 0 - commit_latency = 0 - - for pool_rw_io_b in pool_stats: - read_bytes_sec += pool_rw_io_b['client_io_rate'].get('read_bytes_sec', 0) - write_bytes_sec += pool_rw_io_b['client_io_rate'].get('write_bytes_sec', 0) - read_op_per_sec += pool_rw_io_b['client_io_rate'].get('read_op_per_sec', 0) - write_op_per_sec += pool_rw_io_b['client_io_rate'].get('write_op_per_sec', 0) - for perf in osd_perf_infos: - apply_latency += perf['perf_stats']['apply_latency_ms'] - commit_latency += perf['perf_stats']['commit_latency_ms'] - - return { - 'general_usage': 
int(status['kb_used']), - 'general_available': int(status['kb_avail']), - 'general_objects': int(status['num_objects']), - 'general_read_bytes': read_bytes_sec, - 'general_write_bytes': write_bytes_sec, - 'general_read_operations': read_op_per_sec, - 'general_write_operations': write_op_per_sec, - 'general_apply_latency': apply_latency, - 'general_commit_latency': commit_latency - } - - @staticmethod - def _get_pool_usage(pool): - """ - Process raw data into pool usage dict information - :return: A pool dict with pool name's key and usage bytes' value - """ - return {pool['name']: pool['stats']['kb_used']} - - @staticmethod - def _get_pool_objects(pool): - """ - Process raw data into pool usage dict information - :return: A pool dict with pool name's key and object numbers - """ - return {'obj_{0}'.format(pool['name']): pool['stats']['objects']} - - @staticmethod - def _get_pool_rw(pool): - """ - Get read/write kb and operations in a pool - :return: A pool dict with both read/write bytes and operations. - """ - return { - 'read_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('read_bytes_sec', 0)), - 'write_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('write_bytes_sec', 0)), - 'read_operations_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('read_op_per_sec', 0)), - 'write_operations_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('write_op_per_sec', 0)) - } - - @staticmethod - def _get_osd_usage(osd): - """ - Process raw data into osd dict information to get osd usage - :return: A osd dict with osd name's key and usage bytes' value - """ - return {osd['name']: float(osd['kb_used'])} - - @staticmethod - def _get_osd_size(osd): - """ - Process raw data into osd dict information to get osd size (kb) - :return: A osd dict with osd name's key and size bytes' value - """ - return {'size_{0}'.format(osd['name']): float(osd['kb'])} - - @staticmethod - def _get_osd_latency(osd): - """ - Get ceph osd apply and commit latency - :return: A osd dict with osd name's key with both apply and commit latency values - """ - return { - 'apply_latency_osd.{0}'.format(osd['id']): osd['perf_stats']['apply_latency_ms'], - 'commit_latency_osd.{0}'.format(osd['id']): osd['perf_stats']['commit_latency_ms'] - } - - def _get_df(self): - """ - Get ceph df output - :return: ceph df --format json - """ - return json.loads(self.cluster.mon_command(json.dumps({ - 'prefix': 'df', - 'format': 'json' - }), b'')[1].decode('utf-8')) - - def _get_osd_df(self): - """ - Get ceph osd df output - :return: ceph osd df --format json - """ - return json.loads(self.cluster.mon_command(json.dumps({ - 'prefix': 'osd df', - 'format': 'json' - }), b'')[1].decode('utf-8').replace('-nan', '"-nan"')) - - def _get_osd_perf(self): - """ - Get ceph osd performance - :return: ceph osd perf --format json - """ - return json.loads(self.cluster.mon_command(json.dumps({ - 'prefix': 'osd perf', - 'format': 'json' - }), b'')[1].decode('utf-8')) - - def _get_osd_pool_stats(self): - """ - Get ceph osd pool status. 
- This command is used to get information about both - read/write operations and bytes per second on each pool - :return: ceph osd pool stats --format json - """ - return json.loads(self.cluster.mon_command(json.dumps({ - 'prefix': 'osd pool stats', - 'format': 'json' - }), b'')[1].decode('utf-8')) - - -def get_osd_perf_infos(osd_perf): - # https://github.com/netdata/netdata/issues/8247 - # the module uses 'osd_perf_infos' data; it has been moved under `osdstats` since Ceph v14.2 - if 'osd_perf_infos' in osd_perf: - return osd_perf['osd_perf_infos'] - return osd_perf['osdstats']['osd_perf_infos'] diff --git a/src/collectors/python.d.plugin/ceph/ceph.conf b/src/collectors/python.d.plugin/ceph/ceph.conf deleted file mode 100644 index 81788e866..000000000 --- a/src/collectors/python.d.plugin/ceph/ceph.conf +++ /dev/null @@ -1,75 +0,0 @@ -# netdata python.d.plugin configuration for ceph stats -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs; however, each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 10 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# penalty indicates whether to apply penalty to update_every in case of failures. -# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. -# penalty: yes - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 10 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# penalty: yes # the JOB's penalty -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# In addition to the above, the ceph plugin also supports the following: -# -# config_file: 'config_file' # Ceph config file. -# keyring_file: 'keyring_file' # Ceph keyring file. The netdata user must be added to the ceph group -# # and the keyring file must have group read permission. -# rados_id: 'rados username' # ID used to connect to the ceph cluster. Allows -# # creating a read-only key for pulling data rather than using
admin -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS -# only one of them will run (they have the same name) -# -config_file: '/etc/ceph/ceph.conf' -keyring_file: '/etc/ceph/ceph.client.admin.keyring' -rados_id: 'admin' diff --git a/src/collectors/python.d.plugin/ceph/integrations/ceph.md b/src/collectors/python.d.plugin/ceph/integrations/ceph.md deleted file mode 100644 index d2584a4d0..000000000 --- a/src/collectors/python.d.plugin/ceph/integrations/ceph.md +++ /dev/null @@ -1,228 +0,0 @@ - - -# Ceph - - - - - -Plugin: python.d.plugin -Module: ceph - - - -## Overview - -This collector monitors Ceph metrics about Cluster statistics, OSD usage, latency and Pool statistics. - -Uses the `rados` python module to connect to a Ceph cluster. - -This collector is supported on all platforms. - -This collector supports collecting metrics from multiple instances of this integration, including remote instances. - - -### Default Behavior - -#### Auto-Detection - -This integration doesn't support auto-detection. - -#### Limits - -The default configuration for this integration does not impose any limits on data collection. - -#### Performance Impact - -The default configuration for this integration is not expected to impose a significant performance impact on the system. - - -## Metrics - -Metrics grouped by *scope*. - -The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. - - - -### Per Ceph instance - -These metrics refer to the entire monitored application. - -This scope has no labels. - -Metrics: - -| Metric | Dimensions | Unit | -|:------|:----------|:----| -| ceph.general_usage | avail, used | KiB | -| ceph.general_objects | cluster | objects | -| ceph.general_bytes | read, write | KiB/s | -| ceph.general_operations | read, write | operations | -| ceph.general_latency | apply, commit | milliseconds | -| ceph.pool_usage | a dimension per Ceph Pool | KiB | -| ceph.pool_objects | a dimension per Ceph Pool | objects | -| ceph.pool_read_bytes | a dimension per Ceph Pool | KiB/s | -| ceph.pool_write_bytes | a dimension per Ceph Pool | KiB/s | -| ceph.pool_read_operations | a dimension per Ceph Pool | operations | -| ceph.pool_write_operations | a dimension per Ceph Pool | operations | -| ceph.osd_usage | a dimension per Ceph OSD | KiB | -| ceph.osd_size | a dimension per Ceph OSD | KiB | -| ceph.apply_latency | a dimension per Ceph OSD | milliseconds | -| ceph.commit_latency | a dimension per Ceph OSD | milliseconds | - - - -## Alerts - - -The following alerts are available: - -| Alert name | On metric | Description | -|:------------|:----------|:------------| -| [ ceph_cluster_space_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ceph.conf) | ceph.general_usage | cluster disk space utilization | - - -## Setup - -### Prerequisites - -#### `rados` python module - -Make sure the `rados` python module is installed - -#### Granting read permissions to ceph group from keyring file - -Execute: `chmod 640 /etc/ceph/ceph.client.admin.keyring` - -#### Create a specific rados_id - -You can optionally create a rados_id to use instead of admin - - -### Configuration - -#### File - -The configuration file name for this integration is `python.d/ceph.conf`. - - -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
- -```bash -cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata -sudo ./edit-config python.d/ceph.conf -``` -#### Options - -There are 2 sections: - -* Global variables -* One or more JOBS that can define multiple different instances to monitor. - -The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. - -Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. - -Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. - - -
Config options - -| Name | Description | Default | Required | -|:----|:-----------|:-------|:--------:| -| update_every | Sets the default data collection frequency. | 5 | no | -| priority | Controls the order of charts at the netdata dashboard. | 60000 | no | -| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no | -| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no | -| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no | -| config_file | Ceph config file. | | yes | -| keyring_file | Ceph keyring file. The netdata user must be added to the ceph group and the keyring file must have group read permission. | | yes | -| rados_id | A rados user id to use for connecting to the Ceph cluster. | admin | no | -
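Before wiring the options above into a JOB, it can help to confirm them outside of Netdata. The sketch below is an illustration, not part of the collector: it uses the same `rados` module and the same mon_command JSON interface the module relies on, with the default paths and `rados_id` from the table (adjust for your cluster):

```python
import json
import rados

cluster = rados.Rados(conffile='/etc/ceph/ceph.conf',
                      conf={'keyring': '/etc/ceph/ceph.client.admin.keyring'},
                      rados_id='admin')
cluster.connect()
# Same call shape the collector uses for `ceph df --format json`.
ret, out, err = cluster.mon_command(json.dumps({'prefix': 'df', 'format': 'json'}), b'')
df = json.loads(out.decode('utf-8'))
print(df['stats'])        # cluster-wide totals; df['pools'] holds per-pool usage
cluster.shutdown()
```

If this script fails with a permission error, revisit the keyring group-read requirement described above.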
- -#### Examples - -##### Basic local Ceph cluster - -A basic configuration to connect to a local Ceph cluster. - -```yaml -local: - config_file: '/etc/ceph/ceph.conf' - keyring_file: '/etc/ceph/ceph.client.admin.keyring' - -``` - - -## Troubleshooting - -### Debug Mode - - -To troubleshoot issues with the `ceph` collector, run the `python.d.plugin` with the debug option enabled. The output -should give you clues as to why the collector isn't working. - -- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on - your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. - - ```bash - cd /usr/libexec/netdata/plugins.d/ - ``` - -- Switch to the `netdata` user. - - ```bash - sudo -u netdata -s - ``` - -- Run the `python.d.plugin` to debug the collector: - - ```bash - ./python.d.plugin ceph debug trace - ``` - -### Getting Logs - -If you're encountering problems with the `ceph` collector, follow these steps to retrieve logs and identify potential issues: - -- **Run the command** specific to your system (systemd, non-systemd, or Docker container). -- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem. - -#### System with systemd - -Use the following command to view logs generated since the last Netdata service restart: - -```bash -journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep ceph -``` - -#### System without systemd - -Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name: - -```bash -grep ceph /var/log/netdata/collector.log -``` - -**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues. - -#### Docker Container - -If your Netdata runs in a Docker container named "netdata" (replace if different), use this command: - -```bash -docker logs netdata 2>&1 | grep ceph -``` - - diff --git a/src/collectors/python.d.plugin/ceph/metadata.yaml b/src/collectors/python.d.plugin/ceph/metadata.yaml deleted file mode 100644 index 642941137..000000000 --- a/src/collectors/python.d.plugin/ceph/metadata.yaml +++ /dev/null @@ -1,223 +0,0 @@ -plugin_name: python.d.plugin -modules: - - meta: - plugin_name: python.d.plugin - module_name: ceph - monitored_instance: - name: Ceph - link: 'https://ceph.io/' - categories: - - data-collection.storage-mount-points-and-filesystems - icon_filename: 'ceph.svg' - related_resources: - integrations: - list: [] - info_provided_to_referring_integrations: - description: '' - keywords: - - ceph - - storage - most_popular: false - overview: - data_collection: - metrics_description: 'This collector monitors Ceph metrics about Cluster statistics, OSD usage, latency and Pool statistics.' - method_description: 'Uses the `rados` python module to connect to a Ceph cluster.' 
- supported_platforms: - include: [] - exclude: [] - multi_instance: true - additional_permissions: - description: '' - default_behavior: - auto_detection: - description: '' - limits: - description: '' - performance_impact: - description: '' - setup: - prerequisites: - list: - - title: '`rados` python module' - description: 'Make sure the `rados` python module is installed' - - title: 'Granting read permissions to ceph group from keyring file' - description: 'Execute: `chmod 640 /etc/ceph/ceph.client.admin.keyring`' - - title: 'Create a specific rados_id' - description: 'You can optionally create a rados_id to use instead of admin' - configuration: - file: - name: python.d/ceph.conf - options: - description: | - There are 2 sections: - - * Global variables - * One or more JOBS that can define multiple different instances to monitor. - - The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. - - Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. - - Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. - folding: - title: "Config options" - enabled: true - list: - - name: update_every - description: Sets the default data collection frequency. - default_value: 5 - required: false - - name: priority - description: Controls the order of charts at the netdata dashboard. - default_value: 60000 - required: false - - name: autodetection_retry - description: Sets the job re-check interval in seconds. - default_value: 0 - required: false - - name: penalty - description: Indicates whether to apply penalty to update_every in case of failures. - default_value: yes - required: false - - name: name - description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. - default_value: '' - required: false - - name: config_file - description: Ceph config file - default_value: '' - required: true - - name: keyring_file - description: Ceph keyring file. netdata user must be added into ceph group and keyring file must be read group permission. - default_value: '' - required: true - - name: rados_id - description: A rados user id to use for connecting to the Ceph cluster. - default_value: 'admin' - required: false - examples: - folding: - enabled: true - title: "Config" - list: - - name: Basic local Ceph cluster - description: A basic configuration to connect to a local Ceph cluster. - folding: - enabled: false - config: | - local: - config_file: '/etc/ceph/ceph.conf' - keyring_file: '/etc/ceph/ceph.client.admin.keyring' - troubleshooting: - problems: - list: [] - alerts: - - name: ceph_cluster_space_usage - link: https://github.com/netdata/netdata/blob/master/src/health/health.d/ceph.conf - metric: ceph.general_usage - info: cluster disk space utilization - metrics: - folding: - title: Metrics - enabled: false - description: "" - availability: [] - scopes: - - name: global - description: "These metrics refer to the entire monitored application." 
- labels: [] - metrics: - - name: ceph.general_usage - description: Ceph General Space - unit: "KiB" - chart_type: stacked - dimensions: - - name: avail - - name: used - - name: ceph.general_objects - description: Ceph General Objects - unit: "objects" - chart_type: area - dimensions: - - name: cluster - - name: ceph.general_bytes - description: Ceph General Read/Write Data/s - unit: "KiB/s" - chart_type: area - dimensions: - - name: read - - name: write - - name: ceph.general_operations - description: Ceph General Read/Write Operations/s - unit: "operations" - chart_type: area - dimensions: - - name: read - - name: write - - name: ceph.general_latency - description: Ceph General Apply/Commit latency - unit: "milliseconds" - chart_type: area - dimensions: - - name: apply - - name: commit - - name: ceph.pool_usage - description: Ceph Pools - unit: "KiB" - chart_type: line - dimensions: - - name: a dimension per Ceph Pool - - name: ceph.pool_objects - description: Ceph Pools - unit: "objects" - chart_type: line - dimensions: - - name: a dimension per Ceph Pool - - name: ceph.pool_read_bytes - description: Ceph Read Pool Data/s - unit: "KiB/s" - chart_type: area - dimensions: - - name: a dimension per Ceph Pool - - name: ceph.pool_write_bytes - description: Ceph Write Pool Data/s - unit: "KiB/s" - chart_type: area - dimensions: - - name: a dimension per Ceph Pool - - name: ceph.pool_read_operations - description: Ceph Read Pool Operations/s - unit: "operations" - chart_type: area - dimensions: - - name: a dimension per Ceph Pool - - name: ceph.pool_write_operations - description: Ceph Write Pool Operations/s - unit: "operations" - chart_type: area - dimensions: - - name: a dimension per Ceph Pool - - name: ceph.osd_usage - description: Ceph OSDs - unit: "KiB" - chart_type: line - dimensions: - - name: a dimension per Ceph OSD - - name: ceph.osd_size - description: Ceph OSDs size - unit: "KiB" - chart_type: line - dimensions: - - name: a dimension per Ceph OSD - - name: ceph.apply_latency - description: Ceph OSDs apply latency - unit: "milliseconds" - chart_type: line - dimensions: - - name: a dimension per Ceph OSD - - name: ceph.commit_latency - description: Ceph OSDs commit latency - unit: "milliseconds" - chart_type: line - dimensions: - - name: a dimension per Ceph OSD diff --git a/src/collectors/python.d.plugin/go_expvar/integrations/go_applications_expvar.md b/src/collectors/python.d.plugin/go_expvar/integrations/go_applications_expvar.md index 8f086765e..103058db8 100644 --- a/src/collectors/python.d.plugin/go_expvar/integrations/go_applications_expvar.md +++ b/src/collectors/python.d.plugin/go_expvar/integrations/go_applications_expvar.md @@ -84,14 +84,14 @@ There are no alerts configured by default for this integration. #### Enable the go_expvar collector -The `go_expvar` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config directory](/docs/netdata-agent/configuration/README.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file. +The `go_expvar` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file. ```bash cd /etc/netdata # Replace this path with your Netdata config directory, if different sudo ./edit-config python.d.conf ``` -Change the value of the `go_expvar` setting to `yes`. 
Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](/packaging/installer/README.md#maintaining-a-netdata-agent-installation) for your system. +Change the value of the `go_expvar` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/start-stop-restart.md) for your system. #### Sample `expvar` usage in a Go application @@ -171,8 +171,8 @@ number of currently running Goroutines and updates these stats every second. The configuration file name for this integration is `python.d/go_expvar.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -231,8 +231,8 @@ See [this issue](https://github.com/netdata/netdata/pull/1902#issuecomment-28449 Please see these two links to the official Netdata documentation for more information about the values: -- [External plugins - charts](/src/collectors/plugins.d/README.md#chart) -- [Chart variables](/src/collectors/python.d.plugin/README.md#global-variables-order-and-chart) +- [External plugins - charts](https://github.com/netdata/netdata/blob/master/src/plugins.d/README.md#chart) +- [Chart variables](https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/README.md#global-variables-order-and-chart) **Line definitions** @@ -255,7 +255,7 @@ hidden: False ``` Please see the following link for more information about the options and their default values: -[External plugins - dimensions](/src/collectors/plugins.d/README.md#dimension) +[External plugins - dimensions](https://github.com/netdata/netdata/blob/master/src/plugins.d/README.md#dimension) Apart from top-level expvars, this plugin can also parse expvars stored in a multi-level map; All dicts in the resulting JSON document are then flattened to one level. diff --git a/src/collectors/python.d.plugin/go_expvar/metadata.yaml b/src/collectors/python.d.plugin/go_expvar/metadata.yaml index aa45968ff..b91225e9a 100644 --- a/src/collectors/python.d.plugin/go_expvar/metadata.yaml +++ b/src/collectors/python.d.plugin/go_expvar/metadata.yaml @@ -48,7 +48,7 @@ modules: sudo ./edit-config python.d.conf ``` - Change the value of the `go_expvar` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](/packaging/installer/README.md#maintaining-a-netdata-agent-installation) for your system. + Change the value of the `go_expvar` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](/docs/netdata-agent/start-stop-restart.md) for your system. - title: "Sample `expvar` usage in a Go application" description: | The `expvar` package exposes metrics over HTTP and is very easy to use. 
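For context, polling such an endpoint amounts to fetching a single JSON document. The hedged Python sketch below illustrates what the module's handling of "expvars stored in a multi-level map" implies: read `/debug/vars` and flatten nested dicts to dotted keys. The URL and the key looked up at the end are examples, not defaults:

```python
# Illustration only: fetch a Go application's expvar JSON and flatten it.
import json
from urllib.request import urlopen

def flatten(obj, prefix=""):
    """Flatten nested dicts into dotted keys, keeping only numeric leaves."""
    out = {}
    for key, value in obj.items():
        name = f"{prefix}.{key}" if prefix else key
        if isinstance(value, dict):
            out.update(flatten(value, name))
        elif isinstance(value, (int, float)):
            out[name] = value
    return out

with urlopen("http://127.0.0.1:8080/debug/vars", timeout=5) as resp:
    expvars = json.loads(resp.read().decode())

print(flatten(expvars).get("memstats.HeapAlloc"))  # e.g. current heap bytes
```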
@@ -200,7 +200,7 @@ modules: Please see these two links to the official Netdata documentation for more information about the values: - - [External plugins - charts](/src/collectors/plugins.d/README.md#chart) + - [External plugins - charts](/src/plugins.d/README.md#chart) - [Chart variables](/src/collectors/python.d.plugin/README.md#global-variables-order-and-chart) **Line definitions** @@ -224,7 +224,7 @@ modules: ``` Please see the following link for more information about the options and their default values: - [External plugins - dimensions](/src/collectors/plugins.d/README.md#dimension) + [External plugins - dimensions](/src/plugins.d/README.md#dimension) Apart from top-level expvars, this plugin can also parse expvars stored in a multi-level map; All dicts in the resulting JSON document are then flattened to one level. diff --git a/src/collectors/python.d.plugin/haproxy/README.md b/src/collectors/python.d.plugin/haproxy/README.md index 8ade512bb..bc54d8638 100644 --- a/src/collectors/python.d.plugin/haproxy/README.md +++ b/src/collectors/python.d.plugin/haproxy/README.md @@ -1,12 +1,3 @@ - - # HAProxy collector Monitors frontend and backend metrics such as bytes in, bytes out, sessions current, sessions in queue current. diff --git a/src/collectors/python.d.plugin/openldap/README.md b/src/collectors/python.d.plugin/openldap/README.md deleted file mode 120000 index 45f36b9b9..000000000 --- a/src/collectors/python.d.plugin/openldap/README.md +++ /dev/null @@ -1 +0,0 @@ -integrations/openldap.md \ No newline at end of file diff --git a/src/collectors/python.d.plugin/openldap/integrations/openldap.md b/src/collectors/python.d.plugin/openldap/integrations/openldap.md deleted file mode 100644 index 3f363343a..000000000 --- a/src/collectors/python.d.plugin/openldap/integrations/openldap.md +++ /dev/null @@ -1,249 +0,0 @@ - - -# OpenLDAP - - - - - -Plugin: python.d.plugin -Module: openldap - - - -## Overview - -This collector monitors OpenLDAP metrics about connections, operations, referrals and more. - -Statistics are taken from the monitoring interface of a openLDAP (slapd) server - - -This collector is supported on all platforms. - -This collector only supports collecting metrics from a single instance of this integration. - - -### Default Behavior - -#### Auto-Detection - -This collector doesn't work until all the prerequisites are checked. - - -#### Limits - -The default configuration for this integration does not impose any limits on data collection. - -#### Performance Impact - -The default configuration for this integration is not expected to impose a significant performance impact on the system. - - -## Metrics - -Metrics grouped by *scope*. - -The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. - - - -### Per OpenLDAP instance - -These metrics refer to the entire monitored application. - -This scope has no labels. - -Metrics: - -| Metric | Dimensions | Unit | -|:------|:----------|:----| -| openldap.total_connections | connections | connections/s | -| openldap.traffic_stats | sent | KiB/s | -| openldap.operations_status | completed, initiated | ops/s | -| openldap.referrals | sent | referrals/s | -| openldap.entries | sent | entries/s | -| openldap.ldap_operations | bind, search, unbind, add, delete, modify, compare | ops/s | -| openldap.waiters | write, read | waiters/s | - - - -## Alerts - -There are no alerts configured by default for this integration. 
- - -## Setup - -### Prerequisites - -#### Configure the openLDAP server to expose metrics to monitor it. - -Follow instructions from https://www.openldap.org/doc/admin24/monitoringslapd.html to activate monitoring interface. - - -#### Install python-ldap module - -Install python ldap module - -1. From pip package manager - -```bash -pip install ldap -``` - -2. With apt package manager (in most deb based distros) - - -```bash -apt-get install python-ldap -``` - - -3. With yum package manager (in most rpm based distros) - - -```bash -yum install python-ldap -``` - - -#### Insert credentials for Netdata to access openLDAP server - -Use the `ldappasswd` utility to set a password for the username you will use. - - - -### Configuration - -#### File - -The configuration file name for this integration is `python.d/openldap.conf`. - - -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). - -```bash -cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata -sudo ./edit-config python.d/openldap.conf -``` -#### Options - -There are 2 sections: - -* Global variables -* One or more JOBS that can define multiple different instances to monitor. - -The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. - -Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. - -Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. - - -
Config options
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
-| username | The bind user with rights to access the monitor statistics. | | yes |
-| password | The password for the bind user. | | yes |
-| server | The listening address of the LDAP server. When using TLS, use the hostname for which the certificate is issued. | | yes |
-| port | The listening port of the LDAP server. Change to port 636 for TLS connections. | 389 | yes |
-| use_tls | Set to True if a TLS connection is used over ldaps://. | no | no |
-| use_start_tls | Set to True if a TLS connection is established via STARTTLS over ldap://. | no | no |
-| cert_check | Set to False to skip certificate verification. | True | yes |
-| timeout | Seconds to wait before timing out when no connection can be established. | | yes |
-
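Before enabling the job, you can verify that the monitor backend answers and that the bind credentials work. A minimal standalone probe (Python, using the same `python-ldap` module the collector imports; the bind DN and password are placeholders, and the queried DN/attribute pair is taken from the collector's own `SEARCH_LIST` in `openldap.chart.py`):

```python
# Standalone probe of the slapd monitor backend, using the same
# python-ldap module the collector imports. Bind DN and password are
# placeholders; the DN/attribute pair mirrors the collector's SEARCH_LIST.
import ldap

conn = ldap.initialize("ldap://localhost:389")
conn.set_option(ldap.OPT_NETWORK_TIMEOUT, 1)
conn.simple_bind_s("cn=admin", "pass")

result = conn.search_s(
    "cn=Total,cn=Connections,cn=Monitor",  # total connections counter
    ldap.SCOPE_BASE,
    "(objectClass=*)",
    ["monitorCounter"],
)
dn, attrs = result[0]
print("total connections:", int(attrs["monitorCounter"][0]))
conn.unbind_s()
```

If this prints a number, a job with the same credentials should collect data; note that the collector source reads the certificate option as `cert_check`.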
- -#### Examples - -##### Basic - -A basic example configuration. - -```yaml -username: "cn=admin" -password: "pass" -server: "localhost" -port: "389" -check_cert: True -timeout: 1 - -``` - - -## Troubleshooting - -### Debug Mode - - -To troubleshoot issues with the `openldap` collector, run the `python.d.plugin` with the debug option enabled. The output -should give you clues as to why the collector isn't working. - -- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on - your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. - - ```bash - cd /usr/libexec/netdata/plugins.d/ - ``` - -- Switch to the `netdata` user. - - ```bash - sudo -u netdata -s - ``` - -- Run the `python.d.plugin` to debug the collector: - - ```bash - ./python.d.plugin openldap debug trace - ``` - -### Getting Logs - -If you're encountering problems with the `openldap` collector, follow these steps to retrieve logs and identify potential issues: - -- **Run the command** specific to your system (systemd, non-systemd, or Docker container). -- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem. - -#### System with systemd - -Use the following command to view logs generated since the last Netdata service restart: - -```bash -journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep openldap -``` - -#### System without systemd - -Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name: - -```bash -grep openldap /var/log/netdata/collector.log -``` - -**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues. - -#### Docker Container - -If your Netdata runs in a Docker container named "netdata" (replace if different), use this command: - -```bash -docker logs netdata 2>&1 | grep openldap -``` - - diff --git a/src/collectors/python.d.plugin/openldap/metadata.yaml b/src/collectors/python.d.plugin/openldap/metadata.yaml deleted file mode 100644 index 3826b22c7..000000000 --- a/src/collectors/python.d.plugin/openldap/metadata.yaml +++ /dev/null @@ -1,225 +0,0 @@ -plugin_name: python.d.plugin -modules: - - meta: - plugin_name: python.d.plugin - module_name: openldap - monitored_instance: - name: OpenLDAP - link: "https://www.openldap.org/" - categories: - - data-collection.authentication-and-authorization - icon_filename: "statsd.png" - related_resources: - integrations: - list: [] - info_provided_to_referring_integrations: - description: "" - keywords: - - openldap - - RBAC - - Directory access - most_popular: false - overview: - data_collection: - metrics_description: "This collector monitors OpenLDAP metrics about connections, operations, referrals and more." - method_description: | - Statistics are taken from the monitoring interface of a openLDAP (slapd) server - supported_platforms: - include: [] - exclude: [] - multi_instance: false - additional_permissions: - description: "" - default_behavior: - auto_detection: - description: | - This collector doesn't work until all the prerequisites are checked. - limits: - description: "" - performance_impact: - description: "" - setup: - prerequisites: - list: - - title: Configure the openLDAP server to expose metrics to monitor it. 
- description: | - Follow instructions from https://www.openldap.org/doc/admin24/monitoringslapd.html to activate monitoring interface. - - title: Install python-ldap module - description: | - Install python ldap module - - 1. From pip package manager - - ```bash - pip install ldap - ``` - - 2. With apt package manager (in most deb based distros) - - - ```bash - apt-get install python-ldap - ``` - - - 3. With yum package manager (in most rpm based distros) - - - ```bash - yum install python-ldap - ``` - - title: Insert credentials for Netdata to access openLDAP server - description: | - Use the `ldappasswd` utility to set a password for the username you will use. - configuration: - file: - name: "python.d/openldap.conf" - options: - description: | - There are 2 sections: - - * Global variables - * One or more JOBS that can define multiple different instances to monitor. - - The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. - - Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. - - Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. - folding: - title: "Config options" - enabled: true - list: - - name: update_every - description: Sets the default data collection frequency. - default_value: 5 - required: false - - name: priority - description: Controls the order of charts at the netdata dashboard. - default_value: 60000 - required: false - - name: autodetection_retry - description: Sets the job re-check interval in seconds. - default_value: 0 - required: false - - name: penalty - description: Indicates whether to apply penalty to update_every in case of failures. - default_value: yes - required: false - - name: name - description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. - default_value: "" - required: false - - name: username - description: The bind user with right to access monitor statistics - default_value: "" - required: true - - name: password - description: The password for the binded user - default_value: "" - required: true - - name: server - description: The listening address of the LDAP server. In case of TLS, use the hostname which the certificate is published for. - default_value: "" - required: true - - name: port - description: The listening port of the LDAP server. Change to 636 port in case of TLS connection. - default_value: "389" - required: true - - name: use_tls - description: Make True if a TLS connection is used over ldaps:// - default_value: False - required: false - - name: use_start_tls - description: Make True if a TLS connection is used over ldap:// - default_value: False - required: false - - name: cert_check - description: False if you want to ignore certificate check - default_value: "True" - required: true - - name: timeout - description: Seconds to timeout if no connection exist - default_value: "" - required: true - examples: - folding: - enabled: true - title: "Config" - list: - - name: Basic - description: A basic example configuration. 
- folding: - enabled: false - config: | - username: "cn=admin" - password: "pass" - server: "localhost" - port: "389" - check_cert: True - timeout: 1 - troubleshooting: - problems: - list: [] - alerts: [] - metrics: - folding: - title: Metrics - enabled: false - description: "" - availability: [] - scopes: - - name: global - description: "These metrics refer to the entire monitored application." - labels: [] - metrics: - - name: openldap.total_connections - description: Total Connections - unit: "connections/s" - chart_type: line - dimensions: - - name: connections - - name: openldap.traffic_stats - description: Traffic - unit: "KiB/s" - chart_type: line - dimensions: - - name: sent - - name: openldap.operations_status - description: Operations Status - unit: "ops/s" - chart_type: line - dimensions: - - name: completed - - name: initiated - - name: openldap.referrals - description: Referrals - unit: "referrals/s" - chart_type: line - dimensions: - - name: sent - - name: openldap.entries - description: Entries - unit: "entries/s" - chart_type: line - dimensions: - - name: sent - - name: openldap.ldap_operations - description: Operations - unit: "ops/s" - chart_type: line - dimensions: - - name: bind - - name: search - - name: unbind - - name: add - - name: delete - - name: modify - - name: compare - - name: openldap.waiters - description: Waiters - unit: "waiters/s" - chart_type: line - dimensions: - - name: write - - name: read diff --git a/src/collectors/python.d.plugin/openldap/openldap.chart.py b/src/collectors/python.d.plugin/openldap/openldap.chart.py deleted file mode 100644 index aba143954..000000000 --- a/src/collectors/python.d.plugin/openldap/openldap.chart.py +++ /dev/null @@ -1,216 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: openldap netdata python.d module -# Author: Manolis Kartsonakis (ekartsonakis) -# SPDX-License-Identifier: GPL-3.0+ - -try: - import ldap - - HAS_LDAP = True -except ImportError: - HAS_LDAP = False - -from bases.FrameworkServices.SimpleService import SimpleService - -DEFAULT_SERVER = 'localhost' -DEFAULT_PORT = '389' -DEFAULT_TLS = False -DEFAULT_CERT_CHECK = True -DEFAULT_TIMEOUT = 1 -DEFAULT_START_TLS = False - -ORDER = [ - 'total_connections', - 'bytes_sent', - 'operations', - 'referrals_sent', - 'entries_sent', - 'ldap_operations', - 'waiters' -] - -CHARTS = { - 'total_connections': { - 'options': [None, 'Total Connections', 'connections/s', 'ldap', 'openldap.total_connections', 'line'], - 'lines': [ - ['total_connections', 'connections', 'incremental'] - ] - }, - 'bytes_sent': { - 'options': [None, 'Traffic', 'KiB/s', 'ldap', 'openldap.traffic_stats', 'line'], - 'lines': [ - ['bytes_sent', 'sent', 'incremental', 1, 1024] - ] - }, - 'operations': { - 'options': [None, 'Operations Status', 'ops/s', 'ldap', 'openldap.operations_status', 'line'], - 'lines': [ - ['completed_operations', 'completed', 'incremental'], - ['initiated_operations', 'initiated', 'incremental'] - ] - }, - 'referrals_sent': { - 'options': [None, 'Referrals', 'referrals/s', 'ldap', 'openldap.referrals', 'line'], - 'lines': [ - ['referrals_sent', 'sent', 'incremental'] - ] - }, - 'entries_sent': { - 'options': [None, 'Entries', 'entries/s', 'ldap', 'openldap.entries', 'line'], - 'lines': [ - ['entries_sent', 'sent', 'incremental'] - ] - }, - 'ldap_operations': { - 'options': [None, 'Operations', 'ops/s', 'ldap', 'openldap.ldap_operations', 'line'], - 'lines': [ - ['bind_operations', 'bind', 'incremental'], - ['search_operations', 'search', 'incremental'], - ['unbind_operations', 
'unbind', 'incremental'], - ['add_operations', 'add', 'incremental'], - ['delete_operations', 'delete', 'incremental'], - ['modify_operations', 'modify', 'incremental'], - ['compare_operations', 'compare', 'incremental'] - ] - }, - 'waiters': { - 'options': [None, 'Waiters', 'waiters/s', 'ldap', 'openldap.waiters', 'line'], - 'lines': [ - ['write_waiters', 'write', 'incremental'], - ['read_waiters', 'read', 'incremental'] - ] - }, -} - -# Stuff to gather - make tuples of DN dn and attrib to get -SEARCH_LIST = { - 'total_connections': ( - 'cn=Total,cn=Connections,cn=Monitor', 'monitorCounter', - ), - 'bytes_sent': ( - 'cn=Bytes,cn=Statistics,cn=Monitor', 'monitorCounter', - ), - 'completed_operations': ( - 'cn=Operations,cn=Monitor', 'monitorOpCompleted', - ), - 'initiated_operations': ( - 'cn=Operations,cn=Monitor', 'monitorOpInitiated', - ), - 'referrals_sent': ( - 'cn=Referrals,cn=Statistics,cn=Monitor', 'monitorCounter', - ), - 'entries_sent': ( - 'cn=Entries,cn=Statistics,cn=Monitor', 'monitorCounter', - ), - 'bind_operations': ( - 'cn=Bind,cn=Operations,cn=Monitor', 'monitorOpCompleted', - ), - 'unbind_operations': ( - 'cn=Unbind,cn=Operations,cn=Monitor', 'monitorOpCompleted', - ), - 'add_operations': ( - 'cn=Add,cn=Operations,cn=Monitor', 'monitorOpInitiated', - ), - 'delete_operations': ( - 'cn=Delete,cn=Operations,cn=Monitor', 'monitorOpCompleted', - ), - 'modify_operations': ( - 'cn=Modify,cn=Operations,cn=Monitor', 'monitorOpCompleted', - ), - 'compare_operations': ( - 'cn=Compare,cn=Operations,cn=Monitor', 'monitorOpCompleted', - ), - 'search_operations': ( - 'cn=Search,cn=Operations,cn=Monitor', 'monitorOpCompleted', - ), - 'write_waiters': ( - 'cn=Write,cn=Waiters,cn=Monitor', 'monitorCounter', - ), - 'read_waiters': ( - 'cn=Read,cn=Waiters,cn=Monitor', 'monitorCounter', - ), -} - - -class Service(SimpleService): - def __init__(self, configuration=None, name=None): - SimpleService.__init__(self, configuration=configuration, name=name) - self.order = ORDER - self.definitions = CHARTS - self.server = configuration.get('server', DEFAULT_SERVER) - self.port = configuration.get('port', DEFAULT_PORT) - self.username = configuration.get('username') - self.password = configuration.get('password') - self.timeout = configuration.get('timeout', DEFAULT_TIMEOUT) - self.use_tls = configuration.get('use_tls', DEFAULT_TLS) - self.cert_check = configuration.get('cert_check', DEFAULT_CERT_CHECK) - self.use_start_tls = configuration.get('use_start_tls', DEFAULT_START_TLS) - self.alive = False - self.conn = None - - def disconnect(self): - if self.conn: - self.conn.unbind() - self.conn = None - self.alive = False - - def connect(self): - try: - if self.use_tls: - self.conn = ldap.initialize('ldaps://%s:%s' % (self.server, self.port)) - else: - self.conn = ldap.initialize('ldap://%s:%s' % (self.server, self.port)) - self.conn.set_option(ldap.OPT_NETWORK_TIMEOUT, self.timeout) - if (self.use_tls or self.use_start_tls) and not self.cert_check: - self.conn.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER) - if self.use_start_tls or self.use_tls: - self.conn.set_option(ldap.OPT_X_TLS_NEWCTX, 0) - if self.use_start_tls: - self.conn.protocol_version = ldap.VERSION3 - self.conn.start_tls_s() - if self.username and self.password: - self.conn.simple_bind(self.username, self.password) - except ldap.LDAPError as error: - self.error(error) - return False - - self.alive = True - return True - - def reconnect(self): - self.disconnect() - return self.connect() - - def check(self): - if not 
HAS_LDAP: - self.error("'python-ldap' package is needed") - return None - - return self.connect() and self.get_data() - - def get_data(self): - if not self.alive and not self.reconnect(): - return None - - data = dict() - for key in SEARCH_LIST: - dn = SEARCH_LIST[key][0] - attr = SEARCH_LIST[key][1] - try: - num = self.conn.search(dn, ldap.SCOPE_BASE, 'objectClass=*', [attr, ]) - result_type, result_data = self.conn.result(num, 1) - except ldap.LDAPError as error: - self.error("Empty result. Check bind username/password. Message: ", error) - self.alive = False - return None - - if result_type != 101: - continue - - try: - data[key] = int(list(result_data[0][1].values())[0][0]) - except (ValueError, IndexError) as error: - self.debug(error) - continue - - return data diff --git a/src/collectors/python.d.plugin/openldap/openldap.conf b/src/collectors/python.d.plugin/openldap/openldap.conf deleted file mode 100644 index 5fd99a525..000000000 --- a/src/collectors/python.d.plugin/openldap/openldap.conf +++ /dev/null @@ -1,75 +0,0 @@ -# netdata python.d.plugin configuration for openldap -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# postfix is slow, so once every 10 seconds -update_every: 10 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# penalty indicates whether to apply penalty to update_every in case of failures. -# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. -# penalty: yes - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. 
These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# penalty: yes # the JOB's penalty -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# ---------------------------------------------------------------------- -# OPENLDAP EXTRA PARAMETERS - -# Set here your LDAP connection settings - -#username : "cn=admin,dc=example,dc=com" # The bind user with right to access monitor statistics -#password : "yourpass" # The password for the binded user -#server : 'localhost' # The listening address of the LDAP server. In case of TLS, use the hostname which the certificate is published for. -#port : 389 # The listening port of the LDAP server. Change to 636 port in case of TLS connection -#use_tls : False # Make True if a TLS connection is used over ldaps:// -#use_start_tls: False # Make True if a TLS connection is used over ldap:// -#cert_check : True # False if you want to ignore certificate check -#timeout : 1 # Seconds to timeout if no connection exi diff --git a/src/collectors/python.d.plugin/oracledb/README.md b/src/collectors/python.d.plugin/oracledb/README.md deleted file mode 120000 index a75e3611e..000000000 --- a/src/collectors/python.d.plugin/oracledb/README.md +++ /dev/null @@ -1 +0,0 @@ -integrations/oracle_db.md \ No newline at end of file diff --git a/src/collectors/python.d.plugin/oracledb/integrations/oracle_db.md b/src/collectors/python.d.plugin/oracledb/integrations/oracle_db.md deleted file mode 100644 index 4cf1b54a4..000000000 --- a/src/collectors/python.d.plugin/oracledb/integrations/oracle_db.md +++ /dev/null @@ -1,260 +0,0 @@ - - -# Oracle DB - - - - - -Plugin: python.d.plugin -Module: oracledb - - - -## Overview - -This collector monitors OracleDB database metrics about sessions, tables, memory and more. - -It collects the metrics via the supported database client library - -This collector is supported on all platforms. - -This collector supports collecting metrics from multiple instances of this integration, including remote instances. - -In order for this collector to work, it needs a read-only user `netdata` in the RDBMS. - - -### Default Behavior - -#### Auto-Detection - -When the requirements are met, databases on the local host on port 1521 will be auto-detected - -#### Limits - -The default configuration for this integration does not impose any limits on data collection. - -#### Performance Impact - -The default configuration for this integration is not expected to impose a significant performance impact on the system. - - -## Metrics - -Metrics grouped by *scope*. - -The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. - -These metrics refer to the entire monitored application. - -### Per Oracle DB instance - - - -This scope has no labels. 
- -Metrics: - -| Metric | Dimensions | Unit | -|:------|:----------|:----| -| oracledb.session_count | total, active | sessions | -| oracledb.session_limit_usage | usage | % | -| oracledb.logons | logons | events/s | -| oracledb.physical_disk_read_writes | reads, writes | events/s | -| oracledb.sorts_on_disks | sorts | events/s | -| oracledb.full_table_scans | full table scans | events/s | -| oracledb.database_wait_time_ratio | wait time ratio | % | -| oracledb.shared_pool_free_memory | free memory | % | -| oracledb.in_memory_sorts_ratio | in-memory sorts | % | -| oracledb.sql_service_response_time | time | seconds | -| oracledb.user_rollbacks | rollbacks | events/s | -| oracledb.enqueue_timeouts | enqueue timeouts | events/s | -| oracledb.cache_hit_ration | buffer, cursor, library, row | % | -| oracledb.global_cache_blocks | corrupted, lost | events/s | -| oracledb.activity | parse count, execute count, user commits, user rollbacks | events/s | -| oracledb.wait_time | application, configuration, administrative, concurrency, commit, network, user I/O, system I/O, scheduler, other | ms | -| oracledb.tablespace_size | a dimension per active tablespace | KiB | -| oracledb.tablespace_usage | a dimension per active tablespace | KiB | -| oracledb.tablespace_usage_in_percent | a dimension per active tablespace | % | -| oracledb.allocated_size | a dimension per active tablespace | B | -| oracledb.allocated_usage | a dimension per active tablespace | B | -| oracledb.allocated_usage_in_percent | a dimension per active tablespace | % | - - - -## Alerts - -There are no alerts configured by default for this integration. - - -## Setup - -### Prerequisites - -#### Install the python-oracledb package - -You can follow the official guide below to install the required package: - -Source: https://python-oracledb.readthedocs.io/en/latest/user_guide/installation.html - - -#### Create a read only user for netdata - -Follow the official instructions for your oracle RDBMS to create a read-only user for netdata. The operation may follow this approach - -Connect to your Oracle database with an administrative user and execute: - -```bash -CREATE USER netdata IDENTIFIED BY ; - -GRANT CONNECT TO netdata; -GRANT SELECT_CATALOG_ROLE TO netdata; -``` - - -#### Edit the configuration - -Edit the configuration troubleshooting: - -1. Provide a valid user for the netdata collector to access the database -2. Specify the network target this database is listening. - - - -### Configuration - -#### File - -The configuration file name for this integration is `python.d/oracledb.conf`. - - -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). - -```bash -cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata -sudo ./edit-config python.d/oracledb.conf -``` -#### Options - -There are 2 sections: - -* Global variables -* One or more JOBS that can define multiple different instances to monitor. - -The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. - -Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. - -Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. - - -
Config options
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| user | The username for the user account. | no | yes |
-| password | The password for the user account. | no | yes |
-| server | The IP address or hostname (and port) of the Oracle Database Server. | no | yes |
-| service | The Oracle Database service name. To view the services available on your server, run this query: `select SERVICE_NAME from gv$session where sid in (select sid from V$MYSTAT)`. | no | yes |
-| protocol | One of the strings "tcp" or "tcps", indicating whether to use unencrypted or encrypted network traffic. | no | yes |
-
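With the read-only user in place, a short standalone probe can confirm the credentials, server address and service name before the job is enabled. A minimal sketch (Python, using the `python-oracledb` package the collector relies on; the user, password and DSN are placeholders, and the query is the one suggested in the `service` option's description above):

```python
# Connectivity probe for the read-only `netdata` user, using the same
# python-oracledb client library the collector relies on. Credentials
# and DSN ("host:port/service_name") below are placeholders.
import oracledb

conn = oracledb.connect(user="netdata", password="secret",
                        dsn="localhost:1521/XE")
with conn.cursor() as cursor:
    # The query suggested in the `service` option's description for
    # listing the service name of the current session.
    cursor.execute(
        "select SERVICE_NAME from gv$session "
        "where sid in (select sid from V$MYSTAT)"
    )
    for (service_name,) in cursor.fetchall():
        print("connected to service:", service_name)
conn.close()
```

The same `host:port` and `service_name` values then map onto the `server` and `service` options of the job.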
- -#### Examples - -##### Basic - -A basic example configuration, two jobs described for two databases. - -```yaml -local: - user: 'netdata' - password: 'secret' - server: 'localhost:1521' - service: 'XE' - protocol: 'tcps' - -remote: - user: 'netdata' - password: 'secret' - server: '10.0.0.1:1521' - service: 'XE' - protocol: 'tcps' - -``` - - -## Troubleshooting - -### Debug Mode - - -To troubleshoot issues with the `oracledb` collector, run the `python.d.plugin` with the debug option enabled. The output -should give you clues as to why the collector isn't working. - -- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on - your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. - - ```bash - cd /usr/libexec/netdata/plugins.d/ - ``` - -- Switch to the `netdata` user. - - ```bash - sudo -u netdata -s - ``` - -- Run the `python.d.plugin` to debug the collector: - - ```bash - ./python.d.plugin oracledb debug trace - ``` - -### Getting Logs - -If you're encountering problems with the `oracledb` collector, follow these steps to retrieve logs and identify potential issues: - -- **Run the command** specific to your system (systemd, non-systemd, or Docker container). -- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem. - -#### System with systemd - -Use the following command to view logs generated since the last Netdata service restart: - -```bash -journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep oracledb -``` - -#### System without systemd - -Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name: - -```bash -grep oracledb /var/log/netdata/collector.log -``` - -**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues. - -#### Docker Container - -If your Netdata runs in a Docker container named "netdata" (replace if different), use this command: - -```bash -docker logs netdata 2>&1 | grep oracledb -``` - - diff --git a/src/collectors/python.d.plugin/oracledb/metadata.yaml b/src/collectors/python.d.plugin/oracledb/metadata.yaml deleted file mode 100644 index f2ab8312b..000000000 --- a/src/collectors/python.d.plugin/oracledb/metadata.yaml +++ /dev/null @@ -1,309 +0,0 @@ -plugin_name: python.d.plugin -modules: - - meta: - plugin_name: python.d.plugin - module_name: oracledb - monitored_instance: - name: Oracle DB - link: "https://docs.oracle.com/en/database/oracle/oracle-database/" - categories: - - data-collection.database-servers - icon_filename: "oracle.svg" - related_resources: - integrations: - list: [] - info_provided_to_referring_integrations: - description: "" - keywords: - - database - - oracle - - data warehouse - - SQL - most_popular: false - overview: - data_collection: - metrics_description: "This collector monitors OracleDB database metrics about sessions, tables, memory and more." - method_description: "It collects the metrics via the supported database client library" - supported_platforms: - include: [] - exclude: [] - multi_instance: true - additional_permissions: - description: | - In order for this collector to work, it needs a read-only user `netdata` in the RDBMS. 
- default_behavior: - auto_detection: - description: "When the requirements are met, databases on the local host on port 1521 will be auto-detected" - limits: - description: "" - performance_impact: - description: "" - setup: - prerequisites: - list: - - title: Install the python-oracledb package - description: | - You can follow the official guide below to install the required package: - - Source: https://python-oracledb.readthedocs.io/en/latest/user_guide/installation.html - - title: Create a read only user for netdata - description: | - Follow the official instructions for your oracle RDBMS to create a read-only user for netdata. The operation may follow this approach - - Connect to your Oracle database with an administrative user and execute: - - ```bash - CREATE USER netdata IDENTIFIED BY ; - - GRANT CONNECT TO netdata; - GRANT SELECT_CATALOG_ROLE TO netdata; - ``` - - title: Edit the configuration - description: | - Edit the configuration troubleshooting: - - 1. Provide a valid user for the netdata collector to access the database - 2. Specify the network target this database is listening. - configuration: - file: - name: "python.d/oracledb.conf" - options: - description: | - There are 2 sections: - - * Global variables - * One or more JOBS that can define multiple different instances to monitor. - - The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. - - Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. - - Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. - folding: - title: "Config options" - enabled: true - list: - - name: update_every - description: Sets the default data collection frequency. - default_value: 5 - required: false - - name: priority - description: Controls the order of charts at the netdata dashboard. - default_value: 60000 - required: false - - name: autodetection_retry - description: Sets the job re-check interval in seconds. - default_value: 0 - required: false - - name: penalty - description: Indicates whether to apply penalty to update_every in case of failures. - default_value: yes - required: false - - name: user - description: The username for the user account. - default_value: no - required: true - - name: password - description: The password for the user account. - default_value: no - required: true - - name: server - description: The IP address or hostname (and port) of the Oracle Database Server. - default_value: no - required: true - - name: service - description: The Oracle Database service name. To view the services available on your server run this query, `select SERVICE_NAME from gv$session where sid in (select sid from V$MYSTAT)`. - default_value: no - required: true - - name: protocol - description: one of the strings "tcp" or "tcps" indicating whether to use unencrypted network traffic or encrypted network traffic - default_value: no - required: true - examples: - folding: - enabled: true - title: "Config" - list: - - name: Basic - folding: - enabled: false - description: A basic example configuration, two jobs described for two databases. 
- config: | - local: - user: 'netdata' - password: 'secret' - server: 'localhost:1521' - service: 'XE' - protocol: 'tcps' - - remote: - user: 'netdata' - password: 'secret' - server: '10.0.0.1:1521' - service: 'XE' - protocol: 'tcps' - troubleshooting: - problems: - list: [] - alerts: [] - metrics: - folding: - title: Metrics - enabled: false - description: "These metrics refer to the entire monitored application." - availability: [] - scopes: - - name: global - description: "" - labels: [] - metrics: - - name: oracledb.session_count - description: Session Count - unit: "sessions" - chart_type: line - dimensions: - - name: total - - name: active - - name: oracledb.session_limit_usage - description: Session Limit Usage - unit: "%" - chart_type: area - dimensions: - - name: usage - - name: oracledb.logons - description: Logons - unit: "events/s" - chart_type: area - dimensions: - - name: logons - - name: oracledb.physical_disk_read_writes - description: Physical Disk Reads/Writes - unit: "events/s" - chart_type: area - dimensions: - - name: reads - - name: writes - - name: oracledb.sorts_on_disks - description: Sorts On Disk - unit: "events/s" - chart_type: line - dimensions: - - name: sorts - - name: oracledb.full_table_scans - description: Full Table Scans - unit: "events/s" - chart_type: line - dimensions: - - name: full table scans - - name: oracledb.database_wait_time_ratio - description: Database Wait Time Ratio - unit: "%" - chart_type: line - dimensions: - - name: wait time ratio - - name: oracledb.shared_pool_free_memory - description: Shared Pool Free Memory - unit: "%" - chart_type: line - dimensions: - - name: free memory - - name: oracledb.in_memory_sorts_ratio - description: In-Memory Sorts Ratio - unit: "%" - chart_type: line - dimensions: - - name: in-memory sorts - - name: oracledb.sql_service_response_time - description: SQL Service Response Time - unit: "seconds" - chart_type: line - dimensions: - - name: time - - name: oracledb.user_rollbacks - description: User Rollbacks - unit: "events/s" - chart_type: line - dimensions: - - name: rollbacks - - name: oracledb.enqueue_timeouts - description: Enqueue Timeouts - unit: "events/s" - chart_type: line - dimensions: - - name: enqueue timeouts - - name: oracledb.cache_hit_ration - description: Cache Hit Ratio - unit: "%" - chart_type: stacked - dimensions: - - name: buffer - - name: cursor - - name: library - - name: row - - name: oracledb.global_cache_blocks - description: Global Cache Blocks Events - unit: "events/s" - chart_type: area - dimensions: - - name: corrupted - - name: lost - - name: oracledb.activity - description: Activities - unit: "events/s" - chart_type: stacked - dimensions: - - name: parse count - - name: execute count - - name: user commits - - name: user rollbacks - - name: oracledb.wait_time - description: Wait Time - unit: "ms" - chart_type: stacked - dimensions: - - name: application - - name: configuration - - name: administrative - - name: concurrency - - name: commit - - name: network - - name: user I/O - - name: system I/O - - name: scheduler - - name: other - - name: oracledb.tablespace_size - description: Size - unit: "KiB" - chart_type: line - dimensions: - - name: a dimension per active tablespace - - name: oracledb.tablespace_usage - description: Usage - unit: "KiB" - chart_type: line - dimensions: - - name: a dimension per active tablespace - - name: oracledb.tablespace_usage_in_percent - description: Usage - unit: "%" - chart_type: line - dimensions: - - name: a dimension per active tablespace 
- - name: oracledb.allocated_size - description: Size - unit: "B" - chart_type: line - dimensions: - - name: a dimension per active tablespace - - name: oracledb.allocated_usage - description: Usage - unit: "B" - chart_type: line - dimensions: - - name: a dimension per active tablespace - - name: oracledb.allocated_usage_in_percent - description: Usage - unit: "%" - chart_type: line - dimensions: - - name: a dimension per active tablespace diff --git a/src/collectors/python.d.plugin/oracledb/oracledb.chart.py b/src/collectors/python.d.plugin/oracledb/oracledb.chart.py deleted file mode 100644 index 455cf270e..000000000 --- a/src/collectors/python.d.plugin/oracledb/oracledb.chart.py +++ /dev/null @@ -1,846 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: oracledb netdata python.d module -# Author: ilyam8 (Ilya Mashchenko) -# SPDX-License-Identifier: GPL-3.0-or-later - -from copy import deepcopy - -from bases.FrameworkServices.SimpleService import SimpleService - -try: - import oracledb as cx_Oracle - - HAS_ORACLE_NEW = True - HAS_ORACLE_OLD = False -except ImportError: - HAS_ORACLE_NEW = False - try: - import cx_Oracle - - HAS_ORACLE_OLD = True - except ImportError: - HAS_ORACLE_OLD = False - -ORDER = [ - 'session_count', - 'session_limit_usage', - 'logons', - 'physical_disk_read_write', - 'sorts_on_disk', - 'full_table_scans', - 'database_wait_time_ratio', - 'shared_pool_free_memory', - 'in_memory_sorts_ratio', - 'sql_service_response_time', - 'user_rollbacks', - 'enqueue_timeouts', - 'cache_hit_ratio', - 'global_cache_blocks', - 'activity', - 'wait_time', - 'tablespace_size', - 'tablespace_usage', - 'tablespace_usage_in_percent', - 'allocated_size', - 'allocated_usage', - 'allocated_usage_in_percent', -] - -CHARTS = { - 'session_count': { - 'options': [None, 'Session Count', 'sessions', 'session activity', 'oracledb.session_count', 'line'], - 'lines': [ - ['session_count', 'total', 'absolute', 1, 1000], - ['average_active_sessions', 'active', 'absolute', 1, 1000], - ] - }, - 'session_limit_usage': { - 'options': [None, 'Session Limit Usage', '%', 'session activity', 'oracledb.session_limit_usage', 'area'], - 'lines': [ - ['session_limit_percent', 'usage', 'absolute', 1, 1000], - ] - }, - 'logons': { - 'options': [None, 'Logons', 'events/s', 'session activity', 'oracledb.logons', 'area'], - 'lines': [ - ['logons_per_sec', 'logons', 'absolute', 1, 1000], - ] - }, - 'physical_disk_read_write': { - 'options': [None, 'Physical Disk Reads/Writes', 'events/s', 'disk activity', - 'oracledb.physical_disk_read_writes', 'area'], - 'lines': [ - ['physical_reads_per_sec', 'reads', 'absolute', 1, 1000], - ['physical_writes_per_sec', 'writes', 'absolute', -1, 1000], - ] - }, - 'sorts_on_disk': { - 'options': [None, 'Sorts On Disk', 'events/s', 'disk activity', 'oracledb.sorts_on_disks', 'line'], - 'lines': [ - ['disk_sort_per_sec', 'sorts', 'absolute', 1, 1000], - ] - }, - 'full_table_scans': { - 'options': [None, 'Full Table Scans', 'events/s', 'disk activity', 'oracledb.full_table_scans', 'line'], - 'lines': [ - ['long_table_scans_per_sec', 'full table scans', 'absolute', 1, 1000], - ] - }, - 'database_wait_time_ratio': { - 'options': [None, 'Database Wait Time Ratio', '%', 'database and buffer activity', - 'oracledb.database_wait_time_ratio', 'line'], - 'lines': [ - ['database_wait_time_ratio', 'wait time ratio', 'absolute', 1, 1000], - ] - }, - 'shared_pool_free_memory': { - 'options': [None, 'Shared Pool Free Memory', '%', 'database and buffer activity', - 'oracledb.shared_pool_free_memory', 
'line'], - 'lines': [ - ['shared_pool_free_percent', 'free memory', 'absolute', 1, 1000], - ] - }, - 'in_memory_sorts_ratio': { - 'options': [None, 'In-Memory Sorts Ratio', '%', 'database and buffer activity', - 'oracledb.in_memory_sorts_ratio', 'line'], - 'lines': [ - ['memory_sorts_ratio', 'in-memory sorts', 'absolute', 1, 1000], - ] - }, - 'sql_service_response_time': { - 'options': [None, 'SQL Service Response Time', 'seconds', 'database and buffer activity', - 'oracledb.sql_service_response_time', 'line'], - 'lines': [ - ['sql_service_response_time', 'time', 'absolute', 1, 1000], - ] - }, - 'user_rollbacks': { - 'options': [None, 'User Rollbacks', 'events/s', 'database and buffer activity', - 'oracledb.user_rollbacks', 'line'], - 'lines': [ - ['user_rollbacks_per_sec', 'rollbacks', 'absolute', 1, 1000], - ] - }, - 'enqueue_timeouts': { - 'options': [None, 'Enqueue Timeouts', 'events/s', 'database and buffer activity', - 'oracledb.enqueue_timeouts', 'line'], - 'lines': [ - ['enqueue_timeouts_per_sec', 'enqueue timeouts', 'absolute', 1, 1000], - ] - }, - 'cache_hit_ratio': { - 'options': [None, 'Cache Hit Ratio', '%', 'cache', 'oracledb.cache_hit_ration', 'stacked'], - 'lines': [ - ['buffer_cache_hit_ratio', 'buffer', 'absolute', 1, 1000], - ['cursor_cache_hit_ratio', 'cursor', 'absolute', 1, 1000], - ['library_cache_hit_ratio', 'library', 'absolute', 1, 1000], - ['row_cache_hit_ratio', 'row', 'absolute', 1, 1000], - ] - }, - 'global_cache_blocks': { - 'options': [None, 'Global Cache Blocks Events', 'events/s', 'cache', 'oracledb.global_cache_blocks', 'area'], - 'lines': [ - ['global_cache_blocks_corrupted', 'corrupted', 'incremental', 1, 1000], - ['global_cache_blocks_lost', 'lost', 'incremental', 1, 1000], - ] - }, - 'activity': { - 'options': [None, 'Activities', 'events/s', 'activities', 'oracledb.activity', 'stacked'], - 'lines': [ - ['activity_parse_count_total', 'parse count', 'incremental', 1, 1000], - ['activity_execute_count', 'execute count', 'incremental', 1, 1000], - ['activity_user_commits', 'user commits', 'incremental', 1, 1000], - ['activity_user_rollbacks', 'user rollbacks', 'incremental', 1, 1000], - ] - }, - 'wait_time': { - 'options': [None, 'Wait Time', 'ms', 'wait time', 'oracledb.wait_time', 'stacked'], - 'lines': [ - ['wait_time_application', 'application', 'absolute', 1, 1000], - ['wait_time_configuration', 'configuration', 'absolute', 1, 1000], - ['wait_time_administrative', 'administrative', 'absolute', 1, 1000], - ['wait_time_concurrency', 'concurrency', 'absolute', 1, 1000], - ['wait_time_commit', 'commit', 'absolute', 1, 1000], - ['wait_time_network', 'network', 'absolute', 1, 1000], - ['wait_time_user_io', 'user I/O', 'absolute', 1, 1000], - ['wait_time_system_io', 'system I/O', 'absolute', 1, 1000], - ['wait_time_scheduler', 'scheduler', 'absolute', 1, 1000], - ['wait_time_other', 'other', 'absolute', 1, 1000], - ] - }, - 'tablespace_size': { - 'options': [None, 'Size', 'KiB', 'tablespace', 'oracledb.tablespace_size', 'line'], - 'lines': [], - }, - 'tablespace_usage': { - 'options': [None, 'Usage', 'KiB', 'tablespace', 'oracledb.tablespace_usage', 'line'], - 'lines': [], - }, - 'tablespace_usage_in_percent': { - 'options': [None, 'Usage', '%', 'tablespace', 'oracledb.tablespace_usage_in_percent', 'line'], - 'lines': [], - }, - 'allocated_size': { - 'options': [None, 'Size', 'B', 'tablespace', 'oracledb.allocated_size', 'line'], - 'lines': [], - }, - 'allocated_usage': { - 'options': [None, 'Usage', 'B', 'tablespace', 'oracledb.allocated_usage', 'line'], 
- 'lines': [], - }, - 'allocated_usage_in_percent': { - 'options': [None, 'Usage', '%', 'tablespace', 'oracledb.allocated_usage_in_percent', 'line'], - 'lines': [], - }, -} - -CX_CONNECT_STRING_OLD = "{0}/{1}@//{2}/{3}" - -QUERY_SYSTEM = ''' -SELECT - metric_name, - value -FROM - gv$sysmetric -ORDER BY - begin_time -''' -QUERY_TABLESPACE = ''' -SELECT - m.tablespace_name, - m.used_space * t.block_size AS used_bytes, - m.tablespace_size * t.block_size AS max_bytes, - m.used_percent -FROM - dba_tablespace_usage_metrics m - JOIN dba_tablespaces t ON m.tablespace_name = t.tablespace_name -''' -QUERY_ALLOCATED = ''' -SELECT - nvl(b.tablespace_name,nvl(a.tablespace_name,'UNKNOWN')) tablespace_name, - bytes_alloc used_bytes, - bytes_alloc-nvl(bytes_free,0) max_bytes, - ((bytes_alloc-nvl(bytes_free,0))/ bytes_alloc)*100 used_percent -FROM - (SELECT - sum(bytes) bytes_free, - tablespace_name - FROM sys.dba_free_space - GROUP BY tablespace_name - ) a, - (SELECT - sum(bytes) bytes_alloc, - tablespace_name - FROM sys.dba_data_files - GROUP BY tablespace_name - ) b -WHERE a.tablespace_name (+) = b.tablespace_name -''' -QUERY_ACTIVITIES_COUNT = ''' -SELECT - name, - value -FROM - v$sysstat -WHERE - name IN ( - 'parse count (total)', - 'execute count', - 'user commits', - 'user rollbacks' - ) -''' -QUERY_WAIT_TIME = ''' -SELECT - n.wait_class, - round(m.time_waited / m.INTSIZE_CSEC, 3) -FROM - v$waitclassmetric m, - v$system_wait_class n -WHERE - m.wait_class_id = n.wait_class_id - AND n.wait_class != 'Idle' -''' -# QUERY_SESSION_COUNT = ''' -# SELECT -# status, -# type -# FROM -# v$session -# GROUP BY -# status, -# type -# ''' -# QUERY_PROCESSES_COUNT = ''' -# SELECT -# COUNT(*) -# FROM -# v$process -# ''' -# QUERY_PROCESS = ''' -# SELECT -# program, -# pga_used_mem, -# pga_alloc_mem, -# pga_freeable_mem, -# pga_max_mem -# FROM -# gv$process -# ''' - -# PROCESS_METRICS = [ -# 'pga_used_memory', -# 'pga_allocated_memory', -# 'pga_freeable_memory', -# 'pga_maximum_memory', -# ] - - -SYS_METRICS = { - 'Average Active Sessions': 'average_active_sessions', - 'Session Count': 'session_count', - 'Session Limit %': 'session_limit_percent', - 'Logons Per Sec': 'logons_per_sec', - 'Physical Reads Per Sec': 'physical_reads_per_sec', - 'Physical Writes Per Sec': 'physical_writes_per_sec', - 'Disk Sort Per Sec': 'disk_sort_per_sec', - 'Long Table Scans Per Sec': 'long_table_scans_per_sec', - 'Database Wait Time Ratio': 'database_wait_time_ratio', - 'Shared Pool Free %': 'shared_pool_free_percent', - 'Memory Sorts Ratio': 'memory_sorts_ratio', - 'SQL Service Response Time': 'sql_service_response_time', - 'User Rollbacks Per Sec': 'user_rollbacks_per_sec', - 'Enqueue Timeouts Per Sec': 'enqueue_timeouts_per_sec', - 'Buffer Cache Hit Ratio': 'buffer_cache_hit_ratio', - 'Cursor Cache Hit Ratio': 'cursor_cache_hit_ratio', - 'Library Cache Hit Ratio': 'library_cache_hit_ratio', - 'Row Cache Hit Ratio': 'row_cache_hit_ratio', - 'Global Cache Blocks Corrupted': 'global_cache_blocks_corrupted', - 'Global Cache Blocks Lost': 'global_cache_blocks_lost', -} - - -class Service(SimpleService): - def __init__(self, configuration=None, name=None): - SimpleService.__init__(self, configuration=configuration, name=name) - self.order = ORDER - self.definitions = deepcopy(CHARTS) - self.user = configuration.get('user') - self.password = configuration.get('password') - self.server = configuration.get('server') - self.service = configuration.get('service') - self.protocol = configuration.get('protocol', 'tcps') - self.alive = False - 
self.conn = None - self.active_tablespaces = set() - - def connect(self): - if self.conn: - self.conn.close() - self.conn = None - if HAS_ORACLE_NEW: - try: - self.conn = cx_Oracle.connect( - f'{self.user}/{self.password}@{self.protocol}://{self.server}/{self.service}') - except cx_Oracle.DatabaseError as error: - self.error(error) - return False - else: - try: - self.conn = cx_Oracle.connect( - CX_CONNECT_STRING_OLD.format( - self.user, - self.password, - self.server, - self.service, - )) - except cx_Oracle.DatabaseError as error: - self.error(error) - return False - - self.alive = True - return True - - def reconnect(self): - return self.connect() - - def check(self): - if not HAS_ORACLE_NEW and not HAS_ORACLE_OLD: - self.error("'oracledb' package is needed to use oracledb module") - return False - - if not all([ - self.user, - self.password, - self.server, - self.service - ]): - self.error("one of these parameters is not specified: user, password, server, service") - return False - - if not self.connect(): - return False - - return bool(self.get_data()) - - def get_data(self): - if not self.alive and not self.reconnect(): - return None - - data = dict() - - # SYSTEM - try: - rv = self.gather_system_metrics() - except cx_Oracle.Error as error: - self.error(error) - self.alive = False - return None - else: - for name, value in rv: - if name not in SYS_METRICS: - continue - data[SYS_METRICS[name]] = int(float(value) * 1000) - - # ACTIVITIES COUNT - try: - rv = self.gather_activities_count() - except cx_Oracle.Error as error: - self.error(error) - self.alive = False - return None - else: - for name, amount in rv: - cleaned = name.replace(' ', '_').replace('(', '').replace(')', '') - new_name = 'activity_{0}'.format(cleaned) - data[new_name] = int(float(amount) * 1000) - - # WAIT TIME - try: - rv = self.gather_wait_time_metrics() - except cx_Oracle.Error as error: - self.error(error) - self.alive = False - return None - else: - for name, amount in rv: - cleaned = name.replace(' ', '_').replace('/', '').lower() - new_name = 'wait_time_{0}'.format(cleaned) - data[new_name] = amount - - # TABLESPACE - try: - rv = self.gather_tablespace_metrics() - except cx_Oracle.Error as error: - self.error(error) - self.alive = False - return None - else: - for name, offline, size, used, used_in_percent in rv: - # TODO: skip offline? - if not (not offline and self.charts): - continue - # TODO: remove inactive? - if name not in self.active_tablespaces: - self.active_tablespaces.add(name) - self.add_tablespace_to_charts(name) - data['{0}_tablespace_size'.format(name)] = int(size * 1000) - data['{0}_tablespace_used'.format(name)] = int(used * 1000) - data['{0}_tablespace_used_in_percent'.format(name)] = int(used_in_percent * 1000) - - # ALLOCATED SPACE - try: - rv = self.gather_allocated_metrics() - except cx_Oracle.Error as error: - self.error(error) - self.alive = False - return None - else: - for name, offline, size, used, used_in_percent in rv: - # TODO: skip offline? - if not (not offline and self.charts): - continue - # TODO: remove inactive? 
- if name not in self.active_tablespaces: - self.active_tablespaces.add(name) - self.add_tablespace_to_charts(name) - data['{0}_allocated_size'.format(name)] = int(size * 1000) - data['{0}_allocated_used'.format(name)] = int(used * 1000) - data['{0}_allocated_used_in_percent'.format(name)] = int(used_in_percent * 1000) - - return data or None - - def gather_system_metrics(self): - - """ - :return: - - [['Buffer Cache Hit Ratio', 100], - ['Memory Sorts Ratio', 100], - ['Redo Allocation Hit Ratio', 100], - ['User Transaction Per Sec', 0], - ['Physical Reads Per Sec', 0], - ['Physical Reads Per Txn', 0], - ['Physical Writes Per Sec', 0], - ['Physical Writes Per Txn', 0], - ['Physical Reads Direct Per Sec', 0], - ['Physical Reads Direct Per Txn', 0], - ['Physical Writes Direct Per Sec', 0], - ['Physical Writes Direct Per Txn', 0], - ['Physical Reads Direct Lobs Per Sec', 0], - ['Physical Reads Direct Lobs Per Txn', 0], - ['Physical Writes Direct Lobs Per Sec', 0], - ['Physical Writes Direct Lobs Per Txn', 0], - ['Redo Generated Per Sec', Decimal('4.66666666666667')], - ['Redo Generated Per Txn', 280], - ['Logons Per Sec', Decimal('0.0166666666666667')], - ['Logons Per Txn', 1], - ['Open Cursors Per Sec', 0.35], - ['Open Cursors Per Txn', 21], - ['User Commits Per Sec', 0], - ['User Commits Percentage', 0], - ['User Rollbacks Per Sec', 0], - ['User Rollbacks Percentage', 0], - ['User Calls Per Sec', Decimal('0.0333333333333333')], - ['User Calls Per Txn', 2], - ['Recursive Calls Per Sec', 14.15], - ['Recursive Calls Per Txn', 849], - ['Logical Reads Per Sec', Decimal('0.683333333333333')], - ['Logical Reads Per Txn', 41], - ['DBWR Checkpoints Per Sec', 0], - ['Background Checkpoints Per Sec', 0], - ['Redo Writes Per Sec', Decimal('0.0333333333333333')], - ['Redo Writes Per Txn', 2], - ['Long Table Scans Per Sec', 0], - ['Long Table Scans Per Txn', 0], - ['Total Table Scans Per Sec', Decimal('0.0166666666666667')], - ['Total Table Scans Per Txn', 1], - ['Full Index Scans Per Sec', 0], - ['Full Index Scans Per Txn', 0], - ['Total Index Scans Per Sec', Decimal('0.216666666666667')], - ['Total Index Scans Per Txn', 13], - ['Total Parse Count Per Sec', 0.35], - ['Total Parse Count Per Txn', 21], - ['Hard Parse Count Per Sec', 0], - ['Hard Parse Count Per Txn', 0], - ['Parse Failure Count Per Sec', 0], - ['Parse Failure Count Per Txn', 0], - ['Cursor Cache Hit Ratio', Decimal('52.3809523809524')], - ['Disk Sort Per Sec', 0], - ['Disk Sort Per Txn', 0], - ['Rows Per Sort', 8.6], - ['Execute Without Parse Ratio', Decimal('27.5862068965517')], - ['Soft Parse Ratio', 100], - ['User Calls Ratio', Decimal('0.235017626321974')], - ['Host CPU Utilization (%)', Decimal('0.124311845142959')], - ['Network Traffic Volume Per Sec', 0], - ['Enqueue Timeouts Per Sec', 0], - ['Enqueue Timeouts Per Txn', 0], - ['Enqueue Waits Per Sec', 0], - ['Enqueue Waits Per Txn', 0], - ['Enqueue Deadlocks Per Sec', 0], - ['Enqueue Deadlocks Per Txn', 0], - ['Enqueue Requests Per Sec', Decimal('216.683333333333')], - ['Enqueue Requests Per Txn', 13001], - ['DB Block Gets Per Sec', 0], - ['DB Block Gets Per Txn', 0], - ['Consistent Read Gets Per Sec', Decimal('0.683333333333333')], - ['Consistent Read Gets Per Txn', 41], - ['DB Block Changes Per Sec', 0], - ['DB Block Changes Per Txn', 0], - ['Consistent Read Changes Per Sec', 0], - ['Consistent Read Changes Per Txn', 0], - ['CPU Usage Per Sec', 0], - ['CPU Usage Per Txn', 0], - ['CR Blocks Created Per Sec', 0], - ['CR Blocks Created Per Txn', 0], - ['CR Undo Records Applied Per 
Sec', 0], - ['CR Undo Records Applied Per Txn', 0], - ['User Rollback UndoRec Applied Per Sec', 0], - ['User Rollback Undo Records Applied Per Txn', 0], - ['Leaf Node Splits Per Sec', 0], - ['Leaf Node Splits Per Txn', 0], - ['Branch Node Splits Per Sec', 0], - ['Branch Node Splits Per Txn', 0], - ['PX downgraded 1 to 25% Per Sec', 0], - ['PX downgraded 25 to 50% Per Sec', 0], - ['PX downgraded 50 to 75% Per Sec', 0], - ['PX downgraded 75 to 99% Per Sec', 0], - ['PX downgraded to serial Per Sec', 0], - ['Physical Read Total IO Requests Per Sec', Decimal('2.16666666666667')], - ['Physical Read Total Bytes Per Sec', Decimal('35498.6666666667')], - ['GC CR Block Received Per Second', 0], - ['GC CR Block Received Per Txn', 0], - ['GC Current Block Received Per Second', 0], - ['GC Current Block Received Per Txn', 0], - ['Global Cache Average CR Get Time', 0], - ['Global Cache Average Current Get Time', 0], - ['Physical Write Total IO Requests Per Sec', Decimal('0.966666666666667')], - ['Global Cache Blocks Corrupted', 0], - ['Global Cache Blocks Lost', 0], - ['Current Logons Count', 49], - ['Current Open Cursors Count', 64], - ['User Limit %', Decimal('0.00000114087015416959')], - ['SQL Service Response Time', 0], - ['Database Wait Time Ratio', 0], - ['Database CPU Time Ratio', 0], - ['Response Time Per Txn', 0], - ['Row Cache Hit Ratio', 100], - ['Row Cache Miss Ratio', 0], - ['Library Cache Hit Ratio', 100], - ['Library Cache Miss Ratio', 0], - ['Shared Pool Free %', Decimal('7.82380268491548')], - ['PGA Cache Hit %', Decimal('98.0399767109115')], - ['Process Limit %', Decimal('17.6666666666667')], - ['Session Limit %', Decimal('15.2542372881356')], - ['Executions Per Txn', 29], - ['Executions Per Sec', Decimal('0.483333333333333')], - ['Txns Per Logon', 0], - ['Database Time Per Sec', 0], - ['Physical Write Total Bytes Per Sec', 15308.8], - ['Physical Read IO Requests Per Sec', 0], - ['Physical Read Bytes Per Sec', 0], - ['Physical Write IO Requests Per Sec', 0], - ['Physical Write Bytes Per Sec', 0], - ['DB Block Changes Per User Call', 0], - ['DB Block Gets Per User Call', 0], - ['Executions Per User Call', 14.5], - ['Logical Reads Per User Call', 20.5], - ['Total Sorts Per User Call', 2.5], - ['Total Table Scans Per User Call', 0.5], - ['Current OS Load', 0.0390625], - ['Streams Pool Usage Percentage', 0], - ['PQ QC Session Count', 0], - ['PQ Slave Session Count', 0], - ['Queries parallelized Per Sec', 0], - ['DML statements parallelized Per Sec', 0], - ['DDL statements parallelized Per Sec', 0], - ['PX operations not downgraded Per Sec', 0], - ['Session Count', 72], - ['Average Synchronous Single-Block Read Latency', 0], - ['I/O Megabytes per Second', 0.05], - ['I/O Requests per Second', Decimal('3.13333333333333')], - ['Average Active Sessions', 0], - ['Active Serial Sessions', 1], - ['Active Parallel Sessions', 0], - ['Captured user calls', 0], - ['Replayed user calls', 0], - ['Workload Capture and Replay status', 0], - ['Background CPU Usage Per Sec', Decimal('1.22578833333333')], - ['Background Time Per Sec', 0.0147551], - ['Host CPU Usage Per Sec', Decimal('0.116666666666667')], - ['Cell Physical IO Interconnect Bytes', 3048448], - ['Temp Space Used', 0], - ['Total PGA Allocated', 200657920], - ['Total PGA Used by SQL Workareas', 0], - ['Run Queue Per Sec', 0], - ['VM in bytes Per Sec', 0], - ['VM out bytes Per Sec', 0]] - """ - - metrics = list() - with self.conn.cursor() as cursor: - cursor.execute(QUERY_SYSTEM) - for metric_name, value in cursor.fetchall(): - 
metrics.append([metric_name, value]) - return metrics - - def gather_tablespace_metrics(self): - """ - :return: - - [['SYSTEM', 874250240.0, 3233169408.0, 27.040038107400033, 0], - ['SYSAUX', 498860032.0, 3233169408.0, 15.429443033997678, 0], - ['TEMP', 0.0, 3233177600.0, 0.0, 0], - ['USERS', 1048576.0, 3233169408.0, 0.03243182981397305, 0]] - """ - metrics = list() - with self.conn.cursor() as cursor: - cursor.execute(QUERY_TABLESPACE) - for tablespace_name, used_bytes, max_bytes, used_percent in cursor.fetchall(): - if used_bytes is None: - offline = True - used = 0 - else: - offline = False - used = float(used_bytes) - if max_bytes is None: - size = 0 - else: - size = float(max_bytes) - if used_percent is None: - used_percent = 0 - else: - used_percent = float(used_percent) - metrics.append( - [ - tablespace_name, - offline, - size, - used, - used_percent, - ] - ) - return metrics - - def gather_allocated_metrics(self): - """ - :return: - - [['SYSTEM', 874250240.0, 3233169408.0, 27.040038107400033, 0], - ['SYSAUX', 498860032.0, 3233169408.0, 15.429443033997678, 0], - ['TEMP', 0.0, 3233177600.0, 0.0, 0], - ['USERS', 1048576.0, 3233169408.0, 0.03243182981397305, 0]] - """ - metrics = list() - with self.conn.cursor() as cursor: - cursor.execute(QUERY_ALLOCATED) - for tablespace_name, used_bytes, max_bytes, used_percent in cursor.fetchall(): - if used_bytes is None: - offline = True - used = 0 - else: - offline = False - used = float(used_bytes) - if max_bytes is None: - size = 0 - else: - size = float(max_bytes) - if used_percent is None: - used_percent = 0 - else: - used_percent = float(used_percent) - metrics.append( - [ - tablespace_name, - offline, - size, - used, - used_percent, - ] - ) - return metrics - - def gather_wait_time_metrics(self): - """ - :return: - - [['Other', 0], - ['Application', 0], - ['Configuration', 0], - ['Administrative', 0], - ['Concurrency', 0], - ['Commit', 0], - ['Network', 0], - ['User I/O', 0], - ['System I/O', 0.002], - ['Scheduler', 0]] - """ - metrics = list() - with self.conn.cursor() as cursor: - cursor.execute(QUERY_WAIT_TIME) - for wait_class_name, value in cursor.fetchall(): - metrics.append([wait_class_name, value]) - return metrics - - def gather_activities_count(self): - """ - :return: - - [('user commits', 9104), - ('user rollbacks', 17), - ('parse count (total)', 483695), - ('execute count', 2020356)] - """ - with self.conn.cursor() as cursor: - cursor.execute(QUERY_ACTIVITIES_COUNT) - return cursor.fetchall() - - # def gather_process_metrics(self): - # """ - # :return: - # - # [['PSEUDO', 'pga_used_memory', 0], - # ['PSEUDO', 'pga_allocated_memory', 0], - # ['PSEUDO', 'pga_freeable_memory', 0], - # ['PSEUDO', 'pga_maximum_memory', 0], - # ['oracle@localhost.localdomain (PMON)', 'pga_used_memory', 1793827], - # ['oracle@localhost.localdomain (PMON)', 'pga_allocated_memory', 1888651], - # ['oracle@localhost.localdomain (PMON)', 'pga_freeable_memory', 0], - # ['oracle@localhost.localdomain (PMON)', 'pga_maximum_memory', 1888651], - # ... - # ... 
- # """ - # - # metrics = list() - # with self.conn.cursor() as cursor: - # cursor.execute(QUERY_PROCESS) - # for row in cursor.fetchall(): - # for i, name in enumerate(PROCESS_METRICS, 1): - # metrics.append([row[0], name, row[i]]) - # return metrics - - # def gather_processes_count(self): - # with self.conn.cursor() as cursor: - # cursor.execute(QUERY_PROCESSES_COUNT) - # return cursor.fetchone()[0] # 53 - - # def gather_sessions_count(self): - # with self.conn.cursor() as cursor: - # cursor.execute(QUERY_SESSION_COUNT) - # total, active, inactive = 0, 0, 0 - # for status, _ in cursor.fetchall(): - # total += 1 - # active += status == 'ACTIVE' - # inactive += status == 'INACTIVE' - # return [total, active, inactive] - - def add_tablespace_to_charts(self, name): - self.charts['tablespace_size'].add_dimension( - [ - '{0}_tablespace_size'.format(name), - name, - 'absolute', - 1, - 1024 * 1000, - ]) - self.charts['tablespace_usage'].add_dimension( - [ - '{0}_tablespace_used'.format(name), - name, - 'absolute', - 1, - 1024 * 1000, - ]) - self.charts['tablespace_usage_in_percent'].add_dimension( - [ - '{0}_tablespace_used_in_percent'.format(name), - name, - 'absolute', - 1, - 1000, - ]) - self.charts['allocated_size'].add_dimension( - [ - '{0}_allocated_size'.format(name), - name, - 'absolute', - 1, - 1000, - ]) - self.charts['allocated_usage'].add_dimension( - [ - '{0}_allocated_used'.format(name), - name, - 'absolute', - 1, - 1000, - ]) - self.charts['allocated_usage_in_percent'].add_dimension( - [ - '{0}_allocated_used_in_percent'.format(name), - name, - 'absolute', - 1, - 1000, - ]) diff --git a/src/collectors/python.d.plugin/oracledb/oracledb.conf b/src/collectors/python.d.plugin/oracledb/oracledb.conf deleted file mode 100644 index 027215dad..000000000 --- a/src/collectors/python.d.plugin/oracledb/oracledb.conf +++ /dev/null @@ -1,88 +0,0 @@ -# netdata python.d.plugin configuration for oracledb -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# penalty indicates whether to apply penalty to update_every in case of failures. -# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. -# penalty: yes - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. 
This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# penalty: yes # the JOB's penalty -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, oracledb also supports the following: -# -# user: username # the username for the user account. Required. -# password: password # the password for the user account. Required. -# server: localhost:1521 # the IP address or hostname (and port) of the Oracle Database Server. Required. -# service: XE # the Oracle Database service name. Required. To view the services available on your server, -# run this query: `select SERVICE_NAME from gv$session where sid in (select sid from V$MYSTAT)`. -# protocol: tcp/tcps # one of the strings "tcp" or "tcps" indicating whether to use unencrypted network traffic -# or encrypted network traffic -# -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS -# only one of them will run (they have the same name) - -#local: -# user: 'netdata' -# password: 'secret' -# server: 'localhost:1521' -# service: 'XE' -# protocol: 'tcps' - -#remote: -# user: 'netdata' -# password: 'secret' -# server: '10.0.0.1:1521' -# service: 'XE' -# protocol: 'tcps' diff --git a/src/collectors/python.d.plugin/pandas/integrations/pandas.md b/src/collectors/python.d.plugin/pandas/integrations/pandas.md index e0b5418c5..b36bea073 100644 --- a/src/collectors/python.d.plugin/pandas/integrations/pandas.md +++ b/src/collectors/python.d.plugin/pandas/integrations/pandas.md @@ -108,8 +108,8 @@ sudo pip install 'sqlalchemy<2.0' psycopg2-binary The configuration file name for this integration is `python.d/pandas.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -136,8 +136,8 @@ Every configuration JOB starts with a `job_name` value which will appear in the | chart_configs | an array of chart configuration dictionaries | [] | yes | | chart_configs.name | name of the chart to be displayed in the dashboard. | None | yes | | chart_configs.title | title of the chart to be displayed in the dashboard. | None | yes | -| chart_configs.family | [family](/docs/dashboards-and-charts/netdata-charts.md#families) of the chart to be displayed in the dashboard. | None | yes | -| chart_configs.context | [context](/docs/dashboards-and-charts/netdata-charts.md#contexts) of the chart to be displayed in the dashboard. 
| None | yes | +| chart_configs.family | [family](https://github.com/netdata/netdata/blob/master/docs/dashboards-and-charts/netdata-charts.md#families) of the chart to be displayed in the dashboard. | None | yes | +| chart_configs.context | [context](https://github.com/netdata/netdata/blob/master/docs/dashboards-and-charts/netdata-charts.md#contexts) of the chart to be displayed in the dashboard. | None | yes | | chart_configs.type | the type of the chart to be displayed in the dashboard. | None | yes | | chart_configs.units | the units of the chart to be displayed in the dashboard. | None | yes | | chart_configs.df_steps | a series of pandas operations (one per line) that each returns a dataframe. | None | yes | diff --git a/src/collectors/python.d.plugin/python.d.conf b/src/collectors/python.d.plugin/python.d.conf index 4fcecc75d..e2ce1347e 100644 --- a/src/collectors/python.d.plugin/python.d.conf +++ b/src/collectors/python.d.plugin/python.d.conf @@ -26,30 +26,23 @@ gc_run: yes gc_interval: 300 # am2320: yes -# anomalies: no -# boinc: yes -# ceph: yes # this is just an example go_expvar: no # haproxy: yes -# openldap: yes -# oracledb: yes # pandas: yes # retroshare: yes -# samba: yes # smartd_log: yes -# spigotmc: yes # traefik: yes # varnish: yes -# w1sensor: yes -# zscores: no ## Disabled for existing installations. adaptec_raid: no # Removed (replaced with go.d/adaptercraid). apache: no # Removed (replaced with go.d/apache). beanstalk: no # Removed (replaced with go.d/beanstalk). +boinc: no # Removed (replaced with go.d/boinc). dovecot: no # Removed (replaced with go.d/dovecot). +ceph: no # Removed (replaced with go.d/ceph). elasticsearch: no # Removed (replaced with go.d/elasticsearch). exim: no # Removed (replaced with go.d/exim). fail2ban: no # Removed (replaced with go.d/fail2ban). @@ -68,15 +61,21 @@ mysql: no # Removed (replaced with go.d/mysql). nginx: no # Removed (replaced with go.d/nginx). nsd: no # Removed (replaced with go.d/nsd). nvidia_smi: no # Removed (replaced with go.d/nvidia_smi). +openldap: no # Removed (replaced with go.d/openldap). +oracledb: no # Removed (replaced with go.d/oracledb). postfix: no # Removed (replaced with go.d/postfix). postgres: no # Removed (replaced with go.d/postgres). proxysql: no # Removed (replaced with go.d/proxysql). redis: no # Removed (replaced with go.d/redis). rethinkdbs: no # Removed (replaced with go.d/rethinkdb). riakkv: no # Removed (replaced with go.d/riak). +samba: no # Removed (replaced with go.d/samba). sensors: no # Removed (replaced with go.d/sensors). squid: no # Removed (replaced with go.d/squid). +spigotmc: no # Removed (replaced with go.d/spigotmc). tomcat: no # Removed (replaced with go.d/tomcat) tor: no # Removed (replaced with go.d/tor). puppet: no # Removed (replaced with go.d/puppet). uwsgi: no # Removed (replaced with go.d/uwsgi). +varnish: no # Removed (replaced with go.d/varnish). 
+w1sensor: no # Removed (replaced with go.d/w1sensor) diff --git a/src/collectors/python.d.plugin/python.d.plugin.in b/src/collectors/python.d.plugin/python.d.plugin.in index 81e68f94c..089fb5a58 100644 --- a/src/collectors/python.d.plugin/python.d.plugin.in +++ b/src/collectors/python.d.plugin/python.d.plugin.in @@ -12,7 +12,8 @@ do done if [ "$pybinary" = "" ] then - echo "ERROR python IS NOT AVAILABLE IN THIS SYSTEM" + echo 1>&2 "python.d ERROR: python is not installed on this system" + echo "EXIT" exit 1 fi exec "$pybinary" "$0" "${filtered[@]}" # ''' diff --git a/src/collectors/python.d.plugin/python_modules/bases/loaders.py b/src/collectors/python.d.plugin/python_modules/bases/loaders.py index 095f3a3b1..6ffa2189d 100644 --- a/src/collectors/python.d.plugin/python_modules/bases/loaders.py +++ b/src/collectors/python.d.plugin/python_modules/bases/loaders.py @@ -3,27 +3,17 @@ # Author: Ilya Mashchenko (ilyam8) # SPDX-License-Identifier: GPL-3.0-or-later - -from sys import version_info - -PY_VERSION = version_info[:2] - try: - if PY_VERSION > (3, 1): - from pyyaml3 import SafeLoader as YamlSafeLoader - else: - from pyyaml2 import SafeLoader as YamlSafeLoader + from pyyaml3 import SafeLoader as YamlSafeLoader except ImportError: from yaml import SafeLoader as YamlSafeLoader - try: from collections import OrderedDict except ImportError: from third_party.ordereddict import OrderedDict - -DEFAULT_MAPPING_TAG = 'tag:yaml.org,2002:map' if PY_VERSION > (3, 1) else u'tag:yaml.org,2002:map' +DEFAULT_MAPPING_TAG = 'tag:yaml.org,2002:map' def dict_constructor(loader, node): diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml2/__init__.py b/src/collectors/python.d.plugin/python_modules/pyyaml2/__init__.py deleted file mode 100644 index 4d560e438..000000000 --- a/src/collectors/python.d.plugin/python_modules/pyyaml2/__init__.py +++ /dev/null @@ -1,316 +0,0 @@ -# SPDX-License-Identifier: MIT - -from error import * - -from tokens import * -from events import * -from nodes import * - -from loader import * -from dumper import * - -__version__ = '3.11' - -try: - from cyaml import * - __with_libyaml__ = True -except ImportError: - __with_libyaml__ = False - -def scan(stream, Loader=Loader): - """ - Scan a YAML stream and produce scanning tokens. - """ - loader = Loader(stream) - try: - while loader.check_token(): - yield loader.get_token() - finally: - loader.dispose() - -def parse(stream, Loader=Loader): - """ - Parse a YAML stream and produce parsing events. - """ - loader = Loader(stream) - try: - while loader.check_event(): - yield loader.get_event() - finally: - loader.dispose() - -def compose(stream, Loader=Loader): - """ - Parse the first YAML document in a stream - and produce the corresponding representation tree. - """ - loader = Loader(stream) - try: - return loader.get_single_node() - finally: - loader.dispose() - -def compose_all(stream, Loader=Loader): - """ - Parse all YAML documents in a stream - and produce corresponding representation trees. - """ - loader = Loader(stream) - try: - while loader.check_node(): - yield loader.get_node() - finally: - loader.dispose() - -def load(stream, Loader=Loader): - """ - Parse the first YAML document in a stream - and produce the corresponding Python object. - """ - loader = Loader(stream) - try: - return loader.get_single_data() - finally: - loader.dispose() - -def load_all(stream, Loader=Loader): - """ - Parse all YAML documents in a stream - and produce corresponding Python objects. 
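[Editor's note] The `bases/loaders.py` hunk above keeps only the Python 3 path: a single `DEFAULT_MAPPING_TAG` plus a `dict_constructor` (the rest of that module, not shown in the hunk, registers it so YAML mappings load in file order). The same effect can be sketched against the maintained PyPI `pyyaml` package; this is an illustration of the technique, not the plugin's exact code:

```python
# Sketch, using the maintained PyPI "pyyaml" package: make YAML mappings
# load as OrderedDict by registering a constructor for the standard map
# tag, mirroring what bases/loaders.py sets up with DEFAULT_MAPPING_TAG.
import yaml
from collections import OrderedDict

DEFAULT_MAPPING_TAG = 'tag:yaml.org,2002:map'

def dict_constructor(loader, node):
    return OrderedDict(loader.construct_pairs(node))

yaml.SafeLoader.add_constructor(DEFAULT_MAPPING_TAG, dict_constructor)

print(yaml.safe_load("b: 1\na: 2"))
# an OrderedDict with the keys in file order: b, then a
```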
- """ - loader = Loader(stream) - try: - while loader.check_data(): - yield loader.get_data() - finally: - loader.dispose() - -def safe_load(stream): - """ - Parse the first YAML document in a stream - and produce the corresponding Python object. - Resolve only basic YAML tags. - """ - return load(stream, SafeLoader) - -def safe_load_all(stream): - """ - Parse all YAML documents in a stream - and produce corresponding Python objects. - Resolve only basic YAML tags. - """ - return load_all(stream, SafeLoader) - -def emit(events, stream=None, Dumper=Dumper, - canonical=None, indent=None, width=None, - allow_unicode=None, line_break=None): - """ - Emit YAML parsing events into a stream. - If stream is None, return the produced string instead. - """ - getvalue = None - if stream is None: - from StringIO import StringIO - stream = StringIO() - getvalue = stream.getvalue - dumper = Dumper(stream, canonical=canonical, indent=indent, width=width, - allow_unicode=allow_unicode, line_break=line_break) - try: - for event in events: - dumper.emit(event) - finally: - dumper.dispose() - if getvalue: - return getvalue() - -def serialize_all(nodes, stream=None, Dumper=Dumper, - canonical=None, indent=None, width=None, - allow_unicode=None, line_break=None, - encoding='utf-8', explicit_start=None, explicit_end=None, - version=None, tags=None): - """ - Serialize a sequence of representation trees into a YAML stream. - If stream is None, return the produced string instead. - """ - getvalue = None - if stream is None: - if encoding is None: - from StringIO import StringIO - else: - from cStringIO import StringIO - stream = StringIO() - getvalue = stream.getvalue - dumper = Dumper(stream, canonical=canonical, indent=indent, width=width, - allow_unicode=allow_unicode, line_break=line_break, - encoding=encoding, version=version, tags=tags, - explicit_start=explicit_start, explicit_end=explicit_end) - try: - dumper.open() - for node in nodes: - dumper.serialize(node) - dumper.close() - finally: - dumper.dispose() - if getvalue: - return getvalue() - -def serialize(node, stream=None, Dumper=Dumper, **kwds): - """ - Serialize a representation tree into a YAML stream. - If stream is None, return the produced string instead. - """ - return serialize_all([node], stream, Dumper=Dumper, **kwds) - -def dump_all(documents, stream=None, Dumper=Dumper, - default_style=None, default_flow_style=None, - canonical=None, indent=None, width=None, - allow_unicode=None, line_break=None, - encoding='utf-8', explicit_start=None, explicit_end=None, - version=None, tags=None): - """ - Serialize a sequence of Python objects into a YAML stream. - If stream is None, return the produced string instead. - """ - getvalue = None - if stream is None: - if encoding is None: - from StringIO import StringIO - else: - from cStringIO import StringIO - stream = StringIO() - getvalue = stream.getvalue - dumper = Dumper(stream, default_style=default_style, - default_flow_style=default_flow_style, - canonical=canonical, indent=indent, width=width, - allow_unicode=allow_unicode, line_break=line_break, - encoding=encoding, version=version, tags=tags, - explicit_start=explicit_start, explicit_end=explicit_end) - try: - dumper.open() - for data in documents: - dumper.represent(data) - dumper.close() - finally: - dumper.dispose() - if getvalue: - return getvalue() - -def dump(data, stream=None, Dumper=Dumper, **kwds): - """ - Serialize a Python object into a YAML stream. - If stream is None, return the produced string instead. 
- """ - return dump_all([data], stream, Dumper=Dumper, **kwds) - -def safe_dump_all(documents, stream=None, **kwds): - """ - Serialize a sequence of Python objects into a YAML stream. - Produce only basic YAML tags. - If stream is None, return the produced string instead. - """ - return dump_all(documents, stream, Dumper=SafeDumper, **kwds) - -def safe_dump(data, stream=None, **kwds): - """ - Serialize a Python object into a YAML stream. - Produce only basic YAML tags. - If stream is None, return the produced string instead. - """ - return dump_all([data], stream, Dumper=SafeDumper, **kwds) - -def add_implicit_resolver(tag, regexp, first=None, - Loader=Loader, Dumper=Dumper): - """ - Add an implicit scalar detector. - If an implicit scalar value matches the given regexp, - the corresponding tag is assigned to the scalar. - first is a sequence of possible initial characters or None. - """ - Loader.add_implicit_resolver(tag, regexp, first) - Dumper.add_implicit_resolver(tag, regexp, first) - -def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper): - """ - Add a path based resolver for the given tag. - A path is a list of keys that forms a path - to a node in the representation tree. - Keys can be string values, integers, or None. - """ - Loader.add_path_resolver(tag, path, kind) - Dumper.add_path_resolver(tag, path, kind) - -def add_constructor(tag, constructor, Loader=Loader): - """ - Add a constructor for the given tag. - Constructor is a function that accepts a Loader instance - and a node object and produces the corresponding Python object. - """ - Loader.add_constructor(tag, constructor) - -def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader): - """ - Add a multi-constructor for the given tag prefix. - Multi-constructor is called for a node if its tag starts with tag_prefix. - Multi-constructor accepts a Loader instance, a tag suffix, - and a node object and produces the corresponding Python object. - """ - Loader.add_multi_constructor(tag_prefix, multi_constructor) - -def add_representer(data_type, representer, Dumper=Dumper): - """ - Add a representer for the given type. - Representer is a function accepting a Dumper instance - and an instance of the given data type - and producing the corresponding representation node. - """ - Dumper.add_representer(data_type, representer) - -def add_multi_representer(data_type, multi_representer, Dumper=Dumper): - """ - Add a representer for the given type. - Multi-representer is a function accepting a Dumper instance - and an instance of the given data type or subtype - and producing the corresponding representation node. - """ - Dumper.add_multi_representer(data_type, multi_representer) - -class YAMLObjectMetaclass(type): - """ - The metaclass for YAMLObject. - """ - def __init__(cls, name, bases, kwds): - super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds) - if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None: - cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml) - cls.yaml_dumper.add_representer(cls, cls.to_yaml) - -class YAMLObject(object): - """ - An object that can dump itself to a YAML stream - and load itself from a YAML stream. - """ - - __metaclass__ = YAMLObjectMetaclass - __slots__ = () # no direct instantiation, so allow immutable subclasses - - yaml_loader = Loader - yaml_dumper = Dumper - - yaml_tag = None - yaml_flow_style = None - - def from_yaml(cls, loader, node): - """ - Convert a representation node to a Python object. 
- """ - return loader.construct_yaml_object(node, cls) - from_yaml = classmethod(from_yaml) - - def to_yaml(cls, dumper, data): - """ - Convert a Python object to a representation node. - """ - return dumper.represent_yaml_object(cls.yaml_tag, data, cls, - flow_style=cls.yaml_flow_style) - to_yaml = classmethod(to_yaml) - diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml2/composer.py b/src/collectors/python.d.plugin/python_modules/pyyaml2/composer.py deleted file mode 100644 index 6b41b8067..000000000 --- a/src/collectors/python.d.plugin/python_modules/pyyaml2/composer.py +++ /dev/null @@ -1,140 +0,0 @@ -# SPDX-License-Identifier: MIT - -__all__ = ['Composer', 'ComposerError'] - -from error import MarkedYAMLError -from events import * -from nodes import * - -class ComposerError(MarkedYAMLError): - pass - -class Composer(object): - - def __init__(self): - self.anchors = {} - - def check_node(self): - # Drop the STREAM-START event. - if self.check_event(StreamStartEvent): - self.get_event() - - # If there are more documents available? - return not self.check_event(StreamEndEvent) - - def get_node(self): - # Get the root node of the next document. - if not self.check_event(StreamEndEvent): - return self.compose_document() - - def get_single_node(self): - # Drop the STREAM-START event. - self.get_event() - - # Compose a document if the stream is not empty. - document = None - if not self.check_event(StreamEndEvent): - document = self.compose_document() - - # Ensure that the stream contains no more documents. - if not self.check_event(StreamEndEvent): - event = self.get_event() - raise ComposerError("expected a single document in the stream", - document.start_mark, "but found another document", - event.start_mark) - - # Drop the STREAM-END event. - self.get_event() - - return document - - def compose_document(self): - # Drop the DOCUMENT-START event. - self.get_event() - - # Compose the root node. - node = self.compose_node(None, None) - - # Drop the DOCUMENT-END event. 
- self.get_event() - - self.anchors = {} - return node - - def compose_node(self, parent, index): - if self.check_event(AliasEvent): - event = self.get_event() - anchor = event.anchor - if anchor not in self.anchors: - raise ComposerError(None, None, "found undefined alias %r" - % anchor.encode('utf-8'), event.start_mark) - return self.anchors[anchor] - event = self.peek_event() - anchor = event.anchor - if anchor is not None: - if anchor in self.anchors: - raise ComposerError("found duplicate anchor %r; first occurence" - % anchor.encode('utf-8'), self.anchors[anchor].start_mark, - "second occurence", event.start_mark) - self.descend_resolver(parent, index) - if self.check_event(ScalarEvent): - node = self.compose_scalar_node(anchor) - elif self.check_event(SequenceStartEvent): - node = self.compose_sequence_node(anchor) - elif self.check_event(MappingStartEvent): - node = self.compose_mapping_node(anchor) - self.ascend_resolver() - return node - - def compose_scalar_node(self, anchor): - event = self.get_event() - tag = event.tag - if tag is None or tag == u'!': - tag = self.resolve(ScalarNode, event.value, event.implicit) - node = ScalarNode(tag, event.value, - event.start_mark, event.end_mark, style=event.style) - if anchor is not None: - self.anchors[anchor] = node - return node - - def compose_sequence_node(self, anchor): - start_event = self.get_event() - tag = start_event.tag - if tag is None or tag == u'!': - tag = self.resolve(SequenceNode, None, start_event.implicit) - node = SequenceNode(tag, [], - start_event.start_mark, None, - flow_style=start_event.flow_style) - if anchor is not None: - self.anchors[anchor] = node - index = 0 - while not self.check_event(SequenceEndEvent): - node.value.append(self.compose_node(node, index)) - index += 1 - end_event = self.get_event() - node.end_mark = end_event.end_mark - return node - - def compose_mapping_node(self, anchor): - start_event = self.get_event() - tag = start_event.tag - if tag is None or tag == u'!': - tag = self.resolve(MappingNode, None, start_event.implicit) - node = MappingNode(tag, [], - start_event.start_mark, None, - flow_style=start_event.flow_style) - if anchor is not None: - self.anchors[anchor] = node - while not self.check_event(MappingEndEvent): - #key_event = self.peek_event() - item_key = self.compose_node(node, None) - #if item_key in node.value: - # raise ComposerError("while composing a mapping", start_event.start_mark, - # "found duplicate key", key_event.start_mark) - item_value = self.compose_node(node, item_key) - #node.value[item_key] = item_value - node.value.append((item_key, item_value)) - end_event = self.get_event() - node.end_mark = end_event.end_mark - return node - diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml2/constructor.py b/src/collectors/python.d.plugin/python_modules/pyyaml2/constructor.py deleted file mode 100644 index 8ad1b90a7..000000000 --- a/src/collectors/python.d.plugin/python_modules/pyyaml2/constructor.py +++ /dev/null @@ -1,676 +0,0 @@ -# SPDX-License-Identifier: MIT - -__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor', - 'ConstructorError'] - -from error import * -from nodes import * - -import datetime - -import binascii, re, sys, types - -class ConstructorError(MarkedYAMLError): - pass - -class BaseConstructor(object): - - yaml_constructors = {} - yaml_multi_constructors = {} - - def __init__(self): - self.constructed_objects = {} - self.recursive_objects = {} - self.state_generators = [] - self.deep_construct = False - - def check_data(self): - 
# If there are more documents available? - return self.check_node() - - def get_data(self): - # Construct and return the next document. - if self.check_node(): - return self.construct_document(self.get_node()) - - def get_single_data(self): - # Ensure that the stream contains a single document and construct it. - node = self.get_single_node() - if node is not None: - return self.construct_document(node) - return None - - def construct_document(self, node): - data = self.construct_object(node) - while self.state_generators: - state_generators = self.state_generators - self.state_generators = [] - for generator in state_generators: - for dummy in generator: - pass - self.constructed_objects = {} - self.recursive_objects = {} - self.deep_construct = False - return data - - def construct_object(self, node, deep=False): - if node in self.constructed_objects: - return self.constructed_objects[node] - if deep: - old_deep = self.deep_construct - self.deep_construct = True - if node in self.recursive_objects: - raise ConstructorError(None, None, - "found unconstructable recursive node", node.start_mark) - self.recursive_objects[node] = None - constructor = None - tag_suffix = None - if node.tag in self.yaml_constructors: - constructor = self.yaml_constructors[node.tag] - else: - for tag_prefix in self.yaml_multi_constructors: - if node.tag.startswith(tag_prefix): - tag_suffix = node.tag[len(tag_prefix):] - constructor = self.yaml_multi_constructors[tag_prefix] - break - else: - if None in self.yaml_multi_constructors: - tag_suffix = node.tag - constructor = self.yaml_multi_constructors[None] - elif None in self.yaml_constructors: - constructor = self.yaml_constructors[None] - elif isinstance(node, ScalarNode): - constructor = self.__class__.construct_scalar - elif isinstance(node, SequenceNode): - constructor = self.__class__.construct_sequence - elif isinstance(node, MappingNode): - constructor = self.__class__.construct_mapping - if tag_suffix is None: - data = constructor(self, node) - else: - data = constructor(self, tag_suffix, node) - if isinstance(data, types.GeneratorType): - generator = data - data = generator.next() - if self.deep_construct: - for dummy in generator: - pass - else: - self.state_generators.append(generator) - self.constructed_objects[node] = data - del self.recursive_objects[node] - if deep: - self.deep_construct = old_deep - return data - - def construct_scalar(self, node): - if not isinstance(node, ScalarNode): - raise ConstructorError(None, None, - "expected a scalar node, but found %s" % node.id, - node.start_mark) - return node.value - - def construct_sequence(self, node, deep=False): - if not isinstance(node, SequenceNode): - raise ConstructorError(None, None, - "expected a sequence node, but found %s" % node.id, - node.start_mark) - return [self.construct_object(child, deep=deep) - for child in node.value] - - def construct_mapping(self, node, deep=False): - if not isinstance(node, MappingNode): - raise ConstructorError(None, None, - "expected a mapping node, but found %s" % node.id, - node.start_mark) - mapping = {} - for key_node, value_node in node.value: - key = self.construct_object(key_node, deep=deep) - try: - hash(key) - except TypeError, exc: - raise ConstructorError("while constructing a mapping", node.start_mark, - "found unacceptable key (%s)" % exc, key_node.start_mark) - value = self.construct_object(value_node, deep=deep) - mapping[key] = value - return mapping - - def construct_pairs(self, node, deep=False): - if not isinstance(node, MappingNode): - 
raise ConstructorError(None, None, - "expected a mapping node, but found %s" % node.id, - node.start_mark) - pairs = [] - for key_node, value_node in node.value: - key = self.construct_object(key_node, deep=deep) - value = self.construct_object(value_node, deep=deep) - pairs.append((key, value)) - return pairs - - def add_constructor(cls, tag, constructor): - if not 'yaml_constructors' in cls.__dict__: - cls.yaml_constructors = cls.yaml_constructors.copy() - cls.yaml_constructors[tag] = constructor - add_constructor = classmethod(add_constructor) - - def add_multi_constructor(cls, tag_prefix, multi_constructor): - if not 'yaml_multi_constructors' in cls.__dict__: - cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy() - cls.yaml_multi_constructors[tag_prefix] = multi_constructor - add_multi_constructor = classmethod(add_multi_constructor) - -class SafeConstructor(BaseConstructor): - - def construct_scalar(self, node): - if isinstance(node, MappingNode): - for key_node, value_node in node.value: - if key_node.tag == u'tag:yaml.org,2002:value': - return self.construct_scalar(value_node) - return BaseConstructor.construct_scalar(self, node) - - def flatten_mapping(self, node): - merge = [] - index = 0 - while index < len(node.value): - key_node, value_node = node.value[index] - if key_node.tag == u'tag:yaml.org,2002:merge': - del node.value[index] - if isinstance(value_node, MappingNode): - self.flatten_mapping(value_node) - merge.extend(value_node.value) - elif isinstance(value_node, SequenceNode): - submerge = [] - for subnode in value_node.value: - if not isinstance(subnode, MappingNode): - raise ConstructorError("while constructing a mapping", - node.start_mark, - "expected a mapping for merging, but found %s" - % subnode.id, subnode.start_mark) - self.flatten_mapping(subnode) - submerge.append(subnode.value) - submerge.reverse() - for value in submerge: - merge.extend(value) - else: - raise ConstructorError("while constructing a mapping", node.start_mark, - "expected a mapping or list of mappings for merging, but found %s" - % value_node.id, value_node.start_mark) - elif key_node.tag == u'tag:yaml.org,2002:value': - key_node.tag = u'tag:yaml.org,2002:str' - index += 1 - else: - index += 1 - if merge: - node.value = merge + node.value - - def construct_mapping(self, node, deep=False): - if isinstance(node, MappingNode): - self.flatten_mapping(node) - return BaseConstructor.construct_mapping(self, node, deep=deep) - - def construct_yaml_null(self, node): - self.construct_scalar(node) - return None - - bool_values = { - u'yes': True, - u'no': False, - u'true': True, - u'false': False, - u'on': True, - u'off': False, - } - - def construct_yaml_bool(self, node): - value = self.construct_scalar(node) - return self.bool_values[value.lower()] - - def construct_yaml_int(self, node): - value = str(self.construct_scalar(node)) - value = value.replace('_', '') - sign = +1 - if value[0] == '-': - sign = -1 - if value[0] in '+-': - value = value[1:] - if value == '0': - return 0 - elif value.startswith('0b'): - return sign*int(value[2:], 2) - elif value.startswith('0x'): - return sign*int(value[2:], 16) - elif value[0] == '0': - return sign*int(value, 8) - elif ':' in value: - digits = [int(part) for part in value.split(':')] - digits.reverse() - base = 1 - value = 0 - for digit in digits: - value += digit*base - base *= 60 - return sign*value - else: - return sign*int(value) - - inf_value = 1e300 - while inf_value != inf_value*inf_value: - inf_value *= inf_value - nan_value = 
-inf_value/inf_value # Trying to make a quiet NaN (like C99). - - def construct_yaml_float(self, node): - value = str(self.construct_scalar(node)) - value = value.replace('_', '').lower() - sign = +1 - if value[0] == '-': - sign = -1 - if value[0] in '+-': - value = value[1:] - if value == '.inf': - return sign*self.inf_value - elif value == '.nan': - return self.nan_value - elif ':' in value: - digits = [float(part) for part in value.split(':')] - digits.reverse() - base = 1 - value = 0.0 - for digit in digits: - value += digit*base - base *= 60 - return sign*value - else: - return sign*float(value) - - def construct_yaml_binary(self, node): - value = self.construct_scalar(node) - try: - return str(value).decode('base64') - except (binascii.Error, UnicodeEncodeError), exc: - raise ConstructorError(None, None, - "failed to decode base64 data: %s" % exc, node.start_mark) - - timestamp_regexp = re.compile( - ur'''^(?P[0-9][0-9][0-9][0-9]) - -(?P[0-9][0-9]?) - -(?P[0-9][0-9]?) - (?:(?:[Tt]|[ \t]+) - (?P[0-9][0-9]?) - :(?P[0-9][0-9]) - :(?P[0-9][0-9]) - (?:\.(?P[0-9]*))? - (?:[ \t]*(?PZ|(?P[-+])(?P[0-9][0-9]?) - (?::(?P[0-9][0-9]))?))?)?$''', re.X) - - def construct_yaml_timestamp(self, node): - value = self.construct_scalar(node) - match = self.timestamp_regexp.match(node.value) - values = match.groupdict() - year = int(values['year']) - month = int(values['month']) - day = int(values['day']) - if not values['hour']: - return datetime.date(year, month, day) - hour = int(values['hour']) - minute = int(values['minute']) - second = int(values['second']) - fraction = 0 - if values['fraction']: - fraction = values['fraction'][:6] - while len(fraction) < 6: - fraction += '0' - fraction = int(fraction) - delta = None - if values['tz_sign']: - tz_hour = int(values['tz_hour']) - tz_minute = int(values['tz_minute'] or 0) - delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute) - if values['tz_sign'] == '-': - delta = -delta - data = datetime.datetime(year, month, day, hour, minute, second, fraction) - if delta: - data -= delta - return data - - def construct_yaml_omap(self, node): - # Note: we do not check for duplicate keys, because it's too - # CPU-expensive. - omap = [] - yield omap - if not isinstance(node, SequenceNode): - raise ConstructorError("while constructing an ordered map", node.start_mark, - "expected a sequence, but found %s" % node.id, node.start_mark) - for subnode in node.value: - if not isinstance(subnode, MappingNode): - raise ConstructorError("while constructing an ordered map", node.start_mark, - "expected a mapping of length 1, but found %s" % subnode.id, - subnode.start_mark) - if len(subnode.value) != 1: - raise ConstructorError("while constructing an ordered map", node.start_mark, - "expected a single mapping item, but found %d items" % len(subnode.value), - subnode.start_mark) - key_node, value_node = subnode.value[0] - key = self.construct_object(key_node) - value = self.construct_object(value_node) - omap.append((key, value)) - - def construct_yaml_pairs(self, node): - # Note: the same code as `construct_yaml_omap`. 
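[Editor's note] The integer and float constructors above accept several YAML 1.1 scalar forms that often surprise people: sign handling, `0b`/`0x`/leading-zero prefixes, sexagesimal (base-60) values joined by `:`, the special `.inf`/`.nan` floats, and the extended boolean words. The maintained PyPI `pyyaml` package resolves them the same way:

```python
# YAML 1.1 scalar forms handled by the constructors above, demonstrated
# with the maintained PyPI "pyyaml" package.
import yaml

print(yaml.safe_load("190:20:30"))  # 685230 -- base-60: 190*3600 + 20*60 + 30
print(yaml.safe_load("0x1F"))       # 31
print(yaml.safe_load("0b101"))      # 5
print(yaml.safe_load(".inf"))       # inf
print(yaml.safe_load("on"))         # True -- YAML 1.1 boolean
```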
- pairs = [] - yield pairs - if not isinstance(node, SequenceNode): - raise ConstructorError("while constructing pairs", node.start_mark, - "expected a sequence, but found %s" % node.id, node.start_mark) - for subnode in node.value: - if not isinstance(subnode, MappingNode): - raise ConstructorError("while constructing pairs", node.start_mark, - "expected a mapping of length 1, but found %s" % subnode.id, - subnode.start_mark) - if len(subnode.value) != 1: - raise ConstructorError("while constructing pairs", node.start_mark, - "expected a single mapping item, but found %d items" % len(subnode.value), - subnode.start_mark) - key_node, value_node = subnode.value[0] - key = self.construct_object(key_node) - value = self.construct_object(value_node) - pairs.append((key, value)) - - def construct_yaml_set(self, node): - data = set() - yield data - value = self.construct_mapping(node) - data.update(value) - - def construct_yaml_str(self, node): - value = self.construct_scalar(node) - try: - return value.encode('ascii') - except UnicodeEncodeError: - return value - - def construct_yaml_seq(self, node): - data = [] - yield data - data.extend(self.construct_sequence(node)) - - def construct_yaml_map(self, node): - data = {} - yield data - value = self.construct_mapping(node) - data.update(value) - - def construct_yaml_object(self, node, cls): - data = cls.__new__(cls) - yield data - if hasattr(data, '__setstate__'): - state = self.construct_mapping(node, deep=True) - data.__setstate__(state) - else: - state = self.construct_mapping(node) - data.__dict__.update(state) - - def construct_undefined(self, node): - raise ConstructorError(None, None, - "could not determine a constructor for the tag %r" % node.tag.encode('utf-8'), - node.start_mark) - -SafeConstructor.add_constructor( - u'tag:yaml.org,2002:null', - SafeConstructor.construct_yaml_null) - -SafeConstructor.add_constructor( - u'tag:yaml.org,2002:bool', - SafeConstructor.construct_yaml_bool) - -SafeConstructor.add_constructor( - u'tag:yaml.org,2002:int', - SafeConstructor.construct_yaml_int) - -SafeConstructor.add_constructor( - u'tag:yaml.org,2002:float', - SafeConstructor.construct_yaml_float) - -SafeConstructor.add_constructor( - u'tag:yaml.org,2002:binary', - SafeConstructor.construct_yaml_binary) - -SafeConstructor.add_constructor( - u'tag:yaml.org,2002:timestamp', - SafeConstructor.construct_yaml_timestamp) - -SafeConstructor.add_constructor( - u'tag:yaml.org,2002:omap', - SafeConstructor.construct_yaml_omap) - -SafeConstructor.add_constructor( - u'tag:yaml.org,2002:pairs', - SafeConstructor.construct_yaml_pairs) - -SafeConstructor.add_constructor( - u'tag:yaml.org,2002:set', - SafeConstructor.construct_yaml_set) - -SafeConstructor.add_constructor( - u'tag:yaml.org,2002:str', - SafeConstructor.construct_yaml_str) - -SafeConstructor.add_constructor( - u'tag:yaml.org,2002:seq', - SafeConstructor.construct_yaml_seq) - -SafeConstructor.add_constructor( - u'tag:yaml.org,2002:map', - SafeConstructor.construct_yaml_map) - -SafeConstructor.add_constructor(None, - SafeConstructor.construct_undefined) - -class Constructor(SafeConstructor): - - def construct_python_str(self, node): - return self.construct_scalar(node).encode('utf-8') - - def construct_python_unicode(self, node): - return self.construct_scalar(node) - - def construct_python_long(self, node): - return long(self.construct_yaml_int(node)) - - def construct_python_complex(self, node): - return complex(self.construct_scalar(node)) - - def construct_python_tuple(self, node): - return 
tuple(self.construct_sequence(node)) - - def find_python_module(self, name, mark): - if not name: - raise ConstructorError("while constructing a Python module", mark, - "expected non-empty name appended to the tag", mark) - try: - __import__(name) - except ImportError, exc: - raise ConstructorError("while constructing a Python module", mark, - "cannot find module %r (%s)" % (name.encode('utf-8'), exc), mark) - return sys.modules[name] - - def find_python_name(self, name, mark): - if not name: - raise ConstructorError("while constructing a Python object", mark, - "expected non-empty name appended to the tag", mark) - if u'.' in name: - module_name, object_name = name.rsplit('.', 1) - else: - module_name = '__builtin__' - object_name = name - try: - __import__(module_name) - except ImportError, exc: - raise ConstructorError("while constructing a Python object", mark, - "cannot find module %r (%s)" % (module_name.encode('utf-8'), exc), mark) - module = sys.modules[module_name] - if not hasattr(module, object_name): - raise ConstructorError("while constructing a Python object", mark, - "cannot find %r in the module %r" % (object_name.encode('utf-8'), - module.__name__), mark) - return getattr(module, object_name) - - def construct_python_name(self, suffix, node): - value = self.construct_scalar(node) - if value: - raise ConstructorError("while constructing a Python name", node.start_mark, - "expected the empty value, but found %r" % value.encode('utf-8'), - node.start_mark) - return self.find_python_name(suffix, node.start_mark) - - def construct_python_module(self, suffix, node): - value = self.construct_scalar(node) - if value: - raise ConstructorError("while constructing a Python module", node.start_mark, - "expected the empty value, but found %r" % value.encode('utf-8'), - node.start_mark) - return self.find_python_module(suffix, node.start_mark) - - class classobj: pass - - def make_python_instance(self, suffix, node, - args=None, kwds=None, newobj=False): - if not args: - args = [] - if not kwds: - kwds = {} - cls = self.find_python_name(suffix, node.start_mark) - if newobj and isinstance(cls, type(self.classobj)) \ - and not args and not kwds: - instance = self.classobj() - instance.__class__ = cls - return instance - elif newobj and isinstance(cls, type): - return cls.__new__(cls, *args, **kwds) - else: - return cls(*args, **kwds) - - def set_python_instance_state(self, instance, state): - if hasattr(instance, '__setstate__'): - instance.__setstate__(state) - else: - slotstate = {} - if isinstance(state, tuple) and len(state) == 2: - state, slotstate = state - if hasattr(instance, '__dict__'): - instance.__dict__.update(state) - elif state: - slotstate.update(state) - for key, value in slotstate.items(): - setattr(object, key, value) - - def construct_python_object(self, suffix, node): - # Format: - # !!python/object:module.name { ... state ... } - instance = self.make_python_instance(suffix, node, newobj=True) - yield instance - deep = hasattr(instance, '__setstate__') - state = self.construct_mapping(node, deep=deep) - self.set_python_instance_state(instance, state) - - def construct_python_object_apply(self, suffix, node, newobj=False): - # Format: - # !!python/object/apply # (or !!python/object/new) - # args: [ ... arguments ... ] - # kwds: { ... keywords ... } - # state: ... state ... - # listitems: [ ... listitems ... ] - # dictitems: { ... dictitems ... } - # or short format: - # !!python/object/apply [ ... arguments ... 
] - # The difference between !!python/object/apply and !!python/object/new - # is how an object is created, check make_python_instance for details. - if isinstance(node, SequenceNode): - args = self.construct_sequence(node, deep=True) - kwds = {} - state = {} - listitems = [] - dictitems = {} - else: - value = self.construct_mapping(node, deep=True) - args = value.get('args', []) - kwds = value.get('kwds', {}) - state = value.get('state', {}) - listitems = value.get('listitems', []) - dictitems = value.get('dictitems', {}) - instance = self.make_python_instance(suffix, node, args, kwds, newobj) - if state: - self.set_python_instance_state(instance, state) - if listitems: - instance.extend(listitems) - if dictitems: - for key in dictitems: - instance[key] = dictitems[key] - return instance - - def construct_python_object_new(self, suffix, node): - return self.construct_python_object_apply(suffix, node, newobj=True) - -Constructor.add_constructor( - u'tag:yaml.org,2002:python/none', - Constructor.construct_yaml_null) - -Constructor.add_constructor( - u'tag:yaml.org,2002:python/bool', - Constructor.construct_yaml_bool) - -Constructor.add_constructor( - u'tag:yaml.org,2002:python/str', - Constructor.construct_python_str) - -Constructor.add_constructor( - u'tag:yaml.org,2002:python/unicode', - Constructor.construct_python_unicode) - -Constructor.add_constructor( - u'tag:yaml.org,2002:python/int', - Constructor.construct_yaml_int) - -Constructor.add_constructor( - u'tag:yaml.org,2002:python/long', - Constructor.construct_python_long) - -Constructor.add_constructor( - u'tag:yaml.org,2002:python/float', - Constructor.construct_yaml_float) - -Constructor.add_constructor( - u'tag:yaml.org,2002:python/complex', - Constructor.construct_python_complex) - -Constructor.add_constructor( - u'tag:yaml.org,2002:python/list', - Constructor.construct_yaml_seq) - -Constructor.add_constructor( - u'tag:yaml.org,2002:python/tuple', - Constructor.construct_python_tuple) - -Constructor.add_constructor( - u'tag:yaml.org,2002:python/dict', - Constructor.construct_yaml_map) - -Constructor.add_multi_constructor( - u'tag:yaml.org,2002:python/name:', - Constructor.construct_python_name) - -Constructor.add_multi_constructor( - u'tag:yaml.org,2002:python/module:', - Constructor.construct_python_module) - -Constructor.add_multi_constructor( - u'tag:yaml.org,2002:python/object:', - Constructor.construct_python_object) - -Constructor.add_multi_constructor( - u'tag:yaml.org,2002:python/object/apply:', - Constructor.construct_python_object_apply) - -Constructor.add_multi_constructor( - u'tag:yaml.org,2002:python/object/new:', - Constructor.construct_python_object_new) - diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml2/cyaml.py b/src/collectors/python.d.plugin/python_modules/pyyaml2/cyaml.py deleted file mode 100644 index 2858ab479..000000000 --- a/src/collectors/python.d.plugin/python_modules/pyyaml2/cyaml.py +++ /dev/null @@ -1,86 +0,0 @@ -# SPDX-License-Identifier: MIT - -__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader', - 'CBaseDumper', 'CSafeDumper', 'CDumper'] - -from _yaml import CParser, CEmitter - -from constructor import * - -from serializer import * -from representer import * - -from resolver import * - -class CBaseLoader(CParser, BaseConstructor, BaseResolver): - - def __init__(self, stream): - CParser.__init__(self, stream) - BaseConstructor.__init__(self) - BaseResolver.__init__(self) - -class CSafeLoader(CParser, SafeConstructor, Resolver): - - def __init__(self, stream): - 
CParser.__init__(self, stream) - SafeConstructor.__init__(self) - Resolver.__init__(self) - -class CLoader(CParser, Constructor, Resolver): - - def __init__(self, stream): - CParser.__init__(self, stream) - Constructor.__init__(self) - Resolver.__init__(self) - -class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver): - - def __init__(self, stream, - default_style=None, default_flow_style=None, - canonical=None, indent=None, width=None, - allow_unicode=None, line_break=None, - encoding=None, explicit_start=None, explicit_end=None, - version=None, tags=None): - CEmitter.__init__(self, stream, canonical=canonical, - indent=indent, width=width, encoding=encoding, - allow_unicode=allow_unicode, line_break=line_break, - explicit_start=explicit_start, explicit_end=explicit_end, - version=version, tags=tags) - Representer.__init__(self, default_style=default_style, - default_flow_style=default_flow_style) - Resolver.__init__(self) - -class CSafeDumper(CEmitter, SafeRepresenter, Resolver): - - def __init__(self, stream, - default_style=None, default_flow_style=None, - canonical=None, indent=None, width=None, - allow_unicode=None, line_break=None, - encoding=None, explicit_start=None, explicit_end=None, - version=None, tags=None): - CEmitter.__init__(self, stream, canonical=canonical, - indent=indent, width=width, encoding=encoding, - allow_unicode=allow_unicode, line_break=line_break, - explicit_start=explicit_start, explicit_end=explicit_end, - version=version, tags=tags) - SafeRepresenter.__init__(self, default_style=default_style, - default_flow_style=default_flow_style) - Resolver.__init__(self) - -class CDumper(CEmitter, Serializer, Representer, Resolver): - - def __init__(self, stream, - default_style=None, default_flow_style=None, - canonical=None, indent=None, width=None, - allow_unicode=None, line_break=None, - encoding=None, explicit_start=None, explicit_end=None, - version=None, tags=None): - CEmitter.__init__(self, stream, canonical=canonical, - indent=indent, width=width, encoding=encoding, - allow_unicode=allow_unicode, line_break=line_break, - explicit_start=explicit_start, explicit_end=explicit_end, - version=version, tags=tags) - Representer.__init__(self, default_style=default_style, - default_flow_style=default_flow_style) - Resolver.__init__(self) - diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml2/dumper.py b/src/collectors/python.d.plugin/python_modules/pyyaml2/dumper.py deleted file mode 100644 index 3685cbeeb..000000000 --- a/src/collectors/python.d.plugin/python_modules/pyyaml2/dumper.py +++ /dev/null @@ -1,63 +0,0 @@ -# SPDX-License-Identifier: MIT - -__all__ = ['BaseDumper', 'SafeDumper', 'Dumper'] - -from emitter import * -from serializer import * -from representer import * -from resolver import * - -class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver): - - def __init__(self, stream, - default_style=None, default_flow_style=None, - canonical=None, indent=None, width=None, - allow_unicode=None, line_break=None, - encoding=None, explicit_start=None, explicit_end=None, - version=None, tags=None): - Emitter.__init__(self, stream, canonical=canonical, - indent=indent, width=width, - allow_unicode=allow_unicode, line_break=line_break) - Serializer.__init__(self, encoding=encoding, - explicit_start=explicit_start, explicit_end=explicit_end, - version=version, tags=tags) - Representer.__init__(self, default_style=default_style, - default_flow_style=default_flow_style) - Resolver.__init__(self) - -class SafeDumper(Emitter, Serializer, 
SafeRepresenter, Resolver): - - def __init__(self, stream, - default_style=None, default_flow_style=None, - canonical=None, indent=None, width=None, - allow_unicode=None, line_break=None, - encoding=None, explicit_start=None, explicit_end=None, - version=None, tags=None): - Emitter.__init__(self, stream, canonical=canonical, - indent=indent, width=width, - allow_unicode=allow_unicode, line_break=line_break) - Serializer.__init__(self, encoding=encoding, - explicit_start=explicit_start, explicit_end=explicit_end, - version=version, tags=tags) - SafeRepresenter.__init__(self, default_style=default_style, - default_flow_style=default_flow_style) - Resolver.__init__(self) - -class Dumper(Emitter, Serializer, Representer, Resolver): - - def __init__(self, stream, - default_style=None, default_flow_style=None, - canonical=None, indent=None, width=None, - allow_unicode=None, line_break=None, - encoding=None, explicit_start=None, explicit_end=None, - version=None, tags=None): - Emitter.__init__(self, stream, canonical=canonical, - indent=indent, width=width, - allow_unicode=allow_unicode, line_break=line_break) - Serializer.__init__(self, encoding=encoding, - explicit_start=explicit_start, explicit_end=explicit_end, - version=version, tags=tags) - Representer.__init__(self, default_style=default_style, - default_flow_style=default_flow_style) - Resolver.__init__(self) - diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml2/emitter.py b/src/collectors/python.d.plugin/python_modules/pyyaml2/emitter.py deleted file mode 100644 index 9a460a0fd..000000000 --- a/src/collectors/python.d.plugin/python_modules/pyyaml2/emitter.py +++ /dev/null @@ -1,1141 +0,0 @@ -# SPDX-License-Identifier: MIT - -# Emitter expects events obeying the following grammar: -# stream ::= STREAM-START document* STREAM-END -# document ::= DOCUMENT-START node DOCUMENT-END -# node ::= SCALAR | sequence | mapping -# sequence ::= SEQUENCE-START node* SEQUENCE-END -# mapping ::= MAPPING-START (node node)* MAPPING-END - -__all__ = ['Emitter', 'EmitterError'] - -from error import YAMLError -from events import * - -class EmitterError(YAMLError): - pass - -class ScalarAnalysis(object): - def __init__(self, scalar, empty, multiline, - allow_flow_plain, allow_block_plain, - allow_single_quoted, allow_double_quoted, - allow_block): - self.scalar = scalar - self.empty = empty - self.multiline = multiline - self.allow_flow_plain = allow_flow_plain - self.allow_block_plain = allow_block_plain - self.allow_single_quoted = allow_single_quoted - self.allow_double_quoted = allow_double_quoted - self.allow_block = allow_block - -class Emitter(object): - - DEFAULT_TAG_PREFIXES = { - u'!' : u'!', - u'tag:yaml.org,2002:' : u'!!', - } - - def __init__(self, stream, canonical=None, indent=None, width=None, - allow_unicode=None, line_break=None): - - # The stream should have the methods `write` and possibly `flush`. - self.stream = stream - - # Encoding can be overriden by STREAM-START. - self.encoding = None - - # Emitter is a state machine with a stack of states to handle nested - # structures. - self.states = [] - self.state = self.expect_stream_start - - # Current event and the event queue. - self.events = [] - self.event = None - - # The current indentation level and the stack of previous indents. - self.indents = [] - self.indent = None - - # Flow level. - self.flow_level = 0 - - # Contexts. 
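[Editor's note] The grammar comment at the top of `emitter.py` above is the contract for the whole state machine: a well-formed event sequence must bracket the stream, each document, and each collection. Feeding a minimal sequence through the maintained PyPI `pyyaml` package's `emit()` (same signature as the module-level function removed from `__init__.py` earlier) shows the smallest valid case; this is a sketch against that package, not the removed code itself:

```python
# A minimal event sequence satisfying the emitter grammar
# (stream ::= STREAM-START document* STREAM-END), emitted with
# the maintained PyPI "pyyaml" package.
import yaml
from yaml.events import (StreamStartEvent, DocumentStartEvent, ScalarEvent,
                         DocumentEndEvent, StreamEndEvent)

events = [
    StreamStartEvent(),
    DocumentStartEvent(),
    ScalarEvent(anchor=None, tag=None, implicit=(True, True), value="hello"),
    DocumentEndEvent(),
    StreamEndEvent(),
]
print(yaml.emit(events), end="")  # "hello", then "..." closing the open scalar
```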
- self.root_context = False - self.sequence_context = False - self.mapping_context = False - self.simple_key_context = False - - # Characteristics of the last emitted character: - # - current position. - # - is it a whitespace? - # - is it an indention character - # (indentation space, '-', '?', or ':')? - self.line = 0 - self.column = 0 - self.whitespace = True - self.indention = True - - # Whether the document requires an explicit document indicator - self.open_ended = False - - # Formatting details. - self.canonical = canonical - self.allow_unicode = allow_unicode - self.best_indent = 2 - if indent and 1 < indent < 10: - self.best_indent = indent - self.best_width = 80 - if width and width > self.best_indent*2: - self.best_width = width - self.best_line_break = u'\n' - if line_break in [u'\r', u'\n', u'\r\n']: - self.best_line_break = line_break - - # Tag prefixes. - self.tag_prefixes = None - - # Prepared anchor and tag. - self.prepared_anchor = None - self.prepared_tag = None - - # Scalar analysis and style. - self.analysis = None - self.style = None - - def dispose(self): - # Reset the state attributes (to clear self-references) - self.states = [] - self.state = None - - def emit(self, event): - self.events.append(event) - while not self.need_more_events(): - self.event = self.events.pop(0) - self.state() - self.event = None - - # In some cases, we wait for a few next events before emitting. - - def need_more_events(self): - if not self.events: - return True - event = self.events[0] - if isinstance(event, DocumentStartEvent): - return self.need_events(1) - elif isinstance(event, SequenceStartEvent): - return self.need_events(2) - elif isinstance(event, MappingStartEvent): - return self.need_events(3) - else: - return False - - def need_events(self, count): - level = 0 - for event in self.events[1:]: - if isinstance(event, (DocumentStartEvent, CollectionStartEvent)): - level += 1 - elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)): - level -= 1 - elif isinstance(event, StreamEndEvent): - level = -1 - if level < 0: - return False - return (len(self.events) < count+1) - - def increase_indent(self, flow=False, indentless=False): - self.indents.append(self.indent) - if self.indent is None: - if flow: - self.indent = self.best_indent - else: - self.indent = 0 - elif not indentless: - self.indent += self.best_indent - - # States. - - # Stream handlers. - - def expect_stream_start(self): - if isinstance(self.event, StreamStartEvent): - if self.event.encoding and not getattr(self.stream, 'encoding', None): - self.encoding = self.event.encoding - self.write_stream_start() - self.state = self.expect_first_document_start - else: - raise EmitterError("expected StreamStartEvent, but got %s" - % self.event) - - def expect_nothing(self): - raise EmitterError("expected nothing, but got %s" % self.event) - - # Document handlers. 
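For orientation, the emitter above is the push-style consumer of the event grammar in the header comment: emit() queues events and need_more_events() looks one to three events ahead (e.g. to spot an empty mapping before opening it). A minimal sketch of driving it through upstream PyYAML's public yaml.emit() (Python 3 spelling, assumed equivalent to this bundled Python 2 copy):

    import yaml

    # stream ::= STREAM-START document* STREAM-END
    # document ::= DOCUMENT-START node DOCUMENT-END
    events = [
        yaml.StreamStartEvent(),
        yaml.DocumentStartEvent(explicit=False),
        yaml.ScalarEvent(anchor=None, tag=None, implicit=(True, False),
                         value='hello'),
        yaml.DocumentEndEvent(explicit=False),
        yaml.StreamEndEvent(),
    ]
    print(yaml.emit(events))  # roughly: hello\n...\n (open-ended doc terminator)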
- - def expect_first_document_start(self): - return self.expect_document_start(first=True) - - def expect_document_start(self, first=False): - if isinstance(self.event, DocumentStartEvent): - if (self.event.version or self.event.tags) and self.open_ended: - self.write_indicator(u'...', True) - self.write_indent() - if self.event.version: - version_text = self.prepare_version(self.event.version) - self.write_version_directive(version_text) - self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy() - if self.event.tags: - handles = self.event.tags.keys() - handles.sort() - for handle in handles: - prefix = self.event.tags[handle] - self.tag_prefixes[prefix] = handle - handle_text = self.prepare_tag_handle(handle) - prefix_text = self.prepare_tag_prefix(prefix) - self.write_tag_directive(handle_text, prefix_text) - implicit = (first and not self.event.explicit and not self.canonical - and not self.event.version and not self.event.tags - and not self.check_empty_document()) - if not implicit: - self.write_indent() - self.write_indicator(u'---', True) - if self.canonical: - self.write_indent() - self.state = self.expect_document_root - elif isinstance(self.event, StreamEndEvent): - if self.open_ended: - self.write_indicator(u'...', True) - self.write_indent() - self.write_stream_end() - self.state = self.expect_nothing - else: - raise EmitterError("expected DocumentStartEvent, but got %s" - % self.event) - - def expect_document_end(self): - if isinstance(self.event, DocumentEndEvent): - self.write_indent() - if self.event.explicit: - self.write_indicator(u'...', True) - self.write_indent() - self.flush_stream() - self.state = self.expect_document_start - else: - raise EmitterError("expected DocumentEndEvent, but got %s" - % self.event) - - def expect_document_root(self): - self.states.append(self.expect_document_end) - self.expect_node(root=True) - - # Node handlers. - - def expect_node(self, root=False, sequence=False, mapping=False, - simple_key=False): - self.root_context = root - self.sequence_context = sequence - self.mapping_context = mapping - self.simple_key_context = simple_key - if isinstance(self.event, AliasEvent): - self.expect_alias() - elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)): - self.process_anchor(u'&') - self.process_tag() - if isinstance(self.event, ScalarEvent): - self.expect_scalar() - elif isinstance(self.event, SequenceStartEvent): - if self.flow_level or self.canonical or self.event.flow_style \ - or self.check_empty_sequence(): - self.expect_flow_sequence() - else: - self.expect_block_sequence() - elif isinstance(self.event, MappingStartEvent): - if self.flow_level or self.canonical or self.event.flow_style \ - or self.check_empty_mapping(): - self.expect_flow_mapping() - else: - self.expect_block_mapping() - else: - raise EmitterError("expected NodeEvent, but got %s" % self.event) - - def expect_alias(self): - if self.event.anchor is None: - raise EmitterError("anchor is not specified for alias") - self.process_anchor(u'*') - self.state = self.states.pop() - - def expect_scalar(self): - self.increase_indent(flow=True) - self.process_scalar() - self.indent = self.indents.pop() - self.state = self.states.pop() - - # Flow sequence handlers. 
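expect_node() above is where flow versus block layout is decided: a collection is emitted in flow style when the emitter is already inside a flow collection, in canonical mode, when the event carries flow_style, or when the collection is empty. Through the public API this surfaces as the default_flow_style knob; a quick sketch against upstream PyYAML:

    import yaml

    data = {'name': 'netdata', 'ports': [19999]}
    print(yaml.dump(data, default_flow_style=True))
    # {name: netdata, ports: [19999]}
    print(yaml.dump(data, default_flow_style=False))
    # name: netdata
    # ports:
    # - 19999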
- - def expect_flow_sequence(self): - self.write_indicator(u'[', True, whitespace=True) - self.flow_level += 1 - self.increase_indent(flow=True) - self.state = self.expect_first_flow_sequence_item - - def expect_first_flow_sequence_item(self): - if isinstance(self.event, SequenceEndEvent): - self.indent = self.indents.pop() - self.flow_level -= 1 - self.write_indicator(u']', False) - self.state = self.states.pop() - else: - if self.canonical or self.column > self.best_width: - self.write_indent() - self.states.append(self.expect_flow_sequence_item) - self.expect_node(sequence=True) - - def expect_flow_sequence_item(self): - if isinstance(self.event, SequenceEndEvent): - self.indent = self.indents.pop() - self.flow_level -= 1 - if self.canonical: - self.write_indicator(u',', False) - self.write_indent() - self.write_indicator(u']', False) - self.state = self.states.pop() - else: - self.write_indicator(u',', False) - if self.canonical or self.column > self.best_width: - self.write_indent() - self.states.append(self.expect_flow_sequence_item) - self.expect_node(sequence=True) - - # Flow mapping handlers. - - def expect_flow_mapping(self): - self.write_indicator(u'{', True, whitespace=True) - self.flow_level += 1 - self.increase_indent(flow=True) - self.state = self.expect_first_flow_mapping_key - - def expect_first_flow_mapping_key(self): - if isinstance(self.event, MappingEndEvent): - self.indent = self.indents.pop() - self.flow_level -= 1 - self.write_indicator(u'}', False) - self.state = self.states.pop() - else: - if self.canonical or self.column > self.best_width: - self.write_indent() - if not self.canonical and self.check_simple_key(): - self.states.append(self.expect_flow_mapping_simple_value) - self.expect_node(mapping=True, simple_key=True) - else: - self.write_indicator(u'?', True) - self.states.append(self.expect_flow_mapping_value) - self.expect_node(mapping=True) - - def expect_flow_mapping_key(self): - if isinstance(self.event, MappingEndEvent): - self.indent = self.indents.pop() - self.flow_level -= 1 - if self.canonical: - self.write_indicator(u',', False) - self.write_indent() - self.write_indicator(u'}', False) - self.state = self.states.pop() - else: - self.write_indicator(u',', False) - if self.canonical or self.column > self.best_width: - self.write_indent() - if not self.canonical and self.check_simple_key(): - self.states.append(self.expect_flow_mapping_simple_value) - self.expect_node(mapping=True, simple_key=True) - else: - self.write_indicator(u'?', True) - self.states.append(self.expect_flow_mapping_value) - self.expect_node(mapping=True) - - def expect_flow_mapping_simple_value(self): - self.write_indicator(u':', False) - self.states.append(self.expect_flow_mapping_key) - self.expect_node(mapping=True) - - def expect_flow_mapping_value(self): - if self.canonical or self.column > self.best_width: - self.write_indent() - self.write_indicator(u':', True) - self.states.append(self.expect_flow_mapping_key) - self.expect_node(mapping=True) - - # Block sequence handlers. 
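Note how each flow handler above calls write_indent() once self.column passes self.best_width; best_width is the width argument threaded down from dump(). For example (upstream PyYAML):

    import yaml

    # With a narrow width, the flow sequence wraps at the commas.
    print(yaml.dump(list(range(20)), default_flow_style=True, width=25))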
- - def expect_block_sequence(self): - indentless = (self.mapping_context and not self.indention) - self.increase_indent(flow=False, indentless=indentless) - self.state = self.expect_first_block_sequence_item - - def expect_first_block_sequence_item(self): - return self.expect_block_sequence_item(first=True) - - def expect_block_sequence_item(self, first=False): - if not first and isinstance(self.event, SequenceEndEvent): - self.indent = self.indents.pop() - self.state = self.states.pop() - else: - self.write_indent() - self.write_indicator(u'-', True, indention=True) - self.states.append(self.expect_block_sequence_item) - self.expect_node(sequence=True) - - # Block mapping handlers. - - def expect_block_mapping(self): - self.increase_indent(flow=False) - self.state = self.expect_first_block_mapping_key - - def expect_first_block_mapping_key(self): - return self.expect_block_mapping_key(first=True) - - def expect_block_mapping_key(self, first=False): - if not first and isinstance(self.event, MappingEndEvent): - self.indent = self.indents.pop() - self.state = self.states.pop() - else: - self.write_indent() - if self.check_simple_key(): - self.states.append(self.expect_block_mapping_simple_value) - self.expect_node(mapping=True, simple_key=True) - else: - self.write_indicator(u'?', True, indention=True) - self.states.append(self.expect_block_mapping_value) - self.expect_node(mapping=True) - - def expect_block_mapping_simple_value(self): - self.write_indicator(u':', False) - self.states.append(self.expect_block_mapping_key) - self.expect_node(mapping=True) - - def expect_block_mapping_value(self): - self.write_indent() - self.write_indicator(u':', True, indention=True) - self.states.append(self.expect_block_mapping_key) - self.expect_node(mapping=True) - - # Checkers. - - def check_empty_sequence(self): - return (isinstance(self.event, SequenceStartEvent) and self.events - and isinstance(self.events[0], SequenceEndEvent)) - - def check_empty_mapping(self): - return (isinstance(self.event, MappingStartEvent) and self.events - and isinstance(self.events[0], MappingEndEvent)) - - def check_empty_document(self): - if not isinstance(self.event, DocumentStartEvent) or not self.events: - return False - event = self.events[0] - return (isinstance(event, ScalarEvent) and event.anchor is None - and event.tag is None and event.implicit and event.value == u'') - - def check_simple_key(self): - length = 0 - if isinstance(self.event, NodeEvent) and self.event.anchor is not None: - if self.prepared_anchor is None: - self.prepared_anchor = self.prepare_anchor(self.event.anchor) - length += len(self.prepared_anchor) - if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \ - and self.event.tag is not None: - if self.prepared_tag is None: - self.prepared_tag = self.prepare_tag(self.event.tag) - length += len(self.prepared_tag) - if isinstance(self.event, ScalarEvent): - if self.analysis is None: - self.analysis = self.analyze_scalar(self.event.value) - length += len(self.analysis.scalar) - return (length < 128 and (isinstance(self.event, AliasEvent) - or (isinstance(self.event, ScalarEvent) - and not self.analysis.empty and not self.analysis.multiline) - or self.check_empty_sequence() or self.check_empty_mapping())) - - # Anchor, Tag, and Scalar processors. 
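check_simple_key() above is why short single-line keys are emitted as 'key: value' while keys of 128 characters or more (or empty/multiline scalars) fall back to the explicit '? key' / ': value' form. A quick illustration (upstream PyYAML):

    import yaml

    print(yaml.dump({'k' * 4: 1}))    # kkkk: 1
    print(yaml.dump({'k' * 200: 1}))  # ? kkkk...k
                                      # : 1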
- - def process_anchor(self, indicator): - if self.event.anchor is None: - self.prepared_anchor = None - return - if self.prepared_anchor is None: - self.prepared_anchor = self.prepare_anchor(self.event.anchor) - if self.prepared_anchor: - self.write_indicator(indicator+self.prepared_anchor, True) - self.prepared_anchor = None - - def process_tag(self): - tag = self.event.tag - if isinstance(self.event, ScalarEvent): - if self.style is None: - self.style = self.choose_scalar_style() - if ((not self.canonical or tag is None) and - ((self.style == '' and self.event.implicit[0]) - or (self.style != '' and self.event.implicit[1]))): - self.prepared_tag = None - return - if self.event.implicit[0] and tag is None: - tag = u'!' - self.prepared_tag = None - else: - if (not self.canonical or tag is None) and self.event.implicit: - self.prepared_tag = None - return - if tag is None: - raise EmitterError("tag is not specified") - if self.prepared_tag is None: - self.prepared_tag = self.prepare_tag(tag) - if self.prepared_tag: - self.write_indicator(self.prepared_tag, True) - self.prepared_tag = None - - def choose_scalar_style(self): - if self.analysis is None: - self.analysis = self.analyze_scalar(self.event.value) - if self.event.style == '"' or self.canonical: - return '"' - if not self.event.style and self.event.implicit[0]: - if (not (self.simple_key_context and - (self.analysis.empty or self.analysis.multiline)) - and (self.flow_level and self.analysis.allow_flow_plain - or (not self.flow_level and self.analysis.allow_block_plain))): - return '' - if self.event.style and self.event.style in '|>': - if (not self.flow_level and not self.simple_key_context - and self.analysis.allow_block): - return self.event.style - if not self.event.style or self.event.style == '\'': - if (self.analysis.allow_single_quoted and - not (self.simple_key_context and self.analysis.multiline)): - return '\'' - return '"' - - def process_scalar(self): - if self.analysis is None: - self.analysis = self.analyze_scalar(self.event.value) - if self.style is None: - self.style = self.choose_scalar_style() - split = (not self.simple_key_context) - #if self.analysis.multiline and split \ - # and (not self.style or self.style in '\'\"'): - # self.write_indent() - if self.style == '"': - self.write_double_quoted(self.analysis.scalar, split) - elif self.style == '\'': - self.write_single_quoted(self.analysis.scalar, split) - elif self.style == '>': - self.write_folded(self.analysis.scalar) - elif self.style == '|': - self.write_literal(self.analysis.scalar) - else: - self.write_plain(self.analysis.scalar, split) - self.analysis = None - self.style = None - - # Analyzers. - - def prepare_version(self, version): - major, minor = version - if major != 1: - raise EmitterError("unsupported YAML version: %d.%d" % (major, minor)) - return u'%d.%d' % (major, minor) - - def prepare_tag_handle(self, handle): - if not handle: - raise EmitterError("tag handle must not be empty") - if handle[0] != u'!' 
or handle[-1] != u'!': - raise EmitterError("tag handle must start and end with '!': %r" - % (handle.encode('utf-8'))) - for ch in handle[1:-1]: - if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ - or ch in u'-_'): - raise EmitterError("invalid character %r in the tag handle: %r" - % (ch.encode('utf-8'), handle.encode('utf-8'))) - return handle - - def prepare_tag_prefix(self, prefix): - if not prefix: - raise EmitterError("tag prefix must not be empty") - chunks = [] - start = end = 0 - if prefix[0] == u'!': - end = 1 - while end < len(prefix): - ch = prefix[end] - if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ - or ch in u'-;/?!:@&=+$,_.~*\'()[]': - end += 1 - else: - if start < end: - chunks.append(prefix[start:end]) - start = end = end+1 - data = ch.encode('utf-8') - for ch in data: - chunks.append(u'%%%02X' % ord(ch)) - if start < end: - chunks.append(prefix[start:end]) - return u''.join(chunks) - - def prepare_tag(self, tag): - if not tag: - raise EmitterError("tag must not be empty") - if tag == u'!': - return tag - handle = None - suffix = tag - prefixes = self.tag_prefixes.keys() - prefixes.sort() - for prefix in prefixes: - if tag.startswith(prefix) \ - and (prefix == u'!' or len(prefix) < len(tag)): - handle = self.tag_prefixes[prefix] - suffix = tag[len(prefix):] - chunks = [] - start = end = 0 - while end < len(suffix): - ch = suffix[end] - if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ - or ch in u'-;/?:@&=+$,_.~*\'()[]' \ - or (ch == u'!' and handle != u'!'): - end += 1 - else: - if start < end: - chunks.append(suffix[start:end]) - start = end = end+1 - data = ch.encode('utf-8') - for ch in data: - chunks.append(u'%%%02X' % ord(ch)) - if start < end: - chunks.append(suffix[start:end]) - suffix_text = u''.join(chunks) - if handle: - return u'%s%s' % (handle, suffix_text) - else: - return u'!<%s>' % suffix_text - - def prepare_anchor(self, anchor): - if not anchor: - raise EmitterError("anchor must not be empty") - for ch in anchor: - if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ - or ch in u'-_'): - raise EmitterError("invalid character %r in the anchor: %r" - % (ch.encode('utf-8'), anchor.encode('utf-8'))) - return anchor - - def analyze_scalar(self, scalar): - - # Empty scalar is a special case. - if not scalar: - return ScalarAnalysis(scalar=scalar, empty=True, multiline=False, - allow_flow_plain=False, allow_block_plain=True, - allow_single_quoted=True, allow_double_quoted=True, - allow_block=False) - - # Indicators and special characters. - block_indicators = False - flow_indicators = False - line_breaks = False - special_characters = False - - # Important whitespace combinations. - leading_space = False - leading_break = False - trailing_space = False - trailing_break = False - break_space = False - space_break = False - - # Check document indicators. - if scalar.startswith(u'---') or scalar.startswith(u'...'): - block_indicators = True - flow_indicators = True - - # First character or preceded by a whitespace. - preceeded_by_whitespace = True - - # Last character or followed by a whitespace. - followed_by_whitespace = (len(scalar) == 1 or - scalar[1] in u'\0 \t\r\n\x85\u2028\u2029') - - # The previous character is a space. - previous_space = False - - # The previous character is a break. - previous_break = False - - index = 0 - while index < len(scalar): - ch = scalar[index] - - # Check for indicators. - if index == 0: - # Leading indicators are special characters. 
- if ch in u'#,[]{}&*!|>\'\"%@`': - flow_indicators = True - block_indicators = True - if ch in u'?:': - flow_indicators = True - if followed_by_whitespace: - block_indicators = True - if ch == u'-' and followed_by_whitespace: - flow_indicators = True - block_indicators = True - else: - # Some indicators cannot appear within a scalar as well. - if ch in u',?[]{}': - flow_indicators = True - if ch == u':': - flow_indicators = True - if followed_by_whitespace: - block_indicators = True - if ch == u'#' and preceeded_by_whitespace: - flow_indicators = True - block_indicators = True - - # Check for line breaks, special, and unicode characters. - if ch in u'\n\x85\u2028\u2029': - line_breaks = True - if not (ch == u'\n' or u'\x20' <= ch <= u'\x7E'): - if (ch == u'\x85' or u'\xA0' <= ch <= u'\uD7FF' - or u'\uE000' <= ch <= u'\uFFFD') and ch != u'\uFEFF': - unicode_characters = True - if not self.allow_unicode: - special_characters = True - else: - special_characters = True - - # Detect important whitespace combinations. - if ch == u' ': - if index == 0: - leading_space = True - if index == len(scalar)-1: - trailing_space = True - if previous_break: - break_space = True - previous_space = True - previous_break = False - elif ch in u'\n\x85\u2028\u2029': - if index == 0: - leading_break = True - if index == len(scalar)-1: - trailing_break = True - if previous_space: - space_break = True - previous_space = False - previous_break = True - else: - previous_space = False - previous_break = False - - # Prepare for the next character. - index += 1 - preceeded_by_whitespace = (ch in u'\0 \t\r\n\x85\u2028\u2029') - followed_by_whitespace = (index+1 >= len(scalar) or - scalar[index+1] in u'\0 \t\r\n\x85\u2028\u2029') - - # Let's decide what styles are allowed. - allow_flow_plain = True - allow_block_plain = True - allow_single_quoted = True - allow_double_quoted = True - allow_block = True - - # Leading and trailing whitespaces are bad for plain scalars. - if (leading_space or leading_break - or trailing_space or trailing_break): - allow_flow_plain = allow_block_plain = False - - # We do not permit trailing spaces for block scalars. - if trailing_space: - allow_block = False - - # Spaces at the beginning of a new line are only acceptable for block - # scalars. - if break_space: - allow_flow_plain = allow_block_plain = allow_single_quoted = False - - # Spaces followed by breaks, as well as special character are only - # allowed for double quoted scalars. - if space_break or special_characters: - allow_flow_plain = allow_block_plain = \ - allow_single_quoted = allow_block = False - - # Although the plain scalar writer supports breaks, we never emit - # multiline plain scalars. - if line_breaks: - allow_flow_plain = allow_block_plain = False - - # Flow indicators are forbidden for flow plain scalars. - if flow_indicators: - allow_flow_plain = False - - # Block indicators are forbidden for block plain scalars. - if block_indicators: - allow_block_plain = False - - return ScalarAnalysis(scalar=scalar, - empty=False, multiline=line_breaks, - allow_flow_plain=allow_flow_plain, - allow_block_plain=allow_block_plain, - allow_single_quoted=allow_single_quoted, - allow_double_quoted=allow_double_quoted, - allow_block=allow_block) - - # Writers. - - def flush_stream(self): - if hasattr(self.stream, 'flush'): - self.stream.flush() - - def write_stream_start(self): - # Write BOM if needed. 
- if self.encoding and self.encoding.startswith('utf-16'): - self.stream.write(u'\uFEFF'.encode(self.encoding)) - - def write_stream_end(self): - self.flush_stream() - - def write_indicator(self, indicator, need_whitespace, - whitespace=False, indention=False): - if self.whitespace or not need_whitespace: - data = indicator - else: - data = u' '+indicator - self.whitespace = whitespace - self.indention = self.indention and indention - self.column += len(data) - self.open_ended = False - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - - def write_indent(self): - indent = self.indent or 0 - if not self.indention or self.column > indent \ - or (self.column == indent and not self.whitespace): - self.write_line_break() - if self.column < indent: - self.whitespace = True - data = u' '*(indent-self.column) - self.column = indent - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - - def write_line_break(self, data=None): - if data is None: - data = self.best_line_break - self.whitespace = True - self.indention = True - self.line += 1 - self.column = 0 - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - - def write_version_directive(self, version_text): - data = u'%%YAML %s' % version_text - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - self.write_line_break() - - def write_tag_directive(self, handle_text, prefix_text): - data = u'%%TAG %s %s' % (handle_text, prefix_text) - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - self.write_line_break() - - # Scalar streams. - - def write_single_quoted(self, text, split=True): - self.write_indicator(u'\'', True) - spaces = False - breaks = False - start = end = 0 - while end <= len(text): - ch = None - if end < len(text): - ch = text[end] - if spaces: - if ch is None or ch != u' ': - if start+1 == end and self.column > self.best_width and split \ - and start != 0 and end != len(text): - self.write_indent() - else: - data = text[start:end] - self.column += len(data) - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - start = end - elif breaks: - if ch is None or ch not in u'\n\x85\u2028\u2029': - if text[start] == u'\n': - self.write_line_break() - for br in text[start:end]: - if br == u'\n': - self.write_line_break() - else: - self.write_line_break(br) - self.write_indent() - start = end - else: - if ch is None or ch in u' \n\x85\u2028\u2029' or ch == u'\'': - if start < end: - data = text[start:end] - self.column += len(data) - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - start = end - if ch == u'\'': - data = u'\'\'' - self.column += 2 - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - start = end + 1 - if ch is not None: - spaces = (ch == u' ') - breaks = (ch in u'\n\x85\u2028\u2029') - end += 1 - self.write_indicator(u'\'', False) - - ESCAPE_REPLACEMENTS = { - u'\0': u'0', - u'\x07': u'a', - u'\x08': u'b', - u'\x09': u't', - u'\x0A': u'n', - u'\x0B': u'v', - u'\x0C': u'f', - u'\x0D': u'r', - u'\x1B': u'e', - u'\"': u'\"', - u'\\': u'\\', - u'\x85': u'N', - u'\xA0': u'_', - u'\u2028': u'L', - u'\u2029': u'P', - } - - def write_double_quoted(self, text, split=True): - self.write_indicator(u'"', True) - start = end = 0 - while end <= len(text): - ch = None - if end < len(text): - ch = text[end] - if ch is None or ch in u'"\\\x85\u2028\u2029\uFEFF' \ - or not (u'\x20' <= ch <= u'\x7E' - or 
(self.allow_unicode - and (u'\xA0' <= ch <= u'\uD7FF' - or u'\uE000' <= ch <= u'\uFFFD'))): - if start < end: - data = text[start:end] - self.column += len(data) - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - start = end - if ch is not None: - if ch in self.ESCAPE_REPLACEMENTS: - data = u'\\'+self.ESCAPE_REPLACEMENTS[ch] - elif ch <= u'\xFF': - data = u'\\x%02X' % ord(ch) - elif ch <= u'\uFFFF': - data = u'\\u%04X' % ord(ch) - else: - data = u'\\U%08X' % ord(ch) - self.column += len(data) - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - start = end+1 - if 0 < end < len(text)-1 and (ch == u' ' or start >= end) \ - and self.column+(end-start) > self.best_width and split: - data = text[start:end]+u'\\' - if start < end: - start = end - self.column += len(data) - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - self.write_indent() - self.whitespace = False - self.indention = False - if text[start] == u' ': - data = u'\\' - self.column += len(data) - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - end += 1 - self.write_indicator(u'"', False) - - def determine_block_hints(self, text): - hints = u'' - if text: - if text[0] in u' \n\x85\u2028\u2029': - hints += unicode(self.best_indent) - if text[-1] not in u'\n\x85\u2028\u2029': - hints += u'-' - elif len(text) == 1 or text[-2] in u'\n\x85\u2028\u2029': - hints += u'+' - return hints - - def write_folded(self, text): - hints = self.determine_block_hints(text) - self.write_indicator(u'>'+hints, True) - if hints[-1:] == u'+': - self.open_ended = True - self.write_line_break() - leading_space = True - spaces = False - breaks = True - start = end = 0 - while end <= len(text): - ch = None - if end < len(text): - ch = text[end] - if breaks: - if ch is None or ch not in u'\n\x85\u2028\u2029': - if not leading_space and ch is not None and ch != u' ' \ - and text[start] == u'\n': - self.write_line_break() - leading_space = (ch == u' ') - for br in text[start:end]: - if br == u'\n': - self.write_line_break() - else: - self.write_line_break(br) - if ch is not None: - self.write_indent() - start = end - elif spaces: - if ch != u' ': - if start+1 == end and self.column > self.best_width: - self.write_indent() - else: - data = text[start:end] - self.column += len(data) - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - start = end - else: - if ch is None or ch in u' \n\x85\u2028\u2029': - data = text[start:end] - self.column += len(data) - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - if ch is None: - self.write_line_break() - start = end - if ch is not None: - breaks = (ch in u'\n\x85\u2028\u2029') - spaces = (ch == u' ') - end += 1 - - def write_literal(self, text): - hints = self.determine_block_hints(text) - self.write_indicator(u'|'+hints, True) - if hints[-1:] == u'+': - self.open_ended = True - self.write_line_break() - breaks = True - start = end = 0 - while end <= len(text): - ch = None - if end < len(text): - ch = text[end] - if breaks: - if ch is None or ch not in u'\n\x85\u2028\u2029': - for br in text[start:end]: - if br == u'\n': - self.write_line_break() - else: - self.write_line_break(br) - if ch is not None: - self.write_indent() - start = end - else: - if ch is None or ch in u'\n\x85\u2028\u2029': - data = text[start:end] - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - if ch is None: - 
self.write_line_break() - start = end - if ch is not None: - breaks = (ch in u'\n\x85\u2028\u2029') - end += 1 - - def write_plain(self, text, split=True): - if self.root_context: - self.open_ended = True - if not text: - return - if not self.whitespace: - data = u' ' - self.column += len(data) - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - self.whitespace = False - self.indention = False - spaces = False - breaks = False - start = end = 0 - while end <= len(text): - ch = None - if end < len(text): - ch = text[end] - if spaces: - if ch != u' ': - if start+1 == end and self.column > self.best_width and split: - self.write_indent() - self.whitespace = False - self.indention = False - else: - data = text[start:end] - self.column += len(data) - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - start = end - elif breaks: - if ch not in u'\n\x85\u2028\u2029': - if text[start] == u'\n': - self.write_line_break() - for br in text[start:end]: - if br == u'\n': - self.write_line_break() - else: - self.write_line_break(br) - self.write_indent() - self.whitespace = False - self.indention = False - start = end - else: - if ch is None or ch in u' \n\x85\u2028\u2029': - data = text[start:end] - self.column += len(data) - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - start = end - if ch is not None: - spaces = (ch == u' ') - breaks = (ch in u'\n\x85\u2028\u2029') - end += 1 - diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml2/error.py b/src/collectors/python.d.plugin/python_modules/pyyaml2/error.py deleted file mode 100644 index 5466be721..000000000 --- a/src/collectors/python.d.plugin/python_modules/pyyaml2/error.py +++ /dev/null @@ -1,76 +0,0 @@ -# SPDX-License-Identifier: MIT - -__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError'] - -class Mark(object): - - def __init__(self, name, index, line, column, buffer, pointer): - self.name = name - self.index = index - self.line = line - self.column = column - self.buffer = buffer - self.pointer = pointer - - def get_snippet(self, indent=4, max_length=75): - if self.buffer is None: - return None - head = '' - start = self.pointer - while start > 0 and self.buffer[start-1] not in u'\0\r\n\x85\u2028\u2029': - start -= 1 - if self.pointer-start > max_length/2-1: - head = ' ... ' - start += 5 - break - tail = '' - end = self.pointer - while end < len(self.buffer) and self.buffer[end] not in u'\0\r\n\x85\u2028\u2029': - end += 1 - if end-self.pointer > max_length/2-1: - tail = ' ... 
' - end -= 5 - break - snippet = self.buffer[start:end].encode('utf-8') - return ' '*indent + head + snippet + tail + '\n' \ - + ' '*(indent+self.pointer-start+len(head)) + '^' - - def __str__(self): - snippet = self.get_snippet() - where = " in \"%s\", line %d, column %d" \ - % (self.name, self.line+1, self.column+1) - if snippet is not None: - where += ":\n"+snippet - return where - -class YAMLError(Exception): - pass - -class MarkedYAMLError(YAMLError): - - def __init__(self, context=None, context_mark=None, - problem=None, problem_mark=None, note=None): - self.context = context - self.context_mark = context_mark - self.problem = problem - self.problem_mark = problem_mark - self.note = note - - def __str__(self): - lines = [] - if self.context is not None: - lines.append(self.context) - if self.context_mark is not None \ - and (self.problem is None or self.problem_mark is None - or self.context_mark.name != self.problem_mark.name - or self.context_mark.line != self.problem_mark.line - or self.context_mark.column != self.problem_mark.column): - lines.append(str(self.context_mark)) - if self.problem is not None: - lines.append(self.problem) - if self.problem_mark is not None: - lines.append(str(self.problem_mark)) - if self.note is not None: - lines.append(self.note) - return '\n'.join(lines) - diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml2/events.py b/src/collectors/python.d.plugin/python_modules/pyyaml2/events.py deleted file mode 100644 index 283452add..000000000 --- a/src/collectors/python.d.plugin/python_modules/pyyaml2/events.py +++ /dev/null @@ -1,87 +0,0 @@ -# SPDX-License-Identifier: MIT - -# Abstract classes. - -class Event(object): - def __init__(self, start_mark=None, end_mark=None): - self.start_mark = start_mark - self.end_mark = end_mark - def __repr__(self): - attributes = [key for key in ['anchor', 'tag', 'implicit', 'value'] - if hasattr(self, key)] - arguments = ', '.join(['%s=%r' % (key, getattr(self, key)) - for key in attributes]) - return '%s(%s)' % (self.__class__.__name__, arguments) - -class NodeEvent(Event): - def __init__(self, anchor, start_mark=None, end_mark=None): - self.anchor = anchor - self.start_mark = start_mark - self.end_mark = end_mark - -class CollectionStartEvent(NodeEvent): - def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None, - flow_style=None): - self.anchor = anchor - self.tag = tag - self.implicit = implicit - self.start_mark = start_mark - self.end_mark = end_mark - self.flow_style = flow_style - -class CollectionEndEvent(Event): - pass - -# Implementations. 
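These event classes are the currency between the parser and the emitter; upstream PyYAML exposes the stream directly through yaml.parse(), e.g.:

    import yaml

    for event in yaml.parse('a: 1'):
        print(event)
    # StreamStartEvent()
    # DocumentStartEvent()
    # MappingStartEvent(anchor=None, tag=None, implicit=True)
    # ScalarEvent(anchor=None, tag=None, implicit=(True, False), value='a')
    # ScalarEvent(anchor=None, tag=None, implicit=(True, False), value='1')
    # MappingEndEvent()
    # DocumentEndEvent()
    # StreamEndEvent()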
- -class StreamStartEvent(Event): - def __init__(self, start_mark=None, end_mark=None, encoding=None): - self.start_mark = start_mark - self.end_mark = end_mark - self.encoding = encoding - -class StreamEndEvent(Event): - pass - -class DocumentStartEvent(Event): - def __init__(self, start_mark=None, end_mark=None, - explicit=None, version=None, tags=None): - self.start_mark = start_mark - self.end_mark = end_mark - self.explicit = explicit - self.version = version - self.tags = tags - -class DocumentEndEvent(Event): - def __init__(self, start_mark=None, end_mark=None, - explicit=None): - self.start_mark = start_mark - self.end_mark = end_mark - self.explicit = explicit - -class AliasEvent(NodeEvent): - pass - -class ScalarEvent(NodeEvent): - def __init__(self, anchor, tag, implicit, value, - start_mark=None, end_mark=None, style=None): - self.anchor = anchor - self.tag = tag - self.implicit = implicit - self.value = value - self.start_mark = start_mark - self.end_mark = end_mark - self.style = style - -class SequenceStartEvent(CollectionStartEvent): - pass - -class SequenceEndEvent(CollectionEndEvent): - pass - -class MappingStartEvent(CollectionStartEvent): - pass - -class MappingEndEvent(CollectionEndEvent): - pass - diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml2/loader.py b/src/collectors/python.d.plugin/python_modules/pyyaml2/loader.py deleted file mode 100644 index 1c195531f..000000000 --- a/src/collectors/python.d.plugin/python_modules/pyyaml2/loader.py +++ /dev/null @@ -1,41 +0,0 @@ -# SPDX-License-Identifier: MIT - -__all__ = ['BaseLoader', 'SafeLoader', 'Loader'] - -from reader import * -from scanner import * -from parser import * -from composer import * -from constructor import * -from resolver import * - -class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver): - - def __init__(self, stream): - Reader.__init__(self, stream) - Scanner.__init__(self) - Parser.__init__(self) - Composer.__init__(self) - BaseConstructor.__init__(self) - BaseResolver.__init__(self) - -class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver): - - def __init__(self, stream): - Reader.__init__(self, stream) - Scanner.__init__(self) - Parser.__init__(self) - Composer.__init__(self) - SafeConstructor.__init__(self) - Resolver.__init__(self) - -class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver): - - def __init__(self, stream): - Reader.__init__(self, stream) - Scanner.__init__(self) - Parser.__init__(self) - Composer.__init__(self) - Constructor.__init__(self) - Resolver.__init__(self) - diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml2/nodes.py b/src/collectors/python.d.plugin/python_modules/pyyaml2/nodes.py deleted file mode 100644 index ed2a1b43e..000000000 --- a/src/collectors/python.d.plugin/python_modules/pyyaml2/nodes.py +++ /dev/null @@ -1,50 +0,0 @@ -# SPDX-License-Identifier: MIT - -class Node(object): - def __init__(self, tag, value, start_mark, end_mark): - self.tag = tag - self.value = value - self.start_mark = start_mark - self.end_mark = end_mark - def __repr__(self): - value = self.value - #if isinstance(value, list): - # if len(value) == 0: - # value = '' - # elif len(value) == 1: - # value = '<1 item>' - # else: - # value = '<%d items>' % len(value) - #else: - # if len(value) > 75: - # value = repr(value[:70]+u' ... 
') - # else: - # value = repr(value) - value = repr(value) - return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value) - -class ScalarNode(Node): - id = 'scalar' - def __init__(self, tag, value, - start_mark=None, end_mark=None, style=None): - self.tag = tag - self.value = value - self.start_mark = start_mark - self.end_mark = end_mark - self.style = style - -class CollectionNode(Node): - def __init__(self, tag, value, - start_mark=None, end_mark=None, flow_style=None): - self.tag = tag - self.value = value - self.start_mark = start_mark - self.end_mark = end_mark - self.flow_style = flow_style - -class SequenceNode(CollectionNode): - id = 'sequence' - -class MappingNode(CollectionNode): - id = 'mapping' - diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml2/parser.py b/src/collectors/python.d.plugin/python_modules/pyyaml2/parser.py deleted file mode 100644 index 97ba08337..000000000 --- a/src/collectors/python.d.plugin/python_modules/pyyaml2/parser.py +++ /dev/null @@ -1,590 +0,0 @@ -# SPDX-License-Identifier: MIT - -# The following YAML grammar is LL(1) and is parsed by a recursive descent -# parser. -# -# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END -# implicit_document ::= block_node DOCUMENT-END* -# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -# block_node_or_indentless_sequence ::= -# ALIAS -# | properties (block_content | indentless_block_sequence)? -# | block_content -# | indentless_block_sequence -# block_node ::= ALIAS -# | properties block_content? -# | block_content -# flow_node ::= ALIAS -# | properties flow_content? -# | flow_content -# properties ::= TAG ANCHOR? | ANCHOR TAG? -# block_content ::= block_collection | flow_collection | SCALAR -# flow_content ::= flow_collection | SCALAR -# block_collection ::= block_sequence | block_mapping -# flow_collection ::= flow_sequence | flow_mapping -# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END -# indentless_sequence ::= (BLOCK-ENTRY block_node?)+ -# block_mapping ::= BLOCK-MAPPING_START -# ((KEY block_node_or_indentless_sequence?)? -# (VALUE block_node_or_indentless_sequence?)?)* -# BLOCK-END -# flow_sequence ::= FLOW-SEQUENCE-START -# (flow_sequence_entry FLOW-ENTRY)* -# flow_sequence_entry? -# FLOW-SEQUENCE-END -# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -# flow_mapping ::= FLOW-MAPPING-START -# (flow_mapping_entry FLOW-ENTRY)* -# flow_mapping_entry? -# FLOW-MAPPING-END -# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
-# -# FIRST sets: -# -# stream: { STREAM-START } -# explicit_document: { DIRECTIVE DOCUMENT-START } -# implicit_document: FIRST(block_node) -# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START } -# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START } -# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR } -# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR } -# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START } -# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START } -# block_sequence: { BLOCK-SEQUENCE-START } -# block_mapping: { BLOCK-MAPPING-START } -# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY } -# indentless_sequence: { ENTRY } -# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START } -# flow_sequence: { FLOW-SEQUENCE-START } -# flow_mapping: { FLOW-MAPPING-START } -# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY } -# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY } - -__all__ = ['Parser', 'ParserError'] - -from error import MarkedYAMLError -from tokens import * -from events import * -from scanner import * - -class ParserError(MarkedYAMLError): - pass - -class Parser(object): - # Since writing a recursive-descendant parser is a straightforward task, we - # do not give many comments here. - - DEFAULT_TAGS = { - u'!': u'!', - u'!!': u'tag:yaml.org,2002:', - } - - def __init__(self): - self.current_event = None - self.yaml_version = None - self.tag_handles = {} - self.states = [] - self.marks = [] - self.state = self.parse_stream_start - - def dispose(self): - # Reset the state attributes (to clear self-references) - self.states = [] - self.state = None - - def check_event(self, *choices): - # Check the type of the next event. - if self.current_event is None: - if self.state: - self.current_event = self.state() - if self.current_event is not None: - if not choices: - return True - for choice in choices: - if isinstance(self.current_event, choice): - return True - return False - - def peek_event(self): - # Get the next event. - if self.current_event is None: - if self.state: - self.current_event = self.state() - return self.current_event - - def get_event(self): - # Get the next event and proceed further. - if self.current_event is None: - if self.state: - self.current_event = self.state() - value = self.current_event - self.current_event = None - return value - - # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END - # implicit_document ::= block_node DOCUMENT-END* - # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* - - def parse_stream_start(self): - - # Parse the stream start. - token = self.get_token() - event = StreamStartEvent(token.start_mark, token.end_mark, - encoding=token.encoding) - - # Prepare the next state. - self.state = self.parse_implicit_document_start - - return event - - def parse_implicit_document_start(self): - - # Parse an implicit document. - if not self.check_token(DirectiveToken, DocumentStartToken, - StreamEndToken): - self.tag_handles = self.DEFAULT_TAGS - token = self.peek_token() - start_mark = end_mark = token.start_mark - event = DocumentStartEvent(start_mark, end_mark, - explicit=False) - - # Prepare the next state. 
- self.states.append(self.parse_document_end) - self.state = self.parse_block_node - - return event - - else: - return self.parse_document_start() - - def parse_document_start(self): - - # Parse any extra document end indicators. - while self.check_token(DocumentEndToken): - self.get_token() - - # Parse an explicit document. - if not self.check_token(StreamEndToken): - token = self.peek_token() - start_mark = token.start_mark - version, tags = self.process_directives() - if not self.check_token(DocumentStartToken): - raise ParserError(None, None, - "expected '<document start>', but found %r" - % self.peek_token().id, - self.peek_token().start_mark) - token = self.get_token() - end_mark = token.end_mark - event = DocumentStartEvent(start_mark, end_mark, - explicit=True, version=version, tags=tags) - self.states.append(self.parse_document_end) - self.state = self.parse_document_content - else: - # Parse the end of the stream. - token = self.get_token() - event = StreamEndEvent(token.start_mark, token.end_mark) - assert not self.states - assert not self.marks - self.state = None - return event - - def parse_document_end(self): - - # Parse the document end. - token = self.peek_token() - start_mark = end_mark = token.start_mark - explicit = False - if self.check_token(DocumentEndToken): - token = self.get_token() - end_mark = token.end_mark - explicit = True - event = DocumentEndEvent(start_mark, end_mark, - explicit=explicit) - - # Prepare the next state. - self.state = self.parse_document_start - - return event - - def parse_document_content(self): - if self.check_token(DirectiveToken, - DocumentStartToken, DocumentEndToken, StreamEndToken): - event = self.process_empty_scalar(self.peek_token().start_mark) - self.state = self.states.pop() - return event - else: - return self.parse_block_node() - - def process_directives(self): - self.yaml_version = None - self.tag_handles = {} - while self.check_token(DirectiveToken): - token = self.get_token() - if token.name == u'YAML': - if self.yaml_version is not None: - raise ParserError(None, None, - "found duplicate YAML directive", token.start_mark) - major, minor = token.value - if major != 1: - raise ParserError(None, None, - "found incompatible YAML document (version 1.* is required)", - token.start_mark) - self.yaml_version = token.value - elif token.name == u'TAG': - handle, prefix = token.value - if handle in self.tag_handles: - raise ParserError(None, None, - "duplicate tag handle %r" % handle.encode('utf-8'), - token.start_mark) - self.tag_handles[handle] = prefix - if self.tag_handles: - value = self.yaml_version, self.tag_handles.copy() - else: - value = self.yaml_version, None - for key in self.DEFAULT_TAGS: - if key not in self.tag_handles: - self.tag_handles[key] = self.DEFAULT_TAGS[key] - return value - - # block_node_or_indentless_sequence ::= ALIAS - # | properties (block_content | indentless_block_sequence)? - # | block_content - # | indentless_block_sequence - # block_node ::= ALIAS - # | properties block_content? - # | block_content - # flow_node ::= ALIAS - # | properties flow_content? - # | flow_content - # properties ::= TAG ANCHOR? | ANCHOR TAG?
- # block_content ::= block_collection | flow_collection | SCALAR - # flow_content ::= flow_collection | SCALAR - # block_collection ::= block_sequence | block_mapping - # flow_collection ::= flow_sequence | flow_mapping - - def parse_block_node(self): - return self.parse_node(block=True) - - def parse_flow_node(self): - return self.parse_node() - - def parse_block_node_or_indentless_sequence(self): - return self.parse_node(block=True, indentless_sequence=True) - - def parse_node(self, block=False, indentless_sequence=False): - if self.check_token(AliasToken): - token = self.get_token() - event = AliasEvent(token.value, token.start_mark, token.end_mark) - self.state = self.states.pop() - else: - anchor = None - tag = None - start_mark = end_mark = tag_mark = None - if self.check_token(AnchorToken): - token = self.get_token() - start_mark = token.start_mark - end_mark = token.end_mark - anchor = token.value - if self.check_token(TagToken): - token = self.get_token() - tag_mark = token.start_mark - end_mark = token.end_mark - tag = token.value - elif self.check_token(TagToken): - token = self.get_token() - start_mark = tag_mark = token.start_mark - end_mark = token.end_mark - tag = token.value - if self.check_token(AnchorToken): - token = self.get_token() - end_mark = token.end_mark - anchor = token.value - if tag is not None: - handle, suffix = tag - if handle is not None: - if handle not in self.tag_handles: - raise ParserError("while parsing a node", start_mark, - "found undefined tag handle %r" % handle.encode('utf-8'), - tag_mark) - tag = self.tag_handles[handle]+suffix - else: - tag = suffix - #if tag == u'!': - # raise ParserError("while parsing a node", start_mark, - # "found non-specific tag '!'", tag_mark, - # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.") - if start_mark is None: - start_mark = end_mark = self.peek_token().start_mark - event = None - implicit = (tag is None or tag == u'!') - if indentless_sequence and self.check_token(BlockEntryToken): - end_mark = self.peek_token().end_mark - event = SequenceStartEvent(anchor, tag, implicit, - start_mark, end_mark) - self.state = self.parse_indentless_sequence_entry - else: - if self.check_token(ScalarToken): - token = self.get_token() - end_mark = token.end_mark - if (token.plain and tag is None) or tag == u'!': - implicit = (True, False) - elif tag is None: - implicit = (False, True) - else: - implicit = (False, False) - event = ScalarEvent(anchor, tag, implicit, token.value, - start_mark, end_mark, style=token.style) - self.state = self.states.pop() - elif self.check_token(FlowSequenceStartToken): - end_mark = self.peek_token().end_mark - event = SequenceStartEvent(anchor, tag, implicit, - start_mark, end_mark, flow_style=True) - self.state = self.parse_flow_sequence_first_entry - elif self.check_token(FlowMappingStartToken): - end_mark = self.peek_token().end_mark - event = MappingStartEvent(anchor, tag, implicit, - start_mark, end_mark, flow_style=True) - self.state = self.parse_flow_mapping_first_key - elif block and self.check_token(BlockSequenceStartToken): - end_mark = self.peek_token().start_mark - event = SequenceStartEvent(anchor, tag, implicit, - start_mark, end_mark, flow_style=False) - self.state = self.parse_block_sequence_first_entry - elif block and self.check_token(BlockMappingStartToken): - end_mark = self.peek_token().start_mark - event = MappingStartEvent(anchor, tag, implicit, - start_mark, end_mark, flow_style=False) - self.state = self.parse_block_mapping_first_key - 
elif anchor is not None or tag is not None: - # Empty scalars are allowed even if a tag or an anchor is - # specified. - event = ScalarEvent(anchor, tag, (implicit, False), u'', - start_mark, end_mark) - self.state = self.states.pop() - else: - if block: - node = 'block' - else: - node = 'flow' - token = self.peek_token() - raise ParserError("while parsing a %s node" % node, start_mark, - "expected the node content, but found %r" % token.id, - token.start_mark) - return event - - # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END - - def parse_block_sequence_first_entry(self): - token = self.get_token() - self.marks.append(token.start_mark) - return self.parse_block_sequence_entry() - - def parse_block_sequence_entry(self): - if self.check_token(BlockEntryToken): - token = self.get_token() - if not self.check_token(BlockEntryToken, BlockEndToken): - self.states.append(self.parse_block_sequence_entry) - return self.parse_block_node() - else: - self.state = self.parse_block_sequence_entry - return self.process_empty_scalar(token.end_mark) - if not self.check_token(BlockEndToken): - token = self.peek_token() - raise ParserError("while parsing a block collection", self.marks[-1], - "expected <block end>, but found %r" % token.id, token.start_mark) - token = self.get_token() - event = SequenceEndEvent(token.start_mark, token.end_mark) - self.state = self.states.pop() - self.marks.pop() - return event - - # indentless_sequence ::= (BLOCK-ENTRY block_node?)+ - - def parse_indentless_sequence_entry(self): - if self.check_token(BlockEntryToken): - token = self.get_token() - if not self.check_token(BlockEntryToken, - KeyToken, ValueToken, BlockEndToken): - self.states.append(self.parse_indentless_sequence_entry) - return self.parse_block_node() - else: - self.state = self.parse_indentless_sequence_entry - return self.process_empty_scalar(token.end_mark) - token = self.peek_token() - event = SequenceEndEvent(token.start_mark, token.start_mark) - self.state = self.states.pop() - return event - - # block_mapping ::= BLOCK-MAPPING_START - # ((KEY block_node_or_indentless_sequence?)?
- # (VALUE block_node_or_indentless_sequence?)?)* - # BLOCK-END - - def parse_block_mapping_first_key(self): - token = self.get_token() - self.marks.append(token.start_mark) - return self.parse_block_mapping_key() - - def parse_block_mapping_key(self): - if self.check_token(KeyToken): - token = self.get_token() - if not self.check_token(KeyToken, ValueToken, BlockEndToken): - self.states.append(self.parse_block_mapping_value) - return self.parse_block_node_or_indentless_sequence() - else: - self.state = self.parse_block_mapping_value - return self.process_empty_scalar(token.end_mark) - if not self.check_token(BlockEndToken): - token = self.peek_token() - raise ParserError("while parsing a block mapping", self.marks[-1], - "expected <block end>, but found %r" % token.id, token.start_mark) - token = self.get_token() - event = MappingEndEvent(token.start_mark, token.end_mark) - self.state = self.states.pop() - self.marks.pop() - return event - - def parse_block_mapping_value(self): - if self.check_token(ValueToken): - token = self.get_token() - if not self.check_token(KeyToken, ValueToken, BlockEndToken): - self.states.append(self.parse_block_mapping_key) - return self.parse_block_node_or_indentless_sequence() - else: - self.state = self.parse_block_mapping_key - return self.process_empty_scalar(token.end_mark) - else: - self.state = self.parse_block_mapping_key - token = self.peek_token() - return self.process_empty_scalar(token.start_mark) - - # flow_sequence ::= FLOW-SEQUENCE-START - # (flow_sequence_entry FLOW-ENTRY)* - # flow_sequence_entry? - # FLOW-SEQUENCE-END - # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? - # - # Note that while production rules for both flow_sequence_entry and - # flow_mapping_entry are equal, their interpretations are different. - # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?` - # generate an inline mapping (set syntax).
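The 'KEY flow_node? (VALUE flow_node?)?' branch described in the comment above is what lets a bare key/value pair sit inside a flow sequence as a one-entry inline mapping:

    import yaml

    print(yaml.safe_load('[a: 1, b]'))  # [{'a': 1}, 'b']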
- - def parse_flow_sequence_first_entry(self): - token = self.get_token() - self.marks.append(token.start_mark) - return self.parse_flow_sequence_entry(first=True) - - def parse_flow_sequence_entry(self, first=False): - if not self.check_token(FlowSequenceEndToken): - if not first: - if self.check_token(FlowEntryToken): - self.get_token() - else: - token = self.peek_token() - raise ParserError("while parsing a flow sequence", self.marks[-1], - "expected ',' or ']', but got %r" % token.id, token.start_mark) - - if self.check_token(KeyToken): - token = self.peek_token() - event = MappingStartEvent(None, None, True, - token.start_mark, token.end_mark, - flow_style=True) - self.state = self.parse_flow_sequence_entry_mapping_key - return event - elif not self.check_token(FlowSequenceEndToken): - self.states.append(self.parse_flow_sequence_entry) - return self.parse_flow_node() - token = self.get_token() - event = SequenceEndEvent(token.start_mark, token.end_mark) - self.state = self.states.pop() - self.marks.pop() - return event - - def parse_flow_sequence_entry_mapping_key(self): - token = self.get_token() - if not self.check_token(ValueToken, - FlowEntryToken, FlowSequenceEndToken): - self.states.append(self.parse_flow_sequence_entry_mapping_value) - return self.parse_flow_node() - else: - self.state = self.parse_flow_sequence_entry_mapping_value - return self.process_empty_scalar(token.end_mark) - - def parse_flow_sequence_entry_mapping_value(self): - if self.check_token(ValueToken): - token = self.get_token() - if not self.check_token(FlowEntryToken, FlowSequenceEndToken): - self.states.append(self.parse_flow_sequence_entry_mapping_end) - return self.parse_flow_node() - else: - self.state = self.parse_flow_sequence_entry_mapping_end - return self.process_empty_scalar(token.end_mark) - else: - self.state = self.parse_flow_sequence_entry_mapping_end - token = self.peek_token() - return self.process_empty_scalar(token.start_mark) - - def parse_flow_sequence_entry_mapping_end(self): - self.state = self.parse_flow_sequence_entry - token = self.peek_token() - return MappingEndEvent(token.start_mark, token.start_mark) - - # flow_mapping ::= FLOW-MAPPING-START - # (flow_mapping_entry FLOW-ENTRY)* - # flow_mapping_entry? - # FLOW-MAPPING-END - # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
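The optional VALUE in flow_mapping_entry means a key may appear with no value at all; the parser (next hunk) completes it with process_empty_scalar(), so it loads as None:

    import yaml

    print(yaml.safe_load('{a: 1, b}'))  # {'a': 1, 'b': None}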
- - def parse_flow_mapping_first_key(self): - token = self.get_token() - self.marks.append(token.start_mark) - return self.parse_flow_mapping_key(first=True) - - def parse_flow_mapping_key(self, first=False): - if not self.check_token(FlowMappingEndToken): - if not first: - if self.check_token(FlowEntryToken): - self.get_token() - else: - token = self.peek_token() - raise ParserError("while parsing a flow mapping", self.marks[-1], - "expected ',' or '}', but got %r" % token.id, token.start_mark) - if self.check_token(KeyToken): - token = self.get_token() - if not self.check_token(ValueToken, - FlowEntryToken, FlowMappingEndToken): - self.states.append(self.parse_flow_mapping_value) - return self.parse_flow_node() - else: - self.state = self.parse_flow_mapping_value - return self.process_empty_scalar(token.end_mark) - elif not self.check_token(FlowMappingEndToken): - self.states.append(self.parse_flow_mapping_empty_value) - return self.parse_flow_node() - token = self.get_token() - event = MappingEndEvent(token.start_mark, token.end_mark) - self.state = self.states.pop() - self.marks.pop() - return event - - def parse_flow_mapping_value(self): - if self.check_token(ValueToken): - token = self.get_token() - if not self.check_token(FlowEntryToken, FlowMappingEndToken): - self.states.append(self.parse_flow_mapping_key) - return self.parse_flow_node() - else: - self.state = self.parse_flow_mapping_key - return self.process_empty_scalar(token.end_mark) - else: - self.state = self.parse_flow_mapping_key - token = self.peek_token() - return self.process_empty_scalar(token.start_mark) - - def parse_flow_mapping_empty_value(self): - self.state = self.parse_flow_mapping_key - return self.process_empty_scalar(self.peek_token().start_mark) - - def process_empty_scalar(self, mark): - return ScalarEvent(None, None, (True, False), u'', mark, mark) - diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml2/reader.py b/src/collectors/python.d.plugin/python_modules/pyyaml2/reader.py deleted file mode 100644 index 8d422954e..000000000 --- a/src/collectors/python.d.plugin/python_modules/pyyaml2/reader.py +++ /dev/null @@ -1,191 +0,0 @@ -# SPDX-License-Identifier: MIT -# This module contains abstractions for the input stream. You don't have to -# looks further, there are no pretty code. -# -# We define two classes here. -# -# Mark(source, line, column) -# It's just a record and its only use is producing nice error messages. -# Parser does not use it for any other purposes. -# -# Reader(source, data) -# Reader determines the encoding of `data` and converts it to unicode. -# Reader provides the following methods and attributes: -# reader.peek(length=1) - return the next `length` characters -# reader.forward(length=1) - move the current position to `length` characters. -# reader.index - the number of the current character. -# reader.line, stream.column - the line and the column of the current character. 
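[Editor's note: the Reader interface described in the docstring above (peek/prefix/forward plus index/line/column bookkeeping) can be exercised directly. A rough sketch against a modern Python 3 PyYAML — note the module being deleted here is the Python 2 copy, where `str`/`unicode` still differ:]

from yaml.reader import Reader

# Exercise the documented Reader methods on an in-memory string.
r = Reader("key: value\n")
print(r.peek())      # 'k'   - current character, nothing consumed
print(r.prefix(3))   # 'key' - three characters of lookahead
r.forward(3)         # consume three characters
print(r.index, r.line, r.column)   # 3 0 3
print(r.get_mark())  # a Mark, rendering roughly as: line 1, column 4
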
- -__all__ = ['Reader', 'ReaderError'] - -from error import YAMLError, Mark - -import codecs, re - -class ReaderError(YAMLError): - - def __init__(self, name, position, character, encoding, reason): - self.name = name - self.character = character - self.position = position - self.encoding = encoding - self.reason = reason - - def __str__(self): - if isinstance(self.character, str): - return "'%s' codec can't decode byte #x%02x: %s\n" \ - " in \"%s\", position %d" \ - % (self.encoding, ord(self.character), self.reason, - self.name, self.position) - else: - return "unacceptable character #x%04x: %s\n" \ - " in \"%s\", position %d" \ - % (self.character, self.reason, - self.name, self.position) - -class Reader(object): - # Reader: - # - determines the data encoding and converts it to unicode, - # - checks if characters are in allowed range, - # - adds '\0' to the end. - - # Reader accepts - # - a `str` object, - # - a `unicode` object, - # - a file-like object with its `read` method returning `str`, - # - a file-like object with its `read` method returning `unicode`. - - # Yeah, it's ugly and slow. - - def __init__(self, stream): - self.name = None - self.stream = None - self.stream_pointer = 0 - self.eof = True - self.buffer = u'' - self.pointer = 0 - self.raw_buffer = None - self.raw_decode = None - self.encoding = None - self.index = 0 - self.line = 0 - self.column = 0 - if isinstance(stream, unicode): - self.name = "" - self.check_printable(stream) - self.buffer = stream+u'\0' - elif isinstance(stream, str): - self.name = "" - self.raw_buffer = stream - self.determine_encoding() - else: - self.stream = stream - self.name = getattr(stream, 'name', "") - self.eof = False - self.raw_buffer = '' - self.determine_encoding() - - def peek(self, index=0): - try: - return self.buffer[self.pointer+index] - except IndexError: - self.update(index+1) - return self.buffer[self.pointer+index] - - def prefix(self, length=1): - if self.pointer+length >= len(self.buffer): - self.update(length) - return self.buffer[self.pointer:self.pointer+length] - - def forward(self, length=1): - if self.pointer+length+1 >= len(self.buffer): - self.update(length+1) - while length: - ch = self.buffer[self.pointer] - self.pointer += 1 - self.index += 1 - if ch in u'\n\x85\u2028\u2029' \ - or (ch == u'\r' and self.buffer[self.pointer] != u'\n'): - self.line += 1 - self.column = 0 - elif ch != u'\uFEFF': - self.column += 1 - length -= 1 - - def get_mark(self): - if self.stream is None: - return Mark(self.name, self.index, self.line, self.column, - self.buffer, self.pointer) - else: - return Mark(self.name, self.index, self.line, self.column, - None, None) - - def determine_encoding(self): - while not self.eof and len(self.raw_buffer) < 2: - self.update_raw() - if not isinstance(self.raw_buffer, unicode): - if self.raw_buffer.startswith(codecs.BOM_UTF16_LE): - self.raw_decode = codecs.utf_16_le_decode - self.encoding = 'utf-16-le' - elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE): - self.raw_decode = codecs.utf_16_be_decode - self.encoding = 'utf-16-be' - else: - self.raw_decode = codecs.utf_8_decode - self.encoding = 'utf-8' - self.update(1) - - NON_PRINTABLE = re.compile(u'[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]') - def check_printable(self, data): - match = self.NON_PRINTABLE.search(data) - if match: - character = match.group() - position = self.index+(len(self.buffer)-self.pointer)+match.start() - raise ReaderError(self.name, position, ord(character), - 'unicode', "special characters are not allowed") - 
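[Editor's note: the `check_printable`/`NON_PRINTABLE` machinery at the end of the hunk above is what rejects raw control characters in input documents. A minimal illustration with the public loader API, assuming a standard PyYAML install:]

import yaml

# BEL (0x07) falls outside the allowed printable range, so loading
# raises the ReaderError defined in this module.
try:
    yaml.safe_load("a: \x07")
except yaml.reader.ReaderError as exc:
    print(exc)  # unacceptable character #x0007: special characters are not allowed ...
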
- def update(self, length): - if self.raw_buffer is None: - return - self.buffer = self.buffer[self.pointer:] - self.pointer = 0 - while len(self.buffer) < length: - if not self.eof: - self.update_raw() - if self.raw_decode is not None: - try: - data, converted = self.raw_decode(self.raw_buffer, - 'strict', self.eof) - except UnicodeDecodeError, exc: - character = exc.object[exc.start] - if self.stream is not None: - position = self.stream_pointer-len(self.raw_buffer)+exc.start - else: - position = exc.start - raise ReaderError(self.name, position, character, - exc.encoding, exc.reason) - else: - data = self.raw_buffer - converted = len(data) - self.check_printable(data) - self.buffer += data - self.raw_buffer = self.raw_buffer[converted:] - if self.eof: - self.buffer += u'\0' - self.raw_buffer = None - break - - def update_raw(self, size=1024): - data = self.stream.read(size) - if data: - self.raw_buffer += data - self.stream_pointer += len(data) - else: - self.eof = True - -#try: -# import psyco -# psyco.bind(Reader) -#except ImportError: -# pass - diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml2/representer.py b/src/collectors/python.d.plugin/python_modules/pyyaml2/representer.py deleted file mode 100644 index 0a1404eca..000000000 --- a/src/collectors/python.d.plugin/python_modules/pyyaml2/representer.py +++ /dev/null @@ -1,485 +0,0 @@ -# SPDX-License-Identifier: MIT - -__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer', - 'RepresenterError'] - -from error import * -from nodes import * - -import datetime - -import sys, copy_reg, types - -class RepresenterError(YAMLError): - pass - -class BaseRepresenter(object): - - yaml_representers = {} - yaml_multi_representers = {} - - def __init__(self, default_style=None, default_flow_style=None): - self.default_style = default_style - self.default_flow_style = default_flow_style - self.represented_objects = {} - self.object_keeper = [] - self.alias_key = None - - def represent(self, data): - node = self.represent_data(data) - self.serialize(node) - self.represented_objects = {} - self.object_keeper = [] - self.alias_key = None - - def get_classobj_bases(self, cls): - bases = [cls] - for base in cls.__bases__: - bases.extend(self.get_classobj_bases(base)) - return bases - - def represent_data(self, data): - if self.ignore_aliases(data): - self.alias_key = None - else: - self.alias_key = id(data) - if self.alias_key is not None: - if self.alias_key in self.represented_objects: - node = self.represented_objects[self.alias_key] - #if node is None: - # raise RepresenterError("recursive objects are not allowed: %r" % data) - return node - #self.represented_objects[alias_key] = None - self.object_keeper.append(data) - data_types = type(data).__mro__ - if type(data) is types.InstanceType: - data_types = self.get_classobj_bases(data.__class__)+list(data_types) - if data_types[0] in self.yaml_representers: - node = self.yaml_representers[data_types[0]](self, data) - else: - for data_type in data_types: - if data_type in self.yaml_multi_representers: - node = self.yaml_multi_representers[data_type](self, data) - break - else: - if None in self.yaml_multi_representers: - node = self.yaml_multi_representers[None](self, data) - elif None in self.yaml_representers: - node = self.yaml_representers[None](self, data) - else: - node = ScalarNode(None, unicode(data)) - #if alias_key is not None: - # self.represented_objects[alias_key] = node - return node - - def add_representer(cls, data_type, representer): - if not 'yaml_representers' 
in cls.__dict__: - cls.yaml_representers = cls.yaml_representers.copy() - cls.yaml_representers[data_type] = representer - add_representer = classmethod(add_representer) - - def add_multi_representer(cls, data_type, representer): - if not 'yaml_multi_representers' in cls.__dict__: - cls.yaml_multi_representers = cls.yaml_multi_representers.copy() - cls.yaml_multi_representers[data_type] = representer - add_multi_representer = classmethod(add_multi_representer) - - def represent_scalar(self, tag, value, style=None): - if style is None: - style = self.default_style - node = ScalarNode(tag, value, style=style) - if self.alias_key is not None: - self.represented_objects[self.alias_key] = node - return node - - def represent_sequence(self, tag, sequence, flow_style=None): - value = [] - node = SequenceNode(tag, value, flow_style=flow_style) - if self.alias_key is not None: - self.represented_objects[self.alias_key] = node - best_style = True - for item in sequence: - node_item = self.represent_data(item) - if not (isinstance(node_item, ScalarNode) and not node_item.style): - best_style = False - value.append(node_item) - if flow_style is None: - if self.default_flow_style is not None: - node.flow_style = self.default_flow_style - else: - node.flow_style = best_style - return node - - def represent_mapping(self, tag, mapping, flow_style=None): - value = [] - node = MappingNode(tag, value, flow_style=flow_style) - if self.alias_key is not None: - self.represented_objects[self.alias_key] = node - best_style = True - if hasattr(mapping, 'items'): - mapping = mapping.items() - mapping.sort() - for item_key, item_value in mapping: - node_key = self.represent_data(item_key) - node_value = self.represent_data(item_value) - if not (isinstance(node_key, ScalarNode) and not node_key.style): - best_style = False - if not (isinstance(node_value, ScalarNode) and not node_value.style): - best_style = False - value.append((node_key, node_value)) - if flow_style is None: - if self.default_flow_style is not None: - node.flow_style = self.default_flow_style - else: - node.flow_style = best_style - return node - - def ignore_aliases(self, data): - return False - -class SafeRepresenter(BaseRepresenter): - - def ignore_aliases(self, data): - if data in [None, ()]: - return True - if isinstance(data, (str, unicode, bool, int, float)): - return True - - def represent_none(self, data): - return self.represent_scalar(u'tag:yaml.org,2002:null', - u'null') - - def represent_str(self, data): - tag = None - style = None - try: - data = unicode(data, 'ascii') - tag = u'tag:yaml.org,2002:str' - except UnicodeDecodeError: - try: - data = unicode(data, 'utf-8') - tag = u'tag:yaml.org,2002:str' - except UnicodeDecodeError: - data = data.encode('base64') - tag = u'tag:yaml.org,2002:binary' - style = '|' - return self.represent_scalar(tag, data, style=style) - - def represent_unicode(self, data): - return self.represent_scalar(u'tag:yaml.org,2002:str', data) - - def represent_bool(self, data): - if data: - value = u'true' - else: - value = u'false' - return self.represent_scalar(u'tag:yaml.org,2002:bool', value) - - def represent_int(self, data): - return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data)) - - def represent_long(self, data): - return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data)) - - inf_value = 1e300 - while repr(inf_value) != repr(inf_value*inf_value): - inf_value *= inf_value - - def represent_float(self, data): - if data != data or (data == 0.0 and data == 1.0): - value = u'.nan' - 
elif data == self.inf_value: - value = u'.inf' - elif data == -self.inf_value: - value = u'-.inf' - else: - value = unicode(repr(data)).lower() - # Note that in some cases `repr(data)` represents a float number - # without the decimal parts. For instance: - # >>> repr(1e17) - # '1e17' - # Unfortunately, this is not a valid float representation according - # to the definition of the `!!float` tag. We fix this by adding - # '.0' before the 'e' symbol. - if u'.' not in value and u'e' in value: - value = value.replace(u'e', u'.0e', 1) - return self.represent_scalar(u'tag:yaml.org,2002:float', value) - - def represent_list(self, data): - #pairs = (len(data) > 0 and isinstance(data, list)) - #if pairs: - # for item in data: - # if not isinstance(item, tuple) or len(item) != 2: - # pairs = False - # break - #if not pairs: - return self.represent_sequence(u'tag:yaml.org,2002:seq', data) - #value = [] - #for item_key, item_value in data: - # value.append(self.represent_mapping(u'tag:yaml.org,2002:map', - # [(item_key, item_value)])) - #return SequenceNode(u'tag:yaml.org,2002:pairs', value) - - def represent_dict(self, data): - return self.represent_mapping(u'tag:yaml.org,2002:map', data) - - def represent_set(self, data): - value = {} - for key in data: - value[key] = None - return self.represent_mapping(u'tag:yaml.org,2002:set', value) - - def represent_date(self, data): - value = unicode(data.isoformat()) - return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value) - - def represent_datetime(self, data): - value = unicode(data.isoformat(' ')) - return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value) - - def represent_yaml_object(self, tag, data, cls, flow_style=None): - if hasattr(data, '__getstate__'): - state = data.__getstate__() - else: - state = data.__dict__.copy() - return self.represent_mapping(tag, state, flow_style=flow_style) - - def represent_undefined(self, data): - raise RepresenterError("cannot represent an object: %s" % data) - -SafeRepresenter.add_representer(type(None), - SafeRepresenter.represent_none) - -SafeRepresenter.add_representer(str, - SafeRepresenter.represent_str) - -SafeRepresenter.add_representer(unicode, - SafeRepresenter.represent_unicode) - -SafeRepresenter.add_representer(bool, - SafeRepresenter.represent_bool) - -SafeRepresenter.add_representer(int, - SafeRepresenter.represent_int) - -SafeRepresenter.add_representer(long, - SafeRepresenter.represent_long) - -SafeRepresenter.add_representer(float, - SafeRepresenter.represent_float) - -SafeRepresenter.add_representer(list, - SafeRepresenter.represent_list) - -SafeRepresenter.add_representer(tuple, - SafeRepresenter.represent_list) - -SafeRepresenter.add_representer(dict, - SafeRepresenter.represent_dict) - -SafeRepresenter.add_representer(set, - SafeRepresenter.represent_set) - -SafeRepresenter.add_representer(datetime.date, - SafeRepresenter.represent_date) - -SafeRepresenter.add_representer(datetime.datetime, - SafeRepresenter.represent_datetime) - -SafeRepresenter.add_representer(None, - SafeRepresenter.represent_undefined) - -class Representer(SafeRepresenter): - - def represent_str(self, data): - tag = None - style = None - try: - data = unicode(data, 'ascii') - tag = u'tag:yaml.org,2002:str' - except UnicodeDecodeError: - try: - data = unicode(data, 'utf-8') - tag = u'tag:yaml.org,2002:python/str' - except UnicodeDecodeError: - data = data.encode('base64') - tag = u'tag:yaml.org,2002:binary' - style = '|' - return self.represent_scalar(tag, data, style=style) - - def 
represent_unicode(self, data): - tag = None - try: - data.encode('ascii') - tag = u'tag:yaml.org,2002:python/unicode' - except UnicodeEncodeError: - tag = u'tag:yaml.org,2002:str' - return self.represent_scalar(tag, data) - - def represent_long(self, data): - tag = u'tag:yaml.org,2002:int' - if int(data) is not data: - tag = u'tag:yaml.org,2002:python/long' - return self.represent_scalar(tag, unicode(data)) - - def represent_complex(self, data): - if data.imag == 0.0: - data = u'%r' % data.real - elif data.real == 0.0: - data = u'%rj' % data.imag - elif data.imag > 0: - data = u'%r+%rj' % (data.real, data.imag) - else: - data = u'%r%rj' % (data.real, data.imag) - return self.represent_scalar(u'tag:yaml.org,2002:python/complex', data) - - def represent_tuple(self, data): - return self.represent_sequence(u'tag:yaml.org,2002:python/tuple', data) - - def represent_name(self, data): - name = u'%s.%s' % (data.__module__, data.__name__) - return self.represent_scalar(u'tag:yaml.org,2002:python/name:'+name, u'') - - def represent_module(self, data): - return self.represent_scalar( - u'tag:yaml.org,2002:python/module:'+data.__name__, u'') - - def represent_instance(self, data): - # For instances of classic classes, we use __getinitargs__ and - # __getstate__ to serialize the data. - - # If data.__getinitargs__ exists, the object must be reconstructed by - # calling cls(**args), where args is a tuple returned by - # __getinitargs__. Otherwise, the cls.__init__ method should never be - # called and the class instance is created by instantiating a trivial - # class and assigning to the instance's __class__ variable. - - # If data.__getstate__ exists, it returns the state of the object. - # Otherwise, the state of the object is data.__dict__. - - # We produce either a !!python/object or !!python/object/new node. - # If data.__getinitargs__ does not exist and state is a dictionary, we - # produce a !!python/object node . Otherwise we produce a - # !!python/object/new node. - - cls = data.__class__ - class_name = u'%s.%s' % (cls.__module__, cls.__name__) - args = None - state = None - if hasattr(data, '__getinitargs__'): - args = list(data.__getinitargs__()) - if hasattr(data, '__getstate__'): - state = data.__getstate__() - else: - state = data.__dict__ - if args is None and isinstance(state, dict): - return self.represent_mapping( - u'tag:yaml.org,2002:python/object:'+class_name, state) - if isinstance(state, dict) and not state: - return self.represent_sequence( - u'tag:yaml.org,2002:python/object/new:'+class_name, args) - value = {} - if args: - value['args'] = args - value['state'] = state - return self.represent_mapping( - u'tag:yaml.org,2002:python/object/new:'+class_name, value) - - def represent_object(self, data): - # We use __reduce__ API to save the data. data.__reduce__ returns - # a tuple of length 2-5: - # (function, args, state, listitems, dictitems) - - # For reconstructing, we calls function(*args), then set its state, - # listitems, and dictitems if they are not None. - - # A special case is when function.__name__ == '__newobj__'. In this - # case we create the object with args[0].__new__(*args). - - # Another special case is when __reduce__ returns a string - we don't - # support it. - - # We produce a !!python/object, !!python/object/new or - # !!python/object/apply node. 
- - cls = type(data) - if cls in copy_reg.dispatch_table: - reduce = copy_reg.dispatch_table[cls](data) - elif hasattr(data, '__reduce_ex__'): - reduce = data.__reduce_ex__(2) - elif hasattr(data, '__reduce__'): - reduce = data.__reduce__() - else: - raise RepresenterError("cannot represent object: %r" % data) - reduce = (list(reduce)+[None]*5)[:5] - function, args, state, listitems, dictitems = reduce - args = list(args) - if state is None: - state = {} - if listitems is not None: - listitems = list(listitems) - if dictitems is not None: - dictitems = dict(dictitems) - if function.__name__ == '__newobj__': - function = args[0] - args = args[1:] - tag = u'tag:yaml.org,2002:python/object/new:' - newobj = True - else: - tag = u'tag:yaml.org,2002:python/object/apply:' - newobj = False - function_name = u'%s.%s' % (function.__module__, function.__name__) - if not args and not listitems and not dictitems \ - and isinstance(state, dict) and newobj: - return self.represent_mapping( - u'tag:yaml.org,2002:python/object:'+function_name, state) - if not listitems and not dictitems \ - and isinstance(state, dict) and not state: - return self.represent_sequence(tag+function_name, args) - value = {} - if args: - value['args'] = args - if state or not isinstance(state, dict): - value['state'] = state - if listitems: - value['listitems'] = listitems - if dictitems: - value['dictitems'] = dictitems - return self.represent_mapping(tag+function_name, value) - -Representer.add_representer(str, - Representer.represent_str) - -Representer.add_representer(unicode, - Representer.represent_unicode) - -Representer.add_representer(long, - Representer.represent_long) - -Representer.add_representer(complex, - Representer.represent_complex) - -Representer.add_representer(tuple, - Representer.represent_tuple) - -Representer.add_representer(type, - Representer.represent_name) - -Representer.add_representer(types.ClassType, - Representer.represent_name) - -Representer.add_representer(types.FunctionType, - Representer.represent_name) - -Representer.add_representer(types.BuiltinFunctionType, - Representer.represent_name) - -Representer.add_representer(types.ModuleType, - Representer.represent_module) - -Representer.add_multi_representer(types.InstanceType, - Representer.represent_instance) - -Representer.add_multi_representer(object, - Representer.represent_object) - diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml2/resolver.py b/src/collectors/python.d.plugin/python_modules/pyyaml2/resolver.py deleted file mode 100644 index 49922debf..000000000 --- a/src/collectors/python.d.plugin/python_modules/pyyaml2/resolver.py +++ /dev/null @@ -1,225 +0,0 @@ -# SPDX-License-Identifier: MIT - -__all__ = ['BaseResolver', 'Resolver'] - -from error import * -from nodes import * - -import re - -class ResolverError(YAMLError): - pass - -class BaseResolver(object): - - DEFAULT_SCALAR_TAG = u'tag:yaml.org,2002:str' - DEFAULT_SEQUENCE_TAG = u'tag:yaml.org,2002:seq' - DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map' - - yaml_implicit_resolvers = {} - yaml_path_resolvers = {} - - def __init__(self): - self.resolver_exact_paths = [] - self.resolver_prefix_paths = [] - - def add_implicit_resolver(cls, tag, regexp, first): - if not 'yaml_implicit_resolvers' in cls.__dict__: - cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy() - if first is None: - first = [None] - for ch in first: - cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp)) - add_implicit_resolver = classmethod(add_implicit_resolver) - - def 
add_path_resolver(cls, tag, path, kind=None): - # Note: `add_path_resolver` is experimental. The API could be changed. - # `new_path` is a pattern that is matched against the path from the - # root to the node that is being considered. `node_path` elements are - # tuples `(node_check, index_check)`. `node_check` is a node class: - # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None` - # matches any kind of a node. `index_check` could be `None`, a boolean - # value, a string value, or a number. `None` and `False` match against - # any _value_ of sequence and mapping nodes. `True` matches against - # any _key_ of a mapping node. A string `index_check` matches against - # a mapping value that corresponds to a scalar key which content is - # equal to the `index_check` value. An integer `index_check` matches - # against a sequence value with the index equal to `index_check`. - if not 'yaml_path_resolvers' in cls.__dict__: - cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy() - new_path = [] - for element in path: - if isinstance(element, (list, tuple)): - if len(element) == 2: - node_check, index_check = element - elif len(element) == 1: - node_check = element[0] - index_check = True - else: - raise ResolverError("Invalid path element: %s" % element) - else: - node_check = None - index_check = element - if node_check is str: - node_check = ScalarNode - elif node_check is list: - node_check = SequenceNode - elif node_check is dict: - node_check = MappingNode - elif node_check not in [ScalarNode, SequenceNode, MappingNode] \ - and not isinstance(node_check, basestring) \ - and node_check is not None: - raise ResolverError("Invalid node checker: %s" % node_check) - if not isinstance(index_check, (basestring, int)) \ - and index_check is not None: - raise ResolverError("Invalid index checker: %s" % index_check) - new_path.append((node_check, index_check)) - if kind is str: - kind = ScalarNode - elif kind is list: - kind = SequenceNode - elif kind is dict: - kind = MappingNode - elif kind not in [ScalarNode, SequenceNode, MappingNode] \ - and kind is not None: - raise ResolverError("Invalid node kind: %s" % kind) - cls.yaml_path_resolvers[tuple(new_path), kind] = tag - add_path_resolver = classmethod(add_path_resolver) - - def descend_resolver(self, current_node, current_index): - if not self.yaml_path_resolvers: - return - exact_paths = {} - prefix_paths = [] - if current_node: - depth = len(self.resolver_prefix_paths) - for path, kind in self.resolver_prefix_paths[-1]: - if self.check_resolver_prefix(depth, path, kind, - current_node, current_index): - if len(path) > depth: - prefix_paths.append((path, kind)) - else: - exact_paths[kind] = self.yaml_path_resolvers[path, kind] - else: - for path, kind in self.yaml_path_resolvers: - if not path: - exact_paths[kind] = self.yaml_path_resolvers[path, kind] - else: - prefix_paths.append((path, kind)) - self.resolver_exact_paths.append(exact_paths) - self.resolver_prefix_paths.append(prefix_paths) - - def ascend_resolver(self): - if not self.yaml_path_resolvers: - return - self.resolver_exact_paths.pop() - self.resolver_prefix_paths.pop() - - def check_resolver_prefix(self, depth, path, kind, - current_node, current_index): - node_check, index_check = path[depth-1] - if isinstance(node_check, basestring): - if current_node.tag != node_check: - return - elif node_check is not None: - if not isinstance(current_node, node_check): - return - if index_check is True and current_index is not None: - return - if (index_check is False or 
index_check is None) \ - and current_index is None: - return - if isinstance(index_check, basestring): - if not (isinstance(current_index, ScalarNode) - and index_check == current_index.value): - return - elif isinstance(index_check, int) and not isinstance(index_check, bool): - if index_check != current_index: - return - return True - - def resolve(self, kind, value, implicit): - if kind is ScalarNode and implicit[0]: - if value == u'': - resolvers = self.yaml_implicit_resolvers.get(u'', []) - else: - resolvers = self.yaml_implicit_resolvers.get(value[0], []) - resolvers += self.yaml_implicit_resolvers.get(None, []) - for tag, regexp in resolvers: - if regexp.match(value): - return tag - implicit = implicit[1] - if self.yaml_path_resolvers: - exact_paths = self.resolver_exact_paths[-1] - if kind in exact_paths: - return exact_paths[kind] - if None in exact_paths: - return exact_paths[None] - if kind is ScalarNode: - return self.DEFAULT_SCALAR_TAG - elif kind is SequenceNode: - return self.DEFAULT_SEQUENCE_TAG - elif kind is MappingNode: - return self.DEFAULT_MAPPING_TAG - -class Resolver(BaseResolver): - pass - -Resolver.add_implicit_resolver( - u'tag:yaml.org,2002:bool', - re.compile(ur'''^(?:yes|Yes|YES|no|No|NO - |true|True|TRUE|false|False|FALSE - |on|On|ON|off|Off|OFF)$''', re.X), - list(u'yYnNtTfFoO')) - -Resolver.add_implicit_resolver( - u'tag:yaml.org,2002:float', - re.compile(ur'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)? - |\.[0-9_]+(?:[eE][-+][0-9]+)? - |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]* - |[-+]?\.(?:inf|Inf|INF) - |\.(?:nan|NaN|NAN))$''', re.X), - list(u'-+0123456789.')) - -Resolver.add_implicit_resolver( - u'tag:yaml.org,2002:int', - re.compile(ur'''^(?:[-+]?0b[0-1_]+ - |[-+]?0[0-7_]+ - |[-+]?(?:0|[1-9][0-9_]*) - |[-+]?0x[0-9a-fA-F_]+ - |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X), - list(u'-+0123456789')) - -Resolver.add_implicit_resolver( - u'tag:yaml.org,2002:merge', - re.compile(ur'^(?:<<)$'), - [u'<']) - -Resolver.add_implicit_resolver( - u'tag:yaml.org,2002:null', - re.compile(ur'''^(?: ~ - |null|Null|NULL - | )$''', re.X), - [u'~', u'n', u'N', u'']) - -Resolver.add_implicit_resolver( - u'tag:yaml.org,2002:timestamp', - re.compile(ur'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9] - |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]? - (?:[Tt]|[ \t]+)[0-9][0-9]? - :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)? - (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X), - list(u'0123456789')) - -Resolver.add_implicit_resolver( - u'tag:yaml.org,2002:value', - re.compile(ur'^(?:=)$'), - [u'=']) - -# The following resolver is only for documentation purposes. It cannot work -# because plain scalars cannot start with '!', '&', or '*'. 
-Resolver.add_implicit_resolver( - u'tag:yaml.org,2002:yaml', - re.compile(ur'^(?:!|&|\*)$'), - list(u'!&*')) - diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml2/scanner.py b/src/collectors/python.d.plugin/python_modules/pyyaml2/scanner.py deleted file mode 100644 index 971da6127..000000000 --- a/src/collectors/python.d.plugin/python_modules/pyyaml2/scanner.py +++ /dev/null @@ -1,1458 +0,0 @@ -# SPDX-License-Identifier: MIT - -# Scanner produces tokens of the following types: -# STREAM-START -# STREAM-END -# DIRECTIVE(name, value) -# DOCUMENT-START -# DOCUMENT-END -# BLOCK-SEQUENCE-START -# BLOCK-MAPPING-START -# BLOCK-END -# FLOW-SEQUENCE-START -# FLOW-MAPPING-START -# FLOW-SEQUENCE-END -# FLOW-MAPPING-END -# BLOCK-ENTRY -# FLOW-ENTRY -# KEY -# VALUE -# ALIAS(value) -# ANCHOR(value) -# TAG(value) -# SCALAR(value, plain, style) -# -# Read comments in the Scanner code for more details. -# - -__all__ = ['Scanner', 'ScannerError'] - -from error import MarkedYAMLError -from tokens import * - -class ScannerError(MarkedYAMLError): - pass - -class SimpleKey(object): - # See below simple keys treatment. - - def __init__(self, token_number, required, index, line, column, mark): - self.token_number = token_number - self.required = required - self.index = index - self.line = line - self.column = column - self.mark = mark - -class Scanner(object): - - def __init__(self): - """Initialize the scanner.""" - # It is assumed that Scanner and Reader will have a common descendant. - # Reader do the dirty work of checking for BOM and converting the - # input data to Unicode. It also adds NUL to the end. - # - # Reader supports the following methods - # self.peek(i=0) # peek the next i-th character - # self.prefix(l=1) # peek the next l characters - # self.forward(l=1) # read the next l characters and move the pointer. - - # Had we reached the end of the stream? - self.done = False - - # The number of unclosed '{' and '['. `flow_level == 0` means block - # context. - self.flow_level = 0 - - # List of processed tokens that are not yet emitted. - self.tokens = [] - - # Add the STREAM-START token. - self.fetch_stream_start() - - # Number of tokens that were emitted through the `get_token` method. - self.tokens_taken = 0 - - # The current indentation level. - self.indent = -1 - - # Past indentation levels. - self.indents = [] - - # Variables related to simple keys treatment. - - # A simple key is a key that is not denoted by the '?' indicator. - # Example of simple keys: - # --- - # block simple key: value - # ? not a simple key: - # : { flow simple key: value } - # We emit the KEY token before all keys, so when we find a potential - # simple key, we try to locate the corresponding ':' indicator. - # Simple keys should be limited to a single line and 1024 characters. - - # Can a simple key start at the current position? A simple key may - # start: - # - at the beginning of the line, not counting indentation spaces - # (in block context), - # - after '{', '[', ',' (in the flow context), - # - after '?', ':', '-' (in the block context). - # In the block context, this flag also signifies if a block collection - # may start at the current position. - self.allow_simple_key = True - - # Keep track of possible simple keys. This is a dictionary. The key - # is `flow_level`; there can be no more that one possible simple key - # for each level. 
The value is a SimpleKey record: - # (token_number, required, index, line, column, mark) - # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow), - # '[', or '{' tokens. - self.possible_simple_keys = {} - - # Public methods. - - def check_token(self, *choices): - # Check if the next token is one of the given types. - while self.need_more_tokens(): - self.fetch_more_tokens() - if self.tokens: - if not choices: - return True - for choice in choices: - if isinstance(self.tokens[0], choice): - return True - return False - - def peek_token(self): - # Return the next token, but do not delete if from the queue. - while self.need_more_tokens(): - self.fetch_more_tokens() - if self.tokens: - return self.tokens[0] - - def get_token(self): - # Return the next token. - while self.need_more_tokens(): - self.fetch_more_tokens() - if self.tokens: - self.tokens_taken += 1 - return self.tokens.pop(0) - - # Private methods. - - def need_more_tokens(self): - if self.done: - return False - if not self.tokens: - return True - # The current token may be a potential simple key, so we - # need to look further. - self.stale_possible_simple_keys() - if self.next_possible_simple_key() == self.tokens_taken: - return True - - def fetch_more_tokens(self): - - # Eat whitespaces and comments until we reach the next token. - self.scan_to_next_token() - - # Remove obsolete possible simple keys. - self.stale_possible_simple_keys() - - # Compare the current indentation and column. It may add some tokens - # and decrease the current indentation level. - self.unwind_indent(self.column) - - # Peek the next character. - ch = self.peek() - - # Is it the end of stream? - if ch == u'\0': - return self.fetch_stream_end() - - # Is it a directive? - if ch == u'%' and self.check_directive(): - return self.fetch_directive() - - # Is it the document start? - if ch == u'-' and self.check_document_start(): - return self.fetch_document_start() - - # Is it the document end? - if ch == u'.' and self.check_document_end(): - return self.fetch_document_end() - - # TODO: support for BOM within a stream. - #if ch == u'\uFEFF': - # return self.fetch_bom() <-- issue BOMToken - - # Note: the order of the following checks is NOT significant. - - # Is it the flow sequence start indicator? - if ch == u'[': - return self.fetch_flow_sequence_start() - - # Is it the flow mapping start indicator? - if ch == u'{': - return self.fetch_flow_mapping_start() - - # Is it the flow sequence end indicator? - if ch == u']': - return self.fetch_flow_sequence_end() - - # Is it the flow mapping end indicator? - if ch == u'}': - return self.fetch_flow_mapping_end() - - # Is it the flow entry indicator? - if ch == u',': - return self.fetch_flow_entry() - - # Is it the block entry indicator? - if ch == u'-' and self.check_block_entry(): - return self.fetch_block_entry() - - # Is it the key indicator? - if ch == u'?' and self.check_key(): - return self.fetch_key() - - # Is it the value indicator? - if ch == u':' and self.check_value(): - return self.fetch_value() - - # Is it an alias? - if ch == u'*': - return self.fetch_alias() - - # Is it an anchor? - if ch == u'&': - return self.fetch_anchor() - - # Is it a tag? - if ch == u'!': - return self.fetch_tag() - - # Is it a literal scalar? - if ch == u'|' and not self.flow_level: - return self.fetch_literal() - - # Is it a folded scalar? - if ch == u'>' and not self.flow_level: - return self.fetch_folded() - - # Is it a single quoted scalar? 
- if ch == u'\'': - return self.fetch_single() - - # Is it a double quoted scalar? - if ch == u'\"': - return self.fetch_double() - - # It must be a plain scalar then. - if self.check_plain(): - return self.fetch_plain() - - # No? It's an error. Let's produce a nice error message. - raise ScannerError("while scanning for the next token", None, - "found character %r that cannot start any token" - % ch.encode('utf-8'), self.get_mark()) - - # Simple keys treatment. - - def next_possible_simple_key(self): - # Return the number of the nearest possible simple key. Actually we - # don't need to loop through the whole dictionary. We may replace it - # with the following code: - # if not self.possible_simple_keys: - # return None - # return self.possible_simple_keys[ - # min(self.possible_simple_keys.keys())].token_number - min_token_number = None - for level in self.possible_simple_keys: - key = self.possible_simple_keys[level] - if min_token_number is None or key.token_number < min_token_number: - min_token_number = key.token_number - return min_token_number - - def stale_possible_simple_keys(self): - # Remove entries that are no longer possible simple keys. According to - # the YAML specification, simple keys - # - should be limited to a single line, - # - should be no longer than 1024 characters. - # Disabling this procedure will allow simple keys of any length and - # height (may cause problems if indentation is broken though). - for level in self.possible_simple_keys.keys(): - key = self.possible_simple_keys[level] - if key.line != self.line \ - or self.index-key.index > 1024: - if key.required: - raise ScannerError("while scanning a simple key", key.mark, - "could not found expected ':'", self.get_mark()) - del self.possible_simple_keys[level] - - def save_possible_simple_key(self): - # The next token may start a simple key. We check if it's possible - # and save its position. This function is called for - # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'. - - # Check if a simple key is required at the current position. - required = not self.flow_level and self.indent == self.column - - # A simple key is required only if it is the first token in the current - # line. Therefore it is always allowed. - assert self.allow_simple_key or not required - - # The next token might be a simple key. Let's save it's number and - # position. - if self.allow_simple_key: - self.remove_possible_simple_key() - token_number = self.tokens_taken+len(self.tokens) - key = SimpleKey(token_number, required, - self.index, self.line, self.column, self.get_mark()) - self.possible_simple_keys[self.flow_level] = key - - def remove_possible_simple_key(self): - # Remove the saved possible key position at the current flow level. - if self.flow_level in self.possible_simple_keys: - key = self.possible_simple_keys[self.flow_level] - - if key.required: - raise ScannerError("while scanning a simple key", key.mark, - "could not found expected ':'", self.get_mark()) - - del self.possible_simple_keys[self.flow_level] - - # Indentation functions. - - def unwind_indent(self, column): - - ## In flow context, tokens should respect indentation. - ## Actually the condition should be `self.indent >= column` according to - ## the spec. But this condition will prohibit intuitively correct - ## constructions such as - ## key : { - ## } - #if self.flow_level and self.indent > column: - # raise ScannerError(None, None, - # "invalid intendation or unclosed '[' or '{'", - # self.get_mark()) - - # In the flow context, indentation is ignored. 
We make the scanner less - # restrictive then specification requires. - if self.flow_level: - return - - # In block context, we may need to issue the BLOCK-END tokens. - while self.indent > column: - mark = self.get_mark() - self.indent = self.indents.pop() - self.tokens.append(BlockEndToken(mark, mark)) - - def add_indent(self, column): - # Check if we need to increase indentation. - if self.indent < column: - self.indents.append(self.indent) - self.indent = column - return True - return False - - # Fetchers. - - def fetch_stream_start(self): - # We always add STREAM-START as the first token and STREAM-END as the - # last token. - - # Read the token. - mark = self.get_mark() - - # Add STREAM-START. - self.tokens.append(StreamStartToken(mark, mark, - encoding=self.encoding)) - - - def fetch_stream_end(self): - - # Set the current intendation to -1. - self.unwind_indent(-1) - - # Reset simple keys. - self.remove_possible_simple_key() - self.allow_simple_key = False - self.possible_simple_keys = {} - - # Read the token. - mark = self.get_mark() - - # Add STREAM-END. - self.tokens.append(StreamEndToken(mark, mark)) - - # The steam is finished. - self.done = True - - def fetch_directive(self): - - # Set the current intendation to -1. - self.unwind_indent(-1) - - # Reset simple keys. - self.remove_possible_simple_key() - self.allow_simple_key = False - - # Scan and add DIRECTIVE. - self.tokens.append(self.scan_directive()) - - def fetch_document_start(self): - self.fetch_document_indicator(DocumentStartToken) - - def fetch_document_end(self): - self.fetch_document_indicator(DocumentEndToken) - - def fetch_document_indicator(self, TokenClass): - - # Set the current intendation to -1. - self.unwind_indent(-1) - - # Reset simple keys. Note that there could not be a block collection - # after '---'. - self.remove_possible_simple_key() - self.allow_simple_key = False - - # Add DOCUMENT-START or DOCUMENT-END. - start_mark = self.get_mark() - self.forward(3) - end_mark = self.get_mark() - self.tokens.append(TokenClass(start_mark, end_mark)) - - def fetch_flow_sequence_start(self): - self.fetch_flow_collection_start(FlowSequenceStartToken) - - def fetch_flow_mapping_start(self): - self.fetch_flow_collection_start(FlowMappingStartToken) - - def fetch_flow_collection_start(self, TokenClass): - - # '[' and '{' may start a simple key. - self.save_possible_simple_key() - - # Increase the flow level. - self.flow_level += 1 - - # Simple keys are allowed after '[' and '{'. - self.allow_simple_key = True - - # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START. - start_mark = self.get_mark() - self.forward() - end_mark = self.get_mark() - self.tokens.append(TokenClass(start_mark, end_mark)) - - def fetch_flow_sequence_end(self): - self.fetch_flow_collection_end(FlowSequenceEndToken) - - def fetch_flow_mapping_end(self): - self.fetch_flow_collection_end(FlowMappingEndToken) - - def fetch_flow_collection_end(self, TokenClass): - - # Reset possible simple key on the current level. - self.remove_possible_simple_key() - - # Decrease the flow level. - self.flow_level -= 1 - - # No simple keys after ']' or '}'. - self.allow_simple_key = False - - # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END. - start_mark = self.get_mark() - self.forward() - end_mark = self.get_mark() - self.tokens.append(TokenClass(start_mark, end_mark)) - - def fetch_flow_entry(self): - - # Simple keys are allowed after ','. - self.allow_simple_key = True - - # Reset possible simple key on the current level. 
- self.remove_possible_simple_key() - - # Add FLOW-ENTRY. - start_mark = self.get_mark() - self.forward() - end_mark = self.get_mark() - self.tokens.append(FlowEntryToken(start_mark, end_mark)) - - def fetch_block_entry(self): - - # Block context needs additional checks. - if not self.flow_level: - - # Are we allowed to start a new entry? - if not self.allow_simple_key: - raise ScannerError(None, None, - "sequence entries are not allowed here", - self.get_mark()) - - # We may need to add BLOCK-SEQUENCE-START. - if self.add_indent(self.column): - mark = self.get_mark() - self.tokens.append(BlockSequenceStartToken(mark, mark)) - - # It's an error for the block entry to occur in the flow context, - # but we let the parser detect this. - else: - pass - - # Simple keys are allowed after '-'. - self.allow_simple_key = True - - # Reset possible simple key on the current level. - self.remove_possible_simple_key() - - # Add BLOCK-ENTRY. - start_mark = self.get_mark() - self.forward() - end_mark = self.get_mark() - self.tokens.append(BlockEntryToken(start_mark, end_mark)) - - def fetch_key(self): - - # Block context needs additional checks. - if not self.flow_level: - - # Are we allowed to start a key (not nessesary a simple)? - if not self.allow_simple_key: - raise ScannerError(None, None, - "mapping keys are not allowed here", - self.get_mark()) - - # We may need to add BLOCK-MAPPING-START. - if self.add_indent(self.column): - mark = self.get_mark() - self.tokens.append(BlockMappingStartToken(mark, mark)) - - # Simple keys are allowed after '?' in the block context. - self.allow_simple_key = not self.flow_level - - # Reset possible simple key on the current level. - self.remove_possible_simple_key() - - # Add KEY. - start_mark = self.get_mark() - self.forward() - end_mark = self.get_mark() - self.tokens.append(KeyToken(start_mark, end_mark)) - - def fetch_value(self): - - # Do we determine a simple key? - if self.flow_level in self.possible_simple_keys: - - # Add KEY. - key = self.possible_simple_keys[self.flow_level] - del self.possible_simple_keys[self.flow_level] - self.tokens.insert(key.token_number-self.tokens_taken, - KeyToken(key.mark, key.mark)) - - # If this key starts a new block mapping, we need to add - # BLOCK-MAPPING-START. - if not self.flow_level: - if self.add_indent(key.column): - self.tokens.insert(key.token_number-self.tokens_taken, - BlockMappingStartToken(key.mark, key.mark)) - - # There cannot be two simple keys one after another. - self.allow_simple_key = False - - # It must be a part of a complex key. - else: - - # Block context needs additional checks. - # (Do we really need them? They will be catched by the parser - # anyway.) - if not self.flow_level: - - # We are allowed to start a complex value if and only if - # we can start a simple key. - if not self.allow_simple_key: - raise ScannerError(None, None, - "mapping values are not allowed here", - self.get_mark()) - - # If this value starts a new block mapping, we need to add - # BLOCK-MAPPING-START. It will be detected as an error later by - # the parser. - if not self.flow_level: - if self.add_indent(self.column): - mark = self.get_mark() - self.tokens.append(BlockMappingStartToken(mark, mark)) - - # Simple keys are allowed after ':' in the block context. - self.allow_simple_key = not self.flow_level - - # Reset possible simple key on the current level. - self.remove_possible_simple_key() - - # Add VALUE. 
- start_mark = self.get_mark() - self.forward() - end_mark = self.get_mark() - self.tokens.append(ValueToken(start_mark, end_mark)) - - def fetch_alias(self): - - # ALIAS could be a simple key. - self.save_possible_simple_key() - - # No simple keys after ALIAS. - self.allow_simple_key = False - - # Scan and add ALIAS. - self.tokens.append(self.scan_anchor(AliasToken)) - - def fetch_anchor(self): - - # ANCHOR could start a simple key. - self.save_possible_simple_key() - - # No simple keys after ANCHOR. - self.allow_simple_key = False - - # Scan and add ANCHOR. - self.tokens.append(self.scan_anchor(AnchorToken)) - - def fetch_tag(self): - - # TAG could start a simple key. - self.save_possible_simple_key() - - # No simple keys after TAG. - self.allow_simple_key = False - - # Scan and add TAG. - self.tokens.append(self.scan_tag()) - - def fetch_literal(self): - self.fetch_block_scalar(style='|') - - def fetch_folded(self): - self.fetch_block_scalar(style='>') - - def fetch_block_scalar(self, style): - - # A simple key may follow a block scalar. - self.allow_simple_key = True - - # Reset possible simple key on the current level. - self.remove_possible_simple_key() - - # Scan and add SCALAR. - self.tokens.append(self.scan_block_scalar(style)) - - def fetch_single(self): - self.fetch_flow_scalar(style='\'') - - def fetch_double(self): - self.fetch_flow_scalar(style='"') - - def fetch_flow_scalar(self, style): - - # A flow scalar could be a simple key. - self.save_possible_simple_key() - - # No simple keys after flow scalars. - self.allow_simple_key = False - - # Scan and add SCALAR. - self.tokens.append(self.scan_flow_scalar(style)) - - def fetch_plain(self): - - # A plain scalar could be a simple key. - self.save_possible_simple_key() - - # No simple keys after plain scalars. But note that `scan_plain` will - # change this flag if the scan is finished at the beginning of the - # line. - self.allow_simple_key = False - - # Scan and add SCALAR. May change `allow_simple_key`. - self.tokens.append(self.scan_plain()) - - # Checkers. - - def check_directive(self): - - # DIRECTIVE: ^ '%' ... - # The '%' indicator is already checked. - if self.column == 0: - return True - - def check_document_start(self): - - # DOCUMENT-START: ^ '---' (' '|'\n') - if self.column == 0: - if self.prefix(3) == u'---' \ - and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': - return True - - def check_document_end(self): - - # DOCUMENT-END: ^ '...' (' '|'\n') - if self.column == 0: - if self.prefix(3) == u'...' \ - and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': - return True - - def check_block_entry(self): - - # BLOCK-ENTRY: '-' (' '|'\n') - return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029' - - def check_key(self): - - # KEY(flow context): '?' - if self.flow_level: - return True - - # KEY(block context): '?' (' '|'\n') - else: - return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029' - - def check_value(self): - - # VALUE(flow context): ':' - if self.flow_level: - return True - - # VALUE(block context): ':' (' '|'\n') - else: - return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029' - - def check_plain(self): - - # A plain scalar may start with any non-space character except: - # '-', '?', ':', ',', '[', ']', '{', '}', - # '#', '&', '*', '!', '|', '>', '\'', '\"', - # '%', '@', '`'. - # - # It may also start with - # '-', '?', ':' - # if it is followed by a non-space character. 
- # - # Note that we limit the last rule to the block context (except the - # '-' character) because we want the flow context to be space - # independent. - ch = self.peek() - return ch not in u'\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \ - or (self.peek(1) not in u'\0 \t\r\n\x85\u2028\u2029' - and (ch == u'-' or (not self.flow_level and ch in u'?:'))) - - # Scanners. - - def scan_to_next_token(self): - # We ignore spaces, line breaks and comments. - # If we find a line break in the block context, we set the flag - # `allow_simple_key` on. - # The byte order mark is stripped if it's the first character in the - # stream. We do not yet support BOM inside the stream as the - # specification requires. Any such mark will be considered as a part - # of the document. - # - # TODO: We need to make tab handling rules more sane. A good rule is - # Tabs cannot precede tokens - # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END, - # KEY(block), VALUE(block), BLOCK-ENTRY - # So the checking code is - # if : - # self.allow_simple_keys = False - # We also need to add the check for `allow_simple_keys == True` to - # `unwind_indent` before issuing BLOCK-END. - # Scanners for block, flow, and plain scalars need to be modified. - - if self.index == 0 and self.peek() == u'\uFEFF': - self.forward() - found = False - while not found: - while self.peek() == u' ': - self.forward() - if self.peek() == u'#': - while self.peek() not in u'\0\r\n\x85\u2028\u2029': - self.forward() - if self.scan_line_break(): - if not self.flow_level: - self.allow_simple_key = True - else: - found = True - - def scan_directive(self): - # See the specification for details. - start_mark = self.get_mark() - self.forward() - name = self.scan_directive_name(start_mark) - value = None - if name == u'YAML': - value = self.scan_yaml_directive_value(start_mark) - end_mark = self.get_mark() - elif name == u'TAG': - value = self.scan_tag_directive_value(start_mark) - end_mark = self.get_mark() - else: - end_mark = self.get_mark() - while self.peek() not in u'\0\r\n\x85\u2028\u2029': - self.forward() - self.scan_directive_ignored_line(start_mark) - return DirectiveToken(name, value, start_mark, end_mark) - - def scan_directive_name(self, start_mark): - # See the specification for details. - length = 0 - ch = self.peek(length) - while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ - or ch in u'-_': - length += 1 - ch = self.peek(length) - if not length: - raise ScannerError("while scanning a directive", start_mark, - "expected alphabetic or numeric character, but found %r" - % ch.encode('utf-8'), self.get_mark()) - value = self.prefix(length) - self.forward(length) - ch = self.peek() - if ch not in u'\0 \r\n\x85\u2028\u2029': - raise ScannerError("while scanning a directive", start_mark, - "expected alphabetic or numeric character, but found %r" - % ch.encode('utf-8'), self.get_mark()) - return value - - def scan_yaml_directive_value(self, start_mark): - # See the specification for details. 
- while self.peek() == u' ': - self.forward() - major = self.scan_yaml_directive_number(start_mark) - if self.peek() != '.': - raise ScannerError("while scanning a directive", start_mark, - "expected a digit or '.', but found %r" - % self.peek().encode('utf-8'), - self.get_mark()) - self.forward() - minor = self.scan_yaml_directive_number(start_mark) - if self.peek() not in u'\0 \r\n\x85\u2028\u2029': - raise ScannerError("while scanning a directive", start_mark, - "expected a digit or ' ', but found %r" - % self.peek().encode('utf-8'), - self.get_mark()) - return (major, minor) - - def scan_yaml_directive_number(self, start_mark): - # See the specification for details. - ch = self.peek() - if not (u'0' <= ch <= u'9'): - raise ScannerError("while scanning a directive", start_mark, - "expected a digit, but found %r" % ch.encode('utf-8'), - self.get_mark()) - length = 0 - while u'0' <= self.peek(length) <= u'9': - length += 1 - value = int(self.prefix(length)) - self.forward(length) - return value - - def scan_tag_directive_value(self, start_mark): - # See the specification for details. - while self.peek() == u' ': - self.forward() - handle = self.scan_tag_directive_handle(start_mark) - while self.peek() == u' ': - self.forward() - prefix = self.scan_tag_directive_prefix(start_mark) - return (handle, prefix) - - def scan_tag_directive_handle(self, start_mark): - # See the specification for details. - value = self.scan_tag_handle('directive', start_mark) - ch = self.peek() - if ch != u' ': - raise ScannerError("while scanning a directive", start_mark, - "expected ' ', but found %r" % ch.encode('utf-8'), - self.get_mark()) - return value - - def scan_tag_directive_prefix(self, start_mark): - # See the specification for details. - value = self.scan_tag_uri('directive', start_mark) - ch = self.peek() - if ch not in u'\0 \r\n\x85\u2028\u2029': - raise ScannerError("while scanning a directive", start_mark, - "expected ' ', but found %r" % ch.encode('utf-8'), - self.get_mark()) - return value - - def scan_directive_ignored_line(self, start_mark): - # See the specification for details. - while self.peek() == u' ': - self.forward() - if self.peek() == u'#': - while self.peek() not in u'\0\r\n\x85\u2028\u2029': - self.forward() - ch = self.peek() - if ch not in u'\0\r\n\x85\u2028\u2029': - raise ScannerError("while scanning a directive", start_mark, - "expected a comment or a line break, but found %r" - % ch.encode('utf-8'), self.get_mark()) - self.scan_line_break() - - def scan_anchor(self, TokenClass): - # The specification does not restrict characters for anchors and - # aliases. This may lead to problems, for instance, the document: - # [ *alias, value ] - # can be interpteted in two ways, as - # [ "value" ] - # and - # [ *alias , "value" ] - # Therefore we restrict aliases to numbers and ASCII letters. 
- start_mark = self.get_mark() - indicator = self.peek() - if indicator == u'*': - name = 'alias' - else: - name = 'anchor' - self.forward() - length = 0 - ch = self.peek(length) - while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ - or ch in u'-_': - length += 1 - ch = self.peek(length) - if not length: - raise ScannerError("while scanning an %s" % name, start_mark, - "expected alphabetic or numeric character, but found %r" - % ch.encode('utf-8'), self.get_mark()) - value = self.prefix(length) - self.forward(length) - ch = self.peek() - if ch not in u'\0 \t\r\n\x85\u2028\u2029?:,]}%@`': - raise ScannerError("while scanning an %s" % name, start_mark, - "expected alphabetic or numeric character, but found %r" - % ch.encode('utf-8'), self.get_mark()) - end_mark = self.get_mark() - return TokenClass(value, start_mark, end_mark) - - def scan_tag(self): - # See the specification for details. - start_mark = self.get_mark() - ch = self.peek(1) - if ch == u'<': - handle = None - self.forward(2) - suffix = self.scan_tag_uri('tag', start_mark) - if self.peek() != u'>': - raise ScannerError("while parsing a tag", start_mark, - "expected '>', but found %r" % self.peek().encode('utf-8'), - self.get_mark()) - self.forward() - elif ch in u'\0 \t\r\n\x85\u2028\u2029': - handle = None - suffix = u'!' - self.forward() - else: - length = 1 - use_handle = False - while ch not in u'\0 \r\n\x85\u2028\u2029': - if ch == u'!': - use_handle = True - break - length += 1 - ch = self.peek(length) - handle = u'!' - if use_handle: - handle = self.scan_tag_handle('tag', start_mark) - else: - handle = u'!' - self.forward() - suffix = self.scan_tag_uri('tag', start_mark) - ch = self.peek() - if ch not in u'\0 \r\n\x85\u2028\u2029': - raise ScannerError("while scanning a tag", start_mark, - "expected ' ', but found %r" % ch.encode('utf-8'), - self.get_mark()) - value = (handle, suffix) - end_mark = self.get_mark() - return TagToken(value, start_mark, end_mark) - - def scan_block_scalar(self, style): - # See the specification for details. - - if style == '>': - folded = True - else: - folded = False - - chunks = [] - start_mark = self.get_mark() - - # Scan the header. - self.forward() - chomping, increment = self.scan_block_scalar_indicators(start_mark) - self.scan_block_scalar_ignored_line(start_mark) - - # Determine the indentation level and go to the first non-empty line. - min_indent = self.indent+1 - if min_indent < 1: - min_indent = 1 - if increment is None: - breaks, max_indent, end_mark = self.scan_block_scalar_indentation() - indent = max(min_indent, max_indent) - else: - indent = min_indent+increment-1 - breaks, end_mark = self.scan_block_scalar_breaks(indent) - line_break = u'' - - # Scan the inner part of the block scalar. - while self.column == indent and self.peek() != u'\0': - chunks.extend(breaks) - leading_non_space = self.peek() not in u' \t' - length = 0 - while self.peek(length) not in u'\0\r\n\x85\u2028\u2029': - length += 1 - chunks.append(self.prefix(length)) - self.forward(length) - line_break = self.scan_line_break() - breaks, end_mark = self.scan_block_scalar_breaks(indent) - if self.column == indent and self.peek() != u'\0': - - # Unfortunately, folding rules are ambiguous. 
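Before the folding rules are spelled out below, the header indicators read by scan_block_scalar_indicators() are worth illustrating: '-' strips trailing line breaks, the default clips them to one, and '+' keeps them all. A minimal sketch, assuming the standalone PyYAML package:

    import yaml

    print(repr(yaml.safe_load("s: |-\n  text\n\n")["s"]))  # 'text'      (strip)
    print(repr(yaml.safe_load("s: |\n  text\n\n")["s"]))   # 'text\n'    (clip)
    print(repr(yaml.safe_load("s: |+\n  text\n\n")["s"]))  # 'text\n\n'  (keep)
    print(repr(yaml.safe_load("f: >\n  a\n  b\n")["f"]))   # 'a b\n'     (folded)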
- # - # This is the folding according to the specification: - - if folded and line_break == u'\n' \ - and leading_non_space and self.peek() not in u' \t': - if not breaks: - chunks.append(u' ') - else: - chunks.append(line_break) - - # This is Clark Evans's interpretation (also in the spec - # examples): - # - #if folded and line_break == u'\n': - # if not breaks: - # if self.peek() not in ' \t': - # chunks.append(u' ') - # else: - # chunks.append(line_break) - #else: - # chunks.append(line_break) - else: - break - - # Chomp the tail. - if chomping is not False: - chunks.append(line_break) - if chomping is True: - chunks.extend(breaks) - - # We are done. - return ScalarToken(u''.join(chunks), False, start_mark, end_mark, - style) - - def scan_block_scalar_indicators(self, start_mark): - # See the specification for details. - chomping = None - increment = None - ch = self.peek() - if ch in u'+-': - if ch == '+': - chomping = True - else: - chomping = False - self.forward() - ch = self.peek() - if ch in u'0123456789': - increment = int(ch) - if increment == 0: - raise ScannerError("while scanning a block scalar", start_mark, - "expected indentation indicator in the range 1-9, but found 0", - self.get_mark()) - self.forward() - elif ch in u'0123456789': - increment = int(ch) - if increment == 0: - raise ScannerError("while scanning a block scalar", start_mark, - "expected indentation indicator in the range 1-9, but found 0", - self.get_mark()) - self.forward() - ch = self.peek() - if ch in u'+-': - if ch == '+': - chomping = True - else: - chomping = False - self.forward() - ch = self.peek() - if ch not in u'\0 \r\n\x85\u2028\u2029': - raise ScannerError("while scanning a block scalar", start_mark, - "expected chomping or indentation indicators, but found %r" - % ch.encode('utf-8'), self.get_mark()) - return chomping, increment - - def scan_block_scalar_ignored_line(self, start_mark): - # See the specification for details. - while self.peek() == u' ': - self.forward() - if self.peek() == u'#': - while self.peek() not in u'\0\r\n\x85\u2028\u2029': - self.forward() - ch = self.peek() - if ch not in u'\0\r\n\x85\u2028\u2029': - raise ScannerError("while scanning a block scalar", start_mark, - "expected a comment or a line break, but found %r" - % ch.encode('utf-8'), self.get_mark()) - self.scan_line_break() - - def scan_block_scalar_indentation(self): - # See the specification for details. - chunks = [] - max_indent = 0 - end_mark = self.get_mark() - while self.peek() in u' \r\n\x85\u2028\u2029': - if self.peek() != u' ': - chunks.append(self.scan_line_break()) - end_mark = self.get_mark() - else: - self.forward() - if self.column > max_indent: - max_indent = self.column - return chunks, max_indent, end_mark - - def scan_block_scalar_breaks(self, indent): - # See the specification for details. - chunks = [] - end_mark = self.get_mark() - while self.column < indent and self.peek() == u' ': - self.forward() - while self.peek() in u'\r\n\x85\u2028\u2029': - chunks.append(self.scan_line_break()) - end_mark = self.get_mark() - while self.column < indent and self.peek() == u' ': - self.forward() - return chunks, end_mark - - def scan_flow_scalar(self, style): - # See the specification for details. - # Note that we loose indentation rules for quoted scalars. Quoted - # scalars don't need to adhere indentation because " and ' clearly - # mark the beginning and the end of them. Therefore we are less - # restrictive then the specification requires. 
We only need to check - # that document separators are not included in scalars. - if style == '"': - double = True - else: - double = False - chunks = [] - start_mark = self.get_mark() - quote = self.peek() - self.forward() - chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark)) - while self.peek() != quote: - chunks.extend(self.scan_flow_scalar_spaces(double, start_mark)) - chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark)) - self.forward() - end_mark = self.get_mark() - return ScalarToken(u''.join(chunks), False, start_mark, end_mark, - style) - - ESCAPE_REPLACEMENTS = { - u'0': u'\0', - u'a': u'\x07', - u'b': u'\x08', - u't': u'\x09', - u'\t': u'\x09', - u'n': u'\x0A', - u'v': u'\x0B', - u'f': u'\x0C', - u'r': u'\x0D', - u'e': u'\x1B', - u' ': u'\x20', - u'\"': u'\"', - u'\\': u'\\', - u'N': u'\x85', - u'_': u'\xA0', - u'L': u'\u2028', - u'P': u'\u2029', - } - - ESCAPE_CODES = { - u'x': 2, - u'u': 4, - u'U': 8, - } - - def scan_flow_scalar_non_spaces(self, double, start_mark): - # See the specification for details. - chunks = [] - while True: - length = 0 - while self.peek(length) not in u'\'\"\\\0 \t\r\n\x85\u2028\u2029': - length += 1 - if length: - chunks.append(self.prefix(length)) - self.forward(length) - ch = self.peek() - if not double and ch == u'\'' and self.peek(1) == u'\'': - chunks.append(u'\'') - self.forward(2) - elif (double and ch == u'\'') or (not double and ch in u'\"\\'): - chunks.append(ch) - self.forward() - elif double and ch == u'\\': - self.forward() - ch = self.peek() - if ch in self.ESCAPE_REPLACEMENTS: - chunks.append(self.ESCAPE_REPLACEMENTS[ch]) - self.forward() - elif ch in self.ESCAPE_CODES: - length = self.ESCAPE_CODES[ch] - self.forward() - for k in range(length): - if self.peek(k) not in u'0123456789ABCDEFabcdef': - raise ScannerError("while scanning a double-quoted scalar", start_mark, - "expected escape sequence of %d hexdecimal numbers, but found %r" % - (length, self.peek(k).encode('utf-8')), self.get_mark()) - code = int(self.prefix(length), 16) - chunks.append(unichr(code)) - self.forward(length) - elif ch in u'\r\n\x85\u2028\u2029': - self.scan_line_break() - chunks.extend(self.scan_flow_scalar_breaks(double, start_mark)) - else: - raise ScannerError("while scanning a double-quoted scalar", start_mark, - "found unknown escape character %r" % ch.encode('utf-8'), self.get_mark()) - else: - return chunks - - def scan_flow_scalar_spaces(self, double, start_mark): - # See the specification for details. - chunks = [] - length = 0 - while self.peek(length) in u' \t': - length += 1 - whitespaces = self.prefix(length) - self.forward(length) - ch = self.peek() - if ch == u'\0': - raise ScannerError("while scanning a quoted scalar", start_mark, - "found unexpected end of stream", self.get_mark()) - elif ch in u'\r\n\x85\u2028\u2029': - line_break = self.scan_line_break() - breaks = self.scan_flow_scalar_breaks(double, start_mark) - if line_break != u'\n': - chunks.append(line_break) - elif not breaks: - chunks.append(u' ') - chunks.extend(breaks) - else: - chunks.append(whitespaces) - return chunks - - def scan_flow_scalar_breaks(self, double, start_mark): - # See the specification for details. - chunks = [] - while True: - # Instead of checking indentation, we check for document - # separators. 
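The ESCAPE_REPLACEMENTS and ESCAPE_CODES tables above drive double-quoted scalar parsing; single-quoted scalars support only the doubled-quote escape. A minimal sketch, assuming the standalone PyYAML package:

    import yaml

    # \x, \u and \U escapes take 2, 4 and 8 hexadecimal digits respectively.
    print(repr(yaml.safe_load(r'a: "\x41\u263A\t"')["a"]))  # expected: 'A\u263a\t'
    print(yaml.safe_load("a: 'it''s'")["a"])                # expected: it's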
- prefix = self.prefix(3) - if (prefix == u'---' or prefix == u'...') \ - and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': - raise ScannerError("while scanning a quoted scalar", start_mark, - "found unexpected document separator", self.get_mark()) - while self.peek() in u' \t': - self.forward() - if self.peek() in u'\r\n\x85\u2028\u2029': - chunks.append(self.scan_line_break()) - else: - return chunks - - def scan_plain(self): - # See the specification for details. - # We add an additional restriction for the flow context: - # plain scalars in the flow context cannot contain ',', ':' and '?'. - # We also keep track of the `allow_simple_key` flag here. - # Indentation rules are loosed for the flow context. - chunks = [] - start_mark = self.get_mark() - end_mark = start_mark - indent = self.indent+1 - # We allow zero indentation for scalars, but then we need to check for - # document separators at the beginning of the line. - #if indent == 0: - # indent = 1 - spaces = [] - while True: - length = 0 - if self.peek() == u'#': - break - while True: - ch = self.peek(length) - if ch in u'\0 \t\r\n\x85\u2028\u2029' \ - or (not self.flow_level and ch == u':' and - self.peek(length+1) in u'\0 \t\r\n\x85\u2028\u2029') \ - or (self.flow_level and ch in u',:?[]{}'): - break - length += 1 - # It's not clear what we should do with ':' in the flow context. - if (self.flow_level and ch == u':' - and self.peek(length+1) not in u'\0 \t\r\n\x85\u2028\u2029,[]{}'): - self.forward(length) - raise ScannerError("while scanning a plain scalar", start_mark, - "found unexpected ':'", self.get_mark(), - "Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.") - if length == 0: - break - self.allow_simple_key = False - chunks.extend(spaces) - chunks.append(self.prefix(length)) - self.forward(length) - end_mark = self.get_mark() - spaces = self.scan_plain_spaces(indent, start_mark) - if not spaces or self.peek() == u'#' \ - or (not self.flow_level and self.column < indent): - break - return ScalarToken(u''.join(chunks), True, start_mark, end_mark) - - def scan_plain_spaces(self, indent, start_mark): - # See the specification for details. - # The specification is really confusing about tabs in plain scalars. - # We just forbid them completely. Do not use tabs in YAML! - chunks = [] - length = 0 - while self.peek(length) in u' ': - length += 1 - whitespaces = self.prefix(length) - self.forward(length) - ch = self.peek() - if ch in u'\r\n\x85\u2028\u2029': - line_break = self.scan_line_break() - self.allow_simple_key = True - prefix = self.prefix(3) - if (prefix == u'---' or prefix == u'...') \ - and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': - return - breaks = [] - while self.peek() in u' \r\n\x85\u2028\u2029': - if self.peek() == ' ': - self.forward() - else: - breaks.append(self.scan_line_break()) - prefix = self.prefix(3) - if (prefix == u'---' or prefix == u'...') \ - and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': - return - if line_break != u'\n': - chunks.append(line_break) - elif not breaks: - chunks.append(u' ') - chunks.extend(breaks) - elif whitespaces: - chunks.append(whitespaces) - return chunks - - def scan_tag_handle(self, name, start_mark): - # See the specification for details. - # For some strange reasons, the specification does not allow '_' in - # tag handles. I have allowed it anyway. 
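The plain-scalar rules above have a visible consequence: a ':' terminates the scalar only when followed by whitespace in the block context, while ',[]{}' also terminate it in the flow context. A minimal sketch, assuming the standalone PyYAML package:

    import yaml

    # The colons inside the URL are not followed by a space, so they stay
    # part of the plain scalar.
    print(yaml.safe_load("url: http://example.com/a:b")["url"])  # http://example.com/a:b
    print(yaml.safe_load("[a, b c]"))                            # expected: ['a', 'b c']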
- ch = self.peek() - if ch != u'!': - raise ScannerError("while scanning a %s" % name, start_mark, - "expected '!', but found %r" % ch.encode('utf-8'), - self.get_mark()) - length = 1 - ch = self.peek(length) - if ch != u' ': - while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ - or ch in u'-_': - length += 1 - ch = self.peek(length) - if ch != u'!': - self.forward(length) - raise ScannerError("while scanning a %s" % name, start_mark, - "expected '!', but found %r" % ch.encode('utf-8'), - self.get_mark()) - length += 1 - value = self.prefix(length) - self.forward(length) - return value - - def scan_tag_uri(self, name, start_mark): - # See the specification for details. - # Note: we do not check if URI is well-formed. - chunks = [] - length = 0 - ch = self.peek(length) - while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ - or ch in u'-;/?:@&=+$,_.!~*\'()[]%': - if ch == u'%': - chunks.append(self.prefix(length)) - self.forward(length) - length = 0 - chunks.append(self.scan_uri_escapes(name, start_mark)) - else: - length += 1 - ch = self.peek(length) - if length: - chunks.append(self.prefix(length)) - self.forward(length) - length = 0 - if not chunks: - raise ScannerError("while parsing a %s" % name, start_mark, - "expected URI, but found %r" % ch.encode('utf-8'), - self.get_mark()) - return u''.join(chunks) - - def scan_uri_escapes(self, name, start_mark): - # See the specification for details. - bytes = [] - mark = self.get_mark() - while self.peek() == u'%': - self.forward() - for k in range(2): - if self.peek(k) not in u'0123456789ABCDEFabcdef': - raise ScannerError("while scanning a %s" % name, start_mark, - "expected URI escape sequence of 2 hexdecimal numbers, but found %r" % - (self.peek(k).encode('utf-8')), self.get_mark()) - bytes.append(chr(int(self.prefix(2), 16))) - self.forward(2) - try: - value = unicode(''.join(bytes), 'utf-8') - except UnicodeDecodeError, exc: - raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark) - return value - - def scan_line_break(self): - # Transforms: - # '\r\n' : '\n' - # '\r' : '\n' - # '\n' : '\n' - # '\x85' : '\n' - # '\u2028' : '\u2028' - # '\u2029 : '\u2029' - # default : '' - ch = self.peek() - if ch in u'\r\n\x85': - if self.prefix(2) == u'\r\n': - self.forward(2) - else: - self.forward() - return u'\n' - elif ch in u'\u2028\u2029': - self.forward() - return ch - return u'' - -#try: -# import psyco -# psyco.bind(Scanner) -#except ImportError: -# pass - diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml2/serializer.py b/src/collectors/python.d.plugin/python_modules/pyyaml2/serializer.py deleted file mode 100644 index 15fdbb0c0..000000000 --- a/src/collectors/python.d.plugin/python_modules/pyyaml2/serializer.py +++ /dev/null @@ -1,112 +0,0 @@ -# SPDX-License-Identifier: MIT - -__all__ = ['Serializer', 'SerializerError'] - -from error import YAMLError -from events import * -from nodes import * - -class SerializerError(YAMLError): - pass - -class Serializer(object): - - ANCHOR_TEMPLATE = u'id%03d' - - def __init__(self, encoding=None, - explicit_start=None, explicit_end=None, version=None, tags=None): - self.use_encoding = encoding - self.use_explicit_start = explicit_start - self.use_explicit_end = explicit_end - self.use_version = version - self.use_tags = tags - self.serialized_nodes = {} - self.anchors = {} - self.last_anchor_id = 0 - self.closed = None - - def open(self): - if self.closed is None: - self.emit(StreamStartEvent(encoding=self.use_encoding)) - 
self.closed = False
-        elif self.closed:
-            raise SerializerError("serializer is closed")
-        else:
-            raise SerializerError("serializer is already opened")
-
-    def close(self):
-        if self.closed is None:
-            raise SerializerError("serializer is not opened")
-        elif not self.closed:
-            self.emit(StreamEndEvent())
-            self.closed = True
-
-    #def __del__(self):
-    #    self.close()
-
-    def serialize(self, node):
-        if self.closed is None:
-            raise SerializerError("serializer is not opened")
-        elif self.closed:
-            raise SerializerError("serializer is closed")
-        self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
-            version=self.use_version, tags=self.use_tags))
-        self.anchor_node(node)
-        self.serialize_node(node, None, None)
-        self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
-        self.serialized_nodes = {}
-        self.anchors = {}
-        self.last_anchor_id = 0
-
-    def anchor_node(self, node):
-        if node in self.anchors:
-            if self.anchors[node] is None:
-                self.anchors[node] = self.generate_anchor(node)
-        else:
-            self.anchors[node] = None
-            if isinstance(node, SequenceNode):
-                for item in node.value:
-                    self.anchor_node(item)
-            elif isinstance(node, MappingNode):
-                for key, value in node.value:
-                    self.anchor_node(key)
-                    self.anchor_node(value)
-
-    def generate_anchor(self, node):
-        self.last_anchor_id += 1
-        return self.ANCHOR_TEMPLATE % self.last_anchor_id
-
-    def serialize_node(self, node, parent, index):
-        alias = self.anchors[node]
-        if node in self.serialized_nodes:
-            self.emit(AliasEvent(alias))
-        else:
-            self.serialized_nodes[node] = True
-            self.descend_resolver(parent, index)
-            if isinstance(node, ScalarNode):
-                detected_tag = self.resolve(ScalarNode, node.value, (True, False))
-                default_tag = self.resolve(ScalarNode, node.value, (False, True))
-                implicit = (node.tag == detected_tag), (node.tag == default_tag)
-                self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
-                    style=node.style))
-            elif isinstance(node, SequenceNode):
-                implicit = (node.tag
-                            == self.resolve(SequenceNode, node.value, True))
-                self.emit(SequenceStartEvent(alias, node.tag, implicit,
-                    flow_style=node.flow_style))
-                index = 0
-                for item in node.value:
-                    self.serialize_node(item, node, index)
-                    index += 1
-                self.emit(SequenceEndEvent())
-            elif isinstance(node, MappingNode):
-                implicit = (node.tag
-                            == self.resolve(MappingNode, node.value, True))
-                self.emit(MappingStartEvent(alias, node.tag, implicit,
-                    flow_style=node.flow_style))
-                for key, value in node.value:
-                    self.serialize_node(key, node, None)
-                    self.serialize_node(value, node, key)
-                self.emit(MappingEndEvent())
-            self.ascend_resolver()
-
diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml2/tokens.py b/src/collectors/python.d.plugin/python_modules/pyyaml2/tokens.py
deleted file mode 100644
index c5c4fb116..000000000
--- a/src/collectors/python.d.plugin/python_modules/pyyaml2/tokens.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-class Token(object):
-    def __init__(self, start_mark, end_mark):
-        self.start_mark = start_mark
-        self.end_mark = end_mark
-    def __repr__(self):
-        attributes = [key for key in self.__dict__
-                if not key.endswith('_mark')]
-        attributes.sort()
-        arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
-                for key in attributes])
-        return '%s(%s)' % (self.__class__.__name__, arguments)
-
-#class BOMToken(Token):
-#    id = '<byte order mark>'
-
-class DirectiveToken(Token):
-    id = '<directive>'
-    def __init__(self, name, value, start_mark, end_mark):
-        self.name = name
-        self.value = value
-        self.start_mark = start_mark
-        self.end_mark = end_mark
-
-class DocumentStartToken(Token):
-    id = '<document start>'
-
-class DocumentEndToken(Token):
-    id = '<document end>'
-
-class StreamStartToken(Token):
-    id = '<stream start>'
-    def __init__(self, start_mark=None, end_mark=None,
-            encoding=None):
-        self.start_mark = start_mark
-        self.end_mark = end_mark
-        self.encoding = encoding
-
-class StreamEndToken(Token):
-    id = '<stream end>'
-
-class BlockSequenceStartToken(Token):
-    id = '<block sequence start>'
-
-class BlockMappingStartToken(Token):
-    id = '<block mapping start>'
-
-class BlockEndToken(Token):
-    id = '<block end>'
-
-class FlowSequenceStartToken(Token):
-    id = '['
-
-class FlowMappingStartToken(Token):
-    id = '{'
-
-class FlowSequenceEndToken(Token):
-    id = ']'
-
-class FlowMappingEndToken(Token):
-    id = '}'
-
-class KeyToken(Token):
-    id = '?'
-
-class ValueToken(Token):
-    id = ':'
-
-class BlockEntryToken(Token):
-    id = '-'
-
-class FlowEntryToken(Token):
-    id = ','
-
-class AliasToken(Token):
-    id = '<alias>'
-    def __init__(self, value, start_mark, end_mark):
-        self.value = value
-        self.start_mark = start_mark
-        self.end_mark = end_mark
-
-class AnchorToken(Token):
-    id = '<anchor>'
-    def __init__(self, value, start_mark, end_mark):
-        self.value = value
-        self.start_mark = start_mark
-        self.end_mark = end_mark
-
-class TagToken(Token):
-    id = '<tag>'
-    def __init__(self, value, start_mark, end_mark):
-        self.value = value
-        self.start_mark = start_mark
-        self.end_mark = end_mark
-
-class ScalarToken(Token):
-    id = '<scalar>'
-    def __init__(self, value, plain, start_mark, end_mark, style=None):
-        self.value = value
-        self.plain = plain
-        self.start_mark = start_mark
-        self.end_mark = end_mark
-        self.style = style
-
diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml3/__init__.py b/src/collectors/python.d.plugin/python_modules/pyyaml3/__init__.py
deleted file mode 100644
index a884b33cf..000000000
--- a/src/collectors/python.d.plugin/python_modules/pyyaml3/__init__.py
+++ /dev/null
@@ -1,313 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-from .error import *
-
-from .tokens import *
-from .events import *
-from .nodes import *
-
-from .loader import *
-from .dumper import *
-
-__version__ = '3.11'
-try:
-    from .cyaml import *
-    __with_libyaml__ = True
-except ImportError:
-    __with_libyaml__ = False
-
-import io
-
-def scan(stream, Loader=Loader):
-    """
-    Scan a YAML stream and produce scanning tokens.
-    """
-    loader = Loader(stream)
-    try:
-        while loader.check_token():
-            yield loader.get_token()
-    finally:
-        loader.dispose()
-
-def parse(stream, Loader=Loader):
-    """
-    Parse a YAML stream and produce parsing events.
-    """
-    loader = Loader(stream)
-    try:
-        while loader.check_event():
-            yield loader.get_event()
-    finally:
-        loader.dispose()
-
-def compose(stream, Loader=Loader):
-    """
-    Parse the first YAML document in a stream
-    and produce the corresponding representation tree.
-    """
-    loader = Loader(stream)
-    try:
-        return loader.get_single_node()
-    finally:
-        loader.dispose()
-
-def compose_all(stream, Loader=Loader):
-    """
-    Parse all YAML documents in a stream
-    and produce corresponding representation trees.
-    """
-    loader = Loader(stream)
-    try:
-        while loader.check_node():
-            yield loader.get_node()
-    finally:
-        loader.dispose()
-
-def load(stream, Loader=Loader):
-    """
-    Parse the first YAML document in a stream
-    and produce the corresponding Python object.
-    """
-    loader = Loader(stream)
-    try:
-        return loader.get_single_data()
-    finally:
-        loader.dispose()
-
-def load_all(stream, Loader=Loader):
-    """
-    Parse all YAML documents in a stream
-    and produce corresponding Python objects.
- """ - loader = Loader(stream) - try: - while loader.check_data(): - yield loader.get_data() - finally: - loader.dispose() - -def safe_load(stream): - """ - Parse the first YAML document in a stream - and produce the corresponding Python object. - Resolve only basic YAML tags. - """ - return load(stream, SafeLoader) - -def safe_load_all(stream): - """ - Parse all YAML documents in a stream - and produce corresponding Python objects. - Resolve only basic YAML tags. - """ - return load_all(stream, SafeLoader) - -def emit(events, stream=None, Dumper=Dumper, - canonical=None, indent=None, width=None, - allow_unicode=None, line_break=None): - """ - Emit YAML parsing events into a stream. - If stream is None, return the produced string instead. - """ - getvalue = None - if stream is None: - stream = io.StringIO() - getvalue = stream.getvalue - dumper = Dumper(stream, canonical=canonical, indent=indent, width=width, - allow_unicode=allow_unicode, line_break=line_break) - try: - for event in events: - dumper.emit(event) - finally: - dumper.dispose() - if getvalue: - return getvalue() - -def serialize_all(nodes, stream=None, Dumper=Dumper, - canonical=None, indent=None, width=None, - allow_unicode=None, line_break=None, - encoding=None, explicit_start=None, explicit_end=None, - version=None, tags=None): - """ - Serialize a sequence of representation trees into a YAML stream. - If stream is None, return the produced string instead. - """ - getvalue = None - if stream is None: - if encoding is None: - stream = io.StringIO() - else: - stream = io.BytesIO() - getvalue = stream.getvalue - dumper = Dumper(stream, canonical=canonical, indent=indent, width=width, - allow_unicode=allow_unicode, line_break=line_break, - encoding=encoding, version=version, tags=tags, - explicit_start=explicit_start, explicit_end=explicit_end) - try: - dumper.open() - for node in nodes: - dumper.serialize(node) - dumper.close() - finally: - dumper.dispose() - if getvalue: - return getvalue() - -def serialize(node, stream=None, Dumper=Dumper, **kwds): - """ - Serialize a representation tree into a YAML stream. - If stream is None, return the produced string instead. - """ - return serialize_all([node], stream, Dumper=Dumper, **kwds) - -def dump_all(documents, stream=None, Dumper=Dumper, - default_style=None, default_flow_style=None, - canonical=None, indent=None, width=None, - allow_unicode=None, line_break=None, - encoding=None, explicit_start=None, explicit_end=None, - version=None, tags=None): - """ - Serialize a sequence of Python objects into a YAML stream. - If stream is None, return the produced string instead. - """ - getvalue = None - if stream is None: - if encoding is None: - stream = io.StringIO() - else: - stream = io.BytesIO() - getvalue = stream.getvalue - dumper = Dumper(stream, default_style=default_style, - default_flow_style=default_flow_style, - canonical=canonical, indent=indent, width=width, - allow_unicode=allow_unicode, line_break=line_break, - encoding=encoding, version=version, tags=tags, - explicit_start=explicit_start, explicit_end=explicit_end) - try: - dumper.open() - for data in documents: - dumper.represent(data) - dumper.close() - finally: - dumper.dispose() - if getvalue: - return getvalue() - -def dump(data, stream=None, Dumper=Dumper, **kwds): - """ - Serialize a Python object into a YAML stream. - If stream is None, return the produced string instead. 
- """ - return dump_all([data], stream, Dumper=Dumper, **kwds) - -def safe_dump_all(documents, stream=None, **kwds): - """ - Serialize a sequence of Python objects into a YAML stream. - Produce only basic YAML tags. - If stream is None, return the produced string instead. - """ - return dump_all(documents, stream, Dumper=SafeDumper, **kwds) - -def safe_dump(data, stream=None, **kwds): - """ - Serialize a Python object into a YAML stream. - Produce only basic YAML tags. - If stream is None, return the produced string instead. - """ - return dump_all([data], stream, Dumper=SafeDumper, **kwds) - -def add_implicit_resolver(tag, regexp, first=None, - Loader=Loader, Dumper=Dumper): - """ - Add an implicit scalar detector. - If an implicit scalar value matches the given regexp, - the corresponding tag is assigned to the scalar. - first is a sequence of possible initial characters or None. - """ - Loader.add_implicit_resolver(tag, regexp, first) - Dumper.add_implicit_resolver(tag, regexp, first) - -def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper): - """ - Add a path based resolver for the given tag. - A path is a list of keys that forms a path - to a node in the representation tree. - Keys can be string values, integers, or None. - """ - Loader.add_path_resolver(tag, path, kind) - Dumper.add_path_resolver(tag, path, kind) - -def add_constructor(tag, constructor, Loader=Loader): - """ - Add a constructor for the given tag. - Constructor is a function that accepts a Loader instance - and a node object and produces the corresponding Python object. - """ - Loader.add_constructor(tag, constructor) - -def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader): - """ - Add a multi-constructor for the given tag prefix. - Multi-constructor is called for a node if its tag starts with tag_prefix. - Multi-constructor accepts a Loader instance, a tag suffix, - and a node object and produces the corresponding Python object. - """ - Loader.add_multi_constructor(tag_prefix, multi_constructor) - -def add_representer(data_type, representer, Dumper=Dumper): - """ - Add a representer for the given type. - Representer is a function accepting a Dumper instance - and an instance of the given data type - and producing the corresponding representation node. - """ - Dumper.add_representer(data_type, representer) - -def add_multi_representer(data_type, multi_representer, Dumper=Dumper): - """ - Add a representer for the given type. - Multi-representer is a function accepting a Dumper instance - and an instance of the given data type or subtype - and producing the corresponding representation node. - """ - Dumper.add_multi_representer(data_type, multi_representer) - -class YAMLObjectMetaclass(type): - """ - The metaclass for YAMLObject. - """ - def __init__(cls, name, bases, kwds): - super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds) - if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None: - cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml) - cls.yaml_dumper.add_representer(cls, cls.to_yaml) - -class YAMLObject(metaclass=YAMLObjectMetaclass): - """ - An object that can dump itself to a YAML stream - and load itself from a YAML stream. - """ - - __slots__ = () # no direct instantiation, so allow immutable subclasses - - yaml_loader = Loader - yaml_dumper = Dumper - - yaml_tag = None - yaml_flow_style = None - - @classmethod - def from_yaml(cls, loader, node): - """ - Convert a representation node to a Python object. 
- """ - return loader.construct_yaml_object(node, cls) - - @classmethod - def to_yaml(cls, dumper, data): - """ - Convert a Python object to a representation node. - """ - return dumper.represent_yaml_object(cls.yaml_tag, data, cls, - flow_style=cls.yaml_flow_style) - diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml3/composer.py b/src/collectors/python.d.plugin/python_modules/pyyaml3/composer.py deleted file mode 100644 index c418bba91..000000000 --- a/src/collectors/python.d.plugin/python_modules/pyyaml3/composer.py +++ /dev/null @@ -1,140 +0,0 @@ -# SPDX-License-Identifier: MIT - -__all__ = ['Composer', 'ComposerError'] - -from .error import MarkedYAMLError -from .events import * -from .nodes import * - -class ComposerError(MarkedYAMLError): - pass - -class Composer: - - def __init__(self): - self.anchors = {} - - def check_node(self): - # Drop the STREAM-START event. - if self.check_event(StreamStartEvent): - self.get_event() - - # If there are more documents available? - return not self.check_event(StreamEndEvent) - - def get_node(self): - # Get the root node of the next document. - if not self.check_event(StreamEndEvent): - return self.compose_document() - - def get_single_node(self): - # Drop the STREAM-START event. - self.get_event() - - # Compose a document if the stream is not empty. - document = None - if not self.check_event(StreamEndEvent): - document = self.compose_document() - - # Ensure that the stream contains no more documents. - if not self.check_event(StreamEndEvent): - event = self.get_event() - raise ComposerError("expected a single document in the stream", - document.start_mark, "but found another document", - event.start_mark) - - # Drop the STREAM-END event. - self.get_event() - - return document - - def compose_document(self): - # Drop the DOCUMENT-START event. - self.get_event() - - # Compose the root node. - node = self.compose_node(None, None) - - # Drop the DOCUMENT-END event. 
- self.get_event() - - self.anchors = {} - return node - - def compose_node(self, parent, index): - if self.check_event(AliasEvent): - event = self.get_event() - anchor = event.anchor - if anchor not in self.anchors: - raise ComposerError(None, None, "found undefined alias %r" - % anchor, event.start_mark) - return self.anchors[anchor] - event = self.peek_event() - anchor = event.anchor - if anchor is not None: - if anchor in self.anchors: - raise ComposerError("found duplicate anchor %r; first occurence" - % anchor, self.anchors[anchor].start_mark, - "second occurence", event.start_mark) - self.descend_resolver(parent, index) - if self.check_event(ScalarEvent): - node = self.compose_scalar_node(anchor) - elif self.check_event(SequenceStartEvent): - node = self.compose_sequence_node(anchor) - elif self.check_event(MappingStartEvent): - node = self.compose_mapping_node(anchor) - self.ascend_resolver() - return node - - def compose_scalar_node(self, anchor): - event = self.get_event() - tag = event.tag - if tag is None or tag == '!': - tag = self.resolve(ScalarNode, event.value, event.implicit) - node = ScalarNode(tag, event.value, - event.start_mark, event.end_mark, style=event.style) - if anchor is not None: - self.anchors[anchor] = node - return node - - def compose_sequence_node(self, anchor): - start_event = self.get_event() - tag = start_event.tag - if tag is None or tag == '!': - tag = self.resolve(SequenceNode, None, start_event.implicit) - node = SequenceNode(tag, [], - start_event.start_mark, None, - flow_style=start_event.flow_style) - if anchor is not None: - self.anchors[anchor] = node - index = 0 - while not self.check_event(SequenceEndEvent): - node.value.append(self.compose_node(node, index)) - index += 1 - end_event = self.get_event() - node.end_mark = end_event.end_mark - return node - - def compose_mapping_node(self, anchor): - start_event = self.get_event() - tag = start_event.tag - if tag is None or tag == '!': - tag = self.resolve(MappingNode, None, start_event.implicit) - node = MappingNode(tag, [], - start_event.start_mark, None, - flow_style=start_event.flow_style) - if anchor is not None: - self.anchors[anchor] = node - while not self.check_event(MappingEndEvent): - #key_event = self.peek_event() - item_key = self.compose_node(node, None) - #if item_key in node.value: - # raise ComposerError("while composing a mapping", start_event.start_mark, - # "found duplicate key", key_event.start_mark) - item_value = self.compose_node(node, item_key) - #node.value[item_key] = item_value - node.value.append((item_key, item_value)) - end_event = self.get_event() - node.end_mark = end_event.end_mark - return node - diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml3/constructor.py b/src/collectors/python.d.plugin/python_modules/pyyaml3/constructor.py deleted file mode 100644 index ee09a7a7e..000000000 --- a/src/collectors/python.d.plugin/python_modules/pyyaml3/constructor.py +++ /dev/null @@ -1,687 +0,0 @@ -# SPDX-License-Identifier: MIT - -__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor', - 'ConstructorError'] - -from .error import * -from .nodes import * - -import collections, datetime, base64, binascii, re, sys, types - -class ConstructorError(MarkedYAMLError): - pass - -class BaseConstructor: - - yaml_constructors = {} - yaml_multi_constructors = {} - - def __init__(self): - self.constructed_objects = {} - self.recursive_objects = {} - self.state_generators = [] - self.deep_construct = False - - def check_data(self): - # If there are more 
documents available? - return self.check_node() - - def get_data(self): - # Construct and return the next document. - if self.check_node(): - return self.construct_document(self.get_node()) - - def get_single_data(self): - # Ensure that the stream contains a single document and construct it. - node = self.get_single_node() - if node is not None: - return self.construct_document(node) - return None - - def construct_document(self, node): - data = self.construct_object(node) - while self.state_generators: - state_generators = self.state_generators - self.state_generators = [] - for generator in state_generators: - for dummy in generator: - pass - self.constructed_objects = {} - self.recursive_objects = {} - self.deep_construct = False - return data - - def construct_object(self, node, deep=False): - if node in self.constructed_objects: - return self.constructed_objects[node] - if deep: - old_deep = self.deep_construct - self.deep_construct = True - if node in self.recursive_objects: - raise ConstructorError(None, None, - "found unconstructable recursive node", node.start_mark) - self.recursive_objects[node] = None - constructor = None - tag_suffix = None - if node.tag in self.yaml_constructors: - constructor = self.yaml_constructors[node.tag] - else: - for tag_prefix in self.yaml_multi_constructors: - if node.tag.startswith(tag_prefix): - tag_suffix = node.tag[len(tag_prefix):] - constructor = self.yaml_multi_constructors[tag_prefix] - break - else: - if None in self.yaml_multi_constructors: - tag_suffix = node.tag - constructor = self.yaml_multi_constructors[None] - elif None in self.yaml_constructors: - constructor = self.yaml_constructors[None] - elif isinstance(node, ScalarNode): - constructor = self.__class__.construct_scalar - elif isinstance(node, SequenceNode): - constructor = self.__class__.construct_sequence - elif isinstance(node, MappingNode): - constructor = self.__class__.construct_mapping - if tag_suffix is None: - data = constructor(self, node) - else: - data = constructor(self, tag_suffix, node) - if isinstance(data, types.GeneratorType): - generator = data - data = next(generator) - if self.deep_construct: - for dummy in generator: - pass - else: - self.state_generators.append(generator) - self.constructed_objects[node] = data - del self.recursive_objects[node] - if deep: - self.deep_construct = old_deep - return data - - def construct_scalar(self, node): - if not isinstance(node, ScalarNode): - raise ConstructorError(None, None, - "expected a scalar node, but found %s" % node.id, - node.start_mark) - return node.value - - def construct_sequence(self, node, deep=False): - if not isinstance(node, SequenceNode): - raise ConstructorError(None, None, - "expected a sequence node, but found %s" % node.id, - node.start_mark) - return [self.construct_object(child, deep=deep) - for child in node.value] - - def construct_mapping(self, node, deep=False): - if not isinstance(node, MappingNode): - raise ConstructorError(None, None, - "expected a mapping node, but found %s" % node.id, - node.start_mark) - mapping = {} - for key_node, value_node in node.value: - key = self.construct_object(key_node, deep=deep) - if not isinstance(key, collections.Hashable): - raise ConstructorError("while constructing a mapping", node.start_mark, - "found unhashable key", key_node.start_mark) - value = self.construct_object(value_node, deep=deep) - mapping[key] = value - return mapping - - def construct_pairs(self, node, deep=False): - if not isinstance(node, MappingNode): - raise ConstructorError(None, 
None, - "expected a mapping node, but found %s" % node.id, - node.start_mark) - pairs = [] - for key_node, value_node in node.value: - key = self.construct_object(key_node, deep=deep) - value = self.construct_object(value_node, deep=deep) - pairs.append((key, value)) - return pairs - - @classmethod - def add_constructor(cls, tag, constructor): - if not 'yaml_constructors' in cls.__dict__: - cls.yaml_constructors = cls.yaml_constructors.copy() - cls.yaml_constructors[tag] = constructor - - @classmethod - def add_multi_constructor(cls, tag_prefix, multi_constructor): - if not 'yaml_multi_constructors' in cls.__dict__: - cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy() - cls.yaml_multi_constructors[tag_prefix] = multi_constructor - -class SafeConstructor(BaseConstructor): - - def construct_scalar(self, node): - if isinstance(node, MappingNode): - for key_node, value_node in node.value: - if key_node.tag == 'tag:yaml.org,2002:value': - return self.construct_scalar(value_node) - return super().construct_scalar(node) - - def flatten_mapping(self, node): - merge = [] - index = 0 - while index < len(node.value): - key_node, value_node = node.value[index] - if key_node.tag == 'tag:yaml.org,2002:merge': - del node.value[index] - if isinstance(value_node, MappingNode): - self.flatten_mapping(value_node) - merge.extend(value_node.value) - elif isinstance(value_node, SequenceNode): - submerge = [] - for subnode in value_node.value: - if not isinstance(subnode, MappingNode): - raise ConstructorError("while constructing a mapping", - node.start_mark, - "expected a mapping for merging, but found %s" - % subnode.id, subnode.start_mark) - self.flatten_mapping(subnode) - submerge.append(subnode.value) - submerge.reverse() - for value in submerge: - merge.extend(value) - else: - raise ConstructorError("while constructing a mapping", node.start_mark, - "expected a mapping or list of mappings for merging, but found %s" - % value_node.id, value_node.start_mark) - elif key_node.tag == 'tag:yaml.org,2002:value': - key_node.tag = 'tag:yaml.org,2002:str' - index += 1 - else: - index += 1 - if merge: - node.value = merge + node.value - - def construct_mapping(self, node, deep=False): - if isinstance(node, MappingNode): - self.flatten_mapping(node) - return super().construct_mapping(node, deep=deep) - - def construct_yaml_null(self, node): - self.construct_scalar(node) - return None - - bool_values = { - 'yes': True, - 'no': False, - 'true': True, - 'false': False, - 'on': True, - 'off': False, - } - - def construct_yaml_bool(self, node): - value = self.construct_scalar(node) - return self.bool_values[value.lower()] - - def construct_yaml_int(self, node): - value = self.construct_scalar(node) - value = value.replace('_', '') - sign = +1 - if value[0] == '-': - sign = -1 - if value[0] in '+-': - value = value[1:] - if value == '0': - return 0 - elif value.startswith('0b'): - return sign*int(value[2:], 2) - elif value.startswith('0x'): - return sign*int(value[2:], 16) - elif value[0] == '0': - return sign*int(value, 8) - elif ':' in value: - digits = [int(part) for part in value.split(':')] - digits.reverse() - base = 1 - value = 0 - for digit in digits: - value += digit*base - base *= 60 - return sign*value - else: - return sign*int(value) - - inf_value = 1e300 - while inf_value != inf_value*inf_value: - inf_value *= inf_value - nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99). 
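These constructors give SafeLoader its YAML 1.1 scalar semantics: 'yes'/'no'/'on'/'off' resolve to booleans, '0b'/'0x' prefixes and a bare leading zero (octal) are honoured, and ':'-separated digit runs are read as base-60 integers. A minimal sketch, assuming the standalone PyYAML package:

    import yaml

    print(yaml.safe_load("a: yes"))      # expected: {'a': True}
    print(yaml.safe_load("b: 0x1A"))     # expected: {'b': 26}
    print(yaml.safe_load("c: 1:30:00"))  # expected: {'c': 5400}  (1h 30m in seconds)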
-
-    def construct_yaml_float(self, node):
-        value = self.construct_scalar(node)
-        value = value.replace('_', '').lower()
-        sign = +1
-        if value[0] == '-':
-            sign = -1
-        if value[0] in '+-':
-            value = value[1:]
-        if value == '.inf':
-            return sign*self.inf_value
-        elif value == '.nan':
-            return self.nan_value
-        elif ':' in value:
-            digits = [float(part) for part in value.split(':')]
-            digits.reverse()
-            base = 1
-            value = 0.0
-            for digit in digits:
-                value += digit*base
-                base *= 60
-            return sign*value
-        else:
-            return sign*float(value)
-
-    def construct_yaml_binary(self, node):
-        try:
-            value = self.construct_scalar(node).encode('ascii')
-        except UnicodeEncodeError as exc:
-            raise ConstructorError(None, None,
-                    "failed to convert base64 data into ascii: %s" % exc,
-                    node.start_mark)
-        try:
-            if hasattr(base64, 'decodebytes'):
-                return base64.decodebytes(value)
-            else:
-                return base64.decodestring(value)
-        except binascii.Error as exc:
-            raise ConstructorError(None, None,
-                    "failed to decode base64 data: %s" % exc, node.start_mark)
-
-    timestamp_regexp = re.compile(
-            r'''^(?P<year>[0-9][0-9][0-9][0-9])
-                -(?P<month>[0-9][0-9]?)
-                -(?P<day>[0-9][0-9]?)
-                (?:(?:[Tt]|[ \t]+)
-                (?P<hour>[0-9][0-9]?)
-                :(?P<minute>[0-9][0-9])
-                :(?P<second>[0-9][0-9])
-                (?:\.(?P<fraction>[0-9]*))?
-                (?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
-                (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X)
-
-    def construct_yaml_timestamp(self, node):
-        value = self.construct_scalar(node)
-        match = self.timestamp_regexp.match(node.value)
-        values = match.groupdict()
-        year = int(values['year'])
-        month = int(values['month'])
-        day = int(values['day'])
-        if not values['hour']:
-            return datetime.date(year, month, day)
-        hour = int(values['hour'])
-        minute = int(values['minute'])
-        second = int(values['second'])
-        fraction = 0
-        if values['fraction']:
-            fraction = values['fraction'][:6]
-            while len(fraction) < 6:
-                fraction += '0'
-            fraction = int(fraction)
-        delta = None
-        if values['tz_sign']:
-            tz_hour = int(values['tz_hour'])
-            tz_minute = int(values['tz_minute'] or 0)
-            delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
-            if values['tz_sign'] == '-':
-                delta = -delta
-        data = datetime.datetime(year, month, day, hour, minute, second, fraction)
-        if delta:
-            data -= delta
-        return data
-
-    def construct_yaml_omap(self, node):
-        # Note: we do not check for duplicate keys, because it's too
-        # CPU-expensive.
-        omap = []
-        yield omap
-        if not isinstance(node, SequenceNode):
-            raise ConstructorError("while constructing an ordered map", node.start_mark,
-                    "expected a sequence, but found %s" % node.id, node.start_mark)
-        for subnode in node.value:
-            if not isinstance(subnode, MappingNode):
-                raise ConstructorError("while constructing an ordered map", node.start_mark,
-                        "expected a mapping of length 1, but found %s" % subnode.id,
-                        subnode.start_mark)
-            if len(subnode.value) != 1:
-                raise ConstructorError("while constructing an ordered map", node.start_mark,
-                        "expected a single mapping item, but found %d items" % len(subnode.value),
-                        subnode.start_mark)
-            key_node, value_node = subnode.value[0]
-            key = self.construct_object(key_node)
-            value = self.construct_object(value_node)
-            omap.append((key, value))
-
-    def construct_yaml_pairs(self, node):
-        # Note: the same code as `construct_yaml_omap`.
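The timestamp machinery above accepts the YAML 1.1 date and datetime formats; a timezone offset, when present, is folded back into a naive UTC value. A minimal sketch, assuming the standalone PyYAML package:

    import yaml

    print(yaml.safe_load("d: 2001-12-14"))
    # expected: {'d': datetime.date(2001, 12, 14)}
    print(yaml.safe_load("t: 2001-12-14 21:59:43.10 -5"))
    # expected: {'t': datetime.datetime(2001, 12, 15, 2, 59, 43, 100000)}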
- pairs = [] - yield pairs - if not isinstance(node, SequenceNode): - raise ConstructorError("while constructing pairs", node.start_mark, - "expected a sequence, but found %s" % node.id, node.start_mark) - for subnode in node.value: - if not isinstance(subnode, MappingNode): - raise ConstructorError("while constructing pairs", node.start_mark, - "expected a mapping of length 1, but found %s" % subnode.id, - subnode.start_mark) - if len(subnode.value) != 1: - raise ConstructorError("while constructing pairs", node.start_mark, - "expected a single mapping item, but found %d items" % len(subnode.value), - subnode.start_mark) - key_node, value_node = subnode.value[0] - key = self.construct_object(key_node) - value = self.construct_object(value_node) - pairs.append((key, value)) - - def construct_yaml_set(self, node): - data = set() - yield data - value = self.construct_mapping(node) - data.update(value) - - def construct_yaml_str(self, node): - return self.construct_scalar(node) - - def construct_yaml_seq(self, node): - data = [] - yield data - data.extend(self.construct_sequence(node)) - - def construct_yaml_map(self, node): - data = {} - yield data - value = self.construct_mapping(node) - data.update(value) - - def construct_yaml_object(self, node, cls): - data = cls.__new__(cls) - yield data - if hasattr(data, '__setstate__'): - state = self.construct_mapping(node, deep=True) - data.__setstate__(state) - else: - state = self.construct_mapping(node) - data.__dict__.update(state) - - def construct_undefined(self, node): - raise ConstructorError(None, None, - "could not determine a constructor for the tag %r" % node.tag, - node.start_mark) - -SafeConstructor.add_constructor( - 'tag:yaml.org,2002:null', - SafeConstructor.construct_yaml_null) - -SafeConstructor.add_constructor( - 'tag:yaml.org,2002:bool', - SafeConstructor.construct_yaml_bool) - -SafeConstructor.add_constructor( - 'tag:yaml.org,2002:int', - SafeConstructor.construct_yaml_int) - -SafeConstructor.add_constructor( - 'tag:yaml.org,2002:float', - SafeConstructor.construct_yaml_float) - -SafeConstructor.add_constructor( - 'tag:yaml.org,2002:binary', - SafeConstructor.construct_yaml_binary) - -SafeConstructor.add_constructor( - 'tag:yaml.org,2002:timestamp', - SafeConstructor.construct_yaml_timestamp) - -SafeConstructor.add_constructor( - 'tag:yaml.org,2002:omap', - SafeConstructor.construct_yaml_omap) - -SafeConstructor.add_constructor( - 'tag:yaml.org,2002:pairs', - SafeConstructor.construct_yaml_pairs) - -SafeConstructor.add_constructor( - 'tag:yaml.org,2002:set', - SafeConstructor.construct_yaml_set) - -SafeConstructor.add_constructor( - 'tag:yaml.org,2002:str', - SafeConstructor.construct_yaml_str) - -SafeConstructor.add_constructor( - 'tag:yaml.org,2002:seq', - SafeConstructor.construct_yaml_seq) - -SafeConstructor.add_constructor( - 'tag:yaml.org,2002:map', - SafeConstructor.construct_yaml_map) - -SafeConstructor.add_constructor(None, - SafeConstructor.construct_undefined) - -class Constructor(SafeConstructor): - - def construct_python_str(self, node): - return self.construct_scalar(node) - - def construct_python_unicode(self, node): - return self.construct_scalar(node) - - def construct_python_bytes(self, node): - try: - value = self.construct_scalar(node).encode('ascii') - except UnicodeEncodeError as exc: - raise ConstructorError(None, None, - "failed to convert base64 data into ascii: %s" % exc, - node.start_mark) - try: - if hasattr(base64, 'decodebytes'): - return base64.decodebytes(value) - else: - return 
base64.decodestring(value) - except binascii.Error as exc: - raise ConstructorError(None, None, - "failed to decode base64 data: %s" % exc, node.start_mark) - - def construct_python_long(self, node): - return self.construct_yaml_int(node) - - def construct_python_complex(self, node): - return complex(self.construct_scalar(node)) - - def construct_python_tuple(self, node): - return tuple(self.construct_sequence(node)) - - def find_python_module(self, name, mark): - if not name: - raise ConstructorError("while constructing a Python module", mark, - "expected non-empty name appended to the tag", mark) - try: - __import__(name) - except ImportError as exc: - raise ConstructorError("while constructing a Python module", mark, - "cannot find module %r (%s)" % (name, exc), mark) - return sys.modules[name] - - def find_python_name(self, name, mark): - if not name: - raise ConstructorError("while constructing a Python object", mark, - "expected non-empty name appended to the tag", mark) - if '.' in name: - module_name, object_name = name.rsplit('.', 1) - else: - module_name = 'builtins' - object_name = name - try: - __import__(module_name) - except ImportError as exc: - raise ConstructorError("while constructing a Python object", mark, - "cannot find module %r (%s)" % (module_name, exc), mark) - module = sys.modules[module_name] - if not hasattr(module, object_name): - raise ConstructorError("while constructing a Python object", mark, - "cannot find %r in the module %r" - % (object_name, module.__name__), mark) - return getattr(module, object_name) - - def construct_python_name(self, suffix, node): - value = self.construct_scalar(node) - if value: - raise ConstructorError("while constructing a Python name", node.start_mark, - "expected the empty value, but found %r" % value, node.start_mark) - return self.find_python_name(suffix, node.start_mark) - - def construct_python_module(self, suffix, node): - value = self.construct_scalar(node) - if value: - raise ConstructorError("while constructing a Python module", node.start_mark, - "expected the empty value, but found %r" % value, node.start_mark) - return self.find_python_module(suffix, node.start_mark) - - def make_python_instance(self, suffix, node, - args=None, kwds=None, newobj=False): - if not args: - args = [] - if not kwds: - kwds = {} - cls = self.find_python_name(suffix, node.start_mark) - if newobj and isinstance(cls, type): - return cls.__new__(cls, *args, **kwds) - else: - return cls(*args, **kwds) - - def set_python_instance_state(self, instance, state): - if hasattr(instance, '__setstate__'): - instance.__setstate__(state) - else: - slotstate = {} - if isinstance(state, tuple) and len(state) == 2: - state, slotstate = state - if hasattr(instance, '__dict__'): - instance.__dict__.update(state) - elif state: - slotstate.update(state) - for key, value in slotstate.items(): - setattr(object, key, value) - - def construct_python_object(self, suffix, node): - # Format: - # !!python/object:module.name { ... state ... } - instance = self.make_python_instance(suffix, node, newobj=True) - yield instance - deep = hasattr(instance, '__setstate__') - state = self.construct_mapping(node, deep=deep) - self.set_python_instance_state(instance, state) - - def construct_python_object_apply(self, suffix, node, newobj=False): - # Format: - # !!python/object/apply # (or !!python/object/new) - # args: [ ... arguments ... ] - # kwds: { ... keywords ... } - # state: ... state ... - # listitems: [ ... listitems ... ] - # dictitems: { ... dictitems ... 
} - # or short format: - # !!python/object/apply [ ... arguments ... ] - # The difference between !!python/object/apply and !!python/object/new - # is how an object is created, check make_python_instance for details. - if isinstance(node, SequenceNode): - args = self.construct_sequence(node, deep=True) - kwds = {} - state = {} - listitems = [] - dictitems = {} - else: - value = self.construct_mapping(node, deep=True) - args = value.get('args', []) - kwds = value.get('kwds', {}) - state = value.get('state', {}) - listitems = value.get('listitems', []) - dictitems = value.get('dictitems', {}) - instance = self.make_python_instance(suffix, node, args, kwds, newobj) - if state: - self.set_python_instance_state(instance, state) - if listitems: - instance.extend(listitems) - if dictitems: - for key in dictitems: - instance[key] = dictitems[key] - return instance - - def construct_python_object_new(self, suffix, node): - return self.construct_python_object_apply(suffix, node, newobj=True) - -Constructor.add_constructor( - 'tag:yaml.org,2002:python/none', - Constructor.construct_yaml_null) - -Constructor.add_constructor( - 'tag:yaml.org,2002:python/bool', - Constructor.construct_yaml_bool) - -Constructor.add_constructor( - 'tag:yaml.org,2002:python/str', - Constructor.construct_python_str) - -Constructor.add_constructor( - 'tag:yaml.org,2002:python/unicode', - Constructor.construct_python_unicode) - -Constructor.add_constructor( - 'tag:yaml.org,2002:python/bytes', - Constructor.construct_python_bytes) - -Constructor.add_constructor( - 'tag:yaml.org,2002:python/int', - Constructor.construct_yaml_int) - -Constructor.add_constructor( - 'tag:yaml.org,2002:python/long', - Constructor.construct_python_long) - -Constructor.add_constructor( - 'tag:yaml.org,2002:python/float', - Constructor.construct_yaml_float) - -Constructor.add_constructor( - 'tag:yaml.org,2002:python/complex', - Constructor.construct_python_complex) - -Constructor.add_constructor( - 'tag:yaml.org,2002:python/list', - Constructor.construct_yaml_seq) - -Constructor.add_constructor( - 'tag:yaml.org,2002:python/tuple', - Constructor.construct_python_tuple) - -Constructor.add_constructor( - 'tag:yaml.org,2002:python/dict', - Constructor.construct_yaml_map) - -Constructor.add_multi_constructor( - 'tag:yaml.org,2002:python/name:', - Constructor.construct_python_name) - -Constructor.add_multi_constructor( - 'tag:yaml.org,2002:python/module:', - Constructor.construct_python_module) - -Constructor.add_multi_constructor( - 'tag:yaml.org,2002:python/object:', - Constructor.construct_python_object) - -Constructor.add_multi_constructor( - 'tag:yaml.org,2002:python/object/apply:', - Constructor.construct_python_object_apply) - -Constructor.add_multi_constructor( - 'tag:yaml.org,2002:python/object/new:', - Constructor.construct_python_object_new) - diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml3/cyaml.py b/src/collectors/python.d.plugin/python_modules/pyyaml3/cyaml.py deleted file mode 100644 index e6c16d894..000000000 --- a/src/collectors/python.d.plugin/python_modules/pyyaml3/cyaml.py +++ /dev/null @@ -1,86 +0,0 @@ -# SPDX-License-Identifier: MIT - -__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader', - 'CBaseDumper', 'CSafeDumper', 'CDumper'] - -from _yaml import CParser, CEmitter - -from .constructor import * - -from .serializer import * -from .representer import * - -from .resolver import * - -class CBaseLoader(CParser, BaseConstructor, BaseResolver): - - def __init__(self, stream): - CParser.__init__(self, stream) - 
BaseConstructor.__init__(self) - BaseResolver.__init__(self) - -class CSafeLoader(CParser, SafeConstructor, Resolver): - - def __init__(self, stream): - CParser.__init__(self, stream) - SafeConstructor.__init__(self) - Resolver.__init__(self) - -class CLoader(CParser, Constructor, Resolver): - - def __init__(self, stream): - CParser.__init__(self, stream) - Constructor.__init__(self) - Resolver.__init__(self) - -class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver): - - def __init__(self, stream, - default_style=None, default_flow_style=None, - canonical=None, indent=None, width=None, - allow_unicode=None, line_break=None, - encoding=None, explicit_start=None, explicit_end=None, - version=None, tags=None): - CEmitter.__init__(self, stream, canonical=canonical, - indent=indent, width=width, encoding=encoding, - allow_unicode=allow_unicode, line_break=line_break, - explicit_start=explicit_start, explicit_end=explicit_end, - version=version, tags=tags) - Representer.__init__(self, default_style=default_style, - default_flow_style=default_flow_style) - Resolver.__init__(self) - -class CSafeDumper(CEmitter, SafeRepresenter, Resolver): - - def __init__(self, stream, - default_style=None, default_flow_style=None, - canonical=None, indent=None, width=None, - allow_unicode=None, line_break=None, - encoding=None, explicit_start=None, explicit_end=None, - version=None, tags=None): - CEmitter.__init__(self, stream, canonical=canonical, - indent=indent, width=width, encoding=encoding, - allow_unicode=allow_unicode, line_break=line_break, - explicit_start=explicit_start, explicit_end=explicit_end, - version=version, tags=tags) - SafeRepresenter.__init__(self, default_style=default_style, - default_flow_style=default_flow_style) - Resolver.__init__(self) - -class CDumper(CEmitter, Serializer, Representer, Resolver): - - def __init__(self, stream, - default_style=None, default_flow_style=None, - canonical=None, indent=None, width=None, - allow_unicode=None, line_break=None, - encoding=None, explicit_start=None, explicit_end=None, - version=None, tags=None): - CEmitter.__init__(self, stream, canonical=canonical, - indent=indent, width=width, encoding=encoding, - allow_unicode=allow_unicode, line_break=line_break, - explicit_start=explicit_start, explicit_end=explicit_end, - version=version, tags=tags) - Representer.__init__(self, default_style=default_style, - default_flow_style=default_flow_style) - Resolver.__init__(self) - diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml3/dumper.py b/src/collectors/python.d.plugin/python_modules/pyyaml3/dumper.py deleted file mode 100644 index ba590c6e6..000000000 --- a/src/collectors/python.d.plugin/python_modules/pyyaml3/dumper.py +++ /dev/null @@ -1,63 +0,0 @@ -# SPDX-License-Identifier: MIT - -__all__ = ['BaseDumper', 'SafeDumper', 'Dumper'] - -from .emitter import * -from .serializer import * -from .representer import * -from .resolver import * - -class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver): - - def __init__(self, stream, - default_style=None, default_flow_style=None, - canonical=None, indent=None, width=None, - allow_unicode=None, line_break=None, - encoding=None, explicit_start=None, explicit_end=None, - version=None, tags=None): - Emitter.__init__(self, stream, canonical=canonical, - indent=indent, width=width, - allow_unicode=allow_unicode, line_break=line_break) - Serializer.__init__(self, encoding=encoding, - explicit_start=explicit_start, explicit_end=explicit_end, - version=version, tags=tags) - 
Representer.__init__(self, default_style=default_style, - default_flow_style=default_flow_style) - Resolver.__init__(self) - -class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver): - - def __init__(self, stream, - default_style=None, default_flow_style=None, - canonical=None, indent=None, width=None, - allow_unicode=None, line_break=None, - encoding=None, explicit_start=None, explicit_end=None, - version=None, tags=None): - Emitter.__init__(self, stream, canonical=canonical, - indent=indent, width=width, - allow_unicode=allow_unicode, line_break=line_break) - Serializer.__init__(self, encoding=encoding, - explicit_start=explicit_start, explicit_end=explicit_end, - version=version, tags=tags) - SafeRepresenter.__init__(self, default_style=default_style, - default_flow_style=default_flow_style) - Resolver.__init__(self) - -class Dumper(Emitter, Serializer, Representer, Resolver): - - def __init__(self, stream, - default_style=None, default_flow_style=None, - canonical=None, indent=None, width=None, - allow_unicode=None, line_break=None, - encoding=None, explicit_start=None, explicit_end=None, - version=None, tags=None): - Emitter.__init__(self, stream, canonical=canonical, - indent=indent, width=width, - allow_unicode=allow_unicode, line_break=line_break) - Serializer.__init__(self, encoding=encoding, - explicit_start=explicit_start, explicit_end=explicit_end, - version=version, tags=tags) - Representer.__init__(self, default_style=default_style, - default_flow_style=default_flow_style) - Resolver.__init__(self) - diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml3/emitter.py b/src/collectors/python.d.plugin/python_modules/pyyaml3/emitter.py deleted file mode 100644 index d4be65a8e..000000000 --- a/src/collectors/python.d.plugin/python_modules/pyyaml3/emitter.py +++ /dev/null @@ -1,1138 +0,0 @@ -# SPDX-License-Identifier: MIT - -# Emitter expects events obeying the following grammar: -# stream ::= STREAM-START document* STREAM-END -# document ::= DOCUMENT-START node DOCUMENT-END -# node ::= SCALAR | sequence | mapping -# sequence ::= SEQUENCE-START node* SEQUENCE-END -# mapping ::= MAPPING-START (node node)* MAPPING-END - -__all__ = ['Emitter', 'EmitterError'] - -from .error import YAMLError -from .events import * - -class EmitterError(YAMLError): - pass - -class ScalarAnalysis: - def __init__(self, scalar, empty, multiline, - allow_flow_plain, allow_block_plain, - allow_single_quoted, allow_double_quoted, - allow_block): - self.scalar = scalar - self.empty = empty - self.multiline = multiline - self.allow_flow_plain = allow_flow_plain - self.allow_block_plain = allow_block_plain - self.allow_single_quoted = allow_single_quoted - self.allow_double_quoted = allow_double_quoted - self.allow_block = allow_block - -class Emitter: - - DEFAULT_TAG_PREFIXES = { - '!' : '!', - 'tag:yaml.org,2002:' : '!!', - } - - def __init__(self, stream, canonical=None, indent=None, width=None, - allow_unicode=None, line_break=None): - - # The stream should have the methods `write` and possibly `flush`. - self.stream = stream - - # Encoding can be overriden by STREAM-START. - self.encoding = None - - # Emitter is a state machine with a stack of states to handle nested - # structures. - self.states = [] - self.state = self.expect_stream_start - - # Current event and the event queue. - self.events = [] - self.event = None - - # The current indentation level and the stack of previous indents. - self.indents = [] - self.indent = None - - # Flow level. - self.flow_level = 0 - - # Contexts. 
- self.root_context = False - self.sequence_context = False - self.mapping_context = False - self.simple_key_context = False - - # Characteristics of the last emitted character: - # - current position. - # - is it a whitespace? - # - is it an indention character - # (indentation space, '-', '?', or ':')? - self.line = 0 - self.column = 0 - self.whitespace = True - self.indention = True - - # Whether the document requires an explicit document indicator - self.open_ended = False - - # Formatting details. - self.canonical = canonical - self.allow_unicode = allow_unicode - self.best_indent = 2 - if indent and 1 < indent < 10: - self.best_indent = indent - self.best_width = 80 - if width and width > self.best_indent*2: - self.best_width = width - self.best_line_break = '\n' - if line_break in ['\r', '\n', '\r\n']: - self.best_line_break = line_break - - # Tag prefixes. - self.tag_prefixes = None - - # Prepared anchor and tag. - self.prepared_anchor = None - self.prepared_tag = None - - # Scalar analysis and style. - self.analysis = None - self.style = None - - def dispose(self): - # Reset the state attributes (to clear self-references) - self.states = [] - self.state = None - - def emit(self, event): - self.events.append(event) - while not self.need_more_events(): - self.event = self.events.pop(0) - self.state() - self.event = None - - # In some cases, we wait for a few next events before emitting. - - def need_more_events(self): - if not self.events: - return True - event = self.events[0] - if isinstance(event, DocumentStartEvent): - return self.need_events(1) - elif isinstance(event, SequenceStartEvent): - return self.need_events(2) - elif isinstance(event, MappingStartEvent): - return self.need_events(3) - else: - return False - - def need_events(self, count): - level = 0 - for event in self.events[1:]: - if isinstance(event, (DocumentStartEvent, CollectionStartEvent)): - level += 1 - elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)): - level -= 1 - elif isinstance(event, StreamEndEvent): - level = -1 - if level < 0: - return False - return (len(self.events) < count+1) - - def increase_indent(self, flow=False, indentless=False): - self.indents.append(self.indent) - if self.indent is None: - if flow: - self.indent = self.best_indent - else: - self.indent = 0 - elif not indentless: - self.indent += self.best_indent - - # States. - - # Stream handlers. - - def expect_stream_start(self): - if isinstance(self.event, StreamStartEvent): - if self.event.encoding and not hasattr(self.stream, 'encoding'): - self.encoding = self.event.encoding - self.write_stream_start() - self.state = self.expect_first_document_start - else: - raise EmitterError("expected StreamStartEvent, but got %s" - % self.event) - - def expect_nothing(self): - raise EmitterError("expected nothing, but got %s" % self.event) - - # Document handlers. 
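The emitter above is driven by the event protocol spelled out in the module header (stream, document, node, sequence, and mapping events). As a quick orientation aside, here is a minimal sketch of that stream as seen through the public API, assuming the stock PyYAML package that this vendored `pyyaml3` copy snapshots:

```python
# Minimal sketch, not part of the patch: print the event stream an Emitter
# like the one above consumes. Assumes the stock PyYAML package is installed
# (pip install pyyaml); yaml.parse() yields the event types from events.py.
import yaml

DOCUMENT = "---\nkey: [1, 2]\n"

for event in yaml.parse(DOCUMENT):
    print(type(event).__name__)  # StreamStartEvent, DocumentStartEvent, ...

# yaml.emit() feeds the same events back through an Emitter.
print(yaml.emit(yaml.parse(DOCUMENT)))
```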
- - def expect_first_document_start(self): - return self.expect_document_start(first=True) - - def expect_document_start(self, first=False): - if isinstance(self.event, DocumentStartEvent): - if (self.event.version or self.event.tags) and self.open_ended: - self.write_indicator('...', True) - self.write_indent() - if self.event.version: - version_text = self.prepare_version(self.event.version) - self.write_version_directive(version_text) - self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy() - if self.event.tags: - handles = sorted(self.event.tags.keys()) - for handle in handles: - prefix = self.event.tags[handle] - self.tag_prefixes[prefix] = handle - handle_text = self.prepare_tag_handle(handle) - prefix_text = self.prepare_tag_prefix(prefix) - self.write_tag_directive(handle_text, prefix_text) - implicit = (first and not self.event.explicit and not self.canonical - and not self.event.version and not self.event.tags - and not self.check_empty_document()) - if not implicit: - self.write_indent() - self.write_indicator('---', True) - if self.canonical: - self.write_indent() - self.state = self.expect_document_root - elif isinstance(self.event, StreamEndEvent): - if self.open_ended: - self.write_indicator('...', True) - self.write_indent() - self.write_stream_end() - self.state = self.expect_nothing - else: - raise EmitterError("expected DocumentStartEvent, but got %s" - % self.event) - - def expect_document_end(self): - if isinstance(self.event, DocumentEndEvent): - self.write_indent() - if self.event.explicit: - self.write_indicator('...', True) - self.write_indent() - self.flush_stream() - self.state = self.expect_document_start - else: - raise EmitterError("expected DocumentEndEvent, but got %s" - % self.event) - - def expect_document_root(self): - self.states.append(self.expect_document_end) - self.expect_node(root=True) - - # Node handlers. - - def expect_node(self, root=False, sequence=False, mapping=False, - simple_key=False): - self.root_context = root - self.sequence_context = sequence - self.mapping_context = mapping - self.simple_key_context = simple_key - if isinstance(self.event, AliasEvent): - self.expect_alias() - elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)): - self.process_anchor('&') - self.process_tag() - if isinstance(self.event, ScalarEvent): - self.expect_scalar() - elif isinstance(self.event, SequenceStartEvent): - if self.flow_level or self.canonical or self.event.flow_style \ - or self.check_empty_sequence(): - self.expect_flow_sequence() - else: - self.expect_block_sequence() - elif isinstance(self.event, MappingStartEvent): - if self.flow_level or self.canonical or self.event.flow_style \ - or self.check_empty_mapping(): - self.expect_flow_mapping() - else: - self.expect_block_mapping() - else: - raise EmitterError("expected NodeEvent, but got %s" % self.event) - - def expect_alias(self): - if self.event.anchor is None: - raise EmitterError("anchor is not specified for alias") - self.process_anchor('*') - self.state = self.states.pop() - - def expect_scalar(self): - self.increase_indent(flow=True) - self.process_scalar() - self.indent = self.indents.pop() - self.state = self.states.pop() - - # Flow sequence handlers. 
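`expect_node()` above routes a collection to the flow handlers whenever the event carries a flow style, the emitter is already inside a flow context, or canonical mode is on. A small sketch of how that choice surfaces through `yaml.dump` (stock PyYAML assumed):

```python
# Sketch, assuming stock PyYAML: default_flow_style sets flow_style on the
# collection-start events, which expect_node() uses to pick the flow or the
# block handlers.
import yaml

data = {"servers": ["alpha", "beta"]}

print(yaml.dump(data, default_flow_style=False))
# servers:
# - alpha
# - beta

print(yaml.dump(data, default_flow_style=True))
# {servers: [alpha, beta]}
```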
- - def expect_flow_sequence(self): - self.write_indicator('[', True, whitespace=True) - self.flow_level += 1 - self.increase_indent(flow=True) - self.state = self.expect_first_flow_sequence_item - - def expect_first_flow_sequence_item(self): - if isinstance(self.event, SequenceEndEvent): - self.indent = self.indents.pop() - self.flow_level -= 1 - self.write_indicator(']', False) - self.state = self.states.pop() - else: - if self.canonical or self.column > self.best_width: - self.write_indent() - self.states.append(self.expect_flow_sequence_item) - self.expect_node(sequence=True) - - def expect_flow_sequence_item(self): - if isinstance(self.event, SequenceEndEvent): - self.indent = self.indents.pop() - self.flow_level -= 1 - if self.canonical: - self.write_indicator(',', False) - self.write_indent() - self.write_indicator(']', False) - self.state = self.states.pop() - else: - self.write_indicator(',', False) - if self.canonical or self.column > self.best_width: - self.write_indent() - self.states.append(self.expect_flow_sequence_item) - self.expect_node(sequence=True) - - # Flow mapping handlers. - - def expect_flow_mapping(self): - self.write_indicator('{', True, whitespace=True) - self.flow_level += 1 - self.increase_indent(flow=True) - self.state = self.expect_first_flow_mapping_key - - def expect_first_flow_mapping_key(self): - if isinstance(self.event, MappingEndEvent): - self.indent = self.indents.pop() - self.flow_level -= 1 - self.write_indicator('}', False) - self.state = self.states.pop() - else: - if self.canonical or self.column > self.best_width: - self.write_indent() - if not self.canonical and self.check_simple_key(): - self.states.append(self.expect_flow_mapping_simple_value) - self.expect_node(mapping=True, simple_key=True) - else: - self.write_indicator('?', True) - self.states.append(self.expect_flow_mapping_value) - self.expect_node(mapping=True) - - def expect_flow_mapping_key(self): - if isinstance(self.event, MappingEndEvent): - self.indent = self.indents.pop() - self.flow_level -= 1 - if self.canonical: - self.write_indicator(',', False) - self.write_indent() - self.write_indicator('}', False) - self.state = self.states.pop() - else: - self.write_indicator(',', False) - if self.canonical or self.column > self.best_width: - self.write_indent() - if not self.canonical and self.check_simple_key(): - self.states.append(self.expect_flow_mapping_simple_value) - self.expect_node(mapping=True, simple_key=True) - else: - self.write_indicator('?', True) - self.states.append(self.expect_flow_mapping_value) - self.expect_node(mapping=True) - - def expect_flow_mapping_simple_value(self): - self.write_indicator(':', False) - self.states.append(self.expect_flow_mapping_key) - self.expect_node(mapping=True) - - def expect_flow_mapping_value(self): - if self.canonical or self.column > self.best_width: - self.write_indent() - self.write_indicator(':', True) - self.states.append(self.expect_flow_mapping_key) - self.expect_node(mapping=True) - - # Block sequence handlers. 
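The flow handlers above consult `self.canonical` repeatedly: canonical output keeps flow collections, writes one entry per line with a trailing comma, and tags every node explicitly. A hedged illustration (stock PyYAML assumed; exact spacing may vary by version):

```python
# Sketch, assuming stock PyYAML: canonical=True exercises the canonical
# branches of the flow handlers above.
import yaml

print(yaml.dump([1, "two"], canonical=True))
# Roughly:
# ---
# !!seq [
#   !!int "1",
#   !!str "two",
# ]
```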
- - def expect_block_sequence(self): - indentless = (self.mapping_context and not self.indention) - self.increase_indent(flow=False, indentless=indentless) - self.state = self.expect_first_block_sequence_item - - def expect_first_block_sequence_item(self): - return self.expect_block_sequence_item(first=True) - - def expect_block_sequence_item(self, first=False): - if not first and isinstance(self.event, SequenceEndEvent): - self.indent = self.indents.pop() - self.state = self.states.pop() - else: - self.write_indent() - self.write_indicator('-', True, indention=True) - self.states.append(self.expect_block_sequence_item) - self.expect_node(sequence=True) - - # Block mapping handlers. - - def expect_block_mapping(self): - self.increase_indent(flow=False) - self.state = self.expect_first_block_mapping_key - - def expect_first_block_mapping_key(self): - return self.expect_block_mapping_key(first=True) - - def expect_block_mapping_key(self, first=False): - if not first and isinstance(self.event, MappingEndEvent): - self.indent = self.indents.pop() - self.state = self.states.pop() - else: - self.write_indent() - if self.check_simple_key(): - self.states.append(self.expect_block_mapping_simple_value) - self.expect_node(mapping=True, simple_key=True) - else: - self.write_indicator('?', True, indention=True) - self.states.append(self.expect_block_mapping_value) - self.expect_node(mapping=True) - - def expect_block_mapping_simple_value(self): - self.write_indicator(':', False) - self.states.append(self.expect_block_mapping_key) - self.expect_node(mapping=True) - - def expect_block_mapping_value(self): - self.write_indent() - self.write_indicator(':', True, indention=True) - self.states.append(self.expect_block_mapping_key) - self.expect_node(mapping=True) - - # Checkers. - - def check_empty_sequence(self): - return (isinstance(self.event, SequenceStartEvent) and self.events - and isinstance(self.events[0], SequenceEndEvent)) - - def check_empty_mapping(self): - return (isinstance(self.event, MappingStartEvent) and self.events - and isinstance(self.events[0], MappingEndEvent)) - - def check_empty_document(self): - if not isinstance(self.event, DocumentStartEvent) or not self.events: - return False - event = self.events[0] - return (isinstance(event, ScalarEvent) and event.anchor is None - and event.tag is None and event.implicit and event.value == '') - - def check_simple_key(self): - length = 0 - if isinstance(self.event, NodeEvent) and self.event.anchor is not None: - if self.prepared_anchor is None: - self.prepared_anchor = self.prepare_anchor(self.event.anchor) - length += len(self.prepared_anchor) - if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \ - and self.event.tag is not None: - if self.prepared_tag is None: - self.prepared_tag = self.prepare_tag(self.event.tag) - length += len(self.prepared_tag) - if isinstance(self.event, ScalarEvent): - if self.analysis is None: - self.analysis = self.analyze_scalar(self.event.value) - length += len(self.analysis.scalar) - return (length < 128 and (isinstance(self.event, AliasEvent) - or (isinstance(self.event, ScalarEvent) - and not self.analysis.empty and not self.analysis.multiline) - or self.check_empty_sequence() or self.check_empty_mapping())) - - # Anchor, Tag, and Scalar processors. 
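`process_anchor()` below is what writes the `&`/`*` indicators; on a dump they appear whenever the representer meets the same object twice. A short sketch (stock PyYAML assumed; `id001` is the library's default generated anchor name):

```python
# Sketch, assuming stock PyYAML: a dict referenced twice is emitted once with
# an anchor and afterwards as an alias, via process_anchor() below.
import yaml

shared = {"port": 19999}
print(yaml.dump({"a": shared, "b": shared}))
# a: &id001
#   port: 19999
# b: *id001
```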
- - def process_anchor(self, indicator): - if self.event.anchor is None: - self.prepared_anchor = None - return - if self.prepared_anchor is None: - self.prepared_anchor = self.prepare_anchor(self.event.anchor) - if self.prepared_anchor: - self.write_indicator(indicator+self.prepared_anchor, True) - self.prepared_anchor = None - - def process_tag(self): - tag = self.event.tag - if isinstance(self.event, ScalarEvent): - if self.style is None: - self.style = self.choose_scalar_style() - if ((not self.canonical or tag is None) and - ((self.style == '' and self.event.implicit[0]) - or (self.style != '' and self.event.implicit[1]))): - self.prepared_tag = None - return - if self.event.implicit[0] and tag is None: - tag = '!' - self.prepared_tag = None - else: - if (not self.canonical or tag is None) and self.event.implicit: - self.prepared_tag = None - return - if tag is None: - raise EmitterError("tag is not specified") - if self.prepared_tag is None: - self.prepared_tag = self.prepare_tag(tag) - if self.prepared_tag: - self.write_indicator(self.prepared_tag, True) - self.prepared_tag = None - - def choose_scalar_style(self): - if self.analysis is None: - self.analysis = self.analyze_scalar(self.event.value) - if self.event.style == '"' or self.canonical: - return '"' - if not self.event.style and self.event.implicit[0]: - if (not (self.simple_key_context and - (self.analysis.empty or self.analysis.multiline)) - and (self.flow_level and self.analysis.allow_flow_plain - or (not self.flow_level and self.analysis.allow_block_plain))): - return '' - if self.event.style and self.event.style in '|>': - if (not self.flow_level and not self.simple_key_context - and self.analysis.allow_block): - return self.event.style - if not self.event.style or self.event.style == '\'': - if (self.analysis.allow_single_quoted and - not (self.simple_key_context and self.analysis.multiline)): - return '\'' - return '"' - - def process_scalar(self): - if self.analysis is None: - self.analysis = self.analyze_scalar(self.event.value) - if self.style is None: - self.style = self.choose_scalar_style() - split = (not self.simple_key_context) - #if self.analysis.multiline and split \ - # and (not self.style or self.style in '\'\"'): - # self.write_indent() - if self.style == '"': - self.write_double_quoted(self.analysis.scalar, split) - elif self.style == '\'': - self.write_single_quoted(self.analysis.scalar, split) - elif self.style == '>': - self.write_folded(self.analysis.scalar) - elif self.style == '|': - self.write_literal(self.analysis.scalar) - else: - self.write_plain(self.analysis.scalar, split) - self.analysis = None - self.style = None - - # Analyzers. - - def prepare_version(self, version): - major, minor = version - if major != 1: - raise EmitterError("unsupported YAML version: %d.%d" % (major, minor)) - return '%d.%d' % (major, minor) - - def prepare_tag_handle(self, handle): - if not handle: - raise EmitterError("tag handle must not be empty") - if handle[0] != '!' 
or handle[-1] != '!': - raise EmitterError("tag handle must start and end with '!': %r" % handle) - for ch in handle[1:-1]: - if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \ - or ch in '-_'): - raise EmitterError("invalid character %r in the tag handle: %r" - % (ch, handle)) - return handle - - def prepare_tag_prefix(self, prefix): - if not prefix: - raise EmitterError("tag prefix must not be empty") - chunks = [] - start = end = 0 - if prefix[0] == '!': - end = 1 - while end < len(prefix): - ch = prefix[end] - if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \ - or ch in '-;/?!:@&=+$,_.~*\'()[]': - end += 1 - else: - if start < end: - chunks.append(prefix[start:end]) - start = end = end+1 - data = ch.encode('utf-8') - for ch in data: - chunks.append('%%%02X' % ch) - if start < end: - chunks.append(prefix[start:end]) - return ''.join(chunks) - - def prepare_tag(self, tag): - if not tag: - raise EmitterError("tag must not be empty") - if tag == '!': - return tag - handle = None - suffix = tag - prefixes = sorted(self.tag_prefixes.keys()) - for prefix in prefixes: - if tag.startswith(prefix) \ - and (prefix == '!' or len(prefix) < len(tag)): - handle = self.tag_prefixes[prefix] - suffix = tag[len(prefix):] - chunks = [] - start = end = 0 - while end < len(suffix): - ch = suffix[end] - if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \ - or ch in '-;/?:@&=+$,_.~*\'()[]' \ - or (ch == '!' and handle != '!'): - end += 1 - else: - if start < end: - chunks.append(suffix[start:end]) - start = end = end+1 - data = ch.encode('utf-8') - for ch in data: - chunks.append('%%%02X' % ch) - if start < end: - chunks.append(suffix[start:end]) - suffix_text = ''.join(chunks) - if handle: - return '%s%s' % (handle, suffix_text) - else: - return '!<%s>' % suffix_text - - def prepare_anchor(self, anchor): - if not anchor: - raise EmitterError("anchor must not be empty") - for ch in anchor: - if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \ - or ch in '-_'): - raise EmitterError("invalid character %r in the anchor: %r" - % (ch, anchor)) - return anchor - - def analyze_scalar(self, scalar): - - # Empty scalar is a special case. - if not scalar: - return ScalarAnalysis(scalar=scalar, empty=True, multiline=False, - allow_flow_plain=False, allow_block_plain=True, - allow_single_quoted=True, allow_double_quoted=True, - allow_block=False) - - # Indicators and special characters. - block_indicators = False - flow_indicators = False - line_breaks = False - special_characters = False - - # Important whitespace combinations. - leading_space = False - leading_break = False - trailing_space = False - trailing_break = False - break_space = False - space_break = False - - # Check document indicators. - if scalar.startswith('---') or scalar.startswith('...'): - block_indicators = True - flow_indicators = True - - # First character or preceded by a whitespace. - preceeded_by_whitespace = True - - # Last character or followed by a whitespace. - followed_by_whitespace = (len(scalar) == 1 or - scalar[1] in '\0 \t\r\n\x85\u2028\u2029') - - # The previous character is a space. - previous_space = False - - # The previous character is a break. - previous_break = False - - index = 0 - while index < len(scalar): - ch = scalar[index] - - # Check for indicators. - if index == 0: - # Leading indicators are special characters. 
- if ch in '#,[]{}&*!|>\'\"%@`': - flow_indicators = True - block_indicators = True - if ch in '?:': - flow_indicators = True - if followed_by_whitespace: - block_indicators = True - if ch == '-' and followed_by_whitespace: - flow_indicators = True - block_indicators = True - else: - # Some indicators cannot appear within a scalar as well. - if ch in ',?[]{}': - flow_indicators = True - if ch == ':': - flow_indicators = True - if followed_by_whitespace: - block_indicators = True - if ch == '#' and preceeded_by_whitespace: - flow_indicators = True - block_indicators = True - - # Check for line breaks, special, and unicode characters. - if ch in '\n\x85\u2028\u2029': - line_breaks = True - if not (ch == '\n' or '\x20' <= ch <= '\x7E'): - if (ch == '\x85' or '\xA0' <= ch <= '\uD7FF' - or '\uE000' <= ch <= '\uFFFD') and ch != '\uFEFF': - unicode_characters = True - if not self.allow_unicode: - special_characters = True - else: - special_characters = True - - # Detect important whitespace combinations. - if ch == ' ': - if index == 0: - leading_space = True - if index == len(scalar)-1: - trailing_space = True - if previous_break: - break_space = True - previous_space = True - previous_break = False - elif ch in '\n\x85\u2028\u2029': - if index == 0: - leading_break = True - if index == len(scalar)-1: - trailing_break = True - if previous_space: - space_break = True - previous_space = False - previous_break = True - else: - previous_space = False - previous_break = False - - # Prepare for the next character. - index += 1 - preceeded_by_whitespace = (ch in '\0 \t\r\n\x85\u2028\u2029') - followed_by_whitespace = (index+1 >= len(scalar) or - scalar[index+1] in '\0 \t\r\n\x85\u2028\u2029') - - # Let's decide what styles are allowed. - allow_flow_plain = True - allow_block_plain = True - allow_single_quoted = True - allow_double_quoted = True - allow_block = True - - # Leading and trailing whitespaces are bad for plain scalars. - if (leading_space or leading_break - or trailing_space or trailing_break): - allow_flow_plain = allow_block_plain = False - - # We do not permit trailing spaces for block scalars. - if trailing_space: - allow_block = False - - # Spaces at the beginning of a new line are only acceptable for block - # scalars. - if break_space: - allow_flow_plain = allow_block_plain = allow_single_quoted = False - - # Spaces followed by breaks, as well as special character are only - # allowed for double quoted scalars. - if space_break or special_characters: - allow_flow_plain = allow_block_plain = \ - allow_single_quoted = allow_block = False - - # Although the plain scalar writer supports breaks, we never emit - # multiline plain scalars. - if line_breaks: - allow_flow_plain = allow_block_plain = False - - # Flow indicators are forbidden for flow plain scalars. - if flow_indicators: - allow_flow_plain = False - - # Block indicators are forbidden for block plain scalars. - if block_indicators: - allow_block_plain = False - - return ScalarAnalysis(scalar=scalar, - empty=False, multiline=line_breaks, - allow_flow_plain=allow_flow_plain, - allow_block_plain=allow_block_plain, - allow_single_quoted=allow_single_quoted, - allow_double_quoted=allow_double_quoted, - allow_block=allow_block) - - # Writers. - - def flush_stream(self): - if hasattr(self.stream, 'flush'): - self.stream.flush() - - def write_stream_start(self): - # Write BOM if needed. 
- if self.encoding and self.encoding.startswith('utf-16'): - self.stream.write('\uFEFF'.encode(self.encoding)) - - def write_stream_end(self): - self.flush_stream() - - def write_indicator(self, indicator, need_whitespace, - whitespace=False, indention=False): - if self.whitespace or not need_whitespace: - data = indicator - else: - data = ' '+indicator - self.whitespace = whitespace - self.indention = self.indention and indention - self.column += len(data) - self.open_ended = False - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - - def write_indent(self): - indent = self.indent or 0 - if not self.indention or self.column > indent \ - or (self.column == indent and not self.whitespace): - self.write_line_break() - if self.column < indent: - self.whitespace = True - data = ' '*(indent-self.column) - self.column = indent - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - - def write_line_break(self, data=None): - if data is None: - data = self.best_line_break - self.whitespace = True - self.indention = True - self.line += 1 - self.column = 0 - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - - def write_version_directive(self, version_text): - data = '%%YAML %s' % version_text - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - self.write_line_break() - - def write_tag_directive(self, handle_text, prefix_text): - data = '%%TAG %s %s' % (handle_text, prefix_text) - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - self.write_line_break() - - # Scalar streams. - - def write_single_quoted(self, text, split=True): - self.write_indicator('\'', True) - spaces = False - breaks = False - start = end = 0 - while end <= len(text): - ch = None - if end < len(text): - ch = text[end] - if spaces: - if ch is None or ch != ' ': - if start+1 == end and self.column > self.best_width and split \ - and start != 0 and end != len(text): - self.write_indent() - else: - data = text[start:end] - self.column += len(data) - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - start = end - elif breaks: - if ch is None or ch not in '\n\x85\u2028\u2029': - if text[start] == '\n': - self.write_line_break() - for br in text[start:end]: - if br == '\n': - self.write_line_break() - else: - self.write_line_break(br) - self.write_indent() - start = end - else: - if ch is None or ch in ' \n\x85\u2028\u2029' or ch == '\'': - if start < end: - data = text[start:end] - self.column += len(data) - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - start = end - if ch == '\'': - data = '\'\'' - self.column += 2 - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - start = end + 1 - if ch is not None: - spaces = (ch == ' ') - breaks = (ch in '\n\x85\u2028\u2029') - end += 1 - self.write_indicator('\'', False) - - ESCAPE_REPLACEMENTS = { - '\0': '0', - '\x07': 'a', - '\x08': 'b', - '\x09': 't', - '\x0A': 'n', - '\x0B': 'v', - '\x0C': 'f', - '\x0D': 'r', - '\x1B': 'e', - '\"': '\"', - '\\': '\\', - '\x85': 'N', - '\xA0': '_', - '\u2028': 'L', - '\u2029': 'P', - } - - def write_double_quoted(self, text, split=True): - self.write_indicator('"', True) - start = end = 0 - while end <= len(text): - ch = None - if end < len(text): - ch = text[end] - if ch is None or ch in '"\\\x85\u2028\u2029\uFEFF' \ - or not ('\x20' <= ch <= '\x7E' - or (self.allow_unicode - and ('\xA0' <= ch <= '\uD7FF' - 
or '\uE000' <= ch <= '\uFFFD'))): - if start < end: - data = text[start:end] - self.column += len(data) - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - start = end - if ch is not None: - if ch in self.ESCAPE_REPLACEMENTS: - data = '\\'+self.ESCAPE_REPLACEMENTS[ch] - elif ch <= '\xFF': - data = '\\x%02X' % ord(ch) - elif ch <= '\uFFFF': - data = '\\u%04X' % ord(ch) - else: - data = '\\U%08X' % ord(ch) - self.column += len(data) - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - start = end+1 - if 0 < end < len(text)-1 and (ch == ' ' or start >= end) \ - and self.column+(end-start) > self.best_width and split: - data = text[start:end]+'\\' - if start < end: - start = end - self.column += len(data) - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - self.write_indent() - self.whitespace = False - self.indention = False - if text[start] == ' ': - data = '\\' - self.column += len(data) - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - end += 1 - self.write_indicator('"', False) - - def determine_block_hints(self, text): - hints = '' - if text: - if text[0] in ' \n\x85\u2028\u2029': - hints += str(self.best_indent) - if text[-1] not in '\n\x85\u2028\u2029': - hints += '-' - elif len(text) == 1 or text[-2] in '\n\x85\u2028\u2029': - hints += '+' - return hints - - def write_folded(self, text): - hints = self.determine_block_hints(text) - self.write_indicator('>'+hints, True) - if hints[-1:] == '+': - self.open_ended = True - self.write_line_break() - leading_space = True - spaces = False - breaks = True - start = end = 0 - while end <= len(text): - ch = None - if end < len(text): - ch = text[end] - if breaks: - if ch is None or ch not in '\n\x85\u2028\u2029': - if not leading_space and ch is not None and ch != ' ' \ - and text[start] == '\n': - self.write_line_break() - leading_space = (ch == ' ') - for br in text[start:end]: - if br == '\n': - self.write_line_break() - else: - self.write_line_break(br) - if ch is not None: - self.write_indent() - start = end - elif spaces: - if ch != ' ': - if start+1 == end and self.column > self.best_width: - self.write_indent() - else: - data = text[start:end] - self.column += len(data) - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - start = end - else: - if ch is None or ch in ' \n\x85\u2028\u2029': - data = text[start:end] - self.column += len(data) - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - if ch is None: - self.write_line_break() - start = end - if ch is not None: - breaks = (ch in '\n\x85\u2028\u2029') - spaces = (ch == ' ') - end += 1 - - def write_literal(self, text): - hints = self.determine_block_hints(text) - self.write_indicator('|'+hints, True) - if hints[-1:] == '+': - self.open_ended = True - self.write_line_break() - breaks = True - start = end = 0 - while end <= len(text): - ch = None - if end < len(text): - ch = text[end] - if breaks: - if ch is None or ch not in '\n\x85\u2028\u2029': - for br in text[start:end]: - if br == '\n': - self.write_line_break() - else: - self.write_line_break(br) - if ch is not None: - self.write_indent() - start = end - else: - if ch is None or ch in '\n\x85\u2028\u2029': - data = text[start:end] - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - if ch is None: - self.write_line_break() - start = end - if ch is not None: - breaks = (ch in '\n\x85\u2028\u2029') - end += 1 - 
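`determine_block_hints()` above picks the chomping indicator that follows `|` or `>`: `-` when the text has no trailing line break, nothing for exactly one, and `+` to keep extras. A sketch of the visible effect (stock PyYAML assumed):

```python
# Sketch, assuming stock PyYAML: the hint after '|' depends on the scalar's
# trailing newlines, per determine_block_hints() above.
import yaml

print(yaml.dump("hello", default_style="|"))    # |-\n  hello   (strip)
print(yaml.dump("hello\n", default_style="|"))  # |\n  hello    (clip)
```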
- def write_plain(self, text, split=True): - if self.root_context: - self.open_ended = True - if not text: - return - if not self.whitespace: - data = ' ' - self.column += len(data) - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - self.whitespace = False - self.indention = False - spaces = False - breaks = False - start = end = 0 - while end <= len(text): - ch = None - if end < len(text): - ch = text[end] - if spaces: - if ch != ' ': - if start+1 == end and self.column > self.best_width and split: - self.write_indent() - self.whitespace = False - self.indention = False - else: - data = text[start:end] - self.column += len(data) - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - start = end - elif breaks: - if ch not in '\n\x85\u2028\u2029': - if text[start] == '\n': - self.write_line_break() - for br in text[start:end]: - if br == '\n': - self.write_line_break() - else: - self.write_line_break(br) - self.write_indent() - self.whitespace = False - self.indention = False - start = end - else: - if ch is None or ch in ' \n\x85\u2028\u2029': - data = text[start:end] - self.column += len(data) - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - start = end - if ch is not None: - spaces = (ch == ' ') - breaks = (ch in '\n\x85\u2028\u2029') - end += 1 - diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml3/error.py b/src/collectors/python.d.plugin/python_modules/pyyaml3/error.py deleted file mode 100644 index 5fec7d449..000000000 --- a/src/collectors/python.d.plugin/python_modules/pyyaml3/error.py +++ /dev/null @@ -1,76 +0,0 @@ -# SPDX-License-Identifier: MIT - -__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError'] - -class Mark: - - def __init__(self, name, index, line, column, buffer, pointer): - self.name = name - self.index = index - self.line = line - self.column = column - self.buffer = buffer - self.pointer = pointer - - def get_snippet(self, indent=4, max_length=75): - if self.buffer is None: - return None - head = '' - start = self.pointer - while start > 0 and self.buffer[start-1] not in '\0\r\n\x85\u2028\u2029': - start -= 1 - if self.pointer-start > max_length/2-1: - head = ' ... ' - start += 5 - break - tail = '' - end = self.pointer - while end < len(self.buffer) and self.buffer[end] not in '\0\r\n\x85\u2028\u2029': - end += 1 - if end-self.pointer > max_length/2-1: - tail = ' ... 
' - end -= 5 - break - snippet = self.buffer[start:end] - return ' '*indent + head + snippet + tail + '\n' \ - + ' '*(indent+self.pointer-start+len(head)) + '^' - - def __str__(self): - snippet = self.get_snippet() - where = " in \"%s\", line %d, column %d" \ - % (self.name, self.line+1, self.column+1) - if snippet is not None: - where += ":\n"+snippet - return where - -class YAMLError(Exception): - pass - -class MarkedYAMLError(YAMLError): - - def __init__(self, context=None, context_mark=None, - problem=None, problem_mark=None, note=None): - self.context = context - self.context_mark = context_mark - self.problem = problem - self.problem_mark = problem_mark - self.note = note - - def __str__(self): - lines = [] - if self.context is not None: - lines.append(self.context) - if self.context_mark is not None \ - and (self.problem is None or self.problem_mark is None - or self.context_mark.name != self.problem_mark.name - or self.context_mark.line != self.problem_mark.line - or self.context_mark.column != self.problem_mark.column): - lines.append(str(self.context_mark)) - if self.problem is not None: - lines.append(self.problem) - if self.problem_mark is not None: - lines.append(str(self.problem_mark)) - if self.note is not None: - lines.append(self.note) - return '\n'.join(lines) - diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml3/events.py b/src/collectors/python.d.plugin/python_modules/pyyaml3/events.py deleted file mode 100644 index 283452add..000000000 --- a/src/collectors/python.d.plugin/python_modules/pyyaml3/events.py +++ /dev/null @@ -1,87 +0,0 @@ -# SPDX-License-Identifier: MIT - -# Abstract classes. - -class Event(object): - def __init__(self, start_mark=None, end_mark=None): - self.start_mark = start_mark - self.end_mark = end_mark - def __repr__(self): - attributes = [key for key in ['anchor', 'tag', 'implicit', 'value'] - if hasattr(self, key)] - arguments = ', '.join(['%s=%r' % (key, getattr(self, key)) - for key in attributes]) - return '%s(%s)' % (self.__class__.__name__, arguments) - -class NodeEvent(Event): - def __init__(self, anchor, start_mark=None, end_mark=None): - self.anchor = anchor - self.start_mark = start_mark - self.end_mark = end_mark - -class CollectionStartEvent(NodeEvent): - def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None, - flow_style=None): - self.anchor = anchor - self.tag = tag - self.implicit = implicit - self.start_mark = start_mark - self.end_mark = end_mark - self.flow_style = flow_style - -class CollectionEndEvent(Event): - pass - -# Implementations. 
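`Mark` and `MarkedYAMLError` above are what surface as the familiar line/column report when a load fails. A minimal sketch (stock PyYAML assumed):

```python
# Sketch, assuming stock PyYAML: scanner/parser errors derive from
# MarkedYAMLError; problem_mark is a Mark carrying line, column and snippet.
import yaml

try:
    yaml.safe_load("key: value: oops")
except yaml.YAMLError as exc:
    mark = getattr(exc, "problem_mark", None)
    if mark is not None:
        print("error at line %d, column %d" % (mark.line + 1, mark.column + 1))
```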
- -class StreamStartEvent(Event): - def __init__(self, start_mark=None, end_mark=None, encoding=None): - self.start_mark = start_mark - self.end_mark = end_mark - self.encoding = encoding - -class StreamEndEvent(Event): - pass - -class DocumentStartEvent(Event): - def __init__(self, start_mark=None, end_mark=None, - explicit=None, version=None, tags=None): - self.start_mark = start_mark - self.end_mark = end_mark - self.explicit = explicit - self.version = version - self.tags = tags - -class DocumentEndEvent(Event): - def __init__(self, start_mark=None, end_mark=None, - explicit=None): - self.start_mark = start_mark - self.end_mark = end_mark - self.explicit = explicit - -class AliasEvent(NodeEvent): - pass - -class ScalarEvent(NodeEvent): - def __init__(self, anchor, tag, implicit, value, - start_mark=None, end_mark=None, style=None): - self.anchor = anchor - self.tag = tag - self.implicit = implicit - self.value = value - self.start_mark = start_mark - self.end_mark = end_mark - self.style = style - -class SequenceStartEvent(CollectionStartEvent): - pass - -class SequenceEndEvent(CollectionEndEvent): - pass - -class MappingStartEvent(CollectionStartEvent): - pass - -class MappingEndEvent(CollectionEndEvent): - pass - diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml3/loader.py b/src/collectors/python.d.plugin/python_modules/pyyaml3/loader.py deleted file mode 100644 index 7ef6cf815..000000000 --- a/src/collectors/python.d.plugin/python_modules/pyyaml3/loader.py +++ /dev/null @@ -1,41 +0,0 @@ -# SPDX-License-Identifier: MIT - -__all__ = ['BaseLoader', 'SafeLoader', 'Loader'] - -from .reader import * -from .scanner import * -from .parser import * -from .composer import * -from .constructor import * -from .resolver import * - -class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver): - - def __init__(self, stream): - Reader.__init__(self, stream) - Scanner.__init__(self) - Parser.__init__(self) - Composer.__init__(self) - BaseConstructor.__init__(self) - BaseResolver.__init__(self) - -class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver): - - def __init__(self, stream): - Reader.__init__(self, stream) - Scanner.__init__(self) - Parser.__init__(self) - Composer.__init__(self) - SafeConstructor.__init__(self) - Resolver.__init__(self) - -class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver): - - def __init__(self, stream): - Reader.__init__(self, stream) - Scanner.__init__(self) - Parser.__init__(self) - Composer.__init__(self) - Constructor.__init__(self) - Resolver.__init__(self) - diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml3/nodes.py b/src/collectors/python.d.plugin/python_modules/pyyaml3/nodes.py deleted file mode 100644 index ed2a1b43e..000000000 --- a/src/collectors/python.d.plugin/python_modules/pyyaml3/nodes.py +++ /dev/null @@ -1,50 +0,0 @@ -# SPDX-License-Identifier: MIT - -class Node(object): - def __init__(self, tag, value, start_mark, end_mark): - self.tag = tag - self.value = value - self.start_mark = start_mark - self.end_mark = end_mark - def __repr__(self): - value = self.value - #if isinstance(value, list): - # if len(value) == 0: - # value = '' - # elif len(value) == 1: - # value = '<1 item>' - # else: - # value = '<%d items>' % len(value) - #else: - # if len(value) > 75: - # value = repr(value[:70]+u' ... 
') - # else: - # value = repr(value) - value = repr(value) - return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value) - -class ScalarNode(Node): - id = 'scalar' - def __init__(self, tag, value, - start_mark=None, end_mark=None, style=None): - self.tag = tag - self.value = value - self.start_mark = start_mark - self.end_mark = end_mark - self.style = style - -class CollectionNode(Node): - def __init__(self, tag, value, - start_mark=None, end_mark=None, flow_style=None): - self.tag = tag - self.value = value - self.start_mark = start_mark - self.end_mark = end_mark - self.flow_style = flow_style - -class SequenceNode(CollectionNode): - id = 'sequence' - -class MappingNode(CollectionNode): - id = 'mapping' - diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml3/parser.py b/src/collectors/python.d.plugin/python_modules/pyyaml3/parser.py deleted file mode 100644 index bcec7f994..000000000 --- a/src/collectors/python.d.plugin/python_modules/pyyaml3/parser.py +++ /dev/null @@ -1,590 +0,0 @@ -# SPDX-License-Identifier: MIT - -# The following YAML grammar is LL(1) and is parsed by a recursive descent -# parser. -# -# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END -# implicit_document ::= block_node DOCUMENT-END* -# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -# block_node_or_indentless_sequence ::= -# ALIAS -# | properties (block_content | indentless_block_sequence)? -# | block_content -# | indentless_block_sequence -# block_node ::= ALIAS -# | properties block_content? -# | block_content -# flow_node ::= ALIAS -# | properties flow_content? -# | flow_content -# properties ::= TAG ANCHOR? | ANCHOR TAG? -# block_content ::= block_collection | flow_collection | SCALAR -# flow_content ::= flow_collection | SCALAR -# block_collection ::= block_sequence | block_mapping -# flow_collection ::= flow_sequence | flow_mapping -# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END -# indentless_sequence ::= (BLOCK-ENTRY block_node?)+ -# block_mapping ::= BLOCK-MAPPING_START -# ((KEY block_node_or_indentless_sequence?)? -# (VALUE block_node_or_indentless_sequence?)?)* -# BLOCK-END -# flow_sequence ::= FLOW-SEQUENCE-START -# (flow_sequence_entry FLOW-ENTRY)* -# flow_sequence_entry? -# FLOW-SEQUENCE-END -# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -# flow_mapping ::= FLOW-MAPPING-START -# (flow_mapping_entry FLOW-ENTRY)* -# flow_mapping_entry? -# FLOW-MAPPING-END -# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
-# -# FIRST sets: -# -# stream: { STREAM-START } -# explicit_document: { DIRECTIVE DOCUMENT-START } -# implicit_document: FIRST(block_node) -# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START } -# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START } -# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR } -# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR } -# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START } -# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START } -# block_sequence: { BLOCK-SEQUENCE-START } -# block_mapping: { BLOCK-MAPPING-START } -# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY } -# indentless_sequence: { ENTRY } -# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START } -# flow_sequence: { FLOW-SEQUENCE-START } -# flow_mapping: { FLOW-MAPPING-START } -# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY } -# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY } - -__all__ = ['Parser', 'ParserError'] - -from .error import MarkedYAMLError -from .tokens import * -from .events import * -from .scanner import * - -class ParserError(MarkedYAMLError): - pass - -class Parser: - # Since writing a recursive-descendant parser is a straightforward task, we - # do not give many comments here. - - DEFAULT_TAGS = { - '!': '!', - '!!': 'tag:yaml.org,2002:', - } - - def __init__(self): - self.current_event = None - self.yaml_version = None - self.tag_handles = {} - self.states = [] - self.marks = [] - self.state = self.parse_stream_start - - def dispose(self): - # Reset the state attributes (to clear self-references) - self.states = [] - self.state = None - - def check_event(self, *choices): - # Check the type of the next event. - if self.current_event is None: - if self.state: - self.current_event = self.state() - if self.current_event is not None: - if not choices: - return True - for choice in choices: - if isinstance(self.current_event, choice): - return True - return False - - def peek_event(self): - # Get the next event. - if self.current_event is None: - if self.state: - self.current_event = self.state() - return self.current_event - - def get_event(self): - # Get the next event and proceed further. - if self.current_event is None: - if self.state: - self.current_event = self.state() - value = self.current_event - self.current_event = None - return value - - # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END - # implicit_document ::= block_node DOCUMENT-END* - # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* - - def parse_stream_start(self): - - # Parse the stream start. - token = self.get_token() - event = StreamStartEvent(token.start_mark, token.end_mark, - encoding=token.encoding) - - # Prepare the next state. - self.state = self.parse_implicit_document_start - - return event - - def parse_implicit_document_start(self): - - # Parse an implicit document. - if not self.check_token(DirectiveToken, DocumentStartToken, - StreamEndToken): - self.tag_handles = self.DEFAULT_TAGS - token = self.peek_token() - start_mark = end_mark = token.start_mark - event = DocumentStartEvent(start_mark, end_mark, - explicit=False) - - # Prepare the next state. 
- self.states.append(self.parse_document_end) - self.state = self.parse_block_node - - return event - - else: - return self.parse_document_start() - - def parse_document_start(self): - - # Parse any extra document end indicators. - while self.check_token(DocumentEndToken): - self.get_token() - - # Parse an explicit document. - if not self.check_token(StreamEndToken): - token = self.peek_token() - start_mark = token.start_mark - version, tags = self.process_directives() - if not self.check_token(DocumentStartToken): - raise ParserError(None, None, - "expected '<document start>', but found %r" - % self.peek_token().id, - self.peek_token().start_mark) - token = self.get_token() - end_mark = token.end_mark - event = DocumentStartEvent(start_mark, end_mark, - explicit=True, version=version, tags=tags) - self.states.append(self.parse_document_end) - self.state = self.parse_document_content - else: - # Parse the end of the stream. - token = self.get_token() - event = StreamEndEvent(token.start_mark, token.end_mark) - assert not self.states - assert not self.marks - self.state = None - return event - - def parse_document_end(self): - - # Parse the document end. - token = self.peek_token() - start_mark = end_mark = token.start_mark - explicit = False - if self.check_token(DocumentEndToken): - token = self.get_token() - end_mark = token.end_mark - explicit = True - event = DocumentEndEvent(start_mark, end_mark, - explicit=explicit) - - # Prepare the next state. - self.state = self.parse_document_start - - return event - - def parse_document_content(self): - if self.check_token(DirectiveToken, - DocumentStartToken, DocumentEndToken, StreamEndToken): - event = self.process_empty_scalar(self.peek_token().start_mark) - self.state = self.states.pop() - return event - else: - return self.parse_block_node() - - def process_directives(self): - self.yaml_version = None - self.tag_handles = {} - while self.check_token(DirectiveToken): - token = self.get_token() - if token.name == 'YAML': - if self.yaml_version is not None: - raise ParserError(None, None, - "found duplicate YAML directive", token.start_mark) - major, minor = token.value - if major != 1: - raise ParserError(None, None, - "found incompatible YAML document (version 1.* is required)", - token.start_mark) - self.yaml_version = token.value - elif token.name == 'TAG': - handle, prefix = token.value - if handle in self.tag_handles: - raise ParserError(None, None, - "duplicate tag handle %r" % handle, - token.start_mark) - self.tag_handles[handle] = prefix - if self.tag_handles: - value = self.yaml_version, self.tag_handles.copy() - else: - value = self.yaml_version, None - for key in self.DEFAULT_TAGS: - if key not in self.tag_handles: - self.tag_handles[key] = self.DEFAULT_TAGS[key] - return value - - # block_node_or_indentless_sequence ::= ALIAS - # | properties (block_content | indentless_block_sequence)? - # | block_content - # | indentless_block_sequence - # block_node ::= ALIAS - # | properties block_content? - # | block_content - # flow_node ::= ALIAS - # | properties flow_content? - # | flow_content - # properties ::= TAG ANCHOR? | ANCHOR TAG? 
- # block_content ::= block_collection | flow_collection | SCALAR - # flow_content ::= flow_collection | SCALAR - # block_collection ::= block_sequence | block_mapping - # flow_collection ::= flow_sequence | flow_mapping - - def parse_block_node(self): - return self.parse_node(block=True) - - def parse_flow_node(self): - return self.parse_node() - - def parse_block_node_or_indentless_sequence(self): - return self.parse_node(block=True, indentless_sequence=True) - - def parse_node(self, block=False, indentless_sequence=False): - if self.check_token(AliasToken): - token = self.get_token() - event = AliasEvent(token.value, token.start_mark, token.end_mark) - self.state = self.states.pop() - else: - anchor = None - tag = None - start_mark = end_mark = tag_mark = None - if self.check_token(AnchorToken): - token = self.get_token() - start_mark = token.start_mark - end_mark = token.end_mark - anchor = token.value - if self.check_token(TagToken): - token = self.get_token() - tag_mark = token.start_mark - end_mark = token.end_mark - tag = token.value - elif self.check_token(TagToken): - token = self.get_token() - start_mark = tag_mark = token.start_mark - end_mark = token.end_mark - tag = token.value - if self.check_token(AnchorToken): - token = self.get_token() - end_mark = token.end_mark - anchor = token.value - if tag is not None: - handle, suffix = tag - if handle is not None: - if handle not in self.tag_handles: - raise ParserError("while parsing a node", start_mark, - "found undefined tag handle %r" % handle, - tag_mark) - tag = self.tag_handles[handle]+suffix - else: - tag = suffix - #if tag == '!': - # raise ParserError("while parsing a node", start_mark, - # "found non-specific tag '!'", tag_mark, - # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.") - if start_mark is None: - start_mark = end_mark = self.peek_token().start_mark - event = None - implicit = (tag is None or tag == '!') - if indentless_sequence and self.check_token(BlockEntryToken): - end_mark = self.peek_token().end_mark - event = SequenceStartEvent(anchor, tag, implicit, - start_mark, end_mark) - self.state = self.parse_indentless_sequence_entry - else: - if self.check_token(ScalarToken): - token = self.get_token() - end_mark = token.end_mark - if (token.plain and tag is None) or tag == '!': - implicit = (True, False) - elif tag is None: - implicit = (False, True) - else: - implicit = (False, False) - event = ScalarEvent(anchor, tag, implicit, token.value, - start_mark, end_mark, style=token.style) - self.state = self.states.pop() - elif self.check_token(FlowSequenceStartToken): - end_mark = self.peek_token().end_mark - event = SequenceStartEvent(anchor, tag, implicit, - start_mark, end_mark, flow_style=True) - self.state = self.parse_flow_sequence_first_entry - elif self.check_token(FlowMappingStartToken): - end_mark = self.peek_token().end_mark - event = MappingStartEvent(anchor, tag, implicit, - start_mark, end_mark, flow_style=True) - self.state = self.parse_flow_mapping_first_key - elif block and self.check_token(BlockSequenceStartToken): - end_mark = self.peek_token().start_mark - event = SequenceStartEvent(anchor, tag, implicit, - start_mark, end_mark, flow_style=False) - self.state = self.parse_block_sequence_first_entry - elif block and self.check_token(BlockMappingStartToken): - end_mark = self.peek_token().start_mark - event = MappingStartEvent(anchor, tag, implicit, - start_mark, end_mark, flow_style=False) - self.state = self.parse_block_mapping_first_key - elif anchor is not 
None or tag is not None: - # Empty scalars are allowed even if a tag or an anchor is - # specified. - event = ScalarEvent(anchor, tag, (implicit, False), '', - start_mark, end_mark) - self.state = self.states.pop() - else: - if block: - node = 'block' - else: - node = 'flow' - token = self.peek_token() - raise ParserError("while parsing a %s node" % node, start_mark, - "expected the node content, but found %r" % token.id, - token.start_mark) - return event - - # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END - - def parse_block_sequence_first_entry(self): - token = self.get_token() - self.marks.append(token.start_mark) - return self.parse_block_sequence_entry() - - def parse_block_sequence_entry(self): - if self.check_token(BlockEntryToken): - token = self.get_token() - if not self.check_token(BlockEntryToken, BlockEndToken): - self.states.append(self.parse_block_sequence_entry) - return self.parse_block_node() - else: - self.state = self.parse_block_sequence_entry - return self.process_empty_scalar(token.end_mark) - if not self.check_token(BlockEndToken): - token = self.peek_token() - raise ParserError("while parsing a block collection", self.marks[-1], - "expected , but found %r" % token.id, token.start_mark) - token = self.get_token() - event = SequenceEndEvent(token.start_mark, token.end_mark) - self.state = self.states.pop() - self.marks.pop() - return event - - # indentless_sequence ::= (BLOCK-ENTRY block_node?)+ - - def parse_indentless_sequence_entry(self): - if self.check_token(BlockEntryToken): - token = self.get_token() - if not self.check_token(BlockEntryToken, - KeyToken, ValueToken, BlockEndToken): - self.states.append(self.parse_indentless_sequence_entry) - return self.parse_block_node() - else: - self.state = self.parse_indentless_sequence_entry - return self.process_empty_scalar(token.end_mark) - token = self.peek_token() - event = SequenceEndEvent(token.start_mark, token.start_mark) - self.state = self.states.pop() - return event - - # block_mapping ::= BLOCK-MAPPING_START - # ((KEY block_node_or_indentless_sequence?)? 
- # (VALUE block_node_or_indentless_sequence?)?)* - # BLOCK-END - - def parse_block_mapping_first_key(self): - token = self.get_token() - self.marks.append(token.start_mark) - return self.parse_block_mapping_key() - - def parse_block_mapping_key(self): - if self.check_token(KeyToken): - token = self.get_token() - if not self.check_token(KeyToken, ValueToken, BlockEndToken): - self.states.append(self.parse_block_mapping_value) - return self.parse_block_node_or_indentless_sequence() - else: - self.state = self.parse_block_mapping_value - return self.process_empty_scalar(token.end_mark) - if not self.check_token(BlockEndToken): - token = self.peek_token() - raise ParserError("while parsing a block mapping", self.marks[-1], - "expected , but found %r" % token.id, token.start_mark) - token = self.get_token() - event = MappingEndEvent(token.start_mark, token.end_mark) - self.state = self.states.pop() - self.marks.pop() - return event - - def parse_block_mapping_value(self): - if self.check_token(ValueToken): - token = self.get_token() - if not self.check_token(KeyToken, ValueToken, BlockEndToken): - self.states.append(self.parse_block_mapping_key) - return self.parse_block_node_or_indentless_sequence() - else: - self.state = self.parse_block_mapping_key - return self.process_empty_scalar(token.end_mark) - else: - self.state = self.parse_block_mapping_key - token = self.peek_token() - return self.process_empty_scalar(token.start_mark) - - # flow_sequence ::= FLOW-SEQUENCE-START - # (flow_sequence_entry FLOW-ENTRY)* - # flow_sequence_entry? - # FLOW-SEQUENCE-END - # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? - # - # Note that while production rules for both flow_sequence_entry and - # flow_mapping_entry are equal, their interpretations are different. - # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?` - # generate an inline mapping (set syntax). 
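# A minimal illustration of the flow_sequence_entry production above (a
# sketch for this note, not part of the vendored module; it assumes a
# system-wide PyYAML install is importable as `yaml`): a "KEY flow_node"
# entry inside a flow sequence produces an inline single-pair mapping,
# even though no '{...}' appears in the source text.
import yaml
assert yaml.safe_load('[a: 1, b]') == [{'a': 1}, 'b']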
- - def parse_flow_sequence_first_entry(self): - token = self.get_token() - self.marks.append(token.start_mark) - return self.parse_flow_sequence_entry(first=True) - - def parse_flow_sequence_entry(self, first=False): - if not self.check_token(FlowSequenceEndToken): - if not first: - if self.check_token(FlowEntryToken): - self.get_token() - else: - token = self.peek_token() - raise ParserError("while parsing a flow sequence", self.marks[-1], - "expected ',' or ']', but got %r" % token.id, token.start_mark) - - if self.check_token(KeyToken): - token = self.peek_token() - event = MappingStartEvent(None, None, True, - token.start_mark, token.end_mark, - flow_style=True) - self.state = self.parse_flow_sequence_entry_mapping_key - return event - elif not self.check_token(FlowSequenceEndToken): - self.states.append(self.parse_flow_sequence_entry) - return self.parse_flow_node() - token = self.get_token() - event = SequenceEndEvent(token.start_mark, token.end_mark) - self.state = self.states.pop() - self.marks.pop() - return event - - def parse_flow_sequence_entry_mapping_key(self): - token = self.get_token() - if not self.check_token(ValueToken, - FlowEntryToken, FlowSequenceEndToken): - self.states.append(self.parse_flow_sequence_entry_mapping_value) - return self.parse_flow_node() - else: - self.state = self.parse_flow_sequence_entry_mapping_value - return self.process_empty_scalar(token.end_mark) - - def parse_flow_sequence_entry_mapping_value(self): - if self.check_token(ValueToken): - token = self.get_token() - if not self.check_token(FlowEntryToken, FlowSequenceEndToken): - self.states.append(self.parse_flow_sequence_entry_mapping_end) - return self.parse_flow_node() - else: - self.state = self.parse_flow_sequence_entry_mapping_end - return self.process_empty_scalar(token.end_mark) - else: - self.state = self.parse_flow_sequence_entry_mapping_end - token = self.peek_token() - return self.process_empty_scalar(token.start_mark) - - def parse_flow_sequence_entry_mapping_end(self): - self.state = self.parse_flow_sequence_entry - token = self.peek_token() - return MappingEndEvent(token.start_mark, token.start_mark) - - # flow_mapping ::= FLOW-MAPPING-START - # (flow_mapping_entry FLOW-ENTRY)* - # flow_mapping_entry? - # FLOW-MAPPING-END - # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
- - def parse_flow_mapping_first_key(self): - token = self.get_token() - self.marks.append(token.start_mark) - return self.parse_flow_mapping_key(first=True) - - def parse_flow_mapping_key(self, first=False): - if not self.check_token(FlowMappingEndToken): - if not first: - if self.check_token(FlowEntryToken): - self.get_token() - else: - token = self.peek_token() - raise ParserError("while parsing a flow mapping", self.marks[-1], - "expected ',' or '}', but got %r" % token.id, token.start_mark) - if self.check_token(KeyToken): - token = self.get_token() - if not self.check_token(ValueToken, - FlowEntryToken, FlowMappingEndToken): - self.states.append(self.parse_flow_mapping_value) - return self.parse_flow_node() - else: - self.state = self.parse_flow_mapping_value - return self.process_empty_scalar(token.end_mark) - elif not self.check_token(FlowMappingEndToken): - self.states.append(self.parse_flow_mapping_empty_value) - return self.parse_flow_node() - token = self.get_token() - event = MappingEndEvent(token.start_mark, token.end_mark) - self.state = self.states.pop() - self.marks.pop() - return event - - def parse_flow_mapping_value(self): - if self.check_token(ValueToken): - token = self.get_token() - if not self.check_token(FlowEntryToken, FlowMappingEndToken): - self.states.append(self.parse_flow_mapping_key) - return self.parse_flow_node() - else: - self.state = self.parse_flow_mapping_key - return self.process_empty_scalar(token.end_mark) - else: - self.state = self.parse_flow_mapping_key - token = self.peek_token() - return self.process_empty_scalar(token.start_mark) - - def parse_flow_mapping_empty_value(self): - self.state = self.parse_flow_mapping_key - return self.process_empty_scalar(self.peek_token().start_mark) - - def process_empty_scalar(self, mark): - return ScalarEvent(None, None, (True, False), '', mark, mark) - diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml3/reader.py b/src/collectors/python.d.plugin/python_modules/pyyaml3/reader.py deleted file mode 100644 index 0a515fd64..000000000 --- a/src/collectors/python.d.plugin/python_modules/pyyaml3/reader.py +++ /dev/null @@ -1,193 +0,0 @@ -# SPDX-License-Identifier: MIT -# This module contains abstractions for the input stream. You don't have to -# looks further, there are no pretty code. -# -# We define two classes here. -# -# Mark(source, line, column) -# It's just a record and its only use is producing nice error messages. -# Parser does not use it for any other purposes. -# -# Reader(source, data) -# Reader determines the encoding of `data` and converts it to unicode. -# Reader provides the following methods and attributes: -# reader.peek(length=1) - return the next `length` characters -# reader.forward(length=1) - move the current position to `length` characters. -# reader.index - the number of the current character. -# reader.line, stream.column - the line and the column of the current character. 
- -__all__ = ['Reader', 'ReaderError'] - -from .error import YAMLError, Mark - -import codecs, re - -class ReaderError(YAMLError): - - def __init__(self, name, position, character, encoding, reason): - self.name = name - self.character = character - self.position = position - self.encoding = encoding - self.reason = reason - - def __str__(self): - if isinstance(self.character, bytes): - return "'%s' codec can't decode byte #x%02x: %s\n" \ - " in \"%s\", position %d" \ - % (self.encoding, ord(self.character), self.reason, - self.name, self.position) - else: - return "unacceptable character #x%04x: %s\n" \ - " in \"%s\", position %d" \ - % (self.character, self.reason, - self.name, self.position) - -class Reader(object): - # Reader: - # - determines the data encoding and converts it to a unicode string, - # - checks if characters are in allowed range, - # - adds '\0' to the end. - - # Reader accepts - # - a `bytes` object, - # - a `str` object, - # - a file-like object with its `read` method returning `str`, - # - a file-like object with its `read` method returning `unicode`. - - # Yeah, it's ugly and slow. - - def __init__(self, stream): - self.name = None - self.stream = None - self.stream_pointer = 0 - self.eof = True - self.buffer = '' - self.pointer = 0 - self.raw_buffer = None - self.raw_decode = None - self.encoding = None - self.index = 0 - self.line = 0 - self.column = 0 - if isinstance(stream, str): - self.name = "" - self.check_printable(stream) - self.buffer = stream+'\0' - elif isinstance(stream, bytes): - self.name = "" - self.raw_buffer = stream - self.determine_encoding() - else: - self.stream = stream - self.name = getattr(stream, 'name', "") - self.eof = False - self.raw_buffer = None - self.determine_encoding() - - def peek(self, index=0): - try: - return self.buffer[self.pointer+index] - except IndexError: - self.update(index+1) - return self.buffer[self.pointer+index] - - def prefix(self, length=1): - if self.pointer+length >= len(self.buffer): - self.update(length) - return self.buffer[self.pointer:self.pointer+length] - - def forward(self, length=1): - if self.pointer+length+1 >= len(self.buffer): - self.update(length+1) - while length: - ch = self.buffer[self.pointer] - self.pointer += 1 - self.index += 1 - if ch in '\n\x85\u2028\u2029' \ - or (ch == '\r' and self.buffer[self.pointer] != '\n'): - self.line += 1 - self.column = 0 - elif ch != '\uFEFF': - self.column += 1 - length -= 1 - - def get_mark(self): - if self.stream is None: - return Mark(self.name, self.index, self.line, self.column, - self.buffer, self.pointer) - else: - return Mark(self.name, self.index, self.line, self.column, - None, None) - - def determine_encoding(self): - while not self.eof and (self.raw_buffer is None or len(self.raw_buffer) < 2): - self.update_raw() - if isinstance(self.raw_buffer, bytes): - if self.raw_buffer.startswith(codecs.BOM_UTF16_LE): - self.raw_decode = codecs.utf_16_le_decode - self.encoding = 'utf-16-le' - elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE): - self.raw_decode = codecs.utf_16_be_decode - self.encoding = 'utf-16-be' - else: - self.raw_decode = codecs.utf_8_decode - self.encoding = 'utf-8' - self.update(1) - - NON_PRINTABLE = re.compile('[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]') - def check_printable(self, data): - match = self.NON_PRINTABLE.search(data) - if match: - character = match.group() - position = self.index+(len(self.buffer)-self.pointer)+match.start() - raise ReaderError(self.name, position, ord(character), - 'unicode', "special 
characters are not allowed") - - def update(self, length): - if self.raw_buffer is None: - return - self.buffer = self.buffer[self.pointer:] - self.pointer = 0 - while len(self.buffer) < length: - if not self.eof: - self.update_raw() - if self.raw_decode is not None: - try: - data, converted = self.raw_decode(self.raw_buffer, - 'strict', self.eof) - except UnicodeDecodeError as exc: - character = self.raw_buffer[exc.start] - if self.stream is not None: - position = self.stream_pointer-len(self.raw_buffer)+exc.start - else: - position = exc.start - raise ReaderError(self.name, position, character, - exc.encoding, exc.reason) - else: - data = self.raw_buffer - converted = len(data) - self.check_printable(data) - self.buffer += data - self.raw_buffer = self.raw_buffer[converted:] - if self.eof: - self.buffer += '\0' - self.raw_buffer = None - break - - def update_raw(self, size=4096): - data = self.stream.read(size) - if self.raw_buffer is None: - self.raw_buffer = data - else: - self.raw_buffer += data - self.stream_pointer += len(data) - if not data: - self.eof = True - -#try: -# import psyco -# psyco.bind(Reader) -#except ImportError: -# pass - diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml3/representer.py b/src/collectors/python.d.plugin/python_modules/pyyaml3/representer.py deleted file mode 100644 index 756a18dcc..000000000 --- a/src/collectors/python.d.plugin/python_modules/pyyaml3/representer.py +++ /dev/null @@ -1,375 +0,0 @@ -# SPDX-License-Identifier: MIT - -__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer', - 'RepresenterError'] - -from .error import * -from .nodes import * - -import datetime, sys, copyreg, types, base64 - -class RepresenterError(YAMLError): - pass - -class BaseRepresenter: - - yaml_representers = {} - yaml_multi_representers = {} - - def __init__(self, default_style=None, default_flow_style=None): - self.default_style = default_style - self.default_flow_style = default_flow_style - self.represented_objects = {} - self.object_keeper = [] - self.alias_key = None - - def represent(self, data): - node = self.represent_data(data) - self.serialize(node) - self.represented_objects = {} - self.object_keeper = [] - self.alias_key = None - - def represent_data(self, data): - if self.ignore_aliases(data): - self.alias_key = None - else: - self.alias_key = id(data) - if self.alias_key is not None: - if self.alias_key in self.represented_objects: - node = self.represented_objects[self.alias_key] - #if node is None: - # raise RepresenterError("recursive objects are not allowed: %r" % data) - return node - #self.represented_objects[alias_key] = None - self.object_keeper.append(data) - data_types = type(data).__mro__ - if data_types[0] in self.yaml_representers: - node = self.yaml_representers[data_types[0]](self, data) - else: - for data_type in data_types: - if data_type in self.yaml_multi_representers: - node = self.yaml_multi_representers[data_type](self, data) - break - else: - if None in self.yaml_multi_representers: - node = self.yaml_multi_representers[None](self, data) - elif None in self.yaml_representers: - node = self.yaml_representers[None](self, data) - else: - node = ScalarNode(None, str(data)) - #if alias_key is not None: - # self.represented_objects[alias_key] = node - return node - - @classmethod - def add_representer(cls, data_type, representer): - if not 'yaml_representers' in cls.__dict__: - cls.yaml_representers = cls.yaml_representers.copy() - cls.yaml_representers[data_type] = representer - - @classmethod - def 
add_multi_representer(cls, data_type, representer): - if not 'yaml_multi_representers' in cls.__dict__: - cls.yaml_multi_representers = cls.yaml_multi_representers.copy() - cls.yaml_multi_representers[data_type] = representer - - def represent_scalar(self, tag, value, style=None): - if style is None: - style = self.default_style - node = ScalarNode(tag, value, style=style) - if self.alias_key is not None: - self.represented_objects[self.alias_key] = node - return node - - def represent_sequence(self, tag, sequence, flow_style=None): - value = [] - node = SequenceNode(tag, value, flow_style=flow_style) - if self.alias_key is not None: - self.represented_objects[self.alias_key] = node - best_style = True - for item in sequence: - node_item = self.represent_data(item) - if not (isinstance(node_item, ScalarNode) and not node_item.style): - best_style = False - value.append(node_item) - if flow_style is None: - if self.default_flow_style is not None: - node.flow_style = self.default_flow_style - else: - node.flow_style = best_style - return node - - def represent_mapping(self, tag, mapping, flow_style=None): - value = [] - node = MappingNode(tag, value, flow_style=flow_style) - if self.alias_key is not None: - self.represented_objects[self.alias_key] = node - best_style = True - if hasattr(mapping, 'items'): - mapping = list(mapping.items()) - try: - mapping = sorted(mapping) - except TypeError: - pass - for item_key, item_value in mapping: - node_key = self.represent_data(item_key) - node_value = self.represent_data(item_value) - if not (isinstance(node_key, ScalarNode) and not node_key.style): - best_style = False - if not (isinstance(node_value, ScalarNode) and not node_value.style): - best_style = False - value.append((node_key, node_value)) - if flow_style is None: - if self.default_flow_style is not None: - node.flow_style = self.default_flow_style - else: - node.flow_style = best_style - return node - - def ignore_aliases(self, data): - return False - -class SafeRepresenter(BaseRepresenter): - - def ignore_aliases(self, data): - if data in [None, ()]: - return True - if isinstance(data, (str, bytes, bool, int, float)): - return True - - def represent_none(self, data): - return self.represent_scalar('tag:yaml.org,2002:null', 'null') - - def represent_str(self, data): - return self.represent_scalar('tag:yaml.org,2002:str', data) - - def represent_binary(self, data): - if hasattr(base64, 'encodebytes'): - data = base64.encodebytes(data).decode('ascii') - else: - data = base64.encodestring(data).decode('ascii') - return self.represent_scalar('tag:yaml.org,2002:binary', data, style='|') - - def represent_bool(self, data): - if data: - value = 'true' - else: - value = 'false' - return self.represent_scalar('tag:yaml.org,2002:bool', value) - - def represent_int(self, data): - return self.represent_scalar('tag:yaml.org,2002:int', str(data)) - - inf_value = 1e300 - while repr(inf_value) != repr(inf_value*inf_value): - inf_value *= inf_value - - def represent_float(self, data): - if data != data or (data == 0.0 and data == 1.0): - value = '.nan' - elif data == self.inf_value: - value = '.inf' - elif data == -self.inf_value: - value = '-.inf' - else: - value = repr(data).lower() - # Note that in some cases `repr(data)` represents a float number - # without the decimal parts. For instance: - # >>> repr(1e17) - # '1e17' - # Unfortunately, this is not a valid float representation according - # to the definition of the `!!float` tag. We fix this by adding - # '.0' before the 'e' symbol. - if '.' 
not in value and 'e' in value: - value = value.replace('e', '.0e', 1) - return self.represent_scalar('tag:yaml.org,2002:float', value) - - def represent_list(self, data): - #pairs = (len(data) > 0 and isinstance(data, list)) - #if pairs: - # for item in data: - # if not isinstance(item, tuple) or len(item) != 2: - # pairs = False - # break - #if not pairs: - return self.represent_sequence('tag:yaml.org,2002:seq', data) - #value = [] - #for item_key, item_value in data: - # value.append(self.represent_mapping(u'tag:yaml.org,2002:map', - # [(item_key, item_value)])) - #return SequenceNode(u'tag:yaml.org,2002:pairs', value) - - def represent_dict(self, data): - return self.represent_mapping('tag:yaml.org,2002:map', data) - - def represent_set(self, data): - value = {} - for key in data: - value[key] = None - return self.represent_mapping('tag:yaml.org,2002:set', value) - - def represent_date(self, data): - value = data.isoformat() - return self.represent_scalar('tag:yaml.org,2002:timestamp', value) - - def represent_datetime(self, data): - value = data.isoformat(' ') - return self.represent_scalar('tag:yaml.org,2002:timestamp', value) - - def represent_yaml_object(self, tag, data, cls, flow_style=None): - if hasattr(data, '__getstate__'): - state = data.__getstate__() - else: - state = data.__dict__.copy() - return self.represent_mapping(tag, state, flow_style=flow_style) - - def represent_undefined(self, data): - raise RepresenterError("cannot represent an object: %s" % data) - -SafeRepresenter.add_representer(type(None), - SafeRepresenter.represent_none) - -SafeRepresenter.add_representer(str, - SafeRepresenter.represent_str) - -SafeRepresenter.add_representer(bytes, - SafeRepresenter.represent_binary) - -SafeRepresenter.add_representer(bool, - SafeRepresenter.represent_bool) - -SafeRepresenter.add_representer(int, - SafeRepresenter.represent_int) - -SafeRepresenter.add_representer(float, - SafeRepresenter.represent_float) - -SafeRepresenter.add_representer(list, - SafeRepresenter.represent_list) - -SafeRepresenter.add_representer(tuple, - SafeRepresenter.represent_list) - -SafeRepresenter.add_representer(dict, - SafeRepresenter.represent_dict) - -SafeRepresenter.add_representer(set, - SafeRepresenter.represent_set) - -SafeRepresenter.add_representer(datetime.date, - SafeRepresenter.represent_date) - -SafeRepresenter.add_representer(datetime.datetime, - SafeRepresenter.represent_datetime) - -SafeRepresenter.add_representer(None, - SafeRepresenter.represent_undefined) - -class Representer(SafeRepresenter): - - def represent_complex(self, data): - if data.imag == 0.0: - data = '%r' % data.real - elif data.real == 0.0: - data = '%rj' % data.imag - elif data.imag > 0: - data = '%r+%rj' % (data.real, data.imag) - else: - data = '%r%rj' % (data.real, data.imag) - return self.represent_scalar('tag:yaml.org,2002:python/complex', data) - - def represent_tuple(self, data): - return self.represent_sequence('tag:yaml.org,2002:python/tuple', data) - - def represent_name(self, data): - name = '%s.%s' % (data.__module__, data.__name__) - return self.represent_scalar('tag:yaml.org,2002:python/name:'+name, '') - - def represent_module(self, data): - return self.represent_scalar( - 'tag:yaml.org,2002:python/module:'+data.__name__, '') - - def represent_object(self, data): - # We use __reduce__ API to save the data. 
data.__reduce__ returns - # a tuple of length 2-5: - # (function, args, state, listitems, dictitems) - - # For reconstructing, we calls function(*args), then set its state, - # listitems, and dictitems if they are not None. - - # A special case is when function.__name__ == '__newobj__'. In this - # case we create the object with args[0].__new__(*args). - - # Another special case is when __reduce__ returns a string - we don't - # support it. - - # We produce a !!python/object, !!python/object/new or - # !!python/object/apply node. - - cls = type(data) - if cls in copyreg.dispatch_table: - reduce = copyreg.dispatch_table[cls](data) - elif hasattr(data, '__reduce_ex__'): - reduce = data.__reduce_ex__(2) - elif hasattr(data, '__reduce__'): - reduce = data.__reduce__() - else: - raise RepresenterError("cannot represent object: %r" % data) - reduce = (list(reduce)+[None]*5)[:5] - function, args, state, listitems, dictitems = reduce - args = list(args) - if state is None: - state = {} - if listitems is not None: - listitems = list(listitems) - if dictitems is not None: - dictitems = dict(dictitems) - if function.__name__ == '__newobj__': - function = args[0] - args = args[1:] - tag = 'tag:yaml.org,2002:python/object/new:' - newobj = True - else: - tag = 'tag:yaml.org,2002:python/object/apply:' - newobj = False - function_name = '%s.%s' % (function.__module__, function.__name__) - if not args and not listitems and not dictitems \ - and isinstance(state, dict) and newobj: - return self.represent_mapping( - 'tag:yaml.org,2002:python/object:'+function_name, state) - if not listitems and not dictitems \ - and isinstance(state, dict) and not state: - return self.represent_sequence(tag+function_name, args) - value = {} - if args: - value['args'] = args - if state or not isinstance(state, dict): - value['state'] = state - if listitems: - value['listitems'] = listitems - if dictitems: - value['dictitems'] = dictitems - return self.represent_mapping(tag+function_name, value) - -Representer.add_representer(complex, - Representer.represent_complex) - -Representer.add_representer(tuple, - Representer.represent_tuple) - -Representer.add_representer(type, - Representer.represent_name) - -Representer.add_representer(types.FunctionType, - Representer.represent_name) - -Representer.add_representer(types.BuiltinFunctionType, - Representer.represent_name) - -Representer.add_representer(types.ModuleType, - Representer.represent_module) - -Representer.add_multi_representer(object, - Representer.represent_object) - diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml3/resolver.py b/src/collectors/python.d.plugin/python_modules/pyyaml3/resolver.py deleted file mode 100644 index 50945e04d..000000000 --- a/src/collectors/python.d.plugin/python_modules/pyyaml3/resolver.py +++ /dev/null @@ -1,225 +0,0 @@ -# SPDX-License-Identifier: MIT - -__all__ = ['BaseResolver', 'Resolver'] - -from .error import * -from .nodes import * - -import re - -class ResolverError(YAMLError): - pass - -class BaseResolver: - - DEFAULT_SCALAR_TAG = 'tag:yaml.org,2002:str' - DEFAULT_SEQUENCE_TAG = 'tag:yaml.org,2002:seq' - DEFAULT_MAPPING_TAG = 'tag:yaml.org,2002:map' - - yaml_implicit_resolvers = {} - yaml_path_resolvers = {} - - def __init__(self): - self.resolver_exact_paths = [] - self.resolver_prefix_paths = [] - - @classmethod - def add_implicit_resolver(cls, tag, regexp, first): - if not 'yaml_implicit_resolvers' in cls.__dict__: - cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy() - if first is None: - first = 
[None] - for ch in first: - cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp)) - - @classmethod - def add_path_resolver(cls, tag, path, kind=None): - # Note: `add_path_resolver` is experimental. The API could be changed. - # `new_path` is a pattern that is matched against the path from the - # root to the node that is being considered. `node_path` elements are - # tuples `(node_check, index_check)`. `node_check` is a node class: - # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None` - # matches any kind of a node. `index_check` could be `None`, a boolean - # value, a string value, or a number. `None` and `False` match against - # any _value_ of sequence and mapping nodes. `True` matches against - # any _key_ of a mapping node. A string `index_check` matches against - # a mapping value that corresponds to a scalar key which content is - # equal to the `index_check` value. An integer `index_check` matches - # against a sequence value with the index equal to `index_check`. - if not 'yaml_path_resolvers' in cls.__dict__: - cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy() - new_path = [] - for element in path: - if isinstance(element, (list, tuple)): - if len(element) == 2: - node_check, index_check = element - elif len(element) == 1: - node_check = element[0] - index_check = True - else: - raise ResolverError("Invalid path element: %s" % element) - else: - node_check = None - index_check = element - if node_check is str: - node_check = ScalarNode - elif node_check is list: - node_check = SequenceNode - elif node_check is dict: - node_check = MappingNode - elif node_check not in [ScalarNode, SequenceNode, MappingNode] \ - and not isinstance(node_check, str) \ - and node_check is not None: - raise ResolverError("Invalid node checker: %s" % node_check) - if not isinstance(index_check, (str, int)) \ - and index_check is not None: - raise ResolverError("Invalid index checker: %s" % index_check) - new_path.append((node_check, index_check)) - if kind is str: - kind = ScalarNode - elif kind is list: - kind = SequenceNode - elif kind is dict: - kind = MappingNode - elif kind not in [ScalarNode, SequenceNode, MappingNode] \ - and kind is not None: - raise ResolverError("Invalid node kind: %s" % kind) - cls.yaml_path_resolvers[tuple(new_path), kind] = tag - - def descend_resolver(self, current_node, current_index): - if not self.yaml_path_resolvers: - return - exact_paths = {} - prefix_paths = [] - if current_node: - depth = len(self.resolver_prefix_paths) - for path, kind in self.resolver_prefix_paths[-1]: - if self.check_resolver_prefix(depth, path, kind, - current_node, current_index): - if len(path) > depth: - prefix_paths.append((path, kind)) - else: - exact_paths[kind] = self.yaml_path_resolvers[path, kind] - else: - for path, kind in self.yaml_path_resolvers: - if not path: - exact_paths[kind] = self.yaml_path_resolvers[path, kind] - else: - prefix_paths.append((path, kind)) - self.resolver_exact_paths.append(exact_paths) - self.resolver_prefix_paths.append(prefix_paths) - - def ascend_resolver(self): - if not self.yaml_path_resolvers: - return - self.resolver_exact_paths.pop() - self.resolver_prefix_paths.pop() - - def check_resolver_prefix(self, depth, path, kind, - current_node, current_index): - node_check, index_check = path[depth-1] - if isinstance(node_check, str): - if current_node.tag != node_check: - return - elif node_check is not None: - if not isinstance(current_node, node_check): - return - if index_check is True and current_index is not None: - 
return - if (index_check is False or index_check is None) \ - and current_index is None: - return - if isinstance(index_check, str): - if not (isinstance(current_index, ScalarNode) - and index_check == current_index.value): - return - elif isinstance(index_check, int) and not isinstance(index_check, bool): - if index_check != current_index: - return - return True - - def resolve(self, kind, value, implicit): - if kind is ScalarNode and implicit[0]: - if value == '': - resolvers = self.yaml_implicit_resolvers.get('', []) - else: - resolvers = self.yaml_implicit_resolvers.get(value[0], []) - resolvers += self.yaml_implicit_resolvers.get(None, []) - for tag, regexp in resolvers: - if regexp.match(value): - return tag - implicit = implicit[1] - if self.yaml_path_resolvers: - exact_paths = self.resolver_exact_paths[-1] - if kind in exact_paths: - return exact_paths[kind] - if None in exact_paths: - return exact_paths[None] - if kind is ScalarNode: - return self.DEFAULT_SCALAR_TAG - elif kind is SequenceNode: - return self.DEFAULT_SEQUENCE_TAG - elif kind is MappingNode: - return self.DEFAULT_MAPPING_TAG - -class Resolver(BaseResolver): - pass - -Resolver.add_implicit_resolver( - 'tag:yaml.org,2002:bool', - re.compile(r'''^(?:yes|Yes|YES|no|No|NO - |true|True|TRUE|false|False|FALSE - |on|On|ON|off|Off|OFF)$''', re.X), - list('yYnNtTfFoO')) - -Resolver.add_implicit_resolver( - 'tag:yaml.org,2002:float', - re.compile(r'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)? - |\.[0-9_]+(?:[eE][-+][0-9]+)? - |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]* - |[-+]?\.(?:inf|Inf|INF) - |\.(?:nan|NaN|NAN))$''', re.X), - list('-+0123456789.')) - -Resolver.add_implicit_resolver( - 'tag:yaml.org,2002:int', - re.compile(r'''^(?:[-+]?0b[0-1_]+ - |[-+]?0[0-7_]+ - |[-+]?(?:0|[1-9][0-9_]*) - |[-+]?0x[0-9a-fA-F_]+ - |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X), - list('-+0123456789')) - -Resolver.add_implicit_resolver( - 'tag:yaml.org,2002:merge', - re.compile(r'^(?:<<)$'), - ['<']) - -Resolver.add_implicit_resolver( - 'tag:yaml.org,2002:null', - re.compile(r'''^(?: ~ - |null|Null|NULL - | )$''', re.X), - ['~', 'n', 'N', '']) - -Resolver.add_implicit_resolver( - 'tag:yaml.org,2002:timestamp', - re.compile(r'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9] - |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]? - (?:[Tt]|[ \t]+)[0-9][0-9]? - :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)? - (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X), - list('0123456789')) - -Resolver.add_implicit_resolver( - 'tag:yaml.org,2002:value', - re.compile(r'^(?:=)$'), - ['=']) - -# The following resolver is only for documentation purposes. It cannot work -# because plain scalars cannot start with '!', '&', or '*'. 
-Resolver.add_implicit_resolver( - 'tag:yaml.org,2002:yaml', - re.compile(r'^(?:!|&|\*)$'), - list('!&*')) - diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml3/scanner.py b/src/collectors/python.d.plugin/python_modules/pyyaml3/scanner.py deleted file mode 100644 index b55854e8b..000000000 --- a/src/collectors/python.d.plugin/python_modules/pyyaml3/scanner.py +++ /dev/null @@ -1,1449 +0,0 @@ -# SPDX-License-Identifier: MIT - -# Scanner produces tokens of the following types: -# STREAM-START -# STREAM-END -# DIRECTIVE(name, value) -# DOCUMENT-START -# DOCUMENT-END -# BLOCK-SEQUENCE-START -# BLOCK-MAPPING-START -# BLOCK-END -# FLOW-SEQUENCE-START -# FLOW-MAPPING-START -# FLOW-SEQUENCE-END -# FLOW-MAPPING-END -# BLOCK-ENTRY -# FLOW-ENTRY -# KEY -# VALUE -# ALIAS(value) -# ANCHOR(value) -# TAG(value) -# SCALAR(value, plain, style) -# -# Read comments in the Scanner code for more details. -# - -__all__ = ['Scanner', 'ScannerError'] - -from .error import MarkedYAMLError -from .tokens import * - -class ScannerError(MarkedYAMLError): - pass - -class SimpleKey: - # See below simple keys treatment. - - def __init__(self, token_number, required, index, line, column, mark): - self.token_number = token_number - self.required = required - self.index = index - self.line = line - self.column = column - self.mark = mark - -class Scanner: - - def __init__(self): - """Initialize the scanner.""" - # It is assumed that Scanner and Reader will have a common descendant. - # Reader do the dirty work of checking for BOM and converting the - # input data to Unicode. It also adds NUL to the end. - # - # Reader supports the following methods - # self.peek(i=0) # peek the next i-th character - # self.prefix(l=1) # peek the next l characters - # self.forward(l=1) # read the next l characters and move the pointer. - - # Had we reached the end of the stream? - self.done = False - - # The number of unclosed '{' and '['. `flow_level == 0` means block - # context. - self.flow_level = 0 - - # List of processed tokens that are not yet emitted. - self.tokens = [] - - # Add the STREAM-START token. - self.fetch_stream_start() - - # Number of tokens that were emitted through the `get_token` method. - self.tokens_taken = 0 - - # The current indentation level. - self.indent = -1 - - # Past indentation levels. - self.indents = [] - - # Variables related to simple keys treatment. - - # A simple key is a key that is not denoted by the '?' indicator. - # Example of simple keys: - # --- - # block simple key: value - # ? not a simple key: - # : { flow simple key: value } - # We emit the KEY token before all keys, so when we find a potential - # simple key, we try to locate the corresponding ':' indicator. - # Simple keys should be limited to a single line and 1024 characters. - - # Can a simple key start at the current position? A simple key may - # start: - # - at the beginning of the line, not counting indentation spaces - # (in block context), - # - after '{', '[', ',' (in the flow context), - # - after '?', ':', '-' (in the block context). - # In the block context, this flag also signifies if a block collection - # may start at the current position. - self.allow_simple_key = True - - # Keep track of possible simple keys. This is a dictionary. The key - # is `flow_level`; there can be no more that one possible simple key - # for each level. The value is a SimpleKey record: - # (token_number, required, index, line, column, mark) - # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow), - # '[', or '{' tokens. 
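# A minimal illustration of the two key styles described in the comment
# above (a sketch assuming a PyYAML install; not part of the vendored
# module). A simple key carries no '?' indicator, so the scanner only
# learns it was a key when the ':' arrives -- which is exactly why the
# possible_simple_keys bookkeeping below has to track candidates.
import yaml
assert yaml.safe_load('simple key: 1') == {'simple key': 1}
assert yaml.safe_load('? explicit key\n: 2') == {'explicit key': 2}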
- self.possible_simple_keys = {} - - # Public methods. - - def check_token(self, *choices): - # Check if the next token is one of the given types. - while self.need_more_tokens(): - self.fetch_more_tokens() - if self.tokens: - if not choices: - return True - for choice in choices: - if isinstance(self.tokens[0], choice): - return True - return False - - def peek_token(self): - # Return the next token, but do not delete if from the queue. - while self.need_more_tokens(): - self.fetch_more_tokens() - if self.tokens: - return self.tokens[0] - - def get_token(self): - # Return the next token. - while self.need_more_tokens(): - self.fetch_more_tokens() - if self.tokens: - self.tokens_taken += 1 - return self.tokens.pop(0) - - # Private methods. - - def need_more_tokens(self): - if self.done: - return False - if not self.tokens: - return True - # The current token may be a potential simple key, so we - # need to look further. - self.stale_possible_simple_keys() - if self.next_possible_simple_key() == self.tokens_taken: - return True - - def fetch_more_tokens(self): - - # Eat whitespaces and comments until we reach the next token. - self.scan_to_next_token() - - # Remove obsolete possible simple keys. - self.stale_possible_simple_keys() - - # Compare the current indentation and column. It may add some tokens - # and decrease the current indentation level. - self.unwind_indent(self.column) - - # Peek the next character. - ch = self.peek() - - # Is it the end of stream? - if ch == '\0': - return self.fetch_stream_end() - - # Is it a directive? - if ch == '%' and self.check_directive(): - return self.fetch_directive() - - # Is it the document start? - if ch == '-' and self.check_document_start(): - return self.fetch_document_start() - - # Is it the document end? - if ch == '.' and self.check_document_end(): - return self.fetch_document_end() - - # TODO: support for BOM within a stream. - #if ch == '\uFEFF': - # return self.fetch_bom() <-- issue BOMToken - - # Note: the order of the following checks is NOT significant. - - # Is it the flow sequence start indicator? - if ch == '[': - return self.fetch_flow_sequence_start() - - # Is it the flow mapping start indicator? - if ch == '{': - return self.fetch_flow_mapping_start() - - # Is it the flow sequence end indicator? - if ch == ']': - return self.fetch_flow_sequence_end() - - # Is it the flow mapping end indicator? - if ch == '}': - return self.fetch_flow_mapping_end() - - # Is it the flow entry indicator? - if ch == ',': - return self.fetch_flow_entry() - - # Is it the block entry indicator? - if ch == '-' and self.check_block_entry(): - return self.fetch_block_entry() - - # Is it the key indicator? - if ch == '?' and self.check_key(): - return self.fetch_key() - - # Is it the value indicator? - if ch == ':' and self.check_value(): - return self.fetch_value() - - # Is it an alias? - if ch == '*': - return self.fetch_alias() - - # Is it an anchor? - if ch == '&': - return self.fetch_anchor() - - # Is it a tag? - if ch == '!': - return self.fetch_tag() - - # Is it a literal scalar? - if ch == '|' and not self.flow_level: - return self.fetch_literal() - - # Is it a folded scalar? - if ch == '>' and not self.flow_level: - return self.fetch_folded() - - # Is it a single quoted scalar? - if ch == '\'': - return self.fetch_single() - - # Is it a double quoted scalar? - if ch == '\"': - return self.fetch_double() - - # It must be a plain scalar then. - if self.check_plain(): - return self.fetch_plain() - - # No? It's an error. 
Let's produce a nice error message. - raise ScannerError("while scanning for the next token", None, - "found character %r that cannot start any token" % ch, - self.get_mark()) - - # Simple keys treatment. - - def next_possible_simple_key(self): - # Return the number of the nearest possible simple key. Actually we - # don't need to loop through the whole dictionary. We may replace it - # with the following code: - # if not self.possible_simple_keys: - # return None - # return self.possible_simple_keys[ - # min(self.possible_simple_keys.keys())].token_number - min_token_number = None - for level in self.possible_simple_keys: - key = self.possible_simple_keys[level] - if min_token_number is None or key.token_number < min_token_number: - min_token_number = key.token_number - return min_token_number - - def stale_possible_simple_keys(self): - # Remove entries that are no longer possible simple keys. According to - # the YAML specification, simple keys - # - should be limited to a single line, - # - should be no longer than 1024 characters. - # Disabling this procedure will allow simple keys of any length and - # height (may cause problems if indentation is broken though). - for level in list(self.possible_simple_keys): - key = self.possible_simple_keys[level] - if key.line != self.line \ - or self.index-key.index > 1024: - if key.required: - raise ScannerError("while scanning a simple key", key.mark, - "could not found expected ':'", self.get_mark()) - del self.possible_simple_keys[level] - - def save_possible_simple_key(self): - # The next token may start a simple key. We check if it's possible - # and save its position. This function is called for - # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'. - - # Check if a simple key is required at the current position. - required = not self.flow_level and self.indent == self.column - - # A simple key is required only if it is the first token in the current - # line. Therefore it is always allowed. - assert self.allow_simple_key or not required - - # The next token might be a simple key. Let's save it's number and - # position. - if self.allow_simple_key: - self.remove_possible_simple_key() - token_number = self.tokens_taken+len(self.tokens) - key = SimpleKey(token_number, required, - self.index, self.line, self.column, self.get_mark()) - self.possible_simple_keys[self.flow_level] = key - - def remove_possible_simple_key(self): - # Remove the saved possible key position at the current flow level. - if self.flow_level in self.possible_simple_keys: - key = self.possible_simple_keys[self.flow_level] - - if key.required: - raise ScannerError("while scanning a simple key", key.mark, - "could not found expected ':'", self.get_mark()) - - del self.possible_simple_keys[self.flow_level] - - # Indentation functions. - - def unwind_indent(self, column): - - ## In flow context, tokens should respect indentation. - ## Actually the condition should be `self.indent >= column` according to - ## the spec. But this condition will prohibit intuitively correct - ## constructions such as - ## key : { - ## } - #if self.flow_level and self.indent > column: - # raise ScannerError(None, None, - # "invalid intendation or unclosed '[' or '{'", - # self.get_mark()) - - # In the flow context, indentation is ignored. We make the scanner less - # restrictive then specification requires. - if self.flow_level: - return - - # In block context, we may need to issue the BLOCK-END tokens. 
- while self.indent > column: - mark = self.get_mark() - self.indent = self.indents.pop() - self.tokens.append(BlockEndToken(mark, mark)) - - def add_indent(self, column): - # Check if we need to increase indentation. - if self.indent < column: - self.indents.append(self.indent) - self.indent = column - return True - return False - - # Fetchers. - - def fetch_stream_start(self): - # We always add STREAM-START as the first token and STREAM-END as the - # last token. - - # Read the token. - mark = self.get_mark() - - # Add STREAM-START. - self.tokens.append(StreamStartToken(mark, mark, - encoding=self.encoding)) - - - def fetch_stream_end(self): - - # Set the current intendation to -1. - self.unwind_indent(-1) - - # Reset simple keys. - self.remove_possible_simple_key() - self.allow_simple_key = False - self.possible_simple_keys = {} - - # Read the token. - mark = self.get_mark() - - # Add STREAM-END. - self.tokens.append(StreamEndToken(mark, mark)) - - # The steam is finished. - self.done = True - - def fetch_directive(self): - - # Set the current intendation to -1. - self.unwind_indent(-1) - - # Reset simple keys. - self.remove_possible_simple_key() - self.allow_simple_key = False - - # Scan and add DIRECTIVE. - self.tokens.append(self.scan_directive()) - - def fetch_document_start(self): - self.fetch_document_indicator(DocumentStartToken) - - def fetch_document_end(self): - self.fetch_document_indicator(DocumentEndToken) - - def fetch_document_indicator(self, TokenClass): - - # Set the current intendation to -1. - self.unwind_indent(-1) - - # Reset simple keys. Note that there could not be a block collection - # after '---'. - self.remove_possible_simple_key() - self.allow_simple_key = False - - # Add DOCUMENT-START or DOCUMENT-END. - start_mark = self.get_mark() - self.forward(3) - end_mark = self.get_mark() - self.tokens.append(TokenClass(start_mark, end_mark)) - - def fetch_flow_sequence_start(self): - self.fetch_flow_collection_start(FlowSequenceStartToken) - - def fetch_flow_mapping_start(self): - self.fetch_flow_collection_start(FlowMappingStartToken) - - def fetch_flow_collection_start(self, TokenClass): - - # '[' and '{' may start a simple key. - self.save_possible_simple_key() - - # Increase the flow level. - self.flow_level += 1 - - # Simple keys are allowed after '[' and '{'. - self.allow_simple_key = True - - # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START. - start_mark = self.get_mark() - self.forward() - end_mark = self.get_mark() - self.tokens.append(TokenClass(start_mark, end_mark)) - - def fetch_flow_sequence_end(self): - self.fetch_flow_collection_end(FlowSequenceEndToken) - - def fetch_flow_mapping_end(self): - self.fetch_flow_collection_end(FlowMappingEndToken) - - def fetch_flow_collection_end(self, TokenClass): - - # Reset possible simple key on the current level. - self.remove_possible_simple_key() - - # Decrease the flow level. - self.flow_level -= 1 - - # No simple keys after ']' or '}'. - self.allow_simple_key = False - - # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END. - start_mark = self.get_mark() - self.forward() - end_mark = self.get_mark() - self.tokens.append(TokenClass(start_mark, end_mark)) - - def fetch_flow_entry(self): - - # Simple keys are allowed after ','. - self.allow_simple_key = True - - # Reset possible simple key on the current level. - self.remove_possible_simple_key() - - # Add FLOW-ENTRY. 
- start_mark = self.get_mark() - self.forward() - end_mark = self.get_mark() - self.tokens.append(FlowEntryToken(start_mark, end_mark)) - - def fetch_block_entry(self): - - # Block context needs additional checks. - if not self.flow_level: - - # Are we allowed to start a new entry? - if not self.allow_simple_key: - raise ScannerError(None, None, - "sequence entries are not allowed here", - self.get_mark()) - - # We may need to add BLOCK-SEQUENCE-START. - if self.add_indent(self.column): - mark = self.get_mark() - self.tokens.append(BlockSequenceStartToken(mark, mark)) - - # It's an error for the block entry to occur in the flow context, - # but we let the parser detect this. - else: - pass - - # Simple keys are allowed after '-'. - self.allow_simple_key = True - - # Reset possible simple key on the current level. - self.remove_possible_simple_key() - - # Add BLOCK-ENTRY. - start_mark = self.get_mark() - self.forward() - end_mark = self.get_mark() - self.tokens.append(BlockEntryToken(start_mark, end_mark)) - - def fetch_key(self): - - # Block context needs additional checks. - if not self.flow_level: - - # Are we allowed to start a key (not nessesary a simple)? - if not self.allow_simple_key: - raise ScannerError(None, None, - "mapping keys are not allowed here", - self.get_mark()) - - # We may need to add BLOCK-MAPPING-START. - if self.add_indent(self.column): - mark = self.get_mark() - self.tokens.append(BlockMappingStartToken(mark, mark)) - - # Simple keys are allowed after '?' in the block context. - self.allow_simple_key = not self.flow_level - - # Reset possible simple key on the current level. - self.remove_possible_simple_key() - - # Add KEY. - start_mark = self.get_mark() - self.forward() - end_mark = self.get_mark() - self.tokens.append(KeyToken(start_mark, end_mark)) - - def fetch_value(self): - - # Do we determine a simple key? - if self.flow_level in self.possible_simple_keys: - - # Add KEY. - key = self.possible_simple_keys[self.flow_level] - del self.possible_simple_keys[self.flow_level] - self.tokens.insert(key.token_number-self.tokens_taken, - KeyToken(key.mark, key.mark)) - - # If this key starts a new block mapping, we need to add - # BLOCK-MAPPING-START. - if not self.flow_level: - if self.add_indent(key.column): - self.tokens.insert(key.token_number-self.tokens_taken, - BlockMappingStartToken(key.mark, key.mark)) - - # There cannot be two simple keys one after another. - self.allow_simple_key = False - - # It must be a part of a complex key. - else: - - # Block context needs additional checks. - # (Do we really need them? They will be catched by the parser - # anyway.) - if not self.flow_level: - - # We are allowed to start a complex value if and only if - # we can start a simple key. - if not self.allow_simple_key: - raise ScannerError(None, None, - "mapping values are not allowed here", - self.get_mark()) - - # If this value starts a new block mapping, we need to add - # BLOCK-MAPPING-START. It will be detected as an error later by - # the parser. - if not self.flow_level: - if self.add_indent(self.column): - mark = self.get_mark() - self.tokens.append(BlockMappingStartToken(mark, mark)) - - # Simple keys are allowed after ':' in the block context. - self.allow_simple_key = not self.flow_level - - # Reset possible simple key on the current level. - self.remove_possible_simple_key() - - # Add VALUE. 
- start_mark = self.get_mark() - self.forward() - end_mark = self.get_mark() - self.tokens.append(ValueToken(start_mark, end_mark)) - - def fetch_alias(self): - - # ALIAS could be a simple key. - self.save_possible_simple_key() - - # No simple keys after ALIAS. - self.allow_simple_key = False - - # Scan and add ALIAS. - self.tokens.append(self.scan_anchor(AliasToken)) - - def fetch_anchor(self): - - # ANCHOR could start a simple key. - self.save_possible_simple_key() - - # No simple keys after ANCHOR. - self.allow_simple_key = False - - # Scan and add ANCHOR. - self.tokens.append(self.scan_anchor(AnchorToken)) - - def fetch_tag(self): - - # TAG could start a simple key. - self.save_possible_simple_key() - - # No simple keys after TAG. - self.allow_simple_key = False - - # Scan and add TAG. - self.tokens.append(self.scan_tag()) - - def fetch_literal(self): - self.fetch_block_scalar(style='|') - - def fetch_folded(self): - self.fetch_block_scalar(style='>') - - def fetch_block_scalar(self, style): - - # A simple key may follow a block scalar. - self.allow_simple_key = True - - # Reset possible simple key on the current level. - self.remove_possible_simple_key() - - # Scan and add SCALAR. - self.tokens.append(self.scan_block_scalar(style)) - - def fetch_single(self): - self.fetch_flow_scalar(style='\'') - - def fetch_double(self): - self.fetch_flow_scalar(style='"') - - def fetch_flow_scalar(self, style): - - # A flow scalar could be a simple key. - self.save_possible_simple_key() - - # No simple keys after flow scalars. - self.allow_simple_key = False - - # Scan and add SCALAR. - self.tokens.append(self.scan_flow_scalar(style)) - - def fetch_plain(self): - - # A plain scalar could be a simple key. - self.save_possible_simple_key() - - # No simple keys after plain scalars. But note that `scan_plain` will - # change this flag if the scan is finished at the beginning of the - # line. - self.allow_simple_key = False - - # Scan and add SCALAR. May change `allow_simple_key`. - self.tokens.append(self.scan_plain()) - - # Checkers. - - def check_directive(self): - - # DIRECTIVE: ^ '%' ... - # The '%' indicator is already checked. - if self.column == 0: - return True - - def check_document_start(self): - - # DOCUMENT-START: ^ '---' (' '|'\n') - if self.column == 0: - if self.prefix(3) == '---' \ - and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029': - return True - - def check_document_end(self): - - # DOCUMENT-END: ^ '...' (' '|'\n') - if self.column == 0: - if self.prefix(3) == '...' \ - and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029': - return True - - def check_block_entry(self): - - # BLOCK-ENTRY: '-' (' '|'\n') - return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029' - - def check_key(self): - - # KEY(flow context): '?' - if self.flow_level: - return True - - # KEY(block context): '?' (' '|'\n') - else: - return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029' - - def check_value(self): - - # VALUE(flow context): ':' - if self.flow_level: - return True - - # VALUE(block context): ':' (' '|'\n') - else: - return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029' - - def check_plain(self): - - # A plain scalar may start with any non-space character except: - # '-', '?', ':', ',', '[', ']', '{', '}', - # '#', '&', '*', '!', '|', '>', '\'', '\"', - # '%', '@', '`'. - # - # It may also start with - # '-', '?', ':' - # if it is followed by a non-space character. - # - # Note that we limit the last rule to the block context (except the - # '-' character) because we want the flow context to be space - # independent. 
- ch = self.peek() - return ch not in '\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \ - or (self.peek(1) not in '\0 \t\r\n\x85\u2028\u2029' - and (ch == '-' or (not self.flow_level and ch in '?:'))) - - # Scanners. - - def scan_to_next_token(self): - # We ignore spaces, line breaks and comments. - # If we find a line break in the block context, we set the flag - # `allow_simple_key` on. - # The byte order mark is stripped if it's the first character in the - # stream. We do not yet support BOM inside the stream as the - # specification requires. Any such mark will be considered as a part - # of the document. - # - # TODO: We need to make tab handling rules more sane. A good rule is - # Tabs cannot precede tokens - # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END, - # KEY(block), VALUE(block), BLOCK-ENTRY - # So the checking code is - # if : - # self.allow_simple_keys = False - # We also need to add the check for `allow_simple_keys == True` to - # `unwind_indent` before issuing BLOCK-END. - # Scanners for block, flow, and plain scalars need to be modified. - - if self.index == 0 and self.peek() == '\uFEFF': - self.forward() - found = False - while not found: - while self.peek() == ' ': - self.forward() - if self.peek() == '#': - while self.peek() not in '\0\r\n\x85\u2028\u2029': - self.forward() - if self.scan_line_break(): - if not self.flow_level: - self.allow_simple_key = True - else: - found = True - - def scan_directive(self): - # See the specification for details. - start_mark = self.get_mark() - self.forward() - name = self.scan_directive_name(start_mark) - value = None - if name == 'YAML': - value = self.scan_yaml_directive_value(start_mark) - end_mark = self.get_mark() - elif name == 'TAG': - value = self.scan_tag_directive_value(start_mark) - end_mark = self.get_mark() - else: - end_mark = self.get_mark() - while self.peek() not in '\0\r\n\x85\u2028\u2029': - self.forward() - self.scan_directive_ignored_line(start_mark) - return DirectiveToken(name, value, start_mark, end_mark) - - def scan_directive_name(self, start_mark): - # See the specification for details. - length = 0 - ch = self.peek(length) - while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \ - or ch in '-_': - length += 1 - ch = self.peek(length) - if not length: - raise ScannerError("while scanning a directive", start_mark, - "expected alphabetic or numeric character, but found %r" - % ch, self.get_mark()) - value = self.prefix(length) - self.forward(length) - ch = self.peek() - if ch not in '\0 \r\n\x85\u2028\u2029': - raise ScannerError("while scanning a directive", start_mark, - "expected alphabetic or numeric character, but found %r" - % ch, self.get_mark()) - return value - - def scan_yaml_directive_value(self, start_mark): - # See the specification for details. - while self.peek() == ' ': - self.forward() - major = self.scan_yaml_directive_number(start_mark) - if self.peek() != '.': - raise ScannerError("while scanning a directive", start_mark, - "expected a digit or '.', but found %r" % self.peek(), - self.get_mark()) - self.forward() - minor = self.scan_yaml_directive_number(start_mark) - if self.peek() not in '\0 \r\n\x85\u2028\u2029': - raise ScannerError("while scanning a directive", start_mark, - "expected a digit or ' ', but found %r" % self.peek(), - self.get_mark()) - return (major, minor) - - def scan_yaml_directive_number(self, start_mark): - # See the specification for details. 
- ch = self.peek() - if not ('0' <= ch <= '9'): - raise ScannerError("while scanning a directive", start_mark, - "expected a digit, but found %r" % ch, self.get_mark()) - length = 0 - while '0' <= self.peek(length) <= '9': - length += 1 - value = int(self.prefix(length)) - self.forward(length) - return value - - def scan_tag_directive_value(self, start_mark): - # See the specification for details. - while self.peek() == ' ': - self.forward() - handle = self.scan_tag_directive_handle(start_mark) - while self.peek() == ' ': - self.forward() - prefix = self.scan_tag_directive_prefix(start_mark) - return (handle, prefix) - - def scan_tag_directive_handle(self, start_mark): - # See the specification for details. - value = self.scan_tag_handle('directive', start_mark) - ch = self.peek() - if ch != ' ': - raise ScannerError("while scanning a directive", start_mark, - "expected ' ', but found %r" % ch, self.get_mark()) - return value - - def scan_tag_directive_prefix(self, start_mark): - # See the specification for details. - value = self.scan_tag_uri('directive', start_mark) - ch = self.peek() - if ch not in '\0 \r\n\x85\u2028\u2029': - raise ScannerError("while scanning a directive", start_mark, - "expected ' ', but found %r" % ch, self.get_mark()) - return value - - def scan_directive_ignored_line(self, start_mark): - # See the specification for details. - while self.peek() == ' ': - self.forward() - if self.peek() == '#': - while self.peek() not in '\0\r\n\x85\u2028\u2029': - self.forward() - ch = self.peek() - if ch not in '\0\r\n\x85\u2028\u2029': - raise ScannerError("while scanning a directive", start_mark, - "expected a comment or a line break, but found %r" - % ch, self.get_mark()) - self.scan_line_break() - - def scan_anchor(self, TokenClass): - # The specification does not restrict characters for anchors and - # aliases. This may lead to problems, for instance, the document: - # [ *alias, value ] - # can be interpteted in two ways, as - # [ "value" ] - # and - # [ *alias , "value" ] - # Therefore we restrict aliases to numbers and ASCII letters. - start_mark = self.get_mark() - indicator = self.peek() - if indicator == '*': - name = 'alias' - else: - name = 'anchor' - self.forward() - length = 0 - ch = self.peek(length) - while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \ - or ch in '-_': - length += 1 - ch = self.peek(length) - if not length: - raise ScannerError("while scanning an %s" % name, start_mark, - "expected alphabetic or numeric character, but found %r" - % ch, self.get_mark()) - value = self.prefix(length) - self.forward(length) - ch = self.peek() - if ch not in '\0 \t\r\n\x85\u2028\u2029?:,]}%@`': - raise ScannerError("while scanning an %s" % name, start_mark, - "expected alphabetic or numeric character, but found %r" - % ch, self.get_mark()) - end_mark = self.get_mark() - return TokenClass(value, start_mark, end_mark) - - def scan_tag(self): - # See the specification for details. - start_mark = self.get_mark() - ch = self.peek(1) - if ch == '<': - handle = None - self.forward(2) - suffix = self.scan_tag_uri('tag', start_mark) - if self.peek() != '>': - raise ScannerError("while parsing a tag", start_mark, - "expected '>', but found %r" % self.peek(), - self.get_mark()) - self.forward() - elif ch in '\0 \t\r\n\x85\u2028\u2029': - handle = None - suffix = '!' - self.forward() - else: - length = 1 - use_handle = False - while ch not in '\0 \r\n\x85\u2028\u2029': - if ch == '!': - use_handle = True - break - length += 1 - ch = self.peek(length) - handle = '!' 
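The anchor/alias restriction motivated in scan_anchor() above is easy to see end to end: an anchored node and its alias deserialize to the very same Python object (sketch, stock pyyaml):

import yaml

data = yaml.safe_load("defaults: &base [1, 2]\nmirror: *base\n")
assert data["mirror"] is data["defaults"]   # the alias re-uses the anchored object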
- if use_handle: - handle = self.scan_tag_handle('tag', start_mark) - else: - handle = '!' - self.forward() - suffix = self.scan_tag_uri('tag', start_mark) - ch = self.peek() - if ch not in '\0 \r\n\x85\u2028\u2029': - raise ScannerError("while scanning a tag", start_mark, - "expected ' ', but found %r" % ch, self.get_mark()) - value = (handle, suffix) - end_mark = self.get_mark() - return TagToken(value, start_mark, end_mark) - - def scan_block_scalar(self, style): - # See the specification for details. - - if style == '>': - folded = True - else: - folded = False - - chunks = [] - start_mark = self.get_mark() - - # Scan the header. - self.forward() - chomping, increment = self.scan_block_scalar_indicators(start_mark) - self.scan_block_scalar_ignored_line(start_mark) - - # Determine the indentation level and go to the first non-empty line. - min_indent = self.indent+1 - if min_indent < 1: - min_indent = 1 - if increment is None: - breaks, max_indent, end_mark = self.scan_block_scalar_indentation() - indent = max(min_indent, max_indent) - else: - indent = min_indent+increment-1 - breaks, end_mark = self.scan_block_scalar_breaks(indent) - line_break = '' - - # Scan the inner part of the block scalar. - while self.column == indent and self.peek() != '\0': - chunks.extend(breaks) - leading_non_space = self.peek() not in ' \t' - length = 0 - while self.peek(length) not in '\0\r\n\x85\u2028\u2029': - length += 1 - chunks.append(self.prefix(length)) - self.forward(length) - line_break = self.scan_line_break() - breaks, end_mark = self.scan_block_scalar_breaks(indent) - if self.column == indent and self.peek() != '\0': - - # Unfortunately, folding rules are ambiguous. - # - # This is the folding according to the specification: - - if folded and line_break == '\n' \ - and leading_non_space and self.peek() not in ' \t': - if not breaks: - chunks.append(' ') - else: - chunks.append(line_break) - - # This is Clark Evans's interpretation (also in the spec - # examples): - # - #if folded and line_break == '\n': - # if not breaks: - # if self.peek() not in ' \t': - # chunks.append(' ') - # else: - # chunks.append(line_break) - #else: - # chunks.append(line_break) - else: - break - - # Chomp the tail. - if chomping is not False: - chunks.append(line_break) - if chomping is True: - chunks.extend(breaks) - - # We are done. - return ScalarToken(''.join(chunks), False, start_mark, end_mark, - style) - - def scan_block_scalar_indicators(self, start_mark): - # See the specification for details. 
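A quick contrast of the two block-scalar styles scan_block_scalar() handles, literal '|' versus folded '>' (sketch, stock pyyaml):

import yaml

literal = yaml.safe_load("s: |\n  one\n  two\n")["s"]
folded = yaml.safe_load("s: >\n  one\n  two\n")["s"]
assert literal == "one\ntwo\n"   # '|' keeps the line break
assert folded == "one two\n"     # '>' folds it into a space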
- chomping = None - increment = None - ch = self.peek() - if ch in '+-': - if ch == '+': - chomping = True - else: - chomping = False - self.forward() - ch = self.peek() - if ch in '0123456789': - increment = int(ch) - if increment == 0: - raise ScannerError("while scanning a block scalar", start_mark, - "expected indentation indicator in the range 1-9, but found 0", - self.get_mark()) - self.forward() - elif ch in '0123456789': - increment = int(ch) - if increment == 0: - raise ScannerError("while scanning a block scalar", start_mark, - "expected indentation indicator in the range 1-9, but found 0", - self.get_mark()) - self.forward() - ch = self.peek() - if ch in '+-': - if ch == '+': - chomping = True - else: - chomping = False - self.forward() - ch = self.peek() - if ch not in '\0 \r\n\x85\u2028\u2029': - raise ScannerError("while scanning a block scalar", start_mark, - "expected chomping or indentation indicators, but found %r" - % ch, self.get_mark()) - return chomping, increment - - def scan_block_scalar_ignored_line(self, start_mark): - # See the specification for details. - while self.peek() == ' ': - self.forward() - if self.peek() == '#': - while self.peek() not in '\0\r\n\x85\u2028\u2029': - self.forward() - ch = self.peek() - if ch not in '\0\r\n\x85\u2028\u2029': - raise ScannerError("while scanning a block scalar", start_mark, - "expected a comment or a line break, but found %r" % ch, - self.get_mark()) - self.scan_line_break() - - def scan_block_scalar_indentation(self): - # See the specification for details. - chunks = [] - max_indent = 0 - end_mark = self.get_mark() - while self.peek() in ' \r\n\x85\u2028\u2029': - if self.peek() != ' ': - chunks.append(self.scan_line_break()) - end_mark = self.get_mark() - else: - self.forward() - if self.column > max_indent: - max_indent = self.column - return chunks, max_indent, end_mark - - def scan_block_scalar_breaks(self, indent): - # See the specification for details. - chunks = [] - end_mark = self.get_mark() - while self.column < indent and self.peek() == ' ': - self.forward() - while self.peek() in '\r\n\x85\u2028\u2029': - chunks.append(self.scan_line_break()) - end_mark = self.get_mark() - while self.column < indent and self.peek() == ' ': - self.forward() - return chunks, end_mark - - def scan_flow_scalar(self, style): - # See the specification for details. - # Note that we loose indentation rules for quoted scalars. Quoted - # scalars don't need to adhere indentation because " and ' clearly - # mark the beginning and the end of them. Therefore we are less - # restrictive then the specification requires. We only need to check - # that document separators are not included in scalars. 
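The chomping indicators parsed above control what happens to trailing line breaks (sketch, stock pyyaml):

import yaml

assert yaml.safe_load("s: |\n  text\n")["s"] == "text\n"        # clip (default)
assert yaml.safe_load("s: |-\n  text\n")["s"] == "text"         # '-' strips the final break
assert yaml.safe_load("s: |+\n  text\n\n")["s"] == "text\n\n"   # '+' keeps trailing breaks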
- if style == '"': - double = True - else: - double = False - chunks = [] - start_mark = self.get_mark() - quote = self.peek() - self.forward() - chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark)) - while self.peek() != quote: - chunks.extend(self.scan_flow_scalar_spaces(double, start_mark)) - chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark)) - self.forward() - end_mark = self.get_mark() - return ScalarToken(''.join(chunks), False, start_mark, end_mark, - style) - - ESCAPE_REPLACEMENTS = { - '0': '\0', - 'a': '\x07', - 'b': '\x08', - 't': '\x09', - '\t': '\x09', - 'n': '\x0A', - 'v': '\x0B', - 'f': '\x0C', - 'r': '\x0D', - 'e': '\x1B', - ' ': '\x20', - '\"': '\"', - '\\': '\\', - 'N': '\x85', - '_': '\xA0', - 'L': '\u2028', - 'P': '\u2029', - } - - ESCAPE_CODES = { - 'x': 2, - 'u': 4, - 'U': 8, - } - - def scan_flow_scalar_non_spaces(self, double, start_mark): - # See the specification for details. - chunks = [] - while True: - length = 0 - while self.peek(length) not in '\'\"\\\0 \t\r\n\x85\u2028\u2029': - length += 1 - if length: - chunks.append(self.prefix(length)) - self.forward(length) - ch = self.peek() - if not double and ch == '\'' and self.peek(1) == '\'': - chunks.append('\'') - self.forward(2) - elif (double and ch == '\'') or (not double and ch in '\"\\'): - chunks.append(ch) - self.forward() - elif double and ch == '\\': - self.forward() - ch = self.peek() - if ch in self.ESCAPE_REPLACEMENTS: - chunks.append(self.ESCAPE_REPLACEMENTS[ch]) - self.forward() - elif ch in self.ESCAPE_CODES: - length = self.ESCAPE_CODES[ch] - self.forward() - for k in range(length): - if self.peek(k) not in '0123456789ABCDEFabcdef': - raise ScannerError("while scanning a double-quoted scalar", start_mark, - "expected escape sequence of %d hexdecimal numbers, but found %r" % - (length, self.peek(k)), self.get_mark()) - code = int(self.prefix(length), 16) - chunks.append(chr(code)) - self.forward(length) - elif ch in '\r\n\x85\u2028\u2029': - self.scan_line_break() - chunks.extend(self.scan_flow_scalar_breaks(double, start_mark)) - else: - raise ScannerError("while scanning a double-quoted scalar", start_mark, - "found unknown escape character %r" % ch, self.get_mark()) - else: - return chunks - - def scan_flow_scalar_spaces(self, double, start_mark): - # See the specification for details. - chunks = [] - length = 0 - while self.peek(length) in ' \t': - length += 1 - whitespaces = self.prefix(length) - self.forward(length) - ch = self.peek() - if ch == '\0': - raise ScannerError("while scanning a quoted scalar", start_mark, - "found unexpected end of stream", self.get_mark()) - elif ch in '\r\n\x85\u2028\u2029': - line_break = self.scan_line_break() - breaks = self.scan_flow_scalar_breaks(double, start_mark) - if line_break != '\n': - chunks.append(line_break) - elif not breaks: - chunks.append(' ') - chunks.extend(breaks) - else: - chunks.append(whitespaces) - return chunks - - def scan_flow_scalar_breaks(self, double, start_mark): - # See the specification for details. - chunks = [] - while True: - # Instead of checking indentation, we check for document - # separators. 
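The ESCAPE_REPLACEMENTS and ESCAPE_CODES tables above are what make double-quoted scalars the one style that can encode arbitrary characters; the 'x'/'u'/'U' codes give the number of hex digits that follow (sketch, stock pyyaml):

import yaml

v = yaml.safe_load(r'v: "tab:\t A:\x41 euro:\u20AC"')["v"]
assert v == "tab:\t A:A euro:\u20ac"   # \t replacement, 2-digit \x, 4-digit \u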
- prefix = self.prefix(3) - if (prefix == '---' or prefix == '...') \ - and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029': - raise ScannerError("while scanning a quoted scalar", start_mark, - "found unexpected document separator", self.get_mark()) - while self.peek() in ' \t': - self.forward() - if self.peek() in '\r\n\x85\u2028\u2029': - chunks.append(self.scan_line_break()) - else: - return chunks - - def scan_plain(self): - # See the specification for details. - # We add an additional restriction for the flow context: - # plain scalars in the flow context cannot contain ',', ':' and '?'. - # We also keep track of the `allow_simple_key` flag here. - # Indentation rules are loosed for the flow context. - chunks = [] - start_mark = self.get_mark() - end_mark = start_mark - indent = self.indent+1 - # We allow zero indentation for scalars, but then we need to check for - # document separators at the beginning of the line. - #if indent == 0: - # indent = 1 - spaces = [] - while True: - length = 0 - if self.peek() == '#': - break - while True: - ch = self.peek(length) - if ch in '\0 \t\r\n\x85\u2028\u2029' \ - or (not self.flow_level and ch == ':' and - self.peek(length+1) in '\0 \t\r\n\x85\u2028\u2029') \ - or (self.flow_level and ch in ',:?[]{}'): - break - length += 1 - # It's not clear what we should do with ':' in the flow context. - if (self.flow_level and ch == ':' - and self.peek(length+1) not in '\0 \t\r\n\x85\u2028\u2029,[]{}'): - self.forward(length) - raise ScannerError("while scanning a plain scalar", start_mark, - "found unexpected ':'", self.get_mark(), - "Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.") - if length == 0: - break - self.allow_simple_key = False - chunks.extend(spaces) - chunks.append(self.prefix(length)) - self.forward(length) - end_mark = self.get_mark() - spaces = self.scan_plain_spaces(indent, start_mark) - if not spaces or self.peek() == '#' \ - or (not self.flow_level and self.column < indent): - break - return ScalarToken(''.join(chunks), True, start_mark, end_mark) - - def scan_plain_spaces(self, indent, start_mark): - # See the specification for details. - # The specification is really confusing about tabs in plain scalars. - # We just forbid them completely. Do not use tabs in YAML! - chunks = [] - length = 0 - while self.peek(length) in ' ': - length += 1 - whitespaces = self.prefix(length) - self.forward(length) - ch = self.peek() - if ch in '\r\n\x85\u2028\u2029': - line_break = self.scan_line_break() - self.allow_simple_key = True - prefix = self.prefix(3) - if (prefix == '---' or prefix == '...') \ - and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029': - return - breaks = [] - while self.peek() in ' \r\n\x85\u2028\u2029': - if self.peek() == ' ': - self.forward() - else: - breaks.append(self.scan_line_break()) - prefix = self.prefix(3) - if (prefix == '---' or prefix == '...') \ - and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029': - return - if line_break != '\n': - chunks.append(line_break) - elif not breaks: - chunks.append(' ') - chunks.extend(breaks) - elif whitespaces: - chunks.append(whitespaces) - return chunks - - def scan_tag_handle(self, name, start_mark): - # See the specification for details. - # For some strange reasons, the specification does not allow '_' in - # tag handles. I have allowed it anyway. 
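Plain scalars handled by scan_plain() may span lines, and scan_plain_spaces() folds a single line break into one space (sketch, stock pyyaml):

import yaml

assert yaml.safe_load("s: one\n  two\n  three\n")["s"] == "one two three"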
- ch = self.peek() - if ch != '!': - raise ScannerError("while scanning a %s" % name, start_mark, - "expected '!', but found %r" % ch, self.get_mark()) - length = 1 - ch = self.peek(length) - if ch != ' ': - while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \ - or ch in '-_': - length += 1 - ch = self.peek(length) - if ch != '!': - self.forward(length) - raise ScannerError("while scanning a %s" % name, start_mark, - "expected '!', but found %r" % ch, self.get_mark()) - length += 1 - value = self.prefix(length) - self.forward(length) - return value - - def scan_tag_uri(self, name, start_mark): - # See the specification for details. - # Note: we do not check if URI is well-formed. - chunks = [] - length = 0 - ch = self.peek(length) - while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \ - or ch in '-;/?:@&=+$,_.!~*\'()[]%': - if ch == '%': - chunks.append(self.prefix(length)) - self.forward(length) - length = 0 - chunks.append(self.scan_uri_escapes(name, start_mark)) - else: - length += 1 - ch = self.peek(length) - if length: - chunks.append(self.prefix(length)) - self.forward(length) - length = 0 - if not chunks: - raise ScannerError("while parsing a %s" % name, start_mark, - "expected URI, but found %r" % ch, self.get_mark()) - return ''.join(chunks) - - def scan_uri_escapes(self, name, start_mark): - # See the specification for details. - codes = [] - mark = self.get_mark() - while self.peek() == '%': - self.forward() - for k in range(2): - if self.peek(k) not in '0123456789ABCDEFabcdef': - raise ScannerError("while scanning a %s" % name, start_mark, - "expected URI escape sequence of 2 hexdecimal numbers, but found %r" - % self.peek(k), self.get_mark()) - codes.append(int(self.prefix(2), 16)) - self.forward(2) - try: - value = bytes(codes).decode('utf-8') - except UnicodeDecodeError as exc: - raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark) - return value - - def scan_line_break(self): - # Transforms: - # '\r\n' : '\n' - # '\r' : '\n' - # '\n' : '\n' - # '\x85' : '\n' - # '\u2028' : '\u2028' - # '\u2029 : '\u2029' - # default : '' - ch = self.peek() - if ch in '\r\n\x85': - if self.prefix(2) == '\r\n': - self.forward(2) - else: - self.forward() - return '\n' - elif ch in '\u2028\u2029': - self.forward() - return ch - return '' - -#try: -# import psyco -# psyco.bind(Scanner) -#except ImportError: -# pass - diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml3/serializer.py b/src/collectors/python.d.plugin/python_modules/pyyaml3/serializer.py deleted file mode 100644 index 1ba2f7f9d..000000000 --- a/src/collectors/python.d.plugin/python_modules/pyyaml3/serializer.py +++ /dev/null @@ -1,112 +0,0 @@ -# SPDX-License-Identifier: MIT - -__all__ = ['Serializer', 'SerializerError'] - -from .error import YAMLError -from .events import * -from .nodes import * - -class SerializerError(YAMLError): - pass - -class Serializer: - - ANCHOR_TEMPLATE = 'id%03d' - - def __init__(self, encoding=None, - explicit_start=None, explicit_end=None, version=None, tags=None): - self.use_encoding = encoding - self.use_explicit_start = explicit_start - self.use_explicit_end = explicit_end - self.use_version = version - self.use_tags = tags - self.serialized_nodes = {} - self.anchors = {} - self.last_anchor_id = 0 - self.closed = None - - def open(self): - if self.closed is None: - self.emit(StreamStartEvent(encoding=self.use_encoding)) - self.closed = False - elif self.closed: - raise SerializerError("serializer is closed") - else: - raise 
SerializerError("serializer is already opened") - - def close(self): - if self.closed is None: - raise SerializerError("serializer is not opened") - elif not self.closed: - self.emit(StreamEndEvent()) - self.closed = True - - #def __del__(self): - # self.close() - - def serialize(self, node): - if self.closed is None: - raise SerializerError("serializer is not opened") - elif self.closed: - raise SerializerError("serializer is closed") - self.emit(DocumentStartEvent(explicit=self.use_explicit_start, - version=self.use_version, tags=self.use_tags)) - self.anchor_node(node) - self.serialize_node(node, None, None) - self.emit(DocumentEndEvent(explicit=self.use_explicit_end)) - self.serialized_nodes = {} - self.anchors = {} - self.last_anchor_id = 0 - - def anchor_node(self, node): - if node in self.anchors: - if self.anchors[node] is None: - self.anchors[node] = self.generate_anchor(node) - else: - self.anchors[node] = None - if isinstance(node, SequenceNode): - for item in node.value: - self.anchor_node(item) - elif isinstance(node, MappingNode): - for key, value in node.value: - self.anchor_node(key) - self.anchor_node(value) - - def generate_anchor(self, node): - self.last_anchor_id += 1 - return self.ANCHOR_TEMPLATE % self.last_anchor_id - - def serialize_node(self, node, parent, index): - alias = self.anchors[node] - if node in self.serialized_nodes: - self.emit(AliasEvent(alias)) - else: - self.serialized_nodes[node] = True - self.descend_resolver(parent, index) - if isinstance(node, ScalarNode): - detected_tag = self.resolve(ScalarNode, node.value, (True, False)) - default_tag = self.resolve(ScalarNode, node.value, (False, True)) - implicit = (node.tag == detected_tag), (node.tag == default_tag) - self.emit(ScalarEvent(alias, node.tag, implicit, node.value, - style=node.style)) - elif isinstance(node, SequenceNode): - implicit = (node.tag - == self.resolve(SequenceNode, node.value, True)) - self.emit(SequenceStartEvent(alias, node.tag, implicit, - flow_style=node.flow_style)) - index = 0 - for item in node.value: - self.serialize_node(item, node, index) - index += 1 - self.emit(SequenceEndEvent()) - elif isinstance(node, MappingNode): - implicit = (node.tag - == self.resolve(MappingNode, node.value, True)) - self.emit(MappingStartEvent(alias, node.tag, implicit, - flow_style=node.flow_style)) - for key, value in node.value: - self.serialize_node(key, node, None) - self.serialize_node(value, node, key) - self.emit(MappingEndEvent()) - self.ascend_resolver() - diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml3/tokens.py b/src/collectors/python.d.plugin/python_modules/pyyaml3/tokens.py deleted file mode 100644 index c5c4fb116..000000000 --- a/src/collectors/python.d.plugin/python_modules/pyyaml3/tokens.py +++ /dev/null @@ -1,105 +0,0 @@ -# SPDX-License-Identifier: MIT - -class Token(object): - def __init__(self, start_mark, end_mark): - self.start_mark = start_mark - self.end_mark = end_mark - def __repr__(self): - attributes = [key for key in self.__dict__ - if not key.endswith('_mark')] - attributes.sort() - arguments = ', '.join(['%s=%r' % (key, getattr(self, key)) - for key in attributes]) - return '%s(%s)' % (self.__class__.__name__, arguments) - -#class BOMToken(Token): -# id = '' - -class DirectiveToken(Token): - id = '' - def __init__(self, name, value, start_mark, end_mark): - self.name = name - self.value = value - self.start_mark = start_mark - self.end_mark = end_mark - -class DocumentStartToken(Token): - id = '' - -class DocumentEndToken(Token): - id = '' - 
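The serializer's two-pass anchoring above (anchor_node() marks nodes seen twice, serialize_node() emits an AliasEvent on the repeat) is what produces the &id001/*id001 pairs in output, with names taken from ANCHOR_TEMPLATE (sketch, stock pyyaml):

import yaml

shared = ["x"]
print(yaml.dump({"a": shared, "b": shared}))
# a: &id001
# - x
# b: *id001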
-class StreamStartToken(Token): - id = '' - def __init__(self, start_mark=None, end_mark=None, - encoding=None): - self.start_mark = start_mark - self.end_mark = end_mark - self.encoding = encoding - -class StreamEndToken(Token): - id = '' - -class BlockSequenceStartToken(Token): - id = '' - -class BlockMappingStartToken(Token): - id = '' - -class BlockEndToken(Token): - id = '' - -class FlowSequenceStartToken(Token): - id = '[' - -class FlowMappingStartToken(Token): - id = '{' - -class FlowSequenceEndToken(Token): - id = ']' - -class FlowMappingEndToken(Token): - id = '}' - -class KeyToken(Token): - id = '?' - -class ValueToken(Token): - id = ':' - -class BlockEntryToken(Token): - id = '-' - -class FlowEntryToken(Token): - id = ',' - -class AliasToken(Token): - id = '' - def __init__(self, value, start_mark, end_mark): - self.value = value - self.start_mark = start_mark - self.end_mark = end_mark - -class AnchorToken(Token): - id = '' - def __init__(self, value, start_mark, end_mark): - self.value = value - self.start_mark = start_mark - self.end_mark = end_mark - -class TagToken(Token): - id = '' - def __init__(self, value, start_mark, end_mark): - self.value = value - self.start_mark = start_mark - self.end_mark = end_mark - -class ScalarToken(Token): - id = '' - def __init__(self, value, plain, start_mark, end_mark, style=None): - self.value = value - self.plain = plain - self.start_mark = start_mark - self.end_mark = end_mark - self.style = style - diff --git a/src/collectors/python.d.plugin/python_modules/third_party/__init__.py b/src/collectors/python.d.plugin/python_modules/third_party/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/collectors/python.d.plugin/python_modules/third_party/boinc_client.py b/src/collectors/python.d.plugin/python_modules/third_party/boinc_client.py deleted file mode 100644 index ec21779a0..000000000 --- a/src/collectors/python.d.plugin/python_modules/third_party/boinc_client.py +++ /dev/null @@ -1,515 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# client.py - Somewhat higher-level GUI_RPC API for BOINC core client -# -# Copyright (C) 2013 Rodrigo Silva (MestreLion) -# Copyright (C) 2017 Austin S. Hemmelgarn -# -# SPDX-License-Identifier: GPL-3.0 - -# Based on client/boinc_cmd.cpp - -import hashlib -import socket -import sys -import time -from functools import total_ordering -from xml.etree import ElementTree - -GUI_RPC_PASSWD_FILE = "/var/lib/boinc/gui_rpc_auth.cfg" - -GUI_RPC_HOSTNAME = None # localhost -GUI_RPC_PORT = 31416 -GUI_RPC_TIMEOUT = 1 - -class Rpc(object): - ''' Class to perform GUI RPC calls to a BOINC core client. - Usage in a context manager ('with' block) is recommended to ensure - disconnect() is called. Using the same instance for all calls is also - recommended so it reuses the same socket connection - ''' - def __init__(self, hostname="", port=0, timeout=0, text_output=False): - self.hostname = hostname - self.port = port - self.timeout = timeout - self.sock = None - self.text_output = text_output - - @property - def sockargs(self): - return (self.hostname, self.port, self.timeout) - - def __enter__(self): self.connect(*self.sockargs); return self - def __exit__(self, *args): self.disconnect() - - def connect(self, hostname="", port=0, timeout=0): - ''' Connect to (hostname, port) with timeout in seconds. - Hostname defaults to None (localhost), and port to 31416 - Calling multiple times will disconnect previous connection (if any), - and (re-)connect to host. 
- ''' - if self.sock: - self.disconnect() - - self.hostname = hostname or GUI_RPC_HOSTNAME - self.port = port or GUI_RPC_PORT - self.timeout = timeout or GUI_RPC_TIMEOUT - - self.sock = socket.create_connection(self.sockargs[0:2], self.sockargs[2]) - - def disconnect(self): - ''' Disconnect from host. Calling multiple times is OK (idempotent) - ''' - if self.sock: - self.sock.close() - self.sock = None - - def call(self, request, text_output=None): - ''' Do an RPC call. Pack and send the XML request and return the - unpacked reply. request can be either plain XML text or a - xml.etree.ElementTree.Element object. Return ElementTree.Element - or XML text according to text_output flag. - Will auto-connect if not connected. - ''' - if text_output is None: - text_output = self.text_output - - if not self.sock: - self.connect(*self.sockargs) - - if not isinstance(request, ElementTree.Element): - request = ElementTree.fromstring(request) - - # pack request - end = '\003' - if sys.version_info[0] < 3: - req = "\n{0}\n\n{1}".format(ElementTree.tostring(request).replace(' />', '/>'), end) - else: - req = "\n{0}\n\n{1}".format(ElementTree.tostring(request, encoding='unicode').replace(' />', '/>'), end).encode() - - try: - self.sock.sendall(req) - except (socket.error, socket.herror, socket.gaierror, socket.timeout): - raise - - req = "" - while True: - try: - buf = self.sock.recv(8192) - if not buf: - raise socket.error("No data from socket") - if sys.version_info[0] >= 3: - buf = buf.decode() - except socket.error: - raise - n = buf.find(end) - if not n == -1: break - req += buf - req += buf[:n] - - # unpack reply (remove root tag, ie: first and last lines) - req = '\n'.join(req.strip().rsplit('\n')[1:-1]) - - if text_output: - return req - else: - return ElementTree.fromstring(req) - -def setattrs_from_xml(obj, xml, attrfuncdict={}): - ''' Helper to set values for attributes of a class instance by mapping - matching tags from a XML file. - attrfuncdict is a dict of functions to customize value data type of - each attribute. It falls back to simple int/float/bool/str detection - based on values defined in __init__(). This would not be needed if - Boinc used standard RPC protocol, which includes data type in XML. - ''' - if not isinstance(xml, ElementTree.Element): - xml = ElementTree.fromstring(xml) - for e in list(xml): - if hasattr(obj, e.tag): - attr = getattr(obj, e.tag) - attrfunc = attrfuncdict.get(e.tag, None) - if attrfunc is None: - if isinstance(attr, bool): attrfunc = parse_bool - elif isinstance(attr, int): attrfunc = parse_int - elif isinstance(attr, float): attrfunc = parse_float - elif isinstance(attr, str): attrfunc = parse_str - elif isinstance(attr, list): attrfunc = parse_list - else: attrfunc = lambda x: x - setattr(obj, e.tag, attrfunc(e)) - else: - pass - #print "class missing attribute '%s': %r" % (e.tag, obj) - return obj - - -def parse_bool(e): - ''' Helper to convert ElementTree.Element.text to boolean. - Treat '' (and '[[:blank:]]') as True - Treat '0' and 'false' as False - ''' - if e.text is None: - return True - else: - return bool(e.text) and not e.text.strip().lower() in ('0', 'false') - - -def parse_int(e): - ''' Helper to convert ElementTree.Element.text to integer. - Treat '' (and '') as 0 - ''' - # int(float()) allows casting to int a value expressed as float in XML - return 0 if e.text is None else int(float(e.text.strip())) - - -def parse_float(e): - ''' Helper to convert ElementTree.Element.text to float. 
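The type-sniffing fallback in setattrs_from_xml() above leans on these small parsers; two of their quirks in isolation (a sketch using only the standard library):

from xml.etree import ElementTree

# parse_int casts through float, so a value BOINC serializes as "715.0" still becomes an int
e = ElementTree.fromstring("<version_num>715.0</version_num>")
assert int(float(e.text.strip())) == 715

# parse_bool treats an empty element (text is None) as True
assert ElementTree.fromstring("<ready_to_report/>").text is None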
''' - return 0.0 if e.text is None else float(e.text.strip()) - - -def parse_str(e): - ''' Helper to convert ElementTree.Element.text to string. ''' - return "" if e.text is None else e.text.strip() - - -def parse_list(e): - ''' Helper to convert ElementTree.Element to list. For now, simply return - the list of root element's children - ''' - return list(e) - - -class Enum(object): - UNKNOWN = -1 # Not in original API - - @classmethod - def name(cls, value): - ''' Quick-and-dirty fallback for getting the "name" of an enum item ''' - - # value as string, if it matches an enum attribute. - # Allows short usage as Enum.name("VALUE") besides Enum.name(Enum.VALUE) - if hasattr(cls, str(value)): - return cls.name(getattr(cls, value, None)) - - # value not handled in subclass name() - for k, v in cls.__dict__.items(): - if v == value: - return k.lower().replace('_', ' ') - - # value not found - return cls.name(Enum.UNKNOWN) - - -class CpuSched(Enum): - ''' values of ACTIVE_TASK::scheduler_state and ACTIVE_TASK::next_scheduler_state - "SCHEDULED" is synonymous with "executing" except when CPU throttling - is in use. - ''' - UNINITIALIZED = 0 - PREEMPTED = 1 - SCHEDULED = 2 - - -class ResultState(Enum): - ''' Values of RESULT::state in client. - THESE MUST BE IN NUMERICAL ORDER - (because of the > comparison in RESULT::computing_done()) - see html/inc/common_defs.inc - ''' - NEW = 0 - #// New result - FILES_DOWNLOADING = 1 - #// Input files for result (WU, app version) are being downloaded - FILES_DOWNLOADED = 2 - #// Files are downloaded, result can be (or is being) computed - COMPUTE_ERROR = 3 - #// computation failed; no file upload - FILES_UPLOADING = 4 - #// Output files for result are being uploaded - FILES_UPLOADED = 5 - #// Files are uploaded, notify scheduling server at some point - ABORTED = 6 - #// result was aborted - UPLOAD_FAILED = 7 - #// some output file permanent failure - - -class Process(Enum): - ''' values of ACTIVE_TASK::task_state ''' - UNINITIALIZED = 0 - #// process doesn't exist yet - EXECUTING = 1 - #// process is running, as far as we know - SUSPENDED = 9 - #// we've sent it a "suspend" message - ABORT_PENDING = 5 - #// process exceeded limits; send "abort" message, waiting to exit - QUIT_PENDING = 8 - #// we've sent it a "quit" message, waiting to exit - COPY_PENDING = 10 - #// waiting for async file copies to finish - - -class _Struct(object): - ''' base helper class with common methods for all classes derived from - BOINC's C++ structs - ''' - @classmethod - def parse(cls, xml): - return setattrs_from_xml(cls(), xml) - - def __str__(self, indent=0): - buf = '{0}{1}:\n'.format('\t' * indent, self.__class__.__name__) - for attr in self.__dict__: - value = getattr(self, attr) - if isinstance(value, list): - buf += '{0}\t{1} [\n'.format('\t' * indent, attr) - for v in value: buf += '\t\t{0}\t\t,\n'.format(v) - buf += '\t]\n' - else: - buf += '{0}\t{1}\t{2}\n'.format('\t' * indent, - attr, - value.__str__(indent+2) - if isinstance(value, _Struct) - else repr(value)) - return buf - - -@total_ordering -class VersionInfo(_Struct): - def __init__(self, major=0, minor=0, release=0): - self.major = major - self.minor = minor - self.release = release - - @property - def _tuple(self): - return (self.major, self.minor, self.release) - - def __eq__(self, other): - return isinstance(other, self.__class__) and self._tuple == other._tuple - - def __ne__(self, other): - return not self.__eq__(other) - - def __gt__(self, other): - if not isinstance(other, self.__class__): - return 
NotImplemented - return self._tuple > other._tuple - - def __str__(self): - return "{0}.{1}.{2}".format(self.major, self.minor, self.release) - - def __repr__(self): - return "{0}{1}".format(self.__class__.__name__, self._tuple) - - -class Result(_Struct): - ''' Also called "task" in some contexts ''' - def __init__(self): - # Names and values follow lib/gui_rpc_client.h @ RESULT - # Order too, except when grouping contradicts client/result.cpp - # RESULT::write_gui(), then XML order is used. - - self.name = "" - self.wu_name = "" - self.version_num = 0 - #// identifies the app used - self.plan_class = "" - self.project_url = "" # from PROJECT.master_url - self.report_deadline = 0.0 # seconds since epoch - self.received_time = 0.0 # seconds since epoch - #// when we got this from server - self.ready_to_report = False - #// we're ready to report this result to the server; - #// either computation is done and all the files have been uploaded - #// or there was an error - self.got_server_ack = False - #// we've received the ack for this result from the server - self.final_cpu_time = 0.0 - self.final_elapsed_time = 0.0 - self.state = ResultState.NEW - self.estimated_cpu_time_remaining = 0.0 - #// actually, estimated elapsed time remaining - self.exit_status = 0 - #// return value from the application - self.suspended_via_gui = False - self.project_suspended_via_gui = False - self.edf_scheduled = False - #// temporary used to tell GUI that this result is deadline-scheduled - self.coproc_missing = False - #// a coproc needed by this job is missing - #// (e.g. because user removed their GPU board). - self.scheduler_wait = False - self.scheduler_wait_reason = "" - self.network_wait = False - self.resources = "" - #// textual description of resources used - - #// the following defined if active - # XML is generated in client/app.cpp ACTIVE_TASK::write_gui() - self.active_task = False - self.active_task_state = Process.UNINITIALIZED - self.app_version_num = 0 - self.slot = -1 - self.pid = 0 - self.scheduler_state = CpuSched.UNINITIALIZED - self.checkpoint_cpu_time = 0.0 - self.current_cpu_time = 0.0 - self.fraction_done = 0.0 - self.elapsed_time = 0.0 - self.swap_size = 0 - self.working_set_size_smoothed = 0.0 - self.too_large = False - self.needs_shmem = False - self.graphics_exec_path = "" - self.web_graphics_url = "" - self.remote_desktop_addr = "" - self.slot_path = "" - #// only present if graphics_exec_path is - - # The following are not in original API, but are present in RPC XML reply - self.completed_time = 0.0 - #// time when ready_to_report was set - self.report_immediately = False - self.working_set_size = 0 - self.page_fault_rate = 0.0 - #// derived by higher-level code - - # The following are in API, but are NEVER in RPC XML reply. Go figure - self.signal = 0 - - self.app = None # APP* - self.wup = None # WORKUNIT* - self.project = None # PROJECT* - self.avp = None # APP_VERSION* - - @classmethod - def parse(cls, xml): - if not isinstance(xml, ElementTree.Element): - xml = ElementTree.fromstring(xml) - - # parse main XML - result = super(Result, cls).parse(xml) - - # parse '' children - active_task = xml.find('active_task') - if active_task is None: - result.active_task = False # already the default after __init__() - else: - result.active_task = True # already the default after main parse - result = setattrs_from_xml(result, active_task) - - #// if CPU time is nonzero but elapsed time is zero, - #// we must be talking to an old client. 
- #// Set elapsed = CPU - #// (easier to deal with this here than in the manager) - if result.current_cpu_time != 0 and result.elapsed_time == 0: - result.elapsed_time = result.current_cpu_time - - if result.final_cpu_time != 0 and result.final_elapsed_time == 0: - result.final_elapsed_time = result.final_cpu_time - - return result - - def __str__(self): - buf = '{0}:\n'.format(self.__class__.__name__) - for attr in self.__dict__: - value = getattr(self, attr) - if attr in ['received_time', 'report_deadline']: - value = time.ctime(value) - buf += '\t{0}\t{1}\n'.format(attr, value) - return buf - - -class BoincClient(object): - - def __init__(self, host="", port=0, passwd=None): - self.hostname = host - self.port = port - self.passwd = passwd - self.rpc = Rpc(text_output=False) - self.version = None - self.authorized = False - - # Informative, not authoritative. Records status of *last* RPC call, - # but does not infer success about the *next* one. - # Thus, it should be read *after* an RPC call, not prior to one - self.connected = False - - def __enter__(self): self.connect(); return self - def __exit__(self, *args): self.disconnect() - - def connect(self): - try: - self.rpc.connect(self.hostname, self.port) - self.connected = True - except socket.error: - self.connected = False - return - self.authorized = self.authorize(self.passwd) - self.version = self.exchange_versions() - - def disconnect(self): - self.rpc.disconnect() - - def authorize(self, password): - ''' Request authorization. If password is None and we are connecting - to localhost, try to read password from the local config file - GUI_RPC_PASSWD_FILE. If file can't be read (not found or no - permission to read), try to authorize with a blank password. - If authorization is requested and fails, all subsequent calls - will be refused with socket.error 'Connection reset by peer' (104). - Since most local calls do no require authorization, do not attempt - it if you're not sure about the password. - ''' - if password is None and not self.hostname: - password = read_gui_rpc_password() or "" - nonce = self.rpc.call('').text - authhash = hashlib.md5('{0}{1}'.format(nonce, password).encode()).hexdigest().lower() - reply = self.rpc.call('{0}'.format(authhash)) - - if reply.tag == 'authorized': - return True - else: - return False - - def exchange_versions(self): - ''' Return VersionInfo instance with core client version info ''' - return VersionInfo.parse(self.rpc.call('')) - - def get_tasks(self): - ''' Same as get_results(active_only=False) ''' - return self.get_results(False) - - def get_results(self, active_only=False): - ''' Get a list of results. - Those that are in progress will have information such as CPU time - and fraction done. Each result includes a name; - Use CC_STATE::lookup_result() to find this result in the current static state; - if it's not there, call get_state() again. - ''' - reply = self.rpc.call("{0}".format(1 if active_only else 0)) - if not reply.tag == 'results': - return [] - - results = [] - for item in list(reply): - results.append(Result.parse(item)) - - return results - - -def read_gui_rpc_password(): - ''' Read password string from GUI_RPC_PASSWD_FILE file, trim the last CR - (if any), and return it - ''' - try: - with open(GUI_RPC_PASSWD_FILE, 'r') as f: - buf = f.read() - if buf.endswith('\n'): return buf[:-1] # trim last CR - else: return buf - except IOError: - # Permission denied or File not found. 
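authorize() above implements BOINC's GUI-RPC challenge-response: the client fetches a nonce, then sends back md5(nonce + password) as lowercase hex (only the hashing step is sketched here; the surrounding RPC tag names are as in the code above):

import hashlib

def gui_rpc_auth_hash(nonce, password):
    # what authorize() computes before the second auth request
    return hashlib.md5("{0}{1}".format(nonce, password).encode()).hexdigest().lower()

print(gui_rpc_auth_hash("1234567890", "secret"))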
- pass diff --git a/src/collectors/python.d.plugin/python_modules/third_party/filelock.py b/src/collectors/python.d.plugin/python_modules/third_party/filelock.py deleted file mode 100644 index 4c981672b..000000000 --- a/src/collectors/python.d.plugin/python_modules/third_party/filelock.py +++ /dev/null @@ -1,451 +0,0 @@ -# This is free and unencumbered software released into the public domain. -# -# Anyone is free to copy, modify, publish, use, compile, sell, or -# distribute this software, either in source code form or as a compiled -# binary, for any purpose, commercial or non-commercial, and by any -# means. -# -# In jurisdictions that recognize copyright laws, the author or authors -# of this software dedicate any and all copyright interest in the -# software to the public domain. We make this dedication for the benefit -# of the public at large and to the detriment of our heirs and -# successors. We intend this dedication to be an overt act of -# relinquishment in perpetuity of all present and future rights to this -# software under copyright law. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR -# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, -# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -# OTHER DEALINGS IN THE SOFTWARE. -# -# For more information, please refer to - -""" -A platform independent file lock that supports the with-statement. -""" - - -# Modules -# ------------------------------------------------ -import logging -import os -import threading -import time -try: - import warnings -except ImportError: - warnings = None - -try: - import msvcrt -except ImportError: - msvcrt = None - -try: - import fcntl -except ImportError: - fcntl = None - - -# Backward compatibility -# ------------------------------------------------ -try: - TimeoutError -except NameError: - TimeoutError = OSError - - -# Data -# ------------------------------------------------ -__all__ = [ - "Timeout", - "BaseFileLock", - "WindowsFileLock", - "UnixFileLock", - "SoftFileLock", - "FileLock" -] - -__version__ = "3.0.12" - - -_logger = None -def logger(): - """Returns the logger instance used in this module.""" - global _logger - _logger = _logger or logging.getLogger(__name__) - return _logger - - -# Exceptions -# ------------------------------------------------ -class Timeout(TimeoutError): - """ - Raised when the lock could not be acquired in *timeout* - seconds. - """ - - def __init__(self, lock_file): - """ - """ - #: The path of the file lock. - self.lock_file = lock_file - return None - - def __str__(self): - temp = "The file lock '{}' could not be acquired."\ - .format(self.lock_file) - return temp - - -# Classes -# ------------------------------------------------ - -# This is a helper class which is returned by :meth:`BaseFileLock.acquire` -# and wraps the lock to make sure __enter__ is not called twice when entering -# the with statement. -# If we would simply return *self*, the lock would be acquired again -# in the *__enter__* method of the BaseFileLock, but not released again -# automatically. 
-# -# :seealso: issue #37 (memory leak) -class _Acquire_ReturnProxy(object): - - def __init__(self, lock): - self.lock = lock - return None - - def __enter__(self): - return self.lock - - def __exit__(self, exc_type, exc_value, traceback): - self.lock.release() - return None - - -class BaseFileLock(object): - """ - Implements the base class of a file lock. - """ - - def __init__(self, lock_file, timeout = -1): - """ - """ - # The path to the lock file. - self._lock_file = lock_file - - # The file descriptor for the *_lock_file* as it is returned by the - # os.open() function. - # This file lock is only NOT None, if the object currently holds the - # lock. - self._lock_file_fd = None - - # The default timeout value. - self.timeout = timeout - - # We use this lock primarily for the lock counter. - self._thread_lock = threading.Lock() - - # The lock counter is used for implementing the nested locking - # mechanism. Whenever the lock is acquired, the counter is increased and - # the lock is only released, when this value is 0 again. - self._lock_counter = 0 - return None - - @property - def lock_file(self): - """ - The path to the lock file. - """ - return self._lock_file - - @property - def timeout(self): - """ - You can set a default timeout for the filelock. It will be used as - fallback value in the acquire method, if no timeout value (*None*) is - given. - - If you want to disable the timeout, set it to a negative value. - - A timeout of 0 means, that there is exactly one attempt to acquire the - file lock. - - .. versionadded:: 2.0.0 - """ - return self._timeout - - @timeout.setter - def timeout(self, value): - """ - """ - self._timeout = float(value) - return None - - # Platform dependent locking - # -------------------------------------------- - - def _acquire(self): - """ - Platform dependent. If the file lock could be - acquired, self._lock_file_fd holds the file descriptor - of the lock file. - """ - raise NotImplementedError() - - def _release(self): - """ - Releases the lock and sets self._lock_file_fd to None. - """ - raise NotImplementedError() - - # Platform independent methods - # -------------------------------------------- - - @property - def is_locked(self): - """ - True, if the object holds the file lock. - - .. versionchanged:: 2.0.0 - - This was previously a method and is now a property. - """ - return self._lock_file_fd is not None - - def acquire(self, timeout=None, poll_intervall=0.05): - """ - Acquires the file lock or fails with a :exc:`Timeout` error. - - .. code-block:: python - - # You can use this method in the context manager (recommended) - with lock.acquire(): - pass - - # Or use an equivalent try-finally construct: - lock.acquire() - try: - pass - finally: - lock.release() - - :arg float timeout: - The maximum time waited for the file lock. - If ``timeout < 0``, there is no timeout and this method will - block until the lock could be acquired. - If ``timeout`` is None, the default :attr:`~timeout` is used. - - :arg float poll_intervall: - We check once in *poll_intervall* seconds if we can acquire the - file lock. - - :raises Timeout: - if the lock could not be acquired in *timeout* seconds. - - .. versionchanged:: 2.0.0 - - This method returns now a *proxy* object instead of *self*, - so that it can be used in a with statement without side effects. - """ - # Use the default timeout, if no timeout is provided. - if timeout is None: - timeout = self.timeout - - # Increment the number right at the beginning. - # We can still undo it, if something fails. 
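acquire() returning a proxy rather than self is what makes the recommended pattern below safe; the lock is released exactly once when the with-block exits (usage sketch against the filelock 3.0.12 API shown above):

from filelock import FileLock, Timeout

lock = FileLock("demo.txt.lock")
try:
    with lock.acquire(timeout=5):
        with open("demo.txt", "a") as f:
            f.write("exclusive write\n")
except Timeout:
    print("could not get the lock within 5 seconds")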
- with self._thread_lock: - self._lock_counter += 1 - - lock_id = id(self) - lock_filename = self._lock_file - start_time = time.time() - try: - while True: - with self._thread_lock: - if not self.is_locked: - logger().debug('Attempting to acquire lock %s on %s', lock_id, lock_filename) - self._acquire() - - if self.is_locked: - logger().info('Lock %s acquired on %s', lock_id, lock_filename) - break - elif timeout >= 0 and time.time() - start_time > timeout: - logger().debug('Timeout on acquiring lock %s on %s', lock_id, lock_filename) - raise Timeout(self._lock_file) - else: - logger().debug( - 'Lock %s not acquired on %s, waiting %s seconds ...', - lock_id, lock_filename, poll_intervall - ) - time.sleep(poll_intervall) - except: - # Something did go wrong, so decrement the counter. - with self._thread_lock: - self._lock_counter = max(0, self._lock_counter - 1) - - raise - return _Acquire_ReturnProxy(lock = self) - - def release(self, force = False): - """ - Releases the file lock. - - Please note, that the lock is only completly released, if the lock - counter is 0. - - Also note, that the lock file itself is not automatically deleted. - - :arg bool force: - If true, the lock counter is ignored and the lock is released in - every case. - """ - with self._thread_lock: - - if self.is_locked: - self._lock_counter -= 1 - - if self._lock_counter == 0 or force: - lock_id = id(self) - lock_filename = self._lock_file - - logger().debug('Attempting to release lock %s on %s', lock_id, lock_filename) - self._release() - self._lock_counter = 0 - logger().info('Lock %s released on %s', lock_id, lock_filename) - - return None - - def __enter__(self): - self.acquire() - return self - - def __exit__(self, exc_type, exc_value, traceback): - self.release() - return None - - def __del__(self): - self.release(force = True) - return None - - -# Windows locking mechanism -# ~~~~~~~~~~~~~~~~~~~~~~~~~ - -class WindowsFileLock(BaseFileLock): - """ - Uses the :func:`msvcrt.locking` function to hard lock the lock file on - windows systems. - """ - - def _acquire(self): - open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC - - try: - fd = os.open(self._lock_file, open_mode) - except OSError: - pass - else: - try: - msvcrt.locking(fd, msvcrt.LK_NBLCK, 1) - except (IOError, OSError): - os.close(fd) - else: - self._lock_file_fd = fd - return None - - def _release(self): - fd = self._lock_file_fd - self._lock_file_fd = None - msvcrt.locking(fd, msvcrt.LK_UNLCK, 1) - os.close(fd) - - try: - os.remove(self._lock_file) - # Probably another instance of the application - # that acquired the file lock. - except OSError: - pass - return None - -# Unix locking mechanism -# ~~~~~~~~~~~~~~~~~~~~~~ - -class UnixFileLock(BaseFileLock): - """ - Uses the :func:`fcntl.flock` to hard lock the lock file on unix systems. - """ - - def _acquire(self): - open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC - fd = os.open(self._lock_file, open_mode) - - try: - fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB) - except (IOError, OSError): - os.close(fd) - else: - self._lock_file_fd = fd - return None - - def _release(self): - # Do not remove the lockfile: - # - # https://github.com/benediktschmitt/py-filelock/issues/31 - # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition - fd = self._lock_file_fd - self._lock_file_fd = None - fcntl.flock(fd, fcntl.LOCK_UN) - os.close(fd) - return None - -# Soft lock -# ~~~~~~~~~ - -class SoftFileLock(BaseFileLock): - """ - Simply watches the existence of the lock file. 
- """ - - def _acquire(self): - open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC - try: - fd = os.open(self._lock_file, open_mode) - except (IOError, OSError): - pass - else: - self._lock_file_fd = fd - return None - - def _release(self): - os.close(self._lock_file_fd) - self._lock_file_fd = None - - try: - os.remove(self._lock_file) - # The file is already deleted and that's what we want. - except OSError: - pass - return None - - -# Platform filelock -# ~~~~~~~~~~~~~~~~~ - -#: Alias for the lock, which should be used for the current platform. On -#: Windows, this is an alias for :class:`WindowsFileLock`, on Unix for -#: :class:`UnixFileLock` and otherwise for :class:`SoftFileLock`. -FileLock = None - -if msvcrt: - FileLock = WindowsFileLock -elif fcntl: - FileLock = UnixFileLock -else: - FileLock = SoftFileLock - - if warnings is not None: - warnings.warn("only soft file lock is available") diff --git a/src/collectors/python.d.plugin/python_modules/third_party/mcrcon.py b/src/collectors/python.d.plugin/python_modules/third_party/mcrcon.py deleted file mode 100644 index a65a304b6..000000000 --- a/src/collectors/python.d.plugin/python_modules/third_party/mcrcon.py +++ /dev/null @@ -1,74 +0,0 @@ -# Minecraft Remote Console module. -# -# Copyright (C) 2015 Barnaby Gale -# -# SPDX-License-Identifier: MIT - -import socket -import select -import struct -import time - - -class MCRconException(Exception): - pass - - -class MCRcon(object): - socket = None - - def connect(self, host, port, password): - if self.socket is not None: - raise MCRconException("Already connected") - self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - self.socket.settimeout(0.9) - self.socket.connect((host, port)) - self.send(3, password) - - def disconnect(self): - if self.socket is None: - raise MCRconException("Already disconnected") - self.socket.close() - self.socket = None - - def read(self, length): - data = b"" - while len(data) < length: - data += self.socket.recv(length - len(data)) - return data - - def send(self, out_type, out_data): - if self.socket is None: - raise MCRconException("Must connect before sending data") - - # Send a request packet - out_payload = struct.pack(' - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
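SoftFileLock above needs no platform locking API at all: os.O_CREAT | os.O_EXCL makes file creation atomic, so whoever creates the lock file owns the lock (minimal sketch of the same trick):

import os

try:
    fd = os.open("demo.soft.lock", os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC)
except OSError:
    print("lock is held elsewhere")
else:
    os.close(fd)
    os.remove("demo.soft.lock")   # releasing the soft lock = deleting the file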
- -""" -import time - - -__all__ = ('monotonic',) - - -try: - monotonic = time.monotonic -except AttributeError: - import ctypes - import ctypes.util - import os - import sys - import threading - - - def clock_clock_gettime_c_library(): - return ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True).clock_gettime - - - def clock_clock_gettime_rt_library(): - return ctypes.CDLL(ctypes.util.find_library('rt'), use_errno=True).clock_gettime - - - def clock_clock_gettime_c_library_synology6(): - return ctypes.CDLL('/usr/lib/libc.so.6', use_errno=True).clock_gettime - - - def clock_clock_gettime_rt_library_synology6(): - return ctypes.CDLL('/usr/lib/librt.so.1', use_errno=True).clock_gettime - - - def clock_gettime_linux(): - # see https://github.com/netdata/netdata/issues/7976 - order = [ - clock_clock_gettime_c_library, - clock_clock_gettime_rt_library, - clock_clock_gettime_c_library_synology6, - clock_clock_gettime_rt_library_synology6, - ] - - for gettime in order: - try: - return gettime() - except (RuntimeError, AttributeError, OSError): - continue - raise RuntimeError('can not find c and rt libraries') - - - try: - if sys.platform == 'darwin': # OS X, iOS - # See Technical Q&A QA1398 of the Mac Developer Library: - # - libc = ctypes.CDLL('/usr/lib/libc.dylib', use_errno=True) - - class mach_timebase_info_data_t(ctypes.Structure): - """System timebase info. Defined in .""" - _fields_ = (('numer', ctypes.c_uint32), - ('denom', ctypes.c_uint32)) - - mach_absolute_time = libc.mach_absolute_time - mach_absolute_time.restype = ctypes.c_uint64 - - timebase = mach_timebase_info_data_t() - libc.mach_timebase_info(ctypes.byref(timebase)) - ticks_per_second = timebase.numer / timebase.denom * 1.0e9 - - def monotonic(): - """Monotonic clock, cannot go backward.""" - return mach_absolute_time() / ticks_per_second - - elif sys.platform.startswith('win32') or sys.platform.startswith('cygwin'): - if sys.platform.startswith('cygwin'): - # Note: cygwin implements clock_gettime (CLOCK_MONOTONIC = 4) since - # version 1.7.6. Using raw WinAPI for maximum version compatibility. - - # Ugly hack using the wrong calling convention (in 32-bit mode) - # because ctypes has no windll under cygwin (and it also seems that - # the code letting you select stdcall in _ctypes doesn't exist under - # the preprocessor definitions relevant to cygwin). - # This is 'safe' because: - # 1. The ABI of GetTickCount and GetTickCount64 is identical for - # both calling conventions because they both have no parameters. - # 2. libffi masks the problem because after making the call it doesn't - # touch anything through esp and epilogue code restores a correct - # esp from ebp afterwards. - try: - kernel32 = ctypes.cdll.kernel32 - except OSError: # 'No such file or directory' - kernel32 = ctypes.cdll.LoadLibrary('kernel32.dll') - else: - kernel32 = ctypes.windll.kernel32 - - GetTickCount64 = getattr(kernel32, 'GetTickCount64', None) - if GetTickCount64: - # Windows Vista / Windows Server 2008 or newer. - GetTickCount64.restype = ctypes.c_ulonglong - - def monotonic(): - """Monotonic clock, cannot go backward.""" - return GetTickCount64() / 1000.0 - - else: - # Before Windows Vista. 
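Whichever backend ends up selected (time.monotonic, mach_absolute_time, GetTickCount/GetTickCount64 or clock_gettime), callers use it identically: the difference of two samples is immune to wall-clock adjustments (usage sketch, assuming the vendored module is importable as third_party.monotonic):

from third_party.monotonic import monotonic

t0 = monotonic()
sum(range(10**6))            # some work to time
elapsed = monotonic() - t0   # never negative, even if the system clock steps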
- GetTickCount = kernel32.GetTickCount - GetTickCount.restype = ctypes.c_uint32 - - get_tick_count_lock = threading.Lock() - get_tick_count_last_sample = 0 - get_tick_count_wraparounds = 0 - - def monotonic(): - """Monotonic clock, cannot go backward.""" - global get_tick_count_last_sample - global get_tick_count_wraparounds - - with get_tick_count_lock: - current_sample = GetTickCount() - if current_sample < get_tick_count_last_sample: - get_tick_count_wraparounds += 1 - get_tick_count_last_sample = current_sample - - final_milliseconds = get_tick_count_wraparounds << 32 - final_milliseconds += get_tick_count_last_sample - return final_milliseconds / 1000.0 - - else: - clock_gettime = clock_gettime_linux() - - class timespec(ctypes.Structure): - """Time specification, as described in clock_gettime(3).""" - _fields_ = (('tv_sec', ctypes.c_long), - ('tv_nsec', ctypes.c_long)) - - if sys.platform.startswith('linux'): - CLOCK_MONOTONIC = 1 - elif sys.platform.startswith('freebsd'): - CLOCK_MONOTONIC = 4 - elif sys.platform.startswith('sunos5'): - CLOCK_MONOTONIC = 4 - elif 'bsd' in sys.platform: - CLOCK_MONOTONIC = 3 - elif sys.platform.startswith('aix'): - CLOCK_MONOTONIC = ctypes.c_longlong(10) - - def monotonic(): - """Monotonic clock, cannot go backward.""" - ts = timespec() - if clock_gettime(CLOCK_MONOTONIC, ctypes.pointer(ts)): - errno = ctypes.get_errno() - raise OSError(errno, os.strerror(errno)) - return ts.tv_sec + ts.tv_nsec / 1.0e9 - - # Perform a sanity-check. - if monotonic() - monotonic() > 0: - raise ValueError('monotonic() is not monotonic!') - - except Exception as e: - raise RuntimeError('no suitable implementation for this system: ' + repr(e)) diff --git a/src/collectors/python.d.plugin/python_modules/third_party/ordereddict.py b/src/collectors/python.d.plugin/python_modules/third_party/ordereddict.py deleted file mode 100644 index 589401b8f..000000000 --- a/src/collectors/python.d.plugin/python_modules/third_party/ordereddict.py +++ /dev/null @@ -1,110 +0,0 @@ -# Copyright (c) 2009 Raymond Hettinger -# -# SPDX-License-Identifier: MIT - -from UserDict import DictMixin - - -class OrderedDict(dict, DictMixin): - - def __init__(self, *args, **kwds): - if len(args) > 1: - raise TypeError('expected at most 1 arguments, got %d' % len(args)) - try: - self.__end - except AttributeError: - self.clear() - self.update(*args, **kwds) - - def clear(self): - self.__end = end = [] - end += [None, end, end] # sentinel node for doubly linked list - self.__map = {} # key --> [key, prev, next] - dict.clear(self) - - def __setitem__(self, key, value): - if key not in self: - end = self.__end - curr = end[1] - curr[2] = end[1] = self.__map[key] = [key, curr, end] - dict.__setitem__(self, key, value) - - def __delitem__(self, key): - dict.__delitem__(self, key) - key, prev, next = self.__map.pop(key) - prev[2] = next - next[1] = prev - - def __iter__(self): - end = self.__end - curr = end[2] - while curr is not end: - yield curr[0] - curr = curr[2] - - def __reversed__(self): - end = self.__end - curr = end[1] - while curr is not end: - yield curr[0] - curr = curr[1] - - def popitem(self, last=True): - if not self: - raise KeyError('dictionary is empty') - if last: - key = reversed(self).next() - else: - key = iter(self).next() - value = self.pop(key) - return key, value - - def __reduce__(self): - items = [[k, self[k]] for k in self] - tmp = self.__map, self.__end - del self.__map, self.__end - inst_dict = vars(self).copy() - self.__map, self.__end = tmp - if inst_dict: - return 
self.__class__, (items,), inst_dict - return self.__class__, (items,) - - def keys(self): - return list(self) - - setdefault = DictMixin.setdefault - update = DictMixin.update - pop = DictMixin.pop - values = DictMixin.values - items = DictMixin.items - iterkeys = DictMixin.iterkeys - itervalues = DictMixin.itervalues - iteritems = DictMixin.iteritems - - def __repr__(self): - if not self: - return '%s()' % (self.__class__.__name__,) - return '%s(%r)' % (self.__class__.__name__, self.items()) - - def copy(self): - return self.__class__(self) - - @classmethod - def fromkeys(cls, iterable, value=None): - d = cls() - for key in iterable: - d[key] = value - return d - - def __eq__(self, other): - if isinstance(other, OrderedDict): - if len(self) != len(other): - return False - for p, q in zip(self.items(), other.items()): - if p != q: - return False - return True - return dict.__eq__(self, other) - - def __ne__(self, other): - return not self == other diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/__init__.py b/src/collectors/python.d.plugin/python_modules/urllib3/__init__.py deleted file mode 100644 index 3add84816..000000000 --- a/src/collectors/python.d.plugin/python_modules/urllib3/__init__.py +++ /dev/null @@ -1,98 +0,0 @@ -# SPDX-License-Identifier: MIT -""" -urllib3 - Thread-safe connection pooling and re-using. -""" - -from __future__ import absolute_import -import warnings - -from .connectionpool import ( - HTTPConnectionPool, - HTTPSConnectionPool, - connection_from_url -) - -from . import exceptions -from .filepost import encode_multipart_formdata -from .poolmanager import PoolManager, ProxyManager, proxy_from_url -from .response import HTTPResponse -from .util.request import make_headers -from .util.url import get_host -from .util.timeout import Timeout -from .util.retry import Retry - - -# Set default logging handler to avoid "No handler found" warnings. -import logging -try: # Python 2.7+ - from logging import NullHandler -except ImportError: - class NullHandler(logging.Handler): - def emit(self, record): - pass - -__author__ = 'Andrey Petrov (andrey.petrov@shazow.net)' -__license__ = 'MIT' -__version__ = '1.21.1' - -__all__ = ( - 'HTTPConnectionPool', - 'HTTPSConnectionPool', - 'PoolManager', - 'ProxyManager', - 'HTTPResponse', - 'Retry', - 'Timeout', - 'add_stderr_logger', - 'connection_from_url', - 'disable_warnings', - 'encode_multipart_formdata', - 'get_host', - 'make_headers', - 'proxy_from_url', -) - -logging.getLogger(__name__).addHandler(NullHandler()) - - -def add_stderr_logger(level=logging.DEBUG): - """ - Helper for quickly adding a StreamHandler to the logger. Useful for - debugging. - - Returns the handler after adding it. - """ - # This method needs to be in this __init__.py to get the __name__ correct - # even if urllib3 is vendored within another package. - logger = logging.getLogger(__name__) - handler = logging.StreamHandler() - handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s')) - logger.addHandler(handler) - logger.setLevel(level) - logger.debug('Added a stderr logging handler to logger: %s', __name__) - return handler - - -# ... Clean up. -del NullHandler - - -# All warning filters *must* be appended unless you're really certain that they -# shouldn't be: otherwise, it's very hard for users to use most Python -# mechanisms to silence them. -# SecurityWarning's always go off by default. 
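[Editor's note] The NullHandler dance in the vendored urllib3 __init__ above is the standard way for a library to stay silent unless the application opts in to logging. A short sketch of the same pattern, with 'mylib' as a placeholder package name:

    import logging

    # Library side: attach a do-nothing handler so importing the library
    # never triggers "No handler found" warnings on old Pythons.
    logging.getLogger('mylib').addHandler(logging.NullHandler())

    # Application side: opt in to the library's debug output explicitly,
    # mirroring what add_stderr_logger() does.
    logger = logging.getLogger('mylib')
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)
    logger.debug('logging from mylib is now visible')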
-warnings.simplefilter('always', exceptions.SecurityWarning, append=True) -# SubjectAltNameWarning's should go off once per host -warnings.simplefilter('default', exceptions.SubjectAltNameWarning, append=True) -# InsecurePlatformWarning's don't vary between requests, so we keep it default. -warnings.simplefilter('default', exceptions.InsecurePlatformWarning, - append=True) -# SNIMissingWarnings should go off only once. -warnings.simplefilter('default', exceptions.SNIMissingWarning, append=True) - - -def disable_warnings(category=exceptions.HTTPWarning): - """ - Helper for quickly disabling all urllib3 warnings. - """ - warnings.simplefilter('ignore', category) diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/_collections.py b/src/collectors/python.d.plugin/python_modules/urllib3/_collections.py deleted file mode 100644 index 2a6b3ec70..000000000 --- a/src/collectors/python.d.plugin/python_modules/urllib3/_collections.py +++ /dev/null @@ -1,320 +0,0 @@ -# SPDX-License-Identifier: MIT -from __future__ import absolute_import - -try: - from collections import Mapping, MutableMapping -except ImportError: - from collections.abc import Mapping, MutableMapping - -try: - from threading import RLock -except ImportError: # Platform-specific: No threads available - class RLock: - def __enter__(self): - pass - - def __exit__(self, exc_type, exc_value, traceback): - pass - - -try: # Python 2.7+ - from collections import OrderedDict -except ImportError: - from .packages.ordered_dict import OrderedDict -from .packages.six import iterkeys, itervalues, PY3 - - -__all__ = ['RecentlyUsedContainer', 'HTTPHeaderDict'] - - -_Null = object() - - -class RecentlyUsedContainer(MutableMapping): - """ - Provides a thread-safe dict-like container which maintains up to - ``maxsize`` keys while throwing away the least-recently-used keys beyond - ``maxsize``. - - :param maxsize: - Maximum number of recent elements to retain. - - :param dispose_func: - Every time an item is evicted from the container, - ``dispose_func(value)`` is called. Callback which will get called - """ - - ContainerCls = OrderedDict - - def __init__(self, maxsize=10, dispose_func=None): - self._maxsize = maxsize - self.dispose_func = dispose_func - - self._container = self.ContainerCls() - self.lock = RLock() - - def __getitem__(self, key): - # Re-insert the item, moving it to the end of the eviction line. - with self.lock: - item = self._container.pop(key) - self._container[key] = item - return item - - def __setitem__(self, key, value): - evicted_value = _Null - with self.lock: - # Possibly evict the existing value of 'key' - evicted_value = self._container.get(key, _Null) - self._container[key] = value - - # If we didn't evict an existing value, we might have to evict the - # least recently used item from the beginning of the container. 
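[Editor's note] RecentlyUsedContainer above gets LRU semantics almost for free from an ordered dict: a get pops the key and re-inserts it at the end, and eviction pops from the front. A compact sketch of that core idea with the standard library's OrderedDict, locking and dispose callbacks omitted:

    from collections import OrderedDict

    class TinyLRU:
        def __init__(self, maxsize=3):
            self.maxsize = maxsize
            self._data = OrderedDict()

        def get(self, key):
            value = self._data.pop(key)      # remove...
            self._data[key] = value          # ...and re-insert at the end (most recent)
            return value

        def put(self, key, value):
            self._data.pop(key, None)
            self._data[key] = value
            if len(self._data) > self.maxsize:
                self._data.popitem(last=False)   # evict the least recently used

    cache = TinyLRU(maxsize=2)
    cache.put('a', 1); cache.put('b', 2)
    cache.get('a')                  # 'a' is now most recent
    cache.put('c', 3)               # evicts 'b'
    print(list(cache._data))        # ['a', 'c']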
- if len(self._container) > self._maxsize: - _key, evicted_value = self._container.popitem(last=False) - - if self.dispose_func and evicted_value is not _Null: - self.dispose_func(evicted_value) - - def __delitem__(self, key): - with self.lock: - value = self._container.pop(key) - - if self.dispose_func: - self.dispose_func(value) - - def __len__(self): - with self.lock: - return len(self._container) - - def __iter__(self): - raise NotImplementedError('Iteration over this class is unlikely to be threadsafe.') - - def clear(self): - with self.lock: - # Copy pointers to all values, then wipe the mapping - values = list(itervalues(self._container)) - self._container.clear() - - if self.dispose_func: - for value in values: - self.dispose_func(value) - - def keys(self): - with self.lock: - return list(iterkeys(self._container)) - - -class HTTPHeaderDict(MutableMapping): - """ - :param headers: - An iterable of field-value pairs. Must not contain multiple field names - when compared case-insensitively. - - :param kwargs: - Additional field-value pairs to pass in to ``dict.update``. - - A ``dict`` like container for storing HTTP Headers. - - Field names are stored and compared case-insensitively in compliance with - RFC 7230. Iteration provides the first case-sensitive key seen for each - case-insensitive pair. - - Using ``__setitem__`` syntax overwrites fields that compare equal - case-insensitively in order to maintain ``dict``'s api. For fields that - compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add`` - in a loop. - - If multiple fields that are equal case-insensitively are passed to the - constructor or ``.update``, the behavior is undefined and some will be - lost. - - >>> headers = HTTPHeaderDict() - >>> headers.add('Set-Cookie', 'foo=bar') - >>> headers.add('set-cookie', 'baz=quxx') - >>> headers['content-length'] = '7' - >>> headers['SET-cookie'] - 'foo=bar, baz=quxx' - >>> headers['Content-Length'] - '7' - """ - - def __init__(self, headers=None, **kwargs): - super(HTTPHeaderDict, self).__init__() - self._container = OrderedDict() - if headers is not None: - if isinstance(headers, HTTPHeaderDict): - self._copy_from(headers) - else: - self.extend(headers) - if kwargs: - self.extend(kwargs) - - def __setitem__(self, key, val): - self._container[key.lower()] = [key, val] - return self._container[key.lower()] - - def __getitem__(self, key): - val = self._container[key.lower()] - return ', '.join(val[1:]) - - def __delitem__(self, key): - del self._container[key.lower()] - - def __contains__(self, key): - return key.lower() in self._container - - def __eq__(self, other): - if not isinstance(other, Mapping) and not hasattr(other, 'keys'): - return False - if not isinstance(other, type(self)): - other = type(self)(other) - return (dict((k.lower(), v) for k, v in self.itermerged()) == - dict((k.lower(), v) for k, v in other.itermerged())) - - def __ne__(self, other): - return not self.__eq__(other) - - if not PY3: # Python 2 - iterkeys = MutableMapping.iterkeys - itervalues = MutableMapping.itervalues - - __marker = object() - - def __len__(self): - return len(self._container) - - def __iter__(self): - # Only provide the originally cased names - for vals in self._container.values(): - yield vals[0] - - def pop(self, key, default=__marker): - '''D.pop(k[,d]) -> v, remove specified key and return the corresponding value. - If key is not found, d is returned if given, otherwise KeyError is raised. 
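[Editor's note] HTTPHeaderDict's trick, visible above, is to key an ordered mapping by the lower-cased field name while storing the original casing plus every value in one list, so lookups are case-insensitive and duplicates merge per RFC 7230. A bare-bones sketch of that storage layout:

    class MiniHeaderDict:
        """Case-insensitive headers; duplicate values are joined with ', ' on lookup."""
        def __init__(self):
            self._container = {}    # lower-cased key -> [OriginalCase, v1, v2, ...]

        def add(self, key, value):
            vals = self._container.setdefault(key.lower(), [key])
            vals.append(value)

        def __getitem__(self, key):
            return ', '.join(self._container[key.lower()][1:])

    h = MiniHeaderDict()
    h.add('Set-Cookie', 'foo=bar')
    h.add('set-cookie', 'baz=quxx')
    print(h['SET-COOKIE'])          # foo=bar, baz=quxx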
- ''' - # Using the MutableMapping function directly fails due to the private marker. - # Using ordinary dict.pop would expose the internal structures. - # So let's reinvent the wheel. - try: - value = self[key] - except KeyError: - if default is self.__marker: - raise - return default - else: - del self[key] - return value - - def discard(self, key): - try: - del self[key] - except KeyError: - pass - - def add(self, key, val): - """Adds a (name, value) pair, doesn't overwrite the value if it already - exists. - - >>> headers = HTTPHeaderDict(foo='bar') - >>> headers.add('Foo', 'baz') - >>> headers['foo'] - 'bar, baz' - """ - key_lower = key.lower() - new_vals = [key, val] - # Keep the common case aka no item present as fast as possible - vals = self._container.setdefault(key_lower, new_vals) - if new_vals is not vals: - vals.append(val) - - def extend(self, *args, **kwargs): - """Generic import function for any type of header-like object. - Adapted version of MutableMapping.update in order to insert items - with self.add instead of self.__setitem__ - """ - if len(args) > 1: - raise TypeError("extend() takes at most 1 positional " - "arguments ({0} given)".format(len(args))) - other = args[0] if len(args) >= 1 else () - - if isinstance(other, HTTPHeaderDict): - for key, val in other.iteritems(): - self.add(key, val) - elif isinstance(other, Mapping): - for key in other: - self.add(key, other[key]) - elif hasattr(other, "keys"): - for key in other.keys(): - self.add(key, other[key]) - else: - for key, value in other: - self.add(key, value) - - for key, value in kwargs.items(): - self.add(key, value) - - def getlist(self, key): - """Returns a list of all the values for the named field. Returns an - empty list if the key doesn't exist.""" - try: - vals = self._container[key.lower()] - except KeyError: - return [] - else: - return vals[1:] - - # Backwards compatibility for httplib - getheaders = getlist - getallmatchingheaders = getlist - iget = getlist - - def __repr__(self): - return "%s(%s)" % (type(self).__name__, dict(self.itermerged())) - - def _copy_from(self, other): - for key in other: - val = other.getlist(key) - if isinstance(val, list): - # Don't need to convert tuples - val = list(val) - self._container[key.lower()] = [key] + val - - def copy(self): - clone = type(self)() - clone._copy_from(self) - return clone - - def iteritems(self): - """Iterate over all header lines, including duplicate ones.""" - for key in self: - vals = self._container[key.lower()] - for val in vals[1:]: - yield vals[0], val - - def itermerged(self): - """Iterate over all headers, merging duplicate ones together.""" - for key in self: - val = self._container[key.lower()] - yield val[0], ', '.join(val[1:]) - - def items(self): - return list(self.iteritems()) - - @classmethod - def from_httplib(cls, message): # Python 2 - """Read headers from a Python 2 httplib message object.""" - # python2.7 does not expose a proper API for exporting multiheaders - # efficiently. This function re-reads raw lines from the message - # object and extracts the multiheaders properly. 
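[Editor's note] The from_httplib comment above refers to RFC 822-style folding, where a raw header line beginning with a space or tab continues the previous field. A small standalone sketch of that unfolding step; the sample lines are illustrative:

    raw_lines = [
        'Content-Type: text/plain',
        'X-Long-Header: first part,',
        '\tsecond part',            # leading tab: continuation of X-Long-Header
    ]

    headers = []
    for line in raw_lines:
        if line.startswith((' ', '\t')):
            key, value = headers[-1]
            headers[-1] = (key, value + '\r\n' + line.rstrip())
            continue
        key, value = line.split(':', 1)
        headers.append((key, value.strip()))

    print(headers)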
- headers = [] - - for line in message.headers: - if line.startswith((' ', '\t')): - key, value = headers[-1] - headers[-1] = (key, value + '\r\n' + line.rstrip()) - continue - - key, value = line.split(':', 1) - headers.append((key, value.strip())) - - return cls(headers) diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/connection.py b/src/collectors/python.d.plugin/python_modules/urllib3/connection.py deleted file mode 100644 index f757493c7..000000000 --- a/src/collectors/python.d.plugin/python_modules/urllib3/connection.py +++ /dev/null @@ -1,374 +0,0 @@ -# SPDX-License-Identifier: MIT -from __future__ import absolute_import -import datetime -import logging -import os -import sys -import socket -from socket import error as SocketError, timeout as SocketTimeout -import warnings -from .packages import six -from .packages.six.moves.http_client import HTTPConnection as _HTTPConnection -from .packages.six.moves.http_client import HTTPException # noqa: F401 - -try: # Compiled with SSL? - import ssl - BaseSSLError = ssl.SSLError -except (ImportError, AttributeError): # Platform-specific: No SSL. - ssl = None - - class BaseSSLError(BaseException): - pass - - -try: # Python 3: - # Not a no-op, we're adding this to the namespace so it can be imported. - ConnectionError = ConnectionError -except NameError: # Python 2: - class ConnectionError(Exception): - pass - - -from .exceptions import ( - NewConnectionError, - ConnectTimeoutError, - SubjectAltNameWarning, - SystemTimeWarning, -) -from .packages.ssl_match_hostname import match_hostname, CertificateError - -from .util.ssl_ import ( - resolve_cert_reqs, - resolve_ssl_version, - assert_fingerprint, - create_urllib3_context, - ssl_wrap_socket -) - - -from .util import connection - -from ._collections import HTTPHeaderDict - -log = logging.getLogger(__name__) - -port_by_scheme = { - 'http': 80, - 'https': 443, -} - -# When updating RECENT_DATE, move it to -# within two years of the current date, and no -# earlier than 6 months ago. -RECENT_DATE = datetime.date(2016, 1, 1) - - -class DummyConnection(object): - """Used to detect a failed ConnectionCls import.""" - pass - - -class HTTPConnection(_HTTPConnection, object): - """ - Based on httplib.HTTPConnection but provides an extra constructor - backwards-compatibility layer between older and newer Pythons. - - Additional keyword parameters are used to configure attributes of the connection. - Accepted parameters include: - - - ``strict``: See the documentation on :class:`urllib3.connectionpool.HTTPConnectionPool` - - ``source_address``: Set the source address for the current connection. - - .. note:: This is ignored for Python 2.6. It is only applied for 2.7 and 3.x - - - ``socket_options``: Set specific options on the underlying socket. If not specified, then - defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling - Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy. - - For example, if you wish to enable TCP Keep Alive in addition to the defaults, - you might pass:: - - HTTPConnection.default_socket_options + [ - (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1), - ] - - Or you may want to disable the defaults by passing an empty list (e.g., ``[]``). - """ - - default_port = port_by_scheme['http'] - - #: Disable Nagle's algorithm by default. 
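[Editor's note] The default_socket_options list described above is applied to every fresh socket before connecting; disabling Nagle's algorithm trades slightly more packets for lower latency on small writes. A sketch of applying such an option list by hand; example.org:80 is purely illustrative and the snippet assumes outbound network access:

    import socket

    default_socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]

    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    for level, optname, value in default_socket_options:
        sock.setsockopt(level, optname, value)
    sock.settimeout(5.0)
    sock.connect(('example.org', 80))
    print(sock.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY))  # non-zero
    sock.close()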
- #: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]`` - default_socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)] - - #: Whether this connection verifies the host's certificate. - is_verified = False - - def __init__(self, *args, **kw): - if six.PY3: # Python 3 - kw.pop('strict', None) - - # Pre-set source_address in case we have an older Python like 2.6. - self.source_address = kw.get('source_address') - - if sys.version_info < (2, 7): # Python 2.6 - # _HTTPConnection on Python 2.6 will balk at this keyword arg, but - # not newer versions. We can still use it when creating a - # connection though, so we pop it *after* we have saved it as - # self.source_address. - kw.pop('source_address', None) - - #: The socket options provided by the user. If no options are - #: provided, we use the default options. - self.socket_options = kw.pop('socket_options', self.default_socket_options) - - # Superclass also sets self.source_address in Python 2.7+. - _HTTPConnection.__init__(self, *args, **kw) - - def _new_conn(self): - """ Establish a socket connection and set nodelay settings on it. - - :return: New socket connection. - """ - extra_kw = {} - if self.source_address: - extra_kw['source_address'] = self.source_address - - if self.socket_options: - extra_kw['socket_options'] = self.socket_options - - try: - conn = connection.create_connection( - (self.host, self.port), self.timeout, **extra_kw) - - except SocketTimeout as e: - raise ConnectTimeoutError( - self, "Connection to %s timed out. (connect timeout=%s)" % - (self.host, self.timeout)) - - except SocketError as e: - raise NewConnectionError( - self, "Failed to establish a new connection: %s" % e) - - return conn - - def _prepare_conn(self, conn): - self.sock = conn - # the _tunnel_host attribute was added in python 2.6.3 (via - # http://hg.python.org/cpython/rev/0f57b30a152f) so pythons 2.6(0-2) do - # not have them. - if getattr(self, '_tunnel_host', None): - # TODO: Fix tunnel so it doesn't depend on self.sock state. 
- self._tunnel() - # Mark this connection as not reusable - self.auto_open = 0 - - def connect(self): - conn = self._new_conn() - self._prepare_conn(conn) - - def request_chunked(self, method, url, body=None, headers=None): - """ - Alternative to the common request method, which sends the - body with chunked encoding and not as one block - """ - headers = HTTPHeaderDict(headers if headers is not None else {}) - skip_accept_encoding = 'accept-encoding' in headers - skip_host = 'host' in headers - self.putrequest( - method, - url, - skip_accept_encoding=skip_accept_encoding, - skip_host=skip_host - ) - for header, value in headers.items(): - self.putheader(header, value) - if 'transfer-encoding' not in headers: - self.putheader('Transfer-Encoding', 'chunked') - self.endheaders() - - if body is not None: - stringish_types = six.string_types + (six.binary_type,) - if isinstance(body, stringish_types): - body = (body,) - for chunk in body: - if not chunk: - continue - if not isinstance(chunk, six.binary_type): - chunk = chunk.encode('utf8') - len_str = hex(len(chunk))[2:] - self.send(len_str.encode('utf-8')) - self.send(b'\r\n') - self.send(chunk) - self.send(b'\r\n') - - # After the if clause, to always have a closed body - self.send(b'0\r\n\r\n') - - -class HTTPSConnection(HTTPConnection): - default_port = port_by_scheme['https'] - - ssl_version = None - - def __init__(self, host, port=None, key_file=None, cert_file=None, - strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, - ssl_context=None, **kw): - - HTTPConnection.__init__(self, host, port, strict=strict, - timeout=timeout, **kw) - - self.key_file = key_file - self.cert_file = cert_file - self.ssl_context = ssl_context - - # Required property for Google AppEngine 1.9.0 which otherwise causes - # HTTPS requests to go out as HTTP. (See Issue #356) - self._protocol = 'https' - - def connect(self): - conn = self._new_conn() - self._prepare_conn(conn) - - if self.ssl_context is None: - self.ssl_context = create_urllib3_context( - ssl_version=resolve_ssl_version(None), - cert_reqs=resolve_cert_reqs(None), - ) - - self.sock = ssl_wrap_socket( - sock=conn, - keyfile=self.key_file, - certfile=self.cert_file, - ssl_context=self.ssl_context, - ) - - -class VerifiedHTTPSConnection(HTTPSConnection): - """ - Based on httplib.HTTPSConnection but wraps the socket with - SSL certification. - """ - cert_reqs = None - ca_certs = None - ca_cert_dir = None - ssl_version = None - assert_fingerprint = None - - def set_cert(self, key_file=None, cert_file=None, - cert_reqs=None, ca_certs=None, - assert_hostname=None, assert_fingerprint=None, - ca_cert_dir=None): - """ - This method should only be called once, before the connection is used. - """ - # If cert_reqs is not provided, we can try to guess. If the user gave - # us a cert database, we assume they want to use it: otherwise, if - # they gave us an SSL Context object we should use whatever is set for - # it. 
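[Editor's note] request_chunked above frames each chunk as a hex length, CRLF, the raw bytes, CRLF, and ends the body with a zero-length chunk. A self-contained sketch of just that wire framing:

    def frame_chunked(chunks):
        """Encode an iterable of byte strings as an HTTP/1.1 chunked body."""
        out = bytearray()
        for chunk in chunks:
            if not chunk:
                continue
            out += hex(len(chunk))[2:].encode('ascii')  # chunk size in hex, no '0x'
            out += b'\r\n' + chunk + b'\r\n'
        out += b'0\r\n\r\n'                             # zero-length chunk ends the body
        return bytes(out)

    print(frame_chunked([b'hello ', b'world']))
    # b'6\r\nhello \r\n5\r\nworld\r\n0\r\n\r\n'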
- if cert_reqs is None: - if ca_certs or ca_cert_dir: - cert_reqs = 'CERT_REQUIRED' - elif self.ssl_context is not None: - cert_reqs = self.ssl_context.verify_mode - - self.key_file = key_file - self.cert_file = cert_file - self.cert_reqs = cert_reqs - self.assert_hostname = assert_hostname - self.assert_fingerprint = assert_fingerprint - self.ca_certs = ca_certs and os.path.expanduser(ca_certs) - self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir) - - def connect(self): - # Add certificate verification - conn = self._new_conn() - - hostname = self.host - if getattr(self, '_tunnel_host', None): - # _tunnel_host was added in Python 2.6.3 - # (See: http://hg.python.org/cpython/rev/0f57b30a152f) - - self.sock = conn - # Calls self._set_hostport(), so self.host is - # self._tunnel_host below. - self._tunnel() - # Mark this connection as not reusable - self.auto_open = 0 - - # Override the host with the one we're requesting data from. - hostname = self._tunnel_host - - is_time_off = datetime.date.today() < RECENT_DATE - if is_time_off: - warnings.warn(( - 'System time is way off (before {0}). This will probably ' - 'lead to SSL verification errors').format(RECENT_DATE), - SystemTimeWarning - ) - - # Wrap socket using verification with the root certs in - # trusted_root_certs - if self.ssl_context is None: - self.ssl_context = create_urllib3_context( - ssl_version=resolve_ssl_version(self.ssl_version), - cert_reqs=resolve_cert_reqs(self.cert_reqs), - ) - - context = self.ssl_context - context.verify_mode = resolve_cert_reqs(self.cert_reqs) - self.sock = ssl_wrap_socket( - sock=conn, - keyfile=self.key_file, - certfile=self.cert_file, - ca_certs=self.ca_certs, - ca_cert_dir=self.ca_cert_dir, - server_hostname=hostname, - ssl_context=context) - - if self.assert_fingerprint: - assert_fingerprint(self.sock.getpeercert(binary_form=True), - self.assert_fingerprint) - elif context.verify_mode != ssl.CERT_NONE \ - and not getattr(context, 'check_hostname', False) \ - and self.assert_hostname is not False: - # While urllib3 attempts to always turn off hostname matching from - # the TLS library, this cannot always be done. So we check whether - # the TLS Library still thinks it's matching hostnames. - cert = self.sock.getpeercert() - if not cert.get('subjectAltName', ()): - warnings.warn(( - 'Certificate for {0} has no `subjectAltName`, falling back to check for a ' - '`commonName` for now. This feature is being removed by major browsers and ' - 'deprecated by RFC 2818. (See https://github.com/shazow/urllib3/issues/497 ' - 'for details.)'.format(hostname)), - SubjectAltNameWarning - ) - _match_hostname(cert, self.assert_hostname or hostname) - - self.is_verified = ( - context.verify_mode == ssl.CERT_REQUIRED or - self.assert_fingerprint is not None - ) - - -def _match_hostname(cert, asserted_hostname): - try: - match_hostname(cert, asserted_hostname) - except CertificateError as e: - log.error( - 'Certificate did not match expected hostname: %s. ' - 'Certificate: %s', asserted_hostname, cert - ) - # Add cert to exception and reraise so client code can inspect - # the cert when catching the exception, if they want to - e._peer_cert = cert - raise - - -if ssl: - # Make a copy for testing. 
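[Editor's note] All of the certificate plumbing above predates ssl.create_default_context; on a modern Python the equivalent verified handshake, including hostname checking, is a few lines of standard library code. example.org is just an illustrative peer and the sketch assumes network access:

    import socket
    import ssl

    # Loads the system CA bundle, sets CERT_REQUIRED and check_hostname=True.
    context = ssl.create_default_context()

    with socket.create_connection(('example.org', 443), timeout=5) as sock:
        with context.wrap_socket(sock, server_hostname='example.org') as tls:
            print(tls.version(), tls.getpeercert()['subject'])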
- UnverifiedHTTPSConnection = HTTPSConnection - HTTPSConnection = VerifiedHTTPSConnection -else: - HTTPSConnection = DummyConnection diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/connectionpool.py b/src/collectors/python.d.plugin/python_modules/urllib3/connectionpool.py deleted file mode 100644 index 90e4c86a5..000000000 --- a/src/collectors/python.d.plugin/python_modules/urllib3/connectionpool.py +++ /dev/null @@ -1,900 +0,0 @@ -# SPDX-License-Identifier: MIT -from __future__ import absolute_import -import errno -import logging -import sys -import warnings - -from socket import error as SocketError, timeout as SocketTimeout -import socket - - -from .exceptions import ( - ClosedPoolError, - ProtocolError, - EmptyPoolError, - HeaderParsingError, - HostChangedError, - LocationValueError, - MaxRetryError, - ProxyError, - ReadTimeoutError, - SSLError, - TimeoutError, - InsecureRequestWarning, - NewConnectionError, -) -from .packages.ssl_match_hostname import CertificateError -from .packages import six -from .packages.six.moves import queue -from .connection import ( - port_by_scheme, - DummyConnection, - HTTPConnection, HTTPSConnection, VerifiedHTTPSConnection, - HTTPException, BaseSSLError, -) -from .request import RequestMethods -from .response import HTTPResponse - -from .util.connection import is_connection_dropped -from .util.request import set_file_position -from .util.response import assert_header_parsing -from .util.retry import Retry -from .util.timeout import Timeout -from .util.url import get_host, Url - - -if six.PY2: - # Queue is imported for side effects on MS Windows - import Queue as _unused_module_Queue # noqa: F401 - -xrange = six.moves.xrange - -log = logging.getLogger(__name__) - -_Default = object() - - -# Pool objects -class ConnectionPool(object): - """ - Base class for all connection pools, such as - :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`. - """ - - scheme = None - QueueCls = queue.LifoQueue - - def __init__(self, host, port=None): - if not host: - raise LocationValueError("No host specified.") - - self.host = _ipv6_host(host).lower() - self.port = port - - def __str__(self): - return '%s(host=%r, port=%r)' % (type(self).__name__, - self.host, self.port) - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self.close() - # Return False to re-raise any potential exceptions - return False - - def close(self): - """ - Close all pooled connections and disable the pool. - """ - pass - - -# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252 -_blocking_errnos = set([errno.EAGAIN, errno.EWOULDBLOCK]) - - -class HTTPConnectionPool(ConnectionPool, RequestMethods): - """ - Thread-safe connection pool for one host. - - :param host: - Host used for this HTTP Connection (e.g. "localhost"), passed into - :class:`httplib.HTTPConnection`. - - :param port: - Port used for this HTTP Connection (None is equivalent to 80), passed - into :class:`httplib.HTTPConnection`. - - :param strict: - Causes BadStatusLine to be raised if the status line can't be parsed - as a valid HTTP/1.0 or 1.1 status line, passed into - :class:`httplib.HTTPConnection`. - - .. note:: - Only works in Python 2. This parameter is ignored in Python 3. - - :param timeout: - Socket timeout in seconds for each individual connection. 
This can - be a float or integer, which sets the timeout for the HTTP request, - or an instance of :class:`urllib3.util.Timeout` which gives you more - fine-grained control over request timeouts. After the constructor has - been parsed, this is always a `urllib3.util.Timeout` object. - - :param maxsize: - Number of connections to save that can be reused. More than 1 is useful - in multithreaded situations. If ``block`` is set to False, more - connections will be created but they will not be saved once they've - been used. - - :param block: - If set to True, no more than ``maxsize`` connections will be used at - a time. When no free connections are available, the call will block - until a connection has been released. This is a useful side effect for - particular multithreaded situations where one does not want to use more - than maxsize connections per host to prevent flooding. - - :param headers: - Headers to include with all requests, unless other headers are given - explicitly. - - :param retries: - Retry configuration to use by default with requests in this pool. - - :param _proxy: - Parsed proxy URL, should not be used directly, instead, see - :class:`urllib3.connectionpool.ProxyManager`" - - :param _proxy_headers: - A dictionary with proxy headers, should not be used directly, - instead, see :class:`urllib3.connectionpool.ProxyManager`" - - :param \\**conn_kw: - Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`, - :class:`urllib3.connection.HTTPSConnection` instances. - """ - - scheme = 'http' - ConnectionCls = HTTPConnection - ResponseCls = HTTPResponse - - def __init__(self, host, port=None, strict=False, - timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False, - headers=None, retries=None, - _proxy=None, _proxy_headers=None, - **conn_kw): - ConnectionPool.__init__(self, host, port) - RequestMethods.__init__(self, headers) - - self.strict = strict - - if not isinstance(timeout, Timeout): - timeout = Timeout.from_float(timeout) - - if retries is None: - retries = Retry.DEFAULT - - self.timeout = timeout - self.retries = retries - - self.pool = self.QueueCls(maxsize) - self.block = block - - self.proxy = _proxy - self.proxy_headers = _proxy_headers or {} - - # Fill the queue up so that doing get() on it will block properly - for _ in xrange(maxsize): - self.pool.put(None) - - # These are mostly for testing and debugging purposes. - self.num_connections = 0 - self.num_requests = 0 - self.conn_kw = conn_kw - - if self.proxy: - # Enable Nagle's algorithm for proxies, to avoid packet fragmentation. - # We cannot know if the user has added default socket options, so we cannot replace the - # list. - self.conn_kw.setdefault('socket_options', []) - - def _new_conn(self): - """ - Return a fresh :class:`HTTPConnection`. - """ - self.num_connections += 1 - log.debug("Starting new HTTP connection (%d): %s", - self.num_connections, self.host) - - conn = self.ConnectionCls(host=self.host, port=self.port, - timeout=self.timeout.connect_timeout, - strict=self.strict, **self.conn_kw) - return conn - - def _get_conn(self, timeout=None): - """ - Get a connection. Will return a pooled connection if one is available. - - If no connections are available and :prop:`.block` is ``False``, then a - fresh connection is returned. - - :param timeout: - Seconds to wait before giving up and raising - :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and - :prop:`.block` is ``True``. 
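[Editor's note] The pool constructor above pre-fills a LifoQueue with maxsize None placeholders, so a blocking get() naturally enforces the connection cap and a None result means "you may create a fresh connection". A stripped-down sketch of that discipline:

    import queue

    maxsize = 2
    pool = queue.LifoQueue(maxsize)
    for _ in range(maxsize):
        pool.put(None)                  # placeholder slots, one per allowed connection

    def check_out():
        slot = pool.get(block=True)     # blocks when all slots are checked out
        return slot if slot is not None else object()   # stand-in for a new connection

    def check_in(conn):
        pool.put(conn, block=False)

    c1 = check_out()
    c2 = check_out()
    check_in(c1)                        # frees a slot; a third check_out() can proceed
    print(pool.qsize())                 # 1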
- """ - conn = None - try: - conn = self.pool.get(block=self.block, timeout=timeout) - - except AttributeError: # self.pool is None - raise ClosedPoolError(self, "Pool is closed.") - - except queue.Empty: - if self.block: - raise EmptyPoolError(self, - "Pool reached maximum size and no more " - "connections are allowed.") - pass # Oh well, we'll create a new connection then - - # If this is a persistent connection, check if it got disconnected - if conn and is_connection_dropped(conn): - log.debug("Resetting dropped connection: %s", self.host) - conn.close() - if getattr(conn, 'auto_open', 1) == 0: - # This is a proxied connection that has been mutated by - # httplib._tunnel() and cannot be reused (since it would - # attempt to bypass the proxy) - conn = None - - return conn or self._new_conn() - - def _put_conn(self, conn): - """ - Put a connection back into the pool. - - :param conn: - Connection object for the current host and port as returned by - :meth:`._new_conn` or :meth:`._get_conn`. - - If the pool is already full, the connection is closed and discarded - because we exceeded maxsize. If connections are discarded frequently, - then maxsize should be increased. - - If the pool is closed, then the connection will be closed and discarded. - """ - try: - self.pool.put(conn, block=False) - return # Everything is dandy, done. - except AttributeError: - # self.pool is None. - pass - except queue.Full: - # This should never happen if self.block == True - log.warning( - "Connection pool is full, discarding connection: %s", - self.host) - - # Connection never got put back into the pool, close it. - if conn: - conn.close() - - def _validate_conn(self, conn): - """ - Called right before a request is made, after the socket is created. - """ - pass - - def _prepare_proxy(self, conn): - # Nothing to do for HTTP connections. - pass - - def _get_timeout(self, timeout): - """ Helper that always returns a :class:`urllib3.util.Timeout` """ - if timeout is _Default: - return self.timeout.clone() - - if isinstance(timeout, Timeout): - return timeout.clone() - else: - # User passed us an int/float. This is for backwards compatibility, - # can be removed later - return Timeout.from_float(timeout) - - def _raise_timeout(self, err, url, timeout_value): - """Is the error actually a timeout? Will raise a ReadTimeout or pass""" - - if isinstance(err, SocketTimeout): - raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value) - - # See the above comment about EAGAIN in Python 3. In Python 2 we have - # to specifically catch it and throw the timeout error - if hasattr(err, 'errno') and err.errno in _blocking_errnos: - raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value) - - # Catch possible read timeouts thrown as SSL errors. If not the - # case, rethrow the original. We need to do this because of: - # http://bugs.python.org/issue10272 - if 'timed out' in str(err) or 'did not complete (read)' in str(err): # Python 2.6 - raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value) - - def _make_request(self, conn, method, url, timeout=_Default, chunked=False, - **httplib_request_kw): - """ - Perform a request on a given urllib connection object taken from our - pool. - - :param conn: - a connection from one of our connection pools - - :param timeout: - Socket timeout in seconds for the request. 
This can be a - float or integer, which will set the same timeout value for - the socket connect and the socket read, or an instance of - :class:`urllib3.util.Timeout`, which gives you more fine-grained - control over your timeouts. - """ - self.num_requests += 1 - - timeout_obj = self._get_timeout(timeout) - timeout_obj.start_connect() - conn.timeout = timeout_obj.connect_timeout - - # Trigger any extra validation we need to do. - try: - self._validate_conn(conn) - except (SocketTimeout, BaseSSLError) as e: - # Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout. - self._raise_timeout(err=e, url=url, timeout_value=conn.timeout) - raise - - # conn.request() calls httplib.*.request, not the method in - # urllib3.request. It also calls makefile (recv) on the socket. - if chunked: - conn.request_chunked(method, url, **httplib_request_kw) - else: - conn.request(method, url, **httplib_request_kw) - - # Reset the timeout for the recv() on the socket - read_timeout = timeout_obj.read_timeout - - # App Engine doesn't have a sock attr - if getattr(conn, 'sock', None): - # In Python 3 socket.py will catch EAGAIN and return None when you - # try and read into the file pointer created by http.client, which - # instead raises a BadStatusLine exception. Instead of catching - # the exception and assuming all BadStatusLine exceptions are read - # timeouts, check for a zero timeout before making the request. - if read_timeout == 0: - raise ReadTimeoutError( - self, url, "Read timed out. (read timeout=%s)" % read_timeout) - if read_timeout is Timeout.DEFAULT_TIMEOUT: - conn.sock.settimeout(socket.getdefaulttimeout()) - else: # None or a value - conn.sock.settimeout(read_timeout) - - # Receive the response from the server - try: - try: # Python 2.7, use buffering of HTTP responses - httplib_response = conn.getresponse(buffering=True) - except TypeError: # Python 2.6 and older, Python 3 - try: - httplib_response = conn.getresponse() - except Exception as e: - # Remove the TypeError from the exception chain in Python 3; - # otherwise it looks like a programming error was the cause. - six.raise_from(e, None) - except (SocketTimeout, BaseSSLError, SocketError) as e: - self._raise_timeout(err=e, url=url, timeout_value=read_timeout) - raise - - # AppEngine doesn't have a version attr. - http_version = getattr(conn, '_http_vsn_str', 'HTTP/?') - log.debug("%s://%s:%s \"%s %s %s\" %s %s", self.scheme, self.host, self.port, - method, url, http_version, httplib_response.status, - httplib_response.length) - - try: - assert_header_parsing(httplib_response.msg) - except HeaderParsingError as hpe: # Platform-specific: Python 3 - log.warning( - 'Failed to parse headers (url=%s): %s', - self._absolute_url(url), hpe, exc_info=True) - - return httplib_response - - def _absolute_url(self, path): - return Url(scheme=self.scheme, host=self.host, port=self.port, path=path).url - - def close(self): - """ - Close all pooled connections and disable the pool. - """ - # Disable access to the pool - old_pool, self.pool = self.pool, None - - try: - while True: - conn = old_pool.get(block=False) - if conn: - conn.close() - - except queue.Empty: - pass # Done. - - def is_same_host(self, url): - """ - Check if the given ``url`` is a member of the same host as this - connection pool. - """ - if url.startswith('/'): - return True - - # TODO: Add optional support for socket.gethostbyname checking. 
- scheme, host, port = get_host(url) - - host = _ipv6_host(host).lower() - - # Use explicit default port for comparison when none is given - if self.port and not port: - port = port_by_scheme.get(scheme) - elif not self.port and port == port_by_scheme.get(scheme): - port = None - - return (scheme, host, port) == (self.scheme, self.host, self.port) - - def urlopen(self, method, url, body=None, headers=None, retries=None, - redirect=True, assert_same_host=True, timeout=_Default, - pool_timeout=None, release_conn=None, chunked=False, - body_pos=None, **response_kw): - """ - Get a connection from the pool and perform an HTTP request. This is the - lowest level call for making a request, so you'll need to specify all - the raw details. - - .. note:: - - More commonly, it's appropriate to use a convenience method provided - by :class:`.RequestMethods`, such as :meth:`request`. - - .. note:: - - `release_conn` will only behave as expected if - `preload_content=False` because we want to make - `preload_content=False` the default behaviour someday soon without - breaking backwards compatibility. - - :param method: - HTTP request method (such as GET, POST, PUT, etc.) - - :param body: - Data to send in the request body (useful for creating - POST requests, see HTTPConnectionPool.post_url for - more convenience). - - :param headers: - Dictionary of custom headers to send, such as User-Agent, - If-None-Match, etc. If None, pool headers are used. If provided, - these headers completely replace any pool-specific headers. - - :param retries: - Configure the number of retries to allow before raising a - :class:`~urllib3.exceptions.MaxRetryError` exception. - - Pass ``None`` to retry until you receive a response. Pass a - :class:`~urllib3.util.retry.Retry` object for fine-grained control - over different types of retries. - Pass an integer number to retry connection errors that many times, - but no other types of errors. Pass zero to never retry. - - If ``False``, then retries are disabled and any exception is raised - immediately. Also, instead of raising a MaxRetryError on redirects, - the redirect response will be returned. - - :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int. - - :param redirect: - If True, automatically handle redirects (status codes 301, 302, - 303, 307, 308). Each redirect counts as a retry. Disabling retries - will disable redirect, too. - - :param assert_same_host: - If ``True``, will make sure that the host of the pool requests is - consistent else will raise HostChangedError. When False, you can - use the pool on an HTTP proxy and request foreign hosts. - - :param timeout: - If specified, overrides the default timeout for this one - request. It may be a float (in seconds) or an instance of - :class:`urllib3.util.Timeout`. - - :param pool_timeout: - If set and the pool is set to block=True, then this method will - block for ``pool_timeout`` seconds and raise EmptyPoolError if no - connection is available within the time period. - - :param release_conn: - If False, then the urlopen call will not release the connection - back into the pool once a response is received (but will release if - you read the entire contents of the response such as when - `preload_content=True`). This is useful if you're not preloading - the response's content immediately. You will need to call - ``r.release_conn()`` on the response ``r`` to return the connection - back into the pool. If None, it takes the value of - ``response_kw.get('preload_content', True)``. 
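[Editor's note] is_same_host above treats an explicit default port and a missing port as equal, so http://example.org and http://example.org:80 land in the same pool. A sketch of that normalization using urllib.parse in place of urllib3's get_host:

    from urllib.parse import urlsplit

    port_by_scheme = {'http': 80, 'https': 443}

    def pool_key(url):
        parts = urlsplit(url)
        scheme = parts.scheme.lower()
        port = parts.port
        if port == port_by_scheme.get(scheme):
            port = None                 # explicit default port == no port at all
        return (scheme, parts.hostname, port)

    print(pool_key('http://example.org') == pool_key('http://example.org:80'))    # True
    print(pool_key('https://example.org') == pool_key('https://example.org:80'))  # False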
- - :param chunked: - If True, urllib3 will send the body using chunked transfer - encoding. Otherwise, urllib3 will send the body using the standard - content-length form. Defaults to False. - - :param int body_pos: - Position to seek to in file-like body in the event of a retry or - redirect. Typically this won't need to be set because urllib3 will - auto-populate the value when needed. - - :param \\**response_kw: - Additional parameters are passed to - :meth:`urllib3.response.HTTPResponse.from_httplib` - """ - if headers is None: - headers = self.headers - - if not isinstance(retries, Retry): - retries = Retry.from_int(retries, redirect=redirect, default=self.retries) - - if release_conn is None: - release_conn = response_kw.get('preload_content', True) - - # Check host - if assert_same_host and not self.is_same_host(url): - raise HostChangedError(self, url, retries) - - conn = None - - # Track whether `conn` needs to be released before - # returning/raising/recursing. Update this variable if necessary, and - # leave `release_conn` constant throughout the function. That way, if - # the function recurses, the original value of `release_conn` will be - # passed down into the recursive call, and its value will be respected. - # - # See issue #651 [1] for details. - # - # [1] - release_this_conn = release_conn - - # Merge the proxy headers. Only do this in HTTP. We have to copy the - # headers dict so we can safely change it without those changes being - # reflected in anyone else's copy. - if self.scheme == 'http': - headers = headers.copy() - headers.update(self.proxy_headers) - - # Must keep the exception bound to a separate variable or else Python 3 - # complains about UnboundLocalError. - err = None - - # Keep track of whether we cleanly exited the except block. This - # ensures we do proper cleanup in finally. - clean_exit = False - - # Rewind body position, if needed. Record current position - # for future rewinds in the event of a redirect/retry. - body_pos = set_file_position(body, body_pos) - - try: - # Request a connection from the queue. - timeout_obj = self._get_timeout(timeout) - conn = self._get_conn(timeout=pool_timeout) - - conn.timeout = timeout_obj.connect_timeout - - is_new_proxy_conn = self.proxy is not None and not getattr(conn, 'sock', None) - if is_new_proxy_conn: - self._prepare_proxy(conn) - - # Make the request on the httplib connection object. - httplib_response = self._make_request(conn, method, url, - timeout=timeout_obj, - body=body, headers=headers, - chunked=chunked) - - # If we're going to release the connection in ``finally:``, then - # the response doesn't need to know about the connection. Otherwise - # it will also try to release it and we'll have a double-release - # mess. - response_conn = conn if not release_conn else None - - # Pass method to Response for length checking - response_kw['request_method'] = method - - # Import httplib's response into our own wrapper object - response = self.ResponseCls.from_httplib(httplib_response, - pool=self, - connection=response_conn, - retries=retries, - **response_kw) - - # Everything went great! - clean_exit = True - - except queue.Empty: - # Timed out by queue. - raise EmptyPoolError(self, "No pool connections are available.") - - except (BaseSSLError, CertificateError) as e: - # Close the connection. If a connection is reused on which there - # was a Certificate error, the next request will certainly raise - # another Certificate error. 
- clean_exit = False - raise SSLError(e) - - except SSLError: - # Treat SSLError separately from BaseSSLError to preserve - # traceback. - clean_exit = False - raise - - except (TimeoutError, HTTPException, SocketError, ProtocolError) as e: - # Discard the connection for these exceptions. It will be - # be replaced during the next _get_conn() call. - clean_exit = False - - if isinstance(e, (SocketError, NewConnectionError)) and self.proxy: - e = ProxyError('Cannot connect to proxy.', e) - elif isinstance(e, (SocketError, HTTPException)): - e = ProtocolError('Connection aborted.', e) - - retries = retries.increment(method, url, error=e, _pool=self, - _stacktrace=sys.exc_info()[2]) - retries.sleep() - - # Keep track of the error for the retry warning. - err = e - - finally: - if not clean_exit: - # We hit some kind of exception, handled or otherwise. We need - # to throw the connection away unless explicitly told not to. - # Close the connection, set the variable to None, and make sure - # we put the None back in the pool to avoid leaking it. - conn = conn and conn.close() - release_this_conn = True - - if release_this_conn: - # Put the connection back to be reused. If the connection is - # expired then it will be None, which will get replaced with a - # fresh connection during _get_conn. - self._put_conn(conn) - - if not conn: - # Try again - log.warning("Retrying (%r) after connection " - "broken by '%r': %s", retries, err, url) - return self.urlopen(method, url, body, headers, retries, - redirect, assert_same_host, - timeout=timeout, pool_timeout=pool_timeout, - release_conn=release_conn, body_pos=body_pos, - **response_kw) - - # Handle redirect? - redirect_location = redirect and response.get_redirect_location() - if redirect_location: - if response.status == 303: - method = 'GET' - - try: - retries = retries.increment(method, url, response=response, _pool=self) - except MaxRetryError: - if retries.raise_on_redirect: - # Release the connection for this response, since we're not - # returning it to be released manually. - response.release_conn() - raise - return response - - retries.sleep_for_retry(response) - log.debug("Redirecting %s -> %s", url, redirect_location) - return self.urlopen( - method, redirect_location, body, headers, - retries=retries, redirect=redirect, - assert_same_host=assert_same_host, - timeout=timeout, pool_timeout=pool_timeout, - release_conn=release_conn, body_pos=body_pos, - **response_kw) - - # Check if we should retry the HTTP response. - has_retry_after = bool(response.getheader('Retry-After')) - if retries.is_retry(method, response.status, has_retry_after): - try: - retries = retries.increment(method, url, response=response, _pool=self) - except MaxRetryError: - if retries.raise_on_status: - # Release the connection for this response, since we're not - # returning it to be released manually. - response.release_conn() - raise - return response - retries.sleep(response) - log.debug("Retry: %s", url) - return self.urlopen( - method, url, body, headers, - retries=retries, redirect=redirect, - assert_same_host=assert_same_host, - timeout=timeout, pool_timeout=pool_timeout, - release_conn=release_conn, - body_pos=body_pos, **response_kw) - - return response - - -class HTTPSConnectionPool(HTTPConnectionPool): - """ - Same as :class:`.HTTPConnectionPool`, but HTTPS. - - When Python is compiled with the :mod:`ssl` module, then - :class:`.VerifiedHTTPSConnection` is used, which *can* verify certificates, - instead of :class:`.HTTPSConnection`. 
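[Editor's note] The tail of urlopen above encodes two HTTP conventions: a 303 response always downgrades the follow-up request to GET, and a response may be retried when its status warrants it and a Retry-After header is present. A toy decision function in that spirit; the status sets are illustrative, not urllib3's Retry defaults:

    REDIRECT_STATUSES = {301, 302, 303, 307, 308}

    def next_action(method, status, headers):
        """Return ('redirect', method), ('retry', delay_seconds), or ('done', None)."""
        if status in REDIRECT_STATUSES and 'Location' in headers:
            return 'redirect', ('GET' if status == 303 else method)
        if status in (429, 503) and 'Retry-After' in headers:
            return 'retry', float(headers['Retry-After'])
        return 'done', None

    print(next_action('POST', 303, {'Location': '/elsewhere'}))   # ('redirect', 'GET')
    print(next_action('GET', 503, {'Retry-After': '2'}))          # ('retry', 2.0)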
- - :class:`.VerifiedHTTPSConnection` uses one of ``assert_fingerprint``, - ``assert_hostname`` and ``host`` in this order to verify connections. - If ``assert_hostname`` is False, no verification is done. - - The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``, - ``ca_cert_dir``, and ``ssl_version`` are only used if :mod:`ssl` is - available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade - the connection socket into an SSL socket. - """ - - scheme = 'https' - ConnectionCls = HTTPSConnection - - def __init__(self, host, port=None, - strict=False, timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, - block=False, headers=None, retries=None, - _proxy=None, _proxy_headers=None, - key_file=None, cert_file=None, cert_reqs=None, - ca_certs=None, ssl_version=None, - assert_hostname=None, assert_fingerprint=None, - ca_cert_dir=None, **conn_kw): - - HTTPConnectionPool.__init__(self, host, port, strict, timeout, maxsize, - block, headers, retries, _proxy, _proxy_headers, - **conn_kw) - - if ca_certs and cert_reqs is None: - cert_reqs = 'CERT_REQUIRED' - - self.key_file = key_file - self.cert_file = cert_file - self.cert_reqs = cert_reqs - self.ca_certs = ca_certs - self.ca_cert_dir = ca_cert_dir - self.ssl_version = ssl_version - self.assert_hostname = assert_hostname - self.assert_fingerprint = assert_fingerprint - - def _prepare_conn(self, conn): - """ - Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket` - and establish the tunnel if proxy is used. - """ - - if isinstance(conn, VerifiedHTTPSConnection): - conn.set_cert(key_file=self.key_file, - cert_file=self.cert_file, - cert_reqs=self.cert_reqs, - ca_certs=self.ca_certs, - ca_cert_dir=self.ca_cert_dir, - assert_hostname=self.assert_hostname, - assert_fingerprint=self.assert_fingerprint) - conn.ssl_version = self.ssl_version - return conn - - def _prepare_proxy(self, conn): - """ - Establish tunnel connection early, because otherwise httplib - would improperly set Host: header to proxy's IP:port. - """ - # Python 2.7+ - try: - set_tunnel = conn.set_tunnel - except AttributeError: # Platform-specific: Python 2.6 - set_tunnel = conn._set_tunnel - - if sys.version_info <= (2, 6, 4) and not self.proxy_headers: # Python 2.6.4 and older - set_tunnel(self.host, self.port) - else: - set_tunnel(self.host, self.port, self.proxy_headers) - - conn.connect() - - def _new_conn(self): - """ - Return a fresh :class:`httplib.HTTPSConnection`. - """ - self.num_connections += 1 - log.debug("Starting new HTTPS connection (%d): %s", - self.num_connections, self.host) - - if not self.ConnectionCls or self.ConnectionCls is DummyConnection: - raise SSLError("Can't connect to HTTPS URL because the SSL " - "module is not available.") - - actual_host = self.host - actual_port = self.port - if self.proxy is not None: - actual_host = self.proxy.host - actual_port = self.proxy.port - - conn = self.ConnectionCls(host=actual_host, port=actual_port, - timeout=self.timeout.connect_timeout, - strict=self.strict, **self.conn_kw) - - return self._prepare_conn(conn) - - def _validate_conn(self, conn): - """ - Called right before a request is made, after the socket is created. - """ - super(HTTPSConnectionPool, self)._validate_conn(conn) - - # Force connect early to allow us to validate the connection. - if not getattr(conn, 'sock', None): # AppEngine might not have `.sock` - conn.connect() - - if not conn.is_verified: - warnings.warn(( - 'Unverified HTTPS request is being made. ' - 'Adding certificate verification is strongly advised. 
See: ' - 'https://urllib3.readthedocs.io/en/latest/advanced-usage.html' - '#ssl-warnings'), - InsecureRequestWarning) - - -def connection_from_url(url, **kw): - """ - Given a url, return an :class:`.ConnectionPool` instance of its host. - - This is a shortcut for not having to parse out the scheme, host, and port - of the url before creating an :class:`.ConnectionPool` instance. - - :param url: - Absolute URL string that must include the scheme. Port is optional. - - :param \\**kw: - Passes additional parameters to the constructor of the appropriate - :class:`.ConnectionPool`. Useful for specifying things like - timeout, maxsize, headers, etc. - - Example:: - - >>> conn = connection_from_url('http://google.com/') - >>> r = conn.request('GET', '/') - """ - scheme, host, port = get_host(url) - port = port or port_by_scheme.get(scheme, 80) - if scheme == 'https': - return HTTPSConnectionPool(host, port=port, **kw) - else: - return HTTPConnectionPool(host, port=port, **kw) - - -def _ipv6_host(host): - """ - Process IPv6 address literals - """ - - # httplib doesn't like it when we include brackets in IPv6 addresses - # Specifically, if we include brackets but also pass the port then - # httplib crazily doubles up the square brackets on the Host header. - # Instead, we need to make sure we never pass ``None`` as the port. - # However, for backward compatibility reasons we can't actually - # *assert* that. See http://bugs.python.org/issue28539 - # - # Also if an IPv6 address literal has a zone identifier, the - # percent sign might be URIencoded, convert it back into ASCII - if host.startswith('[') and host.endswith(']'): - host = host.replace('%25', '%').strip('[]') - return host diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/contrib/__init__.py b/src/collectors/python.d.plugin/python_modules/urllib3/contrib/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/__init__.py b/src/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/bindings.py b/src/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/bindings.py deleted file mode 100644 index bb826673f..000000000 --- a/src/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/bindings.py +++ /dev/null @@ -1,591 +0,0 @@ -# SPDX-License-Identifier: MIT -""" -This module uses ctypes to bind a whole bunch of functions and constants from -SecureTransport. The goal here is to provide the low-level API to -SecureTransport. These are essentially the C-level functions and constants, and -they're pretty gross to work with. - -This code is a bastardised version of the code found in Will Bond's oscrypto -library. An enormous debt is owed to him for blazing this trail for us. 
For -that reason, this code should be considered to be covered both by urllib3's -license and by oscrypto's: - - Copyright (c) 2015-2016 Will Bond - - Permission is hereby granted, free of charge, to any person obtaining a - copy of this software and associated documentation files (the "Software"), - to deal in the Software without restriction, including without limitation - the rights to use, copy, modify, merge, publish, distribute, sublicense, - and/or sell copies of the Software, and to permit persons to whom the - Software is furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in - all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - DEALINGS IN THE SOFTWARE. -""" -from __future__ import absolute_import - -import platform -from ctypes.util import find_library -from ctypes import ( - c_void_p, c_int32, c_char_p, c_size_t, c_byte, c_uint32, c_ulong, c_long, - c_bool -) -from ctypes import CDLL, POINTER, CFUNCTYPE - - -security_path = find_library('Security') -if not security_path: - raise ImportError('The library Security could not be found') - - -core_foundation_path = find_library('CoreFoundation') -if not core_foundation_path: - raise ImportError('The library CoreFoundation could not be found') - - -version = platform.mac_ver()[0] -version_info = tuple(map(int, version.split('.'))) -if version_info < (10, 8): - raise OSError( - 'Only OS X 10.8 and newer are supported, not %s.%s' % ( - version_info[0], version_info[1] - ) - ) - -Security = CDLL(security_path, use_errno=True) -CoreFoundation = CDLL(core_foundation_path, use_errno=True) - -Boolean = c_bool -CFIndex = c_long -CFStringEncoding = c_uint32 -CFData = c_void_p -CFString = c_void_p -CFArray = c_void_p -CFMutableArray = c_void_p -CFDictionary = c_void_p -CFError = c_void_p -CFType = c_void_p -CFTypeID = c_ulong - -CFTypeRef = POINTER(CFType) -CFAllocatorRef = c_void_p - -OSStatus = c_int32 - -CFDataRef = POINTER(CFData) -CFStringRef = POINTER(CFString) -CFArrayRef = POINTER(CFArray) -CFMutableArrayRef = POINTER(CFMutableArray) -CFDictionaryRef = POINTER(CFDictionary) -CFArrayCallBacks = c_void_p -CFDictionaryKeyCallBacks = c_void_p -CFDictionaryValueCallBacks = c_void_p - -SecCertificateRef = POINTER(c_void_p) -SecExternalFormat = c_uint32 -SecExternalItemType = c_uint32 -SecIdentityRef = POINTER(c_void_p) -SecItemImportExportFlags = c_uint32 -SecItemImportExportKeyParameters = c_void_p -SecKeychainRef = POINTER(c_void_p) -SSLProtocol = c_uint32 -SSLCipherSuite = c_uint32 -SSLContextRef = POINTER(c_void_p) -SecTrustRef = POINTER(c_void_p) -SSLConnectionRef = c_uint32 -SecTrustResultType = c_uint32 -SecTrustOptionFlags = c_uint32 -SSLProtocolSide = c_uint32 -SSLConnectionType = c_uint32 -SSLSessionOption = c_uint32 - - -try: - Security.SecItemImport.argtypes = [ - CFDataRef, - CFStringRef, - POINTER(SecExternalFormat), - POINTER(SecExternalItemType), - SecItemImportExportFlags, - POINTER(SecItemImportExportKeyParameters), - SecKeychainRef, - POINTER(CFArrayRef), - ] - Security.SecItemImport.restype = 
OSStatus - - Security.SecCertificateGetTypeID.argtypes = [] - Security.SecCertificateGetTypeID.restype = CFTypeID - - Security.SecIdentityGetTypeID.argtypes = [] - Security.SecIdentityGetTypeID.restype = CFTypeID - - Security.SecKeyGetTypeID.argtypes = [] - Security.SecKeyGetTypeID.restype = CFTypeID - - Security.SecCertificateCreateWithData.argtypes = [ - CFAllocatorRef, - CFDataRef - ] - Security.SecCertificateCreateWithData.restype = SecCertificateRef - - Security.SecCertificateCopyData.argtypes = [ - SecCertificateRef - ] - Security.SecCertificateCopyData.restype = CFDataRef - - Security.SecCopyErrorMessageString.argtypes = [ - OSStatus, - c_void_p - ] - Security.SecCopyErrorMessageString.restype = CFStringRef - - Security.SecIdentityCreateWithCertificate.argtypes = [ - CFTypeRef, - SecCertificateRef, - POINTER(SecIdentityRef) - ] - Security.SecIdentityCreateWithCertificate.restype = OSStatus - - Security.SecKeychainCreate.argtypes = [ - c_char_p, - c_uint32, - c_void_p, - Boolean, - c_void_p, - POINTER(SecKeychainRef) - ] - Security.SecKeychainCreate.restype = OSStatus - - Security.SecKeychainDelete.argtypes = [ - SecKeychainRef - ] - Security.SecKeychainDelete.restype = OSStatus - - Security.SecPKCS12Import.argtypes = [ - CFDataRef, - CFDictionaryRef, - POINTER(CFArrayRef) - ] - Security.SecPKCS12Import.restype = OSStatus - - SSLReadFunc = CFUNCTYPE(OSStatus, SSLConnectionRef, c_void_p, POINTER(c_size_t)) - SSLWriteFunc = CFUNCTYPE(OSStatus, SSLConnectionRef, POINTER(c_byte), POINTER(c_size_t)) - - Security.SSLSetIOFuncs.argtypes = [ - SSLContextRef, - SSLReadFunc, - SSLWriteFunc - ] - Security.SSLSetIOFuncs.restype = OSStatus - - Security.SSLSetPeerID.argtypes = [ - SSLContextRef, - c_char_p, - c_size_t - ] - Security.SSLSetPeerID.restype = OSStatus - - Security.SSLSetCertificate.argtypes = [ - SSLContextRef, - CFArrayRef - ] - Security.SSLSetCertificate.restype = OSStatus - - Security.SSLSetCertificateAuthorities.argtypes = [ - SSLContextRef, - CFTypeRef, - Boolean - ] - Security.SSLSetCertificateAuthorities.restype = OSStatus - - Security.SSLSetConnection.argtypes = [ - SSLContextRef, - SSLConnectionRef - ] - Security.SSLSetConnection.restype = OSStatus - - Security.SSLSetPeerDomainName.argtypes = [ - SSLContextRef, - c_char_p, - c_size_t - ] - Security.SSLSetPeerDomainName.restype = OSStatus - - Security.SSLHandshake.argtypes = [ - SSLContextRef - ] - Security.SSLHandshake.restype = OSStatus - - Security.SSLRead.argtypes = [ - SSLContextRef, - c_char_p, - c_size_t, - POINTER(c_size_t) - ] - Security.SSLRead.restype = OSStatus - - Security.SSLWrite.argtypes = [ - SSLContextRef, - c_char_p, - c_size_t, - POINTER(c_size_t) - ] - Security.SSLWrite.restype = OSStatus - - Security.SSLClose.argtypes = [ - SSLContextRef - ] - Security.SSLClose.restype = OSStatus - - Security.SSLGetNumberSupportedCiphers.argtypes = [ - SSLContextRef, - POINTER(c_size_t) - ] - Security.SSLGetNumberSupportedCiphers.restype = OSStatus - - Security.SSLGetSupportedCiphers.argtypes = [ - SSLContextRef, - POINTER(SSLCipherSuite), - POINTER(c_size_t) - ] - Security.SSLGetSupportedCiphers.restype = OSStatus - - Security.SSLSetEnabledCiphers.argtypes = [ - SSLContextRef, - POINTER(SSLCipherSuite), - c_size_t - ] - Security.SSLSetEnabledCiphers.restype = OSStatus - - Security.SSLGetNumberEnabledCiphers.argtypes = [ - SSLContextRef, - POINTER(c_size_t) - ] - Security.SSLGetNumberEnabledCiphers.restype = OSStatus - - Security.SSLGetEnabledCiphers.argtypes = [ - SSLContextRef, - POINTER(SSLCipherSuite), - 
POINTER(c_size_t) - ] - Security.SSLGetEnabledCiphers.restype = OSStatus - - Security.SSLGetNegotiatedCipher.argtypes = [ - SSLContextRef, - POINTER(SSLCipherSuite) - ] - Security.SSLGetNegotiatedCipher.restype = OSStatus - - Security.SSLGetNegotiatedProtocolVersion.argtypes = [ - SSLContextRef, - POINTER(SSLProtocol) - ] - Security.SSLGetNegotiatedProtocolVersion.restype = OSStatus - - Security.SSLCopyPeerTrust.argtypes = [ - SSLContextRef, - POINTER(SecTrustRef) - ] - Security.SSLCopyPeerTrust.restype = OSStatus - - Security.SecTrustSetAnchorCertificates.argtypes = [ - SecTrustRef, - CFArrayRef - ] - Security.SecTrustSetAnchorCertificates.restype = OSStatus - - Security.SecTrustSetAnchorCertificatesOnly.argtypes = [ - SecTrustRef, - Boolean - ] - Security.SecTrustSetAnchorCertificatesOnly.restype = OSStatus - - Security.SecTrustEvaluate.argtypes = [ - SecTrustRef, - POINTER(SecTrustResultType) - ] - Security.SecTrustEvaluate.restype = OSStatus - - Security.SecTrustGetCertificateCount.argtypes = [ - SecTrustRef - ] - Security.SecTrustGetCertificateCount.restype = CFIndex - - Security.SecTrustGetCertificateAtIndex.argtypes = [ - SecTrustRef, - CFIndex - ] - Security.SecTrustGetCertificateAtIndex.restype = SecCertificateRef - - Security.SSLCreateContext.argtypes = [ - CFAllocatorRef, - SSLProtocolSide, - SSLConnectionType - ] - Security.SSLCreateContext.restype = SSLContextRef - - Security.SSLSetSessionOption.argtypes = [ - SSLContextRef, - SSLSessionOption, - Boolean - ] - Security.SSLSetSessionOption.restype = OSStatus - - Security.SSLSetProtocolVersionMin.argtypes = [ - SSLContextRef, - SSLProtocol - ] - Security.SSLSetProtocolVersionMin.restype = OSStatus - - Security.SSLSetProtocolVersionMax.argtypes = [ - SSLContextRef, - SSLProtocol - ] - Security.SSLSetProtocolVersionMax.restype = OSStatus - - Security.SecCopyErrorMessageString.argtypes = [ - OSStatus, - c_void_p - ] - Security.SecCopyErrorMessageString.restype = CFStringRef - - Security.SSLReadFunc = SSLReadFunc - Security.SSLWriteFunc = SSLWriteFunc - Security.SSLContextRef = SSLContextRef - Security.SSLProtocol = SSLProtocol - Security.SSLCipherSuite = SSLCipherSuite - Security.SecIdentityRef = SecIdentityRef - Security.SecKeychainRef = SecKeychainRef - Security.SecTrustRef = SecTrustRef - Security.SecTrustResultType = SecTrustResultType - Security.SecExternalFormat = SecExternalFormat - Security.OSStatus = OSStatus - - Security.kSecImportExportPassphrase = CFStringRef.in_dll( - Security, 'kSecImportExportPassphrase' - ) - Security.kSecImportItemIdentity = CFStringRef.in_dll( - Security, 'kSecImportItemIdentity' - ) - - # CoreFoundation time!
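As an aside, the Security declarations above (and the CoreFoundation ones that follow) all use the same plain-ctypes idiom. Reduced to a minimal, self-contained sketch against libc rather than Security.framework, so it runs anywhere ``find_library('c')`` resolves (a portability assumption; illustrative only, not part of the patched module)::

    import ctypes
    from ctypes.util import find_library

    libc = ctypes.CDLL(find_library('c'), use_errno=True)

    # Declaring argtypes/restype tells ctypes how to convert and type-check
    # arguments; misspelling the attribute (e.g. 'argstypes') is silently
    # ignored and simply disables that checking.
    libc.strlen.argtypes = [ctypes.c_char_p]
    libc.strlen.restype = ctypes.c_size_t

    assert libc.strlen(b'secure') == 6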
- CoreFoundation.CFRetain.argtypes = [ - CFTypeRef - ] - CoreFoundation.CFRetain.restype = CFTypeRef - - CoreFoundation.CFRelease.argtypes = [ - CFTypeRef - ] - CoreFoundation.CFRelease.restype = None - - CoreFoundation.CFGetTypeID.argtypes = [ - CFTypeRef - ] - CoreFoundation.CFGetTypeID.restype = CFTypeID - - CoreFoundation.CFStringCreateWithCString.argtypes = [ - CFAllocatorRef, - c_char_p, - CFStringEncoding - ] - CoreFoundation.CFStringCreateWithCString.restype = CFStringRef - - CoreFoundation.CFStringGetCStringPtr.argtypes = [ - CFStringRef, - CFStringEncoding - ] - CoreFoundation.CFStringGetCStringPtr.restype = c_char_p - - CoreFoundation.CFStringGetCString.argtypes = [ - CFStringRef, - c_char_p, - CFIndex, - CFStringEncoding - ] - CoreFoundation.CFStringGetCString.restype = c_bool - - CoreFoundation.CFDataCreate.argtypes = [ - CFAllocatorRef, - c_char_p, - CFIndex - ] - CoreFoundation.CFDataCreate.restype = CFDataRef - - CoreFoundation.CFDataGetLength.argtypes = [ - CFDataRef - ] - CoreFoundation.CFDataGetLength.restype = CFIndex - - CoreFoundation.CFDataGetBytePtr.argtypes = [ - CFDataRef - ] - CoreFoundation.CFDataGetBytePtr.restype = c_void_p - - CoreFoundation.CFDictionaryCreate.argtypes = [ - CFAllocatorRef, - POINTER(CFTypeRef), - POINTER(CFTypeRef), - CFIndex, - CFDictionaryKeyCallBacks, - CFDictionaryValueCallBacks - ] - CoreFoundation.CFDictionaryCreate.restype = CFDictionaryRef - - CoreFoundation.CFDictionaryGetValue.argtypes = [ - CFDictionaryRef, - CFTypeRef - ] - CoreFoundation.CFDictionaryGetValue.restype = CFTypeRef - - CoreFoundation.CFArrayCreate.argtypes = [ - CFAllocatorRef, - POINTER(CFTypeRef), - CFIndex, - CFArrayCallBacks, - ] - CoreFoundation.CFArrayCreate.restype = CFArrayRef - - CoreFoundation.CFArrayCreateMutable.argtypes = [ - CFAllocatorRef, - CFIndex, - CFArrayCallBacks - ] - CoreFoundation.CFArrayCreateMutable.restype = CFMutableArrayRef - - CoreFoundation.CFArrayAppendValue.argtypes = [ - CFMutableArrayRef, - c_void_p - ] - CoreFoundation.CFArrayAppendValue.restype = None - - CoreFoundation.CFArrayGetCount.argtypes = [ - CFArrayRef - ] - CoreFoundation.CFArrayGetCount.restype = CFIndex - - CoreFoundation.CFArrayGetValueAtIndex.argtypes = [ - CFArrayRef, - CFIndex - ] - CoreFoundation.CFArrayGetValueAtIndex.restype = c_void_p - - CoreFoundation.kCFAllocatorDefault = CFAllocatorRef.in_dll( - CoreFoundation, 'kCFAllocatorDefault' - ) - CoreFoundation.kCFTypeArrayCallBacks = c_void_p.in_dll(CoreFoundation, 'kCFTypeArrayCallBacks') - CoreFoundation.kCFTypeDictionaryKeyCallBacks = c_void_p.in_dll( - CoreFoundation, 'kCFTypeDictionaryKeyCallBacks' - ) - CoreFoundation.kCFTypeDictionaryValueCallBacks = c_void_p.in_dll( - CoreFoundation, 'kCFTypeDictionaryValueCallBacks' - ) - - CoreFoundation.CFTypeRef = CFTypeRef - CoreFoundation.CFArrayRef = CFArrayRef - CoreFoundation.CFStringRef = CFStringRef - CoreFoundation.CFDictionaryRef = CFDictionaryRef - -except (AttributeError): - raise ImportError('Error initializing ctypes') - - -class CFConst(object): - """ - A class object that acts as essentially a namespace for CoreFoundation - constants. - """ - kCFStringEncodingUTF8 = CFStringEncoding(0x08000100) - - -class SecurityConst(object): - """ - A class object that acts as essentially a namespace for Security constants. 
- """ - kSSLSessionOptionBreakOnServerAuth = 0 - - kSSLProtocol2 = 1 - kSSLProtocol3 = 2 - kTLSProtocol1 = 4 - kTLSProtocol11 = 7 - kTLSProtocol12 = 8 - - kSSLClientSide = 1 - kSSLStreamType = 0 - - kSecFormatPEMSequence = 10 - - kSecTrustResultInvalid = 0 - kSecTrustResultProceed = 1 - # This gap is present on purpose: this was kSecTrustResultConfirm, which - # is deprecated. - kSecTrustResultDeny = 3 - kSecTrustResultUnspecified = 4 - kSecTrustResultRecoverableTrustFailure = 5 - kSecTrustResultFatalTrustFailure = 6 - kSecTrustResultOtherError = 7 - - errSSLProtocol = -9800 - errSSLWouldBlock = -9803 - errSSLClosedGraceful = -9805 - errSSLClosedNoNotify = -9816 - errSSLClosedAbort = -9806 - - errSSLXCertChainInvalid = -9807 - errSSLCrypto = -9809 - errSSLInternal = -9810 - errSSLCertExpired = -9814 - errSSLCertNotYetValid = -9815 - errSSLUnknownRootCert = -9812 - errSSLNoRootCert = -9813 - errSSLHostNameMismatch = -9843 - errSSLPeerHandshakeFail = -9824 - errSSLPeerUserCancelled = -9839 - errSSLWeakPeerEphemeralDHKey = -9850 - errSSLServerAuthCompleted = -9841 - errSSLRecordOverflow = -9847 - - errSecVerifyFailed = -67808 - errSecNoTrustSettings = -25263 - errSecItemNotFound = -25300 - errSecInvalidTrustSettings = -25262 - - # Cipher suites. We only pick the ones our default cipher string allows. - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 = 0xC02C - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 = 0xC030 - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 = 0xC02B - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 = 0xC02F - TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 = 0x00A3 - TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 = 0x009F - TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 = 0x00A2 - TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 = 0x009E - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 = 0xC024 - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 = 0xC028 - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA = 0xC00A - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA = 0xC014 - TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 = 0x006B - TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 = 0x006A - TLS_DHE_RSA_WITH_AES_256_CBC_SHA = 0x0039 - TLS_DHE_DSS_WITH_AES_256_CBC_SHA = 0x0038 - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 = 0xC023 - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 = 0xC027 - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA = 0xC009 - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA = 0xC013 - TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 = 0x0067 - TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 = 0x0040 - TLS_DHE_RSA_WITH_AES_128_CBC_SHA = 0x0033 - TLS_DHE_DSS_WITH_AES_128_CBC_SHA = 0x0032 - TLS_RSA_WITH_AES_256_GCM_SHA384 = 0x009D - TLS_RSA_WITH_AES_128_GCM_SHA256 = 0x009C - TLS_RSA_WITH_AES_256_CBC_SHA256 = 0x003D - TLS_RSA_WITH_AES_128_CBC_SHA256 = 0x003C - TLS_RSA_WITH_AES_256_CBC_SHA = 0x0035 - TLS_RSA_WITH_AES_128_CBC_SHA = 0x002F diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/low_level.py b/src/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/low_level.py deleted file mode 100644 index 0f79a1372..000000000 --- a/src/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/low_level.py +++ /dev/null @@ -1,344 +0,0 @@ -# SPDX-License-Identifier: MIT -""" -Low-level helpers for the SecureTransport bindings. - -These are Python functions that are not directly related to the high-level APIs -but are necessary to get them to work. They include a whole bunch of low-level -CoreFoundation messing about and memory management. 
The concerns in this module -are almost entirely about trying to avoid memory leaks and providing -appropriate and useful assistance to the higher-level code. -""" -import base64 -import ctypes -import itertools -import re -import os -import ssl -import tempfile - -from .bindings import Security, CoreFoundation, CFConst - - -# This regular expression is used to grab PEM data out of a PEM bundle. -_PEM_CERTS_RE = re.compile( - b"-----BEGIN CERTIFICATE-----\n(.*?)\n-----END CERTIFICATE-----", re.DOTALL -) - - -def _cf_data_from_bytes(bytestring): - """ - Given a bytestring, create a CFData object from it. This CFData object must - be CFReleased by the caller. - """ - return CoreFoundation.CFDataCreate( - CoreFoundation.kCFAllocatorDefault, bytestring, len(bytestring) - ) - - -def _cf_dictionary_from_tuples(tuples): - """ - Given a list of Python tuples, create an associated CFDictionary. - """ - dictionary_size = len(tuples) - - # We need to get the dictionary keys and values out in the same order. - keys = (t[0] for t in tuples) - values = (t[1] for t in tuples) - cf_keys = (CoreFoundation.CFTypeRef * dictionary_size)(*keys) - cf_values = (CoreFoundation.CFTypeRef * dictionary_size)(*values) - - return CoreFoundation.CFDictionaryCreate( - CoreFoundation.kCFAllocatorDefault, - cf_keys, - cf_values, - dictionary_size, - CoreFoundation.kCFTypeDictionaryKeyCallBacks, - CoreFoundation.kCFTypeDictionaryValueCallBacks, - ) - - -def _cf_string_to_unicode(value): - """ - Creates a Unicode string from a CFString object. Used entirely for error - reporting. - - Yes, it annoys me quite a lot that this function is this complex. - """ - value_as_void_p = ctypes.cast(value, ctypes.POINTER(ctypes.c_void_p)) - - string = CoreFoundation.CFStringGetCStringPtr( - value_as_void_p, - CFConst.kCFStringEncodingUTF8 - ) - if string is None: - buffer = ctypes.create_string_buffer(1024) - result = CoreFoundation.CFStringGetCString( - value_as_void_p, - buffer, - 1024, - CFConst.kCFStringEncodingUTF8 - ) - if not result: - raise OSError('Error copying C string from CFStringRef') - string = buffer.value - if string is not None: - string = string.decode('utf-8') - return string - - -def _assert_no_error(error, exception_class=None): - """ - Checks the return code and throws an exception if there is an error to - report - """ - if error == 0: - return - - cf_error_string = Security.SecCopyErrorMessageString(error, None) - output = _cf_string_to_unicode(cf_error_string) - CoreFoundation.CFRelease(cf_error_string) - - if output is None or output == u'': - output = u'OSStatus %s' % error - - if exception_class is None: - exception_class = ssl.SSLError - - raise exception_class(output) - - -def _cert_array_from_pem(pem_bundle): - """ - Given a bundle of certs in PEM format, turns them into a CFArray of certs - that can be used to validate a cert chain. 
- """ - der_certs = [ - base64.b64decode(match.group(1)) - for match in _PEM_CERTS_RE.finditer(pem_bundle) - ] - if not der_certs: - raise ssl.SSLError("No root certificates specified") - - cert_array = CoreFoundation.CFArrayCreateMutable( - CoreFoundation.kCFAllocatorDefault, - 0, - ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks) - ) - if not cert_array: - raise ssl.SSLError("Unable to allocate memory!") - - try: - for der_bytes in der_certs: - certdata = _cf_data_from_bytes(der_bytes) - if not certdata: - raise ssl.SSLError("Unable to allocate memory!") - cert = Security.SecCertificateCreateWithData( - CoreFoundation.kCFAllocatorDefault, certdata - ) - CoreFoundation.CFRelease(certdata) - if not cert: - raise ssl.SSLError("Unable to build cert object!") - - CoreFoundation.CFArrayAppendValue(cert_array, cert) - CoreFoundation.CFRelease(cert) - except Exception: - # We need to free the array before the exception bubbles further. - # We only want to do that if an error occurs: otherwise, the caller - # should free. - CoreFoundation.CFRelease(cert_array) - - return cert_array - - -def _is_cert(item): - """ - Returns True if a given CFTypeRef is a certificate. - """ - expected = Security.SecCertificateGetTypeID() - return CoreFoundation.CFGetTypeID(item) == expected - - -def _is_identity(item): - """ - Returns True if a given CFTypeRef is an identity. - """ - expected = Security.SecIdentityGetTypeID() - return CoreFoundation.CFGetTypeID(item) == expected - - -def _temporary_keychain(): - """ - This function creates a temporary Mac keychain that we can use to work with - credentials. This keychain uses a one-time password and a temporary file to - store the data. We expect to have one keychain per socket. The returned - SecKeychainRef must be freed by the caller, including calling - SecKeychainDelete. - - Returns a tuple of the SecKeychainRef and the path to the temporary - directory that contains it. - """ - # Unfortunately, SecKeychainCreate requires a path to a keychain. This - # means we cannot use mkstemp to use a generic temporary file. Instead, - # we're going to create a temporary directory and a filename to use there. - # This filename will be 8 random bytes expanded into base64. We also need - # some random bytes to password-protect the keychain we're creating, so we - # ask for 40 random bytes. - random_bytes = os.urandom(40) - filename = base64.b64encode(random_bytes[:8]).decode('utf-8') - password = base64.b64encode(random_bytes[8:]) # Must be valid UTF-8 - tempdirectory = tempfile.mkdtemp() - - keychain_path = os.path.join(tempdirectory, filename).encode('utf-8') - - # We now want to create the keychain itself. - keychain = Security.SecKeychainRef() - status = Security.SecKeychainCreate( - keychain_path, - len(password), - password, - False, - None, - ctypes.byref(keychain) - ) - _assert_no_error(status) - - # Having created the keychain, we want to pass it off to the caller. - return keychain, tempdirectory - - -def _load_items_from_file(keychain, path): - """ - Given a single file, loads all the trust objects from it into arrays and - the keychain. - Returns a tuple of lists: the first list is a list of identities, the - second a list of certs. 
- """ - certificates = [] - identities = [] - result_array = None - - with open(path, 'rb') as f: - raw_filedata = f.read() - - try: - filedata = CoreFoundation.CFDataCreate( - CoreFoundation.kCFAllocatorDefault, - raw_filedata, - len(raw_filedata) - ) - result_array = CoreFoundation.CFArrayRef() - result = Security.SecItemImport( - filedata, # cert data - None, # Filename, leaving it out for now - None, # What the type of the file is, we don't care - None, # what's in the file, we don't care - 0, # import flags - None, # key params, can include passphrase in the future - keychain, # The keychain to insert into - ctypes.byref(result_array) # Results - ) - _assert_no_error(result) - - # A CFArray is not very useful to us as an intermediary - # representation, so we are going to extract the objects we want - # and then free the array. We don't need to keep hold of keys: the - # keychain already has them! - result_count = CoreFoundation.CFArrayGetCount(result_array) - for index in range(result_count): - item = CoreFoundation.CFArrayGetValueAtIndex( - result_array, index - ) - item = ctypes.cast(item, CoreFoundation.CFTypeRef) - - if _is_cert(item): - CoreFoundation.CFRetain(item) - certificates.append(item) - elif _is_identity(item): - CoreFoundation.CFRetain(item) - identities.append(item) - finally: - if result_array: - CoreFoundation.CFRelease(result_array) - - CoreFoundation.CFRelease(filedata) - - return (identities, certificates) - - -def _load_client_cert_chain(keychain, *paths): - """ - Load certificates and maybe keys from a number of files. Has the end goal - of returning a CFArray containing one SecIdentityRef, and then zero or more - SecCertificateRef objects, suitable for use as a client certificate trust - chain. - """ - # Ok, the strategy. - # - # This relies on knowing that macOS will not give you a SecIdentityRef - # unless you have imported a key into a keychain. This is a somewhat - # artificial limitation of macOS (for example, it doesn't necessarily - # affect iOS), but there is nothing inside Security.framework that lets you - # get a SecIdentityRef without having a key in a keychain. - # - # So the policy here is we take all the files and iterate them in order. - # Each one will use SecItemImport to have one or more objects loaded from - # it. We will also point at a keychain that macOS can use to work with the - # private key. - # - # Once we have all the objects, we'll check what we actually have. If we - # already have a SecIdentityRef in hand, fab: we'll use that. Otherwise, - # we'll take the first certificate (which we assume to be our leaf) and - # ask the keychain to give us a SecIdentityRef with that cert's associated - # key. - # - # We'll then return a CFArray containing the trust chain: one - # SecIdentityRef and then zero-or-more SecCertificateRef objects. The - # responsibility for freeing this CFArray will be with the caller. This - # CFArray must remain alive for the entire connection, so in practice it - # will be stored with a single SSLSocket, along with the reference to the - # keychain. - certificates = [] - identities = [] - - # Filter out bad paths. - paths = (path for path in paths if path) - - try: - for file_path in paths: - new_identities, new_certs = _load_items_from_file( - keychain, file_path - ) - identities.extend(new_identities) - certificates.extend(new_certs) - - # Ok, we have everything. The question is: do we have an identity? If - # not, we want to grab one from the first cert we have. 
- if not identities: - new_identity = Security.SecIdentityRef() - status = Security.SecIdentityCreateWithCertificate( - keychain, - certificates[0], - ctypes.byref(new_identity) - ) - _assert_no_error(status) - identities.append(new_identity) - - # We now want to release the original certificate, as we no longer - # need it. - CoreFoundation.CFRelease(certificates.pop(0)) - - # We now need to build a new CFArray that holds the trust chain. - trust_chain = CoreFoundation.CFArrayCreateMutable( - CoreFoundation.kCFAllocatorDefault, - 0, - ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks), - ) - for item in itertools.chain(identities, certificates): - # ArrayAppendValue does a CFRetain on the item. That's fine, - # because the finally block will release our other refs to them. - CoreFoundation.CFArrayAppendValue(trust_chain, item) - - return trust_chain - finally: - for obj in itertools.chain(identities, certificates): - CoreFoundation.CFRelease(obj) diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/contrib/appengine.py b/src/collectors/python.d.plugin/python_modules/urllib3/contrib/appengine.py deleted file mode 100644 index e74589fa8..000000000 --- a/src/collectors/python.d.plugin/python_modules/urllib3/contrib/appengine.py +++ /dev/null @@ -1,297 +0,0 @@ -# SPDX-License-Identifier: MIT -""" -This module provides a pool manager that uses Google App Engine's -`URLFetch Service `_. - -Example usage:: - - from urllib3 import PoolManager - from urllib3.contrib.appengine import AppEngineManager, is_appengine_sandbox - - if is_appengine_sandbox(): - # AppEngineManager uses AppEngine's URLFetch API behind the scenes - http = AppEngineManager() - else: - # PoolManager uses a socket-level API behind the scenes - http = PoolManager() - - r = http.request('GET', 'https://google.com/') - -There are `limitations `_ to the URLFetch service and it may not be -the best choice for your application. There are three options for using -urllib3 on Google App Engine: - -1. You can use :class:`AppEngineManager` with URLFetch. URLFetch is - cost-effective in many circumstances as long as your usage is within the - limitations. -2. You can use a normal :class:`~urllib3.PoolManager` by enabling sockets. - Sockets also have `limitations and restrictions - `_ and have a lower free quota than URLFetch. - To use sockets, be sure to specify the following in your ``app.yaml``:: - - env_variables: - GAE_USE_SOCKETS_HTTPLIB : 'true' - -3. If you are using `App Engine Flexible -`_, you can use the standard -:class:`PoolManager` without any configuration or special environment variables. -""" - -from __future__ import absolute_import -import logging -import os -import warnings -from ..packages.six.moves.urllib.parse import urljoin - -from ..exceptions import ( - HTTPError, - HTTPWarning, - MaxRetryError, - ProtocolError, - TimeoutError, - SSLError -) - -from ..packages.six import BytesIO -from ..request import RequestMethods -from ..response import HTTPResponse -from ..util.timeout import Timeout -from ..util.retry import Retry - -try: - from google.appengine.api import urlfetch -except ImportError: - urlfetch = None - - -log = logging.getLogger(__name__) - - -class AppEnginePlatformWarning(HTTPWarning): - pass - - -class AppEnginePlatformError(HTTPError): - pass - - -class AppEngineManager(RequestMethods): - """ - Connection manager for Google App Engine sandbox applications. 
- - This manager uses the URLFetch service directly instead of using the - emulated httplib, and is subject to URLFetch limitations as described in - the App Engine documentation `here - `_. - - Notably it will raise an :class:`AppEnginePlatformError` if: - * URLFetch is not available. - * You attempt to use this on App Engine Flexible, as full socket - support is available. - * A request size is more than 10 megabytes. - * A response size is more than 32 megabytes. - * You use an unsupported request method such as OPTIONS. - - Beyond those cases, it will raise normal urllib3 errors. - """ - - def __init__(self, headers=None, retries=None, validate_certificate=True, - urlfetch_retries=True): - if not urlfetch: - raise AppEnginePlatformError( - "URLFetch is not available in this environment.") - - if is_prod_appengine_mvms(): - raise AppEnginePlatformError( - "Use normal urllib3.PoolManager instead of AppEngineManager " - "on Managed VMs, as using URLFetch is not necessary in " - "this environment.") - - warnings.warn( - "urllib3 is using URLFetch on Google App Engine sandbox instead " - "of sockets. To use sockets directly instead of URLFetch see " - "https://urllib3.readthedocs.io/en/latest/reference/urllib3.contrib.html.", - AppEnginePlatformWarning) - - RequestMethods.__init__(self, headers) - self.validate_certificate = validate_certificate - self.urlfetch_retries = urlfetch_retries - - self.retries = retries or Retry.DEFAULT - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - # Return False to re-raise any potential exceptions - return False - - def urlopen(self, method, url, body=None, headers=None, - retries=None, redirect=True, timeout=Timeout.DEFAULT_TIMEOUT, - **response_kw): - - retries = self._get_retries(retries, redirect) - - try: - follow_redirects = ( - redirect and - retries.redirect != 0 and - retries.total) - response = urlfetch.fetch( - url, - payload=body, - method=method, - headers=headers or {}, - allow_truncated=False, - follow_redirects=self.urlfetch_retries and follow_redirects, - deadline=self._get_absolute_timeout(timeout), - validate_certificate=self.validate_certificate, - ) - except urlfetch.DeadlineExceededError as e: - raise TimeoutError(self, e) - - except urlfetch.InvalidURLError as e: - if 'too large' in str(e): - raise AppEnginePlatformError( - "URLFetch request too large, URLFetch only " - "supports requests up to 10mb in size.", e) - raise ProtocolError(e) - - except urlfetch.DownloadError as e: - if 'Too many redirects' in str(e): - raise MaxRetryError(self, url, reason=e) - raise ProtocolError(e) - - except urlfetch.ResponseTooLargeError as e: - raise AppEnginePlatformError( - "URLFetch response too large, URLFetch only supports " - "responses up to 32mb in size.", e) - - except urlfetch.SSLCertificateError as e: - raise SSLError(e) - - except urlfetch.InvalidMethodError as e: - raise AppEnginePlatformError( - "URLFetch does not support method: %s" % method, e) - - http_response = self._urlfetch_response_to_http_response( - response, retries=retries, **response_kw) - - # Handle redirect?
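    # (For orientation: when a redirect is followed, a 303 response is
    # retried as GET, and each hop re-enters urlopen() with
    # retries.increment() consuming one redirect credit until
    # raise_on_redirect aborts the chain -- a summary of the branch below.)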
- redirect_location = redirect and http_response.get_redirect_location() - if redirect_location: - # Check for redirect response - if (self.urlfetch_retries and retries.raise_on_redirect): - raise MaxRetryError(self, url, "too many redirects") - else: - if http_response.status == 303: - method = 'GET' - - try: - retries = retries.increment(method, url, response=http_response, _pool=self) - except MaxRetryError: - if retries.raise_on_redirect: - raise MaxRetryError(self, url, "too many redirects") - return http_response - - retries.sleep_for_retry(http_response) - log.debug("Redirecting %s -> %s", url, redirect_location) - redirect_url = urljoin(url, redirect_location) - return self.urlopen( - method, redirect_url, body, headers, - retries=retries, redirect=redirect, - timeout=timeout, **response_kw) - - # Check if we should retry the HTTP response. - has_retry_after = bool(http_response.getheader('Retry-After')) - if retries.is_retry(method, http_response.status, has_retry_after): - retries = retries.increment( - method, url, response=http_response, _pool=self) - log.debug("Retry: %s", url) - retries.sleep(http_response) - return self.urlopen( - method, url, - body=body, headers=headers, - retries=retries, redirect=redirect, - timeout=timeout, **response_kw) - - return http_response - - def _urlfetch_response_to_http_response(self, urlfetch_resp, **response_kw): - - if is_prod_appengine(): - # Production GAE handles deflate encoding automatically, but does - # not remove the encoding header. - content_encoding = urlfetch_resp.headers.get('content-encoding') - - if content_encoding == 'deflate': - del urlfetch_resp.headers['content-encoding'] - - transfer_encoding = urlfetch_resp.headers.get('transfer-encoding') - # We have a full response's content, - # so let's make sure we don't report ourselves as chunked data. - if transfer_encoding == 'chunked': - encodings = transfer_encoding.split(",") - encodings.remove('chunked') - urlfetch_resp.headers['transfer-encoding'] = ','.join(encodings) - - return HTTPResponse( - # In order for decoding to work, we must present the content as - # a file-like object. - body=BytesIO(urlfetch_resp.content), - headers=urlfetch_resp.headers, - status=urlfetch_resp.status_code, - **response_kw - ) - - def _get_absolute_timeout(self, timeout): - if timeout is Timeout.DEFAULT_TIMEOUT: - return None # Defer to URLFetch's default. 
- if isinstance(timeout, Timeout): - if timeout._read is not None or timeout._connect is not None: - warnings.warn( - "URLFetch does not support granular timeout settings, " - "reverting to total or default URLFetch timeout.", - AppEnginePlatformWarning) - return timeout.total - return timeout - - def _get_retries(self, retries, redirect): - if not isinstance(retries, Retry): - retries = Retry.from_int( - retries, redirect=redirect, default=self.retries) - - if retries.connect or retries.read or retries.redirect: - warnings.warn( - "URLFetch only supports total retries and does not " - "recognize connect, read, or redirect retry parameters.", - AppEnginePlatformWarning) - - return retries - - -def is_appengine(): - return (is_local_appengine() or - is_prod_appengine() or - is_prod_appengine_mvms()) - - -def is_appengine_sandbox(): - return is_appengine() and not is_prod_appengine_mvms() - - -def is_local_appengine(): - return ('APPENGINE_RUNTIME' in os.environ and - 'Development/' in os.environ['SERVER_SOFTWARE']) - - -def is_prod_appengine(): - return ('APPENGINE_RUNTIME' in os.environ and - 'Google App Engine/' in os.environ['SERVER_SOFTWARE'] and - not is_prod_appengine_mvms()) - - -def is_prod_appengine_mvms(): - return os.environ.get('GAE_VM', False) == 'true' diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/contrib/ntlmpool.py b/src/collectors/python.d.plugin/python_modules/urllib3/contrib/ntlmpool.py deleted file mode 100644 index 3f8c9ebf5..000000000 --- a/src/collectors/python.d.plugin/python_modules/urllib3/contrib/ntlmpool.py +++ /dev/null @@ -1,113 +0,0 @@ -# SPDX-License-Identifier: MIT -""" -NTLM authenticating pool, contributed by erikcederstran - -Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10 -""" -from __future__ import absolute_import - -from logging import getLogger -from ntlm import ntlm - -from .. import HTTPSConnectionPool -from ..packages.six.moves.http_client import HTTPSConnection - - -log = getLogger(__name__) - - -class NTLMConnectionPool(HTTPSConnectionPool): - """ - Implements an NTLM authentication version of an urllib3 connection pool - """ - - scheme = 'https' - - def __init__(self, user, pw, authurl, *args, **kwargs): - """ - authurl is a random URL on the server that is protected by NTLM. - user is the Windows user, probably in the DOMAIN\\username format. - pw is the password for the user. - """ - super(NTLMConnectionPool, self).__init__(*args, **kwargs) - self.authurl = authurl - self.rawuser = user - user_parts = user.split('\\', 1) - self.domain = user_parts[0].upper() - self.user = user_parts[1] - self.pw = pw - - def _new_conn(self): - # Performs the NTLM handshake that secures the connection. The socket - # must be kept open while requests are performed. - self.num_connections += 1 - log.debug('Starting NTLM HTTPS connection no. 
%d: https://%s%s', - self.num_connections, self.host, self.authurl) - - headers = {} - headers['Connection'] = 'Keep-Alive' - req_header = 'Authorization' - resp_header = 'www-authenticate' - - conn = HTTPSConnection(host=self.host, port=self.port) - - # Send negotiation message - headers[req_header] = ( - 'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser)) - log.debug('Request headers: %s', headers) - conn.request('GET', self.authurl, None, headers) - res = conn.getresponse() - reshdr = dict(res.getheaders()) - log.debug('Response status: %s %s', res.status, res.reason) - log.debug('Response headers: %s', reshdr) - log.debug('Response data: %s [...]', res.read(100)) - - # Remove the reference to the socket, so that it can not be closed by - # the response object (we want to keep the socket open) - res.fp = None - - # Server should respond with a challenge message - auth_header_values = reshdr[resp_header].split(', ') - auth_header_value = None - for s in auth_header_values: - if s[:5] == 'NTLM ': - auth_header_value = s[5:] - if auth_header_value is None: - raise Exception('Unexpected %s response header: %s' % - (resp_header, reshdr[resp_header])) - - # Send authentication message - ServerChallenge, NegotiateFlags = \ - ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value) - auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge, - self.user, - self.domain, - self.pw, - NegotiateFlags) - headers[req_header] = 'NTLM %s' % auth_msg - log.debug('Request headers: %s', headers) - conn.request('GET', self.authurl, None, headers) - res = conn.getresponse() - log.debug('Response status: %s %s', res.status, res.reason) - log.debug('Response headers: %s', dict(res.getheaders())) - log.debug('Response data: %s [...]', res.read()[:100]) - if res.status != 200: - if res.status == 401: - raise Exception('Server rejected request: wrong ' - 'username or password') - raise Exception('Wrong server response: %s %s' % - (res.status, res.reason)) - - res.fp = None - log.debug('Connection established') - return conn - - def urlopen(self, method, url, body=None, headers=None, retries=3, - redirect=True, assert_same_host=True): - if headers is None: - headers = {} - headers['Connection'] = 'Keep-Alive' - return super(NTLMConnectionPool, self).urlopen(method, url, body, - headers, retries, - redirect, - assert_same_host) diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/contrib/pyopenssl.py b/src/collectors/python.d.plugin/python_modules/urllib3/contrib/pyopenssl.py deleted file mode 100644 index 8d373507d..000000000 --- a/src/collectors/python.d.plugin/python_modules/urllib3/contrib/pyopenssl.py +++ /dev/null @@ -1,458 +0,0 @@ -# SPDX-License-Identifier: MIT -""" -SSL with SNI_-support for Python 2. Follow these instructions if you would -like to verify SSL certificates in Python 2. Note, the default libraries do -*not* do certificate checking; you need to do additional work to validate -certificates yourself. - -This needs the following packages installed: - -* pyOpenSSL (tested with 16.0.0) -* cryptography (minimum 1.3.4, from pyopenssl) -* idna (minimum 2.0, from cryptography) - -However, pyopenssl depends on cryptography, which depends on idna, so while we -use all three directly here we end up having relatively few packages required. 
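A quick sanity check that the whole stack is importable (a sketch mirroring what ``_validate_dependencies_met`` does further down; both version attributes are standard in these packages)::

    import OpenSSL.SSL
    import cryptography
    import idna

    # All three layers must import cleanly before inject_into_urllib3()
    # is worth attempting.
    print('pyOpenSSL', OpenSSL.__version__)
    print('cryptography', cryptography.__version__)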
- -You can install them with the following command: - - pip install pyopenssl cryptography idna - -To activate certificate checking, call -:func:`~urllib3.contrib.pyopenssl.inject_into_urllib3` from your Python code -before you begin making HTTP requests. This can be done in a ``sitecustomize`` -module, or at any other time before your application begins using ``urllib3``, -like this:: - - try: - import urllib3.contrib.pyopenssl - urllib3.contrib.pyopenssl.inject_into_urllib3() - except ImportError: - pass - -Now you can use :mod:`urllib3` as you normally would, and it will support SNI -when the required modules are installed. - -Activating this module also has the positive side effect of disabling SSL/TLS -compression in Python 2 (see `CRIME attack`_). - -If you want to configure the default list of supported cipher suites, you can -set the ``urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST`` variable. - -.. _sni: https://en.wikipedia.org/wiki/Server_Name_Indication -.. _crime attack: https://en.wikipedia.org/wiki/CRIME_(security_exploit) -""" -from __future__ import absolute_import - -import OpenSSL.SSL -from cryptography import x509 -from cryptography.hazmat.backends.openssl import backend as openssl_backend -from cryptography.hazmat.backends.openssl.x509 import _Certificate - -from socket import timeout, error as SocketError -from io import BytesIO - -try: # Platform-specific: Python 2 - from socket import _fileobject -except ImportError: # Platform-specific: Python 3 - _fileobject = None - from ..packages.backports.makefile import backport_makefile - -import logging -import ssl - -try: - import six -except ImportError: - from ..packages import six - -import sys - -from .. import util - -__all__ = ['inject_into_urllib3', 'extract_from_urllib3'] - -# SNI always works. -HAS_SNI = True - -# Map from urllib3 to PyOpenSSL compatible parameter-values. -_openssl_versions = { - ssl.PROTOCOL_SSLv23: OpenSSL.SSL.SSLv23_METHOD, - ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD, -} - -if hasattr(ssl, 'PROTOCOL_TLSv1_1') and hasattr(OpenSSL.SSL, 'TLSv1_1_METHOD'): - _openssl_versions[ssl.PROTOCOL_TLSv1_1] = OpenSSL.SSL.TLSv1_1_METHOD - -if hasattr(ssl, 'PROTOCOL_TLSv1_2') and hasattr(OpenSSL.SSL, 'TLSv1_2_METHOD'): - _openssl_versions[ssl.PROTOCOL_TLSv1_2] = OpenSSL.SSL.TLSv1_2_METHOD - -try: - _openssl_versions.update({ssl.PROTOCOL_SSLv3: OpenSSL.SSL.SSLv3_METHOD}) -except AttributeError: - pass - -_stdlib_to_openssl_verify = { - ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE, - ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER, - ssl.CERT_REQUIRED: - OpenSSL.SSL.VERIFY_PEER + OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT, -} -_openssl_to_stdlib_verify = dict( - (v, k) for k, v in _stdlib_to_openssl_verify.items() -) - -# OpenSSL will only write 16K at a time -SSL_WRITE_BLOCKSIZE = 16384 - -orig_util_HAS_SNI = util.HAS_SNI -orig_util_SSLContext = util.ssl_.SSLContext - - -log = logging.getLogger(__name__) - - -def inject_into_urllib3(): - 'Monkey-patch urllib3 with PyOpenSSL-backed SSL-support.' - - _validate_dependencies_met() - - util.ssl_.SSLContext = PyOpenSSLContext - util.HAS_SNI = HAS_SNI - util.ssl_.HAS_SNI = HAS_SNI - util.IS_PYOPENSSL = True - util.ssl_.IS_PYOPENSSL = True - - -def extract_from_urllib3(): - 'Undo monkey-patching by :func:`inject_into_urllib3`.' 
- - util.ssl_.SSLContext = orig_util_SSLContext - util.HAS_SNI = orig_util_HAS_SNI - util.ssl_.HAS_SNI = orig_util_HAS_SNI - util.IS_PYOPENSSL = False - util.ssl_.IS_PYOPENSSL = False - - -def _validate_dependencies_met(): - """ - Verifies that PyOpenSSL's package-level dependencies have been met. - Throws `ImportError` if they are not met. - """ - # Method added in `cryptography==1.1`; not available in older versions - from cryptography.x509.extensions import Extensions - if getattr(Extensions, "get_extension_for_class", None) is None: - raise ImportError("'cryptography' module missing required functionality. " - "Try upgrading to v1.3.4 or newer.") - - # pyOpenSSL 0.14 and above use cryptography for OpenSSL bindings. The _x509 - # attribute is only present on those versions. - from OpenSSL.crypto import X509 - x509 = X509() - if getattr(x509, "_x509", None) is None: - raise ImportError("'pyOpenSSL' module missing required functionality. " - "Try upgrading to v0.14 or newer.") - - -def _dnsname_to_stdlib(name): - """ - Converts a dNSName SubjectAlternativeName field to the form used by the - standard library on the given Python version. - - Cryptography produces a dNSName as a unicode string that was idna-decoded - from ASCII bytes. We need to idna-encode that string to get it back, and - then on Python 3 we also need to convert to unicode via UTF-8 (the stdlib - uses PyUnicode_FromStringAndSize on it, which decodes via UTF-8). - """ - def idna_encode(name): - """ - Borrowed wholesale from the Python Cryptography Project. It turns out - that we can't just safely call `idna.encode`: it can explode for - wildcard names. This avoids that problem. - """ - import idna - - for prefix in [u'*.', u'.']: - if name.startswith(prefix): - name = name[len(prefix):] - return prefix.encode('ascii') + idna.encode(name) - return idna.encode(name) - - name = idna_encode(name) - if sys.version_info >= (3, 0): - name = name.decode('utf-8') - return name - - -def get_subj_alt_name(peer_cert): - """ - Given a PyOpenSSL certificate, provides all the subject alternative names. - """ - # Pass the cert to cryptography, which has much better APIs for this. - # This is technically using private APIs, but should work across all - # relevant versions until PyOpenSSL gets something proper for this. - cert = _Certificate(openssl_backend, peer_cert._x509) - - # We want to find the SAN extension. Ask Cryptography to locate it (it's - # faster than looping in Python) - try: - ext = cert.extensions.get_extension_for_class( - x509.SubjectAlternativeName - ).value - except x509.ExtensionNotFound: - # No such extension, return the empty list. - return [] - except (x509.DuplicateExtension, x509.UnsupportedExtension, - x509.UnsupportedGeneralNameType, UnicodeError) as e: - # A problem has been found with the quality of the certificate. Assume - # no SAN field is present. - log.warning( - "A problem was encountered with the certificate that prevented " - "urllib3 from finding the SubjectAlternativeName field. This can " - "affect certificate validation. The error was %s", - e, - ) - return [] - - # We want to return dNSName and iPAddress fields. We need to cast the IPs - # back to strings because the match_hostname function wants them as - # strings. - # Sadly the DNS names need to be idna encoded and then, on Python 3, UTF-8 - # decoded. This is pretty frustrating, but that's what the standard library - # does with certificates, and so we need to attempt to do the same.
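    # (Concretely: a certificate carrying SAN dNSName u'пример.example' and
    # an iPAddress of 192.0.2.1 comes back from this function as
    # [('DNS', 'xn--e1afmkfd.example'), ('IP Address', '192.0.2.1')] -- the
    # subjectAltName shape that getpeercert() consumers such as
    # match_hostname() work with. Illustrative values only.)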
- names = [ - ('DNS', _dnsname_to_stdlib(name)) - for name in ext.get_values_for_type(x509.DNSName) - ] - names.extend( - ('IP Address', str(name)) - for name in ext.get_values_for_type(x509.IPAddress) - ) - - return names - - -class WrappedSocket(object): - '''API-compatibility wrapper for Python OpenSSL's Connection-class. - - Note: _makefile_refs, _drop() and _reuse() are needed for the garbage - collector of pypy. - ''' - - def __init__(self, connection, socket, suppress_ragged_eofs=True): - self.connection = connection - self.socket = socket - self.suppress_ragged_eofs = suppress_ragged_eofs - self._makefile_refs = 0 - self._closed = False - - def fileno(self): - return self.socket.fileno() - - # Copy-pasted from Python 3.5 source code - def _decref_socketios(self): - if self._makefile_refs > 0: - self._makefile_refs -= 1 - if self._closed: - self.close() - - def recv(self, *args, **kwargs): - try: - data = self.connection.recv(*args, **kwargs) - except OpenSSL.SSL.SysCallError as e: - if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'): - return b'' - else: - raise SocketError(str(e)) - except OpenSSL.SSL.ZeroReturnError as e: - if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN: - return b'' - else: - raise - except OpenSSL.SSL.WantReadError: - rd = util.wait_for_read(self.socket, self.socket.gettimeout()) - if not rd: - raise timeout('The read operation timed out') - else: - return self.recv(*args, **kwargs) - else: - return data - - def recv_into(self, *args, **kwargs): - try: - return self.connection.recv_into(*args, **kwargs) - except OpenSSL.SSL.SysCallError as e: - if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'): - return 0 - else: - raise SocketError(str(e)) - except OpenSSL.SSL.ZeroReturnError as e: - if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN: - return 0 - else: - raise - except OpenSSL.SSL.WantReadError: - rd = util.wait_for_read(self.socket, self.socket.gettimeout()) - if not rd: - raise timeout('The read operation timed out') - else: - return self.recv_into(*args, **kwargs) - - def settimeout(self, timeout): - return self.socket.settimeout(timeout) - - def _send_until_done(self, data): - while True: - try: - return self.connection.send(data) - except OpenSSL.SSL.WantWriteError: - wr = util.wait_for_write(self.socket, self.socket.gettimeout()) - if not wr: - raise timeout() - continue - except OpenSSL.SSL.SysCallError as e: - raise SocketError(str(e)) - - def sendall(self, data): - total_sent = 0 - while total_sent < len(data): - sent = self._send_until_done(data[total_sent:total_sent + SSL_WRITE_BLOCKSIZE]) - total_sent += sent - - def shutdown(self): - # FIXME rethrow compatible exceptions should we ever use this - self.connection.shutdown() - - def close(self): - if self._makefile_refs < 1: - try: - self._closed = True - return self.connection.close() - except OpenSSL.SSL.Error: - return - else: - self._makefile_refs -= 1 - - def getpeercert(self, binary_form=False): - x509 = self.connection.get_peer_certificate() - - if not x509: - return x509 - - if binary_form: - return OpenSSL.crypto.dump_certificate( - OpenSSL.crypto.FILETYPE_ASN1, - x509) - - return { - 'subject': ( - (('commonName', x509.get_subject().CN),), - ), - 'subjectAltName': get_subj_alt_name(x509) - } - - def _reuse(self): - self._makefile_refs += 1 - - def _drop(self): - if self._makefile_refs < 1: - self.close() - else: - self._makefile_refs -= 1 - - -if _fileobject: # Platform-specific: Python 2 - def makefile(self, mode, 
bufsize=-1): - self._makefile_refs += 1 - return _fileobject(self, mode, bufsize, close=True) -else: # Platform-specific: Python 3 - makefile = backport_makefile - -WrappedSocket.makefile = makefile - - -class PyOpenSSLContext(object): - """ - I am a wrapper class for the PyOpenSSL ``Context`` object. I am responsible - for translating the interface of the standard library ``SSLContext`` object - to calls into PyOpenSSL. - """ - def __init__(self, protocol): - self.protocol = _openssl_versions[protocol] - self._ctx = OpenSSL.SSL.Context(self.protocol) - self._options = 0 - self.check_hostname = False - - @property - def options(self): - return self._options - - @options.setter - def options(self, value): - self._options = value - self._ctx.set_options(value) - - @property - def verify_mode(self): - return _openssl_to_stdlib_verify[self._ctx.get_verify_mode()] - - @verify_mode.setter - def verify_mode(self, value): - self._ctx.set_verify( - _stdlib_to_openssl_verify[value], - _verify_callback - ) - - def set_default_verify_paths(self): - self._ctx.set_default_verify_paths() - - def set_ciphers(self, ciphers): - if isinstance(ciphers, six.text_type): - ciphers = ciphers.encode('utf-8') - self._ctx.set_cipher_list(ciphers) - - def load_verify_locations(self, cafile=None, capath=None, cadata=None): - if cafile is not None: - cafile = cafile.encode('utf-8') - if capath is not None: - capath = capath.encode('utf-8') - self._ctx.load_verify_locations(cafile, capath) - if cadata is not None: - self._ctx.load_verify_locations(BytesIO(cadata)) - - def load_cert_chain(self, certfile, keyfile=None, password=None): - self._ctx.use_certificate_file(certfile) - if password is not None: - self._ctx.set_passwd_cb(lambda max_length, prompt_twice, userdata: password) - self._ctx.use_privatekey_file(keyfile or certfile) - - def wrap_socket(self, sock, server_side=False, - do_handshake_on_connect=True, suppress_ragged_eofs=True, - server_hostname=None): - cnx = OpenSSL.SSL.Connection(self._ctx, sock) - - if isinstance(server_hostname, six.text_type): # Platform-specific: Python 3 - server_hostname = server_hostname.encode('utf-8') - - if server_hostname is not None: - cnx.set_tlsext_host_name(server_hostname) - - cnx.set_connect_state() - - while True: - try: - cnx.do_handshake() - except OpenSSL.SSL.WantReadError: - rd = util.wait_for_read(sock, sock.gettimeout()) - if not rd: - raise timeout('select timed out') - continue - except OpenSSL.SSL.Error as e: - raise ssl.SSLError('bad handshake: %r' % e) - break - - return WrappedSocket(cnx, sock) - - -def _verify_callback(cnx, x509, err_no, err_depth, return_code): - return err_no == 0 diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/contrib/securetransport.py b/src/collectors/python.d.plugin/python_modules/urllib3/contrib/securetransport.py deleted file mode 100644 index fcc30118c..000000000 --- a/src/collectors/python.d.plugin/python_modules/urllib3/contrib/securetransport.py +++ /dev/null @@ -1,808 +0,0 @@ -# SPDX-License-Identifier: MIT -""" -SecureTransport support for urllib3 via ctypes. - -This makes platform-native TLS available to urllib3 users on macOS without the -use of a compiler. This is an important feature because the Python Package -Index is moving to become a TLSv1.2-or-higher server, and the default OpenSSL -that ships with macOS is not capable of doing TLSv1.2. The only way to resolve -this is to give macOS users an alternative solution to the problem, and that -solution is to use SecureTransport.
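Returning briefly to the ``PyOpenSSLContext`` wrapper defined above: it tracks the ``ssl.SSLContext`` interface closely enough that ordinary context code runs unchanged. A sketch, assuming pyOpenSSL is installed and the contrib module is importable under its upstream path::

    import ssl
    import urllib3.contrib.pyopenssl as pyo

    ctx = pyo.PyOpenSSLContext(ssl.PROTOCOL_SSLv23)
    ctx.verify_mode = ssl.CERT_REQUIRED   # mapped onto VERIFY_PEER flags
    ctx.set_default_verify_paths()
    # wrapped = ctx.wrap_socket(raw_socket, server_hostname=b'example.com')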
- -We use ctypes here because this solution must not require a compiler. That's -because pip is not allowed to require a compiler either. - -This is not intended to be a seriously long-term solution to this problem. -The hope is that PEP 543 will eventually solve this issue for us, at which -point we can retire this contrib module. But in the short term, we need to -solve the impending tire fire that is Python on Mac without this kind of -contrib module. So...here we are. - -To use this module, simply import and inject it:: - - import urllib3.contrib.securetransport - urllib3.contrib.securetransport.inject_into_urllib3() - -Happy TLSing! -""" -from __future__ import absolute_import - -import contextlib -import ctypes -import errno -import os.path -import shutil -import socket -import ssl -import threading -import weakref - -from .. import util -from ._securetransport.bindings import ( - Security, SecurityConst, CoreFoundation -) -from ._securetransport.low_level import ( - _assert_no_error, _cert_array_from_pem, _temporary_keychain, - _load_client_cert_chain -) - -try: # Platform-specific: Python 2 - from socket import _fileobject -except ImportError: # Platform-specific: Python 3 - _fileobject = None - from ..packages.backports.makefile import backport_makefile - -try: - memoryview(b'') -except NameError: - raise ImportError("SecureTransport only works on Pythons with memoryview") - -__all__ = ['inject_into_urllib3', 'extract_from_urllib3'] - -# SNI always works -HAS_SNI = True - -orig_util_HAS_SNI = util.HAS_SNI -orig_util_SSLContext = util.ssl_.SSLContext - -# This dictionary is used by the read callback to obtain a handle to the -# calling wrapped socket. This is a pretty silly approach, but for now it'll -# do. I feel like I should be able to smuggle a handle to the wrapped socket -# directly in the SSLConnectionRef, but for now this approach will work I -# guess. -# -# We need to lock around this structure for inserts, but we don't do it for -# reads/writes in the callbacks. The reasoning here goes as follows: -# -# 1. It is not possible to call into the callbacks before the dictionary is -# populated, so once in the callback the id must be in the dictionary. -# 2. The callbacks don't mutate the dictionary, they only read from it, and -# so cannot conflict with any of the insertions. -# -# This is good: if we had to lock in the callbacks we'd drastically slow down -# the performance of this code. -_connection_refs = weakref.WeakValueDictionary() -_connection_ref_lock = threading.Lock() - -# Limit writes to 16kB. This is OpenSSL's limit, but we'll cargo-cult it over -# for no better reason than we need *a* limit, and this one is right there. -SSL_WRITE_BLOCKSIZE = 16384 - -# This is our equivalent of util.ssl_.DEFAULT_CIPHERS, but expanded out to -# individual cipher suites. We need to do this because this is how -# SecureTransport wants them.
-CIPHER_SUITES = [ - SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - SecurityConst.TLS_DHE_DSS_WITH_AES_256_GCM_SHA384, - SecurityConst.TLS_DHE_RSA_WITH_AES_256_GCM_SHA384, - SecurityConst.TLS_DHE_DSS_WITH_AES_128_GCM_SHA256, - SecurityConst.TLS_DHE_RSA_WITH_AES_128_GCM_SHA256, - SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, - SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, - SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, - SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA256, - SecurityConst.TLS_DHE_DSS_WITH_AES_256_CBC_SHA256, - SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA, - SecurityConst.TLS_DHE_DSS_WITH_AES_256_CBC_SHA, - SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, - SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, - SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, - SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA256, - SecurityConst.TLS_DHE_DSS_WITH_AES_128_CBC_SHA256, - SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA, - SecurityConst.TLS_DHE_DSS_WITH_AES_128_CBC_SHA, - SecurityConst.TLS_RSA_WITH_AES_256_GCM_SHA384, - SecurityConst.TLS_RSA_WITH_AES_128_GCM_SHA256, - SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA256, - SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA256, - SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA, - SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA, -] - -# Basically this is simple: for PROTOCOL_SSLv23 we turn it into a low of -# TLSv1 and a high of TLSv1.2. For everything else, we pin to that version. -_protocol_to_min_max = { - ssl.PROTOCOL_SSLv23: (SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol12), -} - -if hasattr(ssl, "PROTOCOL_SSLv2"): - _protocol_to_min_max[ssl.PROTOCOL_SSLv2] = ( - SecurityConst.kSSLProtocol2, SecurityConst.kSSLProtocol2 - ) -if hasattr(ssl, "PROTOCOL_SSLv3"): - _protocol_to_min_max[ssl.PROTOCOL_SSLv3] = ( - SecurityConst.kSSLProtocol3, SecurityConst.kSSLProtocol3 - ) -if hasattr(ssl, "PROTOCOL_TLSv1"): - _protocol_to_min_max[ssl.PROTOCOL_TLSv1] = ( - SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol1 - ) -if hasattr(ssl, "PROTOCOL_TLSv1_1"): - _protocol_to_min_max[ssl.PROTOCOL_TLSv1_1] = ( - SecurityConst.kTLSProtocol11, SecurityConst.kTLSProtocol11 - ) -if hasattr(ssl, "PROTOCOL_TLSv1_2"): - _protocol_to_min_max[ssl.PROTOCOL_TLSv1_2] = ( - SecurityConst.kTLSProtocol12, SecurityConst.kTLSProtocol12 - ) -if hasattr(ssl, "PROTOCOL_TLS"): - _protocol_to_min_max[ssl.PROTOCOL_TLS] = _protocol_to_min_max[ssl.PROTOCOL_SSLv23] - - -def inject_into_urllib3(): - """ - Monkey-patch urllib3 with SecureTransport-backed SSL-support. - """ - util.ssl_.SSLContext = SecureTransportContext - util.HAS_SNI = HAS_SNI - util.ssl_.HAS_SNI = HAS_SNI - util.IS_SECURETRANSPORT = True - util.ssl_.IS_SECURETRANSPORT = True - - -def extract_from_urllib3(): - """ - Undo monkey-patching by :func:`inject_into_urllib3`. - """ - util.ssl_.SSLContext = orig_util_SSLContext - util.HAS_SNI = orig_util_HAS_SNI - util.ssl_.HAS_SNI = orig_util_HAS_SNI - util.IS_SECURETRANSPORT = False - util.ssl_.IS_SECURETRANSPORT = False - - -def _read_callback(connection_id, data_buffer, data_length_pointer): - """ - SecureTransport read callback. This is called by ST to request that data - be returned from the socket. 
- """ - wrapped_socket = None - try: - wrapped_socket = _connection_refs.get(connection_id) - if wrapped_socket is None: - return SecurityConst.errSSLInternal - base_socket = wrapped_socket.socket - - requested_length = data_length_pointer[0] - - timeout = wrapped_socket.gettimeout() - error = None - read_count = 0 - buffer = (ctypes.c_char * requested_length).from_address(data_buffer) - buffer_view = memoryview(buffer) - - try: - while read_count < requested_length: - if timeout is None or timeout >= 0: - readables = util.wait_for_read([base_socket], timeout) - if not readables: - raise socket.error(errno.EAGAIN, 'timed out') - - # We need to tell ctypes that we have a buffer that can be - # written to. Upsettingly, we do that like this: - chunk_size = base_socket.recv_into( - buffer_view[read_count:requested_length] - ) - read_count += chunk_size - if not chunk_size: - if not read_count: - return SecurityConst.errSSLClosedGraceful - break - except (socket.error) as e: - error = e.errno - - if error is not None and error != errno.EAGAIN: - if error == errno.ECONNRESET: - return SecurityConst.errSSLClosedAbort - raise - - data_length_pointer[0] = read_count - - if read_count != requested_length: - return SecurityConst.errSSLWouldBlock - - return 0 - except Exception as e: - if wrapped_socket is not None: - wrapped_socket._exception = e - return SecurityConst.errSSLInternal - - -def _write_callback(connection_id, data_buffer, data_length_pointer): - """ - SecureTransport write callback. This is called by ST to request that data - actually be sent on the network. - """ - wrapped_socket = None - try: - wrapped_socket = _connection_refs.get(connection_id) - if wrapped_socket is None: - return SecurityConst.errSSLInternal - base_socket = wrapped_socket.socket - - bytes_to_write = data_length_pointer[0] - data = ctypes.string_at(data_buffer, bytes_to_write) - - timeout = wrapped_socket.gettimeout() - error = None - sent = 0 - - try: - while sent < bytes_to_write: - if timeout is None or timeout >= 0: - writables = util.wait_for_write([base_socket], timeout) - if not writables: - raise socket.error(errno.EAGAIN, 'timed out') - chunk_sent = base_socket.send(data) - sent += chunk_sent - - # This has some needless copying here, but I'm not sure there's - # much value in optimising this data path. - data = data[chunk_sent:] - except (socket.error) as e: - error = e.errno - - if error is not None and error != errno.EAGAIN: - if error == errno.ECONNRESET: - return SecurityConst.errSSLClosedAbort - raise - - data_length_pointer[0] = sent - if sent != bytes_to_write: - return SecurityConst.errSSLWouldBlock - - return 0 - except Exception as e: - if wrapped_socket is not None: - wrapped_socket._exception = e - return SecurityConst.errSSLInternal - - -# We need to keep these two objects references alive: if they get GC'd while -# in use then SecureTransport could attempt to call a function that is in freed -# memory. That would be...uh...bad. Yeah, that's the word. Bad. -_read_callback_pointer = Security.SSLReadFunc(_read_callback) -_write_callback_pointer = Security.SSLWriteFunc(_write_callback) - - -class WrappedSocket(object): - """ - API-compatibility wrapper for Python's OpenSSL wrapped socket object. - - Note: _makefile_refs, _drop(), and _reuse() are needed for the garbage - collector of PyPy. 
- """ - def __init__(self, socket): - self.socket = socket - self.context = None - self._makefile_refs = 0 - self._closed = False - self._exception = None - self._keychain = None - self._keychain_dir = None - self._client_cert_chain = None - - # We save off the previously-configured timeout and then set it to - # zero. This is done because we use select and friends to handle the - # timeouts, but if we leave the timeout set on the lower socket then - # Python will "kindly" call select on that socket again for us. Avoid - # that by forcing the timeout to zero. - self._timeout = self.socket.gettimeout() - self.socket.settimeout(0) - - @contextlib.contextmanager - def _raise_on_error(self): - """ - A context manager that can be used to wrap calls that do I/O from - SecureTransport. If any of the I/O callbacks hit an exception, this - context manager will correctly propagate the exception after the fact. - This avoids silently swallowing those exceptions. - - It also correctly forces the socket closed. - """ - self._exception = None - - # We explicitly don't catch around this yield because in the unlikely - # event that an exception was hit in the block we don't want to swallow - # it. - yield - if self._exception is not None: - exception, self._exception = self._exception, None - self.close() - raise exception - - def _set_ciphers(self): - """ - Sets up the allowed ciphers. By default this matches the set in - util.ssl_.DEFAULT_CIPHERS, at least as supported by macOS. This is done - custom and doesn't allow changing at this time, mostly because parsing - OpenSSL cipher strings is going to be a freaking nightmare. - """ - ciphers = (Security.SSLCipherSuite * len(CIPHER_SUITES))(*CIPHER_SUITES) - result = Security.SSLSetEnabledCiphers( - self.context, ciphers, len(CIPHER_SUITES) - ) - _assert_no_error(result) - - def _custom_validate(self, verify, trust_bundle): - """ - Called when we have set custom validation. We do this in two cases: - first, when cert validation is entirely disabled; and second, when - using a custom trust DB. - """ - # If we disabled cert validation, just say: cool. - if not verify: - return - - # We want data in memory, so load it up. - if os.path.isfile(trust_bundle): - with open(trust_bundle, 'rb') as f: - trust_bundle = f.read() - - cert_array = None - trust = Security.SecTrustRef() - - try: - # Get a CFArray that contains the certs we want. - cert_array = _cert_array_from_pem(trust_bundle) - - # Ok, now the hard part. We want to get the SecTrustRef that ST has - # created for this connection, shove our CAs into it, tell ST to - # ignore everything else it knows, and then ask if it can build a - # chain. This is a buuuunch of code. - result = Security.SSLCopyPeerTrust( - self.context, ctypes.byref(trust) - ) - _assert_no_error(result) - if not trust: - raise ssl.SSLError("Failed to copy trust reference") - - result = Security.SecTrustSetAnchorCertificates(trust, cert_array) - _assert_no_error(result) - - result = Security.SecTrustSetAnchorCertificatesOnly(trust, True) - _assert_no_error(result) - - trust_result = Security.SecTrustResultType() - result = Security.SecTrustEvaluate( - trust, ctypes.byref(trust_result) - ) - _assert_no_error(result) - finally: - if trust: - CoreFoundation.CFRelease(trust) - - if cert_array is None: - CoreFoundation.CFRelease(cert_array) - - # Ok, now we can look at what the result was. 
- successes = ( - SecurityConst.kSecTrustResultUnspecified, - SecurityConst.kSecTrustResultProceed - ) - if trust_result.value not in successes: - raise ssl.SSLError( - "certificate verify failed, error code: %d" % - trust_result.value - ) - - def handshake(self, - server_hostname, - verify, - trust_bundle, - min_version, - max_version, - client_cert, - client_key, - client_key_passphrase): - """ - Actually performs the TLS handshake. This is run automatically by - wrapped socket, and shouldn't be needed in user code. - """ - # First, we do the initial bits of connection setup. We need to create - # a context, set its I/O funcs, and set the connection reference. - self.context = Security.SSLCreateContext( - None, SecurityConst.kSSLClientSide, SecurityConst.kSSLStreamType - ) - result = Security.SSLSetIOFuncs( - self.context, _read_callback_pointer, _write_callback_pointer - ) - _assert_no_error(result) - - # Here we need to compute the handle to use. We do this by taking the - # id of self modulo 2**31 - 1. If this is already in the dictionary, we - # just keep incrementing by one until we find a free space. - with _connection_ref_lock: - handle = id(self) % 2147483647 - while handle in _connection_refs: - handle = (handle + 1) % 2147483647 - _connection_refs[handle] = self - - result = Security.SSLSetConnection(self.context, handle) - _assert_no_error(result) - - # If we have a server hostname, we should set that too. - if server_hostname: - if not isinstance(server_hostname, bytes): - server_hostname = server_hostname.encode('utf-8') - - result = Security.SSLSetPeerDomainName( - self.context, server_hostname, len(server_hostname) - ) - _assert_no_error(result) - - # Setup the ciphers. - self._set_ciphers() - - # Set the minimum and maximum TLS versions. - result = Security.SSLSetProtocolVersionMin(self.context, min_version) - _assert_no_error(result) - result = Security.SSLSetProtocolVersionMax(self.context, max_version) - _assert_no_error(result) - - # If there's a trust DB, we need to use it. We do that by telling - # SecureTransport to break on server auth. We also do that if we don't - # want to validate the certs at all: we just won't actually do any - # authing in that case. - if not verify or trust_bundle is not None: - result = Security.SSLSetSessionOption( - self.context, - SecurityConst.kSSLSessionOptionBreakOnServerAuth, - True - ) - _assert_no_error(result) - - # If there's a client cert, we need to use it. - if client_cert: - self._keychain, self._keychain_dir = _temporary_keychain() - self._client_cert_chain = _load_client_cert_chain( - self._keychain, client_cert, client_key - ) - result = Security.SSLSetCertificate( - self.context, self._client_cert_chain - ) - _assert_no_error(result) - - while True: - with self._raise_on_error(): - result = Security.SSLHandshake(self.context) - - if result == SecurityConst.errSSLWouldBlock: - raise socket.timeout("handshake timed out") - elif result == SecurityConst.errSSLServerAuthCompleted: - self._custom_validate(verify, trust_bundle) - continue - else: - _assert_no_error(result) - break - - def fileno(self): - return self.socket.fileno() - - # Copy-pasted from Python 3.5 source code - def _decref_socketios(self): - if self._makefile_refs > 0: - self._makefile_refs -= 1 - if self._closed: - self.close() - - def recv(self, bufsiz): - buffer = ctypes.create_string_buffer(bufsiz) - bytes_read = self.recv_into(buffer, bufsiz) - data = buffer[:bytes_read] - return data - - def recv_into(self, buffer, nbytes=None): - # Read short on EOF. 
- if self._closed: - return 0 - - if nbytes is None: - nbytes = len(buffer) - - buffer = (ctypes.c_char * nbytes).from_buffer(buffer) - processed_bytes = ctypes.c_size_t(0) - - with self._raise_on_error(): - result = Security.SSLRead( - self.context, buffer, nbytes, ctypes.byref(processed_bytes) - ) - - # There are some result codes that we want to treat as "not always - # errors". Specifically, those are errSSLWouldBlock, - # errSSLClosedGraceful, and errSSLClosedNoNotify. - if (result == SecurityConst.errSSLWouldBlock): - # If we didn't process any bytes, then this was just a time out. - # However, we can get errSSLWouldBlock in situations when we *did* - # read some data, and in those cases we should just read "short" - # and return. - if processed_bytes.value == 0: - # Timed out, no data read. - raise socket.timeout("recv timed out") - elif result in (SecurityConst.errSSLClosedGraceful, SecurityConst.errSSLClosedNoNotify): - # The remote peer has closed this connection. We should do so as - # well. Note that we don't actually return here because in - # principle this could actually be fired along with return data. - # It's unlikely though. - self.close() - else: - _assert_no_error(result) - - # Ok, we read and probably succeeded. We should return whatever data - # was actually read. - return processed_bytes.value - - def settimeout(self, timeout): - self._timeout = timeout - - def gettimeout(self): - return self._timeout - - def send(self, data): - processed_bytes = ctypes.c_size_t(0) - - with self._raise_on_error(): - result = Security.SSLWrite( - self.context, data, len(data), ctypes.byref(processed_bytes) - ) - - if result == SecurityConst.errSSLWouldBlock and processed_bytes.value == 0: - # Timed out - raise socket.timeout("send timed out") - else: - _assert_no_error(result) - - # We sent, and probably succeeded. Tell them how much we sent. - return processed_bytes.value - - def sendall(self, data): - total_sent = 0 - while total_sent < len(data): - sent = self.send(data[total_sent:total_sent + SSL_WRITE_BLOCKSIZE]) - total_sent += sent - - def shutdown(self): - with self._raise_on_error(): - Security.SSLClose(self.context) - - def close(self): - # TODO: should I do clean shutdown here? Do I have to? - if self._makefile_refs < 1: - self._closed = True - if self.context: - CoreFoundation.CFRelease(self.context) - self.context = None - if self._client_cert_chain: - CoreFoundation.CFRelease(self._client_cert_chain) - self._client_cert_chain = None - if self._keychain: - Security.SecKeychainDelete(self._keychain) - CoreFoundation.CFRelease(self._keychain) - shutil.rmtree(self._keychain_dir) - self._keychain = self._keychain_dir = None - return self.socket.close() - else: - self._makefile_refs -= 1 - - def getpeercert(self, binary_form=False): - # Urgh, annoying. - # - # Here's how we do this: - # - # 1. Call SSLCopyPeerTrust to get hold of the trust object for this - # connection. - # 2. Call SecTrustGetCertificateAtIndex for index 0 to get the leaf. - # 3. To get the CN, call SecCertificateCopyCommonName and process that - # string so that it's of the appropriate type. - # 4. To get the SAN, we need to do something a bit more complex: - # a. Call SecCertificateCopyValues to get the data, requesting - # kSecOIDSubjectAltName. - # b. Mess about with this dictionary to try to get the SANs out. - # - # This is gross. Really gross. It's going to be a few hundred LoC extra - # just to repeat something that SecureTransport can *already do*. 
So my - # operating assumption at this time is that what we want to do is - # instead to just flag to urllib3 that it shouldn't do its own hostname - # validation when using SecureTransport. - if not binary_form: - raise ValueError( - "SecureTransport only supports dumping binary certs" - ) - trust = Security.SecTrustRef() - certdata = None - der_bytes = None - - try: - # Grab the trust store. - result = Security.SSLCopyPeerTrust( - self.context, ctypes.byref(trust) - ) - _assert_no_error(result) - if not trust: - # Probably we haven't done the handshake yet. No biggie. - return None - - cert_count = Security.SecTrustGetCertificateCount(trust) - if not cert_count: - # Also a case that might happen if we haven't handshaked. - # Handshook? Handshaken? - return None - - leaf = Security.SecTrustGetCertificateAtIndex(trust, 0) - assert leaf - - # Ok, now we want the DER bytes. - certdata = Security.SecCertificateCopyData(leaf) - assert certdata - - data_length = CoreFoundation.CFDataGetLength(certdata) - data_buffer = CoreFoundation.CFDataGetBytePtr(certdata) - der_bytes = ctypes.string_at(data_buffer, data_length) - finally: - if certdata: - CoreFoundation.CFRelease(certdata) - if trust: - CoreFoundation.CFRelease(trust) - - return der_bytes - - def _reuse(self): - self._makefile_refs += 1 - - def _drop(self): - if self._makefile_refs < 1: - self.close() - else: - self._makefile_refs -= 1 - - -if _fileobject: # Platform-specific: Python 2 - def makefile(self, mode, bufsize=-1): - self._makefile_refs += 1 - return _fileobject(self, mode, bufsize, close=True) -else: # Platform-specific: Python 3 - def makefile(self, mode="r", buffering=None, *args, **kwargs): - # We disable buffering with SecureTransport because it conflicts with - # the buffering that ST does internally (see issue #1153 for more). - buffering = 0 - return backport_makefile(self, mode, buffering, *args, **kwargs) - -WrappedSocket.makefile = makefile - - -class SecureTransportContext(object): - """ - I am a wrapper class for the SecureTransport library, to translate the - interface of the standard library ``SSLContext`` object to calls into - SecureTransport. - """ - def __init__(self, protocol): - self._min_version, self._max_version = _protocol_to_min_max[protocol] - self._options = 0 - self._verify = False - self._trust_bundle = None - self._client_cert = None - self._client_key = None - self._client_key_passphrase = None - - @property - def check_hostname(self): - """ - SecureTransport cannot have its hostname checking disabled. For more, - see the comment on getpeercert() in this file. - """ - return True - - @check_hostname.setter - def check_hostname(self, value): - """ - SecureTransport cannot have its hostname checking disabled. For more, - see the comment on getpeercert() in this file. - """ - pass - - @property - def options(self): - # TODO: Well, crap. - # - # So this is the bit of the code that is the most likely to cause us - # trouble. Essentially we need to enumerate all of the SSL options that - # users might want to use and try to see if we can sensibly translate - # them, or whether we should just ignore them. - return self._options - - @options.setter - def options(self, value): - # TODO: Update in line with above. 
-        self._options = value
-
-    @property
-    def verify_mode(self):
-        return ssl.CERT_REQUIRED if self._verify else ssl.CERT_NONE
-
-    @verify_mode.setter
-    def verify_mode(self, value):
-        self._verify = True if value == ssl.CERT_REQUIRED else False
-
-    def set_default_verify_paths(self):
-        # So, this has to do something a bit weird. Specifically, what it does
-        # is nothing.
-        #
-        # This means that, if we had previously had load_verify_locations
-        # called, this does not undo that. We need to do that because it turns
-        # out that the rest of the urllib3 code will attempt to load the
-        # default verify paths if it hasn't been told about any paths, even if
-        # the context itself was configured sometime earlier. We resolve that
-        # by just ignoring it.
-        pass
-
-    def load_default_certs(self):
-        return self.set_default_verify_paths()
-
-    def set_ciphers(self, ciphers):
-        # For now, we just require the default cipher string.
-        if ciphers != util.ssl_.DEFAULT_CIPHERS:
-            raise ValueError(
-                "SecureTransport doesn't support custom cipher strings"
-            )
-
-    def load_verify_locations(self, cafile=None, capath=None, cadata=None):
-        # OK, we only really support cadata and cafile.
-        if capath is not None:
-            raise ValueError(
-                "SecureTransport does not support cert directories"
-            )
-
-        self._trust_bundle = cafile or cadata
-
-    def load_cert_chain(self, certfile, keyfile=None, password=None):
-        self._client_cert = certfile
-        self._client_key = keyfile
-        self._client_key_passphrase = password
-
-    def wrap_socket(self, sock, server_side=False,
-                    do_handshake_on_connect=True, suppress_ragged_eofs=True,
-                    server_hostname=None):
-        # So, what do we do here? Firstly, we assert some properties. This is a
-        # stripped down shim, so there is some functionality we don't support.
-        # See PEP 543 for the real deal.
-        assert not server_side
-        assert do_handshake_on_connect
-        assert suppress_ragged_eofs
-
-        # Ok, we're good to go. Now we want to create the wrapped socket object
-        # and store it in the appropriate place.
-        wrapped_socket = WrappedSocket(sock)
-
-        # Now we can handshake
-        wrapped_socket.handshake(
-            server_hostname, self._verify, self._trust_bundle,
-            self._min_version, self._max_version, self._client_cert,
-            self._client_key, self._client_key_passphrase
-        )
-        return wrapped_socket
diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/contrib/socks.py b/src/collectors/python.d.plugin/python_modules/urllib3/contrib/socks.py
deleted file mode 100644
index 1cb79285b..000000000
--- a/src/collectors/python.d.plugin/python_modules/urllib3/contrib/socks.py
+++ /dev/null
@@ -1,189 +0,0 @@
-# -*- coding: utf-8 -*-
-# SPDX-License-Identifier: MIT
-"""
-This module contains provisional support for SOCKS proxies from within
-urllib3. This module supports SOCKS4 (specifically the SOCKS4A variant) and
-SOCKS5. To enable its functionality, either install PySocks or install this
-module with the ``socks`` extra.
-
-The SOCKS implementation supports the full range of urllib3 features. It also
-supports the following SOCKS features:
-
-- SOCKS4
-- SOCKS4a
-- SOCKS5
-- Usernames and passwords for the SOCKS proxy
-
-Known Limitations:
-
-- Currently PySocks does not support contacting remote websites via literal
-  IPv6 addresses. Any such connection attempt will fail. You must use a domain
-  name.
-- Currently PySocks does not support IPv6 connections to the SOCKS proxy. Any
-  such connection attempt will fail.
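A hedged usage sketch for this module (the proxy address is hypothetical; a `socks5h://` URL asks the proxy to resolve hostnames, while `socks5://` resolves them locally, per the scheme handling in `SOCKSProxyManager` further down):

    from urllib3.contrib.socks import SOCKSProxyManager

    proxy = SOCKSProxyManager('socks5h://127.0.0.1:1080/',
                              username='user', password='pass')
    response = proxy.request('GET', 'http://example.com/')
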
-""" -from __future__ import absolute_import - -try: - import socks -except ImportError: - import warnings - from ..exceptions import DependencyWarning - - warnings.warn(( - 'SOCKS support in urllib3 requires the installation of optional ' - 'dependencies: specifically, PySocks. For more information, see ' - 'https://urllib3.readthedocs.io/en/latest/contrib.html#socks-proxies' - ), - DependencyWarning - ) - raise - -from socket import error as SocketError, timeout as SocketTimeout - -from ..connection import ( - HTTPConnection, HTTPSConnection -) -from ..connectionpool import ( - HTTPConnectionPool, HTTPSConnectionPool -) -from ..exceptions import ConnectTimeoutError, NewConnectionError -from ..poolmanager import PoolManager -from ..util.url import parse_url - -try: - import ssl -except ImportError: - ssl = None - - -class SOCKSConnection(HTTPConnection): - """ - A plain-text HTTP connection that connects via a SOCKS proxy. - """ - def __init__(self, *args, **kwargs): - self._socks_options = kwargs.pop('_socks_options') - super(SOCKSConnection, self).__init__(*args, **kwargs) - - def _new_conn(self): - """ - Establish a new connection via the SOCKS proxy. - """ - extra_kw = {} - if self.source_address: - extra_kw['source_address'] = self.source_address - - if self.socket_options: - extra_kw['socket_options'] = self.socket_options - - try: - conn = socks.create_connection( - (self.host, self.port), - proxy_type=self._socks_options['socks_version'], - proxy_addr=self._socks_options['proxy_host'], - proxy_port=self._socks_options['proxy_port'], - proxy_username=self._socks_options['username'], - proxy_password=self._socks_options['password'], - proxy_rdns=self._socks_options['rdns'], - timeout=self.timeout, - **extra_kw - ) - - except SocketTimeout as e: - raise ConnectTimeoutError( - self, "Connection to %s timed out. (connect timeout=%s)" % - (self.host, self.timeout)) - - except socks.ProxyError as e: - # This is fragile as hell, but it seems to be the only way to raise - # useful errors here. - if e.socket_err: - error = e.socket_err - if isinstance(error, SocketTimeout): - raise ConnectTimeoutError( - self, - "Connection to %s timed out. (connect timeout=%s)" % - (self.host, self.timeout) - ) - else: - raise NewConnectionError( - self, - "Failed to establish a new connection: %s" % error - ) - else: - raise NewConnectionError( - self, - "Failed to establish a new connection: %s" % e - ) - - except SocketError as e: # Defensive: PySocks should catch all these. - raise NewConnectionError( - self, "Failed to establish a new connection: %s" % e) - - return conn - - -# We don't need to duplicate the Verified/Unverified distinction from -# urllib3/connection.py here because the HTTPSConnection will already have been -# correctly set to either the Verified or Unverified form by that module. This -# means the SOCKSHTTPSConnection will automatically be the correct type. -class SOCKSHTTPSConnection(SOCKSConnection, HTTPSConnection): - pass - - -class SOCKSHTTPConnectionPool(HTTPConnectionPool): - ConnectionCls = SOCKSConnection - - -class SOCKSHTTPSConnectionPool(HTTPSConnectionPool): - ConnectionCls = SOCKSHTTPSConnection - - -class SOCKSProxyManager(PoolManager): - """ - A version of the urllib3 ProxyManager that routes connections via the - defined SOCKS proxy. 
- """ - pool_classes_by_scheme = { - 'http': SOCKSHTTPConnectionPool, - 'https': SOCKSHTTPSConnectionPool, - } - - def __init__(self, proxy_url, username=None, password=None, - num_pools=10, headers=None, **connection_pool_kw): - parsed = parse_url(proxy_url) - - if parsed.scheme == 'socks5': - socks_version = socks.PROXY_TYPE_SOCKS5 - rdns = False - elif parsed.scheme == 'socks5h': - socks_version = socks.PROXY_TYPE_SOCKS5 - rdns = True - elif parsed.scheme == 'socks4': - socks_version = socks.PROXY_TYPE_SOCKS4 - rdns = False - elif parsed.scheme == 'socks4a': - socks_version = socks.PROXY_TYPE_SOCKS4 - rdns = True - else: - raise ValueError( - "Unable to determine SOCKS version from %s" % proxy_url - ) - - self.proxy_url = proxy_url - - socks_options = { - 'socks_version': socks_version, - 'proxy_host': parsed.host, - 'proxy_port': parsed.port, - 'username': username, - 'password': password, - 'rdns': rdns - } - connection_pool_kw['_socks_options'] = socks_options - - super(SOCKSProxyManager, self).__init__( - num_pools, headers, **connection_pool_kw - ) - - self.pool_classes_by_scheme = SOCKSProxyManager.pool_classes_by_scheme diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/exceptions.py b/src/collectors/python.d.plugin/python_modules/urllib3/exceptions.py deleted file mode 100644 index a71cabe06..000000000 --- a/src/collectors/python.d.plugin/python_modules/urllib3/exceptions.py +++ /dev/null @@ -1,247 +0,0 @@ -# SPDX-License-Identifier: MIT -from __future__ import absolute_import -from .packages.six.moves.http_client import ( - IncompleteRead as httplib_IncompleteRead -) -# Base Exceptions - - -class HTTPError(Exception): - "Base exception used by this module." - pass - - -class HTTPWarning(Warning): - "Base warning used by this module." - pass - - -class PoolError(HTTPError): - "Base exception for errors caused within a pool." - def __init__(self, pool, message): - self.pool = pool - HTTPError.__init__(self, "%s: %s" % (pool, message)) - - def __reduce__(self): - # For pickling purposes. - return self.__class__, (None, None) - - -class RequestError(PoolError): - "Base exception for PoolErrors that have associated URLs." - def __init__(self, pool, url, message): - self.url = url - PoolError.__init__(self, pool, message) - - def __reduce__(self): - # For pickling purposes. - return self.__class__, (None, self.url, None) - - -class SSLError(HTTPError): - "Raised when SSL certificate fails in an HTTPS connection." - pass - - -class ProxyError(HTTPError): - "Raised when the connection to a proxy fails." - pass - - -class DecodeError(HTTPError): - "Raised when automatic decoding based on Content-Type fails." - pass - - -class ProtocolError(HTTPError): - "Raised when something unexpected happens mid-request/response." - pass - - -#: Renamed to ProtocolError but aliased for backwards compatibility. -ConnectionError = ProtocolError - - -# Leaf Exceptions - -class MaxRetryError(RequestError): - """Raised when the maximum number of retries is exceeded. - - :param pool: The connection pool - :type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool` - :param string url: The requested Url - :param exceptions.Exception reason: The underlying error - - """ - - def __init__(self, pool, url, reason=None): - self.reason = reason - - message = "Max retries exceeded with url: %s (Caused by %r)" % ( - url, reason) - - RequestError.__init__(self, pool, url, message) - - -class HostChangedError(RequestError): - "Raised when an existing pool gets a request for a foreign host." 
-
-    def __init__(self, pool, url, retries=3):
-        message = "Tried to open a foreign host with url: %s" % url
-        RequestError.__init__(self, pool, url, message)
-        self.retries = retries
-
-
-class TimeoutStateError(HTTPError):
-    """ Raised when passing an invalid state to a timeout """
-    pass
-
-
-class TimeoutError(HTTPError):
-    """ Raised when a socket timeout error occurs.
-
-    Catching this error will catch both :exc:`ReadTimeoutErrors
-    <ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`.
-    """
-    pass
-
-
-class ReadTimeoutError(TimeoutError, RequestError):
-    "Raised when a socket timeout occurs while receiving data from a server"
-    pass
-
-
-# This timeout error does not have a URL attached and needs to inherit from the
-# base HTTPError
-class ConnectTimeoutError(TimeoutError):
-    "Raised when a socket timeout occurs while connecting to a server"
-    pass
-
-
-class NewConnectionError(ConnectTimeoutError, PoolError):
-    "Raised when we fail to establish a new connection. Usually ECONNREFUSED."
-    pass
-
-
-class EmptyPoolError(PoolError):
-    "Raised when a pool runs out of connections and no more are allowed."
-    pass
-
-
-class ClosedPoolError(PoolError):
-    "Raised when a request enters a pool after the pool has been closed."
-    pass
-
-
-class LocationValueError(ValueError, HTTPError):
-    "Raised when there is something wrong with a given URL input."
-    pass
-
-
-class LocationParseError(LocationValueError):
-    "Raised when get_host or similar fails to parse the URL input."
-
-    def __init__(self, location):
-        message = "Failed to parse: %s" % location
-        HTTPError.__init__(self, message)
-
-        self.location = location
-
-
-class ResponseError(HTTPError):
-    "Used as a container for an error reason supplied in a MaxRetryError."
-    GENERIC_ERROR = 'too many error responses'
-    SPECIFIC_ERROR = 'too many {status_code} error responses'
-
-
-class SecurityWarning(HTTPWarning):
-    "Warned when performing security-reducing actions"
-    pass
-
-
-class SubjectAltNameWarning(SecurityWarning):
-    "Warned when connecting to a host with a certificate missing a SAN."
-    pass
-
-
-class InsecureRequestWarning(SecurityWarning):
-    "Warned when making an unverified HTTPS request."
-    pass
-
-
-class SystemTimeWarning(SecurityWarning):
-    "Warned when system time is suspected to be wrong"
-    pass
-
-
-class InsecurePlatformWarning(SecurityWarning):
-    "Warned when certain SSL configuration is not available on a platform."
-    pass
-
-
-class SNIMissingWarning(HTTPWarning):
-    "Warned when making an HTTPS request without SNI available."
-    pass
-
-
-class DependencyWarning(HTTPWarning):
-    """
-    Warned when an attempt is made to import a module with missing optional
-    dependencies.
-    """
-    pass
-
-
-class ResponseNotChunked(ProtocolError, ValueError):
-    "Response needs to be chunked in order to read it as chunks."
-    pass
-
-
-class BodyNotHttplibCompatible(HTTPError):
-    """
-    Body should be httplib.HTTPResponse like (have an fp attribute which
-    returns raw chunks) for read_chunked().
-    """
-    pass
-
-
-class IncompleteRead(HTTPError, httplib_IncompleteRead):
-    """
-    Response length doesn't match expected Content-Length
-
-    Subclass of http_client.IncompleteRead to allow int value
-    for `partial` to avoid creating large objects on streamed
-    reads.
-    """
-    def __init__(self, partial, expected):
-        super(IncompleteRead, self).__init__(partial, expected)
-
-    def __repr__(self):
-        return ('IncompleteRead(%i bytes read, '
-                '%i more expected)' % (self.partial, self.expected))
-
-
-class InvalidHeader(HTTPError):
-    "The header provided was somehow invalid."
- pass - - -class ProxySchemeUnknown(AssertionError, ValueError): - "ProxyManager does not support the supplied scheme" - # TODO(t-8ch): Stop inheriting from AssertionError in v2.0. - - def __init__(self, scheme): - message = "Not supported proxy scheme %s" % scheme - super(ProxySchemeUnknown, self).__init__(message) - - -class HeaderParsingError(HTTPError): - "Raised by assert_header_parsing, but we convert it to a log.warning statement." - def __init__(self, defects, unparsed_data): - message = '%s, unparsed data: %r' % (defects or 'Unknown', unparsed_data) - super(HeaderParsingError, self).__init__(message) - - -class UnrewindableBodyError(HTTPError): - "urllib3 encountered an error when trying to rewind a body" - pass diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/fields.py b/src/collectors/python.d.plugin/python_modules/urllib3/fields.py deleted file mode 100644 index de7577b74..000000000 --- a/src/collectors/python.d.plugin/python_modules/urllib3/fields.py +++ /dev/null @@ -1,179 +0,0 @@ -# SPDX-License-Identifier: MIT -from __future__ import absolute_import -import email.utils -import mimetypes - -from .packages import six - - -def guess_content_type(filename, default='application/octet-stream'): - """ - Guess the "Content-Type" of a file. - - :param filename: - The filename to guess the "Content-Type" of using :mod:`mimetypes`. - :param default: - If no "Content-Type" can be guessed, default to `default`. - """ - if filename: - return mimetypes.guess_type(filename)[0] or default - return default - - -def format_header_param(name, value): - """ - Helper function to format and quote a single header parameter. - - Particularly useful for header parameters which might contain - non-ASCII values, like file names. This follows RFC 2231, as - suggested by RFC 2388 Section 4.4. - - :param name: - The name of the parameter, a string expected to be ASCII only. - :param value: - The value of the parameter, provided as a unicode string. - """ - if not any(ch in value for ch in '"\\\r\n'): - result = '%s="%s"' % (name, value) - try: - result.encode('ascii') - except (UnicodeEncodeError, UnicodeDecodeError): - pass - else: - return result - if not six.PY3 and isinstance(value, six.text_type): # Python 2: - value = value.encode('utf-8') - value = email.utils.encode_rfc2231(value, 'utf-8') - value = '%s*=%s' % (name, value) - return value - - -class RequestField(object): - """ - A data container for request body parameters. - - :param name: - The name of this request field. - :param data: - The data/value body. - :param filename: - An optional filename of the request field. - :param headers: - An optional dict-like object of headers to initially use for the field. - """ - def __init__(self, name, data, filename=None, headers=None): - self._name = name - self._filename = filename - self.data = data - self.headers = {} - if headers: - self.headers = dict(headers) - - @classmethod - def from_tuples(cls, fieldname, value): - """ - A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters. - - Supports constructing :class:`~urllib3.fields.RequestField` from - parameter of key/value strings AND key/filetuple. A filetuple is a - (filename, data, MIME type) tuple where the MIME type is optional. 
-        For example::
-
-            'foo': 'bar',
-            'fakefile': ('foofile.txt', 'contents of foofile'),
-            'realfile': ('barfile.txt', open('realfile').read()),
-            'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
-            'nonamefile': 'contents of nonamefile field',
-
-        Field names and filenames must be unicode.
-        """
-        if isinstance(value, tuple):
-            if len(value) == 3:
-                filename, data, content_type = value
-            else:
-                filename, data = value
-                content_type = guess_content_type(filename)
-        else:
-            filename = None
-            content_type = None
-            data = value
-
-        request_param = cls(fieldname, data, filename=filename)
-        request_param.make_multipart(content_type=content_type)
-
-        return request_param
-
-    def _render_part(self, name, value):
-        """
-        Overridable helper function to format a single header parameter.
-
-        :param name:
-            The name of the parameter, a string expected to be ASCII only.
-        :param value:
-            The value of the parameter, provided as a unicode string.
-        """
-        return format_header_param(name, value)
-
-    def _render_parts(self, header_parts):
-        """
-        Helper function to format and quote a single header.
-
-        Useful for single headers that are composed of multiple items. E.g.,
-        'Content-Disposition' fields.
-
-        :param header_parts:
-            A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format
-            as `k1="v1"; k2="v2"; ...`.
-        """
-        parts = []
-        iterable = header_parts
-        if isinstance(header_parts, dict):
-            iterable = header_parts.items()
-
-        for name, value in iterable:
-            if value is not None:
-                parts.append(self._render_part(name, value))
-
-        return '; '.join(parts)
-
-    def render_headers(self):
-        """
-        Renders the headers for this request field.
-        """
-        lines = []
-
-        sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location']
-        for sort_key in sort_keys:
-            if self.headers.get(sort_key, False):
-                lines.append('%s: %s' % (sort_key, self.headers[sort_key]))
-
-        for header_name, header_value in self.headers.items():
-            if header_name not in sort_keys:
-                if header_value:
-                    lines.append('%s: %s' % (header_name, header_value))
-
-        lines.append('\r\n')
-        return '\r\n'.join(lines)
-
-    def make_multipart(self, content_disposition=None, content_type=None,
-                       content_location=None):
-        """
-        Makes this request field into a multipart request field.
-
-        This method overrides the "Content-Disposition", "Content-Type" and
-        "Content-Location" headers of the request parameter.
-
-        :param content_type:
-            The 'Content-Type' of the request body.
-        :param content_location:
-            The 'Content-Location' of the request body.
-
-        """
-        self.headers['Content-Disposition'] = content_disposition or 'form-data'
-        self.headers['Content-Disposition'] += '; '.join([
-            '', self._render_parts(
-                (('name', self._name), ('filename', self._filename))
-            )
-        ])
-        self.headers['Content-Type'] = content_type
-        self.headers['Content-Location'] = content_location
diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/filepost.py b/src/collectors/python.d.plugin/python_modules/urllib3/filepost.py
deleted file mode 100644
index 3febc9cfe..000000000
--- a/src/collectors/python.d.plugin/python_modules/urllib3/filepost.py
+++ /dev/null
@@ -1,95 +0,0 @@
-# SPDX-License-Identifier: MIT
-from __future__ import absolute_import
-import codecs
-
-from uuid import uuid4
-from io import BytesIO
-
-from .packages import six
-from .packages.six import b
-from .fields import RequestField
-
-writer = codecs.lookup('utf-8')[3]
-
-
-def choose_boundary():
-    """
-    Our embarrassingly-simple replacement for mimetools.choose_boundary.
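To make the RequestField flow above concrete: `from_tuples()` builds the field, `make_multipart()` fills in the headers, and `render_headers()` serializes them. A small sketch under those semantics (the field name and contents are made up):

    from urllib3.fields import RequestField

    field = RequestField.from_tuples(
        'upload', ('report.txt', 'file contents', 'text/plain')
    )
    print(field.render_headers())
    # Content-Disposition: form-data; name="upload"; filename="report.txt"
    # Content-Type: text/plain
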
- """ - return uuid4().hex - - -def iter_field_objects(fields): - """ - Iterate over fields. - - Supports list of (k, v) tuples and dicts, and lists of - :class:`~urllib3.fields.RequestField`. - - """ - if isinstance(fields, dict): - i = six.iteritems(fields) - else: - i = iter(fields) - - for field in i: - if isinstance(field, RequestField): - yield field - else: - yield RequestField.from_tuples(*field) - - -def iter_fields(fields): - """ - .. deprecated:: 1.6 - - Iterate over fields. - - The addition of :class:`~urllib3.fields.RequestField` makes this function - obsolete. Instead, use :func:`iter_field_objects`, which returns - :class:`~urllib3.fields.RequestField` objects. - - Supports list of (k, v) tuples and dicts. - """ - if isinstance(fields, dict): - return ((k, v) for k, v in six.iteritems(fields)) - - return ((k, v) for k, v in fields) - - -def encode_multipart_formdata(fields, boundary=None): - """ - Encode a dictionary of ``fields`` using the multipart/form-data MIME format. - - :param fields: - Dictionary of fields or list of (key, :class:`~urllib3.fields.RequestField`). - - :param boundary: - If not specified, then a random boundary will be generated using - :func:`mimetools.choose_boundary`. - """ - body = BytesIO() - if boundary is None: - boundary = choose_boundary() - - for field in iter_field_objects(fields): - body.write(b('--%s\r\n' % (boundary))) - - writer(body).write(field.render_headers()) - data = field.data - - if isinstance(data, int): - data = str(data) # Backwards compatibility - - if isinstance(data, six.text_type): - writer(body).write(data) - else: - body.write(data) - - body.write(b'\r\n') - - body.write(b('--%s--\r\n' % (boundary))) - - content_type = str('multipart/form-data; boundary=%s' % boundary) - - return body.getvalue(), content_type diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/packages/__init__.py b/src/collectors/python.d.plugin/python_modules/urllib3/packages/__init__.py deleted file mode 100644 index 170e974c1..000000000 --- a/src/collectors/python.d.plugin/python_modules/urllib3/packages/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from __future__ import absolute_import - -from . import ssl_match_hostname - -__all__ = ('ssl_match_hostname', ) diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/packages/backports/__init__.py b/src/collectors/python.d.plugin/python_modules/urllib3/packages/backports/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/packages/backports/makefile.py b/src/collectors/python.d.plugin/python_modules/urllib3/packages/backports/makefile.py deleted file mode 100644 index 8ab122f8b..000000000 --- a/src/collectors/python.d.plugin/python_modules/urllib3/packages/backports/makefile.py +++ /dev/null @@ -1,54 +0,0 @@ -# -*- coding: utf-8 -*- -# SPDX-License-Identifier: MIT -""" -backports.makefile -~~~~~~~~~~~~~~~~~~ - -Backports the Python 3 ``socket.makefile`` method for use with anything that -wants to create a "fake" socket object. -""" -import io - -from socket import SocketIO - - -def backport_makefile(self, mode="r", buffering=None, encoding=None, - errors=None, newline=None): - """ - Backport of ``socket.makefile`` from Python 3.5. 
- """ - if not set(mode) <= set(["r", "w", "b"]): - raise ValueError( - "invalid mode %r (only r, w, b allowed)" % (mode,) - ) - writing = "w" in mode - reading = "r" in mode or not writing - assert reading or writing - binary = "b" in mode - rawmode = "" - if reading: - rawmode += "r" - if writing: - rawmode += "w" - raw = SocketIO(self, rawmode) - self._makefile_refs += 1 - if buffering is None: - buffering = -1 - if buffering < 0: - buffering = io.DEFAULT_BUFFER_SIZE - if buffering == 0: - if not binary: - raise ValueError("unbuffered streams must be binary") - return raw - if reading and writing: - buffer = io.BufferedRWPair(raw, raw, buffering) - elif reading: - buffer = io.BufferedReader(raw, buffering) - else: - assert writing - buffer = io.BufferedWriter(raw, buffering) - if binary: - return buffer - text = io.TextIOWrapper(buffer, encoding, errors, newline) - text.mode = mode - return text diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/packages/ordered_dict.py b/src/collectors/python.d.plugin/python_modules/urllib3/packages/ordered_dict.py deleted file mode 100644 index 9f7c0e6b8..000000000 --- a/src/collectors/python.d.plugin/python_modules/urllib3/packages/ordered_dict.py +++ /dev/null @@ -1,260 +0,0 @@ -# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy. -# Passes Python2.7's test suite and incorporates all the latest updates. -# Copyright 2009 Raymond Hettinger, released under the MIT License. -# http://code.activestate.com/recipes/576693/ -# SPDX-License-Identifier: MIT -try: - from thread import get_ident as _get_ident -except ImportError: - from dummy_thread import get_ident as _get_ident - -try: - from _abcoll import KeysView, ValuesView, ItemsView -except ImportError: - pass - - -class OrderedDict(dict): - 'Dictionary that remembers insertion order' - # An inherited dict maps keys to values. - # The inherited dict provides __getitem__, __len__, __contains__, and get. - # The remaining methods are order-aware. - # Big-O running times for all methods are the same as for regular dictionaries. - - # The internal self.__map dictionary maps keys to links in a doubly linked list. - # The circular doubly linked list starts and ends with a sentinel element. - # The sentinel element never gets deleted (this simplifies the algorithm). - # Each link is stored as a list of length three: [PREV, NEXT, KEY]. - - def __init__(self, *args, **kwds): - '''Initialize an ordered dictionary. Signature is the same as for - regular dictionaries, but keyword arguments are not recommended - because their insertion order is arbitrary. - - ''' - if len(args) > 1: - raise TypeError('expected at most 1 arguments, got %d' % len(args)) - try: - self.__root - except AttributeError: - self.__root = root = [] # sentinel node - root[:] = [root, root, None] - self.__map = {} - self.__update(*args, **kwds) - - def __setitem__(self, key, value, dict_setitem=dict.__setitem__): - 'od.__setitem__(i, y) <==> od[i]=y' - # Setting a new item creates a new link which goes at the end of the linked - # list, and the inherited dictionary is updated with the new key/value pair. - if key not in self: - root = self.__root - last = root[0] - last[1] = root[0] = self.__map[key] = [last, root, key] - dict_setitem(self, key, value) - - def __delitem__(self, key, dict_delitem=dict.__delitem__): - 'od.__delitem__(y) <==> del od[y]' - # Deleting an existing item uses self.__map to find the link which is - # then removed by updating the links in the predecessor and successor nodes. 
- dict_delitem(self, key) - link_prev, link_next, key = self.__map.pop(key) - link_prev[1] = link_next - link_next[0] = link_prev - - def __iter__(self): - 'od.__iter__() <==> iter(od)' - root = self.__root - curr = root[1] - while curr is not root: - yield curr[2] - curr = curr[1] - - def __reversed__(self): - 'od.__reversed__() <==> reversed(od)' - root = self.__root - curr = root[0] - while curr is not root: - yield curr[2] - curr = curr[0] - - def clear(self): - 'od.clear() -> None. Remove all items from od.' - try: - for node in self.__map.itervalues(): - del node[:] - root = self.__root - root[:] = [root, root, None] - self.__map.clear() - except AttributeError: - pass - dict.clear(self) - - def popitem(self, last=True): - '''od.popitem() -> (k, v), return and remove a (key, value) pair. - Pairs are returned in LIFO order if last is true or FIFO order if false. - - ''' - if not self: - raise KeyError('dictionary is empty') - root = self.__root - if last: - link = root[0] - link_prev = link[0] - link_prev[1] = root - root[0] = link_prev - else: - link = root[1] - link_next = link[1] - root[1] = link_next - link_next[0] = root - key = link[2] - del self.__map[key] - value = dict.pop(self, key) - return key, value - - # -- the following methods do not depend on the internal structure -- - - def keys(self): - 'od.keys() -> list of keys in od' - return list(self) - - def values(self): - 'od.values() -> list of values in od' - return [self[key] for key in self] - - def items(self): - 'od.items() -> list of (key, value) pairs in od' - return [(key, self[key]) for key in self] - - def iterkeys(self): - 'od.iterkeys() -> an iterator over the keys in od' - return iter(self) - - def itervalues(self): - 'od.itervalues -> an iterator over the values in od' - for k in self: - yield self[k] - - def iteritems(self): - 'od.iteritems -> an iterator over the (key, value) items in od' - for k in self: - yield (k, self[k]) - - def update(*args, **kwds): - '''od.update(E, **F) -> None. Update od from dict/iterable E and F. - - If E is a dict instance, does: for k in E: od[k] = E[k] - If E has a .keys() method, does: for k in E.keys(): od[k] = E[k] - Or if E is an iterable of items, does: for k, v in E: od[k] = v - In either case, this is followed by: for k, v in F.items(): od[k] = v - - ''' - if len(args) > 2: - raise TypeError('update() takes at most 2 positional ' - 'arguments (%d given)' % (len(args),)) - elif not args: - raise TypeError('update() takes at least 1 argument (0 given)') - self = args[0] - # Make progressively weaker assumptions about "other" - other = () - if len(args) == 2: - other = args[1] - if isinstance(other, dict): - for key in other: - self[key] = other[key] - elif hasattr(other, 'keys'): - for key in other.keys(): - self[key] = other[key] - else: - for key, value in other: - self[key] = value - for key, value in kwds.items(): - self[key] = value - - __update = update # let subclasses override update without breaking __init__ - - __marker = object() - - def pop(self, key, default=__marker): - '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value. - If key is not found, d is returned if given, otherwise KeyError is raised. 
-
-        '''
-        if key in self:
-            result = self[key]
-            del self[key]
-            return result
-        if default is self.__marker:
-            raise KeyError(key)
-        return default
-
-    def setdefault(self, key, default=None):
-        'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
-        if key in self:
-            return self[key]
-        self[key] = default
-        return default
-
-    def __repr__(self, _repr_running={}):
-        'od.__repr__() <==> repr(od)'
-        call_key = id(self), _get_ident()
-        if call_key in _repr_running:
-            return '...'
-        _repr_running[call_key] = 1
-        try:
-            if not self:
-                return '%s()' % (self.__class__.__name__,)
-            return '%s(%r)' % (self.__class__.__name__, self.items())
-        finally:
-            del _repr_running[call_key]
-
-    def __reduce__(self):
-        'Return state information for pickling'
-        items = [[k, self[k]] for k in self]
-        inst_dict = vars(self).copy()
-        for k in vars(OrderedDict()):
-            inst_dict.pop(k, None)
-        if inst_dict:
-            return (self.__class__, (items,), inst_dict)
-        return self.__class__, (items,)
-
-    def copy(self):
-        'od.copy() -> a shallow copy of od'
-        return self.__class__(self)
-
-    @classmethod
-    def fromkeys(cls, iterable, value=None):
-        '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
-        and values equal to v (which defaults to None).
-
-        '''
-        d = cls()
-        for key in iterable:
-            d[key] = value
-        return d
-
-    def __eq__(self, other):
-        '''od.__eq__(y) <==> od==y.  Comparison to another OD is order-sensitive
-        while comparison to a regular mapping is order-insensitive.
-
-        '''
-        if isinstance(other, OrderedDict):
-            return len(self)==len(other) and self.items() == other.items()
-        return dict.__eq__(self, other)
-
-    def __ne__(self, other):
-        return not self == other
-
-    # -- the following methods are only used in Python 2.7 --
-
-    def viewkeys(self):
-        "od.viewkeys() -> a set-like object providing a view on od's keys"
-        return KeysView(self)
-
-    def viewvalues(self):
-        "od.viewvalues() -> an object providing a view on od's values"
-        return ValuesView(self)
-
-    def viewitems(self):
-        "od.viewitems() -> a set-like object providing a view on od's items"
-        return ItemsView(self)
diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/packages/six.py b/src/collectors/python.d.plugin/python_modules/urllib3/packages/six.py
deleted file mode 100644
index 31df5012b..000000000
--- a/src/collectors/python.d.plugin/python_modules/urllib3/packages/six.py
+++ /dev/null
@@ -1,852 +0,0 @@
-"""Utilities for writing code that runs on Python 2 and 3"""
-
-# Copyright (c) 2010-2015 Benjamin Peterson
-#
-# SPDX-License-Identifier: MIT
-
-from __future__ import absolute_import
-
-import functools
-import itertools
-import operator
-import sys
-import types
-
-__author__ = "Benjamin Peterson <benjamin@python.org>"
-__version__ = "1.10.0"
-
-
-# Useful for very coarse version differentiation.
-PY2 = sys.version_info[0] == 2
-PY3 = sys.version_info[0] == 3
-PY34 = sys.version_info[0:2] >= (3, 4)
-
-if PY3:
-    string_types = str,
-    integer_types = int,
-    class_types = type,
-    text_type = str
-    binary_type = bytes
-
-    MAXSIZE = sys.maxsize
-else:
-    string_types = basestring,
-    integer_types = (int, long)
-    class_types = (type, types.ClassType)
-    text_type = unicode
-    binary_type = str
-
-    if sys.platform.startswith("java"):
-        # Jython always uses 32 bits.
-        MAXSIZE = int((1 << 31) - 1)
-    else:
-        # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
- class X(object): - - def __len__(self): - return 1 << 31 - try: - len(X()) - except OverflowError: - # 32-bit - MAXSIZE = int((1 << 31) - 1) - else: - # 64-bit - MAXSIZE = int((1 << 63) - 1) - del X - - -def _add_doc(func, doc): - """Add documentation to a function.""" - func.__doc__ = doc - - -def _import_module(name): - """Import module, returning the module after the last dot.""" - __import__(name) - return sys.modules[name] - - -class _LazyDescr(object): - - def __init__(self, name): - self.name = name - - def __get__(self, obj, tp): - result = self._resolve() - setattr(obj, self.name, result) # Invokes __set__. - try: - # This is a bit ugly, but it avoids running this again by - # removing this descriptor. - delattr(obj.__class__, self.name) - except AttributeError: - pass - return result - - -class MovedModule(_LazyDescr): - - def __init__(self, name, old, new=None): - super(MovedModule, self).__init__(name) - if PY3: - if new is None: - new = name - self.mod = new - else: - self.mod = old - - def _resolve(self): - return _import_module(self.mod) - - def __getattr__(self, attr): - _module = self._resolve() - value = getattr(_module, attr) - setattr(self, attr, value) - return value - - -class _LazyModule(types.ModuleType): - - def __init__(self, name): - super(_LazyModule, self).__init__(name) - self.__doc__ = self.__class__.__doc__ - - def __dir__(self): - attrs = ["__doc__", "__name__"] - attrs += [attr.name for attr in self._moved_attributes] - return attrs - - # Subclasses should override this - _moved_attributes = [] - - -class MovedAttribute(_LazyDescr): - - def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): - super(MovedAttribute, self).__init__(name) - if PY3: - if new_mod is None: - new_mod = name - self.mod = new_mod - if new_attr is None: - if old_attr is None: - new_attr = name - else: - new_attr = old_attr - self.attr = new_attr - else: - self.mod = old_mod - if old_attr is None: - old_attr = name - self.attr = old_attr - - def _resolve(self): - module = _import_module(self.mod) - return getattr(module, self.attr) - - -class _SixMetaPathImporter(object): - - """ - A meta path importer to import six.moves and its submodules. - - This class implements a PEP302 finder and loader. It should be compatible - with Python 2.5 and all existing versions of Python3 - """ - - def __init__(self, six_module_name): - self.name = six_module_name - self.known_modules = {} - - def _add_module(self, mod, *fullnames): - for fullname in fullnames: - self.known_modules[self.name + "." + fullname] = mod - - def _get_module(self, fullname): - return self.known_modules[self.name + "." + fullname] - - def find_module(self, fullname, path=None): - if fullname in self.known_modules: - return self - return None - - def __get_module(self, fullname): - try: - return self.known_modules[fullname] - except KeyError: - raise ImportError("This loader does not know module " + fullname) - - def load_module(self, fullname): - try: - # in case of a reload - return sys.modules[fullname] - except KeyError: - pass - mod = self.__get_module(fullname) - if isinstance(mod, MovedModule): - mod = mod._resolve() - else: - mod.__loader__ = self - sys.modules[fullname] = mod - return mod - - def is_package(self, fullname): - """ - Return true, if the named module is a package. 
- - We need this method to get correct spec objects with - Python 3.4 (see PEP451) - """ - return hasattr(self.__get_module(fullname), "__path__") - - def get_code(self, fullname): - """Return None - - Required, if is_package is implemented""" - self.__get_module(fullname) # eventually raises ImportError - return None - get_source = get_code # same as get_code - -_importer = _SixMetaPathImporter(__name__) - - -class _MovedItems(_LazyModule): - - """Lazy loading of moved objects""" - __path__ = [] # mark as package - - -_moved_attributes = [ - MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), - MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), - MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"), - MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), - MovedAttribute("intern", "__builtin__", "sys"), - MovedAttribute("map", "itertools", "builtins", "imap", "map"), - MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"), - MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"), - MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"), - MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"), - MovedAttribute("reduce", "__builtin__", "functools"), - MovedAttribute("shlex_quote", "pipes", "shlex", "quote"), - MovedAttribute("StringIO", "StringIO", "io"), - MovedAttribute("UserDict", "UserDict", "collections"), - MovedAttribute("UserList", "UserList", "collections"), - MovedAttribute("UserString", "UserString", "collections"), - MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), - MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), - MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"), - MovedModule("builtins", "__builtin__"), - MovedModule("configparser", "ConfigParser"), - MovedModule("copyreg", "copy_reg"), - MovedModule("dbm_gnu", "gdbm", "dbm.gnu"), - MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"), - MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), - MovedModule("http_cookies", "Cookie", "http.cookies"), - MovedModule("html_entities", "htmlentitydefs", "html.entities"), - MovedModule("html_parser", "HTMLParser", "html.parser"), - MovedModule("http_client", "httplib", "http.client"), - MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), - MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"), - MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), - MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), - MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), - MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), - MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"), - MovedModule("cPickle", "cPickle", "pickle"), - MovedModule("queue", "Queue"), - MovedModule("reprlib", "repr"), - MovedModule("socketserver", "SocketServer"), - MovedModule("_thread", "thread", "_thread"), - MovedModule("tkinter", "Tkinter"), - MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"), - MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"), - MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"), - MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"), - MovedModule("tkinter_tix", "Tix", "tkinter.tix"), - MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"), - 
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), - MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), - MovedModule("tkinter_colorchooser", "tkColorChooser", - "tkinter.colorchooser"), - MovedModule("tkinter_commondialog", "tkCommonDialog", - "tkinter.commondialog"), - MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"), - MovedModule("tkinter_font", "tkFont", "tkinter.font"), - MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), - MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", - "tkinter.simpledialog"), - MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"), - MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"), - MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"), - MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), - MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"), - MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"), -] -# Add windows specific modules. -if sys.platform == "win32": - _moved_attributes += [ - MovedModule("winreg", "_winreg"), - ] - -for attr in _moved_attributes: - setattr(_MovedItems, attr.name, attr) - if isinstance(attr, MovedModule): - _importer._add_module(attr, "moves." + attr.name) -del attr - -_MovedItems._moved_attributes = _moved_attributes - -moves = _MovedItems(__name__ + ".moves") -_importer._add_module(moves, "moves") - - -class Module_six_moves_urllib_parse(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_parse""" - - -_urllib_parse_moved_attributes = [ - MovedAttribute("ParseResult", "urlparse", "urllib.parse"), - MovedAttribute("SplitResult", "urlparse", "urllib.parse"), - MovedAttribute("parse_qs", "urlparse", "urllib.parse"), - MovedAttribute("parse_qsl", "urlparse", "urllib.parse"), - MovedAttribute("urldefrag", "urlparse", "urllib.parse"), - MovedAttribute("urljoin", "urlparse", "urllib.parse"), - MovedAttribute("urlparse", "urlparse", "urllib.parse"), - MovedAttribute("urlsplit", "urlparse", "urllib.parse"), - MovedAttribute("urlunparse", "urlparse", "urllib.parse"), - MovedAttribute("urlunsplit", "urlparse", "urllib.parse"), - MovedAttribute("quote", "urllib", "urllib.parse"), - MovedAttribute("quote_plus", "urllib", "urllib.parse"), - MovedAttribute("unquote", "urllib", "urllib.parse"), - MovedAttribute("unquote_plus", "urllib", "urllib.parse"), - MovedAttribute("urlencode", "urllib", "urllib.parse"), - MovedAttribute("splitquery", "urllib", "urllib.parse"), - MovedAttribute("splittag", "urllib", "urllib.parse"), - MovedAttribute("splituser", "urllib", "urllib.parse"), - MovedAttribute("uses_fragment", "urlparse", "urllib.parse"), - MovedAttribute("uses_netloc", "urlparse", "urllib.parse"), - MovedAttribute("uses_params", "urlparse", "urllib.parse"), - MovedAttribute("uses_query", "urlparse", "urllib.parse"), - MovedAttribute("uses_relative", "urlparse", "urllib.parse"), -] -for attr in _urllib_parse_moved_attributes: - setattr(Module_six_moves_urllib_parse, attr.name, attr) -del attr - -Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes - -_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"), - "moves.urllib_parse", "moves.urllib.parse") - - -class Module_six_moves_urllib_error(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_error""" - - -_urllib_error_moved_attributes = [ - MovedAttribute("URLError", "urllib2", "urllib.error"), - 
MovedAttribute("HTTPError", "urllib2", "urllib.error"), - MovedAttribute("ContentTooShortError", "urllib", "urllib.error"), -] -for attr in _urllib_error_moved_attributes: - setattr(Module_six_moves_urllib_error, attr.name, attr) -del attr - -Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes - -_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"), - "moves.urllib_error", "moves.urllib.error") - - -class Module_six_moves_urllib_request(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_request""" - - -_urllib_request_moved_attributes = [ - MovedAttribute("urlopen", "urllib2", "urllib.request"), - MovedAttribute("install_opener", "urllib2", "urllib.request"), - MovedAttribute("build_opener", "urllib2", "urllib.request"), - MovedAttribute("pathname2url", "urllib", "urllib.request"), - MovedAttribute("url2pathname", "urllib", "urllib.request"), - MovedAttribute("getproxies", "urllib", "urllib.request"), - MovedAttribute("Request", "urllib2", "urllib.request"), - MovedAttribute("OpenerDirector", "urllib2", "urllib.request"), - MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"), - MovedAttribute("ProxyHandler", "urllib2", "urllib.request"), - MovedAttribute("BaseHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"), - MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"), - MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"), - MovedAttribute("FileHandler", "urllib2", "urllib.request"), - MovedAttribute("FTPHandler", "urllib2", "urllib.request"), - MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"), - MovedAttribute("UnknownHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"), - MovedAttribute("urlretrieve", "urllib", "urllib.request"), - MovedAttribute("urlcleanup", "urllib", "urllib.request"), - MovedAttribute("URLopener", "urllib", "urllib.request"), - MovedAttribute("FancyURLopener", "urllib", "urllib.request"), - MovedAttribute("proxy_bypass", "urllib", "urllib.request"), -] -for attr in _urllib_request_moved_attributes: - setattr(Module_six_moves_urllib_request, attr.name, attr) -del attr - -Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes - -_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"), - "moves.urllib_request", "moves.urllib.request") - - -class Module_six_moves_urllib_response(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_response""" - - -_urllib_response_moved_attributes = [ - MovedAttribute("addbase", "urllib", "urllib.response"), - MovedAttribute("addclosehook", "urllib", "urllib.response"), - MovedAttribute("addinfo", "urllib", "urllib.response"), - MovedAttribute("addinfourl", "urllib", 
"urllib.response"), -] -for attr in _urllib_response_moved_attributes: - setattr(Module_six_moves_urllib_response, attr.name, attr) -del attr - -Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes - -_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"), - "moves.urllib_response", "moves.urllib.response") - - -class Module_six_moves_urllib_robotparser(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_robotparser""" - - -_urllib_robotparser_moved_attributes = [ - MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"), -] -for attr in _urllib_robotparser_moved_attributes: - setattr(Module_six_moves_urllib_robotparser, attr.name, attr) -del attr - -Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes - -_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"), - "moves.urllib_robotparser", "moves.urllib.robotparser") - - -class Module_six_moves_urllib(types.ModuleType): - - """Create a six.moves.urllib namespace that resembles the Python 3 namespace""" - __path__ = [] # mark as package - parse = _importer._get_module("moves.urllib_parse") - error = _importer._get_module("moves.urllib_error") - request = _importer._get_module("moves.urllib_request") - response = _importer._get_module("moves.urllib_response") - robotparser = _importer._get_module("moves.urllib_robotparser") - - def __dir__(self): - return ['parse', 'error', 'request', 'response', 'robotparser'] - -_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"), - "moves.urllib") - - -def add_move(move): - """Add an item to six.moves.""" - setattr(_MovedItems, move.name, move) - - -def remove_move(name): - """Remove item from six.moves.""" - try: - delattr(_MovedItems, name) - except AttributeError: - try: - del moves.__dict__[name] - except KeyError: - raise AttributeError("no such move, %r" % (name,)) - - -if PY3: - _meth_func = "__func__" - _meth_self = "__self__" - - _func_closure = "__closure__" - _func_code = "__code__" - _func_defaults = "__defaults__" - _func_globals = "__globals__" -else: - _meth_func = "im_func" - _meth_self = "im_self" - - _func_closure = "func_closure" - _func_code = "func_code" - _func_defaults = "func_defaults" - _func_globals = "func_globals" - - -try: - advance_iterator = next -except NameError: - def advance_iterator(it): - return it.next() -next = advance_iterator - - -try: - callable = callable -except NameError: - def callable(obj): - return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) - - -if PY3: - def get_unbound_function(unbound): - return unbound - - create_bound_method = types.MethodType - - def create_unbound_method(func, cls): - return func - - Iterator = object -else: - def get_unbound_function(unbound): - return unbound.im_func - - def create_bound_method(func, obj): - return types.MethodType(func, obj, obj.__class__) - - def create_unbound_method(func, cls): - return types.MethodType(func, None, cls) - - class Iterator(object): - - def next(self): - return type(self).__next__(self) - - callable = callable -_add_doc(get_unbound_function, - """Get the function out of a possibly unbound function""") - - -get_method_function = operator.attrgetter(_meth_func) -get_method_self = operator.attrgetter(_meth_self) -get_function_closure = operator.attrgetter(_func_closure) -get_function_code = operator.attrgetter(_func_code) -get_function_defaults = 
operator.attrgetter(_func_defaults) -get_function_globals = operator.attrgetter(_func_globals) - - -if PY3: - def iterkeys(d, **kw): - return iter(d.keys(**kw)) - - def itervalues(d, **kw): - return iter(d.values(**kw)) - - def iteritems(d, **kw): - return iter(d.items(**kw)) - - def iterlists(d, **kw): - return iter(d.lists(**kw)) - - viewkeys = operator.methodcaller("keys") - - viewvalues = operator.methodcaller("values") - - viewitems = operator.methodcaller("items") -else: - def iterkeys(d, **kw): - return d.iterkeys(**kw) - - def itervalues(d, **kw): - return d.itervalues(**kw) - - def iteritems(d, **kw): - return d.iteritems(**kw) - - def iterlists(d, **kw): - return d.iterlists(**kw) - - viewkeys = operator.methodcaller("viewkeys") - - viewvalues = operator.methodcaller("viewvalues") - - viewitems = operator.methodcaller("viewitems") - -_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.") -_add_doc(itervalues, "Return an iterator over the values of a dictionary.") -_add_doc(iteritems, - "Return an iterator over the (key, value) pairs of a dictionary.") -_add_doc(iterlists, - "Return an iterator over the (key, [values]) pairs of a dictionary.") - - -if PY3: - def b(s): - return s.encode("latin-1") - - def u(s): - return s - unichr = chr - import struct - int2byte = struct.Struct(">B").pack - del struct - byte2int = operator.itemgetter(0) - indexbytes = operator.getitem - iterbytes = iter - import io - StringIO = io.StringIO - BytesIO = io.BytesIO - _assertCountEqual = "assertCountEqual" - if sys.version_info[1] <= 1: - _assertRaisesRegex = "assertRaisesRegexp" - _assertRegex = "assertRegexpMatches" - else: - _assertRaisesRegex = "assertRaisesRegex" - _assertRegex = "assertRegex" -else: - def b(s): - return s - # Workaround for standalone backslash - - def u(s): - return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") - unichr = unichr - int2byte = chr - - def byte2int(bs): - return ord(bs[0]) - - def indexbytes(buf, i): - return ord(buf[i]) - iterbytes = functools.partial(itertools.imap, ord) - import StringIO - StringIO = BytesIO = StringIO.StringIO - _assertCountEqual = "assertItemsEqual" - _assertRaisesRegex = "assertRaisesRegexp" - _assertRegex = "assertRegexpMatches" -_add_doc(b, """Byte literal""") -_add_doc(u, """Text literal""") - - -def assertCountEqual(self, *args, **kwargs): - return getattr(self, _assertCountEqual)(*args, **kwargs) - - -def assertRaisesRegex(self, *args, **kwargs): - return getattr(self, _assertRaisesRegex)(*args, **kwargs) - - -def assertRegex(self, *args, **kwargs): - return getattr(self, _assertRegex)(*args, **kwargs) - - -if PY3: - exec_ = getattr(moves.builtins, "exec") - - def reraise(tp, value, tb=None): - if value is None: - value = tp() - if value.__traceback__ is not tb: - raise value.with_traceback(tb) - raise value - -else: - def exec_(_code_, _globs_=None, _locs_=None): - """Execute code in a namespace.""" - if _globs_ is None: - frame = sys._getframe(1) - _globs_ = frame.f_globals - if _locs_ is None: - _locs_ = frame.f_locals - del frame - elif _locs_ is None: - _locs_ = _globs_ - exec("""exec _code_ in _globs_, _locs_""") - - exec_("""def reraise(tp, value, tb=None): - raise tp, value, tb -""") - - -if sys.version_info[:2] == (3, 2): - exec_("""def raise_from(value, from_value): - if from_value is None: - raise value - raise value from from_value -""") -elif sys.version_info[:2] > (3, 2): - exec_("""def raise_from(value, from_value): - raise value from from_value -""") -else: - def raise_from(value, from_value): - 
raise value - - -print_ = getattr(moves.builtins, "print", None) -if print_ is None: - def print_(*args, **kwargs): - """The new-style print function for Python 2.4 and 2.5.""" - fp = kwargs.pop("file", sys.stdout) - if fp is None: - return - - def write(data): - if not isinstance(data, basestring): - data = str(data) - # If the file has an encoding, encode unicode with it. - if (isinstance(fp, file) and - isinstance(data, unicode) and - fp.encoding is not None): - errors = getattr(fp, "errors", None) - if errors is None: - errors = "strict" - data = data.encode(fp.encoding, errors) - fp.write(data) - want_unicode = False - sep = kwargs.pop("sep", None) - if sep is not None: - if isinstance(sep, unicode): - want_unicode = True - elif not isinstance(sep, str): - raise TypeError("sep must be None or a string") - end = kwargs.pop("end", None) - if end is not None: - if isinstance(end, unicode): - want_unicode = True - elif not isinstance(end, str): - raise TypeError("end must be None or a string") - if kwargs: - raise TypeError("invalid keyword arguments to print()") - if not want_unicode: - for arg in args: - if isinstance(arg, unicode): - want_unicode = True - break - if want_unicode: - newline = unicode("\n") - space = unicode(" ") - else: - newline = "\n" - space = " " - if sep is None: - sep = space - if end is None: - end = newline - for i, arg in enumerate(args): - if i: - write(sep) - write(arg) - write(end) -if sys.version_info[:2] < (3, 3): - _print = print_ - - def print_(*args, **kwargs): - fp = kwargs.get("file", sys.stdout) - flush = kwargs.pop("flush", False) - _print(*args, **kwargs) - if flush and fp is not None: - fp.flush() - -_add_doc(reraise, """Reraise an exception.""") - -if sys.version_info[0:2] < (3, 4): - def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, - updated=functools.WRAPPER_UPDATES): - def wrapper(f): - f = functools.wraps(wrapped, assigned, updated)(f) - f.__wrapped__ = wrapped - return f - return wrapper -else: - wraps = functools.wraps - - -def with_metaclass(meta, *bases): - """Create a base class with a metaclass.""" - # This requires a bit of explanation: the basic idea is to make a dummy - # metaclass for one level of class instantiation that replaces itself with - # the actual metaclass. - class metaclass(meta): - - def __new__(cls, name, this_bases, d): - return meta(name, bases, d) - return type.__new__(metaclass, 'temporary_class', (), {}) - - -def add_metaclass(metaclass): - """Class decorator for creating a class with a metaclass.""" - def wrapper(cls): - orig_vars = cls.__dict__.copy() - slots = orig_vars.get('__slots__') - if slots is not None: - if isinstance(slots, str): - slots = [slots] - for slots_var in slots: - orig_vars.pop(slots_var) - orig_vars.pop('__dict__', None) - orig_vars.pop('__weakref__', None) - return metaclass(cls.__name__, cls.__bases__, orig_vars) - return wrapper - - -def python_2_unicode_compatible(klass): - """ - A decorator that defines __unicode__ and __str__ methods under Python 2. - Under Python 3 it does nothing. - - To support Python 2 and 3 with a single code base, define a __str__ method - returning text and apply this decorator to the class. - """ - if PY2: - if '__str__' not in klass.__dict__: - raise ValueError("@python_2_unicode_compatible cannot be applied " - "to %s because it doesn't define __str__()." % - klass.__name__) - klass.__unicode__ = klass.__str__ - klass.__str__ = lambda self: self.__unicode__().encode('utf-8') - return klass - - -# Complete the moves implementation. 
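The two metaclass helpers defined above are interchangeable in effect; a small usage sketch, assuming the module above is importable as ``six`` and using a hypothetical metaclass:

    import six

    class Meta(type):
        def __new__(mcs, name, bases, d):
            d['tagged'] = True
            return super(Meta, mcs).__new__(mcs, name, bases, d)

    # Works on Python 2 and 3: the dummy metaclass replaces itself.
    class A(six.with_metaclass(Meta, object)):
        pass

    # Decorator spelling: the class is rebuilt with the real metaclass.
    @six.add_metaclass(Meta)
    class B(object):
        pass

    assert A.tagged and B.tagged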
-# This code is at the end of this module to speed up module loading. -# Turn this module into a package. -__path__ = [] # required for PEP 302 and PEP 451 -__package__ = __name__ # see PEP 366 @ReservedAssignment -if globals().get("__spec__") is not None: - __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable -# Remove other six meta path importers, since they cause problems. This can -# happen if six is removed from sys.modules and then reloaded. (Setuptools does -# this for some reason.) -if sys.meta_path: - for i, importer in enumerate(sys.meta_path): - # Here's some real nastiness: Another "instance" of the six module might - # be floating around. Therefore, we can't use isinstance() to check for - # the six meta path importer, since the other six instance will have - # inserted an importer with different class. - if (type(importer).__name__ == "_SixMetaPathImporter" and - importer.name == __name__): - del sys.meta_path[i] - break - del i, importer -# Finally, add the importer to the meta path import hook. -sys.meta_path.append(_importer) diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/__init__.py b/src/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/__init__.py deleted file mode 100644 index 2aeeeff91..000000000 --- a/src/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# SPDX-License-Identifier: MIT -import sys - -try: - # Our match_hostname function is the same as 3.5's, so we only want to - # import the match_hostname function if it's at least that good. - if sys.version_info < (3, 5): - raise ImportError("Fallback to vendored code") - - from ssl import CertificateError, match_hostname -except ImportError: - try: - # Backport of the function from a pypi module - from backports.ssl_match_hostname import CertificateError, match_hostname - except ImportError: - # Our vendored copy - from ._implementation import CertificateError, match_hostname - -# Not needed, but documenting what we provide. -__all__ = ('CertificateError', 'match_hostname') diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py b/src/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py deleted file mode 100644 index 647e081da..000000000 --- a/src/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py +++ /dev/null @@ -1,156 +0,0 @@ -"""The match_hostname() function from Python 3.3.3, essential when using SSL.""" - -# SPDX-License-Identifier: Python-2.0 - -import re -import sys - -# ipaddress has been backported to 2.6+ in pypi. If it is installed on the -# system, use it to handle IPAddress ServerAltnames (this was added in -# python-3.5) otherwise only do DNS matching. This allows -# backports.ssl_match_hostname to continue to be used all the way back to -# python-2.4. 
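Note the trick in the deleted __init__.py above: when the interpreter is too old it raises ImportError by hand, so a single except clause covers both "the stdlib function is missing" and "the stdlib function is too old". The same gate works for any version-sensitive import; a generic sketch with a hypothetical bundled module name:

    import sys

    try:
        # Deliberately fail the import on old interpreters so the
        # fallback below is taken for "too old" as well as "missing".
        if sys.version_info < (3, 5):
            raise ImportError("stdlib copy too old, use the bundled one")
        from ssl import CertificateError, match_hostname
    except ImportError:
        from my_vendored_match_hostname import CertificateError, match_hostname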
-try: - import ipaddress -except ImportError: - ipaddress = None - -__version__ = '3.5.0.1' - - -class CertificateError(ValueError): - pass - - -def _dnsname_match(dn, hostname, max_wildcards=1): - """Matching according to RFC 6125, section 6.4.3 - - http://tools.ietf.org/html/rfc6125#section-6.4.3 - """ - pats = [] - if not dn: - return False - - # Ported from python3-syntax: - # leftmost, *remainder = dn.split(r'.') - parts = dn.split(r'.') - leftmost = parts[0] - remainder = parts[1:] - - wildcards = leftmost.count('*') - if wildcards > max_wildcards: - # Issue #17980: avoid denials of service by refusing more - # than one wildcard per fragment. A survey of established - # policy among SSL implementations showed it to be a - # reasonable choice. - raise CertificateError( - "too many wildcards in certificate DNS name: " + repr(dn)) - - # speed up common case w/o wildcards - if not wildcards: - return dn.lower() == hostname.lower() - - # RFC 6125, section 6.4.3, subitem 1. - # The client SHOULD NOT attempt to match a presented identifier in which - # the wildcard character comprises a label other than the left-most label. - if leftmost == '*': - # When '*' is a fragment by itself, it matches a non-empty dotless - # fragment. - pats.append('[^.]+') - elif leftmost.startswith('xn--') or hostname.startswith('xn--'): - # RFC 6125, section 6.4.3, subitem 3. - # The client SHOULD NOT attempt to match a presented identifier - # where the wildcard character is embedded within an A-label or - # U-label of an internationalized domain name. - pats.append(re.escape(leftmost)) - else: - # Otherwise, '*' matches any dotless string, e.g. www* - pats.append(re.escape(leftmost).replace(r'\*', '[^.]*')) - - # add the remaining fragments, ignore any wildcards - for frag in remainder: - pats.append(re.escape(frag)) - - pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE) - return pat.match(hostname) - - -def _to_unicode(obj): - if isinstance(obj, str) and sys.version_info < (3,): - obj = unicode(obj, encoding='ascii', errors='strict') - return obj - -def _ipaddress_match(ipname, host_ip): - """Exact matching of IP addresses. - - RFC 6125 explicitly doesn't define an algorithm for this - (section 1.7.2 - "Out of Scope"). - """ - # OpenSSL may add a trailing newline to a subjectAltName's IP address - # Divergence from upstream: ipaddress can't handle byte str - ip = ipaddress.ip_address(_to_unicode(ipname).rstrip()) - return ip == host_ip - - -def match_hostname(cert, hostname): - """Verify that *cert* (in decoded format as returned by - SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125 - rules are followed, but IP addresses are not accepted for *hostname*. - - CertificateError is raised on failure. On success, the function - returns nothing. - """ - if not cert: - raise ValueError("empty or no certificate, match_hostname needs a " - "SSL socket or SSL context with either " - "CERT_OPTIONAL or CERT_REQUIRED") - try: - # Divergence from upstream: ipaddress can't handle byte str - host_ip = ipaddress.ip_address(_to_unicode(hostname)) - except ValueError: - # Not an IP address (common case) - host_ip = None - except UnicodeError: - # Divergence from upstream: Have to deal with ipaddress not taking - # byte strings. 
addresses should be all ascii, so we consider it not - # an ipaddress in this case - host_ip = None - except AttributeError: - # Divergence from upstream: Make ipaddress library optional - if ipaddress is None: - host_ip = None - else: - raise - dnsnames = [] - san = cert.get('subjectAltName', ()) - for key, value in san: - if key == 'DNS': - if host_ip is None and _dnsname_match(value, hostname): - return - dnsnames.append(value) - elif key == 'IP Address': - if host_ip is not None and _ipaddress_match(value, host_ip): - return - dnsnames.append(value) - if not dnsnames: - # The subject is only checked when there is no dNSName entry - # in subjectAltName - for sub in cert.get('subject', ()): - for key, value in sub: - # XXX according to RFC 2818, the most specific Common Name - # must be used. - if key == 'commonName': - if _dnsname_match(value, hostname): - return - dnsnames.append(value) - if len(dnsnames) > 1: - raise CertificateError("hostname %r " - "doesn't match either of %s" - % (hostname, ', '.join(map(repr, dnsnames)))) - elif len(dnsnames) == 1: - raise CertificateError("hostname %r " - "doesn't match %r" - % (hostname, dnsnames[0])) - else: - raise CertificateError("no appropriate commonName or " - "subjectAltName fields were found") diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/poolmanager.py b/src/collectors/python.d.plugin/python_modules/urllib3/poolmanager.py deleted file mode 100644 index adea9bc01..000000000 --- a/src/collectors/python.d.plugin/python_modules/urllib3/poolmanager.py +++ /dev/null @@ -1,441 +0,0 @@ -# SPDX-License-Identifier: MIT -from __future__ import absolute_import -import collections -import functools -import logging - -from ._collections import RecentlyUsedContainer -from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool -from .connectionpool import port_by_scheme -from .exceptions import LocationValueError, MaxRetryError, ProxySchemeUnknown -from .packages.six.moves.urllib.parse import urljoin -from .request import RequestMethods -from .util.url import parse_url -from .util.retry import Retry - - -__all__ = ['PoolManager', 'ProxyManager', 'proxy_from_url'] - - -log = logging.getLogger(__name__) - -SSL_KEYWORDS = ('key_file', 'cert_file', 'cert_reqs', 'ca_certs', - 'ssl_version', 'ca_cert_dir', 'ssl_context') - -# All known keyword arguments that could be provided to the pool manager, its -# pools, or the underlying connections. This is used to construct a pool key. -_key_fields = ( - 'key_scheme', # str - 'key_host', # str - 'key_port', # int - 'key_timeout', # int or float or Timeout - 'key_retries', # int or Retry - 'key_strict', # bool - 'key_block', # bool - 'key_source_address', # str - 'key_key_file', # str - 'key_cert_file', # str - 'key_cert_reqs', # str - 'key_ca_certs', # str - 'key_ssl_version', # str - 'key_ca_cert_dir', # str - 'key_ssl_context', # instance of ssl.SSLContext or urllib3.util.ssl_.SSLContext - 'key_maxsize', # int - 'key_headers', # dict - 'key__proxy', # parsed proxy url - 'key__proxy_headers', # dict - 'key_socket_options', # list of (level (int), optname (int), value (int or str)) tuples - 'key__socks_options', # dict - 'key_assert_hostname', # bool or string - 'key_assert_fingerprint', # str -) - -#: The namedtuple class used to construct keys for the connection pool. -#: All custom key schemes should include the fields in this key at a minimum. 
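To make the wildcard rules implemented above concrete: a left-most '*' spans exactly one DNS label, so '*.example.com' matches 'www.example.com' but not 'sub.www.example.com'. A small sketch using the stdlib twin of the vendored function (ssl.match_hostname existed up to Python 3.11; the certificate and names below are illustrative):

    from ssl import CertificateError, match_hostname

    cert = {'subjectAltName': (('DNS', '*.example.com'),)}

    match_hostname(cert, 'www.example.com')       # returns None: matches
    try:
        match_hostname(cert, 'sub.www.example.com')
    except CertificateError as exc:
        # '*' never spans more than one label (RFC 6125 section 6.4.3)
        print('rejected:', exc)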
-PoolKey = collections.namedtuple('PoolKey', _key_fields) - - -def _default_key_normalizer(key_class, request_context): - """ - Create a pool key out of a request context dictionary. - - According to RFC 3986, both the scheme and host are case-insensitive. - Therefore, this function normalizes both before constructing the pool - key for an HTTPS request. If you wish to change this behaviour, provide - alternate callables to ``key_fn_by_scheme``. - - :param key_class: - The class to use when constructing the key. This should be a namedtuple - with the ``scheme`` and ``host`` keys at a minimum. - :type key_class: namedtuple - :param request_context: - A dictionary-like object that contain the context for a request. - :type request_context: dict - - :return: A namedtuple that can be used as a connection pool key. - :rtype: PoolKey - """ - # Since we mutate the dictionary, make a copy first - context = request_context.copy() - context['scheme'] = context['scheme'].lower() - context['host'] = context['host'].lower() - - # These are both dictionaries and need to be transformed into frozensets - for key in ('headers', '_proxy_headers', '_socks_options'): - if key in context and context[key] is not None: - context[key] = frozenset(context[key].items()) - - # The socket_options key may be a list and needs to be transformed into a - # tuple. - socket_opts = context.get('socket_options') - if socket_opts is not None: - context['socket_options'] = tuple(socket_opts) - - # Map the kwargs to the names in the namedtuple - this is necessary since - # namedtuples can't have fields starting with '_'. - for key in list(context.keys()): - context['key_' + key] = context.pop(key) - - # Default to ``None`` for keys missing from the context - for field in key_class._fields: - if field not in context: - context[field] = None - - return key_class(**context) - - -#: A dictionary that maps a scheme to a callable that creates a pool key. -#: This can be used to alter the way pool keys are constructed, if desired. -#: Each PoolManager makes a copy of this dictionary so they can be configured -#: globally here, or individually on the instance. -key_fn_by_scheme = { - 'http': functools.partial(_default_key_normalizer, PoolKey), - 'https': functools.partial(_default_key_normalizer, PoolKey), -} - -pool_classes_by_scheme = { - 'http': HTTPConnectionPool, - 'https': HTTPSConnectionPool, -} - - -class PoolManager(RequestMethods): - """ - Allows for arbitrary requests while transparently keeping track of - necessary connection pools for you. - - :param num_pools: - Number of connection pools to cache before discarding the least - recently used pool. - - :param headers: - Headers to include with all requests, unless other headers are given - explicitly. - - :param \\**connection_pool_kw: - Additional parameters are used to create fresh - :class:`urllib3.connectionpool.ConnectionPool` instances. - - Example:: - - >>> manager = PoolManager(num_pools=2) - >>> r = manager.request('GET', 'http://google.com/') - >>> r = manager.request('GET', 'http://google.com/mail') - >>> r = manager.request('GET', 'http://yahoo.com/') - >>> len(manager.pools) - 2 - - """ - - proxy = None - - def __init__(self, num_pools=10, headers=None, **connection_pool_kw): - RequestMethods.__init__(self, headers) - self.connection_pool_kw = connection_pool_kw - self.pools = RecentlyUsedContainer(num_pools, - dispose_func=lambda p: p.close()) - - # Locally set the pool classes and keys so other PoolManagers can - # override them. 
- self.pool_classes_by_scheme = pool_classes_by_scheme - self.key_fn_by_scheme = key_fn_by_scheme.copy() - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self.clear() - # Return False to re-raise any potential exceptions - return False - - def _new_pool(self, scheme, host, port, request_context=None): - """ - Create a new :class:`ConnectionPool` based on host, port, scheme, and - any additional pool keyword arguments. - - If ``request_context`` is provided, it is provided as keyword arguments - to the pool class used. This method is used to actually create the - connection pools handed out by :meth:`connection_from_url` and - companion methods. It is intended to be overridden for customization. - """ - pool_cls = self.pool_classes_by_scheme[scheme] - if request_context is None: - request_context = self.connection_pool_kw.copy() - - # Although the context has everything necessary to create the pool, - # this function has historically only used the scheme, host, and port - # in the positional args. When an API change is acceptable these can - # be removed. - for key in ('scheme', 'host', 'port'): - request_context.pop(key, None) - - if scheme == 'http': - for kw in SSL_KEYWORDS: - request_context.pop(kw, None) - - return pool_cls(host, port, **request_context) - - def clear(self): - """ - Empty our store of pools and direct them all to close. - - This will not affect in-flight connections, but they will not be - re-used after completion. - """ - self.pools.clear() - - def connection_from_host(self, host, port=None, scheme='http', pool_kwargs=None): - """ - Get a :class:`ConnectionPool` based on the host, port, and scheme. - - If ``port`` isn't given, it will be derived from the ``scheme`` using - ``urllib3.connectionpool.port_by_scheme``. If ``pool_kwargs`` is - provided, it is merged with the instance's ``connection_pool_kw`` - variable and used to create the new connection pool, if one is - needed. - """ - - if not host: - raise LocationValueError("No host specified.") - - request_context = self._merge_pool_kwargs(pool_kwargs) - request_context['scheme'] = scheme or 'http' - if not port: - port = port_by_scheme.get(request_context['scheme'].lower(), 80) - request_context['port'] = port - request_context['host'] = host - - return self.connection_from_context(request_context) - - def connection_from_context(self, request_context): - """ - Get a :class:`ConnectionPool` based on the request context. - - ``request_context`` must at least contain the ``scheme`` key and its - value must be a key in ``key_fn_by_scheme`` instance variable. - """ - scheme = request_context['scheme'].lower() - pool_key_constructor = self.key_fn_by_scheme[scheme] - pool_key = pool_key_constructor(request_context) - - return self.connection_from_pool_key(pool_key, request_context=request_context) - - def connection_from_pool_key(self, pool_key, request_context=None): - """ - Get a :class:`ConnectionPool` based on the provided pool key. - - ``pool_key`` should be a namedtuple that only contains immutable - objects. At a minimum it must have the ``scheme``, ``host``, and - ``port`` fields. - """ - with self.pools.lock: - # If the scheme, host, or port doesn't match existing open - # connections, open a new ConnectionPool. 
- pool = self.pools.get(pool_key) - if pool: - return pool - - # Make a fresh ConnectionPool of the desired type - scheme = request_context['scheme'] - host = request_context['host'] - port = request_context['port'] - pool = self._new_pool(scheme, host, port, request_context=request_context) - self.pools[pool_key] = pool - - return pool - - def connection_from_url(self, url, pool_kwargs=None): - """ - Similar to :func:`urllib3.connectionpool.connection_from_url`. - - If ``pool_kwargs`` is not provided and a new pool needs to be - constructed, ``self.connection_pool_kw`` is used to initialize - the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs`` - is provided, it is used instead. Note that if a new pool does not - need to be created for the request, the provided ``pool_kwargs`` are - not used. - """ - u = parse_url(url) - return self.connection_from_host(u.host, port=u.port, scheme=u.scheme, - pool_kwargs=pool_kwargs) - - def _merge_pool_kwargs(self, override): - """ - Merge a dictionary of override values for self.connection_pool_kw. - - This does not modify self.connection_pool_kw and returns a new dict. - Any keys in the override dictionary with a value of ``None`` are - removed from the merged dictionary. - """ - base_pool_kwargs = self.connection_pool_kw.copy() - if override: - for key, value in override.items(): - if value is None: - try: - del base_pool_kwargs[key] - except KeyError: - pass - else: - base_pool_kwargs[key] = value - return base_pool_kwargs - - def urlopen(self, method, url, redirect=True, **kw): - """ - Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen` - with custom cross-host redirect logic and only sends the request-uri - portion of the ``url``. - - The given ``url`` parameter must be absolute, such that an appropriate - :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it. - """ - u = parse_url(url) - conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme) - - kw['assert_same_host'] = False - kw['redirect'] = False - if 'headers' not in kw: - kw['headers'] = self.headers - - if self.proxy is not None and u.scheme == "http": - response = conn.urlopen(method, url, **kw) - else: - response = conn.urlopen(method, u.request_uri, **kw) - - redirect_location = redirect and response.get_redirect_location() - if not redirect_location: - return response - - # Support relative URLs for redirecting. - redirect_location = urljoin(url, redirect_location) - - # RFC 7231, Section 6.4.4 - if response.status == 303: - method = 'GET' - - retries = kw.get('retries') - if not isinstance(retries, Retry): - retries = Retry.from_int(retries, redirect=redirect) - - try: - retries = retries.increment(method, url, response=response, _pool=conn) - except MaxRetryError: - if retries.raise_on_redirect: - raise - return response - - kw['retries'] = retries - kw['redirect'] = redirect - - log.info("Redirecting %s -> %s", url, redirect_location) - return self.urlopen(method, redirect_location, **kw) - - -class ProxyManager(PoolManager): - """ - Behaves just like :class:`PoolManager`, but sends all requests through - the defined proxy, using the CONNECT method for HTTPS URLs. - - :param proxy_url: - The URL of the proxy to be used. - - :param proxy_headers: - A dictionary contaning headers that will be sent to the proxy. In case - of HTTP they are being sent with each request, while in the - HTTPS/CONNECT case they are sent only once. Could be used for proxy - authentication. 
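The urlopen() method above implements cross-host redirects itself: it turns off redirect handling in the underlying pool, inspects the response, resolves the Location header against the original URL, and re-dispatches to whichever pool the new host maps to, decrementing the Retry budget as it goes. A short usage sketch of the resulting behaviour (URLs are illustrative):

    import urllib3
    from urllib3.util.retry import Retry

    http = urllib3.PoolManager()

    # Follow redirects across hosts, at most three hops.
    r = http.request('GET', 'http://example.com/old',
                     redirect=True, retries=Retry(redirect=3))
    print(r.status)

    # Or surface the redirect instead of following it.
    r = http.request('GET', 'http://example.com/old', redirect=False)
    print(r.status, r.headers.get('Location'))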
- - Example: - >>> proxy = urllib3.ProxyManager('http://localhost:3128/') - >>> r1 = proxy.request('GET', 'http://google.com/') - >>> r2 = proxy.request('GET', 'http://httpbin.org/') - >>> len(proxy.pools) - 1 - >>> r3 = proxy.request('GET', 'https://httpbin.org/') - >>> r4 = proxy.request('GET', 'https://twitter.com/') - >>> len(proxy.pools) - 3 - - """ - - def __init__(self, proxy_url, num_pools=10, headers=None, - proxy_headers=None, **connection_pool_kw): - - if isinstance(proxy_url, HTTPConnectionPool): - proxy_url = '%s://%s:%i' % (proxy_url.scheme, proxy_url.host, - proxy_url.port) - proxy = parse_url(proxy_url) - if not proxy.port: - port = port_by_scheme.get(proxy.scheme, 80) - proxy = proxy._replace(port=port) - - if proxy.scheme not in ("http", "https"): - raise ProxySchemeUnknown(proxy.scheme) - - self.proxy = proxy - self.proxy_headers = proxy_headers or {} - - connection_pool_kw['_proxy'] = self.proxy - connection_pool_kw['_proxy_headers'] = self.proxy_headers - - super(ProxyManager, self).__init__( - num_pools, headers, **connection_pool_kw) - - def connection_from_host(self, host, port=None, scheme='http', pool_kwargs=None): - if scheme == "https": - return super(ProxyManager, self).connection_from_host( - host, port, scheme, pool_kwargs=pool_kwargs) - - return super(ProxyManager, self).connection_from_host( - self.proxy.host, self.proxy.port, self.proxy.scheme, pool_kwargs=pool_kwargs) - - def _set_proxy_headers(self, url, headers=None): - """ - Sets headers needed by proxies: specifically, the Accept and Host - headers. Only sets headers not provided by the user. - """ - headers_ = {'Accept': '*/*'} - - netloc = parse_url(url).netloc - if netloc: - headers_['Host'] = netloc - - if headers: - headers_.update(headers) - return headers_ - - def urlopen(self, method, url, redirect=True, **kw): - "Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute." - u = parse_url(url) - - if u.scheme == "http": - # For proxied HTTPS requests, httplib sets the necessary headers - # on the CONNECT to the proxy. For HTTP, we'll definitely - # need to set 'Host' at the very least. - headers = kw.get('headers', self.headers) - kw['headers'] = self._set_proxy_headers(url, headers) - - return super(ProxyManager, self).urlopen(method, url, redirect=redirect, **kw) - - -def proxy_from_url(url, **kw): - return ProxyManager(proxy_url=url, **kw) diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/request.py b/src/collectors/python.d.plugin/python_modules/urllib3/request.py deleted file mode 100644 index f78331975..000000000 --- a/src/collectors/python.d.plugin/python_modules/urllib3/request.py +++ /dev/null @@ -1,149 +0,0 @@ -# SPDX-License-Identifier: MIT -from __future__ import absolute_import - -from .filepost import encode_multipart_formdata -from .packages.six.moves.urllib.parse import urlencode - - -__all__ = ['RequestMethods'] - - -class RequestMethods(object): - """ - Convenience mixin for classes who implement a :meth:`urlopen` method, such - as :class:`~urllib3.connectionpool.HTTPConnectionPool` and - :class:`~urllib3.poolmanager.PoolManager`. - - Provides behavior for making common types of HTTP request methods and - decides which type of request field encoding to use. - - Specifically, - - :meth:`.request_encode_url` is for sending requests whose fields are - encoded in the URL (such as GET, HEAD, DELETE). 
- - :meth:`.request_encode_body` is for sending requests whose fields are - encoded in the *body* of the request using multipart or www-form-urlencoded - (such as for POST, PUT, PATCH). - - :meth:`.request` is for making any kind of request, it will look up the - appropriate encoding format and use one of the above two methods to make - the request. - - Initializer parameters: - - :param headers: - Headers to include with all requests, unless other headers are given - explicitly. - """ - - _encode_url_methods = set(['DELETE', 'GET', 'HEAD', 'OPTIONS']) - - def __init__(self, headers=None): - self.headers = headers or {} - - def urlopen(self, method, url, body=None, headers=None, - encode_multipart=True, multipart_boundary=None, - **kw): # Abstract - raise NotImplemented("Classes extending RequestMethods must implement " - "their own ``urlopen`` method.") - - def request(self, method, url, fields=None, headers=None, **urlopen_kw): - """ - Make a request using :meth:`urlopen` with the appropriate encoding of - ``fields`` based on the ``method`` used. - - This is a convenience method that requires the least amount of manual - effort. It can be used in most situations, while still having the - option to drop down to more specific methods when necessary, such as - :meth:`request_encode_url`, :meth:`request_encode_body`, - or even the lowest level :meth:`urlopen`. - """ - method = method.upper() - - if method in self._encode_url_methods: - return self.request_encode_url(method, url, fields=fields, - headers=headers, - **urlopen_kw) - else: - return self.request_encode_body(method, url, fields=fields, - headers=headers, - **urlopen_kw) - - def request_encode_url(self, method, url, fields=None, headers=None, - **urlopen_kw): - """ - Make a request using :meth:`urlopen` with the ``fields`` encoded in - the url. This is useful for request methods like GET, HEAD, DELETE, etc. - """ - if headers is None: - headers = self.headers - - extra_kw = {'headers': headers} - extra_kw.update(urlopen_kw) - - if fields: - url += '?' + urlencode(fields) - - return self.urlopen(method, url, **extra_kw) - - def request_encode_body(self, method, url, fields=None, headers=None, - encode_multipart=True, multipart_boundary=None, - **urlopen_kw): - """ - Make a request using :meth:`urlopen` with the ``fields`` encoded in - the body. This is useful for request methods like POST, PUT, PATCH, etc. - - When ``encode_multipart=True`` (default), then - :meth:`urllib3.filepost.encode_multipart_formdata` is used to encode - the payload with the appropriate content type. Otherwise - :meth:`urllib.urlencode` is used with the - 'application/x-www-form-urlencoded' content type. - - Multipart encoding must be used when posting files, and it's reasonably - safe to use it in other times too. However, it may break request - signing, such as with OAuth. - - Supports an optional ``fields`` parameter of key/value strings AND - key/filetuple. A filetuple is a (filename, data, MIME type) tuple where - the MIME type is optional. For example:: - - fields = { - 'foo': 'bar', - 'fakefile': ('foofile.txt', 'contents of foofile'), - 'realfile': ('barfile.txt', open('realfile').read()), - 'typedfile': ('bazfile.bin', open('bazfile').read(), - 'image/jpeg'), - 'nonamefile': 'contents of nonamefile field', - } - - When uploading a file, providing a filename (the first parameter of the - tuple) is optional but recommended to best mimick behavior of browsers. 
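A usage sketch of the method split described above, with the default multipart encoding (endpoint URLs are illustrative):

    import urllib3

    http = urllib3.PoolManager()

    # GET is in _encode_url_methods: fields are appended as the query string.
    r = http.request('GET', 'http://example.com/search',
                     fields={'q': 'netdata'})

    # POST is routed to request_encode_body: multipart/form-data by default.
    r = http.request('POST', 'http://example.com/upload',
                     fields={'file': ('report.txt', 'contents', 'text/plain')})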
- - Note that if ``headers`` are supplied, the 'Content-Type' header will - be overwritten because it depends on the dynamic random boundary string - which is used to compose the body of the request. The random boundary - string can be explicitly set with the ``multipart_boundary`` parameter. - """ - if headers is None: - headers = self.headers - - extra_kw = {'headers': {}} - - if fields: - if 'body' in urlopen_kw: - raise TypeError( - "request got values for both 'fields' and 'body', can only specify one.") - - if encode_multipart: - body, content_type = encode_multipart_formdata(fields, boundary=multipart_boundary) - else: - body, content_type = urlencode(fields), 'application/x-www-form-urlencoded' - - extra_kw['body'] = body - extra_kw['headers'] = {'Content-Type': content_type} - - extra_kw['headers'].update(headers) - extra_kw.update(urlopen_kw) - - return self.urlopen(method, url, **extra_kw) diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/response.py b/src/collectors/python.d.plugin/python_modules/urllib3/response.py deleted file mode 100644 index cf14a3076..000000000 --- a/src/collectors/python.d.plugin/python_modules/urllib3/response.py +++ /dev/null @@ -1,623 +0,0 @@ -# SPDX-License-Identifier: MIT -from __future__ import absolute_import -from contextlib import contextmanager -import zlib -import io -import logging -from socket import timeout as SocketTimeout -from socket import error as SocketError - -from ._collections import HTTPHeaderDict -from .exceptions import ( - BodyNotHttplibCompatible, ProtocolError, DecodeError, ReadTimeoutError, - ResponseNotChunked, IncompleteRead, InvalidHeader -) -from .packages.six import string_types as basestring, binary_type, PY3 -from .packages.six.moves import http_client as httplib -from .connection import HTTPException, BaseSSLError -from .util.response import is_fp_closed, is_response_to_head - -log = logging.getLogger(__name__) - - -class DeflateDecoder(object): - - def __init__(self): - self._first_try = True - self._data = binary_type() - self._obj = zlib.decompressobj() - - def __getattr__(self, name): - return getattr(self._obj, name) - - def decompress(self, data): - if not data: - return data - - if not self._first_try: - return self._obj.decompress(data) - - self._data += data - try: - decompressed = self._obj.decompress(data) - if decompressed: - self._first_try = False - self._data = None - return decompressed - except zlib.error: - self._first_try = False - self._obj = zlib.decompressobj(-zlib.MAX_WBITS) - try: - return self.decompress(self._data) - finally: - self._data = None - - -class GzipDecoder(object): - - def __init__(self): - self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS) - - def __getattr__(self, name): - return getattr(self._obj, name) - - def decompress(self, data): - if not data: - return data - return self._obj.decompress(data) - - -def _get_decoder(mode): - if mode == 'gzip': - return GzipDecoder() - - return DeflateDecoder() - - -class HTTPResponse(io.IOBase): - """ - HTTP Response container. - - Backwards-compatible to httplib's HTTPResponse but the response ``body`` is - loaded and decoded on-demand when the ``data`` property is accessed. This - class is also compatible with the Python standard library's :mod:`io` - module, and can hence be treated as a readable object in the context of that - framework. - - Extra parameters for behaviour not present in httplib.HTTPResponse: - - :param preload_content: - If True, the response's body will be preloaded during construction. 
- - :param decode_content: - If True, attempts to decode specific content-encoding's based on headers - (like 'gzip' and 'deflate') will be skipped and raw data will be used - instead. - - :param original_response: - When this HTTPResponse wrapper is generated from an httplib.HTTPResponse - object, it's convenient to include the original for debug purposes. It's - otherwise unused. - - :param retries: - The retries contains the last :class:`~urllib3.util.retry.Retry` that - was used during the request. - - :param enforce_content_length: - Enforce content length checking. Body returned by server must match - value of Content-Length header, if present. Otherwise, raise error. - """ - - CONTENT_DECODERS = ['gzip', 'deflate'] - REDIRECT_STATUSES = [301, 302, 303, 307, 308] - - def __init__(self, body='', headers=None, status=0, version=0, reason=None, - strict=0, preload_content=True, decode_content=True, - original_response=None, pool=None, connection=None, - retries=None, enforce_content_length=False, request_method=None): - - if isinstance(headers, HTTPHeaderDict): - self.headers = headers - else: - self.headers = HTTPHeaderDict(headers) - self.status = status - self.version = version - self.reason = reason - self.strict = strict - self.decode_content = decode_content - self.retries = retries - self.enforce_content_length = enforce_content_length - - self._decoder = None - self._body = None - self._fp = None - self._original_response = original_response - self._fp_bytes_read = 0 - - if body and isinstance(body, (basestring, binary_type)): - self._body = body - - self._pool = pool - self._connection = connection - - if hasattr(body, 'read'): - self._fp = body - - # Are we using the chunked-style of transfer encoding? - self.chunked = False - self.chunk_left = None - tr_enc = self.headers.get('transfer-encoding', '').lower() - # Don't incur the penalty of creating a list and then discarding it - encodings = (enc.strip() for enc in tr_enc.split(",")) - if "chunked" in encodings: - self.chunked = True - - # Determine length of response - self.length_remaining = self._init_length(request_method) - - # If requested, preload the body. - if preload_content and not self._body: - self._body = self.read(decode_content=decode_content) - - def get_redirect_location(self): - """ - Should we redirect and where to? - - :returns: Truthy redirect location string if we got a redirect status - code and valid location. ``None`` if redirect status and no - location. ``False`` if not a redirect status code. - """ - if self.status in self.REDIRECT_STATUSES: - return self.headers.get('location') - - return False - - def release_conn(self): - if not self._pool or not self._connection: - return - - self._pool._put_conn(self._connection) - self._connection = None - - @property - def data(self): - # For backwords-compat with earlier urllib3 0.4 and earlier. - if self._body: - return self._body - - if self._fp: - return self.read(cache_content=True) - - @property - def connection(self): - return self._connection - - def tell(self): - """ - Obtain the number of bytes pulled over the wire so far. May differ from - the amount of content returned by :meth:``HTTPResponse.read`` if bytes - are encoded on the wire (e.g, compressed). - """ - return self._fp_bytes_read - - def _init_length(self, request_method): - """ - Set initial length value for Response content if available. 
- """ - length = self.headers.get('content-length') - - if length is not None and self.chunked: - # This Response will fail with an IncompleteRead if it can't be - # received as chunked. This method falls back to attempt reading - # the response before raising an exception. - log.warning("Received response with both Content-Length and " - "Transfer-Encoding set. This is expressly forbidden " - "by RFC 7230 sec 3.3.2. Ignoring Content-Length and " - "attempting to process response as Transfer-Encoding: " - "chunked.") - return None - - elif length is not None: - try: - # RFC 7230 section 3.3.2 specifies multiple content lengths can - # be sent in a single Content-Length header - # (e.g. Content-Length: 42, 42). This line ensures the values - # are all valid ints and that as long as the `set` length is 1, - # all values are the same. Otherwise, the header is invalid. - lengths = set([int(val) for val in length.split(',')]) - if len(lengths) > 1: - raise InvalidHeader("Content-Length contained multiple " - "unmatching values (%s)" % length) - length = lengths.pop() - except ValueError: - length = None - else: - if length < 0: - length = None - - # Convert status to int for comparison - # In some cases, httplib returns a status of "_UNKNOWN" - try: - status = int(self.status) - except ValueError: - status = 0 - - # Check for responses that shouldn't include a body - if status in (204, 304) or 100 <= status < 200 or request_method == 'HEAD': - length = 0 - - return length - - def _init_decoder(self): - """ - Set-up the _decoder attribute if necessary. - """ - # Note: content-encoding value should be case-insensitive, per RFC 7230 - # Section 3.2 - content_encoding = self.headers.get('content-encoding', '').lower() - if self._decoder is None and content_encoding in self.CONTENT_DECODERS: - self._decoder = _get_decoder(content_encoding) - - def _decode(self, data, decode_content, flush_decoder): - """ - Decode the data passed in and potentially flush the decoder. - """ - try: - if decode_content and self._decoder: - data = self._decoder.decompress(data) - except (IOError, zlib.error) as e: - content_encoding = self.headers.get('content-encoding', '').lower() - raise DecodeError( - "Received response with content-encoding: %s, but " - "failed to decode it." % content_encoding, e) - - if flush_decoder and decode_content: - data += self._flush_decoder() - - return data - - def _flush_decoder(self): - """ - Flushes the decoder. Should only be called if the decoder is actually - being used. - """ - if self._decoder: - buf = self._decoder.decompress(b'') - return buf + self._decoder.flush() - - return b'' - - @contextmanager - def _error_catcher(self): - """ - Catch low-level python exceptions, instead re-raising urllib3 - variants, so that low-level exceptions are not leaked in the - high-level api. - - On exit, release the connection back to the pool. - """ - clean_exit = False - - try: - try: - yield - - except SocketTimeout: - # FIXME: Ideally we'd like to include the url in the ReadTimeoutError but - # there is yet no clean way to get at it from this context. - raise ReadTimeoutError(self._pool, None, 'Read timed out.') - - except BaseSSLError as e: - # FIXME: Is there a better way to differentiate between SSLErrors? - if 'read operation timed out' not in str(e): # Defensive: - # This shouldn't happen but just in case we're missing an edge - # case, let's avoid swallowing SSL errors. 
- raise - - raise ReadTimeoutError(self._pool, None, 'Read timed out.') - - except (HTTPException, SocketError) as e: - # This includes IncompleteRead. - raise ProtocolError('Connection broken: %r' % e, e) - - # If no exception is thrown, we should avoid cleaning up - # unnecessarily. - clean_exit = True - finally: - # If we didn't terminate cleanly, we need to throw away our - # connection. - if not clean_exit: - # The response may not be closed but we're not going to use it - # anymore so close it now to ensure that the connection is - # released back to the pool. - if self._original_response: - self._original_response.close() - - # Closing the response may not actually be sufficient to close - # everything, so if we have a hold of the connection close that - # too. - if self._connection: - self._connection.close() - - # If we hold the original response but it's closed now, we should - # return the connection back to the pool. - if self._original_response and self._original_response.isclosed(): - self.release_conn() - - def read(self, amt=None, decode_content=None, cache_content=False): - """ - Similar to :meth:`httplib.HTTPResponse.read`, but with two additional - parameters: ``decode_content`` and ``cache_content``. - - :param amt: - How much of the content to read. If specified, caching is skipped - because it doesn't make sense to cache partial content as the full - response. - - :param decode_content: - If True, will attempt to decode the body based on the - 'content-encoding' header. - - :param cache_content: - If True, will save the returned data such that the same result is - returned despite of the state of the underlying file object. This - is useful if you want the ``.data`` property to continue working - after having ``.read()`` the file object. (Overridden if ``amt`` is - set.) - """ - self._init_decoder() - if decode_content is None: - decode_content = self.decode_content - - if self._fp is None: - return - - flush_decoder = False - data = None - - with self._error_catcher(): - if amt is None: - # cStringIO doesn't like amt=None - data = self._fp.read() - flush_decoder = True - else: - cache_content = False - data = self._fp.read(amt) - if amt != 0 and not data: # Platform-specific: Buggy versions of Python. - # Close the connection when no data is returned - # - # This is redundant to what httplib/http.client _should_ - # already do. However, versions of python released before - # December 15, 2012 (http://bugs.python.org/issue16298) do - # not properly close the connection in all cases. There is - # no harm in redundantly calling close. - self._fp.close() - flush_decoder = True - if self.enforce_content_length and self.length_remaining not in (0, None): - # This is an edge case that httplib failed to cover due - # to concerns of backward compatibility. We're - # addressing it here to make sure IncompleteRead is - # raised during streaming, so all calls with incorrect - # Content-Length are caught. - raise IncompleteRead(self._fp_bytes_read, self.length_remaining) - - if data: - self._fp_bytes_read += len(data) - if self.length_remaining is not None: - self.length_remaining -= len(data) - - data = self._decode(data, decode_content, flush_decoder) - - if cache_content: - self._body = data - - return data - - def stream(self, amt=2**16, decode_content=None): - """ - A generator wrapper for the read() method. A call will block until - ``amt`` bytes have been read from the connection or until the - connection is closed. - - :param amt: - How much of the content to read. 
The generator will return up to
-            this much data per iteration, but may return less. This is
-            particularly likely when using compressed data. However, the
-            empty string will never be returned.
-
-        :param decode_content:
-            If True, will attempt to decode the body based on the
-            'content-encoding' header.
-        """
-        if self.chunked and self.supports_chunked_reads():
-            for line in self.read_chunked(amt, decode_content=decode_content):
-                yield line
-        else:
-            while not is_fp_closed(self._fp):
-                data = self.read(amt=amt, decode_content=decode_content)
-
-                if data:
-                    yield data
-
-    @classmethod
-    def from_httplib(ResponseCls, r, **response_kw):
-        """
-        Given an :class:`httplib.HTTPResponse` instance ``r``, return a
-        corresponding :class:`urllib3.response.HTTPResponse` object.
-
-        Remaining parameters are passed to the HTTPResponse constructor, along
-        with ``original_response=r``.
-        """
-        headers = r.msg
-
-        if not isinstance(headers, HTTPHeaderDict):
-            if PY3:  # Python 3
-                headers = HTTPHeaderDict(headers.items())
-            else:  # Python 2
-                headers = HTTPHeaderDict.from_httplib(headers)
-
-        # HTTPResponse objects in Python 3 don't have a .strict attribute
-        strict = getattr(r, 'strict', 0)
-        resp = ResponseCls(body=r,
-                           headers=headers,
-                           status=r.status,
-                           version=r.version,
-                           reason=r.reason,
-                           strict=strict,
-                           original_response=r,
-                           **response_kw)
-        return resp
-
-    # Backwards-compatibility methods for httplib.HTTPResponse
-    def getheaders(self):
-        return self.headers
-
-    def getheader(self, name, default=None):
-        return self.headers.get(name, default)
-
-    # Overrides from io.IOBase
-    def close(self):
-        if not self.closed:
-            self._fp.close()
-
-        if self._connection:
-            self._connection.close()
-
-    @property
-    def closed(self):
-        if self._fp is None:
-            return True
-        elif hasattr(self._fp, 'isclosed'):
-            return self._fp.isclosed()
-        elif hasattr(self._fp, 'closed'):
-            return self._fp.closed
-        else:
-            return True
-
-    def fileno(self):
-        if self._fp is None:
-            raise IOError("HTTPResponse has no file to get a fileno from")
-        elif hasattr(self._fp, "fileno"):
-            return self._fp.fileno()
-        else:
-            raise IOError("The file-like object this HTTPResponse is wrapped "
-                          "around has no file descriptor")
-
-    def flush(self):
-        if self._fp is not None and hasattr(self._fp, 'flush'):
-            return self._fp.flush()
-
-    def readable(self):
-        # This method is required for `io` module compatibility.
-        return True
-
-    def readinto(self, b):
-        # This method is required for `io` module compatibility.
-        temp = self.read(len(b))
-        if len(temp) == 0:
-            return 0
-        else:
-            b[:len(temp)] = temp
-            return len(temp)
-
-    def supports_chunked_reads(self):
-        """
-        Checks if the underlying file-like object looks like an
-        httplib.HTTPResponse object. We do this by testing for the fp
-        attribute. If it is present we assume it returns raw chunks as
-        processed by read_chunked().
-        """
-        return hasattr(self._fp, 'fp')
-
-    def _update_chunk_length(self):
-        # First, we'll figure out the length of a chunk and then
-        # we'll try to read it from the socket.
-        if self.chunk_left is not None:
-            return
-        line = self._fp.fp.readline()
-        line = line.split(b';', 1)[0]
-        try:
-            self.chunk_left = int(line, 16)
-        except ValueError:
-            # Invalid chunked protocol response, abort.
-            self.close()
-            raise httplib.IncompleteRead(line)
-
-    def _handle_chunk(self, amt):
-        returned_chunk = None
-        if amt is None:
-            chunk = self._fp._safe_read(self.chunk_left)
-            returned_chunk = chunk
-            self._fp._safe_read(2)  # Toss the CRLF at the end of the chunk.
-            self.chunk_left = None
-        elif amt < self.chunk_left:
-            value = self._fp._safe_read(amt)
-            self.chunk_left = self.chunk_left - amt
-            returned_chunk = value
-        elif amt == self.chunk_left:
-            value = self._fp._safe_read(amt)
-            self._fp._safe_read(2)  # Toss the CRLF at the end of the chunk.
-            self.chunk_left = None
-            returned_chunk = value
-        else:  # amt > self.chunk_left
-            returned_chunk = self._fp._safe_read(self.chunk_left)
-            self._fp._safe_read(2)  # Toss the CRLF at the end of the chunk.
-            self.chunk_left = None
-        return returned_chunk
-
-    def read_chunked(self, amt=None, decode_content=None):
-        """
-        Similar to :meth:`HTTPResponse.read`, but with an additional
-        parameter: ``decode_content``.
-
-        :param decode_content:
-            If True, will attempt to decode the body based on the
-            'content-encoding' header.
-        """
-        self._init_decoder()
-        # FIXME: Rewrite this method and make it a class with a better structured logic.
-        if not self.chunked:
-            raise ResponseNotChunked(
-                "Response is not chunked. "
-                "Header 'transfer-encoding: chunked' is missing.")
-        if not self.supports_chunked_reads():
-            raise BodyNotHttplibCompatible(
-                "Body should be httplib.HTTPResponse like. "
-                "It should have an fp attribute which returns raw chunks.")
-
-        # Don't bother reading the body of a HEAD request.
-        if self._original_response and is_response_to_head(self._original_response):
-            self._original_response.close()
-            return
-
-        with self._error_catcher():
-            while True:
-                self._update_chunk_length()
-                if self.chunk_left == 0:
-                    break
-                chunk = self._handle_chunk(amt)
-                decoded = self._decode(chunk, decode_content=decode_content,
-                                       flush_decoder=False)
-                if decoded:
-                    yield decoded
-
-            if decode_content:
-                # On CPython and PyPy, we should never need to flush the
-                # decoder. However, on Jython we *might* need to, so
-                # let's defensively do it anyway.
-                decoded = self._flush_decoder()
-                if decoded:  # Platform-specific: Jython.
-                    yield decoded
-
-            # Chunk content ends with \r\n: discard it.
-            while True:
-                line = self._fp.fp.readline()
-                if not line:
-                    # Some sites may not end with '\r\n'.
-                    break
-                if line == b'\r\n':
-                    break
-
-            # We read everything; close the "file".
-            if self._original_response:
-                self._original_response.close()
diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/util/__init__.py b/src/collectors/python.d.plugin/python_modules/urllib3/util/__init__.py
deleted file mode 100644
index bba628d98..000000000
--- a/src/collectors/python.d.plugin/python_modules/urllib3/util/__init__.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# SPDX-License-Identifier: MIT
-from __future__ import absolute_import
-# For backwards compatibility, provide imports that used to be here.
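
The chunk-size lines consumed by _update_chunk_length above are hexadecimal and may carry ";name=value" chunk extensions, which are discarded before conversion; a size of zero marks the terminating chunk. A minimal standalone sketch of that parse:

    def parse_chunk_size(line):
        # Illustrative helper: strip any ";name=value" chunk extensions,
        # then read the size as hexadecimal (RFC 7230 sec 4.1).
        line = line.split(b';', 1)[0]
        return int(line, 16)

    assert parse_chunk_size(b"1a2b\r\n") == 0x1a2b
    assert parse_chunk_size(b"4;name=value\r\n") == 4
    assert parse_chunk_size(b"0\r\n") == 0   # the terminating chunk
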
-from .connection import is_connection_dropped -from .request import make_headers -from .response import is_fp_closed -from .ssl_ import ( - SSLContext, - HAS_SNI, - IS_PYOPENSSL, - IS_SECURETRANSPORT, - assert_fingerprint, - resolve_cert_reqs, - resolve_ssl_version, - ssl_wrap_socket, -) -from .timeout import ( - current_time, - Timeout, -) - -from .retry import Retry -from .url import ( - get_host, - parse_url, - split_first, - Url, -) -from .wait import ( - wait_for_read, - wait_for_write -) - -__all__ = ( - 'HAS_SNI', - 'IS_PYOPENSSL', - 'IS_SECURETRANSPORT', - 'SSLContext', - 'Retry', - 'Timeout', - 'Url', - 'assert_fingerprint', - 'current_time', - 'is_connection_dropped', - 'is_fp_closed', - 'get_host', - 'parse_url', - 'make_headers', - 'resolve_cert_reqs', - 'resolve_ssl_version', - 'split_first', - 'ssl_wrap_socket', - 'wait_for_read', - 'wait_for_write' -) diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/util/connection.py b/src/collectors/python.d.plugin/python_modules/urllib3/util/connection.py deleted file mode 100644 index 3bd69e8fa..000000000 --- a/src/collectors/python.d.plugin/python_modules/urllib3/util/connection.py +++ /dev/null @@ -1,131 +0,0 @@ -# SPDX-License-Identifier: MIT -from __future__ import absolute_import -import socket -from .wait import wait_for_read -from .selectors import HAS_SELECT, SelectorError - - -def is_connection_dropped(conn): # Platform-specific - """ - Returns True if the connection is dropped and should be closed. - - :param conn: - :class:`httplib.HTTPConnection` object. - - Note: For platforms like AppEngine, this will always return ``False`` to - let the platform handle connection recycling transparently for us. - """ - sock = getattr(conn, 'sock', False) - if sock is False: # Platform-specific: AppEngine - return False - if sock is None: # Connection already closed (such as by httplib). - return True - - if not HAS_SELECT: - return False - - try: - return bool(wait_for_read(sock, timeout=0.0)) - except SelectorError: - return True - - -# This function is copied from socket.py in the Python 2.7 standard -# library test suite. Added to its signature is only `socket_options`. -# One additional modification is that we avoid binding to IPv6 servers -# discovered in DNS if the system doesn't have IPv6 functionality. -def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, - source_address=None, socket_options=None): - """Connect to *address* and return the socket object. - - Convenience function. Connect to *address* (a 2-tuple ``(host, - port)``) and return the socket object. Passing the optional - *timeout* parameter will set the timeout on the socket instance - before attempting to connect. If no *timeout* is supplied, the - global default timeout setting returned by :func:`getdefaulttimeout` - is used. If *source_address* is set it must be a tuple of (host, port) - for the socket to bind as a source address before making the connection. - An host of '' or port 0 tells the OS to use the default. - """ - - host, port = address - if host.startswith('['): - host = host.strip('[]') - err = None - - # Using the value from allowed_gai_family() in the context of getaddrinfo lets - # us select whether to work with IPv4 DNS records, IPv6 records, or both. - # The original create_connection function always returns all records. 
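
is_connection_dropped above reduces to a zero-timeout readability poll: an idle keep-alive socket should not be readable, so readability means the peer either sent unexpected data or closed its end. A minimal sketch using only the standard library (the host and the helper name are illustrative, and running it assumes network access):

    import select
    import socket

    def looks_dropped(sock):
        # A readable idle socket means early data or EOF from the peer;
        # either way the connection should not be reused.
        readable, _, _ = select.select([sock], [], [], 0.0)
        return bool(readable)

    sock = socket.create_connection(("example.com", 80), timeout=5)
    print(looks_dropped(sock))  # expected False while the server holds the socket open
    sock.close()
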
- family = allowed_gai_family() - - for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM): - af, socktype, proto, canonname, sa = res - sock = None - try: - sock = socket.socket(af, socktype, proto) - - # If provided, set socket level options before connecting. - _set_socket_options(sock, socket_options) - - if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT: - sock.settimeout(timeout) - if source_address: - sock.bind(source_address) - sock.connect(sa) - return sock - - except socket.error as e: - err = e - if sock is not None: - sock.close() - sock = None - - if err is not None: - raise err - - raise socket.error("getaddrinfo returns an empty list") - - -def _set_socket_options(sock, options): - if options is None: - return - - for opt in options: - sock.setsockopt(*opt) - - -def allowed_gai_family(): - """This function is designed to work in the context of - getaddrinfo, where family=socket.AF_UNSPEC is the default and - will perform a DNS search for both IPv6 and IPv4 records.""" - - family = socket.AF_INET - if HAS_IPV6: - family = socket.AF_UNSPEC - return family - - -def _has_ipv6(host): - """ Returns True if the system can bind an IPv6 address. """ - sock = None - has_ipv6 = False - - if socket.has_ipv6: - # has_ipv6 returns true if cPython was compiled with IPv6 support. - # It does not tell us if the system has IPv6 support enabled. To - # determine that we must bind to an IPv6 address. - # https://github.com/shazow/urllib3/pull/611 - # https://bugs.python.org/issue658327 - try: - sock = socket.socket(socket.AF_INET6) - sock.bind((host, 0)) - has_ipv6 = True - except Exception: - pass - - if sock: - sock.close() - return has_ipv6 - - -HAS_IPV6 = _has_ipv6('::1') diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/util/request.py b/src/collectors/python.d.plugin/python_modules/urllib3/util/request.py deleted file mode 100644 index 18f27b032..000000000 --- a/src/collectors/python.d.plugin/python_modules/urllib3/util/request.py +++ /dev/null @@ -1,119 +0,0 @@ -# SPDX-License-Identifier: MIT -from __future__ import absolute_import -from base64 import b64encode - -from ..packages.six import b, integer_types -from ..exceptions import UnrewindableBodyError - -ACCEPT_ENCODING = 'gzip,deflate' -_FAILEDTELL = object() - - -def make_headers(keep_alive=None, accept_encoding=None, user_agent=None, - basic_auth=None, proxy_basic_auth=None, disable_cache=None): - """ - Shortcuts for generating request headers. - - :param keep_alive: - If ``True``, adds 'connection: keep-alive' header. - - :param accept_encoding: - Can be a boolean, list, or string. - ``True`` translates to 'gzip,deflate'. - List will get joined by comma. - String will be used as provided. - - :param user_agent: - String representing the user-agent you want, such as - "python-urllib3/0.6" - - :param basic_auth: - Colon-separated username:password string for 'authorization: basic ...' - auth header. - - :param proxy_basic_auth: - Colon-separated username:password string for 'proxy-authorization: basic ...' - auth header. - - :param disable_cache: - If ``True``, adds 'cache-control: no-cache' header. 
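
The _has_ipv6 probe deleted above binds a real AF_INET6 socket because socket.has_ipv6 only reports compile-time support, not whether the running system has IPv6 enabled. A standalone sketch of the same check (the helper name is illustrative):

    import socket

    def can_bind_ipv6(host="::1"):
        # socket.has_ipv6 is True when the interpreter was *compiled* with
        # IPv6; binding proves the running system actually supports it.
        if not socket.has_ipv6:
            return False
        try:
            with socket.socket(socket.AF_INET6) as sock:
                sock.bind((host, 0))
            return True
        except OSError:
            return False

    print(can_bind_ipv6())
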
- - Example:: - - >>> make_headers(keep_alive=True, user_agent="Batman/1.0") - {'connection': 'keep-alive', 'user-agent': 'Batman/1.0'} - >>> make_headers(accept_encoding=True) - {'accept-encoding': 'gzip,deflate'} - """ - headers = {} - if accept_encoding: - if isinstance(accept_encoding, str): - pass - elif isinstance(accept_encoding, list): - accept_encoding = ','.join(accept_encoding) - else: - accept_encoding = ACCEPT_ENCODING - headers['accept-encoding'] = accept_encoding - - if user_agent: - headers['user-agent'] = user_agent - - if keep_alive: - headers['connection'] = 'keep-alive' - - if basic_auth: - headers['authorization'] = 'Basic ' + \ - b64encode(b(basic_auth)).decode('utf-8') - - if proxy_basic_auth: - headers['proxy-authorization'] = 'Basic ' + \ - b64encode(b(proxy_basic_auth)).decode('utf-8') - - if disable_cache: - headers['cache-control'] = 'no-cache' - - return headers - - -def set_file_position(body, pos): - """ - If a position is provided, move file to that point. - Otherwise, we'll attempt to record a position for future use. - """ - if pos is not None: - rewind_body(body, pos) - elif getattr(body, 'tell', None) is not None: - try: - pos = body.tell() - except (IOError, OSError): - # This differentiates from None, allowing us to catch - # a failed `tell()` later when trying to rewind the body. - pos = _FAILEDTELL - - return pos - - -def rewind_body(body, body_pos): - """ - Attempt to rewind body to a certain position. - Primarily used for request redirects and retries. - - :param body: - File-like object that supports seek. - - :param int pos: - Position to seek to in file. - """ - body_seek = getattr(body, 'seek', None) - if body_seek is not None and isinstance(body_pos, integer_types): - try: - body_seek(body_pos) - except (IOError, OSError): - raise UnrewindableBodyError("An error occurred when rewinding request " - "body for redirect/retry.") - elif body_pos is _FAILEDTELL: - raise UnrewindableBodyError("Unable to record file position for rewinding " - "request body during a redirect/retry.") - else: - raise ValueError("body_pos must be of type integer, " - "instead it was %s." % type(body_pos)) diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/util/response.py b/src/collectors/python.d.plugin/python_modules/urllib3/util/response.py deleted file mode 100644 index e4cda93d4..000000000 --- a/src/collectors/python.d.plugin/python_modules/urllib3/util/response.py +++ /dev/null @@ -1,82 +0,0 @@ -# SPDX-License-Identifier: MIT -from __future__ import absolute_import -from ..packages.six.moves import http_client as httplib - -from ..exceptions import HeaderParsingError - - -def is_fp_closed(obj): - """ - Checks whether a given file-like object is closed. - - :param obj: - The file-like object to check. - """ - - try: - # Check `isclosed()` first, in case Python3 doesn't set `closed`. - # GH Issue #928 - return obj.isclosed() - except AttributeError: - pass - - try: - # Check via the official file-like-object way. - return obj.closed - except AttributeError: - pass - - try: - # Check if the object is a container for another file-like object that - # gets released on exhaustion (e.g. HTTPResponse). - return obj.fp is None - except AttributeError: - pass - - raise ValueError("Unable to determine whether fp is closed.") - - -def assert_header_parsing(headers): - """ - Asserts whether all headers have been successfully parsed. - Extracts encountered errors from the result of parsing headers. - - Only works on Python 3. 
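
set_file_position and rewind_body above implement a record-then-rewind pattern: capture the body's offset before the first send, then seek back to it before a redirect or retry. A minimal sketch of that pattern (function names are illustrative):

    import io

    def remember_position(body):
        # Mirrors set_file_position: record where the body starts.
        return body.tell()

    def rewind(body, pos):
        # Mirrors rewind_body: seek back before resending.
        body.seek(pos)

    body = io.BytesIO(b"payload")
    pos = remember_position(body)
    body.read()           # the first attempt consumes the body
    rewind(body, pos)     # a redirect/retry resends from the same offset
    assert body.read() == b"payload"
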
- - :param headers: Headers to verify. - :type headers: `httplib.HTTPMessage`. - - :raises urllib3.exceptions.HeaderParsingError: - If parsing errors are found. - """ - - # This will fail silently if we pass in the wrong kind of parameter. - # To make debugging easier add an explicit check. - if not isinstance(headers, httplib.HTTPMessage): - raise TypeError('expected httplib.Message, got {0}.'.format( - type(headers))) - - defects = getattr(headers, 'defects', None) - get_payload = getattr(headers, 'get_payload', None) - - unparsed_data = None - if get_payload: # Platform-specific: Python 3. - unparsed_data = get_payload() - - if defects or unparsed_data: - raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data) - - -def is_response_to_head(response): - """ - Checks whether the request of a response has been a HEAD-request. - Handles the quirks of AppEngine. - - :param conn: - :type conn: :class:`httplib.HTTPResponse` - """ - # FIXME: Can we do this somehow without accessing private httplib _method? - method = response._method - if isinstance(method, int): # Platform-specific: Appengine - return method == 3 - return method.upper() == 'HEAD' diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/util/retry.py b/src/collectors/python.d.plugin/python_modules/urllib3/util/retry.py deleted file mode 100644 index 61e63afec..000000000 --- a/src/collectors/python.d.plugin/python_modules/urllib3/util/retry.py +++ /dev/null @@ -1,402 +0,0 @@ -# SPDX-License-Identifier: MIT -from __future__ import absolute_import -import time -import logging -from collections import namedtuple -from itertools import takewhile -import email -import re - -from ..exceptions import ( - ConnectTimeoutError, - MaxRetryError, - ProtocolError, - ReadTimeoutError, - ResponseError, - InvalidHeader, -) -from ..packages import six - - -log = logging.getLogger(__name__) - -# Data structure for representing the metadata of requests that result in a retry. -RequestHistory = namedtuple('RequestHistory', ["method", "url", "error", - "status", "redirect_location"]) - - -class Retry(object): - """ Retry configuration. - - Each retry attempt will create a new Retry object with updated values, so - they can be safely reused. - - Retries can be defined as a default for a pool:: - - retries = Retry(connect=5, read=2, redirect=5) - http = PoolManager(retries=retries) - response = http.request('GET', 'http://example.com/') - - Or per-request (which overrides the default for the pool):: - - response = http.request('GET', 'http://example.com/', retries=Retry(10)) - - Retries can be disabled by passing ``False``:: - - response = http.request('GET', 'http://example.com/', retries=False) - - Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless - retries are disabled, in which case the causing exception will be raised. - - :param int total: - Total number of retries to allow. Takes precedence over other counts. - - Set to ``None`` to remove this constraint and fall back on other - counts. It's a good idea to set this to some sensibly-high value to - account for unexpected edge cases and avoid infinite retry loops. - - Set to ``0`` to fail on the first retry. - - Set to ``False`` to disable and imply ``raise_on_redirect=False``. - - :param int connect: - How many connection-related errors to retry on. - - These are errors raised before the request is sent to the remote server, - which we assume has not triggered the server to process the request. 
- - Set to ``0`` to fail on the first retry of this type. - - :param int read: - How many times to retry on read errors. - - These errors are raised after the request was sent to the server, so the - request may have side-effects. - - Set to ``0`` to fail on the first retry of this type. - - :param int redirect: - How many redirects to perform. Limit this to avoid infinite redirect - loops. - - A redirect is a HTTP response with a status code 301, 302, 303, 307 or - 308. - - Set to ``0`` to fail on the first retry of this type. - - Set to ``False`` to disable and imply ``raise_on_redirect=False``. - - :param int status: - How many times to retry on bad status codes. - - These are retries made on responses, where status code matches - ``status_forcelist``. - - Set to ``0`` to fail on the first retry of this type. - - :param iterable method_whitelist: - Set of uppercased HTTP method verbs that we should retry on. - - By default, we only retry on methods which are considered to be - idempotent (multiple requests with the same parameters end with the - same state). See :attr:`Retry.DEFAULT_METHOD_WHITELIST`. - - Set to a ``False`` value to retry on any verb. - - :param iterable status_forcelist: - A set of integer HTTP status codes that we should force a retry on. - A retry is initiated if the request method is in ``method_whitelist`` - and the response status code is in ``status_forcelist``. - - By default, this is disabled with ``None``. - - :param float backoff_factor: - A backoff factor to apply between attempts after the second try - (most errors are resolved immediately by a second try without a - delay). urllib3 will sleep for:: - - {backoff factor} * (2 ^ ({number of total retries} - 1)) - - seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep - for [0.0s, 0.2s, 0.4s, ...] between retries. It will never be longer - than :attr:`Retry.BACKOFF_MAX`. - - By default, backoff is disabled (set to 0). - - :param bool raise_on_redirect: Whether, if the number of redirects is - exhausted, to raise a MaxRetryError, or to return a response with a - response code in the 3xx range. - - :param bool raise_on_status: Similar meaning to ``raise_on_redirect``: - whether we should raise an exception, or return a response, - if status falls in ``status_forcelist`` range and retries have - been exhausted. - - :param tuple history: The history of the request encountered during - each call to :meth:`~Retry.increment`. The list is in the order - the requests occurred. Each list item is of class :class:`RequestHistory`. - - :param bool respect_retry_after_header: - Whether to respect Retry-After header on status codes defined as - :attr:`Retry.RETRY_AFTER_STATUS_CODES` or not. - - """ - - DEFAULT_METHOD_WHITELIST = frozenset([ - 'HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS', 'TRACE']) - - RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503]) - - #: Maximum backoff time. 
- BACKOFF_MAX = 120 - - def __init__(self, total=10, connect=None, read=None, redirect=None, status=None, - method_whitelist=DEFAULT_METHOD_WHITELIST, status_forcelist=None, - backoff_factor=0, raise_on_redirect=True, raise_on_status=True, - history=None, respect_retry_after_header=True): - - self.total = total - self.connect = connect - self.read = read - self.status = status - - if redirect is False or total is False: - redirect = 0 - raise_on_redirect = False - - self.redirect = redirect - self.status_forcelist = status_forcelist or set() - self.method_whitelist = method_whitelist - self.backoff_factor = backoff_factor - self.raise_on_redirect = raise_on_redirect - self.raise_on_status = raise_on_status - self.history = history or tuple() - self.respect_retry_after_header = respect_retry_after_header - - def new(self, **kw): - params = dict( - total=self.total, - connect=self.connect, read=self.read, redirect=self.redirect, status=self.status, - method_whitelist=self.method_whitelist, - status_forcelist=self.status_forcelist, - backoff_factor=self.backoff_factor, - raise_on_redirect=self.raise_on_redirect, - raise_on_status=self.raise_on_status, - history=self.history, - ) - params.update(kw) - return type(self)(**params) - - @classmethod - def from_int(cls, retries, redirect=True, default=None): - """ Backwards-compatibility for the old retries format.""" - if retries is None: - retries = default if default is not None else cls.DEFAULT - - if isinstance(retries, Retry): - return retries - - redirect = bool(redirect) and None - new_retries = cls(retries, redirect=redirect) - log.debug("Converted retries value: %r -> %r", retries, new_retries) - return new_retries - - def get_backoff_time(self): - """ Formula for computing the current backoff - - :rtype: float - """ - # We want to consider only the last consecutive errors sequence (Ignore redirects). - consecutive_errors_len = len(list(takewhile(lambda x: x.redirect_location is None, - reversed(self.history)))) - if consecutive_errors_len <= 1: - return 0 - - backoff_value = self.backoff_factor * (2 ** (consecutive_errors_len - 1)) - return min(self.BACKOFF_MAX, backoff_value) - - def parse_retry_after(self, retry_after): - # Whitespace: https://tools.ietf.org/html/rfc7230#section-3.2.4 - if re.match(r"^\s*[0-9]+\s*$", retry_after): - seconds = int(retry_after) - else: - retry_date_tuple = email.utils.parsedate(retry_after) - if retry_date_tuple is None: - raise InvalidHeader("Invalid Retry-After header: %s" % retry_after) - retry_date = time.mktime(retry_date_tuple) - seconds = retry_date - time.time() - - if seconds < 0: - seconds = 0 - - return seconds - - def get_retry_after(self, response): - """ Get the value of Retry-After in seconds. """ - - retry_after = response.getheader("Retry-After") - - if retry_after is None: - return None - - return self.parse_retry_after(retry_after) - - def sleep_for_retry(self, response=None): - retry_after = self.get_retry_after(response) - if retry_after: - time.sleep(retry_after) - return True - - return False - - def _sleep_backoff(self): - backoff = self.get_backoff_time() - if backoff <= 0: - return - time.sleep(backoff) - - def sleep(self, response=None): - """ Sleep between retry attempts. - - This method will respect a server's ``Retry-After`` response header - and sleep the duration of the time requested. If that is not present, it - will use an exponential backoff. By default, the backoff factor is 0 and - this method will return immediately. 
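
A worked example of the backoff formula documented above, restated as a standalone function equivalent in spirit to get_backoff_time: no delay until the second consecutive error, then backoff_factor * 2 ** (n - 1), capped at BACKOFF_MAX:

    BACKOFF_MAX = 120

    def backoff_time(backoff_factor, consecutive_errors):
        # No delay until the second consecutive error, then exponential
        # growth capped at BACKOFF_MAX, matching get_backoff_time above.
        if consecutive_errors <= 1:
            return 0
        return min(BACKOFF_MAX, backoff_factor * (2 ** (consecutive_errors - 1)))

    print([backoff_time(0.1, n) for n in range(1, 6)])
    # -> [0, 0.2, 0.4, 0.8, 1.6]
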
- """ - - if response: - slept = self.sleep_for_retry(response) - if slept: - return - - self._sleep_backoff() - - def _is_connection_error(self, err): - """ Errors when we're fairly sure that the server did not receive the - request, so it should be safe to retry. - """ - return isinstance(err, ConnectTimeoutError) - - def _is_read_error(self, err): - """ Errors that occur after the request has been started, so we should - assume that the server began processing it. - """ - return isinstance(err, (ReadTimeoutError, ProtocolError)) - - def _is_method_retryable(self, method): - """ Checks if a given HTTP method should be retried upon, depending if - it is included on the method whitelist. - """ - if self.method_whitelist and method.upper() not in self.method_whitelist: - return False - - return True - - def is_retry(self, method, status_code, has_retry_after=False): - """ Is this method/status code retryable? (Based on whitelists and control - variables such as the number of total retries to allow, whether to - respect the Retry-After header, whether this header is present, and - whether the returned status code is on the list of status codes to - be retried upon on the presence of the aforementioned header) - """ - if not self._is_method_retryable(method): - return False - - if self.status_forcelist and status_code in self.status_forcelist: - return True - - return (self.total and self.respect_retry_after_header and - has_retry_after and (status_code in self.RETRY_AFTER_STATUS_CODES)) - - def is_exhausted(self): - """ Are we out of retries? """ - retry_counts = (self.total, self.connect, self.read, self.redirect, self.status) - retry_counts = list(filter(None, retry_counts)) - if not retry_counts: - return False - - return min(retry_counts) < 0 - - def increment(self, method=None, url=None, response=None, error=None, - _pool=None, _stacktrace=None): - """ Return a new Retry object with incremented retry counters. - - :param response: A response object, or None, if the server did not - return a response. - :type response: :class:`~urllib3.response.HTTPResponse` - :param Exception error: An error encountered during the request, or - None if the response was received successfully. - - :return: A new ``Retry`` object. - """ - if self.total is False and error: - # Disabled, indicate to re-raise the error. - raise six.reraise(type(error), error, _stacktrace) - - total = self.total - if total is not None: - total -= 1 - - connect = self.connect - read = self.read - redirect = self.redirect - status_count = self.status - cause = 'unknown' - status = None - redirect_location = None - - if error and self._is_connection_error(error): - # Connect retry? - if connect is False: - raise six.reraise(type(error), error, _stacktrace) - elif connect is not None: - connect -= 1 - - elif error and self._is_read_error(error): - # Read retry? - if read is False or not self._is_method_retryable(method): - raise six.reraise(type(error), error, _stacktrace) - elif read is not None: - read -= 1 - - elif response and response.get_redirect_location(): - # Redirect retry? 
-            if redirect is not None:
-                redirect -= 1
-                cause = 'too many redirects'
-                redirect_location = response.get_redirect_location()
-                status = response.status
-
-        else:
-            # Incrementing because of a server error like a 500 in
-            # status_forcelist and the given method is in the whitelist
-            cause = ResponseError.GENERIC_ERROR
-            if response and response.status:
-                if status_count is not None:
-                    status_count -= 1
-                cause = ResponseError.SPECIFIC_ERROR.format(
-                    status_code=response.status)
-                status = response.status
-
-        history = self.history + (RequestHistory(method, url, error, status, redirect_location),)
-
-        new_retry = self.new(
-            total=total,
-            connect=connect, read=read, redirect=redirect, status=status_count,
-            history=history)
-
-        if new_retry.is_exhausted():
-            raise MaxRetryError(_pool, url, error or ResponseError(cause))
-
-        log.debug("Incremented Retry for (url='%s'): %r", url, new_retry)
-
-        return new_retry
-
-    def __repr__(self):
-        return ('{cls.__name__}(total={self.total}, connect={self.connect}, '
-                'read={self.read}, redirect={self.redirect}, status={self.status})').format(
-                    cls=type(self), self=self)
-
-
-# For backwards compatibility (equivalent to pre-v1.9):
-Retry.DEFAULT = Retry(3)
diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/util/selectors.py b/src/collectors/python.d.plugin/python_modules/urllib3/util/selectors.py
deleted file mode 100644
index de5e49838..000000000
--- a/src/collectors/python.d.plugin/python_modules/urllib3/util/selectors.py
+++ /dev/null
@@ -1,588 +0,0 @@
-# SPDX-License-Identifier: MIT
-# Backport of selectors.py from Python 3.5+ to support Python < 3.4
-# Also has the behavior specified in PEP 475 which is to retry syscalls
-# in the case of an EINTR error. This module is required because selectors34
-# does not follow this behavior and instead returns that no file descriptor
-# events have occurred rather than retry the syscall. The decision to drop
-# support for select.devpoll is made to maintain 100% test coverage.
-
-import errno
-import math
-import select
-import socket
-import sys
-import time
-
-from collections import namedtuple
-
-try:
-    from collections import Mapping
-except ImportError:
-    from collections.abc import Mapping
-
-try:
-    monotonic = time.monotonic
-except (AttributeError, ImportError):  # Python < 3.3
-    monotonic = time.time
-
-EVENT_READ = (1 << 0)
-EVENT_WRITE = (1 << 1)
-
-HAS_SELECT = True  # Variable that shows whether the platform has a selector.
-_SYSCALL_SENTINEL = object()  # Sentinel in case a system call returns None.
-_DEFAULT_SELECTOR = None
-
-
-class SelectorError(Exception):
-    def __init__(self, errcode):
-        super(SelectorError, self).__init__()
-        self.errno = errcode
-
-    def __repr__(self):
-        return "<SelectorError errno={0}>".format(self.errno)
-
-    def __str__(self):
-        return self.__repr__()
-
-
-def _fileobj_to_fd(fileobj):
-    """ Return a file descriptor from a file object. If
-    given an integer will simply return that integer back. """
-    if isinstance(fileobj, int):
-        fd = fileobj
-    else:
-        try:
-            fd = int(fileobj.fileno())
-        except (AttributeError, TypeError, ValueError):
-            raise ValueError("Invalid file object: {0!r}".format(fileobj))
-    if fd < 0:
-        raise ValueError("Invalid file descriptor: {0}".format(fd))
-    return fd
-
-
-# Determine which function to use to wrap system calls because Python 3.5+
-# already handles the case when system calls are interrupted.
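
The retry-on-EINTR behavior described in that comment can be sketched independently: interpreters before 3.5 surface an interrupted select() as an error with errno EINTR instead of restarting it, so the call is looped with a recomputed timeout, which is what the _syscall_wrapper below does in more general form. A compact sketch assuming Python 3.3/3.4 semantics, where select.error is an alias of OSError:

    import errno
    import select
    import time

    def select_retrying(rlist, timeout):
        # Retry select() when a signal interrupts it (EINTR), shrinking
        # the timeout by the time already spent, per PEP 475.
        deadline = time.time() + timeout
        while True:
            remaining = max(0.0, deadline - time.time())
            try:
                return select.select(rlist, [], [], remaining)
            except OSError as e:
                if e.errno != errno.EINTR:
                    raise
                # Interrupted by a signal: loop with the recomputed timeout.
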
-if sys.version_info >= (3, 5): - def _syscall_wrapper(func, _, *args, **kwargs): - """ This is the short-circuit version of the below logic - because in Python 3.5+ all system calls automatically restart - and recalculate their timeouts. """ - try: - return func(*args, **kwargs) - except (OSError, IOError, select.error) as e: - errcode = None - if hasattr(e, "errno"): - errcode = e.errno - raise SelectorError(errcode) -else: - def _syscall_wrapper(func, recalc_timeout, *args, **kwargs): - """ Wrapper function for syscalls that could fail due to EINTR. - All functions should be retried if there is time left in the timeout - in accordance with PEP 475. """ - timeout = kwargs.get("timeout", None) - if timeout is None: - expires = None - recalc_timeout = False - else: - timeout = float(timeout) - if timeout < 0.0: # Timeout less than 0 treated as no timeout. - expires = None - else: - expires = monotonic() + timeout - - args = list(args) - if recalc_timeout and "timeout" not in kwargs: - raise ValueError( - "Timeout must be in args or kwargs to be recalculated") - - result = _SYSCALL_SENTINEL - while result is _SYSCALL_SENTINEL: - try: - result = func(*args, **kwargs) - # OSError is thrown by select.select - # IOError is thrown by select.epoll.poll - # select.error is thrown by select.poll.poll - # Aren't we thankful for Python 3.x rework for exceptions? - except (OSError, IOError, select.error) as e: - # select.error wasn't a subclass of OSError in the past. - errcode = None - if hasattr(e, "errno"): - errcode = e.errno - elif hasattr(e, "args"): - errcode = e.args[0] - - # Also test for the Windows equivalent of EINTR. - is_interrupt = (errcode == errno.EINTR or (hasattr(errno, "WSAEINTR") and - errcode == errno.WSAEINTR)) - - if is_interrupt: - if expires is not None: - current_time = monotonic() - if current_time > expires: - raise OSError(errno=errno.ETIMEDOUT) - if recalc_timeout: - if "timeout" in kwargs: - kwargs["timeout"] = expires - current_time - continue - if errcode: - raise SelectorError(errcode) - else: - raise - return result - - -SelectorKey = namedtuple('SelectorKey', ['fileobj', 'fd', 'events', 'data']) - - -class _SelectorMapping(Mapping): - """ Mapping of file objects to selector keys """ - - def __init__(self, selector): - self._selector = selector - - def __len__(self): - return len(self._selector._fd_to_key) - - def __getitem__(self, fileobj): - try: - fd = self._selector._fileobj_lookup(fileobj) - return self._selector._fd_to_key[fd] - except KeyError: - raise KeyError("{0!r} is not registered.".format(fileobj)) - - def __iter__(self): - return iter(self._selector._fd_to_key) - - -class BaseSelector(object): - """ Abstract Selector class - - A selector supports registering file objects to be monitored - for specific I/O events. - - A file object is a file descriptor or any object with a - `fileno()` method. An arbitrary object can be attached to the - file object which can be used for example to store context info, - a callback, etc. - - A selector can use various implementations (select(), poll(), epoll(), - and kqueue()) depending on the platform. The 'DefaultSelector' class uses - the most efficient implementation for the current platform. - """ - def __init__(self): - # Maps file descriptors to keys. - self._fd_to_key = {} - - # Read-only mapping returned by get_map() - self._map = _SelectorMapping(self) - - def _fileobj_lookup(self, fileobj): - """ Return a file descriptor from a file object. 
- This wraps _fileobj_to_fd() to do an exhaustive - search in case the object is invalid but we still - have it in our map. Used by unregister() so we can - unregister an object that was previously registered - even if it is closed. It is also used by _SelectorMapping - """ - try: - return _fileobj_to_fd(fileobj) - except ValueError: - - # Search through all our mapped keys. - for key in self._fd_to_key.values(): - if key.fileobj is fileobj: - return key.fd - - # Raise ValueError after all. - raise - - def register(self, fileobj, events, data=None): - """ Register a file object for a set of events to monitor. """ - if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)): - raise ValueError("Invalid events: {0!r}".format(events)) - - key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data) - - if key.fd in self._fd_to_key: - raise KeyError("{0!r} (FD {1}) is already registered" - .format(fileobj, key.fd)) - - self._fd_to_key[key.fd] = key - return key - - def unregister(self, fileobj): - """ Unregister a file object from being monitored. """ - try: - key = self._fd_to_key.pop(self._fileobj_lookup(fileobj)) - except KeyError: - raise KeyError("{0!r} is not registered".format(fileobj)) - - # Getting the fileno of a closed socket on Windows errors with EBADF. - except socket.error as e: # Platform-specific: Windows. - if e.errno != errno.EBADF: - raise - else: - for key in self._fd_to_key.values(): - if key.fileobj is fileobj: - self._fd_to_key.pop(key.fd) - break - else: - raise KeyError("{0!r} is not registered".format(fileobj)) - return key - - def modify(self, fileobj, events, data=None): - """ Change a registered file object monitored events and data. """ - # NOTE: Some subclasses optimize this operation even further. - try: - key = self._fd_to_key[self._fileobj_lookup(fileobj)] - except KeyError: - raise KeyError("{0!r} is not registered".format(fileobj)) - - if events != key.events: - self.unregister(fileobj) - key = self.register(fileobj, events, data) - - elif data != key.data: - # Use a shortcut to update the data. - key = key._replace(data=data) - self._fd_to_key[key.fd] = key - - return key - - def select(self, timeout=None): - """ Perform the actual selection until some monitored file objects - are ready or the timeout expires. """ - raise NotImplementedError() - - def close(self): - """ Close the selector. This must be called to ensure that all - underlying resources are freed. """ - self._fd_to_key.clear() - self._map = None - - def get_key(self, fileobj): - """ Return the key associated with a registered file object. """ - mapping = self.get_map() - if mapping is None: - raise RuntimeError("Selector is closed") - try: - return mapping[fileobj] - except KeyError: - raise KeyError("{0!r} is not registered".format(fileobj)) - - def get_map(self): - """ Return a mapping of file objects to selector keys """ - return self._map - - def _key_from_fd(self, fd): - """ Return the key associated to a given file descriptor - Return None if it is not found. """ - try: - return self._fd_to_key[fd] - except KeyError: - return None - - def __enter__(self): - return self - - def __exit__(self, *args): - self.close() - - -# Almost all platforms have select.select() -if hasattr(select, "select"): - class SelectSelector(BaseSelector): - """ Select-based selector. 
""" - def __init__(self): - super(SelectSelector, self).__init__() - self._readers = set() - self._writers = set() - - def register(self, fileobj, events, data=None): - key = super(SelectSelector, self).register(fileobj, events, data) - if events & EVENT_READ: - self._readers.add(key.fd) - if events & EVENT_WRITE: - self._writers.add(key.fd) - return key - - def unregister(self, fileobj): - key = super(SelectSelector, self).unregister(fileobj) - self._readers.discard(key.fd) - self._writers.discard(key.fd) - return key - - def _select(self, r, w, timeout=None): - """ Wrapper for select.select because timeout is a positional arg """ - return select.select(r, w, [], timeout) - - def select(self, timeout=None): - # Selecting on empty lists on Windows errors out. - if not len(self._readers) and not len(self._writers): - return [] - - timeout = None if timeout is None else max(timeout, 0.0) - ready = [] - r, w, _ = _syscall_wrapper(self._select, True, self._readers, - self._writers, timeout) - r = set(r) - w = set(w) - for fd in r | w: - events = 0 - if fd in r: - events |= EVENT_READ - if fd in w: - events |= EVENT_WRITE - - key = self._key_from_fd(fd) - if key: - ready.append((key, events & key.events)) - return ready - - -if hasattr(select, "poll"): - class PollSelector(BaseSelector): - """ Poll-based selector """ - def __init__(self): - super(PollSelector, self).__init__() - self._poll = select.poll() - - def register(self, fileobj, events, data=None): - key = super(PollSelector, self).register(fileobj, events, data) - event_mask = 0 - if events & EVENT_READ: - event_mask |= select.POLLIN - if events & EVENT_WRITE: - event_mask |= select.POLLOUT - self._poll.register(key.fd, event_mask) - return key - - def unregister(self, fileobj): - key = super(PollSelector, self).unregister(fileobj) - self._poll.unregister(key.fd) - return key - - def _wrap_poll(self, timeout=None): - """ Wrapper function for select.poll.poll() so that - _syscall_wrapper can work with only seconds. """ - if timeout is not None: - if timeout <= 0: - timeout = 0 - else: - # select.poll.poll() has a resolution of 1 millisecond, - # round away from zero to wait *at least* timeout seconds. - timeout = math.ceil(timeout * 1e3) - - result = self._poll.poll(timeout) - return result - - def select(self, timeout=None): - ready = [] - fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout) - for fd, event_mask in fd_events: - events = 0 - if event_mask & ~select.POLLIN: - events |= EVENT_WRITE - if event_mask & ~select.POLLOUT: - events |= EVENT_READ - - key = self._key_from_fd(fd) - if key: - ready.append((key, events & key.events)) - - return ready - - -if hasattr(select, "epoll"): - class EpollSelector(BaseSelector): - """ Epoll-based selector """ - def __init__(self): - super(EpollSelector, self).__init__() - self._epoll = select.epoll() - - def fileno(self): - return self._epoll.fileno() - - def register(self, fileobj, events, data=None): - key = super(EpollSelector, self).register(fileobj, events, data) - events_mask = 0 - if events & EVENT_READ: - events_mask |= select.EPOLLIN - if events & EVENT_WRITE: - events_mask |= select.EPOLLOUT - _syscall_wrapper(self._epoll.register, False, key.fd, events_mask) - return key - - def unregister(self, fileobj): - key = super(EpollSelector, self).unregister(fileobj) - try: - _syscall_wrapper(self._epoll.unregister, False, key.fd) - except SelectorError: - # This can occur when the fd was closed since registry. 
- pass - return key - - def select(self, timeout=None): - if timeout is not None: - if timeout <= 0: - timeout = 0.0 - else: - # select.epoll.poll() has a resolution of 1 millisecond - # but luckily takes seconds so we don't need a wrapper - # like PollSelector. Just for better rounding. - timeout = math.ceil(timeout * 1e3) * 1e-3 - timeout = float(timeout) - else: - timeout = -1.0 # epoll.poll() must have a float. - - # We always want at least 1 to ensure that select can be called - # with no file descriptors registered. Otherwise will fail. - max_events = max(len(self._fd_to_key), 1) - - ready = [] - fd_events = _syscall_wrapper(self._epoll.poll, True, - timeout=timeout, - maxevents=max_events) - for fd, event_mask in fd_events: - events = 0 - if event_mask & ~select.EPOLLIN: - events |= EVENT_WRITE - if event_mask & ~select.EPOLLOUT: - events |= EVENT_READ - - key = self._key_from_fd(fd) - if key: - ready.append((key, events & key.events)) - return ready - - def close(self): - self._epoll.close() - super(EpollSelector, self).close() - - -if hasattr(select, "kqueue"): - class KqueueSelector(BaseSelector): - """ Kqueue / Kevent-based selector """ - def __init__(self): - super(KqueueSelector, self).__init__() - self._kqueue = select.kqueue() - - def fileno(self): - return self._kqueue.fileno() - - def register(self, fileobj, events, data=None): - key = super(KqueueSelector, self).register(fileobj, events, data) - if events & EVENT_READ: - kevent = select.kevent(key.fd, - select.KQ_FILTER_READ, - select.KQ_EV_ADD) - - _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0) - - if events & EVENT_WRITE: - kevent = select.kevent(key.fd, - select.KQ_FILTER_WRITE, - select.KQ_EV_ADD) - - _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0) - - return key - - def unregister(self, fileobj): - key = super(KqueueSelector, self).unregister(fileobj) - if key.events & EVENT_READ: - kevent = select.kevent(key.fd, - select.KQ_FILTER_READ, - select.KQ_EV_DELETE) - try: - _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0) - except SelectorError: - pass - if key.events & EVENT_WRITE: - kevent = select.kevent(key.fd, - select.KQ_FILTER_WRITE, - select.KQ_EV_DELETE) - try: - _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0) - except SelectorError: - pass - - return key - - def select(self, timeout=None): - if timeout is not None: - timeout = max(timeout, 0) - - max_events = len(self._fd_to_key) * 2 - ready_fds = {} - - kevent_list = _syscall_wrapper(self._kqueue.control, True, - None, max_events, timeout) - - for kevent in kevent_list: - fd = kevent.ident - event_mask = kevent.filter - events = 0 - if event_mask == select.KQ_FILTER_READ: - events |= EVENT_READ - if event_mask == select.KQ_FILTER_WRITE: - events |= EVENT_WRITE - - key = self._key_from_fd(fd) - if key: - if key.fd not in ready_fds: - ready_fds[key.fd] = (key, events & key.events) - else: - old_events = ready_fds[key.fd][1] - ready_fds[key.fd] = (key, (events | old_events) & key.events) - - return list(ready_fds.values()) - - def close(self): - self._kqueue.close() - super(KqueueSelector, self).close() - - -if not hasattr(select, 'select'): # Platform-specific: AppEngine - HAS_SELECT = False - - -def _can_allocate(struct): - """ Checks that select structs can be allocated by the underlying - operating system, not just advertised by the select module. We don't - check select() because we'll be hopeful that most platforms that - don't have it available will not advertise it. 
(ie: GAE) """ - try: - # select.poll() objects won't fail until used. - if struct == 'poll': - p = select.poll() - p.poll(0) - - # All others will fail on allocation. - else: - getattr(select, struct)().close() - return True - except (OSError, AttributeError) as e: - return False - - -# Choose the best implementation, roughly: -# kqueue == epoll > poll > select. Devpoll not supported. (See above) -# select() also can't accept a FD > FD_SETSIZE (usually around 1024) -def DefaultSelector(): - """ This function serves as a first call for DefaultSelector to - detect if the select module is being monkey-patched incorrectly - by eventlet, greenlet, and preserve proper behavior. """ - global _DEFAULT_SELECTOR - if _DEFAULT_SELECTOR is None: - if _can_allocate('kqueue'): - _DEFAULT_SELECTOR = KqueueSelector - elif _can_allocate('epoll'): - _DEFAULT_SELECTOR = EpollSelector - elif _can_allocate('poll'): - _DEFAULT_SELECTOR = PollSelector - elif hasattr(select, 'select'): - _DEFAULT_SELECTOR = SelectSelector - else: # Platform-specific: AppEngine - raise ValueError('Platform does not have a selector') - return _DEFAULT_SELECTOR() diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/util/ssl_.py b/src/collectors/python.d.plugin/python_modules/urllib3/util/ssl_.py deleted file mode 100644 index ece3ec39e..000000000 --- a/src/collectors/python.d.plugin/python_modules/urllib3/util/ssl_.py +++ /dev/null @@ -1,338 +0,0 @@ -# SPDX-License-Identifier: MIT -from __future__ import absolute_import -import errno -import warnings -import hmac - -from binascii import hexlify, unhexlify -from hashlib import md5, sha1, sha256 - -from ..exceptions import SSLError, InsecurePlatformWarning, SNIMissingWarning - - -SSLContext = None -HAS_SNI = False -IS_PYOPENSSL = False -IS_SECURETRANSPORT = False - -# Maps the length of a digest to a possible hash function producing this digest -HASHFUNC_MAP = { - 32: md5, - 40: sha1, - 64: sha256, -} - - -def _const_compare_digest_backport(a, b): - """ - Compare two digests of equal length in constant time. - - The digests must be of type str/bytes. - Returns True if the digests match, and False otherwise. - """ - result = abs(len(a) - len(b)) - for l, r in zip(bytearray(a), bytearray(b)): - result |= l ^ r - return result == 0 - - -_const_compare_digest = getattr(hmac, 'compare_digest', - _const_compare_digest_backport) - - -try: # Test for SSL features - import ssl - from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23 - from ssl import HAS_SNI # Has SNI? -except ImportError: - pass - - -try: - from ssl import OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION -except ImportError: - OP_NO_SSLv2, OP_NO_SSLv3 = 0x1000000, 0x2000000 - OP_NO_COMPRESSION = 0x20000 - -# A secure default. -# Sources for more information on TLS ciphers: -# -# - https://wiki.mozilla.org/Security/Server_Side_TLS -# - https://www.ssllabs.com/projects/best-practices/index.html -# - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/ -# -# The general intent is: -# - Prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE), -# - prefer ECDHE over DHE for better performance, -# - prefer any AES-GCM and ChaCha20 over any AES-CBC for better performance and -# security, -# - prefer AES-GCM over ChaCha20 because hardware-accelerated AES is common, -# - disable NULL authentication, MD5 MACs and DSS for security reasons. 
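
_const_compare_digest_backport above is the classic constant-time comparison: every XOR-ed byte pair is OR-ed into one accumulator, so the running time does not depend on where the first mismatch sits (unlike ==, which can return at the first differing byte). A standalone restatement with a usage check:

    import hmac

    def const_compare(a, b):
        # OR every XOR-ed byte pair into one accumulator; the loop always
        # runs to completion, so timing leaks no mismatch position.
        result = abs(len(a) - len(b))
        for l, r in zip(bytearray(a), bytearray(b)):
            result |= l ^ r
        return result == 0

    digest = b"\x01\x02\x03"
    assert const_compare(digest, b"\x01\x02\x03")
    assert not const_compare(digest, b"\x01\x02\x04")
    assert hmac.compare_digest(digest, digest)  # the stdlib version, preferred when available
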
-DEFAULT_CIPHERS = ':'.join([
-    'ECDH+AESGCM',
-    'ECDH+CHACHA20',
-    'DH+AESGCM',
-    'DH+CHACHA20',
-    'ECDH+AES256',
-    'DH+AES256',
-    'ECDH+AES128',
-    'DH+AES',
-    'RSA+AESGCM',
-    'RSA+AES',
-    '!aNULL',
-    '!eNULL',
-    '!MD5',
-])
-
-try:
-    from ssl import SSLContext  # Modern SSL?
-except ImportError:
-    import sys
-
-    class SSLContext(object):  # Platform-specific: Python 2 & 3.1
-        supports_set_ciphers = ((2, 7) <= sys.version_info < (3,) or
-                                (3, 2) <= sys.version_info)
-
-        def __init__(self, protocol_version):
-            self.protocol = protocol_version
-            # Use default values from a real SSLContext
-            self.check_hostname = False
-            self.verify_mode = ssl.CERT_NONE
-            self.ca_certs = None
-            self.options = 0
-            self.certfile = None
-            self.keyfile = None
-            self.ciphers = None
-
-        def load_cert_chain(self, certfile, keyfile):
-            self.certfile = certfile
-            self.keyfile = keyfile
-
-        def load_verify_locations(self, cafile=None, capath=None):
-            self.ca_certs = cafile
-
-            if capath is not None:
-                raise SSLError("CA directories not supported in older Pythons")
-
-        def set_ciphers(self, cipher_suite):
-            if not self.supports_set_ciphers:
-                raise TypeError(
-                    'Your version of Python does not support setting '
-                    'a custom cipher suite. Please upgrade to Python '
-                    '2.7, 3.2, or later if you need this functionality.'
-                )
-            self.ciphers = cipher_suite
-
-        def wrap_socket(self, socket, server_hostname=None, server_side=False):
-            warnings.warn(
-                'A true SSLContext object is not available. This prevents '
-                'urllib3 from configuring SSL appropriately and may cause '
-                'certain SSL connections to fail. You can upgrade to a newer '
-                'version of Python to solve this. For more information, see '
-                'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
-                '#ssl-warnings',
-                InsecurePlatformWarning
-            )
-            kwargs = {
-                'keyfile': self.keyfile,
-                'certfile': self.certfile,
-                'ca_certs': self.ca_certs,
-                'cert_reqs': self.verify_mode,
-                'ssl_version': self.protocol,
-                'server_side': server_side,
-            }
-            if self.supports_set_ciphers:  # Platform-specific: Python 2.7+
-                return wrap_socket(socket, ciphers=self.ciphers, **kwargs)
-            else:  # Platform-specific: Python 2.6
-                return wrap_socket(socket, **kwargs)
-
-
-def assert_fingerprint(cert, fingerprint):
-    """
-    Checks if a given fingerprint matches the supplied certificate.
-
-    :param cert:
-        Certificate as bytes object.
-    :param fingerprint:
-        Fingerprint as string of hexdigits, can be interspersed with colons.
-    """
-
-    fingerprint = fingerprint.replace(':', '').lower()
-    digest_length = len(fingerprint)
-    hashfunc = HASHFUNC_MAP.get(digest_length)
-    if not hashfunc:
-        raise SSLError(
-            'Fingerprint of invalid length: {0}'.format(fingerprint))
-
-    # We need encode() here for py32; works on py2 and py33.
-    fingerprint_bytes = unhexlify(fingerprint.encode())
-
-    cert_digest = hashfunc(cert).digest()
-
-    if not _const_compare_digest(cert_digest, fingerprint_bytes):
-        raise SSLError('Fingerprints did not match. Expected "{0}", got "{1}".'
-                       .format(fingerprint, hexlify(cert_digest)))
-
-
-def resolve_cert_reqs(candidate):
-    """
-    Resolves the argument to a numeric constant, which can be passed to
-    the wrap_socket function/method from the ssl module.
-    Defaults to :data:`ssl.CERT_NONE`.
-    If given a string it is assumed to be the name of the constant in the
-    :mod:`ssl` module or its abbreviation.
-    (So you can specify `REQUIRED` instead of `CERT_REQUIRED`.)
-    If it's neither `None` nor a string we assume it is already the numeric
-    constant which can directly be passed to wrap_socket.
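
How the digest-length dispatch in assert_fingerprint above plays out: the colon-separated hex fingerprint is normalized, its length selects the hash via HASHFUNC_MAP (32 hex digits for MD5, 40 for SHA-1, 64 for SHA-256), and the DER certificate bytes are digested for the comparison. A sketch with deliberately fake certificate bytes:

    from hashlib import sha256

    cert_der = b"not-a-real-certificate"   # illustrative stand-in for DER bytes
    expected = sha256(cert_der).hexdigest()
    fingerprint = ":".join(expected[i:i + 2] for i in range(0, len(expected), 2))

    normalized = fingerprint.replace(":", "").lower()
    assert len(normalized) == 64                      # 64 hex digits selects sha256
    assert sha256(cert_der).hexdigest() == normalized
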
- """ - if candidate is None: - return CERT_NONE - - if isinstance(candidate, str): - res = getattr(ssl, candidate, None) - if res is None: - res = getattr(ssl, 'CERT_' + candidate) - return res - - return candidate - - -def resolve_ssl_version(candidate): - """ - like resolve_cert_reqs - """ - if candidate is None: - return PROTOCOL_SSLv23 - - if isinstance(candidate, str): - res = getattr(ssl, candidate, None) - if res is None: - res = getattr(ssl, 'PROTOCOL_' + candidate) - return res - - return candidate - - -def create_urllib3_context(ssl_version=None, cert_reqs=None, - options=None, ciphers=None): - """All arguments have the same meaning as ``ssl_wrap_socket``. - - By default, this function does a lot of the same work that - ``ssl.create_default_context`` does on Python 3.4+. It: - - - Disables SSLv2, SSLv3, and compression - - Sets a restricted set of server ciphers - - If you wish to enable SSLv3, you can do:: - - from urllib3.util import ssl_ - context = ssl_.create_urllib3_context() - context.options &= ~ssl_.OP_NO_SSLv3 - - You can do the same to enable compression (substituting ``COMPRESSION`` - for ``SSLv3`` in the last line above). - - :param ssl_version: - The desired protocol version to use. This will default to - PROTOCOL_SSLv23 which will negotiate the highest protocol that both - the server and your installation of OpenSSL support. - :param cert_reqs: - Whether to require the certificate verification. This defaults to - ``ssl.CERT_REQUIRED``. - :param options: - Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``, - ``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``. - :param ciphers: - Which cipher suites to allow the server to select. - :returns: - Constructed SSLContext object with specified options - :rtype: SSLContext - """ - context = SSLContext(ssl_version or ssl.PROTOCOL_SSLv23) - - # Setting the default here, as we may have no ssl module on import - cert_reqs = ssl.CERT_REQUIRED if cert_reqs is None else cert_reqs - - if options is None: - options = 0 - # SSLv2 is easily broken and is considered harmful and dangerous - options |= OP_NO_SSLv2 - # SSLv3 has several problems and is now dangerous - options |= OP_NO_SSLv3 - # Disable compression to prevent CRIME attacks for OpenSSL 1.0+ - # (issue #309) - options |= OP_NO_COMPRESSION - - context.options |= options - - if getattr(context, 'supports_set_ciphers', True): # Platform-specific: Python 2.6 - context.set_ciphers(ciphers or DEFAULT_CIPHERS) - - context.verify_mode = cert_reqs - if getattr(context, 'check_hostname', None) is not None: # Platform-specific: Python 3.2 - # We do our own verification, including fingerprints and alternative - # hostnames. So disable it here - context.check_hostname = False - return context - - -def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None, - ca_certs=None, server_hostname=None, - ssl_version=None, ciphers=None, ssl_context=None, - ca_cert_dir=None): - """ - All arguments except for server_hostname, ssl_context, and ca_cert_dir have - the same meaning as they do when using :func:`ssl.wrap_socket`. - - :param server_hostname: - When SNI is supported, the expected hostname of the certificate - :param ssl_context: - A pre-made :class:`SSLContext` object. If none is provided, one will - be created using :func:`create_urllib3_context`. - :param ciphers: - A string of ciphers we wish the client to support. This is not - supported on Python 2.6 as the ssl module does not support it. 
-    :param ca_cert_dir:
-        A directory containing CA certificates in multiple separate files, as
-        supported by OpenSSL's -CApath flag or the capath argument to
-        SSLContext.load_verify_locations().
-    """
-    context = ssl_context
-    if context is None:
-        # Note: This branch of code and all the variables in it are no longer
-        # used by urllib3 itself. We should consider deprecating and removing
-        # this code.
-        context = create_urllib3_context(ssl_version, cert_reqs,
-                                         ciphers=ciphers)
-
-    if ca_certs or ca_cert_dir:
-        try:
-            context.load_verify_locations(ca_certs, ca_cert_dir)
-        except IOError as e:  # Platform-specific: Python 2.6, 2.7, 3.2
-            raise SSLError(e)
-        # Py33 raises FileNotFoundError which subclasses OSError
-        # These are not equivalent unless we check the errno attribute
-        except OSError as e:  # Platform-specific: Python 3.3 and beyond
-            if e.errno == errno.ENOENT:
-                raise SSLError(e)
-            raise
-    elif getattr(context, 'load_default_certs', None) is not None:
-        # try to load OS default certs; works well on Windows (requires Python 3.4+)
-        context.load_default_certs()
-
-    if certfile:
-        context.load_cert_chain(certfile, keyfile)
-    if HAS_SNI:  # Platform-specific: OpenSSL with enabled SNI
-        return context.wrap_socket(sock, server_hostname=server_hostname)
-
-    warnings.warn(
-        'An HTTPS request has been made, but the SNI (Subject Name '
-        'Indication) extension to TLS is not available on this platform. '
-        'This may cause the server to present an incorrect TLS '
-        'certificate, which can cause validation failures. You can upgrade to '
-        'a newer version of Python to solve this. For more information, see '
-        'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
-        '#ssl-warnings',
-        SNIMissingWarning
-    )
-    return context.wrap_socket(sock)
diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/util/timeout.py b/src/collectors/python.d.plugin/python_modules/urllib3/util/timeout.py
deleted file mode 100644
index 4041cf9b9..000000000
--- a/src/collectors/python.d.plugin/python_modules/urllib3/util/timeout.py
+++ /dev/null
@@ -1,243 +0,0 @@
-# SPDX-License-Identifier: MIT
-from __future__ import absolute_import
-# The default socket timeout, used by httplib to indicate that no timeout was
-# specified by the user
-from socket import _GLOBAL_DEFAULT_TIMEOUT
-import time
-
-from ..exceptions import TimeoutStateError
-
-# A sentinel value to indicate that no timeout was specified by the user in
-# urllib3
-_Default = object()
-
-
-# Use time.monotonic if available.
-current_time = getattr(time, "monotonic", time.time)
-
-
-class Timeout(object):
-    """ Timeout configuration.
-
-    Timeouts can be defined as a default for a pool::
-
-        timeout = Timeout(connect=2.0, read=7.0)
-        http = PoolManager(timeout=timeout)
-        response = http.request('GET', 'http://example.com/')
-
-    Or per-request (which overrides the default for the pool)::
-
-        response = http.request('GET', 'http://example.com/', timeout=Timeout(10))
-
-    Timeouts can be disabled by setting all the parameters to ``None``::
-
-        no_timeout = Timeout(connect=None, read=None)
-        response = http.request('GET', 'http://example.com/', timeout=no_timeout)
-
-
-    :param total:
-        This combines the connect and read timeouts into one; the read timeout
-        will be set to the time leftover from the connect attempt. In the
-        event that both a connect timeout and a total are specified, or a read
-        timeout and a total are specified, the shorter timeout will be applied.
-
-        Defaults to None.
- - :type total: integer, float, or None - - :param connect: - The maximum amount of time to wait for a connection attempt to a server - to succeed. Omitting the parameter will default the connect timeout to - the system default, probably `the global default timeout in socket.py - `_. - None will set an infinite timeout for connection attempts. - - :type connect: integer, float, or None - - :param read: - The maximum amount of time to wait between consecutive - read operations for a response from the server. Omitting - the parameter will default the read timeout to the system - default, probably `the global default timeout in socket.py - `_. - None will set an infinite timeout. - - :type read: integer, float, or None - - .. note:: - - Many factors can affect the total amount of time for urllib3 to return - an HTTP response. - - For example, Python's DNS resolver does not obey the timeout specified - on the socket. Other factors that can affect total request time include - high CPU load, high swap, the program running at a low priority level, - or other behaviors. - - In addition, the read and total timeouts only measure the time between - read operations on the socket connecting the client and the server, - not the total amount of time for the request to return a complete - response. For most requests, the timeout is raised because the server - has not sent the first byte in the specified time. This is not always - the case; if a server streams one byte every fifteen seconds, a timeout - of 20 seconds will not trigger, even though the request will take - several minutes to complete. - - If your goal is to cut off any request after a set amount of wall clock - time, consider having a second "watcher" thread to cut off a slow - request. - """ - - #: A sentinel object representing the default timeout value - DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT - - def __init__(self, total=None, connect=_Default, read=_Default): - self._connect = self._validate_timeout(connect, 'connect') - self._read = self._validate_timeout(read, 'read') - self.total = self._validate_timeout(total, 'total') - self._start_connect = None - - def __str__(self): - return '%s(connect=%r, read=%r, total=%r)' % ( - type(self).__name__, self._connect, self._read, self.total) - - @classmethod - def _validate_timeout(cls, value, name): - """ Check that a timeout attribute is valid. - - :param value: The timeout value to validate - :param name: The name of the timeout attribute to validate. This is - used to specify in error messages. - :return: The validated and casted version of the given value. - :raises ValueError: If it is a numeric value less than or equal to - zero, or the type is not an integer, float, or None. - """ - if value is _Default: - return cls.DEFAULT_TIMEOUT - - if value is None or value is cls.DEFAULT_TIMEOUT: - return value - - if isinstance(value, bool): - raise ValueError("Timeout cannot be a boolean value. It must " - "be an int, float or None.") - try: - float(value) - except (TypeError, ValueError): - raise ValueError("Timeout value %s was %s, but it must be an " - "int, float or None." % (name, value)) - - try: - if value <= 0: - raise ValueError("Attempted to set %s timeout to %s, but the " - "timeout cannot be set to a value less " - "than or equal to 0." % (name, value)) - except TypeError: # Python 3 - raise ValueError("Timeout value %s was %s, but it must be an " - "int, float or None." 
% (name, value)) - - return value - - @classmethod - def from_float(cls, timeout): - """ Create a new Timeout from a legacy timeout value. - - The timeout value used by httplib.py sets the same timeout on the - connect(), and recv() socket requests. This creates a :class:`Timeout` - object that sets the individual timeouts to the ``timeout`` value - passed to this function. - - :param timeout: The legacy timeout value. - :type timeout: integer, float, sentinel default object, or None - :return: Timeout object - :rtype: :class:`Timeout` - """ - return Timeout(read=timeout, connect=timeout) - - def clone(self): - """ Create a copy of the timeout object - - Timeout properties are stored per-pool but each request needs a fresh - Timeout object to ensure each one has its own start/stop configured. - - :return: a copy of the timeout object - :rtype: :class:`Timeout` - """ - # We can't use copy.deepcopy because that will also create a new object - # for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to - # detect the user default. - return Timeout(connect=self._connect, read=self._read, - total=self.total) - - def start_connect(self): - """ Start the timeout clock, used during a connect() attempt - - :raises urllib3.exceptions.TimeoutStateError: if you attempt - to start a timer that has been started already. - """ - if self._start_connect is not None: - raise TimeoutStateError("Timeout timer has already been started.") - self._start_connect = current_time() - return self._start_connect - - def get_connect_duration(self): - """ Gets the time elapsed since the call to :meth:`start_connect`. - - :return: Elapsed time. - :rtype: float - :raises urllib3.exceptions.TimeoutStateError: if you attempt - to get duration for a timer that hasn't been started. - """ - if self._start_connect is None: - raise TimeoutStateError("Can't get connect duration for timer " - "that has not started.") - return current_time() - self._start_connect - - @property - def connect_timeout(self): - """ Get the value to use when setting a connection timeout. - - This will be a positive float or integer, the value None - (never timeout), or the default system timeout. - - :return: Connect timeout. - :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None - """ - if self.total is None: - return self._connect - - if self._connect is None or self._connect is self.DEFAULT_TIMEOUT: - return self.total - - return min(self._connect, self.total) - - @property - def read_timeout(self): - """ Get the value for the read timeout. - - This assumes some time has elapsed in the connection timeout and - computes the read timeout appropriately. - - If self.total is set, the read timeout is dependent on the amount of - time taken by the connect timeout. If the connection time has not been - established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be - raised. - - :return: Value to use for the read timeout. - :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None - :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect` - has not yet been called on this object. - """ - if (self.total is not None and - self.total is not self.DEFAULT_TIMEOUT and - self._read is not None and - self._read is not self.DEFAULT_TIMEOUT): - # In case the connect timeout has not yet been established. 
- if self._start_connect is None: - return self._read - return max(0, min(self.total - self.get_connect_duration(), - self._read)) - elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT: - return max(0, self.total - self.get_connect_duration()) - else: - return self._read diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/util/url.py b/src/collectors/python.d.plugin/python_modules/urllib3/util/url.py deleted file mode 100644 index 99fd6534a..000000000 --- a/src/collectors/python.d.plugin/python_modules/urllib3/util/url.py +++ /dev/null @@ -1,231 +0,0 @@ -# SPDX-License-Identifier: MIT -from __future__ import absolute_import -from collections import namedtuple - -from ..exceptions import LocationParseError - - -url_attrs = ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment'] - -# We only want to normalize urls with an HTTP(S) scheme. -# urllib3 infers URLs without a scheme (None) to be http. -NORMALIZABLE_SCHEMES = ('http', 'https', None) - - -class Url(namedtuple('Url', url_attrs)): - """ - Datastructure for representing an HTTP URL. Used as a return value for - :func:`parse_url`. Both the scheme and host are normalized as they are - both case-insensitive according to RFC 3986. - """ - __slots__ = () - - def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None, - query=None, fragment=None): - if path and not path.startswith('/'): - path = '/' + path - if scheme: - scheme = scheme.lower() - if host and scheme in NORMALIZABLE_SCHEMES: - host = host.lower() - return super(Url, cls).__new__(cls, scheme, auth, host, port, path, - query, fragment) - - @property - def hostname(self): - """For backwards-compatibility with urlparse. We're nice like that.""" - return self.host - - @property - def request_uri(self): - """Absolute path including the query string.""" - uri = self.path or '/' - - if self.query is not None: - uri += '?' + self.query - - return uri - - @property - def netloc(self): - """Network location including host and port""" - if self.port: - return '%s:%d' % (self.host, self.port) - return self.host - - @property - def url(self): - """ - Convert self into a url - - This function should more or less round-trip with :func:`.parse_url`. The - returned url may not be exactly the same as the url inputted to - :func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls - with a blank port will have : removed). - - Example: :: - - >>> U = parse_url('http://google.com/mail/') - >>> U.url - 'http://google.com/mail/' - >>> Url('http', 'username:password', 'host.com', 80, - ... '/path', 'query', 'fragment').url - 'http://username:password@host.com:80/path?query#fragment' - """ - scheme, auth, host, port, path, query, fragment = self - url = '' - - # We use "is not None" we want things to happen with empty strings (or 0 port) - if scheme is not None: - url += scheme + '://' - if auth is not None: - url += auth + '@' - if host is not None: - url += host - if port is not None: - url += ':' + str(port) - if path is not None: - url += path - if query is not None: - url += '?' + query - if fragment is not None: - url += '#' + fragment - - return url - - def __str__(self): - return self.url - - -def split_first(s, delims): - """ - Given a string and an iterable of delimiters, split on the first found - delimiter. Return two split parts and the matched delimiter. - - If not found, then the first part is the full input string. 
- - Example:: - - >>> split_first('foo/bar?baz', '?/=') - ('foo', 'bar?baz', '/') - >>> split_first('foo/bar?baz', '123') - ('foo/bar?baz', '', None) - - Scales linearly with number of delims. Not ideal for large number of delims. - """ - min_idx = None - min_delim = None - for d in delims: - idx = s.find(d) - if idx < 0: - continue - - if min_idx is None or idx < min_idx: - min_idx = idx - min_delim = d - - if min_idx is None or min_idx < 0: - return s, '', None - - return s[:min_idx], s[min_idx + 1:], min_delim - - -def parse_url(url): - """ - Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is - performed to parse incomplete urls. Fields not provided will be None. - - Partly backwards-compatible with :mod:`urlparse`. - - Example:: - - >>> parse_url('http://google.com/mail/') - Url(scheme='http', host='google.com', port=None, path='/mail/', ...) - >>> parse_url('google.com:80') - Url(scheme=None, host='google.com', port=80, path=None, ...) - >>> parse_url('/foo?bar') - Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...) - """ - - # While this code has overlap with stdlib's urlparse, it is much - # simplified for our needs and less annoying. - # Additionally, this implementations does silly things to be optimal - # on CPython. - - if not url: - # Empty - return Url() - - scheme = None - auth = None - host = None - port = None - path = None - fragment = None - query = None - - # Scheme - if '://' in url: - scheme, url = url.split('://', 1) - - # Find the earliest Authority Terminator - # (http://tools.ietf.org/html/rfc3986#section-3.2) - url, path_, delim = split_first(url, ['/', '?', '#']) - - if delim: - # Reassemble the path - path = delim + path_ - - # Auth - if '@' in url: - # Last '@' denotes end of auth part - auth, url = url.rsplit('@', 1) - - # IPv6 - if url and url[0] == '[': - host, url = url.split(']', 1) - host += ']' - - # Port - if ':' in url: - _host, port = url.split(':', 1) - - if not host: - host = _host - - if port: - # If given, ports must be integers. No whitespace, no plus or - # minus prefixes, no non-integer digits such as ^2 (superscript). - if not port.isdigit(): - raise LocationParseError(url) - try: - port = int(port) - except ValueError: - raise LocationParseError(url) - else: - # Blank ports are cool, too. (rfc3986#section-3.2.3) - port = None - - elif not host and url: - host = url - - if not path: - return Url(scheme, auth, host, port, path, query, fragment) - - # Fragment - if '#' in path: - path, fragment = path.split('#', 1) - - # Query - if '?' in path: - path, query = path.split('?', 1) - - return Url(scheme, auth, host, port, path, query, fragment) - - -def get_host(url): - """ - Deprecated. Use :func:`parse_url` instead. - """ - p = parse_url(url) - return p.scheme or 'http', p.hostname, p.port diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/util/wait.py b/src/collectors/python.d.plugin/python_modules/urllib3/util/wait.py deleted file mode 100644 index 21e72979c..000000000 --- a/src/collectors/python.d.plugin/python_modules/urllib3/util/wait.py +++ /dev/null @@ -1,41 +0,0 @@ -# SPDX-License-Identifier: MIT -from .selectors import ( - HAS_SELECT, - DefaultSelector, - EVENT_READ, - EVENT_WRITE -) - - -def _wait_for_io_events(socks, events, timeout=None): - """ Waits for IO events to be available from a list of sockets - or optionally a single socket if passed in. Returns a list of - sockets that can be interacted with immediately. 
""" - if not HAS_SELECT: - raise ValueError('Platform does not have a selector') - if not isinstance(socks, list): - # Probably just a single socket. - if hasattr(socks, "fileno"): - socks = [socks] - # Otherwise it might be a non-list iterable. - else: - socks = list(socks) - with DefaultSelector() as selector: - for sock in socks: - selector.register(sock, events) - return [key[0].fileobj for key in - selector.select(timeout) if key[1] & events] - - -def wait_for_read(socks, timeout=None): - """ Waits for reading to be available from a list of sockets - or optionally a single socket if passed in. Returns a list of - sockets that can be read from immediately. """ - return _wait_for_io_events(socks, EVENT_READ, timeout) - - -def wait_for_write(socks, timeout=None): - """ Waits for writing to be available from a list of sockets - or optionally a single socket if passed in. Returns a list of - sockets that can be written to immediately. """ - return _wait_for_io_events(socks, EVENT_WRITE, timeout) diff --git a/src/collectors/python.d.plugin/samba/README.md b/src/collectors/python.d.plugin/samba/README.md deleted file mode 120000 index 3b63bbab6..000000000 --- a/src/collectors/python.d.plugin/samba/README.md +++ /dev/null @@ -1 +0,0 @@ -integrations/samba.md \ No newline at end of file diff --git a/src/collectors/python.d.plugin/samba/integrations/samba.md b/src/collectors/python.d.plugin/samba/integrations/samba.md deleted file mode 100644 index 4d6f8fcc3..000000000 --- a/src/collectors/python.d.plugin/samba/integrations/samba.md +++ /dev/null @@ -1,255 +0,0 @@ - - -# Samba - - - - - -Plugin: python.d.plugin -Module: samba - - - -## Overview - -This collector monitors the performance metrics of Samba file sharing. - -It is using the `smbstatus` command-line tool. - -Executed commands: - -- `sudo -n smbstatus -P` - - -This collector is supported on all platforms. - -This collector only supports collecting metrics from a single instance of this integration. - -`smbstatus` is used, which can only be executed by `root`. It uses `sudo` and assumes that it is configured such that the `netdata` user can execute `smbstatus` as root without a password. - - -### Default Behavior - -#### Auto-Detection - -After all the permissions are satisfied, the `smbstatus -P` binary is executed. - -#### Limits - -The default configuration for this integration does not impose any limits on data collection. - -#### Performance Impact - -The default configuration for this integration is not expected to impose a significant performance impact on the system. - - -## Metrics - -Metrics grouped by *scope*. - -The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. - - - -### Per Samba instance - -These metrics refer to the entire monitored application. - -This scope has no labels. - -Metrics: - -| Metric | Dimensions | Unit | -|:------|:----------|:----| -| syscall.rw | sendfile, recvfile | KiB/s | -| smb2.rw | readout, writein, readin, writeout | KiB/s | -| smb2.create_close | create, close | operations/s | -| smb2.get_set_info | getinfo, setinfo | operations/s | -| smb2.find | find | operations/s | -| smb2.notify | notify | operations/s | -| smb2.sm_counters | tcon, negprot, tdis, cancel, logoff, flush, lock, keepalive, break, sessetup | count | - - - -## Alerts - -There are no alerts configured by default for this integration. - - -## Setup - -### Prerequisites - -#### Enable the samba collector - -The `samba` collector is disabled by default. 
To enable it, use `edit-config` from the Netdata [config directory](/docs/netdata-agent/configuration/README.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file. - -```bash -cd /etc/netdata # Replace this path with your Netdata config directory, if different -sudo ./edit-config python.d.conf -``` -Change the value of the `samba` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](/packaging/installer/README.md#maintaining-a-netdata-agent-installation) for your system. - - -#### Permissions and programs - -To run the collector you need: - -- `smbstatus` program -- `sudo` program -- `smbd` must be compiled with profiling enabled -- `smbd` must be started either with the `-P 1` option or inside `smb.conf` using `smbd profiling level` - -The module uses `smbstatus`, which can only be executed by `root`. It uses `sudo` and assumes that it is configured such that the `netdata` user can execute `smbstatus` as root without a password. - -- add to your `/etc/sudoers` file: - - `which smbstatus` shows the full path to the binary. - - ```bash - netdata ALL=(root) NOPASSWD: /path/to/smbstatus - ``` - -- Reset Netdata's systemd unit [CapabilityBoundingSet](https://www.freedesktop.org/software/systemd/man/systemd.exec.html#Capabilities) (Linux distributions with systemd) - - The default CapabilityBoundingSet doesn't allow using `sudo`, and is quite strict in general. Resetting is not optimal, but a next-best solution given the inability to execute `smbstatus` using `sudo`. - - - As the `root` user, do the following: - - ```cmd - mkdir /etc/systemd/system/netdata.service.d - echo -e '[Service]\nCapabilityBoundingSet=~' | tee /etc/systemd/system/netdata.service.d/unset-capability-bounding-set.conf - systemctl daemon-reload - systemctl restart netdata.service - ``` - - - -### Configuration - -#### File - -The configuration file name for this integration is `python.d/samba.conf`. - - -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). - -```bash -cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata -sudo ./edit-config python.d/samba.conf -``` -#### Options - -There are 2 sections: - -* Global variables -* One or more JOBS that can define multiple different instances to monitor. - -The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. - -Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. - -Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. - - -
-<details open><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-
-</details>
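-
-For intuition, the `penalty` back-off described in the shipped `samba.conf` (the interval grows after every 5 consecutive failed updates, capped at 10 minutes) can be sketched roughly as below. This is only an illustration of the documented behaviour, not the plugin's actual implementation:
-
-```python
-def effective_interval(update_every, consecutive_failures, penalty=True):
-    """Rough sketch of the documented python.d penalty back-off."""
-    if not penalty:
-        return update_every
-    # one extra interval "step" for every 5 failed updates in a row,
-    # capped at 10 minutes (600 seconds)
-    steps = consecutive_failures // 5
-    return min(update_every * (1 + steps), 600)
-```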
- -#### Examples - -##### Basic - -A basic example configuration. - -
-<details open><summary>Config</summary>
-
-```yaml
-my_job_name:
-  name: my_name
-  update_every: 1
-
-```
-</details>
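-
-Before restarting the Agent, it can help to confirm that the sudoers rule really lets the `netdata` user run `smbstatus` without a password; the collector performs the same `sudo -n -l` probe internally. A minimal standalone check, assuming `/usr/bin/smbstatus` is the path reported by `which smbstatus` (adjust if different), run as the `netdata` user:
-
-```python
-import subprocess
-
-# `sudo -n -l <command>` exits 0 only if <command> may run without a password
-result = subprocess.run(['sudo', '-n', '-l', '/usr/bin/smbstatus', '-P'],
-                        capture_output=True, text=True)
-print('ok' if result.returncode == 0 else 'sudoers rule missing or wrong path')
-```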
- - - -## Troubleshooting - -### Debug Mode - - -To troubleshoot issues with the `samba` collector, run the `python.d.plugin` with the debug option enabled. The output -should give you clues as to why the collector isn't working. - -- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on - your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. - - ```bash - cd /usr/libexec/netdata/plugins.d/ - ``` - -- Switch to the `netdata` user. - - ```bash - sudo -u netdata -s - ``` - -- Run the `python.d.plugin` to debug the collector: - - ```bash - ./python.d.plugin samba debug trace - ``` - -### Getting Logs - -If you're encountering problems with the `samba` collector, follow these steps to retrieve logs and identify potential issues: - -- **Run the command** specific to your system (systemd, non-systemd, or Docker container). -- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem. - -#### System with systemd - -Use the following command to view logs generated since the last Netdata service restart: - -```bash -journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep samba -``` - -#### System without systemd - -Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name: - -```bash -grep samba /var/log/netdata/collector.log -``` - -**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues. - -#### Docker Container - -If your Netdata runs in a Docker container named "netdata" (replace if different), use this command: - -```bash -docker logs netdata 2>&1 | grep samba -``` - - diff --git a/src/collectors/python.d.plugin/samba/metadata.yaml b/src/collectors/python.d.plugin/samba/metadata.yaml deleted file mode 100644 index 09c04e7d4..000000000 --- a/src/collectors/python.d.plugin/samba/metadata.yaml +++ /dev/null @@ -1,205 +0,0 @@ -plugin_name: python.d.plugin -modules: - - meta: - plugin_name: python.d.plugin - module_name: samba - monitored_instance: - name: Samba - link: https://www.samba.org/samba/ - categories: - - data-collection.storage-mount-points-and-filesystems - icon_filename: "samba.svg" - related_resources: - integrations: - list: [] - info_provided_to_referring_integrations: - description: "" - keywords: - - samba - - file sharing - most_popular: false - overview: - data_collection: - metrics_description: "This collector monitors the performance metrics of Samba file sharing." - method_description: | - It is using the `smbstatus` command-line tool. - - Executed commands: - - - `sudo -n smbstatus -P` - supported_platforms: - include: [] - exclude: [] - multi_instance: false - additional_permissions: - description: | - `smbstatus` is used, which can only be executed by `root`. It uses `sudo` and assumes that it is configured such that the `netdata` user can execute `smbstatus` as root without a password. - default_behavior: - auto_detection: - description: "After all the permissions are satisfied, the `smbstatus -P` binary is executed." - limits: - description: "" - performance_impact: - description: "" - setup: - prerequisites: - list: - - title: Enable the samba collector - description: | - The `samba` collector is disabled by default. 
To enable it, use `edit-config` from the Netdata [config directory](/docs/netdata-agent/configuration/README.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file. - - ```bash - cd /etc/netdata # Replace this path with your Netdata config directory, if different - sudo ./edit-config python.d.conf - ``` - Change the value of the `samba` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](/packaging/installer/README.md#maintaining-a-netdata-agent-installation) for your system. - - title: Permissions and programs - description: | - To run the collector you need: - - - `smbstatus` program - - `sudo` program - - `smbd` must be compiled with profiling enabled - - `smbd` must be started either with the `-P 1` option or inside `smb.conf` using `smbd profiling level` - - The module uses `smbstatus`, which can only be executed by `root`. It uses `sudo` and assumes that it is configured such that the `netdata` user can execute `smbstatus` as root without a password. - - - add to your `/etc/sudoers` file: - - `which smbstatus` shows the full path to the binary. - - ```bash - netdata ALL=(root) NOPASSWD: /path/to/smbstatus - ``` - - - Reset Netdata's systemd unit [CapabilityBoundingSet](https://www.freedesktop.org/software/systemd/man/systemd.exec.html#Capabilities) (Linux distributions with systemd) - - The default CapabilityBoundingSet doesn't allow using `sudo`, and is quite strict in general. Resetting is not optimal, but a next-best solution given the inability to execute `smbstatus` using `sudo`. - - - As the `root` user, do the following: - - ```cmd - mkdir /etc/systemd/system/netdata.service.d - echo -e '[Service]\nCapabilityBoundingSet=~' | tee /etc/systemd/system/netdata.service.d/unset-capability-bounding-set.conf - systemctl daemon-reload - systemctl restart netdata.service - ``` - configuration: - file: - name: python.d/samba.conf - options: - description: | - There are 2 sections: - - * Global variables - * One or more JOBS that can define multiple different instances to monitor. - - The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. - - Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. - - Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. - folding: - title: "Config options" - enabled: true - list: - - name: update_every - description: Sets the default data collection frequency. - default_value: 5 - required: false - - name: priority - description: Controls the order of charts at the netdata dashboard. - default_value: 60000 - required: false - - name: autodetection_retry - description: Sets the job re-check interval in seconds. - default_value: 0 - required: false - - name: penalty - description: Indicates whether to apply penalty to update_every in case of failures. - default_value: yes - required: false - examples: - folding: - enabled: true - title: "Config" - list: - - name: Basic - description: A basic example configuration. - config: | - my_job_name: - name: my_name - update_every: 1 - troubleshooting: - problems: - list: [] - alerts: [] - metrics: - folding: - title: Metrics - enabled: false - description: "" - availability: [] - scopes: - - name: global - description: "These metrics refer to the entire monitored application." 
- labels: [] - metrics: - - name: syscall.rw - description: R/Ws - unit: "KiB/s" - chart_type: area - dimensions: - - name: sendfile - - name: recvfile - - name: smb2.rw - description: R/Ws - unit: "KiB/s" - chart_type: area - dimensions: - - name: readout - - name: writein - - name: readin - - name: writeout - - name: smb2.create_close - description: Create/Close - unit: "operations/s" - chart_type: line - dimensions: - - name: create - - name: close - - name: smb2.get_set_info - description: Info - unit: "operations/s" - chart_type: line - dimensions: - - name: getinfo - - name: setinfo - - name: smb2.find - description: Find - unit: "operations/s" - chart_type: line - dimensions: - - name: find - - name: smb2.notify - description: Notify - unit: "operations/s" - chart_type: line - dimensions: - - name: notify - - name: smb2.sm_counters - description: Lesser Ops - unit: "count" - chart_type: stacked - dimensions: - - name: tcon - - name: negprot - - name: tdis - - name: cancel - - name: logoff - - name: flush - - name: lock - - name: keepalive - - name: break - - name: sessetup diff --git a/src/collectors/python.d.plugin/samba/samba.chart.py b/src/collectors/python.d.plugin/samba/samba.chart.py deleted file mode 100644 index 8eebcd60c..000000000 --- a/src/collectors/python.d.plugin/samba/samba.chart.py +++ /dev/null @@ -1,144 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: samba netdata python.d module -# Author: Christopher Cox -# SPDX-License-Identifier: GPL-3.0-or-later -# -# The netdata user needs to be able to be able to sudo the smbstatus program -# without password: -# netdata ALL=(ALL) NOPASSWD: /usr/bin/smbstatus -P -# -# This makes calls to smbstatus -P -# -# This just looks at a couple of values out of syscall, and some from smb2. -# -# The Lesser Ops chart is merely a display of current counter values. They -# didn't seem to change much to me. However, if you notice something changing -# a lot there, bring one or more out into its own chart and make it incremental -# (like find and notify... good examples). 
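-#
-# For reference, the profiling output parsed below consists of "name: value"
-# counter lines; a hypothetical (version-dependent) sample looks like:
-#
-#   syscall_sendfile_bytes:    1234567
-#   smb2_read_inbytes:         7654321
-#
-# The regex built in Service.__init__ turns such lines into name/value pairs.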
- -import re -import os - -from bases.FrameworkServices.ExecutableService import ExecutableService -from bases.collection import find_binary - -disabled_by_default = True - -update_every = 5 - -ORDER = [ - 'syscall_rw', - 'smb2_rw', - 'smb2_create_close', - 'smb2_info', - 'smb2_find', - 'smb2_notify', - 'smb2_sm_count' -] - -CHARTS = { - 'syscall_rw': { - 'options': [None, 'R/Ws', 'KiB/s', 'syscall', 'syscall.rw', 'area'], - 'lines': [ - ['syscall_sendfile_bytes', 'sendfile', 'incremental', 1, 1024], - ['syscall_recvfile_bytes', 'recvfile', 'incremental', -1, 1024] - ] - }, - 'smb2_rw': { - 'options': [None, 'R/Ws', 'KiB/s', 'smb2', 'smb2.rw', 'area'], - 'lines': [ - ['smb2_read_outbytes', 'readout', 'incremental', 1, 1024], - ['smb2_write_inbytes', 'writein', 'incremental', -1, 1024], - ['smb2_read_inbytes', 'readin', 'incremental', 1, 1024], - ['smb2_write_outbytes', 'writeout', 'incremental', -1, 1024] - ] - }, - 'smb2_create_close': { - 'options': [None, 'Create/Close', 'operations/s', 'smb2', 'smb2.create_close', 'line'], - 'lines': [ - ['smb2_create_count', 'create', 'incremental', 1, 1], - ['smb2_close_count', 'close', 'incremental', -1, 1] - ] - }, - 'smb2_info': { - 'options': [None, 'Info', 'operations/s', 'smb2', 'smb2.get_set_info', 'line'], - 'lines': [ - ['smb2_getinfo_count', 'getinfo', 'incremental', 1, 1], - ['smb2_setinfo_count', 'setinfo', 'incremental', -1, 1] - ] - }, - 'smb2_find': { - 'options': [None, 'Find', 'operations/s', 'smb2', 'smb2.find', 'line'], - 'lines': [ - ['smb2_find_count', 'find', 'incremental', 1, 1] - ] - }, - 'smb2_notify': { - 'options': [None, 'Notify', 'operations/s', 'smb2', 'smb2.notify', 'line'], - 'lines': [ - ['smb2_notify_count', 'notify', 'incremental', 1, 1] - ] - }, - 'smb2_sm_count': { - 'options': [None, 'Lesser Ops', 'count', 'smb2', 'smb2.sm_counters', 'stacked'], - 'lines': [ - ['smb2_tcon_count', 'tcon', 'absolute', 1, 1], - ['smb2_negprot_count', 'negprot', 'absolute', 1, 1], - ['smb2_tdis_count', 'tdis', 'absolute', 1, 1], - ['smb2_cancel_count', 'cancel', 'absolute', 1, 1], - ['smb2_logoff_count', 'logoff', 'absolute', 1, 1], - ['smb2_flush_count', 'flush', 'absolute', 1, 1], - ['smb2_lock_count', 'lock', 'absolute', 1, 1], - ['smb2_keepalive_count', 'keepalive', 'absolute', 1, 1], - ['smb2_break_count', 'break', 'absolute', 1, 1], - ['smb2_sessetup_count', 'sessetup', 'absolute', 1, 1] - ] - } -} - -SUDO = 'sudo' -SMBSTATUS = 'smbstatus' - - -class Service(ExecutableService): - def __init__(self, configuration=None, name=None): - ExecutableService.__init__(self, configuration=configuration, name=name) - self.order = ORDER - self.definitions = CHARTS - self.rgx_smb2 = re.compile(r'(smb2_[^:]+|syscall_.*file_bytes):\s+(\d+)') - - def check(self): - smbstatus_binary = find_binary(SMBSTATUS) - if not smbstatus_binary: - self.error("can't locate '{0}' binary".format(SMBSTATUS)) - return False - - if os.getuid() == 0: - self.command = ' '.join([smbstatus_binary, '-P']) - return ExecutableService.check(self) - - sudo_binary = find_binary(SUDO) - if not sudo_binary: - self.error("can't locate '{0}' binary".format(SUDO)) - return False - command = [sudo_binary, '-n', '-l', smbstatus_binary, '-P'] - smbstatus = '{0} -P'.format(smbstatus_binary) - allowed = self._get_raw_data(command=command) - if not (allowed and allowed[0].strip() == smbstatus): - self.error("not allowed to run sudo for command '{0}'".format(smbstatus)) - return False - self.command = ' '.join([sudo_binary, '-n', smbstatus_binary, '-P']) - return 
ExecutableService.check(self) - - def _get_data(self): - """ - Format data received from shell command - :return: dict - """ - raw_data = self._get_raw_data() - if not raw_data: - return None - - parsed = self.rgx_smb2.findall(' '.join(raw_data)) - - return dict(parsed) or None diff --git a/src/collectors/python.d.plugin/samba/samba.conf b/src/collectors/python.d.plugin/samba/samba.conf deleted file mode 100644 index db15d4e9e..000000000 --- a/src/collectors/python.d.plugin/samba/samba.conf +++ /dev/null @@ -1,60 +0,0 @@ -# netdata python.d.plugin configuration for samba -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -update_every: 5 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# penalty indicates whether to apply penalty to update_every in case of failures. -# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. -# penalty: yes - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. 
These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# penalty: yes # the JOB's penalty -# autodetection_retry: 0 # the JOB's re-check interval in seconds \ No newline at end of file diff --git a/src/collectors/python.d.plugin/spigotmc/README.md b/src/collectors/python.d.plugin/spigotmc/README.md deleted file mode 120000 index 66e5c9c47..000000000 --- a/src/collectors/python.d.plugin/spigotmc/README.md +++ /dev/null @@ -1 +0,0 @@ -integrations/spigotmc.md \ No newline at end of file diff --git a/src/collectors/python.d.plugin/spigotmc/integrations/spigotmc.md b/src/collectors/python.d.plugin/spigotmc/integrations/spigotmc.md deleted file mode 100644 index 2e5e60669..000000000 --- a/src/collectors/python.d.plugin/spigotmc/integrations/spigotmc.md +++ /dev/null @@ -1,250 +0,0 @@ - - -# SpigotMC - - - - - -Plugin: python.d.plugin -Module: spigotmc - - - -## Overview - -This collector monitors SpigotMC server performance, in the form of ticks per second average, memory utilization, and active users. - - -It sends the `tps`, `list` and `online` commands to the Server, and gathers the metrics from the responses. - - -This collector is only supported on the following platforms: - -- Linux - -This collector supports collecting metrics from multiple instances of this integration, including remote instances. - - -### Default Behavior - -#### Auto-Detection - -By default, this collector will attempt to connect to a Spigot server running on the local host on port `25575`. - -#### Limits - -The default configuration for this integration does not impose any limits on data collection. - -#### Performance Impact - -The default configuration for this integration is not expected to impose a significant performance impact on the system. - - -## Metrics - -Metrics grouped by *scope*. - -The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. - - - -### Per SpigotMC instance - -These metrics refer to the entire monitored application. - -This scope has no labels. - -Metrics: - -| Metric | Dimensions | Unit | -|:------|:----------|:----| -| spigotmc.tps | 1 Minute Average, 5 Minute Average, 15 Minute Average | ticks | -| spigotmc.users | Users | users | -| spigotmc.mem | used, allocated, max | MiB | - - - -## Alerts - -There are no alerts configured by default for this integration. - - -## Setup - -### Prerequisites - -#### Enable the Remote Console Protocol - -Under your SpigotMC server's `server.properties` configuration file, you should set `enable-rcon` to `true`. - -This will allow the Server to listen and respond to queries over the rcon protocol. - - - -### Configuration - -#### File - -The configuration file name for this integration is `python.d/spigotmc.conf`. - - -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). - -```bash -cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata -sudo ./edit-config python.d/spigotmc.conf -``` -#### Options - -There are 2 sections: - -* Global variables -* One or more JOBS that can define multiple different instances to monitor. 
- -The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. - -Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. - -Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. - - -
-<details open><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 1 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
-| host | The host's IP to connect to. | localhost | yes |
-| port | The port the remote console is listening on. | 25575 | yes |
-| password | Remote console password if any. | | no |
-
-</details>
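-
-These connection options are handed to the bundled `third_party.mcrcon` module. A minimal sketch of the same RCON exchange the collector performs, assuming the bundled module is importable (e.g. when run from the plugin's `python_modules` directory):
-
-```python
-from third_party import mcrcon  # bundled with python.d.plugin
-
-console = mcrcon.MCRcon()
-console.connect('127.0.0.1', 25575, 'foobar')  # host, port, password from the job
-print(console.command('tps'))  # raw reply that the collector parses
-console.disconnect()
-```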
-
-#### Examples
-
-##### Basic
-
-A basic configuration example.
-
-```yaml
-local:
-  name: local_server
-  host: 127.0.0.1
-  port: 25575
-
-```
-##### Basic Authentication
-
-An example using basic password for authentication with the remote console.
-
-<details open><summary>Config</summary>
-
-```yaml
-local:
-  name: local_server_pass
-  host: 127.0.0.1
-  port: 25575
-  password: 'foobar'
-
-```
-</details>
- -##### Multi-instance - -> **Note**: When you define multiple jobs, their names must be unique. - -Collecting metrics from local and remote instances. - - -
-<details open><summary>Config</summary>
-
-```yaml
-local_server:
-  name : my_local_server
-  host : 127.0.0.1
-  port: 25575
-
-remote_server:
-  name : another_remote_server
-  host : 192.0.2.1
-  port: 25575
-
-```
-</details>
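-
-Under the hood, the collector scrapes numbers out of the colour-coded console replies. A stripped-down version of that parsing, using the sample `tps` reply quoted in the module source (`§` is the Minecraft colour-code marker; the collector's real regex also extracts memory figures):
-
-```python
-import re
-
-sample = '§6TPS from last 1m, 5m, 15m: §a*20.0, §a*20.0, §a*20.0'
-print([float(v) for v in re.findall(r'(\d{1,2}\.\d+)', sample)])
-# [20.0, 20.0, 20.0]
-```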
- - - -## Troubleshooting - -### Debug Mode - - -To troubleshoot issues with the `spigotmc` collector, run the `python.d.plugin` with the debug option enabled. The output -should give you clues as to why the collector isn't working. - -- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on - your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. - - ```bash - cd /usr/libexec/netdata/plugins.d/ - ``` - -- Switch to the `netdata` user. - - ```bash - sudo -u netdata -s - ``` - -- Run the `python.d.plugin` to debug the collector: - - ```bash - ./python.d.plugin spigotmc debug trace - ``` - -### Getting Logs - -If you're encountering problems with the `spigotmc` collector, follow these steps to retrieve logs and identify potential issues: - -- **Run the command** specific to your system (systemd, non-systemd, or Docker container). -- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem. - -#### System with systemd - -Use the following command to view logs generated since the last Netdata service restart: - -```bash -journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep spigotmc -``` - -#### System without systemd - -Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name: - -```bash -grep spigotmc /var/log/netdata/collector.log -``` - -**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues. - -#### Docker Container - -If your Netdata runs in a Docker container named "netdata" (replace if different), use this command: - -```bash -docker logs netdata 2>&1 | grep spigotmc -``` - - diff --git a/src/collectors/python.d.plugin/spigotmc/metadata.yaml b/src/collectors/python.d.plugin/spigotmc/metadata.yaml deleted file mode 100644 index 5dea9f0c8..000000000 --- a/src/collectors/python.d.plugin/spigotmc/metadata.yaml +++ /dev/null @@ -1,176 +0,0 @@ -plugin_name: python.d.plugin -modules: - - meta: - plugin_name: python.d.plugin - module_name: spigotmc - monitored_instance: - name: SpigotMC - link: "" - categories: - - data-collection.gaming - icon_filename: "spigot.jfif" - related_resources: - integrations: - list: [] - info_provided_to_referring_integrations: - description: "" - keywords: - - minecraft server - - spigotmc server - - spigot - most_popular: false - overview: - data_collection: - metrics_description: | - This collector monitors SpigotMC server performance, in the form of ticks per second average, memory utilization, and active users. - method_description: | - It sends the `tps`, `list` and `online` commands to the Server, and gathers the metrics from the responses. - supported_platforms: - include: - - Linux - exclude: [] - multi_instance: true - additional_permissions: - description: "" - default_behavior: - auto_detection: - description: By default, this collector will attempt to connect to a Spigot server running on the local host on port `25575`. - limits: - description: "" - performance_impact: - description: "" - setup: - prerequisites: - list: - - title: Enable the Remote Console Protocol - description: | - Under your SpigotMC server's `server.properties` configuration file, you should set `enable-rcon` to `true`. 
-
-              This will allow the Server to listen and respond to queries over the rcon protocol.
-      configuration:
-        file:
-          name: "python.d/spigotmc.conf"
-        options:
-          description: |
-            There are 2 sections:
-
-            * Global variables
-            * One or more JOBS that can define multiple different instances to monitor.
-
-            The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-            Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-            Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-          folding:
-            title: "Config options"
-            enabled: true
-          list:
-            - name: update_every
-              description: Sets the default data collection frequency.
-              default_value: 1
-              required: false
-            - name: priority
-              description: Controls the order of charts at the netdata dashboard.
-              default_value: 60000
-              required: false
-            - name: autodetection_retry
-              description: Sets the job re-check interval in seconds.
-              default_value: 0
-              required: false
-            - name: penalty
-              description: Indicates whether to apply penalty to update_every in case of failures.
-              default_value: yes
-              required: false
-            - name: name
-              description: >
-                Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed
-                running at any time. This allows autodetection to try several alternatives and pick the one that works.
-              default_value: ""
-              required: false
-            - name: host
-              description: The host's IP to connect to.
-              default_value: localhost
-              required: true
-            - name: port
-              description: The port the remote console is listening on.
-              default_value: 25575
-              required: true
-            - name: password
-              description: Remote console password if any.
-              default_value: ""
-              required: false
-        examples:
-          folding:
-            enabled: true
-            title: "Config"
-          list:
-            - name: Basic
-              description: A basic configuration example.
-              folding:
-                enabled: false
-              config: |
-                local:
-                  name: local_server
-                  host: 127.0.0.1
-                  port: 25575
-            - name: Basic Authentication
-              description: An example using basic password for authentication with the remote console.
-              config: |
-                local:
-                  name: local_server_pass
-                  host: 127.0.0.1
-                  port: 25575
-                  password: 'foobar'
-            - name: Multi-instance
-              description: |
-                > **Note**: When you define multiple jobs, their names must be unique.
-
-                Collecting metrics from local and remote instances.
-              config: |
-                local_server:
-                  name : my_local_server
-                  host : 127.0.0.1
-                  port: 25575
-
-                remote_server:
-                  name : another_remote_server
-                  host : 192.0.2.1
-                  port: 25575
-    troubleshooting:
-      problems:
-        list: []
-    alerts: []
-    metrics:
-      folding:
-        title: Metrics
-        enabled: false
-      description: ""
-      availability: []
-      scopes:
-        - name: global
-          description: "These metrics refer to the entire monitored application."
- labels: [] - metrics: - - name: spigotmc.tps - description: Spigot Ticks Per Second - unit: "ticks" - chart_type: line - dimensions: - - name: 1 Minute Average - - name: 5 Minute Average - - name: 15 Minute Average - - name: spigotmc.users - description: Minecraft Users - unit: "users" - chart_type: area - dimensions: - - name: Users - - name: spigotmc.mem - description: Minecraft Memory Usage - unit: "MiB" - chart_type: line - dimensions: - - name: used - - name: allocated - - name: max diff --git a/src/collectors/python.d.plugin/spigotmc/spigotmc.chart.py b/src/collectors/python.d.plugin/spigotmc/spigotmc.chart.py deleted file mode 100644 index 81370fb4c..000000000 --- a/src/collectors/python.d.plugin/spigotmc/spigotmc.chart.py +++ /dev/null @@ -1,184 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: spigotmc netdata python.d module -# Author: Austin S. Hemmelgarn (Ferroin) -# SPDX-License-Identifier: GPL-3.0-or-later - -import platform -import re -import socket - -from bases.FrameworkServices.SimpleService import SimpleService -from third_party import mcrcon - -# Update only every 5 seconds because collection takes in excess of -# 100ms sometimes, and most people won't care about second-by-second data. -update_every = 5 - -PRECISION = 100 - -COMMAND_TPS = 'tps' -COMMAND_LIST = 'list' -COMMAND_ONLINE = 'online' - -ORDER = [ - 'tps', - 'mem', - 'users', -] - -CHARTS = { - 'tps': { - 'options': [None, 'Spigot Ticks Per Second', 'ticks', 'spigotmc', 'spigotmc.tps', 'line'], - 'lines': [ - ['tps1', '1 Minute Average', 'absolute', 1, PRECISION], - ['tps5', '5 Minute Average', 'absolute', 1, PRECISION], - ['tps15', '15 Minute Average', 'absolute', 1, PRECISION] - ] - }, - 'users': { - 'options': [None, 'Minecraft Users', 'users', 'spigotmc', 'spigotmc.users', 'area'], - 'lines': [ - ['users', 'Users', 'absolute', 1, 1] - ] - }, - 'mem': { - 'options': [None, 'Minecraft Memory Usage', 'MiB', 'spigotmc', 'spigotmc.mem', 'line'], - 'lines': [ - ['mem_used', 'used', 'absolute', 1, 1], - ['mem_alloc', 'allocated', 'absolute', 1, 1], - ['mem_max', 'max', 'absolute', 1, 1] - ] - } -} - -_TPS_REGEX = re.compile( - # Examples: - # §6TPS from last 1m, 5m, 15m: §a*20.0, §a*20.0, §a*20.0 - # §6Current Memory Usage: §a936/65536 mb (Max: 65536 mb) - r'^.*: .*?' # Message lead-in - r'(\d{1,2}.\d+), .*?' # 1-minute TPS value - r'(\d{1,2}.\d+), .*?' # 5-minute TPS value - r'(\d{1,2}\.\d+).*?' # 15-minute TPS value - r'(\s.*?(\d+)\/(\d+).*?: (\d+).*)?', # Current Memory Usage / Total Memory (Max Memory) - re.MULTILINE -) -_LIST_REGEX = re.compile( - # Examples: - # There are 4 of a max 50 players online: player1, player2, player3, player4 - # §6There are §c4§6 out of maximum §c50§6 players online. - # §6There are §c3§6/§c1§6 out of maximum §c50§6 players online. - # §6当前有 §c4§6 个玩家在线,最大在线人数为 §c50§6 个玩家. - # §c4§6 人のプレイヤーが接続中です。最大接続可能人数\:§c 50 - r'[^§](\d+)(?:.*?(?=/).*?[^§](\d+))?', # Current user count. 
- re.X -) - - -class Service(SimpleService): - def __init__(self, configuration=None, name=None): - SimpleService.__init__(self, configuration=configuration, name=name) - self.order = ORDER - self.definitions = CHARTS - self.host = self.configuration.get('host', 'localhost') - self.port = self.configuration.get('port', 25575) - self.password = self.configuration.get('password', '') - self.console = mcrcon.MCRcon() - self.alive = True - - def check(self): - if platform.system() != 'Linux': - self.error('Only supported on Linux.') - return False - try: - self.connect() - except (mcrcon.MCRconException, socket.error) as err: - self.error('Error connecting.') - self.error(repr(err)) - return False - - return self._get_data() - - def connect(self): - self.console.connect(self.host, self.port, self.password) - - def reconnect(self): - self.error('try reconnect.') - try: - try: - self.console.disconnect() - except mcrcon.MCRconException: - pass - self.console.connect(self.host, self.port, self.password) - self.alive = True - except (mcrcon.MCRconException, socket.error) as err: - self.error('Error connecting.') - self.error(repr(err)) - return False - return True - - def is_alive(self): - if any( - [ - not self.alive, - self.console.socket.getsockopt(socket.IPPROTO_TCP, socket.TCP_INFO, 0) != 1 - ] - ): - return self.reconnect() - return True - - def _get_data(self): - if not self.is_alive(): - return None - - data = {} - - try: - raw = self.console.command(COMMAND_TPS) - match = _TPS_REGEX.match(raw) - if match: - data['tps1'] = int(float(match.group(1)) * PRECISION) - data['tps5'] = int(float(match.group(2)) * PRECISION) - data['tps15'] = int(float(match.group(3)) * PRECISION) - if match.group(4): - data['mem_used'] = int(match.group(5)) - data['mem_alloc'] = int(match.group(6)) - data['mem_max'] = int(match.group(7)) - else: - self.error('Unable to process TPS values.') - if not raw: - self.error( - "'{0}' command returned no value, make sure you set correct password".format(COMMAND_TPS)) - except mcrcon.MCRconException: - self.error('Unable to fetch TPS values.') - except socket.error: - self.error('Connection is dead.') - self.alive = False - return None - - try: - raw = self.console.command(COMMAND_LIST) - match = _LIST_REGEX.search(raw) - if not match: - raw = self.console.command(COMMAND_ONLINE) - match = _LIST_REGEX.search(raw) - if match: - users = int(match.group(1)) - hidden_users = match.group(2) - if hidden_users: - hidden_users = int(hidden_users) - else: - hidden_users = 0 - data['users'] = users + hidden_users - else: - if not raw: - self.error("'{0}' and '{1}' commands returned no value, make sure you set correct password".format( - COMMAND_LIST, COMMAND_ONLINE)) - self.error('Unable to process user counts.') - except mcrcon.MCRconException: - self.error('Unable to fetch user counts.') - except socket.error: - self.error('Connection is dead.') - self.alive = False - return None - - return data diff --git a/src/collectors/python.d.plugin/spigotmc/spigotmc.conf b/src/collectors/python.d.plugin/spigotmc/spigotmc.conf deleted file mode 100644 index f0064ea2f..000000000 --- a/src/collectors/python.d.plugin/spigotmc/spigotmc.conf +++ /dev/null @@ -1,66 +0,0 @@ -# netdata python.d.plugin configuration for spigotmc -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. 
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-#     name: myname            # the JOB's name as it will appear at the
-#                             # dashboard (by default is the job_name)
-#                             # JOBs sharing a name are mutually exclusive
-#     update_every: 1         # the JOB's data collection frequency
-#     priority: 60000         # the JOB's order on the dashboard
-#     penalty: yes            # the JOB's penalty
-#     autodetection_retry: 0  # the JOB's re-check interval in seconds
-#
-# In addition to the above, spigotmc supports the following:
-#
-# host: localhost    # The host to connect to. Defaults to the local system.
-# port: 25575        # The port the remote console is listening on.
-# password: ''       # The remote console password. Must be set correctly.
diff --git a/src/collectors/python.d.plugin/traefik/README.md b/src/collectors/python.d.plugin/traefik/README.md
index 079f309c7..f4574051a 100644
--- a/src/collectors/python.d.plugin/traefik/README.md
+++ b/src/collectors/python.d.plugin/traefik/README.md
@@ -1,12 +1,3 @@
-
-
 # Traefik collector
 
 Uses the `health` API to provide statistics.
diff --git a/src/collectors/python.d.plugin/varnish/README.md b/src/collectors/python.d.plugin/varnish/README.md
deleted file mode 120000
index 194be2335..000000000
--- a/src/collectors/python.d.plugin/varnish/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/varnish.md
\ No newline at end of file
diff --git a/src/collectors/python.d.plugin/varnish/integrations/varnish.md b/src/collectors/python.d.plugin/varnish/integrations/varnish.md
deleted file mode 100644
index 5850dcc4c..000000000
--- a/src/collectors/python.d.plugin/varnish/integrations/varnish.md
+++ /dev/null
@@ -1,247 +0,0 @@
-
-
-# Varnish
-
-
-
-
-
-Plugin: python.d.plugin
-Module: varnish
-
-
-
-## Overview
-
-This collector monitors Varnish metrics about HTTP accelerator global, Backends (VBE) and Storages (SMF, SMA, MSE) statistics.
-
-Note that both, Varnish-Cache (free and open source) and Varnish-Plus (Commercial/Enterprise version), are supported.
-
-
-It uses the `varnishstat` tool in order to collect the metrics.
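
To make that concrete, below is a minimal, illustrative sketch (not the collector's own code) of shelling out to `varnishstat` and reading a few integer counters from its one-shot output. It assumes `varnishstat` is on `PATH` and that the invoking user is permitted to run it (for Netdata, the `netdata` user in the `varnish` group):

```python
# Illustrative sketch only (not the collector): run `varnishstat -1`
# and pull a few whole-number counters out of its plain-text output.
import re
import subprocess

# Same idea as the collector's default pattern: an optional uppercase
# prefix (e.g. "MAIN."), a field name, then an integer value.
STAT_LINE = re.compile(r'([A-Z]+\.)?([\d\w_.]+)\s+(\d+)')


def varnish_stats():
    out = subprocess.run(['varnishstat', '-1'],
                         capture_output=True, text=True, check=True).stdout
    stats = {}
    for line in out.splitlines():
        m = STAT_LINE.match(line.strip())
        if m:
            stats[m.group(2)] = int(m.group(3))
    return stats


if __name__ == '__main__':
    stats = varnish_stats()
    # Counters the collector charts as sessions and client requests.
    print({k: v for k, v in stats.items() if k in ('sess_conn', 'client_req')})
```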
- - -This collector is supported on all platforms. - -This collector only supports collecting metrics from a single instance of this integration. - -`netdata` user must be a member of the `varnish` group. - - -### Default Behavior - -#### Auto-Detection - -By default, if the permissions are satisfied, the `varnishstat` tool will be executed on the host. - -#### Limits - -The default configuration for this integration does not impose any limits on data collection. - -#### Performance Impact - -The default configuration for this integration is not expected to impose a significant performance impact on the system. - - -## Metrics - -Metrics grouped by *scope*. - -The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. - - - -### Per Varnish instance - -These metrics refer to the entire monitored application. - -This scope has no labels. - -Metrics: - -| Metric | Dimensions | Unit | -|:------|:----------|:----| -| varnish.session_connection | accepted, dropped | connections/s | -| varnish.client_requests | received | requests/s | -| varnish.all_time_hit_rate | hit, miss, hitpass | percentage | -| varnish.current_poll_hit_rate | hit, miss, hitpass | percentage | -| varnish.cached_objects_expired | objects | expired/s | -| varnish.cached_objects_nuked | objects | nuked/s | -| varnish.threads_total | None | number | -| varnish.threads_statistics | created, failed, limited | threads/s | -| varnish.threads_queue_len | in queue | requests | -| varnish.backend_connections | successful, unhealthy, reused, closed, recycled, failed | connections/s | -| varnish.backend_requests | sent | requests/s | -| varnish.esi_statistics | errors, warnings | problems/s | -| varnish.memory_usage | free, allocated | MiB | -| varnish.uptime | uptime | seconds | - -### Per Backend - - - -This scope has no labels. - -Metrics: - -| Metric | Dimensions | Unit | -|:------|:----------|:----| -| varnish.backend | header, body | kilobits/s | - -### Per Storage - - - -This scope has no labels. - -Metrics: - -| Metric | Dimensions | Unit | -|:------|:----------|:----| -| varnish.storage_usage | free, allocated | KiB | -| varnish.storage_alloc_objs | allocated | objects | - - - -## Alerts - -There are no alerts configured by default for this integration. - - -## Setup - -### Prerequisites - -#### Provide the necessary permissions - -In order for the collector to work, you need to add the `netdata` user to the `varnish` user group, so that it can execute the `varnishstat` tool: - -``` -usermod -aG varnish netdata -``` - - - -### Configuration - -#### File - -The configuration file name for this integration is `python.d/varnish.conf`. - - -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). - -```bash -cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata -sudo ./edit-config python.d/varnish.conf -``` -#### Options - -There are 2 sections: - -* Global variables -* One or more JOBS that can define multiple different instances to monitor. - -The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. - -Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. 
- -Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. - - -
Config options - -| Name | Description | Default | Required | -|:----|:-----------|:-------|:--------:| -| instance_name | the name of the varnishd instance to get logs from. If not specified, the local host name is used. | | yes | -| update_every | Sets the default data collection frequency. | 10 | no | -| priority | Controls the order of charts at the netdata dashboard. | 60000 | no | -| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no | -| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no | -| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no | - -
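
As a rough sketch of how the `instance_name` option above feeds into the command that gets run, the removed `varnish.chart.py` further down in this patch assembles its `varnishstat` invocation approximately like this (paraphrased and simplified, not a verbatim excerpt):

```python
# Paraphrased from the removed varnish.chart.py: how `instance_name`
# shapes the varnishstat command line (a timeout flag is added on
# Varnish newer than major version 4).
def build_varnishstat_command(varnishstat, instance_name=None, major_version=6):
    command = [varnishstat, '-1']          # one-shot, plain-text output
    if instance_name:
        command += ['-n', instance_name]   # target a named varnishd instance
    if major_version > 4:
        command += ['-t', '1']             # 1-second timeout
    return command


print(build_varnishstat_command('/usr/bin/varnishstat', 'myinstance'))
```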
- -#### Examples - -##### Basic - -An example configuration. - -```yaml -job_name: - instance_name: '' - -``` - - -## Troubleshooting - -### Debug Mode - - -To troubleshoot issues with the `varnish` collector, run the `python.d.plugin` with the debug option enabled. The output -should give you clues as to why the collector isn't working. - -- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on - your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. - - ```bash - cd /usr/libexec/netdata/plugins.d/ - ``` - -- Switch to the `netdata` user. - - ```bash - sudo -u netdata -s - ``` - -- Run the `python.d.plugin` to debug the collector: - - ```bash - ./python.d.plugin varnish debug trace - ``` - -### Getting Logs - -If you're encountering problems with the `varnish` collector, follow these steps to retrieve logs and identify potential issues: - -- **Run the command** specific to your system (systemd, non-systemd, or Docker container). -- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem. - -#### System with systemd - -Use the following command to view logs generated since the last Netdata service restart: - -```bash -journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep varnish -``` - -#### System without systemd - -Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name: - -```bash -grep varnish /var/log/netdata/collector.log -``` - -**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues. - -#### Docker Container - -If your Netdata runs in a Docker container named "netdata" (replace if different), use this command: - -```bash -docker logs netdata 2>&1 | grep varnish -``` - - diff --git a/src/collectors/python.d.plugin/varnish/metadata.yaml b/src/collectors/python.d.plugin/varnish/metadata.yaml deleted file mode 100644 index d31c1cf6f..000000000 --- a/src/collectors/python.d.plugin/varnish/metadata.yaml +++ /dev/null @@ -1,253 +0,0 @@ -plugin_name: python.d.plugin -modules: - - meta: - plugin_name: python.d.plugin - module_name: varnish - monitored_instance: - name: Varnish - link: https://varnish-cache.org/ - categories: - - data-collection.web-servers-and-web-proxies - icon_filename: 'varnish.svg' - related_resources: - integrations: - list: [] - info_provided_to_referring_integrations: - description: '' - keywords: - - varnish - - varnishstat - - varnishd - - cache - - web server - - web cache - most_popular: false - overview: - data_collection: - metrics_description: | - This collector monitors Varnish metrics about HTTP accelerator global, Backends (VBE) and Storages (SMF, SMA, MSE) statistics. - - Note that both, Varnish-Cache (free and open source) and Varnish-Plus (Commercial/Enterprise version), are supported. - method_description: | - It uses the `varnishstat` tool in order to collect the metrics. - supported_platforms: - include: [] - exclude: [] - multi_instance: false - additional_permissions: - description: | - `netdata` user must be a member of the `varnish` group. - default_behavior: - auto_detection: - description: By default, if the permissions are satisfied, the `varnishstat` tool will be executed on the host. 
- limits: - description: '' - performance_impact: - description: '' - setup: - prerequisites: - list: - - title: Provide the necessary permissions - description: | - In order for the collector to work, you need to add the `netdata` user to the `varnish` user group, so that it can execute the `varnishstat` tool: - - ``` - usermod -aG varnish netdata - ``` - configuration: - file: - name: python.d/varnish.conf - description: '' - options: - description: | - There are 2 sections: - - * Global variables - * One or more JOBS that can define multiple different instances to monitor. - - The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. - - Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. - - Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. - folding: - title: Config options - enabled: true - list: - - name: instance_name - description: the name of the varnishd instance to get logs from. If not specified, the local host name is used. - default_value: "" - required: true - - name: update_every - description: Sets the default data collection frequency. - default_value: 10 - required: false - - name: priority - description: Controls the order of charts at the netdata dashboard. - default_value: 60000 - required: false - - name: autodetection_retry - description: Sets the job re-check interval in seconds. - default_value: 0 - required: false - - name: penalty - description: Indicates whether to apply penalty to update_every in case of failures. - default_value: yes - required: false - - name: name - description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. - default_value: '' - required: false - examples: - folding: - enabled: true - title: 'Config' - list: - - name: Basic - description: An example configuration. - folding: - enabled: false - config: | - job_name: - instance_name: '' - troubleshooting: - problems: - list: [] - alerts: [] - metrics: - folding: - title: Metrics - enabled: false - description: "" - availability: [] - scopes: - - name: global - description: "These metrics refer to the entire monitored application." 
- labels: [] - metrics: - - name: varnish.session_connection - description: Connections Statistics - unit: "connections/s" - chart_type: line - dimensions: - - name: accepted - - name: dropped - - name: varnish.client_requests - description: Client Requests - unit: "requests/s" - chart_type: line - dimensions: - - name: received - - name: varnish.all_time_hit_rate - description: All History Hit Rate Ratio - unit: "percentage" - chart_type: stacked - dimensions: - - name: hit - - name: miss - - name: hitpass - - name: varnish.current_poll_hit_rate - description: Current Poll Hit Rate Ratio - unit: "percentage" - chart_type: stacked - dimensions: - - name: hit - - name: miss - - name: hitpass - - name: varnish.cached_objects_expired - description: Expired Objects - unit: "expired/s" - chart_type: line - dimensions: - - name: objects - - name: varnish.cached_objects_nuked - description: Least Recently Used Nuked Objects - unit: "nuked/s" - chart_type: line - dimensions: - - name: objects - - name: varnish.threads_total - description: Number Of Threads In All Pools - unit: "number" - chart_type: line - dimensions: - - name: None - - name: varnish.threads_statistics - description: Threads Statistics - unit: "threads/s" - chart_type: line - dimensions: - - name: created - - name: failed - - name: limited - - name: varnish.threads_queue_len - description: Current Queue Length - unit: "requests" - chart_type: line - dimensions: - - name: in queue - - name: varnish.backend_connections - description: Backend Connections Statistics - unit: "connections/s" - chart_type: line - dimensions: - - name: successful - - name: unhealthy - - name: reused - - name: closed - - name: recycled - - name: failed - - name: varnish.backend_requests - description: Requests To The Backend - unit: "requests/s" - chart_type: line - dimensions: - - name: sent - - name: varnish.esi_statistics - description: ESI Statistics - unit: "problems/s" - chart_type: line - dimensions: - - name: errors - - name: warnings - - name: varnish.memory_usage - description: Memory Usage - unit: "MiB" - chart_type: stacked - dimensions: - - name: free - - name: allocated - - name: varnish.uptime - description: Uptime - unit: "seconds" - chart_type: line - dimensions: - - name: uptime - - name: Backend - description: "" - labels: [] - metrics: - - name: varnish.backend - description: Backend {backend_name} - unit: "kilobits/s" - chart_type: area - dimensions: - - name: header - - name: body - - name: Storage - description: "" - labels: [] - metrics: - - name: varnish.storage_usage - description: Storage {storage_name} Usage - unit: "KiB" - chart_type: stacked - dimensions: - - name: free - - name: allocated - - name: varnish.storage_alloc_objs - description: Storage {storage_name} Allocated Objects - unit: "objects" - chart_type: line - dimensions: - - name: allocated diff --git a/src/collectors/python.d.plugin/varnish/varnish.chart.py b/src/collectors/python.d.plugin/varnish/varnish.chart.py deleted file mode 100644 index 506ad026a..000000000 --- a/src/collectors/python.d.plugin/varnish/varnish.chart.py +++ /dev/null @@ -1,385 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: varnish netdata python.d module -# Author: ilyam8 -# SPDX-License-Identifier: GPL-3.0-or-later - -import re - -from bases.FrameworkServices.ExecutableService import ExecutableService -from bases.collection import find_binary - -ORDER = [ - 'session_connections', - 'client_requests', - 'all_time_hit_rate', - 'current_poll_hit_rate', - 'cached_objects_expired', - 
'cached_objects_nuked', - 'threads_total', - 'threads_statistics', - 'threads_queue_len', - 'backend_connections', - 'backend_requests', - 'esi_statistics', - 'memory_usage', - 'uptime' -] - -CHARTS = { - 'session_connections': { - 'options': [None, 'Connections Statistics', 'connections/s', - 'client metrics', 'varnish.session_connection', 'line'], - 'lines': [ - ['sess_conn', 'accepted', 'incremental'], - ['sess_dropped', 'dropped', 'incremental'] - ] - }, - 'client_requests': { - 'options': [None, 'Client Requests', 'requests/s', - 'client metrics', 'varnish.client_requests', 'line'], - 'lines': [ - ['client_req', 'received', 'incremental'] - ] - }, - 'all_time_hit_rate': { - 'options': [None, 'All History Hit Rate Ratio', 'percentage', 'cache performance', - 'varnish.all_time_hit_rate', 'stacked'], - 'lines': [ - ['cache_hit', 'hit', 'percentage-of-absolute-row'], - ['cache_miss', 'miss', 'percentage-of-absolute-row'], - ['cache_hitpass', 'hitpass', 'percentage-of-absolute-row']] - }, - 'current_poll_hit_rate': { - 'options': [None, 'Current Poll Hit Rate Ratio', 'percentage', 'cache performance', - 'varnish.current_poll_hit_rate', 'stacked'], - 'lines': [ - ['cache_hit', 'hit', 'percentage-of-incremental-row'], - ['cache_miss', 'miss', 'percentage-of-incremental-row'], - ['cache_hitpass', 'hitpass', 'percentage-of-incremental-row'] - ] - }, - 'cached_objects_expired': { - 'options': [None, 'Expired Objects', 'expired/s', 'cache performance', - 'varnish.cached_objects_expired', 'line'], - 'lines': [ - ['n_expired', 'objects', 'incremental'] - ] - }, - 'cached_objects_nuked': { - 'options': [None, 'Least Recently Used Nuked Objects', 'nuked/s', 'cache performance', - 'varnish.cached_objects_nuked', 'line'], - 'lines': [ - ['n_lru_nuked', 'objects', 'incremental'] - ] - }, - 'threads_total': { - 'options': [None, 'Number Of Threads In All Pools', 'number', 'thread related metrics', - 'varnish.threads_total', 'line'], - 'lines': [ - ['threads', None, 'absolute'] - ] - }, - 'threads_statistics': { - 'options': [None, 'Threads Statistics', 'threads/s', 'thread related metrics', - 'varnish.threads_statistics', 'line'], - 'lines': [ - ['threads_created', 'created', 'incremental'], - ['threads_failed', 'failed', 'incremental'], - ['threads_limited', 'limited', 'incremental'] - ] - }, - 'threads_queue_len': { - 'options': [None, 'Current Queue Length', 'requests', 'thread related metrics', - 'varnish.threads_queue_len', 'line'], - 'lines': [ - ['thread_queue_len', 'in queue'] - ] - }, - 'backend_connections': { - 'options': [None, 'Backend Connections Statistics', 'connections/s', 'backend metrics', - 'varnish.backend_connections', 'line'], - 'lines': [ - ['backend_conn', 'successful', 'incremental'], - ['backend_unhealthy', 'unhealthy', 'incremental'], - ['backend_reuse', 'reused', 'incremental'], - ['backend_toolate', 'closed', 'incremental'], - ['backend_recycle', 'recycled', 'incremental'], - ['backend_fail', 'failed', 'incremental'] - ] - }, - 'backend_requests': { - 'options': [None, 'Requests To The Backend', 'requests/s', 'backend metrics', - 'varnish.backend_requests', 'line'], - 'lines': [ - ['backend_req', 'sent', 'incremental'] - ] - }, - 'esi_statistics': { - 'options': [None, 'ESI Statistics', 'problems/s', 'esi related metrics', 'varnish.esi_statistics', 'line'], - 'lines': [ - ['esi_errors', 'errors', 'incremental'], - ['esi_warnings', 'warnings', 'incremental'] - ] - }, - 'memory_usage': { - 'options': [None, 'Memory Usage', 'MiB', 'memory usage', 'varnish.memory_usage', 
'stacked'],
-        'lines': [
-            ['memory_free', 'free', 'absolute', 1, 1 << 20],
-            ['memory_allocated', 'allocated', 'absolute', 1, 1 << 20]]
-    },
-    'uptime': {
-        'lines': [
-            ['uptime', None, 'absolute']
-        ],
-        'options': [None, 'Uptime', 'seconds', 'uptime', 'varnish.uptime', 'line']
-    }
-}
-
-
-def backend_charts_template(name):
-    order = [
-        '{0}_response_statistics'.format(name),
-    ]
-
-    charts = {
-        order[0]: {
-            'options': [None, 'Backend "{0}"'.format(name), 'kilobits/s', 'backend response statistics',
-                        'varnish.backend', 'area'],
-            'lines': [
-                ['{0}_beresp_hdrbytes'.format(name), 'header', 'incremental', 8, 1000],
-                ['{0}_beresp_bodybytes'.format(name), 'body', 'incremental', -8, 1000]
-            ]
-        },
-    }
-
-    return order, charts
-
-
-def storage_charts_template(name):
-    order = [
-        'storage_{0}_usage'.format(name),
-        'storage_{0}_alloc_objs'.format(name)
-    ]
-
-    charts = {
-        order[0]: {
-            'options': [None, 'Storage "{0}" Usage'.format(name), 'KiB', 'storage usage', 'varnish.storage_usage', 'stacked'],
-            'lines': [
-                ['{0}.g_space'.format(name), 'free', 'absolute', 1, 1 << 10],
-                ['{0}.g_bytes'.format(name), 'allocated', 'absolute', 1, 1 << 10]
-            ]
-        },
-        order[1]: {
-            'options': [None, 'Storage "{0}" Allocated Objects'.format(name), 'objects', 'storage usage', 'varnish.storage_alloc_objs', 'line'],
-            'lines': [
-                ['{0}.g_alloc'.format(name), 'allocated', 'absolute']
-            ]
-        }
-    }
-
-    return order, charts
-
-
-VARNISHSTAT = 'varnishstat'
-
-re_version = re.compile(r'varnish-(?:plus-)?(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)')
-
-
-class VarnishVersion:
-    def __init__(self, major, minor, patch):
-        self.major = major
-        self.minor = minor
-        self.patch = patch
-
-    def __str__(self):
-        return '{0}.{1}.{2}'.format(self.major, self.minor, self.patch)
-
-
-class Parser:
-    _backend_new = re.compile(r'VBE.([\d\w_.]+)\(.*?\).(beresp[\w_]+)\s+(\d+)')
-    _backend_old = re.compile(r'VBE\.[\d\w-]+\.([\w\d_-]+).(beresp[\w_]+)\s+(\d+)')
-    _default = re.compile(r'([A-Z]+\.)?([\d\w_.]+)\s+(\d+)')
-
-    def __init__(self):
-        self.re_default = None
-        self.re_backend = None
-
-    def init(self, data):
-        data = ''.join(data)
-        parsed_main = Parser._default.findall(data)
-        if parsed_main:
-            self.re_default = Parser._default
-
-        parsed_backend = Parser._backend_new.findall(data)
-        if parsed_backend:
-            self.re_backend = Parser._backend_new
-        else:
-            parsed_backend = Parser._backend_old.findall(data)
-            if parsed_backend:
-                self.re_backend = Parser._backend_old
-
-    def server_stats(self, data):
-        return self.re_default.findall(''.join(data))
-
-    def backend_stats(self, data):
-        return self.re_backend.findall(''.join(data))
-
-
-class Service(ExecutableService):
-    def __init__(self, configuration=None, name=None):
-        ExecutableService.__init__(self, configuration=configuration, name=name)
-        self.order = ORDER
-        self.definitions = CHARTS
-        self.instance_name = configuration.get('instance_name')
-        self.parser = Parser()
-        self.command = None
-        self.collected_vbe = set()
-        self.collected_storages = set()
-
-    def create_command(self):
-        varnishstat = find_binary(VARNISHSTAT)
-
-        if not varnishstat:
-            self.error("can't locate '{0}' binary or binary is not executable by user netdata".format(VARNISHSTAT))
-            return False
-
-        command = [varnishstat, '-V']
-        reply = self._get_raw_data(stderr=True, command=command)
-        if not reply:
-            self.error(
-                "no output from '{0}'. Is varnish running? Not enough privileges?".format(' '.join(command)))
-            return False
-
-        ver = parse_varnish_version(reply)
-        if not ver:
-            self.error("failed to parse reply from '{0}', used regex: '{1}', reply: {2}".format(
-                ' '.join(command), re_version.pattern, reply))
-            return False
-
-        if self.instance_name:
-            self.command = [varnishstat, '-1', '-n', self.instance_name]
-        else:
-            self.command = [varnishstat, '-1']
-
-        if ver.major > 4:
-            self.command.extend(['-t', '1'])
-
-        self.info("varnish version: {0}, will use command: '{1}'".format(ver, ' '.join(self.command)))
-
-        return True
-
-    def check(self):
-        if not self.create_command():
-            return False
-
-        # STDOUT is not empty
-        reply = self._get_raw_data()
-        if not reply:
-            self.error("no output from '{0}'. Is it running? Not enough privileges?".format(' '.join(self.command)))
-            return False
-
-        self.parser.init(reply)
-
-        # Output is parsable
-        if not self.parser.re_default:
-            self.error("can't parse the output...")
-            return False
-
-        return True
-
-    def get_data(self):
-        """
-        Format data received from shell command
-        :return: dict
-        """
-        raw = self._get_raw_data()
-        if not raw:
-            return None
-
-        data = dict()
-        server_stats = self.parser.server_stats(raw)
-        if not server_stats:
-            return None
-
-        stats = dict((param, value) for _, param, value in server_stats)
-        data.update(stats)
-
-        self.get_vbe_backends(data, raw)
-        self.get_storages(server_stats)
-
-        # varnish 5 uses default.g_bytes and default.g_space
-        data['memory_allocated'] = data.get('s0.g_bytes') or data.get('default.g_bytes')
-        data['memory_free'] = data.get('s0.g_space') or data.get('default.g_space')
-
-        return data
-
-    def get_vbe_backends(self, data, raw):
-        if not self.parser.re_backend:
-            return
-        stats = self.parser.backend_stats(raw)
-        if not stats:
-            return
-
-        for (name, param, value) in stats:
-            data['_'.join([name, param])] = value
-            if name in self.collected_vbe:
-                continue
-            self.collected_vbe.add(name)
-            self.add_backend_charts(name)
-
-    def get_storages(self, server_stats):
-        # Storage types:
-        #  - SMF: File Storage
-        #  - SMA: Malloc Storage
-        #  - MSE: Massive Storage Engine (Varnish-Plus only)
-        #
-        # Stats example:
-        # [('SMF.', 'ssdStorage.c_req', '47686'),
-        #  ('SMF.', 'ssdStorage.c_fail', '0'),
-        #  ('SMF.', 'ssdStorage.c_bytes', '668102656'),
-        #  ('SMF.', 'ssdStorage.c_freed', '140980224'),
-        #  ('SMF.', 'ssdStorage.g_alloc', '39753'),
-        #  ('SMF.', 'ssdStorage.g_bytes', '527122432'),
-        #  ('SMF.', 'ssdStorage.g_space', '53159968768'),
-        #  ('SMF.', 'ssdStorage.g_smf', '40130'),
-        #  ('SMF.', 'ssdStorage.g_smf_frag', '311'),
-        #  ('SMF.', 'ssdStorage.g_smf_large', '66')]
-        storages = [name for typ, name, _ in server_stats if typ.startswith(('SMF', 'SMA', 'MSE')) and name.endswith('g_space')]
-        if not storages:
-            return
-        for storage in storages:
-            storage = storage.split('.')[0]
-            if storage in self.collected_storages:
-                continue
-            self.collected_storages.add(storage)
-            self.add_storage_charts(storage)
-
-    def add_backend_charts(self, backend_name):
-        self.add_charts(backend_name, backend_charts_template)
-
-    def add_storage_charts(self, storage_name):
-        self.add_charts(storage_name, storage_charts_template)
-
-    def add_charts(self, name, charts_template):
-        order, charts = charts_template(name)
-
-        for chart_name in order:
-            params = [chart_name] + charts[chart_name]['options']
-            dimensions = charts[chart_name]['lines']
-
-            new_chart = self.charts.add_chart(params)
-            for dimension in dimensions:
-                new_chart.add_dimension(dimension)
-
-
-def parse_varnish_version(lines):
-    m = re_version.search(lines[0])
-    if not m:
-        return None
-
-    m = m.groupdict()
-    return VarnishVersion(
-        int(m['major']),
-        int(m['minor']),
-        int(m['patch']),
-    )
diff --git a/src/collectors/python.d.plugin/varnish/varnish.conf b/src/collectors/python.d.plugin/varnish/varnish.conf
deleted file mode 100644
index 54bfe4dee..000000000
--- a/src/collectors/python.d.plugin/varnish/varnish.conf
+++ /dev/null
@@ -1,66 +0,0 @@
-# netdata python.d.plugin configuration for varnish
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-#  - global variables
-#  - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-#     name: myname            # the JOB's name as it will appear at the
-#                             # dashboard (by default is the job_name)
-#                             # JOBs sharing a name are mutually exclusive
-#     update_every: 1         # the JOB's data collection frequency
-#     priority: 60000         # the JOB's order on the dashboard
-#     penalty: yes            # the JOB's penalty
-#     autodetection_retry: 0  # the JOB's re-check interval in seconds
-#
-# Additionally to the above, varnish also supports the following:
-#
-# instance_name: 'name'       # the name of the varnishd instance to get logs from. If not specified, the host name is used.
-# -# ---------------------------------------------------------------------- diff --git a/src/collectors/python.d.plugin/w1sensor/README.md b/src/collectors/python.d.plugin/w1sensor/README.md deleted file mode 120000 index c0fa9cd1b..000000000 --- a/src/collectors/python.d.plugin/w1sensor/README.md +++ /dev/null @@ -1 +0,0 @@ -integrations/1-wire_sensors.md \ No newline at end of file diff --git a/src/collectors/python.d.plugin/w1sensor/integrations/1-wire_sensors.md b/src/collectors/python.d.plugin/w1sensor/integrations/1-wire_sensors.md deleted file mode 100644 index 15582879e..000000000 --- a/src/collectors/python.d.plugin/w1sensor/integrations/1-wire_sensors.md +++ /dev/null @@ -1,201 +0,0 @@ - - -# 1-Wire Sensors - - - - - -Plugin: python.d.plugin -Module: w1sensor - - - -## Overview - -Monitor 1-Wire Sensors metrics with Netdata for optimal environmental conditions monitoring. Enhance your environmental monitoring with real-time insights and alerts. - -The collector uses the wire, w1_gpio, and w1_therm kernel modules. Currently temperature sensors are supported and automatically detected. - -This collector is only supported on the following platforms: - -- Linux - -This collector supports collecting metrics from multiple instances of this integration, including remote instances. - - -### Default Behavior - -#### Auto-Detection - -The collector will try to auto detect available 1-Wire devices. - -#### Limits - -The default configuration for this integration does not impose any limits on data collection. - -#### Performance Impact - -The default configuration for this integration is not expected to impose a significant performance impact on the system. - - -## Metrics - -Metrics grouped by *scope*. - -The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. - - - -### Per 1-Wire Sensors instance - -These metrics refer to the entire monitored application. - -This scope has no labels. - -Metrics: - -| Metric | Dimensions | Unit | -|:------|:----------|:----| -| w1sensor.temp | a dimension per sensor | Celsius | - - - -## Alerts - -There are no alerts configured by default for this integration. - - -## Setup - -### Prerequisites - -#### Required Linux kernel modules - -Make sure `wire`, `w1_gpio`, and `w1_therm` kernel modules are loaded. - - -### Configuration - -#### File - -The configuration file name for this integration is `python.d/w1sensor.conf`. - - -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). - -```bash -cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata -sudo ./edit-config python.d/w1sensor.conf -``` -#### Options - -There are 2 sections: - -* Global variables -* One or more JOBS that can define multiple different instances to monitor. - -The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. - -Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. - -Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. - - -
Config options - -| Name | Description | Default | Required | -|:----|:-----------|:-------|:--------:| -| update_every | Sets the default data collection frequency. | 5 | no | -| priority | Controls the order of charts at the netdata dashboard. | 60000 | no | -| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no | -| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no | -| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no | -| name_<1-Wire id> | This allows associating a human readable name with a sensor's 1-Wire identifier. | | no | - -
- -#### Examples - -##### Provide human readable names - -Associate two 1-Wire identifiers with human readable names. - -```yaml -sensors: - name_00000022276e: 'Machine room' - name_00000022298f: 'Rack 12' - -``` - - -## Troubleshooting - -### Debug Mode - - -To troubleshoot issues with the `w1sensor` collector, run the `python.d.plugin` with the debug option enabled. The output -should give you clues as to why the collector isn't working. - -- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on - your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. - - ```bash - cd /usr/libexec/netdata/plugins.d/ - ``` - -- Switch to the `netdata` user. - - ```bash - sudo -u netdata -s - ``` - -- Run the `python.d.plugin` to debug the collector: - - ```bash - ./python.d.plugin w1sensor debug trace - ``` - -### Getting Logs - -If you're encountering problems with the `w1sensor` collector, follow these steps to retrieve logs and identify potential issues: - -- **Run the command** specific to your system (systemd, non-systemd, or Docker container). -- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem. - -#### System with systemd - -Use the following command to view logs generated since the last Netdata service restart: - -```bash -journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep w1sensor -``` - -#### System without systemd - -Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name: - -```bash -grep w1sensor /var/log/netdata/collector.log -``` - -**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues. - -#### Docker Container - -If your Netdata runs in a Docker container named "netdata" (replace if different), use this command: - -```bash -docker logs netdata 2>&1 | grep w1sensor -``` - - diff --git a/src/collectors/python.d.plugin/w1sensor/metadata.yaml b/src/collectors/python.d.plugin/w1sensor/metadata.yaml deleted file mode 100644 index 7b0768237..000000000 --- a/src/collectors/python.d.plugin/w1sensor/metadata.yaml +++ /dev/null @@ -1,119 +0,0 @@ -plugin_name: python.d.plugin -modules: - - meta: - plugin_name: python.d.plugin - module_name: w1sensor - monitored_instance: - name: 1-Wire Sensors - link: "https://www.analog.com/en/product-category/1wire-temperature-sensors.html" - categories: - - data-collection.hardware-devices-and-sensors - icon_filename: "1-wire.png" - related_resources: - integrations: - list: [] - info_provided_to_referring_integrations: - description: "" - keywords: - - temperature - - sensor - - 1-wire - most_popular: false - overview: - data_collection: - metrics_description: "Monitor 1-Wire Sensors metrics with Netdata for optimal environmental conditions monitoring. Enhance your environmental monitoring with real-time insights and alerts." - method_description: "The collector uses the wire, w1_gpio, and w1_therm kernel modules. Currently temperature sensors are supported and automatically detected." - supported_platforms: - include: - - Linux - exclude: [] - multi_instance: true - additional_permissions: - description: "" - default_behavior: - auto_detection: - description: "The collector will try to auto detect available 1-Wire devices." 
- limits: - description: "" - performance_impact: - description: "" - setup: - prerequisites: - list: - - title: "Required Linux kernel modules" - description: "Make sure `wire`, `w1_gpio`, and `w1_therm` kernel modules are loaded." - configuration: - file: - name: python.d/w1sensor.conf - options: - description: | - There are 2 sections: - - * Global variables - * One or more JOBS that can define multiple different instances to monitor. - - The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. - - Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. - - Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. - folding: - title: "Config options" - enabled: true - list: - - name: update_every - description: Sets the default data collection frequency. - default_value: 5 - required: false - - name: priority - description: Controls the order of charts at the netdata dashboard. - default_value: 60000 - required: false - - name: autodetection_retry - description: Sets the job re-check interval in seconds. - default_value: 0 - required: false - - name: penalty - description: Indicates whether to apply penalty to update_every in case of failures. - default_value: yes - required: false - - name: name - description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. - default_value: "" - required: false - - name: name_<1-Wire id> - description: This allows associating a human readable name with a sensor's 1-Wire identifier. - default_value: "" - required: false - examples: - folding: - enabled: false - title: "Config" - list: - - name: Provide human readable names - description: Associate two 1-Wire identifiers with human readable names. - config: | - sensors: - name_00000022276e: 'Machine room' - name_00000022298f: 'Rack 12' - troubleshooting: - problems: - list: [] - alerts: [] - metrics: - folding: - title: Metrics - enabled: false - description: "" - availability: [] - scopes: - - name: global - description: "These metrics refer to the entire monitored application." 
- labels: [] - metrics: - - name: w1sensor.temp - description: 1-Wire Temperature Sensor - unit: "Celsius" - chart_type: line - dimensions: - - name: a dimension per sensor diff --git a/src/collectors/python.d.plugin/w1sensor/w1sensor.chart.py b/src/collectors/python.d.plugin/w1sensor/w1sensor.chart.py deleted file mode 100644 index 66797ced3..000000000 --- a/src/collectors/python.d.plugin/w1sensor/w1sensor.chart.py +++ /dev/null @@ -1,97 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: 1-wire temperature monitor netdata python.d module -# Author: Diomidis Spinellis -# SPDX-License-Identifier: GPL-3.0-or-later - -import os -import re - -from bases.FrameworkServices.SimpleService import SimpleService - -# default module values (can be overridden per job in `config`) -update_every = 5 - -# Location where 1-Wire devices can be found -W1_DIR = '/sys/bus/w1/devices/' - -# Lines matching the following regular expression contain a temperature value -RE_TEMP = re.compile(r' t=(-?\d+)') - -ORDER = [ - 'temp', -] - -CHARTS = { - 'temp': { - 'options': [None, '1-Wire Temperature Sensor', 'Celsius', 'Temperature', 'w1sensor.temp', 'line'], - 'lines': [] - } -} - -# Known and supported family members -# Based on linux/drivers/w1/w1_family.h and w1/slaves/w1_therm.c -THERM_FAMILY = { - '10': 'W1_THERM_DS18S20', - '22': 'W1_THERM_DS1822', - '28': 'W1_THERM_DS18B20', - '3b': 'W1_THERM_DS1825', - '42': 'W1_THERM_DS28EA00', -} - - -class Service(SimpleService): - """Provide netdata service for 1-Wire sensors""" - - def __init__(self, configuration=None, name=None): - SimpleService.__init__(self, configuration=configuration, name=name) - self.order = ORDER - self.definitions = CHARTS - self.probes = [] - - def check(self): - """Auto-detect available 1-Wire sensors, setting line definitions - and probes to be monitored.""" - try: - file_names = os.listdir(W1_DIR) - except OSError as err: - self.error(err) - return False - - lines = [] - for file_name in file_names: - if file_name[2] != '-': - continue - if not file_name[0:2] in THERM_FAMILY: - continue - - self.probes.append(file_name) - identifier = file_name[3:] - name = identifier - config_name = self.configuration.get('name_' + identifier) - if config_name: - name = config_name - lines.append(['w1sensor_temp_' + identifier, name, 'absolute', - 1, 10]) - self.definitions['temp']['lines'] = lines - return len(self.probes) > 0 - - def get_data(self): - """Return data read from sensors.""" - data = dict() - - for file_name in self.probes: - file_path = W1_DIR + file_name + '/w1_slave' - identifier = file_name[3:] - try: - with open(file_path, 'r') as device_file: - for line in device_file: - matched = RE_TEMP.search(line) - if matched: - # Round to one decimal digit to filter-out noise - value = round(int(matched.group(1)) / 1000., 1) - value = int(value * 10) - data['w1sensor_temp_' + identifier] = value - except (OSError, IOError) as err: - self.error(err) - continue - return data or None diff --git a/src/collectors/python.d.plugin/w1sensor/w1sensor.conf b/src/collectors/python.d.plugin/w1sensor/w1sensor.conf deleted file mode 100644 index b60d28650..000000000 --- a/src/collectors/python.d.plugin/w1sensor/w1sensor.conf +++ /dev/null @@ -1,72 +0,0 @@ -# netdata python.d.plugin configuration for w1sensor -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. 
-# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 5 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# penalty indicates whether to apply penalty to update_every in case of failures. -# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. -# penalty: yes - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 5 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# penalty: yes # the JOB's penalty -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, w1sensor also supports the following: -# -# name_<1-Wire id>: '' -# This allows associating a human readable name with a sensor's 1-Wire -# identifier. Example: -# name_00000022276e: 'Machine room' -# name_00000022298f: 'Rack 12' -# -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS -# only one of them will run (they have the same name) diff --git a/src/collectors/python.d.plugin/zscores/README.md b/src/collectors/python.d.plugin/zscores/README.md deleted file mode 120000 index 159ce0787..000000000 --- a/src/collectors/python.d.plugin/zscores/README.md +++ /dev/null @@ -1 +0,0 @@ -integrations/python.d_zscores.md \ No newline at end of file diff --git a/src/collectors/python.d.plugin/zscores/integrations/python.d_zscores.md b/src/collectors/python.d.plugin/zscores/integrations/python.d_zscores.md deleted file mode 100644 index a5d2a7e47..000000000 --- a/src/collectors/python.d.plugin/zscores/integrations/python.d_zscores.md +++ /dev/null @@ -1,229 +0,0 @@ - - -# python.d zscores - -Plugin: python.d.plugin -Module: zscores - - - -## Overview - -By using smoothed, rolling [Z-Scores](https://en.wikipedia.org/wiki/Standard_score) for selected metrics or charts you can narrow down your focus and shorten root cause analysis. - - -This collector uses the [Netdata rest api](/src/web/api/README.md) to get the `mean` and `stddev` -for each dimension on specified charts over a time range (defined by `train_secs` and `offset_secs`). 
- -For each dimension it will calculate a Z-Score as `z = (x - mean) / stddev` (clipped at `z_clip`). Scores are then smoothed over -time (`z_smooth_n`) and, if `mode: 'per_chart'`, aggregated across dimensions to a smoothed, rolling chart level Z-Score at each time step. - - -This collector is supported on all platforms. - -This collector supports collecting metrics from multiple instances of this integration, including remote instances. - - -### Default Behavior - -#### Auto-Detection - -This integration doesn't support auto-detection. - -#### Limits - -The default configuration for this integration does not impose any limits on data collection. - -#### Performance Impact - -The default configuration for this integration is not expected to impose a significant performance impact on the system. - - -## Metrics - -Metrics grouped by *scope*. - -The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. - - - -### Per python.d zscores instance - -These metrics refer to the entire monitored application. - -This scope has no labels. - -Metrics: - -| Metric | Dimensions | Unit | -|:------|:----------|:----| -| zscores.z | a dimension per chart or dimension | z | -| zscores.3stddev | a dimension per chart or dimension | count | - - - -## Alerts - -There are no alerts configured by default for this integration. - - -## Setup - -### Prerequisites - -#### Python Requirements - -This collector will only work with Python 3 and requires the below packages be installed. - -```bash -# become netdata user -sudo su -s /bin/bash netdata -# install required packages -pip3 install numpy pandas requests netdata-pandas==0.0.38 -``` - - - -### Configuration - -#### File - -The configuration file name for this integration is `python.d/zscores.conf`. - - -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). - -```bash -cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata -sudo ./edit-config python.d/zscores.conf -``` -#### Options - -There are 2 sections: - -* Global variables -* One or more JOBS that can define multiple different instances to monitor. - -The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. - -Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. - -Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. - - -
Config options - -| Name | Description | Default | Required | -|:----|:-----------|:-------|:--------:| -| charts_regex | what charts to pull data for - A regex like `system\..*/` or `system\..*/apps.cpu/apps.mem` etc. | system\..* | yes | -| train_secs | length of time (in seconds) to base calculations off for mean and stddev. | 14400 | yes | -| offset_secs | offset (in seconds) preceding latest data to ignore when calculating mean and stddev. | 300 | yes | -| train_every_n | recalculate the mean and stddev every n steps of the collector. | 900 | yes | -| z_smooth_n | smooth the z score (to reduce sensitivity to spikes) by averaging it over last n values. | 15 | yes | -| z_clip | cap absolute value of zscore (before smoothing) for better stability. | 10 | yes | -| z_abs | set z_abs: 'true' to make all zscores be absolute values only. | true | yes | -| burn_in | burn in period in which to initially calculate mean and stddev on every step. | 2 | yes | -| mode | mode can be to get a zscore 'per_dim' or 'per_chart'. | per_chart | yes | -| per_chart_agg | per_chart_agg is how you aggregate from dimension to chart when mode='per_chart'. | mean | yes | -| update_every | Sets the default data collection frequency. | 5 | no | -| priority | Controls the order of charts at the netdata dashboard. | 60000 | no | -| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no | -| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no | - -
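
The sketch below illustrates, with made-up numbers, how `z_clip` and `z_smooth_n` interact in the scoring described above: each raw value becomes `z = (x - mean) / stddev`, is clipped, then averaged over the last `z_smooth_n` scores. It is a simplified single-metric illustration, not the module's pandas implementation:

```python
# Simplified illustration of the clipped, smoothed z-score described
# above; the real module does this with pandas across many dimensions.
from collections import deque

Z_CLIP = 10       # cap on |z| before smoothing (option: z_clip)
Z_SMOOTH_N = 15   # how many recent scores to average (option: z_smooth_n)

history = deque(maxlen=Z_SMOOTH_N)


def smoothed_z(x, mean, stddev):
    z = (x - mean) / stddev if stddev else 0.0
    z = max(-Z_CLIP, min(Z_CLIP, z))  # clip for stability
    history.append(z)
    return sum(history) / len(history)


# A steady series followed by an outlier: the outlier's huge raw score
# is clipped to Z_CLIP and then diluted by the smoothing window.
for x in (10.0, 10.5, 9.8, 25.0):
    print(round(smoothed_z(x, mean=10.0, stddev=0.5), 2))
```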
- -#### Examples - -##### Default - -Default configuration. - -```yaml -local: - name: 'local' - host: '127.0.0.1:19999' - charts_regex: 'system\..*' - charts_to_exclude: 'system.uptime' - train_secs: 14400 - offset_secs: 300 - train_every_n: 900 - z_smooth_n: 15 - z_clip: 10 - z_abs: 'true' - burn_in: 2 - mode: 'per_chart' - per_chart_agg: 'mean' - -``` - - -## Troubleshooting - -### Debug Mode - - -To troubleshoot issues with the `zscores` collector, run the `python.d.plugin` with the debug option enabled. The output -should give you clues as to why the collector isn't working. - -- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on - your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. - - ```bash - cd /usr/libexec/netdata/plugins.d/ - ``` - -- Switch to the `netdata` user. - - ```bash - sudo -u netdata -s - ``` - -- Run the `python.d.plugin` to debug the collector: - - ```bash - ./python.d.plugin zscores debug trace - ``` - -### Getting Logs - -If you're encountering problems with the `zscores` collector, follow these steps to retrieve logs and identify potential issues: - -- **Run the command** specific to your system (systemd, non-systemd, or Docker container). -- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem. - -#### System with systemd - -Use the following command to view logs generated since the last Netdata service restart: - -```bash -journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep zscores -``` - -#### System without systemd - -Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name: - -```bash -grep zscores /var/log/netdata/collector.log -``` - -**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues. - -#### Docker Container - -If your Netdata runs in a Docker container named "netdata" (replace if different), use this command: - -```bash -docker logs netdata 2>&1 | grep zscores -``` - - diff --git a/src/collectors/python.d.plugin/zscores/metadata.yaml b/src/collectors/python.d.plugin/zscores/metadata.yaml deleted file mode 100644 index e027562ad..000000000 --- a/src/collectors/python.d.plugin/zscores/metadata.yaml +++ /dev/null @@ -1,187 +0,0 @@ -plugin_name: python.d.plugin -modules: - - meta: - plugin_name: python.d.plugin - module_name: zscores - monitored_instance: - name: python.d zscores - link: https://en.wikipedia.org/wiki/Standard_score - categories: - - data-collection.other - icon_filename: "" - related_resources: - integrations: - list: [] - info_provided_to_referring_integrations: - description: "" - keywords: - - zscore - - z-score - - standard score - - standard deviation - - anomaly detection - - statistical anomaly detection - most_popular: false - overview: - data_collection: - metrics_description: | - By using smoothed, rolling [Z-Scores](https://en.wikipedia.org/wiki/Standard_score) for selected metrics or charts you can narrow down your focus and shorten root cause analysis. - method_description: | - This collector uses the [Netdata rest api](/src/web/api/README.md) to get the `mean` and `stddev` - for each dimension on specified charts over a time range (defined by `train_secs` and `offset_secs`). 
- - For each dimension it will calculate a Z-Score as `z = (x - mean) / stddev` (clipped at `z_clip`). Scores are then smoothed over - time (`z_smooth_n`) and, if `mode: 'per_chart'`, aggregated across dimensions to a smoothed, rolling chart level Z-Score at each time step. - supported_platforms: - include: [] - exclude: [] - multi_instance: true - additional_permissions: - description: "" - default_behavior: - auto_detection: - description: "" - limits: - description: "" - performance_impact: - description: "" - setup: - prerequisites: - list: - - title: Python Requirements - description: | - This collector will only work with Python 3 and requires the below packages be installed. - - ```bash - # become netdata user - sudo su -s /bin/bash netdata - # install required packages - pip3 install numpy pandas requests netdata-pandas==0.0.38 - ``` - configuration: - file: - name: python.d/zscores.conf - description: "" - options: - description: | - There are 2 sections: - - * Global variables - * One or more JOBS that can define multiple different instances to monitor. - - The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. - - Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. - - Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. - folding: - title: "Config options" - enabled: true - list: - - name: charts_regex - description: what charts to pull data for - A regex like `system\..*|` or `system\..*|apps.cpu|apps.mem` etc. - default_value: "system\\..*" - required: true - - name: train_secs - description: length of time (in seconds) to base calculations off for mean and stddev. - default_value: 14400 - required: true - - name: offset_secs - description: offset (in seconds) preceding latest data to ignore when calculating mean and stddev. - default_value: 300 - required: true - - name: train_every_n - description: recalculate the mean and stddev every n steps of the collector. - default_value: 900 - required: true - - name: z_smooth_n - description: smooth the z score (to reduce sensitivity to spikes) by averaging it over last n values. - default_value: 15 - required: true - - name: z_clip - description: cap absolute value of zscore (before smoothing) for better stability. - default_value: 10 - required: true - - name: z_abs - description: "set z_abs: 'true' to make all zscores be absolute values only." - default_value: "true" - required: true - - name: burn_in - description: burn in period in which to initially calculate mean and stddev on every step. - default_value: 2 - required: true - - name: mode - description: mode can be to get a zscore 'per_dim' or 'per_chart'. - default_value: per_chart - required: true - - name: per_chart_agg - description: per_chart_agg is how you aggregate from dimension to chart when mode='per_chart'. - default_value: mean - required: true - - name: update_every - description: Sets the default data collection frequency. - default_value: 5 - required: false - - name: priority - description: Controls the order of charts at the netdata dashboard. - default_value: 60000 - required: false - - name: autodetection_retry - description: Sets the job re-check interval in seconds. - default_value: 0 - required: false - - name: penalty - description: Indicates whether to apply penalty to update_every in case of failures. 
- default_value: yes - required: false - examples: - folding: - enabled: true - title: "Config" - list: - - name: Default - description: Default configuration. - folding: - enabled: false - config: | - local: - name: 'local' - host: '127.0.0.1:19999' - charts_regex: 'system\..*' - charts_to_exclude: 'system.uptime' - train_secs: 14400 - offset_secs: 300 - train_every_n: 900 - z_smooth_n: 15 - z_clip: 10 - z_abs: 'true' - burn_in: 2 - mode: 'per_chart' - per_chart_agg: 'mean' - troubleshooting: - problems: - list: [] - alerts: [] - metrics: - folding: - title: Metrics - enabled: false - description: "" - availability: [] - scopes: - - name: global - description: "These metrics refer to the entire monitored application." - labels: [] - metrics: - - name: zscores.z - description: Z Score - unit: "z" - chart_type: line - dimensions: - - name: a dimension per chart or dimension - - name: zscores.3stddev - description: Z Score >3 - unit: "count" - chart_type: stacked - dimensions: - - name: a dimension per chart or dimension diff --git a/src/collectors/python.d.plugin/zscores/zscores.chart.py b/src/collectors/python.d.plugin/zscores/zscores.chart.py deleted file mode 100644 index 1099b9376..000000000 --- a/src/collectors/python.d.plugin/zscores/zscores.chart.py +++ /dev/null @@ -1,146 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: zscores netdata python.d module -# Author: andrewm4894 -# SPDX-License-Identifier: GPL-3.0-or-later - -from datetime import datetime -import re - -import requests -import numpy as np -import pandas as pd - -from bases.FrameworkServices.SimpleService import SimpleService -from netdata_pandas.data import get_data, get_allmetrics - -priority = 60000 -update_every = 5 -disabled_by_default = True - -ORDER = [ - 'z', - '3stddev' -] - -CHARTS = { - 'z': { - 'options': ['z', 'Z Score', 'z', 'Z Score', 'zscores.z', 'line'], - 'lines': [] - }, - '3stddev': { - 'options': ['3stddev', 'Z Score >3', 'count', '3 Stddev', 'zscores.3stddev', 'stacked'], - 'lines': [] - }, -} - - -class Service(SimpleService): - def __init__(self, configuration=None, name=None): - SimpleService.__init__(self, configuration=configuration, name=name) - self.host = self.configuration.get('host', '127.0.0.1:19999') - self.charts_regex = re.compile(self.configuration.get('charts_regex', 'system.*')) - self.charts_to_exclude = self.configuration.get('charts_to_exclude', '').split(',') - self.charts_in_scope = [ - c for c in - list(filter(self.charts_regex.match, - requests.get(f'http://{self.host}/api/v1/charts').json()['charts'].keys())) - if c not in self.charts_to_exclude - ] - self.train_secs = self.configuration.get('train_secs', 14400) - self.offset_secs = self.configuration.get('offset_secs', 300) - self.train_every_n = self.configuration.get('train_every_n', 900) - self.z_smooth_n = self.configuration.get('z_smooth_n', 15) - self.z_clip = self.configuration.get('z_clip', 10) - self.z_abs = bool(self.configuration.get('z_abs', True)) - self.burn_in = self.configuration.get('burn_in', 2) - self.mode = self.configuration.get('mode', 'per_chart') - self.per_chart_agg = self.configuration.get('per_chart_agg', 'mean') - self.order = ORDER - self.definitions = CHARTS - self.collected_dims = {'z': set(), '3stddev': set()} - self.df_mean = pd.DataFrame() - self.df_std = pd.DataFrame() - self.df_z_history = pd.DataFrame() - - def check(self): - _ = get_allmetrics(self.host, self.charts_in_scope, wide=True, col_sep='.') - return True - - def validate_charts(self, chart, data, algorithm='absolute', 
multiplier=1, divisor=1): - """If dimension not in chart then add it. - """ - for dim in data: - if dim not in self.collected_dims[chart]: - self.collected_dims[chart].add(dim) - self.charts[chart].add_dimension([dim, dim, algorithm, multiplier, divisor]) - - for dim in list(self.collected_dims[chart]): - if dim not in data: - self.collected_dims[chart].remove(dim) - self.charts[chart].del_dimension(dim, hide=False) - - def train_model(self): - """Calculate the mean and stddev for all relevant metrics and store them for use in calulcating zscore at each timestep. - """ - before = int(datetime.now().timestamp()) - self.offset_secs - after = before - self.train_secs - - self.df_mean = get_data( - self.host, self.charts_in_scope, after, before, points=10, group='average', col_sep='.' - ).mean().to_frame().rename(columns={0: "mean"}) - - self.df_std = get_data( - self.host, self.charts_in_scope, after, before, points=10, group='stddev', col_sep='.' - ).mean().to_frame().rename(columns={0: "std"}) - - def create_data(self, df_allmetrics): - """Use x, mean, stddev to generate z scores and 3stddev flags via some pandas manipulation. - Returning two dictionaries of dimensions and measures, one for each chart. - - :param df_allmetrics : pandas dataframe with latest data from api/v1/allmetrics. - :return: (,) tuple of dictionaries, one for zscores and the other for a flag if abs(z)>3. - """ - # calculate clipped z score for each available metric - df_z = pd.concat([self.df_mean, self.df_std, df_allmetrics], axis=1, join='inner') - df_z['z'] = ((df_z['value'] - df_z['mean']) / df_z['std']).clip(-self.z_clip, self.z_clip).fillna(0) * 100 - if self.z_abs: - df_z['z'] = df_z['z'].abs() - - # append last z_smooth_n rows of zscores to history table in wide format - self.df_z_history = self.df_z_history.append( - df_z[['z']].reset_index().pivot_table(values='z', columns='index'), sort=True - ).tail(self.z_smooth_n) - - # get average zscore for last z_smooth_n for each metric - df_z_smooth = self.df_z_history.melt(value_name='z').groupby('index')['z'].mean().to_frame() - df_z_smooth['3stddev'] = np.where(abs(df_z_smooth['z']) > 300, 1, 0) - data_z = df_z_smooth['z'].add_suffix('_z').to_dict() - - # aggregate to chart level if specified - if self.mode == 'per_chart': - df_z_smooth['chart'] = ['.'.join(x[0:2]) + '_z' for x in df_z_smooth.index.str.split('.').to_list()] - if self.per_chart_agg == 'absmax': - data_z = \ - list(df_z_smooth.groupby('chart').agg({'z': lambda x: max(x, key=abs)})['z'].to_dict().values())[0] - else: - data_z = list(df_z_smooth.groupby('chart').agg({'z': [self.per_chart_agg]})['z'].to_dict().values())[0] - - data_3stddev = {} - for k in data_z: - data_3stddev[k.replace('_z', '')] = 1 if abs(data_z[k]) > 300 else 0 - - return data_z, data_3stddev - - def get_data(self): - - if self.runs_counter <= self.burn_in or self.runs_counter % self.train_every_n == 0: - self.train_model() - - data_z, data_3stddev = self.create_data( - get_allmetrics(self.host, self.charts_in_scope, wide=True, col_sep='.').transpose()) - data = {**data_z, **data_3stddev} - - self.validate_charts('z', data_z, divisor=100) - self.validate_charts('3stddev', data_3stddev) - - return data diff --git a/src/collectors/python.d.plugin/zscores/zscores.conf b/src/collectors/python.d.plugin/zscores/zscores.conf deleted file mode 100644 index 07d62ebe6..000000000 --- a/src/collectors/python.d.plugin/zscores/zscores.conf +++ /dev/null @@ -1,108 +0,0 @@ -# netdata python.d.plugin configuration for example -# -# This file is in 
YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -update_every: 5 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# penalty indicates whether to apply penalty to update_every in case of failures. -# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. -# penalty: yes - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# penalty: yes # the JOB's penalty -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, example also supports the following: -# -# - none -# -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS -# only one of them will run (they have the same name) - -local: - name: 'local' - - # what host to pull data from - host: '127.0.0.1:19999' - - # what charts to pull data for - A regex like 'system\..*|' or 'system\..*|apps.cpu|apps.mem' etc. - charts_regex: 'system\..*' - - # Charts to exclude, useful if you would like to exclude some specific charts. - # Note: should be a ',' separated string like 'chart.name,chart.name'. 
- charts_to_exclude: 'system.uptime' - - # length of time to base calculations off for mean and stddev - train_secs: 14400 # use last 4 hours to work out the mean and stddev for the zscore - - # offset preceding latest data to ignore when calculating mean and stddev - offset_secs: 300 # ignore last 5 minutes of data when calculating the mean and stddev - - # recalculate the mean and stddev every n steps of the collector - train_every_n: 900 # recalculate mean and stddev every 15 minutes - - # smooth the z score by averaging it over last n values - z_smooth_n: 15 # take a rolling average of the last 15 zscore values to reduce sensitivity to temporary 'spikes' - - # cap absolute value of zscore (before smoothing) for better stability - z_clip: 10 # cap each zscore at 10 so as to avoid really large individual zscores swamping any rolling average - - # set z_abs: 'true' to make all zscores be absolute values only. - z_abs: 'true' - - # burn in period in which to initially calculate mean and stddev on every step - burn_in: 2 # on startup of the collector continually update the mean and stddev in case any gaps or initial calculations fail to return - - # mode can be to get a zscore 'per_dim' or 'per_chart' - mode: 'per_chart' # 'per_chart' means individual dimension level smoothed zscores will be aggregated to one zscore per chart per time step - - # per_chart_agg is how you aggregate from dimension to chart when mode='per_chart' - per_chart_agg: 'mean' # 'absmax' will take the max absolute value across all dimensions but will maintain the sign. 'mean' will just average. diff --git a/src/collectors/slabinfo.plugin/integrations/linux_kernel_slab_allocator_statistics.md b/src/collectors/slabinfo.plugin/integrations/linux_kernel_slab_allocator_statistics.md index 1e0db44e4..04e1e4d39 100644 --- a/src/collectors/slabinfo.plugin/integrations/linux_kernel_slab_allocator_statistics.md +++ b/src/collectors/slabinfo.plugin/integrations/linux_kernel_slab_allocator_statistics.md @@ -106,8 +106,8 @@ The file format is a modified INI syntax. The general structure is: [section2] option3 = some third value ``` -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/collectors/slabinfo.plugin/slabinfo.c b/src/collectors/slabinfo.plugin/slabinfo.c index 216f31ac6..98adc1513 100644 --- a/src/collectors/slabinfo.plugin/slabinfo.c +++ b/src/collectors/slabinfo.plugin/slabinfo.c @@ -167,7 +167,7 @@ struct slabinfo *read_file_slabinfo() { slabdebug(" Read %lu lines from procfile", (unsigned long)lines); for(l = 2; l < lines; l++) { if (unlikely(procfile_linewords(ff, l) < 14)) { - slabdebug(" Line %zu has only %zu words, skipping", l, procfile_linewords(ff,l)); + slabdebug(" Line %zu has only %zu words, skipping", l, (size_t)procfile_linewords(ff,l)); continue; } @@ -318,6 +318,12 @@ unsigned int do_slab_stats(int update_every) { } printf("END\n"); + fprintf(stdout, "\n"); + fflush(stdout); + if (ferror(stdout) && errno == EPIPE) { + netdata_log_error("error writing to stdout: EPIPE. Exiting..."); + return loops; + } loops++; @@ -339,7 +345,6 @@ void usage(void) { } int main(int argc, char **argv) { - clocks_init(); nd_log_initialize_for_external_plugins("slabinfo.plugin"); program_name = argv[0]; diff --git a/src/collectors/statsd.plugin/README.md b/src/collectors/statsd.plugin/README.md index 302829242..b93d6c798 100644 --- a/src/collectors/statsd.plugin/README.md +++ b/src/collectors/statsd.plugin/README.md @@ -1,12 +1,3 @@ - - # StatsD [StatsD](https://github.com/statsd/statsd) is a system to collect data from any application. Applications send metrics to it, @@ -170,11 +161,11 @@ You can find the configuration at `/etc/netdata/netdata.conf`: [statsd] # enabled = yes # decimal detail = 1000 - # update every (flushInterval) = 1 + # update every (flushInterval) = 1s # udp messages to process at once = 10 # create private charts for metrics matching = * # max private charts hard limit = 1000 - # cleanup obsolete charts after secs = 0 + # cleanup obsolete charts after = 0 # private charts memory mode = save # private charts history = 3996 # histograms and timers percentile (percentThreshold) = 95.00000 @@ -204,7 +195,7 @@ You can find the configuration at `/etc/netdata/netdata.conf`: is a space separated list of IPs and ports to listen to. The format is `PROTOCOL:IP:PORT` - if `PORT` is omitted, the `default port` will be used. If `IP` is IPv6, it needs to be enclosed in `[]`. `IP` can also be `*` (to listen on all IPs) or even a hostname. -- `update every (flushInterval) = 1` seconds, controls the frequency StatsD will push the collected metrics to Netdata charts. +- `update every (flushInterval) = 1s` controls the frequency StatsD will push the collected metrics to Netdata charts. - `decimal detail = 1000` controls the number of fractional digits in gauges and histograms. Netdata collects metrics using signed 64-bit integers and their fractional detail is controlled using multipliers and divisors. This setting is used to multiply all collected values to convert them to integers and is also set as the divisors, so that the final data will be a floating point number with this fractional detail (1000 = X.0 - X.999, 10000 = X.0 - X.9999, etc). @@ -238,7 +229,7 @@ The default behavior is to use the same settings as the rest of the Netdata Agen For optimization reasons, Netdata imposes a hard limit on private metric charts. The limit is set via the `max private charts hard limit` setting (which defaults to 1000 charts). 
Metrics above this hard limit are still collected, but they can only be used in synthetic charts (once a metric is added to a chart, it will be sent to backend servers too). -If you have many ephemeral metrics collected (i.e. that you collect values for a certain amount of time), you can set the configuration option `set charts as obsolete after secs`. Setting a value in seconds here, means that Netdata will mark those metrics (and their private charts) as obsolete after the specified time has passed since the last sent metric value. Those charts will later be deleted according to the setting in `cleanup obsolete charts after secs`. Setting `set charts as obsolete after secs` to 0 (which is also the default value) will disable this functionality. +If you have many ephemeral metrics (i.e. metrics you collect values for only during a certain period), you can set the configuration option `set charts as obsolete after`. Setting a value in seconds here means that Netdata will mark those metrics (and their private charts) as obsolete after the specified time has passed since the last sent metric value. Those charts will later be deleted according to the setting in `cleanup obsolete charts after`. Setting `set charts as obsolete after` to 0 (which is also the default value) will disable this functionality. Example private charts (automatically generated without any configuration): @@ -785,7 +776,7 @@ visualize all the available operations. Start by creating a new configuration file under the `statsd.d/` folder in the [Netdata config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). -Use [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-netdataconf) +Use [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) to create a new file called `k6.conf`. ```bash= sudo ./edit-config statsd.d/k6.conf ``` Copy the following configuration into your file as a starting point. -```conf +```text [app] name = k6 metrics = k6* @@ -973,7 +964,7 @@ Note that Netdata will report the rate for metrics and counters, even if k6 or a sends an _absolute_ number. For example, k6 sends absolute HTTP requests with `http_reqs`, but Netdata visualizes that in `requests/second`. -To enable this StatsD configuration, [restart Netdata](/packaging/installer/README.md#maintaining-a-netdata-agent-installation). +To enable this StatsD configuration, [restart Netdata](/docs/netdata-agent/start-stop-restart.md). ### Final touches diff --git a/src/collectors/statsd.plugin/asterisk.md b/src/collectors/statsd.plugin/asterisk.md index 302fb932f..d7cb588e5 100644 --- a/src/collectors/statsd.plugin/asterisk.md +++ b/src/collectors/statsd.plugin/asterisk.md @@ -1,11 +1,3 @@ - - # Asterisk collector Monitors [Asterisk](https://www.asterisk.org/) dialplan application's statistics. diff --git a/src/collectors/statsd.plugin/k6.md b/src/collectors/statsd.plugin/k6.md index b657ff1a9..76939d3bb 100644 --- a/src/collectors/statsd.plugin/k6.md +++ b/src/collectors/statsd.plugin/k6.md @@ -1,11 +1,3 @@ - - # K6 load test collector Monitors the impact of load testing experiments performed with [K6](https://k6.io/).
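The statsd README changes above revolve around how the plugin ingests and flushes metrics (`update every (flushInterval)`, private chart limits, obsolete-chart cleanup). As a quick way to exercise them, here is a minimal sketch that feeds test metrics to a local Agent over UDP. It assumes the statsd listener is enabled on its default UDP port 8125, and the `myapp.*` metric names are purely illustrative.

```python
import socket
import time

# Assumed endpoint: Netdata's statsd listener on the default UDP port 8125.
ADDR = ("127.0.0.1", 8125)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

for i in range(60):
    # gauge: the last value received within a flush interval wins
    sock.sendto(b"myapp.queue_depth:%d|g" % (i % 10), ADDR)
    # counter: Netdata charts the rate (events/second), not the running total
    sock.sendto(b"myapp.requests:1|c", ADDR)
    # timer: feeds the histogram/percentile calculation (percentThreshold)
    sock.sendto(b"myapp.response_time:%d|ms" % (20 + i), ADDR)
    time.sleep(1)
```

With the defaults discussed above, each metric gets a private chart (up to the hard limit), and the counter appears as a rate, matching the k6 note that absolute `http_reqs` are visualized as `requests/second`.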
diff --git a/src/collectors/statsd.plugin/statsd.c b/src/collectors/statsd.plugin/statsd.c index f83818059..11a6ac968 100644 --- a/src/collectors/statsd.plugin/statsd.c +++ b/src/collectors/statsd.plugin/statsd.c @@ -1283,7 +1283,7 @@ static int statsd_readfile(const char *filename, STATSD_APP *app, STATSD_APP_CHA // find the directory name from the file we already read char *filename2 = strdupz(filename); // copy filename, since dirname() will change it char *dir = dirname(filename2); // find the directory part of the filename - tmp = strdupz_path_subpath(dir, s); // compose the new filename to read; + tmp = filename_from_path_entry_strdupz(dir, s); // compose the new filename to read; freez(filename2); // free the filename we copied } statsd_readfile(tmp, app, chart, dict); @@ -2491,10 +2491,11 @@ void *statsd_main(void *ptr) { statsd.enabled = config_get_boolean(CONFIG_SECTION_PLUGINS, "statsd", statsd.enabled); statsd.update_every = default_rrd_update_every; - statsd.update_every = (int)config_get_number(CONFIG_SECTION_STATSD, "update every (flushInterval)", statsd.update_every); + statsd.update_every = (int)config_get_duration_seconds(CONFIG_SECTION_STATSD, "update every (flushInterval)", statsd.update_every); if(statsd.update_every < default_rrd_update_every) { collector_error("STATSD: minimum flush interval %d given, but the minimum is the update every of netdata. Using %d", statsd.update_every, default_rrd_update_every); statsd.update_every = default_rrd_update_every; + config_set_duration_seconds(CONFIG_SECTION_STATSD, "update every (flushInterval)", statsd.update_every); } #ifdef HAVE_RECVMMSG @@ -2504,13 +2505,26 @@ void *statsd_main(void *ptr) { statsd.charts_for = simple_pattern_create( config_get(CONFIG_SECTION_STATSD, "create private charts for metrics matching", "*"), NULL, SIMPLE_PATTERN_EXACT, true); - statsd.max_private_charts_hard = (size_t)config_get_number(CONFIG_SECTION_STATSD, "max private charts hard limit", (long long)statsd.max_private_charts_hard); - statsd.set_obsolete_after = (size_t)config_get_number(CONFIG_SECTION_STATSD, "set charts as obsolete after secs", (long long)statsd.set_obsolete_after); - statsd.decimal_detail = (collected_number)config_get_number(CONFIG_SECTION_STATSD, "decimal detail", (long long int)statsd.decimal_detail); - statsd.tcp_idle_timeout = (size_t) config_get_number(CONFIG_SECTION_STATSD, "disconnect idle tcp clients after seconds", (long long int)statsd.tcp_idle_timeout); - statsd.private_charts_hidden = (unsigned int)config_get_boolean(CONFIG_SECTION_STATSD, "private charts hidden", statsd.private_charts_hidden); - statsd.histogram_percentile = (double)config_get_float(CONFIG_SECTION_STATSD, "histograms and timers percentile (percentThreshold)", statsd.histogram_percentile); + statsd.max_private_charts_hard = + (size_t)config_get_number(CONFIG_SECTION_STATSD, "max private charts hard limit", (long long)statsd.max_private_charts_hard); + + statsd.set_obsolete_after = + (size_t)config_get_duration_seconds(CONFIG_SECTION_STATSD, "set charts as obsolete after", (long long)statsd.set_obsolete_after); + + statsd.decimal_detail = + (collected_number)config_get_number(CONFIG_SECTION_STATSD, "decimal detail", (long long int)statsd.decimal_detail); + + statsd.tcp_idle_timeout = + (size_t) config_get_duration_seconds(CONFIG_SECTION_STATSD, "disconnect idle tcp clients after", (long long int)statsd.tcp_idle_timeout); + + statsd.private_charts_hidden = + (unsigned int)config_get_boolean(CONFIG_SECTION_STATSD, "private charts hidden", 
statsd.private_charts_hidden); + +    statsd.histogram_percentile = +        (double)config_get_double( +        CONFIG_SECTION_STATSD, "histograms and timers percentile (percentThreshold)", statsd.histogram_percentile); + if(isless(statsd.histogram_percentile, 0) || isgreater(statsd.histogram_percentile, 100)) { collector_error("STATSD: invalid histograms and timers percentile %0.5f given", statsd.histogram_percentile); statsd.histogram_percentile = 95.0; @@ -2521,7 +2535,8 @@ void *statsd_main(void *ptr) { statsd.histogram_percentile_str = strdupz(buffer); } - statsd.dictionary_max_unique = config_get_number(CONFIG_SECTION_STATSD, "dictionaries max unique dimensions", statsd.dictionary_max_unique); + statsd.dictionary_max_unique = +        config_get_number(CONFIG_SECTION_STATSD, "dictionaries max unique dimensions", statsd.dictionary_max_unique); if(config_get_boolean(CONFIG_SECTION_STATSD, "add dimension for number of events received", 0)) { statsd.gauges.default_options |= STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT; @@ -2803,12 +2818,11 @@ void *statsd_main(void *ptr) { // ---------------------------------------------------------------------------------------------------------------- // statsd thread to turn metrics into charts - usec_t step = statsd.update_every * USEC_PER_SEC; heartbeat_t hb; - heartbeat_init(&hb); + heartbeat_init(&hb, statsd.update_every * USEC_PER_SEC); while(service_running(SERVICE_COLLECTORS)) { worker_is_idle(); - heartbeat_next(&hb, step); + heartbeat_next(&hb); worker_is_busy(WORKER_STATSD_FLUSH_GAUGES); statsd_flush_index_metrics(&statsd.gauges, statsd_flush_gauge); diff --git a/src/collectors/systemd-journal.plugin/README.md b/src/collectors/systemd-journal.plugin/README.md index 9f73ba30e..74eba78de 100644 --- a/src/collectors/systemd-journal.plugin/README.md +++ b/src/collectors/systemd-journal.plugin/README.md @@ -1,4 +1,3 @@ - # `systemd` journal plugin [KEY FEATURES](#key-features) | [JOURNAL SOURCES](#journal-sources) | [JOURNAL FIELDS](#journal-fields) | @@ -40,8 +39,8 @@ For more information check [this discussion](https://github.com/netdata/netdata/ The following are limitations related to the availability of the plugin: -- Netdata versions prior to 1.44 shipped in a docker container do not include this plugin. - The problem is that `libsystemd` is not available in Alpine Linux (there is a `libsystemd`, but it is a dummy that +- Netdata versions prior to 1.44 shipped in a docker container do not include this plugin. + The problem is that `libsystemd` is not available in Alpine Linux (there is a `libsystemd`, but it is a dummy that returns failure on all calls). Starting with Netdata version 1.44, Netdata containers use a Debian base image making this plugin available when Netdata is running in a container. - For the same reason (lack of `systemd` support for Alpine Linux), the plugin is not available on `static` builds of @@ -321,7 +320,7 @@ algorithm to allow it respond promptly. It works like this: 6. In systemd versions 254 or later, the plugin fetches the unique sequence number of each log entry and calculates the percentage of the file matched by the query, versus the total number of the log entries in the journal file. 7. In systemd versions prior to 254, the plugin estimates the number of entries the journal file contributes to the - query, using the amount of log entries matched vs. the total duration the log file has entries for. + query, using the amount of log entries matched vs. the total duration the log file has entries for.
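Step 7 above is a purely time-proportional extrapolation. The sketch below illustrates the idea in isolation; it is not the plugin's code (see `systemd-journal-sampling.h` later in this patch for the real implementation), and the function name and the numbers are made up.

```python
def estimate_remaining_lines(scanned_lines: int, elapsed_s: float, total_s: float) -> int:
    """Extrapolate the total matching entries from the portion of the
    file's time range covered so far, then return what is still ahead."""
    proportion = elapsed_s / max(total_s, 1e-9)
    if proportion <= 0.0 or proportion > 1.0:
        proportion = 1.0  # degenerate input: assume the scan is complete
    expected_total = scanned_lines / proportion
    return max(int(expected_total) - scanned_lines, 1)

# 5,000 matches after covering 25% of the file's time range -> ~15,000 left
print(estimate_remaining_lines(5000, 900, 3600))
```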
The above allow the plugin to respond promptly even when the number of log entries in the journal files is several dozen million, while providing accurate estimations of the log entries over time at the histogram and enough counters diff --git a/src/collectors/systemd-journal.plugin/active_journal_centralization_guide_no_encryption.md b/src/collectors/systemd-journal.plugin/active_journal_centralization_guide_no_encryption.md index cbed1e81e..ef57e1d24 100644 --- a/src/collectors/systemd-journal.plugin/active_journal_centralization_guide_no_encryption.md +++ b/src/collectors/systemd-journal.plugin/active_journal_centralization_guide_no_encryption.md @@ -47,7 +47,7 @@ sudo systemctl enable --now systemd-journal-gatewayd.socket To use it, open your web browser and navigate to: -``` +```text http://server.ip:19531/browse ``` diff --git a/src/collectors/systemd-journal.plugin/forward_secure_sealing.md b/src/collectors/systemd-journal.plugin/forward_secure_sealing.md index b41570d68..3ab7c8d08 100644 --- a/src/collectors/systemd-journal.plugin/forward_secure_sealing.md +++ b/src/collectors/systemd-journal.plugin/forward_secure_sealing.md @@ -5,12 +5,14 @@ Given that attackers often try to hide their actions by modifying or deleting lo FSS provides administrators with a mechanism to identify any such unauthorized alterations. ## Importance + Logs are a crucial component of system monitoring and auditing. Ensuring their integrity means administrators can trust the data, detect potential breaches, and trace actions back to their origins. Traditional methods to maintain this integrity involve writing logs to external systems or printing them out. While these methods are effective, they are not foolproof. FSS offers a more streamlined approach, allowing for log verification directly on the local system. ## How FSS Works + FSS operates by "sealing" binary logs at regular intervals. This seal is a cryptographic operation, ensuring that any tampering with the logs prior to the sealing can be detected. If an attacker modifies logs before they are sealed, these changes become a permanent part of the sealed record, highlighting any malicious activity. @@ -29,6 +31,7 @@ administrators to verify older seals. If logs are tampered with, verification wi breach. ## Enabling FSS + To enable FSS, use the following command: ```bash @@ -43,6 +46,7 @@ journalctl --setup-keys --interval=10s ``` ## Verifying Journals + After enabling FSS, you can verify the integrity of your logs using the verification key: ```bash @@ -52,6 +56,7 @@ journalctl --verify If any discrepancies are found, you'll be alerted, indicating potential tampering. ## Disabling FSS + Should you wish to disable FSS: **Delete the Sealing Key**: This stops new log entries from being sealed. @@ -66,7 +71,6 @@ journalctl --rotate journalctl --vacuum-time=1s ``` - **Adjust Systemd Configuration (Optional)**: If you've made changes to facilitate FSS in `/etc/systemd/journald.conf`, consider reverting or adjusting those. Restart the systemd-journald service afterward: @@ -75,6 +79,7 @@ systemctl restart systemd-journald ``` ## Conclusion + FSS is a significant advancement in maintaining log integrity. While not a replacement for all traditional integrity methods, it offers a valuable tool in the battle against unauthorized log tampering. By integrating FSS into your log management strategy, you ensure a more transparent, reliable, and tamper-evident logging system.
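To fold the verification step described above into routine monitoring, a thin wrapper around `journalctl --verify` is enough. This is a sketch under the assumption that `journalctl --verify` exits non-zero when any journal file fails verification; adapt it to how your distribution reports failures.

```python
import subprocess

# Assumption: a non-zero exit status means at least one journal file
# failed verification (or could not be checked).
result = subprocess.run(["journalctl", "--verify"], capture_output=True, text=True)

if result.returncode != 0:
    print("journal verification FAILED, possible tampering:")
    print(result.stderr or result.stdout)
else:
    print("all journal files passed verification")
```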
diff --git a/src/collectors/systemd-journal.plugin/passive_journal_centralization_guide_no_encryption.md b/src/collectors/systemd-journal.plugin/passive_journal_centralization_guide_no_encryption.md index b70c22033..a89379e4b 100644 --- a/src/collectors/systemd-journal.plugin/passive_journal_centralization_guide_no_encryption.md +++ b/src/collectors/systemd-journal.plugin/passive_journal_centralization_guide_no_encryption.md @@ -74,7 +74,7 @@ sudo apt-get install systemd-journal-remote Edit `/etc/systemd/journal-upload.conf` and set the IP address and the port of the server, like so: -```conf +```text [Upload] URL=http://centralization.server.ip:19532 ``` @@ -87,7 +87,7 @@ sudo systemctl edit systemd-journal-upload At the top, add: -```conf +```text [Service] Restart=always ``` diff --git a/src/collectors/systemd-journal.plugin/passive_journal_centralization_guide_self_signed_certs.md b/src/collectors/systemd-journal.plugin/passive_journal_centralization_guide_self_signed_certs.md index f8b9a62f0..f4038e812 100644 --- a/src/collectors/systemd-journal.plugin/passive_journal_centralization_guide_self_signed_certs.md +++ b/src/collectors/systemd-journal.plugin/passive_journal_centralization_guide_self_signed_certs.md @@ -46,9 +46,9 @@ sudo ./systemd-journal-self-signed-certs.sh "server1" "DNS:hostname1" "IP:10.0.0 Where: - - `server1` is the canonical name of the server. On newer systemd version, this name will be used by `systemd-journal-remote` and Netdata when you view the logs on the dashboard. - - `DNS:hostname1` is a DNS name that the server is reachable at. Add `"DNS:xyz"` multiple times to define multiple DNS names for the server. - - `IP:10.0.0.1` is an IP that the server is reachable at. Add `"IP:xyz"` multiple times to define multiple IPs for the server. +- `server1` is the canonical name of the server. On newer systemd version, this name will be used by `systemd-journal-remote` and Netdata when you view the logs on the dashboard. +- `DNS:hostname1` is a DNS name that the server is reachable at. Add `"DNS:xyz"` multiple times to define multiple DNS names for the server. +- `IP:10.0.0.1` is an IP that the server is reachable at. Add `"IP:xyz"` multiple times to define multiple IPs for the server. Repeat this process to create the certificates for all your servers. You can add servers as required, at any time in the future. 
@@ -150,7 +150,7 @@ sudo apt-get install systemd-journal-remote Edit `/etc/systemd/journal-upload.conf` and set the IP address and the port of the server, like so: -```conf +```text [Upload] URL=https://centralization.server.ip:19532 ``` @@ -165,7 +165,7 @@ sudo systemctl edit systemd-journal-upload.service At the top, add: -```conf +```text [Service] Restart=always ``` @@ -198,7 +198,6 @@ Here it is in action, in Netdata: ![2023-10-18 16-23-05](https://github.com/netdata/netdata/assets/2662304/83bec232-4770-455b-8f1c-46b5de5f93a2) - ## Verify it works To verify the central server is receiving logs, run this on the central server: diff --git a/src/collectors/systemd-journal.plugin/systemd-internals.h b/src/collectors/systemd-journal.plugin/systemd-internals.h index 31acb2f20..a2d13de64 100644 --- a/src/collectors/systemd-journal.plugin/systemd-internals.h +++ b/src/collectors/systemd-journal.plugin/systemd-internals.h @@ -153,4 +153,6 @@ static inline bool parse_journal_field(const char *data, size_t data_length, con void systemd_journal_dyncfg_init(struct functions_evloop_globals *wg); +bool is_journal_file(const char *filename, ssize_t len, const char **start_of_extension); + #endif //NETDATA_COLLECTORS_SYSTEMD_INTERNALS_H diff --git a/src/collectors/systemd-journal.plugin/systemd-journal-annotations.c b/src/collectors/systemd-journal.plugin/systemd-journal-annotations.c index c5b708714..c9ade0a33 100644 --- a/src/collectors/systemd-journal.plugin/systemd-journal-annotations.c +++ b/src/collectors/systemd-journal.plugin/systemd-journal-annotations.c @@ -2,18 +2,6 @@ #include "systemd-internals.h" -// ---------------------------------------------------------------------------- -#include "libnetdata/maps/system-users.h" -#include "libnetdata/maps/system-groups.h" - -static struct { - USERNAMES_CACHE *uc; - GROUPNAMES_CACHE *gc; -} systemd_annotations_globals = { - .uc = NULL, - .gc = NULL, -}; - // ---------------------------------------------------------------------------- const char *errno_map[] = { @@ -369,9 +357,9 @@ void netdata_systemd_journal_transform_uid(FACETS *facets __maybe_unused, BUFFER const char *v = buffer_tostring(wb); if(*v && isdigit(*v)) { uid_t uid = str2i(buffer_tostring(wb)); - STRING *u = system_usernames_cache_lookup_uid(systemd_annotations_globals.uc, uid); - buffer_contents_replace(wb, string2str(u), string_strlen(u)); - string_freez(u); + CACHED_USERNAME cu = cached_username_get_by_uid(uid); + buffer_contents_replace(wb, string2str(cu.username), string_strlen(cu.username)); + cached_username_release(cu); } } @@ -382,9 +370,9 @@ void netdata_systemd_journal_transform_gid(FACETS *facets __maybe_unused, BUFFER const char *v = buffer_tostring(wb); if(*v && isdigit(*v)) { gid_t gid = str2i(buffer_tostring(wb)); - STRING *g = system_groupnames_cache_lookup_gid(systemd_annotations_globals.gc, gid); - buffer_contents_replace(wb, string2str(g), string_strlen(g)); - string_freez(g); + CACHED_GROUPNAME cg = cached_groupname_get_by_gid(gid); + buffer_contents_replace(wb, string2str(cg.groupname), string_strlen(cg.groupname)); + cached_groupname_release(cg); } } @@ -650,8 +638,10 @@ void netdata_systemd_journal_transform_message_id(FACETS *facets __maybe_unused, // ---------------------------------------------------------------------------- void netdata_systemd_journal_annotations_init(void) { - systemd_annotations_globals.uc = system_usernames_cache_init(); - systemd_annotations_globals.gc = system_groupnames_cache_init(); + cached_usernames_init(); + 
cached_groupnames_init(); + update_cached_host_users(); + update_cached_host_groups(); netdata_systemd_journal_message_ids_init(); } diff --git a/src/collectors/systemd-journal.plugin/systemd-journal-dyncfg.c b/src/collectors/systemd-journal.plugin/systemd-journal-dyncfg.c index 469f9d2cf..d8098bd6c 100644 --- a/src/collectors/systemd-journal.plugin/systemd-journal-dyncfg.c +++ b/src/collectors/systemd-journal.plugin/systemd-journal-dyncfg.c @@ -58,6 +58,10 @@ static int systemd_journal_directories_dyncfg_update(BUFFER *result, BUFFER *pay struct json_object *journalDirectories; json_object_object_get_ex(jobj, JOURNAL_DIRECTORIES_JSON_NODE, &journalDirectories); + if (json_object_get_type(journalDirectories) != json_type_array) + return dyncfg_default_response(result, HTTP_RESP_BAD_REQUEST, + "member " JOURNAL_DIRECTORIES_JSON_NODE " is not an array"); + size_t n_directories = json_object_array_length(journalDirectories); if(n_directories > MAX_JOURNAL_DIRECTORIES) return dyncfg_default_response(result, HTTP_RESP_BAD_REQUEST, "too many directories configured"); diff --git a/src/collectors/systemd-journal.plugin/systemd-journal-files.c b/src/collectors/systemd-journal.plugin/systemd-journal-files.c index a05cd1c5c..ea0511f7a 100644 --- a/src/collectors/systemd-journal.plugin/systemd-journal-files.c +++ b/src/collectors/systemd-journal.plugin/systemd-journal-files.c @@ -285,8 +285,8 @@ void journal_file_update_header(const char *filename, struct journal_file *jf) { if(dash_seqnum) { const char *dash_first_msg_ut = strchr(dash_seqnum + 1, '-'); if(dash_first_msg_ut) { - const char *dot_journal = strstr(dash_first_msg_ut + 1, ".journal"); - if(dot_journal) { + const char *dot_journal = NULL; + if(is_journal_file(filename, -1, &dot_journal) && dot_journal && dot_journal > dash_first_msg_ut) { if(dash_seqnum - at - 1 == 32 && dash_first_msg_ut - dash_seqnum - 1 == 16 && dot_journal - dash_first_msg_ut - 1 == 16) { @@ -369,8 +369,7 @@ static STRING *string_strdupz_source(const char *s, const char *e, size_t max_le buf[max_len - 1] = '\0'; for(size_t i = 0; buf[i] ;i++) - if(!isalnum(buf[i]) && buf[i] != '-' && buf[i] != '.' 
&& buf[i] != ':') - buf[i] = '_'; + if(!is_netdata_api_valid_character(buf[i])) buf[i] = '_'; return string_strdupz(buf); } @@ -393,7 +392,7 @@ static void files_registry_insert_cb(const DICTIONARY_ITEM *item, void *value, v char *e = strchr(s, '@'); if(!e) - e = strstr(s, ".journal"); + is_journal_file(s, -1, (const char **)&e); if(e) { const char *d = s; @@ -475,19 +474,6 @@ struct journal_file_source { uint64_t size; }; -static void human_readable_size_ib(uint64_t size, char *dst, size_t dst_len) { - if(size > 1024ULL * 1024 * 1024 * 1024) - snprintfz(dst, dst_len, "%0.2f TiB", (double)size / 1024.0 / 1024.0 / 1024.0 / 1024.0); - else if(size > 1024ULL * 1024 * 1024) - snprintfz(dst, dst_len, "%0.2f GiB", (double)size / 1024.0 / 1024.0 / 1024.0); - else if(size > 1024ULL * 1024) - snprintfz(dst, dst_len, "%0.2f MiB", (double)size / 1024.0 / 1024.0); - else if(size > 1024ULL) - snprintfz(dst, dst_len, "%0.2f KiB", (double)size / 1024.0); - else - snprintfz(dst, dst_len, "%"PRIu64" B", size); -} - #define print_duration(dst, dst_len, pos, remaining, duration, one, many, printed) do { \ if((remaining) > (duration)) { \ uint64_t _count = (remaining) / (duration); \ @@ -498,22 +484,6 @@ static void human_readable_size_ib(uint64_t size, char *dst, size_t dst_len) { } \ } while(0) -static void human_readable_duration_s(time_t duration_s, char *dst, size_t dst_len) { - if(duration_s < 0) - duration_s = -duration_s; - - size_t pos = 0; - dst[0] = 0 ; - - bool printed = false; - print_duration(dst, dst_len, pos, duration_s, 86400 * 365, "year", "years", printed); - print_duration(dst, dst_len, pos, duration_s, 86400 * 30, "month", "months", printed); - print_duration(dst, dst_len, pos, duration_s, 86400 * 1, "day", "days", printed); - print_duration(dst, dst_len, pos, duration_s, 3600 * 1, "hour", "hours", printed); - print_duration(dst, dst_len, pos, duration_s, 60 * 1, "min", "mins", printed); - print_duration(dst, dst_len, pos, duration_s, 1, "sec", "secs", printed); -} - static int journal_file_to_json_array_cb(const DICTIONARY_ITEM *item, void *entry, void *data) { struct journal_file_source *jfs = entry; BUFFER *wb = data; @@ -522,12 +492,12 @@ static int journal_file_to_json_array_cb(const DICTIONARY_ITEM *item, void *entr buffer_json_add_array_item_object(wb); { - char size_for_humans[100]; - human_readable_size_ib(jfs->size, size_for_humans, sizeof(size_for_humans)); + char size_for_humans[128]; + size_snprintf(size_for_humans, sizeof(size_for_humans), jfs->size, "B", false); - char duration_for_humans[1024]; - human_readable_duration_s((time_t)((jfs->last_ut - jfs->first_ut) / USEC_PER_SEC), - duration_for_humans, sizeof(duration_for_humans)); + char duration_for_humans[128]; + duration_snprintf(duration_for_humans, sizeof(duration_for_humans), + (time_t)((jfs->last_ut - jfs->first_ut) / USEC_PER_SEC), "s", true); char info[1024]; snprintfz(info, sizeof(info), "%zu files, with a total size of %s, covering %s", @@ -602,10 +572,39 @@ static void files_registry_delete_cb(const DICTIONARY_ITEM *item, void *value, v string_freez(jf->source); } -void journal_directory_scan_recursively(DICTIONARY *files, DICTIONARY *dirs, const char *dirname, int depth) { - static const char *ext = ".journal"; - static const ssize_t ext_len = sizeof(".journal") - 1; +#define EXT_DOT_JOURNAL ".journal" +#define EXT_DOT_JOURNAL_TILDA ".journal~" + +static struct { + const char *ext; + ssize_t len; +} valid_journal_extension[] = { + { .ext = EXT_DOT_JOURNAL, .len = sizeof(EXT_DOT_JOURNAL) - 1 }, + { .ext = 
EXT_DOT_JOURNAL_TILDA, .len = sizeof(EXT_DOT_JOURNAL_TILDA) - 1 }, +}; + +bool is_journal_file(const char *filename, ssize_t len, const char **start_of_extension) { + if(len < 0) + len = (ssize_t)strlen(filename); + for(size_t i = 0; i < _countof(valid_journal_extension) ;i++) { + const char *ext = valid_journal_extension[i].ext; + ssize_t elen = valid_journal_extension[i].len; + + if(len > elen && strcmp(filename + len - elen, ext) == 0) { + if(start_of_extension) + *start_of_extension = filename + len - elen; + return true; + } + } + + if(start_of_extension) + *start_of_extension = NULL; + + return false; +} + +void journal_directory_scan_recursively(DICTIONARY *files, DICTIONARY *dirs, const char *dirname, int depth) { if (depth > VAR_LOG_JOURNAL_MAX_DEPTH) return; @@ -635,7 +634,7 @@ void journal_directory_scan_recursively(DICTIONARY *files, DICTIONARY *dirs, con if (entry->d_type == DT_DIR) { journal_directory_scan_recursively(files, dirs, full_path, depth++); } - else if (entry->d_type == DT_REG && len > ext_len && strcmp(full_path + len - ext_len, ext) == 0) { + else if (entry->d_type == DT_REG && is_journal_file(full_path, len, NULL)) { if(files) dictionary_set(files, full_path, NULL, 0); @@ -653,7 +652,7 @@ void journal_directory_scan_recursively(DICTIONARY *files, DICTIONARY *dirs, con journal_directory_scan_recursively(files, dirs, resolved_path, depth++); } } - else if(S_ISREG(info.st_mode) && len > ext_len && strcmp(full_path + len - ext_len, ext) == 0) { + else if(S_ISREG(info.st_mode) && is_journal_file(full_path, len, NULL)) { if(files) dictionary_set(files, full_path, NULL, 0); @@ -756,6 +755,7 @@ void journal_files_registry_update(void) { dictionary_del(journal_files_registry, jf_dfe.name); } dfe_done(jf); + dictionary_garbage_collect(journal_files_registry); journal_files_scans++; spinlock_unlock(&spinlock); diff --git a/src/collectors/systemd-journal.plugin/systemd-journal-sampling.h b/src/collectors/systemd-journal.plugin/systemd-journal-sampling.h new file mode 100644 index 000000000..0e1fed2d6 --- /dev/null +++ b/src/collectors/systemd-journal.plugin/systemd-journal-sampling.h @@ -0,0 +1,378 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_SYSTEMD_JOURNAL_SAMPLING_H +#define NETDATA_SYSTEMD_JOURNAL_SAMPLING_H + +// ---------------------------------------------------------------------------- +// sampling support + +static inline void sampling_query_init(LOGS_QUERY_STATUS *lqs, FACETS *facets) { + if(!lqs->rq.sampling) + return; + + if(!lqs->rq.slice) { + // the user is doing a full data query + // disable sampling + lqs->rq.sampling = 0; + return; + } + + if(lqs->rq.data_only) { + // the user is doing a data query + // disable sampling + lqs->rq.sampling = 0; + return; + } + + if(!lqs->c.files_matched) { + // no files have been matched + // disable sampling + lqs->rq.sampling = 0; + return; + } + + lqs->c.samples.slots = facets_histogram_slots(facets); + if(lqs->c.samples.slots < 2) + lqs->c.samples.slots = 2; + if(lqs->c.samples.slots > SYSTEMD_JOURNAL_SAMPLING_SLOTS) + lqs->c.samples.slots = SYSTEMD_JOURNAL_SAMPLING_SLOTS; + + if(!lqs->rq.after_ut || !lqs->rq.before_ut || lqs->rq.after_ut >= lqs->rq.before_ut) { + // we don't have enough information for sampling + lqs->rq.sampling = 0; + return; + } + + usec_t delta = lqs->rq.before_ut - lqs->rq.after_ut; + usec_t step = delta / facets_histogram_slots(facets) - 1; + if(step < 1) step = 1; + + lqs->c.samples_per_time_slot.start_ut = lqs->rq.after_ut; + lqs->c.samples_per_time_slot.end_ut = 
lqs->rq.before_ut; + lqs->c.samples_per_time_slot.step_ut = step; + + // the minimum number of rows to enable sampling + lqs->c.samples.enable_after_samples = lqs->rq.sampling / 2; + + size_t files_matched = lqs->c.files_matched; + if(!files_matched) + files_matched = 1; + + // the minimum number of rows per file to enable sampling + lqs->c.samples_per_file.enable_after_samples = (lqs->rq.sampling / 4) / files_matched; + if(lqs->c.samples_per_file.enable_after_samples < lqs->rq.entries) + lqs->c.samples_per_file.enable_after_samples = lqs->rq.entries; + + // the minimum number of rows per time slot to enable sampling + lqs->c.samples_per_time_slot.enable_after_samples = (lqs->rq.sampling / 4) / lqs->c.samples.slots; + if(lqs->c.samples_per_time_slot.enable_after_samples < lqs->rq.entries) + lqs->c.samples_per_time_slot.enable_after_samples = lqs->rq.entries; +} + +static inline void sampling_file_init(LOGS_QUERY_STATUS *lqs, struct journal_file *jf __maybe_unused) { + lqs->c.samples_per_file.sampled = 0; + lqs->c.samples_per_file.unsampled = 0; + lqs->c.samples_per_file.estimated = 0; + lqs->c.samples_per_file.every = 0; + lqs->c.samples_per_file.skipped = 0; + lqs->c.samples_per_file.recalibrate = 0; +} + +static inline size_t sampling_file_lines_scanned_so_far(LOGS_QUERY_STATUS *lqs) { + size_t sampled = lqs->c.samples_per_file.sampled + lqs->c.samples_per_file.unsampled; + if(!sampled) sampled = 1; + return sampled; +} + +static inline void sampling_running_file_query_overlapping_timeframe_ut( + LOGS_QUERY_STATUS *lqs, struct journal_file *jf, FACETS_ANCHOR_DIRECTION direction, + usec_t msg_ut, usec_t *after_ut, usec_t *before_ut) { + + // find the overlap of the query and file timeframes + // taking into account the first message we encountered + + usec_t oldest_ut, newest_ut; + if(direction == FACETS_ANCHOR_DIRECTION_FORWARD) { + // the first message we know (oldest) + oldest_ut = lqs->c.query_file.first_msg_ut ? lqs->c.query_file.first_msg_ut : jf->msg_first_ut; + if(!oldest_ut) oldest_ut = lqs->c.query_file.start_ut; + + if(jf->msg_last_ut) + newest_ut = MIN(lqs->c.query_file.stop_ut, jf->msg_last_ut); + else if(jf->file_last_modified_ut) + newest_ut = MIN(lqs->c.query_file.stop_ut, jf->file_last_modified_ut); + else + newest_ut = lqs->c.query_file.stop_ut; + + if(msg_ut < oldest_ut) + oldest_ut = msg_ut - 1; + } + else /* BACKWARD */ { + // the latest message we know (newest) + newest_ut = lqs->c.query_file.first_msg_ut ? 
lqs->c.query_file.first_msg_ut : jf->msg_last_ut; + if(!newest_ut) newest_ut = lqs->c.query_file.start_ut; + + if(jf->msg_first_ut) + oldest_ut = MAX(lqs->c.query_file.stop_ut, jf->msg_first_ut); + else + oldest_ut = lqs->c.query_file.stop_ut; + + if(newest_ut < msg_ut) + newest_ut = msg_ut + 1; + } + + *after_ut = oldest_ut; + *before_ut = newest_ut; +} + +static inline double sampling_running_file_query_progress_by_time( + LOGS_QUERY_STATUS *lqs, struct journal_file *jf, + FACETS_ANCHOR_DIRECTION direction, usec_t msg_ut) { + + usec_t after_ut, before_ut, elapsed_ut; + sampling_running_file_query_overlapping_timeframe_ut(lqs, jf, direction, msg_ut, &after_ut, &before_ut); + + if(direction == FACETS_ANCHOR_DIRECTION_FORWARD) + elapsed_ut = msg_ut - after_ut; + else + elapsed_ut = before_ut - msg_ut; + + usec_t total_ut = before_ut - after_ut; + double progress = (double)elapsed_ut / (double)total_ut; + + return progress; +} + +static inline usec_t sampling_running_file_query_remaining_time( + LOGS_QUERY_STATUS *lqs, struct journal_file *jf, + FACETS_ANCHOR_DIRECTION direction, usec_t msg_ut, + usec_t *total_time_ut, usec_t *remaining_start_ut, + usec_t *remaining_end_ut) { + usec_t after_ut, before_ut; + sampling_running_file_query_overlapping_timeframe_ut(lqs, jf, direction, msg_ut, &after_ut, &before_ut); + + // since we have a timestamp in msg_ut + // this timestamp can extend the overlap + if(msg_ut <= after_ut) + after_ut = msg_ut - 1; + + if(msg_ut >= before_ut) + before_ut = msg_ut + 1; + + // return the remaining duration + usec_t remaining_from_ut, remaining_to_ut; + if(direction == FACETS_ANCHOR_DIRECTION_FORWARD) { + remaining_from_ut = msg_ut; + remaining_to_ut = before_ut; + } + else { + remaining_from_ut = after_ut; + remaining_to_ut = msg_ut; + } + + usec_t remaining_ut = remaining_to_ut - remaining_from_ut; + + if(total_time_ut) + *total_time_ut = (before_ut > after_ut) ? 
before_ut - after_ut : 1; + + if(remaining_start_ut) + *remaining_start_ut = remaining_from_ut; + + if(remaining_end_ut) + *remaining_end_ut = remaining_to_ut; + + return remaining_ut; +} + +static inline size_t sampling_running_file_query_estimate_remaining_lines_by_time( + LOGS_QUERY_STATUS *lqs, + struct journal_file *jf, + FACETS_ANCHOR_DIRECTION direction, + usec_t msg_ut) { + size_t scanned_lines = sampling_file_lines_scanned_so_far(lqs); + + // Calculate the proportion of time covered + usec_t total_time_ut, remaining_start_ut, remaining_end_ut; + usec_t remaining_time_ut = sampling_running_file_query_remaining_time( + lqs, jf, direction, msg_ut, &total_time_ut, &remaining_start_ut, &remaining_end_ut); + if (total_time_ut == 0) total_time_ut = 1; + + double proportion_by_time = (double) (total_time_ut - remaining_time_ut) / (double) total_time_ut; + + if (proportion_by_time == 0 || proportion_by_time > 1.0 || !isfinite(proportion_by_time)) + proportion_by_time = 1.0; + + // Estimate the total number of lines in the file + size_t expected_matching_logs_by_time = (size_t)((double)scanned_lines / proportion_by_time); + + if(jf->messages_in_file && expected_matching_logs_by_time > jf->messages_in_file) + expected_matching_logs_by_time = jf->messages_in_file; + + // Calculate the estimated number of remaining lines + size_t remaining_logs_by_time = expected_matching_logs_by_time - scanned_lines; + if (remaining_logs_by_time < 1) remaining_logs_by_time = 1; + + // nd_log(NDLS_COLLECTORS, NDLP_INFO, + // "JOURNAL ESTIMATION: '%s' " + // "scanned_lines=%zu [sampled=%zu, unsampled=%zu, estimated=%zu], " + // "file [%"PRIu64" - %"PRIu64", duration %"PRId64", known lines in file %zu], " + // "query [%"PRIu64" - %"PRIu64", duration %"PRId64"], " + // "first message read from the file at %"PRIu64", current message at %"PRIu64", " + // "proportion of time %.2f %%, " + // "expected total lines in file %zu, " + // "remaining lines %zu, " + // "remaining time %"PRIu64" [%"PRIu64" - %"PRIu64", duration %"PRId64"]" + // , jf->filename + // , scanned_lines, fqs->samples_per_file.sampled, fqs->samples_per_file.unsampled, fqs->samples_per_file.estimated + // , jf->msg_first_ut, jf->msg_last_ut, jf->msg_last_ut - jf->msg_first_ut, jf->messages_in_file + // , fqs->query_file.start_ut, fqs->query_file.stop_ut, fqs->query_file.stop_ut - fqs->query_file.start_ut + // , fqs->query_file.first_msg_ut, msg_ut + // , proportion_by_time * 100.0 + // , expected_matching_logs_by_time + // , remaining_logs_by_time + // , remaining_time_ut, remaining_start_ut, remaining_end_ut, remaining_end_ut - remaining_start_ut + // ); + + return remaining_logs_by_time; +} + +static inline size_t sampling_running_file_query_estimate_remaining_lines( + sd_journal *j __maybe_unused, LOGS_QUERY_STATUS *lqs, struct journal_file *jf, + FACETS_ANCHOR_DIRECTION direction, usec_t msg_ut) { + size_t remaining_logs_by_seqnum = 0; + +#ifdef HAVE_SD_JOURNAL_GET_SEQNUM + size_t expected_matching_logs_by_seqnum = 0; + double proportion_by_seqnum = 0.0; + uint64_t current_msg_seqnum; + sd_id128_t current_msg_writer; + if(!lqs->c.query_file.first_msg_seqnum || sd_journal_get_seqnum(j, ¤t_msg_seqnum, ¤t_msg_writer) < 0) { + lqs->c.query_file.first_msg_seqnum = 0; + lqs->c.query_file.first_msg_writer = SD_ID128_NULL; + } + else if(jf->messages_in_file) { + size_t scanned_lines = sampling_file_lines_scanned_so_far(lqs); + + double proportion_of_all_lines_so_far; + if(direction == FACETS_ANCHOR_DIRECTION_FORWARD) + proportion_of_all_lines_so_far = 
(double)scanned_lines / (double)(current_msg_seqnum - jf->first_seqnum); + else + proportion_of_all_lines_so_far = (double)scanned_lines / (double)(jf->last_seqnum - current_msg_seqnum); + + if(proportion_of_all_lines_so_far > 1.0) + proportion_of_all_lines_so_far = 1.0; + + expected_matching_logs_by_seqnum = (size_t)(proportion_of_all_lines_so_far * (double)jf->messages_in_file); + + proportion_by_seqnum = (double)scanned_lines / (double)expected_matching_logs_by_seqnum; + + if (proportion_by_seqnum == 0 || proportion_by_seqnum > 1.0 || !isfinite(proportion_by_seqnum)) + proportion_by_seqnum = 1.0; + + remaining_logs_by_seqnum = expected_matching_logs_by_seqnum - scanned_lines; + if(!remaining_logs_by_seqnum) remaining_logs_by_seqnum = 1; + } +#endif + + if(remaining_logs_by_seqnum) + return remaining_logs_by_seqnum; + + return sampling_running_file_query_estimate_remaining_lines_by_time(lqs, jf, direction, msg_ut); +} + +static inline void sampling_decide_file_sampling_every(sd_journal *j, + LOGS_QUERY_STATUS *lqs, struct journal_file *jf, FACETS_ANCHOR_DIRECTION direction, usec_t msg_ut) { + size_t files_matched = lqs->c.files_matched; + if(!files_matched) files_matched = 1; + + size_t remaining_lines = sampling_running_file_query_estimate_remaining_lines(j, lqs, jf, direction, msg_ut); + size_t wanted_samples = (lqs->rq.sampling / 2) / files_matched; + if(!wanted_samples) wanted_samples = 1; + + lqs->c.samples_per_file.every = remaining_lines / wanted_samples; + + if(lqs->c.samples_per_file.every < 1) + lqs->c.samples_per_file.every = 1; +} + +typedef enum { + SAMPLING_STOP_AND_ESTIMATE = -1, + SAMPLING_FULL = 0, + SAMPLING_SKIP_FIELDS = 1, +} sampling_t; + +static inline sampling_t is_row_in_sample( + sd_journal *j, LOGS_QUERY_STATUS *lqs, struct journal_file *jf, + usec_t msg_ut, FACETS_ANCHOR_DIRECTION direction, bool candidate_to_keep) { + if(!lqs->rq.sampling || candidate_to_keep) + return SAMPLING_FULL; + + if(unlikely(msg_ut < lqs->c.samples_per_time_slot.start_ut)) + msg_ut = lqs->c.samples_per_time_slot.start_ut; + if(unlikely(msg_ut > lqs->c.samples_per_time_slot.end_ut)) + msg_ut = lqs->c.samples_per_time_slot.end_ut; + + size_t slot = (msg_ut - lqs->c.samples_per_time_slot.start_ut) / lqs->c.samples_per_time_slot.step_ut; + if(slot >= lqs->c.samples.slots) + slot = lqs->c.samples.slots - 1; + + bool should_sample = false; + + if(lqs->c.samples.sampled < lqs->c.samples.enable_after_samples || + lqs->c.samples_per_file.sampled < lqs->c.samples_per_file.enable_after_samples || + lqs->c.samples_per_time_slot.sampled[slot] < lqs->c.samples_per_time_slot.enable_after_samples) + should_sample = true; + + else if(lqs->c.samples_per_file.recalibrate >= SYSTEMD_JOURNAL_SAMPLING_RECALIBRATE || !lqs->c.samples_per_file.every) { + // this is the first to be unsampled for this file + sampling_decide_file_sampling_every(j, lqs, jf, direction, msg_ut); + lqs->c.samples_per_file.recalibrate = 0; + should_sample = true; + } + else { + // we sample 1 every fqs->samples_per_file.every + if(lqs->c.samples_per_file.skipped >= lqs->c.samples_per_file.every) { + lqs->c.samples_per_file.skipped = 0; + should_sample = true; + } + else + lqs->c.samples_per_file.skipped++; + } + + if(should_sample) { + lqs->c.samples.sampled++; + lqs->c.samples_per_file.sampled++; + lqs->c.samples_per_time_slot.sampled[slot]++; + + return SAMPLING_FULL; + } + + lqs->c.samples_per_file.recalibrate++; + + lqs->c.samples.unsampled++; + lqs->c.samples_per_file.unsampled++; + lqs->c.samples_per_time_slot.unsampled[slot]++; 
+ + if(lqs->c.samples_per_file.unsampled > lqs->c.samples_per_file.sampled) { + double progress_by_time = sampling_running_file_query_progress_by_time(lqs, jf, direction, msg_ut); + + if(progress_by_time > SYSTEMD_JOURNAL_ENABLE_ESTIMATIONS_FILE_PERCENTAGE) + return SAMPLING_STOP_AND_ESTIMATE; + } + + return SAMPLING_SKIP_FIELDS; +} + +static inline void sampling_update_running_query_file_estimates( + FACETS *facets, sd_journal *j, + LOGS_QUERY_STATUS *lqs, struct journal_file *jf, usec_t msg_ut, FACETS_ANCHOR_DIRECTION direction) { + usec_t total_time_ut, remaining_start_ut, remaining_end_ut; + sampling_running_file_query_remaining_time( + lqs, jf, direction, msg_ut, &total_time_ut, &remaining_start_ut, &remaining_end_ut); + size_t remaining_lines = sampling_running_file_query_estimate_remaining_lines(j, lqs, jf, direction, msg_ut); + facets_update_estimations(facets, remaining_start_ut, remaining_end_ut, remaining_lines); + lqs->c.samples.estimated += remaining_lines; + lqs->c.samples_per_file.estimated += remaining_lines; +} + +#endif //NETDATA_SYSTEMD_JOURNAL_SAMPLING_H diff --git a/src/collectors/systemd-journal.plugin/systemd-journal-watcher.c b/src/collectors/systemd-journal.plugin/systemd-journal-watcher.c index 6f12f154e..dd48ccc35 100644 --- a/src/collectors/systemd-journal.plugin/systemd-journal-watcher.c +++ b/src/collectors/systemd-journal.plugin/systemd-journal-watcher.c @@ -245,7 +245,7 @@ void process_event(Watcher *watcher, int inotifyFd, struct inotify_event *event) "JOURNAL WATCHER: Received unhandled event with mask %u for directory '%s'", event->mask, fullPath); } - else if(len > sizeof(".journal") - 1 && strcmp(&event->name[len - (sizeof(".journal") - 1)], ".journal") == 0) { + else if(is_journal_file(event->name, (ssize_t)len, NULL)) { // It is a file that ends in .journal // add it to our pending list dictionary_set(watcher->pending, fullPath, NULL, 0); diff --git a/src/collectors/systemd-journal.plugin/systemd-journal.c b/src/collectors/systemd-journal.plugin/systemd-journal.c index 6da9c687e..9666e0109 100644 --- a/src/collectors/systemd-journal.plugin/systemd-journal.c +++ b/src/collectors/systemd-journal.plugin/systemd-journal.c @@ -5,53 +5,99 @@ * GPL v3+ */ -#include "systemd-internals.h" - /* * TODO - * * _UDEV_DEVLINK is frequently set more than once per field - support multi-value faces * */ -#define FACET_MAX_VALUE_LENGTH 8192 +#include "systemd-internals.h" #define SYSTEMD_JOURNAL_FUNCTION_DESCRIPTION "View, search and analyze systemd journal entries." 
#define SYSTEMD_JOURNAL_FUNCTION_NAME "systemd-journal" -#define SYSTEMD_JOURNAL_DEFAULT_TIMEOUT 60 -#define SYSTEMD_JOURNAL_MAX_PARAMS 1000 -#define SYSTEMD_JOURNAL_DEFAULT_QUERY_DURATION (1 * 3600) -#define SYSTEMD_JOURNAL_DEFAULT_ITEMS_PER_QUERY 200 -#define SYSTEMD_JOURNAL_DEFAULT_ITEMS_SAMPLING 1000000 -#define SYSTEMD_JOURNAL_SAMPLING_SLOTS 1000 -#define SYSTEMD_JOURNAL_SAMPLING_RECALIBRATE 10000 +#define SYSTEMD_JOURNAL_SAMPLING_SLOTS 1000 +#define SYSTEMD_JOURNAL_SAMPLING_RECALIBRATE 10000 -#define SYSTEMD_JOURNAL_PROGRESS_EVERY_UT (250 * USEC_PER_MS) +#ifdef HAVE_SD_JOURNAL_RESTART_FIELDS +#define LQS_DEFAULT_SLICE_MODE 1 +#else +#define LQS_DEFAULT_SLICE_MODE 0 +#endif + +// functions needed by LQS +static SD_JOURNAL_FILE_SOURCE_TYPE get_internal_source_type(const char *value); + +// structures needed by LQS +struct lqs_extension { + struct { + usec_t start_ut; + usec_t stop_ut; + usec_t first_msg_ut; + + sd_id128_t first_msg_writer; + uint64_t first_msg_seqnum; + } query_file; + + struct { + uint32_t enable_after_samples; + uint32_t slots; + uint32_t sampled; + uint32_t unsampled; + uint32_t estimated; + } samples; + + struct { + uint32_t enable_after_samples; + uint32_t every; + uint32_t skipped; + uint32_t recalibrate; + uint32_t sampled; + uint32_t unsampled; + uint32_t estimated; + } samples_per_file; + + struct { + usec_t start_ut; + usec_t end_ut; + usec_t step_ut; + uint32_t enable_after_samples; + uint32_t sampled[SYSTEMD_JOURNAL_SAMPLING_SLOTS]; + uint32_t unsampled[SYSTEMD_JOURNAL_SAMPLING_SLOTS]; + } samples_per_time_slot; + + // per file progress info + // size_t cached_count; -#define JOURNAL_PARAMETER_HELP "help" -#define JOURNAL_PARAMETER_AFTER "after" -#define JOURNAL_PARAMETER_BEFORE "before" -#define JOURNAL_PARAMETER_ANCHOR "anchor" -#define JOURNAL_PARAMETER_LAST "last" -#define JOURNAL_PARAMETER_QUERY "query" -#define JOURNAL_PARAMETER_FACETS "facets" -#define JOURNAL_PARAMETER_HISTOGRAM "histogram" -#define JOURNAL_PARAMETER_DIRECTION "direction" -#define JOURNAL_PARAMETER_IF_MODIFIED_SINCE "if_modified_since" -#define JOURNAL_PARAMETER_DATA_ONLY "data_only" -#define JOURNAL_PARAMETER_SOURCE "source" -#define JOURNAL_PARAMETER_INFO "info" -#define JOURNAL_PARAMETER_SLICE "slice" -#define JOURNAL_PARAMETER_DELTA "delta" -#define JOURNAL_PARAMETER_TAIL "tail" -#define JOURNAL_PARAMETER_SAMPLING "sampling" + // progress statistics + usec_t matches_setup_ut; + size_t rows_useful; + size_t rows_read; + size_t bytes_read; + size_t files_matched; + size_t file_working; +}; + +// prepare LQS +#define LQS_FUNCTION_NAME SYSTEMD_JOURNAL_FUNCTION_NAME +#define LQS_FUNCTION_DESCRIPTION SYSTEMD_JOURNAL_FUNCTION_DESCRIPTION +#define LQS_DEFAULT_ITEMS_PER_QUERY 200 +#define LQS_DEFAULT_ITEMS_SAMPLING 1000000 +#define LQS_SOURCE_TYPE SD_JOURNAL_FILE_SOURCE_TYPE +#define LQS_SOURCE_TYPE_ALL SDJF_ALL +#define LQS_SOURCE_TYPE_NONE SDJF_NONE +#define LQS_PARAMETER_SOURCE_NAME "Journal Sources" // this is how it is shown to users +#define LQS_FUNCTION_GET_INTERNAL_SOURCE_TYPE(value) get_internal_source_type(value) +#define LQS_FUNCTION_SOURCE_TO_JSON_ARRAY(wb) available_journal_file_sources_to_json_array(wb) +#include "libnetdata/facets/logs_query_status.h" + +#include "systemd-journal-sampling.h" +#define FACET_MAX_VALUE_LENGTH 8192 +#define SYSTEMD_JOURNAL_DEFAULT_TIMEOUT 60 +#define SYSTEMD_JOURNAL_PROGRESS_EVERY_UT (250 * USEC_PER_MS) #define JOURNAL_KEY_ND_JOURNAL_FILE "ND_JOURNAL_FILE" #define JOURNAL_KEY_ND_JOURNAL_PROCESS "ND_JOURNAL_PROCESS" - -#define 
JOURNAL_DEFAULT_SLICE_MODE true #define JOURNAL_DEFAULT_DIRECTION FACETS_ANCHOR_DIRECTION_BACKWARD - #define SYSTEMD_ALWAYS_VISIBLE_KEYS NULL #define SYSTEMD_KEYS_EXCLUDED_FROM_FACETS \ @@ -182,100 +228,27 @@ // ---------------------------------------------------------------------------- -typedef struct function_query_status { - bool *cancelled; // a pointer to the cancelling boolean - usec_t *stop_monotonic_ut; - - // request - const char *transaction; - - SD_JOURNAL_FILE_SOURCE_TYPE source_type; - SIMPLE_PATTERN *sources; - usec_t after_ut; - usec_t before_ut; - - struct { - usec_t start_ut; - usec_t stop_ut; - } anchor; - - FACETS_ANCHOR_DIRECTION direction; - size_t entries; - usec_t if_modified_since; - bool delta; - bool tail; - bool data_only; - bool slice; - size_t sampling; - size_t filters; - usec_t last_modified; - const char *query; - const char *histogram; - - struct { - usec_t start_ut; // the starting time of the query - we start from this - usec_t stop_ut; // the ending time of the query - we stop at this - usec_t first_msg_ut; - - sd_id128_t first_msg_writer; - uint64_t first_msg_seqnum; - } query_file; - - struct { - uint32_t enable_after_samples; - uint32_t slots; - uint32_t sampled; - uint32_t unsampled; - uint32_t estimated; - } samples; - - struct { - uint32_t enable_after_samples; - uint32_t every; - uint32_t skipped; - uint32_t recalibrate; - uint32_t sampled; - uint32_t unsampled; - uint32_t estimated; - } samples_per_file; - - struct { - usec_t start_ut; - usec_t end_ut; - usec_t step_ut; - uint32_t enable_after_samples; - uint32_t sampled[SYSTEMD_JOURNAL_SAMPLING_SLOTS]; - uint32_t unsampled[SYSTEMD_JOURNAL_SAMPLING_SLOTS]; - } samples_per_time_slot; - - // per file progress info - // size_t cached_count; - - // progress statistics - usec_t matches_setup_ut; - size_t rows_useful; - size_t rows_read; - size_t bytes_read; - size_t files_matched; - size_t file_working; -} FUNCTION_QUERY_STATUS; - -static void log_fqs(FUNCTION_QUERY_STATUS *fqs, const char *msg) { - netdata_log_error("ERROR: %s, on query " - "timeframe [%"PRIu64" - %"PRIu64"], " - "anchor [%"PRIu64" - %"PRIu64"], " - "if_modified_since %"PRIu64", " - "data_only:%s, delta:%s, tail:%s, direction:%s" - , msg - , fqs->after_ut, fqs->before_ut - , fqs->anchor.start_ut, fqs->anchor.stop_ut - , fqs->if_modified_since - , fqs->data_only ? "true" : "false" - , fqs->delta ? "true" : "false" - , fqs->tail ? "tail" : "false" - , fqs->direction == FACETS_ANCHOR_DIRECTION_FORWARD ? 
"forward" : "backward"); +static SD_JOURNAL_FILE_SOURCE_TYPE get_internal_source_type(const char *value) { + if(strcmp(value, SDJF_SOURCE_ALL_NAME) == 0) + return SDJF_ALL; + else if(strcmp(value, SDJF_SOURCE_LOCAL_NAME) == 0) + return SDJF_LOCAL_ALL; + else if(strcmp(value, SDJF_SOURCE_REMOTES_NAME) == 0) + return SDJF_REMOTE_ALL; + else if(strcmp(value, SDJF_SOURCE_NAMESPACES_NAME) == 0) + return SDJF_LOCAL_NAMESPACE; + else if(strcmp(value, SDJF_SOURCE_LOCAL_SYSTEM_NAME) == 0) + return SDJF_LOCAL_SYSTEM; + else if(strcmp(value, SDJF_SOURCE_LOCAL_USERS_NAME) == 0) + return SDJF_LOCAL_USER; + else if(strcmp(value, SDJF_SOURCE_LOCAL_OTHER_NAME) == 0) + return SDJF_LOCAL_OTHER; + + return SDJF_NONE; } +// ---------------------------------------------------------------------------- + static inline bool netdata_systemd_journal_seek_to(sd_journal *j, usec_t timestamp) { if(sd_journal_seek_realtime_usec(j, timestamp) < 0) { netdata_log_error("SYSTEMD-JOURNAL: Failed to seek to %" PRIu64, timestamp); @@ -290,367 +263,6 @@ static inline bool netdata_systemd_journal_seek_to(sd_journal *j, usec_t timesta #define JD_SOURCE_REALTIME_TIMESTAMP "_SOURCE_REALTIME_TIMESTAMP" -// ---------------------------------------------------------------------------- -// sampling support - -static void sampling_query_init(FUNCTION_QUERY_STATUS *fqs, FACETS *facets) { - if(!fqs->sampling) - return; - - if(!fqs->slice) { - // the user is doing a full data query - // disable sampling - fqs->sampling = 0; - return; - } - - if(fqs->data_only) { - // the user is doing a data query - // disable sampling - fqs->sampling = 0; - return; - } - - if(!fqs->files_matched) { - // no files have been matched - // disable sampling - fqs->sampling = 0; - return; - } - - fqs->samples.slots = facets_histogram_slots(facets); - if(fqs->samples.slots < 2) fqs->samples.slots = 2; - if(fqs->samples.slots > SYSTEMD_JOURNAL_SAMPLING_SLOTS) - fqs->samples.slots = SYSTEMD_JOURNAL_SAMPLING_SLOTS; - - if(!fqs->after_ut || !fqs->before_ut || fqs->after_ut >= fqs->before_ut) { - // we don't have enough information for sampling - fqs->sampling = 0; - return; - } - - usec_t delta = fqs->before_ut - fqs->after_ut; - usec_t step = delta / facets_histogram_slots(facets) - 1; - if(step < 1) step = 1; - - fqs->samples_per_time_slot.start_ut = fqs->after_ut; - fqs->samples_per_time_slot.end_ut = fqs->before_ut; - fqs->samples_per_time_slot.step_ut = step; - - // the minimum number of rows to enable sampling - fqs->samples.enable_after_samples = fqs->sampling / 2; - - size_t files_matched = fqs->files_matched; - if(!files_matched) - files_matched = 1; - - // the minimum number of rows per file to enable sampling - fqs->samples_per_file.enable_after_samples = (fqs->sampling / 4) / files_matched; - if(fqs->samples_per_file.enable_after_samples < fqs->entries) - fqs->samples_per_file.enable_after_samples = fqs->entries; - - // the minimum number of rows per time slot to enable sampling - fqs->samples_per_time_slot.enable_after_samples = (fqs->sampling / 4) / fqs->samples.slots; - if(fqs->samples_per_time_slot.enable_after_samples < fqs->entries) - fqs->samples_per_time_slot.enable_after_samples = fqs->entries; -} - -static void sampling_file_init(FUNCTION_QUERY_STATUS *fqs, struct journal_file *jf __maybe_unused) { - fqs->samples_per_file.sampled = 0; - fqs->samples_per_file.unsampled = 0; - fqs->samples_per_file.estimated = 0; - fqs->samples_per_file.every = 0; - fqs->samples_per_file.skipped = 0; - fqs->samples_per_file.recalibrate = 0; -} - -static size_t 
sampling_file_lines_scanned_so_far(FUNCTION_QUERY_STATUS *fqs) { - size_t sampled = fqs->samples_per_file.sampled + fqs->samples_per_file.unsampled; - if(!sampled) sampled = 1; - return sampled; -} - -static void sampling_running_file_query_overlapping_timeframe_ut( - FUNCTION_QUERY_STATUS *fqs, struct journal_file *jf, FACETS_ANCHOR_DIRECTION direction, - usec_t msg_ut, usec_t *after_ut, usec_t *before_ut) { - - // find the overlap of the query and file timeframes - // taking into account the first message we encountered - - usec_t oldest_ut, newest_ut; - if(direction == FACETS_ANCHOR_DIRECTION_FORWARD) { - // the first message we know (oldest) - oldest_ut = fqs->query_file.first_msg_ut ? fqs->query_file.first_msg_ut : jf->msg_first_ut; - if(!oldest_ut) oldest_ut = fqs->query_file.start_ut; - - if(jf->msg_last_ut) - newest_ut = MIN(fqs->query_file.stop_ut, jf->msg_last_ut); - else if(jf->file_last_modified_ut) - newest_ut = MIN(fqs->query_file.stop_ut, jf->file_last_modified_ut); - else - newest_ut = fqs->query_file.stop_ut; - - if(msg_ut < oldest_ut) - oldest_ut = msg_ut - 1; - } - else /* BACKWARD */ { - // the latest message we know (newest) - newest_ut = fqs->query_file.first_msg_ut ? fqs->query_file.first_msg_ut : jf->msg_last_ut; - if(!newest_ut) newest_ut = fqs->query_file.start_ut; - - if(jf->msg_first_ut) - oldest_ut = MAX(fqs->query_file.stop_ut, jf->msg_first_ut); - else - oldest_ut = fqs->query_file.stop_ut; - - if(newest_ut < msg_ut) - newest_ut = msg_ut + 1; - } - - *after_ut = oldest_ut; - *before_ut = newest_ut; -} - -static double sampling_running_file_query_progress_by_time(FUNCTION_QUERY_STATUS *fqs, struct journal_file *jf, - FACETS_ANCHOR_DIRECTION direction, usec_t msg_ut) { - - usec_t after_ut, before_ut, elapsed_ut; - sampling_running_file_query_overlapping_timeframe_ut(fqs, jf, direction, msg_ut, &after_ut, &before_ut); - - if(direction == FACETS_ANCHOR_DIRECTION_FORWARD) - elapsed_ut = msg_ut - after_ut; - else - elapsed_ut = before_ut - msg_ut; - - usec_t total_ut = before_ut - after_ut; - double progress = (double)elapsed_ut / (double)total_ut; - - return progress; -} - -static usec_t sampling_running_file_query_remaining_time(FUNCTION_QUERY_STATUS *fqs, struct journal_file *jf, - FACETS_ANCHOR_DIRECTION direction, usec_t msg_ut, - usec_t *total_time_ut, usec_t *remaining_start_ut, - usec_t *remaining_end_ut) { - usec_t after_ut, before_ut; - sampling_running_file_query_overlapping_timeframe_ut(fqs, jf, direction, msg_ut, &after_ut, &before_ut); - - // since we have a timestamp in msg_ut - // this timestamp can extend the overlap - if(msg_ut <= after_ut) - after_ut = msg_ut - 1; - - if(msg_ut >= before_ut) - before_ut = msg_ut + 1; - - // return the remaining duration - usec_t remaining_from_ut, remaining_to_ut; - if(direction == FACETS_ANCHOR_DIRECTION_FORWARD) { - remaining_from_ut = msg_ut; - remaining_to_ut = before_ut; - } - else { - remaining_from_ut = after_ut; - remaining_to_ut = msg_ut; - } - - usec_t remaining_ut = remaining_to_ut - remaining_from_ut; - - if(total_time_ut) - *total_time_ut = (before_ut > after_ut) ? 
before_ut - after_ut : 1;
-
-    if(remaining_start_ut)
-        *remaining_start_ut = remaining_from_ut;
-
-    if(remaining_end_ut)
-        *remaining_end_ut = remaining_to_ut;
-
-    return remaining_ut;
-}
-
-static size_t sampling_running_file_query_estimate_remaining_lines_by_time(FUNCTION_QUERY_STATUS *fqs,
-                                                                           struct journal_file *jf,
-                                                                           FACETS_ANCHOR_DIRECTION direction,
-                                                                           usec_t msg_ut) {
-    size_t scanned_lines = sampling_file_lines_scanned_so_far(fqs);
-
-    // Calculate the proportion of time covered
-    usec_t total_time_ut, remaining_start_ut, remaining_end_ut;
-    usec_t remaining_time_ut = sampling_running_file_query_remaining_time(fqs, jf, direction, msg_ut, &total_time_ut,
-                                                                          &remaining_start_ut, &remaining_end_ut);
-    if (total_time_ut == 0) total_time_ut = 1;
-
-    double proportion_by_time = (double) (total_time_ut - remaining_time_ut) / (double) total_time_ut;
-
-    if (proportion_by_time == 0 || proportion_by_time > 1.0 || !isfinite(proportion_by_time))
-        proportion_by_time = 1.0;
-
-    // Estimate the total number of lines in the file
-    size_t expected_matching_logs_by_time = (size_t)((double)scanned_lines / proportion_by_time);
-
-    if(jf->messages_in_file && expected_matching_logs_by_time > jf->messages_in_file)
-        expected_matching_logs_by_time = jf->messages_in_file;
-
-    // Calculate the estimated number of remaining lines
-    size_t remaining_logs_by_time = expected_matching_logs_by_time - scanned_lines;
-    if (remaining_logs_by_time < 1) remaining_logs_by_time = 1;
-
-//    nd_log(NDLS_COLLECTORS, NDLP_INFO,
-//           "JOURNAL ESTIMATION: '%s' "
-//           "scanned_lines=%zu [sampled=%zu, unsampled=%zu, estimated=%zu], "
-//           "file [%"PRIu64" - %"PRIu64", duration %"PRId64", known lines in file %zu], "
-//           "query [%"PRIu64" - %"PRIu64", duration %"PRId64"], "
-//           "first message read from the file at %"PRIu64", current message at %"PRIu64", "
-//           "proportion of time %.2f %%, "
-//           "expected total lines in file %zu, "
-//           "remaining lines %zu, "
-//           "remaining time %"PRIu64" [%"PRIu64" - %"PRIu64", duration %"PRId64"]"
-//           , jf->filename
-//           , scanned_lines, fqs->samples_per_file.sampled, fqs->samples_per_file.unsampled, fqs->samples_per_file.estimated
-//           , jf->msg_first_ut, jf->msg_last_ut, jf->msg_last_ut - jf->msg_first_ut, jf->messages_in_file
-//           , fqs->query_file.start_ut, fqs->query_file.stop_ut, fqs->query_file.stop_ut - fqs->query_file.start_ut
-//           , fqs->query_file.first_msg_ut, msg_ut
-//           , proportion_by_time * 100.0
-//           , expected_matching_logs_by_time
-//           , remaining_logs_by_time
-//           , remaining_time_ut, remaining_start_ut, remaining_end_ut, remaining_end_ut - remaining_start_ut
-//           );
-
-    return remaining_logs_by_time;
-}
-
-static size_t sampling_running_file_query_estimate_remaining_lines(sd_journal *j __maybe_unused, FUNCTION_QUERY_STATUS *fqs, struct journal_file *jf, FACETS_ANCHOR_DIRECTION direction, usec_t msg_ut) {
-    size_t remaining_logs_by_seqnum = 0;
-
-#ifdef HAVE_SD_JOURNAL_GET_SEQNUM
-    size_t expected_matching_logs_by_seqnum = 0;
-    double proportion_by_seqnum = 0.0;
-    uint64_t current_msg_seqnum;
-    sd_id128_t current_msg_writer;
-    if(!fqs->query_file.first_msg_seqnum || sd_journal_get_seqnum(j, &current_msg_seqnum, &current_msg_writer) < 0) {
-        fqs->query_file.first_msg_seqnum = 0;
-        fqs->query_file.first_msg_writer = SD_ID128_NULL;
-    }
-    else if(jf->messages_in_file) {
-        size_t scanned_lines = sampling_file_lines_scanned_so_far(fqs);
-
-        double proportion_of_all_lines_so_far;
-        if(direction == FACETS_ANCHOR_DIRECTION_FORWARD)
-            proportion_of_all_lines_so_far = (double)scanned_lines /
(double)(current_msg_seqnum - jf->first_seqnum); - else - proportion_of_all_lines_so_far = (double)scanned_lines / (double)(jf->last_seqnum - current_msg_seqnum); - - if(proportion_of_all_lines_so_far > 1.0) - proportion_of_all_lines_so_far = 1.0; - - expected_matching_logs_by_seqnum = (size_t)(proportion_of_all_lines_so_far * (double)jf->messages_in_file); - - proportion_by_seqnum = (double)scanned_lines / (double)expected_matching_logs_by_seqnum; - - if (proportion_by_seqnum == 0 || proportion_by_seqnum > 1.0 || !isfinite(proportion_by_seqnum)) - proportion_by_seqnum = 1.0; - - remaining_logs_by_seqnum = expected_matching_logs_by_seqnum - scanned_lines; - if(!remaining_logs_by_seqnum) remaining_logs_by_seqnum = 1; - } -#endif - - if(remaining_logs_by_seqnum) - return remaining_logs_by_seqnum; - - return sampling_running_file_query_estimate_remaining_lines_by_time(fqs, jf, direction, msg_ut); -} - -static void sampling_decide_file_sampling_every(sd_journal *j, FUNCTION_QUERY_STATUS *fqs, struct journal_file *jf, FACETS_ANCHOR_DIRECTION direction, usec_t msg_ut) { - size_t files_matched = fqs->files_matched; - if(!files_matched) files_matched = 1; - - size_t remaining_lines = sampling_running_file_query_estimate_remaining_lines(j, fqs, jf, direction, msg_ut); - size_t wanted_samples = (fqs->sampling / 2) / files_matched; - if(!wanted_samples) wanted_samples = 1; - - fqs->samples_per_file.every = remaining_lines / wanted_samples; - - if(fqs->samples_per_file.every < 1) - fqs->samples_per_file.every = 1; -} - -typedef enum { - SAMPLING_STOP_AND_ESTIMATE = -1, - SAMPLING_FULL = 0, - SAMPLING_SKIP_FIELDS = 1, -} sampling_t; - -static inline sampling_t is_row_in_sample(sd_journal *j, FUNCTION_QUERY_STATUS *fqs, struct journal_file *jf, usec_t msg_ut, FACETS_ANCHOR_DIRECTION direction, bool candidate_to_keep) { - if(!fqs->sampling || candidate_to_keep) - return SAMPLING_FULL; - - if(unlikely(msg_ut < fqs->samples_per_time_slot.start_ut)) - msg_ut = fqs->samples_per_time_slot.start_ut; - if(unlikely(msg_ut > fqs->samples_per_time_slot.end_ut)) - msg_ut = fqs->samples_per_time_slot.end_ut; - - size_t slot = (msg_ut - fqs->samples_per_time_slot.start_ut) / fqs->samples_per_time_slot.step_ut; - if(slot >= fqs->samples.slots) - slot = fqs->samples.slots - 1; - - bool should_sample = false; - - if(fqs->samples.sampled < fqs->samples.enable_after_samples || - fqs->samples_per_file.sampled < fqs->samples_per_file.enable_after_samples || - fqs->samples_per_time_slot.sampled[slot] < fqs->samples_per_time_slot.enable_after_samples) - should_sample = true; - - else if(fqs->samples_per_file.recalibrate >= SYSTEMD_JOURNAL_SAMPLING_RECALIBRATE || !fqs->samples_per_file.every) { - // this is the first to be unsampled for this file - sampling_decide_file_sampling_every(j, fqs, jf, direction, msg_ut); - fqs->samples_per_file.recalibrate = 0; - should_sample = true; - } - else { - // we sample 1 every fqs->samples_per_file.every - if(fqs->samples_per_file.skipped >= fqs->samples_per_file.every) { - fqs->samples_per_file.skipped = 0; - should_sample = true; - } - else - fqs->samples_per_file.skipped++; - } - - if(should_sample) { - fqs->samples.sampled++; - fqs->samples_per_file.sampled++; - fqs->samples_per_time_slot.sampled[slot]++; - - return SAMPLING_FULL; - } - - fqs->samples_per_file.recalibrate++; - - fqs->samples.unsampled++; - fqs->samples_per_file.unsampled++; - fqs->samples_per_time_slot.unsampled[slot]++; - - if(fqs->samples_per_file.unsampled > fqs->samples_per_file.sampled) { - double progress_by_time 
= sampling_running_file_query_progress_by_time(fqs, jf, direction, msg_ut); - - if(progress_by_time > SYSTEMD_JOURNAL_ENABLE_ESTIMATIONS_FILE_PERCENTAGE) - return SAMPLING_STOP_AND_ESTIMATE; - } - - return SAMPLING_SKIP_FIELDS; -} - -static void sampling_update_running_query_file_estimates(FACETS *facets, sd_journal *j, FUNCTION_QUERY_STATUS *fqs, struct journal_file *jf, usec_t msg_ut, FACETS_ANCHOR_DIRECTION direction) { - usec_t total_time_ut, remaining_start_ut, remaining_end_ut; - sampling_running_file_query_remaining_time(fqs, jf, direction, msg_ut, &total_time_ut, &remaining_start_ut, - &remaining_end_ut); - size_t remaining_lines = sampling_running_file_query_estimate_remaining_lines(j, fqs, jf, direction, msg_ut); - facets_update_estimations(facets, remaining_start_ut, remaining_end_ut, remaining_lines); - fqs->samples.estimated += remaining_lines; - fqs->samples_per_file.estimated += remaining_lines; -} - // ---------------------------------------------------------------------------- static inline size_t netdata_systemd_journal_process_row(sd_journal *j, FACETS *facets, struct journal_file *jf, usec_t *msg_ut) { @@ -721,16 +333,17 @@ static inline ND_SD_JOURNAL_STATUS check_stop(const bool *cancelled, const usec_ ND_SD_JOURNAL_STATUS netdata_systemd_journal_query_backward( sd_journal *j, BUFFER *wb __maybe_unused, FACETS *facets, - struct journal_file *jf, FUNCTION_QUERY_STATUS *fqs) { + struct journal_file *jf, + LOGS_QUERY_STATUS *fqs) { usec_t anchor_delta = __atomic_load_n(&jf->max_journal_vs_realtime_delta_ut, __ATOMIC_RELAXED); + lqs_query_timeframe(fqs, anchor_delta); + usec_t start_ut = fqs->query.start_ut; + usec_t stop_ut = fqs->query.stop_ut; + bool stop_when_full = fqs->query.stop_when_full; - usec_t start_ut = ((fqs->data_only && fqs->anchor.start_ut) ? fqs->anchor.start_ut : fqs->before_ut) + anchor_delta; - usec_t stop_ut = (fqs->data_only && fqs->anchor.stop_ut) ? 
fqs->anchor.stop_ut : fqs->after_ut; - bool stop_when_full = (fqs->data_only && !fqs->anchor.stop_ut); - - fqs->query_file.start_ut = start_ut; - fqs->query_file.stop_ut = stop_ut; + fqs->c.query_file.start_ut = start_ut; + fqs->c.query_file.stop_ut = stop_ut; if(!netdata_systemd_journal_seek_to(j, start_ut)) return ND_SD_JOURNAL_FAILED_TO_SEEK; @@ -765,12 +378,12 @@ ND_SD_JOURNAL_STATUS netdata_systemd_journal_query_backward( if(unlikely(!first_msg_ut)) { first_msg_ut = msg_ut; - fqs->query_file.first_msg_ut = msg_ut; + fqs->c.query_file.first_msg_ut = msg_ut; #ifdef HAVE_SD_JOURNAL_GET_SEQNUM - if(sd_journal_get_seqnum(j, &fqs->query_file.first_msg_seqnum, &fqs->query_file.first_msg_writer) < 0) { - fqs->query_file.first_msg_seqnum = 0; - fqs->query_file.first_msg_writer = SD_ID128_NULL; + if(sd_journal_get_seqnum(j, &fqs->c.query_file.first_msg_seqnum, &fqs->c.query_file.first_msg_writer) < 0) { + fqs->c.query_file.first_msg_seqnum = 0; + fqs->c.query_file.first_msg_writer = SD_ID128_NULL; } #endif } @@ -794,7 +407,7 @@ ND_SD_JOURNAL_STATUS netdata_systemd_journal_query_backward( row_counter++; if(unlikely((row_counter % FUNCTION_DATA_ONLY_CHECK_EVERY_ROWS) == 0 && stop_when_full && - facets_rows(facets) >= fqs->entries)) { + facets_rows(facets) >= fqs->rq.entries)) { // stop the data only query usec_t oldest = facets_row_oldest_ut(facets); if(oldest && msg_ut < (oldest - anchor_delta)) @@ -802,10 +415,10 @@ ND_SD_JOURNAL_STATUS netdata_systemd_journal_query_backward( } if(unlikely(row_counter % FUNCTION_PROGRESS_EVERY_ROWS == 0)) { - FUNCTION_PROGRESS_UPDATE_ROWS(fqs->rows_read, row_counter - last_row_counter); + FUNCTION_PROGRESS_UPDATE_ROWS(fqs->c.rows_read, row_counter - last_row_counter); last_row_counter = row_counter; - FUNCTION_PROGRESS_UPDATE_BYTES(fqs->bytes_read, bytes - last_bytes); + FUNCTION_PROGRESS_UPDATE_BYTES(fqs->c.bytes_read, bytes - last_bytes); last_bytes = bytes; status = check_stop(fqs->cancelled, fqs->stop_monotonic_ut); @@ -819,10 +432,10 @@ ND_SD_JOURNAL_STATUS netdata_systemd_journal_query_backward( } } - FUNCTION_PROGRESS_UPDATE_ROWS(fqs->rows_read, row_counter - last_row_counter); - FUNCTION_PROGRESS_UPDATE_BYTES(fqs->bytes_read, bytes - last_bytes); + FUNCTION_PROGRESS_UPDATE_ROWS(fqs->c.rows_read, row_counter - last_row_counter); + FUNCTION_PROGRESS_UPDATE_BYTES(fqs->c.bytes_read, bytes - last_bytes); - fqs->rows_useful += rows_useful; + fqs->c.rows_useful += rows_useful; if(errors_no_timestamp) netdata_log_error("SYSTEMD-JOURNAL: %zu lines did not have timestamps", errors_no_timestamp); @@ -835,16 +448,17 @@ ND_SD_JOURNAL_STATUS netdata_systemd_journal_query_backward( ND_SD_JOURNAL_STATUS netdata_systemd_journal_query_forward( sd_journal *j, BUFFER *wb __maybe_unused, FACETS *facets, - struct journal_file *jf, FUNCTION_QUERY_STATUS *fqs) { + struct journal_file *jf, + LOGS_QUERY_STATUS *fqs) { usec_t anchor_delta = __atomic_load_n(&jf->max_journal_vs_realtime_delta_ut, __ATOMIC_RELAXED); + lqs_query_timeframe(fqs, anchor_delta); + usec_t start_ut = fqs->query.start_ut; + usec_t stop_ut = fqs->query.stop_ut; + bool stop_when_full = fqs->query.stop_when_full; - usec_t start_ut = (fqs->data_only && fqs->anchor.start_ut) ? fqs->anchor.start_ut : fqs->after_ut; - usec_t stop_ut = ((fqs->data_only && fqs->anchor.stop_ut) ? 
fqs->anchor.stop_ut : fqs->before_ut) + anchor_delta; - bool stop_when_full = (fqs->data_only && !fqs->anchor.stop_ut); - - fqs->query_file.start_ut = start_ut; - fqs->query_file.stop_ut = stop_ut; + fqs->c.query_file.start_ut = start_ut; + fqs->c.query_file.stop_ut = stop_ut; if(!netdata_systemd_journal_seek_to(j, start_ut)) return ND_SD_JOURNAL_FAILED_TO_SEEK; @@ -879,7 +493,7 @@ ND_SD_JOURNAL_STATUS netdata_systemd_journal_query_forward( if(unlikely(!first_msg_ut)) { first_msg_ut = msg_ut; - fqs->query_file.first_msg_ut = msg_ut; + fqs->c.query_file.first_msg_ut = msg_ut; } sampling_t sample = is_row_in_sample(j, fqs, jf, msg_ut, @@ -901,7 +515,7 @@ ND_SD_JOURNAL_STATUS netdata_systemd_journal_query_forward( row_counter++; if(unlikely((row_counter % FUNCTION_DATA_ONLY_CHECK_EVERY_ROWS) == 0 && stop_when_full && - facets_rows(facets) >= fqs->entries)) { + facets_rows(facets) >= fqs->rq.entries)) { // stop the data only query usec_t newest = facets_row_newest_ut(facets); if(newest && msg_ut > (newest + anchor_delta)) @@ -909,10 +523,10 @@ ND_SD_JOURNAL_STATUS netdata_systemd_journal_query_forward( } if(unlikely(row_counter % FUNCTION_PROGRESS_EVERY_ROWS == 0)) { - FUNCTION_PROGRESS_UPDATE_ROWS(fqs->rows_read, row_counter - last_row_counter); + FUNCTION_PROGRESS_UPDATE_ROWS(fqs->c.rows_read, row_counter - last_row_counter); last_row_counter = row_counter; - FUNCTION_PROGRESS_UPDATE_BYTES(fqs->bytes_read, bytes - last_bytes); + FUNCTION_PROGRESS_UPDATE_BYTES(fqs->c.bytes_read, bytes - last_bytes); last_bytes = bytes; status = check_stop(fqs->cancelled, fqs->stop_monotonic_ut); @@ -926,10 +540,10 @@ ND_SD_JOURNAL_STATUS netdata_systemd_journal_query_forward( } } - FUNCTION_PROGRESS_UPDATE_ROWS(fqs->rows_read, row_counter - last_row_counter); - FUNCTION_PROGRESS_UPDATE_BYTES(fqs->bytes_read, bytes - last_bytes); + FUNCTION_PROGRESS_UPDATE_ROWS(fqs->c.rows_read, row_counter - last_row_counter); + FUNCTION_PROGRESS_UPDATE_BYTES(fqs->c.bytes_read, bytes - last_bytes); - fqs->rows_useful += rows_useful; + fqs->c.rows_useful += rows_useful; if(errors_no_timestamp) netdata_log_error("SYSTEMD-JOURNAL: %zu lines did not have timestamps", errors_no_timestamp); @@ -963,7 +577,7 @@ bool netdata_systemd_journal_check_if_modified_since(sd_journal *j, usec_t seek_ } #ifdef HAVE_SD_JOURNAL_RESTART_FIELDS -static bool netdata_systemd_filtering_by_journal(sd_journal *j, FACETS *facets, FUNCTION_QUERY_STATUS *fqs) { +static bool netdata_systemd_filtering_by_journal(sd_journal *j, FACETS *facets, LOGS_QUERY_STATUS *lqs) { const char *field = NULL; const void *data = NULL; size_t data_length; @@ -974,7 +588,7 @@ static bool netdata_systemd_filtering_by_journal(sd_journal *j, FACETS *facets, SD_JOURNAL_FOREACH_FIELD(j, field) { // for each key bool interesting; - if(fqs->data_only) + if(lqs->rq.data_only) interesting = facets_key_name_is_filter(facets, field); else interesting = facets_key_name_is_facet(facets, field); @@ -1023,7 +637,7 @@ static bool netdata_systemd_filtering_by_journal(sd_journal *j, FACETS *facets, } if(failures) { - log_fqs(fqs, "failed to setup journal filter, will run the full query."); + lqs_log_error(lqs, "failed to setup journal filter, will run the full query."); sd_journal_flush_matches(j); return true; } @@ -1034,7 +648,8 @@ static bool netdata_systemd_filtering_by_journal(sd_journal *j, FACETS *facets, static ND_SD_JOURNAL_STATUS netdata_systemd_journal_query_one_file( const char *filename, BUFFER *wb, FACETS *facets, - struct journal_file *jf, FUNCTION_QUERY_STATUS *fqs) { + struct 
journal_file *jf, + LOGS_QUERY_STATUS *fqs) { sd_journal *j = NULL; errno_clear(); @@ -1056,18 +671,18 @@ static ND_SD_JOURNAL_STATUS netdata_systemd_journal_query_one_file( bool matches_filters = true; #ifdef HAVE_SD_JOURNAL_RESTART_FIELDS - if(fqs->slice) { + if(fqs->rq.slice) { usec_t started = now_monotonic_usec(); - matches_filters = netdata_systemd_filtering_by_journal(j, facets, fqs) || !fqs->filters; + matches_filters = netdata_systemd_filtering_by_journal(j, facets, fqs) || !fqs->rq.filters; usec_t ended = now_monotonic_usec(); - fqs->matches_setup_ut += (ended - started); + fqs->c.matches_setup_ut += (ended - started); } #endif // HAVE_SD_JOURNAL_RESTART_FIELDS if(matches_filters) { - if(fqs->direction == FACETS_ANCHOR_DIRECTION_FORWARD) + if(fqs->rq.direction == FACETS_ANCHOR_DIRECTION_FORWARD) status = netdata_systemd_journal_query_forward(j, wb, facets, jf, fqs); else status = netdata_systemd_journal_query_backward(j, wb, facets, jf, fqs); @@ -1081,10 +696,10 @@ static ND_SD_JOURNAL_STATUS netdata_systemd_journal_query_one_file( return status; } -static bool jf_is_mine(struct journal_file *jf, FUNCTION_QUERY_STATUS *fqs) { +static bool jf_is_mine(struct journal_file *jf, LOGS_QUERY_STATUS *fqs) { - if((fqs->source_type == SDJF_NONE && !fqs->sources) || (jf->source_type & fqs->source_type) || - (fqs->sources && simple_pattern_matches(fqs->sources, string2str(jf->source)))) { + if((fqs->rq.source_type == SDJF_NONE && !fqs->rq.sources) || (jf->source_type & fqs->rq.source_type) || + (fqs->rq.sources && simple_pattern_matches(fqs->rq.sources, string2str(jf->source)))) { if(!jf->msg_last_ut) // the file is not scanned yet, or the timestamps have not been updated, @@ -1095,22 +710,24 @@ static bool jf_is_mine(struct journal_file *jf, FUNCTION_QUERY_STATUS *fqs) { usec_t first_ut = jf->msg_first_ut - anchor_delta; usec_t last_ut = jf->msg_last_ut + anchor_delta; - if(last_ut >= fqs->after_ut && first_ut <= fqs->before_ut) + if(last_ut >= fqs->rq.after_ut && first_ut <= fqs->rq.before_ut) return true; } return false; } -static int netdata_systemd_journal_query(BUFFER *wb, FACETS *facets, FUNCTION_QUERY_STATUS *fqs) { +static int netdata_systemd_journal_query(BUFFER *wb, LOGS_QUERY_STATUS *lqs) { + FACETS *facets = lqs->facets; + ND_SD_JOURNAL_STATUS status = ND_SD_JOURNAL_NO_FILE_MATCHED; struct journal_file *jf; - fqs->files_matched = 0; - fqs->file_working = 0; - fqs->rows_useful = 0; - fqs->rows_read = 0; - fqs->bytes_read = 0; + lqs->c.files_matched = 0; + lqs->c.file_working = 0; + lqs->c.rows_useful = 0; + lqs->c.rows_read = 0; + lqs->c.bytes_read = 0; size_t files_used = 0; size_t files_max = dictionary_entries(journal_files_registry); @@ -1119,26 +736,29 @@ static int netdata_systemd_journal_query(BUFFER *wb, FACETS *facets, FUNCTION_QU // count the files bool files_are_newer = false; dfe_start_read(journal_files_registry, jf) { - if(!jf_is_mine(jf, fqs)) + if(!jf_is_mine(jf, lqs)) continue; file_items[files_used++] = dictionary_acquired_item_dup(journal_files_registry, jf_dfe.item); - if(jf->msg_last_ut > fqs->if_modified_since) + if(jf->msg_last_ut > lqs->rq.if_modified_since) files_are_newer = true; } dfe_done(jf); - fqs->files_matched = files_used; + lqs->c.files_matched = files_used; + + if(lqs->rq.if_modified_since && !files_are_newer) { + // release the files + for(size_t f = 0; f < files_used ;f++) + dictionary_acquired_item_release(journal_files_registry, file_items[f]); - if(fqs->if_modified_since && !files_are_newer) { - buffer_flush(wb); - return 
HTTP_RESP_NOT_MODIFIED; + return rrd_call_function_error(wb, "No new data since the previous call.", HTTP_RESP_NOT_MODIFIED); } // sort the files, so that they are optimal for facets if(files_used >= 2) { - if (fqs->direction == FACETS_ANCHOR_DIRECTION_BACKWARD) + if (lqs->rq.direction == FACETS_ANCHOR_DIRECTION_BACKWARD) qsort(file_items, files_used, sizeof(const DICTIONARY_ITEM *), journal_file_dict_items_backward_compar); else @@ -1153,38 +773,38 @@ static int netdata_systemd_journal_query(BUFFER *wb, FACETS *facets, FUNCTION_QU usec_t duration_ut = 0, max_duration_ut = 0; usec_t progress_duration_ut = 0; - sampling_query_init(fqs, facets); + sampling_query_init(lqs, facets); buffer_json_member_add_array(wb, "_journal_files"); for(size_t f = 0; f < files_used ;f++) { const char *filename = dictionary_acquired_item_name(file_items[f]); jf = dictionary_acquired_item_value(file_items[f]); - if(!jf_is_mine(jf, fqs)) + if(!jf_is_mine(jf, lqs)) continue; started_ut = ended_ut; // do not even try to do the query if we expect it to pass the timeout - if(ended_ut + max_duration_ut * 3 >= *fqs->stop_monotonic_ut) { + if(ended_ut + max_duration_ut * 3 >= *lqs->stop_monotonic_ut) { partial = true; status = ND_SD_JOURNAL_TIMED_OUT; break; } - fqs->file_working++; + lqs->c.file_working++; // fqs->cached_count = 0; size_t fs_calls = fstat_thread_calls; size_t fs_cached = fstat_thread_cached_responses; - size_t rows_useful = fqs->rows_useful; - size_t rows_read = fqs->rows_read; - size_t bytes_read = fqs->bytes_read; - size_t matches_setup_ut = fqs->matches_setup_ut; + size_t rows_useful = lqs->c.rows_useful; + size_t rows_read = lqs->c.rows_read; + size_t bytes_read = lqs->c.bytes_read; + size_t matches_setup_ut = lqs->c.matches_setup_ut; - sampling_file_init(fqs, jf); + sampling_file_init(lqs, jf); - ND_SD_JOURNAL_STATUS tmp_status = netdata_systemd_journal_query_one_file(filename, wb, facets, jf, fqs); + ND_SD_JOURNAL_STATUS tmp_status = netdata_systemd_journal_query_one_file(filename, wb, facets, jf, lqs); // nd_log(NDLS_COLLECTORS, NDLP_INFO, // "JOURNAL ESTIMATION FINAL: '%s' " @@ -1198,10 +818,10 @@ static int netdata_systemd_journal_query(BUFFER *wb, FACETS *facets, FUNCTION_QU // , fqs->query_file.start_ut, fqs->query_file.stop_ut, fqs->query_file.stop_ut - fqs->query_file.start_ut // ); - rows_useful = fqs->rows_useful - rows_useful; - rows_read = fqs->rows_read - rows_read; - bytes_read = fqs->bytes_read - bytes_read; - matches_setup_ut = fqs->matches_setup_ut - matches_setup_ut; + rows_useful = lqs->c.rows_useful - rows_useful; + rows_read = lqs->c.rows_read - rows_read; + bytes_read = lqs->c.bytes_read - bytes_read; + matches_setup_ut = lqs->c.matches_setup_ut - matches_setup_ut; fs_calls = fstat_thread_calls - fs_calls; fs_cached = fstat_thread_cached_responses - fs_cached; @@ -1215,7 +835,7 @@ static int netdata_systemd_journal_query(BUFFER *wb, FACETS *facets, FUNCTION_QU if(progress_duration_ut >= SYSTEMD_JOURNAL_PROGRESS_EVERY_UT) { progress_duration_ut = 0; netdata_mutex_lock(&stdout_mutex); - pluginsd_function_progress_to_stdout(fqs->transaction, f + 1, files_used); + pluginsd_function_progress_to_stdout(lqs->rq.transaction, f + 1, files_used); netdata_mutex_unlock(&stdout_mutex); } @@ -1241,12 +861,12 @@ static int netdata_systemd_journal_query(BUFFER *wb, FACETS *facets, FUNCTION_QU buffer_json_member_add_uint64(wb, "fstat_query_calls", fs_calls); buffer_json_member_add_uint64(wb, "fstat_query_cached_responses", fs_cached); - if(fqs->sampling) { + if(lqs->rq.sampling) { 
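+            // expose this file's sampling counters in its _journal_files entry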
buffer_json_member_add_object(wb, "_sampling"); { - buffer_json_member_add_uint64(wb, "sampled", fqs->samples_per_file.sampled); - buffer_json_member_add_uint64(wb, "unsampled", fqs->samples_per_file.unsampled); - buffer_json_member_add_uint64(wb, "estimated", fqs->samples_per_file.estimated); + buffer_json_member_add_uint64(wb, "sampled", lqs->c.samples_per_file.sampled); + buffer_json_member_add_uint64(wb, "unsampled", lqs->c.samples_per_file.unsampled); + buffer_json_member_add_uint64(wb, "estimated", lqs->c.samples_per_file.estimated); } buffer_json_object_close(wb); // _sampling } @@ -1290,10 +910,8 @@ static int netdata_systemd_journal_query(BUFFER *wb, FACETS *facets, FUNCTION_QU switch (status) { case ND_SD_JOURNAL_OK: - if(fqs->if_modified_since && !fqs->rows_useful) { - buffer_flush(wb); - return HTTP_RESP_NOT_MODIFIED; - } + if(lqs->rq.if_modified_since && !lqs->c.rows_useful) + return rrd_call_function_error(wb, "No additional useful data since the previous call.", HTTP_RESP_NOT_MODIFIED); break; case ND_SD_JOURNAL_TIMED_OUT: @@ -1301,18 +919,19 @@ static int netdata_systemd_journal_query(BUFFER *wb, FACETS *facets, FUNCTION_QU break; case ND_SD_JOURNAL_CANCELLED: - buffer_flush(wb); - return HTTP_RESP_CLIENT_CLOSED_REQUEST; + return rrd_call_function_error(wb, "Request cancelled.", HTTP_RESP_CLIENT_CLOSED_REQUEST); case ND_SD_JOURNAL_NOT_MODIFIED: - buffer_flush(wb); - return HTTP_RESP_NOT_MODIFIED; + return rrd_call_function_error(wb, "No new data since the previous call.", HTTP_RESP_NOT_MODIFIED); - default: case ND_SD_JOURNAL_FAILED_TO_OPEN: + return rrd_call_function_error(wb, "Failed to open systemd journal file.", HTTP_RESP_INTERNAL_SERVER_ERROR); + case ND_SD_JOURNAL_FAILED_TO_SEEK: - buffer_flush(wb); - return HTTP_RESP_INTERNAL_SERVER_ERROR; + return rrd_call_function_error(wb, "Failed to seek in systemd journal file.", HTTP_RESP_INTERNAL_SERVER_ERROR); + + default: + return rrd_call_function_error(wb, "Unknown status", HTTP_RESP_INTERNAL_SERVER_ERROR); } buffer_json_member_add_uint64(wb, "status", HTTP_RESP_OK); @@ -1320,7 +939,7 @@ static int netdata_systemd_journal_query(BUFFER *wb, FACETS *facets, FUNCTION_QU buffer_json_member_add_string(wb, "type", "table"); // build a message for the query - if(!fqs->data_only) { + if(!lqs->rq.data_only) { CLEAN_BUFFER *msg = buffer_create(0, NULL); CLEAN_BUFFER *msg_description = buffer_create(0, NULL); ND_LOG_FIELD_PRIORITY msg_priority = NDLP_INFO; @@ -1339,17 +958,17 @@ static int netdata_systemd_journal_query(BUFFER *wb, FACETS *facets, FUNCTION_QU msg_priority = NDLP_WARNING; } - if(fqs->samples.estimated || fqs->samples.unsampled) { - double percent = (double) (fqs->samples.sampled * 100.0 / - (fqs->samples.estimated + fqs->samples.unsampled + fqs->samples.sampled)); + if(lqs->c.samples.estimated || lqs->c.samples.unsampled) { + double percent = (double) (lqs->c.samples.sampled * 100.0 / + (lqs->c.samples.estimated + lqs->c.samples.unsampled + lqs->c.samples.sampled)); buffer_sprintf(msg, "%.2f%% real data", percent); buffer_sprintf(msg_description, "ACTUAL DATA: The filters counters reflect %0.2f%% of the data. 
", percent); msg_priority = MIN(msg_priority, NDLP_NOTICE); } - if(fqs->samples.unsampled) { - double percent = (double) (fqs->samples.unsampled * 100.0 / - (fqs->samples.estimated + fqs->samples.unsampled + fqs->samples.sampled)); + if(lqs->c.samples.unsampled) { + double percent = (double) (lqs->c.samples.unsampled * 100.0 / + (lqs->c.samples.estimated + lqs->c.samples.unsampled + lqs->c.samples.sampled)); buffer_sprintf(msg, ", %.2f%% unsampled", percent); buffer_sprintf(msg_description , "UNSAMPLED DATA: %0.2f%% of the events exist and have been counted, but their values have not been evaluated, so they are not included in the filters counters. " @@ -1357,9 +976,9 @@ static int netdata_systemd_journal_query(BUFFER *wb, FACETS *facets, FUNCTION_QU msg_priority = MIN(msg_priority, NDLP_NOTICE); } - if(fqs->samples.estimated) { - double percent = (double) (fqs->samples.estimated * 100.0 / - (fqs->samples.estimated + fqs->samples.unsampled + fqs->samples.sampled)); + if(lqs->c.samples.estimated) { + double percent = (double) (lqs->c.samples.estimated * 100.0 / + (lqs->c.samples.estimated + lqs->c.samples.unsampled + lqs->c.samples.sampled)); buffer_sprintf(msg, ", %.2f%% estimated", percent); buffer_sprintf(msg_description , "ESTIMATED DATA: The query selected a large amount of data, so to avoid delaying too much, the presented data are estimated by %0.2f%%. " @@ -1377,18 +996,19 @@ static int netdata_systemd_journal_query(BUFFER *wb, FACETS *facets, FUNCTION_QU buffer_json_object_close(wb); // message } - if(!fqs->data_only) { + if(!lqs->rq.data_only) { buffer_json_member_add_time_t(wb, "update_every", 1); buffer_json_member_add_string(wb, "help", SYSTEMD_JOURNAL_FUNCTION_DESCRIPTION); } - if(!fqs->data_only || fqs->tail) - buffer_json_member_add_uint64(wb, "last_modified", fqs->last_modified); + if(!lqs->rq.data_only || lqs->rq.tail) + buffer_json_member_add_uint64(wb, "last_modified", lqs->last_modified); facets_sort_and_reorder_keys(facets); facets_report(facets, wb, used_hashes_registry); - buffer_json_member_add_time_t(wb, "expires", now_realtime_sec() + (fqs->data_only ? 3600 : 0)); + wb->expires = now_realtime_sec() + (lqs->rq.data_only ? 
3600 : 0); + buffer_json_member_add_time_t(wb, "expires", wb->expires); buffer_json_member_add_object(wb, "_fstat_caching"); { @@ -1397,643 +1017,197 @@ static int netdata_systemd_journal_query(BUFFER *wb, FACETS *facets, FUNCTION_QU } buffer_json_object_close(wb); // _fstat_caching - if(fqs->sampling) { + if(lqs->rq.sampling) { buffer_json_member_add_object(wb, "_sampling"); { - buffer_json_member_add_uint64(wb, "sampled", fqs->samples.sampled); - buffer_json_member_add_uint64(wb, "unsampled", fqs->samples.unsampled); - buffer_json_member_add_uint64(wb, "estimated", fqs->samples.estimated); + buffer_json_member_add_uint64(wb, "sampled", lqs->c.samples.sampled); + buffer_json_member_add_uint64(wb, "unsampled", lqs->c.samples.unsampled); + buffer_json_member_add_uint64(wb, "estimated", lqs->c.samples.estimated); } buffer_json_object_close(wb); // _sampling } - buffer_json_finalize(wb); - - return HTTP_RESP_OK; + wb->content_type = CT_APPLICATION_JSON; + wb->response_code = HTTP_RESP_OK; + return wb->response_code; } -static void netdata_systemd_journal_function_help(const char *transaction) { - BUFFER *wb = buffer_create(0, NULL); - buffer_sprintf(wb, - "%s / %s\n" - "\n" - "%s\n" - "\n" - "The following parameters are supported:\n" - "\n" - " "JOURNAL_PARAMETER_HELP"\n" - " Shows this help message.\n" - "\n" - " "JOURNAL_PARAMETER_INFO"\n" - " Request initial configuration information about the plugin.\n" - " The key entity returned is the required_params array, which includes\n" - " all the available systemd journal sources.\n" - " When `"JOURNAL_PARAMETER_INFO"` is requested, all other parameters are ignored.\n" - "\n" - " "JOURNAL_PARAMETER_DATA_ONLY":true or "JOURNAL_PARAMETER_DATA_ONLY":false\n" - " Quickly respond with data requested, without generating a\n" - " `histogram`, `facets` counters and `items`.\n" - "\n" - " "JOURNAL_PARAMETER_DELTA":true or "JOURNAL_PARAMETER_DELTA":false\n" - " When doing data only queries, include deltas for histogram, facets and items.\n" - "\n" - " "JOURNAL_PARAMETER_TAIL":true or "JOURNAL_PARAMETER_TAIL":false\n" - " When doing data only queries, respond with the newest messages,\n" - " and up to the anchor, but calculate deltas (if requested) for\n" - " the duration [anchor - before].\n" - "\n" - " "JOURNAL_PARAMETER_SLICE":true or "JOURNAL_PARAMETER_SLICE":false\n" - " When it is turned on, the plugin is executing filtering via libsystemd,\n" - " utilizing all the available indexes of the journal files.\n" - " When it is off, only the time constraint is handled by libsystemd and\n" - " all filtering is done by the plugin.\n" - " The default is: %s\n" - "\n" - " "JOURNAL_PARAMETER_SOURCE":SOURCE\n" - " Query only the specified journal sources.\n" - " Do an `"JOURNAL_PARAMETER_INFO"` query to find the sources.\n" - "\n" - " "JOURNAL_PARAMETER_BEFORE":TIMESTAMP_IN_SECONDS\n" - " Absolute or relative (to now) timestamp in seconds, to start the query.\n" - " The query is always executed from the most recent to the oldest log entry.\n" - " If not given the default is: now.\n" - "\n" - " "JOURNAL_PARAMETER_AFTER":TIMESTAMP_IN_SECONDS\n" - " Absolute or relative (to `before`) timestamp in seconds, to end the query.\n" - " If not given, the default is %d.\n" - "\n" - " "JOURNAL_PARAMETER_LAST":ITEMS\n" - " The number of items to return.\n" - " The default is %d.\n" - "\n" - " "JOURNAL_PARAMETER_SAMPLING":ITEMS\n" - " The number of log entries to sample to estimate facets counters and histogram.\n" - " The default is %d.\n" - "\n" - " 
"JOURNAL_PARAMETER_ANCHOR":TIMESTAMP_IN_MICROSECONDS\n" - " Return items relative to this timestamp.\n" - " The exact items to be returned depend on the query `"JOURNAL_PARAMETER_DIRECTION"`.\n" - "\n" - " "JOURNAL_PARAMETER_DIRECTION":forward or "JOURNAL_PARAMETER_DIRECTION":backward\n" - " When set to `backward` (default) the items returned are the newest before the\n" - " `"JOURNAL_PARAMETER_ANCHOR"`, (or `"JOURNAL_PARAMETER_BEFORE"` if `"JOURNAL_PARAMETER_ANCHOR"` is not set)\n" - " When set to `forward` the items returned are the oldest after the\n" - " `"JOURNAL_PARAMETER_ANCHOR"`, (or `"JOURNAL_PARAMETER_AFTER"` if `"JOURNAL_PARAMETER_ANCHOR"` is not set)\n" - " The default is: %s\n" - "\n" - " "JOURNAL_PARAMETER_QUERY":SIMPLE_PATTERN\n" - " Do a full text search to find the log entries matching the pattern given.\n" - " The plugin is searching for matches on all fields of the database.\n" - "\n" - " "JOURNAL_PARAMETER_IF_MODIFIED_SINCE":TIMESTAMP_IN_MICROSECONDS\n" - " Each successful response, includes a `last_modified` field.\n" - " By providing the timestamp to the `"JOURNAL_PARAMETER_IF_MODIFIED_SINCE"` parameter,\n" - " the plugin will return 200 with a successful response, or 304 if the source has not\n" - " been modified since that timestamp.\n" - "\n" - " "JOURNAL_PARAMETER_HISTOGRAM":facet_id\n" - " Use the given `facet_id` for the histogram.\n" - " This parameter is ignored in `"JOURNAL_PARAMETER_DATA_ONLY"` mode.\n" - "\n" - " "JOURNAL_PARAMETER_FACETS":facet_id1,facet_id2,facet_id3,...\n" - " Add the given facets to the list of fields for which analysis is required.\n" - " The plugin will offer both a histogram and facet value counters for its values.\n" - " This parameter is ignored in `"JOURNAL_PARAMETER_DATA_ONLY"` mode.\n" - "\n" - " facet_id:value_id1,value_id2,value_id3,...\n" - " Apply filters to the query, based on the facet IDs returned.\n" - " Each `facet_id` can be given once, but multiple `facet_ids` can be given.\n" - "\n" - , program_name - , SYSTEMD_JOURNAL_FUNCTION_NAME - , SYSTEMD_JOURNAL_FUNCTION_DESCRIPTION - , JOURNAL_DEFAULT_SLICE_MODE ? "true" : "false" // slice - , -SYSTEMD_JOURNAL_DEFAULT_QUERY_DURATION - , SYSTEMD_JOURNAL_DEFAULT_ITEMS_PER_QUERY - , SYSTEMD_JOURNAL_DEFAULT_ITEMS_SAMPLING - , JOURNAL_DEFAULT_DIRECTION == FACETS_ANCHOR_DIRECTION_BACKWARD ? 
"backward" : "forward" - ); +static void systemd_journal_register_transformations(LOGS_QUERY_STATUS *lqs) { + FACETS *facets = lqs->facets; + LOGS_QUERY_REQUEST *rq = &lqs->rq; - netdata_mutex_lock(&stdout_mutex); - pluginsd_function_result_to_stdout(transaction, HTTP_RESP_OK, "text/plain", now_realtime_sec() + 3600, wb); - netdata_mutex_unlock(&stdout_mutex); + // ---------------------------------------------------------------------------------------------------------------- + // register the fields in the order you want them on the dashboard + + facets_register_row_severity(facets, syslog_priority_to_facet_severity, NULL); - buffer_free(wb); + facets_register_key_name( + facets, "_HOSTNAME", rq->default_facet | FACET_KEY_OPTION_VISIBLE); + + facets_register_dynamic_key_name( + facets, JOURNAL_KEY_ND_JOURNAL_PROCESS, + FACET_KEY_OPTION_NEVER_FACET | FACET_KEY_OPTION_VISIBLE, + netdata_systemd_journal_dynamic_row_id, NULL); + + facets_register_key_name( + facets, "MESSAGE", + FACET_KEY_OPTION_NEVER_FACET | FACET_KEY_OPTION_MAIN_TEXT | + FACET_KEY_OPTION_VISIBLE | FACET_KEY_OPTION_FTS); + + // facets_register_dynamic_key_name( + // facets, "MESSAGE", + // FACET_KEY_OPTION_NEVER_FACET | FACET_KEY_OPTION_MAIN_TEXT | FACET_KEY_OPTION_RICH_TEXT | + // FACET_KEY_OPTION_VISIBLE | FACET_KEY_OPTION_FTS, + // netdata_systemd_journal_rich_message, NULL); + + facets_register_key_name_transformation( + facets, "PRIORITY", + rq->default_facet | FACET_KEY_OPTION_TRANSFORM_VIEW | + FACET_KEY_OPTION_EXPANDED_FILTER, + netdata_systemd_journal_transform_priority, NULL); + + facets_register_key_name_transformation( + facets, "SYSLOG_FACILITY", + rq->default_facet | FACET_KEY_OPTION_TRANSFORM_VIEW | + FACET_KEY_OPTION_EXPANDED_FILTER, + netdata_systemd_journal_transform_syslog_facility, NULL); + + facets_register_key_name_transformation( + facets, "ERRNO", + rq->default_facet | FACET_KEY_OPTION_TRANSFORM_VIEW, + netdata_systemd_journal_transform_errno, NULL); + + facets_register_key_name( + facets, JOURNAL_KEY_ND_JOURNAL_FILE, + FACET_KEY_OPTION_NEVER_FACET); + + facets_register_key_name( + facets, "SYSLOG_IDENTIFIER", rq->default_facet); + + facets_register_key_name( + facets, "UNIT", rq->default_facet); + + facets_register_key_name( + facets, "USER_UNIT", rq->default_facet); + + facets_register_key_name_transformation( + facets, "MESSAGE_ID", + rq->default_facet | FACET_KEY_OPTION_TRANSFORM_VIEW | + FACET_KEY_OPTION_EXPANDED_FILTER, + netdata_systemd_journal_transform_message_id, NULL); + + facets_register_key_name_transformation( + facets, "_BOOT_ID", + rq->default_facet | FACET_KEY_OPTION_TRANSFORM_VIEW, + netdata_systemd_journal_transform_boot_id, NULL); + + facets_register_key_name_transformation( + facets, "_SYSTEMD_OWNER_UID", + rq->default_facet | FACET_KEY_OPTION_TRANSFORM_VIEW, + netdata_systemd_journal_transform_uid, NULL); + + facets_register_key_name_transformation( + facets, "_UID", + rq->default_facet | FACET_KEY_OPTION_TRANSFORM_VIEW, + netdata_systemd_journal_transform_uid, NULL); + + facets_register_key_name_transformation( + facets, "OBJECT_SYSTEMD_OWNER_UID", + rq->default_facet | FACET_KEY_OPTION_TRANSFORM_VIEW, + netdata_systemd_journal_transform_uid, NULL); + + facets_register_key_name_transformation( + facets, "OBJECT_UID", + rq->default_facet | FACET_KEY_OPTION_TRANSFORM_VIEW, + netdata_systemd_journal_transform_uid, NULL); + + facets_register_key_name_transformation( + facets, "_GID", + rq->default_facet | FACET_KEY_OPTION_TRANSFORM_VIEW, + netdata_systemd_journal_transform_gid, 
NULL); + + facets_register_key_name_transformation( + facets, "OBJECT_GID", + rq->default_facet | FACET_KEY_OPTION_TRANSFORM_VIEW, + netdata_systemd_journal_transform_gid, NULL); + + facets_register_key_name_transformation( + facets, "_CAP_EFFECTIVE", + FACET_KEY_OPTION_TRANSFORM_VIEW, + netdata_systemd_journal_transform_cap_effective, NULL); + + facets_register_key_name_transformation( + facets, "_AUDIT_LOGINUID", + FACET_KEY_OPTION_TRANSFORM_VIEW, + netdata_systemd_journal_transform_uid, NULL); + + facets_register_key_name_transformation( + facets, "OBJECT_AUDIT_LOGINUID", + FACET_KEY_OPTION_TRANSFORM_VIEW, + netdata_systemd_journal_transform_uid, NULL); + + facets_register_key_name_transformation( + facets, "_SOURCE_REALTIME_TIMESTAMP", + FACET_KEY_OPTION_TRANSFORM_VIEW, + netdata_systemd_journal_transform_timestamp_usec, NULL); } void function_systemd_journal(const char *transaction, char *function, usec_t *stop_monotonic_ut, bool *cancelled, - BUFFER *payload __maybe_unused, HTTP_ACCESS access __maybe_unused, + BUFFER *payload, HTTP_ACCESS access __maybe_unused, const char *source __maybe_unused, void *data __maybe_unused) { fstat_thread_calls = 0; fstat_thread_cached_responses = 0; - BUFFER *wb = buffer_create(0, NULL); - buffer_flush(wb); - buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_MINIFY); - - FUNCTION_QUERY_STATUS tmp_fqs = { - .cancelled = cancelled, - .stop_monotonic_ut = stop_monotonic_ut, - }; - FUNCTION_QUERY_STATUS *fqs = NULL; - - FACETS *facets = facets_create(50, FACETS_OPTION_ALL_KEYS_FTS, - SYSTEMD_ALWAYS_VISIBLE_KEYS, - SYSTEMD_KEYS_INCLUDED_IN_FACETS, - SYSTEMD_KEYS_EXCLUDED_FROM_FACETS); - - facets_accepted_param(facets, JOURNAL_PARAMETER_INFO); - facets_accepted_param(facets, JOURNAL_PARAMETER_SOURCE); - facets_accepted_param(facets, JOURNAL_PARAMETER_AFTER); - facets_accepted_param(facets, JOURNAL_PARAMETER_BEFORE); - facets_accepted_param(facets, JOURNAL_PARAMETER_ANCHOR); - facets_accepted_param(facets, JOURNAL_PARAMETER_DIRECTION); - facets_accepted_param(facets, JOURNAL_PARAMETER_LAST); - facets_accepted_param(facets, JOURNAL_PARAMETER_QUERY); - facets_accepted_param(facets, JOURNAL_PARAMETER_FACETS); - facets_accepted_param(facets, JOURNAL_PARAMETER_HISTOGRAM); - facets_accepted_param(facets, JOURNAL_PARAMETER_IF_MODIFIED_SINCE); - facets_accepted_param(facets, JOURNAL_PARAMETER_DATA_ONLY); - facets_accepted_param(facets, JOURNAL_PARAMETER_DELTA); - facets_accepted_param(facets, JOURNAL_PARAMETER_TAIL); - facets_accepted_param(facets, JOURNAL_PARAMETER_SAMPLING); - #ifdef HAVE_SD_JOURNAL_RESTART_FIELDS - facets_accepted_param(facets, JOURNAL_PARAMETER_SLICE); + bool have_slice = true; +#else + bool have_slice = false; #endif // HAVE_SD_JOURNAL_RESTART_FIELDS - // register the fields in the order you want them on the dashboard - - facets_register_row_severity(facets, syslog_priority_to_facet_severity, NULL); - - facets_register_key_name(facets, "_HOSTNAME", - FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_VISIBLE); - - facets_register_dynamic_key_name(facets, JOURNAL_KEY_ND_JOURNAL_PROCESS, - FACET_KEY_OPTION_NEVER_FACET | FACET_KEY_OPTION_VISIBLE, - netdata_systemd_journal_dynamic_row_id, NULL); - - facets_register_key_name(facets, "MESSAGE", - FACET_KEY_OPTION_NEVER_FACET | FACET_KEY_OPTION_MAIN_TEXT | - FACET_KEY_OPTION_VISIBLE | FACET_KEY_OPTION_FTS); - -// facets_register_dynamic_key_name(facets, "MESSAGE", -// FACET_KEY_OPTION_NEVER_FACET | FACET_KEY_OPTION_MAIN_TEXT | FACET_KEY_OPTION_RICH_TEXT | -// FACET_KEY_OPTION_VISIBLE | 
FACET_KEY_OPTION_FTS, -// netdata_systemd_journal_rich_message, NULL); - - facets_register_key_name_transformation(facets, "PRIORITY", - FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_TRANSFORM_VIEW | - FACET_KEY_OPTION_EXPANDED_FILTER, - netdata_systemd_journal_transform_priority, NULL); - - facets_register_key_name_transformation(facets, "SYSLOG_FACILITY", - FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_TRANSFORM_VIEW | - FACET_KEY_OPTION_EXPANDED_FILTER, - netdata_systemd_journal_transform_syslog_facility, NULL); - - facets_register_key_name_transformation(facets, "ERRNO", - FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_TRANSFORM_VIEW, - netdata_systemd_journal_transform_errno, NULL); - - facets_register_key_name(facets, JOURNAL_KEY_ND_JOURNAL_FILE, - FACET_KEY_OPTION_NEVER_FACET); - - facets_register_key_name(facets, "SYSLOG_IDENTIFIER", - FACET_KEY_OPTION_FACET); - - facets_register_key_name(facets, "UNIT", - FACET_KEY_OPTION_FACET); - - facets_register_key_name(facets, "USER_UNIT", - FACET_KEY_OPTION_FACET); - - facets_register_key_name_transformation(facets, "MESSAGE_ID", - FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_TRANSFORM_VIEW | - FACET_KEY_OPTION_EXPANDED_FILTER, - netdata_systemd_journal_transform_message_id, NULL); - - facets_register_key_name_transformation(facets, "_BOOT_ID", - FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_TRANSFORM_VIEW, - netdata_systemd_journal_transform_boot_id, NULL); - - facets_register_key_name_transformation(facets, "_SYSTEMD_OWNER_UID", - FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_TRANSFORM_VIEW, - netdata_systemd_journal_transform_uid, NULL); - - facets_register_key_name_transformation(facets, "_UID", - FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_TRANSFORM_VIEW, - netdata_systemd_journal_transform_uid, NULL); - - facets_register_key_name_transformation(facets, "OBJECT_SYSTEMD_OWNER_UID", - FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_TRANSFORM_VIEW, - netdata_systemd_journal_transform_uid, NULL); - - facets_register_key_name_transformation(facets, "OBJECT_UID", - FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_TRANSFORM_VIEW, - netdata_systemd_journal_transform_uid, NULL); + LOGS_QUERY_STATUS tmp_fqs = { + .facets = lqs_facets_create( + LQS_DEFAULT_ITEMS_PER_QUERY, + FACETS_OPTION_ALL_KEYS_FTS | FACETS_OPTION_HASH_IDS, + SYSTEMD_ALWAYS_VISIBLE_KEYS, + SYSTEMD_KEYS_INCLUDED_IN_FACETS, + SYSTEMD_KEYS_EXCLUDED_FROM_FACETS, + have_slice), - facets_register_key_name_transformation(facets, "_GID", - FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_TRANSFORM_VIEW, - netdata_systemd_journal_transform_gid, NULL); + .rq = LOGS_QUERY_REQUEST_DEFAULTS(transaction, LQS_DEFAULT_SLICE_MODE, JOURNAL_DEFAULT_DIRECTION), - facets_register_key_name_transformation(facets, "OBJECT_GID", - FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_TRANSFORM_VIEW, - netdata_systemd_journal_transform_gid, NULL); - - facets_register_key_name_transformation(facets, "_CAP_EFFECTIVE", - FACET_KEY_OPTION_TRANSFORM_VIEW, - netdata_systemd_journal_transform_cap_effective, NULL); - - facets_register_key_name_transformation(facets, "_AUDIT_LOGINUID", - FACET_KEY_OPTION_TRANSFORM_VIEW, - netdata_systemd_journal_transform_uid, NULL); - - facets_register_key_name_transformation(facets, "OBJECT_AUDIT_LOGINUID", - FACET_KEY_OPTION_TRANSFORM_VIEW, - netdata_systemd_journal_transform_uid, NULL); + .cancelled = cancelled, + .stop_monotonic_ut = stop_monotonic_ut, + }; + LOGS_QUERY_STATUS *lqs = &tmp_fqs; - facets_register_key_name_transformation(facets, "_SOURCE_REALTIME_TIMESTAMP", - FACET_KEY_OPTION_TRANSFORM_VIEW, - 
netdata_systemd_journal_transform_timestamp_usec, NULL); + CLEAN_BUFFER *wb = lqs_create_output_buffer(); // ------------------------------------------------------------------------ // parse the parameters - bool info = false, data_only = false, slice = JOURNAL_DEFAULT_SLICE_MODE, delta = false, tail = false; - time_t after_s = 0, before_s = 0; - usec_t anchor = 0; - usec_t if_modified_since = 0; - size_t last = 0; - FACETS_ANCHOR_DIRECTION direction = JOURNAL_DEFAULT_DIRECTION; - const char *query = NULL; - const char *chart = NULL; - SIMPLE_PATTERN *sources = NULL; - SD_JOURNAL_FILE_SOURCE_TYPE source_type = SDJF_ALL; - size_t filters = 0; - size_t sampling = SYSTEMD_JOURNAL_DEFAULT_ITEMS_SAMPLING; - - buffer_json_member_add_object(wb, "_request"); - - char *words[SYSTEMD_JOURNAL_MAX_PARAMS] = { NULL }; - size_t num_words = quoted_strings_splitter_pluginsd(function, words, SYSTEMD_JOURNAL_MAX_PARAMS); - for(int i = 1; i < SYSTEMD_JOURNAL_MAX_PARAMS ;i++) { - char *keyword = get_word(words, num_words, i); - if(!keyword) break; - - if(strcmp(keyword, JOURNAL_PARAMETER_HELP) == 0) { - netdata_systemd_journal_function_help(transaction); - goto cleanup; - } - else if(strcmp(keyword, JOURNAL_PARAMETER_INFO) == 0) { - info = true; - } - else if(strncmp(keyword, JOURNAL_PARAMETER_DELTA ":", sizeof(JOURNAL_PARAMETER_DELTA ":") - 1) == 0) { - char *v = &keyword[sizeof(JOURNAL_PARAMETER_DELTA ":") - 1]; - - if(strcmp(v, "false") == 0 || strcmp(v, "no") == 0 || strcmp(v, "0") == 0) - delta = false; - else - delta = true; - } - else if(strncmp(keyword, JOURNAL_PARAMETER_TAIL ":", sizeof(JOURNAL_PARAMETER_TAIL ":") - 1) == 0) { - char *v = &keyword[sizeof(JOURNAL_PARAMETER_TAIL ":") - 1]; - - if(strcmp(v, "false") == 0 || strcmp(v, "no") == 0 || strcmp(v, "0") == 0) - tail = false; - else - tail = true; - } - else if(strncmp(keyword, JOURNAL_PARAMETER_SAMPLING ":", sizeof(JOURNAL_PARAMETER_SAMPLING ":") - 1) == 0) { - sampling = str2ul(&keyword[sizeof(JOURNAL_PARAMETER_SAMPLING ":") - 1]); - } - else if(strncmp(keyword, JOURNAL_PARAMETER_DATA_ONLY ":", sizeof(JOURNAL_PARAMETER_DATA_ONLY ":") - 1) == 0) { - char *v = &keyword[sizeof(JOURNAL_PARAMETER_DATA_ONLY ":") - 1]; - - if(strcmp(v, "false") == 0 || strcmp(v, "no") == 0 || strcmp(v, "0") == 0) - data_only = false; - else - data_only = true; - } - else if(strncmp(keyword, JOURNAL_PARAMETER_SLICE ":", sizeof(JOURNAL_PARAMETER_SLICE ":") - 1) == 0) { - char *v = &keyword[sizeof(JOURNAL_PARAMETER_SLICE ":") - 1]; - - if(strcmp(v, "false") == 0 || strcmp(v, "no") == 0 || strcmp(v, "0") == 0) - slice = false; - else - slice = true; - } - else if(strncmp(keyword, JOURNAL_PARAMETER_SOURCE ":", sizeof(JOURNAL_PARAMETER_SOURCE ":") - 1) == 0) { - const char *value = &keyword[sizeof(JOURNAL_PARAMETER_SOURCE ":") - 1]; - - buffer_json_member_add_array(wb, JOURNAL_PARAMETER_SOURCE); - - BUFFER *sources_list = buffer_create(0, NULL); - - source_type = SDJF_NONE; - while(value) { - char *sep = strchr(value, ','); - if(sep) - *sep++ = '\0'; + if(lqs_request_parse_and_validate(lqs, wb, function, payload, have_slice, "PRIORITY")) { + systemd_journal_register_transformations(lqs); - buffer_json_add_array_item_string(wb, value); + // ------------------------------------------------------------------------ + // add versions to the response - if(strcmp(value, SDJF_SOURCE_ALL_NAME) == 0) { - source_type |= SDJF_ALL; - value = NULL; - } - else if(strcmp(value, SDJF_SOURCE_LOCAL_NAME) == 0) { - source_type |= SDJF_LOCAL_ALL; - value = NULL; - } - else if(strcmp(value, 
SDJF_SOURCE_REMOTES_NAME) == 0) { - source_type |= SDJF_REMOTE_ALL; - value = NULL; - } - else if(strcmp(value, SDJF_SOURCE_NAMESPACES_NAME) == 0) { - source_type |= SDJF_LOCAL_NAMESPACE; - value = NULL; - } - else if(strcmp(value, SDJF_SOURCE_LOCAL_SYSTEM_NAME) == 0) { - source_type |= SDJF_LOCAL_SYSTEM; - value = NULL; - } - else if(strcmp(value, SDJF_SOURCE_LOCAL_USERS_NAME) == 0) { - source_type |= SDJF_LOCAL_USER; - value = NULL; - } - else if(strcmp(value, SDJF_SOURCE_LOCAL_OTHER_NAME) == 0) { - source_type |= SDJF_LOCAL_OTHER; - value = NULL; - } - else { - // else, match the source, whatever it is - if(buffer_strlen(sources_list)) - buffer_strcat(sources_list, ","); - - buffer_strcat(sources_list, value); - } + buffer_json_journal_versions(wb); - value = sep; - } - - if(buffer_strlen(sources_list)) { - simple_pattern_free(sources); - sources = simple_pattern_create(buffer_tostring(sources_list), ",", SIMPLE_PATTERN_EXACT, false); - } - - buffer_free(sources_list); - - buffer_json_array_close(wb); // source - } - else if(strncmp(keyword, JOURNAL_PARAMETER_AFTER ":", sizeof(JOURNAL_PARAMETER_AFTER ":") - 1) == 0) { - after_s = str2l(&keyword[sizeof(JOURNAL_PARAMETER_AFTER ":") - 1]); - } - else if(strncmp(keyword, JOURNAL_PARAMETER_BEFORE ":", sizeof(JOURNAL_PARAMETER_BEFORE ":") - 1) == 0) { - before_s = str2l(&keyword[sizeof(JOURNAL_PARAMETER_BEFORE ":") - 1]); - } - else if(strncmp(keyword, JOURNAL_PARAMETER_IF_MODIFIED_SINCE ":", sizeof(JOURNAL_PARAMETER_IF_MODIFIED_SINCE ":") - 1) == 0) { - if_modified_since = str2ull(&keyword[sizeof(JOURNAL_PARAMETER_IF_MODIFIED_SINCE ":") - 1], NULL); - } - else if(strncmp(keyword, JOURNAL_PARAMETER_ANCHOR ":", sizeof(JOURNAL_PARAMETER_ANCHOR ":") - 1) == 0) { - anchor = str2ull(&keyword[sizeof(JOURNAL_PARAMETER_ANCHOR ":") - 1], NULL); - } - else if(strncmp(keyword, JOURNAL_PARAMETER_DIRECTION ":", sizeof(JOURNAL_PARAMETER_DIRECTION ":") - 1) == 0) { - direction = strcasecmp(&keyword[sizeof(JOURNAL_PARAMETER_DIRECTION ":") - 1], "forward") == 0 ? 
FACETS_ANCHOR_DIRECTION_FORWARD : FACETS_ANCHOR_DIRECTION_BACKWARD; - } - else if(strncmp(keyword, JOURNAL_PARAMETER_LAST ":", sizeof(JOURNAL_PARAMETER_LAST ":") - 1) == 0) { - last = str2ul(&keyword[sizeof(JOURNAL_PARAMETER_LAST ":") - 1]); - } - else if(strncmp(keyword, JOURNAL_PARAMETER_QUERY ":", sizeof(JOURNAL_PARAMETER_QUERY ":") - 1) == 0) { - query= &keyword[sizeof(JOURNAL_PARAMETER_QUERY ":") - 1]; - } - else if(strncmp(keyword, JOURNAL_PARAMETER_HISTOGRAM ":", sizeof(JOURNAL_PARAMETER_HISTOGRAM ":") - 1) == 0) { - chart = &keyword[sizeof(JOURNAL_PARAMETER_HISTOGRAM ":") - 1]; - } - else if(strncmp(keyword, JOURNAL_PARAMETER_FACETS ":", sizeof(JOURNAL_PARAMETER_FACETS ":") - 1) == 0) { - char *value = &keyword[sizeof(JOURNAL_PARAMETER_FACETS ":") - 1]; - if(*value) { - buffer_json_member_add_array(wb, JOURNAL_PARAMETER_FACETS); + // ------------------------------------------------------------------------ + // run the request - while(value) { - char *sep = strchr(value, ','); - if(sep) - *sep++ = '\0'; - - facets_register_facet_id(facets, value, FACET_KEY_OPTION_FACET|FACET_KEY_OPTION_FTS|FACET_KEY_OPTION_REORDER); - buffer_json_add_array_item_string(wb, value); - - value = sep; - } - - buffer_json_array_close(wb); // JOURNAL_PARAMETER_FACETS - } - } + if (lqs->rq.info) + lqs_info_response(wb, lqs->facets); else { - char *value = strchr(keyword, ':'); - if(value) { - *value++ = '\0'; - - buffer_json_member_add_array(wb, keyword); - - while(value) { - char *sep = strchr(value, ','); - if(sep) - *sep++ = '\0'; - - facets_register_facet_id_filter(facets, keyword, value, FACET_KEY_OPTION_FACET|FACET_KEY_OPTION_FTS|FACET_KEY_OPTION_REORDER); - buffer_json_add_array_item_string(wb, value); - filters++; - - value = sep; - } - - buffer_json_array_close(wb); // keyword - } + netdata_systemd_journal_query(wb, lqs); + if (wb->response_code == HTTP_RESP_OK) + buffer_json_finalize(wb); } } - // ------------------------------------------------------------------------ - // put this request into the progress db - - fqs = &tmp_fqs; - - // ------------------------------------------------------------------------ - // validate parameters - - time_t now_s = now_realtime_sec(); - time_t expires = now_s + 1; - - if(!after_s && !before_s) { - before_s = now_s; - after_s = before_s - SYSTEMD_JOURNAL_DEFAULT_QUERY_DURATION; - } - else - rrdr_relative_window_to_absolute(&after_s, &before_s, now_s); - - if(after_s > before_s) { - time_t tmp = after_s; - after_s = before_s; - before_s = tmp; - } - - if(after_s == before_s) - after_s = before_s - SYSTEMD_JOURNAL_DEFAULT_QUERY_DURATION; - - if(!last) - last = SYSTEMD_JOURNAL_DEFAULT_ITEMS_PER_QUERY; - - - // ------------------------------------------------------------------------ - // set query time-frame, anchors and direction - - fqs->transaction = transaction; - fqs->after_ut = after_s * USEC_PER_SEC; - fqs->before_ut = (before_s * USEC_PER_SEC) + USEC_PER_SEC - 1; - fqs->if_modified_since = if_modified_since; - fqs->data_only = data_only; - fqs->delta = (fqs->data_only) ? delta : false; - fqs->tail = (fqs->data_only && fqs->if_modified_since) ? tail : false; - fqs->sources = sources; - fqs->source_type = source_type; - fqs->entries = last; - fqs->last_modified = 0; - fqs->filters = filters; - fqs->query = (query && *query) ? query : NULL; - fqs->histogram = (chart && *chart) ? 
chart : NULL; - fqs->direction = direction; - fqs->anchor.start_ut = anchor; - fqs->anchor.stop_ut = 0; - fqs->sampling = sampling; - - if(fqs->anchor.start_ut && fqs->tail) { - // a tail request - // we need the top X entries from BEFORE - // but, we need to calculate the facets and the - // histogram up to the anchor - fqs->direction = direction = FACETS_ANCHOR_DIRECTION_BACKWARD; - fqs->anchor.start_ut = 0; - fqs->anchor.stop_ut = anchor; - } - - if(anchor && anchor < fqs->after_ut) { - log_fqs(fqs, "received anchor is too small for query timeframe, ignoring anchor"); - anchor = 0; - fqs->anchor.start_ut = 0; - fqs->anchor.stop_ut = 0; - fqs->direction = direction = FACETS_ANCHOR_DIRECTION_BACKWARD; - } - else if(anchor > fqs->before_ut) { - log_fqs(fqs, "received anchor is too big for query timeframe, ignoring anchor"); - anchor = 0; - fqs->anchor.start_ut = 0; - fqs->anchor.stop_ut = 0; - fqs->direction = direction = FACETS_ANCHOR_DIRECTION_BACKWARD; - } - - facets_set_anchor(facets, fqs->anchor.start_ut, fqs->anchor.stop_ut, fqs->direction); - - facets_set_additional_options(facets, - ((fqs->data_only) ? FACETS_OPTION_DATA_ONLY : 0) | - ((fqs->delta) ? FACETS_OPTION_SHOW_DELTAS : 0)); - - // ------------------------------------------------------------------------ - // set the rest of the query parameters - - - facets_set_items(facets, fqs->entries); - facets_set_query(facets, fqs->query); - -#ifdef HAVE_SD_JOURNAL_RESTART_FIELDS - fqs->slice = slice; - if(slice) - facets_enable_slice_mode(facets); -#else - fqs->slice = false; -#endif - - if(fqs->histogram) - facets_set_timeframe_and_histogram_by_id(facets, fqs->histogram, fqs->after_ut, fqs->before_ut); - else - facets_set_timeframe_and_histogram_by_name(facets, "PRIORITY", fqs->after_ut, fqs->before_ut); - - - // ------------------------------------------------------------------------ - // complete the request object - - buffer_json_member_add_boolean(wb, JOURNAL_PARAMETER_INFO, false); - buffer_json_member_add_boolean(wb, JOURNAL_PARAMETER_SLICE, fqs->slice); - buffer_json_member_add_boolean(wb, JOURNAL_PARAMETER_DATA_ONLY, fqs->data_only); - buffer_json_member_add_boolean(wb, JOURNAL_PARAMETER_DELTA, fqs->delta); - buffer_json_member_add_boolean(wb, JOURNAL_PARAMETER_TAIL, fqs->tail); - buffer_json_member_add_uint64(wb, JOURNAL_PARAMETER_SAMPLING, fqs->sampling); - buffer_json_member_add_uint64(wb, "source_type", fqs->source_type); - buffer_json_member_add_uint64(wb, JOURNAL_PARAMETER_AFTER, fqs->after_ut / USEC_PER_SEC); - buffer_json_member_add_uint64(wb, JOURNAL_PARAMETER_BEFORE, fqs->before_ut / USEC_PER_SEC); - buffer_json_member_add_uint64(wb, "if_modified_since", fqs->if_modified_since); - buffer_json_member_add_uint64(wb, JOURNAL_PARAMETER_ANCHOR, anchor); - buffer_json_member_add_string(wb, JOURNAL_PARAMETER_DIRECTION, fqs->direction == FACETS_ANCHOR_DIRECTION_FORWARD ? 
"forward" : "backward"); - buffer_json_member_add_uint64(wb, JOURNAL_PARAMETER_LAST, fqs->entries); - buffer_json_member_add_string(wb, JOURNAL_PARAMETER_QUERY, fqs->query); - buffer_json_member_add_string(wb, JOURNAL_PARAMETER_HISTOGRAM, fqs->histogram); - buffer_json_object_close(wb); // request - - buffer_json_journal_versions(wb); - - // ------------------------------------------------------------------------ - // run the request - - int response; - - if(info) { - facets_accepted_parameters_to_json_array(facets, wb, false); - buffer_json_member_add_array(wb, "required_params"); - { - buffer_json_add_array_item_object(wb); - { - buffer_json_member_add_string(wb, "id", "source"); - buffer_json_member_add_string(wb, "name", "source"); - buffer_json_member_add_string(wb, "help", "Select the SystemD Journal source to query"); - buffer_json_member_add_string(wb, "type", "multiselect"); - buffer_json_member_add_array(wb, "options"); - { - available_journal_file_sources_to_json_array(wb); - } - buffer_json_array_close(wb); // options array - } - buffer_json_object_close(wb); // required params object - } - buffer_json_array_close(wb); // required_params array - - facets_table_config(wb); - - buffer_json_member_add_uint64(wb, "status", HTTP_RESP_OK); - buffer_json_member_add_string(wb, "type", "table"); - buffer_json_member_add_string(wb, "help", SYSTEMD_JOURNAL_FUNCTION_DESCRIPTION); - buffer_json_finalize(wb); - response = HTTP_RESP_OK; - goto output; - } - - response = netdata_systemd_journal_query(wb, facets, fqs); - - // ------------------------------------------------------------------------ - // handle error response - - if(response != HTTP_RESP_OK) { - netdata_mutex_lock(&stdout_mutex); - pluginsd_function_json_error_to_stdout(transaction, response, "failed"); - netdata_mutex_unlock(&stdout_mutex); - goto cleanup; - } - -output: netdata_mutex_lock(&stdout_mutex); - pluginsd_function_result_to_stdout(transaction, response, "application/json", expires, wb); + pluginsd_function_result_to_stdout(transaction, wb); netdata_mutex_unlock(&stdout_mutex); -cleanup: - simple_pattern_free(sources); - facets_destroy(facets); - buffer_free(wb); + lqs_cleanup(lqs); } diff --git a/src/collectors/systemd-journal.plugin/systemd-main.c b/src/collectors/systemd-journal.plugin/systemd-main.c index e3afe4e86..e7d79c413 100644 --- a/src/collectors/systemd-journal.plugin/systemd-main.c +++ b/src/collectors/systemd-journal.plugin/systemd-main.c @@ -18,7 +18,6 @@ static bool journal_data_directories_exist() { } int main(int argc __maybe_unused, char **argv __maybe_unused) { - clocks_init(); nd_thread_tag_set("sd-jrnl.plugin"); nd_log_initialize_for_external_plugins("systemd-journal.plugin"); @@ -46,7 +45,8 @@ int main(int argc __maybe_unused, char **argv __maybe_unused) { bool cancelled = false; usec_t stop_monotonic_ut = now_monotonic_usec() + 600 * USEC_PER_SEC; - char buf[] = "systemd-journal after:-8640000 before:0 direction:backward last:200 data_only:false slice:true source:all"; + // char buf[] = "systemd-journal after:1726573205 before:1726574105 last:200 facets:F9q1S4.MEeL,DHKucpqUoe1,MewN7JHJ.3X,LKWfxQdCIoc,IjWzTvQ9.4t,O6O.cgYOhns,KmQ1KSeTSfO,IIsT7Ytfxy6,EQD6.NflJQq,I6rMJzShJIE,DxoIlg6RTuM,AU4H2NMVPXJ,H4mcdIPho07,EDYhj5U8330,DloDtGMQHje,JHSbsQ2fXqr,AIRrOu._40Z,NFZXv8AEpS_,Iiic3t4NuxV,F2YCtRNSfDv,GOUMAmZiRrq,O0VYoHcyq49,FDQoaBH15Bp,ClBB5dSGmCc,GTwmQptJYkk,BWH4O3GPNSL,APv6JsKkF9X,IAURKhjtcRF,Jw1dz4fJmFr slice:true source:all"; + char buf[] = "systemd-journal after:-8640000 before:0 direction:backward 
last:200 data_only:false slice:true facets: source:all"; // char buf[] = "systemd-journal after:1695332964 before:1695937764 direction:backward last:100 slice:true source:all DHKucpqUoe1:PtVoyIuX.MU"; // char buf[] = "systemd-journal after:1694511062 before:1694514662 anchor:1694514122024403"; function_systemd_journal("123", buf, &stop_monotonic_ut, &cancelled, @@ -113,13 +113,12 @@ int main(int argc __maybe_unused, char **argv __maybe_unused) { // ------------------------------------------------------------------------ - usec_t step_ut = 100 * USEC_PER_MS; usec_t send_newline_ut = 0; usec_t since_last_scan_ut = SYSTEMD_JOURNAL_ALL_FILES_SCAN_EVERY_USEC * 2; // something big to trigger scanning at start - bool tty = isatty(fileno(stdout)) == 1; + const bool tty = isatty(fileno(stdout)) == 1; heartbeat_t hb; - heartbeat_init(&hb); + heartbeat_init(&hb, USEC_PER_SEC); while(!plugin_should_exit) { if(since_last_scan_ut > SYSTEMD_JOURNAL_ALL_FILES_SCAN_EVERY_USEC) { @@ -127,7 +126,7 @@ int main(int argc __maybe_unused, char **argv __maybe_unused) { since_last_scan_ut = 0; } - usec_t dt_ut = heartbeat_next(&hb, step_ut); + usec_t dt_ut = heartbeat_next(&hb); since_last_scan_ut += dt_ut; send_newline_ut += dt_ut; diff --git a/src/collectors/systemd-journal.plugin/systemd-units.c b/src/collectors/systemd-journal.plugin/systemd-units.c index 0e096dae1..a5f670d68 100644 --- a/src/collectors/systemd-journal.plugin/systemd-units.c +++ b/src/collectors/systemd-journal.plugin/systemd-units.c @@ -1153,7 +1153,10 @@ static void netdata_systemd_units_function_help(const char *transaction) { ); netdata_mutex_lock(&stdout_mutex); - pluginsd_function_result_to_stdout(transaction, HTTP_RESP_OK, "text/plain", now_realtime_sec() + 3600, wb); + wb->response_code = HTTP_RESP_OK; + wb->content_type = CT_TEXT_PLAIN; + wb->expires = now_realtime_sec() + 3600; + pluginsd_function_result_to_stdout(transaction, wb); netdata_mutex_unlock(&stdout_mutex); buffer_free(wb); @@ -1169,7 +1172,10 @@ static void netdata_systemd_units_function_info(const char *transaction) { buffer_json_finalize(wb); netdata_mutex_lock(&stdout_mutex); - pluginsd_function_result_to_stdout(transaction, HTTP_RESP_OK, "text/plain", now_realtime_sec() + 3600, wb); + wb->response_code = HTTP_RESP_OK; + wb->content_type = CT_TEXT_PLAIN; + wb->expires = now_realtime_sec() + 3600; + pluginsd_function_result_to_stdout(transaction, wb); netdata_mutex_unlock(&stdout_mutex); buffer_free(wb); @@ -1601,7 +1607,7 @@ void function_systemd_units(const char *transaction, char *function, BUFFER *payload __maybe_unused, HTTP_ACCESS access __maybe_unused, const char *source __maybe_unused, void *data __maybe_unused) { char *words[SYSTEMD_UNITS_MAX_PARAMS] = { NULL }; - size_t num_words = quoted_strings_splitter_pluginsd(function, words, SYSTEMD_UNITS_MAX_PARAMS); + size_t num_words = quoted_strings_splitter_whitespace(function, words, SYSTEMD_UNITS_MAX_PARAMS); for(int i = 1; i < SYSTEMD_UNITS_MAX_PARAMS ;i++) { char *keyword = get_word(words, num_words, i); if(!keyword) break; @@ -1958,7 +1964,10 @@ void function_systemd_units(const char *transaction, char *function, buffer_json_finalize(wb); netdata_mutex_lock(&stdout_mutex); - pluginsd_function_result_to_stdout(transaction, HTTP_RESP_OK, "application/json", now_realtime_sec() + 1, wb); + wb->response_code = HTTP_RESP_OK; + wb->content_type = CT_APPLICATION_JSON; + wb->expires = now_realtime_sec() + 1; + pluginsd_function_result_to_stdout(transaction, wb); netdata_mutex_unlock(&stdout_mutex); buffer_free(wb); diff 
--git a/src/collectors/tc.plugin/integrations/tc_qos_classes.md b/src/collectors/tc.plugin/integrations/tc_qos_classes.md index 2928110b3..204fa7de6 100644 --- a/src/collectors/tc.plugin/integrations/tc_qos_classes.md +++ b/src/collectors/tc.plugin/integrations/tc_qos_classes.md @@ -93,7 +93,7 @@ There are no alerts configured by default for this integration. In order to view tc classes, you need to create the file `/etc/netdata/tc-qos-helper.conf` with content: -```conf +```text tc_show="class" ``` @@ -116,8 +116,8 @@ The file format is a modified INI syntax. The general structure is: [section2] option3 = some third value ``` -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -144,7 +144,7 @@ A basic example configuration using classes defined in `/etc/iproute2/tc_cls`. An example of class IDs mapped to names in that file can be: -```conf +```text 2:1 Standard 2:8 LowPriorityData 2:10 HighThroughputData diff --git a/src/collectors/tc.plugin/metadata.yaml b/src/collectors/tc.plugin/metadata.yaml index f4039a8c5..c40b5aa01 100644 --- a/src/collectors/tc.plugin/metadata.yaml +++ b/src/collectors/tc.plugin/metadata.yaml @@ -41,7 +41,7 @@ modules: description: | In order to view tc classes, you need to create the file `/etc/netdata/tc-qos-helper.conf` with content: - ```conf + ```text tc_show="class" ``` configuration: @@ -74,7 +74,7 @@ modules: An example of class IDs mapped to names in that file can be: - ```conf + ```text 2:1 Standard 2:8 LowPriorityData 2:10 HighThroughputData diff --git a/src/collectors/tc.plugin/plugin_tc.c b/src/collectors/tc.plugin/plugin_tc.c index da2a39194..7102e216d 100644 --- a/src/collectors/tc.plugin/plugin_tc.c +++ b/src/collectors/tc.plugin/plugin_tc.c @@ -912,7 +912,7 @@ void *tc_main(void *ptr) { uint32_t first_hash; snprintfz(command, TC_LINE_MAX, "%s/tc-qos-helper.sh", netdata_configured_primary_plugins_dir); - char *tc_script = config_get("plugin:tc", "script to run to get tc values", command); + const char *tc_script = config_get("plugin:tc", "script to run to get tc values", command); while(service_running(SERVICE_COLLECTORS)) { struct tc_device *device = NULL; @@ -928,7 +928,7 @@ void *tc_main(void *ptr) { } char buffer[TC_LINE_MAX+1] = ""; - while(fgets(buffer, TC_LINE_MAX, tc_child_instance->child_stdout_fp) != NULL) { + while(fgets(buffer, TC_LINE_MAX, spawn_popen_stdout(tc_child_instance)) != NULL) { if(unlikely(!service_running(SERVICE_COLLECTORS))) break; buffer[TC_LINE_MAX] = '\0'; diff --git a/src/collectors/timex.plugin/integrations/timex.md b/src/collectors/timex.plugin/integrations/timex.md index 98bcbe10b..5d758857c 100644 --- a/src/collectors/timex.plugin/integrations/timex.md +++ b/src/collectors/timex.plugin/integrations/timex.md @@ -102,8 +102,8 @@ The file format is a modified INI syntax. 
The general structure is: [section2] option3 = some third value ``` -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/collectors/timex.plugin/plugin_timex.c b/src/collectors/timex.plugin/plugin_timex.c index 6e200c425..381079cf4 100644 --- a/src/collectors/timex.plugin/plugin_timex.c +++ b/src/collectors/timex.plugin/plugin_timex.c @@ -50,9 +50,11 @@ void *timex_main(void *ptr) worker_register("TIMEX"); worker_register_job_name(0, "clock check"); - int update_every = (int)config_get_number(CONFIG_SECTION_TIMEX, "update every", 10); - if (update_every < localhost->rrd_update_every) + int update_every = (int)config_get_duration_seconds(CONFIG_SECTION_TIMEX, "update every", 10); + if (update_every < localhost->rrd_update_every) { update_every = localhost->rrd_update_every; + config_set_duration_seconds(CONFIG_SECTION_TIMEX, "update every", update_every); + } int do_sync = config_get_boolean(CONFIG_SECTION_TIMEX, "clock synchronization state", CONFIG_BOOLEAN_YES); int do_offset = config_get_boolean(CONFIG_SECTION_TIMEX, "time offset", CONFIG_BOOLEAN_YES); @@ -65,10 +67,10 @@ void *timex_main(void *ptr) usec_t step = update_every * USEC_PER_SEC; usec_t real_step = USEC_PER_SEC; heartbeat_t hb; - heartbeat_init(&hb); + heartbeat_init(&hb, USEC_PER_SEC); while (service_running(SERVICE_COLLECTORS)) { worker_is_idle(); - heartbeat_next(&hb, USEC_PER_SEC); + heartbeat_next(&hb); if (real_step < step) { real_step += USEC_PER_SEC; diff --git a/src/collectors/utils/local_listeners.c b/src/collectors/utils/local_listeners.c new file mode 100644 index 000000000..a2e8968ff --- /dev/null +++ b/src/collectors/utils/local_listeners.c @@ -0,0 +1,339 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "libnetdata/libnetdata.h" +#include "libnetdata/local-sockets/local-sockets.h" +#include "libnetdata/required_dummies.h" + +// -------------------------------------------------------------------------------------------------------------------- + +static void print_local_listeners(LS_STATE *ls __maybe_unused, const LOCAL_SOCKET *nn, void *data __maybe_unused) { + LOCAL_SOCKET *n = (LOCAL_SOCKET *)nn; + + char local_address[INET6_ADDRSTRLEN]; + char remote_address[INET6_ADDRSTRLEN]; + + if(n->local.family == AF_INET) { + ipv4_address_to_txt(n->local.ip.ipv4, local_address); + ipv4_address_to_txt(n->remote.ip.ipv4, remote_address); + } + else if(is_local_socket_ipv46(n)) { + strncpyz(local_address, "*", sizeof(local_address) - 1); + remote_address[0] = '\0'; + } + else if(n->local.family == AF_INET6) { + ipv6_address_to_txt(&n->local.ip.ipv6, local_address); + ipv6_address_to_txt(&n->remote.ip.ipv6, remote_address); + } + + printf("%s|%s|%u|%s\n", local_sockets_protocol_name(n), local_address, n->local.port, string2str(n->cmdline)); +} + +// -------------------------------------------------------------------------------------------------------------------- + +int main(int argc, char **argv) { + static struct rusage started, ended; + 
getrusage(RUSAGE_SELF, &started); + bool debug = false; + + LS_STATE ls = { + .config = { + .listening = true, + .inbound = false, + .outbound = false, + .local = false, + .tcp4 = true, + .tcp6 = true, + .udp4 = true, + .udp6 = true, + .pid = false, + .cmdline = true, + .comm = false, + .namespaces = true, + .tcp_info = false, + .no_mnl = false, + .report = false, + + .max_errors = 10, + .max_concurrent_namespaces = 10, + + .cb = print_local_listeners, + .data = NULL, + }, + .stats = { 0 }, + .sockets_hashtable = { 0 }, + .local_ips_hashtable = { 0 }, + .listening_ports_hashtable = { 0 }, + }; + + netdata_configured_host_prefix = getenv("NETDATA_HOST_PREFIX"); + if(!netdata_configured_host_prefix) netdata_configured_host_prefix = ""; + + for (int i = 1; i < argc; i++) { + char *s = argv[i]; + bool positive = true; + + if(strcmp(s, "-h") == 0 || strcmp(s, "--help") == 0) { + fprintf(stderr, + "\n" + " Netdata local-listeners\n" + " (C) 2024 Netdata Inc.\n" + "\n" + " This program prints a list of all the processes that have a listening socket.\n" + " It is used by Netdata to auto-detect the services running.\n" + "\n" + " Options:\n" + "\n" + " The options:\n" + "\n" + " udp, udp4, udp6, tcp, tcp4, tcp6, ipv4, ipv6\n" + "\n" + " select the sources to read currently available sockets.\n" + "\n" + " while:\n" + "\n" + " listening, local, inbound, outbound, namespaces\n" + "\n" + " filter the output based on the direction of the sockets.\n" + "\n" + " Prepending any option with 'no-', 'not-' or 'non-' will disable them.\n" + "\n" + " Current options:\n" + "\n" + " %s %s %s %s %s %s %s %s %s %s %s %s\n" + "\n" + " Option 'debug' enables all sources and all directions and provides\n" + " a full dump of current sockets.\n" + "\n" + " Option 'report' reports timings per step while collecting and processing\n" + " system information.\n" + "\n" + " Option 'procfile' uses procfile to read proc files, instead of getline().\n" + "\n" + " DIRECTION DETECTION\n" + " The program detects the direction of the sockets using these rules:\n" + "\n" + " - listening are all the TCP sockets that are in listen state\n" + " and all sockets that their remote IP is zero.\n" + "\n" + " - local are all the non-listening sockets that either their source IP\n" + " or their remote IP are loopback addresses. Loopback addresses are\n" + " those in 127.0.0.0/8 and ::1. When IPv4 addresses are mapped\n" + " into IPv6, the program extracts the IPv4 addresses to check them.\n" + "\n" + " Also, local are considered all the sockets that their remote\n" + " IP is one of the IPs that appear as local on another socket.\n" + "\n" + " - inbound are all the non-listening and non-local sockets that their local\n" + " port is a port of another socket that is marked as listening.\n" + "\n" + " - outbound are all the other sockets.\n" + "\n" + " Keep in mind that this kind of socket direction detection is not 100%% accurate,\n" + " and there may be cases (e.g. reusable sockets) that this code may incorrectly\n" + " mark sockets as inbound or outbound.\n" + "\n" + " WARNING:\n" + " This program reads the entire /proc/net/{tcp,udp,tcp6,upd6} files, builds\n" + " multiple hash maps in memory and traverses the entire /proc filesystem to\n" + " associate sockets with processes. We have made the most to make it as\n" + " lightweight and fast as possible, but still this program has a lot of work\n" + " to do and it may have some impact on very busy servers with millions of.\n" + " established connections." 
+ "\n" + " Therefore, we suggest to avoid running it repeatedly for data collection.\n" + "\n" + " Netdata executes it only when it starts to auto-detect data collection sources\n" + " and initialize the network dependencies explorer." + "\n" + , ls.config.udp4 ? "udp4" :"no-udp4" + , ls.config.udp6 ? "udp6" :"no-udp6" + , ls.config.tcp4 ? "tcp4" :"no-tcp4" + , ls.config.tcp6 ? "tcp6" :"no-tcp6" + , ls.config.listening ? "listening" : "no-listening" + , ls.config.local ? "local" : "no-local" + , ls.config.inbound ? "inbound" : "no-inbound" + , ls.config.outbound ? "outbound" : "no-outbound" + , ls.config.namespaces ? "namespaces" : "no-namespaces" + , ls.config.no_mnl ? "no-mnl" : "mnl" + , ls.config.procfile ? "procfile" : "no-procfile" + , ls.config.report ? "report" : "no-report" + ); + exit(1); + } + + if(strncmp(s, "no-", 3) == 0) { + positive = false; + s += 3; + } + else if(strncmp(s, "not-", 4) == 0 || strncmp(s, "non-", 4) == 0) { + positive = false; + s += 4; + } + + if(strcmp(s, "debug") == 0 || strcmp(s, "--debug") == 0) { + fprintf(stderr, "%s debugging\n", positive ? "enabling" : "disabling"); + ls.config.listening = true; + ls.config.local = true; + ls.config.inbound = true; + ls.config.outbound = true; + ls.config.pid = true; + ls.config.comm = true; + ls.config.cmdline = true; + ls.config.namespaces = true; + ls.config.tcp_info = true; + ls.config.uid = true; + ls.config.procfile = false; + ls.config.max_errors = SIZE_MAX; + ls.config.cb = local_listeners_print_socket; + + debug = true; + } + else if (strcmp("tcp", s) == 0) { + ls.config.tcp4 = ls.config.tcp6 = positive; + // fprintf(stderr, "%s tcp4 and tcp6\n", positive ? "enabling" : "disabling"); + } + else if (strcmp("tcp4", s) == 0) { + ls.config.tcp4 = positive; + // fprintf(stderr, "%s tcp4\n", positive ? "enabling" : "disabling"); + } + else if (strcmp("tcp6", s) == 0) { + ls.config.tcp6 = positive; + // fprintf(stderr, "%s tcp6\n", positive ? "enabling" : "disabling"); + } + else if (strcmp("udp", s) == 0) { + ls.config.udp4 = ls.config.udp6 = positive; + // fprintf(stderr, "%s udp4 and udp6\n", positive ? "enabling" : "disabling"); + } + else if (strcmp("udp4", s) == 0) { + ls.config.udp4 = positive; + // fprintf(stderr, "%s udp4\n", positive ? "enabling" : "disabling"); + } + else if (strcmp("udp6", s) == 0) { + ls.config.udp6 = positive; + // fprintf(stderr, "%s udp6\n", positive ? "enabling" : "disabling"); + } + else if (strcmp("ipv4", s) == 0) { + ls.config.tcp4 = ls.config.udp4 = positive; + // fprintf(stderr, "%s udp4 and tcp4\n", positive ? "enabling" : "disabling"); + } + else if (strcmp("ipv6", s) == 0) { + ls.config.tcp6 = ls.config.udp6 = positive; + // fprintf(stderr, "%s udp6 and tcp6\n", positive ? "enabling" : "disabling"); + } + else if (strcmp("listening", s) == 0) { + ls.config.listening = positive; + // fprintf(stderr, "%s listening\n", positive ? "enabling" : "disabling"); + } + else if (strcmp("local", s) == 0) { + ls.config.local = positive; + // fprintf(stderr, "%s local\n", positive ? "enabling" : "disabling"); + } + else if (strcmp("inbound", s) == 0) { + ls.config.inbound = positive; + // fprintf(stderr, "%s inbound\n", positive ? "enabling" : "disabling"); + } + else if (strcmp("outbound", s) == 0) { + ls.config.outbound = positive; + // fprintf(stderr, "%s outbound\n", positive ? "enabling" : "disabling"); + } + else if (strcmp("namespaces", s) == 0 || strcmp("ns", s) == 0) { + ls.config.namespaces = positive; + // fprintf(stderr, "%s namespaces\n", positive ? 
"enabling" : "disabling"); + } + else if (strcmp("mnl", s) == 0) { + ls.config.no_mnl = !positive; + // fprintf(stderr, "%s mnl\n", positive ? "enabling" : "disabling"); + } + else if (strcmp("procfile", s) == 0) { + ls.config.procfile = positive; + // fprintf(stderr, "%s procfile\n", positive ? "enabling" : "disabling"); + } + else if (strcmp("report", s) == 0) { + ls.config.report = positive; + // fprintf(stderr, "%s report\n", positive ? "enabling" : "disabling"); + } + else { + fprintf(stderr, "Unknown parameter %s\n", s); + exit(1); + } + } + +#if defined(LOCAL_SOCKETS_USE_SETNS) + SPAWN_SERVER *spawn_server = spawn_server_create(SPAWN_SERVER_OPTION_CALLBACK, NULL, local_sockets_spawn_server_callback, argc, (const char **)argv); + if(spawn_server == NULL) { + fprintf(stderr, "Cannot create spawn server.\n"); + exit(1); + } + + ls.spawn_server = spawn_server; +#endif + + local_sockets_process(&ls); + +#if defined(LOCAL_SOCKETS_USE_SETNS) + spawn_server_destroy(spawn_server); +#endif + + getrusage(RUSAGE_SELF, &ended); + + if(debug) { + unsigned long long user = ended.ru_utime.tv_sec * 1000000ULL + ended.ru_utime.tv_usec - started.ru_utime.tv_sec * 1000000ULL + started.ru_utime.tv_usec; + unsigned long long system = ended.ru_stime.tv_sec * 1000000ULL + ended.ru_stime.tv_usec - started.ru_stime.tv_sec * 1000000ULL + started.ru_stime.tv_usec; + unsigned long long total = user + system; + + fprintf(stderr, "CPU Usage %llu user, %llu system, %llu total, %zu namespaces, %zu nl requests (without namespaces)\n", user, system, total, ls.stats.namespaces_found, ls.stats.mnl_sends); + } + + if(ls.config.report) { + fprintf(stderr, "\nTIMINGS REPORT:\n"); + char buf[100]; + usec_t total_ut = 0; + for(size_t i = 0; i < _countof(ls.timings) ;i++) { + if (!ls.timings[i].end_ut) continue; + usec_t dt_ut = ls.timings[i].end_ut - ls.timings[i].start_ut; + total_ut += dt_ut; + } + + for(size_t i = 0; i < _countof(ls.timings) ;i++) { + if(!ls.timings[i].end_ut) continue; + usec_t dt_ut = ls.timings[i].end_ut - ls.timings[i].start_ut; + double percent = (100.0 * (double)dt_ut) / (double)total_ut; + duration_snprintf(buf, sizeof(buf), (int64_t)dt_ut, "us", true); + fprintf(stderr, "%20s: %6.2f%% %s\n", ls.timings[i].name, percent, buf); + } + + duration_snprintf(buf, sizeof(buf), (int64_t)total_ut, "us", true); + fprintf(stderr, "%20s: %6.2f%% %s\n", "TOTAL", 100.0, buf); + + fprintf(stderr, "\n"); + fprintf(stderr, "Namespaces [ found: %zu, absent: %zu, invalid: %zu ]\n" +#if defined(LOCAL_SOCKETS_USE_SETNS) + " \\_ forks [ tried: %zu, failed: %zu, unresponsive: %zu ]\n" + " \\_ sockets [ new: %zu, existing: %zu ]\n" +#endif + , ls.stats.namespaces_found, ls.stats.namespaces_absent, ls.stats.namespaces_invalid +#if defined(LOCAL_SOCKETS_USE_SETNS) + , ls.stats.namespaces_forks_attempted, ls.stats.namespaces_forks_failed, ls.stats.namespaces_forks_unresponsive + , ls.stats.namespaces_sockets_new, ls.stats.namespaces_sockets_existing +#endif + ); + + fprintf(stderr, "\n"); + fprintf(stderr, "Sockets [ found: %zu ]\n", + ls.stats.sockets_added); + + fprintf(stderr, "\n"); + fprintf(stderr, "Main Procfile [ opens: %zu, reads: %zu, resizes: %zu, memory: %zu ]\n" + " \\_ reads [ total bytes read: %zu, average read size: %zu, max read size: %zu ]\n" + " \\_ max [ max file size: %zu, max lines: %zu, max words: %zu ]\n", + ls.stats.ff.opens, ls.stats.ff.reads, ls.stats.ff.resizes, ls.stats.ff.memory, + ls.stats.ff.total_read_bytes, ls.stats.ff.total_read_bytes / (ls.stats.ff.reads ? 
ls.stats.ff.reads : 1), ls.stats.ff.max_read_size, + ls.stats.ff.max_source_bytes, ls.stats.ff.max_lines, ls.stats.ff.max_words); + + fprintf(stderr, "\n"); + fprintf(stderr, "MNL(without namespaces) [ requests: %zu ]\n", + ls.stats.mnl_sends); + } + + return 0; +} diff --git a/src/collectors/utils/ndsudo.c b/src/collectors/utils/ndsudo.c new file mode 100644 index 000000000..e37110bbb --- /dev/null +++ b/src/collectors/utils/ndsudo.c @@ -0,0 +1,494 @@ +#include <unistd.h> +#include <stdlib.h> +#include <string.h> +#include <stdio.h> +#include <stdbool.h> + +#define MAX_SEARCH 2 +#define MAX_PARAMETERS 128 +#define ERROR_BUFFER_SIZE 1024 + +struct command { + const char *name; + const char *params; + const char *search[MAX_SEARCH]; +} allowed_commands[] = { + { + .name = "chronyc-serverstats", + .params = "serverstats", + .search = + { + [0] = "chronyc", + [1] = NULL, + }, + }, + { + .name = "varnishadm-backend-list", + .params = "backend.list", + .search = + { + [0] = "varnishadm", + [1] = NULL, + }, + }, + { + .name = "varnishstat-stats", + .params = "-1 -t off -n {{instanceName}}", + .search = + { + [0] = "varnishstat", + [1] = NULL, + }, + }, + { + .name = "smbstatus-profile", + .params = "-P", + .search = + { + [0] = "smbstatus", + [1] = NULL, + }, + }, + { + .name = "exim-bpc", + .params = "-bpc", + .search = + { + [0] = "exim", + [1] = NULL, + }, + }, + { + .name = "nsd-control-stats", + .params = "stats_noreset", + .search = { + [0] = "nsd-control", + [1] = NULL, + }, + }, + { + .name = "chronyc-serverstats", + .params = "serverstats", + .search = { + [0] = "chronyc", + [1] = NULL, + }, + }, + { + .name = "dmsetup-status-cache", + .params = "status --target cache --noflush", + .search = { + [0] = "dmsetup", + [1] = NULL, + }, + }, + { + .name = "ssacli-controllers-info", + .params = "ctrl all show config detail", + .search = { + [0] = "ssacli", + [1] = NULL, + }, + }, + { + .name = "smartctl-json-scan", + .params = "--json --scan", + .search = { + [0] = "smartctl", + [1] = NULL, + }, + }, + { + .name = "smartctl-json-scan-open", + .params = "--json --scan-open", + .search = { + [0] = "smartctl", + [1] = NULL, + }, + }, + { + .name = "smartctl-json-device-info", + .params = "--json --all {{deviceName}} --device {{deviceType}} --nocheck {{powerMode}}", + .search = { + [0] = "smartctl", + [1] = NULL, + }, + }, + { + .name = "fail2ban-client-status", + .params = "status", + .search = { + [0] = "fail2ban-client", + [1] = NULL, + }, + }, + { + .name = "fail2ban-client-status-socket", + .params = "-s {{socket_path}} status", + .search = { + [0] = "fail2ban-client", + [1] = NULL, + }, + }, + { + .name = "fail2ban-client-status-jail", + .params = "status {{jail}}", + .search = { + [0] = "fail2ban-client", + [1] = NULL, + }, + }, + { + .name = "fail2ban-client-status-jail-socket", + .params = "-s {{socket_path}} status {{jail}}", + .search = { + [0] = "fail2ban-client", + [1] = NULL, + }, + }, + { + .name = "storcli-controllers-info", + .params = "/cALL show all J nolog", + .search = { + [0] = "storcli", + [1] = NULL, + }, + }, + { + .name = "storcli-drives-info", + .params = "/cALL/eALL/sALL show all J nolog", + .search = { + [0] = "storcli", + [1] = NULL, + }, + }, + { + .name = "lvs-report-json", + .params = "--reportformat json --units b --nosuffix -o {{options}}", + .search = { + [0] = "lvs", + [1] = NULL, + }, + }, + { + .name = "igt-list-gpus", + .params = "-L", + .search = { + [0] = "intel_gpu_top", + [1] = NULL, + }, + }, + { + .name = "igt-device-json", + .params = "-d {{device}} -J -s {{interval}}", + .search = { + [0] = 
"intel_gpu_top", + [1] = NULL, + }, + }, + { + .name = "igt-json", + .params = "-J -s {{interval}}", + .search = { + [0] = "intel_gpu_top", + [1] = NULL, + }, + }, + { + .name = "nvme-list", + .params = "list --output-format=json", + .search = { + [0] = "nvme", + [1] = NULL, + }, + }, + { + .name = "nvme-smart-log", + .params = "smart-log {{device}} --output-format=json", + .search = { + [0] = "nvme", + [1] = NULL, + }, + }, + { + .name = "megacli-disk-info", + .params = "-LDPDInfo -aAll -NoLog", + .search = { + [0] = "megacli", + [1] = "MegaCli", + }, + }, + { + .name = "megacli-battery-info", + .params = "-AdpBbuCmd -aAll -NoLog", + .search = { + [0] = "megacli", + [1] = "MegaCli", + }, + }, + { + .name = "arcconf-ld-info", + .params = "GETCONFIG 1 LD", + .search = { + [0] = "arcconf", + [1] = NULL, + }, + }, + { + .name = "arcconf-pd-info", + .params = "GETCONFIG 1 PD", + .search = { + [0] = "arcconf", + [1] = NULL, + }, + } +}; + +bool command_exists_in_dir(const char *dir, const char *cmd, char *dst, size_t dst_size) { + snprintf(dst, dst_size, "%s/%s", dir, cmd); + return access(dst, X_OK) == 0; +} + +bool command_exists_in_PATH(const char *cmd, char *dst, size_t dst_size) { + if(!dst || !dst_size) + return false; + + char *path = getenv("PATH"); + if(!path) + return false; + + char *path_copy = strdup(path); + if (!path_copy) + return false; + + char *dir; + bool found = false; + dir = strtok(path_copy, ":"); + while(dir && !found) { + found = command_exists_in_dir(dir, cmd, dst, dst_size); + dir = strtok(NULL, ":"); + } + + free(path_copy); + return found; +} + +struct command *find_command(const char *cmd) { + size_t size = sizeof(allowed_commands) / sizeof(allowed_commands[0]); + for(size_t i = 0; i < size ;i++) { + if(strcmp(cmd, allowed_commands[i].name) == 0) + return &allowed_commands[i]; + } + + return NULL; +} + +bool check_string(const char *str, size_t index, char *err, size_t err_size) { + const char *s = str; + while(*s) { + char c = *s++; + if(!((c >= 'A' && c <= 'Z') || + (c >= 'a' && c <= 'z') || + (c >= '0' && c <= '9') || + c == ' ' || c == '_' || c == '-' || c == '/' || + c == '.' 
|| c == ',' || c == ':' || c == '=')) { + snprintf(err, err_size, "command line argument No %zu includes invalid character '%c'", index, c); + return false; + } + } + + return true; +} + +bool check_params(int argc, char **argv, char *err, size_t err_size) { + for(int i = 0 ; i < argc ;i++) + if(!check_string(argv[i], i, err, err_size)) + return false; + + return true; +} + +char *find_variable_in_argv(const char *variable, int argc, char **argv, char *err, size_t err_size) { + for (int i = 1; i < argc - 1; i++) { + if (strcmp(argv[i], variable) == 0) + return strdup(argv[i + 1]); + } + + snprintf(err, err_size, "variable '%s' is required, but was not provided in the command line parameters", variable); + + return NULL; +} + +bool search_and_replace_params(struct command *cmd, char **params, size_t max_params, const char *filename, int argc, char **argv, char *err, size_t err_size) { + if (!cmd || !params || !max_params) { + snprintf(err, err_size, "search_and_replace_params() internal error"); + return false; + } + + const char *delim = " "; + char *token; + char *temp_params = strdup(cmd->params); + if (!temp_params) { + snprintf(err, err_size, "search_and_replace_params() cannot allocate memory"); + return false; + } + + size_t param_count = 0; + params[param_count++] = strdup(filename); + + token = strtok(temp_params, delim); + while (token && param_count < max_params - 1) { + size_t len = strlen(token); + + char *value = NULL; + + if (strncmp(token, "{{", 2) == 0 && strncmp(token + len - 2, "}}", 2) == 0) { + token[0] = '-'; + token[1] = '-'; + token[len - 2] = '\0'; + + value = find_variable_in_argv(token, argc, argv, err, err_size); + } + else + value = strdup(token); + + if(!value) + goto cleanup; + + params[param_count++] = value; + token = strtok(NULL, delim); + } + + params[param_count] = NULL; // Null-terminate the params array + free(temp_params); + return true; + +cleanup: + if(!err[0]) + snprintf(err, err_size, "memory allocation failure"); + + free(temp_params); + for (size_t i = 0; i < param_count; ++i) { + free(params[i]); + params[i] = NULL; + } + return false; +} + +void show_help() { + fprintf(stdout, "\n"); + fprintf(stdout, "ndsudo\n"); + fprintf(stdout, "\n"); + fprintf(stdout, "(C) Netdata Inc.\n"); + fprintf(stdout, "\n"); + fprintf(stdout, "A helper to allow Netdata run privileged commands.\n"); + fprintf(stdout, "\n"); + fprintf(stdout, " --test\n"); + fprintf(stdout, " print the generated command that will be run, without running it.\n"); + fprintf(stdout, "\n"); + fprintf(stdout, " --help\n"); + fprintf(stdout, " print this message.\n"); + fprintf(stdout, "\n"); + + fprintf(stdout, "The following commands are supported:\n\n"); + + size_t size = sizeof(allowed_commands) / sizeof(allowed_commands[0]); + for(size_t i = 0; i < size ;i++) { + fprintf(stdout, "- Command : %s\n", allowed_commands[i].name); + fprintf(stdout, " Executables: "); + for(size_t j = 0; j < MAX_SEARCH && allowed_commands[i].search[j] ;j++) { + fprintf(stdout, "%s ", allowed_commands[i].search[j]); + } + fprintf(stdout, "\n"); + fprintf(stdout, " Parameters : %s\n\n", allowed_commands[i].params); + } + + fprintf(stdout, "The program searches for executables in the system path.\n"); + fprintf(stdout, "\n"); + fprintf(stdout, "Variables given as {{variable}} are expected on the command line as:\n"); + fprintf(stdout, " --variable VALUE\n"); + fprintf(stdout, "\n"); + fprintf(stdout, "VALUE can include space, A-Z, a-z, 0-9, _, -, /, and .\n"); + fprintf(stdout, "\n"); +} + +int main(int argc, 
char *argv[]) { + char error_buffer[ERROR_BUFFER_SIZE] = ""; + + if (argc < 2) { + fprintf(stderr, "at least 2 parameters are needed, but %d were given.\n", argc); + return 1; + } + + if(!check_params(argc, argv, error_buffer, sizeof(error_buffer))) { + fprintf(stderr, "invalid characters in parameters: %s\n", error_buffer); + return 2; + } + + bool test = false; + const char *cmd = argv[1]; + if(strcmp(cmd, "--help") == 0 || strcmp(cmd, "-h") == 0) { + show_help(); + exit(0); + } + else if(strcmp(cmd, "--test") == 0) { + cmd = argv[2]; + test = true; + } + + struct command *command = find_command(cmd); + if(!command) { + fprintf(stderr, "command not recognized: %s\n", cmd); + return 3; + } + + char new_path[] = "PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin"; + putenv(new_path); + + setuid(0); + setgid(0); + setegid(0); + + bool found = false; + char filename[FILENAME_MAX]; + + for(size_t i = 0; i < MAX_SEARCH && !found ;i++) { + if(command->search[i]) { + found = command_exists_in_PATH(command->search[i], filename, sizeof(filename)); + if(!found) { + size_t len = strlen(error_buffer); + snprintf(&error_buffer[len], sizeof(error_buffer) - len, "%s ", command->search[i]); + } + } + } + + if(!found) { + fprintf(stderr, "%s: not available in PATH.\n", error_buffer); + return 4; + } + else + error_buffer[0] = '\0'; + + char *params[MAX_PARAMETERS]; + if(!search_and_replace_params(command, params, MAX_PARAMETERS, filename, argc, argv, error_buffer, sizeof(error_buffer))) { + fprintf(stderr, "command line parameters are not satisfied: %s\n", error_buffer); + return 5; + } + + if(test) { + fprintf(stderr, "Command to run: \n"); + + for(size_t i = 0; i < MAX_PARAMETERS && params[i] ;i++) + fprintf(stderr, "'%s' ", params[i]); + + fprintf(stderr, "\n"); + + exit(0); + } + else { + char *clean_env[] = {NULL}; + execve(filename, params, clean_env); + perror("execve"); // execve only returns on error + return 6; + } +} diff --git a/src/collectors/windows-events.plugin/README.md b/src/collectors/windows-events.plugin/README.md new file mode 100644 index 000000000..ecaa4349a --- /dev/null +++ b/src/collectors/windows-events.plugin/README.md @@ -0,0 +1,289 @@ +# Windows Events plugin + +[KEY FEATURES](#key-features) | [EVENTS SOURCES](#events-sources) | [EVENT FIELDS](#event-fields) | +[PLAY MODE](#play-mode) | [FULL TEXT SEARCH](#full-text-search) | [PERFORMANCE](#query-performance) | +[CONFIGURATION](#configuration-and-maintenance) | [FAQ](#faq) + +The Windows Events plugin by Netdata makes viewing, exploring and analyzing Windows Events simple and +efficient. + +![image](https://github.com/user-attachments/assets/71a1ab1d-5b7b-477e-a4e6-a30275a5710b) + +## Key features + +- Supports **Windows Event Logs (WEL)**. +- Supports **Event Tracing for Windows (ETW)** and **TraceLogging (TL)**, when events are routed to Event Log. +- Allows filtering on all System Events fields. +- Allows **full text search** (`grep`) on all System and User fields. +- Provides a **histogram** for log entries over time, with a break down per field-value, for any System Event field and any + time-frame. +- Supports coloring log entries based on severity. +- In PLAY mode it "tails" all the Events, showing new log entries immediately after they are received. + +### Prerequisites + +`windows-events.plugin` is a Netdata Function Plugin. + +To protect your privacy, as with all Netdata Functions, a free Netdata Cloud user account is required to access it. 
+For more information check [this discussion](https://github.com/netdata/netdata/discussions/16136). + +## Events Sources + +The plugin automatically detects all the available channels and offers a list of "Event Channels". + +By default, it aggregates events from all event channels, providing a unified view of all events. + +> To improve query performance, we recommend selecting the relevant event channels before doing more +> analysis on the events. + +In the list of event channels, several shortcuts are added, aggregating events according to various attributes: + +- `All`, aggregates events from all available channels. This provides a holistic view of all events in the system. +- `All-Admin`, `All-Operational`, `All-Analytic` and `All-Debug` aggregate events from channels marked `Admin`, `Operational`, `Analytic` and `Debug`, respectively. +- `All-Windows`, aggregates events from `Application`, `Security`, `System` and `Setup`. +- `All-Enabled` and `All-Disabled` aggregate events from channels depending on their status. +- `All-Forwarded` aggregates events from channels owned by `Microsoft-Windows-EventCollector`. +- `All-Classic` aggregates events from channels using the Classic Event Log API. +- `All-Of-X`, where `X` is a provider name, is offered for all providers that have more than one channel. +- `All-In-X`, where `X` is `Backup-Mode`, `Overwrite-Mode`, `StopWhenFull-Mode` and `RetainAndBackup-Mode`, aggregates events based on their channel retention policy. + +Channels that are configured but are not queryable, and channels that do not have any events in them, are automatically excluded from the channels list. + +## Event Fields + +Windows Events are structured with both system-defined fields and user-defined fields. +The Windows Events plugin primarily works with the system-defined fields, which are consistently available +across all event types. + +### System-defined fields + +The system-defined fields are: + +1. **EventRecordID** + A unique, sequential identifier for the event within the channel. This ID increases as new events are logged. + +2. **Version** + The version of the event, indicating possible structural changes or updates to the event definition. + + Netdata adds this field automatically when it is not zero. + +3. **Level** + The severity or importance of the event. Levels can include: + - 0: LogAlways (reserved) + - 1: Critical + - 2: Error + - 3: Warning + - 4: Informational + - 5: Verbose + + Additionally, applications may define their own levels. + + Netdata provides 2 fields: `Level` and `LevelID` for its text and numeric representations. + +4. **Opcode** + The action or state within a provider when the event was logged. + + Netdata provides 2 fields: `Opcode` and `OpcodeID` for its text and numeric representations. + +5. **EventID** + This identifies the event template, linking it to a specific message or event type. Event IDs are provider-specific. + +6. **Task** + Defines a higher-level categorization or logical grouping for the event, often related to a specific function within the application or provider. + + Netdata provides 2 fields: `Task` and `TaskID` for its text and numeric representations. + +7. **Qualifiers** + Provides additional detail for interpreting the event and is often specific to the event source. + + Netdata adds this field automatically when it is not zero. + +8. **ProcessID** + The ID of the process that generated the event, useful for pinpointing the source of the event within the system. + +9. 
**ThreadID** + The ID of the thread within the process that generated the event, which helps in more detailed debugging scenarios. + +10. **Keywords** + A categorization field that can be used for event filtering. Keywords are bit flags that represent categories or purposes of the event, providing additional context. + + Netdata provides 2 fields: `Keywords` and `keywordsID` for its text and numeric representations. + +11. **Provider** + The unique identifier (GUID) of the event provider. This is essential for knowing which application or system component generated the event. + + Netdata provides 2 fields: `Provider` and `ProviderGUID` for its name and its GUID. + +12. **ActivityID** + A GUID that correlates events generated as part of the same operation or transaction, helping to track activities across different components or stages. + + Netdata adds this field automatically when it is not zero. + +13. **RelatedActivityID** + A GUID that links related operations or transactions, allowing for tracing complex workflows where one event triggers or relates to another. + + Netdata adds this field automatically when it is not zero. + +14. **Timestamp** + The timestamp when the event was created. This provides precise timing information about when the event occurred. + +15. **User** + The system user who logged this event. + + Netdata provides 3 fields: `UserAccount`, `UserDomain` and `UserSID`. + +### User-defined fields +Each event log entry can include up to 100 user-defined fields (per event-id). + +Unfortunately, accessing these fields is significantly slower, to the point that it is not practical +when there are more than a few thousand log entries to explore. So, Netdata presents them +with lazy loading. + +This prevents Netdata from offering filtering on user-defined fields, although Netdata does support +full text search on user-defined field values. + +### Event fields as columns in the table + +The system fields mentioned above are offered as columns on the UI. Use the gear button above the table to +select visible columns. + +### Event fields as filters + +The plugin presents the system fields as filters for the query, with counters for each of the possible values +for the field. This list can be used to quickly check which fields and values are available for the entire +time-frame of the query, across multiple providers and channels. + +### Event fields as histogram sources + +The histogram can be based on any of the system fields that are available as filters. This allows you to +visualize the distribution of events over time based on different criteria such as Level, Provider, or EventID. + +## PLAY mode + +The PLAY mode in this plugin allows real-time monitoring of new events as they are added to the Windows Event +Log. This feature works by continuously querying for new events and updating the display. + +## Full-text search + +The plugin supports searching for text within all system and user fields of the events. This means that while +user-defined fields are not directly filterable, they are searchable through the full-text search feature. + +Keep in mind that query performance is slower while doing full text search, mainly because the plugin +needs to ask the system to provide all the user field values. + +## Query performance + +The plugin is optimized to work efficiently with Event Logs. 
It uses several layers of caching and +similar techniques to offload as much work as possible from the system, offering quick responses even when +hundreds of thousands of events are within the visible timeframe. + +To achieve this level of efficiency, the plugin: + +- pre-loads ETW providers' manifests for resolving numeric Levels, Opcodes, Tasks and Keywords to text. +- caches number to text maps for Levels, Opcodes, Tasks and Keywords per provider for WEL providers. +- caches user SID to account and domain maps. +- lazy loads the "expensive" event Message and XML, so that the system is queried only for the visible events. + +For Full Text Search: + +- requests only the Message and the values of the user-fields from the system, avoiding the "expensive" XML call (which is still lazy-loaded). + +The result is a system that is highly efficient for working with moderate volumes (hundreds of thousands) of events. + +## Configuration and maintenance + +This Netdata plugin does not require any specific configuration. It automatically detects available event logs +on the system. + +## FAQ + +### Can I use this plugin on event centralization servers? + +Yes. You can centralize your Windows Events using Windows Event Forwarding (WEF) or other event collection +mechanisms, and then install Netdata on this event centralization server to explore the events of your entire +infrastructure. + +This plugin will automatically provide multi-node views of your events and also give you the ability to +combine the events of multiple servers, as you see fit. + +### Can I use this plugin from a parent Netdata? + +Yes. When your nodes are connected to a Netdata parent, all their functions are available via the parent's UI. +So, from the parent UI, you can access the functions of all your nodes. + +Keep in mind that, to protect your privacy, you need a free Netdata Cloud +account in order to access Netdata functions. + +### Is any of my data exposed to Netdata Cloud from this plugin? + +No. When you access the agent directly, none of your data passes through Netdata Cloud. You need a free Netdata +Cloud account only to verify your identity and enable the use of Netdata Functions. Once this is done, all the +data flows directly from your Netdata agent to your web browser. + +When you access Netdata via https://app.netdata.cloud, your data travels via Netdata Cloud, but it is not stored +in Netdata Cloud. This is to allow you to access your Netdata agents from anywhere. All communication from/to +Netdata Cloud is encrypted. + +### What are the different types of event logs supported by this plugin? + +The plugin supports all the kinds of event logs currently supported by the Windows Event Viewer: + +- Windows Event Logs (WEL): The traditional event logging system in Windows. +- Event Tracing for Windows (ETW): A more detailed and efficient event tracing system. +- TraceLogging (TL): An extension of ETW that simplifies the process of adding events to your code. + +The plugin can access all of these when they are routed to the Windows Event Log. + +### How does this plugin handle user-defined fields in Windows Events? + +User-defined fields are not directly exposed as table columns or filters in the plugin interface. However, +they are included in the XML representation of each event, which can be viewed in the info sidebar when +clicking on an event entry. 
+
+## Configuration and maintenance
+
+This Netdata plugin does not require any specific configuration. It automatically detects the available
+event logs on the system.
+
+## FAQ
+
+### Can I use this plugin on event centralization servers?
+
+Yes. You can centralize your Windows Events using Windows Event Forwarding (WEF) or other event collection
+mechanisms, and then install Netdata on this event centralization server to explore the events of your
+entire infrastructure.
+
+This plugin will automatically provide multi-node views of your events, and also give you the ability to
+combine the events of multiple servers as you see fit.
+
+### Can I use this plugin from a parent Netdata?
+
+Yes. When your nodes are connected to a Netdata parent, all their functions are available via the parent's UI.
+So, from the parent UI, you can access the functions of all your nodes.
+
+Keep in mind that, to protect your privacy, you need a free Netdata Cloud account to access Netdata Functions.
+
+### Is any of my data exposed to Netdata Cloud from this plugin?
+
+No. When you access the agent directly, none of your data passes through Netdata Cloud. You need a free Netdata
+Cloud account only to verify your identity and enable the use of Netdata Functions. Once this is done, all data
+flows directly from your Netdata agent to your web browser.
+
+When you access Netdata via https://app.netdata.cloud, your data travels via Netdata Cloud, but it is not stored
+there. This allows you to access your Netdata agents from anywhere. All communication from/to Netdata Cloud is
+encrypted.
+
+### What are the different types of event logs supported by this plugin?
+
+The plugin supports all the kinds of event logs currently supported by the Windows Event Viewer:
+
+- Windows Event Logs (WEL): The traditional event logging system in Windows.
+- Event Tracing for Windows (ETW): A more detailed and efficient event tracing system.
+- TraceLogging (TL): An extension of ETW that simplifies the process of adding events to your code.
+
+The plugin can access all of these when they are routed to the Windows Event Log.
+
+### How does this plugin handle user-defined fields in Windows Events?
+
+User-defined fields are not directly exposed as table columns or filters in the plugin interface. However,
+they are included in the XML representation of each event, which can be viewed in the info sidebar when
+clicking on an event entry. Additionally, the full-text search feature does search through these
+user-defined fields, allowing you to find specific information even if it's not in the main system fields.
+
+### Can I use this plugin to monitor real-time events?
+
+Yes, the plugin supports a PLAY mode that allows you to monitor events in real time. When activated, it
+continuously updates to show new events as they are logged, similar to the "tail" functionality in
+Unix-like systems.
+
+### How does the plugin handle large volumes of events?
+
+The plugin is designed to handle moderate volumes of events (hundreds of thousands of events) efficiently.
+
+It is on our roadmap to port the `systemd-journal` sampling techniques to it, so that it can provide quick
+responses with accurate representations of the data even on very large datasets. Until then, for the best
+performance, we recommend querying smaller time frames or using more specific filters when dealing with
+extremely large event volumes.
+
+### Can I use this plugin to analyze events from multiple servers?
+
+Yes, if you have set up Windows Event Forwarding (WEF) or another method of centralizing your Windows Events,
+you can use this plugin on the central server to analyze events from multiple sources. The plugin will
+automatically detect the available event sources.
+
+### How does the histogram feature work in this plugin?
+
+The histogram feature provides a visual representation of event frequency over time. You can base the
+histogram on any of the system fields available as filters (such as Level, Provider, or EventID). This
+allows you to quickly identify patterns or anomalies in your event logs.
+
+### Is it possible to export or share the results from this plugin?
+
+While the plugin doesn't have a direct export feature, you can use browser-based methods to save or share
+the results. These include taking screenshots, using the browser's print/save-as-PDF functionality, or
+copying data from the table view. For more advanced data export needs, you may need to use the Windows
+Event Log API directly or other Windows administrative tools.
+
+### How often does the plugin update its data?
+
+The plugin updates its data in real time when in PLAY mode. In normal mode, it refreshes data based on the
+query you've submitted. The plugin is designed to provide the most up-to-date information available in the
+Windows Event Logs at the time of the query.
+
+## TODO
+
+1. Support sampling, so that the plugin can respond faster even on very busy systems (millions of events visible).
+2. Support exploring events from live tracing sessions.
+3. Support exploring events in saved Event Trace Log files (`.etl` files).
+4. Support exploring events in saved Event Log files (`.evtx` files).
diff --git a/src/collectors/windows-events.plugin/windows-events-fields-cache.c b/src/collectors/windows-events.plugin/windows-events-fields-cache.c new file mode 100644 index 000000000..4b4b72fa4 --- /dev/null +++ b/src/collectors/windows-events.plugin/windows-events-fields-cache.c @@ -0,0 +1,158 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "windows-events-fields-cache.h" + +typedef struct field_key { + uint64_t value; + ND_UUID provider; +} WEVT_FIELD_KEY; + +typedef struct field_value { + WEVT_FIELD_KEY key; + uint32_t name_size; + char name[]; +} WEVT_FIELD_VALUE; + +#define SIMPLE_HASHTABLE_NAME _FIELDS_CACHE +#define SIMPLE_HASHTABLE_VALUE_TYPE WEVT_FIELD_VALUE +#define SIMPLE_HASHTABLE_KEY_TYPE WEVT_FIELD_KEY +#define SIMPLE_HASHTABLE_VALUE2KEY_FUNCTION field_cache_value_to_key +#define SIMPLE_HASHTABLE_COMPARE_KEYS_FUNCTION field_cache_cache_compar +#define SIMPLE_HASHTABLE_SAMPLE_IMPLEMENTATION 1 +#include "libnetdata/simple_hashtable/simple_hashtable.h" + +static inline WEVT_FIELD_KEY *field_cache_value_to_key(WEVT_FIELD_VALUE *p) { + return &p->key; +} + +static inline bool field_cache_cache_compar(WEVT_FIELD_KEY *a, WEVT_FIELD_KEY *b) { + return memcmp(a, b, sizeof(WEVT_FIELD_KEY)) == 0; +} + +struct ht { + SPINLOCK spinlock; + size_t allocations; + size_t bytes; + struct simple_hashtable_FIELDS_CACHE ht; +}; + +static struct { + bool initialized; + struct ht ht[WEVT_FIELD_TYPE_MAX]; +} fdc = { + .initialized = false, +}; + +void field_cache_init(void) { + for(size_t type = 0; type < WEVT_FIELD_TYPE_MAX ; type++) { + spinlock_init(&fdc.ht[type].spinlock); + simple_hashtable_init_FIELDS_CACHE(&fdc.ht[type].ht, 10000); + } +} + +static inline bool should_zero_provider(WEVT_FIELD_TYPE type, uint64_t value) { + switch(type) { + case WEVT_FIELD_TYPE_LEVEL: + return !is_valid_provider_level(value, true); + + case WEVT_FIELD_TYPE_KEYWORD: + return !is_valid_provider_keyword(value, true); + + case WEVT_FIELD_TYPE_OPCODE: + return !is_valid_provider_opcode(value, true); + + case WEVT_FIELD_TYPE_TASK: + return !is_valid_provider_task(value, true); + + default: + return false; + } +} + +bool field_cache_get(WEVT_FIELD_TYPE type, const ND_UUID *uuid, uint64_t value, TXT_UTF8 *dst) { + fatal_assert(type < WEVT_FIELD_TYPE_MAX); + + struct ht *ht = &fdc.ht[type]; + + WEVT_FIELD_KEY t = { + .value = value, + .provider = should_zero_provider(type, value) ? 
UUID_ZERO : *uuid, + }; + XXH64_hash_t hash = XXH3_64bits(&t, sizeof(t)); + + spinlock_lock(&ht->spinlock); + SIMPLE_HASHTABLE_SLOT_FIELDS_CACHE *slot = simple_hashtable_get_slot_FIELDS_CACHE(&ht->ht, hash, &t, true); + WEVT_FIELD_VALUE *v = SIMPLE_HASHTABLE_SLOT_DATA(slot); + spinlock_unlock(&ht->spinlock); + + if(v) { + txt_utf8_resize(dst, v->name_size, false); + memcpy(dst->data, v->name, v->name_size); + dst->used = v->name_size; + dst->src = TXT_SOURCE_FIELD_CACHE; + return true; + } + + return false; +} + +static WEVT_FIELD_VALUE *wevt_create_cache_entry(WEVT_FIELD_KEY *t, TXT_UTF8 *name, size_t *bytes) { + *bytes = sizeof(WEVT_FIELD_VALUE) + name->used; + WEVT_FIELD_VALUE *v = callocz(1, *bytes); + v->key = *t; + memcpy(v->name, name->data, name->used); + v->name_size = name->used; + return v; +} + +//static bool is_numeric(const char *s) { +// while(*s) { +// if(!isdigit((uint8_t)*s++)) +// return false; +// } +// +// return true; +//} + +void field_cache_set(WEVT_FIELD_TYPE type, const ND_UUID *uuid, uint64_t value, TXT_UTF8 *name) { + fatal_assert(type < WEVT_FIELD_TYPE_MAX); + + struct ht *ht = &fdc.ht[type]; + + WEVT_FIELD_KEY t = { + .value = value, + .provider = should_zero_provider(type, value) ? UUID_ZERO : *uuid, + }; + XXH64_hash_t hash = XXH3_64bits(&t, sizeof(t)); + + spinlock_lock(&ht->spinlock); + SIMPLE_HASHTABLE_SLOT_FIELDS_CACHE *slot = simple_hashtable_get_slot_FIELDS_CACHE(&ht->ht, hash, &t, true); + WEVT_FIELD_VALUE *v = SIMPLE_HASHTABLE_SLOT_DATA(slot); + if(!v) { + size_t bytes; + v = wevt_create_cache_entry(&t, name, &bytes); + simple_hashtable_set_slot_FIELDS_CACHE(&ht->ht, slot, hash, v); + + ht->allocations++; + ht->bytes += bytes; + } +// else { +// if((v->name_size == 1 && name->used > 0) || is_numeric(v->name)) { +// size_t bytes; +// WEVT_FIELD_VALUE *nv = wevt_create_cache_entry(&t, name, &bytes); +// simple_hashtable_set_slot_FIELDS_CACHE(&ht->ht, slot, hash, nv); +// ht->bytes += name->used; +// ht->bytes -= v->name_size; +// freez(v); +// } +// else if(name->used > 2 && !is_numeric(name->data) && (v->name_size != name->used || strcasecmp(v->name, name->data) != 0)) { +// const char *a = v->name; +// const char *b = name->data; +// int x = 0; +// x++; +// } +// } + + spinlock_unlock(&ht->spinlock); +} + diff --git a/src/collectors/windows-events.plugin/windows-events-fields-cache.h b/src/collectors/windows-events.plugin/windows-events-fields-cache.h new file mode 100644 index 000000000..a76170d68 --- /dev/null +++ b/src/collectors/windows-events.plugin/windows-events-fields-cache.h @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_WINDOWS_EVENTS_FIELDS_CACHE_H +#define NETDATA_WINDOWS_EVENTS_FIELDS_CACHE_H + +#include "windows-events.h" + +typedef enum __attribute__((packed)) { + WEVT_FIELD_TYPE_LEVEL = 0, + WEVT_FIELD_TYPE_OPCODE, + WEVT_FIELD_TYPE_KEYWORD, + WEVT_FIELD_TYPE_TASK, + + // terminator + WEVT_FIELD_TYPE_MAX, +} WEVT_FIELD_TYPE; + +void field_cache_init(void); +bool field_cache_get(WEVT_FIELD_TYPE type, const ND_UUID *uuid, uint64_t value, TXT_UTF8 *dst); +void field_cache_set(WEVT_FIELD_TYPE type, const ND_UUID *uuid, uint64_t value, TXT_UTF8 *name); + +#endif //NETDATA_WINDOWS_EVENTS_FIELDS_CACHE_H diff --git a/src/collectors/windows-events.plugin/windows-events-providers.c b/src/collectors/windows-events.plugin/windows-events-providers.c new file mode 100644 index 000000000..d4c4d35ea --- /dev/null +++ b/src/collectors/windows-events.plugin/windows-events-providers.c @@ -0,0 +1,678 @@ +// 
SPDX-License-Identifier: GPL-3.0-or-later + +#include "windows-events-providers.h" + +#define MAX_OPEN_HANDLES_PER_PROVIDER 5 + +struct provider; + +// typedef as PROVIDER_META_HANDLE in include file +struct provider_meta_handle { + pid_t owner; // the owner of the handle, or zero + uint32_t locks; // the number of locks the owner has on this handle + EVT_HANDLE hMetadata; // the handle + struct provider *provider; // a pointer back to the provider + + usec_t created_monotonic_ut; // the monotonic timestamp this handle was created + + // double linked list + PROVIDER_META_HANDLE *prev; + PROVIDER_META_HANDLE *next; +}; + +struct provider_data { + uint64_t value; // the mask of the keyword + XXH64_hash_t hash; // the hash of the name + uint32_t len; // the length of the name + char *name; // the name of the keyword in UTF-8 +}; + +struct provider_list { + uint64_t min, max, mask; + bool exceeds_data_type; // true when the manifest values exceed the capacity of the EvtXXX() API + uint32_t total; // the number of entries in the array + struct provider_data *array; // the array of entries, sorted (for binary search) +}; + +typedef struct provider_key { + ND_UUID uuid; // the Provider GUID + DWORD len; // the length of the Provider Name + const wchar_t *wname; // the Provider wide-string Name (UTF-16) +} PROVIDER_KEY; + +typedef struct provider { + PROVIDER_KEY key; + const char *name; // the Provider Name (UTF-8) + uint32_t total_handles; // the number of handles allocated + uint32_t available_handles; // the number of available handles + uint32_t deleted_handles; // the number of deleted handles + PROVIDER_META_HANDLE *handles; // a double linked list of all the handles + + WEVT_PROVIDER_PLATFORM platform; + + struct provider_list keyword; + struct provider_list tasks; + struct provider_list opcodes; + struct provider_list levels; +} PROVIDER; + +// A hashtable implementation for Providers +// using the Provider GUID as key and PROVIDER as value +#define SIMPLE_HASHTABLE_NAME _PROVIDER +#define SIMPLE_HASHTABLE_VALUE_TYPE PROVIDER +#define SIMPLE_HASHTABLE_KEY_TYPE PROVIDER_KEY +#define SIMPLE_HASHTABLE_VALUE2KEY_FUNCTION provider_value_to_key +#define SIMPLE_HASHTABLE_COMPARE_KEYS_FUNCTION provider_cache_compar +#define SIMPLE_HASHTABLE_SAMPLE_IMPLEMENTATION 1 +#include "libnetdata/simple_hashtable/simple_hashtable.h" + +static struct { + SPINLOCK spinlock; + uint32_t total_providers; + uint32_t total_handles; + uint32_t deleted_handles; + struct simple_hashtable_PROVIDER hashtable; + ARAL *aral_providers; + ARAL *aral_handles; +} pbc = { + .spinlock = NETDATA_SPINLOCK_INITIALIZER, +}; + +static void provider_load_list(PROVIDER_META_HANDLE *h, WEVT_VARIANT *content, WEVT_VARIANT *property, + TXT_UTF16 *dst, struct provider_list *l, EVT_PUBLISHER_METADATA_PROPERTY_ID property_id); + +const char *provider_get_name(PROVIDER_META_HANDLE *p) { + return (p && p->provider && p->provider->name) ? p->provider->name : "__UNKNOWN PROVIDER__"; +} + +ND_UUID provider_get_uuid(PROVIDER_META_HANDLE *p) { + return (p && p->provider) ? 
p->provider->key.uuid : UUID_ZERO; +} + +static inline PROVIDER_KEY *provider_value_to_key(PROVIDER *p) { + return &p->key; +} + +static inline bool provider_cache_compar(PROVIDER_KEY *a, PROVIDER_KEY *b) { + return a->len == b->len && UUIDeq(a->uuid, b->uuid) && memcmp(a->wname, b->wname, a->len) == 0; +} + +void provider_cache_init(void) { + simple_hashtable_init_PROVIDER(&pbc.hashtable, 100000); + pbc.aral_providers = aral_create("wevt_providers", sizeof(PROVIDER), 0, 4096, NULL, NULL, NULL, false, true); + pbc.aral_handles = aral_create("wevt_handles", sizeof(PROVIDER_META_HANDLE), 0, 4096, NULL, NULL, NULL, false, true); +} + +static bool provider_property_get(PROVIDER_META_HANDLE *h, WEVT_VARIANT *content, EVT_PUBLISHER_METADATA_PROPERTY_ID property_id) { + DWORD bufferUsed = 0; + + if(!EvtGetPublisherMetadataProperty(h->hMetadata, property_id, 0, 0, NULL, &bufferUsed)) { + DWORD status = GetLastError(); + if (status != ERROR_INSUFFICIENT_BUFFER) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "EvtGetPublisherMetadataProperty() failed"); + goto cleanup; + } + } + + wevt_variant_resize(content, bufferUsed); + if (!EvtGetPublisherMetadataProperty(h->hMetadata, property_id, 0, content->size, content->data, &bufferUsed)) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "EvtGetPublisherMetadataProperty() failed after resize"); + goto cleanup; + } + + return true; + +cleanup: + return false; +} + +static bool provider_string_property_exists(PROVIDER_META_HANDLE *h, WEVT_VARIANT *content, EVT_PUBLISHER_METADATA_PROPERTY_ID property_id) { + if(!provider_property_get(h, content, property_id)) + return false; + + if(content->data->Type != EvtVarTypeString) + return false; + + if(!content->data->StringVal[0]) + return false; + + return true; +} + +static void provider_detect_platform(PROVIDER_META_HANDLE *h, WEVT_VARIANT *content) { + if(UUIDiszero(h->provider->key.uuid)) + h->provider->platform = WEVT_PLATFORM_WEL; + else if(h->hMetadata) { + if (provider_string_property_exists(h, content, EvtPublisherMetadataMessageFilePath) || + provider_string_property_exists(h, content, EvtPublisherMetadataResourceFilePath) || + provider_string_property_exists(h, content, EvtPublisherMetadataParameterFilePath)) + h->provider->platform = WEVT_PLATFORM_ETW; + else + // The provider cannot be opened, does not have any resource files (message, resource, parameter) + h->provider->platform = WEVT_PLATFORM_TL; + } + else h->provider->platform = WEVT_PLATFORM_ETW; +} + +WEVT_PROVIDER_PLATFORM provider_get_platform(PROVIDER_META_HANDLE *p) { + return p->provider->platform; +} + +PROVIDER_META_HANDLE *provider_get(ND_UUID uuid, LPCWSTR providerName) { + if(!providerName || !providerName[0]) + return NULL; + + PROVIDER_KEY key = { + .uuid = uuid, + .len = wcslen(providerName), + .wname = providerName, + }; + XXH64_hash_t hash = XXH3_64bits(providerName, wcslen(key.wname) * sizeof(*key.wname)); + + spinlock_lock(&pbc.spinlock); + + SIMPLE_HASHTABLE_SLOT_PROVIDER *slot = + simple_hashtable_get_slot_PROVIDER(&pbc.hashtable, hash, &key, true); + + bool load_it = false; + PROVIDER *p = SIMPLE_HASHTABLE_SLOT_DATA(slot); + if(!p) { + p = aral_callocz(pbc.aral_providers); + p->key.uuid = key.uuid; + p->key.len = key.len; + p->key.wname = wcsdup(key.wname); + p->name = strdupz(provider2utf8(key.wname)); + simple_hashtable_set_slot_PROVIDER(&pbc.hashtable, slot, hash, p); + load_it = true; + pbc.total_providers++; + } + + pid_t me = gettid_cached(); + PROVIDER_META_HANDLE *h; + for(h = p->handles; h ;h = h->next) { + // find the first that is 
mine, + // or the first not owned by anyone + if(!h->owner || h->owner == me) + break; + } + + if(!h) { + h = aral_callocz(pbc.aral_handles); + h->provider = p; + h->created_monotonic_ut = now_monotonic_usec(); + h->hMetadata = EvtOpenPublisherMetadata( + NULL, // Local machine + providerName, // Provider name + NULL, // Log file path (NULL for default) + 0, // Locale (0 for default locale) + 0 // Flags + ); + + // we put it at the beginning of the list + // to find it first if the same owner needs more locks on it + DOUBLE_LINKED_LIST_PREPEND_ITEM_UNSAFE(p->handles, h, prev, next); + pbc.total_handles++; + p->total_handles++; + p->available_handles++; + } + + if(!h->owner) { + fatal_assert(p->available_handles > 0); + p->available_handles--; + h->owner = me; + } + + h->locks++; + + if(load_it) { + WEVT_VARIANT content = { 0 }; + WEVT_VARIANT property = { 0 }; + TXT_UTF16 unicode = { 0 }; + + provider_detect_platform(h, &content); + provider_load_list(h, &content, &property, &unicode, &p->keyword, EvtPublisherMetadataKeywords); + provider_load_list(h, &content, &property, &unicode, &p->levels, EvtPublisherMetadataLevels); + provider_load_list(h, &content, &property, &unicode, &p->opcodes, EvtPublisherMetadataOpcodes); + provider_load_list(h, &content, &property, &unicode, &p->tasks, EvtPublisherMetadataTasks); + + txt_utf16_cleanup(&unicode); + wevt_variant_cleanup(&content); + wevt_variant_cleanup(&property); + } + + spinlock_unlock(&pbc.spinlock); + + return h; +} + +EVT_HANDLE provider_handle(PROVIDER_META_HANDLE *h) { + return h ? h->hMetadata : NULL; +} + +PROVIDER_META_HANDLE *provider_dup(PROVIDER_META_HANDLE *h) { + if(h) h->locks++; + return h; +} + +static void provider_meta_handle_delete(PROVIDER_META_HANDLE *h) { + PROVIDER *p = h->provider; + + DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(p->handles, h, prev, next); + + if(h->hMetadata) + EvtClose(h->hMetadata); + + aral_freez(pbc.aral_handles, h); + + fatal_assert(pbc.total_handles && p->total_handles && p->available_handles); + + pbc.total_handles--; + p->total_handles--; + + pbc.deleted_handles++; + p->deleted_handles++; + + p->available_handles--; +} + +void providers_release_unused_handles(void) { + usec_t now_ut = now_monotonic_usec(); + + spinlock_lock(&pbc.spinlock); + for(size_t i = 0; i < pbc.hashtable.size ; i++) { + SIMPLE_HASHTABLE_SLOT_PROVIDER *slot = &pbc.hashtable.hashtable[i]; + PROVIDER *p = SIMPLE_HASHTABLE_SLOT_DATA(slot); + if(!p) continue; + + PROVIDER_META_HANDLE *h = p->handles; + while(h) { + PROVIDER_META_HANDLE *next = h->next; + + if(!h->locks && (now_ut - h->created_monotonic_ut) >= WINDOWS_EVENTS_RELEASE_IDLE_PROVIDER_HANDLES_TIME_UT) + provider_meta_handle_delete(h); + + h = next; + } + } + spinlock_unlock(&pbc.spinlock); +} + +void provider_release(PROVIDER_META_HANDLE *h) { + if(!h) return; + pid_t me = gettid_cached(); + fatal_assert(h->owner == me); + fatal_assert(h->locks > 0); + if(--h->locks == 0) { + PROVIDER *p = h->provider; + + spinlock_lock(&pbc.spinlock); + h->owner = 0; + + if(++p->available_handles > MAX_OPEN_HANDLES_PER_PROVIDER) { + // there are too many idle handles on this provider + provider_meta_handle_delete(h); + } + else if(h->next) { + // it is not the last, put it at the end + DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(p->handles, h, prev, next); + DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(p->handles, h, prev, next); + } + + spinlock_unlock(&pbc.spinlock); + } +} + +// -------------------------------------------------------------------------------------------------------------------- 
+// load provider lists + +static bool wevt_get_property_from_array(WEVT_VARIANT *property, EVT_HANDLE handle, DWORD dwIndex, EVT_PUBLISHER_METADATA_PROPERTY_ID PropertyId) { + DWORD used = 0; + + if (!EvtGetObjectArrayProperty(handle, PropertyId, dwIndex, 0, property->size, property->data, &used)) { + DWORD status = GetLastError(); + if (status != ERROR_INSUFFICIENT_BUFFER) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "EvtGetObjectArrayProperty() failed"); + return false; + } + + wevt_variant_resize(property, used); + if (!EvtGetObjectArrayProperty(handle, PropertyId, dwIndex, 0, property->size, property->data, &used)) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "EvtGetObjectArrayProperty() failed"); + return false; + } + } + + property->used = used; + return true; +} + +// Comparison function for ascending order (for Levels, Opcodes, Tasks) +static int compare_ascending(const void *a, const void *b) { + struct provider_data *d1 = (struct provider_data *)a; + struct provider_data *d2 = (struct provider_data *)b; + + if (d1->value < d2->value) return -1; + if (d1->value > d2->value) return 1; + return 0; +} + +//// Comparison function for descending order (for Keywords) +//static int compare_descending(const void *a, const void *b) { +// struct provider_data *d1 = (struct provider_data *)a; +// struct provider_data *d2 = (struct provider_data *)b; +// +// if (d1->value > d2->value) return -1; +// if (d1->value < d2->value) return 1; +// return 0; +//} + +static void provider_load_list(PROVIDER_META_HANDLE *h, WEVT_VARIANT *content, WEVT_VARIANT *property, + TXT_UTF16 *dst, struct provider_list *l, EVT_PUBLISHER_METADATA_PROPERTY_ID property_id) { + if(!h || !h->hMetadata) return; + + EVT_PUBLISHER_METADATA_PROPERTY_ID name_id, message_id, value_id; + uint8_t value_bits = 32; + int (*compare_func)(const void *, const void *); + bool (*is_valid)(uint64_t, bool); + + switch(property_id) { + case EvtPublisherMetadataLevels: + name_id = EvtPublisherMetadataLevelName; + message_id = EvtPublisherMetadataLevelMessageID; + value_id = EvtPublisherMetadataLevelValue; + value_bits = 32; + compare_func = compare_ascending; + is_valid = is_valid_provider_level; + break; + + case EvtPublisherMetadataOpcodes: + name_id = EvtPublisherMetadataOpcodeName; + message_id = EvtPublisherMetadataOpcodeMessageID; + value_id = EvtPublisherMetadataOpcodeValue; + value_bits = 32; + is_valid = is_valid_provider_opcode; + compare_func = compare_ascending; + break; + + case EvtPublisherMetadataTasks: + name_id = EvtPublisherMetadataTaskName; + message_id = EvtPublisherMetadataTaskMessageID; + value_id = EvtPublisherMetadataTaskValue; + value_bits = 32; + is_valid = is_valid_provider_task; + compare_func = compare_ascending; + break; + + case EvtPublisherMetadataKeywords: + name_id = EvtPublisherMetadataKeywordName; + message_id = EvtPublisherMetadataKeywordMessageID; + value_id = EvtPublisherMetadataKeywordValue; + value_bits = 64; + is_valid = is_valid_provider_keyword; + compare_func = NULL; + break; + + default: + nd_log(NDLS_COLLECTORS, NDLP_ERR, "Internal Error: Can't handle property id %u", property_id); + return; + } + + EVT_HANDLE hMetadata = h->hMetadata; + EVT_HANDLE hArray = NULL; + DWORD itemCount = 0; + + // Get the metadata array for the list (e.g., opcodes, tasks, or levels) + if(!provider_property_get(h, content, property_id)) + goto cleanup; + + // Get the number of items (e.g., levels, tasks, or opcodes) + hArray = content->data->EvtHandleVal; + if (!EvtGetObjectArraySize(hArray, &itemCount)) { + 
nd_log(NDLS_COLLECTORS, NDLP_ERR, "EvtGetObjectArraySize() failed"); + goto cleanup; + } + + if (itemCount == 0) { + l->total = 0; + l->array = NULL; + goto cleanup; + } + + // Allocate memory for the list items + l->array = callocz(itemCount, sizeof(struct provider_data)); + l->total = itemCount; + + uint64_t min = UINT64_MAX, max = 0, mask = 0; + + // Iterate over the list and populate the entries + for (DWORD i = 0; i < itemCount; ++i) { + struct provider_data *d = &l->array[i]; + + // Get the value (e.g., opcode, task, or level) + if (wevt_get_property_from_array(property, hArray, i, value_id)) { + switch(value_bits) { + case 64: + d->value = wevt_field_get_uint64(property->data); + break; + + case 32: + d->value = wevt_field_get_uint32(property->data); + break; + } + + if(d->value < min) + min = d->value; + + if(d->value > max) + max = d->value; + + mask |= d->value; + + if(!is_valid(d->value, false)) + l->exceeds_data_type = true; + } + + // Get the message ID + if (wevt_get_property_from_array(property, hArray, i, message_id)) { + uint32_t messageID = wevt_field_get_uint32(property->data); + + if (messageID != (uint32_t)-1) { + if (EvtFormatMessage_utf16(dst, hMetadata, NULL, messageID, EvtFormatMessageId)) { + size_t len; + d->name = utf16_to_utf8_strdupz(dst->data, &len); + d->len = len; + } + } + } + + // Get the name if the message is missing + if (!d->name && wevt_get_property_from_array(property, hArray, i, name_id)) { + fatal_assert(property->data->Type == EvtVarTypeString); + size_t len; + d->name = utf16_to_utf8_strdupz(property->data->StringVal, &len); + d->len = len; + } + + // Calculate the hash for the name + if (d->name) + d->hash = XXH3_64bits(d->name, d->len); + } + + l->min = min; + l->max = max; + l->mask = mask; + + if(itemCount > 1 && compare_func != NULL) { + // Sort the array based on the value (ascending for all except keywords, descending for keywords) + qsort(l->array, itemCount, sizeof(struct provider_data), compare_func); + } + +cleanup: + if (hArray) + EvtClose(hArray); +} + +// -------------------------------------------------------------------------------------------------------------------- +// lookup functions + +// lookup bitmap metdata (returns a comma separated list of strings) +static bool provider_bitmap_metadata(TXT_UTF8 *dst, struct provider_list *l, uint64_t value) { + if(!(value & l->mask) || !l->total || !l->array || l->exceeds_data_type) + return false; + + // do not empty the buffer, there may be reserved keywords in it + // dst->used = 0; + + if(dst->used) + dst->used--; + + size_t added = 0; + for(size_t k = 0; value && k < l->total; k++) { + struct provider_data *d = &l->array[k]; + + if(d->value && (value & d->value) == d->value && d->name && d->len) { + const char *s = d->name; + size_t slen = d->len; + + // remove the mask from the value + value &= ~(d->value); + + txt_utf8_resize(dst, dst->used + slen + 2 + 1, true); + + if(dst->used) { + // add a comma and a space + dst->data[dst->used++] = ','; + dst->data[dst->used++] = ' '; + } + + memcpy(&dst->data[dst->used], s, slen); + dst->used += slen; + dst->src = TXT_SOURCE_PROVIDER; + added++; + } + } + + if(dst->used > 1) { + txt_utf8_resize(dst, dst->used + 1, true); + dst->data[dst->used++] = 0; + } + + fatal_assert(dst->used <= dst->size); + return added; +} + +//// lookup a single value (returns its string) +//static bool provider_value_metadata_linear(TXT_UTF8 *dst, struct provider_list *l, uint64_t value) { +// if(value < l->min || value > l->max || !l->total || !l->array || 
l->exceeds_data_type) +// return false; +// +// dst->used = 0; +// +// for(size_t k = 0; k < l->total; k++) { +// struct provider_data *d = &l->array[k]; +// +// if(d->value == value && d->name && d->len) { +// const char *s = d->name; +// size_t slen = d->len; +// +// txt_utf8_resize(dst, slen + 1, false); +// +// memcpy(dst->data, s, slen); +// dst->used = slen; +// dst->src = TXT_SOURCE_PROVIDER; +// +// break; +// } +// } +// +// if(dst->used) { +// txt_utf8_resize(dst, dst->used + 1, true); +// dst->data[dst->used++] = 0; +// } +// +// fatal_assert(dst->used <= dst->size); +// +// return (dst->used > 0); +//} + +static bool provider_value_metadata(TXT_UTF8 *dst, struct provider_list *l, uint64_t value) { + if(value < l->min || value > l->max || !l->total || !l->array || l->exceeds_data_type) + return false; + + // if(l->total < 3) return provider_value_metadata_linear(dst, l, value); + + dst->used = 0; + + size_t left = 0; + size_t right = l->total - 1; + + // Binary search within bounds + while (left <= right) { + size_t mid = left + (right - left) / 2; + struct provider_data *d = &l->array[mid]; + + if (d->value == value) { + // Value found, now check if it has a valid name and length + if (d->name && d->len) { + const char *s = d->name; + size_t slen = d->len; + + txt_utf8_resize(dst, slen + 1, false); + memcpy(dst->data, s, slen); + dst->used = slen; + dst->data[dst->used++] = 0; + dst->src = TXT_SOURCE_PROVIDER; + } + break; + } + + if (d->value < value) + left = mid + 1; + else { + if (mid == 0) break; + right = mid - 1; + } + } + + fatal_assert(dst->used <= dst->size); + return (dst->used > 0); +} + +// -------------------------------------------------------------------------------------------------------------------- +// public API to lookup metadata + +bool provider_keyword_cacheable(PROVIDER_META_HANDLE *h) { + return h && !h->provider->keyword.exceeds_data_type; +} + +bool provider_tasks_cacheable(PROVIDER_META_HANDLE *h) { + return h && !h->provider->tasks.exceeds_data_type; +} + +bool is_useful_provider_for_levels(PROVIDER_META_HANDLE *h) { + return h && !h->provider->levels.exceeds_data_type; +} + +bool provider_opcodes_cacheable(PROVIDER_META_HANDLE *h) { + return h && !h->provider->opcodes.exceeds_data_type; +} + +bool provider_get_keywords(TXT_UTF8 *dst, PROVIDER_META_HANDLE *h, uint64_t value) { + if(!h) return false; + return provider_bitmap_metadata(dst, &h->provider->keyword, value); +} + +bool provider_get_level(TXT_UTF8 *dst, PROVIDER_META_HANDLE *h, uint64_t value) { + if(!h) return false; + return provider_value_metadata(dst, &h->provider->levels, value); +} + +bool provider_get_task(TXT_UTF8 *dst, PROVIDER_META_HANDLE *h, uint64_t value) { + if(!h) return false; + return provider_value_metadata(dst, &h->provider->tasks, value); +} + +bool provider_get_opcode(TXT_UTF8 *dst, PROVIDER_META_HANDLE *h, uint64_t value) { + if(!h) return false; + return provider_value_metadata(dst, &h->provider->opcodes, value); +} diff --git a/src/collectors/windows-events.plugin/windows-events-providers.h b/src/collectors/windows-events.plugin/windows-events-providers.h new file mode 100644 index 000000000..b6d476c5c --- /dev/null +++ b/src/collectors/windows-events.plugin/windows-events-providers.h @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_WINDOWS_EVENTS_PROVIDERS_H +#define NETDATA_WINDOWS_EVENTS_PROVIDERS_H + +typedef enum __attribute__((packed)) { + WEVT_PLATFORM_UNKNOWN = 0, + WEVT_PLATFORM_WEL, + WEVT_PLATFORM_ETW, + WEVT_PLATFORM_TL, 
+} WEVT_PROVIDER_PLATFORM; + +#include "windows-events.h" + +struct provider_meta_handle; +typedef struct provider_meta_handle PROVIDER_META_HANDLE; + +PROVIDER_META_HANDLE *provider_get(ND_UUID uuid, LPCWSTR providerName); +void provider_release(PROVIDER_META_HANDLE *h); +EVT_HANDLE provider_handle(PROVIDER_META_HANDLE *h); +PROVIDER_META_HANDLE *provider_dup(PROVIDER_META_HANDLE *h); + +void providers_release_unused_handles(void); + +const char *provider_get_name(PROVIDER_META_HANDLE *p); +ND_UUID provider_get_uuid(PROVIDER_META_HANDLE *p); + +void provider_cache_init(void); + +bool provider_keyword_cacheable(PROVIDER_META_HANDLE *h); +bool provider_tasks_cacheable(PROVIDER_META_HANDLE *h); +bool is_useful_provider_for_levels(PROVIDER_META_HANDLE *h); +bool provider_opcodes_cacheable(PROVIDER_META_HANDLE *h); + +bool provider_get_keywords(TXT_UTF8 *dst, PROVIDER_META_HANDLE *h, uint64_t value); +bool provider_get_level(TXT_UTF8 *dst, PROVIDER_META_HANDLE *h, uint64_t value); +bool provider_get_task(TXT_UTF8 *dst, PROVIDER_META_HANDLE *h, uint64_t value); +bool provider_get_opcode(TXT_UTF8 *dst, PROVIDER_META_HANDLE *h, uint64_t value); +WEVT_PROVIDER_PLATFORM provider_get_platform(PROVIDER_META_HANDLE *p); + +#endif //NETDATA_WINDOWS_EVENTS_PROVIDERS_H diff --git a/src/collectors/windows-events.plugin/windows-events-query-builder.c b/src/collectors/windows-events.plugin/windows-events-query-builder.c new file mode 100644 index 000000000..75c6fbdca --- /dev/null +++ b/src/collectors/windows-events.plugin/windows-events-query-builder.c @@ -0,0 +1,107 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "windows-events-query-builder.h" + +// -------------------------------------------------------------------------------------------------------------------- +// query without XPath + +typedef struct static_utf8_8k { + char buffer[8192]; + size_t size; + size_t len; +} STATIC_BUF_8K; + +typedef struct static_unicode_16k { + wchar_t buffer[16384]; + size_t size; + size_t len; +} STATIC_UNI_16K; + +static bool wevt_foreach_selected_value_cb(FACETS *facets __maybe_unused, size_t id, const char *key, const char *value, void *data) { + STATIC_BUF_8K *b = data; + + b->len += snprintfz(&b->buffer[b->len], b->size - b->len, + "%s%s=%s", + id ? 
" or " : "", key, value); + + return b->len < b->size; +} + +wchar_t *wevt_generate_query_no_xpath(LOGS_QUERY_STATUS *lqs, BUFFER *wb) { + static __thread STATIC_UNI_16K q = { + .size = sizeof(q.buffer) / sizeof(wchar_t), + .len = 0, + }; + static __thread STATIC_BUF_8K b = { + .size = sizeof(b.buffer) / sizeof(char), + .len = 0, + }; + + lqs_query_timeframe(lqs, ANCHOR_DELTA_UT); + + usec_t seek_to = lqs->query.start_ut; + if(lqs->rq.direction == FACETS_ANCHOR_DIRECTION_BACKWARD) + // windows events queries are limited to millisecond resolution + // so, in order not to lose data, we have to add + // a millisecond when the direction is backward + seek_to += USEC_PER_MS; + + // Convert the microseconds since Unix epoch to FILETIME (used in Windows APIs) + FILETIME fileTime = os_unix_epoch_ut_to_filetime(seek_to); + + // Convert FILETIME to SYSTEMTIME for use in XPath + SYSTEMTIME systemTime; + if (!FileTimeToSystemTime(&fileTime, &systemTime)) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "FileTimeToSystemTime() failed"); + return NULL; + } + + // Format SYSTEMTIME into ISO 8601 format (YYYY-MM-DDTHH:MM:SS.sssZ) + q.len = swprintf(q.buffer, q.size, + L"Event/System[TimeCreated[@SystemTime%ls\"%04d-%02d-%02dT%02d:%02d:%02d.%03dZ\"]", + lqs->rq.direction == FACETS_ANCHOR_DIRECTION_BACKWARD ? L"<=" : L">=", + systemTime.wYear, systemTime.wMonth, systemTime.wDay, + systemTime.wHour, systemTime.wMinute, systemTime.wSecond, systemTime.wMilliseconds); + + if(lqs->rq.slice) { + b.len = snprintf(b.buffer, b.size, " and ("); + if (facets_foreach_selected_value_in_key( + lqs->facets, + WEVT_FIELD_LEVEL, + sizeof(WEVT_FIELD_LEVEL) - 1, + used_hashes_registry, + wevt_foreach_selected_value_cb, + &b)) { + b.len += snprintf(&b.buffer[b.len], b.size - b.len, ")"); + if (b.len < b.size) { + utf82unicode(&q.buffer[q.len], q.size - q.len, b.buffer); + q.len = wcslen(q.buffer); + } + } + + b.len = snprintf(b.buffer, b.size, " and ("); + if (facets_foreach_selected_value_in_key( + lqs->facets, + WEVT_FIELD_EVENTID, + sizeof(WEVT_FIELD_EVENTID) - 1, + used_hashes_registry, + wevt_foreach_selected_value_cb, + &b)) { + b.len += snprintf(&b.buffer[b.len], b.size - b.len, ")"); + if (b.len < b.size) { + utf82unicode(&q.buffer[q.len], q.size - q.len, b.buffer); + q.len = wcslen(q.buffer); + } + } + } + + q.len += swprintf(&q.buffer[q.len], q.size - q.len, L"]"); + + buffer_json_member_add_string(wb, "_query", channel2utf8(q.buffer)); + + return q.buffer; +} + +// -------------------------------------------------------------------------------------------------------------------- +// query with XPath + diff --git a/src/collectors/windows-events.plugin/windows-events-query-builder.h b/src/collectors/windows-events.plugin/windows-events-query-builder.h new file mode 100644 index 000000000..80136e0aa --- /dev/null +++ b/src/collectors/windows-events.plugin/windows-events-query-builder.h @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_WINDOWS_EVENTS_QUERY_BUILDER_H +#define NETDATA_WINDOWS_EVENTS_QUERY_BUILDER_H + +#include "windows-events.h" + +wchar_t *wevt_generate_query_no_xpath(LOGS_QUERY_STATUS *lqs, BUFFER *wb); + +#endif //NETDATA_WINDOWS_EVENTS_QUERY_BUILDER_H diff --git a/src/collectors/windows-events.plugin/windows-events-query-evt-variant.c b/src/collectors/windows-events.plugin/windows-events-query-evt-variant.c new file mode 100644 index 000000000..ee3aa382b --- /dev/null +++ b/src/collectors/windows-events.plugin/windows-events-query-evt-variant.c @@ -0,0 +1,354 @@ +// 
SPDX-License-Identifier: GPL-3.0-or-later + +#include "windows-events.h" +#include // For SID string conversion + +// Function to append the separator if the buffer is not empty +static inline void append_separator_if_needed(BUFFER *b, const char *separator) { + if (buffer_strlen(b) > 0 && separator != NULL) + buffer_strcat(b, separator); +} + +// Helper function to convert UTF16 strings to UTF8 and append to the buffer +static inline void append_utf16(BUFFER *b, LPCWSTR utf16Str, const char *separator) { + if (!utf16Str || !*utf16Str) return; + + append_separator_if_needed(b, separator); + + size_t remaining = b->size - b->len; + if(remaining < 128) { + buffer_need_bytes(b, 128); + remaining = b->size - b->len; + } + + bool truncated = false; + size_t used = utf16_to_utf8(&b->buffer[b->len], remaining, utf16Str, -1, &truncated); + if(truncated) { + // we need to resize + size_t needed = utf16_to_utf8(NULL, 0, utf16Str, -1, NULL); // find the size needed + buffer_need_bytes(b, needed); + remaining = b->size - b->len; + used = utf16_to_utf8(&b->buffer[b->len], remaining, utf16Str, -1, NULL); + } + + if(used) { + b->len += used - 1; + + internal_fatal(buffer_strlen(b) != strlen(buffer_tostring(b)), + "Buffer length mismatch."); + } +} + +// Function to append binary data to the buffer +static inline void append_binary(BUFFER *b, PBYTE data, DWORD size, const char *separator) { + if (data == NULL || size == 0) return; + + append_separator_if_needed(b, separator); + + buffer_need_bytes(b, size * 4); + for (DWORD i = 0; i < size; i++) { + uint8_t value = data[i]; + b->buffer[b->len++] = hex_digits[(value & 0xf0) >> 4]; + b->buffer[b->len++] = hex_digits[(value & 0x0f)]; + } +} + +// Function to append size_t to the buffer +static inline void append_size_t(BUFFER *b, size_t size, const char *separator) { + append_separator_if_needed(b, separator); + buffer_print_uint64(b, size); +} + +// Function to append HexInt32 in hexadecimal format +static inline void append_uint32_hex(BUFFER *b, UINT32 n, const char *separator) { + append_separator_if_needed(b, separator); + buffer_print_uint64_hex(b, n); +} + +// Function to append HexInt64 in hexadecimal format +static inline void append_uint64_hex(BUFFER *b, UINT64 n, const char *separator) { + append_separator_if_needed(b, separator); + buffer_print_uint64_hex(b, n); +} + +// Function to append various data types to the buffer +static inline void append_uint64(BUFFER *b, UINT64 n, const char *separator) { + append_separator_if_needed(b, separator); + buffer_print_uint64(b, n); +} + +static inline void append_int64(BUFFER *b, INT64 n, const char *separator) { + append_separator_if_needed(b, separator); + buffer_print_int64(b, n); +} + +static inline void append_double(BUFFER *b, double n, const char *separator) { + append_separator_if_needed(b, separator); + buffer_print_netdata_double(b, n); +} + +static inline void append_guid(BUFFER *b, GUID *guid, const char *separator) { + fatal_assert(sizeof(GUID) == sizeof(nd_uuid_t)); + + append_separator_if_needed(b, separator); + + ND_UUID *uuid = (ND_UUID *)guid; + buffer_need_bytes(b, UUID_STR_LEN); + uuid_unparse_lower(uuid->uuid, &b->buffer[b->len]); + b->len += UUID_STR_LEN - 1; + + internal_fatal(buffer_strlen(b) != strlen(buffer_tostring(b)), + "Buffer length mismatch."); +} + +static inline void append_systime(BUFFER *b, SYSTEMTIME *st, const char *separator) { + append_separator_if_needed(b, separator); + buffer_sprintf(b, "%04d-%02d-%02d %02d:%02d:%02d", + st->wYear, st->wMonth, st->wDay, st->wHour, 
st->wMinute, st->wSecond); +} + +static inline void append_filetime(BUFFER *b, FILETIME *ft, const char *separator) { + SYSTEMTIME st; + if (FileTimeToSystemTime(ft, &st)) + append_systime(b, &st, separator); +} + +static inline void append_sid(BUFFER *b, PSID sid, const char *separator) { + cached_sid_to_buffer_append(sid, b, separator); +} + +static inline void append_sbyte(BUFFER *b, INT8 n, const char *separator) { + append_separator_if_needed(b, separator); + buffer_print_int64(b, n); +} + +static inline void append_byte(BUFFER *b, UINT8 n, const char *separator) { + append_separator_if_needed(b, separator); + buffer_print_uint64(b, n); +} + +static inline void append_int16(BUFFER *b, INT16 n, const char *separator) { + append_separator_if_needed(b, separator); + buffer_print_int64(b, n); +} + +static inline void append_uint16(BUFFER *b, UINT16 n, const char *separator) { + append_separator_if_needed(b, separator); + buffer_print_uint64(b, n); +} + +static inline void append_int32(BUFFER *b, INT32 n, const char *separator) { + append_separator_if_needed(b, separator); + buffer_print_int64(b, n); +} + +static inline void append_uint32(BUFFER *b, UINT32 n, const char *separator) { + append_separator_if_needed(b, separator); + buffer_print_uint64(b, n); +} + +// Function to append EVT_HANDLE to the buffer +static inline void append_evt_handle(BUFFER *b, EVT_HANDLE h, const char *separator) { + append_separator_if_needed(b, separator); + buffer_print_uint64_hex(b, (uintptr_t)h); +} + +// Function to append XML data (UTF-16) to the buffer +static inline void append_evt_xml(BUFFER *b, LPCWSTR xmlData, const char *separator) { + append_utf16(b, xmlData, separator); // XML data is essentially UTF-16 string +} + +void evt_variant_to_buffer(BUFFER *b, EVT_VARIANT *ev, const char *separator) { + if(ev->Type == EvtVarTypeNull) return; + + if (ev->Type & EVT_VARIANT_TYPE_ARRAY) { + for (DWORD i = 0; i < ev->Count; i++) { + switch (ev->Type & EVT_VARIANT_TYPE_MASK) { + case EvtVarTypeString: + append_utf16(b, ev->StringArr[i], separator); + break; + + case EvtVarTypeAnsiString: + if (ev->AnsiStringArr[i] != NULL) { + append_utf16(b, (LPCWSTR)ev->AnsiStringArr[i], separator); + } + break; + + case EvtVarTypeSByte: + append_sbyte(b, ev->SByteArr[i], separator); + break; + + case EvtVarTypeByte: + append_byte(b, ev->ByteArr[i], separator); + break; + + case EvtVarTypeInt16: + append_int16(b, ev->Int16Arr[i], separator); + break; + + case EvtVarTypeUInt16: + append_uint16(b, ev->UInt16Arr[i], separator); + break; + + case EvtVarTypeInt32: + append_int32(b, ev->Int32Arr[i], separator); + break; + + case EvtVarTypeUInt32: + append_uint32(b, ev->UInt32Arr[i], separator); + break; + + case EvtVarTypeInt64: + append_int64(b, ev->Int64Arr[i], separator); + break; + + case EvtVarTypeUInt64: + append_uint64(b, ev->UInt64Arr[i], separator); + break; + + case EvtVarTypeSingle: + append_double(b, ev->SingleArr[i], separator); + break; + + case EvtVarTypeDouble: + append_double(b, ev->DoubleArr[i], separator); + break; + + case EvtVarTypeGuid: + append_guid(b, &ev->GuidArr[i], separator); + break; + + case EvtVarTypeFileTime: + append_filetime(b, &ev->FileTimeArr[i], separator); + break; + + case EvtVarTypeSysTime: + append_systime(b, &ev->SysTimeArr[i], separator); + break; + + case EvtVarTypeSid: + append_sid(b, ev->SidArr[i], separator); + break; + + case EvtVarTypeBinary: + append_binary(b, ev->BinaryVal, ev->Count, separator); + break; + + case EvtVarTypeSizeT: + append_size_t(b, ev->SizeTArr[i], separator); 
+ break; + + case EvtVarTypeHexInt32: + append_uint32_hex(b, ev->UInt32Arr[i], separator); + break; + + case EvtVarTypeHexInt64: + append_uint64_hex(b, ev->UInt64Arr[i], separator); + break; + + case EvtVarTypeEvtHandle: + append_evt_handle(b, ev->EvtHandleVal, separator); + break; + + case EvtVarTypeEvtXml: + append_evt_xml(b, ev->XmlValArr[i], separator); + break; + + default: + // Skip unknown array types + break; + } + } + } else { + switch (ev->Type & EVT_VARIANT_TYPE_MASK) { + case EvtVarTypeNull: + // Do nothing for null types + break; + + case EvtVarTypeString: + append_utf16(b, ev->StringVal, separator); + break; + + case EvtVarTypeAnsiString: + append_utf16(b, (LPCWSTR)ev->AnsiStringVal, separator); + break; + + case EvtVarTypeSByte: + append_sbyte(b, ev->SByteVal, separator); + break; + + case EvtVarTypeByte: + append_byte(b, ev->ByteVal, separator); + break; + + case EvtVarTypeInt16: + append_int16(b, ev->Int16Val, separator); + break; + + case EvtVarTypeUInt16: + append_uint16(b, ev->UInt16Val, separator); + break; + + case EvtVarTypeInt32: + append_int32(b, ev->Int32Val, separator); + break; + + case EvtVarTypeUInt32: + append_uint32(b, ev->UInt32Val, separator); + break; + + case EvtVarTypeInt64: + append_int64(b, ev->Int64Val, separator); + break; + + case EvtVarTypeUInt64: + append_uint64(b, ev->UInt64Val, separator); + break; + + case EvtVarTypeSingle: + append_double(b, ev->SingleVal, separator); + break; + + case EvtVarTypeDouble: + append_double(b, ev->DoubleVal, separator); + break; + + case EvtVarTypeBoolean: + append_separator_if_needed(b, separator); + buffer_strcat(b, ev->BooleanVal ? "true" : "false"); + break; + + case EvtVarTypeGuid: + append_guid(b, ev->GuidVal, separator); + break; + + case EvtVarTypeBinary: + append_binary(b, ev->BinaryVal, ev->Count, separator); + break; + + case EvtVarTypeSizeT: + append_size_t(b, ev->SizeTVal, separator); + break; + + case EvtVarTypeHexInt32: + append_uint32_hex(b, ev->UInt32Val, separator); + break; + + case EvtVarTypeHexInt64: + append_uint64_hex(b, ev->UInt64Val, separator); + break; + + case EvtVarTypeEvtHandle: + append_evt_handle(b, ev->EvtHandleVal, separator); + break; + + case EvtVarTypeEvtXml: + append_evt_xml(b, ev->XmlVal, separator); + break; + + default: + // Skip unknown types + break; + } + } +} diff --git a/src/collectors/windows-events.plugin/windows-events-query.c b/src/collectors/windows-events.plugin/windows-events-query.c new file mode 100644 index 000000000..fefa72829 --- /dev/null +++ b/src/collectors/windows-events.plugin/windows-events-query.c @@ -0,0 +1,717 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "windows-events.h" + +static void wevt_event_done(WEVT_LOG *log); + +static uint64_t wevt_log_file_size(const wchar_t *channel); + +// -------------------------------------------------------------------------------------------------------------------- + +static const char *EvtGetExtendedStatus_utf8(void) { + static __thread wchar_t wbuf[4096]; + static __thread char buf[4096]; + DWORD wbuf_used = 0; + + if(EvtGetExtendedStatus(sizeof(wbuf) / sizeof(wchar_t), wbuf, &wbuf_used) == ERROR_SUCCESS) { + wbuf[sizeof(wbuf) / sizeof(wchar_t) - 1] = 0; + unicode2utf8(buf, sizeof(buf), wbuf); + } + else + buf[0] = '\0'; + + // the EvtGetExtendedStatus() may be successful with an empty message + if(!buf[0]) + strncpyz(buf, "no additional information", sizeof(buf) - 1); + + return buf; +} + +// 
-------------------------------------------------------------------------------------------------------------------- + +bool EvtFormatMessage_utf16( + TXT_UTF16 *dst, EVT_HANDLE hMetadata, EVT_HANDLE hEvent, DWORD dwMessageId, EVT_FORMAT_MESSAGE_FLAGS flags) { + dst->used = 0; + + DWORD size = 0; + if(!dst->data) { + EvtFormatMessage(hMetadata, hEvent, dwMessageId, 0, NULL, flags, 0, NULL, &size); + if(!size) { + // nd_log(NDLS_COLLECTORS, NDLP_ERR, "EvtFormatMessage() to get message size failed."); + goto cleanup; + } + txt_utf16_resize(dst, size, false); + } + + // First, try to get the message using the existing buffer + if (!EvtFormatMessage(hMetadata, hEvent, dwMessageId, 0, NULL, flags, dst->size, dst->data, &size) || !dst->data) { + if (dst->data && GetLastError() != ERROR_INSUFFICIENT_BUFFER) { + // nd_log(NDLS_COLLECTORS, NDLP_ERR, "EvtFormatMessage() failed."); + goto cleanup; + } + + // Try again with the resized buffer + txt_utf16_resize(dst, size, false); + if (!EvtFormatMessage(hMetadata, hEvent, dwMessageId, 0, NULL, flags, dst->size, dst->data, &size)) { + // nd_log(NDLS_COLLECTORS, NDLP_ERR, "EvtFormatMessage() failed after resizing buffer."); + goto cleanup; + } + } + + // make sure it is null terminated + if(size <= dst->size) + dst->data[size - 1] = 0; + else + dst->data[dst->size - 1] = 0; + + // unfortunately we have to calculate the length every time + // the size returned may not be the length of the dst string + dst->used = wcslen(dst->data) + 1; + + return true; + +cleanup: + dst->used = 0; + return false; +} + +static bool EvtFormatMessage_utf8( + TXT_UTF16 *tmp, PROVIDER_META_HANDLE *p, EVT_HANDLE hEvent, + TXT_UTF8 *dst, EVT_FORMAT_MESSAGE_FLAGS flags) { + + dst->src = TXT_SOURCE_EVENT_LOG; + + if(EvtFormatMessage_utf16(tmp, provider_handle(p), hEvent, 0, flags)) + return txt_utf16_to_utf8(dst, tmp); + + txt_utf8_empty(dst); + return false; +} + +bool EvtFormatMessage_Event_utf8(TXT_UTF16 *tmp, PROVIDER_META_HANDLE *p, EVT_HANDLE hEvent, TXT_UTF8 *dst) { + return EvtFormatMessage_utf8(tmp, p, hEvent, dst, EvtFormatMessageEvent); +} + +bool EvtFormatMessage_Xml_utf8(TXT_UTF16 *tmp, PROVIDER_META_HANDLE *p, EVT_HANDLE hEvent, TXT_UTF8 *dst) { + return EvtFormatMessage_utf8(tmp, p, hEvent, dst, EvtFormatMessageXml); +} + +// -------------------------------------------------------------------------------------------------------------------- + +static void wevt_get_field_from_cache( + WEVT_LOG *log, uint64_t value, PROVIDER_META_HANDLE *h, + TXT_UTF8 *dst, const ND_UUID *provider, + WEVT_FIELD_TYPE cache_type, EVT_FORMAT_MESSAGE_FLAGS flags) { + + if (field_cache_get(cache_type, provider, value, dst)) + return; + + EvtFormatMessage_utf8(&log->ops.unicode, h, log->hEvent, dst, flags); + field_cache_set(cache_type, provider, value, dst); +} + +// -------------------------------------------------------------------------------------------------------------------- +// Level + +#define SET_LEN_AND_RETURN(constant) *len = sizeof(constant) - 1; return constant + +static inline const char *wevt_level_hardcoded(uint64_t level, size_t *len) { + switch(level) { + case WEVT_LEVEL_NONE: SET_LEN_AND_RETURN(WEVT_LEVEL_NAME_NONE); + case WEVT_LEVEL_CRITICAL: SET_LEN_AND_RETURN(WEVT_LEVEL_NAME_CRITICAL); + case WEVT_LEVEL_ERROR: SET_LEN_AND_RETURN(WEVT_LEVEL_NAME_ERROR); + case WEVT_LEVEL_WARNING: SET_LEN_AND_RETURN(WEVT_LEVEL_NAME_WARNING); + case WEVT_LEVEL_INFORMATION: SET_LEN_AND_RETURN(WEVT_LEVEL_NAME_INFORMATION); + case WEVT_LEVEL_VERBOSE: 
SET_LEN_AND_RETURN(WEVT_LEVEL_NAME_VERBOSE); + default: *len = 0; return NULL; + } +} + +static void wevt_get_level(WEVT_LOG *log, WEVT_EVENT *ev, PROVIDER_META_HANDLE *h) { + TXT_UTF8 *dst = &log->ops.level; + uint64_t value = ev->level; + + txt_utf8_empty(dst); + + EVT_FORMAT_MESSAGE_FLAGS flags = EvtFormatMessageLevel; + WEVT_FIELD_TYPE cache_type = WEVT_FIELD_TYPE_LEVEL; + bool is_provider = is_valid_provider_level(value, true); + + if(!is_provider) { + size_t len; + const char *hardcoded = wevt_level_hardcoded(value, &len); + if(hardcoded) { + txt_utf8_set(dst, hardcoded, len); + dst->src = TXT_SOURCE_HARDCODED; + } + else { + // since this is not a provider value + // we expect to get the system description of it + wevt_get_field_from_cache(log, value, h, dst, &ev->provider, cache_type, flags); + } + } + else if (!provider_get_level(dst, h, value)) { + // not found in the manifest, get it from the cache + wevt_get_field_from_cache(log, value, h, dst, &ev->provider, cache_type, flags); + } + + txt_utf8_set_numeric_if_empty( + dst, WEVT_PREFIX_LEVEL, sizeof(WEVT_PREFIX_LEVEL) - 1, ev->level); +} + +// -------------------------------------------------------------------------------------------------------------------- +// Opcode + +static inline const char *wevt_opcode_hardcoded(uint64_t opcode, size_t *len) { + switch(opcode) { + case WEVT_OPCODE_INFO: SET_LEN_AND_RETURN(WEVT_OPCODE_NAME_INFO); + case WEVT_OPCODE_START: SET_LEN_AND_RETURN(WEVT_OPCODE_NAME_START); + case WEVT_OPCODE_STOP: SET_LEN_AND_RETURN(WEVT_OPCODE_NAME_STOP); + case WEVT_OPCODE_DC_START: SET_LEN_AND_RETURN(WEVT_OPCODE_NAME_DC_START); + case WEVT_OPCODE_DC_STOP: SET_LEN_AND_RETURN(WEVT_OPCODE_NAME_DC_STOP); + case WEVT_OPCODE_EXTENSION: SET_LEN_AND_RETURN(WEVT_OPCODE_NAME_EXTENSION); + case WEVT_OPCODE_REPLY: SET_LEN_AND_RETURN(WEVT_OPCODE_NAME_REPLY); + case WEVT_OPCODE_RESUME: SET_LEN_AND_RETURN(WEVT_OPCODE_NAME_RESUME); + case WEVT_OPCODE_SUSPEND: SET_LEN_AND_RETURN(WEVT_OPCODE_NAME_SUSPEND); + case WEVT_OPCODE_SEND: SET_LEN_AND_RETURN(WEVT_OPCODE_NAME_SEND); + case WEVT_OPCODE_RECEIVE: SET_LEN_AND_RETURN(WEVT_OPCODE_NAME_RECEIVE); + default: *len = 0; return NULL; + } +} + +static void wevt_get_opcode(WEVT_LOG *log, WEVT_EVENT *ev, PROVIDER_META_HANDLE *h) { + TXT_UTF8 *dst = &log->ops.opcode; + uint64_t value = ev->opcode; + + txt_utf8_empty(dst); + + EVT_FORMAT_MESSAGE_FLAGS flags = EvtFormatMessageOpcode; + WEVT_FIELD_TYPE cache_type = WEVT_FIELD_TYPE_OPCODE; + bool is_provider = is_valid_provider_opcode(value, true); + + if(!is_provider) { + size_t len; + const char *hardcoded = wevt_opcode_hardcoded(value, &len); + if(hardcoded) { + txt_utf8_set(dst, hardcoded, len); + dst->src = TXT_SOURCE_HARDCODED; + } + else { + // since this is not a provider value + // we expect to get the system description of it + wevt_get_field_from_cache(log, value, h, dst, &ev->provider, cache_type, flags); + } + } + else if (!provider_get_opcode(dst, h, value)) { + // not found in the manifest, get it from the cache + wevt_get_field_from_cache(log, value, h, dst, &ev->provider, cache_type, flags); + } + + txt_utf8_set_numeric_if_empty( + dst, WEVT_PREFIX_OPCODE, sizeof(WEVT_PREFIX_OPCODE) - 1, ev->opcode); +} + +// -------------------------------------------------------------------------------------------------------------------- +// Task + +static const char *wevt_task_hardcoded(uint64_t task, size_t *len) { + switch(task) { + case WEVT_TASK_NONE: SET_LEN_AND_RETURN(WEVT_TASK_NAME_NONE); + default: *len = 0; return NULL; + } +} 
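+
+// wevt_get_task() below resolves a task value with the same fallback chain used
+// for levels and opcodes above: hardcoded well-known names first, then the
+// provider's manifest, then the per-provider field cache (which queries the
+// system via EvtFormatMessage() on a miss), and finally a numeric representation.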
+ +static void wevt_get_task(WEVT_LOG *log, WEVT_EVENT *ev, PROVIDER_META_HANDLE *h) { + TXT_UTF8 *dst = &log->ops.task; + uint64_t value = ev->task; + + txt_utf8_empty(dst); + + EVT_FORMAT_MESSAGE_FLAGS flags = EvtFormatMessageTask; + WEVT_FIELD_TYPE cache_type = WEVT_FIELD_TYPE_TASK; + bool is_provider = is_valid_provider_task(value, true); + + if(!is_provider) { + size_t len; + const char *hardcoded = wevt_task_hardcoded(value, &len); + if(hardcoded) { + txt_utf8_set(dst, hardcoded, len); + dst->src = TXT_SOURCE_HARDCODED; + } + else { + // since this is not a provider value + // we expect to get the system description of it + wevt_get_field_from_cache(log, value, h, dst, &ev->provider, cache_type, flags); + } + } + else if (!provider_get_task(dst, h, value)) { + // not found in the manifest, get it from the cache + wevt_get_field_from_cache(log, value, h, dst, &ev->provider, cache_type, flags); + } + + txt_utf8_set_numeric_if_empty( + dst, WEVT_PREFIX_TASK, sizeof(WEVT_PREFIX_TASK) - 1, ev->task); +} + +// -------------------------------------------------------------------------------------------------------------------- +// Keyword + +#define SET_BITS(msk, txt) { .mask = msk, .name = txt, .len = sizeof(txt) - 1, } + +static uint64_t wevt_keyword_handle_reserved(uint64_t value, TXT_UTF8 *dst) { + struct { + uint64_t mask; + const char *name; + size_t len; + } bits[] = { + SET_BITS(WEVT_KEYWORD_EVENTLOG_CLASSIC, WEVT_KEYWORD_NAME_EVENTLOG_CLASSIC), + SET_BITS(WEVT_KEYWORD_CORRELATION_HINT, WEVT_KEYWORD_NAME_CORRELATION_HINT), + SET_BITS(WEVT_KEYWORD_AUDIT_SUCCESS, WEVT_KEYWORD_NAME_AUDIT_SUCCESS), + SET_BITS(WEVT_KEYWORD_AUDIT_FAILURE, WEVT_KEYWORD_NAME_AUDIT_FAILURE), + SET_BITS(WEVT_KEYWORD_SQM, WEVT_KEYWORD_NAME_SQM), + SET_BITS(WEVT_KEYWORD_WDI_DIAG, WEVT_KEYWORD_NAME_WDI_DIAG), + SET_BITS(WEVT_KEYWORD_WDI_CONTEXT, WEVT_KEYWORD_NAME_WDI_CONTEXT), + SET_BITS(WEVT_KEYWORD_RESPONSE_TIME, WEVT_KEYWORD_NAME_RESPONSE_TIME), + }; + + txt_utf8_empty(dst); + + for(size_t i = 0; i < sizeof(bits) / sizeof(bits[0]) ;i++) { + if((value & bits[i].mask) == bits[i].mask) { + txt_utf8_add_keywords_separator_if_needed(dst); + txt_utf8_append(dst, bits[i].name, bits[i].len); + value &= ~(bits[i].mask); + dst->src = TXT_SOURCE_HARDCODED; + } + } + + // return it without any remaining reserved bits + return value & 0x0000FFFFFFFFFFFF; +} + +static void wevt_get_keyword(WEVT_LOG *log, WEVT_EVENT *ev, PROVIDER_META_HANDLE *h) { + TXT_UTF8 *dst = &log->ops.keywords; + + if(ev->keywords == WEVT_KEYWORD_NONE) { + txt_utf8_set(dst, WEVT_KEYWORD_NAME_NONE, sizeof(WEVT_KEYWORD_NAME_NONE) - 1); + dst->src = TXT_SOURCE_HARDCODED; + } + + uint64_t value = wevt_keyword_handle_reserved(ev->keywords, dst); + + EVT_FORMAT_MESSAGE_FLAGS flags = EvtFormatMessageKeyword; + WEVT_FIELD_TYPE cache_type = WEVT_FIELD_TYPE_KEYWORD; + + if(!value && dst->used <= 1) { + // no hardcoded info in the buffer, make it None + txt_utf8_set(dst, WEVT_KEYWORD_NAME_NONE, sizeof(WEVT_KEYWORD_NAME_NONE) - 1); + dst->src = TXT_SOURCE_HARDCODED; + } + else if (value && !provider_get_keywords(dst, h, value) && dst->used <= 1) { + // the provider did not provide any info and the description is still empty. 
+ // the system returns 1 keyword, the highest bit, not a list + // so, when we call the system, we pass the original value (ev->keywords) + wevt_get_field_from_cache(log, ev->keywords, h, dst, &ev->provider, cache_type, flags); + } + + txt_utf8_set_hex_if_empty( + dst, WEVT_PREFIX_KEYWORDS, sizeof(WEVT_PREFIX_KEYWORDS) - 1, ev->keywords); +} + +// -------------------------------------------------------------------------------------------------------------------- +// Fetching Events + +static inline bool wEvtRender(WEVT_LOG *log, EVT_HANDLE context, WEVT_VARIANT *raw) { + DWORD bytes_used = 0, property_count = 0; + if (!EvtRender(context, log->hEvent, EvtRenderEventValues, raw->size, raw->data, &bytes_used, &property_count)) { + // information exceeds the allocated space + if (GetLastError() != ERROR_INSUFFICIENT_BUFFER) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "EvtRender() failed, hRenderSystemContext: 0x%lx, hEvent: 0x%lx, content: 0x%lx, size: %u, extended info: %s", + (uintptr_t)context, (uintptr_t)log->hEvent, (uintptr_t)raw->data, raw->size, + EvtGetExtendedStatus_utf8()); + return false; + } + + wevt_variant_resize(raw, bytes_used); + if (!EvtRender(context, log->hEvent, EvtRenderEventValues, raw->size, raw->data, &bytes_used, &property_count)) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "EvtRender() failed, after bytes_used increase, extended info: %s", + EvtGetExtendedStatus_utf8()); + return false; + } + } + raw->used = bytes_used; + raw->count = property_count; + + return true; +} + +static bool wevt_get_next_event_one(WEVT_LOG *log, WEVT_EVENT *ev) { + bool ret = false; + + if(!wEvtRender(log, log->hRenderSystemContext, &log->ops.raw.system)) + goto cleanup; + + EVT_VARIANT *content = log->ops.raw.system.data; + + ev->id = wevt_field_get_uint64(&content[EvtSystemEventRecordId]); + ev->event_id = wevt_field_get_uint16(&content[EvtSystemEventID]); + ev->level = wevt_field_get_uint8(&content[EvtSystemLevel]); + ev->opcode = wevt_field_get_uint8(&content[EvtSystemOpcode]); + ev->keywords = wevt_field_get_uint64_hex(&content[EvtSystemKeywords]); + ev->version = wevt_field_get_uint8(&content[EvtSystemVersion]); + ev->task = wevt_field_get_uint16(&content[EvtSystemTask]); + ev->qualifiers = wevt_field_get_uint16(&content[EvtSystemQualifiers]); + ev->process_id = wevt_field_get_uint32(&content[EvtSystemProcessID]); + ev->thread_id = wevt_field_get_uint32(&content[EvtSystemThreadID]); + ev->created_ns = wevt_field_get_filetime_to_ns(&content[EvtSystemTimeCreated]); + + if(log->type & WEVT_QUERY_EXTENDED) { + wevt_field_get_string_utf8(&content[EvtSystemChannel], &log->ops.channel); + wevt_field_get_string_utf8(&content[EvtSystemComputer], &log->ops.computer); + wevt_field_get_string_utf8(&content[EvtSystemProviderName], &log->ops.provider); + wevt_get_uuid_by_type(&content[EvtSystemProviderGuid], &ev->provider); + wevt_get_uuid_by_type(&content[EvtSystemActivityID], &ev->activity_id); + wevt_get_uuid_by_type(&content[EvtSystemRelatedActivityID], &ev->related_activity_id); + wevt_field_get_sid(&content[EvtSystemUserID], &log->ops.account, &log->ops.domain, &log->ops.sid); + + PROVIDER_META_HANDLE *p = log->provider = + provider_get(ev->provider, content[EvtSystemProviderName].StringVal); + + ev->platform = provider_get_platform(p); + + wevt_get_level(log, ev, p); + wevt_get_task(log, ev, p); + wevt_get_opcode(log, ev, p); + wevt_get_keyword(log, ev, p); + + if(log->type & WEVT_QUERY_EVENT_DATA && wEvtRender(log, log->hRenderUserContext, &log->ops.raw.user)) { +#if (ON_FTS_PRELOAD_MESSAGE == 
1)
+                EvtFormatMessage_Event_utf8(&log->ops.unicode, log->provider, log->hEvent, &log->ops.event);
+#endif
+#if (ON_FTS_PRELOAD_XML == 1)
+                EvtFormatMessage_Xml_utf8(&log->ops.unicode, log->provider, log->hEvent, &log->ops.xml);
+#endif
+#if (ON_FTS_PRELOAD_EVENT_DATA == 1)
+                for(size_t i = 0; i < log->ops.raw.user.count ;i++)
+                    evt_variant_to_buffer(log->ops.event_data, &log->ops.raw.user.data[i], " ||| ");
+#endif
+        }
+    }
+
+    ret = true;
+
+cleanup:
+    return ret;
+}
+
+bool wevt_get_next_event(WEVT_LOG *log, WEVT_EVENT *ev) {
+    DWORD size = (log->type & WEVT_QUERY_EXTENDED) ? BATCH_NEXT_EVENT : 1;
+    DWORD max_failures = 10;
+
+    fatal_assert(log && log->hQuery && log->hRenderSystemContext);
+
+    while(max_failures > 0) {
+        if (log->batch.used >= log->batch.size) {
+            log->batch.size = 0;
+            log->batch.used = 0;
+            DWORD err;
+            if(!EvtNext(log->hQuery, size, log->batch.hEvents, INFINITE, 0, &log->batch.size)) {
+                err = GetLastError();
+                if(err == ERROR_NO_MORE_ITEMS)
+                    return false; // no more events to read
+            }
+
+            if(!log->batch.size) {
+                if(size == 1) {
+                    nd_log(NDLS_COLLECTORS, NDLP_ERR,
+                           "EvtNext() failed, hQuery: 0x%lx, size: %zu, extended info: %s",
+                           (uintptr_t)log->hQuery, (size_t)size, EvtGetExtendedStatus_utf8());
+                    return false;
+                }
+
+                // EvtNext() returns true only when it can fill the array,
+                // so let's retry with a smaller array.
+                size /= 2;
+                if(size < 1) size = 1;
+                continue;
+            }
+        }
+
+        log->query_stats.event_count++;
+        log->log_stats.event_count++;
+
+        // cleanup any previous event data
+        wevt_event_done(log);
+
+        log->hEvent = log->batch.hEvents[log->batch.used];
+        log->batch.hEvents[log->batch.used] = NULL;
+        log->batch.used++;
+
+        if(wevt_get_next_event_one(log, ev))
+            return true;
+        else {
+            log->query_stats.failed_count++;
+            log->log_stats.failed_count++;
+            max_failures--;
+        }
+    }
+
+    return false;
+}
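+
+// Minimal consumption sketch (illustrative only - the real callers live in
+// the query engine of this plugin): callers pull events one at a time, while
+// batches of up to BATCH_NEXT_EVENT handles are fetched transparently by
+// wevt_get_next_event() above.
+//
+//    WEVT_EVENT ev;
+//    while(wevt_get_next_event(log, &ev)) {
+//        // ev and the strings in log->ops are valid here,
+//        // until the next call reuses the buffers
+//    }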
+
+static void wevt_event_done(WEVT_LOG *log) {
+    if (log->provider) {
+        provider_release(log->provider);
+        log->provider = NULL;
+    }
+
+    if (log->hEvent) {
+        EvtClose(log->hEvent);
+        log->hEvent = NULL;
+    }
+
+    log->ops.channel.src = TXT_SOURCE_UNKNOWN;
+    log->ops.provider.src = TXT_SOURCE_UNKNOWN;
+    log->ops.computer.src = TXT_SOURCE_UNKNOWN;
+    log->ops.account.src = TXT_SOURCE_UNKNOWN;
+    log->ops.domain.src = TXT_SOURCE_UNKNOWN;
+    log->ops.sid.src = TXT_SOURCE_UNKNOWN;
+
+    log->ops.event.src = TXT_SOURCE_UNKNOWN;
+    log->ops.level.src = TXT_SOURCE_UNKNOWN;
+    log->ops.keywords.src = TXT_SOURCE_UNKNOWN;
+    log->ops.opcode.src = TXT_SOURCE_UNKNOWN;
+    log->ops.task.src = TXT_SOURCE_UNKNOWN;
+    log->ops.xml.src = TXT_SOURCE_UNKNOWN;
+
+    log->ops.channel.used = 0;
+    log->ops.provider.used = 0;
+    log->ops.computer.used = 0;
+    log->ops.account.used = 0;
+    log->ops.domain.used = 0;
+    log->ops.sid.used = 0;
+
+    log->ops.event.used = 0;
+    log->ops.level.used = 0;
+    log->ops.keywords.used = 0;
+    log->ops.opcode.used = 0;
+    log->ops.task.used = 0;
+    log->ops.xml.used = 0;
+
+    if(log->ops.event_data)
+        log->ops.event_data->len = 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// Query management
+
+bool wevt_query(WEVT_LOG *log, LPCWSTR channel, LPCWSTR query, EVT_QUERY_FLAGS direction) {
+    wevt_query_done(log);
+    log->log_stats.queries_count++;
+
+    EVT_HANDLE hQuery = EvtQuery(NULL, channel, query, EvtQueryChannelPath | (direction & (EvtQueryReverseDirection | EvtQueryForwardDirection)) | EvtQueryTolerateQueryErrors);
+    if (!hQuery) {
+        nd_log(NDLS_COLLECTORS, NDLP_ERR, "EvtQuery() failed, query: %s | extended info: %s",
+               query2utf8(query), EvtGetExtendedStatus_utf8());
+
+        log->log_stats.queries_failed++;
+        return false;
+    }
+
+    log->hQuery = hQuery;
+    return true;
+}
+
+void wevt_query_done(WEVT_LOG *log) {
+    // close the last working hEvent
+    wevt_event_done(log);
+
+    // close all batched hEvents
+    for(DWORD i = log->batch.used; i < log->batch.size ;i++) {
+        if(log->batch.hEvents[i])
+            EvtClose(log->batch.hEvents[i]);
+
+        log->batch.hEvents[i] = NULL;
+    }
+    log->batch.used = 0;
+    log->batch.size = 0;
+
+    if (log->hQuery) {
+        EvtClose(log->hQuery);
+        log->hQuery = NULL;
+    }
+
+    log->query_stats.event_count = 0;
+    log->query_stats.failed_count = 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// Log management
+
+WEVT_LOG *wevt_openlog6(WEVT_QUERY_TYPE type) {
+    WEVT_LOG *log = callocz(1, sizeof(*log));
+    log->type = type;
+
+    // create the system render context
+    log->hRenderSystemContext = EvtCreateRenderContext(0, NULL, EvtRenderContextSystem);
+    if (!log->hRenderSystemContext) {
+        nd_log(NDLS_COLLECTORS, NDLP_ERR,
+               "EvtCreateRenderContext() on system context failed, extended info: %s",
+               EvtGetExtendedStatus_utf8());
+        goto cleanup;
+    }
+
+    if(type & WEVT_QUERY_EVENT_DATA) {
+        log->hRenderUserContext = EvtCreateRenderContext(0, NULL, EvtRenderContextUser);
+        if (!log->hRenderUserContext) {
+            nd_log(NDLS_COLLECTORS, NDLP_ERR,
+                   "EvtCreateRenderContext() on user context failed, extended info: %s",
+                   EvtGetExtendedStatus_utf8());
+            goto cleanup;
+        }
+
+        log->ops.event_data = buffer_create(4096, NULL);
+    }
+
+    return log;
+
+cleanup:
+    wevt_closelog6(log);
+    return NULL;
+}
+
+void wevt_closelog6(WEVT_LOG *log) {
+    wevt_query_done(log);
+
+    if (log->hRenderSystemContext)
+        EvtClose(log->hRenderSystemContext);
+
+    if (log->hRenderUserContext)
+        EvtClose(log->hRenderUserContext);
+
+    wevt_variant_cleanup(&log->ops.raw.system);
+    wevt_variant_cleanup(&log->ops.raw.user);
+    txt_utf16_cleanup(&log->ops.unicode);
+    txt_utf8_cleanup(&log->ops.channel);
+    txt_utf8_cleanup(&log->ops.provider);
+    txt_utf8_cleanup(&log->ops.computer);
+    txt_utf8_cleanup(&log->ops.account);
+    txt_utf8_cleanup(&log->ops.domain);
+    txt_utf8_cleanup(&log->ops.sid);
+
+    txt_utf8_cleanup(&log->ops.event);
+    txt_utf8_cleanup(&log->ops.level);
+    txt_utf8_cleanup(&log->ops.keywords);
+    txt_utf8_cleanup(&log->ops.opcode);
+    txt_utf8_cleanup(&log->ops.task);
+    txt_utf8_cleanup(&log->ops.xml);
+
+    buffer_free(log->ops.event_data);
+
+    freez(log);
+}
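+
+// Typical lifecycle of a WEVT_LOG handle, as a sketch (the channel name and
+// direction are illustrative):
+//
+//    WEVT_LOG *log = wevt_openlog6(WEVT_QUERY_NORMAL);
+//    if(log) {
+//        if(wevt_query(log, L"System", NULL, EvtQueryReverseDirection)) {
+//            WEVT_EVENT ev;
+//            while(wevt_get_next_event(log, &ev))
+//                ; // consume log->ops.* for this event
+//            wevt_query_done(log);
+//        }
+//        wevt_closelog6(log);
+//    }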
info: %s", + channel2utf8(channel), EvtGetExtendedStatus_utf8()); + + goto cleanup; + } + + if (!wevt_get_next_event(log, &retention->first_event)) + goto cleanup; + + if (!retention->first_event.id) { + // no data in the event log + retention->first_event = retention->last_event = WEVT_EVENT_EMPTY; + ret = true; + goto cleanup; + } + EvtClose(log->hQuery); + + log->hQuery = EvtQuery(NULL, channel, query, EvtQueryChannelPath | EvtQueryReverseDirection | EvtQueryTolerateQueryErrors); + if (!log->hQuery) { + if (GetLastError() == ERROR_EVT_CHANNEL_NOT_FOUND) + nd_log(NDLS_COLLECTORS, NDLP_ERR, "EvtQuery() for retention failed, channel '%s' not found, extended info: %s", + channel2utf8(channel), EvtGetExtendedStatus_utf8()); + else + nd_log(NDLS_COLLECTORS, NDLP_ERR, "EvtQuery() for retention on channel '%s' failed, extended info: %s", + channel2utf8(channel), EvtGetExtendedStatus_utf8()); + + goto cleanup; + } + + if (!wevt_get_next_event(log, &retention->last_event) || retention->last_event.id == 0) { + // no data in eventlog + retention->last_event = retention->first_event; + } + retention->last_event.id += 1; // we should read the last record + ret = true; + +cleanup: + wevt_query_done(log); + + if(ret) { + retention->entries = (channel && !query) ? retention->last_event.id - retention->first_event.id : 0; + + if(retention->last_event.created_ns >= retention->first_event.created_ns) + retention->duration_ns = retention->last_event.created_ns - retention->first_event.created_ns; + else + retention->duration_ns = retention->first_event.created_ns - retention->last_event.created_ns; + + retention->size_bytes = wevt_log_file_size(channel); + } + else + memset(retention, 0, sizeof(*retention)); + + return ret; +} + +static uint64_t wevt_log_file_size(const wchar_t *channel) { + EVT_HANDLE hLog = NULL; + EVT_VARIANT evtVariant; + DWORD bufferUsed = 0; + uint64_t file_size = 0; + + // Open the event log channel + hLog = EvtOpenLog(NULL, channel, EvtOpenChannelPath); + if (!hLog) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "EvtOpenLog() on channel '%s' failed, extended info: %s", + channel2utf8(channel), EvtGetExtendedStatus_utf8()); + goto cleanup; + } + + // Get the file size of the log + if (!EvtGetLogInfo(hLog, EvtLogFileSize, sizeof(evtVariant), &evtVariant, &bufferUsed)) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "EvtGetLogInfo() on channel '%s' failed, extended info: %s", + channel2utf8(channel), EvtGetExtendedStatus_utf8()); + goto cleanup; + } + + // Extract the file size from the EVT_VARIANT structure + file_size = evtVariant.UInt64Val; + +cleanup: + if (hLog) + EvtClose(hLog); + + return file_size; +} diff --git a/src/collectors/windows-events.plugin/windows-events-query.h b/src/collectors/windows-events.plugin/windows-events-query.h new file mode 100644 index 000000000..3136b23df --- /dev/null +++ b/src/collectors/windows-events.plugin/windows-events-query.h @@ -0,0 +1,296 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_WINDOWS_EVENTS_QUERY_H +#define NETDATA_WINDOWS_EVENTS_QUERY_H + +#include "libnetdata/libnetdata.h" +#include "windows-events.h" + +#define BATCH_NEXT_EVENT 500 + +typedef struct wevt_event { + uint64_t id; // EventRecordId (unique and sequential per channel) + uint8_t version; + uint8_t level; // The severity of event + uint8_t opcode; // we receive this as 8bit, but providers use 32bit + uint16_t event_id; // This is the template that defines the message to be shown + uint16_t task; + uint16_t qualifiers; + uint32_t process_id; + uint32_t thread_id; + 
+
+typedef struct wevt_event {
+    uint64_t id;                    // EventRecordId (unique and sequential per channel)
+    uint8_t  version;
+    uint8_t  level;                 // the severity of the event
+    uint8_t  opcode;                // we receive this as 8bit, but providers use 32bit
+    uint16_t event_id;              // the template that defines the message to be shown
+    uint16_t task;
+    uint16_t qualifiers;
+    uint32_t process_id;
+    uint32_t thread_id;
+    uint64_t keywords;              // categorization of the event
+    ND_UUID  provider;
+    ND_UUID  activity_id;
+    ND_UUID  related_activity_id;
+    nsec_t   created_ns;
+    WEVT_PROVIDER_PLATFORM platform;
+} WEVT_EVENT;
+
+#define WEVT_EVENT_EMPTY (WEVT_EVENT){ .id = 0, .created_ns = 0, }
+
+typedef struct {
+    EVT_VARIANT *data;
+    DWORD size;
+    DWORD used;
+    DWORD count;
+} WEVT_VARIANT;
+
+typedef struct {
+    WEVT_EVENT first_event;
+    WEVT_EVENT last_event;
+
+    uint64_t entries;
+    nsec_t duration_ns;
+    uint64_t size_bytes;
+} EVT_RETENTION;
+
+struct provider_meta_handle;
+
+typedef enum __attribute__((packed)) {
+    WEVT_QUERY_BASIC      = (1 << 0),
+    WEVT_QUERY_EXTENDED   = (1 << 1),
+    WEVT_QUERY_EVENT_DATA = (1 << 2),
+} WEVT_QUERY_TYPE;
+
+#define WEVT_QUERY_RETENTION WEVT_QUERY_BASIC
+#define WEVT_QUERY_NORMAL (WEVT_QUERY_BASIC | WEVT_QUERY_EXTENDED)
+#define WEVT_QUERY_FTS (WEVT_QUERY_BASIC | WEVT_QUERY_EXTENDED | WEVT_QUERY_EVENT_DATA)
+
+typedef struct wevt_log {
+    struct {
+        DWORD size;
+        DWORD used;
+        EVT_HANDLE hEvents[BATCH_NEXT_EVENT];
+    } batch;
+
+    EVT_HANDLE hEvent;
+    EVT_HANDLE hQuery;
+    EVT_HANDLE hRenderSystemContext;
+    EVT_HANDLE hRenderUserContext;
+    struct provider_meta_handle *provider;
+
+    WEVT_QUERY_TYPE type;
+
+    struct {
+        struct {
+            // temp buffers used for rendering event log messages
+            // never use directly
+            WEVT_VARIANT system;
+            WEVT_VARIANT user;
+        } raw;
+
+        // temp buffer used for fetching and converting UNICODE and UTF-8
+        // every string operation overwrites it, multiple times per event log entry
+        // it can be used within any function, for its own purposes,
+        // but never share it between functions
+        TXT_UTF16 unicode;
+
+        // string attributes of the current event log entry
+        // valid until another event is fetched
+
+        // IMPORTANT:
+        // EVERY FIELD NEEDS ITS OWN BUFFER!
+        // the way facets work, all the field value pointers need to be valid
+        // until the entire row closes, so reusing a buffer across fields
+        // would show the same value in all the fields sharing that buffer.
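+        // e.g. (hypothetical): if 'account' and 'domain' below shared one
+        // TXT_UTF8 buffer, facets would render the same string in both the
+        // Account and Domain columns of a row, since both field pointers
+        // would point into the same storage.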
+ + TXT_UTF8 channel; + TXT_UTF8 provider; + TXT_UTF8 computer; + TXT_UTF8 account; + TXT_UTF8 domain; + TXT_UTF8 sid; + + TXT_UTF8 event; // the message to be shown to the user + TXT_UTF8 level; + TXT_UTF8 keywords; + TXT_UTF8 opcode; + TXT_UTF8 task; + TXT_UTF8 xml; + + BUFFER *event_data; + } ops; + + struct { + size_t event_count; + size_t failed_count; + } query_stats; + + struct { + size_t queries_count; + size_t queries_failed; + + size_t event_count; + size_t failed_count; + } log_stats; + +} WEVT_LOG; + +WEVT_LOG *wevt_openlog6(WEVT_QUERY_TYPE type); +void wevt_closelog6(WEVT_LOG *log); + +bool wevt_channel_retention(WEVT_LOG *log, const wchar_t *channel, const wchar_t *query, EVT_RETENTION *retention); + +bool wevt_query(WEVT_LOG *log, LPCWSTR channel, LPCWSTR query, EVT_QUERY_FLAGS direction); +void wevt_query_done(WEVT_LOG *log); + +bool wevt_get_next_event(WEVT_LOG *log, WEVT_EVENT *ev); + +bool EvtFormatMessage_utf16( + TXT_UTF16 *dst, EVT_HANDLE hMetadata, EVT_HANDLE hEvent, DWORD dwMessageId, EVT_FORMAT_MESSAGE_FLAGS flags); + +bool EvtFormatMessage_Event_utf8(TXT_UTF16 *tmp, struct provider_meta_handle *p, EVT_HANDLE hEvent, TXT_UTF8 *dst); +bool EvtFormatMessage_Xml_utf8(TXT_UTF16 *tmp, struct provider_meta_handle *p, EVT_HANDLE hEvent, TXT_UTF8 *dst); + +void evt_variant_to_buffer(BUFFER *b, EVT_VARIANT *ev, const char *separator); + +static inline void wevt_variant_cleanup(WEVT_VARIANT *v) { + freez(v->data); +} + +static inline void wevt_variant_resize(WEVT_VARIANT *v, size_t required_size) { + if(required_size < v->size) + return; + + wevt_variant_cleanup(v); + v->size = txt_compute_new_size(v->size, required_size); + v->data = mallocz(v->size); +} + +static inline void wevt_variant_count_from_used(WEVT_VARIANT *v) { + v->count = v->used / sizeof(*v->data); +} + +static inline uint8_t wevt_field_get_uint8(EVT_VARIANT *ev) { + if((ev->Type & EVT_VARIANT_TYPE_MASK) == EvtVarTypeNull) + return 0; + + fatal_assert((ev->Type & EVT_VARIANT_TYPE_MASK) == EvtVarTypeByte); + return ev->ByteVal; +} + +static inline uint16_t wevt_field_get_uint16(EVT_VARIANT *ev) { + if((ev->Type & EVT_VARIANT_TYPE_MASK) == EvtVarTypeNull) + return 0; + + fatal_assert((ev->Type & EVT_VARIANT_TYPE_MASK) == EvtVarTypeUInt16); + return ev->UInt16Val; +} + +static inline uint32_t wevt_field_get_uint32(EVT_VARIANT *ev) { + if((ev->Type & EVT_VARIANT_TYPE_MASK) == EvtVarTypeNull) + return 0; + + fatal_assert((ev->Type & EVT_VARIANT_TYPE_MASK) == EvtVarTypeUInt32); + return ev->UInt32Val; +} + +static inline uint64_t wevt_field_get_uint64(EVT_VARIANT *ev) { + if((ev->Type & EVT_VARIANT_TYPE_MASK) == EvtVarTypeNull) + return 0; + + fatal_assert((ev->Type & EVT_VARIANT_TYPE_MASK) == EvtVarTypeUInt64); + return ev->UInt64Val; +} + +static inline uint64_t wevt_field_get_uint64_hex(EVT_VARIANT *ev) { + if((ev->Type & EVT_VARIANT_TYPE_MASK) == EvtVarTypeNull) + return 0; + + fatal_assert((ev->Type & EVT_VARIANT_TYPE_MASK) == EvtVarTypeHexInt64); + return ev->UInt64Val; +} + +static inline bool wevt_field_get_string_utf8(EVT_VARIANT *ev, TXT_UTF8 *dst) { + if((ev->Type & EVT_VARIANT_TYPE_MASK) == EvtVarTypeNull) { + txt_utf8_empty(dst); + return false; + } + + fatal_assert((ev->Type & EVT_VARIANT_TYPE_MASK) == EvtVarTypeString); + return wchar_to_txt_utf8(dst, ev->StringVal, -1); +} + +bool cached_sid_to_account_domain_sidstr(PSID sid, TXT_UTF8 *dst_account, TXT_UTF8 *dst_domain, TXT_UTF8 *dst_sid_str); +static inline bool wevt_field_get_sid(EVT_VARIANT *ev, TXT_UTF8 *dst_account, TXT_UTF8 *dst_domain, 
TXT_UTF8 *dst_sid_str) { + if((ev->Type & EVT_VARIANT_TYPE_MASK) == EvtVarTypeNull) { + txt_utf8_empty(dst_account); + txt_utf8_empty(dst_domain); + txt_utf8_empty(dst_sid_str); + return false; + } + + fatal_assert((ev->Type & EVT_VARIANT_TYPE_MASK) == EvtVarTypeSid); + return cached_sid_to_account_domain_sidstr(ev->SidVal, dst_account, dst_domain, dst_sid_str); +} + +static inline uint64_t wevt_field_get_filetime_to_ns(EVT_VARIANT *ev) { + if((ev->Type & EVT_VARIANT_TYPE_MASK) == EvtVarTypeNull) + return 0; + + fatal_assert((ev->Type & EVT_VARIANT_TYPE_MASK) == EvtVarTypeFileTime); + return os_windows_ulonglong_to_unix_epoch_ns(ev->FileTimeVal); +} + +static inline bool wevt_GUID_to_ND_UUID(ND_UUID *nd_uuid, const GUID *guid) { + if(guid && sizeof(GUID) == sizeof(ND_UUID)) { + memcpy(nd_uuid->uuid, guid, sizeof(ND_UUID)); + return true; + } + else { + *nd_uuid = UUID_ZERO; + return false; + } +} + +static inline bool wevt_get_uuid_by_type(EVT_VARIANT *ev, ND_UUID *dst) { + if((ev->Type & EVT_VARIANT_TYPE_MASK) == EvtVarTypeNull) { + wevt_GUID_to_ND_UUID(dst, NULL); + return false; + } + + fatal_assert((ev->Type & EVT_VARIANT_TYPE_MASK) == EvtVarTypeGuid); + return wevt_GUID_to_ND_UUID(dst, ev->GuidVal); +} + +// https://learn.microsoft.com/en-us/windows/win32/wes/defining-severity-levels +static inline bool is_valid_provider_level(uint64_t level, bool strict) { + if(strict) + // when checking if the name is provider independent + return level >= 16 && level <= 255; + else + // when checking acceptable values in provider manifests + return level <= 255; +} + +// https://learn.microsoft.com/en-us/windows/win32/wes/defining-tasks-and-opcodes +static inline bool is_valid_provider_opcode(uint64_t opcode, bool strict) { + if(strict) + // when checking if the name is provider independent + return opcode >= 10 && opcode <= 239; + else + // when checking acceptable values in provider manifests + return opcode <= 255; +} + +// https://learn.microsoft.com/en-us/windows/win32/wes/defining-tasks-and-opcodes +static inline bool is_valid_provider_task(uint64_t task, bool strict) { + if(strict) + // when checking if the name is provider independent + return task > 0 && task <= 0xFFFF; + else + // when checking acceptable values in provider manifests + return task <= 0xFFFF; +} + +// https://learn.microsoft.com/en-us/windows/win32/wes/defining-keywords-used-to-classify-types-of-events +static inline bool is_valid_provider_keyword(uint64_t keyword, bool strict) { + if(strict) + // when checking if the name is provider independent + return keyword > 0 && keyword <= 0x0000FFFFFFFFFFFF; + else + // when checking acceptable values in provider manifests + return true; +} + +#endif //NETDATA_WINDOWS_EVENTS_QUERY_H diff --git a/src/collectors/windows-events.plugin/windows-events-sources.c b/src/collectors/windows-events.plugin/windows-events-sources.c new file mode 100644 index 000000000..b931ed059 --- /dev/null +++ b/src/collectors/windows-events.plugin/windows-events-sources.c @@ -0,0 +1,644 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "windows-events.h" + +//struct { +// const char *name; +// const wchar_t *query; +//} custom_queries[] = { +// { +// .name = "All-Administrative-Events", +// .query = L"\n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " 
\n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// "", +// }, +// { +// .name = "All-Remote-Desktop-Services", +// .query = L"\n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// " \n" +// "", +// }, +// { +// .name = "All-Security-SPP", +// .query = L"\n" +// " \n" +// " \n" +// " \n" +// "", +// } +//}; + +ENUM_STR_MAP_DEFINE(WEVT_SOURCE_TYPE) = { + { .id = WEVTS_ALL, .name = WEVT_SOURCE_ALL_NAME }, + { .id = WEVTS_ADMIN, .name = WEVT_SOURCE_ALL_ADMIN_NAME }, + { .id = WEVTS_OPERATIONAL, .name = WEVT_SOURCE_ALL_OPERATIONAL_NAME }, + { .id = WEVTS_ANALYTIC, .name = WEVT_SOURCE_ALL_ANALYTIC_NAME }, + { .id = WEVTS_DEBUG, .name = WEVT_SOURCE_ALL_DEBUG_NAME }, + { .id = WEVTS_WINDOWS, .name = WEVT_SOURCE_ALL_WINDOWS_NAME }, + { .id = WEVTS_ENABLED, .name = WEVT_SOURCE_ALL_ENABLED_NAME }, + { .id = WEVTS_DISABLED, .name = WEVT_SOURCE_ALL_DISABLED_NAME }, + { .id = WEVTS_FORWARDED, .name = WEVT_SOURCE_ALL_FORWARDED_NAME }, + { .id = WEVTS_CLASSIC, .name = WEVT_SOURCE_ALL_CLASSIC_NAME }, + { .id = WEVTS_BACKUP_MODE, .name = WEVT_SOURCE_ALL_BACKUP_MODE_NAME }, + { .id = WEVTS_OVERWRITE_MODE, .name = WEVT_SOURCE_ALL_OVERWRITE_MODE_NAME }, + { .id = WEVTS_STOP_WHEN_FULL_MODE, .name = WEVT_SOURCE_ALL_STOP_WHEN_FULL_MODE_NAME }, + { .id = WEVTS_RETAIN_AND_BACKUP_MODE, .name = WEVT_SOURCE_ALL_RETAIN_AND_BACKUP_MODE_NAME }, + + // terminator + { . 
id = 0, .name = NULL } +}; + +BITMAP_STR_DEFINE_FUNCTIONS(WEVT_SOURCE_TYPE, WEVTS_NONE, ""); + +DICTIONARY *wevt_sources = NULL; +DICTIONARY *used_hashes_registry = NULL; +static usec_t wevt_session = 0; + +void wevt_sources_del_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) { + LOGS_QUERY_SOURCE *src = value; + freez((void *)src->fullname); + string_freez(src->source); + + src->fullname = NULL; + src->source = NULL; +} + +static bool wevt_sources_conflict_cb(const DICTIONARY_ITEM *item __maybe_unused, void *old_value, void *new_value, void *data __maybe_unused) { + LOGS_QUERY_SOURCE *src_old = old_value; + LOGS_QUERY_SOURCE *src_new = new_value; + + bool ret = false; + if(src_new->last_scan_monotonic_ut > src_old->last_scan_monotonic_ut) { + src_old->last_scan_monotonic_ut = src_new->last_scan_monotonic_ut; + + if (src_old->source != src_new->source) { + string_freez(src_old->source); + src_old->source = src_new->source; + src_new->source = NULL; + } + src_old->source_type = src_new->source_type; + + src_old->msg_first_ut = src_new->msg_first_ut; + src_old->msg_last_ut = src_new->msg_last_ut; + src_old->msg_first_id = src_new->msg_first_id; + src_old->msg_last_id = src_new->msg_last_id; + src_old->entries = src_new->entries; + src_old->size = src_new->size; + + ret = true; + } + + freez((void *)src_new->fullname); + string_freez(src_new->source); + src_new->fullname = NULL; + src_new->source = NULL; + + return ret; +} + +void wevt_sources_init(void) { + wevt_session = now_realtime_usec(); + + used_hashes_registry = dictionary_create(DICT_OPTION_DONT_OVERWRITE_VALUE); + + wevt_sources = dictionary_create_advanced(DICT_OPTION_FIXED_SIZE | DICT_OPTION_DONT_OVERWRITE_VALUE, + NULL, sizeof(LOGS_QUERY_SOURCE)); + + dictionary_register_delete_callback(wevt_sources, wevt_sources_del_cb, NULL); + dictionary_register_conflict_callback(wevt_sources, wevt_sources_conflict_cb, NULL); +} + +void buffer_json_wevt_versions(BUFFER *wb __maybe_unused) { + buffer_json_member_add_object(wb, "versions"); + { + buffer_json_member_add_uint64(wb, "sources", + wevt_session + dictionary_version(wevt_sources)); + } + buffer_json_object_close(wb); +} + +// -------------------------------------------------------------------------------------------------------------------- + +int wevt_sources_dict_items_backward_compar(const void *a, const void *b) { + const DICTIONARY_ITEM **da = (const DICTIONARY_ITEM **)a, **db = (const DICTIONARY_ITEM **)b; + LOGS_QUERY_SOURCE *sa = dictionary_acquired_item_value(*da); + LOGS_QUERY_SOURCE *sb = dictionary_acquired_item_value(*db); + + // compare the last message timestamps + if(sa->msg_last_ut < sb->msg_last_ut) + return 1; + + if(sa->msg_last_ut > sb->msg_last_ut) + return -1; + + // compare the first message timestamps + if(sa->msg_first_ut < sb->msg_first_ut) + return 1; + + if(sa->msg_first_ut > sb->msg_first_ut) + return -1; + + return 0; +} + +int wevt_sources_dict_items_forward_compar(const void *a, const void *b) { + return -wevt_sources_dict_items_backward_compar(a, b); +} + +// -------------------------------------------------------------------------------------------------------------------- + +typedef enum { + wevt_source_type_internal, + wevt_source_type_provider, + wevt_source_type_channel, +} wevt_source_type; + +struct wevt_source { + wevt_source_type type; + usec_t first_ut; + usec_t last_ut; + size_t count; + size_t entries; + uint64_t size; +}; + +static int wevt_source_to_json_array_cb(const DICTIONARY_ITEM *item, void 
*entry, void *data) {
+    const struct wevt_source *s = entry;
+    BUFFER *wb = data;
+
+    const char *name = dictionary_acquired_item_name(item);
+
+    if(s->count == 1 && strncmp(name, WEVT_SOURCE_ALL_OF_PROVIDER_PREFIX, sizeof(WEVT_SOURCE_ALL_OF_PROVIDER_PREFIX) - 1) == 0)
+        // do not include "All-Of-X" when there is only 1 channel
+        return 0;
+
+    bool default_selected = (s->type == wevt_source_type_channel);
+    if(default_selected && (strcmp(name, "NetdataWEL") == 0 || strcmp(name, "Netdata/Access") == 0))
+        // do not select Netdata Access logs by default
+        default_selected = false;
+
+    buffer_json_add_array_item_object(wb);
+    {
+        char size_for_humans[128];
+        size_snprintf(size_for_humans, sizeof(size_for_humans), s->size, "B", false);
+
+        char duration_for_humans[128];
+        duration_snprintf(duration_for_humans, sizeof(duration_for_humans),
+                          (time_t)((s->last_ut - s->first_ut) / USEC_PER_SEC), "s", true);
+
+        char entries_for_humans[128];
+        entries_snprintf(entries_for_humans, sizeof(entries_for_humans), s->entries, "", false);
+
+        char info[1024];
+        snprintfz(info, sizeof(info), "%zu channel%s, with a total size of %s, covering %s%s%s%s",
+                  s->count, s->count > 1 ? "s":"", size_for_humans, duration_for_humans,
+                  s->entries ? ", having " : "", s->entries ? entries_for_humans : "", s->entries ? " entries" : "");
+
+        buffer_json_member_add_string(wb, "id", name);
+        buffer_json_member_add_string(wb, "name", name);
+        buffer_json_member_add_string(wb, "pill", size_for_humans);
+        buffer_json_member_add_string(wb, "info", info);
+        buffer_json_member_add_boolean(wb, "default_selected", default_selected);
+    }
+    buffer_json_object_close(wb); // options object
+
+    return 1;
+}
+
+static bool wevt_source_merge_sizes(const DICTIONARY_ITEM *item __maybe_unused, void *old_value, void *new_value , void *data __maybe_unused) {
+    struct wevt_source *old_v = old_value;
+    const struct wevt_source *new_v = new_value;
+
+    old_v->count += new_v->count;
+    old_v->size += new_v->size;
+    old_v->entries += new_v->entries;
+
+    if(new_v->first_ut && new_v->first_ut < old_v->first_ut)
+        old_v->first_ut = new_v->first_ut;
+
+    if(new_v->last_ut && new_v->last_ut > old_v->last_ut)
+        old_v->last_ut = new_v->last_ut;
+
+    return false;
+}
+
+void wevt_sources_to_json_array(BUFFER *wb) {
+    DICTIONARY *dict = dictionary_create(DICT_OPTION_SINGLE_THREADED|DICT_OPTION_NAME_LINK_DONT_CLONE|DICT_OPTION_DONT_OVERWRITE_VALUE);
+    dictionary_register_conflict_callback(dict, wevt_source_merge_sizes, NULL);
+
+    struct wevt_source t = { 0 };
+
+    LOGS_QUERY_SOURCE *src;
+    dfe_start_read(wevt_sources, src) {
+        t.first_ut = src->msg_first_ut;
+        t.last_ut = src->msg_last_ut;
+        t.count = 1;
+        t.size = src->size;
+        t.entries = src->entries;
+
+        src->source_type |= WEVTS_ALL;
+        t.type = wevt_source_type_internal;
+        for(size_t i = 0; WEVT_SOURCE_TYPE_names[i].name ;i++) {
+            if(src->source_type & WEVT_SOURCE_TYPE_names[i].id)
+                dictionary_set(dict, WEVT_SOURCE_TYPE_names[i].name, &t, sizeof(t));
+        }
+
+        if(src->provider) {
+            t.type = wevt_source_type_provider;
+            dictionary_set(dict, string2str(src->provider), &t, sizeof(t));
+        }
+
+        if(src->source) {
+            t.type = wevt_source_type_channel;
+            dictionary_set(dict, string2str(src->source), &t, sizeof(t));
+        }
+    }
+    dfe_done(src);
+
+    dictionary_sorted_walkthrough_read(dict, wevt_source_to_json_array_cb, wb);
+    dictionary_destroy(dict);
+}
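+
+// Example of the aggregation above (illustrative channel names): if both
+// "Microsoft-Windows-Foo/Admin" and "Microsoft-Windows-Foo/Operational"
+// exist, wevt_source_merge_sizes() accumulates their sizes, entries and time
+// ranges into the virtual sources "All", "All-Admin" / "All-Operational" and
+// "All-Of-Microsoft-Windows-Foo", while each channel also remains its own
+// selectable source.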
+
+static bool ndEvtGetChannelConfigProperty(EVT_HANDLE hChannelConfig, WEVT_VARIANT *pr, EVT_CHANNEL_CONFIG_PROPERTY_ID id) {
+    if (!EvtGetChannelConfigProperty(hChannelConfig, id, 0, pr->size, pr->data, &pr->used)) {
+        DWORD status = GetLastError();
+        if (ERROR_INSUFFICIENT_BUFFER == status) {
+            wevt_variant_resize(pr, pr->used);
+            if(!EvtGetChannelConfigProperty(hChannelConfig, id, 0, pr->size, pr->data, &pr->used)) {
+                pr->used = 0;
+                pr->count = 0;
+                return false;
+            }
+        }
+        else {
+            // the property is not available for this channel
+            pr->used = 0;
+            pr->count = 0;
+            return false;
+        }
+    }
+
+    wevt_variant_count_from_used(pr);
+    return true;
+}
+
+WEVT_SOURCE_TYPE categorize_channel(const wchar_t *channel_path, const char **provider, WEVT_VARIANT *property) {
+    EVT_HANDLE hChannelConfig = NULL;
+    WEVT_SOURCE_TYPE result = WEVTS_ALL;
+
+    // Open the channel configuration
+    hChannelConfig = EvtOpenChannelConfig(NULL, channel_path, 0);
+    if (!hChannelConfig)
+        goto cleanup;
+
+    if(ndEvtGetChannelConfigProperty(hChannelConfig, property, EvtChannelConfigType) &&
+       property->count &&
+       property->data[0].Type == EvtVarTypeUInt32) {
+        switch (property->data[0].UInt32Val) {
+            case EvtChannelTypeAdmin:
+                result |= WEVTS_ADMIN;
+                break;
+
+            case EvtChannelTypeOperational:
+                result |= WEVTS_OPERATIONAL;
+                break;
+
+            case EvtChannelTypeAnalytic:
+                result |= WEVTS_ANALYTIC;
+                break;
+
+            case EvtChannelTypeDebug:
+                result |= WEVTS_DEBUG;
+                break;
+
+            default:
+                break;
+        }
+    }
+
+    if(ndEvtGetChannelConfigProperty(hChannelConfig, property, EvtChannelConfigClassicEventlog) &&
+       property->count &&
+       property->data[0].Type == EvtVarTypeBoolean &&
+       property->data[0].BooleanVal)
+        result |= WEVTS_CLASSIC;
+
+    if(ndEvtGetChannelConfigProperty(hChannelConfig, property, EvtChannelConfigOwningPublisher) &&
+       property->count &&
+       property->data[0].Type == EvtVarTypeString) {
+        *provider = provider2utf8(property->data[0].StringVal);
+        if(wcscasecmp(property->data[0].StringVal, L"Microsoft-Windows-EventCollector") == 0)
+            result |= WEVTS_FORWARDED;
+    }
+    else
+        *provider = NULL;
+
+    if(ndEvtGetChannelConfigProperty(hChannelConfig, property, EvtChannelConfigEnabled) &&
+       property->count &&
+       property->data[0].Type == EvtVarTypeBoolean) {
+        if(property->data[0].BooleanVal)
+            result |= WEVTS_ENABLED;
+        else
+            result |= WEVTS_DISABLED;
+    }
+
+    bool got_retention = false;
+    bool retained = false;
+    if(ndEvtGetChannelConfigProperty(hChannelConfig, property, EvtChannelLoggingConfigRetention) &&
+       property->count &&
+       property->data[0].Type == EvtVarTypeBoolean) {
+        got_retention = true;
+        retained = property->data[0].BooleanVal;
+    }
+
+    bool got_auto_backup = false;
+    bool auto_backup = false;
+    if(ndEvtGetChannelConfigProperty(hChannelConfig, property, EvtChannelLoggingConfigAutoBackup) &&
+       property->count &&
+       property->data[0].Type == EvtVarTypeBoolean) {
+        got_auto_backup = true;
+        auto_backup = property->data[0].BooleanVal;
+    }
+
+    if(got_retention && got_auto_backup) {
+        if(!retained) {
+            if(auto_backup)
+                result |= WEVTS_BACKUP_MODE;
+            else
+                result |= WEVTS_OVERWRITE_MODE;
+        }
+        else {
+            if(auto_backup)
+                result |= WEVTS_STOP_WHEN_FULL_MODE;
+            else
+                result |= WEVTS_RETAIN_AND_BACKUP_MODE;
+        }
+    }
+
+cleanup:
+    if (hChannelConfig)
+        EvtClose(hChannelConfig);
+
+    return result;
+}
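+
+// Sketch of how categorize_channel() is used (hypothetical channel name;
+// the real call site is in wevt_sources_scan() below):
+//
+//    WEVT_VARIANT property = { 0 };
+//    const char *provider = NULL;
+//    WEVT_SOURCE_TYPE t = categorize_channel(L"Application", &provider, &property);
+//    if(t & WEVTS_ENABLED) { /* the channel is enabled */ }
+//    wevt_variant_cleanup(&property);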
+
+void wevt_sources_scan(void) {
+    static SPINLOCK spinlock = NETDATA_SPINLOCK_INITIALIZER;
+    LPWSTR channel = NULL;
+    EVT_HANDLE hChannelEnum = NULL;
+
+    if(spinlock_trylock(&spinlock)) {
+        const usec_t started_ut = now_monotonic_usec();
+
+        WEVT_VARIANT property = { 0 };
+        DWORD dwChannelBufferSize = 0;
+        DWORD dwChannelBufferUsed = 0;
+        DWORD status = ERROR_SUCCESS;
+
+        // Open a handle to enumerate the event channels
+        hChannelEnum = EvtOpenChannelEnum(NULL, 0);
+        if (!hChannelEnum) {
+            nd_log(NDLS_COLLECTORS, NDLP_ERR, "WINDOWS EVENTS: EvtOpenChannelEnum() failed with %" PRIu64 "\n",
+                   (uint64_t)GetLastError());
+            spinlock_unlock(&spinlock);
+            goto cleanup;
+        }
+
+        WEVT_LOG *log = wevt_openlog6(WEVT_QUERY_RETENTION);
+        if(!log) {
+            spinlock_unlock(&spinlock);
+            goto cleanup;
+        }
+
+        while (true) {
+            if (!EvtNextChannelPath(hChannelEnum, dwChannelBufferSize, channel, &dwChannelBufferUsed)) {
+                status = GetLastError();
+                if (status == ERROR_NO_MORE_ITEMS)
+                    break; // No more channels
+                else if (status == ERROR_INSUFFICIENT_BUFFER) {
+                    dwChannelBufferSize = dwChannelBufferUsed;
+                    freez(channel);
+                    channel = mallocz(dwChannelBufferSize * sizeof(WCHAR));
+                    continue;
+                } else {
+                    nd_log(NDLS_COLLECTORS, NDLP_ERR,
+                           "WINDOWS EVENTS: EvtNextChannelPath() failed\n");
+                    break;
+                }
+            }
+
+            EVT_RETENTION retention;
+            if(!wevt_channel_retention(log, channel, NULL, &retention))
+                continue;
+
+            LOGS_QUERY_SOURCE *found = dictionary_get(wevt_sources, channel2utf8(channel));
+            if(found) {
+                // we just need to update its retention
+
+                found->last_scan_monotonic_ut = now_monotonic_usec();
+                found->msg_first_id = retention.first_event.id;
+                found->msg_last_id = retention.last_event.id;
+                found->msg_first_ut = retention.first_event.created_ns / NSEC_PER_USEC;
+                found->msg_last_ut = retention.last_event.created_ns / NSEC_PER_USEC;
+                found->size = retention.size_bytes;
+                continue;
+            }
+
+            const char *name = channel2utf8(channel);
+            const char *fullname = strdupz(name);
+            const char *provider;
+
+            WEVT_SOURCE_TYPE sources = categorize_channel(channel, &provider, &property);
+            char *slash = strchr(name, '/');
+            if(slash) *slash = '\0';
+
+            if(strcasecmp(name, "Application") == 0)
+                sources |= WEVTS_WINDOWS;
+            if(strcasecmp(name, "Security") == 0)
+                sources |= WEVTS_WINDOWS;
+            if(strcasecmp(name, "Setup") == 0)
+                sources |= WEVTS_WINDOWS;
+            if(strcasecmp(name, "System") == 0)
+                sources |= WEVTS_WINDOWS;
+
+            LOGS_QUERY_SOURCE src = {
+                .entries = retention.entries,
+                .fullname = fullname,
+                .fullname_len = strlen(fullname),
+                .last_scan_monotonic_ut = now_monotonic_usec(),
+                .msg_first_id = retention.first_event.id,
+                .msg_last_id = retention.last_event.id,
+                .msg_first_ut = retention.first_event.created_ns / NSEC_PER_USEC,
+                .msg_last_ut = retention.last_event.created_ns / NSEC_PER_USEC,
+                .size = retention.size_bytes,
+                .source_type = sources,
+                .source = string_strdupz(fullname),
+            };
+
+            if(strncmp(fullname, "Netdata", 7) == 0)
+                // WEL based providers of Netdata are named NetdataX
+                provider = "Netdata";
+
+            if(provider && *provider) {
+                char buf[sizeof(WEVT_SOURCE_ALL_OF_PROVIDER_PREFIX) + strlen(provider)]; // sizeof() includes the terminator
+                snprintf(buf, sizeof(buf), WEVT_SOURCE_ALL_OF_PROVIDER_PREFIX "%s", provider);
+
+                if(trim_all(buf) != NULL) {
+                    for (size_t i = 0; i < sizeof(buf) - 1; i++) {
+                        // remove characters that may interfere with our parsing
+                        if (isspace((uint8_t) buf[i]) || buf[i] == '%' || buf[i] == '+' || buf[i] == '|' || buf[i] == ':')
+                            buf[i] = '_';
+                    }
+                    src.provider = string_strdupz(buf);
+                }
+            }
+
+            dictionary_set(wevt_sources, src.fullname, &src, sizeof(src));
+        }
+
+//        // add custom queries
+//        for(size_t i = 0; i < sizeof(custom_queries) / sizeof(custom_queries[0]) ;i++) {
+//            EVT_RETENTION retention;
+//            if(!wevt_channel_retention(log, NULL, custom_queries[i].query, &retention))
+//                continue;
+//
+//            LOGS_QUERY_SOURCE src = {
+//                .entries = 0,
+//                .fullname = strdupz(custom_queries[i].name),
+//                .fullname_len = strlen(custom_queries[i].name),
+//                .last_scan_monotonic_ut = now_monotonic_usec(),
+//                .msg_first_id = retention.first_event.id,
+//                .msg_last_id = 
retention.last_event.id, +// .msg_first_ut = retention.first_event.created_ns / NSEC_PER_USEC, +// .msg_last_ut = retention.last_event.created_ns / NSEC_PER_USEC, +// .size = retention.size_bytes, +// .source_type = WEVTS_ALL, +// .source = string_strdupz(custom_queries[i].name), +// }; +// +// dictionary_set(wevt_sources, src.fullname, &src, sizeof(src)); +// } +// + wevt_closelog6(log); + + LOGS_QUERY_SOURCE *src; + dfe_start_write(wevt_sources, src) + { + if(src->last_scan_monotonic_ut < started_ut) { + src->msg_first_id = 0; + src->msg_last_id = 0; + src->msg_first_ut = 0; + src->msg_last_ut = 0; + src->size = 0; + dictionary_del(wevt_sources, src->fullname); + } + } + dfe_done(src); + dictionary_garbage_collect(wevt_sources); + + spinlock_unlock(&spinlock); + + wevt_variant_cleanup(&property); + } + +cleanup: + freez(channel); + EvtClose(hChannelEnum); +} diff --git a/src/collectors/windows-events.plugin/windows-events-sources.h b/src/collectors/windows-events.plugin/windows-events-sources.h new file mode 100644 index 000000000..4ad4880d7 --- /dev/null +++ b/src/collectors/windows-events.plugin/windows-events-sources.h @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_WINDOWS_EVENTS_SOURCES_H +#define NETDATA_WINDOWS_EVENTS_SOURCES_H + +#include "libnetdata/libnetdata.h" + +typedef enum { + WEVTS_NONE = 0, + WEVTS_ALL = (1 << 0), + WEVTS_ADMIN = (1 << 1), + WEVTS_OPERATIONAL = (1 << 2), + WEVTS_ANALYTIC = (1 << 3), + WEVTS_DEBUG = (1 << 4), + WEVTS_WINDOWS = (1 << 5), + WEVTS_ENABLED = (1 << 6), + WEVTS_DISABLED = (1 << 7), + WEVTS_FORWARDED = (1 << 8), + WEVTS_CLASSIC = (1 << 9), + WEVTS_BACKUP_MODE = (1 << 10), + WEVTS_OVERWRITE_MODE = (1 << 11), + WEVTS_STOP_WHEN_FULL_MODE = (1 << 12), + WEVTS_RETAIN_AND_BACKUP_MODE = (1 << 13), +} WEVT_SOURCE_TYPE; + +BITMAP_STR_DEFINE_FUNCTIONS_EXTERN(WEVT_SOURCE_TYPE) + +#define WEVT_SOURCE_ALL_NAME "All" +#define WEVT_SOURCE_ALL_ADMIN_NAME "All-Admin" +#define WEVT_SOURCE_ALL_OPERATIONAL_NAME "All-Operational" +#define WEVT_SOURCE_ALL_ANALYTIC_NAME "All-Analytic" +#define WEVT_SOURCE_ALL_DEBUG_NAME "All-Debug" +#define WEVT_SOURCE_ALL_WINDOWS_NAME "All-Windows" +#define WEVT_SOURCE_ALL_ENABLED_NAME "All-Enabled" +#define WEVT_SOURCE_ALL_DISABLED_NAME "All-Disabled" +#define WEVT_SOURCE_ALL_FORWARDED_NAME "All-Forwarded" +#define WEVT_SOURCE_ALL_CLASSIC_NAME "All-Classic" +#define WEVT_SOURCE_ALL_BACKUP_MODE_NAME "All-In-Backup-Mode" +#define WEVT_SOURCE_ALL_OVERWRITE_MODE_NAME "All-In-Overwrite-Mode" +#define WEVT_SOURCE_ALL_STOP_WHEN_FULL_MODE_NAME "All-In-StopWhenFull-Mode" +#define WEVT_SOURCE_ALL_RETAIN_AND_BACKUP_MODE_NAME "All-In-RetainAndBackup-Mode" + +#define WEVT_SOURCE_ALL_OF_PROVIDER_PREFIX "All-Of-" + +typedef struct { + const char *fullname; + size_t fullname_len; + + const wchar_t *custom_query; + + STRING *source; + STRING *provider; + WEVT_SOURCE_TYPE source_type; + usec_t msg_first_ut; + usec_t msg_last_ut; + size_t size; + + usec_t last_scan_monotonic_ut; + + uint64_t msg_first_id; + uint64_t msg_last_id; + uint64_t entries; +} LOGS_QUERY_SOURCE; + +extern DICTIONARY *wevt_sources; +extern DICTIONARY *used_hashes_registry; + +void wevt_sources_init(void); +void wevt_sources_scan(void); +void buffer_json_wevt_versions(BUFFER *wb); + +void wevt_sources_to_json_array(BUFFER *wb); +WEVT_SOURCE_TYPE wevt_internal_source_type(const char *value); + +int wevt_sources_dict_items_backward_compar(const void *a, const void *b); +int wevt_sources_dict_items_forward_compar(const void *a, const void *b); + 
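+
+// Minimal usage sketch of this API, assuming single-threaded initialization:
+//
+//    wevt_sources_init();   // create the source and hash registries
+//    wevt_sources_scan();   // enumerate the channels and their retention
+//    // wevt_sources_to_json_array(wb) then appends one object per source
+//    // to an already-open json array in wb
+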
+#endif //NETDATA_WINDOWS_EVENTS_SOURCES_H diff --git a/src/collectors/windows-events.plugin/windows-events-unicode.c b/src/collectors/windows-events.plugin/windows-events-unicode.c new file mode 100644 index 000000000..81da31107 --- /dev/null +++ b/src/collectors/windows-events.plugin/windows-events-unicode.c @@ -0,0 +1,46 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "windows-events-unicode.h" + +inline void utf82unicode(wchar_t *dst, size_t dst_size, const char *src) { + if (src) { + // Convert from UTF-8 to wide char (UTF-16) + if (utf8_to_utf16(dst, dst_size, src, -1) == 0) + wcsncpy(dst, L"[failed conv.]", dst_size - 1); + } + else + wcsncpy(dst, L"[null]", dst_size - 1); +} + +inline void unicode2utf8(char *dst, size_t dst_size, const wchar_t *src) { + if (src) { + if(WideCharToMultiByte(CP_UTF8, 0, src, -1, dst, (int)dst_size, NULL, NULL) == 0) + strncpyz(dst, "[failed conv.]", dst_size - 1); + } + else + strncpyz(dst, "[null]", dst_size - 1); +} + +wchar_t *channel2unicode(const char *utf8str) { + static __thread wchar_t buffer[1024]; + utf82unicode(buffer, _countof(buffer), utf8str); + return buffer; +} + +char *channel2utf8(const wchar_t *channel) { + static __thread char buffer[1024]; + unicode2utf8(buffer, sizeof(buffer), channel); + return buffer; +} + +char *query2utf8(const wchar_t *query) { + static __thread char buffer[16384]; + unicode2utf8(buffer, sizeof(buffer), query); + return buffer; +} + +char *provider2utf8(const wchar_t *provider) { + static __thread char buffer[256]; + unicode2utf8(buffer, sizeof(buffer), provider); + return buffer; +} diff --git a/src/collectors/windows-events.plugin/windows-events-unicode.h b/src/collectors/windows-events.plugin/windows-events-unicode.h new file mode 100644 index 000000000..e932bb5df --- /dev/null +++ b/src/collectors/windows-events.plugin/windows-events-unicode.h @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_WINDOWS_EVENTS_UNICODE_H +#define NETDATA_WINDOWS_EVENTS_UNICODE_H + +#include "libnetdata/libnetdata.h" + +#define WINEVENT_NAME_KEYWORDS_SEPARATOR ", " +static inline void txt_utf8_add_keywords_separator_if_needed(TXT_UTF8 *dst) { + if(dst->used > 1) + txt_utf8_append(dst, WINEVENT_NAME_KEYWORDS_SEPARATOR, sizeof(WINEVENT_NAME_KEYWORDS_SEPARATOR) - 1); +} + +static inline void txt_utf8_set_numeric_if_empty(TXT_UTF8 *dst, const char *prefix, size_t len, uint64_t value) { + if(dst->used <= 1) { + txt_utf8_resize(dst, len + UINT64_MAX_LENGTH + 1, false); + memcpy(dst->data, prefix, len); + dst->used = len + print_uint64(&dst->data[len], value) + 1; + } +} + +static inline void txt_utf8_set_hex_if_empty(TXT_UTF8 *dst, const char *prefix, size_t len, uint64_t value) { + if(dst->used <= 1) { + txt_utf8_resize(dst, len + UINT64_HEX_MAX_LENGTH + 1, false); + memcpy(dst->data, prefix, len); + dst->used = len + print_uint64_hex_full(&dst->data[len], value) + 1; + } +} + +// -------------------------------------------------------------------------------------------------------------------- +// conversions + +void unicode2utf8(char *dst, size_t dst_size, const wchar_t *src); +void utf82unicode(wchar_t *dst, size_t dst_size, const char *src); + +char *channel2utf8(const wchar_t *channel); +wchar_t *channel2unicode(const char *utf8str); + +char *query2utf8(const wchar_t *query); +char *provider2utf8(const wchar_t *provider); + +#endif //NETDATA_WINDOWS_EVENTS_UNICODE_H diff --git a/src/collectors/windows-events.plugin/windows-events-xml.c 
b/src/collectors/windows-events.plugin/windows-events-xml.c
new file mode 100644
index 000000000..931ea6c54
--- /dev/null
+++ b/src/collectors/windows-events.plugin/windows-events-xml.c
@@ -0,0 +1,344 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "windows-events-xml.h"
+
+#include <string.h>
+#include <ctype.h>
+
+#define INDENT_STEP 2
+#define A_LOT_OF_SPACES "                                                                                "
+
+// Helper: Add indentation
+static inline void buffer_add_xml_indent(BUFFER *buffer, const int level) {
+    size_t total_spaces = (size_t)level * INDENT_STEP;
+    const size_t step = sizeof(A_LOT_OF_SPACES) - 1;
+    while (total_spaces > 0) {
+        const size_t spaces_to_add = (total_spaces > step) ? step : total_spaces;
+        buffer_fast_strcat(buffer, A_LOT_OF_SPACES, spaces_to_add);
+        total_spaces -= spaces_to_add;
+    }
+}
+
+const char *append_the_rest(BUFFER *buffer, const char *xml, const char *end) {
+    if(xml >= end) return end;
+    buffer_fast_strcat(buffer, xml, end - xml);
+    return end;
+}
+
+static const char *parse_node(BUFFER *buffer, const char *xml, const char *end, int level);
+
+// Helper: Parse the value (between > and <) and return the next position to parse
+const char *parse_value_and_closing_tag(BUFFER *buffer, const char *xml, const char *end, int level) {
+    const char *start = xml;
+    bool has_subnodes = false;
+
+    // const char *tag_start = NULL, *tag_end = NULL;
+    while (xml < end) {
+        if(*xml == '<') {
+            if(xml + 1 < end && *(xml + 1) == '/') {
+                // a closing tag
+                xml += 2;
+
+//                tag_start = xml;
+
+                while(xml < end && *xml != '>')
+                    xml++;
+
+//                tag_end = xml;
+
+                if(xml < end && *xml == '>')
+                    xml++;
+
+                if(has_subnodes) {
+                    buffer_putc(buffer, '\n');
+                    buffer_add_xml_indent(buffer, level);
+                }
+
+                buffer_fast_strcat(buffer, start, xml - start);
+                return xml;
+            }
+            else {
+                // an opening tag
+                buffer_fast_strcat(buffer, start, xml - start);
+                xml = start = parse_node(buffer, xml, end, level + 1);
+                while(xml < end && isspace((uint8_t)*xml))
+                    xml++;
+                has_subnodes = true;
+            }
+        }
+        else
+            xml++;
+    }
+
+    return append_the_rest(buffer, start, end);
+}
+
+// Parse a field value and return the next position to parse
+const char *parse_field_value(BUFFER *buffer, const char *xml, const char *end) {
+    const char quote = *xml;
+
+    if(quote != '"' && quote != '\'')
+        return append_the_rest(buffer, xml, end);
+
+    const char *start = xml++;
+
+    while (xml < end && *xml != quote) {
+        if (*xml == '\\') {
+            xml++; // Skip the escape character
+
+            if(xml < end)
+                xml++;
+
+            continue;
+        }
+
+        xml++;
+    }
+
+    if(xml < end && *xml == quote) {
+        xml++; // Move past the closing quote
+        buffer_fast_strcat(buffer, start, xml - start);
+        return xml;
+    }
+
+    return append_the_rest(buffer, start, end);
+}
+
+// Parse a field name and return the next position to parse
+const char *parse_field(BUFFER *buffer, const char *xml, const char *end) {
+    while(xml < end && isspace((uint8_t)*xml)) xml++;
+
+    const char *start = xml;
+
+    while (xml < end && *xml != '=')
+        xml++;
+
+    // Append the field name
+    buffer_fast_strcat(buffer, start, xml - start);
+
+    if(xml < end && *xml == '=') {
+        xml++;
+
+        buffer_putc(buffer, '=');
+
+        if(xml < end && (*xml == '"' || *xml == '\''))
+            xml = parse_field_value(buffer, xml, end);
+
+        return xml; // Return the next character to parse
+    }
+
+    return append_the_rest(buffer, start, end);
+}
+
+// Parse a node (handles fields and subnodes) and return the next position to parse
+static inline const char *parse_node(BUFFER *buffer, const char *xml, const char *end, int level) {
+    if(*xml != '<')
+        return 
append_the_rest(buffer, xml, end); + + const char *start = xml++; // skip the < + + buffer_putc(buffer, '\n'); + buffer_add_xml_indent(buffer, level); + + // skip spaces before the tag name + while(xml < end && isspace((uint8_t)*xml)) xml++; + + // Parse the tag name +// const char *tag_start = xml, *tag_end = NULL; + while (xml < end && *xml != '>' && *xml != '/') { + xml++; + + if(xml < end && isspace((uint8_t)*xml)) { + xml++; +// tag_end = xml; + + while(xml < end && isspace((uint8_t)*xml)) + xml++; + + if(xml < end && *xml == '/') { + // an opening tag that is self-closing + xml++; + if(xml < end && *xml == '>') { + xml++; + buffer_fast_strcat(buffer, start, xml - start); + return xml; + } + else + return append_the_rest(buffer, start, end); + } + else if(xml < end && *xml == '>') { + // the end of an opening tag + xml++; + buffer_fast_strcat(buffer, start, xml - start); + return parse_value_and_closing_tag(buffer, xml, end, level); + } + else { + buffer_fast_strcat(buffer, start, xml - start); + xml = start = parse_field(buffer, xml, end); + while(xml < end && isspace((uint8_t)*xml)) + xml++; + } + } + } + + bool self_closing_tag = false; + if(xml < end && *xml == '/') { + self_closing_tag = true; + xml++; + } + + if(xml < end && *xml == '>') { + xml++; + buffer_fast_strcat(buffer, start, xml - start); + + if(self_closing_tag) + return xml; + + return parse_value_and_closing_tag(buffer, xml, end, level); + } + + return append_the_rest(buffer, start, end); +} + +static inline void buffer_pretty_print_xml_object(BUFFER *buffer, const char *xml, const char *end) { + while(xml < end) { + while(xml < end && isspace((uint8_t)*xml)) + xml++; + + if(xml < end && *xml == '<') + xml = parse_node(buffer, xml, end, 1); + else { + append_the_rest(buffer, xml, end); + return; + } + } +} + +void buffer_pretty_print_xml(BUFFER *buffer, const char *xml, size_t xml_len) { + const char *end = xml + xml_len; + buffer_pretty_print_xml_object(buffer, xml, end); +} + +// -------------------------------------------------------------------------------------------------------------------- + +bool buffer_extract_and_print_xml_with_cb(BUFFER *buffer, const char *xml, size_t xml_len, const char *prefix, const char *keys[], + void (*cb)(BUFFER *, const char *, const char *, const char *)) { + if(!keys || !*keys[0]) { + buffer_pretty_print_xml(buffer, xml, xml_len); + return true; + } + + const char *start = xml, *end = NULL; + for(size_t k = 0; keys[k] ; k++) { + if(!*keys[k]) continue; + + size_t klen = strlen(keys[k]); + char tag_open[klen + 2]; + tag_open[0] = '<'; + strcpy(&tag_open[1], keys[k]); + tag_open[klen + 1] = '\0'; + + const char *new_start = strstr(start, tag_open); + if(!new_start) + return false; + + start = new_start + klen + 1; + + if(*start != '>' && !isspace((uint8_t)*start)) + return false; + + if(*start != '>') { + start = strchr(start, '>'); + if(!start) return false; + } + start++; // skip the > + + char tag_close[klen + 4]; + tag_close[0] = '<'; + tag_close[1] = '/'; + strcpy(&tag_close[2], keys[k]); + tag_close[klen + 2] = '>'; + tag_close[klen + 3] = '\0'; + + const char *new_end = strstr(start, tag_close); + if(!new_end || (end && new_end > end)) + return false; + + end = new_end; + } + + if(!start || !end || start == end) + return false; + + cb(buffer, prefix, start, end); + return true; +} + +static void print_xml_cb(BUFFER *buffer, const char *prefix, const char *start, const char *end) { + if(prefix) + buffer_strcat(buffer, prefix); + + buffer_pretty_print_xml_object(buffer, 
start, end); +} + +bool buffer_extract_and_print_xml(BUFFER *buffer, const char *xml, size_t xml_len, const char *prefix, const char *keys[]) { + return buffer_extract_and_print_xml_with_cb( + buffer, xml, xml_len, + prefix, keys, + print_xml_cb); +} + +static void print_value_cb(BUFFER *buffer, const char *prefix, const char *start, const char *end) { + if(prefix) + buffer_strcat(buffer, prefix); + + buffer_need_bytes(buffer, end - start + 1); + + char *started = &buffer->buffer[buffer->len]; + char *d = started; + const char *s = start; + + while(s < end && s) { + if(*s == '&' && s + 3 < end) { + if(*(s + 1) == '#') { + if(s + 4 < end && *(s + 2) == '1' && *(s + 4) == ';') { + if (*(s + 3) == '0') { + s += 5; + *d++ = '\n'; + continue; + } else if (*(s + 3) == '3') { + s += 5; + // *d++ = '\r'; + continue; + } + } else if (*(s + 2) == '9' && *(s + 3) == ';') { + s += 4; + *d++ = '\t'; + continue; + } + } + else if(s + 3 < end && *(s + 2) == 't' && *(s + 3) == ';') { + if(*(s + 1) == 'l') { + s += 4; + *d++ = '<'; + continue; + } + else if(*(s + 1) == 'g') { + s += 4; + *d++ = '>'; + continue; + } + } + } + *d++ = *s++; + } + *d = '\0'; + buffer->len += d - started; +} + +bool buffer_xml_extract_and_print_value(BUFFER *buffer, const char *xml, size_t xml_len, const char *prefix, const char *keys[]) { + return buffer_extract_and_print_xml_with_cb( + buffer, xml, xml_len, + prefix, keys, + print_value_cb); +} diff --git a/src/collectors/windows-events.plugin/windows-events-xml.h b/src/collectors/windows-events.plugin/windows-events-xml.h new file mode 100644 index 000000000..78d2f686e --- /dev/null +++ b/src/collectors/windows-events.plugin/windows-events-xml.h @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef WINDOWS_EVENTS_XML_H +#define WINDOWS_EVENTS_XML_H + +#include "libnetdata/libnetdata.h" + +void buffer_pretty_print_xml(BUFFER *buffer, const char *xml, size_t xml_len); +bool buffer_extract_and_print_xml(BUFFER *buffer, const char *xml, size_t xml_len, const char *prefix, const char *keys[]); +bool buffer_xml_extract_and_print_value(BUFFER *buffer, const char *xml, size_t xml_len, const char *prefix, const char *keys[]); + +#endif //WINDOWS_EVENTS_XML_H diff --git a/src/collectors/windows-events.plugin/windows-events.c b/src/collectors/windows-events.plugin/windows-events.c new file mode 100644 index 000000000..09ce558ae --- /dev/null +++ b/src/collectors/windows-events.plugin/windows-events.c @@ -0,0 +1,1402 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "libnetdata/libnetdata.h" +#include "libnetdata/required_dummies.h" + +#include "windows-events.h" + +netdata_mutex_t stdout_mutex = NETDATA_MUTEX_INITIALIZER; +static bool plugin_should_exit = false; + +#define WEVT_ALWAYS_VISIBLE_KEYS NULL + +#define WEVT_KEYS_EXCLUDED_FROM_FACETS \ + "|" WEVT_FIELD_MESSAGE \ + "|" WEVT_FIELD_XML \ + "" + +#define WEVT_KEYS_INCLUDED_IN_FACETS \ + "|" WEVT_FIELD_COMPUTER \ + "|" WEVT_FIELD_PROVIDER \ + "|" WEVT_FIELD_LEVEL \ + "|" WEVT_FIELD_KEYWORDS \ + "|" WEVT_FIELD_OPCODE \ + "|" WEVT_FIELD_TASK \ + "|" WEVT_FIELD_ACCOUNT \ + "|" WEVT_FIELD_DOMAIN \ + "|" WEVT_FIELD_SID \ + "" + +#define query_has_fts(lqs) ((lqs)->rq.query != NULL) + +static inline WEVT_QUERY_STATUS check_stop(const bool *cancelled, const usec_t *stop_monotonic_ut) { + if(cancelled && __atomic_load_n(cancelled, __ATOMIC_RELAXED)) { + nd_log(NDLS_COLLECTORS, NDLP_INFO, "Function has been cancelled"); + return WEVT_CANCELLED; + } + + if(now_monotonic_usec() > 
__atomic_load_n(stop_monotonic_ut, __ATOMIC_RELAXED)) { + internal_error(true, "Function timed out"); + return WEVT_TIMED_OUT; + } + + return WEVT_OK; +} + +FACET_ROW_SEVERITY wevt_levelid_to_facet_severity(FACETS *facets __maybe_unused, FACET_ROW *row, void *data __maybe_unused) { + FACET_ROW_KEY_VALUE *levelid_rkv = dictionary_get(row->dict, WEVT_FIELD_LEVEL "ID"); + if(!levelid_rkv || levelid_rkv->empty) + return FACET_ROW_SEVERITY_NORMAL; + + int windows_event_level = str2i(buffer_tostring(levelid_rkv->wb)); + + switch (windows_event_level) { + case WEVT_LEVEL_VERBOSE: + return FACET_ROW_SEVERITY_DEBUG; + + default: + case WEVT_LEVEL_INFORMATION: + return FACET_ROW_SEVERITY_NORMAL; + + case WEVT_LEVEL_WARNING: + return FACET_ROW_SEVERITY_WARNING; + + case WEVT_LEVEL_ERROR: + case WEVT_LEVEL_CRITICAL: + return FACET_ROW_SEVERITY_CRITICAL; + } +} + +struct wevt_bin_data { + bool rendered; + WEVT_EVENT ev; + WEVT_LOG *log; + EVT_HANDLE hEvent; + PROVIDER_META_HANDLE *provider; +}; + +static void wevt_cleanup_bin_data(void *data) { + struct wevt_bin_data *d = data; + + if(d->hEvent) + EvtClose(d->hEvent); + + provider_release(d->provider); + freez(d); +} + +static inline void wevt_facets_register_bin_data(WEVT_LOG *log, FACETS *facets, WEVT_EVENT *ev) { + struct wevt_bin_data *d = mallocz(sizeof(struct wevt_bin_data)); + +#ifdef NETDATA_INTERNAL_CHECKS + internal_fatal(strcmp(log->ops.provider.data, provider_get_name(log->provider)) != 0, + "Provider name mismatch in data!"); + + internal_fatal(!UUIDeq(ev->provider, provider_get_uuid(log->provider)), + "Provider UUID mismatch in data!"); +#endif + + d->ev = *ev; + d->log = log; + d->rendered = false; + + // take the bookmark + d->hEvent = log->hEvent; log->hEvent = NULL; + + // dup the provider + d->provider = provider_dup(log->provider); + + facets_row_bin_data_set(facets, wevt_cleanup_bin_data, d); +} + +static void wevt_lazy_loading_event_and_xml(struct wevt_bin_data *d, FACET_ROW *row __maybe_unused) { + if(d->rendered) return; + +#ifdef NETDATA_INTERNAL_CHECKS + const FACET_ROW_KEY_VALUE *provider_rkv = dictionary_get(row->dict, WEVT_FIELD_PROVIDER); + internal_fatal(!provider_rkv || strcmp(buffer_tostring(provider_rkv->wb), provider_get_name(d->provider)) != 0, + "Provider of row does not match the bin data associated with it"); + + uint64_t event_record_id = UINT64_MAX; + const FACET_ROW_KEY_VALUE *event_record_id_rkv = dictionary_get(row->dict, WEVT_FIELD_EVENTRECORDID); + if(event_record_id_rkv) + event_record_id = str2uint64_t(buffer_tostring(event_record_id_rkv->wb), NULL); + internal_fatal(event_record_id != d->ev.id, + "Event Record ID of row does not match the bin data associated with it"); +#endif + + // the message needs the xml + EvtFormatMessage_Xml_utf8(&d->log->ops.unicode, d->provider, d->hEvent, &d->log->ops.xml); + EvtFormatMessage_Event_utf8(&d->log->ops.unicode, d->provider, d->hEvent, &d->log->ops.event); + d->rendered = true; +} + +static void wevt_lazy_load_xml( + FACETS *facets, + BUFFER *json_array, + FACET_ROW_KEY_VALUE *rkv __maybe_unused, + FACET_ROW *row, + void *data __maybe_unused) { + + struct wevt_bin_data *d = facets_row_bin_data_get(facets, row); + if(!d) { + buffer_json_add_array_item_string(json_array, "Failed to get row BIN DATA from facets"); + return; + } + + wevt_lazy_loading_event_and_xml(d, row); + buffer_json_add_array_item_string(json_array, d->log->ops.xml.data); +} + +static void wevt_lazy_load_message( + FACETS *facets, + BUFFER *json_array, + FACET_ROW_KEY_VALUE *rkv __maybe_unused, + 
FACET_ROW *row, + void *data __maybe_unused) { + + struct wevt_bin_data *d = facets_row_bin_data_get(facets, row); + if(!d) { + buffer_json_add_array_item_string(json_array, "Failed to get row BIN DATA from facets"); + return; + } + + wevt_lazy_loading_event_and_xml(d, row); + + if(d->log->ops.event.used <= 1) { + TXT_UTF8 *xml = &d->log->ops.xml; + + buffer_flush(rkv->wb); + + bool added_message = false; + if(xml->used > 1) { + const char *message_path[] = { + "RenderingInfo", + "Message", + NULL}; + + added_message = buffer_xml_extract_and_print_value( + rkv->wb, + xml->data, xml->used - 1, + NULL, + message_path); + } + + if(!added_message) { + const FACET_ROW_KEY_VALUE *event_id_rkv = dictionary_get(row->dict, WEVT_FIELD_EVENTID); + if (event_id_rkv && buffer_strlen(event_id_rkv->wb)) { + buffer_fast_strcat(rkv->wb, "Event ", 6); + buffer_fast_strcat(rkv->wb, buffer_tostring(event_id_rkv->wb), buffer_strlen(event_id_rkv->wb)); + } else + buffer_strcat(rkv->wb, "Unknown Event "); + + const FACET_ROW_KEY_VALUE *provider_rkv = dictionary_get(row->dict, WEVT_FIELD_PROVIDER); + if (provider_rkv && buffer_strlen(provider_rkv->wb)) { + buffer_fast_strcat(rkv->wb, " of ", 4); + buffer_fast_strcat(rkv->wb, buffer_tostring(provider_rkv->wb), buffer_strlen(provider_rkv->wb)); + buffer_putc(rkv->wb, '.'); + } else + buffer_strcat(rkv->wb, "of unknown Provider."); + } + + if(xml->used > 1) { + const char *event_path[] = { + "EventData", + NULL + }; + bool added_event_data = buffer_extract_and_print_xml( + rkv->wb, + xml->data, xml->used - 1, + "\n\nRelated event data:\n", + event_path); + + const char *user_path[] = { + "UserData", + NULL + }; + bool added_user_data = buffer_extract_and_print_xml( + rkv->wb, + xml->data, xml->used - 1, + "\n\nRelated user data:\n", + user_path); + + if(!added_event_data && !added_user_data) + buffer_strcat(rkv->wb, " Without any related data."); + } + + buffer_json_add_array_item_string(json_array, buffer_tostring(rkv->wb)); + } + else + buffer_json_add_array_item_string(json_array, d->log->ops.event.data); +} + +static void wevt_register_fields(LOGS_QUERY_STATUS *lqs) { + // the order of the fields here, controls the order of the fields at the table presented + + FACETS *facets = lqs->facets; + LOGS_QUERY_REQUEST *rq = &lqs->rq; + + facets_register_row_severity(facets, wevt_levelid_to_facet_severity, NULL); + + facets_register_key_name( + facets, WEVT_FIELD_COMPUTER, + rq->default_facet | FACET_KEY_OPTION_VISIBLE); + + facets_register_key_name( + facets, WEVT_FIELD_CHANNEL, + rq->default_facet | FACET_KEY_OPTION_FTS); + + facets_register_key_name( + facets, WEVT_FIELD_PROVIDER, + rq->default_facet | FACET_KEY_OPTION_VISIBLE | FACET_KEY_OPTION_FTS); + + facets_register_key_name( + facets, WEVT_FIELD_ACCOUNT, + rq->default_facet | FACET_KEY_OPTION_FTS); + + facets_register_key_name( + facets, WEVT_FIELD_DOMAIN, + rq->default_facet | FACET_KEY_OPTION_FTS); + + facets_register_key_name( + facets, WEVT_FIELD_SID, + rq->default_facet | FACET_KEY_OPTION_FTS); + + facets_register_key_name( + facets, WEVT_FIELD_EVENTID, + rq->default_facet | + FACET_KEY_OPTION_VISIBLE | FACET_KEY_OPTION_FTS); + + facets_register_key_name( + facets, WEVT_FIELD_EVENTS_API, + rq->default_facet | + FACET_KEY_OPTION_FTS); + + facets_register_key_name( + facets, WEVT_FIELD_LEVEL, + rq->default_facet | FACET_KEY_OPTION_FTS | FACET_KEY_OPTION_EXPANDED_FILTER); + + facets_register_key_name( + facets, WEVT_FIELD_LEVEL "ID", + FACET_KEY_OPTION_NONE); + + facets_register_key_name( + facets, 
WEVT_FIELD_PROCESSID, + FACET_KEY_OPTION_FTS); + + facets_register_key_name( + facets, WEVT_FIELD_THREADID, + FACET_KEY_OPTION_FTS); + + facets_register_key_name( + facets, WEVT_FIELD_TASK, + rq->default_facet | FACET_KEY_OPTION_FTS | FACET_KEY_OPTION_VISIBLE); + + facets_register_key_name( + facets, WEVT_FIELD_TASK "ID", + FACET_KEY_OPTION_NONE); + + facets_register_key_name( + facets, WEVT_FIELD_OPCODE, + rq->default_facet | FACET_KEY_OPTION_FTS | FACET_KEY_OPTION_VISIBLE); + + facets_register_key_name( + facets, WEVT_FIELD_OPCODE "ID", + FACET_KEY_OPTION_NONE); + + facets_register_key_name( + facets, WEVT_FIELD_KEYWORDS, + rq->default_facet | FACET_KEY_OPTION_FTS); + + facets_register_key_name( + facets, WEVT_FIELD_KEYWORDS "ID", + FACET_KEY_OPTION_NONE); + + facets_register_dynamic_key_name( + facets, + WEVT_FIELD_MESSAGE, + FACET_KEY_OPTION_NEVER_FACET | FACET_KEY_OPTION_MAIN_TEXT | FACET_KEY_OPTION_VISIBLE, + wevt_lazy_load_message, + NULL); + + facets_register_dynamic_key_name( + facets, + WEVT_FIELD_XML, + FACET_KEY_OPTION_NEVER_FACET | FACET_KEY_OPTION_PRETTY_XML, + wevt_lazy_load_xml, + NULL); + + if(query_has_fts(lqs)) { + facets_register_key_name( + facets, WEVT_FIELD_EVENT_MESSAGE_HIDDEN, + FACET_KEY_OPTION_FTS | FACET_KEY_OPTION_HIDDEN | FACET_KEY_OPTION_NEVER_FACET); + + facets_register_key_name( + facets, WEVT_FIELD_EVENT_XML_HIDDEN, + FACET_KEY_OPTION_FTS | FACET_KEY_OPTION_HIDDEN | FACET_KEY_OPTION_NEVER_FACET); + + facets_register_key_name( + facets, WEVT_FIELD_EVENT_DATA_HIDDEN, + FACET_KEY_OPTION_FTS | FACET_KEY_OPTION_HIDDEN | FACET_KEY_OPTION_NEVER_FACET); + } + +#ifdef NETDATA_INTERNAL_CHECKS + facets_register_key_name( + facets, "z_level_source", + rq->default_facet); + + facets_register_key_name( + facets, "z_keywords_source", + rq->default_facet); + + facets_register_key_name( + facets, "z_opcode_source", + rq->default_facet); + + facets_register_key_name( + facets, "z_task_source", + rq->default_facet); +#endif +} + +#ifdef NETDATA_INTERNAL_CHECKS +static const char *source_to_str(TXT_UTF8 *txt) { + switch(txt->src) { + default: + case TXT_SOURCE_UNKNOWN: + return "unknown"; + + case TXT_SOURCE_EVENT_LOG: + return "event-log"; + + case TXT_SOURCE_PROVIDER: + return "provider"; + + case TXT_SOURCE_FIELD_CACHE: + return "fields-cache"; + + case TXT_SOURCE_HARDCODED: + return "hardcoded"; + } +} +#endif + +static const char *events_api_to_str(WEVT_PROVIDER_PLATFORM platform) { + switch(platform) { + case WEVT_PLATFORM_WEL: + return "Windows Event Log"; + + case WEVT_PLATFORM_ETW: + return "Event Tracing for Windows"; + + case WEVT_PLATFORM_TL: + return "TraceLogging"; + + default: + return "Unknown"; + } +} + +static inline size_t wevt_process_event(WEVT_LOG *log, FACETS *facets, LOGS_QUERY_SOURCE *src, usec_t *msg_ut __maybe_unused, WEVT_EVENT *ev) { + static __thread char uuid_str[UUID_STR_LEN]; + + size_t len, bytes = log->ops.raw.system.used + log->ops.raw.user.used; + + if(!UUIDiszero(ev->provider)) { + uuid_unparse_lower(ev->provider.uuid, uuid_str); + facets_add_key_value_length( + facets, WEVT_FIELD_PROVIDER_GUID, sizeof(WEVT_FIELD_PROVIDER_GUID) - 1, + uuid_str, sizeof(uuid_str) - 1); + } + + if(!UUIDiszero(ev->activity_id)) { + uuid_unparse_lower(ev->activity_id.uuid, uuid_str); + facets_add_key_value_length( + facets, WEVT_FIELD_ACTIVITY_ID, sizeof(WEVT_FIELD_ACTIVITY_ID) - 1, + uuid_str, sizeof(uuid_str) - 1); + } + + if(!UUIDiszero(ev->related_activity_id)) { + uuid_unparse_lower(ev->related_activity_id.uuid, uuid_str); + facets_add_key_value_length( + 
facets, WEVT_FIELD_RELATED_ACTIVITY_ID, sizeof(WEVT_FIELD_RELATED_ACTIVITY_ID) - 1, + uuid_str, sizeof(uuid_str) - 1); + } + + if(ev->qualifiers) { + static __thread char qualifiers[UINT64_HEX_MAX_LENGTH]; + len = print_uint64_hex(qualifiers, ev->qualifiers); + bytes += len; + facets_add_key_value_length( + facets, WEVT_FIELD_QUALIFIERS, sizeof(WEVT_FIELD_QUALIFIERS) - 1, + qualifiers, len); + } + + { + static __thread char event_record_id_str[UINT64_MAX_LENGTH]; + len = print_uint64(event_record_id_str, ev->id); + bytes += len; + facets_add_key_value_length( + facets, WEVT_FIELD_EVENTRECORDID, sizeof(WEVT_FIELD_EVENTRECORDID) - 1, + event_record_id_str, len); + } + + if(ev->version) { + static __thread char version[UINT64_MAX_LENGTH]; + len = print_uint64(version, ev->version); + bytes += len; + facets_add_key_value_length( + facets, WEVT_FIELD_VERSION, sizeof(WEVT_FIELD_VERSION) - 1, + version, len); + } + + if(log->ops.provider.used > 1) { + bytes += log->ops.provider.used * 2; // unicode is double + facets_add_key_value_length( + facets, WEVT_FIELD_PROVIDER, sizeof(WEVT_FIELD_PROVIDER) - 1, + log->ops.provider.data, log->ops.provider.used - 1); + } + + if(log->ops.channel.used > 1) { + bytes += log->ops.channel.used * 2; + facets_add_key_value_length( + facets, WEVT_FIELD_CHANNEL, sizeof(WEVT_FIELD_CHANNEL) - 1, + log->ops.channel.data, log->ops.channel.used - 1); + } + else { + bytes += src->fullname_len * 2; + facets_add_key_value_length( + facets, WEVT_FIELD_CHANNEL, sizeof(WEVT_FIELD_CHANNEL) - 1, + src->fullname, src->fullname_len); + } + + if(log->ops.level.used > 1) { + bytes += log->ops.level.used * 2; + facets_add_key_value_length( + facets, WEVT_FIELD_LEVEL, sizeof(WEVT_FIELD_LEVEL) - 1, + log->ops.level.data, log->ops.level.used - 1); + } + + if(log->ops.computer.used > 1) { + bytes += log->ops.computer.used * 2; + facets_add_key_value_length( + facets, WEVT_FIELD_COMPUTER, sizeof(WEVT_FIELD_COMPUTER) - 1, + log->ops.computer.data, log->ops.computer.used - 1); + } + + if(log->ops.opcode.used > 1) { + bytes += log->ops.opcode.used * 2; + facets_add_key_value_length( + facets, WEVT_FIELD_OPCODE, sizeof(WEVT_FIELD_OPCODE) - 1, + log->ops.opcode.data, log->ops.opcode.used - 1); + } + + if(log->ops.keywords.used > 1) { + bytes += log->ops.keywords.used * 2; + facets_add_key_value_length( + facets, WEVT_FIELD_KEYWORDS, sizeof(WEVT_FIELD_KEYWORDS) - 1, + log->ops.keywords.data, log->ops.keywords.used - 1); + } + + if(log->ops.task.used > 1) { + bytes += log->ops.task.used * 2; + facets_add_key_value_length( + facets, WEVT_FIELD_TASK, sizeof(WEVT_FIELD_TASK) - 1, + log->ops.task.data, log->ops.task.used - 1); + } + + if(log->ops.account.used > 1) { + bytes += log->ops.account.used * 2; + facets_add_key_value_length( + facets, + WEVT_FIELD_ACCOUNT, sizeof(WEVT_FIELD_ACCOUNT) - 1, + log->ops.account.data, log->ops.account.used - 1); + } + + if(log->ops.domain.used > 1) { + bytes += log->ops.domain.used * 2; + facets_add_key_value_length( + facets, + WEVT_FIELD_DOMAIN, sizeof(WEVT_FIELD_DOMAIN) - 1, + log->ops.domain.data, log->ops.domain.used - 1); + } + + if(log->ops.sid.used > 1) { + bytes += log->ops.sid.used * 2; + facets_add_key_value_length( + facets, + WEVT_FIELD_SID, sizeof(WEVT_FIELD_SID) - 1, + log->ops.sid.data, log->ops.sid.used - 1); + } + + { + static __thread char event_id_str[UINT64_MAX_LENGTH]; + len = print_uint64(event_id_str, ev->event_id); + bytes += len; + facets_add_key_value_length( + facets, WEVT_FIELD_EVENTID, sizeof(WEVT_FIELD_EVENTID) - 1, + event_id_str, 
len); + } + + { + const char *s = events_api_to_str(ev->platform); + facets_add_key_value_length( + facets, WEVT_FIELD_EVENTS_API, sizeof(WEVT_FIELD_EVENTS_API) - 1, s, strlen(s)); + } + + if(ev->process_id) { + static __thread char process_id_str[UINT64_MAX_LENGTH]; + len = print_uint64(process_id_str, ev->process_id); + bytes += len; + facets_add_key_value_length( + facets, WEVT_FIELD_PROCESSID, sizeof(WEVT_FIELD_PROCESSID) - 1, + process_id_str, len); + } + + if(ev->thread_id) { + static __thread char thread_id_str[UINT64_MAX_LENGTH]; + len = print_uint64(thread_id_str, ev->thread_id); + bytes += len; + facets_add_key_value_length( + facets, WEVT_FIELD_THREADID, sizeof(WEVT_FIELD_THREADID) - 1, + thread_id_str, len); + } + + { + static __thread char str[UINT64_MAX_LENGTH]; + len = print_uint64(str, ev->level); + bytes += len; + facets_add_key_value_length( + facets, WEVT_FIELD_LEVEL "ID", sizeof(WEVT_FIELD_LEVEL) + 2 - 1, str, len); + } + + { + static __thread char str[UINT64_HEX_MAX_LENGTH]; + len = print_uint64_hex_full(str, ev->keywords); + bytes += len; + facets_add_key_value_length( + facets, WEVT_FIELD_KEYWORDS "ID", sizeof(WEVT_FIELD_KEYWORDS) + 2 - 1, str, len); + } + + { + static __thread char str[UINT64_MAX_LENGTH]; + len = print_uint64(str, ev->opcode); + bytes += len; + facets_add_key_value_length( + facets, WEVT_FIELD_OPCODE "ID", sizeof(WEVT_FIELD_OPCODE) + 2 - 1, str, len); + } + + { + static __thread char str[UINT64_MAX_LENGTH]; + len = print_uint64(str, ev->task); + bytes += len; + facets_add_key_value_length( + facets, WEVT_FIELD_TASK "ID", sizeof(WEVT_FIELD_TASK) + 2 - 1, str, len); + } + + if(log->type & WEVT_QUERY_EVENT_DATA) { + // the query has full text-search + if(log->ops.event.used > 1) { + bytes += log->ops.event.used; + facets_add_key_value_length( + facets, WEVT_FIELD_EVENT_MESSAGE_HIDDEN, sizeof(WEVT_FIELD_EVENT_MESSAGE_HIDDEN) - 1, + log->ops.event.data, log->ops.event.used - 1); + } + + if(log->ops.xml.used > 1) { + bytes += log->ops.xml.used; + facets_add_key_value_length( + facets, WEVT_FIELD_EVENT_XML_HIDDEN, sizeof(WEVT_FIELD_EVENT_XML_HIDDEN) - 1, + log->ops.xml.data, log->ops.xml.used - 1); + } + + if(log->ops.event_data->len) { + bytes += log->ops.event_data->len; + facets_add_key_value_length( + facets, WEVT_FIELD_EVENT_DATA_HIDDEN, sizeof(WEVT_FIELD_EVENT_DATA_HIDDEN) - 1, + buffer_tostring(log->ops.event_data), buffer_strlen(log->ops.event_data)); + } + } + + wevt_facets_register_bin_data(log, facets, ev); + +#ifdef NETDATA_INTERNAL_CHECKS + facets_add_key_value(facets, "z_level_source", source_to_str(&log->ops.level)); + facets_add_key_value(facets, "z_keywords_source", source_to_str(&log->ops.keywords)); + facets_add_key_value(facets, "z_opcode_source", source_to_str(&log->ops.opcode)); + facets_add_key_value(facets, "z_task_source", source_to_str(&log->ops.task)); +#endif + + return bytes; +} + +static void send_progress_update(LOGS_QUERY_STATUS *lqs, size_t current_row_counter, bool flush_current_file) { + usec_t now_ut = now_monotonic_usec(); + + if(current_row_counter > lqs->c.progress.entries.current_query_total) { + lqs->c.progress.entries.total += current_row_counter - lqs->c.progress.entries.current_query_total; + lqs->c.progress.entries.current_query_total = current_row_counter; + } + + if(flush_current_file) { + lqs->c.progress.entries.total += current_row_counter; + lqs->c.progress.entries.total -= lqs->c.progress.entries.current_query_total; + lqs->c.progress.entries.completed += current_row_counter; + 
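+        // this channel is done; reset the per-file counter so the next channel can start its own estimate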
lqs->c.progress.entries.current_query_total = 0; + } + + size_t completed = lqs->c.progress.entries.completed + current_row_counter; + if(completed > lqs->c.progress.entries.total) + lqs->c.progress.entries.total = completed; + + usec_t progress_duration_ut = now_ut - lqs->c.progress.last_ut; + if(progress_duration_ut >= WINDOWS_EVENTS_PROGRESS_EVERY_UT) { + lqs->c.progress.last_ut = now_ut; + + netdata_mutex_lock(&stdout_mutex); + pluginsd_function_progress_to_stdout(lqs->rq.transaction, completed, lqs->c.progress.entries.total); + netdata_mutex_unlock(&stdout_mutex); + } +} + +static WEVT_QUERY_STATUS wevt_query_backward( + WEVT_LOG *log, BUFFER *wb __maybe_unused, FACETS *facets, + LOGS_QUERY_SOURCE *src, + LOGS_QUERY_STATUS *lqs) +{ + usec_t start_ut = lqs->query.start_ut; + usec_t stop_ut = lqs->query.stop_ut; + bool stop_when_full = lqs->query.stop_when_full; + +// lqs->c.query_file.start_ut = start_ut; +// lqs->c.query_file.stop_ut = stop_ut; + + if(!wevt_query(log, channel2unicode(src->fullname), lqs->c.query, EvtQueryReverseDirection)) + return WEVT_FAILED_TO_SEEK; + + size_t errors_no_timestamp = 0; + usec_t latest_msg_ut = 0; // the biggest timestamp we have seen so far + usec_t first_msg_ut = 0; // the first message we got from the db + size_t row_counter = 0, last_row_counter = 0, rows_useful = 0; + size_t bytes = 0, last_bytes = 0; + + usec_t last_usec_from = 0; + usec_t last_usec_to = 0; + + WEVT_QUERY_STATUS status = WEVT_OK; + + facets_rows_begin(facets); + WEVT_EVENT e; + while (status == WEVT_OK && wevt_get_next_event(log, &e)) { + usec_t msg_ut = e.created_ns / NSEC_PER_USEC; + + if(unlikely(!msg_ut)) { + errors_no_timestamp++; + continue; + } + + if (unlikely(msg_ut > start_ut)) + continue; + + if (unlikely(msg_ut < stop_ut)) + break; + + if(unlikely(msg_ut > latest_msg_ut)) + latest_msg_ut = msg_ut; + + if(unlikely(!first_msg_ut)) { + first_msg_ut = msg_ut; + // lqs->c.query_file.first_msg_ut = msg_ut; + } + +// sampling_t sample = is_row_in_sample(log, lqs, src, msg_ut, +// FACETS_ANCHOR_DIRECTION_BACKWARD, +// facets_row_candidate_to_keep(facets, msg_ut)); +// +// if(sample == SAMPLING_FULL) { + bytes += wevt_process_event(log, facets, src, &msg_ut, &e); + + // make sure each line gets a unique timestamp + if(unlikely(msg_ut >= last_usec_from && msg_ut <= last_usec_to)) + msg_ut = --last_usec_from; + else + last_usec_from = last_usec_to = msg_ut; + + if(facets_row_finished(facets, msg_ut)) + rows_useful++; + + row_counter++; + if(unlikely((row_counter % FUNCTION_DATA_ONLY_CHECK_EVERY_ROWS) == 0 && + stop_when_full && + facets_rows(facets) >= lqs->rq.entries)) { + // stop the data only query + usec_t oldest = facets_row_oldest_ut(facets); + if(oldest && msg_ut < (oldest - lqs->anchor.delta_ut)) + break; + } + + if(unlikely(row_counter % FUNCTION_PROGRESS_EVERY_ROWS == 0)) { + status = check_stop(lqs->cancelled, lqs->stop_monotonic_ut); + + if(status == WEVT_OK) { + lqs->c.rows_read += row_counter - last_row_counter; + last_row_counter = row_counter; + + lqs->c.bytes_read += bytes - last_bytes; + last_bytes = bytes; + + send_progress_update(lqs, row_counter, false); + } + } +// } +// else if(sample == SAMPLING_SKIP_FIELDS) +// facets_row_finished_unsampled(facets, msg_ut); +// else { +// sampling_update_running_query_file_estimates(facets, log, lqs, src, msg_ut, FACETS_ANCHOR_DIRECTION_BACKWARD); +// break; +// } + } + + send_progress_update(lqs, row_counter, true); + lqs->c.rows_read += row_counter - last_row_counter; + lqs->c.bytes_read += bytes - last_bytes; + 
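+    // rows_useful counts only the rows the facets engine actually kept for the result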
lqs->c.rows_useful += rows_useful; + + if(errors_no_timestamp) + netdata_log_error("WINDOWS-EVENTS: %zu events did not have timestamps", errors_no_timestamp); + + if(latest_msg_ut > lqs->last_modified) + lqs->last_modified = latest_msg_ut; + + wevt_query_done(log); + + return status; +} + +static WEVT_QUERY_STATUS wevt_query_forward( + WEVT_LOG *log, BUFFER *wb __maybe_unused, FACETS *facets, + LOGS_QUERY_SOURCE *src, + LOGS_QUERY_STATUS *lqs) +{ + usec_t start_ut = lqs->query.start_ut; + usec_t stop_ut = lqs->query.stop_ut; + bool stop_when_full = lqs->query.stop_when_full; + +// lqs->c.query_file.start_ut = start_ut; +// lqs->c.query_file.stop_ut = stop_ut; + + if(!wevt_query(log, channel2unicode(src->fullname), lqs->c.query, EvtQueryForwardDirection)) + return WEVT_FAILED_TO_SEEK; + + size_t errors_no_timestamp = 0; + usec_t latest_msg_ut = 0; // the biggest timestamp we have seen so far + usec_t first_msg_ut = 0; // the first message we got from the db + size_t row_counter = 0, last_row_counter = 0, rows_useful = 0; + size_t bytes = 0, last_bytes = 0; + + usec_t last_usec_from = 0; + usec_t last_usec_to = 0; + + WEVT_QUERY_STATUS status = WEVT_OK; + + facets_rows_begin(facets); + WEVT_EVENT e; + while (status == WEVT_OK && wevt_get_next_event(log, &e)) { + usec_t msg_ut = e.created_ns / NSEC_PER_USEC; + + if(unlikely(!msg_ut)) { + errors_no_timestamp++; + continue; + } + + if (unlikely(msg_ut < start_ut)) + continue; + + if (unlikely(msg_ut > stop_ut)) + break; + + if(likely(msg_ut > latest_msg_ut)) + latest_msg_ut = msg_ut; + + if(unlikely(!first_msg_ut)) { + first_msg_ut = msg_ut; + // lqs->c.query_file.first_msg_ut = msg_ut; + } + +// sampling_t sample = is_row_in_sample(log, lqs, src, msg_ut, +// FACETS_ANCHOR_DIRECTION_FORWARD, +// facets_row_candidate_to_keep(facets, msg_ut)); +// +// if(sample == SAMPLING_FULL) { + bytes += wevt_process_event(log, facets, src, &msg_ut, &e); + + // make sure each line gets a unique timestamp + if(unlikely(msg_ut >= last_usec_from && msg_ut <= last_usec_to)) + msg_ut = ++last_usec_to; + else + last_usec_from = last_usec_to = msg_ut; + + if(facets_row_finished(facets, msg_ut)) + rows_useful++; + + row_counter++; + if(unlikely((row_counter % FUNCTION_DATA_ONLY_CHECK_EVERY_ROWS) == 0 && + stop_when_full && + facets_rows(facets) >= lqs->rq.entries)) { + // stop the data only query + usec_t newest = facets_row_newest_ut(facets); + if(newest && msg_ut > (newest + lqs->anchor.delta_ut)) + break; + } + + if(unlikely(row_counter % FUNCTION_PROGRESS_EVERY_ROWS == 0)) { + status = check_stop(lqs->cancelled, lqs->stop_monotonic_ut); + + if(status == WEVT_OK) { + lqs->c.rows_read += row_counter - last_row_counter; + last_row_counter = row_counter; + + lqs->c.bytes_read += bytes - last_bytes; + last_bytes = bytes; + + send_progress_update(lqs, row_counter, false); + } + } +// } +// else if(sample == SAMPLING_SKIP_FIELDS) +// facets_row_finished_unsampled(facets, msg_ut); +// else { +// sampling_update_running_query_file_estimates(facets, log, lqs, src, msg_ut, FACETS_ANCHOR_DIRECTION_FORWARD); +// break; +// } + } + + send_progress_update(lqs, row_counter, true); + lqs->c.rows_read += row_counter - last_row_counter; + lqs->c.bytes_read += bytes - last_bytes; + lqs->c.rows_useful += rows_useful; + + if(errors_no_timestamp) + netdata_log_error("WINDOWS-EVENTS: %zu events did not have timestamps", errors_no_timestamp); + + if(latest_msg_ut > lqs->last_modified) + lqs->last_modified = latest_msg_ut; + + wevt_query_done(log); + + return status; +} + +static 
WEVT_QUERY_STATUS wevt_query_one_channel(
+        WEVT_LOG *log,
+        BUFFER *wb, FACETS *facets,
+        LOGS_QUERY_SOURCE *src,
+        LOGS_QUERY_STATUS *lqs) {
+
+    errno_clear();
+
+    WEVT_QUERY_STATUS status;
+    if(lqs->rq.direction == FACETS_ANCHOR_DIRECTION_FORWARD)
+        status = wevt_query_forward(log, wb, facets, src, lqs);
+    else
+        status = wevt_query_backward(log, wb, facets, src, lqs);
+
+    return status;
+}
+
+static bool source_is_mine(LOGS_QUERY_SOURCE *src, LOGS_QUERY_STATUS *lqs) {
+    if(
+        // no source is requested
+        (lqs->rq.source_type == WEVTS_NONE && !lqs->rq.sources) ||
+
+        // matches our internal source types
+        (src->source_type & lqs->rq.source_type) ||
+
+        // matches the source name
+        (lqs->rq.sources && src->source && simple_pattern_matches(lqs->rq.sources, string2str(src->source))) ||
+
+        // matches the provider (providers start with a special prefix to avoid mix and match)
+        (lqs->rq.sources && src->provider && simple_pattern_matches(lqs->rq.sources, string2str(src->provider)))
+
+    ) {
+
+        if(!src->msg_last_ut)
+            // the file is not scanned yet, or the timestamps have not been updated,
+            // so we don't know if it can contribute or not - let's add it.
+            return true;
+
+        usec_t anchor_delta = ANCHOR_DELTA_UT;
+        usec_t first_ut = src->msg_first_ut - anchor_delta;
+        usec_t last_ut = src->msg_last_ut + anchor_delta;
+
+        if(last_ut >= lqs->rq.after_ut && first_ut <= lqs->rq.before_ut)
+            return true;
+    }
+
+    return false;
+}
+
+static int wevt_master_query(BUFFER *wb __maybe_unused, LOGS_QUERY_STATUS *lqs __maybe_unused) {
+    // make sure the sources list is updated
+    wevt_sources_scan();
+
+    lqs->c.query = wevt_generate_query_no_xpath(lqs, wb);
+    if(!lqs->c.query)
+        return rrd_call_function_error(wb, "failed to generate query", HTTP_RESP_INTERNAL_SERVER_ERROR);
+
+    FACETS *facets = lqs->facets;
+
+    WEVT_QUERY_STATUS status = WEVT_NO_CHANNEL_MATCHED;
+
+    lqs->c.files_matched = 0;
+    lqs->c.file_working = 0;
+    lqs->c.rows_useful = 0;
+    lqs->c.rows_read = 0;
+    lqs->c.bytes_read = 0;
+
+    size_t files_used = 0;
+    size_t files_max = dictionary_entries(wevt_sources);
+    const DICTIONARY_ITEM *file_items[files_max];
+
+    // find the matching channels and acquire them
+    bool files_are_newer = false;
+    LOGS_QUERY_SOURCE *src;
+    dfe_start_read(wevt_sources, src) {
+        if(!source_is_mine(src, lqs))
+            continue;
+
+        file_items[files_used++] = dictionary_acquired_item_dup(wevt_sources, src_dfe.item);
+
+        if(src->msg_last_ut > lqs->rq.if_modified_since)
+            files_are_newer = true;
+
+        lqs->c.progress.entries.total += src->entries;
+    }
+    dfe_done(src);
+
+    lqs->c.files_matched = files_used;
+
+    if(lqs->rq.if_modified_since && !files_are_newer) {
+        // release the files
+        for(size_t f = 0; f < files_used ;f++)
+            dictionary_acquired_item_release(wevt_sources, file_items[f]);
+
+        return rrd_call_function_error(wb, "not modified", HTTP_RESP_NOT_MODIFIED);
+    }
+
+    // sort the files, so that they are processed in the optimal order for facets
+    if(files_used >= 2) {
+        if (lqs->rq.direction == FACETS_ANCHOR_DIRECTION_BACKWARD)
+            qsort(file_items, files_used, sizeof(const DICTIONARY_ITEM *),
+                  wevt_sources_dict_items_backward_compar);
+        else
+            qsort(file_items, files_used, sizeof(const DICTIONARY_ITEM *),
+                  wevt_sources_dict_items_forward_compar);
+    }
+
+    bool partial = false;
+    usec_t query_started_ut = now_monotonic_usec();
+    usec_t started_ut = query_started_ut;
+    usec_t ended_ut = started_ut;
+    usec_t duration_ut, max_duration_ut = 0;
+
+    WEVT_LOG *log = wevt_openlog6(query_has_fts(lqs) ? 
WEVT_QUERY_FTS : WEVT_QUERY_NORMAL); + if(!log) { + // release the files + for(size_t f = 0; f < files_used ;f++) + dictionary_acquired_item_release(wevt_sources, file_items[f]); + + netdata_log_error("WINDOWS EVENTS: cannot open windows event log"); + return rrd_call_function_error(wb, "cannot open windows events log", HTTP_RESP_INTERNAL_SERVER_ERROR); + } + + // sampling_query_init(lqs, facets); + + buffer_json_member_add_array(wb, "_channels"); + for(size_t f = 0; f < files_used ;f++) { + const char *fullname = dictionary_acquired_item_name(file_items[f]); + src = dictionary_acquired_item_value(file_items[f]); + + if(!source_is_mine(src, lqs)) + continue; + + started_ut = ended_ut; + + // do not even try to do the query if we expect it to pass the timeout + if(ended_ut + max_duration_ut * 3 >= *lqs->stop_monotonic_ut) { + partial = true; + status = WEVT_TIMED_OUT; + break; + } + + lqs->c.file_working++; + + size_t rows_useful = lqs->c.rows_useful; + size_t rows_read = lqs->c.rows_read; + size_t bytes_read = lqs->c.bytes_read; + size_t matches_setup_ut = lqs->c.matches_setup_ut; + + // sampling_file_init(lqs, src); + + lqs->c.progress.entries.current_query_total = src->entries; + WEVT_QUERY_STATUS tmp_status = wevt_query_one_channel(log, wb, facets, src, lqs); + + rows_useful = lqs->c.rows_useful - rows_useful; + rows_read = lqs->c.rows_read - rows_read; + bytes_read = lqs->c.bytes_read - bytes_read; + matches_setup_ut = lqs->c.matches_setup_ut - matches_setup_ut; + + ended_ut = now_monotonic_usec(); + duration_ut = ended_ut - started_ut; + + if(duration_ut > max_duration_ut) + max_duration_ut = duration_ut; + + buffer_json_add_array_item_object(wb); // channel source + { + // information about the file + buffer_json_member_add_string(wb, "_name", fullname); + buffer_json_member_add_uint64(wb, "_source_type", src->source_type); + buffer_json_member_add_string(wb, "_source", string2str(src->source)); + buffer_json_member_add_uint64(wb, "_msg_first_ut", src->msg_first_ut); + buffer_json_member_add_uint64(wb, "_msg_last_ut", src->msg_last_ut); + + // information about the current use of the file + buffer_json_member_add_uint64(wb, "duration_ut", ended_ut - started_ut); + buffer_json_member_add_uint64(wb, "rows_read", rows_read); + buffer_json_member_add_uint64(wb, "rows_useful", rows_useful); + buffer_json_member_add_double(wb, "rows_per_second", (double) rows_read / (double) duration_ut * (double) USEC_PER_SEC); + buffer_json_member_add_uint64(wb, "bytes_read", bytes_read); + buffer_json_member_add_double(wb, "bytes_per_second", (double) bytes_read / (double) duration_ut * (double) USEC_PER_SEC); + buffer_json_member_add_uint64(wb, "duration_matches_ut", matches_setup_ut); + + // if(lqs->rq.sampling) { + // buffer_json_member_add_object(wb, "_sampling"); + // { + // buffer_json_member_add_uint64(wb, "sampled", lqs->c.samples_per_file.sampled); + // buffer_json_member_add_uint64(wb, "unsampled", lqs->c.samples_per_file.unsampled); + // buffer_json_member_add_uint64(wb, "estimated", lqs->c.samples_per_file.estimated); + // } + // buffer_json_object_close(wb); // _sampling + // } + } + buffer_json_object_close(wb); // channel source + + bool stop = false; + switch(tmp_status) { + case WEVT_OK: + case WEVT_NO_CHANNEL_MATCHED: + status = (status == WEVT_OK) ? 
WEVT_OK : tmp_status; + break; + + case WEVT_FAILED_TO_OPEN: + case WEVT_FAILED_TO_SEEK: + partial = true; + if(status == WEVT_NO_CHANNEL_MATCHED) + status = tmp_status; + break; + + case WEVT_CANCELLED: + case WEVT_TIMED_OUT: + partial = true; + stop = true; + status = tmp_status; + break; + + case WEVT_NOT_MODIFIED: + internal_fatal(true, "this should never be returned here"); + break; + } + + if(stop) + break; + } + buffer_json_array_close(wb); // _channels + + // release the files + for(size_t f = 0; f < files_used ;f++) + dictionary_acquired_item_release(wevt_sources, file_items[f]); + + switch (status) { + case WEVT_OK: + if(lqs->rq.if_modified_since && !lqs->c.rows_useful) + return rrd_call_function_error(wb, "no useful logs, not modified", HTTP_RESP_NOT_MODIFIED); + break; + + case WEVT_TIMED_OUT: + case WEVT_NO_CHANNEL_MATCHED: + break; + + case WEVT_CANCELLED: + return rrd_call_function_error(wb, "client closed connection", HTTP_RESP_CLIENT_CLOSED_REQUEST); + + case WEVT_NOT_MODIFIED: + return rrd_call_function_error(wb, "not modified", HTTP_RESP_NOT_MODIFIED); + + case WEVT_FAILED_TO_OPEN: + return rrd_call_function_error(wb, "failed to open event log", HTTP_RESP_INTERNAL_SERVER_ERROR); + + case WEVT_FAILED_TO_SEEK: + return rrd_call_function_error(wb, "failed to execute event log query", HTTP_RESP_INTERNAL_SERVER_ERROR); + + default: + return rrd_call_function_error(wb, "unknown status", HTTP_RESP_INTERNAL_SERVER_ERROR); + } + + buffer_json_member_add_uint64(wb, "status", HTTP_RESP_OK); + buffer_json_member_add_boolean(wb, "partial", partial); + buffer_json_member_add_string(wb, "type", "table"); + + // build a message for the query + if(!lqs->rq.data_only) { + CLEAN_BUFFER *msg = buffer_create(0, NULL); + CLEAN_BUFFER *msg_description = buffer_create(0, NULL); + ND_LOG_FIELD_PRIORITY msg_priority = NDLP_INFO; + + // if(!journal_files_completed_once()) { + // buffer_strcat(msg, "Journals are still being scanned. "); + // buffer_strcat(msg_description + // , "LIBRARY SCAN: The journal files are still being scanned, you are probably viewing incomplete data. "); + // msg_priority = NDLP_WARNING; + // } + + if(partial) { + buffer_strcat(msg, "Query timed-out, incomplete data. "); + buffer_strcat(msg_description + , "QUERY TIMEOUT: The query timed out and may not include all the data of the selected window. "); + msg_priority = NDLP_WARNING; + } + + // if(lqs->c.samples.estimated || lqs->c.samples.unsampled) { + // double percent = (double) (lqs->c.samples.sampled * 100.0 / + // (lqs->c.samples.estimated + lqs->c.samples.unsampled + lqs->c.samples.sampled)); + // buffer_sprintf(msg, "%.2f%% real data", percent); + // buffer_sprintf(msg_description, "ACTUAL DATA: The filters counters reflect %0.2f%% of the data. ", percent); + // msg_priority = MIN(msg_priority, NDLP_NOTICE); + // } + // + // if(lqs->c.samples.unsampled) { + // double percent = (double) (lqs->c.samples.unsampled * 100.0 / + // (lqs->c.samples.estimated + lqs->c.samples.unsampled + lqs->c.samples.sampled)); + // buffer_sprintf(msg, ", %.2f%% unsampled", percent); + // buffer_sprintf(msg_description + // , "UNSAMPLED DATA: %0.2f%% of the events exist and have been counted, but their values have not been evaluated, so they are not included in the filters counters. 
" + // , percent); + // msg_priority = MIN(msg_priority, NDLP_NOTICE); + // } + // + // if(lqs->c.samples.estimated) { + // double percent = (double) (lqs->c.samples.estimated * 100.0 / + // (lqs->c.samples.estimated + lqs->c.samples.unsampled + lqs->c.samples.sampled)); + // buffer_sprintf(msg, ", %.2f%% estimated", percent); + // buffer_sprintf(msg_description + // , "ESTIMATED DATA: The query selected a large amount of data, so to avoid delaying too much, the presented data are estimated by %0.2f%%. " + // , percent); + // msg_priority = MIN(msg_priority, NDLP_NOTICE); + // } + + buffer_json_member_add_object(wb, "message"); + if(buffer_tostring(msg)) { + buffer_json_member_add_string(wb, "title", buffer_tostring(msg)); + buffer_json_member_add_string(wb, "description", buffer_tostring(msg_description)); + buffer_json_member_add_string(wb, "status", nd_log_id2priority(msg_priority)); + } + // else send an empty object if there is nothing to tell + buffer_json_object_close(wb); // message + } + + if(!lqs->rq.data_only) { + buffer_json_member_add_time_t(wb, "update_every", 1); + buffer_json_member_add_string(wb, "help", WEVT_FUNCTION_DESCRIPTION); + } + + if(!lqs->rq.data_only || lqs->rq.tail) + buffer_json_member_add_uint64(wb, "last_modified", lqs->last_modified); + + facets_sort_and_reorder_keys(facets); + facets_report(facets, wb, used_hashes_registry); + + wb->expires = now_realtime_sec() + (lqs->rq.data_only ? 3600 : 0); + buffer_json_member_add_time_t(wb, "expires", wb->expires); + + // if(lqs->rq.sampling) { + // buffer_json_member_add_object(wb, "_sampling"); + // { + // buffer_json_member_add_uint64(wb, "sampled", lqs->c.samples.sampled); + // buffer_json_member_add_uint64(wb, "unsampled", lqs->c.samples.unsampled); + // buffer_json_member_add_uint64(wb, "estimated", lqs->c.samples.estimated); + // } + // buffer_json_object_close(wb); // _sampling + // } + + wevt_closelog6(log); + + wb->content_type = CT_APPLICATION_JSON; + wb->response_code = HTTP_RESP_OK; + return wb->response_code; +} + +void function_windows_events(const char *transaction, char *function, usec_t *stop_monotonic_ut, bool *cancelled, + BUFFER *payload, HTTP_ACCESS access __maybe_unused, + const char *source __maybe_unused, void *data __maybe_unused) { + bool have_slice = LQS_DEFAULT_SLICE_MODE; + + LOGS_QUERY_STATUS tmp_fqs = { + .facets = lqs_facets_create( + LQS_DEFAULT_ITEMS_PER_QUERY, + FACETS_OPTION_ALL_KEYS_FTS | FACETS_OPTION_HASH_IDS, + WEVT_ALWAYS_VISIBLE_KEYS, + WEVT_KEYS_INCLUDED_IN_FACETS, + WEVT_KEYS_EXCLUDED_FROM_FACETS, + have_slice), + + .rq = LOGS_QUERY_REQUEST_DEFAULTS(transaction, have_slice, FACETS_ANCHOR_DIRECTION_BACKWARD), + + .cancelled = cancelled, + .stop_monotonic_ut = stop_monotonic_ut, + }; + LOGS_QUERY_STATUS *lqs = &tmp_fqs; + + CLEAN_BUFFER *wb = lqs_create_output_buffer(); + + // ------------------------------------------------------------------------ + // parse the parameters + + if(lqs_request_parse_and_validate(lqs, wb, function, payload, have_slice, WEVT_FIELD_LEVEL)) { + wevt_register_fields(lqs); + + // ------------------------------------------------------------------------ + // add versions to the response + + buffer_json_wevt_versions(wb); + + // ------------------------------------------------------------------------ + // run the request + + if (lqs->rq.info) + lqs_info_response(wb, lqs->facets); + else { + wevt_master_query(wb, lqs); + if (wb->response_code == HTTP_RESP_OK) + buffer_json_finalize(wb); + } + } + + netdata_mutex_lock(&stdout_mutex); + 
pluginsd_function_result_to_stdout(transaction, wb); + netdata_mutex_unlock(&stdout_mutex); + + lqs_cleanup(lqs); +} + +int main(int argc __maybe_unused, char **argv __maybe_unused) { + nd_thread_tag_set("wevt.plugin"); + nd_log_initialize_for_external_plugins("windows-events.plugin"); + + // ------------------------------------------------------------------------ + // initialization + + wevt_sources_init(); + provider_cache_init(); + cached_sid_username_init(); + field_cache_init(); + + if(!EnableWindowsPrivilege(SE_SECURITY_NAME)) + nd_log(NDLS_COLLECTORS, NDLP_WARNING, "Failed to enable %s privilege", SE_SECURITY_NAME); + + if(!EnableWindowsPrivilege(SE_BACKUP_NAME)) + nd_log(NDLS_COLLECTORS, NDLP_WARNING, "Failed to enable %s privilege", SE_BACKUP_NAME); + + if(!EnableWindowsPrivilege(SE_AUDIT_NAME)) + nd_log(NDLS_COLLECTORS, NDLP_WARNING, "Failed to enable %s privilege", SE_AUDIT_NAME); + + // ------------------------------------------------------------------------ + // debug + + if(argc >= 2 && strcmp(argv[argc - 1], "debug") == 0) { + wevt_sources_scan(); + + struct { + const char *func; + } array[] = { + { "windows-events after:-8640000 before:0 last:200 source:All" }, + //{ "windows-events after:-86400 before:0 direction:backward last:200 facets:HdUoSYab5wV,Cq2r7mRUv4a,LAnVlsIQfeD,BnPLNbA5VWT,KeCITtVD5AD,HytMJ9kj82B,JM3OPW3kHn6,H106l8MXSSr,HREiMN.4Ahu,ClaDGnYSQE7,ApYltST_icg,PtkRm91M0En data_only:false slice:true source:All" }, + //{ "windows-events after:1726055370 before:1726056270 direction:backward last:200 facets:HdUoSYab5wV,Cq2r7mRUv4a,LAnVlsIQfeD,BnPLNbA5VWT,KeCITtVD5AD,HytMJ9kj82B,LT.Xp9I9tiP,No4kPTQbS.g,LQ2LQzfE8EG,PtkRm91M0En,JM3OPW3kHn6,ClaDGnYSQE7,H106l8MXSSr,HREiMN.4Ahu data_only:false source:All HytMJ9kj82B:BlC24d5JBBV,PtVoyIuX.MU,HMj1B38kHTv KeCITtVD5AD:PY1JtCeWwSe,O9kz5J37nNl,JZoJURadhDb" }, + // { "windows-events after:1725636012 before:1726240812 direction:backward last:200 facets:HdUoSYab5wV,Cq2r7mRUv4a,LAnVlsIQfeD,BnPLNbA5VWT,KeCITtVD5AD,HytMJ9kj82B,JM3OPW3kHn6,H106l8MXSSr,HREiMN.4Ahu,ClaDGnYSQE7,ApYltST_icg,PtkRm91M0En data_only:false source:All PtkRm91M0En:LDzHbP5libb" }, + //{ "windows-events after:1725650386 before:1725736786 anchor:1725652420809461 direction:forward last:200 facets:HWNGeY7tg6c,LAnVlsIQfeD,BnPLNbA5VWT,Cq2r7mRUv4a,KeCITtVD5AD,I_Amz_APBm3,HytMJ9kj82B,LT.Xp9I9tiP,No4kPTQbS.g,LQ2LQzfE8EG,PtkRm91M0En,JM3OPW3kHn6 if_modified_since:1725736649011085 data_only:true delta:true tail:true source:all Cq2r7mRUv4a:PPc9fUy.q6o No4kPTQbS.g:Dwo9PhK27v3 HytMJ9kj82B:KbbznGjt_9r LAnVlsIQfeD:OfU1t5cpjgG JM3OPW3kHn6:CS_0g5AEpy2" }, + //{ "windows-events info after:1725650420 before:1725736820" }, + //{ "windows-events after:1725650420 before:1725736820 last:200 facets:HWNGeY7tg6c,LAnVlsIQfeD,BnPLNbA5VWT,Cq2r7mRUv4a,KeCITtVD5AD,I_Amz_APBm3,HytMJ9kj82B,LT.Xp9I9tiP,No4kPTQbS.g,LQ2LQzfE8EG,PtkRm91M0En,JM3OPW3kHn6 source:all Cq2r7mRUv4a:PPc9fUy.q6o No4kPTQbS.g:Dwo9PhK27v3 HytMJ9kj82B:KbbznGjt_9r LAnVlsIQfeD:OfU1t5cpjgG JM3OPW3kHn6:CS_0g5AEpy2" }, + //{ "windows-events after:1725650430 before:1725736830 last:200 facets:HWNGeY7tg6c,LAnVlsIQfeD,BnPLNbA5VWT,Cq2r7mRUv4a,KeCITtVD5AD,I_Amz_APBm3,HytMJ9kj82B,LT.Xp9I9tiP,No4kPTQbS.g,LQ2LQzfE8EG,PtkRm91M0En,JM3OPW3kHn6 source:all Cq2r7mRUv4a:PPc9fUy.q6o No4kPTQbS.g:Dwo9PhK27v3 HytMJ9kj82B:KbbznGjt_9r LAnVlsIQfeD:OfU1t5cpjgG JM3OPW3kHn6:CS_0g5AEpy2" }, + { NULL }, + }; + + for(int i = 0; array[i].func ;i++) { + bool cancelled = false; + usec_t stop_monotonic_ut = now_monotonic_usec() + 600 * USEC_PER_SEC; + //char buf[] = 
"windows-events after:-86400 before:0 direction:backward last:200 data_only:false slice:true source:all"; + function_windows_events("123", (char *)array[i].func, &stop_monotonic_ut, &cancelled, NULL, HTTP_ACCESS_ALL, NULL, NULL); + } + printf("\n\nAll done!\n\n"); + fflush(stdout); + exit(1); + } + + // ------------------------------------------------------------------------ + // the event loop for functions + + struct functions_evloop_globals *wg = + functions_evloop_init(WINDOWS_EVENTS_WORKER_THREADS, "WEVT", &stdout_mutex, &plugin_should_exit); + + functions_evloop_add_function(wg, + WEVT_FUNCTION_NAME, + function_windows_events, + WINDOWS_EVENTS_DEFAULT_TIMEOUT, + NULL); + + // ------------------------------------------------------------------------ + // register functions to netdata + + netdata_mutex_lock(&stdout_mutex); + + fprintf(stdout, PLUGINSD_KEYWORD_FUNCTION " GLOBAL \"%s\" %d \"%s\" \"logs\" "HTTP_ACCESS_FORMAT" %d\n", + WEVT_FUNCTION_NAME, WINDOWS_EVENTS_DEFAULT_TIMEOUT, WEVT_FUNCTION_DESCRIPTION, + (HTTP_ACCESS_FORMAT_CAST)(HTTP_ACCESS_SIGNED_ID | HTTP_ACCESS_SAME_SPACE | HTTP_ACCESS_SENSITIVE_DATA), + RRDFUNCTIONS_PRIORITY_DEFAULT); + + fflush(stdout); + netdata_mutex_unlock(&stdout_mutex); + + // ------------------------------------------------------------------------ + + usec_t send_newline_ut = 0; + usec_t since_last_scan_ut = WINDOWS_EVENTS_SCAN_EVERY_USEC * 2; // something big to trigger scanning at start + usec_t since_last_providers_release_ut = 0; + const bool tty = isatty(fileno(stdout)) == 1; + + heartbeat_t hb; + heartbeat_init(&hb, USEC_PER_SEC); + while(!plugin_should_exit) { + + if(since_last_scan_ut > WINDOWS_EVENTS_SCAN_EVERY_USEC) { + wevt_sources_scan(); + since_last_scan_ut = 0; + } + + if(since_last_providers_release_ut > WINDOWS_EVENTS_RELEASE_PROVIDERS_HANDLES_EVERY_UT) { + providers_release_unused_handles(); + since_last_providers_release_ut = 0; + } + + usec_t dt_ut = heartbeat_next(&hb); + since_last_providers_release_ut += dt_ut; + since_last_scan_ut += dt_ut; + send_newline_ut += dt_ut; + + if(!tty && send_newline_ut > USEC_PER_SEC) { + send_newline_and_flush(&stdout_mutex); + send_newline_ut = 0; + } + } + + exit(0); +} diff --git a/src/collectors/windows-events.plugin/windows-events.h b/src/collectors/windows-events.plugin/windows-events.h new file mode 100644 index 000000000..34d600a98 --- /dev/null +++ b/src/collectors/windows-events.plugin/windows-events.h @@ -0,0 +1,262 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_WINDOWS_EVENTS_H +#define NETDATA_WINDOWS_EVENTS_H + +#include "libnetdata/libnetdata.h" +#include "collectors/all.h" + +typedef enum { + WEVT_NO_CHANNEL_MATCHED, + WEVT_FAILED_TO_OPEN, + WEVT_FAILED_TO_SEEK, + WEVT_TIMED_OUT, + WEVT_OK, + WEVT_NOT_MODIFIED, + WEVT_CANCELLED, +} WEVT_QUERY_STATUS; + +#define WEVT_CHANNEL_CLASSIC_TRACE 0x0 +#define WEVT_CHANNEL_GLOBAL_SYSTEM 0x8 +#define WEVT_CHANNEL_GLOBAL_APPLICATION 0x9 +#define WEVT_CHANNEL_GLOBAL_SECURITY 0xa + +#define WEVT_LEVEL_NONE 0x0 +#define WEVT_LEVEL_CRITICAL 0x1 +#define WEVT_LEVEL_ERROR 0x2 +#define WEVT_LEVEL_WARNING 0x3 +#define WEVT_LEVEL_INFORMATION 0x4 +#define WEVT_LEVEL_VERBOSE 0x5 +#define WEVT_LEVEL_RESERVED_6 0x6 +#define WEVT_LEVEL_RESERVED_7 0x7 +#define WEVT_LEVEL_RESERVED_8 0x8 +#define WEVT_LEVEL_RESERVED_9 0x9 +#define WEVT_LEVEL_RESERVED_10 0xa +#define WEVT_LEVEL_RESERVED_11 0xb +#define WEVT_LEVEL_RESERVED_12 0xc +#define WEVT_LEVEL_RESERVED_13 0xd +#define WEVT_LEVEL_RESERVED_14 0xe +#define WEVT_LEVEL_RESERVED_15 0xf + 
+#define WEVT_OPCODE_INFO 0x0
+#define WEVT_OPCODE_START 0x1
+#define WEVT_OPCODE_STOP 0x2
+#define WEVT_OPCODE_DC_START 0x3
+#define WEVT_OPCODE_DC_STOP 0x4
+#define WEVT_OPCODE_EXTENSION 0x5
+#define WEVT_OPCODE_REPLY 0x6
+#define WEVT_OPCODE_RESUME 0x7
+#define WEVT_OPCODE_SUSPEND 0x8
+#define WEVT_OPCODE_SEND 0x9
+#define WEVT_OPCODE_RECEIVE 0xf0
+#define WEVT_OPCODE_RESERVED_241 0xf1
+#define WEVT_OPCODE_RESERVED_242 0xf2
+#define WEVT_OPCODE_RESERVED_243 0xf3
+#define WEVT_OPCODE_RESERVED_244 0xf4
+#define WEVT_OPCODE_RESERVED_245 0xf5
+#define WEVT_OPCODE_RESERVED_246 0xf6
+#define WEVT_OPCODE_RESERVED_247 0xf7
+#define WEVT_OPCODE_RESERVED_248 0xf8
+#define WEVT_OPCODE_RESERVED_249 0xf9
+#define WEVT_OPCODE_RESERVED_250 0xfa
+#define WEVT_OPCODE_RESERVED_251 0xfb
+#define WEVT_OPCODE_RESERVED_252 0xfc
+#define WEVT_OPCODE_RESERVED_253 0xfd
+#define WEVT_OPCODE_RESERVED_254 0xfe
+#define WEVT_OPCODE_RESERVED_255 0xff
+
+#define WEVT_TASK_NONE 0x0
+
+#define WEVT_KEYWORD_NONE 0x0
+#define WEVT_KEYWORD_RESPONSE_TIME 0x0001000000000000
+#define WEVT_KEYWORD_WDI_CONTEXT 0x0002000000000000
+#define WEVT_KEYWORD_WDI_DIAG 0x0004000000000000
+#define WEVT_KEYWORD_SQM 0x0008000000000000
+#define WEVT_KEYWORD_AUDIT_FAILURE 0x0010000000000000
+#define WEVT_KEYWORD_AUDIT_SUCCESS 0x0020000000000000
+#define WEVT_KEYWORD_CORRELATION_HINT 0x0040000000000000
+#define WEVT_KEYWORD_EVENTLOG_CLASSIC 0x0080000000000000
+#define WEVT_KEYWORD_RESERVED_56 0x0100000000000000
+#define WEVT_KEYWORD_RESERVED_57 0x0200000000000000
+#define WEVT_KEYWORD_RESERVED_58 0x0400000000000000
+#define WEVT_KEYWORD_RESERVED_59 0x0800000000000000
+#define WEVT_KEYWORD_RESERVED_60 0x1000000000000000
+#define WEVT_KEYWORD_RESERVED_61 0x2000000000000000
+#define WEVT_KEYWORD_RESERVED_62 0x4000000000000000
+#define WEVT_KEYWORD_RESERVED_63 0x8000000000000000
+
+#define WEVT_LEVEL_NAME_NONE "None"
+#define WEVT_LEVEL_NAME_CRITICAL "Critical"
+#define WEVT_LEVEL_NAME_ERROR "Error"
+#define WEVT_LEVEL_NAME_WARNING "Warning"
+#define WEVT_LEVEL_NAME_INFORMATION "Information"
+#define WEVT_LEVEL_NAME_VERBOSE "Verbose"
+
+#define WEVT_OPCODE_NAME_INFO "Info"
+#define WEVT_OPCODE_NAME_START "Start"
+#define WEVT_OPCODE_NAME_STOP "Stop"
+#define WEVT_OPCODE_NAME_DC_START "DC Start"
+#define WEVT_OPCODE_NAME_DC_STOP "DC Stop"
+#define WEVT_OPCODE_NAME_EXTENSION "Extension"
+#define WEVT_OPCODE_NAME_REPLY "Reply"
+#define WEVT_OPCODE_NAME_RESUME "Resume"
+#define WEVT_OPCODE_NAME_SUSPEND "Suspend"
+#define WEVT_OPCODE_NAME_SEND "Send"
+#define WEVT_OPCODE_NAME_RECEIVE "Receive"
+
+#define WEVT_TASK_NAME_NONE "None"
+
+#define WEVT_KEYWORD_NAME_NONE "None"
+#define WEVT_KEYWORD_NAME_RESPONSE_TIME "Response Time"
+#define WEVT_KEYWORD_NAME_WDI_CONTEXT "WDI Context"
+#define WEVT_KEYWORD_NAME_WDI_DIAG "WDI Diagnostics"
+#define WEVT_KEYWORD_NAME_SQM "SQM (Software Quality Metrics)"
+#define WEVT_KEYWORD_NAME_AUDIT_FAILURE "Audit Failure"
+#define WEVT_KEYWORD_NAME_AUDIT_SUCCESS "Audit Success"
+#define WEVT_KEYWORD_NAME_CORRELATION_HINT "Correlation Hint"
+#define WEVT_KEYWORD_NAME_EVENTLOG_CLASSIC "Event Log Classic"
+
+#define WEVT_PREFIX_LEVEL "Level " // the space at the end is needed
+#define WEVT_PREFIX_KEYWORDS "Keywords " // the space at the end is needed
+#define WEVT_PREFIX_OPCODE "Opcode " // the space at the end is needed
+#define WEVT_PREFIX_TASK "Task " // the space at the end is needed
+
+#include "windows-events-sources.h"
+#include "windows-events-unicode.h"
+#include "windows-events-xml.h"
+#include 
"windows-events-providers.h" +#include "windows-events-fields-cache.h" +#include "windows-events-query.h" + +// enable or disable preloading on full-text-search +#define ON_FTS_PRELOAD_MESSAGE 1 +#define ON_FTS_PRELOAD_XML 0 +#define ON_FTS_PRELOAD_EVENT_DATA 1 + +#define WEVT_FUNCTION_DESCRIPTION "View, search and analyze the Microsoft Windows Events log." +#define WEVT_FUNCTION_NAME "windows-events" + +#define WINDOWS_EVENTS_WORKER_THREADS 5 +#define WINDOWS_EVENTS_DEFAULT_TIMEOUT 600 +#define WINDOWS_EVENTS_SCAN_EVERY_USEC (5 * 60 * USEC_PER_SEC) +#define WINDOWS_EVENTS_PROGRESS_EVERY_UT (250 * USEC_PER_MS) +#define FUNCTION_PROGRESS_EVERY_ROWS (2000) +#define FUNCTION_DATA_ONLY_CHECK_EVERY_ROWS (1000) +#define ANCHOR_DELTA_UT (10 * USEC_PER_SEC) + +// run providers release every 5 mins +#define WINDOWS_EVENTS_RELEASE_PROVIDERS_HANDLES_EVERY_UT (5 * 60 * USEC_PER_SEC) +// release idle handles that are older than 5 mins +#define WINDOWS_EVENTS_RELEASE_IDLE_PROVIDER_HANDLES_TIME_UT (5 * 60 * USEC_PER_SEC) + +#define WEVT_FIELD_COMPUTER "Computer" +#define WEVT_FIELD_CHANNEL "Channel" +#define WEVT_FIELD_PROVIDER "Provider" +#define WEVT_FIELD_PROVIDER_GUID "ProviderGUID" +#define WEVT_FIELD_EVENTRECORDID "EventRecordID" +#define WEVT_FIELD_VERSION "Version" +#define WEVT_FIELD_QUALIFIERS "Qualifiers" +#define WEVT_FIELD_EVENTID "EventID" +#define WEVT_FIELD_LEVEL "Level" +#define WEVT_FIELD_KEYWORDS "Keywords" +#define WEVT_FIELD_OPCODE "Opcode" +#define WEVT_FIELD_ACCOUNT "UserAccount" +#define WEVT_FIELD_DOMAIN "UserDomain" +#define WEVT_FIELD_SID "UserSID" +#define WEVT_FIELD_TASK "Task" +#define WEVT_FIELD_PROCESSID "ProcessID" +#define WEVT_FIELD_THREADID "ThreadID" +#define WEVT_FIELD_ACTIVITY_ID "ActivityID" +#define WEVT_FIELD_RELATED_ACTIVITY_ID "RelatedActivityID" +#define WEVT_FIELD_XML "XML" +#define WEVT_FIELD_MESSAGE "Message" +#define WEVT_FIELD_EVENTS_API "EventsAPI" +#define WEVT_FIELD_EVENT_DATA_HIDDEN "__HIDDEN__EVENT__DATA__" +#define WEVT_FIELD_EVENT_MESSAGE_HIDDEN "__HIDDEN__MESSAGE__DATA__" +#define WEVT_FIELD_EVENT_XML_HIDDEN "__HIDDEN__XML__DATA__" + +// functions needed by LQS + +// structures needed by LQS +struct lqs_extension { + wchar_t *query; + + struct { + struct { + size_t completed; + size_t total; + } queries; + + struct { + size_t current_query_total; + size_t completed; + size_t total; + } entries; + + usec_t last_ut; + } progress; + + // struct { + // usec_t start_ut; + // usec_t stop_ut; + // usec_t first_msg_ut; + // + // uint64_t first_msg_seqnum; + // } query_file; + + // struct { + // uint32_t enable_after_samples; + // uint32_t slots; + // uint32_t sampled; + // uint32_t unsampled; + // uint32_t estimated; + // } samples; + + // struct { + // uint32_t enable_after_samples; + // uint32_t every; + // uint32_t skipped; + // uint32_t recalibrate; + // uint32_t sampled; + // uint32_t unsampled; + // uint32_t estimated; + // } samples_per_file; + + // struct { + // usec_t start_ut; + // usec_t end_ut; + // usec_t step_ut; + // uint32_t enable_after_samples; + // uint32_t sampled[SYSTEMD_JOURNAL_SAMPLING_SLOTS]; + // uint32_t unsampled[SYSTEMD_JOURNAL_SAMPLING_SLOTS]; + // } samples_per_time_slot; + + // per file progress info + // size_t cached_count; + + // progress statistics + usec_t matches_setup_ut; + size_t rows_useful; + size_t rows_read; + size_t bytes_read; + size_t files_matched; + size_t file_working; +}; + +// prepare LQS +#define LQS_DEFAULT_SLICE_MODE 0 +#define LQS_FUNCTION_NAME WEVT_FUNCTION_NAME +#define LQS_FUNCTION_DESCRIPTION 
WEVT_FUNCTION_DESCRIPTION +#define LQS_DEFAULT_ITEMS_PER_QUERY 200 +#define LQS_DEFAULT_ITEMS_SAMPLING 1000000 +#define LQS_SOURCE_TYPE WEVT_SOURCE_TYPE +#define LQS_SOURCE_TYPE_ALL WEVTS_ALL +#define LQS_SOURCE_TYPE_NONE WEVTS_NONE +#define LQS_PARAMETER_SOURCE_NAME "Event Channels" // this is how it is shown to users +#define LQS_FUNCTION_GET_INTERNAL_SOURCE_TYPE(value) WEVT_SOURCE_TYPE_2id_one(value) +#define LQS_FUNCTION_SOURCE_TO_JSON_ARRAY(wb) wevt_sources_to_json_array(wb) +#include "libnetdata/facets/logs_query_status.h" + +#include "windows-events-query-builder.h" // needs the LQS definition, so it has to be last + +#endif //NETDATA_WINDOWS_EVENTS_H diff --git a/src/collectors/windows.plugin/GetSystemUptime.c b/src/collectors/windows.plugin/GetSystemUptime.c index 9ed939ca0..59bf9d855 100644 --- a/src/collectors/windows.plugin/GetSystemUptime.c +++ b/src/collectors/windows.plugin/GetSystemUptime.c @@ -1,34 +1,34 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include "windows_plugin.h" -#include "windows-internals.h" - -int do_GetSystemUptime(int update_every, usec_t dt __maybe_unused) { - ULONGLONG uptime = GetTickCount64(); // in milliseconds - - static RRDSET *st = NULL; - static RRDDIM *rd_uptime = NULL; - if (!st) { - st = rrdset_create_localhost( - "system" - , "uptime" - , NULL - , "uptime" - , "system.uptime" - , "System Uptime" - , "seconds" - , PLUGIN_WINDOWS_NAME - , "GetSystemUptime" - , NETDATA_CHART_PRIO_SYSTEM_UPTIME - , update_every - , RRDSET_TYPE_LINE - ); - - rd_uptime = rrddim_add(st, "uptime", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); - } - - rrddim_set_by_pointer(st, rd_uptime, (collected_number)uptime); - rrdset_done(st); - - return 0; -} +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "windows_plugin.h" +#include "windows-internals.h" + +int do_GetSystemUptime(int update_every, usec_t dt __maybe_unused) { + ULONGLONG uptime = GetTickCount64(); // in milliseconds + + static RRDSET *st = NULL; + static RRDDIM *rd_uptime = NULL; + if (!st) { + st = rrdset_create_localhost( + "system" + , "uptime" + , NULL + , "uptime" + , "system.uptime" + , "System Uptime" + , "seconds" + , PLUGIN_WINDOWS_NAME + , "GetSystemUptime" + , NETDATA_CHART_PRIO_SYSTEM_UPTIME + , update_every + , RRDSET_TYPE_LINE + ); + + rd_uptime = rrddim_add(st, "uptime", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); + } + + rrddim_set_by_pointer(st, rd_uptime, (collected_number)uptime); + rrdset_done(st); + + return 0; +} diff --git a/src/collectors/windows.plugin/integrations/memory_statistics.md b/src/collectors/windows.plugin/integrations/memory_statistics.md new file mode 100644 index 000000000..2f67580a6 --- /dev/null +++ b/src/collectors/windows.plugin/integrations/memory_statistics.md @@ -0,0 +1,123 @@ + + +# Memory statistics + + + + + +Plugin: windows.plugin +Module: PerflibMemory + + + +## Overview + +This collector monitors swap and memory pool statistics on Windows systems. + + +It queries for the 'Memory' object from Perflib in order to gather the metrics. + + +This collector is only supported on the following platforms: + +- windows + +This collector only supports collecting metrics from a single instance of this integration. + + +### Default Behavior + +#### Auto-Detection + +The collector automatically detects all of the metrics, no further configuration is required. + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. 
+ +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Memory statistics instance + +These metrics refer to the entire monitored instance + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| mem.swap_iops | read, write | operations/s | +| mem.swap_pages_io | read, write | pages/s | +| mem.system_pool_size | paged, pool-paged | bytes | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `netdata.conf`. +Configuration for this specific integration is located in the `[plugin:windows]` section within that file. + +The file format is a modified INI syntax. The general structure is: + +```ini +[section1] + option1 = some value + option2 = some other value + +[section2] + option3 = some third value +``` +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config netdata.conf +``` +#### Options + + + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| PerflibMemory | An option to enable or disable the data collection. | yes | no | + +#### Examples +There are no configuration examples. + + diff --git a/src/collectors/windows.plugin/integrations/system_statistics.md b/src/collectors/windows.plugin/integrations/system_statistics.md new file mode 100644 index 000000000..6df183a7a --- /dev/null +++ b/src/collectors/windows.plugin/integrations/system_statistics.md @@ -0,0 +1,123 @@ + + +# System statistics + + + + + +Plugin: windows.plugin +Module: PerflibProcesses + + + +## Overview + +This collector monitors the current number of processes, threads, and context switches on Windows systems. + + +It queries the 'System' object from Perflib in order to gather the metrics. + + +This collector is only supported on the following platforms: + +- windows + +This collector only supports collecting metrics from a single instance of this integration. + + +### Default Behavior + +#### Auto-Detection + +The collector automatically detects all of the metrics, no further configuration is required. + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per System statistics instance + +These metrics refer to the entire monitored instance. + +This scope has no labels. 
+ +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| system.processes | running | processes | +| system.threads | threads | threads | +| system.ctxt | switches | context switches/s | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `netdata.conf`. +Configuration for this specific integration is located in the `[plugin:windows]` section within that file. + +The file format is a modified INI syntax. The general structure is: + +```ini +[section1] + option1 = some value + option2 = some other value + +[section2] + option3 = some third value +``` +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config netdata.conf +``` +#### Options + + + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| PerflibProcesses | An option to enable or disable the data collection. | yes | no | + +#### Examples +There are no configuration examples. + + diff --git a/src/collectors/windows.plugin/integrations/system_thermal_zone.md b/src/collectors/windows.plugin/integrations/system_thermal_zone.md new file mode 100644 index 000000000..6a740b8a0 --- /dev/null +++ b/src/collectors/windows.plugin/integrations/system_thermal_zone.md @@ -0,0 +1,121 @@ + + +# System thermal zone + + + + + +Plugin: windows.plugin +Module: PerflibThermalZone + + + +## Overview + +This collector monitors thermal zone statistics on Windows systems. + + +It queries for the 'Thermal Zone Information' object from Perflib in order to gather the metrics. + + +This collector is only supported on the following platforms: + +- windows + +This collector only supports collecting metrics from a single instance of this integration. + + +### Default Behavior + +#### Auto-Detection + +The collector automatically detects all of the metrics, no further configuration is required. + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Thermal zone + +These metrics refer to a Thermal zone + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| system.thermalzone_temperature | temperature | celsius | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `netdata.conf`. +Configuration for this specific integration is located in the `[plugin:windows]` section within that file. + +The file format is a modified INI syntax. 
+
+```ini
+[section1]
+    option1 = some value
+    option2 = some other value
+
+[section2]
+    option3 = some third value
+```
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config netdata.conf
+```
+#### Options
+
+
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| PerflibThermalZone | An option to enable or disable the data collection. | yes | no |
+
+#### Examples
+There are no configuration examples.
+
+
diff --git a/src/collectors/windows.plugin/metadata.yaml b/src/collectors/windows.plugin/metadata.yaml
new file mode 100644
index 000000000..52694e03d
--- /dev/null
+++ b/src/collectors/windows.plugin/metadata.yaml
@@ -0,0 +1,276 @@
+plugin_name: windows.plugin
+modules:
+  - meta:
+      plugin_name: windows.plugin
+      module_name: PerflibProcesses
+      monitored_instance:
+        name: System statistics
+        link: "https://learn.microsoft.com/en-us/windows/win32/procthread/processes-and-threads"
+        categories:
+          - data-collection.windows-systems
+        icon_filename: "windows.svg"
+      related_resources:
+        integrations:
+          list: []
+      info_provided_to_referring_integrations:
+        description: ""
+      keywords:
+        - process counts
+        - threads
+        - context switch
+      most_popular: false
+    overview:
+      data_collection:
+        metrics_description: |
+          This collector monitors the current number of processes, threads, and context switches on Windows systems.
+        method_description: |
+          It queries the 'System' object from Perflib in order to gather the metrics.
+      supported_platforms:
+        include: ["windows"]
+        exclude: []
+      multi_instance: false
+      additional_permissions:
+        description: ""
+      default_behavior:
+        auto_detection:
+          description: |
+            The collector automatically detects all of the metrics. No further configuration is required.
+        limits:
+          description: ""
+        performance_impact:
+          description: ""
+    setup:
+      prerequisites:
+        list: []
+      configuration:
+        file:
+          name: "netdata.conf"
+          section_name: "[plugin:windows]"
+          description: "The Netdata main configuration file"
+        options:
+          description: ""
+          folding:
+            title: "Config option"
+            enabled: false
+          list:
+            - name: PerflibProcesses
+              description: An option to enable or disable the data collection.
+              default_value: yes
+              required: false
+        examples:
+          folding:
+            enabled: true
+            title: ""
+          list: []
+    troubleshooting:
+      problems:
+        list: []
+    alerts: []
+    metrics:
+      folding:
+        title: Metrics
+        enabled: false
+      description: ""
+      availability: []
+      scopes:
+        - name: global
+          description: "These metrics refer to the entire monitored instance."
+          labels: []
+          metrics:
+            - name: system.processes
+              description: System Processes
+              unit: "processes"
+              chart_type: line
+              dimensions:
+                - name: running
+            - name: system.threads
+              description: System Threads
+              unit: "threads"
+              chart_type: line
+              dimensions:
+                - name: threads
+            - name: system.ctxt
+              description: CPU Context Switches
+              unit: "context switches/s"
+              chart_type: line
+              dimensions:
+                - name: switches
+  - meta:
+      plugin_name: windows.plugin
+      module_name: PerflibMemory
+      monitored_instance:
+        name: Memory statistics
+        link: "https://learn.microsoft.com/en-us/windows/win32/Memory/memory-management"
+        categories:
+          - data-collection.windows-systems
+        icon_filename: "windows.svg"
+      related_resources:
+        integrations:
+          list: []
+      info_provided_to_referring_integrations:
+        description: ""
+      keywords:
+        - memory
+        - swap
+      most_popular: false
+    overview:
+      data_collection:
+        metrics_description: |
+          This collector monitors swap and memory pool statistics on Windows systems.
+        method_description: |
+          It queries for the 'Memory' object from Perflib in order to gather the metrics.
+      supported_platforms:
+        include: ["windows"]
+        exclude: []
+      multi_instance: false
+      additional_permissions:
+        description: ""
+      default_behavior:
+        auto_detection:
+          description: |
+            The collector automatically detects all of the metrics. No further configuration is required.
+        limits:
+          description: ""
+        performance_impact:
+          description: ""
+    setup:
+      prerequisites:
+        list: []
+      configuration:
+        file:
+          name: "netdata.conf"
+          section_name: "[plugin:windows]"
+          description: "The Netdata main configuration file"
+        options:
+          description: ""
+          folding:
+            title: "Config option"
+            enabled: false
+          list:
+            - name: PerflibMemory
+              description: An option to enable or disable the data collection.
+              default_value: yes
+              required: false
+        examples:
+          folding:
+            enabled: true
+            title: ""
+          list: []
+    troubleshooting:
+      problems:
+        list: []
+    alerts: []
+    metrics:
+      folding:
+        title: Metrics
+        enabled: false
+      description: ""
+      availability: []
+      scopes:
+        - name: global
+          description: "These metrics refer to the entire monitored instance."
+          labels: []
+          metrics:
+            - name: mem.swap_iops
+              description: Swap Operations
+              unit: "operations/s"
+              chart_type: stacked
+              dimensions:
+                - name: read
+                - name: write
+            - name: mem.swap_pages_io
+              description: Swap Pages
+              unit: "pages/s"
+              chart_type: stacked
+              dimensions:
+                - name: read
+                - name: write
+            - name: mem.system_pool_size
+              description: System Memory Pool
+              unit: "bytes"
+              chart_type: stacked
+              dimensions:
+                - name: paged
+                - name: pool-paged
+  - meta:
+      plugin_name: windows.plugin
+      module_name: PerflibThermalZone
+      monitored_instance:
+        name: System thermal zone
+        link: "https://learn.microsoft.com/en-us/windows-hardware/design/device-experiences/design-guide"
+        categories:
+          - data-collection.windows-systems
+        icon_filename: "windows.svg"
+      related_resources:
+        integrations:
+          list: []
+      info_provided_to_referring_integrations:
+        description: ""
+      keywords:
+        - thermal
+        - temperature
+      most_popular: false
+    overview:
+      data_collection:
+        metrics_description: |
+          This collector monitors thermal zone statistics on Windows systems.
+        method_description: |
+          It queries for the 'Thermal Zone Information' object from Perflib in order to gather the metrics.
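+      # The 'Thermal Zone Information' Perflib object typically exposes one
+      # instance per ACPI thermal zone reported by the platform firmware.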
+      supported_platforms:
+        include: ["windows"]
+        exclude: []
+      multi_instance: false
+      additional_permissions:
+        description: ""
+      default_behavior:
+        auto_detection:
+          description: |
+            The collector automatically detects all of the metrics. No further configuration is required.
+        limits:
+          description: ""
+        performance_impact:
+          description: ""
+    setup:
+      prerequisites:
+        list: []
+      configuration:
+        file:
+          name: "netdata.conf"
+          section_name: "[plugin:windows]"
+          description: "The Netdata main configuration file"
+        options:
+          description: ""
+          folding:
+            title: "Config option"
+            enabled: false
+          list:
+            - name: PerflibThermalZone
+              description: An option to enable or disable the data collection.
+              default_value: yes
+              required: false
+        examples:
+          folding:
+            enabled: false
+            title: ""
+          list: []
+    troubleshooting:
+      problems:
+        list: []
+    alerts: []
+    metrics:
+      folding:
+        title: Metrics
+        enabled: false
+      description: ""
+      availability: []
+      scopes:
+        - name: Thermal zone
+          description: "These metrics refer to a Thermal zone."
+          labels: []
+          metrics:
+            - name: system.thermalzone_temperature
+              description: Thermal zone temperature
+              unit: celsius
+              chart_type: line
+              dimensions:
+                - name: temperature
diff --git a/src/collectors/windows.plugin/metdata.yaml b/src/collectors/windows.plugin/metdata.yaml
deleted file mode 100644
index 090a48db5..000000000
--- a/src/collectors/windows.plugin/metdata.yaml
+++ /dev/null
@@ -1,92 +0,0 @@
-plugin_name: windows.plugin
-modules:
-  - meta:
-      plugin_name: proc.plugin
-      module_name: PerflibProcesses
-      monitored_instance:
-        name: System statistics
-        link: ""
-        categories:
-          - data-collection.windows-systems
-        icon_filename: "windows.svg"
-      related_resources:
-        integrations:
-          list: [ ]
-      info_provided_to_referring_integrations:
-        description: ""
-      keywords:
-        - process counts
-        - threads
-      most_popular: false
-    overview:
-      data_collection:
-        metrics_description: |
-          Perflib provides different statistical methods about Microsoft Windows environment. This collector query for
-          Object 'System' to show actual number of processes, threads and context switches.
-        method_description: ""
-      supported_platforms:
-        include: [ "windows" ]
-        exclude: [ ]
-      multi_instance: false
-      additional_permissions:
-        description: ""
-      default_behavior:
-        auto_detection:
-          description: |
-            The collector auto-detects all metrics. No configuration is needed.
- limits: - description: "" - performance_impact: - description: "" - setup: - prerequisites: - list: [ ] - configuration: - file: - section_name: "" - name: "" - description: "" - options: - description: "" - folding: - title: "" - enabled: true - list: [ ] - examples: - folding: - enabled: true - title: "" - list: [ ] - troubleshooting: - problems: - list: [ ] - alerts: - metrics: - folding: - title: Metrics - enabled: false - description: "" - availability: [ ] - scopes: - - name: global - description: "" - labels: [ ] - metrics: - - name: system.processes - description: System Processes - unit: "processes" - chart_type: line - dimensions: - - name: running - - name: system.threads - description: System Threads - unit: "threads" - chart_type: line - dimensions: - - name: threads - - name: system.ctxt - description: CPU Context Switches - unit: "context switches/s" - chart_type: line - dimensions: - - name: switches \ No newline at end of file diff --git a/src/collectors/windows.plugin/perflib-dump.c b/src/collectors/windows.plugin/perflib-dump.c deleted file mode 100644 index e01813a49..000000000 --- a/src/collectors/windows.plugin/perflib-dump.c +++ /dev/null @@ -1,529 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include "perflib.h" -#include "windows-internals.h" - -static const char *getCounterType(DWORD CounterType) { - switch (CounterType) { - case PERF_COUNTER_COUNTER: - return "PERF_COUNTER_COUNTER"; - - case PERF_COUNTER_TIMER: - return "PERF_COUNTER_TIMER"; - - case PERF_COUNTER_QUEUELEN_TYPE: - return "PERF_COUNTER_QUEUELEN_TYPE"; - - case PERF_COUNTER_LARGE_QUEUELEN_TYPE: - return "PERF_COUNTER_LARGE_QUEUELEN_TYPE"; - - case PERF_COUNTER_100NS_QUEUELEN_TYPE: - return "PERF_COUNTER_100NS_QUEUELEN_TYPE"; - - case PERF_COUNTER_OBJ_TIME_QUEUELEN_TYPE: - return "PERF_COUNTER_OBJ_TIME_QUEUELEN_TYPE"; - - case PERF_COUNTER_BULK_COUNT: - return "PERF_COUNTER_BULK_COUNT"; - - case PERF_COUNTER_TEXT: - return "PERF_COUNTER_TEXT"; - - case PERF_COUNTER_RAWCOUNT: - return "PERF_COUNTER_RAWCOUNT"; - - case PERF_COUNTER_LARGE_RAWCOUNT: - return "PERF_COUNTER_LARGE_RAWCOUNT"; - - case PERF_COUNTER_RAWCOUNT_HEX: - return "PERF_COUNTER_RAWCOUNT_HEX"; - - case PERF_COUNTER_LARGE_RAWCOUNT_HEX: - return "PERF_COUNTER_LARGE_RAWCOUNT_HEX"; - - case PERF_SAMPLE_FRACTION: - return "PERF_SAMPLE_FRACTION"; - - case PERF_SAMPLE_COUNTER: - return "PERF_SAMPLE_COUNTER"; - - case PERF_COUNTER_NODATA: - return "PERF_COUNTER_NODATA"; - - case PERF_COUNTER_TIMER_INV: - return "PERF_COUNTER_TIMER_INV"; - - case PERF_SAMPLE_BASE: - return "PERF_SAMPLE_BASE"; - - case PERF_AVERAGE_TIMER: - return "PERF_AVERAGE_TIMER"; - - case PERF_AVERAGE_BASE: - return "PERF_AVERAGE_BASE"; - - case PERF_AVERAGE_BULK: - return "PERF_AVERAGE_BULK"; - - case PERF_OBJ_TIME_TIMER: - return "PERF_OBJ_TIME_TIMER"; - - case PERF_100NSEC_TIMER: - return "PERF_100NSEC_TIMER"; - - case PERF_100NSEC_TIMER_INV: - return "PERF_100NSEC_TIMER_INV"; - - case PERF_COUNTER_MULTI_TIMER: - return "PERF_COUNTER_MULTI_TIMER"; - - case PERF_COUNTER_MULTI_TIMER_INV: - return "PERF_COUNTER_MULTI_TIMER_INV"; - - case PERF_COUNTER_MULTI_BASE: - return "PERF_COUNTER_MULTI_BASE"; - - case PERF_100NSEC_MULTI_TIMER: - return "PERF_100NSEC_MULTI_TIMER"; - - case PERF_100NSEC_MULTI_TIMER_INV: - return "PERF_100NSEC_MULTI_TIMER_INV"; - - case PERF_RAW_FRACTION: - return "PERF_RAW_FRACTION"; - - case PERF_LARGE_RAW_FRACTION: - return "PERF_LARGE_RAW_FRACTION"; - - case PERF_RAW_BASE: - return "PERF_RAW_BASE"; - - case PERF_LARGE_RAW_BASE: - return 
"PERF_LARGE_RAW_BASE"; - - case PERF_ELAPSED_TIME: - return "PERF_ELAPSED_TIME"; - - case PERF_COUNTER_HISTOGRAM_TYPE: - return "PERF_COUNTER_HISTOGRAM_TYPE"; - - case PERF_COUNTER_DELTA: - return "PERF_COUNTER_DELTA"; - - case PERF_COUNTER_LARGE_DELTA: - return "PERF_COUNTER_LARGE_DELTA"; - - case PERF_PRECISION_SYSTEM_TIMER: - return "PERF_PRECISION_SYSTEM_TIMER"; - - case PERF_PRECISION_100NS_TIMER: - return "PERF_PRECISION_100NS_TIMER"; - - case PERF_PRECISION_OBJECT_TIMER: - return "PERF_PRECISION_OBJECT_TIMER"; - - default: - return "UNKNOWN_COUNTER_TYPE"; - } -} - -static const char *getCounterDescription(DWORD CounterType) { - switch (CounterType) { - case PERF_COUNTER_COUNTER: - return "32-bit Counter. Divide delta by delta time. Display suffix: \"/sec\""; - - case PERF_COUNTER_TIMER: - return "64-bit Timer. Divide delta by delta time. Display suffix: \"%\""; - - case PERF_COUNTER_QUEUELEN_TYPE: - case PERF_COUNTER_LARGE_QUEUELEN_TYPE: - return "Queue Length Space-Time Product. Divide delta by delta time. No Display Suffix"; - - case PERF_COUNTER_100NS_QUEUELEN_TYPE: - return "Queue Length Space-Time Product using 100 Ns timebase. Divide delta by delta time. No Display Suffix"; - - case PERF_COUNTER_OBJ_TIME_QUEUELEN_TYPE: - return "Queue Length Space-Time Product using Object specific timebase. Divide delta by delta time. No Display Suffix."; - - case PERF_COUNTER_BULK_COUNT: - return "64-bit Counter. Divide delta by delta time. Display Suffix: \"/sec\""; - - case PERF_COUNTER_TEXT: - return "Unicode text Display as text."; - - case PERF_COUNTER_RAWCOUNT: - case PERF_COUNTER_LARGE_RAWCOUNT: - return "A counter which should not be time averaged on display (such as an error counter on a serial line). Display as is. No Display Suffix."; - - case PERF_COUNTER_RAWCOUNT_HEX: - case PERF_COUNTER_LARGE_RAWCOUNT_HEX: - return "Special case for RAWCOUNT which should be displayed in hex. A counter which should not be time averaged on display (such as an error counter on a serial line). Display as is. No Display Suffix."; - - case PERF_SAMPLE_FRACTION: - return "A count which is either 1 or 0 on each sampling interrupt (% busy). Divide delta by delta base. Display Suffix: \"%\""; - - case PERF_SAMPLE_COUNTER: - return "A count which is sampled on each sampling interrupt (queue length). Divide delta by delta time. No Display Suffix."; - - case PERF_COUNTER_NODATA: - return "A label: no data is associated with this counter (it has 0 length). Do not display."; - - case PERF_COUNTER_TIMER_INV: - return "64-bit Timer inverse (e.g., idle is measured, but display busy %). Display 100 - delta divided by delta time. Display suffix: \"%\""; - - case PERF_SAMPLE_BASE: - return "The divisor for a sample, used with the previous counter to form a sampled %. You must check for >0 before dividing by this! This counter will directly follow the numerator counter. It should not be displayed to the user."; - - case PERF_AVERAGE_TIMER: - return "A timer which, when divided by an average base, produces a time in seconds which is the average time of some operation. This timer times total operations, and the base is the number of operations. Display Suffix: \"sec\""; - - case PERF_AVERAGE_BASE: - return "Used as the denominator in the computation of time or count averages. Must directly follow the numerator counter. Not displayed to the user."; - - case PERF_AVERAGE_BULK: - return "A bulk count which, when divided (typically) by the number of operations, gives (typically) the number of bytes per operation. 
No Display Suffix."; - - case PERF_OBJ_TIME_TIMER: - return "64-bit Timer in object specific units. Display delta divided by delta time as returned in the object type header structure. Display suffix: \"%\""; - - case PERF_100NSEC_TIMER: - return "64-bit Timer in 100 nsec units. Display delta divided by delta time. Display suffix: \"%\""; - - case PERF_100NSEC_TIMER_INV: - return "64-bit Timer inverse (e.g., idle is measured, but display busy %). Display 100 - delta divided by delta time. Display suffix: \"%\""; - - case PERF_COUNTER_MULTI_TIMER: - return "64-bit Timer. Divide delta by delta time. Display suffix: \"%\". Timer for multiple instances, so result can exceed 100%."; - - case PERF_COUNTER_MULTI_TIMER_INV: - return "64-bit Timer inverse (e.g., idle is measured, but display busy %). Display 100 * _MULTI_BASE - delta divided by delta time. Display suffix: \"%\" Timer for multiple instances, so result can exceed 100%. Followed by a counter of type _MULTI_BASE."; - - case PERF_COUNTER_MULTI_BASE: - return "Number of instances to which the preceding _MULTI_..._INV counter applies. Used as a factor to get the percentage."; - - case PERF_100NSEC_MULTI_TIMER: - return "64-bit Timer in 100 nsec units. Display delta divided by delta time. Display suffix: \"%\" Timer for multiple instances, so result can exceed 100%."; - - case PERF_100NSEC_MULTI_TIMER_INV: - return "64-bit Timer inverse (e.g., idle is measured, but display busy %). Display 100 * _MULTI_BASE - delta divided by delta time. Display suffix: \"%\" Timer for multiple instances, so result can exceed 100%. Followed by a counter of type _MULTI_BASE."; - - case PERF_LARGE_RAW_FRACTION: - case PERF_RAW_FRACTION: - return "Indicates the data is a fraction of the following counter which should not be time averaged on display (such as free space over total space.) Display as is. Display the quotient as \"%\""; - - case PERF_RAW_BASE: - case PERF_LARGE_RAW_BASE: - return "Indicates the data is a base for the preceding counter which should not be time averaged on display (such as free space over total space.)"; - - case PERF_ELAPSED_TIME: - return "The data collected in this counter is actually the start time of the item being measured. For display, this data is subtracted from the sample time to yield the elapsed time as the difference between the two. In the definition below, the PerfTime field of the Object contains the sample time as indicated by the PERF_OBJECT_TIMER bit and the difference is scaled by the PerfFreq of the Object to convert the time units into seconds."; - - case PERF_COUNTER_HISTOGRAM_TYPE: - return "Counter type can be used with the preceding types to define a range of values to be displayed in a histogram."; - - case PERF_COUNTER_DELTA: - case PERF_COUNTER_LARGE_DELTA: - return "This counter is used to display the difference from one sample to the next. The counter value is a constantly increasing number and the value displayed is the difference between the current value and the previous value. Negative numbers are not allowed which shouldn't be a problem as long as the counter value is increasing or unchanged."; - - case PERF_PRECISION_SYSTEM_TIMER: - return "The precision counters are timers that consist of two counter values:\r\n\t1) the count of elapsed time of the event being monitored\r\n\t2) the \"clock\" time in the same units\r\nthe precision timers are used where the standard system timers are not precise enough for accurate readings. 
It's assumed that the service providing the data is also providing a timestamp at the same time which will eliminate any error that may occur since some small and variable time elapses between the time the system timestamp is captured and when the data is collected from the performance DLL. Only in extreme cases has this been observed to be problematic.\r\nwhen using this type of timer, the definition of the PERF_PRECISION_TIMESTAMP counter must immediately follow the definition of the PERF_PRECISION_*_TIMER in the Object header\r\nThe timer used has the same frequency as the System Performance Timer"; - - case PERF_PRECISION_100NS_TIMER: - return "The precision counters are timers that consist of two counter values:\r\n\t1) the count of elapsed time of the event being monitored\r\n\t2) the \"clock\" time in the same units\r\nthe precision timers are used where the standard system timers are not precise enough for accurate readings. It's assumed that the service providing the data is also providing a timestamp at the same time which will eliminate any error that may occur since some small and variable time elapses between the time the system timestamp is captured and when the data is collected from the performance DLL. Only in extreme cases has this been observed to be problematic.\r\nwhen using this type of timer, the definition of the PERF_PRECISION_TIMESTAMP counter must immediately follow the definition of the PERF_PRECISION_*_TIMER in the Object header\r\nThe timer used has the same frequency as the 100 NanoSecond Timer"; - - case PERF_PRECISION_OBJECT_TIMER: - return "The precision counters are timers that consist of two counter values:\r\n\t1) the count of elapsed time of the event being monitored\r\n\t2) the \"clock\" time in the same units\r\nthe precision timers are used where the standard system timers are not precise enough for accurate readings. It's assumed that the service providing the data is also providing a timestamp at the same time which will eliminate any error that may occur since some small and variable time elapses between the time the system timestamp is captured and when the data is collected from the performance DLL. Only in extreme cases has this been observed to be problematic.\r\nwhen using this type of timer, the definition of the PERF_PRECISION_TIMESTAMP counter must immediately follow the definition of the PERF_PRECISION_*_TIMER in the Object header\r\nThe timer used is of the frequency specified in the Object header's. 
PerfFreq field (PerfTime is ignored)"; - - default: - return ""; - } -} - -static const char *getCounterAlgorithm(DWORD CounterType) { - switch (CounterType) - { - case PERF_COUNTER_COUNTER: - case PERF_SAMPLE_COUNTER: - case PERF_COUNTER_BULK_COUNT: - return "(data1 - data0) / ((time1 - time0) / frequency)"; - - case PERF_COUNTER_QUEUELEN_TYPE: - case PERF_COUNTER_100NS_QUEUELEN_TYPE: - case PERF_COUNTER_OBJ_TIME_QUEUELEN_TYPE: - case PERF_COUNTER_LARGE_QUEUELEN_TYPE: - case PERF_AVERAGE_BULK: // normally not displayed - return "(data1 - data0) / (time1 - time0)"; - - case PERF_OBJ_TIME_TIMER: - case PERF_COUNTER_TIMER: - case PERF_100NSEC_TIMER: - case PERF_PRECISION_SYSTEM_TIMER: - case PERF_PRECISION_100NS_TIMER: - case PERF_PRECISION_OBJECT_TIMER: - case PERF_SAMPLE_FRACTION: - return "100 * (data1 - data0) / (time1 - time0)"; - - case PERF_COUNTER_TIMER_INV: - return "100 * (1 - ((data1 - data0) / (time1 - time0)))"; - - case PERF_100NSEC_TIMER_INV: - return "100 * (1- (data1 - data0) / (time1 - time0))"; - - case PERF_COUNTER_MULTI_TIMER: - return "100 * ((data1 - data0) / ((time1 - time0) / frequency1)) / multi1"; - - case PERF_100NSEC_MULTI_TIMER: - return "100 * ((data1 - data0) / (time1 - time0)) / multi1"; - - case PERF_COUNTER_MULTI_TIMER_INV: - case PERF_100NSEC_MULTI_TIMER_INV: - return "100 * (multi1 - ((data1 - data0) / (time1 - time0)))"; - - case PERF_COUNTER_RAWCOUNT: - case PERF_COUNTER_LARGE_RAWCOUNT: - return "data0"; - - case PERF_COUNTER_RAWCOUNT_HEX: - case PERF_COUNTER_LARGE_RAWCOUNT_HEX: - return "hex(data0)"; - - case PERF_COUNTER_DELTA: - case PERF_COUNTER_LARGE_DELTA: - return "data1 - data0"; - - case PERF_RAW_FRACTION: - case PERF_LARGE_RAW_FRACTION: - return "100 * data0 / time0"; - - case PERF_AVERAGE_TIMER: - return "((data1 - data0) / frequency1) / (time1 - time0)"; - - case PERF_ELAPSED_TIME: - return "(time0 - data0) / frequency0"; - - case PERF_COUNTER_TEXT: - case PERF_SAMPLE_BASE: - case PERF_AVERAGE_BASE: - case PERF_COUNTER_MULTI_BASE: - case PERF_RAW_BASE: - case PERF_COUNTER_NODATA: - case PERF_PRECISION_TIMESTAMP: - default: - return ""; - } -} - -void dumpSystemTime(BUFFER *wb, SYSTEMTIME *st) { - buffer_json_member_add_uint64(wb, "Year", st->wYear); - buffer_json_member_add_uint64(wb, "Month", st->wMonth); - buffer_json_member_add_uint64(wb, "DayOfWeek", st->wDayOfWeek); - buffer_json_member_add_uint64(wb, "Day", st->wDay); - buffer_json_member_add_uint64(wb, "Hour", st->wHour); - buffer_json_member_add_uint64(wb, "Minute", st->wMinute); - buffer_json_member_add_uint64(wb, "Second", st->wSecond); - buffer_json_member_add_uint64(wb, "Milliseconds", st->wMilliseconds); -} - -bool dumpDataCb(PERF_DATA_BLOCK *pDataBlock, void *data) { - char name[4096]; - if(!getSystemName(pDataBlock, name, sizeof(name))) - strncpyz(name, "[failed]", sizeof(name) - 1); - - BUFFER *wb = data; - buffer_json_member_add_string(wb, "SystemName", name); - - // Number of types of objects being reported - // Type: DWORD - buffer_json_member_add_int64(wb, "NumObjectTypes", pDataBlock->NumObjectTypes); - - buffer_json_member_add_int64(wb, "LittleEndian", pDataBlock->LittleEndian); - - // Version and Revision of these data structures. - // Version starts at 1. - // Revision starts at 0 for each Version. 
- // Type: DWORD - buffer_json_member_add_int64(wb, "Version", pDataBlock->Version); - buffer_json_member_add_int64(wb, "Revision", pDataBlock->Revision); - - // Object Title Index of default object to display when data from this system is retrieved - // (-1 = none, but this is not expected to be used) - // Type: LONG - buffer_json_member_add_int64(wb, "DefaultObject", pDataBlock->DefaultObject); - - // Performance counter frequency at the system under measurement - // Type: LARGE_INTEGER - buffer_json_member_add_int64(wb, "PerfFreq", pDataBlock->PerfFreq.QuadPart); - - // Performance counter value at the system under measurement - // Type: LARGE_INTEGER - buffer_json_member_add_int64(wb, "PerfTime", pDataBlock->PerfTime.QuadPart); - - // Performance counter time in 100 nsec units at the system under measurement - // Type: LARGE_INTEGER - buffer_json_member_add_int64(wb, "PerfTime100nSec", pDataBlock->PerfTime100nSec.QuadPart); - - // Time at the system under measurement in UTC - // Type: SYSTEMTIME - buffer_json_member_add_object(wb, "SystemTime"); - dumpSystemTime(wb, &pDataBlock->SystemTime); - buffer_json_object_close(wb); - - if(pDataBlock->NumObjectTypes) - buffer_json_member_add_array(wb, "Objects"); - - return true; -} - -static const char *GetDetailLevel(DWORD num) { - switch (num) { - case 100: - return "Novice (100)"; - case 200: - return "Advanced (200)"; - case 300: - return "Expert (300)"; - case 400: - return "Wizard (400)"; - - default: - return "Unknown"; - } -} - -bool dumpObjectCb(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, void *data) { - (void)pDataBlock; - BUFFER *wb = data; - if(!pObjectType) { - buffer_json_array_close(wb); // instances or counters - buffer_json_object_close(wb); // objectType - return true; - } - - buffer_json_add_array_item_object(wb); // objectType - buffer_json_member_add_int64(wb, "NameId", pObjectType->ObjectNameTitleIndex); - buffer_json_member_add_string(wb, "Name", RegistryFindNameByID(pObjectType->ObjectNameTitleIndex)); - buffer_json_member_add_int64(wb, "HelpId", pObjectType->ObjectHelpTitleIndex); - buffer_json_member_add_string(wb, "Help", RegistryFindHelpByID(pObjectType->ObjectHelpTitleIndex)); - buffer_json_member_add_int64(wb, "NumInstances", pObjectType->NumInstances); - buffer_json_member_add_int64(wb, "NumCounters", pObjectType->NumCounters); - buffer_json_member_add_int64(wb, "PerfTime", pObjectType->PerfTime.QuadPart); - buffer_json_member_add_int64(wb, "PerfFreq", pObjectType->PerfFreq.QuadPart); - buffer_json_member_add_int64(wb, "CodePage", pObjectType->CodePage); - buffer_json_member_add_int64(wb, "DefaultCounter", pObjectType->DefaultCounter); - buffer_json_member_add_string(wb, "DetailLevel", GetDetailLevel(pObjectType->DetailLevel)); - - if(ObjectTypeHasInstances(pDataBlock, pObjectType)) - buffer_json_member_add_array(wb, "Instances"); - else - buffer_json_member_add_array(wb, "Counters"); - - return true; -} - -bool dumpInstanceCb(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, PERF_INSTANCE_DEFINITION *pInstance, void *data) { - (void)pDataBlock; - BUFFER *wb = data; - if(!pInstance) { - buffer_json_array_close(wb); // counters - buffer_json_object_close(wb); // instance - return true; - } - - char name[4096]; - if(!getInstanceName(pDataBlock, pObjectType, pInstance, name, sizeof(name))) - strncpyz(name, "[failed]", sizeof(name) - 1); - - buffer_json_add_array_item_object(wb); - buffer_json_member_add_string(wb, "Instance", name); - buffer_json_member_add_int64(wb, "UniqueID", 
pInstance->UniqueID); - buffer_json_member_add_array(wb, "Labels"); - { - buffer_json_add_array_item_object(wb); - { - buffer_json_member_add_string(wb, "key", RegistryFindNameByID(pObjectType->ObjectNameTitleIndex)); - buffer_json_member_add_string(wb, "value", name); - } - buffer_json_object_close(wb); - - if(pInstance->ParentObjectTitleIndex) { - PERF_INSTANCE_DEFINITION *pi = pInstance; - while(pi->ParentObjectTitleIndex) { - PERF_OBJECT_TYPE *po = getObjectTypeByIndex(pDataBlock, pInstance->ParentObjectTitleIndex); - pi = getInstanceByPosition(pDataBlock, po, pi->ParentObjectInstance); - - if(!getInstanceName(pDataBlock, po, pi, name, sizeof(name))) - strncpyz(name, "[failed]", sizeof(name) - 1); - - buffer_json_add_array_item_object(wb); - { - buffer_json_member_add_string(wb, "key", RegistryFindNameByID(po->ObjectNameTitleIndex)); - buffer_json_member_add_string(wb, "value", name); - } - buffer_json_object_close(wb); - } - } - } - buffer_json_array_close(wb); // rrdlabels - - buffer_json_member_add_array(wb, "Counters"); - return true; -} - -void dumpSample(BUFFER *wb, RAW_DATA *d) { - buffer_json_member_add_object(wb, "Value"); - buffer_json_member_add_uint64(wb, "data", d->Data); - buffer_json_member_add_int64(wb, "time", d->Time); - buffer_json_member_add_uint64(wb, "type", d->CounterType); - buffer_json_member_add_int64(wb, "multi", d->MultiCounterData); - buffer_json_member_add_int64(wb, "frequency", d->Frequency); - buffer_json_object_close(wb); -} - -bool dumpCounterCb(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, PERF_COUNTER_DEFINITION *pCounter, RAW_DATA *sample, void *data) { - (void)pDataBlock; - (void)pObjectType; - BUFFER *wb = data; - buffer_json_add_array_item_object(wb); - buffer_json_member_add_string(wb, "Counter", RegistryFindNameByID(pCounter->CounterNameTitleIndex)); - dumpSample(wb, sample); - buffer_json_member_add_string(wb, "Help", RegistryFindHelpByID(pCounter->CounterHelpTitleIndex)); - buffer_json_member_add_string(wb, "Type", getCounterType(pCounter->CounterType)); - buffer_json_member_add_string(wb, "Algorithm", getCounterAlgorithm(pCounter->CounterType)); - buffer_json_member_add_string(wb, "Description", getCounterDescription(pCounter->CounterType)); - buffer_json_object_close(wb); - return true; -} - -bool dumpInstanceCounterCb(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, PERF_INSTANCE_DEFINITION *pInstance, PERF_COUNTER_DEFINITION *pCounter, RAW_DATA *sample, void *data) { - (void)pInstance; - return dumpCounterCb(pDataBlock, pObjectType, pCounter, sample, data); -} - - -int windows_perflib_dump(const char *key) { - if(key && !*key) - key = NULL; - - PerflibNamesRegistryInitialize(); - - DWORD id = 0; - if(key) { - id = RegistryFindIDByName(key); - if(id == PERFLIB_REGISTRY_NAME_NOT_FOUND) { - fprintf(stderr, "Cannot find key '%s' in Windows Performance Counters Registry.\n", key); - exit(1); - } - } - - CLEAN_BUFFER *wb = buffer_create(0, NULL); - buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_MINIFY); - - perflibQueryAndTraverse(id, dumpDataCb, dumpObjectCb, dumpInstanceCb, dumpInstanceCounterCb, dumpCounterCb, wb); - - buffer_json_finalize(wb); - printf("\n%s\n", buffer_tostring(wb)); - - perflibFreePerformanceData(); - - return 0; -} diff --git a/src/collectors/windows.plugin/perflib-hyperv.c b/src/collectors/windows.plugin/perflib-hyperv.c new file mode 100644 index 000000000..523361995 --- /dev/null +++ b/src/collectors/windows.plugin/perflib-hyperv.c @@ -0,0 +1,1793 @@ +// 
SPDX-License-Identifier: GPL-3.0-or-later + +#include "windows_plugin.h" +#include "windows-internals.h" + +#define _COMMON_PLUGIN_NAME "windows.plugin" +#define _COMMON_PLUGIN_MODULE_NAME "PerflibHyperV" +#include "../common-contexts/common-contexts.h" + +#define HYPERV "hyperv" + +static void get_and_sanitize_instance_value( + PERF_DATA_BLOCK *pDataBlock, + PERF_OBJECT_TYPE *pObjectType, + PERF_INSTANCE_DEFINITION *pi, + char *buffer, + size_t buffer_size) +{ + // char wstr[8192]; + if (!getInstanceName(pDataBlock, pObjectType, pi, buffer, buffer_size)) { + strncpyz(buffer, "[unknown]", buffer_size - 1); + // return; + } + // rrdlabels_sanitize_value(buffer, wstr, buffer_size); +} + +#define DICT_PERF_OPTION (DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE) + +#define DEFINE_RD(counter_name) RRDDIM *rd_##counter_name + +#define GET_INSTANCE_COUNTER(counter) \ + do { \ + perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->counter); \ + } while (0) + +#define GET_OBJECT_COUNTER(counter) \ + do { \ + perflibGetObjectCounter(pDataBlock, pObjectType, &p->counter); \ + } while (0) + +#define SETP_DIM_VALUE(st, field) \ + do { \ + rrddim_set_by_pointer(p->st, p->rd_##field, (collected_number)p->field.current.Data); \ + } while (0) + +typedef bool (*perf_func_collect)(PERF_DATA_BLOCK *pDataBlock, int update_every, void *data); + +typedef struct { + const char *registry_name; + perf_func_collect function_collect; + dict_cb_insert_t dict_insert_cb; + size_t dict_size; + DICTIONARY *instance; +} hyperv_perf_item; + +struct hypervisor_memory { + bool collected_metadata; + bool charts_created; + + RRDSET *st_pressure; + RRDSET *st_vm_memory_physical; + RRDSET *st_vm_memory_physical_guest_visible; + + DEFINE_RD(CurrentPressure); + DEFINE_RD(PhysicalMemory); + DEFINE_RD(GuestVisiblePhysicalMemory); + DEFINE_RD(GuestAvailableMemory); + + COUNTER_DATA CurrentPressure; + COUNTER_DATA PhysicalMemory; + COUNTER_DATA GuestVisiblePhysicalMemory; + COUNTER_DATA GuestAvailableMemory; +}; + +void initialize_hyperv_memory_keys(struct hypervisor_memory *p) { + p->CurrentPressure.key = "Current Pressure"; + p->PhysicalMemory.key = "Physical Memory"; + p->GuestVisiblePhysicalMemory.key = "Guest Visible Physical Memory"; + p->GuestAvailableMemory.key = "Guest Available Memory"; +} + + +void dict_hyperv_memory_insert_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) { + struct hypervisor_memory *p = value; + initialize_hyperv_memory_keys(p); +} + +struct hypervisor_partition { + bool collected_metadata; + bool charts_created; + + RRDSET *st_vm_vid_physical_pages_allocated; + RRDSET *st_vm_vid_remote_physical_pages; + + DEFINE_RD(PhysicalPagesAllocated); + DEFINE_RD(RemotePhysicalPages); + + COUNTER_DATA PhysicalPagesAllocated; + COUNTER_DATA RemotePhysicalPages; + +}; + +void initialize_hyperv_partition_keys(struct hypervisor_partition *p) +{ + p->PhysicalPagesAllocated.key = "Physical Pages Allocated"; + p->RemotePhysicalPages.key = "Remote Physical Pages"; +} + +void dict_hyperv_partition_insert_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) { + struct hypervisor_partition *p = value; + initialize_hyperv_partition_keys(p); +} + +static bool do_hyperv_memory(PERF_DATA_BLOCK *pDataBlock, int update_every, void *data) +{ + hyperv_perf_item *item = data; + + PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, item->registry_name); + if (!pObjectType) + return false; + + PERF_INSTANCE_DEFINITION *pi = NULL; + 
for(LONG i = 0; i < pObjectType->NumInstances ; i++) { + pi = perflibForEachInstance(pDataBlock, pObjectType, pi); + if (!pi) + break; + + get_and_sanitize_instance_value(pDataBlock, pObjectType, pi, windows_shared_buffer, sizeof(windows_shared_buffer)); + + struct hypervisor_memory *p = dictionary_set(item->instance, windows_shared_buffer, NULL, sizeof(*p)); + + if(!p->collected_metadata) { + p->collected_metadata = true; + } + + GET_INSTANCE_COUNTER(CurrentPressure); + GET_INSTANCE_COUNTER(PhysicalMemory); + GET_INSTANCE_COUNTER(GuestVisiblePhysicalMemory); + GET_INSTANCE_COUNTER(GuestAvailableMemory); + + if (!p->charts_created) { + p->charts_created = true; + if(!p->st_vm_memory_physical) { + p->st_vm_memory_physical = rrdset_create_localhost( + "vm_memory_physical", + windows_shared_buffer, + NULL, + HYPERV, + HYPERV".vm_memory_physical", + "VM assigned memory", + "bytes", + _COMMON_PLUGIN_NAME, + _COMMON_PLUGIN_MODULE_NAME, + NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_MEMORY_PHYSICAL, + update_every, + RRDSET_TYPE_LINE); + + p->st_vm_memory_physical_guest_visible = rrdset_create_localhost( + "vm_memory_physical_guest_visible", + windows_shared_buffer, + NULL, + HYPERV, + HYPERV".vm_memory_physical_guest_visible", + "VM guest visible memory", + "bytes", + _COMMON_PLUGIN_NAME, + _COMMON_PLUGIN_MODULE_NAME, + NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_MEMORY_PHYSICAL_GUEST_VISIBLE, + update_every, + RRDSET_TYPE_LINE); + + p->st_pressure = rrdset_create_localhost( + "vm_memory_pressure_current", + windows_shared_buffer, + NULL, + HYPERV, + HYPERV".vm_memory_pressure_current", + "VM Memory Pressure", + "percentage", + _COMMON_PLUGIN_NAME, + _COMMON_PLUGIN_MODULE_NAME, + NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_MEMORY_PRESSURE_CURRENT, + update_every, + RRDSET_TYPE_LINE); + + p->rd_CurrentPressure = rrddim_add(p->st_pressure, "pressure", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + p->rd_PhysicalMemory = rrddim_add(p->st_vm_memory_physical, "assigned", NULL, 1024 * 1024, 1, RRD_ALGORITHM_ABSOLUTE); + p->rd_GuestVisiblePhysicalMemory = rrddim_add(p->st_vm_memory_physical_guest_visible, "visible", NULL, 1024 * 1024, 1, RRD_ALGORITHM_ABSOLUTE); + p->rd_GuestAvailableMemory = rrddim_add(p->st_vm_memory_physical_guest_visible, "available", NULL, 1024 * 1024, 1, RRD_ALGORITHM_ABSOLUTE); + + rrdlabels_add(p->st_vm_memory_physical->rrdlabels, "vm_name", windows_shared_buffer, RRDLABEL_SRC_AUTO); + rrdlabels_add(p->st_pressure->rrdlabels, "vm_name", windows_shared_buffer, RRDLABEL_SRC_AUTO); + rrdlabels_add(p->st_vm_memory_physical_guest_visible->rrdlabels, "vm_name", windows_shared_buffer, RRDLABEL_SRC_AUTO); + } + } + + SETP_DIM_VALUE(st_pressure, CurrentPressure); + SETP_DIM_VALUE(st_vm_memory_physical, PhysicalMemory); + SETP_DIM_VALUE(st_vm_memory_physical_guest_visible, GuestVisiblePhysicalMemory); + SETP_DIM_VALUE(st_vm_memory_physical_guest_visible, GuestAvailableMemory); + + rrdset_done(p->st_pressure); + rrdset_done(p->st_vm_memory_physical); + rrdset_done(p->st_vm_memory_physical_guest_visible); + } + + return true; +} + +static bool do_hyperv_vid_partition(PERF_DATA_BLOCK *pDataBlock, int update_every, void *data) +{ + hyperv_perf_item *item = data; + PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, item->registry_name); + if (!pObjectType) + return false; + + PERF_INSTANCE_DEFINITION *pi = NULL; + for(LONG i = 0; i < pObjectType->NumInstances ; i++) { + pi = perflibForEachInstance(pDataBlock, pObjectType, pi); + if (!pi) + break; + + get_and_sanitize_instance_value(pDataBlock, 
pObjectType, pi, windows_shared_buffer, sizeof(windows_shared_buffer)); + + struct hypervisor_partition *p = dictionary_set(item->instance, windows_shared_buffer, NULL, sizeof(*p)); + + if(!p->collected_metadata) { + + p->collected_metadata = true; + } + + if(strcasecmp(windows_shared_buffer, "_Total") == 0) + continue; + + GET_INSTANCE_COUNTER(RemotePhysicalPages); + GET_INSTANCE_COUNTER(PhysicalPagesAllocated); + + if (!p->charts_created) { + p->charts_created = true; + + p->st_vm_vid_physical_pages_allocated = rrdset_create_localhost( + "vm_vid_physical_pages_allocated", + windows_shared_buffer, + NULL, + HYPERV, + HYPERV ".vm_vid_physical_pages_allocated", + "VM physical pages allocated", + "pages", + _COMMON_PLUGIN_NAME, + _COMMON_PLUGIN_MODULE_NAME, + NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_VID_PHYSICAL_PAGES_ALLOCATED, + update_every, + RRDSET_TYPE_LINE); + + p->st_vm_vid_remote_physical_pages = rrdset_create_localhost( + "vm_vid_remote_physical_pages", + windows_shared_buffer, + NULL, + HYPERV, + HYPERV ".vm_vid_remote_physical_pages", + "VM physical pages not allocated from the preferred NUMA node", + "pages", + _COMMON_PLUGIN_NAME, + _COMMON_PLUGIN_MODULE_NAME, + NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_VID_REMOTE_PHYSICAL_PAGES, + update_every, + RRDSET_TYPE_LINE); + + p->rd_PhysicalPagesAllocated = rrddim_add(p->st_vm_vid_physical_pages_allocated, "allocated", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + p->rd_RemotePhysicalPages = rrddim_add(p->st_vm_vid_remote_physical_pages, "remote_physical", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + + rrdlabels_add(p->st_vm_vid_physical_pages_allocated->rrdlabels, "vm_name", windows_shared_buffer, RRDLABEL_SRC_AUTO); + rrdlabels_add(p->st_vm_vid_remote_physical_pages->rrdlabels, "vm_name", windows_shared_buffer, RRDLABEL_SRC_AUTO); + } + + SETP_DIM_VALUE(st_vm_vid_remote_physical_pages, RemotePhysicalPages); + SETP_DIM_VALUE(st_vm_vid_physical_pages_allocated, PhysicalPagesAllocated); + + rrdset_done(p->st_vm_vid_physical_pages_allocated); + rrdset_done(p->st_vm_vid_remote_physical_pages); + } + + return true; +} + +// Define structure for Hyper-V Virtual Machine Health Summary +static struct hypervisor_health_summary { + bool collected_metadata; + bool charts_created; + + RRDSET *st_health; + + DEFINE_RD(HealthCritical); + DEFINE_RD(HealthOk); + + COUNTER_DATA HealthCritical; + COUNTER_DATA HealthOk; +} health_summary = { + .collected_metadata = false, + .st_health = NULL, + .HealthCritical.key = "Health Critical", + .HealthOk.key = "Health Ok"}; + +// Function to handle "Hyper-V Virtual Machine Health Summary" +static bool do_hyperv_health_summary(PERF_DATA_BLOCK *pDataBlock, int update_every, void *data) +{ + hyperv_perf_item *item = data; + + PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, item->registry_name); + if (!pObjectType) + return false; + + struct hypervisor_health_summary *p = &health_summary; + + GET_OBJECT_COUNTER(HealthCritical); + GET_OBJECT_COUNTER(HealthOk); + + if (!p->charts_created) { + p->charts_created = true; + p->st_health = rrdset_create_localhost( + "vms_health", + windows_shared_buffer, + NULL, + HYPERV, + HYPERV ".vms_health", + "Virtual machines health status", + "vms", + _COMMON_PLUGIN_NAME, + _COMMON_PLUGIN_MODULE_NAME, + NETDATA_CHART_PRIO_WINDOWS_HYPERV_VMS_HEALTH, + update_every, + RRDSET_TYPE_STACKED); + + p->rd_HealthCritical = rrddim_add(p->st_health, "critical", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + p->rd_HealthOk = rrddim_add(p->st_health, "ok", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + 
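+    // The health summary object has no per-VM instances, so the object-level
+    // counters fetched above are all we need: update both dimensions and
+    // publish the chart once per collection cycle.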
+ SETP_DIM_VALUE(st_health, HealthCritical); + SETP_DIM_VALUE(st_health, HealthOk); + + rrdset_done(p->st_health); + return true; +} + +// Define structure for Hyper-V Root Partition Metrics (Device and GPA Space Pages) +struct hypervisor_root_partition { + bool collected_metadata; + bool charts_created; + + RRDSET *st_device_space_pages; + RRDSET *st_gpa_space_pages; + RRDSET *st_gpa_space_modifications; + RRDSET *st_attached_devices; + RRDSET *st_deposited_pages; + + RRDSET *st_DeviceDMAErrors; + RRDSET *st_DeviceInterruptErrors; + RRDSET *st_DeviceInterruptThrottleEvents; + RRDSET *st_IOΤLBFlushesSec; + RRDSET *st_AddressSpaces; + RRDSET *st_VirtualTLBPages; + RRDSET *st_VirtualTLBFlushEntiresSec; + + DEFINE_RD(DeviceSpacePages4K); + DEFINE_RD(DeviceSpacePages2M); + DEFINE_RD(DeviceSpacePages1G); + DEFINE_RD(GPASpacePages4K); + DEFINE_RD(GPASpacePages2M); + DEFINE_RD(GPASpacePages1G); + DEFINE_RD(GPASpaceModifications); + + DEFINE_RD(AttachedDevices); + DEFINE_RD(DepositedPages); + + DEFINE_RD(DeviceDMAErrors); + DEFINE_RD(DeviceInterruptErrors); + DEFINE_RD(DeviceInterruptThrottleEvents); + DEFINE_RD(IOΤLBFlushesSec); + DEFINE_RD(AddressSpaces); + DEFINE_RD(VirtualTLBPages); + DEFINE_RD(VirtualTLBFlushEntiresSec); + + COUNTER_DATA DeviceSpacePages4K; + COUNTER_DATA DeviceSpacePages2M; + COUNTER_DATA DeviceSpacePages1G; + COUNTER_DATA GPASpacePages4K; + COUNTER_DATA GPASpacePages2M; + COUNTER_DATA GPASpacePages1G; + COUNTER_DATA GPASpaceModifications; + COUNTER_DATA AttachedDevices; + COUNTER_DATA DepositedPages; + COUNTER_DATA DeviceDMAErrors; + COUNTER_DATA DeviceInterruptErrors; + COUNTER_DATA DeviceInterruptThrottleEvents; + COUNTER_DATA IOΤLBFlushesSec; + COUNTER_DATA AddressSpaces; + COUNTER_DATA VirtualTLBPages; + COUNTER_DATA VirtualTLBFlushEntiresSec; +}; + +// Initialize the keys for the root partition metrics +void initialize_hyperv_root_partition_keys(struct hypervisor_root_partition *p) { + p->DeviceSpacePages4K.key = "4K device pages"; + p->DeviceSpacePages2M.key = "2M device pages"; + p->DeviceSpacePages1G.key = "1G device pages"; + + p->GPASpacePages4K.key = "4K GPA pages"; + p->GPASpacePages2M.key = "2M GPA pages"; + p->GPASpacePages1G.key = "1G GPA pages"; + + p->GPASpaceModifications.key = "GPA Space Modifications/sec"; + p->AttachedDevices.key = "Attached Devices"; + p->DepositedPages.key = "Deposited Pages"; + + p->DeviceDMAErrors.key = "Device DMA Errors"; + p->DeviceInterruptErrors.key = "Device Interrupt Errors"; + p->DeviceInterruptThrottleEvents.key = "Device Interrupt Throttle Events"; + p->IOΤLBFlushesSec.key = "I/O TLB Flushes/sec"; + p->AddressSpaces.key = "Address Spaces"; + p->VirtualTLBPages.key = "Virtual TLB Pages"; + p->VirtualTLBFlushEntiresSec.key = "Virtual TLB Flush Entires/sec"; +} + +// Callback function for inserting root partition metrics into the dictionary +void dict_hyperv_root_partition_insert_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) { + struct hypervisor_root_partition *p = value; + initialize_hyperv_root_partition_keys(p); +} + +// Function to handle "Hyper-V Hypervisor Root Partition" metrics (Device Space and GPA Space) +static bool do_hyperv_root_partition(PERF_DATA_BLOCK *pDataBlock, int update_every, void *data) +{ + hyperv_perf_item *item = data; + + PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, item->registry_name); + if (!pObjectType) + return false; + + PERF_INSTANCE_DEFINITION *pi = NULL; + for (LONG i = 0; i < pObjectType->NumInstances; i++) { + pi = 
perflibForEachInstance(pDataBlock, pObjectType, pi); + if (!pi) + break; + + get_and_sanitize_instance_value(pDataBlock, pObjectType, pi, windows_shared_buffer, sizeof(windows_shared_buffer)); + + if(strcasecmp(windows_shared_buffer, "_Total") == 0) + continue; + + struct hypervisor_root_partition *p = dictionary_set(item->instance, windows_shared_buffer, NULL, sizeof(*p)); + + if (!p->collected_metadata) { + p->collected_metadata = true; + } + + // Fetch counters + GET_INSTANCE_COUNTER(DeviceSpacePages4K); + GET_INSTANCE_COUNTER(DeviceSpacePages2M); + GET_INSTANCE_COUNTER(DeviceSpacePages1G); + GET_INSTANCE_COUNTER(GPASpacePages4K); + GET_INSTANCE_COUNTER(GPASpacePages2M); + GET_INSTANCE_COUNTER(GPASpacePages1G); + GET_INSTANCE_COUNTER(GPASpaceModifications); + GET_INSTANCE_COUNTER(AttachedDevices); + GET_INSTANCE_COUNTER(DepositedPages); + + GET_INSTANCE_COUNTER(DeviceDMAErrors); + GET_INSTANCE_COUNTER(DeviceInterruptErrors); + GET_INSTANCE_COUNTER(DeviceInterruptThrottleEvents); + GET_INSTANCE_COUNTER(IOΤLBFlushesSec); + GET_INSTANCE_COUNTER(AddressSpaces); + GET_INSTANCE_COUNTER(VirtualTLBPages); + GET_INSTANCE_COUNTER(VirtualTLBFlushEntiresSec); + + + // Create charts + if (!p->charts_created) { + p->charts_created = true; + p->st_device_space_pages = rrdset_create_localhost( + "root_partition_device_space_pages", + windows_shared_buffer, + NULL, + HYPERV, + HYPERV ".root_partition_device_space_pages", + "Root partition device space pages", + "pages", + _COMMON_PLUGIN_NAME, + _COMMON_PLUGIN_MODULE_NAME, + NETDATA_CHART_PRIO_WINDOWS_HYPERV_ROOT_PARTITION_DEVICE_SPACE_PAGES, + update_every, + RRDSET_TYPE_LINE); + + p->rd_DeviceSpacePages4K = rrddim_add(p->st_device_space_pages, "4K", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + p->rd_DeviceSpacePages2M = rrddim_add(p->st_device_space_pages, "2M", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + p->rd_DeviceSpacePages1G = rrddim_add(p->st_device_space_pages, "1G", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + + p->st_gpa_space_pages = rrdset_create_localhost( + "root_partition_gpa_space_pages", + windows_shared_buffer, + NULL, + HYPERV, + HYPERV ".root_partition_gpa_space_pages", + "Root partition GPA space pages", + "pages", + _COMMON_PLUGIN_NAME, + _COMMON_PLUGIN_MODULE_NAME, + NETDATA_CHART_PRIO_WINDOWS_HYPERV_ROOT_PARTITION_GPA_SPACE_PAGES, + update_every, + RRDSET_TYPE_LINE); + + p->rd_GPASpacePages4K = rrddim_add(p->st_gpa_space_pages, "4K", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + p->rd_GPASpacePages2M = rrddim_add(p->st_gpa_space_pages, "2M", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + p->rd_GPASpacePages1G = rrddim_add(p->st_gpa_space_pages, "1G", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + + p->st_gpa_space_modifications = rrdset_create_localhost( + "root_partition_gpa_space_modifications", + windows_shared_buffer, + NULL, + HYPERV, + HYPERV ".root_partition_gpa_space_modifications", + "Root partition GPA space modifications", + "modifications/s", + _COMMON_PLUGIN_NAME, + _COMMON_PLUGIN_MODULE_NAME, + NETDATA_CHART_PRIO_WINDOWS_HYPERV_ROOT_PARTITION_GPA_SPACE_MODIFICATIONS, + update_every, + RRDSET_TYPE_LINE); + + p->rd_GPASpaceModifications = + rrddim_add(p->st_gpa_space_modifications, "gpa", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + + p->st_attached_devices = rrdset_create_localhost( + "root_partition_attached_devices", + windows_shared_buffer, + NULL, + HYPERV, + HYPERV ".root_partition_attached_devices", + "Root partition attached devices", + "devices", + _COMMON_PLUGIN_NAME, + _COMMON_PLUGIN_MODULE_NAME, + 
NETDATA_CHART_PRIO_WINDOWS_HYPERV_ROOT_PARTITION_ATTACHED_DEVICES,
+                update_every,
+                RRDSET_TYPE_LINE);
+
+            p->rd_AttachedDevices = rrddim_add(p->st_attached_devices, "attached", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+            p->st_deposited_pages = rrdset_create_localhost(
+                "root_partition_deposited_pages",
+                windows_shared_buffer,
+                NULL,
+                HYPERV,
+                HYPERV ".root_partition_deposited_pages",
+                "Root partition deposited pages",
+                "pages",
+                _COMMON_PLUGIN_NAME,
+                _COMMON_PLUGIN_MODULE_NAME,
+                NETDATA_CHART_PRIO_WINDOWS_HYPERV_ROOT_PARTITION_DEPOSITED_PAGES,
+                update_every,
+                RRDSET_TYPE_LINE);
+
+            p->rd_DepositedPages = rrddim_add(p->st_deposited_pages, "gpa", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+            p->st_DeviceDMAErrors = rrdset_create_localhost(
+                "root_partition_device_dma_errors",
+                windows_shared_buffer,
+                NULL,
+                HYPERV,
+                HYPERV ".root_partition_device_dma_errors",
+                "Root partition illegal DMA requests",
+                "requests",
+                _COMMON_PLUGIN_NAME,
+                _COMMON_PLUGIN_MODULE_NAME,
+                NETDATA_CHART_PRIO_WINDOWS_HYPERV_ROOT_PARTITION_DEVICE_DMA_ERRORS,
+                update_every,
+                RRDSET_TYPE_LINE);
+
+            p->rd_DeviceDMAErrors =
+                rrddim_add(p->st_DeviceDMAErrors, "illegal_dma", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+            p->st_DeviceInterruptErrors = rrdset_create_localhost(
+                "root_partition_device_interrupt_errors",
+                windows_shared_buffer,
+                NULL,
+                HYPERV,
+                HYPERV ".root_partition_device_interrupt_errors",
+                "Root partition illegal interrupt requests",
+                "requests",
+                _COMMON_PLUGIN_NAME,
+                _COMMON_PLUGIN_MODULE_NAME,
+                NETDATA_CHART_PRIO_WINDOWS_HYPERV_ROOT_PARTITION_DEVICE_INTERRUPT_ERRORS,
+                update_every,
+                RRDSET_TYPE_LINE);
+
+            p->rd_DeviceInterruptErrors =
+                rrddim_add(p->st_DeviceInterruptErrors, "illegal_interrupt", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+            p->st_DeviceInterruptThrottleEvents = rrdset_create_localhost(
+                "root_partition_device_interrupt_throttle_events",
+                windows_shared_buffer,
+                NULL,
+                HYPERV,
+                HYPERV ".root_partition_device_interrupt_throttle_events",
+                "Root partition throttled interrupts",
+                "events",
+                _COMMON_PLUGIN_NAME,
+                _COMMON_PLUGIN_MODULE_NAME,
+                NETDATA_CHART_PRIO_WINDOWS_HYPERV_ROOT_PARTITION_DEVICE_INTERRUPT_THROTTLE_EVENTS,
+                update_every,
+                RRDSET_TYPE_LINE);
+
+            p->rd_DeviceInterruptThrottleEvents =
+                rrddim_add(p->st_DeviceInterruptThrottleEvents, "throttling", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+            p->st_IOΤLBFlushesSec = rrdset_create_localhost(
+                "root_partition_io_tlb_flush",
+                windows_shared_buffer,
+                NULL,
+                HYPERV,
+                HYPERV ".root_partition_io_tlb_flush",
+                "Root partition flushes of I/O TLBs",
+                "flushes/s",
+                _COMMON_PLUGIN_NAME,
+                _COMMON_PLUGIN_MODULE_NAME,
+                NETDATA_CHART_PRIO_WINDOWS_HYPERV_ROOT_PARTITION_IO_TLB_FLUSH,
+                update_every,
+                RRDSET_TYPE_LINE);
+
+            p->rd_IOΤLBFlushesSec = rrddim_add(p->st_IOΤLBFlushesSec, "gpa", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+            p->st_AddressSpaces = rrdset_create_localhost(
+                "root_partition_address_space",
+                windows_shared_buffer,
+                NULL,
+                HYPERV,
+                HYPERV ".root_partition_address_space",
+                "Root partition address spaces in the virtual TLB",
+                "address spaces",
+                _COMMON_PLUGIN_NAME,
+                _COMMON_PLUGIN_MODULE_NAME,
+                NETDATA_CHART_PRIO_WINDOWS_HYPERV_ROOT_PARTITION_ADDRESS_SPACE,
+                update_every,
+                RRDSET_TYPE_LINE);
+
+            p->rd_AddressSpaces = rrddim_add(p->st_AddressSpaces, "address_spaces", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+            p->st_VirtualTLBPages = rrdset_create_localhost(
+                "root_partition_virtual_tlb_pages",
+                windows_shared_buffer,
+                NULL,
+                HYPERV,
+                HYPERV ".root_partition_virtual_tlb_pages",
"Root partition pages used by the virtual TLB", + "pages", + _COMMON_PLUGIN_NAME, + _COMMON_PLUGIN_MODULE_NAME, + NETDATA_CHART_PRIO_WINDOWS_HYPERV_ROOT_PARTITION_VIRTUAL_TLB_PAGES, + update_every, + RRDSET_TYPE_LINE); + + p->rd_VirtualTLBPages = rrddim_add(p->st_VirtualTLBPages, "used", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + + p->st_VirtualTLBFlushEntiresSec = rrdset_create_localhost( + "root_partition_virtual_tlb_flush_entries", + windows_shared_buffer, + NULL, + HYPERV, + HYPERV ".root_partition_virtual_tlb_flush_entries", + "Root partition flushes of the entire virtual TLB", + "flushes/s", + _COMMON_PLUGIN_NAME, + _COMMON_PLUGIN_MODULE_NAME, + NETDATA_CHART_PRIO_WINDOWS_HYPERV_ROOT_PARTITION_VIRTUAL_TLB_FLUSH_ENTRIES, + update_every, + RRDSET_TYPE_LINE); + + p->rd_VirtualTLBFlushEntiresSec = + rrddim_add(p->st_VirtualTLBFlushEntiresSec, "flushes", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + + // Set the data for each dimension + + SETP_DIM_VALUE(st_device_space_pages,DeviceSpacePages4K); + SETP_DIM_VALUE(st_device_space_pages,DeviceSpacePages2M); + SETP_DIM_VALUE(st_device_space_pages,DeviceSpacePages1G); + + SETP_DIM_VALUE(st_gpa_space_pages, GPASpacePages4K); + SETP_DIM_VALUE(st_gpa_space_pages, GPASpacePages2M); + SETP_DIM_VALUE(st_gpa_space_pages, GPASpacePages1G); + + SETP_DIM_VALUE(st_gpa_space_modifications, GPASpaceModifications); + + SETP_DIM_VALUE(st_attached_devices, AttachedDevices); + SETP_DIM_VALUE(st_deposited_pages, DepositedPages); + + SETP_DIM_VALUE(st_DeviceDMAErrors, DeviceDMAErrors); + SETP_DIM_VALUE(st_DeviceInterruptErrors, DeviceInterruptErrors); + SETP_DIM_VALUE(st_DeviceInterruptThrottleEvents, DeviceInterruptThrottleEvents); + SETP_DIM_VALUE(st_IOΤLBFlushesSec, IOΤLBFlushesSec); + SETP_DIM_VALUE(st_AddressSpaces, AddressSpaces); + SETP_DIM_VALUE(st_VirtualTLBPages, VirtualTLBPages); + SETP_DIM_VALUE(st_VirtualTLBFlushEntiresSec, VirtualTLBFlushEntiresSec); + + // Mark the charts as done + rrdset_done(p->st_device_space_pages); + rrdset_done(p->st_gpa_space_pages); + rrdset_done(p->st_gpa_space_modifications); + rrdset_done(p->st_attached_devices); + rrdset_done(p->st_deposited_pages); + rrdset_done(p->st_DeviceInterruptErrors); + rrdset_done(p->st_DeviceInterruptThrottleEvents); + rrdset_done(p->st_IOΤLBFlushesSec); + rrdset_done(p->st_AddressSpaces); + rrdset_done(p->st_DeviceDMAErrors); + rrdset_done(p->st_VirtualTLBPages); + rrdset_done(p->st_VirtualTLBFlushEntiresSec); + } + + return true; +} + +// Storage DEVICE + +struct hypervisor_storage_device { + bool collected_metadata; + bool charts_created; + + RRDSET *st_operations; + DEFINE_RD(ReadOperationsSec); + DEFINE_RD(WriteOperationsSec); + + RRDSET *st_bytes; + DEFINE_RD(ReadBytesSec); + DEFINE_RD(WriteBytesSec); + + RRDSET *st_errors; + DEFINE_RD(ErrorCount); + + COUNTER_DATA ReadOperationsSec; + COUNTER_DATA WriteOperationsSec; + + COUNTER_DATA ReadBytesSec; + COUNTER_DATA WriteBytesSec; + COUNTER_DATA ErrorCount; +}; + + +// Initialize the keys for the root partition metrics +void initialize_hyperv_storage_device_keys(struct hypervisor_storage_device *p) { + p->ReadOperationsSec.key = "Read Operations/Sec"; + p->WriteOperationsSec.key = "Write Operations/Sec"; + + p->ReadBytesSec.key = "Read Bytes/sec"; + p->WriteBytesSec.key = "Write Bytes/sec"; + p->ErrorCount.key = "Error Count"; +} + +// Callback function for inserting root partition metrics into the dictionary +void dict_hyperv_storage_device_insert_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) { + 
struct hypervisor_storage_device *p = value; + initialize_hyperv_storage_device_keys(p); +} + +static bool do_hyperv_storage_device(PERF_DATA_BLOCK *pDataBlock, int update_every, void *data) +{ + hyperv_perf_item *item = data; + + PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, item->registry_name); + if (!pObjectType) + return false; + + + PERF_INSTANCE_DEFINITION *pi = NULL; + for (LONG i = 0; i < pObjectType->NumInstances; i++) { + pi = perflibForEachInstance(pDataBlock, pObjectType, pi); + if (!pi) + break; + + get_and_sanitize_instance_value(pDataBlock, pObjectType, pi, windows_shared_buffer, sizeof(windows_shared_buffer)); + + if(strcasecmp(windows_shared_buffer, "_Total") == 0) + continue; + + struct hypervisor_storage_device *p = dictionary_set(item->instance, windows_shared_buffer, NULL, sizeof(*p)); + + if (!p->collected_metadata) { + p->collected_metadata = true; + } + + // Fetch counters + GET_INSTANCE_COUNTER(ReadOperationsSec); + GET_INSTANCE_COUNTER(WriteOperationsSec); + + GET_INSTANCE_COUNTER(ReadBytesSec); + GET_INSTANCE_COUNTER(WriteBytesSec); + GET_INSTANCE_COUNTER(ErrorCount); + + if (!p->charts_created) { + p->charts_created = true; + if (!p->st_operations) { + p->st_operations = rrdset_create_localhost( + "vm_storage_device_operations", + windows_shared_buffer, + NULL, + HYPERV, + HYPERV".vm_storage_device_operations", + "VM storage device IOPS", + "operations/s", + _COMMON_PLUGIN_NAME, + _COMMON_PLUGIN_MODULE_NAME, + NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_STORAGE_DEVICE_OPERATIONS, + update_every, + RRDSET_TYPE_LINE); + + p->rd_ReadOperationsSec = rrddim_add(p->st_operations, "read", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + p->rd_WriteOperationsSec = rrddim_add(p->st_operations, "write", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrdlabels_add(p->st_operations->rrdlabels, "vm_storage_device", windows_shared_buffer, RRDLABEL_SRC_AUTO); + } + + if (!p->st_bytes) { + p->st_bytes = rrdset_create_localhost( + "vm_storage_device_bytes", + windows_shared_buffer, + NULL, + HYPERV, + HYPERV".vm_storage_device_bytes", + "VM storage device IO", + "bytes/s", + _COMMON_PLUGIN_NAME, + _COMMON_PLUGIN_MODULE_NAME, + NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_STORAGE_DEVICE_BYTES, + update_every, + RRDSET_TYPE_AREA); + + p->rd_ReadBytesSec = rrddim_add(p->st_bytes, "read", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + p->rd_WriteBytesSec = rrddim_add(p->st_bytes, "write", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrdlabels_add(p->st_bytes->rrdlabels, "vm_storage_device", windows_shared_buffer, RRDLABEL_SRC_AUTO); + } + + if (!p->st_errors) { + p->st_errors = rrdset_create_localhost( + "vm_storage_device_errors", + windows_shared_buffer, + NULL, + HYPERV, + HYPERV".vm_storage_device_errors", + "VM storage device errors", + "errors/s", + _COMMON_PLUGIN_NAME, + _COMMON_PLUGIN_MODULE_NAME, + NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_STORAGE_DEVICE_ERRORS, + update_every, + RRDSET_TYPE_LINE); + + p->rd_ErrorCount = rrddim_add(p->st_errors, "errors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrdlabels_add(p->st_errors->rrdlabels, "vm_storage_device", windows_shared_buffer, RRDLABEL_SRC_AUTO); + } + } + + SETP_DIM_VALUE(st_operations,ReadOperationsSec); + SETP_DIM_VALUE(st_operations,WriteOperationsSec); + + SETP_DIM_VALUE(st_bytes,ReadBytesSec); + SETP_DIM_VALUE(st_bytes,WriteBytesSec); + + SETP_DIM_VALUE(st_errors,ErrorCount); + + // Mark the charts as done + rrdset_done(p->st_operations); + rrdset_done(p->st_bytes); + rrdset_done(p->st_errors); + } + + return true; +} + 
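+// Virtual SWITCH
+//
+// Same per-instance pattern as the storage device collector above: the
+// dictionary insert callback seeds the perflib counter key strings once,
+// do_hyperv_switch() then walks every "Hyper-V Virtual Switch" instance,
+// creates the charts lazily the first time an instance is seen, feeds the
+// dimensions via SETP_DIM_VALUE() and finishes each chart with rrdset_done().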
+struct hypervisor_switch { + bool collected_metadata; + bool charts_created; + + RRDSET *st_bytes; + DEFINE_RD(BytesSentSec); + DEFINE_RD(BytesReceivedSec); + + RRDSET *st_packets; + DEFINE_RD(PacketsSentSec); + DEFINE_RD(PacketsReceivedSec); + + RRDSET *st_directed_packets; + DEFINE_RD(DirectedPacketsSentSec); + DEFINE_RD(DirectedPacketsReceivedSec); + + RRDSET *st_broadcast_packets; + DEFINE_RD(BroadcastPacketsSentSec); + DEFINE_RD(BroadcastPacketsReceivedSec); + + RRDSET *st_multicast_packets; + DEFINE_RD(MulticastPacketsSentSec); + DEFINE_RD(MulticastPacketsReceivedSec); + + RRDSET *st_dropped_packets; + DEFINE_RD(DroppedPacketsOutgoingSec); + DEFINE_RD(DroppedPacketsIncomingSec); + + RRDSET *st_ext_dropped_packets; + DEFINE_RD(ExtensionsDroppedPacketsOutgoingSec); + DEFINE_RD(ExtensionsDroppedPacketsIncomingSec); + + RRDSET *st_flooded; + DEFINE_RD(PacketsFlooded); + + RRDSET *st_learned_mac; + DEFINE_RD(LearnedMacAddresses); + + RRDSET *st_purged_mac; + DEFINE_RD(PurgedMacAddresses); + + COUNTER_DATA BytesSentSec; + COUNTER_DATA BytesReceivedSec; + + COUNTER_DATA PacketsSentSec; + COUNTER_DATA PacketsReceivedSec; + + COUNTER_DATA DirectedPacketsSentSec; + COUNTER_DATA DirectedPacketsReceivedSec; + + COUNTER_DATA BroadcastPacketsSentSec; + COUNTER_DATA BroadcastPacketsReceivedSec; + + COUNTER_DATA MulticastPacketsSentSec; + COUNTER_DATA MulticastPacketsReceivedSec; + + COUNTER_DATA DroppedPacketsOutgoingSec; + COUNTER_DATA DroppedPacketsIncomingSec; + + COUNTER_DATA ExtensionsDroppedPacketsOutgoingSec; + COUNTER_DATA ExtensionsDroppedPacketsIncomingSec; + + COUNTER_DATA PacketsFlooded; + + COUNTER_DATA LearnedMacAddresses; + + COUNTER_DATA PurgedMacAddresses; +}; + +// Initialize the keys for the virtual switch metrics +void initialize_hyperv_switch_keys(struct hypervisor_switch *p) +{ + p->BytesSentSec.key = "Bytes Sent/sec"; + p->BytesReceivedSec.key = "Bytes Received/sec"; + p->PacketsSentSec.key = "Packets Sent/sec"; + p->PacketsReceivedSec.key = "Packets Received/sec"; + + p->DirectedPacketsSentSec.key = "Directed Packets Sent/sec"; + p->DirectedPacketsReceivedSec.key = "Directed Packets Received/sec"; + p->BroadcastPacketsSentSec.key = "Broadcast Packets Sent/sec"; + p->BroadcastPacketsReceivedSec.key = "Broadcast Packets Received/sec"; + p->MulticastPacketsSentSec.key = "Multicast Packets Sent/sec"; + p->MulticastPacketsReceivedSec.key = "Multicast Packets Received/sec"; + p->DroppedPacketsOutgoingSec.key = "Dropped Packets Outgoing/sec"; + p->DroppedPacketsIncomingSec.key = "Dropped Packets Incoming/sec"; + p->ExtensionsDroppedPacketsOutgoingSec.key = "Extensions Dropped Packets Outgoing/sec"; + p->ExtensionsDroppedPacketsIncomingSec.key = "Extensions Dropped Packets Incoming/sec"; + p->PacketsFlooded.key = "Packets Flooded"; + p->LearnedMacAddresses.key = "Learned Mac Addresses"; + p->PurgedMacAddresses.key = "Purged Mac Addresses"; +} + +void dict_hyperv_switch_insert_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) +{ + struct hypervisor_switch *p = value; + initialize_hyperv_switch_keys(p); +} + +static bool do_hyperv_switch(PERF_DATA_BLOCK *pDataBlock, int update_every, void *data) +{ + hyperv_perf_item *item = data; + + PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, item->registry_name); + if (!pObjectType) + return false; + + PERF_INSTANCE_DEFINITION *pi = NULL; + for (LONG i = 0; i < pObjectType->NumInstances; i++) { + pi = perflibForEachInstance(pDataBlock, 
pObjectType, pi); + if (!pi) + break; + + get_and_sanitize_instance_value( + pDataBlock, pObjectType, pi, windows_shared_buffer, sizeof(windows_shared_buffer)); + + if(strcasecmp(windows_shared_buffer, "_Total") == 0) + continue; + + struct hypervisor_switch *p = dictionary_set(item->instance, windows_shared_buffer, NULL, sizeof(*p)); + + if (!p->collected_metadata) { + p->collected_metadata = true; + } + + GET_INSTANCE_COUNTER(BytesReceivedSec); + GET_INSTANCE_COUNTER(BytesSentSec); + + GET_INSTANCE_COUNTER(PacketsReceivedSec); + GET_INSTANCE_COUNTER(PacketsSentSec); + + GET_INSTANCE_COUNTER(DirectedPacketsSentSec); + GET_INSTANCE_COUNTER(DirectedPacketsReceivedSec); + + GET_INSTANCE_COUNTER(BroadcastPacketsSentSec); + GET_INSTANCE_COUNTER(BroadcastPacketsReceivedSec); + + GET_INSTANCE_COUNTER(MulticastPacketsSentSec); + GET_INSTANCE_COUNTER(MulticastPacketsReceivedSec); + + GET_INSTANCE_COUNTER(DroppedPacketsOutgoingSec); + GET_INSTANCE_COUNTER(DroppedPacketsIncomingSec); + + GET_INSTANCE_COUNTER(ExtensionsDroppedPacketsOutgoingSec); + GET_INSTANCE_COUNTER(ExtensionsDroppedPacketsIncomingSec); + + GET_INSTANCE_COUNTER(PacketsFlooded); + + GET_INSTANCE_COUNTER(LearnedMacAddresses); + + GET_INSTANCE_COUNTER(PurgedMacAddresses); + + if (!p->charts_created) { + p->charts_created = true; + + p->st_bytes = rrdset_create_localhost( + "vswitch_traffic", + windows_shared_buffer, + NULL, + HYPERV, + HYPERV ".vswitch_traffic", + "Virtual switch traffic", + "kilobits/s", + _COMMON_PLUGIN_NAME, + _COMMON_PLUGIN_MODULE_NAME, + NETDATA_CHART_PRIO_WINDOWS_HYPERV_VSWITCH_TRAFFIC, + update_every, + RRDSET_TYPE_AREA); + + p->rd_BytesReceivedSec = rrddim_add(p->st_bytes, "received", NULL, 8, 1000, RRD_ALGORITHM_INCREMENTAL); + p->rd_BytesSentSec = rrddim_add(p->st_bytes, "sent", NULL, -8, 1000, RRD_ALGORITHM_INCREMENTAL); + rrdlabels_add(p->st_bytes->rrdlabels, "vswitch", windows_shared_buffer, RRDLABEL_SRC_AUTO); + + p->st_packets = rrdset_create_localhost( + "vswitch_packets", + windows_shared_buffer, + NULL, + HYPERV, + HYPERV ".vswitch_packets", + "Virtual switch packets", + "packets/s", + _COMMON_PLUGIN_NAME, + _COMMON_PLUGIN_MODULE_NAME, + NETDATA_CHART_PRIO_WINDOWS_HYPERV_VSWITCH_PACKETS, + update_every, + RRDSET_TYPE_LINE); + + p->rd_PacketsReceivedSec = rrddim_add(p->st_packets, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + p->rd_PacketsSentSec = rrddim_add(p->st_packets, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rrdlabels_add(p->st_packets->rrdlabels, "vswitch", windows_shared_buffer, RRDLABEL_SRC_AUTO); + + p->st_directed_packets = rrdset_create_localhost( + "vswitch_directed_packets", + windows_shared_buffer, + NULL, + HYPERV, + HYPERV ".vswitch_directed_packets", + "Virtual switch directed packets", + "packets/s", + _COMMON_PLUGIN_NAME, + _COMMON_PLUGIN_MODULE_NAME, + NETDATA_CHART_PRIO_WINDOWS_HYPERV_VSWITCH_DIRECTED_PACKETS, + update_every, + RRDSET_TYPE_LINE); + + p->rd_DirectedPacketsReceivedSec = + rrddim_add(p->st_directed_packets, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + p->rd_DirectedPacketsSentSec = + rrddim_add(p->st_directed_packets, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rrdlabels_add(p->st_directed_packets->rrdlabels, "vswitch", windows_shared_buffer, RRDLABEL_SRC_AUTO); + + p->st_broadcast_packets = rrdset_create_localhost( + "vswitch_broadcast_packets", + windows_shared_buffer, + NULL, + HYPERV, + HYPERV ".vswitch_broadcast_packets", + "Virtual switch broadcast packets", + "packets/s", + _COMMON_PLUGIN_NAME, + _COMMON_PLUGIN_MODULE_NAME, 
+ NETDATA_CHART_PRIO_WINDOWS_HYPERV_VSWITCH_BROADCAST_PACKETS, + update_every, + RRDSET_TYPE_LINE); + + p->rd_BroadcastPacketsReceivedSec = + rrddim_add(p->st_broadcast_packets, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + p->rd_BroadcastPacketsSentSec = + rrddim_add(p->st_broadcast_packets, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rrdlabels_add(p->st_broadcast_packets->rrdlabels, "vswitch", windows_shared_buffer, RRDLABEL_SRC_AUTO); + + p->st_multicast_packets = rrdset_create_localhost( + "vswitch_multicast_packets", + windows_shared_buffer, + NULL, + HYPERV, + HYPERV ".vswitch_multicast_packets", + "Virtual switch multicast packets", + "packets/s", + _COMMON_PLUGIN_NAME, + _COMMON_PLUGIN_MODULE_NAME, + NETDATA_CHART_PRIO_WINDOWS_HYPERV_VSWITCH_MULTICAST_PACKETS, + update_every, + RRDSET_TYPE_LINE); + + p->rd_MulticastPacketsReceivedSec = + rrddim_add(p->st_multicast_packets, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + p->rd_MulticastPacketsSentSec = + rrddim_add(p->st_multicast_packets, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rrdlabels_add(p->st_multicast_packets->rrdlabels, "vswitch", windows_shared_buffer, RRDLABEL_SRC_AUTO); + + p->st_dropped_packets = rrdset_create_localhost( + "vswitch_dropped_packets", + windows_shared_buffer, + NULL, + HYPERV, + HYPERV ".vswitch_dropped_packets", + "Virtual switch dropped packets", + "drops/s", + _COMMON_PLUGIN_NAME, + _COMMON_PLUGIN_MODULE_NAME, + NETDATA_CHART_PRIO_WINDOWS_HYPERV_VSWITCH_DROPPED_PACKETS, + update_every, + RRDSET_TYPE_LINE); + + p->rd_DroppedPacketsIncomingSec = + rrddim_add(p->st_dropped_packets, "incoming", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + p->rd_DroppedPacketsOutgoingSec = + rrddim_add(p->st_dropped_packets, "outgoing", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rrdlabels_add(p->st_dropped_packets->rrdlabels, "vswitch", windows_shared_buffer, RRDLABEL_SRC_AUTO); + + p->st_ext_dropped_packets = rrdset_create_localhost( + "vswitch_extensions_dropped_packets", + windows_shared_buffer, + NULL, + HYPERV, + HYPERV ".vswitch_extensions_dropped_packets", + "Virtual switch extensions dropped packets", + "drops/s", + _COMMON_PLUGIN_NAME, + _COMMON_PLUGIN_MODULE_NAME, + NETDATA_CHART_PRIO_WINDOWS_HYPERV_VSWITCH_EXTENSIONS_DROPPED_PACKETS, + update_every, + RRDSET_TYPE_LINE); + + p->rd_ExtensionsDroppedPacketsIncomingSec = + rrddim_add(p->st_ext_dropped_packets, "incoming", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + p->rd_ExtensionsDroppedPacketsOutgoingSec = + rrddim_add(p->st_ext_dropped_packets, "outgoing", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rrdlabels_add(p->st_ext_dropped_packets->rrdlabels, "vswitch", windows_shared_buffer, RRDLABEL_SRC_AUTO); + + p->st_flooded = rrdset_create_localhost( + "vswitch_packets_flooded", + windows_shared_buffer, + NULL, + HYPERV, + HYPERV ".vswitch_packets_flooded", + "Virtual switch flooded packets", + "packets/s", + _COMMON_PLUGIN_NAME, + _COMMON_PLUGIN_MODULE_NAME, + NETDATA_CHART_PRIO_WINDOWS_HYPERV_VSWITCH_PACKETS_FLOODED, + update_every, + RRDSET_TYPE_LINE); + + p->rd_PacketsFlooded = rrddim_add(p->st_flooded, "flooded", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrdlabels_add(p->st_flooded->rrdlabels, "vswitch", windows_shared_buffer, RRDLABEL_SRC_AUTO); + + p->st_learned_mac = rrdset_create_localhost( + "vswitch_learned_mac_addresses", + windows_shared_buffer, + NULL, + HYPERV, + HYPERV ".vswitch_learned_mac_addresses", + "Virtual switch learned MAC addresses", + "mac addresses/s", + _COMMON_PLUGIN_NAME, + _COMMON_PLUGIN_MODULE_NAME, + 
NETDATA_CHART_PRIO_WINDOWS_HYPERV_VSWITCH_LEARNED_MAC_ADDRESSES, + update_every, + RRDSET_TYPE_LINE); + + p->rd_LearnedMacAddresses = rrddim_add(p->st_learned_mac, "learned", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrdlabels_add(p->st_learned_mac->rrdlabels, "vswitch", windows_shared_buffer, RRDLABEL_SRC_AUTO); + + p->st_purged_mac = rrdset_create_localhost( + "vswitch_purged_mac_addresses", + windows_shared_buffer, + NULL, + HYPERV, + HYPERV ".vswitch_purged_mac_addresses", + "Virtual switch purged MAC addresses", + "mac addresses/s", + _COMMON_PLUGIN_NAME, + _COMMON_PLUGIN_MODULE_NAME, + NETDATA_CHART_PRIO_WINDOWS_HYPERV_VSWITCH_PURGED_MAC_ADDRESSES, + update_every, + RRDSET_TYPE_LINE); + + p->rd_PurgedMacAddresses = rrddim_add(p->st_purged_mac, "purged", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + rrdlabels_add(p->st_purged_mac->rrdlabels, "vswitch", windows_shared_buffer, RRDLABEL_SRC_AUTO); + } + + SETP_DIM_VALUE(st_packets, PacketsReceivedSec); + SETP_DIM_VALUE(st_packets, PacketsSentSec); + + SETP_DIM_VALUE(st_bytes, BytesReceivedSec); + SETP_DIM_VALUE(st_bytes, BytesSentSec); + + SETP_DIM_VALUE(st_directed_packets, DirectedPacketsSentSec); + SETP_DIM_VALUE(st_directed_packets, DirectedPacketsReceivedSec); + + SETP_DIM_VALUE(st_broadcast_packets, BroadcastPacketsSentSec); + SETP_DIM_VALUE(st_broadcast_packets, BroadcastPacketsReceivedSec); + + SETP_DIM_VALUE(st_multicast_packets, MulticastPacketsSentSec); + SETP_DIM_VALUE(st_multicast_packets, MulticastPacketsReceivedSec); + + SETP_DIM_VALUE(st_dropped_packets, DroppedPacketsOutgoingSec); + SETP_DIM_VALUE(st_dropped_packets, DroppedPacketsIncomingSec); + + SETP_DIM_VALUE(st_ext_dropped_packets, ExtensionsDroppedPacketsOutgoingSec); + SETP_DIM_VALUE(st_ext_dropped_packets, ExtensionsDroppedPacketsIncomingSec); + + SETP_DIM_VALUE(st_flooded, PacketsFlooded); + SETP_DIM_VALUE(st_learned_mac, LearnedMacAddresses); + SETP_DIM_VALUE(st_purged_mac, PurgedMacAddresses); + + // Mark the charts as done + rrdset_done(p->st_packets); + rrdset_done(p->st_bytes); + + rrdset_done(p->st_directed_packets); + rrdset_done(p->st_broadcast_packets); + rrdset_done(p->st_multicast_packets); + rrdset_done(p->st_dropped_packets); + rrdset_done(p->st_ext_dropped_packets); + rrdset_done(p->st_flooded); + rrdset_done(p->st_learned_mac); + rrdset_done(p->st_purged_mac); + + } + return true; +} + +struct hypervisor_network_adapter { + bool collected_metadata; + bool charts_created; + + RRDSET *st_dropped_packets; + DEFINE_RD(DroppedPacketsOutgoingSec); + DEFINE_RD(DroppedPacketsIncomingSec); + + RRDSET *st_send_receive_packets; + DEFINE_RD(PacketsSentSec); + DEFINE_RD(PacketsReceivedSec); + + RRDSET *st_send_receive_bytes; + DEFINE_RD(BytesSentSec); + DEFINE_RD(BytesReceivedSec); + + RRDSET *st_IPsecoffloadBytes; + DEFINE_RD(IPsecoffloadBytesReceivedSec); + DEFINE_RD(IPsecoffloadBytesSentSec); + + RRDSET *st_DirectedPackets; + DEFINE_RD(DirectedPacketsSentSec); + DEFINE_RD(DirectedPacketsReceivedSec); + + RRDSET *st_BroadcastPackets; + DEFINE_RD(BroadcastPacketsSentSec); + DEFINE_RD(BroadcastPacketsReceivedSec); + + RRDSET *st_MulticastPackets; + DEFINE_RD(MulticastPacketsSentSec); + DEFINE_RD(MulticastPacketsReceivedSec); + + COUNTER_DATA DroppedPacketsOutgoingSec; + COUNTER_DATA DroppedPacketsIncomingSec; + + COUNTER_DATA PacketsSentSec; + COUNTER_DATA PacketsReceivedSec; + + COUNTER_DATA BytesSentSec; + COUNTER_DATA BytesReceivedSec; + + COUNTER_DATA IPsecoffloadBytesReceivedSec; + COUNTER_DATA IPsecoffloadBytesSentSec; + + COUNTER_DATA 
DirectedPacketsSentSec; + COUNTER_DATA DirectedPacketsReceivedSec; + + COUNTER_DATA BroadcastPacketsSentSec; + COUNTER_DATA BroadcastPacketsReceivedSec; + + COUNTER_DATA MulticastPacketsSentSec; + COUNTER_DATA MulticastPacketsReceivedSec; +}; + +// Initialize the keys for the network adapter metrics +void initialize_hyperv_network_adapter_keys(struct hypervisor_network_adapter *p) +{ + p->DroppedPacketsOutgoingSec.key = "Dropped Packets Outgoing/sec"; + p->DroppedPacketsIncomingSec.key = "Dropped Packets Incoming/sec"; + + p->PacketsSentSec.key = "Packets Sent/sec"; + p->PacketsReceivedSec.key = "Packets Received/sec"; + + p->BytesSentSec.key = "Bytes Sent/sec"; + p->BytesReceivedSec.key = "Bytes Received/sec"; + + p->IPsecoffloadBytesReceivedSec.key = "IPsec offload Bytes Receive/sec"; + p->IPsecoffloadBytesSentSec.key = "IPsec offload Bytes Sent/sec"; + p->DirectedPacketsSentSec.key = "Directed Packets Sent/sec"; + p->DirectedPacketsReceivedSec.key = "Directed Packets Received/sec"; + p->BroadcastPacketsSentSec.key = "Broadcast Packets Sent/sec"; + p->BroadcastPacketsReceivedSec.key = "Broadcast Packets Received/sec"; + p->MulticastPacketsSentSec.key = "Multicast Packets Sent/sec"; + p->MulticastPacketsReceivedSec.key = "Multicast Packets Received/sec"; +} + +void dict_hyperv_network_adapter_insert_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) +{ + struct hypervisor_network_adapter *p = value; + initialize_hyperv_network_adapter_keys(p); +} + +static bool do_hyperv_network_adapter(PERF_DATA_BLOCK *pDataBlock, int update_every, void *data) +{ + hyperv_perf_item *item = data; + + PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, item->registry_name); + if (!pObjectType) + return false; + + PERF_INSTANCE_DEFINITION *pi = NULL; + for (LONG i = 0; i < pObjectType->NumInstances; i++) { + pi = perflibForEachInstance(pDataBlock, pObjectType, pi); + if (!pi) + break; + + get_and_sanitize_instance_value(pDataBlock, pObjectType, pi, windows_shared_buffer, sizeof(windows_shared_buffer)); + + if(strcasecmp(windows_shared_buffer, "_Total") == 0) + continue; + + struct hypervisor_network_adapter *p = dictionary_set(item->instance, windows_shared_buffer, NULL, sizeof(*p)); + + if (!p->collected_metadata) { + p->collected_metadata = true; + } + + GET_INSTANCE_COUNTER(DroppedPacketsIncomingSec); + GET_INSTANCE_COUNTER(DroppedPacketsOutgoingSec); + + GET_INSTANCE_COUNTER(PacketsReceivedSec); + GET_INSTANCE_COUNTER(PacketsSentSec); + + GET_INSTANCE_COUNTER(BytesReceivedSec); + GET_INSTANCE_COUNTER(BytesSentSec); + + GET_INSTANCE_COUNTER(IPsecoffloadBytesReceivedSec); + GET_INSTANCE_COUNTER(IPsecoffloadBytesSentSec); + + GET_INSTANCE_COUNTER(DirectedPacketsSentSec); + GET_INSTANCE_COUNTER(DirectedPacketsReceivedSec); + + GET_INSTANCE_COUNTER(BroadcastPacketsSentSec); + GET_INSTANCE_COUNTER(BroadcastPacketsReceivedSec); + + GET_INSTANCE_COUNTER(MulticastPacketsSentSec); + GET_INSTANCE_COUNTER(MulticastPacketsReceivedSec); + + if (!p->charts_created) { + p->charts_created = true; + p->st_dropped_packets = rrdset_create_localhost( + "vm_net_interface_packets_dropped", + windows_shared_buffer, + NULL, + HYPERV, + HYPERV ".vm_net_interface_packets_dropped", + "VM interface packets dropped", + "drops/s", + _COMMON_PLUGIN_NAME, + _COMMON_PLUGIN_MODULE_NAME, + NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_NET_INTERFACE_PACKETS_DROPPED, + update_every, + RRDSET_TYPE_LINE); + + p->rd_DroppedPacketsIncomingSec = rrddim_add(p->st_dropped_packets, "incoming", NULL, 1, 
1, RRD_ALGORITHM_INCREMENTAL); + p->rd_DroppedPacketsOutgoingSec = rrddim_add(p->st_dropped_packets, "outgoing", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrdlabels_add(p->st_dropped_packets->rrdlabels, "vm_net_interface", windows_shared_buffer, RRDLABEL_SRC_AUTO); + + p->st_send_receive_packets = rrdset_create_localhost( + "vm_net_interface_packets", + windows_shared_buffer, + NULL, + HYPERV, + HYPERV ".vm_net_interface_packets", + "VM interface packets", + "packets/s", + _COMMON_PLUGIN_NAME, + _COMMON_PLUGIN_MODULE_NAME, + NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_NET_INTERFACE_PACKETS, + update_every, + RRDSET_TYPE_LINE); + + p->rd_PacketsReceivedSec = rrddim_add(p->st_send_receive_packets, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + p->rd_PacketsSentSec = rrddim_add(p->st_send_receive_packets, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrdlabels_add(p->st_send_receive_packets->rrdlabels, "vm_net_interface", windows_shared_buffer, RRDLABEL_SRC_AUTO); + + p->st_send_receive_bytes = rrdset_create_localhost( + "vm_net_interface_traffic", + windows_shared_buffer, + NULL, + HYPERV, + HYPERV ".vm_net_interface_traffic", + "VM interface traffic", + "kilobits/s", + _COMMON_PLUGIN_NAME, + _COMMON_PLUGIN_MODULE_NAME, + NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_NET_INTERFACE_TRAFFIC, + update_every, + RRDSET_TYPE_AREA); + + p->rd_BytesReceivedSec = rrddim_add(p->st_send_receive_bytes, "received", NULL, 8, 1000, RRD_ALGORITHM_INCREMENTAL); + p->rd_BytesSentSec = rrddim_add(p->st_send_receive_bytes, "sent", NULL, -8, 1000, RRD_ALGORITHM_INCREMENTAL); + rrdlabels_add(p->st_send_receive_bytes->rrdlabels, "vm_net_interface", windows_shared_buffer, RRDLABEL_SRC_AUTO); + + p->st_IPsecoffloadBytes = rrdset_create_localhost( + "vm_net_interface_ipsec_traffic", + windows_shared_buffer, + NULL, + HYPERV, + HYPERV ".vm_net_interface_ipsec_traffic", + "VM interface IPsec traffic", + "kilobits/s", + _COMMON_PLUGIN_NAME, + _COMMON_PLUGIN_MODULE_NAME, + NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_NET_INTERFACE_IPSEC_TRAFFIC, + update_every, + RRDSET_TYPE_AREA); + + p->rd_IPsecoffloadBytesReceivedSec = + rrddim_add(p->st_IPsecoffloadBytes, "received", NULL, 8, 1000, RRD_ALGORITHM_INCREMENTAL); + p->rd_IPsecoffloadBytesSentSec = + rrddim_add(p->st_IPsecoffloadBytes, "sent", NULL, -8, 1000, RRD_ALGORITHM_INCREMENTAL); + rrdlabels_add( + p->st_IPsecoffloadBytes->rrdlabels, "vm_net_interface", windows_shared_buffer, RRDLABEL_SRC_AUTO); + + p->st_DirectedPackets = rrdset_create_localhost( + "vm_net_interface_directed_packets", + windows_shared_buffer, + NULL, + HYPERV, + HYPERV ".vm_net_interface_directed_packets", + "VM interface directed packets", + "packets/s", + _COMMON_PLUGIN_NAME, + _COMMON_PLUGIN_MODULE_NAME, + NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_NET_INTERFACE_DIRECTED_PACKETS, + update_every, + RRDSET_TYPE_LINE); + + p->rd_DirectedPacketsReceivedSec = + rrddim_add(p->st_DirectedPackets, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + p->rd_DirectedPacketsSentSec = + rrddim_add(p->st_DirectedPackets, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rrdlabels_add( + p->st_DirectedPackets->rrdlabels, "vm_net_interface", windows_shared_buffer, RRDLABEL_SRC_AUTO); + + p->st_BroadcastPackets = rrdset_create_localhost( + "vm_net_interface_broadcast_packets", + windows_shared_buffer, + NULL, + HYPERV, + HYPERV ".vm_net_interface_broadcast_packets", + "VM interface broadcast", + "packets/s", + _COMMON_PLUGIN_NAME, + _COMMON_PLUGIN_MODULE_NAME, + NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_NET_INTERFACE_PACKETS, + update_every, 
+ RRDSET_TYPE_LINE); + + p->rd_BroadcastPacketsReceivedSec = + rrddim_add(p->st_BroadcastPackets, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + p->rd_BroadcastPacketsSentSec = + rrddim_add(p->st_BroadcastPackets, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rrdlabels_add( + p->st_BroadcastPackets->rrdlabels, "vm_net_interface", windows_shared_buffer, RRDLABEL_SRC_AUTO); + + p->st_MulticastPackets = rrdset_create_localhost( + "vm_net_interface_multicast_packets", + windows_shared_buffer, + NULL, + HYPERV, + HYPERV ".vm_net_interface_multicast_packets", + "VM interface multicast", + "packets/s", + _COMMON_PLUGIN_NAME, + _COMMON_PLUGIN_MODULE_NAME, + NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_NET_INTERFACE_MULTICAST_PACKETS, + update_every, + RRDSET_TYPE_LINE); + + p->rd_MulticastPacketsReceivedSec = + rrddim_add(p->st_MulticastPackets, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + p->rd_MulticastPacketsSentSec = + rrddim_add(p->st_MulticastPackets, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rrdlabels_add( + p->st_MulticastPackets->rrdlabels, "vm_net_interface", windows_shared_buffer, RRDLABEL_SRC_AUTO); + + + } + + SETP_DIM_VALUE(st_dropped_packets, DroppedPacketsIncomingSec); + SETP_DIM_VALUE(st_dropped_packets, DroppedPacketsOutgoingSec); + + SETP_DIM_VALUE(st_send_receive_packets, PacketsReceivedSec); + SETP_DIM_VALUE(st_send_receive_packets, PacketsSentSec); + + SETP_DIM_VALUE(st_send_receive_bytes, BytesReceivedSec); + SETP_DIM_VALUE(st_send_receive_bytes, BytesSentSec); + + SETP_DIM_VALUE(st_IPsecoffloadBytes, IPsecoffloadBytesReceivedSec); + SETP_DIM_VALUE(st_IPsecoffloadBytes, IPsecoffloadBytesSentSec); + + SETP_DIM_VALUE(st_DirectedPackets, DirectedPacketsSentSec); + SETP_DIM_VALUE(st_DirectedPackets, DirectedPacketsReceivedSec); + + SETP_DIM_VALUE(st_BroadcastPackets, BroadcastPacketsSentSec); + SETP_DIM_VALUE(st_BroadcastPackets, BroadcastPacketsReceivedSec); + + SETP_DIM_VALUE(st_MulticastPackets,MulticastPacketsSentSec); + SETP_DIM_VALUE(st_MulticastPackets,MulticastPacketsReceivedSec); + + rrdset_done(p->st_IPsecoffloadBytes); + rrdset_done(p->st_DirectedPackets); + rrdset_done(p->st_BroadcastPackets); + rrdset_done(p->st_MulticastPackets); + rrdset_done(p->st_send_receive_bytes); + rrdset_done(p->st_send_receive_packets); + rrdset_done(p->st_dropped_packets); + } + return true; +} + + +// Hypervisor Virtual Processor +struct hypervisor_processor { + bool collected_metadata; + bool charts_created; + + RRDSET *st_HypervisorProcessor; + + DEFINE_RD(GuestRunTime); + DEFINE_RD(HypervisorRunTime); + DEFINE_RD(RemoteRunTime); + + RRDSET *st_HypervisorProcessorTotal; + DEFINE_RD(TotalRunTime); + + COUNTER_DATA GuestRunTime; + COUNTER_DATA HypervisorRunTime; + COUNTER_DATA RemoteRunTime; + COUNTER_DATA TotalRunTime; + collected_number GuestRunTime_total; + collected_number HypervisorRunTime_total; + collected_number RemoteRunTime_total; + collected_number TotalRunTime_total; +}; + + +void initialize_hyperv_processor_keys(struct hypervisor_processor *p) +{ + p->GuestRunTime.key = "% Guest Run Time"; + p->HypervisorRunTime.key = "% Hypervisor Run Time"; + p->RemoteRunTime.key = "% Remote Run Time"; + p->TotalRunTime.key = "% Total Run Time"; + p->GuestRunTime_total = 0; + p->HypervisorRunTime_total = 0; + p->RemoteRunTime_total = 0; + p->TotalRunTime_total = 0; +} + +void dict_hyperv_processor_insert_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) +{ + struct hypervisor_processor *p = value; + 
initialize_hyperv_processor_keys(p); +} + +static bool do_hyperv_processor(PERF_DATA_BLOCK *pDataBlock, int update_every, void *data) +{ + hyperv_perf_item *item = data; + + PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, item->registry_name); + if (!pObjectType) + return false; + + PERF_INSTANCE_DEFINITION *pi = NULL; + for (LONG i = 0; i < pObjectType->NumInstances; i++) { + pi = perflibForEachInstance(pDataBlock, pObjectType, pi); + if (!pi) + break; + + get_and_sanitize_instance_value(pDataBlock, pObjectType, pi, windows_shared_buffer, sizeof(windows_shared_buffer)); + + if (strcasecmp(windows_shared_buffer, "_Total") == 0) + continue; + + char *vm = strchr(windows_shared_buffer, ':'); + if (vm) + *vm = '\0'; + + struct hypervisor_processor *p = dictionary_set(item->instance, windows_shared_buffer, NULL, sizeof(*p)); + + if (!p->collected_metadata) { + p->collected_metadata = true; + } + + GET_INSTANCE_COUNTER(GuestRunTime); + GET_INSTANCE_COUNTER(HypervisorRunTime); + GET_INSTANCE_COUNTER(RemoteRunTime); + GET_INSTANCE_COUNTER(TotalRunTime); + + if (!p->charts_created) { + p->charts_created = true; + p->st_HypervisorProcessorTotal = rrdset_create_localhost( + "vm_cpu_usage", + windows_shared_buffer, + NULL, + HYPERV, + HYPERV ".vm_cpu_usage", + "VM CPU usage", + "percentage", + _COMMON_PLUGIN_NAME, + _COMMON_PLUGIN_MODULE_NAME, + NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_CPU_USAGE, + update_every, + RRDSET_TYPE_STACKED); + + p->rd_TotalRunTime = + rrddim_add(p->st_HypervisorProcessorTotal, "usage", NULL, 1, 1000000, RRD_ALGORITHM_INCREMENTAL); + rrdlabels_add(p->st_HypervisorProcessorTotal->rrdlabels, "vm_name", windows_shared_buffer, RRDLABEL_SRC_AUTO); + + p->st_HypervisorProcessor = rrdset_create_localhost( + "vm_cpu_usage_by_run_context", + windows_shared_buffer, + NULL, + HYPERV, + HYPERV ".vm_cpu_usage_by_run_context", + "VM CPU usage by run context", + "percentage", + _COMMON_PLUGIN_NAME, + _COMMON_PLUGIN_MODULE_NAME, + NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_CPU_USAGE_BY_RUN_CONTEXT, + update_every, + RRDSET_TYPE_STACKED); + + p->rd_GuestRunTime = + rrddim_add(p->st_HypervisorProcessor, "guest", NULL, 1, 1000000, RRD_ALGORITHM_INCREMENTAL); + p->rd_HypervisorRunTime = + rrddim_add(p->st_HypervisorProcessor, "hypervisor", NULL, 1, 1000000, RRD_ALGORITHM_INCREMENTAL); + p->rd_RemoteRunTime = + rrddim_add(p->st_HypervisorProcessor, "remote", NULL, 1, 1000000, RRD_ALGORITHM_INCREMENTAL); + + rrdlabels_add(p->st_HypervisorProcessor->rrdlabels, "vm_name", windows_shared_buffer, RRDLABEL_SRC_AUTO); + } + + p->GuestRunTime_total += (collected_number)p->GuestRunTime.current.Data; + p->HypervisorRunTime_total += (collected_number)p->HypervisorRunTime.current.Data; + p->RemoteRunTime_total += (collected_number)p->RemoteRunTime.current.Data; + p->TotalRunTime_total += (collected_number)p->TotalRunTime.current.Data; + } + + { + struct hypervisor_processor *p; + dfe_start_read(item->instance, p) { + rrddim_set_by_pointer(p->st_HypervisorProcessor, p->rd_HypervisorRunTime, (collected_number) p->HypervisorRunTime_total); + rrddim_set_by_pointer(p->st_HypervisorProcessor, p->rd_GuestRunTime, (collected_number) p->GuestRunTime_total); + rrddim_set_by_pointer(p->st_HypervisorProcessor, p->rd_RemoteRunTime, (collected_number) p->RemoteRunTime_total); + rrdset_done(p->st_HypervisorProcessor); + + rrddim_set_by_pointer(p->st_HypervisorProcessorTotal, p->rd_TotalRunTime, (collected_number) p->TotalRunTime_total); + rrdset_done(p->st_HypervisorProcessorTotal); + + 
p->GuestRunTime_total = 0; + p->HypervisorRunTime_total = 0; + p->RemoteRunTime_total = 0; + p->TotalRunTime_total = 0; + } + dfe_done(p); + } + + return true; +} + +hyperv_perf_item hyperv_perf_list[] = { + {.registry_name = "Hyper-V Dynamic Memory VM", + .function_collect = do_hyperv_memory, + .dict_insert_cb = dict_hyperv_memory_insert_cb, + .dict_size = sizeof(struct hypervisor_memory)}, + + {.registry_name = "Hyper-V VM Vid Partition", + .function_collect = do_hyperv_vid_partition, + .dict_insert_cb = dict_hyperv_partition_insert_cb, + .dict_size = sizeof(struct hypervisor_partition)}, + + { + .registry_name = "Hyper-V Virtual Machine Health Summary", + .function_collect = do_hyperv_health_summary, + }, + + { + .registry_name = "Hyper-V Hypervisor Root Partition", + .function_collect = do_hyperv_root_partition, + .dict_insert_cb = dict_hyperv_root_partition_insert_cb, + .dict_size = sizeof(struct hypervisor_root_partition), + }, + + {.registry_name = "Hyper-V Virtual Storage Device", + .function_collect = do_hyperv_storage_device, + .dict_insert_cb = dict_hyperv_storage_device_insert_cb, + .dict_size = sizeof(struct hypervisor_storage_device)}, + + {.registry_name = "Hyper-V Virtual Switch", + .function_collect = do_hyperv_switch, + .dict_insert_cb = dict_hyperv_switch_insert_cb, + .dict_size = sizeof(struct hypervisor_switch)}, + + {.registry_name = "Hyper-V Virtual Network Adapter", + .function_collect = do_hyperv_network_adapter, + .dict_insert_cb = dict_hyperv_network_adapter_insert_cb, + .dict_size = sizeof(struct hypervisor_network_adapter)}, + + {.registry_name = "Hyper-V Hypervisor Virtual Processor", + .function_collect = do_hyperv_processor, + .dict_insert_cb = dict_hyperv_processor_insert_cb, + .dict_size = sizeof(struct hypervisor_processor)}, + + {.registry_name = NULL, .function_collect = NULL}}; + +int do_PerflibHyperV(int update_every, usec_t dt __maybe_unused) { + static bool initialized = false; + + if (unlikely(!initialized)) { + for (int i = 0; hyperv_perf_list[i].registry_name != NULL; i++) { + hyperv_perf_item *item = &hyperv_perf_list[i]; + if (item->dict_insert_cb) { + item->instance = dictionary_create_advanced(DICT_PERF_OPTION, NULL, item->dict_size); + dictionary_register_insert_callback(item->instance, item->dict_insert_cb, NULL); + } + } + initialized = true; + } + + for (int i = 0; hyperv_perf_list[i].registry_name != NULL; i++) { + // Find the registry ID using the registry name + DWORD id = RegistryFindIDByName(hyperv_perf_list[i].registry_name); + if (id == PERFLIB_REGISTRY_NAME_NOT_FOUND) + continue; + + // Get the performance data using the registry ID + PERF_DATA_BLOCK *pDataBlock = perflibGetPerformanceData(id); + if (!pDataBlock) + continue; + + hyperv_perf_list[i].function_collect(pDataBlock, update_every, &hyperv_perf_list[i]); + } + return 0; +} diff --git a/src/collectors/windows.plugin/perflib-memory.c b/src/collectors/windows.plugin/perflib-memory.c index c876fc68a..e26729cda 100644 --- a/src/collectors/windows.plugin/perflib-memory.c +++ b/src/collectors/windows.plugin/perflib-memory.c @@ -1,65 +1,219 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include "windows_plugin.h" -#include "windows-internals.h" - -#define _COMMON_PLUGIN_NAME "windows.plugin" -#define _COMMON_PLUGIN_MODULE_NAME "PerflibMemory" -#include "../common-contexts/common-contexts.h" - -static void initialize(void) { - ; -} - -static bool do_memory(PERF_DATA_BLOCK *pDataBlock, int update_every) { - PERF_OBJECT_TYPE *pObjectType = 
perflibFindObjectTypeByName(pDataBlock, "Memory"); - if (!pObjectType) - return false; - - static COUNTER_DATA pagesPerSec = { .key = "Pages/sec" }; - static COUNTER_DATA pageFaultsPerSec = { .key = "Page Faults/sec" }; - - if(perflibGetObjectCounter(pDataBlock, pObjectType, &pageFaultsPerSec) && - perflibGetObjectCounter(pDataBlock, pObjectType, &pagesPerSec)) { - ULONGLONG total = pageFaultsPerSec.current.Data; - ULONGLONG major = pagesPerSec.current.Data; - ULONGLONG minor = (total > major) ? total - major : 0; - common_mem_pgfaults(minor, major, update_every); - } - - static COUNTER_DATA availableBytes = { .key = "Available Bytes" }; - static COUNTER_DATA availableKBytes = { .key = "Available KBytes" }; - static COUNTER_DATA availableMBytes = { .key = "Available MBytes" }; - ULONGLONG available_bytes = 0; - - if(perflibGetObjectCounter(pDataBlock, pObjectType, &availableBytes)) - available_bytes = availableBytes.current.Data; - else if(perflibGetObjectCounter(pDataBlock, pObjectType, &availableKBytes)) - available_bytes = availableKBytes.current.Data * 1024; - else if(perflibGetObjectCounter(pDataBlock, pObjectType, &availableMBytes)) - available_bytes = availableMBytes.current.Data * 1024 * 1024; - - common_mem_available(available_bytes, update_every); - - return true; -} - -int do_PerflibMemory(int update_every, usec_t dt __maybe_unused) { - static bool initialized = false; - - if(unlikely(!initialized)) { - initialize(); - initialized = true; - } - - DWORD id = RegistryFindIDByName("Memory"); - if(id == PERFLIB_REGISTRY_NAME_NOT_FOUND) - return -1; - - PERF_DATA_BLOCK *pDataBlock = perflibGetPerformanceData(id); - if(!pDataBlock) return -1; - - do_memory(pDataBlock, update_every); - - return 0; -} +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "windows_plugin.h" +#include "windows-internals.h" + +#define _COMMON_PLUGIN_NAME "windows.plugin" +#define _COMMON_PLUGIN_MODULE_NAME "PerflibMemory" +#include "../common-contexts/common-contexts.h" + +struct swap { + RRDSET *operations; + RRDDIM *rd_op_read; + RRDDIM *rd_op_write; + + RRDSET *pages; + RRDDIM *rd_page_read; + RRDDIM *rd_page_write; + + COUNTER_DATA pageReadsTotal; + COUNTER_DATA pageWritesTotal; + COUNTER_DATA pageInputTotal; + COUNTER_DATA pageOutputTotal; +}; + +struct system_pool { + RRDSET *pool; + RRDDIM *rd_paged; + RRDDIM *rd_nonpaged; + + COUNTER_DATA pagedData; + COUNTER_DATA nonPagedData; +}; + +struct swap localSwap = { 0 }; +struct system_pool localPool = { 0 }; + +void initialize_swap_keys(struct swap *p) { + // SWAP Operations + p->pageReadsTotal.key = "Page Reads/sec"; + p->pageWritesTotal.key = "Page Writes/sec"; + + // Swap Pages + p->pageInputTotal.key = "Pages Input/sec"; + p->pageOutputTotal.key = "Pages Output/sec"; +} + +void initialize_pool_keys(struct system_pool *p) { + p->pagedData.key = "Pool Paged Bytes"; + p->nonPagedData.key = "Pool Nonpaged Bytes"; +} + +static void initialize(void) { + initialize_swap_keys(&localSwap); + initialize_pool_keys(&localPool); +} + +static void do_memory_swap(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, int update_every) +{ + perflibGetObjectCounter(pDataBlock, pObjectType, &localSwap.pageReadsTotal); + perflibGetObjectCounter(pDataBlock, pObjectType, &localSwap.pageWritesTotal); + perflibGetObjectCounter(pDataBlock, pObjectType, &localSwap.pageInputTotal); + perflibGetObjectCounter(pDataBlock, pObjectType, &localSwap.pageOutputTotal); + + if (!localSwap.operations) { + localSwap.operations = rrdset_create_localhost( + "mem" + , 
"swap_operations", NULL + , "swap" + , "mem.swap_iops" + + , "Swap Operations" + , "operations/s" + , PLUGIN_WINDOWS_NAME + , "PerflibMemory" + , NETDATA_CHART_PRIO_MEM_SWAPIO + , update_every + , RRDSET_TYPE_STACKED + ); + + localSwap.rd_op_read = rrddim_add(localSwap.operations, "read", NULL, + 1, 1, RRD_ALGORITHM_INCREMENTAL); + localSwap.rd_op_write = rrddim_add(localSwap.operations, "write", NULL, + 1, -1, RRD_ALGORITHM_INCREMENTAL); + } + + rrddim_set_by_pointer(localSwap.operations, + localSwap.rd_op_read, + (collected_number)localSwap.pageReadsTotal.current.Data); + + rrddim_set_by_pointer(localSwap.operations, + localSwap.rd_op_write, + (collected_number)localSwap.pageWritesTotal.current.Data); + rrdset_done(localSwap.operations); + + if (!localSwap.pages) { + localSwap.pages = rrdset_create_localhost( + "mem" + , "swap_pages", NULL + , "swap" + , "mem.swap_pages_io" + + , "Swap Pages" + , "pages/s" + , PLUGIN_WINDOWS_NAME + , "PerflibMemory" + , NETDATA_CHART_PRIO_MEM_SWAP_PAGES + , update_every + , RRDSET_TYPE_STACKED + ); + + localSwap.rd_page_read = rrddim_add(localSwap.pages, "read", NULL, + 1, 1, RRD_ALGORITHM_INCREMENTAL); + localSwap.rd_page_write = rrddim_add(localSwap.pages, "write", NULL, + 1, -1, RRD_ALGORITHM_INCREMENTAL); + } + + rrddim_set_by_pointer(localSwap.pages, + localSwap.rd_page_read, + (collected_number)localSwap.pageInputTotal.current.Data); + + rrddim_set_by_pointer(localSwap.pages, + localSwap.rd_page_write, + (collected_number)localSwap.pageOutputTotal.current.Data); + rrdset_done(localSwap.pages); +} + +static void do_memory_system_pool(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, int update_every) +{ + perflibGetObjectCounter(pDataBlock, pObjectType, &localPool.nonPagedData); + perflibGetObjectCounter(pDataBlock, pObjectType, &localPool.pagedData); + + if (!localPool.pool) { + localPool.pool = rrdset_create_localhost( + "mem" + , "system_pool", NULL + , "mem" + , "mem.system_pool_size" + + , "System Memory Pool" + , "bytes" + , PLUGIN_WINDOWS_NAME + , "PerflibMemory" + , NETDATA_CHART_PRIO_MEM_SYSTEM_POOL + , update_every + , RRDSET_TYPE_STACKED + ); + + localPool.rd_paged = rrddim_add(localPool.pool, "paged", NULL, + 1, 1, RRD_ALGORITHM_ABSOLUTE); + localPool.rd_nonpaged = rrddim_add(localPool.pool, "pool-paged", NULL, + 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + + rrddim_set_by_pointer(localPool.pool, + localPool.rd_paged, + (collected_number)localPool.pagedData.current.Data); + + rrddim_set_by_pointer(localPool.pool, + localPool.rd_nonpaged, + (collected_number)localPool.nonPagedData.current.Data); + rrdset_done(localPool.pool); +} + +static bool do_memory(PERF_DATA_BLOCK *pDataBlock, int update_every) { + PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, "Memory"); + if (!pObjectType) + return false; + + static COUNTER_DATA pagesPerSec = { .key = "Pages/sec" }; + static COUNTER_DATA pageFaultsPerSec = { .key = "Page Faults/sec" }; + + if(perflibGetObjectCounter(pDataBlock, pObjectType, &pageFaultsPerSec) && + perflibGetObjectCounter(pDataBlock, pObjectType, &pagesPerSec)) { + ULONGLONG total = pageFaultsPerSec.current.Data; + ULONGLONG major = pagesPerSec.current.Data; + ULONGLONG minor = (total > major) ? 
total - major : 0; + common_mem_pgfaults(minor, major, update_every); + } + + static COUNTER_DATA availableBytes = { .key = "Available Bytes" }; + static COUNTER_DATA availableKBytes = { .key = "Available KBytes" }; + static COUNTER_DATA availableMBytes = { .key = "Available MBytes" }; + ULONGLONG available_bytes = 0; + + if(perflibGetObjectCounter(pDataBlock, pObjectType, &availableBytes)) + available_bytes = availableBytes.current.Data; + else if(perflibGetObjectCounter(pDataBlock, pObjectType, &availableKBytes)) + available_bytes = availableKBytes.current.Data * 1024; + else if(perflibGetObjectCounter(pDataBlock, pObjectType, &availableMBytes)) + available_bytes = availableMBytes.current.Data * 1024 * 1024; + + common_mem_available(available_bytes, update_every); + + do_memory_swap(pDataBlock, pObjectType, update_every); + + do_memory_system_pool(pDataBlock, pObjectType, update_every); + + return true; +} + +int do_PerflibMemory(int update_every, usec_t dt __maybe_unused) { + static bool initialized = false; + + if(unlikely(!initialized)) { + initialize(); + initialized = true; + } + + DWORD id = RegistryFindIDByName("Memory"); + if(id == PERFLIB_REGISTRY_NAME_NOT_FOUND) + return -1; + + PERF_DATA_BLOCK *pDataBlock = perflibGetPerformanceData(id); + if(!pDataBlock) return -1; + + do_memory(pDataBlock, update_every); + + return 0; +} diff --git a/src/collectors/windows.plugin/perflib-mssql.c b/src/collectors/windows.plugin/perflib-mssql.c new file mode 100644 index 000000000..f6f5c434d --- /dev/null +++ b/src/collectors/windows.plugin/perflib-mssql.c @@ -0,0 +1,1413 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "windows_plugin.h" +#include "windows-internals.h" + +// https://learn.microsoft.com/en-us/sql/sql-server/install/instance-configuration?view=sql-server-ver16 +#define NETDATA_MAX_INSTANCE_NAME 32 +#define NETDATA_MAX_INSTANCE_OBJECT 128 + +BOOL is_sqlexpress = FALSE; + +enum netdata_mssql_metrics { + NETDATA_MSSQL_GENERAL_STATS, + NETDATA_MSSQL_SQL_ERRORS, + NETDATA_MSSQL_DATABASE, + NETDATA_MSSQL_LOCKS, + NETDATA_MSSQL_MEMORY, + NETDATA_MSSQL_BUFFER_MANAGEMENT, + NETDATA_MSSQL_SQL_STATS, + NETDATA_MSSQL_ACCESS_METHODS, + + NETDATA_MSSQL_METRICS_END +}; + +struct mssql_instance { + char *instanceID; + + char *objectName[NETDATA_MSSQL_METRICS_END]; + + RRDSET *st_user_connections; + RRDDIM *rd_user_connections; + + RRDSET *st_process_blocked; + RRDDIM *rd_process_blocked; + + RRDSET *st_stats_auto_param; + RRDDIM *rd_stats_auto_param; + + RRDSET *st_stats_batch_request; + RRDDIM *rd_stats_batch_request; + + RRDSET *st_stats_safe_auto; + RRDDIM *rd_stats_safe_auto; + + RRDSET *st_stats_compilation; + RRDDIM *rd_stats_compilation; + + RRDSET *st_stats_recompiles; + RRDDIM *rd_stats_recompiles; + + RRDSET *st_buff_cache_hits; + RRDDIM *rd_buff_cache_hits; + + RRDSET *st_buff_cache_page_life_expectancy; + RRDDIM *rd_buff_cache_page_life_expectancy; + + RRDSET *st_buff_checkpoint_pages; + RRDDIM *rd_buff_checkpoint_pages; + + RRDSET *st_buff_page_iops; + RRDDIM *rd_buff_page_reads; + RRDDIM *rd_buff_page_writes; + + RRDSET *st_access_method_page_splits; + RRDDIM *rd_access_method_page_splits; + + RRDSET *st_sql_errors; + RRDDIM *rd_sql_errors; + + RRDSET *st_lockWait; + RRDSET *st_deadLocks; + DICTIONARY *locks_instances; + + DICTIONARY *databases; + + RRDSET *st_conn_memory; + RRDDIM *rd_conn_memory; + + RRDSET *st_ext_benefit_mem; + RRDDIM *rd_ext_benefit_mem; + + RRDSET *st_pending_mem_grant; + RRDDIM *rd_pending_mem_grant; + + RRDSET *st_mem_tot_server; + RRDDIM 
*rd_mem_tot_server; + + COUNTER_DATA MSSQLAccessMethodPageSplits; + COUNTER_DATA MSSQLBufferCacheHits; + COUNTER_DATA MSSQLBufferCheckpointPages; + COUNTER_DATA MSSQLBufferPageLifeExpectancy; + COUNTER_DATA MSSQLBufferPageReads; + COUNTER_DATA MSSQLBufferPageWrites; + COUNTER_DATA MSSQLBlockedProcesses; + COUNTER_DATA MSSQLUserConnections; + COUNTER_DATA MSSQLConnectionMemoryBytes; + COUNTER_DATA MSSQLExternalBenefitOfMemory; + COUNTER_DATA MSSQLPendingMemoryGrants; + COUNTER_DATA MSSQLSQLErrorsTotal; + COUNTER_DATA MSSQLTotalServerMemory; + COUNTER_DATA MSSQLStatsAutoParameterization; + COUNTER_DATA MSSQLStatsBatchRequests; + COUNTER_DATA MSSQLStatSafeAutoParameterization; + COUNTER_DATA MSSQLCompilations; + COUNTER_DATA MSSQLRecompilations; +}; + +enum lock_instance_idx { + NETDATA_MSSQL_ENUM_MLI_IDX_WAIT, + NETDATA_MSSQL_ENUM_MLI_IDX_DEAD_LOCKS, + + NETDATA_MSSQL_ENUM_MLI_IDX_END +}; + +struct mssql_lock_instance { + struct mssql_instance *parent; + + COUNTER_DATA lockWait; + COUNTER_DATA deadLocks; + + RRDDIM *rd_lockWait; + RRDDIM *rd_deadLocks; + + uint32_t updated; +}; + +enum db_instance_idx { + NETDATA_MSSQL_ENUM_MDI_IDX_FILE_SIZE, + NETDATA_MSSQL_ENUM_MDI_IDX_ACTIVE_TRANSACTIONS, + NETDATA_MSSQL_ENUM_MDI_IDX_BACKUP_RESTORE_OP, + NETDATA_MSSQL_ENUM_MDI_IDX_LOG_FLUSHED, + NETDATA_MSSQL_ENUM_MDI_IDX_LOG_FLUSHES, + NETDATA_MSSQL_ENUM_MDI_IDX_TRANSACTIONS, + NETDATA_MSSQL_ENUM_MDI_IDX_WRITE_TRANSACTIONS, + + NETDATA_MSSQL_ENUM_MDI_IDX_END +}; + +struct mssql_db_instance { + struct mssql_instance *parent; + + RRDSET *st_db_data_file_size; + RRDSET *st_db_active_transactions; + RRDSET *st_db_backup_restore_operations; + RRDSET *st_db_log_flushed; + RRDSET *st_db_log_flushes; + RRDSET *st_db_transactions; + RRDSET *st_db_write_transactions; + + RRDDIM *rd_db_data_file_size; + RRDDIM *rd_db_active_transactions; + RRDDIM *rd_db_backup_restore_operations; + RRDDIM *rd_db_log_flushed; + RRDDIM *rd_db_log_flushes; + RRDDIM *rd_db_transactions; + RRDDIM *rd_db_write_transactions; + + COUNTER_DATA MSSQLDatabaseActiveTransactions; + COUNTER_DATA MSSQLDatabaseBackupRestoreOperations; + COUNTER_DATA MSSQLDatabaseDataFileSize; + COUNTER_DATA MSSQLDatabaseLogFlushed; + COUNTER_DATA MSSQLDatabaseLogFlushes; + COUNTER_DATA MSSQLDatabaseTransactions; + COUNTER_DATA MSSQLDatabaseWriteTransactions; + + uint32_t updated; +}; + +static DICTIONARY *mssql_instances = NULL; + +static void initialize_mssql_objects(struct mssql_instance *p, const char *instance) { + char prefix[NETDATA_MAX_INSTANCE_NAME]; + if (!strcmp(instance, "MSSQLSERVER")) { + strncpyz(prefix, "SQLServer:", sizeof(prefix) - 1); + } else if (!strcmp(instance, "SQLEXPRESS")) { + strncpyz(prefix, "MSSQL$SQLEXPRESS:", sizeof(prefix) - 1); + } else { + char *express = (!is_sqlexpress) ? 
"" : "SQLEXPRESS"; + snprintfz(prefix, sizeof(prefix) - 1, "MSSQL$%s:%s:", express, instance); + } + + size_t length = strlen(prefix); + char name[NETDATA_MAX_INSTANCE_OBJECT]; + snprintfz(name, sizeof(name) - 1, "%s%s", prefix, "General Statistics"); + p->objectName[NETDATA_MSSQL_GENERAL_STATS] = strdup(name); + + strncpyz(&name[length], "SQL Errors", sizeof(name) - length); + p->objectName[NETDATA_MSSQL_SQL_ERRORS] = strdup(name); + + strncpyz(&name[length], "Databases", sizeof(name) - length); + p->objectName[NETDATA_MSSQL_DATABASE] = strdup(name); + + strncpyz(&name[length], "SQL Statistics", sizeof(name) - length); + p->objectName[NETDATA_MSSQL_SQL_STATS] = strdup(name); + + strncpyz(&name[length], "Buffer Manager", sizeof(name) - length); + p->objectName[NETDATA_MSSQL_BUFFER_MANAGEMENT] = strdup(name); + + strncpyz(&name[length], "Memory Manager", sizeof(name) - length); + p->objectName[NETDATA_MSSQL_MEMORY] = strdup(name); + + strncpyz(&name[length], "Locks", sizeof(name) - length); + p->objectName[NETDATA_MSSQL_LOCKS] = strdup(name); + + strncpyz(&name[length], "Access Methods", sizeof(name) - length); + p->objectName[NETDATA_MSSQL_ACCESS_METHODS] = strdup(name); + + p->instanceID = strdup(instance); +} + +static inline void initialize_mssql_keys(struct mssql_instance *p) { + // General Statistics + p->MSSQLUserConnections.key = "User Connections"; + p->MSSQLBlockedProcesses.key = "Processes blocked"; + + // SQL Statistics + p->MSSQLStatsAutoParameterization.key = "Auto-Param Attempts/sec"; + p->MSSQLStatsBatchRequests.key = "Batch Requests/sec"; + p->MSSQLStatSafeAutoParameterization.key = "Safe Auto-Params/sec"; + p->MSSQLCompilations.key = "SQL Compilations/sec"; + p->MSSQLRecompilations.key = "SQL Re-Compilations/sec"; + + // Buffer Management + p->MSSQLBufferCacheHits.key = "Buffer cache hit ratio"; + p->MSSQLBufferPageLifeExpectancy.key = "Page life expectancy"; + p->MSSQLBufferCheckpointPages.key = "Checkpoint pages/sec"; + p->MSSQLBufferPageReads.key = "Page reads/sec"; + p->MSSQLBufferPageWrites.key = "Page writes/sec"; + + // Access Methods + p->MSSQLAccessMethodPageSplits.key = "Page Splits/sec"; + + // Errors + p->MSSQLSQLErrorsTotal.key = "Errors/sec"; + + p->MSSQLConnectionMemoryBytes.key = "Connection Memory (KB)"; + p->MSSQLExternalBenefitOfMemory.key = "External benefit of memory"; + p->MSSQLPendingMemoryGrants.key = "Memory Grants Pending"; + p->MSSQLTotalServerMemory.key = "Total Server Memory (KB)"; +} + +void dict_mssql_insert_locks_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) { + struct mssql_lock_instance *ptr = value; + ptr->deadLocks.key = "Number of Deadlocks/sec"; + ptr->lockWait.key = "Lock Waits/sec"; +} + +void dict_mssql_insert_databases_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) { + struct mssql_db_instance *ptr = value; + + ptr->MSSQLDatabaseDataFileSize.key = "Data File(s) Size (KB)"; + ptr->MSSQLDatabaseActiveTransactions.key = "Active Transactions"; + ptr->MSSQLDatabaseBackupRestoreOperations.key = "Backup/Restore Throughput/sec"; + ptr->MSSQLDatabaseLogFlushed.key = "Log Bytes Flushed/sec"; + ptr->MSSQLDatabaseLogFlushes.key = "Log Flushes/sec"; + ptr->MSSQLDatabaseTransactions.key = "Transactions/sec"; + ptr->MSSQLDatabaseWriteTransactions.key = "Write Transactions/sec"; +} + +void dict_mssql_insert_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) { + struct mssql_instance *p = value; + const char *instance = 
dictionary_acquired_item_name((DICTIONARY_ITEM *)item); + + if (!p->locks_instances) { + p->locks_instances = dictionary_create_advanced( + DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, NULL, sizeof(struct mssql_lock_instance)); + dictionary_register_insert_callback(p->locks_instances, dict_mssql_insert_locks_cb, NULL); + } + + if (!p->databases) { + p->databases = dictionary_create_advanced( + DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, NULL, sizeof(struct mssql_db_instance)); + dictionary_register_insert_callback(p->databases, dict_mssql_insert_databases_cb, NULL); + } + + initialize_mssql_objects(p, instance); + initialize_mssql_keys(p); +} + +static int mssql_fill_dictionary() { + HKEY hKey; + LSTATUS ret = RegOpenKeyExA( + HKEY_LOCAL_MACHINE, "SOFTWARE\\Microsoft\\Microsoft SQL Server\\Instance Names\\SQL", 0, KEY_READ, &hKey); + if (ret != ERROR_SUCCESS) + return -1; + + DWORD values = 0; + + ret = RegQueryInfoKey(hKey, NULL, NULL, NULL, NULL, NULL, NULL, &values, NULL, NULL, NULL, NULL); + if (ret != ERROR_SUCCESS) { + goto endMSSQLFillDict; + } + + if (!values) { + ret = ERROR_PATH_NOT_FOUND; + goto endMSSQLFillDict; + } + +// https://learn.microsoft.com/en-us/windows/win32/sysinfo/enumerating-registry-subkeys +#define REGISTRY_MAX_VALUE 16383 + + DWORD i; + char avalue[REGISTRY_MAX_VALUE] = {'\0'}; + DWORD length = REGISTRY_MAX_VALUE; + for (i = 0; i < values; i++) { + avalue[0] = '\0'; + // RegEnumValue() updates 'length' in place, so reset it before each call + length = REGISTRY_MAX_VALUE; + + ret = RegEnumValue(hKey, i, avalue, &length, NULL, NULL, NULL, NULL); + if (ret != ERROR_SUCCESS) + continue; + + if (!strcmp(avalue, "SQLEXPRESS")) { + is_sqlexpress = TRUE; + } + + struct mssql_instance *p = dictionary_set(mssql_instances, avalue, NULL, sizeof(*p)); + } + +endMSSQLFillDict: + RegCloseKey(hKey); + + return (ret == ERROR_SUCCESS) ? 
0 : -1; +} + +static int initialize(void) { + mssql_instances = dictionary_create_advanced( + DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, NULL, sizeof(struct mssql_instance)); + + dictionary_register_insert_callback(mssql_instances, dict_mssql_insert_cb, NULL); + + if (mssql_fill_dictionary()) { + return -1; + } + + return 0; +} + +static void do_mssql_general_stats(PERF_DATA_BLOCK *pDataBlock, struct mssql_instance *p, int update_every) { + char id[RRD_ID_LENGTH_MAX + 1]; + + PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, p->objectName[NETDATA_MSSQL_GENERAL_STATS]); + if (!pObjectType) + return; + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->MSSQLUserConnections)) { + if (!p->st_user_connections) { + snprintfz(id, RRD_ID_LENGTH_MAX, "instance_%s_user_connections", p->instanceID); + netdata_fix_chart_name(id); + p->st_user_connections = rrdset_create_localhost( + "mssql", + id, + NULL, + "connections", + "mssql.instance_user_connections", + "User connections", + "connections", + PLUGIN_WINDOWS_NAME, + "PerflibMSSQL", + PRIO_MSSQL_USER_CONNECTIONS, + update_every, + RRDSET_TYPE_LINE); + + p->rd_user_connections = rrddim_add(p->st_user_connections, "user", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + + rrdlabels_add(p->st_user_connections->rrdlabels, "mssql_instance", p->instanceID, RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer( + p->st_user_connections, p->rd_user_connections, (collected_number)p->MSSQLUserConnections.current.Data); + rrdset_done(p->st_user_connections); + } + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->MSSQLBlockedProcesses)) { + if (!p->st_process_blocked) { + snprintfz(id, RRD_ID_LENGTH_MAX, "instance_%s_blocked_process", p->instanceID); + netdata_fix_chart_name(id); + p->st_process_blocked = rrdset_create_localhost( + "mssql", + id, + NULL, + "processes", + "mssql.instance_blocked_processes", + "Blocked processes", + "process", + PLUGIN_WINDOWS_NAME, + "PerflibMSSQL", + PRIO_MSSQL_BLOCKED_PROCESSES, + update_every, + RRDSET_TYPE_LINE); + + p->rd_process_blocked = rrddim_add(p->st_process_blocked, "blocked", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + + rrdlabels_add(p->st_process_blocked->rrdlabels, "mssql_instance", p->instanceID, RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer( + p->st_process_blocked, p->rd_process_blocked, (collected_number)p->MSSQLBlockedProcesses.current.Data); + rrdset_done(p->st_process_blocked); + } +} + +static void do_mssql_sql_statistics(PERF_DATA_BLOCK *pDataBlock, struct mssql_instance *p, int update_every) { + char id[RRD_ID_LENGTH_MAX + 1]; + + PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, p->objectName[NETDATA_MSSQL_SQL_STATS]); + if (!pObjectType) + return; + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->MSSQLStatsAutoParameterization)) { + if (!p->st_stats_auto_param) { + snprintfz(id, RRD_ID_LENGTH_MAX, "instance_%s_sqlstats_auto_parameterization_attempts", p->instanceID); + netdata_fix_chart_name(id); + p->st_stats_auto_param = rrdset_create_localhost( + "mssql", + id, + NULL, + "sql activity", + "mssql.instance_sqlstats_auto_parameterization_attempts", + "Failed auto-parameterization attempts", + "attempts/s", + PLUGIN_WINDOWS_NAME, + "PerflibMSSQL", + PRIO_MSSQL_STATS_AUTO_PARAMETRIZATION, + update_every, + RRDSET_TYPE_LINE); + + p->rd_stats_auto_param = + rrddim_add(p->st_stats_auto_param, "failed", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrdlabels_add(p->st_stats_auto_param->rrdlabels, "mssql_instance", p->instanceID, 
RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer( + p->st_stats_auto_param, + p->rd_stats_auto_param, + (collected_number)p->MSSQLStatsAutoParameterization.current.Data); + rrdset_done(p->st_stats_auto_param); + } + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->MSSQLStatsBatchRequests)) { + if (!p->st_stats_batch_request) { + snprintfz(id, RRD_ID_LENGTH_MAX, "instance_%s_sqlstats_batch_requests", p->instanceID); + netdata_fix_chart_name(id); + p->st_stats_batch_request = rrdset_create_localhost( + "mssql", + id, + NULL, + "sql activity", + "mssql.instance_sqlstats_batch_requests", + "Total batch requests", + "requests/s", + PLUGIN_WINDOWS_NAME, + "PerflibMSSQL", + PRIO_MSSQL_STATS_BATCH_REQUEST, + update_every, + RRDSET_TYPE_LINE); + + p->rd_stats_batch_request = + rrddim_add(p->st_stats_batch_request, "batch", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrdlabels_add(p->st_stats_batch_request->rrdlabels, "mssql_instance", p->instanceID, RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer( + p->st_stats_batch_request, + p->rd_stats_batch_request, + (collected_number)p->MSSQLStatsBatchRequests.current.Data); + rrdset_done(p->st_stats_batch_request); + } + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->MSSQLStatSafeAutoParameterization)) { + if (!p->st_stats_safe_auto) { + snprintfz(id, RRD_ID_LENGTH_MAX, "instance_%s_sqlstats_safe_auto_parameterization_attempts", p->instanceID); + netdata_fix_chart_name(id); + p->st_stats_safe_auto = rrdset_create_localhost( + "mssql", + id, + NULL, + "sql activity", + "mssql.instance_sqlstats_safe_auto_parameterization_attempts", + "Safe auto-parameterization attempts", + "attempts/s", + PLUGIN_WINDOWS_NAME, + "PerflibMSSQL", + PRIO_MSSQL_STATS_SAFE_AUTO_PARAMETRIZATION, + update_every, + RRDSET_TYPE_LINE); + + p->rd_stats_safe_auto = rrddim_add(p->st_stats_safe_auto, "safe", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrdlabels_add(p->st_stats_safe_auto->rrdlabels, "mssql_instance", p->instanceID, RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer( + p->st_stats_safe_auto, + p->rd_stats_safe_auto, + (collected_number)p->MSSQLStatSafeAutoParameterization.current.Data); + rrdset_done(p->st_stats_safe_auto); + } + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->MSSQLCompilations)) { + if (!p->st_stats_compilation) { + snprintfz(id, RRD_ID_LENGTH_MAX, "instance_%s_sqlstats_sql_compilations", p->instanceID); + netdata_fix_chart_name(id); + p->st_stats_compilation = rrdset_create_localhost( + "mssql", + id, + NULL, + "sql activity", + "mssql.instance_sqlstats_sql_compilations", + "SQL compilations", + "compilations/s", + PLUGIN_WINDOWS_NAME, + "PerflibMSSQL", + PRIO_MSSQL_STATS_COMPILATIONS, + update_every, + RRDSET_TYPE_LINE); + + p->rd_stats_compilation = + rrddim_add(p->st_stats_compilation, "compilations", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrdlabels_add(p->st_stats_compilation->rrdlabels, "mssql_instance", p->instanceID, RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer( + p->st_stats_compilation, p->rd_stats_compilation, (collected_number)p->MSSQLCompilations.current.Data); + rrdset_done(p->st_stats_compilation); + } + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->MSSQLRecompilations)) { + if (!p->st_stats_recompiles) { + snprintfz(id, RRD_ID_LENGTH_MAX, "instance_%s_sqlstats_sql_recompilations", p->instanceID); + netdata_fix_chart_name(id); + p->st_stats_recompiles = rrdset_create_localhost( + "mssql", + id, + NULL, + "sql activity", + "mssql.instance_sqlstats_sql_recompilations", + "SQL 
re-compilations", + "recompiles/", + PLUGIN_WINDOWS_NAME, + "PerflibMSSQL", + PRIO_MSSQL_STATS_RECOMPILATIONS, + update_every, + RRDSET_TYPE_LINE); + + p->rd_stats_recompiles = + rrddim_add(p->st_stats_recompiles, "recompiles", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrdlabels_add(p->st_stats_recompiles->rrdlabels, "mssql_instance", p->instanceID, RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer( + p->st_stats_recompiles, p->rd_stats_recompiles, (collected_number)p->MSSQLRecompilations.current.Data); + rrdset_done(p->st_stats_recompiles); + } +} + +static void do_mssql_buffer_management(PERF_DATA_BLOCK *pDataBlock, struct mssql_instance *p, int update_every) { + char id[RRD_ID_LENGTH_MAX + 1]; + + PERF_OBJECT_TYPE *pObjectType = + perflibFindObjectTypeByName(pDataBlock, p->objectName[NETDATA_MSSQL_BUFFER_MANAGEMENT]); + if (!pObjectType) + return; + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->MSSQLBufferCacheHits)) { + if (!p->st_buff_cache_hits) { + snprintfz(id, RRD_ID_LENGTH_MAX, "instance_%s_cache_hit_ratio", p->instanceID); + netdata_fix_chart_name(id); + p->st_buff_cache_hits = rrdset_create_localhost( + "mssql", + id, + NULL, + "buffer cache", + "mssql.instance_cache_hit_ratio", + "Buffer Cache hit ratio", + "percentage", + PLUGIN_WINDOWS_NAME, + "PerflibMSSQL", + PRIO_MSSQL_BUFF_CACHE_HIT_RATIO, + update_every, + RRDSET_TYPE_LINE); + + p->rd_buff_cache_hits = rrddim_add(p->st_buff_cache_hits, "hit_ratio", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + + rrdlabels_add(p->st_buff_cache_hits->rrdlabels, "mssql_instance", p->instanceID, RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer( + p->st_buff_cache_hits, p->rd_buff_cache_hits, (collected_number)p->MSSQLBufferCacheHits.current.Data); + rrdset_done(p->st_buff_cache_hits); + } + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->MSSQLBufferCheckpointPages)) { + if (!p->st_buff_checkpoint_pages) { + snprintfz(id, RRD_ID_LENGTH_MAX, "instance_%s_bufman_checkpoint_pages", p->instanceID); + netdata_fix_chart_name(id); + p->st_buff_checkpoint_pages = rrdset_create_localhost( + "mssql", + id, + NULL, + "buffer cache", + "mssql.instance_bufman_checkpoint_pages", + "Flushed pages", + "pages/s", + PLUGIN_WINDOWS_NAME, + "PerflibMSSQL", + PRIO_MSSQL_BUFF_CHECKPOINT_PAGES, + update_every, + RRDSET_TYPE_LINE); + + p->rd_buff_checkpoint_pages = + rrddim_add(p->st_buff_checkpoint_pages, "log", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrdlabels_add(p->st_buff_checkpoint_pages->rrdlabels, "mssql_instance", p->instanceID, RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer( + p->st_buff_checkpoint_pages, + p->rd_buff_checkpoint_pages, + (collected_number)p->MSSQLBufferCheckpointPages.current.Data); + rrdset_done(p->st_buff_checkpoint_pages); + } + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->MSSQLBufferPageLifeExpectancy)) { + if (!p->st_buff_cache_page_life_expectancy) { + snprintfz(id, RRD_ID_LENGTH_MAX, "instance_%s_bufman_page_life_expectancy", p->instanceID); + netdata_fix_chart_name(id); + p->st_buff_cache_page_life_expectancy = rrdset_create_localhost( + "mssql", + id, + NULL, + "buffer cache", + "mssql.instance_bufman_page_life_expectancy", + "Page life expectancy", + "seconds", + PLUGIN_WINDOWS_NAME, + "PerflibMSSQL", + PRIO_MSSQL_BUFF_PAGE_LIFE_EXPECTANCY, + update_every, + RRDSET_TYPE_LINE); + + p->rd_buff_cache_page_life_expectancy = rrddim_add( + p->st_buff_cache_page_life_expectancy, "life_expectancy", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + + rrdlabels_add( + 
p->st_buff_cache_page_life_expectancy->rrdlabels, "mssql_instance", p->instanceID, RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer( + p->st_buff_cache_page_life_expectancy, + p->rd_buff_cache_page_life_expectancy, + (collected_number)p->MSSQLBufferPageLifeExpectancy.current.Data); + rrdset_done(p->st_buff_cache_page_life_expectancy); + } + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->MSSQLBufferPageReads) && + perflibGetObjectCounter(pDataBlock, pObjectType, &p->MSSQLBufferPageWrites)) { + if (!p->st_buff_page_iops) { + snprintfz(id, RRD_ID_LENGTH_MAX, "instance_%s_bufman_iops", p->instanceID); + netdata_fix_chart_name(id); + p->st_buff_page_iops = rrdset_create_localhost( + "mssql", + id, + NULL, + "buffer cache", + "mssql.instance_bufman_iops", + "Number of pages input and output", + "pages/s", + PLUGIN_WINDOWS_NAME, + "PerflibMSSQL", + PRIO_MSSQL_BUFF_MAN_IOPS, + update_every, + RRDSET_TYPE_LINE); + + p->rd_buff_page_reads = rrddim_add(p->st_buff_page_iops, "read", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + p->rd_buff_page_writes = + rrddim_add(p->st_buff_page_iops, "written", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrdlabels_add(p->st_buff_page_iops->rrdlabels, "mssql_instance", p->instanceID, RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer( + p->st_buff_page_iops, p->rd_buff_page_reads, (collected_number)p->MSSQLBufferPageReads.current.Data); + rrddim_set_by_pointer( + p->st_buff_page_iops, p->rd_buff_page_writes, (collected_number)p->MSSQLBufferPageWrites.current.Data); + + rrdset_done(p->st_buff_page_iops); + } +} + +static void do_mssql_access_methods(PERF_DATA_BLOCK *pDataBlock, struct mssql_instance *p, int update_every) { + char id[RRD_ID_LENGTH_MAX + 1]; + + PERF_OBJECT_TYPE *pObjectType = + perflibFindObjectTypeByName(pDataBlock, p->objectName[NETDATA_MSSQL_ACCESS_METHODS]); + if (!pObjectType) + return; + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->MSSQLAccessMethodPageSplits)) { + if (!p->st_access_method_page_splits) { + snprintfz(id, RRD_ID_LENGTH_MAX, "instance_%s_accessmethods_page_splits", p->instanceID); + netdata_fix_chart_name(id); + p->st_access_method_page_splits = rrdset_create_localhost( + "mssql", + id, + NULL, + "buffer cache", + "mssql.instance_accessmethods_page_splits", + "Page splits", + "splits/s", + PLUGIN_WINDOWS_NAME, + "PerflibMSSQL", + PRIO_MSSQL_BUFF_METHODS_PAGE_SPLIT, + update_every, + RRDSET_TYPE_LINE); + + p->rd_access_method_page_splits = + rrddim_add(p->st_access_method_page_splits, "page", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrdlabels_add( + p->st_access_method_page_splits->rrdlabels, "mssql_instance", p->instanceID, RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer( + p->st_access_method_page_splits, + p->rd_access_method_page_splits, + (collected_number)p->MSSQLAccessMethodPageSplits.current.Data); + rrdset_done(p->st_access_method_page_splits); + } +} + +static void do_mssql_errors(PERF_DATA_BLOCK *pDataBlock, struct mssql_instance *p, int update_every) { + char id[RRD_ID_LENGTH_MAX + 1]; + + PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, p->objectName[NETDATA_MSSQL_SQL_ERRORS]); + if (!pObjectType) + return; + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->MSSQLSQLErrorsTotal)) { + if (!p->st_sql_errors) { + snprintfz(id, RRD_ID_LENGTH_MAX, "instance_%s_sql_errors_total", p->instanceID); + netdata_fix_chart_name(id); + p->st_sql_errors = rrdset_create_localhost( + "mssql", + id, + NULL, + "errors", + "mssql.instance_sql_errors", + "Errors", + "errors/s", + 
PLUGIN_WINDOWS_NAME, + "PerflibMSSQL", + PRIO_MSSQL_SQL_ERRORS, + update_every, + RRDSET_TYPE_LINE); + + p->rd_sql_errors = rrddim_add(p->st_sql_errors, "errors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrdlabels_add(p->st_sql_errors->rrdlabels, "mssql_instance", p->instanceID, RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer( + p->st_sql_errors, p->rd_sql_errors, (collected_number)p->MSSQLAccessMethodPageSplits.current.Data); + rrdset_done(p->st_sql_errors); + } +} + +int dict_mssql_locks_charts_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) { + char id[RRD_ID_LENGTH_MAX + 1]; + + struct mssql_lock_instance *mli = value; + const char *instance = dictionary_acquired_item_name((DICTIONARY_ITEM *)item); + + int *update_every = data; + + if (!mli->parent->st_lockWait) { + snprintfz(id, RRD_ID_LENGTH_MAX, "instance_%s_locks_lock_wait", mli->parent->instanceID); + netdata_fix_chart_name(id); + mli->parent->st_lockWait = rrdset_create_localhost( + "mssql", + id, + NULL, + "locks", + "mssql.instance_locks_lock_wait", + "Lock requests that required the caller to wait.", + "locks/s", + PLUGIN_WINDOWS_NAME, + "PerflibMSSQL", + PRIO_MSSQL_LOCKS_WAIT, + *update_every, + RRDSET_TYPE_LINE); + + rrdlabels_add( + mli->parent->st_lockWait->rrdlabels, "mssql_instance", mli->parent->instanceID, RRDLABEL_SRC_AUTO); + } + + if (!mli->rd_lockWait) { + mli->rd_lockWait = rrddim_add(mli->parent->st_lockWait, instance, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + + if (mli->updated & (1 << NETDATA_MSSQL_ENUM_MLI_IDX_WAIT)) { + rrddim_set_by_pointer( + mli->parent->st_lockWait, mli->rd_lockWait, (collected_number)(mli->lockWait.current.Data)); + } + + if (!mli->parent->st_deadLocks) { + snprintfz(id, RRD_ID_LENGTH_MAX, "instance_%s_locks_deadlocks", mli->parent->instanceID); + netdata_fix_chart_name(id); + mli->parent->st_deadLocks = rrdset_create_localhost( + "mssql", + id, + NULL, + "locks", + "mssql.instance_locks_deadlocks", + "Lock requests that resulted in deadlock.", + "deadlocks/s", + PLUGIN_WINDOWS_NAME, + "PerflibMSSQL", + PRIO_MSSQL_LOCKS_DEADLOCK, + *update_every, + RRDSET_TYPE_LINE); + + rrdlabels_add( + mli->parent->st_deadLocks->rrdlabels, "mssql_instance", mli->parent->instanceID, RRDLABEL_SRC_AUTO); + } + + if (!mli->rd_deadLocks) { + mli->rd_deadLocks = rrddim_add(mli->parent->st_deadLocks, instance, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + + if (mli->updated & (1 << NETDATA_MSSQL_ENUM_MLI_IDX_DEAD_LOCKS)) { + rrddim_set_by_pointer( + mli->parent->st_deadLocks, mli->rd_deadLocks, (collected_number)mli->deadLocks.current.Data); + } + + return 1; +} + +static void do_mssql_locks(PERF_DATA_BLOCK *pDataBlock, struct mssql_instance *p, int update_every) { + PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, p->objectName[NETDATA_MSSQL_LOCKS]); + if (!pObjectType) + return; + + PERF_INSTANCE_DEFINITION *pi = NULL; + for (LONG i = 0; i < pObjectType->NumInstances; i++) { + pi = perflibForEachInstance(pDataBlock, pObjectType, pi); + if (!pi) + break; + + if (!getInstanceName(pDataBlock, pObjectType, pi, windows_shared_buffer, sizeof(windows_shared_buffer))) + strncpyz(windows_shared_buffer, "[unknown]", sizeof(windows_shared_buffer) - 1); + + if (!strcasecmp(windows_shared_buffer, "_Total")) + continue; + + struct mssql_lock_instance *mli = dictionary_set(p->locks_instances, windows_shared_buffer, NULL, sizeof(*mli)); + if (!mli) + continue; + + if (!mli->parent) { + mli->parent = p; + } + + if (perflibGetObjectCounter(pDataBlock, 
pObjectType, &mli->lockWait)) + mli->updated |= (1 << NETDATA_MSSQL_ENUM_MLI_IDX_WAIT); + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &mli->deadLocks)) + mli->updated |= (1 << NETDATA_MSSQL_ENUM_MLI_IDX_DEAD_LOCKS); + } + + dictionary_sorted_walkthrough_read(p->locks_instances, dict_mssql_locks_charts_cb, &update_every); + rrdset_done(p->st_lockWait); + rrdset_done(p->st_deadLocks); +} + +static void mssql_database_backup_restore_chart(struct mssql_db_instance *mli, const char *db, int update_every) { + char id[RRD_ID_LENGTH_MAX + 1]; + + if (!mli->st_db_backup_restore_operations) { + snprintfz(id, RRD_ID_LENGTH_MAX, "db_%s_instance_%s_backup_restore_operations", db, mli->parent->instanceID); + netdata_fix_chart_name(id); + mli->st_db_backup_restore_operations = rrdset_create_localhost( + "mssql", + id, + NULL, + "transactions", + "mssql.database_backup_restore_operations", + "Backup IO per database", + "operations/s", + PLUGIN_WINDOWS_NAME, + "PerflibMSSQL", + PRIO_MSSQL_DATABASE_BACKUP_RESTORE_OPERATIONS, + update_every, + RRDSET_TYPE_LINE); + + rrdlabels_add( + mli->st_db_backup_restore_operations->rrdlabels, + "mssql_instance", + mli->parent->instanceID, + RRDLABEL_SRC_AUTO); + rrdlabels_add(mli->st_db_backup_restore_operations->rrdlabels, "database", db, RRDLABEL_SRC_AUTO); + } + + if (!mli->rd_db_backup_restore_operations) { + mli->rd_db_backup_restore_operations = + rrddim_add(mli->st_db_backup_restore_operations, "backup", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + + if (mli->updated & (1 << NETDATA_MSSQL_ENUM_MDI_IDX_BACKUP_RESTORE_OP)) { + rrddim_set_by_pointer( + mli->st_db_backup_restore_operations, + mli->rd_db_backup_restore_operations, + (collected_number)mli->MSSQLDatabaseBackupRestoreOperations.current.Data); + } + + rrdset_done(mli->st_db_backup_restore_operations); +} + +static void mssql_database_log_flushes_chart(struct mssql_db_instance *mli, const char *db, int update_every) { + char id[RRD_ID_LENGTH_MAX + 1]; + + if (!mli->st_db_log_flushes) { + snprintfz(id, RRD_ID_LENGTH_MAX, "db_%s_instance_%s_log_flushes", db, mli->parent->instanceID); + netdata_fix_chart_name(id); + mli->st_db_log_flushes = rrdset_create_localhost( + "mssql", + id, + NULL, + "transactions", + "mssql.database_log_flushes", + "Log flushes", + "flushes/s", + PLUGIN_WINDOWS_NAME, + "PerflibMSSQL", + PRIO_MSSQL_DATABASE_LOG_FLUSHES, + update_every, + RRDSET_TYPE_LINE); + + rrdlabels_add(mli->st_db_log_flushes->rrdlabels, "mssql_instance", mli->parent->instanceID, RRDLABEL_SRC_AUTO); + rrdlabels_add(mli->st_db_log_flushes->rrdlabels, "database", db, RRDLABEL_SRC_AUTO); + } + + if (!mli->rd_db_log_flushes) { + mli->rd_db_log_flushes = rrddim_add(mli->st_db_log_flushes, "flushes", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + + if (mli->updated & (1 << NETDATA_MSSQL_ENUM_MDI_IDX_LOG_FLUSHES)) { + rrddim_set_by_pointer( + mli->st_db_log_flushes, + mli->rd_db_log_flushes, + (collected_number)mli->MSSQLDatabaseLogFlushes.current.Data); + } + + rrdset_done(mli->st_db_log_flushes); +} + +static void mssql_database_log_flushed_chart(struct mssql_db_instance *mli, const char *db, int update_every) { + char id[RRD_ID_LENGTH_MAX + 1]; + + if (!mli->st_db_log_flushed) { + snprintfz(id, RRD_ID_LENGTH_MAX, "db_%s_instance_%s_log_flushed", db, mli->parent->instanceID); + netdata_fix_chart_name(id); + mli->st_db_log_flushed = rrdset_create_localhost( + "mssql", + id, + NULL, + "transactions", + "mssql.database_log_flushed", + "Log flushed", + "bytes/s", + PLUGIN_WINDOWS_NAME, + "PerflibMSSQL", + 
PRIO_MSSQL_DATABASE_LOG_FLUSHED, + update_every, + RRDSET_TYPE_LINE); + + rrdlabels_add(mli->st_db_log_flushed->rrdlabels, "mssql_instance", mli->parent->instanceID, RRDLABEL_SRC_AUTO); + rrdlabels_add(mli->st_db_log_flushed->rrdlabels, "database", db, RRDLABEL_SRC_AUTO); + } + + if (!mli->rd_db_log_flushed) { + mli->rd_db_log_flushed = rrddim_add(mli->st_db_log_flushed, "flushed", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + + if (mli->updated & (1 << NETDATA_MSSQL_ENUM_MDI_IDX_LOG_FLUSHED)) { + rrddim_set_by_pointer( + mli->st_db_log_flushed, + mli->rd_db_log_flushed, + (collected_number)mli->MSSQLDatabaseLogFlushed.current.Data); + } + + rrdset_done(mli->st_db_log_flushed); +} + +static void mssql_transactions_chart(struct mssql_db_instance *mli, const char *db, int update_every) { + char id[RRD_ID_LENGTH_MAX + 1]; + + if (!mli->st_db_transactions) { + snprintfz(id, RRD_ID_LENGTH_MAX, "db_%s_instance_%s_transactions", db, mli->parent->instanceID); + netdata_fix_chart_name(id); + mli->st_db_transactions = rrdset_create_localhost( + "mssql", + id, + NULL, + "transactions", + "mssql.database_transactions", + "Transactions", + "transactions/s", + PLUGIN_WINDOWS_NAME, + "PerflibMSSQL", + PRIO_MSSQL_DATABASE_TRANSACTIONS, + update_every, + RRDSET_TYPE_LINE); + + rrdlabels_add(mli->st_db_transactions->rrdlabels, "mssql_instance", mli->parent->instanceID, RRDLABEL_SRC_AUTO); + rrdlabels_add(mli->st_db_transactions->rrdlabels, "database", db, RRDLABEL_SRC_AUTO); + } + + if (!mli->rd_db_transactions) { + mli->rd_db_transactions = + rrddim_add(mli->st_db_transactions, "transactions", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + + if (mli->updated & (1 << NETDATA_MSSQL_ENUM_MDI_IDX_TRANSACTIONS)) { + rrddim_set_by_pointer( + mli->st_db_transactions, + mli->rd_db_transactions, + (collected_number)mli->MSSQLDatabaseTransactions.current.Data); + } + + rrdset_done(mli->st_db_transactions); +} + +static void mssql_write_transactions_chart(struct mssql_db_instance *mli, const char *db, int update_every) { + char id[RRD_ID_LENGTH_MAX + 1]; + + if (!mli->st_db_write_transactions) { + snprintfz(id, RRD_ID_LENGTH_MAX, "db_%s_instance_%s_write_transactions", db, mli->parent->instanceID); + netdata_fix_chart_name(id); + mli->st_db_write_transactions = rrdset_create_localhost( + "mssql", + id, + NULL, + "transactions", + "mssql.database_write_transactions", + "Write transactions", + "transactions/s", + PLUGIN_WINDOWS_NAME, + "PerflibMSSQL", + PRIO_MSSQL_DATABASE_WRITE_TRANSACTIONS, + update_every, + RRDSET_TYPE_LINE); + + rrdlabels_add( + mli->st_db_write_transactions->rrdlabels, "mssql_instance", mli->parent->instanceID, RRDLABEL_SRC_AUTO); + rrdlabels_add(mli->st_db_write_transactions->rrdlabels, "database", db, RRDLABEL_SRC_AUTO); + } + + if (!mli->rd_db_write_transactions) { + mli->rd_db_write_transactions = + rrddim_add(mli->st_db_write_transactions, "write", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + + if (mli->updated & (1 << NETDATA_MSSQL_ENUM_MDI_IDX_WRITE_TRANSACTIONS)) { + rrddim_set_by_pointer( + mli->st_db_write_transactions, + mli->rd_db_write_transactions, + (collected_number)mli->MSSQLDatabaseWriteTransactions.current.Data); + } + + rrdset_done(mli->st_db_write_transactions); +} + +static void mssql_active_transactions_chart(struct mssql_db_instance *mli, const char *db, int update_every) { + char id[RRD_ID_LENGTH_MAX + 1]; + + if (!mli->st_db_active_transactions) { + snprintfz(id, RRD_ID_LENGTH_MAX, "db_%s_instance_%s_active_transactions", db, mli->parent->instanceID); + 
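+ // The generated id embeds both the database and the MSSQL instance name, so it is sanitized before the chart is created.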
netdata_fix_chart_name(id); + mli->st_db_active_transactions = rrdset_create_localhost( + "mssql", + id, + NULL, + "transactions", + "mssql.database_active_transactions", + "Active transactions per database", + "transactions", + PLUGIN_WINDOWS_NAME, + "PerflibMSSQL", + PRIO_MSSQL_DATABASE_ACTIVE_TRANSACTIONS, + update_every, + RRDSET_TYPE_LINE); + + rrdlabels_add( + mli->st_db_active_transactions->rrdlabels, "mssql_instance", mli->parent->instanceID, RRDLABEL_SRC_AUTO); + rrdlabels_add(mli->st_db_active_transactions->rrdlabels, "database", db, RRDLABEL_SRC_AUTO); + } + + if (!mli->rd_db_active_transactions) { + mli->rd_db_active_transactions = + rrddim_add(mli->st_db_active_transactions, "active", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + + if (mli->updated & (1 << NETDATA_MSSQL_ENUM_MDI_IDX_ACTIVE_TRANSACTIONS)) { + rrddim_set_by_pointer( + mli->st_db_active_transactions, + mli->rd_db_active_transactions, + (collected_number)mli->MSSQLDatabaseActiveTransactions.current.Data); + } + + rrdset_done(mli->st_db_active_transactions); +} + +static void mssql_data_file_size_chart(struct mssql_db_instance *mli, const char *db, int update_every) { + char id[RRD_ID_LENGTH_MAX + 1]; + + if (!mli->st_db_data_file_size) { + snprintfz(id, RRD_ID_LENGTH_MAX, "db_%s_instance_%s_data_files_size", db, mli->parent->instanceID); + netdata_fix_chart_name(id); + mli->st_db_data_file_size = rrdset_create_localhost( + "mssql", + id, + NULL, + "size", + "mssql.database_data_files_size", + "Current database size.", + "bytes", + PLUGIN_WINDOWS_NAME, + "PerflibMSSQL", + PRIO_MSSQL_DATABASE_DATA_FILE_SIZE, + update_every, + RRDSET_TYPE_LINE); + + rrdlabels_add( + mli->st_db_data_file_size->rrdlabels, "mssql_instance", mli->parent->instanceID, RRDLABEL_SRC_AUTO); + rrdlabels_add(mli->st_db_data_file_size->rrdlabels, "database", db, RRDLABEL_SRC_AUTO); + } + + if (!mli->rd_db_data_file_size) { + mli->rd_db_data_file_size = rrddim_add(mli->st_db_data_file_size, "size", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + + // FIXME: If the value cannot be retrieved, remove the chart instead of displaying a 0 value. + collected_number data = + (mli->updated & (1 << NETDATA_MSSQL_ENUM_MDI_IDX_FILE_SIZE)) ? mli->MSSQLDatabaseDataFileSize.current.Data : 0; + rrddim_set_by_pointer(mli->st_db_data_file_size, mli->rd_db_data_file_size, data); + + rrdset_done(mli->st_db_data_file_size); +} + +int dict_mssql_databases_charts_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) { + struct mssql_db_instance *mli = value; + const char *db = dictionary_acquired_item_name((DICTIONARY_ITEM *)item); + + int *update_every = data; + + void (*transaction_chart[])(struct mssql_db_instance *, const char *, int) = { + // FIXME: allegedly Netdata collects negative values (MSSQLDatabaseDataFileSize). + // something is wrong, perflibdump shows correct values. 
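+ // Until the negative values are understood, the data file size chart below stays disabled: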
+ // mssql_data_file_size_chart, + mssql_transactions_chart, + mssql_database_backup_restore_chart, + mssql_database_log_flushed_chart, + mssql_database_log_flushes_chart, + mssql_active_transactions_chart, + mssql_write_transactions_chart, + + // Last function pointer must be NULL + NULL}; + + int i; + for (i = 0; transaction_chart[i]; i++) { + transaction_chart[i](mli, db, *update_every); + } + + return 1; +} + +static void do_mssql_databases(PERF_DATA_BLOCK *pDataBlock, struct mssql_instance *p, int update_every) { + PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, p->objectName[NETDATA_MSSQL_DATABASE]); + if (!pObjectType) + return; + + PERF_INSTANCE_DEFINITION *pi = NULL; + for (LONG i = 0; i < pObjectType->NumInstances; i++) { + pi = perflibForEachInstance(pDataBlock, pObjectType, pi); + if (!pi) + break; + + if (!getInstanceName(pDataBlock, pObjectType, pi, windows_shared_buffer, sizeof(windows_shared_buffer))) + strncpyz(windows_shared_buffer, "[unknown]", sizeof(windows_shared_buffer) - 1); + + if (!strcasecmp(windows_shared_buffer, "_Total")) + continue; + + struct mssql_db_instance *mdi = dictionary_set(p->databases, windows_shared_buffer, NULL, sizeof(*mdi)); + if (!mdi) + continue; + + mdi->updated = 0; + if (!mdi->parent) { + mdi->parent = p; + } + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &mdi->MSSQLDatabaseDataFileSize)) { + LONGLONG value = (LONGLONG)mdi->MSSQLDatabaseDataFileSize.current.Data; + if (value > 0) + mdi->updated |= (1 << NETDATA_MSSQL_ENUM_MDI_IDX_FILE_SIZE); + } + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &mdi->MSSQLDatabaseActiveTransactions)) + mdi->updated |= (1 << NETDATA_MSSQL_ENUM_MDI_IDX_ACTIVE_TRANSACTIONS); + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &mdi->MSSQLDatabaseBackupRestoreOperations)) + mdi->updated |= (1 << NETDATA_MSSQL_ENUM_MDI_IDX_BACKUP_RESTORE_OP); + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &mdi->MSSQLDatabaseLogFlushed)) + mdi->updated |= (1 << NETDATA_MSSQL_ENUM_MDI_IDX_LOG_FLUSHED); + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &mdi->MSSQLDatabaseLogFlushes)) + mdi->updated |= (1 << NETDATA_MSSQL_ENUM_MDI_IDX_LOG_FLUSHES); + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &mdi->MSSQLDatabaseTransactions)) + mdi->updated |= (1 << NETDATA_MSSQL_ENUM_MDI_IDX_TRANSACTIONS); + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &mdi->MSSQLDatabaseWriteTransactions)) + mdi->updated |= (1 << NETDATA_MSSQL_ENUM_MDI_IDX_WRITE_TRANSACTIONS); + } + + dictionary_sorted_walkthrough_read(p->databases, dict_mssql_databases_charts_cb, &update_every); +} + +static void do_mssql_memory_mgr(PERF_DATA_BLOCK *pDataBlock, struct mssql_instance *p, int update_every) { + char id[RRD_ID_LENGTH_MAX + 1]; + + PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, p->objectName[NETDATA_MSSQL_MEMORY]); + if (!pObjectType) + return; + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->MSSQLConnectionMemoryBytes)) { + if (!p->st_conn_memory) { + snprintfz(id, RRD_ID_LENGTH_MAX, "instance_%s_memmgr_connection_memory_bytes", p->instanceID); + netdata_fix_chart_name(id); + p->st_conn_memory = rrdset_create_localhost( + "mssql", + id, + NULL, + "memory", + "mssql.instance_memmgr_connection_memory_bytes", + "Amount of dynamic memory to maintain connections", + "bytes", + PLUGIN_WINDOWS_NAME, + "PerflibMSSQL", + PRIO_MSSQL_MEMMGR_CONNECTION_MEMORY_BYTES, + update_every, + RRDSET_TYPE_LINE); + + p->rd_conn_memory = rrddim_add(p->st_conn_memory, 
"memory", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + + rrdlabels_add(p->st_conn_memory->rrdlabels, "mssql_instance", p->instanceID, RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer( + p->st_conn_memory, + p->rd_conn_memory, + (collected_number)(p->MSSQLConnectionMemoryBytes.current.Data * 1024)); + rrdset_done(p->st_conn_memory); + } + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->MSSQLExternalBenefitOfMemory)) { + if (!p->st_ext_benefit_mem) { + snprintfz(id, RRD_ID_LENGTH_MAX, "instance_%s_memmgr_external_benefit_of_memory", p->instanceID); + netdata_fix_chart_name(id); + p->st_ext_benefit_mem = rrdset_create_localhost( + "mssql", + id, + NULL, + "memory", + "mssql.instance_memmgr_external_benefit_of_memory", + "Performance benefit from adding memory to a specific cache", + "bytes", + PLUGIN_WINDOWS_NAME, + "PerflibMSSQL", + PRIO_MSSQL_MEMMGR_EXTERNAL_BENEFIT_OF_MEMORY, + update_every, + RRDSET_TYPE_LINE); + + p->rd_ext_benefit_mem = rrddim_add(p->st_ext_benefit_mem, "benefit", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + + rrdlabels_add(p->st_ext_benefit_mem->rrdlabels, "mssql_instance", p->instanceID, RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer( + p->st_ext_benefit_mem, + p->rd_ext_benefit_mem, + (collected_number)p->MSSQLExternalBenefitOfMemory.current.Data); + rrdset_done(p->st_ext_benefit_mem); + } + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->MSSQLPendingMemoryGrants)) { + if (!p->st_pending_mem_grant) { + snprintfz(id, RRD_ID_LENGTH_MAX, "instance_%s_memmgr_pending_memory_grants", p->instanceID); + netdata_fix_chart_name(id); + p->st_pending_mem_grant = rrdset_create_localhost( + "mssql", + id, + NULL, + "memory", + "mssql.instance_memmgr_pending_memory_grants", + "Process waiting for memory grant", + "processes", + PLUGIN_WINDOWS_NAME, + "PerflibMSSQL", + PRIO_MSSQL_MEMMGR_PENDING_MEMORY_GRANTS, + update_every, + RRDSET_TYPE_LINE); + + p->rd_pending_mem_grant = + rrddim_add(p->st_pending_mem_grant, "pending", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + + rrdlabels_add(p->st_pending_mem_grant->rrdlabels, "mssql_instance", p->instanceID, RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer( + p->st_pending_mem_grant, + p->rd_pending_mem_grant, + (collected_number)p->MSSQLPendingMemoryGrants.current.Data); + + rrdset_done(p->st_pending_mem_grant); + } + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->MSSQLTotalServerMemory)) { + if (!p->st_mem_tot_server) { + snprintfz(id, RRD_ID_LENGTH_MAX, "instance_%s_memmgr_server_memory", p->instanceID); + netdata_fix_chart_name(id); + p->st_mem_tot_server = rrdset_create_localhost( + "mssql", + id, + NULL, + "memory", + "mssql.instance_memmgr_server_memory", + "Memory committed", + "bytes", + PLUGIN_WINDOWS_NAME, + "PerflibMSSQL", + PRIO_MSSQL_MEMMGR_TOTAL_SERVER, + update_every, + RRDSET_TYPE_LINE); + + p->rd_mem_tot_server = rrddim_add(p->st_mem_tot_server, "memory", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + + rrdlabels_add(p->st_mem_tot_server->rrdlabels, "mssql_instance", p->instanceID, RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer( + p->st_mem_tot_server, + p->rd_mem_tot_server, + (collected_number)(p->MSSQLTotalServerMemory.current.Data * 1024)); + + rrdset_done(p->st_mem_tot_server); + } +} + +int dict_mssql_charts_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) { + struct mssql_instance *p = value; + int *update_every = data; + + static void (*doMSSQL[])(PERF_DATA_BLOCK *, struct mssql_instance *, int) = { + do_mssql_general_stats, + do_mssql_errors, + 
do_mssql_databases, + do_mssql_locks, + do_mssql_memory_mgr, + do_mssql_buffer_management, + do_mssql_sql_statistics, + do_mssql_access_methods}; + + DWORD i; + for (i = 0; i < NETDATA_MSSQL_METRICS_END; i++) { + if (!doMSSQL[i]) + continue; + + DWORD id = RegistryFindIDByName(p->objectName[i]); + if (id == PERFLIB_REGISTRY_NAME_NOT_FOUND) + return -1; + + PERF_DATA_BLOCK *pDataBlock = perflibGetPerformanceData(id); + if (!pDataBlock) + return -1; + + doMSSQL[i](pDataBlock, p, *update_every); + } + + return 1; +} + +int do_PerflibMSSQL(int update_every, usec_t dt __maybe_unused) { + static bool initialized = false; + + if (unlikely(!initialized)) { + if (initialize()) + return -1; + + initialized = true; + } + + dictionary_sorted_walkthrough_read(mssql_instances, dict_mssql_charts_cb, &update_every); + + return 0; +} diff --git a/src/collectors/windows.plugin/perflib-names.c b/src/collectors/windows.plugin/perflib-names.c deleted file mode 100644 index 5b47cbce7..000000000 --- a/src/collectors/windows.plugin/perflib-names.c +++ /dev/null @@ -1,242 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include "perflib.h" - -#define REGISTRY_KEY "SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Perflib\\009" - -typedef struct perflib_registry { - DWORD id; - char *key; - char *help; -} perfLibRegistryEntry; - -static inline bool compare_perfLibRegistryEntry(const char *k1, const char *k2) { - return strcmp(k1, k2) == 0; -} - -static inline const char *value2key_perfLibRegistryEntry(perfLibRegistryEntry *entry) { - return entry->key; -} - -#define SIMPLE_HASHTABLE_COMPARE_KEYS_FUNCTION compare_perfLibRegistryEntry -#define SIMPLE_HASHTABLE_VALUE2KEY_FUNCTION value2key_perfLibRegistryEntry -#define SIMPLE_HASHTABLE_KEY_TYPE const char -#define SIMPLE_HASHTABLE_VALUE_TYPE perfLibRegistryEntry -#define SIMPLE_HASHTABLE_NAME _PERFLIB -#include "libnetdata/simple_hashtable.h" - -static struct { - SPINLOCK spinlock; - size_t size; - perfLibRegistryEntry **array; - struct simple_hashtable_PERFLIB hashtable; - FILETIME lastWriteTime; -} names_globals = { - .spinlock = NETDATA_SPINLOCK_INITIALIZER, - .size = 0, - .array = NULL, -}; - -DWORD RegistryFindIDByName(const char *name) { - DWORD rc = PERFLIB_REGISTRY_NAME_NOT_FOUND; - - spinlock_lock(&names_globals.spinlock); - XXH64_hash_t hash = XXH3_64bits((void *)name, strlen(name)); - SIMPLE_HASHTABLE_SLOT_PERFLIB *sl = simple_hashtable_get_slot_PERFLIB(&names_globals.hashtable, hash, name, false); - perfLibRegistryEntry *e = SIMPLE_HASHTABLE_SLOT_DATA(sl); - if(e) rc = e->id; - spinlock_unlock(&names_globals.spinlock); - - return rc; -} - -static inline void RegistryAddToHashTable_unsafe(perfLibRegistryEntry *entry) { - XXH64_hash_t hash = XXH3_64bits((void *)entry->key, strlen(entry->key)); - SIMPLE_HASHTABLE_SLOT_PERFLIB *sl = simple_hashtable_get_slot_PERFLIB(&names_globals.hashtable, hash, entry->key, true); - perfLibRegistryEntry *e = SIMPLE_HASHTABLE_SLOT_DATA(sl); - if(!e || e->id > entry->id) - simple_hashtable_set_slot_PERFLIB(&names_globals.hashtable, sl, hash, entry); -} - -static void RegistrySetData_unsafe(DWORD id, const char *key, const char *help) { - if(id >= names_globals.size) { - // increase the size of the array - - size_t old_size = names_globals.size; - - if(!names_globals.size) - names_globals.size = 20000; - else - names_globals.size *= 2; - - names_globals.array = reallocz(names_globals.array, names_globals.size * sizeof(perfLibRegistryEntry *)); - - memset(names_globals.array + old_size, 0, (names_globals.size - old_size) 
* sizeof(perfLibRegistryEntry *)); - } - - perfLibRegistryEntry *entry = names_globals.array[id]; - if(!entry) - entry = names_globals.array[id] = (perfLibRegistryEntry *)calloc(1, sizeof(perfLibRegistryEntry)); - - bool add_to_hash = false; - if(key && !entry->key) { - entry->key = strdup(key); - add_to_hash = true; - } - - if(help && !entry->help) - entry->help = strdup(help); - - entry->id = id; - - if(add_to_hash) - RegistryAddToHashTable_unsafe(entry); -} - -const char *RegistryFindNameByID(DWORD id) { - const char *s = ""; - spinlock_lock(&names_globals.spinlock); - - if(id < names_globals.size) { - perfLibRegistryEntry *titleEntry = names_globals.array[id]; - if(titleEntry && titleEntry->key) - s = titleEntry->key; - } - - spinlock_unlock(&names_globals.spinlock); - return s; -} - -const char *RegistryFindHelpByID(DWORD id) { - const char *s = ""; - spinlock_lock(&names_globals.spinlock); - - if(id < names_globals.size) { - perfLibRegistryEntry *titleEntry = names_globals.array[id]; - if(titleEntry && titleEntry->help) - s = titleEntry->help; - } - - spinlock_unlock(&names_globals.spinlock); - return s; -} - -// ---------------------------------------------------------- - -static inline void readRegistryKeys_unsafe(BOOL helps) { - TCHAR *pData = NULL; - - HKEY hKey; - DWORD dwType; - DWORD dwSize = 0; - LONG lStatus; - - LPCSTR valueName; - if(helps) - valueName = TEXT("help"); - else - valueName = TEXT("CounterDefinition"); - - // Open the key for the English counters - lStatus = RegOpenKeyEx(HKEY_LOCAL_MACHINE, TEXT(REGISTRY_KEY), 0, KEY_READ, &hKey); - if (lStatus != ERROR_SUCCESS) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "Failed to open registry key HKEY_LOCAL_MACHINE, subkey '%s', error %ld\n", REGISTRY_KEY, (long)lStatus); - return; - } - - // Get the size of the 'Counters' data - lStatus = RegQueryValueEx(hKey, valueName, NULL, &dwType, NULL, &dwSize); - if (lStatus != ERROR_SUCCESS) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "Failed to get registry key HKEY_LOCAL_MACHINE, subkey '%s', value '%s', size of data, error %ld\n", - REGISTRY_KEY, (const char *)valueName, (long)lStatus); - goto cleanup; - } - - // Allocate memory for the data - pData = mallocz(dwSize); - - // Read the 'Counters' data - lStatus = RegQueryValueEx(hKey, valueName, NULL, &dwType, (LPBYTE)pData, &dwSize); - if (lStatus != ERROR_SUCCESS || dwType != REG_MULTI_SZ) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "Failed to get registry key HKEY_LOCAL_MACHINE, subkey '%s', value '%s', data, error %ld\n", - REGISTRY_KEY, (const char *)valueName, (long)lStatus); - goto cleanup; - } - - // Process the counter data - TCHAR *ptr = pData; - while (*ptr) { - TCHAR *sid = ptr; // First string is the ID - ptr += lstrlen(ptr) + 1; // Move to the next string - TCHAR *name = ptr; // Second string is the name - ptr += lstrlen(ptr) + 1; // Move to the next pair - - DWORD id = strtoul(sid, NULL, 10); - - if(helps) - RegistrySetData_unsafe(id, NULL, name); - else - RegistrySetData_unsafe(id, name, NULL); - } - -cleanup: - if(pData) freez(pData); - RegCloseKey(hKey); -} - -static BOOL RegistryKeyModification(FILETIME *lastWriteTime) { - HKEY hKey; - LONG lResult; - BOOL ret = FALSE; - - // Open the registry key - lResult = RegOpenKeyEx(HKEY_LOCAL_MACHINE, TEXT(REGISTRY_KEY), 0, KEY_READ, &hKey); - if (lResult != ERROR_SUCCESS) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "Failed to open registry key HKEY_LOCAL_MACHINE, subkey '%s', error %ld\n", REGISTRY_KEY, (long)lResult); - return FALSE; - } - - // Get the last write time - lResult = 
RegQueryInfoKey(hKey, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, lastWriteTime); - if (lResult != ERROR_SUCCESS) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "Failed to query registry key HKEY_LOCAL_MACHINE, subkey '%s', last write time, error %ld\n", REGISTRY_KEY, (long)lResult); - ret = FALSE; - } - else - ret = TRUE; - - RegCloseKey(hKey); - return ret; -} - -static inline void RegistryFetchAll_unsafe(void) { - readRegistryKeys_unsafe(FALSE); - readRegistryKeys_unsafe(TRUE); -} - -void PerflibNamesRegistryInitialize(void) { - spinlock_lock(&names_globals.spinlock); - simple_hashtable_init_PERFLIB(&names_globals.hashtable, 20000); - RegistryKeyModification(&names_globals.lastWriteTime); - RegistryFetchAll_unsafe(); - spinlock_unlock(&names_globals.spinlock); -} - -void PerflibNamesRegistryUpdate(void) { - FILETIME lastWriteTime = { 0 }; - RegistryKeyModification(&lastWriteTime); - - if(CompareFileTime(&lastWriteTime, &names_globals.lastWriteTime) > 0) { - spinlock_lock(&names_globals.spinlock); - if(CompareFileTime(&lastWriteTime, &names_globals.lastWriteTime) > 0) { - names_globals.lastWriteTime = lastWriteTime; - RegistryFetchAll_unsafe(); - } - spinlock_unlock(&names_globals.spinlock); - } -} diff --git a/src/collectors/windows.plugin/perflib-netframework.c b/src/collectors/windows.plugin/perflib-netframework.c new file mode 100644 index 000000000..28d58cae8 --- /dev/null +++ b/src/collectors/windows.plugin/perflib-netframework.c @@ -0,0 +1,796 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "windows_plugin.h" +#include "windows-internals.h" + +enum netdata_netframework_metrics { + NETDATA_NETFRAMEWORK_EXCEPTIONS, + NETDATA_NETFRAMEWORK_INTEROP, + NETDATA_NETFRAMEWORK_JIT, + NETDATA_NETFRAMEWORK_LOADING, + + NETDATA_NETFRAMEWORK_END +}; + +struct net_framework_instances { + RRDSET *st_clrexception_thrown; + RRDDIM *rd_clrexception_thrown; + + RRDSET *st_clrexception_filters; + RRDDIM *rd_clrexception_filters; + + RRDSET *st_clrexception_finallys; + RRDDIM *rd_clrexception_finallys; + + RRDSET *st_clrexception_total_catch_depth; + RRDDIM *rd_clrexception_total_catch_depth; + + RRDSET *st_clrinterop_com_callable_wrappers; + RRDDIM *rd_clrinterop_com_callable_wrappers; + + RRDSET *st_clrinterop_marshalling; + RRDDIM *rd_clrinterop_marshalling; + + RRDSET *st_clrinterop_interop_stubs_created; + RRDDIM *rd_clrinterop_interop_stubs_created; + + RRDSET *st_clrjit_methods; + RRDDIM *rd_clrjit_methods; + + RRDSET *st_clrjit_time; + RRDDIM *rd_clrjit_time; + + RRDSET *st_clrjit_standard_failures; + RRDDIM *rd_clrjit_standard_failures; + + RRDSET *st_clrjit_il_bytes; + RRDDIM *rd_clrjit_il_bytes; + + RRDSET *st_clrloading_heap_size; + RRDDIM *rd_clrloading_heap_size; + + RRDSET *st_clrloading_app_domains_loaded; + RRDDIM *rd_clrloading_app_domains_loaded; + + RRDSET *st_clrloading_app_domains_unloaded; + RRDDIM *rd_clrloading_app_domains_unloaded; + + RRDSET *st_clrloading_assemblies_loaded; + RRDDIM *rd_clrloading_assemblies_loaded; + + RRDSET *st_clrloading_classes_loaded; + RRDDIM *rd_clrloading_classes_loaded; + + RRDSET *st_clrloading_class_load_failure; + RRDDIM *rd_clrloading_class_load_failure; + + COUNTER_DATA NETFrameworkCLRExceptionThrown; + COUNTER_DATA NETFrameworkCLRExceptionFilters; + COUNTER_DATA NETFrameworkCLRExceptionFinallys; + COUNTER_DATA NETFrameworkCLRExceptionTotalCatchDepth; + + COUNTER_DATA NETFrameworkCLRInteropCOMCallableWrappers; + COUNTER_DATA NETFrameworkCLRInteropMarshalling; + COUNTER_DATA NETFrameworkCLRInteropStubsCreated; + + 
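+ // JIT compilation counters; the .key strings assigned in initialize_net_framework_processes_keys() below are matched against the counter names of the ".NET CLR Jit" perflib object.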
COUNTER_DATA NETFrameworkCLRJITMethods; + COUNTER_DATA NETFrameworkCLRJITPercentTime; + COUNTER_DATA NETFrameworkCLRJITFrequencyTime; + COUNTER_DATA NETFrameworkCLRJITStandardFailures; + COUNTER_DATA NETFrameworkCLRJITIlBytes; + + COUNTER_DATA NETFrameworkCLRLoadingHeapSize; + COUNTER_DATA NETFrameworkCLRLoadingAppDomainsLoaded; + COUNTER_DATA NETFrameworkCLRLoadingAppDomainsUnloaded; + COUNTER_DATA NETFrameworkCLRLoadingAssembliesLoaded; + COUNTER_DATA NETFrameworkCLRLoadingClassesLoaded; + COUNTER_DATA NETFrameworkCLRLoadingClassLoadFailure; +}; + +static inline void initialize_net_framework_processes_keys(struct net_framework_instances *p) { + p->NETFrameworkCLRExceptionFilters.key = "# of Filters / sec"; + p->NETFrameworkCLRExceptionFinallys.key = "# of Finallys / sec"; + p->NETFrameworkCLRExceptionThrown.key = "# of Exceps Thrown / sec"; + p->NETFrameworkCLRExceptionTotalCatchDepth.key = "Throw To Catch Depth / sec"; + + p->NETFrameworkCLRInteropCOMCallableWrappers.key = "# of CCWs"; + p->NETFrameworkCLRInteropMarshalling.key = "# of marshalling"; + p->NETFrameworkCLRInteropStubsCreated.key = "# of Stubs"; + + p->NETFrameworkCLRJITMethods.key = "# of Methods Jitted"; + p->NETFrameworkCLRJITPercentTime.key = "% Time in Jit"; + p->NETFrameworkCLRJITFrequencyTime.key = "IL Bytes Jitted / sec"; + p->NETFrameworkCLRJITStandardFailures.key = "Standard Jit Failures"; + p->NETFrameworkCLRJITIlBytes.key = "# of IL Bytes Jitted"; + + p->NETFrameworkCLRLoadingHeapSize.key = "Bytes in Loader Heap"; + p->NETFrameworkCLRLoadingAppDomainsLoaded.key = "Rate of appdomains"; + p->NETFrameworkCLRLoadingAppDomainsUnloaded.key = "Total appdomains unloaded"; + p->NETFrameworkCLRLoadingAssembliesLoaded.key = "Total Assemblies"; + p->NETFrameworkCLRLoadingClassesLoaded.key = "Total Classes Loaded"; + p->NETFrameworkCLRLoadingClassLoadFailure.key = "Total # of Load Failures"; +} + +void dict_net_framework_processes_insert_cb( + const DICTIONARY_ITEM *item __maybe_unused, + void *value, + void *data __maybe_unused) { + struct net_framework_instances *p = value; + initialize_net_framework_processes_keys(p); +} + +static DICTIONARY *processes = NULL; + +static void initialize(void) { + processes = dictionary_create_advanced( + DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, NULL, sizeof(struct net_framework_instances)); + + dictionary_register_insert_callback(processes, dict_net_framework_processes_insert_cb, NULL); +} + +static void netdata_framework_clr_exceptions(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, int update_every) { + PERF_INSTANCE_DEFINITION *pi = NULL; + char id[RRD_ID_LENGTH_MAX + 1]; + for (LONG i = 0; i < pObjectType->NumInstances; i++) { + pi = perflibForEachInstance(pDataBlock, pObjectType, pi); + if (!pi) + break; + + if (!getInstanceName(pDataBlock, pObjectType, pi, windows_shared_buffer, sizeof(windows_shared_buffer))) + strncpyz(windows_shared_buffer, "[unknown]", sizeof(windows_shared_buffer) - 1); + + if (strcasecmp(windows_shared_buffer, "_Global_") == 0) + continue; + + struct net_framework_instances *p = dictionary_set(processes, windows_shared_buffer, NULL, sizeof(*p)); + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->NETFrameworkCLRExceptionThrown)) { + if (!p->st_clrexception_thrown) { + snprintfz(id, RRD_ID_LENGTH_MAX, "%s_clrexception_thrown", windows_shared_buffer); + netdata_fix_chart_name(id); + p->st_clrexception_thrown = rrdset_create_localhost( + "netframework", + id, + NULL, + "exceptions", + "netframework.clrexception_thrown", + 
"Thrown exceptions", + "exceptions/s", + PLUGIN_WINDOWS_NAME, + "PerflibNetFramework", + PRIO_NETFRAMEWORK_CLR_EXCEPTION_THROWN, + update_every, + RRDSET_TYPE_LINE); + + p->rd_clrexception_thrown = + rrddim_add(p->st_clrexception_thrown, "exceptions", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrdlabels_add( + p->st_clrexception_thrown->rrdlabels, "process", windows_shared_buffer, RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer( + p->st_clrexception_thrown, + p->rd_clrexception_thrown, + (collected_number)p->NETFrameworkCLRExceptionThrown.current.Data); + rrdset_done(p->st_clrexception_thrown); + } + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->NETFrameworkCLRExceptionFilters)) { + if (!p->st_clrexception_filters) { + snprintfz(id, RRD_ID_LENGTH_MAX, "%s_clrexception_filters", windows_shared_buffer); + netdata_fix_chart_name(id); + p->st_clrexception_filters = rrdset_create_localhost( + "netframework", + id, + NULL, + "exceptions", + "netframework.clrexception_filters", + "Thrown exceptions filters", + "filters/s", + PLUGIN_WINDOWS_NAME, + "PerflibNetFramework", + PRIO_NETFRAMEWORK_CLR_EXCEPTION_FILTERS, + update_every, + RRDSET_TYPE_LINE); + + p->rd_clrexception_filters = + rrddim_add(p->st_clrexception_filters, "filters", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrdlabels_add( + p->st_clrexception_filters->rrdlabels, "process", windows_shared_buffer, RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer( + p->st_clrexception_filters, + p->rd_clrexception_filters, + (collected_number)p->NETFrameworkCLRExceptionFilters.current.Data); + rrdset_done(p->st_clrexception_filters); + } + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->NETFrameworkCLRExceptionFinallys)) { + if (!p->st_clrexception_finallys) { + snprintfz(id, RRD_ID_LENGTH_MAX, "%s_clrexception_finallys", windows_shared_buffer); + netdata_fix_chart_name(id); + p->st_clrexception_finallys = rrdset_create_localhost( + "netframework", + id, + NULL, + "exceptions", + "netframework.clrexception_finallys", + "Executed finally blocks", + "finallys/s", + PLUGIN_WINDOWS_NAME, + "PerflibNetFramework", + PRIO_NETFRAMEWORK_CLR_EXCEPTION_FINALLYS, + update_every, + RRDSET_TYPE_LINE); + + p->rd_clrexception_finallys = + rrddim_add(p->st_clrexception_finallys, "finallys", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrdlabels_add( + p->st_clrexception_finallys->rrdlabels, "process", windows_shared_buffer, RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer( + p->st_clrexception_finallys, + p->rd_clrexception_finallys, + (collected_number)p->NETFrameworkCLRExceptionFinallys.current.Data); + rrdset_done(p->st_clrexception_finallys); + } + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->NETFrameworkCLRExceptionTotalCatchDepth)) { + if (!p->st_clrexception_total_catch_depth) { + snprintfz(id, RRD_ID_LENGTH_MAX, "%s_clrexception_throw_to_catch_depth", windows_shared_buffer); + netdata_fix_chart_name(id); + p->st_clrexception_total_catch_depth = rrdset_create_localhost( + "netframework", + id, + NULL, + "exceptions", + "netframework.clrexception_throw_to_catch_depth", + "Traversed stack frames", + "stack_frames/s", + PLUGIN_WINDOWS_NAME, + "PerflibNetFramework", + PRIO_NETFRAMEWORK_CLR_EXCEPTION_THROW_TO_CATCH_DEPTH, + update_every, + RRDSET_TYPE_LINE); + + p->rd_clrexception_total_catch_depth = rrddim_add( + p->st_clrexception_total_catch_depth, "traversed", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrdlabels_add( + p->st_clrexception_total_catch_depth->rrdlabels, + "process", + windows_shared_buffer, + 
RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer( + p->st_clrexception_total_catch_depth, + p->rd_clrexception_total_catch_depth, + (collected_number)p->NETFrameworkCLRExceptionTotalCatchDepth.current.Data); + rrdset_done(p->st_clrexception_total_catch_depth); + } + } +} + +static void netdata_framework_clr_interop(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, int update_every) { + PERF_INSTANCE_DEFINITION *pi = NULL; + char id[RRD_ID_LENGTH_MAX + 1]; + for (LONG i = 0; i < pObjectType->NumInstances; i++) { + pi = perflibForEachInstance(pDataBlock, pObjectType, pi); + if (!pi) + break; + + if (!getInstanceName(pDataBlock, pObjectType, pi, windows_shared_buffer, sizeof(windows_shared_buffer))) + strncpyz(windows_shared_buffer, "[unknown]", sizeof(windows_shared_buffer) - 1); + + if (strcasecmp(windows_shared_buffer, "_Global_") == 0) + continue; + + struct net_framework_instances *p = dictionary_set(processes, windows_shared_buffer, NULL, sizeof(*p)); + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->NETFrameworkCLRInteropCOMCallableWrappers)) { + if (!p->st_clrinterop_com_callable_wrappers) { + snprintfz(id, RRD_ID_LENGTH_MAX, "%s_clrinterop_com_callable_wrappers", windows_shared_buffer); + netdata_fix_chart_name(id); + p->st_clrinterop_com_callable_wrappers = rrdset_create_localhost( + "netframework", + id, + NULL, + "interop", + "netframework.clrinterop_com_callable_wrappers", + "COM callable wrappers (CCW)", + "ccw/s", + PLUGIN_WINDOWS_NAME, + "PerflibNetFramework", + PRIO_NETFRAMEWORK_CLR_INTEROP_CCW, + update_every, + RRDSET_TYPE_LINE); + + p->rd_clrinterop_com_callable_wrappers = rrddim_add( + p->st_clrinterop_com_callable_wrappers, + "com_callable_wrappers", + NULL, + 1, + 1, + RRD_ALGORITHM_INCREMENTAL); + + rrdlabels_add( + p->st_clrinterop_com_callable_wrappers->rrdlabels, + "process", + windows_shared_buffer, + RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer( + p->st_clrinterop_com_callable_wrappers, + p->rd_clrinterop_com_callable_wrappers, + (collected_number)p->NETFrameworkCLRInteropCOMCallableWrappers.current.Data); + rrdset_done(p->st_clrinterop_com_callable_wrappers); + } + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->NETFrameworkCLRInteropMarshalling)) { + if (!p->st_clrinterop_marshalling) { + snprintfz(id, RRD_ID_LENGTH_MAX, "%s_clrinterop_interop_marshalling", windows_shared_buffer); + netdata_fix_chart_name(id); + p->st_clrinterop_marshalling = rrdset_create_localhost( + "netframework", + id, + NULL, + "interop", + "netframework.clrinterop_interop_marshallings", + "Arguments and return values marshallings", + "marshalling/s", + PLUGIN_WINDOWS_NAME, + "PerflibNetFramework", + PRIO_NETFRAMEWORK_CLR_INTEROP_MARSHALLING, + update_every, + RRDSET_TYPE_LINE); + + p->rd_clrinterop_marshalling = + rrddim_add(p->st_clrinterop_marshalling, "marshallings", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrdlabels_add( + p->st_clrinterop_marshalling->rrdlabels, "process", windows_shared_buffer, RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer( + p->st_clrinterop_marshalling, + p->rd_clrinterop_marshalling, + (collected_number)p->NETFrameworkCLRInteropMarshalling.current.Data); + rrdset_done(p->st_clrinterop_marshalling); + } + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->NETFrameworkCLRInteropStubsCreated)) { + if (!p->st_clrinterop_interop_stubs_created) { + snprintfz(id, RRD_ID_LENGTH_MAX, "%s_clrinterop_interop_stubs_created", windows_shared_buffer); + netdata_fix_chart_name(id); + p->st_clrinterop_interop_stubs_created = 
rrdset_create_localhost( + "netframework", + id, + NULL, + "interop", + "netframework.clrinterop_interop_stubs_created", + "Created stubs", + "stubs/s", + PLUGIN_WINDOWS_NAME, + "PerflibNetFramework", + PRIO_NETFRAMEWORK_CLR_INTEROP_STUBS_CREATED, + update_every, + RRDSET_TYPE_LINE); + + p->rd_clrinterop_interop_stubs_created = rrddim_add( + p->st_clrinterop_interop_stubs_created, "created", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrdlabels_add( + p->st_clrinterop_interop_stubs_created->rrdlabels, + "process", + windows_shared_buffer, + RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer( + p->st_clrinterop_interop_stubs_created, + p->rd_clrinterop_interop_stubs_created, + (collected_number)p->NETFrameworkCLRInteropStubsCreated.current.Data); + rrdset_done(p->st_clrinterop_interop_stubs_created); + } + } +} + +static void netdata_framework_clr_jit(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, int update_every) { + PERF_INSTANCE_DEFINITION *pi = NULL; + char id[RRD_ID_LENGTH_MAX + 1]; + for (LONG i = 0; i < pObjectType->NumInstances; i++) { + pi = perflibForEachInstance(pDataBlock, pObjectType, pi); + if (!pi) + break; + + if (!getInstanceName(pDataBlock, pObjectType, pi, windows_shared_buffer, sizeof(windows_shared_buffer))) + strncpyz(windows_shared_buffer, "[unknown]", sizeof(windows_shared_buffer) - 1); + + if (strcasecmp(windows_shared_buffer, "_Global_") == 0) + continue; + + struct net_framework_instances *p = dictionary_set(processes, windows_shared_buffer, NULL, sizeof(*p)); + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->NETFrameworkCLRJITMethods)) { + if (!p->st_clrjit_methods) { + snprintfz(id, RRD_ID_LENGTH_MAX, "%s_clrjit_methods", windows_shared_buffer); + netdata_fix_chart_name(id); + p->st_clrjit_methods = rrdset_create_localhost( + "netframework", + id, + NULL, + "jit", + "netframework.clrjit_methods", + "JIT-compiled methods", + "methods/s", + PLUGIN_WINDOWS_NAME, + "PerflibNetFramework", + PRIO_NETFRAMEWORK_CLR_JIT_METHODS, + update_every, + RRDSET_TYPE_LINE); + + p->rd_clrjit_methods = + rrddim_add(p->st_clrjit_methods, "jit-compiled", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrdlabels_add(p->st_clrjit_methods->rrdlabels, "process", windows_shared_buffer, RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer( + p->st_clrjit_methods, + p->rd_clrjit_methods, + (collected_number)p->NETFrameworkCLRJITMethods.current.Data); + rrdset_done(p->st_clrjit_methods); + } + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->NETFrameworkCLRJITFrequencyTime) && + perflibGetObjectCounter(pDataBlock, pObjectType, &p->NETFrameworkCLRJITPercentTime)) { + if (!p->st_clrjit_time) { + snprintfz(id, RRD_ID_LENGTH_MAX, "%s_clrjit_time", windows_shared_buffer); + netdata_fix_chart_name(id); + p->st_clrjit_time = rrdset_create_localhost( + "netframework", + id, + NULL, + "jit", + "netframework.clrjit_time", + "Time spent in JIT compilation", + "percentage", + PLUGIN_WINDOWS_NAME, + "PerflibNetFramework", + PRIO_NETFRAMEWORK_CLR_JIT_TIME, + update_every, + RRDSET_TYPE_LINE); + + p->rd_clrjit_time = rrddim_add(p->st_clrjit_time, "time", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE); + + rrdlabels_add(p->st_clrjit_time->rrdlabels, "process", windows_shared_buffer, RRDLABEL_SRC_AUTO); + } + + double percTime = (double)p->NETFrameworkCLRJITPercentTime.current.Data; + percTime /= (double)p->NETFrameworkCLRJITFrequencyTime.current.Data; + percTime *= 100; + rrddim_set_by_pointer(p->st_clrjit_time, p->rd_clrjit_time, (collected_number)percTime); + 
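+ // Note: the stored value is already a percentage; the raw "% Time in Jit" sample was divided by its frequency base and multiplied by 100 above.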
rrdset_done(p->st_clrjit_time); + } + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->NETFrameworkCLRJITStandardFailures)) { + if (!p->st_clrjit_standard_failures) { + snprintfz(id, RRD_ID_LENGTH_MAX, "%s_clrjit_standard_failures", windows_shared_buffer); + netdata_fix_chart_name(id); + p->st_clrjit_standard_failures = rrdset_create_localhost( + "netframework", + id, + NULL, + "jit", + "netframework.clrjit_standard_failures", + "JIT compiler failures", + "failures/s", + PLUGIN_WINDOWS_NAME, + "PerflibNetFramework", + PRIO_NETFRAMEWORK_CLR_JIT_STANDARD_FAILURES, + update_every, + RRDSET_TYPE_LINE); + + p->rd_clrjit_standard_failures = + rrddim_add(p->st_clrjit_standard_failures, "failures", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrdlabels_add( + p->st_clrjit_standard_failures->rrdlabels, "process", windows_shared_buffer, RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer( + p->st_clrjit_standard_failures, + p->rd_clrjit_standard_failures, + (collected_number)p->NETFrameworkCLRJITStandardFailures.current.Data); + rrdset_done(p->st_clrjit_standard_failures); + } + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->NETFrameworkCLRJITIlBytes)) { + if (!p->st_clrjit_il_bytes) { + snprintfz(id, RRD_ID_LENGTH_MAX, "%s_clrjit_il_bytes", windows_shared_buffer); + netdata_fix_chart_name(id); + p->st_clrjit_il_bytes = rrdset_create_localhost( + "netframework", + id, + NULL, + "jit", + "netframework.clrjit_il_bytes", + "Compiled Microsoft intermediate language (MSIL) bytes", + "bytes/s", + PLUGIN_WINDOWS_NAME, + "PerflibNetFramework", + PRIO_NETFRAMEWORK_CLR_JIT_IL_BYTES, + update_every, + RRDSET_TYPE_LINE); + + p->rd_clrjit_il_bytes = + rrddim_add(p->st_clrjit_il_bytes, "compiled_msil", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrdlabels_add(p->st_clrjit_il_bytes->rrdlabels, "process", windows_shared_buffer, RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer( + p->st_clrjit_il_bytes, + p->rd_clrjit_il_bytes, + (collected_number)p->NETFrameworkCLRJITIlBytes.current.Data); + rrdset_done(p->st_clrjit_il_bytes); + } + } +} + +static void netdata_framework_clr_loading(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, int update_every) { + char id[RRD_ID_LENGTH_MAX + 1]; + PERF_INSTANCE_DEFINITION *pi = NULL; + for (LONG i = 0; i < pObjectType->NumInstances; i++) { + pi = perflibForEachInstance(pDataBlock, pObjectType, pi); + if (!pi) + break; + + if (!getInstanceName(pDataBlock, pObjectType, pi, windows_shared_buffer, sizeof(windows_shared_buffer))) + strncpyz(windows_shared_buffer, "[unknown]", sizeof(windows_shared_buffer) - 1); + + if (strcasecmp(windows_shared_buffer, "_Global_") == 0) + continue; + + struct net_framework_instances *p = dictionary_set(processes, windows_shared_buffer, NULL, sizeof(*p)); + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->NETFrameworkCLRLoadingHeapSize)) { + if (!p->st_clrloading_heap_size) { + snprintfz(id, RRD_ID_LENGTH_MAX, "%s_clrloading_loader_heap_size", windows_shared_buffer); + netdata_fix_chart_name(id); + p->st_clrloading_heap_size = rrdset_create_localhost( + "netframework", + id, + NULL, + "loading", + "netframework.clrloading_loader_heap_size", + "Memory committed by class loader", + "bytes", + PLUGIN_WINDOWS_NAME, + "PerflibNetFramework", + PRIO_NETFRAMEWORK_CLR_LOADING_HEAP_SIZE, + update_every, + RRDSET_TYPE_LINE); + + p->rd_clrloading_heap_size = + rrddim_add(p->st_clrloading_heap_size, "committed", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + + rrdlabels_add( + p->st_clrloading_heap_size->rrdlabels, "process", 
windows_shared_buffer, RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer( + p->st_clrloading_heap_size, + p->rd_clrloading_heap_size, + (collected_number)p->NETFrameworkCLRLoadingHeapSize.current.Data); + rrdset_done(p->st_clrloading_heap_size); + } + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->NETFrameworkCLRLoadingAppDomainsLoaded)) { + if (!p->st_clrloading_app_domains_loaded) { + snprintfz(id, RRD_ID_LENGTH_MAX, "%s_clrloading_appdomains_loaded", windows_shared_buffer); + netdata_fix_chart_name(id); + p->st_clrloading_app_domains_loaded = rrdset_create_localhost( + "netframework", + id, + NULL, + "loading", + "netframework.clrloading_appdomains_loaded", + "Loaded application domains", + "domain/s", + PLUGIN_WINDOWS_NAME, + "PerflibNetFramework", + PRIO_NETFRAMEWORK_CLR_LOADING_APP_DOMAINS_LOADED, + update_every, + RRDSET_TYPE_LINE); + + p->rd_clrloading_app_domains_loaded = + rrddim_add(p->st_clrloading_app_domains_loaded, "loaded", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrdlabels_add( + p->st_clrloading_app_domains_loaded->rrdlabels, "process", windows_shared_buffer, RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer( + p->st_clrloading_app_domains_loaded, + p->rd_clrloading_app_domains_loaded, + (collected_number)p->NETFrameworkCLRLoadingAppDomainsLoaded.current.Data); + rrdset_done(p->st_clrloading_app_domains_loaded); + } + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->NETFrameworkCLRLoadingAppDomainsUnloaded)) { + if (!p->st_clrloading_app_domains_unloaded) { + snprintfz(id, RRD_ID_LENGTH_MAX, "%s_clrloading_appdomains_unloaded", windows_shared_buffer); + netdata_fix_chart_name(id); + p->st_clrloading_app_domains_unloaded = rrdset_create_localhost( + "netframework", + id, + NULL, + "loading", + "netframework.clrloading_appdomains_unloaded", + "Unloaded application domains", + "domain/s", + PLUGIN_WINDOWS_NAME, + "PerflibNetFramework", + PRIO_NETFRAMEWORK_CLR_LOADING_APP_DOMAINS_UNLOADED, + update_every, + RRDSET_TYPE_LINE); + + p->rd_clrloading_app_domains_unloaded = rrddim_add( + p->st_clrloading_app_domains_unloaded, "unloaded", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrdlabels_add( + p->st_clrloading_app_domains_unloaded->rrdlabels, + "process", + windows_shared_buffer, + RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer( + p->st_clrloading_app_domains_unloaded, + p->rd_clrloading_app_domains_unloaded, + (collected_number)p->NETFrameworkCLRLoadingAppDomainsUnloaded.current.Data); + rrdset_done(p->st_clrloading_app_domains_unloaded); + } + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->NETFrameworkCLRLoadingAssembliesLoaded)) { + if (!p->st_clrloading_assemblies_loaded) { + snprintfz(id, RRD_ID_LENGTH_MAX, "%s_clrloading_assemblies_loaded", windows_shared_buffer); + netdata_fix_chart_name(id); + p->st_clrloading_assemblies_loaded = rrdset_create_localhost( + "netframework", + id, + NULL, + "loading", + "netframework.clrloading_assemblies_loaded", + "Loaded assemblies", + "assemblies/s", + PLUGIN_WINDOWS_NAME, + "PerflibNetFramework", + PRIO_NETFRAMEWORK_CLR_LOADING_ASSEMBLIES_LOADED, + update_every, + RRDSET_TYPE_LINE); + + p->rd_clrloading_assemblies_loaded = + rrddim_add(p->st_clrloading_assemblies_loaded, "loaded", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrdlabels_add( + p->st_clrloading_assemblies_loaded->rrdlabels, "process", windows_shared_buffer, RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer( + p->st_clrloading_assemblies_loaded, + p->rd_clrloading_assemblies_loaded, + 
(collected_number)p->NETFrameworkCLRLoadingAssembliesLoaded.current.Data);
+            rrdset_done(p->st_clrloading_assemblies_loaded);
+        }
+
+        if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->NETFrameworkCLRLoadingClassesLoaded)) {
+            if (!p->st_clrloading_classes_loaded) {
+                snprintfz(id, RRD_ID_LENGTH_MAX, "%s_clrloading_classes_loaded", windows_shared_buffer);
+                netdata_fix_chart_name(id);
+                p->st_clrloading_classes_loaded = rrdset_create_localhost(
+                    "netframework",
+                    id,
+                    NULL,
+                    "loading",
+                    "netframework.clrloading_classes_loaded",
+                    "Loaded classes in all assemblies",
+                    "classes/s",
+                    PLUGIN_WINDOWS_NAME,
+                    "PerflibNetFramework",
+                    PRIO_NETFRAMEWORK_CLR_LOADING_CLASSES_LOADED,
+                    update_every,
+                    RRDSET_TYPE_LINE);
+
+                p->rd_clrloading_classes_loaded =
+                    rrddim_add(p->st_clrloading_classes_loaded, "loaded", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+                rrdlabels_add(
+                    p->st_clrloading_classes_loaded->rrdlabels, "process", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+            }
+
+            rrddim_set_by_pointer(
+                p->st_clrloading_classes_loaded,
+                p->rd_clrloading_classes_loaded,
+                (collected_number)p->NETFrameworkCLRLoadingClassesLoaded.current.Data);
+            rrdset_done(p->st_clrloading_classes_loaded);
+        }
+
+        if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->NETFrameworkCLRLoadingClassLoadFailure)) {
+            if (!p->st_clrloading_class_load_failure) {
+                snprintfz(id, RRD_ID_LENGTH_MAX, "%s_clrloading_class_load_failure", windows_shared_buffer);
+                netdata_fix_chart_name(id);
+                p->st_clrloading_class_load_failure = rrdset_create_localhost(
+                    "netframework",
+                    id,
+                    NULL,
+                    "loading",
+                    "netframework.clrloading_class_load_failures",
+                    "Class load failures",
+                    "failures/s",
+                    PLUGIN_WINDOWS_NAME,
+                    "PerflibNetFramework",
+                    PRIO_NETFRAMEWORK_CLR_LOADING_CLASS_LOAD_FAILURE,
+                    update_every,
+                    RRDSET_TYPE_LINE);
+
+                p->rd_clrloading_class_load_failure = rrddim_add(
+                    p->st_clrloading_class_load_failure, "class_load", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+                rrdlabels_add(
+                    p->st_clrloading_class_load_failure->rrdlabels, "process", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+            }
+
+            rrddim_set_by_pointer(
+                p->st_clrloading_class_load_failure,
+                p->rd_clrloading_class_load_failure,
+                (collected_number)p->NETFrameworkCLRLoadingClassLoadFailure.current.Data);
+            rrdset_done(p->st_clrloading_class_load_failure);
+        }
+    }
+}
+
+struct netdata_netframework_objects {
+    char *object;
+    void (*fnct)(PERF_DATA_BLOCK *, PERF_OBJECT_TYPE *, int);
+} netframework_obj[NETDATA_NETFRAMEWORK_END] = {
+    {.fnct = netdata_framework_clr_exceptions, .object = ".NET CLR Exceptions"},
+    {.fnct = netdata_framework_clr_interop, .object = ".NET CLR Interop"},
+    {.fnct = netdata_framework_clr_jit, .object = ".NET CLR Jit"},
+    {.fnct = netdata_framework_clr_loading, .object = ".NET CLR Loading"}};
+
+int do_PerflibNetFramework(int update_every, usec_t dt __maybe_unused) {
+    static bool initialized = false;
+
+    if (unlikely(!initialized)) {
+        initialize();
+        initialized = true;
+    }
+
+    int i;
+    for (i = 0; i < NETDATA_NETFRAMEWORK_END; i++) {
+        DWORD id = RegistryFindIDByName(netframework_obj[i].object);
+        if (id == PERFLIB_REGISTRY_NAME_NOT_FOUND)
+            continue;
+
+        PERF_DATA_BLOCK *pDataBlock = perflibGetPerformanceData(id);
+        if (!pDataBlock)
+            continue;
+
+        PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, netframework_obj[i].object);
+        if (!pObjectType)
+            continue;
+
+        netframework_obj[i].fnct(pDataBlock, pObjectType, update_every);
+    }
+
+    return 0;
+}
diff --git a/src/collectors/windows.plugin/perflib-network.c
b/src/collectors/windows.plugin/perflib-network.c index ecadd1e87..55d873b6f 100644 --- a/src/collectors/windows.plugin/perflib-network.c +++ b/src/collectors/windows.plugin/perflib-network.c @@ -1,453 +1,1047 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include "windows_plugin.h" -#include "windows-internals.h" - -// -------------------------------------------------------------------------------------------------------------------- -// network protocols - -struct network_protocol { - const char *protocol; - - struct { - COUNTER_DATA received; - COUNTER_DATA sent; - COUNTER_DATA delivered; - COUNTER_DATA forwarded; - RRDSET *st; - RRDDIM *rd_received; - RRDDIM *rd_sent; - RRDDIM *rd_forwarded; - RRDDIM *rd_delivered; - const char *type; - const char *id; - const char *family; - const char *context; - const char *title; - long priority; - } packets; - -} networks[] = { - { - .protocol = "IPv4", - .packets = { - .received = { .key = "Datagrams Received/sec" }, - .sent = { .key = "Datagrams Sent/sec" }, - .delivered = { .key = "Datagrams Received Delivered/sec" }, - .forwarded = { .key = "Datagrams Forwarded/sec" }, - .type = "ipv4", - .id = "packets", - .family = "packets", - .context = "ipv4.packets", - .title = "IPv4 Packets", - .priority = NETDATA_CHART_PRIO_IPV4_PACKETS, - }, - }, - { - .protocol = "IPv6", - .packets = { - .received = { .key = "Datagrams Received/sec" }, - .sent = { .key = "Datagrams Sent/sec" }, - .delivered = { .key = "Datagrams Received Delivered/sec" }, - .forwarded = { .key = "Datagrams Forwarded/sec" }, - .type = "ipv6", - .id = "packets", - .family = "packets", - .context = "ip6.packets", - .title = "IPv6 Packets", - .priority = NETDATA_CHART_PRIO_IPV6_PACKETS, - }, - }, - { - .protocol = "TCPv4", - .packets = { - .received = { .key = "Segments Received/sec" }, - .sent = { .key = "Segments Sent/sec" }, - .type = "ipv4", - .id = "tcppackets", - .family = "tcp", - .context = "ipv4.tcppackets", - .title = "IPv4 TCP Packets", - .priority = NETDATA_CHART_PRIO_IPV4_TCP_PACKETS, - }, - }, - { - .protocol = "TCPv6", - .packets = { - .received = { .key = "Segments Received/sec" }, - .sent = { .key = "Segments Sent/sec" }, - .type = "ipv6", - .id = "tcppackets", - .family = "tcp6", - .context = "ipv6.tcppackets", - .title = "IPv6 TCP Packets", - .priority = NETDATA_CHART_PRIO_IPV6_TCP_PACKETS, - }, - }, - { - .protocol = "UDPv4", - .packets = { - .received = { .key = "Datagrams Received/sec" }, - .sent = { .key = "Datagrams Sent/sec" }, - .type = "ipv4", - .id = "udppackets", - .family = "udp", - .context = "ipv4.udppackets", - .title = "IPv4 UDP Packets", - .priority = NETDATA_CHART_PRIO_IPV4_UDP_PACKETS, - }, - }, - { - .protocol = "UDPv6", - .packets = { - .received = { .key = "Datagrams Received/sec" }, - .sent = { .key = "Datagrams Sent/sec" }, - .type = "ipv6", - .id = "udppackets", - .family = "udp6", - .context = "ipv6.udppackets", - .title = "IPv6 UDP Packets", - .priority = NETDATA_CHART_PRIO_IPV6_UDP_PACKETS, - }, - }, - { - .protocol = "ICMP", - .packets = { - .received = { .key = "Messages Received/sec" }, - .sent = { .key = "Messages Sent/sec" }, - .type = "ipv4", - .id = "icmp", - .family = "icmp", - .context = "ipv4.icmp", - .title = "IPv4 ICMP Packets", - .priority = NETDATA_CHART_PRIO_IPV4_ICMP_PACKETS, - }, - }, - { - .protocol = "ICMPv6", - .packets = { - .received = { .key = "Messages Received/sec" }, - .sent = { .key = "Messages Sent/sec" }, - .type = "ipv6", - .id = "icmp", - .family = "icmp6", - .context = "ipv6.icmp", - .title = "IPv6 ICMP 
Packets", - .priority = NETDATA_CHART_PRIO_IPV6_ICMP_PACKETS, - }, - }, - - // terminator - { - .protocol = NULL, - } -}; - -struct network_protocol tcp46 = { - .packets = { - .type = "ip", - .id = "tcppackets", - .family = "tcp", - .context = "ip.tcppackets", - .title = "TCP Packets", - .priority = NETDATA_CHART_PRIO_IP_TCP_PACKETS, - } -}; - -static void protocol_packets_chart_update(struct network_protocol *p, int update_every) { - if(!p->packets.st) { - p->packets.st = rrdset_create_localhost( - p->packets.type - , p->packets.id - , NULL - , p->packets.family - , NULL - , p->packets.title - , "packets/s" - , PLUGIN_WINDOWS_NAME - , "PerflibNetwork" - , p->packets.priority - , update_every - , RRDSET_TYPE_AREA - ); - - p->packets.rd_received = rrddim_add(p->packets.st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - p->packets.rd_sent = rrddim_add(p->packets.st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - - if(p->packets.forwarded.key) - p->packets.rd_forwarded = rrddim_add(p->packets.st, "forwarded", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - - if(p->packets.delivered.key) - p->packets.rd_delivered = rrddim_add(p->packets.st, "delivered", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - - if(p->packets.received.updated) - rrddim_set_by_pointer(p->packets.st, p->packets.rd_received, (collected_number)p->packets.received.current.Data); - - if(p->packets.sent.updated) - rrddim_set_by_pointer(p->packets.st, p->packets.rd_sent, (collected_number)p->packets.sent.current.Data); - - if(p->packets.forwarded.key && p->packets.forwarded.updated) - rrddim_set_by_pointer(p->packets.st, p->packets.rd_forwarded, (collected_number)p->packets.forwarded.current.Data); - - if(p->packets.delivered.key && p->packets.delivered.updated) - rrddim_set_by_pointer(p->packets.st, p->packets.rd_delivered, (collected_number)p->packets.delivered.current.Data); - - rrdset_done(p->packets.st); -} - -static bool do_network_protocol(PERF_DATA_BLOCK *pDataBlock, int update_every, struct network_protocol *p) { - if(!p || !p->protocol) return false; - - PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, p->protocol); - if(!pObjectType) return false; - - size_t packets = 0; - if(p->packets.received.key) - packets += perflibGetObjectCounter(pDataBlock, pObjectType, &p->packets.received) ? 1 : 0; - - if(p->packets.sent.key) - packets += perflibGetObjectCounter(pDataBlock, pObjectType, &p->packets.sent) ? 1 : 0; - - if(p->packets.delivered.key) - packets += perflibGetObjectCounter(pDataBlock, pObjectType, &p->packets.delivered) ? 1 :0; - - if(p->packets.forwarded.key) - packets += perflibGetObjectCounter(pDataBlock, pObjectType, &p->packets.forwarded) ? 
1 : 0; - - if(packets) - protocol_packets_chart_update(p, update_every); - - return true; -} - -// -------------------------------------------------------------------------------------------------------------------- -// network interfaces - -struct network_interface { - bool collected_metadata; - - struct { - COUNTER_DATA received; - COUNTER_DATA sent; - - RRDSET *st; - RRDDIM *rd_received; - RRDDIM *rd_sent; - } packets; - - struct { - COUNTER_DATA received; - COUNTER_DATA sent; - - RRDSET *st; - RRDDIM *rd_received; - RRDDIM *rd_sent; - } traffic; -}; - -static DICTIONARY *physical_interfaces = NULL, *virtual_interfaces = NULL; - -static void network_interface_init(struct network_interface *ni) { - ni->packets.received.key = "Packets Received/sec"; - ni->packets.sent.key = "Packets Sent/sec"; - - ni->traffic.received.key = "Bytes Received/sec"; - ni->traffic.sent.key = "Bytes Sent/sec"; -} - -void dict_interface_insert_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) { - struct network_interface *ni = value; - network_interface_init(ni); -} - -static void initialize(void) { - physical_interfaces = dictionary_create_advanced(DICT_OPTION_DONT_OVERWRITE_VALUE | - DICT_OPTION_FIXED_SIZE, NULL, sizeof(struct network_interface)); - - virtual_interfaces = dictionary_create_advanced(DICT_OPTION_DONT_OVERWRITE_VALUE | - DICT_OPTION_FIXED_SIZE, NULL, sizeof(struct network_interface)); - - dictionary_register_insert_callback(physical_interfaces, dict_interface_insert_cb, NULL); - dictionary_register_insert_callback(virtual_interfaces, dict_interface_insert_cb, NULL); -} - -static void add_interface_labels(RRDSET *st, const char *name, bool physical) { - rrdlabels_add(st->rrdlabels, "device", name, RRDLABEL_SRC_AUTO); - rrdlabels_add(st->rrdlabels, "interface_type", physical ? "real" : "virtual", RRDLABEL_SRC_AUTO); -} - -static bool is_physical_interface(const char *name) { - void *d = dictionary_get(physical_interfaces, name); - return d ? true : false; -} - -static bool do_network_interface(PERF_DATA_BLOCK *pDataBlock, int update_every, bool physical) { - DICTIONARY *dict = physical_interfaces; - - PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, physical ? 
"Network Interface" : "Network Adapter"); - if(!pObjectType) return false; - - uint64_t total_received = 0, total_sent = 0; - - PERF_INSTANCE_DEFINITION *pi = NULL; - for(LONG i = 0; i < pObjectType->NumInstances ; i++) { - pi = perflibForEachInstance(pDataBlock, pObjectType, pi); - if(!pi) break; - - if(!getInstanceName(pDataBlock, pObjectType, pi, windows_shared_buffer, sizeof(windows_shared_buffer))) - strncpyz(windows_shared_buffer, "[unknown]", sizeof(windows_shared_buffer) - 1); - - if(strcasecmp(windows_shared_buffer, "_Total") == 0) - continue; - - if(!physical && is_physical_interface(windows_shared_buffer)) - // this virtual interface is already reported as physical interface - continue; - - struct network_interface *d = dictionary_set(dict, windows_shared_buffer, NULL, sizeof(*d)); - - if(!d->collected_metadata) { - // TODO - get metadata about the network interface - d->collected_metadata = true; - } - - if(perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->traffic.received) && - perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->traffic.sent)) { - - if(d->traffic.received.current.Data == 0 && d->traffic.sent.current.Data == 0) - // this interface has not received or sent any traffic - continue; - - if (unlikely(!d->traffic.st)) { - d->traffic.st = rrdset_create_localhost( - "net", - windows_shared_buffer, - NULL, - windows_shared_buffer, - "net.net", - "Bandwidth", - "kilobits/s", - PLUGIN_WINDOWS_NAME, - "PerflibNetwork", - NETDATA_CHART_PRIO_FIRST_NET_IFACE, - update_every, - RRDSET_TYPE_AREA); - - rrdset_flag_set(d->traffic.st, RRDSET_FLAG_DETAIL); - - add_interface_labels(d->traffic.st, windows_shared_buffer, physical); - - d->traffic.rd_received = rrddim_add(d->traffic.st, "received", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); - d->traffic.rd_sent = rrddim_add(d->traffic.st, "sent", NULL, -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); - } - - total_received += d->traffic.received.current.Data; - total_sent += d->traffic.sent.current.Data; - - rrddim_set_by_pointer(d->traffic.st, d->traffic.rd_received, (collected_number)d->traffic.received.current.Data); - rrddim_set_by_pointer(d->traffic.st, d->traffic.rd_sent, (collected_number)d->traffic.sent.current.Data); - rrdset_done(d->traffic.st); - } - - if(perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->packets.received) && - perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->packets.sent)) { - - if (unlikely(!d->packets.st)) { - d->packets.st = rrdset_create_localhost( - "net_packets", - windows_shared_buffer, - NULL, - windows_shared_buffer, - "net.packets", - "Packets", - "packets/s", - PLUGIN_WINDOWS_NAME, - "PerflibNetwork", - NETDATA_CHART_PRIO_FIRST_NET_IFACE + 1, - update_every, - RRDSET_TYPE_LINE); - - rrdset_flag_set(d->packets.st, RRDSET_FLAG_DETAIL); - - add_interface_labels(d->traffic.st, windows_shared_buffer, physical); - - d->packets.rd_received = rrddim_add(d->packets.st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - d->packets.rd_sent = rrddim_add(d->packets.st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - } - - rrddim_set_by_pointer(d->packets.st, d->packets.rd_received, (collected_number)d->packets.received.current.Data); - rrddim_set_by_pointer(d->packets.st, d->packets.rd_sent, (collected_number)d->packets.sent.current.Data); - rrdset_done(d->packets.st); - } - } - - if(physical) { - static RRDSET *st = NULL; - static RRDDIM *rd_received = NULL, *rd_sent = NULL; - - if (unlikely(!st)) { - st = rrdset_create_localhost( - "system", - "net", - 
NULL, - "network", - "system.net", - "Physical Network Interfaces Aggregated Bandwidth", - "kilobits/s", - PLUGIN_WINDOWS_NAME, - "PerflibNetwork", - NETDATA_CHART_PRIO_SYSTEM_NET, - update_every, - RRDSET_TYPE_AREA); - - rd_received = rrddim_add(st, "received", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); - rd_sent = rrddim_add(st, "sent", NULL, -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); - } - - rrddim_set_by_pointer(st, rd_received, (collected_number)total_received); - rrddim_set_by_pointer(st, rd_sent, (collected_number)total_sent); - rrdset_done(st); - } - - return true; -} - -int do_PerflibNetwork(int update_every, usec_t dt __maybe_unused) { - static bool initialized = false; - - if(unlikely(!initialized)) { - initialize(); - initialized = true; - } - - DWORD id = RegistryFindIDByName("Network Interface"); - if(id == PERFLIB_REGISTRY_NAME_NOT_FOUND) - return -1; - - PERF_DATA_BLOCK *pDataBlock = perflibGetPerformanceData(id); - if(!pDataBlock) return -1; - - do_network_interface(pDataBlock, update_every, true); - do_network_interface(pDataBlock, update_every, false); - - struct network_protocol *tcp4 = NULL, *tcp6 = NULL; - for(size_t i = 0; networks[i].protocol ;i++) { - do_network_protocol(pDataBlock, update_every, &networks[i]); - - if(!tcp4 && strcmp(networks[i].protocol, "TCPv4") == 0) - tcp4 = &networks[i]; - if(!tcp6 && strcmp(networks[i].protocol, "TCPv6") == 0) - tcp6 = &networks[i]; - } - - if(tcp4 && tcp6) { - tcp46.packets.received = tcp4->packets.received; - tcp46.packets.sent = tcp4->packets.sent; - tcp46.packets.received.current.Data += tcp6->packets.received.current.Data; - tcp46.packets.sent.current.Data += tcp6->packets.sent.current.Data; - protocol_packets_chart_update(&tcp46, update_every); - } - - return 0; -} +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "windows_plugin.h" +#include "windows-internals.h" + +#define ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, counter) \ + do { \ + if ((p)->packets.counter.key) { \ + packets += perflibGetObjectCounter((pDataBlock), (pObjectType), &(p)->packets.counter) ? 
1 : 0; \ + } \ + } while (0) + +#define SET_DIM_IF_KEY_AND_UPDATED(p, field) \ + do { \ + if ((p)->packets.field.key && (p)->packets.field.updated) { \ + rrddim_set_by_pointer( \ + (p)->packets.st, (p)->packets.rd_##field, (collected_number)(p)->packets.field.current.Data); \ + } \ + } while (0) + +#define ADD_RRD_DIM_IF_KEY(packet_field, id, name, multiplier, algorithm) \ + do { \ + if (p->packets.packet_field.key) \ + p->packets.rd_##packet_field = rrddim_add(st, id, name, multiplier, 1, algorithm); \ + } while (0) + +// -------------------------------------------------------------------------------------------------------------------- +// network protocols + +struct network_protocol { + const char *protocol; + + struct { + COUNTER_DATA received; + COUNTER_DATA sent; + COUNTER_DATA delivered; + COUNTER_DATA forwarded; + + COUNTER_DATA InDiscards; + COUNTER_DATA OutDiscards; + COUNTER_DATA InHdrErrors; + COUNTER_DATA InAddrErrors; + COUNTER_DATA InUnknownProtos; + COUNTER_DATA InTooBigErrors; + COUNTER_DATA InTruncatedPkts; + COUNTER_DATA InNoRoutes; + COUNTER_DATA OutNoRoutes; + + COUNTER_DATA InEchoReps; + COUNTER_DATA OutEchoReps; + COUNTER_DATA InDestUnreachs; + COUNTER_DATA OutDestUnreachs; + COUNTER_DATA InRedirects; + COUNTER_DATA OutRedirects; + COUNTER_DATA InEchos; + COUNTER_DATA OutEchos; + COUNTER_DATA InRouterAdvert; + COUNTER_DATA OutRouterAdvert; + COUNTER_DATA InRouterSelect; + COUNTER_DATA OutRouterSelect; + COUNTER_DATA InTimeExcds; + COUNTER_DATA OutTimeExcds; + COUNTER_DATA InParmProbs; + COUNTER_DATA OutParmProbs; + COUNTER_DATA InTimestamps; + COUNTER_DATA OutTimestamps; + COUNTER_DATA InTimestampReps; + COUNTER_DATA OutTimestampReps; + + RRDSET *st; + RRDDIM *rd_received; + RRDDIM *rd_sent; + RRDDIM *rd_forwarded; + RRDDIM *rd_delivered; + + RRDDIM *rd_InDiscards; + RRDDIM *rd_OutDiscards; + RRDDIM *rd_InHdrErrors; + RRDDIM *rd_InAddrErrors; + RRDDIM *rd_InUnknownProtos; + RRDDIM *rd_InTooBigErrors; + RRDDIM *rd_InTruncatedPkts; + RRDDIM *rd_InNoRoutes; + RRDDIM *rd_OutNoRoutes; + + RRDDIM *rd_InEchoReps; + RRDDIM *rd_OutEchoReps; + RRDDIM *rd_InDestUnreachs; + RRDDIM *rd_OutDestUnreachs; + RRDDIM *rd_InRedirects; + RRDDIM *rd_OutRedirects; + RRDDIM *rd_InEchos; + RRDDIM *rd_OutEchos; + RRDDIM *rd_InRouterAdvert; + RRDDIM *rd_OutRouterAdvert; + RRDDIM *rd_InRouterSelect; + RRDDIM *rd_OutRouterSelect; + RRDDIM *rd_InTimeExcds; + RRDDIM *rd_OutTimeExcds; + RRDDIM *rd_InParmProbs; + RRDDIM *rd_OutParmProbs; + RRDDIM *rd_InTimestamps; + RRDDIM *rd_OutTimestamps; + RRDDIM *rd_InTimestampReps; + RRDDIM *rd_OutTimestampReps; + + const char *type; + const char *id; + const char *family; + const char *context; + const char *title; + long priority; + } packets; + +} networks[] = { + { + .protocol = "IPv4", + .packets = { + .received = { .key = "Datagrams Received/sec" }, + .sent = { .key = "Datagrams Sent/sec" }, + .delivered = { .key = "Datagrams Received Delivered/sec" }, + .forwarded = { .key = "Datagrams Forwarded/sec" }, + .type = "ipv4", + .id = "packets", + .family = "packets", + .context = "ipv4.packets", + .title = "IPv4 Packets", + .priority = NETDATA_CHART_PRIO_IPV4_PACKETS, + }, + }, + { + .protocol = "IPv6", + .packets = { + .received = { .key = "Datagrams Received/sec" }, + .sent = { .key = "Datagrams Sent/sec" }, + .delivered = { .key = "Datagrams Received Delivered/sec" }, + .forwarded = { .key = "Datagrams Forwarded/sec" }, + .type = "ipv6", + .id = "packets", + .family = "packets", + .context = "ip6.packets", + .title = "IPv6 Packets", + .priority = 
NETDATA_CHART_PRIO_IPV6_PACKETS,
+        },
+    },
+    {
+        .protocol = "TCPv4",
+        .packets = {
+            .received = { .key = "Segments Received/sec" },
+            .sent = { .key = "Segments Sent/sec" },
+            .type = "ipv4",
+            .id = "tcppackets",
+            .family = "tcp",
+            .context = "ipv4.tcppackets",
+            .title = "IPv4 TCP Packets",
+            .priority = NETDATA_CHART_PRIO_IPV4_TCP_PACKETS,
+        },
+    },
+    {
+        .protocol = "TCPv6",
+        .packets = {
+            .received = { .key = "Segments Received/sec" },
+            .sent = { .key = "Segments Sent/sec" },
+            .type = "ipv6",
+            .id = "tcppackets",
+            .family = "tcp6",
+            .context = "ipv6.tcppackets",
+            .title = "IPv6 TCP Packets",
+            .priority = NETDATA_CHART_PRIO_IPV6_TCP_PACKETS,
+        },
+    },
+    {
+        .protocol = "UDPv4",
+        .packets = {
+            .received = { .key = "Datagrams Received/sec" },
+            .sent = { .key = "Datagrams Sent/sec" },
+            .type = "ipv4",
+            .id = "udppackets",
+            .family = "udp",
+            .context = "ipv4.udppackets",
+            .title = "IPv4 UDP Packets",
+            .priority = NETDATA_CHART_PRIO_IPV4_UDP_PACKETS,
+        },
+    },
+    {
+        .protocol = "UDPv6",
+        .packets = {
+            .received = { .key = "Datagrams Received/sec" },
+            .sent = { .key = "Datagrams Sent/sec" },
+            .type = "ipv6",
+            .id = "udppackets",
+            .family = "udp6",
+            .context = "ipv6.udppackets",
+            .title = "IPv6 UDP Packets",
+            .priority = NETDATA_CHART_PRIO_IPV6_UDP_PACKETS,
+        },
+    },
+    {
+        .protocol = "ICMP",
+        .packets = {
+            .received = { .key = "Messages Received/sec" },
+            .sent = { .key = "Messages Sent/sec" },
+            .type = "ipv4",
+            .id = "icmp",
+            .family = "icmp",
+            .context = "ipv4.icmp",
+            .title = "IPv4 ICMP Packets",
+            .priority = NETDATA_CHART_PRIO_IPV4_ICMP_PACKETS,
+        },
+    },
+    {
+        .protocol = "ICMPv6",
+        .packets = {
+            .received = { .key = "Messages Received/sec" },
+            .sent = { .key = "Messages Sent/sec" },
+            .type = "ipv6",
+            .id = "icmp",
+            .family = "icmp6",
+            .context = "ipv6.icmp",
+            .title = "IPv6 ICMP Packets",
+            .priority = NETDATA_CHART_PRIO_IPV6_ICMP_PACKETS,
+        },
+    },
+
+    {
+        .protocol = "IPv4",
+        .packets = {
+            .InDiscards = { .key = "Datagrams Received Discarded" },
+            .OutDiscards = { .key = "Datagrams Outbound Discarded" },
+            .OutNoRoutes = { .key = "Datagrams Outbound No Route" },
+            .InAddrErrors = { .key = "Datagrams Received Address Errors" },
+            .InHdrErrors = { .key = "Datagrams Received Header Errors" },
+            .InUnknownProtos = { .key = "Datagrams Received Unknown Protocol" },
+            .type = "ipv4",
+            .id = "errors",
+            .family = "errors",
+            .context = "ipv4.errors",
+            .title = "IPv4 errors",
+            .priority = NETDATA_CHART_PRIO_IPV4_ERRORS,
+        },
+    },
+    {
+        .protocol = "IPv6",
+        .packets = {
+            .InDiscards = { .key = "Datagrams Received Discarded" },
+            .OutDiscards = { .key = "Datagrams Outbound Discarded" },
+            .OutNoRoutes = { .key = "Datagrams Outbound No Route" },
+            .InAddrErrors = { .key = "Datagrams Received Address Errors" },
+            .InHdrErrors = { .key = "Datagrams Received Header Errors" },
+            .InUnknownProtos = { .key = "Datagrams Received Unknown Protocol" },
+            .type = "ipv6",
+            .id = "errors",
+            .family = "errors",
+            .context = "ipv6.errors",
+            .title = "IPv6 errors",
+            .priority = NETDATA_CHART_PRIO_IPV6_ERRORS,
+        },
+    },
+    {
+        .protocol = "ICMP",
+        .packets =
+            {
+                .InEchoReps = {.key = "Received Echo Reply/sec"},
+                .OutEchoReps = {.key = "Sent Echo Reply/sec"},
+                .InDestUnreachs = {.key = "Received Dest. Unreachable"},
+                .OutDestUnreachs = {.key = "Sent Destination Unreachable"},
+                .InRedirects = {.key = "Received Redirect/sec"},
+                .OutRedirects = {.key = "Sent Redirect/sec"},
+                .InEchos = {.key = "Received Echo/sec"},
+                .OutEchos = {.key = "Sent Echo/sec"},
+                .InRouterAdvert = {.key = NULL},
+                .OutRouterAdvert = {.key = NULL},
+                .InRouterSelect = {.key = NULL},
+                .OutRouterSelect = {.key = NULL},
+                .InTimeExcds = {.key = "Received Time Exceeded"},
+                .OutTimeExcds = {.key = "Sent Time Exceeded"},
+                .InParmProbs = {.key = "Received Parameter Problem"},
+                .OutParmProbs = {.key = "Sent Parameter Problem"},
+                .InTimestamps = {.key = "Received Timestamp/sec"},
+                .OutTimestamps = {.key = "Sent Timestamp/sec"},
+                .InTimestampReps = {.key = "Received Timestamp Reply/sec"},
+                .OutTimestampReps = {.key = "Sent Timestamp Reply/sec"},
+
+                .type = "ipv4",
+                .id = "icmpmsg",
+                .family = "icmp",
+                .context = "ipv4.icmpmsg",
+                .title = "IPv4 ICMP Messages",
+                .priority = NETDATA_CHART_PRIO_IPV4_ICMP_MESSAGES,
+            },
+    },
+    {
+        .protocol = "ICMPv6",
+        .packets =
+            {
+                .InEchoReps = {.key = "Received Echo Reply/sec"},
+                .OutEchoReps = {.key = "Sent Echo Reply/sec"},
+                .InDestUnreachs = {.key = "Received Dest. Unreachable"},
+                .OutDestUnreachs = {.key = "Sent Destination Unreachable"},
+                .InRedirects = {.key = "Received Redirect/sec"},
+                .OutRedirects = {.key = "Sent Redirect/sec"},
+                .InEchos = {.key = "Received Echo/sec"},
+                .OutEchos = {.key = "Sent Echo/sec"},
+                .InRouterAdvert = {.key = NULL},
+                .OutRouterAdvert = {.key = NULL},
+                .InRouterSelect = {.key = NULL},
+                .OutRouterSelect = {.key = NULL},
+                .InTimeExcds = {.key = "Received Time Exceeded"},
+                .OutTimeExcds = {.key = "Sent Time Exceeded"},
+                .InParmProbs = {.key = "Received Parameter Problem"},
+                .OutParmProbs = {.key = "Sent Parameter Problem"},
+                .InTimestamps = {.key = "Received Timestamp/sec"},
+                .OutTimestamps = {.key = "Sent Timestamp/sec"},
+                .InTimestampReps = {.key = "Received Timestamp Reply/sec"},
+                .OutTimestampReps = {.key = "Sent Timestamp Reply/sec"},
+
+                .type = "ipv6",
+                .id = "icmpmsg",
+                .family = "icmp6",
+                .context = "ipv6.icmpmsg",
+                .title = "IPv6 ICMP Messages",
+                .priority = NETDATA_CHART_PRIO_IPV6_ICMP_MESSAGES,
+            },
+    },
+
+    // terminator
+    {
+        .protocol = NULL,
+    }
+};
+
+struct network_protocol tcp46 = {
+    .packets = {
+        .type = "ip",
+        .id = "tcppackets",
+        .family = "tcp",
+        .context = "ip.tcppackets",
+        .title = "TCP Packets",
+        .priority = NETDATA_CHART_PRIO_IP_TCP_PACKETS,
+    }
+};
+
+static void protocol_packets_chart_update(struct network_protocol *p, int update_every) {
+    if(!p->packets.st) {
+        p->packets.st = rrdset_create_localhost(
+            p->packets.type
+            , p->packets.id
+            , NULL
+            , p->packets.family
+            , NULL
+            , p->packets.title
+            , "packets/s"
+            , PLUGIN_WINDOWS_NAME
+            , "PerflibNetwork"
+            , p->packets.priority
+            , update_every
+            , RRDSET_TYPE_AREA
+        );
+
+        RRDSET *st = p->packets.st;
+
+        ADD_RRD_DIM_IF_KEY(received, "received", NULL, 1, RRD_ALGORITHM_INCREMENTAL);
+        ADD_RRD_DIM_IF_KEY(sent, "sent", NULL, -1, RRD_ALGORITHM_INCREMENTAL);
+        ADD_RRD_DIM_IF_KEY(forwarded, "forwarded", NULL, -1, RRD_ALGORITHM_INCREMENTAL);
+        ADD_RRD_DIM_IF_KEY(delivered, "delivered", NULL, 1, RRD_ALGORITHM_INCREMENTAL);
+        ADD_RRD_DIM_IF_KEY(InDiscards, "InDiscards", NULL, 1, RRD_ALGORITHM_INCREMENTAL);
+        ADD_RRD_DIM_IF_KEY(OutDiscards, "OutDiscards", NULL, -1, RRD_ALGORITHM_INCREMENTAL);
+        ADD_RRD_DIM_IF_KEY(InHdrErrors, "InHdrErrors", NULL, 1, RRD_ALGORITHM_INCREMENTAL);
+        ADD_RRD_DIM_IF_KEY(InAddrErrors, "InAddrErrors", NULL, 1,
RRD_ALGORITHM_INCREMENTAL); + ADD_RRD_DIM_IF_KEY(InUnknownProtos, "InUnknownProtos", NULL, 1, RRD_ALGORITHM_INCREMENTAL); + ADD_RRD_DIM_IF_KEY(InTooBigErrors, "InTooBigErrors", NULL, 1, RRD_ALGORITHM_INCREMENTAL); + ADD_RRD_DIM_IF_KEY(InTruncatedPkts, "InTruncatedPkts", NULL, 1, RRD_ALGORITHM_INCREMENTAL); + ADD_RRD_DIM_IF_KEY(InNoRoutes, "InNoRoutes", NULL, 1, RRD_ALGORITHM_INCREMENTAL); + ADD_RRD_DIM_IF_KEY(OutNoRoutes, "OutNoRoutes", NULL, -1, RRD_ALGORITHM_INCREMENTAL); + ADD_RRD_DIM_IF_KEY(InEchoReps, "InType0", "InEchoReps", 1, RRD_ALGORITHM_INCREMENTAL); + ADD_RRD_DIM_IF_KEY(OutEchoReps, "OutType0", "OutEchoReps", -1, RRD_ALGORITHM_INCREMENTAL); + ADD_RRD_DIM_IF_KEY(InDestUnreachs, "InType3", "InDestUnreachs", 1, RRD_ALGORITHM_INCREMENTAL); + ADD_RRD_DIM_IF_KEY(OutDestUnreachs, "OutType3", "OutDestUnreachs", -1, RRD_ALGORITHM_INCREMENTAL); + ADD_RRD_DIM_IF_KEY(InRedirects, "InType5", "InRedirects", 1, RRD_ALGORITHM_INCREMENTAL); + ADD_RRD_DIM_IF_KEY(OutRedirects, "OutType5", "OutRedirects", -1, RRD_ALGORITHM_INCREMENTAL); + ADD_RRD_DIM_IF_KEY(InEchos, "InType8", "InEchos", 1, RRD_ALGORITHM_INCREMENTAL); + ADD_RRD_DIM_IF_KEY(OutEchos, "OutType8", "OutEchos", -1, RRD_ALGORITHM_INCREMENTAL); + ADD_RRD_DIM_IF_KEY(InRouterAdvert, "InType9", "InRouterAdvert", 1, RRD_ALGORITHM_INCREMENTAL); + ADD_RRD_DIM_IF_KEY(OutRouterAdvert, "OutType9", "OutRouterAdvert", -1, RRD_ALGORITHM_INCREMENTAL); + ADD_RRD_DIM_IF_KEY(InRouterSelect, "InType10", "InRouterSelect", 1, RRD_ALGORITHM_INCREMENTAL); + ADD_RRD_DIM_IF_KEY(OutRouterSelect, "OutType10", "OutRouterSelect", -1, RRD_ALGORITHM_INCREMENTAL); + ADD_RRD_DIM_IF_KEY(InTimeExcds, "InType11", "InTimeExcds", 1, RRD_ALGORITHM_INCREMENTAL); + ADD_RRD_DIM_IF_KEY(OutTimeExcds, "OutType11", "OutTimeExcds", -1, RRD_ALGORITHM_INCREMENTAL); + ADD_RRD_DIM_IF_KEY(InParmProbs, "InType12", "InParmProbs", 1, RRD_ALGORITHM_INCREMENTAL); + ADD_RRD_DIM_IF_KEY(OutParmProbs, "OutType12", "OutParmProbs", -1, RRD_ALGORITHM_INCREMENTAL); + ADD_RRD_DIM_IF_KEY(InTimestamps, "InType13", "InTimestamps", 1, RRD_ALGORITHM_INCREMENTAL); + ADD_RRD_DIM_IF_KEY(OutTimestamps, "OutType13", "OutTimestamps", -1, RRD_ALGORITHM_INCREMENTAL); + ADD_RRD_DIM_IF_KEY(InTimestampReps, "InType14", "InTimestampReps", 1, RRD_ALGORITHM_INCREMENTAL); + ADD_RRD_DIM_IF_KEY(OutTimestampReps, "OutType14", "OutTimestampReps", -1, RRD_ALGORITHM_INCREMENTAL); + + } + + SET_DIM_IF_KEY_AND_UPDATED(p, received); + SET_DIM_IF_KEY_AND_UPDATED(p, sent); + + SET_DIM_IF_KEY_AND_UPDATED(p, forwarded); + SET_DIM_IF_KEY_AND_UPDATED(p, delivered); + SET_DIM_IF_KEY_AND_UPDATED(p, InDiscards); + SET_DIM_IF_KEY_AND_UPDATED(p, OutDiscards); + SET_DIM_IF_KEY_AND_UPDATED(p, InHdrErrors); + SET_DIM_IF_KEY_AND_UPDATED(p, InAddrErrors); + SET_DIM_IF_KEY_AND_UPDATED(p, InUnknownProtos); + SET_DIM_IF_KEY_AND_UPDATED(p, InTooBigErrors); + SET_DIM_IF_KEY_AND_UPDATED(p, InTruncatedPkts); + SET_DIM_IF_KEY_AND_UPDATED(p, InNoRoutes); + SET_DIM_IF_KEY_AND_UPDATED(p, OutNoRoutes); + SET_DIM_IF_KEY_AND_UPDATED(p, InEchoReps); + SET_DIM_IF_KEY_AND_UPDATED(p, OutEchoReps); + SET_DIM_IF_KEY_AND_UPDATED(p, InDestUnreachs); + SET_DIM_IF_KEY_AND_UPDATED(p, OutDestUnreachs); + SET_DIM_IF_KEY_AND_UPDATED(p, InRedirects); + SET_DIM_IF_KEY_AND_UPDATED(p, OutRedirects); + SET_DIM_IF_KEY_AND_UPDATED(p, InEchos); + SET_DIM_IF_KEY_AND_UPDATED(p, OutEchos); + SET_DIM_IF_KEY_AND_UPDATED(p, InRouterAdvert); + SET_DIM_IF_KEY_AND_UPDATED(p, OutRouterAdvert); + SET_DIM_IF_KEY_AND_UPDATED(p, InRouterSelect); + SET_DIM_IF_KEY_AND_UPDATED(p, OutRouterSelect); 
+ SET_DIM_IF_KEY_AND_UPDATED(p, InTimeExcds); + SET_DIM_IF_KEY_AND_UPDATED(p, OutTimeExcds); + SET_DIM_IF_KEY_AND_UPDATED(p, InParmProbs); + SET_DIM_IF_KEY_AND_UPDATED(p, OutParmProbs); + SET_DIM_IF_KEY_AND_UPDATED(p, InTimestamps); + SET_DIM_IF_KEY_AND_UPDATED(p, OutTimestamps); + SET_DIM_IF_KEY_AND_UPDATED(p, InTimestampReps); + SET_DIM_IF_KEY_AND_UPDATED(p, OutTimestampReps); + + rrdset_done(p->packets.st); +} + +static bool do_network_protocol(PERF_DATA_BLOCK *pDataBlock, int update_every, struct network_protocol *p) { + if(!p || !p->protocol) return false; + + PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, p->protocol); + if(!pObjectType) return false; + + size_t packets = 0; + ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, received); + ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, sent); + ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, delivered); + ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, forwarded); + + ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, InDiscards); + ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, OutDiscards); + ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, InHdrErrors); + ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, InAddrErrors); + ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, InUnknownProtos); + ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, InTooBigErrors); + ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, InTruncatedPkts); + ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, InNoRoutes); + ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, OutNoRoutes); + ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, InEchoReps); + ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, OutEchoReps); + ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, InDestUnreachs); + ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, OutDestUnreachs); + + ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, InRedirects); + ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, OutRedirects); + ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, InEchos); + ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, OutEchos); + ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, InRouterAdvert); + ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, OutRouterAdvert); + ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, InRouterSelect); + ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, OutRouterSelect); + ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, InTimeExcds); + ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, OutTimeExcds); + ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, InParmProbs); + ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, OutParmProbs); + ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, InTimestamps); + ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, OutTimestamps); + ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, InTimestampReps); + ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, OutTimestampReps); + + if(packets) + protocol_packets_chart_update(p, update_every); + + return true; +} + +// -------------------------------------------------------------------------------------------------------------------- +// network interfaces + +struct network_interface { + usec_t last_collected; + bool collected_metadata; + + struct { + COUNTER_DATA received; + COUNTER_DATA sent; + + RRDSET *st; + RRDDIM *rd_received; + RRDDIM *rd_sent; + } packets; + + struct { + const 
RRDVAR_ACQUIRED *chart_var_speed; + + COUNTER_DATA received; + COUNTER_DATA sent; + + RRDSET *st; + RRDDIM *rd_received; + RRDDIM *rd_sent; + } traffic; + + struct { + COUNTER_DATA current_bandwidth; + RRDSET *st; + RRDDIM *rd; + } speed; + + struct { + COUNTER_DATA received; + COUNTER_DATA outbound; + + RRDSET *st; + RRDDIM *rd_received; + RRDDIM *rd_outbound; + } discards; + + struct { + COUNTER_DATA received; + COUNTER_DATA outbound; + + RRDSET *st; + RRDDIM *rd_received; + RRDDIM *rd_outbound; + } errors; + + struct { + COUNTER_DATA length; + RRDSET *st; + RRDDIM *rd; + } queue; + + struct { + COUNTER_DATA connections; + RRDSET *st; + RRDDIM *rd; + } chimney; + + struct { + COUNTER_DATA connections; + COUNTER_DATA packets; + COUNTER_DATA exceptions; + COUNTER_DATA average_packet_size; + + RRDSET *st_connections; + RRDDIM *rd_connections; + + RRDSET *st_packets; + RRDDIM *rd_packets; + + RRDSET *st_exceptions; + RRDDIM *rd_exceptions; + + RRDSET *st_average_packet_size; + RRDDIM *rd_average_packet_size; + } rsc; +}; + +static DICTIONARY *physical_interfaces = NULL, *virtual_interfaces = NULL; + +static void network_interface_init(struct network_interface *d) { + d->packets.received.key = "Packets Received/sec"; + d->packets.sent.key = "Packets Sent/sec"; + d->traffic.received.key = "Bytes Received/sec"; + d->traffic.sent.key = "Bytes Sent/sec"; + d->speed.current_bandwidth.key = "Current Bandwidth"; + d->discards.received.key = "Packets Received Discarded"; + d->discards.outbound.key = "Packets Outbound Discarded"; + d->errors.received.key = "Packets Received Errors"; + d->errors.outbound.key = "Packets Outbound Errors"; + d->queue.length.key = "Output Queue Length"; + d->chimney.connections.key = "Offloaded Connections"; + d->rsc.connections.key = "TCP Active RSC Connections"; + d->rsc.packets.key = "TCP RSC Coalesced Packets/sec"; + d->rsc.exceptions.key = "TCP RSC Exceptions/sec"; + d->rsc.average_packet_size.key = "TCP RSC Average Packet Size"; +} + +static void network_interface_cleanup(struct network_interface *d) { + rrdvar_chart_variable_release(d->traffic.st, d->traffic.chart_var_speed); + rrdset_is_obsolete___safe_from_collector_thread(d->packets.st); + rrdset_is_obsolete___safe_from_collector_thread(d->traffic.st); + rrdset_is_obsolete___safe_from_collector_thread(d->speed.st); + rrdset_is_obsolete___safe_from_collector_thread(d->discards.st); + rrdset_is_obsolete___safe_from_collector_thread(d->errors.st); + rrdset_is_obsolete___safe_from_collector_thread(d->queue.st); + rrdset_is_obsolete___safe_from_collector_thread(d->chimney.st); + rrdset_is_obsolete___safe_from_collector_thread(d->rsc.st_connections); + rrdset_is_obsolete___safe_from_collector_thread(d->rsc.st_packets); + rrdset_is_obsolete___safe_from_collector_thread(d->rsc.st_exceptions); + rrdset_is_obsolete___safe_from_collector_thread(d->rsc.st_average_packet_size); +} + +void dict_interface_insert_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) { + struct network_interface *ni = value; + network_interface_init(ni); +} + +static void initialize(void) { + physical_interfaces = dictionary_create_advanced(DICT_OPTION_DONT_OVERWRITE_VALUE | + DICT_OPTION_FIXED_SIZE, NULL, sizeof(struct network_interface)); + + virtual_interfaces = dictionary_create_advanced(DICT_OPTION_DONT_OVERWRITE_VALUE | + DICT_OPTION_FIXED_SIZE, NULL, sizeof(struct network_interface)); + + dictionary_register_insert_callback(physical_interfaces, dict_interface_insert_cb, NULL); + 
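+    // both dictionaries share one insert callback, so each new interface
+    // entry gets its perflib counter keys initialized exactly once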
dictionary_register_insert_callback(virtual_interfaces, dict_interface_insert_cb, NULL); +} + +static void add_interface_labels(RRDSET *st, const char *name, bool physical) { + rrdlabels_add(st->rrdlabels, "device", name, RRDLABEL_SRC_AUTO); + rrdlabels_add(st->rrdlabels, "interface_type", physical ? "real" : "virtual", RRDLABEL_SRC_AUTO); +} + +static bool is_physical_interface(const char *name) { + void *d = dictionary_get(physical_interfaces, name); + return d ? true : false; +} + +static bool do_network_interface(PERF_DATA_BLOCK *pDataBlock, int update_every, bool physical, usec_t now_ut) { + DICTIONARY *dict = physical ? physical_interfaces : virtual_interfaces; + + PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, physical ? "Network Interface" : "Network Adapter"); + if(!pObjectType) return false; + + uint64_t total_received = 0, total_sent = 0; + + PERF_INSTANCE_DEFINITION *pi = NULL; + for(LONG i = 0; i < pObjectType->NumInstances ; i++) { + pi = perflibForEachInstance(pDataBlock, pObjectType, pi); + if(!pi) break; + + if(!getInstanceName(pDataBlock, pObjectType, pi, windows_shared_buffer, sizeof(windows_shared_buffer))) + strncpyz(windows_shared_buffer, "[unknown]", sizeof(windows_shared_buffer) - 1); + + if(strcasecmp(windows_shared_buffer, "_Total") == 0) + continue; + + if(!physical && is_physical_interface(windows_shared_buffer)) + // this virtual interface is already reported as physical interface + continue; + + struct network_interface *d = dictionary_set(dict, windows_shared_buffer, NULL, sizeof(*d)); + d->last_collected = now_ut; + + if(!d->collected_metadata) { + // TODO - get metadata about the network interface + d->collected_metadata = true; + } + + if(perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->traffic.received) && + perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->traffic.sent)) { + + if(d->traffic.received.current.Data == 0 && d->traffic.sent.current.Data == 0) + // this interface has not received or sent any traffic yet + continue; + + if (unlikely(!d->traffic.st)) { + d->traffic.st = rrdset_create_localhost( + "net", + windows_shared_buffer, + NULL, + windows_shared_buffer, + "net.net", + "Bandwidth", + "kilobits/s", + PLUGIN_WINDOWS_NAME, + "PerflibNetwork", + NETDATA_CHART_PRIO_FIRST_NET_IFACE, + update_every, + RRDSET_TYPE_AREA); + + add_interface_labels(d->traffic.st, windows_shared_buffer, physical); + + d->traffic.rd_received = rrddim_add(d->traffic.st, "received", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); + d->traffic.rd_sent = rrddim_add(d->traffic.st, "sent", NULL, -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); + + d->traffic.chart_var_speed = rrdvar_chart_variable_add_and_acquire(d->traffic.st, "nic_speed_max"); + rrdvar_chart_variable_set(d->traffic.st, d->traffic.chart_var_speed, NAN); + } + + total_received += d->traffic.received.current.Data; + total_sent += d->traffic.sent.current.Data; + + rrddim_set_by_pointer(d->traffic.st, d->traffic.rd_received, (collected_number)d->traffic.received.current.Data); + rrddim_set_by_pointer(d->traffic.st, d->traffic.rd_sent, (collected_number)d->traffic.sent.current.Data); + rrdset_done(d->traffic.st); + } + + if(perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->packets.received) && + perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->packets.sent)) { + + if (unlikely(!d->packets.st)) { + d->packets.st = rrdset_create_localhost( + "net_packets", + windows_shared_buffer, + NULL, + windows_shared_buffer, + "net.packets", + "Packets", 
+ "packets/s", + PLUGIN_WINDOWS_NAME, + "PerflibNetwork", + NETDATA_CHART_PRIO_FIRST_NET_IFACE + 1, + update_every, + RRDSET_TYPE_LINE); + + add_interface_labels(d->packets.st, windows_shared_buffer, physical); + + d->packets.rd_received = rrddim_add(d->packets.st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + d->packets.rd_sent = rrddim_add(d->packets.st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + + rrddim_set_by_pointer(d->packets.st, d->packets.rd_received, (collected_number)d->packets.received.current.Data); + rrddim_set_by_pointer(d->packets.st, d->packets.rd_sent, (collected_number)d->packets.sent.current.Data); + rrdset_done(d->packets.st); + } + + if(perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->speed.current_bandwidth)) { + if(unlikely(!d->speed.st)) { + d->speed.st = rrdset_create_localhost( + "net_speed" + , windows_shared_buffer + , NULL + , windows_shared_buffer + , "net.speed" + , "Interface Speed" + , "kilobits/s" + , PLUGIN_WINDOWS_NAME + , "PerflibNetwork" + , NETDATA_CHART_PRIO_FIRST_NET_IFACE + 10 + , update_every + , RRDSET_TYPE_LINE + ); + + add_interface_labels(d->speed.st, windows_shared_buffer, physical); + + d->speed.rd = rrddim_add(d->speed.st, "speed", NULL, 1, BITS_IN_A_KILOBIT, RRD_ALGORITHM_ABSOLUTE); + } + + rrddim_set_by_pointer(d->speed.st, d->speed.rd, (collected_number)d->speed.current_bandwidth.current.Data); + rrdset_done(d->speed.st); + + rrdvar_chart_variable_set(d->traffic.st, d->traffic.chart_var_speed, + (NETDATA_DOUBLE)d->speed.current_bandwidth.current.Data / BITS_IN_A_KILOBIT); + } + + if(perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->errors.received) && + perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->errors.outbound)) { + + if (unlikely(!d->errors.st)) { + d->errors.st = rrdset_create_localhost( + "net_errors", + windows_shared_buffer, + NULL, + windows_shared_buffer, + "net.errors", + "Interface Errors", + "errors/s", + PLUGIN_WINDOWS_NAME, + "PerflibNetwork", + NETDATA_CHART_PRIO_FIRST_NET_IFACE + 3, + update_every, + RRDSET_TYPE_LINE); + + add_interface_labels(d->errors.st, windows_shared_buffer, physical); + + d->errors.rd_received = rrddim_add(d->errors.st, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + d->errors.rd_outbound = rrddim_add(d->errors.st, "outbound", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + + rrddim_set_by_pointer(d->errors.st, d->errors.rd_received, (collected_number)d->errors.received.current.Data); + rrddim_set_by_pointer(d->errors.st, d->errors.rd_outbound, (collected_number)d->errors.outbound.current.Data); + rrdset_done(d->errors.st); + } + + if(perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->discards.received) && + perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->discards.outbound)) { + + if (unlikely(!d->discards.st)) { + d->discards.st = rrdset_create_localhost( + "net_drops", + windows_shared_buffer, + NULL, + windows_shared_buffer, + "net.drops", + "Interface Drops", + "drops/s", + PLUGIN_WINDOWS_NAME, + "PerflibNetwork", + NETDATA_CHART_PRIO_FIRST_NET_IFACE + 4, + update_every, + RRDSET_TYPE_LINE); + + add_interface_labels(d->discards.st, windows_shared_buffer, physical); + + d->discards.rd_received = rrddim_add(d->discards.st, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + d->discards.rd_outbound = rrddim_add(d->discards.st, "outbound", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + + rrddim_set_by_pointer(d->discards.st, d->discards.rd_received, (collected_number)d->discards.received.current.Data); + 
rrddim_set_by_pointer(d->discards.st, d->discards.rd_outbound, (collected_number)d->discards.outbound.current.Data); + rrdset_done(d->discards.st); + } + + if(perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->queue.length)) { + if (unlikely(!d->queue.st)) { + d->queue.st = rrdset_create_localhost( + "net_queue_length", + windows_shared_buffer, + NULL, + windows_shared_buffer, + "net.queue_length", + "Interface Output Queue Length", + "packets", + PLUGIN_WINDOWS_NAME, + "PerflibNetwork", + NETDATA_CHART_PRIO_FIRST_NET_IFACE + 5, + update_every, + RRDSET_TYPE_LINE); + + add_interface_labels(d->queue.st, windows_shared_buffer, physical); + + d->queue.rd = rrddim_add(d->queue.st, "length", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + + rrddim_set_by_pointer(d->queue.st, d->queue.rd, (collected_number)d->queue.length.current.Data); + rrdset_done(d->queue.st); + } + + if(perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->rsc.connections)) { + if (unlikely(!d->rsc.st_connections)) { + d->rsc.st_connections = rrdset_create_localhost( + "net_rsc_connections", + windows_shared_buffer, + NULL, + windows_shared_buffer, + "net.rsc_connections", + "Active TCP Connections Offloaded by RSC", + "connections", + PLUGIN_WINDOWS_NAME, + "PerflibNetwork", + NETDATA_CHART_PRIO_FIRST_NET_IFACE + 6, + update_every, + RRDSET_TYPE_LINE); + + add_interface_labels(d->rsc.st_connections, windows_shared_buffer, physical); + + d->rsc.rd_connections = rrddim_add(d->rsc.st_connections, "connections", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + + rrddim_set_by_pointer(d->rsc.st_connections, d->rsc.rd_connections, (collected_number)d->rsc.connections.current.Data); + rrdset_done(d->rsc.st_connections); + } + + if(perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->rsc.packets)) { + if (unlikely(!d->rsc.st_packets)) { + d->rsc.st_packets = rrdset_create_localhost( + "net_rsc_packets", + windows_shared_buffer, + NULL, + windows_shared_buffer, + "net.rsc_packets", + "TCP RSC Coalesced Packets", + "packets/s", + PLUGIN_WINDOWS_NAME, + "PerflibNetwork", + NETDATA_CHART_PRIO_FIRST_NET_IFACE + 7, + update_every, + RRDSET_TYPE_LINE); + + add_interface_labels(d->rsc.st_packets, windows_shared_buffer, physical); + + d->rsc.rd_packets = rrddim_add(d->rsc.st_packets, "packets", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + + rrddim_set_by_pointer(d->rsc.st_packets, d->rsc.rd_packets, (collected_number)d->rsc.packets.current.Data); + rrdset_done(d->rsc.st_packets); + } + + if(perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->rsc.exceptions)) { + if (unlikely(!d->rsc.st_exceptions)) { + d->rsc.st_exceptions = rrdset_create_localhost( + "net_rsc_exceptions", + windows_shared_buffer, + NULL, + windows_shared_buffer, + "net.rsc_exceptions", + "TCP RSC Exceptions", + "exceptions/s", + PLUGIN_WINDOWS_NAME, + "PerflibNetwork", + NETDATA_CHART_PRIO_FIRST_NET_IFACE + 8, + update_every, + RRDSET_TYPE_LINE); + + add_interface_labels(d->rsc.st_exceptions, windows_shared_buffer, physical); + + d->rsc.rd_exceptions = rrddim_add(d->rsc.st_exceptions, "exceptions", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + + rrddim_set_by_pointer(d->rsc.st_exceptions, d->rsc.rd_exceptions, (collected_number)d->rsc.exceptions.current.Data); + rrdset_done(d->rsc.st_exceptions); + } + + if(perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->rsc.average_packet_size)) { + if (unlikely(!d->rsc.st_average_packet_size)) { + d->rsc.st_average_packet_size = rrdset_create_localhost( + "net_rsc_average_packet_size", + windows_shared_buffer, 
+ NULL, + windows_shared_buffer, + "net.rsc_average_packet_size", + "TCP RSC Average Packet Size", + "bytes", + PLUGIN_WINDOWS_NAME, + "PerflibNetwork", + NETDATA_CHART_PRIO_FIRST_NET_IFACE + 9, + update_every, + RRDSET_TYPE_LINE); + + add_interface_labels(d->rsc.st_average_packet_size, windows_shared_buffer, physical); + + d->rsc.rd_average_packet_size = rrddim_add(d->rsc.st_average_packet_size, "average", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + + rrddim_set_by_pointer(d->rsc.st_average_packet_size, d->rsc.rd_average_packet_size, (collected_number)d->rsc.average_packet_size.current.Data); + rrdset_done(d->rsc.st_average_packet_size); + } + + if(perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->chimney.connections)) { + if (unlikely(!d->chimney.st)) { + d->chimney.st = rrdset_create_localhost( + "net_chimney_connections", + windows_shared_buffer, + NULL, + windows_shared_buffer, + "net.chimney_connections", + "Active TCP Connections Offloaded with Chimney", + "connections", + PLUGIN_WINDOWS_NAME, + "PerflibNetwork", + NETDATA_CHART_PRIO_FIRST_NET_IFACE + 10, + update_every, + RRDSET_TYPE_LINE); + + add_interface_labels(d->chimney.st, windows_shared_buffer, physical); + + d->chimney.rd = rrddim_add(d->chimney.st, "connections", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + + rrddim_set_by_pointer(d->chimney.st, d->chimney.rd, (collected_number)d->chimney.connections.current.Data); + rrdset_done(d->chimney.st); + } + } + + if(physical) { + static RRDSET *st = NULL; + static RRDDIM *rd_received = NULL, *rd_sent = NULL; + + if (unlikely(!st)) { + st = rrdset_create_localhost( + "system", + "net", + NULL, + "network", + "system.net", + "Physical Network Interfaces Aggregated Bandwidth", + "kilobits/s", + PLUGIN_WINDOWS_NAME, + "PerflibNetwork", + NETDATA_CHART_PRIO_SYSTEM_NET, + update_every, + RRDSET_TYPE_AREA); + + rd_received = rrddim_add(st, "received", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); + rd_sent = rrddim_add(st, "sent", NULL, -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); + } + + rrddim_set_by_pointer(st, rd_received, (collected_number)total_received); + rrddim_set_by_pointer(st, rd_sent, (collected_number)total_sent); + rrdset_done(st); + } + + // cleanup + { + struct network_interface *d; + dfe_start_write(dict, d) { + if(d->last_collected < now_ut) { + network_interface_cleanup(d); + dictionary_del(dict, d_dfe.name); + } + } + dfe_done(d); + dictionary_garbage_collect(dict); + } + + return true; +} + +int do_PerflibNetwork(int update_every, usec_t dt __maybe_unused) { + static bool initialized = false; + + if(unlikely(!initialized)) { + initialize(); + initialized = true; + } + + DWORD id = RegistryFindIDByName("Network Interface"); + if(id == PERFLIB_REGISTRY_NAME_NOT_FOUND) + return -1; + + PERF_DATA_BLOCK *pDataBlock = perflibGetPerformanceData(id); + if(!pDataBlock) return -1; + + usec_t now_ut = now_monotonic_usec(); + do_network_interface(pDataBlock, update_every, true, now_ut); + do_network_interface(pDataBlock, update_every, false, now_ut); + + struct network_protocol *tcp4 = NULL, *tcp6 = NULL; + for(size_t i = 0; networks[i].protocol ;i++) { + do_network_protocol(pDataBlock, update_every, &networks[i]); + + if(!tcp4 && strcmp(networks[i].protocol, "TCPv4") == 0) + tcp4 = &networks[i]; + if(!tcp6 && strcmp(networks[i].protocol, "TCPv6") == 0) + tcp6 = &networks[i]; + } + + if(tcp4 && tcp6) { + tcp46.packets.received = tcp4->packets.received; + tcp46.packets.sent = tcp4->packets.sent; + tcp46.packets.received.current.Data += 
tcp6->packets.received.current.Data; + tcp46.packets.sent.current.Data += tcp6->packets.sent.current.Data; + protocol_packets_chart_update(&tcp46, update_every); + } + return 0; +} diff --git a/src/collectors/windows.plugin/perflib-objects.c b/src/collectors/windows.plugin/perflib-objects.c index 6628ff864..cb1bc8d22 100644 --- a/src/collectors/windows.plugin/perflib-objects.c +++ b/src/collectors/windows.plugin/perflib-objects.c @@ -1,47 +1,47 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include "windows_plugin.h" -#include "windows-internals.h" - -#define _COMMON_PLUGIN_NAME "windows.plugin" -#define _COMMON_PLUGIN_MODULE_NAME "PerflibObjects" -#include "../common-contexts/common-contexts.h" - -static void initialize(void) { - ; -} - -static bool do_objects(PERF_DATA_BLOCK *pDataBlock, int update_every) { - PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, "Objects"); - if (!pObjectType) - return false; - - static COUNTER_DATA semaphores = { .key = "Semaphores" }; - - if(perflibGetObjectCounter(pDataBlock, pObjectType, &semaphores)) { - ULONGLONG sem = semaphores.current.Data; - common_semaphore_ipc(sem, WINDOWS_MAX_KERNEL_OBJECT, _COMMON_PLUGIN_MODULE_NAME, update_every); - } - - return true; -} - -int do_PerflibObjects(int update_every, usec_t dt __maybe_unused) { - static bool initialized = false; - - if(unlikely(!initialized)) { - initialize(); - initialized = true; - } - - DWORD id = RegistryFindIDByName("Objects"); - if(id == PERFLIB_REGISTRY_NAME_NOT_FOUND) - return -1; - - PERF_DATA_BLOCK *pDataBlock = perflibGetPerformanceData(id); - if(!pDataBlock) return -1; - - do_objects(pDataBlock, update_every); - - return 0; -} +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "windows_plugin.h" +#include "windows-internals.h" + +#define _COMMON_PLUGIN_NAME "windows.plugin" +#define _COMMON_PLUGIN_MODULE_NAME "PerflibObjects" +#include "../common-contexts/common-contexts.h" + +static void initialize(void) { + ; +} + +static bool do_objects(PERF_DATA_BLOCK *pDataBlock, int update_every) { + PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, "Objects"); + if (!pObjectType) + return false; + + static COUNTER_DATA semaphores = { .key = "Semaphores" }; + + if(perflibGetObjectCounter(pDataBlock, pObjectType, &semaphores)) { + ULONGLONG sem = semaphores.current.Data; + common_semaphore_ipc(sem, WINDOWS_MAX_KERNEL_OBJECT, _COMMON_PLUGIN_MODULE_NAME, update_every); + } + + return true; +} + +int do_PerflibObjects(int update_every, usec_t dt __maybe_unused) { + static bool initialized = false; + + if(unlikely(!initialized)) { + initialize(); + initialized = true; + } + + DWORD id = RegistryFindIDByName("Objects"); + if(id == PERFLIB_REGISTRY_NAME_NOT_FOUND) + return -1; + + PERF_DATA_BLOCK *pDataBlock = perflibGetPerformanceData(id); + if(!pDataBlock) return -1; + + do_objects(pDataBlock, update_every); + + return 0; +} diff --git a/src/collectors/windows.plugin/perflib-processes.c b/src/collectors/windows.plugin/perflib-processes.c index 92aa243b9..70e388eed 100644 --- a/src/collectors/windows.plugin/perflib-processes.c +++ b/src/collectors/windows.plugin/perflib-processes.c @@ -1,58 +1,58 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include "windows_plugin.h" -#include "windows-internals.h" - -#define _COMMON_PLUGIN_NAME "windows.plugin" -#define _COMMON_PLUGIN_MODULE_NAME "PerflibProcesses" -#include "../common-contexts/common-contexts.h" - -static void initialize(void) { - ; -} - -static bool do_processes(PERF_DATA_BLOCK 
*pDataBlock, int update_every) { - PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, "System"); - if (!pObjectType) - return false; - - static COUNTER_DATA processesRunning = { .key = "Processes" }; - static COUNTER_DATA contextSwitchPerSec = { .key = "Context Switches/sec" }; - static COUNTER_DATA threads = { .key = "Threads" }; - - if(perflibGetObjectCounter(pDataBlock, pObjectType, &processesRunning)) { - ULONGLONG running = processesRunning.current.Data; - common_system_processes(running, update_every); - } - - if(perflibGetObjectCounter(pDataBlock, pObjectType, &contextSwitchPerSec)) { - ULONGLONG contexts = contextSwitchPerSec.current.Data; - common_system_context_switch(contexts, update_every); - } - - if(perflibGetObjectCounter(pDataBlock, pObjectType, &threads)) { - ULONGLONG totalThreads = threads.current.Data; - common_system_threads(totalThreads, update_every); - } - return true; -} - -int do_PerflibProcesses(int update_every, usec_t dt __maybe_unused) { - static bool initialized = false; - - if(unlikely(!initialized)) { - initialize(); - initialized = true; - } - - DWORD id = RegistryFindIDByName("System"); - if(id == PERFLIB_REGISTRY_NAME_NOT_FOUND) - return -1; - - PERF_DATA_BLOCK *pDataBlock = perflibGetPerformanceData(id); - if(!pDataBlock) return -1; - - do_processes(pDataBlock, update_every); - - return 0; -} +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "windows_plugin.h" +#include "windows-internals.h" + +#define _COMMON_PLUGIN_NAME "windows.plugin" +#define _COMMON_PLUGIN_MODULE_NAME "PerflibProcesses" +#include "../common-contexts/common-contexts.h" + +static void initialize(void) { + ; +} + +static bool do_processes(PERF_DATA_BLOCK *pDataBlock, int update_every) { + PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, "System"); + if (!pObjectType) + return false; + + static COUNTER_DATA processesRunning = { .key = "Processes" }; + static COUNTER_DATA contextSwitchPerSec = { .key = "Context Switches/sec" }; + static COUNTER_DATA threads = { .key = "Threads" }; + + if(perflibGetObjectCounter(pDataBlock, pObjectType, &processesRunning)) { + ULONGLONG running = processesRunning.current.Data; + common_system_processes(running, update_every); + } + + if(perflibGetObjectCounter(pDataBlock, pObjectType, &contextSwitchPerSec)) { + ULONGLONG contexts = contextSwitchPerSec.current.Data; + common_system_context_switch(contexts, update_every); + } + + if(perflibGetObjectCounter(pDataBlock, pObjectType, &threads)) { + ULONGLONG totalThreads = threads.current.Data; + common_system_threads(totalThreads, update_every); + } + return true; +} + +int do_PerflibProcesses(int update_every, usec_t dt __maybe_unused) { + static bool initialized = false; + + if(unlikely(!initialized)) { + initialize(); + initialized = true; + } + + DWORD id = RegistryFindIDByName("System"); + if(id == PERFLIB_REGISTRY_NAME_NOT_FOUND) + return -1; + + PERF_DATA_BLOCK *pDataBlock = perflibGetPerformanceData(id); + if(!pDataBlock) return -1; + + do_processes(pDataBlock, update_every); + + return 0; +} diff --git a/src/collectors/windows.plugin/perflib-processor.c b/src/collectors/windows.plugin/perflib-processor.c index 4c7d86c90..a3df0fced 100644 --- a/src/collectors/windows.plugin/perflib-processor.c +++ b/src/collectors/windows.plugin/perflib-processor.c @@ -1,205 +1,205 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include "windows_plugin.h" -#include "windows-internals.h" - -#define _COMMON_PLUGIN_NAME "windows.plugin" -#define 
_COMMON_PLUGIN_MODULE_NAME "PerflibProcesses" -#include "../common-contexts/common-contexts.h" - -struct processor { - bool collected_metadata; - - RRDSET *st; - RRDDIM *rd_user; - RRDDIM *rd_system; - RRDDIM *rd_irq; - RRDDIM *rd_dpc; - RRDDIM *rd_idle; - -// RRDSET *st2; -// RRDDIM *rd2_busy; - - COUNTER_DATA percentProcessorTime; - COUNTER_DATA percentUserTime; - COUNTER_DATA percentPrivilegedTime; - COUNTER_DATA percentDPCTime; - COUNTER_DATA percentInterruptTime; - COUNTER_DATA percentIdleTime; - - COUNTER_DATA interruptsPerSec; -}; - -struct processor total = { 0 }; - -void initialize_processor_keys(struct processor *p) { - p->percentProcessorTime.key = "% Processor Time"; - p->percentUserTime.key = "% User Time"; - p->percentPrivilegedTime.key = "% Privileged Time"; - p->percentDPCTime.key = "% DPC Time"; - p->percentInterruptTime.key = "% Interrupt Time"; - p->percentIdleTime.key = "% Idle Time"; - p->interruptsPerSec.key = "Interrupts/sec"; -} - -void dict_processor_insert_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) { - struct processor *p = value; - initialize_processor_keys(p); -} - -static DICTIONARY *processors = NULL; - -static void initialize(void) { - initialize_processor_keys(&total); - - processors = dictionary_create_advanced(DICT_OPTION_DONT_OVERWRITE_VALUE | - DICT_OPTION_FIXED_SIZE, NULL, sizeof(struct processor)); - - dictionary_register_insert_callback(processors, dict_processor_insert_cb, NULL); -} - -static bool do_processors(PERF_DATA_BLOCK *pDataBlock, int update_every) { - PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, "Processor"); - if(!pObjectType) return false; - - static const RRDVAR_ACQUIRED *cpus_var = NULL; - int cores_found = 0; - uint64_t totalIPC = 0; - - PERF_INSTANCE_DEFINITION *pi = NULL; - for(LONG i = 0; i < pObjectType->NumInstances ; i++) { - pi = perflibForEachInstance(pDataBlock, pObjectType, pi); - if(!pi) break; - - if(!getInstanceName(pDataBlock, pObjectType, pi, windows_shared_buffer, sizeof(windows_shared_buffer))) - strncpyz(windows_shared_buffer, "[unknown]", sizeof(windows_shared_buffer) - 1); - - bool is_total = false; - struct processor *p; - int cpu = -1; - if(strcasecmp(windows_shared_buffer, "_Total") == 0) { - p = &total; - is_total = true; - cpu = -1; - } - else { - p = dictionary_set(processors, windows_shared_buffer, NULL, sizeof(*p)); - is_total = false; - cpu = str2i(windows_shared_buffer); - snprintfz(windows_shared_buffer, sizeof(windows_shared_buffer), "cpu%d", cpu); - - if(cpu + 1 > cores_found) - cores_found = cpu + 1; - } - - if(!is_total && !p->collected_metadata) { - // TODO collect processor metadata - p->collected_metadata = true; - } - - perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->percentProcessorTime); - perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->percentUserTime); - perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->percentPrivilegedTime); - perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->percentDPCTime); - perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->percentInterruptTime); - perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->percentIdleTime); - - perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->interruptsPerSec); - - if(!p->st) { - p->st = rrdset_create_localhost( - is_total ? "system" : "cpu" - , is_total ? "cpu" : windows_shared_buffer, NULL - , is_total ? "cpu" : "utilization" - , is_total ? "system.cpu" : "cpu.cpu" - , is_total ? 
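/*
 * Note that the module name macro above still reads "PerflibProcesses" on
 * both sides of this diff, apparently carried over from perflib-processes.c.
 *
 * Each "Processor" instance is either the synthetic "_Total" aggregate
 * (charted as system.cpu) or a numeric core id. The loop parses the instance
 * name with str2i(), rewrites it as "cpuN" for the chart id, and keeps a
 * high-water mark of cores seen, which later feeds the "active_processors"
 * host variable. A sketch of that dispatch, with the same names:
 *
 *   if(strcasecmp(windows_shared_buffer, "_Total") == 0)
 *       p = &total;                                  // one static slot
 *   else {
 *       p = dictionary_set(processors, windows_shared_buffer, NULL, sizeof(*p));
 *       cpu = str2i(windows_shared_buffer);          // "7" -> 7
 *       snprintfz(windows_shared_buffer, sizeof(windows_shared_buffer),
 *                 "cpu%d", cpu);
 *       if(cpu + 1 > cores_found) cores_found = cpu + 1;
 *   }
 */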
"Total CPU Utilization" : "Core Utilization" - , "percentage" - , PLUGIN_WINDOWS_NAME - , "PerflibProcessor" - , is_total ? NETDATA_CHART_PRIO_SYSTEM_CPU : NETDATA_CHART_PRIO_CPU_PER_CORE - , update_every - , RRDSET_TYPE_STACKED - ); - - p->rd_irq = rrddim_add(p->st, "interrupts", "irq", 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); - p->rd_user = rrddim_add(p->st, "user", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); - p->rd_system = rrddim_add(p->st, "privileged", "system", 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); - p->rd_dpc = rrddim_add(p->st, "dpc", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); - p->rd_idle = rrddim_add(p->st, "idle", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); - rrddim_hide(p->st, "idle"); - - if(!is_total) - rrdlabels_add(p->st->rrdlabels, "cpu", windows_shared_buffer, RRDLABEL_SRC_AUTO); - else - cpus_var = rrdvar_host_variable_add_and_acquire(localhost, "active_processors"); - } - - uint64_t user = p->percentUserTime.current.Data; - uint64_t system = p->percentPrivilegedTime.current.Data; - uint64_t dpc = p->percentDPCTime.current.Data; - uint64_t irq = p->percentInterruptTime.current.Data; - uint64_t idle = p->percentIdleTime.current.Data; - - totalIPC += p->interruptsPerSec.current.Data; - - rrddim_set_by_pointer(p->st, p->rd_user, (collected_number)user); - rrddim_set_by_pointer(p->st, p->rd_system, (collected_number)system); - rrddim_set_by_pointer(p->st, p->rd_irq, (collected_number)irq); - rrddim_set_by_pointer(p->st, p->rd_dpc, (collected_number)dpc); - rrddim_set_by_pointer(p->st, p->rd_idle, (collected_number)idle); - rrdset_done(p->st); - -// if(!p->st2) { -// p->st2 = rrdset_create_localhost( -// is_total ? "system" : "cpu2" -// , is_total ? "cpu3" : buffer -// , NULL -// , is_total ? "utilization" : buffer -// , is_total ? "system.cpu3" : "cpu2.cpu" -// , is_total ? "Total CPU Utilization" : "Core Utilization" -// , "percentage" -// , PLUGIN_WINDOWS_NAME -// , "PerflibProcessor" -// , is_total ? 
NETDATA_CHART_PRIO_SYSTEM_CPU : NETDATA_CHART_PRIO_CPU_PER_CORE -// , update_every -// , RRDSET_TYPE_STACKED -// ); -// -// p->rd2_busy = perflib_rrddim_add(p->st2, "busy", NULL, 1, 1, &p->percentProcessorTime); -// rrddim_hide(p->st2, "idle"); -// -// if(!is_total) -// rrdlabels_add(p->st->rrdlabels, "cpu", buffer, RRDLABEL_SRC_AUTO); -// } -// -// perflib_rrddim_set_by_pointer(p->st2, p->rd2_busy, &p->percentProcessorTime); -// rrdset_done(p->st2); - } - - if(cpus_var) - rrdvar_host_variable_set(localhost, cpus_var, cores_found); - - common_interrupts(totalIPC, update_every, NULL); - - return true; -} - -int do_PerflibProcessor(int update_every, usec_t dt __maybe_unused) { - static bool initialized = false; - - if(unlikely(!initialized)) { - initialize(); - initialized = true; - } - - DWORD id = RegistryFindIDByName("Processor"); - if(id == PERFLIB_REGISTRY_NAME_NOT_FOUND) - return -1; - - PERF_DATA_BLOCK *pDataBlock = perflibGetPerformanceData(id); - if(!pDataBlock) return -1; - - do_processors(pDataBlock, update_every); - - return 0; -} +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "windows_plugin.h" +#include "windows-internals.h" + +#define _COMMON_PLUGIN_NAME "windows.plugin" +#define _COMMON_PLUGIN_MODULE_NAME "PerflibProcesses" +#include "../common-contexts/common-contexts.h" + +struct processor { + bool collected_metadata; + + RRDSET *st; + RRDDIM *rd_user; + RRDDIM *rd_system; + RRDDIM *rd_irq; + RRDDIM *rd_dpc; + RRDDIM *rd_idle; + +// RRDSET *st2; +// RRDDIM *rd2_busy; + + COUNTER_DATA percentProcessorTime; + COUNTER_DATA percentUserTime; + COUNTER_DATA percentPrivilegedTime; + COUNTER_DATA percentDPCTime; + COUNTER_DATA percentInterruptTime; + COUNTER_DATA percentIdleTime; + + COUNTER_DATA interruptsPerSec; +}; + +struct processor total = { 0 }; + +void initialize_processor_keys(struct processor *p) { + p->percentProcessorTime.key = "% Processor Time"; + p->percentUserTime.key = "% User Time"; + p->percentPrivilegedTime.key = "% Privileged Time"; + p->percentDPCTime.key = "% DPC Time"; + p->percentInterruptTime.key = "% Interrupt Time"; + p->percentIdleTime.key = "% Idle Time"; + p->interruptsPerSec.key = "Interrupts/sec"; +} + +void dict_processor_insert_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) { + struct processor *p = value; + initialize_processor_keys(p); +} + +static DICTIONARY *processors = NULL; + +static void initialize(void) { + initialize_processor_keys(&total); + + processors = dictionary_create_advanced(DICT_OPTION_DONT_OVERWRITE_VALUE | + DICT_OPTION_FIXED_SIZE, NULL, sizeof(struct processor)); + + dictionary_register_insert_callback(processors, dict_processor_insert_cb, NULL); +} + +static bool do_processors(PERF_DATA_BLOCK *pDataBlock, int update_every) { + PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, "Processor"); + if(!pObjectType) return false; + + static const RRDVAR_ACQUIRED *cpus_var = NULL; + int cores_found = 0; + uint64_t totalIPC = 0; + + PERF_INSTANCE_DEFINITION *pi = NULL; + for(LONG i = 0; i < pObjectType->NumInstances ; i++) { + pi = perflibForEachInstance(pDataBlock, pObjectType, pi); + if(!pi) break; + + if(!getInstanceName(pDataBlock, pObjectType, pi, windows_shared_buffer, sizeof(windows_shared_buffer))) + strncpyz(windows_shared_buffer, "[unknown]", sizeof(windows_shared_buffer) - 1); + + bool is_total = false; + struct processor *p; + int cpu = -1; + if(strcasecmp(windows_shared_buffer, "_Total") == 0) { + p = &total; + is_total = true; + cpu = -1; + } + 
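/*
 * Per-instance state here lives in a DICTIONARY created with
 * DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, so the
 * dictionary_set() in the branch below either returns the existing
 * struct processor or allocates a zeroed one, and the registered insert
 * callback fills in the perflib counter key names exactly once. The wiring,
 * as set up earlier in this file:
 *
 *   processors = dictionary_create_advanced(
 *       DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE,
 *       NULL, sizeof(struct processor));
 *   dictionary_register_insert_callback(processors,
 *                                       dict_processor_insert_cb, NULL);
 */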
else { + p = dictionary_set(processors, windows_shared_buffer, NULL, sizeof(*p)); + is_total = false; + cpu = str2i(windows_shared_buffer); + snprintfz(windows_shared_buffer, sizeof(windows_shared_buffer), "cpu%d", cpu); + + if(cpu + 1 > cores_found) + cores_found = cpu + 1; + } + + if(!is_total && !p->collected_metadata) { + // TODO collect processor metadata + p->collected_metadata = true; + } + + perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->percentProcessorTime); + perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->percentUserTime); + perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->percentPrivilegedTime); + perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->percentDPCTime); + perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->percentInterruptTime); + perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->percentIdleTime); + + perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->interruptsPerSec); + + if(!p->st) { + p->st = rrdset_create_localhost( + is_total ? "system" : "cpu" + , is_total ? "cpu" : windows_shared_buffer, NULL + , is_total ? "cpu" : "utilization" + , is_total ? "system.cpu" : "cpu.cpu" + , is_total ? "Total CPU Utilization" : "Core Utilization" + , "percentage" + , PLUGIN_WINDOWS_NAME + , "PerflibProcessor" + , is_total ? NETDATA_CHART_PRIO_SYSTEM_CPU : NETDATA_CHART_PRIO_CPU_PER_CORE + , update_every + , RRDSET_TYPE_STACKED + ); + + p->rd_irq = rrddim_add(p->st, "interrupts", "irq", 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + p->rd_user = rrddim_add(p->st, "user", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + p->rd_system = rrddim_add(p->st, "privileged", "system", 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + p->rd_dpc = rrddim_add(p->st, "dpc", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + p->rd_idle = rrddim_add(p->st, "idle", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + rrddim_hide(p->st, "idle"); + + if(!is_total) + rrdlabels_add(p->st->rrdlabels, "cpu", windows_shared_buffer, RRDLABEL_SRC_AUTO); + else + cpus_var = rrdvar_host_variable_add_and_acquire(localhost, "active_processors"); + } + + uint64_t user = p->percentUserTime.current.Data; + uint64_t system = p->percentPrivilegedTime.current.Data; + uint64_t dpc = p->percentDPCTime.current.Data; + uint64_t irq = p->percentInterruptTime.current.Data; + uint64_t idle = p->percentIdleTime.current.Data; + + totalIPC += p->interruptsPerSec.current.Data; + + rrddim_set_by_pointer(p->st, p->rd_user, (collected_number)user); + rrddim_set_by_pointer(p->st, p->rd_system, (collected_number)system); + rrddim_set_by_pointer(p->st, p->rd_irq, (collected_number)irq); + rrddim_set_by_pointer(p->st, p->rd_dpc, (collected_number)dpc); + rrddim_set_by_pointer(p->st, p->rd_idle, (collected_number)idle); + rrdset_done(p->st); + +// if(!p->st2) { +// p->st2 = rrdset_create_localhost( +// is_total ? "system" : "cpu2" +// , is_total ? "cpu3" : buffer +// , NULL +// , is_total ? "utilization" : buffer +// , is_total ? "system.cpu3" : "cpu2.cpu" +// , is_total ? "Total CPU Utilization" : "Core Utilization" +// , "percentage" +// , PLUGIN_WINDOWS_NAME +// , "PerflibProcessor" +// , is_total ? 
NETDATA_CHART_PRIO_SYSTEM_CPU : NETDATA_CHART_PRIO_CPU_PER_CORE +// , update_every +// , RRDSET_TYPE_STACKED +// ); +// +// p->rd2_busy = perflib_rrddim_add(p->st2, "busy", NULL, 1, 1, &p->percentProcessorTime); +// rrddim_hide(p->st2, "idle"); +// +// if(!is_total) +// rrdlabels_add(p->st->rrdlabels, "cpu", buffer, RRDLABEL_SRC_AUTO); +// } +// +// perflib_rrddim_set_by_pointer(p->st2, p->rd2_busy, &p->percentProcessorTime); +// rrdset_done(p->st2); + } + + if(cpus_var) + rrdvar_host_variable_set(localhost, cpus_var, cores_found); + + common_interrupts(totalIPC, update_every, NULL); + + return true; +} + +int do_PerflibProcessor(int update_every, usec_t dt __maybe_unused) { + static bool initialized = false; + + if(unlikely(!initialized)) { + initialize(); + initialized = true; + } + + DWORD id = RegistryFindIDByName("Processor"); + if(id == PERFLIB_REGISTRY_NAME_NOT_FOUND) + return -1; + + PERF_DATA_BLOCK *pDataBlock = perflibGetPerformanceData(id); + if(!pDataBlock) return -1; + + do_processors(pDataBlock, update_every); + + return 0; +} diff --git a/src/collectors/windows.plugin/perflib-rrd.c b/src/collectors/windows.plugin/perflib-rrd.c index d425307ee..5af36ae35 100644 --- a/src/collectors/windows.plugin/perflib-rrd.c +++ b/src/collectors/windows.plugin/perflib-rrd.c @@ -1,411 +1,411 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include "perflib-rrd.h" - -#define COLLECTED_NUMBER_PRECISION 10000 - -RRDDIM *perflib_rrddim_add(RRDSET *st, const char *id, const char *name, collected_number multiplier, collected_number divider, COUNTER_DATA *cd) { - RRD_ALGORITHM algorithm = RRD_ALGORITHM_ABSOLUTE; - - switch (cd->current.CounterType) { - case PERF_COUNTER_COUNTER: - case PERF_SAMPLE_COUNTER: - case PERF_COUNTER_BULK_COUNT: - // (N1 - N0) / ((D1 - D0) / F) - // multiplier *= cd->current.Frequency / 10000000; - // tested, the frequency is not that useful for netdata - // we get right results without it. 
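/*
 * perflib_rrddim_add() maps Windows counter types onto RRD algorithms so the
 * database can do the per-interval math. Rate counters such as
 * PERF_COUNTER_COUNTER are defined as (N1 - N0) / ((D1 - D0) / F); because
 * netdata already differentiates successive raw samples over wall-clock
 * time, storing the raw N with RRD_ALGORITHM_INCREMENTAL yields the same
 * per-second rate without consulting the frequency F. Worked example: raw
 * samples 1000 and 1600 taken one second apart chart as 600/s.
 */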
- algorithm = RRD_ALGORITHM_INCREMENTAL; - break; - - case PERF_COUNTER_QUEUELEN_TYPE: - case PERF_COUNTER_100NS_QUEUELEN_TYPE: - case PERF_COUNTER_OBJ_TIME_QUEUELEN_TYPE: - case PERF_COUNTER_LARGE_QUEUELEN_TYPE: - case PERF_AVERAGE_BULK: // normally not displayed - // (N1 - N0) / (D1 - D0) - algorithm = RRD_ALGORITHM_INCREMENTAL; - break; - - case PERF_OBJ_TIME_TIMER: - case PERF_COUNTER_TIMER: - case PERF_100NSEC_TIMER: - case PERF_PRECISION_SYSTEM_TIMER: - case PERF_PRECISION_100NS_TIMER: - case PERF_PRECISION_OBJECT_TIMER: - case PERF_SAMPLE_FRACTION: - // 100 * (N1 - N0) / (D1 - D0) - multiplier *= 100; - algorithm = RRD_ALGORITHM_INCREMENTAL; - break; - - case PERF_COUNTER_TIMER_INV: - case PERF_100NSEC_TIMER_INV: - // 100 * (1 - ((N1 - N0) / (D1 - D0))) - divider *= COLLECTED_NUMBER_PRECISION; - algorithm = RRD_ALGORITHM_ABSOLUTE; - break; - - case PERF_COUNTER_MULTI_TIMER: - // 100 * ((N1 - N0) / ((D1 - D0) / TB)) / B1 - divider *= COLLECTED_NUMBER_PRECISION; - algorithm = RRD_ALGORITHM_ABSOLUTE; - break; - - case PERF_100NSEC_MULTI_TIMER: - // 100 * ((N1 - N0) / (D1 - D0)) / B1 - divider *= COLLECTED_NUMBER_PRECISION; - algorithm = RRD_ALGORITHM_ABSOLUTE; - break; - - case PERF_COUNTER_MULTI_TIMER_INV: - case PERF_100NSEC_MULTI_TIMER_INV: - // 100 * (B1 - ((N1 - N0) / (D1 - D0))) - divider *= COLLECTED_NUMBER_PRECISION; - algorithm = RRD_ALGORITHM_ABSOLUTE; - break; - - case PERF_COUNTER_RAWCOUNT: - case PERF_COUNTER_LARGE_RAWCOUNT: - // N as decimal - algorithm = RRD_ALGORITHM_ABSOLUTE; - break; - - case PERF_COUNTER_RAWCOUNT_HEX: - case PERF_COUNTER_LARGE_RAWCOUNT_HEX: - // N as hexadecimal - algorithm = RRD_ALGORITHM_ABSOLUTE; - break; - - case PERF_COUNTER_DELTA: - case PERF_COUNTER_LARGE_DELTA: - // N1 - N0 - algorithm = RRD_ALGORITHM_ABSOLUTE; - break; - - case PERF_RAW_FRACTION: - case PERF_LARGE_RAW_FRACTION: - // 100 * N / B - algorithm = RRD_ALGORITHM_ABSOLUTE; - divider *= COLLECTED_NUMBER_PRECISION; - break; - - case PERF_AVERAGE_TIMER: - // ((N1 - N0) / TB) / (B1 - B0) - // divider *= cd->current.Frequency / 10000000; - algorithm = RRD_ALGORITHM_INCREMENTAL; - break; - - case PERF_ELAPSED_TIME: - // (D0 - N0) / F - algorithm = RRD_ALGORITHM_ABSOLUTE; - break; - - case PERF_COUNTER_TEXT: - case PERF_SAMPLE_BASE: - case PERF_AVERAGE_BASE: - case PERF_COUNTER_MULTI_BASE: - case PERF_RAW_BASE: - case PERF_COUNTER_NODATA: - case PERF_PRECISION_TIMESTAMP: - default: - break; - } - - return rrddim_add(st, id, name, multiplier, divider, algorithm); -} - -#define VALID_DELTA(cd) \ - ((cd)->previous.Time > 0 && (cd)->current.Data >= (cd)->previous.Data && (cd)->current.Time > (cd)->previous.Time) - -collected_number perflib_rrddim_set_by_pointer(RRDSET *st, RRDDIM *rd, COUNTER_DATA *cd) { - ULONGLONG numerator = 0; - LONGLONG denominator = 0; - double doubleValue = 0.0; - collected_number value; - - switch(cd->current.CounterType) { - case PERF_COUNTER_COUNTER: - case PERF_SAMPLE_COUNTER: - case PERF_COUNTER_BULK_COUNT: - // (N1 - N0) / ((D1 - D0) / F) - value = (collected_number)cd->current.Data; - break; - - case PERF_COUNTER_QUEUELEN_TYPE: - case PERF_COUNTER_100NS_QUEUELEN_TYPE: - case PERF_COUNTER_OBJ_TIME_QUEUELEN_TYPE: - case PERF_COUNTER_LARGE_QUEUELEN_TYPE: - case PERF_AVERAGE_BULK: // normally not displayed - // (N1 - N0) / (D1 - D0) - value = (collected_number)cd->current.Data; - break; - - case PERF_OBJ_TIME_TIMER: - case PERF_COUNTER_TIMER: - case PERF_100NSEC_TIMER: - case PERF_PRECISION_SYSTEM_TIMER: - case PERF_PRECISION_100NS_TIMER: - case 
PERF_PRECISION_OBJECT_TIMER: - case PERF_SAMPLE_FRACTION: - // 100 * (N1 - N0) / (D1 - D0) - value = (collected_number)cd->current.Data; - break; - - case PERF_COUNTER_TIMER_INV: - case PERF_100NSEC_TIMER_INV: - // 100 * (1 - ((N1 - N0) / (D1 - D0))) - if(!VALID_DELTA(cd)) return 0; - numerator = cd->current.Data - cd->previous.Data; - denominator = cd->current.Time - cd->previous.Time; - doubleValue = 100.0 * (1.0 - ((double)numerator / (double)denominator)); - // printf("Display value is (timer-inv): %f%%\n", doubleValue); - value = (collected_number)(doubleValue * COLLECTED_NUMBER_PRECISION); - break; - - case PERF_COUNTER_MULTI_TIMER: - // 100 * ((N1 - N0) / ((D1 - D0) / TB)) / B1 - if(!VALID_DELTA(cd)) return 0; - numerator = cd->current.Data - cd->previous.Data; - denominator = cd->current.Time - cd->previous.Time; - denominator /= cd->current.Frequency; - doubleValue = 100.0 * ((double)numerator / (double)denominator) / cd->current.MultiCounterData; - // printf("Display value is (multi-timer): %f%%\n", doubleValue); - value = (collected_number)(doubleValue * COLLECTED_NUMBER_PRECISION); - break; - - case PERF_100NSEC_MULTI_TIMER: - // 100 * ((N1 - N0) / (D1 - D0)) / B1 - if(!VALID_DELTA(cd)) return 0; - numerator = cd->current.Data - cd->previous.Data; - denominator = cd->current.Time - cd->previous.Time; - doubleValue = 100.0 * ((double)numerator / (double)denominator) / (double)cd->current.MultiCounterData; - // printf("Display value is (100ns multi-timer): %f%%\n", doubleValue); - value = (collected_number)(doubleValue * COLLECTED_NUMBER_PRECISION); - break; - - case PERF_COUNTER_MULTI_TIMER_INV: - case PERF_100NSEC_MULTI_TIMER_INV: - // 100 * (B1 - ((N1 - N0) / (D1 - D0))) - if(!VALID_DELTA(cd)) return 0; - numerator = cd->current.Data - cd->previous.Data; - denominator = cd->current.Time - cd->previous.Time; - doubleValue = 100.0 * ((double)cd->current.MultiCounterData - ((double)numerator / (double)denominator)); - // printf("Display value is (multi-timer-inv): %f%%\n", doubleValue); - value = (collected_number)(doubleValue * COLLECTED_NUMBER_PRECISION); - break; - - case PERF_COUNTER_RAWCOUNT: - case PERF_COUNTER_LARGE_RAWCOUNT: - // N as decimal - value = (collected_number)cd->current.Data; - break; - - case PERF_COUNTER_RAWCOUNT_HEX: - case PERF_COUNTER_LARGE_RAWCOUNT_HEX: - // N as hexadecimal - value = (collected_number)cd->current.Data; - break; - - case PERF_COUNTER_DELTA: - case PERF_COUNTER_LARGE_DELTA: - if(!VALID_DELTA(cd)) return 0; - value = (collected_number)(cd->current.Data - cd->previous.Data); - break; - - case PERF_RAW_FRACTION: - case PERF_LARGE_RAW_FRACTION: - // 100 * N / B - if(!cd->current.Time) return 0; - doubleValue = 100.0 * (double)cd->current.Data / (double)cd->current.Time; - // printf("Display value is (fraction): %f%%\n", doubleValue); - value = (collected_number)(doubleValue * COLLECTED_NUMBER_PRECISION); - break; - - default: - return 0; - } - - return rrddim_set_by_pointer(st, rd, value); -} - -/* -double perflibCalculateValue(RAW_DATA *current, RAW_DATA *previous) { - ULONGLONG numerator = 0; - LONGLONG denominator = 0; - double doubleValue = 0.0; - DWORD dwordValue = 0; - - if (NULL == previous) { - // Return error if the counter type requires two samples to calculate the value. - switch (current->CounterType) { - default: - if (PERF_DELTA_COUNTER != (current->CounterType & PERF_DELTA_COUNTER)) - break; - __fallthrough; - // fallthrough - - case PERF_AVERAGE_TIMER: // Special case. - case PERF_AVERAGE_BULK: // Special case. 
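   Two notes on this retained reference implementation: delta-based counter
   types need two snapshots, and a current value smaller than the previous
   one (instance restart, or the Process counterset reusing a name) must be
   dropped rather than charted. The active code above encodes the same
   contract in VALID_DELTA(cd):

       previous.Time > 0  AND  current.Data >= previous.Data
                          AND  current.Time > previous.Time

   so on the first sample, or after a counter reset,
   perflib_rrddim_set_by_pointer() returns 0 instead of a bogus delta.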
- // printf(" > The counter type requires two samples but only one sample was provided.\n"); - return NAN; - } - } - else { - if (current->CounterType != previous->CounterType) { - // printf(" > The samples have inconsistent counter types.\n"); - return NAN; - } - - // Check for integer overflow or bad data from provider (the data from - // sample 2 must be greater than the data from sample 1). - if (current->Data < previous->Data) - { - // Can happen for various reasons. Commonly occurs with the Process counterset when - // multiple processes have the same name and one of them starts or stops. - // Normally you'll just drop the older sample and continue. - // printf("> current (%llu) is smaller than previous (%llu).\n", current->Data, previous->Data); - return NAN; - } - } - - switch (current->CounterType) { - case PERF_COUNTER_COUNTER: - case PERF_SAMPLE_COUNTER: - case PERF_COUNTER_BULK_COUNT: - // (N1 - N0) / ((D1 - D0) / F) - numerator = current->Data - previous->Data; - denominator = current->Time - previous->Time; - dwordValue = (DWORD)(numerator / ((double)denominator / current->Frequency)); - //printf("Display value is (counter): %lu%s\n", (unsigned long)dwordValue, - // (previous->CounterType == PERF_SAMPLE_COUNTER) ? "" : "/sec"); - return (double)dwordValue; - - case PERF_COUNTER_QUEUELEN_TYPE: - case PERF_COUNTER_100NS_QUEUELEN_TYPE: - case PERF_COUNTER_OBJ_TIME_QUEUELEN_TYPE: - case PERF_COUNTER_LARGE_QUEUELEN_TYPE: - case PERF_AVERAGE_BULK: // normally not displayed - // (N1 - N0) / (D1 - D0) - numerator = current->Data - previous->Data; - denominator = current->Time - previous->Time; - doubleValue = (double)numerator / denominator; - if (previous->CounterType != PERF_AVERAGE_BULK) { - // printf("Display value is (queuelen): %f\n", doubleValue); - return doubleValue; - } - return NAN; - - case PERF_OBJ_TIME_TIMER: - case PERF_COUNTER_TIMER: - case PERF_100NSEC_TIMER: - case PERF_PRECISION_SYSTEM_TIMER: - case PERF_PRECISION_100NS_TIMER: - case PERF_PRECISION_OBJECT_TIMER: - case PERF_SAMPLE_FRACTION: - // 100 * (N1 - N0) / (D1 - D0) - numerator = current->Data - previous->Data; - denominator = current->Time - previous->Time; - doubleValue = (double)(100 * numerator) / denominator; - // printf("Display value is (timer): %f%%\n", doubleValue); - return doubleValue; - - case PERF_COUNTER_TIMER_INV: - // 100 * (1 - ((N1 - N0) / (D1 - D0))) - numerator = current->Data - previous->Data; - denominator = current->Time - previous->Time; - doubleValue = 100 * (1 - ((double)numerator / denominator)); - // printf("Display value is (timer-inv): %f%%\n", doubleValue); - return doubleValue; - - case PERF_100NSEC_TIMER_INV: - // 100 * (1- (N1 - N0) / (D1 - D0)) - numerator = current->Data - previous->Data; - denominator = current->Time - previous->Time; - doubleValue = 100 * (1 - (double)numerator / denominator); - // printf("Display value is (100ns-timer-inv): %f%%\n", doubleValue); - return doubleValue; - - case PERF_COUNTER_MULTI_TIMER: - // 100 * ((N1 - N0) / ((D1 - D0) / TB)) / B1 - numerator = current->Data - previous->Data; - denominator = current->Time - previous->Time; - denominator /= current->Frequency; - doubleValue = 100 * ((double)numerator / denominator) / current->MultiCounterData; - // printf("Display value is (multi-timer): %f%%\n", doubleValue); - return doubleValue; - - case PERF_100NSEC_MULTI_TIMER: - // 100 * ((N1 - N0) / (D1 - D0)) / B1 - numerator = current->Data - previous->Data; - denominator = current->Time - previous->Time; - doubleValue = 100 * 
((double)numerator / (double)denominator) / (double)current->MultiCounterData; - // printf("Display value is (100ns multi-timer): %f%%\n", doubleValue); - return doubleValue; - - case PERF_COUNTER_MULTI_TIMER_INV: - case PERF_100NSEC_MULTI_TIMER_INV: - // 100 * (B1 - ((N1 - N0) / (D1 - D0))) - numerator = current->Data - previous->Data; - denominator = current->Time - previous->Time; - doubleValue = 100.0 * ((double)current->MultiCounterData - ((double)numerator / (double)denominator)); - // printf("Display value is (multi-timer-inv): %f%%\n", doubleValue); - return doubleValue; - - case PERF_COUNTER_RAWCOUNT: - case PERF_COUNTER_LARGE_RAWCOUNT: - // N as decimal - // printf("Display value is (rawcount): %llu\n", current->Data); - return (double)current->Data; - - case PERF_COUNTER_RAWCOUNT_HEX: - case PERF_COUNTER_LARGE_RAWCOUNT_HEX: - // N as hexadecimal - // printf("Display value is (hex): 0x%llx\n", current->Data); - return (double)current->Data; - - case PERF_COUNTER_DELTA: - case PERF_COUNTER_LARGE_DELTA: - // N1 - N0 - // printf("Display value is (delta): %llu\n", current->Data - previous->Data); - return (double)(current->Data - previous->Data); - - case PERF_RAW_FRACTION: - case PERF_LARGE_RAW_FRACTION: - // 100 * N / B - doubleValue = 100.0 * (double)current->Data / (double)current->Time; - // printf("Display value is (fraction): %f%%\n", doubleValue); - return doubleValue; - - case PERF_AVERAGE_TIMER: - // ((N1 - N0) / TB) / (B1 - B0) - numerator = current->Data - previous->Data; - denominator = current->Time - previous->Time; - doubleValue = (double)numerator / (double)current->Frequency / (double)denominator; - // printf("Display value is (average timer): %f seconds\n", doubleValue); - return doubleValue; - - case PERF_ELAPSED_TIME: - // (D0 - N0) / F - doubleValue = (double)(current->Time - current->Data) / (double)current->Frequency; - // printf("Display value is (elapsed time): %f seconds\n", doubleValue); - return doubleValue; - - case PERF_COUNTER_TEXT: - case PERF_SAMPLE_BASE: - case PERF_AVERAGE_BASE: - case PERF_COUNTER_MULTI_BASE: - case PERF_RAW_BASE: - case PERF_COUNTER_NODATA: - case PERF_PRECISION_TIMESTAMP: - // printf(" > Non-printing counter type: 0x%08x\n", current->CounterType); - return NAN; - break; - - default: - // printf(" > Unrecognized counter type: 0x%08x\n", current->CounterType); - return NAN; - break; - } -} -*/ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "perflib-rrd.h" + +#define COLLECTED_NUMBER_PRECISION 10000 + +RRDDIM *perflib_rrddim_add(RRDSET *st, const char *id, const char *name, collected_number multiplier, collected_number divider, COUNTER_DATA *cd) { + RRD_ALGORITHM algorithm = RRD_ALGORITHM_ABSOLUTE; + + switch (cd->current.CounterType) { + case PERF_COUNTER_COUNTER: + case PERF_SAMPLE_COUNTER: + case PERF_COUNTER_BULK_COUNT: + // (N1 - N0) / ((D1 - D0) / F) + // multiplier *= cd->current.Frequency / 10000000; + // tested, the frequency is not that useful for netdata + // we get right results without it. 
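/*
 * Percentage-style counters that must be computed from two raw snapshots
 * (the *_TIMER_INV and *_MULTI_TIMER families below) cannot be left to the
 * database, so perflib_rrddim_set_by_pointer() computes the double value
 * itself and stores it as fixed-point: the dimension is created with its
 * divider multiplied by COLLECTED_NUMBER_PRECISION (10000) and the collected
 * value is scaled by the same factor. Worked example: an inverse idle timer
 * that evaluates to 87.5 is stored as 875000 and charted as
 * 875000 / 10000 = 87.5 percent.
 */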
+ algorithm = RRD_ALGORITHM_INCREMENTAL; + break; + + case PERF_COUNTER_QUEUELEN_TYPE: + case PERF_COUNTER_100NS_QUEUELEN_TYPE: + case PERF_COUNTER_OBJ_TIME_QUEUELEN_TYPE: + case PERF_COUNTER_LARGE_QUEUELEN_TYPE: + case PERF_AVERAGE_BULK: // normally not displayed + // (N1 - N0) / (D1 - D0) + algorithm = RRD_ALGORITHM_INCREMENTAL; + break; + + case PERF_OBJ_TIME_TIMER: + case PERF_COUNTER_TIMER: + case PERF_100NSEC_TIMER: + case PERF_PRECISION_SYSTEM_TIMER: + case PERF_PRECISION_100NS_TIMER: + case PERF_PRECISION_OBJECT_TIMER: + case PERF_SAMPLE_FRACTION: + // 100 * (N1 - N0) / (D1 - D0) + multiplier *= 100; + algorithm = RRD_ALGORITHM_INCREMENTAL; + break; + + case PERF_COUNTER_TIMER_INV: + case PERF_100NSEC_TIMER_INV: + // 100 * (1 - ((N1 - N0) / (D1 - D0))) + divider *= COLLECTED_NUMBER_PRECISION; + algorithm = RRD_ALGORITHM_ABSOLUTE; + break; + + case PERF_COUNTER_MULTI_TIMER: + // 100 * ((N1 - N0) / ((D1 - D0) / TB)) / B1 + divider *= COLLECTED_NUMBER_PRECISION; + algorithm = RRD_ALGORITHM_ABSOLUTE; + break; + + case PERF_100NSEC_MULTI_TIMER: + // 100 * ((N1 - N0) / (D1 - D0)) / B1 + divider *= COLLECTED_NUMBER_PRECISION; + algorithm = RRD_ALGORITHM_ABSOLUTE; + break; + + case PERF_COUNTER_MULTI_TIMER_INV: + case PERF_100NSEC_MULTI_TIMER_INV: + // 100 * (B1 - ((N1 - N0) / (D1 - D0))) + divider *= COLLECTED_NUMBER_PRECISION; + algorithm = RRD_ALGORITHM_ABSOLUTE; + break; + + case PERF_COUNTER_RAWCOUNT: + case PERF_COUNTER_LARGE_RAWCOUNT: + // N as decimal + algorithm = RRD_ALGORITHM_ABSOLUTE; + break; + + case PERF_COUNTER_RAWCOUNT_HEX: + case PERF_COUNTER_LARGE_RAWCOUNT_HEX: + // N as hexadecimal + algorithm = RRD_ALGORITHM_ABSOLUTE; + break; + + case PERF_COUNTER_DELTA: + case PERF_COUNTER_LARGE_DELTA: + // N1 - N0 + algorithm = RRD_ALGORITHM_ABSOLUTE; + break; + + case PERF_RAW_FRACTION: + case PERF_LARGE_RAW_FRACTION: + // 100 * N / B + algorithm = RRD_ALGORITHM_ABSOLUTE; + divider *= COLLECTED_NUMBER_PRECISION; + break; + + case PERF_AVERAGE_TIMER: + // ((N1 - N0) / TB) / (B1 - B0) + // divider *= cd->current.Frequency / 10000000; + algorithm = RRD_ALGORITHM_INCREMENTAL; + break; + + case PERF_ELAPSED_TIME: + // (D0 - N0) / F + algorithm = RRD_ALGORITHM_ABSOLUTE; + break; + + case PERF_COUNTER_TEXT: + case PERF_SAMPLE_BASE: + case PERF_AVERAGE_BASE: + case PERF_COUNTER_MULTI_BASE: + case PERF_RAW_BASE: + case PERF_COUNTER_NODATA: + case PERF_PRECISION_TIMESTAMP: + default: + break; + } + + return rrddim_add(st, id, name, multiplier, divider, algorithm); +} + +#define VALID_DELTA(cd) \ + ((cd)->previous.Time > 0 && (cd)->current.Data >= (cd)->previous.Data && (cd)->current.Time > (cd)->previous.Time) + +collected_number perflib_rrddim_set_by_pointer(RRDSET *st, RRDDIM *rd, COUNTER_DATA *cd) { + ULONGLONG numerator = 0; + LONGLONG denominator = 0; + double doubleValue = 0.0; + collected_number value; + + switch(cd->current.CounterType) { + case PERF_COUNTER_COUNTER: + case PERF_SAMPLE_COUNTER: + case PERF_COUNTER_BULK_COUNT: + // (N1 - N0) / ((D1 - D0) / F) + value = (collected_number)cd->current.Data; + break; + + case PERF_COUNTER_QUEUELEN_TYPE: + case PERF_COUNTER_100NS_QUEUELEN_TYPE: + case PERF_COUNTER_OBJ_TIME_QUEUELEN_TYPE: + case PERF_COUNTER_LARGE_QUEUELEN_TYPE: + case PERF_AVERAGE_BULK: // normally not displayed + // (N1 - N0) / (D1 - D0) + value = (collected_number)cd->current.Data; + break; + + case PERF_OBJ_TIME_TIMER: + case PERF_COUNTER_TIMER: + case PERF_100NSEC_TIMER: + case PERF_PRECISION_SYSTEM_TIMER: + case PERF_PRECISION_100NS_TIMER: + case 
PERF_PRECISION_OBJECT_TIMER: + case PERF_SAMPLE_FRACTION: + // 100 * (N1 - N0) / (D1 - D0) + value = (collected_number)cd->current.Data; + break; + + case PERF_COUNTER_TIMER_INV: + case PERF_100NSEC_TIMER_INV: + // 100 * (1 - ((N1 - N0) / (D1 - D0))) + if(!VALID_DELTA(cd)) return 0; + numerator = cd->current.Data - cd->previous.Data; + denominator = cd->current.Time - cd->previous.Time; + doubleValue = 100.0 * (1.0 - ((double)numerator / (double)denominator)); + // printf("Display value is (timer-inv): %f%%\n", doubleValue); + value = (collected_number)(doubleValue * COLLECTED_NUMBER_PRECISION); + break; + + case PERF_COUNTER_MULTI_TIMER: + // 100 * ((N1 - N0) / ((D1 - D0) / TB)) / B1 + if(!VALID_DELTA(cd)) return 0; + numerator = cd->current.Data - cd->previous.Data; + denominator = cd->current.Time - cd->previous.Time; + denominator /= cd->current.Frequency; + doubleValue = 100.0 * ((double)numerator / (double)denominator) / cd->current.MultiCounterData; + // printf("Display value is (multi-timer): %f%%\n", doubleValue); + value = (collected_number)(doubleValue * COLLECTED_NUMBER_PRECISION); + break; + + case PERF_100NSEC_MULTI_TIMER: + // 100 * ((N1 - N0) / (D1 - D0)) / B1 + if(!VALID_DELTA(cd)) return 0; + numerator = cd->current.Data - cd->previous.Data; + denominator = cd->current.Time - cd->previous.Time; + doubleValue = 100.0 * ((double)numerator / (double)denominator) / (double)cd->current.MultiCounterData; + // printf("Display value is (100ns multi-timer): %f%%\n", doubleValue); + value = (collected_number)(doubleValue * COLLECTED_NUMBER_PRECISION); + break; + + case PERF_COUNTER_MULTI_TIMER_INV: + case PERF_100NSEC_MULTI_TIMER_INV: + // 100 * (B1 - ((N1 - N0) / (D1 - D0))) + if(!VALID_DELTA(cd)) return 0; + numerator = cd->current.Data - cd->previous.Data; + denominator = cd->current.Time - cd->previous.Time; + doubleValue = 100.0 * ((double)cd->current.MultiCounterData - ((double)numerator / (double)denominator)); + // printf("Display value is (multi-timer-inv): %f%%\n", doubleValue); + value = (collected_number)(doubleValue * COLLECTED_NUMBER_PRECISION); + break; + + case PERF_COUNTER_RAWCOUNT: + case PERF_COUNTER_LARGE_RAWCOUNT: + // N as decimal + value = (collected_number)cd->current.Data; + break; + + case PERF_COUNTER_RAWCOUNT_HEX: + case PERF_COUNTER_LARGE_RAWCOUNT_HEX: + // N as hexadecimal + value = (collected_number)cd->current.Data; + break; + + case PERF_COUNTER_DELTA: + case PERF_COUNTER_LARGE_DELTA: + if(!VALID_DELTA(cd)) return 0; + value = (collected_number)(cd->current.Data - cd->previous.Data); + break; + + case PERF_RAW_FRACTION: + case PERF_LARGE_RAW_FRACTION: + // 100 * N / B + if(!cd->current.Time) return 0; + doubleValue = 100.0 * (double)cd->current.Data / (double)cd->current.Time; + // printf("Display value is (fraction): %f%%\n", doubleValue); + value = (collected_number)(doubleValue * COLLECTED_NUMBER_PRECISION); + break; + + default: + return 0; + } + + return rrddim_set_by_pointer(st, rd, value); +} + +/* +double perflibCalculateValue(RAW_DATA *current, RAW_DATA *previous) { + ULONGLONG numerator = 0; + LONGLONG denominator = 0; + double doubleValue = 0.0; + DWORD dwordValue = 0; + + if (NULL == previous) { + // Return error if the counter type requires two samples to calculate the value. + switch (current->CounterType) { + default: + if (PERF_DELTA_COUNTER != (current->CounterType & PERF_DELTA_COUNTER)) + break; + __fallthrough; + // fallthrough + + case PERF_AVERAGE_TIMER: // Special case. + case PERF_AVERAGE_BULK: // Special case. 
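   The RAW_DATA fields are overloaded per counter type, which is why these
   formulas read Time as a base or denominator rather than a timestamp:
   PERF_RAW_FRACTION charts 100 * N / B with the base B delivered in Time,
   and PERF_ELAPSED_TIME charts (D0 - N0) / F, the gap between the sample
   time and a stored start time divided by the frequency. The same
   convention shows up in the storage collector below, where "% Free Space"
   carries free MiB in Data and total disk MiB in Time.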
+ // printf(" > The counter type requires two samples but only one sample was provided.\n"); + return NAN; + } + } + else { + if (current->CounterType != previous->CounterType) { + // printf(" > The samples have inconsistent counter types.\n"); + return NAN; + } + + // Check for integer overflow or bad data from provider (the data from + // sample 2 must be greater than the data from sample 1). + if (current->Data < previous->Data) + { + // Can happen for various reasons. Commonly occurs with the Process counterset when + // multiple processes have the same name and one of them starts or stops. + // Normally you'll just drop the older sample and continue. + // printf("> current (%llu) is smaller than previous (%llu).\n", current->Data, previous->Data); + return NAN; + } + } + + switch (current->CounterType) { + case PERF_COUNTER_COUNTER: + case PERF_SAMPLE_COUNTER: + case PERF_COUNTER_BULK_COUNT: + // (N1 - N0) / ((D1 - D0) / F) + numerator = current->Data - previous->Data; + denominator = current->Time - previous->Time; + dwordValue = (DWORD)(numerator / ((double)denominator / current->Frequency)); + //printf("Display value is (counter): %lu%s\n", (unsigned long)dwordValue, + // (previous->CounterType == PERF_SAMPLE_COUNTER) ? "" : "/sec"); + return (double)dwordValue; + + case PERF_COUNTER_QUEUELEN_TYPE: + case PERF_COUNTER_100NS_QUEUELEN_TYPE: + case PERF_COUNTER_OBJ_TIME_QUEUELEN_TYPE: + case PERF_COUNTER_LARGE_QUEUELEN_TYPE: + case PERF_AVERAGE_BULK: // normally not displayed + // (N1 - N0) / (D1 - D0) + numerator = current->Data - previous->Data; + denominator = current->Time - previous->Time; + doubleValue = (double)numerator / denominator; + if (previous->CounterType != PERF_AVERAGE_BULK) { + // printf("Display value is (queuelen): %f\n", doubleValue); + return doubleValue; + } + return NAN; + + case PERF_OBJ_TIME_TIMER: + case PERF_COUNTER_TIMER: + case PERF_100NSEC_TIMER: + case PERF_PRECISION_SYSTEM_TIMER: + case PERF_PRECISION_100NS_TIMER: + case PERF_PRECISION_OBJECT_TIMER: + case PERF_SAMPLE_FRACTION: + // 100 * (N1 - N0) / (D1 - D0) + numerator = current->Data - previous->Data; + denominator = current->Time - previous->Time; + doubleValue = (double)(100 * numerator) / denominator; + // printf("Display value is (timer): %f%%\n", doubleValue); + return doubleValue; + + case PERF_COUNTER_TIMER_INV: + // 100 * (1 - ((N1 - N0) / (D1 - D0))) + numerator = current->Data - previous->Data; + denominator = current->Time - previous->Time; + doubleValue = 100 * (1 - ((double)numerator / denominator)); + // printf("Display value is (timer-inv): %f%%\n", doubleValue); + return doubleValue; + + case PERF_100NSEC_TIMER_INV: + // 100 * (1- (N1 - N0) / (D1 - D0)) + numerator = current->Data - previous->Data; + denominator = current->Time - previous->Time; + doubleValue = 100 * (1 - (double)numerator / denominator); + // printf("Display value is (100ns-timer-inv): %f%%\n", doubleValue); + return doubleValue; + + case PERF_COUNTER_MULTI_TIMER: + // 100 * ((N1 - N0) / ((D1 - D0) / TB)) / B1 + numerator = current->Data - previous->Data; + denominator = current->Time - previous->Time; + denominator /= current->Frequency; + doubleValue = 100 * ((double)numerator / denominator) / current->MultiCounterData; + // printf("Display value is (multi-timer): %f%%\n", doubleValue); + return doubleValue; + + case PERF_100NSEC_MULTI_TIMER: + // 100 * ((N1 - N0) / (D1 - D0)) / B1 + numerator = current->Data - previous->Data; + denominator = current->Time - previous->Time; + doubleValue = 100 * 
((double)numerator / (double)denominator) / (double)current->MultiCounterData; + // printf("Display value is (100ns multi-timer): %f%%\n", doubleValue); + return doubleValue; + + case PERF_COUNTER_MULTI_TIMER_INV: + case PERF_100NSEC_MULTI_TIMER_INV: + // 100 * (B1 - ((N1 - N0) / (D1 - D0))) + numerator = current->Data - previous->Data; + denominator = current->Time - previous->Time; + doubleValue = 100.0 * ((double)current->MultiCounterData - ((double)numerator / (double)denominator)); + // printf("Display value is (multi-timer-inv): %f%%\n", doubleValue); + return doubleValue; + + case PERF_COUNTER_RAWCOUNT: + case PERF_COUNTER_LARGE_RAWCOUNT: + // N as decimal + // printf("Display value is (rawcount): %llu\n", current->Data); + return (double)current->Data; + + case PERF_COUNTER_RAWCOUNT_HEX: + case PERF_COUNTER_LARGE_RAWCOUNT_HEX: + // N as hexadecimal + // printf("Display value is (hex): 0x%llx\n", current->Data); + return (double)current->Data; + + case PERF_COUNTER_DELTA: + case PERF_COUNTER_LARGE_DELTA: + // N1 - N0 + // printf("Display value is (delta): %llu\n", current->Data - previous->Data); + return (double)(current->Data - previous->Data); + + case PERF_RAW_FRACTION: + case PERF_LARGE_RAW_FRACTION: + // 100 * N / B + doubleValue = 100.0 * (double)current->Data / (double)current->Time; + // printf("Display value is (fraction): %f%%\n", doubleValue); + return doubleValue; + + case PERF_AVERAGE_TIMER: + // ((N1 - N0) / TB) / (B1 - B0) + numerator = current->Data - previous->Data; + denominator = current->Time - previous->Time; + doubleValue = (double)numerator / (double)current->Frequency / (double)denominator; + // printf("Display value is (average timer): %f seconds\n", doubleValue); + return doubleValue; + + case PERF_ELAPSED_TIME: + // (D0 - N0) / F + doubleValue = (double)(current->Time - current->Data) / (double)current->Frequency; + // printf("Display value is (elapsed time): %f seconds\n", doubleValue); + return doubleValue; + + case PERF_COUNTER_TEXT: + case PERF_SAMPLE_BASE: + case PERF_AVERAGE_BASE: + case PERF_COUNTER_MULTI_BASE: + case PERF_RAW_BASE: + case PERF_COUNTER_NODATA: + case PERF_PRECISION_TIMESTAMP: + // printf(" > Non-printing counter type: 0x%08x\n", current->CounterType); + return NAN; + break; + + default: + // printf(" > Unrecognized counter type: 0x%08x\n", current->CounterType); + return NAN; + break; + } +} +*/ diff --git a/src/collectors/windows.plugin/perflib-rrd.h b/src/collectors/windows.plugin/perflib-rrd.h index 0b91de2ec..2347c5b1d 100644 --- a/src/collectors/windows.plugin/perflib-rrd.h +++ b/src/collectors/windows.plugin/perflib-rrd.h @@ -1,12 +1,11 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#ifndef NETDATA_PERFLIB_RRD_H -#define NETDATA_PERFLIB_RRD_H - -#include "perflib.h" -#include "database/rrd.h" - -RRDDIM *perflib_rrddim_add(RRDSET *st, const char *id, const char *name, collected_number multiplier, collected_number divider, COUNTER_DATA *cd); -collected_number perflib_rrddim_set_by_pointer(RRDSET *st, RRDDIM *rd, COUNTER_DATA *cd); - -#endif //NETDATA_PERFLIB_RRD_H +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_PERFLIB_RRD_H +#define NETDATA_PERFLIB_RRD_H + +#include "database/rrd.h" + +RRDDIM *perflib_rrddim_add(RRDSET *st, const char *id, const char *name, collected_number multiplier, collected_number divider, COUNTER_DATA *cd); +collected_number perflib_rrddim_set_by_pointer(RRDSET *st, RRDDIM *rd, COUNTER_DATA *cd); + +#endif //NETDATA_PERFLIB_RRD_H diff --git 
a/src/collectors/windows.plugin/perflib-storage.c b/src/collectors/windows.plugin/perflib-storage.c index d3b80052f..823ba2c04 100644 --- a/src/collectors/windows.plugin/perflib-storage.c +++ b/src/collectors/windows.plugin/perflib-storage.c @@ -1,317 +1,632 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include "windows_plugin.h" -#include "windows-internals.h" - -#define _COMMON_PLUGIN_NAME PLUGIN_WINDOWS_NAME -#define _COMMON_PLUGIN_MODULE_NAME "PerflibStorage" -#include "../common-contexts/common-contexts.h" - -struct logical_disk { - bool collected_metadata; - - STRING *filesystem; - - RRDSET *st_disk_space; - RRDDIM *rd_disk_space_used; - RRDDIM *rd_disk_space_free; - - COUNTER_DATA percentDiskFree; - // COUNTER_DATA freeMegabytes; -}; - -struct physical_disk { - bool collected_metadata; - - STRING *device; - STRING *mount_point; - - ND_DISK_IO disk_io; - COUNTER_DATA diskReadBytesPerSec; - COUNTER_DATA diskWriteBytesPerSec; - - COUNTER_DATA percentIdleTime; - COUNTER_DATA percentDiskTime; - COUNTER_DATA percentDiskReadTime; - COUNTER_DATA percentDiskWriteTime; - COUNTER_DATA currentDiskQueueLength; - COUNTER_DATA averageDiskQueueLength; - COUNTER_DATA averageDiskReadQueueLength; - COUNTER_DATA averageDiskWriteQueueLength; - COUNTER_DATA averageDiskSecondsPerTransfer; - COUNTER_DATA averageDiskSecondsPerRead; - COUNTER_DATA averageDiskSecondsPerWrite; - COUNTER_DATA diskTransfersPerSec; - COUNTER_DATA diskReadsPerSec; - COUNTER_DATA diskWritesPerSec; - COUNTER_DATA diskBytesPerSec; - COUNTER_DATA averageDiskBytesPerTransfer; - COUNTER_DATA averageDiskBytesPerRead; - COUNTER_DATA averageDiskBytesPerWrite; - COUNTER_DATA splitIoPerSec; -}; - -struct physical_disk system_physical_total = { - .collected_metadata = true, -}; - -void dict_logical_disk_insert_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) { - struct logical_disk *ld = value; - - ld->percentDiskFree.key = "% Free Space"; - // ld->freeMegabytes.key = "Free Megabytes"; -} - -void initialize_physical_disk(struct physical_disk *pd) { - pd->percentIdleTime.key = "% Idle Time"; - pd->percentDiskTime.key = "% Disk Time"; - pd->percentDiskReadTime.key = "% Disk Read Time"; - pd->percentDiskWriteTime.key = "% Disk Write Time"; - pd->currentDiskQueueLength.key = "Current Disk Queue Length"; - pd->averageDiskQueueLength.key = "Avg. Disk Queue Length"; - pd->averageDiskReadQueueLength.key = "Avg. Disk Read Queue Length"; - pd->averageDiskWriteQueueLength.key = "Avg. Disk Write Queue Length"; - pd->averageDiskSecondsPerTransfer.key = "Avg. Disk sec/Transfer"; - pd->averageDiskSecondsPerRead.key = "Avg. Disk sec/Read"; - pd->averageDiskSecondsPerWrite.key = "Avg. Disk sec/Write"; - pd->diskTransfersPerSec.key = "Disk Transfers/sec"; - pd->diskReadsPerSec.key = "Disk Reads/sec"; - pd->diskWritesPerSec.key = "Disk Writes/sec"; - pd->diskBytesPerSec.key = "Disk Bytes/sec"; - pd->diskReadBytesPerSec.key = "Disk Read Bytes/sec"; - pd->diskWriteBytesPerSec.key = "Disk Write Bytes/sec"; - pd->averageDiskBytesPerTransfer.key = "Avg. Disk Bytes/Transfer"; - pd->averageDiskBytesPerRead.key = "Avg. Disk Bytes/Read"; - pd->averageDiskBytesPerWrite.key = "Avg. 
Disk Bytes/Write"; - pd->splitIoPerSec.key = "Split IO/Sec"; -} - -void dict_physical_disk_insert_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) { - struct physical_disk *pd = value; - initialize_physical_disk(pd); -} - -static DICTIONARY *logicalDisks = NULL, *physicalDisks = NULL; -static void initialize(void) { - initialize_physical_disk(&system_physical_total); - - logicalDisks = dictionary_create_advanced(DICT_OPTION_DONT_OVERWRITE_VALUE | - DICT_OPTION_FIXED_SIZE, NULL, sizeof(struct logical_disk)); - - dictionary_register_insert_callback(logicalDisks, dict_logical_disk_insert_cb, NULL); - - physicalDisks = dictionary_create_advanced(DICT_OPTION_DONT_OVERWRITE_VALUE | - DICT_OPTION_FIXED_SIZE, NULL, sizeof(struct physical_disk)); - - dictionary_register_insert_callback(physicalDisks, dict_physical_disk_insert_cb, NULL); -} - -static STRING *getFileSystemType(const char* diskName) { - if (!diskName || !*diskName) return NULL; - - char fileSystemNameBuffer[128] = {0}; // Buffer for file system name - char pathBuffer[256] = {0}; // Path buffer to accommodate different formats - DWORD serialNumber = 0; - DWORD maxComponentLength = 0; - DWORD fileSystemFlags = 0; - BOOL success; - - // Check if the input is likely a drive letter (e.g., "C:") - if (isalpha((uint8_t)diskName[0]) && diskName[1] == ':' && diskName[2] == '\0') - snprintf(pathBuffer, sizeof(pathBuffer), "%s\\", diskName); // Format as "C:\\" - else - // Assume it's a Volume GUID path or a device path - snprintf(pathBuffer, sizeof(pathBuffer), "\\\\.\\%s", diskName); // Format as "\\.\HarddiskVolume1" - - // Attempt to get the volume information - success = GetVolumeInformation( - pathBuffer, // Path to the disk - NULL, // We don't need the volume name - 0, // Size of volume name buffer is 0 - &serialNumber, // Volume serial number - &maxComponentLength, // Maximum component length - &fileSystemFlags, // File system flags - fileSystemNameBuffer, // File system name buffer - sizeof(fileSystemNameBuffer) // Size of file system name buffer - ); - - if (success && fileSystemNameBuffer[0]) { - char *s = fileSystemNameBuffer; - while(*s) { *s = tolower((uint8_t)*s); s++; } - return string_strdupz(fileSystemNameBuffer); // Duplicate the file system name - } - else - return NULL; -} - -static bool do_logical_disk(PERF_DATA_BLOCK *pDataBlock, int update_every) { - DICTIONARY *dict = logicalDisks; - - PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, "LogicalDisk"); - if(!pObjectType) return false; - - PERF_INSTANCE_DEFINITION *pi = NULL; - for(LONG i = 0; i < pObjectType->NumInstances ; i++) { - pi = perflibForEachInstance(pDataBlock, pObjectType, pi); - if(!pi) break; - - if(!getInstanceName(pDataBlock, pObjectType, pi, windows_shared_buffer, sizeof(windows_shared_buffer))) - strncpyz(windows_shared_buffer, "[unknown]", sizeof(windows_shared_buffer) - 1); - - if(strcasecmp(windows_shared_buffer, "_Total") == 0) - continue; - - struct logical_disk *d = dictionary_set(dict, windows_shared_buffer, NULL, sizeof(*d)); - - if(!d->collected_metadata) { - d->filesystem = getFileSystemType(windows_shared_buffer); - d->collected_metadata = true; - } - - perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->percentDiskFree); - // perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->freeMegabytes); - - if(!d->st_disk_space) { - d->st_disk_space = rrdset_create_localhost( - "disk_space" - , windows_shared_buffer, NULL - , windows_shared_buffer, "disk.space" - , "Disk Space 
Usage" - , "GiB" - , PLUGIN_WINDOWS_NAME - , "PerflibStorage" - , NETDATA_CHART_PRIO_DISKSPACE_SPACE - , update_every - , RRDSET_TYPE_STACKED - ); - - rrdlabels_add(d->st_disk_space->rrdlabels, "mount_point", windows_shared_buffer, RRDLABEL_SRC_AUTO); - // rrdlabels_add(d->st->rrdlabels, "mount_root", name, RRDLABEL_SRC_AUTO); - - if(d->filesystem) - rrdlabels_add(d->st_disk_space->rrdlabels, "filesystem", string2str(d->filesystem), RRDLABEL_SRC_AUTO); - - d->rd_disk_space_free = rrddim_add(d->st_disk_space, "avail", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); - d->rd_disk_space_used = rrddim_add(d->st_disk_space, "used", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); - } - - // percentDiskFree has the free space in Data and the size of the disk in Time, in MiB. - rrddim_set_by_pointer(d->st_disk_space, d->rd_disk_space_free, (collected_number)d->percentDiskFree.current.Data); - rrddim_set_by_pointer(d->st_disk_space, d->rd_disk_space_used, (collected_number)(d->percentDiskFree.current.Time - d->percentDiskFree.current.Data)); - rrdset_done(d->st_disk_space); - } - - return true; -} - -static void physical_disk_labels(RRDSET *st, void *data) { - struct physical_disk *d = data; - - if(d->device) - rrdlabels_add(st->rrdlabels, "device", string2str(d->device), RRDLABEL_SRC_AUTO); - - if (d->mount_point) - rrdlabels_add(st->rrdlabels, "mount_point", string2str(d->mount_point), RRDLABEL_SRC_AUTO); -} - -static bool do_physical_disk(PERF_DATA_BLOCK *pDataBlock, int update_every) { - DICTIONARY *dict = physicalDisks; - - PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, "PhysicalDisk"); - if(!pObjectType) return false; - - PERF_INSTANCE_DEFINITION *pi = NULL; - for (LONG i = 0; i < pObjectType->NumInstances; i++) { - pi = perflibForEachInstance(pDataBlock, pObjectType, pi); - if (!pi) - break; - - if (!getInstanceName(pDataBlock, pObjectType, pi, windows_shared_buffer, sizeof(windows_shared_buffer))) - strncpyz(windows_shared_buffer, "[unknown]", sizeof(windows_shared_buffer) - 1); - - char *device = windows_shared_buffer; - char *mount_point = NULL; - - if((mount_point = strchr(device, ' '))) { - *mount_point = '\0'; - mount_point++; - } - - struct physical_disk *d; - bool is_system; - if (strcasecmp(windows_shared_buffer, "_Total") == 0) { - d = &system_physical_total; - is_system = true; - } - else { - d = dictionary_set(dict, device, NULL, sizeof(*d)); - is_system = false; - } - - if (!d->collected_metadata) { - // TODO collect metadata - device_type, serial, id - d->device = string_strdupz(device); - d->mount_point = string_strdupz(mount_point); - d->collected_metadata = true; - } - - if (perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->diskReadBytesPerSec) && - perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->diskWriteBytesPerSec)) { - if(is_system) - common_system_io(d->diskReadBytesPerSec.current.Data, d->diskWriteBytesPerSec.current.Data, update_every); - else - common_disk_io( - &d->disk_io, - device, - NULL, - d->diskReadBytesPerSec.current.Data, - d->diskWriteBytesPerSec.current.Data, - update_every, - physical_disk_labels, - d); - } - - perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->percentIdleTime); - perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->percentDiskTime); - perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->percentDiskReadTime); - perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->percentDiskWriteTime); - perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->currentDiskQueueLength); - 
perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->averageDiskQueueLength); - perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->averageDiskReadQueueLength); - perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->averageDiskWriteQueueLength); - perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->averageDiskSecondsPerTransfer); - perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->averageDiskSecondsPerRead); - perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->averageDiskSecondsPerWrite); - perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->diskTransfersPerSec); - perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->diskReadsPerSec); - perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->diskWritesPerSec); - perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->diskBytesPerSec); - perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->averageDiskBytesPerTransfer); - perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->averageDiskBytesPerRead); - perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->averageDiskBytesPerWrite); - perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->splitIoPerSec); - } - - return true; -} - -int do_PerflibStorage(int update_every, usec_t dt __maybe_unused) { - static bool initialized = false; - - if(unlikely(!initialized)) { - initialize(); - initialized = true; - } - - DWORD id = RegistryFindIDByName("LogicalDisk"); - if(id == PERFLIB_REGISTRY_NAME_NOT_FOUND) - return -1; - - PERF_DATA_BLOCK *pDataBlock = perflibGetPerformanceData(id); - if(!pDataBlock) return -1; - - do_logical_disk(pDataBlock, update_every); - do_physical_disk(pDataBlock, update_every); - - return 0; -} +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "windows_plugin.h" +#include "windows-internals.h" + +#define _COMMON_PLUGIN_NAME PLUGIN_WINDOWS_NAME +#define _COMMON_PLUGIN_MODULE_NAME "PerflibStorage" +#include "../common-contexts/common-contexts.h" +#include "libnetdata/os/windows-wmi/windows-wmi.h" + +struct logical_disk { + usec_t last_collected; + bool collected_metadata; + + UINT DriveType; + DWORD SerialNumber; + bool readonly; + + STRING *filesystem; + + RRDSET *st_disk_space; + RRDDIM *rd_disk_space_used; + RRDDIM *rd_disk_space_free; + + COUNTER_DATA percentDiskFree; + // COUNTER_DATA freeMegabytes; +}; + +struct physical_disk { + usec_t last_collected; + bool collected_metadata; + + STRING *device; + STRING *mount_point; + STRING *manufacturer; + STRING *model; + STRING *media_type; + STRING *name; + STRING *device_id; + + ND_DISK_IO disk_io; + // COUNTER_DATA diskBytesPerSec; + COUNTER_DATA diskReadBytesPerSec; + COUNTER_DATA diskWriteBytesPerSec; + + ND_DISK_OPS disk_ops; + // COUNTER_DATA diskTransfersPerSec; + COUNTER_DATA diskReadsPerSec; + COUNTER_DATA diskWritesPerSec; + + ND_DISK_UTIL disk_util; + COUNTER_DATA percentIdleTime; + + ND_DISK_BUSY disk_busy; + COUNTER_DATA percentDiskTime; + + ND_DISK_IOTIME disk_iotime; + COUNTER_DATA percentDiskReadTime; + COUNTER_DATA percentDiskWriteTime; + + ND_DISK_QOPS disk_qops; + COUNTER_DATA currentDiskQueueLength; + // COUNTER_DATA averageDiskQueueLength; + // COUNTER_DATA averageDiskReadQueueLength; + // COUNTER_DATA averageDiskWriteQueueLength; + + ND_DISK_AWAIT disk_await; + COUNTER_DATA averageDiskSecondsPerRead; + COUNTER_DATA averageDiskSecondsPerWrite; + + ND_DISK_SVCTM disk_svctm; + COUNTER_DATA averageDiskSecondsPerTransfer; + + ND_DISK_AVGSZ disk_avgsz; + //COUNTER_DATA averageDiskBytesPerTransfer; + COUNTER_DATA 
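/*
 * In the rewritten struct, each chart family now pairs its common-context
 * state (ND_DISK_IO, ND_DISK_OPS, ND_DISK_UTIL, ...) with the COUNTER_DATA
 * entries feeding it, and last_collected records the pass in which the
 * instance was last seen. Disks that disappear can then be retired the way
 * the network collector retires interfaces; a sketch of that sweep under the
 * same dictionary API (the storage sweep itself is outside this hunk):
 *
 *   struct physical_disk *pd;
 *   dfe_start_write(physicalDisks, pd) {
 *       if(pd->last_collected < now_ut) {
 *           physical_disk_cleanup(pd);            // obsolete the charts
 *           dictionary_del(physicalDisks, pd_dfe.name);
 *       }
 *   }
 *   dfe_done(pd);
 *   dictionary_garbage_collect(physicalDisks);
 */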
averageDiskBytesPerRead; + COUNTER_DATA averageDiskBytesPerWrite; + + COUNTER_DATA splitIoPerSec; + RRDSET *st_split; + RRDDIM *rd_split; +}; + +struct physical_disk system_physical_total = { + .collected_metadata = true, +}; + +static void dict_logical_disk_insert_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) { + struct logical_disk *d = value; + + d->percentDiskFree.key = "% Free Space"; + // d->freeMegabytes.key = "Free Megabytes"; +} + +static void logical_disk_cleanup(struct logical_disk *d) { + rrdset_is_obsolete___safe_from_collector_thread(d->st_disk_space); +} + +static void physical_disk_initialize(struct physical_disk *d) { + d->percentIdleTime.key = "% Idle Time"; + d->percentDiskTime.key = "% Disk Time"; + d->percentDiskReadTime.key = "% Disk Read Time"; + d->percentDiskWriteTime.key = "% Disk Write Time"; + d->currentDiskQueueLength.key = "Current Disk Queue Length"; + // d->averageDiskQueueLength.key = "Avg. Disk Queue Length"; + // d->averageDiskReadQueueLength.key = "Avg. Disk Read Queue Length"; + // d->averageDiskWriteQueueLength.key = "Avg. Disk Write Queue Length"; + d->averageDiskSecondsPerTransfer.key = "Avg. Disk sec/Transfer"; + d->averageDiskSecondsPerRead.key = "Avg. Disk sec/Read"; + d->averageDiskSecondsPerWrite.key = "Avg. Disk sec/Write"; + // d->diskTransfersPerSec.key = "Disk Transfers/sec"; + d->diskReadsPerSec.key = "Disk Reads/sec"; + d->diskWritesPerSec.key = "Disk Writes/sec"; + // d->diskBytesPerSec.key = "Disk Bytes/sec"; + d->diskReadBytesPerSec.key = "Disk Read Bytes/sec"; + d->diskWriteBytesPerSec.key = "Disk Write Bytes/sec"; + // d->averageDiskBytesPerTransfer.key = "Avg. Disk Bytes/Transfer"; + d->averageDiskBytesPerRead.key = "Avg. Disk Bytes/Read"; + d->averageDiskBytesPerWrite.key = "Avg. 
Disk Bytes/Write"; + d->splitIoPerSec.key = "Split IO/Sec"; +} + +static void physical_disk_cleanup(struct physical_disk *d) { + string_freez(d->device); + string_freez(d->mount_point); + string_freez(d->manufacturer); + string_freez(d->model); + string_freez(d->media_type); + string_freez(d->name); + string_freez(d->device_id); + + rrdset_is_obsolete___safe_from_collector_thread(d->disk_io.st_io); + rrdset_is_obsolete___safe_from_collector_thread(d->disk_ops.st_ops); + rrdset_is_obsolete___safe_from_collector_thread(d->disk_util.st_util); + rrdset_is_obsolete___safe_from_collector_thread(d->disk_busy.st_busy); + rrdset_is_obsolete___safe_from_collector_thread(d->disk_iotime.st_iotime); + rrdset_is_obsolete___safe_from_collector_thread(d->disk_qops.st_qops); + rrdset_is_obsolete___safe_from_collector_thread(d->disk_await.st_await); + rrdset_is_obsolete___safe_from_collector_thread(d->disk_svctm.st_svctm); + rrdset_is_obsolete___safe_from_collector_thread(d->disk_avgsz.st_avgsz); + rrdset_is_obsolete___safe_from_collector_thread(d->st_split); +} + +void dict_physical_disk_insert_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) { + struct physical_disk *pd = value; + physical_disk_initialize(pd); +} + +static DICTIONARY *logicalDisks = NULL, *physicalDisks = NULL; +static void initialize(void) { + physical_disk_initialize(&system_physical_total); + + logicalDisks = dictionary_create_advanced(DICT_OPTION_DONT_OVERWRITE_VALUE | + DICT_OPTION_FIXED_SIZE, NULL, sizeof(struct logical_disk)); + + dictionary_register_insert_callback(logicalDisks, dict_logical_disk_insert_cb, NULL); + + physicalDisks = dictionary_create_advanced(DICT_OPTION_DONT_OVERWRITE_VALUE | + DICT_OPTION_FIXED_SIZE, NULL, sizeof(struct physical_disk)); + + dictionary_register_insert_callback(physicalDisks, dict_physical_disk_insert_cb, NULL); +} + +static STRING *getFileSystemType(struct logical_disk *d, const char* diskName) { + if (!diskName || !*diskName) return NULL; + + char fileSystemNameBuffer[128] = {0}; // Buffer for file system name + char pathBuffer[256] = {0}; // Path buffer to accommodate different formats + DWORD serialNumber = 0; + DWORD maxComponentLength = 0; + DWORD fileSystemFlags = 0; + BOOL success; + + // Check if the input is likely a drive letter (e.g., "C:") + if (isalpha((uint8_t)diskName[0]) && diskName[1] == ':' && diskName[2] == '\0') + snprintf(pathBuffer, sizeof(pathBuffer), "%s\\", diskName); // Format as "C:\" + else + // Assume it's a Volume GUID path or a device path + snprintf(pathBuffer, sizeof(pathBuffer), "\\\\.\\%s\\", diskName); // Format as "\\.\HarddiskVolume1\" + + d->DriveType = GetDriveTypeA(pathBuffer); + + // Attempt to get the volume information + success = GetVolumeInformationA( + pathBuffer, // Path to the disk + NULL, // We don't need the volume name + 0, // Size of volume name buffer is 0 + &serialNumber, // Volume serial number + &maxComponentLength, // Maximum component length + &fileSystemFlags, // File system flags + fileSystemNameBuffer, // File system name buffer + sizeof(fileSystemNameBuffer) // Size of file system name buffer + ); + + if(success) { + d->readonly = fileSystemFlags & FILE_READ_ONLY_VOLUME; + d->SerialNumber = serialNumber; + + if (fileSystemNameBuffer[0]) { + char *s = fileSystemNameBuffer; + while (*s) { + *s = tolower((uint8_t) *s); + s++; + } + return string_strdupz(fileSystemNameBuffer); // Duplicate the file system name + } + } + return NULL; +} + +static const char *drive_type_to_str(UINT type) { + 
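+ // Maps the GetDriveTypeA() return code to a short label value. The numeric
+ // cases below correspond to the DRIVE_UNKNOWN (0) through DRIVE_RAMDISK (6)
+ // constants defined in winbase.h.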
switch(type) { + default: + case 0: return "unknown"; + case 1: return "norootdir"; + case 2: return "removable"; + case 3: return "fixed"; + case 4: return "remote"; + case 5: return "cdrom"; + case 6: return "ramdisk"; + } +} + +static bool do_logical_disk(PERF_DATA_BLOCK *pDataBlock, int update_every, usec_t now_ut) { + DICTIONARY *dict = logicalDisks; + + PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, "LogicalDisk"); + if(!pObjectType) return false; + + PERF_INSTANCE_DEFINITION *pi = NULL; + for(LONG i = 0; i < pObjectType->NumInstances ; i++) { + pi = perflibForEachInstance(pDataBlock, pObjectType, pi); + if(!pi) break; + + if(!getInstanceName(pDataBlock, pObjectType, pi, windows_shared_buffer, sizeof(windows_shared_buffer))) + strncpyz(windows_shared_buffer, "[unknown]", sizeof(windows_shared_buffer) - 1); + + if(strcasecmp(windows_shared_buffer, "_Total") == 0) + continue; + + struct logical_disk *d = dictionary_set(dict, windows_shared_buffer, NULL, sizeof(*d)); + d->last_collected = now_ut; + + if(!d->collected_metadata) { + d->filesystem = getFileSystemType(d, windows_shared_buffer); + d->collected_metadata = true; + } + + perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->percentDiskFree); + // perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->freeMegabytes); + + if(!d->st_disk_space) { + d->st_disk_space = rrdset_create_localhost( + "disk_space" + , windows_shared_buffer + , NULL + , windows_shared_buffer + , "disk.space" + , "Disk Space Usage" + , "GiB" + , PLUGIN_WINDOWS_NAME + , "PerflibStorage" + , NETDATA_CHART_PRIO_DISKSPACE_SPACE + , update_every + , RRDSET_TYPE_STACKED + ); + + rrdlabels_add(d->st_disk_space->rrdlabels, "mount_point", windows_shared_buffer, RRDLABEL_SRC_AUTO); + rrdlabels_add(d->st_disk_space->rrdlabels, "drive_type", drive_type_to_str(d->DriveType), RRDLABEL_SRC_AUTO); + rrdlabels_add(d->st_disk_space->rrdlabels, "filesystem", d->filesystem ? string2str(d->filesystem) : "unknown", RRDLABEL_SRC_AUTO); + rrdlabels_add(d->st_disk_space->rrdlabels, "rw_mode", d->readonly ? "ro" : "rw", RRDLABEL_SRC_AUTO); + + { + char buf[UINT64_HEX_MAX_LENGTH]; + print_uint64_hex(buf, d->SerialNumber); + rrdlabels_add(d->st_disk_space->rrdlabels, "serial_number", buf, RRDLABEL_SRC_AUTO); + } + + d->rd_disk_space_free = rrddim_add(d->st_disk_space, "avail", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); + d->rd_disk_space_used = rrddim_add(d->st_disk_space, "used", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); + } + + // percentDiskFree has the free space in Data and the size of the disk in Time, in MiB. 
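+ // Worked example (illustrative numbers): a 100 GiB volume with 30 GiB free
+ // reports Data = 30720 and Time = 102400 (both MiB), so used space is
+ // 102400 - 30720 = 71680 MiB; the 1024 divisor on both dimensions renders
+ // the values as GiB on the chart.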
+ rrddim_set_by_pointer(d->st_disk_space, d->rd_disk_space_free, (collected_number)d->percentDiskFree.current.Data); + rrddim_set_by_pointer(d->st_disk_space, d->rd_disk_space_used, (collected_number)(d->percentDiskFree.current.Time - d->percentDiskFree.current.Data)); + rrdset_done(d->st_disk_space); + } + + // cleanup + { + struct logical_disk *d; + dfe_start_write(dict, d) { + if(d->last_collected < now_ut) { + logical_disk_cleanup(d); + dictionary_del(dict, d_dfe.name); + } + } + dfe_done(d); + dictionary_garbage_collect(dict); + } + + return true; +} + +static void physical_disk_labels(RRDSET *st, void *data) { + struct physical_disk *d = data; + + if(d->device) + rrdlabels_add(st->rrdlabels, "device", string2str(d->device), RRDLABEL_SRC_AUTO); + + if (d->mount_point) + rrdlabels_add(st->rrdlabels, "mount_point", string2str(d->mount_point), RRDLABEL_SRC_AUTO); + +// if (d->manufacturer) +// rrdlabels_add(st->rrdlabels, "manufacturer", string2str(d->manufacturer), RRDLABEL_SRC_AUTO); + + if (d->model) + rrdlabels_add(st->rrdlabels, "model", string2str(d->model), RRDLABEL_SRC_AUTO); + +// if (d->media_type) +// rrdlabels_add(st->rrdlabels, "media_type", string2str(d->media_type), RRDLABEL_SRC_AUTO); + +// if (d->name) +// rrdlabels_add(st->rrdlabels, "name", string2str(d->name), RRDLABEL_SRC_AUTO); + + if (d->device_id) + rrdlabels_add(st->rrdlabels, "device_id", string2str(d->device_id), RRDLABEL_SRC_AUTO); +} + +static bool str_is_numeric(const char *s) { + while(*s) if(!isdigit((uint8_t)*s++)) return false; + return true; +} + +static inline double perflib_average_timer_ms(COUNTER_DATA *d) { + if(!d->updated) return 0.0; + + ULONGLONG data1 = d->current.Data; + ULONGLONG data0 = d->previous.Data; + LONGLONG time1 = d->current.Time; + LONGLONG time0 = d->previous.Time; + LONGLONG freq1 = d->current.Frequency; + + if(data1 >= data0 && time1 > time0 && time0 && freq1) + return ((double)(data1 - data0) / (double)(freq1 / MSEC_PER_SEC)) / (double)(time1 - time0); + + return 0; +} + +static inline uint64_t perflib_average_bulk(COUNTER_DATA *d) { + if(!d->updated) return 0; + + ULONGLONG data1 = d->current.Data; + ULONGLONG data0 = d->previous.Data; + LONGLONG time1 = d->current.Time; + LONGLONG time0 = d->previous.Time; + + if(data1 >= data0 && time1 > time0 && time0) + return (data1 - data0) / (time1 - time0); + + return 0; +} + +static inline uint64_t perflib_idle_time_percent(COUNTER_DATA *d) { + if(!d->updated) return 0.0; + + ULONGLONG data1 = d->current.Data; + ULONGLONG data0 = d->previous.Data; + LONGLONG time1 = d->current.Time; + LONGLONG time0 = d->previous.Time; + + if(data1 >= data0 && time1 > time0 && time0) { + uint64_t pcent = 100 * (data1 - data0) / (time1 - time0); + return pcent > 100 ? 
100 : pcent; + } + + return 0; +} + +#define MAX_WMI_DRIVES 100 +static DiskDriveInfoWMI infos[MAX_WMI_DRIVES]; + +static bool do_physical_disk(PERF_DATA_BLOCK *pDataBlock, int update_every, usec_t now_ut) { + DICTIONARY *dict = physicalDisks; + + PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, "PhysicalDisk"); + if(!pObjectType) return false; + + PERF_INSTANCE_DEFINITION *pi = NULL; + for (LONG i = 0; i < pObjectType->NumInstances; i++) { + pi = perflibForEachInstance(pDataBlock, pObjectType, pi); + if (!pi) + break; + + if (!getInstanceName(pDataBlock, pObjectType, pi, windows_shared_buffer, sizeof(windows_shared_buffer))) + strncpyz(windows_shared_buffer, "[unknown]", sizeof(windows_shared_buffer) - 1); + + int device_index = -1; + char *device = windows_shared_buffer; + char mount_point[128]; mount_point[0] = '\0'; + + struct physical_disk *d; + bool is_system; + if (strcasecmp(windows_shared_buffer, "_Total") == 0) { + d = &system_physical_total; + is_system = true; + } + else { + char *space; + if((space = strchr(windows_shared_buffer, ' '))) { + *space++ = '\0'; + strncpyz(mount_point, space, sizeof(mount_point) - 1); + } + + if(str_is_numeric(windows_shared_buffer)) { + device_index = str2ull(device, NULL); + snprintfz(windows_shared_buffer, sizeof(windows_shared_buffer), "Disk %d", device_index); + device = windows_shared_buffer; + } + + d = dictionary_set(dict, device, NULL, sizeof(*d)); + is_system = false; + } + d->last_collected = now_ut; + + if (!d->collected_metadata) { + if(!is_system && device_index != -1) { + size_t infoCount = GetDiskDriveInfo(infos, _countof(infos)); + for(size_t k = 0; k < infoCount ; k++) { + if(infos[k].Index != device_index) + continue; + + d->manufacturer = string_strdupz(infos[k].Manufacturer); + d->model = string_strdupz(infos[k].Model); + d->media_type = string_strdupz(infos[k].MediaType); + d->name = string_strdupz(infos[k].Name); + d->device_id = string_strdupz(infos[k].DeviceID); + + break; + } + } + + d->device = string_strdupz(device); + d->mount_point = string_strdupz(mount_point); + d->collected_metadata = true; + } + + if (perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->diskReadBytesPerSec) && + perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->diskWriteBytesPerSec)) { + if(is_system) + common_system_io( + d->diskReadBytesPerSec.current.Data, + d->diskWriteBytesPerSec.current.Data, + update_every); + else + common_disk_io( + &d->disk_io, + device, + NULL, + d->diskReadBytesPerSec.current.Data, + d->diskWriteBytesPerSec.current.Data, + update_every, + physical_disk_labels, + d); + } + + if(is_system) continue; + + if (perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->diskReadsPerSec) && + perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->diskWritesPerSec)) { + + common_disk_ops( + &d->disk_ops, + device, + NULL, + d->diskReadsPerSec.current.Data, + d->diskWritesPerSec.current.Data, + update_every, + physical_disk_labels, + d); + } + + if (perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->percentIdleTime)) { + common_disk_util( + &d->disk_util, + device, + NULL, + 100 - perflib_idle_time_percent(&d->percentIdleTime), + update_every, + physical_disk_labels, + d); + } + + if (perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->percentDiskTime)) { + common_disk_busy( + &d->disk_busy, + device, + NULL, + d->percentDiskTime.current.Data / NS100_PER_MS, + update_every, + physical_disk_labels, + d); + } + + if (perflibGetInstanceCounter(pDataBlock, pObjectType, pi, 
&d->percentDiskReadTime) && + perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->percentDiskWriteTime)) { + + common_disk_iotime( + &d->disk_iotime, + device, + NULL, + d->percentDiskReadTime.current.Data / NS100_PER_MS, + d->percentDiskWriteTime.current.Data / NS100_PER_MS, + update_every, + physical_disk_labels, + d); + } + + if(perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->currentDiskQueueLength)) { + common_disk_qops( + &d->disk_qops, + device, + NULL, + d->currentDiskQueueLength.current.Data, + update_every, + physical_disk_labels, + d); + } + + if (perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->averageDiskSecondsPerRead) && + perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->averageDiskSecondsPerWrite)) { + + common_disk_await( + &d->disk_await, + device, + NULL, + perflib_average_timer_ms(&d->averageDiskSecondsPerRead), + perflib_average_timer_ms(&d->averageDiskSecondsPerWrite), + update_every, + physical_disk_labels, + d); + } + + if (perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->averageDiskSecondsPerTransfer)) { + common_disk_svctm( + &d->disk_svctm, + device, + NULL, + perflib_average_timer_ms(&d->averageDiskSecondsPerTransfer), + update_every, + physical_disk_labels, + d); + } + + if (perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->averageDiskBytesPerRead) && + perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->averageDiskBytesPerWrite)) { + + common_disk_avgsz( + &d->disk_avgsz, + device, + NULL, + perflib_average_bulk(&d->averageDiskBytesPerRead), + perflib_average_bulk(&d->averageDiskBytesPerWrite), + update_every, + physical_disk_labels, + d); + } + + if(perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->splitIoPerSec)) { + if (!d->st_split) { + d->st_split = rrdset_create_localhost( + "disk_split", + device, + NULL, + "iops", + "disk.split", + "Split I/O Operations", + "operations/s", + _COMMON_PLUGIN_NAME, + _COMMON_PLUGIN_MODULE_NAME, + NETDATA_CHART_PRIO_DISK_SPLIT, + update_every, + RRDSET_TYPE_LINE + ); + + d->rd_split = rrddim_add(d->st_split, "discards", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + physical_disk_labels(d->st_split, d); + } + + rrddim_set_by_pointer(d->st_split, d->rd_split, d->splitIoPerSec.current.Data); + rrdset_done(d->st_split); + } + } + + // cleanup + { + struct physical_disk *d; + dfe_start_write(dict, d) { + if(d->last_collected < now_ut) { + physical_disk_cleanup(d); + dictionary_del(dict, d_dfe.name); + } + } + dfe_done(d); + dictionary_garbage_collect(dict); + } + + return true; +} + +int do_PerflibStorage(int update_every, usec_t dt __maybe_unused) { + static bool initialized = false; + + if(unlikely(!initialized)) { + initialize(); + initialized = true; + } + + DWORD id = RegistryFindIDByName("LogicalDisk"); + if(id == PERFLIB_REGISTRY_NAME_NOT_FOUND) + return -1; + + PERF_DATA_BLOCK *pDataBlock = perflibGetPerformanceData(id); + if(!pDataBlock) return -1; + + usec_t now_ut = now_monotonic_usec(); + do_logical_disk(pDataBlock, update_every, now_ut); + do_physical_disk(pDataBlock, update_every, now_ut); + + return 0; +} diff --git a/src/collectors/windows.plugin/perflib-thermalzone.c b/src/collectors/windows.plugin/perflib-thermalzone.c new file mode 100644 index 000000000..f85ba019f --- /dev/null +++ b/src/collectors/windows.plugin/perflib-thermalzone.c @@ -0,0 +1,103 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "windows_plugin.h" +#include "windows-internals.h" + +struct thermal_zone { + RRDSET *st; + RRDDIM *rd; + + COUNTER_DATA 
thermalZoneTemperature; +}; + +static inline void initialize_thermal_zone_keys(struct thermal_zone *p) { + p->thermalZoneTemperature.key = "Temperature"; +} + +void dict_thermal_zone_insert_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) { + struct thermal_zone *p = value; + initialize_thermal_zone_keys(p); +} + +static DICTIONARY *thermal_zones = NULL; + +static void initialize(void) { + thermal_zones = dictionary_create_advanced( + DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, NULL, sizeof(struct thermal_zone)); + + dictionary_register_insert_callback(thermal_zones, dict_thermal_zone_insert_cb, NULL); +} + +static bool do_thermal_zones(PERF_DATA_BLOCK *pDataBlock, int update_every) { + PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, "Thermal Zone Information"); + if (!pObjectType) + return false; + + PERF_INSTANCE_DEFINITION *pi = NULL; + for (LONG i = 0; i < pObjectType->NumInstances; i++) { + pi = perflibForEachInstance(pDataBlock, pObjectType, pi); + if (!pi) + break; + + if (!getInstanceName(pDataBlock, pObjectType, pi, windows_shared_buffer, sizeof(windows_shared_buffer))) + strncpyz(windows_shared_buffer, "[unknown]", sizeof(windows_shared_buffer) - 1); + + struct thermal_zone *p = dictionary_set(thermal_zones, windows_shared_buffer, NULL, sizeof(*p)); + + perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->thermalZoneTemperature); + + // https://learn.microsoft.com/en-us/windows-hardware/design/device-experiences/design-guide + if (!p->st) { + char id[RRD_ID_LENGTH_MAX + 1]; + snprintfz(id, RRD_ID_LENGTH_MAX, "thermalzone_%s_temperature", windows_shared_buffer); + netdata_fix_chart_name(id); + p->st = rrdset_create_localhost( + "system", + id, + NULL, + "thermalzone", + "system.thermalzone_temperature", + "Thermal zone temperature", + "Celsius", + PLUGIN_WINDOWS_NAME, + "ThermalZone", + NETDATA_CHART_PRIO_WINDOWS_THERMAL_ZONES, + update_every, + RRDSET_TYPE_LINE); + + p->rd = rrddim_add(p->st, id, "temperature", 1, 1, RRD_ALGORITHM_ABSOLUTE); + + rrdlabels_add(p->st->rrdlabels, "thermalzone", windows_shared_buffer, RRDLABEL_SRC_AUTO); + } + + // The counter reports Kelvin; convert to Celsius before plotting + NETDATA_DOUBLE kTemperature = (NETDATA_DOUBLE)p->thermalZoneTemperature.current.Data; + kTemperature -= 273.15; + + rrddim_set_by_pointer(p->st, p->rd, (collected_number)kTemperature); + rrdset_done(p->st); + } + + return true; +} + +int do_PerflibThermalZone(int update_every, usec_t dt __maybe_unused) { + static bool initialized = false; + + if (unlikely(!initialized)) { + initialize(); + initialized = true; + } + + DWORD id = RegistryFindIDByName("Thermal Zone Information"); + if (id == PERFLIB_REGISTRY_NAME_NOT_FOUND) + return -1; + + PERF_DATA_BLOCK *pDataBlock = perflibGetPerformanceData(id); + if (!pDataBlock) + return -1; + + do_thermal_zones(pDataBlock, update_every); + + return 0; +} diff --git a/src/collectors/windows.plugin/perflib-web-service.c b/src/collectors/windows.plugin/perflib-web-service.c new file mode 100644 index 000000000..159f6e0ee --- /dev/null +++ b/src/collectors/windows.plugin/perflib-web-service.c @@ -0,0 +1,669 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "windows_plugin.h" +#include "windows-internals.h" + +struct web_service { + RRDSET *st_request_rate; + RRDDIM *rd_request_rate; + + RRDSET *st_request_by_type_rate; + RRDDIM *rd_request_options_rate; + RRDDIM *rd_request_get_rate; + RRDDIM *rd_request_post_rate; + RRDDIM *rd_request_head_rate; + RRDDIM *rd_request_put_rate; 
+ RRDDIM *rd_request_delete_rate; + RRDDIM *rd_request_trace_rate; + RRDDIM *rd_request_move_rate; + RRDDIM *rd_request_copy_rate; + RRDDIM *rd_request_mkcol_rate; + RRDDIM *rd_request_propfind_rate; + RRDDIM *rd_request_proppatch_rate; + RRDDIM *rd_request_search_rate; + RRDDIM *rd_request_lock_rate; + RRDDIM *rd_request_unlock_rate; + RRDDIM *rd_request_other_rate; + + RRDSET *st_traffic; + RRDDIM *rd_traffic_received; + RRDDIM *rd_traffic_sent; + + RRDSET *st_file_transfer; + RRDDIM *rd_files_received; + RRDDIM *rd_files_sent; + + RRDSET *st_curr_connections; + RRDDIM *rd_curr_connections; + + RRDSET *st_connections_attemps; + RRDDIM *rd_connections_attemps; + + RRDSET *st_user_count; + RRDDIM *rd_user_anonymous; + RRDDIM *rd_user_nonanonymous; + + RRDSET *st_isapi_extension_request_count; + RRDDIM *rd_isapi_extension_request_count; + + RRDSET *st_isapi_extension_request_rate; + RRDDIM *rd_isapi_extension_request_rate; + + RRDSET *st_error_rate; + RRDDIM *rd_error_rate_locked; + RRDDIM *rd_error_rate_not_found; + + RRDSET *st_logon_attemps; + RRDDIM *rd_logon_attemps; + + RRDSET *st_service_uptime; + RRDDIM *rd_service_uptime; + + COUNTER_DATA IISCurrentAnonymousUser; + COUNTER_DATA IISCurrentNonAnonymousUsers; + COUNTER_DATA IISCurrentConnections; + COUNTER_DATA IISCurrentISAPIExtRequests; + COUNTER_DATA IISUptime; + + COUNTER_DATA IISReceivedBytesTotal; + COUNTER_DATA IISSentBytesTotal; + COUNTER_DATA IISIPAPIExtRequestsTotal; + COUNTER_DATA IISConnAttemptsAllInstancesTotal; + COUNTER_DATA IISFilesReceivedTotal; + COUNTER_DATA IISFilesSentTotal; + COUNTER_DATA IISLogonAttemptsTotal; + COUNTER_DATA IISLockedErrorsTotal; + COUNTER_DATA IISNotFoundErrorsTotal; + + COUNTER_DATA IISRequestsOptions; + COUNTER_DATA IISRequestsGet; + COUNTER_DATA IISRequestsPost; + COUNTER_DATA IISRequestsHead; + COUNTER_DATA IISRequestsPut; + COUNTER_DATA IISRequestsDelete; + COUNTER_DATA IISRequestsTrace; + COUNTER_DATA IISRequestsMove; + COUNTER_DATA IISRequestsCopy; + COUNTER_DATA IISRequestsMkcol; + COUNTER_DATA IISRequestsPropfind; + COUNTER_DATA IISRequestsProppatch; + COUNTER_DATA IISRequestsSearch; + COUNTER_DATA IISRequestsLock; + COUNTER_DATA IISRequestsUnlock; + COUNTER_DATA IISRequestsOther; +}; + +static inline void initialize_web_service_keys(struct web_service *p) { + p->IISCurrentAnonymousUser.key = "Current Anonymous Users"; + p->IISCurrentNonAnonymousUsers.key = "Current NonAnonymous Users"; + p->IISCurrentConnections.key = "Current Connections"; + p->IISCurrentISAPIExtRequests.key = "Current ISAPI Extension Requests"; + p->IISUptime.key = "Service Uptime"; + + p->IISReceivedBytesTotal.key = "Total Bytes Received"; + p->IISSentBytesTotal.key = "Total Bytes Sent"; + p->IISIPAPIExtRequestsTotal.key = "Total ISAPI Extension Requests"; + p->IISConnAttemptsAllInstancesTotal.key = "Total Connection Attempts (all instances)"; + p->IISFilesReceivedTotal.key = "Total Files Received"; + p->IISFilesSentTotal.key = "Total Files Sent"; + p->IISLogonAttemptsTotal.key = "Total Logon Attempts"; + p->IISLockedErrorsTotal.key = "Total Locked Errors"; + p->IISNotFoundErrorsTotal.key = "Total Not Found Errors"; + + p->IISRequestsOptions.key = "Options Requests/sec"; + p->IISRequestsGet.key = "Get Requests/sec"; + p->IISRequestsPost.key = "Post Requests/sec"; + p->IISRequestsHead.key = "Head Requests/sec"; + p->IISRequestsPut.key = "Put Requests/sec"; + p->IISRequestsDelete.key = "Delete Requests/sec"; + p->IISRequestsTrace.key = "Trace Requests/sec"; + p->IISRequestsMove.key = "Move Requests/sec"; + 
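+ // Copy, Mkcol, Propfind, Proppatch, Lock and Unlock (together with Move
+ // above) are WebDAV verbs (RFC 4918) that IIS exposes as separate counters;
+ // "Other Request Methods/sec" is the catch-all for anything else.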
p->IISRequestsCopy.key = "Copy Requests/sec"; + p->IISRequestsMkcol.key = "Mkcol Requests/sec"; + p->IISRequestsPropfind.key = "Propfind Requests/sec"; + p->IISRequestsProppatch.key = "Proppatch Requests/sec"; + p->IISRequestsSearch.key = "Search Requests/sec"; + p->IISRequestsLock.key = "Lock Requests/sec"; + p->IISRequestsUnlock.key = "Unlock Requests/sec"; + p->IISRequestsOther.key = "Other Request Methods/sec"; +} + +void dict_web_service_insert_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) { + struct web_service *p = value; + initialize_web_service_keys(p); +} + +static DICTIONARY *web_services = NULL; + +static void initialize(void) { + web_services = dictionary_create_advanced( + DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, NULL, sizeof(struct web_service)); + + dictionary_register_insert_callback(web_services, dict_web_service_insert_cb, NULL); +} + +static bool do_web_services(PERF_DATA_BLOCK *pDataBlock, int update_every) { + char id[RRD_ID_LENGTH_MAX + 1]; + PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, "Web Service"); + if (!pObjectType) + return false; + + PERF_INSTANCE_DEFINITION *pi = NULL; + for (LONG i = 0; i < pObjectType->NumInstances; i++) { + pi = perflibForEachInstance(pDataBlock, pObjectType, pi); + if (!pi) + break; + + if (!getInstanceName(pDataBlock, pObjectType, pi, windows_shared_buffer, sizeof(windows_shared_buffer))) + strncpyz(windows_shared_buffer, "[unknown]", sizeof(windows_shared_buffer) - 1); + + // We are not plotting _Total here, because Netdata Cloud will group the sites + if (strcasecmp(windows_shared_buffer, "_Total") == 0) { + continue; + } + + struct web_service *p = dictionary_set(web_services, windows_shared_buffer, NULL, sizeof(*p)); + + if (perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISReceivedBytesTotal) && + perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISSentBytesTotal)) { + if (!p->st_traffic) { + snprintfz(id, RRD_ID_LENGTH_MAX, "website_%s_traffic", windows_shared_buffer); + netdata_fix_chart_name(id); + p->st_traffic = rrdset_create_localhost( + "iis", + id, + NULL, + "traffic", + "iis.website_traffic", + "Website traffic", + "bytes/s", + PLUGIN_WINDOWS_NAME, + "PerflibWebService", + PRIO_WEBSITE_IIS_TRAFFIC, + update_every, + RRDSET_TYPE_AREA); + + p->rd_traffic_received = rrddim_add(p->st_traffic, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + p->rd_traffic_sent = rrddim_add(p->st_traffic, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rrdlabels_add(p->st_traffic->rrdlabels, "website", windows_shared_buffer, RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer( + p->st_traffic, p->rd_traffic_received, (collected_number)p->IISReceivedBytesTotal.current.Data); + rrddim_set_by_pointer( + p->st_traffic, p->rd_traffic_sent, (collected_number)p->IISSentBytesTotal.current.Data); + + rrdset_done(p->st_traffic); + } + + if (perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISFilesReceivedTotal) && + perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISFilesSentTotal)) { + if (!p->st_file_transfer) { + snprintfz(id, RRD_ID_LENGTH_MAX, "website_%s_ftp_file_transfer_rate", windows_shared_buffer); + netdata_fix_chart_name(id); + p->st_file_transfer = rrdset_create_localhost( + "iis", + id, + NULL, + "traffic", + "iis.website_ftp_file_transfer_rate", + "Website FTP file transfer rate", + "files/s", + PLUGIN_WINDOWS_NAME, + "PerflibWebService", + PRIO_WEBSITE_IIS_FTP_FILE_TRANSFER_RATE, + update_every, + RRDSET_TYPE_LINE); 
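+ // As with the traffic chart above, the "sent" dimension below is registered
+ // with a -1 multiplier so outbound values are drawn below the zero axis.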
+ + p->rd_files_received = rrddim_add(p->st_file_transfer, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + p->rd_files_sent = rrddim_add(p->st_file_transfer, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + rrdlabels_add(p->st_file_transfer->rrdlabels, "website", windows_shared_buffer, RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer( + p->st_file_transfer, p->rd_files_received, (collected_number)p->IISFilesReceivedTotal.current.Data); + rrddim_set_by_pointer( + p->st_file_transfer, p->rd_files_sent, (collected_number)p->IISFilesSentTotal.current.Data); + + rrdset_done(p->st_file_transfer); + } + + if (perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISCurrentConnections)) { + if (!p->st_curr_connections) { + snprintfz(id, RRD_ID_LENGTH_MAX, "website_%s_active_connections_count", windows_shared_buffer); + netdata_fix_chart_name(id); + p->st_curr_connections = rrdset_create_localhost( + "iis", + id, + NULL, + "connections", + "iis.website_active_connections_count", + "Website active connections", + "connections", + PLUGIN_WINDOWS_NAME, + "PerflibWebService", + PRIO_WEBSITE_IIS_ACTIVE_CONNECTIONS_COUNT, + update_every, + RRDSET_TYPE_LINE); + + p->rd_curr_connections = rrddim_add(p->st_curr_connections, "active", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + rrdlabels_add(p->st_curr_connections->rrdlabels, "website", windows_shared_buffer, RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer( + p->st_curr_connections, + p->rd_curr_connections, + (collected_number)p->IISCurrentConnections.current.Data); + + rrdset_done(p->st_curr_connections); + } + + if (perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISConnAttemptsAllInstancesTotal)) { + if (!p->st_connections_attemps) { + snprintfz(id, RRD_ID_LENGTH_MAX, "website_%s_connection_attempts_rate", windows_shared_buffer); + netdata_fix_chart_name(id); + p->st_connections_attemps = rrdset_create_localhost( + "iis", + id, + NULL, + "connections", + "iis.website_connection_attempts_rate", + "Website connection attempts", + "attempts/s", + PLUGIN_WINDOWS_NAME, + "PerflibWebService", + PRIO_WEBSITE_IIS_CONNECTIONS_ATTEMP, + update_every, + RRDSET_TYPE_LINE); + + p->rd_connections_attemps = + rrddim_add(p->st_connections_attemps, "connection", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrdlabels_add( + p->st_connections_attemps->rrdlabels, "website", windows_shared_buffer, RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer( + p->st_connections_attemps, + p->rd_connections_attemps, + (collected_number)p->IISConnAttemptsAllInstancesTotal.current.Data); + + rrdset_done(p->st_connections_attemps); + } + + if (perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISCurrentAnonymousUser) && + perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISCurrentNonAnonymousUsers)) { + if (!p->st_user_count) { + snprintfz(id, RRD_ID_LENGTH_MAX, "website_%s_users_count", windows_shared_buffer); + netdata_fix_chart_name(id); + p->st_user_count = rrdset_create_localhost( + "iis", + id, + NULL, + "requests", + "iis.website_users_count", + "Website users with pending requests", + "users", + PLUGIN_WINDOWS_NAME, + "PerflibWebService", + PRIO_WEBSITE_IIS_USERS, + update_every, + RRDSET_TYPE_STACKED); + + p->rd_user_anonymous = rrddim_add(p->st_user_count, "anonymous", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + p->rd_user_nonanonymous = + rrddim_add(p->st_user_count, "non_anonymous", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + + rrdlabels_add(p->st_user_count->rrdlabels, "website", windows_shared_buffer, RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer( 
p->st_user_count, p->rd_user_anonymous, (collected_number)p->IISCurrentAnonymousUser.current.Data); + + rrddim_set_by_pointer( + p->st_user_count, + p->rd_user_nonanonymous, + (collected_number)p->IISCurrentNonAnonymousUsers.current.Data); + + rrdset_done(p->st_user_count); + } + + if (perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISCurrentISAPIExtRequests)) { + if (!p->st_isapi_extension_request_count) { + snprintfz(id, RRD_ID_LENGTH_MAX, "website_%s_isapi_extension_requests_count", windows_shared_buffer); + netdata_fix_chart_name(id); + p->st_isapi_extension_request_count = rrdset_create_localhost( + "iis", + id, + NULL, + "requests", + "iis.website_isapi_extension_requests_count", + "ISAPI extension requests", + "requests", + PLUGIN_WINDOWS_NAME, + "PerflibWebService", + PRIO_WEBSITE_IIS_ISAPI_EXT_REQUEST_COUNT, + update_every, + RRDSET_TYPE_LINE); + + p->rd_isapi_extension_request_count = + rrddim_add(p->st_isapi_extension_request_count, "isapi", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + + rrdlabels_add( + p->st_isapi_extension_request_count->rrdlabels, "website", windows_shared_buffer, RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer( + p->st_isapi_extension_request_count, + p->rd_isapi_extension_request_count, + (collected_number)p->IISCurrentISAPIExtRequests.current.Data); + + rrdset_done(p->st_isapi_extension_request_count); + } + + if (perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISIPAPIExtRequestsTotal)) { + if (!p->st_isapi_extension_request_rate) { + snprintfz(id, RRD_ID_LENGTH_MAX, "website_%s_isapi_extension_requests_rate", windows_shared_buffer); + netdata_fix_chart_name(id); + p->st_isapi_extension_request_rate = rrdset_create_localhost( + "iis", + id, + NULL, + "requests", + "iis.website_isapi_extension_requests_rate", + "Website ISAPI extension requests rate", + "requests/s", + PLUGIN_WINDOWS_NAME, + "PerflibWebService", + PRIO_WEBSITE_IIS_ISAPI_EXT_REQUEST_RATE, + update_every, + RRDSET_TYPE_LINE); + + p->rd_isapi_extension_request_rate = + rrddim_add(p->st_isapi_extension_request_rate, "isapi", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrdlabels_add( + p->st_isapi_extension_request_rate->rrdlabels, "website", windows_shared_buffer, RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer( + p->st_isapi_extension_request_rate, + p->rd_isapi_extension_request_rate, + (collected_number)p->IISIPAPIExtRequestsTotal.current.Data); + + rrdset_done(p->st_isapi_extension_request_rate); + } + + if (perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISLockedErrorsTotal) && + perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISNotFoundErrorsTotal)) { + if (!p->st_error_rate) { + snprintfz(id, RRD_ID_LENGTH_MAX, "website_%s_errors_rate", windows_shared_buffer); + netdata_fix_chart_name(id); + p->st_error_rate = rrdset_create_localhost( + "iis", + id, + NULL, + "requests", + "iis.website_errors_rate", + "Website errors", + "errors/s", + PLUGIN_WINDOWS_NAME, + "PerflibWebService", + PRIO_WEBSITE_IIS_USERS, + update_every, + RRDSET_TYPE_STACKED); + + p->rd_error_rate_locked = + rrddim_add(p->st_error_rate, "document_locked", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + p->rd_error_rate_not_found = + rrddim_add(p->st_error_rate, "document_not_found", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrdlabels_add(p->st_error_rate->rrdlabels, "website", windows_shared_buffer, RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer( + p->st_error_rate, p->rd_error_rate_locked, (collected_number)p->IISLockedErrorsTotal.current.Data); + + rrddim_set_by_pointer( 
p->st_error_rate, p->rd_error_rate_not_found, (collected_number)p->IISNotFoundErrorsTotal.current.Data); + + rrdset_done(p->st_error_rate); + } + + if (perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISLogonAttemptsTotal)) { + if (!p->st_logon_attemps) { + snprintfz(id, RRD_ID_LENGTH_MAX, "website_%s_logon_attempts_rate", windows_shared_buffer); + netdata_fix_chart_name(id); + p->st_logon_attemps = rrdset_create_localhost( + "iis", + id, + NULL, + "logon", + "iis.website_logon_attempts_rate", + "Website logon attempts", + "attempts/s", + PLUGIN_WINDOWS_NAME, + "PerflibWebService", + PRIO_WEBSITE_IIS_LOGON_ATTEMPTS, + update_every, + RRDSET_TYPE_LINE); + + p->rd_logon_attemps = rrddim_add(p->st_logon_attemps, "logon", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrdlabels_add(p->st_logon_attemps->rrdlabels, "website", windows_shared_buffer, RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer( + p->st_logon_attemps, p->rd_logon_attemps, (collected_number)p->IISLogonAttemptsTotal.current.Data); + + rrdset_done(p->st_logon_attemps); + } + + if (perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISUptime)) { + if (!p->st_service_uptime) { + snprintfz(id, RRD_ID_LENGTH_MAX, "website_%s_uptime", windows_shared_buffer); + netdata_fix_chart_name(id); + p->st_service_uptime = rrdset_create_localhost( + "iis", + id, + NULL, + "uptime", + "iis.website_uptime", + "Website uptime", + "seconds", + PLUGIN_WINDOWS_NAME, + "PerflibWebService", + PRIO_WEBSITE_IIS_UPTIME, + update_every, + RRDSET_TYPE_LINE); + + p->rd_service_uptime = rrddim_add(p->st_service_uptime, "uptime", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + + rrdlabels_add(p->st_service_uptime->rrdlabels, "website", windows_shared_buffer, RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer( + p->st_service_uptime, p->rd_service_uptime, (collected_number)p->IISUptime.current.Data); + + rrdset_done(p->st_service_uptime); + } + + if (perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISRequestsOptions) && + perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISRequestsGet) && + perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISRequestsPost) && + perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISRequestsHead) && + perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISRequestsPut) && + perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISRequestsDelete) && + perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISRequestsTrace) && + perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISRequestsMove) && + perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISRequestsCopy) && + perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISRequestsMkcol) && + perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISRequestsPropfind) && + perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISRequestsProppatch) && + perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISRequestsSearch) && + perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISRequestsLock) && + perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISRequestsUnlock) && + perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISRequestsOther)) { + if (!p->st_request_rate) { + snprintfz(id, RRD_ID_LENGTH_MAX, "website_%s_requests_rate", windows_shared_buffer); + netdata_fix_chart_name(id); + p->st_request_rate = rrdset_create_localhost( + "iis", + id, + NULL, + "requests", + "iis.website_requests_rate", + "Website requests rate", + "requests/s", + 
PLUGIN_WINDOWS_NAME, + "PerflibWebService", + PRIO_WEBSITE_IIS_REQUESTS_RATE, + update_every, + RRDSET_TYPE_LINE); + + p->rd_request_rate = rrddim_add(p->st_request_rate, "requests", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrdlabels_add(p->st_request_rate->rrdlabels, "website", windows_shared_buffer, RRDLABEL_SRC_AUTO); + } + + uint64_t requests = + p->IISRequestsOptions.current.Data + p->IISRequestsGet.current.Data + p->IISRequestsPost.current.Data + + p->IISRequestsHead.current.Data + p->IISRequestsPut.current.Data + p->IISRequestsDelete.current.Data + + p->IISRequestsTrace.current.Data + p->IISRequestsMove.current.Data + p->IISRequestsCopy.current.Data + + p->IISRequestsMkcol.current.Data + p->IISRequestsPropfind.current.Data + + p->IISRequestsProppatch.current.Data + p->IISRequestsSearch.current.Data + + p->IISRequestsLock.current.Data + p->IISRequestsUnlock.current.Data + p->IISRequestsOther.current.Data; + + rrddim_set_by_pointer(p->st_request_rate, p->rd_request_rate, (collected_number)requests); + + rrdset_done(p->st_request_rate); + + if (!p->st_request_by_type_rate) { + snprintfz(id, RRD_ID_LENGTH_MAX, "website_%s_requests_by_type_rate", windows_shared_buffer); + netdata_fix_chart_name(id); + p->st_request_by_type_rate = rrdset_create_localhost( + "iis", + id, + NULL, + "requests", + "iis.website_requests_by_type_rate", + "Website requests rate", + "requests/s", + PLUGIN_WINDOWS_NAME, + "PerflibWebService", + PRIO_WEBSITE_IIS_REQUESTS_BY_TYPE_RATE, + update_every, + RRDSET_TYPE_STACKED); + + p->rd_request_options_rate = rrddim_add(p->st_request_by_type_rate, "options", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + p->rd_request_get_rate = rrddim_add(p->st_request_by_type_rate, "get", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + p->rd_request_post_rate = rrddim_add(p->st_request_by_type_rate, "post", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + p->rd_request_head_rate = rrddim_add(p->st_request_by_type_rate, "head", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + p->rd_request_put_rate = rrddim_add(p->st_request_by_type_rate, "put", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + p->rd_request_delete_rate = rrddim_add(p->st_request_by_type_rate, "delete", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + p->rd_request_trace_rate = rrddim_add(p->st_request_by_type_rate, "trace", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + p->rd_request_move_rate = rrddim_add(p->st_request_by_type_rate, "move", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + p->rd_request_copy_rate = rrddim_add(p->st_request_by_type_rate, "copy", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + p->rd_request_mkcol_rate = rrddim_add(p->st_request_by_type_rate, "mkcol", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + p->rd_request_propfind_rate = rrddim_add(p->st_request_by_type_rate, "propfind", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + p->rd_request_proppatch_rate = rrddim_add(p->st_request_by_type_rate, "proppatch", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + p->rd_request_search_rate = rrddim_add(p->st_request_by_type_rate, "search", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + p->rd_request_lock_rate = rrddim_add(p->st_request_by_type_rate, "lock", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + p->rd_request_unlock_rate = rrddim_add(p->st_request_by_type_rate, "unlock", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + p->rd_request_other_rate = rrddim_add(p->st_request_by_type_rate, "other", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrdlabels_add(p->st_request_by_type_rate->rrdlabels, "website", windows_shared_buffer, RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer( + 
p->st_request_by_type_rate, + p->rd_request_options_rate, + (collected_number)p->IISRequestsOptions.current.Data); + rrddim_set_by_pointer( + p->st_request_by_type_rate, + p->rd_request_get_rate, + (collected_number)p->IISRequestsGet.current.Data); + rrddim_set_by_pointer( + p->st_request_by_type_rate, + p->rd_request_post_rate, + (collected_number)p->IISRequestsPost.current.Data); + rrddim_set_by_pointer( + p->st_request_by_type_rate, + p->rd_request_head_rate, + (collected_number)p->IISRequestsHead.current.Data); + rrddim_set_by_pointer( + p->st_request_by_type_rate, + p->rd_request_put_rate, + (collected_number)p->IISRequestsPut.current.Data); + rrddim_set_by_pointer( + p->st_request_by_type_rate, + p->rd_request_delete_rate, + (collected_number)p->IISRequestsDelete.current.Data); + rrddim_set_by_pointer( + p->st_request_by_type_rate, + p->rd_request_trace_rate, + (collected_number)p->IISRequestsTrace.current.Data); + rrddim_set_by_pointer( + p->st_request_by_type_rate, + p->rd_request_move_rate, + (collected_number)p->IISRequestsMove.current.Data); + rrddim_set_by_pointer( + p->st_request_by_type_rate, + p->rd_request_copy_rate, + (collected_number)p->IISRequestsCopy.current.Data); + rrddim_set_by_pointer( + p->st_request_by_type_rate, + p->rd_request_mkcol_rate, + (collected_number)p->IISRequestsMkcol.current.Data); + rrddim_set_by_pointer( + p->st_request_by_type_rate, + p->rd_request_propfind_rate, + (collected_number)p->IISRequestsPropfind.current.Data); + rrddim_set_by_pointer( + p->st_request_by_type_rate, + p->rd_request_proppatch_rate, + (collected_number)p->IISRequestsProppatch.current.Data); + rrddim_set_by_pointer( + p->st_request_by_type_rate, + p->rd_request_search_rate, + (collected_number)p->IISRequestsSearch.current.Data); + rrddim_set_by_pointer( + p->st_request_by_type_rate, + p->rd_request_lock_rate, + (collected_number)p->IISRequestsLock.current.Data); + rrddim_set_by_pointer( + p->st_request_by_type_rate, + p->rd_request_unlock_rate, + (collected_number)p->IISRequestsUnlock.current.Data); + rrddim_set_by_pointer( + p->st_request_by_type_rate, + p->rd_request_other_rate, + (collected_number)p->IISRequestsOther.current.Data); + + rrdset_done(p->st_request_by_type_rate); + } + } + + return true; +} + +int do_PerflibWebService(int update_every, usec_t dt __maybe_unused) { + static bool initialized = false; + + if (unlikely(!initialized)) { + initialize(); + initialized = true; + } + + DWORD id = RegistryFindIDByName("Web Service"); + if (id == PERFLIB_REGISTRY_NAME_NOT_FOUND) + return -1; + + PERF_DATA_BLOCK *pDataBlock = perflibGetPerformanceData(id); + if (!pDataBlock) + return -1; + + do_web_services(pDataBlock, update_every); + + return 0; +} diff --git a/src/collectors/windows.plugin/perflib.c b/src/collectors/windows.plugin/perflib.c deleted file mode 100644 index 4df48acfb..000000000 --- a/src/collectors/windows.plugin/perflib.c +++ /dev/null @@ -1,671 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include "perflib.h" - -// -------------------------------------------------------------------------------- - -// Retrieve a buffer that contains the specified performance data. -// The pwszSource parameter determines the data that GetRegistryBuffer returns. -// -// Typically, when calling RegQueryValueEx, you can specify zero for the size of the buffer -// and the RegQueryValueEx will set your size variable to the required buffer size. 
However, -// if the source is "Global" or one or more object index values, you will need to increment -// the buffer size in a loop until RegQueryValueEx does not return ERROR_MORE_DATA. -static LPBYTE getPerformanceData(const char *pwszSource) { - static __thread DWORD size = 0; - static __thread LPBYTE buffer = NULL; - - if(pwszSource == (const char *)0x01) { - freez(buffer); - buffer = NULL; - size = 0; - return NULL; - } - - if(!size) { - size = 32 * 1024; - buffer = mallocz(size); - } - - LONG status = ERROR_SUCCESS; - while ((status = RegQueryValueEx(HKEY_PERFORMANCE_DATA, pwszSource, - NULL, NULL, buffer, &size)) == ERROR_MORE_DATA) { - size *= 2; - buffer = reallocz(buffer, size); - } - - if (status != ERROR_SUCCESS) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, "RegQueryValueEx failed with 0x%x.\n", status); - return NULL; - } - - return buffer; -} - -void perflibFreePerformanceData(void) { - getPerformanceData((const char *)0x01); -} - -// -------------------------------------------------------------------------------------------------------------------- - -// Retrieve the raw counter value and any supporting data needed to calculate -// a displayable counter value. Use the counter type to determine the information -// needed to calculate the value. - -static BOOL getCounterData( - PERF_DATA_BLOCK *pDataBlock, - PERF_OBJECT_TYPE* pObject, - PERF_COUNTER_DEFINITION* pCounter, - PERF_COUNTER_BLOCK* pCounterDataBlock, - PRAW_DATA pRawData) -{ - PVOID pData = NULL; - UNALIGNED ULONGLONG* pullData = NULL; - PERF_COUNTER_DEFINITION* pBaseCounter = NULL; - BOOL fSuccess = TRUE; - - //Point to the raw counter data. - pData = (PVOID)((LPBYTE)pCounterDataBlock + pCounter->CounterOffset); - - //Now use the PERF_COUNTER_DEFINITION.CounterType value to figure out what - //other information you need to calculate a displayable value. - switch (pCounter->CounterType) { - - case PERF_COUNTER_COUNTER: - case PERF_COUNTER_QUEUELEN_TYPE: - case PERF_SAMPLE_COUNTER: - pRawData->Data = (ULONGLONG)(*(DWORD*)pData); - pRawData->Time = pDataBlock->PerfTime.QuadPart; - if (PERF_COUNTER_COUNTER == pCounter->CounterType || PERF_SAMPLE_COUNTER == pCounter->CounterType) - pRawData->Frequency = pDataBlock->PerfFreq.QuadPart; - break; - - case PERF_OBJ_TIME_TIMER: - pRawData->Data = (ULONGLONG)(*(DWORD*)pData); - pRawData->Time = pObject->PerfTime.QuadPart; - break; - - case PERF_COUNTER_100NS_QUEUELEN_TYPE: - pRawData->Data = *(UNALIGNED ULONGLONG *)pData; - pRawData->Time = pDataBlock->PerfTime100nSec.QuadPart; - break; - - case PERF_COUNTER_OBJ_TIME_QUEUELEN_TYPE: - pRawData->Data = *(UNALIGNED ULONGLONG *)pData; - pRawData->Time = pObject->PerfTime.QuadPart; - break; - - case PERF_COUNTER_TIMER: - case PERF_COUNTER_TIMER_INV: - case PERF_COUNTER_BULK_COUNT: - case PERF_COUNTER_LARGE_QUEUELEN_TYPE: - pullData = (UNALIGNED ULONGLONG *)pData; - pRawData->Data = *pullData; - pRawData->Time = pDataBlock->PerfTime.QuadPart; - if (pCounter->CounterType == PERF_COUNTER_BULK_COUNT) - pRawData->Frequency = pDataBlock->PerfFreq.QuadPart; - break; - - case PERF_COUNTER_MULTI_TIMER: - case PERF_COUNTER_MULTI_TIMER_INV: - pullData = (UNALIGNED ULONGLONG *)pData; - pRawData->Data = *pullData; - pRawData->Frequency = pDataBlock->PerfFreq.QuadPart; - pRawData->Time = pDataBlock->PerfTime.QuadPart; - - //These counter types have a second counter value that is adjacent to - //this counter value in the counter data block. The value is needed for - //the calculation. 
- if ((pCounter->CounterType & PERF_MULTI_COUNTER) == PERF_MULTI_COUNTER) { - ++pullData; - pRawData->MultiCounterData = *(DWORD*)pullData; - } - break; - - //These counters do not use any time reference. - case PERF_COUNTER_RAWCOUNT: - case PERF_COUNTER_RAWCOUNT_HEX: - case PERF_COUNTER_DELTA: - // some counters in these categories, have CounterSize = sizeof(ULONGLONG) - // but the official documentation always uses them as sizeof(DWORD) - pRawData->Data = (ULONGLONG)(*(DWORD*)pData); - pRawData->Time = 0; - break; - - case PERF_COUNTER_LARGE_RAWCOUNT: - case PERF_COUNTER_LARGE_RAWCOUNT_HEX: - case PERF_COUNTER_LARGE_DELTA: - pRawData->Data = *(UNALIGNED ULONGLONG*)pData; - pRawData->Time = 0; - break; - - //These counters use the 100ns time base in their calculation. - case PERF_100NSEC_TIMER: - case PERF_100NSEC_TIMER_INV: - case PERF_100NSEC_MULTI_TIMER: - case PERF_100NSEC_MULTI_TIMER_INV: - pullData = (UNALIGNED ULONGLONG*)pData; - pRawData->Data = *pullData; - pRawData->Time = pDataBlock->PerfTime100nSec.QuadPart; - - //These counter types have a second counter value that is adjacent to - //this counter value in the counter data block. The value is needed for - //the calculation. - if ((pCounter->CounterType & PERF_MULTI_COUNTER) == PERF_MULTI_COUNTER) { - ++pullData; - pRawData->MultiCounterData = *(DWORD*)pullData; - } - break; - - //These counters use two data points, this value and one from this counter's - //base counter. The base counter should be the next counter in the object's - //list of counters. - case PERF_SAMPLE_FRACTION: - case PERF_RAW_FRACTION: - pRawData->Data = (ULONGLONG)(*(DWORD*)pData); - pBaseCounter = pCounter + 1; //Get base counter - if ((pBaseCounter->CounterType & PERF_COUNTER_BASE) == PERF_COUNTER_BASE) { - pData = (PVOID)((LPBYTE)pCounterDataBlock + pBaseCounter->CounterOffset); - pRawData->Time = (LONGLONG)(*(DWORD*)pData); - } - else - fSuccess = FALSE; - break; - - case PERF_LARGE_RAW_FRACTION: - case PERF_PRECISION_SYSTEM_TIMER: - case PERF_PRECISION_100NS_TIMER: - case PERF_PRECISION_OBJECT_TIMER: - pRawData->Data = *(UNALIGNED ULONGLONG*)pData; - pBaseCounter = pCounter + 1; - if ((pBaseCounter->CounterType & PERF_COUNTER_BASE) == PERF_COUNTER_BASE) { - pData = (PVOID)((LPBYTE)pCounterDataBlock + pBaseCounter->CounterOffset); - pRawData->Time = *(LONGLONG*)pData; - } - else - fSuccess = FALSE; - break; - - case PERF_AVERAGE_TIMER: - case PERF_AVERAGE_BULK: - pRawData->Data = *(UNALIGNED ULONGLONG*)pData; - pBaseCounter = pCounter+1; - if ((pBaseCounter->CounterType & PERF_COUNTER_BASE) == PERF_COUNTER_BASE) { - pData = (PVOID)((LPBYTE)pCounterDataBlock + pBaseCounter->CounterOffset); - pRawData->Time = *(DWORD*)pData; - } - else - fSuccess = FALSE; - - if (pCounter->CounterType == PERF_AVERAGE_TIMER) - pRawData->Frequency = pDataBlock->PerfFreq.QuadPart; - break; - - //These are base counters and are used in calculations for other counters. - //This case should never be entered. - case PERF_SAMPLE_BASE: - case PERF_AVERAGE_BASE: - case PERF_COUNTER_MULTI_BASE: - case PERF_RAW_BASE: - case PERF_LARGE_RAW_BASE: - pRawData->Data = 0; - pRawData->Time = 0; - fSuccess = FALSE; - break; - - case PERF_ELAPSED_TIME: - pRawData->Data = *(UNALIGNED ULONGLONG*)pData; - pRawData->Time = pObject->PerfTime.QuadPart; - pRawData->Frequency = pObject->PerfFreq.QuadPart; - break; - - //These counters are currently not supported. 
- case PERF_COUNTER_TEXT: - case PERF_COUNTER_NODATA: - case PERF_COUNTER_HISTOGRAM_TYPE: - default: // unknown counter types - pRawData->Data = 0; - pRawData->Time = 0; - fSuccess = FALSE; - break; - } - - return fSuccess; -} - -// -------------------------------------------------------------------------------------------------------------------- - -static inline BOOL isValidPointer(PERF_DATA_BLOCK *pDataBlock __maybe_unused, void *ptr __maybe_unused) { -#ifdef NETDATA_INTERNAL_CHECKS - return (PBYTE)ptr >= (PBYTE)pDataBlock + pDataBlock->TotalByteLength ? FALSE : TRUE; -#else - return TRUE; -#endif -} - -static inline BOOL isValidStructure(PERF_DATA_BLOCK *pDataBlock __maybe_unused, void *ptr __maybe_unused, size_t length __maybe_unused) { -#ifdef NETDATA_INTERNAL_CHECKS - return (PBYTE)ptr + length > (PBYTE)pDataBlock + pDataBlock->TotalByteLength ? FALSE : TRUE; -#else - return TRUE; -#endif -} - -static inline PERF_DATA_BLOCK *getDataBlock(BYTE *pBuffer) { - PERF_DATA_BLOCK *pDataBlock = (PERF_DATA_BLOCK *)pBuffer; - - static WCHAR signature[] = { 'P', 'E', 'R', 'F' }; - - if(memcmp(pDataBlock->Signature, signature, sizeof(signature)) != 0) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "WINDOWS: PERFLIB: Invalid data block signature."); - return NULL; - } - - if(!isValidPointer(pDataBlock, (PBYTE)pDataBlock + pDataBlock->SystemNameOffset) || - !isValidStructure(pDataBlock, (PBYTE)pDataBlock + pDataBlock->SystemNameOffset, pDataBlock->SystemNameLength)) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "WINDOWS: PERFLIB: Invalid system name array."); - return NULL; - } - - return pDataBlock; -} - -static inline PERF_OBJECT_TYPE *getObjectType(PERF_DATA_BLOCK* pDataBlock, PERF_OBJECT_TYPE *lastObjectType) { - PERF_OBJECT_TYPE* pObjectType = NULL; - - if(!lastObjectType) - pObjectType = (PERF_OBJECT_TYPE *)((PBYTE)pDataBlock + pDataBlock->HeaderLength); - else if (lastObjectType->TotalByteLength != 0) - pObjectType = (PERF_OBJECT_TYPE *)((PBYTE)lastObjectType + lastObjectType->TotalByteLength); - - if(pObjectType && (!isValidPointer(pDataBlock, pObjectType) || !isValidStructure(pDataBlock, pObjectType, pObjectType->TotalByteLength))) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "WINDOWS: PERFLIB: Invalid ObjectType!"); - pObjectType = NULL; - } - - return pObjectType; -} - -inline PERF_OBJECT_TYPE *getObjectTypeByIndex(PERF_DATA_BLOCK *pDataBlock, DWORD ObjectNameTitleIndex) { - PERF_OBJECT_TYPE *po = NULL; - for(DWORD o = 0; o < pDataBlock->NumObjectTypes ; o++) { - po = getObjectType(pDataBlock, po); - if(po->ObjectNameTitleIndex == ObjectNameTitleIndex) - return po; - } - - return NULL; -} - -static inline PERF_INSTANCE_DEFINITION *getInstance( - PERF_DATA_BLOCK *pDataBlock, - PERF_OBJECT_TYPE *pObjectType, - PERF_COUNTER_BLOCK *lastCounterBlock -) { - PERF_INSTANCE_DEFINITION *pInstance; - - if(!lastCounterBlock) - pInstance = (PERF_INSTANCE_DEFINITION *)((PBYTE)pObjectType + pObjectType->DefinitionLength); - else - pInstance = (PERF_INSTANCE_DEFINITION *)((PBYTE)lastCounterBlock + lastCounterBlock->ByteLength); - - if(pInstance && (!isValidPointer(pDataBlock, pInstance) || !isValidStructure(pDataBlock, pInstance, pInstance->ByteLength))) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "WINDOWS: PERFLIB: Invalid Instance Definition!"); - pInstance = NULL; - } - - return pInstance; -} - -static inline PERF_COUNTER_BLOCK *getObjectTypeCounterBlock( - PERF_DATA_BLOCK *pDataBlock, - PERF_OBJECT_TYPE *pObjectType -) { - PERF_COUNTER_BLOCK *pCounterBlock = (PERF_COUNTER_BLOCK *)((PBYTE)pObjectType + 
pObjectType->DefinitionLength); - - if(pCounterBlock && (!isValidPointer(pDataBlock, pCounterBlock) || !isValidStructure(pDataBlock, pCounterBlock, pCounterBlock->ByteLength))) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "WINDOWS: PERFLIB: Invalid ObjectType CounterBlock!"); - pCounterBlock = NULL; - } - - return pCounterBlock; -} - -static inline PERF_COUNTER_BLOCK *getInstanceCounterBlock( - PERF_DATA_BLOCK *pDataBlock, - PERF_OBJECT_TYPE *pObjectType, - PERF_INSTANCE_DEFINITION *pInstance -) { - (void)pObjectType; - PERF_COUNTER_BLOCK *pCounterBlock = (PERF_COUNTER_BLOCK *)((PBYTE)pInstance + pInstance->ByteLength); - - if(pCounterBlock && (!isValidPointer(pDataBlock, pCounterBlock) || !isValidStructure(pDataBlock, pCounterBlock, pCounterBlock->ByteLength))) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "WINDOWS: PERFLIB: Invalid Instance CounterBlock!"); - pCounterBlock = NULL; - } - - return pCounterBlock; -} - -inline PERF_INSTANCE_DEFINITION *getInstanceByPosition(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, DWORD instancePosition) { - PERF_INSTANCE_DEFINITION *pi = NULL; - PERF_COUNTER_BLOCK *pc = NULL; - for(DWORD i = 0; i <= instancePosition ;i++) { - pi = getInstance(pDataBlock, pObjectType, pc); - pc = getInstanceCounterBlock(pDataBlock, pObjectType, pi); - } - return pi; -} - -static inline PERF_COUNTER_DEFINITION *getCounterDefinition(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, PERF_COUNTER_DEFINITION *lastCounterDefinition) { - PERF_COUNTER_DEFINITION *pCounterDefinition = NULL; - - if(!lastCounterDefinition) - pCounterDefinition = (PERF_COUNTER_DEFINITION *)((PBYTE)pObjectType + pObjectType->HeaderLength); - else - pCounterDefinition = (PERF_COUNTER_DEFINITION *)((PBYTE)lastCounterDefinition + lastCounterDefinition->ByteLength); - - if(pCounterDefinition && (!isValidPointer(pDataBlock, pCounterDefinition) || !isValidStructure(pDataBlock, pCounterDefinition, pCounterDefinition->ByteLength))) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "WINDOWS: PERFLIB: Invalid Counter Definition!"); - pCounterDefinition = NULL; - } - - return pCounterDefinition; -} - -// -------------------------------------------------------------------------------------------------------------------- - -static inline BOOL getEncodedStringToUTF8(char *dst, size_t dst_len, DWORD CodePage, char *start, DWORD length) { - WCHAR *tempBuffer; // Temporary buffer for Unicode data - DWORD charsCopied = 0; - BOOL free_tempBuffer; - - if (CodePage == 0) { - // Input is already Unicode (UTF-16) - tempBuffer = (WCHAR *)start; - charsCopied = length / sizeof(WCHAR); // Convert byte length to number of WCHARs - free_tempBuffer = FALSE; - } - else { - // Convert the multi-byte instance name to Unicode (UTF-16) - // Calculate maximum possible characters in UTF-16 - - int charCount = MultiByteToWideChar(CodePage, 0, start, (int)length, NULL, 0); - tempBuffer = (WCHAR *)malloc(charCount * sizeof(WCHAR)); - if (!tempBuffer) return FALSE; - - charsCopied = MultiByteToWideChar(CodePage, 0, start, (int)length, tempBuffer, charCount); - if (charsCopied == 0) { - free(tempBuffer); - dst[0] = '\0'; - return FALSE; - } - - free_tempBuffer = TRUE; - } - - // Now convert from Unicode (UTF-16) to UTF-8 - int bytesCopied = WideCharToMultiByte(CP_UTF8, 0, tempBuffer, (int)charsCopied, dst, (int)dst_len, NULL, NULL); - if (bytesCopied == 0) { - if (free_tempBuffer) free(tempBuffer); - dst[0] = '\0'; // Ensure the buffer is null-terminated even on failure - return FALSE; - } - - dst[bytesCopied] = '\0'; // Ensure buffer 
is null-terminated - if (free_tempBuffer) free(tempBuffer); // Free temporary buffer if used - return TRUE; -} - -inline BOOL getInstanceName(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, PERF_INSTANCE_DEFINITION *pInstance, - char *buffer, size_t bufferLen) { - (void)pDataBlock; - if (!pInstance || !buffer || !bufferLen) return FALSE; - - return getEncodedStringToUTF8(buffer, bufferLen, pObjectType->CodePage, - ((char *)pInstance + pInstance->NameOffset), pInstance->NameLength); -} - -inline BOOL getSystemName(PERF_DATA_BLOCK *pDataBlock, char *buffer, size_t bufferLen) { - return getEncodedStringToUTF8(buffer, bufferLen, 0, - ((char *)pDataBlock + pDataBlock->SystemNameOffset), pDataBlock->SystemNameLength); -} - -inline bool ObjectTypeHasInstances(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType) { - (void)pDataBlock; - return pObjectType->NumInstances != PERF_NO_INSTANCES && pObjectType->NumInstances > 0; -} - -PERF_OBJECT_TYPE *perflibFindObjectTypeByName(PERF_DATA_BLOCK *pDataBlock, const char *name) { - PERF_OBJECT_TYPE* pObjectType = NULL; - for(DWORD o = 0; o < pDataBlock->NumObjectTypes; o++) { - pObjectType = getObjectType(pDataBlock, pObjectType); - if(strcmp(name, RegistryFindNameByID(pObjectType->ObjectNameTitleIndex)) == 0) - return pObjectType; - } - - return NULL; -} - -PERF_INSTANCE_DEFINITION *perflibForEachInstance(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, PERF_INSTANCE_DEFINITION *lastInstance) { - if(!ObjectTypeHasInstances(pDataBlock, pObjectType)) - return NULL; - - return getInstance(pDataBlock, pObjectType, - lastInstance ? - getInstanceCounterBlock(pDataBlock, pObjectType, lastInstance) : - NULL ); -} - -bool perflibGetInstanceCounter(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, PERF_INSTANCE_DEFINITION *pInstance, COUNTER_DATA *cd) { - PERF_COUNTER_DEFINITION *pCounterDefinition = NULL; - for(DWORD c = 0; c < pObjectType->NumCounters ;c++) { - pCounterDefinition = getCounterDefinition(pDataBlock, pObjectType, pCounterDefinition); - if(!pCounterDefinition) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "WINDOWS: PERFLIB: Cannot read counter definition No %u (out of %u)", - c, pObjectType->NumCounters); - break; - } - - if(cd->id) { - if(cd->id != pCounterDefinition->CounterNameTitleIndex) - continue; - } - else { - if(strcmp(RegistryFindNameByID(pCounterDefinition->CounterNameTitleIndex), cd->key) != 0) - continue; - - cd->id = pCounterDefinition->CounterNameTitleIndex; - } - - cd->current.CounterType = cd->OverwriteCounterType ? 
cd->OverwriteCounterType : pCounterDefinition->CounterType; - PERF_COUNTER_BLOCK *pCounterBlock = getInstanceCounterBlock(pDataBlock, pObjectType, pInstance); - - cd->previous = cd->current; - cd->updated = getCounterData(pDataBlock, pObjectType, pCounterDefinition, pCounterBlock, &cd->current); - return cd->updated; - } - - cd->previous = cd->current; - cd->current = RAW_DATA_EMPTY; - cd->updated = false; - return false; -} - -bool perflibGetObjectCounter(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, COUNTER_DATA *cd) { - PERF_COUNTER_DEFINITION *pCounterDefinition = NULL; - for(DWORD c = 0; c < pObjectType->NumCounters ;c++) { - pCounterDefinition = getCounterDefinition(pDataBlock, pObjectType, pCounterDefinition); - if(!pCounterDefinition) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "WINDOWS: PERFLIB: Cannot read counter definition No %u (out of %u)", - c, pObjectType->NumCounters); - break; - } - - if(cd->id) { - if(cd->id != pCounterDefinition->CounterNameTitleIndex) - continue; - } - else { - if(strcmp(RegistryFindNameByID(pCounterDefinition->CounterNameTitleIndex), cd->key) != 0) - continue; - - cd->id = pCounterDefinition->CounterNameTitleIndex; - } - - cd->current.CounterType = cd->OverwriteCounterType ? cd->OverwriteCounterType : pCounterDefinition->CounterType; - PERF_COUNTER_BLOCK *pCounterBlock = getObjectTypeCounterBlock(pDataBlock, pObjectType); - - cd->previous = cd->current; - cd->updated = getCounterData(pDataBlock, pObjectType, pCounterDefinition, pCounterBlock, &cd->current); - return cd->updated; - } - - cd->previous = cd->current; - cd->current = RAW_DATA_EMPTY; - cd->updated = false; - return false; -} - -PERF_DATA_BLOCK *perflibGetPerformanceData(DWORD id) { - char source[24]; - snprintfz(source, sizeof(source), "%u", id); - - LPBYTE pData = (LPBYTE)getPerformanceData((id > 0) ? 
source : NULL); - if (!pData) return NULL; - - PERF_DATA_BLOCK *pDataBlock = getDataBlock(pData); - if(!pDataBlock) return NULL; - - return pDataBlock; -} - -int perflibQueryAndTraverse(DWORD id, - perflib_data_cb dataCb, - perflib_object_cb objectCb, - perflib_instance_cb instanceCb, - perflib_instance_counter_cb instanceCounterCb, - perflib_counter_cb counterCb, - void *data) { - int counters = -1; - - PERF_DATA_BLOCK *pDataBlock = perflibGetPerformanceData(id); - if(!pDataBlock) goto cleanup; - - bool do_data = true; - if(dataCb) - do_data = dataCb(pDataBlock, data); - - PERF_OBJECT_TYPE* pObjectType = NULL; - for(DWORD o = 0; do_data && o < pDataBlock->NumObjectTypes; o++) { - pObjectType = getObjectType(pDataBlock, pObjectType); - if(!pObjectType) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "WINDOWS: PERFLIB: Cannot read object type No %d (out of %d)", - o, pDataBlock->NumObjectTypes); - break; - } - - bool do_object = true; - if(objectCb) - do_object = objectCb(pDataBlock, pObjectType, data); - - if(!do_object) - continue; - - if(ObjectTypeHasInstances(pDataBlock, pObjectType)) { - PERF_INSTANCE_DEFINITION *pInstance = NULL; - PERF_COUNTER_BLOCK *pCounterBlock = NULL; - for(LONG i = 0; i < pObjectType->NumInstances ;i++) { - pInstance = getInstance(pDataBlock, pObjectType, pCounterBlock); - if(!pInstance) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "WINDOWS: PERFLIB: Cannot read Instance No %d (out of %d)", - i, pObjectType->NumInstances); - break; - } - - pCounterBlock = getInstanceCounterBlock(pDataBlock, pObjectType, pInstance); - if(!pCounterBlock) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "WINDOWS: PERFLIB: Cannot read CounterBlock of instance No %d (out of %d)", - i, pObjectType->NumInstances); - break; - } - - bool do_instance = true; - if(instanceCb) - do_instance = instanceCb(pDataBlock, pObjectType, pInstance, data); - - if(!do_instance) - continue; - - PERF_COUNTER_DEFINITION *pCounterDefinition = NULL; - for(DWORD c = 0; c < pObjectType->NumCounters ;c++) { - pCounterDefinition = getCounterDefinition(pDataBlock, pObjectType, pCounterDefinition); - if(!pCounterDefinition) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "WINDOWS: PERFLIB: Cannot read counter definition No %u (out of %u)", - c, pObjectType->NumCounters); - break; - } - - RAW_DATA sample = { - .CounterType = pCounterDefinition->CounterType, - }; - if(getCounterData(pDataBlock, pObjectType, pCounterDefinition, pCounterBlock, &sample)) { - // DisplayCalculatedValue(&sample, &sample); - - if(instanceCounterCb) { - instanceCounterCb(pDataBlock, pObjectType, pInstance, pCounterDefinition, &sample, data); - counters++; - } - } - } - - if(instanceCb) - instanceCb(pDataBlock, pObjectType, NULL, data); - } - } - else { - PERF_COUNTER_BLOCK *pCounterBlock = getObjectTypeCounterBlock(pDataBlock, pObjectType); - PERF_COUNTER_DEFINITION *pCounterDefinition = NULL; - for(DWORD c = 0; c < pObjectType->NumCounters ;c++) { - pCounterDefinition = getCounterDefinition(pDataBlock, pObjectType, pCounterDefinition); - if(!pCounterDefinition) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "WINDOWS: PERFLIB: Cannot read counter definition No %u (out of %u)", - c, pObjectType->NumCounters); - break; - } - - RAW_DATA sample = { - .CounterType = pCounterDefinition->CounterType, - }; - if(getCounterData(pDataBlock, pObjectType, pCounterDefinition, pCounterBlock, &sample)) { - // DisplayCalculatedValue(&sample, &sample); - - if(counterCb) { - counterCb(pDataBlock, pObjectType, pCounterDefinition, &sample, data); - counters++; - } - } - } - } - - if(objectCb) - 
objectCb(pDataBlock, NULL, data); - } - -cleanup: - return counters; -} diff --git a/src/collectors/windows.plugin/perflib.h b/src/collectors/windows.plugin/perflib.h deleted file mode 100644 index deba4e9a3..000000000 --- a/src/collectors/windows.plugin/perflib.h +++ /dev/null @@ -1,72 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#ifndef NETDATA_PERFLIB_H -#define NETDATA_PERFLIB_H - -#include "libnetdata/libnetdata.h" -#include - -const char *RegistryFindNameByID(DWORD id); -const char *RegistryFindHelpByID(DWORD id); -DWORD RegistryFindIDByName(const char *name); -#define PERFLIB_REGISTRY_NAME_NOT_FOUND (DWORD)-1 - -PERF_DATA_BLOCK *perflibGetPerformanceData(DWORD id); -void perflibFreePerformanceData(void); -PERF_OBJECT_TYPE *perflibFindObjectTypeByName(PERF_DATA_BLOCK *pDataBlock, const char *name); -PERF_INSTANCE_DEFINITION *perflibForEachInstance(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, PERF_INSTANCE_DEFINITION *lastInstance); - -typedef struct _rawdata { - DWORD CounterType; - DWORD MultiCounterData; // Second raw counter value for multi-valued counters - ULONGLONG Data; // Raw counter data - LONGLONG Time; // Is a time value or a base value - LONGLONG Frequency; -} RAW_DATA, *PRAW_DATA; - -typedef struct _counterdata { - DWORD id; - bool updated; - const char *key; - DWORD OverwriteCounterType; // if set, the counter type will be overwritten once read - RAW_DATA current; - RAW_DATA previous; -} COUNTER_DATA; - -#define RAW_DATA_EMPTY (RAW_DATA){ 0 } - -bool perflibGetInstanceCounter(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, PERF_INSTANCE_DEFINITION *pInstance, COUNTER_DATA *cd); -bool perflibGetObjectCounter(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, COUNTER_DATA *cd); - -typedef bool (*perflib_data_cb)(PERF_DATA_BLOCK *pDataBlock, void *data); -typedef bool (*perflib_object_cb)(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, void *data); -typedef bool (*perflib_instance_cb)(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, PERF_INSTANCE_DEFINITION *pInstance, void *data); -typedef bool (*perflib_instance_counter_cb)(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, PERF_INSTANCE_DEFINITION *pInstance, PERF_COUNTER_DEFINITION *pCounter, RAW_DATA *sample, void *data); -typedef bool (*perflib_counter_cb)(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, PERF_COUNTER_DEFINITION *pCounter, RAW_DATA *sample, void *data); - -int perflibQueryAndTraverse(DWORD id, - perflib_data_cb dataCb, - perflib_object_cb objectCb, - perflib_instance_cb instanceCb, - perflib_instance_counter_cb instanceCounterCb, - perflib_counter_cb counterCb, - void *data); - -bool ObjectTypeHasInstances(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType); - -BOOL getInstanceName(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, PERF_INSTANCE_DEFINITION *pInstance, - char *buffer, size_t bufferLen); - -BOOL getSystemName(PERF_DATA_BLOCK *pDataBlock, char *buffer, size_t bufferLen); - -PERF_OBJECT_TYPE *getObjectTypeByIndex(PERF_DATA_BLOCK *pDataBlock, DWORD ObjectNameTitleIndex); - -PERF_INSTANCE_DEFINITION *getInstanceByPosition( - PERF_DATA_BLOCK *pDataBlock, - PERF_OBJECT_TYPE *pObjectType, - DWORD instancePosition); - -void PerflibNamesRegistryInitialize(void); -void PerflibNamesRegistryUpdate(void); - -#endif //NETDATA_PERFLIB_H diff --git a/src/collectors/windows.plugin/windows-internals.h b/src/collectors/windows.plugin/windows-internals.h index 1b7cccc73..70d44b902 100644 --- 
a/src/collectors/windows.plugin/windows-internals.h +++ b/src/collectors/windows.plugin/windows-internals.h @@ -1,18 +1,17 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#ifndef NETDATA_WINDOWS_INTERNALS_H -#define NETDATA_WINDOWS_INTERNALS_H - -#include - -static inline ULONGLONG FileTimeToULL(FILETIME ft) { - ULARGE_INTEGER ul; - ul.LowPart = ft.dwLowDateTime; - ul.HighPart = ft.dwHighDateTime; - return ul.QuadPart; -} - -#include "perflib.h" -#include "perflib-rrd.h" - -#endif //NETDATA_WINDOWS_INTERNALS_H +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_WINDOWS_INTERNALS_H +#define NETDATA_WINDOWS_INTERNALS_H + +#include "libnetdata/libnetdata.h" + +static inline ULONGLONG FileTimeToULL(FILETIME ft) { + ULARGE_INTEGER ul; + ul.LowPart = ft.dwLowDateTime; + ul.HighPart = ft.dwHighDateTime; + return ul.QuadPart; +} + +#include "perflib-rrd.h" + +#endif //NETDATA_WINDOWS_INTERNALS_H diff --git a/src/collectors/windows.plugin/windows_plugin.c b/src/collectors/windows.plugin/windows_plugin.c index 35ef857be..74b72e0ce 100644 --- a/src/collectors/windows.plugin/windows_plugin.c +++ b/src/collectors/windows.plugin/windows_plugin.c @@ -13,18 +13,26 @@ static struct proc_module { } win_modules[] = { // system metrics - {.name = "GetSystemUptime", .dim = "GetSystemUptime", .func = do_GetSystemUptime}, - {.name = "GetSystemRAM", .dim = "GetSystemRAM", .func = do_GetSystemRAM}, + {.name = "GetSystemUptime", .dim = "GetSystemUptime", .enabled = CONFIG_BOOLEAN_YES, .func = do_GetSystemUptime}, + {.name = "GetSystemRAM", .dim = "GetSystemRAM", .enabled = CONFIG_BOOLEAN_YES, .func = do_GetSystemRAM}, // the same is provided by PerflibProcessor, with more detailed analysis - //{.name = "GetSystemCPU", .dim = "GetSystemCPU", .func = do_GetSystemCPU}, + //{.name = "GetSystemCPU", .dim = "GetSystemCPU", .enabled = CONFIG_BOOLEAN_YES, .func = do_GetSystemCPU}, - {.name = "PerflibProcesses", .dim = "PerflibProcesses", .func = do_PerflibProcesses}, - {.name = "PerflibProcessor", .dim = "PerflibProcessor", .func = do_PerflibProcessor}, - {.name = "PerflibMemory", .dim = "PerflibMemory", .func = do_PerflibMemory}, - {.name = "PerflibStorage", .dim = "PerflibStorage", .func = do_PerflibStorage}, - {.name = "PerflibNetwork", .dim = "PerflibNetwork", .func = do_PerflibNetwork}, - {.name = "PerflibObjects", .dim = "PerflibObjects", .func = do_PerflibObjects}, + {.name = "PerflibProcesses", .dim = "PerflibProcesses", .enabled = CONFIG_BOOLEAN_YES, .func = do_PerflibProcesses}, + {.name = "PerflibProcessor", .dim = "PerflibProcessor", .enabled = CONFIG_BOOLEAN_YES, .func = do_PerflibProcessor}, + {.name = "PerflibMemory", .dim = "PerflibMemory", .enabled = CONFIG_BOOLEAN_YES, .func = do_PerflibMemory}, + {.name = "PerflibStorage", .dim = "PerflibStorage", .enabled = CONFIG_BOOLEAN_YES, .func = do_PerflibStorage}, + {.name = "PerflibNetwork", .dim = "PerflibNetwork", .enabled = CONFIG_BOOLEAN_YES, .func = do_PerflibNetwork}, + {.name = "PerflibObjects", .dim = "PerflibObjects", .enabled = CONFIG_BOOLEAN_YES, .func = do_PerflibObjects}, + {.name = "PerflibHyperV", .dim = "PerflibHyperV", .enabled = CONFIG_BOOLEAN_YES, .func = do_PerflibHyperV}, + + {.name = "PerflibThermalZone", .dim = "PerflibThermalZone", .enabled = CONFIG_BOOLEAN_NO, .func = do_PerflibThermalZone}, + + {.name = "PerflibWebService", .dim = "PerflibWebService", .enabled = CONFIG_BOOLEAN_YES, .func = do_PerflibWebService}, + {.name = "PerflibMSSQL", .dim = "PerflibMSSQL", .enabled = CONFIG_BOOLEAN_YES, .func = do_PerflibMSSQL}, + 
+ {.name = "PerflibNetFramework", .dim = "PerflibNetFramework", .enabled = CONFIG_BOOLEAN_YES, .func = do_PerflibNetFramework}, // the terminator of this array {.name = NULL, .dim = NULL, .func = NULL} @@ -66,15 +74,14 @@ void *win_plugin_main(void *ptr) { for(i = 0; win_modules[i].name; i++) { struct proc_module *pm = &win_modules[i]; - pm->enabled = config_get_boolean("plugin:windows", pm->name, CONFIG_BOOLEAN_YES); + pm->enabled = config_get_boolean("plugin:windows", pm->name, pm->enabled); pm->rd = NULL; worker_register_job_name(i, win_modules[i].dim); } - usec_t step = localhost->rrd_update_every * USEC_PER_SEC; heartbeat_t hb; - heartbeat_init(&hb); + heartbeat_init(&hb, localhost->rrd_update_every * USEC_PER_SEC); #define LGS_MODULE_ID 0 @@ -86,7 +93,7 @@ void *win_plugin_main(void *ptr) { while(service_running(SERVICE_COLLECTORS)) { worker_is_idle(); - usec_t hb_dt = heartbeat_next(&hb, step); + usec_t hb_dt = heartbeat_next(&hb); if(unlikely(!service_running(SERVICE_COLLECTORS))) break; diff --git a/src/collectors/windows.plugin/windows_plugin.h b/src/collectors/windows.plugin/windows_plugin.h index 73c1ecda1..3852653ed 100644 --- a/src/collectors/windows.plugin/windows_plugin.h +++ b/src/collectors/windows.plugin/windows_plugin.h @@ -24,7 +24,82 @@ int do_PerflibProcesses(int update_every, usec_t dt); int do_PerflibProcessor(int update_every, usec_t dt); int do_PerflibMemory(int update_every, usec_t dt); int do_PerflibObjects(int update_every, usec_t dt); +int do_PerflibThermalZone(int update_every, usec_t dt); +int do_PerflibWebService(int update_every, usec_t dt); +int do_PerflibMSSQL(int update_every, usec_t dt); +int do_PerflibNetFramework(int update_every, usec_t dt); -#include "perflib.h" +enum PERFLIB_PRIO { + PRIO_WEBSITE_IIS_REQUESTS_RATE = 21000, // PRIO selected, because APPS is using 20YYY + PRIO_WEBSITE_IIS_REQUESTS_BY_TYPE_RATE, + PRIO_WEBSITE_IIS_TRAFFIC, + PRIO_WEBSITE_IIS_FTP_FILE_TRANSFER_RATE, + PRIO_WEBSITE_IIS_ACTIVE_CONNECTIONS_COUNT, + PRIO_WEBSITE_IIS_CONNECTIONS_ATTEMP, + PRIO_WEBSITE_IIS_USERS, + PRIO_WEBSITE_IIS_ISAPI_EXT_REQUEST_COUNT, + PRIO_WEBSITE_IIS_ISAPI_EXT_REQUEST_RATE, + PRIO_WEBSITE_IIS_ERRORS_RATE, + PRIO_WEBSITE_IIS_LOGON_ATTEMPTS, + PRIO_WEBSITE_IIS_UPTIME, + + PRIO_MSSQL_USER_CONNECTIONS, + + PRIO_MSSQL_DATABASE_TRANSACTIONS, + PRIO_MSSQL_DATABASE_ACTIVE_TRANSACTIONS, + PRIO_MSSQL_DATABASE_WRITE_TRANSACTIONS, + PRIO_MSSQL_DATABASE_BACKUP_RESTORE_OPERATIONS, + PRIO_MSSQL_DATABASE_LOG_FLUSHES, + PRIO_MSSQL_DATABASE_LOG_FLUSHED, + + PRIO_MSSQL_DATABASE_DATA_FILE_SIZE, + + PRIO_MSSQL_STATS_BATCH_REQUEST, + PRIO_MSSQL_STATS_COMPILATIONS, + PRIO_MSSQL_STATS_RECOMPILATIONS, + PRIO_MSSQL_STATS_AUTO_PARAMETRIZATION, + PRIO_MSSQL_STATS_SAFE_AUTO_PARAMETRIZATION, + + PRIO_MSSQL_BLOCKED_PROCESSES, + + PRIO_MSSQL_BUFF_CACHE_HIT_RATIO, + PRIO_MSSQL_BUFF_MAN_IOPS, + PRIO_MSSQL_BUFF_CHECKPOINT_PAGES, + PRIO_MSSQL_BUFF_METHODS_PAGE_SPLIT, + PRIO_MSSQL_BUFF_PAGE_LIFE_EXPECTANCY, + + PRIO_MSSQL_MEMMGR_CONNECTION_MEMORY_BYTES, + PRIO_MSSQL_MEMMGR_TOTAL_SERVER, + PRIO_MSSQL_MEMMGR_EXTERNAL_BENEFIT_OF_MEMORY, + PRIO_MSSQL_MEMMGR_PENDING_MEMORY_GRANTS, + + PRIO_MSSQL_LOCKS_WAIT, + PRIO_MSSQL_LOCKS_DEADLOCK, + + PRIO_MSSQL_SQL_ERRORS, + + PRIO_NETFRAMEWORK_CLR_EXCEPTION_THROWN, + PRIO_NETFRAMEWORK_CLR_EXCEPTION_FILTERS, + PRIO_NETFRAMEWORK_CLR_EXCEPTION_FINALLYS, + PRIO_NETFRAMEWORK_CLR_EXCEPTION_THROW_TO_CATCH_DEPTH, + + PRIO_NETFRAMEWORK_CLR_INTEROP_CCW, + PRIO_NETFRAMEWORK_CLR_INTEROP_MARSHALLING, + PRIO_NETFRAMEWORK_CLR_INTEROP_STUBS_CREATED, + + 
PRIO_NETFRAMEWORK_CLR_JIT_METHODS, + PRIO_NETFRAMEWORK_CLR_JIT_TIME, + PRIO_NETFRAMEWORK_CLR_JIT_STANDARD_FAILURES, + PRIO_NETFRAMEWORK_CLR_JIT_IL_BYTES, + + PRIO_NETFRAMEWORK_CLR_LOADING_HEAP_SIZE, + PRIO_NETFRAMEWORK_CLR_LOADING_APP_DOMAINS_LOADED, + PRIO_NETFRAMEWORK_CLR_LOADING_APP_DOMAINS_UNLOADED, + PRIO_NETFRAMEWORK_CLR_LOADING_ASSEMBLIES_LOADED, + PRIO_NETFRAMEWORK_CLR_LOADING_CLASSES_LOADED, + PRIO_NETFRAMEWORK_CLR_LOADING_CLASS_LOAD_FAILURE +}; + +int do_PerflibHyperV(int update_every, usec_t dt); #endif //NETDATA_WINDOWS_PLUGIN_H diff --git a/src/collectors/xenstat.plugin/integrations/xen_xcp-ng.md b/src/collectors/xenstat.plugin/integrations/xen_xcp-ng.md index 2aed4a06e..cd356202d 100644 --- a/src/collectors/xenstat.plugin/integrations/xen_xcp-ng.md +++ b/src/collectors/xenstat.plugin/integrations/xen_xcp-ng.md @@ -151,8 +151,8 @@ The file format is a modified INI syntax. The general structure is: [section2] option3 = some third value ``` -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/collectors/xenstat.plugin/xenstat_plugin.c b/src/collectors/xenstat.plugin/xenstat_plugin.c index e4b8a2bd0..63592b6fd 100644 --- a/src/collectors/xenstat.plugin/xenstat_plugin.c +++ b/src/collectors/xenstat.plugin/xenstat_plugin.c @@ -920,8 +920,6 @@ static void xenstat_send_domain_metrics() { } int main(int argc, char **argv) { - clocks_init(); - // ------------------------------------------------------------------------ // initialization of netdata plugin @@ -1022,12 +1020,11 @@ int main(int argc, char **argv) { time_t started_t = now_monotonic_sec(); size_t iteration; - usec_t step = netdata_update_every * USEC_PER_SEC; heartbeat_t hb; - heartbeat_init(&hb); + heartbeat_init(&hb, netdata_update_every * USEC_PER_SEC); for(iteration = 0; 1; iteration++) { - usec_t dt = heartbeat_next(&hb, step); + usec_t dt = heartbeat_next(&hb); if(unlikely(netdata_exit)) break; diff --git a/src/daemon/README.md b/src/daemon/README.md index bc2ec7757..da70f41e3 100644 --- a/src/daemon/README.md +++ b/src/daemon/README.md @@ -1,8 +1,8 @@ # Netdata daemon -The Netdata daemon is practically a synonym for the Netdata Agent, as it controls its -entire operation. We support various methods to -[start, stop, or restart the daemon](/packaging/installer/README.md#maintaining-a-netdata-agent-installation). +The Netdata daemon is practically a synonym for the Netdata Agent, as it controls its +entire operation. We support various methods to +[start, stop, or restart the daemon](/docs/netdata-agent/start-stop-restart.md). This document provides some basic information on the command line options, log files, and how to debug and troubleshoot @@ -104,9 +104,6 @@ The command line options of the Netdata 1.10.0 version are the following: -W simple-pattern pattern string Check if string matches pattern and exit. - -W "claim -token=TOKEN -rooms=ROOM1,ROOM2 url=https://app.netdata.cloud" - Connect the agent to the workspace Rooms pointed to by TOKEN and ROOM*. 
- Signals netdata handles: - HUP Close and reopen log files. @@ -119,10 +116,10 @@ You can send commands during runtime via [netdatacli](/src/cli/README.md). Netdata uses 4 log files: -1. `error.log` -2. `collector.log` -3. `access.log` -4. `debug.log` +1. `error.log` +2. `collector.log` +3. `access.log` +4. `debug.log` Any of them can be disabled by setting it to `/dev/null` or `none` in `netdata.conf`. By default `error.log`, `collector.log`, and `access.log` are enabled. `debug.log` is only enabled if debugging/tracing is also enabled @@ -136,8 +133,8 @@ The `error.log` is the `stderr` of the `netdata` daemon . For most Netdata programs (including standard external plugins shipped by netdata), the following lines may appear: -| tag | description | -|:-:|:----------| +| tag | description | +|:-------:|:--------------------------------------------------------------------------------------------------------------------------| | `INFO` | Something important the user should know. | | `ERROR` | Something that might disable a part of netdata.
The log line includes `errno` (if it is not zero). | | `FATAL` | Something prevented a program from running.
The log line includes `errno` (if it is not zero) and the program exited. | @@ -163,21 +160,21 @@ Data stored inside this file follows pattern already described for `error.log`. The `access.log` logs web requests. The format is: -```txt +```text DATE: ID: (sent/all = SENT_BYTES/ALL_BYTES bytes PERCENT_COMPRESSION%, prep/sent/total PREP_TIME/SENT_TIME/TOTAL_TIME ms): ACTION CODE URL ``` where: -- `ID` is the client ID. Client IDs are auto-incremented every time a client connects to netdata. -- `SENT_BYTES` is the number of bytes sent to the client, without the HTTP response header. -- `ALL_BYTES` is the number of bytes of the response, before compression. -- `PERCENT_COMPRESSION` is the percentage of traffic saved due to compression. -- `PREP_TIME` is the time in milliseconds needed to prepared the response. -- `SENT_TIME` is the time in milliseconds needed to sent the response to the client. -- `TOTAL_TIME` is the total time the request was inside Netdata (from the first byte of the request to the last byte +- `ID` is the client ID. Client IDs are auto-incremented every time a client connects to netdata. +- `SENT_BYTES` is the number of bytes sent to the client, without the HTTP response header. +- `ALL_BYTES` is the number of bytes of the response, before compression. +- `PERCENT_COMPRESSION` is the percentage of traffic saved due to compression. +- `PREP_TIME` is the time in milliseconds needed to prepare the response. +- `SENT_TIME` is the time in milliseconds needed to send the response to the client. +- `TOTAL_TIME` is the total time the request was inside Netdata (from the first byte of the request to the last byte of the response). -- `ACTION` can be `filecopy`, `options` (used in CORS), `data` (API call). +- `ACTION` can be `filecopy`, `options` (used in CORS), `data` (API call). ### debug.log @@ -194,20 +191,20 @@ issues with gaps in charts on busy systems while still keeping the impact on the You can set Netdata scheduling policy in `netdata.conf`, like this: -```conf +```text [global] process scheduling policy = idle ``` You can use the following: -| policy | description | -| :-----------------------: | :---------- | -| `idle` | use CPU only when there is spare - this is lower than nice 19 - it is the default for Netdata and it is so low that Netdata will run in "slow motion" under extreme system load, resulting in short (1-2 seconds) gaps at the charts. | +| policy | description | +|:-------------------------:|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `idle` | use CPU only when there is spare - this is lower than nice 19 - it is the default for Netdata and it is so low that Netdata will run in "slow motion" under extreme system load, resulting in short (1-2 seconds) gaps at the charts. | | `other`
or
`nice` | this is the default policy for all processes under Linux. It provides dynamic priorities based on the `nice` level of each process. Check below for setting this `nice` level for netdata. | -| `batch` | This policy is similar to `other` in that it schedules the thread according to its dynamic priority (based on the `nice` value). The difference is that this policy will cause the scheduler to always assume that the thread is CPU-intensive. Consequently, the scheduler will apply a small scheduling penalty with respect to wake-up behavior, so that this thread is mildly disfavored in scheduling decisions. | -| `fifo` | `fifo` can be used only with static priorities higher than 0, which means that when a `fifo` threads becomes runnable, it will always immediately preempt any currently running `other`, `batch`, or `idle` thread. `fifo` is a simple scheduling algorithm without time slicing. | -| `rr` | a simple enhancement of `fifo`. Everything described above for `fifo` also applies to `rr`, except that each thread is allowed to run only for a maximum time quantum. | +| `batch` | This policy is similar to `other` in that it schedules the thread according to its dynamic priority (based on the `nice` value). The difference is that this policy will cause the scheduler to always assume that the thread is CPU-intensive. Consequently, the scheduler will apply a small scheduling penalty with respect to wake-up behavior, so that this thread is mildly disfavored in scheduling decisions. | +| `fifo` | `fifo` can be used only with static priorities higher than 0, which means that when a `fifo` thread becomes runnable, it will always immediately preempt any currently running `other`, `batch`, or `idle` thread. `fifo` is a simple scheduling algorithm without time slicing. | +| `rr` | a simple enhancement of `fifo`. Everything described above for `fifo` also applies to `rr`, except that each thread is allowed to run only for a maximum time quantum. | | `keep`
or
`none` | do not set scheduling policy, priority or nice level - i.e. keep running with whatever it is set already (e.g. by systemd). | For more information see `man sched`. @@ -216,7 +213,7 @@ For more information see `man sched`. Once the policy is set to one of `rr` or `fifo`, the following will appear: -```conf +```text [global] process scheduling priority = 0 ``` @@ -228,7 +225,7 @@ important. When the policy is set to `other`, `nice`, or `batch`, the following will appear: -```conf +```text [global] process nice level = 19 ``` @@ -262,7 +259,7 @@ Run `systemctl daemon-reload` to reload these changes. Now, tell Netdata to keep these settings, as set by systemd, by editing `netdata.conf` and setting: -```conf +```text [global] process scheduling policy = keep ``` @@ -275,24 +272,20 @@ will be maintained by netdata. On a system that is not based on systemd, to make Netdata run with nice level -1 (a little bit higher to the default for all programs), edit `netdata.conf` and set: -```conf +```text [global] process scheduling policy = other process nice level = -1 ``` -then execute this to [restart Netdata](/packaging/installer/README.md#maintaining-a-netdata-agent-installation): - -```sh -sudo systemctl restart netdata -``` +then [restart Netdata](/docs/netdata-agent/start-stop-restart.md): #### Example 2: Netdata with nice -1 on systemd systems On a system that is based on systemd, to make Netdata run with nice level -1 (a little bit higher to the default for all programs), edit `netdata.conf` and set: -```conf +```text [global] process scheduling policy = keep ``` @@ -335,7 +328,7 @@ will roughly get the number of threads running. The system does this for speed. Having a separate memory arena for each thread, allows the threads to run in parallel in multi-core systems, without any locks between them. -This behaviour is system specific. For example, the chart above when running +This behavior is system specific. For example, the chart above when running Netdata on Alpine Linux (that uses **musl** instead of **glibc**) is this: ![image](https://cloud.githubusercontent.com/assets/2662304/19013807/7cf5878e-87e4-11e6-9651-082e68701eab.png) @@ -367,9 +360,9 @@ accounts the whole pages, even if parts of them are actually used). When you compile Netdata with debugging: -1. compiler optimizations for your CPU are disabled (Netdata will run somewhat slower) +1. compiler optimizations for your CPU are disabled (Netdata will run somewhat slower) -2. a lot of code is added all over netdata, to log debug messages to `/var/log/netdata/debug.log`. However, nothing is +2. a lot of code is added all over netdata, to log debug messages to `/var/log/netdata/debug.log`. However, nothing is printed by default. Netdata allows you to select which sections of Netdata you want to trace. Tracing is activated via the config option `debug flags`. It accepts a hex number, to enable or disable specific sections. You can find the options supported at [log.h](https://raw.githubusercontent.com/netdata/netdata/master/src/libnetdata/log/log.h). @@ -407,9 +400,9 @@ To provide stack traces, **you need to have Netdata compiled with debugging**. T Then you need to be in one of the following 2 cases: -1. Netdata crashes and you have a core dump +1. Netdata crashes and you have a core dump -2. you can reproduce the crash +2. you can reproduce the crash If you are not on these cases, you need to find a way to be (i.e. if your system does not produce core dumps, check your distro documentation to enable them). 
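The windows_plugin.c and xenstat_plugin.c hunks above, and the analytics.c hunk below, all migrate to the reworked heartbeat API: the tick period is now bound once in `heartbeat_init()`, and `heartbeat_next()` no longer takes a step argument. A minimal sketch of a collector loop under the new API — `heartbeat_t`, `heartbeat_init()`, `heartbeat_next()`, `USEC_PER_SEC`, and `service_running()` are netdata internals taken from the hunks in this patch, while `collector_loop()` and `collect_metrics()` are illustrative placeholders, not real functions:

```c
// Sketch only (not a drop-in collector): the heartbeat API change applied
// throughout this patch.
// Old API: heartbeat_init(&hb); ... usec_t dt = heartbeat_next(&hb, step);
// New API: the step is fixed at initialization, heartbeat_next() takes no step.
static void collector_loop(int update_every) {
    heartbeat_t hb;
    heartbeat_init(&hb, update_every * USEC_PER_SEC);   // period set once, in usecs

    while (service_running(SERVICE_COLLECTORS)) {
        usec_t dt = heartbeat_next(&hb);                // sleeps until the next tick,
                                                        // returns usecs since the last one
        collect_metrics(dt);                            // hypothetical per-tick work
    }
}
```

Binding the period at init time removes the per-call `step` bookkeeping each caller previously carried, which is why every converted loop in this patch also drops its local `usec_t step` variable.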
diff --git a/src/daemon/analytics.c b/src/daemon/analytics.c index 0e5c221c4..cebfdeb70 100644 --- a/src/daemon/analytics.c +++ b/src/daemon/analytics.c @@ -334,7 +334,7 @@ void analytics_alarms_notifications(void) if (instance) { char line[200 + 1]; - while (fgets(line, 200, instance->child_stdout_fp) != NULL) { + while (fgets(line, 200, spawn_popen_stdout(instance)) != NULL) { char *end = line; while (*end && *end != '\n') end++; @@ -375,7 +375,6 @@ static void analytics_get_install_type(struct rrdhost_system_info *system_info) void analytics_https(void) { BUFFER *b = buffer_create(30, NULL); -#ifdef ENABLE_HTTPS analytics_exporting_connectors_ssl(b); buffer_strcat(b, netdata_ssl_streaming_sender_ctx && @@ -383,9 +382,6 @@ void analytics_https(void) SSL_connection(&localhost->sender->ssl) ? "streaming|" : "|"); buffer_strcat(b, netdata_ssl_web_server_ctx ? "web" : ""); -#else - buffer_strcat(b, "||"); -#endif analytics_set_data_str(&analytics_data.netdata_config_https_available, (char *)buffer_tostring(b)); buffer_free(b); @@ -468,13 +464,8 @@ void analytics_alarms(void) */ void analytics_misc(void) { -#ifdef ENABLE_ACLK analytics_set_data(&analytics_data.netdata_host_cloud_available, "true"); analytics_set_data_str(&analytics_data.netdata_host_aclk_implementation, "Next Generation"); -#else - analytics_set_data(&analytics_data.netdata_host_cloud_available, "false"); - analytics_set_data_str(&analytics_data.netdata_host_aclk_implementation, ""); -#endif analytics_data.exporting_enabled = appconfig_get_boolean(&exporting_config, CONFIG_SECTION_EXPORTING, "enabled", CONFIG_BOOLEAN_NO); analytics_set_data(&analytics_data.netdata_config_exporting_enabled, analytics_data.exporting_enabled ? "true" : "false"); @@ -495,13 +486,11 @@ void analytics_misc(void) void analytics_aclk(void) { -#ifdef ENABLE_ACLK - if (aclk_connected) { + if (aclk_online()) { analytics_set_data(&analytics_data.netdata_host_aclk_available, "true"); analytics_set_data_str(&analytics_data.netdata_host_aclk_protocol, "New"); } else -#endif analytics_set_data(&analytics_data.netdata_host_aclk_available, "false"); } @@ -533,11 +522,9 @@ void analytics_gather_mutable_meta_data(void) analytics_alarms_notifications(); analytics_set_data( - &analytics_data.netdata_config_is_parent, (rrdhost_hosts_available() > 1 || configured_as_parent()) ? "true" : "false"); + &analytics_data.netdata_config_is_parent, (rrdhost_hosts_available() > 1 || stream_conf_configured_as_parent()) ? "true" : "false"); - char *claim_id = get_agent_claimid(); - analytics_set_data(&analytics_data.netdata_host_agent_claimed, claim_id ? "true" : "false"); - freez(claim_id); + analytics_set_data(&analytics_data.netdata_host_agent_claimed, is_agent_claimed() ? 
"true" : "false"); { char b[21]; @@ -582,14 +569,13 @@ void *analytics_main(void *ptr) CLEANUP_FUNCTION_REGISTER(analytics_main_cleanup) cleanup_ptr = ptr; unsigned int sec = 0; heartbeat_t hb; - heartbeat_init(&hb); - usec_t step_ut = USEC_PER_SEC; + heartbeat_init(&hb, USEC_PER_SEC); netdata_log_debug(D_ANALYTICS, "Analytics thread starts"); - //first delay after agent start + // first delay after agent start while (service_running(SERVICE_ANALYTICS) && likely(sec <= ANALYTICS_INIT_SLEEP_SEC)) { - heartbeat_next(&hb, step_ut); + heartbeat_next(&hb); sec++; } @@ -605,8 +591,8 @@ void *analytics_main(void *ptr) sec = 0; while (1) { - heartbeat_next(&hb, step_ut * 2); - sec += 2; + heartbeat_next(&hb); + sec++; if (unlikely(!service_running(SERVICE_ANALYTICS))) break; @@ -627,46 +613,15 @@ cleanup: return NULL; } -static const char *verify_required_directory(const char *dir) -{ - if (chdir(dir) == -1) - fatal("Cannot change directory to '%s'", dir); - - DIR *d = opendir(dir); - if (!d) - fatal("Cannot examine the contents of directory '%s'", dir); - closedir(d); - - return dir; -} - -static const char *verify_or_create_required_directory(const char *dir) { - int result; - - result = mkdir(dir, 0755); - - if (result != 0 && errno != EEXIST) - fatal("Cannot create required directory '%s'", dir); - - return verify_required_directory(dir); -} - /* * This is called after the rrdinit * These values will be sent on the START event */ -void set_late_global_environment(struct rrdhost_system_info *system_info) +void set_late_analytics_variables(struct rrdhost_system_info *system_info) { - analytics_set_data(&analytics_data.netdata_config_stream_enabled, default_rrdpush_enabled ? "true" : "false"); + analytics_set_data(&analytics_data.netdata_config_stream_enabled, stream_conf_send_enabled ? "true" : "false"); analytics_set_data_str(&analytics_data.netdata_config_memory_mode, (char *)rrd_memory_mode_name(default_rrd_memory_mode)); - -#ifdef DISABLE_CLOUD - analytics_set_data(&analytics_data.netdata_host_cloud_enabled, "false"); -#else - analytics_set_data( - &analytics_data.netdata_host_cloud_enabled, - appconfig_get_boolean_ondemand(&cloud_config, CONFIG_SECTION_GLOBAL, "enabled", netdata_cloud_enabled) ? 
"true" : "false"); -#endif + analytics_set_data(&analytics_data.netdata_host_cloud_enabled, "true"); #ifdef ENABLE_DBENGINE { @@ -679,11 +634,7 @@ void set_late_global_environment(struct rrdhost_system_info *system_info) } #endif -#ifdef ENABLE_HTTPS analytics_set_data(&analytics_data.netdata_config_https_enabled, "true"); -#else - analytics_set_data(&analytics_data.netdata_config_https_enabled, "false"); -#endif if (web_server_mode == WEB_SERVER_MODE_NONE) analytics_set_data(&analytics_data.netdata_config_web_enabled, "false"); @@ -831,119 +782,6 @@ void get_system_timezone(void) } } -void set_global_environment() { - { - char b[16]; - snprintfz(b, sizeof(b) - 1, "%d", default_rrd_update_every); - setenv("NETDATA_UPDATE_EVERY", b, 1); - } - - setenv("NETDATA_VERSION", NETDATA_VERSION, 1); - setenv("NETDATA_HOSTNAME", netdata_configured_hostname, 1); - setenv("NETDATA_CONFIG_DIR", verify_required_directory(netdata_configured_user_config_dir), 1); - setenv("NETDATA_USER_CONFIG_DIR", verify_required_directory(netdata_configured_user_config_dir), 1); - setenv("NETDATA_STOCK_CONFIG_DIR", verify_required_directory(netdata_configured_stock_config_dir), 1); - setenv("NETDATA_PLUGINS_DIR", verify_required_directory(netdata_configured_primary_plugins_dir), 1); - setenv("NETDATA_WEB_DIR", verify_required_directory(netdata_configured_web_dir), 1); - setenv("NETDATA_CACHE_DIR", verify_or_create_required_directory(netdata_configured_cache_dir), 1); - setenv("NETDATA_LIB_DIR", verify_or_create_required_directory(netdata_configured_varlib_dir), 1); - setenv("NETDATA_LOCK_DIR", verify_or_create_required_directory(netdata_configured_lock_dir), 1); - setenv("NETDATA_LOG_DIR", verify_or_create_required_directory(netdata_configured_log_dir), 1); - setenv("NETDATA_HOST_PREFIX", netdata_configured_host_prefix, 1); - - { - BUFFER *user_plugins_dirs = buffer_create(FILENAME_MAX, NULL); - - for (size_t i = 1; i < PLUGINSD_MAX_DIRECTORIES && plugin_directories[i]; i++) { - if (i > 1) - buffer_strcat(user_plugins_dirs, " "); - buffer_strcat(user_plugins_dirs, plugin_directories[i]); - } - - setenv("NETDATA_USER_PLUGINS_DIRS", buffer_tostring(user_plugins_dirs), 1); - - buffer_free(user_plugins_dirs); - } - - analytics_data.data_length = 0; - analytics_set_data(&analytics_data.netdata_config_stream_enabled, "null"); - analytics_set_data(&analytics_data.netdata_config_memory_mode, "null"); - analytics_set_data(&analytics_data.netdata_config_exporting_enabled, "null"); - analytics_set_data(&analytics_data.netdata_exporting_connectors, "null"); - analytics_set_data(&analytics_data.netdata_allmetrics_prometheus_used, "null"); - analytics_set_data(&analytics_data.netdata_allmetrics_shell_used, "null"); - analytics_set_data(&analytics_data.netdata_allmetrics_json_used, "null"); - analytics_set_data(&analytics_data.netdata_dashboard_used, "null"); - analytics_set_data(&analytics_data.netdata_collectors, "null"); - analytics_set_data(&analytics_data.netdata_collectors_count, "null"); - analytics_set_data(&analytics_data.netdata_buildinfo, "null"); - analytics_set_data(&analytics_data.netdata_config_page_cache_size, "null"); - analytics_set_data(&analytics_data.netdata_config_multidb_disk_quota, "null"); - analytics_set_data(&analytics_data.netdata_config_https_enabled, "null"); - analytics_set_data(&analytics_data.netdata_config_web_enabled, "null"); - analytics_set_data(&analytics_data.netdata_config_release_channel, "null"); - analytics_set_data(&analytics_data.netdata_mirrored_host_count, "null"); - 
analytics_set_data(&analytics_data.netdata_mirrored_hosts_reachable, "null"); - analytics_set_data(&analytics_data.netdata_mirrored_hosts_unreachable, "null"); - analytics_set_data(&analytics_data.netdata_notification_methods, "null"); - analytics_set_data(&analytics_data.netdata_alarms_normal, "null"); - analytics_set_data(&analytics_data.netdata_alarms_warning, "null"); - analytics_set_data(&analytics_data.netdata_alarms_critical, "null"); - analytics_set_data(&analytics_data.netdata_charts_count, "null"); - analytics_set_data(&analytics_data.netdata_metrics_count, "null"); - analytics_set_data(&analytics_data.netdata_config_is_parent, "null"); - analytics_set_data(&analytics_data.netdata_config_hosts_available, "null"); - analytics_set_data(&analytics_data.netdata_host_cloud_available, "null"); - analytics_set_data(&analytics_data.netdata_host_aclk_implementation, "null"); - analytics_set_data(&analytics_data.netdata_host_aclk_available, "null"); - analytics_set_data(&analytics_data.netdata_host_aclk_protocol, "null"); - analytics_set_data(&analytics_data.netdata_host_agent_claimed, "null"); - analytics_set_data(&analytics_data.netdata_host_cloud_enabled, "null"); - analytics_set_data(&analytics_data.netdata_config_https_available, "null"); - analytics_set_data(&analytics_data.netdata_install_type, "null"); - analytics_set_data(&analytics_data.netdata_config_is_private_registry, "null"); - analytics_set_data(&analytics_data.netdata_config_use_private_registry, "null"); - analytics_set_data(&analytics_data.netdata_config_oom_score, "null"); - analytics_set_data(&analytics_data.netdata_prebuilt_distro, "null"); - analytics_set_data(&analytics_data.netdata_fail_reason, "null"); - - analytics_data.prometheus_hits = 0; - analytics_data.shell_hits = 0; - analytics_data.json_hits = 0; - analytics_data.dashboard_hits = 0; - analytics_data.charts_count = 0; - analytics_data.metrics_count = 0; - analytics_data.exporting_enabled = false; - - char *default_port = appconfig_get(&netdata_config, CONFIG_SECTION_WEB, "default port", NULL); - int clean = 0; - if (!default_port) { - default_port = strdupz("19999"); - clean = 1; - } - - setenv("NETDATA_LISTEN_PORT", default_port, 1); - if (clean) - freez(default_port); - - // set the path we need - char path[4096], *p = getenv("PATH"); - if (!p) p = "/bin:/usr/bin"; - snprintfz(path, sizeof(path), "%s:%s", p, "/sbin:/usr/sbin:/usr/local/bin:/usr/local/sbin"); - setenv("PATH", config_get(CONFIG_SECTION_ENV_VARS, "PATH", path), 1); - - // python options - p = getenv("PYTHONPATH"); - if (!p) p = ""; - setenv("PYTHONPATH", config_get(CONFIG_SECTION_ENV_VARS, "PYTHONPATH", p), 1); - - // disable buffering for python plugins - setenv("PYTHONUNBUFFERED", "1", 1); - - // switch to standard locale for plugins - setenv("LC_ALL", "C", 1); -} - void analytics_statistic_send(const analytics_statistic_t *statistic) { if (!statistic) return; @@ -1053,7 +891,7 @@ void analytics_statistic_send(const analytics_statistic_t *statistic) { POPEN_INSTANCE *instance = spawn_popen_run(command_to_run); if (instance) { char buffer[4 + 1]; - char *s = fgets(buffer, 4, instance->child_stdout_fp); + char *s = fgets(buffer, 4, spawn_popen_stdout(instance)); int exit_code = spawn_popen_wait(instance); if (exit_code) @@ -1075,6 +913,58 @@ void analytics_statistic_send(const analytics_statistic_t *statistic) { freez(command_to_run); } +void analytics_reset(void) { + analytics_data.data_length = 0; + analytics_set_data(&analytics_data.netdata_config_stream_enabled, "null"); + 
analytics_set_data(&analytics_data.netdata_config_memory_mode, "null"); + analytics_set_data(&analytics_data.netdata_config_exporting_enabled, "null"); + analytics_set_data(&analytics_data.netdata_exporting_connectors, "null"); + analytics_set_data(&analytics_data.netdata_allmetrics_prometheus_used, "null"); + analytics_set_data(&analytics_data.netdata_allmetrics_shell_used, "null"); + analytics_set_data(&analytics_data.netdata_allmetrics_json_used, "null"); + analytics_set_data(&analytics_data.netdata_dashboard_used, "null"); + analytics_set_data(&analytics_data.netdata_collectors, "null"); + analytics_set_data(&analytics_data.netdata_collectors_count, "null"); + analytics_set_data(&analytics_data.netdata_buildinfo, "null"); + analytics_set_data(&analytics_data.netdata_config_page_cache_size, "null"); + analytics_set_data(&analytics_data.netdata_config_multidb_disk_quota, "null"); + analytics_set_data(&analytics_data.netdata_config_https_enabled, "null"); + analytics_set_data(&analytics_data.netdata_config_web_enabled, "null"); + analytics_set_data(&analytics_data.netdata_config_release_channel, "null"); + analytics_set_data(&analytics_data.netdata_mirrored_host_count, "null"); + analytics_set_data(&analytics_data.netdata_mirrored_hosts_reachable, "null"); + analytics_set_data(&analytics_data.netdata_mirrored_hosts_unreachable, "null"); + analytics_set_data(&analytics_data.netdata_notification_methods, "null"); + analytics_set_data(&analytics_data.netdata_alarms_normal, "null"); + analytics_set_data(&analytics_data.netdata_alarms_warning, "null"); + analytics_set_data(&analytics_data.netdata_alarms_critical, "null"); + analytics_set_data(&analytics_data.netdata_charts_count, "null"); + analytics_set_data(&analytics_data.netdata_metrics_count, "null"); + analytics_set_data(&analytics_data.netdata_config_is_parent, "null"); + analytics_set_data(&analytics_data.netdata_config_hosts_available, "null"); + analytics_set_data(&analytics_data.netdata_host_cloud_available, "null"); + analytics_set_data(&analytics_data.netdata_host_aclk_implementation, "null"); + analytics_set_data(&analytics_data.netdata_host_aclk_available, "null"); + analytics_set_data(&analytics_data.netdata_host_aclk_protocol, "null"); + analytics_set_data(&analytics_data.netdata_host_agent_claimed, "null"); + analytics_set_data(&analytics_data.netdata_host_cloud_enabled, "null"); + analytics_set_data(&analytics_data.netdata_config_https_available, "null"); + analytics_set_data(&analytics_data.netdata_install_type, "null"); + analytics_set_data(&analytics_data.netdata_config_is_private_registry, "null"); + analytics_set_data(&analytics_data.netdata_config_use_private_registry, "null"); + analytics_set_data(&analytics_data.netdata_config_oom_score, "null"); + analytics_set_data(&analytics_data.netdata_prebuilt_distro, "null"); + analytics_set_data(&analytics_data.netdata_fail_reason, "null"); + + analytics_data.prometheus_hits = 0; + analytics_data.shell_hits = 0; + analytics_data.json_hits = 0; + analytics_data.dashboard_hits = 0; + analytics_data.charts_count = 0; + analytics_data.metrics_count = 0; + analytics_data.exporting_enabled = false; +} + void analytics_init(void) { spinlock_init(&analytics_data.spinlock); diff --git a/src/daemon/analytics.h b/src/daemon/analytics.h index 747cf6070..b1d3c1386 100644 --- a/src/daemon/analytics.h +++ b/src/daemon/analytics.h @@ -76,9 +76,9 @@ struct analytics_data { bool exporting_enabled; }; -void set_late_global_environment(struct rrdhost_system_info *system_info); +struct 
rrdhost_system_info; +void set_late_analytics_variables(struct rrdhost_system_info *system_info); void analytics_free_data(void); -void set_global_environment(void); void analytics_log_shell(void); void analytics_log_json(void); void analytics_log_prometheus(void); @@ -86,6 +86,7 @@ void analytics_log_dashboard(void); void analytics_gather_mutable_meta_data(void); void analytics_report_oom_score(long long int score); void get_system_timezone(void); +void analytics_reset(void); void analytics_init(void); typedef struct { diff --git a/src/daemon/buildinfo.c b/src/daemon/buildinfo.c index ace96199a..3cbbe9035 100644 --- a/src/daemon/buildinfo.c +++ b/src/daemon/buildinfo.c @@ -1069,18 +1069,8 @@ __attribute__((constructor)) void initialize_build_info(void) { #endif #endif -#ifdef ENABLE_ACLK build_info_set_status(BIB_FEATURE_CLOUD, true); build_info_set_status(BIB_CONNECTIVITY_ACLK, true); -#else - build_info_set_status(BIB_FEATURE_CLOUD, false); -#ifdef DISABLE_CLOUD - build_info_set_value(BIB_FEATURE_CLOUD, "disabled"); -#else - build_info_set_value(BIB_FEATURE_CLOUD, "unavailable"); -#endif -#endif - build_info_set_status(BIB_FEATURE_HEALTH, true); build_info_set_status(BIB_FEATURE_STREAMING, true); build_info_set_status(BIB_FEATURE_BACKFILLING, true); @@ -1126,9 +1116,7 @@ __attribute__((constructor)) void initialize_build_info(void) { #ifdef ENABLE_WEBRTC build_info_set_status(BIB_CONNECTIVITY_WEBRTC, true); #endif -#ifdef ENABLE_HTTPS build_info_set_status(BIB_CONNECTIVITY_NATIVE_HTTPS, true); -#endif #if defined(HAVE_X509_VERIFY_PARAM_set1_host) && HAVE_X509_VERIFY_PARAM_set1_host == 1 build_info_set_status(BIB_CONNECTIVITY_TLS_HOST_VERIFY, true); #endif @@ -1162,9 +1150,7 @@ __attribute__((constructor)) void initialize_build_info(void) { #ifdef HAVE_LIBDATACHANNEL build_info_set_status(BIB_LIB_LIBDATACHANNEL, true); #endif -#ifdef ENABLE_OPENSSL build_info_set_status(BIB_LIB_OPENSSL, true); -#endif #ifdef ENABLE_JSONC build_info_set_status(BIB_LIB_JSONC, true); #endif @@ -1345,7 +1331,8 @@ char *get_value_from_key(char *buffer, char *key) { return s; } -void get_install_type(char **install_type, char **prebuilt_arch, char **prebuilt_dist) { +void get_install_type(char **install_type, char **prebuilt_arch __maybe_unused, char **prebuilt_dist __maybe_unused) { +#ifndef OS_WINDOWS char *install_type_filename; int install_type_filename_len = (strlen(netdata_configured_user_config_dir) + strlen(".install-type") + 3); @@ -1368,6 +1355,9 @@ void get_install_type(char **install_type, char **prebuilt_arch, char **prebuilt fclose(fp); } freez(install_type_filename); +#else + *install_type = strdupz("netdata_installer.exe"); +#endif } static struct { diff --git a/src/daemon/commands.c b/src/daemon/commands.c index f0637ad31..9d716d932 100644 --- a/src/daemon/commands.c +++ b/src/daemon/commands.c @@ -47,9 +47,7 @@ static cmd_status_t cmd_ping_execute(char *args, char **message); static cmd_status_t cmd_aclk_state(char *args, char **message); static cmd_status_t cmd_version(char *args, char **message); static cmd_status_t cmd_dumpconfig(char *args, char **message); -#ifdef ENABLE_ACLK static cmd_status_t cmd_remove_node(char *args, char **message); -#endif static command_info_t command_info_array[] = { {"help", cmd_help_execute, CMD_TYPE_HIGH_PRIORITY}, // show help menu @@ -65,9 +63,7 @@ static command_info_t command_info_array[] = { {"aclk-state", cmd_aclk_state, CMD_TYPE_ORTHOGONAL}, {"version", cmd_version, CMD_TYPE_ORTHOGONAL}, {"dumpconfig", cmd_dumpconfig, CMD_TYPE_ORTHOGONAL}, -#ifdef 
ENABLE_ACLK {"remove-stale-node", cmd_remove_node, CMD_TYPE_ORTHOGONAL} -#endif }; /* Mutexes for commands of type CMD_TYPE_ORTHOGONAL */ @@ -119,8 +115,6 @@ static cmd_status_t cmd_help_execute(char *args, char **message) " Reload health configuration.\n\n" "reload-labels\n" " Reload all labels.\n\n" - "save-database\n" - " Save internal DB to disk for memory mode save.\n\n" "reopen-logs\n" " Close and reopen log files.\n\n" "shutdown-agent\n" @@ -135,10 +129,8 @@ static cmd_status_t cmd_help_execute(char *args, char **message) " Returns current state of ACLK and Cloud connection. (optionally in json).\n\n" "dumpconfig\n" " Returns the current netdata.conf on stdout.\n\n" -#ifdef ENABLE_ACLK "remove-stale-node \n" " Unregisters and removes a node from the cloud.\n\n" -#endif "version\n" " Returns the netdata version.\n", MAX_COMMAND_LENGTH - 1); @@ -193,17 +185,42 @@ static cmd_status_t cmd_fatal_execute(char *args, char **message) return CMD_STATUS_SUCCESS; } -static cmd_status_t cmd_reload_claiming_state_execute(char *args, char **message) -{ - (void)args; - (void)message; -#if defined(DISABLE_CLOUD) || !defined(ENABLE_ACLK) - netdata_log_info("The claiming feature has been explicitly disabled"); - *message = strdupz("This agent cannot be claimed, it was built without support for Cloud"); - return CMD_STATUS_FAILURE; -#endif - netdata_log_info("COMMAND: Reloading Agent Claiming configuration."); - claim_reload_all(); +static cmd_status_t cmd_reload_claiming_state_execute(char *args __maybe_unused, char **message) { + char msg[1024]; + + CLOUD_STATUS status = claim_reload_and_wait_online(); + switch(status) { + case CLOUD_STATUS_ONLINE: + snprintfz(msg, sizeof(msg), + "Netdata Agent is claimed to Netdata Cloud and is currently online."); + break; + + case CLOUD_STATUS_BANNED: + snprintfz(msg, sizeof(msg), + "Netdata Agent is claimed to Netdata Cloud, but it is banned."); + break; + + default: + case CLOUD_STATUS_AVAILABLE: + snprintfz(msg, sizeof(msg), + "Netdata Agent is not claimed to Netdata Cloud: %s", + claim_agent_failure_reason_get()); + break; + + case CLOUD_STATUS_OFFLINE: + snprintfz(msg, sizeof(msg), + "Netdata Agent is claimed to Netdata Cloud, but it is currently offline: %s", + cloud_status_aclk_offline_reason()); + break; + + case CLOUD_STATUS_INDIRECT: + snprintfz(msg, sizeof(msg), + "Netdata Agent is not claimed to Netdata Cloud, but it is currently online via parent."); + break; + } + + *message = strdupz(msg); + return CMD_STATUS_SUCCESS; } @@ -242,9 +259,8 @@ static cmd_status_t cmd_read_config_execute(char *args, char **message) const char *conf_file = temp; /* "cloud" is cloud.conf, otherwise netdata.conf */ struct config *tmp_config = strcmp(conf_file, "cloud") ? 
&netdata_config : &cloud_config; - char *value = appconfig_get(tmp_config, temp + offset + 1, temp + offset2 + 1, NULL); - if (value == NULL) - { + const char *value = appconfig_get(tmp_config, temp + offset + 1, temp + offset2 + 1, NULL); + if (value == NULL) { netdata_log_error("Cannot execute read-config conf_file=%s section=%s / key=%s because no value set", conf_file, temp + offset + 1, @@ -252,13 +268,11 @@ static cmd_status_t cmd_read_config_execute(char *args, char **message) freez(temp); return CMD_STATUS_FAILURE; } - else - { + else { (*message) = strdupz(value); freez(temp); return CMD_STATUS_SUCCESS; } - } static cmd_status_t cmd_write_config_execute(char *args, char **message) @@ -306,17 +320,10 @@ static cmd_status_t cmd_ping_execute(char *args, char **message) static cmd_status_t cmd_aclk_state(char *args, char **message) { netdata_log_info("COMMAND: Reopening aclk/cloud state."); -#ifdef ENABLE_ACLK if (strstr(args, "json")) *message = aclk_state_json(); else *message = aclk_state(); -#else - if (strstr(args, "json")) - *message = strdupz("{\"aclk-available\":false}"); - else - *message = strdupz("ACLK Available: No"); -#endif return CMD_STATUS_SUCCESS; } @@ -338,14 +345,12 @@ static cmd_status_t cmd_dumpconfig(char *args, char **message) (void)args; BUFFER *wb = buffer_create(1024, NULL); - config_generate(wb, 0); + netdata_conf_generate(wb, 0); *message = strdupz(buffer_tostring(wb)); buffer_free(wb); return CMD_STATUS_SUCCESS; } -#ifdef ENABLE_ACLK - static int remove_ephemeral_host(BUFFER *wb, RRDHOST *host, bool report_error) { if (host == localhost) { @@ -362,11 +367,10 @@ static int remove_ephemeral_host(BUFFER *wb, RRDHOST *host, bool report_error) if (!rrdhost_option_check(host, RRDHOST_OPTION_EPHEMERAL_HOST)) { rrdhost_option_set(host, RRDHOST_OPTION_EPHEMERAL_HOST); - sql_set_host_label(&host->host_uuid, "_is_ephemeral", "true"); + sql_set_host_label(&host->host_id.uuid, "_is_ephemeral", "true"); aclk_host_state_update(host, 0, 0); unregister_node(host->machine_guid); - freez(host->node_id); - host->node_id = NULL; + host->node_id = UUID_ZERO; buffer_sprintf(wb, "Unregistering node with machine guid %s, hostname = %s", host->machine_guid, rrdhost_hostname(host)); rrd_wrlock(); rrdhost_free___while_having_rrd_wrlock(host, true); @@ -438,7 +442,6 @@ done: buffer_free(wb); return CMD_STATUS_SUCCESS; } -#endif static void cmd_lock_exclusive(unsigned index) { @@ -509,7 +512,7 @@ static void pipe_write_cb(uv_write_t* req, int status) static inline void add_char_to_command_reply(BUFFER *reply_string, unsigned *reply_string_size, char character) { - buffer_fast_charcat(reply_string, character); + buffer_putc(reply_string, character); *reply_string_size +=1; } diff --git a/src/daemon/commands.h b/src/daemon/commands.h index 14c2ec49e..8327d28d2 100644 --- a/src/daemon/commands.h +++ b/src/daemon/commands.h @@ -20,9 +20,7 @@ typedef enum cmd { CMD_ACLK_STATE, CMD_VERSION, CMD_DUMPCONFIG, -#ifdef ENABLE_ACLK CMD_REMOVE_NODE, -#endif CMD_TOTAL_COMMANDS } cmd_t; diff --git a/src/daemon/common.c b/src/daemon/common.c deleted file mode 100644 index 6c824eec6..000000000 --- a/src/daemon/common.c +++ /dev/null @@ -1,197 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include "common.h" - -char *netdata_configured_hostname = NULL; -char *netdata_configured_user_config_dir = CONFIG_DIR; -char *netdata_configured_stock_config_dir = LIBCONFIG_DIR; -char *netdata_configured_log_dir = LOG_DIR; -char *netdata_configured_primary_plugins_dir = PLUGINS_DIR; -char 
*netdata_configured_web_dir = WEB_DIR; -char *netdata_configured_cache_dir = CACHE_DIR; -char *netdata_configured_varlib_dir = VARLIB_DIR; -char *netdata_configured_lock_dir = VARLIB_DIR "/lock"; -char *netdata_configured_home_dir = VARLIB_DIR; -char *netdata_configured_host_prefix = NULL; -char *netdata_configured_timezone = NULL; -char *netdata_configured_abbrev_timezone = NULL; -int32_t netdata_configured_utc_offset = 0; - -bool netdata_ready = false; - -#if defined( DISABLE_CLOUD ) || !defined( ENABLE_ACLK ) -int netdata_cloud_enabled = CONFIG_BOOLEAN_NO; -#else -int netdata_cloud_enabled = CONFIG_BOOLEAN_AUTO; -#endif - -long get_netdata_cpus(void) { - static long processors = 0; - - if(processors) - return processors; - - long cores_proc_stat = os_get_system_cpus_cached(false, true); - long cores_cpuset_v1 = (long)os_read_cpuset_cpus("/sys/fs/cgroup/cpuset/cpuset.cpus", cores_proc_stat); - long cores_cpuset_v2 = (long)os_read_cpuset_cpus("/sys/fs/cgroup/cpuset.cpus", cores_proc_stat); - - if(cores_cpuset_v2) - processors = cores_cpuset_v2; - else if(cores_cpuset_v1) - processors = cores_cpuset_v1; - else - processors = cores_proc_stat; - - long cores_user_configured = config_get_number(CONFIG_SECTION_GLOBAL, "cpu cores", processors); - - errno_clear(); - internal_error(true, - "System CPUs: %ld, (" - "system: %ld, cgroups cpuset v1: %ld, cgroups cpuset v2: %ld, netdata.conf: %ld" - ")" - , processors - , cores_proc_stat - , cores_cpuset_v1 - , cores_cpuset_v2 - , cores_user_configured - ); - - processors = cores_user_configured; - - if(processors < 1) - processors = 1; - - return processors; -} - -const char *cloud_status_to_string(CLOUD_STATUS status) { - switch(status) { - default: - case CLOUD_STATUS_UNAVAILABLE: - return "unavailable"; - - case CLOUD_STATUS_AVAILABLE: - return "available"; - - case CLOUD_STATUS_DISABLED: - return "disabled"; - - case CLOUD_STATUS_BANNED: - return "banned"; - - case CLOUD_STATUS_OFFLINE: - return "offline"; - - case CLOUD_STATUS_ONLINE: - return "online"; - } -} - -CLOUD_STATUS cloud_status(void) { -#ifdef ENABLE_ACLK - if(aclk_disable_runtime) - return CLOUD_STATUS_BANNED; - - if(aclk_connected) - return CLOUD_STATUS_ONLINE; - - if(netdata_cloud_enabled == CONFIG_BOOLEAN_YES) { - char *agent_id = get_agent_claimid(); - bool claimed = agent_id != NULL; - freez(agent_id); - - if(claimed) - return CLOUD_STATUS_OFFLINE; - } - - if(netdata_cloud_enabled != CONFIG_BOOLEAN_NO) - return CLOUD_STATUS_AVAILABLE; - - return CLOUD_STATUS_DISABLED; -#else - return CLOUD_STATUS_UNAVAILABLE; -#endif -} - -time_t cloud_last_change(void) { -#ifdef ENABLE_ACLK - time_t ret = MAX(last_conn_time_mqtt, last_disconnect_time); - if(!ret) ret = netdata_start_time; - return ret; -#else - return netdata_start_time; -#endif -} - -time_t cloud_next_connection_attempt(void) { -#ifdef ENABLE_ACLK - return next_connection_attempt; -#else - return 0; -#endif -} - -size_t cloud_connection_id(void) { -#ifdef ENABLE_ACLK - return aclk_connection_counter; -#else - return 0; -#endif -} - -const char *cloud_offline_reason() { -#ifdef ENABLE_ACLK - if(!netdata_cloud_enabled) - return "disabled"; - - if(aclk_disable_runtime) - return "banned"; - - return aclk_status_to_string(); -#else - return "disabled"; -#endif -} - -const char *cloud_base_url() { -#ifdef ENABLE_ACLK - return aclk_cloud_base_url; -#else - return NULL; -#endif -} - -CLOUD_STATUS buffer_json_cloud_status(BUFFER *wb, time_t now_s) { - CLOUD_STATUS status = cloud_status(); - - buffer_json_member_add_object(wb, "cloud"); 
- { - size_t id = cloud_connection_id(); - time_t last_change = cloud_last_change(); - time_t next_connect = cloud_next_connection_attempt(); - buffer_json_member_add_uint64(wb, "id", id); - buffer_json_member_add_string(wb, "status", cloud_status_to_string(status)); - buffer_json_member_add_time_t(wb, "since", last_change); - buffer_json_member_add_time_t(wb, "age", now_s - last_change); - - if (status != CLOUD_STATUS_ONLINE) - buffer_json_member_add_string(wb, "reason", cloud_offline_reason()); - - if (status == CLOUD_STATUS_OFFLINE && next_connect > now_s) { - buffer_json_member_add_time_t(wb, "next_check", next_connect); - buffer_json_member_add_time_t(wb, "next_in", next_connect - now_s); - } - - if (cloud_base_url()) - buffer_json_member_add_string(wb, "url", cloud_base_url()); - - char *claim_id = get_agent_claimid(); - if(claim_id) { - buffer_json_member_add_string(wb, "claim_id", claim_id); - freez(claim_id); - } - } - buffer_json_object_close(wb); // cloud - - return status; -} diff --git a/src/daemon/common.h b/src/daemon/common.h index 1dea19c5b..9f6efa3ef 100644 --- a/src/daemon/common.h +++ b/src/daemon/common.h @@ -4,36 +4,13 @@ #define NETDATA_COMMON_H 1 #include "libnetdata/libnetdata.h" -#include "event_loop.h" - -// ---------------------------------------------------------------------------- -// shortcuts for the default netdata configuration - -#define config_load(filename, overwrite_used, section) appconfig_load(&netdata_config, filename, overwrite_used, section) -#define config_get(section, name, default_value) appconfig_get(&netdata_config, section, name, default_value) -#define config_get_number(section, name, value) appconfig_get_number(&netdata_config, section, name, value) -#define config_get_float(section, name, value) appconfig_get_float(&netdata_config, section, name, value) -#define config_get_boolean(section, name, value) appconfig_get_boolean(&netdata_config, section, name, value) -#define config_get_boolean_ondemand(section, name, value) appconfig_get_boolean_ondemand(&netdata_config, section, name, value) -#define config_get_duration(section, name, value) appconfig_get_duration(&netdata_config, section, name, value) - -#define config_set(section, name, default_value) appconfig_set(&netdata_config, section, name, default_value) -#define config_set_default(section, name, value) appconfig_set_default(&netdata_config, section, name, value) -#define config_set_number(section, name, value) appconfig_set_number(&netdata_config, section, name, value) -#define config_set_float(section, name, value) appconfig_set_float(&netdata_config, section, name, value) -#define config_set_boolean(section, name, value) appconfig_set_boolean(&netdata_config, section, name, value) - -#define config_exists(section, name) appconfig_exists(&netdata_config, section, name) -#define config_move(section_old, name_old, section_new, name_new) appconfig_move(&netdata_config, section_old, name_old, section_new, name_new) - -#define config_generate(buffer, only_changed) appconfig_generate(&netdata_config, buffer, only_changed) - -#define config_section_destroy(section) appconfig_section_destroy_non_loaded(&netdata_config, section) -#define config_section_option_destroy(section, name) appconfig_section_option_destroy_non_loaded(&netdata_config, section, name) +#include "libuv_workers.h" // ---------------------------------------------------------------------------- // netdata include files +#include "web/api/maps/maps.h" + #include "daemon/config/dyncfg.h" #include "global_statistics.h" @@ 
-55,7 +32,6 @@ // streaming metrics between netdata servers #include "streaming/rrdpush.h" - // anomaly detection #include "ml/ml.h" @@ -94,45 +70,28 @@ #include "analytics.h" // global netdata daemon variables -extern char *netdata_configured_hostname; -extern char *netdata_configured_user_config_dir; -extern char *netdata_configured_stock_config_dir; -extern char *netdata_configured_log_dir; -extern char *netdata_configured_primary_plugins_dir; -extern char *netdata_configured_web_dir; -extern char *netdata_configured_cache_dir; -extern char *netdata_configured_varlib_dir; -extern char *netdata_configured_lock_dir; -extern char *netdata_configured_home_dir; -extern char *netdata_configured_host_prefix; -extern char *netdata_configured_timezone; -extern char *netdata_configured_abbrev_timezone; +extern const char *netdata_configured_hostname; +extern const char *netdata_configured_user_config_dir; +extern const char *netdata_configured_stock_config_dir; +extern const char *netdata_configured_log_dir; +extern const char *netdata_configured_primary_plugins_dir; +extern const char *netdata_configured_web_dir; +extern const char *netdata_configured_cache_dir; +extern const char *netdata_configured_varlib_dir; +extern const char *netdata_configured_lock_dir; +extern const char *netdata_configured_cloud_dir; +extern const char *netdata_configured_home_dir; +extern const char *netdata_configured_host_prefix; +extern const char *netdata_configured_timezone; +extern const char *netdata_configured_abbrev_timezone; extern int32_t netdata_configured_utc_offset; extern int netdata_anonymous_statistics_enabled; extern bool netdata_ready; -extern int netdata_cloud_enabled; - extern time_t netdata_start_time; long get_netdata_cpus(void); -typedef enum __attribute__((packed)) { - CLOUD_STATUS_UNAVAILABLE = 0, // cloud and aclk functionality is not available on this agent - CLOUD_STATUS_AVAILABLE, // cloud and aclk functionality is available, but the agent is not claimed - CLOUD_STATUS_DISABLED, // cloud and aclk functionality is available, but it is disabled - CLOUD_STATUS_BANNED, // the agent has been banned from cloud - CLOUD_STATUS_OFFLINE, // the agent tries to connect to cloud, but cannot do it - CLOUD_STATUS_ONLINE, // the agent is connected to cloud -} CLOUD_STATUS; - -const char *cloud_status_to_string(CLOUD_STATUS status); -CLOUD_STATUS cloud_status(void); -time_t cloud_last_change(void); -time_t cloud_next_connection_attempt(void); -size_t cloud_connection_id(void); -const char *cloud_offline_reason(void); -const char *cloud_base_url(void); -CLOUD_STATUS buffer_json_cloud_status(BUFFER *wb, time_t now_s); +void set_environment_for_plugins_and_scripts(void); #endif /* NETDATA_COMMON_H */ diff --git a/src/daemon/config/README.md b/src/daemon/config/README.md index 3c0912fba..7217ec4ea 100644 --- a/src/daemon/config/README.md +++ b/src/daemon/config/README.md @@ -1,13 +1,3 @@ - - # Daemon configuration
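For quick orientation, here is a minimal `netdata.conf` sketch that uses the `[db]` settings documented further down this page. The values shown are illustrative examples only, and the tier retention keys follow the `dbengine tier N retention size` / `dbengine tier N retention time` naming used in the table below:

```text
[db]
    # storage engine and number of tiers (see the [db] section options table below)
    mode = dbengine
    storage tiers = 3

    # retention of the highest-resolution tier, bounded by size and by time (example values)
    dbengine tier 0 retention size = 1GiB
    dbengine tier 0 retention time = 14d
```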
@@ -53,7 +43,7 @@ comment on settings it does not currently use.

## Applying changes

-After `netdata.conf` has been modified, Netdata needs to be [restarted](/packaging/installer/README.md#maintaining-a-netdata-agent-installation) for
+After `netdata.conf` has been modified, Netdata needs to be [restarted](/docs/netdata-agent/start-stop-restart.md) for
changes to apply:

```bash
@@ -86,24 +76,22 @@ Please note that your data history will be lost if you have modified `history` p

### [db] section options

-| setting | default | info |
-|:---------------------------------------------:|:----------:|:------------------------------------------------------------------------------------------------------------------------|
-| mode | `dbengine` | `dbengine`: The default for long-term metrics storage with efficient RAM and disk usage. Can be extended with `dbengine page cache size MB` and `dbengine disk space MB`.<br/>`ram`: The round-robin database will be temporary and it will be lost when Netdata exits.<br/>`alloc`: Similar to `ram`, but can significantly reduce memory usage, when combined with a low retention and does not support KSM.<br/>`none`: Disables the database at this host, and disables health monitoring entirely, as that requires a database of metrics. Not to be used together with streaming. |
-| retention | `3600` | Used with `mode = ram/alloc`, not the default `mode = dbengine`. This number reflects the number of entries the `netdata` daemon will by default keep in memory for each chart dimension. Check [Memory Requirements](/src/database/README.md) for more information. |
-| storage tiers | `3` | The number of storage tiers you want to have in your dbengine. Check the tiering mechanism in the [dbengine's reference](/src/database/engine/README.md#tiering). You can have up to 5 tiers of data (including the _Tier 0_). This number ranges between 1 and 5. |
-| dbengine page cache size MB | `32` | Determines the amount of RAM in MiB that is dedicated to caching for _Tier 0_ Netdata metric values. |
-| dbengine tier **`N`** page cache size MB | `32` | Determines the amount of RAM in MiB that is dedicated for caching Netdata metric values of the **`N`** tier.<br/>`N belongs to [1..4]` |
-| dbengine disk space MB | `256` | Determines the amount of disk space in MiB that is dedicated to storing _Tier 0_ Netdata metric values and all related metadata describing them. This option is available **only for legacy configuration** (`Agent v1.23.2 and prior`). |
-| dbengine multihost disk space MB | `256` | Same functionality as `dbengine disk space MB`, but includes support for storing metrics streamed to a parent node by its children. Can be used in single-node environments as well. This setting is only for _Tier 0_ metrics. |
-| dbengine tier **`N`** multihost disk space MB | `256` | Same functionality as `dbengine multihost disk space MB`, but stores metrics of the **`N`** tier (both parent node and its children). Can be used in single-node environments as well.<br/>`N belongs to [1..4]` |
-| update every | `1` | The frequency in seconds, for data collection. For more information see the [performance guide](/docs/netdata-agent/configuration/optimize-the-netdata-agents-performance.md). These metrics stored as _Tier 0_ data. Explore the tiering mechanism in the [dbengine's reference](/src/database/engine/README.md#tiering). |
-| dbengine tier **`N`** update every iterations | `60` | The down sampling value of each tier from the previous one. For each Tier, the greater by one Tier has N (equal to 60 by default) less data points of any metric it collects. This setting can take values from `2` up to `255`.<br/>`N belongs to [1..4]` |
-| dbengine tier **`N`** back fill | `New` | Specifies the strategy of recreating missing data on each Tier from the exact lower Tier.<br/>`New`: Sees the latest point on each Tier and save new points to it only if the exact lower Tier has available points for it's observation window (`dbengine tier N update every iterations` window).<br/>`none`: No back filling is applied.<br/>`N belongs to [1..4]` |
-| memory deduplication (ksm) | `yes` | When set to `yes`, Netdata will offer its in-memory round robin database and the dbengine page cache to kernel same page merging (KSM) for deduplication. For more information check [Memory Deduplication - Kernel Same Page Merging - KSM](/src/database/README.md#ksm) |
-| cleanup obsolete charts after secs | `3600` | See [monitoring ephemeral containers](/src/collectors/cgroups.plugin/README.md#monitoring-ephemeral-containers), also sets the timeout for cleaning up obsolete dimensions |
-| gap when lost iterations above | `1` | |
-| cleanup orphan hosts after secs | `3600` | How long to wait until automatically removing from the DB a remote Netdata host (child) that is no longer sending data. |
-| enable zero metrics | `no` | Set to `yes` to show charts when all their metrics are zero. |
+| setting | default | info |
+|:---------------------------------------------:|:-------------------------------:|:---------------------------------------------------------------------------------------------------------------------|
+| mode | `dbengine` | `dbengine`: The default for long-term metrics storage with efficient RAM and disk usage. Can be extended with `dbengine page cache size` and `dbengine tier X retention size`.<br/>`ram`: The round-robin database will be temporary and it will be lost when Netdata exits.<br/>`alloc`: Similar to `ram`, but can significantly reduce memory usage when combined with a low retention; it does not support KSM.<br/>`none`: Disables the database at this host, and disables health monitoring entirely, as that requires a database of metrics. Not to be used together with streaming. |
+| retention | `3600` | Used with `mode = ram/alloc`, not the default `mode = dbengine`. This number reflects the number of entries the `netdata` daemon will by default keep in memory for each chart dimension. Check [Memory Requirements](/src/database/README.md) for more information. |
+| storage tiers | `3` | The number of storage tiers you want to have in your dbengine. Check the tiering mechanism in the [dbengine's reference](/src/database/engine/README.md#tiering). You can have up to 5 tiers of data (including the _Tier 0_). This number ranges between 1 and 5. |
+| dbengine page cache size | `32MiB` | Determines the amount of RAM that is dedicated to caching _Tier 0_ Netdata metric values. |
+| dbengine tier **`N`** retention size | `1GiB` | The disk space dedicated to metrics storage, per tier. Can be used in single-node environments as well.<br/>`N belongs to [1..4]` |
+| dbengine tier **`N`** retention time | `14d`, `3mo`, `1y`, `1y`, `1y` | The database retention, expressed in time. Can be used in single-node environments as well.<br/>`N belongs to [1..4]` |
+| update every | `1` | The frequency, in seconds, of data collection. For more information see the [performance guide](/docs/netdata-agent/configuration/optimize-the-netdata-agents-performance.md). These metrics are stored as _Tier 0_ data. Explore the tiering mechanism in the [dbengine's reference](/src/database/engine/README.md#tiering). |
+| dbengine tier **`N`** update every iterations | `60` | The downsampling factor of each tier relative to the previous one. Each Tier keeps N (60 by default) times fewer data points of any metric than the Tier below it. This setting can take values from `2` up to `255`.<br/>`N belongs to [1..4]` |
+| dbengine tier back fill | `new` | Specifies the strategy of recreating missing data on higher database Tiers.<br/>`new`: Sees the latest point on each Tier and saves new points to it only if the exact lower Tier has available points for its observation window (`dbengine tier N update every iterations` window).<br/>`none`: No back filling is applied.<br/>`N belongs to [1..4]` |
+| memory deduplication (ksm) | `yes` | When set to `yes`, Netdata will offer its in-memory round robin database and the dbengine page cache to kernel same page merging (KSM) for deduplication. For more information check [Memory Deduplication - Kernel Same Page Merging - KSM](/src/database/README.md#ksm) |
+| cleanup obsolete charts after | `1h` | See [monitoring ephemeral containers](/src/collectors/cgroups.plugin/README.md#monitoring-ephemeral-containers), also sets the timeout for cleaning up obsolete dimensions |
+| gap when lost iterations above | `1` | |
+| cleanup orphan hosts after | `1h` | How long to wait until automatically removing from the DB a remote Netdata host (child) that is no longer sending data. |
+| enable zero metrics | `no` | Set to `yes` to show charts when all their metrics are zero. |

> ### Info
>
@@ -140,7 +128,7 @@ There are additional configuration options for the logs. For more info, see [Net
| health | `journal` | The filename to save the log of Netdata health collectors. You can also set it to `syslog` to send the access log to syslog, or `off` to disable this log. Defaults to `Journal` if using systemd. |
| daemon | `journal` | The filename to save the log of Netdata daemon. You can also set it to `syslog` to send the access log to syslog, or `off` to disable this log. Defaults to `Journal` if using systemd. |
| facility | `daemon` | A facility keyword is used to specify the type of system that is logging the message. |
-| logs flood protection period | `60` | Length of period (in sec) during which the number of errors should not exceed the `errors to trigger flood protection`. |
+| logs flood protection period | `1m` | Length of the period during which the number of errors should not exceed the `errors to trigger flood protection`. |
| logs to trigger flood protection | `1000` | Number of errors written to the log in `errors flood protection period` sec before flood protection is activated. |
| level | `info` | Controls which log messages are logged, with error being the most important. Supported values: `info` and `error`. |

@@ -172,15 +160,15 @@ monitoring](/src/health/README.md).

[Alert notifications](/src/health/notifications/README.md) are configured in `health_alarm_notify.conf`.

-| setting | default | info |
-|:----------------------------------------------:|:------------------------------------------------:|:---------------------------------------------------------------------------------------------------------------------|
-| enabled | `yes` | Set to `no` to disable all alerts and notifications |
-| in memory max health log entries | 1000 | Size of the alert history held in RAM |
-| script to execute on alarm | `/usr/libexec/netdata/plugins.d/alarm-notify.sh` | The script that sends alert notifications. Note that in versions before 1.16, the plugins.d directory may be installed in a different location in certain OSs (e.g. under `/usr/lib/netdata`). |
-| run at least every seconds | `10` | Controls how often all alert conditions should be evaluated. |
-| postpone alarms during hibernation for seconds | `60` | Prevents false alerts. May need to be increased if you get alerts during hibernation. |
-| health log history | `432000` | Specifies the history of alert events (in seconds) kept in the agent's sqlite database. |
-| enabled alarms | * | Defines which alerts to load from both user and stock directories. This is a [simple pattern](/src/libnetdata/simple_pattern/README.md) list of alert or template names. Can be used to disable specific alerts. For example, `enabled alarms = !oom_kill *` will load all alerts except `oom_kill`. |
+| setting | default | info |
+|:--------------------------------------:|:------------------------------------------------:|:---------------------------------------------------------------------------------------------------------------------|
+| enabled | `yes` | Set to `no` to disable all alerts and notifications |
+| in memory max health log entries | 1000 | Size of the alert history held in RAM |
+| script to execute on alarm | `/usr/libexec/netdata/plugins.d/alarm-notify.sh` | The script that sends alert notifications. Note that in versions before 1.16, the plugins.d directory may be installed in a different location in certain OSs (e.g. under `/usr/lib/netdata`). |
+| run at least every | `10s` | Controls how often all alert conditions should be evaluated. |
+| postpone alarms during hibernation for | `1m` | Prevents false alerts. May need to be increased if you get alerts during hibernation. |
+| health log retention | `5d` | Specifies how long alert events are kept in the agent's sqlite database. |
+| enabled alarms | * | Defines which alerts to load from both user and stock directories. This is a [simple pattern](/src/libnetdata/simple_pattern/README.md) list of alert or template names. Can be used to disable specific alerts. For example, `enabled alarms = !oom_kill *` will load all alerts except `oom_kill`. |
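The duration-style values above use the same unit suffixes as elsewhere in `netdata.conf` (`10s`, `1m`, `5d`). A minimal, illustrative fragment, assuming the standard `[health]` section name:

```text
[health]
    enabled = yes
    run at least every = 10s
    postpone alarms during hibernation for = 1m
    health log retention = 5d
```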
### [web] section options

diff --git a/src/daemon/config/dyncfg-echo.c b/src/daemon/config/dyncfg-echo.c
index 95d40a025..f6eb48c35 100644
--- a/src/daemon/config/dyncfg-echo.c
+++ b/src/daemon/config/dyncfg-echo.c
@@ -96,7 +96,7 @@ void dyncfg_echo(const DICTIONARY_ITEM *item, DYNCFG *df, const char *id __maybe
dyncfg_echo_cb, e,
NULL, NULL, NULL, NULL,
- NULL, string2str(df->dyncfg.source));
+ NULL, string2str(df->dyncfg.source), false);
}

// ----------------------------------------------------------------------------
@@ -129,7 +129,7 @@ void dyncfg_echo_update(const DICTIONARY_ITEM *item, DYNCFG *df, const char *id)
dyncfg_echo_cb, e,
NULL, NULL, NULL, NULL,
- df->dyncfg.payload, string2str(df->dyncfg.source));
+ df->dyncfg.payload, string2str(df->dyncfg.source), false);
}

// ----------------------------------------------------------------------------
@@ -164,7 +164,7 @@ static void dyncfg_echo_payload_add(const DICTIONARY_ITEM *item_template __maybe
dyncfg_echo_cb, e,
NULL, NULL, NULL, NULL,
- df_job->dyncfg.payload, string2str(df_job->dyncfg.source));
+ df_job->dyncfg.payload, string2str(df_job->dyncfg.source), false);
}

void dyncfg_echo_add(const DICTIONARY_ITEM *item_template, const DICTIONARY_ITEM *item_job, DYNCFG *df_template, DYNCFG *df_job, const char *template_id, const char *job_name) {
diff --git a/src/daemon/config/dyncfg-intercept.c b/src/daemon/config/dyncfg-intercept.c
index 65f8383ed..b302d72aa 100644
--- a/src/daemon/config/dyncfg-intercept.c
+++ b/src/daemon/config/dyncfg-intercept.c
@@ -216,7 +216,7 @@ int dyncfg_function_intercept_cb(struct rrd_function_execute *rfe, void *data __
memcpy(buf, rfe->function, sizeof(buf));

char *words[20];
- size_t num_words = quoted_strings_splitter_pluginsd(buf, words, 20);
+ size_t num_words = quoted_strings_splitter_whitespace(buf, words, 20);

size_t i = 0;
char *config = get_word(words, num_words, i++);
diff --git a/src/daemon/config/dyncfg-tree.c b/src/daemon/config/dyncfg-tree.c
index 77d031fa0..4bad2f30f 100644
--- a/src/daemon/config/dyncfg-tree.c
+++ b/src/daemon/config/dyncfg-tree.c
@@ -71,12 +71,10 @@ static void dyncfg_tree_for_host(RRDHOST *host, BUFFER *wb, const char *path, co
if(id && *id)
template = string_strdupz(id);

- ND_UUID host_uuid = uuid2UUID(host->host_uuid);
-
size_t path_len = strlen(path);

DYNCFG *df;
dfe_start_read(dyncfg_globals.nodes, df) {
- if(!UUIDeq(df->host_uuid, host_uuid))
+ if(!UUIDeq(df->host_uuid, host->host_id))
continue;

if(strncmp(string2str(df->path), path, path_len) != 0)
@@ -162,7 +160,7 @@ static int dyncfg_config_execute_cb(struct rrd_function_execute *rfe, void *data
memcpy(buf, rfe->function, sizeof(buf));

char *words[MAX_FUNCTION_PARAMETERS]; // an array of pointers for the words in this line
- size_t num_words = quoted_strings_splitter_pluginsd(buf, words, MAX_FUNCTION_PARAMETERS);
+ size_t num_words = quoted_strings_splitter_whitespace(buf, words, MAX_FUNCTION_PARAMETERS);

const char *config = get_word(words, num_words, 0);
const char *action = get_word(words, num_words, 1);
@@ -266,7 +264,7 @@ static int dyncfg_config_execute_cb(struct rrd_function_execute *rfe, void *data

rrd_call_function_error(
rfe->result.wb,
- "unknown config id given", code);
+ "Unknown config id given.", code);
}

cleanup:
@@ -286,7 +284,7 @@ void dyncfg_host_init(RRDHOST *host) {
// This function needs to be async, although it is internal.
// The reason is that it can call by itself another function that may or may not be internal (sync).
- rrd_function_add(host, NULL, PLUGINSD_FUNCTION_CONFIG, 120, - 1000, "Dynamic configuration", "config", HTTP_ACCESS_ANONYMOUS_DATA, + rrd_function_add(host, NULL, PLUGINSD_FUNCTION_CONFIG, 120, 1000, DYNCFG_FUNCTIONS_VERSION, + "Dynamic configuration", "config", HTTP_ACCESS_ANONYMOUS_DATA, false, dyncfg_config_execute_cb, host); } diff --git a/src/daemon/config/dyncfg-unittest.c b/src/daemon/config/dyncfg-unittest.c index 775dc7cbd..763451501 100644 --- a/src/daemon/config/dyncfg-unittest.c +++ b/src/daemon/config/dyncfg-unittest.c @@ -195,7 +195,7 @@ static int dyncfg_unittest_execute_cb(struct rrd_function_execute *rfe, void *da memcpy(buf, rfe->function, sizeof(buf)); char *words[MAX_FUNCTION_PARAMETERS]; // an array of pointers for the words in this line - size_t num_words = quoted_strings_splitter_pluginsd(buf, words, MAX_FUNCTION_PARAMETERS); + size_t num_words = quoted_strings_splitter_whitespace(buf, words, MAX_FUNCTION_PARAMETERS); const char *config = get_word(words, num_words, 0); const char *id = get_word(words, num_words, 1); @@ -426,7 +426,7 @@ static int dyncfg_unittest_run(const char *cmd, BUFFER *wb, const char *payload, memcpy(buf, cmd, sizeof(buf)); char *words[MAX_FUNCTION_PARAMETERS]; // an array of pointers for the words in this line - size_t num_words = quoted_strings_splitter_pluginsd(buf, words, MAX_FUNCTION_PARAMETERS); + size_t num_words = quoted_strings_splitter_whitespace(buf, words, MAX_FUNCTION_PARAMETERS); // const char *config = get_word(words, num_words, 0); const char *id = get_word(words, num_words, 1); @@ -473,7 +473,7 @@ static int dyncfg_unittest_run(const char *cmd, BUFFER *wb, const char *payload, NULL, NULL, NULL, NULL, NULL, NULL, - pld, source); + pld, source, false); if(!DYNCFG_RESP_SUCCESS(rc)) { nd_log(NDLS_DAEMON, NDLP_ERR, "DYNCFG UNITTEST: failed to run: %s; returned code %d", cmd, rc); dyncfg_unittest_register_error(NULL, NULL); diff --git a/src/daemon/config/dyncfg.c b/src/daemon/config/dyncfg.c index 2f484d1ed..e6c1768cc 100644 --- a/src/daemon/config/dyncfg.c +++ b/src/daemon/config/dyncfg.c @@ -192,7 +192,7 @@ const DICTIONARY_ITEM *dyncfg_add_internal(RRDHOST *host, const char *id, const rrd_function_execute_cb_t execute_cb, void *execute_cb_data, bool overwrite_cb) { DYNCFG tmp = { - .host_uuid = uuid2UUID(host->host_uuid), + .host_uuid = host->host_id, .path = string_strdupz(path), .cmds = cmds, .type = type, @@ -358,6 +358,7 @@ bool dyncfg_add_low_level(RRDHOST *host, const char *id, const char *path, string2str(df->function), 120, 1000, + DYNCFG_FUNCTIONS_VERSION, "Dynamic configuration", "config", (view_access & edit_access), diff --git a/src/daemon/config/dyncfg.h b/src/daemon/config/dyncfg.h index 539eddbfb..84fab07d2 100644 --- a/src/daemon/config/dyncfg.h +++ b/src/daemon/config/dyncfg.h @@ -7,6 +7,8 @@ #include "database/rrd.h" #include "database/rrdfunctions.h" +#define DYNCFG_FUNCTIONS_VERSION 0 + void dyncfg_add_streaming(BUFFER *wb); bool dyncfg_available_for_rrdhost(RRDHOST *host); void dyncfg_host_init(RRDHOST *host); diff --git a/src/daemon/daemon.c b/src/daemon/daemon.c index 2392d4cc1..d3ddf027d 100644 --- a/src/daemon/daemon.c +++ b/src/daemon/daemon.c @@ -3,34 +3,24 @@ #include "common.h" #include -char pidfile[FILENAME_MAX + 1] = ""; -char claiming_directory[FILENAME_MAX + 1]; -char netdata_exe_path[FILENAME_MAX + 1]; -char netdata_exe_file[FILENAME_MAX + 1]; +char *pidfile = NULL; +char *netdata_exe_path = NULL; void get_netdata_execution_path(void) { - int ret; - size_t exepath_size = 0; - struct passwd 
*passwd = NULL; - char *user = NULL; - - passwd = getpwuid(getuid()); - user = (passwd && passwd->pw_name) ? passwd->pw_name : ""; - - exepath_size = sizeof(netdata_exe_file) - 1; - ret = uv_exepath(netdata_exe_file, &exepath_size); - if (0 != ret) { - netdata_log_error("uv_exepath(\"%s\", %u) (user: %s) failed (%s).", netdata_exe_file, (unsigned)exepath_size, user, - uv_strerror(ret)); - fatal("Cannot start netdata without getting execution path."); + struct passwd *passwd = getpwuid(getuid()); + char *user = (passwd && passwd->pw_name) ? passwd->pw_name : ""; + + char b[FILENAME_MAX + 1]; + size_t b_size = sizeof(b) - 1; + int ret = uv_exepath(b, &b_size); + if (ret != 0) { + fatal("Cannot start netdata without getting execution path. " + "(uv_exepath(\"%s\", %zu), user: '%s', failed: %s).", + b, b_size, user, uv_strerror(ret)); } + b[b_size] = '\0'; - netdata_exe_file[exepath_size] = '\0'; - - // macOS's dirname(3) does not modify passed string - char *tmpdir = strdupz(netdata_exe_file); - strcpy(netdata_exe_path, dirname(tmpdir)); - freez(tmpdir); + netdata_exe_path = strdupz(b); } static void fix_directory_file_permissions(const char *dirname, uid_t uid, gid_t gid, bool recursive) @@ -68,7 +58,7 @@ static void change_dir_ownership(const char *dir, uid_t uid, gid_t gid, bool rec fix_directory_file_permissions(dir, uid, gid, recursive); } -static void clean_directory(char *dirname) +static void clean_directory(const char *dirname) { DIR *dir = opendir(dirname); if(!dir) return; @@ -89,7 +79,7 @@ static void prepare_required_directories(uid_t uid, gid_t gid) { change_dir_ownership(netdata_configured_varlib_dir, uid, gid, false); change_dir_ownership(netdata_configured_lock_dir, uid, gid, false); change_dir_ownership(netdata_configured_log_dir, uid, gid, false); - change_dir_ownership(claiming_directory, uid, gid, false); + change_dir_ownership(netdata_configured_cloud_dir, uid, gid, false); char filename[FILENAME_MAX + 1]; snprintfz(filename, FILENAME_MAX, "%s/registry", netdata_configured_varlib_dir); @@ -112,7 +102,7 @@ static int become_user(const char *username, int pid_fd) { prepare_required_directories(uid, gid); - if(pidfile[0]) { + if(pidfile && *pidfile) { if(chown(pidfile, uid, gid) == -1) netdata_log_error("Cannot chown '%s' to %u:%u", pidfile, (unsigned int)uid, (unsigned int)gid); } @@ -198,7 +188,7 @@ static void oom_score_adj(void) { } // check the environment - char *s = getenv("OOMScoreAdjust"); + const char *s = getenv("OOMScoreAdjust"); if(!s || !*s) { snprintfz(buf, sizeof(buf) - 1, "%d", (int)wanted_score); s = buf; @@ -442,9 +432,8 @@ int become_daemon(int dont_fork, const char *user) perror("cannot fork"); exit(1); } - if(i != 0) { - exit(0); // the parent - } + if(i != 0) exit(0); // the parent + gettid_uncached(); // become session leader if (setsid() < 0) { @@ -458,14 +447,13 @@ int become_daemon(int dont_fork, const char *user) perror("cannot fork"); exit(1); } - if(i != 0) { - exit(0); // the parent - } + if(i != 0) exit(0); // the parent + gettid_uncached(); } // generate our pid file int pidfd = -1; - if(pidfile[0]) { + if(pidfile && *pidfile) { pidfd = open(pidfile, O_WRONLY | O_CREAT | O_CLOEXEC, 0644); if(pidfd >= 0) { if(ftruncate(pidfd, 0) != 0) @@ -490,9 +478,6 @@ int become_daemon(int dont_fork, const char *user) // never become a problem sched_setscheduler_set(); - // Set claiming directory based on user config directory with correct ownership - snprintfz(claiming_directory, FILENAME_MAX, "%s/cloud.d", netdata_configured_varlib_dir); - if(user && 
*user) { if(become_user(user, pidfd) != 0) { netdata_log_error("Cannot become user '%s'. Continuing as we are.", user); diff --git a/src/daemon/daemon.h b/src/daemon/daemon.h index 1f8837fd6..13ef1f647 100644 --- a/src/daemon/daemon.h +++ b/src/daemon/daemon.h @@ -9,8 +9,7 @@ void netdata_cleanup_and_exit(int ret, const char *action, const char *action_re void get_netdata_execution_path(void); -extern char pidfile[]; -extern char netdata_exe_file[]; -extern char netdata_exe_path[]; +extern char *pidfile; +extern char *netdata_exe_path; #endif /* NETDATA_DAEMON_H */ diff --git a/src/daemon/environment.c b/src/daemon/environment.c new file mode 100644 index 000000000..2822278d3 --- /dev/null +++ b/src/daemon/environment.c @@ -0,0 +1,99 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "common.h" + +static const char *verify_required_directory(const char *dir) +{ + if (chdir(dir) == -1) + fatal("Cannot change directory to '%s'", dir); + + DIR *d = opendir(dir); + if (!d) + fatal("Cannot examine the contents of directory '%s'", dir); + closedir(d); + + return dir; +} + +static const char *verify_or_create_required_directory(const char *dir) { + errno_clear(); + + if (mkdir(dir, 0755) != 0 && errno != EEXIST) + fatal("Cannot create required directory '%s'", dir); + + return verify_required_directory(dir); +} + +static const char *verify_or_create_required_private_directory(const char *dir) { + errno_clear(); + + if (mkdir(dir, 0770) != 0 && errno != EEXIST) + fatal("Cannot create required directory '%s'", dir); + + return verify_required_directory(dir); +} + +void set_environment_for_plugins_and_scripts(void) { + { + char b[16]; + snprintfz(b, sizeof(b) - 1, "%d", default_rrd_update_every); + nd_setenv("NETDATA_UPDATE_EVERY", b, 1); + } + + nd_setenv("NETDATA_VERSION", NETDATA_VERSION, 1); + nd_setenv("NETDATA_HOSTNAME", netdata_configured_hostname, 1); + nd_setenv("NETDATA_CONFIG_DIR", verify_required_directory(netdata_configured_user_config_dir), 1); + nd_setenv("NETDATA_USER_CONFIG_DIR", verify_required_directory(netdata_configured_user_config_dir), 1); + nd_setenv("NETDATA_STOCK_CONFIG_DIR", verify_required_directory(netdata_configured_stock_config_dir), 1); + nd_setenv("NETDATA_PLUGINS_DIR", verify_required_directory(netdata_configured_primary_plugins_dir), 1); + nd_setenv("NETDATA_WEB_DIR", verify_required_directory(netdata_configured_web_dir), 1); + nd_setenv("NETDATA_CACHE_DIR", verify_or_create_required_directory(netdata_configured_cache_dir), 1); + nd_setenv("NETDATA_LIB_DIR", verify_or_create_required_directory(netdata_configured_varlib_dir), 1); + nd_setenv("NETDATA_LOCK_DIR", verify_or_create_required_directory(netdata_configured_lock_dir), 1); + nd_setenv("NETDATA_LOG_DIR", verify_or_create_required_directory(netdata_configured_log_dir), 1); + nd_setenv("NETDATA_HOST_PREFIX", netdata_configured_host_prefix, 1); + + nd_setenv("CLAIMING_DIR", verify_or_create_required_private_directory(netdata_configured_cloud_dir), 1); + + { + BUFFER *user_plugins_dirs = buffer_create(FILENAME_MAX, NULL); + + for (size_t i = 1; i < PLUGINSD_MAX_DIRECTORIES && plugin_directories[i]; i++) { + if (i > 1) + buffer_strcat(user_plugins_dirs, " "); + buffer_strcat(user_plugins_dirs, plugin_directories[i]); + } + + nd_setenv("NETDATA_USER_PLUGINS_DIRS", buffer_tostring(user_plugins_dirs), 1); + + buffer_free(user_plugins_dirs); + } + + const char *default_port = appconfig_get(&netdata_config, CONFIG_SECTION_WEB, "default port", NULL); + int clean = 0; + if (!default_port) { + default_port = 
strdupz("19999"); + clean = 1; + } + + nd_setenv("NETDATA_LISTEN_PORT", default_port, 1); + if (clean) + freez((char *)default_port); + + // set the path we need + char path[4096], *p = getenv("PATH"); + if (!p) p = "/bin:/usr/bin"; + snprintfz(path, sizeof(path), "%s:%s", p, "/sbin:/usr/sbin:/usr/local/bin:/usr/local/sbin"); + setenv("PATH", config_get(CONFIG_SECTION_ENV_VARS, "PATH", path), 1); + + // python options + p = getenv("PYTHONPATH"); + if (!p) p = ""; + setenv("PYTHONPATH", config_get(CONFIG_SECTION_ENV_VARS, "PYTHONPATH", p), 1); + + // disable buffering for python plugins + setenv("PYTHONUNBUFFERED", "1", 1); + + // switch to standard locale for plugins + setenv("LC_ALL", "C", 1); +} diff --git a/src/daemon/event_loop.c b/src/daemon/event_loop.c deleted file mode 100644 index d1908ec15..000000000 --- a/src/daemon/event_loop.c +++ /dev/null @@ -1,66 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include -#include "event_loop.h" - -// Register workers -void register_libuv_worker_jobs() { - static __thread bool registered = false; - - if(likely(registered)) - return; - - registered = true; - - worker_register("LIBUV"); - - // generic - worker_register_job_name(UV_EVENT_WORKER_INIT, "worker init"); - - // query related - worker_register_job_name(UV_EVENT_DBENGINE_QUERY, "query"); - worker_register_job_name(UV_EVENT_DBENGINE_EXTENT_CACHE_LOOKUP, "extent cache"); - worker_register_job_name(UV_EVENT_DBENGINE_EXTENT_MMAP, "extent mmap"); - worker_register_job_name(UV_EVENT_DBENGINE_EXTENT_DECOMPRESSION, "extent decompression"); - worker_register_job_name(UV_EVENT_DBENGINE_EXTENT_PAGE_LOOKUP, "page lookup"); - worker_register_job_name(UV_EVENT_DBENGINE_EXTENT_PAGE_POPULATION, "page populate"); - worker_register_job_name(UV_EVENT_DBENGINE_EXTENT_PAGE_ALLOCATION, "page allocate"); - - // flushing related - worker_register_job_name(UV_EVENT_DBENGINE_FLUSH_MAIN_CACHE, "flush main"); - worker_register_job_name(UV_EVENT_DBENGINE_EXTENT_WRITE, "extent write"); - worker_register_job_name(UV_EVENT_DBENGINE_FLUSHED_TO_OPEN, "flushed to open"); - - // datafile full - worker_register_job_name(UV_EVENT_DBENGINE_JOURNAL_INDEX_WAIT, "jv2 index wait"); - worker_register_job_name(UV_EVENT_DBENGINE_JOURNAL_INDEX, "jv2 indexing"); - - // db rotation related - worker_register_job_name(UV_EVENT_DBENGINE_DATAFILE_DELETE_WAIT, "datafile delete wait"); - worker_register_job_name(UV_EVENT_DBENGINE_DATAFILE_DELETE, "datafile deletion"); - worker_register_job_name(UV_EVENT_DBENGINE_FIND_ROTATED_METRICS, "find rotated metrics"); - worker_register_job_name(UV_EVENT_DBENGINE_FIND_REMAINING_RETENTION, "find remaining retention"); - worker_register_job_name(UV_EVENT_DBENGINE_POPULATE_MRG, "update retention"); - - // other dbengine events - worker_register_job_name(UV_EVENT_DBENGINE_EVICT_MAIN_CACHE, "evict main"); - worker_register_job_name(UV_EVENT_DBENGINE_BUFFERS_CLEANUP, "dbengine buffers cleanup"); - worker_register_job_name(UV_EVENT_DBENGINE_QUIESCE, "dbengine quiesce"); - worker_register_job_name(UV_EVENT_DBENGINE_SHUTDOWN, "dbengine shutdown"); - - // metadata - worker_register_job_name(UV_EVENT_HOST_CONTEXT_LOAD, "metadata load host context"); - worker_register_job_name(UV_EVENT_METADATA_STORE, "metadata store host"); - worker_register_job_name(UV_EVENT_METADATA_CLEANUP, "metadata cleanup"); - worker_register_job_name(UV_EVENT_METADATA_ML_LOAD, "metadata load ml models"); - - // netdatacli - worker_register_job_name(UV_EVENT_SCHEDULE_CMD, "schedule command"); - - static int workers = 0; - int 
worker_id = __atomic_add_fetch(&workers, 1, __ATOMIC_RELAXED); - - char buf[NETDATA_THREAD_TAG_MAX + 1]; - snprintfz(buf, NETDATA_THREAD_TAG_MAX, "UV_WORKER[%d]", worker_id); - uv_thread_set_name_np(buf); -} diff --git a/src/daemon/event_loop.h b/src/daemon/event_loop.h deleted file mode 100644 index c1821c646..000000000 --- a/src/daemon/event_loop.h +++ /dev/null @@ -1,55 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#ifndef NETDATA_EVENT_LOOP_H -#define NETDATA_EVENT_LOOP_H - -enum event_loop_job { - UV_EVENT_JOB_NONE = 0, - - // generic - UV_EVENT_WORKER_INIT, - - // query related - UV_EVENT_DBENGINE_QUERY, - UV_EVENT_DBENGINE_EXTENT_CACHE_LOOKUP, - UV_EVENT_DBENGINE_EXTENT_MMAP, - UV_EVENT_DBENGINE_EXTENT_DECOMPRESSION, - UV_EVENT_DBENGINE_EXTENT_PAGE_LOOKUP, - UV_EVENT_DBENGINE_EXTENT_PAGE_POPULATION, - UV_EVENT_DBENGINE_EXTENT_PAGE_ALLOCATION, - - // flushing related - UV_EVENT_DBENGINE_FLUSH_MAIN_CACHE, - UV_EVENT_DBENGINE_EXTENT_WRITE, - UV_EVENT_DBENGINE_FLUSHED_TO_OPEN, - - // datafile full - UV_EVENT_DBENGINE_JOURNAL_INDEX_WAIT, - UV_EVENT_DBENGINE_JOURNAL_INDEX, - - // db rotation related - UV_EVENT_DBENGINE_DATAFILE_DELETE_WAIT, - UV_EVENT_DBENGINE_DATAFILE_DELETE, - UV_EVENT_DBENGINE_FIND_ROTATED_METRICS, // find the metrics that are rotated - UV_EVENT_DBENGINE_FIND_REMAINING_RETENTION, // find their remaining retention - UV_EVENT_DBENGINE_POPULATE_MRG, // update mrg - - // other dbengine events - UV_EVENT_DBENGINE_EVICT_MAIN_CACHE, - UV_EVENT_DBENGINE_BUFFERS_CLEANUP, - UV_EVENT_DBENGINE_QUIESCE, - UV_EVENT_DBENGINE_SHUTDOWN, - - // metadata - UV_EVENT_HOST_CONTEXT_LOAD, - UV_EVENT_METADATA_STORE, - UV_EVENT_METADATA_CLEANUP, - UV_EVENT_METADATA_ML_LOAD, - - // netdatacli - UV_EVENT_SCHEDULE_CMD, -}; - -void register_libuv_worker_jobs(); - -#endif //NETDATA_EVENT_LOOP_H diff --git a/src/daemon/global_statistics.c b/src/daemon/global_statistics.c index 17fd53761..236298a59 100644 --- a/src/daemon/global_statistics.c +++ b/src/daemon/global_statistics.c @@ -3502,8 +3502,7 @@ static struct worker_utilization all_workers_utilization[] = { { .name = "DBENGINE", .family = "workers dbengine instances", .priority = 1000000 }, { .name = "LIBUV", .family = "workers libuv threadpool", .priority = 1000000 }, { .name = "WEB", .family = "workers web server", .priority = 1000000 }, - { .name = "ACLKQUERY", .family = "workers aclk query", .priority = 1000000 }, - { .name = "ACLKSYNC", .family = "workers aclk host sync", .priority = 1000000 }, + { .name = "ACLKSYNC", .family = "workers aclk sync", .priority = 1000000 }, { .name = "METASYNC", .family = "workers metadata sync", .priority = 1000000 }, { .name = "PLUGINSD", .family = "workers plugins.d", .priority = 1000000 }, { .name = "STATSD", .family = "workers plugin statsd", .priority = 1000000 }, @@ -4222,13 +4221,15 @@ void *global_statistics_main(void *ptr) global_statistics_register_workers(); int update_every = - (int)config_get_number(CONFIG_SECTION_GLOBAL_STATISTICS, "update every", localhost->rrd_update_every); - if (update_every < localhost->rrd_update_every) + (int)config_get_duration_seconds(CONFIG_SECTION_GLOBAL_STATISTICS, "update every", localhost->rrd_update_every); + if (update_every < localhost->rrd_update_every) { update_every = localhost->rrd_update_every; + config_set_duration_seconds(CONFIG_SECTION_GLOBAL_STATISTICS, "update every", update_every); + } usec_t step = update_every * USEC_PER_SEC; heartbeat_t hb; - heartbeat_init(&hb); + heartbeat_init(&hb, USEC_PER_SEC); usec_t real_step = USEC_PER_SEC; // keep 
the randomness at zero @@ -4237,7 +4238,7 @@ void *global_statistics_main(void *ptr) while (service_running(SERVICE_COLLECTORS)) { worker_is_idle(); - heartbeat_next(&hb, USEC_PER_SEC); + heartbeat_next(&hb); if (real_step < step) { real_step += USEC_PER_SEC; continue; @@ -4278,18 +4279,20 @@ void *global_statistics_extended_main(void *ptr) global_statistics_register_workers(); int update_every = - (int)config_get_number(CONFIG_SECTION_GLOBAL_STATISTICS, "update every", localhost->rrd_update_every); - if (update_every < localhost->rrd_update_every) + (int)config_get_duration_seconds(CONFIG_SECTION_GLOBAL_STATISTICS, "update every", localhost->rrd_update_every); + if (update_every < localhost->rrd_update_every) { update_every = localhost->rrd_update_every; + config_set_duration_seconds(CONFIG_SECTION_GLOBAL_STATISTICS, "update every", update_every); + } usec_t step = update_every * USEC_PER_SEC; heartbeat_t hb; - heartbeat_init(&hb); + heartbeat_init(&hb, USEC_PER_SEC); usec_t real_step = USEC_PER_SEC; while (service_running(SERVICE_COLLECTORS)) { worker_is_idle(); - heartbeat_next(&hb, USEC_PER_SEC); + heartbeat_next(&hb); if (real_step < step) { real_step += USEC_PER_SEC; continue; diff --git a/src/daemon/h2o-common.c b/src/daemon/h2o-common.c new file mode 100644 index 000000000..aa7a3c581 --- /dev/null +++ b/src/daemon/h2o-common.c @@ -0,0 +1,60 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "common.h" + +const char *netdata_configured_hostname = NULL; +const char *netdata_configured_user_config_dir = CONFIG_DIR; +const char *netdata_configured_stock_config_dir = LIBCONFIG_DIR; +const char *netdata_configured_log_dir = LOG_DIR; +const char *netdata_configured_primary_plugins_dir = PLUGINS_DIR; +const char *netdata_configured_web_dir = WEB_DIR; +const char *netdata_configured_cache_dir = CACHE_DIR; +const char *netdata_configured_varlib_dir = VARLIB_DIR; +const char *netdata_configured_lock_dir = VARLIB_DIR "/lock"; +const char *netdata_configured_cloud_dir = VARLIB_DIR "/cloud.d"; +const char *netdata_configured_home_dir = VARLIB_DIR; +const char *netdata_configured_host_prefix = NULL; +const char *netdata_configured_timezone = NULL; +const char *netdata_configured_abbrev_timezone = NULL; +int32_t netdata_configured_utc_offset = 0; + +bool netdata_ready = false; + +long get_netdata_cpus(void) { + static long processors = 0; + + if(processors) + return processors; + + long cores_proc_stat = os_get_system_cpus_cached(false, true); + long cores_cpuset_v1 = (long)os_read_cpuset_cpus("/sys/fs/cgroup/cpuset/cpuset.cpus", cores_proc_stat); + long cores_cpuset_v2 = (long)os_read_cpuset_cpus("/sys/fs/cgroup/cpuset.cpus", cores_proc_stat); + + if(cores_cpuset_v2) + processors = cores_cpuset_v2; + else if(cores_cpuset_v1) + processors = cores_cpuset_v1; + else + processors = cores_proc_stat; + + long cores_user_configured = config_get_number(CONFIG_SECTION_GLOBAL, "cpu cores", processors); + + errno_clear(); + internal_error(true, + "System CPUs: %ld, (" + "system: %ld, cgroups cpuset v1: %ld, cgroups cpuset v2: %ld, netdata.conf: %ld" + ")" + , processors + , cores_proc_stat + , cores_cpuset_v1 + , cores_cpuset_v2 + , cores_user_configured + ); + + processors = cores_user_configured; + + if(processors < 1) + processors = 1; + + return processors; +} diff --git a/src/daemon/libuv_workers.c b/src/daemon/libuv_workers.c new file mode 100644 index 000000000..441002d06 --- /dev/null +++ b/src/daemon/libuv_workers.c @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + 
+#include +#include "libuv_workers.h" + +// Register workers +void register_libuv_worker_jobs() { + static __thread bool registered = false; + + if(likely(registered)) + return; + + registered = true; + + worker_register("LIBUV"); + + // generic + worker_register_job_name(UV_EVENT_WORKER_INIT, "worker init"); + + // query related + worker_register_job_name(UV_EVENT_DBENGINE_QUERY, "query"); + worker_register_job_name(UV_EVENT_DBENGINE_EXTENT_CACHE_LOOKUP, "extent cache"); + worker_register_job_name(UV_EVENT_DBENGINE_EXTENT_MMAP, "extent mmap"); + worker_register_job_name(UV_EVENT_DBENGINE_EXTENT_DECOMPRESSION, "extent decompression"); + worker_register_job_name(UV_EVENT_DBENGINE_EXTENT_PAGE_LOOKUP, "page lookup"); + worker_register_job_name(UV_EVENT_DBENGINE_EXTENT_PAGE_POPULATION, "page populate"); + worker_register_job_name(UV_EVENT_DBENGINE_EXTENT_PAGE_ALLOCATION, "page allocate"); + + // flushing related + worker_register_job_name(UV_EVENT_DBENGINE_FLUSH_MAIN_CACHE, "flush main"); + worker_register_job_name(UV_EVENT_DBENGINE_EXTENT_WRITE, "extent write"); + worker_register_job_name(UV_EVENT_DBENGINE_FLUSHED_TO_OPEN, "flushed to open"); + + // datafile full + worker_register_job_name(UV_EVENT_DBENGINE_JOURNAL_INDEX_WAIT, "jv2 index wait"); + worker_register_job_name(UV_EVENT_DBENGINE_JOURNAL_INDEX, "jv2 indexing"); + + // db rotation related + worker_register_job_name(UV_EVENT_DBENGINE_DATAFILE_DELETE_WAIT, "datafile delete wait"); + worker_register_job_name(UV_EVENT_DBENGINE_DATAFILE_DELETE, "datafile deletion"); + worker_register_job_name(UV_EVENT_DBENGINE_FIND_ROTATED_METRICS, "find rotated metrics"); + worker_register_job_name(UV_EVENT_DBENGINE_FIND_REMAINING_RETENTION, "find remaining retention"); + worker_register_job_name(UV_EVENT_DBENGINE_POPULATE_MRG, "update retention"); + + // other dbengine events + worker_register_job_name(UV_EVENT_DBENGINE_EVICT_MAIN_CACHE, "evict main"); + worker_register_job_name(UV_EVENT_DBENGINE_BUFFERS_CLEANUP, "dbengine buffers cleanup"); + worker_register_job_name(UV_EVENT_DBENGINE_QUIESCE, "dbengine quiesce"); + worker_register_job_name(UV_EVENT_DBENGINE_SHUTDOWN, "dbengine shutdown"); + + // metadata + worker_register_job_name(UV_EVENT_HOST_CONTEXT_LOAD, "metadata load host context"); + worker_register_job_name(UV_EVENT_METADATA_STORE, "metadata store host"); + worker_register_job_name(UV_EVENT_METADATA_CLEANUP, "metadata cleanup"); + worker_register_job_name(UV_EVENT_METADATA_ML_LOAD, "metadata load ml models"); + + // netdatacli + worker_register_job_name(UV_EVENT_SCHEDULE_CMD, "schedule command"); + + static int workers = 0; + int worker_id = __atomic_add_fetch(&workers, 1, __ATOMIC_RELAXED); + + char buf[NETDATA_THREAD_TAG_MAX + 1]; + snprintfz(buf, NETDATA_THREAD_TAG_MAX, "UV_WORKER[%d]", worker_id); + uv_thread_set_name_np(buf); +} diff --git a/src/daemon/libuv_workers.h b/src/daemon/libuv_workers.h new file mode 100644 index 000000000..c1821c646 --- /dev/null +++ b/src/daemon/libuv_workers.h @@ -0,0 +1,55 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_EVENT_LOOP_H +#define NETDATA_EVENT_LOOP_H + +enum event_loop_job { + UV_EVENT_JOB_NONE = 0, + + // generic + UV_EVENT_WORKER_INIT, + + // query related + UV_EVENT_DBENGINE_QUERY, + UV_EVENT_DBENGINE_EXTENT_CACHE_LOOKUP, + UV_EVENT_DBENGINE_EXTENT_MMAP, + UV_EVENT_DBENGINE_EXTENT_DECOMPRESSION, + UV_EVENT_DBENGINE_EXTENT_PAGE_LOOKUP, + UV_EVENT_DBENGINE_EXTENT_PAGE_POPULATION, + UV_EVENT_DBENGINE_EXTENT_PAGE_ALLOCATION, + + // flushing related + 
UV_EVENT_DBENGINE_FLUSH_MAIN_CACHE, + UV_EVENT_DBENGINE_EXTENT_WRITE, + UV_EVENT_DBENGINE_FLUSHED_TO_OPEN, + + // datafile full + UV_EVENT_DBENGINE_JOURNAL_INDEX_WAIT, + UV_EVENT_DBENGINE_JOURNAL_INDEX, + + // db rotation related + UV_EVENT_DBENGINE_DATAFILE_DELETE_WAIT, + UV_EVENT_DBENGINE_DATAFILE_DELETE, + UV_EVENT_DBENGINE_FIND_ROTATED_METRICS, // find the metrics that are rotated + UV_EVENT_DBENGINE_FIND_REMAINING_RETENTION, // find their remaining retention + UV_EVENT_DBENGINE_POPULATE_MRG, // update mrg + + // other dbengine events + UV_EVENT_DBENGINE_EVICT_MAIN_CACHE, + UV_EVENT_DBENGINE_BUFFERS_CLEANUP, + UV_EVENT_DBENGINE_QUIESCE, + UV_EVENT_DBENGINE_SHUTDOWN, + + // metadata + UV_EVENT_HOST_CONTEXT_LOAD, + UV_EVENT_METADATA_STORE, + UV_EVENT_METADATA_CLEANUP, + UV_EVENT_METADATA_ML_LOAD, + + // netdatacli + UV_EVENT_SCHEDULE_CMD, +}; + +void register_libuv_worker_jobs(); + +#endif //NETDATA_EVENT_LOOP_H diff --git a/src/daemon/main.c b/src/daemon/main.c index 17fef8449..03ae7e003 100644 --- a/src/daemon/main.c +++ b/src/daemon/main.c @@ -6,6 +6,7 @@ #include "static_threads.h" #include "database/engine/page_test.h" +#include #ifdef OS_WINDOWS #include "win_system-info.h" @@ -27,18 +28,7 @@ bool ieee754_doubles = false; time_t netdata_start_time = 0; struct netdata_static_thread *static_threads; -struct config netdata_config = { - .first_section = NULL, - .last_section = NULL, - .mutex = NETDATA_MUTEX_INITIALIZER, - .index = { - .avl_tree = { - .root = NULL, - .compar = appconfig_section_compare - }, - .rwlock = AVL_LOCK_INITIALIZER - } -}; +struct config netdata_config = APPCONFIG_INITIALIZER; typedef struct service_thread { pid_t tid; @@ -326,6 +316,7 @@ void web_client_cache_destroy(void); void netdata_cleanup_and_exit(int ret, const char *action, const char *action_result, const char *action_data) { netdata_exit = 1; + usec_t shutdown_start_time = now_monotonic_usec(); watcher_shutdown_begin(); nd_log_limits_unlimited(); @@ -361,7 +352,7 @@ void netdata_cleanup_and_exit(int ret, const char *action, const char *action_re watcher_step_complete(WATCHER_STEP_ID_CLOSE_WEBRTC_CONNECTIONS); service_signal_exit(SERVICE_MAINTENANCE | ABILITY_DATA_QUERIES | ABILITY_WEB_REQUESTS | - ABILITY_STREAMING_CONNECTIONS | SERVICE_ACLK | SERVICE_ACLKSYNC); + ABILITY_STREAMING_CONNECTIONS | SERVICE_ACLK); watcher_step_complete(WATCHER_STEP_ID_DISABLE_MAINTENANCE_NEW_QUERIES_NEW_WEB_REQUESTS_NEW_STREAMING_CONNECTIONS_AND_ACLK); service_wait_exit(SERVICE_MAINTENANCE, 3 * USEC_PER_SEC); @@ -474,21 +465,22 @@ void netdata_cleanup_and_exit(int ret, const char *action, const char *action_re #endif } + // Don't register a shutdown event if we crashed + if (!ret) + add_agent_event(EVENT_AGENT_SHUTDOWN_TIME, (int64_t)(now_monotonic_usec() - shutdown_start_time)); sqlite_close_databases(); watcher_step_complete(WATCHER_STEP_ID_CLOSE_SQL_DATABASES); sqlite_library_shutdown(); // unlink the pid - if(pidfile[0]) { + if(pidfile && *pidfile) { if(unlink(pidfile) != 0) netdata_log_error("EXIT: cannot unlink pidfile '%s'.", pidfile); } watcher_step_complete(WATCHER_STEP_ID_REMOVE_PID_FILE); -#ifdef ENABLE_HTTPS netdata_ssl_cleanup(); -#endif watcher_step_complete(WATCHER_STEP_ID_FREE_OPENSSL_STRUCTURES); (void) unlink(agent_incomplete_shutdown_file); @@ -496,6 +488,7 @@ void netdata_cleanup_and_exit(int ret, const char *action, const char *action_re watcher_shutdown_end(); watcher_thread_stop(); + curl_global_cleanup(); #ifdef OS_WINDOWS return; @@ -527,12 +520,12 @@ void web_server_threading_selection(void) { 
int make_dns_decision(const char *section_name, const char *config_name, const char *default_value, SIMPLE_PATTERN *p) { - char *value = config_get(section_name,config_name,default_value); + const char *value = config_get(section_name,config_name,default_value); if(!strcmp("yes",value)) return 1; if(!strcmp("no",value)) return 0; - if(strcmp("heuristic",value)) + if(strcmp("heuristic",value) != 0) netdata_log_error("Invalid configuration option '%s' for '%s'/'%s'. Valid options are 'yes', 'no' and 'heuristic'. Proceeding with 'heuristic'", value, section_name, config_name); @@ -542,11 +535,13 @@ int make_dns_decision(const char *section_name, const char *config_name, const c void web_server_config_options(void) { web_client_timeout = - (int)config_get_number(CONFIG_SECTION_WEB, "disconnect idle clients after seconds", web_client_timeout); + (int)config_get_duration_seconds(CONFIG_SECTION_WEB, "disconnect idle clients after", web_client_timeout); + web_client_first_request_timeout = - (int)config_get_number(CONFIG_SECTION_WEB, "timeout for first request", web_client_first_request_timeout); + (int)config_get_duration_seconds(CONFIG_SECTION_WEB, "timeout for first request", web_client_first_request_timeout); + web_client_streaming_rate_t = - config_get_number(CONFIG_SECTION_WEB, "accept a streaming request every seconds", web_client_streaming_rate_t); + config_get_duration_seconds(CONFIG_SECTION_WEB, "accept a streaming request every", web_client_streaming_rate_t); respect_web_browser_do_not_track_policy = config_get_boolean(CONFIG_SECTION_WEB, "respect do not track policy", respect_web_browser_do_not_track_policy); @@ -595,7 +590,7 @@ void web_server_config_options(void) web_enable_gzip = config_get_boolean(CONFIG_SECTION_WEB, "enable gzip compression", web_enable_gzip); - char *s = config_get(CONFIG_SECTION_WEB, "gzip compression strategy", "default"); + const char *s = config_get(CONFIG_SECTION_WEB, "gzip compression strategy", "default"); if(!strcmp(s, "default")) web_gzip_strategy = Z_DEFAULT_STRATEGY; else if(!strcmp(s, "filtered")) @@ -807,8 +802,6 @@ int help(int exitcode) { " are enabled or not, in JSON format.\n\n" " -W simple-pattern pattern string\n" " Check if string matches pattern and exit.\n\n" - " -W \"claim -token=TOKEN -rooms=ROOM1,ROOM2\"\n" - " Claim the agent to the workspace rooms pointed to by TOKEN and ROOM*.\n\n" #ifdef OS_WINDOWS " -W perflibdump [key]\n" " Dump the Windows Performance Counters Registry in JSON.\n\n" @@ -825,7 +818,6 @@ int help(int exitcode) { return exitcode; } -#ifdef ENABLE_HTTPS static void security_init(){ char filename[FILENAME_MAX + 1]; snprintfz(filename, FILENAME_MAX, "%s/ssl/key.pem",netdata_configured_user_config_dir); @@ -839,14 +831,13 @@ static void security_init(){ netdata_ssl_initialize_openssl(); } -#endif static void log_init(void) { nd_log_set_facility(config_get(CONFIG_SECTION_LOGS, "facility", "daemon")); time_t period = ND_LOG_DEFAULT_THROTTLE_PERIOD; size_t logs = ND_LOG_DEFAULT_THROTTLE_LOGS; - period = config_get_number(CONFIG_SECTION_LOGS, "logs flood protection period", period); + period = config_get_duration_seconds(CONFIG_SECTION_LOGS, "logs flood protection period", period); logs = (unsigned long)config_get_number(CONFIG_SECTION_LOGS, "logs to trigger flood protection", (long long int)logs); nd_log_set_flood_protection(logs, period); @@ -856,50 +847,75 @@ static void log_init(void) { nd_log_set_priority_level(config_get(CONFIG_SECTION_LOGS, "level", netdata_log_level)); char filename[FILENAME_MAX + 1]; + char* 
os_default_method = NULL; +#if defined(OS_LINUX) + os_default_method = is_stderr_connected_to_journal() /* || nd_log_journal_socket_available() */ ? "journal" : NULL; +#elif defined(OS_WINDOWS) +#if defined(HAVE_ETW) + os_default_method = "etw"; +#elif defined(HAVE_WEL) + os_default_method = "wel"; +#endif +#endif + +#if defined(OS_WINDOWS) + // on windows, debug log goes to windows events + snprintfz(filename, FILENAME_MAX, "%s", os_default_method); +#else snprintfz(filename, FILENAME_MAX, "%s/debug.log", netdata_configured_log_dir); +#endif + nd_log_set_user_settings(NDLS_DEBUG, config_get(CONFIG_SECTION_LOGS, "debug", filename)); - bool with_journal = is_stderr_connected_to_journal() /* || nd_log_journal_socket_available() */; - if(with_journal) - snprintfz(filename, FILENAME_MAX, "journal"); + if(os_default_method) + snprintfz(filename, FILENAME_MAX, "%s", os_default_method); else snprintfz(filename, FILENAME_MAX, "%s/daemon.log", netdata_configured_log_dir); nd_log_set_user_settings(NDLS_DAEMON, config_get(CONFIG_SECTION_LOGS, "daemon", filename)); - if(with_journal) - snprintfz(filename, FILENAME_MAX, "journal"); + if(os_default_method) + snprintfz(filename, FILENAME_MAX, "%s", os_default_method); else snprintfz(filename, FILENAME_MAX, "%s/collector.log", netdata_configured_log_dir); nd_log_set_user_settings(NDLS_COLLECTORS, config_get(CONFIG_SECTION_LOGS, "collector", filename)); +#if defined(OS_WINDOWS) + // on windows, access log goes to windows events + snprintfz(filename, FILENAME_MAX, "%s", os_default_method); +#else snprintfz(filename, FILENAME_MAX, "%s/access.log", netdata_configured_log_dir); +#endif nd_log_set_user_settings(NDLS_ACCESS, config_get(CONFIG_SECTION_LOGS, "access", filename)); - if(with_journal) - snprintfz(filename, FILENAME_MAX, "journal"); + if(os_default_method) + snprintfz(filename, FILENAME_MAX, "%s", os_default_method); else snprintfz(filename, FILENAME_MAX, "%s/health.log", netdata_configured_log_dir); nd_log_set_user_settings(NDLS_HEALTH, config_get(CONFIG_SECTION_LOGS, "health", filename)); -#ifdef ENABLE_ACLK aclklog_enabled = config_get_boolean(CONFIG_SECTION_CLOUD, "conversation log", CONFIG_BOOLEAN_NO); if (aclklog_enabled) { +#if defined(OS_WINDOWS) + // on windows, aclk log goes to windows events + snprintfz(filename, FILENAME_MAX, "%s", os_default_method); +#else snprintfz(filename, FILENAME_MAX, "%s/aclk.log", netdata_configured_log_dir); +#endif nd_log_set_user_settings(NDLS_ACLK, config_get(CONFIG_SECTION_CLOUD, "conversation log file", filename)); } -#endif + + aclk_config_get_query_scope(); } -char *initialize_lock_directory_path(char *prefix) -{ +static const char *get_varlib_subdir_from_config(const char *prefix, const char *dir) { char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, "%s/lock", prefix); - - return config_get(CONFIG_SECTION_DIRECTORIES, "lock", filename); + snprintfz(filename, FILENAME_MAX, "%s/%s", prefix, dir); + return config_get(CONFIG_SECTION_DIRECTORIES, dir, filename); } static void backwards_compatible_config() { // move [global] options to the [web] section + config_move(CONFIG_SECTION_GLOBAL, "http port listen backlog", CONFIG_SECTION_WEB, "listen backlog"); @@ -1003,7 +1019,10 @@ static void backwards_compatible_config() { CONFIG_SECTION_PLUGINS, "statsd"); config_move(CONFIG_SECTION_GLOBAL, "memory mode", - CONFIG_SECTION_DB, "mode"); + CONFIG_SECTION_DB, "db"); + + config_move(CONFIG_SECTION_DB, "mode", + CONFIG_SECTION_DB, "db"); config_move(CONFIG_SECTION_GLOBAL, "history", 
CONFIG_SECTION_DB, "retention"); @@ -1012,7 +1031,13 @@ static void backwards_compatible_config() { CONFIG_SECTION_DB, "update every"); config_move(CONFIG_SECTION_GLOBAL, "page cache size", - CONFIG_SECTION_DB, "dbengine page cache size MB"); + CONFIG_SECTION_DB, "dbengine page cache size"); + + config_move(CONFIG_SECTION_DB, "dbengine page cache size MB", + CONFIG_SECTION_DB, "dbengine page cache size"); + + config_move(CONFIG_SECTION_DB, "dbengine extent cache size MB", + CONFIG_SECTION_DB, "dbengine extent cache size"); config_move(CONFIG_SECTION_DB, "page cache size", CONFIG_SECTION_DB, "dbengine page cache size MB"); @@ -1023,30 +1048,6 @@ static void backwards_compatible_config() { config_move(CONFIG_SECTION_DB, "page cache with malloc", CONFIG_SECTION_DB, "dbengine page cache with malloc"); - config_move(CONFIG_SECTION_GLOBAL, "dbengine disk space", - CONFIG_SECTION_DB, "dbengine disk space MB"); - - config_move(CONFIG_SECTION_GLOBAL, "dbengine multihost disk space", - CONFIG_SECTION_DB, "dbengine multihost disk space MB"); - - config_move(CONFIG_SECTION_DB, "dbengine disk space MB", - CONFIG_SECTION_DB, "dbengine multihost disk space MB"); - - config_move(CONFIG_SECTION_DB, "dbengine multihost disk space MB", - CONFIG_SECTION_DB, "dbengine tier 0 disk space MB"); - - config_move(CONFIG_SECTION_DB, "dbengine tier 1 multihost disk space MB", - CONFIG_SECTION_DB, "dbengine tier 1 disk space MB"); - - config_move(CONFIG_SECTION_DB, "dbengine tier 2 multihost disk space MB", - CONFIG_SECTION_DB, "dbengine tier 2 disk space MB"); - - config_move(CONFIG_SECTION_DB, "dbengine tier 3 multihost disk space MB", - CONFIG_SECTION_DB, "dbengine tier 3 disk space MB"); - - config_move(CONFIG_SECTION_DB, "dbengine tier 4 multihost disk space MB", - CONFIG_SECTION_DB, "dbengine tier 4 disk space MB"); - config_move(CONFIG_SECTION_GLOBAL, "memory deduplication (ksm)", CONFIG_SECTION_DB, "memory deduplication (ksm)"); @@ -1060,17 +1061,67 @@ static void backwards_compatible_config() { CONFIG_SECTION_DB, "dbengine pages per extent"); config_move(CONFIG_SECTION_GLOBAL, "cleanup obsolete charts after seconds", - CONFIG_SECTION_DB, "cleanup obsolete charts after secs"); + CONFIG_SECTION_DB, "cleanup obsolete charts after"); + + config_move(CONFIG_SECTION_DB, "cleanup obsolete charts after secs", + CONFIG_SECTION_DB, "cleanup obsolete charts after"); config_move(CONFIG_SECTION_GLOBAL, "gap when lost iterations above", CONFIG_SECTION_DB, "gap when lost iterations above"); config_move(CONFIG_SECTION_GLOBAL, "cleanup orphan hosts after seconds", - CONFIG_SECTION_DB, "cleanup orphan hosts after secs"); + CONFIG_SECTION_DB, "cleanup orphan hosts after"); + + config_move(CONFIG_SECTION_DB, "cleanup orphan hosts after secs", + CONFIG_SECTION_DB, "cleanup orphan hosts after"); + + config_move(CONFIG_SECTION_DB, "cleanup ephemeral hosts after secs", + CONFIG_SECTION_DB, "cleanup ephemeral hosts after"); + + config_move(CONFIG_SECTION_DB, "seconds to replicate", + CONFIG_SECTION_DB, "replication period"); + + config_move(CONFIG_SECTION_DB, "seconds per replication step", + CONFIG_SECTION_DB, "replication step"); config_move(CONFIG_SECTION_GLOBAL, "enable zero metrics", CONFIG_SECTION_DB, "enable zero metrics"); + // ---------------------------------------------------------------------------------------------------------------- + + config_move(CONFIG_SECTION_GLOBAL, "dbengine disk space", + CONFIG_SECTION_DB, "dbengine tier 0 retention size"); + + config_move(CONFIG_SECTION_GLOBAL, "dbengine multihost disk space", + 
CONFIG_SECTION_DB, "dbengine tier 0 retention size"); + + config_move(CONFIG_SECTION_DB, "dbengine disk space MB", + CONFIG_SECTION_DB, "dbengine tier 0 retention size"); + + for(size_t tier = 0; tier < RRD_STORAGE_TIERS ;tier++) { + char old_config[128], new_config[128]; + + snprintfz(old_config, sizeof(old_config), "dbengine tier %zu retention days", tier); + snprintfz(new_config, sizeof(new_config), "dbengine tier %zu retention time", tier); + config_move(CONFIG_SECTION_DB, old_config, + CONFIG_SECTION_DB, new_config); + + if(tier == 0) + snprintfz(old_config, sizeof(old_config), "dbengine multihost disk space MB"); + else + snprintfz(old_config, sizeof(old_config), "dbengine tier %zu multihost disk space MB", tier); + snprintfz(new_config, sizeof(new_config), "dbengine tier %zu retention size", tier); + config_move(CONFIG_SECTION_DB, old_config, + CONFIG_SECTION_DB, new_config); + + snprintfz(old_config, sizeof(old_config), "dbengine tier %zu disk space MB", tier); + snprintfz(new_config, sizeof(new_config), "dbengine tier %zu retention size", tier); + config_move(CONFIG_SECTION_DB, old_config, + CONFIG_SECTION_DB, new_config); + } + + // ---------------------------------------------------------------------------------------------------------------- + config_move(CONFIG_SECTION_LOGS, "error", CONFIG_SECTION_LOGS, "daemon"); @@ -1082,11 +1133,42 @@ static void backwards_compatible_config() { config_move(CONFIG_SECTION_LOGS, "errors flood protection period", CONFIG_SECTION_LOGS, "logs flood protection period"); + config_move(CONFIG_SECTION_HEALTH, "is ephemeral", CONFIG_SECTION_GLOBAL, "is ephemeral node"); config_move(CONFIG_SECTION_HEALTH, "has unstable connection", CONFIG_SECTION_GLOBAL, "has unstable connection"); + + config_move(CONFIG_SECTION_HEALTH, "run at least every seconds", + CONFIG_SECTION_HEALTH, "run at least every"); + + config_move(CONFIG_SECTION_HEALTH, "postpone alarms during hibernation for seconds", + CONFIG_SECTION_HEALTH, "postpone alarms during hibernation for"); + + config_move(CONFIG_SECTION_HEALTH, "health log history", + CONFIG_SECTION_HEALTH, "health log retention"); + + config_move(CONFIG_SECTION_REGISTRY, "registry expire idle persons days", + CONFIG_SECTION_REGISTRY, "registry expire idle persons"); + + config_move(CONFIG_SECTION_WEB, "disconnect idle clients after seconds", + CONFIG_SECTION_WEB, "disconnect idle clients after"); + + config_move(CONFIG_SECTION_WEB, "accept a streaming request every seconds", + CONFIG_SECTION_WEB, "accept a streaming request every"); + + config_move(CONFIG_SECTION_STATSD, "set charts as obsolete after secs", + CONFIG_SECTION_STATSD, "set charts as obsolete after"); + + config_move(CONFIG_SECTION_STATSD, "disconnect idle tcp clients after seconds", + CONFIG_SECTION_STATSD, "disconnect idle tcp clients after"); + + config_move("plugin:idlejitter", "loop time in ms", + "plugin:idlejitter", "loop time"); + + config_move("plugin:proc:/sys/class/infiniband", "refresh ports state every seconds", + "plugin:proc:/sys/class/infiniband", "refresh ports state every"); } static int get_hostname(char *buf, size_t buf_size) { @@ -1119,7 +1201,7 @@ static void get_netdata_configured_variables() // get the hostname netdata_configured_host_prefix = config_get(CONFIG_SECTION_GLOBAL, "host access prefix", ""); - verify_netdata_host_prefix(true); + (void) verify_netdata_host_prefix(true); char buf[HOSTNAME_MAX + 1]; if (get_hostname(buf, HOSTNAME_MAX)) @@ -1131,22 +1213,22 @@ static void get_netdata_configured_variables() // 
------------------------------------------------------------------------ // get default database update frequency - default_rrd_update_every = (int) config_get_number(CONFIG_SECTION_DB, "update every", UPDATE_EVERY); + default_rrd_update_every = (int) config_get_duration_seconds(CONFIG_SECTION_DB, "update every", UPDATE_EVERY); if(default_rrd_update_every < 1 || default_rrd_update_every > 600) { netdata_log_error("Invalid data collection frequency (update every) %d given. Defaulting to %d.", default_rrd_update_every, UPDATE_EVERY); default_rrd_update_every = UPDATE_EVERY; - config_set_number(CONFIG_SECTION_DB, "update every", default_rrd_update_every); + config_set_duration_seconds(CONFIG_SECTION_DB, "update every", default_rrd_update_every); } // ------------------------------------------------------------------------ - // get default memory mode for the database + // get the database selection { - const char *mode = config_get(CONFIG_SECTION_DB, "mode", rrd_memory_mode_name(default_rrd_memory_mode)); + const char *mode = config_get(CONFIG_SECTION_DB, "db", rrd_memory_mode_name(default_rrd_memory_mode)); default_rrd_memory_mode = rrd_memory_mode_id(mode); if(strcmp(mode, rrd_memory_mode_name(default_rrd_memory_mode)) != 0) { netdata_log_error("Invalid memory mode '%s' given. Using '%s'", mode, rrd_memory_mode_name(default_rrd_memory_mode)); - config_set(CONFIG_SECTION_DB, "mode", rrd_memory_mode_name(default_rrd_memory_mode)); + config_set(CONFIG_SECTION_DB, "db", rrd_memory_mode_name(default_rrd_memory_mode)); } } @@ -1175,7 +1257,8 @@ static void get_netdata_configured_variables() netdata_configured_cache_dir = config_get(CONFIG_SECTION_DIRECTORIES, "cache", netdata_configured_cache_dir); netdata_configured_varlib_dir = config_get(CONFIG_SECTION_DIRECTORIES, "lib", netdata_configured_varlib_dir); - netdata_configured_lock_dir = initialize_lock_directory_path(netdata_configured_varlib_dir); + netdata_configured_lock_dir = get_varlib_subdir_from_config(netdata_configured_varlib_dir, "lock"); + netdata_configured_cloud_dir = get_varlib_subdir_from_config(netdata_configured_varlib_dir, "cloud.d"); { pluginsd_initialize_plugin_directories(); @@ -1199,17 +1282,19 @@ static void get_netdata_configured_variables() // ------------------------------------------------------------------------ // get default Database Engine page cache size in MiB - default_rrdeng_page_cache_mb = (int) config_get_number(CONFIG_SECTION_DB, "dbengine page cache size MB", default_rrdeng_page_cache_mb); - default_rrdeng_extent_cache_mb = (int) config_get_number(CONFIG_SECTION_DB, "dbengine extent cache size MB", default_rrdeng_extent_cache_mb); + default_rrdeng_page_cache_mb = (int) config_get_size_mb(CONFIG_SECTION_DB, "dbengine page cache size", default_rrdeng_page_cache_mb); + default_rrdeng_extent_cache_mb = (int) config_get_size_mb(CONFIG_SECTION_DB, "dbengine extent cache size", default_rrdeng_extent_cache_mb); db_engine_journal_check = config_get_boolean(CONFIG_SECTION_DB, "dbengine enable journal integrity check", CONFIG_BOOLEAN_NO); - if(default_rrdeng_extent_cache_mb < 0) + if(default_rrdeng_extent_cache_mb < 0) { default_rrdeng_extent_cache_mb = 0; + config_set_size_mb(CONFIG_SECTION_DB, "dbengine extent cache size", default_rrdeng_extent_cache_mb); + } if(default_rrdeng_page_cache_mb < RRDENG_MIN_PAGE_CACHE_SIZE_MB) { netdata_log_error("Invalid page cache size %d given. 
Defaulting to %d.", default_rrdeng_page_cache_mb, RRDENG_MIN_PAGE_CACHE_SIZE_MB); default_rrdeng_page_cache_mb = RRDENG_MIN_PAGE_CACHE_SIZE_MB; - config_set_number(CONFIG_SECTION_DB, "dbengine page cache size MB", default_rrdeng_page_cache_mb); + config_set_size_mb(CONFIG_SECTION_DB, "dbengine page cache size", default_rrdeng_page_cache_mb); } // ------------------------------------------------------------------------ @@ -1242,28 +1327,24 @@ static void get_netdata_configured_variables() // get KSM settings #ifdef MADV_MERGEABLE - enable_ksm = config_get_boolean(CONFIG_SECTION_DB, "memory deduplication (ksm)", enable_ksm); + enable_ksm = config_get_boolean_ondemand(CONFIG_SECTION_DB, "memory deduplication (ksm)", enable_ksm); #endif // -------------------------------------------------------------------- - // metric correlations - enable_metric_correlations = config_get_boolean(CONFIG_SECTION_GLOBAL, "enable metric correlations", enable_metric_correlations); - default_metric_correlations_method = weights_string_to_method(config_get( - CONFIG_SECTION_GLOBAL, "metric correlations method", - weights_method_to_string(default_metric_correlations_method))); + rrdhost_free_ephemeral_time_s = + config_get_duration_seconds(CONFIG_SECTION_DB, "cleanup ephemeral hosts after", rrdhost_free_ephemeral_time_s); - // -------------------------------------------------------------------- + rrdset_free_obsolete_time_s = + config_get_duration_seconds(CONFIG_SECTION_DB, "cleanup obsolete charts after", rrdset_free_obsolete_time_s); - rrdset_free_obsolete_time_s = config_get_number(CONFIG_SECTION_DB, "cleanup obsolete charts after secs", rrdset_free_obsolete_time_s); - rrdhost_free_ephemeral_time_s = config_get_number(CONFIG_SECTION_DB, "cleanup ephemeral hosts after secs", rrdhost_free_ephemeral_time_s); // Current chart locking and invalidation scheme doesn't prevent Netdata from segmentation faults if a short // cleanup delay is set. Extensive stress tests showed that 10 seconds is quite a safe delay. Look at // https://github.com/netdata/netdata/pull/11222#issuecomment-868367920 for more information. 
if (rrdset_free_obsolete_time_s < 10) { rrdset_free_obsolete_time_s = 10; - netdata_log_info("The \"cleanup obsolete charts after seconds\" option was set to 10 seconds."); - config_set_number(CONFIG_SECTION_DB, "cleanup obsolete charts after secs", rrdset_free_obsolete_time_s); + netdata_log_info("The \"cleanup obsolete charts after\" option was set to 10 seconds."); + config_set_duration_seconds(CONFIG_SECTION_DB, "cleanup obsolete charts after", rrdset_free_obsolete_time_s); } gap_when_lost_iterations_above = (int)config_get_number(CONFIG_SECTION_DB, "gap when lost iterations above", gap_when_lost_iterations_above); @@ -1276,14 +1357,13 @@ static void get_netdata_configured_variables() // -------------------------------------------------------------------- // get various system parameters - os_get_system_HZ(); os_get_system_cpus_uncached(); os_get_system_pid_max(); } -static void post_conf_load(char **user) +static void post_conf_load(const char **user) { // -------------------------------------------------------------------- // get the user we should run @@ -1298,7 +1378,7 @@ static void post_conf_load(char **user) } } -static bool load_netdata_conf(char *filename, char overwrite_used, char **user) { +static bool load_netdata_conf(char *filename, char overwrite_used, const char **user) { errno_clear(); int ret = 0; @@ -1309,14 +1389,14 @@ static bool load_netdata_conf(char *filename, char overwrite_used, char **user) netdata_log_error("CONFIG: cannot load config file '%s'.", filename); } else { - filename = strdupz_path_subpath(netdata_configured_user_config_dir, "netdata.conf"); + filename = filename_from_path_entry_strdupz(netdata_configured_user_config_dir, "netdata.conf"); ret = config_load(filename, overwrite_used, NULL); if(!ret) { netdata_log_info("CONFIG: cannot load user config '%s'. Will try the stock version.", filename); freez(filename); - filename = strdupz_path_subpath(netdata_configured_stock_config_dir, "netdata.conf"); + filename = filename_from_path_entry_strdupz(netdata_configured_stock_config_dir, "netdata.conf"); ret = config_load(filename, overwrite_used, NULL); if(!ret) netdata_log_info("CONFIG: cannot load stock config '%s'. Running with internal defaults.", filename); @@ -1351,7 +1431,7 @@ int get_system_info(struct rrdhost_system_info *system_info) { char line[200 + 1]; // Removed the double strlens, if the Coverity tainted string warning reappears I'll revert. // One time init code, but I'm curious about the warning... 
- while (fgets(line, 200, instance->child_stdout_fp) != NULL) { + while (fgets(line, 200, spawn_popen_stdout(instance)) != NULL) { char *value=line; while (*value && *value != '=') value++; if (*value=='=') { @@ -1366,7 +1446,7 @@ int get_system_info(struct rrdhost_system_info *system_info) { if(unlikely(rrdhost_set_system_info_variable(system_info, line, value))) { netdata_log_error("Unexpected environment variable %s=%s", line, value); } else { - setenv(line, value, 1); + nd_setenv(line, value, 1); } } } @@ -1405,12 +1485,13 @@ int unittest_rrdpush_compressions(void); int uuid_unittest(void); int progress_unittest(void); int dyncfg_unittest(void); +bool netdata_random_session_id_generate(void); #ifdef OS_WINDOWS int windows_perflib_dump(const char *key); #endif -int unittest_prepare_rrd(char **user) { +int unittest_prepare_rrd(const char **user) { post_conf_load(user); get_netdata_configured_variables(); default_rrd_update_every = 1; @@ -1422,13 +1503,12 @@ int unittest_prepare_rrd(char **user) { fprintf(stderr, "rrd_init failed for unittest\n"); return 1; } - default_rrdpush_enabled = 0; + stream_conf_send_enabled = 0; return 0; } int netdata_main(int argc, char **argv) { - clocks_init(); string_init(); analytics_init(); @@ -1441,7 +1521,7 @@ int netdata_main(int argc, char **argv) { int config_loaded = 0; bool close_open_fds = true; size_t default_stacksize; - char *user = NULL; + const char *user = NULL; #ifdef OS_WINDOWS int dont_fork = 1; @@ -1455,6 +1535,8 @@ int netdata_main(int argc, char **argv) { // set the name for logging program_name = "netdata"; + curl_global_init(CURL_GLOBAL_ALL); + // parse options { int num_opts = sizeof(option_definitions) / sizeof(struct option_def); @@ -1483,7 +1565,7 @@ int netdata_main(int argc, char **argv) { } else { netdata_log_debug(D_OPTIONS, "Configuration loaded from %s.", optarg); - load_cloud_conf(1); + cloud_conf_load(1); config_loaded = 1; } break; @@ -1499,8 +1581,7 @@ int netdata_main(int argc, char **argv) { config_set(CONFIG_SECTION_WEB, "bind to", optarg); break; case 'P': - strncpy(pidfile, optarg, FILENAME_MAX); - pidfile[FILENAME_MAX] = '\0'; + pidfile = strdupz(optarg); break; case 'p': config_set(CONFIG_SECTION_GLOBAL, "default port", optarg); @@ -1522,7 +1603,6 @@ int netdata_main(int argc, char **argv) { { char* stacksize_string = "stacksize="; char* debug_flags_string = "debug_flags="; - char* claim_string = "claim"; #ifdef ENABLE_DBENGINE char* createdataset_string = "createdataset="; char* stresstest_string = "stresstest="; @@ -1791,7 +1871,7 @@ int netdata_main(int argc, char **argv) { // so the caller can use -c netdata.conf before or // after this parameter to prevent or allow overwriting // variables at netdata.conf - config_set_default(section, key, value); + config_set_default_raw_value(section, key, value); // fprintf(stderr, "SET section '%s', key '%s', value '%s'\n", section, key, value); } @@ -1824,7 +1904,7 @@ int netdata_main(int argc, char **argv) { // so the caller can use -c netdata.conf before or // after this parameter to prevent or allow overwriting // variables at netdata.conf - appconfig_set_default(tmp_config, section, key, value); + appconfig_set_default_raw_value(tmp_config, section, key, value); // fprintf(stderr, "SET section '%s', key '%s', value '%s'\n", section, key, value); } @@ -1870,7 +1950,7 @@ int netdata_main(int argc, char **argv) { if(!config_loaded) { fprintf(stderr, "warning: no configuration file has been loaded. Use -c CONFIG_FILE, before -W get. 
Using default config.\n"); load_netdata_conf(NULL, 0, &user); - load_cloud_conf(1); + cloud_conf_load(1); } get_netdata_configured_variables(); @@ -1884,10 +1964,6 @@ int netdata_main(int argc, char **argv) { printf("%s\n", value); return 0; } - else if(strncmp(optarg, claim_string, strlen(claim_string)) == 0) { - /* will trigger a claiming attempt when the agent is initialized */ - claiming_pending_arguments = optarg + strlen(claim_string); - } else if(strcmp(optarg, "buildinfo") == 0) { print_build_info(); return 0; @@ -1919,18 +1995,18 @@ int netdata_main(int argc, char **argv) { if (close_open_fds == true) { // close all open file descriptors, except the standard ones // the caller may have left open files (lxc-attach has this issue) - os_close_all_non_std_open_fds_except(NULL, 0); + os_close_all_non_std_open_fds_except(NULL, 0, 0); } if(!config_loaded) { load_netdata_conf(NULL, 0, &user); - load_cloud_conf(0); + cloud_conf_load(0); } // ------------------------------------------------------------------------ // initialize netdata { - char *pmax = config_get(CONFIG_SECTION_GLOBAL, "glibc malloc arena max for plugins", "1"); + const char *pmax = config_get(CONFIG_SECTION_GLOBAL, "glibc malloc arena max for plugins", "1"); if(pmax && *pmax) setenv("MALLOC_ARENA_MAX", pmax, 1); @@ -1970,7 +2046,8 @@ int netdata_main(int argc, char **argv) { // prepare configuration environment variables for the plugins get_netdata_configured_variables(); - set_global_environment(); + set_environment_for_plugins_and_scripts(); + analytics_reset(); // work while we are cd into config_dir // to allow the plugins refer to their config @@ -1986,8 +2063,8 @@ int netdata_main(int argc, char **argv) { // -------------------------------------------------------------------- // get the debugging flags from the configuration file - char *flags = config_get(CONFIG_SECTION_LOGS, "debug flags", "0x0000000000000000"); - setenv("NETDATA_DEBUG_FLAGS", flags, 1); + const char *flags = config_get(CONFIG_SECTION_LOGS, "debug flags", "0x0000000000000000"); + nd_setenv("NETDATA_DEBUG_FLAGS", flags, 1); debug_flags = strtoull(flags, NULL, 0); netdata_log_debug(D_OPTIONS, "Debug flags set to '0x%" PRIX64 "'.", debug_flags); @@ -2013,16 +2090,10 @@ int netdata_main(int argc, char **argv) { nd_log_initialize(); netdata_log_info("Netdata agent version '%s' is starting", NETDATA_VERSION); - ieee754_doubles = is_system_ieee754_double(); - if(!ieee754_doubles) - globally_disabled_capabilities |= STREAM_CAP_IEEE754; - - aral_judy_init(); + check_local_streaming_capabilities(); get_system_timezone(); - bearer_tokens_init(); - replication_initialize(); rrd_functions_inflight_init(); @@ -2030,9 +2101,7 @@ int netdata_main(int argc, char **argv) { // -------------------------------------------------------------------- // get the certificate and start security -#ifdef ENABLE_HTTPS security_init(); -#endif // -------------------------------------------------------------------- // This is the safest place to start the SILENCERS structure @@ -2040,12 +2109,6 @@ int netdata_main(int argc, char **argv) { health_set_silencers_filename(); health_initialize_global_silencers(); -// // -------------------------------------------------------------------- -// // Initialize ML configuration -// -// delta_startup_time("initialize ML"); -// ml_init(); - // -------------------------------------------------------------------- // setup process signals @@ -2053,8 +2116,7 @@ int netdata_main(int argc, char **argv) { // this causes the threads to block signals. 
delta_startup_time("initialize signals"); - signals_block(); - signals_init(); // setup the signals we want to use + nd_initialize_signals(); // setup the signals we want to use // -------------------------------------------------------------------- // check which threads are enabled and initialize them @@ -2086,7 +2148,7 @@ int netdata_main(int argc, char **argv) { st->init_routine(); if(st->env_name) - setenv(st->env_name, st->enabled?"YES":"NO", 1); + nd_setenv(st->env_name, st->enabled?"YES":"NO", 1); if(st->global_variable) *st->global_variable = (st->enabled) ? true : false; @@ -2097,7 +2159,7 @@ int netdata_main(int argc, char **argv) { delta_startup_time("initialize web server"); - web_client_api_v1_init(); + nd_web_api_init(); web_server_threading_selection(); if(web_server_mode != WEB_SERVER_MODE_NONE) { @@ -2165,7 +2227,7 @@ int netdata_main(int argc, char **argv) { netdata_configured_home_dir = config_get(CONFIG_SECTION_DIRECTORIES, "home", pw->pw_dir); } - setenv("HOME", netdata_configured_home_dir, 1); + nd_setenv("HOME", netdata_configured_home_dir, 1); dyncfg_init(true); @@ -2173,11 +2235,12 @@ int netdata_main(int argc, char **argv) { delta_startup_time("initialize threads after fork"); - netdata_threads_init_after_fork((size_t)config_get_number(CONFIG_SECTION_GLOBAL, "pthread stack size", (long)default_stacksize)); + netdata_threads_init_after_fork((size_t)config_get_size_bytes(CONFIG_SECTION_GLOBAL, "pthread stack size", default_stacksize)); // initialize internal registry delta_startup_time("initialize registry"); registry_init(); + cloud_conf_init_after_registry(); netdata_random_session_id_generate(); // ------------------------------------------------------------------------ @@ -2203,7 +2266,7 @@ int netdata_main(int argc, char **argv) { delta_startup_time("initialize RRD structures"); if(rrd_init(netdata_configured_hostname, system_info, false)) { - set_late_global_environment(system_info); + set_late_analytics_variables(system_info); fatal("Cannot initialize localhost instance with name '%s'.", netdata_configured_hostname); } @@ -2219,15 +2282,10 @@ int netdata_main(int argc, char **argv) { if (fd >= 0) close(fd); - // ------------------------------------------------------------------------ // Claim netdata agent to a cloud endpoint delta_startup_time("collect claiming info"); - - if (claiming_pending_arguments) - claim_agent(claiming_pending_arguments, false, NULL); - load_claiming_state(); // ------------------------------------------------------------------------ @@ -2242,11 +2300,13 @@ int netdata_main(int argc, char **argv) { // ------------------------------------------------------------------------ // spawn the threads + bearer_tokens_init(); + delta_startup_time("start the static threads"); web_server_config_options(); - set_late_global_environment(system_info); + set_late_analytics_variables(system_info); for (i = 0; static_threads[i].name != NULL ; i++) { struct netdata_static_thread *st = &static_threads[i]; @@ -2269,7 +2329,13 @@ int netdata_main(int argc, char **argv) { delta_startup_time("ready"); usec_t ready_ut = now_monotonic_usec(); - netdata_log_info("NETDATA STARTUP: completed in %llu ms. Enjoy real-time performance monitoring!", (ready_ut - started_ut) / USEC_PER_MS); + add_agent_event(EVENT_AGENT_START_TIME, (int64_t ) (ready_ut - started_ut)); + usec_t median_start_time = get_agent_event_time_median(EVENT_AGENT_START_TIME); + netdata_log_info( + "NETDATA STARTUP: completed in %llu ms (median start up time is %llu ms). 
Enjoy real-time performance monitoring!", + (ready_ut - started_ut) / USEC_PER_MS, median_start_time / USEC_PER_MS); + + cleanup_agent_event_log(); netdata_ready = true; analytics_statistic_t start_statistic = { "START", "-", "-" }; @@ -2295,28 +2361,7 @@ int netdata_main(int argc, char **argv) { } } - // ------------------------------------------------------------------------ - // Report ACLK build failure -#ifndef ENABLE_ACLK - netdata_log_error("This agent doesn't have ACLK."); - char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, "%s/.aclk_report_sent", netdata_configured_varlib_dir); - if (netdata_anonymous_statistics_enabled > 0 && access(filename, F_OK)) { // -1 -> not initialized - analytics_statistic_t statistic = { "ACLK_DISABLED", "-", "-" }; - analytics_statistic_send(&statistic); - - int fd = open(filename, O_WRONLY | O_CREAT | O_TRUNC, 444); - if (fd == -1) - netdata_log_error("Cannot create file '%s'. Please fix this.", filename); - else - close(fd); - } -#endif - webrtc_initialize(); - - signals_unblock(); - return 10; } @@ -2327,7 +2372,7 @@ int main(int argc, char *argv[]) if (rc != 10) return rc; - signals_handle(); + nd_process_signals(); return 1; } #endif diff --git a/src/daemon/main.h b/src/daemon/main.h index 3188623b6..f5da3feb6 100644 --- a/src/daemon/main.h +++ b/src/daemon/main.h @@ -23,8 +23,7 @@ typedef enum { SERVICE_CONTEXT = (1 << 10), SERVICE_ANALYTICS = (1 << 11), SERVICE_EXPORTERS = (1 << 12), - SERVICE_ACLKSYNC = (1 << 13), - SERVICE_HTTPD = (1 << 14) + SERVICE_HTTPD = (1 << 13) } SERVICE_TYPE; typedef enum { diff --git a/src/daemon/service.c b/src/daemon/service.c index ead633445..f209cb470 100644 --- a/src/daemon/service.c +++ b/src/daemon/service.c @@ -203,7 +203,7 @@ static void svc_rrd_cleanup_obsolete_charts_from_all_hosts() { if (host == localhost) continue; - netdata_mutex_lock(&host->receiver_lock); + spinlock_lock(&host->receiver_lock); time_t now = now_realtime_sec(); @@ -215,7 +215,7 @@ static void svc_rrd_cleanup_obsolete_charts_from_all_hosts() { host->trigger_chart_obsoletion_check = 0; } - netdata_mutex_unlock(&host->receiver_lock); + spinlock_unlock(&host->receiver_lock); } rrd_rdunlock(); @@ -247,14 +247,12 @@ restart_after_removal: } worker_is_busy(WORKER_JOB_FREE_HOST); -#ifdef ENABLE_ACLK // in case we have cloud connection we inform cloud // a child disconnected - if (netdata_cloud_enabled && force) { + if (force) { aclk_host_state_update(host, 0, 0); unregister_node(host->machine_guid); } -#endif rrdhost_free___while_having_rrd_wrlock(host, force); goto restart_after_removal; } @@ -299,7 +297,7 @@ void *service_main(void *ptr) CLEANUP_FUNCTION_REGISTER(service_main_cleanup) cleanup_ptr = ptr; heartbeat_t hb; - heartbeat_init(&hb); + heartbeat_init(&hb, USEC_PER_SEC); usec_t step = USEC_PER_SEC * SERVICE_HEARTBEAT; usec_t real_step = USEC_PER_SEC; @@ -307,7 +305,7 @@ void *service_main(void *ptr) while (service_running(SERVICE_MAINTENANCE)) { worker_is_idle(); - heartbeat_next(&hb, USEC_PER_SEC); + heartbeat_next(&hb); if (real_step < step) { real_step += USEC_PER_SEC; continue; diff --git a/src/daemon/signals.c b/src/daemon/signals.c index 4e4d7c4d4..163f92ad8 100644 --- a/src/daemon/signals.c +++ b/src/daemon/signals.c @@ -2,12 +2,6 @@ #include "common.h" -/* - * IMPORTANT: Libuv uv_spawn() uses SIGCHLD internally: - * https://github.com/libuv/libuv/blob/cc51217a317e96510fbb284721d5e6bc2af31e33/src/unix/process.c#L485 - * Extreme care is needed when mixing and matching POSIX and libuv. 
- */ typedef enum signal_action { NETDATA_SIGNAL_END_OF_LIST, NETDATA_SIGNAL_IGNORE, @@ -56,24 +50,33 @@ static void signal_handler(int signo) { } } -void signals_block(void) { +// Mask all signals, to ensure they will only be unmasked at the threads that can handle them. +// This means that all third party libraries (including libuv) cannot use signals anymore. +// The signals they are interested in must be unblocked at their corresponding event loops. +static void posix_mask_all_signals(void) { sigset_t sigset; sigfillset(&sigset); - if(pthread_sigmask(SIG_BLOCK, &sigset, NULL) == -1) - netdata_log_error("SIGNAL: Could not block signals for threads"); + if(pthread_sigmask(SIG_BLOCK, &sigset, NULL) != 0) + netdata_log_error("SIGNAL: cannot mask all signals"); } -void signals_unblock(void) { +// Unmask all signals the netdata main signal handler uses. +// All other signals remain masked. +static void posix_unmask_my_signals(void) { sigset_t sigset; - sigfillset(&sigset); + sigemptyset(&sigset); - if(pthread_sigmask(SIG_UNBLOCK, &sigset, NULL) == -1) { - netdata_log_error("SIGNAL: Could not unblock signals for threads"); - } + for (int i = 0; signals_waiting[i].action != NETDATA_SIGNAL_END_OF_LIST; i++) + sigaddset(&sigset, signals_waiting[i].signo); + + if (pthread_sigmask(SIG_UNBLOCK, &sigset, NULL) != 0) + netdata_log_error("SIGNAL: cannot unmask netdata signals"); } -void signals_init(void) { +void nd_initialize_signals(void) { + posix_mask_all_signals(); // block all signals for all threads + // Catch signals which we want to use struct sigaction sa; sa.sa_flags = 0; @@ -97,22 +100,10 @@ } } -void signals_reset(void) { - struct sigaction sa; - sigemptyset(&sa.sa_mask); - sa.sa_handler = SIG_DFL; - sa.sa_flags = 0; - - int i; - for (i = 0; signals_waiting[i].action != NETDATA_SIGNAL_END_OF_LIST; i++) { - if(sigaction(signals_waiting[i].signo, &sa, NULL) == -1) - netdata_log_error("SIGNAL: Failed to reset signal handler for: %s", signals_waiting[i].name); - } -} +void nd_process_signals(void) { + posix_unmask_my_signals(); -void signals_handle(void) { while(1) { - // pause() causes the calling process (or thread) to sleep until a signal - // is delivered that either terminates the process or causes the invocation - // of a signal-catching function.
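The signals.c rework above replaces the old block/unblock-everything helpers with a stricter model: every signal is masked process-wide before any thread is spawned, and only the dedicated signal-handling thread unmasks the specific signals netdata handles. The following is a minimal standalone sketch of that POSIX pattern under stated assumptions; all demo_* names are hypothetical illustrations, and this is not netdata's implementation:

```c
/*
 * Sketch: mask every signal process-wide before any thread exists, then
 * unmask a known list only in the one thread meant to handle them.
 * All demo_* names are hypothetical. Build on POSIX with: cc demo.c -pthread
 */
#include <signal.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static const int demo_signals[] = { SIGINT, SIGTERM, SIGUSR1 };
#define DEMO_SIGNALS_COUNT (sizeof(demo_signals) / sizeof(demo_signals[0]))

static void demo_handler(int signo) {
    (void)signo; // only async-signal-safe work belongs in a handler
}

int main(void) {
    // 1. Block everything first. Threads created later inherit this mask,
    //    so third-party library threads never receive stray signals.
    sigset_t all;
    sigfillset(&all);
    if (pthread_sigmask(SIG_BLOCK, &all, NULL) != 0)
        fprintf(stderr, "cannot mask all signals\n");

    // 2. Install handlers for the signals this process wants to catch.
    struct sigaction sa;
    memset(&sa, 0, sizeof(sa));
    sa.sa_handler = demo_handler;
    sigemptyset(&sa.sa_mask);
    for (size_t i = 0; i < DEMO_SIGNALS_COUNT; i++)
        if (sigaction(demo_signals[i], &sa, NULL) == -1)
            fprintf(stderr, "cannot set handler for signal %d\n", demo_signals[i]);

    // ... spawn worker threads here; they run with all signals masked ...

    // 3. Unmask only the handled signals, in this thread alone, then wait.
    sigset_t mine;
    sigemptyset(&mine);
    for (size_t i = 0; i < DEMO_SIGNALS_COUNT; i++)
        sigaddset(&mine, demo_signals[i]);
    if (pthread_sigmask(SIG_UNBLOCK, &mine, NULL) != 0)
        fprintf(stderr, "cannot unmask handled signals\n");

    for (;;)
        pause(); // returns once per caught signal
}
```

With this arrangement, signal delivery becomes deterministic: only the thread that unblocked a signal can be interrupted by it, which is presumably why the old warning comment about libuv's internal use of SIGCHLD could be dropped.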
diff --git a/src/daemon/signals.h b/src/daemon/signals.h index 26dbc6dcd..897b2b7f0 100644 --- a/src/daemon/signals.h +++ b/src/daemon/signals.h @@ -3,10 +3,7 @@ #ifndef NETDATA_SIGNALS_H #define NETDATA_SIGNALS_H 1 -void signals_init(void); -void signals_block(void); -void signals_unblock(void); -void signals_reset(void); -void signals_handle(void) NORETURN; +void nd_initialize_signals(void); +void nd_process_signals(void) NORETURN; #endif //NETDATA_SIGNALS_H diff --git a/src/daemon/static_threads.c b/src/daemon/static_threads.c index c6ec79956..3e5b7e350 100644 --- a/src/daemon/static_threads.c +++ b/src/daemon/static_threads.c @@ -133,7 +133,6 @@ const struct netdata_static_thread static_threads_common[] = { }, #endif -#ifdef ENABLE_ACLK { .name = "ACLK_MAIN", .config_section = NULL, @@ -143,7 +142,6 @@ const struct netdata_static_thread static_threads_common[] = { .init_routine = NULL, .start_routine = aclk_main }, -#endif { .name = "RRDCONTEXT", diff --git a/src/daemon/unit_test.c b/src/daemon/unit_test.c index 0f15f67d7..46166d673 100644 --- a/src/daemon/unit_test.c +++ b/src/daemon/unit_test.c @@ -1437,8 +1437,8 @@ int check_strdupz_path_subpath() { size_t i; for(i = 0; checks[i].result ; i++) { - char *s = strdupz_path_subpath(checks[i].path, checks[i].subpath); - fprintf(stderr, "strdupz_path_subpath(\"%s\", \"%s\") = \"%s\": ", checks[i].path, checks[i].subpath, s); + char *s = filename_from_path_entry_strdupz(checks[i].path, checks[i].subpath); + fprintf(stderr, "filename_from_path_entry_strdupz(\"%s\", \"%s\") = \"%s\": ", checks[i].path, checks[i].subpath, s); if(!s || strcmp(s, checks[i].result) != 0) { freez(s); fprintf(stderr, "FAILED\n"); diff --git a/src/daemon/win_system-info.c b/src/daemon/win_system-info.c index 2d67862fb..517692dff 100644 --- a/src/daemon/win_system-info.c +++ b/src/daemon/win_system-info.c @@ -108,10 +108,11 @@ static void netdata_windows_get_mem(struct rrdhost_system_info *systemInfo) { ULONGLONG size; char memSize[256]; + // The amount of physically installed RAM, in kilobytes. 
if (!GetPhysicallyInstalledSystemMemory(&size)) size = 0; else - (void)snprintf(memSize, 255, "%llu", size); + (void)snprintf(memSize, 255, "%llu", size * 1024); // to bytes (void)rrdhost_set_system_info_variable(systemInfo, "NETDATA_SYSTEM_TOTAL_RAM", @@ -220,32 +221,25 @@ static void netdata_windows_discover_os_version(char *os, size_t length, DWORD b } // We are not testing older, because it is not supported anymore by Microsoft - (void)snprintf(os, length, "Microsoft Windows Version %s, Build %d (Name: Windows %s)", versionName, build, version); + (void)snprintf(os, length, "Microsoft Windows Version %s, Build %d", version, build); } -static void netdata_windows_os_version(char *out, DWORD length) +static void netdata_windows_os_kernel_version(char *out, DWORD length, DWORD build) { - if (netdata_registry_get_string(out, - length, + DWORD major, minor; + if (!netdata_registry_get_dword(&major, HKEY_LOCAL_MACHINE, "SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion", - "ProductName")) - return; + "CurrentMajorVersionNumber")) + major = 0; - (void)snprintf(out, length, "%s", NETDATA_DEFAULT_SYSTEM_INFO_VALUE_UNKNOWN); -} - -static void netdata_windows_os_kernel_version(char *out, DWORD length, DWORD build) -{ - char version[8]; - if (!netdata_registry_get_string(version, - 7, + if (!netdata_registry_get_dword(&minor, HKEY_LOCAL_MACHINE, "SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion", - "CurrentVersion")) - version[0] = '\0'; + "CurrentMinorVersionNumber")) + minor = 0; - (void)snprintf(out, length, "%s (build: %u)", version, build); + (void)snprintf(out, length, "Windows %u.%u.%u Build: %u", major, minor, build, build); } static void netdata_windows_host(struct rrdhost_system_info *systemInfo) @@ -261,7 +255,6 @@ static void netdata_windows_host(struct rrdhost_system_info *systemInfo) (void)rrdhost_set_system_info_variable( systemInfo, "NETDATA_HOST_OS_ID_LIKE", NETDATA_DEFAULT_SYSTEM_INFO_VALUE_UNKNOWN); - netdata_windows_os_version(osVersion, 4095); (void)rrdhost_set_system_info_variable(systemInfo, "NETDATA_HOST_OS_VERSION", osVersion); (void)rrdhost_set_system_info_variable(systemInfo, "NETDATA_HOST_OS_VERSION_ID", osVersion); @@ -306,6 +299,11 @@ static void netdata_windows_container(struct rrdhost_system_info *systemInfo) systemInfo, "NETDATA_CONTAINER_IS_OFFICIAL_IMAGE", NETDATA_DEFAULT_SYSTEM_INFO_VALUE_FALSE); } +static void netdata_windows_install_type(struct rrdhost_system_info *systemInfo) +{ + (void)rrdhost_set_system_info_variable(systemInfo, "NETDATA_INSTALL_TYPE", "netdata-installer.exe"); +} + void netdata_windows_get_system_info(struct rrdhost_system_info *systemInfo) { netdata_windows_cloud(systemInfo); @@ -314,5 +312,6 @@ void netdata_windows_get_system_info(struct rrdhost_system_info *systemInfo) netdata_windows_get_cpu(systemInfo); netdata_windows_get_mem(systemInfo); netdata_windows_get_total_disk_size(systemInfo); + netdata_windows_install_type(systemInfo); } #endif diff --git a/src/daemon/winsvc.cc b/src/daemon/winsvc.cc index 9c5eb49ff..a56f5eb7c 100644 --- a/src/daemon/winsvc.cc +++ b/src/daemon/winsvc.cc @@ -4,12 +4,10 @@ extern "C" { #include "libnetdata/libnetdata.h" int netdata_main(int argc, char *argv[]); -void signals_handle(void); +void nd_process_signals(void); } -#include - __attribute__((format(printf, 1, 2))) static void netdata_service_log(const char *fmt, ...) 
{ @@ -74,7 +72,7 @@ static HANDLE CreateEventHandle(const char *msg) if (!h) { - netdata_service_log(msg); + netdata_service_log("%s", msg); if (!ReportSvcStatus(SERVICE_STOPPED, GetLastError(), 1000, 0)) { @@ -219,7 +217,11 @@ static bool update_path() { int main(int argc, char *argv[]) { +#if defined(OS_WINDOWS) && defined(RUN_UNDER_CLION) + bool tty = true; +#else bool tty = isatty(fileno(stdin)) == 1; +#endif if (!update_path()) { return 1; @@ -231,7 +233,7 @@ int main(int argc, char *argv[]) if (rc != 10) return rc; - signals_handle(); + nd_process_signals(); return 1; } else diff --git a/src/database/README.md b/src/database/README.md index ed95d4ded..e861582d4 100644 --- a/src/database/README.md +++ b/src/database/README.md @@ -36,7 +36,7 @@ So, You can select the database mode by editing `netdata.conf` and setting: -```conf +```text [db] # dbengine (default), ram (the default if dbengine not available), alloc, none mode = dbengine diff --git a/src/database/contexts/api_v1.c b/src/database/contexts/api_v1.c deleted file mode 100644 index 355aaf91a..000000000 --- a/src/database/contexts/api_v1.c +++ /dev/null @@ -1,439 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include "internal.h" - -static void rrd_flags_to_buffer_json_array_items(RRD_FLAGS flags, BUFFER *wb) { - if(flags & RRD_FLAG_QUEUED_FOR_HUB) - buffer_json_add_array_item_string(wb, "QUEUED"); - - if(flags & RRD_FLAG_DELETED) - buffer_json_add_array_item_string(wb, "DELETED"); - - if(flags & RRD_FLAG_COLLECTED) - buffer_json_add_array_item_string(wb, "COLLECTED"); - - if(flags & RRD_FLAG_UPDATED) - buffer_json_add_array_item_string(wb, "UPDATED"); - - if(flags & RRD_FLAG_ARCHIVED) - buffer_json_add_array_item_string(wb, "ARCHIVED"); - - if(flags & RRD_FLAG_OWN_LABELS) - buffer_json_add_array_item_string(wb, "OWN_LABELS"); - - if(flags & RRD_FLAG_LIVE_RETENTION) - buffer_json_add_array_item_string(wb, "LIVE_RETENTION"); - - if(flags & RRD_FLAG_HIDDEN) - buffer_json_add_array_item_string(wb, "HIDDEN"); - - if(flags & RRD_FLAG_QUEUED_FOR_PP) - buffer_json_add_array_item_string(wb, "PENDING_UPDATES"); -} - -// ---------------------------------------------------------------------------- -// /api/v1/context(s) API - -struct rrdcontext_to_json { - BUFFER *wb; - RRDCONTEXT_TO_JSON_OPTIONS options; - time_t after; - time_t before; - SIMPLE_PATTERN *chart_label_key; - SIMPLE_PATTERN *chart_labels_filter; - SIMPLE_PATTERN *chart_dimensions; - size_t written; - time_t now; - time_t combined_first_time_s; - time_t combined_last_time_s; - RRD_FLAGS combined_flags; -}; - -static inline int rrdmetric_to_json_callback(const DICTIONARY_ITEM *item, void *value, void *data) { - const char *id = dictionary_acquired_item_name(item); - struct rrdcontext_to_json * t = data; - RRDMETRIC *rm = value; - BUFFER *wb = t->wb; - RRDCONTEXT_TO_JSON_OPTIONS options = t->options; - time_t after = t->after; - time_t before = t->before; - - if(unlikely(rrd_flag_is_deleted(rm) && !(options & RRDCONTEXT_OPTION_SHOW_DELETED))) - return 0; - - if(after && (!rm->last_time_s || after > rm->last_time_s)) - return 0; - - if(before && (!rm->first_time_s || before < rm->first_time_s)) - return 0; - - if(t->chart_dimensions - && !simple_pattern_matches_string(t->chart_dimensions, rm->id) - && rm->name != rm->id - && !simple_pattern_matches_string(t->chart_dimensions, rm->name)) - return 0; - - if(t->written) { - t->combined_first_time_s = MIN(t->combined_first_time_s, rm->first_time_s); - t->combined_last_time_s = MAX(t->combined_last_time_s, 
rm->last_time_s); - t->combined_flags |= rrd_flags_get(rm); - } - else { - t->combined_first_time_s = rm->first_time_s; - t->combined_last_time_s = rm->last_time_s; - t->combined_flags = rrd_flags_get(rm); - } - - buffer_json_member_add_object(wb, id); - - if(options & RRDCONTEXT_OPTION_SHOW_UUIDS) { - char uuid[UUID_STR_LEN]; - uuid_unparse(rm->uuid, uuid); - buffer_json_member_add_string(wb, "uuid", uuid); - } - - buffer_json_member_add_string(wb, "name", string2str(rm->name)); - buffer_json_member_add_time_t(wb, "first_time_t", rm->first_time_s); - buffer_json_member_add_time_t(wb, "last_time_t", rrd_flag_is_collected(rm) ? (long long)t->now : (long long)rm->last_time_s); - buffer_json_member_add_boolean(wb, "collected", rrd_flag_is_collected(rm)); - - if(options & RRDCONTEXT_OPTION_SHOW_DELETED) - buffer_json_member_add_boolean(wb, "deleted", rrd_flag_is_deleted(rm)); - - if(options & RRDCONTEXT_OPTION_SHOW_FLAGS) { - buffer_json_member_add_array(wb, "flags"); - rrd_flags_to_buffer_json_array_items(rrd_flags_get(rm), wb); - buffer_json_array_close(wb); - } - - buffer_json_object_close(wb); - t->written++; - return 1; -} - -static inline int rrdinstance_to_json_callback(const DICTIONARY_ITEM *item, void *value, void *data) { - const char *id = dictionary_acquired_item_name(item); - - struct rrdcontext_to_json *t_parent = data; - RRDINSTANCE *ri = value; - BUFFER *wb = t_parent->wb; - RRDCONTEXT_TO_JSON_OPTIONS options = t_parent->options; - time_t after = t_parent->after; - time_t before = t_parent->before; - bool has_filter = t_parent->chart_label_key || t_parent->chart_labels_filter || t_parent->chart_dimensions; - - if(unlikely(rrd_flag_is_deleted(ri) && !(options & RRDCONTEXT_OPTION_SHOW_DELETED))) - return 0; - - if(after && (!ri->last_time_s || after > ri->last_time_s)) - return 0; - - if(before && (!ri->first_time_s || before < ri->first_time_s)) - return 0; - - if(t_parent->chart_label_key && rrdlabels_match_simple_pattern_parsed(ri->rrdlabels, t_parent->chart_label_key, - '\0', NULL) != SP_MATCHED_POSITIVE) - return 0; - - if(t_parent->chart_labels_filter && rrdlabels_match_simple_pattern_parsed(ri->rrdlabels, - t_parent->chart_labels_filter, ':', - NULL) != SP_MATCHED_POSITIVE) - return 0; - - time_t first_time_s = ri->first_time_s; - time_t last_time_s = ri->last_time_s; - RRD_FLAGS flags = rrd_flags_get(ri); - - BUFFER *wb_metrics = NULL; - if(options & RRDCONTEXT_OPTION_SHOW_METRICS || t_parent->chart_dimensions) { - - wb_metrics = buffer_create(4096, &netdata_buffers_statistics.buffers_api); - buffer_json_initialize(wb_metrics, "\"", "\"", wb->json.depth + 2, false, BUFFER_JSON_OPTIONS_DEFAULT); - - struct rrdcontext_to_json t_metrics = { - .wb = wb_metrics, - .options = options, - .chart_label_key = t_parent->chart_label_key, - .chart_labels_filter = t_parent->chart_labels_filter, - .chart_dimensions = t_parent->chart_dimensions, - .after = after, - .before = before, - .written = 0, - .now = t_parent->now, - }; - dictionary_walkthrough_read(ri->rrdmetrics, rrdmetric_to_json_callback, &t_metrics); - - if(has_filter && !t_metrics.written) { - buffer_free(wb_metrics); - return 0; - } - - first_time_s = t_metrics.combined_first_time_s; - last_time_s = t_metrics.combined_last_time_s; - flags = t_metrics.combined_flags; - } - - if(t_parent->written) { - t_parent->combined_first_time_s = MIN(t_parent->combined_first_time_s, first_time_s); - t_parent->combined_last_time_s = MAX(t_parent->combined_last_time_s, last_time_s); - t_parent->combined_flags |= flags; - } - else { - 
t_parent->combined_first_time_s = first_time_s; - t_parent->combined_last_time_s = last_time_s; - t_parent->combined_flags = flags; - } - - buffer_json_member_add_object(wb, id); - - if(options & RRDCONTEXT_OPTION_SHOW_UUIDS) { - char uuid[UUID_STR_LEN]; - uuid_unparse(ri->uuid, uuid); - buffer_json_member_add_string(wb, "uuid", uuid); - } - - buffer_json_member_add_string(wb, "name", string2str(ri->name)); - buffer_json_member_add_string(wb, "context", string2str(ri->rc->id)); - buffer_json_member_add_string(wb, "title", string2str(ri->title)); - buffer_json_member_add_string(wb, "units", string2str(ri->units)); - buffer_json_member_add_string(wb, "family", string2str(ri->family)); - buffer_json_member_add_string(wb, "chart_type", rrdset_type_name(ri->chart_type)); - buffer_json_member_add_uint64(wb, "priority", ri->priority); - buffer_json_member_add_time_t(wb, "update_every", ri->update_every_s); - buffer_json_member_add_time_t(wb, "first_time_t", first_time_s); - buffer_json_member_add_time_t(wb, "last_time_t", (flags & RRD_FLAG_COLLECTED) ? (long long)t_parent->now : (long long)last_time_s); - buffer_json_member_add_boolean(wb, "collected", flags & RRD_FLAG_COLLECTED); - - if(options & RRDCONTEXT_OPTION_SHOW_DELETED) - buffer_json_member_add_boolean(wb, "deleted", rrd_flag_is_deleted(ri)); - - if(options & RRDCONTEXT_OPTION_SHOW_FLAGS) { - buffer_json_member_add_array(wb, "flags"); - rrd_flags_to_buffer_json_array_items(rrd_flags_get(ri), wb); - buffer_json_array_close(wb); - } - - if(options & RRDCONTEXT_OPTION_SHOW_LABELS && ri->rrdlabels && rrdlabels_entries(ri->rrdlabels)) { - buffer_json_member_add_object(wb, "labels"); - rrdlabels_to_buffer_json_members(ri->rrdlabels, wb); - buffer_json_object_close(wb); - } - - if(wb_metrics) { - buffer_json_member_add_object(wb, "dimensions"); - buffer_fast_strcat(wb, buffer_tostring(wb_metrics), buffer_strlen(wb_metrics)); - buffer_json_object_close(wb); - - buffer_free(wb_metrics); - } - - buffer_json_object_close(wb); - t_parent->written++; - return 1; -} - -static inline int rrdcontext_to_json_callback(const DICTIONARY_ITEM *item, void *value, void *data) { - const char *id = dictionary_acquired_item_name(item); - struct rrdcontext_to_json *t_parent = data; - RRDCONTEXT *rc = value; - BUFFER *wb = t_parent->wb; - RRDCONTEXT_TO_JSON_OPTIONS options = t_parent->options; - time_t after = t_parent->after; - time_t before = t_parent->before; - bool has_filter = t_parent->chart_label_key || t_parent->chart_labels_filter || t_parent->chart_dimensions; - - if(unlikely(rrd_flag_check(rc, RRD_FLAG_HIDDEN) && !(options & RRDCONTEXT_OPTION_SHOW_HIDDEN))) - return 0; - - if(unlikely(rrd_flag_is_deleted(rc) && !(options & RRDCONTEXT_OPTION_SHOW_DELETED))) - return 0; - - if(options & RRDCONTEXT_OPTION_DEEPSCAN) - rrdcontext_recalculate_context_retention(rc, RRD_FLAG_NONE, false); - - if(after && (!rc->last_time_s || after > rc->last_time_s)) - return 0; - - if(before && (!rc->first_time_s || before < rc->first_time_s)) - return 0; - - time_t first_time_s = rc->first_time_s; - time_t last_time_s = rc->last_time_s; - RRD_FLAGS flags = rrd_flags_get(rc); - - BUFFER *wb_instances = NULL; - if((options & (RRDCONTEXT_OPTION_SHOW_LABELS|RRDCONTEXT_OPTION_SHOW_INSTANCES|RRDCONTEXT_OPTION_SHOW_METRICS)) - || t_parent->chart_label_key - || t_parent->chart_labels_filter - || t_parent->chart_dimensions) { - - wb_instances = buffer_create(4096, &netdata_buffers_statistics.buffers_api); - buffer_json_initialize(wb_instances, "\"", "\"", wb->json.depth + 2, false, 
BUFFER_JSON_OPTIONS_DEFAULT); - - struct rrdcontext_to_json t_instances = { - .wb = wb_instances, - .options = options, - .chart_label_key = t_parent->chart_label_key, - .chart_labels_filter = t_parent->chart_labels_filter, - .chart_dimensions = t_parent->chart_dimensions, - .after = after, - .before = before, - .written = 0, - .now = t_parent->now, - }; - dictionary_walkthrough_read(rc->rrdinstances, rrdinstance_to_json_callback, &t_instances); - - if(has_filter && !t_instances.written) { - buffer_free(wb_instances); - return 0; - } - - first_time_s = t_instances.combined_first_time_s; - last_time_s = t_instances.combined_last_time_s; - flags = t_instances.combined_flags; - } - - if(!(options & RRDCONTEXT_OPTION_SKIP_ID)) - buffer_json_member_add_object(wb, id); - - rrdcontext_lock(rc); - - buffer_json_member_add_string(wb, "title", string2str(rc->title)); - buffer_json_member_add_string(wb, "units", string2str(rc->units)); - buffer_json_member_add_string(wb, "family", string2str(rc->family)); - buffer_json_member_add_string(wb, "chart_type", rrdset_type_name(rc->chart_type)); - buffer_json_member_add_uint64(wb, "priority", rc->priority); - buffer_json_member_add_time_t(wb, "first_time_t", first_time_s); - buffer_json_member_add_time_t(wb, "last_time_t", (flags & RRD_FLAG_COLLECTED) ? (long long)t_parent->now : (long long)last_time_s); - buffer_json_member_add_boolean(wb, "collected", (flags & RRD_FLAG_COLLECTED)); - - if(options & RRDCONTEXT_OPTION_SHOW_DELETED) - buffer_json_member_add_boolean(wb, "deleted", rrd_flag_is_deleted(rc)); - - if(options & RRDCONTEXT_OPTION_SHOW_FLAGS) { - buffer_json_member_add_array(wb, "flags"); - rrd_flags_to_buffer_json_array_items(rrd_flags_get(rc), wb); - buffer_json_array_close(wb); - } - - if(options & RRDCONTEXT_OPTION_SHOW_QUEUED) { - buffer_json_member_add_array(wb, "queued_reasons"); - rrd_reasons_to_buffer_json_array_items(rc->queue.queued_flags, wb); - buffer_json_array_close(wb); - - buffer_json_member_add_time_t(wb, "last_queued", (time_t)(rc->queue.queued_ut / USEC_PER_SEC)); - buffer_json_member_add_time_t(wb, "scheduled_dispatch", (time_t)(rc->queue.scheduled_dispatch_ut / USEC_PER_SEC)); - buffer_json_member_add_time_t(wb, "last_dequeued", (time_t)(rc->queue.dequeued_ut / USEC_PER_SEC)); - buffer_json_member_add_uint64(wb, "dispatches", rc->queue.dispatches); - buffer_json_member_add_uint64(wb, "hub_version", rc->hub.version); - buffer_json_member_add_uint64(wb, "version", rc->version); - - buffer_json_member_add_array(wb, "pp_reasons"); - rrd_reasons_to_buffer_json_array_items(rc->pp.queued_flags, wb); - buffer_json_array_close(wb); - - buffer_json_member_add_time_t(wb, "pp_last_queued", (time_t)(rc->pp.queued_ut / USEC_PER_SEC)); - buffer_json_member_add_time_t(wb, "pp_last_dequeued", (time_t)(rc->pp.dequeued_ut / USEC_PER_SEC)); - buffer_json_member_add_uint64(wb, "pp_executed", rc->pp.executions); - } - - rrdcontext_unlock(rc); - - if(wb_instances) { - buffer_json_member_add_object(wb, "charts"); - buffer_fast_strcat(wb, buffer_tostring(wb_instances), buffer_strlen(wb_instances)); - buffer_json_object_close(wb); - - buffer_free(wb_instances); - } - - if(!(options & RRDCONTEXT_OPTION_SKIP_ID)) - buffer_json_object_close(wb); - - t_parent->written++; - return 1; -} - -int rrdcontext_to_json(RRDHOST *host, BUFFER *wb, time_t after, time_t before, RRDCONTEXT_TO_JSON_OPTIONS options, const char *context, SIMPLE_PATTERN *chart_label_key, SIMPLE_PATTERN *chart_labels_filter, SIMPLE_PATTERN *chart_dimensions) { - if(!host->rrdctx.contexts) { 
- netdata_log_error("%s(): request for host '%s' that does not have rrdcontexts initialized.", __FUNCTION__, rrdhost_hostname(host)); - return HTTP_RESP_NOT_FOUND; - } - - RRDCONTEXT_ACQUIRED *rca = (RRDCONTEXT_ACQUIRED *)dictionary_get_and_acquire_item(host->rrdctx.contexts, context); - if(!rca) return HTTP_RESP_NOT_FOUND; - - RRDCONTEXT *rc = rrdcontext_acquired_value(rca); - - if(after != 0 && before != 0) - rrdr_relative_window_to_absolute_query(&after, &before, NULL, false); - - buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT); - struct rrdcontext_to_json t_contexts = { - .wb = wb, - .options = options|RRDCONTEXT_OPTION_SKIP_ID, - .chart_label_key = chart_label_key, - .chart_labels_filter = chart_labels_filter, - .chart_dimensions = chart_dimensions, - .after = after, - .before = before, - .written = 0, - .now = now_realtime_sec(), - }; - rrdcontext_to_json_callback((DICTIONARY_ITEM *)rca, rc, &t_contexts); - buffer_json_finalize(wb); - - rrdcontext_release(rca); - - if(!t_contexts.written) - return HTTP_RESP_NOT_FOUND; - - return HTTP_RESP_OK; -} - -int rrdcontexts_to_json(RRDHOST *host, BUFFER *wb, time_t after, time_t before, RRDCONTEXT_TO_JSON_OPTIONS options, SIMPLE_PATTERN *chart_label_key, SIMPLE_PATTERN *chart_labels_filter, SIMPLE_PATTERN *chart_dimensions) { - if(!host->rrdctx.contexts) { - netdata_log_error("%s(): request for host '%s' that does not have rrdcontexts initialized.", __FUNCTION__, rrdhost_hostname(host)); - return HTTP_RESP_NOT_FOUND; - } - - char node_uuid[UUID_STR_LEN] = ""; - - if(host->node_id) - uuid_unparse(*host->node_id, node_uuid); - - if(after != 0 && before != 0) - rrdr_relative_window_to_absolute_query(&after, &before, NULL, false); - - buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT); - buffer_json_member_add_string(wb, "hostname", rrdhost_hostname(host)); - buffer_json_member_add_string(wb, "machine_guid", host->machine_guid); - buffer_json_member_add_string(wb, "node_id", node_uuid); - buffer_json_member_add_string(wb, "claim_id", host->aclk_state.claimed_id ? 
host->aclk_state.claimed_id : ""); - - if(options & RRDCONTEXT_OPTION_SHOW_LABELS) { - buffer_json_member_add_object(wb, "host_labels"); - rrdlabels_to_buffer_json_members(host->rrdlabels, wb); - buffer_json_object_close(wb); - } - - buffer_json_member_add_object(wb, "contexts"); - struct rrdcontext_to_json t_contexts = { - .wb = wb, - .options = options, - .chart_label_key = chart_label_key, - .chart_labels_filter = chart_labels_filter, - .chart_dimensions = chart_dimensions, - .after = after, - .before = before, - .written = 0, - .now = now_realtime_sec(), - }; - dictionary_walkthrough_read(host->rrdctx.contexts, rrdcontext_to_json_callback, &t_contexts); - buffer_json_object_close(wb); - - buffer_json_finalize(wb); - - return HTTP_RESP_OK; -} - diff --git a/src/database/contexts/api_v1_contexts.c b/src/database/contexts/api_v1_contexts.c new file mode 100644 index 000000000..1a1c83a00 --- /dev/null +++ b/src/database/contexts/api_v1_contexts.c @@ -0,0 +1,440 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "internal.h" + +static void rrd_flags_to_buffer_json_array_items(RRD_FLAGS flags, BUFFER *wb) { + if(flags & RRD_FLAG_QUEUED_FOR_HUB) + buffer_json_add_array_item_string(wb, "QUEUED"); + + if(flags & RRD_FLAG_DELETED) + buffer_json_add_array_item_string(wb, "DELETED"); + + if(flags & RRD_FLAG_COLLECTED) + buffer_json_add_array_item_string(wb, "COLLECTED"); + + if(flags & RRD_FLAG_UPDATED) + buffer_json_add_array_item_string(wb, "UPDATED"); + + if(flags & RRD_FLAG_ARCHIVED) + buffer_json_add_array_item_string(wb, "ARCHIVED"); + + if(flags & RRD_FLAG_OWN_LABELS) + buffer_json_add_array_item_string(wb, "OWN_LABELS"); + + if(flags & RRD_FLAG_LIVE_RETENTION) + buffer_json_add_array_item_string(wb, "LIVE_RETENTION"); + + if(flags & RRD_FLAG_HIDDEN) + buffer_json_add_array_item_string(wb, "HIDDEN"); + + if(flags & RRD_FLAG_QUEUED_FOR_PP) + buffer_json_add_array_item_string(wb, "PENDING_UPDATES"); +} + +// ---------------------------------------------------------------------------- +// /api/v1/context(s) API + +struct rrdcontext_to_json { + BUFFER *wb; + RRDCONTEXT_TO_JSON_OPTIONS options; + time_t after; + time_t before; + SIMPLE_PATTERN *chart_label_key; + SIMPLE_PATTERN *chart_labels_filter; + SIMPLE_PATTERN *chart_dimensions; + size_t written; + time_t now; + time_t combined_first_time_s; + time_t combined_last_time_s; + RRD_FLAGS combined_flags; +}; + +static inline int rrdmetric_to_json_callback(const DICTIONARY_ITEM *item, void *value, void *data) { + const char *id = dictionary_acquired_item_name(item); + struct rrdcontext_to_json * t = data; + RRDMETRIC *rm = value; + BUFFER *wb = t->wb; + RRDCONTEXT_TO_JSON_OPTIONS options = t->options; + time_t after = t->after; + time_t before = t->before; + + if(unlikely(rrd_flag_is_deleted(rm) && !(options & RRDCONTEXT_OPTION_SHOW_DELETED))) + return 0; + + if(after && (!rm->last_time_s || after > rm->last_time_s)) + return 0; + + if(before && (!rm->first_time_s || before < rm->first_time_s)) + return 0; + + if(t->chart_dimensions + && !simple_pattern_matches_string(t->chart_dimensions, rm->id) + && rm->name != rm->id + && !simple_pattern_matches_string(t->chart_dimensions, rm->name)) + return 0; + + if(t->written) { + t->combined_first_time_s = MIN(t->combined_first_time_s, rm->first_time_s); + t->combined_last_time_s = MAX(t->combined_last_time_s, rm->last_time_s); + t->combined_flags |= rrd_flags_get(rm); + } + else { + t->combined_first_time_s = rm->first_time_s; + t->combined_last_time_s = rm->last_time_s; + t->combined_flags = 
rrd_flags_get(rm); + } + + buffer_json_member_add_object(wb, id); + + if(options & RRDCONTEXT_OPTION_SHOW_UUIDS) { + char uuid[UUID_STR_LEN]; + uuid_unparse(rm->uuid, uuid); + buffer_json_member_add_string(wb, "uuid", uuid); + } + + buffer_json_member_add_string(wb, "name", string2str(rm->name)); + buffer_json_member_add_time_t(wb, "first_time_t", rm->first_time_s); + buffer_json_member_add_time_t(wb, "last_time_t", rrd_flag_is_collected(rm) ? (long long)t->now : (long long)rm->last_time_s); + buffer_json_member_add_boolean(wb, "collected", rrd_flag_is_collected(rm)); + + if(options & RRDCONTEXT_OPTION_SHOW_DELETED) + buffer_json_member_add_boolean(wb, "deleted", rrd_flag_is_deleted(rm)); + + if(options & RRDCONTEXT_OPTION_SHOW_FLAGS) { + buffer_json_member_add_array(wb, "flags"); + rrd_flags_to_buffer_json_array_items(rrd_flags_get(rm), wb); + buffer_json_array_close(wb); + } + + buffer_json_object_close(wb); + t->written++; + return 1; +} + +static inline int rrdinstance_to_json_callback(const DICTIONARY_ITEM *item, void *value, void *data) { + const char *id = dictionary_acquired_item_name(item); + + struct rrdcontext_to_json *t_parent = data; + RRDINSTANCE *ri = value; + BUFFER *wb = t_parent->wb; + RRDCONTEXT_TO_JSON_OPTIONS options = t_parent->options; + time_t after = t_parent->after; + time_t before = t_parent->before; + bool has_filter = t_parent->chart_label_key || t_parent->chart_labels_filter || t_parent->chart_dimensions; + + if(unlikely(rrd_flag_is_deleted(ri) && !(options & RRDCONTEXT_OPTION_SHOW_DELETED))) + return 0; + + if(after && (!ri->last_time_s || after > ri->last_time_s)) + return 0; + + if(before && (!ri->first_time_s || before < ri->first_time_s)) + return 0; + + if(t_parent->chart_label_key && rrdlabels_match_simple_pattern_parsed(ri->rrdlabels, t_parent->chart_label_key, + '\0', NULL) != SP_MATCHED_POSITIVE) + return 0; + + if(t_parent->chart_labels_filter && rrdlabels_match_simple_pattern_parsed(ri->rrdlabels, + t_parent->chart_labels_filter, ':', + NULL) != SP_MATCHED_POSITIVE) + return 0; + + time_t first_time_s = ri->first_time_s; + time_t last_time_s = ri->last_time_s; + RRD_FLAGS flags = rrd_flags_get(ri); + + BUFFER *wb_metrics = NULL; + if(options & RRDCONTEXT_OPTION_SHOW_METRICS || t_parent->chart_dimensions) { + + wb_metrics = buffer_create(4096, &netdata_buffers_statistics.buffers_api); + buffer_json_initialize(wb_metrics, "\"", "\"", wb->json.depth + 2, false, BUFFER_JSON_OPTIONS_DEFAULT); + + struct rrdcontext_to_json t_metrics = { + .wb = wb_metrics, + .options = options, + .chart_label_key = t_parent->chart_label_key, + .chart_labels_filter = t_parent->chart_labels_filter, + .chart_dimensions = t_parent->chart_dimensions, + .after = after, + .before = before, + .written = 0, + .now = t_parent->now, + }; + dictionary_walkthrough_read(ri->rrdmetrics, rrdmetric_to_json_callback, &t_metrics); + + if(has_filter && !t_metrics.written) { + buffer_free(wb_metrics); + return 0; + } + + first_time_s = t_metrics.combined_first_time_s; + last_time_s = t_metrics.combined_last_time_s; + flags = t_metrics.combined_flags; + } + + if(t_parent->written) { + t_parent->combined_first_time_s = MIN(t_parent->combined_first_time_s, first_time_s); + t_parent->combined_last_time_s = MAX(t_parent->combined_last_time_s, last_time_s); + t_parent->combined_flags |= flags; + } + else { + t_parent->combined_first_time_s = first_time_s; + t_parent->combined_last_time_s = last_time_s; + t_parent->combined_flags = flags; + } + + buffer_json_member_add_object(wb, id); + + 
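// A minimal sketch of the retention-merging pattern used by both the metric
// and instance callbacks in this file: keep the MIN of first_time, the MAX of
// last_time, and the union (OR) of the flags across all children written so
// far; the first child initializes the combined window verbatim. The helper
// name below is hypothetical, for illustration only:
//
//     static void merge_child_retention(bool *written,
//                                       time_t *first_s, time_t *last_s, RRD_FLAGS *flags,
//                                       time_t c_first, time_t c_last, RRD_FLAGS c_flags) {
//         if(*written) {
//             *first_s = MIN(*first_s, c_first);   // earliest sample wins
//             *last_s  = MAX(*last_s,  c_last);    // latest sample wins
//             *flags  |= c_flags;                  // union of child flags
//         }
//         else {
//             *first_s = c_first;                  // first child seeds the window
//             *last_s  = c_last;
//             *flags   = c_flags;
//             *written = true;
//         }
//     }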
if(options & RRDCONTEXT_OPTION_SHOW_UUIDS) { + char uuid[UUID_STR_LEN]; + uuid_unparse(ri->uuid, uuid); + buffer_json_member_add_string(wb, "uuid", uuid); + } + + buffer_json_member_add_string(wb, "name", string2str(ri->name)); + buffer_json_member_add_string(wb, "context", string2str(ri->rc->id)); + buffer_json_member_add_string(wb, "title", string2str(ri->title)); + buffer_json_member_add_string(wb, "units", string2str(ri->units)); + buffer_json_member_add_string(wb, "family", string2str(ri->family)); + buffer_json_member_add_string(wb, "chart_type", rrdset_type_name(ri->chart_type)); + buffer_json_member_add_uint64(wb, "priority", ri->priority); + buffer_json_member_add_time_t(wb, "update_every", ri->update_every_s); + buffer_json_member_add_time_t(wb, "first_time_t", first_time_s); + buffer_json_member_add_time_t(wb, "last_time_t", (flags & RRD_FLAG_COLLECTED) ? (long long)t_parent->now : (long long)last_time_s); + buffer_json_member_add_boolean(wb, "collected", flags & RRD_FLAG_COLLECTED); + + if(options & RRDCONTEXT_OPTION_SHOW_DELETED) + buffer_json_member_add_boolean(wb, "deleted", rrd_flag_is_deleted(ri)); + + if(options & RRDCONTEXT_OPTION_SHOW_FLAGS) { + buffer_json_member_add_array(wb, "flags"); + rrd_flags_to_buffer_json_array_items(rrd_flags_get(ri), wb); + buffer_json_array_close(wb); + } + + if(options & RRDCONTEXT_OPTION_SHOW_LABELS && ri->rrdlabels && rrdlabels_entries(ri->rrdlabels)) { + buffer_json_member_add_object(wb, "labels"); + rrdlabels_to_buffer_json_members(ri->rrdlabels, wb); + buffer_json_object_close(wb); + } + + if(wb_metrics) { + buffer_json_member_add_object(wb, "dimensions"); + buffer_fast_strcat(wb, buffer_tostring(wb_metrics), buffer_strlen(wb_metrics)); + buffer_json_object_close(wb); + + buffer_free(wb_metrics); + } + + buffer_json_object_close(wb); + t_parent->written++; + return 1; +} + +static inline int rrdcontext_to_json_callback(const DICTIONARY_ITEM *item, void *value, void *data) { + const char *id = dictionary_acquired_item_name(item); + struct rrdcontext_to_json *t_parent = data; + RRDCONTEXT *rc = value; + BUFFER *wb = t_parent->wb; + RRDCONTEXT_TO_JSON_OPTIONS options = t_parent->options; + time_t after = t_parent->after; + time_t before = t_parent->before; + bool has_filter = t_parent->chart_label_key || t_parent->chart_labels_filter || t_parent->chart_dimensions; + + if(unlikely(rrd_flag_check(rc, RRD_FLAG_HIDDEN) && !(options & RRDCONTEXT_OPTION_SHOW_HIDDEN))) + return 0; + + if(unlikely(rrd_flag_is_deleted(rc) && !(options & RRDCONTEXT_OPTION_SHOW_DELETED))) + return 0; + + if(options & RRDCONTEXT_OPTION_DEEPSCAN) + rrdcontext_recalculate_context_retention(rc, RRD_FLAG_NONE, false); + + if(after && (!rc->last_time_s || after > rc->last_time_s)) + return 0; + + if(before && (!rc->first_time_s || before < rc->first_time_s)) + return 0; + + time_t first_time_s = rc->first_time_s; + time_t last_time_s = rc->last_time_s; + RRD_FLAGS flags = rrd_flags_get(rc); + + BUFFER *wb_instances = NULL; + if((options & (RRDCONTEXT_OPTION_SHOW_LABELS|RRDCONTEXT_OPTION_SHOW_INSTANCES|RRDCONTEXT_OPTION_SHOW_METRICS)) + || t_parent->chart_label_key + || t_parent->chart_labels_filter + || t_parent->chart_dimensions) { + + wb_instances = buffer_create(4096, &netdata_buffers_statistics.buffers_api); + buffer_json_initialize(wb_instances, "\"", "\"", wb->json.depth + 2, false, BUFFER_JSON_OPTIONS_DEFAULT); + + struct rrdcontext_to_json t_instances = { + .wb = wb_instances, + .options = options, + .chart_label_key = t_parent->chart_label_key, + 
.chart_labels_filter = t_parent->chart_labels_filter, + .chart_dimensions = t_parent->chart_dimensions, + .after = after, + .before = before, + .written = 0, + .now = t_parent->now, + }; + dictionary_walkthrough_read(rc->rrdinstances, rrdinstance_to_json_callback, &t_instances); + + if(has_filter && !t_instances.written) { + buffer_free(wb_instances); + return 0; + } + + first_time_s = t_instances.combined_first_time_s; + last_time_s = t_instances.combined_last_time_s; + flags = t_instances.combined_flags; + } + + if(!(options & RRDCONTEXT_OPTION_SKIP_ID)) + buffer_json_member_add_object(wb, id); + + rrdcontext_lock(rc); + + buffer_json_member_add_string(wb, "title", string2str(rc->title)); + buffer_json_member_add_string(wb, "units", string2str(rc->units)); + buffer_json_member_add_string(wb, "family", string2str(rc->family)); + buffer_json_member_add_string(wb, "chart_type", rrdset_type_name(rc->chart_type)); + buffer_json_member_add_uint64(wb, "priority", rc->priority); + buffer_json_member_add_time_t(wb, "first_time_t", first_time_s); + buffer_json_member_add_time_t(wb, "last_time_t", (flags & RRD_FLAG_COLLECTED) ? (long long)t_parent->now : (long long)last_time_s); + buffer_json_member_add_boolean(wb, "collected", (flags & RRD_FLAG_COLLECTED)); + + if(options & RRDCONTEXT_OPTION_SHOW_DELETED) + buffer_json_member_add_boolean(wb, "deleted", rrd_flag_is_deleted(rc)); + + if(options & RRDCONTEXT_OPTION_SHOW_FLAGS) { + buffer_json_member_add_array(wb, "flags"); + rrd_flags_to_buffer_json_array_items(rrd_flags_get(rc), wb); + buffer_json_array_close(wb); + } + + if(options & RRDCONTEXT_OPTION_SHOW_QUEUED) { + buffer_json_member_add_array(wb, "queued_reasons"); + rrd_reasons_to_buffer_json_array_items(rc->queue.queued_flags, wb); + buffer_json_array_close(wb); + + buffer_json_member_add_time_t(wb, "last_queued", (time_t)(rc->queue.queued_ut / USEC_PER_SEC)); + buffer_json_member_add_time_t(wb, "scheduled_dispatch", (time_t)(rc->queue.scheduled_dispatch_ut / USEC_PER_SEC)); + buffer_json_member_add_time_t(wb, "last_dequeued", (time_t)(rc->queue.dequeued_ut / USEC_PER_SEC)); + buffer_json_member_add_uint64(wb, "dispatches", rc->queue.dispatches); + buffer_json_member_add_uint64(wb, "hub_version", rc->hub.version); + buffer_json_member_add_uint64(wb, "version", rc->version); + + buffer_json_member_add_array(wb, "pp_reasons"); + rrd_reasons_to_buffer_json_array_items(rc->pp.queued_flags, wb); + buffer_json_array_close(wb); + + buffer_json_member_add_time_t(wb, "pp_last_queued", (time_t)(rc->pp.queued_ut / USEC_PER_SEC)); + buffer_json_member_add_time_t(wb, "pp_last_dequeued", (time_t)(rc->pp.dequeued_ut / USEC_PER_SEC)); + buffer_json_member_add_uint64(wb, "pp_executed", rc->pp.executions); + } + + rrdcontext_unlock(rc); + + if(wb_instances) { + buffer_json_member_add_object(wb, "charts"); + buffer_fast_strcat(wb, buffer_tostring(wb_instances), buffer_strlen(wb_instances)); + buffer_json_object_close(wb); + + buffer_free(wb_instances); + } + + if(!(options & RRDCONTEXT_OPTION_SKIP_ID)) + buffer_json_object_close(wb); + + t_parent->written++; + return 1; +} + +int rrdcontext_to_json(RRDHOST *host, BUFFER *wb, time_t after, time_t before, RRDCONTEXT_TO_JSON_OPTIONS options, const char *context, SIMPLE_PATTERN *chart_label_key, SIMPLE_PATTERN *chart_labels_filter, SIMPLE_PATTERN *chart_dimensions) { + if(!host->rrdctx.contexts) { + netdata_log_error("%s(): request for host '%s' that does not have rrdcontexts initialized.", __FUNCTION__, rrdhost_hostname(host)); + return HTTP_RESP_NOT_FOUND; + } + 
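// Sketch of the approximate response shape this function assembles for a
// single context (values are illustrative, not actual output; the exact
// member set depends on the RRDCONTEXT_OPTION_* bits and filters supplied,
// and with RRDCONTEXT_OPTION_SKIP_ID the members appear at the top level):
//
//     {
//       "title": "Total CPU utilization",
//       "units": "percentage",
//       "family": "utilization",
//       "chart_type": "stacked",
//       "priority": 100,
//       "first_time_t": 1732000000,
//       "last_time_t": 1732086400,
//       "collected": true,
//       "charts": { ... }      // only when instances/metrics are requested
//     }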
+ RRDCONTEXT_ACQUIRED *rca = (RRDCONTEXT_ACQUIRED *)dictionary_get_and_acquire_item(host->rrdctx.contexts, context); + if(!rca) return HTTP_RESP_NOT_FOUND; + + RRDCONTEXT *rc = rrdcontext_acquired_value(rca); + + if(after != 0 && before != 0) + rrdr_relative_window_to_absolute_query(&after, &before, NULL, false); + + buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT); + struct rrdcontext_to_json t_contexts = { + .wb = wb, + .options = options|RRDCONTEXT_OPTION_SKIP_ID, + .chart_label_key = chart_label_key, + .chart_labels_filter = chart_labels_filter, + .chart_dimensions = chart_dimensions, + .after = after, + .before = before, + .written = 0, + .now = now_realtime_sec(), + }; + rrdcontext_to_json_callback((DICTIONARY_ITEM *)rca, rc, &t_contexts); + buffer_json_finalize(wb); + + rrdcontext_release(rca); + + if(!t_contexts.written) + return HTTP_RESP_NOT_FOUND; + + return HTTP_RESP_OK; +} + +int rrdcontexts_to_json(RRDHOST *host, BUFFER *wb, time_t after, time_t before, RRDCONTEXT_TO_JSON_OPTIONS options, SIMPLE_PATTERN *chart_label_key, SIMPLE_PATTERN *chart_labels_filter, SIMPLE_PATTERN *chart_dimensions) { + if(!host->rrdctx.contexts) { + netdata_log_error("%s(): request for host '%s' that does not have rrdcontexts initialized.", __FUNCTION__, rrdhost_hostname(host)); + return HTTP_RESP_NOT_FOUND; + } + + char node_uuid[UUID_STR_LEN] = ""; + + if(!UUIDiszero(host->node_id)) + uuid_unparse_lower(host->node_id.uuid, node_uuid); + + if(after != 0 && before != 0) + rrdr_relative_window_to_absolute_query(&after, &before, NULL, false); + + buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT); + buffer_json_member_add_string(wb, "hostname", rrdhost_hostname(host)); + buffer_json_member_add_string(wb, "machine_guid", host->machine_guid); + buffer_json_member_add_string(wb, "node_id", node_uuid); + CLAIM_ID claim_id = rrdhost_claim_id_get(host); + buffer_json_member_add_string(wb, "claim_id", claim_id.str); + + if(options & RRDCONTEXT_OPTION_SHOW_LABELS) { + buffer_json_member_add_object(wb, "host_labels"); + rrdlabels_to_buffer_json_members(host->rrdlabels, wb); + buffer_json_object_close(wb); + } + + buffer_json_member_add_object(wb, "contexts"); + struct rrdcontext_to_json t_contexts = { + .wb = wb, + .options = options, + .chart_label_key = chart_label_key, + .chart_labels_filter = chart_labels_filter, + .chart_dimensions = chart_dimensions, + .after = after, + .before = before, + .written = 0, + .now = now_realtime_sec(), + }; + dictionary_walkthrough_read(host->rrdctx.contexts, rrdcontext_to_json_callback, &t_contexts); + buffer_json_object_close(wb); + + buffer_json_finalize(wb); + + return HTTP_RESP_OK; +} + diff --git a/src/database/contexts/api_v2.c b/src/database/contexts/api_v2.c deleted file mode 100644 index 07cd3ac83..000000000 --- a/src/database/contexts/api_v2.c +++ /dev/null @@ -1,2454 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include "internal.h" - -#include "aclk/aclk_capas.h" - -// ---------------------------------------------------------------------------- -// /api/v2/contexts API - -struct alert_transitions_facets alert_transition_facets[] = { - [ATF_STATUS] = { - .id = "f_status", - .name = "Alert Status", - .query_param = "f_status", - .order = 1, - }, - [ATF_TYPE] = { - .id = "f_type", - .name = "Alert Type", - .query_param = "f_type", - .order = 2, - }, - [ATF_ROLE] = { - .id = "f_role", - .name = "Recipient Role", - .query_param = "f_role", - .order = 3, - }, - [ATF_CLASS] = { - .id = "f_class", - .name 
= "Alert Class", - .query_param = "f_class", - .order = 4, - }, - [ATF_COMPONENT] = { - .id = "f_component", - .name = "Alert Component", - .query_param = "f_component", - .order = 5, - }, - [ATF_NODE] = { - .id = "f_node", - .name = "Alert Node", - .query_param = "f_node", - .order = 6, - }, - [ATF_ALERT_NAME] = { - .id = "f_alert", - .name = "Alert Name", - .query_param = "f_alert", - .order = 7, - }, - [ATF_CHART_NAME] = { - .id = "f_instance", - .name = "Instance Name", - .query_param = "f_instance", - .order = 8, - }, - [ATF_CONTEXT] = { - .id = "f_context", - .name = "Context", - .query_param = "f_context", - .order = 9, - }, - - // terminator - [ATF_TOTAL_ENTRIES] = { - .id = NULL, - .name = NULL, - .query_param = NULL, - .order = 9999, - } -}; - -struct facet_entry { - uint32_t count; -}; - -struct alert_transitions_callback_data { - struct rrdcontext_to_json_v2_data *ctl; - BUFFER *wb; - bool debug; - bool only_one_config; - - struct { - SIMPLE_PATTERN *pattern; - DICTIONARY *dict; - } facets[ATF_TOTAL_ENTRIES]; - - uint32_t max_items_to_return; - uint32_t items_to_return; - - uint32_t items_evaluated; - uint32_t items_matched; - - - struct sql_alert_transition_fixed_size *base; // double linked list - last item is base->prev - struct sql_alert_transition_fixed_size *last_added; // the last item added, not the last of the list - - struct { - size_t first; - size_t skips_before; - size_t skips_after; - size_t backwards; - size_t forwards; - size_t prepend; - size_t append; - size_t shifts; - } operations; - - uint32_t configs_added; -}; - -typedef enum __attribute__ ((__packed__)) { - FTS_MATCHED_NONE = 0, - FTS_MATCHED_HOST, - FTS_MATCHED_CONTEXT, - FTS_MATCHED_INSTANCE, - FTS_MATCHED_DIMENSION, - FTS_MATCHED_LABEL, - FTS_MATCHED_ALERT, - FTS_MATCHED_ALERT_INFO, - FTS_MATCHED_FAMILY, - FTS_MATCHED_TITLE, - FTS_MATCHED_UNITS, -} FTS_MATCH; - -static const char *fts_match_to_string(FTS_MATCH match) { - switch(match) { - case FTS_MATCHED_HOST: - return "HOST"; - - case FTS_MATCHED_CONTEXT: - return "CONTEXT"; - - case FTS_MATCHED_INSTANCE: - return "INSTANCE"; - - case FTS_MATCHED_DIMENSION: - return "DIMENSION"; - - case FTS_MATCHED_ALERT: - return "ALERT"; - - case FTS_MATCHED_ALERT_INFO: - return "ALERT_INFO"; - - case FTS_MATCHED_LABEL: - return "LABEL"; - - case FTS_MATCHED_FAMILY: - return "FAMILY"; - - case FTS_MATCHED_TITLE: - return "TITLE"; - - case FTS_MATCHED_UNITS: - return "UNITS"; - - default: - return "NONE"; - } -} - -struct function_v2_entry { - size_t size; - size_t used; - size_t *node_ids; - STRING *help; - STRING *tags; - HTTP_ACCESS access; - int priority; -}; - -struct context_v2_entry { - size_t count; - STRING *id; - STRING *family; - uint32_t priority; - time_t first_time_s; - time_t last_time_s; - RRD_FLAGS flags; - FTS_MATCH match; -}; - -struct alert_counts { - size_t critical; - size_t warning; - size_t clear; - size_t error; -}; - -struct alert_v2_entry { - RRDCALC *tmp; - - STRING *name; - STRING *summary; - RRDLABELS *recipient; - RRDLABELS *classification; - RRDLABELS *context; - RRDLABELS *component; - RRDLABELS *type; - - size_t ati; - - struct alert_counts counts; - - size_t instances; - DICTIONARY *nodes; - DICTIONARY *configs; -}; - -struct alert_by_x_entry { - struct { - struct alert_counts counts; - size_t silent; - size_t total; - } running; - - struct { - size_t available; - } prototypes; -}; - -typedef struct full_text_search_index { - size_t searches; - size_t string_searches; - size_t char_searches; -} FTS_INDEX; - -static inline bool 
full_text_search_string(FTS_INDEX *fts, SIMPLE_PATTERN *q, STRING *ptr) { - fts->searches++; - fts->string_searches++; - return simple_pattern_matches_string(q, ptr); -} - -static inline bool full_text_search_char(FTS_INDEX *fts, SIMPLE_PATTERN *q, char *ptr) { - fts->searches++; - fts->char_searches++; - return simple_pattern_matches(q, ptr); -} - -struct contexts_v2_node { - size_t ni; - RRDHOST *host; -}; - -struct rrdcontext_to_json_v2_data { - time_t now; - - BUFFER *wb; - struct api_v2_contexts_request *request; - - CONTEXTS_V2_MODE mode; - CONTEXTS_V2_OPTIONS options; - struct query_versions versions; - - struct { - SIMPLE_PATTERN *scope_pattern; - SIMPLE_PATTERN *pattern; - size_t ni; - DICTIONARY *dict; // the result set - } nodes; - - struct { - SIMPLE_PATTERN *scope_pattern; - SIMPLE_PATTERN *pattern; - size_t ci; - DICTIONARY *dict; // the result set - } contexts; - - struct { - SIMPLE_PATTERN *alert_name_pattern; - time_t alarm_id_filter; - - size_t ati; - - DICTIONARY *summary; - DICTIONARY *alert_instances; - - DICTIONARY *by_type; - DICTIONARY *by_component; - DICTIONARY *by_classification; - DICTIONARY *by_recipient; - DICTIONARY *by_module; - } alerts; - - struct { - FTS_MATCH host_match; - char host_node_id_str[UUID_STR_LEN]; - SIMPLE_PATTERN *pattern; - FTS_INDEX fts; - } q; - - struct { - DICTIONARY *dict; // the result set - } functions; - - struct { - bool enabled; - bool relative; - time_t after; - time_t before; - } window; - - struct query_timings timings; -}; - -static void alert_counts_add(struct alert_counts *t, RRDCALC *rc) { - switch(rc->status) { - case RRDCALC_STATUS_CRITICAL: - t->critical++; - break; - - case RRDCALC_STATUS_WARNING: - t->warning++; - break; - - case RRDCALC_STATUS_CLEAR: - t->clear++; - break; - - case RRDCALC_STATUS_REMOVED: - case RRDCALC_STATUS_UNINITIALIZED: - break; - - case RRDCALC_STATUS_UNDEFINED: - default: - if(!netdata_double_isnumber(rc->value)) - t->error++; - - break; - } -} - -static void alerts_v2_add(struct alert_v2_entry *t, RRDCALC *rc) { - t->instances++; - - alert_counts_add(&t->counts, rc); - - dictionary_set(t->nodes, rc->rrdset->rrdhost->machine_guid, NULL, 0); - - char key[UUID_STR_LEN + 1]; - uuid_unparse_lower(rc->config.hash_id, key); - dictionary_set(t->configs, key, NULL, 0); -} - -static void alerts_by_x_insert_callback(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data) { - static STRING *silent = NULL; - if(unlikely(!silent)) silent = string_strdupz("silent"); - - struct alert_by_x_entry *b = value; - RRDCALC *rc = data; - if(!rc) { - // prototype - b->prototypes.available++; - } - else { - alert_counts_add(&b->running.counts, rc); - - b->running.total++; - - if (rc->config.recipient == silent) - b->running.silent++; - } -} - -static bool alerts_by_x_conflict_callback(const DICTIONARY_ITEM *item __maybe_unused, void *old_value, void *new_value __maybe_unused, void *data __maybe_unused) { - alerts_by_x_insert_callback(item, old_value, data); - return false; -} - -static void alerts_v2_insert_callback(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data) { - struct rrdcontext_to_json_v2_data *ctl = data; - struct alert_v2_entry *t = value; - RRDCALC *rc = t->tmp; - t->name = rc->config.name; - t->summary = rc->config.summary; // the original summary - t->context = rrdlabels_create(); - t->recipient = rrdlabels_create(); - t->classification = rrdlabels_create(); - t->component = rrdlabels_create(); - t->type = rrdlabels_create(); - if (string_strlen(rc->rrdset->context)) - 
rrdlabels_add(t->context, string2str(rc->rrdset->context), "yes", RRDLABEL_SRC_AUTO); - if (string_strlen(rc->config.recipient)) - rrdlabels_add(t->recipient, string2str(rc->config.recipient), "yes", RRDLABEL_SRC_AUTO); - if (string_strlen(rc->config.classification)) - rrdlabels_add(t->classification, string2str(rc->config.classification), "yes", RRDLABEL_SRC_AUTO); - if (string_strlen(rc->config.component)) - rrdlabels_add(t->component, string2str(rc->config.component), "yes", RRDLABEL_SRC_AUTO); - if (string_strlen(rc->config.type)) - rrdlabels_add(t->type, string2str(rc->config.type), "yes", RRDLABEL_SRC_AUTO); - t->ati = ctl->alerts.ati++; - - t->nodes = dictionary_create(DICT_OPTION_SINGLE_THREADED|DICT_OPTION_VALUE_LINK_DONT_CLONE|DICT_OPTION_NAME_LINK_DONT_CLONE); - t->configs = dictionary_create(DICT_OPTION_SINGLE_THREADED|DICT_OPTION_VALUE_LINK_DONT_CLONE|DICT_OPTION_NAME_LINK_DONT_CLONE); - - alerts_v2_add(t, rc); -} - -static bool alerts_v2_conflict_callback(const DICTIONARY_ITEM *item __maybe_unused, void *old_value, void *new_value, void *data __maybe_unused) { - struct alert_v2_entry *t = old_value, *n = new_value; - RRDCALC *rc = n->tmp; - if (string_strlen(rc->rrdset->context)) - rrdlabels_add(t->context, string2str(rc->rrdset->context), "yes", RRDLABEL_SRC_AUTO); - if (string_strlen(rc->config.recipient)) - rrdlabels_add(t->recipient, string2str(rc->config.recipient), "yes", RRDLABEL_SRC_AUTO); - if (string_strlen(rc->config.classification)) - rrdlabels_add(t->classification, string2str(rc->config.classification), "yes", RRDLABEL_SRC_AUTO); - if (string_strlen(rc->config.component)) - rrdlabels_add(t->component, string2str(rc->config.component), "yes", RRDLABEL_SRC_AUTO); - if (string_strlen(rc->config.type)) - rrdlabels_add(t->type, string2str(rc->config.type), "yes", RRDLABEL_SRC_AUTO); - alerts_v2_add(t, rc); - return true; -} - -static void alerts_v2_delete_callback(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) { - struct alert_v2_entry *t = value; - - rrdlabels_destroy(t->context); - rrdlabels_destroy(t->recipient); - rrdlabels_destroy(t->classification); - rrdlabels_destroy(t->component); - rrdlabels_destroy(t->type); - - dictionary_destroy(t->nodes); - dictionary_destroy(t->configs); -} - -static void alert_instances_v2_insert_callback(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data) { - struct rrdcontext_to_json_v2_data *ctl = data; - struct sql_alert_instance_v2_entry *t = value; - RRDCALC *rc = t->tmp; - - t->context = rc->rrdset->context; - t->chart_id = rc->rrdset->id; - t->chart_name = rc->rrdset->name; - t->family = rc->rrdset->family; - t->units = rc->config.units; - t->classification = rc->config.classification; - t->type = rc->config.type; - t->recipient = rc->config.recipient; - t->component = rc->config.component; - t->name = rc->config.name; - t->source = rc->config.source; - t->status = rc->status; - t->flags = rc->run_flags; - t->info = rc->config.info; - t->summary = rc->summary; - t->value = rc->value; - t->last_updated = rc->last_updated; - t->last_status_change = rc->last_status_change; - t->last_status_change_value = rc->last_status_change_value; - t->host = rc->rrdset->rrdhost; - t->alarm_id = rc->id; - t->ni = ctl->nodes.ni; - - uuid_copy(t->config_hash_id, rc->config.hash_id); - health_alarm_log_get_global_id_and_transition_id_for_rrdcalc(rc, &t->global_id, &t->last_transition_id); -} - -static bool alert_instances_v2_conflict_callback(const DICTIONARY_ITEM *item __maybe_unused, void 
*old_value __maybe_unused, void *new_value __maybe_unused, void *data __maybe_unused) { - internal_fatal(true, "This should never happen!"); - return true; -} - -static void alert_instances_delete_callback(const DICTIONARY_ITEM *item __maybe_unused, void *value __maybe_unused, void *data __maybe_unused) { - ; -} - -static FTS_MATCH rrdcontext_to_json_v2_full_text_search(struct rrdcontext_to_json_v2_data *ctl, RRDCONTEXT *rc, SIMPLE_PATTERN *q) { - if(unlikely(full_text_search_string(&ctl->q.fts, q, rc->id) || - full_text_search_string(&ctl->q.fts, q, rc->family))) - return FTS_MATCHED_CONTEXT; - - if(unlikely(full_text_search_string(&ctl->q.fts, q, rc->title))) - return FTS_MATCHED_TITLE; - - if(unlikely(full_text_search_string(&ctl->q.fts, q, rc->units))) - return FTS_MATCHED_UNITS; - - FTS_MATCH matched = FTS_MATCHED_NONE; - RRDINSTANCE *ri; - dfe_start_read(rc->rrdinstances, ri) { - if(matched) break; - - if(ctl->window.enabled && !query_matches_retention(ctl->window.after, ctl->window.before, ri->first_time_s, (ri->flags & RRD_FLAG_COLLECTED) ? ctl->now : ri->last_time_s, 0)) - continue; - - if(unlikely(full_text_search_string(&ctl->q.fts, q, ri->id)) || - (ri->name != ri->id && full_text_search_string(&ctl->q.fts, q, ri->name))) { - matched = FTS_MATCHED_INSTANCE; - break; - } - - RRDMETRIC *rm; - dfe_start_read(ri->rrdmetrics, rm) { - if(ctl->window.enabled && !query_matches_retention(ctl->window.after, ctl->window.before, rm->first_time_s, (rm->flags & RRD_FLAG_COLLECTED) ? ctl->now : rm->last_time_s, 0)) - continue; - - if(unlikely(full_text_search_string(&ctl->q.fts, q, rm->id)) || - (rm->name != rm->id && full_text_search_string(&ctl->q.fts, q, rm->name))) { - matched = FTS_MATCHED_DIMENSION; - break; - } - } - dfe_done(rm); - - size_t label_searches = 0; - if(unlikely(ri->rrdlabels && rrdlabels_entries(ri->rrdlabels) && - rrdlabels_match_simple_pattern_parsed(ri->rrdlabels, q, ':', &label_searches) == SP_MATCHED_POSITIVE)) { - ctl->q.fts.searches += label_searches; - ctl->q.fts.char_searches += label_searches; - matched = FTS_MATCHED_LABEL; - break; - } - ctl->q.fts.searches += label_searches; - ctl->q.fts.char_searches += label_searches; - - if(ri->rrdset) { - RRDSET *st = ri->rrdset; - rw_spinlock_read_lock(&st->alerts.spinlock); - for (RRDCALC *rcl = st->alerts.base; rcl; rcl = rcl->next) { - if(unlikely(full_text_search_string(&ctl->q.fts, q, rcl->config.name))) { - matched = FTS_MATCHED_ALERT; - break; - } - - if(unlikely(full_text_search_string(&ctl->q.fts, q, rcl->config.info))) { - matched = FTS_MATCHED_ALERT_INFO; - break; - } - } - rw_spinlock_read_unlock(&st->alerts.spinlock); - } - } - dfe_done(ri); - return matched; -} - -static bool rrdcontext_matches_alert(struct rrdcontext_to_json_v2_data *ctl, RRDCONTEXT *rc) { - size_t matches = 0; - RRDINSTANCE *ri; - dfe_start_read(rc->rrdinstances, ri) { - if(ri->rrdset) { - RRDSET *st = ri->rrdset; - rw_spinlock_read_lock(&st->alerts.spinlock); - for (RRDCALC *rcl = st->alerts.base; rcl; rcl = rcl->next) { - if(ctl->alerts.alert_name_pattern && !simple_pattern_matches_string(ctl->alerts.alert_name_pattern, rcl->config.name)) - continue; - - if(ctl->alerts.alarm_id_filter && ctl->alerts.alarm_id_filter != rcl->id) - continue; - - size_t m = ctl->request->alerts.status & CONTEXTS_V2_ALERT_STATUSES ? 
0 : 1; - - if (!m) { - if ((ctl->request->alerts.status & CONTEXT_V2_ALERT_UNINITIALIZED) && - rcl->status == RRDCALC_STATUS_UNINITIALIZED) - m++; - - if ((ctl->request->alerts.status & CONTEXT_V2_ALERT_UNDEFINED) && - rcl->status == RRDCALC_STATUS_UNDEFINED) - m++; - - if ((ctl->request->alerts.status & CONTEXT_V2_ALERT_CLEAR) && - rcl->status == RRDCALC_STATUS_CLEAR) - m++; - - if ((ctl->request->alerts.status & CONTEXT_V2_ALERT_RAISED) && - rcl->status >= RRDCALC_STATUS_RAISED) - m++; - - if ((ctl->request->alerts.status & CONTEXT_V2_ALERT_WARNING) && - rcl->status == RRDCALC_STATUS_WARNING) - m++; - - if ((ctl->request->alerts.status & CONTEXT_V2_ALERT_CRITICAL) && - rcl->status == RRDCALC_STATUS_CRITICAL) - m++; - - if(!m) - continue; - } - - struct alert_v2_entry t = { - .tmp = rcl, - }; - struct alert_v2_entry *a2e = - dictionary_set(ctl->alerts.summary, string2str(rcl->config.name), - &t, sizeof(struct alert_v2_entry)); - size_t ati = a2e->ati; - matches++; - - dictionary_set_advanced(ctl->alerts.by_type, - string2str(rcl->config.type), - (ssize_t)string_strlen(rcl->config.type), - NULL, - sizeof(struct alert_by_x_entry), - rcl); - - dictionary_set_advanced(ctl->alerts.by_component, - string2str(rcl->config.component), - (ssize_t)string_strlen(rcl->config.component), - NULL, - sizeof(struct alert_by_x_entry), - rcl); - - dictionary_set_advanced(ctl->alerts.by_classification, - string2str(rcl->config.classification), - (ssize_t)string_strlen(rcl->config.classification), - NULL, - sizeof(struct alert_by_x_entry), - rcl); - - dictionary_set_advanced(ctl->alerts.by_recipient, - string2str(rcl->config.recipient), - (ssize_t)string_strlen(rcl->config.recipient), - NULL, - sizeof(struct alert_by_x_entry), - rcl); - - char *module = NULL; - rrdlabels_get_value_strdup_or_null(st->rrdlabels, &module, "_collect_module"); - if(!module || !*module) module = "[unset]"; - - dictionary_set_advanced(ctl->alerts.by_module, - module, - -1, - NULL, - sizeof(struct alert_by_x_entry), - rcl); - - if (ctl->options & (CONTEXT_V2_OPTION_ALERTS_WITH_INSTANCES | CONTEXT_V2_OPTION_ALERTS_WITH_VALUES)) { - char key[20 + 1]; - snprintfz(key, sizeof(key) - 1, "%p", rcl); - - struct sql_alert_instance_v2_entry z = { - .ati = ati, - .tmp = rcl, - }; - dictionary_set(ctl->alerts.alert_instances, key, &z, sizeof(z)); - } - } - rw_spinlock_read_unlock(&st->alerts.spinlock); - } - } - dfe_done(ri); - - return matches != 0; -} - - -static ssize_t rrdcontext_to_json_v2_add_context(void *data, RRDCONTEXT_ACQUIRED *rca, bool queryable_context __maybe_unused) { - struct rrdcontext_to_json_v2_data *ctl = data; - - RRDCONTEXT *rc = rrdcontext_acquired_value(rca); - - if(ctl->window.enabled && !query_matches_retention(ctl->window.after, ctl->window.before, rc->first_time_s, (rc->flags & RRD_FLAG_COLLECTED) ? 
ctl->now : rc->last_time_s, 0)) - return 0; // continue to next context - - FTS_MATCH match = ctl->q.host_match; - if((ctl->mode & CONTEXTS_V2_SEARCH) && ctl->q.pattern) { - match = rrdcontext_to_json_v2_full_text_search(ctl, rc, ctl->q.pattern); - - if(match == FTS_MATCHED_NONE) - return 0; // continue to next context - } - - if(ctl->mode & CONTEXTS_V2_ALERTS) { - if(!rrdcontext_matches_alert(ctl, rc)) - return 0; // continue to next context - } - - if(ctl->contexts.dict) { - struct context_v2_entry t = { - .count = 1, - .id = rc->id, - .family = string_dup(rc->family), - .priority = rc->priority, - .first_time_s = rc->first_time_s, - .last_time_s = rc->last_time_s, - .flags = rc->flags, - .match = match, - }; - - dictionary_set(ctl->contexts.dict, string2str(rc->id), &t, sizeof(struct context_v2_entry)); - } - - return 1; -} - -void buffer_json_agent_status_id(BUFFER *wb, size_t ai, usec_t duration_ut) { - buffer_json_member_add_object(wb, "st"); - { - buffer_json_member_add_uint64(wb, "ai", ai); - buffer_json_member_add_uint64(wb, "code", 200); - buffer_json_member_add_string(wb, "msg", ""); - if (duration_ut) - buffer_json_member_add_double(wb, "ms", (NETDATA_DOUBLE) duration_ut / 1000.0); - } - buffer_json_object_close(wb); -} - -void buffer_json_node_add_v2(BUFFER *wb, RRDHOST *host, size_t ni, usec_t duration_ut, bool status) { - buffer_json_member_add_string(wb, "mg", host->machine_guid); - - if(host->node_id) - buffer_json_member_add_uuid(wb, "nd", host->node_id); - buffer_json_member_add_string(wb, "nm", rrdhost_hostname(host)); - buffer_json_member_add_uint64(wb, "ni", ni); - - if(status) - buffer_json_agent_status_id(wb, 0, duration_ut); -} - -static void rrdhost_receiver_to_json(BUFFER *wb, RRDHOST_STATUS *s, const char *key) { - buffer_json_member_add_object(wb, key); - { - buffer_json_member_add_uint64(wb, "id", s->ingest.id); - buffer_json_member_add_uint64(wb, "hops", s->ingest.hops); - buffer_json_member_add_string(wb, "type", rrdhost_ingest_type_to_string(s->ingest.type)); - buffer_json_member_add_string(wb, "status", rrdhost_ingest_status_to_string(s->ingest.status)); - buffer_json_member_add_time_t(wb, "since", s->ingest.since); - buffer_json_member_add_time_t(wb, "age", s->now - s->ingest.since); - - if(s->ingest.type == RRDHOST_INGEST_TYPE_CHILD) { - if(s->ingest.status == RRDHOST_INGEST_STATUS_OFFLINE) - buffer_json_member_add_string(wb, "reason", stream_handshake_error_to_string(s->ingest.reason)); - - if(s->ingest.status == RRDHOST_INGEST_STATUS_REPLICATING) { - buffer_json_member_add_object(wb, "replication"); - { - buffer_json_member_add_boolean(wb, "in_progress", s->ingest.replication.in_progress); - buffer_json_member_add_double(wb, "completion", s->ingest.replication.completion); - buffer_json_member_add_uint64(wb, "instances", s->ingest.replication.instances); - } - buffer_json_object_close(wb); // replication - } - - if(s->ingest.status == RRDHOST_INGEST_STATUS_REPLICATING || s->ingest.status == RRDHOST_INGEST_STATUS_ONLINE) { - buffer_json_member_add_object(wb, "source"); - { - char buf[1024 + 1]; - snprintfz(buf, sizeof(buf) - 1, "[%s]:%d%s", s->ingest.peers.local.ip, s->ingest.peers.local.port, s->ingest.ssl ? ":SSL" : ""); - buffer_json_member_add_string(wb, "local", buf); - - snprintfz(buf, sizeof(buf) - 1, "[%s]:%d%s", s->ingest.peers.peer.ip, s->ingest.peers.peer.port, s->ingest.ssl ? 
":SSL" : ""); - buffer_json_member_add_string(wb, "remote", buf); - - stream_capabilities_to_json_array(wb, s->ingest.capabilities, "capabilities"); - } - buffer_json_object_close(wb); // source - } - } - } - buffer_json_object_close(wb); // collection -} - -static void rrdhost_sender_to_json(BUFFER *wb, RRDHOST_STATUS *s, const char *key) { - if(s->stream.status == RRDHOST_STREAM_STATUS_DISABLED) - return; - - buffer_json_member_add_object(wb, key); - { - buffer_json_member_add_uint64(wb, "id", s->stream.id); - buffer_json_member_add_uint64(wb, "hops", s->stream.hops); - buffer_json_member_add_string(wb, "status", rrdhost_streaming_status_to_string(s->stream.status)); - buffer_json_member_add_time_t(wb, "since", s->stream.since); - buffer_json_member_add_time_t(wb, "age", s->now - s->stream.since); - - if (s->stream.status == RRDHOST_STREAM_STATUS_OFFLINE) - buffer_json_member_add_string(wb, "reason", stream_handshake_error_to_string(s->stream.reason)); - - if (s->stream.status == RRDHOST_STREAM_STATUS_REPLICATING) { - buffer_json_member_add_object(wb, "replication"); - { - buffer_json_member_add_boolean(wb, "in_progress", s->stream.replication.in_progress); - buffer_json_member_add_double(wb, "completion", s->stream.replication.completion); - buffer_json_member_add_uint64(wb, "instances", s->stream.replication.instances); - } - buffer_json_object_close(wb); - } - - buffer_json_member_add_object(wb, "destination"); - { - char buf[1024 + 1]; - snprintfz(buf, sizeof(buf) - 1, "[%s]:%d%s", s->stream.peers.local.ip, s->stream.peers.local.port, s->stream.ssl ? ":SSL" : ""); - buffer_json_member_add_string(wb, "local", buf); - - snprintfz(buf, sizeof(buf) - 1, "[%s]:%d%s", s->stream.peers.peer.ip, s->stream.peers.peer.port, s->stream.ssl ? ":SSL" : ""); - buffer_json_member_add_string(wb, "remote", buf); - - stream_capabilities_to_json_array(wb, s->stream.capabilities, "capabilities"); - - buffer_json_member_add_object(wb, "traffic"); - { - buffer_json_member_add_boolean(wb, "compression", s->stream.compression); - buffer_json_member_add_uint64(wb, "data", s->stream.sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_DATA]); - buffer_json_member_add_uint64(wb, "metadata", s->stream.sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_METADATA]); - buffer_json_member_add_uint64(wb, "functions", s->stream.sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_FUNCTIONS]); - buffer_json_member_add_uint64(wb, "replication", s->stream.sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_REPLICATION]); - buffer_json_member_add_uint64(wb, "dyncfg", s->stream.sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_DYNCFG]); - } - buffer_json_object_close(wb); // traffic - - buffer_json_member_add_array(wb, "candidates"); - struct rrdpush_destinations *d; - for (d = s->host->destinations; d; d = d->next) { - buffer_json_add_array_item_object(wb); - buffer_json_member_add_uint64(wb, "attempts", d->attempts); - { - - if (d->ssl) { - snprintfz(buf, sizeof(buf) - 1, "%s:SSL", string2str(d->destination)); - buffer_json_member_add_string(wb, "destination", buf); - } - else - buffer_json_member_add_string(wb, "destination", string2str(d->destination)); - - buffer_json_member_add_time_t(wb, "since", d->since); - buffer_json_member_add_time_t(wb, "age", s->now - d->since); - buffer_json_member_add_string(wb, "last_handshake", stream_handshake_error_to_string(d->reason)); - if(d->postpone_reconnection_until > s->now) { - buffer_json_member_add_time_t(wb, "next_check", 
d->postpone_reconnection_until); - buffer_json_member_add_time_t(wb, "next_in", d->postpone_reconnection_until - s->now); - } - } - buffer_json_object_close(wb); // each candidate - } - buffer_json_array_close(wb); // candidates - } - buffer_json_object_close(wb); // destination - } - buffer_json_object_close(wb); // streaming -} - -static void agent_capabilities_to_json(BUFFER *wb, RRDHOST *host, const char *key) { - buffer_json_member_add_array(wb, key); - - struct capability *capas = aclk_get_node_instance_capas(host); - for(struct capability *capa = capas; capa->name ;capa++) { - buffer_json_add_array_item_object(wb); - { - buffer_json_member_add_string(wb, "name", capa->name); - buffer_json_member_add_uint64(wb, "version", capa->version); - buffer_json_member_add_boolean(wb, "enabled", capa->enabled); - } - buffer_json_object_close(wb); - } - buffer_json_array_close(wb); - freez(capas); -} - -static inline void host_dyncfg_to_json_v2(BUFFER *wb, const char *key, RRDHOST_STATUS *s) { - buffer_json_member_add_object(wb, key); - { - buffer_json_member_add_string(wb, "status", rrdhost_dyncfg_status_to_string(s->dyncfg.status)); - } - buffer_json_object_close(wb); // health - -} - -static inline void rrdhost_health_to_json_v2(BUFFER *wb, const char *key, RRDHOST_STATUS *s) { - buffer_json_member_add_object(wb, key); - { - buffer_json_member_add_string(wb, "status", rrdhost_health_status_to_string(s->health.status)); - if (s->health.status == RRDHOST_HEALTH_STATUS_RUNNING) { - buffer_json_member_add_object(wb, "alerts"); - { - buffer_json_member_add_uint64(wb, "critical", s->health.alerts.critical); - buffer_json_member_add_uint64(wb, "warning", s->health.alerts.warning); - buffer_json_member_add_uint64(wb, "clear", s->health.alerts.clear); - buffer_json_member_add_uint64(wb, "undefined", s->health.alerts.undefined); - buffer_json_member_add_uint64(wb, "uninitialized", s->health.alerts.uninitialized); - } - buffer_json_object_close(wb); // alerts - } - } - buffer_json_object_close(wb); // health -} - -static void rrdcontext_to_json_v2_rrdhost(BUFFER *wb, RRDHOST *host, struct rrdcontext_to_json_v2_data *ctl, size_t node_id) { - buffer_json_add_array_item_object(wb); // this node - buffer_json_node_add_v2(wb, host, node_id, 0, - (ctl->mode & CONTEXTS_V2_AGENTS) && !(ctl->mode & CONTEXTS_V2_NODE_INSTANCES)); - - if(ctl->mode & (CONTEXTS_V2_NODES_INFO | CONTEXTS_V2_NODE_INSTANCES)) { - RRDHOST_STATUS s; - rrdhost_status(host, ctl->now, &s); - - if (ctl->mode & (CONTEXTS_V2_NODES_INFO)) { - buffer_json_member_add_string(wb, "v", rrdhost_program_version(host)); - - host_labels2json(host, wb, "labels"); - - if (host->system_info) { - buffer_json_member_add_object(wb, "hw"); - { - buffer_json_member_add_string_or_empty(wb, "architecture", host->system_info->architecture); - buffer_json_member_add_string_or_empty(wb, "cpu_frequency", host->system_info->host_cpu_freq); - buffer_json_member_add_string_or_empty(wb, "cpus", host->system_info->host_cores); - buffer_json_member_add_string_or_empty(wb, "memory", host->system_info->host_ram_total); - buffer_json_member_add_string_or_empty(wb, "disk_space", host->system_info->host_disk_space); - buffer_json_member_add_string_or_empty(wb, "virtualization", host->system_info->virtualization); - buffer_json_member_add_string_or_empty(wb, "container", host->system_info->container); - } - buffer_json_object_close(wb); - - buffer_json_member_add_object(wb, "os"); - { - buffer_json_member_add_string_or_empty(wb, "id", host->system_info->host_os_id); - 
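// Note: the abbreviated keys used throughout this file ("nm" for name, "v"
// for version, "mg" for machine_guid, "nd" for node_id, "ni" for node index)
// keep the per-node payload compact; the same shorthand appears in the node,
// agent and OS objects emitted here.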
buffer_json_member_add_string_or_empty(wb, "nm", host->system_info->host_os_name); - buffer_json_member_add_string_or_empty(wb, "v", host->system_info->host_os_version); - buffer_json_member_add_object(wb, "kernel"); - buffer_json_member_add_string_or_empty(wb, "nm", host->system_info->kernel_name); - buffer_json_member_add_string_or_empty(wb, "v", host->system_info->kernel_version); - buffer_json_object_close(wb); - } - buffer_json_object_close(wb); - } - - // created - the node is created but never connected to cloud - // unreachable - not currently connected - // stale - connected but not having live data - // reachable - connected with live data - // pruned - not connected for some time and has been removed - buffer_json_member_add_string(wb, "state", rrdhost_state_cloud_emulation(host) ? "reachable" : "stale"); - - rrdhost_health_to_json_v2(wb, "health", &s); - agent_capabilities_to_json(wb, host, "capabilities"); - } - - if (ctl->mode & (CONTEXTS_V2_NODE_INSTANCES)) { - buffer_json_member_add_array(wb, "instances"); - buffer_json_add_array_item_object(wb); // this instance - { - buffer_json_agent_status_id(wb, 0, 0); - - buffer_json_member_add_object(wb, "db"); - { - buffer_json_member_add_string(wb, "status", rrdhost_db_status_to_string(s.db.status)); - buffer_json_member_add_string(wb, "liveness", rrdhost_db_liveness_to_string(s.db.liveness)); - buffer_json_member_add_string(wb, "mode", rrd_memory_mode_name(s.db.mode)); - buffer_json_member_add_time_t(wb, "first_time", s.db.first_time_s); - buffer_json_member_add_time_t(wb, "last_time", s.db.last_time_s); - buffer_json_member_add_uint64(wb, "metrics", s.db.metrics); - buffer_json_member_add_uint64(wb, "instances", s.db.instances); - buffer_json_member_add_uint64(wb, "contexts", s.db.contexts); - } - buffer_json_object_close(wb); - - rrdhost_receiver_to_json(wb, &s, "ingest"); - rrdhost_sender_to_json(wb, &s, "stream"); - - buffer_json_member_add_object(wb, "ml"); - buffer_json_member_add_string(wb, "status", rrdhost_ml_status_to_string(s.ml.status)); - buffer_json_member_add_string(wb, "type", rrdhost_ml_type_to_string(s.ml.type)); - if (s.ml.status == RRDHOST_ML_STATUS_RUNNING) { - buffer_json_member_add_object(wb, "metrics"); - { - buffer_json_member_add_uint64(wb, "anomalous", s.ml.metrics.anomalous); - buffer_json_member_add_uint64(wb, "normal", s.ml.metrics.normal); - buffer_json_member_add_uint64(wb, "trained", s.ml.metrics.trained); - buffer_json_member_add_uint64(wb, "pending", s.ml.metrics.pending); - buffer_json_member_add_uint64(wb, "silenced", s.ml.metrics.silenced); - } - buffer_json_object_close(wb); // metrics - } - buffer_json_object_close(wb); // ml - - rrdhost_health_to_json_v2(wb, "health", &s); - - host_functions2json(host, wb); // functions - agent_capabilities_to_json(wb, host, "capabilities"); - - host_dyncfg_to_json_v2(wb, "dyncfg", &s); - } - buffer_json_object_close(wb); // this instance - buffer_json_array_close(wb); // instances - } - } - buffer_json_object_close(wb); // this node -} - -static ssize_t rrdcontext_to_json_v2_add_host(void *data, RRDHOST *host, bool queryable_host) { - if(!queryable_host || !host->rrdctx.contexts) - // the host matches the 'scope_host' but does not match the 'host' patterns - // or the host does not have any contexts - return 0; // continue to next host - - struct rrdcontext_to_json_v2_data *ctl = data; - - if(ctl->window.enabled && !rrdhost_matches_window(host, ctl->window.after, ctl->window.before, ctl->now)) - // the host does not have data in the requested window - 
return 0; // continue to next host - - if(ctl->request->timeout_ms && now_monotonic_usec() > ctl->timings.received_ut + ctl->request->timeout_ms * USEC_PER_MS) - // timed out - return -2; // stop the query - - if(ctl->request->interrupt_callback && ctl->request->interrupt_callback(ctl->request->interrupt_callback_data)) - // interrupted - return -1; // stop the query - - bool host_matched = (ctl->mode & CONTEXTS_V2_NODES); - bool do_contexts = (ctl->mode & (CONTEXTS_V2_CONTEXTS | CONTEXTS_V2_ALERTS)); - - ctl->q.host_match = FTS_MATCHED_NONE; - if((ctl->mode & CONTEXTS_V2_SEARCH)) { - // check if we match the host itself - if(ctl->q.pattern && ( - full_text_search_string(&ctl->q.fts, ctl->q.pattern, host->hostname) || - full_text_search_char(&ctl->q.fts, ctl->q.pattern, host->machine_guid) || - (ctl->q.pattern && full_text_search_char(&ctl->q.fts, ctl->q.pattern, ctl->q.host_node_id_str)))) { - ctl->q.host_match = FTS_MATCHED_HOST; - do_contexts = true; - } - } - - if(do_contexts) { - // save it - SIMPLE_PATTERN *old_q = ctl->q.pattern; - - if(ctl->q.host_match == FTS_MATCHED_HOST) - // do not do pattern matching on contexts - we matched the host itself - ctl->q.pattern = NULL; - - ssize_t added = query_scope_foreach_context( - host, ctl->request->scope_contexts, - ctl->contexts.scope_pattern, ctl->contexts.pattern, - rrdcontext_to_json_v2_add_context, queryable_host, ctl); - - // restore it - ctl->q.pattern = old_q; - - if(unlikely(added < 0)) - return -1; // stop the query - - if(added) - host_matched = true; - } - - if(!host_matched) - return 0; - - if(ctl->mode & CONTEXTS_V2_FUNCTIONS) { - struct function_v2_entry t = { - .used = 1, - .size = 1, - .node_ids = &ctl->nodes.ni, - .help = NULL, - .tags = NULL, - .access = HTTP_ACCESS_ALL, - .priority = RRDFUNCTIONS_PRIORITY_DEFAULT, - }; - host_functions_to_dict(host, ctl->functions.dict, &t, sizeof(t), &t.help, &t.tags, &t.access, &t.priority); - } - - if(ctl->mode & CONTEXTS_V2_NODES) { - struct contexts_v2_node t = { - .ni = ctl->nodes.ni++, - .host = host, - }; - - dictionary_set(ctl->nodes.dict, host->machine_guid, &t, sizeof(struct contexts_v2_node)); - } - - return 1; -} - -static void buffer_json_contexts_v2_mode_to_array(BUFFER *wb, const char *key, CONTEXTS_V2_MODE mode) { - buffer_json_member_add_array(wb, key); - - if(mode & CONTEXTS_V2_VERSIONS) - buffer_json_add_array_item_string(wb, "versions"); - - if(mode & CONTEXTS_V2_AGENTS) - buffer_json_add_array_item_string(wb, "agents"); - - if(mode & CONTEXTS_V2_AGENTS_INFO) - buffer_json_add_array_item_string(wb, "agents-info"); - - if(mode & CONTEXTS_V2_NODES) - buffer_json_add_array_item_string(wb, "nodes"); - - if(mode & CONTEXTS_V2_NODES_INFO) - buffer_json_add_array_item_string(wb, "nodes-info"); - - if(mode & CONTEXTS_V2_NODE_INSTANCES) - buffer_json_add_array_item_string(wb, "nodes-instances"); - - if(mode & CONTEXTS_V2_CONTEXTS) - buffer_json_add_array_item_string(wb, "contexts"); - - if(mode & CONTEXTS_V2_SEARCH) - buffer_json_add_array_item_string(wb, "search"); - - if(mode & CONTEXTS_V2_ALERTS) - buffer_json_add_array_item_string(wb, "alerts"); - - if(mode & CONTEXTS_V2_ALERT_TRANSITIONS) - buffer_json_add_array_item_string(wb, "alert_transitions"); - - buffer_json_array_close(wb); -} - -void buffer_json_query_timings(BUFFER *wb, const char *key, struct query_timings *timings) { - timings->finished_ut = now_monotonic_usec(); - if(!timings->executed_ut) - timings->executed_ut = timings->finished_ut; - if(!timings->preprocessed_ut) - timings->preprocessed_ut = 
timings->received_ut; - buffer_json_member_add_object(wb, key); - buffer_json_member_add_double(wb, "prep_ms", (NETDATA_DOUBLE)(timings->preprocessed_ut - timings->received_ut) / USEC_PER_MS); - buffer_json_member_add_double(wb, "query_ms", (NETDATA_DOUBLE)(timings->executed_ut - timings->preprocessed_ut) / USEC_PER_MS); - buffer_json_member_add_double(wb, "output_ms", (NETDATA_DOUBLE)(timings->finished_ut - timings->executed_ut) / USEC_PER_MS); - buffer_json_member_add_double(wb, "total_ms", (NETDATA_DOUBLE)(timings->finished_ut - timings->received_ut) / USEC_PER_MS); - buffer_json_member_add_double(wb, "cloud_ms", (NETDATA_DOUBLE)(timings->finished_ut - timings->received_ut) / USEC_PER_MS); - buffer_json_object_close(wb); -} - -void build_info_to_json_object(BUFFER *b); - -static void convert_seconds_to_dhms(time_t seconds, char *result, int result_size) { - int days, hours, minutes; - - days = (int) (seconds / (24 * 3600)); - seconds = (int) (seconds % (24 * 3600)); - hours = (int) (seconds / 3600); - seconds %= 3600; - minutes = (int) (seconds / 60); - seconds %= 60; - - // Format the result into the provided string buffer - BUFFER *buf = buffer_create(128, NULL); - if (days) - buffer_sprintf(buf,"%d day%s%s", days, days==1 ? "" : "s", hours || minutes ? ", " : ""); - if (hours) - buffer_sprintf(buf,"%d hour%s%s", hours, hours==1 ? "" : "s", minutes ? ", " : ""); - if (minutes) - buffer_sprintf(buf,"%d minute%s%s", minutes, minutes==1 ? "" : "s", seconds ? ", " : ""); - if (seconds) - buffer_sprintf(buf,"%d second%s", (int) seconds, seconds==1 ? "" : "s"); - strncpyz(result, buffer_tostring(buf), result_size); - buffer_free(buf); -} - -void buffer_json_agents_v2(BUFFER *wb, struct query_timings *timings, time_t now_s, bool info, bool array) { - if(!now_s) - now_s = now_realtime_sec(); - - if(array) { - buffer_json_member_add_array(wb, "agents"); - buffer_json_add_array_item_object(wb); - } - else - buffer_json_member_add_object(wb, "agent"); - - buffer_json_member_add_string(wb, "mg", localhost->machine_guid); - buffer_json_member_add_uuid(wb, "nd", localhost->node_id); - buffer_json_member_add_string(wb, "nm", rrdhost_hostname(localhost)); - buffer_json_member_add_time_t(wb, "now", now_s); - - if(array) - buffer_json_member_add_uint64(wb, "ai", 0); - - if(info) { - buffer_json_member_add_object(wb, "application"); - build_info_to_json_object(wb); - buffer_json_object_close(wb); // netdata - - buffer_json_cloud_status(wb, now_s); - - buffer_json_member_add_array(wb, "db_size"); - size_t group_seconds = localhost->rrd_update_every; - for (size_t tier = 0; tier < storage_tiers; tier++) { - STORAGE_ENGINE *eng = localhost->db[tier].eng; - if (!eng) continue; - - group_seconds *= storage_tiers_grouping_iterations[tier]; - uint64_t max = storage_engine_disk_space_max(eng->seb, localhost->db[tier].si); - uint64_t used = storage_engine_disk_space_used(eng->seb, localhost->db[tier].si); -#ifdef ENABLE_DBENGINE - if (!max && eng->seb == STORAGE_ENGINE_BACKEND_DBENGINE) { - max = get_directory_free_bytes_space(multidb_ctx[tier]); - max += used; - } -#endif - time_t first_time_s = storage_engine_global_first_time_s(eng->seb, localhost->db[tier].si); - size_t currently_collected_metrics = storage_engine_collected_metrics(eng->seb, localhost->db[tier].si); - - NETDATA_DOUBLE percent; - if (used && max) - percent = (NETDATA_DOUBLE) used * 100.0 / (NETDATA_DOUBLE) max; - else - percent = 0.0; - - buffer_json_add_array_item_object(wb); - buffer_json_member_add_uint64(wb, "tier", tier); - char 
human_retention[128];
-            convert_seconds_to_dhms((time_t) group_seconds, human_retention, sizeof(human_retention) - 1);
-            buffer_json_member_add_string(wb, "point_every", human_retention);
-
-            buffer_json_member_add_uint64(wb, "metrics", storage_engine_metrics(eng->seb, localhost->db[tier].si));
-            buffer_json_member_add_uint64(wb, "samples", storage_engine_samples(eng->seb, localhost->db[tier].si));
-
-            if(used || max) {
-                buffer_json_member_add_uint64(wb, "disk_used", used);
-                buffer_json_member_add_uint64(wb, "disk_max", max);
-                buffer_json_member_add_double(wb, "disk_percent", percent);
-            }
-
-            if(first_time_s) {
-                time_t retention = now_s - first_time_s;
-
-                buffer_json_member_add_time_t(wb, "from", first_time_s);
-                buffer_json_member_add_time_t(wb, "to", now_s);
-                buffer_json_member_add_time_t(wb, "retention", retention);
-
-                convert_seconds_to_dhms(retention, human_retention, sizeof(human_retention) - 1);
-                buffer_json_member_add_string(wb, "retention_human", human_retention);
-
-                if(used || max) { // we have disk space information
-                    time_t time_retention = 0;
-#ifdef ENABLE_DBENGINE
-                    time_retention = multidb_ctx[tier]->config.max_retention_s;
-#endif
-                    time_t space_retention = (time_t)((NETDATA_DOUBLE)(now_s - first_time_s) * 100.0 / percent);
-                    time_t actual_retention = MIN(space_retention, time_retention ? time_retention : space_retention);
-
-                    if (time_retention) {
-                        convert_seconds_to_dhms(time_retention, human_retention, sizeof(human_retention) - 1);
-                        buffer_json_member_add_time_t(wb, "requested_retention", time_retention);
-                        buffer_json_member_add_string(wb, "requested_retention_human", human_retention);
-                    }
-
-                    convert_seconds_to_dhms(actual_retention, human_retention, sizeof(human_retention) - 1);
-                    buffer_json_member_add_time_t(wb, "expected_retention", actual_retention);
-                    buffer_json_member_add_string(wb, "expected_retention_human", human_retention);
-                }
-            }
-
-            if(currently_collected_metrics)
-                buffer_json_member_add_uint64(wb, "currently_collected_metrics", currently_collected_metrics);
-
-            buffer_json_object_close(wb);
-        }
-        buffer_json_array_close(wb); // db_size
-    }
-
-    if(timings)
-        buffer_json_query_timings(wb, "timings", timings);
-
-    buffer_json_object_close(wb);
-
-    if(array)
-        buffer_json_array_close(wb);
-}
-
-void buffer_json_cloud_timings(BUFFER *wb, const char *key, struct query_timings *timings) {
-    if(!timings->finished_ut)
-        timings->finished_ut = now_monotonic_usec();
-
-    buffer_json_member_add_object(wb, key);
-    buffer_json_member_add_double(wb, "routing_ms", 0.0);
-    buffer_json_member_add_double(wb, "node_max_ms", 0.0);
-    buffer_json_member_add_double(wb, "total_ms", (NETDATA_DOUBLE)(timings->finished_ut - timings->received_ut) / USEC_PER_MS);
-    buffer_json_object_close(wb);
-}
-
-static void functions_insert_callback(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) {
-    struct function_v2_entry *t = value;
-
-    // it is initialized with a static reference - we need to mallocz() the array
-    size_t *v = t->node_ids;
-    t->node_ids = mallocz(sizeof(size_t));
-    *t->node_ids = *v;
-    t->size = 1;
-    t->used = 1;
-}
-
-static bool functions_conflict_callback(const DICTIONARY_ITEM *item __maybe_unused, void *old_value, void *new_value, void *data __maybe_unused) {
-    struct function_v2_entry *t = old_value, *n = new_value;
-    size_t *v = n->node_ids;
-
-    if(t->used >= t->size) {
-        t->node_ids = reallocz(t->node_ids, t->size * 2 * sizeof(size_t));
-        t->size *= 2;
-    }
-
-    t->node_ids[t->used++] = *v;
-
-    return true;
-}
-
-static void functions_delete_callback(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) {
-    struct function_v2_entry *t = value;
-    freez(t->node_ids);
-}
-
-static bool contexts_conflict_callback(const DICTIONARY_ITEM *item __maybe_unused, void *old_value, void *new_value, void *data __maybe_unused) {
-    struct context_v2_entry *o = old_value;
-    struct context_v2_entry *n = new_value;
-
-    o->count++;
-
-    if(o->family != n->family) {
-        if((o->flags & RRD_FLAG_COLLECTED) && !(n->flags & RRD_FLAG_COLLECTED))
-            // keep old
-            ;
-        else if(!(o->flags & RRD_FLAG_COLLECTED) && (n->flags & RRD_FLAG_COLLECTED)) {
-            // keep new
-            string_freez(o->family);
-            o->family = string_dup(n->family);
-        }
-        else {
-            // merge
-            STRING *old_family = o->family;
-            o->family = string_2way_merge(o->family, n->family);
-            string_freez(old_family);
-        }
-    }
-
-    if(o->priority != n->priority) {
-        if((o->flags & RRD_FLAG_COLLECTED) && !(n->flags & RRD_FLAG_COLLECTED))
-            // keep o
-            ;
-        else if(!(o->flags & RRD_FLAG_COLLECTED) && (n->flags & RRD_FLAG_COLLECTED))
-            // keep n
-            o->priority = n->priority;
-        else
-            // keep the min
-            o->priority = MIN(o->priority, n->priority);
-    }
-
-    if(o->first_time_s && n->first_time_s)
-        o->first_time_s = MIN(o->first_time_s, n->first_time_s);
-    else if(!o->first_time_s)
-        o->first_time_s = n->first_time_s;
-
-    if(o->last_time_s && n->last_time_s)
-        o->last_time_s = MAX(o->last_time_s, n->last_time_s);
-    else if(!o->last_time_s)
-        o->last_time_s = n->last_time_s;
-
-    o->flags |= n->flags;
-    o->match = MIN(o->match, n->match);
-
-    string_freez(n->family);
-
-    return true;
-}
-
-static void contexts_delete_callback(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) {
-    struct context_v2_entry *z = value;
-    string_freez(z->family);
-}
-
-static void rrdcontext_v2_set_transition_filter(const char *machine_guid, const char *context, time_t alarm_id, void *data) {
-    struct rrdcontext_to_json_v2_data *ctl = data;
-
-    if(machine_guid && *machine_guid) {
-        if(ctl->nodes.scope_pattern)
-            simple_pattern_free(ctl->nodes.scope_pattern);
-
-        if(ctl->nodes.pattern)
-            simple_pattern_free(ctl->nodes.pattern);
-
-        ctl->nodes.scope_pattern = string_to_simple_pattern(machine_guid);
-        ctl->nodes.pattern = NULL;
-    }
-
-    if(context && *context) {
-        if(ctl->contexts.scope_pattern)
-            simple_pattern_free(ctl->contexts.scope_pattern);
-
-        if(ctl->contexts.pattern)
-            simple_pattern_free(ctl->contexts.pattern);
-
-        ctl->contexts.scope_pattern = string_to_simple_pattern(context);
-        ctl->contexts.pattern = NULL;
-    }
-
-    ctl->alerts.alarm_id_filter = alarm_id;
-}
-
-struct alert_instances_callback_data {
-    BUFFER *wb;
-    struct rrdcontext_to_json_v2_data *ctl;
-    bool debug;
-};
-
-static void contexts_v2_alert_config_to_json_from_sql_alert_config_data(struct sql_alert_config_data *t, void *data) {
-    struct alert_transitions_callback_data *d = data;
-    BUFFER *wb = d->wb;
-    bool debug = d->debug;
-    d->configs_added++;
-
-    if(d->only_one_config)
-        buffer_json_add_array_item_object(wb); // alert config
-
-    {
-        buffer_json_member_add_string(wb, "name", t->name);
-        buffer_json_member_add_uuid(wb, "config_hash_id", t->config_hash_id);
-
-        buffer_json_member_add_object(wb, "selectors");
-        {
-            bool is_template = t->selectors.on_template && *t->selectors.on_template ? true : false;
-            buffer_json_member_add_string(wb, "type", is_template ? "template" : "alarm");
-            buffer_json_member_add_string(wb, "on", is_template ? t->selectors.on_template : t->selectors.on_key);
-
-            buffer_json_member_add_string(wb, "families", t->selectors.families);
-            buffer_json_member_add_string(wb, "host_labels", t->selectors.host_labels);
-            buffer_json_member_add_string(wb, "chart_labels", t->selectors.chart_labels);
-        }
-        buffer_json_object_close(wb); // selectors
-
-        buffer_json_member_add_object(wb, "value"); // value
-        {
-            // buffer_json_member_add_string(wb, "every", t->value.every); // does not exist in Netdata Cloud
-            buffer_json_member_add_string(wb, "units", t->value.units);
-            buffer_json_member_add_uint64(wb, "update_every", t->value.update_every);
-
-            if (t->value.db.after || debug) {
-                buffer_json_member_add_object(wb, "db");
-                {
-                    // buffer_json_member_add_string(wb, "lookup", t->value.db.lookup); // does not exist in Netdata Cloud
-
-                    buffer_json_member_add_time_t(wb, "after", t->value.db.after);
-                    buffer_json_member_add_time_t(wb, "before", t->value.db.before);
-                    buffer_json_member_add_string(wb, "time_group_condition", alerts_group_conditions_id2txt(t->value.db.time_group_condition));
-                    buffer_json_member_add_double(wb, "time_group_value", t->value.db.time_group_value);
-                    buffer_json_member_add_string(wb, "dims_group", alerts_dims_grouping_id2group(t->value.db.dims_group));
-                    buffer_json_member_add_string(wb, "data_source", alerts_data_source_id2source(t->value.db.data_source));
-                    buffer_json_member_add_string(wb, "method", t->value.db.method);
-                    buffer_json_member_add_string(wb, "dimensions", t->value.db.dimensions);
-                    rrdr_options_to_buffer_json_array(wb, "options", (RRDR_OPTIONS)t->value.db.options);
-                }
-                buffer_json_object_close(wb); // db
-            }
-
-            if (t->value.calc || debug)
-                buffer_json_member_add_string(wb, "calc", t->value.calc);
-        }
-        buffer_json_object_close(wb); // value
-
-        if (t->status.warn || t->status.crit || debug) {
-            buffer_json_member_add_object(wb, "status"); // status
-            {
-                NETDATA_DOUBLE green = t->status.green ? str2ndd(t->status.green, NULL) : NAN;
-                NETDATA_DOUBLE red = t->status.red ? str2ndd(t->status.red, NULL) : NAN;
-
-                if (!isnan(green) || debug)
-                    buffer_json_member_add_double(wb, "green", green);
-
-                if (!isnan(red) || debug)
-                    buffer_json_member_add_double(wb, "red", red);
-
-                if (t->status.warn || debug)
-                    buffer_json_member_add_string(wb, "warn", t->status.warn);
-
-                if (t->status.crit || debug)
-                    buffer_json_member_add_string(wb, "crit", t->status.crit);
-            }
-            buffer_json_object_close(wb); // status
-        }
-
-        buffer_json_member_add_object(wb, "notification");
-        {
-            buffer_json_member_add_string(wb, "type", "agent");
-            buffer_json_member_add_string(wb, "exec", t->notification.exec ? t->notification.exec : NULL);
-            buffer_json_member_add_string(wb, "to", t->notification.to_key ? t->notification.to_key : string2str(localhost->health.health_default_recipient));
-            buffer_json_member_add_string(wb, "delay", t->notification.delay);
-            buffer_json_member_add_string(wb, "repeat", t->notification.repeat);
-            buffer_json_member_add_string(wb, "options", t->notification.options);
-        }
-        buffer_json_object_close(wb); // notification
-
-        buffer_json_member_add_string(wb, "class", t->classification);
-        buffer_json_member_add_string(wb, "component", t->component);
-        buffer_json_member_add_string(wb, "type", t->type);
-        buffer_json_member_add_string(wb, "info", t->info);
-        buffer_json_member_add_string(wb, "summary", t->summary);
-        // buffer_json_member_add_string(wb, "source", t->source); // moved to alert instance
-    }
-
-    if(d->only_one_config)
-        buffer_json_object_close(wb);
-}
-
-int contexts_v2_alert_config_to_json(struct web_client *w, const char *config_hash_id) {
-    struct alert_transitions_callback_data data = {
-        .wb = w->response.data,
-        .debug = false,
-        .only_one_config = false,
-    };
-    DICTIONARY *configs = dictionary_create(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE);
-    dictionary_set(configs, config_hash_id, NULL, 0);
-
-    buffer_flush(w->response.data);
-
-    buffer_json_initialize(w->response.data, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT);
-
-    int added = sql_get_alert_configuration(configs, contexts_v2_alert_config_to_json_from_sql_alert_config_data, &data, false);
-    buffer_json_finalize(w->response.data);
-
-    int ret = HTTP_RESP_OK;
-
-    if(added <= 0) {
-        buffer_flush(w->response.data);
-        w->response.data->content_type = CT_TEXT_PLAIN;
-        if(added < 0) {
-            buffer_strcat(w->response.data, "Failed to execute SQL query.");
-            ret = HTTP_RESP_INTERNAL_SERVER_ERROR;
-        }
-        else {
-            buffer_strcat(w->response.data, "Config is not found.");
-            ret = HTTP_RESP_NOT_FOUND;
-        }
-    }
-
-    return ret;
-}
-
-static int contexts_v2_alert_instance_to_json_callback(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data) {
-    struct sql_alert_instance_v2_entry *t = value;
-    struct alert_instances_callback_data *d = data;
-    struct rrdcontext_to_json_v2_data *ctl = d->ctl; (void)ctl;
-    bool debug = d->debug; (void)debug;
-    BUFFER *wb = d->wb;
-
-    buffer_json_add_array_item_object(wb);
-    {
-        buffer_json_member_add_uint64(wb, "ni", t->ni);
-
-        buffer_json_member_add_string(wb, "nm", string2str(t->name));
-        buffer_json_member_add_string(wb, "ch", string2str(t->chart_id));
-        buffer_json_member_add_string(wb, "ch_n", string2str(t->chart_name));
-
-        if(ctl->request->options & CONTEXT_V2_OPTION_ALERTS_WITH_SUMMARY)
-            buffer_json_member_add_uint64(wb, "ati", t->ati);
-
-        if(ctl->request->options & CONTEXT_V2_OPTION_ALERTS_WITH_INSTANCES) {
-            buffer_json_member_add_string(wb, "units", string2str(t->units));
-            buffer_json_member_add_string(wb, "fami", string2str(t->family));
-            buffer_json_member_add_string(wb, "info", string2str(t->info));
-            buffer_json_member_add_string(wb, "sum", string2str(t->summary));
-            buffer_json_member_add_string(wb, "ctx", string2str(t->context));
-            buffer_json_member_add_string(wb, "st", rrdcalc_status2string(t->status));
-            buffer_json_member_add_uuid(wb, "tr_i", &t->last_transition_id);
-            buffer_json_member_add_double(wb, "tr_v", t->last_status_change_value);
-            buffer_json_member_add_time_t(wb, "tr_t", t->last_status_change);
-            buffer_json_member_add_uuid(wb, "cfg", &t->config_hash_id);
-            buffer_json_member_add_string(wb, "src", string2str(t->source));
-
-            buffer_json_member_add_string(wb, "to", string2str(t->recipient));
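// Note on the short member keys used in this callback: the v2 API keeps its
// JSON payloads compact, so members are abbreviated ("ni" = node index,
// "nm" = name, "ch"/"ch_n" = chart id/name, "st" = status, "to" = recipient,
// "tp" = type, "cm" = component, "cl" = classification). These expansions are
// inferred from the values assigned to them here, not from a published schema.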
buffer_json_member_add_string(wb, "tp", string2str(t->type)); - buffer_json_member_add_string(wb, "cm", string2str(t->component)); - buffer_json_member_add_string(wb, "cl", string2str(t->classification)); - - // Agent specific fields - buffer_json_member_add_uint64(wb, "gi", t->global_id); - // rrdcalc_flags_to_json_array (wb, "flags", t->flags); - } - - if(ctl->request->options & CONTEXT_V2_OPTION_ALERTS_WITH_VALUES) { - // Netdata Cloud fetched these by querying the agents - buffer_json_member_add_double(wb, "v", t->value); - buffer_json_member_add_time_t(wb, "t", t->last_updated); - } - } - buffer_json_object_close(wb); // alert instance - - return 1; -} - -static void contexts_v2_alerts_by_x_update_prototypes(void *data, STRING *type, STRING *component, STRING *classification, STRING *recipient) { - struct rrdcontext_to_json_v2_data *ctl = data; - - dictionary_set_advanced(ctl->alerts.by_type, string2str(type), (ssize_t)string_strlen(type), NULL, sizeof(struct alert_by_x_entry), NULL); - dictionary_set_advanced(ctl->alerts.by_component, string2str(component), (ssize_t)string_strlen(component), NULL, sizeof(struct alert_by_x_entry), NULL); - dictionary_set_advanced(ctl->alerts.by_classification, string2str(classification), (ssize_t)string_strlen(classification), NULL, sizeof(struct alert_by_x_entry), NULL); - dictionary_set_advanced(ctl->alerts.by_recipient, string2str(recipient), (ssize_t)string_strlen(recipient), NULL, sizeof(struct alert_by_x_entry), NULL); -} - -static void contexts_v2_alerts_by_x_to_json(BUFFER *wb, DICTIONARY *dict, const char *key) { - buffer_json_member_add_array(wb, key); - { - struct alert_by_x_entry *b; - dfe_start_read(dict, b) { - buffer_json_add_array_item_object(wb); - { - buffer_json_member_add_string(wb, "name", b_dfe.name); - buffer_json_member_add_uint64(wb, "cr", b->running.counts.critical); - buffer_json_member_add_uint64(wb, "wr", b->running.counts.warning); - buffer_json_member_add_uint64(wb, "cl", b->running.counts.clear); - buffer_json_member_add_uint64(wb, "er", b->running.counts.error); - buffer_json_member_add_uint64(wb, "running", b->running.total); - - buffer_json_member_add_uint64(wb, "running_silent", b->running.silent); - - if(b->prototypes.available) - buffer_json_member_add_uint64(wb, "available", b->prototypes.available); - } - buffer_json_object_close(wb); - } - dfe_done(b); - } - buffer_json_array_close(wb); -} - -static void contexts_v2_alert_instances_to_json(BUFFER *wb, const char *key, struct rrdcontext_to_json_v2_data *ctl, bool debug) { - buffer_json_member_add_array(wb, key); - { - struct alert_instances_callback_data data = { - .wb = wb, - .ctl = ctl, - .debug = debug, - }; - dictionary_walkthrough_rw(ctl->alerts.alert_instances, DICTIONARY_LOCK_READ, - contexts_v2_alert_instance_to_json_callback, &data); - } - buffer_json_array_close(wb); // alerts_instances -} - -static void contexts_v2_alerts_to_json(BUFFER *wb, struct rrdcontext_to_json_v2_data *ctl, bool debug) { - if(ctl->request->options & CONTEXT_V2_OPTION_ALERTS_WITH_SUMMARY) { - buffer_json_member_add_array(wb, "alerts"); - { - struct alert_v2_entry *t; - dfe_start_read(ctl->alerts.summary, t) - { - buffer_json_add_array_item_object(wb); - { - buffer_json_member_add_uint64(wb, "ati", t->ati); - - buffer_json_member_add_array(wb, "ni"); - void *host_guid; - dfe_start_read(t->nodes, host_guid) { - struct contexts_v2_node *cn = dictionary_get(ctl->nodes.dict,host_guid_dfe.name); - buffer_json_add_array_item_int64(wb, (int64_t) cn->ni); - } - dfe_done(host_guid); - 
-                    buffer_json_array_close(wb);
-
-                    buffer_json_member_add_string(wb, "nm", string2str(t->name));
-                    buffer_json_member_add_string(wb, "sum", string2str(t->summary));
-
-                    buffer_json_member_add_uint64(wb, "cr", t->counts.critical);
-                    buffer_json_member_add_uint64(wb, "wr", t->counts.warning);
-                    buffer_json_member_add_uint64(wb, "cl", t->counts.clear);
-                    buffer_json_member_add_uint64(wb, "er", t->counts.error);
-
-                    buffer_json_member_add_uint64(wb, "in", t->instances);
-                    buffer_json_member_add_uint64(wb, "nd", dictionary_entries(t->nodes));
-                    buffer_json_member_add_uint64(wb, "cfg", dictionary_entries(t->configs));
-
-                    buffer_json_member_add_array(wb, "ctx");
-                    rrdlabels_key_to_buffer_array_item(t->context, wb);
-                    buffer_json_array_close(wb); // ctx
-
-                    buffer_json_member_add_array(wb, "cls");
-                    rrdlabels_key_to_buffer_array_item(t->classification, wb);
-                    buffer_json_array_close(wb); // classification
-
-
-                    buffer_json_member_add_array(wb, "cp");
-                    rrdlabels_key_to_buffer_array_item(t->component, wb);
-                    buffer_json_array_close(wb); // component
-
-                    buffer_json_member_add_array(wb, "ty");
-                    rrdlabels_key_to_buffer_array_item(t->type, wb);
-                    buffer_json_array_close(wb); // type
-
-                    buffer_json_member_add_array(wb, "to");
-                    rrdlabels_key_to_buffer_array_item(t->recipient, wb);
-                    buffer_json_array_close(wb); // recipient
-                }
-                buffer_json_object_close(wb); // alert name
-            }
-            dfe_done(t);
-        }
-        buffer_json_array_close(wb); // alerts
-
-        health_prototype_metadata_foreach(ctl, contexts_v2_alerts_by_x_update_prototypes);
-        contexts_v2_alerts_by_x_to_json(wb, ctl->alerts.by_type, "alerts_by_type");
-        contexts_v2_alerts_by_x_to_json(wb, ctl->alerts.by_component, "alerts_by_component");
-        contexts_v2_alerts_by_x_to_json(wb, ctl->alerts.by_classification, "alerts_by_classification");
-        contexts_v2_alerts_by_x_to_json(wb, ctl->alerts.by_recipient, "alerts_by_recipient");
-        contexts_v2_alerts_by_x_to_json(wb, ctl->alerts.by_module, "alerts_by_module");
-    }
-
-    if(ctl->request->options & (CONTEXT_V2_OPTION_ALERTS_WITH_INSTANCES|CONTEXT_V2_OPTION_ALERTS_WITH_VALUES)) {
-        contexts_v2_alert_instances_to_json(wb, "alert_instances", ctl, debug);
-    }
-}
-
-#define SQL_TRANSITION_DATA_SMALL_STRING (6 * 8)
-#define SQL_TRANSITION_DATA_MEDIUM_STRING (12 * 8)
-#define SQL_TRANSITION_DATA_BIG_STRING 512
-
-struct sql_alert_transition_fixed_size {
-    usec_t global_id;
-    nd_uuid_t transition_id;
-    nd_uuid_t host_id;
-    nd_uuid_t config_hash_id;
-    uint32_t alarm_id;
-    char alert_name[SQL_TRANSITION_DATA_SMALL_STRING];
-    char chart[RRD_ID_LENGTH_MAX];
-    char chart_name[RRD_ID_LENGTH_MAX];
-    char chart_context[SQL_TRANSITION_DATA_MEDIUM_STRING];
-    char family[SQL_TRANSITION_DATA_SMALL_STRING];
-    char recipient[SQL_TRANSITION_DATA_MEDIUM_STRING];
-    char units[SQL_TRANSITION_DATA_SMALL_STRING];
-    char exec[SQL_TRANSITION_DATA_BIG_STRING];
-    char info[SQL_TRANSITION_DATA_BIG_STRING];
-    char summary[SQL_TRANSITION_DATA_BIG_STRING];
-    char classification[SQL_TRANSITION_DATA_SMALL_STRING];
-    char type[SQL_TRANSITION_DATA_SMALL_STRING];
-    char component[SQL_TRANSITION_DATA_SMALL_STRING];
-    time_t when_key;
-    time_t duration;
-    time_t non_clear_duration;
-    uint64_t flags;
-    time_t delay_up_to_timestamp;
-    time_t exec_run_timestamp;
-    int exec_code;
-    int new_status;
-    int old_status;
-    int delay;
-    time_t last_repeat;
-    NETDATA_DOUBLE new_value;
-    NETDATA_DOUBLE old_value;
-
-    char machine_guid[UUID_STR_LEN];
-    struct sql_alert_transition_fixed_size *next;
-    struct sql_alert_transition_fixed_size *prev;
-};
-
-static struct sql_alert_transition_fixed_size *contexts_v2_alert_transition_dup(struct sql_alert_transition_data *t, const char *machine_guid, struct sql_alert_transition_fixed_size *dst) {
-    struct sql_alert_transition_fixed_size *n = dst ? dst : mallocz(sizeof(*n));
-
-    n->global_id = t->global_id;
-    uuid_copy(n->transition_id, *t->transition_id);
-    uuid_copy(n->host_id, *t->host_id);
-    uuid_copy(n->config_hash_id, *t->config_hash_id);
-    n->alarm_id = t->alarm_id;
-    strncpyz(n->alert_name, t->alert_name ? t->alert_name : "", sizeof(n->alert_name) - 1);
-    strncpyz(n->chart, t->chart ? t->chart : "", sizeof(n->chart) - 1);
-    strncpyz(n->chart_name, t->chart_name ? t->chart_name : n->chart, sizeof(n->chart_name) - 1);
-    strncpyz(n->chart_context, t->chart_context ? t->chart_context : "", sizeof(n->chart_context) - 1);
-    strncpyz(n->family, t->family ? t->family : "", sizeof(n->family) - 1);
-    strncpyz(n->recipient, t->recipient ? t->recipient : "", sizeof(n->recipient) - 1);
-    strncpyz(n->units, t->units ? t->units : "", sizeof(n->units) - 1);
-    strncpyz(n->exec, t->exec ? t->exec : "", sizeof(n->exec) - 1);
-    strncpyz(n->info, t->info ? t->info : "", sizeof(n->info) - 1);
-    strncpyz(n->summary, t->summary ? t->summary : "", sizeof(n->summary) - 1);
-    strncpyz(n->classification, t->classification ? t->classification : "", sizeof(n->classification) - 1);
-    strncpyz(n->type, t->type ? t->type : "", sizeof(n->type) - 1);
-    strncpyz(n->component, t->component ? t->component : "", sizeof(n->component) - 1);
-    n->when_key = t->when_key;
-    n->duration = t->duration;
-    n->non_clear_duration = t->non_clear_duration;
-    n->flags = t->flags;
-    n->delay_up_to_timestamp = t->delay_up_to_timestamp;
-    n->exec_run_timestamp = t->exec_run_timestamp;
-    n->exec_code = t->exec_code;
-    n->new_status = t->new_status;
-    n->old_status = t->old_status;
-    n->delay = t->delay;
-    n->last_repeat = t->last_repeat;
-    n->new_value = t->new_value;
-    n->old_value = t->old_value;
-
-    memcpy(n->machine_guid, machine_guid, sizeof(n->machine_guid));
-    n->next = n->prev = NULL;
-
-    return n;
-}
-
-static void contexts_v2_alert_transition_free(struct sql_alert_transition_fixed_size *t) {
-    freez(t);
-}
-
-static inline void contexts_v2_alert_transition_keep(struct alert_transitions_callback_data *d, struct sql_alert_transition_data *t, const char *machine_guid) {
-    d->items_matched++;
-
-    if(unlikely(t->global_id <= d->ctl->request->alerts.global_id_anchor)) {
-        // this is in our past, we are not interested
-        d->operations.skips_before++;
-        return;
-    }
-
-    if(unlikely(!d->base)) {
-        d->last_added = contexts_v2_alert_transition_dup(t, machine_guid, NULL);
-        DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(d->base, d->last_added, prev, next);
-        d->items_to_return++;
-        d->operations.first++;
-        return;
-    }
-
-    struct sql_alert_transition_fixed_size *last = d->last_added;
-    while(last->prev != d->base->prev && t->global_id > last->prev->global_id) {
-        last = last->prev;
-        d->operations.backwards++;
-    }
-
-    while(last->next && t->global_id < last->next->global_id) {
-        last = last->next;
-        d->operations.forwards++;
-    }
-
-    if(d->items_to_return >= d->max_items_to_return) {
-        if(last == d->base->prev && t->global_id < last->global_id) {
-            d->operations.skips_after++;
-            return;
-        }
-    }
-
-    d->items_to_return++;
-
-    if(t->global_id > last->global_id) {
-        if(d->items_to_return > d->max_items_to_return) {
-            d->items_to_return--;
-            d->operations.shifts++;
-            d->last_added = d->base->prev;
-            DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(d->base, d->last_added, prev, next);
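// At this point the keep-list is full and the new transition sorts above the
// current tail: the tail node has just been unlinked, and its allocation is
// recycled by passing it as the dst argument of
// contexts_v2_alert_transition_dup() below, avoiding a free()/malloc() pair
// per evicted entry.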
-            d->last_added = contexts_v2_alert_transition_dup(t, machine_guid, d->last_added);
-        }
-        DOUBLE_LINKED_LIST_PREPEND_ITEM_UNSAFE(d->base, d->last_added, prev, next);
-        d->operations.prepend++;
-    }
-    else {
-        d->last_added = contexts_v2_alert_transition_dup(t, machine_guid, NULL);
-        DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(d->base, d->last_added, prev, next);
-        d->operations.append++;
-    }
-
-    while(d->items_to_return > d->max_items_to_return) {
-        // we have to remove something
-
-        struct sql_alert_transition_fixed_size *tmp = d->base->prev;
-        DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(d->base, tmp, prev, next);
-        d->items_to_return--;
-
-        if(unlikely(d->last_added == tmp))
-            d->last_added = d->base;
-
-        contexts_v2_alert_transition_free(tmp);
-
-        d->operations.shifts++;
-    }
-}
-
-static void contexts_v2_alert_transition_callback(struct sql_alert_transition_data *t, void *data) {
-    struct alert_transitions_callback_data *d = data;
-    d->items_evaluated++;
-
-    char machine_guid[UUID_STR_LEN] = "";
-    uuid_unparse_lower(*t->host_id, machine_guid);
-
-    const char *facets[ATF_TOTAL_ENTRIES] = {
-        [ATF_STATUS] = rrdcalc_status2string(t->new_status),
-        [ATF_CLASS] = t->classification,
-        [ATF_TYPE] = t->type,
-        [ATF_COMPONENT] = t->component,
-        [ATF_ROLE] = t->recipient && *t->recipient ? t->recipient : string2str(localhost->health.health_default_recipient),
-        [ATF_NODE] = machine_guid,
-        [ATF_ALERT_NAME] = t->alert_name,
-        [ATF_CHART_NAME] = t->chart_name,
-        [ATF_CONTEXT] = t->chart_context,
-    };
-
-    for(size_t i = 0; i < ATF_TOTAL_ENTRIES ;i++) {
-        if (!facets[i] || !*facets[i]) facets[i] = "unknown";
-
-        struct facet_entry tmp = {
-            .count = 0,
-        };
-        dictionary_set(d->facets[i].dict, facets[i], &tmp, sizeof(tmp));
-    }
-
-    bool selected[ATF_TOTAL_ENTRIES] = { 0 };
-
-    uint32_t selected_by = 0;
-    for(size_t i = 0; i < ATF_TOTAL_ENTRIES ;i++) {
-        selected[i] = !d->facets[i].pattern || simple_pattern_matches(d->facets[i].pattern, facets[i]);
-        if(selected[i])
-            selected_by++;
-    }
-
-    if(selected_by == ATF_TOTAL_ENTRIES) {
-        // this item is selected by all facets
-        // put it in our result (if it fits)
-        contexts_v2_alert_transition_keep(d, t, machine_guid);
-    }
-
-    if(selected_by >= ATF_TOTAL_ENTRIES - 1) {
-        // this item is selected by all, or all except one facet
-        // in both cases we need to add it to our counters
-
-        for (size_t i = 0; i < ATF_TOTAL_ENTRIES; i++) {
-            uint32_t counted_by = selected_by;
-
-            if (counted_by != ATF_TOTAL_ENTRIES) {
-                counted_by = 0;
-                for (size_t j = 0; j < ATF_TOTAL_ENTRIES; j++) {
-                    if (i == j || selected[j])
-                        counted_by++;
-                }
-            }
-
-            if (counted_by == ATF_TOTAL_ENTRIES) {
-                // we need to count it on this facet
-                struct facet_entry *x = dictionary_get(d->facets[i].dict, facets[i]);
-                internal_fatal(!x, "facet is not found");
-                if(x)
-                    x->count++;
-            }
-        }
-    }
-}
-
-static void contexts_v2_alert_transitions_to_json(BUFFER *wb, struct rrdcontext_to_json_v2_data *ctl, bool debug) {
-    struct alert_transitions_callback_data data = {
-        .wb = wb,
-        .ctl = ctl,
-        .debug = debug,
-        .only_one_config = true,
-        .max_items_to_return = ctl->request->alerts.last,
-        .items_to_return = 0,
-        .base = NULL,
-    };
-
-    for(size_t i = 0; i < ATF_TOTAL_ENTRIES ;i++) {
-        data.facets[i].dict = dictionary_create_advanced(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_FIXED_SIZE | DICT_OPTION_DONT_OVERWRITE_VALUE, NULL, sizeof(struct facet_entry));
-        if(ctl->request->alerts.facets[i])
-            data.facets[i].pattern = simple_pattern_create(ctl->request->alerts.facets[i], ",|", SIMPLE_PATTERN_EXACT, false);
-    }
-
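// The facet counting above implements the usual faceted-search rule: a
// transition contributes to facet i's option counters when it matches the
// filters of every facet except possibly i itself, so each facet's counters
// show what the query would return if that one filter were relaxed. A minimal
// standalone sketch of just that rule, with illustrative names (NFACETS,
// selected, count_on_facet) that are not part of this patch:

#include <stdbool.h>
#include <stddef.h>

#define NFACETS 3

// true when an item with the given per-facet match flags should be counted
// under facet f
static bool count_on_facet(const bool selected[NFACETS], size_t f) {
    for (size_t j = 0; j < NFACETS; j++)
        if (j != f && !selected[j])
            return false; // rejected by some other facet's filter
    return true;
}

// contexts_v2_alert_transition_callback() short-circuits this with its
// selected_by counter: only items matching all facets, or all but one, can
// satisfy the rule for any facet, so everything else is skipped early.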
-    sql_alert_transitions(
-        ctl->nodes.dict,
-        ctl->window.after,
-        ctl->window.before,
-        ctl->request->contexts,
-        ctl->request->alerts.alert,
-        ctl->request->alerts.transition,
-        contexts_v2_alert_transition_callback,
-        &data,
-        debug);
-
-    buffer_json_member_add_array(wb, "facets");
-    for (size_t i = 0; i < ATF_TOTAL_ENTRIES; i++) {
-        buffer_json_add_array_item_object(wb);
-        {
-            buffer_json_member_add_string(wb, "id", alert_transition_facets[i].id);
-            buffer_json_member_add_string(wb, "name", alert_transition_facets[i].name);
-            buffer_json_member_add_uint64(wb, "order", alert_transition_facets[i].order);
-            buffer_json_member_add_array(wb, "options");
-            {
-                struct facet_entry *x;
-                dfe_start_read(data.facets[i].dict, x) {
-                    buffer_json_add_array_item_object(wb);
-                    {
-                        buffer_json_member_add_string(wb, "id", x_dfe.name);
-                        if (i == ATF_NODE) {
-                            RRDHOST *host = rrdhost_find_by_guid(x_dfe.name);
-                            if (host)
-                                buffer_json_member_add_string(wb, "name", rrdhost_hostname(host));
-                            else
-                                buffer_json_member_add_string(wb, "name", x_dfe.name);
-                        } else
-                            buffer_json_member_add_string(wb, "name", x_dfe.name);
-                        buffer_json_member_add_uint64(wb, "count", x->count);
-                    }
-                    buffer_json_object_close(wb);
-                }
-                dfe_done(x);
-            }
-            buffer_json_array_close(wb); // options
-        }
-        buffer_json_object_close(wb); // facet
-    }
-    buffer_json_array_close(wb); // facets
-
-    buffer_json_member_add_array(wb, "transitions");
-    for(struct sql_alert_transition_fixed_size *t = data.base; t ; t = t->next) {
-        buffer_json_add_array_item_object(wb);
-        {
-            RRDHOST *host = rrdhost_find_by_guid(t->machine_guid);
-
-            buffer_json_member_add_uint64(wb, "gi", t->global_id);
-            buffer_json_member_add_uuid(wb, "transition_id", &t->transition_id);
-            buffer_json_member_add_uuid(wb, "config_hash_id", &t->config_hash_id);
-            buffer_json_member_add_string(wb, "machine_guid", t->machine_guid);
-
-            if(host) {
-                buffer_json_member_add_string(wb, "hostname", rrdhost_hostname(host));
-
-                if(host->node_id)
-                    buffer_json_member_add_uuid(wb, "node_id", host->node_id);
-            }
-
-            buffer_json_member_add_string(wb, "alert", *t->alert_name ? t->alert_name : NULL);
-            buffer_json_member_add_string(wb, "instance", *t->chart ? t->chart : NULL);
-            buffer_json_member_add_string(wb, "instance_n", *t->chart_name ? t->chart_name : NULL);
-            buffer_json_member_add_string(wb, "context", *t->chart_context ? t->chart_context : NULL);
-            // buffer_json_member_add_string(wb, "family", *t->family ? t->family : NULL);
-            buffer_json_member_add_string(wb, "component", *t->component ? t->component : NULL);
-            buffer_json_member_add_string(wb, "classification", *t->classification ? t->classification : NULL);
-            buffer_json_member_add_string(wb, "type", *t->type ? t->type : NULL);
-
-            buffer_json_member_add_time_t(wb, "when", t->when_key);
-            buffer_json_member_add_string(wb, "info", *t->info ? t->info : "");
-            buffer_json_member_add_string(wb, "summary", *t->summary ? t->summary : "");
-            buffer_json_member_add_string(wb, "units", *t->units ? t->units : NULL);
-            buffer_json_member_add_object(wb, "new");
-            {
-                buffer_json_member_add_string(wb, "status", rrdcalc_status2string(t->new_status));
-                buffer_json_member_add_double(wb, "value", t->new_value);
-            }
-            buffer_json_object_close(wb); // new
-            buffer_json_member_add_object(wb, "old");
-            {
-                buffer_json_member_add_string(wb, "status", rrdcalc_status2string(t->old_status));
-                buffer_json_member_add_double(wb, "value", t->old_value);
-                buffer_json_member_add_time_t(wb, "duration", t->duration);
-                buffer_json_member_add_time_t(wb, "raised_duration", t->non_clear_duration);
-            }
-            buffer_json_object_close(wb); // old
-
-            buffer_json_member_add_object(wb, "notification");
-            {
-                buffer_json_member_add_time_t(wb, "when", t->exec_run_timestamp);
-                buffer_json_member_add_time_t(wb, "delay", t->delay);
-                buffer_json_member_add_time_t(wb, "delay_up_to_time", t->delay_up_to_timestamp);
-                health_entry_flags_to_json_array(wb, "flags", t->flags);
-                buffer_json_member_add_string(wb, "exec", *t->exec ? t->exec : string2str(localhost->health.health_default_exec));
-                buffer_json_member_add_uint64(wb, "exec_code", t->exec_code);
-                buffer_json_member_add_string(wb, "to", *t->recipient ? t->recipient : string2str(localhost->health.health_default_recipient));
-            }
-            buffer_json_object_close(wb); // notification
-        }
-        buffer_json_object_close(wb); // a transition
-    }
-    buffer_json_array_close(wb); // all transitions
-
-    if(ctl->options & CONTEXT_V2_OPTION_ALERTS_WITH_CONFIGURATIONS) {
-        DICTIONARY *configs = dictionary_create(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE);
-
-        for(struct sql_alert_transition_fixed_size *t = data.base; t ; t = t->next) {
-            char guid[UUID_STR_LEN];
-            uuid_unparse_lower(t->config_hash_id, guid);
-            dictionary_set(configs, guid, NULL, 0);
-        }
-
-        buffer_json_member_add_array(wb, "configurations");
-        sql_get_alert_configuration(configs, contexts_v2_alert_config_to_json_from_sql_alert_config_data, &data, debug);
-        buffer_json_array_close(wb);
-
-        dictionary_destroy(configs);
-    }
-
-    while(data.base) {
-        struct sql_alert_transition_fixed_size *t = data.base;
-        DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(data.base, t, prev, next);
-        contexts_v2_alert_transition_free(t);
-    }
-
-    for(size_t i = 0; i < ATF_TOTAL_ENTRIES ;i++) {
-        dictionary_destroy(data.facets[i].dict);
-        simple_pattern_free(data.facets[i].pattern);
-    }
-
-    buffer_json_member_add_object(wb, "items");
-    {
-        // all the items in the window, under the scope_nodes, ignoring the facets (filters)
-        buffer_json_member_add_uint64(wb, "evaluated", data.items_evaluated);
-
-        // all the items matching the query (if you didn't put anchor_gi and last, these are all the items you would get back)
-        buffer_json_member_add_uint64(wb, "matched", data.items_matched);
-
-        // the items included in this response
-        buffer_json_member_add_uint64(wb, "returned", data.items_to_return);
-
-        // same as last=X parameter
-        buffer_json_member_add_uint64(wb, "max_to_return", data.max_items_to_return);
-
-        // items before the first returned, this should be 0 if anchor_gi is not set
-        buffer_json_member_add_uint64(wb, "before", data.operations.skips_before);
-
-        // items after the last returned, when this is zero there aren't any items after the current list
-        buffer_json_member_add_uint64(wb, "after", data.operations.skips_after + data.operations.shifts);
-    }
-    buffer_json_object_close(wb); // items
-
-    if(debug) {
-        buffer_json_member_add_object(wb, "stats");
-        {
-            buffer_json_member_add_uint64(wb, "first", data.operations.first);
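// The debug-only "stats" object being built here exposes the counters kept by
// contexts_v2_alert_transition_keep(): "first" seeds the list,
// "prepend"/"append" are ordered insertions, "backwards"/"forwards" count
// cursor moves while locating the insertion point, and "shifts" counts tail
// evictions once the list exceeds max_items_to_return; together with
// "skips_before"/"skips_after" they describe how much work the anchored
// pagination did for this query.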
buffer_json_member_add_uint64(wb, "prepend", data.operations.prepend); - buffer_json_member_add_uint64(wb, "append", data.operations.append); - buffer_json_member_add_uint64(wb, "backwards", data.operations.backwards); - buffer_json_member_add_uint64(wb, "forwards", data.operations.forwards); - buffer_json_member_add_uint64(wb, "shifts", data.operations.shifts); - buffer_json_member_add_uint64(wb, "skips_before", data.operations.skips_before); - buffer_json_member_add_uint64(wb, "skips_after", data.operations.skips_after); - } - buffer_json_object_close(wb); - } -} - -int rrdcontext_to_json_v2(BUFFER *wb, struct api_v2_contexts_request *req, CONTEXTS_V2_MODE mode) { - int resp = HTTP_RESP_OK; - bool run = true; - - if(mode & CONTEXTS_V2_SEARCH) - mode |= CONTEXTS_V2_CONTEXTS; - - if(mode & (CONTEXTS_V2_AGENTS_INFO)) - mode |= CONTEXTS_V2_AGENTS; - - if(mode & (CONTEXTS_V2_FUNCTIONS | CONTEXTS_V2_CONTEXTS | CONTEXTS_V2_SEARCH | CONTEXTS_V2_NODES_INFO | CONTEXTS_V2_NODE_INSTANCES)) - mode |= CONTEXTS_V2_NODES; - - if(mode & CONTEXTS_V2_ALERTS) { - mode |= CONTEXTS_V2_NODES; - req->options &= ~CONTEXT_V2_OPTION_ALERTS_WITH_CONFIGURATIONS; - - if(!(req->options & (CONTEXT_V2_OPTION_ALERTS_WITH_SUMMARY|CONTEXT_V2_OPTION_ALERTS_WITH_INSTANCES|CONTEXT_V2_OPTION_ALERTS_WITH_VALUES))) - req->options |= CONTEXT_V2_OPTION_ALERTS_WITH_SUMMARY; - } - - if(mode & CONTEXTS_V2_ALERT_TRANSITIONS) { - mode |= CONTEXTS_V2_NODES; - req->options &= ~CONTEXT_V2_OPTION_ALERTS_WITH_INSTANCES; - } - - struct rrdcontext_to_json_v2_data ctl = { - .wb = wb, - .request = req, - .mode = mode, - .options = req->options, - .versions = { 0 }, - .nodes.scope_pattern = string_to_simple_pattern(req->scope_nodes), - .nodes.pattern = string_to_simple_pattern(req->nodes), - .contexts.pattern = string_to_simple_pattern(req->contexts), - .contexts.scope_pattern = string_to_simple_pattern(req->scope_contexts), - .q.pattern = string_to_simple_pattern_nocase(req->q), - .alerts.alert_name_pattern = string_to_simple_pattern(req->alerts.alert), - .window = { - .enabled = false, - .relative = false, - .after = req->after, - .before = req->before, - }, - .timings = { - .received_ut = now_monotonic_usec(), - } - }; - - bool debug = ctl.options & CONTEXT_V2_OPTION_DEBUG; - - if(mode & CONTEXTS_V2_NODES) { - ctl.nodes.dict = dictionary_create_advanced(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, - NULL, sizeof(struct contexts_v2_node)); - } - - if(mode & CONTEXTS_V2_CONTEXTS) { - ctl.contexts.dict = dictionary_create_advanced( - DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, NULL, - sizeof(struct context_v2_entry)); - - dictionary_register_conflict_callback(ctl.contexts.dict, contexts_conflict_callback, &ctl); - dictionary_register_delete_callback(ctl.contexts.dict, contexts_delete_callback, &ctl); - } - - if(mode & CONTEXTS_V2_FUNCTIONS) { - ctl.functions.dict = dictionary_create_advanced( - DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, NULL, - sizeof(struct function_v2_entry)); - - dictionary_register_insert_callback(ctl.functions.dict, functions_insert_callback, &ctl); - dictionary_register_conflict_callback(ctl.functions.dict, functions_conflict_callback, &ctl); - dictionary_register_delete_callback(ctl.functions.dict, functions_delete_callback, &ctl); - } - - if(mode & CONTEXTS_V2_ALERTS) { - if(req->alerts.transition) { - ctl.options |= 
CONTEXT_V2_OPTION_ALERTS_WITH_INSTANCES|CONTEXT_V2_OPTION_ALERTS_WITH_VALUES; - run = sql_find_alert_transition(req->alerts.transition, rrdcontext_v2_set_transition_filter, &ctl); - if(!run) { - resp = HTTP_RESP_NOT_FOUND; - goto cleanup; - } - } - - ctl.alerts.summary = dictionary_create_advanced(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, - NULL, sizeof(struct alert_v2_entry)); - - dictionary_register_insert_callback(ctl.alerts.summary, alerts_v2_insert_callback, &ctl); - dictionary_register_conflict_callback(ctl.alerts.summary, alerts_v2_conflict_callback, &ctl); - dictionary_register_delete_callback(ctl.alerts.summary, alerts_v2_delete_callback, &ctl); - - ctl.alerts.by_type = dictionary_create_advanced(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, - NULL, sizeof(struct alert_by_x_entry)); - - dictionary_register_insert_callback(ctl.alerts.by_type, alerts_by_x_insert_callback, NULL); - dictionary_register_conflict_callback(ctl.alerts.by_type, alerts_by_x_conflict_callback, NULL); - - ctl.alerts.by_component = dictionary_create_advanced(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, - NULL, sizeof(struct alert_by_x_entry)); - - dictionary_register_insert_callback(ctl.alerts.by_component, alerts_by_x_insert_callback, NULL); - dictionary_register_conflict_callback(ctl.alerts.by_component, alerts_by_x_conflict_callback, NULL); - - ctl.alerts.by_classification = dictionary_create_advanced(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, - NULL, sizeof(struct alert_by_x_entry)); - - dictionary_register_insert_callback(ctl.alerts.by_classification, alerts_by_x_insert_callback, NULL); - dictionary_register_conflict_callback(ctl.alerts.by_classification, alerts_by_x_conflict_callback, NULL); - - ctl.alerts.by_recipient = dictionary_create_advanced(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, - NULL, sizeof(struct alert_by_x_entry)); - - dictionary_register_insert_callback(ctl.alerts.by_recipient, alerts_by_x_insert_callback, NULL); - dictionary_register_conflict_callback(ctl.alerts.by_recipient, alerts_by_x_conflict_callback, NULL); - - ctl.alerts.by_module = dictionary_create_advanced(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, - NULL, sizeof(struct alert_by_x_entry)); - - dictionary_register_insert_callback(ctl.alerts.by_module, alerts_by_x_insert_callback, NULL); - dictionary_register_conflict_callback(ctl.alerts.by_module, alerts_by_x_conflict_callback, NULL); - - if(ctl.options & (CONTEXT_V2_OPTION_ALERTS_WITH_INSTANCES | CONTEXT_V2_OPTION_ALERTS_WITH_VALUES)) { - ctl.alerts.alert_instances = dictionary_create_advanced(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, - NULL, sizeof(struct sql_alert_instance_v2_entry)); - - dictionary_register_insert_callback(ctl.alerts.alert_instances, alert_instances_v2_insert_callback, &ctl); - dictionary_register_conflict_callback(ctl.alerts.alert_instances, alert_instances_v2_conflict_callback, &ctl); - dictionary_register_delete_callback(ctl.alerts.alert_instances, alert_instances_delete_callback, &ctl); - } - } - - if(req->after || req->before) { - ctl.window.relative = rrdr_relative_window_to_absolute_query(&ctl.window.after, &ctl.window.before, &ctl.now - , false - ); - ctl.window.enabled = !(mode & CONTEXTS_V2_ALERT_TRANSITIONS); - } - else - ctl.now = 
now_realtime_sec(); - - buffer_json_initialize(wb, "\"", "\"", 0, true, - ((req->options & CONTEXT_V2_OPTION_MINIFY) && !(req->options & CONTEXT_V2_OPTION_DEBUG)) ? BUFFER_JSON_OPTIONS_MINIFY : BUFFER_JSON_OPTIONS_DEFAULT); - - buffer_json_member_add_uint64(wb, "api", 2); - - if(req->options & CONTEXT_V2_OPTION_DEBUG) { - buffer_json_member_add_object(wb, "request"); - { - buffer_json_contexts_v2_mode_to_array(wb, "mode", mode); - web_client_api_request_v2_contexts_options_to_buffer_json_array(wb, "options", req->options); - - buffer_json_member_add_object(wb, "scope"); - { - buffer_json_member_add_string(wb, "scope_nodes", req->scope_nodes); - if (mode & (CONTEXTS_V2_CONTEXTS | CONTEXTS_V2_SEARCH | CONTEXTS_V2_ALERTS)) - buffer_json_member_add_string(wb, "scope_contexts", req->scope_contexts); - } - buffer_json_object_close(wb); - - buffer_json_member_add_object(wb, "selectors"); - { - buffer_json_member_add_string(wb, "nodes", req->nodes); - - if (mode & (CONTEXTS_V2_CONTEXTS | CONTEXTS_V2_SEARCH | CONTEXTS_V2_ALERTS)) - buffer_json_member_add_string(wb, "contexts", req->contexts); - - if(mode & (CONTEXTS_V2_ALERTS | CONTEXTS_V2_ALERT_TRANSITIONS)) { - buffer_json_member_add_object(wb, "alerts"); - - if(mode & CONTEXTS_V2_ALERTS) - web_client_api_request_v2_contexts_alerts_status_to_buffer_json_array(wb, "status", req->alerts.status); - - if(mode & CONTEXTS_V2_ALERT_TRANSITIONS) { - buffer_json_member_add_string(wb, "context", req->contexts); - buffer_json_member_add_uint64(wb, "anchor_gi", req->alerts.global_id_anchor); - buffer_json_member_add_uint64(wb, "last", req->alerts.last); - } - - buffer_json_member_add_string(wb, "alert", req->alerts.alert); - buffer_json_member_add_string(wb, "transition", req->alerts.transition); - buffer_json_object_close(wb); // alerts - } - } - buffer_json_object_close(wb); // selectors - - buffer_json_member_add_object(wb, "filters"); - { - if (mode & CONTEXTS_V2_SEARCH) - buffer_json_member_add_string(wb, "q", req->q); - - buffer_json_member_add_time_t(wb, "after", req->after); - buffer_json_member_add_time_t(wb, "before", req->before); - } - buffer_json_object_close(wb); // filters - - if(mode & CONTEXTS_V2_ALERT_TRANSITIONS) { - buffer_json_member_add_object(wb, "facets"); - { - for (int i = 0; i < ATF_TOTAL_ENTRIES; i++) { - buffer_json_member_add_string(wb, alert_transition_facets[i].query_param, req->alerts.facets[i]); - } - } - buffer_json_object_close(wb); // facets - } - } - buffer_json_object_close(wb); - } - - ssize_t ret = 0; - if(run) - ret = query_scope_foreach_host(ctl.nodes.scope_pattern, ctl.nodes.pattern, - rrdcontext_to_json_v2_add_host, &ctl, - &ctl.versions, ctl.q.host_node_id_str); - - if(unlikely(ret < 0)) { - buffer_flush(wb); - - if(ret == -2) { - buffer_strcat(wb, "query timeout"); - resp = HTTP_RESP_GATEWAY_TIMEOUT; - } - else { - buffer_strcat(wb, "query interrupted"); - resp = HTTP_RESP_CLIENT_CLOSED_REQUEST; - } - goto cleanup; - } - - ctl.timings.executed_ut = now_monotonic_usec(); - - if(mode & CONTEXTS_V2_ALERT_TRANSITIONS) { - contexts_v2_alert_transitions_to_json(wb, &ctl, debug); - } - else { - if (mode & CONTEXTS_V2_NODES) { - buffer_json_member_add_array(wb, "nodes"); - struct contexts_v2_node *t; - dfe_start_read(ctl.nodes.dict, t) { - rrdcontext_to_json_v2_rrdhost(wb, t->host, &ctl, t->ni); - } - dfe_done(t); - buffer_json_array_close(wb); - } - - if (mode & CONTEXTS_V2_FUNCTIONS) { - buffer_json_member_add_array(wb, "functions"); - { - struct function_v2_entry *t; - dfe_start_read(ctl.functions.dict, t) { - 
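// dfe_start_read()/dfe_done() is the dictionary walkthrough macro pair used
// throughout this file; inside the loop, <var>_dfe.name (t_dfe.name here)
// exposes the key of the current item, which is why struct function_v2_entry
// does not need to store the function's name itself.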
-                    buffer_json_add_array_item_object(wb);
-                    {
-                        buffer_json_member_add_string(wb, "name", t_dfe.name);
-                        buffer_json_member_add_string(wb, "help", string2str(t->help));
-                        buffer_json_member_add_array(wb, "ni");
-                        {
-                            for (size_t i = 0; i < t->used; i++)
-                                buffer_json_add_array_item_uint64(wb, t->node_ids[i]);
-                        }
-                        buffer_json_array_close(wb);
-                        buffer_json_member_add_string(wb, "tags", string2str(t->tags));
-                        http_access2buffer_json_array(wb, "access", t->access);
-                        buffer_json_member_add_uint64(wb, "priority", t->priority);
-                    }
-                    buffer_json_object_close(wb);
-                }
-                dfe_done(t);
-            }
-            buffer_json_array_close(wb);
-        }
-
-        if (mode & CONTEXTS_V2_CONTEXTS) {
-            buffer_json_member_add_object(wb, "contexts");
-            {
-                struct context_v2_entry *z;
-                dfe_start_read(ctl.contexts.dict, z) {
-                    bool collected = z->flags & RRD_FLAG_COLLECTED;
-
-                    buffer_json_member_add_object(wb, string2str(z->id));
-                    {
-                        buffer_json_member_add_string(wb, "family", string2str(z->family));
-                        buffer_json_member_add_uint64(wb, "priority", z->priority);
-                        buffer_json_member_add_time_t(wb, "first_entry", z->first_time_s);
-                        buffer_json_member_add_time_t(wb, "last_entry", collected ? ctl.now : z->last_time_s);
-                        buffer_json_member_add_boolean(wb, "live", collected);
-                        if (mode & CONTEXTS_V2_SEARCH)
-                            buffer_json_member_add_string(wb, "match", fts_match_to_string(z->match));
-                    }
-                    buffer_json_object_close(wb);
-                }
-                dfe_done(z);
-            }
-            buffer_json_object_close(wb); // contexts
-        }
-
-        if (mode & CONTEXTS_V2_ALERTS)
-            contexts_v2_alerts_to_json(wb, &ctl, debug);
-
-        if (mode & CONTEXTS_V2_SEARCH) {
-            buffer_json_member_add_object(wb, "searches");
-            {
-                buffer_json_member_add_uint64(wb, "strings", ctl.q.fts.string_searches);
-                buffer_json_member_add_uint64(wb, "char", ctl.q.fts.char_searches);
-                buffer_json_member_add_uint64(wb, "total", ctl.q.fts.searches);
-            }
-            buffer_json_object_close(wb);
-        }
-
-        if (mode & (CONTEXTS_V2_VERSIONS))
-            version_hashes_api_v2(wb, &ctl.versions);
-
-        if (mode & CONTEXTS_V2_AGENTS)
-            buffer_json_agents_v2(wb, &ctl.timings, ctl.now, mode & (CONTEXTS_V2_AGENTS_INFO), true);
-    }
-
-    buffer_json_cloud_timings(wb, "timings", &ctl.timings);
-
-    buffer_json_finalize(wb);
-
-cleanup:
-    dictionary_destroy(ctl.nodes.dict);
-    dictionary_destroy(ctl.contexts.dict);
-    dictionary_destroy(ctl.functions.dict);
-    dictionary_destroy(ctl.alerts.summary);
-    dictionary_destroy(ctl.alerts.alert_instances);
-    dictionary_destroy(ctl.alerts.by_type);
-    dictionary_destroy(ctl.alerts.by_component);
-    dictionary_destroy(ctl.alerts.by_classification);
-    dictionary_destroy(ctl.alerts.by_recipient);
-    dictionary_destroy(ctl.alerts.by_module);
-    simple_pattern_free(ctl.nodes.scope_pattern);
-    simple_pattern_free(ctl.nodes.pattern);
-    simple_pattern_free(ctl.contexts.pattern);
-    simple_pattern_free(ctl.contexts.scope_pattern);
-    simple_pattern_free(ctl.q.pattern);
-    simple_pattern_free(ctl.alerts.alert_name_pattern);
-
-    return resp;
-}
diff --git a/src/database/contexts/api_v2_contexts.c b/src/database/contexts/api_v2_contexts.c
new file mode 100644
index 000000000..d8d945afb
--- /dev/null
+++ b/src/database/contexts/api_v2_contexts.c
@@ -0,0 +1,1033 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "api_v2_contexts.h"
+
+#include "aclk/aclk_capas.h"
+
+// ----------------------------------------------------------------------------
+// /api/v2/contexts API
+
+static const char *fts_match_to_string(FTS_MATCH match) {
+    switch(match) {
+        case FTS_MATCHED_HOST:
+            return "HOST";
+
+        case FTS_MATCHED_CONTEXT:
+            return "CONTEXT";
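// The order of these FTS_MATCHED_* cases appears to follow the numeric order
// of the FTS_MATCH enum: contexts_conflict_callback() in the removed file
// above merges duplicate contexts with o->match = MIN(o->match, n->match), so
// lower enum values win when the same context matches on several nodes.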
+
+        case FTS_MATCHED_INSTANCE:
+            return "INSTANCE";
+
+        case FTS_MATCHED_DIMENSION:
+            return "DIMENSION";
+
+        case FTS_MATCHED_ALERT:
+            return "ALERT";
+
+        case FTS_MATCHED_ALERT_INFO:
+            return "ALERT_INFO";
+
+        case FTS_MATCHED_LABEL:
+            return "LABEL";
+
+        case FTS_MATCHED_FAMILY:
+            return "FAMILY";
+
+        case FTS_MATCHED_TITLE:
+            return "TITLE";
+
+        case FTS_MATCHED_UNITS:
+            return "UNITS";
+
+        default:
+            return "NONE";
+    }
+}
+
+struct function_v2_entry {
+    size_t size;
+    size_t used;
+    size_t *node_ids;
+    STRING *help;
+    STRING *tags;
+    HTTP_ACCESS access;
+    int priority;
+    uint32_t version;
+};
+
+struct context_v2_entry {
+    size_t count;
+    STRING *id;
+    STRING *family;
+    uint32_t priority;
+    time_t first_time_s;
+    time_t last_time_s;
+    RRD_FLAGS flags;
+    FTS_MATCH match;
+};
+
+static inline bool full_text_search_string(FTS_INDEX *fts, SIMPLE_PATTERN *q, STRING *ptr) {
+    fts->searches++;
+    fts->string_searches++;
+    return simple_pattern_matches_string(q, ptr);
+}
+
+static inline bool full_text_search_char(FTS_INDEX *fts, SIMPLE_PATTERN *q, char *ptr) {
+    fts->searches++;
+    fts->char_searches++;
+    return simple_pattern_matches(q, ptr);
+}
+
+static FTS_MATCH rrdcontext_to_json_v2_full_text_search(struct rrdcontext_to_json_v2_data *ctl, RRDCONTEXT *rc, SIMPLE_PATTERN *q) {
+    if(unlikely(full_text_search_string(&ctl->q.fts, q, rc->id) ||
+                full_text_search_string(&ctl->q.fts, q, rc->family)))
+        return FTS_MATCHED_CONTEXT;
+
+    if(unlikely(full_text_search_string(&ctl->q.fts, q, rc->title)))
+        return FTS_MATCHED_TITLE;
+
+    if(unlikely(full_text_search_string(&ctl->q.fts, q, rc->units)))
+        return FTS_MATCHED_UNITS;
+
+    FTS_MATCH matched = FTS_MATCHED_NONE;
+    RRDINSTANCE *ri;
+    dfe_start_read(rc->rrdinstances, ri) {
+        if(matched) break;
+
+        if(ctl->window.enabled && !query_matches_retention(ctl->window.after, ctl->window.before, ri->first_time_s, (ri->flags & RRD_FLAG_COLLECTED) ? ctl->now : ri->last_time_s, 0))
+            continue;
+
+        if(unlikely(full_text_search_string(&ctl->q.fts, q, ri->id)) ||
+           (ri->name != ri->id && full_text_search_string(&ctl->q.fts, q, ri->name))) {
+            matched = FTS_MATCHED_INSTANCE;
+            break;
+        }
+
+        RRDMETRIC *rm;
+        dfe_start_read(ri->rrdmetrics, rm) {
+            if(ctl->window.enabled && !query_matches_retention(ctl->window.after, ctl->window.before, rm->first_time_s, (rm->flags & RRD_FLAG_COLLECTED) ? ctl->now : rm->last_time_s, 0))
+                continue;
+
+            if(unlikely(full_text_search_string(&ctl->q.fts, q, rm->id)) ||
+               (rm->name != rm->id && full_text_search_string(&ctl->q.fts, q, rm->name))) {
+                matched = FTS_MATCHED_DIMENSION;
+                break;
+            }
+        }
+        dfe_done(rm);
+
+        size_t label_searches = 0;
+        if(unlikely(ri->rrdlabels && rrdlabels_entries(ri->rrdlabels) &&
+                    rrdlabels_match_simple_pattern_parsed(ri->rrdlabels, q, ':', &label_searches) == SP_MATCHED_POSITIVE)) {
+            ctl->q.fts.searches += label_searches;
+            ctl->q.fts.char_searches += label_searches;
+            matched = FTS_MATCHED_LABEL;
+            break;
+        }
+        ctl->q.fts.searches += label_searches;
+        ctl->q.fts.char_searches += label_searches;
+
+        if(ri->rrdset) {
+            RRDSET *st = ri->rrdset;
+            rw_spinlock_read_lock(&st->alerts.spinlock);
+            for (RRDCALC *rcl = st->alerts.base; rcl; rcl = rcl->next) {
+                if(unlikely(full_text_search_string(&ctl->q.fts, q, rcl->config.name))) {
+                    matched = FTS_MATCHED_ALERT;
+                    break;
+                }
+
+                if(unlikely(full_text_search_string(&ctl->q.fts, q, rcl->config.info))) {
+                    matched = FTS_MATCHED_ALERT_INFO;
+                    break;
+                }
+            }
+            rw_spinlock_read_unlock(&st->alerts.spinlock);
+        }
+    }
+    dfe_done(ri);
+    return matched;
+}
+
+static ssize_t rrdcontext_to_json_v2_add_context(void *data, RRDCONTEXT_ACQUIRED *rca, bool queryable_context __maybe_unused) {
+    struct rrdcontext_to_json_v2_data *ctl = data;
+
+    RRDCONTEXT *rc = rrdcontext_acquired_value(rca);
+
+    if(ctl->window.enabled && !query_matches_retention(ctl->window.after, ctl->window.before, rc->first_time_s, (rc->flags & RRD_FLAG_COLLECTED) ? ctl->now : rc->last_time_s, 0))
+        return 0; // continue to next context
+
+    FTS_MATCH match = ctl->q.host_match;
+    if((ctl->mode & CONTEXTS_V2_SEARCH) && ctl->q.pattern) {
+        match = rrdcontext_to_json_v2_full_text_search(ctl, rc, ctl->q.pattern);
+
+        if(match == FTS_MATCHED_NONE)
+            return 0; // continue to next context
+    }
+
+    if(ctl->mode & CONTEXTS_V2_ALERTS) {
+        if(!rrdcontext_matches_alert(ctl, rc))
+            return 0; // continue to next context
+    }
+
+    if(ctl->contexts.dict) {
+        struct context_v2_entry t = {
+            .count = 1,
+            .id = rc->id,
+            .family = string_dup(rc->family),
+            .priority = rc->priority,
+            .first_time_s = rc->first_time_s,
+            .last_time_s = rc->last_time_s,
+            .flags = rc->flags,
+            .match = match,
+        };
+
+        dictionary_set(ctl->contexts.dict, string2str(rc->id), &t, sizeof(struct context_v2_entry));
+    }
+
+    return 1;
+}
+
+void buffer_json_agent_status_id(BUFFER *wb, size_t ai, usec_t duration_ut) {
+    buffer_json_member_add_object(wb, "st");
+    {
+        buffer_json_member_add_uint64(wb, "ai", ai);
+        buffer_json_member_add_uint64(wb, "code", 200);
+        buffer_json_member_add_string(wb, "msg", "");
+        if (duration_ut)
+            buffer_json_member_add_double(wb, "ms", (NETDATA_DOUBLE) duration_ut / 1000.0);
+    }
+    buffer_json_object_close(wb);
+}
+
+void buffer_json_node_add_v2(BUFFER *wb, RRDHOST *host, size_t ni, usec_t duration_ut, bool status) {
+    buffer_json_member_add_string(wb, "mg", host->machine_guid);
+
+    if(!UUIDiszero(host->node_id))
+        buffer_json_member_add_uuid(wb, "nd", host->node_id.uuid);
+    buffer_json_member_add_string(wb, "nm", rrdhost_hostname(host));
+    buffer_json_member_add_uint64(wb, "ni", ni);
+
+    if(status)
+        buffer_json_agent_status_id(wb, 0, duration_ut);
+}
+
+static void rrdhost_receiver_to_json(BUFFER *wb, RRDHOST_STATUS *s, const char *key) {
+    buffer_json_member_add_object(wb, key);
+    {
+        buffer_json_member_add_uint64(wb, "id", s->ingest.id);
+        buffer_json_member_add_uint64(wb, "hops", s->ingest.hops);
+        buffer_json_member_add_string(wb, "type", rrdhost_ingest_type_to_string(s->ingest.type));
+        buffer_json_member_add_string(wb, "status", rrdhost_ingest_status_to_string(s->ingest.status));
+        buffer_json_member_add_time_t(wb, "since", s->ingest.since);
+        buffer_json_member_add_time_t(wb, "age", s->now - s->ingest.since);
+
+        if(s->ingest.type == RRDHOST_INGEST_TYPE_CHILD) {
+            if(s->ingest.status == RRDHOST_INGEST_STATUS_OFFLINE)
+                buffer_json_member_add_string(wb, "reason", stream_handshake_error_to_string(s->ingest.reason));
+
+            if(s->ingest.status == RRDHOST_INGEST_STATUS_REPLICATING) {
+                buffer_json_member_add_object(wb, "replication");
+                {
+                    buffer_json_member_add_boolean(wb, "in_progress", s->ingest.replication.in_progress);
+                    buffer_json_member_add_double(wb, "completion", s->ingest.replication.completion);
+                    buffer_json_member_add_uint64(wb, "instances", s->ingest.replication.instances);
+                }
+                buffer_json_object_close(wb); // replication
+            }
+
+            if(s->ingest.status == RRDHOST_INGEST_STATUS_REPLICATING || s->ingest.status == RRDHOST_INGEST_STATUS_ONLINE) {
+                buffer_json_member_add_object(wb, "source");
+                {
+                    char buf[1024 + 1];
+                    snprintfz(buf, sizeof(buf) - 1, "[%s]:%d%s", s->ingest.peers.local.ip, s->ingest.peers.local.port, s->ingest.ssl ? ":SSL" : "");
+                    buffer_json_member_add_string(wb, "local", buf);
+
+                    snprintfz(buf, sizeof(buf) - 1, "[%s]:%d%s", s->ingest.peers.peer.ip, s->ingest.peers.peer.port, s->ingest.ssl ? ":SSL" : "");
+                    buffer_json_member_add_string(wb, "remote", buf);
+
+                    stream_capabilities_to_json_array(wb, s->ingest.capabilities, "capabilities");
+                }
+                buffer_json_object_close(wb); // source
+            }
+        }
+    }
+    buffer_json_object_close(wb); // collection
+}
+
+static void rrdhost_sender_to_json(BUFFER *wb, RRDHOST_STATUS *s, const char *key) {
+    if(s->stream.status == RRDHOST_STREAM_STATUS_DISABLED)
+        return;
+
+    buffer_json_member_add_object(wb, key);
+    {
+        buffer_json_member_add_uint64(wb, "id", s->stream.id);
+        buffer_json_member_add_uint64(wb, "hops", s->stream.hops);
+        buffer_json_member_add_string(wb, "status", rrdhost_streaming_status_to_string(s->stream.status));
+        buffer_json_member_add_time_t(wb, "since", s->stream.since);
+        buffer_json_member_add_time_t(wb, "age", s->now - s->stream.since);
+
+        if (s->stream.status == RRDHOST_STREAM_STATUS_OFFLINE)
+            buffer_json_member_add_string(wb, "reason", stream_handshake_error_to_string(s->stream.reason));
+
+        if (s->stream.status == RRDHOST_STREAM_STATUS_REPLICATING) {
+            buffer_json_member_add_object(wb, "replication");
+            {
+                buffer_json_member_add_boolean(wb, "in_progress", s->stream.replication.in_progress);
+                buffer_json_member_add_double(wb, "completion", s->stream.replication.completion);
+                buffer_json_member_add_uint64(wb, "instances", s->stream.replication.instances);
+            }
+            buffer_json_object_close(wb);
+        }
+
+        buffer_json_member_add_object(wb, "destination");
+        {
+            char buf[1024 + 1];
+            snprintfz(buf, sizeof(buf) - 1, "[%s]:%d%s", s->stream.peers.local.ip, s->stream.peers.local.port, s->stream.ssl ? ":SSL" : "");
+            buffer_json_member_add_string(wb, "local", buf);
+
+            snprintfz(buf, sizeof(buf) - 1, "[%s]:%d%s", s->stream.peers.peer.ip, s->stream.peers.peer.port, s->stream.ssl ? ":SSL" : "");
+            buffer_json_member_add_string(wb, "remote", buf);
+
+            stream_capabilities_to_json_array(wb, s->stream.capabilities, "capabilities");
+
+            buffer_json_member_add_object(wb, "traffic");
+            {
+                buffer_json_member_add_boolean(wb, "compression", s->stream.compression);
+                buffer_json_member_add_uint64(wb, "data", s->stream.sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_DATA]);
+                buffer_json_member_add_uint64(wb, "metadata", s->stream.sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_METADATA]);
+                buffer_json_member_add_uint64(wb, "functions", s->stream.sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_FUNCTIONS]);
+                buffer_json_member_add_uint64(wb, "replication", s->stream.sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_REPLICATION]);
+                buffer_json_member_add_uint64(wb, "dyncfg", s->stream.sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_DYNCFG]);
+            }
+            buffer_json_object_close(wb); // traffic
+
+            buffer_json_member_add_array(wb, "candidates");
+            struct rrdpush_destinations *d;
+            for (d = s->host->destinations; d; d = d->next) {
+                buffer_json_add_array_item_object(wb);
+                buffer_json_member_add_uint64(wb, "attempts", d->attempts);
+                {
+
+                    if (d->ssl) {
+                        snprintfz(buf, sizeof(buf) - 1, "%s:SSL", string2str(d->destination));
+                        buffer_json_member_add_string(wb, "destination", buf);
+                    }
+                    else
+                        buffer_json_member_add_string(wb, "destination", string2str(d->destination));
+
+                    buffer_json_member_add_time_t(wb, "since", d->since);
+                    buffer_json_member_add_time_t(wb, "age", s->now - d->since);
+                    buffer_json_member_add_string(wb, "last_handshake", stream_handshake_error_to_string(d->reason));
+                    if(d->postpone_reconnection_until > s->now) {
+                        buffer_json_member_add_time_t(wb, "next_check", d->postpone_reconnection_until);
+                        buffer_json_member_add_time_t(wb, "next_in", d->postpone_reconnection_until - s->now);
+                    }
+                }
+                buffer_json_object_close(wb); // each candidate
+            }
+            buffer_json_array_close(wb); // candidates
+        }
+        buffer_json_object_close(wb); // destination
+    }
+    buffer_json_object_close(wb); // streaming
+}
+
+void agent_capabilities_to_json(BUFFER *wb, RRDHOST *host, const char *key) {
+    buffer_json_member_add_array(wb, key);
+
+    struct capability *capas = aclk_get_node_instance_capas(host);
+    for(struct capability *capa = capas; capa->name ;capa++) {
+        buffer_json_add_array_item_object(wb);
+        {
+            buffer_json_member_add_string(wb, "name", capa->name);
+            buffer_json_member_add_uint64(wb, "version", capa->version);
+            buffer_json_member_add_boolean(wb, "enabled", capa->enabled);
+        }
+        buffer_json_object_close(wb);
+    }
+    buffer_json_array_close(wb);
+    freez(capas);
+}
+
+static inline void host_dyncfg_to_json_v2(BUFFER *wb, const char *key, RRDHOST_STATUS *s) {
+    buffer_json_member_add_object(wb, key);
+    {
+        buffer_json_member_add_string(wb, "status", rrdhost_dyncfg_status_to_string(s->dyncfg.status));
+    }
+    buffer_json_object_close(wb); // health
+
+}
+
+static inline void rrdhost_health_to_json_v2(BUFFER *wb, const char *key, RRDHOST_STATUS *s) {
+    buffer_json_member_add_object(wb, key);
+    {
+        buffer_json_member_add_string(wb, "status", rrdhost_health_status_to_string(s->health.status));
+        if (s->health.status == RRDHOST_HEALTH_STATUS_RUNNING) {
+            buffer_json_member_add_object(wb, "alerts");
+            {
+                buffer_json_member_add_uint64(wb, "critical", s->health.alerts.critical);
+                buffer_json_member_add_uint64(wb, "warning", s->health.alerts.warning);
+                buffer_json_member_add_uint64(wb, "clear", s->health.alerts.clear);
buffer_json_member_add_uint64(wb, "undefined", s->health.alerts.undefined); + buffer_json_member_add_uint64(wb, "uninitialized", s->health.alerts.uninitialized); + } + buffer_json_object_close(wb); // alerts + } + } + buffer_json_object_close(wb); // health +} + +static void rrdcontext_to_json_v2_rrdhost(BUFFER *wb, RRDHOST *host, struct rrdcontext_to_json_v2_data *ctl, size_t node_id) { + buffer_json_add_array_item_object(wb); // this node + buffer_json_node_add_v2(wb, host, node_id, 0, + (ctl->mode & CONTEXTS_V2_AGENTS) && !(ctl->mode & CONTEXTS_V2_NODE_INSTANCES)); + + if(ctl->mode & (CONTEXTS_V2_NODES_INFO | CONTEXTS_V2_NODE_INSTANCES)) { + RRDHOST_STATUS s; + rrdhost_status(host, ctl->now, &s); + + if (ctl->mode & (CONTEXTS_V2_NODES_INFO)) { + buffer_json_member_add_string(wb, "v", rrdhost_program_version(host)); + + host_labels2json(host, wb, "labels"); + + if (host->system_info) { + buffer_json_member_add_object(wb, "hw"); + { + buffer_json_member_add_string_or_empty(wb, "architecture", host->system_info->architecture); + buffer_json_member_add_string_or_empty(wb, "cpu_frequency", host->system_info->host_cpu_freq); + buffer_json_member_add_string_or_empty(wb, "cpus", host->system_info->host_cores); + buffer_json_member_add_string_or_empty(wb, "memory", host->system_info->host_ram_total); + buffer_json_member_add_string_or_empty(wb, "disk_space", host->system_info->host_disk_space); + buffer_json_member_add_string_or_empty(wb, "virtualization", host->system_info->virtualization); + buffer_json_member_add_string_or_empty(wb, "container", host->system_info->container); + } + buffer_json_object_close(wb); + + buffer_json_member_add_object(wb, "os"); + { + buffer_json_member_add_string_or_empty(wb, "id", host->system_info->host_os_id); + buffer_json_member_add_string_or_empty(wb, "nm", host->system_info->host_os_name); + buffer_json_member_add_string_or_empty(wb, "v", host->system_info->host_os_version); + buffer_json_member_add_object(wb, "kernel"); + buffer_json_member_add_string_or_empty(wb, "nm", host->system_info->kernel_name); + buffer_json_member_add_string_or_empty(wb, "v", host->system_info->kernel_version); + buffer_json_object_close(wb); + } + buffer_json_object_close(wb); + } + + // created - the node is created but never connected to cloud + // unreachable - not currently connected + // stale - connected but not having live data + // reachable - connected with live data + // pruned - not connected for some time and has been removed + buffer_json_member_add_string(wb, "state", rrdhost_is_online(host) ? 
"reachable" : "stale"); + + rrdhost_health_to_json_v2(wb, "health", &s); + agent_capabilities_to_json(wb, host, "capabilities"); + rrdhost_stream_path_to_json(wb, host, STREAM_PATH_JSON_MEMBER, false); + } + + if (ctl->mode & (CONTEXTS_V2_NODE_INSTANCES)) { + buffer_json_member_add_array(wb, "instances"); + buffer_json_add_array_item_object(wb); // this instance + { + buffer_json_agent_status_id(wb, 0, 0); + + buffer_json_member_add_object(wb, "db"); + { + buffer_json_member_add_string(wb, "status", rrdhost_db_status_to_string(s.db.status)); + buffer_json_member_add_string(wb, "liveness", rrdhost_db_liveness_to_string(s.db.liveness)); + buffer_json_member_add_string(wb, "mode", rrd_memory_mode_name(s.db.mode)); + buffer_json_member_add_time_t(wb, "first_time", s.db.first_time_s); + buffer_json_member_add_time_t(wb, "last_time", s.db.last_time_s); + buffer_json_member_add_uint64(wb, "metrics", s.db.metrics); + + spinlock_lock(&s.host->accounting.spinlock); + int64_t count = 0; + + if (s.host->accounting.cache_timestamp && + ctl->now - s.host->accounting.cache_timestamp < host->rrd_update_every * 1.5) + count = s.host->accounting.currently_collected; + else { + Pvoid_t *Pvalue; + bool first = true; + Word_t dimension_id = 0; + while ((Pvalue = JudyLFirstThenNext(s.host->accounting.JudyL, &dimension_id, &first))) { + RRDDIM *rd = *Pvalue; + if (rd->collector.last_collected_time.tv_sec > ctl->now - (rd->rrdset->update_every * 2)) + count++; + } + s.host->accounting.currently_collected = count; + s.host->accounting.cache_timestamp = ctl->now; + } + spinlock_unlock(&s.host->accounting.spinlock); + + buffer_json_member_add_uint64(wb, "currently_collected_metrics", count); + buffer_json_member_add_uint64(wb, "instances", s.db.instances); + buffer_json_member_add_uint64(wb, "contexts", s.db.contexts); + } + buffer_json_object_close(wb); + + rrdhost_receiver_to_json(wb, &s, "ingest"); + rrdhost_sender_to_json(wb, &s, "stream"); + + buffer_json_member_add_object(wb, "ml"); + buffer_json_member_add_string(wb, "status", rrdhost_ml_status_to_string(s.ml.status)); + buffer_json_member_add_string(wb, "type", rrdhost_ml_type_to_string(s.ml.type)); + if (s.ml.status == RRDHOST_ML_STATUS_RUNNING) { + buffer_json_member_add_object(wb, "metrics"); + { + buffer_json_member_add_uint64(wb, "anomalous", s.ml.metrics.anomalous); + buffer_json_member_add_uint64(wb, "normal", s.ml.metrics.normal); + buffer_json_member_add_uint64(wb, "trained", s.ml.metrics.trained); + buffer_json_member_add_uint64(wb, "pending", s.ml.metrics.pending); + buffer_json_member_add_uint64(wb, "silenced", s.ml.metrics.silenced); + } + buffer_json_object_close(wb); // metrics + } + buffer_json_object_close(wb); // ml + + rrdhost_health_to_json_v2(wb, "health", &s); + + host_functions2json(host, wb); // functions + agent_capabilities_to_json(wb, host, "capabilities"); + + host_dyncfg_to_json_v2(wb, "dyncfg", &s); + } + buffer_json_object_close(wb); // this instance + buffer_json_array_close(wb); // instances + } + } + buffer_json_object_close(wb); // this node +} + +static ssize_t rrdcontext_to_json_v2_add_host(void *data, RRDHOST *host, bool queryable_host) { + if(!queryable_host || !host->rrdctx.contexts) + // the host matches the 'scope_host' but does not match the 'host' patterns + // or the host does not have any contexts + return 0; // continue to next host + + struct rrdcontext_to_json_v2_data *ctl = data; + + if(ctl->window.enabled && !rrdhost_matches_window(host, ctl->window.after, ctl->window.before, ctl->now)) + // the host does 
not have data in the requested window + return 0; // continue to next host + + if(ctl->request->timeout_ms && now_monotonic_usec() > ctl->timings.received_ut + ctl->request->timeout_ms * USEC_PER_MS) + // timed out + return -2; // stop the query + + if(ctl->request->interrupt_callback && ctl->request->interrupt_callback(ctl->request->interrupt_callback_data)) + // interrupted + return -1; // stop the query + + bool host_matched = (ctl->mode & CONTEXTS_V2_NODES); + bool do_contexts = (ctl->mode & (CONTEXTS_V2_CONTEXTS | CONTEXTS_V2_ALERTS)); + + ctl->q.host_match = FTS_MATCHED_NONE; + if((ctl->mode & CONTEXTS_V2_SEARCH)) { + // check if we match the host itself + if(ctl->q.pattern && ( + full_text_search_string(&ctl->q.fts, ctl->q.pattern, host->hostname) || + full_text_search_char(&ctl->q.fts, ctl->q.pattern, host->machine_guid) || + (ctl->q.pattern && full_text_search_char(&ctl->q.fts, ctl->q.pattern, ctl->q.host_node_id_str)))) { + ctl->q.host_match = FTS_MATCHED_HOST; + do_contexts = true; + } + } + + if(do_contexts) { + // save it + SIMPLE_PATTERN *old_q = ctl->q.pattern; + + if(ctl->q.host_match == FTS_MATCHED_HOST) + // do not do pattern matching on contexts - we matched the host itself + ctl->q.pattern = NULL; + + ssize_t added = query_scope_foreach_context( + host, ctl->request->scope_contexts, + ctl->contexts.scope_pattern, ctl->contexts.pattern, + rrdcontext_to_json_v2_add_context, queryable_host, ctl); + + // restore it + ctl->q.pattern = old_q; + + if(unlikely(added < 0)) + return -1; // stop the query + + if(added) + host_matched = true; + } + + if(!host_matched) + return 0; + + if(ctl->mode & CONTEXTS_V2_FUNCTIONS) { + struct function_v2_entry t = { + .used = 1, + .size = 1, + .node_ids = &ctl->nodes.ni, + .help = NULL, + .tags = NULL, + .access = HTTP_ACCESS_ALL, + .priority = RRDFUNCTIONS_PRIORITY_DEFAULT, + .version = RRDFUNCTIONS_VERSION_DEFAULT, + }; + host_functions_to_dict(host, ctl->functions.dict, &t, sizeof(t), &t.help, &t.tags, &t.access, &t.priority, &t.version); + } + + if(ctl->mode & CONTEXTS_V2_NODES) { + struct contexts_v2_node t = { + .ni = ctl->nodes.ni++, + .host = host, + }; + + dictionary_set(ctl->nodes.dict, host->machine_guid, &t, sizeof(struct contexts_v2_node)); + } + + return 1; +} + +static void buffer_json_contexts_v2_mode_to_array(BUFFER *wb, const char *key, CONTEXTS_V2_MODE mode) { + buffer_json_member_add_array(wb, key); + + if(mode & CONTEXTS_V2_VERSIONS) + buffer_json_add_array_item_string(wb, "versions"); + + if(mode & CONTEXTS_V2_AGENTS) + buffer_json_add_array_item_string(wb, "agents"); + + if(mode & CONTEXTS_V2_AGENTS_INFO) + buffer_json_add_array_item_string(wb, "agents-info"); + + if(mode & CONTEXTS_V2_NODES) + buffer_json_add_array_item_string(wb, "nodes"); + + if(mode & CONTEXTS_V2_NODES_INFO) + buffer_json_add_array_item_string(wb, "nodes-info"); + + if(mode & CONTEXTS_V2_NODE_INSTANCES) + buffer_json_add_array_item_string(wb, "nodes-instances"); + + if(mode & CONTEXTS_V2_CONTEXTS) + buffer_json_add_array_item_string(wb, "contexts"); + + if(mode & CONTEXTS_V2_SEARCH) + buffer_json_add_array_item_string(wb, "search"); + + if(mode & CONTEXTS_V2_ALERTS) + buffer_json_add_array_item_string(wb, "alerts"); + + if(mode & CONTEXTS_V2_ALERT_TRANSITIONS) + buffer_json_add_array_item_string(wb, "alert_transitions"); + + buffer_json_array_close(wb); +} + +void buffer_json_query_timings(BUFFER *wb, const char *key, struct query_timings *timings) { + timings->finished_ut = now_monotonic_usec(); + if(!timings->executed_ut) + timings->executed_ut = 
timings->finished_ut; + if(!timings->preprocessed_ut) + timings->preprocessed_ut = timings->received_ut; + buffer_json_member_add_object(wb, key); + buffer_json_member_add_double(wb, "prep_ms", (NETDATA_DOUBLE)(timings->preprocessed_ut - timings->received_ut) / USEC_PER_MS); + buffer_json_member_add_double(wb, "query_ms", (NETDATA_DOUBLE)(timings->executed_ut - timings->preprocessed_ut) / USEC_PER_MS); + buffer_json_member_add_double(wb, "output_ms", (NETDATA_DOUBLE)(timings->finished_ut - timings->executed_ut) / USEC_PER_MS); + buffer_json_member_add_double(wb, "total_ms", (NETDATA_DOUBLE)(timings->finished_ut - timings->received_ut) / USEC_PER_MS); + buffer_json_member_add_double(wb, "cloud_ms", (NETDATA_DOUBLE)(timings->finished_ut - timings->received_ut) / USEC_PER_MS); + buffer_json_object_close(wb); +} + +void buffer_json_cloud_timings(BUFFER *wb, const char *key, struct query_timings *timings) { + if(!timings->finished_ut) + timings->finished_ut = now_monotonic_usec(); + + buffer_json_member_add_object(wb, key); + buffer_json_member_add_double(wb, "routing_ms", 0.0); + buffer_json_member_add_double(wb, "node_max_ms", 0.0); + buffer_json_member_add_double(wb, "total_ms", (NETDATA_DOUBLE)(timings->finished_ut - timings->received_ut) / USEC_PER_MS); + buffer_json_object_close(wb); +} + +static void functions_insert_callback(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) { + struct function_v2_entry *t = value; + + // it is initialized with a static reference - we need to mallocz() the array + size_t *v = t->node_ids; + t->node_ids = mallocz(sizeof(size_t)); + *t->node_ids = *v; + t->size = 1; + t->used = 1; +} + +static bool functions_conflict_callback(const DICTIONARY_ITEM *item __maybe_unused, void *old_value, void *new_value, void *data __maybe_unused) { + struct function_v2_entry *t = old_value, *n = new_value; + size_t *v = n->node_ids; + + if(t->used >= t->size) { + t->node_ids = reallocz(t->node_ids, t->size * 2 * sizeof(size_t)); + t->size *= 2; + } + + t->node_ids[t->used++] = *v; + + return true; +} + +static void functions_delete_callback(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) { + struct function_v2_entry *t = value; + freez(t->node_ids); +} + +static bool contexts_conflict_callback(const DICTIONARY_ITEM *item __maybe_unused, void *old_value, void *new_value, void *data __maybe_unused) { + struct context_v2_entry *o = old_value; + struct context_v2_entry *n = new_value; + + o->count++; + + if(o->family != n->family) { + if((o->flags & RRD_FLAG_COLLECTED) && !(n->flags & RRD_FLAG_COLLECTED)) + // keep old + ; + else if(!(o->flags & RRD_FLAG_COLLECTED) && (n->flags & RRD_FLAG_COLLECTED)) { + // keep new + string_freez(o->family); + o->family = string_dup(n->family); + } + else { + // merge + STRING *old_family = o->family; + o->family = string_2way_merge(o->family, n->family); + string_freez(old_family); + } + } + + if(o->priority != n->priority) { + if((o->flags & RRD_FLAG_COLLECTED) && !(n->flags & RRD_FLAG_COLLECTED)) + // keep o + ; + else if(!(o->flags & RRD_FLAG_COLLECTED) && (n->flags & RRD_FLAG_COLLECTED)) + // keep n + o->priority = n->priority; + else + // keep the min + o->priority = MIN(o->priority, n->priority); + } + + if(o->first_time_s && n->first_time_s) + o->first_time_s = MIN(o->first_time_s, n->first_time_s); + else if(!o->first_time_s) + o->first_time_s = n->first_time_s; + + if(o->last_time_s && n->last_time_s) + o->last_time_s = MAX(o->last_time_s, n->last_time_s); + else 
if(!o->last_time_s) + o->last_time_s = n->last_time_s; + + o->flags |= n->flags; + o->match = MIN(o->match, n->match); + + string_freez(n->family); + + return true; +} + +static void contexts_delete_callback(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) { + struct context_v2_entry *z = value; + string_freez(z->family); +} + +int rrdcontext_to_json_v2(BUFFER *wb, struct api_v2_contexts_request *req, CONTEXTS_V2_MODE mode) { + int resp = HTTP_RESP_OK; + bool run = true; + + if(mode & CONTEXTS_V2_SEARCH) + mode |= CONTEXTS_V2_CONTEXTS; + + if(mode & (CONTEXTS_V2_AGENTS_INFO)) + mode |= CONTEXTS_V2_AGENTS; + + if(mode & (CONTEXTS_V2_FUNCTIONS | CONTEXTS_V2_CONTEXTS | CONTEXTS_V2_SEARCH | CONTEXTS_V2_NODES_INFO | CONTEXTS_V2_NODE_INSTANCES)) + mode |= CONTEXTS_V2_NODES; + + if(mode & CONTEXTS_V2_ALERTS) { + mode |= CONTEXTS_V2_NODES; + req->options &= ~CONTEXTS_OPTION_ALERTS_WITH_CONFIGURATIONS; + + if(!(req->options & (CONTEXTS_OPTION_ALERTS_WITH_SUMMARY | CONTEXTS_OPTION_ALERTS_WITH_INSTANCES | + CONTEXTS_OPTION_ALERTS_WITH_VALUES))) + req->options |= CONTEXTS_OPTION_ALERTS_WITH_SUMMARY; + } + + if(mode & CONTEXTS_V2_ALERT_TRANSITIONS) { + mode |= CONTEXTS_V2_NODES; + req->options &= ~CONTEXTS_OPTION_ALERTS_WITH_INSTANCES; + } + + struct rrdcontext_to_json_v2_data ctl = { + .wb = wb, + .request = req, + .mode = mode, + .options = req->options, + .versions = { 0 }, + .nodes.scope_pattern = string_to_simple_pattern(req->scope_nodes), + .nodes.pattern = string_to_simple_pattern(req->nodes), + .contexts.pattern = string_to_simple_pattern(req->contexts), + .contexts.scope_pattern = string_to_simple_pattern(req->scope_contexts), + .q.pattern = string_to_simple_pattern_nocase(req->q), + .alerts.alert_name_pattern = string_to_simple_pattern(req->alerts.alert), + .window = { + .enabled = false, + .relative = false, + .after = req->after, + .before = req->before, + }, + .timings = { + .received_ut = now_monotonic_usec(), + } + }; + + bool debug = ctl.options & CONTEXTS_OPTION_DEBUG; + + if(mode & CONTEXTS_V2_NODES) { + ctl.nodes.dict = dictionary_create_advanced(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, + NULL, sizeof(struct contexts_v2_node)); + } + + if(mode & CONTEXTS_V2_CONTEXTS) { + ctl.contexts.dict = dictionary_create_advanced( + DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, NULL, + sizeof(struct context_v2_entry)); + + dictionary_register_conflict_callback(ctl.contexts.dict, contexts_conflict_callback, &ctl); + dictionary_register_delete_callback(ctl.contexts.dict, contexts_delete_callback, &ctl); + } + + if(mode & CONTEXTS_V2_FUNCTIONS) { + ctl.functions.dict = dictionary_create_advanced( + DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, NULL, + sizeof(struct function_v2_entry)); + + dictionary_register_insert_callback(ctl.functions.dict, functions_insert_callback, &ctl); + dictionary_register_conflict_callback(ctl.functions.dict, functions_conflict_callback, &ctl); + dictionary_register_delete_callback(ctl.functions.dict, functions_delete_callback, &ctl); + } + + if(mode & CONTEXTS_V2_ALERTS) { + if(!rrdcontexts_v2_init_alert_dictionaries(&ctl, req)) { + resp = HTTP_RESP_NOT_FOUND; + goto cleanup; + } + } + + if(req->after || req->before) { + ctl.window.relative = rrdr_relative_window_to_absolute_query( + &ctl.window.after, &ctl.window.before, &ctl.now, false); + + ctl.window.enabled = !(mode & CONTEXTS_V2_ALERT_TRANSITIONS); 
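+ // the absolute window computed above drives rrdhost_matches_window() during host iteration; + // it is kept disabled for alert transitions, which pass ctl.window.after/before to sql_alert_transitions() directly. 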
+ } + else + ctl.now = now_realtime_sec(); + + buffer_json_initialize(wb, "\"", "\"", 0, true, + ((req->options & CONTEXTS_OPTION_MINIFY) && !(req->options & CONTEXTS_OPTION_DEBUG)) ? BUFFER_JSON_OPTIONS_MINIFY : BUFFER_JSON_OPTIONS_DEFAULT); + + buffer_json_member_add_uint64(wb, "api", 2); + + if(req->options & CONTEXTS_OPTION_DEBUG) { + buffer_json_member_add_object(wb, "request"); + { + buffer_json_contexts_v2_mode_to_array(wb, "mode", mode); + contexts_options_to_buffer_json_array(wb, "options", req->options); + + buffer_json_member_add_object(wb, "scope"); + { + buffer_json_member_add_string(wb, "scope_nodes", req->scope_nodes); + if (mode & (CONTEXTS_V2_CONTEXTS | CONTEXTS_V2_SEARCH | CONTEXTS_V2_ALERTS)) + buffer_json_member_add_string(wb, "scope_contexts", req->scope_contexts); + } + buffer_json_object_close(wb); + + buffer_json_member_add_object(wb, "selectors"); + { + buffer_json_member_add_string(wb, "nodes", req->nodes); + + if (mode & (CONTEXTS_V2_CONTEXTS | CONTEXTS_V2_SEARCH | CONTEXTS_V2_ALERTS)) + buffer_json_member_add_string(wb, "contexts", req->contexts); + + if(mode & (CONTEXTS_V2_ALERTS | CONTEXTS_V2_ALERT_TRANSITIONS)) { + buffer_json_member_add_object(wb, "alerts"); + + if(mode & CONTEXTS_V2_ALERTS) + contexts_alerts_status_to_buffer_json_array(wb, "status", req->alerts.status); + + if(mode & CONTEXTS_V2_ALERT_TRANSITIONS) { + buffer_json_member_add_string(wb, "context", req->contexts); + buffer_json_member_add_uint64(wb, "anchor_gi", req->alerts.global_id_anchor); + buffer_json_member_add_uint64(wb, "last", req->alerts.last); + } + + buffer_json_member_add_string(wb, "alert", req->alerts.alert); + buffer_json_member_add_string(wb, "transition", req->alerts.transition); + buffer_json_object_close(wb); // alerts + } + } + buffer_json_object_close(wb); // selectors + + buffer_json_member_add_object(wb, "filters"); + { + if (mode & CONTEXTS_V2_SEARCH) + buffer_json_member_add_string(wb, "q", req->q); + + buffer_json_member_add_time_t(wb, "after", req->after); + buffer_json_member_add_time_t(wb, "before", req->before); + } + buffer_json_object_close(wb); // filters + + if(mode & CONTEXTS_V2_ALERT_TRANSITIONS) { + buffer_json_member_add_object(wb, "facets"); + { + for (int i = 0; i < ATF_TOTAL_ENTRIES; i++) { + buffer_json_member_add_string(wb, alert_transition_facets[i].query_param, req->alerts.facets[i]); + } + } + buffer_json_object_close(wb); // facets + } + } + buffer_json_object_close(wb); + } + + ssize_t ret = 0; + if(run) + ret = query_scope_foreach_host(ctl.nodes.scope_pattern, ctl.nodes.pattern, + rrdcontext_to_json_v2_add_host, &ctl, + &ctl.versions, ctl.q.host_node_id_str); + + if(unlikely(ret < 0)) { + buffer_flush(wb); + + if(ret == -2) { + buffer_strcat(wb, "query timeout"); + resp = HTTP_RESP_GATEWAY_TIMEOUT; + } + else { + buffer_strcat(wb, "query interrupted"); + resp = HTTP_RESP_CLIENT_CLOSED_REQUEST; + } + goto cleanup; + } + + ctl.timings.executed_ut = now_monotonic_usec(); + + if(mode & CONTEXTS_V2_ALERT_TRANSITIONS) { + contexts_v2_alert_transitions_to_json(wb, &ctl, debug); + } + else { + if (mode & CONTEXTS_V2_NODES) { + buffer_json_member_add_array(wb, "nodes"); + struct contexts_v2_node *t; + dfe_start_read(ctl.nodes.dict, t) { + rrdcontext_to_json_v2_rrdhost(wb, t->host, &ctl, t->ni); + } + dfe_done(t); + buffer_json_array_close(wb); + } + + if (mode & CONTEXTS_V2_FUNCTIONS) { + buffer_json_member_add_array(wb, "functions"); + { + struct function_v2_entry *t; + dfe_start_read(ctl.functions.dict, t) { + buffer_json_add_array_item_object(wb); + 
{ + const char *name = t_dfe.name ? strstr(t_dfe.name, RRDFUNCTIONS_VERSION_SEPARATOR) : NULL; + if(name) + name += sizeof(RRDFUNCTIONS_VERSION_SEPARATOR) - 1; + else + name = t_dfe.name; + + buffer_json_member_add_string(wb, "name", name); + buffer_json_member_add_string(wb, "help", string2str(t->help)); + buffer_json_member_add_array(wb, "ni"); + { + for (size_t i = 0; i < t->used; i++) + buffer_json_add_array_item_uint64(wb, t->node_ids[i]); + } + buffer_json_array_close(wb); + buffer_json_member_add_string(wb, "tags", string2str(t->tags)); + http_access2buffer_json_array(wb, "access", t->access); + buffer_json_member_add_uint64(wb, "priority", t->priority); + buffer_json_member_add_uint64(wb, "version", t->version); + } + buffer_json_object_close(wb); + } + dfe_done(t); + } + buffer_json_array_close(wb); + } + + if (mode & CONTEXTS_V2_CONTEXTS) { + buffer_json_member_add_object(wb, "contexts"); + { + struct context_v2_entry *z; + dfe_start_read(ctl.contexts.dict, z) { + bool collected = z->flags & RRD_FLAG_COLLECTED; + + buffer_json_member_add_object(wb, string2str(z->id)); + { + buffer_json_member_add_string(wb, "family", string2str(z->family)); + buffer_json_member_add_uint64(wb, "priority", z->priority); + buffer_json_member_add_time_t(wb, "first_entry", z->first_time_s); + buffer_json_member_add_time_t(wb, "last_entry", collected ? ctl.now : z->last_time_s); + buffer_json_member_add_boolean(wb, "live", collected); + if (mode & CONTEXTS_V2_SEARCH) + buffer_json_member_add_string(wb, "match", fts_match_to_string(z->match)); + } + buffer_json_object_close(wb); + } + dfe_done(z); + } + buffer_json_object_close(wb); // contexts + } + + if (mode & CONTEXTS_V2_ALERTS) + contexts_v2_alerts_to_json(wb, &ctl, debug); + + if (mode & CONTEXTS_V2_SEARCH) { + buffer_json_member_add_object(wb, "searches"); + { + buffer_json_member_add_uint64(wb, "strings", ctl.q.fts.string_searches); + buffer_json_member_add_uint64(wb, "char", ctl.q.fts.char_searches); + buffer_json_member_add_uint64(wb, "total", ctl.q.fts.searches); + } + buffer_json_object_close(wb); + } + + if (mode & (CONTEXTS_V2_VERSIONS)) + version_hashes_api_v2(wb, &ctl.versions); + + if (mode & CONTEXTS_V2_AGENTS) + buffer_json_agents_v2(wb, &ctl.timings, ctl.now, mode & (CONTEXTS_V2_AGENTS_INFO), true); + } + + buffer_json_cloud_timings(wb, "timings", &ctl.timings); + + buffer_json_finalize(wb); + +cleanup: + dictionary_destroy(ctl.nodes.dict); + dictionary_destroy(ctl.contexts.dict); + dictionary_destroy(ctl.functions.dict); + rrdcontexts_v2_alerts_cleanup(&ctl); + simple_pattern_free(ctl.nodes.scope_pattern); + simple_pattern_free(ctl.nodes.pattern); + simple_pattern_free(ctl.contexts.pattern); + simple_pattern_free(ctl.contexts.scope_pattern); + simple_pattern_free(ctl.q.pattern); + simple_pattern_free(ctl.alerts.alert_name_pattern); + + return resp; +} diff --git a/src/database/contexts/api_v2_contexts.h b/src/database/contexts/api_v2_contexts.h new file mode 100644 index 000000000..3fb5354b9 --- /dev/null +++ b/src/database/contexts/api_v2_contexts.h @@ -0,0 +1,98 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_API_V2_CONTEXTS_H +#define NETDATA_API_V2_CONTEXTS_H + +#include "internal.h" + +typedef enum __attribute__ ((__packed__)) { + FTS_MATCHED_NONE = 0, + FTS_MATCHED_HOST, + FTS_MATCHED_CONTEXT, + FTS_MATCHED_INSTANCE, + FTS_MATCHED_DIMENSION, + FTS_MATCHED_LABEL, + FTS_MATCHED_ALERT, + FTS_MATCHED_ALERT_INFO, + FTS_MATCHED_FAMILY, + FTS_MATCHED_TITLE, + FTS_MATCHED_UNITS, +} FTS_MATCH; + +typedef struct 
full_text_search_index { + size_t searches; + size_t string_searches; + size_t char_searches; +} FTS_INDEX; + +struct contexts_v2_node { + size_t ni; + RRDHOST *host; +}; + +struct rrdcontext_to_json_v2_data { + time_t now; + + BUFFER *wb; + struct api_v2_contexts_request *request; + + CONTEXTS_V2_MODE mode; + CONTEXTS_OPTIONS options; + struct query_versions versions; + + struct { + SIMPLE_PATTERN *scope_pattern; + SIMPLE_PATTERN *pattern; + size_t ni; + DICTIONARY *dict; // the result set + } nodes; + + struct { + SIMPLE_PATTERN *scope_pattern; + SIMPLE_PATTERN *pattern; + size_t ci; + DICTIONARY *dict; // the result set + } contexts; + + struct { + SIMPLE_PATTERN *alert_name_pattern; + time_t alarm_id_filter; + + size_t ati; + + DICTIONARY *summary; + DICTIONARY *alert_instances; + + DICTIONARY *by_type; + DICTIONARY *by_component; + DICTIONARY *by_classification; + DICTIONARY *by_recipient; + DICTIONARY *by_module; + } alerts; + + struct { + FTS_MATCH host_match; + char host_node_id_str[UUID_STR_LEN]; + SIMPLE_PATTERN *pattern; + FTS_INDEX fts; + } q; + + struct { + DICTIONARY *dict; // the result set + } functions; + + struct { + bool enabled; + bool relative; + time_t after; + time_t before; + } window; + + struct query_timings timings; +}; + +void agent_capabilities_to_json(BUFFER *wb, RRDHOST *host, const char *key); + +#include "api_v2_contexts_alerts.h" + +#endif //NETDATA_API_V2_CONTEXTS_H diff --git a/src/database/contexts/api_v2_contexts_agents.c b/src/database/contexts/api_v2_contexts_agents.c new file mode 100644 index 000000000..e279405a0 --- /dev/null +++ b/src/database/contexts/api_v2_contexts_agents.c @@ -0,0 +1,162 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v2_contexts.h" +#include "aclk/aclk_capas.h" + +void build_info_to_json_object(BUFFER *b); + +void buffer_json_agents_v2(BUFFER *wb, struct query_timings *timings, time_t now_s, bool info, bool array) { + if(!now_s) + now_s = now_realtime_sec(); + + if(array) { + buffer_json_member_add_array(wb, "agents"); + buffer_json_add_array_item_object(wb); + } + else + buffer_json_member_add_object(wb, "agent"); + + buffer_json_member_add_string(wb, "mg", localhost->machine_guid); + buffer_json_member_add_uuid(wb, "nd", localhost->node_id.uuid); + buffer_json_member_add_string(wb, "nm", rrdhost_hostname(localhost)); + buffer_json_member_add_time_t(wb, "now", now_s); + + if(array) + buffer_json_member_add_uint64(wb, "ai", 0); + + if(info) { + buffer_json_member_add_object(wb, "application"); + build_info_to_json_object(wb); + buffer_json_object_close(wb); // application + + buffer_json_cloud_status(wb, now_s); + + size_t currently_collected_metrics = 0; + + buffer_json_member_add_object(wb, "nodes"); + { + size_t receiving = 0, archived = 0, sending = 0, total = 0; + RRDHOST *host; + dfe_start_read(rrdhost_root_index, host) { + total++; + + if(rrdhost_flag_check(host, RRDHOST_FLAG_RRDPUSH_SENDER_CONNECTED)) + sending++; + + if(host != localhost) { + if (rrdhost_is_online(host)) + receiving++; + else + archived++; + } + currently_collected_metrics += host->accounting.currently_collected; + } + dfe_done(host); + + buffer_json_member_add_uint64(wb, "total", total); + buffer_json_member_add_uint64(wb, "receiving", receiving); + buffer_json_member_add_uint64(wb, "sending", sending); + buffer_json_member_add_uint64(wb, "archived", archived); + } + buffer_json_object_close(wb); // nodes + + agent_capabilities_to_json(wb, localhost, "capabilities"); + + buffer_json_member_add_object(wb, "api"); + { + 
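// the HTTP API version (from aclk_get_http_api_version()) and the bearer-token protection flag this agent advertises + 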
buffer_json_member_add_uint64(wb, "version", aclk_get_http_api_version()); + buffer_json_member_add_boolean(wb, "bearer_protection", netdata_is_protected_by_bearer); + } + buffer_json_object_close(wb); // api + + buffer_json_member_add_array(wb, "db_size"); + size_t group_seconds = localhost->rrd_update_every; + for (size_t tier = 0; tier < storage_tiers; tier++) { + STORAGE_ENGINE *eng = localhost->db[tier].eng; + if (!eng) continue; + + group_seconds *= storage_tiers_grouping_iterations[tier]; + uint64_t max = storage_engine_disk_space_max(eng->seb, localhost->db[tier].si); + uint64_t used = storage_engine_disk_space_used(eng->seb, localhost->db[tier].si); +#ifdef ENABLE_DBENGINE + if (!max && eng->seb == STORAGE_ENGINE_BACKEND_DBENGINE) { + max = get_directory_free_bytes_space(multidb_ctx[tier]); + max += used; + } +#endif + time_t first_time_s = storage_engine_global_first_time_s(eng->seb, localhost->db[tier].si); +// size_t currently_collected_metrics = storage_engine_collected_metrics(eng->seb, localhost->db[tier].si); + + NETDATA_DOUBLE percent; + if (used && max) + percent = (NETDATA_DOUBLE) used * 100.0 / (NETDATA_DOUBLE) max; + else + percent = 0.0; + + buffer_json_add_array_item_object(wb); + buffer_json_member_add_uint64(wb, "tier", tier); + char human_retention[128]; + duration_snprintf_time_t(human_retention, sizeof(human_retention), (stime_t)group_seconds); + buffer_json_member_add_string(wb, "granularity", human_retention); + + buffer_json_member_add_uint64(wb, "metrics", storage_engine_metrics(eng->seb, localhost->db[tier].si)); + buffer_json_member_add_uint64(wb, "samples", storage_engine_samples(eng->seb, localhost->db[tier].si)); + + if(used || max) { + buffer_json_member_add_uint64(wb, "disk_used", used); + buffer_json_member_add_uint64(wb, "disk_max", max); + buffer_json_member_add_double(wb, "disk_percent", percent); + } + + if(first_time_s) { + time_t retention = now_s - first_time_s; + + buffer_json_member_add_time_t(wb, "from", first_time_s); + buffer_json_member_add_time_t(wb, "to", now_s); + buffer_json_member_add_time_t(wb, "retention", retention); + + duration_snprintf_hours(human_retention, sizeof(human_retention), + (int)duration_round_to_resolution(retention, 3600)); + + buffer_json_member_add_string(wb, "retention_human", human_retention); + + if(used || max) { // we have disk space information + time_t time_retention = 0; +#ifdef ENABLE_DBENGINE + time_retention = multidb_ctx[tier]->config.max_retention_s; +#endif + time_t space_retention = (time_t)((NETDATA_DOUBLE)(now_s - first_time_s) * 100.0 / percent); + time_t actual_retention = MIN(space_retention, time_retention ? 
time_retention : space_retention); + + if (time_retention) { + duration_snprintf_hours(human_retention, sizeof(human_retention), + (int)duration_round_to_resolution(time_retention, 3600)); + + buffer_json_member_add_time_t(wb, "requested_retention", time_retention); + buffer_json_member_add_string(wb, "requested_retention_human", human_retention); + } + + duration_snprintf_hours(human_retention, sizeof(human_retention), + (int)duration_round_to_resolution(actual_retention, 3600)); + + buffer_json_member_add_time_t(wb, "expected_retention", actual_retention); + buffer_json_member_add_string(wb, "expected_retention_human", human_retention); + } + } + + if(currently_collected_metrics) + buffer_json_member_add_uint64(wb, "currently_collected_metrics", currently_collected_metrics); + + buffer_json_object_close(wb); + } + buffer_json_array_close(wb); // db_size + } + + if(timings) + buffer_json_query_timings(wb, "timings", timings); + + buffer_json_object_close(wb); + + if(array) + buffer_json_array_close(wb); +} diff --git a/src/database/contexts/api_v2_contexts_alert_config.c b/src/database/contexts/api_v2_contexts_alert_config.c new file mode 100644 index 000000000..cd3d8fc14 --- /dev/null +++ b/src/database/contexts/api_v2_contexts_alert_config.c @@ -0,0 +1,135 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v2_contexts_alerts.h" + +void contexts_v2_alert_config_to_json_from_sql_alert_config_data(struct sql_alert_config_data *t, void *data) { + struct alert_transitions_callback_data *d = data; + BUFFER *wb = d->wb; + bool debug = d->debug; + d->configs_added++; + + if(d->only_one_config) + buffer_json_add_array_item_object(wb); // alert config + + { + buffer_json_member_add_string(wb, "name", t->name); + buffer_json_member_add_uuid_ptr(wb, "config_hash_id", t->config_hash_id); + + buffer_json_member_add_object(wb, "selectors"); + { + bool is_template = t->selectors.on_template && *t->selectors.on_template ? true : false; + buffer_json_member_add_string(wb, "type", is_template ? "template" : "alarm"); + buffer_json_member_add_string(wb, "on", is_template ? 
t->selectors.on_template : t->selectors.on_key); + + buffer_json_member_add_string(wb, "families", t->selectors.families); + buffer_json_member_add_string(wb, "host_labels", t->selectors.host_labels); + buffer_json_member_add_string(wb, "chart_labels", t->selectors.chart_labels); + } + buffer_json_object_close(wb); // selectors + + buffer_json_member_add_object(wb, "value"); // value + { + // buffer_json_member_add_string(wb, "every", t->value.every); // does not exist in Netdata Cloud + buffer_json_member_add_string(wb, "units", t->value.units); + buffer_json_member_add_uint64(wb, "update_every", t->value.update_every); + + if (t->value.db.after || debug) { + buffer_json_member_add_object(wb, "db"); + { + // buffer_json_member_add_string(wb, "lookup", t->value.db.lookup); // does not exist in Netdata Cloud + + buffer_json_member_add_time_t(wb, "after", t->value.db.after); + buffer_json_member_add_time_t(wb, "before", t->value.db.before); + buffer_json_member_add_string(wb, "time_group_condition", alerts_group_conditions_id2txt(t->value.db.time_group_condition)); + buffer_json_member_add_double(wb, "time_group_value", t->value.db.time_group_value); + buffer_json_member_add_string(wb, "dims_group", alerts_dims_grouping_id2group(t->value.db.dims_group)); + buffer_json_member_add_string(wb, "data_source", alerts_data_source_id2source(t->value.db.data_source)); + buffer_json_member_add_string(wb, "method", t->value.db.method); + buffer_json_member_add_string(wb, "dimensions", t->value.db.dimensions); + rrdr_options_to_buffer_json_array(wb, "options", (RRDR_OPTIONS)t->value.db.options); + } + buffer_json_object_close(wb); // db + } + + if (t->value.calc || debug) + buffer_json_member_add_string(wb, "calc", t->value.calc); + } + buffer_json_object_close(wb); // value + + if (t->status.warn || t->status.crit || debug) { + buffer_json_member_add_object(wb, "status"); // status + { + NETDATA_DOUBLE green = t->status.green ? str2ndd(t->status.green, NULL) : NAN; + NETDATA_DOUBLE red = t->status.red ? str2ndd(t->status.red, NULL) : NAN; + + if (!isnan(green) || debug) + buffer_json_member_add_double(wb, "green", green); + + if (!isnan(red) || debug) + buffer_json_member_add_double(wb, "red", red); + + if (t->status.warn || debug) + buffer_json_member_add_string(wb, "warn", t->status.warn); + + if (t->status.crit || debug) + buffer_json_member_add_string(wb, "crit", t->status.crit); + } + buffer_json_object_close(wb); // status + } + + buffer_json_member_add_object(wb, "notification"); + { + buffer_json_member_add_string(wb, "type", "agent"); + buffer_json_member_add_string(wb, "exec", t->notification.exec ? t->notification.exec : NULL); + buffer_json_member_add_string(wb, "to", t->notification.to_key ? 
t->notification.to_key : string2str(localhost->health.health_default_recipient)); + buffer_json_member_add_string(wb, "delay", t->notification.delay); + buffer_json_member_add_string(wb, "repeat", t->notification.repeat); + buffer_json_member_add_string(wb, "options", t->notification.options); + } + buffer_json_object_close(wb); // notification + + buffer_json_member_add_string(wb, "class", t->classification); + buffer_json_member_add_string(wb, "component", t->component); + buffer_json_member_add_string(wb, "type", t->type); + buffer_json_member_add_string(wb, "info", t->info); + buffer_json_member_add_string(wb, "summary", t->summary); + // buffer_json_member_add_string(wb, "source", t->source); // moved to alert instance + } + + if(d->only_one_config) + buffer_json_object_close(wb); +} + +int contexts_v2_alert_config_to_json(struct web_client *w, const char *config_hash_id) { + struct alert_transitions_callback_data data = { + .wb = w->response.data, + .debug = false, + .only_one_config = false, + }; + DICTIONARY *configs = dictionary_create(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE); + dictionary_set(configs, config_hash_id, NULL, 0); + + buffer_flush(w->response.data); + + buffer_json_initialize(w->response.data, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT); + + int added = sql_get_alert_configuration(configs, contexts_v2_alert_config_to_json_from_sql_alert_config_data, &data, false); + buffer_json_finalize(w->response.data); + + int ret = HTTP_RESP_OK; + + if(added <= 0) { + buffer_flush(w->response.data); + w->response.data->content_type = CT_TEXT_PLAIN; + if(added < 0) { + buffer_strcat(w->response.data, "Failed to execute SQL query."); + ret = HTTP_RESP_INTERNAL_SERVER_ERROR; + } + else { + buffer_strcat(w->response.data, "Config is not found."); + ret = HTTP_RESP_NOT_FOUND; + } + } + + return ret; +} diff --git a/src/database/contexts/api_v2_contexts_alert_transitions.c b/src/database/contexts/api_v2_contexts_alert_transitions.c new file mode 100644 index 000000000..13061f60f --- /dev/null +++ b/src/database/contexts/api_v2_contexts_alert_transitions.c @@ -0,0 +1,487 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v2_contexts_alerts.h" + +struct alert_transitions_facets alert_transition_facets[] = { + [ATF_STATUS] = { + .id = "f_status", + .name = "Alert Status", + .query_param = "f_status", + .order = 1, + }, + [ATF_TYPE] = { + .id = "f_type", + .name = "Alert Type", + .query_param = "f_type", + .order = 2, + }, + [ATF_ROLE] = { + .id = "f_role", + .name = "Recipient Role", + .query_param = "f_role", + .order = 3, + }, + [ATF_CLASS] = { + .id = "f_class", + .name = "Alert Class", + .query_param = "f_class", + .order = 4, + }, + [ATF_COMPONENT] = { + .id = "f_component", + .name = "Alert Component", + .query_param = "f_component", + .order = 5, + }, + [ATF_NODE] = { + .id = "f_node", + .name = "Alert Node", + .query_param = "f_node", + .order = 6, + }, + [ATF_ALERT_NAME] = { + .id = "f_alert", + .name = "Alert Name", + .query_param = "f_alert", + .order = 7, + }, + [ATF_CHART_NAME] = { + .id = "f_instance", + .name = "Instance Name", + .query_param = "f_instance", + .order = 8, + }, + [ATF_CONTEXT] = { + .id = "f_context", + .name = "Context", + .query_param = "f_context", + .order = 9, + }, + + // terminator + [ATF_TOTAL_ENTRIES] = { + .id = NULL, + .name = NULL, + .query_param = NULL, + .order = 9999, + } +}; + +#define SQL_TRANSITION_DATA_SMALL_STRING (6 * 8) +#define SQL_TRANSITION_DATA_MEDIUM_STRING (12 * 8) +#define 
SQL_TRANSITION_DATA_BIG_STRING 512 + +struct sql_alert_transition_fixed_size { + usec_t global_id; + nd_uuid_t transition_id; + nd_uuid_t host_id; + nd_uuid_t config_hash_id; + uint32_t alarm_id; + char alert_name[SQL_TRANSITION_DATA_SMALL_STRING]; + char chart[RRD_ID_LENGTH_MAX]; + char chart_name[RRD_ID_LENGTH_MAX]; + char chart_context[SQL_TRANSITION_DATA_MEDIUM_STRING]; + char family[SQL_TRANSITION_DATA_SMALL_STRING]; + char recipient[SQL_TRANSITION_DATA_MEDIUM_STRING]; + char units[SQL_TRANSITION_DATA_SMALL_STRING]; + char exec[SQL_TRANSITION_DATA_BIG_STRING]; + char info[SQL_TRANSITION_DATA_BIG_STRING]; + char summary[SQL_TRANSITION_DATA_BIG_STRING]; + char classification[SQL_TRANSITION_DATA_SMALL_STRING]; + char type[SQL_TRANSITION_DATA_SMALL_STRING]; + char component[SQL_TRANSITION_DATA_SMALL_STRING]; + time_t when_key; + time_t duration; + time_t non_clear_duration; + uint64_t flags; + time_t delay_up_to_timestamp; + time_t exec_run_timestamp; + int exec_code; + int new_status; + int old_status; + int delay; + time_t last_repeat; + NETDATA_DOUBLE new_value; + NETDATA_DOUBLE old_value; + + char machine_guid[UUID_STR_LEN]; + struct sql_alert_transition_fixed_size *next; + struct sql_alert_transition_fixed_size *prev; +}; + +struct facet_entry { + uint32_t count; +}; + +static struct sql_alert_transition_fixed_size *contexts_v2_alert_transition_dup(struct sql_alert_transition_data *t, const char *machine_guid, struct sql_alert_transition_fixed_size *dst) { + struct sql_alert_transition_fixed_size *n = dst ? dst : mallocz(sizeof(*n)); + + n->global_id = t->global_id; + uuid_copy(n->transition_id, *t->transition_id); + uuid_copy(n->host_id, *t->host_id); + uuid_copy(n->config_hash_id, *t->config_hash_id); + n->alarm_id = t->alarm_id; + strncpyz(n->alert_name, t->alert_name ? t->alert_name : "", sizeof(n->alert_name) - 1); + strncpyz(n->chart, t->chart ? t->chart : "", sizeof(n->chart) - 1); + strncpyz(n->chart_name, t->chart_name ? t->chart_name : n->chart, sizeof(n->chart_name) - 1); + strncpyz(n->chart_context, t->chart_context ? t->chart_context : "", sizeof(n->chart_context) - 1); + strncpyz(n->family, t->family ? t->family : "", sizeof(n->family) - 1); + strncpyz(n->recipient, t->recipient ? t->recipient : "", sizeof(n->recipient) - 1); + strncpyz(n->units, t->units ? t->units : "", sizeof(n->units) - 1); + strncpyz(n->exec, t->exec ? t->exec : "", sizeof(n->exec) - 1); + strncpyz(n->info, t->info ? t->info : "", sizeof(n->info) - 1); + strncpyz(n->summary, t->summary ? t->summary : "", sizeof(n->summary) - 1); + strncpyz(n->classification, t->classification ? t->classification : "", sizeof(n->classification) - 1); + strncpyz(n->type, t->type ? t->type : "", sizeof(n->type) - 1); + strncpyz(n->component, t->component ? 
t->component : "", sizeof(n->component) - 1); + n->when_key = t->when_key; + n->duration = t->duration; + n->non_clear_duration = t->non_clear_duration; + n->flags = t->flags; + n->delay_up_to_timestamp = t->delay_up_to_timestamp; + n->exec_run_timestamp = t->exec_run_timestamp; + n->exec_code = t->exec_code; + n->new_status = t->new_status; + n->old_status = t->old_status; + n->delay = t->delay; + n->last_repeat = t->last_repeat; + n->new_value = t->new_value; + n->old_value = t->old_value; + + memcpy(n->machine_guid, machine_guid, sizeof(n->machine_guid)); + n->next = n->prev = NULL; + + return n; +} + +static void contexts_v2_alert_transition_free(struct sql_alert_transition_fixed_size *t) { + freez(t); +} + +static inline void contexts_v2_alert_transition_keep(struct alert_transitions_callback_data *d, struct sql_alert_transition_data *t, const char *machine_guid) { + d->items_matched++; + + if(unlikely(t->global_id <= d->ctl->request->alerts.global_id_anchor)) { + // this is in our past, we are not interested + d->operations.skips_before++; + return; + } + + if(unlikely(!d->base)) { + d->last_added = contexts_v2_alert_transition_dup(t, machine_guid, NULL); + DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(d->base, d->last_added, prev, next); + d->items_to_return++; + d->operations.first++; + return; + } + + struct sql_alert_transition_fixed_size *last = d->last_added; + while(last->prev != d->base->prev && t->global_id > last->prev->global_id) { + last = last->prev; + d->operations.backwards++; + } + + while(last->next && t->global_id < last->next->global_id) { + last = last->next; + d->operations.forwards++; + } + + if(d->items_to_return >= d->max_items_to_return) { + if(last == d->base->prev && t->global_id < last->global_id) { + d->operations.skips_after++; + return; + } + } + + d->items_to_return++; + + if(t->global_id > last->global_id) { + if(d->items_to_return > d->max_items_to_return) { + d->items_to_return--; + d->operations.shifts++; + d->last_added = d->base->prev; + DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(d->base, d->last_added, prev, next); + d->last_added = contexts_v2_alert_transition_dup(t, machine_guid, d->last_added); + } + DOUBLE_LINKED_LIST_PREPEND_ITEM_UNSAFE(d->base, d->last_added, prev, next); + d->operations.prepend++; + } + else { + d->last_added = contexts_v2_alert_transition_dup(t, machine_guid, NULL); + DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(d->base, d->last_added, prev, next); + d->operations.append++; + } + + while(d->items_to_return > d->max_items_to_return) { + // we have to remove something + + struct sql_alert_transition_fixed_size *tmp = d->base->prev; + DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(d->base, tmp, prev, next); + d->items_to_return--; + + if(unlikely(d->last_added == tmp)) + d->last_added = d->base; + + contexts_v2_alert_transition_free(tmp); + + d->operations.shifts++; + } +} + +static void contexts_v2_alert_transition_callback(struct sql_alert_transition_data *t, void *data) { + struct alert_transitions_callback_data *d = data; + d->items_evaluated++; + + char machine_guid[UUID_STR_LEN] = ""; + uuid_unparse_lower(*t->host_id, machine_guid); + + const char *facets[ATF_TOTAL_ENTRIES] = { + [ATF_STATUS] = rrdcalc_status2string(t->new_status), + [ATF_CLASS] = t->classification, + [ATF_TYPE] = t->type, + [ATF_COMPONENT] = t->component, + [ATF_ROLE] = t->recipient && *t->recipient ? 
t->recipient : string2str(localhost->health.health_default_recipient), + [ATF_NODE] = machine_guid, + [ATF_ALERT_NAME] = t->alert_name, + [ATF_CHART_NAME] = t->chart_name, + [ATF_CONTEXT] = t->chart_context, + }; + + for(size_t i = 0; i < ATF_TOTAL_ENTRIES ;i++) { + if (!facets[i] || !*facets[i]) facets[i] = "unknown"; + + struct facet_entry tmp = { + .count = 0, + }; + dictionary_set(d->facets[i].dict, facets[i], &tmp, sizeof(tmp)); + } + + bool selected[ATF_TOTAL_ENTRIES] = { 0 }; + + uint32_t selected_by = 0; + for(size_t i = 0; i < ATF_TOTAL_ENTRIES ;i++) { + selected[i] = !d->facets[i].pattern || simple_pattern_matches(d->facets[i].pattern, facets[i]); + if(selected[i]) + selected_by++; + } + + if(selected_by == ATF_TOTAL_ENTRIES) { + // this item is selected by all facets + // put it in our result (if it fits) + contexts_v2_alert_transition_keep(d, t, machine_guid); + } + + if(selected_by >= ATF_TOTAL_ENTRIES - 1) { + // this item is selected by all, or all except one facet + // in both cases we need to add it to our counters + + for (size_t i = 0; i < ATF_TOTAL_ENTRIES; i++) { + uint32_t counted_by = selected_by; + + if (counted_by != ATF_TOTAL_ENTRIES) { + counted_by = 0; + for (size_t j = 0; j < ATF_TOTAL_ENTRIES; j++) { + if (i == j || selected[j]) + counted_by++; + } + } + + if (counted_by == ATF_TOTAL_ENTRIES) { + // we need to count it on this facet + struct facet_entry *x = dictionary_get(d->facets[i].dict, facets[i]); + internal_fatal(!x, "facet is not found"); + if(x) + x->count++; + } + } + } +} + +void contexts_v2_alert_transitions_to_json(BUFFER *wb, struct rrdcontext_to_json_v2_data *ctl, bool debug) { + struct alert_transitions_callback_data data = { + .wb = wb, + .ctl = ctl, + .debug = debug, + .only_one_config = true, + .max_items_to_return = ctl->request->alerts.last, + .items_to_return = 0, + .base = NULL, + }; + + for(size_t i = 0; i < ATF_TOTAL_ENTRIES ;i++) { + data.facets[i].dict = dictionary_create_advanced(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_FIXED_SIZE | DICT_OPTION_DONT_OVERWRITE_VALUE, NULL, sizeof(struct facet_entry)); + if(ctl->request->alerts.facets[i]) + data.facets[i].pattern = simple_pattern_create(ctl->request->alerts.facets[i], ",|", SIMPLE_PATTERN_EXACT, false); + } + + sql_alert_transitions( + ctl->nodes.dict, + ctl->window.after, + ctl->window.before, + ctl->request->contexts, + ctl->request->alerts.alert, + ctl->request->alerts.transition, + contexts_v2_alert_transition_callback, + &data, + debug); + + buffer_json_member_add_array(wb, "facets"); + for (size_t i = 0; i < ATF_TOTAL_ENTRIES; i++) { + buffer_json_add_array_item_object(wb); + { + buffer_json_member_add_string(wb, "id", alert_transition_facets[i].id); + buffer_json_member_add_string(wb, "name", alert_transition_facets[i].name); + buffer_json_member_add_uint64(wb, "order", alert_transition_facets[i].order); + buffer_json_member_add_array(wb, "options"); + { + struct facet_entry *x; + dfe_start_read(data.facets[i].dict, x) { + buffer_json_add_array_item_object(wb); + { + buffer_json_member_add_string(wb, "id", x_dfe.name); + if (i == ATF_NODE) { + RRDHOST *host = rrdhost_find_by_guid(x_dfe.name); + if (host) + buffer_json_member_add_string(wb, "name", rrdhost_hostname(host)); + else + buffer_json_member_add_string(wb, "name", x_dfe.name); + } else + buffer_json_member_add_string(wb, "name", x_dfe.name); + buffer_json_member_add_uint64(wb, "count", x->count); + } + buffer_json_object_close(wb); + } + dfe_done(x); + } + buffer_json_array_close(wb); // options + } + 
buffer_json_object_close(wb); // facet + } + buffer_json_array_close(wb); // facets + + buffer_json_member_add_array(wb, "transitions"); + for(struct sql_alert_transition_fixed_size *t = data.base; t ; t = t->next) { + buffer_json_add_array_item_object(wb); + { + RRDHOST *host = rrdhost_find_by_guid(t->machine_guid); + + buffer_json_member_add_uint64(wb, "gi", t->global_id); + buffer_json_member_add_uuid(wb, "transition_id", t->transition_id); + buffer_json_member_add_uuid(wb, "config_hash_id", t->config_hash_id); + buffer_json_member_add_string(wb, "machine_guid", t->machine_guid); + + if(host) { + buffer_json_member_add_string(wb, "hostname", rrdhost_hostname(host)); + + if(!UUIDiszero(host->node_id)) + buffer_json_member_add_uuid(wb, "node_id", host->node_id.uuid); + } + + buffer_json_member_add_string(wb, "alert", *t->alert_name ? t->alert_name : NULL); + buffer_json_member_add_string(wb, "instance", *t->chart ? t->chart : NULL); + buffer_json_member_add_string(wb, "instance_n", *t->chart_name ? t->chart_name : NULL); + buffer_json_member_add_string(wb, "context", *t->chart_context ? t->chart_context : NULL); + // buffer_json_member_add_string(wb, "family", *t->family ? t->family : NULL); + buffer_json_member_add_string(wb, "component", *t->component ? t->component : NULL); + buffer_json_member_add_string(wb, "classification", *t->classification ? t->classification : NULL); + buffer_json_member_add_string(wb, "type", *t->type ? t->type : NULL); + + buffer_json_member_add_time_t(wb, "when", t->when_key); + buffer_json_member_add_string(wb, "info", *t->info ? t->info : ""); + buffer_json_member_add_string(wb, "summary", *t->summary ? t->summary : ""); + buffer_json_member_add_string(wb, "units", *t->units ? t->units : NULL); + buffer_json_member_add_object(wb, "new"); + { + buffer_json_member_add_string(wb, "status", rrdcalc_status2string(t->new_status)); + buffer_json_member_add_double(wb, "value", t->new_value); + } + buffer_json_object_close(wb); // new + buffer_json_member_add_object(wb, "old"); + { + buffer_json_member_add_string(wb, "status", rrdcalc_status2string(t->old_status)); + buffer_json_member_add_double(wb, "value", t->old_value); + buffer_json_member_add_time_t(wb, "duration", t->duration); + buffer_json_member_add_time_t(wb, "raised_duration", t->non_clear_duration); + } + buffer_json_object_close(wb); // old + + buffer_json_member_add_object(wb, "notification"); + { + buffer_json_member_add_time_t(wb, "when", t->exec_run_timestamp); + buffer_json_member_add_time_t(wb, "delay", t->delay); + buffer_json_member_add_time_t(wb, "delay_up_to_time", t->delay_up_to_timestamp); + health_entry_flags_to_json_array(wb, "flags", t->flags); + buffer_json_member_add_string(wb, "exec", *t->exec ? t->exec : string2str(localhost->health.health_default_exec)); + buffer_json_member_add_uint64(wb, "exec_code", t->exec_code); + buffer_json_member_add_string(wb, "to", *t->recipient ? 
t->recipient : string2str(localhost->health.health_default_recipient)); + } + buffer_json_object_close(wb); // notification + } + buffer_json_object_close(wb); // a transition + } + buffer_json_array_close(wb); // all transitions + + if(ctl->options & CONTEXTS_OPTION_ALERTS_WITH_CONFIGURATIONS) { + DICTIONARY *configs = dictionary_create(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE); + + for(struct sql_alert_transition_fixed_size *t = data.base; t ; t = t->next) { + char guid[UUID_STR_LEN]; + uuid_unparse_lower(t->config_hash_id, guid); + dictionary_set(configs, guid, NULL, 0); + } + + buffer_json_member_add_array(wb, "configurations"); + sql_get_alert_configuration(configs, contexts_v2_alert_config_to_json_from_sql_alert_config_data, &data, debug); + buffer_json_array_close(wb); + + dictionary_destroy(configs); + } + + while(data.base) { + struct sql_alert_transition_fixed_size *t = data.base; + DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(data.base, t, prev, next); + contexts_v2_alert_transition_free(t); + } + + for(size_t i = 0; i < ATF_TOTAL_ENTRIES ;i++) { + dictionary_destroy(data.facets[i].dict); + simple_pattern_free(data.facets[i].pattern); + } + + buffer_json_member_add_object(wb, "items"); + { + // all the items in the window, under the scope_nodes, ignoring the facets (filters) + buffer_json_member_add_uint64(wb, "evaluated", data.items_evaluated); + + // all the items matching the query (if you didn't put anchor_gi and last, these are all the items you would get back) + buffer_json_member_add_uint64(wb, "matched", data.items_matched); + + // the items included in this response + buffer_json_member_add_uint64(wb, "returned", data.items_to_return); + + // same as last=X parameter + buffer_json_member_add_uint64(wb, "max_to_return", data.max_items_to_return); + + // items before the first returned, this should be 0 if anchor_gi is not set + buffer_json_member_add_uint64(wb, "before", data.operations.skips_before); + + // items after the last returned, when this is zero there aren't any items after the current list + buffer_json_member_add_uint64(wb, "after", data.operations.skips_after + data.operations.shifts); + } + buffer_json_object_close(wb); // items + + if(debug) { + buffer_json_member_add_object(wb, "stats"); + { + buffer_json_member_add_uint64(wb, "first", data.operations.first); + buffer_json_member_add_uint64(wb, "prepend", data.operations.prepend); + buffer_json_member_add_uint64(wb, "append", data.operations.append); + buffer_json_member_add_uint64(wb, "backwards", data.operations.backwards); + buffer_json_member_add_uint64(wb, "forwards", data.operations.forwards); + buffer_json_member_add_uint64(wb, "shifts", data.operations.shifts); + buffer_json_member_add_uint64(wb, "skips_before", data.operations.skips_before); + buffer_json_member_add_uint64(wb, "skips_after", data.operations.skips_after); + } + buffer_json_object_close(wb); + } +} diff --git a/src/database/contexts/api_v2_contexts_alerts.c b/src/database/contexts/api_v2_contexts_alerts.c new file mode 100644 index 000000000..ea7f977bb --- /dev/null +++ b/src/database/contexts/api_v2_contexts_alerts.c @@ -0,0 +1,604 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v2_contexts.h" + +struct alert_counts { + size_t critical; + size_t warning; + size_t clear; + size_t error; +}; + +struct alert_v2_entry { + RRDCALC *tmp; + + STRING *name; + STRING *summary; + RRDLABELS *recipient; + RRDLABELS *classification; + RRDLABELS *context; + RRDLABELS *component; + RRDLABELS *type; + + size_t 
ati; + + struct alert_counts counts; + + size_t instances; + DICTIONARY *nodes; + DICTIONARY *configs; +}; + +struct alert_by_x_entry { + struct { + struct alert_counts counts; + size_t silent; + size_t total; + } running; + + struct { + size_t available; + } prototypes; +}; + +bool rrdcontext_matches_alert(struct rrdcontext_to_json_v2_data *ctl, RRDCONTEXT *rc) { + size_t matches = 0; + RRDINSTANCE *ri; + dfe_start_read(rc->rrdinstances, ri) { + if(ri->rrdset) { + RRDSET *st = ri->rrdset; + rw_spinlock_read_lock(&st->alerts.spinlock); + for (RRDCALC *rcl = st->alerts.base; rcl; rcl = rcl->next) { + if(ctl->alerts.alert_name_pattern && !simple_pattern_matches_string(ctl->alerts.alert_name_pattern, rcl->config.name)) + continue; + + if(ctl->alerts.alarm_id_filter && ctl->alerts.alarm_id_filter != rcl->id) + continue; + + size_t m = ctl->request->alerts.status & CONTEXTS_ALERT_STATUSES ? 0 : 1; + + if (!m) { + if ((ctl->request->alerts.status & CONTEXT_ALERT_UNINITIALIZED) && + rcl->status == RRDCALC_STATUS_UNINITIALIZED) + m++; + + if ((ctl->request->alerts.status & CONTEXT_ALERT_UNDEFINED) && + rcl->status == RRDCALC_STATUS_UNDEFINED) + m++; + + if ((ctl->request->alerts.status & CONTEXT_ALERT_CLEAR) && + rcl->status == RRDCALC_STATUS_CLEAR) + m++; + + if ((ctl->request->alerts.status & CONTEXT_ALERT_RAISED) && + rcl->status >= RRDCALC_STATUS_RAISED) + m++; + + if ((ctl->request->alerts.status & CONTEXT_ALERT_WARNING) && + rcl->status == RRDCALC_STATUS_WARNING) + m++; + + if ((ctl->request->alerts.status & CONTEXT_ALERT_CRITICAL) && + rcl->status == RRDCALC_STATUS_CRITICAL) + m++; + + if(!m) + continue; + } + + struct alert_v2_entry t = { + .tmp = rcl, + }; + struct alert_v2_entry *a2e = + dictionary_set(ctl->alerts.summary, string2str(rcl->config.name), + &t, sizeof(struct alert_v2_entry)); + size_t ati = a2e->ati; + matches++; + + dictionary_set_advanced(ctl->alerts.by_type, + string2str(rcl->config.type), + (ssize_t)string_strlen(rcl->config.type), + NULL, + sizeof(struct alert_by_x_entry), + rcl); + + dictionary_set_advanced(ctl->alerts.by_component, + string2str(rcl->config.component), + (ssize_t)string_strlen(rcl->config.component), + NULL, + sizeof(struct alert_by_x_entry), + rcl); + + dictionary_set_advanced(ctl->alerts.by_classification, + string2str(rcl->config.classification), + (ssize_t)string_strlen(rcl->config.classification), + NULL, + sizeof(struct alert_by_x_entry), + rcl); + + dictionary_set_advanced(ctl->alerts.by_recipient, + string2str(rcl->config.recipient), + (ssize_t)string_strlen(rcl->config.recipient), + NULL, + sizeof(struct alert_by_x_entry), + rcl); + + char *module = NULL; + rrdlabels_get_value_strdup_or_null(st->rrdlabels, &module, "_collect_module"); + if(!module || !*module) module = "[unset]"; + + dictionary_set_advanced(ctl->alerts.by_module, + module, + -1, + NULL, + sizeof(struct alert_by_x_entry), + rcl); + + if (ctl->options & (CONTEXTS_OPTION_ALERTS_WITH_INSTANCES | CONTEXTS_OPTION_ALERTS_WITH_VALUES)) { + char key[20 + 1]; + snprintfz(key, sizeof(key) - 1, "%p", rcl); + + struct sql_alert_instance_v2_entry z = { + .ati = ati, + .tmp = rcl, + }; + dictionary_set(ctl->alerts.alert_instances, key, &z, sizeof(z)); + } + } + rw_spinlock_read_unlock(&st->alerts.spinlock); + } + } + dfe_done(ri); + + return matches != 0; +} + +static void alert_counts_add(struct alert_counts *t, RRDCALC *rc) { + switch(rc->status) { + case RRDCALC_STATUS_CRITICAL: + t->critical++; + break; + + case RRDCALC_STATUS_WARNING: + t->warning++; + break; + + case 
RRDCALC_STATUS_CLEAR: + t->clear++; + break; + + case RRDCALC_STATUS_REMOVED: + case RRDCALC_STATUS_UNINITIALIZED: + break; + + case RRDCALC_STATUS_UNDEFINED: + default: + if(!netdata_double_isnumber(rc->value)) + t->error++; + + break; + } +} + +static void alerts_v2_add(struct alert_v2_entry *t, RRDCALC *rc) { + t->instances++; + + alert_counts_add(&t->counts, rc); + + dictionary_set(t->nodes, rc->rrdset->rrdhost->machine_guid, NULL, 0); + + char key[UUID_STR_LEN + 1]; + uuid_unparse_lower(rc->config.hash_id, key); + dictionary_set(t->configs, key, NULL, 0); +} + +static void alerts_by_x_insert_callback(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data) { + static STRING *silent = NULL; + if(unlikely(!silent)) silent = string_strdupz("silent"); + + struct alert_by_x_entry *b = value; + RRDCALC *rc = data; + if(!rc) { + // prototype + b->prototypes.available++; + } + else { + alert_counts_add(&b->running.counts, rc); + + b->running.total++; + + if (rc->config.recipient == silent) + b->running.silent++; + } +} + +static bool alerts_by_x_conflict_callback(const DICTIONARY_ITEM *item __maybe_unused, void *old_value, void *new_value __maybe_unused, void *data __maybe_unused) { + alerts_by_x_insert_callback(item, old_value, data); + return false; +} + +static void alerts_v2_insert_callback(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data) { + struct rrdcontext_to_json_v2_data *ctl = data; + struct alert_v2_entry *t = value; + RRDCALC *rc = t->tmp; + t->name = rc->config.name; + t->summary = rc->config.summary; // the original summary + t->context = rrdlabels_create(); + t->recipient = rrdlabels_create(); + t->classification = rrdlabels_create(); + t->component = rrdlabels_create(); + t->type = rrdlabels_create(); + if (string_strlen(rc->rrdset->context)) + rrdlabels_add(t->context, string2str(rc->rrdset->context), "yes", RRDLABEL_SRC_AUTO); + if (string_strlen(rc->config.recipient)) + rrdlabels_add(t->recipient, string2str(rc->config.recipient), "yes", RRDLABEL_SRC_AUTO); + if (string_strlen(rc->config.classification)) + rrdlabels_add(t->classification, string2str(rc->config.classification), "yes", RRDLABEL_SRC_AUTO); + if (string_strlen(rc->config.component)) + rrdlabels_add(t->component, string2str(rc->config.component), "yes", RRDLABEL_SRC_AUTO); + if (string_strlen(rc->config.type)) + rrdlabels_add(t->type, string2str(rc->config.type), "yes", RRDLABEL_SRC_AUTO); + t->ati = ctl->alerts.ati++; + + t->nodes = dictionary_create(DICT_OPTION_SINGLE_THREADED|DICT_OPTION_VALUE_LINK_DONT_CLONE|DICT_OPTION_NAME_LINK_DONT_CLONE); + t->configs = dictionary_create(DICT_OPTION_SINGLE_THREADED|DICT_OPTION_VALUE_LINK_DONT_CLONE|DICT_OPTION_NAME_LINK_DONT_CLONE); + + alerts_v2_add(t, rc); +} + +static bool alerts_v2_conflict_callback(const DICTIONARY_ITEM *item __maybe_unused, void *old_value, void *new_value, void *data __maybe_unused) { + struct alert_v2_entry *t = old_value, *n = new_value; + RRDCALC *rc = n->tmp; + if (string_strlen(rc->rrdset->context)) + rrdlabels_add(t->context, string2str(rc->rrdset->context), "yes", RRDLABEL_SRC_AUTO); + if (string_strlen(rc->config.recipient)) + rrdlabels_add(t->recipient, string2str(rc->config.recipient), "yes", RRDLABEL_SRC_AUTO); + if (string_strlen(rc->config.classification)) + rrdlabels_add(t->classification, string2str(rc->config.classification), "yes", RRDLABEL_SRC_AUTO); + if (string_strlen(rc->config.component)) + rrdlabels_add(t->component, string2str(rc->config.component), "yes", RRDLABEL_SRC_AUTO); + if 
(string_strlen(rc->config.type)) + rrdlabels_add(t->type, string2str(rc->config.type), "yes", RRDLABEL_SRC_AUTO); + alerts_v2_add(t, rc); + return true; +} + +static void alerts_v2_delete_callback(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) { + struct alert_v2_entry *t = value; + + rrdlabels_destroy(t->context); + rrdlabels_destroy(t->recipient); + rrdlabels_destroy(t->classification); + rrdlabels_destroy(t->component); + rrdlabels_destroy(t->type); + + dictionary_destroy(t->nodes); + dictionary_destroy(t->configs); +} + +struct alert_instances_callback_data { + BUFFER *wb; + struct rrdcontext_to_json_v2_data *ctl; + bool debug; +}; + +static int contexts_v2_alert_instance_to_json_callback(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data) { + struct sql_alert_instance_v2_entry *t = value; + struct alert_instances_callback_data *d = data; + struct rrdcontext_to_json_v2_data *ctl = d->ctl; (void)ctl; + bool debug = d->debug; (void)debug; + BUFFER *wb = d->wb; + + buffer_json_add_array_item_object(wb); + { + buffer_json_member_add_uint64(wb, "ni", t->ni); + + buffer_json_member_add_string(wb, "nm", string2str(t->name)); + buffer_json_member_add_string(wb, "ch", string2str(t->chart_id)); + buffer_json_member_add_string(wb, "ch_n", string2str(t->chart_name)); + + if(ctl->request->options & CONTEXTS_OPTION_ALERTS_WITH_SUMMARY) + buffer_json_member_add_uint64(wb, "ati", t->ati); + + if(ctl->request->options & CONTEXTS_OPTION_ALERTS_WITH_INSTANCES) { + buffer_json_member_add_string(wb, "units", string2str(t->units)); + buffer_json_member_add_string(wb, "fami", string2str(t->family)); + buffer_json_member_add_string(wb, "info", string2str(t->info)); + buffer_json_member_add_string(wb, "sum", string2str(t->summary)); + buffer_json_member_add_string(wb, "ctx", string2str(t->context)); + buffer_json_member_add_string(wb, "st", rrdcalc_status2string(t->status)); + buffer_json_member_add_uuid(wb, "tr_i", t->last_transition_id); + buffer_json_member_add_double(wb, "tr_v", t->last_status_change_value); + buffer_json_member_add_time_t(wb, "tr_t", t->last_status_change); + buffer_json_member_add_uuid(wb, "cfg", t->config_hash_id); + buffer_json_member_add_string(wb, "src", string2str(t->source)); + + buffer_json_member_add_string(wb, "to", string2str(t->recipient)); + buffer_json_member_add_string(wb, "tp", string2str(t->type)); + buffer_json_member_add_string(wb, "cm", string2str(t->component)); + buffer_json_member_add_string(wb, "cl", string2str(t->classification)); + + // Agent specific fields + buffer_json_member_add_uint64(wb, "gi", t->global_id); + // rrdcalc_flags_to_json_array (wb, "flags", t->flags); + } + + if(ctl->request->options & CONTEXTS_OPTION_ALERTS_WITH_VALUES) { + // Netdata Cloud fetched these by querying the agents + buffer_json_member_add_double(wb, "v", t->value); + buffer_json_member_add_time_t(wb, "t", t->last_updated); + } + } + buffer_json_object_close(wb); // alert instance + + return 1; +} + +static void contexts_v2_alerts_by_x_update_prototypes(void *data, STRING *type, STRING *component, STRING *classification, STRING *recipient) { + struct rrdcontext_to_json_v2_data *ctl = data; + + dictionary_set_advanced(ctl->alerts.by_type, string2str(type), (ssize_t)string_strlen(type), NULL, sizeof(struct alert_by_x_entry), NULL); + dictionary_set_advanced(ctl->alerts.by_component, string2str(component), (ssize_t)string_strlen(component), NULL, sizeof(struct alert_by_x_entry), NULL); + 
dictionary_set_advanced(ctl->alerts.by_classification, string2str(classification), (ssize_t)string_strlen(classification), NULL, sizeof(struct alert_by_x_entry), NULL); + dictionary_set_advanced(ctl->alerts.by_recipient, string2str(recipient), (ssize_t)string_strlen(recipient), NULL, sizeof(struct alert_by_x_entry), NULL); +} + +static void contexts_v2_alerts_by_x_to_json(BUFFER *wb, DICTIONARY *dict, const char *key) { + buffer_json_member_add_array(wb, key); + { + struct alert_by_x_entry *b; + dfe_start_read(dict, b) { + buffer_json_add_array_item_object(wb); + { + buffer_json_member_add_string(wb, "name", b_dfe.name); + buffer_json_member_add_uint64(wb, "cr", b->running.counts.critical); + buffer_json_member_add_uint64(wb, "wr", b->running.counts.warning); + buffer_json_member_add_uint64(wb, "cl", b->running.counts.clear); + buffer_json_member_add_uint64(wb, "er", b->running.counts.error); + buffer_json_member_add_uint64(wb, "running", b->running.total); + + buffer_json_member_add_uint64(wb, "running_silent", b->running.silent); + + if(b->prototypes.available) + buffer_json_member_add_uint64(wb, "available", b->prototypes.available); + } + buffer_json_object_close(wb); + } + dfe_done(b); + } + buffer_json_array_close(wb); +} + +static void contexts_v2_alert_instances_to_json(BUFFER *wb, const char *key, struct rrdcontext_to_json_v2_data *ctl, bool debug) { + buffer_json_member_add_array(wb, key); + { + struct alert_instances_callback_data data = { + .wb = wb, + .ctl = ctl, + .debug = debug, + }; + dictionary_walkthrough_rw(ctl->alerts.alert_instances, DICTIONARY_LOCK_READ, + contexts_v2_alert_instance_to_json_callback, &data); + } + buffer_json_array_close(wb); // alerts_instances +} + +void contexts_v2_alerts_to_json(BUFFER *wb, struct rrdcontext_to_json_v2_data *ctl, bool debug) { + if(ctl->request->options & CONTEXTS_OPTION_ALERTS_WITH_SUMMARY) { + buffer_json_member_add_array(wb, "alerts"); + { + struct alert_v2_entry *t; + dfe_start_read(ctl->alerts.summary, t) + { + buffer_json_add_array_item_object(wb); + { + buffer_json_member_add_uint64(wb, "ati", t->ati); + + buffer_json_member_add_array(wb, "ni"); + void *host_guid; + dfe_start_read(t->nodes, host_guid) { + struct contexts_v2_node *cn = dictionary_get(ctl->nodes.dict,host_guid_dfe.name); + buffer_json_add_array_item_int64(wb, (int64_t) cn->ni); + } + dfe_done(host_guid); + buffer_json_array_close(wb); + + buffer_json_member_add_string(wb, "nm", string2str(t->name)); + buffer_json_member_add_string(wb, "sum", string2str(t->summary)); + + buffer_json_member_add_uint64(wb, "cr", t->counts.critical); + buffer_json_member_add_uint64(wb, "wr", t->counts.warning); + buffer_json_member_add_uint64(wb, "cl", t->counts.clear); + buffer_json_member_add_uint64(wb, "er", t->counts.error); + + buffer_json_member_add_uint64(wb, "in", t->instances); + buffer_json_member_add_uint64(wb, "nd", dictionary_entries(t->nodes)); + buffer_json_member_add_uint64(wb, "cfg", dictionary_entries(t->configs)); + + buffer_json_member_add_array(wb, "ctx"); + rrdlabels_key_to_buffer_array_item(t->context, wb); + buffer_json_array_close(wb); // ctx + + buffer_json_member_add_array(wb, "cls"); + rrdlabels_key_to_buffer_array_item(t->classification, wb); + buffer_json_array_close(wb); // classification + + + buffer_json_member_add_array(wb, "cp"); + rrdlabels_key_to_buffer_array_item(t->component, wb); + buffer_json_array_close(wb); // component + + buffer_json_member_add_array(wb, "ty"); + rrdlabels_key_to_buffer_array_item(t->type, wb); + 
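
// Note: the "nd" and "cfg" counters emitted below come from dictionary_entries()
// on dictionaries that carry no values at all: t->nodes and t->configs are used
// purely as sets, deduplicating node GUIDs and config hash IDs per alert name
// (see alerts_v2_add() earlier in this hunk). A standalone sketch of that
// distinct-count-via-set idea (toy linear set, not the Netdata API):

#include <stdio.h>
#include <string.h>

struct str_set { const char *keys[32]; size_t n; };

/* dictionary_set(dict, key, NULL, 0) followed by dictionary_entries()
   is exactly this: insert-if-absent, then count */
static void set_add(struct str_set *s, const char *key) {
    for (size_t i = 0; i < s->n; i++)
        if (!strcmp(s->keys[i], key)) return;  /* already present */
    s->keys[s->n++] = key;
}

int main(void) {
    struct str_set nodes = { .n = 0 };
    set_add(&nodes, "node-a-guid");   /* alert instance on node A */
    set_add(&nodes, "node-a-guid");   /* second instance, same node */
    set_add(&nodes, "node-b-guid");   /* instance on node B */
    printf("nd=%zu\n", nodes.n);      /* -> 2 distinct nodes */
    return 0;
}
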
buffer_json_array_close(wb); // type + + buffer_json_member_add_array(wb, "to"); + rrdlabels_key_to_buffer_array_item(t->recipient, wb); + buffer_json_array_close(wb); // recipient + } + buffer_json_object_close(wb); // alert name + } + dfe_done(t); + } + buffer_json_array_close(wb); // alerts + + health_prototype_metadata_foreach(ctl, contexts_v2_alerts_by_x_update_prototypes); + contexts_v2_alerts_by_x_to_json(wb, ctl->alerts.by_type, "alerts_by_type"); + contexts_v2_alerts_by_x_to_json(wb, ctl->alerts.by_component, "alerts_by_component"); + contexts_v2_alerts_by_x_to_json(wb, ctl->alerts.by_classification, "alerts_by_classification"); + contexts_v2_alerts_by_x_to_json(wb, ctl->alerts.by_recipient, "alerts_by_recipient"); + contexts_v2_alerts_by_x_to_json(wb, ctl->alerts.by_module, "alerts_by_module"); + } + + if(ctl->request->options & (CONTEXTS_OPTION_ALERTS_WITH_INSTANCES | CONTEXTS_OPTION_ALERTS_WITH_VALUES)) { + contexts_v2_alert_instances_to_json(wb, "alert_instances", ctl, debug); + } +} + +static void alert_instances_v2_insert_callback(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data) { + struct rrdcontext_to_json_v2_data *ctl = data; + struct sql_alert_instance_v2_entry *t = value; + RRDCALC *rc = t->tmp; + + t->context = rc->rrdset->context; + t->chart_id = rc->rrdset->id; + t->chart_name = rc->rrdset->name; + t->family = rc->rrdset->family; + t->units = rc->config.units; + t->classification = rc->config.classification; + t->type = rc->config.type; + t->recipient = rc->config.recipient; + t->component = rc->config.component; + t->name = rc->config.name; + t->source = rc->config.source; + t->status = rc->status; + t->flags = rc->run_flags; + t->info = rc->config.info; + t->summary = rc->summary; + t->value = rc->value; + t->last_updated = rc->last_updated; + t->last_status_change = rc->last_status_change; + t->last_status_change_value = rc->last_status_change_value; + t->host = rc->rrdset->rrdhost; + t->alarm_id = rc->id; + t->ni = ctl->nodes.ni; + + uuid_copy(t->config_hash_id, rc->config.hash_id); + health_alarm_log_get_global_id_and_transition_id_for_rrdcalc(rc, &t->global_id, &t->last_transition_id); +} + +static bool alert_instances_v2_conflict_callback(const DICTIONARY_ITEM *item __maybe_unused, void *old_value __maybe_unused, void *new_value __maybe_unused, void *data __maybe_unused) { + internal_fatal(true, "This should never happen!"); + return true; +} + +static void alert_instances_delete_callback(const DICTIONARY_ITEM *item __maybe_unused, void *value __maybe_unused, void *data __maybe_unused) { + ; +} + +static void rrdcontext_v2_set_transition_filter(const char *machine_guid, const char *context, time_t alarm_id, void *data) { + struct rrdcontext_to_json_v2_data *ctl = data; + + if(machine_guid && *machine_guid) { + if(ctl->nodes.scope_pattern) + simple_pattern_free(ctl->nodes.scope_pattern); + + if(ctl->nodes.pattern) + simple_pattern_free(ctl->nodes.pattern); + + ctl->nodes.scope_pattern = string_to_simple_pattern(machine_guid); + ctl->nodes.pattern = NULL; + } + + if(context && *context) { + if(ctl->contexts.scope_pattern) + simple_pattern_free(ctl->contexts.scope_pattern); + + if(ctl->contexts.pattern) + simple_pattern_free(ctl->contexts.pattern); + + ctl->contexts.scope_pattern = string_to_simple_pattern(context); + ctl->contexts.pattern = NULL; + } + + ctl->alerts.alarm_id_filter = alarm_id; +} + +bool rrdcontexts_v2_init_alert_dictionaries(struct rrdcontext_to_json_v2_data *ctl, struct api_v2_contexts_request *req) { + 
if(req->alerts.transition) { + ctl->options |= CONTEXTS_OPTION_ALERTS_WITH_INSTANCES | CONTEXTS_OPTION_ALERTS_WITH_VALUES; + if(!sql_find_alert_transition(req->alerts.transition, rrdcontext_v2_set_transition_filter, ctl)) + return false; + } + + ctl->alerts.summary = dictionary_create_advanced( + DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, + NULL, + sizeof(struct alert_v2_entry)); + + dictionary_register_insert_callback(ctl->alerts.summary, alerts_v2_insert_callback, ctl); + dictionary_register_conflict_callback(ctl->alerts.summary, alerts_v2_conflict_callback, ctl); + dictionary_register_delete_callback(ctl->alerts.summary, alerts_v2_delete_callback, ctl); + + ctl->alerts.by_type = dictionary_create_advanced( + DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, + NULL, + sizeof(struct alert_by_x_entry)); + + dictionary_register_insert_callback(ctl->alerts.by_type, alerts_by_x_insert_callback, NULL); + dictionary_register_conflict_callback(ctl->alerts.by_type, alerts_by_x_conflict_callback, NULL); + + ctl->alerts.by_component = dictionary_create_advanced( + DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, + NULL, + sizeof(struct alert_by_x_entry)); + + dictionary_register_insert_callback(ctl->alerts.by_component, alerts_by_x_insert_callback, NULL); + dictionary_register_conflict_callback(ctl->alerts.by_component, alerts_by_x_conflict_callback, NULL); + + ctl->alerts.by_classification = dictionary_create_advanced( + DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, + NULL, + sizeof(struct alert_by_x_entry)); + + dictionary_register_insert_callback(ctl->alerts.by_classification, alerts_by_x_insert_callback, NULL); + dictionary_register_conflict_callback(ctl->alerts.by_classification, alerts_by_x_conflict_callback, NULL); + + ctl->alerts.by_recipient = dictionary_create_advanced( + DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, + NULL, + sizeof(struct alert_by_x_entry)); + + dictionary_register_insert_callback(ctl->alerts.by_recipient, alerts_by_x_insert_callback, NULL); + dictionary_register_conflict_callback(ctl->alerts.by_recipient, alerts_by_x_conflict_callback, NULL); + + ctl->alerts.by_module = dictionary_create_advanced( + DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, + NULL, + sizeof(struct alert_by_x_entry)); + + dictionary_register_insert_callback(ctl->alerts.by_module, alerts_by_x_insert_callback, NULL); + dictionary_register_conflict_callback(ctl->alerts.by_module, alerts_by_x_conflict_callback, NULL); + + if(ctl->options & (CONTEXTS_OPTION_ALERTS_WITH_INSTANCES | CONTEXTS_OPTION_ALERTS_WITH_VALUES)) { + ctl->alerts.alert_instances = dictionary_create_advanced( + DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, + NULL, sizeof(struct sql_alert_instance_v2_entry)); + + dictionary_register_insert_callback(ctl->alerts.alert_instances, alert_instances_v2_insert_callback, ctl); + dictionary_register_conflict_callback(ctl->alerts.alert_instances, alert_instances_v2_conflict_callback, ctl); + dictionary_register_delete_callback(ctl->alerts.alert_instances, alert_instances_delete_callback, ctl); + } + + return true; +} + +void rrdcontexts_v2_alerts_cleanup(struct rrdcontext_to_json_v2_data *ctl) { + dictionary_destroy(ctl->alerts.summary); + dictionary_destroy(ctl->alerts.alert_instances); + 
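
// Note: rrdcontext_v2_set_transition_filter() above narrows the whole query
// when a specific transition is requested: it frees whatever node/context scope
// patterns the caller supplied and installs patterns built from the
// transition's machine_guid and context. A minimal sketch of that
// free-and-replace ownership idiom (stand-in heap strings, not SIMPLE_PATTERN):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *pattern_from(const char *s) {
    char *p = malloc(strlen(s) + 1);   /* portable strdup stand-in */
    if (p) strcpy(p, s);
    return p;
}

/* replace-in-place: release the old filter, take ownership of the new one */
static void set_scope(char **slot, const char *value) {
    free(*slot);                       /* drop the caller's broader filter */
    *slot = pattern_from(value);
}

int main(void) {
    char *nodes_scope = pattern_from("*");             /* caller: all nodes */
    set_scope(&nodes_scope, "machine-guid-of-node-a"); /* hypothetical GUID */
    printf("scope: %s\n", nodes_scope ? nodes_scope : "(none)");
    free(nodes_scope);
    return 0;
}
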
dictionary_destroy(ctl->alerts.by_type); + dictionary_destroy(ctl->alerts.by_component); + dictionary_destroy(ctl->alerts.by_classification); + dictionary_destroy(ctl->alerts.by_recipient); + dictionary_destroy(ctl->alerts.by_module); +} diff --git a/src/database/contexts/api_v2_contexts_alerts.h b/src/database/contexts/api_v2_contexts_alerts.h new file mode 100644 index 000000000..b7be3f4d9 --- /dev/null +++ b/src/database/contexts/api_v2_contexts_alerts.h @@ -0,0 +1,52 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_API_V2_CONTEXTS_ALERTS_H +#define NETDATA_API_V2_CONTEXTS_ALERTS_H + +#include "internal.h" +#include "api_v2_contexts.h" + +struct alert_transitions_callback_data { + struct rrdcontext_to_json_v2_data *ctl; + BUFFER *wb; + bool debug; + bool only_one_config; + + struct { + SIMPLE_PATTERN *pattern; + DICTIONARY *dict; + } facets[ATF_TOTAL_ENTRIES]; + + uint32_t max_items_to_return; + uint32_t items_to_return; + + uint32_t items_evaluated; + uint32_t items_matched; + + + struct sql_alert_transition_fixed_size *base; // double linked list - last item is base->prev + struct sql_alert_transition_fixed_size *last_added; // the last item added, not the last of the list + + struct { + size_t first; + size_t skips_before; + size_t skips_after; + size_t backwards; + size_t forwards; + size_t prepend; + size_t append; + size_t shifts; + } operations; + + uint32_t configs_added; +}; + +void contexts_v2_alerts_to_json(BUFFER *wb, struct rrdcontext_to_json_v2_data *ctl, bool debug); +bool rrdcontext_matches_alert(struct rrdcontext_to_json_v2_data *ctl, RRDCONTEXT *rc); +void contexts_v2_alert_config_to_json_from_sql_alert_config_data(struct sql_alert_config_data *t, void *data); +void contexts_v2_alert_transitions_to_json(BUFFER *wb, struct rrdcontext_to_json_v2_data *ctl, bool debug); + +bool rrdcontexts_v2_init_alert_dictionaries(struct rrdcontext_to_json_v2_data *ctl, struct api_v2_contexts_request *req); +void rrdcontexts_v2_alerts_cleanup(struct rrdcontext_to_json_v2_data *ctl); + +#endif //NETDATA_API_V2_CONTEXTS_ALERTS_H diff --git a/src/database/contexts/instance.c b/src/database/contexts/instance.c index 5d841bc82..ee6a906e7 100644 --- a/src/database/contexts/instance.c +++ b/src/database/contexts/instance.c @@ -37,6 +37,11 @@ inline STRING *rrdinstance_acquired_units_dup(RRDINSTANCE_ACQUIRED *ria) { inline RRDLABELS *rrdinstance_acquired_labels(RRDINSTANCE_ACQUIRED *ria) { RRDINSTANCE *ri = rrdinstance_acquired_value(ria); + if (rrd_flag_check(ri, RRD_FLAG_OWN_LABELS | RRD_FLAG_DEMAND_LABELS)) { + rrd_flag_clear(ri, RRD_FLAG_DEMAND_LABELS); + load_instance_labels_on_demand(&ri->uuid, ri); + rrdinstance_trigger_updates(ri, __FUNCTION__ ); + } return ri->rrdlabels; } @@ -101,11 +106,11 @@ static void rrdinstance_insert_callback(const DICTIONARY_ITEM *item __maybe_unus if(ri->rrdset) { ri->rrdlabels = ri->rrdset->rrdlabels; - ri->flags &= ~RRD_FLAG_OWN_LABELS; // no need of atomics at the constructor + ri->flags &= ~(RRD_FLAG_OWN_LABELS| RRD_FLAG_DEMAND_LABELS); // no need of atomics at the constructor } else { ri->rrdlabels = rrdlabels_create(); - ri->flags |= RRD_FLAG_OWN_LABELS; // no need of atomics at the constructor + ri->flags |= (RRD_FLAG_OWN_LABELS | RRD_FLAG_DEMAND_LABELS); // no need of atomics at the constructor } if(ri->rrdset) { @@ -213,12 +218,12 @@ static bool rrdinstance_conflict_callback(const DICTIONARY_ITEM *item __maybe_un if(ri->rrdset && rrd_flag_check(ri, RRD_FLAG_OWN_LABELS)) { RRDLABELS *old = ri->rrdlabels; ri->rrdlabels = 
ri->rrdset->rrdlabels; - rrd_flag_clear(ri, RRD_FLAG_OWN_LABELS); + rrd_flag_clear(ri, RRD_FLAG_OWN_LABELS| RRD_FLAG_DEMAND_LABELS); rrdlabels_destroy(old); } else if(!ri->rrdset && !rrd_flag_check(ri, RRD_FLAG_OWN_LABELS)) { ri->rrdlabels = rrdlabels_create(); - rrd_flag_set(ri, RRD_FLAG_OWN_LABELS); + rrd_flag_set(ri, RRD_FLAG_OWN_LABELS | RRD_FLAG_DEMAND_LABELS); } } @@ -431,8 +436,9 @@ inline void rrdinstance_rrdset_is_freed(RRDSET *st) { if(!rrd_flag_check(ri, RRD_FLAG_OWN_LABELS)) { ri->rrdlabels = rrdlabels_create(); - rrdlabels_copy(ri->rrdlabels, st->rrdlabels); - rrd_flag_set(ri, RRD_FLAG_OWN_LABELS); + // Do not load copy labels, just load on demand + //rrdlabels_copy(ri->rrdlabels, st->rrdlabels); + rrd_flag_set(ri, RRD_FLAG_OWN_LABELS | RRD_FLAG_DEMAND_LABELS); } ri->rrdset = NULL; diff --git a/src/database/contexts/internal.h b/src/database/contexts/internal.h index 270c59649..2c69077e5 100644 --- a/src/database/contexts/internal.h +++ b/src/database/contexts/internal.h @@ -39,25 +39,26 @@ typedef enum __attribute__ ((__packed__)) { RRD_FLAG_UPDATED = (1 << 2), // this object has updates to propagate RRD_FLAG_ARCHIVED = (1 << 3), // this object is not currently being collected RRD_FLAG_OWN_LABELS = (1 << 4), // this instance has its own labels - not linked to an RRDSET - RRD_FLAG_LIVE_RETENTION = (1 << 5), // we have got live retention from the database - RRD_FLAG_QUEUED_FOR_HUB = (1 << 6), // this context is currently queued to be dispatched to hub - RRD_FLAG_QUEUED_FOR_PP = (1 << 7), // this context is currently queued to be post-processed - RRD_FLAG_HIDDEN = (1 << 8), // don't expose this to the hub or the API - - RRD_FLAG_UPDATE_REASON_TRIGGERED = (1 << 9), // the update was triggered by the child object - RRD_FLAG_UPDATE_REASON_LOAD_SQL = (1 << 10), // this object has just been loaded from SQL - RRD_FLAG_UPDATE_REASON_NEW_OBJECT = (1 << 11), // this object has just been created - RRD_FLAG_UPDATE_REASON_UPDATED_OBJECT = (1 << 12), // we received an update on this object - RRD_FLAG_UPDATE_REASON_CHANGED_LINKING = (1 << 13), // an instance or a metric switched RRDSET or RRDDIM - RRD_FLAG_UPDATE_REASON_CHANGED_METADATA = (1 << 14), // this context or instance changed uuid, name, units, title, family, chart type, priority, update every, rrd changed flags - RRD_FLAG_UPDATE_REASON_ZERO_RETENTION = (1 << 15), // this object has no retention - RRD_FLAG_UPDATE_REASON_CHANGED_FIRST_TIME_T = (1 << 16), // this object changed its oldest time in the db - RRD_FLAG_UPDATE_REASON_CHANGED_LAST_TIME_T = (1 << 17), // this object change its latest time in the db - RRD_FLAG_UPDATE_REASON_STOPPED_BEING_COLLECTED = (1 << 18), // this object has stopped being collected - RRD_FLAG_UPDATE_REASON_STARTED_BEING_COLLECTED = (1 << 19), // this object has started being collected - RRD_FLAG_UPDATE_REASON_DISCONNECTED_CHILD = (1 << 20), // this context belongs to a host that just disconnected - RRD_FLAG_UPDATE_REASON_UNUSED = (1 << 21), // this context is not used anymore - RRD_FLAG_UPDATE_REASON_DB_ROTATION = (1 << 22), // this context changed because of a db rotation + RRD_FLAG_DEMAND_LABELS = (1 << 5), // this instance should load labels on demand + RRD_FLAG_LIVE_RETENTION = (1 << 6), // we have got live retention from the database + RRD_FLAG_QUEUED_FOR_HUB = (1 << 7), // this context is currently queued to be dispatched to hub + RRD_FLAG_QUEUED_FOR_PP = (1 << 8), // this context is currently queued to be post-processed + RRD_FLAG_HIDDEN = (1 << 9), // don't expose this to the hub or the API + + 
RRD_FLAG_UPDATE_REASON_TRIGGERED = (1 << 10), // the update was triggered by the child object + RRD_FLAG_UPDATE_REASON_LOAD_SQL = (1 << 11), // this object has just been loaded from SQL + RRD_FLAG_UPDATE_REASON_NEW_OBJECT = (1 << 12), // this object has just been created + RRD_FLAG_UPDATE_REASON_UPDATED_OBJECT = (1 << 13), // we received an update on this object + RRD_FLAG_UPDATE_REASON_CHANGED_LINKING = (1 << 14), // an instance or a metric switched RRDSET or RRDDIM + RRD_FLAG_UPDATE_REASON_CHANGED_METADATA = (1 << 15), // this context or instance changed uuid, name, units, title, family, chart type, priority, update every, rrd changed flags + RRD_FLAG_UPDATE_REASON_ZERO_RETENTION = (1 << 16), // this object has no retention + RRD_FLAG_UPDATE_REASON_CHANGED_FIRST_TIME_T = (1 << 17), // this object changed its oldest time in the db + RRD_FLAG_UPDATE_REASON_CHANGED_LAST_TIME_T = (1 << 18), // this object changed its latest time in the db + RRD_FLAG_UPDATE_REASON_STOPPED_BEING_COLLECTED = (1 << 19), // this object has stopped being collected + RRD_FLAG_UPDATE_REASON_STARTED_BEING_COLLECTED = (1 << 20), // this object has started being collected + RRD_FLAG_UPDATE_REASON_DISCONNECTED_CHILD = (1 << 21), // this context belongs to a host that just disconnected + RRD_FLAG_UPDATE_REASON_UNUSED = (1 << 22), // this context is not used anymore + RRD_FLAG_UPDATE_REASON_DB_ROTATION = (1 << 23), // this context changed because of a db rotation RRD_FLAG_MERGED_COLLECTED_RI_TO_RC = (1 << 29), @@ -354,6 +355,7 @@ static inline void rrdcontext_release(RRDCONTEXT_ACQUIRED *rca) { // ---------------------------------------------------------------------------- // Forward definitions +void load_instance_labels_on_demand(nd_uuid_t *uuid, void *data); void rrdcontext_recalculate_context_retention(RRDCONTEXT *rc, RRD_FLAGS reason, bool worker_jobs); void rrdcontext_recalculate_host_retention(RRDHOST *host, RRD_FLAGS reason, bool worker_jobs); diff --git a/src/database/contexts/query_scope.c b/src/database/contexts/query_scope.c index f3bcd0b3f..7485ef3e6 100644 --- a/src/database/contexts/query_scope.c +++ b/src/database/contexts/query_scope.c @@ -18,8 +18,8 @@ ssize_t query_scope_foreach_host(SIMPLE_PATTERN *scope_hosts_sp, SIMPLE_PATTERN uint64_t t_hash = 0; dfe_start_read(rrdhost_root_index, host) { - if(host->node_id) - uuid_unparse_lower(*host->node_id, host_node_id_str); + if(!UUIDiszero(host->node_id)) + uuid_unparse_lower(host->node_id.uuid, host_node_id_str); else host_node_id_str[0] = '\0'; diff --git a/src/database/contexts/query_target.c b/src/database/contexts/query_target.c index 29a9c3e59..b25b8e427 100644 --- a/src/database/contexts/query_target.c +++ b/src/database/contexts/query_target.c @@ -897,9 +897,9 @@ static ssize_t query_node_add(void *data, RRDHOST *host, bool queryable_host) { QUERY_TARGET *qt = qtl->qt; QUERY_NODE *qn = query_node_allocate(qt, host); - if(host->node_id) { + if(!UUIDiszero(host->node_id)) { if(!qtl->host_node_id_str[0]) - uuid_unparse_lower(*host->node_id, qn->node_id); + uuid_unparse_lower(host->node_id.uuid, qn->node_id); else memcpy(qn->node_id, qtl->host_node_id_str, sizeof(qn->node_id)); } @@ -958,7 +958,7 @@ static ssize_t query_node_add(void *data, RRDHOST *host, bool queryable_host) { void query_target_generate_name(QUERY_TARGET *qt) { char options_buffer[100 + 1]; - web_client_api_request_v1_data_options_to_string(options_buffer, 100, qt->request.options); + web_client_api_request_data_vX_options_to_string(options_buffer, 100, qt->request.options); char
resampling_buffer[20 + 1] = ""; if(qt->request.resampling_time > 1) @@ -1035,8 +1035,8 @@ void query_target_generate_name(QUERY_TARGET *qt) { } QUERY_TARGET *query_target_create(QUERY_TARGET_REQUEST *qtr) { - if(!service_running(ABILITY_DATA_QUERIES)) - return NULL; + //if(!service_running(ABILITY_DATA_QUERIES)) + // return NULL; QUERY_TARGET *qt = query_target_get(); @@ -1120,8 +1120,8 @@ QUERY_TARGET *query_target_create(QUERY_TARGET_REQUEST *qtr) { } if(host) { - if(host->node_id) - uuid_unparse_lower(*host->node_id, qtl.host_node_id_str); + if(!UUIDiszero(host->node_id)) + uuid_unparse_lower(host->node_id.uuid, qtl.host_node_id_str); else qtl.host_node_id_str[0] = '\0'; diff --git a/src/database/contexts/rrdcontext.c b/src/database/contexts/rrdcontext.c index f755e1f7e..a98bc98ef 100644 --- a/src/database/contexts/rrdcontext.c +++ b/src/database/contexts/rrdcontext.c @@ -198,21 +198,16 @@ int rrdcontext_foreach_instance_with_rrdset_in_context(RRDHOST *host, const char // ---------------------------------------------------------------------------- // ACLK interface -static bool rrdhost_check_our_claim_id(const char *claim_id) { - if(!localhost->aclk_state.claimed_id) return false; - return (strcasecmp(claim_id, localhost->aclk_state.claimed_id) == 0) ? true : false; -} - void rrdcontext_hub_checkpoint_command(void *ptr) { struct ctxs_checkpoint *cmd = ptr; - if(!rrdhost_check_our_claim_id(cmd->claim_id)) { + if(!claim_id_matches(cmd->claim_id)) { + CLAIM_ID claim_id = claim_id_get(); nd_log(NDLS_DAEMON, NDLP_WARNING, "RRDCONTEXT: received checkpoint command for claim_id '%s', node id '%s', " "but this is not our claim id. Ours '%s', received '%s'. Ignoring command.", cmd->claim_id, cmd->node_id, - localhost->aclk_state.claimed_id?localhost->aclk_state.claimed_id:"NOT SET", - cmd->claim_id); + claim_id.str, cmd->claim_id); return; } @@ -245,11 +240,10 @@ void rrdcontext_hub_checkpoint_command(void *ptr) { "Sending snapshot of all contexts.", cmd->version_hash, rrdhost_hostname(host), our_version_hash); -#ifdef ENABLE_ACLK // prepare the snapshot - char uuid[UUID_STR_LEN]; - uuid_unparse_lower(*host->node_id, uuid); - contexts_snapshot_t bundle = contexts_snapshot_new(cmd->claim_id, uuid, our_version_hash); + char uuid_str[UUID_STR_LEN]; + uuid_unparse_lower(host->node_id.uuid, uuid_str); + contexts_snapshot_t bundle = contexts_snapshot_new(cmd->claim_id, uuid_str, our_version_hash); // do a deep scan on every metric of the host to make sure all our data are updated rrdcontext_recalculate_host_retention(host, RRD_FLAG_NONE, false); @@ -262,7 +256,6 @@ void rrdcontext_hub_checkpoint_command(void *ptr) { // send it aclk_send_contexts_snapshot(bundle); -#endif } nd_log(NDLS_DAEMON, NDLP_DEBUG, @@ -271,7 +264,7 @@ void rrdcontext_hub_checkpoint_command(void *ptr) { rrdhost_flag_set(host, RRDHOST_FLAG_ACLK_STREAM_CONTEXTS); char node_str[UUID_STR_LEN]; - uuid_unparse_lower(*host->node_id, node_str); + uuid_unparse_lower(host->node_id.uuid, node_str); nd_log(NDLS_ACCESS, NDLP_DEBUG, "ACLK REQ [%s (%s)]: STREAM CONTEXTS ENABLED", node_str, rrdhost_hostname(host)); @@ -280,13 +273,13 @@ void rrdcontext_hub_checkpoint_command(void *ptr) { void rrdcontext_hub_stop_streaming_command(void *ptr) { struct stop_streaming_ctxs *cmd = ptr; - if(!rrdhost_check_our_claim_id(cmd->claim_id)) { + if(!claim_id_matches(cmd->claim_id)) { + CLAIM_ID claim_id = claim_id_get(); nd_log(NDLS_DAEMON, NDLP_WARNING, "RRDCONTEXT: received stop streaming command for claim_id '%s', node id '%s', " "but this is not our claim 
id. Ours '%s', received '%s'. Ignoring command.", cmd->claim_id, cmd->node_id, - localhost->aclk_state.claimed_id?localhost->aclk_state.claimed_id:"NOT SET", - cmd->claim_id); + claim_id.str, cmd->claim_id); return; } diff --git a/src/database/contexts/rrdcontext.h b/src/database/contexts/rrdcontext.h index 9fea55d38..0906329bc 100644 --- a/src/database/contexts/rrdcontext.h +++ b/src/database/contexts/rrdcontext.h @@ -623,10 +623,10 @@ struct api_v2_contexts_request { char *contexts; char *q; - CONTEXTS_V2_OPTIONS options; + CONTEXTS_OPTIONS options; struct { - CONTEXTS_V2_ALERT_STATUS status; + CONTEXTS_ALERT_STATUS status; char *alert; char *transition; uint32_t last; diff --git a/src/database/contexts/worker.c b/src/database/contexts/worker.c index 6012c14f5..4ffa92139 100644 --- a/src/database/contexts/worker.c +++ b/src/database/contexts/worker.c @@ -24,6 +24,10 @@ static void rrdinstance_load_clabel(SQL_CLABEL_DATA *sld, void *data) { rrdlabels_add(ri->rrdlabels, sld->label_key, sld->label_value, sld->label_source); } +void load_instance_labels_on_demand(nd_uuid_t *uuid, void *data) { + ctx_get_label_list(uuid, rrdinstance_load_clabel, data); +} + static void rrdinstance_load_dimension(SQL_DIMENSION_DATA *sd, void *data) { RRDINSTANCE *ri = data; @@ -73,7 +77,6 @@ static void rrdinstance_load_chart_callback(SQL_CHART_DATA *sc, void *data) { RRDINSTANCE *ri = rrdinstance_acquired_value(ria); ctx_get_dimension_list(&ri->uuid, rrdinstance_load_dimension, ri); - ctx_get_label_list(&ri->uuid, rrdinstance_load_clabel, ri); rrdinstance_trigger_updates(ri, __FUNCTION__ ); rrdinstance_release(ria); rrdcontext_release(rca); @@ -99,8 +102,11 @@ void rrdhost_load_rrdcontext_data(RRDHOST *host) { if(host->rrdctx.contexts) return; rrdhost_create_rrdcontexts(host); - ctx_get_context_list(&host->host_uuid, rrdcontext_load_context_callback, host); - ctx_get_chart_list(&host->host_uuid, rrdinstance_load_chart_callback, host); + if (host->rrd_memory_mode != RRD_MEMORY_MODE_DBENGINE) + return; + + ctx_get_context_list(&host->host_id.uuid, rrdcontext_load_context_callback, host); + ctx_get_chart_list(&host->host_id.uuid, rrdinstance_load_chart_callback, host); RRDCONTEXT *rc; dfe_start_read(host->rrdctx.contexts, rc) { @@ -173,6 +179,8 @@ static void rrdhost_update_cached_retention(RRDHOST *host, time_t first_time_s, spinlock_lock(&host->retention.spinlock); + time_t old_first_time_s = host->retention.first_time_s; + if(global) { host->retention.first_time_s = first_time_s; host->retention.last_time_s = last_time_s; @@ -185,7 +193,12 @@ static void rrdhost_update_cached_retention(RRDHOST *host, time_t first_time_s, host->retention.last_time_s = last_time_s; } + bool stream_path_update_required = old_first_time_s != host->retention.first_time_s; + spinlock_unlock(&host->retention.spinlock); + + if(stream_path_update_required) + stream_path_retention_updated(host); } void rrdcontext_recalculate_context_retention(RRDCONTEXT *rc, RRD_FLAGS reason, bool worker_jobs) { @@ -348,8 +361,11 @@ void rrdcontext_delete_from_sql_unsafe(RRDCONTEXT *rc) { rc->hub.units = string2str(rc->units); rc->hub.family = string2str(rc->family); + if (rc->rrdhost->rrd_memory_mode != RRD_MEMORY_MODE_DBENGINE) + return; + // delete it from SQL - if(ctx_delete_context(&rc->rrdhost->host_uuid, &rc->hub) != 0) + if(ctx_delete_context(&rc->rrdhost->host_id.uuid, &rc->hub) != 0) netdata_log_error("RRDCONTEXT: failed to delete context '%s' version %"PRIu64" from SQL.", rc->hub.id, rc->hub.version); } @@ -818,7 +834,6 @@ void 
rrdcontext_message_send_unsafe(RRDCONTEXT *rc, bool snapshot __maybe_unused rc->hub.last_time_s = rrd_flag_is_collected(rc) ? 0 : rc->last_time_s; rc->hub.deleted = rrd_flag_is_deleted(rc) ? true : false; -#ifdef ENABLE_ACLK struct context_updated message = { .id = rc->hub.id, .version = rc->hub.version, @@ -840,15 +855,19 @@ void rrdcontext_message_send_unsafe(RRDCONTEXT *rc, bool snapshot __maybe_unused else contexts_updated_add_ctx_update(bundle, &message); } -#endif // store it to SQL if(rrd_flag_is_deleted(rc)) rrdcontext_delete_from_sql_unsafe(rc); - else if (ctx_store_context(&rc->rrdhost->host_uuid, &rc->hub) != 0) - netdata_log_error("RRDCONTEXT: failed to save context '%s' version %"PRIu64" to SQL.", rc->hub.id, rc->hub.version); + else { + if (rc->rrdhost->rrd_memory_mode != RRD_MEMORY_MODE_DBENGINE) + return; + if (ctx_store_context(&rc->rrdhost->host_id.uuid, &rc->hub) != 0) + netdata_log_error( + "RRDCONTEXT: failed to save context '%s' version %" PRIu64 " to SQL.", rc->hub.id, rc->hub.version); + } } static bool check_if_cloud_version_changed_unsafe(RRDCONTEXT *rc, bool sending __maybe_unused) { @@ -956,7 +975,7 @@ static void rrdcontext_dequeue_from_hub_queue(RRDCONTEXT *rc) { static void rrdcontext_dispatch_queued_contexts_to_hub(RRDHOST *host, usec_t now_ut) { // check if we have received a streaming command for this host - if(!host->node_id || !rrdhost_flag_check(host, RRDHOST_FLAG_ACLK_STREAM_CONTEXTS) || !aclk_connected || !host->rrdctx.hub_queue) + if(UUIDiszero(host->node_id) || !rrdhost_flag_check(host, RRDHOST_FLAG_ACLK_STREAM_CONTEXTS) || !aclk_online_for_contexts() || !host->rrdctx.hub_queue) return; // check if there are queued items to send @@ -975,9 +994,9 @@ static void rrdcontext_dispatch_queued_contexts_to_hub(RRDHOST *host, usec_t now worker_is_busy(WORKER_JOB_QUEUED); usec_t dispatch_ut = rrdcontext_calculate_queued_dispatch_time_ut(rc, now_ut); - char *claim_id = get_agent_claimid(); + CLAIM_ID claim_id = claim_id_get(); - if(unlikely(now_ut >= dispatch_ut) && claim_id) { + if(unlikely(now_ut >= dispatch_ut) && claim_id_is_set(claim_id)) { worker_is_busy(WORKER_JOB_CHECK); rrdcontext_lock(rc); @@ -985,15 +1004,13 @@ static void rrdcontext_dispatch_queued_contexts_to_hub(RRDHOST *host, usec_t now if(check_if_cloud_version_changed_unsafe(rc, true)) { worker_is_busy(WORKER_JOB_SEND); -#ifdef ENABLE_ACLK if(!bundle) { // prepare the bundle to send the messages - char uuid[UUID_STR_LEN]; - uuid_unparse_lower(*host->node_id, uuid); + char uuid_str[UUID_STR_LEN]; + uuid_unparse_lower(host->node_id.uuid, uuid_str); - bundle = contexts_updated_new(claim_id, uuid, 0, now_ut); + bundle = contexts_updated_new(claim_id.str, uuid_str, 0, now_ut); } -#endif // update the hub data of the context, give a new version, pack the message // and save an update to SQL rrdcontext_message_send_unsafe(rc, false, bundle); @@ -1030,11 +1047,9 @@ static void rrdcontext_dispatch_queued_contexts_to_hub(RRDHOST *host, usec_t now else rrdcontext_unlock(rc); } - freez(claim_id); } dfe_done(rc); -#ifdef ENABLE_ACLK if(service_running(SERVICE_CONTEXT) && bundle) { // we have a bundle to send messages @@ -1046,7 +1061,6 @@ static void rrdcontext_dispatch_queued_contexts_to_hub(RRDHOST *host, usec_t now } else if(bundle) contexts_updated_delete(bundle); -#endif } @@ -1085,12 +1099,11 @@ void *rrdcontext_main(void *ptr) { worker_register_job_custom_metric(WORKER_JOB_PP_QUEUE_SIZE, "post processing queue size", "contexts", WORKER_METRIC_ABSOLUTE); heartbeat_t hb; - heartbeat_init(&hb); - 
usec_t step = RRDCONTEXT_WORKER_THREAD_HEARTBEAT_USEC; + heartbeat_init(&hb, RRDCONTEXT_WORKER_THREAD_HEARTBEAT_USEC); while (service_running(SERVICE_CONTEXT)) { worker_is_idle(); - heartbeat_next(&hb, step); + heartbeat_next(&hb); if(unlikely(!service_running(SERVICE_CONTEXT))) break; diff --git a/src/database/engine/cache.c b/src/database/engine/cache.c index a03df4676..f47674764 100644 --- a/src/database/engine/cache.c +++ b/src/database/engine/cache.c @@ -1786,7 +1786,7 @@ PGC *pgc_create(const char *name, sizeof(PGC_PAGE) + cache->config.additional_bytes_per_page, 0, 16384, - aral_statistics(pgc_section_pages_aral), + aral_get_statistics(pgc_section_pages_aral), NULL, NULL, false, false); } #endif @@ -1797,7 +1797,7 @@ PGC *pgc_create(const char *name, } struct aral_statistics *pgc_aral_statistics(void) { - return aral_statistics(pgc_section_pages_aral); + return aral_get_statistics(pgc_section_pages_aral); } size_t pgc_aral_structures(void) { @@ -2366,7 +2366,7 @@ void *unittest_stress_test_collector(void *ptr) { time_t start_time_t = pgc_uts.first_time_t + 1; heartbeat_t hb; - heartbeat_init(&hb); + heartbeat_init(&hb, pgc_uts.time_per_collection_ut); while(!__atomic_load_n(&pgc_uts.stop, __ATOMIC_RELAXED)) { // netdata_log_info("COLLECTOR %zu: collecting metrics %zu to %zu, from %ld to %lu", id, metric_start, metric_end, start_time_t, start_time_t + pgc_uts.points_per_page); @@ -2393,7 +2393,7 @@ void *unittest_stress_test_collector(void *ptr) { time_t end_time_t = start_time_t + (time_t)pgc_uts.points_per_page; while(++start_time_t <= end_time_t && !__atomic_load_n(&pgc_uts.stop, __ATOMIC_RELAXED)) { - heartbeat_next(&hb, pgc_uts.time_per_collection_ut); + heartbeat_next(&hb); for (size_t i = metric_start; i < metric_end; i++) { if(pgc_uts.metrics[i]) @@ -2480,9 +2480,9 @@ void *unittest_stress_test_queries(void *ptr) { void *unittest_stress_test_service(void *ptr) { heartbeat_t hb; - heartbeat_init(&hb); + heartbeat_init(&hb, USEC_PER_SEC); while(!__atomic_load_n(&pgc_uts.stop, __ATOMIC_RELAXED)) { - heartbeat_next(&hb, 1 * USEC_PER_SEC); + heartbeat_next(&hb); pgc_flush_pages(pgc_uts.cache, 1000); pgc_evict_pages(pgc_uts.cache, 0, 0); @@ -2545,7 +2545,7 @@ void unittest_stress_test(void) { } heartbeat_t hb; - heartbeat_init(&hb); + heartbeat_init(&hb, USEC_PER_SEC); struct { size_t entries; @@ -2578,7 +2578,7 @@ void unittest_stress_test(void) { } stats = {}, old_stats = {}; for(int i = 0; i < 86400 ;i++) { - heartbeat_next(&hb, 1 * USEC_PER_SEC); + heartbeat_next(&hb); old_stats = stats; stats.entries = __atomic_load_n(&pgc_uts.cache->stats.entries, __ATOMIC_RELAXED); diff --git a/src/database/engine/cache.h b/src/database/engine/cache.h index b6f81bcc2..ef9652028 100644 --- a/src/database/engine/cache.h +++ b/src/database/engine/cache.h @@ -248,4 +248,15 @@ struct aral_statistics *pgc_aral_statistics(void); size_t pgc_aral_structures(void); size_t pgc_aral_overhead(void); +static inline size_t indexing_partition(Word_t ptr, Word_t modulo) __attribute__((const)); +static inline size_t indexing_partition(Word_t ptr, Word_t modulo) { +#ifdef ENV64BIT + uint64_t hash = murmur64(ptr); + return hash % modulo; +#else + uint32_t hash = murmur32(ptr); + return hash % modulo; +#endif +} + #endif // DBENGINE_CACHE_H diff --git a/src/database/engine/datafile.c b/src/database/engine/datafile.c index 35c047722..7bf9487f2 100644 --- a/src/database/engine/datafile.c +++ b/src/database/engine/datafile.c @@ -66,7 +66,8 @@ void datafile_release(struct rrdengine_datafile *df, 
DATAFILE_ACQUIRE_REASONS re spinlock_unlock(&df->users.spinlock); } -bool datafile_acquire_for_deletion(struct rrdengine_datafile *df) { +bool datafile_acquire_for_deletion(struct rrdengine_datafile *df, bool is_shutdown) +{ bool can_be_deleted = false; spinlock_lock(&df->users.spinlock); @@ -107,7 +108,7 @@ bool datafile_acquire_for_deletion(struct rrdengine_datafile *df) { if(!df->users.time_to_evict) { // first time we did the above - df->users.time_to_evict = now_s + 120; + df->users.time_to_evict = now_s + (is_shutdown ? DATAFILE_DELETE_TIMEOUT_SHORT : DATAFILE_DELETE_TIMEOUT_LONG); internal_error(true, "DBENGINE: datafile %u of tier %d is not used by any open cache pages, " "but it has %u lockers (oc:%u, pd:%u), " "%zu clean and %zu hot open cache pages " @@ -572,8 +573,8 @@ void finalize_data_files(struct rrdengine_instance *ctx) struct rrdengine_journalfile *journalfile = datafile->journalfile; logged = false; - size_t iterations = 100; - while(!datafile_acquire_for_deletion(datafile) && datafile != ctx->datafiles.first->prev && --iterations > 0) { + size_t iterations = 10; + while(!datafile_acquire_for_deletion(datafile, true) && datafile != ctx->datafiles.first->prev && --iterations > 0) { if(!logged) { netdata_log_info("Waiting to acquire data file %u of tier %d to close it...", datafile->fileno, ctx->config.tier); logged = true; diff --git a/src/database/engine/datafile.h b/src/database/engine/datafile.h index 569f1b0a2..843cb8c1e 100644 --- a/src/database/engine/datafile.h +++ b/src/database/engine/datafile.h @@ -24,6 +24,11 @@ struct rrdengine_instance; #define MAX_DATAFILES (65536 * 4) /* Supports up to 64TiB for now */ #define TARGET_DATAFILES (50) +// When trying to acquire a datafile for deletion and an attempt to evict pages is completed +// the acquire for deletion will return true after this timeout +#define DATAFILE_DELETE_TIMEOUT_SHORT (1) +#define DATAFILE_DELETE_TIMEOUT_LONG (120) + typedef enum __attribute__ ((__packed__)) { DATAFILE_ACQUIRE_OPEN_CACHE = 0, DATAFILE_ACQUIRE_PAGE_DETAILS, @@ -72,7 +77,7 @@ struct rrdengine_datafile { bool datafile_acquire(struct rrdengine_datafile *df, DATAFILE_ACQUIRE_REASONS reason); void datafile_release(struct rrdengine_datafile *df, DATAFILE_ACQUIRE_REASONS reason); -bool datafile_acquire_for_deletion(struct rrdengine_datafile *df); +bool datafile_acquire_for_deletion(struct rrdengine_datafile *df, bool is_shutdown); void datafile_list_insert(struct rrdengine_instance *ctx, struct rrdengine_datafile *datafile, bool having_lock); void datafile_list_delete_unsafe(struct rrdengine_instance *ctx, struct rrdengine_datafile *datafile); diff --git a/src/database/engine/dbengine-stresstest.c b/src/database/engine/dbengine-stresstest.c index 1d978cd52..0447bcf33 100644 --- a/src/database/engine/dbengine-stresstest.c +++ b/src/database/engine/dbengine-stresstest.c @@ -22,13 +22,13 @@ static RRDHOST *dbengine_rrdhost_find_or_create(char *name) { default_rrd_history_entries, RRD_MEMORY_MODE_DBENGINE, health_plugin_enabled(), - default_rrdpush_enabled, - default_rrdpush_destination, - default_rrdpush_api_key, - default_rrdpush_send_charts_matching, - default_rrdpush_enable_replication, - default_rrdpush_seconds_to_replicate, - default_rrdpush_replication_step, + stream_conf_send_enabled, + stream_conf_send_destination, + stream_conf_send_api_key, + stream_conf_send_charts_matching, + stream_conf_replication_enabled, + stream_conf_replication_period, + stream_conf_replication_step, NULL, 0 ); diff --git 
a/src/database/engine/dbengine-unittest.c b/src/database/engine/dbengine-unittest.c index cfe038df6..755336101 100644 --- a/src/database/engine/dbengine-unittest.c +++ b/src/database/engine/dbengine-unittest.c @@ -108,13 +108,13 @@ static RRDHOST *dbengine_rrdhost_find_or_create(char *name) { default_rrd_history_entries, RRD_MEMORY_MODE_DBENGINE, health_plugin_enabled(), - default_rrdpush_enabled, - default_rrdpush_destination, - default_rrdpush_api_key, - default_rrdpush_send_charts_matching, - default_rrdpush_enable_replication, - default_rrdpush_seconds_to_replicate, - default_rrdpush_replication_step, + stream_conf_send_enabled, + stream_conf_send_destination, + stream_conf_send_api_key, + stream_conf_send_charts_matching, + stream_conf_replication_enabled, + stream_conf_replication_period, + stream_conf_replication_step, NULL, 0 ); diff --git a/src/database/engine/journalfile.c b/src/database/engine/journalfile.c index 4ea988d64..b120ce3c3 100644 --- a/src/database/engine/journalfile.c +++ b/src/database/engine/journalfile.c @@ -1,5 +1,4 @@ // SPDX-License-Identifier: GPL-3.0-or-later -#include "libnetdata/bitmap64.h" #include "rrdengine.h" static void after_extent_write_journalfile_v1_io(uv_fs_t* req) diff --git a/src/database/engine/pdc.c b/src/database/engine/pdc.c index 28a83e2bc..da5dbd7d8 100644 --- a/src/database/engine/pdc.c +++ b/src/database/engine/pdc.c @@ -184,12 +184,12 @@ static struct { .allocated = 0, .allocated_bytes = 0, }, - .max_size = MAX_PAGES_PER_EXTENT * RRDENG_BLOCK_SIZE, + .max_size = MAX_EXTENT_UNCOMPRESSED_SIZE }; void extent_buffer_init(void) { - size_t max_extent_uncompressed = MAX_PAGES_PER_EXTENT * RRDENG_BLOCK_SIZE; - size_t max_size = (size_t)LZ4_compressBound(MAX_PAGES_PER_EXTENT * RRDENG_BLOCK_SIZE); + size_t max_extent_uncompressed = MAX_EXTENT_UNCOMPRESSED_SIZE; + size_t max_size = (size_t)LZ4_compressBound(MAX_EXTENT_UNCOMPRESSED_SIZE); if(max_size < max_extent_uncompressed) max_size = max_extent_uncompressed; @@ -1010,7 +1010,7 @@ static bool epdl_populate_pages_from_extent_data( uncompressed_payload_length += header->descr[i].page_length; } - if(unlikely(uncompressed_payload_length > MAX_PAGES_PER_EXTENT * RRDENG_BLOCK_SIZE)) + if(unlikely(uncompressed_payload_length > MAX_EXTENT_UNCOMPRESSED_SIZE)) have_read_error = true; if(likely(!have_read_error)) { diff --git a/src/database/engine/rrdengine.c b/src/database/engine/rrdengine.c index a989877fc..78ad873f7 100644 --- a/src/database/engine/rrdengine.c +++ b/src/database/engine/rrdengine.c @@ -1218,7 +1218,7 @@ void datafile_delete(struct rrdengine_instance *ctx, struct rrdengine_datafile * if(worker) worker_is_busy(UV_EVENT_DBENGINE_DATAFILE_DELETE_WAIT); - bool datafile_got_for_deletion = datafile_acquire_for_deletion(datafile); + bool datafile_got_for_deletion = datafile_acquire_for_deletion(datafile, false); if (update_retention) update_metrics_first_time_s(ctx, datafile, datafile->next, worker); @@ -1227,7 +1227,7 @@ void datafile_delete(struct rrdengine_instance *ctx, struct rrdengine_datafile * if(worker) worker_is_busy(UV_EVENT_DBENGINE_DATAFILE_DELETE_WAIT); - datafile_got_for_deletion = datafile_acquire_for_deletion(datafile); + datafile_got_for_deletion = datafile_acquire_for_deletion(datafile, false); if (!datafile_got_for_deletion) { netdata_log_info("DBENGINE: waiting for data file '%s/" @@ -2075,6 +2075,6 @@ void dbengine_event_loop(void* arg) { } nd_log(NDLS_DAEMON, NDLP_DEBUG, "Shutting down dbengine thread"); - uv_loop_close(&main->loop); + (void) uv_loop_close(&main->loop); 
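
// Note on the pdc.c hunk above: the shared extent buffer must be able to hold
// either a raw extent or its LZ4-compressed form, whichever is larger, which is
// why max_size is LZ4_compressBound() clamped to at least the uncompressed
// limit (LZ4 can expand incompressible input slightly). A standalone sketch of
// that sizing rule (illustrative constant, not the real
// MAX_EXTENT_UNCOMPRESSED_SIZE; assumes liblz4 is available):

#include <stdio.h>
#include <stdlib.h>
#include <lz4.h>

int main(void) {
    const int max_uncompressed = 109 * 4096;   /* illustrative only */

    /* worst-case compressed size can exceed the input size */
    int max_size = LZ4_compressBound(max_uncompressed);
    if (max_size < max_uncompressed)
        max_size = max_uncompressed;           /* buffer also stores raw data */

    printf("scratch buffer: %d bytes (uncompressed limit %d)\n",
           max_size, max_uncompressed);

    unsigned char *buf = malloc((size_t)max_size);
    if (!buf) return 1;
    free(buf);
    return 0;
}
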
worker_unregister(); } diff --git a/src/database/engine/rrdengine.h b/src/database/engine/rrdengine.h index 37ea92b8a..0a22477ac 100644 --- a/src/database/engine/rrdengine.h +++ b/src/database/engine/rrdengine.h @@ -28,7 +28,10 @@ struct rrdengine_instance; struct rrdeng_cmd; #define MAX_PAGES_PER_EXTENT (109) /* TODO: can go higher only when journal supports bigger than 4KiB transactions */ -#define DEFAULT_PAGES_PER_EXTENT (64) +#define DEFAULT_PAGES_PER_EXTENT (109) + +#define MAX_EXTENT_UNCOMPRESSED_SIZE (MAX_PAGES_PER_EXTENT * (RRDENG_BLOCK_SIZE + RRDENG_GORILLA_32BIT_BUFFER_SIZE)) + #define RRDENG_FILE_NUMBER_SCAN_TMPL "%1u-%10u" #define RRDENG_FILE_NUMBER_PRINT_TMPL "%1.1u-%10.10u" diff --git a/src/database/ram/README.md b/src/database/ram/README.md index 6ece6d0f4..0a35e6111 100644 --- a/src/database/ram/README.md +++ b/src/database/ram/README.md @@ -1,11 +1 @@ - - # RAM database modes diff --git a/src/database/rrd.h b/src/database/rrd.h index bd31e21e1..c914b783d 100644 --- a/src/database/rrd.h +++ b/src/database/rrd.h @@ -98,14 +98,55 @@ struct ml_metrics_statistics { size_t silenced; }; + +// use this for configuration flags, not for state control +// flags are set/unset in a manner that is not thread safe +// and may lead to missing information. +typedef enum __attribute__ ((__packed__)) rrdset_flags { + RRDSET_FLAG_DEBUG = (1 << 2), // enables or disables debugging for a chart + RRDSET_FLAG_OBSOLETE = (1 << 3), // this is marked by the collector/module as obsolete + RRDSET_FLAG_EXPORTING_SEND = (1 << 4), // if set, this chart should be sent to Prometheus web API and external databases + RRDSET_FLAG_EXPORTING_IGNORE = (1 << 5), // if set, this chart should not be sent to Prometheus web API and external databases + + RRDSET_FLAG_UPSTREAM_SEND = (1 << 6), // if set, this chart should be sent upstream (streaming) + RRDSET_FLAG_UPSTREAM_IGNORE = (1 << 7), // if set, this chart should not be sent upstream (streaming) + + RRDSET_FLAG_STORE_FIRST = (1 << 8), // if set, do not eliminate the first collection during interpolation + RRDSET_FLAG_HETEROGENEOUS = (1 << 9), // if set, the chart is not homogeneous (dimensions in it have multiple algorithms, multipliers or dividers) + RRDSET_FLAG_HOMOGENEOUS_CHECK = (1 << 10), // if set, the chart should be checked to determine if the dimensions are homogeneous + RRDSET_FLAG_HIDDEN = (1 << 11), // if set, do not show this chart on the dashboard, but use it for exporting + RRDSET_FLAG_SYNC_CLOCK = (1 << 12), // if set, microseconds on next data collection will be ignored (the chart will be synced to now) + RRDSET_FLAG_OBSOLETE_DIMENSIONS = (1 << 13), // this is marked by the collector/module when a chart has obsolete dimensions + + RRDSET_FLAG_METADATA_UPDATE = (1 << 14), // Mark that metadata needs to be stored + RRDSET_FLAG_ANOMALY_DETECTION = (1 << 15), // flag to identify anomaly detection charts. 
+ RRDSET_FLAG_INDEXED_ID = (1 << 16), // the rrdset is indexed by its id + RRDSET_FLAG_INDEXED_NAME = (1 << 17), // the rrdset is indexed by its name + + RRDSET_FLAG_PENDING_HEALTH_INITIALIZATION = (1 << 18), + + RRDSET_FLAG_SENDER_REPLICATION_IN_PROGRESS = (1 << 19), // the sending side has replication in progress + RRDSET_FLAG_SENDER_REPLICATION_FINISHED = (1 << 20), // the sending side has completed replication + RRDSET_FLAG_RECEIVER_REPLICATION_IN_PROGRESS = (1 << 21), // the receiving side has replication in progress + RRDSET_FLAG_RECEIVER_REPLICATION_FINISHED = (1 << 22), // the receiving side has completed replication + + RRDSET_FLAG_UPSTREAM_SEND_VARIABLES = (1 << 23), // a custom variable has been updated and needs to be exposed to parent + + RRDSET_FLAG_COLLECTION_FINISHED = (1 << 24), // when set, data collection is not available for this chart + + RRDSET_FLAG_HAS_RRDCALC_LINKED = (1 << 25), // this chart has at least one rrdcalc linked +} RRDSET_FLAGS; + #include "daemon/common.h" #include "web/api/queries/query.h" #include "web/api/queries/rrdr.h" #include "health/rrdvar.h" #include "health/rrdcalc.h" #include "rrdlabels.h" +#include "streaming/stream-capabilities.h" +#include "streaming/stream-path.h" #include "streaming/rrdpush.h" -#include "aclk/aclk_rrdhost_state.h" +//#include "aclk/aclk_rrdhost_state.h" #include "sqlite/sqlite_health.h" typedef struct storage_query_handle STORAGE_QUERY_HANDLE; @@ -268,7 +309,7 @@ void rrdr_fill_tier_gap_from_smaller_tiers(RRDDIM *rd, size_t tier, time_t now_s // RRD DIMENSION - this is a metric struct rrddim { - nd_uuid_t metric_uuid; // global UUID for this metric (unique_across hosts) + nd_uuid_t metric_uuid; // global UUID for this metric (unique_across hosts) // ------------------------------------------------------------------------ // dimension definition @@ -282,15 +323,16 @@ struct rrddim { int32_t multiplier; // the multiplier of the collected values int32_t divisor; // the divider of the collected values + int32_t dimension_id; // Dimension id // ------------------------------------------------------------------------ // operational state members struct rrdset *rrdset; - rrd_ml_dimension_t *ml_dimension; // machine learning data about this dimension + rrd_ml_dimension_t *ml_dimension; // machine learning data about this dimension struct { - RRDMETRIC_ACQUIRED *rrdmetric; // the rrdmetric of this dimension + RRDMETRIC_ACQUIRED *rrdmetric; // the rrdmetric of this dimension bool collected; } rrdcontexts; @@ -661,47 +703,6 @@ STORAGE_ENGINE* storage_engine_find(const char* name); // ---------------------------------------------------------------------------- // RRDSET - this is a chart -// use this for configuration flags, not for state control -// flags are set/unset in a manner that is not thread safe -// and may lead to missing information.
- -typedef enum __attribute__ ((__packed__)) rrdset_flags { - RRDSET_FLAG_DETAIL = (1 << 1), // if set, the data set should be considered as a detail of another - // (the master data set should be the one that has the same family and is not detail) - RRDSET_FLAG_DEBUG = (1 << 2), // enables or disables debugging for a chart - RRDSET_FLAG_OBSOLETE = (1 << 3), // this is marked by the collector/module as obsolete - RRDSET_FLAG_EXPORTING_SEND = (1 << 4), // if set, this chart should be sent to Prometheus web API and external databases - RRDSET_FLAG_EXPORTING_IGNORE = (1 << 5), // if set, this chart should not be sent to Prometheus web API and external databases - - RRDSET_FLAG_UPSTREAM_SEND = (1 << 6), // if set, this chart should be sent upstream (streaming) - RRDSET_FLAG_UPSTREAM_IGNORE = (1 << 7), // if set, this chart should not be sent upstream (streaming) - - RRDSET_FLAG_STORE_FIRST = (1 << 8), // if set, do not eliminate the first collection during interpolation - RRDSET_FLAG_HETEROGENEOUS = (1 << 9), // if set, the chart is not homogeneous (dimensions in it have multiple algorithms, multipliers or dividers) - RRDSET_FLAG_HOMOGENEOUS_CHECK = (1 << 10), // if set, the chart should be checked to determine if the dimensions are homogeneous - RRDSET_FLAG_HIDDEN = (1 << 11), // if set, do not show this chart on the dashboard, but use it for exporting - RRDSET_FLAG_SYNC_CLOCK = (1 << 12), // if set, microseconds on next data collection will be ignored (the chart will be synced to now) - RRDSET_FLAG_OBSOLETE_DIMENSIONS = (1 << 13), // this is marked by the collector/module when a chart has obsolete dimensions - - RRDSET_FLAG_METADATA_UPDATE = (1 << 14), // Mark that metadata needs to be stored - RRDSET_FLAG_ANOMALY_DETECTION = (1 << 15), // flag to identify anomaly detection charts. 
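
// Note on the flag moves in this hunk: RRDSET_FLAGS (like the RRD_FLAG_* enum
// renumbered earlier) is an in-memory packed-enum bit mask, manipulated through
// the rrdset_flag_set/clear/check macros with GCC atomic builtins, so bit
// positions can be reshuffled between versions as long as the raw values are
// never persisted. Each set/clear is an atomic read-modify-write (concurrent
// writers of different bits cannot lose each other's updates), but sequences of
// them are not atomic as a whole, which is what the "not thread safe" comment
// warns about. A minimal sketch of the idiom (assumed simplification of the
// real macros):

#include <stdio.h>

typedef enum __attribute__((__packed__)) {
    FLAG_OBSOLETE = (1 << 0),
    FLAG_HIDDEN   = (1 << 1),
} demo_flags;

static demo_flags flags;

/* atomic read-modify-write set/clear, acquire/release loads and stores */
#define flag_set(f)   __atomic_or_fetch(&flags, (f), __ATOMIC_RELEASE)
#define flag_clear(f) __atomic_and_fetch(&flags, ~(f), __ATOMIC_RELEASE)
#define flag_check(f) (__atomic_load_n(&flags, __ATOMIC_ACQUIRE) & (f))

int main(void) {
    flag_set(FLAG_OBSOLETE);
    flag_set(FLAG_HIDDEN);
    flag_clear(FLAG_OBSOLETE);
    printf("hidden=%d obsolete=%d\n",
           flag_check(FLAG_HIDDEN) ? 1 : 0,
           flag_check(FLAG_OBSOLETE) ? 1 : 0);
    return 0;
}
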
- RRDSET_FLAG_INDEXED_ID = (1 << 16), // the rrdset is indexed by its id - RRDSET_FLAG_INDEXED_NAME = (1 << 17), // the rrdset is indexed by its name - - RRDSET_FLAG_PENDING_HEALTH_INITIALIZATION = (1 << 18), - - RRDSET_FLAG_SENDER_REPLICATION_IN_PROGRESS = (1 << 19), // the sending side has replication in progress - RRDSET_FLAG_SENDER_REPLICATION_FINISHED = (1 << 20), // the sending side has completed replication - RRDSET_FLAG_RECEIVER_REPLICATION_IN_PROGRESS = (1 << 21), // the receiving side has replication in progress - RRDSET_FLAG_RECEIVER_REPLICATION_FINISHED = (1 << 22), // the receiving side has completed replication - - RRDSET_FLAG_UPSTREAM_SEND_VARIABLES = (1 << 23), // a custom variable has been updated and needs to be exposed to parent - - RRDSET_FLAG_COLLECTION_FINISHED = (1 << 24), // when set, data collection is not available for this chart - - RRDSET_FLAG_HAS_RRDCALC_LINKED = (1 << 25), // this chart has at least one rrdcal linked -} RRDSET_FLAGS; - #define rrdset_flag_get(st) __atomic_load_n(&((st)->flags), __ATOMIC_ACQUIRE) #define rrdset_flag_check(st, flag) (__atomic_load_n(&((st)->flags), __ATOMIC_ACQUIRE) & (flag)) #define rrdset_flag_set(st, flag) __atomic_or_fetch(&((st)->flags), flag, __ATOMIC_RELEASE) @@ -1094,7 +1095,7 @@ typedef struct alarm_log { uint32_t next_alarm_id; unsigned int count; unsigned int max; - uint32_t health_log_history; // the health log history in seconds to be kept in db + uint32_t health_log_retention_s; // the health log retention in seconds to be kept in db ALARM_ENTRY *alarms; RW_SPINLOCK spinlock; } ALARM_LOG; @@ -1123,6 +1124,7 @@ struct rrdhost_system_info { char *host_os_detection; char *host_cores; char *host_cpu_freq; + char *host_cpu_model; char *host_ram_total; char *host_disk_space; char *container_os_name; @@ -1173,6 +1175,14 @@ struct rrdhost { int32_t rrd_update_every; // the update frequency of the host int32_t rrd_history_entries; // the number of history entries for the host's charts + struct { + uint32_t dimension_count; // Dimension count for this host + uint32_t currently_collected; // Currently collected metrics cache + time_t cache_timestamp; + Pvoid_t JudyL; // Store metrics collected -- link to rrddim + SPINLOCK spinlock; + } accounting; + RRD_MEMORY_MODE rrd_memory_mode; // the configured memory more for the charts of this host // the actual per tier is at .db[tier].mode @@ -1188,7 +1198,7 @@ struct rrdhost { struct rrdhost_system_info *system_info; // information collected from the host environment // ------------------------------------------------------------------------ - // streaming of data to remote hosts - rrdpush sender + // streaming of data to remote hosts - rrdpush struct { struct { @@ -1204,6 +1214,10 @@ uint32_t last_used; // the last slot we used for a chart (increments only) } pluginsd_chart_slots; + + char *destination; // where to send metrics to + char *api_key; // the api key at the receiving netdata + SIMPLE_PATTERN *charts_matching; // pattern to match the charts to be sent } send; struct { @@ -1213,13 +1227,12 @@ RRDSET **array; } pluginsd_chart_slots; } receive; + + RRDHOST_STREAM_PATH path; } rrdpush; - char *rrdpush_send_destination; // where to send metrics to - char *rrdpush_send_api_key; // the api key at the receiving netdata struct rrdpush_destinations *destinations; // a linked list of possible destinations struct rrdpush_destinations *destination; // the current destination from the above list - SIMPLE_PATTERN *rrdpush_send_charts_matching; //
pattern to match the charts to be sent int32_t rrdpush_last_receiver_exit_reason; time_t rrdpush_seconds_to_replicate; // max time we want to replicate from the child @@ -1247,7 +1260,7 @@ struct rrdhost { int connected_children_count; // number of senders currently streaming struct receiver_state *receiver; - netdata_mutex_t receiver_lock; + SPINLOCK receiver_lock; int trigger_chart_obsoletion_check; // set when child connects, will instruct parent to // trigger a check for obsoleted charts since previous connect @@ -1306,11 +1319,13 @@ struct rrdhost { time_t last_time_s; } retention; - nd_uuid_t host_uuid; // Global GUID for this host - nd_uuid_t *node_id; // Cloud node_id + ND_UUID host_id; // Global GUID for this host + ND_UUID node_id; // Cloud node_id - netdata_mutex_t aclk_state_lock; - aclk_rrdhost_state aclk_state; + struct { + ND_UUID claim_id_of_origin; + ND_UUID claim_id_of_parent; + } aclk; struct rrdhost *next; struct rrdhost *prev; @@ -1325,9 +1340,6 @@ extern RRDHOST *localhost; #define rrdhost_program_name(host) string2str((host)->program_name) #define rrdhost_program_version(host) string2str((host)->program_version) -#define rrdhost_aclk_state_lock(host) netdata_mutex_lock(&((host)->aclk_state_lock)) -#define rrdhost_aclk_state_unlock(host) netdata_mutex_unlock(&((host)->aclk_state_lock)) - #define rrdhost_receiver_replicating_charts(host) (__atomic_load_n(&((host)->rrdpush_receiver_replicating_charts), __ATOMIC_RELAXED)) #define rrdhost_receiver_replicating_charts_plus_one(host) (__atomic_add_fetch(&((host)->rrdpush_receiver_replicating_charts), 1, __ATOMIC_RELAXED)) #define rrdhost_receiver_replicating_charts_minus_one(host) (__atomic_sub_fetch(&((host)->rrdpush_receiver_replicating_charts), 1, __ATOMIC_RELAXED)) @@ -1380,7 +1392,7 @@ void rrddim_index_destroy(RRDSET *st); extern time_t rrdhost_free_orphan_time_s; extern time_t rrdhost_free_ephemeral_time_s; -int rrd_init(char *hostname, struct rrdhost_system_info *system_info, bool unittest); +int rrd_init(const char *hostname, struct rrdhost_system_info *system_info, bool unittest); RRDHOST *rrdhost_find_by_hostname(const char *hostname); RRDHOST *rrdhost_find_by_guid(const char *guid); @@ -1401,9 +1413,9 @@ RRDHOST *rrdhost_find_or_create( RRD_MEMORY_MODE mode, unsigned int health_enabled, unsigned int rrdpush_enabled, - char *rrdpush_destination, - char *rrdpush_api_key, - char *rrdpush_send_charts_matching, + const char *rrdpush_destination, + const char *rrdpush_api_key, + const char *rrdpush_send_charts_matching, bool rrdpush_enable_replication, time_t rrdpush_seconds_to_replicate, time_t rrdpush_replication_step, @@ -1564,7 +1576,6 @@ void rrddim_store_metric(RRDDIM *rd, usec_t point_end_time_ut, NETDATA_DOUBLE n, // ---------------------------------------------------------------------------- // Miscellaneous functions -char *rrdset_strncpyz_name(char *to, const char *from, size_t length); void reload_host_labels(void); void rrdhost_set_is_parent_label(void); @@ -1626,6 +1637,11 @@ static inline double rrddim_get_last_stored_value(RRDDIM *rd_dim, double *max_va return value; } +static inline uint32_t get_uint32_id() +{ + return now_realtime_sec() & UINT32_MAX; +} + // // RRD DB engine declarations diff --git a/src/database/rrddim.c b/src/database/rrddim.c index 580319d30..7b98b64d0 100644 --- a/src/database/rrddim.c +++ b/src/database/rrddim.c @@ -41,6 +41,13 @@ static void rrddim_insert_callback(const DICTIONARY_ITEM *item __maybe_unused, v RRDSET *st = ctr->st; RRDHOST *host = st->rrdhost; + rd->dimension_id 
= __atomic_add_fetch(&host->accounting.dimension_count, 1, __ATOMIC_RELAXED); + spinlock_lock(&host->accounting.spinlock); + Pvoid_t *Pvalue = JudyLIns(&host->accounting.JudyL, rd->dimension_id, PJE0); + if (Pvalue) + *Pvalue = rd; + spinlock_unlock(&host->accounting.spinlock); + rd->flags = RRDDIM_FLAG_NONE; rd->id = string_strdupz(ctr->id); @@ -231,6 +238,10 @@ static void rrddim_delete_callback(const DICTIONARY_ITEM *item __maybe_unused, v freez(rd->db.data); } + spinlock_lock(&host->accounting.spinlock); + (void) JudyLDel(&host->accounting.JudyL, rd->dimension_id, PJE0); + spinlock_unlock(&host->accounting.spinlock); + string_freez(rd->id); string_freez(rd->name); } @@ -557,6 +568,12 @@ collected_number rrddim_timed_set_by_pointer(RRDSET *st __maybe_unused, RRDDIM * rrddim_set_updated(rd); rd->collector.counter++; +// spinlock_lock(&st->rrdhost->accounting.spinlock); +// Pvoid_t *Pvalue = JudyLIns(&st->rrdhost->accounting.JudySecL, (Word_t) collected_time.tv_sec, PJE0); +// if (Pvalue) +// *((int64_t *)Pvalue) = *((int64_t *)Pvalue) + 1; +// spinlock_unlock(&st->rrdhost->accounting.spinlock); + collected_number v = (value >= 0) ? value : -value; if (unlikely(v > rd->collector.collected_value_max)) rd->collector.collected_value_max = v; diff --git a/src/database/rrdfunctions-exporters.c b/src/database/rrdfunctions-exporters.c index afcdc8a98..9a1511be1 100644 --- a/src/database/rrdfunctions-exporters.c +++ b/src/database/rrdfunctions-exporters.c @@ -14,13 +14,14 @@ void rrd_chart_functions_expose_rrdpush(RRDSET *st, BUFFER *wb) { if(t->options & RRD_FUNCTION_DYNCFG) continue; buffer_sprintf(wb - , PLUGINSD_KEYWORD_FUNCTION " \"%s\" %d \"%s\" \"%s\" "HTTP_ACCESS_FORMAT" %d\n" + , PLUGINSD_KEYWORD_FUNCTION " \"%s\" %d \"%s\" \"%s\" "HTTP_ACCESS_FORMAT" %d %"PRIu32"\n" , t_dfe.name , t->timeout , string2str(t->help) , string2str(t->tags) , (HTTP_ACCESS_FORMAT_CAST)t->access , t->priority + , t->version ); } dfe_done(t); @@ -41,13 +42,14 @@ void rrd_global_functions_expose_rrdpush(RRDHOST *host, BUFFER *wb, bool dyncfg) } buffer_sprintf(wb - , PLUGINSD_KEYWORD_FUNCTION " GLOBAL \"%s\" %d \"%s\" \"%s\" "HTTP_ACCESS_FORMAT" %d\n" + , PLUGINSD_KEYWORD_FUNCTION " GLOBAL \"%s\" %d \"%s\" \"%s\" "HTTP_ACCESS_FORMAT" %d %"PRIu32"\n" , tmp_dfe.name , tmp->timeout , string2str(tmp->help) , string2str(tmp->tags) , (HTTP_ACCESS_FORMAT_CAST)tmp->access , tmp->priority + , tmp->version ); } dfe_done(tmp); @@ -60,12 +62,13 @@ static void functions2json(DICTIONARY *functions, BUFFER *wb) { struct rrd_host_function *t; dfe_start_read(functions, t) { if (!rrd_collector_running(t->collector)) continue; - if(t->options & RRD_FUNCTION_DYNCFG) continue; + if(t->options & (RRD_FUNCTION_DYNCFG| RRD_FUNCTION_RESTRICTED)) continue; buffer_json_member_add_object(wb, t_dfe.name); { buffer_json_member_add_string_or_empty(wb, "help", string2str(t->help)); buffer_json_member_add_int64(wb, "timeout", (int64_t) t->timeout); + buffer_json_member_add_uint64(wb, "version", (uint64_t) t->version); char options[65]; snprintfz( @@ -99,12 +102,13 @@ void host_functions2json(RRDHOST *host, BUFFER *wb) { struct rrd_host_function *t; dfe_start_read(host->functions, t) { if(!rrd_collector_running(t->collector)) continue; - if(t->options & RRD_FUNCTION_DYNCFG) continue; + if(t->options & (RRD_FUNCTION_DYNCFG| RRD_FUNCTION_RESTRICTED)) continue; buffer_json_member_add_object(wb, t_dfe.name); { buffer_json_member_add_string(wb, "help", string2str(t->help)); buffer_json_member_add_int64(wb, "timeout", t->timeout); + 
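/*
 * Editor's sketch: the rrddim.c hunk above introduces per-host dimension
 * accounting. Every new dimension takes an id from an atomic counter and is
 * indexed in a JudyL array under a spinlock; the delete callback removes the
 * entry again. The same pattern in isolation, under assumed names
 * (dim_registry, registry_add, registry_del) and netdata's SPINLOCK helpers
 * as used in the hunk itself:
 */
struct dim_registry {
    SPINLOCK spinlock;      // guards the JudyL index; the counter is atomic
    Pvoid_t JudyL;          // maps dimension_id -> object pointer
    uint32_t counter;       // id source, bumped with __atomic_add_fetch
};

static uint32_t registry_add(struct dim_registry *r, void *object) {
    uint32_t id = __atomic_add_fetch(&r->counter, 1, __ATOMIC_RELAXED);
    spinlock_lock(&r->spinlock);
    Pvoid_t *Pvalue = JudyLIns(&r->JudyL, id, PJE0);
    if (Pvalue)             // same guard the hunk applies before storing
        *Pvalue = object;
    spinlock_unlock(&r->spinlock);
    return id;
}

static void registry_del(struct dim_registry *r, uint32_t id) {
    spinlock_lock(&r->spinlock);
    (void) JudyLDel(&r->JudyL, id, PJE0);
    spinlock_unlock(&r->spinlock);
}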
buffer_json_member_add_uint64(wb, "version", (uint64_t) t->version); buffer_json_member_add_array(wb, "options"); { if (t->options & RRD_FUNCTION_GLOBAL) @@ -130,7 +134,7 @@ void chart_functions_to_dict(DICTIONARY *rrdset_functions_view, DICTIONARY *dst, struct rrd_host_function *t; dfe_start_read(rrdset_functions_view, t) { if(!rrd_collector_running(t->collector)) continue; - if(t->options & RRD_FUNCTION_DYNCFG) continue; + if(t->options & (RRD_FUNCTION_DYNCFG| RRD_FUNCTION_RESTRICTED)) continue; dictionary_set(dst, t_dfe.name, value, value_size); } @@ -138,13 +142,13 @@ void chart_functions_to_dict(DICTIONARY *rrdset_functions_view, DICTIONARY *dst, } void host_functions_to_dict(RRDHOST *host, DICTIONARY *dst, void *value, size_t value_size, - STRING **help, STRING **tags, HTTP_ACCESS *access, int *priority) { + STRING **help, STRING **tags, HTTP_ACCESS *access, int *priority, uint32_t *version) { if(!host || !host->functions || !dictionary_entries(host->functions) || !dst) return; struct rrd_host_function *t; dfe_start_read(host->functions, t) { if(!rrd_collector_running(t->collector)) continue; - if(t->options & RRD_FUNCTION_DYNCFG) continue; + if(t->options & (RRD_FUNCTION_DYNCFG| RRD_FUNCTION_RESTRICTED)) continue; if(help) *help = t->help; @@ -158,7 +162,14 @@ void host_functions_to_dict(RRDHOST *host, DICTIONARY *dst, void *value, size_t if(priority) *priority = t->priority; - dictionary_set(dst, t_dfe.name, value, value_size); + if(version) + *version = t->version; + + char key[UINT64_MAX_LENGTH + sizeof(RRDFUNCTIONS_VERSION_SEPARATOR) + strlen(t_dfe.name)]; + snprintfz(key, sizeof(key), "%"PRIu32 RRDFUNCTIONS_VERSION_SEPARATOR "%s", + t->version, t_dfe.name); + + dictionary_set(dst, key, value, value_size); } dfe_done(t); } diff --git a/src/database/rrdfunctions-exporters.h b/src/database/rrdfunctions-exporters.h index 43bb660eb..295e670c9 100644 --- a/src/database/rrdfunctions-exporters.h +++ b/src/database/rrdfunctions-exporters.h @@ -5,13 +5,15 @@ #include "rrd.h" +#define RRDFUNCTIONS_VERSION_SEPARATOR "|" + void rrd_chart_functions_expose_rrdpush(RRDSET *st, BUFFER *wb); void rrd_global_functions_expose_rrdpush(RRDHOST *host, BUFFER *wb, bool dyncfg); void chart_functions2json(RRDSET *st, BUFFER *wb); void chart_functions_to_dict(DICTIONARY *rrdset_functions_view, DICTIONARY *dst, void *value, size_t value_size); void host_functions_to_dict(RRDHOST *host, DICTIONARY *dst, void *value, size_t value_size, STRING **help, STRING **tags, - HTTP_ACCESS *access, int *priority); + HTTP_ACCESS *access, int *priority, uint32_t *version); void host_functions2json(RRDHOST *host, BUFFER *wb); #endif //NETDATA_RRDFUNCTIONS_EXPORTERS_H diff --git a/src/database/rrdfunctions-inflight.c b/src/database/rrdfunctions-inflight.c index adb27b3e7..811bec87f 100644 --- a/src/database/rrdfunctions-inflight.c +++ b/src/database/rrdfunctions-inflight.c @@ -351,7 +351,7 @@ static int rrd_call_function_async_and_wait(struct rrd_function_inflight *r) { HTTP_RESP_CLIENT_CLOSED_REQUEST); else code = rrd_call_function_error(r->result.wb, - "Timeout while waiting for a response from the collector.", + "Timeout while waiting for a response from the plugin that serves this feature.", HTTP_RESP_GATEWAY_TIMEOUT); tmp->free_with_signal = true; @@ -359,7 +359,7 @@ static int rrd_call_function_async_and_wait(struct rrd_function_inflight *r) { } else { code = rrd_call_function_error( - r->result.wb, "Internal error while communicating with the
plugin that serves this feature.", HTTP_RESP_INTERNAL_SERVER_ERROR); tmp->free_with_signal = true; @@ -398,7 +398,7 @@ int rrd_function_run(RRDHOST *host, BUFFER *result_wb, int timeout_s, rrd_function_result_callback_t result_cb, void *result_cb_data, rrd_function_progress_cb_t progress_cb, void *progress_cb_data, rrd_function_is_cancelled_cb_t is_cancelled_cb, void *is_cancelled_cb_data, - BUFFER *payload, const char *source) { + BUFFER *payload, const char *source, bool allow_restricted) { int code; char sanitized_cmd[PLUGINSD_LINE_MAX + 1]; @@ -412,7 +412,7 @@ int rrd_function_run(RRDHOST *host, BUFFER *result_wb, int timeout_s, if(!host) { code = HTTP_RESP_INTERNAL_SERVER_ERROR; - rrd_call_function_error(result_wb, "no host given for running the function", code); + rrd_call_function_error(result_wb, "No host was given to route this request to.", code); if(result_cb) result_cb(result_wb, code, result_cb_data); @@ -436,15 +436,21 @@ int rrd_function_run(RRDHOST *host, BUFFER *result_wb, int timeout_s, struct rrd_host_function *rdcf = dictionary_acquired_item_value(host_function_acquired); - if(!http_access_user_has_enough_access_level_for_endpoint(user_access, rdcf->access)) { + if((rdcf->options & RRD_FUNCTION_RESTRICTED) && !allow_restricted) { + code = rrd_call_function_error(result_wb, + "This feature is not available via this API.", + HTTP_ACCESS_PERMISSION_DENIED_HTTP_CODE(user_access)); + dictionary_acquired_item_release(host->functions, host_function_acquired); - if(!aclk_connected) - code = rrd_call_function_error(result_wb, - "This Netdata must be connected to Netdata Cloud for Single-Sign-On (SSO) " - "access this feature. Claim this Netdata to Netdata Cloud to enable access.", - HTTP_ACCESS_PERMISSION_DENIED_HTTP_CODE(user_access)); + if(result_cb) + result_cb(result_wb, code, result_cb_data); + + return code; + } + + if(!http_access_user_has_enough_access_level_for_endpoint(user_access, rdcf->access)) { - else if((rdcf->access & HTTP_ACCESS_SIGNED_ID) && !(user_access & HTTP_ACCESS_SIGNED_ID)) + if((rdcf->access & HTTP_ACCESS_SIGNED_ID) && !(user_access & HTTP_ACCESS_SIGNED_ID)) code = rrd_call_function_error(result_wb, "You need to be authenticated via Netdata Cloud Single-Sign-On (SSO) " "to access this feature.
Sign-in on this dashboard, " @@ -540,7 +546,7 @@ int rrd_function_run(RRDHOST *host, BUFFER *result_wb, int timeout_s, "FUNCTIONS: duplicate transaction '%s', function: '%s'", t.transaction, t.cmd); - code = rrd_call_function_error(result_wb, "duplicate transaction", HTTP_RESP_BAD_REQUEST); + code = rrd_call_function_error(result_wb, "Duplicate transaction.", HTTP_RESP_BAD_REQUEST); rrd_functions_inflight_cleanup(&t); dictionary_acquired_item_release(r->host->functions, t.host_function_acquired); diff --git a/src/database/rrdfunctions-inline.c b/src/database/rrdfunctions-inline.c index 3eb30e7b5..a8e2c2357 100644 --- a/src/database/rrdfunctions-inline.c +++ b/src/database/rrdfunctions-inline.c @@ -17,7 +17,7 @@ static int rrd_function_run_inline(struct rrd_function_execute *rfe, void *data) if(rfe->is_cancelled.cb && rfe->is_cancelled.cb(rfe->is_cancelled.data)) code = HTTP_RESP_CLIENT_CLOSED_REQUEST; else - code = fi->cb(rfe->result.wb, rfe->function); + code = fi->cb(rfe->result.wb, rfe->function, rfe->payload, rfe->source); if(code == HTTP_RESP_CLIENT_CLOSED_REQUEST || (rfe->is_cancelled.cb && rfe->is_cancelled.cb(rfe->is_cancelled.data))) { buffer_flush(rfe->result.wb); @@ -30,7 +30,7 @@ static int rrd_function_run_inline(struct rrd_function_execute *rfe, void *data) return code; } -void rrd_function_add_inline(RRDHOST *host, RRDSET *st, const char *name, int timeout, int priority, +void rrd_function_add_inline(RRDHOST *host, RRDSET *st, const char *name, int timeout, int priority, uint32_t version, const char *help, const char *tags, HTTP_ACCESS access, rrd_function_execute_inline_cb_t execute_cb) { @@ -39,6 +39,7 @@ void rrd_function_add_inline(RRDHOST *host, RRDSET *st, const char *name, int ti struct rrd_function_inline *fi = callocz(1, sizeof(struct rrd_function_inline)); fi->cb = execute_cb; - rrd_function_add(host, st, name, timeout, priority, help, tags, access, true, + rrd_function_add(host, st, name, timeout, priority, version, + help, tags, access, true, rrd_function_run_inline, fi); } diff --git a/src/database/rrdfunctions-inline.h b/src/database/rrdfunctions-inline.h index 9948edbef..32353daa7 100644 --- a/src/database/rrdfunctions-inline.h +++ b/src/database/rrdfunctions-inline.h @@ -5,9 +5,9 @@ #include "rrd.h" -typedef int (*rrd_function_execute_inline_cb_t)(BUFFER *wb, const char *function); +typedef int (*rrd_function_execute_inline_cb_t)(BUFFER *wb, const char *function, BUFFER *payload, const char *source); -void rrd_function_add_inline(RRDHOST *host, RRDSET *st, const char *name, int timeout, int priority, +void rrd_function_add_inline(RRDHOST *host, RRDSET *st, const char *name, int timeout, int priority, uint32_t version, const char *help, const char *tags, HTTP_ACCESS access, rrd_function_execute_inline_cb_t execute_cb); diff --git a/src/database/rrdfunctions-internals.h b/src/database/rrdfunctions-internals.h index a846e4de0..79eb52aa4 100644 --- a/src/database/rrdfunctions-internals.h +++ b/src/database/rrdfunctions-internals.h @@ -11,6 +11,7 @@ typedef enum __attribute__((packed)) { RRD_FUNCTION_LOCAL = (1 << 0), RRD_FUNCTION_GLOBAL = (1 << 1), RRD_FUNCTION_DYNCFG = (1 << 2), + RRD_FUNCTION_RESTRICTED = (1 << 3), // this function is restricted (hidden from user) // this is 8-bit } RRD_FUNCTION_OPTIONS; @@ -23,6 +24,7 @@ struct rrd_host_function { STRING *tags; int timeout; // the default timeout of the function int priority; + uint32_t version; rrd_function_execute_cb_t execute_cb; void *execute_cb_data; @@ -30,7 +32,6 @@ struct rrd_host_function { struct 
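/*
 * Editor's sketch: the new RRD_FUNCTION_RESTRICTED bit above joins
 * RRD_FUNCTION_DYNCFG in every user-facing filter (see the
 * rrdfunctions-exporters.c hunks earlier). A hypothetical helper, not part
 * of the patch, condensing those checks:
 */
static inline bool function_is_user_visible(RRD_FUNCTION_OPTIONS options) {
    // DYNCFG entries drive dynamic configuration and RESTRICTED entries are
    // hidden internals; both are skipped when functions are listed to users.
    return !(options & (RRD_FUNCTION_DYNCFG | RRD_FUNCTION_RESTRICTED));
}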
rrd_collector *collector; }; -size_t rrd_functions_sanitize(char *dst, const char *src, size_t dst_len); int rrd_functions_find_by_name(RRDHOST *host, BUFFER *wb, const char *name, size_t key_length, const DICTIONARY_ITEM **item); #endif //NETDATA_RRDFUNCTIONS_INTERNALS_H diff --git a/src/database/rrdfunctions-progress.c b/src/database/rrdfunctions-progress.c deleted file mode 100644 index 81d663909..000000000 --- a/src/database/rrdfunctions-progress.c +++ /dev/null @@ -1,8 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include "rrdfunctions-progress.h" - -int rrdhost_function_progress(BUFFER *wb, const char *function __maybe_unused) { - return progress_function_result(wb, rrdhost_hostname(localhost)); -} - diff --git a/src/database/rrdfunctions-progress.h b/src/database/rrdfunctions-progress.h deleted file mode 100644 index 8f97bf7e9..000000000 --- a/src/database/rrdfunctions-progress.h +++ /dev/null @@ -1,10 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#ifndef NETDATA_RRDFUNCTIONS_PROGRESS_H -#define NETDATA_RRDFUNCTIONS_PROGRESS_H - -#include "rrd.h" - -int rrdhost_function_progress(BUFFER *wb, const char *function __maybe_unused); - -#endif //NETDATA_RRDFUNCTIONS_PROGRESS_H diff --git a/src/database/rrdfunctions-streaming.c b/src/database/rrdfunctions-streaming.c deleted file mode 100644 index baf3ebc38..000000000 --- a/src/database/rrdfunctions-streaming.c +++ /dev/null @@ -1,627 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include "rrdfunctions-streaming.h" - -int rrdhost_function_streaming(BUFFER *wb, const char *function __maybe_unused) { - - time_t now = now_realtime_sec(); - - buffer_flush(wb); - wb->content_type = CT_APPLICATION_JSON; - buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT); - - buffer_json_member_add_string(wb, "hostname", rrdhost_hostname(localhost)); - buffer_json_member_add_uint64(wb, "status", HTTP_RESP_OK); - buffer_json_member_add_string(wb, "type", "table"); - buffer_json_member_add_time_t(wb, "update_every", 1); - buffer_json_member_add_boolean(wb, "has_history", false); - buffer_json_member_add_string(wb, "help", RRDFUNCTIONS_STREAMING_HELP); - buffer_json_member_add_array(wb, "data"); - - size_t max_sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_MAX] = { 0 }; - size_t max_db_metrics = 0, max_db_instances = 0, max_db_contexts = 0; - size_t max_collection_replication_instances = 0, max_streaming_replication_instances = 0; - size_t max_ml_anomalous = 0, max_ml_normal = 0, max_ml_trained = 0, max_ml_pending = 0, max_ml_silenced = 0; - { - RRDHOST *host; - dfe_start_read(rrdhost_root_index, host) { - RRDHOST_STATUS s; - rrdhost_status(host, now, &s); - buffer_json_add_array_item_array(wb); - - if(s.db.metrics > max_db_metrics) - max_db_metrics = s.db.metrics; - - if(s.db.instances > max_db_instances) - max_db_instances = s.db.instances; - - if(s.db.contexts > max_db_contexts) - max_db_contexts = s.db.contexts; - - if(s.ingest.replication.instances > max_collection_replication_instances) - max_collection_replication_instances = s.ingest.replication.instances; - - if(s.stream.replication.instances > max_streaming_replication_instances) - max_streaming_replication_instances = s.stream.replication.instances; - - for(int i = 0; i < STREAM_TRAFFIC_TYPE_MAX ;i++) { - if (s.stream.sent_bytes_on_this_connection_per_type[i] > - max_sent_bytes_on_this_connection_per_type[i]) - max_sent_bytes_on_this_connection_per_type[i] = - s.stream.sent_bytes_on_this_connection_per_type[i]; - } - - // retention 
- buffer_json_add_array_item_string(wb, rrdhost_hostname(s.host)); // Node - buffer_json_add_array_item_uint64(wb, s.db.first_time_s * MSEC_PER_SEC); // dbFrom - buffer_json_add_array_item_uint64(wb, s.db.last_time_s * MSEC_PER_SEC); // dbTo - - if(s.db.first_time_s && s.db.last_time_s && s.db.last_time_s > s.db.first_time_s) - buffer_json_add_array_item_uint64(wb, s.db.last_time_s - s.db.first_time_s); // dbDuration - else - buffer_json_add_array_item_string(wb, NULL); // dbDuration - - buffer_json_add_array_item_uint64(wb, s.db.metrics); // dbMetrics - buffer_json_add_array_item_uint64(wb, s.db.instances); // dbInstances - buffer_json_add_array_item_uint64(wb, s.db.contexts); // dbContexts - - // statuses - buffer_json_add_array_item_string(wb, rrdhost_ingest_status_to_string(s.ingest.status)); // InStatus - buffer_json_add_array_item_string(wb, rrdhost_streaming_status_to_string(s.stream.status)); // OutStatus - buffer_json_add_array_item_string(wb, rrdhost_ml_status_to_string(s.ml.status)); // MLStatus - - // collection - if(s.ingest.since) { - buffer_json_add_array_item_uint64(wb, s.ingest.since * MSEC_PER_SEC); // InSince - buffer_json_add_array_item_time_t(wb, s.now - s.ingest.since); // InAge - } - else { - buffer_json_add_array_item_string(wb, NULL); // InSince - buffer_json_add_array_item_string(wb, NULL); // InAge - } - buffer_json_add_array_item_string(wb, stream_handshake_error_to_string(s.ingest.reason)); // InReason - buffer_json_add_array_item_uint64(wb, s.ingest.hops); // InHops - buffer_json_add_array_item_double(wb, s.ingest.replication.completion); // InReplCompletion - buffer_json_add_array_item_uint64(wb, s.ingest.replication.instances); // InReplInstances - buffer_json_add_array_item_string(wb, s.ingest.peers.local.ip); // InLocalIP - buffer_json_add_array_item_uint64(wb, s.ingest.peers.local.port); // InLocalPort - buffer_json_add_array_item_string(wb, s.ingest.peers.peer.ip); // InRemoteIP - buffer_json_add_array_item_uint64(wb, s.ingest.peers.peer.port); // InRemotePort - buffer_json_add_array_item_string(wb, s.ingest.ssl ? "SSL" : "PLAIN"); // InSSL - stream_capabilities_to_json_array(wb, s.ingest.capabilities, NULL); // InCapabilities - - // streaming - if(s.stream.since) { - buffer_json_add_array_item_uint64(wb, s.stream.since * MSEC_PER_SEC); // OutSince - buffer_json_add_array_item_time_t(wb, s.now - s.stream.since); // OutAge - } - else { - buffer_json_add_array_item_string(wb, NULL); // OutSince - buffer_json_add_array_item_string(wb, NULL); // OutAge - } - buffer_json_add_array_item_string(wb, stream_handshake_error_to_string(s.stream.reason)); // OutReason - buffer_json_add_array_item_uint64(wb, s.stream.hops); // OutHops - buffer_json_add_array_item_double(wb, s.stream.replication.completion); // OutReplCompletion - buffer_json_add_array_item_uint64(wb, s.stream.replication.instances); // OutReplInstances - buffer_json_add_array_item_string(wb, s.stream.peers.local.ip); // OutLocalIP - buffer_json_add_array_item_uint64(wb, s.stream.peers.local.port); // OutLocalPort - buffer_json_add_array_item_string(wb, s.stream.peers.peer.ip); // OutRemoteIP - buffer_json_add_array_item_uint64(wb, s.stream.peers.peer.port); // OutRemotePort - buffer_json_add_array_item_string(wb, s.stream.ssl ? "SSL" : "PLAIN"); // OutSSL - buffer_json_add_array_item_string(wb, s.stream.compression ? 
"COMPRESSED" : "UNCOMPRESSED"); // OutCompression - stream_capabilities_to_json_array(wb, s.stream.capabilities, NULL); // OutCapabilities - buffer_json_add_array_item_uint64(wb, s.stream.sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_DATA]); - buffer_json_add_array_item_uint64(wb, s.stream.sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_METADATA]); - buffer_json_add_array_item_uint64(wb, s.stream.sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_REPLICATION]); - buffer_json_add_array_item_uint64(wb, s.stream.sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_FUNCTIONS]); - - buffer_json_add_array_item_array(wb); // OutAttemptHandshake - time_t last_attempt = 0; - for(struct rrdpush_destinations *d = host->destinations; d ; d = d->next) { - if(d->since > last_attempt) - last_attempt = d->since; - - buffer_json_add_array_item_string(wb, stream_handshake_error_to_string(d->reason)); - } - buffer_json_array_close(wb); // // OutAttemptHandshake - - if(!last_attempt) { - buffer_json_add_array_item_string(wb, NULL); // OutAttemptSince - buffer_json_add_array_item_string(wb, NULL); // OutAttemptAge - } - else { - buffer_json_add_array_item_uint64(wb, last_attempt * 1000); // OutAttemptSince - buffer_json_add_array_item_time_t(wb, s.now - last_attempt); // OutAttemptAge - } - - // ML - if(s.ml.status == RRDHOST_ML_STATUS_RUNNING) { - buffer_json_add_array_item_uint64(wb, s.ml.metrics.anomalous); // MlAnomalous - buffer_json_add_array_item_uint64(wb, s.ml.metrics.normal); // MlNormal - buffer_json_add_array_item_uint64(wb, s.ml.metrics.trained); // MlTrained - buffer_json_add_array_item_uint64(wb, s.ml.metrics.pending); // MlPending - buffer_json_add_array_item_uint64(wb, s.ml.metrics.silenced); // MlSilenced - - if(s.ml.metrics.anomalous > max_ml_anomalous) - max_ml_anomalous = s.ml.metrics.anomalous; - - if(s.ml.metrics.normal > max_ml_normal) - max_ml_normal = s.ml.metrics.normal; - - if(s.ml.metrics.trained > max_ml_trained) - max_ml_trained = s.ml.metrics.trained; - - if(s.ml.metrics.pending > max_ml_pending) - max_ml_pending = s.ml.metrics.pending; - - if(s.ml.metrics.silenced > max_ml_silenced) - max_ml_silenced = s.ml.metrics.silenced; - - } - else { - buffer_json_add_array_item_string(wb, NULL); // MlAnomalous - buffer_json_add_array_item_string(wb, NULL); // MlNormal - buffer_json_add_array_item_string(wb, NULL); // MlTrained - buffer_json_add_array_item_string(wb, NULL); // MlPending - buffer_json_add_array_item_string(wb, NULL); // MlSilenced - } - - // close - buffer_json_array_close(wb); - } - dfe_done(host); - } - buffer_json_array_close(wb); // data - buffer_json_member_add_object(wb, "columns"); - { - size_t field_id = 0; - - // Node - buffer_rrdf_table_add_field(wb, field_id++, "Node", "Node's Hostname", - RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, - 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, - RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, - RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_UNIQUE_KEY | RRDF_FIELD_OPTS_STICKY, - NULL); - - buffer_rrdf_table_add_field(wb, field_id++, "dbFrom", "DB Data Retention From", - RRDF_FIELD_TYPE_TIMESTAMP, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_DATETIME_MS, - 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, - RRDF_FIELD_SUMMARY_MIN, RRDF_FIELD_FILTER_RANGE, - RRDF_FIELD_OPTS_NONE, NULL); - - buffer_rrdf_table_add_field(wb, field_id++, "dbTo", "DB Data Retention To", - RRDF_FIELD_TYPE_TIMESTAMP, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_DATETIME_MS, - 0, 
NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, - RRDF_FIELD_SUMMARY_MAX, RRDF_FIELD_FILTER_RANGE, - RRDF_FIELD_OPTS_NONE, NULL); - - buffer_rrdf_table_add_field(wb, field_id++, "dbDuration", "DB Data Retention Duration", - RRDF_FIELD_TYPE_DURATION, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_DURATION_S, - 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, - RRDF_FIELD_SUMMARY_MAX, RRDF_FIELD_FILTER_RANGE, - RRDF_FIELD_OPTS_VISIBLE, NULL); - - buffer_rrdf_table_add_field(wb, field_id++, "dbMetrics", "Time-series Metrics in the DB", - RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, - 0, NULL, (double)max_db_metrics, RRDF_FIELD_SORT_DESCENDING, NULL, - RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, - RRDF_FIELD_OPTS_VISIBLE, NULL); - - buffer_rrdf_table_add_field(wb, field_id++, "dbInstances", "Instances in the DB", - RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, - 0, NULL, (double)max_db_instances, RRDF_FIELD_SORT_DESCENDING, NULL, - RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, - RRDF_FIELD_OPTS_VISIBLE, NULL); - - buffer_rrdf_table_add_field(wb, field_id++, "dbContexts", "Contexts in the DB", - RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, - 0, NULL, (double)max_db_contexts, RRDF_FIELD_SORT_DESCENDING, NULL, - RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, - RRDF_FIELD_OPTS_VISIBLE, NULL); - - // --- statuses --- - - buffer_rrdf_table_add_field(wb, field_id++, "InStatus", "Data Collection Online Status", - RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, - 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, - RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, - RRDF_FIELD_OPTS_VISIBLE, NULL); - - - buffer_rrdf_table_add_field(wb, field_id++, "OutStatus", "Streaming Online Status", - RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, - 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, - RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, - RRDF_FIELD_OPTS_VISIBLE, NULL); - - buffer_rrdf_table_add_field(wb, field_id++, "MlStatus", "ML Status", - RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, - 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, - RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, - RRDF_FIELD_OPTS_VISIBLE, NULL); - - // --- collection --- - - buffer_rrdf_table_add_field(wb, field_id++, "InSince", "Last Data Collection Status Change", - RRDF_FIELD_TYPE_TIMESTAMP, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_DATETIME_MS, - 0, NULL, NAN, RRDF_FIELD_SORT_DESCENDING, NULL, - RRDF_FIELD_SUMMARY_MIN, RRDF_FIELD_FILTER_RANGE, - RRDF_FIELD_OPTS_NONE, NULL); - - buffer_rrdf_table_add_field(wb, field_id++, "InAge", "Last Data Collection Online Status Change Age", - RRDF_FIELD_TYPE_DURATION, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_DURATION_S, - 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, - RRDF_FIELD_SUMMARY_MAX, RRDF_FIELD_FILTER_RANGE, - RRDF_FIELD_OPTS_VISIBLE, NULL); - - buffer_rrdf_table_add_field(wb, field_id++, "InReason", "Data Collection Online Status Reason", - RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, - 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, - RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, - RRDF_FIELD_OPTS_VISIBLE, NULL); - - buffer_rrdf_table_add_field(wb, field_id++, "InHops", "Data Collection Distance Hops from Origin Node", - RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, - 0, NULL, NAN, 
RRDF_FIELD_SORT_ASCENDING, NULL, - RRDF_FIELD_SUMMARY_MIN, RRDF_FIELD_FILTER_RANGE, - RRDF_FIELD_OPTS_VISIBLE, NULL); - - buffer_rrdf_table_add_field(wb, field_id++, "InReplCompletion", "Inbound Replication Completion", - RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, - 1, "%", 100.0, RRDF_FIELD_SORT_DESCENDING, NULL, - RRDF_FIELD_SUMMARY_MIN, RRDF_FIELD_FILTER_RANGE, - RRDF_FIELD_OPTS_VISIBLE, NULL); - - buffer_rrdf_table_add_field(wb, field_id++, "InReplInstances", "Inbound Replicating Instances", - RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, - 0, "instances", (double)max_collection_replication_instances, RRDF_FIELD_SORT_DESCENDING, - NULL, - RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, - RRDF_FIELD_OPTS_NONE, NULL); - - buffer_rrdf_table_add_field(wb, field_id++, "InLocalIP", "Inbound Local IP", - RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, - 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, - RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, - RRDF_FIELD_OPTS_NONE, NULL); - - buffer_rrdf_table_add_field(wb, field_id++, "InLocalPort", "Inbound Local Port", - RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, - 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, - RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_RANGE, - RRDF_FIELD_OPTS_NONE, NULL); - - buffer_rrdf_table_add_field(wb, field_id++, "InRemoteIP", "Inbound Remote IP", - RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, - 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, - RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, - RRDF_FIELD_OPTS_NONE, NULL); - - buffer_rrdf_table_add_field(wb, field_id++, "InRemotePort", "Inbound Remote Port", - RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, - 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, - RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_RANGE, - RRDF_FIELD_OPTS_NONE, NULL); - - buffer_rrdf_table_add_field(wb, field_id++, "InSSL", "Inbound SSL Connection", - RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, - 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, - RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, - RRDF_FIELD_OPTS_NONE, NULL); - - buffer_rrdf_table_add_field(wb, field_id++, "InCapabilities", "Inbound Connection Capabilities", - RRDF_FIELD_TYPE_ARRAY, RRDF_FIELD_VISUAL_PILL, RRDF_FIELD_TRANSFORM_NONE, - 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, - RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, - RRDF_FIELD_OPTS_NONE, NULL); - - // --- streaming --- - - buffer_rrdf_table_add_field(wb, field_id++, "OutSince", "Last Streaming Status Change", - RRDF_FIELD_TYPE_TIMESTAMP, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_DATETIME_MS, - 0, NULL, NAN, RRDF_FIELD_SORT_DESCENDING, NULL, - RRDF_FIELD_SUMMARY_MAX, RRDF_FIELD_FILTER_RANGE, - RRDF_FIELD_OPTS_NONE, NULL); - - buffer_rrdf_table_add_field(wb, field_id++, "OutAge", "Last Streaming Status Change Age", - RRDF_FIELD_TYPE_DURATION, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_DURATION_S, - 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, - RRDF_FIELD_SUMMARY_MIN, RRDF_FIELD_FILTER_RANGE, - RRDF_FIELD_OPTS_VISIBLE, NULL); - - buffer_rrdf_table_add_field(wb, field_id++, "OutReason", "Streaming Status Reason", - RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, - 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, - RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, - 
RRDF_FIELD_OPTS_VISIBLE, NULL); - - buffer_rrdf_table_add_field(wb, field_id++, "OutHops", "Streaming Distance Hops from Origin Node", - RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, - 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, - RRDF_FIELD_SUMMARY_MIN, RRDF_FIELD_FILTER_RANGE, - RRDF_FIELD_OPTS_VISIBLE, NULL); - - buffer_rrdf_table_add_field(wb, field_id++, "OutReplCompletion", "Outbound Replication Completion", - RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, - 1, "%", 100.0, RRDF_FIELD_SORT_DESCENDING, NULL, - RRDF_FIELD_SUMMARY_MIN, RRDF_FIELD_FILTER_RANGE, - RRDF_FIELD_OPTS_VISIBLE, NULL); - - buffer_rrdf_table_add_field(wb, field_id++, "OutReplInstances", "Outbound Replicating Instances", - RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, - 0, "instances", (double)max_streaming_replication_instances, RRDF_FIELD_SORT_DESCENDING, - NULL, - RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, - RRDF_FIELD_OPTS_NONE, NULL); - - buffer_rrdf_table_add_field(wb, field_id++, "OutLocalIP", "Outbound Local IP", - RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, - 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, - RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, - RRDF_FIELD_OPTS_NONE, NULL); - - buffer_rrdf_table_add_field(wb, field_id++, "OutLocalPort", "Outbound Local Port", - RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, - 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, - RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_RANGE, - RRDF_FIELD_OPTS_NONE, NULL); - - buffer_rrdf_table_add_field(wb, field_id++, "OutRemoteIP", "Outbound Remote IP", - RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, - 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, - RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, - RRDF_FIELD_OPTS_NONE, NULL); - - buffer_rrdf_table_add_field(wb, field_id++, "OutRemotePort", "Outbound Remote Port", - RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, - 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, - RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_RANGE, - RRDF_FIELD_OPTS_NONE, NULL); - - buffer_rrdf_table_add_field(wb, field_id++, "OutSSL", "Outbound SSL Connection", - RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, - 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, - RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, - RRDF_FIELD_OPTS_NONE, NULL); - - buffer_rrdf_table_add_field(wb, field_id++, "OutCompression", "Outbound Compressed Connection", - RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, - 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, - RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, - RRDF_FIELD_OPTS_NONE, NULL); - - buffer_rrdf_table_add_field(wb, field_id++, "OutCapabilities", "Outbound Connection Capabilities", - RRDF_FIELD_TYPE_ARRAY, RRDF_FIELD_VISUAL_PILL, RRDF_FIELD_TRANSFORM_NONE, - 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, - RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, - RRDF_FIELD_OPTS_NONE, NULL); - - buffer_rrdf_table_add_field(wb, field_id++, "OutTrafficData", "Outbound Metric Data Traffic", - RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, - 0, "bytes", (double)max_sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_DATA], - RRDF_FIELD_SORT_DESCENDING, NULL, - RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, - 
RRDF_FIELD_OPTS_NONE, NULL); - - buffer_rrdf_table_add_field(wb, field_id++, "OutTrafficMetadata", "Outbound Metric Metadata Traffic", - RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, - 0, "bytes", - (double)max_sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_METADATA], - RRDF_FIELD_SORT_DESCENDING, NULL, - RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, - RRDF_FIELD_OPTS_NONE, NULL); - - buffer_rrdf_table_add_field(wb, field_id++, "OutTrafficReplication", "Outbound Metric Replication Traffic", - RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, - 0, "bytes", - (double)max_sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_REPLICATION], - RRDF_FIELD_SORT_DESCENDING, NULL, - RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, - RRDF_FIELD_OPTS_NONE, NULL); - - buffer_rrdf_table_add_field(wb, field_id++, "OutTrafficFunctions", "Outbound Metric Functions Traffic", - RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, - 0, "bytes", - (double)max_sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_FUNCTIONS], - RRDF_FIELD_SORT_DESCENDING, NULL, - RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, - RRDF_FIELD_OPTS_NONE, NULL); - - buffer_rrdf_table_add_field(wb, field_id++, "OutAttemptHandshake", - "Outbound Connection Attempt Handshake Status", - RRDF_FIELD_TYPE_ARRAY, RRDF_FIELD_VISUAL_PILL, RRDF_FIELD_TRANSFORM_NONE, - 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, - RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, - RRDF_FIELD_OPTS_NONE, NULL); - - buffer_rrdf_table_add_field(wb, field_id++, "OutAttemptSince", - "Last Outbound Connection Attempt Status Change Time", - RRDF_FIELD_TYPE_TIMESTAMP, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_DATETIME_MS, - 0, NULL, NAN, RRDF_FIELD_SORT_DESCENDING, NULL, - RRDF_FIELD_SUMMARY_MAX, RRDF_FIELD_FILTER_RANGE, - RRDF_FIELD_OPTS_NONE, NULL); - - buffer_rrdf_table_add_field(wb, field_id++, "OutAttemptAge", - "Last Outbound Connection Attempt Status Change Age", - RRDF_FIELD_TYPE_DURATION, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_DURATION_S, - 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, - RRDF_FIELD_SUMMARY_MIN, RRDF_FIELD_FILTER_RANGE, - RRDF_FIELD_OPTS_VISIBLE, NULL); - - // --- ML --- - - buffer_rrdf_table_add_field(wb, field_id++, "MlAnomalous", "Number of Anomalous Metrics", - RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, - 0, "metrics", - (double)max_ml_anomalous, - RRDF_FIELD_SORT_DESCENDING, NULL, - RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, - RRDF_FIELD_OPTS_NONE, NULL); - - buffer_rrdf_table_add_field(wb, field_id++, "MlNormal", "Number of Not Anomalous Metrics", - RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, - 0, "metrics", - (double)max_ml_normal, - RRDF_FIELD_SORT_DESCENDING, NULL, - RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, - RRDF_FIELD_OPTS_NONE, NULL); - - buffer_rrdf_table_add_field(wb, field_id++, "MlTrained", "Number of Trained Metrics", - RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, - 0, "metrics", - (double)max_ml_trained, - RRDF_FIELD_SORT_DESCENDING, NULL, - RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, - RRDF_FIELD_OPTS_NONE, NULL); - - buffer_rrdf_table_add_field(wb, field_id++, "MlPending", "Number of Pending Metrics", - RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, - 0, "metrics", - (double)max_ml_pending, - RRDF_FIELD_SORT_DESCENDING, NULL, - 
RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, - RRDF_FIELD_OPTS_NONE, NULL); - - buffer_rrdf_table_add_field(wb, field_id++, "MlSilenced", "Number of Silenced Metrics", - RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, - 0, "metrics", - (double)max_ml_silenced, - RRDF_FIELD_SORT_DESCENDING, NULL, - RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, - RRDF_FIELD_OPTS_NONE, NULL); - } - buffer_json_object_close(wb); // columns - buffer_json_member_add_string(wb, "default_sort_column", "Node"); - buffer_json_member_add_object(wb, "charts"); - { - // Data Collection Age chart - buffer_json_member_add_object(wb, "InAge"); - { - buffer_json_member_add_string(wb, "name", "Data Collection Age"); - buffer_json_member_add_string(wb, "type", "stacked-bar"); - buffer_json_member_add_array(wb, "columns"); - { - buffer_json_add_array_item_string(wb, "InAge"); - } - buffer_json_array_close(wb); - } - buffer_json_object_close(wb); - - // Streaming Age chart - buffer_json_member_add_object(wb, "OutAge"); - { - buffer_json_member_add_string(wb, "name", "Streaming Age"); - buffer_json_member_add_string(wb, "type", "stacked-bar"); - buffer_json_member_add_array(wb, "columns"); - { - buffer_json_add_array_item_string(wb, "OutAge"); - } - buffer_json_array_close(wb); - } - buffer_json_object_close(wb); - - // DB Duration - buffer_json_member_add_object(wb, "dbDuration"); - { - buffer_json_member_add_string(wb, "name", "Retention Duration"); - buffer_json_member_add_string(wb, "type", "stacked-bar"); - buffer_json_member_add_array(wb, "columns"); - { - buffer_json_add_array_item_string(wb, "dbDuration"); - } - buffer_json_array_close(wb); - } - buffer_json_object_close(wb); - } - buffer_json_object_close(wb); // charts - - buffer_json_member_add_array(wb, "default_charts"); - { - buffer_json_add_array_item_array(wb); - buffer_json_add_array_item_string(wb, "InAge"); - buffer_json_add_array_item_string(wb, "Node"); - buffer_json_array_close(wb); - - buffer_json_add_array_item_array(wb); - buffer_json_add_array_item_string(wb, "OutAge"); - buffer_json_add_array_item_string(wb, "Node"); - buffer_json_array_close(wb); - } - buffer_json_array_close(wb); - - buffer_json_member_add_object(wb, "group_by"); - { - buffer_json_member_add_object(wb, "Node"); - { - buffer_json_member_add_string(wb, "name", "Node"); - buffer_json_member_add_array(wb, "columns"); - { - buffer_json_add_array_item_string(wb, "Node"); - } - buffer_json_array_close(wb); - } - buffer_json_object_close(wb); - - buffer_json_member_add_object(wb, "InStatus"); - { - buffer_json_member_add_string(wb, "name", "Nodes by Collection Status"); - buffer_json_member_add_array(wb, "columns"); - { - buffer_json_add_array_item_string(wb, "InStatus"); - } - buffer_json_array_close(wb); - } - buffer_json_object_close(wb); - - buffer_json_member_add_object(wb, "OutStatus"); - { - buffer_json_member_add_string(wb, "name", "Nodes by Streaming Status"); - buffer_json_member_add_array(wb, "columns"); - { - buffer_json_add_array_item_string(wb, "OutStatus"); - } - buffer_json_array_close(wb); - } - buffer_json_object_close(wb); - - buffer_json_member_add_object(wb, "MlStatus"); - { - buffer_json_member_add_string(wb, "name", "Nodes by ML Status"); - buffer_json_member_add_array(wb, "columns"); - { - buffer_json_add_array_item_string(wb, "MlStatus"); - } - buffer_json_array_close(wb); - } - buffer_json_object_close(wb); - - buffer_json_member_add_object(wb, "InRemoteIP"); - { - buffer_json_member_add_string(wb, "name", "Nodes by Inbound 
IP"); - buffer_json_member_add_array(wb, "columns"); - { - buffer_json_add_array_item_string(wb, "InRemoteIP"); - } - buffer_json_array_close(wb); - } - buffer_json_object_close(wb); - - buffer_json_member_add_object(wb, "OutRemoteIP"); - { - buffer_json_member_add_string(wb, "name", "Nodes by Outbound IP"); - buffer_json_member_add_array(wb, "columns"); - { - buffer_json_add_array_item_string(wb, "OutRemoteIP"); - } - buffer_json_array_close(wb); - } - buffer_json_object_close(wb); - } - buffer_json_object_close(wb); // group_by - - buffer_json_member_add_time_t(wb, "expires", now_realtime_sec() + 1); - buffer_json_finalize(wb); - - return HTTP_RESP_OK; -} diff --git a/src/database/rrdfunctions-streaming.h b/src/database/rrdfunctions-streaming.h deleted file mode 100644 index cfa15bdb5..000000000 --- a/src/database/rrdfunctions-streaming.h +++ /dev/null @@ -1,12 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#ifndef NETDATA_RRDFUNCTIONS_STREAMING_H -#define NETDATA_RRDFUNCTIONS_STREAMING_H - -#include "rrd.h" - -#define RRDFUNCTIONS_STREAMING_HELP "Streaming status for parents and children." - -int rrdhost_function_streaming(BUFFER *wb, const char *function); - -#endif //NETDATA_RRDFUNCTIONS_STREAMING_H diff --git a/src/database/rrdfunctions.c b/src/database/rrdfunctions.c index 9411c4c3f..508ec98f6 100644 --- a/src/database/rrdfunctions.c +++ b/src/database/rrdfunctions.c @@ -7,70 +7,6 @@ #define MAX_FUNCTION_LENGTH (PLUGINSD_LINE_MAX - 512) // we need some space for the rest of the line -static unsigned char functions_allowed_chars[256] = { - [0] = '\0', [1] = '_', [2] = '_', [3] = '_', [4] = '_', [5] = '_', [6] = '_', [7] = '_', [8] = '_', - - // control - ['\t'] = ' ', ['\n'] = ' ', ['\v'] = ' ', [12] = ' ', ['\r'] = ' ', - - [14] = '_', [15] = '_', [16] = '_', [17] = '_', [18] = '_', [19] = '_', [20] = '_', [21] = '_', - [22] = '_', [23] = '_', [24] = '_', [25] = '_', [26] = '_', [27] = '_', [28] = '_', [29] = '_', - [30] = '_', [31] = '_', - - // symbols - [' '] = ' ', ['!'] = '!', ['"'] = '\'', ['#'] = '#', ['$'] = '$', ['%'] = '%', ['&'] = '&', ['\''] = '\'', - ['('] = '(', [')'] = ')', ['*'] = '*', ['+'] = '+', [','] = ',', ['-'] = '-', ['.'] = '.', ['/'] = '/', - - // numbers - ['0'] = '0', ['1'] = '1', ['2'] = '2', ['3'] = '3', ['4'] = '4', ['5'] = '5', ['6'] = '6', ['7'] = '7', - ['8'] = '8', ['9'] = '9', - - // symbols - [':'] = ':', [';'] = ';', ['<'] = '<', ['='] = '=', ['>'] = '>', ['?'] = '?', ['@'] = '@', - - // capitals - ['A'] = 'A', ['B'] = 'B', ['C'] = 'C', ['D'] = 'D', ['E'] = 'E', ['F'] = 'F', ['G'] = 'G', ['H'] = 'H', - ['I'] = 'I', ['J'] = 'J', ['K'] = 'K', ['L'] = 'L', ['M'] = 'M', ['N'] = 'N', ['O'] = 'O', ['P'] = 'P', - ['Q'] = 'Q', ['R'] = 'R', ['S'] = 'S', ['T'] = 'T', ['U'] = 'U', ['V'] = 'V', ['W'] = 'W', ['X'] = 'X', - ['Y'] = 'Y', ['Z'] = 'Z', - - // symbols - ['['] = '[', ['\\'] = '\\', [']'] = ']', ['^'] = '^', ['_'] = '_', ['`'] = '`', - - // lower - ['a'] = 'a', ['b'] = 'b', ['c'] = 'c', ['d'] = 'd', ['e'] = 'e', ['f'] = 'f', ['g'] = 'g', ['h'] = 'h', - ['i'] = 'i', ['j'] = 'j', ['k'] = 'k', ['l'] = 'l', ['m'] = 'm', ['n'] = 'n', ['o'] = 'o', ['p'] = 'p', - ['q'] = 'q', ['r'] = 'r', ['s'] = 's', ['t'] = 't', ['u'] = 'u', ['v'] = 'v', ['w'] = 'w', ['x'] = 'x', - ['y'] = 'y', ['z'] = 'z', - - // symbols - ['{'] = '{', ['|'] = '|', ['}'] = '}', ['~'] = '~', - - // rest - [127] = '_', [128] = '_', [129] = '_', [130] = '_', [131] = '_', [132] = '_', [133] = '_', [134] = '_', - [135] = '_', [136] = '_', [137] = '_', [138] = '_', [139] = '_', 
[140] = '_', [141] = '_', [142] = '_', - [143] = '_', [144] = '_', [145] = '_', [146] = '_', [147] = '_', [148] = '_', [149] = '_', [150] = '_', - [151] = '_', [152] = '_', [153] = '_', [154] = '_', [155] = '_', [156] = '_', [157] = '_', [158] = '_', - [159] = '_', [160] = '_', [161] = '_', [162] = '_', [163] = '_', [164] = '_', [165] = '_', [166] = '_', - [167] = '_', [168] = '_', [169] = '_', [170] = '_', [171] = '_', [172] = '_', [173] = '_', [174] = '_', - [175] = '_', [176] = '_', [177] = '_', [178] = '_', [179] = '_', [180] = '_', [181] = '_', [182] = '_', - [183] = '_', [184] = '_', [185] = '_', [186] = '_', [187] = '_', [188] = '_', [189] = '_', [190] = '_', - [191] = '_', [192] = '_', [193] = '_', [194] = '_', [195] = '_', [196] = '_', [197] = '_', [198] = '_', - [199] = '_', [200] = '_', [201] = '_', [202] = '_', [203] = '_', [204] = '_', [205] = '_', [206] = '_', - [207] = '_', [208] = '_', [209] = '_', [210] = '_', [211] = '_', [212] = '_', [213] = '_', [214] = '_', - [215] = '_', [216] = '_', [217] = '_', [218] = '_', [219] = '_', [220] = '_', [221] = '_', [222] = '_', - [223] = '_', [224] = '_', [225] = '_', [226] = '_', [227] = '_', [228] = '_', [229] = '_', [230] = '_', - [231] = '_', [232] = '_', [233] = '_', [234] = '_', [235] = '_', [236] = '_', [237] = '_', [238] = '_', - [239] = '_', [240] = '_', [241] = '_', [242] = '_', [243] = '_', [244] = '_', [245] = '_', [246] = '_', - [247] = '_', [248] = '_', [249] = '_', [250] = '_', [251] = '_', [252] = '_', [253] = '_', [254] = '_', - [255] = '_' -}; - -size_t rrd_functions_sanitize(char *dst, const char *src, size_t dst_len) { - return text_sanitize((unsigned char *)dst, (const unsigned char *)src, dst_len, - functions_allowed_chars, true, "", NULL); -} - // ---------------------------------------------------------------------------- // we keep a dictionary per RRDSET with these functions @@ -158,13 +94,24 @@ static bool rrd_functions_conflict_callback(const DICTIONARY_ITEM *item __maybe_ if(rdcf->timeout != new_rdcf->timeout) { nd_log(NDLS_DAEMON, NDLP_DEBUG, - "FUNCTIONS: function '%s' of host '%s' changed timeout", - dictionary_acquired_item_name(item), rrdhost_hostname(host)); + "FUNCTIONS: function '%s' of host '%s' changed timeout (from %d to %d)", + dictionary_acquired_item_name(item), rrdhost_hostname(host), + rdcf->timeout, new_rdcf->timeout); rdcf->timeout = new_rdcf->timeout; changed = true; } + if(rdcf->version != new_rdcf->version) { + nd_log(NDLS_DAEMON, NDLP_DEBUG, + "FUNCTIONS: function '%s' of host '%s' changed version (from %"PRIu32" to %"PRIu32")", + dictionary_acquired_item_name(item), rrdhost_hostname(host), + rdcf->version, new_rdcf->version); + + rdcf->version = new_rdcf->version; + changed = true; + } + if(rdcf->priority != new_rdcf->priority) { nd_log(NDLS_DAEMON, NDLP_DEBUG, "FUNCTIONS: function '%s' of host '%s' changed priority", @@ -225,6 +172,10 @@ void rrd_functions_host_destroy(RRDHOST *host) { // ---------------------------------------------------------------------------- +static inline bool is_function_restricted(const char *name, const char *tags) { + return (name && name[0] == '_' && name[1] == '_') || (tags && strstr(tags, RRDFUNCTIONS_TAG_HIDDEN) != NULL); +} + static inline bool is_function_dyncfg(const char *name) { if(!name || !*name) return false; @@ -239,7 +190,16 @@ static inline bool is_function_dyncfg(const char *name) { return false; } -void rrd_function_add(RRDHOST *host, RRDSET *st, const char *name, int timeout, int priority, +static inline RRD_FUNCTION_OPTIONS
get_function_options(RRDSET *st, const char *name, const char *tags) { + if(is_function_dyncfg(name)) + return RRD_FUNCTION_DYNCFG; + + RRD_FUNCTION_OPTIONS options = st ? RRD_FUNCTION_LOCAL : RRD_FUNCTION_GLOBAL; + + return options | (is_function_restricted(name, tags) ? RRD_FUNCTION_RESTRICTED : 0); +} + +void rrd_function_add(RRDHOST *host, RRDSET *st, const char *name, int timeout, int priority, uint32_t version, const char *help, const char *tags, HTTP_ACCESS access, bool sync, rrd_function_execute_cb_t execute_cb, void *execute_cb_data) { @@ -263,13 +223,14 @@ void rrd_function_add(RRDHOST *host, RRDSET *st, const char *name, int timeout, struct rrd_host_function tmp = { .sync = sync, .timeout = timeout, - .options = st ? RRD_FUNCTION_LOCAL: (is_function_dyncfg(name) ? RRD_FUNCTION_DYNCFG : RRD_FUNCTION_GLOBAL), + .version = version, + .priority = priority, + .options = get_function_options(st, name, tags), .access = access, .execute_cb = execute_cb, .execute_cb_data = execute_cb_data, .help = string_strdupz(help), .tags = string_strdupz(tags), - .priority = priority, }; const DICTIONARY_ITEM *item = dictionary_set_and_acquire_item(host->functions, key, &tmp, sizeof(tmp)); @@ -294,17 +255,6 @@ void rrd_function_del(RRDHOST *host, RRDSET *st, const char *name) { dictionary_garbage_collect(host->functions); } -int rrd_call_function_error(BUFFER *wb, const char *msg, int code) { - char buffer[PLUGINSD_LINE_MAX]; - json_escape_string(buffer, msg, PLUGINSD_LINE_MAX); - - buffer_flush(wb); - buffer_sprintf(wb, "{\"status\":%d,\"error_message\":\"%s\"}", code, buffer); - wb->content_type = CT_APPLICATION_JSON; - buffer_no_cacheable(wb); - return code; -} - int rrd_functions_find_by_name(RRDHOST *host, BUFFER *wb, const char *name, size_t key_length, const DICTIONARY_ITEM **item) { char buffer[MAX_FUNCTION_LENGTH + 1]; strncpyz(buffer, name, sizeof(buffer) - 1); @@ -345,11 +295,11 @@ int rrd_functions_find_by_name(RRDHOST *host, BUFFER *wb, const char *name, size if(!(*item)) { if(found) return rrd_call_function_error(wb, - "The collector that registered this function, is not currently running.", + "The plugin that registered this feature is not currently running.", HTTP_RESP_SERVICE_UNAVAILABLE); else return rrd_call_function_error(wb, - "No collector is supplying this function on this host at this time.", + "This feature is not available on this host at this time.", HTTP_RESP_NOT_FOUND); } diff --git a/src/database/rrdfunctions.h b/src/database/rrdfunctions.h index d3c7f0e13..69c6c703c 100644 --- a/src/database/rrdfunctions.h +++ b/src/database/rrdfunctions.h @@ -7,6 +7,8 @@ #include "libnetdata/libnetdata.h" #define RRDFUNCTIONS_PRIORITY_DEFAULT 100 +#define RRDFUNCTIONS_VERSION_DEFAULT 0 +#define RRDFUNCTIONS_TAG_HIDDEN "hidden" #define RRDFUNCTIONS_TIMEOUT_EXTENSION_UT (1 * USEC_PER_SEC) @@ -66,7 +68,7 @@ void rrd_functions_host_init(RRDHOST *host); void rrd_functions_host_destroy(RRDHOST *host); // add a function, to be run from the collector -void rrd_function_add(RRDHOST *host, RRDSET *st, const char *name, int timeout, int priority, const char *help, const char *tags, +void rrd_function_add(RRDHOST *host, RRDSET *st, const char *name, int timeout, int priority, uint32_t version, const char *help, const char *tags, HTTP_ACCESS access, bool sync, rrd_function_execute_cb_t execute_cb, void *execute_cb_data); @@ -79,9 +81,7 @@ int rrd_function_run(RRDHOST *host, BUFFER *result_wb, int timeout_s, rrd_function_result_callback_t result_cb, void *result_cb_data, rrd_function_progress_cb_t
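/*
 * Editor's usage sketch: with the widened rrd_function_add() signature
 * above, a registration now carries an explicit version, letting the
 * conflict callback in rrdfunctions.c detect and adopt version bumps.
 * "my-function" and my_function_cb are placeholders; the access flags
 * mirror combinations used elsewhere in this patch:
 */
rrd_function_add(localhost, NULL, "my-function", 10,
                 RRDFUNCTIONS_PRIORITY_DEFAULT, RRDFUNCTIONS_VERSION_DEFAULT,
                 "Help text shown in the functions list", "top",
                 HTTP_ACCESS_SIGNED_ID | HTTP_ACCESS_SAME_SPACE | HTTP_ACCESS_SENSITIVE_DATA,
                 true, my_function_cb, NULL);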
progress_cb, void *progress_cb_data, rrd_function_is_cancelled_cb_t is_cancelled_cb, void *is_cancelled_cb_data, - BUFFER *payload, const char *source); - -int rrd_call_function_error(BUFFER *wb, const char *msg, int code); + BUFFER *payload, const char *source, bool allow_restricted); bool rrd_function_available(RRDHOST *host, const char *function); @@ -90,7 +90,5 @@ bool rrd_function_has_this_original_result_callback(nd_uuid_t *transaction, rrd_ #include "rrdfunctions-inline.h" #include "rrdfunctions-inflight.h" #include "rrdfunctions-exporters.h" -#include "rrdfunctions-streaming.h" -#include "rrdfunctions-progress.h" #endif // NETDATA_RRDFUNCTIONS_H diff --git a/src/database/rrdhost.c b/src/database/rrdhost.c index b3d786cff..1902746ee 100644 --- a/src/database/rrdhost.c +++ b/src/database/rrdhost.c @@ -36,13 +36,13 @@ time_t rrdhost_free_ephemeral_time_s = 86400; RRDHOST *find_host_by_node_id(char *node_id) { - nd_uuid_t node_uuid; - if (unlikely(!node_id || uuid_parse(node_id, node_uuid))) + ND_UUID node_uuid; + if (unlikely(!node_id || uuid_parse(node_id, node_uuid.uuid))) return NULL; RRDHOST *host, *ret = NULL; dfe_start_read(rrdhost_root_index, host) { - if (host->node_id && uuid_eq(*host->node_id, node_uuid)) { + if (UUIDeq(host->node_id, node_uuid)) { ret = host; break; } @@ -232,10 +232,10 @@ void set_host_properties(RRDHOST *host, int update_every, RRD_MEMORY_MODE memory // RRDHOST - add a host static void rrdhost_initialize_rrdpush_sender(RRDHOST *host, - unsigned int rrdpush_enabled, - char *rrdpush_destination, - char *rrdpush_api_key, - char *rrdpush_send_charts_matching + unsigned int rrdpush_enabled, + const char *rrdpush_destination, + const char *rrdpush_api_key, + const char *rrdpush_send_charts_matching ) { if(rrdhost_flag_check(host, RRDHOST_FLAG_RRDPUSH_SENDER_INITIALIZED)) return; @@ -244,15 +244,13 @@ static void rrdhost_initialize_rrdpush_sender(RRDHOST *host, rrdhost_streaming_sender_structures_init(host); -#ifdef ENABLE_HTTPS host->sender->ssl = NETDATA_SSL_UNSET_CONNECTION; -#endif - host->rrdpush_send_destination = strdupz(rrdpush_destination); + host->rrdpush.send.destination = strdupz(rrdpush_destination); rrdpush_destinations_init(host); - host->rrdpush_send_api_key = strdupz(rrdpush_api_key); - host->rrdpush_send_charts_matching = simple_pattern_create(rrdpush_send_charts_matching, NULL, + host->rrdpush.send.api_key = strdupz(rrdpush_api_key); + host->rrdpush.send.charts_matching = simple_pattern_create(rrdpush_send_charts_matching, NULL, SIMPLE_PATTERN_EXACT, true); rrdhost_option_set(host, RRDHOST_OPTION_SENDER_ENABLED); @@ -343,9 +341,9 @@ static RRDHOST *rrdhost_create( RRD_MEMORY_MODE memory_mode, unsigned int health_enabled, unsigned int rrdpush_enabled, - char *rrdpush_destination, - char *rrdpush_api_key, - char *rrdpush_send_charts_matching, + const char *rrdpush_destination, + const char *rrdpush_api_key, + const char *rrdpush_send_charts_matching, bool rrdpush_enable_replication, time_t rrdpush_seconds_to_replicate, time_t rrdpush_replication_step, @@ -383,8 +381,7 @@ static RRDHOST *rrdhost_create( host->rrd_history_entries = align_entries_to_pagesize(memory_mode, entries); host->health.health_enabled = ((memory_mode == RRD_MEMORY_MODE_NONE)) ? 
0 : health_enabled; - netdata_mutex_init(&host->aclk_state_lock); - netdata_mutex_init(&host->receiver_lock); + spinlock_init(&host->receiver_lock); if (likely(!archived)) { rrd_functions_host_init(host); @@ -426,7 +423,7 @@ static RRDHOST *rrdhost_create( if(!host->rrdvars) host->rrdvars = rrdvariables_create(); - if (likely(!uuid_parse(host->machine_guid, host->host_uuid))) + if (likely(!uuid_parse(host->machine_guid, host->host_id.uuid))) sql_load_node_id(host); else error_report("Host machine GUID %s is not valid", host->machine_guid); @@ -477,7 +474,7 @@ static RRDHOST *rrdhost_create( if (is_localhost && host->system_info) { host->system_info->ml_capable = ml_capable(); host->system_info->ml_enabled = ml_enabled(host); - host->system_info->mc_version = enable_metric_correlations ? metric_correlations_version : 0; + host->system_info->mc_version = metric_correlations_version; } // ------------------------------------------------------------------------ @@ -535,8 +532,8 @@ static RRDHOST *rrdhost_create( , rrd_memory_mode_name(host->rrd_memory_mode) , host->rrd_history_entries , rrdhost_has_rrdpush_sender_enabled(host)?"enabled":"disabled" - , host->rrdpush_send_destination?host->rrdpush_send_destination:"" - , host->rrdpush_send_api_key?host->rrdpush_send_api_key:"" + , host->rrdpush.send.destination?host->rrdpush.send.destination:"" + , host->rrdpush.send.api_key?host->rrdpush.send.api_key:"" , host->health.health_enabled?"enabled":"disabled" , host->cache_dir , string2str(host->health.health_default_exec) @@ -569,9 +566,9 @@ static void rrdhost_update(RRDHOST *host , RRD_MEMORY_MODE mode , unsigned int health_enabled , unsigned int rrdpush_enabled - , char *rrdpush_destination - , char *rrdpush_api_key - , char *rrdpush_send_charts_matching + , const char *rrdpush_destination + , const char *rrdpush_api_key + , const char *rrdpush_send_charts_matching , bool rrdpush_enable_replication , time_t rrdpush_seconds_to_replicate , time_t rrdpush_replication_step @@ -709,9 +706,9 @@ RRDHOST *rrdhost_find_or_create( , RRD_MEMORY_MODE mode , unsigned int health_enabled , unsigned int rrdpush_enabled - , char *rrdpush_destination - , char *rrdpush_api_key - , char *rrdpush_send_charts_matching + , const char *rrdpush_destination + , const char *rrdpush_api_key + , const char *rrdpush_send_charts_matching , bool rrdpush_enable_replication , time_t rrdpush_seconds_to_replicate , time_t rrdpush_replication_step @@ -865,7 +862,7 @@ RRD_BACKFILL get_dbengine_backfill(RRD_BACKFILL backfill) #endif -void dbengine_init(char *hostname) { +static void dbengine_init(const char *hostname) { #ifdef ENABLE_DBENGINE use_direct_io = config_get_boolean(CONFIG_SECTION_DB, "dbengine use direct io", use_direct_io); @@ -903,10 +900,10 @@ void dbengine_init(char *hostname) { !config_exists(CONFIG_SECTION_DB, "dbengine tier 2 update every iterations") && !config_exists(CONFIG_SECTION_DB, "dbengine tier 3 update every iterations") && !config_exists(CONFIG_SECTION_DB, "dbengine tier 4 update every iterations") && - !config_exists(CONFIG_SECTION_DB, "dbengine tier 1 disk space MB") && - !config_exists(CONFIG_SECTION_DB, "dbengine tier 2 disk space MB") && - !config_exists(CONFIG_SECTION_DB, "dbengine tier 3 disk space MB") && - !config_exists(CONFIG_SECTION_DB, "dbengine tier 4 disk space MB")); + !config_exists(CONFIG_SECTION_DB, "dbengine tier 1 retention size") && + !config_exists(CONFIG_SECTION_DB, "dbengine tier 2 retention size") && + !config_exists(CONFIG_SECTION_DB, "dbengine tier 3 retention size") && + 
!config_exists(CONFIG_SECTION_DB, "dbengine tier 4 retention size")); default_backfill = get_dbengine_backfill(RRD_BACKFILL_NEW); char dbengineconfig[200 + 1]; @@ -928,11 +925,11 @@ void dbengine_init(char *hostname) { storage_tiers_grouping_iterations[tier] = grouping_iterations; } - default_multidb_disk_quota_mb = (int) config_get_number(CONFIG_SECTION_DB, "dbengine tier 0 disk space MB", RRDENG_DEFAULT_TIER_DISK_SPACE_MB); + default_multidb_disk_quota_mb = (int) config_get_size_mb(CONFIG_SECTION_DB, "dbengine tier 0 retention size", RRDENG_DEFAULT_TIER_DISK_SPACE_MB); if(default_multidb_disk_quota_mb && default_multidb_disk_quota_mb < RRDENG_MIN_DISK_SPACE_MB) { netdata_log_error("Invalid disk space %d for tier 0 given. Defaulting to %d.", default_multidb_disk_quota_mb, RRDENG_MIN_DISK_SPACE_MB); default_multidb_disk_quota_mb = RRDENG_MIN_DISK_SPACE_MB; - config_set_number(CONFIG_SECTION_DB, "dbengine tier 0 disk space MB", default_multidb_disk_quota_mb); + config_set_size_mb(CONFIG_SECTION_DB, "dbengine tier 0 retention size", default_multidb_disk_quota_mb); } #ifdef OS_WINDOWS @@ -962,11 +959,11 @@ void dbengine_init(char *hostname) { } int disk_space_mb = tier ? RRDENG_DEFAULT_TIER_DISK_SPACE_MB : default_multidb_disk_quota_mb; - snprintfz(dbengineconfig, sizeof(dbengineconfig) - 1, "dbengine tier %zu disk space MB", tier); - disk_space_mb = config_get_number(CONFIG_SECTION_DB, dbengineconfig, disk_space_mb); + snprintfz(dbengineconfig, sizeof(dbengineconfig) - 1, "dbengine tier %zu retention size", tier); + disk_space_mb = config_get_size_mb(CONFIG_SECTION_DB, dbengineconfig, disk_space_mb); - snprintfz(dbengineconfig, sizeof(dbengineconfig) - 1, "dbengine tier %zu retention days", tier); - storage_tiers_retention_days[tier] = config_get_number( + snprintfz(dbengineconfig, sizeof(dbengineconfig) - 1, "dbengine tier %zu retention time", tier); + storage_tiers_retention_days[tier] = config_get_duration_days( CONFIG_SECTION_DB, dbengineconfig, new_dbengine_defaults ? storage_tiers_retention_days[tier] : 0); tiers_init[tier].disk_space_mb = (int) disk_space_mb; @@ -1027,12 +1024,14 @@ void dbengine_init(char *hostname) { #endif } -int rrd_init(char *hostname, struct rrdhost_system_info *system_info, bool unittest) { +void api_v1_management_init(void); + +int rrd_init(const char *hostname, struct rrdhost_system_info *system_info, bool unittest) { rrdhost_init(); if (unlikely(sql_init_meta_database(DB_CHECK_NONE, system_info ? 
0 : 1))) { if (default_rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE) { - set_late_global_environment(system_info); + set_late_analytics_variables(system_info); fatal("Failed to initialize SQLite"); } @@ -1048,9 +1047,9 @@ int rrd_init(char *hostname, struct rrdhost_system_info *system_info, bool unitt dbengine_enabled = true; } else { - rrdpush_init(); + stream_conf_init(); - if (default_rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE || rrdpush_receiver_needs_dbengine()) { + if (default_rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE || stream_conf_receiver_needs_dbengine()) { nd_log(NDLS_DAEMON, NDLP_DEBUG, "DBENGINE: Initializing ..."); @@ -1095,14 +1094,14 @@ int rrd_init(char *hostname, struct rrdhost_system_info *system_info, bool unitt , default_rrd_history_entries , default_rrd_memory_mode , health_plugin_enabled() - , default_rrdpush_enabled - , default_rrdpush_destination - , default_rrdpush_api_key - , default_rrdpush_send_charts_matching - , default_rrdpush_enable_replication - , default_rrdpush_seconds_to_replicate - , default_rrdpush_replication_step - , system_info + , + stream_conf_send_enabled, + stream_conf_send_destination, + stream_conf_send_api_key, + stream_conf_send_charts_matching, + stream_conf_replication_enabled, + stream_conf_replication_period, + stream_conf_replication_step, system_info , 1 , 0 ); @@ -1112,26 +1111,15 @@ int rrd_init(char *hostname, struct rrdhost_system_info *system_info, bool unitt dyncfg_host_init(localhost); - if(!unittest) { + if(!unittest) health_plugin_init(); - } - - // we register this only on localhost - // for the other nodes, the origin server should register it - rrd_function_add_inline(localhost, NULL, "streaming", 10, - RRDFUNCTIONS_PRIORITY_DEFAULT + 1, RRDFUNCTIONS_STREAMING_HELP, "top", - HTTP_ACCESS_SIGNED_ID | HTTP_ACCESS_SAME_SPACE | HTTP_ACCESS_SENSITIVE_DATA, - rrdhost_function_streaming); - rrd_function_add_inline(localhost, NULL, "netdata-api-calls", 10, - RRDFUNCTIONS_PRIORITY_DEFAULT + 2, RRDFUNCTIONS_PROGRESS_HELP, "top", - HTTP_ACCESS_SIGNED_ID | HTTP_ACCESS_SAME_SPACE | HTTP_ACCESS_SENSITIVE_DATA, - rrdhost_function_progress); + global_functions_add(); if (likely(system_info)) { - detect_machine_guid_change(&localhost->host_uuid); + detect_machine_guid_change(&localhost->host_id.uuid); sql_aclk_sync_init(); - web_client_api_v1_management_init(); + api_v1_management_init(); } return 0; @@ -1155,6 +1143,7 @@ void rrdhost_system_info_free(struct rrdhost_system_info *system_info) { freez(system_info->host_os_detection); freez(system_info->host_cores); freez(system_info->host_cpu_freq); + freez(system_info->host_cpu_model); freez(system_info->host_ram_total); freez(system_info->host_disk_space); freez(system_info->container_os_name); @@ -1195,7 +1184,7 @@ static void rrdhost_streaming_sender_structures_init(RRDHOST *host) host->sender->rrdpush_sender_socket = -1; host->sender->disabled_capabilities = STREAM_CAP_NONE; - if(!default_rrdpush_compression_enabled) + if(!stream_conf_compression_enabled) host->sender->disabled_capabilities |= STREAM_CAP_COMPRESSIONS_AVAILABLE; spinlock_init(&host->sender->spinlock); @@ -1241,6 +1230,10 @@ void rrdhost_free___while_having_rrd_wrlock(RRDHOST *host, bool force) { DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(localhost, host, prev, next); } + // ------------------------------------------------------------------------ + + rrdhost_stream_path_clear(host, true); + // ------------------------------------------------------------------------ // clean up streaming chart slots @@ -1284,9 +1277,6 @@ void 
rrdhost_free___while_having_rrd_wrlock(RRDHOST *host, bool force) { // ------------------------------------------------------------------------ // free it - pthread_mutex_destroy(&host->aclk_state_lock); - freez(host->aclk_state.claimed_id); - freez(host->aclk_state.prev_claimed_id); rrdlabels_destroy(host->rrdlabels); string_freez(host->os); string_freez(host->timezone); @@ -1295,14 +1285,13 @@ void rrdhost_free___while_having_rrd_wrlock(RRDHOST *host, bool force) { string_freez(host->program_version); rrdhost_system_info_free(host->system_info); freez(host->cache_dir); - freez(host->rrdpush_send_api_key); - freez(host->rrdpush_send_destination); + freez(host->rrdpush.send.api_key); + freez(host->rrdpush.send.destination); rrdpush_destinations_free(host); string_freez(host->health.health_default_exec); string_freez(host->health.health_default_recipient); string_freez(host->registry_hostname); - simple_pattern_free(host->rrdpush_send_charts_matching); - freez(host->node_id); + simple_pattern_free(host->rrdpush.send.charts_matching); rrd_functions_host_destroy(host); rrdvariables_destroy(host->rrdvars); @@ -1350,6 +1339,7 @@ struct rrdhost_system_info *rrdhost_labels_to_system_info(RRDLABELS *labels) { rrdlabels_get_value_strdup_or_null(labels, &info->kernel_version, "_kernel_version"); rrdlabels_get_value_strdup_or_null(labels, &info->host_cores, "_system_cores"); rrdlabels_get_value_strdup_or_null(labels, &info->host_cpu_freq, "_system_cpu_freq"); + rrdlabels_get_value_strdup_or_null(labels, &info->host_cpu_model, "_system_cpu_model"); rrdlabels_get_value_strdup_or_null(labels, &info->host_ram_total, "_system_ram_total"); rrdlabels_get_value_strdup_or_null(labels, &info->host_disk_space, "_system_disk_space"); rrdlabels_get_value_strdup_or_null(labels, &info->architecture, "_architecture"); @@ -1392,6 +1382,9 @@ static void rrdhost_load_auto_labels(void) { if (localhost->system_info->host_cpu_freq) rrdlabels_add(labels, "_system_cpu_freq", localhost->system_info->host_cpu_freq, RRDLABEL_SRC_AUTO); + if (localhost->system_info->host_cpu_model) + rrdlabels_add(labels, "_system_cpu_model", localhost->system_info->host_cpu_model, RRDLABEL_SRC_AUTO); + if (localhost->system_info->host_ram_total) rrdlabels_add(labels, "_system_ram_total", localhost->system_info->host_ram_total, RRDLABEL_SRC_AUTO); @@ -1441,8 +1434,8 @@ static void rrdhost_load_auto_labels(void) { rrdlabels_add(labels, "_hostname", string2str(localhost->hostname), RRDLABEL_SRC_AUTO); rrdlabels_add(labels, "_os", string2str(localhost->os), RRDLABEL_SRC_AUTO); - if (localhost->rrdpush_send_destination) - rrdlabels_add(labels, "_streams_to", localhost->rrdpush_send_destination, RRDLABEL_SRC_AUTO); + if (localhost->rrdpush.send.destination) + rrdlabels_add(labels, "_streams_to", localhost->rrdpush.send.destination, RRDLABEL_SRC_AUTO); } void rrdhost_set_is_parent_label(void) { @@ -1453,14 +1446,15 @@ void rrdhost_set_is_parent_label(void) { rrdlabels_add(labels, "_is_parent", (count) ? 
"true" : "false", RRDLABEL_SRC_AUTO); //queue a node info -#ifdef ENABLE_ACLK - if (netdata_cloud_enabled) { - aclk_queue_node_info(localhost, false); - } -#endif + aclk_queue_node_info(localhost, false); } } +static bool config_label_cb(void *data __maybe_unused, const char *name, const char *value) { + rrdlabels_add(localhost->rrdlabels, name, value, RRDLABEL_SRC_CONFIG); + return true; +} + static void rrdhost_load_config_labels(void) { int status = config_load(NULL, 1, CONFIG_SECTION_HOST_LABEL); if(!status) { @@ -1470,16 +1464,7 @@ static void rrdhost_load_config_labels(void) { filename); } - struct section *co = appconfig_get_section(&netdata_config, CONFIG_SECTION_HOST_LABEL); - if(co) { - config_section_wrlock(co); - struct config_option *cv; - for(cv = co->values; cv ; cv = cv->next) { - rrdlabels_add(localhost->rrdlabels, cv->name, cv->value, RRDLABEL_SRC_CONFIG); - cv->flags |= CONFIG_VALUE_USED; - } - config_section_unlock(co); - } + appconfig_foreach_value_in_section(&netdata_config, CONFIG_SECTION_HOST_LABEL, config_label_cb, NULL); } static void rrdhost_load_kubernetes_labels(void) { @@ -1498,7 +1483,7 @@ static void rrdhost_load_kubernetes_labels(void) { if(!instance) return; char buffer[1000 + 1]; - while (fgets(buffer, 1000, instance->child_stdout_fp) != NULL) + while (fgets(buffer, 1000, spawn_popen_stdout(instance)) != NULL) rrdlabels_add_pair(localhost->rrdlabels, buffer, RRDLABEL_SRC_AUTO|RRDLABEL_SRC_K8S); // Non-zero exit code means that all the script output is error messages. We've shown already any message that didn't include a ':' @@ -1624,6 +1609,10 @@ int rrdhost_set_system_info_variable(struct rrdhost_system_info *system_info, ch freez(system_info->host_cpu_freq); system_info->host_cpu_freq = strdupz(value); } + else if (!strcmp(name, "NETDATA_SYSTEM_CPU_MODEL")){ + freez(system_info->host_cpu_model); + system_info->host_cpu_model = strdupz(value); + } else if(!strcmp(name, "NETDATA_SYSTEM_TOTAL_RAM")){ freez(system_info->host_ram_total); system_info->host_ram_total = strdupz(value); @@ -1662,8 +1651,6 @@ int rrdhost_set_system_info_variable(struct rrdhost_system_info *system_info, ch } else if (!strcmp(name, "NETDATA_SYSTEM_CPU_VENDOR")) return res; - else if (!strcmp(name, "NETDATA_SYSTEM_CPU_MODEL")) - return res; else if (!strcmp(name, "NETDATA_SYSTEM_CPU_DETECTION")) return res; else if (!strcmp(name, "NETDATA_SYSTEM_RAM_DETECTION")) @@ -1679,239 +1666,8 @@ int rrdhost_set_system_info_variable(struct rrdhost_system_info *system_info, ch return res; } -static NETDATA_DOUBLE rrdhost_sender_replication_completion_unsafe(RRDHOST *host, time_t now, size_t *instances) { - size_t charts = rrdhost_sender_replicating_charts(host); - NETDATA_DOUBLE completion; - if(!charts || !host->sender || !host->sender->replication.oldest_request_after_t) - completion = 100.0; - else if(!host->sender->replication.latest_completed_before_t || host->sender->replication.latest_completed_before_t < host->sender->replication.oldest_request_after_t) - completion = 0.0; - else { - time_t total = now - host->sender->replication.oldest_request_after_t; - time_t current = host->sender->replication.latest_completed_before_t - host->sender->replication.oldest_request_after_t; - completion = (NETDATA_DOUBLE) current * 100.0 / (NETDATA_DOUBLE) total; - } - - *instances = charts; - - return completion; -} - bool rrdhost_matches_window(RRDHOST *host, time_t after, time_t before, time_t now) { time_t first_time_s, last_time_s; rrdhost_retention(host, now, rrdhost_is_online(host), 
&first_time_s, &last_time_s); return query_matches_retention(after, before, first_time_s, last_time_s, 0); } - -bool rrdhost_state_cloud_emulation(RRDHOST *host) { - return rrdhost_is_online(host); -} - -void rrdhost_status(RRDHOST *host, time_t now, RRDHOST_STATUS *s) { - memset(s, 0, sizeof(*s)); - - s->host = host; - s->now = now; - - RRDHOST_FLAGS flags = __atomic_load_n(&host->flags, __ATOMIC_RELAXED); - - // --- dyncfg --- - - s->dyncfg.status = dyncfg_available_for_rrdhost(host) ? RRDHOST_DYNCFG_STATUS_AVAILABLE : RRDHOST_DYNCFG_STATUS_UNAVAILABLE; - - // --- db --- - - bool online = rrdhost_is_online(host); - - rrdhost_retention(host, now, online, &s->db.first_time_s, &s->db.last_time_s); - s->db.metrics = host->rrdctx.metrics; - s->db.instances = host->rrdctx.instances; - s->db.contexts = dictionary_entries(host->rrdctx.contexts); - if(!s->db.first_time_s || !s->db.last_time_s || !s->db.metrics || !s->db.instances || !s->db.contexts || - (flags & (RRDHOST_FLAG_PENDING_CONTEXT_LOAD))) - s->db.status = RRDHOST_DB_STATUS_INITIALIZING; - else - s->db.status = RRDHOST_DB_STATUS_QUERYABLE; - - s->db.mode = host->rrd_memory_mode; - - // --- ingest --- - - s->ingest.since = MAX(host->child_connect_time, host->child_disconnected_time); - s->ingest.reason = (online) ? STREAM_HANDSHAKE_NEVER : host->rrdpush_last_receiver_exit_reason; - - netdata_mutex_lock(&host->receiver_lock); - s->ingest.hops = (host->system_info ? host->system_info->hops : (host == localhost) ? 0 : 1); - bool has_receiver = false; - if (host->receiver) { - has_receiver = true; - s->ingest.replication.instances = rrdhost_receiver_replicating_charts(host); - s->ingest.replication.completion = host->rrdpush_receiver_replication_percent; - s->ingest.replication.in_progress = s->ingest.replication.instances > 0; - - s->ingest.capabilities = host->receiver->capabilities; - s->ingest.peers = socket_peers(host->receiver->fd); -#ifdef ENABLE_HTTPS - s->ingest.ssl = SSL_connection(&host->receiver->ssl); -#endif - } - netdata_mutex_unlock(&host->receiver_lock); - - if (online) { - if(s->db.status == RRDHOST_DB_STATUS_INITIALIZING) - s->ingest.status = RRDHOST_INGEST_STATUS_INITIALIZING; - - else if (host == localhost || rrdhost_option_check(host, RRDHOST_OPTION_VIRTUAL_HOST)) { - s->ingest.status = RRDHOST_INGEST_STATUS_ONLINE; - s->ingest.since = netdata_start_time; - } - - else if (s->ingest.replication.in_progress) - s->ingest.status = RRDHOST_INGEST_STATUS_REPLICATING; - - else - s->ingest.status = RRDHOST_INGEST_STATUS_ONLINE; - } - else { - if (!s->ingest.since) { - s->ingest.status = RRDHOST_INGEST_STATUS_ARCHIVED; - s->ingest.since = s->db.last_time_s; - } - - else - s->ingest.status = RRDHOST_INGEST_STATUS_OFFLINE; - } - - if(host == localhost) - s->ingest.type = RRDHOST_INGEST_TYPE_LOCALHOST; - else if(has_receiver || rrdhost_flag_set(host, RRDHOST_FLAG_RRDPUSH_RECEIVER_DISCONNECTED)) - s->ingest.type = RRDHOST_INGEST_TYPE_CHILD; - else if(rrdhost_option_check(host, RRDHOST_OPTION_VIRTUAL_HOST)) - s->ingest.type = RRDHOST_INGEST_TYPE_VIRTUAL; - else - s->ingest.type = RRDHOST_INGEST_TYPE_ARCHIVED; - - s->ingest.id = host->rrdpush_receiver_connection_counter; - - if(!s->ingest.since) - s->ingest.since = netdata_start_time; - - if(s->ingest.status == RRDHOST_INGEST_STATUS_ONLINE) - s->db.liveness = RRDHOST_DB_LIVENESS_LIVE; - else - s->db.liveness = RRDHOST_DB_LIVENESS_STALE; - - // --- stream --- - - if (!host->sender) { - s->stream.status = RRDHOST_STREAM_STATUS_DISABLED; - s->stream.hops = s->ingest.hops + 1; - } - else 
{ - sender_lock(host->sender); - - s->stream.since = host->sender->last_state_since_t; - s->stream.peers = socket_peers(host->sender->rrdpush_sender_socket); -#ifdef ENABLE_HTTPS - s->stream.ssl = SSL_connection(&host->sender->ssl); -#endif - - memcpy(s->stream.sent_bytes_on_this_connection_per_type, - host->sender->sent_bytes_on_this_connection_per_type, - MIN(sizeof(s->stream.sent_bytes_on_this_connection_per_type), - sizeof(host->sender->sent_bytes_on_this_connection_per_type))); - - if (rrdhost_flag_check(host, RRDHOST_FLAG_RRDPUSH_SENDER_CONNECTED)) { - s->stream.hops = host->sender->hops; - s->stream.reason = STREAM_HANDSHAKE_NEVER; - s->stream.capabilities = host->sender->capabilities; - - s->stream.replication.completion = rrdhost_sender_replication_completion_unsafe(host, now, &s->stream.replication.instances); - s->stream.replication.in_progress = s->stream.replication.instances > 0; - - if(s->stream.replication.in_progress) - s->stream.status = RRDHOST_STREAM_STATUS_REPLICATING; - else - s->stream.status = RRDHOST_STREAM_STATUS_ONLINE; - - s->stream.compression = host->sender->compressor.initialized; - } - else { - s->stream.status = RRDHOST_STREAM_STATUS_OFFLINE; - s->stream.hops = s->ingest.hops + 1; - s->stream.reason = host->sender->exit.reason; - } - - sender_unlock(host->sender); - } - - s->stream.id = host->rrdpush_sender_connection_counter; - - if(!s->stream.since) - s->stream.since = netdata_start_time; - - // --- ml --- - - if(ml_host_get_host_status(host, &s->ml.metrics)) { - s->ml.type = RRDHOST_ML_TYPE_SELF; - - if(s->ingest.status == RRDHOST_INGEST_STATUS_OFFLINE || s->ingest.status == RRDHOST_INGEST_STATUS_ARCHIVED) - s->ml.status = RRDHOST_ML_STATUS_OFFLINE; - else - s->ml.status = RRDHOST_ML_STATUS_RUNNING; - } - else if(stream_has_capability(&s->ingest, STREAM_CAP_DATA_WITH_ML)) { - s->ml.type = RRDHOST_ML_TYPE_RECEIVED; - s->ml.status = RRDHOST_ML_STATUS_RUNNING; - } - else { - // does not receive ML, does not run ML - s->ml.type = RRDHOST_ML_TYPE_DISABLED; - s->ml.status = RRDHOST_ML_STATUS_DISABLED; - } - - // --- health --- - - if(host->health.health_enabled) { - if(flags & RRDHOST_FLAG_PENDING_HEALTH_INITIALIZATION) - s->health.status = RRDHOST_HEALTH_STATUS_INITIALIZING; - else { - s->health.status = RRDHOST_HEALTH_STATUS_RUNNING; - - RRDCALC *rc; - foreach_rrdcalc_in_rrdhost_read(host, rc) { - if (unlikely(!rc->rrdset || !rc->rrdset->last_collected_time.tv_sec)) - continue; - - switch (rc->status) { - default: - case RRDCALC_STATUS_REMOVED: - break; - - case RRDCALC_STATUS_CLEAR: - s->health.alerts.clear++; - break; - - case RRDCALC_STATUS_WARNING: - s->health.alerts.warning++; - break; - - case RRDCALC_STATUS_CRITICAL: - s->health.alerts.critical++; - break; - - case RRDCALC_STATUS_UNDEFINED: - s->health.alerts.undefined++; - break; - - case RRDCALC_STATUS_UNINITIALIZED: - s->health.alerts.uninitialized++; - break; - } - } - foreach_rrdcalc_in_rrdhost_done(rc); - } - } - else - s->health.status = RRDHOST_HEALTH_STATUS_DISABLED; -} diff --git a/src/database/rrdlabels.c b/src/database/rrdlabels.c index 65e2dc9e4..585b98264 100644 --- a/src/database/rrdlabels.c +++ b/src/database/rrdlabels.c @@ -88,464 +88,15 @@ static inline void STATS_MINUS_MEMORY(struct dictionary_stats *stats, size_t key __atomic_fetch_sub(&stats->memory.values, (long)value_size, __ATOMIC_RELAXED); } -// ---------------------------------------------------------------------------- -// labels sanitization - -/* - * All labels follow these rules: - * - * Character Symbol Values Names - * 
UTF-8 characters UTF-8 yes -> _ - * Lower case letter [a-z] yes yes - * Upper case letter [A-Z] yes -> [a-z] - * Digit [0-9] yes yes - * Underscore _ yes yes - * Minus - yes yes - * Plus + yes -> _ - * Colon : yes -> _ - * Semicolon ; -> : -> _ - * Equal = -> : -> _ - * Period . yes yes - * Comma , -> . -> . - * Slash / yes yes - * Backslash \ -> / -> / - * At @ yes -> _ - * Space yes -> _ - * Opening parenthesis ( yes -> _ - * Closing parenthesis ) yes -> _ - * anything else -> _ -> _ -* - * The above rules should allow users to set in tags (indicative): - * - * 1. hostnames and domain names as-is - * 2. email addresses as-is - * 3. floating point numbers, converted to always use a dot as the decimal point - * - * Leading and trailing spaces and control characters are removed from both label - * names and values. - * - * Multiple spaces inside the label name or the value are removed (only 1 is retained). - * In names spaces are also converted to underscores. - * - * Names that are only underscores are rejected (they do not enter the dictionary). - * - * The above rules do not require any conversion to be included in JSON strings. - * - * Label names and values are truncated to LABELS_MAX_LENGTH (200) characters. - * - * When parsing, label key and value are separated by the first colon (:) found. - * So label:value1:value2 is parsed as key = "label", value = "value1:value2" - * - * This means a label key cannot contain a colon (:) - it is converted to - * underscore if it does. - * - */ - #define RRDLABELS_MAX_NAME_LENGTH 200 #define RRDLABELS_MAX_VALUE_LENGTH 800 // 800 in bytes, up to 200 UTF-8 characters -static unsigned char label_spaces_char_map[256]; -static unsigned char label_names_char_map[256]; -static unsigned char label_values_char_map[256] = { - [0] = '\0', // - [1] = '_', // - [2] = '_', // - [3] = '_', // - [4] = '_', // - [5] = '_', // - [6] = '_', // - [7] = '_', // - [8] = '_', // - [9] = '_', // - [10] = '_', // - [11] = '_', // - [12] = '_', // - [13] = '_', // - [14] = '_', // - [15] = '_', // - [16] = '_', // - [17] = '_', // - [18] = '_', // - [19] = '_', // - [20] = '_', // - [21] = '_', // - [22] = '_', // - [23] = '_', // - [24] = '_', // - [25] = '_', // - [26] = '_', // - [27] = '_', // - [28] = '_', // - [29] = '_', // - [30] = '_', // - [31] = '_', // - [32] = ' ', // SPACE keep - [33] = '_', // ! - [34] = '_', // " - [35] = '_', // # - [36] = '_', // $ - [37] = '_', // % - [38] = '_', // & - [39] = '_', // ' - [40] = '(', // ( keep - [41] = ')', // ) keep - [42] = '_', // * - [43] = '+', // + keep - [44] = '.', // , convert , to . - [45] = '-', // - keep - [46] = '.', // . keep - [47] = '/', // / keep - [48] = '0', // 0 keep - [49] = '1', // 1 keep - [50] = '2', // 2 keep - [51] = '3', // 3 keep - [52] = '4', // 4 keep - [53] = '5', // 5 keep - [54] = '6', // 6 keep - [55] = '7', // 7 keep - [56] = '8', // 8 keep - [57] = '9', // 9 keep - [58] = ':', // : keep - [59] = ':', // ; convert ; to : - [60] = '_', // < - [61] = ':', // = convert = to : - [62] = '_', // > - [63] = '_', // ? 
- [64] = '@', // @ - [65] = 'A', // A keep - [66] = 'B', // B keep - [67] = 'C', // C keep - [68] = 'D', // D keep - [69] = 'E', // E keep - [70] = 'F', // F keep - [71] = 'G', // G keep - [72] = 'H', // H keep - [73] = 'I', // I keep - [74] = 'J', // J keep - [75] = 'K', // K keep - [76] = 'L', // L keep - [77] = 'M', // M keep - [78] = 'N', // N keep - [79] = 'O', // O keep - [80] = 'P', // P keep - [81] = 'Q', // Q keep - [82] = 'R', // R keep - [83] = 'S', // S keep - [84] = 'T', // T keep - [85] = 'U', // U keep - [86] = 'V', // V keep - [87] = 'W', // W keep - [88] = 'X', // X keep - [89] = 'Y', // Y keep - [90] = 'Z', // Z keep - [91] = '[', // [ keep - [92] = '/', // backslash convert \ to / - [93] = ']', // ] keep - [94] = '_', // ^ - [95] = '_', // _ keep - [96] = '_', // ` - [97] = 'a', // a keep - [98] = 'b', // b keep - [99] = 'c', // c keep - [100] = 'd', // d keep - [101] = 'e', // e keep - [102] = 'f', // f keep - [103] = 'g', // g keep - [104] = 'h', // h keep - [105] = 'i', // i keep - [106] = 'j', // j keep - [107] = 'k', // k keep - [108] = 'l', // l keep - [109] = 'm', // m keep - [110] = 'n', // n keep - [111] = 'o', // o keep - [112] = 'p', // p keep - [113] = 'q', // q keep - [114] = 'r', // r keep - [115] = 's', // s keep - [116] = 't', // t keep - [117] = 'u', // u keep - [118] = 'v', // v keep - [119] = 'w', // w keep - [120] = 'x', // x keep - [121] = 'y', // y keep - [122] = 'z', // z keep - [123] = '_', // { - [124] = '_', // | - [125] = '_', // } - [126] = '_', // ~ - [127] = '_', // - [128] = '_', // - [129] = '_', // - [130] = '_', // - [131] = '_', // - [132] = '_', // - [133] = '_', // - [134] = '_', // - [135] = '_', // - [136] = '_', // - [137] = '_', // - [138] = '_', // - [139] = '_', // - [140] = '_', // - [141] = '_', // - [142] = '_', // - [143] = '_', // - [144] = '_', // - [145] = '_', // - [146] = '_', // - [147] = '_', // - [148] = '_', // - [149] = '_', // - [150] = '_', // - [151] = '_', // - [152] = '_', // - [153] = '_', // - [154] = '_', // - [155] = '_', // - [156] = '_', // - [157] = '_', // - [158] = '_', // - [159] = '_', // - [160] = '_', // - [161] = '_', // - [162] = '_', // - [163] = '_', // - [164] = '_', // - [165] = '_', // - [166] = '_', // - [167] = '_', // - [168] = '_', // - [169] = '_', // - [170] = '_', // - [171] = '_', // - [172] = '_', // - [173] = '_', // - [174] = '_', // - [175] = '_', // - [176] = '_', // - [177] = '_', // - [178] = '_', // - [179] = '_', // - [180] = '_', // - [181] = '_', // - [182] = '_', // - [183] = '_', // - [184] = '_', // - [185] = '_', // - [186] = '_', // - [187] = '_', // - [188] = '_', // - [189] = '_', // - [190] = '_', // - [191] = '_', // - [192] = '_', // - [193] = '_', // - [194] = '_', // - [195] = '_', // - [196] = '_', // - [197] = '_', // - [198] = '_', // - [199] = '_', // - [200] = '_', // - [201] = '_', // - [202] = '_', // - [203] = '_', // - [204] = '_', // - [205] = '_', // - [206] = '_', // - [207] = '_', // - [208] = '_', // - [209] = '_', // - [210] = '_', // - [211] = '_', // - [212] = '_', // - [213] = '_', // - [214] = '_', // - [215] = '_', // - [216] = '_', // - [217] = '_', // - [218] = '_', // - [219] = '_', // - [220] = '_', // - [221] = '_', // - [222] = '_', // - [223] = '_', // - [224] = '_', // - [225] = '_', // - [226] = '_', // - [227] = '_', // - [228] = '_', // - [229] = '_', // - [230] = '_', // - [231] = '_', // - [232] = '_', // - [233] = '_', // - [234] = '_', // - [235] = '_', // - [236] = '_', // - [237] = '_', // - [238] = '_', // - [239] = '_', 
// - [240] = '_', // - [241] = '_', // - [242] = '_', // - [243] = '_', // - [244] = '_', // - [245] = '_', // - [246] = '_', // - [247] = '_', // - [248] = '_', // - [249] = '_', // - [250] = '_', // - [251] = '_', // - [252] = '_', // - [253] = '_', // - [254] = '_', // - [255] = '_' // -}; - -__attribute__((constructor)) void initialize_labels_keys_char_map(void) { - // copy the values char map to the names char map - size_t i; - for(i = 0; i < 256 ;i++) - label_names_char_map[i] = label_values_char_map[i]; - - // apply overrides to the label names map - label_names_char_map['='] = '_'; - label_names_char_map[':'] = '_'; - label_names_char_map['+'] = '_'; - label_names_char_map[';'] = '_'; - label_names_char_map['@'] = '_'; - label_names_char_map['('] = '_'; - label_names_char_map[')'] = '_'; - label_names_char_map[' '] = '_'; - label_names_char_map['\\'] = '/'; - - // create the space map - for(i = 0; i < 256 ;i++) - label_spaces_char_map[i] = (isspace(i) || iscntrl(i) || !isprint(i))?1:0; - -} - __attribute__((constructor)) void initialize_label_stats(void) { dictionary_stats_category_rrdlabels.memory.dict = 0; dictionary_stats_category_rrdlabels.memory.index = 0; dictionary_stats_category_rrdlabels.memory.values = 0; } -size_t text_sanitize(unsigned char *dst, const unsigned char *src, size_t dst_size, const unsigned char *char_map, bool utf, const char *empty, size_t *multibyte_length) { - if(unlikely(!src || !dst_size)) return 0; - - if(unlikely(!src || !*src)) { - strncpyz((char *)dst, empty, dst_size); - dst[dst_size - 1] = '\0'; - size_t len = strlen((char *)dst); - if(multibyte_length) *multibyte_length = len; - return len; - } - - unsigned char *d = dst; - - // make room for the final string termination - unsigned char *end = &d[dst_size - 1]; - - // copy while converting, but keep only one space - // we start wil last_is_space = 1 to skip leading spaces - int last_is_space = 1; - - size_t mblen = 0; - - while(*src && d < end) { - unsigned char c = *src; - - if(IS_UTF8_STARTBYTE(c) && IS_UTF8_BYTE(src[1]) && d + 2 < end) { - // UTF-8 multi-byte encoded character - - // find how big this character is (2-4 bytes) - size_t utf_character_size = 2; - while(utf_character_size < 4 && src[utf_character_size] && IS_UTF8_BYTE(src[utf_character_size]) && !IS_UTF8_STARTBYTE(src[utf_character_size])) - utf_character_size++; - - if(utf) { - while(utf_character_size) { - utf_character_size--; - *d++ = *src++; - } - } - else { - // UTF-8 characters are not allowed. 
- // Assume it is an underscore - // and skip all except the first byte - *d++ = '_'; - src += (utf_character_size - 1); - } - - last_is_space = 0; - mblen++; - continue; - } - - if(label_spaces_char_map[c]) { - // a space character - - if(!last_is_space) { - // add one space - *d++ = char_map[c]; - mblen++; - } - - last_is_space++; - } - else { - *d++ = char_map[c]; - last_is_space = 0; - mblen++; - } - - src++; - } - - // remove the last trailing space - if(last_is_space && d > dst) { - d--; - mblen--; - } - - // put a termination at the end of what we copied - *d = '\0'; - - // check if dst is all underscores and empty it if it is - if(*dst == '_') { - unsigned char *t = dst; - while (*t == '_') t++; - if (unlikely(*t == '\0')) { - *dst = '\0'; - mblen = 0; - } - } - - if(unlikely(*dst == '\0')) { - strncpyz((char *)dst, empty, dst_size); - dst[dst_size - 1] = '\0'; - mblen = strlen((char *)dst); - if(multibyte_length) *multibyte_length = mblen; - return mblen; - } - - if(multibyte_length) *multibyte_length = mblen; - - return d - dst; -} - -static inline size_t rrdlabels_sanitize_name(char *dst, const char *src, size_t dst_size) { - return text_sanitize((unsigned char *)dst, (const unsigned char *)src, dst_size, label_names_char_map, 0, "", NULL); -} - -static inline size_t rrdlabels_sanitize_value(char *dst, const char *src, size_t dst_size) { - return text_sanitize((unsigned char *)dst, (const unsigned char *)src, dst_size, label_values_char_map, 1, "[none]", NULL); -} - // ---------------------------------------------------------------------------- // rrdlabels_create() @@ -886,6 +437,7 @@ void rrdlabels_get_value_to_buffer_or_unset(RRDLABELS *labels, BUFFER *wb, const RRDLABEL *lb; RRDLABEL_SRC ls; + bool set = false; lfe_start_read(labels, lb, ls) { if (lb->index.key == this_key) { @@ -893,10 +445,15 @@ void rrdlabels_get_value_to_buffer_or_unset(RRDLABELS *labels, BUFFER *wb, const buffer_strcat(wb, string2str(lb->index.value)); else buffer_strcat(wb, unset); + set = true; break; } } lfe_done(labels); + + if(!set) + buffer_strcat(wb, unset); + string_freez(this_key); } @@ -1601,6 +1158,9 @@ static int rrdlabels_unittest_add_pairs() { // test newlines errors += rrdlabels_unittest_add_a_pair(" tag = \t value \r\n", "tag", "value"); + // test spaces in names + errors += rrdlabels_unittest_add_a_pair(" t a g = value", "t_a_g", "value"); + // test : in values errors += rrdlabels_unittest_add_a_pair("tag=:value", "tag", ":value"); errors += rrdlabels_unittest_add_a_pair("tag::value", "tag", ":value"); @@ -1991,6 +1551,18 @@ int rrdlabels_unittest_sanitization() { // mixed multi-byte errors += rrdlabels_unittest_sanitize_value("Ű‱𩸽‱Ű", "Ű‱𩸽‱Ű"); + // invalid UTF8 No 1 + const unsigned char invalid1[] = { 0xC3, 0x28, 'A', 'B', 0x0 }; + errors += rrdlabels_unittest_sanitize_value((const char *)invalid1, "(AB"); + + // invalid UTF8 No 2 + const unsigned char invalid2[] = { 'A', 'B', 0xC3, 0x28, 'C', 'D', 0x0 }; + errors += rrdlabels_unittest_sanitize_value((const char *)invalid2, "AB (CD"); + + // invalid UTF8 No 3 + const unsigned char invalid3[] = { 'A', 'B', 0xC3, 0x28, 0x0 }; + errors += rrdlabels_unittest_sanitize_value((const char *)invalid3, "AB ("); + return errors; } diff --git a/src/database/rrdlabels.h b/src/database/rrdlabels.h index 28132b73e..da8ec5be1 100644 --- a/src/database/rrdlabels.h +++ b/src/database/rrdlabels.h @@ -30,8 +30,6 @@ typedef enum __attribute__ ((__packed__)) rrdlabel_source { #define RRDLABEL_FLAG_INTERNAL (RRDLABEL_FLAG_OLD | RRDLABEL_FLAG_NEW | 
RRDLABEL_FLAG_DONT_DELETE) -size_t text_sanitize(unsigned char *dst, const unsigned char *src, size_t dst_size, const unsigned char *char_map, bool utf, const char *empty, size_t *multibyte_length); - RRDLABELS *rrdlabels_create(void); void rrdlabels_destroy(RRDLABELS *labels_dict); void rrdlabels_add(RRDLABELS *labels, const char *name, const char *value, RRDLABEL_SRC ls); @@ -77,6 +75,7 @@ pattern_array_add_key_simple_pattern(struct pattern_array *pa, const char *key, void pattern_array_free(struct pattern_array *pa); int rrdlabels_unittest(void); +size_t rrdlabels_sanitize_name(char *dst, const char *src, size_t dst_size); // unfortunately this break when defined in exporting_engine.h bool exporting_labels_filter_callback(const char *name, const char *value, RRDLABEL_SRC ls, void *data); diff --git a/src/database/rrdset.c b/src/database/rrdset.c index e5273532f..396f66835 100644 --- a/src/database/rrdset.c +++ b/src/database/rrdset.c @@ -597,21 +597,6 @@ void rrdset_acquired_release(RRDSET_ACQUIRED *rsa) { // ---------------------------------------------------------------------------- // RRDSET - rename charts -char *rrdset_strncpyz_name(char *to, const char *from, size_t length) { - char c, *p = to; - - while (length-- && (c = *from++)) { - if(c != '.' && c != '-' && !isalnum(c)) - c = '_'; - - *p++ = c; - } - - *p = '\0'; - - return to; -} - int rrdset_reset_name(RRDSET *st, const char *name) { if(unlikely(!strcmp(rrdset_name(st), name))) return 1; @@ -747,6 +732,8 @@ void rrdset_get_retention_of_tier_for_collected_chart(RRDSET *st, time_t *first_ } inline void rrdset_is_obsolete___safe_from_collector_thread(RRDSET *st) { + if(!st) return; + rrdset_pluginsd_receive_unslot(st); if(unlikely(!(rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE)))) { diff --git a/src/database/sqlite/sqlite3.c b/src/database/sqlite/sqlite3.c index 61cfb904c..fa796f861 100644 --- a/src/database/sqlite/sqlite3.c +++ b/src/database/sqlite/sqlite3.c @@ -1,6 +1,6 @@ /****************************************************************************** ** This file is an amalgamation of many separate C source files from SQLite -** version 3.45.3. By combining all the individual C code files into this +** version 3.46.1. By combining all the individual C code files into this ** single large file, the entire code can be compiled as a single translation ** unit. This allows many compilers to do optimizations that would not be ** possible if the files were compiled separately. Performance improvements @@ -18,7 +18,7 @@ ** separate file. This file contains only code for the core SQLite library. ** ** The content in this amalgamation comes from Fossil check-in -** 8653b758870e6ef0c98d46b3ace27849054a. +** c9c2ab54ba1f5f46360f1b4f35d849cd3f08. */ #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wimplicit-fallthrough" @@ -467,9 +467,9 @@ extern "C" { ** [sqlite3_libversion_number()], [sqlite3_sourceid()], ** [sqlite_version()] and [sqlite_source_id()]. */ -#define SQLITE_VERSION "3.45.3" -#define SQLITE_VERSION_NUMBER 3045003 -#define SQLITE_SOURCE_ID "2024-04-15 13:34:05 8653b758870e6ef0c98d46b3ace27849054af85da891eb121e9aaa537f1e8355" +#define SQLITE_VERSION "3.46.1" +#define SQLITE_VERSION_NUMBER 3046001 +#define SQLITE_SOURCE_ID "2024-08-13 09:16:08 c9c2ab54ba1f5f46360f1b4f35d849cd3f080e6fc2b6c60e91b16c63f69a1e33" /* ** CAPI3REF: Run-Time Library Version Numbers @@ -1085,11 +1085,11 @@ struct sqlite3_file { ** ** xLock() upgrades the database file lock. 
In other words, xLock() moves the ** database file lock in the direction NONE toward EXCLUSIVE. The argument to -** xLock() is always on of SHARED, RESERVED, PENDING, or EXCLUSIVE, never +** xLock() is always one of SHARED, RESERVED, PENDING, or EXCLUSIVE, never ** SQLITE_LOCK_NONE. If the database file lock is already at or above the ** requested lock, then the call to xLock() is a no-op. ** xUnlock() downgrades the database file lock to either SHARED or NONE. -* If the lock is already at or below the requested lock state, then the call +** If the lock is already at or below the requested lock state, then the call ** to xUnlock() is a no-op. ** The xCheckReservedLock() method checks whether any database connection, ** either in this process or in some other process, is holding a RESERVED, @@ -3626,8 +3626,8 @@ SQLITE_API int sqlite3_set_authorizer( #define SQLITE_RECURSIVE 33 /* NULL NULL */ /* -** CAPI3REF: Tracing And Profiling Functions -** METHOD: sqlite3 +** CAPI3REF: Deprecated Tracing And Profiling Functions +** DEPRECATED ** ** These routines are deprecated. Use the [sqlite3_trace_v2()] interface ** instead of the routines described here. @@ -7208,6 +7208,12 @@ SQLITE_API int sqlite3_autovacuum_pages( ** The exceptions defined in this paragraph might change in a future ** release of SQLite. ** +** Whether the update hook is invoked before or after the +** corresponding change is currently unspecified and may differ +** depending on the type of change. Do not rely on the order of the +** hook call with regards to the final result of the operation which +** triggers the hook. +** ** The update hook implementation must not do anything that will modify ** the database connection that invoked the update hook. Any actions ** to modify the database connection must be deferred until after the @@ -8678,7 +8684,7 @@ SQLITE_API int sqlite3_test_control(int op, ...); ** The sqlite3_keyword_count() interface returns the number of distinct ** keywords understood by SQLite. ** -** The sqlite3_keyword_name(N,Z,L) interface finds the N-th keyword and +** The sqlite3_keyword_name(N,Z,L) interface finds the 0-based N-th keyword and ** makes *Z point to that keyword expressed as UTF8 and writes the number ** of bytes in the keyword into *L. The string that *Z points to is not ** zero-terminated. The sqlite3_keyword_name(N,Z,L) routine returns @@ -10257,24 +10263,45 @@ SQLITE_API const char *sqlite3_vtab_collation(sqlite3_index_info*,int); **
** •
** ^(If the sqlite3_vtab_distinct() interface returns 2, that means
** that the query planner does not need the rows returned in any particular
-** order, as long as rows with the same values in all "aOrderBy" columns
-** are adjacent.)^ ^(Furthermore, only a single row for each particular
-** combination of values in the columns identified by the "aOrderBy" field
-** needs to be returned.)^ ^It is always ok for two or more rows with the same
-** values in all "aOrderBy" columns to be returned, as long as all such rows
-** are adjacent. ^The virtual table may, if it chooses, omit extra rows
-** that have the same value for all columns identified by "aOrderBy".
-** ^However omitting the extra rows is optional.
+** order, as long as rows with the same values in all columns identified
+** by "aOrderBy" are adjacent.)^ ^(Furthermore, when two or more rows
+** contain the same values for all columns identified by "colUsed", all but
+** one such row may optionally be omitted from the result.)^
+** The virtual table is not required to omit rows that are duplicates
+** over the "colUsed" columns, but if the virtual table can do that without
+** too much extra effort, it could potentially help the query to run faster.
** This mode is used for a DISTINCT query.
**
** •
-** ^(If the sqlite3_vtab_distinct() interface returns 3, that means
-** that the query planner needs only distinct rows but it does need the
-** rows to be sorted.)^ ^The virtual table implementation is free to omit
-** rows that are identical in all aOrderBy columns, if it wants to, but
-** it is not required to omit any rows. This mode is used for queries
+** ^(If the sqlite3_vtab_distinct() interface returns 3, that means the
+** virtual table must return rows in the order defined by "aOrderBy" as
+** if the sqlite3_vtab_distinct() interface had returned 0. However if
+** two or more rows in the result have the same values for all columns
+** identified by "colUsed", then all but one such row may optionally be
+** omitted.)^ Like when the return value is 2, the virtual table
+** is not required to omit rows that are duplicates over the "colUsed"
+** columns, but if the virtual table can do that without
+** too much extra effort, it could potentially help the query to run faster.
+** This mode is used for queries
** that have both DISTINCT and ORDER BY clauses.
**
**
+**
+** The following table summarizes the conditions under which the
+** virtual table is allowed to set the "orderByConsumed" flag based on
+** the value returned by sqlite3_vtab_distinct(). This table is a
+** restatement of the previous four paragraphs:
+**
+**     sqlite3_vtab_distinct()  Rows are returned  Rows with the same value  Duplicates over all
+**     return value             in aOrderBy order  in all aOrderBy columns   colUsed columns
+**                                                 are adjacent              may be omitted
+**     --------------------------------------------------------------------------------------
+**     0                        yes                yes                       no
+**     1                        no                 yes                       no
+**     2                        no                 yes                       yes
+**     3                        yes                yes                       yes
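+**
+** For illustration only (a sketch, not part of the interface
+** specification; vtabGroupsEqualKeys() is a hypothetical helper standing
+** in for table-specific logic), an xBestIndex method might handle the
+** mode-2 case like this:
+**
+**       if( sqlite3_vtab_distinct(pIdxInfo)==2
+**        && vtabGroupsEqualKeys(pIdxInfo) ){
+**         /* Rows with equal aOrderBy keys will be returned adjacent to
+**         ** each other, which is all a DISTINCT query needs, so the
+**         ** sort can be consumed by the virtual table. */
+**         pIdxInfo->orderByConsumed = 1;
+**       }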
    +** ** ^For the purposes of comparing virtual table output values to see if the ** values are same value for sorting purposes, two NULL values are considered ** to be the same. In other words, the comparison operator is "IS" @@ -12319,6 +12346,30 @@ SQLITE_API int sqlite3changegroup_schema(sqlite3_changegroup*, sqlite3*, const c */ SQLITE_API int sqlite3changegroup_add(sqlite3_changegroup*, int nData, void *pData); +/* +** CAPI3REF: Add A Single Change To A Changegroup +** METHOD: sqlite3_changegroup +** +** This function adds the single change currently indicated by the iterator +** passed as the second argument to the changegroup object. The rules for +** adding the change are just as described for [sqlite3changegroup_add()]. +** +** If the change is successfully added to the changegroup, SQLITE_OK is +** returned. Otherwise, an SQLite error code is returned. +** +** The iterator must point to a valid entry when this function is called. +** If it does not, SQLITE_ERROR is returned and no change is added to the +** changegroup. Additionally, the iterator must not have been opened with +** the SQLITE_CHANGESETAPPLY_INVERT flag. In this case SQLITE_ERROR is also +** returned. +*/ +SQLITE_API int sqlite3changegroup_add_change( + sqlite3_changegroup*, + sqlite3_changeset_iter* +); + + + /* ** CAPI3REF: Obtain A Composite Changeset From A Changegroup ** METHOD: sqlite3_changegroup @@ -13123,8 +13174,8 @@ struct Fts5PhraseIter { ** EXTENSION API FUNCTIONS ** ** xUserData(pFts): -** Return a copy of the context pointer the extension function was -** registered with. +** Return a copy of the pUserData pointer passed to the xCreateFunction() +** API when the extension function was registered. ** ** xColumnTotalSize(pFts, iCol, pnToken): ** If parameter iCol is less than zero, set output variable *pnToken @@ -14322,6 +14373,8 @@ struct fts5_api { # define SQLITE_OMIT_ALTERTABLE #endif +#define SQLITE_DIGIT_SEPARATOR '_' + /* ** Return true (non-zero) if the input is an integer that is too large ** to fit in 32-bits. 
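/*
** A hedged usage sketch for the sqlite3changegroup_add_change() interface
** documented in the hunk above (this sketch is not part of the patch; the
** function name changegroup_add_filtered and the filtering policy are
** assumptions). It copies changes from a changeset into a changegroup one
** at a time, instead of adding the whole blob via sqlite3changegroup_add(),
** so that individual changes can be inspected or skipped along the way.
*/
static int changegroup_add_filtered(sqlite3_changegroup *pGrp,
                                    int nData, void *pData){
  sqlite3_changeset_iter *pIter = 0;
  /* Open an iterator over the raw changeset blob. Note the docs above:
  ** the iterator must not be opened with SQLITE_CHANGESETAPPLY_INVERT. */
  int rc = sqlite3changeset_start(&pIter, nData, pData);
  if( rc!=SQLITE_OK ) return rc;
  while( sqlite3changeset_next(pIter)==SQLITE_ROW ){
    /* The iterator points at a valid entry here, as the API requires.
    ** A real filter would consult sqlite3changeset_op(pIter, ...) before
    ** deciding whether to add the change; this sketch adds everything. */
    rc = sqlite3changegroup_add_change(pGrp, pIter);
    if( rc!=SQLITE_OK ) break;
  }
  sqlite3changeset_finalize(pIter);  /* releases the iterator in all cases */
  return rc;
}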
This macro is used inside of various testcase() @@ -14614,8 +14667,8 @@ SQLITE_PRIVATE void sqlite3HashClear(Hash*); #define TK_TRUEFALSE 170 #define TK_ISNOT 171 #define TK_FUNCTION 172 -#define TK_UMINUS 173 -#define TK_UPLUS 174 +#define TK_UPLUS 173 +#define TK_UMINUS 174 #define TK_TRUTH 175 #define TK_REGISTER 176 #define TK_VECTOR 177 @@ -14624,8 +14677,9 @@ SQLITE_PRIVATE void sqlite3HashClear(Hash*); #define TK_ASTERISK 180 #define TK_SPAN 181 #define TK_ERROR 182 -#define TK_SPACE 183 -#define TK_ILLEGAL 184 +#define TK_QNUMBER 183 +#define TK_SPACE 184 +#define TK_ILLEGAL 185 /************** End of parse.h ***********************************************/ /************** Continuing where we left off in sqliteInt.h ******************/ @@ -14887,7 +14941,7 @@ typedef INT16_TYPE LogEst; # define SQLITE_PTRSIZE __SIZEOF_POINTER__ # elif defined(i386) || defined(__i386__) || defined(_M_IX86) || \ defined(_M_ARM) || defined(__arm__) || defined(__x86) || \ - (defined(__APPLE__) && defined(__POWERPC__)) || \ + (defined(__APPLE__) && defined(__ppc__)) || \ (defined(__TOS_AIX__) && !defined(__64BIT__)) # define SQLITE_PTRSIZE 4 # else @@ -15155,7 +15209,7 @@ SQLITE_PRIVATE u32 sqlite3WhereTrace; ** 0x00000010 Display sqlite3_index_info xBestIndex calls ** 0x00000020 Range an equality scan metrics ** 0x00000040 IN operator decisions -** 0x00000080 WhereLoop cost adjustements +** 0x00000080 WhereLoop cost adjustments ** 0x00000100 ** 0x00000200 Covering index decisions ** 0x00000400 OR optimization @@ -16304,6 +16358,7 @@ SQLITE_PRIVATE int sqlite3BtreeIntegrityCheck( sqlite3 *db, /* Database connection that is running the check */ Btree *p, /* The btree to be checked */ Pgno *aRoot, /* An array of root pages numbers for individual trees */ + sqlite3_value *aCnt, /* OUT: entry counts for each btree in aRoot[] */ int nRoot, /* Number of entries in aRoot[] */ int mxErr, /* Stop reporting errors after this many */ int *pnErr, /* OUT: Write number of errors seen to this variable */ @@ -16574,12 +16629,12 @@ typedef struct VdbeOpList VdbeOpList; #define OP_Vacuum 5 #define OP_VFilter 6 /* jump, synopsis: iplan=r[P3] zplan='P4' */ #define OP_VUpdate 7 /* synopsis: data=r[P3@P2] */ -#define OP_Init 8 /* jump, synopsis: Start at P2 */ +#define OP_Init 8 /* jump0, synopsis: Start at P2 */ #define OP_Goto 9 /* jump */ #define OP_Gosub 10 /* jump */ -#define OP_InitCoroutine 11 /* jump */ -#define OP_Yield 12 /* jump */ -#define OP_MustBeInt 13 /* jump */ +#define OP_InitCoroutine 11 /* jump0 */ +#define OP_Yield 12 /* jump0 */ +#define OP_MustBeInt 13 /* jump0 */ #define OP_Jump 14 /* jump */ #define OP_Once 15 /* jump */ #define OP_If 16 /* jump */ @@ -16587,22 +16642,22 @@ typedef struct VdbeOpList VdbeOpList; #define OP_IsType 18 /* jump, synopsis: if typeof(P1.P3) in P5 goto P2 */ #define OP_Not 19 /* same as TK_NOT, synopsis: r[P2]= !r[P1] */ #define OP_IfNullRow 20 /* jump, synopsis: if P1.nullRow then r[P3]=NULL, goto P2 */ -#define OP_SeekLT 21 /* jump, synopsis: key=r[P3@P4] */ -#define OP_SeekLE 22 /* jump, synopsis: key=r[P3@P4] */ -#define OP_SeekGE 23 /* jump, synopsis: key=r[P3@P4] */ -#define OP_SeekGT 24 /* jump, synopsis: key=r[P3@P4] */ +#define OP_SeekLT 21 /* jump0, synopsis: key=r[P3@P4] */ +#define OP_SeekLE 22 /* jump0, synopsis: key=r[P3@P4] */ +#define OP_SeekGE 23 /* jump0, synopsis: key=r[P3@P4] */ +#define OP_SeekGT 24 /* jump0, synopsis: key=r[P3@P4] */ #define OP_IfNotOpen 25 /* jump, synopsis: if( !csr[P1] ) goto P2 */ #define OP_IfNoHope 26 /* jump, synopsis: 
key=r[P3@P4] */ #define OP_NoConflict 27 /* jump, synopsis: key=r[P3@P4] */ #define OP_NotFound 28 /* jump, synopsis: key=r[P3@P4] */ #define OP_Found 29 /* jump, synopsis: key=r[P3@P4] */ -#define OP_SeekRowid 30 /* jump, synopsis: intkey=r[P3] */ +#define OP_SeekRowid 30 /* jump0, synopsis: intkey=r[P3] */ #define OP_NotExists 31 /* jump, synopsis: intkey=r[P3] */ -#define OP_Last 32 /* jump */ -#define OP_IfSmaller 33 /* jump */ +#define OP_Last 32 /* jump0 */ +#define OP_IfSizeBetween 33 /* jump */ #define OP_SorterSort 34 /* jump */ #define OP_Sort 35 /* jump */ -#define OP_Rewind 36 /* jump */ +#define OP_Rewind 36 /* jump0 */ #define OP_SorterNext 37 /* jump */ #define OP_Prev 38 /* jump */ #define OP_Next 39 /* jump */ @@ -16614,7 +16669,7 @@ typedef struct VdbeOpList VdbeOpList; #define OP_IdxGE 45 /* jump, synopsis: key=r[P3@P4] */ #define OP_RowSetRead 46 /* jump, synopsis: r[P3]=rowset(P1) */ #define OP_RowSetTest 47 /* jump, synopsis: if r[P3] in rowset(P1) goto P2 */ -#define OP_Program 48 /* jump */ +#define OP_Program 48 /* jump0 */ #define OP_FkIfZero 49 /* jump, synopsis: if fkctr[P1]==0 goto P2 */ #define OP_IsNull 50 /* jump, same as TK_ISNULL, synopsis: if r[P1]==NULL goto P2 */ #define OP_NotNull 51 /* jump, same as TK_NOTNULL, synopsis: if r[P1]!=NULL goto P2 */ @@ -16644,7 +16699,7 @@ typedef struct VdbeOpList VdbeOpList; #define OP_Null 75 /* synopsis: r[P2..P3]=NULL */ #define OP_SoftNull 76 /* synopsis: r[P1]=NULL */ #define OP_Blob 77 /* synopsis: r[P2]=P4 (len=P1) */ -#define OP_Variable 78 /* synopsis: r[P2]=parameter(P1,P4) */ +#define OP_Variable 78 /* synopsis: r[P2]=parameter(P1) */ #define OP_Move 79 /* synopsis: r[P2@P3]=r[P1@P3] */ #define OP_Copy 80 /* synopsis: r[P2@P3+1]=r[P1@P3+1] */ #define OP_SCopy 81 /* synopsis: r[P2]=r[P1] */ @@ -16768,14 +16823,15 @@ typedef struct VdbeOpList VdbeOpList; #define OPFLG_OUT2 0x10 /* out2: P2 is an output */ #define OPFLG_OUT3 0x20 /* out3: P3 is an output */ #define OPFLG_NCYCLE 0x40 /* ncycle:Cycles count against P1 */ +#define OPFLG_JUMP0 0x80 /* jump0: P2 might be zero */ #define OPFLG_INITIALIZER {\ /* 0 */ 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x41, 0x00,\ -/* 8 */ 0x01, 0x01, 0x01, 0x01, 0x03, 0x03, 0x01, 0x01,\ -/* 16 */ 0x03, 0x03, 0x01, 0x12, 0x01, 0x49, 0x49, 0x49,\ -/* 24 */ 0x49, 0x01, 0x49, 0x49, 0x49, 0x49, 0x49, 0x49,\ -/* 32 */ 0x41, 0x01, 0x41, 0x41, 0x41, 0x01, 0x41, 0x41,\ +/* 8 */ 0x81, 0x01, 0x01, 0x81, 0x83, 0x83, 0x01, 0x01,\ +/* 16 */ 0x03, 0x03, 0x01, 0x12, 0x01, 0xc9, 0xc9, 0xc9,\ +/* 24 */ 0xc9, 0x01, 0x49, 0x49, 0x49, 0x49, 0xc9, 0x49,\ +/* 32 */ 0xc1, 0x01, 0x41, 0x41, 0xc1, 0x01, 0x41, 0x41,\ /* 40 */ 0x41, 0x41, 0x41, 0x26, 0x26, 0x41, 0x23, 0x0b,\ -/* 48 */ 0x01, 0x01, 0x03, 0x03, 0x0b, 0x0b, 0x0b, 0x0b,\ +/* 48 */ 0x81, 0x01, 0x03, 0x03, 0x0b, 0x0b, 0x0b, 0x0b,\ /* 56 */ 0x0b, 0x0b, 0x01, 0x03, 0x03, 0x03, 0x01, 0x41,\ /* 64 */ 0x01, 0x00, 0x00, 0x02, 0x02, 0x08, 0x00, 0x10,\ /* 72 */ 0x10, 0x10, 0x00, 0x10, 0x00, 0x10, 0x10, 0x00,\ @@ -16935,6 +16991,8 @@ SQLITE_PRIVATE RecordCompare sqlite3VdbeFindCompare(UnpackedRecord*); SQLITE_PRIVATE void sqlite3VdbeLinkSubProgram(Vdbe *, SubProgram *); SQLITE_PRIVATE int sqlite3VdbeHasSubProgram(Vdbe*); +SQLITE_PRIVATE void sqlite3MemSetArrayInt64(sqlite3_value *aMem, int iIdx, i64 val); + SQLITE_PRIVATE int sqlite3NotPureFunc(sqlite3_context*); #ifdef SQLITE_ENABLE_BYTECODE_VTAB SQLITE_PRIVATE int sqlite3VdbeBytecodeVtabInit(sqlite3*); @@ -17522,6 +17580,10 @@ struct FuncDefHash { }; #define SQLITE_FUNC_HASH(C,L) 
(((C)+(L))%SQLITE_FUNC_HASH_SZ) +#if defined(SQLITE_USER_AUTHENTICATION) +# warning "The SQLITE_USER_AUTHENTICATION extension is deprecated. \ + See ext/userauth/user-auth.txt for details." +#endif #ifdef SQLITE_USER_AUTHENTICATION /* ** Information held in the "sqlite3" database connection object and used @@ -17825,7 +17887,7 @@ struct sqlite3 { #define SQLITE_CursorHints 0x00000400 /* Add OP_CursorHint opcodes */ #define SQLITE_Stat4 0x00000800 /* Use STAT4 data */ /* TH3 expects this value ^^^^^^^^^^ to be 0x0000800. Don't change it */ -#define SQLITE_PushDown 0x00001000 /* The push-down optimization */ +#define SQLITE_PushDown 0x00001000 /* WHERE-clause push-down opt */ #define SQLITE_SimplifyJoin 0x00002000 /* Convert LEFT JOIN to JOIN */ #define SQLITE_SkipScan 0x00004000 /* Skip-scans */ #define SQLITE_PropagateConst 0x00008000 /* The constant propagation opt */ @@ -18398,8 +18460,7 @@ struct Table { #define TF_HasStored 0x00000040 /* Has one or more STORED columns */ #define TF_HasGenerated 0x00000060 /* Combo: HasVirtual + HasStored */ #define TF_WithoutRowid 0x00000080 /* No rowid. PRIMARY KEY is the key */ -#define TF_StatsUsed 0x00000100 /* Query planner decisions affected by - ** Index.aiRowLogEst[] values */ +#define TF_MaybeReanalyze 0x00000100 /* Maybe run ANALYZE on this table */ #define TF_NoVisibleRowid 0x00000200 /* No user-visible "rowid" column */ #define TF_OOOHidden 0x00000400 /* Out-of-Order hidden columns */ #define TF_HasNotNull 0x00000800 /* Contains NOT NULL constraints */ @@ -19199,10 +19260,12 @@ struct IdList { ** ** Union member validity: ** -** u1.zIndexedBy fg.isIndexedBy && !fg.isTabFunc -** u1.pFuncArg fg.isTabFunc && !fg.isIndexedBy -** u2.pIBIndex fg.isIndexedBy && !fg.isCte -** u2.pCteUse fg.isCte && !fg.isIndexedBy +** u1.zIndexedBy fg.isIndexedBy && !fg.isTabFunc +** u1.pFuncArg fg.isTabFunc && !fg.isIndexedBy +** u1.nRow !fg.isTabFunc && !fg.isIndexedBy +** +** u2.pIBIndex fg.isIndexedBy && !fg.isCte +** u2.pCteUse fg.isCte && !fg.isIndexedBy */ struct SrcItem { Schema *pSchema; /* Schema to which this item is fixed */ @@ -19230,6 +19293,7 @@ struct SrcItem { unsigned isOn :1; /* u3.pOn was once valid and non-NULL */ unsigned isSynthUsing :1; /* u3.pUsing is synthesized from NATURAL */ unsigned isNestedFrom :1; /* pSelect is a SF_NestedFrom subquery */ + unsigned rowidUsed :1; /* The ROWID of this table is referenced */ } fg; int iCursor; /* The VDBE cursor number used to access this table */ union { @@ -19240,6 +19304,7 @@ struct SrcItem { union { char *zIndexedBy; /* Identifier from "INDEXED BY " clause */ ExprList *pFuncArg; /* Arguments to table-valued-function */ + u32 nRow; /* Number of rows in a VALUES clause */ } u1; union { Index *pIBIndex; /* Index structure corresponding to u1.zIndexedBy */ @@ -19304,7 +19369,7 @@ struct SrcList { #define WHERE_AGG_DISTINCT 0x0400 /* Query is "SELECT agg(DISTINCT ...)" */ #define WHERE_ORDERBY_LIMIT 0x0800 /* ORDERBY+LIMIT on the inner loop */ #define WHERE_RIGHT_JOIN 0x1000 /* Processing a RIGHT JOIN */ - /* 0x2000 not currently used */ +#define WHERE_KEEP_ALL_JOINS 0x2000 /* Do not do the omit-noop-join opt */ #define WHERE_USE_LIMIT 0x4000 /* Use the LIMIT in cost estimates */ /* 0x8000 not currently used */ @@ -19497,11 +19562,12 @@ struct Select { #define SF_View 0x0200000 /* SELECT statement is a view */ #define SF_NoopOrderBy 0x0400000 /* ORDER BY is ignored for this query */ #define SF_UFSrcCheck 0x0800000 /* Check pSrc as required by UPDATE...FROM */ -#define SF_PushDown 0x1000000 /* SELECT has 
be modified by push-down opt */ +#define SF_PushDown 0x1000000 /* Modified by WHERE-clause push-down opt */ #define SF_MultiPart 0x2000000 /* Has multiple incompatible PARTITIONs */ #define SF_CopyCte 0x4000000 /* SELECT statement is a copy of a CTE */ #define SF_OrderByReqd 0x8000000 /* The ORDER BY clause may not be omitted */ #define SF_UpdateFrom 0x10000000 /* Query originates with UPDATE FROM */ +#define SF_Correlated 0x20000000 /* True if references the outer context */ /* True if S exists and has SF_NestedFrom */ #define IsNestedFrom(S) ((S)!=0 && ((S)->selFlags&SF_NestedFrom)!=0) @@ -19741,6 +19807,7 @@ struct Parse { u8 disableLookaside; /* Number of times lookaside has been disabled */ u8 prepFlags; /* SQLITE_PREPARE_* flags */ u8 withinRJSubrtn; /* Nesting level for RIGHT JOIN body subroutines */ + u8 bHasWith; /* True if statement contains WITH */ #if defined(SQLITE_DEBUG) || defined(SQLITE_COVERAGE_TEST) u8 earlyCleanup; /* OOM inside sqlite3ParserAddCleanup() */ #endif @@ -20420,6 +20487,9 @@ struct Window { ** due to the SQLITE_SUBTYPE flag */ }; +SQLITE_PRIVATE Select *sqlite3MultiValues(Parse *pParse, Select *pLeft, ExprList *pRow); +SQLITE_PRIVATE void sqlite3MultiValuesEnd(Parse *pParse, Select *pVal); + #ifndef SQLITE_OMIT_WINDOWFUNC SQLITE_PRIVATE void sqlite3WindowDelete(sqlite3*, Window*); SQLITE_PRIVATE void sqlite3WindowUnlinkFromSelect(Window*); @@ -20737,6 +20807,7 @@ SQLITE_PRIVATE int sqlite3ErrorToParser(sqlite3*,int); SQLITE_PRIVATE void sqlite3Dequote(char*); SQLITE_PRIVATE void sqlite3DequoteExpr(Expr*); SQLITE_PRIVATE void sqlite3DequoteToken(Token*); +SQLITE_PRIVATE void sqlite3DequoteNumber(Parse*, Expr*); SQLITE_PRIVATE void sqlite3TokenInit(Token*,char*); SQLITE_PRIVATE int sqlite3KeywordCode(const unsigned char*, int); SQLITE_PRIVATE int sqlite3RunParser(Parse*, const char*); @@ -20767,7 +20838,7 @@ SQLITE_PRIVATE void sqlite3ExprFunctionUsable(Parse*,const Expr*,const FuncDef*) SQLITE_PRIVATE void sqlite3ExprAssignVarNumber(Parse*, Expr*, u32); SQLITE_PRIVATE void sqlite3ExprDelete(sqlite3*, Expr*); SQLITE_PRIVATE void sqlite3ExprDeleteGeneric(sqlite3*,void*); -SQLITE_PRIVATE void sqlite3ExprDeferredDelete(Parse*, Expr*); +SQLITE_PRIVATE int sqlite3ExprDeferredDelete(Parse*, Expr*); SQLITE_PRIVATE void sqlite3ExprUnmapAndDelete(Parse*, Expr*); SQLITE_PRIVATE ExprList *sqlite3ExprListAppend(Parse*,ExprList*,Expr*); SQLITE_PRIVATE ExprList *sqlite3ExprListAppendVector(Parse*,ExprList*,IdList*,Expr*); @@ -20990,12 +21061,10 @@ SQLITE_PRIVATE void sqlite3LeaveMutexAndCloseZombie(sqlite3*); SQLITE_PRIVATE u32 sqlite3IsTrueOrFalse(const char*); SQLITE_PRIVATE int sqlite3ExprIdToTrueFalse(Expr*); SQLITE_PRIVATE int sqlite3ExprTruthValue(const Expr*); -SQLITE_PRIVATE int sqlite3ExprIsConstant(Expr*); -SQLITE_PRIVATE int sqlite3ExprIsConstantNotJoin(Expr*); +SQLITE_PRIVATE int sqlite3ExprIsConstant(Parse*,Expr*); SQLITE_PRIVATE int sqlite3ExprIsConstantOrFunction(Expr*, u8); SQLITE_PRIVATE int sqlite3ExprIsConstantOrGroupBy(Parse*, Expr*, ExprList*); -SQLITE_PRIVATE int sqlite3ExprIsTableConstant(Expr*,int); -SQLITE_PRIVATE int sqlite3ExprIsSingleTableConstraint(Expr*,const SrcList*,int); +SQLITE_PRIVATE int sqlite3ExprIsSingleTableConstraint(Expr*,const SrcList*,int,int); #ifdef SQLITE_ENABLE_CURSOR_HINTS SQLITE_PRIVATE int sqlite3ExprContainsSubquery(Expr*); #endif @@ -21180,7 +21249,9 @@ SQLITE_PRIVATE void sqlite3ErrorWithMsg(sqlite3*, int, const char*,...); SQLITE_PRIVATE void sqlite3Error(sqlite3*,int); SQLITE_PRIVATE void sqlite3ErrorClear(sqlite3*); 
 SQLITE_PRIVATE void sqlite3SystemError(sqlite3*,int);
+#if !defined(SQLITE_OMIT_BLOB_LITERAL)
 SQLITE_PRIVATE void *sqlite3HexToBlob(sqlite3*, const char *z, int n);
+#endif
 SQLITE_PRIVATE u8 sqlite3HexToInt(int h);
 SQLITE_PRIVATE int sqlite3TwoPartName(Parse *, Token *, Token *, Token **);
@@ -24227,13 +24298,14 @@ struct DateTime {
   int tz;             /* Timezone offset in minutes */
   double s;           /* Seconds */
   char validJD;       /* True (1) if iJD is valid */
-  char rawS;          /* Raw numeric value stored in s */
   char validYMD;      /* True (1) if Y,M,D are valid */
   char validHMS;      /* True (1) if h,m,s are valid */
-  char validTZ;       /* True (1) if tz is valid */
-  char tzSet;         /* Timezone was set explicitly */
-  char isError;       /* An overflow has occurred */
-  char useSubsec;     /* Display subsecond precision */
+  char nFloor;            /* Days to implement "floor" */
+  unsigned rawS : 1;      /* Raw numeric value stored in s */
+  unsigned isError : 1;   /* An overflow has occurred */
+  unsigned useSubsec : 1; /* Display subsecond precision */
+  unsigned isUtc : 1;     /* Time is known to be UTC */
+  unsigned isLocal : 1;   /* Time is known to be localtime */
 };
@@ -24331,6 +24403,8 @@ static int parseTimezone(const char *zDate, DateTime *p){
     sgn = +1;
   }else if( c=='Z' || c=='z' ){
     zDate++;
+    p->isLocal = 0;
+    p->isUtc = 1;
     goto zulu_time;
   }else{
     return c!=0;
@@ -24343,7 +24417,6 @@ static int parseTimezone(const char *zDate, DateTime *p){
   p->tz = sgn*(nMn + nHr*60);
 zulu_time:
   while( sqlite3Isspace(*zDate) ){ zDate++; }
-  p->tzSet = 1;
   return *zDate!=0;
 }
@@ -24387,7 +24460,6 @@ static int parseHhMmSs(const char *zDate, DateTime *p){
   p->m = m;
   p->s = s + ms;
   if( parseTimezone(zDate, p) ) return 1;
-  p->validTZ = (p->tz!=0)?1:0;
   return 0;
 }
@@ -24434,15 +24506,40 @@ static void computeJD(DateTime *p){
   p->validJD = 1;
   if( p->validHMS ){
     p->iJD += p->h*3600000 + p->m*60000 + (sqlite3_int64)(p->s*1000 + 0.5);
-    if( p->validTZ ){
+    if( p->tz ){
       p->iJD -= p->tz*60000;
       p->validYMD = 0;
       p->validHMS = 0;
-      p->validTZ = 0;
+      p->tz = 0;
+      p->isUtc = 1;
+      p->isLocal = 0;
     }
   }
 }
 
+/*
+** Given the YYYY-MM-DD information current in p, determine if there
+** is day-of-month overflow and set nFloor to the number of days that
+** would need to be subtracted from the date in order to bring the
+** date back to the end of the month.
+*/
+static void computeFloor(DateTime *p){
+  assert( p->validYMD || p->isError );
+  assert( p->D>=0 && p->D<=31 );
+  assert( p->M>=0 && p->M<=12 );
+  if( p->D<=28 ){
+    p->nFloor = 0;
+  }else if( (1<<p->M) & 0x15aa ){
+    p->nFloor = 0;
+  }else if( p->M!=2 ){
+    p->nFloor = (p->D==31);
+  }else if( p->Y%4!=0 || (p->Y%100==0 && p->Y%400!=0) ){
+    p->nFloor = p->D - 28;
+  }else{
+    p->nFloor = p->D - 29;
+  }
+}
+
 /*
 ** Parse dates of the form
 **
@@ -24481,12 +24578,16 @@ static int parseYyyyMmDd(const char *zDate, DateTime *p){
   p->Y = neg ? -Y : Y;
   p->M = M;
   p->D = D;
-  if( p->validTZ ){
+  computeFloor(p);
+  if( p->tz ){
     computeJD(p);
   }
   return 0;
 }
 
+
+static void clearYMD_HMS_TZ(DateTime *p);  /* Forward declaration */
+
 /*
 ** Set the time to the current time reported by the VFS.
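** The timestamp obtained this way is always UTC, which is why the code
** below also sets isUtc and clears isLocal before discarding any cached
** YMD/HMS fields.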
** @@ -24496,6 +24597,9 @@ static int setDateTimeToCurrent(sqlite3_context *context, DateTime *p){ p->iJD = sqlite3StmtCurrentTime(context); if( p->iJD>0 ){ p->validJD = 1; + p->isUtc = 1; + p->isLocal = 0; + clearYMD_HMS_TZ(p); return 0; }else{ return 1; @@ -24634,7 +24738,7 @@ static void computeYMD_HMS(DateTime *p){ static void clearYMD_HMS_TZ(DateTime *p){ p->validYMD = 0; p->validHMS = 0; - p->validTZ = 0; + p->tz = 0; } #ifndef SQLITE_OMIT_LOCALTIME @@ -24766,7 +24870,7 @@ static int toLocaltime( p->validHMS = 1; p->validJD = 0; p->rawS = 0; - p->validTZ = 0; + p->tz = 0; p->isError = 0; return SQLITE_OK; } @@ -24786,12 +24890,12 @@ static const struct { float rLimit; /* Maximum NNN value for this transform */ float rXform; /* Constant used for this transform */ } aXformType[] = { - { 6, "second", 4.6427e+14, 1.0 }, - { 6, "minute", 7.7379e+12, 60.0 }, - { 4, "hour", 1.2897e+11, 3600.0 }, - { 3, "day", 5373485.0, 86400.0 }, - { 5, "month", 176546.0, 2592000.0 }, - { 4, "year", 14713.0, 31536000.0 }, + /* 0 */ { 6, "second", 4.6427e+14, 1.0 }, + /* 1 */ { 6, "minute", 7.7379e+12, 60.0 }, + /* 2 */ { 4, "hour", 1.2897e+11, 3600.0 }, + /* 3 */ { 3, "day", 5373485.0, 86400.0 }, + /* 4 */ { 5, "month", 176546.0, 30.0*86400.0 }, + /* 5 */ { 4, "year", 14713.0, 365.0*86400.0 }, }; /* @@ -24823,14 +24927,20 @@ static void autoAdjustDate(DateTime *p){ ** NNN.NNNN seconds ** NNN months ** NNN years +** +/-YYYY-MM-DD HH:MM:SS.SSS +** ceiling +** floor ** start of month ** start of year ** start of week ** start of day ** weekday N ** unixepoch +** auto ** localtime ** utc +** subsec +** subsecond ** ** Return 0 on success and 1 if there is any kind of error. If the error ** is in a system call (i.e. localtime()), then an error message is written @@ -24861,6 +24971,37 @@ static int parseModifier( } break; } + case 'c': { + /* + ** ceiling + ** + ** Resolve day-of-month overflow by rolling forward into the next + ** month. As this is the default action, this modifier is really + ** a no-op that is only included for symmetry. See "floor". + */ + if( sqlite3_stricmp(z, "ceiling")==0 ){ + computeJD(p); + clearYMD_HMS_TZ(p); + rc = 0; + p->nFloor = 0; + } + break; + } + case 'f': { + /* + ** floor + ** + ** Resolve day-of-month overflow by rolling back to the end of the + ** previous month. + */ + if( sqlite3_stricmp(z, "floor")==0 ){ + computeJD(p); + p->iJD -= p->nFloor*86400000; + clearYMD_HMS_TZ(p); + rc = 0; + } + break; + } case 'j': { /* ** julianday @@ -24887,7 +25028,9 @@ static int parseModifier( ** show local time. */ if( sqlite3_stricmp(z, "localtime")==0 && sqlite3NotPureFunc(pCtx) ){ - rc = toLocaltime(p, pCtx); + rc = p->isLocal ? 
SQLITE_OK : toLocaltime(p, pCtx);
+        p->isUtc = 0;
+        p->isLocal = 1;
       }
       break;
     }
@@ -24912,7 +25055,7 @@ static int parseModifier(
       }
 #ifndef SQLITE_OMIT_LOCALTIME
       else if( sqlite3_stricmp(z, "utc")==0 && sqlite3NotPureFunc(pCtx) ){
-        if( p->tzSet==0 ){
+        if( p->isUtc==0 ){
           i64 iOrigJD;              /* Original localtime */
           i64 iGuess;               /* Guess at the corresponding utc time */
           int cnt = 0;              /* Safety to prevent infinite loop */
@@ -24935,7 +25078,8 @@ static int parseModifier(
             memset(p, 0, sizeof(*p));
             p->iJD = iGuess;
             p->validJD = 1;
-            p->tzSet = 1;
+            p->isUtc = 1;
+            p->isLocal = 0;
           }
           rc = SQLITE_OK;
         }
@@ -24955,7 +25099,7 @@ static int parseModifier(
        && r>=0.0 && r<7.0 && (n=(int)r)==r
       ){
         sqlite3_int64 Z;
         computeYMD_HMS(p);
-        p->validTZ = 0;
+        p->tz = 0;
         p->validJD = 0;
         computeJD(p);
         Z = ((p->iJD + 129600000)/86400000) % 7;
@@ -24995,7 +25139,7 @@ static int parseModifier(
       p->h = p->m = 0;
       p->s = 0.0;
       p->rawS = 0;
-      p->validTZ = 0;
+      p->tz = 0;
       p->validJD = 0;
       if( sqlite3_stricmp(z,"month")==0 ){
         p->D = 1;
@@ -25066,6 +25210,7 @@ static int parseModifier(
         x = p->M>0 ? (p->M-1)/12 : (p->M-12)/12;
         p->Y += x;
        p->M -= x*12;
+        computeFloor(p);
         computeJD(p);
         p->validHMS = 0;
         p->validYMD = 0;
@@ -25112,11 +25257,12 @@ static int parseModifier(
       z += n;
       while( sqlite3Isspace(*z) ) z++;
       n = sqlite3Strlen30(z);
-      if( n>10 || n<3 ) break;
+      if( n<3 || n>10 ) break;
       if( sqlite3UpperToLower[(u8)z[n-1]]=='s' ) n--;
       computeJD(p);
       assert( rc==1 );
       rRounder = r<0 ? -0.5 : +0.5;
+      p->nFloor = 0;
       for(i=0; i<ArraySize(aXformType); i++){
         if( aXformType[i].nName==n
          && sqlite3_strnicmp(aXformType[i].zName, z, n)==0
@@ -25124,21 +25270,24 @@ static int parseModifier(
          && r>-aXformType[i].rLimit && r<aXformType[i].rLimit
         ){
           switch( i ){
             case 4: { /* Special processing to add months */
-              assert( strcmp(aXformType[i].zName,"month")==0 );
+              assert( strcmp(aXformType[4].zName,"month")==0 );
               computeYMD_HMS(p);
               p->M += (int)r;
               x = p->M>0 ? (p->M-1)/12 : (p->M-12)/12;
               p->Y += x;
               p->M -= x*12;
+              computeFloor(p);
               p->validJD = 0;
               r -= (int)r;
               break;
             }
             case 5: { /* Special processing to add years */
               int y = (int)r;
-              assert( strcmp(aXformType[i].zName,"year")==0 );
+              assert( strcmp(aXformType[5].zName,"year")==0 );
               computeYMD_HMS(p);
+              assert( p->M>=0 && p->M<=12 );
               p->Y += y;
+              computeFloor(p);
               p->validJD = 0;
               r -= (int)r;
               break;
@@ -25392,22 +25541,83 @@ static void dateFunc(
   }
 }
 
+/*
+** Compute the number of days after the most recent January 1.
+**
+** In other words, compute the zero-based day number for the
+** current year:
+**
+**    Jan01 = 0,  Jan02 = 1, ..., Jan31 = 30,  Feb01 = 31, ...
+**    Dec31 = 364 or 365.
+*/
+static int daysAfterJan01(DateTime *pDate){
+  DateTime jan01 = *pDate;
+  assert( jan01.validYMD );
+  assert( jan01.validHMS );
+  assert( pDate->validJD );
+  jan01.validJD = 0;
+  jan01.M = 1;
+  jan01.D = 1;
+  computeJD(&jan01);
+  return (int)((pDate->iJD-jan01.iJD+43200000)/86400000);
+}
+
+/*
+** Return the number of days after the most recent Monday.
+**
+** In other words, return the day of the week according
+** to this code:
+**
+**   0=Monday, 1=Tuesday, 2=Wednesday, ..., 6=Sunday.
+*/
+static int daysAfterMonday(DateTime *pDate){
+  assert( pDate->validJD );
+  return (int)((pDate->iJD+43200000)/86400000) % 7;
+}
+
+/*
+** Return the number of days after the most recent Sunday.
+**
+** In other words, return the day of the week according
+** to this code:
+**
+**   0=Sunday, 1=Monday, 2=Tues, ..., 6=Saturday
+*/
+static int daysAfterSunday(DateTime *pDate){
+  assert( pDate->validJD );
+  return (int)((pDate->iJD+129600000)/86400000) % 7;
+}
+
 /*
 **    strftime( FORMAT, TIMESTRING, MOD, MOD, ...)
 **
 ** Return a string described by FORMAT.  Conversions as follows:
 **
-**   %d  day of month
+**   %d  day of month  01-31
+**   %e  day of month  1-31
 **   %f  fractional seconds  SS.SSS
+**   %F  ISO date.  YYYY-MM-DD
+**   %G  ISO year corresponding to %V  0000-9999.
+** %g 2-digit ISO year corresponding to %V 00-99 ** %H hour 00-24 -** %j day of year 000-366 +** %k hour 0-24 (leading zero converted to space) +** %I hour 01-12 +** %j day of year 001-366 ** %J ** julian day number +** %l hour 1-12 (leading zero converted to space) ** %m month 01-12 ** %M minute 00-59 +** %p "am" or "pm" +** %P "AM" or "PM" +** %R time as HH:MM ** %s seconds since 1970-01-01 ** %S seconds 00-59 -** %w day of week 0-6 Sunday==0 -** %W week of year 00-53 +** %T time as HH:MM:SS +** %u day of week 1-7 Monday==1, Sunday==7 +** %w day of week 0-6 Sunday==0, Monday==1 +** %U week of year 00-53 (First Sunday is start of week 01) +** %V week of year 01-53 (First week containing Thursday is week 01) +** %W week of year 00-53 (First Monday is start of week 01) ** %Y year 0000-9999 ** %% % */ @@ -25444,7 +25654,7 @@ static void strftimeFunc( sqlite3_str_appendf(&sRes, cf=='d' ? "%02d" : "%2d", x.D); break; } - case 'f': { + case 'f': { /* Fractional seconds. (Non-standard) */ double s = x.s; if( s>59.999 ) s = 59.999; sqlite3_str_appendf(&sRes, "%06.3f", s); @@ -25454,6 +25664,21 @@ static void strftimeFunc( sqlite3_str_appendf(&sRes, "%04d-%02d-%02d", x.Y, x.M, x.D); break; } + case 'G': /* Fall thru */ + case 'g': { + DateTime y = x; + assert( y.validJD ); + /* Move y so that it is the Thursday in the same week as x */ + y.iJD += (3 - daysAfterMonday(&x))*86400000; + y.validYMD = 0; + computeYMD(&y); + if( cf=='g' ){ + sqlite3_str_appendf(&sRes, "%02d", y.Y%100); + }else{ + sqlite3_str_appendf(&sRes, "%04d", y.Y); + } + break; + } case 'H': case 'k': { sqlite3_str_appendf(&sRes, cf=='H' ? "%02d" : "%2d", x.h); @@ -25467,25 +25692,11 @@ static void strftimeFunc( sqlite3_str_appendf(&sRes, cf=='I' ? "%02d" : "%2d", h); break; } - case 'W': /* Fall thru */ - case 'j': { - int nDay; /* Number of days since 1st day of year */ - DateTime y = x; - y.validJD = 0; - y.M = 1; - y.D = 1; - computeJD(&y); - nDay = (int)((x.iJD-y.iJD+43200000)/86400000); - if( cf=='W' ){ - int wd; /* 0=Monday, 1=Tuesday, ... 6=Sunday */ - wd = (int)(((x.iJD+43200000)/86400000)%7); - sqlite3_str_appendf(&sRes,"%02d",(nDay+7-wd)/7); - }else{ - sqlite3_str_appendf(&sRes,"%03d",nDay+1); - } + case 'j': { /* Day of year. Jan01==1, Jan02==2, and so forth */ + sqlite3_str_appendf(&sRes,"%03d",daysAfterJan01(&x)+1); break; } - case 'J': { + case 'J': { /* Julian day number. (Non-standard) */ sqlite3_str_appendf(&sRes,"%.16g",x.iJD/86400000.0); break; } @@ -25528,13 +25739,33 @@ static void strftimeFunc( sqlite3_str_appendf(&sRes,"%02d:%02d:%02d", x.h, x.m, (int)x.s); break; } - case 'u': /* Fall thru */ - case 'w': { - char c = (char)(((x.iJD+129600000)/86400000) % 7) + '0'; + case 'u': /* Day of week. 1 to 7. Monday==1, Sunday==7 */ + case 'w': { /* Day of week. 0 to 6. Sunday==0, Monday==1 */ + char c = (char)daysAfterSunday(&x) + '0'; if( c=='0' && cf=='u' ) c = '7'; sqlite3_str_appendchar(&sRes, 1, c); break; } + case 'U': { /* Week num. 00-53. First Sun of the year is week 01 */ + sqlite3_str_appendf(&sRes,"%02d", + (daysAfterJan01(&x)-daysAfterSunday(&x)+7)/7); + break; + } + case 'V': { /* Week num. 01-53. First week with a Thur is week 01 */ + DateTime y = x; + /* Adjust y so that is the Thursday in the same week as x */ + assert( y.validJD ); + y.iJD += (3 - daysAfterMonday(&x))*86400000; + y.validYMD = 0; + computeYMD(&y); + sqlite3_str_appendf(&sRes,"%02d", daysAfterJan01(&y)/7+1); + break; + } + case 'W': { /* Week num. 00-53. 
First Mon of the year is week 01 */
+        sqlite3_str_appendf(&sRes,"%02d",
+              (daysAfterJan01(&x)-daysAfterMonday(&x)+7)/7);
+        break;
+      }
       case 'Y': {
         sqlite3_str_appendf(&sRes,"%04d",x.Y);
         break;
@@ -25681,9 +25912,7 @@ static void timediffFunc(
     d1.iJD = d2.iJD - d1.iJD;
     d1.iJD += (u64)1486995408 * (u64)100000;
   }
-  d1.validYMD = 0;
-  d1.validHMS = 0;
-  d1.validTZ = 0;
+  clearYMD_HMS_TZ(&d1);
   computeYMD_HMS(&d1);
   sqlite3StrAccumInit(&sRes, 0, 0, 0, 100);
   sqlite3_str_appendf(&sRes, "%c%04d-%02d-%02d %02d:%02d:%06.3f",
@@ -25752,6 +25981,36 @@ static void currentTimeFunc(
 }
 #endif
 
+#if !defined(SQLITE_OMIT_DATETIME_FUNCS) && defined(SQLITE_DEBUG)
+/*
+**    datedebug(...)
+**
+** This routine returns JSON that describes the internal DateTime object.
+** Used for debugging and testing only.  Subject to change.
+*/
+static void datedebugFunc(
+  sqlite3_context *context,
+  int argc,
+  sqlite3_value **argv
+){
+  DateTime x;
+  if( isDate(context, argc, argv, &x)==0 ){
+    char *zJson;
+    zJson = sqlite3_mprintf(
+      "{iJD:%lld,Y:%d,M:%d,D:%d,h:%d,m:%d,tz:%d,"
+      "s:%.3f,validJD:%d,validYMD:%d,validHMS:%d,"
+      "nFloor:%d,rawS:%d,isError:%d,useSubsec:%d,"
+      "isUtc:%d,isLocal:%d}",
+      x.iJD, x.Y, x.M, x.D, x.h, x.m, x.tz,
+      x.s, x.validJD, x.validYMD, x.validHMS,
+      x.nFloor, x.rawS, x.isError, x.useSubsec,
+      x.isUtc, x.isLocal);
+    sqlite3_result_text(context, zJson, -1, sqlite3_free);
+  }
+}
+#endif /* !SQLITE_OMIT_DATETIME_FUNCS && SQLITE_DEBUG */
+
+
 /*
 ** This function registers all of the above C functions as SQL
 ** functions.  This should be the only routine in this file with
@@ -25767,6 +26026,9 @@ SQLITE_PRIVATE void sqlite3RegisterDateTimeFunctions(void){
     PURE_DATE(datetime,         -1, 0, 0, datetimeFunc  ),
     PURE_DATE(strftime,         -1, 0, 0, strftimeFunc  ),
     PURE_DATE(timediff,          2, 0, 0, timediffFunc  ),
+#ifdef SQLITE_DEBUG
+    PURE_DATE(datedebug,        -1, 0, 0, datedebugFunc ),
+#endif
     DFUNCTION(current_time,      0, 0, 0, ctimeFunc     ),
     DFUNCTION(current_timestamp, 0, 0, 0, ctimestampFunc),
     DFUNCTION(current_date,      0, 0, 0, cdateFunc     ),
@@ -30182,6 +30444,24 @@ static void sqlite3MallocAlarm(int nByte){
   sqlite3_mutex_enter(mem0.mutex);
 }
 
+#ifdef SQLITE_DEBUG
+/*
+** This routine is called whenever an out-of-memory condition is seen.
+** Its only purpose is to serve as a breakpoint for gdb or similar
+** code debuggers when working on out-of-memory conditions, for example
+** caused by PRAGMA hard_heap_limit=N.
+*/
+static SQLITE_NOINLINE void test_oom_breakpoint(u64 n){
+  static u64 nOomFault = 0;
+  nOomFault += n;
+  /* The assert() is never reached in a human lifetime.  It is here mostly
+  ** to prevent code optimizers from optimizing out this function. */
+  assert( (nOomFault>>32) < 0xffffffff );
+}
+#else
+# define test_oom_breakpoint(X)   /* No-op for production builds */
+#endif
+
 /*
 ** Do a memory allocation with statistics and alarms.  Assume the
 ** lock is already held.
@@ -30208,6 +30488,7 @@ static void mallocWithAlarm(int n, void **pp){
     if( mem0.hardLimit ){
       nUsed = sqlite3StatusValue(SQLITE_STATUS_MEMORY_USED);
       if( nUsed >= mem0.hardLimit - nFull ){
+        test_oom_breakpoint(1);
         *pp = 0;
         return;
       }
@@ -30496,6 +30777,7 @@ SQLITE_PRIVATE void *sqlite3Realloc(void *pOld, u64 nBytes){
       sqlite3MallocAlarm(nDiff);
       if( mem0.hardLimit>0 && nUsed >= mem0.hardLimit - nDiff ){
         sqlite3_mutex_leave(mem0.mutex);
+        test_oom_breakpoint(1);
        return 0;
       }
     }
@@ -31398,13 +31680,14 @@ SQLITE_API void sqlite3_str_vappendf(
         }
         exp = s.iDP-1;
-        if( xtype==etGENERIC && precision>0 ) precision--;
 
         /*
         ** If the field type is etGENERIC, then convert to either etEXP
         ** or etFLOAT, as appropriate.
         */
         if( xtype==etGENERIC ){
+          assert( precision>0 );
+          precision--;
           flag_rtz = !flag_alternateform;
           if( exp<-4 || exp>precision ){
             xtype = etEXP;
@@ -31720,9 +32003,13 @@ SQLITE_API void sqlite3_str_vappendf(
           sqlite3_str_appendall(pAccum, pItem->zAlias);
         }else{
           Select *pSel = pItem->pSelect;
-          assert( pSel!=0 );
+          assert( pSel!=0 );  /* Because of tag-20240424-1 */
           if( pSel->selFlags & SF_NestedFrom ){
             sqlite3_str_appendf(pAccum, "(join-%u)", pSel->selId);
+          }else if( pSel->selFlags & SF_MultiValue ){
+            assert( !pItem->fg.isTabFunc && !pItem->fg.isIndexedBy );
+            sqlite3_str_appendf(pAccum, "%u-ROW VALUES CLAUSE",
+                                pItem->u1.nRow);
           }else{
             sqlite3_str_appendf(pAccum, "(subquery-%u)", pSel->selId);
           }
@@ -32499,8 +32786,10 @@ SQLITE_PRIVATE void sqlite3TreeViewSrcList(TreeView *pView, const SrcList *pSrc)
     x.printfFlags |= SQLITE_PRINTF_INTERNAL;
     sqlite3_str_appendf(&x, "{%d:*} %!S", pItem->iCursor, pItem);
     if( pItem->pTab ){
-      sqlite3_str_appendf(&x, " tab=%Q nCol=%d ptr=%p used=%llx",
-           pItem->pTab->zName, pItem->pTab->nCol, pItem->pTab, pItem->colUsed);
+      sqlite3_str_appendf(&x, " tab=%Q nCol=%d ptr=%p used=%llx%s",
+           pItem->pTab->zName, pItem->pTab->nCol, pItem->pTab,
+           pItem->colUsed,
+           pItem->fg.rowidUsed ? "+rowid" : "");
     }
     if( (pItem->fg.jointype & (JT_LEFT|JT_RIGHT))==(JT_LEFT|JT_RIGHT) ){
       sqlite3_str_appendf(&x, " FULL-OUTER-JOIN");
@@ -32540,12 +32829,14 @@ SQLITE_PRIVATE void sqlite3TreeViewSrcList(TreeView *pView, const SrcList *pSrc)
       sqlite3TreeViewIdList(pView, pItem->u3.pUsing, (--n)>0, "USING");
     }
     if( pItem->pSelect ){
+      sqlite3TreeViewPush(&pView, i+1<pSrc->nSrc);
       if( pItem->pTab ){
         Table *pTab = pItem->pTab;
         sqlite3TreeViewColumnList(pView, pTab->aCol, pTab->nCol, 1);
       }
       assert( (int)pItem->fg.isNestedFrom == IsNestedFrom(pItem->pSelect) );
       sqlite3TreeViewSelect(pView, pItem->pSelect, (--n)>0);
+      sqlite3TreeViewPop(&pView);
     }
     if( pItem->fg.isTabFunc ){
       sqlite3TreeViewExprList(pView, pItem->u1.pFuncArg, 0, "func-args:");
@@ -32649,7 +32940,7 @@ SQLITE_PRIVATE void sqlite3TreeViewSelect(TreeView *pView, const Select *p, u8 m
     sqlite3TreeViewItem(pView, "LIMIT", (n--)>0);
     sqlite3TreeViewExpr(pView, p->pLimit->pLeft, p->pLimit->pRight!=0);
     if( p->pLimit->pRight ){
-      sqlite3TreeViewItem(pView, "OFFSET", (n--)>0);
+      sqlite3TreeViewItem(pView, "OFFSET", 0);
       sqlite3TreeViewExpr(pView, p->pLimit->pRight, 0);
       sqlite3TreeViewPop(&pView);
     }
@@ -34950,6 +35241,44 @@ SQLITE_PRIVATE void sqlite3DequoteExpr(Expr *p){
   sqlite3Dequote(p->u.zToken);
 }
 
+/*
+** Expression p is a QNUMBER (quoted number). Dequote the value in p->u.zToken
+** and set the type to INTEGER or FLOAT. "Quoted" integers or floats are those
+** that contain '_' characters that must be removed before further processing.
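+**
+** For example, the literal 1_000_000 is rewritten as 1000000 and the
+** hex literal 0x1234_5678 as 0x12345678.  A separator that is not
+** flanked by digits (or hex digits) raises an "unrecognized token" error.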
+*/ +SQLITE_PRIVATE void sqlite3DequoteNumber(Parse *pParse, Expr *p){ + assert( p!=0 || pParse->db->mallocFailed ); + if( p ){ + const char *pIn = p->u.zToken; + char *pOut = p->u.zToken; + int bHex = (pIn[0]=='0' && (pIn[1]=='x' || pIn[1]=='X')); + int iValue; + assert( p->op==TK_QNUMBER ); + p->op = TK_INTEGER; + do { + if( *pIn!=SQLITE_DIGIT_SEPARATOR ){ + *pOut++ = *pIn; + if( *pIn=='e' || *pIn=='E' || *pIn=='.' ) p->op = TK_FLOAT; + }else{ + if( (bHex==0 && (!sqlite3Isdigit(pIn[-1]) || !sqlite3Isdigit(pIn[1]))) + || (bHex==1 && (!sqlite3Isxdigit(pIn[-1]) || !sqlite3Isxdigit(pIn[1]))) + ){ + sqlite3ErrorMsg(pParse, "unrecognized token: \"%s\"", p->u.zToken); + } + } + }while( *pIn++ ); + if( bHex ) p->op = TK_INTEGER; + + /* tag-20240227-a: If after dequoting, the number is an integer that + ** fits in 32 bits, then it must be converted into EP_IntValue. Other + ** parts of the code expect this. See also tag-20240227-b. */ + if( p->op==TK_INTEGER && sqlite3GetInt32(p->u.zToken, &iValue) ){ + p->u.iValue = iValue; + p->flags |= EP_IntValue; + } + } +} + /* ** If the input token p is quoted, try to adjust the token to remove ** the quotes. This is not always possible: @@ -36889,7 +37218,7 @@ SQLITE_PRIVATE const char *sqlite3OpcodeName(int i){ /* 30 */ "SeekRowid" OpHelp("intkey=r[P3]"), /* 31 */ "NotExists" OpHelp("intkey=r[P3]"), /* 32 */ "Last" OpHelp(""), - /* 33 */ "IfSmaller" OpHelp(""), + /* 33 */ "IfSizeBetween" OpHelp(""), /* 34 */ "SorterSort" OpHelp(""), /* 35 */ "Sort" OpHelp(""), /* 36 */ "Rewind" OpHelp(""), @@ -36934,7 +37263,7 @@ SQLITE_PRIVATE const char *sqlite3OpcodeName(int i){ /* 75 */ "Null" OpHelp("r[P2..P3]=NULL"), /* 76 */ "SoftNull" OpHelp("r[P1]=NULL"), /* 77 */ "Blob" OpHelp("r[P2]=P4 (len=P1)"), - /* 78 */ "Variable" OpHelp("r[P2]=parameter(P1,P4)"), + /* 78 */ "Variable" OpHelp("r[P2]=parameter(P1)"), /* 79 */ "Move" OpHelp("r[P2@P3]=r[P1@P3]"), /* 80 */ "Copy" OpHelp("r[P2@P3+1]=r[P1@P3+1]"), /* 81 */ "SCopy" OpHelp("r[P2]=r[P1]"), @@ -39332,8 +39661,12 @@ static int unixLogErrorAtLine( ** available, the error message will often be an empty string. Not a ** huge problem. Incorrectly concluding that the GNU version is available ** could lead to a segfault though. + ** + ** Forum post 3f13857fa4062301 reports that the Android SDK may use + ** int-type return, depending on its version. */ -#if defined(STRERROR_R_CHAR_P) || defined(__USE_GNU) +#if (defined(STRERROR_R_CHAR_P) || defined(__USE_GNU)) \ + && !defined(ANDROID) && !defined(__ANDROID__) zErr = # endif strerror_r(iErrno, aErr, sizeof(aErr)-1); @@ -44431,12 +44764,19 @@ static int unixOpen( rc = SQLITE_READONLY_DIRECTORY; }else if( errno!=EISDIR && isReadWrite ){ /* Failed to open the file for read/write access. Try read-only. 
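      ** Rather than calling open() a second time, the code below first
      ** looks for a file descriptor on the same file that was parked for
      ** later reuse, and recycles it if one exists.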
*/ + UnixUnusedFd *pReadonly = 0; flags &= ~(SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE); openFlags &= ~(O_RDWR|O_CREAT); flags |= SQLITE_OPEN_READONLY; openFlags |= O_RDONLY; isReadonly = 1; - fd = robust_open(zName, openFlags, openMode); + pReadonly = findReusableFd(zName, flags); + if( pReadonly ){ + fd = pReadonly->fd; + sqlite3_free(pReadonly); + }else{ + fd = robust_open(zName, openFlags, openMode); + } } } if( fd<0 ){ @@ -69887,6 +70227,7 @@ struct IntegrityCk { StrAccum errMsg; /* Accumulate the error message text here */ u32 *heap; /* Min-heap used for analyzing cell coverage */ sqlite3 *db; /* Database connection running the check */ + i64 nRow; /* Number of rows visited in current tree */ }; /* @@ -70361,8 +70702,47 @@ int corruptPageError(int lineno, MemPage *p){ # define SQLITE_CORRUPT_PAGE(pMemPage) SQLITE_CORRUPT_PGNO(pMemPage->pgno) #endif +/* Default value for SHARED_LOCK_TRACE macro if shared-cache is disabled +** or if the lock tracking is disabled. This is always the value for +** release builds. +*/ +#define SHARED_LOCK_TRACE(X,MSG,TAB,TYPE) /*no-op*/ + #ifndef SQLITE_OMIT_SHARED_CACHE +#if 0 +/* ^---- Change to 1 and recompile to enable shared-lock tracing +** for debugging purposes. +** +** Print all shared-cache locks on a BtShared. Debugging use only. +*/ +static void sharedLockTrace( + BtShared *pBt, + const char *zMsg, + int iRoot, + int eLockType +){ + BtLock *pLock; + if( iRoot>0 ){ + printf("%s-%p %u%s:", zMsg, pBt, iRoot, eLockType==READ_LOCK?"R":"W"); + }else{ + printf("%s-%p:", zMsg, pBt); + } + for(pLock=pBt->pLock; pLock; pLock=pLock->pNext){ + printf(" %p/%u%s", pLock->pBtree, pLock->iTable, + pLock->eLock==READ_LOCK ? "R" : "W"); + while( pLock->pNext && pLock->pBtree==pLock->pNext->pBtree ){ + pLock = pLock->pNext; + printf(",%u%s", pLock->iTable, pLock->eLock==READ_LOCK ? "R" : "W"); + } + } + printf("\n"); + fflush(stdout); +} +#undef SHARED_LOCK_TRACE +#define SHARED_LOCK_TRACE(X,MSG,TAB,TYPE) sharedLockTrace(X,MSG,TAB,TYPE) +#endif /* Shared-lock tracing */ + #ifdef SQLITE_DEBUG /* **** This function is only used as part of an assert() statement. *** @@ -70439,6 +70819,8 @@ static int hasSharedCacheTableLock( iTab = iRoot; } + SHARED_LOCK_TRACE(pBtree->pBt,"hasLock",iRoot,eLockType); + /* Search for the required lock. Either a write-lock on root-page iTab, a ** write-lock on the schema table, or (if the client is reading) a ** read-lock on iTab will suffice. Return 1 if any of these are found. 
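  ** For example, a reader is satisfied by its own READ_LOCK on iTab,
  ** while a writer needs a WRITE_LOCK on iTab or on the schema table.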
 */
@@ -70572,6 +70954,8 @@ static int setSharedCacheTableLock(Btree *p, Pgno iTable, u8 eLock){
   BtLock *pLock = 0;
   BtLock *pIter;
 
+  SHARED_LOCK_TRACE(pBt,"setLock", iTable, eLock);
+
   assert( sqlite3BtreeHoldsMutex(p) );
   assert( eLock==READ_LOCK || eLock==WRITE_LOCK );
   assert( p->db!=0 );
@@ -70639,6 +71023,8 @@ static void clearAllSharedCacheTableLocks(Btree *p){
   assert( p->sharable || 0==*ppIter );
   assert( p->inTrans>0 );
 
+  SHARED_LOCK_TRACE(pBt, "clearAllLocks", 0, 0);
+
   while( *ppIter ){
     BtLock *pLock = *ppIter;
     assert( (pBt->btsFlags & BTS_EXCLUSIVE)==0 || pBt->pWriter==pLock->pBtree );
@@ -70677,6 +71063,9 @@ static void clearAllSharedCacheTableLocks(Btree *p){
 */
 static void downgradeAllSharedCacheTableLocks(Btree *p){
   BtShared *pBt = p->pBt;
+
+  SHARED_LOCK_TRACE(pBt, "downgradeLocks", 0, 0);
+
   if( pBt->pWriter==p ){
     BtLock *pLock;
     pBt->pWriter = 0;
@@ -75290,9 +75679,12 @@ static int accessPayload(
       if( pCur->aOverflow==0
        || nOvfl*(int)sizeof(Pgno) > sqlite3MallocSize(pCur->aOverflow)
       ){
-        Pgno *aNew = (Pgno*)sqlite3Realloc(
-            pCur->aOverflow, nOvfl*2*sizeof(Pgno)
-        );
+        Pgno *aNew;
+        if( sqlite3FaultSim(413) ){
+          aNew = 0;
+        }else{
+          aNew = (Pgno*)sqlite3Realloc(pCur->aOverflow, nOvfl*2*sizeof(Pgno));
+        }
         if( aNew==0 ){
           return SQLITE_NOMEM_BKPT;
         }else{
@@ -75302,6 +75694,12 @@ static int accessPayload(
       memset(pCur->aOverflow, 0, nOvfl*sizeof(Pgno));
       pCur->curFlags |= BTCF_ValidOvfl;
     }else{
+      /* Sanity check the validity of the overflow page cache */
+      assert( pCur->aOverflow[0]==nextPage
+           || pCur->aOverflow[0]==0
+           || CORRUPT_DB );
+      assert( pCur->aOverflow[0]!=0 || pCur->aOverflow[offset/ovflSize]==0 );
+
       /* If the overflow page-list cache has been allocated and the
       ** entry for the first required overflow page is valid, skip
       ** directly to it.
@@ -75783,6 +76181,23 @@ SQLITE_PRIVATE int sqlite3BtreeFirst(BtCursor *pCur, int *pRes){
   return rc;
 }
 
+#ifdef SQLITE_DEBUG
+/* The cursor is CURSOR_VALID and has BTCF_AtLast set.  Verify that
+** these flags are true for a consistent database.
+**
+** This routine is called from within assert() statements only.
+** It is an internal verification routine and does not appear in production
+** builds.
+*/
+static int cursorIsAtLastEntry(BtCursor *pCur){
+  int ii;
+  for(ii=0; ii<pCur->iPage; ii++){
+    if( pCur->aiIdx[ii]!=pCur->apPage[ii]->nCell ) return 0;
+  }
+  return pCur->ix==pCur->pPage->nCell-1 && pCur->pPage->leaf!=0;
+}
+#endif
+
 /* Move the cursor to the last entry in the table.  Return SQLITE_OK
 ** on success.  Set *pRes to 0 if the cursor actually points to something
 ** or set *pRes to 1 if the table is empty.
@@ -75811,18 +76226,7 @@ SQLITE_PRIVATE int sqlite3BtreeLast(BtCursor *pCur, int *pRes){
 
   /* If the cursor already points to the last entry, this is a no-op. */
   if( CURSOR_VALID==pCur->eState && (pCur->curFlags & BTCF_AtLast)!=0 ){
-#ifdef SQLITE_DEBUG
-    /* This block serves to assert() that the cursor really does point
-    ** to the last entry in the b-tree. */
-    int ii;
-    for(ii=0; ii<pCur->iPage; ii++){
-      assert( pCur->aiIdx[ii]==pCur->apPage[ii]->nCell );
-    }
-    assert( pCur->ix==pCur->pPage->nCell-1 || CORRUPT_DB );
-    testcase( pCur->ix!=pCur->pPage->nCell-1 );
-    /* ^-- dbsqlfuzz b92b72e4de80b5140c30ab71372ca719b8feb618 */
-    assert( pCur->pPage->leaf );
-#endif
+    assert( cursorIsAtLastEntry(pCur) || CORRUPT_DB );
     *pRes = 0;
     return SQLITE_OK;
   }
@@ -75875,6 +76279,7 @@ SQLITE_PRIVATE int sqlite3BtreeTableMoveto(
     }
     if( pCur->info.nKey<intKey && (pCur->curFlags & BTCF_AtLast)!=0 ){
+      assert( cursorIsAtLastEntry(pCur) || CORRUPT_DB );
       *pRes = -1;
       return SQLITE_OK;
     }
@@ -76341,10 +76746,10 @@ SQLITE_PRIVATE i64 sqlite3BtreeRowCountEst(BtCursor *pCur){
   assert( cursorOwnsBtShared(pCur) );
   assert( sqlite3_mutex_held(pCur->pBtree->db->mutex) );
 
-  /* Currently this interface is only called by the OP_IfSmaller
-  ** opcode, and it that case the cursor will always be valid and
-  ** will always point to a leaf node. */
-  if( NEVER(pCur->eState!=CURSOR_VALID) ) return -1;
+  /* Currently this interface is only called by the OP_IfSizeBetween
+  ** opcode and the OP_Count opcode with P3=1.  In either case,
+  ** the cursor will always be valid unless the btree is empty. */
+  if( pCur->eState!=CURSOR_VALID ) return 0;
   if( NEVER(pCur->pPage->leaf==0) ) return -1;
 
   n = pCur->pPage->nCell;
@@ -78475,7 +78880,7 @@ static int balance_nonroot(
     ** table-interior, index-leaf, or index-interior).
     */
    if( pOld->aData[0]!=apOld[0]->aData[0] ){
-      rc = SQLITE_CORRUPT_BKPT;
+      rc = SQLITE_CORRUPT_PAGE(pOld);
       goto balance_cleanup;
     }
 
@@ -78499,7 +78904,7 @@ static int balance_nonroot(
     memset(&b.szCell[b.nCell], 0, sizeof(b.szCell[0])*(limit+pOld->nOverflow));
     if( pOld->nOverflow>0 ){
       if( NEVER(limit<pOld->aiOvfl[0]) ){
-        rc = SQLITE_CORRUPT_BKPT;
+        rc = SQLITE_CORRUPT_PAGE(pOld);
         goto balance_cleanup;
       }
       limit = pOld->aiOvfl[0];
@@ -79142,7 +79547,7 @@ static int anotherValidCursor(BtCursor *pCur){
      && pOther->eState==CURSOR_VALID
      && pOther->pPage==pCur->pPage
     ){
-      return SQLITE_CORRUPT_BKPT;
+      return SQLITE_CORRUPT_PAGE(pCur->pPage);
     }
   }
   return SQLITE_OK;
@@ -79202,7 +79607,7 @@ static int balance(BtCursor *pCur){
       /* The page being written is not a root page, and there is currently
       ** more than one reference to it.  This only happens if the page is one
       ** of its own ancestor pages.  Corruption. */
-      rc = SQLITE_CORRUPT_BKPT;
+      rc = SQLITE_CORRUPT_PAGE(pPage);
     }else{
       MemPage * const pParent = pCur->apPage[iPage-1];
       int const iIdx = pCur->aiIdx[iPage-1];
@@ -79366,7 +79771,7 @@ static SQLITE_NOINLINE int btreeOverwriteOverflowCell(
     rc = btreeGetPage(pBt, ovflPgno, &pPage, 0);
     if( rc ) return rc;
     if( sqlite3PagerPageRefcount(pPage->pDbPage)!=1 || pPage->isInit ){
-      rc = SQLITE_CORRUPT_BKPT;
+      rc = SQLITE_CORRUPT_PAGE(pPage);
     }else{
       if( iOffset+ovflPageSize<(u32)nTotal ){
         ovflPgno = get4byte(pPage->aData);
@@ -79394,7 +79799,7 @@ static int btreeOverwriteCell(BtCursor *pCur, const BtreePayload *pX){
   if( pCur->info.pPayload + pCur->info.nLocal > pPage->aDataEnd
    || pCur->info.pPayload < pPage->aData + pPage->cellOffset
   ){
-    return SQLITE_CORRUPT_BKPT;
+    return SQLITE_CORRUPT_PAGE(pPage);
   }
   if( pCur->info.nLocal==nTotal ){
     /* The entire cell is local */
@@ -79475,7 +79880,7 @@ SQLITE_PRIVATE int sqlite3BtreeInsert(
     ** Which can only happen if the SQLITE_NoSchemaError flag was set when
     ** the schema was loaded. This cannot be asserted though, as a user might
     ** set the flag, load the schema, and then unset the flag. */
-    return SQLITE_CORRUPT_BKPT;
+    return SQLITE_CORRUPT_PGNO(pCur->pgnoRoot);
   }
 }
@@ -79598,7 +80003,7 @@ SQLITE_PRIVATE int sqlite3BtreeInsert(
  if( pPage->nFree<0 ){
    if( NEVER(pCur->eState>CURSOR_INVALID) ){
      /* ^^^^^--- due to the moveToRoot() call above */
-      rc = SQLITE_CORRUPT_BKPT;
+      rc = SQLITE_CORRUPT_PAGE(pPage);
    }else{
      rc = btreeComputeFreeSpace(pPage);
    }
@@ -79640,7 +80045,7 @@ SQLITE_PRIVATE int sqlite3BtreeInsert(
     CellInfo info;
     assert( idx>=0 );
     if( idx>=pPage->nCell ){
-      return SQLITE_CORRUPT_BKPT;
+      return SQLITE_CORRUPT_PAGE(pPage);
     }
     rc = sqlite3PagerWrite(pPage->pDbPage);
     if( rc ){
@@ -79667,10 +80072,10 @@ SQLITE_PRIVATE int sqlite3BtreeInsert(
     ** necessary to add the PTRMAP_OVERFLOW1 pointer-map entry.  */
     assert( rc==SQLITE_OK ); /* clearCell never fails when nLocal==nPayload */
     if( oldCell < pPage->aData+pPage->hdrOffset+10 ){
-      return SQLITE_CORRUPT_BKPT;
+      return SQLITE_CORRUPT_PAGE(pPage);
     }
     if( oldCell+szNew > pPage->aDataEnd ){
-      return SQLITE_CORRUPT_BKPT;
+      return SQLITE_CORRUPT_PAGE(pPage);
     }
     memcpy(oldCell, newCell, szNew);
     return SQLITE_OK;
@@ -79772,7 +80177,7 @@ SQLITE_PRIVATE int sqlite3BtreeTransferRow(BtCursor *pDest, BtCursor *pSrc, i64
   nIn = pSrc->info.nLocal;
   aIn = pSrc->info.pPayload;
   if( aIn+nIn>pSrc->pPage->aDataEnd ){
-    return SQLITE_CORRUPT_BKPT;
+    return SQLITE_CORRUPT_PAGE(pSrc->pPage);
   }
   nRem = pSrc->info.nPayload;
   if( nIn==nRem && nIn<pDest->pPage->maxLocal ){
@@ -79797,7 +80202,7 @@ SQLITE_PRIVATE int sqlite3BtreeTransferRow(BtCursor *pDest, BtCursor *pSrc, i64
 
   if( nRem>nIn ){
     if( aIn+nIn+4>pSrc->pPage->aDataEnd ){
-      return SQLITE_CORRUPT_BKPT;
+      return SQLITE_CORRUPT_PAGE(pSrc->pPage);
     }
     ovflIn = get4byte(&pSrc->info.pPayload[nIn]);
   }
@@ -79893,7 +80298,7 @@ SQLITE_PRIVATE int sqlite3BtreeDelete(BtCursor *pCur, u8 flags){
     assert( rc!=SQLITE_OK || CORRUPT_DB || pCur->eState==CURSOR_VALID );
     if( rc || pCur->eState!=CURSOR_VALID ) return rc;
   }else{
-    return SQLITE_CORRUPT_BKPT;
+    return SQLITE_CORRUPT_PGNO(pCur->pgnoRoot);
  }
   assert( pCur->eState==CURSOR_VALID );
@@ -79902,14 +80307,14 @@ SQLITE_PRIVATE int sqlite3BtreeDelete(BtCursor *pCur, u8 flags){
   iCellIdx = pCur->ix;
   pPage = pCur->pPage;
   if( pPage->nCell<=iCellIdx ){
-    return SQLITE_CORRUPT_BKPT;
+    return SQLITE_CORRUPT_PAGE(pPage);
   }
   pCell = findCell(pPage, iCellIdx);
   if( pPage->nFree<0 && btreeComputeFreeSpace(pPage) ){
-    return SQLITE_CORRUPT_BKPT;
+    return SQLITE_CORRUPT_PAGE(pPage);
   }
   if( pCell<&pPage->aCellIdx[pPage->nCell] ){
-    return SQLITE_CORRUPT_BKPT;
+    return SQLITE_CORRUPT_PAGE(pPage);
   }
 
   /* If the BTREE_SAVEPOSITION bit is on, then the cursor position must
@@ -80000,7 +80405,7 @@ SQLITE_PRIVATE int sqlite3BtreeDelete(BtCursor *pCur, u8 flags){
       n = pCur->pPage->pgno;
     }
     pCell = findCell(pLeaf, pLeaf->nCell-1);
-    if( pCell<&pLeaf->aData[4] ) return SQLITE_CORRUPT_BKPT;
+    if( pCell<&pLeaf->aData[4] ) return SQLITE_CORRUPT_PAGE(pLeaf);
     nCell = pLeaf->xCellSize(pLeaf, pCell);
     assert( MX_CELL_SIZE(pBt) >= nCell );
     pTmp = pBt->pTmpSpace;
@@ -80116,7 +80521,7 @@ static int btreeCreateTable(Btree *p, Pgno *piTable, int createTabFlags){
   */
   sqlite3BtreeGetMeta(p, BTREE_LARGEST_ROOT_PAGE, &pgnoRoot);
   if( pgnoRoot>btreePagecount(pBt) ){
-    return SQLITE_CORRUPT_BKPT;
+    return SQLITE_CORRUPT_PGNO(pgnoRoot);
   }
   pgnoRoot++;
@@ -80164,7 +80569,7 @@ static int btreeCreateTable(Btree *p, Pgno *piTable, int createTabFlags){
     }
     rc = ptrmapGet(pBt, pgnoRoot, &eType, &iPtrPage);
     if( eType==PTRMAP_ROOTPAGE || eType==PTRMAP_FREEPAGE ){
-      rc = SQLITE_CORRUPT_BKPT;
+      rc = SQLITE_CORRUPT_PGNO(pgnoRoot);
     }
     if( rc!=SQLITE_OK ){
       releasePage(pRoot);
@@ -80254,14 +80659,14 @@ static int clearDatabasePage(
 
   assert( sqlite3_mutex_held(pBt->mutex) );
   if( pgno>btreePagecount(pBt) ){
-    return SQLITE_CORRUPT_BKPT;
+    return SQLITE_CORRUPT_PGNO(pgno);
   }
   rc = getAndInitPage(pBt, pgno, &pPage, 0);
   if( rc ) return rc;
   if( (pBt->openFlags & BTREE_SINGLE)==0
    && sqlite3PagerPageRefcount(pPage->pDbPage) != (1 + (pgno==1))
   ){
-    rc = SQLITE_CORRUPT_BKPT;
+    rc = SQLITE_CORRUPT_PAGE(pPage);
     goto cleardatabasepage_out;
   }
   hdr = pPage->hdrOffset;
@@ -80365,7 +80770,7 @@ static int btreeDropTable(Btree *p, Pgno iTable, int *piMoved){
   assert( p->inTrans==TRANS_WRITE );
   assert( iTable>=2 );
   if( iTable>btreePagecount(pBt) ){
-    return SQLITE_CORRUPT_BKPT;
+    return SQLITE_CORRUPT_PGNO(iTable);
   }
 
   rc = sqlite3BtreeClearTable(p, iTable, 0);
@@ -80959,6 +81364,9 @@ static int checkTreePage(
   ** number of cells on the page. */
   nCell = get2byte(&data[hdr+3]);
   assert( pPage->nCell==nCell );
+  if( pPage->leaf || pPage->intKey==0 ){
+    pCheck->nRow += nCell;
+  }
 
   /* EVIDENCE-OF: R-23882-45353 The cell pointer array of a b-tree page
   ** immediately follows the b-tree page header. */
@@ -81070,6 +81478,7 @@ static int checkTreePage(
       btreeHeapInsert(heap, (pc<<16)|(pc+size-1));
     }
   }
+  assert( heap!=0 );
 
   /* Add the freeblocks to the min-heap
   **
   ** EVIDENCE-OF: R-20690-50594 The second field of the b-tree page header
@@ -81169,6 +81578,7 @@ SQLITE_PRIVATE int sqlite3BtreeIntegrityCheck(
   sqlite3 *db,  /* Database connection that is running the check */
   Btree *p,     /* The btree to be checked */
   Pgno *aRoot,  /* An array of root pages numbers for individual trees */
+  Mem *aCnt,    /* Memory cells to write counts for each tree to */
   int nRoot,    /* Number of entries in aRoot[] */
   int mxErr,    /* Stop reporting errors after this many */
   int *pnErr,   /* OUT: Write number of errors seen to this variable */
@@ -81182,7 +81592,9 @@ SQLITE_PRIVATE int sqlite3BtreeIntegrityCheck(
   int bPartial = 0;            /* True if not checking all btrees */
   int bCkFreelist = 1;         /* True to scan the freelist */
   VVA_ONLY( int nRef );
+
   assert( nRoot>0 );
+  assert( aCnt!=0 );
 
   /* aRoot[0]==0 means this is a partial check */
   if( aRoot[0]==0 ){
@@ -81255,15 +81667,18 @@ SQLITE_PRIVATE int sqlite3BtreeIntegrityCheck(
   testcase( pBt->db->flags & SQLITE_CellSizeCk );
   pBt->db->flags &= ~(u64)SQLITE_CellSizeCk;
   for(i=0; (int)i<nRoot && sCheck.mxErr; i++){
-    i64 notUsed;
-    if( aRoot[i]==0 ) continue;
+    sCheck.nRow = 0;
+    if( aRoot[i] ){
+      i64 notUsed;
 #ifndef SQLITE_OMIT_AUTOVACUUM
-    if( pBt->autoVacuum && aRoot[i]>1 && !bPartial ){
-      checkPtrmap(&sCheck, aRoot[i], PTRMAP_ROOTPAGE, 0);
-    }
+      if( pBt->autoVacuum && aRoot[i]>1 && !bPartial ){
+        checkPtrmap(&sCheck, aRoot[i], PTRMAP_ROOTPAGE, 0);
+      }
 #endif
-    sCheck.v0 = aRoot[i];
-    checkTreePage(&sCheck, aRoot[i], &notUsed, LARGEST_INT64);
+      sCheck.v0 = aRoot[i];
+      checkTreePage(&sCheck, aRoot[i], &notUsed, LARGEST_INT64);
+    }
+    sqlite3MemSetArrayInt64(aCnt, i, sCheck.nRow);
   }
   pBt->db->flags = savedDbFlags;
@@ -83318,6 +83733,13 @@ SQLITE_PRIVATE void sqlite3VdbeMemSetInt64(Mem *pMem, i64 val){
   }
 }
 
+/*
+** Set the iIdx'th entry of array aMem[] to contain integer value val.
+*/
+SQLITE_PRIVATE void sqlite3MemSetArrayInt64(sqlite3_value *aMem, int iIdx, i64 val){
+  sqlite3VdbeMemSetInt64(&aMem[iIdx], val);
+}
+
 /* A no-op destructor */
 SQLITE_PRIVATE void sqlite3NoopDestructor(void *p){ UNUSED_PARAMETER(p); }
 
@@ -84006,14 +84428,20 @@ static int valueFromExpr(
   }
 
   /* Handle negative integers in a single step.  This is needed in the
-  ** case when the value is -9223372036854775808.
-  */
-  if( op==TK_UMINUS
-   && (pExpr->pLeft->op==TK_INTEGER || pExpr->pLeft->op==TK_FLOAT) ){
-    pExpr = pExpr->pLeft;
-    op = pExpr->op;
-    negInt = -1;
-    zNeg = "-";
+  ** case when the value is -9223372036854775808. Except - do not do this
+  ** for hexadecimal literals.  */
+  if( op==TK_UMINUS ){
+    Expr *pLeft = pExpr->pLeft;
+    if( (pLeft->op==TK_INTEGER || pLeft->op==TK_FLOAT) ){
+      if( ExprHasProperty(pLeft, EP_IntValue)
+       || pLeft->u.zToken[0]!='0' || (pLeft->u.zToken[1] & ~0x20)!='X'
+      ){
+        pExpr = pLeft;
+        op = pExpr->op;
+        negInt = -1;
+        zNeg = "-";
+      }
+    }
   }
 
   if( op==TK_STRING || op==TK_FLOAT || op==TK_INTEGER ){
@@ -84022,12 +84450,26 @@ static int valueFromExpr(
     if( ExprHasProperty(pExpr, EP_IntValue) ){
       sqlite3VdbeMemSetInt64(pVal, (i64)pExpr->u.iValue*negInt);
     }else{
-      zVal = sqlite3MPrintf(db, "%s%s", zNeg, pExpr->u.zToken);
-      if( zVal==0 ) goto no_mem;
-      sqlite3ValueSetStr(pVal, -1, zVal, SQLITE_UTF8, SQLITE_DYNAMIC);
+      i64 iVal;
+      if( op==TK_INTEGER && 0==sqlite3DecOrHexToI64(pExpr->u.zToken, &iVal) ){
+        sqlite3VdbeMemSetInt64(pVal, iVal*negInt);
+      }else{
+        zVal = sqlite3MPrintf(db, "%s%s", zNeg, pExpr->u.zToken);
+        if( zVal==0 ) goto no_mem;
+        sqlite3ValueSetStr(pVal, -1, zVal, SQLITE_UTF8, SQLITE_DYNAMIC);
+      }
     }
-    if( (op==TK_INTEGER || op==TK_FLOAT ) && affinity==SQLITE_AFF_BLOB ){
-      sqlite3ValueApplyAffinity(pVal, SQLITE_AFF_NUMERIC, SQLITE_UTF8);
+    if( affinity==SQLITE_AFF_BLOB ){
+      if( op==TK_FLOAT ){
+        assert( pVal && pVal->z && pVal->flags==(MEM_Str|MEM_Term) );
+        sqlite3AtoF(pVal->z, &pVal->u.r, pVal->n, SQLITE_UTF8);
+        pVal->flags = MEM_Real;
+      }else if( op==TK_INTEGER ){
+        /* This case is required by -9223372036854775808 and other strings
+        ** that look like integers but cannot be handled by the
+        ** sqlite3DecOrHexToI64() call above.  */
+        sqlite3ValueApplyAffinity(pVal, SQLITE_AFF_NUMERIC, SQLITE_UTF8);
+      }
     }else{
       sqlite3ValueApplyAffinity(pVal, affinity, SQLITE_UTF8);
     }
@@ -84297,17 +84739,17 @@ SQLITE_PRIVATE int sqlite3Stat4Column(
   sqlite3_value **ppVal       /* OUT: Extracted value */
 ){
   u32 t = 0;                  /* a column type code */
-  int nHdr;                   /* Size of the header in the record */
-  int iHdr;                   /* Next unread header byte */
-  int iField;                 /* Next unread data byte */
-  int szField = 0;            /* Size of the current data field */
+  u32 nHdr;                   /* Size of the header in the record */
+  u32 iHdr;                   /* Next unread header byte */
+  i64 iField;                 /* Next unread data byte */
+  u32 szField = 0;            /* Size of the current data field */
   int i;                      /* Column index */
   u8 *a = (u8*)pRec;          /* Typecast byte array */
   Mem *pMem = *ppVal;         /* Write result into this Mem object */
 
   assert( iCol>0 );
   iHdr = getVarint32(a, nHdr);
-  if( nHdr>nRec || iHdr>=nHdr ) return SQLITE_CORRUPT_BKPT;
+  if( nHdr>(u32)nRec || iHdr>=nHdr ) return SQLITE_CORRUPT_BKPT;
   iField = nHdr;
   for(i=0; i<=iCol; i++){
     iHdr += getVarint32(&a[iHdr], t);
@@ -85342,6 +85784,15 @@ static void resolveP2Values(Vdbe *p, int *pMaxFuncArgs){
           assert( aLabel!=0 );  /* True because of tag-20230419-1 */
           pOp->p2 = aLabel[ADDR(pOp->p2)];
         }
+
+        /* OPFLG_JUMP opcodes never have P2==0, though OPFLG_JUMP0 opcodes
+        ** might */
+        assert( pOp->p2>0
+                || (sqlite3OpcodeProperty[pOp->opcode] & OPFLG_JUMP0)!=0 );
+
+        /* Jumps never go off the end of the bytecode array */
+        assert( pOp->p2<p->nOp
+                || (sqlite3OpcodeProperty[pOp->opcode] & OPFLG_JUMP)==0 );
         break;
       }
     }
@@ -87749,7 +88200,7 @@ SQLITE_PRIVATE int sqlite3VdbeHalt(Vdbe *p){
   /* Check for immediate foreign key violations. */
   if( p->rc==SQLITE_OK || (p->errorAction==OE_Fail && !isSpecialError) ){
-    sqlite3VdbeCheckFk(p, 0);
+    (void)sqlite3VdbeCheckFk(p, 0);
   }
 
   /* If the auto-commit flag is set and this is the only active writer
@@ -88919,17 +89370,15 @@ SQLITE_PRIVATE int sqlite3IntFloatCompare(i64 i, double r){
     return (x<r) ? -1 : (x>r);
   }else{
     i64 y;
-    double s;
     if( r<-9223372036854775808.0 ) return +1;
     if( r>=9223372036854775808.0 ) return -1;
     y = (i64)r;
     if( i<y ) return -1;
     if( i>y ) return +1;
-    s = (double)i;
-    testcase( doubleLt(s,r) );
-    testcase( doubleLt(r,s) );
-    testcase( doubleEq(r,s) );
-    return (s<r) ? -1 : (s>r);
+    testcase( doubleLt(((double)i),r) );
+    testcase( doubleLt(r,((double)i)) );
+    testcase( doubleEq(r,((double)i)) );
+    return (((double)i)<r) ? -1 : (((double)i)>r);
   }
 }
@@ -89732,7 +90181,8 @@ SQLITE_PRIVATE sqlite3_value *sqlite3VdbeGetBoundValue(Vdbe *v, int iVar, u8 aff
   assert( iVar>0 );
   if( v ){
     Mem *pMem = &v->aVar[iVar-1];
-    assert( (v->db->flags & SQLITE_EnableQPSG)==0 );
+    assert( (v->db->flags & SQLITE_EnableQPSG)==0
+         || (v->db->mDbFlags & DBFLAG_InternalFunc)!=0 );
    if( 0==(pMem->flags & MEM_Null) ){
       sqlite3_value *pRet = sqlite3ValueNew(v->db);
       if( pRet ){
@@ -89752,7 +90202,8 @@ SQLITE_PRIVATE sqlite3_value *sqlite3VdbeGetBoundValue(Vdbe *v, int iVar, u8 aff
 */
 SQLITE_PRIVATE void sqlite3VdbeSetVarmask(Vdbe *v, int iVar){
   assert( iVar>0 );
-  assert( (v->db->flags & SQLITE_EnableQPSG)==0 );
+  assert( (v->db->flags & SQLITE_EnableQPSG)==0
+       || (v->db->mDbFlags & DBFLAG_InternalFunc)!=0 );
   if( iVar>=32 ){
     v->expmask |= 0x80000000;
   }else{
@@ -92337,7 +92788,6 @@ SQLITE_API int sqlite3_stmt_scanstatus_v2(
   }
   if( flags & SQLITE_SCANSTAT_COMPLEX ){
     idx = iScan;
-    pScan = &p->aScan[idx];
   }else{
     /* If the COMPLEX flag is clear, then this function must ignore any
     ** ScanStatus structures with ScanStatus.addrLoop set to 0. */
@@ -92350,6 +92800,8 @@ SQLITE_API int sqlite3_stmt_scanstatus_v2(
     }
   }
   if( idx>=p->nScan ) return 1;
+  assert( pScan==0 || pScan==&p->aScan[idx] );
+  pScan = &p->aScan[idx];
 
   switch( iScanStatusOp ){
     case SQLITE_SCANSTAT_NLOOP: {
@@ -93798,7 +94250,7 @@ case OP_Return: {           /* in1 */
 **
 ** See also: EndCoroutine
 */
-case OP_InitCoroutine: {     /* jump */
+case OP_InitCoroutine: {     /* jump0 */
  assert( pOp->p1>0 &&  pOp->p1<=(p->nMem+1 - p->nCursor) );
  assert( pOp->p2>=0 && pOp->p2<p->nOp );
  assert( pOp->p3>=0 && pOp->p3<p->nOp );
@@ -93821,7 +94273,9 @@ jump_to_p2:
 **
 ** The instruction at the address in register P1 is a Yield.
 ** Jump to the P2 parameter of that Yield.
-** After the jump, register P1 becomes undefined.
+** After the jump, the value register P1 is left with a value
+** such that subsequent OP_Yields go back to this same
+** OP_EndCoroutine instruction.
 **
 ** See also: InitCoroutine
 */
@@ -93833,8 +94287,8 @@ case OP_EndCoroutine: {           /* in1 */
   pCaller = &aOp[pIn1->u.i];
   assert( pCaller->opcode==OP_Yield );
   assert( pCaller->p2>=0 && pCaller->p2<p->nOp );
+  pIn1->u.i = (int)(pOp - p->aOp) - 1;
   pOp = &aOp[pCaller->p2 - 1];
-  pIn1->flags = MEM_Undefined;
   break;
 }
 
@@ -93851,7 +94305,7 @@ case OP_EndCoroutine: {           /* in1 */
 **
 ** See also: InitCoroutine
 */
-case OP_Yield: {            /* in1, jump */
+case OP_Yield: {            /* in1, jump0 */
   int pcDest;
   pIn1 = &aMem[pOp->p1];
   assert( VdbeMemDynamic(pIn1)==0 );
@@ -94181,19 +94635,15 @@ case OP_Blob: {                /* out2 */
   break;
 }
 
-/* Opcode: Variable P1 P2 * P4 *
-** Synopsis: r[P2]=parameter(P1,P4)
+/* Opcode: Variable P1 P2 * * *
+** Synopsis: r[P2]=parameter(P1)
 **
 ** Transfer the values of bound parameter P1 into register P2
-**
-** If the parameter is named, then its name appears in P4.
-** The P4 value is used by sqlite3_bind_parameter_name(). */ case OP_Variable: { /* out2 */ Mem *pVar; /* Value being transferred */ assert( pOp->p1>0 && pOp->p1<=p->nVar ); - assert( pOp->p4.z==0 || pOp->p4.z==sqlite3VListNumToName(p->pVList,pOp->p1) ); pVar = &p->aVar[pOp->p1 - 1]; if( sqlite3VdbeMemTooBig(pVar) ){ goto too_big; @@ -94714,7 +95164,7 @@ case OP_AddImm: { /* in1 */ ** without data loss, then jump immediately to P2, or if P2==0 ** raise an SQLITE_MISMATCH exception. */ -case OP_MustBeInt: { /* jump, in1 */ +case OP_MustBeInt: { /* jump0, in1 */ pIn1 = &aMem[pOp->p1]; if( (pIn1->flags & MEM_Int)==0 ){ applyAffinity(pIn1, SQLITE_AFF_NUMERIC, encoding); @@ -94755,7 +95205,7 @@ case OP_RealAffinity: { /* in1 */ } #endif -#ifndef SQLITE_OMIT_CAST +#if !defined(SQLITE_OMIT_CAST) && !defined(SQLITE_OMIT_ANALYZE) /* Opcode: Cast P1 P2 * * * ** Synopsis: affinity(r[P1]) ** @@ -96327,11 +96777,16 @@ case OP_MakeRecord: { switch( len ){ default: zPayload[7] = (u8)(v&0xff); v >>= 8; zPayload[6] = (u8)(v&0xff); v >>= 8; + /* no break */ deliberate_fall_through case 6: zPayload[5] = (u8)(v&0xff); v >>= 8; zPayload[4] = (u8)(v&0xff); v >>= 8; + /* no break */ deliberate_fall_through case 4: zPayload[3] = (u8)(v&0xff); v >>= 8; + /* no break */ deliberate_fall_through case 3: zPayload[2] = (u8)(v&0xff); v >>= 8; + /* no break */ deliberate_fall_through case 2: zPayload[1] = (u8)(v&0xff); v >>= 8; + /* no break */ deliberate_fall_through case 1: zPayload[0] = (u8)(v&0xff); } zPayload += len; @@ -97250,7 +97705,8 @@ case OP_SequenceTest: { ** is the only cursor opcode that works with a pseudo-table. ** ** P3 is the number of fields in the records that will be stored by -** the pseudo-table. +** the pseudo-table. If P2 is 0 or negative then the pseudo-cursor +** will return NULL for every column. */ case OP_OpenPseudo: { VdbeCursor *pCx; @@ -97393,10 +97849,10 @@ case OP_ColumnsUsed: { ** ** See also: Found, NotFound, SeekGt, SeekGe, SeekLt */ -case OP_SeekLT: /* jump, in3, group, ncycle */ -case OP_SeekLE: /* jump, in3, group, ncycle */ -case OP_SeekGE: /* jump, in3, group, ncycle */ -case OP_SeekGT: { /* jump, in3, group, ncycle */ +case OP_SeekLT: /* jump0, in3, group, ncycle */ +case OP_SeekLE: /* jump0, in3, group, ncycle */ +case OP_SeekGE: /* jump0, in3, group, ncycle */ +case OP_SeekGT: { /* jump0, in3, group, ncycle */ int res; /* Comparison result */ int oc; /* Opcode */ VdbeCursor *pC; /* The cursor to seek */ @@ -98063,7 +98519,7 @@ case OP_Found: { /* jump, in3, ncycle */ ** ** See also: Found, NotFound, NoConflict, SeekRowid */ -case OP_SeekRowid: { /* jump, in3, ncycle */ +case OP_SeekRowid: { /* jump0, in3, ncycle */ VdbeCursor *pC; BtCursor *pCrsr; int res; @@ -98822,7 +99278,7 @@ case OP_NullRow: { ** configured to use Prev, not Next. */ case OP_SeekEnd: /* ncycle */ -case OP_Last: { /* jump, ncycle */ +case OP_Last: { /* jump0, ncycle */ VdbeCursor *pC; BtCursor *pCrsr; int res; @@ -98856,28 +99312,38 @@ case OP_Last: { /* jump, ncycle */ break; } -/* Opcode: IfSmaller P1 P2 P3 * * +/* Opcode: IfSizeBetween P1 P2 P3 P4 * ** -** Estimate the number of rows in the table P1. Jump to P2 if that -** estimate is less than approximately 2**(0.1*P3). +** Let N be the approximate number of rows in the table or index +** with cursor P1 and let X be 10*log2(N) if N is positive or -1 +** if N is zero. +** +** Jump to P2 if X is in between P3 and P4, inclusive. 
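+**
+** For example, since 10*log2(10) is about 33, P3=0 with P4=33 jumps
+** only when the btree holds roughly ten rows or fewer.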
 */
-case OP_IfSmaller: {        /* jump */
+case OP_IfSizeBetween: {        /* jump */
   VdbeCursor *pC;
   BtCursor *pCrsr;
   int res;
   i64 sz;
 
   assert( pOp->p1>=0 && pOp->p1<p->nCursor );
+  assert( pOp->p4type==P4_INT32 );
+  assert( pOp->p3>=-1 && pOp->p3<=640*2 );
+  assert( pOp->p4.i>=-1 && pOp->p4.i<=640*2 );
   pC = p->apCsr[pOp->p1];
   assert( pC!=0 );
   pCrsr = pC->uc.pCursor;
   assert( pCrsr );
   rc = sqlite3BtreeFirst(pCrsr, &res);
   if( rc ) goto abort_due_to_error;
-  if( res==0 ){
+  if( res!=0 ){
+    sz = -1;  /* -Infinity encoding */
+  }else{
     sz = sqlite3BtreeRowCountEst(pCrsr);
-    if( ALWAYS(sz>=0) && sqlite3LogEst((u64)sz)<pOp->p3 ) res = 1;
+    assert( sz>0 );
+    sz = sqlite3LogEst((u64)sz);
   }
+  res = sz>=pOp->p3 && sz<=pOp->p4.i;
   VdbeBranchTaken(res!=0,2);
   if( res ) goto jump_to_p2;
   break;
@@ -98930,7 +99396,7 @@ case OP_Sort: {        /* jump ncycle */
 ** from the beginning toward the end.  In other words, the cursor is
 ** configured to use Next, not Prev.
 */
-case OP_Rewind: {        /* jump, ncycle */
+case OP_Rewind: {        /* jump0, ncycle */
   VdbeCursor *pC;
   BtCursor *pCrsr;
   int res;
@@ -99577,11 +100043,18 @@ case OP_CreateBtree: {          /* out2 */
   break;
 }
 
-/* Opcode: SqlExec * * * P4 *
+/* Opcode: SqlExec P1 P2 * P4 *
 **
 ** Run the SQL statement or statements specified in the P4 string.
-** Disable Auth and Trace callbacks while those statements are running if
-** P1 is true.
+**
+** The P1 parameter is a bitmask of options:
+**
+**    0x0001     Disable Auth and Trace callbacks while the statements
+**               in P4 are running.
+**
+**    0x0002     Set db->nAnalysisLimit to P2 while the statements in
+**               P4 are running.
+**
 */
 case OP_SqlExec: {
   char *zErr;
@@ -99589,6 +100062,7 @@ case OP_SqlExec: {
   sqlite3_xauth xAuth;
 #endif
   u8 mTrace;
+  int savedAnalysisLimit;
 
   sqlite3VdbeIncrWriteCounter(p, 0);
   db->nSqlExec++;
@@ -99597,18 +100071,23 @@ case OP_SqlExec: {
   xAuth = db->xAuth;
 #endif
   mTrace = db->mTrace;
-  if( pOp->p1 ){
+  savedAnalysisLimit = db->nAnalysisLimit;
+  if( pOp->p1 & 0x0001 ){
 #ifndef SQLITE_OMIT_AUTHORIZATION
     db->xAuth = 0;
 #endif
     db->mTrace = 0;
   }
+  if( pOp->p1 & 0x0002 ){
+    db->nAnalysisLimit = pOp->p2;
+  }
   rc = sqlite3_exec(db, pOp->p4.z, 0, 0, &zErr);
   db->nSqlExec--;
 #ifndef SQLITE_OMIT_AUTHORIZATION
   db->xAuth = xAuth;
 #endif
   db->mTrace = mTrace;
+  db->nAnalysisLimit = savedAnalysisLimit;
   if( zErr || rc ){
     sqlite3VdbeError(p, "%s", zErr);
     sqlite3_free(zErr);
@@ -99760,11 +100239,11 @@ case OP_DropTrigger: {
 /* Opcode: IntegrityCk P1 P2 P3 P4 P5
 **
 ** Do an analysis of the currently open database.  Store in
-** register P1 the text of an error message describing any problems.
-** If no problems are found, store a NULL in register P1.
+** register (P1+1) the text of an error message describing any problems.
+** If no problems are found, store a NULL in register (P1+1).
 **
-** The register P3 contains one less than the maximum number of allowed errors.
-** At most reg(P3) errors will be reported.
+** The register (P1) contains one less than the maximum number of allowed
+** errors.  At most reg(P1) errors will be reported.
 ** In other words, the analysis stops as soon as reg(P1) errors are
 ** seen.  Reg(P1) is updated with the number of errors remaining.
 **
@@ -99784,19 +100263,21 @@ case OP_IntegrityCk: {
   Mem *pnErr;        /* Register keeping track of errors remaining */
 
   assert( p->bIsReader );
+  assert( pOp->p4type==P4_INTARRAY );
   nRoot = pOp->p2;
   aRoot = pOp->p4.ai;
   assert( nRoot>0 );
+  assert( aRoot!=0 );
   assert( aRoot[0]==(Pgno)nRoot );
-  assert( pOp->p3>0 && pOp->p3<=(p->nMem+1 - p->nCursor) );
-  pnErr = &aMem[pOp->p3];
+  assert( pOp->p1>0 && (pOp->p1+1)<=(p->nMem+1 - p->nCursor) );
+  pnErr = &aMem[pOp->p1];
   assert( (pnErr->flags & MEM_Int)!=0 );
   assert( (pnErr->flags & (MEM_Str|MEM_Blob))==0 );
-  pIn1 = &aMem[pOp->p1];
+  pIn1 = &aMem[pOp->p1+1];
   assert( pOp->p5<db->nDb );
   assert( DbMaskTest(p->btreeMask, pOp->p5) );
-  rc = sqlite3BtreeIntegrityCheck(db, db->aDb[pOp->p5].pBt, &aRoot[1], nRoot,
-                                 (int)pnErr->u.i+1, &nErr, &z);
+  rc = sqlite3BtreeIntegrityCheck(db, db->aDb[pOp->p5].pBt, &aRoot[1],
+                                  &aMem[pOp->p3], nRoot, (int)pnErr->u.i+1,
+                                  &nErr, &z);
   sqlite3VdbeMemSetNull(pIn1);
   if( nErr==0 ){
     assert( z==0 );
@@ -99923,7 +100404,9 @@ case OP_RowSetTest: {                     /* jump, in1, in3 */
 ** P1 contains the address of the memory cell that contains the first memory
 ** cell in an array of values used as arguments to the sub-program.  P2
 ** contains the address to jump to if the sub-program throws an IGNORE
-** exception using the RAISE() function.  Register P3 contains the address
+** exception using the RAISE() function.  P2 might be zero, if there is
+** no possibility that an IGNORE exception will be raised.
+** Register P3 contains the address
 ** of a memory cell in this (the parent) VM that is used to allocate the
 ** memory required by the sub-vdbe at runtime.
 **
 ** If P5 is non-zero, then recursive program invocation is enabled.
 */
-case OP_Program: {        /* jump */
+case OP_Program: {        /* jump0 */
   int nMem;               /* Number of memory registers for sub-program */
   int nByte;              /* Bytes of runtime space required for sub-program */
   Mem *pRt;               /* Register to allocate runtime space */
@@ -101480,7 +101963,7 @@ case OP_Filter: {          /* jump */
 ** error is encountered.
*/ case OP_Trace: -case OP_Init: { /* jump */ +case OP_Init: { /* jump0 */ int i; #ifndef SQLITE_OMIT_TRACE char *zTrace; @@ -105381,10 +105864,10 @@ static int bytecodevtabColumn( #ifdef SQLITE_ENABLE_STMT_SCANSTATUS case 9: /* nexec */ - sqlite3_result_int(ctx, pOp->nExec); + sqlite3_result_int64(ctx, pOp->nExec); break; case 10: /* ncycle */ - sqlite3_result_int(ctx, pOp->nCycle); + sqlite3_result_int64(ctx, pOp->nCycle); break; #else case 9: /* nexec */ @@ -106477,7 +106960,7 @@ static void extendFJMatch( static SQLITE_NOINLINE int isValidSchemaTableName( const char *zTab, /* Name as it appears in the SQL */ Table *pTab, /* The schema table we are trying to match */ - Schema *pSchema /* non-NULL if a database qualifier is present */ + const char *zDb /* non-NULL if a database qualifier is present */ ){ const char *zLegacy; assert( pTab!=0 ); @@ -106488,7 +106971,7 @@ static SQLITE_NOINLINE int isValidSchemaTableName( if( sqlite3StrICmp(zTab+7, &PREFERRED_TEMP_SCHEMA_TABLE[7])==0 ){ return 1; } - if( pSchema==0 ) return 0; + if( zDb==0 ) return 0; if( sqlite3StrICmp(zTab+7, &LEGACY_SCHEMA_TABLE[7])==0 ) return 1; if( sqlite3StrICmp(zTab+7, &PREFERRED_SCHEMA_TABLE[7])==0 ) return 1; }else{ @@ -106528,7 +107011,7 @@ static int lookupName( Parse *pParse, /* The parsing context */ const char *zDb, /* Name of the database containing table, or NULL */ const char *zTab, /* Name of table containing column, or NULL */ - const char *zCol, /* Name of the column. */ + const Expr *pRight, /* Name of the column. */ NameContext *pNC, /* The name context used to resolve the name */ Expr *pExpr /* Make this EXPR node point to the selected column */ ){ @@ -106545,6 +107028,7 @@ static int lookupName( Table *pTab = 0; /* Table holding the row */ Column *pCol; /* A column of pTab */ ExprList *pFJMatch = 0; /* Matches for FULL JOIN .. USING */ + const char *zCol = pRight->u.zToken; assert( pNC ); /* the name context cannot be NULL. 
*/ assert( zCol ); /* The Z in X.Y.Z cannot be NULL */ @@ -106670,7 +107154,7 @@ static int lookupName( } }else if( sqlite3StrICmp(zTab, pTab->zName)!=0 ){ if( pTab->tnum!=1 ) continue; - if( !isValidSchemaTableName(zTab, pTab, pSchema) ) continue; + if( !isValidSchemaTableName(zTab, pTab, zDb) ) continue; } assert( ExprUseYTab(pExpr) ); if( IN_RENAME_OBJECT && pItem->zAlias ){ @@ -106776,7 +107260,8 @@ static int lookupName( if( pParse->bReturning ){ if( (pNC->ncFlags & NC_UBaseReg)!=0 && ALWAYS(zTab==0 - || sqlite3StrICmp(zTab,pParse->pTriggerTab->zName)==0) + || sqlite3StrICmp(zTab,pParse->pTriggerTab->zName)==0 + || isValidSchemaTableName(zTab, pParse->pTriggerTab, 0)) ){ pExpr->iTable = op!=TK_DELETE; pTab = pParse->pTriggerTab; @@ -106880,6 +107365,11 @@ static int lookupName( && ALWAYS(VisibleRowid(pMatch->pTab) || pMatch->fg.isNestedFrom) ){ cnt = cntTab; +#if SQLITE_ALLOW_ROWID_IN_VIEW+0==2 + if( pMatch->pTab!=0 && IsView(pMatch->pTab) ){ + eNewExprOp = TK_NULL; + } +#endif if( pMatch->fg.isNestedFrom==0 ) pExpr->iColumn = -1; pExpr->affExpr = SQLITE_AFF_INTEGER; } @@ -107033,6 +107523,10 @@ static int lookupName( sqlite3ErrorMsg(pParse, "%s: %s.%s.%s", zErr, zDb, zTab, zCol); }else if( zTab ){ sqlite3ErrorMsg(pParse, "%s: %s.%s", zErr, zTab, zCol); + }else if( cnt==0 && ExprHasProperty(pRight,EP_DblQuoted) ){ + sqlite3ErrorMsg(pParse, "%s: \"%s\" - should this be a" + " string literal in single-quotes?", + zErr, zCol); }else{ sqlite3ErrorMsg(pParse, "%s: %s", zErr, zCol); } @@ -107066,8 +107560,12 @@ static int lookupName( ** If a generated column is referenced, set bits for every column ** of the table. */ - if( pExpr->iColumn>=0 && cnt==1 && pMatch!=0 ){ - pMatch->colUsed |= sqlite3ExprColUsed(pExpr); + if( pMatch ){ + if( pExpr->iColumn>=0 ){ + pMatch->colUsed |= sqlite3ExprColUsed(pExpr); + }else{ + pMatch->fg.rowidUsed = 1; + } } pExpr->op = eNewExprOp; @@ -107310,7 +107808,6 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ */ case TK_ID: case TK_DOT: { - const char *zColumn; const char *zTable; const char *zDb; Expr *pRight; @@ -107319,7 +107816,7 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ zDb = 0; zTable = 0; assert( !ExprHasProperty(pExpr, EP_IntValue) ); - zColumn = pExpr->u.zToken; + pRight = pExpr; }else{ Expr *pLeft = pExpr->pLeft; testcase( pNC->ncFlags & NC_IdxExpr ); @@ -107338,14 +107835,13 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ } assert( ExprUseUToken(pLeft) && ExprUseUToken(pRight) ); zTable = pLeft->u.zToken; - zColumn = pRight->u.zToken; assert( ExprUseYTab(pExpr) ); if( IN_RENAME_OBJECT ){ sqlite3RenameTokenRemap(pParse, (void*)pExpr, (void*)pRight); sqlite3RenameTokenRemap(pParse, (void*)&pExpr->y.pTab, (void*)pLeft); } } - return lookupName(pParse, zDb, zTable, zColumn, pNC, pExpr); + return lookupName(pParse, zDb, zTable, pRight, pNC, pExpr); } /* Resolve function names @@ -107521,11 +108017,9 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ #endif } } -#ifndef SQLITE_OMIT_WINDOWFUNC - else if( ExprHasProperty(pExpr, EP_WinFunc) ){ + else if( ExprHasProperty(pExpr, EP_WinFunc) || pExpr->pLeft ){ is_agg = 1; } -#endif sqlite3WalkExprList(pWalker, pList); if( is_agg ){ if( pExpr->pLeft ){ @@ -107595,6 +108089,7 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ testcase( pNC->ncFlags & NC_PartIdx ); testcase( pNC->ncFlags & NC_IdxExpr ); testcase( pNC->ncFlags & NC_GenCol ); + assert( pExpr->x.pSelect ); if( pNC->ncFlags & NC_SelfRef ){ notValidImpl(pParse, pNC, "subqueries", pExpr, 
pExpr); }else{ @@ -107603,6 +108098,7 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ assert( pNC->nRef>=nRef ); if( nRef!=pNC->nRef ){ ExprSetProperty(pExpr, EP_VarSelect); + pExpr->x.pSelect->selFlags |= SF_Correlated; } pNC->ncFlags |= NC_Subquery; } @@ -108128,6 +108624,7 @@ static int resolveSelectStep(Walker *pWalker, Select *p){ if( pOuterNC ) pOuterNC->nNestedSelect++; for(i=0; i<p->pSrc->nSrc; i++){ SrcItem *pItem = &p->pSrc->a[i]; + assert( pItem->zName!=0 || pItem->pSelect!=0 );/* Test of tag-20240424-1*/ if( pItem->pSelect && (pItem->pSelect->selFlags & SF_Resolved)==0 ){ int nRef = pOuterNC ? pOuterNC->nRef : 0; const char *zSavedContext = pParse->zAuthContext; @@ -108389,6 +108886,9 @@ SQLITE_PRIVATE int sqlite3ResolveExprNames( ** Resolve all names for all expressions in an expression list. This is ** just like sqlite3ResolveExprNames() except that it works for an expression ** list rather than a single expression. +** +** The return value is SQLITE_OK (0) for success or SQLITE_ERROR (1) for a +** failure. */ SQLITE_PRIVATE int sqlite3ResolveExprListNames( NameContext *pNC, /* Namespace to resolve expressions in. */ @@ -108397,7 +108897,7 @@ SQLITE_PRIVATE int sqlite3ResolveExprListNames( int i; int savedHasAgg = 0; Walker w; - if( pList==0 ) return WRC_Continue; + if( pList==0 ) return SQLITE_OK; w.pParse = pNC->pParse; w.xExprCallback = resolveExprStep; w.xSelectCallback = resolveSelectStep; @@ -108411,7 +108911,7 @@ SQLITE_PRIVATE int sqlite3ResolveExprListNames( #if SQLITE_MAX_EXPR_DEPTH>0 w.pParse->nHeight += pExpr->nHeight; if( sqlite3ExprCheckHeight(w.pParse, w.pParse->nHeight) ){ - return WRC_Abort; + return SQLITE_ERROR; } #endif sqlite3WalkExprNN(&w, pExpr); @@ -108428,10 +108928,10 @@ SQLITE_PRIVATE int sqlite3ResolveExprListNames( (NC_HasAgg|NC_MinMaxAgg|NC_HasWin|NC_OrderAgg); pNC->ncFlags &= ~(NC_HasAgg|NC_MinMaxAgg|NC_HasWin|NC_OrderAgg); } - if( w.pParse->nErr>0 ) return WRC_Abort; + if( w.pParse->nErr>0 ) return SQLITE_ERROR; } pNC->ncFlags |= savedHasAgg; - return WRC_Continue; + return SQLITE_OK; } /* @@ -109434,11 +109934,12 @@ SQLITE_PRIVATE void sqlite3ExprSetErrorOffset(Expr *pExpr, int iOfst){ ** appear to be quoted. If the quotes were of the form "..." (double-quotes) ** then the EP_DblQuoted flag is set on the expression node. ** -** Special case: If op==TK_INTEGER and pToken points to a string that -** can be translated into a 32-bit integer, then the token is not -** stored in u.zToken. Instead, the integer values is written -** into u.iValue and the EP_IntValue flag is set. No extra storage +** Special case (tag-20240227-a): If op==TK_INTEGER and pToken points to +** a string that can be translated into a 32-bit integer, then the token is +** not stored in u.zToken. Instead, the integer value is written +** into u.iValue and the EP_IntValue flag is set. No extra storage ** is allocated to hold the integer text and the dequote flag is ignored. +** See also tag-20240227-b.
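**
** A hedged illustration (editor's sketch; tok and p are hypothetical
** names): after
**
**     Token tok = { "1234", 4 };
**     Expr *p = sqlite3ExprAlloc(db, TK_INTEGER, &tok, 0);
**
** p->u.iValue is 1234 and EP_IntValue is set, with no u.zToken buffer
** allocated, whereas a literal such as 9999999999 does not fit in
** 32 bits and is kept as text in u.zToken instead.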
*/ SQLITE_PRIVATE Expr *sqlite3ExprAlloc( sqlite3 *db, /* Handle for sqlite3DbMallocRawNN() */ @@ -109454,7 +109955,7 @@ SQLITE_PRIVATE Expr *sqlite3ExprAlloc( if( pToken ){ if( op!=TK_INTEGER || pToken->z==0 || sqlite3GetInt32(pToken->z, &iValue)==0 ){ - nExtra = pToken->n+1; + nExtra = pToken->n+1; /* tag-20240227-a */ assert( iValue>=0 ); } } @@ -109886,6 +110387,7 @@ SQLITE_PRIVATE void sqlite3ExprAssignVarNumber(Parse *pParse, Expr *pExpr, u32 n static SQLITE_NOINLINE void sqlite3ExprDeleteNN(sqlite3 *db, Expr *p){ assert( p!=0 ); assert( db!=0 ); +exprDeleteRestart: assert( !ExprUseUValue(p) || p->u.iValue>=0 ); assert( !ExprUseYWin(p) || !ExprUseYSub(p) ); assert( !ExprUseYWin(p) || p->y.pWin!=0 || db->mallocFailed ); @@ -109901,7 +110403,6 @@ static SQLITE_NOINLINE void sqlite3ExprDeleteNN(sqlite3 *db, Expr *p){ if( !ExprHasProperty(p, (EP_TokenOnly|EP_Leaf)) ){ /* The Expr.x union is never used at the same time as Expr.pRight */ assert( (ExprUseXList(p) && p->x.pList==0) || p->pRight==0 ); - if( p->pLeft && p->op!=TK_SELECT_COLUMN ) sqlite3ExprDeleteNN(db, p->pLeft); if( p->pRight ){ assert( !ExprHasProperty(p, EP_WinFunc) ); sqlite3ExprDeleteNN(db, p->pRight); @@ -109916,6 +110417,19 @@ static SQLITE_NOINLINE void sqlite3ExprDeleteNN(sqlite3 *db, Expr *p){ } #endif } + if( p->pLeft && p->op!=TK_SELECT_COLUMN ){ + Expr *pLeft = p->pLeft; + if( !ExprHasProperty(p, EP_Static) + && !ExprHasProperty(pLeft, EP_Static) + ){ + /* Avoid unnecessary recursion on unary operators */ + sqlite3DbNNFreeNN(db, p); + p = pLeft; + goto exprDeleteRestart; + }else{ + sqlite3ExprDeleteNN(db, pLeft); + } + } } if( !ExprHasProperty(p, EP_Static) ){ sqlite3DbNNFreeNN(db, p); @@ -109948,11 +110462,11 @@ SQLITE_PRIVATE void sqlite3ClearOnOrUsing(sqlite3 *db, OnOrUsing *p){ ** ** The pExpr might be deleted immediately on an OOM error. ** -** The deferred delete is (currently) implemented by adding the -** pExpr to the pParse->pConstExpr list with a register number of 0. +** Return 0 if the delete was successfully deferred. Return non-zero +** if the delete happened immediately because of an OOM. */ -SQLITE_PRIVATE void sqlite3ExprDeferredDelete(Parse *pParse, Expr *pExpr){ - sqlite3ParserAddCleanup(pParse, sqlite3ExprDeleteGeneric, pExpr); +SQLITE_PRIVATE int sqlite3ExprDeferredDelete(Parse *pParse, Expr *pExpr){ + return 0==sqlite3ParserAddCleanup(pParse, sqlite3ExprDeleteGeneric, pExpr); } /* Invoke sqlite3RenameExprUnmap() and sqlite3ExprDelete() on the @@ -110388,17 +110902,19 @@ SQLITE_PRIVATE SrcList *sqlite3SrcListDup(sqlite3 *db, const SrcList *p, int fla pNewItem->iCursor = pOldItem->iCursor; pNewItem->addrFillSub = pOldItem->addrFillSub; pNewItem->regReturn = pOldItem->regReturn; + pNewItem->regResult = pOldItem->regResult; if( pNewItem->fg.isIndexedBy ){ pNewItem->u1.zIndexedBy = sqlite3DbStrDup(db, pOldItem->u1.zIndexedBy); + }else if( pNewItem->fg.isTabFunc ){ + pNewItem->u1.pFuncArg = + sqlite3ExprListDup(db, pOldItem->u1.pFuncArg, flags); + }else{ + pNewItem->u1.nRow = pOldItem->u1.nRow; } pNewItem->u2 = pOldItem->u2; if( pNewItem->fg.isCte ){ pNewItem->u2.pCteUse->nUse++; } - if( pNewItem->fg.isTabFunc ){ - pNewItem->u1.pFuncArg = - sqlite3ExprListDup(db, pOldItem->u1.pFuncArg, flags); - } pTab = pNewItem->pTab = pOldItem->pTab; if( pTab ){ pTab->nTabRef++; @@ -110864,6 +111380,54 @@ SQLITE_PRIVATE Expr *sqlite3ExprSimplifiedAndOr(Expr *pExpr){ return pExpr; } +/* +** pExpr is a TK_FUNCTION node. Try to determine whether or not the +** function is a constant function. 
A function is constant if all of +** the following are true: +** +** (1) It is a scalar function (not an aggregate or window function) +** (2) It has either the SQLITE_FUNC_CONSTANT or SQLITE_FUNC_SLOCHNG +** property. +** (3) All of its arguments are constants +** +** This routine sets pWalker->eCode to 0 if pExpr is not a constant. +** It makes no changes to pWalker->eCode if pExpr is constant. In +** every case, it returns WRC_Abort. +** +** Called as a service subroutine from exprNodeIsConstant(). +*/ +static SQLITE_NOINLINE int exprNodeIsConstantFunction( + Walker *pWalker, + Expr *pExpr +){ + int n; /* Number of arguments */ + ExprList *pList; /* List of arguments */ + FuncDef *pDef; /* The function */ + sqlite3 *db; /* The database */ + + assert( pExpr->op==TK_FUNCTION ); + if( ExprHasProperty(pExpr, EP_TokenOnly) + || (pList = pExpr->x.pList)==0 + ){; + n = 0; + }else{ + n = pList->nExpr; + sqlite3WalkExprList(pWalker, pList); + if( pWalker->eCode==0 ) return WRC_Abort; + } + db = pWalker->pParse->db; + pDef = sqlite3FindFunction(db, pExpr->u.zToken, n, ENC(db), 0); + if( pDef==0 + || pDef->xFinalize!=0 + || (pDef->funcFlags & (SQLITE_FUNC_CONSTANT|SQLITE_FUNC_SLOCHNG))==0 + || ExprHasProperty(pExpr, EP_WinFunc) + ){ + pWalker->eCode = 0; + return WRC_Abort; + } + return WRC_Prune; +} + /* ** These routines are Walker callbacks used to check expressions to @@ -110892,6 +111456,7 @@ SQLITE_PRIVATE Expr *sqlite3ExprSimplifiedAndOr(Expr *pExpr){ ** malformed schema error. */ static int exprNodeIsConstant(Walker *pWalker, Expr *pExpr){ + assert( pWalker->eCode>0 ); /* If pWalker->eCode is 2 then any term of the expression that comes from ** the ON or USING clauses of an outer join disqualifies the expression @@ -110911,6 +111476,8 @@ static int exprNodeIsConstant(Walker *pWalker, Expr *pExpr){ ){ if( pWalker->eCode==5 ) ExprSetProperty(pExpr, EP_FromDDL); return WRC_Continue; + }else if( pWalker->pParse ){ + return exprNodeIsConstantFunction(pWalker, pExpr); }else{ pWalker->eCode = 0; return WRC_Abort; @@ -110939,9 +111506,11 @@ static int exprNodeIsConstant(Walker *pWalker, Expr *pExpr){ case TK_IF_NULL_ROW: case TK_REGISTER: case TK_DOT: + case TK_RAISE: testcase( pExpr->op==TK_REGISTER ); testcase( pExpr->op==TK_IF_NULL_ROW ); testcase( pExpr->op==TK_DOT ); + testcase( pExpr->op==TK_RAISE ); pWalker->eCode = 0; return WRC_Abort; case TK_VARIABLE: @@ -110963,15 +111532,15 @@ static int exprNodeIsConstant(Walker *pWalker, Expr *pExpr){ return WRC_Continue; } } -static int exprIsConst(Expr *p, int initFlag, int iCur){ +static int exprIsConst(Parse *pParse, Expr *p, int initFlag){ Walker w; w.eCode = initFlag; + w.pParse = pParse; w.xExprCallback = exprNodeIsConstant; w.xSelectCallback = sqlite3SelectWalkFail; #ifdef SQLITE_DEBUG w.xSelectCallback2 = sqlite3SelectWalkAssert2; #endif - w.u.iCur = iCur; sqlite3WalkExpr(&w, p); return w.eCode; } @@ -110983,9 +111552,15 @@ static int exprIsConst(Expr *p, int initFlag, int iCur){ ** For the purposes of this function, a double-quoted string (ex: "abc") ** is considered a variable but a single-quoted string (ex: 'abc') is ** a constant. +** +** The pParse parameter may be NULL. But if it is NULL, there is no way +** to determine if function calls are constant or not, and hence all +** function calls will be considered to be non-constant. If pParse is +** not NULL, then a function call might be constant, depending on the +** function and on its parameters. 
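**
** Rough example (editor's illustration, mirroring the rules stated
** above; pAbs and pRand are hypothetical Expr trees):
**
**     sqlite3ExprIsConstant(pParse, pAbs);   returns 1 for abs(-5)+1,
**                                            since abs() carries
**                                            SQLITE_FUNC_CONSTANT and all
**                                            arguments are literals
**     sqlite3ExprIsConstant(pParse, pRand);  returns 0 for random()
**     sqlite3ExprIsConstant(0, pAbs);        returns 0, since with no
**                                            Parse object function calls
**                                            are treated as non-constant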
*/ -SQLITE_PRIVATE int sqlite3ExprIsConstant(Expr *p){ - return exprIsConst(p, 1, 0); +SQLITE_PRIVATE int sqlite3ExprIsConstant(Parse *pParse, Expr *p){ + return exprIsConst(pParse, p, 1); } /* @@ -111001,8 +111576,24 @@ SQLITE_PRIVATE int sqlite3ExprIsConstant(Expr *p){ ** can be added to the pParse->pConstExpr list and evaluated once when ** the prepared statement starts up. See sqlite3ExprCodeRunJustOnce(). */ -SQLITE_PRIVATE int sqlite3ExprIsConstantNotJoin(Expr *p){ - return exprIsConst(p, 2, 0); +static int sqlite3ExprIsConstantNotJoin(Parse *pParse, Expr *p){ + return exprIsConst(pParse, p, 2); +} + +/* +** This routine examines sub-SELECT statements as an expression is being +** walked as part of sqlite3ExprIsTableConstant(). Sub-SELECTs are considered +** constant as long as they are uncorrelated - meaning that they do not +** contain any terms from outer contexts. +*/ +static int exprSelectWalkTableConstant(Walker *pWalker, Select *pSelect){ + assert( pSelect!=0 ); + assert( pWalker->eCode==3 || pWalker->eCode==0 ); + if( (pSelect->selFlags & SF_Correlated)!=0 ){ + pWalker->eCode = 0; + return WRC_Abort; + } + return WRC_Prune; } /* @@ -111010,9 +111601,26 @@ SQLITE_PRIVATE int sqlite3ExprIsConstantNotJoin(Expr *p){ ** for any single row of the table with cursor iCur. In other words, the ** expression must not refer to any non-deterministic function nor any ** table other than iCur. +** +** Consider uncorrelated subqueries to be constants if the bAllowSubq +** parameter is true. */ -SQLITE_PRIVATE int sqlite3ExprIsTableConstant(Expr *p, int iCur){ - return exprIsConst(p, 3, iCur); +static int sqlite3ExprIsTableConstant(Expr *p, int iCur, int bAllowSubq){ + Walker w; + w.eCode = 3; + w.pParse = 0; + w.xExprCallback = exprNodeIsConstant; + if( bAllowSubq ){ + w.xSelectCallback = exprSelectWalkTableConstant; + }else{ + w.xSelectCallback = sqlite3SelectWalkFail; +#ifdef SQLITE_DEBUG + w.xSelectCallback2 = sqlite3SelectWalkAssert2; +#endif + } + w.u.iCur = iCur; + sqlite3WalkExpr(&w, p); + return w.eCode; } /* @@ -111030,7 +111638,10 @@ SQLITE_PRIVATE int sqlite3ExprIsTableConstant(Expr *p, int iCur){ ** ** (1) pExpr cannot refer to any table other than pSrc->iCursor. ** -** (2) pExpr cannot use subqueries or non-deterministic functions. +** (2a) pExpr cannot use subqueries unless the bAllowSubq parameter is +** true and the subquery is non-correlated +** +** (2b) pExpr cannot use non-deterministic functions. ** ** (3) pSrc cannot be part of the left operand for a RIGHT JOIN. ** (Is there some way to relax this constraint?) 
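**
** A hedged example of rules (1) and (2a), using hypothetical tables t1
** and t2: in
**
**     SELECT * FROM t1 WHERE t1.x = (SELECT max(y) FROM t2)
**
** the subquery is uncorrelated, so with bAllowSubq true the WHERE term
** still qualifies as a constraint on t1 alone, while the correlated
** form (SELECT max(y) FROM t2 WHERE t2.a=t1.b) disqualifies it.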
@@ -111059,7 +111670,8 @@ SQLITE_PRIVATE int sqlite3ExprIsTableConstant(Expr *p, int iCur){ SQLITE_PRIVATE int sqlite3ExprIsSingleTableConstraint( Expr *pExpr, /* The constraint */ const SrcList *pSrcList, /* Complete FROM clause */ - int iSrc /* Which element of pSrcList to use */ + int iSrc, /* Which element of pSrcList to use */ + int bAllowSubq /* Allow non-correlated subqueries */ ){ const SrcItem *pSrc = &pSrcList->a[iSrc]; if( pSrc->fg.jointype & JT_LTORJ ){ @@ -111084,7 +111696,8 @@ SQLITE_PRIVATE int sqlite3ExprIsSingleTableConstraint( } } } - return sqlite3ExprIsTableConstant(pExpr, pSrc->iCursor); /* rules (1), (2) */ + /* Rules (1), (2a), and (2b) handled by the following: */ + return sqlite3ExprIsTableConstant(pExpr, pSrc->iCursor, bAllowSubq); } @@ -111169,7 +111782,7 @@ SQLITE_PRIVATE int sqlite3ExprIsConstantOrGroupBy(Parse *pParse, Expr *p, ExprLi */ SQLITE_PRIVATE int sqlite3ExprIsConstantOrFunction(Expr *p, u8 isInit){ assert( isInit==0 || isInit==1 ); - return exprIsConst(p, 4+isInit, 0); + return exprIsConst(0, p, 4+isInit); } #ifdef SQLITE_ENABLE_CURSOR_HINTS @@ -111417,13 +112030,13 @@ static void sqlite3SetHasNullFlag(Vdbe *v, int iCur, int regHasNull){ ** The argument is an IN operator with a list (not a subquery) on the ** right-hand side. Return TRUE if that list is constant. */ -static int sqlite3InRhsIsConstant(Expr *pIn){ +static int sqlite3InRhsIsConstant(Parse *pParse, Expr *pIn){ Expr *pLHS; int res; assert( !ExprHasProperty(pIn, EP_xIsSelect) ); pLHS = pIn->pLeft; pIn->pLeft = 0; - res = sqlite3ExprIsConstant(pIn); + res = sqlite3ExprIsConstant(pParse, pIn); pIn->pLeft = pLHS; return res; } @@ -111692,7 +112305,7 @@ SQLITE_PRIVATE int sqlite3FindInIndex( if( eType==0 && (inFlags & IN_INDEX_NOOP_OK) && ExprUseXList(pX) - && (!sqlite3InRhsIsConstant(pX) || pX->x.pList->nExpr<=2) + && (!sqlite3InRhsIsConstant(pParse,pX) || pX->x.pList->nExpr<=2) ){ pParse->nTab--; /* Back out the allocation of the unused cursor */ iTab = -1; /* Cursor is not allocated */ @@ -111975,7 +112588,7 @@ SQLITE_PRIVATE void sqlite3CodeRhsOfIN( ** this code only executes once. Because for a non-constant ** expression we need to rerun this code each time. */ - if( addrOnce && !sqlite3ExprIsConstant(pE2) ){ + if( addrOnce && !sqlite3ExprIsConstant(pParse, pE2) ){ sqlite3VdbeChangeToNoop(v, addrOnce-1); sqlite3VdbeChangeToNoop(v, addrOnce); ExprClearProperty(pExpr, EP_Subrtn); @@ -113139,12 +113752,6 @@ expr_code_doover: assert( pExpr->u.zToken!=0 ); assert( pExpr->u.zToken[0]!=0 ); sqlite3VdbeAddOp2(v, OP_Variable, pExpr->iColumn, target); - if( pExpr->u.zToken[1]!=0 ){ - const char *z = sqlite3VListNumToName(pParse->pVList, pExpr->iColumn); - assert( pExpr->u.zToken[0]=='?' || (z && !strcmp(pExpr->u.zToken, z)) ); - pParse->pVList[0] = 0; /* Indicate VList may no longer be enlarged */ - sqlite3VdbeAppendP4(v, (char*)z, P4_STATIC); - } return target; } case TK_REGISTER: { @@ -113318,7 +113925,9 @@ expr_code_doover: } #endif - if( ConstFactorOk(pParse) && sqlite3ExprIsConstantNotJoin(pExpr) ){ + if( ConstFactorOk(pParse) + && sqlite3ExprIsConstantNotJoin(pParse,pExpr) + ){ /* SQL functions can be expensive. 
So try to avoid running them ** multiple times if we know they always give the same result */ return sqlite3ExprCodeRunJustOnce(pParse, pExpr, -1); } @@ -113349,7 +113958,7 @@ expr_code_doover: } for(i=0; i<pFarg->nExpr; i++){ - if( i<32 && sqlite3ExprIsConstant(pFarg->a[i].pExpr) ){ + if( i<32 && sqlite3ExprIsConstant(pParse, pFarg->a[i].pExpr) ){ testcase( i==31 ); constMask |= MASKBIT32(i); } @@ -113491,8 +114100,9 @@ expr_code_doover: if( !ExprHasProperty(pExpr, EP_Collate) ){ /* A TK_COLLATE Expr node without the EP_Collate tag is a so-called ** "SOFT-COLLATE" that is added to constraints that are pushed down - ** from outer queries into sub-queries by the push-down optimization. - ** Clear subtypes as subtypes may not cross a subquery boundary. + ** from outer queries into sub-queries by the WHERE-clause push-down + ** optimization. Clear subtypes as subtypes may not cross a subquery + ** boundary. */ assert( pExpr->pLeft ); sqlite3ExprCode(pParse, pExpr->pLeft, target); @@ -113816,7 +114426,7 @@ SQLITE_PRIVATE int sqlite3ExprCodeTemp(Parse *pParse, Expr *pExpr, int *pReg){ if( ConstFactorOk(pParse) && ALWAYS(pExpr!=0) && pExpr->op!=TK_REGISTER - && sqlite3ExprIsConstantNotJoin(pExpr) + && sqlite3ExprIsConstantNotJoin(pParse, pExpr) ){ *pReg = 0; r2 = sqlite3ExprCodeRunJustOnce(pParse, pExpr, -1); @@ -113880,7 +114490,7 @@ SQLITE_PRIVATE void sqlite3ExprCodeCopy(Parse *pParse, Expr *pExpr, int target){ ** might choose to code the expression at initialization time. */ SQLITE_PRIVATE void sqlite3ExprCodeFactorable(Parse *pParse, Expr *pExpr, int target){ - if( pParse->okConstFactor && sqlite3ExprIsConstantNotJoin(pExpr) ){ + if( pParse->okConstFactor && sqlite3ExprIsConstantNotJoin(pParse,pExpr) ){ sqlite3ExprCodeRunJustOnce(pParse, pExpr, target); }else{ sqlite3ExprCodeCopy(pParse, pExpr, target); @@ -113939,7 +114549,7 @@ SQLITE_PRIVATE int sqlite3ExprCodeExprList( sqlite3VdbeAddOp2(v, copyOp, j+srcReg-1, target+i); } }else if( (flags & SQLITE_ECEL_FACTOR)!=0 - && sqlite3ExprIsConstantNotJoin(pExpr) + && sqlite3ExprIsConstantNotJoin(pParse,pExpr) ){ sqlite3ExprCodeRunJustOnce(pParse, pExpr, target+i); }else{ @@ -115090,9 +115700,8 @@ static int agginfoPersistExprCb(Walker *pWalker, Expr *pExpr){ && pAggInfo->aCol[iAgg].pCExpr==pExpr ){ pExpr = sqlite3ExprDup(db, pExpr, 0); - if( pExpr ){ + if( pExpr && !sqlite3ExprDeferredDelete(pParse, pExpr) ){ pAggInfo->aCol[iAgg].pCExpr = pExpr; - sqlite3ExprDeferredDelete(pParse, pExpr); } } }else{ @@ -115101,9 +115710,8 @@ static int agginfoPersistExprCb(Walker *pWalker, Expr *pExpr){ && pAggInfo->aFunc[iAgg].pFExpr==pExpr ){ pExpr = sqlite3ExprDup(db, pExpr, 0); - if( pExpr ){ + if( pExpr && !sqlite3ExprDeferredDelete(pParse, pExpr) ){ pAggInfo->aFunc[iAgg].pFExpr = pExpr; - sqlite3ExprDeferredDelete(pParse, pExpr); } } } @@ -116862,7 +117470,7 @@ static int renameResolveTrigger(Parse *pParse){ /* ALWAYS() because if the table of the trigger does not exist, the ** error would have been hit before this point */ if( ALWAYS(pParse->pTriggerTab) ){ - rc = sqlite3ViewGetColumnNames(pParse, pParse->pTriggerTab); + rc = sqlite3ViewGetColumnNames(pParse, pParse->pTriggerTab)!=0; } /* Resolve symbols in WHEN clause */ @@ -117804,7 +118412,12 @@ SQLITE_PRIVATE void sqlite3AlterDropColumn(Parse *pParse, SrcList *pSrc, const T if( i==pTab->iPKey ){ sqlite3VdbeAddOp2(v, OP_Null, 0, regOut); }else{ + char aff = pTab->aCol[i].affinity; + if( aff==SQLITE_AFF_REAL ){ + pTab->aCol[i].affinity = SQLITE_AFF_NUMERIC; + } sqlite3ExprCodeGetColumnOfTable(v, pTab, iCur, i, regOut); + pTab->aCol[i].affinity = aff; } nField++; } @@
-118723,7 +119336,7 @@ static void statGet( if( iVal==2 && p->nRow*10 <= nDistinct*11 ) iVal = 1; sqlite3_str_appendf(&sStat, " %llu", iVal); #ifdef SQLITE_ENABLE_STAT4 - assert( p->current.anEq[i] ); + assert( p->current.anEq[i] || p->nRow==0 ); #endif } sqlite3ResultStrAccum(context, &sStat); @@ -118908,7 +119521,7 @@ static void analyzeOneTable( for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ int nCol; /* Number of columns in pIdx. "N" */ - int addrRewind; /* Address of "OP_Rewind iIdxCur" */ + int addrGotoEnd; /* Address of "OP_Rewind iIdxCur" */ int addrNextRow; /* Address of "next_row:" */ const char *zIdxName; /* Name of the index */ int nColTest; /* Number of columns to test for changes */ @@ -118932,9 +119545,14 @@ static void analyzeOneTable( /* ** Pseudo-code for loop that calls stat_push(): ** - ** Rewind csr - ** if eof(csr) goto end_of_scan; ** regChng = 0 + ** Rewind csr + ** if eof(csr){ + ** stat_init() with count = 0; + ** goto end_of_scan; + ** } + ** count() + ** stat_init() ** goto chng_addr_0; ** ** next_row: @@ -118973,41 +119591,36 @@ static void analyzeOneTable( sqlite3VdbeSetP4KeyInfo(pParse, pIdx); VdbeComment((v, "%s", pIdx->zName)); - /* Invoke the stat_init() function. The arguments are: + /* Implementation of the following: ** + ** regChng = 0 + ** Rewind csr + ** if eof(csr){ + ** stat_init() with count = 0; + ** goto end_of_scan; + ** } + ** count() + ** stat_init() + ** goto chng_addr_0; + */ + assert( regTemp2==regStat+4 ); + sqlite3VdbeAddOp2(v, OP_Integer, db->nAnalysisLimit, regTemp2); + + /* Arguments to stat_init(): ** (1) the number of columns in the index including the rowid ** (or for a WITHOUT ROWID table, the number of PK columns), ** (2) the number of columns in the key without the rowid/pk - ** (3) estimated number of rows in the index, - */ + ** (3) estimated number of rows in the index. */ sqlite3VdbeAddOp2(v, OP_Integer, nCol, regStat+1); assert( regRowid==regStat+2 ); sqlite3VdbeAddOp2(v, OP_Integer, pIdx->nKeyCol, regRowid); -#ifdef SQLITE_ENABLE_STAT4 - if( OptimizationEnabled(db, SQLITE_Stat4) ){ - sqlite3VdbeAddOp2(v, OP_Count, iIdxCur, regTemp); - addrRewind = sqlite3VdbeAddOp1(v, OP_Rewind, iIdxCur); - VdbeCoverage(v); - }else -#endif - { - addrRewind = sqlite3VdbeAddOp1(v, OP_Rewind, iIdxCur); - VdbeCoverage(v); - sqlite3VdbeAddOp3(v, OP_Count, iIdxCur, regTemp, 1); - } - assert( regTemp2==regStat+4 ); - sqlite3VdbeAddOp2(v, OP_Integer, db->nAnalysisLimit, regTemp2); + sqlite3VdbeAddOp3(v, OP_Count, iIdxCur, regTemp, + OptimizationDisabled(db, SQLITE_Stat4)); sqlite3VdbeAddFunctionCall(pParse, 0, regStat+1, regStat, 4, &statInitFuncdef, 0); + addrGotoEnd = sqlite3VdbeAddOp1(v, OP_Rewind, iIdxCur); + VdbeCoverage(v); - /* Implementation of the following: - ** - ** Rewind csr - ** if eof(csr) goto end_of_scan; - ** regChng = 0 - ** goto next_push_0; - ** - */ sqlite3VdbeAddOp2(v, OP_Integer, 0, regChng); addrNextRow = sqlite3VdbeCurrentAddr(v); @@ -119114,6 +119727,12 @@ static void analyzeOneTable( } /* Add the entry to the stat1 table. */ + if( pIdx->pPartIdxWhere ){ + /* Partial indexes might get a zero-entry in sqlite_stat1. But + ** an empty table is omitted from sqlite_stat1. */ + sqlite3VdbeJumpHere(v, addrGotoEnd); + addrGotoEnd = 0; + } callStatGet(pParse, regStat, STAT_GET_STAT1, regStat1); assert( "BBB"[0]==SQLITE_AFF_TEXT ); sqlite3VdbeAddOp4(v, OP_MakeRecord, regTabname, 3, regTemp, "BBB", 0); @@ -119137,6 +119756,13 @@ static void analyzeOneTable( int addrIsNull; u8 seekOp = HasRowid(pTab) ? 
OP_NotExists : OP_NotFound; + /* No STAT4 data is generated if the number of rows is zero */ + if( addrGotoEnd==0 ){ + sqlite3VdbeAddOp2(v, OP_Cast, regStat1, SQLITE_AFF_INTEGER); + addrGotoEnd = sqlite3VdbeAddOp1(v, OP_IfNot, regStat1); + VdbeCoverage(v); + } + if( doOnce ){ int mxCol = nCol; Index *pX; @@ -119189,7 +119815,7 @@ static void analyzeOneTable( #endif /* SQLITE_ENABLE_STAT4 */ /* End of analysis */ - sqlite3VdbeJumpHere(v, addrRewind); + if( addrGotoEnd ) sqlite3VdbeJumpHere(v, addrGotoEnd); } @@ -120938,7 +121564,7 @@ SQLITE_PRIVATE void sqlite3FinishCoding(Parse *pParse){ } sqlite3VdbeAddOp0(v, OP_Halt); -#if SQLITE_USER_AUTHENTICATION +#if SQLITE_USER_AUTHENTICATION && !defined(SQLITE_OMIT_SHARED_CACHE) if( pParse->nTableLock>0 && db->init.busy==0 ){ sqlite3UserAuthInit(db); if( db->auth.authLevelrc = SQLITE_ERROR; pParse->nErr++; return; } + iCsr = pParse->nTab++; regYield = ++pParse->nMem; regRec = ++pParse->nMem; regRowid = ++pParse->nMem; - assert(pParse->nTab==1); sqlite3MayAbort(pParse); - sqlite3VdbeAddOp3(v, OP_OpenWrite, 1, pParse->regRoot, iDb); + sqlite3VdbeAddOp3(v, OP_OpenWrite, iCsr, pParse->regRoot, iDb); sqlite3VdbeChangeP5(v, OPFLAG_P2ISREG); - pParse->nTab = 2; addrTop = sqlite3VdbeCurrentAddr(v) + 1; sqlite3VdbeAddOp3(v, OP_InitCoroutine, regYield, 0, addrTop); if( pParse->nErr ) return; @@ -123611,11 +124237,11 @@ SQLITE_PRIVATE void sqlite3EndTable( VdbeCoverage(v); sqlite3VdbeAddOp3(v, OP_MakeRecord, dest.iSdst, dest.nSdst, regRec); sqlite3TableAffinity(v, p, 0); - sqlite3VdbeAddOp2(v, OP_NewRowid, 1, regRowid); - sqlite3VdbeAddOp3(v, OP_Insert, 1, regRec, regRowid); + sqlite3VdbeAddOp2(v, OP_NewRowid, iCsr, regRowid); + sqlite3VdbeAddOp3(v, OP_Insert, iCsr, regRec, regRowid); sqlite3VdbeGoto(v, addrInsLoop); sqlite3VdbeJumpHere(v, addrInsLoop); - sqlite3VdbeAddOp1(v, OP_Close, 1); + sqlite3VdbeAddOp1(v, OP_Close, iCsr); } /* Compute the complete text of the CREATE statement */ @@ -123672,13 +124298,10 @@ SQLITE_PRIVATE void sqlite3EndTable( /* Test for cycles in generated columns and illegal expressions ** in CHECK constraints and in DEFAULT clauses. */ if( p->tabFlags & TF_HasGenerated ){ - sqlite3VdbeAddOp4(v, OP_SqlExec, 1, 0, 0, + sqlite3VdbeAddOp4(v, OP_SqlExec, 0x0001, 0, 0, sqlite3MPrintf(db, "SELECT*FROM\"%w\".\"%w\"", db->aDb[iDb].zDbSName, p->zName), P4_DYNAMIC); } - sqlite3VdbeAddOp4(v, OP_SqlExec, 1, 0, 0, - sqlite3MPrintf(db, "PRAGMA \"%w\".integrity_check(%Q)", - db->aDb[iDb].zDbSName, p->zName), P4_DYNAMIC); } /* Add the table to the in-memory representation of the database. @@ -123816,8 +124439,9 @@ create_view_fail: #if !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_VIRTUALTABLE) /* ** The Table structure pTable is really a VIEW. Fill in the names of -** the columns of the view in the pTable structure. Return the number -** of errors. If an error is seen leave an error message in pParse->zErrMsg. +** the columns of the view in the pTable structure. Return non-zero if +** there are errors. If an error is seen an error message is left +** in pParse->zErrMsg. 
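**
** Sketch of the adjusted contract (editor's illustration; the caller
** shown is hypothetical): treat the result as a boolean,
**
**     if( sqlite3ViewGetColumnNames(pParse, pTab) ) return;
**
** rather than as an exact error count, since pParse->nErr is now
** folded into the return value.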
*/ static SQLITE_NOINLINE int viewGetColumnNames(Parse *pParse, Table *pTable){ Table *pSelTab; /* A fake table from which we get the result set */ @@ -123940,7 +124564,7 @@ static SQLITE_NOINLINE int viewGetColumnNames(Parse *pParse, Table *pTable){ sqlite3DeleteColumnNames(db, pTable); } #endif /* SQLITE_OMIT_VIEW */ - return nErr; + return nErr + pParse->nErr; } SQLITE_PRIVATE int sqlite3ViewGetColumnNames(Parse *pParse, Table *pTable){ assert( pTable!=0 ); @@ -130238,6 +130862,8 @@ static void groupConcatValue(sqlite3_context *context){ sqlite3_result_error_toobig(context); }else if( pAccum->accError==SQLITE_NOMEM ){ sqlite3_result_error_nomem(context); + }else if( pGCC->nAccum>0 && pAccum->nChar==0 ){ + sqlite3_result_text(context, "", 1, SQLITE_STATIC); }else{ const char *zText = sqlite3_str_value(pAccum); sqlite3_result_text(context, zText, pAccum->nChar, SQLITE_TRANSIENT); @@ -132852,6 +133478,196 @@ SQLITE_PRIVATE void sqlite3AutoincrementEnd(Parse *pParse){ # define autoIncStep(A,B,C) #endif /* SQLITE_OMIT_AUTOINCREMENT */ +/* +** If argument pVal is a Select object returned by an sqlite3MultiValues() +** that was able to use the co-routine optimization, finish coding the +** co-routine. +*/ +SQLITE_PRIVATE void sqlite3MultiValuesEnd(Parse *pParse, Select *pVal){ + if( ALWAYS(pVal) && pVal->pSrc->nSrc>0 ){ + SrcItem *pItem = &pVal->pSrc->a[0]; + sqlite3VdbeEndCoroutine(pParse->pVdbe, pItem->regReturn); + sqlite3VdbeJumpHere(pParse->pVdbe, pItem->addrFillSub - 1); + } +} + +/* +** Return true if all expressions in the expression-list passed as the +** only argument are constant. +*/ +static int exprListIsConstant(Parse *pParse, ExprList *pRow){ + int ii; + for(ii=0; ii<pRow->nExpr; ii++){ + if( 0==sqlite3ExprIsConstant(pParse, pRow->a[ii].pExpr) ) return 0; + } + return 1; +} + +/* +** Return true if all expressions in the expression-list passed as the +** only argument are both constant and have no affinity. +*/ +static int exprListIsNoAffinity(Parse *pParse, ExprList *pRow){ + int ii; + if( exprListIsConstant(pParse,pRow)==0 ) return 0; + for(ii=0; ii<pRow->nExpr; ii++){ + Expr *pExpr = pRow->a[ii].pExpr; + assert( pExpr->op!=TK_RAISE ); + assert( pExpr->affExpr==0 ); + if( 0!=sqlite3ExprAffinity(pExpr) ) return 0; + } + return 1; + +} + +/* +** This function is called by the parser for the second and subsequent +** rows of a multi-row VALUES clause. Argument pLeft is the part of +** the VALUES clause already parsed, argument pRow is the vector of values +** for the new row. The Select object returned represents the complete +** VALUES clause, including the new row. +** +** There are two ways in which this may be achieved - by incremental +** coding of a co-routine (the "co-routine" method) or by returning a +** Select object equivalent to the following (the "UNION ALL" method): +** +** "pLeft UNION ALL SELECT pRow" +** +** If the VALUES clause contains a lot of rows, this compound Select +** object may consume a lot of memory. +** +** When the co-routine method is used, each row that will be returned +** by the VALUES clause is coded into part of a co-routine as it is +** passed to this function. The returned Select object is equivalent to: +** +** SELECT * FROM ( +** Select object to read co-routine +** ) +** +** The co-routine method is used in most cases. Exceptions are: +** +** a) If the current statement has a WITH clause. This is to avoid +** statements like: +** +** WITH cte AS ( VALUES('x'), ('y') ... 
) +** SELECT * FROM cte AS a, cte AS b; +** +** This will not work, as the co-routine uses a hard-coded register +** for its OP_Yield instructions, and so it is not possible for two +** cursors to iterate through it concurrently. +** +** b) The schema is currently being parsed (i.e. the VALUES clause is part +** of a schema item like a VIEW or TRIGGER). In this case there is no VM +** being generated when parsing is taking place, and so generating +** a co-routine is not possible. +** +** c) There are non-constant expressions in the VALUES clause (e.g. +** the VALUES clause is part of a correlated sub-query). +** +** d) One or more of the values in the first row of the VALUES clause +** has an affinity (i.e. is a CAST expression). This causes problems +** because the complex rules SQLite uses (see function +** sqlite3SubqueryColumnTypes() in select.c) to determine the effective +** affinity of such a column for all rows require access to all values in +** the column simultaneously. +*/ +SQLITE_PRIVATE Select *sqlite3MultiValues(Parse *pParse, Select *pLeft, ExprList *pRow){ + + if( pParse->bHasWith /* condition (a) above */ + || pParse->db->init.busy /* condition (b) above */ + || exprListIsConstant(pParse,pRow)==0 /* condition (c) above */ + || (pLeft->pSrc->nSrc==0 && + exprListIsNoAffinity(pParse,pLeft->pEList)==0) /* condition (d) above */ + || IN_SPECIAL_PARSE + ){ + /* The co-routine method cannot be used. Fall back to UNION ALL. */ + Select *pSelect = 0; + int f = SF_Values | SF_MultiValue; + if( pLeft->pSrc->nSrc ){ + sqlite3MultiValuesEnd(pParse, pLeft); + f = SF_Values; + }else if( pLeft->pPrior ){ + /* In this case set the SF_MultiValue flag only if it was set on pLeft */ + f = (f & pLeft->selFlags); + } + pSelect = sqlite3SelectNew(pParse, pRow, 0, 0, 0, 0, 0, f, 0); + pLeft->selFlags &= ~SF_MultiValue; + if( pSelect ){ + pSelect->op = TK_ALL; + pSelect->pPrior = pLeft; + pLeft = pSelect; + } + }else{ + SrcItem *p = 0; /* SrcItem that reads from co-routine */ + + if( pLeft->pSrc->nSrc==0 ){ + /* Co-routine has not yet been started and the special Select object + ** that accesses the co-routine has not yet been created. This block + ** does both those things. */ + Vdbe *v = sqlite3GetVdbe(pParse); + Select *pRet = sqlite3SelectNew(pParse, 0, 0, 0, 0, 0, 0, 0, 0); + + /* Ensure the database schema has been read. This is to ensure we have + ** the correct text encoding. */ + if( (pParse->db->mDbFlags & DBFLAG_SchemaKnownOk)==0 ){ + sqlite3ReadSchema(pParse); + } + + if( pRet ){ + SelectDest dest; + pRet->pSrc->nSrc = 1; + pRet->pPrior = pLeft->pPrior; + pRet->op = pLeft->op; + if( pRet->pPrior ) pRet->selFlags |= SF_Values; + pLeft->pPrior = 0; + pLeft->op = TK_SELECT; + assert( pLeft->pNext==0 ); + assert( pRet->pNext==0 ); + p = &pRet->pSrc->a[0]; + p->pSelect = pLeft; + p->fg.viaCoroutine = 1; + p->addrFillSub = sqlite3VdbeCurrentAddr(v) + 1; + p->regReturn = ++pParse->nMem; + p->iCursor = -1; + p->u1.nRow = 2; + sqlite3VdbeAddOp3(v,OP_InitCoroutine,p->regReturn,0,p->addrFillSub); + sqlite3SelectDestInit(&dest, SRT_Coroutine, p->regReturn); + + /* Allocate registers for the output of the co-routine. Do so so + ** that there are two unused registers immediately before those + ** used by the co-routine. This allows the code in sqlite3Insert() + ** to use these registers directly, instead of copying the output + ** of the co-routine to a separate array for processing. 
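**
** Worked illustration (editor's example with assumed numbers): if
** pParse->nMem is 10 and each row has 3 values, iSdst becomes 13, the
** co-routine yields into registers 13..15, and registers 11..12 stay
** free, so sqlite3Insert() can use register 12 for the rowid and
** registers 13..15 as the data without any copying.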
*/ + dest.iSdst = pParse->nMem + 3; + dest.nSdst = pLeft->pEList->nExpr; + pParse->nMem += 2 + dest.nSdst; + + pLeft->selFlags |= SF_MultiValue; + sqlite3Select(pParse, pLeft, &dest); + p->regResult = dest.iSdst; + assert( pParse->nErr || dest.iSdst>0 ); + pLeft = pRet; + } + }else{ + p = &pLeft->pSrc->a[0]; + assert( !p->fg.isTabFunc && !p->fg.isIndexedBy ); + p->u1.nRow++; + } + + if( pParse->nErr==0 ){ + assert( p!=0 ); + if( p->pSelect->pEList->nExpr!=pRow->nExpr ){ + sqlite3SelectWrongNumTermsError(pParse, p->pSelect); + }else{ + sqlite3ExprCodeExprList(pParse, pRow, p->regResult, 0, 0); + sqlite3VdbeAddOp1(pParse->pVdbe, OP_Yield, p->regReturn); + } + } + sqlite3ExprListDelete(pParse->db, pRow); + } + + return pLeft; +} /* Forward declaration */ static int xferOptimization( @@ -133188,25 +134004,40 @@ SQLITE_PRIVATE void sqlite3Insert( if( pSelect ){ /* Data is coming from a SELECT or from a multi-row VALUES clause. ** Generate a co-routine to run the SELECT. */ - int regYield; /* Register holding co-routine entry-point */ - int addrTop; /* Top of the co-routine */ int rc; /* Result code */ - regYield = ++pParse->nMem; - addrTop = sqlite3VdbeCurrentAddr(v) + 1; - sqlite3VdbeAddOp3(v, OP_InitCoroutine, regYield, 0, addrTop); - sqlite3SelectDestInit(&dest, SRT_Coroutine, regYield); - dest.iSdst = bIdListInOrder ? regData : 0; - dest.nSdst = pTab->nCol; - rc = sqlite3Select(pParse, pSelect, &dest); - regFromSelect = dest.iSdst; - assert( db->pParse==pParse ); - if( rc || pParse->nErr ) goto insert_cleanup; - assert( db->mallocFailed==0 ); - sqlite3VdbeEndCoroutine(v, regYield); - sqlite3VdbeJumpHere(v, addrTop - 1); /* label B: */ - assert( pSelect->pEList ); - nColumn = pSelect->pEList->nExpr; + if( pSelect->pSrc->nSrc==1 + && pSelect->pSrc->a[0].fg.viaCoroutine + && pSelect->pPrior==0 + ){ + SrcItem *pItem = &pSelect->pSrc->a[0]; + dest.iSDParm = pItem->regReturn; + regFromSelect = pItem->regResult; + nColumn = pItem->pSelect->pEList->nExpr; + ExplainQueryPlan((pParse, 0, "SCAN %S", pItem)); + if( bIdListInOrder && nColumn==pTab->nCol ){ + regData = regFromSelect; + regRowid = regData - 1; + regIns = regRowid - (IsVirtual(pTab) ? 1 : 0); + } + }else{ + int addrTop; /* Top of the co-routine */ + int regYield = ++pParse->nMem; + addrTop = sqlite3VdbeCurrentAddr(v) + 1; + sqlite3VdbeAddOp3(v, OP_InitCoroutine, regYield, 0, addrTop); + sqlite3SelectDestInit(&dest, SRT_Coroutine, regYield); + dest.iSdst = bIdListInOrder ? regData : 0; + dest.nSdst = pTab->nCol; + rc = sqlite3Select(pParse, pSelect, &dest); + regFromSelect = dest.iSdst; + assert( db->pParse==pParse ); + if( rc || pParse->nErr ) goto insert_cleanup; + assert( db->mallocFailed==0 ); + sqlite3VdbeEndCoroutine(v, regYield); + sqlite3VdbeJumpHere(v, addrTop - 1); /* label B: */ + assert( pSelect->pEList ); + nColumn = pSelect->pEList->nExpr; + } /* Set useTempTable to TRUE if the result of the SELECT statement ** should be written into a temporary table (template 4). Set to @@ -137931,6 +138762,34 @@ static const PragmaName aPragmaName[] = { /************** End of pragma.h **********************************************/ /************** Continuing where we left off in pragma.c *********************/ +/* +** When the 0x10 bit of PRAGMA optimize is set, any ANALYZE commands +** will be run with an analysis_limit set to the lesser of the value of +** the following macro or to the actual analysis_limit if it is non-zero, +** in order to prevent PRAGMA optimize from running for too long. 
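**
** Usage sketch (editor's example; bit values as documented for
** "PRAGMA optimize" further down in this file):
**
**     PRAGMA analysis_limit=1000;  -- optional explicit cap
**     PRAGMA optimize=0x10012;     -- 0x00002 ANALYZE + 0x00010 capped
**                                  -- limit + 0x10000 check all tables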
+** +** The value of 2000 is chosen empirically so that the worst-case run-time +** for PRAGMA optimize does not exceed 100 milliseconds against a variety +** of test databases on a RaspberryPI-4 compiled using -Os and without +** -DSQLITE_DEBUG. Of course, your mileage may vary. For the purpose of +** this paragraph, "worst-case" means that ANALYZE ends up being +** run on every table in the database. The worst case typically only +** happens if PRAGMA optimize is run on a database file for which ANALYZE +** has not been previously run and the 0x10000 flag is included so that +** all tables are analyzed. The usual case for PRAGMA optimize is that +** no ANALYZE commands will be run at all, or if any ANALYZE happens it +** will be against a single table, so that expected timing for PRAGMA +** optimize on a PI-4 is more like 1 millisecond or less with the 0x10000 +** flag or less than 100 microseconds without the 0x10000 flag. +** +** An analysis limit of 2000 is almost always sufficient for the query +** planner to fully characterize an index. The additional accuracy from +** a larger analysis is not usually helpful. +*/ +#ifndef SQLITE_DEFAULT_OPTIMIZE_LIMIT +# define SQLITE_DEFAULT_OPTIMIZE_LIMIT 2000 +#endif + /* ** Interpret the given string as a safety level. Return 0 for OFF, ** 1 for ON or NORMAL, 2 for FULL, and 3 for EXTRA. Return 1 for an empty or @@ -139576,7 +140435,7 @@ SQLITE_PRIVATE void sqlite3Pragma( /* Set the maximum error count */ mxErr = SQLITE_INTEGRITY_CHECK_ERROR_MAX; if( zRight ){ - if( sqlite3GetInt32(zRight, &mxErr) ){ + if( sqlite3GetInt32(pValue->z, &mxErr) ){ if( mxErr<=0 ){ mxErr = SQLITE_INTEGRITY_CHECK_ERROR_MAX; } @@ -139593,7 +140452,6 @@ SQLITE_PRIVATE void sqlite3Pragma( Hash *pTbls; /* Set of all tables in the schema */ int *aRoot; /* Array of root page numbers of all btrees */ int cnt = 0; /* Number of entries in aRoot[] */ - int mxIdx = 0; /* Maximum number of indexes for any table */ if( OMIT_TEMPDB && i==1 ) continue; if( iDb>=0 && i!=iDb ) continue; @@ -139615,7 +140473,6 @@ SQLITE_PRIVATE void sqlite3Pragma( if( pObjTab && pObjTab!=pTab ) continue; if( HasRowid(pTab) ) cnt++; for(nIdx=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, nIdx++){ cnt++; } - if( nIdx>mxIdx ) mxIdx = nIdx; } if( cnt==0 ) continue; if( pObjTab ) cnt++; @@ -139635,11 +140492,11 @@ SQLITE_PRIVATE void sqlite3Pragma( aRoot[0] = cnt; /* Make sure sufficient number of registers have been allocated */ - sqlite3TouchRegister(pParse, 8+mxIdx); + sqlite3TouchRegister(pParse, 8+cnt); sqlite3ClearTempRegCache(pParse); /* Do the b-tree integrity checks */ - sqlite3VdbeAddOp4(v, OP_IntegrityCk, 2, cnt, 1, (char*)aRoot,P4_INTARRAY); + sqlite3VdbeAddOp4(v, OP_IntegrityCk, 1, cnt, 8, (char*)aRoot,P4_INTARRAY); sqlite3VdbeChangeP5(v, (u8)i); addr = sqlite3VdbeAddOp1(v, OP_IsNull, 2); VdbeCoverage(v); sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, @@ -139649,6 +140506,36 @@ SQLITE_PRIVATE void sqlite3Pragma( integrityCheckResultRow(v); sqlite3VdbeJumpHere(v, addr); + /* Check that the indexes all have the right number of rows */ + cnt = pObjTab ? 
1 : 0; + sqlite3VdbeLoadString(v, 2, "wrong # of entries in index "); + for(x=sqliteHashFirst(pTbls); x; x=sqliteHashNext(x)){ + int iTab = 0; + Table *pTab = sqliteHashData(x); + Index *pIdx; + if( pObjTab && pObjTab!=pTab ) continue; + if( HasRowid(pTab) ){ + iTab = cnt++; + }else{ + iTab = cnt; + for(pIdx=pTab->pIndex; ALWAYS(pIdx); pIdx=pIdx->pNext){ + if( IsPrimaryKeyIndex(pIdx) ) break; + iTab++; + } + } + for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ + if( pIdx->pPartIdxWhere==0 ){ + addr = sqlite3VdbeAddOp3(v, OP_Eq, 8+cnt, 0, 8+iTab); + VdbeCoverageNeverNull(v); + sqlite3VdbeLoadString(v, 4, pIdx->zName); + sqlite3VdbeAddOp3(v, OP_Concat, 4, 2, 3); + integrityCheckResultRow(v); + sqlite3VdbeJumpHere(v, addr); + } + cnt++; + } + } + /* Make sure all the indices are constructed correctly. */ for(x=sqliteHashFirst(pTbls); x; x=sqliteHashNext(x)){ @@ -139972,21 +140859,9 @@ SQLITE_PRIVATE void sqlite3Pragma( } sqlite3VdbeAddOp2(v, OP_Next, iDataCur, loopTop); VdbeCoverage(v); sqlite3VdbeJumpHere(v, loopTop-1); - if( !isQuick ){ - sqlite3VdbeLoadString(v, 2, "wrong # of entries in index "); - for(j=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, j++){ - if( pPk==pIdx ) continue; - sqlite3VdbeAddOp2(v, OP_Count, iIdxCur+j, 3); - addr = sqlite3VdbeAddOp3(v, OP_Eq, 8+j, 0, 3); VdbeCoverage(v); - sqlite3VdbeChangeP5(v, SQLITE_NOTNULL); - sqlite3VdbeLoadString(v, 4, pIdx->zName); - sqlite3VdbeAddOp3(v, OP_Concat, 4, 2, 3); - integrityCheckResultRow(v); - sqlite3VdbeJumpHere(v, addr); - } - if( pPk ){ - sqlite3ReleaseTempRange(pParse, r2, pPk->nKeyCol); - } + if( pPk ){ + assert( !isQuick ); + sqlite3ReleaseTempRange(pParse, r2, pPk->nKeyCol); } } @@ -140284,44 +141159,63 @@ SQLITE_PRIVATE void sqlite3Pragma( ** ** The optional argument is a bitmask of optimizations to perform: ** - ** 0x0001 Debugging mode. Do not actually perform any optimizations - ** but instead return one line of text for each optimization - ** that would have been done. Off by default. + ** 0x00001 Debugging mode. Do not actually perform any optimizations - ** but instead return one line of text for each optimization - ** that would have been done. Off by default. ** - ** 0x0002 Run ANALYZE on tables that might benefit. On by default. - ** See below for additional information. + ** 0x00002 Run ANALYZE on tables that might benefit. On by default. + ** See below for additional information. ** - ** 0x0004 (Not yet implemented) Record usage and performance - ** information from the current session in the - ** database file so that it will be available to "optimize" - ** pragmas run by future database connections. + ** 0x00010 Run all ANALYZE operations using an analysis_limit that + ** is the lesser of the current analysis_limit and the + ** SQLITE_DEFAULT_OPTIMIZE_LIMIT compile-time option. + ** The default value of SQLITE_DEFAULT_OPTIMIZE_LIMIT is + ** currently (2024-02-19) set to 2000, which is such that + ** the worst case run-time for PRAGMA optimize on a 100MB + ** database will usually be less than 100 milliseconds on + ** a RaspberryPI-4 class machine. On by default. ** - ** 0x0008 (Not yet implemented) Create indexes that might have - ** been helpful to recent queries + ** 0x10000 Look at tables to see if they need to be reanalyzed + ** due to growth or shrinkage even if they have not been + ** queried during the current connection. Off by default. ** - ** The default MASK is and always shall be 0xfffe. 
0xfffe means perform all - ** of the optimizations listed above except Debug Mode, including new - ** optimizations that have not yet been invented. If new optimizations are - ** ever added that should be off by default, those off-by-default - ** optimizations will have bitmasks of 0x10000 or larger. + ** The default MASK is and always shall be 0x0fffe. In the current + ** implementation, the default mask only covers the 0x00002 optimization, + ** though additional optimizations that are covered by 0x0fffe might be + ** added in the future. Optimizations that are off by default and must + ** be explicitly requested have masks of 0x10000 or greater. ** ** DETERMINATION OF WHEN TO RUN ANALYZE ** ** In the current implementation, a table is analyzed if and only if all of ** the following are true: ** - ** (1) MASK bit 0x02 is set. + ** (1) MASK bit 0x00002 is set. + ** + ** (2) The table is an ordinary table, not a virtual table or view. + ** + ** (3) The table name does not begin with "sqlite_". ** - ** (2) The query planner used sqlite_stat1-style statistics for one or - ** more indexes of the table at some point during the lifetime of - ** the current connection. + ** (4) One or more of the following is true: + ** (4a) The 0x10000 MASK bit is set. + ** (4b) One or more indexes on the table lack an entry + ** in the sqlite_stat1 table. + ** (4c) The query planner used sqlite_stat1-style statistics for one + ** or more indexes of the table at some point during the lifetime + ** of the current connection. ** - ** (3) One or more indexes of the table are currently unanalyzed OR - ** the number of rows in the table has increased by 25 times or more - ** since the last time ANALYZE was run. + ** (5) One or more of the following is true: + ** (5a) One or more indexes on the table lack an entry + ** in the sqlite_stat1 table. (Same as 4b) + ** (5b) The number of rows in the table has increased or decreased by + ** 10-fold. In other words, the current size of the table is + ** 10 times larger than the size in sqlite_stat1 or else the + ** current size is less than 1/10th the size in sqlite_stat1. ** ** The rules for when tables are analyzed are likely to change in - ** future releases. + ** future releases. Future versions of SQLite might accept a string + ** literal argument to this pragma that contains a mnemonic description + ** of the options rather than a bitmap. 
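**
** Worked example of rule (5b) (editor's illustration, hypothetical
** sizes): a table recorded in sqlite_stat1 at roughly 1,000 rows is
** re-analyzed once it grows past about 10,000 rows or shrinks below
** about 100 rows; inside that band the OP_IfSizeBetween test emitted
** below skips the ANALYZE.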
*/ case PragTyp_OPTIMIZE: { int iDbLast; /* Loop termination point for the schema loop */ int iTabCur; /* Cursor for a table whose size needs checking */ HashElem *k; /* Loop over tables of a schema */ Schema *pSchema; /* The current schema */ Table *pTab; /* A table in the schema */ Index *pIdx; /* An index of the table */ LogEst szThreshold; /* Size threshold above which reanalysis needed */ char *zSubSql; /* SQL statement for the OP_SqlExec opcode */ u32 opMask; /* Mask of operations to perform */ + int nLimit; /* Analysis limit to use */ + int nCheck = 0; /* Number of tables to be optimized */ + int nBtree = 0; /* Number of btrees to scan */ + int nIndex; /* Number of indexes on the current table */ if( zRight ){ opMask = (u32)sqlite3Atoi(zRight); @@ -140340,6 +141238,14 @@ SQLITE_PRIVATE void sqlite3Pragma( }else{ opMask = 0xfffe; } + if( (opMask & 0x10)==0 ){ + nLimit = 0; + }else if( db->nAnalysisLimit>0 + && db->nAnalysisLimit<SQLITE_DEFAULT_OPTIMIZE_LIMIT ){ + nLimit = db->nAnalysisLimit; + }else{ + nLimit = SQLITE_DEFAULT_OPTIMIZE_LIMIT; + } iTabCur = pParse->nTab++; for(iDbLast = zDb?iDb:db->nDb-1; iDb<=iDbLast; iDb++){ if( iDb==1 ) continue; @@ -140348,23 +141254,61 @@ SQLITE_PRIVATE void sqlite3Pragma( for(k=sqliteHashFirst(&pSchema->tblHash); k; k=sqliteHashNext(k)){ pTab = (Table*)sqliteHashData(k); - /* If table pTab has not been used in a way that would benefit from - ** having analysis statistics during the current session, then skip it. - ** This also has the effect of skipping virtual tables and views */ - if( (pTab->tabFlags & TF_StatsUsed)==0 ) continue; + /* This only works for ordinary tables */ + if( !IsOrdinaryTable(pTab) ) continue; - /* Reanalyze if the table is 25 times larger than the last analysis */ - szThreshold = pTab->nRowLogEst + 46; assert( sqlite3LogEst(25)==46 ); + /* Do not scan system tables */ + if( 0==sqlite3StrNICmp(pTab->zName, "sqlite_", 7) ) continue; + + /* Find the size of the table as last recorded in sqlite_stat1. + ** If any index is unanalyzed, then the threshold is -1 to + ** indicate a new, unanalyzed index + */ + szThreshold = pTab->nRowLogEst; + nIndex = 0; for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ + nIndex++; if( !pIdx->hasStat1 ){ - szThreshold = 0; /* Always analyze if any index lacks statistics */ - break; + szThreshold = -1; /* Always analyze if any index lacks statistics */ } } - if( szThreshold ){ - sqlite3OpenTable(pParse, iTabCur, iDb, pTab, OP_OpenRead); - sqlite3VdbeAddOp3(v, OP_IfSmaller, iTabCur, - sqlite3VdbeCurrentAddr(v)+2+(opMask&1), szThreshold); + + /* If table pTab has not been used in a way that would benefit from + ** having analysis statistics during the current session, then skip it, + ** unless the 0x10000 MASK bit is set. */ + if( (pTab->tabFlags & TF_MaybeReanalyze)!=0 ){ + /* Check for size change if stat1 has been used for a query */ + }else if( opMask & 0x10000 ){ + /* Check for size change if 0x10000 is set */ + }else if( pTab->pIndex!=0 && szThreshold<0 ){ + /* Do analysis if unanalyzed indexes exist */ + }else{ + /* Otherwise, we can skip this table */ + continue; + } + + nCheck++; + if( nCheck==2 ){ + /* If ANALYZE might be invoked two or more times, hold a write + ** transaction for efficiency */ + sqlite3BeginWriteOperation(pParse, 0, iDb); + } + nBtree += nIndex+1; + + /* Reanalyze if the table is 10 times larger or smaller than + ** the last analysis. Unconditional reanalysis if there are + ** unanalyzed indexes. */ + sqlite3OpenTable(pParse, iTabCur, iDb, pTab, OP_OpenRead); + if( szThreshold>=0 ){ + const LogEst iRange = 33; /* 10x size change */ + sqlite3VdbeAddOp4Int(v, OP_IfSizeBetween, iTabCur, + sqlite3VdbeCurrentAddr(v)+2+(opMask&1), + szThreshold>=iRange ? 
szThreshold-iRange : -1, + szThreshold+iRange); + VdbeCoverage(v); + }else{ + sqlite3VdbeAddOp2(v, OP_Rewind, iTabCur, + sqlite3VdbeCurrentAddr(v)+2+(opMask&1)); VdbeCoverage(v); } zSubSql = sqlite3MPrintf(db, "ANALYZE \"%w\".\"%w\"", @@ -140374,11 +141318,27 @@ SQLITE_PRIVATE void sqlite3Pragma( sqlite3VdbeAddOp4(v, OP_String8, 0, r1, 0, zSubSql, P4_DYNAMIC); sqlite3VdbeAddOp2(v, OP_ResultRow, r1, 1); }else{ - sqlite3VdbeAddOp4(v, OP_SqlExec, 0, 0, 0, zSubSql, P4_DYNAMIC); + sqlite3VdbeAddOp4(v, OP_SqlExec, nLimit ? 0x02 : 00, nLimit, 0, + zSubSql, P4_DYNAMIC); } } } sqlite3VdbeAddOp0(v, OP_Expire); + + /* In a schema with a large number of tables and indexes, scale back + ** the analysis_limit to avoid excess run-time in the worst case. + */ + if( !db->mallocFailed && nLimit>0 && nBtree>100 ){ + int iAddr, iEnd; + VdbeOp *aOp; + nLimit = 100*nLimit/nBtree; + if( nLimit<100 ) nLimit = 100; + aOp = sqlite3VdbeGetOp(v, 0); + iEnd = sqlite3VdbeCurrentAddr(v); + for(iAddr=0; iAddrnConstraint; i++, pConstraint++){ - if( pConstraint->usable==0 ) continue; - if( pConstraint->op!=SQLITE_INDEX_CONSTRAINT_EQ ) continue; if( pConstraint->iColumn < pTab->iHidden ) continue; + if( pConstraint->op!=SQLITE_INDEX_CONSTRAINT_EQ ) continue; + if( pConstraint->usable==0 ) return SQLITE_CONSTRAINT; j = pConstraint->iColumn - pTab->iHidden; assert( j < 2 ); seen[j] = i+1; @@ -140657,16 +141617,13 @@ static int pragmaVtabBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){ j = seen[0]-1; pIdxInfo->aConstraintUsage[j].argvIndex = 1; pIdxInfo->aConstraintUsage[j].omit = 1; - if( seen[1]==0 ){ - pIdxInfo->estimatedCost = (double)1000; - pIdxInfo->estimatedRows = 1000; - return SQLITE_OK; - } pIdxInfo->estimatedCost = (double)20; pIdxInfo->estimatedRows = 20; - j = seen[1]-1; - pIdxInfo->aConstraintUsage[j].argvIndex = 2; - pIdxInfo->aConstraintUsage[j].omit = 1; + if( seen[1] ){ + j = seen[1]-1; + pIdxInfo->aConstraintUsage[j].argvIndex = 2; + pIdxInfo->aConstraintUsage[j].omit = 1; + } return SQLITE_OK; } @@ -140686,6 +141643,7 @@ static void pragmaVtabCursorClear(PragmaVtabCursor *pCsr){ int i; sqlite3_finalize(pCsr->pPragma); pCsr->pPragma = 0; + pCsr->iRowid = 0; for(i=0; i<ArraySize(pCsr->azArg); i++){ sqlite3_free(pCsr->azArg[i]); pCsr->azArg[i] = 0; } @@ -141486,7 +142444,13 @@ SQLITE_PRIVATE void *sqlite3ParserAddCleanup( void (*xCleanup)(sqlite3*,void*), /* The cleanup routine */ void *pPtr /* Pointer to object to be cleaned up */ ){ - ParseCleanup *pCleanup = sqlite3DbMallocRaw(pParse->db, sizeof(*pCleanup)); + ParseCleanup *pCleanup; + if( sqlite3FaultSim(300) ){ + pCleanup = 0; + sqlite3OomFault(pParse->db); + }else{ + pCleanup = sqlite3DbMallocRaw(pParse->db, sizeof(*pCleanup)); + } if( pCleanup ){ pCleanup->pNext = pParse->pCleanup; pParse->pCleanup = pCleanup; @@ -143608,9 +144572,16 @@ static void generateSortTail( int addrExplain; /* Address of OP_Explain instruction */ #endif - ExplainQueryPlan2(addrExplain, (pParse, 0, - "USE TEMP B-TREE FOR %sORDER BY", pSort->nOBSat>0?"RIGHT PART OF ":"") - ); + nKey = pOrderBy->nExpr - pSort->nOBSat; + if( pSort->nOBSat==0 || nKey==1 ){ + ExplainQueryPlan2(addrExplain, (pParse, 0, + "USE TEMP B-TREE FOR %sORDER BY", pSort->nOBSat?"LAST TERM OF ":"" + )); + }else{ + ExplainQueryPlan2(addrExplain, (pParse, 0, + "USE TEMP B-TREE FOR LAST %d TERMS OF ORDER BY", nKey + )); + } sqlite3VdbeScanStatusRange(v, addrExplain,pSort->addrPush,pSort->addrPushEnd); sqlite3VdbeScanStatusCounters(v, addrExplain, addrExplain, pSort->addrPush); @@ -143648,7 +144619,6 @@ static void 
generateSortTail( regRow = sqlite3GetTempRange(pParse, nColumn); } } - nKey = pOrderBy->nExpr - pSort->nOBSat; if( pSort->sortFlags & SORTFLAG_UseSorter ){ int regSortOut = ++pParse->nMem; iSortTab = pParse->nTab++; @@ -144253,8 +145223,7 @@ SQLITE_PRIVATE void sqlite3SubqueryColumnTypes( NameContext sNC; assert( pSelect!=0 ); - testcase( (pSelect->selFlags & SF_Resolved)==0 ); - assert( (pSelect->selFlags & SF_Resolved)!=0 || IN_RENAME_OBJECT ); + assert( (pSelect->selFlags & SF_Resolved)!=0 ); assert( pTab->nCol==pSelect->pEList->nExpr || pParse->nErr>0 ); assert( aff==SQLITE_AFF_NONE || aff==SQLITE_AFF_BLOB ); if( db->mallocFailed || IN_RENAME_OBJECT ) return; @@ -144265,17 +145234,22 @@ SQLITE_PRIVATE void sqlite3SubqueryColumnTypes( for(i=0, pCol=pTab->aCol; i<pTab->nCol; i++, pCol++){ const char *zType; i64 n; + int m = 0; + Select *pS2 = pSelect; pTab->tabFlags |= (pCol->colFlags & COLFLAG_NOINSERT); p = a[i].pExpr; /* pCol->szEst = ... // Column size est for SELECT tables never used */ pCol->affinity = sqlite3ExprAffinity(p); + while( pCol->affinity<=SQLITE_AFF_NONE && pS2->pNext!=0 ){ + m |= sqlite3ExprDataType(pS2->pEList->a[i].pExpr); + pS2 = pS2->pNext; + pCol->affinity = sqlite3ExprAffinity(pS2->pEList->a[i].pExpr); + } if( pCol->affinity<=SQLITE_AFF_NONE ){ pCol->affinity = aff; } - if( pCol->affinity>=SQLITE_AFF_TEXT && pSelect->pNext ){ - int m = 0; - Select *pS2; - for(m=0, pS2=pSelect->pNext; pS2; pS2=pS2->pNext){ + if( pCol->affinity>=SQLITE_AFF_TEXT && (pS2->pNext || pS2!=pSelect) ){ + for(pS2=pS2->pNext; pS2; pS2=pS2->pNext){ m |= sqlite3ExprDataType(pS2->pEList->a[i].pExpr); } if( pCol->affinity==SQLITE_AFF_TEXT && (m&0x01)!=0 ){ @@ -144305,12 +145279,12 @@ SQLITE_PRIVATE void sqlite3SubqueryColumnTypes( } } if( zType ){ - i64 m = sqlite3Strlen30(zType); + const i64 k = sqlite3Strlen30(zType); n = sqlite3Strlen30(pCol->zCnName); - pCol->zCnName = sqlite3DbReallocOrFree(db, pCol->zCnName, n+m+2); + pCol->zCnName = sqlite3DbReallocOrFree(db, pCol->zCnName, n+k+2); pCol->colFlags &= ~(COLFLAG_HASTYPE|COLFLAG_HASCOLL); if( pCol->zCnName ){ - memcpy(&pCol->zCnName[n+1], zType, m+1); + memcpy(&pCol->zCnName[n+1], zType, k+1); pCol->colFlags |= COLFLAG_HASTYPE; } } @@ -146707,7 +147681,7 @@ static void constInsert( ){ int i; assert( pColumn->op==TK_COLUMN ); - assert( sqlite3ExprIsConstant(pValue) ); + assert( sqlite3ExprIsConstant(pConst->pParse, pValue) ); if( ExprHasProperty(pColumn, EP_FixedCol) ) return; if( sqlite3ExprAffinity(pValue)!=0 ) return; @@ -146765,10 +147739,10 @@ static void findConstInWhere(WhereConst *pConst, Expr *pExpr){ pLeft = pExpr->pLeft; assert( pRight!=0 ); assert( pLeft!=0 ); - if( pRight->op==TK_COLUMN && sqlite3ExprIsConstant(pLeft) ){ + if( pRight->op==TK_COLUMN && sqlite3ExprIsConstant(pConst->pParse, pLeft) ){ constInsert(pConst,pRight,pLeft,pExpr); } - if( pLeft->op==TK_COLUMN && sqlite3ExprIsConstant(pRight) ){ + if( pLeft->op==TK_COLUMN && sqlite3ExprIsConstant(pConst->pParse, pRight) ){ constInsert(pConst,pLeft,pRight,pExpr); } } @@ -146989,6 +147963,18 @@ static int pushDownWindowCheck(Parse *pParse, Select *pSubq, Expr *pExpr){ ** The hope is that the terms added to the inner query will make it more ** efficient. ** +** NAME AMBIGUITY +** +** This optimization is called the "WHERE-clause push-down optimization". 
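**
** Minimal example (editor's sketch, hypothetical schema): in
**
**     SELECT * FROM (SELECT a, b FROM t1) AS sub WHERE sub.a=5;
**
** the term a=5 can be pushed into the subquery, where it may be
** evaluated earlier or satisfied by an index on t1.a.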
+** +** Do not confuse this optimization with another unrelated optimization +** with a similar name: The "MySQL push-down optimization" causes WHERE +** clause terms that can be evaluated using only the index and without +** reference to the table to be run first, so that if they are false, +** unnecessary table seeks are avoided. +** +** RULES +** ** Do not attempt this optimization if: ** ** (1) (** This restriction was removed on 2017-09-29. We used to @@ -147054,10 +148040,10 @@ static int pushDownWindowCheck(Parse *pParse, Select *pSubq, Expr *pExpr){ ** (9c) There is a RIGHT JOIN (or FULL JOIN) in between the ON/USING ** clause and the subquery. ** -** Without this restriction, the push-down optimization might move -** the ON/USING filter expression from the left side of a RIGHT JOIN -** over to the right side, which leads to incorrect answers. See -** also restriction (6) in sqlite3ExprIsSingleTableConstraint(). +** Without this restriction, the WHERE-clause push-down optimization +** might move the ON/USING filter expression from the left side of a +** RIGHT JOIN over to the right side, which leads to incorrect answers. +** See also restriction (6) in sqlite3ExprIsSingleTableConstraint(). ** ** (10) The inner query is not the right-hand table of a RIGHT JOIN. ** @@ -147189,7 +148175,7 @@ static int pushDownWhereTerms( } #endif - if( sqlite3ExprIsSingleTableConstraint(pWhere, pSrcList, iSrc) ){ + if( sqlite3ExprIsSingleTableConstraint(pWhere, pSrcList, iSrc, 1) ){ nChng++; pSubq->selFlags |= SF_PushDown; while( pSubq ){ @@ -148324,8 +149310,7 @@ static void selectAddSubqueryTypeInfo(Walker *pWalker, Select *p){ if( p->selFlags & SF_HasTypeInfo ) return; p->selFlags |= SF_HasTypeInfo; pParse = pWalker->pParse; - testcase( (p->selFlags & SF_Resolved)==0 ); - assert( (p->selFlags & SF_Resolved) || IN_RENAME_OBJECT ); + assert( (p->selFlags & SF_Resolved) ); pTabList = p->pSrc; for(i=0, pFrom=pTabList->a; i<pTabList->nSrc; i++, pFrom++){ Table *pTab = pFrom->pTab; @@ -148395,6 +149380,8 @@ SQLITE_PRIVATE void sqlite3SelectPrep( */ static void printAggInfo(AggInfo *pAggInfo){ int ii; + sqlite3DebugPrintf("AggInfo %d/%p:\n", + pAggInfo->selId, pAggInfo); for(ii=0; ii<pAggInfo->nColumn; ii++){ struct AggInfo_col *pCol = &pAggInfo->aCol[ii]; sqlite3DebugPrintf( @@ -149585,7 +150572,7 @@ SQLITE_PRIVATE int sqlite3Select( /* Generate code for all sub-queries in the FROM clause */ pSub = pItem->pSelect; - if( pSub==0 ) continue; + if( pSub==0 || pItem->addrFillSub!=0 ) continue; /* The code for a subquery should only be generated once. 
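** (A VALUES co-routine built by sqlite3MultiValues(), as in a
** hypothetical INSERT INTO t VALUES(1),(2),(3), arrives here with
** addrFillSub already set, which is why such items are now skipped
** rather than coded a second time.)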
*/
    assert( pItem->addrFillSub==0 );
@@ -149616,7 +150603,7 @@ SQLITE_PRIVATE int sqlite3Select(
 #endif
      assert( pItem->pSelect && (pItem->pSelect->selFlags & SF_PushDown)!=0 );
    }else{
-      TREETRACE(0x4000,pParse,p,("Push-down not possible\n"));
+      TREETRACE(0x4000,pParse,p,("WHERE-clause push-down not possible\n"));
    }
 
    /* Convert unused result columns of the subquery into simple NULL
@@ -150497,6 +151484,12 @@ select_end:
  sqlite3ExprListDelete(db, pMinMaxOrderBy);
 #ifdef SQLITE_DEBUG
  if( pAggInfo && !db->mallocFailed ){
+#if TREETRACE_ENABLED
+    if( sqlite3TreeTrace & 0x20 ){
+      TREETRACE(0x20,pParse,p,("Finished with AggInfo\n"));
+      printAggInfo(pAggInfo);
+    }
+#endif
    for(i=0; i<pAggInfo->nColumn; i++){
      Expr *pExpr = pAggInfo->aCol[i].pCExpr;
      if( pExpr==0 ) continue;
@@ -151678,6 +152671,72 @@ static ExprList *sqlite3ExpandReturning(
  return pNew;
 }
 
+/* If the Expr node is a subquery or an EXISTS operator or an IN operator that
+** uses a subquery, and if the subquery is SF_Correlated, then mark the
+** expression as EP_VarSelect.
+*/
+static int sqlite3ReturningSubqueryVarSelect(Walker *NotUsed, Expr *pExpr){
+  UNUSED_PARAMETER(NotUsed);
+  if( ExprUseXSelect(pExpr)
+   && (pExpr->x.pSelect->selFlags & SF_Correlated)!=0
+  ){
+    testcase( ExprHasProperty(pExpr, EP_VarSelect) );
+    ExprSetProperty(pExpr, EP_VarSelect);
+  }
+  return WRC_Continue;
+}
+
+
+/*
+** If the SELECT references the table pWalker->u.pTab, then do two things:
+**
+**    (1) Mark the SELECT as SF_Correlated.
+**    (2) Set pWalker->eCode to non-zero so that the caller will know
+**        that (1) has happened.
+*/
+static int sqlite3ReturningSubqueryCorrelated(Walker *pWalker, Select *pSelect){
+  int i;
+  SrcList *pSrc;
+  assert( pSelect!=0 );
+  pSrc = pSelect->pSrc;
+  assert( pSrc!=0 );
+  for(i=0; i<pSrc->nSrc; i++){
+    if( pSrc->a[i].pTab==pWalker->u.pTab ){
+      testcase( pSelect->selFlags & SF_Correlated );
+      pSelect->selFlags |= SF_Correlated;
+      pWalker->eCode = 1;
+      break;
+    }
+  }
+  return WRC_Continue;
+}
+
+/*
+** Scan the expression list that is the argument to RETURNING looking
+** for subqueries that depend on the table which is being modified in the
+** statement that is hosting the RETURNING clause (pTab).  Mark all such
+** subqueries as SF_Correlated.  If the subqueries are part of an
+** expression, mark the expression as EP_VarSelect.
+**
+** https://sqlite.org/forum/forumpost/2c83569ce8945d39
+*/
+static void sqlite3ProcessReturningSubqueries(
+  ExprList *pEList,
+  Table *pTab
+){
+  Walker w;
+  memset(&w, 0, sizeof(w));
+  w.xExprCallback = sqlite3ExprWalkNoop;
+  w.xSelectCallback = sqlite3ReturningSubqueryCorrelated;
+  w.u.pTab = pTab;
+  sqlite3WalkExprList(&w, pEList);
+  if( w.eCode ){
+    w.xExprCallback = sqlite3ReturningSubqueryVarSelect;
+    w.xSelectCallback = sqlite3SelectWalkNoop;
+    sqlite3WalkExprList(&w, pEList);
+  }
+}
+
 /*
 ** Generate code for the RETURNING trigger.
Unlike other triggers ** that invoke a subprogram in the bytecode, the code for RETURNING @@ -151714,6 +152773,7 @@ static void codeReturningTrigger( sSelect.pSrc = &sFrom; sFrom.nSrc = 1; sFrom.a[0].pTab = pTab; + sFrom.a[0].zName = pTab->zName; /* tag-20240424-1 */ sFrom.a[0].iCursor = -1; sqlite3SelectPrep(pParse, &sSelect, 0); if( pParse->nErr==0 ){ @@ -151740,6 +152800,7 @@ static void codeReturningTrigger( int i; int nCol = pNew->nExpr; int reg = pParse->nMem+1; + sqlite3ProcessReturningSubqueries(pNew, pTab); pParse->nMem += nCol+2; pReturning->iRetReg = reg; for(i=0; ipVtabCtx = &sCtx; pTab->nTabRef++; rc = xConstruct(db, pMod->pAux, nArg, azArg, &pVTable->pVtab, &zErr); + assert( pTab!=0 ); + assert( pTab->nTabRef>1 || rc!=SQLITE_OK ); sqlite3DeleteTable(db, pTab); db->pVtabCtx = sCtx.pPrior; if( rc==SQLITE_NOMEM ) sqlite3OomFault(db); @@ -154972,7 +156035,7 @@ static int vtabCallConstructor( pVTable->nRef = 1; if( sCtx.bDeclared==0 ){ const char *zFormat = "vtable constructor did not declare schema: %s"; - *pzErr = sqlite3MPrintf(db, zFormat, pTab->zName); + *pzErr = sqlite3MPrintf(db, zFormat, zModuleName); sqlite3VtabUnlock(pVTable); rc = SQLITE_ERROR; }else{ @@ -155150,12 +156213,30 @@ SQLITE_API int sqlite3_declare_vtab(sqlite3 *db, const char *zCreateTable){ Table *pTab; Parse sParse; int initBusy; + int i; + const unsigned char *z; + static const u8 aKeyword[] = { TK_CREATE, TK_TABLE, 0 }; #ifdef SQLITE_ENABLE_API_ARMOR if( !sqlite3SafetyCheckOk(db) || zCreateTable==0 ){ return SQLITE_MISUSE_BKPT; } #endif + + /* Verify that the first two keywords in the CREATE TABLE statement + ** really are "CREATE" and "TABLE". If this is not the case, then + ** sqlite3_declare_vtab() is being misused. + */ + z = (const unsigned char*)zCreateTable; + for(i=0; aKeyword[i]; i++){ + int tokenType = 0; + do{ z += sqlite3GetToken(z, &tokenType); }while( tokenType==TK_SPACE ); + if( tokenType!=aKeyword[i] ){ + sqlite3ErrorWithMsg(db, SQLITE_ERROR, "syntax error"); + return SQLITE_ERROR; + } + } + sqlite3_mutex_enter(db->mutex); pCtx = db->pVtabCtx; if( !pCtx || pCtx->bDeclared ){ @@ -155163,6 +156244,7 @@ SQLITE_API int sqlite3_declare_vtab(sqlite3 *db, const char *zCreateTable){ sqlite3_mutex_leave(db->mutex); return SQLITE_MISUSE_BKPT; } + pTab = pCtx->pTab; assert( IsVirtual(pTab) ); @@ -155176,11 +156258,10 @@ SQLITE_API int sqlite3_declare_vtab(sqlite3 *db, const char *zCreateTable){ initBusy = db->init.busy; db->init.busy = 0; sParse.nQueryLoop = 1; - if( SQLITE_OK==sqlite3RunParser(&sParse, zCreateTable) - && ALWAYS(sParse.pNewTable!=0) - && ALWAYS(!db->mallocFailed) - && IsOrdinaryTable(sParse.pNewTable) - ){ + if( SQLITE_OK==sqlite3RunParser(&sParse, zCreateTable) ){ + assert( sParse.pNewTable!=0 ); + assert( !db->mallocFailed ); + assert( IsOrdinaryTable(sParse.pNewTable) ); assert( sParse.zErrMsg==0 ); if( !pTab->aCol ){ Table *pNew = sParse.pNewTable; @@ -157675,6 +158756,27 @@ static SQLITE_NOINLINE void filterPullDown( } } +/* +** Loop pLoop is a WHERE_INDEXED level that uses at least one IN(...) +** operator. Return true if level pLoop is guaranteed to visit only one +** row for each key generated for the index. 
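+**
+** For example (hypothetical schema), given:
+**
+**       CREATE TABLE t1(a INT UNIQUE, b);
+**       SELECT b FROM t1 WHERE a IN (1,2,3);
+**
+** the loop visits at most one row for each of the three IN keys, since
+** the UNIQUE index on t1.a is fully constrained by equality on every one
+** of its key columns.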
+*/ +static int whereLoopIsOneRow(WhereLoop *pLoop){ + if( pLoop->u.btree.pIndex->onError + && pLoop->nSkip==0 + && pLoop->u.btree.nEq==pLoop->u.btree.pIndex->nKeyCol + ){ + int ii; + for(ii=0; iiu.btree.nEq; ii++){ + if( pLoop->aLTerm[ii]->eOperator & (WO_IS|WO_ISNULL) ){ + return 0; + } + } + return 1; + } + return 0; +} + /* ** Generate code for the start of the iLevel-th loop in the WHERE clause ** implementation described by pWInfo. @@ -157753,7 +158855,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( if( pLevel->iFrom>0 && (pTabItem[0].fg.jointype & JT_LEFT)!=0 ){ pLevel->iLeftJoin = ++pParse->nMem; sqlite3VdbeAddOp2(v, OP_Integer, 0, pLevel->iLeftJoin); - VdbeComment((v, "init LEFT JOIN no-match flag")); + VdbeComment((v, "init LEFT JOIN match flag")); } /* Compute a safe address to jump to if we discover that the table for @@ -158422,7 +159524,9 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( } /* Record the instruction used to terminate the loop. */ - if( pLoop->wsFlags & WHERE_ONEROW ){ + if( (pLoop->wsFlags & WHERE_ONEROW) + || (pLevel->u.in.nIn && regBignull==0 && whereLoopIsOneRow(pLoop)) + ){ pLevel->op = OP_Noop; }else if( bRev ){ pLevel->op = OP_Prev; @@ -158812,6 +159916,12 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( ** iLoop==3: Code all remaining expressions. ** ** An effort is made to skip unnecessary iterations of the loop. + ** + ** This optimization of causing simple query restrictions to occur before + ** more complex one is call the "push-down" optimization in MySQL. Here + ** in SQLite, the name is "MySQL push-down", since there is also another + ** totally unrelated optimization called "WHERE-clause push-down". + ** Sometimes the qualifier is omitted, resulting in an ambiguity, so beware. */ iLoop = (pIdx ? 1 : 2); do{ @@ -159062,7 +160172,16 @@ SQLITE_PRIVATE SQLITE_NOINLINE void sqlite3WhereRightJoinLoop( pRJ->regReturn); for(k=0; ka[k].pWLoop->iTab == pWInfo->a[k].iFrom ); + pRight = &pWInfo->pTabList->a[pWInfo->a[k].iFrom]; mAll |= pWInfo->a[k].pWLoop->maskSelf; + if( pRight->fg.viaCoroutine ){ + sqlite3VdbeAddOp3( + v, OP_Null, 0, pRight->regResult, + pRight->regResult + pRight->pSelect->pEList->nExpr-1 + ); + } sqlite3VdbeAddOp1(v, OP_NullRow, pWInfo->a[k].iTabCur); iIdxCur = pWInfo->a[k].iIdxCur; if( iIdxCur ){ @@ -160119,7 +161238,7 @@ static SQLITE_NOINLINE int exprMightBeIndexed2( if( pIdx->aiColumn[i]!=XN_EXPR ) continue; assert( pIdx->bHasExpr ); if( sqlite3ExprCompareSkip(pExpr,pIdx->aColExpr->a[i].pExpr,iCur)==0 - && pExpr->op!=TK_STRING + && !sqlite3ExprIsConstant(0,pIdx->aColExpr->a[i].pExpr) ){ aiCurCol[0] = iCur; aiCurCol[1] = XN_EXPR; @@ -160768,6 +161887,7 @@ SQLITE_PRIVATE void SQLITE_NOINLINE sqlite3WhereAddLimit(WhereClause *pWC, Selec continue; } if( pWC->a[ii].leftCursor!=iCsr ) return; + if( pWC->a[ii].prereqRight!=0 ) return; } /* Check condition (5). Return early if it is not met. */ @@ -160782,12 +161902,14 @@ SQLITE_PRIVATE void SQLITE_NOINLINE sqlite3WhereAddLimit(WhereClause *pWC, Selec /* All conditions are met. Add the terms to the where-clause object. 
*/ assert( p->pLimit->op==TK_LIMIT ); - whereAddLimitExpr(pWC, p->iLimit, p->pLimit->pLeft, - iCsr, SQLITE_INDEX_CONSTRAINT_LIMIT); - if( p->iOffset>0 ){ + if( p->iOffset!=0 && (p->selFlags & SF_Compound)==0 ){ whereAddLimitExpr(pWC, p->iOffset, p->pLimit->pRight, iCsr, SQLITE_INDEX_CONSTRAINT_OFFSET); } + if( p->iOffset==0 || (p->selFlags & SF_Compound)==0 ){ + whereAddLimitExpr(pWC, p->iLimit, p->pLimit->pLeft, + iCsr, SQLITE_INDEX_CONSTRAINT_LIMIT); + } } } @@ -161305,6 +162427,42 @@ static Expr *whereRightSubexprIsColumn(Expr *p){ return 0; } +/* +** Term pTerm is guaranteed to be a WO_IN term. It may be a component term +** of a vector IN expression of the form "(x, y, ...) IN (SELECT ...)". +** This function checks to see if the term is compatible with an index +** column with affinity idxaff (one of the SQLITE_AFF_XYZ values). If so, +** it returns a pointer to the name of the collation sequence (e.g. "BINARY" +** or "NOCASE") used by the comparison in pTerm. If it is not compatible +** with affinity idxaff, NULL is returned. +*/ +static SQLITE_NOINLINE const char *indexInAffinityOk( + Parse *pParse, + WhereTerm *pTerm, + u8 idxaff +){ + Expr *pX = pTerm->pExpr; + Expr inexpr; + + assert( pTerm->eOperator & WO_IN ); + + if( sqlite3ExprIsVector(pX->pLeft) ){ + int iField = pTerm->u.x.iField - 1; + inexpr.flags = 0; + inexpr.op = TK_EQ; + inexpr.pLeft = pX->pLeft->x.pList->a[iField].pExpr; + assert( ExprUseXSelect(pX) ); + inexpr.pRight = pX->x.pSelect->pEList->a[iField].pExpr; + pX = &inexpr; + } + + if( sqlite3IndexAffinityOk(pX, idxaff) ){ + CollSeq *pRet = sqlite3ExprCompareCollSeq(pParse, pX); + return pRet ? pRet->zName : sqlite3StrBINARY; + } + return 0; +} + /* ** Advance to the next WhereTerm that matches according to the criteria ** established when the pScan object was initialized by whereScanInit(). @@ -161355,16 +162513,24 @@ static WhereTerm *whereScanNext(WhereScan *pScan){ if( (pTerm->eOperator & pScan->opMask)!=0 ){ /* Verify the affinity and collating sequence match */ if( pScan->zCollName && (pTerm->eOperator & WO_ISNULL)==0 ){ - CollSeq *pColl; + const char *zCollName; Parse *pParse = pWC->pWInfo->pParse; pX = pTerm->pExpr; - if( !sqlite3IndexAffinityOk(pX, pScan->idxaff) ){ - continue; + + if( (pTerm->eOperator & WO_IN) ){ + zCollName = indexInAffinityOk(pParse, pTerm, pScan->idxaff); + if( !zCollName ) continue; + }else{ + CollSeq *pColl; + if( !sqlite3IndexAffinityOk(pX, pScan->idxaff) ){ + continue; + } + assert(pX->pLeft); + pColl = sqlite3ExprCompareCollSeq(pParse, pX); + zCollName = pColl ? pColl->zName : sqlite3StrBINARY; } - assert(pX->pLeft); - pColl = sqlite3ExprCompareCollSeq(pParse, pX); - if( pColl==0 ) pColl = pParse->db->pDfltColl; - if( sqlite3StrICmp(pColl->zName, pScan->zCollName) ){ + + if( sqlite3StrICmp(zCollName, pScan->zCollName) ){ continue; } } @@ -161716,9 +162882,13 @@ static void translateColumnToCopy( ** are no-ops. 
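 **
 ** (The output is gated by the 0x10 bit of the global sqlite3WhereTrace
 ** variable; in a SQLITE_DEBUG build of the CLI that bit can be set with,
 ** for example, the ".wheretrace 0x10" dot-command.)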
*/ #if !defined(SQLITE_OMIT_VIRTUALTABLE) && defined(WHERETRACE_ENABLED) -static void whereTraceIndexInfoInputs(sqlite3_index_info *p){ +static void whereTraceIndexInfoInputs( + sqlite3_index_info *p, /* The IndexInfo object */ + Table *pTab /* The TABLE that is the virtual table */ +){ int i; if( (sqlite3WhereTrace & 0x10)==0 ) return; + sqlite3DebugPrintf("sqlite3_index_info inputs for %s:\n", pTab->zName); for(i=0; inConstraint; i++){ sqlite3DebugPrintf( " constraint[%d]: col=%d termid=%d op=%d usabled=%d collseq=%s\n", @@ -161736,9 +162906,13 @@ static void whereTraceIndexInfoInputs(sqlite3_index_info *p){ p->aOrderBy[i].desc); } } -static void whereTraceIndexInfoOutputs(sqlite3_index_info *p){ +static void whereTraceIndexInfoOutputs( + sqlite3_index_info *p, /* The IndexInfo object */ + Table *pTab /* The TABLE that is the virtual table */ +){ int i; if( (sqlite3WhereTrace & 0x10)==0 ) return; + sqlite3DebugPrintf("sqlite3_index_info outputs for %s:\n", pTab->zName); for(i=0; inConstraint; i++){ sqlite3DebugPrintf(" usage[%d]: argvIdx=%d omit=%d\n", i, @@ -161752,8 +162926,8 @@ static void whereTraceIndexInfoOutputs(sqlite3_index_info *p){ sqlite3DebugPrintf(" estimatedRows=%lld\n", p->estimatedRows); } #else -#define whereTraceIndexInfoInputs(A) -#define whereTraceIndexInfoOutputs(A) +#define whereTraceIndexInfoInputs(A,B) +#define whereTraceIndexInfoOutputs(A,B) #endif /* @@ -161937,7 +163111,7 @@ static SQLITE_NOINLINE void constructAutomaticIndex( ** WHERE clause (or the ON clause of a LEFT join) that constrain which ** rows of the target table (pSrc) that can be used. */ if( (pTerm->wtFlags & TERM_VIRTUAL)==0 - && sqlite3ExprIsSingleTableConstraint(pExpr, pTabList, pLevel->iFrom) + && sqlite3ExprIsSingleTableConstraint(pExpr, pTabList, pLevel->iFrom, 0) ){ pPartial = sqlite3ExprAnd(pParse, pPartial, sqlite3ExprDup(pParse->db, pExpr, 0)); @@ -161979,7 +163153,7 @@ static SQLITE_NOINLINE void constructAutomaticIndex( ** if they go out of sync. 
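 **
 ** For example (hypothetical schema), in:
 **
 **       CREATE VIEW v AS SELECT a, b, c FROM t1;
 **       SELECT * FROM t2 JOIN v ON t2.x=v.a;
 **
 ** an automatic index constructed on v must carry every column of v,
 ** since later code generation might legitimately reference any of them.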
*/ if( IsView(pTable) ){ - extraCols = ALLBITS; + extraCols = ALLBITS & ~idxCols; }else{ extraCols = pSrc->colUsed & (~idxCols | MASKBIT(BMS-1)); } @@ -162206,7 +163380,7 @@ static SQLITE_NOINLINE void sqlite3ConstructBloomFilter( for(pTerm=pWInfo->sWC.a; pTermpExpr; if( (pTerm->wtFlags & TERM_VIRTUAL)==0 - && sqlite3ExprIsSingleTableConstraint(pExpr, pTabList, iSrc) + && sqlite3ExprIsSingleTableConstraint(pExpr, pTabList, iSrc, 0) ){ sqlite3ExprIfFalse(pParse, pTerm->pExpr, addrCont, SQLITE_JUMPIFNULL); } @@ -162332,7 +163506,7 @@ static sqlite3_index_info *allocateIndexInfo( Expr *pE2; /* Skip over constant terms in the ORDER BY clause */ - if( sqlite3ExprIsConstant(pExpr) ){ + if( sqlite3ExprIsConstant(0, pExpr) ){ continue; } @@ -162367,7 +163541,7 @@ static sqlite3_index_info *allocateIndexInfo( } if( i==n ){ nOrderBy = n; - if( (pWInfo->wctrlFlags & WHERE_DISTINCTBY) ){ + if( (pWInfo->wctrlFlags & WHERE_DISTINCTBY) && !pSrc->fg.rowidUsed ){ eDistinct = 2 + ((pWInfo->wctrlFlags & WHERE_SORTBYGROUP)!=0); }else if( pWInfo->wctrlFlags & WHERE_GROUPBY ){ eDistinct = 1; @@ -162444,7 +163618,7 @@ static sqlite3_index_info *allocateIndexInfo( pIdxInfo->nConstraint = j; for(i=j=0; ia[i].pExpr; - if( sqlite3ExprIsConstant(pExpr) ) continue; + if( sqlite3ExprIsConstant(0, pExpr) ) continue; assert( pExpr->op==TK_COLUMN || (pExpr->op==TK_COLLATE && pExpr->pLeft->op==TK_COLUMN && pExpr->iColumn==pExpr->pLeft->iColumn) ); @@ -162496,11 +163670,11 @@ static int vtabBestIndex(Parse *pParse, Table *pTab, sqlite3_index_info *p){ sqlite3_vtab *pVtab = sqlite3GetVTable(pParse->db, pTab)->pVtab; int rc; - whereTraceIndexInfoInputs(p); + whereTraceIndexInfoInputs(p, pTab); pParse->db->nSchemaLock++; rc = pVtab->pModule->xBestIndex(pVtab, p); pParse->db->nSchemaLock--; - whereTraceIndexInfoOutputs(p); + whereTraceIndexInfoOutputs(p, pTab); if( rc!=SQLITE_OK && rc!=SQLITE_CONSTRAINT ){ if( rc==SQLITE_NOMEM ){ @@ -163978,7 +165152,9 @@ static int whereLoopAddBtreeIndex( } if( pProbe->bUnordered || pProbe->bLowQual ){ if( pProbe->bUnordered ) opMask &= ~(WO_GT|WO_GE|WO_LT|WO_LE); - if( pProbe->bLowQual ) opMask &= ~(WO_EQ|WO_IN|WO_IS); + if( pProbe->bLowQual && pSrc->fg.isIndexedBy==0 ){ + opMask &= ~(WO_EQ|WO_IN|WO_IS); + } } assert( pNew->u.btree.nEqnColumn ); @@ -164245,10 +165421,13 @@ static int whereLoopAddBtreeIndex( } } - /* Set rCostIdx to the cost of visiting selected rows in index. Add - ** it to pNew->rRun, which is currently set to the cost of the index - ** seek only. Then, if this is a non-covering index, add the cost of - ** visiting the rows in the main table. */ + /* Set rCostIdx to the estimated cost of visiting selected rows in the + ** index. The estimate is the sum of two values: + ** 1. The cost of doing one search-by-key to find the first matching + ** entry + ** 2. Stepping forward in the index pNew->nOut times to find all + ** additional matching entries. + */ assert( pSrc->pTab->szTabRow>0 ); if( pProbe->idxType==SQLITE_IDXTYPE_IPK ){ /* The pProbe->szIdxRow is low for an IPK table since the interior @@ -164259,7 +165438,15 @@ static int whereLoopAddBtreeIndex( }else{ rCostIdx = pNew->nOut + 1 + (15*pProbe->szIdxRow)/pSrc->pTab->szTabRow; } - pNew->rRun = sqlite3LogEstAdd(rLogSize, rCostIdx); + rCostIdx = sqlite3LogEstAdd(rLogSize, rCostIdx); + + /* Estimate the cost of running the loop. If all data is coming + ** from the index, then this is just the cost of doing the index + ** lookup and scan. 
But if some data is coming out of the main table, + ** we also have to add in the cost of doing pNew->nOut searches to + ** locate the row in the main table that corresponds to the index entry. + */ + pNew->rRun = rCostIdx; if( (pNew->wsFlags & (WHERE_IDX_ONLY|WHERE_IPK|WHERE_EXPRIDX))==0 ){ pNew->rRun = sqlite3LogEstAdd(pNew->rRun, pNew->nOut + 16); } @@ -164365,7 +165552,9 @@ static int indexMightHelpWithOrderBy( for(ii=0; iinExpr; ii++){ Expr *pExpr = sqlite3ExprSkipCollateAndLikely(pOB->a[ii].pExpr); if( NEVER(pExpr==0) ) continue; - if( pExpr->op==TK_COLUMN && pExpr->iTable==iCursor ){ + if( (pExpr->op==TK_COLUMN || pExpr->op==TK_AGG_COLUMN) + && pExpr->iTable==iCursor + ){ if( pExpr->iColumn<0 ) return 1; for(jj=0; jjnKeyCol; jj++){ if( pExpr->iColumn==pIndex->aiColumn[jj] ) return 1; @@ -164622,7 +165811,7 @@ static void wherePartIdxExpr( u8 aff; if( pLeft->op!=TK_COLUMN ) return; - if( !sqlite3ExprIsConstant(pRight) ) return; + if( !sqlite3ExprIsConstant(0, pRight) ) return; if( !sqlite3IsBinary(sqlite3ExprCompareCollSeq(pParse, pPart)) ) return; if( pLeft->iColumn<0 ) return; aff = pIdx->pTable->aCol[pLeft->iColumn].affinity; @@ -164895,7 +166084,9 @@ static int whereLoopAddBtree( " according to whereIsCoveringIndex()\n", pProbe->zName)); } } - }else if( m==0 ){ + }else if( m==0 + && (HasRowid(pTab) || pWInfo->pSelect!=0 || sqlite3FaultSim(700)) + ){ WHERETRACE(0x200, ("-> %s a covering index according to bitmasks\n", pProbe->zName, m==0 ? "is" : "is not")); @@ -164971,7 +166162,7 @@ static int whereLoopAddBtree( ** unique index is used (making the index functionally non-unique) ** then the sqlite_stat1 data becomes important for scoring the ** plan */ - pTab->tabFlags |= TF_StatsUsed; + pTab->tabFlags |= TF_MaybeReanalyze; } #ifdef SQLITE_ENABLE_STAT4 sqlite3Stat4ProbeFree(pBuilder->pRec); @@ -164993,6 +166184,21 @@ static int isLimitTerm(WhereTerm *pTerm){ && pTerm->eMatchOp<=SQLITE_INDEX_CONSTRAINT_OFFSET; } +/* +** Return true if the first nCons constraints in the pUsage array are +** marked as in-use (have argvIndex>0). False otherwise. +*/ +static int allConstraintsUsed( + struct sqlite3_index_constraint_usage *aUsage, + int nCons +){ + int ii; + for(ii=0; iipNew->iTab. This @@ -165133,13 +166339,20 @@ static int whereLoopAddVirtualOne( *pbIn = 1; assert( (mExclude & WO_IN)==0 ); } + /* Unless pbRetryLimit is non-NULL, there should be no LIMIT/OFFSET + ** terms. And if there are any, they should follow all other terms. */ assert( pbRetryLimit || !isLimitTerm(pTerm) ); - if( isLimitTerm(pTerm) && *pbIn ){ + assert( !isLimitTerm(pTerm) || i>=nConstraint-2 ); + assert( !isLimitTerm(pTerm) || i==nConstraint-1 || isLimitTerm(pTerm+1) ); + + if( isLimitTerm(pTerm) && (*pbIn || !allConstraintsUsed(pUsage, i)) ){ /* If there is an IN(...) term handled as an == (separate call to ** xFilter for each value on the RHS of the IN) and a LIMIT or - ** OFFSET term handled as well, the plan is unusable. Set output - ** variable *pbRetryLimit to true to tell the caller to retry with - ** LIMIT and OFFSET disabled. */ + ** OFFSET term handled as well, the plan is unusable. Similarly, + ** if there is a LIMIT/OFFSET and there are other unused terms, + ** the plan cannot be used. In these cases set variable *pbRetryLimit + ** to true to tell the caller to retry with LIMIT and OFFSET + ** disabled. 
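+      **
+      ** For example (hypothetical virtual table vt), in:
+      **
+      **       SELECT * FROM vt WHERE a IN (1,2) LIMIT 5;
+      **
+      ** the IN term expands into two separate xFilter calls, and a LIMIT
+      ** constraint consumed by each call would cap each half of the scan
+      ** separately, allowing up to 10 rows out instead of 5.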
*/ if( pIdxInfo->needToFreeIdxStr ){ sqlite3_free(pIdxInfo->idxStr); pIdxInfo->idxStr = 0; @@ -165996,7 +167209,7 @@ static i8 wherePathSatisfiesOrderBy( if( MASKBIT(i) & obSat ) continue; p = pOrderBy->a[i].pExpr; mTerm = sqlite3WhereExprUsage(&pWInfo->sMaskSet,p); - if( mTerm==0 && !sqlite3ExprIsConstant(p) ) continue; + if( mTerm==0 && !sqlite3ExprIsConstant(0,p) ) continue; if( (mTerm&~orderDistinctMask)==0 ){ obSat |= MASKBIT(i); } @@ -166465,10 +167678,9 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ if( pFrom->isOrdered==pWInfo->pOrderBy->nExpr ){ pWInfo->eDistinct = WHERE_DISTINCT_ORDERED; } - if( pWInfo->pSelect->pOrderBy - && pWInfo->nOBSat > pWInfo->pSelect->pOrderBy->nExpr ){ - pWInfo->nOBSat = pWInfo->pSelect->pOrderBy->nExpr; - } + /* vvv--- See check-in [12ad822d9b827777] on 2023-03-16 ---vvv */ + assert( pWInfo->pSelect->pOrderBy==0 + || pWInfo->nOBSat <= pWInfo->pSelect->pOrderBy->nExpr ); }else{ pWInfo->revMask = pFrom->revLoop; if( pWInfo->nOBSat<=0 ){ @@ -166511,7 +167723,6 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ } } - pWInfo->nRowOut = pFrom->nRow; /* Free temporary memory and return success */ @@ -166519,6 +167730,83 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ return SQLITE_OK; } +/* +** This routine implements a heuristic designed to improve query planning. +** This routine is called in between the first and second call to +** wherePathSolver(). Hence the name "Interstage" "Heuristic". +** +** The first call to wherePathSolver() (hereafter just "solver()") computes +** the best path without regard to the order of the outputs. The second call +** to the solver() builds upon the first call to try to find an alternative +** path that satisfies the ORDER BY clause. +** +** This routine looks at the results of the first solver() run, and for +** every FROM clause term in the resulting query plan that uses an equality +** constraint against an index, disable other WhereLoops for that same +** FROM clause term that would try to do a full-table scan. This prevents +** an index search from being converted into a full-table scan in order to +** satisfy an ORDER BY clause, since even though we might get slightly better +** performance using the full-scan without sorting if the output size +** estimates are very precise, we might also get severe performance +** degradation using the full-scan if the output size estimate is too large. +** It is better to err on the side of caution. +** +** Except, if the first solver() call generated a full-table scan in an outer +** loop then stop this analysis at the first full-scan, since the second +** solver() run might try to swap that full-scan for another in order to +** get the output into the correct order. In other words, we allow a +** rewrite like this: +** +** First Solver() Second Solver() +** |-- SCAN t1 |-- SCAN t2 +** |-- SEARCH t2 `-- SEARCH t1 +** `-- SORT USING B-TREE +** +** The purpose of this routine is to disallow rewrites such as: +** +** First Solver() Second Solver() +** |-- SEARCH t1 |-- SCAN t2 <--- bad! +** |-- SEARCH t2 `-- SEARCH t1 +** `-- SORT USING B-TREE +** +** See test cases in test/whereN.test for the real-world query that +** originally provoked this heuristic. 
+*/ +static SQLITE_NOINLINE void whereInterstageHeuristic(WhereInfo *pWInfo){ + int i; +#ifdef WHERETRACE_ENABLED + int once = 0; +#endif + for(i=0; inLevel; i++){ + WhereLoop *p = pWInfo->a[i].pWLoop; + if( p==0 ) break; + if( (p->wsFlags & WHERE_VIRTUALTABLE)!=0 ) continue; + if( (p->wsFlags & (WHERE_COLUMN_EQ|WHERE_COLUMN_NULL|WHERE_COLUMN_IN))!=0 ){ + u8 iTab = p->iTab; + WhereLoop *pLoop; + for(pLoop=pWInfo->pLoops; pLoop; pLoop=pLoop->pNextLoop){ + if( pLoop->iTab!=iTab ) continue; + if( (pLoop->wsFlags & (WHERE_CONSTRAINT|WHERE_AUTO_INDEX))!=0 ){ + /* Auto-index and index-constrained loops allowed to remain */ + continue; + } +#ifdef WHERETRACE_ENABLED + if( sqlite3WhereTrace & 0x80 ){ + if( once==0 ){ + sqlite3DebugPrintf("Loops disabled by interstage heuristic:\n"); + once = 1; + } + sqlite3WhereLoopPrint(pLoop, &pWInfo->sWC); + } +#endif /* WHERETRACE_ENABLED */ + pLoop->prereq = ALLBITS; /* Prevent 2nd solver() from using this one */ + } + }else{ + break; + } + } +} + /* ** Most queries use only a single table (they are not joins) and have ** simple == constraints against indexed fields. This routine attempts @@ -166687,6 +167975,10 @@ static void showAllWhereLoops(WhereInfo *pWInfo, WhereClause *pWC){ ** the right-most table of a subquery that was flattened into the ** main query and that subquery was the right-hand operand of an ** inner join that held an ON or USING clause. +** 6) The ORDER BY clause has 63 or fewer terms +** 7) The omit-noop-join optimization is enabled. +** +** Items (1), (6), and (7) are checked by the caller. ** ** For example, given: ** @@ -166807,7 +168099,7 @@ static SQLITE_NOINLINE void whereCheckIfBloomFilterIsUseful( SrcItem *pItem = &pWInfo->pTabList->a[pLoop->iTab]; Table *pTab = pItem->pTab; if( (pTab->tabFlags & TF_HasStat1)==0 ) break; - pTab->tabFlags |= TF_StatsUsed; + pTab->tabFlags |= TF_MaybeReanalyze; if( i>=1 && (pLoop->wsFlags & reqFlags)==reqFlags /* vvvvvv--- Always the case if WHERE_COLUMN_EQ is defined */ @@ -166828,6 +168120,58 @@ static SQLITE_NOINLINE void whereCheckIfBloomFilterIsUseful( } } +/* +** Expression Node callback for sqlite3ExprCanReturnSubtype(). +** +** Only a function call is able to return a subtype. So if the node +** is not a function call, return WRC_Prune immediately. +** +** A function call is able to return a subtype if it has the +** SQLITE_RESULT_SUBTYPE property. +** +** Assume that every function is able to pass-through a subtype from +** one of its argument (using sqlite3_result_value()). Most functions +** are not this way, but we don't have a mechanism to distinguish those +** that are from those that are not, so assume they all work this way. +** That means that if one of its arguments is another function and that +** other function is able to return a subtype, then this function is +** able to return a subtype. +*/ +static int exprNodeCanReturnSubtype(Walker *pWalker, Expr *pExpr){ + int n; + FuncDef *pDef; + sqlite3 *db; + if( pExpr->op!=TK_FUNCTION ){ + return WRC_Prune; + } + assert( ExprUseXList(pExpr) ); + db = pWalker->pParse->db; + n = pExpr->x.pList ? pExpr->x.pList->nExpr : 0; + pDef = sqlite3FindFunction(db, pExpr->u.zToken, n, ENC(db), 0); + if( pDef==0 || (pDef->funcFlags & SQLITE_RESULT_SUBTYPE)!=0 ){ + pWalker->eCode = 1; + return WRC_Prune; + } + return WRC_Continue; +} + +/* +** Return TRUE if expression pExpr is able to return a subtype. +** +** A TRUE return does not guarantee that a subtype will be returned. +** It only indicates that a subtype return is possible. 
False positives +** are acceptable as they only disable an optimization. False negatives, +** on the other hand, can lead to incorrect answers. +*/ +static int sqlite3ExprCanReturnSubtype(Parse *pParse, Expr *pExpr){ + Walker w; + memset(&w, 0, sizeof(w)); + w.pParse = pParse; + w.xExprCallback = exprNodeCanReturnSubtype; + sqlite3WalkExpr(&w, pExpr); + return w.eCode; +} + /* ** The index pIdx is used by a query and contains one or more expressions. ** In other words pIdx is an index on an expression. iIdxCur is the cursor @@ -166860,20 +168204,12 @@ static SQLITE_NOINLINE void whereAddIndexedExpr( }else{ continue; } - if( sqlite3ExprIsConstant(pExpr) ) continue; - if( pExpr->op==TK_FUNCTION ){ + if( sqlite3ExprIsConstant(0,pExpr) ) continue; + if( pExpr->op==TK_FUNCTION && sqlite3ExprCanReturnSubtype(pParse,pExpr) ){ /* Functions that might set a subtype should not be replaced by the ** value taken from an expression index since the index omits the ** subtype. https://sqlite.org/forum/forumpost/68d284c86b082c3e */ - int n; - FuncDef *pDef; - sqlite3 *db = pParse->db; - assert( ExprUseXList(pExpr) ); - n = pExpr->x.pList ? pExpr->x.pList->nExpr : 0; - pDef = sqlite3FindFunction(db, pExpr->u.zToken, n, ENC(db), 0); - if( pDef==0 || (pDef->funcFlags & SQLITE_RESULT_SUBTYPE)!=0 ){ - continue; - } + continue; } p = sqlite3DbMallocRaw(pParse->db, sizeof(IndexedExpr)); if( p==0 ) break; @@ -167056,6 +168392,7 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( if( pOrderBy && pOrderBy->nExpr>=BMS ){ pOrderBy = 0; wctrlFlags &= ~WHERE_WANT_DISTINCT; + wctrlFlags |= WHERE_KEEP_ALL_JOINS; /* Disable omit-noop-join opt */ } /* The number of tables in the FROM clause is limited by the number of @@ -167138,7 +168475,11 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( ){ pWInfo->eDistinct = WHERE_DISTINCT_UNIQUE; } - ExplainQueryPlan((pParse, 0, "SCAN CONSTANT ROW")); + if( ALWAYS(pWInfo->pSelect) + && (pWInfo->pSelect->selFlags & SF_MultiValue)==0 + ){ + ExplainQueryPlan((pParse, 0, "SCAN CONSTANT ROW")); + } }else{ /* Assign a bit from the bitmask to every term in the FROM clause. ** @@ -167291,6 +168632,7 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( wherePathSolver(pWInfo, 0); if( db->mallocFailed ) goto whereBeginError; if( pWInfo->pOrderBy ){ + whereInterstageHeuristic(pWInfo); wherePathSolver(pWInfo, pWInfo->nRowOut+1); if( db->mallocFailed ) goto whereBeginError; } @@ -167351,10 +168693,10 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( ** in-line sqlite3WhereCodeOneLoopStart() for performance reasons. */ notReady = ~(Bitmask)0; - if( pWInfo->nLevel>=2 - && pResultSet!=0 /* these two combine to guarantee */ - && 0==(wctrlFlags & WHERE_AGG_DISTINCT) /* condition (1) above */ - && OptimizationEnabled(db, SQLITE_OmitNoopJoin) + if( pWInfo->nLevel>=2 /* Must be a join, or this opt8n is pointless */ + && pResultSet!=0 /* Condition (1) */ + && 0==(wctrlFlags & (WHERE_AGG_DISTINCT|WHERE_KEEP_ALL_JOINS)) /* (1),(6) */ + && OptimizationEnabled(db, SQLITE_OmitNoopJoin) /* (7) */ ){ notReady = whereOmitNoopJoin(pWInfo, notReady); nTabList = pWInfo->nLevel; @@ -167674,26 +169016,6 @@ whereBeginError: } #endif -#ifdef SQLITE_DEBUG -/* -** Return true if cursor iCur is opened by instruction k of the -** bytecode. Used inside of assert() only. 
-*/ -static int cursorIsOpen(Vdbe *v, int iCur, int k){ - while( k>=0 ){ - VdbeOp *pOp = sqlite3VdbeGetOp(v,k--); - if( pOp->p1!=iCur ) continue; - if( pOp->opcode==OP_Close ) return 0; - if( pOp->opcode==OP_OpenRead ) return 1; - if( pOp->opcode==OP_OpenWrite ) return 1; - if( pOp->opcode==OP_OpenDup ) return 1; - if( pOp->opcode==OP_OpenAutoindex ) return 1; - if( pOp->opcode==OP_OpenEphemeral ) return 1; - } - return 0; -} -#endif /* SQLITE_DEBUG */ - /* ** Generate the end of the WHERE loop. See comments on ** sqlite3WhereBegin() for additional information. @@ -167840,7 +169162,15 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){ addr = sqlite3VdbeAddOp1(v, OP_IfPos, pLevel->iLeftJoin); VdbeCoverage(v); assert( (ws & WHERE_IDX_ONLY)==0 || (ws & WHERE_INDEXED)!=0 ); if( (ws & WHERE_IDX_ONLY)==0 ){ - assert( pLevel->iTabCur==pTabList->a[pLevel->iFrom].iCursor ); + SrcItem *pSrc = &pTabList->a[pLevel->iFrom]; + assert( pLevel->iTabCur==pSrc->iCursor ); + if( pSrc->fg.viaCoroutine ){ + int m, n; + n = pSrc->regResult; + assert( pSrc->pTab!=0 ); + m = pSrc->pTab->nCol; + sqlite3VdbeAddOp3(v, OP_Null, 0, n, n+m-1); + } sqlite3VdbeAddOp1(v, OP_NullRow, pLevel->iTabCur); } if( (ws & WHERE_INDEXED) @@ -167890,6 +169220,7 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){ */ if( pTabItem->fg.viaCoroutine ){ testcase( pParse->db->mallocFailed ); + assert( pTabItem->regResult>=0 ); translateColumnToCopy(pParse, pLevel->addrBody, pLevel->iTabCur, pTabItem->regResult, 0); continue; @@ -167984,16 +169315,10 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){ ** reference. Verify that this is harmless - that the ** table being referenced really is open. */ -#ifdef SQLITE_ENABLE_OFFSET_SQL_FUNC - assert( (pLoop->wsFlags & WHERE_IDX_ONLY)==0 - || cursorIsOpen(v,pOp->p1,k) - || pOp->opcode==OP_Offset - ); -#else - assert( (pLoop->wsFlags & WHERE_IDX_ONLY)==0 - || cursorIsOpen(v,pOp->p1,k) - ); -#endif + if( pLoop->wsFlags & WHERE_IDX_ONLY ){ + sqlite3ErrorMsg(pParse, "internal query planner error"); + pParse->rc = SQLITE_INTERNAL; + } } }else if( pOp->opcode==OP_Rowid ){ pOp->p1 = pLevel->iIdxCur; @@ -169194,7 +170519,7 @@ SQLITE_PRIVATE void sqlite3WindowListDelete(sqlite3 *db, Window *p){ ** variable values in the expression tree. */ static Expr *sqlite3WindowOffsetExpr(Parse *pParse, Expr *pExpr){ - if( 0==sqlite3ExprIsConstant(pExpr) ){ + if( 0==sqlite3ExprIsConstant(0,pExpr) ){ if( IN_RENAME_OBJECT ) sqlite3RenameExprUnmap(pParse, pExpr); sqlite3ExprDelete(pParse->db, pExpr); pExpr = sqlite3ExprAlloc(pParse->db, TK_NULL, 0, 0); @@ -171266,9 +172591,9 @@ static void updateDeleteLimitError( break; } } - if( (p->selFlags & SF_MultiValue)==0 && - (mxSelect = pParse->db->aLimit[SQLITE_LIMIT_COMPOUND_SELECT])>0 && - cnt>mxSelect + if( (p->selFlags & (SF_MultiValue|SF_Values))==0 + && (mxSelect = pParse->db->aLimit[SQLITE_LIMIT_COMPOUND_SELECT])>0 + && cnt>mxSelect ){ sqlite3ErrorMsg(pParse, "too many terms in compound SELECT"); } @@ -171288,6 +172613,14 @@ static void updateDeleteLimitError( return pSelect; } + /* Memory allocator for parser stack resizing. This is a thin wrapper around + ** sqlite3_realloc() that includes a call to sqlite3FaultSim() to facilitate + ** testing. + */ + static void *parserStackRealloc(void *pOld, sqlite3_uint64 newSize){ + return sqlite3FaultSim(700) ? 
0 : sqlite3_realloc(pOld, newSize); + } + /* Construct a new Expr object from a single token */ static Expr *tokenExpr(Parse *pParse, int op, Token t){ @@ -171537,8 +172870,8 @@ static void updateDeleteLimitError( #define TK_TRUEFALSE 170 #define TK_ISNOT 171 #define TK_FUNCTION 172 -#define TK_UMINUS 173 -#define TK_UPLUS 174 +#define TK_UPLUS 173 +#define TK_UMINUS 174 #define TK_TRUTH 175 #define TK_REGISTER 176 #define TK_VECTOR 177 @@ -171547,8 +172880,9 @@ static void updateDeleteLimitError( #define TK_ASTERISK 180 #define TK_SPAN 181 #define TK_ERROR 182 -#define TK_SPACE 183 -#define TK_ILLEGAL 184 +#define TK_QNUMBER 183 +#define TK_SPACE 184 +#define TK_ILLEGAL 185 #endif /**************** End token definitions ***************************************/ @@ -171589,6 +172923,9 @@ static void updateDeleteLimitError( ** sqlite3ParserARG_STORE Code to store %extra_argument into yypParser ** sqlite3ParserARG_FETCH Code to extract %extra_argument from yypParser ** sqlite3ParserCTX_* As sqlite3ParserARG_ except for %extra_context +** YYREALLOC Name of the realloc() function to use +** YYFREE Name of the free() function to use +** YYDYNSTACK True if stack space should be extended on heap ** YYERRORSYMBOL is the code number of the error symbol. If not ** defined, then do no error processing. ** YYNSTATE the combined number of states. @@ -171602,37 +172939,39 @@ static void updateDeleteLimitError( ** YY_NO_ACTION The yy_action[] code for no-op ** YY_MIN_REDUCE Minimum value for reduce actions ** YY_MAX_REDUCE Maximum value for reduce actions +** YY_MIN_DSTRCTR Minimum symbol value that has a destructor +** YY_MAX_DSTRCTR Maximum symbol value that has a destructor */ #ifndef INTERFACE # define INTERFACE 1 #endif /************* Begin control #defines *****************************************/ #define YYCODETYPE unsigned short int -#define YYNOCODE 319 +#define YYNOCODE 322 #define YYACTIONTYPE unsigned short int #define YYWILDCARD 101 #define sqlite3ParserTOKENTYPE Token typedef union { int yyinit; sqlite3ParserTOKENTYPE yy0; - TriggerStep* yy33; - Window* yy41; - Select* yy47; - SrcList* yy131; - struct TrigEvent yy180; - struct {int value; int mask;} yy231; - IdList* yy254; - u32 yy285; - ExprList* yy322; - Cte* yy385; - int yy394; - Upsert* yy444; - u8 yy516; - With* yy521; - const char* yy522; - Expr* yy528; - OnOrUsing yy561; - struct FrameBound yy595; + ExprList* yy14; + With* yy59; + Cte* yy67; + Upsert* yy122; + IdList* yy132; + int yy144; + const char* yy168; + SrcList* yy203; + Window* yy211; + OnOrUsing yy269; + struct TrigEvent yy286; + struct {int value; int mask;} yy383; + u32 yy391; + TriggerStep* yy427; + Expr* yy454; + u8 yy462; + struct FrameBound yy509; + Select* yy555; } YYMINORTYPE; #ifndef YYSTACKDEPTH #define YYSTACKDEPTH 100 @@ -171642,24 +172981,29 @@ typedef union { #define sqlite3ParserARG_PARAM #define sqlite3ParserARG_FETCH #define sqlite3ParserARG_STORE +#define YYREALLOC parserStackRealloc +#define YYFREE sqlite3_free +#define YYDYNSTACK 1 #define sqlite3ParserCTX_SDECL Parse *pParse; #define sqlite3ParserCTX_PDECL ,Parse *pParse #define sqlite3ParserCTX_PARAM ,pParse #define sqlite3ParserCTX_FETCH Parse *pParse=yypParser->pParse; #define sqlite3ParserCTX_STORE yypParser->pParse=pParse; #define YYFALLBACK 1 -#define YYNSTATE 583 -#define YYNRULE 405 -#define YYNRULE_WITH_ACTION 340 -#define YYNTOKEN 185 -#define YY_MAX_SHIFT 582 -#define YY_MIN_SHIFTREDUCE 842 -#define YY_MAX_SHIFTREDUCE 1246 -#define YY_ERROR_ACTION 1247 -#define YY_ACCEPT_ACTION 1248 -#define 
YY_NO_ACTION 1249 -#define YY_MIN_REDUCE 1250 -#define YY_MAX_REDUCE 1654 +#define YYNSTATE 587 +#define YYNRULE 409 +#define YYNRULE_WITH_ACTION 344 +#define YYNTOKEN 186 +#define YY_MAX_SHIFT 586 +#define YY_MIN_SHIFTREDUCE 849 +#define YY_MAX_SHIFTREDUCE 1257 +#define YY_ERROR_ACTION 1258 +#define YY_ACCEPT_ACTION 1259 +#define YY_NO_ACTION 1260 +#define YY_MIN_REDUCE 1261 +#define YY_MAX_REDUCE 1669 +#define YY_MIN_DSTRCTR 205 +#define YY_MAX_DSTRCTR 319 /************* End control #defines *******************************************/ #define YY_NLOOKAHEAD ((int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0]))) @@ -171675,6 +173019,22 @@ typedef union { # define yytestcase(X) #endif +/* Macro to determine if stack space has the ability to grow using +** heap memory. +*/ +#if YYSTACKDEPTH<=0 || YYDYNSTACK +# define YYGROWABLESTACK 1 +#else +# define YYGROWABLESTACK 0 +#endif + +/* Guarantee a minimum number of initial stack slots. +*/ +#if YYSTACKDEPTH<=0 +# undef YYSTACKDEPTH +# define YYSTACKDEPTH 2 /* Need a minimum stack size */ +#endif + /* Next are the tables used to determine what action to take based on the ** current state and lookahead token. These tables are used to implement @@ -171726,624 +173086,629 @@ typedef union { ** yy_default[] Default action for each state. ** *********** Begin parsing tables **********************************************/ -#define YY_ACTTAB_COUNT (2112) +#define YY_ACTTAB_COUNT (2138) static const YYACTIONTYPE yy_action[] = { - /* 0 */ 576, 210, 576, 119, 116, 231, 576, 119, 116, 231, - /* 10 */ 576, 1321, 383, 1300, 414, 570, 570, 570, 576, 415, - /* 20 */ 384, 1321, 1283, 42, 42, 42, 42, 210, 1533, 72, - /* 30 */ 72, 978, 425, 42, 42, 499, 305, 281, 305, 979, - /* 40 */ 403, 72, 72, 126, 127, 81, 1221, 1221, 1058, 1061, - /* 50 */ 1048, 1048, 124, 124, 125, 125, 125, 125, 484, 415, - /* 60 */ 1248, 1, 1, 582, 2, 1252, 558, 119, 116, 231, - /* 70 */ 319, 488, 147, 488, 532, 119, 116, 231, 537, 1334, - /* 80 */ 423, 531, 143, 126, 127, 81, 1221, 1221, 1058, 1061, - /* 90 */ 1048, 1048, 124, 124, 125, 125, 125, 125, 119, 116, - /* 100 */ 231, 329, 123, 123, 123, 123, 122, 122, 121, 121, - /* 110 */ 121, 120, 117, 452, 286, 286, 286, 286, 450, 450, - /* 120 */ 450, 1572, 382, 1574, 1197, 381, 1168, 573, 1168, 573, - /* 130 */ 415, 1572, 545, 261, 228, 452, 102, 146, 457, 318, - /* 140 */ 567, 242, 123, 123, 123, 123, 122, 122, 121, 121, - /* 150 */ 121, 120, 117, 452, 126, 127, 81, 1221, 1221, 1058, - /* 160 */ 1061, 1048, 1048, 124, 124, 125, 125, 125, 125, 143, - /* 170 */ 296, 1197, 345, 456, 121, 121, 121, 120, 117, 452, - /* 180 */ 128, 1197, 1198, 1197, 149, 449, 448, 576, 120, 117, - /* 190 */ 452, 125, 125, 125, 125, 118, 123, 123, 123, 123, - /* 200 */ 122, 122, 121, 121, 121, 120, 117, 452, 462, 114, - /* 210 */ 13, 13, 554, 123, 123, 123, 123, 122, 122, 121, - /* 220 */ 121, 121, 120, 117, 452, 428, 318, 567, 1197, 1198, - /* 230 */ 1197, 150, 1229, 415, 1229, 125, 125, 125, 125, 123, - /* 240 */ 123, 123, 123, 122, 122, 121, 121, 121, 120, 117, - /* 250 */ 452, 473, 348, 1045, 1045, 1059, 1062, 126, 127, 81, - /* 260 */ 1221, 1221, 1058, 1061, 1048, 1048, 124, 124, 125, 125, - /* 270 */ 125, 125, 1286, 530, 224, 1197, 576, 415, 226, 523, - /* 280 */ 177, 83, 84, 123, 123, 123, 123, 122, 122, 121, - /* 290 */ 121, 121, 120, 117, 452, 1014, 16, 16, 1197, 134, - /* 300 */ 134, 126, 127, 81, 1221, 1221, 1058, 1061, 1048, 1048, - /* 310 */ 124, 124, 125, 125, 125, 125, 123, 123, 123, 123, - /* 320 */ 122, 122, 121, 121, 121, 120, 117, 452, 1049, 
554, - /* 330 */ 1197, 379, 1197, 1198, 1197, 254, 1442, 405, 512, 509, - /* 340 */ 508, 112, 568, 574, 4, 933, 933, 439, 507, 346, - /* 350 */ 468, 332, 366, 400, 1242, 1197, 1198, 1197, 571, 576, - /* 360 */ 123, 123, 123, 123, 122, 122, 121, 121, 121, 120, - /* 370 */ 117, 452, 286, 286, 375, 1585, 1611, 449, 448, 155, - /* 380 */ 415, 453, 72, 72, 1293, 573, 1226, 1197, 1198, 1197, - /* 390 */ 86, 1228, 273, 565, 551, 524, 524, 576, 99, 1227, - /* 400 */ 6, 1285, 480, 143, 126, 127, 81, 1221, 1221, 1058, - /* 410 */ 1061, 1048, 1048, 124, 124, 125, 125, 125, 125, 558, - /* 420 */ 13, 13, 1035, 515, 1229, 1197, 1229, 557, 110, 110, - /* 430 */ 224, 576, 1243, 177, 576, 433, 111, 199, 453, 577, - /* 440 */ 453, 436, 1559, 1023, 327, 559, 1197, 272, 289, 374, - /* 450 */ 518, 369, 517, 259, 72, 72, 551, 72, 72, 365, - /* 460 */ 318, 567, 1617, 123, 123, 123, 123, 122, 122, 121, - /* 470 */ 121, 121, 120, 117, 452, 1023, 1023, 1025, 1026, 28, - /* 480 */ 286, 286, 1197, 1198, 1197, 1163, 576, 1616, 415, 908, - /* 490 */ 192, 558, 362, 573, 558, 944, 541, 525, 1163, 441, - /* 500 */ 419, 1163, 560, 1197, 1198, 1197, 576, 552, 552, 52, - /* 510 */ 52, 216, 126, 127, 81, 1221, 1221, 1058, 1061, 1048, - /* 520 */ 1048, 124, 124, 125, 125, 125, 125, 1197, 482, 136, - /* 530 */ 136, 415, 286, 286, 1497, 513, 122, 122, 121, 121, - /* 540 */ 121, 120, 117, 452, 1014, 573, 526, 219, 549, 549, - /* 550 */ 318, 567, 143, 6, 540, 126, 127, 81, 1221, 1221, - /* 560 */ 1058, 1061, 1048, 1048, 124, 124, 125, 125, 125, 125, - /* 570 */ 1561, 123, 123, 123, 123, 122, 122, 121, 121, 121, - /* 580 */ 120, 117, 452, 493, 1197, 1198, 1197, 490, 283, 1274, - /* 590 */ 964, 254, 1197, 379, 512, 509, 508, 1197, 346, 578, - /* 600 */ 1197, 578, 415, 294, 507, 964, 883, 193, 488, 318, - /* 610 */ 567, 390, 292, 386, 123, 123, 123, 123, 122, 122, - /* 620 */ 121, 121, 121, 120, 117, 452, 126, 127, 81, 1221, - /* 630 */ 1221, 1058, 1061, 1048, 1048, 124, 124, 125, 125, 125, - /* 640 */ 125, 415, 400, 1143, 1197, 876, 101, 286, 286, 1197, - /* 650 */ 1198, 1197, 379, 1100, 1197, 1198, 1197, 1197, 1198, 1197, - /* 660 */ 573, 463, 33, 379, 235, 126, 127, 81, 1221, 1221, - /* 670 */ 1058, 1061, 1048, 1048, 124, 124, 125, 125, 125, 125, - /* 680 */ 1441, 966, 576, 230, 965, 123, 123, 123, 123, 122, - /* 690 */ 122, 121, 121, 121, 120, 117, 452, 1163, 230, 1197, - /* 700 */ 158, 1197, 1198, 1197, 1560, 13, 13, 303, 964, 1237, - /* 710 */ 1163, 154, 415, 1163, 379, 1588, 1181, 5, 375, 1585, - /* 720 */ 435, 1243, 3, 964, 123, 123, 123, 123, 122, 122, - /* 730 */ 121, 121, 121, 120, 117, 452, 126, 127, 81, 1221, - /* 740 */ 1221, 1058, 1061, 1048, 1048, 124, 124, 125, 125, 125, - /* 750 */ 125, 415, 210, 575, 1197, 1036, 1197, 1198, 1197, 1197, - /* 760 */ 394, 859, 156, 1559, 380, 408, 1105, 1105, 496, 576, - /* 770 */ 473, 348, 1326, 1326, 1559, 126, 127, 81, 1221, 1221, - /* 780 */ 1058, 1061, 1048, 1048, 124, 124, 125, 125, 125, 125, - /* 790 */ 130, 576, 13, 13, 536, 123, 123, 123, 123, 122, - /* 800 */ 122, 121, 121, 121, 120, 117, 452, 304, 576, 461, - /* 810 */ 229, 1197, 1198, 1197, 13, 13, 1197, 1198, 1197, 1304, - /* 820 */ 471, 1274, 415, 1324, 1324, 1559, 1019, 461, 460, 440, - /* 830 */ 301, 72, 72, 1272, 123, 123, 123, 123, 122, 122, - /* 840 */ 121, 121, 121, 120, 117, 452, 126, 127, 81, 1221, - /* 850 */ 1221, 1058, 1061, 1048, 1048, 124, 124, 125, 125, 125, - /* 860 */ 125, 415, 388, 1080, 1163, 286, 286, 425, 314, 280, - /* 870 */ 280, 287, 287, 465, 412, 411, 1543, 1163, 573, 576, - /* 880 */ 1163, 1200, 
573, 413, 573, 126, 127, 81, 1221, 1221, - /* 890 */ 1058, 1061, 1048, 1048, 124, 124, 125, 125, 125, 125, - /* 900 */ 461, 1489, 13, 13, 1545, 123, 123, 123, 123, 122, - /* 910 */ 122, 121, 121, 121, 120, 117, 452, 202, 576, 466, - /* 920 */ 1591, 582, 2, 1252, 847, 848, 849, 1567, 319, 413, - /* 930 */ 147, 6, 415, 257, 256, 255, 208, 1334, 9, 1200, - /* 940 */ 264, 72, 72, 1440, 123, 123, 123, 123, 122, 122, - /* 950 */ 121, 121, 121, 120, 117, 452, 126, 127, 81, 1221, - /* 960 */ 1221, 1058, 1061, 1048, 1048, 124, 124, 125, 125, 125, - /* 970 */ 125, 576, 286, 286, 576, 1217, 415, 581, 315, 1252, - /* 980 */ 425, 375, 1585, 360, 319, 573, 147, 499, 533, 1648, - /* 990 */ 401, 939, 499, 1334, 71, 71, 938, 72, 72, 242, - /* 1000 */ 1332, 105, 81, 1221, 1221, 1058, 1061, 1048, 1048, 124, - /* 1010 */ 124, 125, 125, 125, 125, 123, 123, 123, 123, 122, - /* 1020 */ 122, 121, 121, 121, 120, 117, 452, 1121, 286, 286, - /* 1030 */ 1426, 456, 1532, 1217, 447, 286, 286, 1496, 1359, 313, - /* 1040 */ 482, 573, 1122, 458, 355, 499, 358, 1270, 573, 209, - /* 1050 */ 576, 422, 179, 576, 1035, 242, 389, 1123, 527, 123, - /* 1060 */ 123, 123, 123, 122, 122, 121, 121, 121, 120, 117, - /* 1070 */ 452, 1024, 108, 72, 72, 1023, 13, 13, 919, 576, - /* 1080 */ 1502, 576, 286, 286, 98, 534, 1541, 456, 920, 1338, - /* 1090 */ 1333, 203, 415, 286, 286, 573, 152, 211, 1502, 1504, - /* 1100 */ 430, 573, 56, 56, 57, 57, 573, 1023, 1023, 1025, - /* 1110 */ 451, 576, 415, 535, 12, 297, 126, 127, 81, 1221, - /* 1120 */ 1221, 1058, 1061, 1048, 1048, 124, 124, 125, 125, 125, - /* 1130 */ 125, 576, 415, 871, 15, 15, 126, 127, 81, 1221, - /* 1140 */ 1221, 1058, 1061, 1048, 1048, 124, 124, 125, 125, 125, - /* 1150 */ 125, 377, 533, 264, 44, 44, 126, 115, 81, 1221, - /* 1160 */ 1221, 1058, 1061, 1048, 1048, 124, 124, 125, 125, 125, - /* 1170 */ 125, 1502, 482, 1275, 421, 123, 123, 123, 123, 122, - /* 1180 */ 122, 121, 121, 121, 120, 117, 452, 205, 1217, 499, - /* 1190 */ 434, 871, 472, 322, 499, 123, 123, 123, 123, 122, - /* 1200 */ 122, 121, 121, 121, 120, 117, 452, 576, 561, 1144, - /* 1210 */ 1646, 1426, 1646, 547, 576, 123, 123, 123, 123, 122, - /* 1220 */ 122, 121, 121, 121, 120, 117, 452, 576, 1426, 576, - /* 1230 */ 13, 13, 546, 323, 1329, 415, 338, 58, 58, 353, - /* 1240 */ 1426, 1174, 326, 286, 286, 553, 1217, 300, 899, 534, - /* 1250 */ 45, 45, 59, 59, 1144, 1647, 573, 1647, 569, 421, - /* 1260 */ 127, 81, 1221, 1221, 1058, 1061, 1048, 1048, 124, 124, - /* 1270 */ 125, 125, 125, 125, 1371, 377, 504, 290, 1197, 516, - /* 1280 */ 1370, 431, 398, 398, 397, 275, 395, 900, 1142, 856, - /* 1290 */ 482, 258, 1426, 1174, 467, 1163, 12, 335, 432, 337, - /* 1300 */ 1121, 464, 236, 258, 325, 464, 548, 1548, 1163, 1102, - /* 1310 */ 495, 1163, 324, 1102, 444, 1122, 339, 520, 123, 123, - /* 1320 */ 123, 123, 122, 122, 121, 121, 121, 120, 117, 452, - /* 1330 */ 1123, 318, 567, 1142, 576, 1197, 1198, 1197, 112, 568, - /* 1340 */ 201, 4, 238, 437, 939, 494, 285, 228, 1521, 938, - /* 1350 */ 170, 564, 576, 142, 1520, 571, 576, 60, 60, 576, - /* 1360 */ 420, 576, 445, 576, 539, 302, 879, 8, 491, 576, - /* 1370 */ 237, 576, 420, 576, 489, 61, 61, 576, 453, 62, - /* 1380 */ 62, 336, 63, 63, 46, 46, 47, 47, 365, 576, - /* 1390 */ 565, 576, 48, 48, 50, 50, 51, 51, 576, 295, - /* 1400 */ 64, 64, 486, 295, 543, 416, 475, 1035, 576, 542, - /* 1410 */ 318, 567, 65, 65, 66, 66, 413, 479, 576, 1035, - /* 1420 */ 576, 14, 14, 879, 1024, 110, 110, 413, 1023, 576, - /* 1430 */ 478, 67, 67, 111, 459, 453, 577, 453, 98, 317, - /* 1440 */ 1023, 
132, 132, 133, 133, 576, 1565, 576, 978, 413, - /* 1450 */ 6, 1566, 68, 68, 1564, 6, 979, 576, 6, 1563, - /* 1460 */ 1023, 1023, 1025, 6, 350, 218, 101, 535, 53, 53, - /* 1470 */ 69, 69, 1023, 1023, 1025, 1026, 28, 1590, 1185, 455, - /* 1480 */ 70, 70, 290, 87, 215, 31, 1367, 398, 398, 397, - /* 1490 */ 275, 395, 354, 109, 856, 107, 576, 112, 568, 487, - /* 1500 */ 4, 1216, 576, 239, 153, 576, 39, 236, 1303, 325, - /* 1510 */ 112, 568, 1302, 4, 571, 576, 32, 324, 576, 54, - /* 1520 */ 54, 576, 1139, 357, 402, 165, 165, 571, 166, 166, - /* 1530 */ 576, 291, 359, 576, 17, 361, 576, 453, 77, 77, - /* 1540 */ 1317, 55, 55, 1301, 73, 73, 576, 238, 474, 565, - /* 1550 */ 453, 476, 368, 135, 135, 170, 74, 74, 142, 163, - /* 1560 */ 163, 378, 565, 543, 576, 321, 576, 890, 544, 137, - /* 1570 */ 137, 343, 1357, 426, 298, 237, 543, 576, 1035, 576, - /* 1580 */ 344, 542, 101, 373, 110, 110, 162, 131, 131, 164, - /* 1590 */ 164, 1035, 111, 372, 453, 577, 453, 110, 110, 1023, - /* 1600 */ 157, 157, 141, 141, 576, 111, 576, 453, 577, 453, - /* 1610 */ 416, 288, 1023, 576, 886, 318, 567, 576, 219, 576, - /* 1620 */ 241, 1016, 481, 263, 263, 898, 897, 140, 140, 138, - /* 1630 */ 138, 1023, 1023, 1025, 1026, 28, 139, 139, 529, 459, - /* 1640 */ 76, 76, 78, 78, 1023, 1023, 1025, 1026, 28, 1185, - /* 1650 */ 455, 576, 1087, 290, 112, 568, 1579, 4, 398, 398, - /* 1660 */ 397, 275, 395, 576, 1027, 856, 576, 483, 349, 263, - /* 1670 */ 101, 571, 886, 1380, 75, 75, 1425, 505, 236, 260, - /* 1680 */ 325, 112, 568, 363, 4, 101, 43, 43, 324, 49, - /* 1690 */ 49, 905, 906, 161, 453, 101, 981, 982, 571, 1083, - /* 1700 */ 1353, 260, 969, 936, 263, 114, 565, 1099, 521, 1099, - /* 1710 */ 1087, 1098, 869, 1098, 151, 937, 1148, 114, 238, 1365, - /* 1720 */ 562, 453, 1027, 563, 1430, 1282, 170, 1273, 1261, 142, - /* 1730 */ 1605, 1260, 1262, 565, 1598, 1035, 500, 278, 213, 1350, - /* 1740 */ 310, 110, 110, 943, 311, 312, 237, 11, 234, 111, - /* 1750 */ 221, 453, 577, 453, 293, 399, 1023, 1412, 341, 331, - /* 1760 */ 334, 342, 1035, 299, 347, 1417, 1416, 485, 110, 110, - /* 1770 */ 510, 406, 225, 1300, 206, 371, 111, 1362, 453, 577, - /* 1780 */ 453, 416, 1363, 1023, 1493, 1492, 318, 567, 1023, 1023, - /* 1790 */ 1025, 1026, 28, 566, 207, 220, 80, 568, 393, 4, - /* 1800 */ 1601, 1361, 556, 1360, 1237, 181, 267, 232, 1540, 1538, - /* 1810 */ 459, 1234, 424, 571, 82, 1023, 1023, 1025, 1026, 28, - /* 1820 */ 86, 217, 85, 1498, 190, 129, 1407, 554, 330, 175, - /* 1830 */ 36, 1413, 1400, 333, 183, 469, 453, 470, 185, 186, - /* 1840 */ 187, 503, 244, 188, 99, 1419, 1418, 37, 565, 1421, - /* 1850 */ 492, 194, 248, 404, 92, 498, 112, 568, 1509, 4, - /* 1860 */ 477, 352, 407, 279, 1487, 250, 198, 501, 356, 519, - /* 1870 */ 409, 251, 252, 571, 1263, 1320, 1319, 1035, 438, 1318, - /* 1880 */ 94, 890, 1311, 110, 110, 1290, 1310, 226, 1615, 442, - /* 1890 */ 1584, 111, 528, 453, 577, 453, 453, 443, 1023, 1614, - /* 1900 */ 265, 266, 410, 1289, 370, 1288, 1613, 308, 565, 309, - /* 1910 */ 446, 376, 1570, 1569, 10, 1474, 1385, 555, 387, 106, - /* 1920 */ 316, 1384, 100, 35, 538, 579, 1191, 274, 1343, 276, - /* 1930 */ 1023, 1023, 1025, 1026, 28, 1342, 277, 1035, 580, 1258, - /* 1940 */ 385, 212, 1253, 110, 110, 391, 167, 392, 168, 1525, - /* 1950 */ 417, 111, 180, 453, 577, 453, 1526, 843, 1023, 148, - /* 1960 */ 306, 1524, 1523, 454, 169, 222, 223, 214, 320, 79, - /* 1970 */ 233, 1097, 145, 1095, 328, 182, 171, 1216, 240, 922, - /* 1980 */ 184, 340, 1111, 243, 189, 172, 173, 427, 429, 88, - /* 1990 */ 1023, 1023, 1025, 1026, 28, 89, 
191, 418, 174, 90, - /* 2000 */ 91, 1114, 245, 246, 1110, 159, 18, 1103, 247, 351, - /* 2010 */ 263, 195, 1231, 497, 249, 196, 38, 858, 502, 372, - /* 2020 */ 253, 364, 888, 197, 506, 511, 93, 19, 20, 514, - /* 2030 */ 901, 367, 95, 307, 160, 96, 522, 97, 1179, 1064, - /* 2040 */ 1150, 40, 227, 21, 282, 176, 1149, 284, 973, 200, - /* 2050 */ 967, 114, 262, 1169, 22, 1165, 23, 24, 1173, 25, - /* 2060 */ 1167, 1154, 1172, 26, 34, 550, 27, 103, 204, 101, - /* 2070 */ 104, 1078, 7, 1065, 1063, 1067, 1120, 1068, 1119, 268, - /* 2080 */ 269, 29, 41, 1187, 1028, 870, 113, 30, 572, 396, - /* 2090 */ 1186, 144, 178, 1249, 1249, 1249, 932, 1249, 1249, 1249, - /* 2100 */ 1249, 1249, 1249, 270, 1249, 271, 1249, 1249, 1249, 1249, - /* 2110 */ 1249, 1606, + /* 0 */ 580, 128, 125, 232, 1626, 553, 580, 1294, 1285, 580, + /* 10 */ 328, 580, 1304, 212, 580, 128, 125, 232, 582, 416, + /* 20 */ 582, 395, 1546, 51, 51, 527, 409, 1297, 533, 51, + /* 30 */ 51, 987, 51, 51, 81, 81, 1111, 61, 61, 988, + /* 40 */ 1111, 1296, 384, 135, 136, 90, 1232, 1232, 1067, 1070, + /* 50 */ 1057, 1057, 133, 133, 134, 134, 134, 134, 1581, 416, + /* 60 */ 287, 287, 7, 287, 287, 426, 1054, 1054, 1068, 1071, + /* 70 */ 289, 560, 496, 577, 528, 565, 577, 501, 565, 486, + /* 80 */ 534, 262, 229, 135, 136, 90, 1232, 1232, 1067, 1070, + /* 90 */ 1057, 1057, 133, 133, 134, 134, 134, 134, 128, 125, + /* 100 */ 232, 1510, 132, 132, 132, 132, 131, 131, 130, 130, + /* 110 */ 130, 129, 126, 454, 1208, 1259, 1, 1, 586, 2, + /* 120 */ 1263, 1575, 424, 1586, 383, 320, 1178, 153, 1178, 1588, + /* 130 */ 416, 382, 1586, 547, 1345, 330, 111, 574, 574, 574, + /* 140 */ 293, 1058, 132, 132, 132, 132, 131, 131, 130, 130, + /* 150 */ 130, 129, 126, 454, 135, 136, 90, 1232, 1232, 1067, + /* 160 */ 1070, 1057, 1057, 133, 133, 134, 134, 134, 134, 287, + /* 170 */ 287, 1208, 1209, 1208, 255, 287, 287, 514, 511, 510, + /* 180 */ 137, 459, 577, 212, 565, 451, 450, 509, 577, 1620, + /* 190 */ 565, 134, 134, 134, 134, 127, 404, 243, 132, 132, + /* 200 */ 132, 132, 131, 131, 130, 130, 130, 129, 126, 454, + /* 210 */ 282, 475, 349, 132, 132, 132, 132, 131, 131, 130, + /* 220 */ 130, 130, 129, 126, 454, 578, 155, 940, 940, 458, + /* 230 */ 227, 525, 1240, 416, 1240, 134, 134, 134, 134, 132, + /* 240 */ 132, 132, 132, 131, 131, 130, 130, 130, 129, 126, + /* 250 */ 454, 130, 130, 130, 129, 126, 454, 135, 136, 90, + /* 260 */ 1232, 1232, 1067, 1070, 1057, 1057, 133, 133, 134, 134, + /* 270 */ 134, 134, 128, 125, 232, 454, 580, 416, 401, 1253, + /* 280 */ 180, 92, 93, 132, 132, 132, 132, 131, 131, 130, + /* 290 */ 130, 130, 129, 126, 454, 385, 391, 1208, 387, 81, + /* 300 */ 81, 135, 136, 90, 1232, 1232, 1067, 1070, 1057, 1057, + /* 310 */ 133, 133, 134, 134, 134, 134, 132, 132, 132, 132, + /* 320 */ 131, 131, 130, 130, 130, 129, 126, 454, 131, 131, + /* 330 */ 130, 130, 130, 129, 126, 454, 560, 1208, 302, 319, + /* 340 */ 571, 121, 572, 484, 4, 559, 1153, 1661, 1632, 1661, + /* 350 */ 45, 128, 125, 232, 1208, 1209, 1208, 1254, 575, 1173, + /* 360 */ 132, 132, 132, 132, 131, 131, 130, 130, 130, 129, + /* 370 */ 126, 454, 1173, 287, 287, 1173, 1023, 580, 426, 1023, + /* 380 */ 416, 455, 1606, 586, 2, 1263, 577, 44, 565, 95, + /* 390 */ 320, 110, 153, 569, 1208, 1209, 1208, 526, 526, 1345, + /* 400 */ 81, 81, 7, 44, 135, 136, 90, 1232, 1232, 1067, + /* 410 */ 1070, 1057, 1057, 133, 133, 134, 134, 134, 134, 295, + /* 420 */ 1153, 1662, 1044, 1662, 1208, 1151, 319, 571, 119, 119, + /* 430 */ 347, 470, 333, 347, 287, 287, 120, 560, 455, 581, + /* 440 */ 455, 1173, 1173, 
1032, 319, 571, 442, 577, 210, 565, + /* 450 */ 1343, 1455, 550, 535, 1173, 1173, 1602, 1173, 1173, 420, + /* 460 */ 319, 571, 243, 132, 132, 132, 132, 131, 131, 130, + /* 470 */ 130, 130, 129, 126, 454, 1032, 1032, 1034, 1035, 35, + /* 480 */ 44, 1208, 1209, 1208, 476, 287, 287, 1332, 416, 1311, + /* 490 */ 376, 1599, 363, 225, 458, 1208, 195, 1332, 577, 1151, + /* 500 */ 565, 1337, 1337, 274, 580, 1192, 580, 344, 46, 196, + /* 510 */ 541, 217, 135, 136, 90, 1232, 1232, 1067, 1070, 1057, + /* 520 */ 1057, 133, 133, 134, 134, 134, 134, 19, 19, 19, + /* 530 */ 19, 416, 585, 1208, 1263, 515, 1208, 319, 571, 320, + /* 540 */ 948, 153, 429, 495, 434, 947, 1208, 492, 1345, 1454, + /* 550 */ 536, 1281, 1208, 1209, 1208, 135, 136, 90, 1232, 1232, + /* 560 */ 1067, 1070, 1057, 1057, 133, 133, 134, 134, 134, 134, + /* 570 */ 579, 132, 132, 132, 132, 131, 131, 130, 130, 130, + /* 580 */ 129, 126, 454, 287, 287, 532, 287, 287, 376, 1599, + /* 590 */ 1208, 1209, 1208, 1208, 1209, 1208, 577, 490, 565, 577, + /* 600 */ 893, 565, 416, 1208, 1209, 1208, 890, 40, 22, 22, + /* 610 */ 220, 243, 529, 1453, 132, 132, 132, 132, 131, 131, + /* 620 */ 130, 130, 130, 129, 126, 454, 135, 136, 90, 1232, + /* 630 */ 1232, 1067, 1070, 1057, 1057, 133, 133, 134, 134, 134, + /* 640 */ 134, 416, 180, 458, 1208, 883, 255, 287, 287, 514, + /* 650 */ 511, 510, 376, 1599, 1572, 1335, 1335, 580, 893, 509, + /* 660 */ 577, 44, 565, 563, 1211, 135, 136, 90, 1232, 1232, + /* 670 */ 1067, 1070, 1057, 1057, 133, 133, 134, 134, 134, 134, + /* 680 */ 81, 81, 426, 580, 381, 132, 132, 132, 132, 131, + /* 690 */ 131, 130, 130, 130, 129, 126, 454, 297, 287, 287, + /* 700 */ 464, 1208, 1209, 1208, 1208, 538, 19, 19, 452, 452, + /* 710 */ 452, 577, 416, 565, 230, 440, 1191, 539, 319, 571, + /* 720 */ 367, 436, 1211, 1439, 132, 132, 132, 132, 131, 131, + /* 730 */ 130, 130, 130, 129, 126, 454, 135, 136, 90, 1232, + /* 740 */ 1232, 1067, 1070, 1057, 1057, 133, 133, 134, 134, 134, + /* 750 */ 134, 416, 211, 953, 1173, 1045, 1114, 1114, 498, 551, + /* 760 */ 551, 1208, 1209, 1208, 7, 543, 1574, 1173, 380, 580, + /* 770 */ 1173, 5, 1208, 490, 3, 135, 136, 90, 1232, 1232, + /* 780 */ 1067, 1070, 1057, 1057, 133, 133, 134, 134, 134, 134, + /* 790 */ 580, 517, 19, 19, 431, 132, 132, 132, 132, 131, + /* 800 */ 131, 130, 130, 130, 129, 126, 454, 305, 1208, 437, + /* 810 */ 225, 1208, 389, 19, 19, 273, 290, 375, 520, 370, + /* 820 */ 519, 260, 416, 542, 1572, 553, 1028, 366, 441, 1208, + /* 830 */ 1209, 1208, 906, 1556, 132, 132, 132, 132, 131, 131, + /* 840 */ 130, 130, 130, 129, 126, 454, 135, 136, 90, 1232, + /* 850 */ 1232, 1067, 1070, 1057, 1057, 133, 133, 134, 134, 134, + /* 860 */ 134, 416, 1439, 518, 1285, 1208, 1209, 1208, 1208, 1209, + /* 870 */ 1208, 907, 48, 346, 1572, 1572, 1283, 1631, 1572, 915, + /* 880 */ 580, 129, 126, 454, 110, 135, 136, 90, 1232, 1232, + /* 890 */ 1067, 1070, 1057, 1057, 133, 133, 134, 134, 134, 134, + /* 900 */ 265, 580, 463, 19, 19, 132, 132, 132, 132, 131, + /* 910 */ 131, 130, 130, 130, 129, 126, 454, 1349, 204, 580, + /* 920 */ 463, 462, 50, 47, 19, 19, 49, 438, 1109, 577, + /* 930 */ 501, 565, 416, 432, 108, 1228, 1573, 1558, 380, 205, + /* 940 */ 554, 554, 81, 81, 132, 132, 132, 132, 131, 131, + /* 950 */ 130, 130, 130, 129, 126, 454, 135, 136, 90, 1232, + /* 960 */ 1232, 1067, 1070, 1057, 1057, 133, 133, 134, 134, 134, + /* 970 */ 134, 484, 580, 1208, 580, 1545, 416, 1439, 973, 315, + /* 980 */ 1663, 402, 284, 501, 973, 897, 1573, 1573, 380, 380, + /* 990 */ 1573, 465, 380, 1228, 463, 80, 80, 81, 81, 501, 
+ /* 1000 */ 378, 114, 90, 1232, 1232, 1067, 1070, 1057, 1057, 133, + /* 1010 */ 133, 134, 134, 134, 134, 132, 132, 132, 132, 131, + /* 1020 */ 131, 130, 130, 130, 129, 126, 454, 1208, 1509, 580, + /* 1030 */ 1208, 1209, 1208, 1370, 316, 490, 281, 281, 501, 435, + /* 1040 */ 561, 288, 288, 406, 1344, 475, 349, 298, 433, 577, + /* 1050 */ 580, 565, 81, 81, 577, 378, 565, 975, 390, 132, + /* 1060 */ 132, 132, 132, 131, 131, 130, 130, 130, 129, 126, + /* 1070 */ 454, 231, 117, 81, 81, 287, 287, 231, 287, 287, + /* 1080 */ 580, 1515, 580, 1340, 1208, 1209, 1208, 139, 577, 560, + /* 1090 */ 565, 577, 416, 565, 445, 460, 973, 213, 562, 1515, + /* 1100 */ 1517, 1554, 973, 143, 143, 145, 145, 1372, 314, 482, + /* 1110 */ 448, 974, 416, 854, 855, 856, 135, 136, 90, 1232, + /* 1120 */ 1232, 1067, 1070, 1057, 1057, 133, 133, 134, 134, 134, + /* 1130 */ 134, 361, 416, 401, 1152, 304, 135, 136, 90, 1232, + /* 1140 */ 1232, 1067, 1070, 1057, 1057, 133, 133, 134, 134, 134, + /* 1150 */ 134, 1579, 323, 6, 866, 7, 135, 124, 90, 1232, + /* 1160 */ 1232, 1067, 1070, 1057, 1057, 133, 133, 134, 134, 134, + /* 1170 */ 134, 413, 412, 1515, 212, 132, 132, 132, 132, 131, + /* 1180 */ 131, 130, 130, 130, 129, 126, 454, 415, 118, 1208, + /* 1190 */ 116, 10, 356, 265, 359, 132, 132, 132, 132, 131, + /* 1200 */ 131, 130, 130, 130, 129, 126, 454, 580, 324, 306, + /* 1210 */ 580, 306, 1254, 473, 158, 132, 132, 132, 132, 131, + /* 1220 */ 131, 130, 130, 130, 129, 126, 454, 207, 1228, 1130, + /* 1230 */ 65, 65, 474, 66, 66, 416, 451, 450, 886, 535, + /* 1240 */ 339, 258, 257, 256, 1131, 1237, 1208, 1209, 1208, 327, + /* 1250 */ 1239, 878, 159, 580, 16, 484, 1089, 1044, 1238, 1132, + /* 1260 */ 136, 90, 1232, 1232, 1067, 1070, 1057, 1057, 133, 133, + /* 1270 */ 134, 134, 134, 134, 1033, 580, 81, 81, 1032, 1044, + /* 1280 */ 926, 580, 467, 1240, 580, 1240, 1228, 506, 107, 1439, + /* 1290 */ 927, 6, 580, 414, 1502, 886, 1033, 484, 21, 21, + /* 1300 */ 1032, 336, 1384, 338, 53, 53, 501, 81, 81, 878, + /* 1310 */ 1032, 1032, 1034, 449, 259, 19, 19, 537, 132, 132, + /* 1320 */ 132, 132, 131, 131, 130, 130, 130, 129, 126, 454, + /* 1330 */ 555, 301, 1032, 1032, 1034, 107, 536, 549, 121, 572, + /* 1340 */ 1192, 4, 1130, 1580, 453, 580, 466, 7, 1286, 422, + /* 1350 */ 466, 354, 1439, 580, 522, 575, 548, 1131, 121, 572, + /* 1360 */ 446, 4, 1192, 468, 537, 1184, 1227, 9, 67, 67, + /* 1370 */ 491, 580, 1132, 303, 414, 575, 54, 54, 455, 580, + /* 1380 */ 123, 948, 580, 421, 580, 337, 947, 1383, 580, 236, + /* 1390 */ 569, 580, 1578, 568, 68, 68, 7, 580, 455, 366, + /* 1400 */ 423, 182, 69, 69, 545, 70, 70, 71, 71, 544, + /* 1410 */ 569, 72, 72, 488, 55, 55, 477, 1184, 296, 1044, + /* 1420 */ 56, 56, 296, 497, 545, 119, 119, 414, 1577, 546, + /* 1430 */ 573, 422, 7, 120, 1248, 455, 581, 455, 469, 1044, + /* 1440 */ 1032, 580, 1561, 556, 480, 119, 119, 531, 259, 121, + /* 1450 */ 572, 240, 4, 120, 580, 455, 581, 455, 580, 481, + /* 1460 */ 1032, 580, 156, 580, 57, 57, 575, 580, 286, 229, + /* 1470 */ 414, 340, 1032, 1032, 1034, 1035, 35, 59, 59, 219, + /* 1480 */ 987, 60, 60, 220, 73, 73, 74, 74, 988, 455, + /* 1490 */ 75, 75, 1032, 1032, 1034, 1035, 35, 96, 216, 291, + /* 1500 */ 556, 569, 1192, 318, 399, 399, 398, 276, 396, 580, + /* 1510 */ 489, 863, 478, 1315, 414, 545, 580, 421, 1534, 1148, + /* 1520 */ 544, 403, 1192, 292, 237, 1157, 326, 38, 23, 580, + /* 1530 */ 1044, 580, 20, 20, 325, 299, 119, 119, 164, 76, + /* 1540 */ 76, 1533, 121, 572, 120, 4, 455, 581, 455, 203, + /* 1550 */ 580, 1032, 141, 141, 142, 142, 580, 322, 39, 
575, + /* 1560 */ 345, 1025, 110, 264, 239, 905, 904, 427, 242, 912, + /* 1570 */ 913, 374, 173, 77, 77, 43, 483, 1314, 264, 62, + /* 1580 */ 62, 373, 455, 1032, 1032, 1034, 1035, 35, 1605, 1196, + /* 1590 */ 457, 1096, 238, 291, 569, 163, 1313, 110, 399, 399, + /* 1600 */ 398, 276, 396, 990, 991, 863, 485, 350, 264, 110, + /* 1610 */ 1036, 493, 580, 1192, 507, 1092, 261, 261, 237, 580, + /* 1620 */ 326, 121, 572, 1044, 4, 351, 1380, 417, 325, 119, + /* 1630 */ 119, 952, 319, 571, 355, 78, 78, 120, 575, 455, + /* 1640 */ 581, 455, 79, 79, 1032, 358, 360, 580, 364, 1096, + /* 1650 */ 110, 580, 978, 946, 264, 123, 461, 362, 239, 580, + /* 1660 */ 523, 455, 943, 1108, 123, 1108, 173, 580, 1036, 43, + /* 1670 */ 63, 63, 1328, 569, 168, 168, 1032, 1032, 1034, 1035, + /* 1680 */ 35, 580, 169, 169, 1312, 876, 238, 157, 1593, 580, + /* 1690 */ 86, 86, 369, 89, 572, 379, 4, 1107, 945, 1107, + /* 1700 */ 123, 580, 1044, 1393, 64, 64, 1192, 1438, 119, 119, + /* 1710 */ 575, 580, 82, 82, 567, 580, 120, 165, 455, 581, + /* 1720 */ 455, 417, 1366, 1032, 144, 144, 319, 571, 580, 1378, + /* 1730 */ 566, 502, 279, 455, 83, 83, 1443, 580, 166, 166, + /* 1740 */ 580, 1293, 558, 580, 1284, 569, 580, 12, 580, 1272, + /* 1750 */ 461, 146, 146, 1271, 580, 1032, 1032, 1034, 1035, 35, + /* 1760 */ 140, 140, 1273, 167, 167, 1613, 160, 160, 1363, 150, + /* 1770 */ 150, 149, 149, 311, 1044, 580, 312, 147, 147, 313, + /* 1780 */ 119, 119, 222, 235, 580, 1192, 400, 580, 120, 580, + /* 1790 */ 455, 581, 455, 1196, 457, 1032, 512, 291, 148, 148, + /* 1800 */ 1425, 1616, 399, 399, 398, 276, 396, 85, 85, 863, + /* 1810 */ 87, 87, 84, 84, 557, 580, 294, 580, 1430, 342, + /* 1820 */ 343, 1429, 237, 300, 326, 332, 335, 1032, 1032, 1034, + /* 1830 */ 1035, 35, 325, 348, 407, 487, 226, 1311, 52, 52, + /* 1840 */ 58, 58, 372, 1375, 1506, 570, 1505, 121, 572, 221, + /* 1850 */ 4, 208, 268, 209, 394, 1248, 1553, 1192, 1376, 1374, + /* 1860 */ 1373, 1551, 239, 184, 575, 233, 425, 1245, 95, 218, + /* 1870 */ 173, 1511, 193, 43, 91, 94, 138, 556, 1420, 178, + /* 1880 */ 186, 1426, 13, 331, 471, 1413, 334, 455, 188, 189, + /* 1890 */ 238, 190, 191, 472, 505, 245, 108, 494, 1434, 569, + /* 1900 */ 249, 197, 1432, 405, 479, 1431, 408, 14, 1500, 101, + /* 1910 */ 1522, 500, 201, 280, 251, 503, 357, 1274, 252, 410, + /* 1920 */ 253, 521, 1331, 353, 1330, 417, 1329, 439, 1044, 1322, + /* 1930 */ 319, 571, 103, 1630, 119, 119, 897, 1321, 1629, 227, + /* 1940 */ 443, 1301, 120, 411, 455, 581, 455, 1300, 371, 1032, + /* 1950 */ 1299, 530, 1628, 1598, 461, 444, 309, 310, 377, 266, + /* 1960 */ 267, 1584, 1583, 447, 11, 1398, 1397, 1487, 388, 115, + /* 1970 */ 317, 109, 540, 214, 1354, 386, 42, 393, 1353, 583, + /* 1980 */ 1202, 1032, 1032, 1034, 1035, 35, 392, 278, 275, 277, + /* 1990 */ 584, 1269, 1264, 170, 1538, 418, 419, 1539, 1537, 171, + /* 2000 */ 1536, 154, 307, 223, 183, 224, 850, 456, 172, 215, + /* 2010 */ 88, 1192, 234, 1106, 321, 152, 1104, 329, 185, 174, + /* 2020 */ 1227, 187, 241, 929, 244, 341, 1120, 192, 175, 176, + /* 2030 */ 428, 430, 97, 194, 177, 98, 99, 100, 1123, 246, + /* 2040 */ 1119, 247, 161, 24, 248, 352, 1112, 264, 198, 1242, + /* 2050 */ 250, 499, 199, 15, 865, 504, 373, 254, 508, 513, + /* 2060 */ 200, 516, 102, 25, 179, 365, 26, 895, 104, 368, + /* 2070 */ 162, 908, 105, 308, 524, 106, 1189, 1073, 1159, 17, + /* 2080 */ 228, 283, 27, 1158, 285, 263, 982, 202, 976, 123, + /* 2090 */ 28, 1179, 1183, 29, 30, 1175, 31, 1177, 1164, 41, + /* 2100 */ 32, 206, 552, 33, 110, 1182, 112, 8, 113, 1087, + /* 2110 */ 1074, 
1072, 1076, 34, 1077, 564, 1129, 269, 1128, 270, + /* 2120 */ 36, 18, 1198, 1037, 877, 151, 122, 37, 397, 271, + /* 2130 */ 272, 576, 181, 1197, 1260, 939, 1260, 1621, }; static const YYCODETYPE yy_lookahead[] = { - /* 0 */ 193, 193, 193, 274, 275, 276, 193, 274, 275, 276, - /* 10 */ 193, 223, 219, 225, 206, 210, 211, 212, 193, 19, - /* 20 */ 219, 233, 216, 216, 217, 216, 217, 193, 295, 216, - /* 30 */ 217, 31, 193, 216, 217, 193, 228, 213, 230, 39, - /* 40 */ 206, 216, 217, 43, 44, 45, 46, 47, 48, 49, - /* 50 */ 50, 51, 52, 53, 54, 55, 56, 57, 193, 19, - /* 60 */ 185, 186, 187, 188, 189, 190, 253, 274, 275, 276, - /* 70 */ 195, 193, 197, 193, 261, 274, 275, 276, 253, 204, - /* 80 */ 238, 204, 81, 43, 44, 45, 46, 47, 48, 49, - /* 90 */ 50, 51, 52, 53, 54, 55, 56, 57, 274, 275, - /* 100 */ 276, 262, 102, 103, 104, 105, 106, 107, 108, 109, - /* 110 */ 110, 111, 112, 113, 239, 240, 239, 240, 210, 211, - /* 120 */ 212, 314, 315, 314, 59, 316, 86, 252, 88, 252, - /* 130 */ 19, 314, 315, 256, 257, 113, 25, 72, 296, 138, - /* 140 */ 139, 266, 102, 103, 104, 105, 106, 107, 108, 109, + /* 0 */ 194, 276, 277, 278, 216, 194, 194, 217, 194, 194, + /* 10 */ 194, 194, 224, 194, 194, 276, 277, 278, 204, 19, + /* 20 */ 206, 202, 297, 217, 218, 205, 207, 217, 205, 217, + /* 30 */ 218, 31, 217, 218, 217, 218, 29, 217, 218, 39, + /* 40 */ 33, 217, 220, 43, 44, 45, 46, 47, 48, 49, + /* 50 */ 50, 51, 52, 53, 54, 55, 56, 57, 312, 19, + /* 60 */ 240, 241, 316, 240, 241, 194, 46, 47, 48, 49, + /* 70 */ 22, 254, 65, 253, 254, 255, 253, 194, 255, 194, + /* 80 */ 263, 258, 259, 43, 44, 45, 46, 47, 48, 49, + /* 90 */ 50, 51, 52, 53, 54, 55, 56, 57, 276, 277, + /* 100 */ 278, 285, 102, 103, 104, 105, 106, 107, 108, 109, + /* 110 */ 110, 111, 112, 113, 59, 186, 187, 188, 189, 190, + /* 120 */ 191, 310, 239, 317, 318, 196, 86, 198, 88, 317, + /* 130 */ 19, 319, 317, 318, 205, 264, 25, 211, 212, 213, + /* 140 */ 205, 121, 102, 103, 104, 105, 106, 107, 108, 109, /* 150 */ 110, 111, 112, 113, 43, 44, 45, 46, 47, 48, - /* 160 */ 49, 50, 51, 52, 53, 54, 55, 56, 57, 81, - /* 170 */ 292, 59, 292, 298, 108, 109, 110, 111, 112, 113, - /* 180 */ 69, 116, 117, 118, 72, 106, 107, 193, 111, 112, - /* 190 */ 113, 54, 55, 56, 57, 58, 102, 103, 104, 105, - /* 200 */ 106, 107, 108, 109, 110, 111, 112, 113, 120, 25, - /* 210 */ 216, 217, 145, 102, 103, 104, 105, 106, 107, 108, - /* 220 */ 109, 110, 111, 112, 113, 231, 138, 139, 116, 117, - /* 230 */ 118, 164, 153, 19, 155, 54, 55, 56, 57, 102, + /* 160 */ 49, 50, 51, 52, 53, 54, 55, 56, 57, 240, + /* 170 */ 241, 116, 117, 118, 119, 240, 241, 122, 123, 124, + /* 180 */ 69, 298, 253, 194, 255, 106, 107, 132, 253, 141, + /* 190 */ 255, 54, 55, 56, 57, 58, 207, 268, 102, 103, + /* 200 */ 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, + /* 210 */ 214, 128, 129, 102, 103, 104, 105, 106, 107, 108, + /* 220 */ 109, 110, 111, 112, 113, 134, 25, 136, 137, 300, + /* 230 */ 165, 166, 153, 19, 155, 54, 55, 56, 57, 102, /* 240 */ 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, - /* 250 */ 113, 128, 129, 46, 47, 48, 49, 43, 44, 45, + /* 250 */ 113, 108, 109, 110, 111, 112, 113, 43, 44, 45, /* 260 */ 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, - /* 270 */ 56, 57, 216, 193, 25, 59, 193, 19, 165, 166, - /* 280 */ 193, 67, 24, 102, 103, 104, 105, 106, 107, 108, - /* 290 */ 109, 110, 111, 112, 113, 73, 216, 217, 59, 216, - /* 300 */ 217, 43, 44, 45, 46, 47, 48, 49, 50, 51, + /* 270 */ 56, 57, 276, 277, 278, 113, 194, 19, 22, 23, + /* 280 */ 194, 67, 24, 102, 103, 104, 105, 106, 107, 108, + /* 290 */ 109, 
110, 111, 112, 113, 220, 250, 59, 252, 217, + /* 300 */ 218, 43, 44, 45, 46, 47, 48, 49, 50, 51, /* 310 */ 52, 53, 54, 55, 56, 57, 102, 103, 104, 105, - /* 320 */ 106, 107, 108, 109, 110, 111, 112, 113, 121, 145, - /* 330 */ 59, 193, 116, 117, 118, 119, 273, 204, 122, 123, - /* 340 */ 124, 19, 20, 134, 22, 136, 137, 19, 132, 127, - /* 350 */ 128, 129, 24, 22, 23, 116, 117, 118, 36, 193, + /* 320 */ 106, 107, 108, 109, 110, 111, 112, 113, 106, 107, + /* 330 */ 108, 109, 110, 111, 112, 113, 254, 59, 205, 138, + /* 340 */ 139, 19, 20, 194, 22, 263, 22, 23, 231, 25, + /* 350 */ 72, 276, 277, 278, 116, 117, 118, 101, 36, 76, /* 360 */ 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, - /* 370 */ 112, 113, 239, 240, 311, 312, 215, 106, 107, 241, - /* 380 */ 19, 59, 216, 217, 223, 252, 115, 116, 117, 118, - /* 390 */ 151, 120, 26, 71, 193, 308, 309, 193, 149, 128, - /* 400 */ 313, 216, 269, 81, 43, 44, 45, 46, 47, 48, - /* 410 */ 49, 50, 51, 52, 53, 54, 55, 56, 57, 253, - /* 420 */ 216, 217, 100, 95, 153, 59, 155, 261, 106, 107, - /* 430 */ 25, 193, 101, 193, 193, 231, 114, 25, 116, 117, - /* 440 */ 118, 113, 304, 121, 193, 204, 59, 119, 120, 121, - /* 450 */ 122, 123, 124, 125, 216, 217, 193, 216, 217, 131, - /* 460 */ 138, 139, 230, 102, 103, 104, 105, 106, 107, 108, + /* 370 */ 112, 113, 89, 240, 241, 92, 73, 194, 194, 73, + /* 380 */ 19, 59, 188, 189, 190, 191, 253, 81, 255, 151, + /* 390 */ 196, 25, 198, 71, 116, 117, 118, 311, 312, 205, + /* 400 */ 217, 218, 316, 81, 43, 44, 45, 46, 47, 48, + /* 410 */ 49, 50, 51, 52, 53, 54, 55, 56, 57, 270, + /* 420 */ 22, 23, 100, 25, 59, 101, 138, 139, 106, 107, + /* 430 */ 127, 128, 129, 127, 240, 241, 114, 254, 116, 117, + /* 440 */ 118, 76, 76, 121, 138, 139, 263, 253, 264, 255, + /* 450 */ 205, 275, 87, 19, 89, 89, 194, 92, 92, 199, + /* 460 */ 138, 139, 268, 102, 103, 104, 105, 106, 107, 108, /* 470 */ 109, 110, 111, 112, 113, 153, 154, 155, 156, 157, - /* 480 */ 239, 240, 116, 117, 118, 76, 193, 23, 19, 25, - /* 490 */ 22, 253, 23, 252, 253, 108, 87, 204, 89, 261, - /* 500 */ 198, 92, 261, 116, 117, 118, 193, 306, 307, 216, - /* 510 */ 217, 150, 43, 44, 45, 46, 47, 48, 49, 50, - /* 520 */ 51, 52, 53, 54, 55, 56, 57, 59, 193, 216, - /* 530 */ 217, 19, 239, 240, 283, 23, 106, 107, 108, 109, - /* 540 */ 110, 111, 112, 113, 73, 252, 253, 142, 308, 309, - /* 550 */ 138, 139, 81, 313, 145, 43, 44, 45, 46, 47, + /* 480 */ 81, 116, 117, 118, 129, 240, 241, 224, 19, 226, + /* 490 */ 314, 315, 23, 25, 300, 59, 22, 234, 253, 101, + /* 500 */ 255, 236, 237, 26, 194, 183, 194, 152, 72, 22, + /* 510 */ 145, 150, 43, 44, 45, 46, 47, 48, 49, 50, + /* 520 */ 51, 52, 53, 54, 55, 56, 57, 217, 218, 217, + /* 530 */ 218, 19, 189, 59, 191, 23, 59, 138, 139, 196, + /* 540 */ 135, 198, 232, 283, 232, 140, 59, 287, 205, 275, + /* 550 */ 116, 205, 116, 117, 118, 43, 44, 45, 46, 47, /* 560 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, - /* 570 */ 307, 102, 103, 104, 105, 106, 107, 108, 109, 110, - /* 580 */ 111, 112, 113, 281, 116, 117, 118, 285, 23, 193, - /* 590 */ 25, 119, 59, 193, 122, 123, 124, 59, 127, 203, - /* 600 */ 59, 205, 19, 268, 132, 25, 23, 22, 193, 138, - /* 610 */ 139, 249, 204, 251, 102, 103, 104, 105, 106, 107, + /* 570 */ 194, 102, 103, 104, 105, 106, 107, 108, 109, 110, + /* 580 */ 111, 112, 113, 240, 241, 194, 240, 241, 314, 315, + /* 590 */ 116, 117, 118, 116, 117, 118, 253, 194, 255, 253, + /* 600 */ 59, 255, 19, 116, 117, 118, 23, 22, 217, 218, + /* 610 */ 142, 268, 205, 275, 102, 103, 104, 105, 106, 107, /* 620 */ 108, 109, 110, 111, 112, 113, 43, 44, 
45, 46, /* 630 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - /* 640 */ 57, 19, 22, 23, 59, 23, 25, 239, 240, 116, - /* 650 */ 117, 118, 193, 11, 116, 117, 118, 116, 117, 118, - /* 660 */ 252, 269, 22, 193, 15, 43, 44, 45, 46, 47, + /* 640 */ 57, 19, 194, 300, 59, 23, 119, 240, 241, 122, + /* 650 */ 123, 124, 314, 315, 194, 236, 237, 194, 117, 132, + /* 660 */ 253, 81, 255, 205, 59, 43, 44, 45, 46, 47, /* 670 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, - /* 680 */ 273, 143, 193, 118, 143, 102, 103, 104, 105, 106, - /* 690 */ 107, 108, 109, 110, 111, 112, 113, 76, 118, 59, - /* 700 */ 241, 116, 117, 118, 304, 216, 217, 292, 143, 60, - /* 710 */ 89, 241, 19, 92, 193, 193, 23, 22, 311, 312, - /* 720 */ 231, 101, 22, 143, 102, 103, 104, 105, 106, 107, + /* 680 */ 217, 218, 194, 194, 194, 102, 103, 104, 105, 106, + /* 690 */ 107, 108, 109, 110, 111, 112, 113, 294, 240, 241, + /* 700 */ 120, 116, 117, 118, 59, 194, 217, 218, 211, 212, + /* 710 */ 213, 253, 19, 255, 194, 19, 23, 254, 138, 139, + /* 720 */ 24, 232, 117, 194, 102, 103, 104, 105, 106, 107, /* 730 */ 108, 109, 110, 111, 112, 113, 43, 44, 45, 46, /* 740 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - /* 750 */ 57, 19, 193, 193, 59, 23, 116, 117, 118, 59, - /* 760 */ 201, 21, 241, 304, 193, 206, 127, 128, 129, 193, - /* 770 */ 128, 129, 235, 236, 304, 43, 44, 45, 46, 47, + /* 750 */ 57, 19, 264, 108, 76, 23, 127, 128, 129, 311, + /* 760 */ 312, 116, 117, 118, 316, 87, 306, 89, 308, 194, + /* 770 */ 92, 22, 59, 194, 22, 43, 44, 45, 46, 47, /* 780 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, - /* 790 */ 22, 193, 216, 217, 193, 102, 103, 104, 105, 106, - /* 800 */ 107, 108, 109, 110, 111, 112, 113, 231, 193, 193, - /* 810 */ 193, 116, 117, 118, 216, 217, 116, 117, 118, 226, - /* 820 */ 80, 193, 19, 235, 236, 304, 23, 211, 212, 231, - /* 830 */ 204, 216, 217, 205, 102, 103, 104, 105, 106, 107, + /* 790 */ 194, 95, 217, 218, 265, 102, 103, 104, 105, 106, + /* 800 */ 107, 108, 109, 110, 111, 112, 113, 232, 59, 113, + /* 810 */ 25, 59, 194, 217, 218, 119, 120, 121, 122, 123, + /* 820 */ 124, 125, 19, 145, 194, 194, 23, 131, 232, 116, + /* 830 */ 117, 118, 35, 194, 102, 103, 104, 105, 106, 107, /* 840 */ 108, 109, 110, 111, 112, 113, 43, 44, 45, 46, /* 850 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - /* 860 */ 57, 19, 193, 123, 76, 239, 240, 193, 253, 239, - /* 870 */ 240, 239, 240, 244, 106, 107, 193, 89, 252, 193, - /* 880 */ 92, 59, 252, 254, 252, 43, 44, 45, 46, 47, + /* 860 */ 57, 19, 194, 66, 194, 116, 117, 118, 116, 117, + /* 870 */ 118, 74, 242, 294, 194, 194, 206, 23, 194, 25, + /* 880 */ 194, 111, 112, 113, 25, 43, 44, 45, 46, 47, /* 890 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, - /* 900 */ 284, 161, 216, 217, 193, 102, 103, 104, 105, 106, - /* 910 */ 107, 108, 109, 110, 111, 112, 113, 231, 193, 244, - /* 920 */ 187, 188, 189, 190, 7, 8, 9, 309, 195, 254, - /* 930 */ 197, 313, 19, 127, 128, 129, 262, 204, 22, 117, - /* 940 */ 24, 216, 217, 273, 102, 103, 104, 105, 106, 107, + /* 900 */ 24, 194, 194, 217, 218, 102, 103, 104, 105, 106, + /* 910 */ 107, 108, 109, 110, 111, 112, 113, 241, 232, 194, + /* 920 */ 212, 213, 242, 242, 217, 218, 242, 130, 11, 253, + /* 930 */ 194, 255, 19, 265, 149, 59, 306, 194, 308, 232, + /* 940 */ 309, 310, 217, 218, 102, 103, 104, 105, 106, 107, /* 950 */ 108, 109, 110, 111, 112, 113, 43, 44, 45, 46, /* 960 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - /* 970 */ 57, 193, 239, 240, 193, 59, 19, 188, 253, 190, - /* 980 */ 193, 311, 312, 16, 195, 252, 197, 193, 19, 301, - /* 990 */ 302, 135, 193, 204, 216, 217, 140, 
216, 217, 266, - /* 1000 */ 204, 159, 45, 46, 47, 48, 49, 50, 51, 52, + /* 970 */ 57, 194, 194, 59, 194, 239, 19, 194, 25, 254, + /* 980 */ 303, 304, 23, 194, 25, 126, 306, 306, 308, 308, + /* 990 */ 306, 271, 308, 117, 286, 217, 218, 217, 218, 194, + /* 1000 */ 194, 159, 45, 46, 47, 48, 49, 50, 51, 52, /* 1010 */ 53, 54, 55, 56, 57, 102, 103, 104, 105, 106, - /* 1020 */ 107, 108, 109, 110, 111, 112, 113, 12, 239, 240, - /* 1030 */ 193, 298, 238, 117, 253, 239, 240, 238, 259, 260, - /* 1040 */ 193, 252, 27, 193, 77, 193, 79, 204, 252, 262, - /* 1050 */ 193, 299, 300, 193, 100, 266, 278, 42, 204, 102, + /* 1020 */ 107, 108, 109, 110, 111, 112, 113, 59, 239, 194, + /* 1030 */ 116, 117, 118, 260, 254, 194, 240, 241, 194, 233, + /* 1040 */ 205, 240, 241, 205, 239, 128, 129, 270, 265, 253, + /* 1050 */ 194, 255, 217, 218, 253, 194, 255, 143, 280, 102, /* 1060 */ 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, - /* 1070 */ 113, 117, 159, 216, 217, 121, 216, 217, 63, 193, - /* 1080 */ 193, 193, 239, 240, 115, 116, 193, 298, 73, 240, - /* 1090 */ 238, 231, 19, 239, 240, 252, 22, 24, 211, 212, - /* 1100 */ 263, 252, 216, 217, 216, 217, 252, 153, 154, 155, - /* 1110 */ 253, 193, 19, 144, 213, 268, 43, 44, 45, 46, + /* 1070 */ 113, 118, 159, 217, 218, 240, 241, 118, 240, 241, + /* 1080 */ 194, 194, 194, 239, 116, 117, 118, 22, 253, 254, + /* 1090 */ 255, 253, 19, 255, 233, 194, 143, 24, 263, 212, + /* 1100 */ 213, 194, 143, 217, 218, 217, 218, 261, 262, 271, + /* 1110 */ 254, 143, 19, 7, 8, 9, 43, 44, 45, 46, /* 1120 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - /* 1130 */ 57, 193, 19, 59, 216, 217, 43, 44, 45, 46, + /* 1130 */ 57, 16, 19, 22, 23, 294, 43, 44, 45, 46, /* 1140 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - /* 1150 */ 57, 193, 19, 24, 216, 217, 43, 44, 45, 46, + /* 1150 */ 57, 312, 194, 214, 21, 316, 43, 44, 45, 46, /* 1160 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - /* 1170 */ 57, 284, 193, 208, 209, 102, 103, 104, 105, 106, - /* 1180 */ 107, 108, 109, 110, 111, 112, 113, 286, 59, 193, - /* 1190 */ 232, 117, 291, 193, 193, 102, 103, 104, 105, 106, - /* 1200 */ 107, 108, 109, 110, 111, 112, 113, 193, 204, 22, - /* 1210 */ 23, 193, 25, 66, 193, 102, 103, 104, 105, 106, - /* 1220 */ 107, 108, 109, 110, 111, 112, 113, 193, 193, 193, - /* 1230 */ 216, 217, 85, 193, 238, 19, 16, 216, 217, 238, - /* 1240 */ 193, 94, 193, 239, 240, 231, 117, 268, 35, 116, - /* 1250 */ 216, 217, 216, 217, 22, 23, 252, 25, 208, 209, + /* 1170 */ 57, 106, 107, 286, 194, 102, 103, 104, 105, 106, + /* 1180 */ 107, 108, 109, 110, 111, 112, 113, 207, 158, 59, + /* 1190 */ 160, 22, 77, 24, 79, 102, 103, 104, 105, 106, + /* 1200 */ 107, 108, 109, 110, 111, 112, 113, 194, 194, 229, + /* 1210 */ 194, 231, 101, 80, 22, 102, 103, 104, 105, 106, + /* 1220 */ 107, 108, 109, 110, 111, 112, 113, 288, 59, 12, + /* 1230 */ 217, 218, 293, 217, 218, 19, 106, 107, 59, 19, + /* 1240 */ 16, 127, 128, 129, 27, 115, 116, 117, 118, 194, + /* 1250 */ 120, 59, 22, 194, 24, 194, 123, 100, 128, 42, /* 1260 */ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, - /* 1270 */ 54, 55, 56, 57, 193, 193, 19, 5, 59, 66, - /* 1280 */ 193, 263, 10, 11, 12, 13, 14, 74, 101, 17, - /* 1290 */ 193, 46, 193, 146, 193, 76, 213, 77, 263, 79, - /* 1300 */ 12, 260, 30, 46, 32, 264, 87, 193, 89, 29, - /* 1310 */ 263, 92, 40, 33, 232, 27, 193, 108, 102, 103, + /* 1270 */ 54, 55, 56, 57, 117, 194, 217, 218, 121, 100, + /* 1280 */ 63, 194, 245, 153, 194, 155, 117, 19, 115, 194, + /* 1290 */ 73, 214, 194, 256, 161, 116, 117, 194, 217, 218, + /* 1300 */ 121, 77, 194, 79, 
217, 218, 194, 217, 218, 117, + /* 1310 */ 153, 154, 155, 254, 46, 217, 218, 144, 102, 103, /* 1320 */ 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, - /* 1330 */ 42, 138, 139, 101, 193, 116, 117, 118, 19, 20, - /* 1340 */ 255, 22, 70, 130, 135, 65, 256, 257, 193, 140, - /* 1350 */ 78, 63, 193, 81, 193, 36, 193, 216, 217, 193, - /* 1360 */ 115, 193, 263, 193, 145, 268, 59, 48, 193, 193, - /* 1370 */ 98, 193, 115, 193, 291, 216, 217, 193, 59, 216, - /* 1380 */ 217, 161, 216, 217, 216, 217, 216, 217, 131, 193, - /* 1390 */ 71, 193, 216, 217, 216, 217, 216, 217, 193, 260, - /* 1400 */ 216, 217, 19, 264, 85, 133, 244, 100, 193, 90, - /* 1410 */ 138, 139, 216, 217, 216, 217, 254, 244, 193, 100, - /* 1420 */ 193, 216, 217, 116, 117, 106, 107, 254, 121, 193, - /* 1430 */ 115, 216, 217, 114, 162, 116, 117, 118, 115, 244, - /* 1440 */ 121, 216, 217, 216, 217, 193, 309, 193, 31, 254, - /* 1450 */ 313, 309, 216, 217, 309, 313, 39, 193, 313, 309, - /* 1460 */ 153, 154, 155, 313, 193, 150, 25, 144, 216, 217, - /* 1470 */ 216, 217, 153, 154, 155, 156, 157, 0, 1, 2, - /* 1480 */ 216, 217, 5, 149, 150, 22, 193, 10, 11, 12, - /* 1490 */ 13, 14, 193, 158, 17, 160, 193, 19, 20, 116, - /* 1500 */ 22, 25, 193, 24, 22, 193, 24, 30, 226, 32, - /* 1510 */ 19, 20, 226, 22, 36, 193, 53, 40, 193, 216, - /* 1520 */ 217, 193, 23, 193, 25, 216, 217, 36, 216, 217, - /* 1530 */ 193, 99, 193, 193, 22, 193, 193, 59, 216, 217, - /* 1540 */ 193, 216, 217, 193, 216, 217, 193, 70, 129, 71, - /* 1550 */ 59, 129, 193, 216, 217, 78, 216, 217, 81, 216, - /* 1560 */ 217, 193, 71, 85, 193, 133, 193, 126, 90, 216, - /* 1570 */ 217, 152, 258, 61, 152, 98, 85, 193, 100, 193, - /* 1580 */ 23, 90, 25, 121, 106, 107, 23, 216, 217, 216, - /* 1590 */ 217, 100, 114, 131, 116, 117, 118, 106, 107, 121, - /* 1600 */ 216, 217, 216, 217, 193, 114, 193, 116, 117, 118, - /* 1610 */ 133, 22, 121, 193, 59, 138, 139, 193, 142, 193, - /* 1620 */ 141, 23, 23, 25, 25, 120, 121, 216, 217, 216, - /* 1630 */ 217, 153, 154, 155, 156, 157, 216, 217, 19, 162, - /* 1640 */ 216, 217, 216, 217, 153, 154, 155, 156, 157, 1, - /* 1650 */ 2, 193, 59, 5, 19, 20, 318, 22, 10, 11, - /* 1660 */ 12, 13, 14, 193, 59, 17, 193, 23, 23, 25, - /* 1670 */ 25, 36, 117, 193, 216, 217, 193, 23, 30, 25, - /* 1680 */ 32, 19, 20, 23, 22, 25, 216, 217, 40, 216, - /* 1690 */ 217, 7, 8, 23, 59, 25, 83, 84, 36, 23, - /* 1700 */ 193, 25, 23, 23, 25, 25, 71, 153, 145, 155, - /* 1710 */ 117, 153, 23, 155, 25, 23, 97, 25, 70, 193, - /* 1720 */ 193, 59, 117, 236, 193, 193, 78, 193, 193, 81, - /* 1730 */ 141, 193, 193, 71, 193, 100, 288, 287, 242, 255, - /* 1740 */ 255, 106, 107, 108, 255, 255, 98, 243, 297, 114, - /* 1750 */ 214, 116, 117, 118, 245, 191, 121, 271, 293, 267, - /* 1760 */ 267, 246, 100, 246, 245, 271, 271, 293, 106, 107, - /* 1770 */ 220, 271, 229, 225, 249, 219, 114, 259, 116, 117, - /* 1780 */ 118, 133, 259, 121, 219, 219, 138, 139, 153, 154, - /* 1790 */ 155, 156, 157, 280, 249, 243, 19, 20, 245, 22, - /* 1800 */ 196, 259, 140, 259, 60, 297, 141, 297, 200, 200, - /* 1810 */ 162, 38, 200, 36, 294, 153, 154, 155, 156, 157, - /* 1820 */ 151, 150, 294, 283, 22, 148, 250, 145, 249, 43, - /* 1830 */ 270, 272, 250, 249, 234, 18, 59, 200, 237, 237, - /* 1840 */ 237, 18, 199, 237, 149, 272, 272, 270, 71, 234, - /* 1850 */ 200, 234, 199, 246, 158, 62, 19, 20, 290, 22, - /* 1860 */ 246, 289, 246, 200, 246, 199, 22, 221, 200, 115, - /* 1870 */ 221, 199, 199, 36, 200, 218, 218, 100, 64, 218, - /* 1880 */ 22, 126, 227, 106, 107, 218, 227, 165, 224, 24, - /* 1890 */ 312, 114, 305, 116, 
117, 118, 59, 113, 121, 224, - /* 1900 */ 200, 91, 221, 220, 218, 218, 218, 282, 71, 282, - /* 1910 */ 82, 221, 317, 317, 22, 277, 265, 140, 200, 158, - /* 1920 */ 279, 265, 147, 25, 146, 202, 13, 194, 250, 194, - /* 1930 */ 153, 154, 155, 156, 157, 250, 6, 100, 192, 192, - /* 1940 */ 249, 248, 192, 106, 107, 247, 207, 246, 207, 213, - /* 1950 */ 303, 114, 300, 116, 117, 118, 213, 4, 121, 222, - /* 1960 */ 222, 213, 213, 3, 207, 214, 214, 22, 163, 213, - /* 1970 */ 15, 23, 16, 23, 139, 151, 130, 25, 24, 20, - /* 1980 */ 142, 16, 1, 144, 142, 130, 130, 61, 37, 53, - /* 1990 */ 153, 154, 155, 156, 157, 53, 151, 303, 130, 53, - /* 2000 */ 53, 116, 34, 141, 1, 5, 22, 68, 115, 161, - /* 2010 */ 25, 68, 75, 41, 141, 115, 24, 20, 19, 131, - /* 2020 */ 125, 23, 59, 22, 67, 67, 22, 22, 22, 96, - /* 2030 */ 28, 24, 22, 67, 23, 149, 22, 25, 23, 23, - /* 2040 */ 23, 22, 141, 34, 23, 37, 97, 23, 116, 22, - /* 2050 */ 143, 25, 34, 75, 34, 88, 34, 34, 75, 34, - /* 2060 */ 86, 23, 93, 34, 22, 24, 34, 142, 25, 25, - /* 2070 */ 142, 23, 44, 23, 23, 23, 23, 11, 23, 25, - /* 2080 */ 22, 22, 22, 1, 23, 23, 22, 22, 25, 15, - /* 2090 */ 1, 23, 25, 319, 319, 319, 135, 319, 319, 319, - /* 2100 */ 319, 319, 319, 141, 319, 141, 319, 319, 319, 319, - /* 2110 */ 319, 141, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2120 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2130 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2140 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2150 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2160 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2170 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2180 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2190 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2200 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2210 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2220 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2230 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2240 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2250 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2260 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2270 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2280 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2290 */ 319, 319, 319, 319, 319, 319, 319, + /* 1330 */ 232, 270, 153, 154, 155, 115, 116, 66, 19, 20, + /* 1340 */ 183, 22, 12, 312, 254, 194, 262, 316, 209, 210, + /* 1350 */ 266, 239, 194, 194, 108, 36, 85, 27, 19, 20, + /* 1360 */ 265, 22, 183, 245, 144, 94, 25, 48, 217, 218, + /* 1370 */ 293, 194, 42, 270, 256, 36, 217, 218, 59, 194, + /* 1380 */ 25, 135, 194, 115, 194, 161, 140, 194, 194, 15, + /* 1390 */ 71, 194, 312, 63, 217, 218, 316, 194, 59, 131, + /* 1400 */ 301, 302, 217, 218, 85, 217, 218, 217, 218, 90, + /* 1410 */ 71, 217, 218, 19, 217, 218, 245, 146, 262, 100, + /* 1420 */ 217, 218, 266, 265, 85, 106, 107, 256, 312, 90, + /* 1430 */ 209, 210, 316, 114, 60, 116, 117, 118, 194, 100, + /* 1440 */ 121, 194, 194, 145, 115, 106, 107, 19, 46, 19, + /* 1450 */ 20, 24, 22, 114, 194, 116, 117, 118, 194, 245, + /* 1460 */ 121, 194, 164, 194, 217, 218, 36, 194, 258, 259, + /* 1470 */ 256, 194, 153, 154, 155, 156, 157, 217, 218, 150, + /* 1480 */ 31, 217, 218, 142, 217, 218, 217, 218, 39, 59, + /* 1490 */ 217, 218, 153, 154, 155, 156, 157, 149, 150, 5, + /* 1500 */ 145, 71, 183, 245, 10, 11, 12, 13, 14, 194, + /* 1510 */ 116, 17, 129, 227, 256, 85, 
194, 115, 194, 23, + /* 1520 */ 90, 25, 183, 99, 30, 97, 32, 22, 22, 194, + /* 1530 */ 100, 194, 217, 218, 40, 152, 106, 107, 23, 217, + /* 1540 */ 218, 194, 19, 20, 114, 22, 116, 117, 118, 257, + /* 1550 */ 194, 121, 217, 218, 217, 218, 194, 133, 53, 36, + /* 1560 */ 23, 23, 25, 25, 70, 120, 121, 61, 141, 7, + /* 1570 */ 8, 121, 78, 217, 218, 81, 23, 227, 25, 217, + /* 1580 */ 218, 131, 59, 153, 154, 155, 156, 157, 0, 1, + /* 1590 */ 2, 59, 98, 5, 71, 23, 227, 25, 10, 11, + /* 1600 */ 12, 13, 14, 83, 84, 17, 23, 23, 25, 25, + /* 1610 */ 59, 194, 194, 183, 23, 23, 25, 25, 30, 194, + /* 1620 */ 32, 19, 20, 100, 22, 194, 194, 133, 40, 106, + /* 1630 */ 107, 108, 138, 139, 194, 217, 218, 114, 36, 116, + /* 1640 */ 117, 118, 217, 218, 121, 194, 194, 194, 23, 117, + /* 1650 */ 25, 194, 23, 23, 25, 25, 162, 194, 70, 194, + /* 1660 */ 145, 59, 23, 153, 25, 155, 78, 194, 117, 81, + /* 1670 */ 217, 218, 194, 71, 217, 218, 153, 154, 155, 156, + /* 1680 */ 157, 194, 217, 218, 194, 23, 98, 25, 321, 194, + /* 1690 */ 217, 218, 194, 19, 20, 194, 22, 153, 23, 155, + /* 1700 */ 25, 194, 100, 194, 217, 218, 183, 194, 106, 107, + /* 1710 */ 36, 194, 217, 218, 237, 194, 114, 243, 116, 117, + /* 1720 */ 118, 133, 194, 121, 217, 218, 138, 139, 194, 194, + /* 1730 */ 194, 290, 289, 59, 217, 218, 194, 194, 217, 218, + /* 1740 */ 194, 194, 140, 194, 194, 71, 194, 244, 194, 194, + /* 1750 */ 162, 217, 218, 194, 194, 153, 154, 155, 156, 157, + /* 1760 */ 217, 218, 194, 217, 218, 194, 217, 218, 257, 217, + /* 1770 */ 218, 217, 218, 257, 100, 194, 257, 217, 218, 257, + /* 1780 */ 106, 107, 215, 299, 194, 183, 192, 194, 114, 194, + /* 1790 */ 116, 117, 118, 1, 2, 121, 221, 5, 217, 218, + /* 1800 */ 273, 197, 10, 11, 12, 13, 14, 217, 218, 17, + /* 1810 */ 217, 218, 217, 218, 140, 194, 246, 194, 273, 295, + /* 1820 */ 247, 273, 30, 247, 32, 269, 269, 153, 154, 155, + /* 1830 */ 156, 157, 40, 246, 273, 295, 230, 226, 217, 218, + /* 1840 */ 217, 218, 220, 261, 220, 282, 220, 19, 20, 244, + /* 1850 */ 22, 250, 141, 250, 246, 60, 201, 183, 261, 261, + /* 1860 */ 261, 201, 70, 299, 36, 299, 201, 38, 151, 150, + /* 1870 */ 78, 285, 22, 81, 296, 296, 148, 145, 251, 43, + /* 1880 */ 235, 274, 272, 250, 18, 251, 250, 59, 238, 238, + /* 1890 */ 98, 238, 238, 201, 18, 200, 149, 201, 235, 71, + /* 1900 */ 200, 235, 274, 247, 247, 274, 247, 272, 247, 158, + /* 1910 */ 292, 62, 22, 201, 200, 222, 201, 201, 200, 222, + /* 1920 */ 200, 115, 219, 291, 219, 133, 219, 64, 100, 228, + /* 1930 */ 138, 139, 22, 225, 106, 107, 126, 228, 225, 165, + /* 1940 */ 24, 219, 114, 222, 116, 117, 118, 221, 219, 121, + /* 1950 */ 219, 307, 219, 315, 162, 113, 284, 284, 222, 201, + /* 1960 */ 91, 320, 320, 82, 22, 267, 267, 279, 201, 158, + /* 1970 */ 281, 147, 146, 249, 251, 250, 25, 247, 251, 203, + /* 1980 */ 13, 153, 154, 155, 156, 157, 248, 6, 195, 195, + /* 1990 */ 193, 193, 193, 208, 214, 305, 305, 214, 214, 208, + /* 2000 */ 214, 223, 223, 215, 302, 215, 4, 3, 208, 22, + /* 2010 */ 214, 183, 15, 23, 163, 16, 23, 139, 151, 130, + /* 2020 */ 25, 142, 24, 20, 144, 16, 1, 142, 130, 130, + /* 2030 */ 61, 37, 53, 151, 130, 53, 53, 53, 116, 34, + /* 2040 */ 1, 141, 5, 22, 115, 161, 68, 25, 68, 75, + /* 2050 */ 141, 41, 115, 24, 20, 19, 131, 125, 67, 67, + /* 2060 */ 22, 96, 22, 22, 37, 23, 22, 59, 22, 24, + /* 2070 */ 23, 28, 149, 67, 22, 25, 23, 23, 23, 22, + /* 2080 */ 141, 23, 34, 97, 23, 34, 116, 22, 143, 25, + /* 2090 */ 34, 75, 75, 34, 34, 88, 34, 86, 23, 22, + /* 2100 */ 34, 25, 24, 34, 25, 93, 142, 44, 142, 23, + /* 2110 */ 23, 23, 23, 22, 11, 
25, 23, 25, 23, 22, + /* 2120 */ 22, 22, 1, 23, 23, 23, 22, 22, 15, 141, + /* 2130 */ 141, 25, 25, 1, 322, 135, 322, 141, 322, 322, + /* 2140 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, + /* 2150 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, + /* 2160 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, + /* 2170 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, + /* 2180 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, + /* 2190 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, + /* 2200 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, + /* 2210 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, + /* 2220 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, + /* 2230 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, + /* 2240 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, + /* 2250 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, + /* 2260 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, + /* 2270 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, + /* 2280 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, + /* 2290 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, + /* 2300 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, + /* 2310 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, + /* 2320 */ 322, 322, 322, 322, }; -#define YY_SHIFT_COUNT (582) +#define YY_SHIFT_COUNT (586) #define YY_SHIFT_MIN (0) -#define YY_SHIFT_MAX (2089) +#define YY_SHIFT_MAX (2132) static const unsigned short int yy_shift_ofst[] = { - /* 0 */ 1648, 1477, 1272, 322, 322, 1, 1319, 1478, 1491, 1837, - /* 10 */ 1837, 1837, 471, 0, 0, 214, 1093, 1837, 1837, 1837, - /* 20 */ 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, - /* 30 */ 1837, 271, 271, 1219, 1219, 216, 88, 1, 1, 1, - /* 40 */ 1, 1, 40, 111, 258, 361, 469, 512, 583, 622, - /* 50 */ 693, 732, 803, 842, 913, 1073, 1093, 1093, 1093, 1093, - /* 60 */ 1093, 1093, 1093, 1093, 1093, 1093, 1093, 1093, 1093, 1093, - /* 70 */ 1093, 1093, 1093, 1093, 1113, 1093, 1216, 957, 957, 1635, - /* 80 */ 1662, 1777, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, - /* 90 */ 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, - /* 100 */ 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, - /* 110 */ 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, - /* 120 */ 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, - /* 130 */ 1837, 137, 181, 181, 181, 181, 181, 181, 181, 94, - /* 140 */ 430, 66, 65, 112, 366, 533, 533, 740, 1257, 533, - /* 150 */ 533, 79, 79, 533, 412, 412, 412, 77, 412, 123, - /* 160 */ 113, 113, 113, 22, 22, 2112, 2112, 328, 328, 328, - /* 170 */ 239, 468, 468, 468, 468, 1015, 1015, 409, 366, 1187, - /* 180 */ 1232, 533, 533, 533, 533, 533, 533, 533, 533, 533, - /* 190 */ 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, - /* 200 */ 533, 969, 621, 621, 533, 642, 788, 788, 1133, 1133, - /* 210 */ 822, 822, 67, 1193, 2112, 2112, 2112, 2112, 2112, 2112, - /* 220 */ 2112, 1307, 954, 954, 585, 472, 640, 387, 695, 538, - /* 230 */ 541, 700, 533, 533, 533, 533, 533, 533, 533, 533, - /* 240 */ 533, 533, 222, 533, 533, 533, 533, 533, 533, 533, - /* 250 */ 533, 533, 533, 533, 533, 1213, 1213, 1213, 533, 533, - /* 260 */ 533, 565, 533, 533, 533, 916, 1147, 533, 533, 1288, - /* 270 */ 533, 533, 533, 533, 533, 533, 533, 533, 639, 1280, - /* 280 */ 209, 1129, 1129, 1129, 1129, 580, 209, 209, 1209, 768, - /* 290 */ 917, 649, 1315, 1334, 405, 1334, 1383, 249, 1315, 1315, - /* 300 */ 249, 1315, 405, 1383, 1441, 464, 1245, 1417, 1417, 1417, - /* 310 */ 1323, 1323, 1323, 
1323, 184, 184, 1335, 1476, 856, 1482, - /* 320 */ 1744, 1744, 1665, 1665, 1773, 1773, 1665, 1669, 1671, 1802, - /* 330 */ 1677, 1682, 1786, 1677, 1682, 1817, 1817, 1817, 1817, 1665, - /* 340 */ 1823, 1695, 1671, 1671, 1695, 1802, 1786, 1695, 1786, 1695, - /* 350 */ 1665, 1823, 1696, 1793, 1665, 1823, 1844, 1665, 1823, 1665, - /* 360 */ 1823, 1844, 1754, 1754, 1754, 1814, 1858, 1858, 1844, 1754, - /* 370 */ 1755, 1754, 1814, 1754, 1754, 1722, 1865, 1784, 1784, 1844, - /* 380 */ 1665, 1810, 1810, 1828, 1828, 1677, 1682, 1892, 1665, 1761, - /* 390 */ 1677, 1775, 1778, 1695, 1898, 1913, 1913, 1930, 1930, 1930, - /* 400 */ 2112, 2112, 2112, 2112, 2112, 2112, 2112, 2112, 2112, 2112, - /* 410 */ 2112, 2112, 2112, 2112, 2112, 207, 1220, 331, 620, 967, - /* 420 */ 806, 1074, 1499, 1432, 1463, 1479, 1419, 1422, 1557, 1512, - /* 430 */ 1598, 1599, 1644, 1645, 1654, 1660, 1555, 1505, 1684, 1462, - /* 440 */ 1670, 1563, 1619, 1593, 1676, 1679, 1613, 1680, 1554, 1558, - /* 450 */ 1689, 1692, 1605, 1589, 1953, 1960, 1945, 1805, 1955, 1956, - /* 460 */ 1948, 1950, 1835, 1824, 1846, 1952, 1952, 1954, 1838, 1959, - /* 470 */ 1839, 1965, 1981, 1842, 1855, 1952, 1856, 1926, 1951, 1952, - /* 480 */ 1845, 1936, 1942, 1946, 1947, 1868, 1885, 1968, 1862, 2003, - /* 490 */ 2000, 1984, 1893, 1848, 1939, 1985, 1943, 1937, 1972, 1873, - /* 500 */ 1900, 1992, 1997, 1999, 1888, 1895, 2001, 1957, 2004, 2005, - /* 510 */ 1998, 2006, 1958, 1963, 2007, 1933, 2002, 2010, 1966, 2008, - /* 520 */ 2011, 2009, 1886, 2014, 2015, 2016, 2012, 2017, 2019, 1949, - /* 530 */ 1901, 2021, 2024, 1932, 2018, 2027, 1907, 2026, 2020, 2022, - /* 540 */ 2023, 2025, 1967, 1978, 1974, 2028, 1983, 1969, 2029, 2038, - /* 550 */ 2042, 2041, 2043, 2044, 2032, 1925, 1928, 2048, 2026, 2050, - /* 560 */ 2051, 2052, 2053, 2054, 2055, 2058, 2066, 2059, 2060, 2061, - /* 570 */ 2062, 2064, 2065, 2063, 1961, 1962, 1964, 1970, 2067, 2068, - /* 580 */ 2074, 2082, 2089, + /* 0 */ 1792, 1588, 1494, 322, 322, 399, 306, 1319, 1339, 1430, + /* 10 */ 1828, 1828, 1828, 580, 399, 399, 399, 399, 399, 0, + /* 20 */ 0, 214, 1093, 1828, 1828, 1828, 1828, 1828, 1828, 1828, + /* 30 */ 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1130, 1130, + /* 40 */ 365, 365, 55, 278, 436, 713, 713, 201, 201, 201, + /* 50 */ 201, 40, 111, 258, 361, 469, 512, 583, 622, 693, + /* 60 */ 732, 803, 842, 913, 1073, 1093, 1093, 1093, 1093, 1093, + /* 70 */ 1093, 1093, 1093, 1093, 1093, 1093, 1093, 1093, 1093, 1093, + /* 80 */ 1093, 1093, 1093, 1113, 1093, 1216, 957, 957, 1523, 1602, + /* 90 */ 1674, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, + /* 100 */ 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, + /* 110 */ 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, + /* 120 */ 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, + /* 130 */ 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, + /* 140 */ 137, 181, 181, 181, 181, 181, 181, 181, 96, 222, + /* 150 */ 143, 477, 713, 1133, 1268, 713, 713, 79, 79, 713, + /* 160 */ 770, 83, 65, 65, 65, 288, 162, 162, 2138, 2138, + /* 170 */ 696, 696, 696, 238, 474, 474, 474, 474, 1217, 1217, + /* 180 */ 678, 477, 324, 398, 713, 713, 713, 713, 713, 713, + /* 190 */ 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, + /* 200 */ 713, 713, 713, 1220, 366, 366, 713, 917, 283, 283, + /* 210 */ 434, 434, 605, 605, 1298, 2138, 2138, 2138, 2138, 2138, + /* 220 */ 2138, 2138, 1179, 1157, 1157, 487, 527, 585, 645, 749, + /* 230 */ 914, 968, 752, 713, 713, 713, 713, 713, 713, 713, + /* 240 */ 713, 713, 713, 303, 
713, 713, 713, 713, 713, 713, + /* 250 */ 713, 713, 713, 713, 713, 713, 797, 797, 797, 713, + /* 260 */ 713, 713, 959, 713, 713, 713, 1169, 1271, 713, 713, + /* 270 */ 1330, 713, 713, 713, 713, 713, 713, 713, 713, 629, + /* 280 */ 7, 91, 876, 876, 876, 876, 953, 91, 91, 1246, + /* 290 */ 1065, 1106, 1374, 1329, 1348, 468, 1348, 1394, 785, 1329, + /* 300 */ 1329, 785, 1329, 468, 1394, 859, 854, 1402, 1449, 1449, + /* 310 */ 1449, 1173, 1173, 1173, 1173, 1355, 1355, 1030, 1341, 405, + /* 320 */ 1230, 1795, 1795, 1711, 1711, 1829, 1829, 1711, 1717, 1719, + /* 330 */ 1850, 1728, 1732, 1836, 1728, 1732, 1866, 1866, 1866, 1866, + /* 340 */ 1711, 1876, 1747, 1719, 1719, 1747, 1850, 1836, 1747, 1836, + /* 350 */ 1747, 1711, 1876, 1751, 1849, 1711, 1876, 1890, 1711, 1876, + /* 360 */ 1711, 1876, 1890, 1806, 1806, 1806, 1863, 1910, 1910, 1890, + /* 370 */ 1806, 1810, 1806, 1863, 1806, 1806, 1774, 1916, 1842, 1842, + /* 380 */ 1890, 1711, 1869, 1869, 1881, 1881, 1728, 1732, 1942, 1711, + /* 390 */ 1811, 1728, 1824, 1826, 1747, 1951, 1967, 1967, 1981, 1981, + /* 400 */ 1981, 2138, 2138, 2138, 2138, 2138, 2138, 2138, 2138, 2138, + /* 410 */ 2138, 2138, 2138, 2138, 2138, 2138, 20, 1224, 256, 1111, + /* 420 */ 1115, 1114, 1192, 1496, 1424, 1505, 1427, 355, 1383, 1537, + /* 430 */ 1506, 1538, 1553, 1583, 1584, 1591, 1625, 541, 1445, 1562, + /* 440 */ 1450, 1572, 1515, 1428, 1532, 1592, 1629, 1520, 1630, 1639, + /* 450 */ 1510, 1544, 1662, 1675, 1551, 48, 2002, 2004, 1987, 1851, + /* 460 */ 1997, 1999, 1990, 1993, 1878, 1867, 1889, 1995, 1995, 1998, + /* 470 */ 1879, 2003, 1880, 2009, 2025, 1885, 1898, 1995, 1899, 1969, + /* 480 */ 1994, 1995, 1882, 1979, 1982, 1983, 1984, 1904, 1922, 2005, + /* 490 */ 1900, 2039, 2037, 2021, 1929, 1884, 1978, 2022, 1980, 1974, + /* 500 */ 2010, 1909, 1937, 2029, 2034, 2036, 1925, 1932, 2038, 1991, + /* 510 */ 2040, 2041, 2042, 2044, 1992, 2008, 2045, 1965, 2043, 2046, + /* 520 */ 2006, 2027, 2047, 2048, 1923, 2052, 2053, 2054, 2050, 2055, + /* 530 */ 2057, 1986, 1939, 2058, 2061, 1970, 2051, 2065, 1945, 2064, + /* 540 */ 2056, 2059, 2060, 2062, 2007, 2016, 2011, 2063, 2017, 2012, + /* 550 */ 2066, 2075, 2077, 2078, 2076, 2079, 2069, 1964, 1966, 2086, + /* 560 */ 2064, 2087, 2088, 2089, 2091, 2090, 2093, 2092, 2095, 2097, + /* 570 */ 2103, 2098, 2099, 2100, 2101, 2104, 2105, 2106, 2000, 1988, + /* 580 */ 1989, 1996, 2107, 2102, 2113, 2121, 2132, }; -#define YY_REDUCE_COUNT (414) -#define YY_REDUCE_MIN (-271) -#define YY_REDUCE_MAX (1757) +#define YY_REDUCE_COUNT (415) +#define YY_REDUCE_MIN (-275) +#define YY_REDUCE_MAX (1800) static const short yy_reduce_ofst[] = { - /* 0 */ -125, 733, 789, 241, 293, -123, -193, -191, -183, -187, - /* 10 */ 166, 238, 133, -207, -199, -267, -176, -6, 204, 489, - /* 20 */ 576, 598, -175, 686, 860, 615, 725, 1014, 778, 781, - /* 30 */ 857, 616, 887, 87, 240, -192, 408, 626, 796, 843, - /* 40 */ 854, 1004, -271, -271, -271, -271, -271, -271, -271, -271, - /* 50 */ -271, -271, -271, -271, -271, -271, -271, -271, -271, -271, - /* 60 */ -271, -271, -271, -271, -271, -271, -271, -271, -271, -271, - /* 70 */ -271, -271, -271, -271, -271, -271, -271, -271, -271, 80, - /* 80 */ 83, 313, 886, 888, 918, 938, 1021, 1034, 1036, 1141, - /* 90 */ 1159, 1163, 1166, 1168, 1170, 1176, 1178, 1180, 1184, 1196, - /* 100 */ 1198, 1205, 1215, 1225, 1227, 1236, 1252, 1254, 1264, 1303, - /* 110 */ 1309, 1312, 1322, 1325, 1328, 1337, 1340, 1343, 1353, 1371, - /* 120 */ 1373, 1384, 1386, 1411, 1413, 1420, 1424, 1426, 1458, 1470, - /* 130 */ 1473, -271, -271, -271, 
-271, -271, -271, -271, -271, -271, - /* 140 */ -271, -271, 138, 459, 396, -158, 470, 302, -212, 521, - /* 150 */ 201, -195, -92, 559, 630, 632, 630, -271, 632, 901, - /* 160 */ 63, 407, 670, -271, -271, -271, -271, 161, 161, 161, - /* 170 */ 251, 335, 847, 979, 1097, 537, 588, 618, 628, 688, - /* 180 */ 688, -166, -161, 674, 787, 794, 799, 852, 996, -122, - /* 190 */ 837, -120, 1018, 1035, 415, 1047, 1001, 958, 1082, 400, - /* 200 */ 1099, 779, 1137, 1142, 263, 1083, 1145, 1150, 1041, 1139, - /* 210 */ 965, 1050, 362, 849, 752, 629, 675, 1162, 1173, 1090, - /* 220 */ 1195, -194, 56, 185, -135, 232, 522, 560, 571, 601, - /* 230 */ 617, 669, 683, 711, 850, 893, 1000, 1040, 1049, 1081, - /* 240 */ 1087, 1101, 392, 1114, 1123, 1155, 1161, 1175, 1271, 1293, - /* 250 */ 1299, 1330, 1339, 1342, 1347, 593, 1282, 1286, 1350, 1359, - /* 260 */ 1368, 1314, 1480, 1483, 1507, 1085, 1338, 1526, 1527, 1487, - /* 270 */ 1531, 560, 1532, 1534, 1535, 1538, 1539, 1541, 1448, 1450, - /* 280 */ 1496, 1484, 1485, 1489, 1490, 1314, 1496, 1496, 1504, 1536, - /* 290 */ 1564, 1451, 1486, 1492, 1509, 1493, 1465, 1515, 1494, 1495, - /* 300 */ 1517, 1500, 1519, 1474, 1550, 1543, 1548, 1556, 1565, 1566, - /* 310 */ 1518, 1523, 1542, 1544, 1525, 1545, 1513, 1553, 1552, 1604, - /* 320 */ 1508, 1510, 1608, 1609, 1520, 1528, 1612, 1540, 1559, 1560, - /* 330 */ 1576, 1579, 1600, 1582, 1584, 1601, 1602, 1603, 1606, 1637, - /* 340 */ 1643, 1607, 1573, 1574, 1614, 1577, 1615, 1616, 1617, 1618, - /* 350 */ 1650, 1653, 1568, 1572, 1663, 1666, 1646, 1668, 1672, 1674, - /* 360 */ 1673, 1649, 1657, 1658, 1661, 1655, 1664, 1675, 1681, 1667, - /* 370 */ 1683, 1686, 1659, 1687, 1688, 1578, 1587, 1625, 1627, 1690, - /* 380 */ 1700, 1595, 1596, 1651, 1656, 1678, 1691, 1638, 1718, 1641, - /* 390 */ 1685, 1693, 1698, 1701, 1723, 1733, 1735, 1746, 1747, 1750, - /* 400 */ 1647, 1694, 1652, 1739, 1736, 1743, 1748, 1749, 1741, 1737, - /* 410 */ 1738, 1751, 1752, 1756, 1757, + /* 0 */ -71, 194, 343, 835, -180, -177, 838, -194, -188, -185, + /* 10 */ -183, 82, 183, -65, 133, 245, 346, 407, 458, -178, + /* 20 */ 75, -275, -4, 310, 312, 489, 575, 596, 463, 686, + /* 30 */ 707, 725, 780, 1098, 856, 778, 1059, 1090, 708, 887, + /* 40 */ 86, 448, 980, 630, 680, 681, 684, 796, 801, 796, + /* 50 */ 801, -261, -261, -261, -261, -261, -261, -261, -261, -261, + /* 60 */ -261, -261, -261, -261, -261, -261, -261, -261, -261, -261, + /* 70 */ -261, -261, -261, -261, -261, -261, -261, -261, -261, -261, + /* 80 */ -261, -261, -261, -261, -261, -261, -261, -261, 391, 886, + /* 90 */ 888, 1013, 1016, 1081, 1087, 1151, 1159, 1177, 1185, 1188, + /* 100 */ 1190, 1194, 1197, 1203, 1247, 1260, 1264, 1267, 1269, 1273, + /* 110 */ 1315, 1322, 1335, 1337, 1356, 1362, 1418, 1425, 1453, 1457, + /* 120 */ 1465, 1473, 1487, 1495, 1507, 1517, 1521, 1534, 1543, 1546, + /* 130 */ 1549, 1552, 1554, 1560, 1581, 1590, 1593, 1595, 1621, 1623, + /* 140 */ -261, -261, -261, -261, -261, -261, -261, -261, -261, -261, + /* 150 */ -261, -186, -117, 260, 263, 460, 631, -74, 497, -181, + /* 160 */ -261, 939, 176, 274, 338, 676, -261, -261, -261, -261, + /* 170 */ -212, -212, -212, -184, 149, 777, 1061, 1103, 265, 419, + /* 180 */ -254, 670, 677, 677, -11, -129, 184, 488, 736, 789, + /* 190 */ 805, 844, 403, 529, 579, 668, 783, 841, 1158, 1112, + /* 200 */ 806, 861, 1095, 846, 839, 1031, -189, 1077, 1080, 1116, + /* 210 */ 1084, 1156, 1139, 1221, 46, 1099, 1037, 1118, 1171, 1214, + /* 220 */ 1210, 1258, -210, -190, -176, -115, 117, 262, 376, 490, + /* 230 */ 511, 520, 618, 639, 743, 901, 
907, 958, 1014, 1055, + /* 240 */ 1108, 1193, 1244, 720, 1248, 1277, 1324, 1347, 1417, 1431, + /* 250 */ 1432, 1440, 1451, 1452, 1463, 1478, 1286, 1350, 1369, 1490, + /* 260 */ 1498, 1501, 773, 1509, 1513, 1528, 1292, 1367, 1535, 1536, + /* 270 */ 1477, 1542, 376, 1547, 1550, 1555, 1559, 1568, 1571, 1441, + /* 280 */ 1443, 1474, 1511, 1516, 1519, 1522, 773, 1474, 1474, 1503, + /* 290 */ 1567, 1594, 1484, 1527, 1556, 1570, 1557, 1524, 1573, 1545, + /* 300 */ 1548, 1576, 1561, 1587, 1540, 1575, 1606, 1611, 1622, 1624, + /* 310 */ 1626, 1582, 1597, 1598, 1599, 1601, 1603, 1563, 1608, 1605, + /* 320 */ 1604, 1564, 1566, 1655, 1660, 1578, 1579, 1665, 1586, 1607, + /* 330 */ 1610, 1627, 1633, 1645, 1634, 1636, 1650, 1651, 1653, 1654, + /* 340 */ 1692, 1695, 1656, 1628, 1631, 1657, 1635, 1663, 1659, 1666, + /* 350 */ 1661, 1696, 1700, 1618, 1632, 1712, 1714, 1693, 1715, 1718, + /* 360 */ 1716, 1720, 1697, 1703, 1705, 1707, 1701, 1708, 1713, 1721, + /* 370 */ 1722, 1726, 1729, 1709, 1731, 1733, 1638, 1644, 1672, 1673, + /* 380 */ 1736, 1758, 1641, 1642, 1698, 1699, 1723, 1725, 1688, 1767, + /* 390 */ 1689, 1727, 1724, 1738, 1730, 1776, 1793, 1794, 1797, 1798, + /* 400 */ 1799, 1690, 1691, 1702, 1785, 1780, 1783, 1784, 1786, 1791, + /* 410 */ 1778, 1779, 1788, 1790, 1796, 1800, }; static const YYACTIONTYPE yy_default[] = { - /* 0 */ 1652, 1652, 1652, 1482, 1247, 1358, 1247, 1247, 1247, 1482, - /* 10 */ 1482, 1482, 1247, 1388, 1388, 1535, 1280, 1247, 1247, 1247, - /* 20 */ 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1481, 1247, - /* 30 */ 1247, 1247, 1247, 1568, 1568, 1247, 1247, 1247, 1247, 1247, - /* 40 */ 1247, 1247, 1247, 1397, 1247, 1404, 1247, 1247, 1247, 1247, - /* 50 */ 1247, 1483, 1484, 1247, 1247, 1247, 1534, 1536, 1499, 1411, - /* 60 */ 1410, 1409, 1408, 1517, 1376, 1402, 1395, 1399, 1478, 1479, - /* 70 */ 1477, 1630, 1484, 1483, 1247, 1398, 1446, 1462, 1445, 1247, - /* 80 */ 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, - /* 90 */ 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, - /* 100 */ 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, - /* 110 */ 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, - /* 120 */ 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, - /* 130 */ 1247, 1454, 1461, 1460, 1459, 1468, 1458, 1455, 1448, 1447, - /* 140 */ 1449, 1450, 1247, 1247, 1271, 1247, 1247, 1268, 1322, 1247, - /* 150 */ 1247, 1247, 1247, 1247, 1554, 1553, 1247, 1451, 1247, 1280, - /* 160 */ 1439, 1438, 1437, 1465, 1452, 1464, 1463, 1542, 1604, 1603, - /* 170 */ 1500, 1247, 1247, 1247, 1247, 1247, 1247, 1568, 1247, 1247, - /* 180 */ 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, - /* 190 */ 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, - /* 200 */ 1247, 1378, 1568, 1568, 1247, 1280, 1568, 1568, 1379, 1379, - /* 210 */ 1276, 1276, 1382, 1247, 1549, 1349, 1349, 1349, 1349, 1358, - /* 220 */ 1349, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, - /* 230 */ 1247, 1247, 1247, 1247, 1247, 1247, 1539, 1537, 1247, 1247, - /* 240 */ 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, - /* 250 */ 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, - /* 260 */ 1247, 1247, 1247, 1247, 1247, 1354, 1247, 1247, 1247, 1247, - /* 270 */ 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1597, 1247, 1512, - /* 280 */ 1336, 1354, 1354, 1354, 1354, 1356, 1337, 1335, 1348, 1281, - /* 290 */ 1254, 1644, 1414, 1403, 1355, 1403, 1641, 1401, 1414, 1414, - /* 300 */ 1401, 1414, 1355, 1641, 1297, 1619, 1292, 1388, 1388, 
1388, - /* 310 */ 1378, 1378, 1378, 1378, 1382, 1382, 1480, 1355, 1348, 1247, - /* 320 */ 1644, 1644, 1364, 1364, 1643, 1643, 1364, 1500, 1627, 1423, - /* 330 */ 1396, 1382, 1325, 1396, 1382, 1331, 1331, 1331, 1331, 1364, - /* 340 */ 1265, 1401, 1627, 1627, 1401, 1423, 1325, 1401, 1325, 1401, - /* 350 */ 1364, 1265, 1516, 1638, 1364, 1265, 1490, 1364, 1265, 1364, - /* 360 */ 1265, 1490, 1323, 1323, 1323, 1312, 1247, 1247, 1490, 1323, - /* 370 */ 1297, 1323, 1312, 1323, 1323, 1586, 1247, 1494, 1494, 1490, - /* 380 */ 1364, 1578, 1578, 1391, 1391, 1396, 1382, 1485, 1364, 1247, - /* 390 */ 1396, 1394, 1392, 1401, 1315, 1600, 1600, 1596, 1596, 1596, - /* 400 */ 1649, 1649, 1549, 1612, 1280, 1280, 1280, 1280, 1612, 1299, - /* 410 */ 1299, 1281, 1281, 1280, 1612, 1247, 1247, 1247, 1247, 1247, - /* 420 */ 1247, 1607, 1247, 1544, 1501, 1368, 1247, 1247, 1247, 1247, - /* 430 */ 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, - /* 440 */ 1247, 1247, 1555, 1247, 1247, 1247, 1247, 1247, 1247, 1247, - /* 450 */ 1247, 1247, 1247, 1428, 1247, 1250, 1546, 1247, 1247, 1247, - /* 460 */ 1247, 1247, 1247, 1247, 1247, 1405, 1406, 1369, 1247, 1247, - /* 470 */ 1247, 1247, 1247, 1247, 1247, 1420, 1247, 1247, 1247, 1415, - /* 480 */ 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1640, 1247, - /* 490 */ 1247, 1247, 1247, 1247, 1247, 1515, 1514, 1247, 1247, 1366, - /* 500 */ 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, - /* 510 */ 1247, 1247, 1247, 1295, 1247, 1247, 1247, 1247, 1247, 1247, - /* 520 */ 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, - /* 530 */ 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1393, 1247, 1247, - /* 540 */ 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, - /* 550 */ 1247, 1247, 1583, 1383, 1247, 1247, 1247, 1247, 1631, 1247, - /* 560 */ 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, - /* 570 */ 1247, 1247, 1247, 1623, 1339, 1429, 1247, 1432, 1269, 1247, - /* 580 */ 1259, 1247, 1247, + /* 0 */ 1667, 1667, 1667, 1495, 1258, 1371, 1258, 1258, 1258, 1258, + /* 10 */ 1495, 1495, 1495, 1258, 1258, 1258, 1258, 1258, 1258, 1401, + /* 20 */ 1401, 1548, 1291, 1258, 1258, 1258, 1258, 1258, 1258, 1258, + /* 30 */ 1258, 1258, 1258, 1258, 1258, 1494, 1258, 1258, 1258, 1258, + /* 40 */ 1582, 1582, 1258, 1258, 1258, 1258, 1258, 1567, 1566, 1258, + /* 50 */ 1258, 1258, 1410, 1258, 1417, 1258, 1258, 1258, 1258, 1258, + /* 60 */ 1496, 1497, 1258, 1258, 1258, 1547, 1549, 1512, 1424, 1423, + /* 70 */ 1422, 1421, 1530, 1389, 1415, 1408, 1412, 1491, 1492, 1490, + /* 80 */ 1645, 1497, 1496, 1258, 1411, 1459, 1475, 1458, 1258, 1258, + /* 90 */ 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, + /* 100 */ 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, + /* 110 */ 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, + /* 120 */ 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, + /* 130 */ 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, + /* 140 */ 1467, 1474, 1473, 1472, 1481, 1471, 1468, 1461, 1460, 1462, + /* 150 */ 1463, 1282, 1258, 1279, 1333, 1258, 1258, 1258, 1258, 1258, + /* 160 */ 1464, 1291, 1452, 1451, 1450, 1258, 1478, 1465, 1477, 1476, + /* 170 */ 1555, 1619, 1618, 1513, 1258, 1258, 1258, 1258, 1258, 1258, + /* 180 */ 1582, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, + /* 190 */ 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, + /* 200 */ 1258, 1258, 1258, 1391, 1582, 1582, 1258, 1291, 1582, 1582, + /* 210 */ 1392, 1392, 1287, 1287, 1395, 1562, 1362, 1362, 1362, 1362, + 
/* 220 */ 1371, 1362, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, + /* 230 */ 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1552, 1550, 1258, + /* 240 */ 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, + /* 250 */ 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, + /* 260 */ 1258, 1258, 1258, 1258, 1258, 1258, 1367, 1258, 1258, 1258, + /* 270 */ 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1612, 1258, + /* 280 */ 1525, 1347, 1367, 1367, 1367, 1367, 1369, 1348, 1346, 1361, + /* 290 */ 1292, 1265, 1659, 1427, 1416, 1368, 1416, 1656, 1414, 1427, + /* 300 */ 1427, 1414, 1427, 1368, 1656, 1308, 1634, 1303, 1401, 1401, + /* 310 */ 1401, 1391, 1391, 1391, 1391, 1395, 1395, 1493, 1368, 1361, + /* 320 */ 1258, 1659, 1659, 1377, 1377, 1658, 1658, 1377, 1513, 1642, + /* 330 */ 1436, 1409, 1395, 1336, 1409, 1395, 1342, 1342, 1342, 1342, + /* 340 */ 1377, 1276, 1414, 1642, 1642, 1414, 1436, 1336, 1414, 1336, + /* 350 */ 1414, 1377, 1276, 1529, 1653, 1377, 1276, 1503, 1377, 1276, + /* 360 */ 1377, 1276, 1503, 1334, 1334, 1334, 1323, 1258, 1258, 1503, + /* 370 */ 1334, 1308, 1334, 1323, 1334, 1334, 1600, 1258, 1507, 1507, + /* 380 */ 1503, 1377, 1592, 1592, 1404, 1404, 1409, 1395, 1498, 1377, + /* 390 */ 1258, 1409, 1407, 1405, 1414, 1326, 1615, 1615, 1611, 1611, + /* 400 */ 1611, 1664, 1664, 1562, 1627, 1291, 1291, 1291, 1291, 1627, + /* 410 */ 1310, 1310, 1292, 1292, 1291, 1627, 1258, 1258, 1258, 1258, + /* 420 */ 1258, 1258, 1622, 1258, 1557, 1514, 1381, 1258, 1258, 1258, + /* 430 */ 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, + /* 440 */ 1258, 1258, 1258, 1568, 1258, 1258, 1258, 1258, 1258, 1258, + /* 450 */ 1258, 1258, 1258, 1258, 1258, 1441, 1258, 1261, 1559, 1258, + /* 460 */ 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1418, 1419, 1382, + /* 470 */ 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1433, 1258, 1258, + /* 480 */ 1258, 1428, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, + /* 490 */ 1655, 1258, 1258, 1258, 1258, 1258, 1258, 1528, 1527, 1258, + /* 500 */ 1258, 1379, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, + /* 510 */ 1258, 1258, 1258, 1258, 1258, 1306, 1258, 1258, 1258, 1258, + /* 520 */ 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, + /* 530 */ 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1406, + /* 540 */ 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, + /* 550 */ 1258, 1258, 1258, 1258, 1597, 1396, 1258, 1258, 1258, 1258, + /* 560 */ 1646, 1258, 1258, 1258, 1258, 1356, 1258, 1258, 1258, 1258, + /* 570 */ 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1638, 1350, 1442, + /* 580 */ 1258, 1445, 1280, 1258, 1270, 1258, 1258, }; /********** End of lemon-generated parsing tables *****************************/ @@ -172536,8 +173901,8 @@ static const YYCODETYPE yyFallback[] = { 0, /* TRUEFALSE => nothing */ 0, /* ISNOT => nothing */ 0, /* FUNCTION => nothing */ - 0, /* UMINUS => nothing */ 0, /* UPLUS => nothing */ + 0, /* UMINUS => nothing */ 0, /* TRUTH => nothing */ 0, /* REGISTER => nothing */ 0, /* VECTOR => nothing */ @@ -172546,6 +173911,7 @@ static const YYCODETYPE yyFallback[] = { 0, /* ASTERISK => nothing */ 0, /* SPAN => nothing */ 0, /* ERROR => nothing */ + 0, /* QNUMBER => nothing */ 0, /* SPACE => nothing */ 0, /* ILLEGAL => nothing */ }; @@ -172588,14 +173954,9 @@ struct yyParser { #endif sqlite3ParserARG_SDECL /* A place to hold %extra_argument */ sqlite3ParserCTX_SDECL /* A place to hold %extra_context */ -#if YYSTACKDEPTH<=0 - int yystksz; /* Current side of the stack */ - yyStackEntry *yystack; /* The 
parser's stack */ - yyStackEntry yystk0; /* First stack entry */ -#else - yyStackEntry yystack[YYSTACKDEPTH]; /* The parser's stack */ - yyStackEntry *yystackEnd; /* Last entry in the stack */ -#endif + yyStackEntry *yystackEnd; /* Last entry in the stack */ + yyStackEntry *yystack; /* The parser stack */ + yyStackEntry yystk0[YYSTACKDEPTH]; /* Initial stack space */ }; typedef struct yyParser yyParser; @@ -172809,8 +174170,8 @@ static const char *const yyTokenName[] = { /* 170 */ "TRUEFALSE", /* 171 */ "ISNOT", /* 172 */ "FUNCTION", - /* 173 */ "UMINUS", - /* 174 */ "UPLUS", + /* 173 */ "UPLUS", + /* 174 */ "UMINUS", /* 175 */ "TRUTH", /* 176 */ "REGISTER", /* 177 */ "VECTOR", @@ -172819,142 +174180,145 @@ static const char *const yyTokenName[] = { /* 180 */ "ASTERISK", /* 181 */ "SPAN", /* 182 */ "ERROR", - /* 183 */ "SPACE", - /* 184 */ "ILLEGAL", - /* 185 */ "input", - /* 186 */ "cmdlist", - /* 187 */ "ecmd", - /* 188 */ "cmdx", - /* 189 */ "explain", - /* 190 */ "cmd", - /* 191 */ "transtype", - /* 192 */ "trans_opt", - /* 193 */ "nm", - /* 194 */ "savepoint_opt", - /* 195 */ "create_table", - /* 196 */ "create_table_args", - /* 197 */ "createkw", - /* 198 */ "temp", - /* 199 */ "ifnotexists", - /* 200 */ "dbnm", - /* 201 */ "columnlist", - /* 202 */ "conslist_opt", - /* 203 */ "table_option_set", - /* 204 */ "select", - /* 205 */ "table_option", - /* 206 */ "columnname", - /* 207 */ "carglist", - /* 208 */ "typetoken", - /* 209 */ "typename", - /* 210 */ "signed", - /* 211 */ "plus_num", - /* 212 */ "minus_num", - /* 213 */ "scanpt", - /* 214 */ "scantok", - /* 215 */ "ccons", - /* 216 */ "term", - /* 217 */ "expr", - /* 218 */ "onconf", - /* 219 */ "sortorder", - /* 220 */ "autoinc", - /* 221 */ "eidlist_opt", - /* 222 */ "refargs", - /* 223 */ "defer_subclause", - /* 224 */ "generated", - /* 225 */ "refarg", - /* 226 */ "refact", - /* 227 */ "init_deferred_pred_opt", - /* 228 */ "conslist", - /* 229 */ "tconscomma", - /* 230 */ "tcons", - /* 231 */ "sortlist", - /* 232 */ "eidlist", - /* 233 */ "defer_subclause_opt", - /* 234 */ "orconf", - /* 235 */ "resolvetype", - /* 236 */ "raisetype", - /* 237 */ "ifexists", - /* 238 */ "fullname", - /* 239 */ "selectnowith", - /* 240 */ "oneselect", - /* 241 */ "wqlist", - /* 242 */ "multiselect_op", - /* 243 */ "distinct", - /* 244 */ "selcollist", - /* 245 */ "from", - /* 246 */ "where_opt", - /* 247 */ "groupby_opt", - /* 248 */ "having_opt", - /* 249 */ "orderby_opt", - /* 250 */ "limit_opt", - /* 251 */ "window_clause", - /* 252 */ "values", - /* 253 */ "nexprlist", - /* 254 */ "sclp", - /* 255 */ "as", - /* 256 */ "seltablist", - /* 257 */ "stl_prefix", - /* 258 */ "joinop", - /* 259 */ "on_using", - /* 260 */ "indexed_by", - /* 261 */ "exprlist", - /* 262 */ "xfullname", - /* 263 */ "idlist", - /* 264 */ "indexed_opt", - /* 265 */ "nulls", - /* 266 */ "with", - /* 267 */ "where_opt_ret", - /* 268 */ "setlist", - /* 269 */ "insert_cmd", - /* 270 */ "idlist_opt", - /* 271 */ "upsert", - /* 272 */ "returning", - /* 273 */ "filter_over", - /* 274 */ "likeop", - /* 275 */ "between_op", - /* 276 */ "in_op", - /* 277 */ "paren_exprlist", - /* 278 */ "case_operand", - /* 279 */ "case_exprlist", - /* 280 */ "case_else", - /* 281 */ "uniqueflag", - /* 282 */ "collate", - /* 283 */ "vinto", - /* 284 */ "nmnum", - /* 285 */ "trigger_decl", - /* 286 */ "trigger_cmd_list", - /* 287 */ "trigger_time", - /* 288 */ "trigger_event", - /* 289 */ "foreach_clause", - /* 290 */ "when_clause", - /* 291 */ "trigger_cmd", - /* 292 */ "trnm", - /* 293 */ 
"tridxby", - /* 294 */ "database_kw_opt", - /* 295 */ "key_opt", - /* 296 */ "add_column_fullname", - /* 297 */ "kwcolumn_opt", - /* 298 */ "create_vtab", - /* 299 */ "vtabarglist", - /* 300 */ "vtabarg", - /* 301 */ "vtabargtoken", - /* 302 */ "lp", - /* 303 */ "anylist", - /* 304 */ "wqitem", - /* 305 */ "wqas", - /* 306 */ "windowdefn_list", - /* 307 */ "windowdefn", - /* 308 */ "window", - /* 309 */ "frame_opt", - /* 310 */ "part_opt", - /* 311 */ "filter_clause", - /* 312 */ "over_clause", - /* 313 */ "range_or_rows", - /* 314 */ "frame_bound", - /* 315 */ "frame_bound_s", - /* 316 */ "frame_bound_e", - /* 317 */ "frame_exclude_opt", - /* 318 */ "frame_exclude", + /* 183 */ "QNUMBER", + /* 184 */ "SPACE", + /* 185 */ "ILLEGAL", + /* 186 */ "input", + /* 187 */ "cmdlist", + /* 188 */ "ecmd", + /* 189 */ "cmdx", + /* 190 */ "explain", + /* 191 */ "cmd", + /* 192 */ "transtype", + /* 193 */ "trans_opt", + /* 194 */ "nm", + /* 195 */ "savepoint_opt", + /* 196 */ "create_table", + /* 197 */ "create_table_args", + /* 198 */ "createkw", + /* 199 */ "temp", + /* 200 */ "ifnotexists", + /* 201 */ "dbnm", + /* 202 */ "columnlist", + /* 203 */ "conslist_opt", + /* 204 */ "table_option_set", + /* 205 */ "select", + /* 206 */ "table_option", + /* 207 */ "columnname", + /* 208 */ "carglist", + /* 209 */ "typetoken", + /* 210 */ "typename", + /* 211 */ "signed", + /* 212 */ "plus_num", + /* 213 */ "minus_num", + /* 214 */ "scanpt", + /* 215 */ "scantok", + /* 216 */ "ccons", + /* 217 */ "term", + /* 218 */ "expr", + /* 219 */ "onconf", + /* 220 */ "sortorder", + /* 221 */ "autoinc", + /* 222 */ "eidlist_opt", + /* 223 */ "refargs", + /* 224 */ "defer_subclause", + /* 225 */ "generated", + /* 226 */ "refarg", + /* 227 */ "refact", + /* 228 */ "init_deferred_pred_opt", + /* 229 */ "conslist", + /* 230 */ "tconscomma", + /* 231 */ "tcons", + /* 232 */ "sortlist", + /* 233 */ "eidlist", + /* 234 */ "defer_subclause_opt", + /* 235 */ "orconf", + /* 236 */ "resolvetype", + /* 237 */ "raisetype", + /* 238 */ "ifexists", + /* 239 */ "fullname", + /* 240 */ "selectnowith", + /* 241 */ "oneselect", + /* 242 */ "wqlist", + /* 243 */ "multiselect_op", + /* 244 */ "distinct", + /* 245 */ "selcollist", + /* 246 */ "from", + /* 247 */ "where_opt", + /* 248 */ "groupby_opt", + /* 249 */ "having_opt", + /* 250 */ "orderby_opt", + /* 251 */ "limit_opt", + /* 252 */ "window_clause", + /* 253 */ "values", + /* 254 */ "nexprlist", + /* 255 */ "mvalues", + /* 256 */ "sclp", + /* 257 */ "as", + /* 258 */ "seltablist", + /* 259 */ "stl_prefix", + /* 260 */ "joinop", + /* 261 */ "on_using", + /* 262 */ "indexed_by", + /* 263 */ "exprlist", + /* 264 */ "xfullname", + /* 265 */ "idlist", + /* 266 */ "indexed_opt", + /* 267 */ "nulls", + /* 268 */ "with", + /* 269 */ "where_opt_ret", + /* 270 */ "setlist", + /* 271 */ "insert_cmd", + /* 272 */ "idlist_opt", + /* 273 */ "upsert", + /* 274 */ "returning", + /* 275 */ "filter_over", + /* 276 */ "likeop", + /* 277 */ "between_op", + /* 278 */ "in_op", + /* 279 */ "paren_exprlist", + /* 280 */ "case_operand", + /* 281 */ "case_exprlist", + /* 282 */ "case_else", + /* 283 */ "uniqueflag", + /* 284 */ "collate", + /* 285 */ "vinto", + /* 286 */ "nmnum", + /* 287 */ "trigger_decl", + /* 288 */ "trigger_cmd_list", + /* 289 */ "trigger_time", + /* 290 */ "trigger_event", + /* 291 */ "foreach_clause", + /* 292 */ "when_clause", + /* 293 */ "trigger_cmd", + /* 294 */ "trnm", + /* 295 */ "tridxby", + /* 296 */ "database_kw_opt", + /* 297 */ "key_opt", + /* 298 */ "add_column_fullname", + 
/* 299 */ "kwcolumn_opt", + /* 300 */ "create_vtab", + /* 301 */ "vtabarglist", + /* 302 */ "vtabarg", + /* 303 */ "vtabargtoken", + /* 304 */ "lp", + /* 305 */ "anylist", + /* 306 */ "wqitem", + /* 307 */ "wqas", + /* 308 */ "withnm", + /* 309 */ "windowdefn_list", + /* 310 */ "windowdefn", + /* 311 */ "window", + /* 312 */ "frame_opt", + /* 313 */ "part_opt", + /* 314 */ "filter_clause", + /* 315 */ "over_clause", + /* 316 */ "range_or_rows", + /* 317 */ "frame_bound", + /* 318 */ "frame_bound_s", + /* 319 */ "frame_bound_e", + /* 320 */ "frame_exclude_opt", + /* 321 */ "frame_exclude", }; #endif /* defined(YYCOVERAGE) || !defined(NDEBUG) */ @@ -173057,351 +174421,363 @@ static const char *const yyRuleName[] = { /* 92 */ "oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt", /* 93 */ "oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt window_clause orderby_opt limit_opt", /* 94 */ "values ::= VALUES LP nexprlist RP", - /* 95 */ "values ::= values COMMA LP nexprlist RP", - /* 96 */ "distinct ::= DISTINCT", - /* 97 */ "distinct ::= ALL", - /* 98 */ "distinct ::=", - /* 99 */ "sclp ::=", - /* 100 */ "selcollist ::= sclp scanpt expr scanpt as", - /* 101 */ "selcollist ::= sclp scanpt STAR", - /* 102 */ "selcollist ::= sclp scanpt nm DOT STAR", - /* 103 */ "as ::= AS nm", - /* 104 */ "as ::=", - /* 105 */ "from ::=", - /* 106 */ "from ::= FROM seltablist", - /* 107 */ "stl_prefix ::= seltablist joinop", - /* 108 */ "stl_prefix ::=", - /* 109 */ "seltablist ::= stl_prefix nm dbnm as on_using", - /* 110 */ "seltablist ::= stl_prefix nm dbnm as indexed_by on_using", - /* 111 */ "seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_using", - /* 112 */ "seltablist ::= stl_prefix LP select RP as on_using", - /* 113 */ "seltablist ::= stl_prefix LP seltablist RP as on_using", - /* 114 */ "dbnm ::=", - /* 115 */ "dbnm ::= DOT nm", - /* 116 */ "fullname ::= nm", - /* 117 */ "fullname ::= nm DOT nm", - /* 118 */ "xfullname ::= nm", - /* 119 */ "xfullname ::= nm DOT nm", - /* 120 */ "xfullname ::= nm DOT nm AS nm", - /* 121 */ "xfullname ::= nm AS nm", - /* 122 */ "joinop ::= COMMA|JOIN", - /* 123 */ "joinop ::= JOIN_KW JOIN", - /* 124 */ "joinop ::= JOIN_KW nm JOIN", - /* 125 */ "joinop ::= JOIN_KW nm nm JOIN", - /* 126 */ "on_using ::= ON expr", - /* 127 */ "on_using ::= USING LP idlist RP", - /* 128 */ "on_using ::=", - /* 129 */ "indexed_opt ::=", - /* 130 */ "indexed_by ::= INDEXED BY nm", - /* 131 */ "indexed_by ::= NOT INDEXED", - /* 132 */ "orderby_opt ::=", - /* 133 */ "orderby_opt ::= ORDER BY sortlist", - /* 134 */ "sortlist ::= sortlist COMMA expr sortorder nulls", - /* 135 */ "sortlist ::= expr sortorder nulls", - /* 136 */ "sortorder ::= ASC", - /* 137 */ "sortorder ::= DESC", - /* 138 */ "sortorder ::=", - /* 139 */ "nulls ::= NULLS FIRST", - /* 140 */ "nulls ::= NULLS LAST", - /* 141 */ "nulls ::=", - /* 142 */ "groupby_opt ::=", - /* 143 */ "groupby_opt ::= GROUP BY nexprlist", - /* 144 */ "having_opt ::=", - /* 145 */ "having_opt ::= HAVING expr", - /* 146 */ "limit_opt ::=", - /* 147 */ "limit_opt ::= LIMIT expr", - /* 148 */ "limit_opt ::= LIMIT expr OFFSET expr", - /* 149 */ "limit_opt ::= LIMIT expr COMMA expr", - /* 150 */ "cmd ::= with DELETE FROM xfullname indexed_opt where_opt_ret orderby_opt limit_opt", - /* 151 */ "where_opt ::=", - /* 152 */ "where_opt ::= WHERE expr", - /* 153 */ "where_opt_ret ::=", - /* 154 */ "where_opt_ret ::= WHERE expr", - /* 155 */ "where_opt_ret ::= RETURNING selcollist", 
- /* 156 */ "where_opt_ret ::= WHERE expr RETURNING selcollist", - /* 157 */ "cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt_ret orderby_opt limit_opt", - /* 158 */ "setlist ::= setlist COMMA nm EQ expr", - /* 159 */ "setlist ::= setlist COMMA LP idlist RP EQ expr", - /* 160 */ "setlist ::= nm EQ expr", - /* 161 */ "setlist ::= LP idlist RP EQ expr", - /* 162 */ "cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert", - /* 163 */ "cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES returning", - /* 164 */ "upsert ::=", - /* 165 */ "upsert ::= RETURNING selcollist", - /* 166 */ "upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt upsert", - /* 167 */ "upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING upsert", - /* 168 */ "upsert ::= ON CONFLICT DO NOTHING returning", - /* 169 */ "upsert ::= ON CONFLICT DO UPDATE SET setlist where_opt returning", - /* 170 */ "returning ::= RETURNING selcollist", - /* 171 */ "insert_cmd ::= INSERT orconf", - /* 172 */ "insert_cmd ::= REPLACE", - /* 173 */ "idlist_opt ::=", - /* 174 */ "idlist_opt ::= LP idlist RP", - /* 175 */ "idlist ::= idlist COMMA nm", - /* 176 */ "idlist ::= nm", - /* 177 */ "expr ::= LP expr RP", - /* 178 */ "expr ::= ID|INDEXED|JOIN_KW", - /* 179 */ "expr ::= nm DOT nm", - /* 180 */ "expr ::= nm DOT nm DOT nm", - /* 181 */ "term ::= NULL|FLOAT|BLOB", - /* 182 */ "term ::= STRING", - /* 183 */ "term ::= INTEGER", - /* 184 */ "expr ::= VARIABLE", - /* 185 */ "expr ::= expr COLLATE ID|STRING", - /* 186 */ "expr ::= CAST LP expr AS typetoken RP", - /* 187 */ "expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP", - /* 188 */ "expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP", - /* 189 */ "expr ::= ID|INDEXED|JOIN_KW LP STAR RP", - /* 190 */ "expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over", - /* 191 */ "expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP filter_over", - /* 192 */ "expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over", - /* 193 */ "term ::= CTIME_KW", - /* 194 */ "expr ::= LP nexprlist COMMA expr RP", - /* 195 */ "expr ::= expr AND expr", - /* 196 */ "expr ::= expr OR expr", - /* 197 */ "expr ::= expr LT|GT|GE|LE expr", - /* 198 */ "expr ::= expr EQ|NE expr", - /* 199 */ "expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr", - /* 200 */ "expr ::= expr PLUS|MINUS expr", - /* 201 */ "expr ::= expr STAR|SLASH|REM expr", - /* 202 */ "expr ::= expr CONCAT expr", - /* 203 */ "likeop ::= NOT LIKE_KW|MATCH", - /* 204 */ "expr ::= expr likeop expr", - /* 205 */ "expr ::= expr likeop expr ESCAPE expr", - /* 206 */ "expr ::= expr ISNULL|NOTNULL", - /* 207 */ "expr ::= expr NOT NULL", - /* 208 */ "expr ::= expr IS expr", - /* 209 */ "expr ::= expr IS NOT expr", - /* 210 */ "expr ::= expr IS NOT DISTINCT FROM expr", - /* 211 */ "expr ::= expr IS DISTINCT FROM expr", - /* 212 */ "expr ::= NOT expr", - /* 213 */ "expr ::= BITNOT expr", - /* 214 */ "expr ::= PLUS|MINUS expr", - /* 215 */ "expr ::= expr PTR expr", - /* 216 */ "between_op ::= BETWEEN", - /* 217 */ "between_op ::= NOT BETWEEN", - /* 218 */ "expr ::= expr between_op expr AND expr", - /* 219 */ "in_op ::= IN", - /* 220 */ "in_op ::= NOT IN", - /* 221 */ "expr ::= expr in_op LP exprlist RP", - /* 222 */ "expr ::= LP select RP", - /* 223 */ "expr ::= expr in_op LP select RP", - /* 224 */ "expr ::= expr in_op nm dbnm paren_exprlist", - /* 225 */ "expr ::= EXISTS LP select RP", - /* 226 */ "expr ::= CASE case_operand case_exprlist case_else END", - 
/* 227 */ "case_exprlist ::= case_exprlist WHEN expr THEN expr", - /* 228 */ "case_exprlist ::= WHEN expr THEN expr", - /* 229 */ "case_else ::= ELSE expr", - /* 230 */ "case_else ::=", - /* 231 */ "case_operand ::=", - /* 232 */ "exprlist ::=", - /* 233 */ "nexprlist ::= nexprlist COMMA expr", - /* 234 */ "nexprlist ::= expr", - /* 235 */ "paren_exprlist ::=", - /* 236 */ "paren_exprlist ::= LP exprlist RP", - /* 237 */ "cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt", - /* 238 */ "uniqueflag ::= UNIQUE", - /* 239 */ "uniqueflag ::=", - /* 240 */ "eidlist_opt ::=", - /* 241 */ "eidlist_opt ::= LP eidlist RP", - /* 242 */ "eidlist ::= eidlist COMMA nm collate sortorder", - /* 243 */ "eidlist ::= nm collate sortorder", - /* 244 */ "collate ::=", - /* 245 */ "collate ::= COLLATE ID|STRING", - /* 246 */ "cmd ::= DROP INDEX ifexists fullname", - /* 247 */ "cmd ::= VACUUM vinto", - /* 248 */ "cmd ::= VACUUM nm vinto", - /* 249 */ "vinto ::= INTO expr", - /* 250 */ "vinto ::=", - /* 251 */ "cmd ::= PRAGMA nm dbnm", - /* 252 */ "cmd ::= PRAGMA nm dbnm EQ nmnum", - /* 253 */ "cmd ::= PRAGMA nm dbnm LP nmnum RP", - /* 254 */ "cmd ::= PRAGMA nm dbnm EQ minus_num", - /* 255 */ "cmd ::= PRAGMA nm dbnm LP minus_num RP", - /* 256 */ "plus_num ::= PLUS INTEGER|FLOAT", - /* 257 */ "minus_num ::= MINUS INTEGER|FLOAT", - /* 258 */ "cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END", - /* 259 */ "trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause", - /* 260 */ "trigger_time ::= BEFORE|AFTER", - /* 261 */ "trigger_time ::= INSTEAD OF", - /* 262 */ "trigger_time ::=", - /* 263 */ "trigger_event ::= DELETE|INSERT", - /* 264 */ "trigger_event ::= UPDATE", - /* 265 */ "trigger_event ::= UPDATE OF idlist", - /* 266 */ "when_clause ::=", - /* 267 */ "when_clause ::= WHEN expr", - /* 268 */ "trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI", - /* 269 */ "trigger_cmd_list ::= trigger_cmd SEMI", - /* 270 */ "trnm ::= nm DOT nm", - /* 271 */ "tridxby ::= INDEXED BY nm", - /* 272 */ "tridxby ::= NOT INDEXED", - /* 273 */ "trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt", - /* 274 */ "trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt", - /* 275 */ "trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt", - /* 276 */ "trigger_cmd ::= scanpt select scanpt", - /* 277 */ "expr ::= RAISE LP IGNORE RP", - /* 278 */ "expr ::= RAISE LP raisetype COMMA nm RP", - /* 279 */ "raisetype ::= ROLLBACK", - /* 280 */ "raisetype ::= ABORT", - /* 281 */ "raisetype ::= FAIL", - /* 282 */ "cmd ::= DROP TRIGGER ifexists fullname", - /* 283 */ "cmd ::= ATTACH database_kw_opt expr AS expr key_opt", - /* 284 */ "cmd ::= DETACH database_kw_opt expr", - /* 285 */ "key_opt ::=", - /* 286 */ "key_opt ::= KEY expr", - /* 287 */ "cmd ::= REINDEX", - /* 288 */ "cmd ::= REINDEX nm dbnm", - /* 289 */ "cmd ::= ANALYZE", - /* 290 */ "cmd ::= ANALYZE nm dbnm", - /* 291 */ "cmd ::= ALTER TABLE fullname RENAME TO nm", - /* 292 */ "cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist", - /* 293 */ "cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm", - /* 294 */ "add_column_fullname ::= fullname", - /* 295 */ "cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm", - /* 296 */ "cmd ::= create_vtab", - /* 297 */ "cmd ::= create_vtab LP vtabarglist RP", - /* 298 */ "create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm", - /* 299 */ "vtabarg ::=", - 
/* 300 */ "vtabargtoken ::= ANY", - /* 301 */ "vtabargtoken ::= lp anylist RP", - /* 302 */ "lp ::= LP", - /* 303 */ "with ::= WITH wqlist", - /* 304 */ "with ::= WITH RECURSIVE wqlist", - /* 305 */ "wqas ::= AS", - /* 306 */ "wqas ::= AS MATERIALIZED", - /* 307 */ "wqas ::= AS NOT MATERIALIZED", - /* 308 */ "wqitem ::= nm eidlist_opt wqas LP select RP", - /* 309 */ "wqlist ::= wqitem", - /* 310 */ "wqlist ::= wqlist COMMA wqitem", - /* 311 */ "windowdefn_list ::= windowdefn_list COMMA windowdefn", - /* 312 */ "windowdefn ::= nm AS LP window RP", - /* 313 */ "window ::= PARTITION BY nexprlist orderby_opt frame_opt", - /* 314 */ "window ::= nm PARTITION BY nexprlist orderby_opt frame_opt", - /* 315 */ "window ::= ORDER BY sortlist frame_opt", - /* 316 */ "window ::= nm ORDER BY sortlist frame_opt", - /* 317 */ "window ::= nm frame_opt", - /* 318 */ "frame_opt ::=", - /* 319 */ "frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt", - /* 320 */ "frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt", - /* 321 */ "range_or_rows ::= RANGE|ROWS|GROUPS", - /* 322 */ "frame_bound_s ::= frame_bound", - /* 323 */ "frame_bound_s ::= UNBOUNDED PRECEDING", - /* 324 */ "frame_bound_e ::= frame_bound", - /* 325 */ "frame_bound_e ::= UNBOUNDED FOLLOWING", - /* 326 */ "frame_bound ::= expr PRECEDING|FOLLOWING", - /* 327 */ "frame_bound ::= CURRENT ROW", - /* 328 */ "frame_exclude_opt ::=", - /* 329 */ "frame_exclude_opt ::= EXCLUDE frame_exclude", - /* 330 */ "frame_exclude ::= NO OTHERS", - /* 331 */ "frame_exclude ::= CURRENT ROW", - /* 332 */ "frame_exclude ::= GROUP|TIES", - /* 333 */ "window_clause ::= WINDOW windowdefn_list", - /* 334 */ "filter_over ::= filter_clause over_clause", - /* 335 */ "filter_over ::= over_clause", - /* 336 */ "filter_over ::= filter_clause", - /* 337 */ "over_clause ::= OVER LP window RP", - /* 338 */ "over_clause ::= OVER nm", - /* 339 */ "filter_clause ::= FILTER LP WHERE expr RP", - /* 340 */ "input ::= cmdlist", - /* 341 */ "cmdlist ::= cmdlist ecmd", - /* 342 */ "cmdlist ::= ecmd", - /* 343 */ "ecmd ::= SEMI", - /* 344 */ "ecmd ::= cmdx SEMI", - /* 345 */ "ecmd ::= explain cmdx SEMI", - /* 346 */ "trans_opt ::=", - /* 347 */ "trans_opt ::= TRANSACTION", - /* 348 */ "trans_opt ::= TRANSACTION nm", - /* 349 */ "savepoint_opt ::= SAVEPOINT", - /* 350 */ "savepoint_opt ::=", - /* 351 */ "cmd ::= create_table create_table_args", - /* 352 */ "table_option_set ::= table_option", - /* 353 */ "columnlist ::= columnlist COMMA columnname carglist", - /* 354 */ "columnlist ::= columnname carglist", - /* 355 */ "nm ::= ID|INDEXED|JOIN_KW", - /* 356 */ "nm ::= STRING", - /* 357 */ "typetoken ::= typename", - /* 358 */ "typename ::= ID|STRING", - /* 359 */ "signed ::= plus_num", - /* 360 */ "signed ::= minus_num", - /* 361 */ "carglist ::= carglist ccons", - /* 362 */ "carglist ::=", - /* 363 */ "ccons ::= NULL onconf", - /* 364 */ "ccons ::= GENERATED ALWAYS AS generated", - /* 365 */ "ccons ::= AS generated", - /* 366 */ "conslist_opt ::= COMMA conslist", - /* 367 */ "conslist ::= conslist tconscomma tcons", - /* 368 */ "conslist ::= tcons", - /* 369 */ "tconscomma ::=", - /* 370 */ "defer_subclause_opt ::= defer_subclause", - /* 371 */ "resolvetype ::= raisetype", - /* 372 */ "selectnowith ::= oneselect", - /* 373 */ "oneselect ::= values", - /* 374 */ "sclp ::= selcollist COMMA", - /* 375 */ "as ::= ID|STRING", - /* 376 */ "indexed_opt ::= indexed_by", - /* 377 */ "returning ::=", - /* 378 */ "expr ::= term", - /* 379 */ "likeop ::= 
LIKE_KW|MATCH", - /* 380 */ "case_operand ::= expr", - /* 381 */ "exprlist ::= nexprlist", - /* 382 */ "nmnum ::= plus_num", - /* 383 */ "nmnum ::= nm", - /* 384 */ "nmnum ::= ON", - /* 385 */ "nmnum ::= DELETE", - /* 386 */ "nmnum ::= DEFAULT", - /* 387 */ "plus_num ::= INTEGER|FLOAT", - /* 388 */ "foreach_clause ::=", - /* 389 */ "foreach_clause ::= FOR EACH ROW", - /* 390 */ "trnm ::= nm", - /* 391 */ "tridxby ::=", - /* 392 */ "database_kw_opt ::= DATABASE", - /* 393 */ "database_kw_opt ::=", - /* 394 */ "kwcolumn_opt ::=", - /* 395 */ "kwcolumn_opt ::= COLUMNKW", - /* 396 */ "vtabarglist ::= vtabarg", - /* 397 */ "vtabarglist ::= vtabarglist COMMA vtabarg", - /* 398 */ "vtabarg ::= vtabarg vtabargtoken", - /* 399 */ "anylist ::=", - /* 400 */ "anylist ::= anylist LP anylist RP", - /* 401 */ "anylist ::= anylist ANY", - /* 402 */ "with ::=", - /* 403 */ "windowdefn_list ::= windowdefn", - /* 404 */ "window ::= frame_opt", + /* 95 */ "oneselect ::= mvalues", + /* 96 */ "mvalues ::= values COMMA LP nexprlist RP", + /* 97 */ "mvalues ::= mvalues COMMA LP nexprlist RP", + /* 98 */ "distinct ::= DISTINCT", + /* 99 */ "distinct ::= ALL", + /* 100 */ "distinct ::=", + /* 101 */ "sclp ::=", + /* 102 */ "selcollist ::= sclp scanpt expr scanpt as", + /* 103 */ "selcollist ::= sclp scanpt STAR", + /* 104 */ "selcollist ::= sclp scanpt nm DOT STAR", + /* 105 */ "as ::= AS nm", + /* 106 */ "as ::=", + /* 107 */ "from ::=", + /* 108 */ "from ::= FROM seltablist", + /* 109 */ "stl_prefix ::= seltablist joinop", + /* 110 */ "stl_prefix ::=", + /* 111 */ "seltablist ::= stl_prefix nm dbnm as on_using", + /* 112 */ "seltablist ::= stl_prefix nm dbnm as indexed_by on_using", + /* 113 */ "seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_using", + /* 114 */ "seltablist ::= stl_prefix LP select RP as on_using", + /* 115 */ "seltablist ::= stl_prefix LP seltablist RP as on_using", + /* 116 */ "dbnm ::=", + /* 117 */ "dbnm ::= DOT nm", + /* 118 */ "fullname ::= nm", + /* 119 */ "fullname ::= nm DOT nm", + /* 120 */ "xfullname ::= nm", + /* 121 */ "xfullname ::= nm DOT nm", + /* 122 */ "xfullname ::= nm DOT nm AS nm", + /* 123 */ "xfullname ::= nm AS nm", + /* 124 */ "joinop ::= COMMA|JOIN", + /* 125 */ "joinop ::= JOIN_KW JOIN", + /* 126 */ "joinop ::= JOIN_KW nm JOIN", + /* 127 */ "joinop ::= JOIN_KW nm nm JOIN", + /* 128 */ "on_using ::= ON expr", + /* 129 */ "on_using ::= USING LP idlist RP", + /* 130 */ "on_using ::=", + /* 131 */ "indexed_opt ::=", + /* 132 */ "indexed_by ::= INDEXED BY nm", + /* 133 */ "indexed_by ::= NOT INDEXED", + /* 134 */ "orderby_opt ::=", + /* 135 */ "orderby_opt ::= ORDER BY sortlist", + /* 136 */ "sortlist ::= sortlist COMMA expr sortorder nulls", + /* 137 */ "sortlist ::= expr sortorder nulls", + /* 138 */ "sortorder ::= ASC", + /* 139 */ "sortorder ::= DESC", + /* 140 */ "sortorder ::=", + /* 141 */ "nulls ::= NULLS FIRST", + /* 142 */ "nulls ::= NULLS LAST", + /* 143 */ "nulls ::=", + /* 144 */ "groupby_opt ::=", + /* 145 */ "groupby_opt ::= GROUP BY nexprlist", + /* 146 */ "having_opt ::=", + /* 147 */ "having_opt ::= HAVING expr", + /* 148 */ "limit_opt ::=", + /* 149 */ "limit_opt ::= LIMIT expr", + /* 150 */ "limit_opt ::= LIMIT expr OFFSET expr", + /* 151 */ "limit_opt ::= LIMIT expr COMMA expr", + /* 152 */ "cmd ::= with DELETE FROM xfullname indexed_opt where_opt_ret orderby_opt limit_opt", + /* 153 */ "where_opt ::=", + /* 154 */ "where_opt ::= WHERE expr", + /* 155 */ "where_opt_ret ::=", + /* 156 */ "where_opt_ret ::= WHERE expr", + /* 157 */ "where_opt_ret 
::= RETURNING selcollist", + /* 158 */ "where_opt_ret ::= WHERE expr RETURNING selcollist", + /* 159 */ "cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt_ret orderby_opt limit_opt", + /* 160 */ "setlist ::= setlist COMMA nm EQ expr", + /* 161 */ "setlist ::= setlist COMMA LP idlist RP EQ expr", + /* 162 */ "setlist ::= nm EQ expr", + /* 163 */ "setlist ::= LP idlist RP EQ expr", + /* 164 */ "cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert", + /* 165 */ "cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES returning", + /* 166 */ "upsert ::=", + /* 167 */ "upsert ::= RETURNING selcollist", + /* 168 */ "upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt upsert", + /* 169 */ "upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING upsert", + /* 170 */ "upsert ::= ON CONFLICT DO NOTHING returning", + /* 171 */ "upsert ::= ON CONFLICT DO UPDATE SET setlist where_opt returning", + /* 172 */ "returning ::= RETURNING selcollist", + /* 173 */ "insert_cmd ::= INSERT orconf", + /* 174 */ "insert_cmd ::= REPLACE", + /* 175 */ "idlist_opt ::=", + /* 176 */ "idlist_opt ::= LP idlist RP", + /* 177 */ "idlist ::= idlist COMMA nm", + /* 178 */ "idlist ::= nm", + /* 179 */ "expr ::= LP expr RP", + /* 180 */ "expr ::= ID|INDEXED|JOIN_KW", + /* 181 */ "expr ::= nm DOT nm", + /* 182 */ "expr ::= nm DOT nm DOT nm", + /* 183 */ "term ::= NULL|FLOAT|BLOB", + /* 184 */ "term ::= STRING", + /* 185 */ "term ::= INTEGER", + /* 186 */ "expr ::= VARIABLE", + /* 187 */ "expr ::= expr COLLATE ID|STRING", + /* 188 */ "expr ::= CAST LP expr AS typetoken RP", + /* 189 */ "expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP", + /* 190 */ "expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP", + /* 191 */ "expr ::= ID|INDEXED|JOIN_KW LP STAR RP", + /* 192 */ "expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over", + /* 193 */ "expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP filter_over", + /* 194 */ "expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over", + /* 195 */ "term ::= CTIME_KW", + /* 196 */ "expr ::= LP nexprlist COMMA expr RP", + /* 197 */ "expr ::= expr AND expr", + /* 198 */ "expr ::= expr OR expr", + /* 199 */ "expr ::= expr LT|GT|GE|LE expr", + /* 200 */ "expr ::= expr EQ|NE expr", + /* 201 */ "expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr", + /* 202 */ "expr ::= expr PLUS|MINUS expr", + /* 203 */ "expr ::= expr STAR|SLASH|REM expr", + /* 204 */ "expr ::= expr CONCAT expr", + /* 205 */ "likeop ::= NOT LIKE_KW|MATCH", + /* 206 */ "expr ::= expr likeop expr", + /* 207 */ "expr ::= expr likeop expr ESCAPE expr", + /* 208 */ "expr ::= expr ISNULL|NOTNULL", + /* 209 */ "expr ::= expr NOT NULL", + /* 210 */ "expr ::= expr IS expr", + /* 211 */ "expr ::= expr IS NOT expr", + /* 212 */ "expr ::= expr IS NOT DISTINCT FROM expr", + /* 213 */ "expr ::= expr IS DISTINCT FROM expr", + /* 214 */ "expr ::= NOT expr", + /* 215 */ "expr ::= BITNOT expr", + /* 216 */ "expr ::= PLUS|MINUS expr", + /* 217 */ "expr ::= expr PTR expr", + /* 218 */ "between_op ::= BETWEEN", + /* 219 */ "between_op ::= NOT BETWEEN", + /* 220 */ "expr ::= expr between_op expr AND expr", + /* 221 */ "in_op ::= IN", + /* 222 */ "in_op ::= NOT IN", + /* 223 */ "expr ::= expr in_op LP exprlist RP", + /* 224 */ "expr ::= LP select RP", + /* 225 */ "expr ::= expr in_op LP select RP", + /* 226 */ "expr ::= expr in_op nm dbnm paren_exprlist", + /* 227 */ "expr ::= EXISTS LP select RP", + /* 228 */ "expr ::= CASE case_operand 
case_exprlist case_else END", + /* 229 */ "case_exprlist ::= case_exprlist WHEN expr THEN expr", + /* 230 */ "case_exprlist ::= WHEN expr THEN expr", + /* 231 */ "case_else ::= ELSE expr", + /* 232 */ "case_else ::=", + /* 233 */ "case_operand ::=", + /* 234 */ "exprlist ::=", + /* 235 */ "nexprlist ::= nexprlist COMMA expr", + /* 236 */ "nexprlist ::= expr", + /* 237 */ "paren_exprlist ::=", + /* 238 */ "paren_exprlist ::= LP exprlist RP", + /* 239 */ "cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt", + /* 240 */ "uniqueflag ::= UNIQUE", + /* 241 */ "uniqueflag ::=", + /* 242 */ "eidlist_opt ::=", + /* 243 */ "eidlist_opt ::= LP eidlist RP", + /* 244 */ "eidlist ::= eidlist COMMA nm collate sortorder", + /* 245 */ "eidlist ::= nm collate sortorder", + /* 246 */ "collate ::=", + /* 247 */ "collate ::= COLLATE ID|STRING", + /* 248 */ "cmd ::= DROP INDEX ifexists fullname", + /* 249 */ "cmd ::= VACUUM vinto", + /* 250 */ "cmd ::= VACUUM nm vinto", + /* 251 */ "vinto ::= INTO expr", + /* 252 */ "vinto ::=", + /* 253 */ "cmd ::= PRAGMA nm dbnm", + /* 254 */ "cmd ::= PRAGMA nm dbnm EQ nmnum", + /* 255 */ "cmd ::= PRAGMA nm dbnm LP nmnum RP", + /* 256 */ "cmd ::= PRAGMA nm dbnm EQ minus_num", + /* 257 */ "cmd ::= PRAGMA nm dbnm LP minus_num RP", + /* 258 */ "plus_num ::= PLUS INTEGER|FLOAT", + /* 259 */ "minus_num ::= MINUS INTEGER|FLOAT", + /* 260 */ "cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END", + /* 261 */ "trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause", + /* 262 */ "trigger_time ::= BEFORE|AFTER", + /* 263 */ "trigger_time ::= INSTEAD OF", + /* 264 */ "trigger_time ::=", + /* 265 */ "trigger_event ::= DELETE|INSERT", + /* 266 */ "trigger_event ::= UPDATE", + /* 267 */ "trigger_event ::= UPDATE OF idlist", + /* 268 */ "when_clause ::=", + /* 269 */ "when_clause ::= WHEN expr", + /* 270 */ "trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI", + /* 271 */ "trigger_cmd_list ::= trigger_cmd SEMI", + /* 272 */ "trnm ::= nm DOT nm", + /* 273 */ "tridxby ::= INDEXED BY nm", + /* 274 */ "tridxby ::= NOT INDEXED", + /* 275 */ "trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt", + /* 276 */ "trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt", + /* 277 */ "trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt", + /* 278 */ "trigger_cmd ::= scanpt select scanpt", + /* 279 */ "expr ::= RAISE LP IGNORE RP", + /* 280 */ "expr ::= RAISE LP raisetype COMMA nm RP", + /* 281 */ "raisetype ::= ROLLBACK", + /* 282 */ "raisetype ::= ABORT", + /* 283 */ "raisetype ::= FAIL", + /* 284 */ "cmd ::= DROP TRIGGER ifexists fullname", + /* 285 */ "cmd ::= ATTACH database_kw_opt expr AS expr key_opt", + /* 286 */ "cmd ::= DETACH database_kw_opt expr", + /* 287 */ "key_opt ::=", + /* 288 */ "key_opt ::= KEY expr", + /* 289 */ "cmd ::= REINDEX", + /* 290 */ "cmd ::= REINDEX nm dbnm", + /* 291 */ "cmd ::= ANALYZE", + /* 292 */ "cmd ::= ANALYZE nm dbnm", + /* 293 */ "cmd ::= ALTER TABLE fullname RENAME TO nm", + /* 294 */ "cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist", + /* 295 */ "cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm", + /* 296 */ "add_column_fullname ::= fullname", + /* 297 */ "cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm", + /* 298 */ "cmd ::= create_vtab", + /* 299 */ "cmd ::= create_vtab LP vtabarglist RP", + /* 300 */ "create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING 
nm", + /* 301 */ "vtabarg ::=", + /* 302 */ "vtabargtoken ::= ANY", + /* 303 */ "vtabargtoken ::= lp anylist RP", + /* 304 */ "lp ::= LP", + /* 305 */ "with ::= WITH wqlist", + /* 306 */ "with ::= WITH RECURSIVE wqlist", + /* 307 */ "wqas ::= AS", + /* 308 */ "wqas ::= AS MATERIALIZED", + /* 309 */ "wqas ::= AS NOT MATERIALIZED", + /* 310 */ "wqitem ::= withnm eidlist_opt wqas LP select RP", + /* 311 */ "withnm ::= nm", + /* 312 */ "wqlist ::= wqitem", + /* 313 */ "wqlist ::= wqlist COMMA wqitem", + /* 314 */ "windowdefn_list ::= windowdefn_list COMMA windowdefn", + /* 315 */ "windowdefn ::= nm AS LP window RP", + /* 316 */ "window ::= PARTITION BY nexprlist orderby_opt frame_opt", + /* 317 */ "window ::= nm PARTITION BY nexprlist orderby_opt frame_opt", + /* 318 */ "window ::= ORDER BY sortlist frame_opt", + /* 319 */ "window ::= nm ORDER BY sortlist frame_opt", + /* 320 */ "window ::= nm frame_opt", + /* 321 */ "frame_opt ::=", + /* 322 */ "frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt", + /* 323 */ "frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt", + /* 324 */ "range_or_rows ::= RANGE|ROWS|GROUPS", + /* 325 */ "frame_bound_s ::= frame_bound", + /* 326 */ "frame_bound_s ::= UNBOUNDED PRECEDING", + /* 327 */ "frame_bound_e ::= frame_bound", + /* 328 */ "frame_bound_e ::= UNBOUNDED FOLLOWING", + /* 329 */ "frame_bound ::= expr PRECEDING|FOLLOWING", + /* 330 */ "frame_bound ::= CURRENT ROW", + /* 331 */ "frame_exclude_opt ::=", + /* 332 */ "frame_exclude_opt ::= EXCLUDE frame_exclude", + /* 333 */ "frame_exclude ::= NO OTHERS", + /* 334 */ "frame_exclude ::= CURRENT ROW", + /* 335 */ "frame_exclude ::= GROUP|TIES", + /* 336 */ "window_clause ::= WINDOW windowdefn_list", + /* 337 */ "filter_over ::= filter_clause over_clause", + /* 338 */ "filter_over ::= over_clause", + /* 339 */ "filter_over ::= filter_clause", + /* 340 */ "over_clause ::= OVER LP window RP", + /* 341 */ "over_clause ::= OVER nm", + /* 342 */ "filter_clause ::= FILTER LP WHERE expr RP", + /* 343 */ "term ::= QNUMBER", + /* 344 */ "input ::= cmdlist", + /* 345 */ "cmdlist ::= cmdlist ecmd", + /* 346 */ "cmdlist ::= ecmd", + /* 347 */ "ecmd ::= SEMI", + /* 348 */ "ecmd ::= cmdx SEMI", + /* 349 */ "ecmd ::= explain cmdx SEMI", + /* 350 */ "trans_opt ::=", + /* 351 */ "trans_opt ::= TRANSACTION", + /* 352 */ "trans_opt ::= TRANSACTION nm", + /* 353 */ "savepoint_opt ::= SAVEPOINT", + /* 354 */ "savepoint_opt ::=", + /* 355 */ "cmd ::= create_table create_table_args", + /* 356 */ "table_option_set ::= table_option", + /* 357 */ "columnlist ::= columnlist COMMA columnname carglist", + /* 358 */ "columnlist ::= columnname carglist", + /* 359 */ "nm ::= ID|INDEXED|JOIN_KW", + /* 360 */ "nm ::= STRING", + /* 361 */ "typetoken ::= typename", + /* 362 */ "typename ::= ID|STRING", + /* 363 */ "signed ::= plus_num", + /* 364 */ "signed ::= minus_num", + /* 365 */ "carglist ::= carglist ccons", + /* 366 */ "carglist ::=", + /* 367 */ "ccons ::= NULL onconf", + /* 368 */ "ccons ::= GENERATED ALWAYS AS generated", + /* 369 */ "ccons ::= AS generated", + /* 370 */ "conslist_opt ::= COMMA conslist", + /* 371 */ "conslist ::= conslist tconscomma tcons", + /* 372 */ "conslist ::= tcons", + /* 373 */ "tconscomma ::=", + /* 374 */ "defer_subclause_opt ::= defer_subclause", + /* 375 */ "resolvetype ::= raisetype", + /* 376 */ "selectnowith ::= oneselect", + /* 377 */ "oneselect ::= values", + /* 378 */ "sclp ::= selcollist COMMA", + /* 379 */ "as ::= ID|STRING", + /* 380 */ "indexed_opt ::= 
indexed_by", + /* 381 */ "returning ::=", + /* 382 */ "expr ::= term", + /* 383 */ "likeop ::= LIKE_KW|MATCH", + /* 384 */ "case_operand ::= expr", + /* 385 */ "exprlist ::= nexprlist", + /* 386 */ "nmnum ::= plus_num", + /* 387 */ "nmnum ::= nm", + /* 388 */ "nmnum ::= ON", + /* 389 */ "nmnum ::= DELETE", + /* 390 */ "nmnum ::= DEFAULT", + /* 391 */ "plus_num ::= INTEGER|FLOAT", + /* 392 */ "foreach_clause ::=", + /* 393 */ "foreach_clause ::= FOR EACH ROW", + /* 394 */ "trnm ::= nm", + /* 395 */ "tridxby ::=", + /* 396 */ "database_kw_opt ::= DATABASE", + /* 397 */ "database_kw_opt ::=", + /* 398 */ "kwcolumn_opt ::=", + /* 399 */ "kwcolumn_opt ::= COLUMNKW", + /* 400 */ "vtabarglist ::= vtabarg", + /* 401 */ "vtabarglist ::= vtabarglist COMMA vtabarg", + /* 402 */ "vtabarg ::= vtabarg vtabargtoken", + /* 403 */ "anylist ::=", + /* 404 */ "anylist ::= anylist LP anylist RP", + /* 405 */ "anylist ::= anylist ANY", + /* 406 */ "with ::=", + /* 407 */ "windowdefn_list ::= windowdefn", + /* 408 */ "window ::= frame_opt", }; #endif /* NDEBUG */ -#if YYSTACKDEPTH<=0 +#if YYGROWABLESTACK /* ** Try to increase the size of the parser stack. Return the number ** of errors. Return 0 on success. */ static int yyGrowStack(yyParser *p){ + int oldSize = 1 + (int)(p->yystackEnd - p->yystack); int newSize; int idx; yyStackEntry *pNew; - newSize = p->yystksz*2 + 100; - idx = p->yytos ? (int)(p->yytos - p->yystack) : 0; - if( p->yystack==&p->yystk0 ){ - pNew = malloc(newSize*sizeof(pNew[0])); - if( pNew ) pNew[0] = p->yystk0; + newSize = oldSize*2 + 100; + idx = (int)(p->yytos - p->yystack); + if( p->yystack==p->yystk0 ){ + pNew = YYREALLOC(0, newSize*sizeof(pNew[0])); + if( pNew==0 ) return 1; + memcpy(pNew, p->yystack, oldSize*sizeof(pNew[0])); }else{ - pNew = realloc(p->yystack, newSize*sizeof(pNew[0])); + pNew = YYREALLOC(p->yystack, newSize*sizeof(pNew[0])); + if( pNew==0 ) return 1; } - if( pNew ){ - p->yystack = pNew; - p->yytos = &p->yystack[idx]; + p->yystack = pNew; + p->yytos = &p->yystack[idx]; #ifndef NDEBUG - if( yyTraceFILE ){ - fprintf(yyTraceFILE,"%sStack grows from %d to %d entries.\n", - yyTracePrompt, p->yystksz, newSize); - } -#endif - p->yystksz = newSize; + if( yyTraceFILE ){ + fprintf(yyTraceFILE,"%sStack grows from %d to %d entries.\n", + yyTracePrompt, oldSize, newSize); } - return pNew==0; +#endif + p->yystackEnd = &p->yystack[newSize-1]; + return 0; } +#endif /* YYGROWABLESTACK */ + +#if !YYGROWABLESTACK +/* For builds that do no have a growable stack, yyGrowStack always +** returns an error. +*/ +# define yyGrowStack(X) 1 #endif /* Datatype of the argument to the memory allocated passed as the @@ -173421,24 +174797,14 @@ SQLITE_PRIVATE void sqlite3ParserInit(void *yypRawParser sqlite3ParserCTX_PDECL) #ifdef YYTRACKMAXSTACKDEPTH yypParser->yyhwm = 0; #endif -#if YYSTACKDEPTH<=0 - yypParser->yytos = NULL; - yypParser->yystack = NULL; - yypParser->yystksz = 0; - if( yyGrowStack(yypParser) ){ - yypParser->yystack = &yypParser->yystk0; - yypParser->yystksz = 1; - } -#endif + yypParser->yystack = yypParser->yystk0; + yypParser->yystackEnd = &yypParser->yystack[YYSTACKDEPTH-1]; #ifndef YYNOERRORRECOVERY yypParser->yyerrcnt = -1; #endif yypParser->yytos = yypParser->yystack; yypParser->yystack[0].stateno = 0; yypParser->yystack[0].major = 0; -#if YYSTACKDEPTH>0 - yypParser->yystackEnd = &yypParser->yystack[YYSTACKDEPTH-1]; -#endif } #ifndef sqlite3Parser_ENGINEALWAYSONSTACK @@ -173492,97 +174858,98 @@ static void yy_destructor( ** inside the C code. 
*/ /********* Begin destructor definitions ***************************************/ - case 204: /* select */ - case 239: /* selectnowith */ - case 240: /* oneselect */ - case 252: /* values */ + case 205: /* select */ + case 240: /* selectnowith */ + case 241: /* oneselect */ + case 253: /* values */ + case 255: /* mvalues */ { -sqlite3SelectDelete(pParse->db, (yypminor->yy47)); -} - break; - case 216: /* term */ - case 217: /* expr */ - case 246: /* where_opt */ - case 248: /* having_opt */ - case 267: /* where_opt_ret */ - case 278: /* case_operand */ - case 280: /* case_else */ - case 283: /* vinto */ - case 290: /* when_clause */ - case 295: /* key_opt */ - case 311: /* filter_clause */ +sqlite3SelectDelete(pParse->db, (yypminor->yy555)); +} + break; + case 217: /* term */ + case 218: /* expr */ + case 247: /* where_opt */ + case 249: /* having_opt */ + case 269: /* where_opt_ret */ + case 280: /* case_operand */ + case 282: /* case_else */ + case 285: /* vinto */ + case 292: /* when_clause */ + case 297: /* key_opt */ + case 314: /* filter_clause */ { -sqlite3ExprDelete(pParse->db, (yypminor->yy528)); -} - break; - case 221: /* eidlist_opt */ - case 231: /* sortlist */ - case 232: /* eidlist */ - case 244: /* selcollist */ - case 247: /* groupby_opt */ - case 249: /* orderby_opt */ - case 253: /* nexprlist */ - case 254: /* sclp */ - case 261: /* exprlist */ - case 268: /* setlist */ - case 277: /* paren_exprlist */ - case 279: /* case_exprlist */ - case 310: /* part_opt */ +sqlite3ExprDelete(pParse->db, (yypminor->yy454)); +} + break; + case 222: /* eidlist_opt */ + case 232: /* sortlist */ + case 233: /* eidlist */ + case 245: /* selcollist */ + case 248: /* groupby_opt */ + case 250: /* orderby_opt */ + case 254: /* nexprlist */ + case 256: /* sclp */ + case 263: /* exprlist */ + case 270: /* setlist */ + case 279: /* paren_exprlist */ + case 281: /* case_exprlist */ + case 313: /* part_opt */ { -sqlite3ExprListDelete(pParse->db, (yypminor->yy322)); +sqlite3ExprListDelete(pParse->db, (yypminor->yy14)); } break; - case 238: /* fullname */ - case 245: /* from */ - case 256: /* seltablist */ - case 257: /* stl_prefix */ - case 262: /* xfullname */ + case 239: /* fullname */ + case 246: /* from */ + case 258: /* seltablist */ + case 259: /* stl_prefix */ + case 264: /* xfullname */ { -sqlite3SrcListDelete(pParse->db, (yypminor->yy131)); +sqlite3SrcListDelete(pParse->db, (yypminor->yy203)); } break; - case 241: /* wqlist */ + case 242: /* wqlist */ { -sqlite3WithDelete(pParse->db, (yypminor->yy521)); +sqlite3WithDelete(pParse->db, (yypminor->yy59)); } break; - case 251: /* window_clause */ - case 306: /* windowdefn_list */ + case 252: /* window_clause */ + case 309: /* windowdefn_list */ { -sqlite3WindowListDelete(pParse->db, (yypminor->yy41)); +sqlite3WindowListDelete(pParse->db, (yypminor->yy211)); } break; - case 263: /* idlist */ - case 270: /* idlist_opt */ + case 265: /* idlist */ + case 272: /* idlist_opt */ { -sqlite3IdListDelete(pParse->db, (yypminor->yy254)); +sqlite3IdListDelete(pParse->db, (yypminor->yy132)); } break; - case 273: /* filter_over */ - case 307: /* windowdefn */ - case 308: /* window */ - case 309: /* frame_opt */ - case 312: /* over_clause */ + case 275: /* filter_over */ + case 310: /* windowdefn */ + case 311: /* window */ + case 312: /* frame_opt */ + case 315: /* over_clause */ { -sqlite3WindowDelete(pParse->db, (yypminor->yy41)); +sqlite3WindowDelete(pParse->db, (yypminor->yy211)); } break; - case 286: /* trigger_cmd_list */ - case 291: /* trigger_cmd */ 
+ case 288: /* trigger_cmd_list */ + case 293: /* trigger_cmd */ { -sqlite3DeleteTriggerStep(pParse->db, (yypminor->yy33)); +sqlite3DeleteTriggerStep(pParse->db, (yypminor->yy427)); } break; - case 288: /* trigger_event */ + case 290: /* trigger_event */ { -sqlite3IdListDelete(pParse->db, (yypminor->yy180).b); +sqlite3IdListDelete(pParse->db, (yypminor->yy286).b); } break; - case 314: /* frame_bound */ - case 315: /* frame_bound_s */ - case 316: /* frame_bound_e */ + case 317: /* frame_bound */ + case 318: /* frame_bound_s */ + case 319: /* frame_bound_e */ { -sqlite3ExprDelete(pParse->db, (yypminor->yy595).pExpr); +sqlite3ExprDelete(pParse->db, (yypminor->yy509).pExpr); } break; /********* End destructor definitions *****************************************/ @@ -173616,9 +174983,26 @@ static void yy_pop_parser_stack(yyParser *pParser){ */ SQLITE_PRIVATE void sqlite3ParserFinalize(void *p){ yyParser *pParser = (yyParser*)p; - while( pParser->yytos>pParser->yystack ) yy_pop_parser_stack(pParser); -#if YYSTACKDEPTH<=0 - if( pParser->yystack!=&pParser->yystk0 ) free(pParser->yystack); + + /* In-lined version of calling yy_pop_parser_stack() for each + ** element left in the stack */ + yyStackEntry *yytos = pParser->yytos; + while( yytos>pParser->yystack ){ +#ifndef NDEBUG + if( yyTraceFILE ){ + fprintf(yyTraceFILE,"%sPopping %s\n", + yyTracePrompt, + yyTokenName[yytos->major]); + } +#endif + if( yytos->major>=YY_MIN_DSTRCTR ){ + yy_destructor(pParser, yytos->major, &yytos->minor); + } + yytos--; + } + +#if YYGROWABLESTACK + if( pParser->yystack!=pParser->yystk0 ) YYFREE(pParser->yystack); #endif } @@ -173801,7 +175185,7 @@ static void yyStackOverflow(yyParser *yypParser){ ** stack every overflows */ /******** Begin %stack_overflow code ******************************************/ - sqlite3ErrorMsg(pParse, "parser stack overflow"); + sqlite3OomFault(pParse->db); /******** End %stack_overflow code ********************************************/ sqlite3ParserARG_STORE /* Suppress warning about unused %extra_argument var */ sqlite3ParserCTX_STORE @@ -173845,25 +175229,19 @@ static void yy_shift( assert( yypParser->yyhwm == (int)(yypParser->yytos - yypParser->yystack) ); } #endif -#if YYSTACKDEPTH>0 - if( yypParser->yytos>yypParser->yystackEnd ){ - yypParser->yytos--; - yyStackOverflow(yypParser); - return; - } -#else - if( yypParser->yytos>=&yypParser->yystack[yypParser->yystksz] ){ + yytos = yypParser->yytos; + if( yytos>yypParser->yystackEnd ){ if( yyGrowStack(yypParser) ){ yypParser->yytos--; yyStackOverflow(yypParser); return; } + yytos = yypParser->yytos; + assert( yytos <= yypParser->yystackEnd ); } -#endif if( yyNewState > YY_MAX_SHIFT ){ yyNewState += YY_MIN_REDUCE - YY_MIN_SHIFTREDUCE; } - yytos = yypParser->yytos; yytos->stateno = yyNewState; yytos->major = yyMajor; yytos->minor.yy0 = yyMinor; @@ -173873,411 +175251,415 @@ static void yy_shift( /* For rule J, yyRuleInfoLhs[J] contains the symbol on the left-hand side ** of that rule */ static const YYCODETYPE yyRuleInfoLhs[] = { - 189, /* (0) explain ::= EXPLAIN */ - 189, /* (1) explain ::= EXPLAIN QUERY PLAN */ - 188, /* (2) cmdx ::= cmd */ - 190, /* (3) cmd ::= BEGIN transtype trans_opt */ - 191, /* (4) transtype ::= */ - 191, /* (5) transtype ::= DEFERRED */ - 191, /* (6) transtype ::= IMMEDIATE */ - 191, /* (7) transtype ::= EXCLUSIVE */ - 190, /* (8) cmd ::= COMMIT|END trans_opt */ - 190, /* (9) cmd ::= ROLLBACK trans_opt */ - 190, /* (10) cmd ::= SAVEPOINT nm */ - 190, /* (11) cmd ::= RELEASE savepoint_opt nm */ - 190, /* (12) cmd 
::= ROLLBACK trans_opt TO savepoint_opt nm */ - 195, /* (13) create_table ::= createkw temp TABLE ifnotexists nm dbnm */ - 197, /* (14) createkw ::= CREATE */ - 199, /* (15) ifnotexists ::= */ - 199, /* (16) ifnotexists ::= IF NOT EXISTS */ - 198, /* (17) temp ::= TEMP */ - 198, /* (18) temp ::= */ - 196, /* (19) create_table_args ::= LP columnlist conslist_opt RP table_option_set */ - 196, /* (20) create_table_args ::= AS select */ - 203, /* (21) table_option_set ::= */ - 203, /* (22) table_option_set ::= table_option_set COMMA table_option */ - 205, /* (23) table_option ::= WITHOUT nm */ - 205, /* (24) table_option ::= nm */ - 206, /* (25) columnname ::= nm typetoken */ - 208, /* (26) typetoken ::= */ - 208, /* (27) typetoken ::= typename LP signed RP */ - 208, /* (28) typetoken ::= typename LP signed COMMA signed RP */ - 209, /* (29) typename ::= typename ID|STRING */ - 213, /* (30) scanpt ::= */ - 214, /* (31) scantok ::= */ - 215, /* (32) ccons ::= CONSTRAINT nm */ - 215, /* (33) ccons ::= DEFAULT scantok term */ - 215, /* (34) ccons ::= DEFAULT LP expr RP */ - 215, /* (35) ccons ::= DEFAULT PLUS scantok term */ - 215, /* (36) ccons ::= DEFAULT MINUS scantok term */ - 215, /* (37) ccons ::= DEFAULT scantok ID|INDEXED */ - 215, /* (38) ccons ::= NOT NULL onconf */ - 215, /* (39) ccons ::= PRIMARY KEY sortorder onconf autoinc */ - 215, /* (40) ccons ::= UNIQUE onconf */ - 215, /* (41) ccons ::= CHECK LP expr RP */ - 215, /* (42) ccons ::= REFERENCES nm eidlist_opt refargs */ - 215, /* (43) ccons ::= defer_subclause */ - 215, /* (44) ccons ::= COLLATE ID|STRING */ - 224, /* (45) generated ::= LP expr RP */ - 224, /* (46) generated ::= LP expr RP ID */ - 220, /* (47) autoinc ::= */ - 220, /* (48) autoinc ::= AUTOINCR */ - 222, /* (49) refargs ::= */ - 222, /* (50) refargs ::= refargs refarg */ - 225, /* (51) refarg ::= MATCH nm */ - 225, /* (52) refarg ::= ON INSERT refact */ - 225, /* (53) refarg ::= ON DELETE refact */ - 225, /* (54) refarg ::= ON UPDATE refact */ - 226, /* (55) refact ::= SET NULL */ - 226, /* (56) refact ::= SET DEFAULT */ - 226, /* (57) refact ::= CASCADE */ - 226, /* (58) refact ::= RESTRICT */ - 226, /* (59) refact ::= NO ACTION */ - 223, /* (60) defer_subclause ::= NOT DEFERRABLE init_deferred_pred_opt */ - 223, /* (61) defer_subclause ::= DEFERRABLE init_deferred_pred_opt */ - 227, /* (62) init_deferred_pred_opt ::= */ - 227, /* (63) init_deferred_pred_opt ::= INITIALLY DEFERRED */ - 227, /* (64) init_deferred_pred_opt ::= INITIALLY IMMEDIATE */ - 202, /* (65) conslist_opt ::= */ - 229, /* (66) tconscomma ::= COMMA */ - 230, /* (67) tcons ::= CONSTRAINT nm */ - 230, /* (68) tcons ::= PRIMARY KEY LP sortlist autoinc RP onconf */ - 230, /* (69) tcons ::= UNIQUE LP sortlist RP onconf */ - 230, /* (70) tcons ::= CHECK LP expr RP onconf */ - 230, /* (71) tcons ::= FOREIGN KEY LP eidlist RP REFERENCES nm eidlist_opt refargs defer_subclause_opt */ - 233, /* (72) defer_subclause_opt ::= */ - 218, /* (73) onconf ::= */ - 218, /* (74) onconf ::= ON CONFLICT resolvetype */ - 234, /* (75) orconf ::= */ - 234, /* (76) orconf ::= OR resolvetype */ - 235, /* (77) resolvetype ::= IGNORE */ - 235, /* (78) resolvetype ::= REPLACE */ - 190, /* (79) cmd ::= DROP TABLE ifexists fullname */ - 237, /* (80) ifexists ::= IF EXISTS */ - 237, /* (81) ifexists ::= */ - 190, /* (82) cmd ::= createkw temp VIEW ifnotexists nm dbnm eidlist_opt AS select */ - 190, /* (83) cmd ::= DROP VIEW ifexists fullname */ - 190, /* (84) cmd ::= select */ - 204, /* (85) select ::= WITH wqlist selectnowith */ 
- 204, /* (86) select ::= WITH RECURSIVE wqlist selectnowith */ - 204, /* (87) select ::= selectnowith */ - 239, /* (88) selectnowith ::= selectnowith multiselect_op oneselect */ - 242, /* (89) multiselect_op ::= UNION */ - 242, /* (90) multiselect_op ::= UNION ALL */ - 242, /* (91) multiselect_op ::= EXCEPT|INTERSECT */ - 240, /* (92) oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt */ - 240, /* (93) oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt window_clause orderby_opt limit_opt */ - 252, /* (94) values ::= VALUES LP nexprlist RP */ - 252, /* (95) values ::= values COMMA LP nexprlist RP */ - 243, /* (96) distinct ::= DISTINCT */ - 243, /* (97) distinct ::= ALL */ - 243, /* (98) distinct ::= */ - 254, /* (99) sclp ::= */ - 244, /* (100) selcollist ::= sclp scanpt expr scanpt as */ - 244, /* (101) selcollist ::= sclp scanpt STAR */ - 244, /* (102) selcollist ::= sclp scanpt nm DOT STAR */ - 255, /* (103) as ::= AS nm */ - 255, /* (104) as ::= */ - 245, /* (105) from ::= */ - 245, /* (106) from ::= FROM seltablist */ - 257, /* (107) stl_prefix ::= seltablist joinop */ - 257, /* (108) stl_prefix ::= */ - 256, /* (109) seltablist ::= stl_prefix nm dbnm as on_using */ - 256, /* (110) seltablist ::= stl_prefix nm dbnm as indexed_by on_using */ - 256, /* (111) seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_using */ - 256, /* (112) seltablist ::= stl_prefix LP select RP as on_using */ - 256, /* (113) seltablist ::= stl_prefix LP seltablist RP as on_using */ - 200, /* (114) dbnm ::= */ - 200, /* (115) dbnm ::= DOT nm */ - 238, /* (116) fullname ::= nm */ - 238, /* (117) fullname ::= nm DOT nm */ - 262, /* (118) xfullname ::= nm */ - 262, /* (119) xfullname ::= nm DOT nm */ - 262, /* (120) xfullname ::= nm DOT nm AS nm */ - 262, /* (121) xfullname ::= nm AS nm */ - 258, /* (122) joinop ::= COMMA|JOIN */ - 258, /* (123) joinop ::= JOIN_KW JOIN */ - 258, /* (124) joinop ::= JOIN_KW nm JOIN */ - 258, /* (125) joinop ::= JOIN_KW nm nm JOIN */ - 259, /* (126) on_using ::= ON expr */ - 259, /* (127) on_using ::= USING LP idlist RP */ - 259, /* (128) on_using ::= */ - 264, /* (129) indexed_opt ::= */ - 260, /* (130) indexed_by ::= INDEXED BY nm */ - 260, /* (131) indexed_by ::= NOT INDEXED */ - 249, /* (132) orderby_opt ::= */ - 249, /* (133) orderby_opt ::= ORDER BY sortlist */ - 231, /* (134) sortlist ::= sortlist COMMA expr sortorder nulls */ - 231, /* (135) sortlist ::= expr sortorder nulls */ - 219, /* (136) sortorder ::= ASC */ - 219, /* (137) sortorder ::= DESC */ - 219, /* (138) sortorder ::= */ - 265, /* (139) nulls ::= NULLS FIRST */ - 265, /* (140) nulls ::= NULLS LAST */ - 265, /* (141) nulls ::= */ - 247, /* (142) groupby_opt ::= */ - 247, /* (143) groupby_opt ::= GROUP BY nexprlist */ - 248, /* (144) having_opt ::= */ - 248, /* (145) having_opt ::= HAVING expr */ - 250, /* (146) limit_opt ::= */ - 250, /* (147) limit_opt ::= LIMIT expr */ - 250, /* (148) limit_opt ::= LIMIT expr OFFSET expr */ - 250, /* (149) limit_opt ::= LIMIT expr COMMA expr */ - 190, /* (150) cmd ::= with DELETE FROM xfullname indexed_opt where_opt_ret orderby_opt limit_opt */ - 246, /* (151) where_opt ::= */ - 246, /* (152) where_opt ::= WHERE expr */ - 267, /* (153) where_opt_ret ::= */ - 267, /* (154) where_opt_ret ::= WHERE expr */ - 267, /* (155) where_opt_ret ::= RETURNING selcollist */ - 267, /* (156) where_opt_ret ::= WHERE expr RETURNING selcollist */ - 190, /* (157) cmd ::= with UPDATE orconf xfullname indexed_opt SET 
setlist from where_opt_ret orderby_opt limit_opt */ - 268, /* (158) setlist ::= setlist COMMA nm EQ expr */ - 268, /* (159) setlist ::= setlist COMMA LP idlist RP EQ expr */ - 268, /* (160) setlist ::= nm EQ expr */ - 268, /* (161) setlist ::= LP idlist RP EQ expr */ - 190, /* (162) cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert */ - 190, /* (163) cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES returning */ - 271, /* (164) upsert ::= */ - 271, /* (165) upsert ::= RETURNING selcollist */ - 271, /* (166) upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt upsert */ - 271, /* (167) upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING upsert */ - 271, /* (168) upsert ::= ON CONFLICT DO NOTHING returning */ - 271, /* (169) upsert ::= ON CONFLICT DO UPDATE SET setlist where_opt returning */ - 272, /* (170) returning ::= RETURNING selcollist */ - 269, /* (171) insert_cmd ::= INSERT orconf */ - 269, /* (172) insert_cmd ::= REPLACE */ - 270, /* (173) idlist_opt ::= */ - 270, /* (174) idlist_opt ::= LP idlist RP */ - 263, /* (175) idlist ::= idlist COMMA nm */ - 263, /* (176) idlist ::= nm */ - 217, /* (177) expr ::= LP expr RP */ - 217, /* (178) expr ::= ID|INDEXED|JOIN_KW */ - 217, /* (179) expr ::= nm DOT nm */ - 217, /* (180) expr ::= nm DOT nm DOT nm */ - 216, /* (181) term ::= NULL|FLOAT|BLOB */ - 216, /* (182) term ::= STRING */ - 216, /* (183) term ::= INTEGER */ - 217, /* (184) expr ::= VARIABLE */ - 217, /* (185) expr ::= expr COLLATE ID|STRING */ - 217, /* (186) expr ::= CAST LP expr AS typetoken RP */ - 217, /* (187) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP */ - 217, /* (188) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP */ - 217, /* (189) expr ::= ID|INDEXED|JOIN_KW LP STAR RP */ - 217, /* (190) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over */ - 217, /* (191) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP filter_over */ - 217, /* (192) expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over */ - 216, /* (193) term ::= CTIME_KW */ - 217, /* (194) expr ::= LP nexprlist COMMA expr RP */ - 217, /* (195) expr ::= expr AND expr */ - 217, /* (196) expr ::= expr OR expr */ - 217, /* (197) expr ::= expr LT|GT|GE|LE expr */ - 217, /* (198) expr ::= expr EQ|NE expr */ - 217, /* (199) expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ - 217, /* (200) expr ::= expr PLUS|MINUS expr */ - 217, /* (201) expr ::= expr STAR|SLASH|REM expr */ - 217, /* (202) expr ::= expr CONCAT expr */ - 274, /* (203) likeop ::= NOT LIKE_KW|MATCH */ - 217, /* (204) expr ::= expr likeop expr */ - 217, /* (205) expr ::= expr likeop expr ESCAPE expr */ - 217, /* (206) expr ::= expr ISNULL|NOTNULL */ - 217, /* (207) expr ::= expr NOT NULL */ - 217, /* (208) expr ::= expr IS expr */ - 217, /* (209) expr ::= expr IS NOT expr */ - 217, /* (210) expr ::= expr IS NOT DISTINCT FROM expr */ - 217, /* (211) expr ::= expr IS DISTINCT FROM expr */ - 217, /* (212) expr ::= NOT expr */ - 217, /* (213) expr ::= BITNOT expr */ - 217, /* (214) expr ::= PLUS|MINUS expr */ - 217, /* (215) expr ::= expr PTR expr */ - 275, /* (216) between_op ::= BETWEEN */ - 275, /* (217) between_op ::= NOT BETWEEN */ - 217, /* (218) expr ::= expr between_op expr AND expr */ - 276, /* (219) in_op ::= IN */ - 276, /* (220) in_op ::= NOT IN */ - 217, /* (221) expr ::= expr in_op LP exprlist RP */ - 217, /* (222) expr ::= LP select RP */ - 217, /* (223) expr ::= expr in_op LP select RP */ - 217, /* (224) expr ::= expr in_op nm 
dbnm paren_exprlist */ - 217, /* (225) expr ::= EXISTS LP select RP */ - 217, /* (226) expr ::= CASE case_operand case_exprlist case_else END */ - 279, /* (227) case_exprlist ::= case_exprlist WHEN expr THEN expr */ - 279, /* (228) case_exprlist ::= WHEN expr THEN expr */ - 280, /* (229) case_else ::= ELSE expr */ - 280, /* (230) case_else ::= */ - 278, /* (231) case_operand ::= */ - 261, /* (232) exprlist ::= */ - 253, /* (233) nexprlist ::= nexprlist COMMA expr */ - 253, /* (234) nexprlist ::= expr */ - 277, /* (235) paren_exprlist ::= */ - 277, /* (236) paren_exprlist ::= LP exprlist RP */ - 190, /* (237) cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */ - 281, /* (238) uniqueflag ::= UNIQUE */ - 281, /* (239) uniqueflag ::= */ - 221, /* (240) eidlist_opt ::= */ - 221, /* (241) eidlist_opt ::= LP eidlist RP */ - 232, /* (242) eidlist ::= eidlist COMMA nm collate sortorder */ - 232, /* (243) eidlist ::= nm collate sortorder */ - 282, /* (244) collate ::= */ - 282, /* (245) collate ::= COLLATE ID|STRING */ - 190, /* (246) cmd ::= DROP INDEX ifexists fullname */ - 190, /* (247) cmd ::= VACUUM vinto */ - 190, /* (248) cmd ::= VACUUM nm vinto */ - 283, /* (249) vinto ::= INTO expr */ - 283, /* (250) vinto ::= */ - 190, /* (251) cmd ::= PRAGMA nm dbnm */ - 190, /* (252) cmd ::= PRAGMA nm dbnm EQ nmnum */ - 190, /* (253) cmd ::= PRAGMA nm dbnm LP nmnum RP */ - 190, /* (254) cmd ::= PRAGMA nm dbnm EQ minus_num */ - 190, /* (255) cmd ::= PRAGMA nm dbnm LP minus_num RP */ - 211, /* (256) plus_num ::= PLUS INTEGER|FLOAT */ - 212, /* (257) minus_num ::= MINUS INTEGER|FLOAT */ - 190, /* (258) cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */ - 285, /* (259) trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */ - 287, /* (260) trigger_time ::= BEFORE|AFTER */ - 287, /* (261) trigger_time ::= INSTEAD OF */ - 287, /* (262) trigger_time ::= */ - 288, /* (263) trigger_event ::= DELETE|INSERT */ - 288, /* (264) trigger_event ::= UPDATE */ - 288, /* (265) trigger_event ::= UPDATE OF idlist */ - 290, /* (266) when_clause ::= */ - 290, /* (267) when_clause ::= WHEN expr */ - 286, /* (268) trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */ - 286, /* (269) trigger_cmd_list ::= trigger_cmd SEMI */ - 292, /* (270) trnm ::= nm DOT nm */ - 293, /* (271) tridxby ::= INDEXED BY nm */ - 293, /* (272) tridxby ::= NOT INDEXED */ - 291, /* (273) trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */ - 291, /* (274) trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */ - 291, /* (275) trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */ - 291, /* (276) trigger_cmd ::= scanpt select scanpt */ - 217, /* (277) expr ::= RAISE LP IGNORE RP */ - 217, /* (278) expr ::= RAISE LP raisetype COMMA nm RP */ - 236, /* (279) raisetype ::= ROLLBACK */ - 236, /* (280) raisetype ::= ABORT */ - 236, /* (281) raisetype ::= FAIL */ - 190, /* (282) cmd ::= DROP TRIGGER ifexists fullname */ - 190, /* (283) cmd ::= ATTACH database_kw_opt expr AS expr key_opt */ - 190, /* (284) cmd ::= DETACH database_kw_opt expr */ - 295, /* (285) key_opt ::= */ - 295, /* (286) key_opt ::= KEY expr */ - 190, /* (287) cmd ::= REINDEX */ - 190, /* (288) cmd ::= REINDEX nm dbnm */ - 190, /* (289) cmd ::= ANALYZE */ - 190, /* (290) cmd ::= ANALYZE nm dbnm */ - 190, /* (291) cmd ::= ALTER TABLE fullname RENAME TO nm */ - 190, /* (292) cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt 
columnname carglist */ - 190, /* (293) cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */ - 296, /* (294) add_column_fullname ::= fullname */ - 190, /* (295) cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */ - 190, /* (296) cmd ::= create_vtab */ - 190, /* (297) cmd ::= create_vtab LP vtabarglist RP */ - 298, /* (298) create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */ - 300, /* (299) vtabarg ::= */ - 301, /* (300) vtabargtoken ::= ANY */ - 301, /* (301) vtabargtoken ::= lp anylist RP */ - 302, /* (302) lp ::= LP */ - 266, /* (303) with ::= WITH wqlist */ - 266, /* (304) with ::= WITH RECURSIVE wqlist */ - 305, /* (305) wqas ::= AS */ - 305, /* (306) wqas ::= AS MATERIALIZED */ - 305, /* (307) wqas ::= AS NOT MATERIALIZED */ - 304, /* (308) wqitem ::= nm eidlist_opt wqas LP select RP */ - 241, /* (309) wqlist ::= wqitem */ - 241, /* (310) wqlist ::= wqlist COMMA wqitem */ - 306, /* (311) windowdefn_list ::= windowdefn_list COMMA windowdefn */ - 307, /* (312) windowdefn ::= nm AS LP window RP */ - 308, /* (313) window ::= PARTITION BY nexprlist orderby_opt frame_opt */ - 308, /* (314) window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ - 308, /* (315) window ::= ORDER BY sortlist frame_opt */ - 308, /* (316) window ::= nm ORDER BY sortlist frame_opt */ - 308, /* (317) window ::= nm frame_opt */ - 309, /* (318) frame_opt ::= */ - 309, /* (319) frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ - 309, /* (320) frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ - 313, /* (321) range_or_rows ::= RANGE|ROWS|GROUPS */ - 315, /* (322) frame_bound_s ::= frame_bound */ - 315, /* (323) frame_bound_s ::= UNBOUNDED PRECEDING */ - 316, /* (324) frame_bound_e ::= frame_bound */ - 316, /* (325) frame_bound_e ::= UNBOUNDED FOLLOWING */ - 314, /* (326) frame_bound ::= expr PRECEDING|FOLLOWING */ - 314, /* (327) frame_bound ::= CURRENT ROW */ - 317, /* (328) frame_exclude_opt ::= */ - 317, /* (329) frame_exclude_opt ::= EXCLUDE frame_exclude */ - 318, /* (330) frame_exclude ::= NO OTHERS */ - 318, /* (331) frame_exclude ::= CURRENT ROW */ - 318, /* (332) frame_exclude ::= GROUP|TIES */ - 251, /* (333) window_clause ::= WINDOW windowdefn_list */ - 273, /* (334) filter_over ::= filter_clause over_clause */ - 273, /* (335) filter_over ::= over_clause */ - 273, /* (336) filter_over ::= filter_clause */ - 312, /* (337) over_clause ::= OVER LP window RP */ - 312, /* (338) over_clause ::= OVER nm */ - 311, /* (339) filter_clause ::= FILTER LP WHERE expr RP */ - 185, /* (340) input ::= cmdlist */ - 186, /* (341) cmdlist ::= cmdlist ecmd */ - 186, /* (342) cmdlist ::= ecmd */ - 187, /* (343) ecmd ::= SEMI */ - 187, /* (344) ecmd ::= cmdx SEMI */ - 187, /* (345) ecmd ::= explain cmdx SEMI */ - 192, /* (346) trans_opt ::= */ - 192, /* (347) trans_opt ::= TRANSACTION */ - 192, /* (348) trans_opt ::= TRANSACTION nm */ - 194, /* (349) savepoint_opt ::= SAVEPOINT */ - 194, /* (350) savepoint_opt ::= */ - 190, /* (351) cmd ::= create_table create_table_args */ - 203, /* (352) table_option_set ::= table_option */ - 201, /* (353) columnlist ::= columnlist COMMA columnname carglist */ - 201, /* (354) columnlist ::= columnname carglist */ - 193, /* (355) nm ::= ID|INDEXED|JOIN_KW */ - 193, /* (356) nm ::= STRING */ - 208, /* (357) typetoken ::= typename */ - 209, /* (358) typename ::= ID|STRING */ - 210, /* (359) signed ::= plus_num */ - 210, /* (360) signed ::= minus_num */ - 207, /* (361) carglist ::= carglist ccons */ - 207, /* (362) 
carglist ::= */ - 215, /* (363) ccons ::= NULL onconf */ - 215, /* (364) ccons ::= GENERATED ALWAYS AS generated */ - 215, /* (365) ccons ::= AS generated */ - 202, /* (366) conslist_opt ::= COMMA conslist */ - 228, /* (367) conslist ::= conslist tconscomma tcons */ - 228, /* (368) conslist ::= tcons */ - 229, /* (369) tconscomma ::= */ - 233, /* (370) defer_subclause_opt ::= defer_subclause */ - 235, /* (371) resolvetype ::= raisetype */ - 239, /* (372) selectnowith ::= oneselect */ - 240, /* (373) oneselect ::= values */ - 254, /* (374) sclp ::= selcollist COMMA */ - 255, /* (375) as ::= ID|STRING */ - 264, /* (376) indexed_opt ::= indexed_by */ - 272, /* (377) returning ::= */ - 217, /* (378) expr ::= term */ - 274, /* (379) likeop ::= LIKE_KW|MATCH */ - 278, /* (380) case_operand ::= expr */ - 261, /* (381) exprlist ::= nexprlist */ - 284, /* (382) nmnum ::= plus_num */ - 284, /* (383) nmnum ::= nm */ - 284, /* (384) nmnum ::= ON */ - 284, /* (385) nmnum ::= DELETE */ - 284, /* (386) nmnum ::= DEFAULT */ - 211, /* (387) plus_num ::= INTEGER|FLOAT */ - 289, /* (388) foreach_clause ::= */ - 289, /* (389) foreach_clause ::= FOR EACH ROW */ - 292, /* (390) trnm ::= nm */ - 293, /* (391) tridxby ::= */ - 294, /* (392) database_kw_opt ::= DATABASE */ - 294, /* (393) database_kw_opt ::= */ - 297, /* (394) kwcolumn_opt ::= */ - 297, /* (395) kwcolumn_opt ::= COLUMNKW */ - 299, /* (396) vtabarglist ::= vtabarg */ - 299, /* (397) vtabarglist ::= vtabarglist COMMA vtabarg */ - 300, /* (398) vtabarg ::= vtabarg vtabargtoken */ - 303, /* (399) anylist ::= */ - 303, /* (400) anylist ::= anylist LP anylist RP */ - 303, /* (401) anylist ::= anylist ANY */ - 266, /* (402) with ::= */ - 306, /* (403) windowdefn_list ::= windowdefn */ - 308, /* (404) window ::= frame_opt */ + 190, /* (0) explain ::= EXPLAIN */ + 190, /* (1) explain ::= EXPLAIN QUERY PLAN */ + 189, /* (2) cmdx ::= cmd */ + 191, /* (3) cmd ::= BEGIN transtype trans_opt */ + 192, /* (4) transtype ::= */ + 192, /* (5) transtype ::= DEFERRED */ + 192, /* (6) transtype ::= IMMEDIATE */ + 192, /* (7) transtype ::= EXCLUSIVE */ + 191, /* (8) cmd ::= COMMIT|END trans_opt */ + 191, /* (9) cmd ::= ROLLBACK trans_opt */ + 191, /* (10) cmd ::= SAVEPOINT nm */ + 191, /* (11) cmd ::= RELEASE savepoint_opt nm */ + 191, /* (12) cmd ::= ROLLBACK trans_opt TO savepoint_opt nm */ + 196, /* (13) create_table ::= createkw temp TABLE ifnotexists nm dbnm */ + 198, /* (14) createkw ::= CREATE */ + 200, /* (15) ifnotexists ::= */ + 200, /* (16) ifnotexists ::= IF NOT EXISTS */ + 199, /* (17) temp ::= TEMP */ + 199, /* (18) temp ::= */ + 197, /* (19) create_table_args ::= LP columnlist conslist_opt RP table_option_set */ + 197, /* (20) create_table_args ::= AS select */ + 204, /* (21) table_option_set ::= */ + 204, /* (22) table_option_set ::= table_option_set COMMA table_option */ + 206, /* (23) table_option ::= WITHOUT nm */ + 206, /* (24) table_option ::= nm */ + 207, /* (25) columnname ::= nm typetoken */ + 209, /* (26) typetoken ::= */ + 209, /* (27) typetoken ::= typename LP signed RP */ + 209, /* (28) typetoken ::= typename LP signed COMMA signed RP */ + 210, /* (29) typename ::= typename ID|STRING */ + 214, /* (30) scanpt ::= */ + 215, /* (31) scantok ::= */ + 216, /* (32) ccons ::= CONSTRAINT nm */ + 216, /* (33) ccons ::= DEFAULT scantok term */ + 216, /* (34) ccons ::= DEFAULT LP expr RP */ + 216, /* (35) ccons ::= DEFAULT PLUS scantok term */ + 216, /* (36) ccons ::= DEFAULT MINUS scantok term */ + 216, /* (37) ccons ::= DEFAULT scantok ID|INDEXED */ + 
216, /* (38) ccons ::= NOT NULL onconf */ + 216, /* (39) ccons ::= PRIMARY KEY sortorder onconf autoinc */ + 216, /* (40) ccons ::= UNIQUE onconf */ + 216, /* (41) ccons ::= CHECK LP expr RP */ + 216, /* (42) ccons ::= REFERENCES nm eidlist_opt refargs */ + 216, /* (43) ccons ::= defer_subclause */ + 216, /* (44) ccons ::= COLLATE ID|STRING */ + 225, /* (45) generated ::= LP expr RP */ + 225, /* (46) generated ::= LP expr RP ID */ + 221, /* (47) autoinc ::= */ + 221, /* (48) autoinc ::= AUTOINCR */ + 223, /* (49) refargs ::= */ + 223, /* (50) refargs ::= refargs refarg */ + 226, /* (51) refarg ::= MATCH nm */ + 226, /* (52) refarg ::= ON INSERT refact */ + 226, /* (53) refarg ::= ON DELETE refact */ + 226, /* (54) refarg ::= ON UPDATE refact */ + 227, /* (55) refact ::= SET NULL */ + 227, /* (56) refact ::= SET DEFAULT */ + 227, /* (57) refact ::= CASCADE */ + 227, /* (58) refact ::= RESTRICT */ + 227, /* (59) refact ::= NO ACTION */ + 224, /* (60) defer_subclause ::= NOT DEFERRABLE init_deferred_pred_opt */ + 224, /* (61) defer_subclause ::= DEFERRABLE init_deferred_pred_opt */ + 228, /* (62) init_deferred_pred_opt ::= */ + 228, /* (63) init_deferred_pred_opt ::= INITIALLY DEFERRED */ + 228, /* (64) init_deferred_pred_opt ::= INITIALLY IMMEDIATE */ + 203, /* (65) conslist_opt ::= */ + 230, /* (66) tconscomma ::= COMMA */ + 231, /* (67) tcons ::= CONSTRAINT nm */ + 231, /* (68) tcons ::= PRIMARY KEY LP sortlist autoinc RP onconf */ + 231, /* (69) tcons ::= UNIQUE LP sortlist RP onconf */ + 231, /* (70) tcons ::= CHECK LP expr RP onconf */ + 231, /* (71) tcons ::= FOREIGN KEY LP eidlist RP REFERENCES nm eidlist_opt refargs defer_subclause_opt */ + 234, /* (72) defer_subclause_opt ::= */ + 219, /* (73) onconf ::= */ + 219, /* (74) onconf ::= ON CONFLICT resolvetype */ + 235, /* (75) orconf ::= */ + 235, /* (76) orconf ::= OR resolvetype */ + 236, /* (77) resolvetype ::= IGNORE */ + 236, /* (78) resolvetype ::= REPLACE */ + 191, /* (79) cmd ::= DROP TABLE ifexists fullname */ + 238, /* (80) ifexists ::= IF EXISTS */ + 238, /* (81) ifexists ::= */ + 191, /* (82) cmd ::= createkw temp VIEW ifnotexists nm dbnm eidlist_opt AS select */ + 191, /* (83) cmd ::= DROP VIEW ifexists fullname */ + 191, /* (84) cmd ::= select */ + 205, /* (85) select ::= WITH wqlist selectnowith */ + 205, /* (86) select ::= WITH RECURSIVE wqlist selectnowith */ + 205, /* (87) select ::= selectnowith */ + 240, /* (88) selectnowith ::= selectnowith multiselect_op oneselect */ + 243, /* (89) multiselect_op ::= UNION */ + 243, /* (90) multiselect_op ::= UNION ALL */ + 243, /* (91) multiselect_op ::= EXCEPT|INTERSECT */ + 241, /* (92) oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt */ + 241, /* (93) oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt window_clause orderby_opt limit_opt */ + 253, /* (94) values ::= VALUES LP nexprlist RP */ + 241, /* (95) oneselect ::= mvalues */ + 255, /* (96) mvalues ::= values COMMA LP nexprlist RP */ + 255, /* (97) mvalues ::= mvalues COMMA LP nexprlist RP */ + 244, /* (98) distinct ::= DISTINCT */ + 244, /* (99) distinct ::= ALL */ + 244, /* (100) distinct ::= */ + 256, /* (101) sclp ::= */ + 245, /* (102) selcollist ::= sclp scanpt expr scanpt as */ + 245, /* (103) selcollist ::= sclp scanpt STAR */ + 245, /* (104) selcollist ::= sclp scanpt nm DOT STAR */ + 257, /* (105) as ::= AS nm */ + 257, /* (106) as ::= */ + 246, /* (107) from ::= */ + 246, /* (108) from ::= FROM seltablist */ + 259, /* (109) 
stl_prefix ::= seltablist joinop */ + 259, /* (110) stl_prefix ::= */ + 258, /* (111) seltablist ::= stl_prefix nm dbnm as on_using */ + 258, /* (112) seltablist ::= stl_prefix nm dbnm as indexed_by on_using */ + 258, /* (113) seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_using */ + 258, /* (114) seltablist ::= stl_prefix LP select RP as on_using */ + 258, /* (115) seltablist ::= stl_prefix LP seltablist RP as on_using */ + 201, /* (116) dbnm ::= */ + 201, /* (117) dbnm ::= DOT nm */ + 239, /* (118) fullname ::= nm */ + 239, /* (119) fullname ::= nm DOT nm */ + 264, /* (120) xfullname ::= nm */ + 264, /* (121) xfullname ::= nm DOT nm */ + 264, /* (122) xfullname ::= nm DOT nm AS nm */ + 264, /* (123) xfullname ::= nm AS nm */ + 260, /* (124) joinop ::= COMMA|JOIN */ + 260, /* (125) joinop ::= JOIN_KW JOIN */ + 260, /* (126) joinop ::= JOIN_KW nm JOIN */ + 260, /* (127) joinop ::= JOIN_KW nm nm JOIN */ + 261, /* (128) on_using ::= ON expr */ + 261, /* (129) on_using ::= USING LP idlist RP */ + 261, /* (130) on_using ::= */ + 266, /* (131) indexed_opt ::= */ + 262, /* (132) indexed_by ::= INDEXED BY nm */ + 262, /* (133) indexed_by ::= NOT INDEXED */ + 250, /* (134) orderby_opt ::= */ + 250, /* (135) orderby_opt ::= ORDER BY sortlist */ + 232, /* (136) sortlist ::= sortlist COMMA expr sortorder nulls */ + 232, /* (137) sortlist ::= expr sortorder nulls */ + 220, /* (138) sortorder ::= ASC */ + 220, /* (139) sortorder ::= DESC */ + 220, /* (140) sortorder ::= */ + 267, /* (141) nulls ::= NULLS FIRST */ + 267, /* (142) nulls ::= NULLS LAST */ + 267, /* (143) nulls ::= */ + 248, /* (144) groupby_opt ::= */ + 248, /* (145) groupby_opt ::= GROUP BY nexprlist */ + 249, /* (146) having_opt ::= */ + 249, /* (147) having_opt ::= HAVING expr */ + 251, /* (148) limit_opt ::= */ + 251, /* (149) limit_opt ::= LIMIT expr */ + 251, /* (150) limit_opt ::= LIMIT expr OFFSET expr */ + 251, /* (151) limit_opt ::= LIMIT expr COMMA expr */ + 191, /* (152) cmd ::= with DELETE FROM xfullname indexed_opt where_opt_ret orderby_opt limit_opt */ + 247, /* (153) where_opt ::= */ + 247, /* (154) where_opt ::= WHERE expr */ + 269, /* (155) where_opt_ret ::= */ + 269, /* (156) where_opt_ret ::= WHERE expr */ + 269, /* (157) where_opt_ret ::= RETURNING selcollist */ + 269, /* (158) where_opt_ret ::= WHERE expr RETURNING selcollist */ + 191, /* (159) cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt_ret orderby_opt limit_opt */ + 270, /* (160) setlist ::= setlist COMMA nm EQ expr */ + 270, /* (161) setlist ::= setlist COMMA LP idlist RP EQ expr */ + 270, /* (162) setlist ::= nm EQ expr */ + 270, /* (163) setlist ::= LP idlist RP EQ expr */ + 191, /* (164) cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert */ + 191, /* (165) cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES returning */ + 273, /* (166) upsert ::= */ + 273, /* (167) upsert ::= RETURNING selcollist */ + 273, /* (168) upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt upsert */ + 273, /* (169) upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING upsert */ + 273, /* (170) upsert ::= ON CONFLICT DO NOTHING returning */ + 273, /* (171) upsert ::= ON CONFLICT DO UPDATE SET setlist where_opt returning */ + 274, /* (172) returning ::= RETURNING selcollist */ + 271, /* (173) insert_cmd ::= INSERT orconf */ + 271, /* (174) insert_cmd ::= REPLACE */ + 272, /* (175) idlist_opt ::= */ + 272, /* (176) idlist_opt ::= LP idlist RP */ + 265, /* (177) idlist ::= idlist COMMA nm */ 
+ 265, /* (178) idlist ::= nm */ + 218, /* (179) expr ::= LP expr RP */ + 218, /* (180) expr ::= ID|INDEXED|JOIN_KW */ + 218, /* (181) expr ::= nm DOT nm */ + 218, /* (182) expr ::= nm DOT nm DOT nm */ + 217, /* (183) term ::= NULL|FLOAT|BLOB */ + 217, /* (184) term ::= STRING */ + 217, /* (185) term ::= INTEGER */ + 218, /* (186) expr ::= VARIABLE */ + 218, /* (187) expr ::= expr COLLATE ID|STRING */ + 218, /* (188) expr ::= CAST LP expr AS typetoken RP */ + 218, /* (189) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP */ + 218, /* (190) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP */ + 218, /* (191) expr ::= ID|INDEXED|JOIN_KW LP STAR RP */ + 218, /* (192) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over */ + 218, /* (193) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP filter_over */ + 218, /* (194) expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over */ + 217, /* (195) term ::= CTIME_KW */ + 218, /* (196) expr ::= LP nexprlist COMMA expr RP */ + 218, /* (197) expr ::= expr AND expr */ + 218, /* (198) expr ::= expr OR expr */ + 218, /* (199) expr ::= expr LT|GT|GE|LE expr */ + 218, /* (200) expr ::= expr EQ|NE expr */ + 218, /* (201) expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ + 218, /* (202) expr ::= expr PLUS|MINUS expr */ + 218, /* (203) expr ::= expr STAR|SLASH|REM expr */ + 218, /* (204) expr ::= expr CONCAT expr */ + 276, /* (205) likeop ::= NOT LIKE_KW|MATCH */ + 218, /* (206) expr ::= expr likeop expr */ + 218, /* (207) expr ::= expr likeop expr ESCAPE expr */ + 218, /* (208) expr ::= expr ISNULL|NOTNULL */ + 218, /* (209) expr ::= expr NOT NULL */ + 218, /* (210) expr ::= expr IS expr */ + 218, /* (211) expr ::= expr IS NOT expr */ + 218, /* (212) expr ::= expr IS NOT DISTINCT FROM expr */ + 218, /* (213) expr ::= expr IS DISTINCT FROM expr */ + 218, /* (214) expr ::= NOT expr */ + 218, /* (215) expr ::= BITNOT expr */ + 218, /* (216) expr ::= PLUS|MINUS expr */ + 218, /* (217) expr ::= expr PTR expr */ + 277, /* (218) between_op ::= BETWEEN */ + 277, /* (219) between_op ::= NOT BETWEEN */ + 218, /* (220) expr ::= expr between_op expr AND expr */ + 278, /* (221) in_op ::= IN */ + 278, /* (222) in_op ::= NOT IN */ + 218, /* (223) expr ::= expr in_op LP exprlist RP */ + 218, /* (224) expr ::= LP select RP */ + 218, /* (225) expr ::= expr in_op LP select RP */ + 218, /* (226) expr ::= expr in_op nm dbnm paren_exprlist */ + 218, /* (227) expr ::= EXISTS LP select RP */ + 218, /* (228) expr ::= CASE case_operand case_exprlist case_else END */ + 281, /* (229) case_exprlist ::= case_exprlist WHEN expr THEN expr */ + 281, /* (230) case_exprlist ::= WHEN expr THEN expr */ + 282, /* (231) case_else ::= ELSE expr */ + 282, /* (232) case_else ::= */ + 280, /* (233) case_operand ::= */ + 263, /* (234) exprlist ::= */ + 254, /* (235) nexprlist ::= nexprlist COMMA expr */ + 254, /* (236) nexprlist ::= expr */ + 279, /* (237) paren_exprlist ::= */ + 279, /* (238) paren_exprlist ::= LP exprlist RP */ + 191, /* (239) cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */ + 283, /* (240) uniqueflag ::= UNIQUE */ + 283, /* (241) uniqueflag ::= */ + 222, /* (242) eidlist_opt ::= */ + 222, /* (243) eidlist_opt ::= LP eidlist RP */ + 233, /* (244) eidlist ::= eidlist COMMA nm collate sortorder */ + 233, /* (245) eidlist ::= nm collate sortorder */ + 284, /* (246) collate ::= */ + 284, /* (247) collate ::= COLLATE ID|STRING */ + 191, /* (248) cmd ::= DROP INDEX ifexists fullname */ + 191, /* (249) cmd 
::= VACUUM vinto */ + 191, /* (250) cmd ::= VACUUM nm vinto */ + 285, /* (251) vinto ::= INTO expr */ + 285, /* (252) vinto ::= */ + 191, /* (253) cmd ::= PRAGMA nm dbnm */ + 191, /* (254) cmd ::= PRAGMA nm dbnm EQ nmnum */ + 191, /* (255) cmd ::= PRAGMA nm dbnm LP nmnum RP */ + 191, /* (256) cmd ::= PRAGMA nm dbnm EQ minus_num */ + 191, /* (257) cmd ::= PRAGMA nm dbnm LP minus_num RP */ + 212, /* (258) plus_num ::= PLUS INTEGER|FLOAT */ + 213, /* (259) minus_num ::= MINUS INTEGER|FLOAT */ + 191, /* (260) cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */ + 287, /* (261) trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */ + 289, /* (262) trigger_time ::= BEFORE|AFTER */ + 289, /* (263) trigger_time ::= INSTEAD OF */ + 289, /* (264) trigger_time ::= */ + 290, /* (265) trigger_event ::= DELETE|INSERT */ + 290, /* (266) trigger_event ::= UPDATE */ + 290, /* (267) trigger_event ::= UPDATE OF idlist */ + 292, /* (268) when_clause ::= */ + 292, /* (269) when_clause ::= WHEN expr */ + 288, /* (270) trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */ + 288, /* (271) trigger_cmd_list ::= trigger_cmd SEMI */ + 294, /* (272) trnm ::= nm DOT nm */ + 295, /* (273) tridxby ::= INDEXED BY nm */ + 295, /* (274) tridxby ::= NOT INDEXED */ + 293, /* (275) trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */ + 293, /* (276) trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */ + 293, /* (277) trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */ + 293, /* (278) trigger_cmd ::= scanpt select scanpt */ + 218, /* (279) expr ::= RAISE LP IGNORE RP */ + 218, /* (280) expr ::= RAISE LP raisetype COMMA nm RP */ + 237, /* (281) raisetype ::= ROLLBACK */ + 237, /* (282) raisetype ::= ABORT */ + 237, /* (283) raisetype ::= FAIL */ + 191, /* (284) cmd ::= DROP TRIGGER ifexists fullname */ + 191, /* (285) cmd ::= ATTACH database_kw_opt expr AS expr key_opt */ + 191, /* (286) cmd ::= DETACH database_kw_opt expr */ + 297, /* (287) key_opt ::= */ + 297, /* (288) key_opt ::= KEY expr */ + 191, /* (289) cmd ::= REINDEX */ + 191, /* (290) cmd ::= REINDEX nm dbnm */ + 191, /* (291) cmd ::= ANALYZE */ + 191, /* (292) cmd ::= ANALYZE nm dbnm */ + 191, /* (293) cmd ::= ALTER TABLE fullname RENAME TO nm */ + 191, /* (294) cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */ + 191, /* (295) cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */ + 298, /* (296) add_column_fullname ::= fullname */ + 191, /* (297) cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */ + 191, /* (298) cmd ::= create_vtab */ + 191, /* (299) cmd ::= create_vtab LP vtabarglist RP */ + 300, /* (300) create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */ + 302, /* (301) vtabarg ::= */ + 303, /* (302) vtabargtoken ::= ANY */ + 303, /* (303) vtabargtoken ::= lp anylist RP */ + 304, /* (304) lp ::= LP */ + 268, /* (305) with ::= WITH wqlist */ + 268, /* (306) with ::= WITH RECURSIVE wqlist */ + 307, /* (307) wqas ::= AS */ + 307, /* (308) wqas ::= AS MATERIALIZED */ + 307, /* (309) wqas ::= AS NOT MATERIALIZED */ + 306, /* (310) wqitem ::= withnm eidlist_opt wqas LP select RP */ + 308, /* (311) withnm ::= nm */ + 242, /* (312) wqlist ::= wqitem */ + 242, /* (313) wqlist ::= wqlist COMMA wqitem */ + 309, /* (314) windowdefn_list ::= windowdefn_list COMMA windowdefn */ + 310, /* (315) windowdefn ::= nm AS LP window RP */ + 311, /* (316) window ::= PARTITION BY nexprlist 
orderby_opt frame_opt */ + 311, /* (317) window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ + 311, /* (318) window ::= ORDER BY sortlist frame_opt */ + 311, /* (319) window ::= nm ORDER BY sortlist frame_opt */ + 311, /* (320) window ::= nm frame_opt */ + 312, /* (321) frame_opt ::= */ + 312, /* (322) frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ + 312, /* (323) frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ + 316, /* (324) range_or_rows ::= RANGE|ROWS|GROUPS */ + 318, /* (325) frame_bound_s ::= frame_bound */ + 318, /* (326) frame_bound_s ::= UNBOUNDED PRECEDING */ + 319, /* (327) frame_bound_e ::= frame_bound */ + 319, /* (328) frame_bound_e ::= UNBOUNDED FOLLOWING */ + 317, /* (329) frame_bound ::= expr PRECEDING|FOLLOWING */ + 317, /* (330) frame_bound ::= CURRENT ROW */ + 320, /* (331) frame_exclude_opt ::= */ + 320, /* (332) frame_exclude_opt ::= EXCLUDE frame_exclude */ + 321, /* (333) frame_exclude ::= NO OTHERS */ + 321, /* (334) frame_exclude ::= CURRENT ROW */ + 321, /* (335) frame_exclude ::= GROUP|TIES */ + 252, /* (336) window_clause ::= WINDOW windowdefn_list */ + 275, /* (337) filter_over ::= filter_clause over_clause */ + 275, /* (338) filter_over ::= over_clause */ + 275, /* (339) filter_over ::= filter_clause */ + 315, /* (340) over_clause ::= OVER LP window RP */ + 315, /* (341) over_clause ::= OVER nm */ + 314, /* (342) filter_clause ::= FILTER LP WHERE expr RP */ + 217, /* (343) term ::= QNUMBER */ + 186, /* (344) input ::= cmdlist */ + 187, /* (345) cmdlist ::= cmdlist ecmd */ + 187, /* (346) cmdlist ::= ecmd */ + 188, /* (347) ecmd ::= SEMI */ + 188, /* (348) ecmd ::= cmdx SEMI */ + 188, /* (349) ecmd ::= explain cmdx SEMI */ + 193, /* (350) trans_opt ::= */ + 193, /* (351) trans_opt ::= TRANSACTION */ + 193, /* (352) trans_opt ::= TRANSACTION nm */ + 195, /* (353) savepoint_opt ::= SAVEPOINT */ + 195, /* (354) savepoint_opt ::= */ + 191, /* (355) cmd ::= create_table create_table_args */ + 204, /* (356) table_option_set ::= table_option */ + 202, /* (357) columnlist ::= columnlist COMMA columnname carglist */ + 202, /* (358) columnlist ::= columnname carglist */ + 194, /* (359) nm ::= ID|INDEXED|JOIN_KW */ + 194, /* (360) nm ::= STRING */ + 209, /* (361) typetoken ::= typename */ + 210, /* (362) typename ::= ID|STRING */ + 211, /* (363) signed ::= plus_num */ + 211, /* (364) signed ::= minus_num */ + 208, /* (365) carglist ::= carglist ccons */ + 208, /* (366) carglist ::= */ + 216, /* (367) ccons ::= NULL onconf */ + 216, /* (368) ccons ::= GENERATED ALWAYS AS generated */ + 216, /* (369) ccons ::= AS generated */ + 203, /* (370) conslist_opt ::= COMMA conslist */ + 229, /* (371) conslist ::= conslist tconscomma tcons */ + 229, /* (372) conslist ::= tcons */ + 230, /* (373) tconscomma ::= */ + 234, /* (374) defer_subclause_opt ::= defer_subclause */ + 236, /* (375) resolvetype ::= raisetype */ + 240, /* (376) selectnowith ::= oneselect */ + 241, /* (377) oneselect ::= values */ + 256, /* (378) sclp ::= selcollist COMMA */ + 257, /* (379) as ::= ID|STRING */ + 266, /* (380) indexed_opt ::= indexed_by */ + 274, /* (381) returning ::= */ + 218, /* (382) expr ::= term */ + 276, /* (383) likeop ::= LIKE_KW|MATCH */ + 280, /* (384) case_operand ::= expr */ + 263, /* (385) exprlist ::= nexprlist */ + 286, /* (386) nmnum ::= plus_num */ + 286, /* (387) nmnum ::= nm */ + 286, /* (388) nmnum ::= ON */ + 286, /* (389) nmnum ::= DELETE */ + 286, /* (390) nmnum ::= DEFAULT */ + 212, /* (391) plus_num ::= 
INTEGER|FLOAT */ + 291, /* (392) foreach_clause ::= */ + 291, /* (393) foreach_clause ::= FOR EACH ROW */ + 294, /* (394) trnm ::= nm */ + 295, /* (395) tridxby ::= */ + 296, /* (396) database_kw_opt ::= DATABASE */ + 296, /* (397) database_kw_opt ::= */ + 299, /* (398) kwcolumn_opt ::= */ + 299, /* (399) kwcolumn_opt ::= COLUMNKW */ + 301, /* (400) vtabarglist ::= vtabarg */ + 301, /* (401) vtabarglist ::= vtabarglist COMMA vtabarg */ + 302, /* (402) vtabarg ::= vtabarg vtabargtoken */ + 305, /* (403) anylist ::= */ + 305, /* (404) anylist ::= anylist LP anylist RP */ + 305, /* (405) anylist ::= anylist ANY */ + 268, /* (406) with ::= */ + 309, /* (407) windowdefn_list ::= windowdefn */ + 311, /* (408) window ::= frame_opt */ }; /* For rule J, yyRuleInfoNRhs[J] contains the negative of the number @@ -174378,316 +175760,320 @@ static const signed char yyRuleInfoNRhs[] = { -9, /* (92) oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt */ -10, /* (93) oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt window_clause orderby_opt limit_opt */ -4, /* (94) values ::= VALUES LP nexprlist RP */ - -5, /* (95) values ::= values COMMA LP nexprlist RP */ - -1, /* (96) distinct ::= DISTINCT */ - -1, /* (97) distinct ::= ALL */ - 0, /* (98) distinct ::= */ - 0, /* (99) sclp ::= */ - -5, /* (100) selcollist ::= sclp scanpt expr scanpt as */ - -3, /* (101) selcollist ::= sclp scanpt STAR */ - -5, /* (102) selcollist ::= sclp scanpt nm DOT STAR */ - -2, /* (103) as ::= AS nm */ - 0, /* (104) as ::= */ - 0, /* (105) from ::= */ - -2, /* (106) from ::= FROM seltablist */ - -2, /* (107) stl_prefix ::= seltablist joinop */ - 0, /* (108) stl_prefix ::= */ - -5, /* (109) seltablist ::= stl_prefix nm dbnm as on_using */ - -6, /* (110) seltablist ::= stl_prefix nm dbnm as indexed_by on_using */ - -8, /* (111) seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_using */ - -6, /* (112) seltablist ::= stl_prefix LP select RP as on_using */ - -6, /* (113) seltablist ::= stl_prefix LP seltablist RP as on_using */ - 0, /* (114) dbnm ::= */ - -2, /* (115) dbnm ::= DOT nm */ - -1, /* (116) fullname ::= nm */ - -3, /* (117) fullname ::= nm DOT nm */ - -1, /* (118) xfullname ::= nm */ - -3, /* (119) xfullname ::= nm DOT nm */ - -5, /* (120) xfullname ::= nm DOT nm AS nm */ - -3, /* (121) xfullname ::= nm AS nm */ - -1, /* (122) joinop ::= COMMA|JOIN */ - -2, /* (123) joinop ::= JOIN_KW JOIN */ - -3, /* (124) joinop ::= JOIN_KW nm JOIN */ - -4, /* (125) joinop ::= JOIN_KW nm nm JOIN */ - -2, /* (126) on_using ::= ON expr */ - -4, /* (127) on_using ::= USING LP idlist RP */ - 0, /* (128) on_using ::= */ - 0, /* (129) indexed_opt ::= */ - -3, /* (130) indexed_by ::= INDEXED BY nm */ - -2, /* (131) indexed_by ::= NOT INDEXED */ - 0, /* (132) orderby_opt ::= */ - -3, /* (133) orderby_opt ::= ORDER BY sortlist */ - -5, /* (134) sortlist ::= sortlist COMMA expr sortorder nulls */ - -3, /* (135) sortlist ::= expr sortorder nulls */ - -1, /* (136) sortorder ::= ASC */ - -1, /* (137) sortorder ::= DESC */ - 0, /* (138) sortorder ::= */ - -2, /* (139) nulls ::= NULLS FIRST */ - -2, /* (140) nulls ::= NULLS LAST */ - 0, /* (141) nulls ::= */ - 0, /* (142) groupby_opt ::= */ - -3, /* (143) groupby_opt ::= GROUP BY nexprlist */ - 0, /* (144) having_opt ::= */ - -2, /* (145) having_opt ::= HAVING expr */ - 0, /* (146) limit_opt ::= */ - -2, /* (147) limit_opt ::= LIMIT expr */ - -4, /* (148) limit_opt ::= LIMIT expr OFFSET expr */ - -4, /* (149) limit_opt ::= 
LIMIT expr COMMA expr */ - -8, /* (150) cmd ::= with DELETE FROM xfullname indexed_opt where_opt_ret orderby_opt limit_opt */ - 0, /* (151) where_opt ::= */ - -2, /* (152) where_opt ::= WHERE expr */ - 0, /* (153) where_opt_ret ::= */ - -2, /* (154) where_opt_ret ::= WHERE expr */ - -2, /* (155) where_opt_ret ::= RETURNING selcollist */ - -4, /* (156) where_opt_ret ::= WHERE expr RETURNING selcollist */ - -11, /* (157) cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt_ret orderby_opt limit_opt */ - -5, /* (158) setlist ::= setlist COMMA nm EQ expr */ - -7, /* (159) setlist ::= setlist COMMA LP idlist RP EQ expr */ - -3, /* (160) setlist ::= nm EQ expr */ - -5, /* (161) setlist ::= LP idlist RP EQ expr */ - -7, /* (162) cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert */ - -8, /* (163) cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES returning */ - 0, /* (164) upsert ::= */ - -2, /* (165) upsert ::= RETURNING selcollist */ - -12, /* (166) upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt upsert */ - -9, /* (167) upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING upsert */ - -5, /* (168) upsert ::= ON CONFLICT DO NOTHING returning */ - -8, /* (169) upsert ::= ON CONFLICT DO UPDATE SET setlist where_opt returning */ - -2, /* (170) returning ::= RETURNING selcollist */ - -2, /* (171) insert_cmd ::= INSERT orconf */ - -1, /* (172) insert_cmd ::= REPLACE */ - 0, /* (173) idlist_opt ::= */ - -3, /* (174) idlist_opt ::= LP idlist RP */ - -3, /* (175) idlist ::= idlist COMMA nm */ - -1, /* (176) idlist ::= nm */ - -3, /* (177) expr ::= LP expr RP */ - -1, /* (178) expr ::= ID|INDEXED|JOIN_KW */ - -3, /* (179) expr ::= nm DOT nm */ - -5, /* (180) expr ::= nm DOT nm DOT nm */ - -1, /* (181) term ::= NULL|FLOAT|BLOB */ - -1, /* (182) term ::= STRING */ - -1, /* (183) term ::= INTEGER */ - -1, /* (184) expr ::= VARIABLE */ - -3, /* (185) expr ::= expr COLLATE ID|STRING */ - -6, /* (186) expr ::= CAST LP expr AS typetoken RP */ - -5, /* (187) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP */ - -8, /* (188) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP */ - -4, /* (189) expr ::= ID|INDEXED|JOIN_KW LP STAR RP */ - -6, /* (190) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over */ - -9, /* (191) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP filter_over */ - -5, /* (192) expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over */ - -1, /* (193) term ::= CTIME_KW */ - -5, /* (194) expr ::= LP nexprlist COMMA expr RP */ - -3, /* (195) expr ::= expr AND expr */ - -3, /* (196) expr ::= expr OR expr */ - -3, /* (197) expr ::= expr LT|GT|GE|LE expr */ - -3, /* (198) expr ::= expr EQ|NE expr */ - -3, /* (199) expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ - -3, /* (200) expr ::= expr PLUS|MINUS expr */ - -3, /* (201) expr ::= expr STAR|SLASH|REM expr */ - -3, /* (202) expr ::= expr CONCAT expr */ - -2, /* (203) likeop ::= NOT LIKE_KW|MATCH */ - -3, /* (204) expr ::= expr likeop expr */ - -5, /* (205) expr ::= expr likeop expr ESCAPE expr */ - -2, /* (206) expr ::= expr ISNULL|NOTNULL */ - -3, /* (207) expr ::= expr NOT NULL */ - -3, /* (208) expr ::= expr IS expr */ - -4, /* (209) expr ::= expr IS NOT expr */ - -6, /* (210) expr ::= expr IS NOT DISTINCT FROM expr */ - -5, /* (211) expr ::= expr IS DISTINCT FROM expr */ - -2, /* (212) expr ::= NOT expr */ - -2, /* (213) expr ::= BITNOT expr */ - -2, /* (214) expr ::= PLUS|MINUS expr */ - -3, /* (215) expr ::= 
expr PTR expr */ - -1, /* (216) between_op ::= BETWEEN */ - -2, /* (217) between_op ::= NOT BETWEEN */ - -5, /* (218) expr ::= expr between_op expr AND expr */ - -1, /* (219) in_op ::= IN */ - -2, /* (220) in_op ::= NOT IN */ - -5, /* (221) expr ::= expr in_op LP exprlist RP */ - -3, /* (222) expr ::= LP select RP */ - -5, /* (223) expr ::= expr in_op LP select RP */ - -5, /* (224) expr ::= expr in_op nm dbnm paren_exprlist */ - -4, /* (225) expr ::= EXISTS LP select RP */ - -5, /* (226) expr ::= CASE case_operand case_exprlist case_else END */ - -5, /* (227) case_exprlist ::= case_exprlist WHEN expr THEN expr */ - -4, /* (228) case_exprlist ::= WHEN expr THEN expr */ - -2, /* (229) case_else ::= ELSE expr */ - 0, /* (230) case_else ::= */ - 0, /* (231) case_operand ::= */ - 0, /* (232) exprlist ::= */ - -3, /* (233) nexprlist ::= nexprlist COMMA expr */ - -1, /* (234) nexprlist ::= expr */ - 0, /* (235) paren_exprlist ::= */ - -3, /* (236) paren_exprlist ::= LP exprlist RP */ - -12, /* (237) cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */ - -1, /* (238) uniqueflag ::= UNIQUE */ - 0, /* (239) uniqueflag ::= */ - 0, /* (240) eidlist_opt ::= */ - -3, /* (241) eidlist_opt ::= LP eidlist RP */ - -5, /* (242) eidlist ::= eidlist COMMA nm collate sortorder */ - -3, /* (243) eidlist ::= nm collate sortorder */ - 0, /* (244) collate ::= */ - -2, /* (245) collate ::= COLLATE ID|STRING */ - -4, /* (246) cmd ::= DROP INDEX ifexists fullname */ - -2, /* (247) cmd ::= VACUUM vinto */ - -3, /* (248) cmd ::= VACUUM nm vinto */ - -2, /* (249) vinto ::= INTO expr */ - 0, /* (250) vinto ::= */ - -3, /* (251) cmd ::= PRAGMA nm dbnm */ - -5, /* (252) cmd ::= PRAGMA nm dbnm EQ nmnum */ - -6, /* (253) cmd ::= PRAGMA nm dbnm LP nmnum RP */ - -5, /* (254) cmd ::= PRAGMA nm dbnm EQ minus_num */ - -6, /* (255) cmd ::= PRAGMA nm dbnm LP minus_num RP */ - -2, /* (256) plus_num ::= PLUS INTEGER|FLOAT */ - -2, /* (257) minus_num ::= MINUS INTEGER|FLOAT */ - -5, /* (258) cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */ - -11, /* (259) trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */ - -1, /* (260) trigger_time ::= BEFORE|AFTER */ - -2, /* (261) trigger_time ::= INSTEAD OF */ - 0, /* (262) trigger_time ::= */ - -1, /* (263) trigger_event ::= DELETE|INSERT */ - -1, /* (264) trigger_event ::= UPDATE */ - -3, /* (265) trigger_event ::= UPDATE OF idlist */ - 0, /* (266) when_clause ::= */ - -2, /* (267) when_clause ::= WHEN expr */ - -3, /* (268) trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */ - -2, /* (269) trigger_cmd_list ::= trigger_cmd SEMI */ - -3, /* (270) trnm ::= nm DOT nm */ - -3, /* (271) tridxby ::= INDEXED BY nm */ - -2, /* (272) tridxby ::= NOT INDEXED */ - -9, /* (273) trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */ - -8, /* (274) trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */ - -6, /* (275) trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */ - -3, /* (276) trigger_cmd ::= scanpt select scanpt */ - -4, /* (277) expr ::= RAISE LP IGNORE RP */ - -6, /* (278) expr ::= RAISE LP raisetype COMMA nm RP */ - -1, /* (279) raisetype ::= ROLLBACK */ - -1, /* (280) raisetype ::= ABORT */ - -1, /* (281) raisetype ::= FAIL */ - -4, /* (282) cmd ::= DROP TRIGGER ifexists fullname */ - -6, /* (283) cmd ::= ATTACH database_kw_opt expr AS expr key_opt */ - -3, /* (284) cmd ::= DETACH database_kw_opt expr */ - 0, /* (285) 
key_opt ::= */ - -2, /* (286) key_opt ::= KEY expr */ - -1, /* (287) cmd ::= REINDEX */ - -3, /* (288) cmd ::= REINDEX nm dbnm */ - -1, /* (289) cmd ::= ANALYZE */ - -3, /* (290) cmd ::= ANALYZE nm dbnm */ - -6, /* (291) cmd ::= ALTER TABLE fullname RENAME TO nm */ - -7, /* (292) cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */ - -6, /* (293) cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */ - -1, /* (294) add_column_fullname ::= fullname */ - -8, /* (295) cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */ - -1, /* (296) cmd ::= create_vtab */ - -4, /* (297) cmd ::= create_vtab LP vtabarglist RP */ - -8, /* (298) create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */ - 0, /* (299) vtabarg ::= */ - -1, /* (300) vtabargtoken ::= ANY */ - -3, /* (301) vtabargtoken ::= lp anylist RP */ - -1, /* (302) lp ::= LP */ - -2, /* (303) with ::= WITH wqlist */ - -3, /* (304) with ::= WITH RECURSIVE wqlist */ - -1, /* (305) wqas ::= AS */ - -2, /* (306) wqas ::= AS MATERIALIZED */ - -3, /* (307) wqas ::= AS NOT MATERIALIZED */ - -6, /* (308) wqitem ::= nm eidlist_opt wqas LP select RP */ - -1, /* (309) wqlist ::= wqitem */ - -3, /* (310) wqlist ::= wqlist COMMA wqitem */ - -3, /* (311) windowdefn_list ::= windowdefn_list COMMA windowdefn */ - -5, /* (312) windowdefn ::= nm AS LP window RP */ - -5, /* (313) window ::= PARTITION BY nexprlist orderby_opt frame_opt */ - -6, /* (314) window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ - -4, /* (315) window ::= ORDER BY sortlist frame_opt */ - -5, /* (316) window ::= nm ORDER BY sortlist frame_opt */ - -2, /* (317) window ::= nm frame_opt */ - 0, /* (318) frame_opt ::= */ - -3, /* (319) frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ - -6, /* (320) frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ - -1, /* (321) range_or_rows ::= RANGE|ROWS|GROUPS */ - -1, /* (322) frame_bound_s ::= frame_bound */ - -2, /* (323) frame_bound_s ::= UNBOUNDED PRECEDING */ - -1, /* (324) frame_bound_e ::= frame_bound */ - -2, /* (325) frame_bound_e ::= UNBOUNDED FOLLOWING */ - -2, /* (326) frame_bound ::= expr PRECEDING|FOLLOWING */ - -2, /* (327) frame_bound ::= CURRENT ROW */ - 0, /* (328) frame_exclude_opt ::= */ - -2, /* (329) frame_exclude_opt ::= EXCLUDE frame_exclude */ - -2, /* (330) frame_exclude ::= NO OTHERS */ - -2, /* (331) frame_exclude ::= CURRENT ROW */ - -1, /* (332) frame_exclude ::= GROUP|TIES */ - -2, /* (333) window_clause ::= WINDOW windowdefn_list */ - -2, /* (334) filter_over ::= filter_clause over_clause */ - -1, /* (335) filter_over ::= over_clause */ - -1, /* (336) filter_over ::= filter_clause */ - -4, /* (337) over_clause ::= OVER LP window RP */ - -2, /* (338) over_clause ::= OVER nm */ - -5, /* (339) filter_clause ::= FILTER LP WHERE expr RP */ - -1, /* (340) input ::= cmdlist */ - -2, /* (341) cmdlist ::= cmdlist ecmd */ - -1, /* (342) cmdlist ::= ecmd */ - -1, /* (343) ecmd ::= SEMI */ - -2, /* (344) ecmd ::= cmdx SEMI */ - -3, /* (345) ecmd ::= explain cmdx SEMI */ - 0, /* (346) trans_opt ::= */ - -1, /* (347) trans_opt ::= TRANSACTION */ - -2, /* (348) trans_opt ::= TRANSACTION nm */ - -1, /* (349) savepoint_opt ::= SAVEPOINT */ - 0, /* (350) savepoint_opt ::= */ - -2, /* (351) cmd ::= create_table create_table_args */ - -1, /* (352) table_option_set ::= table_option */ - -4, /* (353) columnlist ::= columnlist COMMA columnname carglist */ - -2, /* (354) columnlist ::= columnname carglist */ - -1, /* (355) nm ::= 
ID|INDEXED|JOIN_KW */ - -1, /* (356) nm ::= STRING */ - -1, /* (357) typetoken ::= typename */ - -1, /* (358) typename ::= ID|STRING */ - -1, /* (359) signed ::= plus_num */ - -1, /* (360) signed ::= minus_num */ - -2, /* (361) carglist ::= carglist ccons */ - 0, /* (362) carglist ::= */ - -2, /* (363) ccons ::= NULL onconf */ - -4, /* (364) ccons ::= GENERATED ALWAYS AS generated */ - -2, /* (365) ccons ::= AS generated */ - -2, /* (366) conslist_opt ::= COMMA conslist */ - -3, /* (367) conslist ::= conslist tconscomma tcons */ - -1, /* (368) conslist ::= tcons */ - 0, /* (369) tconscomma ::= */ - -1, /* (370) defer_subclause_opt ::= defer_subclause */ - -1, /* (371) resolvetype ::= raisetype */ - -1, /* (372) selectnowith ::= oneselect */ - -1, /* (373) oneselect ::= values */ - -2, /* (374) sclp ::= selcollist COMMA */ - -1, /* (375) as ::= ID|STRING */ - -1, /* (376) indexed_opt ::= indexed_by */ - 0, /* (377) returning ::= */ - -1, /* (378) expr ::= term */ - -1, /* (379) likeop ::= LIKE_KW|MATCH */ - -1, /* (380) case_operand ::= expr */ - -1, /* (381) exprlist ::= nexprlist */ - -1, /* (382) nmnum ::= plus_num */ - -1, /* (383) nmnum ::= nm */ - -1, /* (384) nmnum ::= ON */ - -1, /* (385) nmnum ::= DELETE */ - -1, /* (386) nmnum ::= DEFAULT */ - -1, /* (387) plus_num ::= INTEGER|FLOAT */ - 0, /* (388) foreach_clause ::= */ - -3, /* (389) foreach_clause ::= FOR EACH ROW */ - -1, /* (390) trnm ::= nm */ - 0, /* (391) tridxby ::= */ - -1, /* (392) database_kw_opt ::= DATABASE */ - 0, /* (393) database_kw_opt ::= */ - 0, /* (394) kwcolumn_opt ::= */ - -1, /* (395) kwcolumn_opt ::= COLUMNKW */ - -1, /* (396) vtabarglist ::= vtabarg */ - -3, /* (397) vtabarglist ::= vtabarglist COMMA vtabarg */ - -2, /* (398) vtabarg ::= vtabarg vtabargtoken */ - 0, /* (399) anylist ::= */ - -4, /* (400) anylist ::= anylist LP anylist RP */ - -2, /* (401) anylist ::= anylist ANY */ - 0, /* (402) with ::= */ - -1, /* (403) windowdefn_list ::= windowdefn */ - -1, /* (404) window ::= frame_opt */ + -1, /* (95) oneselect ::= mvalues */ + -5, /* (96) mvalues ::= values COMMA LP nexprlist RP */ + -5, /* (97) mvalues ::= mvalues COMMA LP nexprlist RP */ + -1, /* (98) distinct ::= DISTINCT */ + -1, /* (99) distinct ::= ALL */ + 0, /* (100) distinct ::= */ + 0, /* (101) sclp ::= */ + -5, /* (102) selcollist ::= sclp scanpt expr scanpt as */ + -3, /* (103) selcollist ::= sclp scanpt STAR */ + -5, /* (104) selcollist ::= sclp scanpt nm DOT STAR */ + -2, /* (105) as ::= AS nm */ + 0, /* (106) as ::= */ + 0, /* (107) from ::= */ + -2, /* (108) from ::= FROM seltablist */ + -2, /* (109) stl_prefix ::= seltablist joinop */ + 0, /* (110) stl_prefix ::= */ + -5, /* (111) seltablist ::= stl_prefix nm dbnm as on_using */ + -6, /* (112) seltablist ::= stl_prefix nm dbnm as indexed_by on_using */ + -8, /* (113) seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_using */ + -6, /* (114) seltablist ::= stl_prefix LP select RP as on_using */ + -6, /* (115) seltablist ::= stl_prefix LP seltablist RP as on_using */ + 0, /* (116) dbnm ::= */ + -2, /* (117) dbnm ::= DOT nm */ + -1, /* (118) fullname ::= nm */ + -3, /* (119) fullname ::= nm DOT nm */ + -1, /* (120) xfullname ::= nm */ + -3, /* (121) xfullname ::= nm DOT nm */ + -5, /* (122) xfullname ::= nm DOT nm AS nm */ + -3, /* (123) xfullname ::= nm AS nm */ + -1, /* (124) joinop ::= COMMA|JOIN */ + -2, /* (125) joinop ::= JOIN_KW JOIN */ + -3, /* (126) joinop ::= JOIN_KW nm JOIN */ + -4, /* (127) joinop ::= JOIN_KW nm nm JOIN */ + -2, /* (128) on_using ::= ON expr */ + -4, 
/* (129) on_using ::= USING LP idlist RP */ + 0, /* (130) on_using ::= */ + 0, /* (131) indexed_opt ::= */ + -3, /* (132) indexed_by ::= INDEXED BY nm */ + -2, /* (133) indexed_by ::= NOT INDEXED */ + 0, /* (134) orderby_opt ::= */ + -3, /* (135) orderby_opt ::= ORDER BY sortlist */ + -5, /* (136) sortlist ::= sortlist COMMA expr sortorder nulls */ + -3, /* (137) sortlist ::= expr sortorder nulls */ + -1, /* (138) sortorder ::= ASC */ + -1, /* (139) sortorder ::= DESC */ + 0, /* (140) sortorder ::= */ + -2, /* (141) nulls ::= NULLS FIRST */ + -2, /* (142) nulls ::= NULLS LAST */ + 0, /* (143) nulls ::= */ + 0, /* (144) groupby_opt ::= */ + -3, /* (145) groupby_opt ::= GROUP BY nexprlist */ + 0, /* (146) having_opt ::= */ + -2, /* (147) having_opt ::= HAVING expr */ + 0, /* (148) limit_opt ::= */ + -2, /* (149) limit_opt ::= LIMIT expr */ + -4, /* (150) limit_opt ::= LIMIT expr OFFSET expr */ + -4, /* (151) limit_opt ::= LIMIT expr COMMA expr */ + -8, /* (152) cmd ::= with DELETE FROM xfullname indexed_opt where_opt_ret orderby_opt limit_opt */ + 0, /* (153) where_opt ::= */ + -2, /* (154) where_opt ::= WHERE expr */ + 0, /* (155) where_opt_ret ::= */ + -2, /* (156) where_opt_ret ::= WHERE expr */ + -2, /* (157) where_opt_ret ::= RETURNING selcollist */ + -4, /* (158) where_opt_ret ::= WHERE expr RETURNING selcollist */ + -11, /* (159) cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt_ret orderby_opt limit_opt */ + -5, /* (160) setlist ::= setlist COMMA nm EQ expr */ + -7, /* (161) setlist ::= setlist COMMA LP idlist RP EQ expr */ + -3, /* (162) setlist ::= nm EQ expr */ + -5, /* (163) setlist ::= LP idlist RP EQ expr */ + -7, /* (164) cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert */ + -8, /* (165) cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES returning */ + 0, /* (166) upsert ::= */ + -2, /* (167) upsert ::= RETURNING selcollist */ + -12, /* (168) upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt upsert */ + -9, /* (169) upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING upsert */ + -5, /* (170) upsert ::= ON CONFLICT DO NOTHING returning */ + -8, /* (171) upsert ::= ON CONFLICT DO UPDATE SET setlist where_opt returning */ + -2, /* (172) returning ::= RETURNING selcollist */ + -2, /* (173) insert_cmd ::= INSERT orconf */ + -1, /* (174) insert_cmd ::= REPLACE */ + 0, /* (175) idlist_opt ::= */ + -3, /* (176) idlist_opt ::= LP idlist RP */ + -3, /* (177) idlist ::= idlist COMMA nm */ + -1, /* (178) idlist ::= nm */ + -3, /* (179) expr ::= LP expr RP */ + -1, /* (180) expr ::= ID|INDEXED|JOIN_KW */ + -3, /* (181) expr ::= nm DOT nm */ + -5, /* (182) expr ::= nm DOT nm DOT nm */ + -1, /* (183) term ::= NULL|FLOAT|BLOB */ + -1, /* (184) term ::= STRING */ + -1, /* (185) term ::= INTEGER */ + -1, /* (186) expr ::= VARIABLE */ + -3, /* (187) expr ::= expr COLLATE ID|STRING */ + -6, /* (188) expr ::= CAST LP expr AS typetoken RP */ + -5, /* (189) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP */ + -8, /* (190) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP */ + -4, /* (191) expr ::= ID|INDEXED|JOIN_KW LP STAR RP */ + -6, /* (192) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over */ + -9, /* (193) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP filter_over */ + -5, /* (194) expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over */ + -1, /* (195) term ::= CTIME_KW */ + -5, /* (196) expr ::= LP nexprlist COMMA expr RP */ + -3, /* (197) expr 
::= expr AND expr */ + -3, /* (198) expr ::= expr OR expr */ + -3, /* (199) expr ::= expr LT|GT|GE|LE expr */ + -3, /* (200) expr ::= expr EQ|NE expr */ + -3, /* (201) expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ + -3, /* (202) expr ::= expr PLUS|MINUS expr */ + -3, /* (203) expr ::= expr STAR|SLASH|REM expr */ + -3, /* (204) expr ::= expr CONCAT expr */ + -2, /* (205) likeop ::= NOT LIKE_KW|MATCH */ + -3, /* (206) expr ::= expr likeop expr */ + -5, /* (207) expr ::= expr likeop expr ESCAPE expr */ + -2, /* (208) expr ::= expr ISNULL|NOTNULL */ + -3, /* (209) expr ::= expr NOT NULL */ + -3, /* (210) expr ::= expr IS expr */ + -4, /* (211) expr ::= expr IS NOT expr */ + -6, /* (212) expr ::= expr IS NOT DISTINCT FROM expr */ + -5, /* (213) expr ::= expr IS DISTINCT FROM expr */ + -2, /* (214) expr ::= NOT expr */ + -2, /* (215) expr ::= BITNOT expr */ + -2, /* (216) expr ::= PLUS|MINUS expr */ + -3, /* (217) expr ::= expr PTR expr */ + -1, /* (218) between_op ::= BETWEEN */ + -2, /* (219) between_op ::= NOT BETWEEN */ + -5, /* (220) expr ::= expr between_op expr AND expr */ + -1, /* (221) in_op ::= IN */ + -2, /* (222) in_op ::= NOT IN */ + -5, /* (223) expr ::= expr in_op LP exprlist RP */ + -3, /* (224) expr ::= LP select RP */ + -5, /* (225) expr ::= expr in_op LP select RP */ + -5, /* (226) expr ::= expr in_op nm dbnm paren_exprlist */ + -4, /* (227) expr ::= EXISTS LP select RP */ + -5, /* (228) expr ::= CASE case_operand case_exprlist case_else END */ + -5, /* (229) case_exprlist ::= case_exprlist WHEN expr THEN expr */ + -4, /* (230) case_exprlist ::= WHEN expr THEN expr */ + -2, /* (231) case_else ::= ELSE expr */ + 0, /* (232) case_else ::= */ + 0, /* (233) case_operand ::= */ + 0, /* (234) exprlist ::= */ + -3, /* (235) nexprlist ::= nexprlist COMMA expr */ + -1, /* (236) nexprlist ::= expr */ + 0, /* (237) paren_exprlist ::= */ + -3, /* (238) paren_exprlist ::= LP exprlist RP */ + -12, /* (239) cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */ + -1, /* (240) uniqueflag ::= UNIQUE */ + 0, /* (241) uniqueflag ::= */ + 0, /* (242) eidlist_opt ::= */ + -3, /* (243) eidlist_opt ::= LP eidlist RP */ + -5, /* (244) eidlist ::= eidlist COMMA nm collate sortorder */ + -3, /* (245) eidlist ::= nm collate sortorder */ + 0, /* (246) collate ::= */ + -2, /* (247) collate ::= COLLATE ID|STRING */ + -4, /* (248) cmd ::= DROP INDEX ifexists fullname */ + -2, /* (249) cmd ::= VACUUM vinto */ + -3, /* (250) cmd ::= VACUUM nm vinto */ + -2, /* (251) vinto ::= INTO expr */ + 0, /* (252) vinto ::= */ + -3, /* (253) cmd ::= PRAGMA nm dbnm */ + -5, /* (254) cmd ::= PRAGMA nm dbnm EQ nmnum */ + -6, /* (255) cmd ::= PRAGMA nm dbnm LP nmnum RP */ + -5, /* (256) cmd ::= PRAGMA nm dbnm EQ minus_num */ + -6, /* (257) cmd ::= PRAGMA nm dbnm LP minus_num RP */ + -2, /* (258) plus_num ::= PLUS INTEGER|FLOAT */ + -2, /* (259) minus_num ::= MINUS INTEGER|FLOAT */ + -5, /* (260) cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */ + -11, /* (261) trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */ + -1, /* (262) trigger_time ::= BEFORE|AFTER */ + -2, /* (263) trigger_time ::= INSTEAD OF */ + 0, /* (264) trigger_time ::= */ + -1, /* (265) trigger_event ::= DELETE|INSERT */ + -1, /* (266) trigger_event ::= UPDATE */ + -3, /* (267) trigger_event ::= UPDATE OF idlist */ + 0, /* (268) when_clause ::= */ + -2, /* (269) when_clause ::= WHEN expr */ + -3, /* (270) trigger_cmd_list ::= trigger_cmd_list 
trigger_cmd SEMI */ + -2, /* (271) trigger_cmd_list ::= trigger_cmd SEMI */ + -3, /* (272) trnm ::= nm DOT nm */ + -3, /* (273) tridxby ::= INDEXED BY nm */ + -2, /* (274) tridxby ::= NOT INDEXED */ + -9, /* (275) trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */ + -8, /* (276) trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */ + -6, /* (277) trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */ + -3, /* (278) trigger_cmd ::= scanpt select scanpt */ + -4, /* (279) expr ::= RAISE LP IGNORE RP */ + -6, /* (280) expr ::= RAISE LP raisetype COMMA nm RP */ + -1, /* (281) raisetype ::= ROLLBACK */ + -1, /* (282) raisetype ::= ABORT */ + -1, /* (283) raisetype ::= FAIL */ + -4, /* (284) cmd ::= DROP TRIGGER ifexists fullname */ + -6, /* (285) cmd ::= ATTACH database_kw_opt expr AS expr key_opt */ + -3, /* (286) cmd ::= DETACH database_kw_opt expr */ + 0, /* (287) key_opt ::= */ + -2, /* (288) key_opt ::= KEY expr */ + -1, /* (289) cmd ::= REINDEX */ + -3, /* (290) cmd ::= REINDEX nm dbnm */ + -1, /* (291) cmd ::= ANALYZE */ + -3, /* (292) cmd ::= ANALYZE nm dbnm */ + -6, /* (293) cmd ::= ALTER TABLE fullname RENAME TO nm */ + -7, /* (294) cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */ + -6, /* (295) cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */ + -1, /* (296) add_column_fullname ::= fullname */ + -8, /* (297) cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */ + -1, /* (298) cmd ::= create_vtab */ + -4, /* (299) cmd ::= create_vtab LP vtabarglist RP */ + -8, /* (300) create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */ + 0, /* (301) vtabarg ::= */ + -1, /* (302) vtabargtoken ::= ANY */ + -3, /* (303) vtabargtoken ::= lp anylist RP */ + -1, /* (304) lp ::= LP */ + -2, /* (305) with ::= WITH wqlist */ + -3, /* (306) with ::= WITH RECURSIVE wqlist */ + -1, /* (307) wqas ::= AS */ + -2, /* (308) wqas ::= AS MATERIALIZED */ + -3, /* (309) wqas ::= AS NOT MATERIALIZED */ + -6, /* (310) wqitem ::= withnm eidlist_opt wqas LP select RP */ + -1, /* (311) withnm ::= nm */ + -1, /* (312) wqlist ::= wqitem */ + -3, /* (313) wqlist ::= wqlist COMMA wqitem */ + -3, /* (314) windowdefn_list ::= windowdefn_list COMMA windowdefn */ + -5, /* (315) windowdefn ::= nm AS LP window RP */ + -5, /* (316) window ::= PARTITION BY nexprlist orderby_opt frame_opt */ + -6, /* (317) window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ + -4, /* (318) window ::= ORDER BY sortlist frame_opt */ + -5, /* (319) window ::= nm ORDER BY sortlist frame_opt */ + -2, /* (320) window ::= nm frame_opt */ + 0, /* (321) frame_opt ::= */ + -3, /* (322) frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ + -6, /* (323) frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ + -1, /* (324) range_or_rows ::= RANGE|ROWS|GROUPS */ + -1, /* (325) frame_bound_s ::= frame_bound */ + -2, /* (326) frame_bound_s ::= UNBOUNDED PRECEDING */ + -1, /* (327) frame_bound_e ::= frame_bound */ + -2, /* (328) frame_bound_e ::= UNBOUNDED FOLLOWING */ + -2, /* (329) frame_bound ::= expr PRECEDING|FOLLOWING */ + -2, /* (330) frame_bound ::= CURRENT ROW */ + 0, /* (331) frame_exclude_opt ::= */ + -2, /* (332) frame_exclude_opt ::= EXCLUDE frame_exclude */ + -2, /* (333) frame_exclude ::= NO OTHERS */ + -2, /* (334) frame_exclude ::= CURRENT ROW */ + -1, /* (335) frame_exclude ::= GROUP|TIES */ + -2, /* (336) window_clause ::= WINDOW windowdefn_list */ + -2, /* (337) filter_over ::= 
filter_clause over_clause */ + -1, /* (338) filter_over ::= over_clause */ + -1, /* (339) filter_over ::= filter_clause */ + -4, /* (340) over_clause ::= OVER LP window RP */ + -2, /* (341) over_clause ::= OVER nm */ + -5, /* (342) filter_clause ::= FILTER LP WHERE expr RP */ + -1, /* (343) term ::= QNUMBER */ + -1, /* (344) input ::= cmdlist */ + -2, /* (345) cmdlist ::= cmdlist ecmd */ + -1, /* (346) cmdlist ::= ecmd */ + -1, /* (347) ecmd ::= SEMI */ + -2, /* (348) ecmd ::= cmdx SEMI */ + -3, /* (349) ecmd ::= explain cmdx SEMI */ + 0, /* (350) trans_opt ::= */ + -1, /* (351) trans_opt ::= TRANSACTION */ + -2, /* (352) trans_opt ::= TRANSACTION nm */ + -1, /* (353) savepoint_opt ::= SAVEPOINT */ + 0, /* (354) savepoint_opt ::= */ + -2, /* (355) cmd ::= create_table create_table_args */ + -1, /* (356) table_option_set ::= table_option */ + -4, /* (357) columnlist ::= columnlist COMMA columnname carglist */ + -2, /* (358) columnlist ::= columnname carglist */ + -1, /* (359) nm ::= ID|INDEXED|JOIN_KW */ + -1, /* (360) nm ::= STRING */ + -1, /* (361) typetoken ::= typename */ + -1, /* (362) typename ::= ID|STRING */ + -1, /* (363) signed ::= plus_num */ + -1, /* (364) signed ::= minus_num */ + -2, /* (365) carglist ::= carglist ccons */ + 0, /* (366) carglist ::= */ + -2, /* (367) ccons ::= NULL onconf */ + -4, /* (368) ccons ::= GENERATED ALWAYS AS generated */ + -2, /* (369) ccons ::= AS generated */ + -2, /* (370) conslist_opt ::= COMMA conslist */ + -3, /* (371) conslist ::= conslist tconscomma tcons */ + -1, /* (372) conslist ::= tcons */ + 0, /* (373) tconscomma ::= */ + -1, /* (374) defer_subclause_opt ::= defer_subclause */ + -1, /* (375) resolvetype ::= raisetype */ + -1, /* (376) selectnowith ::= oneselect */ + -1, /* (377) oneselect ::= values */ + -2, /* (378) sclp ::= selcollist COMMA */ + -1, /* (379) as ::= ID|STRING */ + -1, /* (380) indexed_opt ::= indexed_by */ + 0, /* (381) returning ::= */ + -1, /* (382) expr ::= term */ + -1, /* (383) likeop ::= LIKE_KW|MATCH */ + -1, /* (384) case_operand ::= expr */ + -1, /* (385) exprlist ::= nexprlist */ + -1, /* (386) nmnum ::= plus_num */ + -1, /* (387) nmnum ::= nm */ + -1, /* (388) nmnum ::= ON */ + -1, /* (389) nmnum ::= DELETE */ + -1, /* (390) nmnum ::= DEFAULT */ + -1, /* (391) plus_num ::= INTEGER|FLOAT */ + 0, /* (392) foreach_clause ::= */ + -3, /* (393) foreach_clause ::= FOR EACH ROW */ + -1, /* (394) trnm ::= nm */ + 0, /* (395) tridxby ::= */ + -1, /* (396) database_kw_opt ::= DATABASE */ + 0, /* (397) database_kw_opt ::= */ + 0, /* (398) kwcolumn_opt ::= */ + -1, /* (399) kwcolumn_opt ::= COLUMNKW */ + -1, /* (400) vtabarglist ::= vtabarg */ + -3, /* (401) vtabarglist ::= vtabarglist COMMA vtabarg */ + -2, /* (402) vtabarg ::= vtabarg vtabargtoken */ + 0, /* (403) anylist ::= */ + -4, /* (404) anylist ::= anylist LP anylist RP */ + -2, /* (405) anylist ::= anylist ANY */ + 0, /* (406) with ::= */ + -1, /* (407) windowdefn_list ::= windowdefn */ + -1, /* (408) window ::= frame_opt */ }; static void yy_accept(yyParser*); /* Forward Declaration */ @@ -174739,16 +176125,16 @@ static YYACTIONTYPE yy_reduce( { sqlite3FinishCoding(pParse); } break; case 3: /* cmd ::= BEGIN transtype trans_opt */ -{sqlite3BeginTransaction(pParse, yymsp[-1].minor.yy394);} +{sqlite3BeginTransaction(pParse, yymsp[-1].minor.yy144);} break; case 4: /* transtype ::= */ -{yymsp[1].minor.yy394 = TK_DEFERRED;} +{yymsp[1].minor.yy144 = TK_DEFERRED;} break; case 5: /* transtype ::= DEFERRED */ case 6: /* transtype ::= IMMEDIATE */ 
yytestcase(yyruleno==6); case 7: /* transtype ::= EXCLUSIVE */ yytestcase(yyruleno==7); - case 321: /* range_or_rows ::= RANGE|ROWS|GROUPS */ yytestcase(yyruleno==321); -{yymsp[0].minor.yy394 = yymsp[0].major; /*A-overwrites-X*/} + case 324: /* range_or_rows ::= RANGE|ROWS|GROUPS */ yytestcase(yyruleno==324); +{yymsp[0].minor.yy144 = yymsp[0].major; /*A-overwrites-X*/} break; case 8: /* cmd ::= COMMIT|END trans_opt */ case 9: /* cmd ::= ROLLBACK trans_opt */ yytestcase(yyruleno==9); @@ -174771,7 +176157,7 @@ static YYACTIONTYPE yy_reduce( break; case 13: /* create_table ::= createkw temp TABLE ifnotexists nm dbnm */ { - sqlite3StartTable(pParse,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0,yymsp[-4].minor.yy394,0,0,yymsp[-2].minor.yy394); + sqlite3StartTable(pParse,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0,yymsp[-4].minor.yy144,0,0,yymsp[-2].minor.yy144); } break; case 14: /* createkw ::= CREATE */ @@ -174783,40 +176169,40 @@ static YYACTIONTYPE yy_reduce( case 62: /* init_deferred_pred_opt ::= */ yytestcase(yyruleno==62); case 72: /* defer_subclause_opt ::= */ yytestcase(yyruleno==72); case 81: /* ifexists ::= */ yytestcase(yyruleno==81); - case 98: /* distinct ::= */ yytestcase(yyruleno==98); - case 244: /* collate ::= */ yytestcase(yyruleno==244); -{yymsp[1].minor.yy394 = 0;} + case 100: /* distinct ::= */ yytestcase(yyruleno==100); + case 246: /* collate ::= */ yytestcase(yyruleno==246); +{yymsp[1].minor.yy144 = 0;} break; case 16: /* ifnotexists ::= IF NOT EXISTS */ -{yymsp[-2].minor.yy394 = 1;} +{yymsp[-2].minor.yy144 = 1;} break; case 17: /* temp ::= TEMP */ -{yymsp[0].minor.yy394 = pParse->db->init.busy==0;} +{yymsp[0].minor.yy144 = pParse->db->init.busy==0;} break; case 19: /* create_table_args ::= LP columnlist conslist_opt RP table_option_set */ { - sqlite3EndTable(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,yymsp[0].minor.yy285,0); + sqlite3EndTable(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,yymsp[0].minor.yy391,0); } break; case 20: /* create_table_args ::= AS select */ { - sqlite3EndTable(pParse,0,0,0,yymsp[0].minor.yy47); - sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy47); + sqlite3EndTable(pParse,0,0,0,yymsp[0].minor.yy555); + sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy555); } break; case 21: /* table_option_set ::= */ -{yymsp[1].minor.yy285 = 0;} +{yymsp[1].minor.yy391 = 0;} break; case 22: /* table_option_set ::= table_option_set COMMA table_option */ -{yylhsminor.yy285 = yymsp[-2].minor.yy285|yymsp[0].minor.yy285;} - yymsp[-2].minor.yy285 = yylhsminor.yy285; +{yylhsminor.yy391 = yymsp[-2].minor.yy391|yymsp[0].minor.yy391;} + yymsp[-2].minor.yy391 = yylhsminor.yy391; break; case 23: /* table_option ::= WITHOUT nm */ { if( yymsp[0].minor.yy0.n==5 && sqlite3_strnicmp(yymsp[0].minor.yy0.z,"rowid",5)==0 ){ - yymsp[-1].minor.yy285 = TF_WithoutRowid | TF_NoVisibleRowid; + yymsp[-1].minor.yy391 = TF_WithoutRowid | TF_NoVisibleRowid; }else{ - yymsp[-1].minor.yy285 = 0; + yymsp[-1].minor.yy391 = 0; sqlite3ErrorMsg(pParse, "unknown table option: %.*s", yymsp[0].minor.yy0.n, yymsp[0].minor.yy0.z); } } @@ -174824,20 +176210,20 @@ static YYACTIONTYPE yy_reduce( case 24: /* table_option ::= nm */ { if( yymsp[0].minor.yy0.n==6 && sqlite3_strnicmp(yymsp[0].minor.yy0.z,"strict",6)==0 ){ - yylhsminor.yy285 = TF_Strict; + yylhsminor.yy391 = TF_Strict; }else{ - yylhsminor.yy285 = 0; + yylhsminor.yy391 = 0; sqlite3ErrorMsg(pParse, "unknown table option: %.*s", yymsp[0].minor.yy0.n, yymsp[0].minor.yy0.z); } } - yymsp[0].minor.yy285 = yylhsminor.yy285; + yymsp[0].minor.yy391 = 
yylhsminor.yy391; break; case 25: /* columnname ::= nm typetoken */ {sqlite3AddColumn(pParse,yymsp[-1].minor.yy0,yymsp[0].minor.yy0);} break; case 26: /* typetoken ::= */ case 65: /* conslist_opt ::= */ yytestcase(yyruleno==65); - case 104: /* as ::= */ yytestcase(yyruleno==104); + case 106: /* as ::= */ yytestcase(yyruleno==106); {yymsp[1].minor.yy0.n = 0; yymsp[1].minor.yy0.z = 0;} break; case 27: /* typetoken ::= typename LP signed RP */ @@ -174856,7 +176242,7 @@ static YYACTIONTYPE yy_reduce( case 30: /* scanpt ::= */ { assert( yyLookahead!=YYNOCODE ); - yymsp[1].minor.yy522 = yyLookaheadToken.z; + yymsp[1].minor.yy168 = yyLookaheadToken.z; } break; case 31: /* scantok ::= */ @@ -174870,17 +176256,17 @@ static YYACTIONTYPE yy_reduce( {pParse->constraintName = yymsp[0].minor.yy0;} break; case 33: /* ccons ::= DEFAULT scantok term */ -{sqlite3AddDefaultValue(pParse,yymsp[0].minor.yy528,yymsp[-1].minor.yy0.z,&yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n]);} +{sqlite3AddDefaultValue(pParse,yymsp[0].minor.yy454,yymsp[-1].minor.yy0.z,&yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n]);} break; case 34: /* ccons ::= DEFAULT LP expr RP */ -{sqlite3AddDefaultValue(pParse,yymsp[-1].minor.yy528,yymsp[-2].minor.yy0.z+1,yymsp[0].minor.yy0.z);} +{sqlite3AddDefaultValue(pParse,yymsp[-1].minor.yy454,yymsp[-2].minor.yy0.z+1,yymsp[0].minor.yy0.z);} break; case 35: /* ccons ::= DEFAULT PLUS scantok term */ -{sqlite3AddDefaultValue(pParse,yymsp[0].minor.yy528,yymsp[-2].minor.yy0.z,&yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n]);} +{sqlite3AddDefaultValue(pParse,yymsp[0].minor.yy454,yymsp[-2].minor.yy0.z,&yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n]);} break; case 36: /* ccons ::= DEFAULT MINUS scantok term */ { - Expr *p = sqlite3PExpr(pParse, TK_UMINUS, yymsp[0].minor.yy528, 0); + Expr *p = sqlite3PExpr(pParse, TK_UMINUS, yymsp[0].minor.yy454, 0); sqlite3AddDefaultValue(pParse,p,yymsp[-2].minor.yy0.z,&yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n]); } break; @@ -174895,151 +176281,151 @@ static YYACTIONTYPE yy_reduce( } break; case 38: /* ccons ::= NOT NULL onconf */ -{sqlite3AddNotNull(pParse, yymsp[0].minor.yy394);} +{sqlite3AddNotNull(pParse, yymsp[0].minor.yy144);} break; case 39: /* ccons ::= PRIMARY KEY sortorder onconf autoinc */ -{sqlite3AddPrimaryKey(pParse,0,yymsp[-1].minor.yy394,yymsp[0].minor.yy394,yymsp[-2].minor.yy394);} +{sqlite3AddPrimaryKey(pParse,0,yymsp[-1].minor.yy144,yymsp[0].minor.yy144,yymsp[-2].minor.yy144);} break; case 40: /* ccons ::= UNIQUE onconf */ -{sqlite3CreateIndex(pParse,0,0,0,0,yymsp[0].minor.yy394,0,0,0,0, +{sqlite3CreateIndex(pParse,0,0,0,0,yymsp[0].minor.yy144,0,0,0,0, SQLITE_IDXTYPE_UNIQUE);} break; case 41: /* ccons ::= CHECK LP expr RP */ -{sqlite3AddCheckConstraint(pParse,yymsp[-1].minor.yy528,yymsp[-2].minor.yy0.z,yymsp[0].minor.yy0.z);} +{sqlite3AddCheckConstraint(pParse,yymsp[-1].minor.yy454,yymsp[-2].minor.yy0.z,yymsp[0].minor.yy0.z);} break; case 42: /* ccons ::= REFERENCES nm eidlist_opt refargs */ -{sqlite3CreateForeignKey(pParse,0,&yymsp[-2].minor.yy0,yymsp[-1].minor.yy322,yymsp[0].minor.yy394);} +{sqlite3CreateForeignKey(pParse,0,&yymsp[-2].minor.yy0,yymsp[-1].minor.yy14,yymsp[0].minor.yy144);} break; case 43: /* ccons ::= defer_subclause */ -{sqlite3DeferForeignKey(pParse,yymsp[0].minor.yy394);} +{sqlite3DeferForeignKey(pParse,yymsp[0].minor.yy144);} break; case 44: /* ccons ::= COLLATE ID|STRING */ {sqlite3AddCollateType(pParse, &yymsp[0].minor.yy0);} break; case 45: /* generated ::= LP expr RP */ -{sqlite3AddGenerated(pParse,yymsp[-1].minor.yy528,0);} 
+{sqlite3AddGenerated(pParse,yymsp[-1].minor.yy454,0);} break; case 46: /* generated ::= LP expr RP ID */ -{sqlite3AddGenerated(pParse,yymsp[-2].minor.yy528,&yymsp[0].minor.yy0);} +{sqlite3AddGenerated(pParse,yymsp[-2].minor.yy454,&yymsp[0].minor.yy0);} break; case 48: /* autoinc ::= AUTOINCR */ -{yymsp[0].minor.yy394 = 1;} +{yymsp[0].minor.yy144 = 1;} break; case 49: /* refargs ::= */ -{ yymsp[1].minor.yy394 = OE_None*0x0101; /* EV: R-19803-45884 */} +{ yymsp[1].minor.yy144 = OE_None*0x0101; /* EV: R-19803-45884 */} break; case 50: /* refargs ::= refargs refarg */ -{ yymsp[-1].minor.yy394 = (yymsp[-1].minor.yy394 & ~yymsp[0].minor.yy231.mask) | yymsp[0].minor.yy231.value; } +{ yymsp[-1].minor.yy144 = (yymsp[-1].minor.yy144 & ~yymsp[0].minor.yy383.mask) | yymsp[0].minor.yy383.value; } break; case 51: /* refarg ::= MATCH nm */ -{ yymsp[-1].minor.yy231.value = 0; yymsp[-1].minor.yy231.mask = 0x000000; } +{ yymsp[-1].minor.yy383.value = 0; yymsp[-1].minor.yy383.mask = 0x000000; } break; case 52: /* refarg ::= ON INSERT refact */ -{ yymsp[-2].minor.yy231.value = 0; yymsp[-2].minor.yy231.mask = 0x000000; } +{ yymsp[-2].minor.yy383.value = 0; yymsp[-2].minor.yy383.mask = 0x000000; } break; case 53: /* refarg ::= ON DELETE refact */ -{ yymsp[-2].minor.yy231.value = yymsp[0].minor.yy394; yymsp[-2].minor.yy231.mask = 0x0000ff; } +{ yymsp[-2].minor.yy383.value = yymsp[0].minor.yy144; yymsp[-2].minor.yy383.mask = 0x0000ff; } break; case 54: /* refarg ::= ON UPDATE refact */ -{ yymsp[-2].minor.yy231.value = yymsp[0].minor.yy394<<8; yymsp[-2].minor.yy231.mask = 0x00ff00; } +{ yymsp[-2].minor.yy383.value = yymsp[0].minor.yy144<<8; yymsp[-2].minor.yy383.mask = 0x00ff00; } break; case 55: /* refact ::= SET NULL */ -{ yymsp[-1].minor.yy394 = OE_SetNull; /* EV: R-33326-45252 */} +{ yymsp[-1].minor.yy144 = OE_SetNull; /* EV: R-33326-45252 */} break; case 56: /* refact ::= SET DEFAULT */ -{ yymsp[-1].minor.yy394 = OE_SetDflt; /* EV: R-33326-45252 */} +{ yymsp[-1].minor.yy144 = OE_SetDflt; /* EV: R-33326-45252 */} break; case 57: /* refact ::= CASCADE */ -{ yymsp[0].minor.yy394 = OE_Cascade; /* EV: R-33326-45252 */} +{ yymsp[0].minor.yy144 = OE_Cascade; /* EV: R-33326-45252 */} break; case 58: /* refact ::= RESTRICT */ -{ yymsp[0].minor.yy394 = OE_Restrict; /* EV: R-33326-45252 */} +{ yymsp[0].minor.yy144 = OE_Restrict; /* EV: R-33326-45252 */} break; case 59: /* refact ::= NO ACTION */ -{ yymsp[-1].minor.yy394 = OE_None; /* EV: R-33326-45252 */} +{ yymsp[-1].minor.yy144 = OE_None; /* EV: R-33326-45252 */} break; case 60: /* defer_subclause ::= NOT DEFERRABLE init_deferred_pred_opt */ -{yymsp[-2].minor.yy394 = 0;} +{yymsp[-2].minor.yy144 = 0;} break; case 61: /* defer_subclause ::= DEFERRABLE init_deferred_pred_opt */ case 76: /* orconf ::= OR resolvetype */ yytestcase(yyruleno==76); - case 171: /* insert_cmd ::= INSERT orconf */ yytestcase(yyruleno==171); -{yymsp[-1].minor.yy394 = yymsp[0].minor.yy394;} + case 173: /* insert_cmd ::= INSERT orconf */ yytestcase(yyruleno==173); +{yymsp[-1].minor.yy144 = yymsp[0].minor.yy144;} break; case 63: /* init_deferred_pred_opt ::= INITIALLY DEFERRED */ case 80: /* ifexists ::= IF EXISTS */ yytestcase(yyruleno==80); - case 217: /* between_op ::= NOT BETWEEN */ yytestcase(yyruleno==217); - case 220: /* in_op ::= NOT IN */ yytestcase(yyruleno==220); - case 245: /* collate ::= COLLATE ID|STRING */ yytestcase(yyruleno==245); -{yymsp[-1].minor.yy394 = 1;} + case 219: /* between_op ::= NOT BETWEEN */ yytestcase(yyruleno==219); + case 222: /* in_op ::= NOT IN */ 
yytestcase(yyruleno==222); + case 247: /* collate ::= COLLATE ID|STRING */ yytestcase(yyruleno==247); +{yymsp[-1].minor.yy144 = 1;} break; case 64: /* init_deferred_pred_opt ::= INITIALLY IMMEDIATE */ -{yymsp[-1].minor.yy394 = 0;} +{yymsp[-1].minor.yy144 = 0;} break; case 66: /* tconscomma ::= COMMA */ {pParse->constraintName.n = 0;} break; case 68: /* tcons ::= PRIMARY KEY LP sortlist autoinc RP onconf */ -{sqlite3AddPrimaryKey(pParse,yymsp[-3].minor.yy322,yymsp[0].minor.yy394,yymsp[-2].minor.yy394,0);} +{sqlite3AddPrimaryKey(pParse,yymsp[-3].minor.yy14,yymsp[0].minor.yy144,yymsp[-2].minor.yy144,0);} break; case 69: /* tcons ::= UNIQUE LP sortlist RP onconf */ -{sqlite3CreateIndex(pParse,0,0,0,yymsp[-2].minor.yy322,yymsp[0].minor.yy394,0,0,0,0, +{sqlite3CreateIndex(pParse,0,0,0,yymsp[-2].minor.yy14,yymsp[0].minor.yy144,0,0,0,0, SQLITE_IDXTYPE_UNIQUE);} break; case 70: /* tcons ::= CHECK LP expr RP onconf */ -{sqlite3AddCheckConstraint(pParse,yymsp[-2].minor.yy528,yymsp[-3].minor.yy0.z,yymsp[-1].minor.yy0.z);} +{sqlite3AddCheckConstraint(pParse,yymsp[-2].minor.yy454,yymsp[-3].minor.yy0.z,yymsp[-1].minor.yy0.z);} break; case 71: /* tcons ::= FOREIGN KEY LP eidlist RP REFERENCES nm eidlist_opt refargs defer_subclause_opt */ { - sqlite3CreateForeignKey(pParse, yymsp[-6].minor.yy322, &yymsp[-3].minor.yy0, yymsp[-2].minor.yy322, yymsp[-1].minor.yy394); - sqlite3DeferForeignKey(pParse, yymsp[0].minor.yy394); + sqlite3CreateForeignKey(pParse, yymsp[-6].minor.yy14, &yymsp[-3].minor.yy0, yymsp[-2].minor.yy14, yymsp[-1].minor.yy144); + sqlite3DeferForeignKey(pParse, yymsp[0].minor.yy144); } break; case 73: /* onconf ::= */ case 75: /* orconf ::= */ yytestcase(yyruleno==75); -{yymsp[1].minor.yy394 = OE_Default;} +{yymsp[1].minor.yy144 = OE_Default;} break; case 74: /* onconf ::= ON CONFLICT resolvetype */ -{yymsp[-2].minor.yy394 = yymsp[0].minor.yy394;} +{yymsp[-2].minor.yy144 = yymsp[0].minor.yy144;} break; case 77: /* resolvetype ::= IGNORE */ -{yymsp[0].minor.yy394 = OE_Ignore;} +{yymsp[0].minor.yy144 = OE_Ignore;} break; case 78: /* resolvetype ::= REPLACE */ - case 172: /* insert_cmd ::= REPLACE */ yytestcase(yyruleno==172); -{yymsp[0].minor.yy394 = OE_Replace;} + case 174: /* insert_cmd ::= REPLACE */ yytestcase(yyruleno==174); +{yymsp[0].minor.yy144 = OE_Replace;} break; case 79: /* cmd ::= DROP TABLE ifexists fullname */ { - sqlite3DropTable(pParse, yymsp[0].minor.yy131, 0, yymsp[-1].minor.yy394); + sqlite3DropTable(pParse, yymsp[0].minor.yy203, 0, yymsp[-1].minor.yy144); } break; case 82: /* cmd ::= createkw temp VIEW ifnotexists nm dbnm eidlist_opt AS select */ { - sqlite3CreateView(pParse, &yymsp[-8].minor.yy0, &yymsp[-4].minor.yy0, &yymsp[-3].minor.yy0, yymsp[-2].minor.yy322, yymsp[0].minor.yy47, yymsp[-7].minor.yy394, yymsp[-5].minor.yy394); + sqlite3CreateView(pParse, &yymsp[-8].minor.yy0, &yymsp[-4].minor.yy0, &yymsp[-3].minor.yy0, yymsp[-2].minor.yy14, yymsp[0].minor.yy555, yymsp[-7].minor.yy144, yymsp[-5].minor.yy144); } break; case 83: /* cmd ::= DROP VIEW ifexists fullname */ { - sqlite3DropTable(pParse, yymsp[0].minor.yy131, 1, yymsp[-1].minor.yy394); + sqlite3DropTable(pParse, yymsp[0].minor.yy203, 1, yymsp[-1].minor.yy144); } break; case 84: /* cmd ::= select */ { SelectDest dest = {SRT_Output, 0, 0, 0, 0, 0, 0}; - sqlite3Select(pParse, yymsp[0].minor.yy47, &dest); - sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy47); + sqlite3Select(pParse, yymsp[0].minor.yy555, &dest); + sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy555); } break; case 85: /* select ::= WITH wqlist 
selectnowith */ -{yymsp[-2].minor.yy47 = attachWithToSelect(pParse,yymsp[0].minor.yy47,yymsp[-1].minor.yy521);} +{yymsp[-2].minor.yy555 = attachWithToSelect(pParse,yymsp[0].minor.yy555,yymsp[-1].minor.yy59);} break; case 86: /* select ::= WITH RECURSIVE wqlist selectnowith */ -{yymsp[-3].minor.yy47 = attachWithToSelect(pParse,yymsp[0].minor.yy47,yymsp[-1].minor.yy521);} +{yymsp[-3].minor.yy555 = attachWithToSelect(pParse,yymsp[0].minor.yy555,yymsp[-1].minor.yy59);} break; case 87: /* select ::= selectnowith */ { - Select *p = yymsp[0].minor.yy47; + Select *p = yymsp[0].minor.yy555; if( p ){ parserDoubleLinkSelect(pParse, p); } @@ -175047,8 +176433,8 @@ static YYACTIONTYPE yy_reduce( break; case 88: /* selectnowith ::= selectnowith multiselect_op oneselect */ { - Select *pRhs = yymsp[0].minor.yy47; - Select *pLhs = yymsp[-2].minor.yy47; + Select *pRhs = yymsp[0].minor.yy555; + Select *pLhs = yymsp[-2].minor.yy555; if( pRhs && pRhs->pPrior ){ SrcList *pFrom; Token x; @@ -175058,148 +176444,145 @@ static YYACTIONTYPE yy_reduce( pRhs = sqlite3SelectNew(pParse,0,pFrom,0,0,0,0,0,0); } if( pRhs ){ - pRhs->op = (u8)yymsp[-1].minor.yy394; + pRhs->op = (u8)yymsp[-1].minor.yy144; pRhs->pPrior = pLhs; if( ALWAYS(pLhs) ) pLhs->selFlags &= ~SF_MultiValue; pRhs->selFlags &= ~SF_MultiValue; - if( yymsp[-1].minor.yy394!=TK_ALL ) pParse->hasCompound = 1; + if( yymsp[-1].minor.yy144!=TK_ALL ) pParse->hasCompound = 1; }else{ sqlite3SelectDelete(pParse->db, pLhs); } - yymsp[-2].minor.yy47 = pRhs; + yymsp[-2].minor.yy555 = pRhs; } break; case 89: /* multiselect_op ::= UNION */ case 91: /* multiselect_op ::= EXCEPT|INTERSECT */ yytestcase(yyruleno==91); -{yymsp[0].minor.yy394 = yymsp[0].major; /*A-overwrites-OP*/} +{yymsp[0].minor.yy144 = yymsp[0].major; /*A-overwrites-OP*/} break; case 90: /* multiselect_op ::= UNION ALL */ -{yymsp[-1].minor.yy394 = TK_ALL;} +{yymsp[-1].minor.yy144 = TK_ALL;} break; case 92: /* oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt */ { - yymsp[-8].minor.yy47 = sqlite3SelectNew(pParse,yymsp[-6].minor.yy322,yymsp[-5].minor.yy131,yymsp[-4].minor.yy528,yymsp[-3].minor.yy322,yymsp[-2].minor.yy528,yymsp[-1].minor.yy322,yymsp[-7].minor.yy394,yymsp[0].minor.yy528); + yymsp[-8].minor.yy555 = sqlite3SelectNew(pParse,yymsp[-6].minor.yy14,yymsp[-5].minor.yy203,yymsp[-4].minor.yy454,yymsp[-3].minor.yy14,yymsp[-2].minor.yy454,yymsp[-1].minor.yy14,yymsp[-7].minor.yy144,yymsp[0].minor.yy454); } break; case 93: /* oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt window_clause orderby_opt limit_opt */ { - yymsp[-9].minor.yy47 = sqlite3SelectNew(pParse,yymsp[-7].minor.yy322,yymsp[-6].minor.yy131,yymsp[-5].minor.yy528,yymsp[-4].minor.yy322,yymsp[-3].minor.yy528,yymsp[-1].minor.yy322,yymsp[-8].minor.yy394,yymsp[0].minor.yy528); - if( yymsp[-9].minor.yy47 ){ - yymsp[-9].minor.yy47->pWinDefn = yymsp[-2].minor.yy41; + yymsp[-9].minor.yy555 = sqlite3SelectNew(pParse,yymsp[-7].minor.yy14,yymsp[-6].minor.yy203,yymsp[-5].minor.yy454,yymsp[-4].minor.yy14,yymsp[-3].minor.yy454,yymsp[-1].minor.yy14,yymsp[-8].minor.yy144,yymsp[0].minor.yy454); + if( yymsp[-9].minor.yy555 ){ + yymsp[-9].minor.yy555->pWinDefn = yymsp[-2].minor.yy211; }else{ - sqlite3WindowListDelete(pParse->db, yymsp[-2].minor.yy41); + sqlite3WindowListDelete(pParse->db, yymsp[-2].minor.yy211); } } break; case 94: /* values ::= VALUES LP nexprlist RP */ { - yymsp[-3].minor.yy47 = sqlite3SelectNew(pParse,yymsp[-1].minor.yy322,0,0,0,0,0,SF_Values,0); + 
yymsp[-3].minor.yy555 = sqlite3SelectNew(pParse,yymsp[-1].minor.yy14,0,0,0,0,0,SF_Values,0); } break; - case 95: /* values ::= values COMMA LP nexprlist RP */ + case 95: /* oneselect ::= mvalues */ { - Select *pRight, *pLeft = yymsp[-4].minor.yy47; - pRight = sqlite3SelectNew(pParse,yymsp[-1].minor.yy322,0,0,0,0,0,SF_Values|SF_MultiValue,0); - if( ALWAYS(pLeft) ) pLeft->selFlags &= ~SF_MultiValue; - if( pRight ){ - pRight->op = TK_ALL; - pRight->pPrior = pLeft; - yymsp[-4].minor.yy47 = pRight; - }else{ - yymsp[-4].minor.yy47 = pLeft; - } + sqlite3MultiValuesEnd(pParse, yymsp[0].minor.yy555); +} + break; + case 96: /* mvalues ::= values COMMA LP nexprlist RP */ + case 97: /* mvalues ::= mvalues COMMA LP nexprlist RP */ yytestcase(yyruleno==97); +{ + yymsp[-4].minor.yy555 = sqlite3MultiValues(pParse, yymsp[-4].minor.yy555, yymsp[-1].minor.yy14); } break; - case 96: /* distinct ::= DISTINCT */ -{yymsp[0].minor.yy394 = SF_Distinct;} + case 98: /* distinct ::= DISTINCT */ +{yymsp[0].minor.yy144 = SF_Distinct;} break; - case 97: /* distinct ::= ALL */ -{yymsp[0].minor.yy394 = SF_All;} + case 99: /* distinct ::= ALL */ +{yymsp[0].minor.yy144 = SF_All;} break; - case 99: /* sclp ::= */ - case 132: /* orderby_opt ::= */ yytestcase(yyruleno==132); - case 142: /* groupby_opt ::= */ yytestcase(yyruleno==142); - case 232: /* exprlist ::= */ yytestcase(yyruleno==232); - case 235: /* paren_exprlist ::= */ yytestcase(yyruleno==235); - case 240: /* eidlist_opt ::= */ yytestcase(yyruleno==240); -{yymsp[1].minor.yy322 = 0;} + case 101: /* sclp ::= */ + case 134: /* orderby_opt ::= */ yytestcase(yyruleno==134); + case 144: /* groupby_opt ::= */ yytestcase(yyruleno==144); + case 234: /* exprlist ::= */ yytestcase(yyruleno==234); + case 237: /* paren_exprlist ::= */ yytestcase(yyruleno==237); + case 242: /* eidlist_opt ::= */ yytestcase(yyruleno==242); +{yymsp[1].minor.yy14 = 0;} break; - case 100: /* selcollist ::= sclp scanpt expr scanpt as */ + case 102: /* selcollist ::= sclp scanpt expr scanpt as */ { - yymsp[-4].minor.yy322 = sqlite3ExprListAppend(pParse, yymsp[-4].minor.yy322, yymsp[-2].minor.yy528); - if( yymsp[0].minor.yy0.n>0 ) sqlite3ExprListSetName(pParse, yymsp[-4].minor.yy322, &yymsp[0].minor.yy0, 1); - sqlite3ExprListSetSpan(pParse,yymsp[-4].minor.yy322,yymsp[-3].minor.yy522,yymsp[-1].minor.yy522); + yymsp[-4].minor.yy14 = sqlite3ExprListAppend(pParse, yymsp[-4].minor.yy14, yymsp[-2].minor.yy454); + if( yymsp[0].minor.yy0.n>0 ) sqlite3ExprListSetName(pParse, yymsp[-4].minor.yy14, &yymsp[0].minor.yy0, 1); + sqlite3ExprListSetSpan(pParse,yymsp[-4].minor.yy14,yymsp[-3].minor.yy168,yymsp[-1].minor.yy168); } break; - case 101: /* selcollist ::= sclp scanpt STAR */ + case 103: /* selcollist ::= sclp scanpt STAR */ { Expr *p = sqlite3Expr(pParse->db, TK_ASTERISK, 0); sqlite3ExprSetErrorOffset(p, (int)(yymsp[0].minor.yy0.z - pParse->zTail)); - yymsp[-2].minor.yy322 = sqlite3ExprListAppend(pParse, yymsp[-2].minor.yy322, p); + yymsp[-2].minor.yy14 = sqlite3ExprListAppend(pParse, yymsp[-2].minor.yy14, p); } break; - case 102: /* selcollist ::= sclp scanpt nm DOT STAR */ + case 104: /* selcollist ::= sclp scanpt nm DOT STAR */ { Expr *pRight, *pLeft, *pDot; pRight = sqlite3PExpr(pParse, TK_ASTERISK, 0, 0); sqlite3ExprSetErrorOffset(pRight, (int)(yymsp[0].minor.yy0.z - pParse->zTail)); pLeft = tokenExpr(pParse, TK_ID, yymsp[-2].minor.yy0); pDot = sqlite3PExpr(pParse, TK_DOT, pLeft, pRight); - yymsp[-4].minor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy322, pDot); + yymsp[-4].minor.yy14 = 
sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy14, pDot); } break; - case 103: /* as ::= AS nm */ - case 115: /* dbnm ::= DOT nm */ yytestcase(yyruleno==115); - case 256: /* plus_num ::= PLUS INTEGER|FLOAT */ yytestcase(yyruleno==256); - case 257: /* minus_num ::= MINUS INTEGER|FLOAT */ yytestcase(yyruleno==257); + case 105: /* as ::= AS nm */ + case 117: /* dbnm ::= DOT nm */ yytestcase(yyruleno==117); + case 258: /* plus_num ::= PLUS INTEGER|FLOAT */ yytestcase(yyruleno==258); + case 259: /* minus_num ::= MINUS INTEGER|FLOAT */ yytestcase(yyruleno==259); {yymsp[-1].minor.yy0 = yymsp[0].minor.yy0;} break; - case 105: /* from ::= */ - case 108: /* stl_prefix ::= */ yytestcase(yyruleno==108); -{yymsp[1].minor.yy131 = 0;} + case 107: /* from ::= */ + case 110: /* stl_prefix ::= */ yytestcase(yyruleno==110); +{yymsp[1].minor.yy203 = 0;} break; - case 106: /* from ::= FROM seltablist */ + case 108: /* from ::= FROM seltablist */ { - yymsp[-1].minor.yy131 = yymsp[0].minor.yy131; - sqlite3SrcListShiftJoinType(pParse,yymsp[-1].minor.yy131); + yymsp[-1].minor.yy203 = yymsp[0].minor.yy203; + sqlite3SrcListShiftJoinType(pParse,yymsp[-1].minor.yy203); } break; - case 107: /* stl_prefix ::= seltablist joinop */ + case 109: /* stl_prefix ::= seltablist joinop */ { - if( ALWAYS(yymsp[-1].minor.yy131 && yymsp[-1].minor.yy131->nSrc>0) ) yymsp[-1].minor.yy131->a[yymsp[-1].minor.yy131->nSrc-1].fg.jointype = (u8)yymsp[0].minor.yy394; + if( ALWAYS(yymsp[-1].minor.yy203 && yymsp[-1].minor.yy203->nSrc>0) ) yymsp[-1].minor.yy203->a[yymsp[-1].minor.yy203->nSrc-1].fg.jointype = (u8)yymsp[0].minor.yy144; } break; - case 109: /* seltablist ::= stl_prefix nm dbnm as on_using */ + case 111: /* seltablist ::= stl_prefix nm dbnm as on_using */ { - yymsp[-4].minor.yy131 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-4].minor.yy131,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,0,&yymsp[0].minor.yy561); + yymsp[-4].minor.yy203 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-4].minor.yy203,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,0,&yymsp[0].minor.yy269); } break; - case 110: /* seltablist ::= stl_prefix nm dbnm as indexed_by on_using */ + case 112: /* seltablist ::= stl_prefix nm dbnm as indexed_by on_using */ { - yymsp[-5].minor.yy131 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-5].minor.yy131,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,0,&yymsp[0].minor.yy561); - sqlite3SrcListIndexedBy(pParse, yymsp[-5].minor.yy131, &yymsp[-1].minor.yy0); + yymsp[-5].minor.yy203 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-5].minor.yy203,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,0,&yymsp[0].minor.yy269); + sqlite3SrcListIndexedBy(pParse, yymsp[-5].minor.yy203, &yymsp[-1].minor.yy0); } break; - case 111: /* seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_using */ + case 113: /* seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_using */ { - yymsp[-7].minor.yy131 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-7].minor.yy131,&yymsp[-6].minor.yy0,&yymsp[-5].minor.yy0,&yymsp[-1].minor.yy0,0,&yymsp[0].minor.yy561); - sqlite3SrcListFuncArgs(pParse, yymsp[-7].minor.yy131, yymsp[-3].minor.yy322); + yymsp[-7].minor.yy203 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-7].minor.yy203,&yymsp[-6].minor.yy0,&yymsp[-5].minor.yy0,&yymsp[-1].minor.yy0,0,&yymsp[0].minor.yy269); + sqlite3SrcListFuncArgs(pParse, yymsp[-7].minor.yy203, yymsp[-3].minor.yy14); } break; - case 112: /* seltablist ::= stl_prefix LP select RP as on_using */ + case 114: /* seltablist ::= stl_prefix LP 
select RP as on_using */ { - yymsp[-5].minor.yy131 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-5].minor.yy131,0,0,&yymsp[-1].minor.yy0,yymsp[-3].minor.yy47,&yymsp[0].minor.yy561); + yymsp[-5].minor.yy203 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-5].minor.yy203,0,0,&yymsp[-1].minor.yy0,yymsp[-3].minor.yy555,&yymsp[0].minor.yy269); } break; - case 113: /* seltablist ::= stl_prefix LP seltablist RP as on_using */ + case 115: /* seltablist ::= stl_prefix LP seltablist RP as on_using */ { - if( yymsp[-5].minor.yy131==0 && yymsp[-1].minor.yy0.n==0 && yymsp[0].minor.yy561.pOn==0 && yymsp[0].minor.yy561.pUsing==0 ){ - yymsp[-5].minor.yy131 = yymsp[-3].minor.yy131; - }else if( ALWAYS(yymsp[-3].minor.yy131!=0) && yymsp[-3].minor.yy131->nSrc==1 ){ - yymsp[-5].minor.yy131 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-5].minor.yy131,0,0,&yymsp[-1].minor.yy0,0,&yymsp[0].minor.yy561); - if( yymsp[-5].minor.yy131 ){ - SrcItem *pNew = &yymsp[-5].minor.yy131->a[yymsp[-5].minor.yy131->nSrc-1]; - SrcItem *pOld = yymsp[-3].minor.yy131->a; + if( yymsp[-5].minor.yy203==0 && yymsp[-1].minor.yy0.n==0 && yymsp[0].minor.yy269.pOn==0 && yymsp[0].minor.yy269.pUsing==0 ){ + yymsp[-5].minor.yy203 = yymsp[-3].minor.yy203; + }else if( ALWAYS(yymsp[-3].minor.yy203!=0) && yymsp[-3].minor.yy203->nSrc==1 ){ + yymsp[-5].minor.yy203 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-5].minor.yy203,0,0,&yymsp[-1].minor.yy0,0,&yymsp[0].minor.yy269); + if( yymsp[-5].minor.yy203 ){ + SrcItem *pNew = &yymsp[-5].minor.yy203->a[yymsp[-5].minor.yy203->nSrc-1]; + SrcItem *pOld = yymsp[-3].minor.yy203->a; pNew->zName = pOld->zName; pNew->zDatabase = pOld->zDatabase; pNew->pSelect = pOld->pSelect; @@ -175215,159 +176598,159 @@ static YYACTIONTYPE yy_reduce( pOld->zName = pOld->zDatabase = 0; pOld->pSelect = 0; } - sqlite3SrcListDelete(pParse->db, yymsp[-3].minor.yy131); + sqlite3SrcListDelete(pParse->db, yymsp[-3].minor.yy203); }else{ Select *pSubquery; - sqlite3SrcListShiftJoinType(pParse,yymsp[-3].minor.yy131); - pSubquery = sqlite3SelectNew(pParse,0,yymsp[-3].minor.yy131,0,0,0,0,SF_NestedFrom,0); - yymsp[-5].minor.yy131 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-5].minor.yy131,0,0,&yymsp[-1].minor.yy0,pSubquery,&yymsp[0].minor.yy561); + sqlite3SrcListShiftJoinType(pParse,yymsp[-3].minor.yy203); + pSubquery = sqlite3SelectNew(pParse,0,yymsp[-3].minor.yy203,0,0,0,0,SF_NestedFrom,0); + yymsp[-5].minor.yy203 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-5].minor.yy203,0,0,&yymsp[-1].minor.yy0,pSubquery,&yymsp[0].minor.yy269); } } break; - case 114: /* dbnm ::= */ - case 129: /* indexed_opt ::= */ yytestcase(yyruleno==129); + case 116: /* dbnm ::= */ + case 131: /* indexed_opt ::= */ yytestcase(yyruleno==131); {yymsp[1].minor.yy0.z=0; yymsp[1].minor.yy0.n=0;} break; - case 116: /* fullname ::= nm */ + case 118: /* fullname ::= nm */ { - yylhsminor.yy131 = sqlite3SrcListAppend(pParse,0,&yymsp[0].minor.yy0,0); - if( IN_RENAME_OBJECT && yylhsminor.yy131 ) sqlite3RenameTokenMap(pParse, yylhsminor.yy131->a[0].zName, &yymsp[0].minor.yy0); + yylhsminor.yy203 = sqlite3SrcListAppend(pParse,0,&yymsp[0].minor.yy0,0); + if( IN_RENAME_OBJECT && yylhsminor.yy203 ) sqlite3RenameTokenMap(pParse, yylhsminor.yy203->a[0].zName, &yymsp[0].minor.yy0); } - yymsp[0].minor.yy131 = yylhsminor.yy131; + yymsp[0].minor.yy203 = yylhsminor.yy203; break; - case 117: /* fullname ::= nm DOT nm */ + case 119: /* fullname ::= nm DOT nm */ { - yylhsminor.yy131 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); - if( IN_RENAME_OBJECT && yylhsminor.yy131 ) 
sqlite3RenameTokenMap(pParse, yylhsminor.yy131->a[0].zName, &yymsp[0].minor.yy0); + yylhsminor.yy203 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); + if( IN_RENAME_OBJECT && yylhsminor.yy203 ) sqlite3RenameTokenMap(pParse, yylhsminor.yy203->a[0].zName, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy131 = yylhsminor.yy131; + yymsp[-2].minor.yy203 = yylhsminor.yy203; break; - case 118: /* xfullname ::= nm */ -{yymsp[0].minor.yy131 = sqlite3SrcListAppend(pParse,0,&yymsp[0].minor.yy0,0); /*A-overwrites-X*/} + case 120: /* xfullname ::= nm */ +{yymsp[0].minor.yy203 = sqlite3SrcListAppend(pParse,0,&yymsp[0].minor.yy0,0); /*A-overwrites-X*/} break; - case 119: /* xfullname ::= nm DOT nm */ -{yymsp[-2].minor.yy131 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-X*/} + case 121: /* xfullname ::= nm DOT nm */ +{yymsp[-2].minor.yy203 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-X*/} break; - case 120: /* xfullname ::= nm DOT nm AS nm */ + case 122: /* xfullname ::= nm DOT nm AS nm */ { - yymsp[-4].minor.yy131 = sqlite3SrcListAppend(pParse,0,&yymsp[-4].minor.yy0,&yymsp[-2].minor.yy0); /*A-overwrites-X*/ - if( yymsp[-4].minor.yy131 ) yymsp[-4].minor.yy131->a[0].zAlias = sqlite3NameFromToken(pParse->db, &yymsp[0].minor.yy0); + yymsp[-4].minor.yy203 = sqlite3SrcListAppend(pParse,0,&yymsp[-4].minor.yy0,&yymsp[-2].minor.yy0); /*A-overwrites-X*/ + if( yymsp[-4].minor.yy203 ) yymsp[-4].minor.yy203->a[0].zAlias = sqlite3NameFromToken(pParse->db, &yymsp[0].minor.yy0); } break; - case 121: /* xfullname ::= nm AS nm */ + case 123: /* xfullname ::= nm AS nm */ { - yymsp[-2].minor.yy131 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,0); /*A-overwrites-X*/ - if( yymsp[-2].minor.yy131 ) yymsp[-2].minor.yy131->a[0].zAlias = sqlite3NameFromToken(pParse->db, &yymsp[0].minor.yy0); + yymsp[-2].minor.yy203 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,0); /*A-overwrites-X*/ + if( yymsp[-2].minor.yy203 ) yymsp[-2].minor.yy203->a[0].zAlias = sqlite3NameFromToken(pParse->db, &yymsp[0].minor.yy0); } break; - case 122: /* joinop ::= COMMA|JOIN */ -{ yymsp[0].minor.yy394 = JT_INNER; } + case 124: /* joinop ::= COMMA|JOIN */ +{ yymsp[0].minor.yy144 = JT_INNER; } break; - case 123: /* joinop ::= JOIN_KW JOIN */ -{yymsp[-1].minor.yy394 = sqlite3JoinType(pParse,&yymsp[-1].minor.yy0,0,0); /*X-overwrites-A*/} + case 125: /* joinop ::= JOIN_KW JOIN */ +{yymsp[-1].minor.yy144 = sqlite3JoinType(pParse,&yymsp[-1].minor.yy0,0,0); /*X-overwrites-A*/} break; - case 124: /* joinop ::= JOIN_KW nm JOIN */ -{yymsp[-2].minor.yy394 = sqlite3JoinType(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,0); /*X-overwrites-A*/} + case 126: /* joinop ::= JOIN_KW nm JOIN */ +{yymsp[-2].minor.yy144 = sqlite3JoinType(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,0); /*X-overwrites-A*/} break; - case 125: /* joinop ::= JOIN_KW nm nm JOIN */ -{yymsp[-3].minor.yy394 = sqlite3JoinType(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0);/*X-overwrites-A*/} + case 127: /* joinop ::= JOIN_KW nm nm JOIN */ +{yymsp[-3].minor.yy144 = sqlite3JoinType(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0);/*X-overwrites-A*/} break; - case 126: /* on_using ::= ON expr */ -{yymsp[-1].minor.yy561.pOn = yymsp[0].minor.yy528; yymsp[-1].minor.yy561.pUsing = 0;} + case 128: /* on_using ::= ON expr */ +{yymsp[-1].minor.yy269.pOn = yymsp[0].minor.yy454; yymsp[-1].minor.yy269.pUsing = 0;} break; - case 127: /* on_using 
::= USING LP idlist RP */ -{yymsp[-3].minor.yy561.pOn = 0; yymsp[-3].minor.yy561.pUsing = yymsp[-1].minor.yy254;} + case 129: /* on_using ::= USING LP idlist RP */ +{yymsp[-3].minor.yy269.pOn = 0; yymsp[-3].minor.yy269.pUsing = yymsp[-1].minor.yy132;} break; - case 128: /* on_using ::= */ -{yymsp[1].minor.yy561.pOn = 0; yymsp[1].minor.yy561.pUsing = 0;} + case 130: /* on_using ::= */ +{yymsp[1].minor.yy269.pOn = 0; yymsp[1].minor.yy269.pUsing = 0;} break; - case 130: /* indexed_by ::= INDEXED BY nm */ + case 132: /* indexed_by ::= INDEXED BY nm */ {yymsp[-2].minor.yy0 = yymsp[0].minor.yy0;} break; - case 131: /* indexed_by ::= NOT INDEXED */ + case 133: /* indexed_by ::= NOT INDEXED */ {yymsp[-1].minor.yy0.z=0; yymsp[-1].minor.yy0.n=1;} break; - case 133: /* orderby_opt ::= ORDER BY sortlist */ - case 143: /* groupby_opt ::= GROUP BY nexprlist */ yytestcase(yyruleno==143); -{yymsp[-2].minor.yy322 = yymsp[0].minor.yy322;} + case 135: /* orderby_opt ::= ORDER BY sortlist */ + case 145: /* groupby_opt ::= GROUP BY nexprlist */ yytestcase(yyruleno==145); +{yymsp[-2].minor.yy14 = yymsp[0].minor.yy14;} break; - case 134: /* sortlist ::= sortlist COMMA expr sortorder nulls */ + case 136: /* sortlist ::= sortlist COMMA expr sortorder nulls */ { - yymsp[-4].minor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy322,yymsp[-2].minor.yy528); - sqlite3ExprListSetSortOrder(yymsp[-4].minor.yy322,yymsp[-1].minor.yy394,yymsp[0].minor.yy394); + yymsp[-4].minor.yy14 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy14,yymsp[-2].minor.yy454); + sqlite3ExprListSetSortOrder(yymsp[-4].minor.yy14,yymsp[-1].minor.yy144,yymsp[0].minor.yy144); } break; - case 135: /* sortlist ::= expr sortorder nulls */ + case 137: /* sortlist ::= expr sortorder nulls */ { - yymsp[-2].minor.yy322 = sqlite3ExprListAppend(pParse,0,yymsp[-2].minor.yy528); /*A-overwrites-Y*/ - sqlite3ExprListSetSortOrder(yymsp[-2].minor.yy322,yymsp[-1].minor.yy394,yymsp[0].minor.yy394); + yymsp[-2].minor.yy14 = sqlite3ExprListAppend(pParse,0,yymsp[-2].minor.yy454); /*A-overwrites-Y*/ + sqlite3ExprListSetSortOrder(yymsp[-2].minor.yy14,yymsp[-1].minor.yy144,yymsp[0].minor.yy144); } break; - case 136: /* sortorder ::= ASC */ -{yymsp[0].minor.yy394 = SQLITE_SO_ASC;} + case 138: /* sortorder ::= ASC */ +{yymsp[0].minor.yy144 = SQLITE_SO_ASC;} break; - case 137: /* sortorder ::= DESC */ -{yymsp[0].minor.yy394 = SQLITE_SO_DESC;} + case 139: /* sortorder ::= DESC */ +{yymsp[0].minor.yy144 = SQLITE_SO_DESC;} break; - case 138: /* sortorder ::= */ - case 141: /* nulls ::= */ yytestcase(yyruleno==141); -{yymsp[1].minor.yy394 = SQLITE_SO_UNDEFINED;} + case 140: /* sortorder ::= */ + case 143: /* nulls ::= */ yytestcase(yyruleno==143); +{yymsp[1].minor.yy144 = SQLITE_SO_UNDEFINED;} break; - case 139: /* nulls ::= NULLS FIRST */ -{yymsp[-1].minor.yy394 = SQLITE_SO_ASC;} + case 141: /* nulls ::= NULLS FIRST */ +{yymsp[-1].minor.yy144 = SQLITE_SO_ASC;} break; - case 140: /* nulls ::= NULLS LAST */ -{yymsp[-1].minor.yy394 = SQLITE_SO_DESC;} + case 142: /* nulls ::= NULLS LAST */ +{yymsp[-1].minor.yy144 = SQLITE_SO_DESC;} break; - case 144: /* having_opt ::= */ - case 146: /* limit_opt ::= */ yytestcase(yyruleno==146); - case 151: /* where_opt ::= */ yytestcase(yyruleno==151); - case 153: /* where_opt_ret ::= */ yytestcase(yyruleno==153); - case 230: /* case_else ::= */ yytestcase(yyruleno==230); - case 231: /* case_operand ::= */ yytestcase(yyruleno==231); - case 250: /* vinto ::= */ yytestcase(yyruleno==250); -{yymsp[1].minor.yy528 = 0;} + case 146: /* having_opt 
::= */ + case 148: /* limit_opt ::= */ yytestcase(yyruleno==148); + case 153: /* where_opt ::= */ yytestcase(yyruleno==153); + case 155: /* where_opt_ret ::= */ yytestcase(yyruleno==155); + case 232: /* case_else ::= */ yytestcase(yyruleno==232); + case 233: /* case_operand ::= */ yytestcase(yyruleno==233); + case 252: /* vinto ::= */ yytestcase(yyruleno==252); +{yymsp[1].minor.yy454 = 0;} break; - case 145: /* having_opt ::= HAVING expr */ - case 152: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==152); - case 154: /* where_opt_ret ::= WHERE expr */ yytestcase(yyruleno==154); - case 229: /* case_else ::= ELSE expr */ yytestcase(yyruleno==229); - case 249: /* vinto ::= INTO expr */ yytestcase(yyruleno==249); -{yymsp[-1].minor.yy528 = yymsp[0].minor.yy528;} + case 147: /* having_opt ::= HAVING expr */ + case 154: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==154); + case 156: /* where_opt_ret ::= WHERE expr */ yytestcase(yyruleno==156); + case 231: /* case_else ::= ELSE expr */ yytestcase(yyruleno==231); + case 251: /* vinto ::= INTO expr */ yytestcase(yyruleno==251); +{yymsp[-1].minor.yy454 = yymsp[0].minor.yy454;} break; - case 147: /* limit_opt ::= LIMIT expr */ -{yymsp[-1].minor.yy528 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[0].minor.yy528,0);} + case 149: /* limit_opt ::= LIMIT expr */ +{yymsp[-1].minor.yy454 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[0].minor.yy454,0);} break; - case 148: /* limit_opt ::= LIMIT expr OFFSET expr */ -{yymsp[-3].minor.yy528 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[-2].minor.yy528,yymsp[0].minor.yy528);} + case 150: /* limit_opt ::= LIMIT expr OFFSET expr */ +{yymsp[-3].minor.yy454 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[-2].minor.yy454,yymsp[0].minor.yy454);} break; - case 149: /* limit_opt ::= LIMIT expr COMMA expr */ -{yymsp[-3].minor.yy528 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[0].minor.yy528,yymsp[-2].minor.yy528);} + case 151: /* limit_opt ::= LIMIT expr COMMA expr */ +{yymsp[-3].minor.yy454 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[0].minor.yy454,yymsp[-2].minor.yy454);} break; - case 150: /* cmd ::= with DELETE FROM xfullname indexed_opt where_opt_ret orderby_opt limit_opt */ + case 152: /* cmd ::= with DELETE FROM xfullname indexed_opt where_opt_ret orderby_opt limit_opt */ { - sqlite3SrcListIndexedBy(pParse, yymsp[-4].minor.yy131, &yymsp[-3].minor.yy0); + sqlite3SrcListIndexedBy(pParse, yymsp[-4].minor.yy203, &yymsp[-3].minor.yy0); #ifndef SQLITE_ENABLE_UPDATE_DELETE_LIMIT - if( yymsp[-1].minor.yy322 || yymsp[0].minor.yy528 ){ - updateDeleteLimitError(pParse,yymsp[-1].minor.yy322,yymsp[0].minor.yy528); - yymsp[-1].minor.yy322 = 0; - yymsp[0].minor.yy528 = 0; + if( yymsp[-1].minor.yy14 || yymsp[0].minor.yy454 ){ + updateDeleteLimitError(pParse,yymsp[-1].minor.yy14,yymsp[0].minor.yy454); + yymsp[-1].minor.yy14 = 0; + yymsp[0].minor.yy454 = 0; } #endif - sqlite3DeleteFrom(pParse,yymsp[-4].minor.yy131,yymsp[-2].minor.yy528,yymsp[-1].minor.yy322,yymsp[0].minor.yy528); + sqlite3DeleteFrom(pParse,yymsp[-4].minor.yy203,yymsp[-2].minor.yy454,yymsp[-1].minor.yy14,yymsp[0].minor.yy454); } break; - case 155: /* where_opt_ret ::= RETURNING selcollist */ -{sqlite3AddReturning(pParse,yymsp[0].minor.yy322); yymsp[-1].minor.yy528 = 0;} + case 157: /* where_opt_ret ::= RETURNING selcollist */ +{sqlite3AddReturning(pParse,yymsp[0].minor.yy14); yymsp[-1].minor.yy454 = 0;} break; - case 156: /* where_opt_ret ::= WHERE expr RETURNING selcollist */ -{sqlite3AddReturning(pParse,yymsp[0].minor.yy322); yymsp[-3].minor.yy528 = yymsp[-2].minor.yy528;} + case 158: /* where_opt_ret ::= 
WHERE expr RETURNING selcollist */ +{sqlite3AddReturning(pParse,yymsp[0].minor.yy14); yymsp[-3].minor.yy454 = yymsp[-2].minor.yy454;} break; - case 157: /* cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt_ret orderby_opt limit_opt */ + case 159: /* cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt_ret orderby_opt limit_opt */ { - sqlite3SrcListIndexedBy(pParse, yymsp[-7].minor.yy131, &yymsp[-6].minor.yy0); - if( yymsp[-3].minor.yy131 ){ - SrcList *pFromClause = yymsp[-3].minor.yy131; + sqlite3SrcListIndexedBy(pParse, yymsp[-7].minor.yy203, &yymsp[-6].minor.yy0); + if( yymsp[-3].minor.yy203 ){ + SrcList *pFromClause = yymsp[-3].minor.yy203; if( pFromClause->nSrc>1 ){ Select *pSubquery; Token as; @@ -175376,100 +176759,100 @@ static YYACTIONTYPE yy_reduce( as.z = 0; pFromClause = sqlite3SrcListAppendFromTerm(pParse,0,0,0,&as,pSubquery,0); } - yymsp[-7].minor.yy131 = sqlite3SrcListAppendList(pParse, yymsp[-7].minor.yy131, pFromClause); + yymsp[-7].minor.yy203 = sqlite3SrcListAppendList(pParse, yymsp[-7].minor.yy203, pFromClause); } - sqlite3ExprListCheckLength(pParse,yymsp[-4].minor.yy322,"set list"); + sqlite3ExprListCheckLength(pParse,yymsp[-4].minor.yy14,"set list"); #ifndef SQLITE_ENABLE_UPDATE_DELETE_LIMIT - if( yymsp[-1].minor.yy322 || yymsp[0].minor.yy528 ){ - updateDeleteLimitError(pParse,yymsp[-1].minor.yy322,yymsp[0].minor.yy528); - yymsp[-1].minor.yy322 = 0; - yymsp[0].minor.yy528 = 0; + if( yymsp[-1].minor.yy14 || yymsp[0].minor.yy454 ){ + updateDeleteLimitError(pParse,yymsp[-1].minor.yy14,yymsp[0].minor.yy454); + yymsp[-1].minor.yy14 = 0; + yymsp[0].minor.yy454 = 0; } #endif - sqlite3Update(pParse,yymsp[-7].minor.yy131,yymsp[-4].minor.yy322,yymsp[-2].minor.yy528,yymsp[-8].minor.yy394,yymsp[-1].minor.yy322,yymsp[0].minor.yy528,0); + sqlite3Update(pParse,yymsp[-7].minor.yy203,yymsp[-4].minor.yy14,yymsp[-2].minor.yy454,yymsp[-8].minor.yy144,yymsp[-1].minor.yy14,yymsp[0].minor.yy454,0); } break; - case 158: /* setlist ::= setlist COMMA nm EQ expr */ + case 160: /* setlist ::= setlist COMMA nm EQ expr */ { - yymsp[-4].minor.yy322 = sqlite3ExprListAppend(pParse, yymsp[-4].minor.yy322, yymsp[0].minor.yy528); - sqlite3ExprListSetName(pParse, yymsp[-4].minor.yy322, &yymsp[-2].minor.yy0, 1); + yymsp[-4].minor.yy14 = sqlite3ExprListAppend(pParse, yymsp[-4].minor.yy14, yymsp[0].minor.yy454); + sqlite3ExprListSetName(pParse, yymsp[-4].minor.yy14, &yymsp[-2].minor.yy0, 1); } break; - case 159: /* setlist ::= setlist COMMA LP idlist RP EQ expr */ + case 161: /* setlist ::= setlist COMMA LP idlist RP EQ expr */ { - yymsp[-6].minor.yy322 = sqlite3ExprListAppendVector(pParse, yymsp[-6].minor.yy322, yymsp[-3].minor.yy254, yymsp[0].minor.yy528); + yymsp[-6].minor.yy14 = sqlite3ExprListAppendVector(pParse, yymsp[-6].minor.yy14, yymsp[-3].minor.yy132, yymsp[0].minor.yy454); } break; - case 160: /* setlist ::= nm EQ expr */ + case 162: /* setlist ::= nm EQ expr */ { - yylhsminor.yy322 = sqlite3ExprListAppend(pParse, 0, yymsp[0].minor.yy528); - sqlite3ExprListSetName(pParse, yylhsminor.yy322, &yymsp[-2].minor.yy0, 1); + yylhsminor.yy14 = sqlite3ExprListAppend(pParse, 0, yymsp[0].minor.yy454); + sqlite3ExprListSetName(pParse, yylhsminor.yy14, &yymsp[-2].minor.yy0, 1); } - yymsp[-2].minor.yy322 = yylhsminor.yy322; + yymsp[-2].minor.yy14 = yylhsminor.yy14; break; - case 161: /* setlist ::= LP idlist RP EQ expr */ + case 163: /* setlist ::= LP idlist RP EQ expr */ { - yymsp[-4].minor.yy322 = sqlite3ExprListAppendVector(pParse, 0, yymsp[-3].minor.yy254, 
yymsp[0].minor.yy528); + yymsp[-4].minor.yy14 = sqlite3ExprListAppendVector(pParse, 0, yymsp[-3].minor.yy132, yymsp[0].minor.yy454); } break; - case 162: /* cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert */ + case 164: /* cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert */ { - sqlite3Insert(pParse, yymsp[-3].minor.yy131, yymsp[-1].minor.yy47, yymsp[-2].minor.yy254, yymsp[-5].minor.yy394, yymsp[0].minor.yy444); + sqlite3Insert(pParse, yymsp[-3].minor.yy203, yymsp[-1].minor.yy555, yymsp[-2].minor.yy132, yymsp[-5].minor.yy144, yymsp[0].minor.yy122); } break; - case 163: /* cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES returning */ + case 165: /* cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES returning */ { - sqlite3Insert(pParse, yymsp[-4].minor.yy131, 0, yymsp[-3].minor.yy254, yymsp[-6].minor.yy394, 0); + sqlite3Insert(pParse, yymsp[-4].minor.yy203, 0, yymsp[-3].minor.yy132, yymsp[-6].minor.yy144, 0); } break; - case 164: /* upsert ::= */ -{ yymsp[1].minor.yy444 = 0; } + case 166: /* upsert ::= */ +{ yymsp[1].minor.yy122 = 0; } break; - case 165: /* upsert ::= RETURNING selcollist */ -{ yymsp[-1].minor.yy444 = 0; sqlite3AddReturning(pParse,yymsp[0].minor.yy322); } + case 167: /* upsert ::= RETURNING selcollist */ +{ yymsp[-1].minor.yy122 = 0; sqlite3AddReturning(pParse,yymsp[0].minor.yy14); } break; - case 166: /* upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt upsert */ -{ yymsp[-11].minor.yy444 = sqlite3UpsertNew(pParse->db,yymsp[-8].minor.yy322,yymsp[-6].minor.yy528,yymsp[-2].minor.yy322,yymsp[-1].minor.yy528,yymsp[0].minor.yy444);} + case 168: /* upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt upsert */ +{ yymsp[-11].minor.yy122 = sqlite3UpsertNew(pParse->db,yymsp[-8].minor.yy14,yymsp[-6].minor.yy454,yymsp[-2].minor.yy14,yymsp[-1].minor.yy454,yymsp[0].minor.yy122);} break; - case 167: /* upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING upsert */ -{ yymsp[-8].minor.yy444 = sqlite3UpsertNew(pParse->db,yymsp[-5].minor.yy322,yymsp[-3].minor.yy528,0,0,yymsp[0].minor.yy444); } + case 169: /* upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING upsert */ +{ yymsp[-8].minor.yy122 = sqlite3UpsertNew(pParse->db,yymsp[-5].minor.yy14,yymsp[-3].minor.yy454,0,0,yymsp[0].minor.yy122); } break; - case 168: /* upsert ::= ON CONFLICT DO NOTHING returning */ -{ yymsp[-4].minor.yy444 = sqlite3UpsertNew(pParse->db,0,0,0,0,0); } + case 170: /* upsert ::= ON CONFLICT DO NOTHING returning */ +{ yymsp[-4].minor.yy122 = sqlite3UpsertNew(pParse->db,0,0,0,0,0); } break; - case 169: /* upsert ::= ON CONFLICT DO UPDATE SET setlist where_opt returning */ -{ yymsp[-7].minor.yy444 = sqlite3UpsertNew(pParse->db,0,0,yymsp[-2].minor.yy322,yymsp[-1].minor.yy528,0);} + case 171: /* upsert ::= ON CONFLICT DO UPDATE SET setlist where_opt returning */ +{ yymsp[-7].minor.yy122 = sqlite3UpsertNew(pParse->db,0,0,yymsp[-2].minor.yy14,yymsp[-1].minor.yy454,0);} break; - case 170: /* returning ::= RETURNING selcollist */ -{sqlite3AddReturning(pParse,yymsp[0].minor.yy322);} + case 172: /* returning ::= RETURNING selcollist */ +{sqlite3AddReturning(pParse,yymsp[0].minor.yy14);} break; - case 173: /* idlist_opt ::= */ -{yymsp[1].minor.yy254 = 0;} + case 175: /* idlist_opt ::= */ +{yymsp[1].minor.yy132 = 0;} break; - case 174: /* idlist_opt ::= LP idlist RP */ -{yymsp[-2].minor.yy254 = yymsp[-1].minor.yy254;} + case 176: /* idlist_opt ::= LP idlist RP */ +{yymsp[-2].minor.yy132 = 
yymsp[-1].minor.yy132;} break; - case 175: /* idlist ::= idlist COMMA nm */ -{yymsp[-2].minor.yy254 = sqlite3IdListAppend(pParse,yymsp[-2].minor.yy254,&yymsp[0].minor.yy0);} + case 177: /* idlist ::= idlist COMMA nm */ +{yymsp[-2].minor.yy132 = sqlite3IdListAppend(pParse,yymsp[-2].minor.yy132,&yymsp[0].minor.yy0);} break; - case 176: /* idlist ::= nm */ -{yymsp[0].minor.yy254 = sqlite3IdListAppend(pParse,0,&yymsp[0].minor.yy0); /*A-overwrites-Y*/} + case 178: /* idlist ::= nm */ +{yymsp[0].minor.yy132 = sqlite3IdListAppend(pParse,0,&yymsp[0].minor.yy0); /*A-overwrites-Y*/} break; - case 177: /* expr ::= LP expr RP */ -{yymsp[-2].minor.yy528 = yymsp[-1].minor.yy528;} + case 179: /* expr ::= LP expr RP */ +{yymsp[-2].minor.yy454 = yymsp[-1].minor.yy454;} break; - case 178: /* expr ::= ID|INDEXED|JOIN_KW */ -{yymsp[0].minor.yy528=tokenExpr(pParse,TK_ID,yymsp[0].minor.yy0); /*A-overwrites-X*/} + case 180: /* expr ::= ID|INDEXED|JOIN_KW */ +{yymsp[0].minor.yy454=tokenExpr(pParse,TK_ID,yymsp[0].minor.yy0); /*A-overwrites-X*/} break; - case 179: /* expr ::= nm DOT nm */ + case 181: /* expr ::= nm DOT nm */ { Expr *temp1 = tokenExpr(pParse,TK_ID,yymsp[-2].minor.yy0); Expr *temp2 = tokenExpr(pParse,TK_ID,yymsp[0].minor.yy0); - yylhsminor.yy528 = sqlite3PExpr(pParse, TK_DOT, temp1, temp2); + yylhsminor.yy454 = sqlite3PExpr(pParse, TK_DOT, temp1, temp2); } - yymsp[-2].minor.yy528 = yylhsminor.yy528; + yymsp[-2].minor.yy454 = yylhsminor.yy454; break; - case 180: /* expr ::= nm DOT nm DOT nm */ + case 182: /* expr ::= nm DOT nm DOT nm */ { Expr *temp1 = tokenExpr(pParse,TK_ID,yymsp[-4].minor.yy0); Expr *temp2 = tokenExpr(pParse,TK_ID,yymsp[-2].minor.yy0); @@ -175478,27 +176861,27 @@ static YYACTIONTYPE yy_reduce( if( IN_RENAME_OBJECT ){ sqlite3RenameTokenRemap(pParse, 0, temp1); } - yylhsminor.yy528 = sqlite3PExpr(pParse, TK_DOT, temp1, temp4); + yylhsminor.yy454 = sqlite3PExpr(pParse, TK_DOT, temp1, temp4); } - yymsp[-4].minor.yy528 = yylhsminor.yy528; + yymsp[-4].minor.yy454 = yylhsminor.yy454; break; - case 181: /* term ::= NULL|FLOAT|BLOB */ - case 182: /* term ::= STRING */ yytestcase(yyruleno==182); -{yymsp[0].minor.yy528=tokenExpr(pParse,yymsp[0].major,yymsp[0].minor.yy0); /*A-overwrites-X*/} + case 183: /* term ::= NULL|FLOAT|BLOB */ + case 184: /* term ::= STRING */ yytestcase(yyruleno==184); +{yymsp[0].minor.yy454=tokenExpr(pParse,yymsp[0].major,yymsp[0].minor.yy0); /*A-overwrites-X*/} break; - case 183: /* term ::= INTEGER */ + case 185: /* term ::= INTEGER */ { - yylhsminor.yy528 = sqlite3ExprAlloc(pParse->db, TK_INTEGER, &yymsp[0].minor.yy0, 1); - if( yylhsminor.yy528 ) yylhsminor.yy528->w.iOfst = (int)(yymsp[0].minor.yy0.z - pParse->zTail); + yylhsminor.yy454 = sqlite3ExprAlloc(pParse->db, TK_INTEGER, &yymsp[0].minor.yy0, 1); + if( yylhsminor.yy454 ) yylhsminor.yy454->w.iOfst = (int)(yymsp[0].minor.yy0.z - pParse->zTail); } - yymsp[0].minor.yy528 = yylhsminor.yy528; + yymsp[0].minor.yy454 = yylhsminor.yy454; break; - case 184: /* expr ::= VARIABLE */ + case 186: /* expr ::= VARIABLE */ { if( !(yymsp[0].minor.yy0.z[0]=='#' && sqlite3Isdigit(yymsp[0].minor.yy0.z[1])) ){ u32 n = yymsp[0].minor.yy0.n; - yymsp[0].minor.yy528 = tokenExpr(pParse, TK_VARIABLE, yymsp[0].minor.yy0); - sqlite3ExprAssignVarNumber(pParse, yymsp[0].minor.yy528, n); + yymsp[0].minor.yy454 = tokenExpr(pParse, TK_VARIABLE, yymsp[0].minor.yy0); + sqlite3ExprAssignVarNumber(pParse, yymsp[0].minor.yy454, n); }else{ /* When doing a nested parse, one can include terms in an expression ** that look like this: #1 #2 ... 
These terms refer to registers @@ -175507,194 +176890,203 @@ static YYACTIONTYPE yy_reduce( assert( t.n>=2 ); if( pParse->nested==0 ){ sqlite3ErrorMsg(pParse, "near \"%T\": syntax error", &t); - yymsp[0].minor.yy528 = 0; + yymsp[0].minor.yy454 = 0; }else{ - yymsp[0].minor.yy528 = sqlite3PExpr(pParse, TK_REGISTER, 0, 0); - if( yymsp[0].minor.yy528 ) sqlite3GetInt32(&t.z[1], &yymsp[0].minor.yy528->iTable); + yymsp[0].minor.yy454 = sqlite3PExpr(pParse, TK_REGISTER, 0, 0); + if( yymsp[0].minor.yy454 ) sqlite3GetInt32(&t.z[1], &yymsp[0].minor.yy454->iTable); } } } break; - case 185: /* expr ::= expr COLLATE ID|STRING */ + case 187: /* expr ::= expr COLLATE ID|STRING */ { - yymsp[-2].minor.yy528 = sqlite3ExprAddCollateToken(pParse, yymsp[-2].minor.yy528, &yymsp[0].minor.yy0, 1); + yymsp[-2].minor.yy454 = sqlite3ExprAddCollateToken(pParse, yymsp[-2].minor.yy454, &yymsp[0].minor.yy0, 1); } break; - case 186: /* expr ::= CAST LP expr AS typetoken RP */ + case 188: /* expr ::= CAST LP expr AS typetoken RP */ { - yymsp[-5].minor.yy528 = sqlite3ExprAlloc(pParse->db, TK_CAST, &yymsp[-1].minor.yy0, 1); - sqlite3ExprAttachSubtrees(pParse->db, yymsp[-5].minor.yy528, yymsp[-3].minor.yy528, 0); + yymsp[-5].minor.yy454 = sqlite3ExprAlloc(pParse->db, TK_CAST, &yymsp[-1].minor.yy0, 1); + sqlite3ExprAttachSubtrees(pParse->db, yymsp[-5].minor.yy454, yymsp[-3].minor.yy454, 0); } break; - case 187: /* expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP */ + case 189: /* expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP */ { - yylhsminor.yy528 = sqlite3ExprFunction(pParse, yymsp[-1].minor.yy322, &yymsp[-4].minor.yy0, yymsp[-2].minor.yy394); + yylhsminor.yy454 = sqlite3ExprFunction(pParse, yymsp[-1].minor.yy14, &yymsp[-4].minor.yy0, yymsp[-2].minor.yy144); } - yymsp[-4].minor.yy528 = yylhsminor.yy528; + yymsp[-4].minor.yy454 = yylhsminor.yy454; break; - case 188: /* expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP */ + case 190: /* expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP */ { - yylhsminor.yy528 = sqlite3ExprFunction(pParse, yymsp[-4].minor.yy322, &yymsp[-7].minor.yy0, yymsp[-5].minor.yy394); - sqlite3ExprAddFunctionOrderBy(pParse, yylhsminor.yy528, yymsp[-1].minor.yy322); + yylhsminor.yy454 = sqlite3ExprFunction(pParse, yymsp[-4].minor.yy14, &yymsp[-7].minor.yy0, yymsp[-5].minor.yy144); + sqlite3ExprAddFunctionOrderBy(pParse, yylhsminor.yy454, yymsp[-1].minor.yy14); } - yymsp[-7].minor.yy528 = yylhsminor.yy528; + yymsp[-7].minor.yy454 = yylhsminor.yy454; break; - case 189: /* expr ::= ID|INDEXED|JOIN_KW LP STAR RP */ + case 191: /* expr ::= ID|INDEXED|JOIN_KW LP STAR RP */ { - yylhsminor.yy528 = sqlite3ExprFunction(pParse, 0, &yymsp[-3].minor.yy0, 0); + yylhsminor.yy454 = sqlite3ExprFunction(pParse, 0, &yymsp[-3].minor.yy0, 0); } - yymsp[-3].minor.yy528 = yylhsminor.yy528; + yymsp[-3].minor.yy454 = yylhsminor.yy454; break; - case 190: /* expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over */ + case 192: /* expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over */ { - yylhsminor.yy528 = sqlite3ExprFunction(pParse, yymsp[-2].minor.yy322, &yymsp[-5].minor.yy0, yymsp[-3].minor.yy394); - sqlite3WindowAttach(pParse, yylhsminor.yy528, yymsp[0].minor.yy41); + yylhsminor.yy454 = sqlite3ExprFunction(pParse, yymsp[-2].minor.yy14, &yymsp[-5].minor.yy0, yymsp[-3].minor.yy144); + sqlite3WindowAttach(pParse, yylhsminor.yy454, yymsp[0].minor.yy211); } - yymsp[-5].minor.yy528 = yylhsminor.yy528; + yymsp[-5].minor.yy454 = yylhsminor.yy454; break; - case 191: /* 
expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP filter_over */ + case 193: /* expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP filter_over */ { - yylhsminor.yy528 = sqlite3ExprFunction(pParse, yymsp[-5].minor.yy322, &yymsp[-8].minor.yy0, yymsp[-6].minor.yy394); - sqlite3WindowAttach(pParse, yylhsminor.yy528, yymsp[0].minor.yy41); - sqlite3ExprAddFunctionOrderBy(pParse, yylhsminor.yy528, yymsp[-2].minor.yy322); + yylhsminor.yy454 = sqlite3ExprFunction(pParse, yymsp[-5].minor.yy14, &yymsp[-8].minor.yy0, yymsp[-6].minor.yy144); + sqlite3WindowAttach(pParse, yylhsminor.yy454, yymsp[0].minor.yy211); + sqlite3ExprAddFunctionOrderBy(pParse, yylhsminor.yy454, yymsp[-2].minor.yy14); } - yymsp[-8].minor.yy528 = yylhsminor.yy528; + yymsp[-8].minor.yy454 = yylhsminor.yy454; break; - case 192: /* expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over */ + case 194: /* expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over */ { - yylhsminor.yy528 = sqlite3ExprFunction(pParse, 0, &yymsp[-4].minor.yy0, 0); - sqlite3WindowAttach(pParse, yylhsminor.yy528, yymsp[0].minor.yy41); + yylhsminor.yy454 = sqlite3ExprFunction(pParse, 0, &yymsp[-4].minor.yy0, 0); + sqlite3WindowAttach(pParse, yylhsminor.yy454, yymsp[0].minor.yy211); } - yymsp[-4].minor.yy528 = yylhsminor.yy528; + yymsp[-4].minor.yy454 = yylhsminor.yy454; break; - case 193: /* term ::= CTIME_KW */ + case 195: /* term ::= CTIME_KW */ { - yylhsminor.yy528 = sqlite3ExprFunction(pParse, 0, &yymsp[0].minor.yy0, 0); + yylhsminor.yy454 = sqlite3ExprFunction(pParse, 0, &yymsp[0].minor.yy0, 0); } - yymsp[0].minor.yy528 = yylhsminor.yy528; + yymsp[0].minor.yy454 = yylhsminor.yy454; break; - case 194: /* expr ::= LP nexprlist COMMA expr RP */ + case 196: /* expr ::= LP nexprlist COMMA expr RP */ { - ExprList *pList = sqlite3ExprListAppend(pParse, yymsp[-3].minor.yy322, yymsp[-1].minor.yy528); - yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_VECTOR, 0, 0); - if( yymsp[-4].minor.yy528 ){ - yymsp[-4].minor.yy528->x.pList = pList; + ExprList *pList = sqlite3ExprListAppend(pParse, yymsp[-3].minor.yy14, yymsp[-1].minor.yy454); + yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_VECTOR, 0, 0); + if( yymsp[-4].minor.yy454 ){ + yymsp[-4].minor.yy454->x.pList = pList; if( ALWAYS(pList->nExpr) ){ - yymsp[-4].minor.yy528->flags |= pList->a[0].pExpr->flags & EP_Propagate; + yymsp[-4].minor.yy454->flags |= pList->a[0].pExpr->flags & EP_Propagate; } }else{ sqlite3ExprListDelete(pParse->db, pList); } } break; - case 195: /* expr ::= expr AND expr */ -{yymsp[-2].minor.yy528=sqlite3ExprAnd(pParse,yymsp[-2].minor.yy528,yymsp[0].minor.yy528);} + case 197: /* expr ::= expr AND expr */ +{yymsp[-2].minor.yy454=sqlite3ExprAnd(pParse,yymsp[-2].minor.yy454,yymsp[0].minor.yy454);} break; - case 196: /* expr ::= expr OR expr */ - case 197: /* expr ::= expr LT|GT|GE|LE expr */ yytestcase(yyruleno==197); - case 198: /* expr ::= expr EQ|NE expr */ yytestcase(yyruleno==198); - case 199: /* expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ yytestcase(yyruleno==199); - case 200: /* expr ::= expr PLUS|MINUS expr */ yytestcase(yyruleno==200); - case 201: /* expr ::= expr STAR|SLASH|REM expr */ yytestcase(yyruleno==201); - case 202: /* expr ::= expr CONCAT expr */ yytestcase(yyruleno==202); -{yymsp[-2].minor.yy528=sqlite3PExpr(pParse,yymsp[-1].major,yymsp[-2].minor.yy528,yymsp[0].minor.yy528);} + case 198: /* expr ::= expr OR expr */ + case 199: /* expr ::= expr LT|GT|GE|LE expr */ yytestcase(yyruleno==199); + case 200: /* expr ::= expr EQ|NE expr */ 
yytestcase(yyruleno==200); + case 201: /* expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ yytestcase(yyruleno==201); + case 202: /* expr ::= expr PLUS|MINUS expr */ yytestcase(yyruleno==202); + case 203: /* expr ::= expr STAR|SLASH|REM expr */ yytestcase(yyruleno==203); + case 204: /* expr ::= expr CONCAT expr */ yytestcase(yyruleno==204); +{yymsp[-2].minor.yy454=sqlite3PExpr(pParse,yymsp[-1].major,yymsp[-2].minor.yy454,yymsp[0].minor.yy454);} break; - case 203: /* likeop ::= NOT LIKE_KW|MATCH */ + case 205: /* likeop ::= NOT LIKE_KW|MATCH */ {yymsp[-1].minor.yy0=yymsp[0].minor.yy0; yymsp[-1].minor.yy0.n|=0x80000000; /*yymsp[-1].minor.yy0-overwrite-yymsp[0].minor.yy0*/} break; - case 204: /* expr ::= expr likeop expr */ + case 206: /* expr ::= expr likeop expr */ { ExprList *pList; int bNot = yymsp[-1].minor.yy0.n & 0x80000000; yymsp[-1].minor.yy0.n &= 0x7fffffff; - pList = sqlite3ExprListAppend(pParse,0, yymsp[0].minor.yy528); - pList = sqlite3ExprListAppend(pParse,pList, yymsp[-2].minor.yy528); - yymsp[-2].minor.yy528 = sqlite3ExprFunction(pParse, pList, &yymsp[-1].minor.yy0, 0); - if( bNot ) yymsp[-2].minor.yy528 = sqlite3PExpr(pParse, TK_NOT, yymsp[-2].minor.yy528, 0); - if( yymsp[-2].minor.yy528 ) yymsp[-2].minor.yy528->flags |= EP_InfixFunc; + pList = sqlite3ExprListAppend(pParse,0, yymsp[0].minor.yy454); + pList = sqlite3ExprListAppend(pParse,pList, yymsp[-2].minor.yy454); + yymsp[-2].minor.yy454 = sqlite3ExprFunction(pParse, pList, &yymsp[-1].minor.yy0, 0); + if( bNot ) yymsp[-2].minor.yy454 = sqlite3PExpr(pParse, TK_NOT, yymsp[-2].minor.yy454, 0); + if( yymsp[-2].minor.yy454 ) yymsp[-2].minor.yy454->flags |= EP_InfixFunc; } break; - case 205: /* expr ::= expr likeop expr ESCAPE expr */ + case 207: /* expr ::= expr likeop expr ESCAPE expr */ { ExprList *pList; int bNot = yymsp[-3].minor.yy0.n & 0x80000000; yymsp[-3].minor.yy0.n &= 0x7fffffff; - pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy528); - pList = sqlite3ExprListAppend(pParse,pList, yymsp[-4].minor.yy528); - pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy528); - yymsp[-4].minor.yy528 = sqlite3ExprFunction(pParse, pList, &yymsp[-3].minor.yy0, 0); - if( bNot ) yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy528, 0); - if( yymsp[-4].minor.yy528 ) yymsp[-4].minor.yy528->flags |= EP_InfixFunc; + pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy454); + pList = sqlite3ExprListAppend(pParse,pList, yymsp[-4].minor.yy454); + pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy454); + yymsp[-4].minor.yy454 = sqlite3ExprFunction(pParse, pList, &yymsp[-3].minor.yy0, 0); + if( bNot ) yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy454, 0); + if( yymsp[-4].minor.yy454 ) yymsp[-4].minor.yy454->flags |= EP_InfixFunc; } break; - case 206: /* expr ::= expr ISNULL|NOTNULL */ -{yymsp[-1].minor.yy528 = sqlite3PExpr(pParse,yymsp[0].major,yymsp[-1].minor.yy528,0);} + case 208: /* expr ::= expr ISNULL|NOTNULL */ +{yymsp[-1].minor.yy454 = sqlite3PExpr(pParse,yymsp[0].major,yymsp[-1].minor.yy454,0);} break; - case 207: /* expr ::= expr NOT NULL */ -{yymsp[-2].minor.yy528 = sqlite3PExpr(pParse,TK_NOTNULL,yymsp[-2].minor.yy528,0);} + case 209: /* expr ::= expr NOT NULL */ +{yymsp[-2].minor.yy454 = sqlite3PExpr(pParse,TK_NOTNULL,yymsp[-2].minor.yy454,0);} break; - case 208: /* expr ::= expr IS expr */ + case 210: /* expr ::= expr IS expr */ { - yymsp[-2].minor.yy528 = sqlite3PExpr(pParse,TK_IS,yymsp[-2].minor.yy528,yymsp[0].minor.yy528); - 
binaryToUnaryIfNull(pParse, yymsp[0].minor.yy528, yymsp[-2].minor.yy528, TK_ISNULL); + yymsp[-2].minor.yy454 = sqlite3PExpr(pParse,TK_IS,yymsp[-2].minor.yy454,yymsp[0].minor.yy454); + binaryToUnaryIfNull(pParse, yymsp[0].minor.yy454, yymsp[-2].minor.yy454, TK_ISNULL); } break; - case 209: /* expr ::= expr IS NOT expr */ + case 211: /* expr ::= expr IS NOT expr */ { - yymsp[-3].minor.yy528 = sqlite3PExpr(pParse,TK_ISNOT,yymsp[-3].minor.yy528,yymsp[0].minor.yy528); - binaryToUnaryIfNull(pParse, yymsp[0].minor.yy528, yymsp[-3].minor.yy528, TK_NOTNULL); + yymsp[-3].minor.yy454 = sqlite3PExpr(pParse,TK_ISNOT,yymsp[-3].minor.yy454,yymsp[0].minor.yy454); + binaryToUnaryIfNull(pParse, yymsp[0].minor.yy454, yymsp[-3].minor.yy454, TK_NOTNULL); } break; - case 210: /* expr ::= expr IS NOT DISTINCT FROM expr */ + case 212: /* expr ::= expr IS NOT DISTINCT FROM expr */ { - yymsp[-5].minor.yy528 = sqlite3PExpr(pParse,TK_IS,yymsp[-5].minor.yy528,yymsp[0].minor.yy528); - binaryToUnaryIfNull(pParse, yymsp[0].minor.yy528, yymsp[-5].minor.yy528, TK_ISNULL); + yymsp[-5].minor.yy454 = sqlite3PExpr(pParse,TK_IS,yymsp[-5].minor.yy454,yymsp[0].minor.yy454); + binaryToUnaryIfNull(pParse, yymsp[0].minor.yy454, yymsp[-5].minor.yy454, TK_ISNULL); } break; - case 211: /* expr ::= expr IS DISTINCT FROM expr */ + case 213: /* expr ::= expr IS DISTINCT FROM expr */ { - yymsp[-4].minor.yy528 = sqlite3PExpr(pParse,TK_ISNOT,yymsp[-4].minor.yy528,yymsp[0].minor.yy528); - binaryToUnaryIfNull(pParse, yymsp[0].minor.yy528, yymsp[-4].minor.yy528, TK_NOTNULL); + yymsp[-4].minor.yy454 = sqlite3PExpr(pParse,TK_ISNOT,yymsp[-4].minor.yy454,yymsp[0].minor.yy454); + binaryToUnaryIfNull(pParse, yymsp[0].minor.yy454, yymsp[-4].minor.yy454, TK_NOTNULL); } break; - case 212: /* expr ::= NOT expr */ - case 213: /* expr ::= BITNOT expr */ yytestcase(yyruleno==213); -{yymsp[-1].minor.yy528 = sqlite3PExpr(pParse, yymsp[-1].major, yymsp[0].minor.yy528, 0);/*A-overwrites-B*/} + case 214: /* expr ::= NOT expr */ + case 215: /* expr ::= BITNOT expr */ yytestcase(yyruleno==215); +{yymsp[-1].minor.yy454 = sqlite3PExpr(pParse, yymsp[-1].major, yymsp[0].minor.yy454, 0);/*A-overwrites-B*/} break; - case 214: /* expr ::= PLUS|MINUS expr */ + case 216: /* expr ::= PLUS|MINUS expr */ { - yymsp[-1].minor.yy528 = sqlite3PExpr(pParse, yymsp[-1].major==TK_PLUS ? 
TK_UPLUS : TK_UMINUS, yymsp[0].minor.yy528, 0); - /*A-overwrites-B*/ + Expr *p = yymsp[0].minor.yy454; + u8 op = yymsp[-1].major + (TK_UPLUS-TK_PLUS); + assert( TK_UPLUS>TK_PLUS ); + assert( TK_UMINUS == TK_MINUS + (TK_UPLUS - TK_PLUS) ); + if( p && p->op==TK_UPLUS ){ + p->op = op; + yymsp[-1].minor.yy454 = p; + }else{ + yymsp[-1].minor.yy454 = sqlite3PExpr(pParse, op, p, 0); + /*A-overwrites-B*/ + } } break; - case 215: /* expr ::= expr PTR expr */ + case 217: /* expr ::= expr PTR expr */ { - ExprList *pList = sqlite3ExprListAppend(pParse, 0, yymsp[-2].minor.yy528); - pList = sqlite3ExprListAppend(pParse, pList, yymsp[0].minor.yy528); - yylhsminor.yy528 = sqlite3ExprFunction(pParse, pList, &yymsp[-1].minor.yy0, 0); + ExprList *pList = sqlite3ExprListAppend(pParse, 0, yymsp[-2].minor.yy454); + pList = sqlite3ExprListAppend(pParse, pList, yymsp[0].minor.yy454); + yylhsminor.yy454 = sqlite3ExprFunction(pParse, pList, &yymsp[-1].minor.yy0, 0); } - yymsp[-2].minor.yy528 = yylhsminor.yy528; + yymsp[-2].minor.yy454 = yylhsminor.yy454; break; - case 216: /* between_op ::= BETWEEN */ - case 219: /* in_op ::= IN */ yytestcase(yyruleno==219); -{yymsp[0].minor.yy394 = 0;} + case 218: /* between_op ::= BETWEEN */ + case 221: /* in_op ::= IN */ yytestcase(yyruleno==221); +{yymsp[0].minor.yy144 = 0;} break; - case 218: /* expr ::= expr between_op expr AND expr */ + case 220: /* expr ::= expr between_op expr AND expr */ { - ExprList *pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy528); - pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy528); - yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_BETWEEN, yymsp[-4].minor.yy528, 0); - if( yymsp[-4].minor.yy528 ){ - yymsp[-4].minor.yy528->x.pList = pList; + ExprList *pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy454); + pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy454); + yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_BETWEEN, yymsp[-4].minor.yy454, 0); + if( yymsp[-4].minor.yy454 ){ + yymsp[-4].minor.yy454->x.pList = pList; }else{ sqlite3ExprListDelete(pParse->db, pList); } - if( yymsp[-3].minor.yy394 ) yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy528, 0); + if( yymsp[-3].minor.yy144 ) yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy454, 0); } break; - case 221: /* expr ::= expr in_op LP exprlist RP */ + case 223: /* expr ::= expr in_op LP exprlist RP */ { - if( yymsp[-1].minor.yy322==0 ){ + if( yymsp[-1].minor.yy14==0 ){ /* Expressions of the form ** ** expr1 IN () @@ -175703,208 +177095,208 @@ static YYACTIONTYPE yy_reduce( ** simplify to constants 0 (false) and 1 (true), respectively, ** regardless of the value of expr1. */ - sqlite3ExprUnmapAndDelete(pParse, yymsp[-4].minor.yy528); - yymsp[-4].minor.yy528 = sqlite3Expr(pParse->db, TK_STRING, yymsp[-3].minor.yy394 ? "true" : "false"); - if( yymsp[-4].minor.yy528 ) sqlite3ExprIdToTrueFalse(yymsp[-4].minor.yy528); - }else{ - Expr *pRHS = yymsp[-1].minor.yy322->a[0].pExpr; - if( yymsp[-1].minor.yy322->nExpr==1 && sqlite3ExprIsConstant(pRHS) && yymsp[-4].minor.yy528->op!=TK_VECTOR ){ - yymsp[-1].minor.yy322->a[0].pExpr = 0; - sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy322); + sqlite3ExprUnmapAndDelete(pParse, yymsp[-4].minor.yy454); + yymsp[-4].minor.yy454 = sqlite3Expr(pParse->db, TK_STRING, yymsp[-3].minor.yy144 ? 
"true" : "false"); + if( yymsp[-4].minor.yy454 ) sqlite3ExprIdToTrueFalse(yymsp[-4].minor.yy454); + }else{ + Expr *pRHS = yymsp[-1].minor.yy14->a[0].pExpr; + if( yymsp[-1].minor.yy14->nExpr==1 && sqlite3ExprIsConstant(pParse,pRHS) && yymsp[-4].minor.yy454->op!=TK_VECTOR ){ + yymsp[-1].minor.yy14->a[0].pExpr = 0; + sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy14); pRHS = sqlite3PExpr(pParse, TK_UPLUS, pRHS, 0); - yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_EQ, yymsp[-4].minor.yy528, pRHS); - }else if( yymsp[-1].minor.yy322->nExpr==1 && pRHS->op==TK_SELECT ){ - yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy528, 0); - sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy528, pRHS->x.pSelect); + yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_EQ, yymsp[-4].minor.yy454, pRHS); + }else if( yymsp[-1].minor.yy14->nExpr==1 && pRHS->op==TK_SELECT ){ + yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy454, 0); + sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy454, pRHS->x.pSelect); pRHS->x.pSelect = 0; - sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy322); - }else{ - yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy528, 0); - if( yymsp[-4].minor.yy528==0 ){ - sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy322); - }else if( yymsp[-4].minor.yy528->pLeft->op==TK_VECTOR ){ - int nExpr = yymsp[-4].minor.yy528->pLeft->x.pList->nExpr; - Select *pSelectRHS = sqlite3ExprListToValues(pParse, nExpr, yymsp[-1].minor.yy322); + sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy14); + }else{ + yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy454, 0); + if( yymsp[-4].minor.yy454==0 ){ + sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy14); + }else if( yymsp[-4].minor.yy454->pLeft->op==TK_VECTOR ){ + int nExpr = yymsp[-4].minor.yy454->pLeft->x.pList->nExpr; + Select *pSelectRHS = sqlite3ExprListToValues(pParse, nExpr, yymsp[-1].minor.yy14); if( pSelectRHS ){ parserDoubleLinkSelect(pParse, pSelectRHS); - sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy528, pSelectRHS); + sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy454, pSelectRHS); } }else{ - yymsp[-4].minor.yy528->x.pList = yymsp[-1].minor.yy322; - sqlite3ExprSetHeightAndFlags(pParse, yymsp[-4].minor.yy528); + yymsp[-4].minor.yy454->x.pList = yymsp[-1].minor.yy14; + sqlite3ExprSetHeightAndFlags(pParse, yymsp[-4].minor.yy454); } } - if( yymsp[-3].minor.yy394 ) yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy528, 0); + if( yymsp[-3].minor.yy144 ) yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy454, 0); } } break; - case 222: /* expr ::= LP select RP */ + case 224: /* expr ::= LP select RP */ { - yymsp[-2].minor.yy528 = sqlite3PExpr(pParse, TK_SELECT, 0, 0); - sqlite3PExprAddSelect(pParse, yymsp[-2].minor.yy528, yymsp[-1].minor.yy47); + yymsp[-2].minor.yy454 = sqlite3PExpr(pParse, TK_SELECT, 0, 0); + sqlite3PExprAddSelect(pParse, yymsp[-2].minor.yy454, yymsp[-1].minor.yy555); } break; - case 223: /* expr ::= expr in_op LP select RP */ + case 225: /* expr ::= expr in_op LP select RP */ { - yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy528, 0); - sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy528, yymsp[-1].minor.yy47); - if( yymsp[-3].minor.yy394 ) yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy528, 0); + yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy454, 0); + sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy454, yymsp[-1].minor.yy555); + if( 
yymsp[-3].minor.yy144 ) yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy454, 0); } break; - case 224: /* expr ::= expr in_op nm dbnm paren_exprlist */ + case 226: /* expr ::= expr in_op nm dbnm paren_exprlist */ { SrcList *pSrc = sqlite3SrcListAppend(pParse, 0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0); Select *pSelect = sqlite3SelectNew(pParse, 0,pSrc,0,0,0,0,0,0); - if( yymsp[0].minor.yy322 ) sqlite3SrcListFuncArgs(pParse, pSelect ? pSrc : 0, yymsp[0].minor.yy322); - yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy528, 0); - sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy528, pSelect); - if( yymsp[-3].minor.yy394 ) yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy528, 0); + if( yymsp[0].minor.yy14 ) sqlite3SrcListFuncArgs(pParse, pSelect ? pSrc : 0, yymsp[0].minor.yy14); + yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy454, 0); + sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy454, pSelect); + if( yymsp[-3].minor.yy144 ) yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy454, 0); } break; - case 225: /* expr ::= EXISTS LP select RP */ + case 227: /* expr ::= EXISTS LP select RP */ { Expr *p; - p = yymsp[-3].minor.yy528 = sqlite3PExpr(pParse, TK_EXISTS, 0, 0); - sqlite3PExprAddSelect(pParse, p, yymsp[-1].minor.yy47); + p = yymsp[-3].minor.yy454 = sqlite3PExpr(pParse, TK_EXISTS, 0, 0); + sqlite3PExprAddSelect(pParse, p, yymsp[-1].minor.yy555); } break; - case 226: /* expr ::= CASE case_operand case_exprlist case_else END */ + case 228: /* expr ::= CASE case_operand case_exprlist case_else END */ { - yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_CASE, yymsp[-3].minor.yy528, 0); - if( yymsp[-4].minor.yy528 ){ - yymsp[-4].minor.yy528->x.pList = yymsp[-1].minor.yy528 ? sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy322,yymsp[-1].minor.yy528) : yymsp[-2].minor.yy322; - sqlite3ExprSetHeightAndFlags(pParse, yymsp[-4].minor.yy528); + yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_CASE, yymsp[-3].minor.yy454, 0); + if( yymsp[-4].minor.yy454 ){ + yymsp[-4].minor.yy454->x.pList = yymsp[-1].minor.yy454 ? 
sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy14,yymsp[-1].minor.yy454) : yymsp[-2].minor.yy14; + sqlite3ExprSetHeightAndFlags(pParse, yymsp[-4].minor.yy454); }else{ - sqlite3ExprListDelete(pParse->db, yymsp[-2].minor.yy322); - sqlite3ExprDelete(pParse->db, yymsp[-1].minor.yy528); + sqlite3ExprListDelete(pParse->db, yymsp[-2].minor.yy14); + sqlite3ExprDelete(pParse->db, yymsp[-1].minor.yy454); } } break; - case 227: /* case_exprlist ::= case_exprlist WHEN expr THEN expr */ + case 229: /* case_exprlist ::= case_exprlist WHEN expr THEN expr */ { - yymsp[-4].minor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy322, yymsp[-2].minor.yy528); - yymsp[-4].minor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy322, yymsp[0].minor.yy528); + yymsp[-4].minor.yy14 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy14, yymsp[-2].minor.yy454); + yymsp[-4].minor.yy14 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy14, yymsp[0].minor.yy454); } break; - case 228: /* case_exprlist ::= WHEN expr THEN expr */ + case 230: /* case_exprlist ::= WHEN expr THEN expr */ { - yymsp[-3].minor.yy322 = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy528); - yymsp[-3].minor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-3].minor.yy322, yymsp[0].minor.yy528); + yymsp[-3].minor.yy14 = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy454); + yymsp[-3].minor.yy14 = sqlite3ExprListAppend(pParse,yymsp[-3].minor.yy14, yymsp[0].minor.yy454); } break; - case 233: /* nexprlist ::= nexprlist COMMA expr */ -{yymsp[-2].minor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy322,yymsp[0].minor.yy528);} + case 235: /* nexprlist ::= nexprlist COMMA expr */ +{yymsp[-2].minor.yy14 = sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy14,yymsp[0].minor.yy454);} break; - case 234: /* nexprlist ::= expr */ -{yymsp[0].minor.yy322 = sqlite3ExprListAppend(pParse,0,yymsp[0].minor.yy528); /*A-overwrites-Y*/} + case 236: /* nexprlist ::= expr */ +{yymsp[0].minor.yy14 = sqlite3ExprListAppend(pParse,0,yymsp[0].minor.yy454); /*A-overwrites-Y*/} break; - case 236: /* paren_exprlist ::= LP exprlist RP */ - case 241: /* eidlist_opt ::= LP eidlist RP */ yytestcase(yyruleno==241); -{yymsp[-2].minor.yy322 = yymsp[-1].minor.yy322;} + case 238: /* paren_exprlist ::= LP exprlist RP */ + case 243: /* eidlist_opt ::= LP eidlist RP */ yytestcase(yyruleno==243); +{yymsp[-2].minor.yy14 = yymsp[-1].minor.yy14;} break; - case 237: /* cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */ + case 239: /* cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */ { sqlite3CreateIndex(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0, - sqlite3SrcListAppend(pParse,0,&yymsp[-4].minor.yy0,0), yymsp[-2].minor.yy322, yymsp[-10].minor.yy394, - &yymsp[-11].minor.yy0, yymsp[0].minor.yy528, SQLITE_SO_ASC, yymsp[-8].minor.yy394, SQLITE_IDXTYPE_APPDEF); + sqlite3SrcListAppend(pParse,0,&yymsp[-4].minor.yy0,0), yymsp[-2].minor.yy14, yymsp[-10].minor.yy144, + &yymsp[-11].minor.yy0, yymsp[0].minor.yy454, SQLITE_SO_ASC, yymsp[-8].minor.yy144, SQLITE_IDXTYPE_APPDEF); if( IN_RENAME_OBJECT && pParse->pNewIndex ){ sqlite3RenameTokenMap(pParse, pParse->pNewIndex->zName, &yymsp[-4].minor.yy0); } } break; - case 238: /* uniqueflag ::= UNIQUE */ - case 280: /* raisetype ::= ABORT */ yytestcase(yyruleno==280); -{yymsp[0].minor.yy394 = OE_Abort;} + case 240: /* uniqueflag ::= UNIQUE */ + case 282: /* raisetype ::= ABORT */ yytestcase(yyruleno==282); +{yymsp[0].minor.yy144 = OE_Abort;} break; - case 239: /* uniqueflag 
::= */ -{yymsp[1].minor.yy394 = OE_None;} + case 241: /* uniqueflag ::= */ +{yymsp[1].minor.yy144 = OE_None;} break; - case 242: /* eidlist ::= eidlist COMMA nm collate sortorder */ + case 244: /* eidlist ::= eidlist COMMA nm collate sortorder */ { - yymsp[-4].minor.yy322 = parserAddExprIdListTerm(pParse, yymsp[-4].minor.yy322, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy394, yymsp[0].minor.yy394); + yymsp[-4].minor.yy14 = parserAddExprIdListTerm(pParse, yymsp[-4].minor.yy14, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy144, yymsp[0].minor.yy144); } break; - case 243: /* eidlist ::= nm collate sortorder */ + case 245: /* eidlist ::= nm collate sortorder */ { - yymsp[-2].minor.yy322 = parserAddExprIdListTerm(pParse, 0, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy394, yymsp[0].minor.yy394); /*A-overwrites-Y*/ + yymsp[-2].minor.yy14 = parserAddExprIdListTerm(pParse, 0, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy144, yymsp[0].minor.yy144); /*A-overwrites-Y*/ } break; - case 246: /* cmd ::= DROP INDEX ifexists fullname */ -{sqlite3DropIndex(pParse, yymsp[0].minor.yy131, yymsp[-1].minor.yy394);} + case 248: /* cmd ::= DROP INDEX ifexists fullname */ +{sqlite3DropIndex(pParse, yymsp[0].minor.yy203, yymsp[-1].minor.yy144);} break; - case 247: /* cmd ::= VACUUM vinto */ -{sqlite3Vacuum(pParse,0,yymsp[0].minor.yy528);} + case 249: /* cmd ::= VACUUM vinto */ +{sqlite3Vacuum(pParse,0,yymsp[0].minor.yy454);} break; - case 248: /* cmd ::= VACUUM nm vinto */ -{sqlite3Vacuum(pParse,&yymsp[-1].minor.yy0,yymsp[0].minor.yy528);} + case 250: /* cmd ::= VACUUM nm vinto */ +{sqlite3Vacuum(pParse,&yymsp[-1].minor.yy0,yymsp[0].minor.yy454);} break; - case 251: /* cmd ::= PRAGMA nm dbnm */ + case 253: /* cmd ::= PRAGMA nm dbnm */ {sqlite3Pragma(pParse,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0,0,0);} break; - case 252: /* cmd ::= PRAGMA nm dbnm EQ nmnum */ + case 254: /* cmd ::= PRAGMA nm dbnm EQ nmnum */ {sqlite3Pragma(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0,0);} break; - case 253: /* cmd ::= PRAGMA nm dbnm LP nmnum RP */ + case 255: /* cmd ::= PRAGMA nm dbnm LP nmnum RP */ {sqlite3Pragma(pParse,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,&yymsp[-1].minor.yy0,0);} break; - case 254: /* cmd ::= PRAGMA nm dbnm EQ minus_num */ + case 256: /* cmd ::= PRAGMA nm dbnm EQ minus_num */ {sqlite3Pragma(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0,1);} break; - case 255: /* cmd ::= PRAGMA nm dbnm LP minus_num RP */ + case 257: /* cmd ::= PRAGMA nm dbnm LP minus_num RP */ {sqlite3Pragma(pParse,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,&yymsp[-1].minor.yy0,1);} break; - case 258: /* cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */ + case 260: /* cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */ { Token all; all.z = yymsp[-3].minor.yy0.z; all.n = (int)(yymsp[0].minor.yy0.z - yymsp[-3].minor.yy0.z) + yymsp[0].minor.yy0.n; - sqlite3FinishTrigger(pParse, yymsp[-1].minor.yy33, &all); + sqlite3FinishTrigger(pParse, yymsp[-1].minor.yy427, &all); } break; - case 259: /* trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */ + case 261: /* trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */ { - sqlite3BeginTrigger(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0, yymsp[-5].minor.yy394, yymsp[-4].minor.yy180.a, yymsp[-4].minor.yy180.b, yymsp[-2].minor.yy131, yymsp[0].minor.yy528, yymsp[-10].minor.yy394, yymsp[-8].minor.yy394); + 
sqlite3BeginTrigger(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0, yymsp[-5].minor.yy144, yymsp[-4].minor.yy286.a, yymsp[-4].minor.yy286.b, yymsp[-2].minor.yy203, yymsp[0].minor.yy454, yymsp[-10].minor.yy144, yymsp[-8].minor.yy144); yymsp[-10].minor.yy0 = (yymsp[-6].minor.yy0.n==0?yymsp[-7].minor.yy0:yymsp[-6].minor.yy0); /*A-overwrites-T*/ } break; - case 260: /* trigger_time ::= BEFORE|AFTER */ -{ yymsp[0].minor.yy394 = yymsp[0].major; /*A-overwrites-X*/ } + case 262: /* trigger_time ::= BEFORE|AFTER */ +{ yymsp[0].minor.yy144 = yymsp[0].major; /*A-overwrites-X*/ } break; - case 261: /* trigger_time ::= INSTEAD OF */ -{ yymsp[-1].minor.yy394 = TK_INSTEAD;} + case 263: /* trigger_time ::= INSTEAD OF */ +{ yymsp[-1].minor.yy144 = TK_INSTEAD;} break; - case 262: /* trigger_time ::= */ -{ yymsp[1].minor.yy394 = TK_BEFORE; } + case 264: /* trigger_time ::= */ +{ yymsp[1].minor.yy144 = TK_BEFORE; } break; - case 263: /* trigger_event ::= DELETE|INSERT */ - case 264: /* trigger_event ::= UPDATE */ yytestcase(yyruleno==264); -{yymsp[0].minor.yy180.a = yymsp[0].major; /*A-overwrites-X*/ yymsp[0].minor.yy180.b = 0;} + case 265: /* trigger_event ::= DELETE|INSERT */ + case 266: /* trigger_event ::= UPDATE */ yytestcase(yyruleno==266); +{yymsp[0].minor.yy286.a = yymsp[0].major; /*A-overwrites-X*/ yymsp[0].minor.yy286.b = 0;} break; - case 265: /* trigger_event ::= UPDATE OF idlist */ -{yymsp[-2].minor.yy180.a = TK_UPDATE; yymsp[-2].minor.yy180.b = yymsp[0].minor.yy254;} + case 267: /* trigger_event ::= UPDATE OF idlist */ +{yymsp[-2].minor.yy286.a = TK_UPDATE; yymsp[-2].minor.yy286.b = yymsp[0].minor.yy132;} break; - case 266: /* when_clause ::= */ - case 285: /* key_opt ::= */ yytestcase(yyruleno==285); -{ yymsp[1].minor.yy528 = 0; } + case 268: /* when_clause ::= */ + case 287: /* key_opt ::= */ yytestcase(yyruleno==287); +{ yymsp[1].minor.yy454 = 0; } break; - case 267: /* when_clause ::= WHEN expr */ - case 286: /* key_opt ::= KEY expr */ yytestcase(yyruleno==286); -{ yymsp[-1].minor.yy528 = yymsp[0].minor.yy528; } + case 269: /* when_clause ::= WHEN expr */ + case 288: /* key_opt ::= KEY expr */ yytestcase(yyruleno==288); +{ yymsp[-1].minor.yy454 = yymsp[0].minor.yy454; } break; - case 268: /* trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */ + case 270: /* trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */ { - assert( yymsp[-2].minor.yy33!=0 ); - yymsp[-2].minor.yy33->pLast->pNext = yymsp[-1].minor.yy33; - yymsp[-2].minor.yy33->pLast = yymsp[-1].minor.yy33; + assert( yymsp[-2].minor.yy427!=0 ); + yymsp[-2].minor.yy427->pLast->pNext = yymsp[-1].minor.yy427; + yymsp[-2].minor.yy427->pLast = yymsp[-1].minor.yy427; } break; - case 269: /* trigger_cmd_list ::= trigger_cmd SEMI */ + case 271: /* trigger_cmd_list ::= trigger_cmd SEMI */ { - assert( yymsp[-1].minor.yy33!=0 ); - yymsp[-1].minor.yy33->pLast = yymsp[-1].minor.yy33; + assert( yymsp[-1].minor.yy427!=0 ); + yymsp[-1].minor.yy427->pLast = yymsp[-1].minor.yy427; } break; - case 270: /* trnm ::= nm DOT nm */ + case 272: /* trnm ::= nm DOT nm */ { yymsp[-2].minor.yy0 = yymsp[0].minor.yy0; sqlite3ErrorMsg(pParse, @@ -175912,367 +177304,377 @@ static YYACTIONTYPE yy_reduce( "statements within triggers"); } break; - case 271: /* tridxby ::= INDEXED BY nm */ + case 273: /* tridxby ::= INDEXED BY nm */ { sqlite3ErrorMsg(pParse, "the INDEXED BY clause is not allowed on UPDATE or DELETE statements " "within triggers"); } break; - case 272: /* tridxby ::= NOT INDEXED */ + case 274: /* tridxby ::= NOT INDEXED */ { 
sqlite3ErrorMsg(pParse, "the NOT INDEXED clause is not allowed on UPDATE or DELETE statements " "within triggers"); } break; - case 273: /* trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */ -{yylhsminor.yy33 = sqlite3TriggerUpdateStep(pParse, &yymsp[-6].minor.yy0, yymsp[-2].minor.yy131, yymsp[-3].minor.yy322, yymsp[-1].minor.yy528, yymsp[-7].minor.yy394, yymsp[-8].minor.yy0.z, yymsp[0].minor.yy522);} - yymsp[-8].minor.yy33 = yylhsminor.yy33; + case 275: /* trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */ +{yylhsminor.yy427 = sqlite3TriggerUpdateStep(pParse, &yymsp[-6].minor.yy0, yymsp[-2].minor.yy203, yymsp[-3].minor.yy14, yymsp[-1].minor.yy454, yymsp[-7].minor.yy144, yymsp[-8].minor.yy0.z, yymsp[0].minor.yy168);} + yymsp[-8].minor.yy427 = yylhsminor.yy427; break; - case 274: /* trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */ + case 276: /* trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */ { - yylhsminor.yy33 = sqlite3TriggerInsertStep(pParse,&yymsp[-4].minor.yy0,yymsp[-3].minor.yy254,yymsp[-2].minor.yy47,yymsp[-6].minor.yy394,yymsp[-1].minor.yy444,yymsp[-7].minor.yy522,yymsp[0].minor.yy522);/*yylhsminor.yy33-overwrites-yymsp[-6].minor.yy394*/ + yylhsminor.yy427 = sqlite3TriggerInsertStep(pParse,&yymsp[-4].minor.yy0,yymsp[-3].minor.yy132,yymsp[-2].minor.yy555,yymsp[-6].minor.yy144,yymsp[-1].minor.yy122,yymsp[-7].minor.yy168,yymsp[0].minor.yy168);/*yylhsminor.yy427-overwrites-yymsp[-6].minor.yy144*/ } - yymsp[-7].minor.yy33 = yylhsminor.yy33; + yymsp[-7].minor.yy427 = yylhsminor.yy427; break; - case 275: /* trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */ -{yylhsminor.yy33 = sqlite3TriggerDeleteStep(pParse, &yymsp[-3].minor.yy0, yymsp[-1].minor.yy528, yymsp[-5].minor.yy0.z, yymsp[0].minor.yy522);} - yymsp[-5].minor.yy33 = yylhsminor.yy33; + case 277: /* trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */ +{yylhsminor.yy427 = sqlite3TriggerDeleteStep(pParse, &yymsp[-3].minor.yy0, yymsp[-1].minor.yy454, yymsp[-5].minor.yy0.z, yymsp[0].minor.yy168);} + yymsp[-5].minor.yy427 = yylhsminor.yy427; break; - case 276: /* trigger_cmd ::= scanpt select scanpt */ -{yylhsminor.yy33 = sqlite3TriggerSelectStep(pParse->db, yymsp[-1].minor.yy47, yymsp[-2].minor.yy522, yymsp[0].minor.yy522); /*yylhsminor.yy33-overwrites-yymsp[-1].minor.yy47*/} - yymsp[-2].minor.yy33 = yylhsminor.yy33; + case 278: /* trigger_cmd ::= scanpt select scanpt */ +{yylhsminor.yy427 = sqlite3TriggerSelectStep(pParse->db, yymsp[-1].minor.yy555, yymsp[-2].minor.yy168, yymsp[0].minor.yy168); /*yylhsminor.yy427-overwrites-yymsp[-1].minor.yy555*/} + yymsp[-2].minor.yy427 = yylhsminor.yy427; break; - case 277: /* expr ::= RAISE LP IGNORE RP */ + case 279: /* expr ::= RAISE LP IGNORE RP */ { - yymsp[-3].minor.yy528 = sqlite3PExpr(pParse, TK_RAISE, 0, 0); - if( yymsp[-3].minor.yy528 ){ - yymsp[-3].minor.yy528->affExpr = OE_Ignore; + yymsp[-3].minor.yy454 = sqlite3PExpr(pParse, TK_RAISE, 0, 0); + if( yymsp[-3].minor.yy454 ){ + yymsp[-3].minor.yy454->affExpr = OE_Ignore; } } break; - case 278: /* expr ::= RAISE LP raisetype COMMA nm RP */ + case 280: /* expr ::= RAISE LP raisetype COMMA nm RP */ { - yymsp[-5].minor.yy528 = sqlite3ExprAlloc(pParse->db, TK_RAISE, &yymsp[-1].minor.yy0, 1); - if( yymsp[-5].minor.yy528 ) { - yymsp[-5].minor.yy528->affExpr = (char)yymsp[-3].minor.yy394; + yymsp[-5].minor.yy454 = sqlite3ExprAlloc(pParse->db, TK_RAISE, &yymsp[-1].minor.yy0, 1); + if( yymsp[-5].minor.yy454 ) { + 
yymsp[-5].minor.yy454->affExpr = (char)yymsp[-3].minor.yy144; } } break; - case 279: /* raisetype ::= ROLLBACK */ -{yymsp[0].minor.yy394 = OE_Rollback;} + case 281: /* raisetype ::= ROLLBACK */ +{yymsp[0].minor.yy144 = OE_Rollback;} break; - case 281: /* raisetype ::= FAIL */ -{yymsp[0].minor.yy394 = OE_Fail;} + case 283: /* raisetype ::= FAIL */ +{yymsp[0].minor.yy144 = OE_Fail;} break; - case 282: /* cmd ::= DROP TRIGGER ifexists fullname */ + case 284: /* cmd ::= DROP TRIGGER ifexists fullname */ { - sqlite3DropTrigger(pParse,yymsp[0].minor.yy131,yymsp[-1].minor.yy394); + sqlite3DropTrigger(pParse,yymsp[0].minor.yy203,yymsp[-1].minor.yy144); } break; - case 283: /* cmd ::= ATTACH database_kw_opt expr AS expr key_opt */ + case 285: /* cmd ::= ATTACH database_kw_opt expr AS expr key_opt */ { - sqlite3Attach(pParse, yymsp[-3].minor.yy528, yymsp[-1].minor.yy528, yymsp[0].minor.yy528); + sqlite3Attach(pParse, yymsp[-3].minor.yy454, yymsp[-1].minor.yy454, yymsp[0].minor.yy454); } break; - case 284: /* cmd ::= DETACH database_kw_opt expr */ + case 286: /* cmd ::= DETACH database_kw_opt expr */ { - sqlite3Detach(pParse, yymsp[0].minor.yy528); + sqlite3Detach(pParse, yymsp[0].minor.yy454); } break; - case 287: /* cmd ::= REINDEX */ + case 289: /* cmd ::= REINDEX */ {sqlite3Reindex(pParse, 0, 0);} break; - case 288: /* cmd ::= REINDEX nm dbnm */ + case 290: /* cmd ::= REINDEX nm dbnm */ {sqlite3Reindex(pParse, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0);} break; - case 289: /* cmd ::= ANALYZE */ + case 291: /* cmd ::= ANALYZE */ {sqlite3Analyze(pParse, 0, 0);} break; - case 290: /* cmd ::= ANALYZE nm dbnm */ + case 292: /* cmd ::= ANALYZE nm dbnm */ {sqlite3Analyze(pParse, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0);} break; - case 291: /* cmd ::= ALTER TABLE fullname RENAME TO nm */ + case 293: /* cmd ::= ALTER TABLE fullname RENAME TO nm */ { - sqlite3AlterRenameTable(pParse,yymsp[-3].minor.yy131,&yymsp[0].minor.yy0); + sqlite3AlterRenameTable(pParse,yymsp[-3].minor.yy203,&yymsp[0].minor.yy0); } break; - case 292: /* cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */ + case 294: /* cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */ { yymsp[-1].minor.yy0.n = (int)(pParse->sLastToken.z-yymsp[-1].minor.yy0.z) + pParse->sLastToken.n; sqlite3AlterFinishAddColumn(pParse, &yymsp[-1].minor.yy0); } break; - case 293: /* cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */ + case 295: /* cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */ { - sqlite3AlterDropColumn(pParse, yymsp[-3].minor.yy131, &yymsp[0].minor.yy0); + sqlite3AlterDropColumn(pParse, yymsp[-3].minor.yy203, &yymsp[0].minor.yy0); } break; - case 294: /* add_column_fullname ::= fullname */ + case 296: /* add_column_fullname ::= fullname */ { disableLookaside(pParse); - sqlite3AlterBeginAddColumn(pParse, yymsp[0].minor.yy131); + sqlite3AlterBeginAddColumn(pParse, yymsp[0].minor.yy203); } break; - case 295: /* cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */ + case 297: /* cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */ { - sqlite3AlterRenameColumn(pParse, yymsp[-5].minor.yy131, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); + sqlite3AlterRenameColumn(pParse, yymsp[-5].minor.yy203, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); } break; - case 296: /* cmd ::= create_vtab */ + case 298: /* cmd ::= create_vtab */ {sqlite3VtabFinishParse(pParse,0);} break; - case 297: /* cmd ::= create_vtab LP vtabarglist RP */ + case 299: /* cmd ::= create_vtab LP vtabarglist RP */ 
{sqlite3VtabFinishParse(pParse,&yymsp[0].minor.yy0);} break; - case 298: /* create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */ + case 300: /* create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */ { - sqlite3VtabBeginParse(pParse, &yymsp[-3].minor.yy0, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, yymsp[-4].minor.yy394); + sqlite3VtabBeginParse(pParse, &yymsp[-3].minor.yy0, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, yymsp[-4].minor.yy144); } break; - case 299: /* vtabarg ::= */ + case 301: /* vtabarg ::= */ {sqlite3VtabArgInit(pParse);} break; - case 300: /* vtabargtoken ::= ANY */ - case 301: /* vtabargtoken ::= lp anylist RP */ yytestcase(yyruleno==301); - case 302: /* lp ::= LP */ yytestcase(yyruleno==302); + case 302: /* vtabargtoken ::= ANY */ + case 303: /* vtabargtoken ::= lp anylist RP */ yytestcase(yyruleno==303); + case 304: /* lp ::= LP */ yytestcase(yyruleno==304); {sqlite3VtabArgExtend(pParse,&yymsp[0].minor.yy0);} break; - case 303: /* with ::= WITH wqlist */ - case 304: /* with ::= WITH RECURSIVE wqlist */ yytestcase(yyruleno==304); -{ sqlite3WithPush(pParse, yymsp[0].minor.yy521, 1); } + case 305: /* with ::= WITH wqlist */ + case 306: /* with ::= WITH RECURSIVE wqlist */ yytestcase(yyruleno==306); +{ sqlite3WithPush(pParse, yymsp[0].minor.yy59, 1); } break; - case 305: /* wqas ::= AS */ -{yymsp[0].minor.yy516 = M10d_Any;} + case 307: /* wqas ::= AS */ +{yymsp[0].minor.yy462 = M10d_Any;} break; - case 306: /* wqas ::= AS MATERIALIZED */ -{yymsp[-1].minor.yy516 = M10d_Yes;} + case 308: /* wqas ::= AS MATERIALIZED */ +{yymsp[-1].minor.yy462 = M10d_Yes;} break; - case 307: /* wqas ::= AS NOT MATERIALIZED */ -{yymsp[-2].minor.yy516 = M10d_No;} + case 309: /* wqas ::= AS NOT MATERIALIZED */ +{yymsp[-2].minor.yy462 = M10d_No;} break; - case 308: /* wqitem ::= nm eidlist_opt wqas LP select RP */ + case 310: /* wqitem ::= withnm eidlist_opt wqas LP select RP */ { - yymsp[-5].minor.yy385 = sqlite3CteNew(pParse, &yymsp[-5].minor.yy0, yymsp[-4].minor.yy322, yymsp[-1].minor.yy47, yymsp[-3].minor.yy516); /*A-overwrites-X*/ + yymsp[-5].minor.yy67 = sqlite3CteNew(pParse, &yymsp[-5].minor.yy0, yymsp[-4].minor.yy14, yymsp[-1].minor.yy555, yymsp[-3].minor.yy462); /*A-overwrites-X*/ } break; - case 309: /* wqlist ::= wqitem */ + case 311: /* withnm ::= nm */ +{pParse->bHasWith = 1;} + break; + case 312: /* wqlist ::= wqitem */ { - yymsp[0].minor.yy521 = sqlite3WithAdd(pParse, 0, yymsp[0].minor.yy385); /*A-overwrites-X*/ + yymsp[0].minor.yy59 = sqlite3WithAdd(pParse, 0, yymsp[0].minor.yy67); /*A-overwrites-X*/ } break; - case 310: /* wqlist ::= wqlist COMMA wqitem */ + case 313: /* wqlist ::= wqlist COMMA wqitem */ { - yymsp[-2].minor.yy521 = sqlite3WithAdd(pParse, yymsp[-2].minor.yy521, yymsp[0].minor.yy385); + yymsp[-2].minor.yy59 = sqlite3WithAdd(pParse, yymsp[-2].minor.yy59, yymsp[0].minor.yy67); } break; - case 311: /* windowdefn_list ::= windowdefn_list COMMA windowdefn */ + case 314: /* windowdefn_list ::= windowdefn_list COMMA windowdefn */ { - assert( yymsp[0].minor.yy41!=0 ); - sqlite3WindowChain(pParse, yymsp[0].minor.yy41, yymsp[-2].minor.yy41); - yymsp[0].minor.yy41->pNextWin = yymsp[-2].minor.yy41; - yylhsminor.yy41 = yymsp[0].minor.yy41; + assert( yymsp[0].minor.yy211!=0 ); + sqlite3WindowChain(pParse, yymsp[0].minor.yy211, yymsp[-2].minor.yy211); + yymsp[0].minor.yy211->pNextWin = yymsp[-2].minor.yy211; + yylhsminor.yy211 = yymsp[0].minor.yy211; } - yymsp[-2].minor.yy41 = yylhsminor.yy41; + yymsp[-2].minor.yy211 = yylhsminor.yy211; break; - case 
312: /* windowdefn ::= nm AS LP window RP */ + case 315: /* windowdefn ::= nm AS LP window RP */ { - if( ALWAYS(yymsp[-1].minor.yy41) ){ - yymsp[-1].minor.yy41->zName = sqlite3DbStrNDup(pParse->db, yymsp[-4].minor.yy0.z, yymsp[-4].minor.yy0.n); + if( ALWAYS(yymsp[-1].minor.yy211) ){ + yymsp[-1].minor.yy211->zName = sqlite3DbStrNDup(pParse->db, yymsp[-4].minor.yy0.z, yymsp[-4].minor.yy0.n); } - yylhsminor.yy41 = yymsp[-1].minor.yy41; + yylhsminor.yy211 = yymsp[-1].minor.yy211; } - yymsp[-4].minor.yy41 = yylhsminor.yy41; + yymsp[-4].minor.yy211 = yylhsminor.yy211; break; - case 313: /* window ::= PARTITION BY nexprlist orderby_opt frame_opt */ + case 316: /* window ::= PARTITION BY nexprlist orderby_opt frame_opt */ { - yymsp[-4].minor.yy41 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy41, yymsp[-2].minor.yy322, yymsp[-1].minor.yy322, 0); + yymsp[-4].minor.yy211 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy211, yymsp[-2].minor.yy14, yymsp[-1].minor.yy14, 0); } break; - case 314: /* window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ + case 317: /* window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ { - yylhsminor.yy41 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy41, yymsp[-2].minor.yy322, yymsp[-1].minor.yy322, &yymsp[-5].minor.yy0); + yylhsminor.yy211 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy211, yymsp[-2].minor.yy14, yymsp[-1].minor.yy14, &yymsp[-5].minor.yy0); } - yymsp[-5].minor.yy41 = yylhsminor.yy41; + yymsp[-5].minor.yy211 = yylhsminor.yy211; break; - case 315: /* window ::= ORDER BY sortlist frame_opt */ + case 318: /* window ::= ORDER BY sortlist frame_opt */ { - yymsp[-3].minor.yy41 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy41, 0, yymsp[-1].minor.yy322, 0); + yymsp[-3].minor.yy211 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy211, 0, yymsp[-1].minor.yy14, 0); } break; - case 316: /* window ::= nm ORDER BY sortlist frame_opt */ + case 319: /* window ::= nm ORDER BY sortlist frame_opt */ { - yylhsminor.yy41 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy41, 0, yymsp[-1].minor.yy322, &yymsp[-4].minor.yy0); + yylhsminor.yy211 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy211, 0, yymsp[-1].minor.yy14, &yymsp[-4].minor.yy0); } - yymsp[-4].minor.yy41 = yylhsminor.yy41; + yymsp[-4].minor.yy211 = yylhsminor.yy211; break; - case 317: /* window ::= nm frame_opt */ + case 320: /* window ::= nm frame_opt */ { - yylhsminor.yy41 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy41, 0, 0, &yymsp[-1].minor.yy0); + yylhsminor.yy211 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy211, 0, 0, &yymsp[-1].minor.yy0); } - yymsp[-1].minor.yy41 = yylhsminor.yy41; + yymsp[-1].minor.yy211 = yylhsminor.yy211; break; - case 318: /* frame_opt ::= */ + case 321: /* frame_opt ::= */ { - yymsp[1].minor.yy41 = sqlite3WindowAlloc(pParse, 0, TK_UNBOUNDED, 0, TK_CURRENT, 0, 0); + yymsp[1].minor.yy211 = sqlite3WindowAlloc(pParse, 0, TK_UNBOUNDED, 0, TK_CURRENT, 0, 0); } break; - case 319: /* frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ + case 322: /* frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ { - yylhsminor.yy41 = sqlite3WindowAlloc(pParse, yymsp[-2].minor.yy394, yymsp[-1].minor.yy595.eType, yymsp[-1].minor.yy595.pExpr, TK_CURRENT, 0, yymsp[0].minor.yy516); + yylhsminor.yy211 = sqlite3WindowAlloc(pParse, yymsp[-2].minor.yy144, yymsp[-1].minor.yy509.eType, yymsp[-1].minor.yy509.pExpr, TK_CURRENT, 0, yymsp[0].minor.yy462); } - yymsp[-2].minor.yy41 = yylhsminor.yy41; + yymsp[-2].minor.yy211 = yylhsminor.yy211; break; - case 320: /* 
frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ + case 323: /* frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ { - yylhsminor.yy41 = sqlite3WindowAlloc(pParse, yymsp[-5].minor.yy394, yymsp[-3].minor.yy595.eType, yymsp[-3].minor.yy595.pExpr, yymsp[-1].minor.yy595.eType, yymsp[-1].minor.yy595.pExpr, yymsp[0].minor.yy516); + yylhsminor.yy211 = sqlite3WindowAlloc(pParse, yymsp[-5].minor.yy144, yymsp[-3].minor.yy509.eType, yymsp[-3].minor.yy509.pExpr, yymsp[-1].minor.yy509.eType, yymsp[-1].minor.yy509.pExpr, yymsp[0].minor.yy462); } - yymsp[-5].minor.yy41 = yylhsminor.yy41; + yymsp[-5].minor.yy211 = yylhsminor.yy211; break; - case 322: /* frame_bound_s ::= frame_bound */ - case 324: /* frame_bound_e ::= frame_bound */ yytestcase(yyruleno==324); -{yylhsminor.yy595 = yymsp[0].minor.yy595;} - yymsp[0].minor.yy595 = yylhsminor.yy595; + case 325: /* frame_bound_s ::= frame_bound */ + case 327: /* frame_bound_e ::= frame_bound */ yytestcase(yyruleno==327); +{yylhsminor.yy509 = yymsp[0].minor.yy509;} + yymsp[0].minor.yy509 = yylhsminor.yy509; break; - case 323: /* frame_bound_s ::= UNBOUNDED PRECEDING */ - case 325: /* frame_bound_e ::= UNBOUNDED FOLLOWING */ yytestcase(yyruleno==325); - case 327: /* frame_bound ::= CURRENT ROW */ yytestcase(yyruleno==327); -{yylhsminor.yy595.eType = yymsp[-1].major; yylhsminor.yy595.pExpr = 0;} - yymsp[-1].minor.yy595 = yylhsminor.yy595; + case 326: /* frame_bound_s ::= UNBOUNDED PRECEDING */ + case 328: /* frame_bound_e ::= UNBOUNDED FOLLOWING */ yytestcase(yyruleno==328); + case 330: /* frame_bound ::= CURRENT ROW */ yytestcase(yyruleno==330); +{yylhsminor.yy509.eType = yymsp[-1].major; yylhsminor.yy509.pExpr = 0;} + yymsp[-1].minor.yy509 = yylhsminor.yy509; break; - case 326: /* frame_bound ::= expr PRECEDING|FOLLOWING */ -{yylhsminor.yy595.eType = yymsp[0].major; yylhsminor.yy595.pExpr = yymsp[-1].minor.yy528;} - yymsp[-1].minor.yy595 = yylhsminor.yy595; + case 329: /* frame_bound ::= expr PRECEDING|FOLLOWING */ +{yylhsminor.yy509.eType = yymsp[0].major; yylhsminor.yy509.pExpr = yymsp[-1].minor.yy454;} + yymsp[-1].minor.yy509 = yylhsminor.yy509; break; - case 328: /* frame_exclude_opt ::= */ -{yymsp[1].minor.yy516 = 0;} + case 331: /* frame_exclude_opt ::= */ +{yymsp[1].minor.yy462 = 0;} break; - case 329: /* frame_exclude_opt ::= EXCLUDE frame_exclude */ -{yymsp[-1].minor.yy516 = yymsp[0].minor.yy516;} + case 332: /* frame_exclude_opt ::= EXCLUDE frame_exclude */ +{yymsp[-1].minor.yy462 = yymsp[0].minor.yy462;} break; - case 330: /* frame_exclude ::= NO OTHERS */ - case 331: /* frame_exclude ::= CURRENT ROW */ yytestcase(yyruleno==331); -{yymsp[-1].minor.yy516 = yymsp[-1].major; /*A-overwrites-X*/} + case 333: /* frame_exclude ::= NO OTHERS */ + case 334: /* frame_exclude ::= CURRENT ROW */ yytestcase(yyruleno==334); +{yymsp[-1].minor.yy462 = yymsp[-1].major; /*A-overwrites-X*/} break; - case 332: /* frame_exclude ::= GROUP|TIES */ -{yymsp[0].minor.yy516 = yymsp[0].major; /*A-overwrites-X*/} + case 335: /* frame_exclude ::= GROUP|TIES */ +{yymsp[0].minor.yy462 = yymsp[0].major; /*A-overwrites-X*/} break; - case 333: /* window_clause ::= WINDOW windowdefn_list */ -{ yymsp[-1].minor.yy41 = yymsp[0].minor.yy41; } + case 336: /* window_clause ::= WINDOW windowdefn_list */ +{ yymsp[-1].minor.yy211 = yymsp[0].minor.yy211; } break; - case 334: /* filter_over ::= filter_clause over_clause */ + case 337: /* filter_over ::= filter_clause over_clause */ { - if( yymsp[0].minor.yy41 ){ - 
yymsp[0].minor.yy41->pFilter = yymsp[-1].minor.yy528; + if( yymsp[0].minor.yy211 ){ + yymsp[0].minor.yy211->pFilter = yymsp[-1].minor.yy454; }else{ - sqlite3ExprDelete(pParse->db, yymsp[-1].minor.yy528); + sqlite3ExprDelete(pParse->db, yymsp[-1].minor.yy454); } - yylhsminor.yy41 = yymsp[0].minor.yy41; + yylhsminor.yy211 = yymsp[0].minor.yy211; } - yymsp[-1].minor.yy41 = yylhsminor.yy41; + yymsp[-1].minor.yy211 = yylhsminor.yy211; break; - case 335: /* filter_over ::= over_clause */ + case 338: /* filter_over ::= over_clause */ { - yylhsminor.yy41 = yymsp[0].minor.yy41; + yylhsminor.yy211 = yymsp[0].minor.yy211; } - yymsp[0].minor.yy41 = yylhsminor.yy41; + yymsp[0].minor.yy211 = yylhsminor.yy211; break; - case 336: /* filter_over ::= filter_clause */ + case 339: /* filter_over ::= filter_clause */ { - yylhsminor.yy41 = (Window*)sqlite3DbMallocZero(pParse->db, sizeof(Window)); - if( yylhsminor.yy41 ){ - yylhsminor.yy41->eFrmType = TK_FILTER; - yylhsminor.yy41->pFilter = yymsp[0].minor.yy528; + yylhsminor.yy211 = (Window*)sqlite3DbMallocZero(pParse->db, sizeof(Window)); + if( yylhsminor.yy211 ){ + yylhsminor.yy211->eFrmType = TK_FILTER; + yylhsminor.yy211->pFilter = yymsp[0].minor.yy454; }else{ - sqlite3ExprDelete(pParse->db, yymsp[0].minor.yy528); + sqlite3ExprDelete(pParse->db, yymsp[0].minor.yy454); } } - yymsp[0].minor.yy41 = yylhsminor.yy41; + yymsp[0].minor.yy211 = yylhsminor.yy211; break; - case 337: /* over_clause ::= OVER LP window RP */ + case 340: /* over_clause ::= OVER LP window RP */ { - yymsp[-3].minor.yy41 = yymsp[-1].minor.yy41; - assert( yymsp[-3].minor.yy41!=0 ); + yymsp[-3].minor.yy211 = yymsp[-1].minor.yy211; + assert( yymsp[-3].minor.yy211!=0 ); } break; - case 338: /* over_clause ::= OVER nm */ + case 341: /* over_clause ::= OVER nm */ { - yymsp[-1].minor.yy41 = (Window*)sqlite3DbMallocZero(pParse->db, sizeof(Window)); - if( yymsp[-1].minor.yy41 ){ - yymsp[-1].minor.yy41->zName = sqlite3DbStrNDup(pParse->db, yymsp[0].minor.yy0.z, yymsp[0].minor.yy0.n); + yymsp[-1].minor.yy211 = (Window*)sqlite3DbMallocZero(pParse->db, sizeof(Window)); + if( yymsp[-1].minor.yy211 ){ + yymsp[-1].minor.yy211->zName = sqlite3DbStrNDup(pParse->db, yymsp[0].minor.yy0.z, yymsp[0].minor.yy0.n); } } break; - case 339: /* filter_clause ::= FILTER LP WHERE expr RP */ -{ yymsp[-4].minor.yy528 = yymsp[-1].minor.yy528; } + case 342: /* filter_clause ::= FILTER LP WHERE expr RP */ +{ yymsp[-4].minor.yy454 = yymsp[-1].minor.yy454; } + break; + case 343: /* term ::= QNUMBER */ +{ + yylhsminor.yy454=tokenExpr(pParse,yymsp[0].major,yymsp[0].minor.yy0); + sqlite3DequoteNumber(pParse, yylhsminor.yy454); +} + yymsp[0].minor.yy454 = yylhsminor.yy454; break; default: - /* (340) input ::= cmdlist */ yytestcase(yyruleno==340); - /* (341) cmdlist ::= cmdlist ecmd */ yytestcase(yyruleno==341); - /* (342) cmdlist ::= ecmd (OPTIMIZED OUT) */ assert(yyruleno!=342); - /* (343) ecmd ::= SEMI */ yytestcase(yyruleno==343); - /* (344) ecmd ::= cmdx SEMI */ yytestcase(yyruleno==344); - /* (345) ecmd ::= explain cmdx SEMI (NEVER REDUCES) */ assert(yyruleno!=345); - /* (346) trans_opt ::= */ yytestcase(yyruleno==346); - /* (347) trans_opt ::= TRANSACTION */ yytestcase(yyruleno==347); - /* (348) trans_opt ::= TRANSACTION nm */ yytestcase(yyruleno==348); - /* (349) savepoint_opt ::= SAVEPOINT */ yytestcase(yyruleno==349); - /* (350) savepoint_opt ::= */ yytestcase(yyruleno==350); - /* (351) cmd ::= create_table create_table_args */ yytestcase(yyruleno==351); - /* (352) table_option_set ::= table_option (OPTIMIZED OUT) */ 
assert(yyruleno!=352); - /* (353) columnlist ::= columnlist COMMA columnname carglist */ yytestcase(yyruleno==353); - /* (354) columnlist ::= columnname carglist */ yytestcase(yyruleno==354); - /* (355) nm ::= ID|INDEXED|JOIN_KW */ yytestcase(yyruleno==355); - /* (356) nm ::= STRING */ yytestcase(yyruleno==356); - /* (357) typetoken ::= typename */ yytestcase(yyruleno==357); - /* (358) typename ::= ID|STRING */ yytestcase(yyruleno==358); - /* (359) signed ::= plus_num (OPTIMIZED OUT) */ assert(yyruleno!=359); - /* (360) signed ::= minus_num (OPTIMIZED OUT) */ assert(yyruleno!=360); - /* (361) carglist ::= carglist ccons */ yytestcase(yyruleno==361); - /* (362) carglist ::= */ yytestcase(yyruleno==362); - /* (363) ccons ::= NULL onconf */ yytestcase(yyruleno==363); - /* (364) ccons ::= GENERATED ALWAYS AS generated */ yytestcase(yyruleno==364); - /* (365) ccons ::= AS generated */ yytestcase(yyruleno==365); - /* (366) conslist_opt ::= COMMA conslist */ yytestcase(yyruleno==366); - /* (367) conslist ::= conslist tconscomma tcons */ yytestcase(yyruleno==367); - /* (368) conslist ::= tcons (OPTIMIZED OUT) */ assert(yyruleno!=368); - /* (369) tconscomma ::= */ yytestcase(yyruleno==369); - /* (370) defer_subclause_opt ::= defer_subclause (OPTIMIZED OUT) */ assert(yyruleno!=370); - /* (371) resolvetype ::= raisetype (OPTIMIZED OUT) */ assert(yyruleno!=371); - /* (372) selectnowith ::= oneselect (OPTIMIZED OUT) */ assert(yyruleno!=372); - /* (373) oneselect ::= values */ yytestcase(yyruleno==373); - /* (374) sclp ::= selcollist COMMA */ yytestcase(yyruleno==374); - /* (375) as ::= ID|STRING */ yytestcase(yyruleno==375); - /* (376) indexed_opt ::= indexed_by (OPTIMIZED OUT) */ assert(yyruleno!=376); - /* (377) returning ::= */ yytestcase(yyruleno==377); - /* (378) expr ::= term (OPTIMIZED OUT) */ assert(yyruleno!=378); - /* (379) likeop ::= LIKE_KW|MATCH */ yytestcase(yyruleno==379); - /* (380) case_operand ::= expr */ yytestcase(yyruleno==380); - /* (381) exprlist ::= nexprlist */ yytestcase(yyruleno==381); - /* (382) nmnum ::= plus_num (OPTIMIZED OUT) */ assert(yyruleno!=382); - /* (383) nmnum ::= nm (OPTIMIZED OUT) */ assert(yyruleno!=383); - /* (384) nmnum ::= ON */ yytestcase(yyruleno==384); - /* (385) nmnum ::= DELETE */ yytestcase(yyruleno==385); - /* (386) nmnum ::= DEFAULT */ yytestcase(yyruleno==386); - /* (387) plus_num ::= INTEGER|FLOAT */ yytestcase(yyruleno==387); - /* (388) foreach_clause ::= */ yytestcase(yyruleno==388); - /* (389) foreach_clause ::= FOR EACH ROW */ yytestcase(yyruleno==389); - /* (390) trnm ::= nm */ yytestcase(yyruleno==390); - /* (391) tridxby ::= */ yytestcase(yyruleno==391); - /* (392) database_kw_opt ::= DATABASE */ yytestcase(yyruleno==392); - /* (393) database_kw_opt ::= */ yytestcase(yyruleno==393); - /* (394) kwcolumn_opt ::= */ yytestcase(yyruleno==394); - /* (395) kwcolumn_opt ::= COLUMNKW */ yytestcase(yyruleno==395); - /* (396) vtabarglist ::= vtabarg */ yytestcase(yyruleno==396); - /* (397) vtabarglist ::= vtabarglist COMMA vtabarg */ yytestcase(yyruleno==397); - /* (398) vtabarg ::= vtabarg vtabargtoken */ yytestcase(yyruleno==398); - /* (399) anylist ::= */ yytestcase(yyruleno==399); - /* (400) anylist ::= anylist LP anylist RP */ yytestcase(yyruleno==400); - /* (401) anylist ::= anylist ANY */ yytestcase(yyruleno==401); - /* (402) with ::= */ yytestcase(yyruleno==402); - /* (403) windowdefn_list ::= windowdefn (OPTIMIZED OUT) */ assert(yyruleno!=403); - /* (404) window ::= frame_opt (OPTIMIZED OUT) */ assert(yyruleno!=404); + /* (344) input ::= 
cmdlist */ yytestcase(yyruleno==344); + /* (345) cmdlist ::= cmdlist ecmd */ yytestcase(yyruleno==345); + /* (346) cmdlist ::= ecmd (OPTIMIZED OUT) */ assert(yyruleno!=346); + /* (347) ecmd ::= SEMI */ yytestcase(yyruleno==347); + /* (348) ecmd ::= cmdx SEMI */ yytestcase(yyruleno==348); + /* (349) ecmd ::= explain cmdx SEMI (NEVER REDUCES) */ assert(yyruleno!=349); + /* (350) trans_opt ::= */ yytestcase(yyruleno==350); + /* (351) trans_opt ::= TRANSACTION */ yytestcase(yyruleno==351); + /* (352) trans_opt ::= TRANSACTION nm */ yytestcase(yyruleno==352); + /* (353) savepoint_opt ::= SAVEPOINT */ yytestcase(yyruleno==353); + /* (354) savepoint_opt ::= */ yytestcase(yyruleno==354); + /* (355) cmd ::= create_table create_table_args */ yytestcase(yyruleno==355); + /* (356) table_option_set ::= table_option (OPTIMIZED OUT) */ assert(yyruleno!=356); + /* (357) columnlist ::= columnlist COMMA columnname carglist */ yytestcase(yyruleno==357); + /* (358) columnlist ::= columnname carglist */ yytestcase(yyruleno==358); + /* (359) nm ::= ID|INDEXED|JOIN_KW */ yytestcase(yyruleno==359); + /* (360) nm ::= STRING */ yytestcase(yyruleno==360); + /* (361) typetoken ::= typename */ yytestcase(yyruleno==361); + /* (362) typename ::= ID|STRING */ yytestcase(yyruleno==362); + /* (363) signed ::= plus_num (OPTIMIZED OUT) */ assert(yyruleno!=363); + /* (364) signed ::= minus_num (OPTIMIZED OUT) */ assert(yyruleno!=364); + /* (365) carglist ::= carglist ccons */ yytestcase(yyruleno==365); + /* (366) carglist ::= */ yytestcase(yyruleno==366); + /* (367) ccons ::= NULL onconf */ yytestcase(yyruleno==367); + /* (368) ccons ::= GENERATED ALWAYS AS generated */ yytestcase(yyruleno==368); + /* (369) ccons ::= AS generated */ yytestcase(yyruleno==369); + /* (370) conslist_opt ::= COMMA conslist */ yytestcase(yyruleno==370); + /* (371) conslist ::= conslist tconscomma tcons */ yytestcase(yyruleno==371); + /* (372) conslist ::= tcons (OPTIMIZED OUT) */ assert(yyruleno!=372); + /* (373) tconscomma ::= */ yytestcase(yyruleno==373); + /* (374) defer_subclause_opt ::= defer_subclause (OPTIMIZED OUT) */ assert(yyruleno!=374); + /* (375) resolvetype ::= raisetype (OPTIMIZED OUT) */ assert(yyruleno!=375); + /* (376) selectnowith ::= oneselect (OPTIMIZED OUT) */ assert(yyruleno!=376); + /* (377) oneselect ::= values */ yytestcase(yyruleno==377); + /* (378) sclp ::= selcollist COMMA */ yytestcase(yyruleno==378); + /* (379) as ::= ID|STRING */ yytestcase(yyruleno==379); + /* (380) indexed_opt ::= indexed_by (OPTIMIZED OUT) */ assert(yyruleno!=380); + /* (381) returning ::= */ yytestcase(yyruleno==381); + /* (382) expr ::= term (OPTIMIZED OUT) */ assert(yyruleno!=382); + /* (383) likeop ::= LIKE_KW|MATCH */ yytestcase(yyruleno==383); + /* (384) case_operand ::= expr */ yytestcase(yyruleno==384); + /* (385) exprlist ::= nexprlist */ yytestcase(yyruleno==385); + /* (386) nmnum ::= plus_num (OPTIMIZED OUT) */ assert(yyruleno!=386); + /* (387) nmnum ::= nm (OPTIMIZED OUT) */ assert(yyruleno!=387); + /* (388) nmnum ::= ON */ yytestcase(yyruleno==388); + /* (389) nmnum ::= DELETE */ yytestcase(yyruleno==389); + /* (390) nmnum ::= DEFAULT */ yytestcase(yyruleno==390); + /* (391) plus_num ::= INTEGER|FLOAT */ yytestcase(yyruleno==391); + /* (392) foreach_clause ::= */ yytestcase(yyruleno==392); + /* (393) foreach_clause ::= FOR EACH ROW */ yytestcase(yyruleno==393); + /* (394) trnm ::= nm */ yytestcase(yyruleno==394); + /* (395) tridxby ::= */ yytestcase(yyruleno==395); + /* (396) database_kw_opt ::= DATABASE */ yytestcase(yyruleno==396); 
+ /* (397) database_kw_opt ::= */ yytestcase(yyruleno==397); + /* (398) kwcolumn_opt ::= */ yytestcase(yyruleno==398); + /* (399) kwcolumn_opt ::= COLUMNKW */ yytestcase(yyruleno==399); + /* (400) vtabarglist ::= vtabarg */ yytestcase(yyruleno==400); + /* (401) vtabarglist ::= vtabarglist COMMA vtabarg */ yytestcase(yyruleno==401); + /* (402) vtabarg ::= vtabarg vtabargtoken */ yytestcase(yyruleno==402); + /* (403) anylist ::= */ yytestcase(yyruleno==403); + /* (404) anylist ::= anylist LP anylist RP */ yytestcase(yyruleno==404); + /* (405) anylist ::= anylist ANY */ yytestcase(yyruleno==405); + /* (406) with ::= */ yytestcase(yyruleno==406); + /* (407) windowdefn_list ::= windowdefn (OPTIMIZED OUT) */ assert(yyruleno!=407); + /* (408) window ::= frame_opt (OPTIMIZED OUT) */ assert(yyruleno!=408); break; /********** End reduce actions ************************************************/ }; @@ -176459,19 +177861,12 @@ SQLITE_PRIVATE void sqlite3Parser( (int)(yypParser->yytos - yypParser->yystack)); } #endif -#if YYSTACKDEPTH>0 if( yypParser->yytos>=yypParser->yystackEnd ){ - yyStackOverflow(yypParser); - break; - } -#else - if( yypParser->yytos>=&yypParser->yystack[yypParser->yystksz-1] ){ if( yyGrowStack(yypParser) ){ yyStackOverflow(yypParser); break; } } -#endif } yyact = yy_reduce(yypParser,yyruleno,yymajor,yyminor sqlite3ParserCTX_PARAM); }else if( yyact <= YY_MAX_SHIFTREDUCE ){ @@ -177542,27 +178937,58 @@ SQLITE_PRIVATE int sqlite3GetToken(const unsigned char *z, int *tokenType){ *tokenType = TK_INTEGER; #ifndef SQLITE_OMIT_HEX_INTEGER if( z[0]=='0' && (z[1]=='x' || z[1]=='X') && sqlite3Isxdigit(z[2]) ){ - for(i=3; sqlite3Isxdigit(z[i]); i++){} - return i; - } + for(i=3; 1; i++){ + if( sqlite3Isxdigit(z[i])==0 ){ + if( z[i]==SQLITE_DIGIT_SEPARATOR ){ + *tokenType = TK_QNUMBER; + }else{ + break; + } + } + } + }else #endif - for(i=0; sqlite3Isdigit(z[i]); i++){} + { + for(i=0; 1; i++){ + if( sqlite3Isdigit(z[i])==0 ){ + if( z[i]==SQLITE_DIGIT_SEPARATOR ){ + *tokenType = TK_QNUMBER; + }else{ + break; + } + } + } #ifndef SQLITE_OMIT_FLOATING_POINT - if( z[i]=='.' ){ - i++; - while( sqlite3Isdigit(z[i]) ){ i++; } - *tokenType = TK_FLOAT; - } - if( (z[i]=='e' || z[i]=='E') && - ( sqlite3Isdigit(z[i+1]) - || ((z[i+1]=='+' || z[i+1]=='-') && sqlite3Isdigit(z[i+2])) - ) - ){ - i += 2; - while( sqlite3Isdigit(z[i]) ){ i++; } - *tokenType = TK_FLOAT; - } + if( z[i]=='.' 
){
+      if( *tokenType==TK_INTEGER ) *tokenType = TK_FLOAT;
+      for(i++; 1; i++){
+        if( sqlite3Isdigit(z[i])==0 ){
+          if( z[i]==SQLITE_DIGIT_SEPARATOR ){
+            *tokenType = TK_QNUMBER;
+          }else{
+            break;
+          }
+        }
+      }
+    }
+    if( (z[i]=='e' || z[i]=='E') &&
+        ( sqlite3Isdigit(z[i+1])
+         || ((z[i+1]=='+' || z[i+1]=='-') && sqlite3Isdigit(z[i+2]))
+        )
+    ){
+      if( *tokenType==TK_INTEGER ) *tokenType = TK_FLOAT;
+      for(i+=2; 1; i++){
+        if( sqlite3Isdigit(z[i])==0 ){
+          if( z[i]==SQLITE_DIGIT_SEPARATOR ){
+            *tokenType = TK_QNUMBER;
+          }else{
+            break;
+          }
+        }
+      }
+    }
 #endif
+  }
   while( IdChar(z[i]) ){
     *tokenType = TK_ILLEGAL;
     i++;
@@ -177727,10 +179153,13 @@ SQLITE_PRIVATE int sqlite3RunParser(Parse *pParse, const char *zSql){
     if( tokenType>=TK_WINDOW ){
       assert( tokenType==TK_SPACE || tokenType==TK_OVER || tokenType==TK_FILTER
           || tokenType==TK_ILLEGAL || tokenType==TK_WINDOW
+          || tokenType==TK_QNUMBER
       );
 #else
     if( tokenType>=TK_SPACE ){
-      assert( tokenType==TK_SPACE || tokenType==TK_ILLEGAL );
+      assert( tokenType==TK_SPACE || tokenType==TK_ILLEGAL
+          || tokenType==TK_QNUMBER
+      );
 #endif /* SQLITE_OMIT_WINDOWFUNC */
       if( AtomicLoad(&db->u1.isInterrupted) ){
         pParse->rc = SQLITE_INTERRUPT;
@@ -177763,7 +179192,7 @@ SQLITE_PRIVATE int sqlite3RunParser(Parse *pParse, const char *zSql){
         assert( n==6 );
         tokenType = analyzeFilterKeyword((const u8*)&zSql[6], lastTokenParsed);
 #endif /* SQLITE_OMIT_WINDOWFUNC */
-      }else{
+      }else if( tokenType!=TK_QNUMBER ){
         Token x;
         x.z = zSql;
         x.n = n;
@@ -188758,22 +190187,24 @@ static int fts3IntegrityMethod(
   char **pzErr /* Write error message here */
 ){
   Fts3Table *p = (Fts3Table*)pVtab;
-  int rc;
+  int rc = SQLITE_OK;
   int bOk = 0;
   UNUSED_PARAMETER(isQuick);
   rc = sqlite3Fts3IntegrityCheck(p, &bOk);
-  assert( rc!=SQLITE_CORRUPT_VTAB || bOk==0 );
-  if( rc!=SQLITE_OK && rc!=SQLITE_CORRUPT_VTAB ){
+  assert( rc!=SQLITE_CORRUPT_VTAB );
+  if( rc==SQLITE_ERROR || (rc&0xFF)==SQLITE_CORRUPT ){
     *pzErr = sqlite3_mprintf("unable to validate the inverted index for"
                              " FTS%d table %s.%s: %s",
         p->bFts4 ? 4 : 3, zSchema, zTabname, sqlite3_errstr(rc));
+    if( *pzErr ) rc = SQLITE_OK;
-  }else if( bOk==0 ){
+  }else if( rc==SQLITE_OK && bOk==0 ){
     *pzErr = sqlite3_mprintf("malformed inverted index for FTS%d table %s.%s",
         p->bFts4 ? 4 : 3, zSchema, zTabname);
+    if( *pzErr==0 ) rc = SQLITE_NOMEM;
   }
   sqlite3Fts3SegmentsClose(p);
-  return SQLITE_OK;
+  return rc;
 }
@@ -200435,7 +201866,12 @@ SQLITE_PRIVATE int sqlite3Fts3IntegrityCheck(Fts3Table *p, int *pbOk){
     sqlite3_finalize(pStmt);
   }
-  *pbOk = (rc==SQLITE_OK && cksum1==cksum2);
+  if( rc==SQLITE_CORRUPT_VTAB ){
+    rc = SQLITE_OK;
+    *pbOk = 0;
+  }else{
+    *pbOk = (rc==SQLITE_OK && cksum1==cksum2);
+  }
   return rc;
 }
@@ -201341,7 +202777,7 @@ static void fts3SnippetDetails(
     }
     mCover |= mPhrase;
-    for(j=0; j<pPhrase->nToken; j++){
+    for(j=0; j<pPhrase->nToken && j<nSnippet; j++){
       mHighlight |= (mPos>>j);
     }
@@ -204002,7 +205438,6 @@ static void jsonAppendRawNZ(JsonString *p, const char *zIn, u32 N){
   }
 }
-
 /* Append formatted text (not to exceed N bytes) to the JsonString.
 */
 static void jsonPrintf(int N, JsonString *p, const char *zFormat, ...){
@@ -204060,6 +205495,40 @@ static void jsonAppendSeparator(JsonString *p){
   jsonAppendChar(p, ',');
 }
+/* c is a control character. Append the canonical JSON representation
+** of that control character to p.
+**
+** This routine assumes that the output buffer has already been enlarged
+** sufficiently to hold the worst-case encoding plus a nul terminator.
+*/
+static void jsonAppendControlChar(JsonString *p, u8 c){
+  static const char aSpecial[] = {
+     0, 0, 0, 0, 0, 0, 0, 0, 'b', 't', 'n', 0, 'f', 'r', 0, 0,
+     0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+  };
+  assert( sizeof(aSpecial)==32 );
+  assert( aSpecial['\b']=='b' );
+  assert( aSpecial['\f']=='f' );
+  assert( aSpecial['\n']=='n' );
+  assert( aSpecial['\r']=='r' );
+  assert( aSpecial['\t']=='t' );
+  assert( c>=0 && c<0x20 );
+  assert( p->nUsed+7 <= p->nAlloc );
+  if( aSpecial[c] ){
+    p->zBuf[p->nUsed] = '\\';
+    p->zBuf[p->nUsed+1] = aSpecial[c];
+    p->nUsed += 2;
+  }else{
+    p->zBuf[p->nUsed] = '\\';
+    p->zBuf[p->nUsed+1] = 'u';
+    p->zBuf[p->nUsed+2] = '0';
+    p->zBuf[p->nUsed+3] = '0';
+    p->zBuf[p->nUsed+4] = "0123456789abcdef"[c>>4];
+    p->zBuf[p->nUsed+5] = "0123456789abcdef"[c&0xf];
+    p->nUsed += 6;
+  }
+}
+
 /* Append the N-byte string in zIn to the end of the JsonString string
 ** under construction. Enclose the string in double-quotes ("...") and
 ** escape any double-quotes or backslash characters contained within the
@@ -204119,35 +205588,14 @@ static void jsonAppendString(JsonString *p, const char *zIn, u32 N){
     }
     c = z[0];
     if( c=='"' || c=='\\' ){
-      json_simple_escape:
       if( (p->nUsed+N+3 > p->nAlloc) && jsonStringGrow(p,N+3)!=0 ) return;
       p->zBuf[p->nUsed++] = '\\';
       p->zBuf[p->nUsed++] = c;
     }else if( c=='\'' ){
       p->zBuf[p->nUsed++] = c;
     }else{
-      static const char aSpecial[] = {
-         0, 0, 0, 0, 0, 0, 0, 0, 'b', 't', 'n', 0, 'f', 'r', 0, 0,
-         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
-      };
-      assert( sizeof(aSpecial)==32 );
-      assert( aSpecial['\b']=='b' );
-      assert( aSpecial['\f']=='f' );
-      assert( aSpecial['\n']=='n' );
-      assert( aSpecial['\r']=='r' );
-      assert( aSpecial['\t']=='t' );
-      assert( c>=0 && c<sizeof(aSpecial) );
-      if( aSpecial[c] ){
-        c = aSpecial[c];
-        goto json_simple_escape;
-      }
-      if( (p->nUsed+N+7 > p->nAlloc) && jsonStringGrow(p,N+7)!=0 ) return;
-      p->zBuf[p->nUsed++] = '\\';
-      p->zBuf[p->nUsed++] = 'u';
-      p->zBuf[p->nUsed++] = '0';
-      p->zBuf[p->nUsed++] = '0';
-      p->zBuf[p->nUsed++] = "0123456789abcdef"[c>>4];
-      p->zBuf[p->nUsed++] = "0123456789abcdef"[c&0xf];
+      jsonAppendControlChar(p, c);
     }
     z++;
     N--;
@@ -204848,7 +206296,10 @@ static u32 jsonbValidityCheck(
         if( !jsonIsOk[z[j]] && z[j]!='\'' ){
           if( z[j]=='"' ){
             if( x==JSONB_TEXTJ ) return j+1;
-          }else if( z[j]!='\\' || j+1>=k ){
+          }else if( z[j]<=0x1f ){
+            /* Control characters in JSON5 string literals are ok */
+            if( x==JSONB_TEXTJ ) return j+1;
+          }else if( NEVER(z[j]!='\\') || j+1>=k ){
             return j+1;
           }else if( strchr("\"\\/bfnrt",z[j+1])!=0 ){
             j++;
@@ -205143,9 +206594,14 @@ json_parse_restart:
         return -1;
       }
     }else if( c<=0x1f ){
-      /* Control characters are not allowed in strings */
-      pParse->iErr = j;
-      return -1;
+      if( c==0 ){
+        pParse->iErr = j;
+        return -1;
+      }
+      /* Control characters are not allowed in canonical JSON string
+      ** literals, but are allowed in JSON5 string literals. */
+      opcode = JSONB_TEXT5;
+      pParse->hasNonstd = 1;
     }else if( c=='"' ){
       opcode = JSONB_TEXT5;
     }
@@ -205361,6 +206817,7 @@ json_parse_restart:
         return i+4;
       }
       /* fall-through into the default case that checks for NaN */
+      /* no break */ deliberate_fall_through
     }
     default: {
       u32 k;
@@ -205629,7 +207086,7 @@ static u32 jsonTranslateBlobToText(
       zIn = (const char*)&pParse->aBlob[i+n];
       jsonAppendChar(pOut, '"');
       while( sz2>0 ){
-        for(k=0; k<sz2 && zIn[k]!='\\' && zIn[k]!='"'; k++){}
+        for(k=0; k<sz2 && (jsonIsOk[(u8)zIn[k]] || zIn[k]=='\''); k++){}
         if( k>0 ){
           jsonAppendRawNZ(pOut, zIn, k);
           if( k>=sz2 ){
@@ -205644,6 +207101,13 @@ static u32 jsonTranslateBlobToText(
           sz2--;
           continue;
         }
+        if( zIn[0]<=0x1f ){
+          if( pOut->nUsed+7>pOut->nAlloc && jsonStringGrow(pOut,7) ) break;
+          jsonAppendControlChar(pOut, zIn[0]);
+          zIn++;
+          sz2--;
+          continue;
+        }
         assert( zIn[0]=='\\' );
         assert( sz2>=1 );
         if( sz2<2 ){
@@ -205746,6 +207210,112 @@ static u32 jsonTranslateBlobToText(
   return i+n+sz;
 }
+/* Context for recursion of json_pretty()
+*/
+typedef struct JsonPretty JsonPretty;
+struct JsonPretty {
+  JsonParse *pParse;    /* The BLOB being rendered */
+  JsonString *pOut;     /* Generate pretty output into this string */
+  const char *zIndent;  /* Use this text for indentation */
+  u32 szIndent;         /* Bytes in zIndent[] */
+  u32 nIndent;          /* Current level of indentation */
+};
+
+/* Append indentation to the pretty JSON under construction */
+static void jsonPrettyIndent(JsonPretty *pPretty){
+  u32 jj;
+  for(jj=0; jj<pPretty->nIndent; jj++){
+    jsonAppendRaw(pPretty->pOut, pPretty->zIndent, pPretty->szIndent);
+  }
+}
+
+/*
+** Translate the binary JSONB representation of JSON beginning at
+** pParse->aBlob[i] into a JSON text string. Append the JSON
+** text onto the end of pOut. Return the index in pParse->aBlob[]
+** of the first byte past the end of the element that is translated.
+**
+** This is a variant of jsonTranslateBlobToText() that "pretty-prints"
+** the output. Extra whitespace is inserted to make the JSON easier
+** for humans to read.
+**
+** If an error is detected in the BLOB input, the pOut->eErr flag
+** might get set to JSTRING_MALFORMED. But not all BLOB input errors
+** are detected. So a malformed JSONB input might either result
+** in an error, or in incorrect JSON.
+**
+** The pOut->eErr JSTRING_OOM flag is set on an OOM.
+*/
+static u32 jsonTranslateBlobToPrettyText(
+  JsonPretty *pPretty,  /* Pretty-printing context */
+  u32 i                 /* Start rendering at this index */
+){
+  u32 sz, n, j, iEnd;
+  const JsonParse *pParse = pPretty->pParse;
+  JsonString *pOut = pPretty->pOut;
+  n = jsonbPayloadSize(pParse, i, &sz);
+  if( n==0 ){
+    pOut->eErr |= JSTRING_MALFORMED;
+    return pParse->nBlob+1;
+  }
+  switch( pParse->aBlob[i] & 0x0f ){
+    case JSONB_ARRAY: {
+      j = i+n;
+      iEnd = j+sz;
+      jsonAppendChar(pOut, '[');
+      if( j<iEnd ){
+        jsonAppendChar(pOut, '\n');
+        pPretty->nIndent++;
+        while( pOut->eErr==0 ){
+          jsonPrettyIndent(pPretty);
+          j = jsonTranslateBlobToPrettyText(pPretty, j);
+          if( j>=iEnd ) break;
+          jsonAppendRawNZ(pOut, ",\n", 2);
+        }
+        jsonAppendChar(pOut, '\n');
+        pPretty->nIndent--;
+        jsonPrettyIndent(pPretty);
+      }
+      jsonAppendChar(pOut, ']');
+      i = iEnd;
+      break;
+    }
+    case JSONB_OBJECT: {
+      j = i+n;
+      iEnd = j+sz;
+      jsonAppendChar(pOut, '{');
+      if( j<iEnd ){
+        jsonAppendChar(pOut, '\n');
+        pPretty->nIndent++;
+        while( pOut->eErr==0 ){
+          jsonPrettyIndent(pPretty);
+          j = jsonTranslateBlobToText(pParse, j, pOut);
+          if( j>iEnd ){
+            pOut->eErr |= JSTRING_MALFORMED;
+            break;
+          }
+          jsonAppendRawNZ(pOut, ": ", 2);
+          j = jsonTranslateBlobToPrettyText(pPretty, j);
+          if( j>=iEnd ) break;
+          jsonAppendRawNZ(pOut, ",\n", 2);
+        }
+        jsonAppendChar(pOut, '\n');
+        pPretty->nIndent--;
+        jsonPrettyIndent(pPretty);
+      }
+      jsonAppendChar(pOut, '}');
+      i = iEnd;
+      break;
+    }
+    default: {
+      i = jsonTranslateBlobToText(pParse, i, pOut);
+      break;
+    }
+  }
+  return i;
+}
+
 /* Return true if the input pJson
 **
 ** For performance reasons, this routine does not do a detailed check of the
@@ -206996,11 +208566,12 @@ static void jsonParseFunc(
   if( p==0 ) return;
   if( argc==1 ){
     jsonDebugPrintBlob(p, 0, p->nBlob, 0, &out);
-    sqlite3_result_text64(ctx, out.zText, out.nChar, SQLITE_DYNAMIC, SQLITE_UTF8);
+    sqlite3_result_text64(ctx,out.zText,out.nChar,SQLITE_TRANSIENT,SQLITE_UTF8);
   }else{
     jsonShowParse(p);
   }
   jsonParseFree(p);
+  sqlite3_str_reset(&out);
 }
 #endif /* SQLITE_DEBUG */
@@ -207099,13 +208670,6 @@ static void jsonArrayLengthFunc(
   jsonParseFree(p);
 }
-/* True if the string is all digits */
-static int jsonAllDigits(const char *z, int n){
-  int i;
-  for(i=0; i<n && sqlite3Isdigit(z[i]); i++){}
-  return i==n;
-}
-
         **    [NUMBER] ==>  $[NUMBER]   // Not PG. Purely for convenience
         */
         jsonStringInit(&jx, ctx);
-        if( jsonAllDigits(zPath, nPath) ){
+        if( sqlite3_value_type(argv[i])==SQLITE_INTEGER ){
          jsonAppendRawNZ(&jx, "[", 1);
          jsonAppendRaw(&jx, zPath, nPath);
          jsonAppendRawNZ(&jx, "]", 2);
@@ -207664,6 +209228,40 @@ json_type_done:
   jsonParseFree(p);
 }
+/*
+** json_pretty(JSON)
+** json_pretty(JSON, INDENT)
+**
+** Return text that is a pretty-printed rendering of the input JSON.
+** If the argument is not valid JSON, return NULL.
+**
+** The INDENT argument is text that is used for indentation. If omitted,
+** it defaults to four spaces (the same as PostgreSQL).
+*/ +static void jsonPrettyFunc( + sqlite3_context *ctx, + int argc, + sqlite3_value **argv +){ + JsonString s; /* The output string */ + JsonPretty x; /* Pretty printing context */ + + memset(&x, 0, sizeof(x)); + x.pParse = jsonParseFuncArg(ctx, argv[0], 0); + if( x.pParse==0 ) return; + x.pOut = &s; + jsonStringInit(&s, ctx); + if( argc==1 || (x.zIndent = (const char*)sqlite3_value_text(argv[1]))==0 ){ + x.zIndent = " "; + x.szIndent = 4; + }else{ + x.szIndent = (u32)strlen(x.zIndent); + } + jsonTranslateBlobToPrettyText(&x, 0); + jsonReturnString(&s, 0, 0); + jsonParseFree(x.pParse); +} + /* ** json_valid(JSON) ** json_valid(JSON, FLAGS) @@ -208678,6 +210276,8 @@ SQLITE_PRIVATE void sqlite3RegisterJsonFunctions(void){ JFUNCTION(jsonb_object, -1,0,1, 1,1,0, jsonObjectFunc), JFUNCTION(json_patch, 2,1,1, 0,0,0, jsonPatchFunc), JFUNCTION(jsonb_patch, 2,1,0, 0,1,0, jsonPatchFunc), + JFUNCTION(json_pretty, 1,1,0, 0,0,0, jsonPrettyFunc), + JFUNCTION(json_pretty, 2,1,0, 0,0,0, jsonPrettyFunc), JFUNCTION(json_quote, 1,0,1, 1,0,0, jsonQuoteFunc), JFUNCTION(json_remove, -1,1,1, 0,0,0, jsonRemoveFunc), JFUNCTION(jsonb_remove, -1,1,0, 0,1,0, jsonRemoveFunc), @@ -210577,6 +212177,8 @@ static int deserializeGeometry(sqlite3_value *pValue, RtreeConstraint *pCons){ return SQLITE_OK; } +SQLITE_PRIVATE int sqlite3IntFloatCompare(i64,double); + /* ** Rtree virtual table module xFilter method. */ @@ -210606,7 +212208,8 @@ static int rtreeFilter( i64 iNode = 0; int eType = sqlite3_value_numeric_type(argv[0]); if( eType==SQLITE_INTEGER - || (eType==SQLITE_FLOAT && sqlite3_value_double(argv[0])==iRowid) + || (eType==SQLITE_FLOAT + && 0==sqlite3IntFloatCompare(iRowid,sqlite3_value_double(argv[0]))) ){ rc = findLeafNode(pRtree, iRowid, &pLeaf, &iNode); }else{ @@ -211961,6 +213564,7 @@ constraint: */ static int rtreeBeginTransaction(sqlite3_vtab *pVtab){ Rtree *pRtree = (Rtree *)pVtab; + assert( pRtree->inWrTrans==0 ); pRtree->inWrTrans = 1; return SQLITE_OK; } @@ -215515,7 +217119,7 @@ static void icuLoadCollation( UCollator *pUCollator; /* ICU library collation object */ int rc; /* Return code from sqlite3_create_collation_x() */ - assert(nArg==2); + assert(nArg==2 || nArg==3); (void)nArg; /* Unused parameter */ zLocale = (const char *)sqlite3_value_text(apArg[0]); zName = (const char *)sqlite3_value_text(apArg[1]); @@ -215530,7 +217134,39 @@ static void icuLoadCollation( return; } assert(p); - + if(nArg==3){ + const char *zOption = (const char*)sqlite3_value_text(apArg[2]); + static const struct { + const char *zName; + UColAttributeValue val; + } aStrength[] = { + { "PRIMARY", UCOL_PRIMARY }, + { "SECONDARY", UCOL_SECONDARY }, + { "TERTIARY", UCOL_TERTIARY }, + { "DEFAULT", UCOL_DEFAULT_STRENGTH }, + { "QUARTERNARY", UCOL_QUATERNARY }, + { "IDENTICAL", UCOL_IDENTICAL }, + }; + unsigned int i; + for(i=0; i=sizeof(aStrength)/sizeof(aStrength[0]) ){ + sqlite3_str *pStr = sqlite3_str_new(sqlite3_context_db_handle(p)); + sqlite3_str_appendf(pStr, + "unknown collation strength \"%s\" - should be one of:", + zOption); + for(i=0; ipTblIter, &p->zErrmsg); pIter->zTbl = 0; + pIter->zDataTbl = 0; }else{ pIter->zTbl = (const char*)sqlite3_column_text(pIter->pTblIter, 0); pIter->zDataTbl = (const char*)sqlite3_column_text(pIter->pTblIter,1); @@ -219483,7 +221122,7 @@ static i64 rbuShmChecksum(sqlite3rbu *p){ u32 volatile *ptr; p->rc = pDb->pMethods->xShmMap(pDb, 0, 32*1024, 0, (void volatile**)&ptr); if( p->rc==SQLITE_OK ){ - iRet = ((i64)ptr[10] << 32) + ptr[11]; + iRet = (i64)(((u64)ptr[10] << 32) + ptr[11]); } } return 
iRet; @@ -226954,14 +228593,14 @@ static int sessionChangesetNextOne( p->rc = sessionInputBuffer(&p->in, 2); if( p->rc!=SQLITE_OK ) return p->rc; + sessionDiscardData(&p->in); + p->in.iCurrent = p->in.iNext; + /* If the iterator is already at the end of the changeset, return DONE. */ if( p->in.iNext>=p->in.nData ){ return SQLITE_DONE; } - sessionDiscardData(&p->in); - p->in.iCurrent = p->in.iNext; - op = p->in.aData[p->in.iNext++]; while( op=='T' || op=='P' ){ if( pbNew ) *pbNew = 1; @@ -228696,6 +230335,7 @@ struct sqlite3_changegroup { int rc; /* Error code */ int bPatch; /* True to accumulate patchsets */ SessionTable *pList; /* List of tables in current patch */ + SessionBuffer rec; sqlite3 *db; /* Configured by changegroup_schema() */ char *zDb; /* Configured by changegroup_schema() */ @@ -228994,108 +230634,128 @@ static int sessionChangesetExtendRecord( } /* -** Add all changes in the changeset traversed by the iterator passed as -** the first argument to the changegroup hash tables. +** Locate or create a SessionTable object that may be used to add the +** change currently pointed to by iterator pIter to changegroup pGrp. +** If successful, set output variable (*ppTab) to point to the table +** object and return SQLITE_OK. Otherwise, if some error occurs, return +** an SQLite error code and leave (*ppTab) set to NULL. */ -static int sessionChangesetToHash( - sqlite3_changeset_iter *pIter, /* Iterator to read from */ - sqlite3_changegroup *pGrp, /* Changegroup object to add changeset to */ - int bRebase /* True if hash table is for rebasing */ +static int sessionChangesetFindTable( + sqlite3_changegroup *pGrp, + const char *zTab, + sqlite3_changeset_iter *pIter, + SessionTable **ppTab ){ - u8 *aRec; - int nRec; int rc = SQLITE_OK; SessionTable *pTab = 0; - SessionBuffer rec = {0, 0, 0}; - - while( SQLITE_ROW==sessionChangesetNext(pIter, &aRec, &nRec, 0) ){ - const char *zNew; - int nCol; - int op; - int iHash; - int bIndirect; - SessionChange *pChange; - SessionChange *pExist = 0; - SessionChange **pp; - - /* Ensure that only changesets, or only patchsets, but not a mixture - ** of both, are being combined. It is an error to try to combine a - ** changeset and a patchset. 
*/ - if( pGrp->pList==0 ){ - pGrp->bPatch = pIter->bPatchset; - }else if( pIter->bPatchset!=pGrp->bPatch ){ - rc = SQLITE_ERROR; - break; - } + int nTab = (int)strlen(zTab); + u8 *abPK = 0; + int nCol = 0; - sqlite3changeset_op(pIter, &zNew, &nCol, &op, &bIndirect); - if( !pTab || sqlite3_stricmp(zNew, pTab->zName) ){ - /* Search the list for a matching table */ - int nNew = (int)strlen(zNew); - u8 *abPK; + *ppTab = 0; + sqlite3changeset_pk(pIter, &abPK, &nCol); - sqlite3changeset_pk(pIter, &abPK, 0); - for(pTab = pGrp->pList; pTab; pTab=pTab->pNext){ - if( 0==sqlite3_strnicmp(pTab->zName, zNew, nNew+1) ) break; - } - if( !pTab ){ - SessionTable **ppTab; + /* Search the list for an existing table */ + for(pTab = pGrp->pList; pTab; pTab=pTab->pNext){ + if( 0==sqlite3_strnicmp(pTab->zName, zTab, nTab+1) ) break; + } - pTab = sqlite3_malloc64(sizeof(SessionTable) + nCol + nNew+1); - if( !pTab ){ - rc = SQLITE_NOMEM; - break; - } - memset(pTab, 0, sizeof(SessionTable)); - pTab->nCol = nCol; - pTab->abPK = (u8*)&pTab[1]; - memcpy(pTab->abPK, abPK, nCol); - pTab->zName = (char*)&pTab->abPK[nCol]; - memcpy(pTab->zName, zNew, nNew+1); - - if( pGrp->db ){ - pTab->nCol = 0; - rc = sessionInitTable(0, pTab, pGrp->db, pGrp->zDb); - if( rc ){ - assert( pTab->azCol==0 ); - sqlite3_free(pTab); - break; - } - } + /* If one was not found above, create a new table now */ + if( !pTab ){ + SessionTable **ppNew; - /* The new object must be linked on to the end of the list, not - ** simply added to the start of it. This is to ensure that the - ** tables within the output of sqlite3changegroup_output() are in - ** the right order. */ - for(ppTab=&pGrp->pList; *ppTab; ppTab=&(*ppTab)->pNext); - *ppTab = pTab; - } + pTab = sqlite3_malloc64(sizeof(SessionTable) + nCol + nTab+1); + if( !pTab ){ + return SQLITE_NOMEM; + } + memset(pTab, 0, sizeof(SessionTable)); + pTab->nCol = nCol; + pTab->abPK = (u8*)&pTab[1]; + memcpy(pTab->abPK, abPK, nCol); + pTab->zName = (char*)&pTab->abPK[nCol]; + memcpy(pTab->zName, zTab, nTab+1); - if( !sessionChangesetCheckCompat(pTab, nCol, abPK) ){ - rc = SQLITE_SCHEMA; - break; + if( pGrp->db ){ + pTab->nCol = 0; + rc = sessionInitTable(0, pTab, pGrp->db, pGrp->zDb); + if( rc ){ + assert( pTab->azCol==0 ); + sqlite3_free(pTab); + return rc; } } - if( nColnCol ){ - assert( pGrp->db ); - rc = sessionChangesetExtendRecord(pGrp, pTab, nCol, op, aRec, nRec, &rec); - if( rc ) break; - aRec = rec.aBuf; - nRec = rec.nBuf; - } + /* The new object must be linked on to the end of the list, not + ** simply added to the start of it. This is to ensure that the + ** tables within the output of sqlite3changegroup_output() are in + ** the right order. */ + for(ppNew=&pGrp->pList; *ppNew; ppNew=&(*ppNew)->pNext); + *ppNew = pTab; + } - if( sessionGrowHash(0, pIter->bPatchset, pTab) ){ - rc = SQLITE_NOMEM; - break; - } + /* Check that the table is compatible. */ + if( !sessionChangesetCheckCompat(pTab, nCol, abPK) ){ + rc = SQLITE_SCHEMA; + } + + *ppTab = pTab; + return rc; +} + +/* +** Add the change currently indicated by iterator pIter to the hash table +** belonging to changegroup pGrp. 
+*/ +static int sessionOneChangeToHash( + sqlite3_changegroup *pGrp, + sqlite3_changeset_iter *pIter, + int bRebase +){ + int rc = SQLITE_OK; + int nCol = 0; + int op = 0; + int iHash = 0; + int bIndirect = 0; + SessionChange *pChange = 0; + SessionChange *pExist = 0; + SessionChange **pp = 0; + SessionTable *pTab = 0; + u8 *aRec = &pIter->in.aData[pIter->in.iCurrent + 2]; + int nRec = (pIter->in.iNext - pIter->in.iCurrent) - 2; + + /* Ensure that only changesets, or only patchsets, but not a mixture + ** of both, are being combined. It is an error to try to combine a + ** changeset and a patchset. */ + if( pGrp->pList==0 ){ + pGrp->bPatch = pIter->bPatchset; + }else if( pIter->bPatchset!=pGrp->bPatch ){ + rc = SQLITE_ERROR; + } + + if( rc==SQLITE_OK ){ + const char *zTab = 0; + sqlite3changeset_op(pIter, &zTab, &nCol, &op, &bIndirect); + rc = sessionChangesetFindTable(pGrp, zTab, pIter, &pTab); + } + + if( rc==SQLITE_OK && nColnCol ){ + SessionBuffer *pBuf = &pGrp->rec; + rc = sessionChangesetExtendRecord(pGrp, pTab, nCol, op, aRec, nRec, pBuf); + aRec = pBuf->aBuf; + nRec = pBuf->nBuf; + assert( pGrp->db ); + } + + if( rc==SQLITE_OK && sessionGrowHash(0, pIter->bPatchset, pTab) ){ + rc = SQLITE_NOMEM; + } + + if( rc==SQLITE_OK ){ + /* Search for existing entry. If found, remove it from the hash table. + ** Code below may link it back in. */ iHash = sessionChangeHash( pTab, (pIter->bPatchset && op==SQLITE_DELETE), aRec, pTab->nChange ); - - /* Search for existing entry. If found, remove it from the hash table. - ** Code below may link it back in. - */ for(pp=&pTab->apChange[iHash]; *pp; pp=&(*pp)->pNext){ int bPkOnly1 = 0; int bPkOnly2 = 0; @@ -229110,19 +230770,41 @@ static int sessionChangesetToHash( break; } } + } + if( rc==SQLITE_OK ){ rc = sessionChangeMerge(pTab, bRebase, pIter->bPatchset, pExist, op, bIndirect, aRec, nRec, &pChange ); - if( rc ) break; - if( pChange ){ - pChange->pNext = pTab->apChange[iHash]; - pTab->apChange[iHash] = pChange; - pTab->nEntry++; - } + } + if( rc==SQLITE_OK && pChange ){ + pChange->pNext = pTab->apChange[iHash]; + pTab->apChange[iHash] = pChange; + pTab->nEntry++; + } + + if( rc==SQLITE_OK ) rc = pIter->rc; + return rc; +} + +/* +** Add all changes in the changeset traversed by the iterator passed as +** the first argument to the changegroup hash tables. +*/ +static int sessionChangesetToHash( + sqlite3_changeset_iter *pIter, /* Iterator to read from */ + sqlite3_changegroup *pGrp, /* Changegroup object to add changeset to */ + int bRebase /* True if hash table is for rebasing */ +){ + u8 *aRec; + int nRec; + int rc = SQLITE_OK; + + while( SQLITE_ROW==(sessionChangesetNext(pIter, &aRec, &nRec, 0)) ){ + rc = sessionOneChangeToHash(pGrp, pIter, bRebase); + if( rc!=SQLITE_OK ) break; } - sqlite3_free(rec.aBuf); if( rc==SQLITE_OK ) rc = pIter->rc; return rc; } @@ -229250,6 +230932,23 @@ SQLITE_API int sqlite3changegroup_add(sqlite3_changegroup *pGrp, int nData, void return rc; } +/* +** Add a single change to a changeset-group. +*/ +SQLITE_API int sqlite3changegroup_add_change( + sqlite3_changegroup *pGrp, + sqlite3_changeset_iter *pIter +){ + if( pIter->in.iCurrent==pIter->in.iNext + || pIter->rc!=SQLITE_OK + || pIter->bInvert + ){ + /* Iterator does not point to any valid entry or is an INVERT iterator. */ + return SQLITE_ERROR; + } + return sessionOneChangeToHash(pGrp, pIter, 0); +} + /* ** Obtain a buffer containing a changeset representing the concatenation ** of all changesets added to the group so far. 
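The hunks above split the old sessionChangesetToHash() loop so that a single
change can be hashed on its own, which is what the new public
sqlite3changegroup_add_change() API relies on. A minimal sketch of using that
API follows; it assumes a build with SQLITE_ENABLE_SESSION, and the filter on
table "t1" is an arbitrary example, not part of this patch:

    #include <string.h>
    #include "sqlite3.h"

    /* Fold only the changes that touch table "t1" into a changegroup and
    ** return the combined changeset in *pnOut/*ppOut (the caller frees the
    ** buffer with sqlite3_free()). Returns an SQLite error code. */
    static int add_t1_changes(int nIn, void *pIn, int *pnOut, void **ppOut){
      sqlite3_changegroup *pGrp = 0;
      sqlite3_changeset_iter *pIter = 0;
      int rc = sqlite3changegroup_new(&pGrp);
      if( rc==SQLITE_OK ) rc = sqlite3changeset_start(&pIter, nIn, pIn);
      while( rc==SQLITE_OK && SQLITE_ROW==sqlite3changeset_next(pIter) ){
        const char *zTab; int nCol, op, bIndirect;
        sqlite3changeset_op(pIter, &zTab, &nCol, &op, &bIndirect);
        if( 0==strcmp(zTab, "t1") ){
          rc = sqlite3changegroup_add_change(pGrp, pIter);
        }
      }
      if( pIter ){
        int rc2 = sqlite3changeset_finalize(pIter);
        if( rc==SQLITE_OK ) rc = rc2;
      }
      if( rc==SQLITE_OK ) rc = sqlite3changegroup_output(pGrp, pnOut, ppOut);
      sqlite3changegroup_delete(pGrp);
      return rc;
    }

As the sqlite3.h documentation added later in this patch states, the iterator
must point at a valid entry and must not have been opened with
SQLITE_CHANGESETAPPLY_INVERT, or sqlite3changegroup_add_change() returns
SQLITE_ERROR.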
@@ -229299,6 +230998,7 @@ SQLITE_API void sqlite3changegroup_delete(sqlite3_changegroup *pGrp){ if( pGrp ){ sqlite3_free(pGrp->zDb); sessionDeleteTable(0, pGrp->pList); + sqlite3_free(pGrp->rec.aBuf); sqlite3_free(pGrp); } } @@ -229700,6 +231400,7 @@ SQLITE_API int sqlite3rebaser_rebase_strm( SQLITE_API void sqlite3rebaser_delete(sqlite3_rebaser *p){ if( p ){ sessionDeleteTable(0, p->grp.pList); + sqlite3_free(p->grp.rec.aBuf); sqlite3_free(p); } } @@ -229797,8 +231498,8 @@ struct Fts5PhraseIter { ** EXTENSION API FUNCTIONS ** ** xUserData(pFts): -** Return a copy of the context pointer the extension function was -** registered with. +** Return a copy of the pUserData pointer passed to the xCreateFunction() +** API when the extension function was registered. ** ** xColumnTotalSize(pFts, iCol, pnToken): ** If parameter iCol is less than zero, set output variable *pnToken @@ -231394,6 +233095,9 @@ static void sqlite3Fts5UnicodeAscii(u8*, u8*); ** sqlite3Fts5ParserARG_STORE Code to store %extra_argument into fts5yypParser ** sqlite3Fts5ParserARG_FETCH Code to extract %extra_argument from fts5yypParser ** sqlite3Fts5ParserCTX_* As sqlite3Fts5ParserARG_ except for %extra_context +** fts5YYREALLOC Name of the realloc() function to use +** fts5YYFREE Name of the free() function to use +** fts5YYDYNSTACK True if stack space should be extended on heap ** fts5YYERRORSYMBOL is the code number of the error symbol. If not ** defined, then do no error processing. ** fts5YYNSTATE the combined number of states. @@ -231407,6 +233111,8 @@ static void sqlite3Fts5UnicodeAscii(u8*, u8*); ** fts5YY_NO_ACTION The fts5yy_action[] code for no-op ** fts5YY_MIN_REDUCE Minimum value for reduce actions ** fts5YY_MAX_REDUCE Maximum value for reduce actions +** fts5YY_MIN_DSTRCTR Minimum symbol value that has a destructor +** fts5YY_MAX_DSTRCTR Maximum symbol value that has a destructor */ #ifndef INTERFACE # define INTERFACE 1 @@ -231433,6 +233139,9 @@ typedef union { #define sqlite3Fts5ParserARG_PARAM ,pParse #define sqlite3Fts5ParserARG_FETCH Fts5Parse *pParse=fts5yypParser->pParse; #define sqlite3Fts5ParserARG_STORE fts5yypParser->pParse=pParse; +#define fts5YYREALLOC realloc +#define fts5YYFREE free +#define fts5YYDYNSTACK 0 #define sqlite3Fts5ParserCTX_SDECL #define sqlite3Fts5ParserCTX_PDECL #define sqlite3Fts5ParserCTX_PARAM @@ -231450,6 +233159,8 @@ typedef union { #define fts5YY_NO_ACTION 82 #define fts5YY_MIN_REDUCE 83 #define fts5YY_MAX_REDUCE 110 +#define fts5YY_MIN_DSTRCTR 16 +#define fts5YY_MAX_DSTRCTR 24 /************* End control #defines *******************************************/ #define fts5YY_NLOOKAHEAD ((int)(sizeof(fts5yy_lookahead)/sizeof(fts5yy_lookahead[0]))) @@ -231465,6 +233176,22 @@ typedef union { # define fts5yytestcase(X) #endif +/* Macro to determine if stack space has the ability to grow using +** heap memory. +*/ +#if fts5YYSTACKDEPTH<=0 || fts5YYDYNSTACK +# define fts5YYGROWABLESTACK 1 +#else +# define fts5YYGROWABLESTACK 0 +#endif + +/* Guarantee a minimum number of initial stack slots. +*/ +#if fts5YYSTACKDEPTH<=0 +# undef fts5YYSTACKDEPTH +# define fts5YYSTACKDEPTH 2 /* Need a minimum stack size */ +#endif + /* Next are the tables used to determine what action to take based on the ** current state and lookahead token. 
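The fts5 parser hunks that follow replace the old compile-time choice between
a fixed-size and a heap-only parser stack with a single scheme: the stack
always begins in a small array embedded in the parser object (fts5yystk0[])
and, when fts5YYGROWABLESTACK is true, migrates to the heap and doubles on
overflow. A self-contained sketch of the same pattern, using illustrative
names rather than the generated parser's own:

    #include <stdlib.h>
    #include <string.h>

    #define STK0_DEPTH 2              /* minimum initial slots */

    typedef struct { int state; } Entry;

    typedef struct {
      Entry *top;                     /* current top of the stack */
      Entry *stack;                   /* base: stk0[] or a heap block */
      Entry *stackEnd;                /* last usable slot */
      Entry  stk0[STK0_DEPTH];        /* initial, in-object stack space */
    } Parser;

    /* Start out on the in-object stack, as sqlite3Fts5ParserInit() does. */
    static void initParser(Parser *p){
      p->stack = p->stk0;
      p->stackEnd = &p->stack[STK0_DEPTH-1];
      p->top = p->stack;
      p->top->state = 0;
    }

    /* Grow the stack. The first growth must copy out of stk0[] because
    ** in-object memory cannot be passed to realloc(). Returns 0 on
    ** success and 1 on OOM, mirroring fts5yyGrowStack(). */
    static int growStack(Parser *p){
      int oldSize = 1 + (int)(p->stackEnd - p->stack);
      int newSize = oldSize*2 + 100;
      int idx = (int)(p->top - p->stack);
      Entry *pNew;
      if( p->stack==p->stk0 ){
        pNew = malloc(newSize*sizeof(pNew[0]));
        if( pNew==0 ) return 1;
        memcpy(pNew, p->stack, oldSize*sizeof(pNew[0]));
      }else{
        pNew = realloc(p->stack, newSize*sizeof(pNew[0]));
        if( pNew==0 ) return 1;
      }
      p->stack = pNew;
      p->top = &p->stack[idx];
      p->stackEnd = &p->stack[newSize-1];
      return 0;
    }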
These tables are used to implement @@ -231625,14 +233352,9 @@ struct fts5yyParser { #endif sqlite3Fts5ParserARG_SDECL /* A place to hold %extra_argument */ sqlite3Fts5ParserCTX_SDECL /* A place to hold %extra_context */ -#if fts5YYSTACKDEPTH<=0 - int fts5yystksz; /* Current side of the stack */ - fts5yyStackEntry *fts5yystack; /* The parser's stack */ - fts5yyStackEntry fts5yystk0; /* First stack entry */ -#else - fts5yyStackEntry fts5yystack[fts5YYSTACKDEPTH]; /* The parser's stack */ - fts5yyStackEntry *fts5yystackEnd; /* Last entry in the stack */ -#endif + fts5yyStackEntry *fts5yystackEnd; /* Last entry in the stack */ + fts5yyStackEntry *fts5yystack; /* The parser stack */ + fts5yyStackEntry fts5yystk0[fts5YYSTACKDEPTH]; /* Initial stack space */ }; typedef struct fts5yyParser fts5yyParser; @@ -231739,37 +233461,45 @@ static const char *const fts5yyRuleName[] = { #endif /* NDEBUG */ -#if fts5YYSTACKDEPTH<=0 +#if fts5YYGROWABLESTACK /* ** Try to increase the size of the parser stack. Return the number ** of errors. Return 0 on success. */ static int fts5yyGrowStack(fts5yyParser *p){ + int oldSize = 1 + (int)(p->fts5yystackEnd - p->fts5yystack); int newSize; int idx; fts5yyStackEntry *pNew; - newSize = p->fts5yystksz*2 + 100; - idx = p->fts5yytos ? (int)(p->fts5yytos - p->fts5yystack) : 0; - if( p->fts5yystack==&p->fts5yystk0 ){ - pNew = malloc(newSize*sizeof(pNew[0])); - if( pNew ) pNew[0] = p->fts5yystk0; + newSize = oldSize*2 + 100; + idx = (int)(p->fts5yytos - p->fts5yystack); + if( p->fts5yystack==p->fts5yystk0 ){ + pNew = fts5YYREALLOC(0, newSize*sizeof(pNew[0])); + if( pNew==0 ) return 1; + memcpy(pNew, p->fts5yystack, oldSize*sizeof(pNew[0])); }else{ - pNew = realloc(p->fts5yystack, newSize*sizeof(pNew[0])); + pNew = fts5YYREALLOC(p->fts5yystack, newSize*sizeof(pNew[0])); + if( pNew==0 ) return 1; } - if( pNew ){ - p->fts5yystack = pNew; - p->fts5yytos = &p->fts5yystack[idx]; + p->fts5yystack = pNew; + p->fts5yytos = &p->fts5yystack[idx]; #ifndef NDEBUG - if( fts5yyTraceFILE ){ - fprintf(fts5yyTraceFILE,"%sStack grows from %d to %d entries.\n", - fts5yyTracePrompt, p->fts5yystksz, newSize); - } -#endif - p->fts5yystksz = newSize; + if( fts5yyTraceFILE ){ + fprintf(fts5yyTraceFILE,"%sStack grows from %d to %d entries.\n", + fts5yyTracePrompt, oldSize, newSize); } - return pNew==0; +#endif + p->fts5yystackEnd = &p->fts5yystack[newSize-1]; + return 0; } +#endif /* fts5YYGROWABLESTACK */ + +#if !fts5YYGROWABLESTACK +/* For builds that do no have a growable stack, fts5yyGrowStack always +** returns an error. 
+*/ +# define fts5yyGrowStack(X) 1 #endif /* Datatype of the argument to the memory allocated passed as the @@ -231789,24 +233519,14 @@ static void sqlite3Fts5ParserInit(void *fts5yypRawParser sqlite3Fts5ParserCTX_PD #ifdef fts5YYTRACKMAXSTACKDEPTH fts5yypParser->fts5yyhwm = 0; #endif -#if fts5YYSTACKDEPTH<=0 - fts5yypParser->fts5yytos = NULL; - fts5yypParser->fts5yystack = NULL; - fts5yypParser->fts5yystksz = 0; - if( fts5yyGrowStack(fts5yypParser) ){ - fts5yypParser->fts5yystack = &fts5yypParser->fts5yystk0; - fts5yypParser->fts5yystksz = 1; - } -#endif + fts5yypParser->fts5yystack = fts5yypParser->fts5yystk0; + fts5yypParser->fts5yystackEnd = &fts5yypParser->fts5yystack[fts5YYSTACKDEPTH-1]; #ifndef fts5YYNOERRORRECOVERY fts5yypParser->fts5yyerrcnt = -1; #endif fts5yypParser->fts5yytos = fts5yypParser->fts5yystack; fts5yypParser->fts5yystack[0].stateno = 0; fts5yypParser->fts5yystack[0].major = 0; -#if fts5YYSTACKDEPTH>0 - fts5yypParser->fts5yystackEnd = &fts5yypParser->fts5yystack[fts5YYSTACKDEPTH-1]; -#endif } #ifndef sqlite3Fts5Parser_ENGINEALWAYSONSTACK @@ -231920,9 +233640,26 @@ static void fts5yy_pop_parser_stack(fts5yyParser *pParser){ */ static void sqlite3Fts5ParserFinalize(void *p){ fts5yyParser *pParser = (fts5yyParser*)p; - while( pParser->fts5yytos>pParser->fts5yystack ) fts5yy_pop_parser_stack(pParser); -#if fts5YYSTACKDEPTH<=0 - if( pParser->fts5yystack!=&pParser->fts5yystk0 ) free(pParser->fts5yystack); + + /* In-lined version of calling fts5yy_pop_parser_stack() for each + ** element left in the stack */ + fts5yyStackEntry *fts5yytos = pParser->fts5yytos; + while( fts5yytos>pParser->fts5yystack ){ +#ifndef NDEBUG + if( fts5yyTraceFILE ){ + fprintf(fts5yyTraceFILE,"%sPopping %s\n", + fts5yyTracePrompt, + fts5yyTokenName[fts5yytos->major]); + } +#endif + if( fts5yytos->major>=fts5YY_MIN_DSTRCTR ){ + fts5yy_destructor(pParser, fts5yytos->major, &fts5yytos->minor); + } + fts5yytos--; + } + +#if fts5YYGROWABLESTACK + if( pParser->fts5yystack!=pParser->fts5yystk0 ) fts5YYFREE(pParser->fts5yystack); #endif } @@ -232149,25 +233886,19 @@ static void fts5yy_shift( assert( fts5yypParser->fts5yyhwm == (int)(fts5yypParser->fts5yytos - fts5yypParser->fts5yystack) ); } #endif -#if fts5YYSTACKDEPTH>0 - if( fts5yypParser->fts5yytos>fts5yypParser->fts5yystackEnd ){ - fts5yypParser->fts5yytos--; - fts5yyStackOverflow(fts5yypParser); - return; - } -#else - if( fts5yypParser->fts5yytos>=&fts5yypParser->fts5yystack[fts5yypParser->fts5yystksz] ){ + fts5yytos = fts5yypParser->fts5yytos; + if( fts5yytos>fts5yypParser->fts5yystackEnd ){ if( fts5yyGrowStack(fts5yypParser) ){ fts5yypParser->fts5yytos--; fts5yyStackOverflow(fts5yypParser); return; } + fts5yytos = fts5yypParser->fts5yytos; + assert( fts5yytos <= fts5yypParser->fts5yystackEnd ); } -#endif if( fts5yyNewState > fts5YY_MAX_SHIFT ){ fts5yyNewState += fts5YY_MIN_REDUCE - fts5YY_MIN_SHIFTREDUCE; } - fts5yytos = fts5yypParser->fts5yytos; fts5yytos->stateno = fts5yyNewState; fts5yytos->major = fts5yyMajor; fts5yytos->minor.fts5yy0 = fts5yyMinor; @@ -232604,19 +234335,12 @@ static void sqlite3Fts5Parser( (int)(fts5yypParser->fts5yytos - fts5yypParser->fts5yystack)); } #endif -#if fts5YYSTACKDEPTH>0 if( fts5yypParser->fts5yytos>=fts5yypParser->fts5yystackEnd ){ - fts5yyStackOverflow(fts5yypParser); - break; - } -#else - if( fts5yypParser->fts5yytos>=&fts5yypParser->fts5yystack[fts5yypParser->fts5yystksz-1] ){ if( fts5yyGrowStack(fts5yypParser) ){ fts5yyStackOverflow(fts5yypParser); break; } } -#endif } fts5yyact = 
fts5yy_reduce(fts5yypParser,fts5yyruleno,fts5yymajor,fts5yyminor sqlite3Fts5ParserCTX_PARAM); }else if( fts5yyact <= fts5YY_MAX_SHIFTREDUCE ){ @@ -235293,7 +237017,11 @@ static int sqlite3Fts5ExprNew( } sqlite3_free(sParse.apPhrase); - *pzErr = sParse.zErr; + if( 0==*pzErr ){ + *pzErr = sParse.zErr; + }else{ + sqlite3_free(sParse.zErr); + } return sParse.rc; } @@ -237421,6 +239149,7 @@ static Fts5ExprNode *sqlite3Fts5ParseImplicitAnd( assert( pRight->eType==FTS5_STRING || pRight->eType==FTS5_TERM || pRight->eType==FTS5_EOF + || (pRight->eType==FTS5_AND && pParse->bPhraseToAnd) ); if( pLeft->eType==FTS5_AND ){ @@ -249588,6 +251317,7 @@ static int fts5UpdateMethod( rc = SQLITE_ERROR; }else{ rc = fts5SpecialDelete(pTab, apVal); + bUpdateOrDelete = 1; } }else{ rc = fts5SpecialInsert(pTab, z, apVal[2 + pConfig->nCol + 1]); @@ -250762,14 +252492,16 @@ static int sqlite3Fts5GetTokenizer( if( pMod==0 ){ assert( nArg>0 ); rc = SQLITE_ERROR; - *pzErr = sqlite3_mprintf("no such tokenizer: %s", azArg[0]); + if( pzErr ) *pzErr = sqlite3_mprintf("no such tokenizer: %s", azArg[0]); }else{ rc = pMod->x.xCreate( pMod->pUserData, (azArg?&azArg[1]:0), (nArg?nArg-1:0), &pConfig->pTok ); pConfig->pTokApi = &pMod->x; if( rc!=SQLITE_OK ){ - if( pzErr ) *pzErr = sqlite3_mprintf("error in tokenizer constructor"); + if( pzErr && rc!=SQLITE_NOMEM ){ + *pzErr = sqlite3_mprintf("error in tokenizer constructor"); + } }else{ pConfig->ePattern = sqlite3Fts5TokenizerPattern( pMod->x.xCreate, pConfig->pTok @@ -250828,7 +252560,7 @@ static void fts5SourceIdFunc( ){ assert( nArg==0 ); UNUSED_PARAM2(nArg, apUnused); - sqlite3_result_text(pCtx, "fts5: 2024-04-15 13:34:05 8653b758870e6ef0c98d46b3ace27849054af85da891eb121e9aaa537f1e8355", -1, SQLITE_TRANSIENT); + sqlite3_result_text(pCtx, "fts5: 2024-08-13 09:16:08 c9c2ab54ba1f5f46360f1b4f35d849cd3f080e6fc2b6c60e91b16c63f69a1e33", -1, SQLITE_TRANSIENT); } /* @@ -250863,18 +252595,25 @@ static int fts5IntegrityMethod( assert( pzErr!=0 && *pzErr==0 ); UNUSED_PARAM(isQuick); + assert( pTab->p.pConfig->pzErrmsg==0 ); + pTab->p.pConfig->pzErrmsg = pzErr; rc = sqlite3Fts5StorageIntegrity(pTab->pStorage, 0); - if( (rc&0xff)==SQLITE_CORRUPT ){ - *pzErr = sqlite3_mprintf("malformed inverted index for FTS5 table %s.%s", - zSchema, zTabname); - }else if( rc!=SQLITE_OK ){ - *pzErr = sqlite3_mprintf("unable to validate the inverted index for" - " FTS5 table %s.%s: %s", - zSchema, zTabname, sqlite3_errstr(rc)); + if( *pzErr==0 && rc!=SQLITE_OK ){ + if( (rc&0xff)==SQLITE_CORRUPT ){ + *pzErr = sqlite3_mprintf("malformed inverted index for FTS5 table %s.%s", + zSchema, zTabname); + rc = (*pzErr) ? 
SQLITE_OK : SQLITE_NOMEM; + }else{ + *pzErr = sqlite3_mprintf("unable to validate the inverted index for" + " FTS5 table %s.%s: %s", + zSchema, zTabname, sqlite3_errstr(rc)); + } } + sqlite3Fts5IndexCloseReader(pTab->p.pIndex); + pTab->p.pConfig->pzErrmsg = 0; - return SQLITE_OK; + return rc; } static int fts5Init(sqlite3 *db){ @@ -252306,7 +254045,7 @@ static int fts5AsciiCreate( int i; memset(p, 0, sizeof(AsciiTokenizer)); memcpy(p->aTokenChar, aAsciiTokenChar, sizeof(aAsciiTokenChar)); - for(i=0; rc==SQLITE_OK && ibFold = 1; pNew->iFoldParam = 0; - for(i=0; rc==SQLITE_OK && iiFoldParam!=0 && pNew->bFold==0 ){ rc = SQLITE_ERROR; diff --git a/src/database/sqlite/sqlite3.h b/src/database/sqlite/sqlite3.h index 2618b37a7..f64ca0172 100644 --- a/src/database/sqlite/sqlite3.h +++ b/src/database/sqlite/sqlite3.h @@ -146,9 +146,9 @@ extern "C" { ** [sqlite3_libversion_number()], [sqlite3_sourceid()], ** [sqlite_version()] and [sqlite_source_id()]. */ -#define SQLITE_VERSION "3.45.3" -#define SQLITE_VERSION_NUMBER 3045003 -#define SQLITE_SOURCE_ID "2024-04-15 13:34:05 8653b758870e6ef0c98d46b3ace27849054af85da891eb121e9aaa537f1e8355" +#define SQLITE_VERSION "3.46.1" +#define SQLITE_VERSION_NUMBER 3046001 +#define SQLITE_SOURCE_ID "2024-08-13 09:16:08 c9c2ab54ba1f5f46360f1b4f35d849cd3f080e6fc2b6c60e91b16c63f69a1e33" /* ** CAPI3REF: Run-Time Library Version Numbers @@ -764,11 +764,11 @@ struct sqlite3_file { ** ** xLock() upgrades the database file lock. In other words, xLock() moves the ** database file lock in the direction NONE toward EXCLUSIVE. The argument to -** xLock() is always on of SHARED, RESERVED, PENDING, or EXCLUSIVE, never +** xLock() is always one of SHARED, RESERVED, PENDING, or EXCLUSIVE, never ** SQLITE_LOCK_NONE. If the database file lock is already at or above the ** requested lock, then the call to xLock() is a no-op. ** xUnlock() downgrades the database file lock to either SHARED or NONE. -* If the lock is already at or below the requested lock state, then the call +** If the lock is already at or below the requested lock state, then the call ** to xUnlock() is a no-op. ** The xCheckReservedLock() method checks whether any database connection, ** either in this process or in some other process, is holding a RESERVED, @@ -3305,8 +3305,8 @@ SQLITE_API int sqlite3_set_authorizer( #define SQLITE_RECURSIVE 33 /* NULL NULL */ /* -** CAPI3REF: Tracing And Profiling Functions -** METHOD: sqlite3 +** CAPI3REF: Deprecated Tracing And Profiling Functions +** DEPRECATED ** ** These routines are deprecated. Use the [sqlite3_trace_v2()] interface ** instead of the routines described here. @@ -6887,6 +6887,12 @@ SQLITE_API int sqlite3_autovacuum_pages( ** The exceptions defined in this paragraph might change in a future ** release of SQLite. ** +** Whether the update hook is invoked before or after the +** corresponding change is currently unspecified and may differ +** depending on the type of change. Do not rely on the order of the +** hook call with regards to the final result of the operation which +** triggers the hook. +** ** The update hook implementation must not do anything that will modify ** the database connection that invoked the update hook. Any actions ** to modify the database connection must be deferred until after the @@ -8357,7 +8363,7 @@ SQLITE_API int sqlite3_test_control(int op, ...); ** The sqlite3_keyword_count() interface returns the number of distinct ** keywords understood by SQLite. 
 **
-** The sqlite3_keyword_name(N,Z,L) interface finds the N-th keyword and
+** The sqlite3_keyword_name(N,Z,L) interface finds the 0-based N-th keyword and
 ** makes *Z point to that keyword expressed as UTF8 and writes the number
 ** of bytes in the keyword into *L. The string that *Z points to is not
 ** zero-terminated. The sqlite3_keyword_name(N,Z,L) routine returns
@@ -9936,24 +9942,45 @@ SQLITE_API const char *sqlite3_vtab_collation(sqlite3_index_info*,int);
 **

  •
 ** ^(If the sqlite3_vtab_distinct() interface returns 2, that means
 ** that the query planner does not need the rows returned in any particular
-** order, as long as rows with the same values in all "aOrderBy" columns
-** are adjacent.)^ ^(Furthermore, only a single row for each particular
-** combination of values in the columns identified by the "aOrderBy" field
-** needs to be returned.)^ ^It is always ok for two or more rows with the same
-** values in all "aOrderBy" columns to be returned, as long as all such rows
-** are adjacent. ^The virtual table may, if it chooses, omit extra rows
-** that have the same value for all columns identified by "aOrderBy".
-** ^However omitting the extra rows is optional.
+** order, as long as rows with the same values in all columns identified
+** by "aOrderBy" are adjacent.)^ ^(Furthermore, when two or more rows
+** contain the same values for all columns identified by "colUsed", all but
+** one such row may optionally be omitted from the result.)^
+** The virtual table is not required to omit rows that are duplicates
+** over the "colUsed" columns, but if the virtual table can do that without
+** too much extra effort, it could potentially help the query to run faster.
+** This mode is used for a DISTINCT query.
 **

  •
-** ^(If the sqlite3_vtab_distinct() interface returns 3, that means
-** that the query planner needs only distinct rows but it does need the
-** rows to be sorted.)^ ^The virtual table implementation is free to omit
-** rows that are identical in all aOrderBy columns, if it wants to, but
-** it is not required to omit any rows. This mode is used for queries
+** ^(If the sqlite3_vtab_distinct() interface returns 3, that means the
+** virtual table must return rows in the order defined by "aOrderBy" as
+** if the sqlite3_vtab_distinct() interface had returned 0. However if
+** two or more rows in the result have the same values for all columns
+** identified by "colUsed", then all but one such row may optionally be
+** omitted.)^ Like when the return value is 2, the virtual table
+** is not required to omit rows that are duplicates over the "colUsed"
+** columns, but if the virtual table can do that without
+** too much extra effort, it could potentially help the query to run faster.
+** This mode is used for queries
 ** that have both DISTINCT and ORDER BY clauses.
 **
 **

+** The following table summarizes the conditions under which the
+** virtual table is allowed to set the "orderByConsumed" flag based on
+** the value returned by sqlite3_vtab_distinct(). This table is a
+** restatement of the previous four paragraphs:
+**
+**   sqlite3_vtab_distinct()   Rows are returned   Rows with the same value   Duplicates over all
+**   return value              in aOrderBy order   in all aOrderBy columns    colUsed columns may
+**                                                 are adjacent               be omitted
+**
+**   0                         yes                 yes                        no
+**   1                         no                  yes                        no
+**   2                         no                  yes                        yes
+**   3                         yes                 yes                        yes
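+**
+** As an illustration (an editor's example, not upstream text; pIdxInfo is
+** the sqlite3_index_info argument to xBestIndex): a virtual table that can
+** guarantee only that rows with equal aOrderBy values are adjacent, but not
+** a full sort, may still consume the ORDER BY when the return value is 2:
+**
+**       if( sqlite3_vtab_distinct(pIdxInfo)==2 ){
+**         pIdxInfo->orderByConsumed = 1;
+**       }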
    +** ** ^For the purposes of comparing virtual table output values to see if the ** values are same value for sorting purposes, two NULL values are considered ** to be the same. In other words, the comparison operator is "IS" @@ -11998,6 +12025,30 @@ SQLITE_API int sqlite3changegroup_schema(sqlite3_changegroup*, sqlite3*, const c */ SQLITE_API int sqlite3changegroup_add(sqlite3_changegroup*, int nData, void *pData); +/* +** CAPI3REF: Add A Single Change To A Changegroup +** METHOD: sqlite3_changegroup +** +** This function adds the single change currently indicated by the iterator +** passed as the second argument to the changegroup object. The rules for +** adding the change are just as described for [sqlite3changegroup_add()]. +** +** If the change is successfully added to the changegroup, SQLITE_OK is +** returned. Otherwise, an SQLite error code is returned. +** +** The iterator must point to a valid entry when this function is called. +** If it does not, SQLITE_ERROR is returned and no change is added to the +** changegroup. Additionally, the iterator must not have been opened with +** the SQLITE_CHANGESETAPPLY_INVERT flag. In this case SQLITE_ERROR is also +** returned. +*/ +SQLITE_API int sqlite3changegroup_add_change( + sqlite3_changegroup*, + sqlite3_changeset_iter* +); + + + /* ** CAPI3REF: Obtain A Composite Changeset From A Changegroup ** METHOD: sqlite3_changegroup @@ -12802,8 +12853,8 @@ struct Fts5PhraseIter { ** EXTENSION API FUNCTIONS ** ** xUserData(pFts): -** Return a copy of the context pointer the extension function was -** registered with. +** Return a copy of the pUserData pointer passed to the xCreateFunction() +** API when the extension function was registered. ** ** xColumnTotalSize(pFts, iCol, pnToken): ** If parameter iCol is less than zero, set output variable *pnToken diff --git a/src/database/sqlite/sqlite_aclk.c b/src/database/sqlite/sqlite_aclk.c index adbe4d9d3..b3f926e2f 100644 --- a/src/database/sqlite/sqlite_aclk.c +++ b/src/database/sqlite/sqlite_aclk.c @@ -3,7 +3,14 @@ #include "sqlite_functions.h" #include "sqlite_aclk.h" +void sanity_check(void) { + // make sure the compiler will stop on misconfigurations + BUILD_BUG_ON(WORKER_UTILIZATION_MAX_JOB_TYPES < ACLK_MAX_ENUMERATIONS_DEFINED); +} + #include "sqlite_aclk_node.h" +#include "../aclk_query_queue.h" +#include "../aclk_query.h" struct aclk_sync_config_s { uv_thread_t thread; @@ -11,16 +18,12 @@ struct aclk_sync_config_s { uv_timer_t timer_req; uv_async_t async; bool initialized; + mqtt_wss_client client; + int aclk_queries_running; SPINLOCK cmd_queue_lock; struct aclk_database_cmd *cmd_base; } aclk_sync_config = { 0 }; -void sanity_check(void) { - // make sure the compiler will stop on misconfigurations - BUILD_BUG_ON(WORKER_UTILIZATION_MAX_JOB_TYPES < ACLK_MAX_ENUMERATIONS_DEFINED); -} - -#ifdef ENABLE_ACLK static struct aclk_database_cmd aclk_database_deq_cmd(void) { struct aclk_database_cmd ret = { 0 }; @@ -39,7 +42,6 @@ static struct aclk_database_cmd aclk_database_deq_cmd(void) return ret; } -#endif static void aclk_database_enq_cmd(struct aclk_database_cmd *cmd) { @@ -165,14 +167,14 @@ static int create_host_callback(void *data, int argc, char **argv, char **column #ifdef NETDATA_INTERNAL_CHECKS char node_str[UUID_STR_LEN] = ""; - if (likely(host->node_id)) - uuid_unparse_lower(*host->node_id, node_str); - internal_error(true, "Adding archived host \"%s\" with GUID \"%s\" node id = \"%s\" ephemeral=%d", rrdhost_hostname(host), host->machine_guid, node_str, is_ephemeral); + if 
(likely(!UUIDiszero(host->node_id))) + uuid_unparse_lower(host->node_id.uuid, node_str); + internal_error(true, "Adding archived host \"%s\" with GUID \"%s\" node id = \"%s\" ephemeral=%d", + rrdhost_hostname(host), host->machine_guid, node_str, is_ephemeral); #endif return 0; } -#ifdef ENABLE_ACLK #define SQL_SELECT_ACLK_ALERT_TABLES \ "SELECT 'DROP '||type||' IF EXISTS '||name||';' FROM sqlite_schema WHERE name LIKE 'aclk_alert_%' AND type IN ('table', 'trigger', 'index')" @@ -204,8 +206,6 @@ fail: static void invalidate_host_last_connected(nd_uuid_t *host_uuid) { sqlite3_stmt *res = NULL; - if (!host_uuid) - return; if (!PREPARE_STATEMENT(db_meta, SQL_INVALIDATE_HOST_LAST_CONNECTED, &res)) return; @@ -291,13 +291,92 @@ static void timer_cb(uv_timer_t *handle) uv_update_time(handle->loop); struct aclk_database_cmd cmd = { 0 }; - if (aclk_connected) { + if (aclk_online_for_alerts()) { cmd.opcode = ACLK_DATABASE_PUSH_ALERT; aclk_database_enq_cmd(&cmd); aclk_check_node_info_and_collectors(); } } +struct aclk_query_payload { + uv_work_t request; + void *data; + struct aclk_sync_config_s *config; +}; + +static void after_aclk_run_query_job(uv_work_t *req, int status __maybe_unused) +{ + worker_is_busy(ACLK_QUERY_EXECUTE); + struct aclk_query_payload *payload = req->data; + struct aclk_sync_config_s *config = payload->config; + config->aclk_queries_running--; + freez(payload); +} + +static void aclk_run_query(struct aclk_sync_config_s *config, aclk_query_t query) +{ + if (query->type == UNKNOWN || query->type >= ACLK_QUERY_TYPE_COUNT) { + error_report("Unknown query in query queue. %u", query->type); + return; + } + + if (query->type == HTTP_API_V2) { + http_api_v2(config->client, query); + } else { + send_bin_msg(config->client, query); + } + aclk_query_free(query); +} + +static void aclk_run_query_job(uv_work_t *req) +{ + struct aclk_query_payload *payload = req->data; + struct aclk_sync_config_s *config = payload->config; + aclk_query_t query = (aclk_query_t) payload->data; + + aclk_run_query(config, query); +} + +static int read_query_thread_count() +{ + int threads = MIN(get_netdata_cpus()/2, 6); + threads = MAX(threads, 2); + threads = config_get_number(CONFIG_SECTION_CLOUD, "query thread count", threads); + if(threads < 1) { + netdata_log_error("You need at least one query thread. Overriding configured setting of \"%d\"", threads); + threads = 1; + config_set_number(CONFIG_SECTION_CLOUD, "query thread count", threads); + } + else { + if (threads > libuv_worker_threads / 2) { + threads = MAX(libuv_worker_threads / 2, 2); + config_set_number(CONFIG_SECTION_CLOUD, "query thread count", threads); + } + } + return threads; +} + +static void node_update_timer_cb(uv_timer_t *handle) +{ + struct aclk_sync_cfg_t *ahc = handle->data; + RRDHOST *host = ahc->host; + + spinlock_lock(&host->receiver_lock); + int live = (host == localhost || host->receiver || !(rrdhost_flag_check(host, RRDHOST_FLAG_ORPHAN))) ? 
1 : 0; + spinlock_unlock(&host->receiver_lock); + nd_log(NDLS_ACLK, NDLP_DEBUG,"Timer: Sending node update info for %s, LIVE = %d", rrdhost_hostname(host), live); + aclk_host_state_update(host, live, 1); +} + +static void close_callback(uv_handle_t *handle, void *data __maybe_unused) +{ + if (handle->type == UV_TIMER) { + uv_timer_stop((uv_timer_t *)handle); + } + + uv_close(handle, NULL); // Automatically close and free the handle +} + static void aclk_synchronization(void *arg) { struct aclk_sync_config_s *config = arg; @@ -309,6 +388,8 @@ static void aclk_synchronization(void *arg) worker_register_job_name(ACLK_DATABASE_NODE_STATE, "node state"); worker_register_job_name(ACLK_DATABASE_PUSH_ALERT, "alert push"); worker_register_job_name(ACLK_DATABASE_PUSH_ALERT_CONFIG, "alert conf push"); + worker_register_job_name(ACLK_QUERY_EXECUTE, "query execute"); + worker_register_job_name(ACLK_QUERY_EXECUTE_SYNC, "query execute sync"); worker_register_job_name(ACLK_DATABASE_TIMER, "timer"); uv_loop_t *loop = &config->loop; @@ -325,7 +406,10 @@ static void aclk_synchronization(void *arg) sql_delete_aclk_table_list(); - while (likely(service_running(SERVICE_ACLKSYNC))) { + int query_thread_count = read_query_thread_count(); + netdata_log_info("Starting ACLK synchronization thread with %d parallel query threads", query_thread_count); + + while (likely(service_running(SERVICE_ACLK))) { enum aclk_database_opcode opcode; worker_is_idle(); uv_run(loop, UV_RUN_DEFAULT); @@ -334,27 +418,54 @@ static void aclk_synchronization(void *arg) do { struct aclk_database_cmd cmd = aclk_database_deq_cmd(); - if (unlikely(!service_running(SERVICE_ACLKSYNC))) + if (unlikely(!service_running(SERVICE_ACLK))) break; opcode = cmd.opcode; - if(likely(opcode != ACLK_DATABASE_NOOP)) + if(likely(opcode != ACLK_DATABASE_NOOP && opcode != ACLK_QUERY_EXECUTE)) worker_is_busy(opcode); switch (opcode) { - default: case ACLK_DATABASE_NOOP: /* the command queue was empty, do nothing */ break; // NODE STATE case ACLK_DATABASE_NODE_STATE:; RRDHOST *host = cmd.param[0]; - int live = (host == localhost || host->receiver || !(rrdhost_flag_check(host, RRDHOST_FLAG_ORPHAN))) ? 1 : 0; struct aclk_sync_cfg_t *ahc = host->aclk_config; - if (unlikely(!ahc)) - create_aclk_config(host, &host->host_uuid, host->node_id); + if (unlikely(!ahc)) { + create_aclk_config(host, &host->host_id.uuid, &host->node_id.uuid); + ahc = host->aclk_config; + } + + if (ahc) { + uint64_t schedule_time = (uint64_t)(uintptr_t)cmd.param[1]; + if (!ahc->timer_initialized) { + int rc = uv_timer_init(loop, &ahc->timer); + if (!rc) { + ahc->timer_initialized = true; + ahc->timer.data = ahc; + } + } + + if (ahc->timer_initialized) { + if (uv_is_active((uv_handle_t *)&ahc->timer)) + uv_timer_stop(&ahc->timer); + + ahc->timer.data = ahc; + int rc = uv_timer_start(&ahc->timer, node_update_timer_cb, schedule_time, 0); + if (!rc) + break; // Timer started, exit + } + } + + // This is fallback if timer fails + spinlock_lock(&host->receiver_lock); + int live = (host == localhost || host->receiver || !(rrdhost_flag_check(host, RRDHOST_FLAG_ORPHAN))) ? 
1 : 0; + spinlock_unlock(&host->receiver_lock); aclk_host_state_update(host, live, 1); + nd_log(NDLS_ACLK, NDLP_DEBUG,"Sending node update info for %s, LIVE = %d", rrdhost_hostname(host), live); break; case ACLK_DATABASE_NODE_UNREGISTER: sql_unregister_node(cmd.param[0]); @@ -366,14 +477,49 @@ static void aclk_synchronization(void *arg) case ACLK_DATABASE_PUSH_ALERT: aclk_push_alert_events_for_all_hosts(); break; + + case ACLK_MQTT_WSS_CLIENT: + config->client = (mqtt_wss_client) cmd.param[0]; + break; + + case ACLK_QUERY_EXECUTE:; + aclk_query_t query = (aclk_query_t)cmd.param[0]; + + struct aclk_query_payload *payload = NULL; + config->aclk_queries_running++; + bool execute_now = (config->aclk_queries_running > query_thread_count); + if (!execute_now) { + payload = mallocz(sizeof(*payload)); + payload->request.data = payload; + payload->config = config; + payload->data = query; + execute_now = uv_queue_work(loop, &payload->request, aclk_run_query_job, after_aclk_run_query_job); + } + + if (execute_now) { + worker_is_busy(ACLK_QUERY_EXECUTE_SYNC); + aclk_run_query(config, query); + freez(payload); + config->aclk_queries_running--; + } + break; + + default: + break; } } while (opcode != ACLK_DATABASE_NOOP); } + config->initialized = false; if (!uv_timer_stop(&config->timer_req)) uv_close((uv_handle_t *)&config->timer_req, NULL); uv_close((uv_handle_t *)&config->async, NULL); + uv_run(loop, UV_RUN_DEFAULT); + + uv_walk(loop, (uv_walk_cb) close_callback, NULL); + uv_run(loop, UV_RUN_DEFAULT); + (void) uv_loop_close(loop); worker_unregister(); @@ -386,13 +532,11 @@ static void aclk_synchronization_init(void) memset(&aclk_sync_config, 0, sizeof(aclk_sync_config)); fatal_assert(0 == uv_thread_create(&aclk_sync_config.thread, aclk_synchronization, &aclk_sync_config)); } -#endif // ------------------------------------------------------------- void create_aclk_config(RRDHOST *host __maybe_unused, nd_uuid_t *host_uuid __maybe_unused, nd_uuid_t *node_id __maybe_unused) { -#ifdef ENABLE_ACLK if (!host || host->aclk_config) return; @@ -402,16 +546,14 @@ void create_aclk_config(RRDHOST *host __maybe_unused, nd_uuid_t *host_uuid __may uuid_unparse_lower(*node_id, wc->node_id); host->aclk_config = wc; - if (node_id && !host->node_id) { - host->node_id = mallocz(sizeof(*host->node_id)); - uuid_copy(*host->node_id, *node_id); + if (node_id && UUIDiszero(host->node_id)) { + uuid_copy(host->node_id.uuid, *node_id); } wc->host = host; wc->stream_alerts = false; time_t now = now_realtime_sec(); wc->node_info_send_time = (host == localhost || NULL == localhost) ? 
now - 25 : now; -#endif } #define SQL_FETCH_ALL_HOSTS \ @@ -447,7 +589,6 @@ void sql_aclk_sync_init(void) // Trigger host context load for hosts that have been created metadata_queue_load_host_context(NULL); -#ifdef ENABLE_ACLK if (!number_of_children) aclk_queue_node_info(localhost, true); @@ -460,7 +601,6 @@ void sql_aclk_sync_init(void) aclk_synchronization_init(); netdata_log_info("ACLK sync initialization completed"); -#endif } static inline void queue_aclk_sync_cmd(enum aclk_database_opcode opcode, const void *param0, const void *param1) @@ -481,20 +621,30 @@ void aclk_push_alert_config(const char *node_id, const char *config_hash) queue_aclk_sync_cmd(ACLK_DATABASE_PUSH_ALERT_CONFIG, strdupz(node_id), strdupz(config_hash)); } -void schedule_node_info_update(RRDHOST *host __maybe_unused) +void aclk_execute_query(aclk_query_t query) { -#ifdef ENABLE_ACLK - if (unlikely(!host)) + if (unlikely(!aclk_sync_config.initialized)) return; - queue_aclk_sync_cmd(ACLK_DATABASE_NODE_STATE, host, NULL); -#endif + + queue_aclk_sync_cmd(ACLK_QUERY_EXECUTE, query, NULL); +} + +void aclk_query_init(mqtt_wss_client client) { + + queue_aclk_sync_cmd(ACLK_MQTT_WSS_CLIENT, client, NULL); +} + +void schedule_node_state_update(RRDHOST *host, uint64_t delay) +{ + if (unlikely(!aclk_sync_config.initialized || !host)) + return; + + queue_aclk_sync_cmd(ACLK_DATABASE_NODE_STATE, host, (void *)(uintptr_t)delay); } -#ifdef ENABLE_ACLK void unregister_node(const char *machine_guid) { if (unlikely(!machine_guid)) return; queue_aclk_sync_cmd(ACLK_DATABASE_NODE_UNREGISTER, strdupz(machine_guid), NULL); } -#endif diff --git a/src/database/sqlite/sqlite_aclk.h b/src/database/sqlite/sqlite_aclk.h index ec8cfa9dd..90e980ad3 100644 --- a/src/database/sqlite/sqlite_aclk.h +++ b/src/database/sqlite/sqlite_aclk.h @@ -15,17 +15,15 @@ static inline int uuid_parse_fix(char *in, nd_uuid_t uuid) return uuid_parse(in, uuid); } -static inline int claimed() -{ - return localhost->aclk_state.claimed_id != NULL; -} - enum aclk_database_opcode { ACLK_DATABASE_NOOP = 0, ACLK_DATABASE_NODE_STATE, ACLK_DATABASE_PUSH_ALERT, ACLK_DATABASE_PUSH_ALERT_CONFIG, ACLK_DATABASE_NODE_UNREGISTER, + ACLK_MQTT_WSS_CLIENT, + ACLK_QUERY_EXECUTE, + ACLK_QUERY_EXECUTE_SYNC, ACLK_DATABASE_TIMER, // leave this last @@ -41,6 +39,8 @@ struct aclk_database_cmd { typedef struct aclk_sync_cfg_t { RRDHOST *host; + uv_timer_t timer; + bool timer_initialized; int8_t send_snapshot; bool stream_alerts; int alert_count; @@ -55,9 +55,7 @@ typedef struct aclk_sync_cfg_t { void create_aclk_config(RRDHOST *host, nd_uuid_t *host_uuid, nd_uuid_t *node_id); void sql_aclk_sync_init(void); void aclk_push_alert_config(const char *node_id, const char *config_hash); -void schedule_node_info_update(RRDHOST *host); -#ifdef ENABLE_ACLK +void schedule_node_state_update(RRDHOST *host, uint64_t delay); void unregister_node(const char *machine_guid); -#endif #endif //NETDATA_SQLITE_ACLK_H diff --git a/src/database/sqlite/sqlite_aclk_alert.c b/src/database/sqlite/sqlite_aclk_alert.c index dbe5a5045..4aa640484 100644 --- a/src/database/sqlite/sqlite_aclk_alert.c +++ b/src/database/sqlite/sqlite_aclk_alert.c @@ -3,7 +3,6 @@ #include "sqlite_functions.h" #include "sqlite_aclk_alert.h" -#ifdef ENABLE_ACLK #include "../../aclk/aclk_alarm_api.h" #define SQLITE3_COLUMN_STRDUPZ_OR_NULL(res, param) \ @@ -116,14 +115,14 @@ static int insert_alert_to_submit_queue(RRDHOST *host, int64_t health_log_id, ui return 1; } - if (is_event_from_alert_variable_config(unique_id, &host->host_uuid)) + if 
(is_event_from_alert_variable_config(unique_id, &host->host_id.uuid)) return 2; if (!PREPARE_COMPILED_STATEMENT(db_meta, SQL_QUEUE_ALERT_TO_CLOUD, &res)) return -1; int param = 0; - SQLITE_BIND_FAIL(done, sqlite3_bind_blob(res, ++param, &host->host_uuid, sizeof(host->host_uuid), SQLITE_STATIC)); + SQLITE_BIND_FAIL(done, sqlite3_bind_blob(res, ++param, &host->host_id.uuid, sizeof(host->host_id.uuid), SQLITE_STATIC)); SQLITE_BIND_FAIL(done, sqlite3_bind_int64(res, ++param, health_log_id)); SQLITE_BIND_FAIL(done, sqlite3_bind_int64(res, ++param, (int64_t) unique_id)); @@ -152,7 +151,7 @@ static int delete_alert_from_submit_queue(RRDHOST *host, int64_t first_seq_id, i return -1; int param = 0; - SQLITE_BIND_FAIL(done, sqlite3_bind_blob(res, ++param, &host->host_uuid, sizeof(host->host_uuid), SQLITE_STATIC)); + SQLITE_BIND_FAIL(done, sqlite3_bind_blob(res, ++param, &host->host_id.uuid, sizeof(host->host_id.uuid), SQLITE_STATIC)); SQLITE_BIND_FAIL(done, sqlite3_bind_int64(res, ++param, first_seq_id)); SQLITE_BIND_FAIL(done, sqlite3_bind_int64(res, ++param, last_seq_id)); @@ -266,7 +265,7 @@ static void commit_alert_events(RRDHOST *host) return; int param = 0; - SQLITE_BIND_FAIL(done, sqlite3_bind_blob(res, ++param, &host->host_uuid, sizeof(host->host_uuid), SQLITE_STATIC)); + SQLITE_BIND_FAIL(done, sqlite3_bind_blob(res, ++param, &host->host_id.uuid, sizeof(host->host_id.uuid), SQLITE_STATIC)); int64_t first_sequence_id = 0; int64_t last_sequence_id = 0; @@ -424,27 +423,25 @@ void health_alarm_log_populate( static void aclk_push_alert_event(RRDHOST *host __maybe_unused) { + CLAIM_ID claim_id = claim_id_get(); - char *claim_id = get_agent_claimid(); - if (!claim_id || !host->node_id) + if (!claim_id_is_set(claim_id) || UUIDiszero(host->node_id)) return; sqlite3_stmt *res = NULL; - if (!PREPARE_STATEMENT(db_meta, SQL_SELECT_ALERT_TO_PUSH, &res)) { - freez(claim_id); + if (!PREPARE_STATEMENT(db_meta, SQL_SELECT_ALERT_TO_PUSH, &res)) return; - } int param = 0; - SQLITE_BIND_FAIL(done, sqlite3_bind_blob(res, ++param, &host->host_uuid, sizeof(host->host_uuid), SQLITE_STATIC)); + SQLITE_BIND_FAIL(done, sqlite3_bind_blob(res, ++param, &host->host_id.uuid, sizeof(host->host_id.uuid), SQLITE_STATIC)); char node_id_str[UUID_STR_LEN]; - uuid_unparse_lower(*host->node_id, node_id_str); + uuid_unparse_lower(host->node_id.uuid, node_id_str); struct alarm_log_entry alarm_log; alarm_log.node_id = node_id_str; - alarm_log.claim_id = claim_id; + alarm_log.claim_id = claim_id.str; int64_t first_id = 0; int64_t last_id = 0; @@ -484,8 +481,6 @@ static void aclk_push_alert_event(RRDHOST *host __maybe_unused) done: REPORT_BIND_FAIL(res, param); SQLITE_FINALIZE(res); - - freez(claim_id); } #define SQL_DELETE_PROCESSED_ROWS "DELETE FROM alert_queue WHERE host_id = @host_id AND rowid = @row" @@ -498,7 +493,7 @@ static void delete_alert_from_pending_queue(RRDHOST *host, int64_t row) return; int param = 0; - SQLITE_BIND_FAIL(done, sqlite3_bind_blob(res, ++param, &host->host_uuid, sizeof(host->host_uuid), SQLITE_STATIC)); + SQLITE_BIND_FAIL(done, sqlite3_bind_blob(res, ++param, &host->host_id.uuid, sizeof(host->host_id.uuid), SQLITE_STATIC)); SQLITE_BIND_FAIL(done, sqlite3_bind_int64(res, ++param, row)); param = 0; @@ -528,7 +523,7 @@ void rebuild_host_alert_version_table(RRDHOST *host) return; int param = 0; - SQLITE_BIND_FAIL(done, sqlite3_bind_blob(res, ++param, &host->host_uuid, sizeof(host->host_uuid), SQLITE_STATIC)); + SQLITE_BIND_FAIL(done, sqlite3_bind_blob(res, ++param, &host->host_id.uuid, 
sizeof(host->host_id.uuid), SQLITE_STATIC)); param = 0; int rc = execute_insert(res); @@ -541,7 +536,7 @@ void rebuild_host_alert_version_table(RRDHOST *host) if (!PREPARE_STATEMENT(db_meta, SQL_REBUILD_HOST_ALERT_VERSION_TABLE, &res)) return; - SQLITE_BIND_FAIL(done, sqlite3_bind_blob(res, ++param, &host->host_uuid, sizeof(host->host_uuid), SQLITE_STATIC)); + SQLITE_BIND_FAIL(done, sqlite3_bind_blob(res, ++param, &host->host_id.uuid, sizeof(host->host_id.uuid), SQLITE_STATIC)); param = 0; rc = execute_insert(res); @@ -566,7 +561,7 @@ bool process_alert_pending_queue(RRDHOST *host) int param = 0; int added =0, count = 0; - SQLITE_BIND_FAIL(done, sqlite3_bind_blob(res, ++param, &host->host_uuid, sizeof(host->host_uuid), SQLITE_STATIC)); + SQLITE_BIND_FAIL(done, sqlite3_bind_blob(res, ++param, &host->host_id.uuid, sizeof(host->host_id.uuid), SQLITE_STATIC)); param = 0; while (sqlite3_step_monitored(res) == SQLITE_ROW) { @@ -777,7 +772,7 @@ static uint64_t calculate_node_alert_version(RRDHOST *host) uint64_t version = 0; int param = 0; - SQLITE_BIND_FAIL(done, sqlite3_bind_blob(res, ++param, &host->host_uuid, sizeof(host->host_uuid), SQLITE_STATIC)); + SQLITE_BIND_FAIL(done, sqlite3_bind_blob(res, ++param, &host->host_id.uuid, sizeof(host->host_id.uuid), SQLITE_STATIC)); param = 0; while (sqlite3_step_monitored(res) == SQLITE_ROW) { @@ -792,6 +787,16 @@ done: static void schedule_alert_snapshot_if_needed(struct aclk_sync_cfg_t *wc, uint64_t cloud_version) { + if (cloud_version == 1) { + nd_log( + NDLS_ACCESS, + NDLP_NOTICE, + "Cloud requested to skip alert version verification for host \"%s\", node \"%s\"", + rrdhost_hostname(wc->host), + wc->node_id); + return; + } + uint64_t local_version = calculate_node_alert_version(wc->host); if (local_version != cloud_version) { nd_log( @@ -899,23 +904,21 @@ void send_alert_snapshot_to_cloud(RRDHOST *host __maybe_unused) return; } - char *claim_id = get_agent_claimid(); - if (unlikely(!claim_id)) + CLAIM_ID claim_id = claim_id_get(); + if (unlikely(!claim_id_is_set(claim_id))) return; - // Check database for this node to see how many alerts we will need to put in the snapshot - int cnt = calculate_alert_snapshot_entries(&host->host_uuid); - if (!cnt) { - freez(claim_id); + // Check the database for this node to see how many alerts we will need to put in the snapshot + int cnt = calculate_alert_snapshot_entries(&host->host_id.uuid); + if (!cnt) return; - } sqlite3_stmt *res = NULL; if (!PREPARE_STATEMENT(db_meta, SQL_GET_SNAPSHOT_ENTRIES, &res)) return; int param = 0; - SQLITE_BIND_FAIL(done, sqlite3_bind_blob(res, ++param, &host->host_uuid, sizeof(host->host_uuid), SQLITE_STATIC)); + SQLITE_BIND_FAIL(done, sqlite3_bind_blob(res, ++param, &host->host_id.uuid, sizeof(host->host_id.uuid), SQLITE_STATIC)); nd_uuid_t local_snapshot_uuid; char snapshot_uuid_str[UUID_STR_LEN]; @@ -935,13 +938,13 @@ void send_alert_snapshot_to_cloud(RRDHOST *host __maybe_unused) struct alarm_log_entry alarm_log; alarm_snap.node_id = wc->node_id; - alarm_snap.claim_id = claim_id; + alarm_snap.claim_id = claim_id.str; alarm_snap.snapshot_uuid = snapshot_uuid; alarm_snap.chunks = chunks; alarm_snap.chunk = 1; alarm_log.node_id = wc->node_id; - alarm_log.claim_id = claim_id; + alarm_log.claim_id = claim_id.str; cnt = 0; param = 0; @@ -960,7 +963,7 @@ void send_alert_snapshot_to_cloud(RRDHOST *host __maybe_unused) version += alarm_log.version; if (cnt == ALARM_EVENTS_PER_CHUNK) { - if (aclk_connected) + if (aclk_online_for_alerts()) aclk_send_alarm_snapshot(snapshot_proto); cnt = 
0; if (alarm_snap.chunk < chunks) { @@ -986,8 +989,6 @@ void send_alert_snapshot_to_cloud(RRDHOST *host __maybe_unused) done: REPORT_BIND_FAIL(res, param); SQLITE_FINALIZE(res); - - freez(claim_id); } // Start streaming alerts @@ -1021,25 +1022,24 @@ void aclk_alert_version_check(char *node_id, char *claim_id, uint64_t cloud_vers { nd_uuid_t node_uuid; - if (unlikely(!node_id || !claim_id || !claimed() || uuid_parse(node_id, node_uuid))) + if (unlikely(!node_id || !claim_id || !is_agent_claimed() || uuid_parse(node_id, node_uuid))) return; - char *agent_claim_id = get_agent_claimid(); - if (claim_id && agent_claim_id && strcmp(agent_claim_id, claim_id) != 0) { - nd_log(NDLS_ACCESS, NDLP_NOTICE, "ACLK REQ [%s (N/A)]: ALERTS CHECKPOINT VALIDATION REQUEST RECEIVED WITH INVALID CLAIM ID", node_id); - goto done; + CLAIM_ID agent_claim_id = claim_id_get(); + if (claim_id && claim_id_is_set(agent_claim_id) && strcmp(agent_claim_id.str, claim_id) != 0) { + nd_log(NDLS_ACCESS, NDLP_NOTICE, + "ACLK REQ [%s (N/A)]: ALERTS CHECKPOINT VALIDATION REQUEST RECEIVED WITH INVALID CLAIM ID", + node_id); + return; } struct aclk_sync_cfg_t *wc; RRDHOST *host = find_host_by_node_id(node_id); if ((!host || !(wc = host->aclk_config))) - nd_log(NDLS_ACCESS, NDLP_NOTICE, "ACLK REQ [%s (N/A)]: ALERTS CHECKPOINT VALIDATION REQUEST RECEIVED FOR INVALID NODE", node_id); + nd_log(NDLS_ACCESS, NDLP_NOTICE, + "ACLK REQ [%s (N/A)]: ALERTS CHECKPOINT VALIDATION REQUEST RECEIVED FOR INVALID NODE", + node_id); else schedule_alert_snapshot_if_needed(wc, cloud_version); - -done: - freez(agent_claim_id); } - -#endif diff --git a/src/database/sqlite/sqlite_aclk_node.c b/src/database/sqlite/sqlite_aclk_node.c index 411b8bd70..f5816465e 100644 --- a/src/database/sqlite/sqlite_aclk_node.c +++ b/src/database/sqlite/sqlite_aclk_node.c @@ -6,8 +6,6 @@ #include "../../aclk/aclk_contexts_api.h" #include "../../aclk/aclk_capas.h" -#ifdef ENABLE_ACLK - DICTIONARY *collectors_from_charts(RRDHOST *host, DICTIONARY *dict) { RRDSET *st; char name[500]; @@ -32,16 +30,18 @@ static void build_node_collectors(RRDHOST *host) struct update_node_collectors upd_node_collectors; DICTIONARY *dict = dictionary_create(DICT_OPTION_SINGLE_THREADED); + CLAIM_ID claim_id = claim_id_get(); upd_node_collectors.node_id = wc->node_id; - upd_node_collectors.claim_id = get_agent_claimid(); + upd_node_collectors.claim_id = claim_id_is_set(claim_id) ? claim_id.str : NULL; upd_node_collectors.node_collectors = collectors_from_charts(host, dict); aclk_update_node_collectors(&upd_node_collectors); dictionary_destroy(dict); - freez(upd_node_collectors.claim_id); - nd_log(NDLS_ACCESS, NDLP_DEBUG, "ACLK RES [%s (%s)]: NODE COLLECTORS SENT", wc->node_id, rrdhost_hostname(host)); + nd_log(NDLS_ACCESS, NDLP_DEBUG, + "ACLK RES [%s (%s)]: NODE COLLECTORS SENT", + wc->node_id, rrdhost_hostname(host)); } static void build_node_info(RRDHOST *host) @@ -50,9 +50,11 @@ static void build_node_info(RRDHOST *host) struct aclk_sync_cfg_t *wc = host->aclk_config; + CLAIM_ID claim_id = claim_id_get(); + rrd_rdlock(); node_info.node_id = wc->node_id; - node_info.claim_id = get_agent_claimid(); + node_info.claim_id = claim_id_is_set(claim_id) ? 
claim_id.str : NULL; node_info.machine_guid = host->machine_guid; node_info.child = (host != localhost); node_info.ml_info.ml_capable = ml_capable(); @@ -64,11 +66,11 @@ static void build_node_info(RRDHOST *host) char *host_version = NULL; if (host != localhost) { - netdata_mutex_lock(&host->receiver_lock); + spinlock_lock(&host->receiver_lock); host_version = strdupz( host->receiver && host->receiver->program_version ? host->receiver->program_version : rrdhost_program_version(host)); - netdata_mutex_unlock(&host->receiver_lock); + spinlock_unlock(&host->receiver_lock); } node_info.data.name = rrdhost_hostname(host); @@ -108,7 +110,6 @@ static void build_node_info(RRDHOST *host) host == localhost ? "parent" : "child"); rrd_rdunlock(); - freez(node_info.claim_id); freez(node_info.node_instance_capabilities); freez(host_version); @@ -133,7 +134,7 @@ void aclk_check_node_info_and_collectors(void) { RRDHOST *host; - if (unlikely(!aclk_connected)) + if (unlikely(!aclk_online_for_nodes())) return; size_t context_loading = 0; @@ -153,6 +154,9 @@ void aclk_check_node_info_and_collectors(void) continue; } + if (!wc->node_info_send_time && !wc->node_collectors_send) + continue; + if (unlikely(host_is_replicating(host))) { internal_error(true, "ACLK SYNC: Host %s is still replicating", rrdhost_hostname(host)); replicating++; @@ -167,12 +171,7 @@ void aclk_check_node_info_and_collectors(void) if (pp_queue_empty && wc->node_info_send_time && wc->node_info_send_time + 30 < now) { wc->node_info_send_time = 0; build_node_info(host); - if (netdata_cloud_enabled) { - netdata_mutex_lock(&host->receiver_lock); - int live = (host == localhost || host->receiver || !(rrdhost_flag_check(host, RRDHOST_FLAG_ORPHAN))) ? 1 : 0; - netdata_mutex_unlock(&host->receiver_lock); - aclk_host_state_update(host, live, 1); - } + schedule_node_state_update(host, 10000); internal_error(true, "ACLK SYNC: Sending node info for %s", rrdhost_hostname(host)); } @@ -196,5 +195,3 @@ void aclk_check_node_info_and_collectors(void) context_pp); } } - -#endif diff --git a/src/database/sqlite/sqlite_functions.c b/src/database/sqlite/sqlite_functions.c index e62743f59..f48401e33 100644 --- a/src/database/sqlite/sqlite_functions.c +++ b/src/database/sqlite/sqlite_functions.c @@ -140,6 +140,10 @@ int configure_sqlite_database(sqlite3 *database, int target_version, const char if (init_database_batch(database, list, description)) return 1; + snprintfz(buf, sizeof(buf) - 1, "PRAGMA optimize=0x10002"); + if (init_database_batch(database, list, description)) + return 1; + return 0; } @@ -248,14 +252,16 @@ int db_execute(sqlite3 *db, const char *cmd) int cnt = 0; while (cnt < SQL_MAX_RETRY) { - char *err_msg; + char *err_msg = NULL; rc = sqlite3_exec_monitored(db, cmd, 0, 0, &err_msg); if (likely(rc == SQLITE_OK)) break; ++cnt; - error_report("Failed to execute '%s', rc = %d (%s) -- attempt %d", cmd, rc, err_msg, cnt); - sqlite3_free(err_msg); + error_report("Failed to execute '%s', rc = %d (%s) -- attempt %d", cmd, rc, err_msg ? 
err_msg : "unknown", cnt); + if (err_msg) { + sqlite3_free(err_msg); + } if (likely(rc == SQLITE_BUSY || rc == SQLITE_LOCKED)) { usleep(SQLITE_INSERT_DELAY * USEC_PER_MS); @@ -338,7 +344,6 @@ void sql_close_database(sqlite3 *database, const char *database_name) if (unlikely(!database)) return; - (void) db_execute(database, "PRAGMA analysis_limit=10000"); (void) db_execute(database, "PRAGMA optimize"); netdata_log_info("%s: Closing sqlite database", database_name); diff --git a/src/database/sqlite/sqlite_health.c b/src/database/sqlite/sqlite_health.c index b3ad12857..44cd644d8 100644 --- a/src/database/sqlite/sqlite_health.c +++ b/src/database/sqlite/sqlite_health.c @@ -134,7 +134,6 @@ int calculate_delay(RRDCALC_STATUS old_status, RRDCALC_STATUS new_status) return delay; } -#ifdef ENABLE_ACLK #define SQL_INSERT_ALERT_PENDING_QUEUE \ "INSERT INTO alert_queue (host_id, health_log_id, unique_id, alarm_id, status, date_scheduled)" \ " VALUES (@host_id, @health_log_id, @unique_id, @alarm_id, @new_status, UNIXEPOCH() + @delay)" \ @@ -162,7 +161,7 @@ static void insert_alert_queue( int submit_delay = calculate_delay(old_status, new_status); int param = 0; - SQLITE_BIND_FAIL(done, sqlite3_bind_blob(res, ++param, &host->host_uuid, sizeof(host->host_uuid), SQLITE_STATIC)); + SQLITE_BIND_FAIL(done, sqlite3_bind_blob(res, ++param, &host->host_id.uuid, sizeof(host->host_id.uuid), SQLITE_STATIC)); SQLITE_BIND_FAIL(done, sqlite3_bind_int64(res, ++param, (sqlite3_int64)health_log_id)); SQLITE_BIND_FAIL(done, sqlite3_bind_int64(res, ++param, unique_id)); SQLITE_BIND_FAIL(done, sqlite3_bind_int64(res, ++param, alarm_id)); @@ -179,7 +178,6 @@ done: REPORT_BIND_FAIL(res, param); SQLITE_RESET(res); } -#endif #define SQL_INSERT_HEALTH_LOG_DETAIL \ "INSERT INTO health_log_detail (health_log_id, unique_id, alarm_id, alarm_event_id, " \ @@ -255,7 +253,7 @@ static void sql_health_alarm_log_insert(RRDHOST *host, ALARM_ENTRY *ae) return; int param = 0; - SQLITE_BIND_FAIL(done, sqlite3_bind_blob(res, ++param, &host->host_uuid, sizeof(host->host_uuid), SQLITE_STATIC)); + SQLITE_BIND_FAIL(done, sqlite3_bind_blob(res, ++param, &host->host_id.uuid, sizeof(host->host_id.uuid), SQLITE_STATIC)); SQLITE_BIND_FAIL(done, sqlite3_bind_int64(res, ++param, (sqlite3_int64) ae->alarm_id)); SQLITE_BIND_FAIL(done, sqlite3_bind_blob(res, ++param, &ae->config_hash_id, sizeof(ae->config_hash_id), SQLITE_STATIC)); SQLITE_BIND_FAIL(done, SQLITE3_BIND_STRING_OR_NULL(res, ++param, ae->name)); @@ -272,11 +270,8 @@ static void sql_health_alarm_log_insert(RRDHOST *host, ALARM_ENTRY *ae) if (rc == SQLITE_ROW) { health_log_id = (size_t)sqlite3_column_int64(res, 0); sql_health_alarm_log_insert_detail(host, health_log_id, ae); -#ifdef ENABLE_ACLK - if (netdata_cloud_enabled) - insert_alert_queue( - host, health_log_id, (int64_t)ae->unique_id, (int64_t)ae->alarm_id, ae->old_status, ae->new_status); -#endif + insert_alert_queue( + host, health_log_id, (int64_t)ae->unique_id, (int64_t)ae->alarm_id, ae->old_status, ae->new_status); } else error_report("HEALTH [%s]: Failed to execute SQL_INSERT_HEALTH_LOG, rc = %d", rrdhost_hostname(host), rc); @@ -315,8 +310,8 @@ void sql_health_alarm_log_cleanup(RRDHOST *host) return; int param = 0; - SQLITE_BIND_FAIL(done, sqlite3_bind_blob(res, ++param, &host->host_uuid, sizeof(host->host_uuid), SQLITE_STATIC)); - SQLITE_BIND_FAIL(done, sqlite3_bind_int64(res, ++param, (sqlite3_int64)host->health_log.health_log_history)); + SQLITE_BIND_FAIL(done, sqlite3_bind_blob(res, ++param, &host->host_id.uuid, 
sizeof(host->host_id.uuid), SQLITE_STATIC)); + SQLITE_BIND_FAIL(done, sqlite3_bind_int64(res, ++param, (sqlite3_int64)host->health_log.health_log_retention_s)); param = 0; rc = sqlite3_step_monitored(res); @@ -344,7 +339,7 @@ bool sql_update_transition_in_health_log(RRDHOST *host, uint32_t alarm_id, nd_uu SQLITE_BIND_FAIL(done, sqlite3_bind_blob(res, ++param, transition_id, sizeof(*transition_id), SQLITE_STATIC)); SQLITE_BIND_FAIL(done, sqlite3_bind_int64(res, ++param, (sqlite3_int64)alarm_id)); SQLITE_BIND_FAIL(done, sqlite3_bind_blob(res, ++param, last_transition, sizeof(*last_transition), SQLITE_STATIC)); - SQLITE_BIND_FAIL(done, sqlite3_bind_blob(res, ++param, &host->host_uuid, sizeof(host->host_uuid), SQLITE_STATIC)); + SQLITE_BIND_FAIL(done, sqlite3_bind_blob(res, ++param, &host->host_id.uuid, sizeof(host->host_id.uuid), SQLITE_STATIC)); param = 0; rc = execute_insert(res); @@ -432,14 +427,10 @@ static void sql_inject_removed_status( //update the old entry in health_log sql_update_transition_in_health_log(host, alarm_id, &transition_id, last_transition); -#ifdef ENABLE_ACLK - if (netdata_cloud_enabled) { - int64_t health_log_id = sqlite3_column_int64(res, 0); - RRDCALC_STATUS old_status = (RRDCALC_STATUS)sqlite3_column_double(res, 1); - insert_alert_queue( - host, health_log_id, (int64_t)max_unique_id, (int64_t)alarm_id, old_status, RRDCALC_STATUS_REMOVED); - } -#endif + int64_t health_log_id = sqlite3_column_int64(res, 0); + RRDCALC_STATUS old_status = (RRDCALC_STATUS)sqlite3_column_double(res, 1); + insert_alert_queue( + host, health_log_id, (int64_t)max_unique_id, (int64_t)alarm_id, old_status, RRDCALC_STATUS_REMOVED); } done: @@ -461,7 +452,7 @@ uint32_t sql_get_max_unique_id (RRDHOST *host) return 0; int param = 0; - SQLITE_BIND_FAIL(done, sqlite3_bind_blob(res, ++param, &host->host_uuid, sizeof(host->host_uuid), SQLITE_STATIC)); + SQLITE_BIND_FAIL(done, sqlite3_bind_blob(res, ++param, &host->host_id.uuid, sizeof(host->host_id.uuid), SQLITE_STATIC)); param = 0; while (sqlite3_step_monitored(res) == SQLITE_ROW) @@ -487,7 +478,7 @@ void sql_check_removed_alerts_state(RRDHOST *host) return; int param = 0; - SQLITE_BIND_FAIL(done, sqlite3_bind_blob(res, ++param, &host->host_uuid, sizeof(host->host_uuid), SQLITE_STATIC)); + SQLITE_BIND_FAIL(done, sqlite3_bind_blob(res, ++param, &host->host_id.uuid, sizeof(host->host_id.uuid), SQLITE_STATIC)); param = 0; while (sqlite3_step_monitored(res) == SQLITE_ROW) { @@ -521,7 +512,7 @@ static void sql_remove_alerts_from_deleted_charts(RRDHOST *host, nd_uuid_t *host sqlite3_stmt *res = NULL; int ret; - nd_uuid_t *actual_uuid = host ? &host->host_uuid : host_uuid; + nd_uuid_t *actual_uuid = host ? 
&host->host_id.uuid : host_uuid; if (!actual_uuid) return; @@ -602,7 +593,7 @@ void sql_health_alarm_log_load(RRDHOST *host) return; int param = 0; - SQLITE_BIND_FAIL(done, sqlite3_bind_blob(res, ++param, &host->host_uuid, sizeof(host->host_uuid), SQLITE_STATIC)); + SQLITE_BIND_FAIL(done, sqlite3_bind_blob(res, ++param, &host->host_id.uuid, sizeof(host->host_id.uuid), SQLITE_STATIC)); DICTIONARY *all_rrdcalcs = dictionary_create( DICT_OPTION_NAME_LINK_DONT_CLONE | DICT_OPTION_VALUE_LINK_DONT_CLONE | DICT_OPTION_DONT_OVERWRITE_VALUE); @@ -736,8 +727,10 @@ void sql_health_alarm_log_load(RRDHOST *host) dictionary_destroy(all_rrdcalcs); all_rrdcalcs = NULL; - if(!host->health_max_unique_id) host->health_max_unique_id = (uint32_t)now_realtime_sec(); - if(!host->health_max_alarm_id) host->health_max_alarm_id = (uint32_t)now_realtime_sec(); + if (!host->health_max_unique_id) + host->health_max_unique_id = get_uint32_id(); + if (!host->health_max_alarm_id) + host->health_max_alarm_id = get_uint32_id(); host->health_log.next_log_id = host->health_max_unique_id + 1; if (unlikely(!host->health_log.next_alarm_id || host->health_log.next_alarm_id <= host->health_max_alarm_id)) @@ -900,7 +893,7 @@ int sql_health_get_last_executed_event(RRDHOST *host, ALARM_ENTRY *ae, RRDCALC_S return ret; int param = 0; - SQLITE_BIND_FAIL(done, sqlite3_bind_blob(res, ++param, &host->host_uuid, sizeof(host->host_uuid), SQLITE_STATIC)); + SQLITE_BIND_FAIL(done, sqlite3_bind_blob(res, ++param, &host->host_id.uuid, sizeof(host->host_id.uuid), SQLITE_STATIC)); SQLITE_BIND_FAIL(done, sqlite3_bind_int(res, ++param, (int) ae->alarm_id)); SQLITE_BIND_FAIL(done, sqlite3_bind_int(res, ++param, (int) ae->unique_id)); SQLITE_BIND_FAIL(done, sqlite3_bind_int(res, ++param, (uint32_t) HEALTH_ENTRY_FLAG_EXEC_RUN)); @@ -963,7 +956,7 @@ void sql_health_alarm_log2json(RRDHOST *host, BUFFER *wb, time_t after, const ch stmt_query = *active_stmt; int param = 0; - rc = sqlite3_bind_blob(stmt_query, ++param, &host->host_uuid, sizeof(host->host_uuid), SQLITE_STATIC); + rc = sqlite3_bind_blob(stmt_query, ++param, &host->host_id.uuid, sizeof(host->host_id.uuid), SQLITE_STATIC); if (unlikely(rc != SQLITE_OK)) { error_report("Failed to bind host_id for SQL_SELECT_HEALTH_LOG."); goto finish; @@ -1246,7 +1239,7 @@ uint32_t sql_get_alarm_id(RRDHOST *host, STRING *chart, STRING *name, uint32_t * return alarm_id; int param = 0; - SQLITE_BIND_FAIL(done, sqlite3_bind_blob(res, ++param, &host->host_uuid, sizeof(host->host_uuid), SQLITE_STATIC)); + SQLITE_BIND_FAIL(done, sqlite3_bind_blob(res, ++param, &host->host_id.uuid, sizeof(host->host_id.uuid), SQLITE_STATIC)); SQLITE_BIND_FAIL(done, SQLITE3_BIND_STRING_OR_NULL(res, ++param, chart)); SQLITE_BIND_FAIL(done, SQLITE3_BIND_STRING_OR_NULL(res, ++param, name)); diff --git a/src/database/sqlite/sqlite_metadata.c b/src/database/sqlite/sqlite_metadata.c index 1b801b731..f0874ba43 100644 --- a/src/database/sqlite/sqlite_metadata.c +++ b/src/database/sqlite/sqlite_metadata.c @@ -78,7 +78,9 @@ const char *database_config[] = { "CREATE INDEX IF NOT EXISTS health_log_d_ind_7 on health_log_detail (alarm_id)", "CREATE INDEX IF NOT EXISTS health_log_d_ind_8 on health_log_detail (new_status, updated_by_id)", -#ifdef ENABLE_ACLK + "CREATE TABLE IF NOT EXISTS agent_event_log (id INTEGER PRIMARY KEY, version TEXT, event_type INT, value, date_created INT)", + "CREATE INDEX IF NOT EXISTS idx_agent_event_log1 on agent_event_log (event_type)", + "CREATE TABLE IF NOT EXISTS alert_queue " " (host_id BLOB, health_log_id INT, 
unique_id INT, alarm_id INT, status INT, date_scheduled INT, " " UNIQUE(host_id, health_log_id, alarm_id))", @@ -88,7 +90,6 @@ const char *database_config[] = { "CREATE TABLE IF NOT EXISTS aclk_queue (sequence_id INTEGER PRIMARY KEY, host_id blob, health_log_id INT, " "unique_id INT, date_created INT, UNIQUE(host_id, health_log_id))", -#endif NULL }; @@ -257,26 +258,21 @@ static inline void set_host_node_id(RRDHOST *host, nd_uuid_t *node_id) return; if (unlikely(!node_id)) { - freez(host->node_id); - __atomic_store_n(&host->node_id, NULL, __ATOMIC_RELAXED); + host->node_id = UUID_ZERO; return; } struct aclk_sync_cfg_t *wc = host->aclk_config; - if (unlikely(!host->node_id)) { - nd_uuid_t *t = mallocz(sizeof(*host->node_id)); - uuid_copy(*t, *node_id); - __atomic_store_n(&host->node_id, t, __ATOMIC_RELAXED); - } - else { - uuid_copy(*(host->node_id), *node_id); - } + uuid_copy(host->node_id.uuid, *node_id); if (unlikely(!wc)) - create_aclk_config(host, &host->host_uuid, node_id); + create_aclk_config(host, &host->host_id.uuid, node_id); else uuid_unparse_lower(*node_id, wc->node_id); + + rrdpush_receiver_send_node_and_claim_id_to_child(host); + stream_path_node_id_updated(host); } #define SQL_SET_HOST_LABEL \ @@ -315,7 +311,7 @@ done: #define SQL_UPDATE_NODE_ID "UPDATE node_instance SET node_id = @node_id WHERE host_id = @host_id" -int update_node_id(nd_uuid_t *host_id, nd_uuid_t *node_id) +int sql_update_node_id(nd_uuid_t *host_id, nd_uuid_t *node_id) { sqlite3_stmt *res = NULL; RRDHOST *host = NULL; @@ -459,7 +455,7 @@ struct node_instance_list *get_node_list(void) node_list[row].live = (host == localhost || host->receiver || !(rrdhost_flag_check(host, RRDHOST_FLAG_ORPHAN))) ? 1 : 0; node_list[row].hops = host->system_info ? host->system_info->hops : - uuid_eq(*host_id, localhost->host_uuid) ? 0 : 1; + uuid_eq(*host_id, localhost->host_id.uuid) ? 0 : 1; node_list[row].hostname = sqlite3_column_bytes(res, 2) ? 
strdupz((char *)sqlite3_column_text(res, 2)) : NULL; } @@ -488,7 +484,7 @@ void sql_load_node_id(RRDHOST *host) return; int param = 0; - SQLITE_BIND_FAIL(done, sqlite3_bind_blob(res, ++param, &host->host_uuid, sizeof(host->host_uuid), SQLITE_STATIC)); + SQLITE_BIND_FAIL(done, sqlite3_bind_blob(res, ++param, &host->host_id.uuid, sizeof(host->host_id.uuid), SQLITE_STATIC)); param = 0; int rc = sqlite3_step_monitored(res); @@ -948,7 +944,7 @@ static int store_host_metadata(RRDHOST *host) return false; int param = 0; - SQLITE_BIND_FAIL(bind_fail, sqlite3_bind_blob(res, ++param, &host->host_uuid, sizeof(host->host_uuid), SQLITE_STATIC)); + SQLITE_BIND_FAIL(bind_fail, sqlite3_bind_blob(res, ++param, &host->host_id.uuid, sizeof(host->host_id.uuid), SQLITE_STATIC)); SQLITE_BIND_FAIL(bind_fail, bind_text_null(res, ++param, rrdhost_hostname(host), 0)); SQLITE_BIND_FAIL(bind_fail, bind_text_null(res, ++param, rrdhost_registry_hostname(host), 1)); SQLITE_BIND_FAIL(bind_fail, sqlite3_bind_int(res, ++param, host->rrd_update_every)); @@ -1018,30 +1014,30 @@ static bool store_host_systeminfo(RRDHOST *host) int ret = 0; - ret += add_host_sysinfo_key_value("NETDATA_CONTAINER_OS_NAME", system_info->container_os_name, &host->host_uuid); - ret += add_host_sysinfo_key_value("NETDATA_CONTAINER_OS_ID", system_info->container_os_id, &host->host_uuid); - ret += add_host_sysinfo_key_value("NETDATA_CONTAINER_OS_ID_LIKE", system_info->container_os_id_like, &host->host_uuid); - ret += add_host_sysinfo_key_value("NETDATA_CONTAINER_OS_VERSION", system_info->container_os_version, &host->host_uuid); - ret += add_host_sysinfo_key_value("NETDATA_CONTAINER_OS_VERSION_ID", system_info->container_os_version_id, &host->host_uuid); - ret += add_host_sysinfo_key_value("NETDATA_CONTAINER_OS_DETECTION", system_info->host_os_detection, &host->host_uuid); - ret += add_host_sysinfo_key_value("NETDATA_HOST_OS_NAME", system_info->host_os_name, &host->host_uuid); - ret += add_host_sysinfo_key_value("NETDATA_HOST_OS_ID", system_info->host_os_id, &host->host_uuid); - ret += add_host_sysinfo_key_value("NETDATA_HOST_OS_ID_LIKE", system_info->host_os_id_like, &host->host_uuid); - ret += add_host_sysinfo_key_value("NETDATA_HOST_OS_VERSION", system_info->host_os_version, &host->host_uuid); - ret += add_host_sysinfo_key_value("NETDATA_HOST_OS_VERSION_ID", system_info->host_os_version_id, &host->host_uuid); - ret += add_host_sysinfo_key_value("NETDATA_HOST_OS_DETECTION", system_info->host_os_detection, &host->host_uuid); - ret += add_host_sysinfo_key_value("NETDATA_SYSTEM_KERNEL_NAME", system_info->kernel_name, &host->host_uuid); - ret += add_host_sysinfo_key_value("NETDATA_SYSTEM_CPU_LOGICAL_CPU_COUNT", system_info->host_cores, &host->host_uuid); - ret += add_host_sysinfo_key_value("NETDATA_SYSTEM_CPU_FREQ", system_info->host_cpu_freq, &host->host_uuid); - ret += add_host_sysinfo_key_value("NETDATA_SYSTEM_TOTAL_RAM", system_info->host_ram_total, &host->host_uuid); - ret += add_host_sysinfo_key_value("NETDATA_SYSTEM_TOTAL_DISK_SIZE", system_info->host_disk_space, &host->host_uuid); - ret += add_host_sysinfo_key_value("NETDATA_SYSTEM_KERNEL_VERSION", system_info->kernel_version, &host->host_uuid); - ret += add_host_sysinfo_key_value("NETDATA_SYSTEM_ARCHITECTURE", system_info->architecture, &host->host_uuid); - ret += add_host_sysinfo_key_value("NETDATA_SYSTEM_VIRTUALIZATION", system_info->virtualization, &host->host_uuid); - ret += add_host_sysinfo_key_value("NETDATA_SYSTEM_VIRT_DETECTION", system_info->virt_detection, &host->host_uuid); - ret += 
add_host_sysinfo_key_value("NETDATA_SYSTEM_CONTAINER", system_info->container, &host->host_uuid); - ret += add_host_sysinfo_key_value("NETDATA_SYSTEM_CONTAINER_DETECTION", system_info->container_detection, &host->host_uuid); - ret += add_host_sysinfo_key_value("NETDATA_HOST_IS_K8S_NODE", system_info->is_k8s_node, &host->host_uuid); + ret += add_host_sysinfo_key_value("NETDATA_CONTAINER_OS_NAME", system_info->container_os_name, &host->host_id.uuid); + ret += add_host_sysinfo_key_value("NETDATA_CONTAINER_OS_ID", system_info->container_os_id, &host->host_id.uuid); + ret += add_host_sysinfo_key_value("NETDATA_CONTAINER_OS_ID_LIKE", system_info->container_os_id_like, &host->host_id.uuid); + ret += add_host_sysinfo_key_value("NETDATA_CONTAINER_OS_VERSION", system_info->container_os_version, &host->host_id.uuid); + ret += add_host_sysinfo_key_value("NETDATA_CONTAINER_OS_VERSION_ID", system_info->container_os_version_id, &host->host_id.uuid); + ret += add_host_sysinfo_key_value("NETDATA_CONTAINER_OS_DETECTION", system_info->host_os_detection, &host->host_id.uuid); + ret += add_host_sysinfo_key_value("NETDATA_HOST_OS_NAME", system_info->host_os_name, &host->host_id.uuid); + ret += add_host_sysinfo_key_value("NETDATA_HOST_OS_ID", system_info->host_os_id, &host->host_id.uuid); + ret += add_host_sysinfo_key_value("NETDATA_HOST_OS_ID_LIKE", system_info->host_os_id_like, &host->host_id.uuid); + ret += add_host_sysinfo_key_value("NETDATA_HOST_OS_VERSION", system_info->host_os_version, &host->host_id.uuid); + ret += add_host_sysinfo_key_value("NETDATA_HOST_OS_VERSION_ID", system_info->host_os_version_id, &host->host_id.uuid); + ret += add_host_sysinfo_key_value("NETDATA_HOST_OS_DETECTION", system_info->host_os_detection, &host->host_id.uuid); + ret += add_host_sysinfo_key_value("NETDATA_SYSTEM_KERNEL_NAME", system_info->kernel_name, &host->host_id.uuid); + ret += add_host_sysinfo_key_value("NETDATA_SYSTEM_CPU_LOGICAL_CPU_COUNT", system_info->host_cores, &host->host_id.uuid); + ret += add_host_sysinfo_key_value("NETDATA_SYSTEM_CPU_FREQ", system_info->host_cpu_freq, &host->host_id.uuid); + ret += add_host_sysinfo_key_value("NETDATA_SYSTEM_TOTAL_RAM", system_info->host_ram_total, &host->host_id.uuid); + ret += add_host_sysinfo_key_value("NETDATA_SYSTEM_TOTAL_DISK_SIZE", system_info->host_disk_space, &host->host_id.uuid); + ret += add_host_sysinfo_key_value("NETDATA_SYSTEM_KERNEL_VERSION", system_info->kernel_version, &host->host_id.uuid); + ret += add_host_sysinfo_key_value("NETDATA_SYSTEM_ARCHITECTURE", system_info->architecture, &host->host_id.uuid); + ret += add_host_sysinfo_key_value("NETDATA_SYSTEM_VIRTUALIZATION", system_info->virtualization, &host->host_id.uuid); + ret += add_host_sysinfo_key_value("NETDATA_SYSTEM_VIRT_DETECTION", system_info->virt_detection, &host->host_id.uuid); + ret += add_host_sysinfo_key_value("NETDATA_SYSTEM_CONTAINER", system_info->container, &host->host_id.uuid); + ret += add_host_sysinfo_key_value("NETDATA_SYSTEM_CONTAINER_DETECTION", system_info->container_detection, &host->host_id.uuid); + ret += add_host_sysinfo_key_value("NETDATA_HOST_IS_K8S_NODE", system_info->is_k8s_node, &host->host_id.uuid); return !(24 == ret); } @@ -1060,7 +1056,7 @@ static int store_chart_metadata(RRDSET *st) int param = 0; SQLITE_BIND_FAIL(bind_fail, sqlite3_bind_blob(res, ++param, &st->chart_uuid, sizeof(st->chart_uuid), SQLITE_STATIC)); - SQLITE_BIND_FAIL(bind_fail, sqlite3_bind_blob(res, ++param, &st->rrdhost->host_uuid, sizeof(st->rrdhost->host_uuid), SQLITE_STATIC)); + 
SQLITE_BIND_FAIL(bind_fail, sqlite3_bind_blob(res, ++param, &st->rrdhost->host_id.uuid, sizeof(st->rrdhost->host_id.uuid), SQLITE_STATIC)); SQLITE_BIND_FAIL(bind_fail, sqlite3_bind_text(res, ++param, string2str(st->parts.type), -1, SQLITE_STATIC)); SQLITE_BIND_FAIL(bind_fail, sqlite3_bind_text(res, ++param, string2str(st->parts.id), -1, SQLITE_STATIC)); @@ -1479,9 +1475,7 @@ static void cleanup_health_log(struct metadata_wc *wc) (void) db_execute(db_meta,"DELETE FROM health_log WHERE host_id NOT IN (SELECT host_id FROM host)"); (void) db_execute(db_meta,"DELETE FROM health_log_detail WHERE health_log_id NOT IN (SELECT health_log_id FROM health_log)"); -#ifdef ENABLE_ACLK (void) db_execute(db_meta,"DELETE FROM alert_version WHERE health_log_id NOT IN (SELECT health_log_id FROM health_log)"); -#endif } // @@ -1631,9 +1625,7 @@ static void restore_host_context(void *arg) rrdhost_flag_clear(host, RRDHOST_FLAG_PENDING_CONTEXT_LOAD); -#ifdef ENABLE_ACLK aclk_queue_node_info(host, false); -#endif nd_log( NDLS_DAEMON, @@ -1927,13 +1919,13 @@ static void start_metadata_hosts(uv_work_t *req __maybe_unused) if (unlikely(rrdhost_flag_check(host, RRDHOST_FLAG_METADATA_LABELS))) { rrdhost_flag_clear(host, RRDHOST_FLAG_METADATA_LABELS); - int rc = exec_statement_with_uuid(SQL_DELETE_HOST_LABELS, &host->host_uuid); + int rc = exec_statement_with_uuid(SQL_DELETE_HOST_LABELS, &host->host_id.uuid); if (likely(!rc)) { query_counter++; buffer_flush(work_buffer); struct query_build tmp = {.sql = work_buffer, .count = 0}; - uuid_unparse_lower(host->host_uuid, tmp.uuid_str); + uuid_unparse_lower(host->host_id.uuid, tmp.uuid_str); rrdlabels_walkthrough_read(host->rrdlabels, host_label_store_to_sql_callback, &tmp); buffer_strcat(work_buffer, " ON CONFLICT (host_id, label_key) DO UPDATE SET source_type = excluded.source_type, label_value=excluded.label_value, date_created=UNIXEPOCH()"); rc = db_execute(db_meta, buffer_tostring(work_buffer)); @@ -1952,12 +1944,12 @@ static void start_metadata_hosts(uv_work_t *req __maybe_unused) if (unlikely(rrdhost_flag_check(host, RRDHOST_FLAG_METADATA_CLAIMID))) { rrdhost_flag_clear(host, RRDHOST_FLAG_METADATA_CLAIMID); - nd_uuid_t uuid; int rc; - if (likely(host->aclk_state.claimed_id && !uuid_parse(host->aclk_state.claimed_id, uuid))) - rc = store_claim_id(&host->host_uuid, &uuid); + ND_UUID uuid = claim_id_get_uuid(); + if(!UUIDiszero(uuid)) + rc = store_claim_id(&host->host_id.uuid, &uuid.uuid); else - rc = store_claim_id(&host->host_uuid, NULL); + rc = store_claim_id(&host->host_id.uuid, NULL); if (unlikely(rc)) rrdhost_flag_set(host, RRDHOST_FLAG_METADATA_CLAIMID | RRDHOST_FLAG_METADATA_UPDATE); @@ -2348,6 +2340,62 @@ uint64_t sqlite_get_meta_space(void) return sqlite_get_db_space(db_meta); } +#define SQL_ADD_AGENT_EVENT_LOG \ + "INSERT INTO agent_event_log (event_type, version, value, date_created) VALUES " \ + " (@event_type, @version, @value, UNIXEPOCH())" + +void add_agent_event(event_log_type_t event_id, int64_t value) +{ + sqlite3_stmt *res = NULL; + + if (!PREPARE_STATEMENT(db_meta, SQL_ADD_AGENT_EVENT_LOG, &res)) + return; + + int param = 0; + SQLITE_BIND_FAIL(done, sqlite3_bind_int(res, ++param, event_id)); + SQLITE_BIND_FAIL(done, sqlite3_bind_text(res, ++param, NETDATA_VERSION, -1, SQLITE_STATIC)); + SQLITE_BIND_FAIL(done, sqlite3_bind_int64(res, ++param, value)); + + param = 0; + int rc = execute_insert(res); + if (rc != SQLITE_DONE) + error_report("Failed to store agent event information, rc = %d", rc); +done: + REPORT_BIND_FAIL(res, param); + 
SQLITE_FINALIZE(res); +} + +void cleanup_agent_event_log(void) +{ + (void) db_execute(db_meta, "DELETE FROM agent_event_log WHERE date_created < UNIXEPOCH() - 30 * 86400"); +} + +#define SQL_GET_AGENT_EVENT_TYPE_MEDIAN \ + "SELECT AVG(value) AS median FROM " \ + "(SELECT value FROM agent_event_log WHERE event_type = @event ORDER BY value " \ + " LIMIT 2 - (SELECT COUNT(*) FROM agent_event_log WHERE event_type = @event) % 2 " \ + "OFFSET(SELECT(COUNT(*) - 1) / 2 FROM agent_event_log WHERE event_type = @event)) " + +usec_t get_agent_event_time_median(event_log_type_t event_id) +{ + sqlite3_stmt *res = NULL; + if (!PREPARE_STATEMENT(db_meta, SQL_GET_AGENT_EVENT_TYPE_MEDIAN, &res)) + return 0; + + usec_t avg_time = 0; + int param = 0; + SQLITE_BIND_FAIL(done, sqlite3_bind_int(res, ++param, event_id)); + + param = 0; + if (sqlite3_step_monitored(res) == SQLITE_ROW) + avg_time = sqlite3_column_int64(res, 0); + +done: + REPORT_BIND_FAIL(res, param); + SQLITE_FINALIZE(res); + return avg_time; +} + // // unitests // diff --git a/src/database/sqlite/sqlite_metadata.h b/src/database/sqlite/sqlite_metadata.h index 9e76e2a50..a5e68eb8c 100644 --- a/src/database/sqlite/sqlite_metadata.h +++ b/src/database/sqlite/sqlite_metadata.h @@ -6,6 +6,11 @@ #include "sqlite3.h" #include "sqlite_functions.h" +typedef enum event_log_type { + EVENT_AGENT_START_TIME = 1, + EVENT_AGENT_SHUTDOWN_TIME, +} event_log_type_t; + // return a node list struct node_instance_list { nd_uuid_t node_id; @@ -41,7 +46,7 @@ void vacuum_database(sqlite3 *database, const char *db_alias, int threshold, int int sql_metadata_cache_stats(int op); int get_node_id(nd_uuid_t *host_id, nd_uuid_t *node_id); -int update_node_id(nd_uuid_t *host_id, nd_uuid_t *node_id); +int sql_update_node_id(nd_uuid_t *host_id, nd_uuid_t *node_id); struct node_instance_list *get_node_list(void); void sql_load_node_id(RRDHOST *host); @@ -54,6 +59,10 @@ bool sql_set_host_label(nd_uuid_t *host_id, const char *label_key, const char *l uint64_t sqlite_get_meta_space(void); int sql_init_meta_database(db_check_action_type_t rebuild, int memory); +void cleanup_agent_event_log(void); +void add_agent_event(event_log_type_t event_id, int64_t value); +usec_t get_agent_event_time_median(event_log_type_t event_id); + // UNIT TEST int metadata_unittest(void); #endif //NETDATA_SQLITE_METADATA_H diff --git a/src/exporting/README.md b/src/exporting/README.md index 83b391f72..a626ee66b 100644 --- a/src/exporting/README.md +++ b/src/exporting/README.md @@ -1,13 +1,3 @@ - - # Exporting reference Welcome to the exporting engine reference guide. This guide contains comprehensive information about enabling, @@ -18,7 +8,7 @@ For a quick introduction to the exporting engine's features, read our doc on [ex databases](/docs/exporting-metrics/README.md), or jump in to [enabling a connector](/docs/exporting-metrics/enable-an-exporting-connector.md). The exporting engine has a modular structure and supports metric exporting via multiple exporting connector instances at -the same time. You can have different update intervals and filters configured for every exporting connector instance. +the same time. You can have different update intervals and filters configured for every exporting connector instance. When you enable the exporting engine and a connector, the Netdata Agent exports metrics _beginning from the time you restart its process_, not the entire [database of long-term metrics](/docs/netdata-agent/configuration/optimizing-metrics-database/change-metrics-storage.md). 
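A note on the `SQL_GET_AGENT_EVENT_TYPE_MEDIAN` statement added in `sqlite_metadata.c` above: the `LIMIT`/`OFFSET` arithmetic is a common SQLite idiom for computing a median without window functions. The following is a minimal standalone sketch of the same technique against a scratch in-memory database; the table contents are hypothetical and only plain libsqlite3 is assumed.

```c
/* Standalone sketch of the LIMIT/OFFSET median idiom used by
 * SQL_GET_AGENT_EVENT_TYPE_MEDIAN above. Table layout and the inserted
 * values are hypothetical; only plain libsqlite3 is assumed. */
#include <stdio.h>
#include <sqlite3.h>

int main(void)
{
    sqlite3 *db = NULL;
    if (sqlite3_open(":memory:", &db) != SQLITE_OK)
        return 1;

    sqlite3_exec(db,
                 "CREATE TABLE agent_event_log (event_type INT, value INT);"
                 "INSERT INTO agent_event_log VALUES (1, 10), (1, 30), (1, 20), (1, 90);",
                 NULL, NULL, NULL);

    /* LIMIT evaluates to 1 for an odd row count (the middle row) and to 2
     * for an even one (the two middle rows); AVG() then collapses either
     * case into a single median value. */
    const char *sql =
        "SELECT AVG(value) FROM "
        "(SELECT value FROM agent_event_log WHERE event_type = ?1 ORDER BY value "
        " LIMIT 2 - (SELECT COUNT(*) FROM agent_event_log WHERE event_type = ?1) % 2 "
        " OFFSET (SELECT (COUNT(*) - 1) / 2 FROM agent_event_log WHERE event_type = ?1))";

    sqlite3_stmt *res = NULL;
    if (sqlite3_prepare_v2(db, sql, -1, &res, NULL) == SQLITE_OK) {
        sqlite3_bind_int(res, 1, 1);                 /* event_type = 1 */
        if (sqlite3_step(res) == SQLITE_ROW)
            printf("median = %.1f\n", sqlite3_column_double(res, 0)); /* 25.0 */
        sqlite3_finalize(res);
    }

    sqlite3_close(db);
    return 0;
}
```

With an odd row count the inner query returns just the middle row; with an even count it returns the two middle rows, so a single statement covers both cases.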
@@ -37,24 +27,24 @@ The exporting engine uses a number of connectors to send Netdata metrics to exte
[list of supported databases](/docs/exporting-metrics/README.md#supported-databases) for information on which
connector to enable and configure for your database of choice.

-- [**AWS Kinesis Data Streams**](/src/exporting/aws_kinesis/README.md): Metrics are sent to the service in `JSON`
+- [**AWS Kinesis Data Streams**](/src/exporting/aws_kinesis/README.md): Metrics are sent to the service in `JSON`
  format.
-- [**Google Cloud Pub/Sub Service**](/src/exporting/pubsub/README.md): Metrics are sent to the service in `JSON`
+- [**Google Cloud Pub/Sub Service**](/src/exporting/pubsub/README.md): Metrics are sent to the service in `JSON`
  format.
-- [**Graphite**](/src/exporting/graphite/README.md): A plaintext interface. Metrics are sent to the database server as
+- [**Graphite**](/src/exporting/graphite/README.md): A plaintext interface. Metrics are sent to the database server as
  `prefix.hostname.chart.dimension`. `prefix` is configured below, `hostname` is the hostname of the machine (can
  also be configured). Learn more in our guide to [export and visualize Netdata metrics in
  Graphite](/src/exporting/graphite/README.md).
-- [**JSON** document databases](/src/exporting/json/README.md)
-- [**OpenTSDB**](/src/exporting/opentsdb/README.md): Use a plaintext or HTTP interfaces. Metrics are sent to
+- [**JSON** document databases](/src/exporting/json/README.md)
+- [**OpenTSDB**](/src/exporting/opentsdb/README.md): Use a plaintext or HTTP interface. Metrics are sent to
  OpenTSDB as `prefix.chart.dimension` with tag `host=hostname`.
-- [**MongoDB**](/src/exporting/mongodb/README.md): Metrics are sent to the database in `JSON` format.
-- [**Prometheus**](/src/exporting/prometheus/README.md): Use an existing Prometheus installation to scrape metrics
+- [**MongoDB**](/src/exporting/mongodb/README.md): Metrics are sent to the database in `JSON` format.
+- [**Prometheus**](/src/exporting/prometheus/README.md): Use an existing Prometheus installation to scrape metrics
  from the node using the Netdata API.
-- [**Prometheus remote write**](/src/exporting/prometheus/remote_write/README.md). A binary snappy-compressed protocol
+- [**Prometheus remote write**](/src/exporting/prometheus/remote_write/README.md). A binary snappy-compressed protocol
  buffer encoding over HTTP. Supports many
  [storage providers](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage).
-- [**TimescaleDB**](/src/exporting/TIMESCALE.md): Use a community-built connector that takes JSON streams from a
+- [**TimescaleDB**](/src/exporting/TIMESCALE.md): Use a community-built connector that takes JSON streams from a
  Netdata client and writes them to a TimescaleDB table.

### Chart filtering

@@ -62,14 +52,14 @@ connector to enable and configure for your database of choice.

Netdata can filter metrics to send only a subset of the collected metrics. You can use the configuration file

-```txt
+```text
[prometheus:exporter]
    send charts matching = system.*
```

or the URL parameter `filter` in the `allmetrics` API call.

-```txt
+```text
http://localhost:19999/api/v1/allmetrics?format=shell&filter=system.*
```

@@ -77,17 +67,17 @@ http://localhost:19999/api/v1/allmetrics?format=shell&filter=system.*

Netdata supports three modes of operation for all exporting connectors:

-- `as-collected` sends to external databases the metrics as they are collected, in the units they are collected.
+- `as-collected` sends to external databases the metrics as they are collected, in the units they are collected.
  So, counters are sent as counters and gauges are sent as gauges, much like all data collectors do. For example,
  to calculate CPU utilization in this format, you need to know how to convert kernel ticks to percentage.

-- `average` sends to external databases normalized metrics from the Netdata database. In this mode, all metrics
+- `average` sends to external databases normalized metrics from the Netdata database. In this mode, all metrics
  are sent as gauges, in the units Netdata uses. This abstracts data collection and simplifies visualization, but
  you will not be able to copy and paste queries from other sources to convert units. For example, CPU utilization
  percentage is calculated by Netdata, so Netdata will convert ticks to percentage and send the average percentage
  to the external database.

-- `sum` or `volume`: the sum of the interpolated values shown on the Netdata graphs is sent to the external
+- `sum` or `volume`: the sum of the interpolated values shown on the Netdata graphs is sent to the external
  database. So, if Netdata is configured to send data to the database every 10 seconds, the sum of the 10 values
  shown on the Netdata charts will be used.

@@ -102,7 +92,7 @@ see in Netdata, which is not necessarily true for the other modes of operation.

### Independent operation

-This code is smart enough, not to slow down Netdata, independently of the speed of the external database server.
+This code is smart enough not to slow down Netdata, independently of the speed of the external database server.

> ❗ You should keep in mind though that many exporting connector instances can consume a lot of CPU resources if they
> run their batches at the same time. You can set different update intervals for every exporting connector instance,

@@ -111,12 +101,12 @@ This code is smart enough, not to slow down Netdata, independently of the speed

## Configuration

Here are the configuration blocks for every supported connector. Your current `exporting.conf` file may look a little
-different.
+different.

You can configure each connector individually using the available [options](#options). The
`[graphite:my_graphite_instance]` block contains examples of some of these additional options in action.

-```conf
+```text
[exporting:global]
    enabled = yes
    send configured labels = no

@@ -192,23 +182,23 @@ You can configure each connector individually using the available [options](#opt

### Sections

-- `[exporting:global]` is a section where you can set your defaults for all exporting connectors
-- `[prometheus:exporter]` defines settings for Prometheus exporter API queries (e.g.:
+- `[exporting:global]` is a section where you can set your defaults for all exporting connectors
+- `[prometheus:exporter]` defines settings for Prometheus exporter API queries (e.g.:
  `http://NODE:19999/api/v1/allmetrics?format=prometheus&help=yes&source=as-collected`).
-- `[<type>:<name>]` keeps settings for a particular exporting connector instance, where:
-  - `type` selects the exporting connector type: graphite | opentsdb:telnet | opentsdb:http |
+- `[<type>:<name>]` keeps settings for a particular exporting connector instance, where:
+- `type` selects the exporting connector type: graphite | opentsdb:telnet | opentsdb:http |
  prometheus_remote_write | json | kinesis | pubsub | mongodb. For graphite, opentsdb, json, and
  prometheus_remote_write connectors you can also use `:http` or `:https` modifiers (e.g.: `opentsdb:https`).
-  - `name` can be arbitrary instance name you chose.
+- `name` can be an arbitrary instance name you choose.

### Options

Configure individual connectors and override any global settings with the following options.

-- `enabled = yes | no`, enables or disables an exporting connector instance
+- `enabled = yes | no`, enables or disables an exporting connector instance

-- `destination = host1 host2 host3 ...`, accepts **a space separated list** of hostnames, IPs (IPv4 and IPv6) and
+- `destination = host1 host2 host3 ...`, accepts **a space-separated list** of hostnames, IPs (IPv4 and IPv6) and
  ports to connect to. Netdata will use the **first available** to send the metrics.

  The format of each item in this list is: `[PROTOCOL:]IP[:PORT]`.

@@ -223,13 +213,13 @@ Configure individual connectors and override any global settings with the follow

  Example IPv4:

-```conf
+```text
destination = 10.11.14.2:4242 10.11.14.3:4242 10.11.14.4:4242
```

  Example IPv6 and IPv4 together:

-```conf
+```text
destination = [ffff:...:0001]:2003 10.11.12.1:2003
```

@@ -246,48 +236,48 @@ Configure individual connectors and override any global settings with the follow

  For the Pub/Sub exporting connector `destination` can be set to a specific service endpoint.

-- `data source = as collected`, or `data source = average`, or `data source = sum`, selects the kind of data that will
+- `data source = as collected`, or `data source = average`, or `data source = sum`, selects the kind of data that will
  be sent to the external database.

-- `hostname = my-name`, is the hostname to be used for sending data to the external database server. By default this
+- `hostname = my-name`, is the hostname to be used for sending data to the external database server. By default, this
  is `[global].hostname`.

-- `prefix = Netdata`, is the prefix to add to all metrics.
+- `prefix = Netdata`, is the prefix to add to all metrics.

-- `update every = 10`, is the number of seconds between sending data to the external database. Netdata will add some
+- `update every = 10`, is the number of seconds between sending data to the external database. Netdata will add some
  randomness to this number, to prevent stressing the external server when many Netdata servers send data to the same
  database. This randomness does not affect the quality of the data, only the time they are sent.

-- `buffer on failures = 10`, is the number of iterations (each iteration is `update every` seconds) to buffer data,
+- `buffer on failures = 10`, is the number of iterations (each iteration is `update every` seconds) to buffer data,
  when the external database server is not available. If the server fails to receive the data after that many
  failures, data loss on the connector instance is expected (Netdata will also log it).

-- `timeout ms = 20000`, is the timeout in milliseconds to wait for the external database server to process the data.
+- `timeout ms = 20000`, is the timeout in milliseconds to wait for the external database server to process the data.
  By default, this is `2 * update_every * 1000`.

-- `send hosts matching = localhost *` includes one or more space separated patterns, using `*` as wildcard (any number
+- `send hosts matching = localhost *` includes one or more space-separated patterns, using `*` as wildcard (any number
  of times within each pattern).
The patterns are checked against the hostname (the localhost is always checked as `localhost`), allowing us to
  filter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating
  multiple hosts. A pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except
  hosts containing `*child*`, use `!*child* *db*` (so, the order is important: the first pattern matching the
  hostname will be used - positive or negative).

-- `send charts matching = *` includes one or more space separated patterns, using `*` as wildcard (any number of times
+- `send charts matching = *` includes one or more space-separated patterns, using `*` as wildcard (any number of times
  within each pattern). The patterns are checked against both chart id and chart name. A pattern starting with `!`
  gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`, use `!*reads apps.*`
  (so, the order is important: the first pattern matching the chart id or the chart name will be used - positive or
  negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter
  has a higher priority than the configuration option. A standalone sketch of this first-match rule is shown further below.

-- `send names instead of ids = yes | no` controls the metric names Netdata should send to the external database.
+- `send names instead of ids = yes | no` controls the metric names Netdata should send to the external database.
  Netdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system
  and names are human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several
  cases they are different: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.

-- `send configured labels = yes | no` controls if host labels defined in the `[host labels]` section in `netdata.conf`
+- `send configured labels = yes | no` controls if host labels defined in the `[host labels]` section in `netdata.conf`
  should be sent to the external database

-- `send automatic labels = yes | no` controls if automatically created labels, like `_os_name` or `_architecture`
+- `send automatic labels = yes | no` controls if automatically created labels, like `_os_name` or `_architecture`
  should be sent to the external database

## HTTPS

@@ -302,14 +292,14 @@ HTTPS communication between Netdata and an external database. You can set up a r

Netdata creates five charts in the dashboard, under the **Netdata Monitoring** section, to help you monitor the health
and performance of the exporting engine itself:

-1. **Buffered metrics**, the number of metrics Netdata added to the buffer for dispatching them to the
+1. **Buffered metrics**, the number of metrics Netdata added to the buffer for dispatching them to the
   external database server.

-2. **Exporting data size**, the amount of data (in KB) Netdata added the buffer.
+2. **Exporting data size**, the amount of data (in KB) Netdata added to the buffer.

-3. **Exporting operations**, the number of operations performed by Netdata.
+3. **Exporting operations**, the number of operations performed by Netdata.

-4. **Exporting thread CPU usage**, the CPU resources consumed by the Netdata thread, that is responsible for sending
+4. **Exporting thread CPU usage**, the CPU resources consumed by the Netdata thread that is responsible for sending
   the metrics to the external database server.
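To make the first-match-wins pattern rule described above concrete, here is a minimal standalone sketch. It substitutes POSIX `fnmatch()` for Netdata's internal simple-pattern matcher, and treating an unmatched name as "not sent" plus the fixed buffer size are assumptions for illustration, not the agent's actual implementation.

```c
/* Standalone sketch of the documented first-match-wins rule for
 * "send hosts matching" / "send charts matching". POSIX fnmatch() stands
 * in for Netdata's internal matcher. */
#include <stdio.h>
#include <string.h>
#include <fnmatch.h>

static int send_allowed(const char *patterns, const char *name)
{
    char copy[1024];                      /* illustrative fixed buffer */
    snprintf(copy, sizeof(copy), "%s", patterns);

    for (char *p = strtok(copy, " "); p; p = strtok(NULL, " ")) {
        int negative = (*p == '!');       /* leading '!' negates the pattern */
        if (negative)
            p++;
        if (fnmatch(p, name, 0) == 0)
            return !negative;             /* the first matching pattern decides */
    }
    return 0;                             /* assumed: unmatched means not sent */
}

int main(void)
{
    /* "!*reads apps.*": send apps.* charts, except those ending in "reads" */
    printf("%d\n", send_allowed("!*reads apps.*", "apps.cpu"));       /* 1 */
    printf("%d\n", send_allowed("!*reads apps.*", "apps.vfs_reads")); /* 0 */
    printf("%d\n", send_allowed("!*reads apps.*", "system.cpu"));     /* 0 */
    return 0;
}
```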
![image](https://cloud.githubusercontent.com/assets/2662304/20463536/eb196084-af3d-11e6-8ee5-ddbd3b4d8449.png)

@@ -318,10 +308,8 @@ and performance of the exporting engine itself:

Netdata adds 3 alerts:

-1. `exporting_last_buffering`, number of seconds since the last successful buffering of exported data
-2. `exporting_metrics_sent`, percentage of metrics sent to the external database server
-3. `exporting_metrics_lost`, number of metrics lost due to repeating failures to contact the external database server
+1. `exporting_last_buffering`, number of seconds since the last successful buffering of exported data
+2. `exporting_metrics_sent`, percentage of metrics sent to the external database server
+3. `exporting_metrics_lost`, number of metrics lost due to repeating failures to contact the external database server

![image](https://cloud.githubusercontent.com/assets/2662304/20463779/a46ed1c2-af43-11e6-91a5-07ca4533cac3.png)
-
-
diff --git a/src/exporting/TIMESCALE.md b/src/exporting/TIMESCALE.md
index 3bad28379..179527c14 100644
--- a/src/exporting/TIMESCALE.md
+++ b/src/exporting/TIMESCALE.md
@@ -1,12 +1,3 @@
-
-
# Writing metrics to TimescaleDB

Thanks to Netdata's community of developers and system administrators, and Mahlon Smith

@@ -23,14 +14,18 @@ What's TimescaleDB? Here's how their team defines the project on their [GitHub p
To get started archiving metrics to TimescaleDB right away, check out Mahlon's
[`netdata-timescale-relay` repository](https://github.com/mahlonsmith/netdata-timescale-relay) on GitHub.

Please be aware that the backends subsystem was removed and the Netdata configuration should be moved to the new `exporting.conf` configuration file. Use
-```conf
+
+```text
[json:my_instance]
```
+
in `exporting.conf` instead of
-```conf
+
+```text
[backend]
    type = json
```
+
in `netdata.conf`. This small program takes JSON streams from a Netdata client and writes them to a PostgreSQL (aka TimescaleDB) table.

@@ -67,5 +62,3 @@ blog](https://blog.timescale.com/blog/writing-it-metrics-from-netdata-to-timesca
Thank you to Mahlon, Rune, TimescaleDB, and the members of the Netdata community that requested and then built this
exporting connection between Netdata and TimescaleDB!
-
-
diff --git a/src/exporting/WALKTHROUGH.md b/src/exporting/WALKTHROUGH.md
index 450789d9d..1b3d255bd 100644
--- a/src/exporting/WALKTHROUGH.md
+++ b/src/exporting/WALKTHROUGH.md
@@ -37,7 +37,7 @@ This stack will offer you visibility into your application and systems performan
To begin, let's create the container we will install Netdata on. We need to run a container, forward the
necessary port that Netdata listens on, and attach a tty so we can interact with the bash shell on the container. But
before we do this we want name resolution between the two containers to work. To accomplish this, we will create a
-user-defined network and attach both containers to this network. The first command we should run is:
+user-defined network and attach both containers to this network. The first command we should run is:

```sh
docker network create --driver bridge netdata-tutorial
```

@@ -90,15 +90,15 @@ We will be installing Prometheus in a container for purpose of demonstration. Wh
container I would like to walk through the install process and setup on a fresh container. This will allow anyone
reading to migrate this tutorial to a VM or Server of any sort.

-Let's start another container in the same fashion as we did the Netdata container.
+Let's start another container in the same fashion as we did the Netdata container.
```sh docker run -it --name prometheus --hostname prometheus \ --network=netdata-tutorial -p 9090:9090 centos:latest '/bin/bash' -``` +``` This should drop you into a shell once again. Once there quickly install your favorite editor as we will be editing -files later in this tutorial. +files later in this tutorial. ```sh yum install vim -y @@ -137,7 +137,7 @@ point to talk about Prometheus's data model which can be viewed here: /dev/null || cd /opt/netdata/etc/netdata diff --git a/src/exporting/clean_connectors.c b/src/exporting/clean_connectors.c index c850c5ffa..81413e661 100644 --- a/src/exporting/clean_connectors.c +++ b/src/exporting/clean_connectors.c @@ -67,9 +67,7 @@ void simple_connector_cleanup(struct instance *instance) freez(current_buffer); } -#ifdef ENABLE_HTTPS netdata_ssl_close(&simple_connector_data->ssl); -#endif freez(simple_connector_data); diff --git a/src/exporting/exporting_engine.c b/src/exporting/exporting_engine.c index eb5f8a0a8..7abe0b5ce 100644 --- a/src/exporting/exporting_engine.c +++ b/src/exporting/exporting_engine.c @@ -6,7 +6,6 @@ static struct engine *engine = NULL; void analytics_exporting_connectors_ssl(BUFFER *b) { -#ifdef ENABLE_HTTPS if (netdata_ssl_exporting_ctx) { for (struct instance *instance = engine->instance_root; instance; instance = instance->next) { struct simple_connector_data *connector_specific_data = instance->connector_specific_data; @@ -16,7 +15,6 @@ void analytics_exporting_connectors_ssl(BUFFER *b) } } } -#endif buffer_strcat(b, "|"); } @@ -197,12 +195,11 @@ void *exporting_main(void *ptr) RRDDIM *rd_main_system = NULL; create_main_rusage_chart(&st_main_rusage, &rd_main_user, &rd_main_system); - usec_t step_ut = localhost->rrd_update_every * USEC_PER_SEC; heartbeat_t hb; - heartbeat_init(&hb); + heartbeat_init(&hb, localhost->rrd_update_every * USEC_PER_SEC); while (service_running(SERVICE_EXPORTERS)) { - heartbeat_next(&hb, step_ut); + heartbeat_next(&hb); engine->now = now_realtime_sec(); if (mark_scheduled_instances(engine)) diff --git a/src/exporting/exporting_engine.h b/src/exporting/exporting_engine.h index beaa0ba87..44a2da322 100644 --- a/src/exporting/exporting_engine.h +++ b/src/exporting/exporting_engine.h @@ -124,9 +124,7 @@ struct simple_connector_data { struct simple_connector_buffer *first_buffer; struct simple_connector_buffer *last_buffer; -#ifdef ENABLE_HTTPS NETDATA_SSL ssl; -#endif }; struct prometheus_remote_write_specific_config { diff --git a/src/exporting/graphite/graphite.c b/src/exporting/graphite/graphite.c index 1fc1f2b04..a54339892 100644 --- a/src/exporting/graphite/graphite.c +++ b/src/exporting/graphite/graphite.c @@ -19,12 +19,10 @@ int init_graphite_instance(struct instance *instance) struct simple_connector_data *connector_specific_data = callocz(1, sizeof(struct simple_connector_data)); instance->connector_specific_data = connector_specific_data; -#ifdef ENABLE_HTTPS connector_specific_data->ssl = NETDATA_SSL_UNSET_CONNECTION; if (instance->config.options & EXPORTING_OPTION_USE_TLS) { netdata_ssl_initialize_ctx(NETDATA_SSL_EXPORTING_CTX); } -#endif instance->start_batch_formatting = NULL; instance->start_host_formatting = format_host_labels_graphite_plaintext; diff --git a/src/exporting/graphite/integrations/blueflood.md b/src/exporting/graphite/integrations/blueflood.md index 56220fb6a..d65f9503b 100644 --- a/src/exporting/graphite/integrations/blueflood.md +++ b/src/exporting/graphite/integrations/blueflood.md @@ -37,8 +37,8 @@ further analysis, or correlation with data from other sources. 
The configuration file name for this integration is `exporting.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/exporting/graphite/integrations/graphite.md b/src/exporting/graphite/integrations/graphite.md index c38b1aac4..0f2b91d16 100644 --- a/src/exporting/graphite/integrations/graphite.md +++ b/src/exporting/graphite/integrations/graphite.md @@ -37,8 +37,8 @@ further analysis, or correlation with data from other sources. The configuration file name for this integration is `exporting.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/exporting/graphite/integrations/influxdb.md b/src/exporting/graphite/integrations/influxdb.md index 4d49febe0..6af2616a3 100644 --- a/src/exporting/graphite/integrations/influxdb.md +++ b/src/exporting/graphite/integrations/influxdb.md @@ -37,8 +37,8 @@ further analysis, or correlation with data from other sources. The configuration file name for this integration is `exporting.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/exporting/graphite/integrations/kairosdb.md b/src/exporting/graphite/integrations/kairosdb.md index d5dad7f42..c2bd27bb8 100644 --- a/src/exporting/graphite/integrations/kairosdb.md +++ b/src/exporting/graphite/integrations/kairosdb.md @@ -37,8 +37,8 @@ further analysis, or correlation with data from other sources. The configuration file name for this integration is `exporting.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/exporting/json/integrations/json.md b/src/exporting/json/integrations/json.md index 0b17aa318..b6d872492 100644 --- a/src/exporting/json/integrations/json.md +++ b/src/exporting/json/integrations/json.md @@ -36,8 +36,8 @@ further analysis, or correlation with data from other sources The configuration file name for this integration is `exporting.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/exporting/json/json.c b/src/exporting/json/json.c index e9c4db635..d696c7358 100644 --- a/src/exporting/json/json.c +++ b/src/exporting/json/json.c @@ -70,12 +70,10 @@ int init_json_http_instance(struct instance *instance) struct simple_connector_data *connector_specific_data = callocz(1, sizeof(struct simple_connector_data)); instance->connector_specific_data = connector_specific_data; -#ifdef ENABLE_HTTPS connector_specific_data->ssl = NETDATA_SSL_UNSET_CONNECTION; if (instance->config.options & EXPORTING_OPTION_USE_TLS) { netdata_ssl_initialize_ctx(NETDATA_SSL_EXPORTING_CTX); } -#endif instance->start_batch_formatting = open_batch_json_http; instance->start_host_formatting = format_host_labels_json_plaintext; diff --git a/src/exporting/mongodb/integrations/mongodb.md b/src/exporting/mongodb/integrations/mongodb.md index c32ff5ee1..9d333bd09 100644 --- a/src/exporting/mongodb/integrations/mongodb.md +++ b/src/exporting/mongodb/integrations/mongodb.md @@ -26,7 +26,7 @@ for long-term storage, further analysis, or correlation with data from other sou #### -- To use MongoDB as an external storage for long-term archiving, you should first [install](http://mongoc.org/libmongoc/current/installing.html) libmongoc 1.7.0 or higher. +- To use MongoDB as an external storage for long-term archiving, you should first [install](https://www.mongodb.com/docs/languages/c/c-driver/current/libmongoc/tutorials/obtaining-libraries/installing/#std-label-installing) libmongoc 1.7.0 or higher. - Next, re-install Netdata from the source, which detects that the required library is now available. @@ -38,8 +38,8 @@ for long-term storage, further analysis, or correlation with data from other sou The configuration file name for this integration is `exporting.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/exporting/mongodb/metadata.yaml b/src/exporting/mongodb/metadata.yaml index 87aafc02d..6597df714 100644 --- a/src/exporting/mongodb/metadata.yaml +++ b/src/exporting/mongodb/metadata.yaml @@ -20,7 +20,7 @@ setup: list: - title: '' description: | - - To use MongoDB as an external storage for long-term archiving, you should first [install](http://mongoc.org/libmongoc/current/installing.html) libmongoc 1.7.0 or higher. + - To use MongoDB as an external storage for long-term archiving, you should first [install](https://www.mongodb.com/docs/languages/c/c-driver/current/libmongoc/tutorials/obtaining-libraries/installing/#std-label-installing) libmongoc 1.7.0 or higher. - Next, re-install Netdata from the source, which detects that the required library is now available. configuration: file: diff --git a/src/exporting/opentsdb/integrations/opentsdb.md b/src/exporting/opentsdb/integrations/opentsdb.md index ddf8cdf25..6538e0c35 100644 --- a/src/exporting/opentsdb/integrations/opentsdb.md +++ b/src/exporting/opentsdb/integrations/opentsdb.md @@ -37,8 +37,8 @@ further analysis, or correlation with data from other sources. The configuration file name for this integration is `exporting.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/exporting/opentsdb/opentsdb.c b/src/exporting/opentsdb/opentsdb.c index ab4495cb2..bee0f443a 100644 --- a/src/exporting/opentsdb/opentsdb.c +++ b/src/exporting/opentsdb/opentsdb.c @@ -20,12 +20,10 @@ int init_opentsdb_telnet_instance(struct instance *instance) struct simple_connector_data *connector_specific_data = callocz(1, sizeof(struct simple_connector_data)); instance->connector_specific_data = connector_specific_data; -#ifdef ENABLE_HTTPS connector_specific_data->ssl = NETDATA_SSL_UNSET_CONNECTION; if (instance->config.options & EXPORTING_OPTION_USE_TLS) { netdata_ssl_initialize_ctx(NETDATA_SSL_EXPORTING_CTX); } -#endif instance->start_batch_formatting = NULL; instance->start_host_formatting = format_host_labels_opentsdb_telnet; @@ -75,12 +73,10 @@ int init_opentsdb_http_instance(struct instance *instance) connector_specific_config->default_port = 4242; struct simple_connector_data *connector_specific_data = callocz(1, sizeof(struct simple_connector_data)); -#ifdef ENABLE_HTTPS connector_specific_data->ssl = NETDATA_SSL_UNSET_CONNECTION; if (instance->config.options & EXPORTING_OPTION_USE_TLS) { netdata_ssl_initialize_ctx(NETDATA_SSL_EXPORTING_CTX); } -#endif instance->connector_specific_data = connector_specific_data; instance->start_batch_formatting = open_batch_json_http; diff --git a/src/exporting/prometheus/README.md b/src/exporting/prometheus/README.md index 81e62b7ec..cc35b1d42 100644 --- a/src/exporting/prometheus/README.md +++ b/src/exporting/prometheus/README.md @@ -203,7 +203,7 @@ interrupts, QoS classes, statsd synthetic charts, etc. The default is controlled in `exporting.conf`: -```conf +```text [prometheus:exporter] send names instead of ids = yes | no ``` @@ -217,7 +217,7 @@ You can overwrite it from Prometheus, by appending to the URL: Netdata can filter the metrics it sends to Prometheus with this setting: -```conf +```text [prometheus:exporter] send charts matching = * ``` @@ -233,7 +233,7 @@ used. Netdata sends all metrics prefixed with `netdata_`. You can change this in `netdata.conf`, like this: -```conf +```text [prometheus:exporter] prefix = netdata ``` diff --git a/src/exporting/prometheus/integrations/appoptics.md b/src/exporting/prometheus/integrations/appoptics.md index 73ed5c843..35aabe56e 100644 --- a/src/exporting/prometheus/integrations/appoptics.md +++ b/src/exporting/prometheus/integrations/appoptics.md @@ -42,8 +42,8 @@ The remote write exporting connector does not support buffer on failures. The configuration file name for this integration is `exporting.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
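[Editor's note] The `src/exporting/prometheus/README.md` hunks above only change the fence type (presumably because `conf` is not a recognized highlight language while `text` is), but the option they document, `send names instead of ids`, corresponds to the `PROMETHEUS_OUTPUT_NAMES` bit applied per chart and dimension in the `prometheus.c` hunks further down. A self-contained approximation of that selection logic (the struct and flag value here are illustrative, not the real definitions):

```c
#include <stdio.h>

#define PROMETHEUS_OUTPUT_NAMES (1u << 0)  /* illustrative bit value */

struct chart { const char *id; const char *name; };

/* Mirrors the ternary used per chart/dimension in prometheus.c:
 * use the human-readable name when requested and available,
 * otherwise fall back to the id. */
static const char *chart_label(const struct chart *st, unsigned output_options) {
    return (output_options & PROMETHEUS_OUTPUT_NAMES && st->name) ? st->name : st->id;
}

int main(void) {
    struct chart st = { .id = "system.cpu", .name = "cpu_utilization" };
    printf("%s\n", chart_label(&st, PROMETHEUS_OUTPUT_NAMES)); /* cpu_utilization */
    printf("%s\n", chart_label(&st, 0));                       /* system.cpu */
    return 0;
}
```

The `?names=yes|no` URL parameter documented in the README overrides the configured default per scrape, so both forms can be served from the same agent.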
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/exporting/prometheus/integrations/azure_data_explorer.md b/src/exporting/prometheus/integrations/azure_data_explorer.md index 8acbef88a..3df0a700c 100644 --- a/src/exporting/prometheus/integrations/azure_data_explorer.md +++ b/src/exporting/prometheus/integrations/azure_data_explorer.md @@ -42,8 +42,8 @@ The remote write exporting connector does not support buffer on failures. The configuration file name for this integration is `exporting.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/exporting/prometheus/integrations/azure_event_hub.md b/src/exporting/prometheus/integrations/azure_event_hub.md index 42e2a0515..ac643988c 100644 --- a/src/exporting/prometheus/integrations/azure_event_hub.md +++ b/src/exporting/prometheus/integrations/azure_event_hub.md @@ -42,8 +42,8 @@ The remote write exporting connector does not support buffer on failures. The configuration file name for this integration is `exporting.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/exporting/prometheus/integrations/chronix.md b/src/exporting/prometheus/integrations/chronix.md index c7d315b79..519851a2b 100644 --- a/src/exporting/prometheus/integrations/chronix.md +++ b/src/exporting/prometheus/integrations/chronix.md @@ -42,8 +42,8 @@ The remote write exporting connector does not support buffer on failures. The configuration file name for this integration is `exporting.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/exporting/prometheus/integrations/cortex.md b/src/exporting/prometheus/integrations/cortex.md index 91fe3946d..4ab5122dc 100644 --- a/src/exporting/prometheus/integrations/cortex.md +++ b/src/exporting/prometheus/integrations/cortex.md @@ -42,8 +42,8 @@ The remote write exporting connector does not support buffer on failures. The configuration file name for this integration is `exporting.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/exporting/prometheus/integrations/cratedb.md b/src/exporting/prometheus/integrations/cratedb.md index 87f30bc79..5d79e41ee 100644 --- a/src/exporting/prometheus/integrations/cratedb.md +++ b/src/exporting/prometheus/integrations/cratedb.md @@ -42,8 +42,8 @@ The remote write exporting connector does not support buffer on failures. The configuration file name for this integration is `exporting.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/exporting/prometheus/integrations/elasticsearch.md b/src/exporting/prometheus/integrations/elasticsearch.md index 42fac5f67..b900691cb 100644 --- a/src/exporting/prometheus/integrations/elasticsearch.md +++ b/src/exporting/prometheus/integrations/elasticsearch.md @@ -42,8 +42,8 @@ The remote write exporting connector does not support buffer on failures. The configuration file name for this integration is `exporting.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/exporting/prometheus/integrations/gnocchi.md b/src/exporting/prometheus/integrations/gnocchi.md index 457adefc8..154edfcda 100644 --- a/src/exporting/prometheus/integrations/gnocchi.md +++ b/src/exporting/prometheus/integrations/gnocchi.md @@ -42,8 +42,8 @@ The remote write exporting connector does not support buffer on failures. 
The configuration file name for this integration is `exporting.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/exporting/prometheus/integrations/google_bigquery.md b/src/exporting/prometheus/integrations/google_bigquery.md index c9cb54cc7..7331bb737 100644 --- a/src/exporting/prometheus/integrations/google_bigquery.md +++ b/src/exporting/prometheus/integrations/google_bigquery.md @@ -42,8 +42,8 @@ The remote write exporting connector does not support buffer on failures. The configuration file name for this integration is `exporting.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/exporting/prometheus/integrations/greptimedb.md b/src/exporting/prometheus/integrations/greptimedb.md index cf1453eeb..d660b9c8c 100644 --- a/src/exporting/prometheus/integrations/greptimedb.md +++ b/src/exporting/prometheus/integrations/greptimedb.md @@ -42,8 +42,8 @@ The remote write exporting connector does not support buffer on failures. The configuration file name for this integration is `exporting.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/exporting/prometheus/integrations/irondb.md b/src/exporting/prometheus/integrations/irondb.md index 6ab7c8f06..9a6e387e8 100644 --- a/src/exporting/prometheus/integrations/irondb.md +++ b/src/exporting/prometheus/integrations/irondb.md @@ -42,8 +42,8 @@ The remote write exporting connector does not support buffer on failures. The configuration file name for this integration is `exporting.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/exporting/prometheus/integrations/kafka.md b/src/exporting/prometheus/integrations/kafka.md index 207f292ff..2c315c60c 100644 --- a/src/exporting/prometheus/integrations/kafka.md +++ b/src/exporting/prometheus/integrations/kafka.md @@ -42,8 +42,8 @@ The remote write exporting connector does not support buffer on failures. The configuration file name for this integration is `exporting.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/exporting/prometheus/integrations/m3db.md b/src/exporting/prometheus/integrations/m3db.md index 75ff05b5d..7b5621cfc 100644 --- a/src/exporting/prometheus/integrations/m3db.md +++ b/src/exporting/prometheus/integrations/m3db.md @@ -42,8 +42,8 @@ The remote write exporting connector does not support buffer on failures. The configuration file name for this integration is `exporting.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/exporting/prometheus/integrations/metricfire.md b/src/exporting/prometheus/integrations/metricfire.md index 8e8797ca9..1ff9619f5 100644 --- a/src/exporting/prometheus/integrations/metricfire.md +++ b/src/exporting/prometheus/integrations/metricfire.md @@ -42,8 +42,8 @@ The remote write exporting connector does not support buffer on failures. The configuration file name for this integration is `exporting.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/exporting/prometheus/integrations/new_relic.md b/src/exporting/prometheus/integrations/new_relic.md index 7ecedd497..11fbb3345 100644 --- a/src/exporting/prometheus/integrations/new_relic.md +++ b/src/exporting/prometheus/integrations/new_relic.md @@ -42,8 +42,8 @@ The remote write exporting connector does not support buffer on failures. The configuration file name for this integration is `exporting.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/exporting/prometheus/integrations/opeansearch.md b/src/exporting/prometheus/integrations/opeansearch.md index 77c494284..a76e738ab 100644 --- a/src/exporting/prometheus/integrations/opeansearch.md +++ b/src/exporting/prometheus/integrations/opeansearch.md @@ -42,8 +42,8 @@ The remote write exporting connector does not support buffer on failures. The configuration file name for this integration is `exporting.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/exporting/prometheus/integrations/postgresql.md b/src/exporting/prometheus/integrations/postgresql.md index 4a899b5d4..94ace1dcf 100644 --- a/src/exporting/prometheus/integrations/postgresql.md +++ b/src/exporting/prometheus/integrations/postgresql.md @@ -42,8 +42,8 @@ The remote write exporting connector does not support buffer on failures. The configuration file name for this integration is `exporting.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/exporting/prometheus/integrations/prometheus_remote_write.md b/src/exporting/prometheus/integrations/prometheus_remote_write.md index 6b073d511..296e7fa0a 100644 --- a/src/exporting/prometheus/integrations/prometheus_remote_write.md +++ b/src/exporting/prometheus/integrations/prometheus_remote_write.md @@ -42,8 +42,8 @@ The remote write exporting connector does not support buffer on failures. The configuration file name for this integration is `exporting.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/exporting/prometheus/integrations/quasardb.md b/src/exporting/prometheus/integrations/quasardb.md index 4682f0800..a570e949c 100644 --- a/src/exporting/prometheus/integrations/quasardb.md +++ b/src/exporting/prometheus/integrations/quasardb.md @@ -42,8 +42,8 @@ The remote write exporting connector does not support buffer on failures. The configuration file name for this integration is `exporting.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/exporting/prometheus/integrations/splunk_signalfx.md b/src/exporting/prometheus/integrations/splunk_signalfx.md index 792808817..f464e19b8 100644 --- a/src/exporting/prometheus/integrations/splunk_signalfx.md +++ b/src/exporting/prometheus/integrations/splunk_signalfx.md @@ -42,8 +42,8 @@ The remote write exporting connector does not support buffer on failures. The configuration file name for this integration is `exporting.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/exporting/prometheus/integrations/thanos.md b/src/exporting/prometheus/integrations/thanos.md index de61e29a6..e0ddcd8db 100644 --- a/src/exporting/prometheus/integrations/thanos.md +++ b/src/exporting/prometheus/integrations/thanos.md @@ -42,8 +42,8 @@ The remote write exporting connector does not support buffer on failures. The configuration file name for this integration is `exporting.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/exporting/prometheus/integrations/tikv.md b/src/exporting/prometheus/integrations/tikv.md index 74a62938c..1d2dbfc0c 100644 --- a/src/exporting/prometheus/integrations/tikv.md +++ b/src/exporting/prometheus/integrations/tikv.md @@ -42,8 +42,8 @@ The remote write exporting connector does not support buffer on failures. The configuration file name for this integration is `exporting.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/exporting/prometheus/integrations/timescaledb.md b/src/exporting/prometheus/integrations/timescaledb.md index 56a8fd49b..9d173e752 100644 --- a/src/exporting/prometheus/integrations/timescaledb.md +++ b/src/exporting/prometheus/integrations/timescaledb.md @@ -42,8 +42,8 @@ The remote write exporting connector does not support buffer on failures. The configuration file name for this integration is `exporting.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/exporting/prometheus/integrations/victoriametrics.md b/src/exporting/prometheus/integrations/victoriametrics.md index c2667ea73..57dfebbd7 100644 --- a/src/exporting/prometheus/integrations/victoriametrics.md +++ b/src/exporting/prometheus/integrations/victoriametrics.md @@ -42,8 +42,8 @@ The remote write exporting connector does not support buffer on failures. 
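[Editor's note] In the `prometheus.c` hunk further below, the hand-rolled copy loops in `prometheus_name_copy()` and `prometheus_label_copy()` are retired: names are routed through the shared `prometheus_rrdlabels_sanitize_name()` sanitizer (its body is not part of this patch), and labels become a plain bounded copy because, per the new comment, label values are already Prometheus-compatible. A sketch of the two behaviors; `sanitize_name()` only approximates what the removed loop did (every non-alphanumeric byte becomes `_`), and `copyz()` stands in for Netdata's `strncpyz()`:

```c
#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* Bounded copy that always NUL-terminates, like Netdata's strncpyz(). */
static void copyz(char *d, const char *s, size_t n) {
    strncpy(d, s, n);
    d[n] = '\0';
}

/* Approximation of the removed loop; the real replacement,
 * prometheus_rrdlabels_sanitize_name(), may differ in detail. */
static void sanitize_name(char *d, const char *s, size_t size) {
    if (!size) return;
    size_t n;
    for (n = 0; *s && n + 1 < size; s++, n++)
        d[n] = isalnum((unsigned char)*s) ? *s : '_';
    d[n] = '\0';
}

int main(void) {
    char name[64], label[64];

    sanitize_name(name, "disk.io (sda)", sizeof(name));          /* disk_io__sda_ */
    copyz(label, "label values pass through unchanged", sizeof(label) - 1);

    printf("%s\n%s\n", name, label);
    return 0;
}
```

Note that the old `prometheus_label_copy()` escaped `"`, `\` and newlines inline; the patch drops that on the stated assumption that values arrive pre-sanitized.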
The configuration file name for this integration is `exporting.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/exporting/prometheus/integrations/vmware_aria.md b/src/exporting/prometheus/integrations/vmware_aria.md index 6015c398e..36c488f69 100644 --- a/src/exporting/prometheus/integrations/vmware_aria.md +++ b/src/exporting/prometheus/integrations/vmware_aria.md @@ -42,8 +42,8 @@ The remote write exporting connector does not support buffer on failures. The configuration file name for this integration is `exporting.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/exporting/prometheus/integrations/wavefront.md b/src/exporting/prometheus/integrations/wavefront.md index 1803d30a6..58ae31c84 100644 --- a/src/exporting/prometheus/integrations/wavefront.md +++ b/src/exporting/prometheus/integrations/wavefront.md @@ -42,8 +42,8 @@ The remote write exporting connector does not support buffer on failures. The configuration file name for this integration is `exporting.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
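[Editor's note] The `prometheus.c` hunk just below also reorders the locking in `prometheus_clean_server_root()`: the mutex is acquired before `prometheus_server_root` is tested rather than after, so the shared pointer is never read outside the lock and there is exactly one unlock path. A compilable sketch of the corrected shape, with pthread primitives and plain `free()` standing in for `netdata_mutex_*` and `freez()`:

```c
#include <pthread.h>
#include <stdlib.h>

struct prometheus_server { struct prometheus_server *next; };

static struct prometheus_server *prometheus_server_root = NULL;
static pthread_mutex_t prometheus_server_root_mutex = PTHREAD_MUTEX_INITIALIZER;

void prometheus_clean_server_root(void) {
    pthread_mutex_lock(&prometheus_server_root_mutex);   /* lock unconditionally */
    if (prometheus_server_root) {
        for (struct prometheus_server *ps = prometheus_server_root; ps; ) {
            struct prometheus_server *current = ps;
            ps = ps->next;      /* save the successor before freeing */
            free(current);
        }
        prometheus_server_root = NULL;
    }
    pthread_mutex_unlock(&prometheus_server_root_mutex); /* single unlock path */
}

int main(void) {
    prometheus_clean_server_root(); /* safe to call with an empty list */
    return 0;
}
```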
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/exporting/prometheus/prometheus.c b/src/exporting/prometheus/prometheus.c index 0ba83a939..92d2fe64b 100644 --- a/src/exporting/prometheus/prometheus.c +++ b/src/exporting/prometheus/prometheus.c @@ -88,9 +88,8 @@ static netdata_mutex_t prometheus_server_root_mutex = NETDATA_MUTEX_INITIALIZER; */ void prometheus_clean_server_root() { + netdata_mutex_lock(&prometheus_server_root_mutex); if (prometheus_server_root) { - netdata_mutex_lock(&prometheus_server_root_mutex); - struct prometheus_server *ps; for (ps = prometheus_server_root; ps; ) { struct prometheus_server *current = ps; @@ -101,8 +100,8 @@ void prometheus_clean_server_root() freez(current); } prometheus_server_root = NULL; - netdata_mutex_unlock(&prometheus_server_root_mutex); } + netdata_mutex_unlock(&prometheus_server_root_mutex); } /** @@ -149,24 +148,11 @@ static inline time_t prometheus_server_last_access(const char *server, RRDHOST * * * @param d a destination string. * @param s a source string. - * @param usable the number of characters to copy. + * @param size the number of characters to copy. * @return Returns the length of the copied string. */ -inline size_t prometheus_name_copy(char *d, const char *s, size_t usable) -{ - size_t n; - - for (n = 0; *s && n < usable; d++, s++, n++) { - register char c = *s; - - if (!isalnum(c)) - *d = '_'; - else - *d = c; - } - *d = '\0'; - - return n; +inline void prometheus_name_copy(char *d, const char *s, size_t size) { + prometheus_rrdlabels_sanitize_name(d, s, size); } /** @@ -174,28 +160,13 @@ inline size_t prometheus_name_copy(char *d, const char *s, size_t usable) * * @param d a destination string. * @param s a source string. - * @param usable the number of characters to copy. + * @param size the number of characters to copy. * @return Returns the length of the copied string. */ -inline size_t prometheus_label_copy(char *d, const char *s, size_t usable) -{ - size_t n; - - // make sure we can escape one character without overflowing the buffer - usable--; - - for (n = 0; *s && n < usable; d++, s++, n++) { - register char c = *s; - - if (unlikely(c == '"' || c == '\\' || c == '\n')) { - *d++ = '\\'; - n++; - } - *d = c; - } - *d = '\0'; - - return n; +inline void prometheus_label_copy(char *d, const char *s, size_t size) { + // our label values are already compatible with prometheus label values + // so, just copy them + strncpyz(d, s, size - 1); } /** @@ -299,8 +270,8 @@ static int format_prometheus_label_callback(const char *name, const char *value, char k[PROMETHEUS_ELEMENT_MAX + 1]; char v[PROMETHEUS_ELEMENT_MAX + 1]; - prometheus_name_copy(k, name, PROMETHEUS_ELEMENT_MAX); - prometheus_label_copy(v, value, PROMETHEUS_ELEMENT_MAX); + prometheus_name_copy(k, name, sizeof(k)); + prometheus_label_copy(v, value, sizeof(v)); if (*k && *v) { if (d->count > 0) buffer_strcat(d->instance->labels_buffer, ","); @@ -341,8 +312,8 @@ static int format_prometheus_chart_label_callback(const char *name, const char * char k[PROMETHEUS_ELEMENT_MAX + 1]; char v[PROMETHEUS_ELEMENT_MAX + 1]; - prometheus_name_copy(k, name, PROMETHEUS_ELEMENT_MAX); - prometheus_label_copy(v, value, PROMETHEUS_ELEMENT_MAX); + prometheus_name_copy(k, name, sizeof(k)); + prometheus_label_copy(v, value, sizeof(v)); if (*k && *v) buffer_sprintf(wb, ",%s=\"%s\"", k, v); @@ -630,9 +601,9 @@ static int prometheus_rrdset_to_json(RRDSET *st, void *data) prometheus_label_copy(chart, (output_options & PROMETHEUS_OUTPUT_NAMES && st->name) ? 
- rrdset_name(st) : rrdset_id(st), PROMETHEUS_ELEMENT_MAX); - prometheus_label_copy(family, rrdset_family(st), PROMETHEUS_ELEMENT_MAX); - prometheus_name_copy(context, rrdset_context(st), PROMETHEUS_ELEMENT_MAX); + rrdset_name(st) : rrdset_id(st), sizeof(chart)); + prometheus_label_copy(family, rrdset_family(st), sizeof(family)); + prometheus_name_copy(context, rrdset_context(st), sizeof(context)); int as_collected = (EXPORTING_OPTIONS_DATA_SOURCE(opts->exporting_options) == EXPORTING_SOURCE_DATA_AS_COLLECTED); @@ -708,7 +679,7 @@ static int prometheus_rrdset_to_json(RRDSET *st, void *data) prometheus_label_copy( dimension, (output_options & PROMETHEUS_OUTPUT_NAMES && rd->name) ? rrddim_name(rd) : rrddim_id(rd), - PROMETHEUS_ELEMENT_MAX); + sizeof(dimension)); } else { // the dimensions of the chart, do not have the same algorithm, multiplier or divisor @@ -717,7 +688,7 @@ static int prometheus_rrdset_to_json(RRDSET *st, void *data) prometheus_name_copy( dimension, (output_options & PROMETHEUS_OUTPUT_NAMES && rd->name) ? rrddim_name(rd) : rrddim_id(rd), - PROMETHEUS_ELEMENT_MAX); + sizeof(dimension)); } generate_as_collected_from_metric(wb, &p, homogeneous, prometheus_collector, st->rrdlabels); } @@ -738,7 +709,7 @@ static int prometheus_rrdset_to_json(RRDSET *st, void *data) prometheus_label_copy( dimension, (output_options & PROMETHEUS_OUTPUT_NAMES && rd->name) ? rrddim_name(rd) : rrddim_id(rd), - PROMETHEUS_ELEMENT_MAX); + sizeof(dimension)); if (opts->output_options & PROMETHEUS_OUTPUT_HELP_TYPE) { generate_as_collected_prom_help(wb, prefix, context, units, suffix, st); @@ -837,7 +808,7 @@ static void rrd_stats_api_v1_charts_allmetrics_prometheus( SIMPLE_PATTERN *filter = simple_pattern_create(filter_string, NULL, SIMPLE_PATTERN_EXACT, true); char hostname[PROMETHEUS_ELEMENT_MAX + 1]; - prometheus_label_copy(hostname, rrdhost_hostname(host), PROMETHEUS_ELEMENT_MAX); + prometheus_label_copy(hostname, rrdhost_hostname(host), sizeof(hostname)); format_host_labels_prometheus(instance, host); diff --git a/src/exporting/prometheus/prometheus.h b/src/exporting/prometheus/prometheus.h index 0a537fd77..f5b0e4874 100644 --- a/src/exporting/prometheus/prometheus.h +++ b/src/exporting/prometheus/prometheus.h @@ -27,8 +27,8 @@ void rrd_stats_api_v1_charts_allmetrics_prometheus_all_hosts( EXPORTING_OPTIONS exporting_options, PROMETHEUS_OUTPUT_OPTIONS output_options); int can_send_rrdset(struct instance *instance, RRDSET *st, SIMPLE_PATTERN *filter); -size_t prometheus_name_copy(char *d, const char *s, size_t usable); -size_t prometheus_label_copy(char *d, const char *s, size_t usable); +void prometheus_name_copy(char *d, const char *s, size_t size); +void prometheus_label_copy(char *d, const char *s, size_t size); char *prometheus_units_copy(char *d, const char *s, size_t usable, int showoldunits); void format_host_labels_prometheus(struct instance *instance, RRDHOST *host); diff --git a/src/exporting/prometheus/remote_write/remote_write.c b/src/exporting/prometheus/remote_write/remote_write.c index b4b6f996b..fc8c5b3ba 100644 --- a/src/exporting/prometheus/remote_write/remote_write.c +++ b/src/exporting/prometheus/remote_write/remote_write.c @@ -114,12 +114,10 @@ int init_prometheus_remote_write_instance(struct instance *instance) struct simple_connector_data *simple_connector_data = callocz(1, sizeof(struct simple_connector_data)); instance->connector_specific_data = simple_connector_data; -#ifdef ENABLE_HTTPS simple_connector_data->ssl = NETDATA_SSL_UNSET_CONNECTION; if (instance->config.options 
& EXPORTING_OPTION_USE_TLS) { netdata_ssl_initialize_ctx(NETDATA_SSL_EXPORTING_CTX); } -#endif struct prometheus_remote_write_specific_data *connector_specific_data = callocz(1, sizeof(struct prometheus_remote_write_specific_data)); @@ -147,8 +145,8 @@ static int format_remote_write_label_callback(const char *name, const char *valu char k[PROMETHEUS_ELEMENT_MAX + 1]; char v[PROMETHEUS_ELEMENT_MAX + 1]; - prometheus_name_copy(k, name, PROMETHEUS_ELEMENT_MAX); - prometheus_label_copy(v, value, PROMETHEUS_ELEMENT_MAX); + prometheus_name_copy(k, name, sizeof(k)); + prometheus_label_copy(v, value, sizeof(v)); add_label(d->write_request, k, v); return 1; } @@ -171,7 +169,7 @@ int format_host_prometheus_remote_write(struct instance *instance, RRDHOST *host prometheus_label_copy( hostname, (host == localhost) ? instance->config.hostname : rrdhost_hostname(host), - PROMETHEUS_ELEMENT_MAX); + sizeof(hostname)); add_host_info( connector_specific_data->write_request, @@ -200,9 +198,9 @@ int format_chart_prometheus_remote_write(struct instance *instance, RRDSET *st) prometheus_label_copy( chart, (instance->config.options & EXPORTING_OPTION_SEND_NAMES && st->name) ? rrdset_name(st) : rrdset_id(st), - PROMETHEUS_ELEMENT_MAX); - prometheus_label_copy(family, rrdset_family(st), PROMETHEUS_ELEMENT_MAX); - prometheus_name_copy(context, rrdset_context(st), PROMETHEUS_ELEMENT_MAX); + sizeof(chart)); + prometheus_label_copy(family, rrdset_family(st), sizeof(family)); + prometheus_name_copy(context, rrdset_context(st), sizeof(context)); as_collected = (EXPORTING_OPTIONS_DATA_SOURCE(instance->config.options) == EXPORTING_SOURCE_DATA_AS_COLLECTED); homogeneous = 1; @@ -268,7 +266,7 @@ int format_dimension_prometheus_remote_write(struct instance *instance, RRDDIM * prometheus_label_copy( dimension, (instance->config.options & EXPORTING_OPTION_SEND_NAMES && rd->name) ? rrddim_name(rd) : rrddim_id(rd), - PROMETHEUS_ELEMENT_MAX); + sizeof(dimension)); snprintf(name, PROMETHEUS_LABELS_MAX, "%s_%s%s", instance->config.prefix, context, suffix); add_metric( @@ -283,9 +281,9 @@ int format_dimension_prometheus_remote_write(struct instance *instance, RRDDIM * prometheus_name_copy( dimension, (instance->config.options & EXPORTING_OPTION_SEND_NAMES && rd->name) ? rrddim_name(rd) : rrddim_id(rd), - PROMETHEUS_ELEMENT_MAX); + sizeof(dimension)); snprintf( - name, PROMETHEUS_LABELS_MAX, "%s_%s_%s%s", instance->config.prefix, context, dimension, + name, sizeof(name), "%s_%s_%s%s", instance->config.prefix, context, dimension, suffix); add_metric( @@ -309,7 +307,7 @@ int format_dimension_prometheus_remote_write(struct instance *instance, RRDDIM * prometheus_label_copy( dimension, (instance->config.options & EXPORTING_OPTION_SEND_NAMES && rd->name) ? 
rrddim_name(rd) : rrddim_id(rd), - PROMETHEUS_ELEMENT_MAX); + sizeof(dimension)); snprintf( name, PROMETHEUS_LABELS_MAX, "%s_%s%s%s", instance->config.prefix, context, units, suffix); @@ -340,8 +338,8 @@ static int format_variable_prometheus_remote_write_callback(const DICTIONARY_ITE char name[PROMETHEUS_LABELS_MAX + 1]; char *suffix = ""; - prometheus_name_copy(context, rrdvar_name(rv), PROMETHEUS_ELEMENT_MAX); - snprintf(name, PROMETHEUS_LABELS_MAX, "%s_%s%s", instance->config.prefix, context, suffix); + prometheus_name_copy(context, rrdvar_name(rv), sizeof(context)); + snprintf(name, sizeof(name), "%s_%s%s", instance->config.prefix, context, suffix); NETDATA_DOUBLE value = rrdvar2number(rv); add_variable(connector_specific_data->write_request, name, diff --git a/src/exporting/pubsub/integrations/google_cloud_pub_sub.md b/src/exporting/pubsub/integrations/google_cloud_pub_sub.md index 1adfd408e..f51c90af8 100644 --- a/src/exporting/pubsub/integrations/google_cloud_pub_sub.md +++ b/src/exporting/pubsub/integrations/google_cloud_pub_sub.md @@ -38,8 +38,8 @@ Export metrics to Google Cloud Pub/Sub Service The configuration file name for this integration is `exporting.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/exporting/read_config.c b/src/exporting/read_config.c index cd8af6bf6..13fe10883 100644 --- a/src/exporting/read_config.c +++ b/src/exporting/read_config.c @@ -5,11 +5,7 @@ EXPORTING_OPTIONS global_exporting_options = EXPORTING_SOURCE_DATA_AVERAGE | EXPORTING_OPTION_SEND_NAMES; const char *global_exporting_prefix = "netdata"; -struct config exporting_config = { .first_section = NULL, - .last_section = NULL, - .mutex = NETDATA_MUTEX_INITIALIZER, - .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare }, - .rwlock = AVL_LOCK_INITIALIZER } }; +struct config exporting_config = APPCONFIG_INITIALIZER; struct instance *prometheus_exporter_instance = NULL; @@ -32,7 +28,7 @@ static _CONNECTOR_INSTANCE *find_instance(const char *section) return local_ci; } -char *expconfig_get(struct config *root, const char *section, const char *name, const char *default_value) +static const char *expconfig_get(struct config *root, const char *section, const char *name, const char *default_value) { _CONNECTOR_INSTANCE *local_ci; @@ -207,14 +203,14 @@ struct engine *read_exporting_config() if (unlikely(engine)) return engine; - char *filename = strdupz_path_subpath(netdata_configured_user_config_dir, EXPORTING_CONF); + char *filename = filename_from_path_entry_strdupz(netdata_configured_user_config_dir, EXPORTING_CONF); exporting_config_exists = appconfig_load(&exporting_config, filename, 0, NULL); if (!exporting_config_exists) { netdata_log_info("CONFIG: cannot load user exporting config '%s'. 
Will try the stock version.", filename); freez(filename); - filename = strdupz_path_subpath(netdata_configured_stock_config_dir, EXPORTING_CONF); + filename = filename_from_path_entry_strdupz(netdata_configured_stock_config_dir, EXPORTING_CONF); exporting_config_exists = appconfig_load(&exporting_config, filename, 0, NULL); if (!exporting_config_exists) netdata_log_info("CONFIG: cannot load stock exporting config '%s'. Running with internal defaults.", filename); @@ -243,7 +239,7 @@ struct engine *read_exporting_config() prometheus_exporter_instance->config.options |= global_exporting_options & EXPORTING_OPTIONS_SOURCE_BITS; - char *data_source = prometheus_config_get("data source", "average"); + const char *data_source = prometheus_config_get("data source", "average"); prometheus_exporter_instance->config.options = exporting_parse_data_source(data_source, prometheus_exporter_instance->config.options); @@ -378,7 +374,7 @@ struct engine *read_exporting_config() tmp_instance->config.hosts_pattern = simple_pattern_create( exporter_get(instance_name, "send hosts matching", "localhost *"), NULL, SIMPLE_PATTERN_EXACT, true); - char *data_source = exporter_get(instance_name, "data source", "average"); + const char *data_source = exporter_get(instance_name, "data source", "average"); tmp_instance->config.options = exporting_parse_data_source(data_source, tmp_instance->config.options); if (EXPORTING_OPTIONS_DATA_SOURCE(tmp_instance->config.options) != EXPORTING_SOURCE_DATA_AS_COLLECTED && @@ -468,8 +464,6 @@ struct engine *read_exporting_config() tmp_instance->config.hostname = strdupz(exporter_get(instance_name, "hostname", engine->config.hostname)); -#ifdef ENABLE_HTTPS - #define STR_GRAPHITE_HTTPS "graphite:https" #define STR_JSON_HTTPS "json:https" #define STR_OPENTSDB_HTTPS "opentsdb:https" @@ -487,7 +481,6 @@ struct engine *read_exporting_config() strlen(STR_PROMETHEUS_REMOTE_WRITE_HTTPS)))) { tmp_instance->config.options |= EXPORTING_OPTION_USE_TLS; } -#endif #ifdef NETDATA_INTERNAL_CHECKS netdata_log_info( diff --git a/src/exporting/send_data.c b/src/exporting/send_data.c index 097b7fd4b..02e9c7b29 100644 --- a/src/exporting/send_data.c +++ b/src/exporting/send_data.c @@ -2,7 +2,6 @@ #include "exporting_engine.h" -#ifdef ENABLE_HTTPS /** * Check if TLS is enabled in the configuration * @@ -19,7 +18,6 @@ static int exporting_tls_is_enabled(EXPORTING_CONNECTOR_TYPE type __maybe_unused type == EXPORTING_CONNECTOR_TYPE_PROMETHEUS_REMOTE_WRITE) && options & EXPORTING_OPTION_USE_TLS; } -#endif /** * Discard response @@ -69,28 +67,23 @@ void simple_connector_receive_response(int *sock, struct instance *instance) response = buffer_create(4096, &netdata_buffers_statistics.buffers_exporters); struct stats *stats = &instance->stats; -#ifdef ENABLE_HTTPS uint32_t options = (uint32_t)instance->config.options; struct simple_connector_data *connector_specific_data = instance->connector_specific_data; if (options & EXPORTING_OPTION_USE_TLS) ERR_clear_error(); -#endif errno_clear(); // loop through to collect all data while (*sock != -1 && errno != EWOULDBLOCK) { ssize_t r; -#ifdef ENABLE_HTTPS if (SSL_connection(&connector_specific_data->ssl)) r = netdata_ssl_read(&connector_specific_data->ssl, &response->buffer[response->len], (int) (response->size - response->len)); else r = recv(*sock, &response->buffer[response->len], response->size - response->len, MSG_DONTWAIT); -#else - r = recv(*sock, &response->buffer[response->len], response->size - response->len, MSG_DONTWAIT); -#endif + if (likely(r > 0)) { // 
we received some data response->len += r; @@ -136,13 +129,11 @@ void simple_connector_send_buffer( flags += MSG_NOSIGNAL; #endif -#ifdef ENABLE_HTTPS uint32_t options = (uint32_t)instance->config.options; struct simple_connector_data *connector_specific_data = instance->connector_specific_data; if (options & EXPORTING_OPTION_USE_TLS) ERR_clear_error(); -#endif struct stats *stats = &instance->stats; ssize_t header_sent_bytes = 0; @@ -150,7 +141,6 @@ void simple_connector_send_buffer( size_t header_len = buffer_strlen(header); size_t buffer_len = buffer_strlen(buffer); -#ifdef ENABLE_HTTPS if (SSL_connection(&connector_specific_data->ssl)) { if (header_len) @@ -166,12 +156,6 @@ void simple_connector_send_buffer( if ((size_t)header_sent_bytes == header_len) buffer_sent_bytes = send(*sock, buffer_tostring(buffer), buffer_len, flags); } -#else - if (header_len) - header_sent_bytes = send(*sock, buffer_tostring(header), header_len, flags); - if ((size_t)header_sent_bytes == header_len) - buffer_sent_bytes = send(*sock, buffer_tostring(buffer), buffer_len, flags); -#endif if ((size_t)buffer_sent_bytes == buffer_len) { // we sent the data successfully @@ -221,12 +205,11 @@ void simple_connector_worker(void *instance_p) snprintfz(threadname, ND_THREAD_TAG_MAX, "EXPSMPL[%zu]", instance->index); uv_thread_set_name_np(threadname); -#ifdef ENABLE_HTTPS uint32_t options = (uint32_t)instance->config.options; if (options & EXPORTING_OPTION_USE_TLS) ERR_clear_error(); -#endif + struct simple_connector_config *connector_specific_config = instance->config.connector_specific_config; int sock = -1; @@ -303,7 +286,7 @@ void simple_connector_worker(void *instance_p) &reconnects, connector_specific_data->connected_to, CONNECTED_TO_MAX); -#ifdef ENABLE_HTTPS + if (exporting_tls_is_enabled(instance->config.type, options) && sock != -1) { if (netdata_ssl_exporting_ctx) { if (sock_delnonblock(sock) < 0) @@ -326,7 +309,6 @@ void simple_connector_worker(void *instance_p) } } } -#endif stats->reconnects += reconnects; } diff --git a/src/go/cmd/godplugin/main.go b/src/go/cmd/godplugin/main.go index cae9fa1b4..961aa91d1 100644 --- a/src/go/cmd/godplugin/main.go +++ b/src/go/cmd/godplugin/main.go @@ -15,9 +15,9 @@ import ( "github.com/netdata/netdata/go/plugins/logger" "github.com/netdata/netdata/go/plugins/pkg/buildinfo" "github.com/netdata/netdata/go/plugins/pkg/executable" + "github.com/netdata/netdata/go/plugins/pkg/multipath" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent" "github.com/netdata/netdata/go/plugins/plugin/go.d/cli" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/multipath" "github.com/jessevdk/go-flags" "golang.org/x/net/http/httpproxy" diff --git a/src/go/go.mod b/src/go/go.mod index 25153fc61..90277365c 100644 --- a/src/go/go.mod +++ b/src/go/go.mod @@ -6,64 +6,70 @@ replace github.com/prometheus/prometheus => github.com/prometheus/prometheus v0. 
require ( github.com/DATA-DOG/go-sqlmock v1.5.2 - github.com/Masterminds/sprig/v3 v3.2.3 + github.com/Masterminds/sprig/v3 v3.3.0 github.com/Wing924/ltsv v0.3.1 github.com/apparentlymart/go-cidr v1.1.0 github.com/araddon/dateparse v0.0.0-20210429162001-6b43995a97de github.com/axiomhq/hyperloglog v0.1.0 github.com/blang/semver/v4 v4.0.0 - github.com/bmatcuk/doublestar/v4 v4.6.1 + github.com/bmatcuk/doublestar/v4 v4.7.1 github.com/clbanning/rfile/v2 v2.0.0-20231024120205-ac3fca974b0e github.com/cloudflare/cfssl v1.6.5 github.com/coreos/go-systemd/v22 v22.5.0 - github.com/docker/docker v27.1.2+incompatible - github.com/facebook/time v0.0.0-20240626113945-18207c5d8ddc - github.com/fsnotify/fsnotify v1.7.0 - github.com/go-redis/redis/v8 v8.11.5 + github.com/docker/docker v27.3.1+incompatible + github.com/facebook/time v0.0.0-20241003211043-324a0f2e94c0 + github.com/fsnotify/fsnotify v1.8.0 + github.com/go-ldap/ldap/v3 v3.4.8 github.com/go-sql-driver/mysql v1.8.1 github.com/godbus/dbus/v5 v5.1.0 github.com/gofrs/flock v0.12.1 github.com/golang/mock v1.6.0 + github.com/google/uuid v1.6.0 + github.com/gorcon/rcon v1.3.5 github.com/gosnmp/gosnmp v1.38.0 github.com/ilyam8/hashstructure v1.1.0 github.com/jackc/pgx/v4 v4.18.3 - github.com/jackc/pgx/v5 v5.6.0 + github.com/jackc/pgx/v5 v5.7.1 github.com/jessevdk/go-flags v1.6.1 github.com/kanocz/fcgi_client v0.0.0-20210113082628-fff85c8adfb7 - github.com/likexian/whois v1.15.4 - github.com/likexian/whois-parser v1.24.19 + github.com/likexian/whois v1.15.5 + github.com/likexian/whois-parser v1.24.20 github.com/lmittmann/tint v1.0.5 github.com/mattn/go-isatty v0.0.20 github.com/mattn/go-xmlrpc v0.0.3 github.com/miekg/dns v1.1.62 github.com/mitchellh/go-homedir v1.1.0 - github.com/prometheus-community/pro-bing v0.4.1 - github.com/prometheus/common v0.55.0 + github.com/prometheus-community/pro-bing v0.4.2-0.20241106090159-5a5f1d731cf5 + github.com/prometheus/common v0.60.1 github.com/prometheus/prometheus v2.5.0+incompatible + github.com/redis/go-redis/v9 v9.7.0 + github.com/sijms/go-ora/v2 v2.8.22 github.com/stretchr/testify v1.9.0 - github.com/tidwall/gjson v1.17.3 + github.com/tidwall/gjson v1.18.0 github.com/valyala/fastjson v1.6.4 - github.com/vmware/govmomi v0.42.0 - go.mongodb.org/mongo-driver v1.16.1 - golang.org/x/net v0.28.0 - golang.org/x/text v0.17.0 + github.com/vmware/govmomi v0.46.0 + go.mongodb.org/mongo-driver v1.17.1 + golang.org/x/net v0.30.0 + golang.org/x/text v0.19.0 golang.zx2c4.com/wireguard/wgctrl v0.0.0-20220504211119-3d4a969bb56b gopkg.in/ini.v1 v1.67.0 gopkg.in/rethinkdb/rethinkdb-go.v6 v6.2.2 gopkg.in/yaml.v2 v2.4.0 - k8s.io/api v0.31.0 - k8s.io/apimachinery v0.31.0 - k8s.io/client-go v0.31.0 + k8s.io/api v0.31.2 + k8s.io/apimachinery v0.31.2 + k8s.io/client-go v0.31.2 layeh.com/radius v0.0.0-20190322222518-890bc1058917 ) require ( + dario.cat/mergo v1.0.1 // indirect filippo.io/edwards25519 v1.1.0 // indirect github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect + github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect github.com/Masterminds/goutils v1.1.1 // indirect - github.com/Masterminds/semver/v3 v3.2.0 // indirect + github.com/Masterminds/semver/v3 v3.3.0 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc // indirect 
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect @@ -73,6 +79,7 @@ require ( github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect + github.com/go-asn1-ber/asn1-ber v1.5.5 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/jsonpointer v0.20.0 // indirect @@ -85,19 +92,18 @@ require ( github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/uuid v1.6.0 // indirect github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect - github.com/huandu/xstrings v1.3.3 // indirect + github.com/huandu/xstrings v1.5.0 // indirect github.com/imdario/mergo v0.3.16 // indirect github.com/jackc/chunkreader/v2 v2.0.1 // indirect github.com/jackc/pgconn v1.14.3 // indirect github.com/jackc/pgio v1.0.0 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect github.com/jackc/pgproto3/v2 v2.3.3 // indirect - github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect + github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect github.com/jackc/pgtype v1.14.0 // indirect - github.com/jackc/puddle/v2 v2.2.1 // indirect + github.com/jackc/puddle/v2 v2.2.2 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/josharian/native v1.1.0 // indirect github.com/json-iterator/go v1.1.12 // indirect @@ -107,8 +113,8 @@ require ( github.com/mdlayher/genetlink v1.3.2 // indirect github.com/mdlayher/netlink v1.7.2 // indirect github.com/mdlayher/socket v0.4.1 // indirect - github.com/mitchellh/copystructure v1.0.0 // indirect - github.com/mitchellh/reflectwalk v1.0.1 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect @@ -120,9 +126,9 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/shopspring/decimal v1.2.0 // indirect + github.com/shopspring/decimal v1.4.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect - github.com/spf13/cast v1.3.1 // indirect + github.com/spf13/cast v1.7.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/tidwall/match v1.1.1 // indirect github.com/tidwall/pretty v1.2.0 // indirect @@ -130,18 +136,18 @@ require ( github.com/xdg-go/pbkdf2 v1.0.0 // indirect github.com/xdg-go/scram v1.1.2 // indirect github.com/xdg-go/stringprep v1.0.4 // indirect - github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d // indirect + github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 // indirect go.opentelemetry.io/otel v1.22.0 // indirect go.opentelemetry.io/otel/metric v1.22.0 // indirect go.opentelemetry.io/otel/trace v1.22.0 // indirect - golang.org/x/crypto v0.26.0 // indirect + golang.org/x/crypto v0.28.0 // indirect golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect golang.org/x/mod v0.18.0 // indirect - golang.org/x/oauth2 v0.21.0 // indirect + 
golang.org/x/oauth2 v0.23.0 // indirect golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.23.0 // indirect - golang.org/x/term v0.23.0 // indirect + golang.org/x/sys v0.26.0 // indirect + golang.org/x/term v0.25.0 // indirect golang.org/x/time v0.5.0 // indirect golang.org/x/tools v0.22.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect diff --git a/src/go/go.sum b/src/go/go.sum index c572aa7c4..35c2fb6ff 100644 --- a/src/go/go.sum +++ b/src/go/go.sum @@ -1,21 +1,27 @@ +dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= +dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8= +github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU= github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= -github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g= -github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= -github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= -github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= +github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0= +github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= +github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs= +github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/Wing924/ltsv v0.3.1 h1:hbjzQ6YuS/sOm7nQJG7ddT9ua1yYmcH25Q8lsuiQE0A= github.com/Wing924/ltsv v0.3.1/go.mod h1:zl47wq7H23LocdDHg7yJAH/Qdc4MWHXu1Evx9Ahilmo= +github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa h1:LHTHcTQiSGT7VVbI0o4wBRNQIgn917usHWOd6VAffYI= +github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4= github.com/apparentlymart/go-cidr v1.1.0 h1:2mAhrMoF+nhXqxTzSZMUzDHkLjmIHC+Zzn4tdgBZjnU= github.com/apparentlymart/go-cidr v1.1.0/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc= github.com/araddon/dateparse v0.0.0-20210429162001-6b43995a97de h1:FxWPpzIjnTlhPwqqXc4/vE0f7GvRjuAsbW+HOIe8KnA= @@ -26,14 +32,18 @@ github.com/bitly/go-hostpool v0.1.0 h1:XKmsF6k5el6xHG3WPJ8U0Ku/ye7njX7W81Ng7O2io 
github.com/bitly/go-hostpool v0.1.0/go.mod h1:4gOCgp6+NZnVqlKyZ/iBZFTAJKembaVENUpMkpg42fw= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= -github.com/bmatcuk/doublestar/v4 v4.6.1 h1:FH9SifrbvJhnlQpztAx++wlkk70QBf0iBWDwNy7PA4I= -github.com/bmatcuk/doublestar/v4 v4.6.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= +github.com/bmatcuk/doublestar/v4 v4.7.1 h1:fdDeAqgT47acgwd9bd9HxJRDmc9UAmPpc+2m0CXv75Q= +github.com/bmatcuk/doublestar/v4 v4.7.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= +github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= +github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= +github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/clbanning/rfile/v2 v2.0.0-20231024120205-ac3fca974b0e h1:Iw4JdD/TlCUvlVWIjuV1M98rGNo/C+NxM6U5ghStom4= github.com/clbanning/rfile/v2 v2.0.0-20231024120205-ac3fca974b0e/go.mod h1:Y53jAgtl30vLWEnRWkZFT+CpwLNsrQJb0F5AwHieNGs= github.com/cloudflare/cfssl v1.6.5 h1:46zpNkm6dlNkMZH/wMW22ejih6gIaJbzL2du6vD7ZeI= @@ -58,24 +68,30 @@ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/r github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v27.1.2+incompatible h1:AhGzR1xaQIy53qCkxARaFluI00WPGtXn0AJuoQsVYTY= -github.com/docker/docker v27.1.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI= +github.com/docker/docker v27.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/facebook/time v0.0.0-20240626113945-18207c5d8ddc h1:0VQsg5ZXW9MPUxzemUHW7UBK8gfIO8K+YJGbdv4kBIM= -github.com/facebook/time 
v0.0.0-20240626113945-18207c5d8ddc/go.mod h1:2UFAomOuD2vAK1x68czUtCVjAqmyWCEnAXOlmGqf+G0= +github.com/facebook/time v0.0.0-20241003211043-324a0f2e94c0 h1:Z/QF2fnP1QWi755nNDKxiOfnSboaF/lvsYlB2eh4Hiw= +github.com/facebook/time v0.0.0-20241003211043-324a0f2e94c0/go.mod h1:ROiLXrJb1QHiB4rvK6Sqhl6SAgkjV47z5O/Oy5jKzgU= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= +github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/go-asn1-ber/asn1-ber v1.5.5 h1:MNHlNMBDgEKD4TcKr36vQN68BA00aDfjIt3/bD50WnA= +github.com/go-asn1-ber/asn1-ber v1.5.5/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-ldap/ldap/v3 v3.4.8 h1:loKJyspcRezt2Q3ZRMq2p/0v8iOurlmeXDPw6fikSvQ= +github.com/go-ldap/ldap/v3 v3.4.8/go.mod h1:qS3Sjlu76eHfHGpUdWkAXQTw4beih+cHsco2jXlIXrk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= @@ -90,8 +106,6 @@ github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= -github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= @@ -127,9 +141,12 @@ github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/ github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af h1:kmjWCqn2qkEml422C2Rrd27c3VGxi6a/6HNq8QmHRKM= github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorcon/rcon v1.3.5 h1:YE/Vrw6R99uEP08wp0EjdPAP3Jwz/ys3J8qxI1nYoeU= 
+github.com/gorcon/rcon v1.3.5/go.mod h1:zR1qfKZttF8vAgH1NsP6CdpachOvLDq8jE64NboTpIM= +github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= +github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= github.com/gosnmp/gosnmp v1.38.0 h1:I5ZOMR8kb0DXAFg/88ACurnuwGwYkXWq3eLpJPHMEYc= github.com/gosnmp/gosnmp v1.38.0/go.mod h1:FE+PEZvKrFz9afP9ii1W3cprXuVZ17ypCcyyfYuu5LY= github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww= @@ -139,12 +156,14 @@ github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0 h1:RtRsiaGvWxcwd8y3BiRZxsylPT8 github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0/go.mod h1:TzP6duP4Py2pHLVPPQp42aoYI92+PCrVotyR5e8Vqlk= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4= -github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= +github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/ilyam8/hashstructure v1.1.0 h1:N8t8hzzKLf2Da87XgC/DBYqXUmSbclgx+2cZxS5/klU= github.com/ilyam8/hashstructure v1.1.0/go.mod h1:LoLuwBSNpZOi3eTMfAqe2i4oW9QkI08e6g1Pci9h7hs= -github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= @@ -177,8 +196,8 @@ github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwX github.com/jackc/pgproto3/v2 v2.3.3 h1:1HLSx5H+tXR9pW3in3zaztoEwQYRC9SQaYUHjTSUOag= github.com/jackc/pgproto3/v2 v2.3.3/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= -github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk= -github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= @@ -191,13 +210,25 @@ github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQ 
github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs= github.com/jackc/pgx/v4 v4.18.3 h1:dE2/TrEsGX3RBprb3qryqSV9Y60iZN1C6i8IrmW9/BA= github.com/jackc/pgx/v4 v4.18.3/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx2CkVw= -github.com/jackc/pgx/v5 v5.6.0 h1:SWJzexBzPL5jb0GEsrPMLIsi/3jOo7RHlzTjcAeDrPY= -github.com/jackc/pgx/v5 v5.6.0/go.mod h1:DNZ/vlrUnhWCoFGxHAG8U2ljioxukquj7utPDgtQdTw= +github.com/jackc/pgx/v5 v5.7.1 h1:x7SYsPBYDkHDksogeSmZZ5xzThcTgRz++I5E+ePFUcs= +github.com/jackc/pgx/v5 v5.7.1/go.mod h1:e7O26IywZZ+naJtWWos6i6fvWK+29etgITqrqHLfoZA= github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk= -github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= +github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= +github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= +github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= +github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= +github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= +github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= +github.com/jcmturner/gofork v1.7.6 h1:QH0l3hzAU1tfT3rZCnW5zXl+orbkNMMRGJfdJjHVETg= +github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo= +github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o= +github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= +github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh687T8= +github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs= +github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= +github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= github.com/jessevdk/go-flags v1.6.1 h1:Cvu5U8UGrLay1rZfv/zP7iLpSHGUZ/Ou68T0iX1bBK4= github.com/jessevdk/go-flags v1.6.1/go.mod h1:Mk8T1hIAWpOiJiHa9rJASDK2UGWji0EuPGBnNLMooyc= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= @@ -232,10 +263,10 @@ github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/likexian/gokit v0.25.15 h1:QjospM1eXhdMMHwZRpMKKAHY/Wig9wgcREmLtf9NslY= github.com/likexian/gokit v0.25.15/go.mod h1:S2QisdsxLEHWeD/XI0QMVeggp+jbxYqUxMvSBil7MRg= -github.com/likexian/whois v1.15.4 h1:r5En62c+S9HKFgJtdh2WsdmRGTcxE4WUtGBdZkSBXmM= -github.com/likexian/whois v1.15.4/go.mod h1:rXFTPcQdNlPQBJCQpPWTSIDGzzmgKBftmhdOOcLpwXk= -github.com/likexian/whois-parser v1.24.19 h1:vT8lWhnV8ogkdaYLyef6IvE5VTHVCwlUDG5BUXCx06k= -github.com/likexian/whois-parser v1.24.19/go.mod h1:rAtaofg2luol09H+ogDzGIfcG8ig1NtM5R16uQADDz4= +github.com/likexian/whois v1.15.5 h1:gpPxyCTJtLtJDmakHCo//0ZjK/ocI01GCAd/WBJ2oH8= +github.com/likexian/whois v1.15.5/go.mod h1:4b6o1QTCfjwrB5I3KeNQnn79QtuPUTsewsE+ys94I78= +github.com/likexian/whois-parser v1.24.20 
h1:oxEkRi0GxgqWQRLDMJpXU1EhgWmLmkqEFZ2ChXTeQLE= +github.com/likexian/whois-parser v1.24.20/go.mod h1:rAtaofg2luol09H+ogDzGIfcG8ig1NtM5R16uQADDz4= github.com/lmittmann/tint v1.0.5 h1:NQclAutOfYsqs2F1Lenue6OoWCajs5wJcP3DfWVpePw= github.com/lmittmann/tint v1.0.5/go.mod h1:HIS3gSy7qNwGCj+5oRjAutErFBl4BzdQP6cJZ0NfMwE= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= @@ -260,13 +291,12 @@ github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ= github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ= github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721 h1:RlZweED6sbSArvlE924+mUcZuXKLBHA35U7LN621Bws= github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721/go.mod h1:Ickgr2WtCLZ2MDGd4Gr0geeCH5HybhRJbonOgQpvSxc= -github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= -github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/mitchellh/reflectwalk v1.0.1 h1:FVzMWA5RllMAKIdUSC8mdWo3XtwoecrH79BY70sEEpE= -github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= @@ -283,12 +313,9 @@ github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7P github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= -github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.0 h1:Iw5WCbBcaAAd0fpRb1c9r5YCylv4XDoCSigm1zLevwU= github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= -github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA= github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= @@ -307,14 +334,16 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus-community/pro-bing v0.4.1 h1:aMaJwyifHZO0y+h8+icUz0xbToHbia0wdmzdVZ+Kl3w= -github.com/prometheus-community/pro-bing v0.4.1/go.mod h1:aLsw+zqCaDoa2RLVVSX3+UiCkBBXTMtZC3c7EkfWnAE= +github.com/prometheus-community/pro-bing v0.4.2-0.20241106090159-5a5f1d731cf5 h1:zHS7rMBAaRLMQUlKPT/gFHQVn3RldvPBNNDSpIB6WtM= +github.com/prometheus-community/pro-bing v0.4.2-0.20241106090159-5a5f1d731cf5/go.mod h1:zZf++wJG7OTBQY+deqxF1XgN9GqlbYPCPZPd1aX9wqQ= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= +github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= github.com/prometheus/prometheus v0.50.1 h1:N2L+DYrxqPh4WZStU+o1p/gQlBaqFbcLBTjlp3vpdXw= github.com/prometheus/prometheus v0.50.1/go.mod h1:FvE8dtQ1Ww63IlyKBn1V4s+zMwF9kHkVNkQBR1pM4CU= +github.com/redis/go-redis/v9 v9.7.0 h1:HhLSs+B6O021gwzl+locl0zEDnyNkxMtf/Z3NNBMa9E= +github.com/redis/go-redis/v9 v9.7.0/go.mod h1:f6zhXITC7JUJIlPEiBOTXxJgPLdZcA93GewI7inzyWw= github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= @@ -325,15 +354,18 @@ github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThC github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/scylladb/termtables v0.0.0-20191203121021-c4c0b6d42ff4/go.mod h1:C1a7PQSMz9NShzorzCiG2fk9+xuCgLkPeCvMHYR2OWg= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= -github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= +github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= +github.com/sijms/go-ora/v2 v2.8.22 h1:3ABgRzVKxS439cEgSLjFKutIwOyhnyi4oOSBywEdOlU= +github.com/sijms/go-ora/v2 v2.8.22/go.mod h1:QgFInVi3ZWyqAiJwzBQA+nbKYKH77tdp1PYoCqhR2dU= github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= -github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.7.0 
h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= +github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -353,16 +385,16 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/tidwall/gjson v1.17.3 h1:bwWLZU7icoKRG+C+0PNwIKC6FCJO/Q3p2pZvuP0jN94= -github.com/tidwall/gjson v1.17.3/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= +github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/valyala/fastjson v1.6.4 h1:uAUNq9Z6ymTgGhcm0UynUAB6tlbakBrz6CQFax3BXVQ= github.com/valyala/fastjson v1.6.4/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY= -github.com/vmware/govmomi v0.42.0 h1:MbvAlVfjNBE1mHMaQ7yOSop1KLB0/93x6VAGuCtjqtI= -github.com/vmware/govmomi v0.42.0/go.mod h1:1H5LWwsBif8HKZqbFp0FdoKTHyJE4FzL6ACequMKYQg= +github.com/vmware/govmomi v0.46.0 h1:vKrY5gG8Udz5HGlBYMrmRy03j9Rey+g5q8S3dQIjOyc= +github.com/vmware/govmomi v0.46.0/go.mod h1:uoLVU9zlXC4p4GmLVG+ZJmBC0Gn3Q7mytOJvi39OhxA= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= @@ -371,15 +403,15 @@ github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY= github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8= github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= -github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d h1:splanxYIlg+5LfHAM6xpdFEAYOk8iySO56hMFq6uLyA= -github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= +github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 h1:ilQV1hzziu+LLM3zUTJ0trRztfwgjqKnBWNtSRkbmwM= +github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78/go.mod h1:aL8wCCfTfSfmXjznFBSZNN13rSJjlIOI1fUNAtF7rmI= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= -go.mongodb.org/mongo-driver v1.16.1 h1:rIVLL3q0IHM39dvE+z2ulZLp9ENZKThVfuvN/IiN4l8= -go.mongodb.org/mongo-driver 
v1.16.1/go.mod h1:oB6AhJQvFQL4LEHyXi6aJzQJtBiTQHiAd83l0GdFaiw= +go.mongodb.org/mongo-driver v1.17.1 h1:Wic5cJIwJgSpBhe3lx3+/RybR5PiYRMpVFgO7cOHyIM= +go.mongodb.org/mongo-driver v1.17.1/go.mod h1:wwWm/+BuOddhcq3n68LKRmgk2wXzmF6s0SFOa0GINL4= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 h1:sv9kVfal0MK0wBMCOGr+HeJm9v803BkJxGrk2au7j08= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0/go.mod h1:SK2UL73Zy1quvRPonmOmRDiWk1KBV3LyIeeIxcEApWw= go.opentelemetry.io/otel v1.22.0 h1:xS7Ku+7yTFvDfDraDIJVpw7XPyuHlB9MCiqqX5mcJ6Y= @@ -418,9 +450,11 @@ golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWP golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= -golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= +golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= +golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -430,6 +464,7 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -437,22 +472,28 @@ golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net 
v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= -golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= -golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= +golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= +golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= +golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -476,16 +517,22 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.23.0 h1:YfKFowiIMvtgl1UERQoTPPToxltDeZfbj4H7dVUCwmM= -golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= +golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term 
v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= -golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= +golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= +golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -493,9 +540,11 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= +golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -511,6 +560,7 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -555,12 +605,10 @@ gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/rethinkdb/rethinkdb-go.v6 v6.2.2 h1:tczPZjdz6soV2thcuq1IFOuNLrBUGonFyUXBbIWXWis= gopkg.in/rethinkdb/rethinkdb-go.v6 v6.2.2/go.mod 
h1:c7Wo0IjB7JL9B9Avv0UZKorYJCUhiergpj3u1WtGT1E= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= @@ -569,12 +617,12 @@ gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -k8s.io/api v0.31.0 h1:b9LiSjR2ym/SzTOlfMHm1tr7/21aD7fSkqgD/CVJBCo= -k8s.io/api v0.31.0/go.mod h1:0YiFF+JfFxMM6+1hQei8FY8M7s1Mth+z/q7eF1aJkTE= -k8s.io/apimachinery v0.31.0 h1:m9jOiSr3FoSSL5WO9bjm1n6B9KROYYgNZOb4tyZ1lBc= -k8s.io/apimachinery v0.31.0/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= -k8s.io/client-go v0.31.0 h1:QqEJzNjbN2Yv1H79SsS+SWnXkBgVu4Pj3CJQgbx0gI8= -k8s.io/client-go v0.31.0/go.mod h1:Y9wvC76g4fLjmU0BA+rV+h2cncoadjvjjkkIGoTLcGU= +k8s.io/api v0.31.2 h1:3wLBbL5Uom/8Zy98GRPXpJ254nEFpl+hwndmk9RwmL0= +k8s.io/api v0.31.2/go.mod h1:bWmGvrGPssSK1ljmLzd3pwCQ9MgoTsRCuK35u6SygUk= +k8s.io/apimachinery v0.31.2 h1:i4vUt2hPK56W6mlT7Ry+AO8eEsyxMD1U44NR22CLTYw= +k8s.io/apimachinery v0.31.2/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/client-go v0.31.2 h1:Y2F4dxU5d3AQj+ybwSMqQnpZH9F30//1ObxOKlTI9yc= +k8s.io/client-go v0.31.2/go.mod h1:NPa74jSVR/+eez2dFsEIHNa+3o09vtNaWwWwb1qSxSs= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= diff --git a/src/go/logger/journal_linux.go b/src/go/logger/journal_linux.go index 00f335075..f55006bdb 100644 --- a/src/go/logger/journal_linux.go +++ b/src/go/logger/journal_linux.go @@ -5,29 +5,10 @@ package logger import ( - "os" - "strconv" - "strings" - "syscall" + "github.com/coreos/go-systemd/v22/journal" ) func isStderrConnectedToJournal() bool { - stream := os.Getenv("JOURNAL_STREAM") - if stream == "" { - return false - } - - idx := strings.IndexByte(stream, ':') - if idx <= 0 { - return false - } - - dev, ino := stream[:idx], stream[idx+1:] - - var stat syscall.Stat_t - if err := syscall.Fstat(int(os.Stderr.Fd()), &stat); err != nil { - return false - } - - return dev == strconv.Itoa(int(stat.Dev)) && ino == strconv.FormatUint(stat.Ino, 10) + ok, _ := journal.StderrIsJournalStream() + return ok } diff --git a/src/go/pkg/matcher/README.md b/src/go/pkg/matcher/README.md new file mode 100644 index 000000000..a4428b581 --- /dev/null +++ b/src/go/pkg/matcher/README.md @@ -0,0 +1,134 @@ +# matcher +## Supported Format + +* string +* glob +* regexp +* simple patterns + +Depending on the symbol at the start of the string, the `matcher` will use one of the supported formats. 
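+
+As a quick orientation, here is a minimal sketch of both forms in use; it relies on this
+package's `Parse` and `Must` helpers, which are demonstrated in the tests further below
+(the expected outputs in the comments follow the matcher semantics described in this document):
+
+```
+package main
+
+import (
+	"fmt"
+
+	"github.com/netdata/netdata/go/plugins/pkg/matcher"
+)
+
+func main() {
+	// short syntax: '=' selects the string matcher (exact match)
+	eq := matcher.Must(matcher.Parse("= foo"))
+	fmt.Println(eq.MatchString("foo"), eq.MatchString("foobar")) // true false
+
+	// short syntax: '*' selects the glob matcher
+	glob := matcher.Must(matcher.Parse("* foo*"))
+	fmt.Println(glob.MatchString("foobar")) // true
+
+	// short syntax: '~' selects the regexp matcher
+	re := matcher.Must(matcher.Parse("~ ^foo[0-9]+$"))
+	fmt.Println(re.MatchString("foo42")) // true
+
+	// long syntax: an explicit format name followed by ':'
+	lg := matcher.Must(matcher.Parse("glob:foo*"))
+	fmt.Println(lg.MatchString("foobar")) // true
+}
+```
+
+The table below summarizes the supported prefixes.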
+
+| matcher         | short format | long format       |
+|-----------------|--------------|-------------------|
+| string          | `=`          | `string`          |
+| glob            | `*`          | `glob`            |
+| regexp          | `~`          | `regexp`          |
+| simple patterns |              | `simple_patterns` |
+
+Example:
+
+- `* pattern`: It will use the `glob` matcher to find the `pattern` in the string.
+
+### Syntax
+
+**Tip**: Read `::=` as `is defined as`.
+
+```
+Short Syntax
+    <line>      ::= [ <not> ] <format> <space> <expr>
+
+    <not>       ::= '!'
+                      negative expression
+    <format>    ::= [ '=', '~', '*' ]
+                      '=' means string match
+                      '~' means regexp match
+                      '*' means glob match
+    <space>     ::= { ' ' | '\t' | '\n' | '\r' }
+    <expr>      ::= any string
+
+Long Syntax
+    <line>      ::= [ <not> ] <format> <separator> <expr>
+
+    <format>    ::= [ 'string' | 'glob' | 'regexp' | 'simple_patterns' ]
+    <not>       ::= '!'
+                      negative expression
+    <separator> ::= ':'
+    <expr>      ::= any string
+```
+
+When using the short syntax, you can enable the glob format by starting the string with a `*`, while in the long syntax
+you need to define it more explicitly. The following examples are identical. `simple_patterns` can be used **only** with
+the long syntax.
+
+Examples:
+
+- Short Syntax: `'* *'`
+- Long Syntax: `'glob:*'`
+
+### String matcher
+
+The string matcher reports whether the given value equals the string.
+
+Examples:
+
+- `'= foo'` matches only if the string is `foo`.
+- `'!= bar'` matches any string that is not `bar`.
+
+The string matcher performs an **exact match** of the `string`. There are other common string-matching cases:
+
+- string has prefix `something`
+- string has suffix `something`
+- string contains `something`
+
+These are achievable using the `glob` matcher:
+
+- `* PREFIX*` matches any string that *starts* with `PREFIX`, e.g. `PREFIXnetdata`
+- `* *SUFFIX` matches any string that *ends* with `SUFFIX`, e.g. `netdataSUFFIX`
+- `* *SUBSTRING*` matches any string that *contains* `SUBSTRING`, e.g. `netdataSUBSTRINGnetdata`
+
+### Glob matcher
+
+The glob matcher reports whether the given value matches the wildcard pattern. It uses the same syntax as the standard
+Go library `path`. You can read more about it in the [Go documentation](https://golang.org/pkg/path/#Match),
+where you can also experiment with the syntax before using it in your Netdata configuration.
+
+The pattern syntax is:
+
+```
+    pattern:
+        { term }
+    term:
+        '*'         matches any sequence of characters
+        '?'         matches any single character
+        '[' [ '^' ] { character-range } ']'
+                    character class (must be non-empty)
+        c           matches character c (c != '*', '?', '\\', '[')
+        '\\' c      matches character c
+
+    character-range:
+        c           matches character c (c != '\\', '-', ']')
+        '\\' c      matches character c
+        lo '-' hi   matches character c for lo <= c <= hi
+```
+
+Examples:
+
+- `* ?` matches any string that is a single character.
+- `'?a'` matches any 2-character string whose second character is `a`, like `ba` but
+  not `bb` or `bba`.
+- `'[^abc]'` matches any single character that is NOT `a`, `b`, or `c`. `'[abc]'` matches only `a`, `b`, or `c`.
+- `'*[a-d]'` matches any string (`*`) that ends with a character between `a` and `d` (i.e. `a`, `b`, `c`, or `d`).
+
+### Regexp matcher
+
+The regexp matcher reports whether the given value matches the RegExp pattern (using `regexp.Match`).
+
+The RegExp syntax is described at https://golang.org/pkg/regexp/syntax/.
+
+Learn more about regular expressions at [RegexOne](https://regexone.com/).
+
+### Simple patterns matcher
+
+The simple patterns matcher reports whether the given value matches the simple patterns.
+
+Simple patterns are a space separated list of words.
Each word may use any number of wildcards `*`. Simple patterns
+allow negative matches by prefixing a word with `!`.
+
+Examples:
+
+- `!*bad* *` matches anything except strings that contain the word `bad`.
+- `*foobar* !foo* !*bar *` matches everything containing `foobar`, except strings that start with `foo` or end with `bar`.
+
+
+
+
diff --git a/src/go/pkg/matcher/cache.go b/src/go/pkg/matcher/cache.go
new file mode 100644
index 000000000..4594fa06f
--- /dev/null
+++ b/src/go/pkg/matcher/cache.go
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package matcher
+
+import "sync"
+
+type (
+	cachedMatcher struct {
+		matcher Matcher
+
+		mux   sync.RWMutex
+		cache map[string]bool
+	}
+)
+
+// WithCache adds a cache to the matcher.
+func WithCache(m Matcher) Matcher {
+	switch m {
+	case TRUE(), FALSE():
+		return m
+	default:
+		return &cachedMatcher{matcher: m, cache: make(map[string]bool)}
+	}
+}
+
+func (m *cachedMatcher) Match(b []byte) bool {
+	s := string(b)
+	if result, ok := m.fetch(s); ok {
+		return result
+	}
+	result := m.matcher.Match(b)
+	m.put(s, result)
+	return result
+}
+
+func (m *cachedMatcher) MatchString(s string) bool {
+	if result, ok := m.fetch(s); ok {
+		return result
+	}
+	result := m.matcher.MatchString(s)
+	m.put(s, result)
+	return result
+}
+
+func (m *cachedMatcher) fetch(key string) (result bool, ok bool) {
+	m.mux.RLock()
+	result, ok = m.cache[key]
+	m.mux.RUnlock()
+	return
+}
+
+func (m *cachedMatcher) put(key string, result bool) {
+	m.mux.Lock()
+	m.cache[key] = result
+	m.mux.Unlock()
+}
diff --git a/src/go/pkg/matcher/cache_test.go b/src/go/pkg/matcher/cache_test.go
new file mode 100644
index 000000000..a545777b3
--- /dev/null
+++ b/src/go/pkg/matcher/cache_test.go
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package matcher
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestWithCache(t *testing.T) {
+	regMatcher, _ := NewRegExpMatcher("[0-9]+")
+	cached := WithCache(regMatcher)
+
+	assert.True(t, cached.MatchString("1"))
+	assert.True(t, cached.MatchString("1"))
+	assert.True(t, cached.Match([]byte("2")))
+	assert.True(t, cached.Match([]byte("2")))
+}
+
+func TestWithCache_specialCase(t *testing.T) {
+	assert.Equal(t, TRUE(), WithCache(TRUE()))
+	assert.Equal(t, FALSE(), WithCache(FALSE()))
+}
+
+func BenchmarkCachedMatcher_MatchString_cache_hit(b *testing.B) {
+	benchmarks := []struct {
+		name   string
+		expr   string
+		target string
+	}{
+		{"stringFullMatcher", "= abc123", "abc123"},
+		{"stringPrefixMatcher", "~ ^abc123", "abc123456"},
+		{"stringSuffixMatcher", "~ abc123$", "hello abc123"},
+		{"stringPartialMatcher", "~ abc123", "hello abc123 world"},
+		{"globMatcher", "* abc*def", "abc12345678def"},
+		{"regexp", "~ [0-9]+", "1234567890"},
+	}
+	for _, bm := range benchmarks {
+		m := Must(Parse(bm.expr))
+		b.Run(bm.name+"_raw", func(b *testing.B) {
+			for i := 0; i < b.N; i++ {
+				m.MatchString(bm.target)
+			}
+		})
+		b.Run(bm.name+"_cache", func(b *testing.B) {
+			cached := WithCache(m)
+			b.ResetTimer()
+			for i := 0; i < b.N; i++ {
+				cached.MatchString(bm.target)
+			}
+		})
+	}
+}
diff --git a/src/go/pkg/matcher/doc.go b/src/go/pkg/matcher/doc.go
new file mode 100644
index 000000000..33b06988d
--- /dev/null
+++ b/src/go/pkg/matcher/doc.go
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+/*
+Package matcher implements various formats of string matchers.
+
+Supported Format
+
+	string
+	glob
+	regexp
+	simple patterns
+
+The string matcher reports whether the given value equals the string (using ==).
+
+The glob matcher reports whether the given value matches the wildcard pattern.
+The pattern syntax is:
+
+	pattern:
+		{ term }
+	term:
+		'*'         matches any sequence of characters
+		'?'         matches any single character
+		'[' [ '^' ] { character-range } ']'
+		            character class (must be non-empty)
+		c           matches character c (c != '*', '?', '\\', '[')
+		'\\' c      matches character c
+
+	character-range:
+		c           matches character c (c != '\\', '-', ']')
+		'\\' c      matches character c
+		lo '-' hi   matches character c for lo <= c <= hi
+
+The regexp matcher reports whether the given value matches the RegExp pattern (using regexp.Match).
+The RegExp syntax is described at https://golang.org/pkg/regexp/syntax/.
+
+The simple patterns matcher reports whether the given value matches the simple patterns.
+Simple patterns are a custom format used in Netdata;
+their syntax is described at https://docs.netdata.cloud/libnetdata/simple_pattern/.
+*/
+package matcher
diff --git a/src/go/pkg/matcher/doc_test.go b/src/go/pkg/matcher/doc_test.go
new file mode 100644
index 000000000..46c7467ac
--- /dev/null
+++ b/src/go/pkg/matcher/doc_test.go
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package matcher_test
+
+import (
+	"github.com/netdata/netdata/go/plugins/pkg/matcher"
+)
+
+func ExampleNew_string_format() {
+	// create a string matcher, which performs a full-text match
+	m, err := matcher.New(matcher.FmtString, "hello")
+	if err != nil {
+		panic(err)
+	}
+	m.MatchString("hello")       // => true
+	m.MatchString("hello world") // => false
+}
+
+func ExampleNew_glob_format() {
+	// create a glob matcher, which performs a wildcard match
+	m, err := matcher.New(matcher.FmtGlob, "hello*")
+	if err != nil {
+		panic(err)
+	}
+	m.MatchString("hello")       // => true
+	m.MatchString("hello world") // => true
+	m.MatchString("Hello world") // => false
+}
+
+func ExampleNew_simple_patterns_format() {
+	// create a simple patterns matcher, which matches using Netdata simple patterns
+	m, err := matcher.New(matcher.FmtSimplePattern, "hello* !*world *")
+	if err != nil {
+		panic(err)
+	}
+	m.MatchString("hello")        // => true
+	m.MatchString("hello world")  // => true
+	m.MatchString("Hello world")  // => false
+	m.MatchString("Hello world!") // => true (not caught by "!*world", so the final "*" matches)
+}
+
+func ExampleNew_regexp_format() {
+	// create a regexp matcher, which performs a regular expression match
+	m, err := matcher.New(matcher.FmtRegExp, "[0-9]+")
+	if err != nil {
+		panic(err)
+	}
+	m.MatchString("1")  // => true
+	m.MatchString("1a") // => true
+	m.MatchString("a")  // => false
+}
diff --git a/src/go/pkg/matcher/expr.go b/src/go/pkg/matcher/expr.go
new file mode 100644
index 000000000..e5ea0cb2e
--- /dev/null
+++ b/src/go/pkg/matcher/expr.go
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package matcher
+
+import (
+	"errors"
+	"fmt"
+)
+
+type (
+	Expr interface {
+		Parse() (Matcher, error)
+	}
+
+	// SimpleExpr is a simple expression to describe the condition:
+	// (includes[0].Match(v) || includes[1].Match(v) || ...) && !(excludes[0].Match(v) || excludes[1].Match(v) || ...)
+	SimpleExpr struct {
+		Includes []string `yaml:"includes,omitempty" json:"includes"`
+		Excludes []string `yaml:"excludes,omitempty" json:"excludes"`
+	}
+)
+
+var (
+	ErrEmptyExpr = errors.New("empty expression")
+)
+
+// Empty returns true if both Includes and Excludes are empty.
+func (s *SimpleExpr) Empty() bool {
+	return len(s.Includes) == 0 && len(s.Excludes) == 0
+}
+
+// Parse parses the patterns in Includes and Excludes into a single Matcher.
+func (s *SimpleExpr) Parse() (Matcher, error) {
+	if len(s.Includes) == 0 && len(s.Excludes) == 0 {
+		return nil, ErrEmptyExpr
+	}
+	var (
+		includes = FALSE()
+		excludes = FALSE()
+	)
+	if len(s.Includes) > 0 {
+		for _, item := range s.Includes {
+			m, err := Parse(item)
+			if err != nil {
+				return nil, fmt.Errorf("parse matcher %q error: %v", item, err)
+			}
+			includes = Or(includes, m)
+		}
+	} else {
+		includes = TRUE()
+	}
+
+	for _, item := range s.Excludes {
+		m, err := Parse(item)
+		if err != nil {
+			return nil, fmt.Errorf("parse matcher %q error: %v", item, err)
+		}
+		excludes = Or(excludes, m)
+	}
+
+	return And(includes, Not(excludes)), nil
+}
diff --git a/src/go/pkg/matcher/expr_test.go b/src/go/pkg/matcher/expr_test.go
new file mode 100644
index 000000000..93a183226
--- /dev/null
+++ b/src/go/pkg/matcher/expr_test.go
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package matcher
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestSimpleExpr_none(t *testing.T) {
+	expr := &SimpleExpr{}
+
+	m, err := expr.Parse()
+	assert.EqualError(t, err, ErrEmptyExpr.Error())
+	assert.Nil(t, m)
+}
+
+func TestSimpleExpr_include(t *testing.T) {
+	expr := &SimpleExpr{
+		Includes: []string{
+			"~ /api/",
+			"~ .php$",
+		},
+	}
+
+	m, err := expr.Parse()
+	assert.NoError(t, err)
+
+	assert.True(t, m.MatchString("/api/a.php"))
+	assert.True(t, m.MatchString("/api/a.php2"))
+	assert.True(t, m.MatchString("/api2/a.php"))
+	assert.True(t, m.MatchString("/api/img.php"))
+	assert.False(t, m.MatchString("/api2/img.php2"))
+}
+
+func TestSimpleExpr_exclude(t *testing.T) {
+	expr := &SimpleExpr{
+		Excludes: []string{
+			"~ /api/img",
+		},
+	}
+
+	m, err := expr.Parse()
+	assert.NoError(t, err)
+
+	assert.True(t, m.MatchString("/api/a.php"))
+	assert.True(t, m.MatchString("/api/a.php2"))
+	assert.True(t, m.MatchString("/api2/a.php"))
+	assert.False(t, m.MatchString("/api/img.php"))
+	assert.True(t, m.MatchString("/api2/img.php2"))
+}
+
+func TestSimpleExpr_both(t *testing.T) {
+	expr := &SimpleExpr{
+		Includes: []string{
+			"~ /api/",
+			"~ .php$",
+		},
+		Excludes: []string{
+			"~ /api/img",
+		},
+	}
+
+	m, err := expr.Parse()
+	assert.NoError(t, err)
+
+	assert.True(t, m.MatchString("/api/a.php"))
+	assert.True(t, m.MatchString("/api/a.php2"))
+	assert.True(t, m.MatchString("/api2/a.php"))
+	assert.False(t, m.MatchString("/api/img.php"))
+	assert.False(t, m.MatchString("/api2/img.php2"))
+}
+
+func TestSimpleExpr_Parse_NG(t *testing.T) {
+	{
+		expr := &SimpleExpr{
+			Includes: []string{
+				"~ (ab",
+				"~ .php$",
+			},
+		}
+
+		m, err := expr.Parse()
+		assert.Error(t, err)
+		assert.Nil(t, m)
+	}
+	{
+		expr := &SimpleExpr{
+			Excludes: []string{
+				"~ (ab",
+				"~ .php$",
+			},
+		}
+
+		m, err := expr.Parse()
+		assert.Error(t, err)
+		assert.Nil(t, m)
+	}
+}
diff --git a/src/go/pkg/matcher/glob.go b/src/go/pkg/matcher/glob.go
new file mode 100644
index 000000000..75d977ff6
--- /dev/null
+++ b/src/go/pkg/matcher/glob.go
@@ -0,0 +1,265 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package matcher
+
+import (
+	"errors"
+	"path/filepath"
+	"regexp"
+	"unicode/utf8"
+)
+
+// globMatcher implements Matcher using filepath.Match-style glob patterns.
+type globMatcher string
+
+var (
+	errBadGlobPattern = errors.New("bad glob pattern")
+	erGlobPattern     = regexp.MustCompile(`(?s)^(?:[*?]|\[\^?([^\\-\]]|\\.|.-.)+]|\\.|[^*?\\\[])*$`)
+)
+
+// NewGlobMatcher creates a new matcher with glob format
+func NewGlobMatcher(expr string) (Matcher, error) {
+	switch expr {
+	case "":
+		return stringFullMatcher(""), nil
+	case "*":
+		return TRUE(), nil
+	}
+
+	// any string that passes this regexp check is a valid pattern
+	if !erGlobPattern.MatchString(expr) {
+		return nil, errBadGlobPattern
+	}
+
+	size := len(expr)
+	chars := []rune(expr)
+	startWith := true
+	endWith := true
+	startIdx := 0
+	endIdx := size - 1
+	if chars[startIdx] == '*' {
+		startWith = false
+		startIdx = 1
+	}
+	if chars[endIdx] == '*' {
+		endWith = false
+		endIdx--
+	}
+
+	unescapedExpr := make([]rune, 0, endIdx-startIdx+1)
+	for i := startIdx; i <= endIdx; i++ {
+		ch := chars[i]
+		if ch == '\\' {
+			nextCh := chars[i+1]
+			unescapedExpr = append(unescapedExpr, nextCh)
+			i++
+		} else if isGlobMeta(ch) {
+			return globMatcher(expr), nil
+		} else {
+			unescapedExpr = append(unescapedExpr, ch)
+		}
+	}
+
+	return NewStringMatcher(string(unescapedExpr), startWith, endWith)
+}
+
+func isGlobMeta(ch rune) bool {
+	switch ch {
+	case '*', '?', '[':
+		return true
+	default:
+		return false
+	}
+}
+
+// Match reports whether the byte slice matches the glob pattern.
+func (m globMatcher) Match(b []byte) bool {
+	return m.MatchString(string(b))
+}
+
+// MatchString reports whether the string matches the glob pattern.
+func (m globMatcher) MatchString(line string) bool {
+	rs, _ := m.globMatch(line)
+	return rs
+}
+
+func (m globMatcher) globMatch(name string) (matched bool, err error) {
+	pattern := string(m)
+Pattern:
+	for len(pattern) > 0 {
+		var star bool
+		var chunk string
+		star, chunk, pattern = scanChunk(pattern)
+		if star && chunk == "" {
+			// Trailing * matches rest of string unless it has a /.
+			// return !strings.Contains(name, string(Separator)), nil
+
+			return true, nil
+		}
+		// Look for match at current position.
+		t, ok, err := matchChunk(chunk, name)
+		// if we're the last chunk, make sure we've exhausted the name
+		// otherwise we'll give a false result even if we could still match
+		// using the star
+		if ok && (len(t) == 0 || len(pattern) > 0) {
+			name = t
+			continue
+		}
+		if err != nil {
+			return false, err
+		}
+		if star {
+			// Look for match skipping i+1 bytes.
+			// Cannot skip /.
+			for i := 0; i < len(name); i++ {
+				//for i := 0; i < len(name) && name[i] != Separator; i++ {
+				t, ok, err := matchChunk(chunk, name[i+1:])
+				if ok {
+					// if we're the last chunk, make sure we exhausted the name
+					if len(pattern) == 0 && len(t) > 0 {
+						continue
+					}
+					name = t
+					continue Pattern
+				}
+				if err != nil {
+					return false, err
+				}
+			}
+		}
+		return false, nil
+	}
+	return len(name) == 0, nil
+}
+
+// scanChunk gets the next segment of pattern, which is a non-star string
+// possibly preceded by a star.
+func scanChunk(pattern string) (star bool, chunk, rest string) {
+	for len(pattern) > 0 && pattern[0] == '*' {
+		pattern = pattern[1:]
+		star = true
+	}
+	inrange := false
+	var i int
+Scan:
+	for i = 0; i < len(pattern); i++ {
+		switch pattern[i] {
+		case '\\':
+			if i+1 < len(pattern) {
+				i++
+			}
+		case '[':
+			inrange = true
+		case ']':
+			inrange = false
+		case '*':
+			if !inrange {
+				break Scan
+			}
+		}
+	}
+	return star, pattern[0:i], pattern[i:]
+}
+
+// matchChunk checks whether chunk matches the beginning of s.
+// If so, it returns the remainder of s (after the match).
+// Chunk is all single-character operators: literals, char classes, and ?.
+func matchChunk(chunk, s string) (rest string, ok bool, err error) { + for len(chunk) > 0 { + if len(s) == 0 { + return + } + switch chunk[0] { + case '[': + // character class + r, n := utf8.DecodeRuneInString(s) + s = s[n:] + chunk = chunk[1:] + // We can't end right after '[', we're expecting at least + // a closing bracket and possibly a caret. + if len(chunk) == 0 { + err = filepath.ErrBadPattern + return + } + // possibly negated + negated := chunk[0] == '^' + if negated { + chunk = chunk[1:] + } + // parse all ranges + match := false + nrange := 0 + for { + if len(chunk) > 0 && chunk[0] == ']' && nrange > 0 { + chunk = chunk[1:] + break + } + var lo, hi rune + if lo, chunk, err = getEsc(chunk); err != nil { + return + } + hi = lo + if chunk[0] == '-' { + if hi, chunk, err = getEsc(chunk[1:]); err != nil { + return + } + } + if lo <= r && r <= hi { + match = true + } + nrange++ + } + if match == negated { + return + } + + case '?': + //if s[0] == Separator { + // return + //} + _, n := utf8.DecodeRuneInString(s) + s = s[n:] + chunk = chunk[1:] + + case '\\': + chunk = chunk[1:] + if len(chunk) == 0 { + err = filepath.ErrBadPattern + return + } + fallthrough + + default: + if chunk[0] != s[0] { + return + } + s = s[1:] + chunk = chunk[1:] + } + } + return s, true, nil +} + +// getEsc gets a possibly-escaped character from chunk, for a character class. +func getEsc(chunk string) (r rune, nchunk string, err error) { + if len(chunk) == 0 || chunk[0] == '-' || chunk[0] == ']' { + err = filepath.ErrBadPattern + return + } + if chunk[0] == '\\' { + chunk = chunk[1:] + if len(chunk) == 0 { + err = filepath.ErrBadPattern + return + } + } + r, n := utf8.DecodeRuneInString(chunk) + if r == utf8.RuneError && n == 1 { + err = filepath.ErrBadPattern + } + nchunk = chunk[n:] + if len(nchunk) == 0 { + err = filepath.ErrBadPattern + } + return +} diff --git a/src/go/pkg/matcher/glob_test.go b/src/go/pkg/matcher/glob_test.go new file mode 100644 index 000000000..09d456105 --- /dev/null +++ b/src/go/pkg/matcher/glob_test.go @@ -0,0 +1,97 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package matcher + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNewGlobMatcher(t *testing.T) { + cases := []struct { + expr string + matcher Matcher + }{ + {"", stringFullMatcher("")}, + {"a", stringFullMatcher("a")}, + {"a*b", globMatcher("a*b")}, + {`a*\b`, globMatcher(`a*\b`)}, + {`a\[`, stringFullMatcher(`a[`)}, + {`ab\`, nil}, + {`ab[`, nil}, + {`ab]`, stringFullMatcher("ab]")}, + } + for _, c := range cases { + t.Run(c.expr, func(t *testing.T) { + m, err := NewGlobMatcher(c.expr) + if c.matcher != nil { + assert.NoError(t, err) + assert.Equal(t, c.matcher, m) + } else { + assert.Error(t, err) + } + }) + } +} + +func TestGlobMatcher_MatchString(t *testing.T) { + + cases := []struct { + expected bool + expr string + line string + }{ + {true, "/a/*/d", "/a/b/c/d"}, + {true, "foo*", "foo123"}, + {true, "*foo*", "123foo123"}, + {true, "*foo", "123foo"}, + {true, "foo*bar", "foobar"}, + {true, "foo*bar", "foo baz bar"}, + {true, "a[bc]d", "abd"}, + {true, "a[^bc]d", "add"}, + {true, "a??d", "abcd"}, + {true, `a\??d`, "a?cd"}, + {true, "a[b-z]d", "abd"}, + {false, "/a/*/d", "a/b/c/d"}, + {false, "/a/*/d", "This will fail!"}, + } + + for _, c := range cases { + t.Run(c.line, func(t *testing.T) { + m := globMatcher(c.expr) + assert.Equal(t, c.expected, m.Match([]byte(c.line))) + assert.Equal(t, c.expected, m.MatchString(c.line)) + }) + } +} + +func BenchmarkGlob_MatchString(b *testing.B) 
{
+	benchmarks := []struct {
+		expr string
+		test string
+	}{
+		{"", ""},
+		{"abc", "abcd"},
+		{"*abc", "abcd"},
+		{"abc*", "abcd"},
+		{"*abc*", "abcd"},
+		{"[a-z]", "abcd"},
+	}
+	for _, bm := range benchmarks {
+		b.Run(bm.expr+"_raw", func(b *testing.B) {
+			m := globMatcher(bm.expr)
+			b.ResetTimer()
+			for i := 0; i < b.N; i++ {
+				m.MatchString(bm.test)
+			}
+		})
+		b.Run(bm.expr+"_optimized", func(b *testing.B) {
+			m, _ := NewGlobMatcher(bm.expr)
+			b.ResetTimer()
+			for i := 0; i < b.N; i++ {
+				m.MatchString(bm.test)
+			}
+		})
+	}
+}
diff --git a/src/go/pkg/matcher/logical.go b/src/go/pkg/matcher/logical.go
new file mode 100644
index 000000000..af07be8f4
--- /dev/null
+++ b/src/go/pkg/matcher/logical.go
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package matcher
+
+type (
+	trueMatcher  struct{}
+	falseMatcher struct{}
+	andMatcher   struct{ lhs, rhs Matcher }
+	orMatcher    struct{ lhs, rhs Matcher }
+	negMatcher   struct{ Matcher }
+)
+
+var (
+	matcherT trueMatcher
+	matcherF falseMatcher
+)
+
+// TRUE returns a matcher which always returns true
+func TRUE() Matcher {
+	return matcherT
+}
+
+// FALSE returns a matcher which always returns false
+func FALSE() Matcher {
+	return matcherF
+}
+
+// Not returns a matcher which negates the sub-matcher's result
+func Not(m Matcher) Matcher {
+	switch m {
+	case TRUE():
+		return FALSE()
+	case FALSE():
+		return TRUE()
+	default:
+		return negMatcher{m}
+	}
+}
+
+// And returns a matcher which returns true only if all of its sub-matchers return true
+func And(lhs, rhs Matcher, others ...Matcher) Matcher {
+	var matcher Matcher
+	switch lhs {
+	case TRUE():
+		matcher = rhs
+	case FALSE():
+		matcher = FALSE()
+	default:
+		switch rhs {
+		case TRUE():
+			matcher = lhs
+		case FALSE():
+			matcher = FALSE()
+		default:
+			matcher = andMatcher{lhs, rhs}
+		}
+	}
+	if len(others) > 0 {
+		return And(matcher, others[0], others[1:]...)
+	}
+	return matcher
+}
+
+// Or returns a matcher which returns true if any of its sub-matchers returns true
+func Or(lhs, rhs Matcher, others ...Matcher) Matcher {
+	var matcher Matcher
+	switch lhs {
+	case TRUE():
+		matcher = TRUE()
+	case FALSE():
+		matcher = rhs
+	default:
+		switch rhs {
+		case TRUE():
+			matcher = TRUE()
+		case FALSE():
+			matcher = lhs
+		default:
+			matcher = orMatcher{lhs, rhs}
+		}
+	}
+	if len(others) > 0 {
+		return Or(matcher, others[0], others[1:]...)
+ } + return matcher +} + +func (trueMatcher) Match(_ []byte) bool { return true } +func (trueMatcher) MatchString(_ string) bool { return true } + +func (falseMatcher) Match(_ []byte) bool { return false } +func (falseMatcher) MatchString(_ string) bool { return false } + +func (m andMatcher) Match(b []byte) bool { return m.lhs.Match(b) && m.rhs.Match(b) } +func (m andMatcher) MatchString(s string) bool { return m.lhs.MatchString(s) && m.rhs.MatchString(s) } + +func (m orMatcher) Match(b []byte) bool { return m.lhs.Match(b) || m.rhs.Match(b) } +func (m orMatcher) MatchString(s string) bool { return m.lhs.MatchString(s) || m.rhs.MatchString(s) } + +func (m negMatcher) Match(b []byte) bool { return !m.Matcher.Match(b) } +func (m negMatcher) MatchString(s string) bool { return !m.Matcher.MatchString(s) } diff --git a/src/go/pkg/matcher/logical_test.go b/src/go/pkg/matcher/logical_test.go new file mode 100644 index 000000000..64491f1ad --- /dev/null +++ b/src/go/pkg/matcher/logical_test.go @@ -0,0 +1,97 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package matcher + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestTRUE(t *testing.T) { + assert.True(t, TRUE().Match(nil)) + assert.True(t, TRUE().MatchString("")) +} + +func TestFALSE(t *testing.T) { + assert.False(t, FALSE().Match(nil)) + assert.False(t, FALSE().MatchString("")) +} + +func TestAnd(t *testing.T) { + assert.Equal(t, + matcherF, + And(FALSE(), stringFullMatcher(""))) + assert.Equal(t, + matcherF, + And(stringFullMatcher(""), FALSE())) + + assert.Equal(t, + stringFullMatcher(""), + And(TRUE(), stringFullMatcher(""))) + assert.Equal(t, + stringFullMatcher(""), + And(stringFullMatcher(""), TRUE())) + + assert.Equal(t, + andMatcher{stringPartialMatcher("a"), stringPartialMatcher("b")}, + And(stringPartialMatcher("a"), stringPartialMatcher("b"))) + + assert.Equal(t, + andMatcher{ + andMatcher{stringPartialMatcher("a"), stringPartialMatcher("b")}, + stringPartialMatcher("c"), + }, + And(stringPartialMatcher("a"), stringPartialMatcher("b"), stringPartialMatcher("c"))) +} + +func TestOr(t *testing.T) { + assert.Equal(t, + stringFullMatcher(""), + Or(FALSE(), stringFullMatcher(""))) + assert.Equal(t, + stringFullMatcher(""), + Or(stringFullMatcher(""), FALSE())) + + assert.Equal(t, + TRUE(), + Or(TRUE(), stringFullMatcher(""))) + assert.Equal(t, + TRUE(), + Or(stringFullMatcher(""), TRUE())) + + assert.Equal(t, + orMatcher{stringPartialMatcher("a"), stringPartialMatcher("b")}, + Or(stringPartialMatcher("a"), stringPartialMatcher("b"))) + + assert.Equal(t, + orMatcher{ + orMatcher{stringPartialMatcher("a"), stringPartialMatcher("b")}, + stringPartialMatcher("c"), + }, + Or(stringPartialMatcher("a"), stringPartialMatcher("b"), stringPartialMatcher("c"))) +} + +func TestAndMatcher_Match(t *testing.T) { + and := andMatcher{ + stringPrefixMatcher("a"), + stringSuffixMatcher("c"), + } + assert.True(t, and.Match([]byte("abc"))) + assert.True(t, and.MatchString("abc")) +} + +func TestOrMatcher_Match(t *testing.T) { + or := orMatcher{ + stringPrefixMatcher("a"), + stringPrefixMatcher("c"), + } + assert.True(t, or.Match([]byte("aaa"))) + assert.True(t, or.MatchString("ccc")) +} + +func TestNegMatcher_Match(t *testing.T) { + neg := negMatcher{stringPrefixMatcher("a")} + assert.False(t, neg.Match([]byte("aaa"))) + assert.True(t, neg.MatchString("ccc")) +} diff --git a/src/go/pkg/matcher/matcher.go b/src/go/pkg/matcher/matcher.go new file mode 100644 index 000000000..76d903325 --- /dev/null +++ 
b/src/go/pkg/matcher/matcher.go
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package matcher
+
+import (
+	"errors"
+	"fmt"
+	"regexp"
+)
+
+type (
+	// Matcher is an interface that wraps the MatchString method.
+	Matcher interface {
+		// Match performs a match against the given []byte
+		Match(b []byte) bool
+		// MatchString performs a match against the given string
+		MatchString(string) bool
+	}
+
+	// Format matcher format
+	Format string
+)
+
+const (
+	// FmtString is a string match format.
+	FmtString Format = "string"
+	// FmtGlob is a glob match format.
+	FmtGlob Format = "glob"
+	// FmtRegExp is a regex match format.
+	FmtRegExp Format = "regexp"
+	// FmtSimplePattern is a simple pattern match format
+	// https://docs.netdata.cloud/libnetdata/simple_pattern/
+	FmtSimplePattern Format = "simple_patterns"
+
+	// Separator is a separator between match format and expression.
+	Separator = ":"
+)
+
+const (
+	symString = "="
+	symGlob   = "*"
+	symRegExp = "~"
+)
+
+var (
+	reShortSyntax = regexp.MustCompile(`(?s)^(!)?(.)\s*(.*)$`)
+	reLongSyntax  = regexp.MustCompile(`(?s)^(!)?([^:]+):(.*)$`)
+
+	errNotShortSyntax = errors.New("not short syntax")
+)
+
+// Must is a helper that wraps a call to a function returning (Matcher, error) and panics if the error is non-nil.
+// It is intended for use in variable initializations such as
+//
+//	var m = matcher.Must(matcher.New(matcher.FmtString, "hello world"))
+func Must(m Matcher, err error) Matcher {
+	if err != nil {
+		panic(err)
+	}
+	return m
+}
+
+// New creates a matcher with the given format and expression.
+func New(format Format, expr string) (Matcher, error) {
+	switch format {
+	case FmtString:
+		return NewStringMatcher(expr, true, true)
+	case FmtGlob:
+		return NewGlobMatcher(expr)
+	case FmtRegExp:
+		return NewRegExpMatcher(expr)
+	case FmtSimplePattern:
+		return NewSimplePatternsMatcher(expr)
+	default:
+		return nil, fmt.Errorf("unsupported matcher format: '%s'", format)
+	}
+}
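+
+// Illustrative composition (a minimal sketch; the combinators come from
+// logical.go in this package):
+//
+//	m := Must(New(FmtGlob, "foo*bar")) // glob matcher
+//	m = And(m, Not(FALSE()))           // TRUE/FALSE operands are folded away
+//	m.MatchString("foo baz bar")       // true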
+
+// Parse parses line and returns an appropriate matcher based on the matched format.
+//
+// Short Syntax
+//
+//	<line>      ::= [ <not> ] <format> <space> <expr>
+//	<not>       ::= '!'
+//	                negative expression
+//	<format>    ::= [ '=', '~', '*' ]
+//	                '=' means string match
+//	                '~' means regexp match
+//	                '*' means glob match
+//	<space>     ::= { ' ' | '\t' | '\n' | '\r' }
+//	<expr>      ::= any string
+//
+// Long Syntax
+//
+//	<line>      ::= [ <not> ] <format> <separator> <expr>
+//	<format>    ::= [ 'string' | 'glob' | 'regexp' | 'simple_patterns' ]
+//	<not>       ::= '!'
+//	                negative expression
+//	<separator> ::= ':'
+//	<expr>      ::= any string
+func Parse(line string) (Matcher, error) {
+	matcher, err := parseShortFormat(line)
+	if err == nil {
+		return matcher, nil
+	}
+	return parseLongSyntax(line)
+}
+
+func parseShortFormat(line string) (Matcher, error) {
+	m := reShortSyntax.FindStringSubmatch(line)
+	if m == nil {
+		return nil, errNotShortSyntax
+	}
+	var format Format
+	switch m[2] {
+	case symString:
+		format = FmtString
+	case symGlob:
+		format = FmtGlob
+	case symRegExp:
+		format = FmtRegExp
+	default:
+		return nil, fmt.Errorf("invalid short syntax: unknown symbol '%s'", m[2])
+	}
+	expr := m[3]
+	matcher, err := New(format, expr)
+	if err != nil {
+		return nil, err
+	}
+	if m[1] != "" {
+		matcher = Not(matcher)
+	}
+	return matcher, nil
+}
+
+func parseLongSyntax(line string) (Matcher, error) {
+	m := reLongSyntax.FindStringSubmatch(line)
+	if m == nil {
+		return nil, fmt.Errorf("invalid syntax")
+	}
+	matcher, err := New(Format(m[2]), m[3])
+	if err != nil {
+		return nil, err
+	}
+	if m[1] != "" {
+		matcher = Not(matcher)
+	}
+	return matcher, nil
+}
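For orientation, a minimal sketch of both syntaxes in action; the expected reductions mirror the `TestParse` cases below, and the import path assumes the new `pkg` layout introduced by this patch:

```go
package main

import (
	"fmt"

	"github.com/netdata/netdata/go/plugins/pkg/matcher"
)

func main() {
	// Short syntax: an anchored regexp is reduced to a plain prefix matcher.
	m, _ := matcher.Parse("~ ^prefix")
	fmt.Println(m.MatchString("prefix value")) // true

	// Long syntax: a negated glob.
	m, _ = matcher.Parse("!glob:foo*bar")
	fmt.Println(m.MatchString("foo baz bar")) // false
}
```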
diff --git a/src/go/pkg/matcher/matcher_test.go b/src/go/pkg/matcher/matcher_test.go
new file mode 100644
index 000000000..f304d983d
--- /dev/null
+++ b/src/go/pkg/matcher/matcher_test.go
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package matcher
+
+import (
+	"log"
+	"reflect"
+	"regexp"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestParse(t *testing.T) {
+	tests := []struct {
+		valid   bool
+		line    string
+		matcher Matcher
+	}{
+		{false, "", nil},
+		{false, "abc", nil},
+		{false, `~ abc\`, nil},
+		{false, `invalid_fmt:abc`, nil},
+
+		{true, "=", stringFullMatcher("")},
+		{true, "= ", stringFullMatcher("")},
+		{true, "=full", stringFullMatcher("full")},
+		{true, "= full", stringFullMatcher("full")},
+		{true, "= \t\ffull", stringFullMatcher("full")},
+
+		{true, "string:", stringFullMatcher("")},
+		{true, "string:full", stringFullMatcher("full")},
+
+		{true, "!=", Not(stringFullMatcher(""))},
+		{true, "!=full", Not(stringFullMatcher("full"))},
+		{true, "!= full", Not(stringFullMatcher("full"))},
+		{true, "!= \t\ffull", Not(stringFullMatcher("full"))},
+
+		{true, "!string:", Not(stringFullMatcher(""))},
+		{true, "!string:full", Not(stringFullMatcher("full"))},
+
+		{true, "~", TRUE()},
+		{true, "~ ", TRUE()},
+		{true, `~ ^$`, stringFullMatcher("")},
+		{true, "~ partial", stringPartialMatcher("partial")},
+		{true, `~ part\.ial`, stringPartialMatcher("part.ial")},
+		{true, "~ ^prefix", stringPrefixMatcher("prefix")},
+		{true, "~ suffix$", stringSuffixMatcher("suffix")},
+		{true, "~ ^full$", stringFullMatcher("full")},
+		{true, "~ [0-9]+", regexp.MustCompile(`[0-9]+`)},
+		{true, `~ part\s1`, regexp.MustCompile(`part\s1`)},
+
+		{true, "!~", FALSE()},
+		{true, "!~ ", FALSE()},
+		{true, "!~ partial", Not(stringPartialMatcher("partial"))},
+		{true, `!~ part\.ial`, Not(stringPartialMatcher("part.ial"))},
+		{true, "!~ ^prefix", Not(stringPrefixMatcher("prefix"))},
+		{true, "!~ suffix$", Not(stringSuffixMatcher("suffix"))},
+		{true, "!~ ^full$", Not(stringFullMatcher("full"))},
+		{true, "!~ [0-9]+", Not(regexp.MustCompile(`[0-9]+`))},
+
+		{true, `regexp:partial`, stringPartialMatcher("partial")},
+		{true, `!regexp:partial`, Not(stringPartialMatcher("partial"))},
+
+		{true, `*`, stringFullMatcher("")},
+		{true, `* foo`, stringFullMatcher("foo")},
+		{true, `* foo*`, stringPrefixMatcher("foo")},
+		{true, `* *foo`, stringSuffixMatcher("foo")},
+		{true, `* *foo*`, stringPartialMatcher("foo")},
+		{true, `* foo*bar`, globMatcher("foo*bar")},
+		{true, `* *foo*bar`, globMatcher("*foo*bar")},
+		{true, `* foo?bar`, globMatcher("foo?bar")},
+
+		{true, `!*`, Not(stringFullMatcher(""))},
+		{true, `!* foo`, Not(stringFullMatcher("foo"))},
+		{true, `!* foo*`, Not(stringPrefixMatcher("foo"))},
+		{true, `!* *foo`, Not(stringSuffixMatcher("foo"))},
+		{true, `!* *foo*`, Not(stringPartialMatcher("foo"))},
+		{true, `!* foo*bar`, Not(globMatcher("foo*bar"))},
+		{true, `!* *foo*bar`, Not(globMatcher("*foo*bar"))},
+		{true, `!* foo?bar`, Not(globMatcher("foo?bar"))},
+
+		{true, "glob:foo*bar", globMatcher("foo*bar")},
+		{true, "!glob:foo*bar", Not(globMatcher("foo*bar"))},
+
+		{true, `simple_patterns:`, FALSE()},
+		{true, `simple_patterns: `, FALSE()},
+		{true, `simple_patterns: foo`, simplePatternsMatcher{
+			{stringFullMatcher("foo"), true},
+		}},
+		{true, `simple_patterns: !foo`, simplePatternsMatcher{
+			{stringFullMatcher("foo"), false},
+		}},
+	}
+	for _, test := range tests {
+		t.Run(test.line, func(t *testing.T) {
+			m, err := Parse(test.line)
+			if test.valid {
+				require.NoError(t, err)
+				if test.matcher != nil {
+					log.Printf("%s %#v", reflect.TypeOf(m).Name(), m)
+					assert.Equal(t, test.matcher, m)
+				}
+			} else {
+				assert.Error(t, err)
+			}
+		})
+	}
+}
+
+func TestMust(t *testing.T) {
+	assert.NotPanics(t, func() {
+		m := Must(New(FmtRegExp, `[0-9]+`))
+		assert.NotNil(t, m)
+	})
+
+	assert.Panics(t, func() {
+		Must(New(FmtRegExp, `[0-9]+\`))
+	})
+}
diff --git a/src/go/pkg/matcher/regexp.go b/src/go/pkg/matcher/regexp.go
new file mode 100644
index 000000000..3a297f3b3
--- /dev/null
+++ b/src/go/pkg/matcher/regexp.go
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package matcher
+
+import "regexp"
+
+// NewRegExpMatcher creates a new matcher with RegExp format
+func NewRegExpMatcher(expr string) (Matcher, error) {
+	switch expr {
+	case "", "^", "$":
+		return TRUE(), nil
+	case "^$", "$^":
+		return NewStringMatcher("", true, true)
+	}
+	chars := []rune(expr)
+	size := len(chars)
+	var startWith, endWith bool
+	startIdx := 0
+	endIdx := size - 1
+	if chars[startIdx] == '^' {
+		startWith = true
+		startIdx = 1
+	}
+	if chars[endIdx] == '$' {
+		endWith = true
+		endIdx--
+	}
+
+	unescapedExpr := make([]rune, 0, endIdx-startIdx+1)
+	for i := startIdx; i <= endIdx; i++ {
+		ch := chars[i]
+		if ch == '\\' {
+			if i == endIdx { // ends with '\' => invalid format
+				return regexp.Compile(expr)
+			}
+			nextCh := chars[i+1]
+			if !isRegExpMeta(nextCh) { // '\' + non-meta char => special meaning
+				return regexp.Compile(expr)
+			}
+			unescapedExpr = append(unescapedExpr, nextCh)
+			i++
+		} else if isRegExpMeta(ch) {
+			return regexp.Compile(expr)
+		} else {
+			unescapedExpr = append(unescapedExpr, ch)
+		}
+	}
+
+	return NewStringMatcher(string(unescapedExpr), startWith, endWith)
+}
+
+// isRegExpMeta reports whether rune b needs to be escaped by QuoteMeta.
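+// Runes outside this set match themselves literally, which is what lets
+// NewRegExpMatcher downgrade fully-literal (optionally anchored) expressions
+// to plain string matchers.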
+func isRegExpMeta(b rune) bool {
+	switch b {
+	case '\\', '.', '+', '*', '?', '(', ')', '|', '[', ']', '{', '}', '^', '$':
+		return true
+	default:
+		return false
+	}
+}
diff --git a/src/go/pkg/matcher/regexp_test.go b/src/go/pkg/matcher/regexp_test.go
new file mode 100644
index 000000000..fe644747b
--- /dev/null
+++ b/src/go/pkg/matcher/regexp_test.go
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package matcher
+
+import (
+	"regexp"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestRegExpMatch_Match(t *testing.T) {
+	m := regexp.MustCompile("[0-9]+")
+
+	cases := []struct {
+		expected bool
+		line     string
+	}{
+		{
+			expected: true,
+			line:     "2019",
+		},
+		{
+			expected: true,
+			line:     "It's over 9000!",
+		},
+		{
+			expected: false,
+			line:     "This will never fail!",
+		},
+	}
+
+	for _, c := range cases {
+		assert.Equal(t, c.expected, m.MatchString(c.line))
+	}
+}
+
+func BenchmarkRegExp_MatchString(b *testing.B) {
+	benchmarks := []struct {
+		expr string
+		test string
+	}{
+		{"", ""},
+		{"abc", "abcd"},
+		{"^abc", "abcd"},
+		{"abc$", "abcd"},
+		{"^abc$", "abcd"},
+		{"[a-z]+", "abcd"},
+	}
+	for _, bm := range benchmarks {
+		b.Run(bm.expr+"_raw", func(b *testing.B) {
+			m := regexp.MustCompile(bm.expr)
+			b.ResetTimer()
+			for i := 0; i < b.N; i++ {
+				m.MatchString(bm.test)
+			}
+		})
+		b.Run(bm.expr+"_optimized", func(b *testing.B) {
+			m, _ := NewRegExpMatcher(bm.expr)
+			b.ResetTimer()
+			for i := 0; i < b.N; i++ {
+				m.MatchString(bm.test)
+			}
+		})
+	}
+}
diff --git a/src/go/pkg/matcher/simple_patterns.go b/src/go/pkg/matcher/simple_patterns.go
new file mode 100644
index 000000000..91a0a3bbd
--- /dev/null
+++ b/src/go/pkg/matcher/simple_patterns.go
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package matcher
+
+import (
+	"strings"
+)
+
+type (
+	simplePatternTerm struct {
+		matcher  Matcher
+		positive bool
+	}
+
+	// simplePatternsMatcher is an ordered list of simple pattern terms.
+	simplePatternsMatcher []simplePatternTerm
+)
+
+// NewSimplePatternsMatcher creates a new simple patterns matcher. It returns an error if any of the patterns has bad syntax.
+func NewSimplePatternsMatcher(expr string) (Matcher, error) {
+	ps := simplePatternsMatcher{}
+
+	for _, pattern := range strings.Fields(expr) {
+		if err := ps.add(pattern); err != nil {
+			return nil, err
+		}
+	}
+	if len(ps) == 0 {
+		return FALSE(), nil
+	}
+	return ps, nil
+}
+
+func (m *simplePatternsMatcher) add(term string) error {
+	p := simplePatternTerm{}
+	if term[0] == '!' {
+		p.positive = false
+		term = term[1:]
+	} else {
+		p.positive = true
+	}
+	matcher, err := NewGlobMatcher(term)
+	if err != nil {
+		return err
+	}
+
+	p.matcher = matcher
+	*m = append(*m, p)
+
+	return nil
+}
+
+func (m simplePatternsMatcher) Match(b []byte) bool {
+	return m.MatchString(string(b))
+}
+
+// MatchString reports whether the line matches the pattern list.
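+// The first term whose pattern matches the line decides the result: its
+// positivity is returned, and false is returned when no term matches.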
+func (m simplePatternsMatcher) MatchString(line string) bool { + for _, p := range m { + if p.matcher.MatchString(line) { + return p.positive + } + } + return false +} diff --git a/src/go/pkg/matcher/simple_patterns_test.go b/src/go/pkg/matcher/simple_patterns_test.go new file mode 100644 index 000000000..016096d57 --- /dev/null +++ b/src/go/pkg/matcher/simple_patterns_test.go @@ -0,0 +1,88 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package matcher + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNewSimplePatternsMatcher(t *testing.T) { + tests := []struct { + expr string + expected Matcher + }{ + {"", FALSE()}, + {" ", FALSE()}, + {"foo", simplePatternsMatcher{ + {stringFullMatcher("foo"), true}, + }}, + {"!foo", simplePatternsMatcher{ + {stringFullMatcher("foo"), false}, + }}, + {"foo bar", simplePatternsMatcher{ + {stringFullMatcher("foo"), true}, + {stringFullMatcher("bar"), true}, + }}, + {"*foobar* !foo* !*bar *", simplePatternsMatcher{ + {stringPartialMatcher("foobar"), true}, + {stringPrefixMatcher("foo"), false}, + {stringSuffixMatcher("bar"), false}, + {TRUE(), true}, + }}, + {`ab\`, nil}, + } + for _, test := range tests { + t.Run(test.expr, func(t *testing.T) { + matcher, err := NewSimplePatternsMatcher(test.expr) + if test.expected == nil { + assert.Error(t, err) + } else { + assert.Equal(t, test.expected, matcher) + } + }) + } +} + +func TestSimplePatterns_Match(t *testing.T) { + m, err := NewSimplePatternsMatcher("*foobar* !foo* !*bar *") + + require.NoError(t, err) + + cases := []struct { + expected bool + line string + }{ + { + expected: true, + line: "hello world", + }, + { + expected: false, + line: "hello world bar", + }, + { + expected: true, + line: "hello world foobar", + }, + } + + for _, c := range cases { + t.Run(c.line, func(t *testing.T) { + assert.Equal(t, c.expected, m.MatchString(c.line)) + assert.Equal(t, c.expected, m.Match([]byte(c.line))) + }) + } +} + +func TestSimplePatterns_Match2(t *testing.T) { + m, err := NewSimplePatternsMatcher("*foobar") + + require.NoError(t, err) + + assert.True(t, m.MatchString("foobar")) + assert.True(t, m.MatchString("foo foobar")) + assert.False(t, m.MatchString("foobar baz")) +} diff --git a/src/go/pkg/matcher/string.go b/src/go/pkg/matcher/string.go new file mode 100644 index 000000000..43ba43eb3 --- /dev/null +++ b/src/go/pkg/matcher/string.go @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package matcher + +import ( + "bytes" + "strings" +) + +type ( + // stringFullMatcher implements Matcher, it uses "==" to match. + stringFullMatcher string + + // stringPartialMatcher implements Matcher, it uses strings.Contains to match. + stringPartialMatcher string + + // stringPrefixMatcher implements Matcher, it uses strings.HasPrefix to match. + stringPrefixMatcher string + + // stringSuffixMatcher implements Matcher, it uses strings.HasSuffix to match. 
+	stringSuffixMatcher string
+)
+
+// NewStringMatcher creates a new matcher with string format
+func NewStringMatcher(s string, startWith, endWith bool) (Matcher, error) {
+	if startWith {
+		if endWith {
+			return stringFullMatcher(s), nil
+		}
+		return stringPrefixMatcher(s), nil
+	}
+	if endWith {
+		return stringSuffixMatcher(s), nil
+	}
+	return stringPartialMatcher(s), nil
+}
+
+func (m stringFullMatcher) Match(b []byte) bool          { return string(m) == string(b) }
+func (m stringFullMatcher) MatchString(line string) bool { return string(m) == line }
+
+func (m stringPartialMatcher) Match(b []byte) bool          { return bytes.Contains(b, []byte(m)) }
+func (m stringPartialMatcher) MatchString(line string) bool { return strings.Contains(line, string(m)) }
+
+func (m stringPrefixMatcher) Match(b []byte) bool          { return bytes.HasPrefix(b, []byte(m)) }
+func (m stringPrefixMatcher) MatchString(line string) bool { return strings.HasPrefix(line, string(m)) }
+
+func (m stringSuffixMatcher) Match(b []byte) bool          { return bytes.HasSuffix(b, []byte(m)) }
+func (m stringSuffixMatcher) MatchString(line string) bool { return strings.HasSuffix(line, string(m)) }
diff --git a/src/go/pkg/matcher/string_test.go b/src/go/pkg/matcher/string_test.go
new file mode 100644
index 000000000..1694efbd0
--- /dev/null
+++ b/src/go/pkg/matcher/string_test.go
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package matcher
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+var stringMatcherTestCases = []struct {
+	line                          string
+	expr                          string
+	full, prefix, suffix, partial bool
+}{
+	{"", "", true, true, true, true},
+	{"abc", "", false, true, true, true},
+	{"power", "pow", false, true, false, true},
+	{"netdata", "data", false, false, true, true},
+	{"abc", "def", false, false, false, false},
+	{"soon", "o", false, false, false, true},
+}
+
+func TestStringFullMatcher_MatchString(t *testing.T) {
+	for _, c := range stringMatcherTestCases {
+		t.Run(c.line, func(t *testing.T) {
+			m := stringFullMatcher(c.expr)
+			assert.Equal(t, c.full, m.Match([]byte(c.line)))
+			assert.Equal(t, c.full, m.MatchString(c.line))
+		})
+	}
+}
+
+func TestStringPrefixMatcher_MatchString(t *testing.T) {
+	for _, c := range stringMatcherTestCases {
+		t.Run(c.line, func(t *testing.T) {
+			m := stringPrefixMatcher(c.expr)
+			assert.Equal(t, c.prefix, m.Match([]byte(c.line)))
+			assert.Equal(t, c.prefix, m.MatchString(c.line))
+		})
+	}
+}
+
+func TestStringSuffixMatcher_MatchString(t *testing.T) {
+	for _, c := range stringMatcherTestCases {
+		t.Run(c.line, func(t *testing.T) {
+			m := stringSuffixMatcher(c.expr)
+			assert.Equal(t, c.suffix, m.Match([]byte(c.line)))
+			assert.Equal(t, c.suffix, m.MatchString(c.line))
+		})
+	}
+}
+
+func TestStringPartialMatcher_MatchString(t *testing.T) {
+	for _, c := range stringMatcherTestCases {
+		t.Run(c.line, func(t *testing.T) {
+			m := stringPartialMatcher(c.expr)
+			assert.Equal(t, c.partial, m.Match([]byte(c.line)))
+			assert.Equal(t, c.partial, m.MatchString(c.line))
+		})
+	}
+}
diff --git a/src/go/pkg/multipath/multipath.go b/src/go/pkg/multipath/multipath.go
new file mode 100644
index 000000000..6172def06
--- /dev/null
+++ b/src/go/pkg/multipath/multipath.go
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package multipath
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"slices"
+	"strings"
+
+	"github.com/mitchellh/go-homedir"
+)
+
+type ErrNotFound struct{ msg string }
+
+func (e ErrNotFound) Error() string { return e.msg }
+
+// IsNotFound returns a boolean indicating whether the error is ErrNotFound or not.
+func IsNotFound(err error) bool {
+	var errNotFound ErrNotFound
+	return errors.As(err, &errNotFound)
+}
+
+// MultiPath is an ordered list of directories to search through.
+type MultiPath []string
+
+// New creates a MultiPath from the given directories, expanding "~" and
+// dropping empty and duplicate entries.
+func New(paths ...string) MultiPath {
+	set := map[string]bool{}
+	mPath := make(MultiPath, 0)
+
+	for _, dir := range paths {
+		if dir == "" {
+			continue
+		}
+		if d, err := homedir.Expand(dir); err == nil {
+			dir = d
+		}
+		if !set[dir] {
+			mPath = append(mPath, dir)
+			set[dir] = true
+		}
+	}
+
+	return mPath
+}
+
+// Find finds a file in the given paths
+func (p MultiPath) Find(filename string) (string, error) {
+	for _, dir := range p {
+		file := filepath.Join(dir, filename)
+		if _, err := os.Stat(file); !os.IsNotExist(err) {
+			return file, nil
+		}
+	}
+	return "", ErrNotFound{msg: fmt.Sprintf("can't find '%s' in %v", filename, p)}
+}
+
+// FindFiles walks the directories in order and returns regular files with one
+// of the given suffixes (any suffix if none are given), keeping only the first
+// file found for each base name.
+func (p MultiPath) FindFiles(suffixes ...string) ([]string, error) {
+	set := make(map[string]bool)
+	var files []string
+
+	for _, dir := range p {
+		entries, err := os.ReadDir(dir)
+		if err != nil {
+			continue
+		}
+
+		for _, e := range entries {
+			if !e.Type().IsRegular() {
+				continue
+			}
+
+			ext := filepath.Ext(e.Name())
+			name := strings.TrimSuffix(e.Name(), ext)
+
+			if (len(suffixes) != 0 && !slices.Contains(suffixes, ext)) || set[name] {
+				continue
+			}
+
+			set[name] = true
+			file := filepath.Join(dir, e.Name())
+			files = append(files, file)
+		}
+	}
+
+	return files, nil
+}
diff --git a/src/go/pkg/multipath/multipath_test.go b/src/go/pkg/multipath/multipath_test.go
new file mode 100644
index 000000000..cd6c90d95
--- /dev/null
+++ b/src/go/pkg/multipath/multipath_test.go
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package multipath
+
+import (
+	"errors"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestNew(t *testing.T) {
+	assert.Len(
+		t,
+		New("path1", "path2", "path2", "", "path3"),
+		3,
+	)
+}
+
+func TestMultiPath_Find(t *testing.T) {
+	m := New("path1", "testdata/data1")
+
+	v, err := m.Find("not exist")
+	assert.Zero(t, v)
+	assert.Error(t, err)
+
+	v, err = m.Find("test-empty.conf")
+	assert.Equal(t, "testdata/data1/test-empty.conf", v)
+	assert.Nil(t, err)
+
+	v, err = m.Find("test.conf")
+	assert.Equal(t, "testdata/data1/test.conf", v)
+	assert.Nil(t, err)
+}
+
+func TestIsNotFound(t *testing.T) {
+	assert.True(t, IsNotFound(ErrNotFound{}))
+	assert.False(t, IsNotFound(errors.New("")))
+}
+
+func TestMultiPath_FindFiles(t *testing.T) {
+	m := New("path1", "testdata/data2", "testdata/data1")
+
+	files, err := m.FindFiles(".conf")
+	assert.NoError(t, err)
+	assert.Equal(t, []string{"testdata/data2/test-empty.conf", "testdata/data2/test.conf"}, files)
+
+	files, err = m.FindFiles()
+	assert.NoError(t, err)
+	assert.Equal(t, []string{"testdata/data2/test-empty.conf", "testdata/data2/test.conf"}, files)
+
+	files, err = m.FindFiles(".not_exist")
+	assert.NoError(t, err)
+	assert.Equal(t, []string(nil), files)
+
+	m = New("path1", "testdata/data1", "testdata/data2")
+	files, err = m.FindFiles(".conf")
+	assert.NoError(t, err)
+	assert.Equal(t, []string{"testdata/data1/test-empty.conf", "testdata/data1/test.conf"}, files)
+}
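For orientation, a minimal sketch of how a caller might use the package; the directory names here are illustrative, not prescribed by the package:

```go
package main

import (
	"fmt"

	"github.com/netdata/netdata/go/plugins/pkg/multipath"
)

func main() {
	// Directories are searched in order; the first hit wins.
	paths := multipath.New("/etc/netdata", "/usr/lib/netdata/conf.d")

	file, err := paths.Find("go.d.conf")
	if multipath.IsNotFound(err) {
		fmt.Println("not found, falling back to defaults")
		return
	}
	fmt.Println("using", file)
}
```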
diff --git a/src/go/pkg/multipath/testdata/data1/test-empty.conf b/src/go/pkg/multipath/testdata/data1/test-empty.conf
new file mode 100644
index 000000000..e69de29bb
diff --git a/src/go/pkg/multipath/testdata/data1/test.conf b/src/go/pkg/multipath/testdata/data1/test.conf
new file mode 100644
index 000000000..aebe64730
--- /dev/null
+++ b/src/go/pkg/multipath/testdata/data1/test.conf
@@ -0,0 +1 @@
+not empty!
\ No newline at end of file
diff --git a/src/go/pkg/multipath/testdata/data2/test-empty.conf b/src/go/pkg/multipath/testdata/data2/test-empty.conf
new file mode 100644
index 000000000..e69de29bb
diff --git a/src/go/pkg/multipath/testdata/data2/test.conf b/src/go/pkg/multipath/testdata/data2/test.conf
new file mode 100644
index 000000000..aebe64730
--- /dev/null
+++ b/src/go/pkg/multipath/testdata/data2/test.conf
@@ -0,0 +1 @@
+not empty!
\ No newline at end of file
diff --git a/src/go/pkg/netdataapi/api.go b/src/go/pkg/netdataapi/api.go
new file mode 100644
index 000000000..4f3faefc8
--- /dev/null
+++ b/src/go/pkg/netdataapi/api.go
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package netdataapi
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+type (
+	// API implements the Netdata external plugins API.
+	// https://learn.netdata.cloud/docs/agent/plugins.d#the-output-of-the-plugin
+	API struct {
+		io.Writer
+	}
+)
+
+const quotes = "' '"
+
+var (
+	end          = []byte("END\n\n")
+	clabelCommit = []byte("CLABEL_COMMIT\n")
+	newLine      = []byte("\n")
+)
+
+func New(w io.Writer) *API { return &API{w} }
+
+// CHART creates or updates a chart.
+func (a *API) CHART(
+	typeID string,
+	ID string,
+	name string,
+	title string,
+	units string,
+	family string,
+	context string,
+	chartType string,
+	priority int,
+	updateEvery int,
+	options string,
+	plugin string,
+	module string) error {
+	_, err := a.Write([]byte("CHART " + "'" +
+		typeID + "." + ID + quotes +
+		name + quotes +
+		title + quotes +
+		units + quotes +
+		family + quotes +
+		context + quotes +
+		chartType + quotes +
+		strconv.Itoa(priority) + quotes +
+		strconv.Itoa(updateEvery) + quotes +
+		options + quotes +
+		plugin + quotes +
+		module + "'\n"))
+	return err
+}
+
+// DIMENSION adds or updates a dimension to the chart just created.
+func (a *API) DIMENSION(
+	ID string,
+	name string,
+	algorithm string,
+	multiplier int,
+	divisor int,
+	options string) error {
+	_, err := a.Write([]byte("DIMENSION '" +
+		ID + quotes +
+		name + quotes +
+		algorithm + quotes +
+		strconv.Itoa(multiplier) + quotes +
+		strconv.Itoa(divisor) + quotes +
+		options + "'\n"))
+	return err
+}
+
+// CLABEL adds or updates a label to the chart.
+func (a *API) CLABEL(key, value string, source int) error {
+	_, err := a.Write([]byte("CLABEL '" +
+		key + quotes +
+		value + quotes +
+		strconv.Itoa(source) + "'\n"))
+	return err
+}
+
+// CLABELCOMMIT commits the added labels to the chart. Should be called after one or more CLABEL.
+func (a *API) CLABELCOMMIT() error {
+	_, err := a.Write(clabelCommit)
+	return err
+}
+
+// BEGIN initializes data collection for a chart.
+func (a *API) BEGIN(typeID string, ID string, msSince int) (err error) {
+	if msSince > 0 {
+		_, err = a.Write([]byte("BEGIN " + "'" + typeID + "." + ID + "' " + strconv.Itoa(msSince) + "\n"))
+	} else {
+		_, err = a.Write([]byte("BEGIN " + "'" + typeID + "." + ID + "'\n"))
+	}
+	return err
+}
+
+// SET sets the value of a dimension for the initialized chart.
+func (a *API) SET(ID string, value int64) error {
+	_, err := a.Write([]byte("SET '" + ID + "' = " + strconv.FormatInt(value, 10) + "\n"))
+	return err
+}
+
+// SETEMPTY sets the empty value of a dimension for the initialized chart.
+func (a *API) SETEMPTY(ID string) error {
+	_, err := a.Write([]byte("SET '" + ID + "' = \n"))
+	return err
+}
+
+// VARIABLE sets the value of a CHART scope variable for the initialized chart.
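+// It emits a line of the form: VARIABLE CHART '<ID>' = <value>.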
+func (a *API) VARIABLE(ID string, value int64) error { + _, err := a.Write([]byte("VARIABLE CHART '" + ID + "' = " + strconv.FormatInt(value, 10) + "\n")) + return err +} + +// END completes data collection for the initialized chart. +func (a *API) END() error { + _, err := a.Write(end) + return err +} + +// DISABLE disables this plugin. This will prevent Netdata from restarting the plugin. +func (a *API) DISABLE() error { + _, err := a.Write([]byte("DISABLE\n")) + return err +} + +// EMPTYLINE writes an empty line. +func (a *API) EMPTYLINE() error { + _, err := a.Write(newLine) + return err +} + +func (a *API) HOSTINFO(guid, hostname string, labels map[string]string) error { + if err := a.HOSTDEFINE(guid, hostname); err != nil { + return err + } + for k, v := range labels { + if err := a.HOSTLABEL(k, v); err != nil { + return err + } + } + return a.HOSTDEFINEEND() +} + +func (a *API) HOSTDEFINE(guid, hostname string) error { + _, err := fmt.Fprintf(a, "HOST_DEFINE '%s' '%s'\n", guid, hostname) + return err +} + +func (a *API) HOSTLABEL(name, value string) error { + _, err := fmt.Fprintf(a, "HOST_LABEL '%s' '%s'\n", name, value) + return err +} + +func (a *API) HOSTDEFINEEND() error { + _, err := fmt.Fprintf(a, "HOST_DEFINE_END\n\n") + return err +} + +func (a *API) HOST(guid string) error { + _, err := a.Write([]byte("HOST " + "'" + + guid + "'\n\n")) + return err +} + +func (a *API) FUNCRESULT(uid, contentType, payload, code, expireTimestamp string) { + var buf bytes.Buffer + + buf.WriteString("FUNCTION_RESULT_BEGIN " + + uid + " " + + code + " " + + contentType + " " + + expireTimestamp + "\n", + ) + + if payload != "" { + buf.WriteString(payload + "\n") + } + + buf.WriteString("FUNCTION_RESULT_END\n\n") + + _, _ = buf.WriteTo(a) +} + +func (a *API) CONFIGCREATE(id, status, configType, path, sourceType, source, supportedCommands string) { + // https://learn.netdata.cloud/docs/contributing/external-plugins/#config + + _, _ = a.Write([]byte("CONFIG " + + id + " " + + "create" + " " + + status + " " + + configType + " " + + path + " " + + sourceType + " '" + + source + "' '" + + supportedCommands + "' 0x0000 0x0000\n\n", + )) +} + +func (a *API) CONFIGDELETE(id string) { + _, _ = a.Write([]byte("CONFIG " + id + " delete\n\n")) +} + +func (a *API) CONFIGSTATUS(id, status string) { + _, _ = a.Write([]byte("CONFIG " + id + " status " + status + "\n\n")) +} diff --git a/src/go/pkg/netdataapi/api_test.go b/src/go/pkg/netdataapi/api_test.go new file mode 100644 index 000000000..e5087839b --- /dev/null +++ b/src/go/pkg/netdataapi/api_test.go @@ -0,0 +1,265 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package netdataapi + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestAPI_CHART(t *testing.T) { + buf := &bytes.Buffer{} + a := API{Writer: buf} + + _ = a.CHART( + "", + "id", + "name", + "title", + "units", + "family", + "context", + "line", + 1, + 1, + "", + "plugin", + "module", + ) + + assert.Equal( + t, + "CHART '.id' 'name' 'title' 'units' 'family' 'context' 'line' '1' '1' '' 'plugin' 'module'\n", + buf.String(), + ) +} + +func TestAPI_DIMENSION(t *testing.T) { + buf := &bytes.Buffer{} + a := API{Writer: buf} + + _ = a.DIMENSION( + "id", + "name", + "absolute", + 1, + 1, + "", + ) + + assert.Equal( + t, + "DIMENSION 'id' 'name' 'absolute' '1' '1' ''\n", + buf.String(), + ) +} + +func TestAPI_BEGIN(t *testing.T) { + buf := &bytes.Buffer{} + a := API{Writer: buf} + + _ = a.BEGIN( + "typeID", + "id", + 0, + ) + + assert.Equal( + t, + "BEGIN 
'typeID.id'\n", + buf.String(), + ) + + buf.Reset() + + _ = a.BEGIN( + "typeID", + "id", + 1, + ) + + assert.Equal( + t, + "BEGIN 'typeID.id' 1\n", + buf.String(), + ) +} + +func TestAPI_SET(t *testing.T) { + buf := &bytes.Buffer{} + a := API{Writer: buf} + + _ = a.SET("id", 100) + + assert.Equal( + t, + "SET 'id' = 100\n", + buf.String(), + ) +} + +func TestAPI_SETEMPTY(t *testing.T) { + buf := &bytes.Buffer{} + a := API{Writer: buf} + + _ = a.SETEMPTY("id") + + assert.Equal( + t, + "SET 'id' = \n", + buf.String(), + ) +} + +func TestAPI_VARIABLE(t *testing.T) { + buf := &bytes.Buffer{} + a := API{Writer: buf} + + _ = a.VARIABLE("id", 100) + + assert.Equal( + t, + "VARIABLE CHART 'id' = 100\n", + buf.String(), + ) +} + +func TestAPI_END(t *testing.T) { + buf := &bytes.Buffer{} + a := API{Writer: buf} + + _ = a.END() + + assert.Equal( + t, + "END\n\n", + buf.String(), + ) +} + +func TestAPI_CLABEL(t *testing.T) { + buf := &bytes.Buffer{} + a := API{Writer: buf} + + _ = a.CLABEL("key", "value", 1) + + assert.Equal( + t, + "CLABEL 'key' 'value' '1'\n", + buf.String(), + ) +} + +func TestAPI_CLABELCOMMIT(t *testing.T) { + buf := &bytes.Buffer{} + a := API{Writer: buf} + + _ = a.CLABELCOMMIT() + + assert.Equal( + t, + "CLABEL_COMMIT\n", + buf.String(), + ) +} + +func TestAPI_DISABLE(t *testing.T) { + buf := &bytes.Buffer{} + a := API{Writer: buf} + + _ = a.DISABLE() + + assert.Equal( + t, + "DISABLE\n", + buf.String(), + ) +} + +func TestAPI_EMPTYLINE(t *testing.T) { + buf := &bytes.Buffer{} + a := API{Writer: buf} + + _ = a.EMPTYLINE() + + assert.Equal( + t, + "\n", + buf.String(), + ) +} + +func TestAPI_HOST(t *testing.T) { + buf := &bytes.Buffer{} + a := API{Writer: buf} + + _ = a.HOST("guid") + + assert.Equal( + t, + "HOST 'guid'\n\n", + buf.String(), + ) +} + +func TestAPI_HOSTDEFINE(t *testing.T) { + buf := &bytes.Buffer{} + a := API{Writer: buf} + + _ = a.HOSTDEFINE("guid", "hostname") + + assert.Equal( + t, + "HOST_DEFINE 'guid' 'hostname'\n", + buf.String(), + ) +} + +func TestAPI_HOSTLABEL(t *testing.T) { + buf := &bytes.Buffer{} + a := API{Writer: buf} + + _ = a.HOSTLABEL("name", "value") + + assert.Equal( + t, + "HOST_LABEL 'name' 'value'\n", + buf.String(), + ) +} + +func TestAPI_HOSTDEFINEEND(t *testing.T) { + buf := &bytes.Buffer{} + a := API{Writer: buf} + + _ = a.HOSTDEFINEEND() + + assert.Equal( + t, + "HOST_DEFINE_END\n\n", + buf.String(), + ) +} + +func TestAPI_HOSTINFO(t *testing.T) { + buf := &bytes.Buffer{} + a := API{Writer: buf} + + _ = a.HOSTINFO("guid", "hostname", map[string]string{"label1": "value1"}) + + assert.Equal( + t, + `HOST_DEFINE 'guid' 'hostname' +HOST_LABEL 'label1' 'value1' +HOST_DEFINE_END + +`, + buf.String(), + ) +} + +func TestAPI_FUNCRESULT(t *testing.T) { + +} diff --git a/src/go/pkg/safewriter/writer.go b/src/go/pkg/safewriter/writer.go new file mode 100644 index 000000000..533c1055d --- /dev/null +++ b/src/go/pkg/safewriter/writer.go @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package safewriter + +import ( + "io" + "os" + "sync" +) + +var Stdout = New(os.Stdout) + +func New(w io.Writer) io.Writer { + return &writer{ + mx: &sync.Mutex{}, + w: w, + } +} + +type writer struct { + mx *sync.Mutex + w io.Writer +} + +func (w *writer) Write(p []byte) (n int, err error) { + w.mx.Lock() + n, err = w.w.Write(p) + w.mx.Unlock() + return n, err +} diff --git a/src/go/pkg/ticker/ticker.go b/src/go/pkg/ticker/ticker.go new file mode 100644 index 000000000..e4228fe4c --- /dev/null +++ b/src/go/pkg/ticker/ticker.go @@ -0,0 +1,55 @@ +// 
SPDX-License-Identifier: GPL-3.0-or-later
+
+package ticker
+
+import "time"
+
+type (
+	// Ticker holds a channel that delivers ticks of a clock at intervals.
+	// The ticks are aligned to interval boundaries.
+	Ticker struct {
+		C        <-chan int
+		done     chan struct{}
+		loops    int
+		interval time.Duration
+	}
+)
+
+// New returns a new Ticker containing a channel that will send the tick counter with a period specified by the duration argument.
+// It adjusts the intervals or drops ticks to make up for slow receivers.
+// The duration must be greater than zero; if not, New will panic. Stop the Ticker to release associated resources.
+func New(interval time.Duration) *Ticker {
+	ticker := &Ticker{
+		interval: interval,
+		done:     make(chan struct{}, 1),
+	}
+	ticker.start()
+	return ticker
+}
+
+func (t *Ticker) start() {
+	ch := make(chan int)
+	t.C = ch
+	go func() {
+	LOOP:
+		for {
+			now := time.Now()
+			nextRun := now.Truncate(t.interval).Add(t.interval)
+
+			time.Sleep(nextRun.Sub(now))
+			select {
+			case <-t.done:
+				close(ch)
+				break LOOP
+			case ch <- t.loops:
+				t.loops++
+			}
+		}
+	}()
+}
+
+// Stop turns off a Ticker. After Stop, no more ticks will be sent.
+// Stop does not close the channel, to prevent a read from the channel succeeding incorrectly.
+func (t *Ticker) Stop() {
+	t.done <- struct{}{}
+}
diff --git a/src/go/pkg/ticker/ticket_test.go b/src/go/pkg/ticker/ticket_test.go
new file mode 100644
index 000000000..193085365
--- /dev/null
+++ b/src/go/pkg/ticker/ticket_test.go
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package ticker
+
+import (
+	"testing"
+	"time"
+)
+
+// TODO: often fails Circle CI (~200-240)
+var allowedDelta = 500 * time.Millisecond
+
+func TestTickerParallel(t *testing.T) {
+	for i := 0; i < 100; i++ {
+		i := i
+		go func() {
+			time.Sleep(time.Second / 100 * time.Duration(i))
+			TestTicker(t)
+		}()
+	}
+	time.Sleep(4 * time.Second)
+}
+
+func TestTicker(t *testing.T) {
+	tk := New(time.Second)
+	defer tk.Stop()
+	prev := time.Now()
+	for i := 0; i < 3; i++ {
+		<-tk.C
+		now := time.Now()
+		diff := abs(now.Round(time.Second).Sub(now))
+		if diff >= allowedDelta {
+			t.Errorf("Ticker is not aligned: expect delta < %v but was: %v (%s)", allowedDelta, diff, now.Format(time.RFC3339Nano))
+		}
+		if i > 0 {
+			dt := now.Sub(prev)
+			if abs(dt-time.Second) >= allowedDelta {
+				t.Errorf("Ticker interval: expect delta < %v ns but was: %v", allowedDelta, abs(dt-time.Second))
+			}
+		}
+		prev = now
+	}
+}
+
+func abs(a time.Duration) time.Duration {
+	if a < 0 {
+		return -a
+	}
+	return a
+}
diff --git a/src/go/plugin/go.d/README.md b/src/go/plugin/go.d/README.md
index 28f046ab9..ecb8c45fa 100644
--- a/src/go/plugin/go.d/README.md
+++ b/src/go/plugin/go.d/README.md
@@ -1,43 +1,18 @@
-
-
 # go.d.plugin
 
-`go.d.plugin` is a [Netdata](https://github.com/netdata/netdata) external plugin. It is an **orchestrator** for data
-collection modules written in `go`.
-
-1. It runs as an independent process (`ps fax` shows it).
-2. It is started and stopped automatically by Netdata.
-3. It communicates with Netdata via a unidirectional pipe (sending data to the Netdata daemon).
-4. Supports any number of data collection modules.
-5. Allows each module to have any number of data collection jobs.
-
-## Bug reports, feature requests, and questions
-
-Are welcome! We are using [netdata/netdata](https://github.com/netdata/netdata/) repository for bugs, feature requests,
-and questions.
- -- [GitHub Issues](https://github.com/netdata/netdata/issues/new/choose): report bugs or open a new feature request. -- [GitHub Discussions](https://github.com/netdata/netdata/discussions): ask a question or suggest a new idea. +`go.d.plugin` is a [Netdata](https://github.com/netdata/netdata) external plugin: -## Install - -Go.d.plugin is shipped with Netdata. +- **Independent Operation**: Runs as a separate process from Netdata core, visible in system process lists (`ps fax`). +- **Automated Management**: Integrated with Netdata's lifecycle management, managed automatically by Netdata (start/stop operations). +- **Efficient Communication**: Uses a unidirectional pipe for optimal data transfer to Netdata. +- **Modular Architecture**: + - Supports an unlimited number of data collection modules. + - Each module can run multiple collection jobs simultaneously. + - Easy to extend with new collection modules ### Required Linux capabilities -All capabilities are set automatically during Netdata installation using -the [official installation method](/packaging/installer/methods/kickstart.md). -No further action required. If you have used a different installation method and need to set the capabilities manually, -see the appropriate collector readme. +All capabilities are set automatically during Netdata installation using the [official installation method](/packaging/installer/methods/kickstart.md). | Capability | Required by | |:--------------------|:-------------------------------------------------------------------------------------------------------:| @@ -47,15 +22,21 @@ see the appropriate collector readme. ## Available modules +

    +Data Collection Modules + | Name | Monitors | |:-------------------------------------------------------------------------------------------------------------------|:-----------------------------:| | [adaptec_raid](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/adaptecraid) | Adaptec Hardware RAID | | [activemq](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/activemq) | ActiveMQ | | [ap](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/ap) | Wireless AP | | [apache](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/apache) | Apache | +| [apcupsd](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/apcupsd) | UPS (APC) | | [beanstalk](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/beanstalk) | Beanstalk | | [bind](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/bind) | ISC Bind | +| [boinc](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/boinc) | BOINC | | [cassandra](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/cassandra) | Cassandra | +| [ceph](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/ceph) | Ceph | | [chrony](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/chrony) | Chrony | | [clickhouse](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/clickhouse) | ClickHouse | | [cockroachdb](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/cockroachdb) | CockroachDB | @@ -74,7 +55,6 @@ see the appropriate collector readme. | [dovecot](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/dovecot) | Dovecot | | [elasticsearch](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/elasticsearch) | Elasticsearch/OpenSearch | | [envoy](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/envoy) | Envoy | -| [example](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/example) | - | | [exim](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/exim) | Exim | | [fail2ban](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/fail2ban) | Fail2Ban Jails | | [filecheck](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/filecheck) | Files and Directories | @@ -98,6 +78,7 @@ see the appropriate collector readme. | [logind](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/logind) | systemd-logind | | [logstash](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/logstash) | Logstash | | [lvm](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/lvm) | LVM logical volumes | +| [maxscale](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/maxscale) | MaxScale | | [megacli](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/megacli) | MegaCli Hardware Raid | | [memcached](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/memcached) | Memcached | | [mongoDB](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/mongodb) | MongoDB | @@ -105,13 +86,17 @@ see the appropriate collector readme. 
| [mysql](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/mysql) | MySQL | | [nginx](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/nginx) | NGINX | | [nginxplus](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/nginxplus) | NGINX Plus | +| [nginxunit](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/nginxunit) | NGINX Unit | | [nginxvts](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/nginxvts) | NGINX VTS | | [nsd](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/nsd) | NSD (NLnet Labs) | | [ntpd](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/ntpd) | NTP daemon | +| [nvidia_smi](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/nvidia_smi) | Nvidia SMI | | [nvme](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/nvme) | NVMe devices | +| [openldap](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/openldap) | OpenLDAP | | [openvpn](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/openvpn) | OpenVPN | | [openvpn_status_log](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/openvpn_status_log) | OpenVPN | | [pgbouncer](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/pgbouncer) | PgBouncer | +| [oracledb](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/oracledb) | Oracle DB | | [phpdaemon](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/phpdaemon) | phpDaemon | | [phpfpm](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/phpfpm) | PHP-FPM | | [pihole](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/pihole) | Pi-hole | @@ -131,12 +116,14 @@ see the appropriate collector readme. 
| [rethinkdb](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/rethinkdb) | RethinkDB | | [riakkv](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/riakkv) | Riak KV | | [rspamd](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/rspamd) | Rspamd | +| [samba](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/samba) | Samba | | [scaleio](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/scaleio) | Dell EMC ScaleIO | | [sensors](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/sensors) | Hardware Sensors | | [SNMP](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/snmp) | SNMP | | [squid](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/squid) | Squid | | [squidlog](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/squidlog) | Squid | | [smartctl](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/smartctl) | S.M.A.R.T Storage Devices | +| [spigotmc](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/spigotmc) | SpigotMC | | [storcli](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/storcli) | Broadcom Hardware RAID | | [supervisord](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/supervisord) | Supervisor | | [systemdunits](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/systemdunits) | Systemd unit state | @@ -144,11 +131,15 @@ see the appropriate collector readme. | [tomcat](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/tomcat) | Tomcat | | [tor](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/tor) | Tor | | [traefik](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/traefik) | Traefik | -| [upsd](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/upsd) | UPSd (Nut) | +| [typesense](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/typesense) | Typesense | | [unbound](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/unbound) | Unbound | +| [upsd](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/upsd) | UPSd (Nut) | +| [uwsgi](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/uwsgi) | uWSGI | +| [varnish](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/varnish) | Varnish | | [vcsa](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/vcsa) | vCenter Server Appliance | | [vernemq](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/vernemq) | VerneMQ | | [vsphere](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/vsphere) | VMware vCenter Server | +| [w1sensor](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/w1sensor) | 1-Wire Sensors | | [web_log](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/weblog) | Apache/NGINX | | [wireguard](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/wireguard) | WireGuard | | [whoisquery](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/whoisquery) | Domain Expiry | @@ -157,6 +148,8 @@ see the appropriate collector readme. 
| [zfspool](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/zfspool) | ZFS Pools | | [zookeeper](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/zookeeper) | ZooKeeper | +
    + ## Configuration Edit the `go.d.conf` configuration file using `edit-config` from the @@ -192,18 +185,7 @@ modules: example: yes ``` -Then [restart netdata](/docs/netdata-agent/start-stop-restart.md) -for the change to take effect. - -## Contributing - -If you want to contribute to this project, we are humbled. Please take a look at -our [contributing guidelines](https://github.com/netdata/.github/blob/main/CONTRIBUTING.md) and don't hesitate to -contact us in our forums. - -### How to develop a collector - -Read [how to write a Netdata collector in Go](/src/go/plugin/go.d/docs/how-to-write-a-module.md). +Then [restart netdata](/docs/netdata-agent/start-stop-restart.md) for the change to take effect. ## Troubleshooting @@ -235,10 +217,3 @@ sudo su -s /bin/bash netdata ``` Change `` to the [module name](#available-modules) you want to debug. - -## Netdata Community - -This repository follows the Netdata Code of Conduct and is part of the Netdata Community. - -- [Community Forums](https://community.netdata.cloud) -- [Netdata Code of Conduct](https://github.com/netdata/.github/blob/main/CODE_OF_CONDUCT.md) diff --git a/src/go/plugin/go.d/agent/agent.go b/src/go/plugin/go.d/agent/agent.go index 2423e84e0..014f544b5 100644 --- a/src/go/plugin/go.d/agent/agent.go +++ b/src/go/plugin/go.d/agent/agent.go @@ -13,6 +13,9 @@ import ( "time" "github.com/netdata/netdata/go/plugins/logger" + "github.com/netdata/netdata/go/plugins/pkg/multipath" + "github.com/netdata/netdata/go/plugins/pkg/netdataapi" + "github.com/netdata/netdata/go/plugins/pkg/safewriter" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/confgroup" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/discovery" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/filelock" @@ -20,10 +23,6 @@ import ( "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/functions" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/jobmgr" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/netdataapi" - "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/safewriter" - "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/vnodes" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/multipath" "github.com/mattn/go-isatty" ) @@ -187,9 +186,7 @@ func (a *Agent) run(ctx context.Context) { jobMgr.ConfigDefaults = discCfg.Registry jobMgr.FnReg = fnMgr - if reg := a.setupVnodeRegistry(); reg == nil || reg.Len() == 0 { - vnodes.Disabled = true - } else { + if reg := a.setupVnodeRegistry(); reg != nil && reg.Len() > 0 { jobMgr.Vnodes = reg } diff --git a/src/go/plugin/go.d/agent/agent_test.go b/src/go/plugin/go.d/agent/agent_test.go index 9096b9015..39e12f751 100644 --- a/src/go/plugin/go.d/agent/agent_test.go +++ b/src/go/plugin/go.d/agent/agent_test.go @@ -9,8 +9,8 @@ import ( "testing" "time" + "github.com/netdata/netdata/go/plugins/pkg/safewriter" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/safewriter" "github.com/stretchr/testify/assert" ) diff --git a/src/go/plugin/go.d/agent/confgroup/config_test.go b/src/go/plugin/go.d/agent/confgroup/config_test.go index 98c6c3e78..d8e865b44 100644 --- a/src/go/plugin/go.d/agent/confgroup/config_test.go +++ b/src/go/plugin/go.d/agent/confgroup/config_test.go @@ -13,7 +13,7 @@ import ( func TestConfig_Name(t *testing.T) { tests := map[string]struct { cfg Config - expected interface{} + expected any }{ "string": {cfg: 
Config{"name": "name"}, expected: "name"}, "empty string": {cfg: Config{"name": ""}, expected: ""}, @@ -32,7 +32,7 @@ func TestConfig_Name(t *testing.T) { func TestConfig_Module(t *testing.T) { tests := map[string]struct { cfg Config - expected interface{} + expected any }{ "string": {cfg: Config{"module": "module"}, expected: "module"}, "empty string": {cfg: Config{"module": ""}, expected: ""}, @@ -51,7 +51,7 @@ func TestConfig_Module(t *testing.T) { func TestConfig_FullName(t *testing.T) { tests := map[string]struct { cfg Config - expected interface{} + expected any }{ "name == module": {cfg: Config{"name": "name", "module": "name"}, expected: "name"}, "name != module": {cfg: Config{"name": "name", "module": "module"}, expected: "module_name"}, @@ -68,7 +68,7 @@ func TestConfig_FullName(t *testing.T) { func TestConfig_UpdateEvery(t *testing.T) { tests := map[string]struct { cfg Config - expected interface{} + expected any }{ "int": {cfg: Config{"update_every": 1}, expected: 1}, "not int": {cfg: Config{"update_every": "1"}, expected: 0}, @@ -86,7 +86,7 @@ func TestConfig_UpdateEvery(t *testing.T) { func TestConfig_AutoDetectionRetry(t *testing.T) { tests := map[string]struct { cfg Config - expected interface{} + expected any }{ "int": {cfg: Config{"autodetection_retry": 1}, expected: 1}, "not int": {cfg: Config{"autodetection_retry": "1"}, expected: 0}, @@ -104,7 +104,7 @@ func TestConfig_AutoDetectionRetry(t *testing.T) { func TestConfig_Priority(t *testing.T) { tests := map[string]struct { cfg Config - expected interface{} + expected any }{ "int": {cfg: Config{"priority": 1}, expected: 1}, "not int": {cfg: Config{"priority": "1"}, expected: 0}, diff --git a/src/go/plugin/go.d/agent/config.go b/src/go/plugin/go.d/agent/config.go index fef68c7e0..e0c3f4605 100644 --- a/src/go/plugin/go.d/agent/config.go +++ b/src/go/plugin/go.d/agent/config.go @@ -47,13 +47,13 @@ func (c *config) isEnabled(moduleName string, explicit bool) bool { return c.DefaultRun } -func (c *config) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *config) UnmarshalYAML(unmarshal func(any) error) error { type plain config if err := unmarshal((*plain)(c)); err != nil { return err } - var m map[string]interface{} + var m map[string]any if err := unmarshal(&m); err != nil { return err } diff --git a/src/go/plugin/go.d/agent/discovery/file/parse.go b/src/go/plugin/go.d/agent/discovery/file/parse.go index 5fd31f32a..ae48fcb8f 100644 --- a/src/go/plugin/go.d/agent/discovery/file/parse.go +++ b/src/go/plugin/go.d/agent/discovery/file/parse.go @@ -97,7 +97,7 @@ func parseSDFormat(reg confgroup.Registry, path string, bs []byte) (*confgroup.G } func cfgFormat(bs []byte) format { - var data interface{} + var data any if err := yaml.Unmarshal(bs, &data); err != nil { return unknownFormat } diff --git a/src/go/plugin/go.d/agent/discovery/file/sim_test.go b/src/go/plugin/go.d/agent/discovery/file/sim_test.go index 3219c6892..a0a8c4425 100644 --- a/src/go/plugin/go.d/agent/discovery/file/sim_test.go +++ b/src/go/plugin/go.d/agent/discovery/file/sim_test.go @@ -110,7 +110,7 @@ func (d *tmpDir) renameFile(origFilename, newFilename string) { require.NoError(d.t, err) } -func (d *tmpDir) writeYAML(filename string, in interface{}) { +func (d *tmpDir) writeYAML(filename string, in any) { bs, err := yaml.Marshal(in) require.NoError(d.t, err) err = os.WriteFile(filename, bs, 0644) diff --git a/src/go/plugin/go.d/agent/discovery/sd/conffile.go b/src/go/plugin/go.d/agent/discovery/sd/conffile.go index e08a4021b..60a7208d2 
100644 --- a/src/go/plugin/go.d/agent/discovery/sd/conffile.go +++ b/src/go/plugin/go.d/agent/discovery/sd/conffile.go @@ -7,7 +7,7 @@ import ( "os" "github.com/netdata/netdata/go/plugins/logger" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/multipath" + "github.com/netdata/netdata/go/plugins/pkg/multipath" ) type confFile struct { diff --git a/src/go/plugin/go.d/agent/discovery/sd/discoverer/dockerd/docker.go b/src/go/plugin/go.d/agent/discovery/sd/discoverer/dockerd/docker.go index 1cea014a9..e79d8b562 100644 --- a/src/go/plugin/go.d/agent/discovery/sd/discoverer/dockerd/docker.go +++ b/src/go/plugin/go.d/agent/discovery/sd/discoverer/dockerd/docker.go @@ -13,8 +13,8 @@ import ( "github.com/netdata/netdata/go/plugins/logger" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/discovery/sd/model" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/dockerhost" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" "github.com/docker/docker/api/types" typesContainer "github.com/docker/docker/api/types/container" @@ -64,9 +64,9 @@ func NewDiscoverer(cfg Config) (*Discoverer, error) { type Config struct { Source string - Tags string `yaml:"tags"` - Address string `yaml:"address"` - Timeout web.Duration `yaml:"timeout"` + Tags string `yaml:"tags"` + Address string `yaml:"address"` + Timeout confopt.Duration `yaml:"timeout"` } type ( diff --git a/src/go/plugin/go.d/agent/discovery/sd/discoverer/kubernetes/kubernetes.go b/src/go/plugin/go.d/agent/discovery/sd/discoverer/kubernetes/kubernetes.go index 439e2b695..26f8a1bd2 100644 --- a/src/go/plugin/go.d/agent/discovery/sd/discoverer/kubernetes/kubernetes.go +++ b/src/go/plugin/go.d/agent/discovery/sd/discoverer/kubernetes/kubernetes.go @@ -234,7 +234,7 @@ func (d *KubeDiscoverer) setupServiceDiscoverer(ctx context.Context, namespace s return td } -func enqueue(queue *workqueue.Type, obj any) { +func enqueue(queue *workqueue.Typed[any], obj any) { key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) if err != nil { return diff --git a/src/go/plugin/go.d/agent/discovery/sd/discoverer/kubernetes/kubernetes_test.go b/src/go/plugin/go.d/agent/discovery/sd/discoverer/kubernetes/kubernetes_test.go index ba60a47b4..908a2a192 100644 --- a/src/go/plugin/go.d/agent/discovery/sd/discoverer/kubernetes/kubernetes_test.go +++ b/src/go/plugin/go.d/agent/discovery/sd/discoverer/kubernetes/kubernetes_test.go @@ -134,7 +134,7 @@ func TestKubeDiscoverer_Discover(t *testing.T) { } func prepareDiscoverer(role role, namespaces []string, objects ...runtime.Object) (*KubeDiscoverer, kubernetes.Interface) { - client := fake.NewSimpleClientset(objects...) + client := fake.NewClientset(objects...) 
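The hunks above migrate the Kubernetes discoverers from the untyped `workqueue.Type` to client-go's generic `workqueue.Typed[T]`, and swap the deprecated `fake.NewSimpleClientset` for `fake.NewClientset` in the tests. A minimal sketch of the typed-queue pattern as these call sites use it (the `string` element type here is illustrative; the discoverers instantiate `Typed[any]`):

```go
package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	// With a typed queue, Get returns the declared element type directly,
	// removing the interface{} type assertions the old workqueue.Type required.
	queue := workqueue.NewTypedWithConfig(workqueue.TypedQueueConfig[string]{Name: "demo"})

	queue.Add("default/my-pod") // enqueue a namespace/name key

	key, shutdown := queue.Get() // key is a string, no assertion needed
	if !shutdown {
		fmt.Println("processing", key)
		queue.Done(key) // mark the item as fully processed
	}

	queue.ShutDown()
}
```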
tags, _ := model.ParseTags("k8s") disc := &KubeDiscoverer{ tags: tags, diff --git a/src/go/plugin/go.d/agent/discovery/sd/discoverer/kubernetes/pod.go b/src/go/plugin/go.d/agent/discovery/sd/discoverer/kubernetes/pod.go index 617081742..f5918ca35 100644 --- a/src/go/plugin/go.d/agent/discovery/sd/discoverer/kubernetes/pod.go +++ b/src/go/plugin/go.d/agent/discovery/sd/discoverer/kubernetes/pod.go @@ -58,7 +58,7 @@ func newPodDiscoverer(pod, cmap, secret cache.SharedInformer) *podDiscoverer { panic("nil pod or cmap or secret informer") } - queue := workqueue.NewWithConfig(workqueue.QueueConfig{Name: "pod"}) + queue := workqueue.NewTypedWithConfig(workqueue.TypedQueueConfig[any]{Name: "pod"}) _, _ = pod.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(obj any) { enqueue(queue, obj) }, @@ -82,7 +82,7 @@ type podDiscoverer struct { podInformer cache.SharedInformer cmapInformer cache.SharedInformer secretInformer cache.SharedInformer - queue *workqueue.Type + queue *workqueue.Typed[any] } func (p *podDiscoverer) String() string { diff --git a/src/go/plugin/go.d/agent/discovery/sd/discoverer/kubernetes/service.go b/src/go/plugin/go.d/agent/discovery/sd/discoverer/kubernetes/service.go index 1d5ae7cd5..ebcfe31cc 100644 --- a/src/go/plugin/go.d/agent/discovery/sd/discoverer/kubernetes/service.go +++ b/src/go/plugin/go.d/agent/discovery/sd/discoverer/kubernetes/service.go @@ -53,7 +53,7 @@ type serviceDiscoverer struct { model.Base informer cache.SharedInformer - queue *workqueue.Type + queue *workqueue.Typed[any] } func newServiceDiscoverer(inf cache.SharedInformer) *serviceDiscoverer { @@ -61,7 +61,8 @@ func newServiceDiscoverer(inf cache.SharedInformer) *serviceDiscoverer { panic("nil service informer") } - queue := workqueue.NewWithConfig(workqueue.QueueConfig{Name: "service"}) + queue := workqueue.NewTypedWithConfig(workqueue.TypedQueueConfig[any]{Name: "service"}) + _, _ = inf.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(obj any) { enqueue(queue, obj) }, UpdateFunc: func(_, obj any) { enqueue(queue, obj) }, diff --git a/src/go/plugin/go.d/agent/discovery/sd/discoverer/netlisteners/ll.go b/src/go/plugin/go.d/agent/discovery/sd/discoverer/netlisteners/ll.go new file mode 100644 index 000000000..fdb70f5a3 --- /dev/null +++ b/src/go/plugin/go.d/agent/discovery/sd/discoverer/netlisteners/ll.go @@ -0,0 +1,62 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package netlisteners + +import ( + "context" + "fmt" + "os" + "os/exec" + "path/filepath" + "time" + + "github.com/netdata/netdata/go/plugins/pkg/executable" +) + +type localListeners interface { + discover(ctx context.Context) ([]byte, error) +} + +func newLocalListeners(timeout time.Duration) localListeners { + dir := os.Getenv("NETDATA_PLUGINS_DIR") + if dir == "" { + dir = executable.Directory + } + if dir == "" { + dir, _ = os.Getwd() + } + + return &localListenersExec{ + binPath: filepath.Join(dir, "local-listeners"), + timeout: timeout, + } +} + +type localListenersExec struct { + binPath string + timeout time.Duration +} + +func (e *localListenersExec) discover(ctx context.Context) ([]byte, error) { + execCtx, cancel := context.WithTimeout(ctx, e.timeout) + defer cancel() + + // TCPv4/6 and UDPv4 sockets in LISTEN state + // https://github.com/netdata/netdata/blob/master/src/collectors/utils/local_listeners.c + args := []string{ + "no-udp6", + "no-local", + "no-inbound", + "no-outbound", + "no-namespaces", + } + + cmd := exec.CommandContext(execCtx, e.binPath, args...) 
+ + bs, err := cmd.Output() + if err != nil { + return nil, fmt.Errorf("error on executing '%s': %v", cmd, err) + } + + return bs, nil +} diff --git a/src/go/plugin/go.d/agent/discovery/sd/discoverer/netlisteners/netlisteners.go b/src/go/plugin/go.d/agent/discovery/sd/discoverer/netlisteners/netlisteners.go index 60dd92cb4..b38d0506f 100644 --- a/src/go/plugin/go.d/agent/discovery/sd/discoverer/netlisteners/netlisteners.go +++ b/src/go/plugin/go.d/agent/discovery/sd/discoverer/netlisteners/netlisteners.go @@ -10,8 +10,6 @@ import ( "fmt" "log/slog" "net" - "os" - "os/exec" "path/filepath" "sort" "strconv" @@ -19,8 +17,8 @@ import ( "time" "github.com/netdata/netdata/go/plugins/logger" - "github.com/netdata/netdata/go/plugins/pkg/executable" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/discovery/sd/model" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/ilyam8/hashstructure" ) @@ -30,18 +28,27 @@ var ( fullName = fmt.Sprintf("sd:%s", shortName) ) +type Config struct { + Source string `yaml:"-"` + Tags string `yaml:"tags"` + + Interval *confopt.Duration `yaml:"interval"` + Timeout confopt.Duration `yaml:"timeout"` +} + func NewDiscoverer(cfg Config) (*Discoverer, error) { tags, err := model.ParseTags(cfg.Tags) if err != nil { return nil, fmt.Errorf("parse tags: %v", err) } - dir := os.Getenv("NETDATA_PLUGINS_DIR") - if dir == "" { - dir = executable.Directory + interval := time.Minute * 2 + if cfg.Interval != nil { + interval = cfg.Interval.Duration() } - if dir == "" { - dir, _ = os.Getwd() + timeout := time.Second * 5 + if cfg.Timeout.Duration() != 0 { + timeout = cfg.Timeout.Duration() } d := &Discoverer{ @@ -49,12 +56,10 @@ func NewDiscoverer(cfg Config) (*Discoverer, error) { slog.String("component", "service discovery"), slog.String("discoverer", shortName), ), - cfgSource: cfg.Source, - ll: &localListenersExec{ - binPath: filepath.Join(dir, "local-listeners"), - timeout: time.Second * 5, - }, - interval: time.Minute * 2, + cfgSource: cfg.Source, + ll: newLocalListeners(timeout), + interval: interval, + timeout: timeout, expiryTime: time.Minute * 10, cache: make(map[uint64]*cacheItem), started: make(chan struct{}), @@ -65,11 +70,6 @@ func NewDiscoverer(cfg Config) (*Discoverer, error) { return d, nil } -type Config struct { - Source string `yaml:"-"` - Tags string `yaml:"tags"` -} - type ( Discoverer struct { *logger.Logger @@ -78,6 +78,7 @@ type ( cfgSource string interval time.Duration + timeout time.Duration ll localListeners expiryTime time.Duration @@ -92,9 +93,6 @@ type ( lastSeenTime time.Time tgt model.Target } - localListeners interface { - discover(ctx context.Context) ([]byte, error) - } ) func (d *Discoverer) String() string { @@ -103,6 +101,7 @@ func (d *Discoverer) String() string { func (d *Discoverer) Discover(ctx context.Context, in chan<- []model.TargetGroup) { d.Info("instance is started") + d.Debugf("used config: interval: %s, timeout: %s, cache expiration time: %s", d.interval, d.timeout, d.expiryTime) defer func() { d.Info("instance is stopped") }() close(d.started) @@ -112,6 +111,10 @@ func (d *Discoverer) Discover(ctx context.Context, in chan<- []model.TargetGroup return } + if d.interval == 0 { + return + } + tk := time.NewTicker(d.interval) defer tk.Stop() @@ -295,35 +298,6 @@ func (d *Discoverer) parseLocalListeners(bs []byte) ([]model.Target, error) { return tgts[:n], nil } -type localListenersExec struct { - binPath string - timeout time.Duration -} - -func (e *localListenersExec) discover(ctx context.Context) 
([]byte, error) { - execCtx, cancel := context.WithTimeout(ctx, e.timeout) - defer cancel() - - // TCPv4/6 and UPDv4 sockets in LISTEN state - // https://github.com/netdata/netdata/blob/master/src/collectors/plugins.d/local_listeners.c - args := []string{ - "no-udp6", - "no-local", - "no-inbound", - "no-outbound", - "no-namespaces", - } - - cmd := exec.CommandContext(execCtx, e.binPath, args...) - - bs, err := cmd.Output() - if err != nil { - return nil, fmt.Errorf("error on executing '%s': %v", cmd, err) - } - - return bs, nil -} - func extractComm(cmdLine string) string { if i := strings.IndexByte(cmdLine, ' '); i != -1 { cmdLine = cmdLine[:i] diff --git a/src/go/plugin/go.d/agent/discovery/sd/pipeline/funcmap.go b/src/go/plugin/go.d/agent/discovery/sd/pipeline/funcmap.go index 5ed188a54..378e03c25 100644 --- a/src/go/plugin/go.d/agent/discovery/sd/pipeline/funcmap.go +++ b/src/go/plugin/go.d/agent/discovery/sd/pipeline/funcmap.go @@ -7,14 +7,16 @@ import ( "strconv" "text/template" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher" + "github.com/netdata/netdata/go/plugins/pkg/matcher" "github.com/Masterminds/sprig/v3" "github.com/bmatcuk/doublestar/v4" ) func newFuncMap() template.FuncMap { - custom := map[string]interface{}{ + fm := sprig.TxtFuncMap() + + extra := map[string]any{ "match": funcMatchAny, "glob": func(value, pattern string, patterns ...string) bool { return funcMatchAny("glob", value, pattern, patterns...) @@ -25,9 +27,7 @@ func newFuncMap() template.FuncMap { }, } - fm := sprig.HermeticTxtFuncMap() - - for name, fn := range custom { + for name, fn := range extra { fm[name] = fn } diff --git a/src/go/plugin/go.d/agent/discovery/sd/pipeline/pipeline_test.go b/src/go/plugin/go.d/agent/discovery/sd/pipeline/pipeline_test.go index e67b6d7ce..4f2e11199 100644 --- a/src/go/plugin/go.d/agent/discovery/sd/pipeline/pipeline_test.go +++ b/src/go/plugin/go.d/agent/discovery/sd/pipeline/pipeline_test.go @@ -35,13 +35,13 @@ func Test_defaultConfigs(t *testing.T) { require.NoError(t, err, "abs path") bs, err := os.ReadFile(file) - require.NoError(t, err, "read config file") + require.NoErrorf(t, err, "read config file '%s'", file) var cfg Config - require.NoError(t, yaml.Unmarshal(bs, &cfg), "unmarshal") + require.NoErrorf(t, yaml.Unmarshal(bs, &cfg), "unmarshal '%s'", e.Name()) _, err = New(cfg) - require.NoError(t, err, "create pipeline") + require.NoErrorf(t, err, "create pipeline '%s'", e.Name()) } } diff --git a/src/go/plugin/go.d/agent/discovery/sd/pipeline/promport.go b/src/go/plugin/go.d/agent/discovery/sd/pipeline/promport.go index 646e1abb1..7edf227c7 100644 --- a/src/go/plugin/go.d/agent/discovery/sd/pipeline/promport.go +++ b/src/go/plugin/go.d/agent/discovery/sd/pipeline/promport.go @@ -46,7 +46,6 @@ var prometheusPortAllocations = map[int]string{ 9125: "statsd_exporter", 9126: "new_relic_exporter", 9127: "pgbouncer_exporter", - 9128: "ceph_exporter", 9129: "haproxy_log_exporter", 9130: "unifi_poller", 9131: "varnish_exporter", @@ -193,7 +192,6 @@ var prometheusPortAllocations = map[int]string{ 9280: "citrix_netscaler_exporter", 9281: "fastd_exporter", 9282: "freeswitch_exporter", - 9283: "ceph_ceph-mgr_prometheus_plugin", 9284: "gobetween", 9285: "database_exporter", 9286: "vdo_compression_and_deduplication_exporter", diff --git a/src/go/plugin/go.d/agent/discovery/sd/sd.go b/src/go/plugin/go.d/agent/discovery/sd/sd.go index 687ebfba8..90207219d 100644 --- a/src/go/plugin/go.d/agent/discovery/sd/sd.go +++ b/src/go/plugin/go.d/agent/discovery/sd/sd.go @@ 
-9,9 +9,9 @@ import ( "sync" "github.com/netdata/netdata/go/plugins/logger" + "github.com/netdata/netdata/go/plugins/pkg/multipath" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/confgroup" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/discovery/sd/pipeline" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/multipath" "gopkg.in/yaml.v2" ) diff --git a/src/go/plugin/go.d/agent/discovery/sim_test.go b/src/go/plugin/go.d/agent/discovery/sim_test.go index b20344c3c..134ec29f9 100644 --- a/src/go/plugin/go.d/agent/discovery/sim_test.go +++ b/src/go/plugin/go.d/agent/discovery/sim_test.go @@ -9,6 +9,7 @@ import ( "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/confgroup" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/src/go/plugin/go.d/agent/functions/manager.go b/src/go/plugin/go.d/agent/functions/manager.go index b7cdecd6a..fe4228a75 100644 --- a/src/go/plugin/go.d/agent/functions/manager.go +++ b/src/go/plugin/go.d/agent/functions/manager.go @@ -13,8 +13,8 @@ import ( "time" "github.com/netdata/netdata/go/plugins/logger" - "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/netdataapi" - "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/safewriter" + "github.com/netdata/netdata/go/plugins/pkg/netdataapi" + "github.com/netdata/netdata/go/plugins/pkg/safewriter" ) func NewManager() *Manager { diff --git a/src/go/plugin/go.d/agent/jobmgr/manager.go b/src/go/plugin/go.d/agent/jobmgr/manager.go index 59947be77..b2ba7a2c7 100644 --- a/src/go/plugin/go.d/agent/jobmgr/manager.go +++ b/src/go/plugin/go.d/agent/jobmgr/manager.go @@ -12,12 +12,12 @@ import ( "time" "github.com/netdata/netdata/go/plugins/logger" + "github.com/netdata/netdata/go/plugins/pkg/netdataapi" + "github.com/netdata/netdata/go/plugins/pkg/safewriter" + "github.com/netdata/netdata/go/plugins/pkg/ticker" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/confgroup" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/functions" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/netdataapi" - "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/safewriter" - "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/ticker" "github.com/mattn/go-isatty" "gopkg.in/yaml.v2" diff --git a/src/go/plugin/go.d/agent/jobmgr/sim_test.go b/src/go/plugin/go.d/agent/jobmgr/sim_test.go index 9fe67175a..63369c33f 100644 --- a/src/go/plugin/go.d/agent/jobmgr/sim_test.go +++ b/src/go/plugin/go.d/agent/jobmgr/sim_test.go @@ -10,10 +10,11 @@ import ( "testing" "time" + "github.com/netdata/netdata/go/plugins/pkg/netdataapi" + "github.com/netdata/netdata/go/plugins/pkg/safewriter" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/confgroup" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/netdataapi" - "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/safewriter" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/src/go/plugin/go.d/agent/module/charts.go b/src/go/plugin/go.d/agent/module/charts.go index b60b3bac1..70b024702 100644 --- a/src/go/plugin/go.d/agent/module/charts.go +++ b/src/go/plugin/go.d/agent/module/charts.go @@ -78,7 +78,7 @@ type ( } // Chart represents a chart. 
- // For the full description please visit https://docs.netdata.cloud/collectors/plugins.d/#chart + // For the full description please visit https://docs.netdata.cloud/plugins.d/#chart Chart struct { // typeID is the unique identification of the chart, if not specified, // the orchestrator will use job full name + chart ID as typeID (default behaviour). @@ -128,7 +128,7 @@ type ( } // Dim represents a chart dimension. - // For detailed description please visit https://docs.netdata.cloud/collectors/plugins.d/#dimension. + // For detailed description please visit https://docs.netdata.cloud/plugins.d/#dimension. Dim struct { ID string Name string @@ -141,7 +141,7 @@ type ( } // Var represents a chart variable. - // For detailed description please visit https://docs.netdata.cloud/collectors/plugins.d/#variable + // For detailed description please visit https://docs.netdata.cloud/plugins.d/#variable Var struct { ID string Name string @@ -465,27 +465,19 @@ func checkID(id string) int { } func TestMetricsHasAllChartsDims(t *testing.T, charts *Charts, mx map[string]int64) { - for _, chart := range *charts { - if chart.Obsolete { - continue - } - for _, dim := range chart.Dims { - _, ok := mx[dim.ID] - assert.Truef(t, ok, "missing data for dimension '%s' in chart '%s'", dim.ID, chart.ID) - } - for _, v := range chart.Vars { - _, ok := mx[v.ID] - assert.Truef(t, ok, "missing data for variable '%s' in chart '%s'", v.ID, chart.ID) - } - } + TestMetricsHasAllChartsDimsSkip(t, charts, mx, nil) } -func TestMetricsHasAllChartsDimsSkip(t *testing.T, charts *Charts, mx map[string]int64, skip func(chart *Chart) bool) { +func TestMetricsHasAllChartsDimsSkip(t *testing.T, charts *Charts, mx map[string]int64, skip func(chart *Chart, dim *Dim) bool) { for _, chart := range *charts { - if chart.Obsolete || (skip != nil && skip(chart)) { + if chart.Obsolete { continue } for _, dim := range chart.Dims { + if skip != nil && skip(chart, dim) { + continue + } + _, ok := mx[dim.ID] assert.Truef(t, ok, "missing data for dimension '%s' in chart '%s'", dim.ID, chart.ID) } diff --git a/src/go/plugin/go.d/agent/module/job.go b/src/go/plugin/go.d/agent/module/job.go index 67fae8aa2..3db06ef00 100644 --- a/src/go/plugin/go.d/agent/module/job.go +++ b/src/go/plugin/go.d/agent/module/job.go @@ -16,8 +16,7 @@ import ( "time" "github.com/netdata/netdata/go/plugins/logger" - "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/netdataapi" - "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/vnodes" + "github.com/netdata/netdata/go/plugins/pkg/netdataapi" ) var obsoleteLock = &sync.Mutex{} @@ -228,14 +227,14 @@ func (j *Job) AutoDetection() (err error) { } if err = j.init(); err != nil { - j.Error("init failed") + j.Errorf("init failed: %v", err) j.Unmute() j.disableAutoDetection() return err } if err = j.check(); err != nil { - j.Error("check failed") + j.Errorf("check failed: %v", err) j.Unmute() return err } @@ -244,7 +243,7 @@ func (j *Job) AutoDetection() (err error) { j.Info("check success") if err = j.postCheck(); err != nil { - j.Error("postCheck failed") + j.Errorf("postCheck failed: %v", err) j.disableAutoDetection() return err } @@ -299,13 +298,11 @@ func (j *Job) Cleanup() { return } - if !vnodes.Disabled { - if !j.vnodeCreated && j.vnodeGUID != "" { - _ = j.api.HOSTINFO(j.vnodeGUID, j.vnodeHostname, j.vnodeLabels) - j.vnodeCreated = true - } - _ = j.api.HOST(j.vnodeGUID) + if !j.vnodeCreated && j.vnodeGUID != "" { + _ = j.api.HOSTINFO(j.vnodeGUID, j.vnodeHostname, j.vnodeLabels) + j.vnodeCreated = true } + _ 
= j.api.HOST(j.vnodeGUID) if j.runChart.created { j.runChart.MarkRemove() @@ -397,15 +394,22 @@ func (j *Job) collect() (result map[string]int64) { } func (j *Job) processMetrics(metrics map[string]int64, startTime time.Time, sinceLastRun int) bool { - if !vnodes.Disabled { - if !j.vnodeCreated && j.vnodeGUID != "" { + if !j.vnodeCreated { + if j.vnodeGUID == "" { + if v := j.module.VirtualNode(); v != nil && v.GUID != "" && v.Hostname != "" { + j.vnodeGUID = v.GUID + j.vnodeHostname = v.Hostname + j.vnodeLabels = v.Labels + } + } + if j.vnodeGUID != "" { _ = j.api.HOSTINFO(j.vnodeGUID, j.vnodeHostname, j.vnodeLabels) j.vnodeCreated = true } - - _ = j.api.HOST(j.vnodeGUID) } + _ = j.api.HOST(j.vnodeGUID) + if !ndInternalMonitoringDisabled && !j.runChart.created { j.runChart.ID = fmt.Sprintf("execution_time_of_%s", j.FullName()) j.createChart(j.runChart) @@ -489,15 +493,15 @@ func (j *Job) createChart(chart *Chart) { if ls == 0 { ls = LabelSourceAuto } - _ = j.api.CLABEL(l.Key, l.Value, ls) + _ = j.api.CLABEL(l.Key, lblReplacer.Replace(l.Value), ls) } } for k, v := range j.labels { if !seen[k] { - _ = j.api.CLABEL(k, v, LabelSourceConf) + _ = j.api.CLABEL(k, lblReplacer.Replace(v), LabelSourceConf) } } - _ = j.api.CLABEL("_collect_job", j.Name(), LabelSourceAuto) + _ = j.api.CLABEL("_collect_job", lblReplacer.Replace(j.Name()), LabelSourceAuto) _ = j.api.CLABELCOMMIT() for _, dim := range chart.Dims { @@ -643,3 +647,5 @@ func handleZero(v int) int { } return v } + +var lblReplacer = strings.NewReplacer("'", "") diff --git a/src/go/plugin/go.d/agent/module/module.go b/src/go/plugin/go.d/agent/module/module.go index 13e20f2ae..8d28d8059 100644 --- a/src/go/plugin/go.d/agent/module/module.go +++ b/src/go/plugin/go.d/agent/module/module.go @@ -7,6 +7,7 @@ import ( "testing" "github.com/netdata/netdata/go/plugins/logger" + "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/vnodes" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -35,6 +36,8 @@ type Module interface { GetBase() *Base Configuration() any + + VirtualNode() *vnodes.VirtualNode } // Base is a helper struct. All modules should embed this struct. @@ -44,12 +47,14 @@ type Base struct { func (b *Base) GetBase() *Base { return b } +func (b *Base) VirtualNode() *vnodes.VirtualNode { return nil } + func TestConfigurationSerialize(t *testing.T, mod Module, cfgJSON, cfgYAML []byte) { t.Helper() tests := map[string]struct { config []byte - unmarshal func(in []byte, out interface{}) (err error) - marshal func(in interface{}) (out []byte, err error) + unmarshal func(in []byte, out any) (err error) + marshal func(in any) (out []byte, err error) }{ "json": {config: cfgJSON, marshal: json.Marshal, unmarshal: json.Unmarshal}, "yaml": {config: cfgYAML, marshal: yaml.Marshal, unmarshal: yaml.Unmarshal}, diff --git a/src/go/plugin/go.d/agent/netdataapi/api.go b/src/go/plugin/go.d/agent/netdataapi/api.go deleted file mode 100644 index 4f2b7a9b5..000000000 --- a/src/go/plugin/go.d/agent/netdataapi/api.go +++ /dev/null @@ -1,213 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package netdataapi - -import ( - "bytes" - "fmt" - "io" - "strconv" -) - -type ( - // API implements Netdata external plugins API. 
- // https://learn.netdata.cloud/docs/agent/collectors/plugins.d#the-output-of-the-plugin - API struct { - io.Writer - } -) - -const quotes = "' '" - -var ( - end = []byte("END\n\n") - clabelCommit = []byte("CLABEL_COMMIT\n") - newLine = []byte("\n") -) - -func New(w io.Writer) *API { return &API{w} } - -// CHART creates or update a chart. -func (a *API) CHART( - typeID string, - ID string, - name string, - title string, - units string, - family string, - context string, - chartType string, - priority int, - updateEvery int, - options string, - plugin string, - module string) error { - _, err := a.Write([]byte("CHART " + "'" + - typeID + "." + ID + quotes + - name + quotes + - title + quotes + - units + quotes + - family + quotes + - context + quotes + - chartType + quotes + - strconv.Itoa(priority) + quotes + - strconv.Itoa(updateEvery) + quotes + - options + quotes + - plugin + quotes + - module + "'\n")) - return err -} - -// DIMENSION adds or update a dimension to the chart just created. -func (a *API) DIMENSION( - ID string, - name string, - algorithm string, - multiplier int, - divisor int, - options string) error { - _, err := a.Write([]byte("DIMENSION '" + - ID + quotes + - name + quotes + - algorithm + quotes + - strconv.Itoa(multiplier) + quotes + - strconv.Itoa(divisor) + quotes + - options + "'\n")) - return err -} - -// CLABEL adds or update a label to the chart. -func (a *API) CLABEL(key, value string, source int) error { - _, err := a.Write([]byte("CLABEL '" + - key + quotes + - value + quotes + - strconv.Itoa(source) + "'\n")) - return err -} - -// CLABELCOMMIT adds labels to the chart. Should be called after one or more CLABEL. -func (a *API) CLABELCOMMIT() error { - _, err := a.Write(clabelCommit) - return err -} - -// BEGIN initializes data collection for a chart. -func (a *API) BEGIN(typeID string, ID string, msSince int) (err error) { - if msSince > 0 { - _, err = a.Write([]byte("BEGIN " + "'" + typeID + "." + ID + "' " + strconv.Itoa(msSince) + "\n")) - } else { - _, err = a.Write([]byte("BEGIN " + "'" + typeID + "." + ID + "'\n")) - } - return err -} - -// SET sets the value of a dimension for the initialized chart. -func (a *API) SET(ID string, value int64) error { - _, err := a.Write([]byte("SET '" + ID + "' = " + strconv.FormatInt(value, 10) + "\n")) - return err -} - -// SETEMPTY sets the empty value of a dimension for the initialized chart. -func (a *API) SETEMPTY(ID string) error { - _, err := a.Write([]byte("SET '" + ID + "' = \n")) - return err -} - -// VARIABLE sets the value of a CHART scope variable for the initialized chart. -func (a *API) VARIABLE(ID string, value int64) error { - _, err := a.Write([]byte("VARIABLE CHART '" + ID + "' = " + strconv.FormatInt(value, 10) + "\n")) - return err -} - -// END completes data collection for the initialized chart. -func (a *API) END() error { - _, err := a.Write(end) - return err -} - -// DISABLE disables this plugin. This will prevent Netdata from restarting the plugin. -func (a *API) DISABLE() error { - _, err := a.Write([]byte("DISABLE\n")) - return err -} - -// EMPTYLINE writes an empty line. 
-func (a *API) EMPTYLINE() error { - _, err := a.Write(newLine) - return err -} - -func (a *API) HOSTINFO(guid, hostname string, labels map[string]string) error { - if err := a.HOSTDEFINE(guid, hostname); err != nil { - return err - } - for k, v := range labels { - if err := a.HOSTLABEL(k, v); err != nil { - return err - } - } - return a.HOSTDEFINEEND() -} - -func (a *API) HOSTDEFINE(guid, hostname string) error { - _, err := fmt.Fprintf(a, "HOST_DEFINE '%s' '%s'\n", guid, hostname) - return err -} - -func (a *API) HOSTLABEL(name, value string) error { - _, err := fmt.Fprintf(a, "HOST_LABEL '%s' '%s'\n", name, value) - return err -} - -func (a *API) HOSTDEFINEEND() error { - _, err := fmt.Fprintf(a, "HOST_DEFINE_END\n\n") - return err -} - -func (a *API) HOST(guid string) error { - _, err := a.Write([]byte("HOST " + "'" + - guid + "'\n\n")) - return err -} - -func (a *API) FUNCRESULT(uid, contentType, payload, code, expireTimestamp string) { - var buf bytes.Buffer - - buf.WriteString("FUNCTION_RESULT_BEGIN " + - uid + " " + - code + " " + - contentType + " " + - expireTimestamp + "\n", - ) - - if payload != "" { - buf.WriteString(payload + "\n") - } - - buf.WriteString("FUNCTION_RESULT_END\n\n") - - _, _ = buf.WriteTo(a) -} - -func (a *API) CONFIGCREATE(id, status, configType, path, sourceType, source, supportedCommands string) { - // https://learn.netdata.cloud/docs/contributing/external-plugins/#config - - _, _ = a.Write([]byte("CONFIG " + - id + " " + - "create" + " " + - status + " " + - configType + " " + - path + " " + - sourceType + " '" + - source + "' '" + - supportedCommands + "' 0x0000 0x0000\n\n", - )) -} - -func (a *API) CONFIGDELETE(id string) { - _, _ = a.Write([]byte("CONFIG " + id + " delete\n\n")) -} - -func (a *API) CONFIGSTATUS(id, status string) { - _, _ = a.Write([]byte("CONFIG " + id + " status " + status + "\n\n")) -} diff --git a/src/go/plugin/go.d/agent/netdataapi/api_test.go b/src/go/plugin/go.d/agent/netdataapi/api_test.go deleted file mode 100644 index e5087839b..000000000 --- a/src/go/plugin/go.d/agent/netdataapi/api_test.go +++ /dev/null @@ -1,265 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package netdataapi - -import ( - "bytes" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestAPI_CHART(t *testing.T) { - buf := &bytes.Buffer{} - a := API{Writer: buf} - - _ = a.CHART( - "", - "id", - "name", - "title", - "units", - "family", - "context", - "line", - 1, - 1, - "", - "plugin", - "module", - ) - - assert.Equal( - t, - "CHART '.id' 'name' 'title' 'units' 'family' 'context' 'line' '1' '1' '' 'plugin' 'module'\n", - buf.String(), - ) -} - -func TestAPI_DIMENSION(t *testing.T) { - buf := &bytes.Buffer{} - a := API{Writer: buf} - - _ = a.DIMENSION( - "id", - "name", - "absolute", - 1, - 1, - "", - ) - - assert.Equal( - t, - "DIMENSION 'id' 'name' 'absolute' '1' '1' ''\n", - buf.String(), - ) -} - -func TestAPI_BEGIN(t *testing.T) { - buf := &bytes.Buffer{} - a := API{Writer: buf} - - _ = a.BEGIN( - "typeID", - "id", - 0, - ) - - assert.Equal( - t, - "BEGIN 'typeID.id'\n", - buf.String(), - ) - - buf.Reset() - - _ = a.BEGIN( - "typeID", - "id", - 1, - ) - - assert.Equal( - t, - "BEGIN 'typeID.id' 1\n", - buf.String(), - ) -} - -func TestAPI_SET(t *testing.T) { - buf := &bytes.Buffer{} - a := API{Writer: buf} - - _ = a.SET("id", 100) - - assert.Equal( - t, - "SET 'id' = 100\n", - buf.String(), - ) -} - -func TestAPI_SETEMPTY(t *testing.T) { - buf := &bytes.Buffer{} - a := API{Writer: buf} - - _ = a.SETEMPTY("id") - - assert.Equal( - t, 
- "SET 'id' = \n", - buf.String(), - ) -} - -func TestAPI_VARIABLE(t *testing.T) { - buf := &bytes.Buffer{} - a := API{Writer: buf} - - _ = a.VARIABLE("id", 100) - - assert.Equal( - t, - "VARIABLE CHART 'id' = 100\n", - buf.String(), - ) -} - -func TestAPI_END(t *testing.T) { - buf := &bytes.Buffer{} - a := API{Writer: buf} - - _ = a.END() - - assert.Equal( - t, - "END\n\n", - buf.String(), - ) -} - -func TestAPI_CLABEL(t *testing.T) { - buf := &bytes.Buffer{} - a := API{Writer: buf} - - _ = a.CLABEL("key", "value", 1) - - assert.Equal( - t, - "CLABEL 'key' 'value' '1'\n", - buf.String(), - ) -} - -func TestAPI_CLABELCOMMIT(t *testing.T) { - buf := &bytes.Buffer{} - a := API{Writer: buf} - - _ = a.CLABELCOMMIT() - - assert.Equal( - t, - "CLABEL_COMMIT\n", - buf.String(), - ) -} - -func TestAPI_DISABLE(t *testing.T) { - buf := &bytes.Buffer{} - a := API{Writer: buf} - - _ = a.DISABLE() - - assert.Equal( - t, - "DISABLE\n", - buf.String(), - ) -} - -func TestAPI_EMPTYLINE(t *testing.T) { - buf := &bytes.Buffer{} - a := API{Writer: buf} - - _ = a.EMPTYLINE() - - assert.Equal( - t, - "\n", - buf.String(), - ) -} - -func TestAPI_HOST(t *testing.T) { - buf := &bytes.Buffer{} - a := API{Writer: buf} - - _ = a.HOST("guid") - - assert.Equal( - t, - "HOST 'guid'\n\n", - buf.String(), - ) -} - -func TestAPI_HOSTDEFINE(t *testing.T) { - buf := &bytes.Buffer{} - a := API{Writer: buf} - - _ = a.HOSTDEFINE("guid", "hostname") - - assert.Equal( - t, - "HOST_DEFINE 'guid' 'hostname'\n", - buf.String(), - ) -} - -func TestAPI_HOSTLABEL(t *testing.T) { - buf := &bytes.Buffer{} - a := API{Writer: buf} - - _ = a.HOSTLABEL("name", "value") - - assert.Equal( - t, - "HOST_LABEL 'name' 'value'\n", - buf.String(), - ) -} - -func TestAPI_HOSTDEFINEEND(t *testing.T) { - buf := &bytes.Buffer{} - a := API{Writer: buf} - - _ = a.HOSTDEFINEEND() - - assert.Equal( - t, - "HOST_DEFINE_END\n\n", - buf.String(), - ) -} - -func TestAPI_HOSTINFO(t *testing.T) { - buf := &bytes.Buffer{} - a := API{Writer: buf} - - _ = a.HOSTINFO("guid", "hostname", map[string]string{"label1": "value1"}) - - assert.Equal( - t, - `HOST_DEFINE 'guid' 'hostname' -HOST_LABEL 'label1' 'value1' -HOST_DEFINE_END - -`, - buf.String(), - ) -} - -func TestAPI_FUNCRESULT(t *testing.T) { - -} diff --git a/src/go/plugin/go.d/agent/safewriter/writer.go b/src/go/plugin/go.d/agent/safewriter/writer.go deleted file mode 100644 index 533c1055d..000000000 --- a/src/go/plugin/go.d/agent/safewriter/writer.go +++ /dev/null @@ -1,30 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package safewriter - -import ( - "io" - "os" - "sync" -) - -var Stdout = New(os.Stdout) - -func New(w io.Writer) io.Writer { - return &writer{ - mx: &sync.Mutex{}, - w: w, - } -} - -type writer struct { - mx *sync.Mutex - w io.Writer -} - -func (w *writer) Write(p []byte) (n int, err error) { - w.mx.Lock() - n, err = w.w.Write(p) - w.mx.Unlock() - return n, err -} diff --git a/src/go/plugin/go.d/agent/ticker/ticker.go b/src/go/plugin/go.d/agent/ticker/ticker.go deleted file mode 100644 index e4228fe4c..000000000 --- a/src/go/plugin/go.d/agent/ticker/ticker.go +++ /dev/null @@ -1,55 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package ticker - -import "time" - -type ( - // Ticker holds a channel that delivers ticks of a clock at intervals. - // The ticks are aligned to interval boundaries. 
- Ticker struct { - C <-chan int - done chan struct{} - loops int - interval time.Duration - } -) - -// New returns a new Ticker containing a channel that will send the time with a period specified by the duration argument. -// It adjusts the intervals or drops ticks to make up for slow receivers. -// The duration must be greater than zero; if not, New will panic. Stop the Ticker to release associated resources. -func New(interval time.Duration) *Ticker { - ticker := &Ticker{ - interval: interval, - done: make(chan struct{}, 1), - } - ticker.start() - return ticker -} - -func (t *Ticker) start() { - ch := make(chan int) - t.C = ch - go func() { - LOOP: - for { - now := time.Now() - nextRun := now.Truncate(t.interval).Add(t.interval) - - time.Sleep(nextRun.Sub(now)) - select { - case <-t.done: - close(ch) - break LOOP - case ch <- t.loops: - t.loops++ - } - } - }() -} - -// Stop turns off a Ticker. After Stop, no more ticks will be sent. -// Stop does not close the channel, to prevent a read from the channel succeeding incorrectly. -func (t *Ticker) Stop() { - t.done <- struct{}{} -} diff --git a/src/go/plugin/go.d/agent/ticker/ticket_test.go b/src/go/plugin/go.d/agent/ticker/ticket_test.go deleted file mode 100644 index 193085365..000000000 --- a/src/go/plugin/go.d/agent/ticker/ticket_test.go +++ /dev/null @@ -1,50 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package ticker - -import ( - "testing" - "time" -) - -// TODO: often fails Circle CI (~200-240) -var allowedDelta = 500 * time.Millisecond - -func TestTickerParallel(t *testing.T) { - for i := 0; i < 100; i++ { - i := i - go func() { - time.Sleep(time.Second / 100 * time.Duration(i)) - TestTicker(t) - }() - } - time.Sleep(4 * time.Second) -} - -func TestTicker(t *testing.T) { - tk := New(time.Second) - defer tk.Stop() - prev := time.Now() - for i := 0; i < 3; i++ { - <-tk.C - now := time.Now() - diff := abs(now.Round(time.Second).Sub(now)) - if diff >= allowedDelta { - t.Errorf("Ticker is not aligned: expect delta < %v but was: %v (%s)", allowedDelta, diff, now.Format(time.RFC3339Nano)) - } - if i > 0 { - dt := now.Sub(prev) - if abs(dt-time.Second) >= allowedDelta { - t.Errorf("Ticker interval: expect delta < %v ns but was: %v", allowedDelta, abs(dt-time.Second)) - } - } - prev = now - } -} - -func abs(a time.Duration) time.Duration { - if a < 0 { - return -a - } - return a -} diff --git a/src/go/plugin/go.d/agent/vnodes/vnodes.go b/src/go/plugin/go.d/agent/vnodes/vnodes.go index 3d332c261..2c0027b88 100644 --- a/src/go/plugin/go.d/agent/vnodes/vnodes.go +++ b/src/go/plugin/go.d/agent/vnodes/vnodes.go @@ -11,11 +11,10 @@ import ( "github.com/netdata/netdata/go/plugins/logger" + "github.com/google/uuid" "gopkg.in/yaml.v2" ) -var Disabled = false // TODO: remove after Netdata v1.39.0. Fix for "from source" stable-channel installations. 
- func New(confDir string) *Vnodes { vn := &Vnodes{ Logger: logger.New().With( @@ -39,9 +38,9 @@ type ( vnodes map[string]*VirtualNode } VirtualNode struct { - GUID string `yaml:"guid"` - Hostname string `yaml:"hostname"` - Labels map[string]string `yaml:"labels"` + GUID string `yaml:"guid" json:"guid"` + Hostname string `yaml:"hostname" json:"hostname"` + Labels map[string]string `yaml:"labels" json:"labels"` } ) @@ -101,7 +100,11 @@ func (vn *Vnodes) readConfDir() { for _, v := range cfg { if v.Hostname == "" || v.GUID == "" { - vn.Warningf("skipping virtual node '%+v': some required fields are missing (%s)", v, path) + vn.Warningf("skipping virtual node '%+v': required fields are missing (%s)", v, path) + continue + } + if err := uuid.Validate(v.GUID); err != nil { + vn.Warningf("skipping virtual node '%+v': invalid GUID: %v (%s)", v, err, path) continue } if _, ok := vn.vnodes[v.Hostname]; ok { @@ -127,7 +130,7 @@ func isConfigFile(path string) bool { } } -func loadConfigFile(conf interface{}, path string) error { +func loadConfigFile(conf any, path string) error { f, err := os.Open(path) if err != nil { return err diff --git a/src/go/plugin/go.d/config/go.d.conf b/src/go/plugin/go.d/config/go.d.conf index 198bcd086..a2435e205 100644 --- a/src/go/plugin/go.d/config/go.d.conf +++ b/src/go/plugin/go.d/config/go.d.conf @@ -19,8 +19,11 @@ modules: # activemq: yes # ap: yes # apache: yes +# apcupsd: yes # beanstalk: yes # bind: yes +# boinc: yes +# ceph: yes # chrony: yes # clickhouse: yes # cockroachdb: yes @@ -39,7 +42,6 @@ modules: # dovecot: yes # elasticsearch: yes # envoy: yes -# example: no # exim: yes # fail2ban: yes # filecheck: yes @@ -62,6 +64,7 @@ modules: # logind: yes # logstash: yes # lvm: yes +# maxscale: yes # megacli: yes # memcached: yes # mongodb: yes @@ -69,11 +72,13 @@ modules: # mysql: yes # nginx: yes # nginxplus: yes +# nginxunit: yes # nginxvts: yes # nsd: yes # ntpd: yes -# nvme: yes # nvidia_smi: no +# nvme: yes +# openldap: yes # openvpn: no # openvpn_status_log: yes # ping: yes @@ -95,12 +100,14 @@ modules: # rethinkdb: yes # riakkv: yes # rspamd: yes +# samba: yes # scaleio: yes # sensors: yes # snmp: yes # squid: yes # squidlog: yes # smartctl: yes +# spigotmc: yes # storcli: yes # supervisord: yes # systemdunits: yes @@ -108,12 +115,15 @@ modules: # tomcat: yes # tor: yes # traefik: yes +# typesense: yes # upsd: yes # unbound: yes # uwsgi: yes +# varnish: yes # vernemq: yes # vcsa: yes # vsphere: yes +# w1sensor: yes # web_log: yes # wireguard: yes # whoisquery: yes diff --git a/src/go/plugin/go.d/config/go.d/apcupsd.conf b/src/go/plugin/go.d/config/go.d/apcupsd.conf new file mode 100644 index 000000000..2b40366f4 --- /dev/null +++ b/src/go/plugin/go.d/config/go.d/apcupsd.conf @@ -0,0 +1,6 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/apcupsd#readme + +#jobs: +# - name: local +# address: 127.0.0.1:3551 diff --git a/src/go/plugin/go.d/config/go.d/boinc.conf b/src/go/plugin/go.d/config/go.d/boinc.conf new file mode 100644 index 000000000..be46d6a15 --- /dev/null +++ b/src/go/plugin/go.d/config/go.d/boinc.conf @@ -0,0 +1,6 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/boinc#readme + +#jobs: +# - name: local +# address: 127.0.0.1:31416 diff --git a/src/go/plugin/go.d/config/go.d/ceph.conf b/src/go/plugin/go.d/config/go.d/ceph.conf new 
file mode 100644 index 000000000..34cc3b08a --- /dev/null +++ b/src/go/plugin/go.d/config/go.d/ceph.conf @@ -0,0 +1,6 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/ceph#readme + +#jobs: +# - name: local +# url: https://127.0.0.1:8443 diff --git a/src/go/plugin/go.d/config/go.d/example.conf b/src/go/plugin/go.d/config/go.d/example.conf deleted file mode 100644 index f92669a68..000000000 --- a/src/go/plugin/go.d/config/go.d/example.conf +++ /dev/null @@ -1,5 +0,0 @@ -## All available configuration options, their descriptions and default values: -## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/example#readme - -jobs: - - name: example diff --git a/src/go/plugin/go.d/config/go.d/maxscale.conf b/src/go/plugin/go.d/config/go.d/maxscale.conf new file mode 100644 index 000000000..97e7e17bd --- /dev/null +++ b/src/go/plugin/go.d/config/go.d/maxscale.conf @@ -0,0 +1,6 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/maxscale#readme + +#jobs: +# - name: local +# url: http://127.0.0.1:8989 diff --git a/src/go/plugin/go.d/config/go.d/nginxunit.conf b/src/go/plugin/go.d/config/go.d/nginxunit.conf new file mode 100644 index 000000000..04241a7fb --- /dev/null +++ b/src/go/plugin/go.d/config/go.d/nginxunit.conf @@ -0,0 +1,6 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/nginxunit#readme + +#jobs: +# - name: local +# url: http://127.0.0.1:8000 diff --git a/src/go/plugin/go.d/config/go.d/nvidia_smi.conf b/src/go/plugin/go.d/config/go.d/nvidia_smi.conf index 4c1e01a40..39a701436 100644 --- a/src/go/plugin/go.d/config/go.d/nvidia_smi.conf +++ b/src/go/plugin/go.d/config/go.d/nvidia_smi.conf @@ -3,4 +3,3 @@ jobs: - name: nvidia_smi - use_csv_format: no diff --git a/src/go/plugin/go.d/config/go.d/openldap.conf b/src/go/plugin/go.d/config/go.d/openldap.conf new file mode 100644 index 000000000..6d1005943 --- /dev/null +++ b/src/go/plugin/go.d/config/go.d/openldap.conf @@ -0,0 +1,8 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/openldap#readme + +#jobs: +# - name: local +# url: ldap://127.0.0.1:389 +# username: cn=user,dc=example,dc=com +# password: password diff --git a/src/go/plugin/go.d/config/go.d/oracledb.conf b/src/go/plugin/go.d/config/go.d/oracledb.conf new file mode 100644 index 000000000..945a77670 --- /dev/null +++ b/src/go/plugin/go.d/config/go.d/oracledb.conf @@ -0,0 +1,9 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/oracledb#readme + +#jobs: +# - name: local +# dsn: oracle://username:password@127.0.0.1:1521/XE +# +# - name: local +# dsn: 'oracle://username:password@127.0.0.1:1521/XE?ssl=true&ssl verify=false' # TLS connection diff --git a/src/go/plugin/go.d/config/go.d/samba.conf b/src/go/plugin/go.d/config/go.d/samba.conf new file mode 100644 index 000000000..c8a9236a2 --- /dev/null +++ b/src/go/plugin/go.d/config/go.d/samba.conf @@ -0,0 +1,5 @@ +## All available configuration options, their descriptions and default values: +## 
https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/samba#readme + +jobs: + - name: samba diff --git a/src/go/plugin/go.d/config/go.d/sd/docker.conf b/src/go/plugin/go.d/config/go.d/sd/docker.conf index c93fbef87..e8bac9b1e 100644 --- a/src/go/plugin/go.d/config/go.d/sd/docker.conf +++ b/src/go/plugin/go.d/config/go.d/sd/docker.conf @@ -28,6 +28,8 @@ classify: expr: '{{ match "sp" .Image "httpd httpd:* */apache */apache:* */apache2 */apache2:*" }}' - tags: "beanstalk" expr: '{{ match "sp" .Image "*/beanstalkd */beanstalkd:*" }}' + - tags: "boinc" + expr: '{{ match "sp" .Image "boinc/client boinc/client:* */boinc */boinc:*" }}' - tags: "cockroachdb" expr: '{{ match "sp" .Image "cockroachdb/cockroach cockroachdb/cockroach:*" }}' - tags: "consul" @@ -48,6 +50,8 @@ classify: expr: '{{ and (eq .PrivatePort "5001") (match "sp" .Image "ipfs/kubo ipfs/kubo:*") }}' - tags: "lighttpd" expr: '{{ match "sp" .Image "*/lighttpd */lighttpd:*" }}' + - tags: "maxscale" + expr: '{{ or (eq .PrivatePort "8989") (match "sp" .Image "mariadb/maxscale mariadb/maxscale:*") }}' - tags: "memcached" expr: '{{ or (eq .PrivatePort "11211") (match "sp" .Image "memcached memcached:* */memcached */memcached:*") }}' - tags: "mongodb" @@ -56,6 +60,10 @@ classify: expr: '{{ or (eq .PrivatePort "3306") (match "sp" .Image "mysql mysql:* */mysql */mysql:* mariadb mariadb:* */mariadb */mariadb:* percona percona:* */percona-mysql */percona-mysql:*") }}' - tags: "nginx" expr: '{{ match "sp" .Image "nginx nginx:*" }}' + - tags: "nginxunit" + expr: '{{ match "sp" .Image "nginx/unit nginx/unit:*" }}' + - tags: "oracledb" + expr: '{{ and (eq .PrivatePort "1521" "2484") (match "sp" .Image "oracle/database oracle/database:*") }}' - tags: "pgbouncer" expr: '{{ or (eq .PrivatePort "6432") (match "sp" .Image "*/pgbouncer */pgbouncer:*") }}' - tags: "pika" @@ -80,6 +88,10 @@ classify: expr: '{{ and (eq .PrivatePort "9051") (match "sp" .Image "*/tor */tor:*") }}' - tags: "tomcat" expr: '{{ match "sp" .Image "tomcat tomcat:* */tomcat */tomcat:*" }}' + - tags: "typesense" + expr: '{{ match "sp" .Image "typesense/typesense typesense/typesense:*" }}' + - tags: "varnish" + expr: '{{ match "sp" .Image "varnish varnish:*" }}' - tags: "vernemq" expr: '{{ match "sp" .Image "*/vernemq */vernemq:*" }}' - tags: "zookeeper" @@ -98,6 +110,11 @@ compose: module: beanstalk name: docker_{{.Name}} address: {{.Address}} + - selector: "boinc" + template: | + module: boinc + name: docker_{{.Name}} + address: {{.Address}} - selector: "cockroachdb" template: | module: cockroachdb @@ -160,6 +177,11 @@ compose: module: lighttpd name: docker_{{.Name}} url: http://{{.Address}}/server-status?auto + - selector: "maxscale" + template: | + module: maxscale + name: docker_{{.Name}} + url: http://{{.Address}} - selector: "memcached" template: | module: memcached @@ -189,6 +211,20 @@ compose: - module: nginx name: docker_{{.Name}} url: http://{{.Address}}/status + - selector: "nginxunit" + template: | + - module: nginxunit + name: docker_{{.Name}} + url: http://{{.Address}} + - selector: "oracledb" + template: | + module: oracledb + name: docker_{{.Name}} + {{ if eq .PrivatePort "1521" -}} + dsn: 'oracle://username:password@{{.Address}}/XE' + {{ else -}} + dsn: 'oracle://username:password@{{.Address}}/XE?ssl=true&ssl verify=false' + {{ end -}} - selector: "pgbouncer" template: | module: pgbouncer @@ -245,11 +281,22 @@ compose: module: tomcat name: docker_{{.Name}} url: http://{{.Address}} + - selector: "typesense" + template: | + module: typesense + 
name: docker_{{.Name}} + url: http://{{.Address}} + api_key: {{ trimPrefix "--api-key=" (regexFind "--api-key=[^ ]+" .Command) -}} - selector: "tor" template: | module: tor name: docker_{{.Name}} address: {{.Address}} + - selector: "varnish" + template: | + module: varnish + name: docker_{{.Name}} + docker_container: {{.Name}} - selector: "vernemq" template: | module: vernemq diff --git a/src/go/plugin/go.d/config/go.d/sd/net_listeners.conf b/src/go/plugin/go.d/config/go.d/sd/net_listeners.conf index 4462fc112..6a7f7d4a7 100644 --- a/src/go/plugin/go.d/config/go.d/sd/net_listeners.conf +++ b/src/go/plugin/go.d/config/go.d/sd/net_listeners.conf @@ -16,12 +16,18 @@ classify: expr: '{{ and (eq .Port "8161") (eq .Comm "activemq") }}' - tags: "apache" expr: '{{ and (eq .Port "80" "8080") (eq .Comm "apache" "apache2" "httpd") }}' + - tags: "apcupsd" + expr: '{{ or (eq .Port "3551") (eq .Comm "apcupsd") }}' - tags: "beanstalk" expr: '{{ or (eq .Port "11300") (eq .Comm "beanstalkd") }}' + - tags: "boinc" + expr: '{{ and (eq .Port "31416") (eq .Comm "boinc") }}' - tags: "bind" expr: '{{ and (eq .Port "8653") (eq .Comm "bind" "named") }}' - tags: "cassandra" expr: '{{ and (eq .Port "7072") (glob .Cmdline "*cassandra*") }}' + - tags: "ceph" + expr: '{{ and (eq .Port "8443") (eq .Comm "ceph-mgr") }}' - tags: "chrony" expr: '{{ and (eq .Port "323") (eq .Comm "chronyd") }}' - tags: "clickhouse" @@ -76,6 +82,8 @@ classify: expr: '{{ and (eq .Port "80" "8080") (eq .Comm "lighttpd") }}' - tags: "logstash" expr: '{{ and (eq .Port "9600") (glob .Cmdline "*logstash*") }}' + - tags: "maxscale" + expr: '{{ or (eq .Port "8989") (eq .Comm "maxscale") }}' - tags: "memcached" expr: '{{ or (eq .Port "11211") (eq .Comm "memcached") }}' - tags: "mongodb" @@ -86,10 +94,16 @@ classify: expr: '{{ or (eq .Port "3306") (eq .Comm "mysqld" "mariadbd") }}' - tags: "nginx" expr: '{{ and (eq .Port "80" "8080") (eq .Comm "nginx") }}' + - tags: "nginxunit" + expr: '{{ and (eq .Port "8000") (eq .Comm "unit") }}' - tags: "ntpd" expr: '{{ or (eq .Port "123") (eq .Comm "ntpd") }}' + - tags: "openldap" + expr: '{{ eq .Comm "slapd" }}' - tags: "openvpn" expr: '{{ and (eq .Port "7505") (eq .Comm "openvpn") }}' + - tags: "oracledb" + expr: '{{ and (eq .Port "1521" "2484") (eq .Comm "tnslsnr") }}' - tags: "pgbouncer" expr: '{{ or (eq .Port "6432") (eq .Comm "pgbouncer") }}' - tags: "pihole" @@ -118,6 +132,8 @@ classify: expr: '{{ and (eq .Port "11334") (eq .Comm "rspamd") }}' - tags: "squid" expr: '{{ and (eq .Port "3128") (eq .Comm "squid") }}' + - tags: "spigotmc" + expr: '{{ and (eq .Port "25575") (glob .Cmdline "*spigot*") }}' - tags: "supervisord" expr: '{{ and (eq .Port "9001") (eq .Comm "supervisord") }}' - tags: "tomcat" @@ -126,6 +142,8 @@ classify: expr: '{{ and (eq .Port "9051") (eq .Comm "tor") }}' - tags: "traefik" expr: '{{ and (eq .Port "80" "8080") (eq .Comm "traefik") }}' + - tags: "typesense" + expr: '{{ and (eq .Port "8108") (eq .Comm "typesense-server") }}' - tags: "unbound" expr: '{{ and (eq .Port "8953") (eq .Comm "unbound") }}' - tags: "upsd" @@ -157,6 +175,11 @@ compose: module: apache name: local url: http://{{.Address}}/server-status?auto + - selector: "apcupsd" + template: | + module: apcupsd + name: local_{{.Port}} + address: {{.Address}} - selector: "beanstalk" template: | module: beanstalk @@ -167,11 +190,21 @@ compose: module: bind name: local url: http://{{.Address}}/json/v1 + - selector: "boinc" + template: | + module: boinc + name: local + address: {{.Address}} - selector: "cassandra" template: | module: 
cassandra name: local url: http://{{.Address}}/metrics + - selector: "ceph" + template: | + module: ceph + name: local + url: https://{{.Address}} - selector: "chrony" template: | module: chrony @@ -330,6 +363,11 @@ compose: module: logstash name: local url: http://{{.Address}} + - selector: "maxscale" + template: | + module: maxscale + name: local + url: http://{{.Address}} - selector: "memcached" template: | module: memcached @@ -369,17 +407,36 @@ compose: - module: nginx name: local url: http://{{.Address}}/status + - selector: "nginxunit" + template: | + - module: nginxunit + name: local + url: http://{{.Address}} - selector: "ntpd" template: | module: ntpd name: local address: {{.Address}} collect_peers: no + - selector: "openldap" + template: | + module: openldap + name: local + url: ldap://{{.Address}} - selector: "openvpn" template: | module: openvpn name: local address: {{.Address}} + - selector: "oracledb" + template: | + module: oracledb + name: local + {{ if eq .Port "1521" -}} + dsn: 'oracle://username:password@{{.Address}}/XE' + {{ else -}} + dsn: 'oracle://username:password@{{.Address}}/XE?ssl=true&ssl verify=false' + {{ end -}} - selector: "pgbouncer" template: | module: pgbouncer @@ -464,6 +521,11 @@ compose: module: squid name: local url: http://{{.Address}} + - selector: "spigotmc" + template: | + module: spigotmc + name: local + address: {{.Address}} - selector: "supervisord" template: | module: supervisord @@ -474,6 +536,12 @@ compose: module: traefik name: local url: http://{{.Address}}/metrics + - selector: "typesense" + template: | + module: typesense + name: local + url: http://{{.Address}} + api_key: {{ trimPrefix "--api-key=" (regexFind "--api-key=[^ ]+" .Cmdline) -}} - selector: "tomcat" template: | module: tomcat diff --git a/src/go/plugin/go.d/config/go.d/spigotmc.conf b/src/go/plugin/go.d/config/go.d/spigotmc.conf new file mode 100644 index 000000000..1ee243068 --- /dev/null +++ b/src/go/plugin/go.d/config/go.d/spigotmc.conf @@ -0,0 +1,6 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/spigotmc#readme + +#jobs: +# - name: local +# address: 127.0.0.1:25575 diff --git a/src/go/plugin/go.d/config/go.d/typesense.conf b/src/go/plugin/go.d/config/go.d/typesense.conf new file mode 100644 index 000000000..1ca549803 --- /dev/null +++ b/src/go/plugin/go.d/config/go.d/typesense.conf @@ -0,0 +1,6 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/typesense#readme + +#jobs: +# - name: local +# url: http://127.0.0.1:8108 diff --git a/src/go/plugin/go.d/config/go.d/varnish.conf b/src/go/plugin/go.d/config/go.d/varnish.conf new file mode 100644 index 000000000..64950d6cd --- /dev/null +++ b/src/go/plugin/go.d/config/go.d/varnish.conf @@ -0,0 +1,5 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/varnish#readme + +jobs: + - name: local diff --git a/src/go/plugin/go.d/config/go.d/w1sensor.conf b/src/go/plugin/go.d/config/go.d/w1sensor.conf new file mode 100644 index 000000000..005f58058 --- /dev/null +++ b/src/go/plugin/go.d/config/go.d/w1sensor.conf @@ -0,0 +1,6 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/w1sensor#readme + +jobs: + - 
name: w1sensor + sensors_path: /sys/bus/w1/devices diff --git a/src/go/plugin/go.d/docs/how-to-write-a-module.md b/src/go/plugin/go.d/docs/how-to-write-a-module.md index bf7d3bc6d..a861cf8c9 100644 --- a/src/go/plugin/go.d/docs/how-to-write-a-module.md +++ b/src/go/plugin/go.d/docs/how-to-write-a-module.md @@ -1,14 +1,3 @@ - - # How to write a Netdata collector in Go ## Prerequisites @@ -22,7 +11,7 @@ sidebar_position: 20 ## Write and test a simple collector -> :exclamation: You can skip most of these steps if you first experiment directy with the existing +> :exclamation: You can skip most of these steps if you first experiment directly with the existing > [example module](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/example), which > will > give you an idea of how things work. @@ -33,9 +22,9 @@ The steps are: - Add the source code to [`modules/example2/`](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules). - - [module interface](#module-interface). - - [suggested module layout](#module-layout). - - [helper packages](#helper-packages). + - [module interface](#module-interface). + - [suggested module layout](#module-layout). + - [helper packages](#helper-packages). - Add the configuration to [`config/go.d/example2.conf`](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/config/go.d). - Add the module @@ -48,7 +37,7 @@ The steps are: developed collector. It will be placed into the `bin` directory (e.g `go.d.plugin/bin`) - Run it in the debug mode `bin/godplugin -d -m `. This will output the `STDOUT` of the collector, the same output that is sent to the Netdata Agent and is transformed into charts. You can read more about this collector API in - our [documentation](/src/collectors/plugins.d/README.md#external-plugins-api). + our [documentation](/src/plugins.d/README.md#external-plugins-api). - If you want to test the collector with the actual Netdata Agent, you need to replace the `go.d.plugin` binary that exists in the Netdata Agent installation directory with the one you just compiled. Once you restart the Netdata Agent, it will detect and run it, creating all the charts. It is advised not to remove the default `go.d.plugin` binary, but simply rename it to `go.d.plugin.old` so that the Agent doesn't run it, but you can easily rename it back once you are done. @@ -58,7 +47,7 @@ The steps are: Every module should implement the following interface: -``` +```go type Module interface { Init() bool Check() bool @@ -75,7 +64,7 @@ type Module interface { We propose to use the following template: -``` +```go // example.go func (e *Example) Init() bool { @@ -97,7 +86,7 @@ func (e *Example) Init() bool { } ``` -Move specific initialization methods into the `init.go` file. See [suggested module layout](#module-Layout). +Move specific initialization methods into the `init.go` file. See [suggested module layout](#module-layout). ### Check method @@ -108,7 +97,7 @@ Move specific initialization methods into the `init.go` file. See [suggested mod The simplest way to implement `Check` is to see if we are getting any metrics from `Collect`. A lot of modules use such approach. -``` +```go // example.go func (e *Example) Check() bool { @@ -119,7 +108,7 @@ func (e *Example) Check() bool { ### Charts method :exclamation: Netdata module -produces [`charts`](/src/collectors/plugins.d/README.md#chart), not +produces [`charts`](/src/plugins.d/README.md#chart), not raw metrics. 
Use [`agent/module`](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/agent/module/charts.go) @@ -127,14 +116,14 @@ package to create them, it contains charts and dimensions structs. - `Charts` returns - the [charts](/src/collectors/plugins.d/README.md#chart) (`*module.Charts`). + the [charts](/src/plugins.d/README.md#chart) (`*module.Charts`). - Called after `Check` and only if `Check` returned `true`. - If it returns `nil`, the job will be disabled - :warning: Make sure not to share the returned value between module instances (jobs). Usually, charts are initialized in `Init`, and the `Charts` method just returns the charts instance: -``` +```go // example.go func (e *Example) Charts() *Charts { @@ -151,7 +140,7 @@ func (e *Example) Charts() *Charts { We propose to use the following template: -``` +```go // example.go func (e *Example) Collect() map[string]int64 { @@ -167,7 +156,7 @@ func (e *Example) Collect() map[string]int64 { } ``` -Move metrics collection logic into the `collect.go` file. See [suggested module layout](#module-Layout). +Move metrics collection logic into the `collect.go` file. See [suggested module layout](#module-layout). ### Cleanup method @@ -176,7 +165,7 @@ Move metrics collection logic into the `collect.go` file. See [suggested module If you have nothing to clean up: -``` +```go // example.go func (Example) Cleanup() {} @@ -229,7 +218,7 @@ All the module initialization details should go in this file. - make a function for each value that needs to be initialized. - a function should return a value(s), not implicitly set/change any values in the main struct. -``` +```go // init.go // Prefer this approach. @@ -244,7 +233,7 @@ func (e *Example) initSomeValue() error { m.someValue = someValue return nil } -``` +``` ### File `collect.go` @@ -257,7 +246,7 @@ Feel free to split it into several files if you think it makes the code more rea Use `collect_` prefix for the filenames: `collect_this.go`, `collect_that.go`, etc. -``` +```go // collect.go func (e *Example) collect() (map[string]int64, error) { @@ -273,10 +262,10 @@ func (e *Example) collect() (map[string]int64, error) { > :exclamation: See the > example: [`example_test.go`](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/example/example_test.go). - +> > If you have no experience in testing, we recommend starting > with [testing package documentation](https://golang.org/pkg/testing/). - +> > We use the `assert` and `require` packages from [github.com/stretchr/testify](https://github.com/stretchr/testify) > library, > check [their documentation](https://pkg.go.dev/github.com/stretchr/testify). @@ -299,4 +288,3 @@ be [`testdata`](https://golang.org/cmd/go/#hdr-Package_lists_and_patterns). There are [some helper packages](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg) for writing a module.
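To tie the guide's pieces together, here is a minimal consolidated sketch of a module that satisfies the documented interface. The `example2` package name and the single `random` metric are illustrative only, and the `bool`-returning `Init`/`Check` signatures follow the interface as this guide documents it (newer modules elsewhere in this patch, such as activemq, return `error` instead):

```go
// example.go - a minimal module assembled as the guide above suggests.
package example2

import (
	"math/rand"

	"github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
)

type Example2 struct {
	module.Base // provides logging and other common plumbing

	charts *module.Charts
}

// Init prepares the module; this sketch has no configuration to validate.
func (e *Example2) Init() bool { return true }

// Check passes if a trial collection returns any metrics.
func (e *Example2) Check() bool { return len(e.Collect()) > 0 }

// Charts returns this job's own charts instance (never shared between jobs).
func (e *Example2) Charts() *module.Charts { return e.charts }

// Collect gathers metrics; returning nil signals a failed collection.
func (e *Example2) Collect() map[string]int64 {
	mx, err := e.collect()
	if err != nil {
		e.Error(err)
		return nil
	}
	return mx
}

// Cleanup releases resources; nothing to release in this sketch.
func (e *Example2) Cleanup() {}

// collect would live in collect.go under the suggested layout.
func (e *Example2) collect() (map[string]int64, error) {
	return map[string]int64{"random": rand.Int63n(100)}, nil
}
```

Registering the module with `module.Register` and adding `config/go.d/example2.conf` would follow, as the steps at the top of this guide describe.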
- diff --git a/src/go/plugin/go.d/examples/simple/main.go b/src/go/plugin/go.d/examples/simple/main.go index 215e91f14..ce1d7ec80 100644 --- a/src/go/plugin/go.d/examples/simple/main.go +++ b/src/go/plugin/go.d/examples/simple/main.go @@ -10,13 +10,13 @@ import ( "os" "path" + "github.com/jessevdk/go-flags" + "github.com/netdata/netdata/go/plugins/logger" + "github.com/netdata/netdata/go/plugins/pkg/multipath" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" "github.com/netdata/netdata/go/plugins/plugin/go.d/cli" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/multipath" - - "github.com/jessevdk/go-flags" ) var version = "v0.0.1-example" diff --git a/src/go/plugin/go.d/modules/activemq/activemq.go b/src/go/plugin/go.d/modules/activemq/activemq.go index bf47be72a..bd04dd7d3 100644 --- a/src/go/plugin/go.d/modules/activemq/activemq.go +++ b/src/go/plugin/go.d/modules/activemq/activemq.go @@ -5,10 +5,12 @@ package activemq import ( _ "embed" "errors" + "fmt" "time" + "github.com/netdata/netdata/go/plugins/pkg/matcher" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -26,12 +28,12 @@ func init() { func New() *ActiveMQ { return &ActiveMQ{ Config: Config{ - HTTP: web.HTTP{ - Request: web.Request{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{ URL: "http://127.0.0.1:8161", }, - Client: web.Client{ - Timeout: web.Duration(time.Second), + ClientConfig: web.ClientConfig{ + Timeout: confopt.Duration(time.Second), }, }, Webadmin: "admin", @@ -45,13 +47,13 @@ func New() *ActiveMQ { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - web.HTTP `yaml:",inline" json:""` - Webadmin string `yaml:"webadmin,omitempty" json:"webadmin"` - MaxQueues int `yaml:"max_queues" json:"max_queues"` - MaxTopics int `yaml:"max_topics" json:"max_topics"` - QueuesFilter string `yaml:"queues_filter,omitempty" json:"queues_filter"` - TopicsFilter string `yaml:"topics_filter,omitempty" json:"topics_filter"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + web.HTTPConfig `yaml:",inline" json:""` + Webadmin string `yaml:"webadmin,omitempty" json:"webadmin"` + MaxQueues int `yaml:"max_queues" json:"max_queues"` + MaxTopics int `yaml:"max_topics" json:"max_topics"` + QueuesFilter string `yaml:"queues_filter,omitempty" json:"queues_filter"` + TopicsFilter string `yaml:"topics_filter,omitempty" json:"topics_filter"` } type ActiveMQ struct { @@ -74,31 +76,27 @@ func (a *ActiveMQ) Configuration() any { func (a *ActiveMQ) Init() error { if err := a.validateConfig(); err != nil { - a.Errorf("config validation: %v", err) - return err + return fmt.Errorf("config validation: %v", err) } qf, err := a.initQueuesFiler() if err != nil { - a.Error(err) - return err + return fmt.Errorf("init queues filer: %v", err) } a.queuesFilter = qf tf, err := a.initTopicsFilter() if err != nil { - a.Error(err) - return err + return fmt.Errorf("init topics filter: %v", err) } a.topicsFilter = tf - client, err := web.NewHTTPClient(a.Client) + client, err := web.NewHTTPClient(a.ClientConfig) if err != nil { - a.Error(err) - return err + return fmt.Errorf("create http client: %v", err) } - a.apiClient = newAPIClient(client, a.Request, a.Webadmin) + a.apiClient = newAPIClient(client, 
a.RequestConfig, a.Webadmin) return nil } @@ -106,7 +104,6 @@ func (a *ActiveMQ) Init() error { func (a *ActiveMQ) Check() error { mx, err := a.collect() if err != nil { - a.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/activemq/activemq_test.go b/src/go/plugin/go.d/modules/activemq/activemq_test.go index e2640f440..1b0ef480a 100644 --- a/src/go/plugin/go.d/modules/activemq/activemq_test.go +++ b/src/go/plugin/go.d/modules/activemq/activemq_test.go @@ -178,7 +178,7 @@ func TestActiveMQ_Check(t *testing.T) { defer ts.Close() job := New() - job.HTTP.Request = web.Request{URL: ts.URL} + job.HTTPConfig.RequestConfig = web.RequestConfig{URL: ts.URL} job.Webadmin = "webadmin" require.NoError(t, job.Init()) @@ -211,7 +211,7 @@ func TestActiveMQ_Collect(t *testing.T) { defer ts.Close() job := New() - job.HTTP.Request = web.Request{URL: ts.URL} + job.HTTPConfig.RequestConfig = web.RequestConfig{URL: ts.URL} job.Webadmin = "webadmin" require.NoError(t, job.Init()) @@ -319,7 +319,7 @@ func TestActiveMQ_404(t *testing.T) { job := New() job.Webadmin = "webadmin" - job.HTTP.Request = web.Request{URL: ts.URL} + job.HTTPConfig.RequestConfig = web.RequestConfig{URL: ts.URL} require.NoError(t, job.Init()) assert.Error(t, job.Check()) @@ -333,7 +333,7 @@ func TestActiveMQ_InvalidData(t *testing.T) { mod := New() mod.Webadmin = "webadmin" - mod.HTTP.Request = web.Request{URL: ts.URL} + mod.HTTPConfig.RequestConfig = web.RequestConfig{URL: ts.URL} require.NoError(t, mod.Init()) assert.Error(t, mod.Check()) diff --git a/src/go/plugin/go.d/modules/activemq/apiclient.go b/src/go/plugin/go.d/modules/activemq/apiclient.go index 7f99c9bad..fd790b885 100644 --- a/src/go/plugin/go.d/modules/activemq/apiclient.go +++ b/src/go/plugin/go.d/modules/activemq/apiclient.go @@ -5,10 +5,7 @@ package activemq import ( "encoding/xml" "fmt" - "io" "net/http" - "net/url" - "path" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -45,7 +42,7 @@ type stats struct { const pathStats = "/%s/xml/%s.jsp" -func newAPIClient(client *http.Client, request web.Request, webadmin string) *apiClient { +func newAPIClient(client *http.Client, request web.RequestConfig, webadmin string) *apiClient { return &apiClient{ httpClient: client, request: request, @@ -55,83 +52,36 @@ func newAPIClient(client *http.Client, request web.Request, webadmin string) *ap type apiClient struct { httpClient *http.Client - request web.Request + request web.RequestConfig webadmin string } func (a *apiClient) getQueues() (*queues, error) { - req, err := a.createRequest(fmt.Sprintf(pathStats, a.webadmin, keyQueues)) + req, err := web.NewHTTPRequestWithPath(a.request, fmt.Sprintf(pathStats, a.webadmin, keyQueues)) if err != nil { - return nil, fmt.Errorf("error on creating request '%s' : %v", a.request.URL, err) - } - - resp, err := a.doRequestOK(req) - - defer closeBody(resp) - - if err != nil { - return nil, err + return nil, fmt.Errorf("failed to create HTTP request '%s': %v", a.request.URL, err) } var queues queues - if err := xml.NewDecoder(resp.Body).Decode(&queues); err != nil { - return nil, fmt.Errorf("error on decoding resp from %s : %s", req.URL, err) + if err := web.DoHTTP(a.httpClient).RequestXML(req, &queues); err != nil { + return nil, err } return &queues, nil } func (a *apiClient) getTopics() (*topics, error) { - req, err := a.createRequest(fmt.Sprintf(pathStats, a.webadmin, keyTopics)) + req, err := web.NewHTTPRequestWithPath(a.request, fmt.Sprintf(pathStats, a.webadmin, keyTopics)) if err != nil { - 
return nil, fmt.Errorf("error on creating request '%s' : %v", a.request.URL, err) - } - - resp, err := a.doRequestOK(req) - - defer closeBody(resp) - - if err != nil { - return nil, err + return nil, fmt.Errorf("failed to create HTTP request '%s': %v", a.request.URL, err) } var topics topics - if err := xml.NewDecoder(resp.Body).Decode(&topics); err != nil { - return nil, fmt.Errorf("error on decoding resp from %s : %s", req.URL, err) - } - - return &topics, nil -} - -func (a *apiClient) doRequestOK(req *http.Request) (*http.Response, error) { - resp, err := a.httpClient.Do(req) - if err != nil { - return resp, fmt.Errorf("error on request to %s : %v", req.URL, err) - } - - if resp.StatusCode != http.StatusOK { - return resp, fmt.Errorf("%s returned HTTP status %d", req.URL, resp.StatusCode) - } - - return resp, err -} - -func (a *apiClient) createRequest(urlPath string) (*http.Request, error) { - req := a.request.Copy() - u, err := url.Parse(req.URL) - if err != nil { + if err := web.DoHTTP(a.httpClient).RequestXML(req, &topics); err != nil { return nil, err } - u.Path = path.Join(u.Path, urlPath) - req.URL = u.String() - return web.NewHTTPRequest(req) -} -func closeBody(resp *http.Response) { - if resp != nil && resp.Body != nil { - _, _ = io.Copy(io.Discard, resp.Body) - _ = resp.Body.Close() - } + return &topics, nil } diff --git a/src/go/plugin/go.d/modules/activemq/config_schema.json b/src/go/plugin/go.d/modules/activemq/config_schema.json index df71bcadf..894956118 100644 --- a/src/go/plugin/go.d/modules/activemq/config_schema.json +++ b/src/go/plugin/go.d/modules/activemq/config_schema.json @@ -140,7 +140,6 @@ "url", "webadmin" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/activemq/init.go b/src/go/plugin/go.d/modules/activemq/init.go index e48dacad5..2797af7cb 100644 --- a/src/go/plugin/go.d/modules/activemq/init.go +++ b/src/go/plugin/go.d/modules/activemq/init.go @@ -4,7 +4,8 @@ package activemq import ( "errors" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher" + + "github.com/netdata/netdata/go/plugins/pkg/matcher" ) func (a *ActiveMQ) validateConfig() error { diff --git a/src/go/plugin/go.d/modules/activemq/integrations/activemq.md b/src/go/plugin/go.d/modules/activemq/integrations/activemq.md index fc215bfb9..f882df0d5 100644 --- a/src/go/plugin/go.d/modules/activemq/integrations/activemq.md +++ b/src/go/plugin/go.d/modules/activemq/integrations/activemq.md @@ -91,8 +91,8 @@ No action required. The configuration file name for this integration is `go.d/activemq.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -113,8 +113,8 @@ The following options can be defined globally: update_every, autodetection_retry | webadmin | Webadmin root path. | admin | yes | | max_queues | Maximum number of concurrently collected queues. | 50 | no | | max_topics | Maximum number of concurrently collected topics. 
| 50 | no | -| queues_filter | Queues filter. Syntax is [simple patterns](/src/libnetdata/simple_pattern/README.md#simple-patterns). | | no | -| topics_filter | Topics filter. Syntax is [simple patterns](/src/libnetdata/simple_pattern/README.md#simple-patterns). | | no | +| queues_filter | Queues filter. Syntax is [simple patterns](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md#simple-patterns). | | no | +| topics_filter | Topics filter. Syntax is [simple patterns](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md#simple-patterns). | | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | | proxy_username | Username for proxy basic HTTP authentication. | | no | diff --git a/src/go/plugin/go.d/modules/adaptecraid/adaptec.go b/src/go/plugin/go.d/modules/adaptecraid/adaptec.go index 264390e10..03302f811 100644 --- a/src/go/plugin/go.d/modules/adaptecraid/adaptec.go +++ b/src/go/plugin/go.d/modules/adaptecraid/adaptec.go @@ -1,14 +1,17 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly + package adaptecraid import ( _ "embed" "errors" + "fmt" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" ) //go:embed "config_schema.json" @@ -28,7 +31,7 @@ func init() { func New() *AdaptecRaid { return &AdaptecRaid{ Config: Config{ - Timeout: web.Duration(time.Second * 2), + Timeout: confopt.Duration(time.Second * 2), }, charts: &module.Charts{}, lds: make(map[string]bool), @@ -37,8 +40,8 @@ func New() *AdaptecRaid { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + Timeout confopt.Duration `yaml:"timeout,omitempty" json:"timeout"` } type ( @@ -66,8 +69,7 @@ func (a *AdaptecRaid) Configuration() any { func (a *AdaptecRaid) Init() error { arcconfExec, err := a.initArcconfCliExec() if err != nil { - a.Errorf("arcconf exec initialization: %v", err) - return err + return fmt.Errorf("arcconf exec initialization: %v", err) } a.exec = arcconfExec @@ -77,7 +79,6 @@ func (a *AdaptecRaid) Init() error { func (a *AdaptecRaid) Check() error { mx, err := a.collect() if err != nil { - a.Error(err) return err } diff --git a/src/go/plugin/go.d/modules/adaptecraid/adaptec_test.go b/src/go/plugin/go.d/modules/adaptecraid/adaptec_test.go index 9abe5c984..5f2b3f0e7 100644 --- a/src/go/plugin/go.d/modules/adaptecraid/adaptec_test.go +++ b/src/go/plugin/go.d/modules/adaptecraid/adaptec_test.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly + package adaptecraid import ( diff --git a/src/go/plugin/go.d/modules/adaptecraid/charts.go b/src/go/plugin/go.d/modules/adaptecraid/charts.go index 65be20199..291bb035a 100644 --- a/src/go/plugin/go.d/modules/adaptecraid/charts.go +++ b/src/go/plugin/go.d/modules/adaptecraid/charts.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly + package adaptecraid import ( diff --git a/src/go/plugin/go.d/modules/adaptecraid/collect.go b/src/go/plugin/go.d/modules/adaptecraid/collect.go index b4439ba8e..718d5ee76 100644 
--- a/src/go/plugin/go.d/modules/adaptecraid/collect.go +++ b/src/go/plugin/go.d/modules/adaptecraid/collect.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly + package adaptecraid import ( diff --git a/src/go/plugin/go.d/modules/adaptecraid/collect_ld.go b/src/go/plugin/go.d/modules/adaptecraid/collect_ld.go index 180f97490..9204f330c 100644 --- a/src/go/plugin/go.d/modules/adaptecraid/collect_ld.go +++ b/src/go/plugin/go.d/modules/adaptecraid/collect_ld.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly + package adaptecraid import ( diff --git a/src/go/plugin/go.d/modules/adaptecraid/collect_pd.go b/src/go/plugin/go.d/modules/adaptecraid/collect_pd.go index 272266b47..026e3d4f3 100644 --- a/src/go/plugin/go.d/modules/adaptecraid/collect_pd.go +++ b/src/go/plugin/go.d/modules/adaptecraid/collect_pd.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly + package adaptecraid import ( diff --git a/src/go/plugin/go.d/modules/adaptecraid/config_schema.json b/src/go/plugin/go.d/modules/adaptecraid/config_schema.json index ad54f1585..5b1962813 100644 --- a/src/go/plugin/go.d/modules/adaptecraid/config_schema.json +++ b/src/go/plugin/go.d/modules/adaptecraid/config_schema.json @@ -19,7 +19,6 @@ "default": 2 } }, - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/adaptecraid/doc.go b/src/go/plugin/go.d/modules/adaptecraid/doc.go new file mode 100644 index 000000000..cd96e5ffa --- /dev/null +++ b/src/go/plugin/go.d/modules/adaptecraid/doc.go @@ -0,0 +1,3 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package adaptecraid diff --git a/src/go/plugin/go.d/modules/adaptecraid/exec.go b/src/go/plugin/go.d/modules/adaptecraid/exec.go index 0577e6234..4bc506ce7 100644 --- a/src/go/plugin/go.d/modules/adaptecraid/exec.go +++ b/src/go/plugin/go.d/modules/adaptecraid/exec.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly + package adaptecraid import ( diff --git a/src/go/plugin/go.d/modules/adaptecraid/init.go b/src/go/plugin/go.d/modules/adaptecraid/init.go index de8acc273..67e0934f9 100644 --- a/src/go/plugin/go.d/modules/adaptecraid/init.go +++ b/src/go/plugin/go.d/modules/adaptecraid/init.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly + package adaptecraid import ( diff --git a/src/go/plugin/go.d/modules/adaptecraid/integrations/adaptec_raid.md b/src/go/plugin/go.d/modules/adaptecraid/integrations/adaptec_raid.md index a38207ffb..b2fefa1b3 100644 --- a/src/go/plugin/go.d/modules/adaptecraid/integrations/adaptec_raid.md +++ b/src/go/plugin/go.d/modules/adaptecraid/integrations/adaptec_raid.md @@ -33,7 +33,10 @@ Executed commands: -This collector is supported on all platforms. +This collector is only supported on the following platforms: + +- Linux +- BSD This collector only supports collecting metrics from a single instance of this integration. @@ -126,8 +129,8 @@ No action required. The configuration file name for this integration is `go.d/adaptec_raid.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/adaptecraid/metadata.yaml b/src/go/plugin/go.d/modules/adaptecraid/metadata.yaml index e573994f5..5ab748e75 100644 --- a/src/go/plugin/go.d/modules/adaptecraid/metadata.yaml +++ b/src/go/plugin/go.d/modules/adaptecraid/metadata.yaml @@ -33,7 +33,7 @@ modules: - `arcconf GETCONFIG 1 PD` method_description: "" supported_platforms: - include: [] + include: [Linux, BSD] exclude: [] multi_instance: false additional_permissions: diff --git a/src/go/plugin/go.d/modules/ap/ap.go b/src/go/plugin/go.d/modules/ap/ap.go index 93dd06d08..c79bbaee0 100644 --- a/src/go/plugin/go.d/modules/ap/ap.go +++ b/src/go/plugin/go.d/modules/ap/ap.go @@ -1,14 +1,17 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux + package ap import ( _ "embed" "errors" + "fmt" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" ) //go:embed "config_schema.json" @@ -29,7 +32,7 @@ func New() *AP { return &AP{ Config: Config{ BinaryPath: "/usr/sbin/iw", - Timeout: web.Duration(time.Second * 2), + Timeout: confopt.Duration(time.Second * 2), }, charts: &module.Charts{}, seenIfaces: make(map[string]*iwInterface), @@ -37,9 +40,9 @@ func New() *AP { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"` - BinaryPath string `yaml:"binary_path,omitempty" json:"binary_path"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + Timeout confopt.Duration `yaml:"timeout,omitempty" json:"timeout"` + BinaryPath string `yaml:"binary_path,omitempty" json:"binary_path"` } type ( @@ -65,14 +68,12 @@ func (a *AP) Configuration() any { func (a *AP) Init() error { if err := a.validateConfig(); err != nil { - a.Errorf("config validation: %s", err) - return err + return fmt.Errorf("config validation: %s", err) } iw, err := a.initIwExec() if err != nil { - a.Errorf("iw dev exec initialization: %v", err) - return err + return fmt.Errorf("iw exec initialization: %v", err) } a.exec = iw @@ -82,7 +83,6 @@ func (a *AP) Init() error { func (a *AP) Check() error { mx, err := a.collect() if err != nil { - a.Error(err) return err } diff --git a/src/go/plugin/go.d/modules/ap/ap_test.go b/src/go/plugin/go.d/modules/ap/ap_test.go index 237e00e9e..19eab52d2 100644 --- a/src/go/plugin/go.d/modules/ap/ap_test.go +++ b/src/go/plugin/go.d/modules/ap/ap_test.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux + package ap import ( @@ -193,7 +195,7 @@ func TestAP_Collect(t *testing.T) { prepareMock: prepareMockErrOnDevices, wantMetrics: nil, }, - "error on statis stats call": { + "error on station stats call": { prepareMock: prepareMockErrOnStationStats, wantMetrics: nil, }, @@ -211,25 +213,11 @@ func TestAP_Collect(t *testing.T) { mx := ap.Collect() assert.Equal(t, test.wantMetrics, mx) - assert.Equal(t, test.wantCharts, len(*ap.Charts()), "Charts") - testMetricsHasAllChartsDims(t, ap, mx) - }) - } -} 
-func testMetricsHasAllChartsDims(t *testing.T, ap *AP, mx map[string]int64) { - for _, chart := range *ap.Charts() { - if chart.Obsolete { - continue - } - for _, dim := range chart.Dims { - _, ok := mx[dim.ID] - assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) - } - for _, v := range chart.Vars { - _, ok := mx[v.ID] - assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) - } + assert.Equal(t, test.wantCharts, len(*ap.Charts()), "wantCharts") + + module.TestMetricsHasAllChartsDims(t, ap.Charts(), mx) + }) } } diff --git a/src/go/plugin/go.d/modules/ap/charts.go b/src/go/plugin/go.d/modules/ap/charts.go index b8c51c433..f4aa5d80a 100644 --- a/src/go/plugin/go.d/modules/ap/charts.go +++ b/src/go/plugin/go.d/modules/ap/charts.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux + package ap import ( diff --git a/src/go/plugin/go.d/modules/ap/collect.go b/src/go/plugin/go.d/modules/ap/collect.go index ba32f3ef7..caf2d9e03 100644 --- a/src/go/plugin/go.d/modules/ap/collect.go +++ b/src/go/plugin/go.d/modules/ap/collect.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux + package ap import ( diff --git a/src/go/plugin/go.d/modules/ap/config_schema.json b/src/go/plugin/go.d/modules/ap/config_schema.json index 4566247f1..621e1eba9 100644 --- a/src/go/plugin/go.d/modules/ap/config_schema.json +++ b/src/go/plugin/go.d/modules/ap/config_schema.json @@ -28,7 +28,6 @@ "required": [ "binary_path" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/ap/doc.go b/src/go/plugin/go.d/modules/ap/doc.go new file mode 100644 index 000000000..4c0be0914 --- /dev/null +++ b/src/go/plugin/go.d/modules/ap/doc.go @@ -0,0 +1,3 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package ap diff --git a/src/go/plugin/go.d/modules/ap/exec.go b/src/go/plugin/go.d/modules/ap/exec.go index 8c25f6777..1dcaca862 100644 --- a/src/go/plugin/go.d/modules/ap/exec.go +++ b/src/go/plugin/go.d/modules/ap/exec.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux + package ap import ( diff --git a/src/go/plugin/go.d/modules/ap/init.go b/src/go/plugin/go.d/modules/ap/init.go index 6031f6caa..2efb64cea 100644 --- a/src/go/plugin/go.d/modules/ap/init.go +++ b/src/go/plugin/go.d/modules/ap/init.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux + package ap import ( diff --git a/src/go/plugin/go.d/modules/ap/integrations/access_points.md b/src/go/plugin/go.d/modules/ap/integrations/access_points.md index fa2134ed1..d1581d1ea 100644 --- a/src/go/plugin/go.d/modules/ap/integrations/access_points.md +++ b/src/go/plugin/go.d/modules/ap/integrations/access_points.md @@ -102,8 +102,8 @@ Make sure the `iw` utility is installed. The configuration file name for this integration is `go.d/ap.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/apache/apache.go b/src/go/plugin/go.d/modules/apache/apache.go index d0869353d..aa8a62f34 100644 --- a/src/go/plugin/go.d/modules/apache/apache.go +++ b/src/go/plugin/go.d/modules/apache/apache.go @@ -5,11 +5,13 @@ package apache import ( _ "embed" "errors" + "fmt" "net/http" "sync" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -27,12 +29,12 @@ func init() { func New() *Apache { return &Apache{ Config: Config{ - HTTP: web.HTTP{ - Request: web.Request{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{ URL: "http://127.0.0.1/server-status?auto", }, - Client: web.Client{ - Timeout: web.Duration(time.Second), + ClientConfig: web.ClientConfig{ + Timeout: confopt.Duration(time.Second), }, }, }, @@ -42,8 +44,8 @@ func New() *Apache { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - web.HTTP `yaml:",inline" json:""` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + web.HTTPConfig `yaml:",inline" json:""` } type Apache struct { @@ -63,14 +65,12 @@ func (a *Apache) Configuration() any { func (a *Apache) Init() error { if err := a.validateConfig(); err != nil { - a.Errorf("config validation: %v", err) - return err + return fmt.Errorf("config validation: %v", err) } httpClient, err := a.initHTTPClient() if err != nil { - a.Errorf("init HTTP client: %v", err) - return err + return fmt.Errorf("init HTTP client: %v", err) } a.httpClient = httpClient @@ -83,7 +83,6 @@ func (a *Apache) Init() error { func (a *Apache) Check() error { mx, err := a.collect() if err != nil { - a.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/apache/apache_test.go b/src/go/plugin/go.d/modules/apache/apache_test.go index 64fa6ed96..303010045 100644 --- a/src/go/plugin/go.d/modules/apache/apache_test.go +++ b/src/go/plugin/go.d/modules/apache/apache_test.go @@ -55,16 +55,16 @@ func TestApache_Init(t *testing.T) { "fail when URL not set": { wantFail: true, config: Config{ - HTTP: web.HTTP{ - Request: web.Request{URL: ""}, + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{URL: ""}, }, }, }, "fail when URL has no wantMetrics suffix": { wantFail: true, config: Config{ - HTTP: web.HTTP{ - Request: web.Request{URL: "http://127.0.0.1:38001"}, + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{URL: "http://127.0.0.1:38001"}, }, }, }, diff --git a/src/go/plugin/go.d/modules/apache/collect.go b/src/go/plugin/go.d/modules/apache/collect.go index 79de7722a..bbf790554 100644 --- a/src/go/plugin/go.d/modules/apache/collect.go +++ b/src/go/plugin/go.d/modules/apache/collect.go @@ -6,7 +6,6 @@ import ( "bufio" "fmt" "io" - "net/http" "strconv" "strings" @@ -31,22 +30,24 @@ func (a *Apache) collect() (map[string]int64, error) { } func (a *Apache) scrapeStatus() (*serverStatus, error) { - req, err := web.NewHTTPRequest(a.Request) + req, err := web.NewHTTPRequest(a.RequestConfig) if err != nil { return nil, err } - resp, err := a.httpClient.Do(req) - if err != nil { - return nil, fmt.Errorf("error on HTTP request '%s': %v", req.URL, err) - } - defer closeBody(resp) + var stats *serverStatus + var perr error - if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("'%s' returned HTTP status code: %d", req.URL, resp.StatusCode) + if err 
:= web.DoHTTP(a.httpClient).Request(req, func(body io.Reader) error { + if stats, perr = parseResponse(body); perr != nil { + return perr + } + return nil + }); err != nil { + return nil, err } - return parseResponse(resp.Body) + return stats, nil } func parseResponse(r io.Reader) (*serverStatus, error) { @@ -154,10 +155,3 @@ func parseFloat(value string) *float64 { } return &v } - -func closeBody(resp *http.Response) { - if resp != nil && resp.Body != nil { - _, _ = io.Copy(io.Discard, resp.Body) - _ = resp.Body.Close() - } -} diff --git a/src/go/plugin/go.d/modules/apache/config_schema.json b/src/go/plugin/go.d/modules/apache/config_schema.json index b92363e93..4c68bbd57 100644 --- a/src/go/plugin/go.d/modules/apache/config_schema.json +++ b/src/go/plugin/go.d/modules/apache/config_schema.json @@ -105,7 +105,6 @@ "required": [ "url" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/apache/init.go b/src/go/plugin/go.d/modules/apache/init.go index e13186f01..cb20f94f1 100644 --- a/src/go/plugin/go.d/modules/apache/init.go +++ b/src/go/plugin/go.d/modules/apache/init.go @@ -21,5 +21,5 @@ func (a *Apache) validateConfig() error { } func (a *Apache) initHTTPClient() (*http.Client, error) { - return web.NewHTTPClient(a.Client) + return web.NewHTTPClient(a.ClientConfig) } diff --git a/src/go/plugin/go.d/modules/apache/integrations/apache.md b/src/go/plugin/go.d/modules/apache/integrations/apache.md index ec9f88883..c97d26c90 100644 --- a/src/go/plugin/go.d/modules/apache/integrations/apache.md +++ b/src/go/plugin/go.d/modules/apache/integrations/apache.md @@ -108,8 +108,8 @@ There are no alerts configured by default for this integration. The configuration file name for this integration is `go.d/apache.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/apache/integrations/httpd.md b/src/go/plugin/go.d/modules/apache/integrations/httpd.md index 258365180..02b3face7 100644 --- a/src/go/plugin/go.d/modules/apache/integrations/httpd.md +++ b/src/go/plugin/go.d/modules/apache/integrations/httpd.md @@ -108,8 +108,8 @@ There are no alerts configured by default for this integration. The configuration file name for this integration is `go.d/apache.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/apcupsd/README.md b/src/go/plugin/go.d/modules/apcupsd/README.md new file mode 120000 index 000000000..fc6681fe6 --- /dev/null +++ b/src/go/plugin/go.d/modules/apcupsd/README.md @@ -0,0 +1 @@ +integrations/apc_ups.md \ No newline at end of file diff --git a/src/go/plugin/go.d/modules/apcupsd/apcupsd.go b/src/go/plugin/go.d/modules/apcupsd/apcupsd.go new file mode 100644 index 000000000..97c24aa20 --- /dev/null +++ b/src/go/plugin/go.d/modules/apcupsd/apcupsd.go @@ -0,0 +1,98 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package apcupsd + +import ( + _ "embed" + "errors" + "time" + + "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("apcupsd", module.Creator{ + JobConfigSchema: configSchema, + Create: func() module.Module { return New() }, + Config: func() any { return &Config{} }, + }) +} + +func New() *Apcupsd { + return &Apcupsd{ + Config: Config{ + Address: "127.0.0.1:3551", + Timeout: confopt.Duration(time.Second * 3), + }, + newConn: newUpsdConn, + charts: charts.Copy(), + } +} + +type Config struct { + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + Address string `yaml:"address" json:"address"` + Timeout confopt.Duration `yaml:"timeout,omitempty" json:"timeout"` +} + +type Apcupsd struct { + module.Base + Config `yaml:",inline" json:""` + + charts *module.Charts + + conn apcupsdConn + newConn func(Config) apcupsdConn +} + +func (a *Apcupsd) Configuration() any { + return a.Config +} + +func (a *Apcupsd) Init() error { + if a.Address == "" { + return errors.New("config: 'address' not set") + } + + return nil +} + +func (a *Apcupsd) Check() error { + mx, err := a.collect() + if err != nil { + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil +} + +func (a *Apcupsd) Charts() *module.Charts { + return a.charts +} + +func (a *Apcupsd) Collect() map[string]int64 { + mx, err := a.collect() + if err != nil { + a.Error(err) + } + + if len(mx) == 0 { + return nil + } + return mx +} + +func (a *Apcupsd) Cleanup() { + if a.conn != nil { + if err := a.conn.disconnect(); err != nil { + a.Warningf("error on disconnect: %v", err) + } + a.conn = nil + } +} diff --git a/src/go/plugin/go.d/modules/apcupsd/apcupsd_test.go b/src/go/plugin/go.d/modules/apcupsd/apcupsd_test.go new file mode 100644 index 000000000..fa189974f --- /dev/null +++ b/src/go/plugin/go.d/modules/apcupsd/apcupsd_test.go @@ -0,0 +1,283 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package apcupsd + +import ( + "errors" + "os" + "strings" + "testing" + + "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + dataConfigJSON, _ = os.ReadFile("testdata/config.json") + dataConfigYAML, _ = os.ReadFile("testdata/config.yaml") + + dataStatus, _ = os.ReadFile("testdata/status.txt") + dataStatusCommlost, _ = os.ReadFile("testdata/status_commlost.txt") +) + +func Test_testDataIsValid(t *testing.T) { + for name, data := range map[string][]byte{ + "dataConfigJSON": dataConfigJSON, + "dataConfigYAML": dataConfigYAML, + "dataStatus": dataStatus, + "dataStatusCommlost": dataStatusCommlost, + } { + require.NotNil(t, data, name) + } +} + +func TestApcupsd_ConfigurationSerialize(t 
*testing.T) { + module.TestConfigurationSerialize(t, &Apcupsd{}, dataConfigJSON, dataConfigYAML) +} + +func TestApcupsd_Cleanup(t *testing.T) { + apc := New() + + require.NotPanics(t, apc.Cleanup) + + mock := prepareMockOk() + apc.newConn = func(Config) apcupsdConn { return mock } + + require.NoError(t, apc.Init()) + _ = apc.Collect() + require.NotPanics(t, apc.Cleanup) + assert.True(t, mock.calledDisconnect) +} + +func TestApcupsd_Init(t *testing.T) { + tests := map[string]struct { + config Config + wantFail bool + }{ + "success on default config": { + wantFail: false, + config: New().Config, + }, + "fails when 'address' option not set": { + wantFail: true, + config: Config{Address: ""}, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + apc := New() + apc.Config = test.config + + if test.wantFail { + assert.Error(t, apc.Init()) + } else { + assert.NoError(t, apc.Init()) + } + }) + } +} + +func TestApcupsd_Check(t *testing.T) { + tests := map[string]struct { + prepareMock func() *mockApcupsdConn + wantFail bool + }{ + "case ok": { + wantFail: false, + prepareMock: prepareMockOk, + }, + "case commlost": { + wantFail: false, + prepareMock: prepareMockOkCommlost, + }, + "error on connect()": { + wantFail: true, + prepareMock: prepareMockErrOnConnect, + }, + "error on status()": { + wantFail: true, + prepareMock: prepareMockErrOnStatus, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + apc := New() + apc.newConn = func(Config) apcupsdConn { return test.prepareMock() } + + require.NoError(t, apc.Init()) + + if test.wantFail { + assert.Error(t, apc.Check()) + } else { + assert.NoError(t, apc.Check()) + } + }) + } +} + +func TestApcupsd_Charts(t *testing.T) { + apc := New() + require.NoError(t, apc.Init()) + assert.NotNil(t, apc.Charts()) +} + +func TestApcupsd_Collect(t *testing.T) { + tests := map[string]struct { + prepareMock func() *mockApcupsdConn + wantCollected map[string]int64 + wantCharts int + wantConnDisconnect bool + }{ + "case ok": { + prepareMock: prepareMockOk, + wantCollected: map[string]int64{ + "battery_charge": 10000, + "battery_seconds_since_replacement": 86400, + "battery_voltage": 2790, + "battery_voltage_nominal": 2400, + "input_frequency": 5000, + "input_voltage": 23530, + "input_voltage_max": 23920, + "input_voltage_min": 23400, + "itemp": 3279, + "load": 5580, + "load_percent": 930, + "output_voltage": 23660, + "output_voltage_nominal": 23000, + "selftest_BT": 0, + "selftest_IP": 0, + "selftest_NG": 0, + "selftest_NO": 1, + "selftest_OK": 0, + "selftest_UNK": 0, + "selftest_WN": 0, + "status_BOOST": 0, + "status_CAL": 0, + "status_COMMLOST": 0, + "status_LOWBATT": 0, + "status_NOBATT": 0, + "status_ONBATT": 0, + "status_ONLINE": 1, + "status_OVERLOAD": 0, + "status_REPLACEBATT": 0, + "status_SHUTTING_DOWN": 0, + "status_SLAVE": 0, + "status_SLAVEDOWN": 0, + "status_TRIM": 0, + "timeleft": 780000, + }, + wantConnDisconnect: false, + }, + "case commlost": { + prepareMock: prepareMockOkCommlost, + wantCollected: map[string]int64{ + "status_BOOST": 0, + "status_CAL": 0, + "status_COMMLOST": 1, + "status_LOWBATT": 0, + "status_NOBATT": 0, + "status_ONBATT": 0, + "status_ONLINE": 0, + "status_OVERLOAD": 0, + "status_REPLACEBATT": 0, + "status_SHUTTING_DOWN": 0, + "status_SLAVE": 0, + "status_SLAVEDOWN": 0, + "status_TRIM": 0, + }, + wantConnDisconnect: false, + }, + "error on connect()": { + prepareMock: prepareMockErrOnConnect, + wantCollected: nil, + wantConnDisconnect: false, + }, + "error on status()": { + 
prepareMock: prepareMockErrOnStatus, + wantCollected: nil, + wantConnDisconnect: true, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + apc := New() + require.NoError(t, apc.Init()) + + mock := test.prepareMock() + apc.newConn = func(Config) apcupsdConn { return mock } + + mx := apc.Collect() + + if _, ok := mx["battery_seconds_since_replacement"]; ok { + mx["battery_seconds_since_replacement"] = 86400 + } + + assert.Equal(t, test.wantCollected, mx) + + if len(test.wantCollected) > 0 { + if strings.Contains(name, "commlost") { + module.TestMetricsHasAllChartsDimsSkip(t, apc.Charts(), mx, func(chart *module.Chart, _ *module.Dim) bool { + return chart.ID != statusChart.ID + }) + } else { + module.TestMetricsHasAllChartsDims(t, apc.Charts(), mx) + } + } + + assert.Equalf(t, test.wantConnDisconnect, mock.calledDisconnect, "calledDisconnect") + }) + } +} + +func prepareMockOk() *mockApcupsdConn { + return &mockApcupsdConn{ + dataStatus: dataStatus, + } +} + +func prepareMockOkCommlost() *mockApcupsdConn { + return &mockApcupsdConn{ + dataStatus: dataStatusCommlost, + } +} + +func prepareMockErrOnConnect() *mockApcupsdConn { + return &mockApcupsdConn{errOnConnect: true} +} + +func prepareMockErrOnStatus() *mockApcupsdConn { + return &mockApcupsdConn{errOnStatus: true} +} + +type mockApcupsdConn struct { + errOnConnect bool + errOnStatus bool + calledDisconnect bool + + dataStatus []byte +} + +func (m *mockApcupsdConn) connect() error { + if m.errOnConnect { + return errors.New("mock error on connect()") + } + return nil +} + +func (m *mockApcupsdConn) disconnect() error { + m.calledDisconnect = true + return nil +} + +func (m *mockApcupsdConn) status() ([]byte, error) { + if m.errOnStatus { + return nil, errors.New("mock error on status()") + } + + return m.dataStatus, nil +} diff --git a/src/go/plugin/go.d/modules/apcupsd/charts.go b/src/go/plugin/go.d/modules/apcupsd/charts.go new file mode 100644 index 000000000..b6d99e214 --- /dev/null +++ b/src/go/plugin/go.d/modules/apcupsd/charts.go @@ -0,0 +1,224 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package apcupsd + +import ( + "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" +) + +const ( + prioUpsStatus = module.Priority + iota + prioUpsSelftest + + prioUpsBatteryCharge + prioUpsBatteryTimeRemaining + prioUpsBatteryTimeSinceReplacement + prioUpsBatteryVoltage + + prioUpsLoadCapacityUtilization + prioUpsLoad + + prioUpsTemperature + + prioUpsInputVoltage + prioUpsInputFrequency + + prioUpsOutputVoltage +) + +var charts = module.Charts{ + statusChart.Copy(), + selftestChart.Copy(), + + batteryChargeChart.Copy(), + batteryTimeRemainingChart.Copy(), + batteryTimeSinceReplacementChart.Copy(), + batteryVoltageChart.Copy(), + + loadCapacityUtilizationChart.Copy(), + loadChart.Copy(), + + internalTemperatureChart.Copy(), + + inputVoltageChart.Copy(), + inputFrequencyChart.Copy(), + + outputVoltageChart.Copy(), +} + +// Status +var ( + statusChart = func() module.Chart { + chart := module.Chart{ + ID: "ups_status", + Title: "UPS Status", + Units: "status", + Fam: "status", + Ctx: "apcupsd.ups_status", + Priority: prioUpsStatus, + Type: module.Line, + } + for _, v := range upsStatuses { + chart.Dims = append(chart.Dims, &module.Dim{ID: "status_" + v, Name: v}) + } + return chart + }() + selftestChart = func() module.Chart { + chart := module.Chart{ + ID: "ups_selftest", + Title: "UPS Self-Test Status", + Units: "status", + Fam: "status", + Ctx: "apcupsd.ups_selftest", + Priority: prioUpsSelftest, + 
Type: module.Line, + } + for _, v := range upsSelftestStatuses { + chart.Dims = append(chart.Dims, &module.Dim{ID: "selftest_" + v, Name: v}) + } + return chart + }() +) + +// Battery +var ( + batteryChargeChart = module.Chart{ + ID: "ups_battery_charge", + Title: "UPS Battery Charge", + Units: "percent", + Fam: "battery", + Ctx: "apcupsd.ups_battery_charge", + Priority: prioUpsBatteryCharge, + Type: module.Area, + Dims: module.Dims{ + {ID: "battery_charge", Name: "charge", Div: precision}, + }, + } + batteryTimeRemainingChart = module.Chart{ + ID: "ups_battery_time_remaining", + Title: "UPS Estimated Runtime on Battery", + Units: "seconds", + Fam: "battery", + Ctx: "apcupsd.ups_battery_time_remaining", + Priority: prioUpsBatteryTimeRemaining, + Type: module.Line, + Dims: module.Dims{ + {ID: "timeleft", Name: "timeleft", Div: precision}, + }, + } + batteryTimeSinceReplacementChart = module.Chart{ + ID: "ups_battery_time_since_replacement", + Title: "UPS Time Since Battery Replacement", + Units: "seconds", + Fam: "battery", + Ctx: "apcupsd.ups_battery_time_since_replacement", + Priority: prioUpsBatteryTimeSinceReplacement, + Type: module.Line, + Dims: module.Dims{ + {ID: "battery_seconds_since_replacement", Name: "since_replacement"}, + }, + } + batteryVoltageChart = module.Chart{ + ID: "ups_battery_voltage", + Title: "UPS Battery Voltage", + Units: "Volts", + Fam: "battery", + Ctx: "apcupsd.ups_battery_voltage", + Priority: prioUpsBatteryVoltage, + Type: module.Line, + Dims: module.Dims{ + {ID: "battery_voltage", Name: "voltage", Div: precision}, + {ID: "battery_voltage_nominal", Name: "nominal_voltage", Div: precision}, + }, + } +) + +// Load +var ( + loadCapacityUtilizationChart = module.Chart{ + ID: "ups_load_capacity_utilization", + Title: "UPS Load Capacity Utilization", + Units: "percent", + Fam: "load", + Ctx: "apcupsd.ups_load_capacity_utilization", + Priority: prioUpsLoadCapacityUtilization, + Type: module.Line, + Dims: module.Dims{ + {ID: "load_percent", Name: "load", Div: precision}, + }, + } + loadChart = module.Chart{ + ID: "ups_load", + Title: "UPS Load", + Units: "Watts", + Fam: "load", + Ctx: "apcupsd.ups_load", + Priority: prioUpsLoad, + Type: module.Line, + Dims: module.Dims{ + {ID: "load", Name: "load", Div: precision}, + }, + } +) + +// Temperature +var ( + internalTemperatureChart = module.Chart{ + ID: "ups_temperature", + Title: "UPS Internal Temperature", + Units: "Celsius", + Fam: "temperature", + Ctx: "apcupsd.ups_temperature", + Priority: prioUpsTemperature, + Type: module.Line, + Dims: module.Dims{ + {ID: "itemp", Name: "temperature", Div: precision}, + }, + } +) + +// Input +var ( + inputVoltageChart = module.Chart{ + ID: "ups_input_voltage", + Title: "UPS Input Voltage", + Units: "Volts", + Fam: "input", + Ctx: "apcupsd.ups_input_voltage", + Priority: prioUpsInputVoltage, + Type: module.Line, + Dims: module.Dims{ + {ID: "input_voltage", Name: "voltage", Div: precision}, + {ID: "input_voltage_min", Name: "min_voltage", Div: precision}, + {ID: "input_voltage_max", Name: "max_voltage", Div: precision}, + }, + } + inputFrequencyChart = module.Chart{ + ID: "ups_input_frequency", + Title: "UPS Input Frequency", + Units: "Hz", + Fam: "input", + Ctx: "apcupsd.ups_input_frequency", + Priority: prioUpsInputFrequency, + Type: module.Line, + Dims: module.Dims{ + {ID: "input_frequency", Name: "frequency", Div: precision}, + }, + } +) + +// Output +var ( + outputVoltageChart = module.Chart{ + ID: "ups_output_voltage", + Title: "UPS Output Voltage", + Units: "Volts", + Fam: 
"output", + Ctx: "apcupsd.ups_output_voltage", + Priority: prioUpsOutputVoltage, + Type: module.Line, + Dims: module.Dims{ + {ID: "output_voltage", Name: "voltage", Div: precision}, + }, + } +) diff --git a/src/go/plugin/go.d/modules/apcupsd/client.go b/src/go/plugin/go.d/modules/apcupsd/client.go new file mode 100644 index 000000000..713bb92ef --- /dev/null +++ b/src/go/plugin/go.d/modules/apcupsd/client.go @@ -0,0 +1,115 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package apcupsd + +import ( + "bytes" + "encoding/binary" + "io" + "net" + "time" +) + +type apcupsdConn interface { + connect() error + disconnect() error + status() ([]byte, error) +} + +func newUpsdConn(conf Config) apcupsdConn { + return &apcupsdClient{ + address: conf.Address, + timeout: conf.Timeout.Duration(), + } +} + +type apcupsdClient struct { + address string + timeout time.Duration + conn net.Conn +} + +func (c *apcupsdClient) connect() error { + if c.conn != nil { + _ = c.disconnect() + } + + conn, err := net.DialTimeout("tcp", c.address, c.timeout) + if err != nil { + return err + } + + c.conn = conn + + return nil +} + +func (c *apcupsdClient) disconnect() error { + if c.conn != nil { + err := c.conn.Close() + c.conn = nil + return err + } + return nil +} + +func (c *apcupsdClient) status() ([]byte, error) { + if err := c.send("status"); err != nil { + return nil, err + } + return c.receive() +} + +func (c *apcupsdClient) send(cmd string) error { + // https://github.com/therealbstern/apcupsd/blob/224d19d5faa508d04267f6135fe53d50800550de/src/lib/apclibnis.c#L153 + + msgLength := make([]byte, 2) + + binary.BigEndian.PutUint16(msgLength, uint16(len(cmd))) + + if err := c.conn.SetWriteDeadline(c.deadline()); err != nil { + return err + } + + if _, err := c.conn.Write(append(msgLength, cmd...)); err != nil { + return err + } + + return nil +} + +func (c *apcupsdClient) receive() ([]byte, error) { + // https://github.com/therealbstern/apcupsd/blob/224d19d5faa508d04267f6135fe53d50800550de/src/apcnis.c#L54 + + var buf bytes.Buffer + msgLength := make([]byte, 2) + + for { + if err := c.conn.SetReadDeadline(c.deadline()); err != nil { + return nil, err + } + + if _, err := io.ReadFull(c.conn, msgLength); err != nil { + return nil, err + } + + length := binary.BigEndian.Uint16(msgLength) + if length == 0 { + break + } + + if err := c.conn.SetReadDeadline(c.deadline()); err != nil { + return nil, err + } + + if _, err := io.CopyN(&buf, c.conn, int64(length)); err != nil { + return nil, err + } + } + + return buf.Bytes(), nil +} + +func (c *apcupsdClient) deadline() time.Time { + return time.Now().Add(c.timeout) +} diff --git a/src/go/plugin/go.d/modules/apcupsd/collect.go b/src/go/plugin/go.d/modules/apcupsd/collect.go new file mode 100644 index 000000000..74d566e00 --- /dev/null +++ b/src/go/plugin/go.d/modules/apcupsd/collect.go @@ -0,0 +1,144 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package apcupsd + +import ( + "errors" + "fmt" + "strings" + "time" +) + +const precision = 100 + +func (a *Apcupsd) collect() (map[string]int64, error) { + if a.conn == nil { + conn, err := a.establishConnection() + if err != nil { + return nil, err + } + a.conn = conn + } + + resp, err := a.conn.status() + if err != nil { + a.Cleanup() + return nil, err + } + + mx := make(map[string]int64) + + if err := a.collectStatus(mx, resp); err != nil { + return nil, err + } + + return mx, nil +} + +func (a *Apcupsd) collectStatus(mx map[string]int64, resp []byte) error { + st, err := parseStatus(resp) + if err != nil { + return 
fmt.Errorf("failed to parse status: %v", err) + } + + if st.status == "" { + return errors.New("unexpected response: status is empty") + } + + for _, v := range upsStatuses { + mx["status_"+v] = 0 + } + for _, v := range strings.Fields(st.status) { + mx["status_"+v] = 1 + } + + switch st.status { + case "COMMLOST", "SHUTTING_DOWN": + return nil + } + + if st.selftest != "" { + for _, v := range upsSelftestStatuses { + mx["selftest_"+v] = 0 + } + mx["selftest_"+st.selftest] = 1 + } + + if st.bcharge != nil { + mx["battery_charge"] = int64(*st.bcharge * precision) + } + if st.battv != nil { + mx["battery_voltage"] = int64(*st.battv * precision) + } + if st.nombattv != nil { + mx["battery_voltage_nominal"] = int64(*st.nombattv * precision) + } + if st.linev != nil { + mx["input_voltage"] = int64(*st.linev * precision) + } + if st.minlinev != nil { + mx["input_voltage_min"] = int64(*st.minlinev * precision) + } + if st.maxlinev != nil { + mx["input_voltage_max"] = int64(*st.maxlinev * precision) + } + if st.linefreq != nil { + mx["input_frequency"] = int64(*st.linefreq * precision) + } + if st.outputv != nil { + mx["output_voltage"] = int64(*st.outputv * precision) + } + if st.nomoutv != nil { + mx["output_voltage_nominal"] = int64(*st.nomoutv * precision) + } + if st.loadpct != nil { + mx["load_percent"] = int64(*st.loadpct * precision) + } + if st.itemp != nil { + mx["itemp"] = int64(*st.itemp * precision) + } + if st.timeleft != nil { + mx["timeleft"] = int64(*st.timeleft * 60 * precision) // to seconds + } + if st.nompower != nil && st.loadpct != nil { + mx["load"] = int64(*st.nompower * *st.loadpct) + } + if st.battdate != "" { + if v, err := battdateSecondsAgo(st.battdate); err != nil { + a.Debugf("failed to calculate time since battery replacement for date '%s': %v", st.battdate, err) + } else { + mx["battery_seconds_since_replacement"] = v + } + } + + return nil +} + +func battdateSecondsAgo(battdate string) (int64, error) { + var layout string + + if strings.ContainsRune(battdate, '-') { + layout = "2006-01-02" + } else { + layout = "01/02/06" + } + + date, err := time.Parse(layout, battdate) + if err != nil { + return 0, err + } + + secsAgo := int64(time.Now().Sub(date).Seconds()) + + return secsAgo, nil +} + +func (a *Apcupsd) establishConnection() (apcupsdConn, error) { + conn := a.newConn(a.Config) + + if err := conn.connect(); err != nil { + return nil, err + } + + return conn, nil +} diff --git a/src/go/plugin/go.d/modules/apcupsd/config_schema.json b/src/go/plugin/go.d/modules/apcupsd/config_schema.json new file mode 100644 index 000000000..b9504e575 --- /dev/null +++ b/src/go/plugin/go.d/modules/apcupsd/config_schema.json @@ -0,0 +1,43 @@ +{ + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Apcupsd collector configuration.", + "type": "object", + "properties": { + "update_every": { + "title": "Update every", + "description": "Data collection interval, measured in seconds.", + "type": "integer", + "minimum": 1, + "default": 1 + }, + "address": { + "title": "Address", + "description": "The IP address and port where the Apcupsd daemon listens for connections.", + "type": "string", + "default": "127.0.0.1:3551" + }, + "timeout": { + "title": "Timeout", + "description": "Timeout for establishing a connection and communication (reading and writing) in seconds.", + "type": "number", + "minimum": 0.5, + "default": 1 + } + }, + "required": [ + "address" + ], + "patternProperties": { + "^name$": {} + } + }, + "uiSchema": { + "uiOptions": { + 
"fullPage": true + }, + "timeout": { + "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)." + } + } +} diff --git a/src/go/plugin/go.d/modules/apcupsd/integrations/apc_ups.md b/src/go/plugin/go.d/modules/apcupsd/integrations/apc_ups.md new file mode 100644 index 000000000..67b041335 --- /dev/null +++ b/src/go/plugin/go.d/modules/apcupsd/integrations/apc_ups.md @@ -0,0 +1,240 @@ + + +# APC UPS + + + + + +Plugin: go.d.plugin +Module: apcupsd + + + +## Overview + +This collector monitors Uninterruptible Power Supplies by polling the Apcupsd daemon. + + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects Apcupsd instances running on localhost that are listening on port 3551. +On startup, it tries to collect metrics from: + +- 127.0.0.1:3551 + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per ups + +These metrics refer to the UPS unit. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| apcupsd.ups_status | TRIM, BOOST, CAL, ONLINE, ONBATT, OVERLOAD, LOWBATT, REPLACEBATT, NOBATT, SLAVE, SLAVEDOWN, COMMLOST, SHUTTING_DOWN | status | +| apcupsd.ups_selftest | NO, NG, WN, IP, OK, BT, UNK | status | +| apcupsd.ups_battery_charge | charge | percent | +| apcupsd.ups_battery_time_remaining | timeleft | seconds | +| apcupsd.ups_battery_time_since_replacement | since_replacement | seconds | +| apcupsd.ups_battery_voltage | voltage, nominal_voltage | Volts | +| apcupsd.ups_load_capacity_utilization | load | percent | +| apcupsd.ups_load | load | Watts | +| apcupsd.ups_temperature | temperature | Celsius | +| apcupsd.ups_input_voltage | voltage, min_voltage, max_voltage | Volts | +| apcupsd.ups_input_frequency | frequency | Hz | +| apcupsd.ups_output_voltage | voltage | Volts | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ apcupsd_ups_load_capacity ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.ups_load_capacity_utilization | APC UPS average load over the last 10 minutes | +| [ apcupsd_ups_battery_charge ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.ups_battery_charge | APC UPS average battery charge over the last minute | +| [ apcupsd_last_collected_secs ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.ups_status | APC UPS number of seconds since the last successful data collection | +| [ apcupsd_ups_selftest_warning ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.ups_selftest | APC UPS self-test failed due to insufficient battery capacity or due to overload | +| [ apcupsd_ups_status_onbatt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.ups_status | APC UPS has switched to battery power because the input power has failed 
| +| [ apcupsd_ups_status_overload ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.ups_status | APC UPS is overloaded and cannot supply enough power to the load | +| [ apcupsd_ups_status_lowbatt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.ups_status | APC UPS battery is low and needs to be recharged | +| [ apcupsd_ups_status_replacebatt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.ups_status | APC UPS battery has reached the end of its lifespan and needs to be replaced | +| [ apcupsd_ups_status_nobatt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.ups_status | APC UPS has no battery | +| [ apcupsd_ups_status_commlost ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.ups_status | APC UPS communication link is lost | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/apcupsd.conf`. + + +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/apcupsd.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +
    Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 1 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| address | Apcupsd daemon address in IP:PORT format. | 127.0.0.1:3551 | yes | +| timeout | Connection/read/write timeout in seconds. The timeout includes name resolution, if required. | 2 | no | + +
    + +#### Examples + +##### Basic + +A basic example configuration. + +
    Config + +```yaml +jobs: + - name: local + address: 127.0.0.1:3551 + +``` +
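+
+If the job fails to collect data, it can help to first confirm that the daemon answers on the configured address at all. One quick check, assuming apcupsd's bundled `apcaccess` utility is installed (the address below is the default from the example above):
+
+```bash
+# Query the apcupsd daemon directly; a reachable daemon prints the same
+# STATUS/LINEV/BCHARGE fields this collector parses.
+apcaccess status 127.0.0.1:3551
+```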
    + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Collecting metrics from local and remote instances. + + +
    Config + +```yaml +jobs: + - name: local + address: 127.0.0.1:3551 + + - name: remote + address: 203.0.113.0:3551 + +``` +
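+
+The global options from the options table (`update_every`, `timeout`) can also be set per job. For a UPS polled over a slow or lossy link, a larger timeout and a more relaxed collection interval may be appropriate; a sketch with illustrative values:
+
+```yaml
+jobs:
+  - name: remote_slow_link
+    address: 203.0.113.0:3551
+    timeout: 5        # seconds (default: 2)
+    update_every: 5   # seconds (default: 1)
+```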
    + + + +## Troubleshooting + +### Debug Mode + +**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature. + +To troubleshoot issues with the `apcupsd` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m apcupsd + ``` + +### Getting Logs + +If you're encountering problems with the `apcupsd` collector, follow these steps to retrieve logs and identify potential issues: + +- **Run the command** specific to your system (systemd, non-systemd, or Docker container). +- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem. + +#### System with systemd + +Use the following command to view logs generated since the last Netdata service restart: + +```bash +journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep apcupsd +``` + +#### System without systemd + +Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name: + +```bash +grep apcupsd /var/log/netdata/collector.log +``` + +**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues. + +#### Docker Container + +If your Netdata runs in a Docker container named "netdata" (replace if different), use this command: + +```bash +docker logs netdata 2>&1 | grep apcupsd +``` + + diff --git a/src/go/plugin/go.d/modules/apcupsd/metadata.yaml b/src/go/plugin/go.d/modules/apcupsd/metadata.yaml new file mode 100644 index 000000000..5739a854d --- /dev/null +++ b/src/go/plugin/go.d/modules/apcupsd/metadata.yaml @@ -0,0 +1,244 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-apcupsd + plugin_name: go.d.plugin + module_name: apcupsd + monitored_instance: + name: APC UPS + link: https://www.apc.com + icon_filename: apc.svg + categories: + - data-collection.ups + keywords: + - ups + - apcupsd + - apc + related_resources: + integrations: + list: [] + info_provided_to_referring_integrations: + description: "" + most_popular: false + overview: + data_collection: + metrics_description: | + This collector monitors Uninterruptible Power Supplies by polling the Apcupsd daemon. + method_description: "" + supported_platforms: + include: [] + exclude: [] + multi_instance: true + additional_permissions: + description: "" + default_behavior: + auto_detection: + description: | + By default, it detects Apcupsd instances running on localhost that are listening on port 3551. + On startup, it tries to collect metrics from: + + - 127.0.0.1:3551 + limits: + description: "" + performance_impact: + description: "" + setup: + prerequisites: + list: [] + configuration: + file: + name: go.d/apcupsd.conf + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. 
+ folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 1 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + - name: address + description: Apcupsd daemon address in IP:PORT format. + default_value: 127.0.0.1:3551 + required: true + - name: timeout + description: Connection/read/write timeout in seconds. The timeout includes name resolution, if required. + default_value: 2 + required: false + examples: + folding: + title: Config + enabled: true + list: + - name: Basic + description: A basic example configuration. + config: | + jobs: + - name: local + address: 127.0.0.1:3551 + - name: Multi-instance + description: | + > **Note**: When you define multiple jobs, their names must be unique. + + Collecting metrics from local and remote instances. + config: | + jobs: + - name: local + address: 127.0.0.1:3551 + + - name: remote + address: 203.0.113.0:3551 + troubleshooting: + problems: + list: [] + alerts: + - name: apcupsd_ups_load_capacity + metric: apcupsd.ups_load_capacity_utilization + info: "APC UPS average load over the last 10 minutes" + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf + - name: apcupsd_ups_battery_charge + metric: apcupsd.ups_battery_charge + info: "APC UPS average battery charge over the last minute" + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf + - name: apcupsd_last_collected_secs + metric: apcupsd.ups_status + info: "APC UPS number of seconds since the last successful data collection" + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf + - name: apcupsd_ups_selftest_warning + metric: apcupsd.ups_selftest + info: "APC UPS self-test failed due to insufficient battery capacity or due to overload" + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf + - name: apcupsd_ups_status_onbatt + metric: apcupsd.ups_status + info: "APC UPS has switched to battery power because the input power has failed" + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf + - name: apcupsd_ups_status_overload + metric: apcupsd.ups_status + info: "APC UPS is overloaded and cannot supply enough power to the load" + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf + - name: apcupsd_ups_status_lowbatt + metric: apcupsd.ups_status + info: "APC UPS battery is low and needs to be recharged" + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf + - name: apcupsd_ups_status_replacebatt + metric: apcupsd.ups_status + info: "APC UPS battery has reached the end of its lifespan and needs to be replaced" + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf + - name: apcupsd_ups_status_nobatt + metric: apcupsd.ups_status + info: "APC UPS has no battery" + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf + - name: apcupsd_ups_status_commlost + metric: apcupsd.ups_status + info: "APC UPS communication link is lost" + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: ups + description: These metrics refer to the UPS unit. 
+ labels: [] + metrics: + - name: apcupsd.ups_status + description: UPS Status + unit: status + chart_type: line + dimensions: + - name: TRIM + - name: BOOST + - name: CAL + - name: ONLINE + - name: ONBATT + - name: OVERLOAD + - name: LOWBATT + - name: REPLACEBATT + - name: NOBATT + - name: SLAVE + - name: SLAVEDOWN + - name: COMMLOST + - name: SHUTTING_DOWN + - name: apcupsd.ups_selftest + description: UPS Self-Test Status + unit: status + chart_type: line + dimensions: + - name: NO + - name: NG + - name: WN + - name: IP + - name: OK + - name: BT + - name: UNK + - name: apcupsd.ups_battery_charge + description: UPS Battery Charge + unit: percent + chart_type: area + dimensions: + - name: charge + - name: apcupsd.ups_battery_time_remaining + description: UPS Estimated Runtime on Battery + unit: seconds + chart_type: line + dimensions: + - name: timeleft + - name: apcupsd.ups_battery_time_since_replacement + description: UPS Time Since Battery Replacement + unit: seconds + chart_type: line + dimensions: + - name: since_replacement + - name: apcupsd.ups_battery_voltage + description: UPS Battery Voltage + unit: Volts + chart_type: line + dimensions: + - name: voltage + - name: nominal_voltage + - name: apcupsd.ups_load_capacity_utilization + description: UPS Load Capacity Utilization + unit: percent + chart_type: area + dimensions: + - name: load + - name: apcupsd.ups_load + description: UPS Load + unit: Watts + chart_type: line + dimensions: + - name: load + - name: apcupsd.ups_temperature + description: UPS Internal Temperature + unit: Celsius + chart_type: line + dimensions: + - name: temperature + - name: apcupsd.ups_input_voltage + description: UPS Input Voltage + unit: Volts + chart_type: line + dimensions: + - name: voltage + - name: min_voltage + - name: max_voltage + - name: apcupsd.ups_input_frequency + description: UPS Input Frequency + unit: Hz + chart_type: line + dimensions: + - name: frequency + - name: apcupsd.ups_output_voltage + description: UPS Output Voltage + unit: Volts + chart_type: line + dimensions: + - name: voltage diff --git a/src/go/plugin/go.d/modules/apcupsd/status.go b/src/go/plugin/go.d/modules/apcupsd/status.go new file mode 100644 index 000000000..7250d1591 --- /dev/null +++ b/src/go/plugin/go.d/modules/apcupsd/status.go @@ -0,0 +1,137 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package apcupsd + +import ( + "bufio" + "bytes" + "fmt" + "strconv" + "strings" +) + +var upsStatuses = []string{ + "CAL", + "TRIM", + "BOOST", + "ONLINE", + "ONBATT", + "OVERLOAD", + "LOWBATT", + "REPLACEBATT", + "NOBATT", + "SLAVE", + "SLAVEDOWN", + "COMMLOST", + "SHUTTING_DOWN", +} + +var upsSelftestStatuses = []string{ + "NO", + "NG", + "WN", + "IP", + "OK", + "BT", + "UNK", +} + +// examples: https://github.com/therealbstern/apcupsd/tree/master/examples/status +type apcupsdStatus struct { + bcharge *float64 // battery charge level (percentage) + battv *float64 // battery voltage (Volts) + nombattv *float64 // nominal battery voltage (Volts) + linev *float64 // line voltage (Volts) + minlinev *float64 // min line voltage (Volts) + maxlinev *float64 // max line voltage (Volts) + linefreq *float64 // line frequency (Hz) + outputv *float64 // output voltage (Volts) + nomoutv *float64 // nominal output voltage (Volts) + loadpct *float64 // UPS Load (Percent Load Capacity) + itemp *float64 // internal UPS temperature (Celsius) + nompower *float64 // nominal power (Watts) + timeleft *float64 // estimated runtime left (minutes) + battdate string // Last battery change date 
(MM/DD/YY or YYYY-MM-DD) + status string + selftest string +} + +func parseStatus(resp []byte) (*apcupsdStatus, error) { + var st apcupsdStatus + sc := bufio.NewScanner(bytes.NewBuffer(resp)) + + for sc.Scan() { + line := sc.Text() + + key, value, ok := strings.Cut(line, ":") + if !ok { + continue + } + + key, value = strings.TrimSpace(key), strings.TrimSpace(value) + + if value == "N/A" { + continue + } + + var err error + + // https://github.com/therealbstern/apcupsd/blob/224d19d5faa508d04267f6135fe53d50800550de/src/lib/apcstatus.c#L30 + switch key { + case "BCHARGE": + st.bcharge, err = parseFloat(value) + case "BATTV": + st.battv, err = parseFloat(value) + case "NOMBATTV": + st.nombattv, err = parseFloat(value) + case "LINEV": + st.linev, err = parseFloat(value) + case "MINLINEV": + st.minlinev, err = parseFloat(value) + case "MAXLINEV": + st.maxlinev, err = parseFloat(value) + case "LINEFREQ": + st.linefreq, err = parseFloat(value) + case "OUTPUTV": + st.outputv, err = parseFloat(value) + case "NOMOUTV": + st.nomoutv, err = parseFloat(value) + case "LOADPCT": + st.loadpct, err = parseFloat(value) + case "ITEMP": + st.itemp, err = parseFloat(value) + case "NOMPOWER": + st.nompower, err = parseFloat(value) + case "TIMELEFT": + st.timeleft, err = parseFloat(value) + case "BATTDATE": + st.battdate = value + case "STATUS": + if value == "SHUTTING DOWN" { + value = "SHUTTING_DOWN" + } + st.status = value + case "SELFTEST": + if value == "??" { + value = "UNK" + } + st.selftest = value + default: + continue + } + if err != nil { + return nil, fmt.Errorf("line '%s': %v", line, err) + } + } + + return &st, nil +} + +func parseFloat(s string) (*float64, error) { + val, _, _ := strings.Cut(s, " ") + f, err := strconv.ParseFloat(val, 64) + if err != nil { + return nil, err + } + return &f, nil +} diff --git a/src/go/plugin/go.d/modules/apcupsd/testdata/config.json b/src/go/plugin/go.d/modules/apcupsd/testdata/config.json new file mode 100644 index 000000000..e86834720 --- /dev/null +++ b/src/go/plugin/go.d/modules/apcupsd/testdata/config.json @@ -0,0 +1,5 @@ +{ + "update_every": 123, + "address": "ok", + "timeout": 123.123 +} diff --git a/src/go/plugin/go.d/modules/apcupsd/testdata/config.yaml b/src/go/plugin/go.d/modules/apcupsd/testdata/config.yaml new file mode 100644 index 000000000..1b81d09eb --- /dev/null +++ b/src/go/plugin/go.d/modules/apcupsd/testdata/config.yaml @@ -0,0 +1,3 @@ +update_every: 123 +address: "ok" +timeout: 123.123 diff --git a/src/go/plugin/go.d/modules/apcupsd/testdata/status.txt b/src/go/plugin/go.d/modules/apcupsd/testdata/status.txt new file mode 100644 index 000000000..b89a3fecf --- /dev/null +++ b/src/go/plugin/go.d/modules/apcupsd/testdata/status.txt @@ -0,0 +1,56 @@ +DATE : Wed Sep 27 17:30:23 CEST 2000 +HOSTNAME : test +RELEASE : 3.7.3-20000925 +CABLE : Custom Cable Smart +MODEL : SMART-UPS 1000 +UPSMODE : Stand Alone +STARTTIME: Wed Sep 27 10:39:23 CEST 2000 +UPSNAME : UPS_IDEN +STATUS : ONLINE +LINEV : 235.3 Volts +LOADPCT : 9.3 Percent Load Capacity +BCHARGE : 100.0 Percent +TIMELEFT : 130.0 Minutes +MBATTCHG : 5 Percent +MINTIMEL : 3 Minutes +MAXTIME : 0 Seconds +MAXLINEV : 239.2 Volts +MINLINEV : 234.0 Volts +OUTPUTV : 236.6 Volts +SENSE : High +DWAKE : 000 Seconds +DSHUTD : 020 Seconds +DLOWBATT : 02 Minutes +LOTRANS : 196.0 Volts +HITRANS : 253.0 Volts +RETPCT : 000.0 Percent +ITEMP : 32.8 C Internal +ALARMDEL : 5 seconds +BATTV : 27.9 Volts +LINEFREQ : 50.0 Hz +LASTXFER : Line voltage notch or spike +NUMXFERS : 0 +XONBATT : N/A +TONBATT : 0 seconds 
+CUMONBATT: 0 seconds +XOFFBATT : N/A +SELFTEST : NO +STESTI : 336 +STATFLAG : 0x08 Status Flag +DIPSW : 0x00 Dip Switch +REG1 : 0x00 Register 1 +REG2 : 0x00 Register 2 +REG3 : 0x00 Register 3 +MANDATE : 07/31/99 +SERIALNO : QS9931125245 +BATTDATE : 07/31/99 +NOMOUTV : 230 +NOMBATTV : 24.0 +NOMPOWER : 600 Watts +HUMIDITY : N/A +AMBTEMP : N/A +EXTBATTS : 0 +BADBATTS : N/A +FIRMWARE : 60.11.I +APCMODEL : IWI +END APC : Wed Sep 27 17:30:31 CEST 2000 diff --git a/src/go/plugin/go.d/modules/apcupsd/testdata/status_commlost.txt b/src/go/plugin/go.d/modules/apcupsd/testdata/status_commlost.txt new file mode 100644 index 000000000..1fe377f44 --- /dev/null +++ b/src/go/plugin/go.d/modules/apcupsd/testdata/status_commlost.txt @@ -0,0 +1,18 @@ +APC : 001,017,0427 +DATE : 2024-09-06 20:22:06 +0300 +HOSTNAME : test +VERSION : 3.14.14 (31 May 2016) debian +CABLE : USB Cable +DRIVER : USB UPS Driver +UPSMODE : Stand Alone +STARTTIME: 2024-09-03 10:15:36 +0300 +STATUS : COMMLOST +MBATTCHG : 5 Percent +MINTIMEL : 3 Minutes +MAXTIME : 0 Seconds +NUMXFERS : 0 +TONBATT : 0 Seconds +CUMONBATT: 0 Seconds +XOFFBATT : N/A +STATFLAG : 0x05000100 +END APC : 2024-09-07 19:26:34 +0300 diff --git a/src/go/plugin/go.d/modules/beanstalk/beanstalk.go b/src/go/plugin/go.d/modules/beanstalk/beanstalk.go index f37cbeda4..d56e011bd 100644 --- a/src/go/plugin/go.d/modules/beanstalk/beanstalk.go +++ b/src/go/plugin/go.d/modules/beanstalk/beanstalk.go @@ -9,9 +9,9 @@ import ( "time" "github.com/netdata/netdata/go/plugins/logger" + "github.com/netdata/netdata/go/plugins/pkg/matcher" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" ) //go:embed "config_schema.json" @@ -29,7 +29,7 @@ func New() *Beanstalk { return &Beanstalk{ Config: Config{ Address: "127.0.0.1:11300", - Timeout: web.Duration(time.Second * 1), + Timeout: confopt.Duration(time.Second * 1), TubeSelector: "*", }, @@ -42,10 +42,10 @@ func New() *Beanstalk { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - Address string `yaml:"address" json:"address"` - Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"` - TubeSelector string `yaml:"tube_selector,omitempty" json:"tube_selector"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + Address string `yaml:"address" json:"address"` + Timeout confopt.Duration `yaml:"timeout,omitempty" json:"timeout"` + TubeSelector string `yaml:"tube_selector,omitempty" json:"tube_selector"` } type Beanstalk struct { @@ -85,7 +85,6 @@ func (b *Beanstalk) Init() error { func (b *Beanstalk) Check() error { mx, err := b.collect() if err != nil { - b.Error(err) return err } diff --git a/src/go/plugin/go.d/modules/beanstalk/client.go b/src/go/plugin/go.d/modules/beanstalk/client.go index 66a8b1cef..00fd8a13c 100644 --- a/src/go/plugin/go.d/modules/beanstalk/client.go +++ b/src/go/plugin/go.d/modules/beanstalk/client.go @@ -88,11 +88,9 @@ func newBeanstalkConn(conf Config, log *logger.Logger) beanstalkConn { return &beanstalkClient{ Logger: log, client: socket.New(socket.Config{ - Address: conf.Address, - ConnectTimeout: conf.Timeout.Duration(), - ReadTimeout: conf.Timeout.Duration(), - WriteTimeout: conf.Timeout.Duration(), - TLSConf: nil, + Address: conf.Address, + Timeout: conf.Timeout.Duration(), + TLSConf: nil, }), } } diff --git 
a/src/go/plugin/go.d/modules/beanstalk/config_schema.json b/src/go/plugin/go.d/modules/beanstalk/config_schema.json index aa600ac03..5224604cd 100644 --- a/src/go/plugin/go.d/modules/beanstalk/config_schema.json +++ b/src/go/plugin/go.d/modules/beanstalk/config_schema.json @@ -35,7 +35,6 @@ "required": [ "address" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/beanstalk/init.go b/src/go/plugin/go.d/modules/beanstalk/init.go index 50916b3a7..0ae61c404 100644 --- a/src/go/plugin/go.d/modules/beanstalk/init.go +++ b/src/go/plugin/go.d/modules/beanstalk/init.go @@ -5,7 +5,7 @@ package beanstalk import ( "errors" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher" + "github.com/netdata/netdata/go/plugins/pkg/matcher" ) func (b *Beanstalk) validateConfig() error { diff --git a/src/go/plugin/go.d/modules/beanstalk/integrations/beanstalk.md b/src/go/plugin/go.d/modules/beanstalk/integrations/beanstalk.md index c8efd988a..ee84b3d79 100644 --- a/src/go/plugin/go.d/modules/beanstalk/integrations/beanstalk.md +++ b/src/go/plugin/go.d/modules/beanstalk/integrations/beanstalk.md @@ -127,8 +127,8 @@ No action required. The configuration file name for this integration is `go.d/beanstalk.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/bind/README.md b/src/go/plugin/go.d/modules/bind/README.md index 90906ac21..e42a8c2e5 100644 --- a/src/go/plugin/go.d/modules/bind/README.md +++ b/src/go/plugin/go.d/modules/bind/README.md @@ -1,13 +1,3 @@ - - # Bind9 collector [`Bind9`](https://www.isc.org/bind/) (or named) is a very flexible, full-featured DNS system. 
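+
+The collector reads BIND's statistics channel, so the channel must be enabled in `named.conf`, and the configured URL must end in `/json/v1` (BIND 9.10+) or `/xml/v3` (BIND 9.9+). A quick way to verify the endpoint before configuring the collector, assuming the statistics channel listens on port 8653 as in the collector's default URL:
+
+```bash
+# named.conf needs a statistics-channels block, e.g.:
+#   statistics-channels { inet 127.0.0.1 port 8653 allow { 127.0.0.1; }; };
+# The collector appends /server to the base URL, so this mirrors its request:
+curl -s http://127.0.0.1:8653/json/v1/server | head
+```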
diff --git a/src/go/plugin/go.d/modules/bind/bind.go b/src/go/plugin/go.d/modules/bind/bind.go index 6087f6f74..91ca14fea 100644 --- a/src/go/plugin/go.d/modules/bind/bind.go +++ b/src/go/plugin/go.d/modules/bind/bind.go @@ -5,13 +5,14 @@ package bind import ( _ "embed" "errors" + "fmt" "net/http" "time" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" - + "github.com/netdata/netdata/go/plugins/pkg/matcher" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) //go:embed "config_schema.json" @@ -28,12 +29,12 @@ func init() { func New() *Bind { return &Bind{ Config: Config{ - HTTP: web.HTTP{ - Request: web.Request{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{ URL: "http://127.0.0.1:8653/json/v1", }, - Client: web.Client{ - Timeout: web.Duration(time.Second), + ClientConfig: web.ClientConfig{ + Timeout: confopt.Duration(time.Second), }, }, }, @@ -42,9 +43,9 @@ func New() *Bind { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - web.HTTP `yaml:",inline" json:""` - PermitView string `yaml:"permit_view,omitempty" json:"permit_view"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + web.HTTPConfig `yaml:",inline" json:""` + PermitView string `yaml:"permit_view,omitempty" json:"permit_view"` } type ( @@ -71,30 +72,26 @@ func (b *Bind) Configuration() any { func (b *Bind) Init() error { if err := b.validateConfig(); err != nil { - b.Errorf("config verification: %v", err) - return err + return fmt.Errorf("config verification: %v", err) } pvm, err := b.initPermitViewMatcher() if err != nil { - b.Error(err) - return err + return fmt.Errorf("init permit view matcher: %v", err) } if pvm != nil { b.permitView = pvm } - httpClient, err := web.NewHTTPClient(b.Client) + httpClient, err := web.NewHTTPClient(b.ClientConfig) if err != nil { - b.Errorf("creating http client : %v", err) - return err + return fmt.Errorf("creating http client : %v", err) } b.httpClient = httpClient bindClient, err := b.initBindApiClient(httpClient) if err != nil { - b.Error(err) - return err + return fmt.Errorf("init bind api client: %v", err) } b.bindAPIClient = bindClient @@ -104,7 +101,6 @@ func (b *Bind) Init() error { func (b *Bind) Check() error { mx, err := b.collect() if err != nil { - b.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/bind/config_schema.json b/src/go/plugin/go.d/modules/bind/config_schema.json index 29bb739ea..7c92be419 100644 --- a/src/go/plugin/go.d/modules/bind/config_schema.json +++ b/src/go/plugin/go.d/modules/bind/config_schema.json @@ -105,7 +105,6 @@ "required": [ "url" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/bind/init.go b/src/go/plugin/go.d/modules/bind/init.go index fe533b974..7a02b1bf7 100644 --- a/src/go/plugin/go.d/modules/bind/init.go +++ b/src/go/plugin/go.d/modules/bind/init.go @@ -8,7 +8,7 @@ import ( "net/http" "strings" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher" + "github.com/netdata/netdata/go/plugins/pkg/matcher" ) func (b *Bind) validateConfig() error { @@ -28,9 +28,9 @@ func (b *Bind) initPermitViewMatcher() (matcher.Matcher, error) { func (b *Bind) initBindApiClient(httpClient *http.Client) (bindAPIClient, error) { switch { case 
strings.HasSuffix(b.URL, "/xml/v3"): // BIND 9.9+ - return newXML3Client(httpClient, b.Request), nil + return newXML3Client(httpClient, b.RequestConfig), nil case strings.HasSuffix(b.URL, "/json/v1"): // BIND 9.10+ - return newJSONClient(httpClient, b.Request), nil + return newJSONClient(httpClient, b.RequestConfig), nil default: return nil, fmt.Errorf("URL %s is wrong, supported endpoints: `/xml/v3`, `/json/v1`", b.URL) } diff --git a/src/go/plugin/go.d/modules/bind/json_client.go b/src/go/plugin/go.d/modules/bind/json_client.go index 04eecdb04..f6d7e5754 100644 --- a/src/go/plugin/go.d/modules/bind/json_client.go +++ b/src/go/plugin/go.d/modules/bind/json_client.go @@ -3,12 +3,8 @@ package bind import ( - "encoding/json" "fmt" - "io" "net/http" - "net/url" - "path" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -33,50 +29,26 @@ type jsonViewResolver struct { CacheStats map[string]int64 } -func newJSONClient(client *http.Client, request web.Request) *jsonClient { +func newJSONClient(client *http.Client, request web.RequestConfig) *jsonClient { return &jsonClient{httpClient: client, request: request} } type jsonClient struct { httpClient *http.Client - request web.Request + request web.RequestConfig } func (c jsonClient) serverStats() (*serverStats, error) { - req := c.request.Copy() - u, err := url.Parse(req.URL) + req, err := web.NewHTTPRequestWithPath(c.request, "/server") if err != nil { - return nil, fmt.Errorf("error on parsing URL: %v", err) + return nil, fmt.Errorf("failed to create HTTP request: %v", err) } - u.Path = path.Join(u.Path, "/server") - req.URL = u.String() + var stats jsonServerStats - httpReq, err := web.NewHTTPRequest(req) - if err != nil { - return nil, fmt.Errorf("error on creating HTTP request: %v", err) - } - - resp, err := c.httpClient.Do(httpReq) - if err != nil { - return nil, fmt.Errorf("error on request : %v", err) - } - defer closeBody(resp) - - if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("%s returned HTTP status %d", httpReq.URL, resp.StatusCode) + if err := web.DoHTTP(c.httpClient).RequestJSON(req, &stats); err != nil { + return nil, err } - stats := &jsonServerStats{} - if err = json.NewDecoder(resp.Body).Decode(stats); err != nil { - return nil, fmt.Errorf("error on decoding response from %s : %v", httpReq.URL, err) - } - return stats, nil -} - -func closeBody(resp *http.Response) { - if resp != nil && resp.Body != nil { - _, _ = io.Copy(io.Discard, resp.Body) - _ = resp.Body.Close() - } + return &stats, nil } diff --git a/src/go/plugin/go.d/modules/bind/xml3_client.go b/src/go/plugin/go.d/modules/bind/xml3_client.go index c48d1af31..357bef674 100644 --- a/src/go/plugin/go.d/modules/bind/xml3_client.go +++ b/src/go/plugin/go.d/modules/bind/xml3_client.go @@ -3,11 +3,8 @@ package bind import ( - "encoding/xml" "fmt" "net/http" - "net/url" - "path" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -34,44 +31,27 @@ type xml3View struct { CounterGroups []xml3CounterGroup `xml:"counters"` } -func newXML3Client(client *http.Client, request web.Request) *xml3Client { +func newXML3Client(client *http.Client, request web.RequestConfig) *xml3Client { return &xml3Client{httpClient: client, request: request} } type xml3Client struct { httpClient *http.Client - request web.Request + request web.RequestConfig } func (c xml3Client) serverStats() (*serverStats, error) { - req := c.request.Copy() - u, err := url.Parse(req.URL) + req, err := web.NewHTTPRequestWithPath(c.request, "/server") if err != nil { - return nil, 
fmt.Errorf("error on parsing URL: %v", err) + return nil, fmt.Errorf("failed to create HTTP request: %v", err) } - u.Path = path.Join(u.Path, "/server") - req.URL = u.String() + var stats xml3Stats - httpReq, err := web.NewHTTPRequest(req) - if err != nil { - return nil, fmt.Errorf("error on creating HTTP request: %v", err) - } - - resp, err := c.httpClient.Do(httpReq) - if err != nil { - return nil, fmt.Errorf("error on request : %v", err) + if err := web.DoHTTP(c.httpClient).RequestXML(req, &stats); err != nil { + return nil, err } - defer closeBody(resp) - if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("%s returned HTTP status %d", httpReq.URL, resp.StatusCode) - } - - stats := xml3Stats{} - if err = xml.NewDecoder(resp.Body).Decode(&stats); err != nil { - return nil, fmt.Errorf("error on decoding response from %s : %v", httpReq.URL, err) - } return convertXML(stats), nil } diff --git a/src/go/plugin/go.d/modules/boinc/README.md b/src/go/plugin/go.d/modules/boinc/README.md new file mode 120000 index 000000000..22c10ca17 --- /dev/null +++ b/src/go/plugin/go.d/modules/boinc/README.md @@ -0,0 +1 @@ +integrations/boinc.md \ No newline at end of file diff --git a/src/go/plugin/go.d/modules/boinc/boinc.go b/src/go/plugin/go.d/modules/boinc/boinc.go new file mode 100644 index 000000000..139a0097a --- /dev/null +++ b/src/go/plugin/go.d/modules/boinc/boinc.go @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package boinc + +import ( + _ "embed" + "errors" + "time" + + "github.com/netdata/netdata/go/plugins/logger" + "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("boinc", module.Creator{ + JobConfigSchema: configSchema, + Create: func() module.Module { return New() }, + Config: func() any { return &Config{} }, + }) +} + +func New() *Boinc { + return &Boinc{ + Config: Config{ + Address: "127.0.0.1:31416", + Timeout: confopt.Duration(time.Second * 1), + }, + newConn: newBoincConn, + charts: charts.Copy(), + } +} + +type Config struct { + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + Address string `yaml:"address" json:"address"` + Timeout confopt.Duration `yaml:"timeout" json:"timeout"` + Password string `yaml:"password" json:"password"` +} + +type Boinc struct { + module.Base + Config `yaml:",inline" json:""` + + charts *module.Charts + + newConn func(Config, *logger.Logger) boincConn + conn boincConn +} + +func (b *Boinc) Configuration() any { + return b.Config +} + +func (b *Boinc) Init() error { + if b.Address == "" { + return errors.New("config: 'address' not set") + } + + return nil +} + +func (b *Boinc) Check() error { + mx, err := b.collect() + if err != nil { + return err + } + + if len(mx) == 0 { + return errors.New("no metrics collected") + } + + return nil +} + +func (b *Boinc) Charts() *module.Charts { + return b.charts +} + +func (b *Boinc) Collect() map[string]int64 { + mx, err := b.collect() + if err != nil { + b.Error(err) + } + + if len(mx) == 0 { + return nil + } + + return mx +} + +func (b *Boinc) Cleanup() { + if b.conn != nil { + b.conn.disconnect() + b.conn = nil + } +} diff --git a/src/go/plugin/go.d/modules/boinc/boinc_test.go b/src/go/plugin/go.d/modules/boinc/boinc_test.go new file mode 100644 index 000000000..83ccf307d --- /dev/null +++ b/src/go/plugin/go.d/modules/boinc/boinc_test.go @@ -0,0 +1,295 @@ +// SPDX-License-Identifier: 
GPL-3.0-or-later + +package boinc + +import ( + "encoding/xml" + "errors" + "os" + "testing" + + "github.com/netdata/netdata/go/plugins/logger" + "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + dataConfigJSON, _ = os.ReadFile("testdata/config.json") + dataConfigYAML, _ = os.ReadFile("testdata/config.yaml") + + dataGetResults, _ = os.ReadFile("testdata/get_results.xml") + dataGetResultsNoTasks, _ = os.ReadFile("testdata/get_results_no_tasks.xml") +) + +func Test_testDataIsValid(t *testing.T) { + for name, data := range map[string][]byte{ + "dataConfigJSON": dataConfigJSON, + "dataConfigYAML": dataConfigYAML, + "dataGetResults": dataGetResults, + "dataGetResultsNoTasks": dataGetResultsNoTasks, + } { + require.NotNil(t, data, name) + } +} + +func TestBoinc_ConfigurationSerialize(t *testing.T) { + module.TestConfigurationSerialize(t, &Boinc{}, dataConfigJSON, dataConfigYAML) +} + +func TestBoinc_Init(t *testing.T) { + tests := map[string]struct { + config Config + wantFail bool + }{ + "success with default config": { + wantFail: false, + config: New().Config, + }, + "fails if address not set": { + wantFail: true, + config: func() Config { + conf := New().Config + conf.Address = "" + return conf + }(), + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + boinc := New() + boinc.Config = test.config + + if test.wantFail { + assert.Error(t, boinc.Init()) + } else { + assert.NoError(t, boinc.Init()) + } + }) + } +} + +func TestBoinc_Cleanup(t *testing.T) { + tests := map[string]struct { + prepare func() *Boinc + }{ + "not initialized": { + prepare: func() *Boinc { + return New() + }, + }, + "after check": { + prepare: func() *Boinc { + boinc := New() + boinc.newConn = func(Config, *logger.Logger) boincConn { return prepareMockOk() } + _ = boinc.Check() + return boinc + }, + }, + "after collect": { + prepare: func() *Boinc { + boinc := New() + boinc.newConn = func(Config, *logger.Logger) boincConn { return prepareMockOk() } + _ = boinc.Collect() + return boinc + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + boinc := test.prepare() + + assert.NotPanics(t, boinc.Cleanup) + }) + } +} + +func TestBoinc_Charts(t *testing.T) { + assert.NotNil(t, New().Charts()) +} + +func TestBoinc_Check(t *testing.T) { + tests := map[string]struct { + prepareMock func() *mockBoincConn + wantFail bool + }{ + "success case": { + wantFail: false, + prepareMock: prepareMockOk, + }, + "err on connect": { + wantFail: true, + prepareMock: prepareMockErrOnConnect, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + boinc := New() + mock := test.prepareMock() + boinc.newConn = func(Config, *logger.Logger) boincConn { return mock } + + if test.wantFail { + assert.Error(t, boinc.Check()) + } else { + assert.NoError(t, boinc.Check()) + } + }) + } +} + +func TestBoinc_Collect(t *testing.T) { + tests := map[string]struct { + prepareMock func() *mockBoincConn + wantMetrics map[string]int64 + disconnectBeforeCleanup bool + disconnectAfterCleanup bool + }{ + "success case with tasks": { + prepareMock: prepareMockOk, + disconnectBeforeCleanup: false, + disconnectAfterCleanup: true, + wantMetrics: map[string]int64{ + "abort_pending": 0, + "aborted": 0, + "active": 14, + "compute_error": 0, + "copy_pending": 0, + "executing": 11, + "files_downloaded": 110, + "files_downloading": 0, + "files_uploaded": 8, + "files_uploading": 0, + 
"new": 0, + "preempted": 3, + "quit_pending": 0, + "scheduled": 11, + "suspended": 3, + "total": 118, + "uninitialized": 0, + "upload_failed": 0, + }, + }, + "success case no tasks": { + prepareMock: prepareMockOkNoTasks, + disconnectBeforeCleanup: false, + disconnectAfterCleanup: true, + wantMetrics: map[string]int64{ + "abort_pending": 0, + "aborted": 0, + "active": 0, + "compute_error": 0, + "copy_pending": 0, + "executing": 0, + "files_downloaded": 0, + "files_downloading": 0, + "files_uploaded": 0, + "files_uploading": 0, + "new": 0, + "preempted": 0, + "quit_pending": 0, + "scheduled": 0, + "suspended": 0, + "total": 0, + "uninitialized": 0, + "upload_failed": 0, + }, + }, + "err on connect": { + prepareMock: prepareMockErrOnConnect, + disconnectBeforeCleanup: false, + disconnectAfterCleanup: false, + }, + "err on get results": { + prepareMock: prepareMockErrOnGetResults, + disconnectBeforeCleanup: true, + disconnectAfterCleanup: true, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + boinc := New() + mock := test.prepareMock() + boinc.newConn = func(Config, *logger.Logger) boincConn { return mock } + + mx := boinc.Collect() + + require.Equal(t, test.wantMetrics, mx) + + if len(test.wantMetrics) > 0 { + module.TestMetricsHasAllChartsDims(t, boinc.Charts(), mx) + } + + assert.Equal(t, test.disconnectBeforeCleanup, mock.disconnectCalled, "disconnect before cleanup") + boinc.Cleanup() + assert.Equal(t, test.disconnectAfterCleanup, mock.disconnectCalled, "disconnect after cleanup") + }) + } +} + +func prepareMockOk() *mockBoincConn { + return &mockBoincConn{ + getResultsResp: dataGetResults, + } +} + +func prepareMockOkNoTasks() *mockBoincConn { + return &mockBoincConn{ + getResultsResp: dataGetResultsNoTasks, + } +} + +func prepareMockErrOnConnect() *mockBoincConn { + return &mockBoincConn{ + errOnConnect: true, + } +} + +func prepareMockErrOnGetResults() *mockBoincConn { + return &mockBoincConn{ + errOnGetResults: true, + } +} + +type mockBoincConn struct { + errOnConnect bool + + errOnGetResults bool + getResultsResp []byte + + authCalled bool + disconnectCalled bool +} + +func (m *mockBoincConn) connect() error { + if m.errOnConnect { + return errors.New("mock.connect() error") + } + return nil +} + +func (m *mockBoincConn) disconnect() { + m.disconnectCalled = true +} + +func (m *mockBoincConn) authenticate() error { + m.authCalled = true + return nil +} + +func (m *mockBoincConn) getResults() ([]boincReplyResult, error) { + if m.errOnGetResults { + return nil, errors.New("mock.getResults() error") + } + + var resp boincReply + if err := xml.Unmarshal(m.getResultsResp, &resp); err != nil { + return nil, err + } + + return resp.Results, nil +} diff --git a/src/go/plugin/go.d/modules/boinc/charts.go b/src/go/plugin/go.d/modules/boinc/charts.go new file mode 100644 index 000000000..2af5f4bef --- /dev/null +++ b/src/go/plugin/go.d/modules/boinc/charts.go @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package boinc + +import ( + "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" +) + +const ( + prioTasks = module.Priority + iota + prioTasksState + prioActiveTasksState + prioActiveTasksSchedulerState +) + +var charts = module.Charts{ + tasksChart.Copy(), + tasksStateChart.Copy(), + activeTasksStateChart.Copy(), + activeTasksSchedulerStateChart.Copy(), +} +var ( + tasksChart = module.Chart{ + ID: "tasks", + Title: "Overall Tasks", + Units: "tasks", + Fam: "tasks", + Ctx: "boinc.tasks", + Priority: prioTasks, + Dims: 
module.Dims{ + {ID: "total"}, + {ID: "active"}, + }, + } + tasksStateChart = module.Chart{ + ID: "tasks_state", + Title: "Tasks per State", + Units: "tasks", + Fam: "tasks", + Ctx: "boinc.tasks_per_state", + Priority: prioTasksState, + Dims: module.Dims{ + {ID: "new"}, + {ID: "files_downloading", Name: "downloading"}, + {ID: "files_downloaded", Name: "downloaded"}, + {ID: "compute_error"}, + {ID: "files_uploading", Name: "uploading"}, + {ID: "files_uploaded", Name: "uploaded"}, + {ID: "aborted"}, + {ID: "upload_failed"}, + }, + } + activeTasksStateChart = module.Chart{ + ID: "active_tasks_state", + Title: "Active Tasks per State", + Units: "tasks", + Fam: "tasks", + Ctx: "boinc.active_tasks_per_state", + Priority: prioActiveTasksState, + Dims: module.Dims{ + {ID: "uninitialized"}, + {ID: "executing"}, + {ID: "abort_pending"}, + {ID: "quit_pending"}, + {ID: "suspended"}, + {ID: "copy_pending"}, + }, + } + activeTasksSchedulerStateChart = module.Chart{ + ID: "active_tasks_scheduler_state", + Title: "Active Tasks per Scheduler State", + Units: "tasks", + Fam: "tasks", + Ctx: "boinc.active_tasks_per_scheduler_state", + Priority: prioActiveTasksSchedulerState, + Dims: module.Dims{ + {ID: "uninitialized"}, + {ID: "preempted"}, + {ID: "scheduled"}, + }, + } +) diff --git a/src/go/plugin/go.d/modules/boinc/client.go b/src/go/plugin/go.d/modules/boinc/client.go new file mode 100644 index 000000000..7635330a2 --- /dev/null +++ b/src/go/plugin/go.d/modules/boinc/client.go @@ -0,0 +1,180 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package boinc + +import ( + "bytes" + "crypto/md5" + "encoding/xml" + "errors" + "fmt" + "log/slog" + "strings" + + "github.com/netdata/netdata/go/plugins/logger" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/socket" +) + +// Based on: https://github.com/vorot93/boinc-client-rest-server/tree/master + +type boincConn interface { + connect() error + disconnect() + authenticate() error + getResults() ([]boincReplyResult, error) +} + +func newBoincConn(conf Config, log *logger.Logger) boincConn { + return &boincClient{ + Logger: log, + + password: conf.Password, + conn: socket.New(socket.Config{ + Address: conf.Address, + Timeout: conf.Timeout.Duration(), + })} +} + +type boincClient struct { + *logger.Logger + password string + conn socket.Client +} + +func (c *boincClient) connect() error { + return c.conn.Connect() +} + +func (c *boincClient) disconnect() { + _ = c.conn.Disconnect() +} + +func (c *boincClient) authenticate() error { + // https://boinc.berkeley.edu/trac/wiki/GuiRpcProtocol#Authentication + + req := &boincRequest{ + Auth1: &struct{}{}, + } + + resp, err := c.send(req) + if err != nil { + return err + } + if resp.Nonce == nil { + return errors.New("auth1: empty nonce") + } + + req = &boincRequest{ + Auth2: &boincRequestAuthNonce{Hash: makeNonceMD5(*resp.Nonce, c.password)}, + } + + resp, err = c.send(req) + if err != nil { + return err + } + if resp.Unauthorized != nil || resp.Authorized == nil { + return errors.New("auth2: unauthorized") + } + + return nil +} + +func (c *boincClient) getResults() ([]boincReplyResult, error) { + req := &boincRequest{ + GetResults: &boincRequestGetResults{}, + } + + resp, err := c.send(req) + if err != nil { + return nil, err + } + + return resp.Results, nil +} + +func (c *boincClient) send(req *boincRequest) (*boincReply, error) { + reqData, err := xml.Marshal(req) + if err != nil { + return nil, fmt.Errorf("failed to marshal request: %v", err) + } + + reqData = append(reqData, 3) + + if 
logger.Level.Enabled(slog.LevelDebug) {
+		c.Debugf("sending request: %s", string(reqData))
+	}
+
+	const (
+		respStart = "<boinc_gui_rpc_reply>"
+		respEnd   = "</boinc_gui_rpc_reply>"
+	)
+
+	var b bytes.Buffer
+
+	clientErr := c.conn.Command(string(reqData), func(bs []byte) bool {
+		s := strings.TrimSpace(string(bs))
+		if s == "" {
+			return true
+		}
+
+		if b.Len() == 0 && s != respStart {
+			err = fmt.Errorf("unexpected response first line: %s", s)
+			return false
+		}
+
+		b.WriteString(s)
+
+		return s != respEnd
+	})
+	if clientErr != nil {
+		return nil, fmt.Errorf("failed to send command: %v", clientErr)
+	}
+	if err != nil {
+		return nil, fmt.Errorf("failed to send command: %v", err)
+	}
+
+	if logger.Level.Enabled(slog.LevelDebug) {
+		c.Debugf("received response: %s", string(b.Bytes()))
+	}
+
+	respData := cleanReplyData(b.Bytes())
+
+	var resp boincReply
+
+	if err := xml.Unmarshal(respData, &resp); err != nil {
+		return nil, fmt.Errorf("failed to unmarshal reply: %v", err)
+	}
+
+	if resp.Error != nil {
+		return nil, fmt.Errorf("received error from server: %s", *resp.Error)
+	}
+	if resp.BadRequest != nil {
+		return nil, errors.New("received bad request response from server")
+	}
+	if resp.Unauthorized != nil {
+		return nil, errors.New("received unauthorized response from server")
+	}
+
+	return &resp, nil
+}
+
+func cleanReplyData(resp []byte) []byte {
+	tags := []string{"bad_request", "authorized", "unauthorized", "have_credentials", "cookie_required"}
+	s := expandEmptyTags(string(resp), tags)
+	return []byte(strings.ReplaceAll(s, `encoding="ISO-8859-1"`, `encoding="UTF-8"`))
+}
+
+func makeNonceMD5(nonce, pass string) string {
+	hex := fmt.Sprintf("%x", md5.Sum([]byte(nonce+pass)))
+	return hex
+}
+
+// expandEmptyTags rewrites self-closing tags (e.g. <authorized/>) into an
+// explicit open/close pair before the reply is unmarshalled.
+func expandEmptyTags(xmlString string, tags []string) string {
+	for _, tag := range tags {
+		emptyTag := fmt.Sprintf("<%s/>", tag)
+		expandedTag := fmt.Sprintf("<%s></%s>", tag, tag)
+		xmlString = strings.ReplaceAll(xmlString, emptyTag, expandedTag)
+		xmlString = strings.ReplaceAll(xmlString, fmt.Sprintf("<%s />", tag), expandedTag)
+	}
+	return xmlString
+}
diff --git a/src/go/plugin/go.d/modules/boinc/client_proto.go b/src/go/plugin/go.d/modules/boinc/client_proto.go
new file mode 100644
index 000000000..bdcdbf19c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/boinc/client_proto.go
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package boinc
+
+import (
+	"encoding/xml"
+)
+
+// https://boinc.berkeley.edu/trac/wiki/GuiRpcProtocol
+
+type boincRequest struct {
+	XMLName    xml.Name                `xml:"boinc_gui_rpc_request"`
+	Auth1      *struct{}               `xml:"auth1"`
+	Auth2      *boincRequestAuthNonce  `xml:"auth2"`
+	GetResults *boincRequestGetResults `xml:"get_results"`
+}
+
+type (
+	boincRequestAuthNonce struct {
+		Hash string `xml:"nonce_hash"`
+	}
+	boincRequestGetResults struct {
+		ActiveOnly int `xml:"active_only"`
+	}
+)
+
+type boincReply struct {
+	XMLName      xml.Name           `xml:"boinc_gui_rpc_reply"`
+	Error        *string            `xml:"error"`
+	BadRequest   *struct{}          `xml:"bad_request"`
+	Authorized   *struct{}          `xml:"authorized"`
+	Unauthorized *struct{}          `xml:"unauthorized"`
+	Nonce        *string            `xml:"nonce"`
+	Results      []boincReplyResult `xml:"results>result"`
+}
+
+type (
+	boincReplyResult struct {
+		State      int                         `xml:"state"`
+		ActiveTask *boincReplyResultActiveTask `xml:"active_task"`
+	}
+	boincReplyResultActiveTask struct {
+		ActiveTaskState int `xml:"active_task_state"`
+		SchedulerState  int `xml:"scheduler_state"`
+	}
+)
+
+func (r *boincReplyResult) state() string {
+	if v, ok := resultStateMap[r.State]; ok {
+		return v
+	}
+	return "unknown"
+}
+
+func (r
*boincReplyResult) activeTaskState() string { + if r.ActiveTask == nil { + return "no_active_task" + } + if v, ok := activeTaskStateMap[r.ActiveTask.ActiveTaskState]; ok { + return v + } + return "unknown" +} + +func (r *boincReplyResult) schedulerState() string { + if r.ActiveTask == nil { + return "no_scheduler" + } + if v, ok := schedulerStateMap[r.ActiveTask.SchedulerState]; ok { + return v + } + return "unknown" +} + +var resultStateMap = map[int]string{ + // https://github.com/BOINC/boinc/blob/a3b79635d87423c972125efa318e4e880ad698dd/html/inc/common_defs.inc#L75 + 0: "new", + 1: "files_downloading", + 2: "files_downloaded", + 3: "compute_error", + 4: "files_uploading", + 5: "files_uploaded", + 6: "aborted", + 7: "upload_failed", +} + +var activeTaskStateMap = map[int]string{ + // https://github.com/BOINC/boinc/blob/a3b79635d87423c972125efa318e4e880ad698dd/lib/common_defs.h#L227 + 0: "uninitialized", + 1: "executing", + //2: "exited", + //3: "was_signaled", + //4: "exit_unknown", + 5: "abort_pending", + //6: "aborted", + //7: "couldnt_start", + 8: "quit_pending", + 9: "suspended", + 10: "copy_pending", +} + +var schedulerStateMap = map[int]string{ + // https://github.com/BOINC/boinc/blob/a3b79635d87423c972125efa318e4e880ad698dd/lib/common_defs.h#L56 + 0: "uninitialized", + 1: "preempted", + 2: "scheduled", +} diff --git a/src/go/plugin/go.d/modules/boinc/collect.go b/src/go/plugin/go.d/modules/boinc/collect.go new file mode 100644 index 000000000..8de25274d --- /dev/null +++ b/src/go/plugin/go.d/modules/boinc/collect.go @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package boinc + +import ( + "fmt" + "net" +) + +func (b *Boinc) collect() (map[string]int64, error) { + if b.conn == nil { + conn, err := b.establishConn() + if err != nil { + return nil, err + } + b.conn = conn + } + + results, err := b.conn.getResults() + if err != nil { + b.Cleanup() + return nil, err + } + + mx := make(map[string]int64) + + if err := b.collectResults(mx, results); err != nil { + return nil, err + } + + return mx, nil +} + +func (b *Boinc) collectResults(mx map[string]int64, results []boincReplyResult) error { + mx["total"] = int64(len(results)) + mx["active"] = 0 + + for _, v := range resultStateMap { + mx[v] = 0 + } + for _, v := range activeTaskStateMap { + mx[v] = 0 + } + for _, v := range schedulerStateMap { + mx[v] = 0 + } + + for _, r := range results { + mx[r.state()]++ + if r.ActiveTask != nil { + mx["active"]++ + mx[r.activeTaskState()]++ + mx[r.schedulerState()]++ + } + } + + return nil +} + +func (b *Boinc) establishConn() (boincConn, error) { + conn := b.newConn(b.Config, b.Logger) + + if err := conn.connect(); err != nil { + return nil, fmt.Errorf("failed to establish connection: %w", err) + } + + if host, _, err := net.SplitHostPort(b.Address); err == nil { + // for the commands we use, authentication is only required for remote connections + ip := net.ParseIP(host) + if host == "localhost" || (ip != nil && ip.IsLoopback()) { + return conn, nil + } + } + + if err := conn.authenticate(); err != nil { + return nil, fmt.Errorf("failed to authenticate: %w", err) + } + + return conn, nil +} diff --git a/src/go/plugin/go.d/modules/boinc/config_schema.json b/src/go/plugin/go.d/modules/boinc/config_schema.json new file mode 100644 index 000000000..9a9d727cb --- /dev/null +++ b/src/go/plugin/go.d/modules/boinc/config_schema.json @@ -0,0 +1,52 @@ +{ + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "BOINC collector configuration.", + "type": 
"object", + "properties": { + "update_every": { + "title": "Update every", + "description": "Data collection interval, measured in seconds.", + "type": "integer", + "minimum": 1, + "default": 1 + }, + "address": { + "title": "Address", + "description": "The IP address and port where the BOINC client listens for connections.", + "type": "string", + "default": "127.0.0.1:31416" + }, + "timeout": { + "title": "Timeout", + "description": "Timeout for establishing a connection and communication (reading and writing) in seconds.", + "type": "number", + "minimum": 0.5, + "default": 1 + }, + "password": { + "title": "Password", + "description": "The GUI RPC password for authentication.", + "type": "string", + "sensitive": true + } + }, + "required": [ + "address" + ], + "patternProperties": { + "^name$": {} + } + }, + "uiSchema": { + "uiOptions": { + "fullPage": true + }, + "timeout": { + "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)." + }, + "password": { + "ui:widget": "password" + } + } +} diff --git a/src/go/plugin/go.d/modules/boinc/integrations/boinc.md b/src/go/plugin/go.d/modules/boinc/integrations/boinc.md new file mode 100644 index 000000000..7dd91fc08 --- /dev/null +++ b/src/go/plugin/go.d/modules/boinc/integrations/boinc.md @@ -0,0 +1,229 @@ + + +# BOINC + + + + + +Plugin: go.d.plugin +Module: boinc + + + +## Overview + +This collector monitors task counts for the Berkeley Open Infrastructure Networking Computing (BOINC) distributed computing client. + + +It communicates with BOING using [GIU RPC Protocol](https://boinc.berkeley.edu/trac/wiki/GuiRpcProtocol). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects BOINC client instances running on localhost that are listening on port 31416. +On startup, it tries to collect metrics from: + +- http://127.0.0.1:31416 + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per BOINC instance + +These metrics refer to the entire monitored application. + +This scope has no labels. 
+ +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| boinc.tasks | total, active | tasks | +| boinc.tasks_per_state | new, downloading, downloaded, compute_error, uploading, uploaded, aborted, upload_failed | tasks | +| boinc.active_tasks_per_state | uninitialized, executing, abort_pending, quit_pending, suspended, copy_pending | tasks | +| boinc.active_tasks_per_scheduler_state | uninitialized, preempted, scheduled | tasks | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ boinc_total_tasks ](https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf) | boinc.tasks | average number of total tasks over the last 10 minutes | +| [ boinc_active_tasks ](https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf) | boinc.tasks | average number of active tasks over the last 10 minutes | +| [ boinc_compute_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf) | boinc.tasks_state | average number of compute errors over the last 10 minutes | +| [ boinc_upload_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf) | boinc.tasks_state | average number of failed uploads over the last 10 minutes | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/boinc.conf`. + + +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/boinc.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +
    Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 1 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| address | The IP address and port where the BOINC client listens for connections. | 127.0.0.1:31416 | yes | +| timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. | 1 | no | +| password | The GUI RPC password for authentication. | | no | + +
    + +#### Examples + +##### Basic + +A basic example configuration. + +
    Config + +```yaml +jobs: + - name: local + address: 127.0.0.1:31416 + +``` +
    + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Collecting metrics from local and remote instances. + + +
    Config + +```yaml +jobs: + - name: local + address: 127.0.0.1:31416 + + - name: remote + address: 203.0.113.0:31416 + password: somePassword + +``` +
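+
+For remote jobs, `password` must match the client's GUI RPC password (the BOINC client typically stores a generated one in `gui_rpc_auth.cfg`); loopback connections are collected without authentication. Connectivity and credentials can be sanity-checked with the `boinccmd` tool shipped with the BOINC client, assuming it is installed (host and password are the illustrative values from the example above):
+
+```bash
+# Talks to the same GUI RPC port the collector uses; an authorization
+# error here usually means a wrong or missing RPC password.
+boinccmd --host 203.0.113.0:31416 --passwd somePassword --get_tasks
+```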
    + + + +## Troubleshooting + +### Debug Mode + +**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature. + +To troubleshoot issues with the `boinc` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m boinc + ``` + +### Getting Logs + +If you're encountering problems with the `boinc` collector, follow these steps to retrieve logs and identify potential issues: + +- **Run the command** specific to your system (systemd, non-systemd, or Docker container). +- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem. + +#### System with systemd + +Use the following command to view logs generated since the last Netdata service restart: + +```bash +journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep boinc +``` + +#### System without systemd + +Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name: + +```bash +grep boinc /var/log/netdata/collector.log +``` + +**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues. + +#### Docker Container + +If your Netdata runs in a Docker container named "netdata" (replace if different), use this command: + +```bash +docker logs netdata 2>&1 | grep boinc +``` + + diff --git a/src/go/plugin/go.d/modules/boinc/metadata.yaml b/src/go/plugin/go.d/modules/boinc/metadata.yaml new file mode 100644 index 000000000..0696ac3bc --- /dev/null +++ b/src/go/plugin/go.d/modules/boinc/metadata.yaml @@ -0,0 +1,171 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-boinc + plugin_name: go.d.plugin + module_name: boinc + monitored_instance: + name: BOINC + link: https://boinc.berkeley.edu/ + categories: + - data-collection.database-servers + icon_filename: "bolt.svg" + related_resources: + integrations: + list: [] + info_provided_to_referring_integrations: + description: "" + keywords: + - boinc + - distributed + most_popular: false + overview: + data_collection: + metrics_description: | + This collector monitors task counts for the Berkeley Open Infrastructure Networking Computing (BOINC) distributed computing client. + method_description: | + It communicates with BOING using [GIU RPC Protocol](https://boinc.berkeley.edu/trac/wiki/GuiRpcProtocol). + supported_platforms: + include: [] + exclude: [] + multi_instance: true + additional_permissions: + description: "" + default_behavior: + auto_detection: + description: | + By default, it detects BOINC client instances running on localhost that are listening on port 31416. 
+            On startup, it tries to collect metrics from:
+
+            - 127.0.0.1:31416
+        limits:
+          description: ""
+        performance_impact:
+          description: ""
+    setup:
+      prerequisites:
+        list: []
+      configuration:
+        file:
+          name: go.d/boinc.conf
+        options:
+          description: |
+            The following options can be defined globally: update_every, autodetection_retry.
+          folding:
+            title: Config options
+            enabled: true
+          list:
+            - name: update_every
+              description: Data collection frequency.
+              default_value: 1
+              required: false
+            - name: autodetection_retry
+              description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+              default_value: 0
+              required: false
+            - name: address
+              description: The IP address and port where the BOINC client listens for connections.
+              default_value: 127.0.0.1:31416
+              required: true
+            - name: timeout
+              description: Connection, read, and write timeout duration in seconds. The timeout includes name resolution.
+              default_value: 1
+              required: false
+            - name: password
+              description: The GUI RPC password for authentication.
+              default_value: ""
+              required: false
+        examples:
+          folding:
+            title: Config
+            enabled: true
+          list:
+            - name: Basic
+              description: A basic example configuration.
+              config: |
+                jobs:
+                  - name: local
+                    address: 127.0.0.1:31416
+            - name: Multi-instance
+              description: |
+                > **Note**: When you define multiple jobs, their names must be unique.
+
+                Collecting metrics from local and remote instances.
+              config: |
+                jobs:
+                  - name: local
+                    address: 127.0.0.1:31416
+
+                  - name: remote
+                    address: 203.0.113.0:31416
+                    password: somePassword
+    troubleshooting:
+      problems:
+        list: []
+    alerts:
+      - name: boinc_total_tasks
+        link: https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf
+        metric: boinc.tasks
+        info: average number of total tasks over the last 10 minutes
+      - name: boinc_active_tasks
+        link: https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf
+        metric: boinc.tasks
+        info: average number of active tasks over the last 10 minutes
+      - name: boinc_compute_errors
+        link: https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf
+        metric: boinc.tasks_state
+        info: average number of compute errors over the last 10 minutes
+      - name: boinc_upload_errors
+        link: https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf
+        metric: boinc.tasks_state
+        info: average number of failed uploads over the last 10 minutes
+    metrics:
+      folding:
+        title: Metrics
+        enabled: false
+      description: ""
+      availability: []
+      scopes:
+        - name: global
+          description: "These metrics refer to the entire monitored application."
+ labels: [] + metrics: + - name: boinc.tasks + description: Overall Tasks + unit: tasks + chart_type: line + dimensions: + - name: total + - name: active + - name: boinc.tasks_per_state + description: Tasks per State + unit: tasks + chart_type: line + dimensions: + - name: new + - name: downloading + - name: downloaded + - name: compute_error + - name: uploading + - name: uploaded + - name: aborted + - name: upload_failed + - name: boinc.active_tasks_per_state + description: Active Tasks per State + unit: tasks + chart_type: line + dimensions: + - name: uninitialized + - name: executing + - name: abort_pending + - name: quit_pending + - name: suspended + - name: copy_pending + - name: boinc.active_tasks_per_scheduler_state + description: Active Tasks per Scheduler State + unit: tasks + chart_type: line + dimensions: + - name: uninitialized + - name: preempted + - name: scheduled diff --git a/src/go/plugin/go.d/modules/boinc/testdata/config.json b/src/go/plugin/go.d/modules/boinc/testdata/config.json new file mode 100644 index 000000000..76769305c --- /dev/null +++ b/src/go/plugin/go.d/modules/boinc/testdata/config.json @@ -0,0 +1,6 @@ +{ + "update_every": 123, + "address": "ok", + "timeout": 123.123, + "password": "ok" +} diff --git a/src/go/plugin/go.d/modules/boinc/testdata/config.yaml b/src/go/plugin/go.d/modules/boinc/testdata/config.yaml new file mode 100644 index 000000000..95ba970ba --- /dev/null +++ b/src/go/plugin/go.d/modules/boinc/testdata/config.yaml @@ -0,0 +1,4 @@ +update_every: 123 +address: "ok" +timeout: 123.123 +password: "ok" diff --git a/src/go/plugin/go.d/modules/boinc/testdata/get_results.xml b/src/go/plugin/go.d/modules/boinc/testdata/get_results.xml new file mode 100644 index 000000000..257cb3808 --- /dev/null +++ b/src/go/plugin/go.d/modules/boinc/testdata/get_results.xml @@ -0,0 +1,2090 @@ + + + + de_nbody_08_14_2024_v186_pal5__data__31_1722509702_1402808_1 + de_nbody_08_14_2024_v186_pal5__data__31_1722509702_1402808 + x86_64-pc-linux-gnu + 187 + mt + https://milkyway.cs.rpi.edu/milkyway/ + 5864.012000 + 490.057107 + 0 + 5 + 1725395660.000000 + 1724358860.746429 + 0.000000 + + 1724410236.238138 + 16 CPUs + + + de_nbody_08_16_2024_v186_pal5__data__32_1722509702_1722514_1 + de_nbody_08_16_2024_v186_pal5__data__32_1722509702_1722514 + x86_64-pc-linux-gnu + 187 + mt + https://milkyway.cs.rpi.edu/milkyway/ + 4914.036000 + 431.088796 + 0 + 5 + 1725395660.000000 + 1724358860.746429 + 0.000000 + + 1724410809.823943 + 16 CPUs + + + de_nbody_08_16_2024_v186_pal5__data__33_1722509702_1745544_0 + de_nbody_08_16_2024_v186_pal5__data__33_1722509702_1745544 + x86_64-pc-linux-gnu + 187 + mt + https://milkyway.cs.rpi.edu/milkyway/ + 4691.749000 + 409.659022 + 0 + 5 + 1725395659.000000 + 1724358860.746429 + 0.000000 + + 1724411260.714536 + 16 CPUs + + + de_nbody_08_22_2024_v186_pal5__data__35_1722509702_1745537_0 + de_nbody_08_22_2024_v186_pal5__data__35_1722509702_1745537 + x86_64-pc-linux-gnu + 187 + mt + https://milkyway.cs.rpi.edu/milkyway/ + 2972.012000 + 268.757779 + 0 + 5 + 1725395660.000000 + 1724358860.746429 + 0.000000 + + 1724411550.619217 + 16 CPUs + + + de_nbody_08_16_2024_v186_pal5__data__33_1722509702_1745546_0 + de_nbody_08_16_2024_v186_pal5__data__33_1722509702_1745546 + x86_64-pc-linux-gnu + 187 + mt + https://milkyway.cs.rpi.edu/milkyway/ + 0.000000 + 0.000000 + 0 + 2 + 1725395659.000000 + 1724358860.746429 + 647.619023 + + 1 + 187 + 2 + 24925 + 2 + 0.000000 + 0.349152 + 1860.163000 + 180.004807 + 154140672.000000 + 19636224.000000 + 19635277.937500 + 0.000000 + 
0.000000 + 0.000000 + 0.002442 + + 16 CPUs + + + de_nbody_08_16_2024_v186_pal5__data__33_1722509702_1745541_0 + de_nbody_08_16_2024_v186_pal5__data__33_1722509702_1745541 + x86_64-pc-linux-gnu + 187 + mt + https://milkyway.cs.rpi.edu/milkyway/ + 0.000000 + 0.000000 + 0 + 2 + 1725395659.000000 + 1724358860.746429 + 910.108536 + 16 CPUs + + + de_nbody_08_22_2024_v186_pal5__data__34_1722509702_1744291_1 + de_nbody_08_22_2024_v186_pal5__data__34_1722509702_1744291 + x86_64-pc-linux-gnu + 187 + mt + https://milkyway.cs.rpi.edu/milkyway/ + 0.000000 + 0.000000 + 0 + 2 + 1725395659.000000 + 1724358860.746429 + 1545.811780 + 16 CPUs + + + ps_240812_input_38021_39_0 + ps_240812_input_38021_39 + x86_64-pc-linux-gnu + 10218 + opencl_101_amd_linux + https://asteroidsathome.net/boinc/ + 8.984530 + 1494.941208 + 0 + 5 + 1725266485.000000 + 1724359286.328312 + 0.000000 + + 1724411439.883245 + 0.1 CPUs + 1 AMD/ATI GPU + + + ps_240812_input_38023_112_1 + ps_240812_input_38023_112 + x86_64-pc-linux-gnu + 10218 + opencl_101_amd_linux + https://asteroidsathome.net/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1725266485.000000 + 1724359286.328312 + 1450.222860 + + 1 + 10218 + 0 + 24770 + 2 + 0.000000 + 0.000507 + 4.532240 + 296.817903 + 7183269888.000000 + 1396408320.000000 + 1394201813.312912 + 0.000000 + 0.000000 + 0.000000 + 0.000001 + + 0.1 CPUs + 1 AMD/ATI GPU + + + ps_240812_input_38023_120_1 + ps_240812_input_38023_120 + x86_64-pc-linux-gnu + 10218 + opencl_101_amd_linux + https://asteroidsathome.net/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1725266485.000000 + 1724359286.328312 + 1450.959116 + 0.1 CPUs + 1 AMD/ATI GPU + + + ps_240812_input_38023_108_1 + ps_240812_input_38023_108 + x86_64-pc-linux-gnu + 10218 + opencl_101_amd_linux + https://asteroidsathome.net/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1725266485.000000 + 1724359286.328312 + 1450.959116 + 0.1 CPUs + 1 AMD/ATI GPU + + + ps_240812_input_38022_10_0 + ps_240812_input_38022_10 + x86_64-pc-linux-gnu + 10218 + opencl_101_amd_linux + https://asteroidsathome.net/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1725266485.000000 + 1724359286.328312 + 1450.959116 + 0.1 CPUs + 1 AMD/ATI GPU + + + ps_240812_input_38022_37_0 + ps_240812_input_38022_37 + x86_64-pc-linux-gnu + 10218 + opencl_101_amd_linux + https://asteroidsathome.net/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1725266485.000000 + 1724359286.328312 + 1450.959116 + 0.1 CPUs + 1 AMD/ATI GPU + + + ps_240812_input_38023_105_1 + ps_240812_input_38023_105 + x86_64-pc-linux-gnu + 10218 + opencl_101_amd_linux + https://asteroidsathome.net/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1725266485.000000 + 1724359286.328312 + 1450.959116 + 0.1 CPUs + 1 AMD/ATI GPU + + + ps_240812_input_38021_16_0 + ps_240812_input_38021_16 + x86_64-pc-linux-gnu + 10218 + opencl_101_amd_linux + https://asteroidsathome.net/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1725266485.000000 + 1724359286.328312 + 1450.959116 + 0.1 CPUs + 1 AMD/ATI GPU + + + ps_240812_input_38023_117_0 + ps_240812_input_38023_117 + x86_64-pc-linux-gnu + 10218 + opencl_101_amd_linux + https://asteroidsathome.net/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1725266485.000000 + 1724359286.328312 + 1450.959116 + 0.1 CPUs + 1 AMD/ATI GPU + + + RNM_zeta6_central_0_3_-12_26_0 + RNM_zeta6_central_0_3_-12_26 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724830273.000000 + 1724398275.411155 + 824.209337 + + 9 + 111 + 12 + 22281 + 1 + 0.000000 + 0.569222 + 1081.370000 + 1089.096061 + 1145729024.000000 + 99713024.000000 + 99713024.000000 + 0.000000 + 
0.000000 + 0.000000 + 0.000523 + + + + RNM_zeta6_central_0_3_-14_96_1 + RNM_zeta6_central_0_3_-14_96 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724830273.000000 + 1724398275.411155 + 829.389925 + + 9 + 111 + 16 + 22406 + 1 + 0.000000 + 0.564452 + 1069.810000 + 1074.853175 + 1145720832.000000 + 99414016.000000 + 99414016.000000 + 0.000000 + 0.000000 + 0.000000 + 0.000525 + + + + RNM_zeta6_central_0_3_-25_-27_1 + RNM_zeta6_central_0_3_-25_-27 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724830273.000000 + 1724398275.411155 + 1248.369952 + + 9 + 111 + 10 + 23232 + 1 + 0.000000 + 0.067766 + 90.900000 + 90.746972 + 1145708544.000000 + 100388864.000000 + 100388864.000000 + 0.000000 + 0.000000 + 0.000000 + 0.000747 + + + + RNM_zeta6_central_0_3_-24_-8_1 + RNM_zeta6_central_0_3_-24_-8 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724830273.000000 + 1724398275.411155 + 1293.212826 + + + RNM_zeta6_central_0_3_27_51_0 + RNM_zeta6_central_0_3_27_51 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724830273.000000 + 1724398275.411155 + 1293.212826 + + + RNM_zeta6_central_0_3_-13_-144_1 + RNM_zeta6_central_0_3_-13_-144 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724830273.000000 + 1724398275.411155 + 1293.212826 + + + RNM_zeta6_central_0_3_19_-102_0 + RNM_zeta6_central_0_3_19_-102 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724830273.000000 + 1724398275.411155 + 1293.212826 + + + RNM_zeta6_central_0_3_-21_116_0 + RNM_zeta6_central_0_3_-21_116 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724830273.000000 + 1724398275.411155 + 1293.212826 + + + RNM_zeta6_central_0_3_26_-143_0 + RNM_zeta6_central_0_3_26_-143 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724830273.000000 + 1724398275.411155 + 1293.212826 + + + RNM_zeta6_central_0_3_-20_64_0 + RNM_zeta6_central_0_3_-20_64 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724830273.000000 + 1724398275.411155 + 1293.212826 + + + MCM1_0223124_8550_0 + MCM1_0223124_8550 + x86_64-pc-linux-gnu + 761 + + http://www.worldcommunitygrid.org/ + 5119.133000 + 5313.731008 + 0 + 5 + 1724920136.000000 + 1724401737.328771 + 0.000000 + + 1724410727.349168 + + + MCM1_0223124_8504_1 + MCM1_0223124_8504 + x86_64-pc-linux-gnu + 761 + + http://www.worldcommunitygrid.org/ + 5069.315000 + 5268.942987 + 0 + 5 + 1724920136.000000 + 1724401737.328771 + 0.000000 + + 1724410701.088402 + + + MCM1_0223124_8582_0 + MCM1_0223124_8582 + x86_64-pc-linux-gnu + 761 + + http://www.worldcommunitygrid.org/ + 5102.360000 + 5300.827280 + 0 + 5 + 1724920136.000000 + 1724401737.328771 + 0.000000 + + 1724410965.820940 + + + RNM_zeta6_central_0_-12_43_48_0 + RNM_zeta6_central_0_-12_43_48 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724837960.000000 + 1724405962.718375 + 1293.212826 + + + RNM_zeta6_central_0_-12_33_-59_1 + RNM_zeta6_central_0_-12_33_-59 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724837960.000000 + 1724405962.718375 + 1293.212826 + + + RNM_zeta6_central_0_-12_37_104_0 + RNM_zeta6_central_0_-12_37_104 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724837960.000000 + 
1724405962.718375 + 1293.212826 + + + RNM_zeta6_central_0_-12_40_-149_1 + RNM_zeta6_central_0_-12_40_-149 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724837960.000000 + 1724405962.718375 + 1293.212826 + + + RNM_zeta6_central_0_-12_-40_-95_0 + RNM_zeta6_central_0_-12_-40_-95 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724837960.000000 + 1724405962.718375 + 1293.212826 + + + RNM_zeta6_central_0_-12_31_-44_1 + RNM_zeta6_central_0_-12_31_-44 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724837960.000000 + 1724405962.718375 + 1293.212826 + + + RNM_zeta6_central_0_-12_-42_74_0 + RNM_zeta6_central_0_-12_-42_74 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724837960.000000 + 1724405962.718375 + 1293.212826 + + + RNM_zeta6_central_0_-12_32_-9_0 + RNM_zeta6_central_0_-12_32_-9 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724837960.000000 + 1724405962.718375 + 1293.212826 + + + RNM_zeta6_central_0_-12_27_98_0 + RNM_zeta6_central_0_-12_27_98 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724837960.000000 + 1724405962.718375 + 1293.212826 + + + RNM_zeta6_central_0_-12_-40_40_0 + RNM_zeta6_central_0_-12_-40_40 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724837960.000000 + 1724405962.718375 + 1293.212826 + + + RNM_zeta6_central_0_-12_43_-132_0 + RNM_zeta6_central_0_-12_43_-132 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724837960.000000 + 1724405962.718375 + 1293.212826 + + + RNM_zeta6_central_0_-12_29_-102_1 + RNM_zeta6_central_0_-12_29_-102 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724837960.000000 + 1724405962.718375 + 1293.212826 + + + RNM_zeta6_central_0_-12_32_-69_0 + RNM_zeta6_central_0_-12_32_-69 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724837960.000000 + 1724405962.718375 + 1293.212826 + + + RNM_zeta6_central_0_-12_37_-5_1 + RNM_zeta6_central_0_-12_37_-5 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724837960.000000 + 1724405962.718375 + 1293.212826 + + + RNM_zeta6_central_0_-12_-31_-83_0 + RNM_zeta6_central_0_-12_-31_-83 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724837960.000000 + 1724405962.718375 + 1293.212826 + + + RNM_zeta6_central_0_-12_-41_-118_0 + RNM_zeta6_central_0_-12_-41_-118 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724837960.000000 + 1724405962.718375 + 1293.212826 + + + RNM_zeta6_central_0_-12_-33_-41_0 + RNM_zeta6_central_0_-12_-33_-41 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724837960.000000 + 1724405962.718375 + 1293.212826 + + + RNM_zeta6_central_0_-12_-40_-104_0 + RNM_zeta6_central_0_-12_-40_-104 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724837960.000000 + 1724405962.718375 + 1293.212826 + + + RNM_zeta6_central_0_-12_-32_-15_0 + RNM_zeta6_central_0_-12_-32_-15 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724837960.000000 + 1724405962.718375 + 1293.212826 + + + RNM_zeta6_central_0_-12_-39_146_0 + RNM_zeta6_central_0_-12_-39_146 + x86_64-pc-linux-gnu + 111 + + 
https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724837960.000000 + 1724405962.718375 + 1293.212826 + + + RNM_zeta6_central_0_-12_36_140_1 + RNM_zeta6_central_0_-12_36_140 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724837960.000000 + 1724405962.718375 + 1293.212826 + + + RNM_zeta6_central_0_-12_37_-101_0 + RNM_zeta6_central_0_-12_37_-101 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724837960.000000 + 1724405962.718375 + 1293.212826 + + + RNM_zeta6_central_0_-12_35_-50_0 + RNM_zeta6_central_0_-12_35_-50 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724837960.000000 + 1724405962.718375 + 1293.212826 + + + RNM_zeta6_central_0_-12_31_106_0 + RNM_zeta6_central_0_-12_31_106 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724837960.000000 + 1724405962.718375 + 1293.212826 + + + RNM_zeta6_central_0_-12_39_-23_1 + RNM_zeta6_central_0_-12_39_-23 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724839664.000000 + 1724407666.808453 + 1293.212826 + + + RNM_zeta6_central_0_-12_28_-91_0 + RNM_zeta6_central_0_-12_28_-91 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724839664.000000 + 1724407666.808453 + 1293.212826 + + + RNM_zeta6_central_0_-12_33_-114_1 + RNM_zeta6_central_0_-12_33_-114 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724839665.000000 + 1724407666.808453 + 1293.212826 + + + RNM_zeta6_central_0_-12_27_-133_0 + RNM_zeta6_central_0_-12_27_-133 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724839664.000000 + 1724407666.808453 + 1293.212826 + + + RNM_zeta6_central_0_-12_-33_148_0 + RNM_zeta6_central_0_-12_-33_148 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724839664.000000 + 1724407666.808453 + 1293.212826 + + + RNM_zeta6_central_0_-12_43_-95_0 + RNM_zeta6_central_0_-12_43_-95 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724839664.000000 + 1724407666.808453 + 1293.212826 + + + RNM_zeta6_central_0_-12_-31_100_0 + RNM_zeta6_central_0_-12_-31_100 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724839665.000000 + 1724407666.808453 + 1293.212826 + + + RNM_zeta6_central_0_-12_-29_0_0 + RNM_zeta6_central_0_-12_-29_0 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724839665.000000 + 1724407666.808453 + 1293.212826 + + + RNM_zeta6_central_0_-12_34_-69_1 + RNM_zeta6_central_0_-12_34_-69 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724839664.000000 + 1724407666.808453 + 1293.212826 + + + RNM_zeta6_central_0_-12_27_5_0 + RNM_zeta6_central_0_-12_27_5 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724839664.000000 + 1724407666.808453 + 1293.212826 + + + RNM_zeta6_central_0_-12_-41_-27_0 + RNM_zeta6_central_0_-12_-41_-27 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724839665.000000 + 1724407666.808453 + 1293.212826 + + + RNM_zeta6_central_0_-12_-35_58_1 + RNM_zeta6_central_0_-12_-35_58 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724839664.000000 + 1724407666.808453 + 1293.212826 + + + RNM_zeta6_central_0_-12_-36_82_1 + 
RNM_zeta6_central_0_-12_-36_82 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724839664.000000 + 1724407666.808453 + 1293.212826 + + + RNM_zeta6_central_0_-12_27_-102_1 + RNM_zeta6_central_0_-12_27_-102 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724839665.000000 + 1724407666.808453 + 1293.212826 + + + RNM_zeta6_central_0_-12_-38_0_0 + RNM_zeta6_central_0_-12_-38_0 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724839664.000000 + 1724407666.808453 + 1293.212826 + + + RNM_zeta6_central_0_-12_27_1_1 + RNM_zeta6_central_0_-12_27_1 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724839664.000000 + 1724407666.808453 + 1293.212826 + + + RNM_zeta6_central_0_-12_-40_62_1 + RNM_zeta6_central_0_-12_-40_62 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724839664.000000 + 1724407666.808453 + 1293.212826 + + + RNM_zeta6_central_0_-12_42_48_1 + RNM_zeta6_central_0_-12_42_48 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724839665.000000 + 1724407666.808453 + 1293.212826 + + + RNM_zeta6_central_0_-12_29_-82_1 + RNM_zeta6_central_0_-12_29_-82 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724839664.000000 + 1724407666.808453 + 1293.212826 + + + RNM_zeta6_central_0_-12_-31_-125_1 + RNM_zeta6_central_0_-12_-31_-125 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724839665.000000 + 1724407666.808453 + 1293.212826 + + + RNM_zeta6_central_0_-12_33_68_0 + RNM_zeta6_central_0_-12_33_68 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724839664.000000 + 1724407666.808453 + 1293.212826 + + + RNM_zeta6_central_0_-12_38_30_0 + RNM_zeta6_central_0_-12_38_30 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724839664.000000 + 1724407666.808453 + 1293.212826 + + + RNM_zeta6_central_0_-12_-36_37_0 + RNM_zeta6_central_0_-12_-36_37 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724839664.000000 + 1724407666.808453 + 1293.212826 + + + RNM_zeta6_central_0_-12_-35_120_1 + RNM_zeta6_central_0_-12_-35_120 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724839664.000000 + 1724407666.808453 + 1293.212826 + + + data_collect_v4_1723564685_973653_0 + data_collect_v4_1723564685_973653 + x86_64-pc-linux-gnu + 432 + nci + https://wuprop.boinc-af.org/ + 0.000000 + 0.000000 + 0 + 2 + 1725013336.000000 + 1724408536.967980 + 356.010534 + + 1 + 432 + 1 + 22506 + 2 + 1.859445 + 0.885246 + 1.870363 + 2746.366930 + 4716589056.000000 + 52871168.000000 + 52871168.000000 + 0.000000 + 0.000000 + 0.000000 + 0.000317 + + + + MCM1_0223146_6613_0 + MCM1_0223146_6613 + x86_64-pc-linux-gnu + 761 + + http://www.worldcommunitygrid.org/ + 0.000000 + 0.000000 + 0 + 2 + 1724927431.000000 + 1724409031.996882 + 2979.343224 + + 1 + 761 + 3 + 23089 + 2 + 1804.811000 + 0.441489 + 1929.714000 + 1989.865694 + 80437248.000000 + 37511168.000000 + 37312866.961706 + 0.000000 + 0.000000 + 0.000000 + 0.000220 + + /var/lib/boinc/projects/www.worldcommunitygrid.org/wcgrid_mcm1_gfx_7.61_x86_64-pc-linux-gnu + + /var/lib/boinc/slots/3 + + + + MCM1_0223123_4936_0 + MCM1_0223123_4936 + x86_64-pc-linux-gnu + 761 + + http://www.worldcommunitygrid.org/ + 0.000000 + 0.000000 + 0 + 2 + 
1724927431.000000 + 1724409031.996882 + 3036.944011 + + 1 + 761 + 8 + 23126 + 2 + 1804.810000 + 0.430692 + 1889.306000 + 1943.500127 + 80273408.000000 + 37036032.000000 + 36483747.223671 + 0.000000 + 0.000000 + 0.000000 + 0.000219 + + /var/lib/boinc/projects/www.worldcommunitygrid.org/wcgrid_mcm1_gfx_7.61_x86_64-pc-linux-gnu + + /var/lib/boinc/slots/8 + + + + MCM1_0223146_6617_0 + MCM1_0223146_6617 + x86_64-pc-linux-gnu + 761 + + http://www.worldcommunitygrid.org/ + 0.000000 + 0.000000 + 0 + 2 + 1724927431.000000 + 1724409031.996882 + 3036.092634 + + 1 + 761 + 9 + 23140 + 2 + 1802.867000 + 0.430851 + 1884.823000 + 1937.221575 + 80437248.000000 + 36397056.000000 + 36351190.337210 + 0.000000 + 0.000000 + 0.000000 + 0.000220 + + /var/lib/boinc/projects/www.worldcommunitygrid.org/wcgrid_mcm1_gfx_7.61_x86_64-pc-linux-gnu + + /var/lib/boinc/slots/9 + + + + MCM1_0223123_4939_0 + MCM1_0223123_4939 + x86_64-pc-linux-gnu + 761 + + http://www.worldcommunitygrid.org/ + 0.000000 + 0.000000 + 0 + 2 + 1724927431.000000 + 1724409031.996882 + 3050.418815 + + 1 + 761 + 5 + 23171 + 2 + 1804.495000 + 0.428165 + 1867.378000 + 1918.902911 + 80138240.000000 + 37175296.000000 + 37090482.473151 + 0.000000 + 0.000000 + 0.000000 + 0.000221 + + /var/lib/boinc/projects/www.worldcommunitygrid.org/wcgrid_mcm1_gfx_7.61_x86_64-pc-linux-gnu + + /var/lib/boinc/slots/5 + + + + MCM1_0223146_6616_0 + MCM1_0223146_6616 + x86_64-pc-linux-gnu + 761 + + http://www.worldcommunitygrid.org/ + 0.000000 + 0.000000 + 0 + 2 + 1724927431.000000 + 1724409031.996882 + 3072.033980 + + 1 + 761 + 6 + 23178 + 2 + 1803.626000 + 0.424113 + 1863.947000 + 1913.693516 + 80437248.000000 + 36859904.000000 + 36688394.175103 + 0.000000 + 0.000000 + 0.000000 + 0.000219 + + /var/lib/boinc/projects/www.worldcommunitygrid.org/wcgrid_mcm1_gfx_7.61_x86_64-pc-linux-gnu + + /var/lib/boinc/slots/6 + + + + MCM1_0223123_4943_0 + MCM1_0223123_4943 + x86_64-pc-linux-gnu + 761 + + http://www.worldcommunitygrid.org/ + 0.000000 + 0.000000 + 0 + 2 + 1724927431.000000 + 1724409031.996882 + 3109.372417 + + 1 + 761 + 4 + 23198 + 2 + 1804.213000 + 0.417114 + 1833.617000 + 1879.455350 + 80138240.000000 + 37842944.000000 + 37695777.228438 + 0.000000 + 0.000000 + 0.000000 + 0.000220 + + /var/lib/boinc/projects/www.worldcommunitygrid.org/wcgrid_mcm1_gfx_7.61_x86_64-pc-linux-gnu + + /var/lib/boinc/slots/4 + + + + RNM_zeta6_central_0_-12_-32_144_0 + RNM_zeta6_central_0_-12_-32_144 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724842193.000000 + 1724410194.993412 + 1293.212826 + + + RNM_zeta6_central_0_-12_36_-98_1 + RNM_zeta6_central_0_-12_36_-98 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724842193.000000 + 1724410194.993412 + 1293.212826 + + + RNM_zeta6_central_0_-12_-36_-74_1 + RNM_zeta6_central_0_-12_-36_-74 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724842193.000000 + 1724410194.993412 + 1293.212826 + + + RNM_zeta6_central_0_-12_29_-23_0 + RNM_zeta6_central_0_-12_29_-23 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724842193.000000 + 1724410194.993412 + 1293.212826 + + + RNM_zeta6_central_0_-12_34_72_1 + RNM_zeta6_central_0_-12_34_72 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724842193.000000 + 1724410194.993412 + 1293.212826 + + + RNM_zeta6_central_0_-12_28_-61_1 + RNM_zeta6_central_0_-12_28_-61 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 
0.000000 + 0.000000 + 0 + 2 + 1724842193.000000 + 1724410194.993412 + 1293.212826 + + + RNM_zeta6_central_0_-12_41_-57_1 + RNM_zeta6_central_0_-12_41_-57 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724842193.000000 + 1724410194.993412 + 1293.212826 + + + RNM_zeta6_central_0_-12_28_-124_1 + RNM_zeta6_central_0_-12_28_-124 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724842193.000000 + 1724410194.993412 + 1293.212826 + + + RNM_zeta6_central_0_-12_-43_-96_0 + RNM_zeta6_central_0_-12_-43_-96 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724842193.000000 + 1724410194.993412 + 1293.212826 + + + RNM_zeta6_central_0_-12_-39_-13_1 + RNM_zeta6_central_0_-12_-39_-13 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724842193.000000 + 1724410194.993412 + 1293.212826 + + + RNM_zeta6_central_0_-12_38_16_0 + RNM_zeta6_central_0_-12_38_16 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724842193.000000 + 1724410194.993412 + 1293.212826 + + + RNM_zeta6_central_0_-12_-37_-91_1 + RNM_zeta6_central_0_-12_-37_-91 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724842193.000000 + 1724410194.993412 + 1293.212826 + + + RNM_zeta6_central_0_-12_30_85_0 + RNM_zeta6_central_0_-12_30_85 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724842193.000000 + 1724410194.993412 + 1293.212826 + + + RNM_zeta6_central_0_-12_41_-102_0 + RNM_zeta6_central_0_-12_41_-102 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724842193.000000 + 1724410194.993412 + 1293.212826 + + + RNM_zeta6_central_0_-12_34_83_1 + RNM_zeta6_central_0_-12_34_83 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724842193.000000 + 1724410194.993412 + 1293.212826 + + + RNM_zeta6_central_0_-12_27_-113_1 + RNM_zeta6_central_0_-12_27_-113 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724842193.000000 + 1724410194.993412 + 1293.212826 + + + RNM_zeta6_central_0_-12_-35_65_0 + RNM_zeta6_central_0_-12_-35_65 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724842193.000000 + 1724410194.993412 + 1293.212826 + + + RNM_zeta6_central_0_-12_33_134_1 + RNM_zeta6_central_0_-12_33_134 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724842193.000000 + 1724410194.993412 + 1293.212826 + + + RNM_zeta6_central_0_-12_-29_141_1 + RNM_zeta6_central_0_-12_-29_141 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724842193.000000 + 1724410194.993412 + 1293.212826 + + + RNM_zeta6_central_0_-12_-32_-97_0 + RNM_zeta6_central_0_-12_-32_-97 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724842193.000000 + 1724410194.993412 + 1293.212826 + + + RNM_zeta6_central_0_-12_43_65_0 + RNM_zeta6_central_0_-12_43_65 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724842193.000000 + 1724410194.993412 + 1293.212826 + + + RNM_zeta6_central_0_-12_36_-93_1 + RNM_zeta6_central_0_-12_36_-93 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724842193.000000 + 1724410194.993412 + 1293.212826 + + + RNM_zeta6_central_0_-12_-35_24_0 + RNM_zeta6_central_0_-12_-35_24 
+ x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724842193.000000 + 1724410194.993412 + 1293.212826 + + + RNM_zeta6_central_0_-12_32_-143_1 + RNM_zeta6_central_0_-12_32_-143 + x86_64-pc-linux-gnu + 111 + + https://rnma.xyz/boinc/ + 0.000000 + 0.000000 + 0 + 2 + 1724842193.000000 + 1724410194.993412 + 1293.212826 + + + MCM1_0223146_8035_1 + MCM1_0223146_8035 + x86_64-pc-linux-gnu + 761 + + http://www.worldcommunitygrid.org/ + 0.000000 + 0.000000 + 0 + 2 + 1724928911.000000 + 1724410512.731151 + 4220.263869 + + 1 + 761 + 7 + 24133 + 2 + 603.406000 + 0.208865 + 905.882400 + 925.782978 + 80302080.000000 + 36491264.000000 + 36826641.749235 + 0.000000 + 0.000000 + 0.000000 + 0.000221 + + /var/lib/boinc/projects/www.worldcommunitygrid.org/wcgrid_mcm1_gfx_7.61_x86_64-pc-linux-gnu + + /var/lib/boinc/slots/7 + + + + MCM1_0223122_7935_0 + MCM1_0223122_7935 + x86_64-pc-linux-gnu + 761 + + http://www.worldcommunitygrid.org/ + 0.000000 + 0.000000 + 0 + 2 + 1724928912.000000 + 1724410512.731151 + 4463.616912 + + 1 + 761 + 11 + 24388 + 2 + 603.694900 + 0.163246 + 715.765100 + 728.616999 + 80138240.000000 + 36429824.000000 + 36382228.920719 + 0.000000 + 0.000000 + 0.000000 + 0.000218 + + /var/lib/boinc/projects/www.worldcommunitygrid.org/wcgrid_mcm1_gfx_7.61_x86_64-pc-linux-gnu + + /var/lib/boinc/slots/11 + + + + MCM1_0223146_8059_0 + MCM1_0223146_8059 + x86_64-pc-linux-gnu + 761 + + http://www.worldcommunitygrid.org/ + 0.000000 + 0.000000 + 0 + 2 + 1724928912.000000 + 1724410512.731151 + 5334.443471 + + + MCM1_0223122_7942_1 + MCM1_0223122_7942 + x86_64-pc-linux-gnu + 761 + + http://www.worldcommunitygrid.org/ + 0.000000 + 0.000000 + 0 + 2 + 1724928912.000000 + 1724410512.731151 + 5334.443471 + + + MCM1_0223121_5839_0 + MCM1_0223121_5839 + x86_64-pc-linux-gnu + 761 + + http://www.worldcommunitygrid.org/ + 0.000000 + 0.000000 + 0 + 2 + 1724928912.000000 + 1724410512.731151 + 5334.443471 + + + MCM1_0223121_5845_0 + MCM1_0223121_5845 + x86_64-pc-linux-gnu + 761 + + http://www.worldcommunitygrid.org/ + 0.000000 + 0.000000 + 0 + 2 + 1724928912.000000 + 1724410512.731151 + 5334.443471 + + + MCM1_0223122_7939_1 + MCM1_0223122_7939 + x86_64-pc-linux-gnu + 761 + + http://www.worldcommunitygrid.org/ + 0.000000 + 0.000000 + 0 + 2 + 1724928912.000000 + 1724410512.731151 + 5334.443471 + + + MCM1_0223121_5843_1 + MCM1_0223121_5843 + x86_64-pc-linux-gnu + 761 + + http://www.worldcommunitygrid.org/ + 0.000000 + 0.000000 + 0 + 2 + 1724928912.000000 + 1724410512.731151 + 5334.443471 + + + MCM1_0223122_7931_1 + MCM1_0223122_7931 + x86_64-pc-linux-gnu + 761 + + http://www.worldcommunitygrid.org/ + 0.000000 + 0.000000 + 0 + 2 + 1724928912.000000 + 1724410512.731151 + 5334.443471 + + + MCM1_0223122_7933_0 + MCM1_0223122_7933 + x86_64-pc-linux-gnu + 761 + + http://www.worldcommunitygrid.org/ + 0.000000 + 0.000000 + 0 + 2 + 1724928912.000000 + 1724410512.731151 + 5334.443471 + + + diff --git a/src/go/plugin/go.d/modules/boinc/testdata/get_results_no_tasks.xml b/src/go/plugin/go.d/modules/boinc/testdata/get_results_no_tasks.xml new file mode 100644 index 000000000..e8306be9e --- /dev/null +++ b/src/go/plugin/go.d/modules/boinc/testdata/get_results_no_tasks.xml @@ -0,0 +1,3 @@ + + + diff --git a/src/go/plugin/go.d/modules/cassandra/cassandra.go b/src/go/plugin/go.d/modules/cassandra/cassandra.go index 5352703df..d96f16413 100644 --- a/src/go/plugin/go.d/modules/cassandra/cassandra.go +++ b/src/go/plugin/go.d/modules/cassandra/cassandra.go @@ -5,9 +5,11 @@ package cassandra import ( _ "embed" 
"errors" + "fmt" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -29,12 +31,12 @@ func init() { func New() *Cassandra { return &Cassandra{ Config: Config{ - HTTP: web.HTTP{ - Request: web.Request{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{ URL: "http://127.0.0.1:7072/metrics", }, - Client: web.Client{ - Timeout: web.Duration(time.Second * 5), + ClientConfig: web.ClientConfig{ + Timeout: confopt.Duration(time.Second * 5), }, }, }, @@ -45,8 +47,8 @@ func New() *Cassandra { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - web.HTTP `yaml:",inline" json:""` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + web.HTTPConfig `yaml:",inline" json:""` } type Cassandra struct { @@ -68,14 +70,12 @@ func (c *Cassandra) Configuration() any { func (c *Cassandra) Init() error { if err := c.validateConfig(); err != nil { - c.Errorf("error on validating config: %v", err) - return err + return fmt.Errorf("error on validating config: %v", err) } prom, err := c.initPrometheusClient() if err != nil { - c.Errorf("error on init prometheus client: %v", err) - return err + return fmt.Errorf("error on init prometheus client: %v", err) } c.prom = prom @@ -85,7 +85,6 @@ func (c *Cassandra) Init() error { func (c *Cassandra) Check() error { mx, err := c.collect() if err != nil { - c.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/cassandra/cassandra_test.go b/src/go/plugin/go.d/modules/cassandra/cassandra_test.go index 0b6af9362..30f196985 100644 --- a/src/go/plugin/go.d/modules/cassandra/cassandra_test.go +++ b/src/go/plugin/go.d/modules/cassandra/cassandra_test.go @@ -47,7 +47,7 @@ func TestCassandra_Init(t *testing.T) { }{ "success if 'url' is set": { config: Config{ - HTTP: web.HTTP{Request: web.Request{URL: "http://127.0.0.1:7072"}}}, + HTTPConfig: web.HTTPConfig{RequestConfig: web.RequestConfig{URL: "http://127.0.0.1:7072"}}}, }, "success on default config": { wantFail: false, @@ -55,7 +55,7 @@ func TestCassandra_Init(t *testing.T) { }, "fails if 'url' is unset": { wantFail: true, - config: Config{HTTP: web.HTTP{Request: web.Request{URL: ""}}}, + config: Config{HTTPConfig: web.HTTPConfig{RequestConfig: web.RequestConfig{URL: ""}}}, }, } diff --git a/src/go/plugin/go.d/modules/cassandra/collect.go b/src/go/plugin/go.d/modules/cassandra/collect.go index 08cdfbe94..0abbd6bdb 100644 --- a/src/go/plugin/go.d/modules/cassandra/collect.go +++ b/src/go/plugin/go.d/modules/cassandra/collect.go @@ -4,8 +4,9 @@ package cassandra import ( "errors" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus" "strings" + + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus" ) const ( diff --git a/src/go/plugin/go.d/modules/cassandra/config_schema.json b/src/go/plugin/go.d/modules/cassandra/config_schema.json index c4ca5f4f9..d3e93a120 100644 --- a/src/go/plugin/go.d/modules/cassandra/config_schema.json +++ b/src/go/plugin/go.d/modules/cassandra/config_schema.json @@ -105,7 +105,6 @@ "required": [ "url" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/cassandra/init.go b/src/go/plugin/go.d/modules/cassandra/init.go index 1a74fdf9b..bdb9fc8e8 100644 --- a/src/go/plugin/go.d/modules/cassandra/init.go +++ 
b/src/go/plugin/go.d/modules/cassandra/init.go @@ -17,9 +17,9 @@ func (c *Cassandra) validateConfig() error { } func (c *Cassandra) initPrometheusClient() (prometheus.Prometheus, error) { - client, err := web.NewHTTPClient(c.Client) + client, err := web.NewHTTPClient(c.ClientConfig) if err != nil { return nil, err } - return prometheus.New(client, c.Request), nil + return prometheus.New(client, c.RequestConfig), nil } diff --git a/src/go/plugin/go.d/modules/cassandra/integrations/cassandra.md b/src/go/plugin/go.d/modules/cassandra/integrations/cassandra.md index 61c4d1439..79bb042a7 100644 --- a/src/go/plugin/go.d/modules/cassandra/integrations/cassandra.md +++ b/src/go/plugin/go.d/modules/cassandra/integrations/cassandra.md @@ -151,8 +151,8 @@ To configure Cassandra with the [JMX Exporter](https://github.com/prometheus/jmx The configuration file name for this integration is `go.d/cassandra.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/ceph/README.md b/src/go/plugin/go.d/modules/ceph/README.md new file mode 120000 index 000000000..654248b70 --- /dev/null +++ b/src/go/plugin/go.d/modules/ceph/README.md @@ -0,0 +1 @@ +integrations/ceph.md \ No newline at end of file diff --git a/src/go/plugin/go.d/modules/ceph/api.go b/src/go/plugin/go.d/modules/ceph/api.go new file mode 100644 index 000000000..1a51f8ed2 --- /dev/null +++ b/src/go/plugin/go.d/modules/ceph/api.go @@ -0,0 +1,126 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package ceph + +import ( + "net/url" +) + +// https://docs.ceph.com/en/reef/mgr/ceph_api/ + +const ( + urlPathApiAuth = "/api/auth" + urlPathApiAuthCheck = "/api/auth/check" + urlPathApiAuthLogout = "/api/auth/logout" + urlPathApiHealthMinimal = "/api/health/minimal" + urlPathApiMonitor = "/api/monitor" + urlPathApiOsd = "/api/osd" + urlPathApiPool = "/api/pool" +) + +var ( + urlQueryApiPool = url.Values{"stats": {"true"}}.Encode() +) + +const ( + hdrAcceptVersion = "application/vnd.ceph.api.v1.0+json" + hdrContentTypeJson = "application/json" +) + +type apiHealthMinimalResponse struct { + Health struct { + Status string `json:"status"` + } `json:"health"` + MonStatus struct { + MonMap struct { + Mons []any `json:"mons"` + } `json:"monmap"` + } `json:"mon_status"` + ScrubStatus string `json:"scrub_status"` + OsdMap struct { + Osds []struct { + In int64 `json:"in"` + Up int64 `json:"up"` + } `json:"osds"` + } `json:"osd_map"` + PgInfo struct { + ObjectStats struct { + NumObjects int64 `json:"num_objects"` + NumObjectsDegraded int64 `json:"num_objects_degraded"` + NumObjectsMisplaced int64 `json:"num_objects_misplaced"` + NumObjectsUnfound int64 `json:"num_objects_unfound"` + } `json:"object_stats"` + Statuses map[string]int64 `json:"statuses"` + PgsPerOsd float64 `json:"pgs_per_osd"` + } `json:"pg_info"` + Pools []any `json:"pools"` + MgrMap struct { + ActiveName string `json:"active_name"` + Standbys []struct { + Gid int `json:"gid"` + } `json:"standbys"` + } 
`json:"mgr_map"` + Df struct { + Stats struct { + TotalAvailBytes int64 `json:"total_avail_bytes"` + TotalBytes int64 `json:"total_bytes"` + TotalUsedRawBytes int64 `json:"total_used_raw_bytes"` + } `json:"stats"` + } `json:"df"` + ClientPerf struct { + ReadBytesSec float64 `json:"read_bytes_sec"` + ReadOpPerSec float64 `json:"read_op_per_sec"` + WriteBytesSec float64 `json:"write_bytes_sec"` + WriteOpPerSec float64 `json:"write_op_per_sec"` + RecoveringBytesPerSec float64 `json:"recovering_bytes_per_sec"` + } `json:"client_perf"` + Hosts int64 `json:"hosts"` + Rgw int64 `json:"rgw"` + IscsiDaemons struct { + Up int64 `json:"up"` + Down int64 `json:"down"` + } `json:"iscsi_daemons"` +} + +type apiOsdResponse struct { + UUID string `json:"uuid"` + ID int64 `json:"id"` + Up int64 `json:"up"` + In int64 `json:"in"` + OsdStats struct { + Statfs struct { + Total int64 `json:"total"` + Available int64 `json:"available"` + } `json:"statfs"` + PerfStat struct { + CommitLatencyMs float64 `json:"commit_latency_ms"` + ApplyLatencyMs float64 `json:"apply_latency_ms"` + } `json:"perf_stat"` + } `json:"osd_stats"` + Stats struct { + OpW float64 `json:"op_w"` + OpInBytes float64 `json:"op_in_bytes"` + OpR float64 `json:"op_r"` + OpOutBytes float64 `json:"op_out_bytes"` + } `json:"stats"` + Tree struct { + DeviceClass string `json:"device_class"` + Type string `json:"type"` + Name string `json:"name"` + } `json:"tree"` +} + +type apiPoolResponse struct { + PoolName string `json:"pool_name"` + Stats struct { + Stored struct{ Latest float64 } `json:"stored"` + Objects struct{ Latest float64 } `json:"objects"` + AvailRaw struct{ Latest float64 } `json:"avail_raw"` + BytesUsed struct{ Latest float64 } `json:"bytes_used"` + PercentUsed struct{ Latest float64 } `json:"percent_used"` + Reads struct{ Latest float64 } `json:"rd"` + ReadBytes struct{ Latest float64 } `json:"rd_bytes"` + Writes struct{ Latest float64 } `json:"wr"` + WrittenBytes struct{ Latest float64 } `json:"wr_bytes"` + } `json:"stats"` +} diff --git a/src/go/plugin/go.d/modules/ceph/auth.go b/src/go/plugin/go.d/modules/ceph/auth.go new file mode 100644 index 000000000..ed7cc0b33 --- /dev/null +++ b/src/go/plugin/go.d/modules/ceph/auth.go @@ -0,0 +1,139 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package ceph + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "net/http" + "net/url" + + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" +) + +type ( + authLoginResp struct { + Token string `json:"token"` + } + authCheckResp struct { + Username string `json:"username"` + Permissions map[string]any `json:"permissions"` + } +) + +func (c *Ceph) authLogin() (string, error) { + // https://docs.ceph.com/en/reef/mgr/ceph_api/#post--api-auth + + req, err := func() (*http.Request, error) { + var credentials = struct { + Username string `json:"username"` + Password string `json:"password"` + }{ + Username: c.Username, + Password: c.Password, + } + + bs, err := json.Marshal(credentials) + if err != nil { + return nil, err + } + + req, err := web.NewHTTPRequestWithPath(c.RequestConfig, urlPathApiAuth) + if err != nil { + return nil, err + } + + body := bytes.NewReader(bs) + + req.Body = io.NopCloser(body) + req.ContentLength = int64(body.Len()) + req.Method = http.MethodPost + req.Header.Set("Accept", hdrAcceptVersion) + req.Header.Set("Content-Type", hdrContentTypeJson) + + return req, nil + }() + if err != nil { + return "", err + } + + var tok authLoginResp + + if err := c.webClient(201).RequestJSON(req, &tok); err != nil { + 
return "", err + } + + if tok.Token == "" { + return "", errors.New("empty token") + } + + return tok.Token, nil +} + +func (c *Ceph) authCheck() (bool, error) { + // https://docs.ceph.com/en/reef/mgr/ceph_api/#post--api-auth-check + if c.token == "" { + return false, nil + } + + req, err := func() (*http.Request, error) { + bs, err := json.Marshal(authLoginResp{Token: c.token}) + if err != nil { + return nil, err + } + + req, err := web.NewHTTPRequestWithPath(c.RequestConfig, urlPathApiAuthCheck) + if err != nil { + return nil, err + } + + body := bytes.NewReader(bs) + + req.Body = io.NopCloser(body) + req.ContentLength = int64(body.Len()) + req.URL.RawQuery = url.Values{"token": {c.token}}.Encode() // TODO: it seems not necessary? + req.Method = http.MethodPost + req.Header.Set("Accept", hdrAcceptVersion) + req.Header.Set("Content-Type", hdrContentTypeJson) + return req, nil + }() + if err != nil { + return false, err + } + + var resp authCheckResp + + if err := c.webClient().RequestJSON(req, &resp); err != nil { + return false, err + } + + return resp.Username != "", nil +} + +func (c *Ceph) authLogout() error { + // https://docs.ceph.com/en/reef/mgr/ceph_api/#post--api-auth-logout + + if c.token == "" { + return nil + } + defer func() { c.token = "" }() + + req, err := func() (*http.Request, error) { + req, err := web.NewHTTPRequestWithPath(c.RequestConfig, urlPathApiAuthLogout) + if err != nil { + return nil, err + } + + req.Method = http.MethodPost + req.Header.Set("Accept", hdrAcceptVersion) + req.Header.Set("Authorization", "Bearer "+c.token) + return req, nil + }() + if err != nil { + return err + } + + return c.webClient().Request(req, nil) +} diff --git a/src/go/plugin/go.d/modules/ceph/ceph.go b/src/go/plugin/go.d/modules/ceph/ceph.go new file mode 100644 index 000000000..be0676356 --- /dev/null +++ b/src/go/plugin/go.d/modules/ceph/ceph.go @@ -0,0 +1,131 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package ceph + +import ( + _ "embed" + "errors" + "fmt" + "net/http" + "sync" + "time" + + "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/tlscfg" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("ceph", module.Creator{ + JobConfigSchema: configSchema, + Defaults: module.Defaults{ + UpdateEvery: 10, + }, + Create: func() module.Module { return New() }, + Config: func() any { return &Config{} }, + }) +} + +func New() *Ceph { + return &Ceph{ + Config: Config{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{ + URL: "https://127.0.0.1:8443", + }, + ClientConfig: web.ClientConfig{ + Timeout: confopt.Duration(time.Second * 2), + TLSConfig: tlscfg.TLSConfig{ + InsecureSkipVerify: true, + }, + }, + }, + }, + charts: &module.Charts{}, + seenPools: make(map[string]bool), + seenOsds: make(map[string]bool), + } +} + +type Config struct { + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + web.HTTPConfig `yaml:",inline" json:""` +} + +type Ceph struct { + module.Base + Config `yaml:",inline" json:""` + + charts *module.Charts + addClusterChartsOnce sync.Once + + httpClient *http.Client + + token string + + fsid string // a unique identifier for the cluster + + seenPools map[string]bool + seenOsds map[string]bool +} + +func (c *Ceph) Configuration() any { + return c.Config +} + +func (c *Ceph) Init() error 
{ + if err := c.validateConfig(); err != nil { + return fmt.Errorf("invalid config: %v", err) + } + + httpClient, err := web.NewHTTPClient(c.ClientConfig) + if err != nil { + return fmt.Errorf("create http client: %v", err) + } + c.httpClient = httpClient + + return nil +} + +func (c *Ceph) Check() error { + mx, err := c.collect() + if err != nil { + return err + } + + if len(mx) == 0 { + return errors.New("no metrics collected") + } + + return nil +} + +func (c *Ceph) Charts() *module.Charts { + return c.charts +} + +func (c *Ceph) Collect() map[string]int64 { + mx, err := c.collect() + if err != nil { + c.Error(err) + } + + if len(mx) == 0 { + return nil + } + + return mx +} + +func (c *Ceph) Cleanup() { + if c.httpClient != nil { + if err := c.authLogout(); err != nil { + c.Warningf("failed to logout: %v", err) + } + c.httpClient.CloseIdleConnections() + } +} diff --git a/src/go/plugin/go.d/modules/ceph/ceph_test.go b/src/go/plugin/go.d/modules/ceph/ceph_test.go new file mode 100644 index 000000000..1110d5ea6 --- /dev/null +++ b/src/go/plugin/go.d/modules/ceph/ceph_test.go @@ -0,0 +1,331 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package ceph + +import ( + "bytes" + "encoding/json" + "io" + "net/http" + "net/http/httptest" + "os" + "sync/atomic" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" + + "github.com/stretchr/testify/require" +) + +var ( + dataConfigJSON, _ = os.ReadFile("testdata/config.json") + dataConfigYAML, _ = os.ReadFile("testdata/config.yaml") + + dataVer16ApiHealthMinimal, _ = os.ReadFile("testdata/v16.2.15/api_health_minimal.json") + dataVer16ApiOsd, _ = os.ReadFile("testdata/v16.2.15/api_osd.json") + dataVer16ApiPoolStats, _ = os.ReadFile("testdata/v16.2.15/api_pool_stats.json") + dataVer16ApiMonitor, _ = os.ReadFile("testdata/v16.2.15/api_monitor.json") +) + +func Test_testDataIsValid(t *testing.T) { + for name, data := range map[string][]byte{ + "dataConfigJSON": dataConfigJSON, + "dataConfigYAML": dataConfigYAML, + "dataVer16ApiHealthMinimal": dataVer16ApiHealthMinimal, + "dataVer16ApiOsd": dataVer16ApiOsd, + "dataVer16ApiPoolStats": dataVer16ApiPoolStats, + "dataVer16ApiMonitor": dataVer16ApiMonitor, + } { + require.NotNil(t, data, name) + } +} + +func TestCeph_Configuration(t *testing.T) { + module.TestConfigurationSerialize(t, &Ceph{}, dataConfigJSON, dataConfigYAML) +} + +func TestCeph_Init(t *testing.T) { + tesceph := map[string]struct { + wantFail bool + config Config + }{ + "fails with default": { + wantFail: true, + config: New().Config, + }, + "fail when URL not set": { + wantFail: true, + config: Config{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{URL: ""}, + }, + }, + }, + } + + for name, test := range tesceph { + t.Run(name, func(t *testing.T) { + ceph := New() + ceph.Config = test.config + + if test.wantFail { + assert.Error(t, ceph.Init()) + } else { + assert.NoError(t, ceph.Init()) + } + }) + } +} + +func TestCeph_Check(t *testing.T) { + tests := map[string]struct { + wantFail bool + prepare func(t *testing.T) (ceph *Ceph, cleanup func()) + }{ + "success with valid API key": { + wantFail: false, + prepare: caseOk, + }, + "fail on connection refused": { + wantFail: true, + prepare: caseConnectionRefused, + }, + "fail on 404 response": { + wantFail: true, + prepare: case404, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + ceph, cleanup := test.prepare(t) + 
defer cleanup() + + if test.wantFail { + assert.Error(t, ceph.Check()) + } else { + assert.NoError(t, ceph.Check()) + } + }) + } +} + +func TestCeph_Charts(t *testing.T) { + assert.NotNil(t, New().Charts()) +} + +func TestCeph_Collect(t *testing.T) { + tests := map[string]struct { + prepare func(t *testing.T) (ceph *Ceph, cleanup func()) + wantNumOfCharts int + wantMetrics map[string]int64 + }{ + "success with valid API key": { + prepare: caseOk, + wantNumOfCharts: len(clusterCharts) + len(osdChartsTmpl)*2 + len(poolChartsTmpl)*2, + wantMetrics: map[string]int64{ + "client_perf_read_bytes_sec": 1, + "client_perf_read_op_per_sec": 1, + "client_perf_recovering_bytes_per_sec": 1, + "client_perf_write_bytes_sec": 1, + "client_perf_write_op_per_sec": 1, + "health_err": 0, + "health_ok": 0, + "health_warn": 1, + "hosts_num": 1, + "iscsi_daemons_down_num": 1, + "iscsi_daemons_num": 2, + "iscsi_daemons_up_num": 1, + "mgr_active_num": 1, + "mgr_standby_num": 1, + "monitors_num": 1, + "objects_degraded_num": 1, + "objects_healthy_num": 3, + "objects_misplaced_num": 1, + "objects_num": 6, + "objects_unfound_num": 1, + "osd_f5bbbe9d-e85b-419c-af5a-a57e2527cad3_apply_latency_ms": 1, + "osd_f5bbbe9d-e85b-419c-af5a-a57e2527cad3_commit_latency_ms": 1, + "osd_f5bbbe9d-e85b-419c-af5a-a57e2527cad3_read_bytes": 1, + "osd_f5bbbe9d-e85b-419c-af5a-a57e2527cad3_read_ops": 1, + "osd_f5bbbe9d-e85b-419c-af5a-a57e2527cad3_size_bytes": 68715282432, + "osd_f5bbbe9d-e85b-419c-af5a-a57e2527cad3_space_avail_bytes": 68410753024, + "osd_f5bbbe9d-e85b-419c-af5a-a57e2527cad3_space_used_bytes": 304529408, + "osd_f5bbbe9d-e85b-419c-af5a-a57e2527cad3_status_down": 0, + "osd_f5bbbe9d-e85b-419c-af5a-a57e2527cad3_status_in": 1, + "osd_f5bbbe9d-e85b-419c-af5a-a57e2527cad3_status_out": 0, + "osd_f5bbbe9d-e85b-419c-af5a-a57e2527cad3_status_up": 1, + "osd_f5bbbe9d-e85b-419c-af5a-a57e2527cad3_write_ops": 1, + "osd_f5bbbe9d-e85b-419c-af5a-a57e2527cad3_written_bytes": 1, + "osd_f78537db-9b18-4c62-a24f-a4344fc28de7_apply_latency_ms": 1, + "osd_f78537db-9b18-4c62-a24f-a4344fc28de7_commit_latency_ms": 1, + "osd_f78537db-9b18-4c62-a24f-a4344fc28de7_read_bytes": 1, + "osd_f78537db-9b18-4c62-a24f-a4344fc28de7_read_ops": 1, + "osd_f78537db-9b18-4c62-a24f-a4344fc28de7_size_bytes": 107369988096, + "osd_f78537db-9b18-4c62-a24f-a4344fc28de7_space_avail_bytes": 107065458688, + "osd_f78537db-9b18-4c62-a24f-a4344fc28de7_space_used_bytes": 304529408, + "osd_f78537db-9b18-4c62-a24f-a4344fc28de7_status_down": 0, + "osd_f78537db-9b18-4c62-a24f-a4344fc28de7_status_in": 1, + "osd_f78537db-9b18-4c62-a24f-a4344fc28de7_status_out": 0, + "osd_f78537db-9b18-4c62-a24f-a4344fc28de7_status_up": 1, + "osd_f78537db-9b18-4c62-a24f-a4344fc28de7_write_ops": 1, + "osd_f78537db-9b18-4c62-a24f-a4344fc28de7_written_bytes": 1, + "osds_down_num": 0, + "osds_in_num": 2, + "osds_num": 2, + "osds_out_num": 0, + "osds_up_num": 2, + "pg_status_category_clean": 1, + "pg_status_category_unknown": 0, + "pg_status_category_warning": 1, + "pg_status_category_working": 0, + "pgs_num": 2, + "pgs_per_osd": 2, + "pool_device_health_metrics_objects": 3, + "pool_device_health_metrics_read_bytes": 1, + "pool_device_health_metrics_read_ops": 1, + "pool_device_health_metrics_size": 166530172973, + "pool_device_health_metrics_space_avail_bytes": 166530172972, + "pool_device_health_metrics_space_used_bytes": 1, + "pool_device_health_metrics_space_utilization": 1000, + "pool_device_health_metrics_write_ops": 3, + "pool_device_health_metrics_written_bytes": 6144, + "pool_mySuperPool_objects": 1, + 
"pool_mySuperPool_read_bytes": 1, + "pool_mySuperPool_read_ops": 1, + "pool_mySuperPool_size": 166530172973, + "pool_mySuperPool_space_avail_bytes": 166530172972, + "pool_mySuperPool_space_used_bytes": 1, + "pool_mySuperPool_space_utilization": 1000, + "pool_mySuperPool_write_ops": 1, + "pool_mySuperPool_written_bytes": 1, + "pools_num": 2, + "raw_capacity_avail_bytes": 175476178944, + "raw_capacity_used_bytes": 609091584, + "raw_capacity_utilization": 345, + "rgw_num": 1, + "scrub_status_active": 0, + "scrub_status_disabled": 0, + "scrub_status_inactive": 1, + }, + }, + "fail on connection refused": { + prepare: caseConnectionRefused, + wantMetrics: nil, + }, + "fail on 404 response": { + prepare: case404, + wantMetrics: nil, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + ceph, cleanup := test.prepare(t) + defer cleanup() + + _ = ceph.Check() + + mx := ceph.Collect() + + require.Equal(t, test.wantMetrics, mx) + + if len(test.wantMetrics) > 0 { + assert.Equal(t, test.wantNumOfCharts, len(*ceph.Charts()), "want charts") + + module.TestMetricsHasAllChartsDims(t, ceph.Charts(), mx) + } + }) + } +} + +func caseOk(t *testing.T) (*Ceph, func()) { + t.Helper() + + loginResp, _ := json.Marshal(authLoginResp{Token: "secret_token"}) + checkResp, _ := json.Marshal(authCheckResp{Username: "username"}) + var loggedIn atomic.Bool + + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + switch r.Method { + case http.MethodPost: + switch r.URL.Path { + case urlPathApiAuth: + _, _ = w.Write(loginResp) + w.WriteHeader(http.StatusCreated) + loggedIn.Store(true) + case urlPathApiAuthCheck: + bs, _ := io.ReadAll(r.Body) + if bytes.Equal(bs, loginResp) { + _, _ = w.Write(checkResp) + } else { + w.WriteHeader(http.StatusNotFound) + } + case urlPathApiAuthLogout: + w.WriteHeader(http.StatusOK) + loggedIn.Store(false) + default: + w.WriteHeader(http.StatusNotFound) + } + case http.MethodGet: + if !loggedIn.Load() { + w.WriteHeader(http.StatusUnauthorized) + return + } + switch r.URL.Path { + case urlPathApiHealthMinimal: + _, _ = w.Write(dataVer16ApiHealthMinimal) + case urlPathApiOsd: + _, _ = w.Write(dataVer16ApiOsd) + case urlPathApiPool: + if r.URL.RawQuery != urlQueryApiPool { + w.WriteHeader(http.StatusNotFound) + } else { + _, _ = w.Write(dataVer16ApiPoolStats) + } + case urlPathApiMonitor: + _, _ = w.Write(dataVer16ApiMonitor) + default: + w.WriteHeader(http.StatusNotFound) + } + } + })) + + ceph := New() + ceph.URL = srv.URL + ceph.Username = "user" + ceph.Password = "password" + require.NoError(t, ceph.Init()) + + return ceph, srv.Close +} + +func caseConnectionRefused(t *testing.T) (*Ceph, func()) { + t.Helper() + ceph := New() + ceph.URL = "http://127.0.0.1:65001" + ceph.Username = "user" + ceph.Password = "password" + require.NoError(t, ceph.Init()) + + return ceph, func() {} +} + +func case404(t *testing.T) (*Ceph, func()) { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + })) + ceph := New() + ceph.URL = srv.URL + ceph.Username = "user" + ceph.Password = "password" + require.NoError(t, ceph.Init()) + + return ceph, srv.Close +} diff --git a/src/go/plugin/go.d/modules/ceph/charts.go b/src/go/plugin/go.d/modules/ceph/charts.go new file mode 100644 index 000000000..95d9a8d40 --- /dev/null +++ b/src/go/plugin/go.d/modules/ceph/charts.go @@ -0,0 +1,576 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package ceph + +import ( + 
"fmt" + "strings" + + "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" +) + +const ( + prioClusterStatus = module.Priority + iota + prioClusterHostsCount + prioClusterMonitorsCount + prioClusterOSDsCount + prioClusterOSDsByStatusCount + prioClusterManagersCount + prioClusterObjectGatewaysCount + prioClusterIScsiGatewaysCount + prioClusterIScsiGatewaysByStatusCount + + prioClusterPhysCapacityUtilization + prioClusterPhysCapacityUsage + prioClusterObjectsCount + prioClusterObjectsByStatusPercent + prioClusterPoolsCount + prioClusterPGsCount + prioClusterPGsByStatusCount + prioClusterPGsPerOsdCount + + prioClusterClientIO + prioClusterClientIOPS + prioClusterClientRecoveryThroughput + prioClusterScrubStatus + + prioOsdStatus + prioOsdSpaceUsage + prioOsdIO + prioOsdIOPS + prioOsdLatency + + prioPoolSpaceUtilization + prioPoolSpaceUsage + prioPoolObjectsCount + prioPoolIO + prioPoolIOPS +) + +var clusterCharts = module.Charts{ + clusterStatusChart.Copy(), + clusterHostsCountChart.Copy(), + clusterMonitorsCountChart.Copy(), + clusterOsdsCountChart.Copy(), + clusterOsdsByStatusCountChart.Copy(), + clusterManagersCountChart.Copy(), + clusterObjectGatewaysCountChart.Copy(), + clusterIScsiGatewaysCountChart.Copy(), + clusterIScsiGatewaysByStatusCountChart.Copy(), + + clusterPhysCapacityUtilizationChart.Copy(), + clusterPhysCapacityUsageChart.Copy(), + clusterObjectsCountChart.Copy(), + clusterObjectsByStatusPercentChart.Copy(), + clusterPoolsCountChart.Copy(), + clusterPGsCountChart.Copy(), + clusterPGsByStatusCountChart.Copy(), + clusterPgsPerOsdCountChart.Copy(), + + clusterClientIOChart.Copy(), + clusterClientIOPSChart.Copy(), + clusterRecoveryThroughputChart.Copy(), + clusterScrubStatusChart.Copy(), +} + +var osdChartsTmpl = module.Charts{ + osdStatusChartTmpl.Copy(), + osdSpaceUsageChartTmpl.Copy(), + osdIOChartTmpl.Copy(), + osdIOPSChartTmpl.Copy(), + osdLatencyChartTmpl.Copy(), +} + +var poolChartsTmpl = module.Charts{ + poolSpaceUtilizationChartTmpl.Copy(), + poolSpaceUsageChartTmpl.Copy(), + poolObjectsCountChartTmpl.Copy(), + poolIOChartTmpl.Copy(), + poolIOPSChartTmpl.Copy(), +} + +var ( + clusterStatusChart = module.Chart{ + ID: "cluster_status", + Title: "Ceph Cluster Status", + Fam: "status", + Units: "status", + Ctx: "ceph.cluster_status", + Type: module.Line, + Priority: prioClusterStatus, + Dims: module.Dims{ + {ID: "health_ok", Name: "ok"}, + {ID: "health_err", Name: "err"}, + {ID: "health_warn", Name: "warn"}, + }, + } + clusterHostsCountChart = module.Chart{ + ID: "cluster_hosts_count", + Title: "Ceph Cluster Hosts", + Fam: "status", + Units: "hosts", + Ctx: "ceph.cluster_hosts_count", + Type: module.Line, + Priority: prioClusterHostsCount, + Dims: module.Dims{ + {ID: "hosts_num", Name: "hosts"}, + }, + } + clusterMonitorsCountChart = module.Chart{ + ID: "cluster_monitors_count", + Title: "Ceph Cluster Monitors", + Fam: "status", + Units: "monitors", + Ctx: "ceph.cluster_monitors_count", + Type: module.Line, + Priority: prioClusterMonitorsCount, + Dims: module.Dims{ + {ID: "monitors_num", Name: "monitors"}, + }, + } + clusterOsdsCountChart = module.Chart{ + ID: "cluster_osds_count", + Title: "Ceph Cluster OSDs", + Fam: "status", + Units: "osds", + Ctx: "ceph.cluster_osds_count", + Type: module.Line, + Priority: prioClusterOSDsCount, + Dims: module.Dims{ + {ID: "osds_num", Name: "osds"}, + }, + } + clusterOsdsByStatusCountChart = module.Chart{ + ID: "cluster_osds_by_status_count", + Title: "Ceph Cluster OSDs by Status", + Fam: "status", + Units: "osds", + Ctx: 
"ceph.cluster_osds_by_status_count", + Type: module.Line, + Priority: prioClusterOSDsByStatusCount, + Dims: module.Dims{ + {ID: "osds_up_num", Name: "up"}, + {ID: "osds_down_num", Name: "down"}, + {ID: "osds_in_num", Name: "in"}, + {ID: "osds_out_num", Name: "out"}, + }, + } + clusterManagersCountChart = module.Chart{ + ID: "cluster_managers_count", + Title: "Ceph Cluster Managers", + Fam: "status", + Units: "managers", + Ctx: "ceph.cluster_managers_count", + Type: module.Line, + Priority: prioClusterManagersCount, + Dims: module.Dims{ + {ID: "mgr_active_num", Name: "active"}, + {ID: "mgr_standby_num", Name: "standby"}, + }, + } + clusterObjectGatewaysCountChart = module.Chart{ + ID: "cluster_object_gateways_count", + Title: "Ceph Cluster Object Gateways (RGW)", + Fam: "status", + Units: "gateways", + Ctx: "ceph.cluster_object_gateways_count", + Type: module.Line, + Priority: prioClusterObjectGatewaysCount, + Dims: module.Dims{ + {ID: "rgw_num", Name: "object"}, + }, + } + clusterIScsiGatewaysCountChart = module.Chart{ + ID: "cluster_iscsi_gateways_count", + Title: "Ceph Cluster iSCSI Gateways", + Fam: "status", + Units: "gateways", + Ctx: "ceph.cluster_iscsi_gateways_count", + Type: module.Line, + Priority: prioClusterIScsiGatewaysCount, + Dims: module.Dims{ + {ID: "iscsi_daemons_num", Name: "iscsi"}, + }, + } + clusterIScsiGatewaysByStatusCountChart = module.Chart{ + ID: "cluster_iscsi_gateways_by_status_count", + Title: "Ceph Cluster iSCSI Gateways by Status", + Fam: "status", + Units: "gateways", + Ctx: "ceph.cluster_iscsi_gateways_by_status_count", + Type: module.Line, + Priority: prioClusterIScsiGatewaysByStatusCount, + Dims: module.Dims{ + {ID: "iscsi_daemons_up_num", Name: "up"}, + {ID: "iscsi_daemons_down_num", Name: "down"}, + }, + } +) + +var ( + clusterPhysCapacityUtilizationChart = module.Chart{ + ID: "cluster_physical_capacity_utilization", + Title: "Ceph Cluster Physical Capacity Utilization", + Fam: "capacity", + Units: "percent", + Ctx: "ceph.cluster_physical_capacity_utilization", + Type: module.Area, + Priority: prioClusterPhysCapacityUtilization, + Dims: module.Dims{ + {ID: "raw_capacity_utilization", Name: "utilization", Div: precision}, + }, + } + clusterPhysCapacityUsageChart = module.Chart{ + ID: "cluster_physical_capacity_usage", + Title: "Ceph Cluster Physical Capacity Usage", + Fam: "capacity", + Units: "bytes", + Ctx: "ceph.cluster_physical_capacity_usage", + Type: module.Stacked, + Priority: prioClusterPhysCapacityUsage, + Dims: module.Dims{ + {ID: "raw_capacity_avail_bytes", Name: "avail"}, + {ID: "raw_capacity_used_bytes", Name: "used"}, + }, + } + clusterObjectsCountChart = module.Chart{ + ID: "cluster_objects_count", + Title: "Ceph Cluster Objects", + Fam: "capacity", + Units: "objects", + Ctx: "ceph.cluster_objects_count", + Type: module.Line, + Priority: prioClusterObjectsCount, + Dims: module.Dims{ + {ID: "objects_num", Name: "objects"}, + }, + } + clusterObjectsByStatusPercentChart = module.Chart{ + ID: "cluster_objects_by_status", + Title: "Ceph Cluster Objects by Status", + Fam: "capacity", + Units: "percent", + Ctx: "ceph.cluster_objects_by_status_distribution", + Type: module.Stacked, + Priority: prioClusterObjectsByStatusPercent, + Dims: module.Dims{ + {ID: "objects_healthy_num", Name: "healthy", Algo: module.PercentOfAbsolute}, + {ID: "objects_misplaced_num", Name: "misplaced", Algo: module.PercentOfAbsolute}, + {ID: "objects_degraded_num", Name: "degraded", Algo: module.PercentOfAbsolute}, + {ID: "objects_unfound_num", Name: "unfound", Algo: 
module.PercentOfAbsolute}, + }, + } + clusterPoolsCountChart = module.Chart{ + ID: "cluster_pools_count", + Title: "Ceph Cluster Pools", + Fam: "capacity", + Units: "pools", + Ctx: "ceph.cluster_pools_count", + Type: module.Line, + Priority: prioClusterPoolsCount, + Dims: module.Dims{ + {ID: "pools_num", Name: "pools"}, + }, + } + clusterPGsCountChart = module.Chart{ + ID: "cluster_pgs_count", + Title: "Ceph Cluster Placement Groups", + Fam: "capacity", + Units: "pgs", + Ctx: "ceph.cluster_pgs_count", + Type: module.Line, + Priority: prioClusterPGsCount, + Dims: module.Dims{ + {ID: "pgs_num", Name: "pgs"}, + }, + } + clusterPGsByStatusCountChart = module.Chart{ + ID: "cluster_pgs_by_status_count", + Title: "Ceph Cluster Placement Groups by Status", + Fam: "capacity", + Units: "pgs", + Ctx: "ceph.cluster_pgs_by_status_count", + Type: module.Stacked, + Priority: prioClusterPGsByStatusCount, + Dims: module.Dims{ + {ID: "pg_status_category_clean", Name: "clean"}, + {ID: "pg_status_category_working", Name: "working"}, + {ID: "pg_status_category_warning", Name: "warning"}, + {ID: "pg_status_category_unknown", Name: "unknown"}, + }, + } + clusterPgsPerOsdCountChart = module.Chart{ + ID: "cluster_pgs_per_osd_count", + Title: "Ceph Cluster Placement Groups per OSD", + Fam: "capacity", + Units: "pgs", + Ctx: "ceph.cluster_pgs_per_osd_count", + Type: module.Line, + Priority: prioClusterPGsPerOsdCount, + Dims: module.Dims{ + {ID: "pgs_per_osd", Name: "per_osd"}, + }, + } +) + +var ( + clusterClientIOChart = module.Chart{ + ID: "cluster_client_io", + Title: "Ceph Cluster Client IO", + Fam: "performance", + Units: "bytes/s", + Ctx: "ceph.cluster_client_io", + Type: module.Area, + Priority: prioClusterClientIO, + Dims: module.Dims{ + {ID: "client_perf_read_bytes_sec", Name: "read"}, + {ID: "client_perf_write_bytes_sec", Name: "written", Mul: -1}, + }, + } + clusterClientIOPSChart = module.Chart{ + ID: "cluster_client_iops", + Title: "Ceph Cluster Client IOPS", + Fam: "performance", + Units: "ops/s", + Ctx: "ceph.cluster_client_iops", + Type: module.Line, + Priority: prioClusterClientIOPS, + Dims: module.Dims{ + {ID: "client_perf_read_op_per_sec", Name: "read"}, + {ID: "client_perf_write_op_per_sec", Name: "write", Mul: -1}, + }, + } + clusterRecoveryThroughputChart = module.Chart{ + ID: "cluster_recovery_throughput", + Title: "Ceph Cluster Recovery Throughput", + Fam: "performance", + Units: "bytes/s", + Ctx: "ceph.cluster_recovery_throughput", + Type: module.Line, + Priority: prioClusterClientRecoveryThroughput, + Dims: module.Dims{ + {ID: "client_perf_recovering_bytes_per_sec", Name: "recovery"}, + }, + } + clusterScrubStatusChart = module.Chart{ + ID: "cluster_scrub_status", + Title: "Ceph Cluster Scrubbing Status", + Fam: "performance", + Units: "status", + Ctx: "ceph.cluster_scrub_status", + Type: module.Line, + Priority: prioClusterScrubStatus, + Dims: module.Dims{ + {ID: "scrub_status_disabled", Name: "disabled"}, + {ID: "scrub_status_active", Name: "active"}, + {ID: "scrub_status_inactive", Name: "inactive"}, + }, + } +) + +var ( + osdStatusChartTmpl = module.Chart{ + ID: "osd_%s_status", + Title: "Ceph OSD Status", + Fam: "osd", + Units: "status", + Ctx: "ceph.osd_status", + Type: module.Line, + Priority: prioOsdStatus, + Dims: module.Dims{ + {ID: "osd_%s_status_up", Name: "up"}, + {ID: "osd_%s_status_down", Name: "down"}, + {ID: "osd_%s_status_in", Name: "in"}, + {ID: "osd_%s_status_out", Name: "out"}, + }, + } + osdSpaceUsageChartTmpl = module.Chart{ + ID: "osd_%s_space_usage", + Title: "Ceph 
OSD Space Usage", + Fam: "osd", + Units: "bytes", + Ctx: "ceph.osd_space_usage", + Type: module.Stacked, + Priority: prioOsdSpaceUsage, + Dims: module.Dims{ + {ID: "osd_%s_space_avail_bytes", Name: "avail"}, + {ID: "osd_%s_space_used_bytes", Name: "used"}, + }, + } + osdIOChartTmpl = module.Chart{ + ID: "osd_%s_io", + Title: "Ceph OSD IO", + Fam: "osd", + Units: "bytes/s", + Ctx: "ceph.osd_io", + Type: module.Area, + Priority: prioOsdIO, + Dims: module.Dims{ + {ID: "osd_%s_read_bytes", Name: "read", Algo: module.Incremental}, + {ID: "osd_%s_written_bytes", Name: "written", Algo: module.Incremental, Mul: -1}, + }, + } + osdIOPSChartTmpl = module.Chart{ + ID: "osd_%s_iops", + Title: "Ceph OSD IOPS", + Fam: "osd", + Units: "ops/s", + Ctx: "ceph.osd_iops", + Type: module.Line, + Priority: prioOsdIOPS, + Dims: module.Dims{ + {ID: "osd_%s_read_ops", Name: "read", Algo: module.Incremental}, + {ID: "osd_%s_write_ops", Name: "write", Algo: module.Incremental}, + }, + } + osdLatencyChartTmpl = module.Chart{ + ID: "osd_%s_latency", + Title: "Ceph OSD Latency", + Fam: "osd", + Units: "milliseconds", + Ctx: "ceph.osd_latency", + Type: module.Line, + Priority: prioOsdLatency, + Dims: module.Dims{ + {ID: "osd_%s_commit_latency_ms", Name: "commit"}, + {ID: "osd_%s_apply_latency_ms", Name: "apply"}, + }, + } +) + +var ( + poolSpaceUtilizationChartTmpl = module.Chart{ + ID: "pool_%s_space_utilization", + Title: "Ceph Pool Space Utilization", + Fam: "pool", + Units: "percent", + Ctx: "ceph.pool_space_utilization", + Type: module.Area, + Priority: prioPoolSpaceUtilization, + Dims: module.Dims{ + {ID: "pool_%s_space_utilization", Name: "utilization", Div: precision}, + }, + } + poolSpaceUsageChartTmpl = module.Chart{ + ID: "pool_%s_space_usage", + Title: "Ceph Pool Space Usage", + Fam: "pool", + Units: "bytes", + Ctx: "ceph.pool_space_usage", + Type: module.Stacked, + Priority: prioPoolSpaceUsage, + Dims: module.Dims{ + {ID: "pool_%s_space_avail_bytes", Name: "avail"}, + {ID: "pool_%s_space_used_bytes", Name: "used"}, + }, + } + poolObjectsCountChartTmpl = module.Chart{ + ID: "pool_%s_objects_count", + Title: "Ceph Pool Objects", + Fam: "pool", + Units: "objects", + Ctx: "ceph.pool_objects_count", + Type: module.Line, + Priority: prioPoolObjectsCount, + Dims: module.Dims{ + {ID: "pool_%s_objects", Name: "objects"}, + }, + } + poolIOChartTmpl = module.Chart{ + ID: "pool_%s_io", + Title: "Ceph Pool IO", + Fam: "pool", + Units: "bytes/s", + Ctx: "ceph.pool_io", + Type: module.Area, + Priority: prioPoolIO, + Dims: module.Dims{ + {ID: "pool_%s_read_bytes", Name: "read", Algo: module.Incremental}, + {ID: "pool_%s_written_bytes", Name: "written", Algo: module.Incremental, Mul: -1}, + }, + } + poolIOPSChartTmpl = module.Chart{ + ID: "pool_%s_iops", + Title: "Ceph Pool IOPS", + Fam: "pool", + Units: "ops/s", + Ctx: "ceph.pool_iops", + Type: module.Line, + Priority: prioPoolIOPS, + Dims: module.Dims{ + {ID: "pool_%s_read_ops", Name: "read", Algo: module.Incremental}, + {ID: "pool_%s_write_ops", Name: "write", Algo: module.Incremental, Mul: -1}, + }, + } +) + +func (c *Ceph) addClusterCharts() { + charts := clusterCharts.Copy() + + for _, chart := range *charts { + chart.Labels = []module.Label{ + {Key: "fsid", Value: c.fsid}, + } + } + + if err := c.Charts().Add(*charts...); err != nil { + c.Warning(err) + } +} + +func (c *Ceph) addOsdCharts(osdUuid, devClass, osdName string) { + charts := osdChartsTmpl.Copy() + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, osdUuid) + chart.ID = 
cleanChartID(chart.ID) + chart.Labels = []module.Label{ + {Key: "fsid", Value: c.fsid}, + {Key: "osd_uuid", Value: osdUuid}, + {Key: "osd_name", Value: osdName}, + {Key: "device_class", Value: devClass}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, osdUuid) + } + } + + if err := c.Charts().Add(*charts...); err != nil { + c.Warning(err) + } +} + +func (c *Ceph) addPoolCharts(poolName string) { + charts := poolChartsTmpl.Copy() + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, poolName) + chart.ID = cleanChartID(chart.ID) + chart.Labels = []module.Label{ + {Key: "fsid", Value: c.fsid}, + {Key: "pool_name", Value: poolName}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, poolName) + } + } + + if err := c.Charts().Add(*charts...); err != nil { + c.Warning(err) + } +} + +func (c *Ceph) removeCharts(prefix string) { + prefix = cleanChartID(prefix) + for _, chart := range *c.Charts() { + if strings.HasPrefix(chart.ID, prefix) { + chart.MarkRemove() + chart.MarkNotCreated() + } + } +} + +func cleanChartID(id string) string { + r := strings.NewReplacer(".", "_", " ", "_") + return strings.ToLower(r.Replace(id)) +} diff --git a/src/go/plugin/go.d/modules/ceph/collect.go b/src/go/plugin/go.d/modules/ceph/collect.go new file mode 100644 index 000000000..54f8cb96a --- /dev/null +++ b/src/go/plugin/go.d/modules/ceph/collect.go @@ -0,0 +1,109 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package ceph + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "slices" + + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" +) + +const precision = 1000 + +func (c *Ceph) collect() (map[string]int64, error) { + mx := make(map[string]int64) + + if err := c.auth(); err != nil { + return nil, err + } + + if c.fsid == "" { + fsid, err := c.getFsid() + if err != nil { + return nil, fmt.Errorf("failed to get fsid: %v", err) + } + c.fsid = fsid + c.addClusterChartsOnce.Do(c.addClusterCharts) + } + + if err := c.collectHealth(mx); err != nil { + return nil, fmt.Errorf("failed to collect health: %v", err) + } + if err := c.collectOsds(mx); err != nil { + return nil, fmt.Errorf("failed to collect osds: %v", err) + } + if err := c.collectPools(mx); err != nil { + return nil, fmt.Errorf("failed to collect pools: %v", err) + } + + return mx, nil +} + +func (c *Ceph) auth() error { + if c.token != "" { + ok, err := c.authCheck() + if err != nil { + return err + } + if ok { + return nil + } + c.token = "" + } + + tok, err := c.authLogin() + if err != nil { + return err + } + c.token = tok + + return nil +} + +func (c *Ceph) getFsid() (string, error) { + req, err := web.NewHTTPRequestWithPath(c.RequestConfig, urlPathApiMonitor) + if err != nil { + return "", err + } + + req.Header.Set("Accept", hdrAcceptVersion) + req.Header.Set("Content-Type", hdrContentTypeJson) + req.Header.Set("Authorization", "Bearer "+c.token) + + var resp struct { + MonStatus struct { + MonMap struct { + FSID string `json:"fsid"` + } `json:"monmap"` + } `json:"mon_status"` + } + + if err := c.webClient().RequestJSON(req, &resp); err != nil { + return "", err + } + + if resp.MonStatus.MonMap.FSID == "" { + return "", errors.New("no fsid") + } + + return resp.MonStatus.MonMap.FSID, nil +} + +func (c *Ceph) webClient(statusCodes ...int) *web.Client { + return web.DoHTTP(c.httpClient).OnNokCode(func(resp *http.Response) (bool, error) { + if slices.Contains(statusCodes, resp.StatusCode) { + return true, nil + } + var msg struct { + Detail string `json:"detail"` + } + 
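// the dashboard API reports request failures as a JSON body of the form
+		// {"detail": "..."}; when that message is present, surface it instead
+		// of a generic non-2xx status error
+		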
if err := json.NewDecoder(resp.Body).Decode(&msg); err == nil && msg.Detail != "" {
+			return false, errors.New(msg.Detail)
+		}
+		return false, nil
+	})
+}
diff --git a/src/go/plugin/go.d/modules/ceph/collect_health.go b/src/go/plugin/go.d/modules/ceph/collect_health.go
new file mode 100644
index 000000000..fca49eac4
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ceph/collect_health.go
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package ceph
+
+import (
+	"strings"
+
+	"github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+func (c *Ceph) collectHealth(mx map[string]int64) error {
+	req, err := web.NewHTTPRequestWithPath(c.RequestConfig, urlPathApiHealthMinimal)
+	if err != nil {
+		return err
+	}
+
+	req.Header.Set("Accept", hdrAcceptVersion)
+	req.Header.Set("Content-Type", hdrContentTypeJson)
+	req.Header.Set("Authorization", "Bearer "+c.token)
+
+	var resp apiHealthMinimalResponse
+
+	if err := c.webClient().RequestJSON(req, &resp); err != nil {
+		return err
+	}
+
+	for _, v := range []string{"health_err", "health_warn", "health_ok"} {
+		mx[v] = 0
+	}
+	mx[strings.ToLower(resp.Health.Status)] = 1
+
+	mx["mgr_active_num"] = 0
+	if resp.MgrMap.ActiveName != "" {
+		mx["mgr_active_num"] = 1
+	}
+	mx["mgr_standby_num"] = int64(len(resp.MgrMap.Standbys))
+	mx["hosts_num"] = resp.Hosts
+	mx["rgw_num"] = resp.Rgw
+	mx["monitors_num"] = int64(len(resp.MonStatus.MonMap.Mons))
+	mx["osds_num"] = int64(len(resp.OsdMap.Osds))
+
+	for _, v := range []string{"up", "down", "in", "out"} {
+		mx["osds_"+v+"_num"] = 0
+	}
+	for _, v := range resp.OsdMap.Osds {
+		s := map[int64]string{0: "out", 1: "in"}
+		mx["osds_"+s[v.In]+"_num"]++
+
+		s = map[int64]string{0: "down", 1: "up"}
+		mx["osds_"+s[v.Up]+"_num"]++
+	}
+
+	mx["pools_num"] = int64(len(resp.Pools))
+	mx["iscsi_daemons_num"] = resp.IscsiDaemons.Up + resp.IscsiDaemons.Down
+	mx["iscsi_daemons_up_num"] = resp.IscsiDaemons.Up
+	mx["iscsi_daemons_down_num"] = resp.IscsiDaemons.Down
+
+	df := resp.Df.Stats
+	mx["raw_capacity_used_bytes"] = df.TotalBytes - df.TotalAvailBytes
+	mx["raw_capacity_avail_bytes"] = df.TotalAvailBytes
+	mx["raw_capacity_utilization"] = 0
+	// guard on the divisor: a completely full cluster has 0 bytes available
+	// but must still report its utilization
+	if df.TotalBytes > 0 {
+		mx["raw_capacity_utilization"] = int64(float64(df.TotalBytes-df.TotalAvailBytes) / float64(df.TotalBytes) * 100 * precision)
+	}
+
+	objs := resp.PgInfo.ObjectStats
+	mx["objects_num"] = objs.NumObjects
+	mx["objects_healthy_num"] = objs.NumObjects - (objs.NumObjectsMisplaced + objs.NumObjectsDegraded + objs.NumObjectsUnfound)
+	mx["objects_misplaced_num"] = objs.NumObjectsMisplaced
+	mx["objects_degraded_num"] = objs.NumObjectsDegraded
+	mx["objects_unfound_num"] = objs.NumObjectsUnfound
+	mx["pgs_per_osd"] = int64(resp.PgInfo.PgsPerOsd)
+
+	mx["pgs_num"] = 0
+	for _, v := range []string{"clean", "working", "warning", "unknown"} {
+		mx["pg_status_category_"+v] = 0
+	}
+	for k, v := range resp.PgInfo.Statuses {
+		mx["pg_status_category_"+pgStatusCategory(k)] += v
+		mx["pgs_num"] += v
+	}
+
+	perf := resp.ClientPerf
+	mx["client_perf_read_bytes_sec"] = int64(perf.ReadBytesSec)
+	mx["client_perf_read_op_per_sec"] = int64(perf.ReadOpPerSec)
+	mx["client_perf_write_bytes_sec"] = int64(perf.WriteBytesSec)
+	mx["client_perf_write_op_per_sec"] = int64(perf.WriteOpPerSec)
+	mx["client_perf_recovering_bytes_per_sec"] = int64(perf.RecoveringBytesPerSec)
+
+	for _, v := range []string{"disabled", "active", "inactive"} {
+		mx["scrub_status_"+v] = 0
+	}
+	mx["scrub_status_"+strings.ToLower(resp.ScrubStatus)] = 1
+
+	return nil
+}
+
+func 
pgStatusCategory(status string) string {
+	// 'status' is formatted as 'status1+status2+...+statusN'
+
+	states := strings.Split(status, "+")
+
+	var clean, working, warning, unknown int
+
+	for _, s := range states {
+		switch s {
+		case "active", "clean":
+			clean++
+		case "activating",
+			"backfill_wait",
+			"backfilling",
+			"creating",
+			"deep",
+			"degraded",
+			"forced_backfill",
+			"forced_recovery",
+			"peering",
+			"peered",
+			"recovering",
+			"recovery_wait",
+			"repair",
+			"scrubbing",
+			"snaptrim",
+			"snaptrim_wait":
+			working++
+		case "backfill_toofull",
+			"backfill_unfound",
+			"down",
+			"incomplete",
+			"inconsistent",
+			"recovery_toofull",
+			"recovery_unfound",
+			"remapped",
+			"snaptrim_error",
+			"stale",
+			"undersized":
+			warning++
+		default:
+			unknown++
+		}
+	}
+
+	switch {
+	case warning > 0:
+		return "warning"
+	case unknown > 0:
+		return "unknown"
+	case working > 0:
+		return "working"
+	case clean > 0:
+		return "clean"
+	default:
+		return "unknown"
+	}
+}
diff --git a/src/go/plugin/go.d/modules/ceph/collect_osd.go b/src/go/plugin/go.d/modules/ceph/collect_osd.go
new file mode 100644
index 000000000..0c8695c6d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ceph/collect_osd.go
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package ceph
+
+import (
+	"fmt"
+
+	"github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+func (c *Ceph) collectOsds(mx map[string]int64) error {
+	req, err := web.NewHTTPRequestWithPath(c.RequestConfig, urlPathApiOsd)
+	if err != nil {
+		return err
+	}
+
+	req.Header.Set("Accept", hdrAcceptVersion)
+	req.Header.Set("Content-Type", hdrContentTypeJson)
+	req.Header.Set("Authorization", "Bearer "+c.token)
+
+	var osds []apiOsdResponse
+
+	if err := c.webClient().RequestJSON(req, &osds); err != nil {
+		return err
+	}
+
+	seen := make(map[string]bool)
+
+	for _, osd := range osds {
+		px := fmt.Sprintf("osd_%s_", osd.UUID)
+
+		seen[osd.UUID] = true
+		if !c.seenOsds[osd.UUID] {
+			c.seenOsds[osd.UUID] = true
+			c.addOsdCharts(osd.UUID, osd.Tree.DeviceClass, osd.Tree.Name)
+		}
+
+		mx[px+"status_up"], mx[px+"status_down"] = 1, 0
+		if osd.Up == 0 {
+			mx[px+"status_up"], mx[px+"status_down"] = 0, 1
+		}
+		mx[px+"status_in"], mx[px+"status_out"] = 1, 0
+		if osd.In == 0 {
+			mx[px+"status_in"], mx[px+"status_out"] = 0, 1
+		}
+
+		mx[px+"size_bytes"] = osd.OsdStats.Statfs.Total
+		mx[px+"space_used_bytes"] = osd.OsdStats.Statfs.Total - osd.OsdStats.Statfs.Available
+		mx[px+"space_avail_bytes"] = osd.OsdStats.Statfs.Available
+		mx[px+"read_ops"] = int64(osd.Stats.OpR)
+		mx[px+"read_bytes"] = int64(osd.Stats.OpOutBytes)
+		mx[px+"write_ops"] = int64(osd.Stats.OpW)
+		mx[px+"written_bytes"] = int64(osd.Stats.OpInBytes)
+		mx[px+"commit_latency_ms"] = int64(osd.OsdStats.PerfStat.CommitLatencyMs)
+		mx[px+"apply_latency_ms"] = int64(osd.OsdStats.PerfStat.ApplyLatencyMs)
+	}
+
+	for uuid := range c.seenOsds {
+		if !seen[uuid] {
+			delete(c.seenOsds, uuid)
+			c.removeCharts(fmt.Sprintf("osd_%s_", uuid))
+		}
+	}
+
+	return nil
+}
diff --git a/src/go/plugin/go.d/modules/ceph/collect_pools.go b/src/go/plugin/go.d/modules/ceph/collect_pools.go
new file mode 100644
index 000000000..272004b59
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ceph/collect_pools.go
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package ceph
+
+import (
+	"fmt"
+
+	"github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+func (c *Ceph) collectPools(mx map[string]int64) error {
+	req, err := web.NewHTTPRequestWithPath(c.RequestConfig, urlPathApiPool)
+	if err != nil 
{ + return err + } + + req.URL.RawQuery = urlQueryApiPool + req.Header.Set("Accept", hdrAcceptVersion) + req.Header.Set("Content-Type", hdrContentTypeJson) + req.Header.Set("Authorization", "Bearer "+c.token) + + var pools []apiPoolResponse + + if err := c.webClient().RequestJSON(req, &pools); err != nil { + return err + } + + seen := make(map[string]bool) + + for _, pool := range pools { + px := fmt.Sprintf("pool_%s_", pool.PoolName) + + seen[pool.PoolName] = true + if !c.seenPools[pool.PoolName] { + c.seenPools[pool.PoolName] = true + c.addPoolCharts(pool.PoolName) + } + + mx[px+"objects"] = int64(pool.Stats.Objects.Latest) + mx[px+"size"] = int64(pool.Stats.AvailRaw.Latest) + mx[px+"space_used_bytes"] = int64(pool.Stats.BytesUsed.Latest) + mx[px+"space_avail_bytes"] = int64(pool.Stats.AvailRaw.Latest - pool.Stats.BytesUsed.Latest) + mx[px+"space_utilization"] = int64(pool.Stats.PercentUsed.Latest * precision) + mx[px+"read_ops"] = int64(pool.Stats.Reads.Latest) + mx[px+"read_bytes"] = int64(pool.Stats.ReadBytes.Latest) + mx[px+"write_ops"] = int64(pool.Stats.Writes.Latest) + mx[px+"written_bytes"] = int64(pool.Stats.WrittenBytes.Latest) + } + + for name := range c.seenPools { + if !seen[name] { + delete(c.seenPools, name) + c.removeCharts(fmt.Sprintf("pool_%s_", name)) + } + } + + return nil +} diff --git a/src/go/plugin/go.d/modules/ceph/config_schema.json b/src/go/plugin/go.d/modules/ceph/config_schema.json new file mode 100644 index 000000000..2b1cc8104 --- /dev/null +++ b/src/go/plugin/go.d/modules/ceph/config_schema.json @@ -0,0 +1,185 @@ +{ + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Ceph collector configuration.", + "type": "object", + "properties": { + "update_every": { + "title": "Update every", + "description": "Data collection interval, measured in seconds.", + "type": "integer", + "minimum": 1, + "default": 10 + }, + "url": { + "title": "URL", + "description": "The URL of the [Ceph Manager API](https://docs.ceph.com/en/reef/mgr/ceph_api/).", + "type": "string", + "default": "https://127.0.0.1:8443", + "format": "uri" + }, + "timeout": { + "title": "Timeout", + "description": "The timeout in seconds for the HTTP request.", + "type": "number", + "minimum": 0.5, + "default": 2 + }, + "not_follow_redirects": { + "title": "Not follow redirects", + "description": "If set, the client will not follow HTTP redirects automatically.", + "type": "boolean" + }, + "username": { + "title": "Username", + "description": "The username for basic authentication.", + "type": "string", + "sensitive": true + }, + "password": { + "title": "Password", + "description": "The password for basic authentication.", + "type": "string", + "sensitive": true + }, + "proxy_url": { + "title": "Proxy URL", + "description": "The URL of the proxy server.", + "type": "string" + }, + "proxy_username": { + "title": "Proxy username", + "description": "The username for proxy authentication.", + "type": "string", + "sensitive": true + }, + "proxy_password": { + "title": "Proxy password", + "description": "The password for proxy authentication.", + "type": "string", + "sensitive": true + }, + "headers": { + "title": "Headers", + "description": "Additional HTTP headers to include in the request.", + "type": [ + "object", + "null" + ], + "additionalProperties": { + "type": "string" + } + }, + "tls_skip_verify": { + "title": "Skip TLS verification", + "description": "If set, TLS certificate verification will be skipped.", + "type": "boolean", + "default": true + }, + "tls_ca": { + 
"title": "TLS CA", + "description": "The path to the CA certificate file for TLS verification.", + "type": "string", + "pattern": "^$|^/" + }, + "tls_cert": { + "title": "TLS certificate", + "description": "The path to the client certificate file for TLS authentication.", + "type": "string", + "pattern": "^$|^/" + }, + "tls_key": { + "title": "TLS key", + "description": "The path to the client key file for TLS authentication.", + "type": "string", + "pattern": "^$|^/" + }, + "body": { + "title": "Body", + "type": "string" + }, + "method": { + "title": "Method", + "type": "string" + } + }, + "required": [ + "url", + "username", + "password" + ], + "patternProperties": { + "^name$": {} + } + }, + "uiSchema": { + "uiOptions": { + "fullPage": true + }, + "body": { + "ui:widget": "hidden" + }, + "method": { + "ui:widget": "hidden" + }, + "timeout": { + "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)." + }, + "username": { + "ui:widget": "password" + }, + "proxy_username": { + "ui:widget": "password" + }, + "password": { + "ui:widget": "password" + }, + "proxy_password": { + "ui:widget": "password" + }, + "ui:flavour": "tabs", + "ui:options": { + "tabs": [ + { + "title": "Base", + "fields": [ + "update_every", + "url", + "timeout", + "not_follow_redirects" + ] + }, + { + "title": "Auth", + "fields": [ + "username", + "password" + ] + }, + { + "title": "TLS", + "fields": [ + "tls_skip_verify", + "tls_ca", + "tls_cert", + "tls_key" + ] + }, + { + "title": "Proxy", + "fields": [ + "proxy_url", + "proxy_username", + "proxy_password" + ] + }, + { + "title": "Headers", + "fields": [ + "headers" + ] + } + ] + } + } +} diff --git a/src/go/plugin/go.d/modules/ceph/init.go b/src/go/plugin/go.d/modules/ceph/init.go new file mode 100644 index 000000000..1642abbcd --- /dev/null +++ b/src/go/plugin/go.d/modules/ceph/init.go @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package ceph + +import ( + "fmt" +) + +func (c *Ceph) validateConfig() error { + if c.URL == "" { + return fmt.Errorf("URL is required but not set") + } + if c.Username == "" || c.Password == "" { + return fmt.Errorf("username and password are required but not set") + } + return nil +} diff --git a/src/go/plugin/go.d/modules/ceph/integrations/ceph.md b/src/go/plugin/go.d/modules/ceph/integrations/ceph.md new file mode 100644 index 000000000..7a0c3dd6d --- /dev/null +++ b/src/go/plugin/go.d/modules/ceph/integrations/ceph.md @@ -0,0 +1,307 @@ + + +# Ceph + + + + + +Plugin: go.d.plugin +Module: ceph + + + +## Overview + +This collector monitors the overall health status and performance of your Ceph clusters. +It gathers key metrics for the entire cluster, individual Pools, and OSDs. + + +It collects metrics by periodically issuing HTTP GET requests to the Ceph Manager [REST API](https://docs.ceph.com/en/reef/mgr/ceph_api/#): + +- [/api/monitor](https://docs.ceph.com/en/reef/mgr/ceph_api/#get--api-monitor) (only once to get the Ceph cluster id (fsid)) +- [/api/health/minimal](https://docs.ceph.com/en/reef/mgr/ceph_api/#get--api-health-minimal) +- [/api/osd](https://docs.ceph.com/en/reef/mgr/ceph_api/#get--api-osd) +- [/api/pool?stats=true](https://docs.ceph.com/en/reef/mgr/ceph_api/#get--api-pool) + + +This collector is only supported on the following platforms: + +- Linux + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. 
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+The collector can automatically detect Ceph Manager instances running on:
+
+- localhost that are listening on port 8443
+- within Docker containers
+
+> **Note that the Ceph REST API requires a username and password**.
+> While Netdata can automatically detect Ceph Manager instances and create data collection jobs, these jobs will fail unless you provide the necessary credentials.
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per cluster
+
+These metrics refer to the entire Ceph cluster.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| fsid | A unique identifier of the cluster. |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| ceph.cluster_status | ok, err, warn | status |
+| ceph.cluster_hosts_count | hosts | hosts |
+| ceph.cluster_monitors_count | monitors | monitors |
+| ceph.cluster_osds_count | osds | osds |
+| ceph.cluster_osds_by_status_count | up, down, in, out | osds |
+| ceph.cluster_managers_count | active, standby | managers |
+| ceph.cluster_object_gateways_count | object | gateways |
+| ceph.cluster_iscsi_gateways_count | iscsi | gateways |
+| ceph.cluster_iscsi_gateways_by_status_count | up, down | gateways |
+| ceph.cluster_physical_capacity_utilization | utilization | percent |
+| ceph.cluster_physical_capacity_usage | avail, used | bytes |
+| ceph.cluster_objects_count | objects | objects |
+| ceph.cluster_objects_by_status_distribution | healthy, misplaced, degraded, unfound | percent |
+| ceph.cluster_pools_count | pools | pools |
+| ceph.cluster_pgs_count | pgs | pgs |
+| ceph.cluster_pgs_by_status_count | clean, working, warning, unknown | pgs |
+| ceph.cluster_pgs_per_osd_count | per_osd | pgs |
+
+### Per osd
+
+These metrics refer to the Object Storage Daemon (OSD).
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| fsid | A unique identifier of the cluster. |
+| osd_uuid | OSD UUID. |
+| osd_name | OSD name. |
+| device_class | OSD device class. |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| ceph.osd_status | up, down, in, out | status |
+| ceph.osd_space_usage | avail, used | bytes |
+| ceph.osd_io | read, written | bytes/s |
+| ceph.osd_iops | read, write | ops/s |
+| ceph.osd_latency | commit, apply | milliseconds |
+
+### Per pool
+
+These metrics refer to the Pool.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| fsid | A unique identifier of the cluster. |
+| pool_name | Pool name. 
|
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| ceph.pool_space_utilization | utilization | percent |
+| ceph.pool_space_usage | avail, used | bytes |
+| ceph.pool_objects_count | objects | objects |
+| ceph.pool_io | read, written | bytes/s |
+| ceph.pool_iops | read, write | ops/s |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ ceph_cluster_physical_capacity_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ceph.conf) | ceph.cluster_physical_capacity_utilization | Ceph cluster ${label:fsid} disk space utilization |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/ceph.conf`.
+
+
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/ceph.conf
+```
+#### Options
+
+The following options can be defined globally: update_every.
+
+
+
    Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 1 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | The URL of the [Ceph Manager API](https://docs.ceph.com/en/reef/mgr/ceph_api/). | https://127.0.0.1:8443 | yes | +| timeout | HTTP request timeout. | 2 | no | +| username | Username for basic HTTP authentication. | | yes | +| password | Password for basic HTTP authentication. | | yes | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | yes | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +
    + +#### Examples + +##### Basic + +A basic example configuration. + +```yaml +jobs: + - name: local + url: https://127.0.0.1:8443 + username: user + password: pass + +``` +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Collecting metrics from local and remote instances. + + +```yaml +jobs: + - name: local + url: https://127.0.0.1:8443 + username: user + password: pass + + - name: remote + url: https://192.0.2.1:8443 + username: user + password: pass + +``` + + +## Troubleshooting + +### Debug Mode + +**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature. + +To troubleshoot issues with the `ceph` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m ceph + ``` + +### Getting Logs + +If you're encountering problems with the `ceph` collector, follow these steps to retrieve logs and identify potential issues: + +- **Run the command** specific to your system (systemd, non-systemd, or Docker container). +- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem. + +#### System with systemd + +Use the following command to view logs generated since the last Netdata service restart: + +```bash +journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep ceph +``` + +#### System without systemd + +Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name: + +```bash +grep ceph /var/log/netdata/collector.log +``` + +**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues. + +#### Docker Container + +If your Netdata runs in a Docker container named "netdata" (replace if different), use this command: + +```bash +docker logs netdata 2>&1 | grep ceph +``` + + diff --git a/src/go/plugin/go.d/modules/ceph/metadata.yaml b/src/go/plugin/go.d/modules/ceph/metadata.yaml new file mode 100644 index 000000000..84204e835 --- /dev/null +++ b/src/go/plugin/go.d/modules/ceph/metadata.yaml @@ -0,0 +1,391 @@ +plugin_name: go.d.plugin +modules: + - meta: + plugin_name: go.d.plugin + module_name: ceph + monitored_instance: + name: Ceph + link: "https://ceph.io/" + categories: + - data-collection.storage-mount-points-and-filesystems + icon_filename: "ceph.svg" + related_resources: + integrations: + list: [] + info_provided_to_referring_integrations: + description: "" + keywords: + - ceph + - storage + most_popular: false + overview: + data_collection: + metrics_description: | + This collector monitors the overall health status and performance of your Ceph clusters. + It gathers key metrics for the entire cluster, individual Pools, and OSDs. 
+ method_description: | + It collects metrics by periodically issuing HTTP GET requests to the Ceph Manager [REST API](https://docs.ceph.com/en/reef/mgr/ceph_api/#): + + - [/api/monitor](https://docs.ceph.com/en/reef/mgr/ceph_api/#get--api-monitor) (only once to get the Ceph cluster id (fsid)) + - [/api/health/minimal](https://docs.ceph.com/en/reef/mgr/ceph_api/#get--api-health-minimal) + - [/api/osd](https://docs.ceph.com/en/reef/mgr/ceph_api/#get--api-osd) + - [/api/pool?stats=true](https://docs.ceph.com/en/reef/mgr/ceph_api/#get--api-pool) + supported_platforms: + include: [Linux] + exclude: [] + multi_instance: true + additional_permissions: + description: "" + default_behavior: + auto_detection: + description: | + The collector can automatically detect Ceph Manager instances running on: + + - localhost that are listening on port 8443 + - within Docker containers + + > **Note that the Ceph REST API requires a username and password**. + > While Netdata can automatically detect Ceph Manager instances and create data collection jobs, these jobs will fail unless you provide the necessary credentials. + limits: + description: "" + performance_impact: + description: "" + setup: + prerequisites: + list: [] + configuration: + file: + name: go.d/ceph.conf + options: + description: | + The following options can be defined globally: update_every. + folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 1 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + - name: url + description: The URL of the [Ceph Manager API](https://docs.ceph.com/en/reef/mgr/ceph_api/). + default_value: https://127.0.0.1:8443 + required: true + - name: timeout + description: HTTP request timeout. + default_value: 2 + required: false + - name: username + description: Username for basic HTTP authentication. + default_value: "" + required: true + - name: password + description: Password for basic HTTP authentication. + default_value: "" + required: true + - name: proxy_url + description: Proxy URL. + default_value: "" + required: false + - name: proxy_username + description: Username for proxy basic HTTP authentication. + default_value: "" + required: false + - name: proxy_password + description: Password for proxy basic HTTP authentication. + default_value: "" + required: false + - name: method + description: HTTP request method. + default_value: "GET" + required: false + - name: body + description: HTTP request body. + default_value: "" + required: false + - name: headers + description: HTTP request headers. + default_value: "" + required: false + - name: not_follow_redirects + description: Redirect handling policy. Controls whether the client follows redirects. + default_value: no + required: false + - name: tls_skip_verify + description: Server certificate chain and hostname validation policy. Controls whether the client performs this check. + default_value: yes + required: false + - name: tls_ca + description: Certification authority that the client uses when verifying the server's certificates. + default_value: "" + required: false + - name: tls_cert + description: Client TLS certificate. + default_value: "" + required: false + - name: tls_key + description: Client TLS key. 
+              default_value: ""
+              required: false
+        examples:
+          folding:
+            title: ""
+            enabled: false
+          list:
+            - name: Basic
+              description: A basic example configuration.
+              folding:
+                enabled: false
+              config: |
+                jobs:
+                  - name: local
+                    url: https://127.0.0.1:8443
+                    username: user
+                    password: pass
+            - name: Multi-instance
+              description: |
+                > **Note**: When you define multiple jobs, their names must be unique.
+
+                Collecting metrics from local and remote instances.
+              config: |
+                jobs:
+                  - name: local
+                    url: https://127.0.0.1:8443
+                    username: user
+                    password: pass
+
+                  - name: remote
+                    url: https://192.0.2.1:8443
+                    username: user
+                    password: pass
+    troubleshooting:
+      problems:
+        list: []
+    alerts:
+      - name: ceph_cluster_physical_capacity_utilization
+        link: https://github.com/netdata/netdata/blob/master/src/health/health.d/ceph.conf
+        metric: ceph.cluster_physical_capacity_utilization
+        info: 'Ceph cluster ${label:fsid} disk space utilization'
+    metrics:
+      folding:
+        title: Metrics
+        enabled: false
+      description: ""
+      availability: []
+      scopes:
+        - name: cluster
+          description: "These metrics refer to the entire Ceph cluster."
+          labels:
+            - name: fsid
+              description: A unique identifier of the cluster.
+          metrics:
+            - name: ceph.cluster_status
+              description: Ceph Cluster Status
+              unit: status
+              chart_type: line
+              dimensions:
+                - name: ok
+                - name: err
+                - name: warn
+            - name: ceph.cluster_hosts_count
+              description: Ceph Cluster Hosts
+              unit: hosts
+              chart_type: line
+              dimensions:
+                - name: hosts
+            - name: ceph.cluster_monitors_count
+              description: Ceph Cluster Monitors
+              unit: monitors
+              chart_type: line
+              dimensions:
+                - name: monitors
+            - name: ceph.cluster_osds_count
+              description: Ceph Cluster OSDs
+              unit: osds
+              chart_type: line
+              dimensions:
+                - name: osds
+            - name: ceph.cluster_osds_by_status_count
+              description: Ceph Cluster OSDs by Status
+              unit: osds
+              chart_type: line
+              dimensions:
+                - name: up
+                - name: down
+                - name: in
+                - name: out
+            - name: ceph.cluster_managers_count
+              description: Ceph Cluster Managers
+              unit: managers
+              chart_type: line
+              dimensions:
+                - name: active
+                - name: standby
+            - name: ceph.cluster_object_gateways_count
+              description: Ceph Cluster Object Gateways (RGW)
+              unit: gateways
+              chart_type: line
+              dimensions:
+                - name: object
+            - name: ceph.cluster_iscsi_gateways_count
+              description: Ceph Cluster iSCSI Gateways
+              unit: gateways
+              chart_type: line
+              dimensions:
+                - name: iscsi
+            - name: ceph.cluster_iscsi_gateways_by_status_count
+              description: Ceph Cluster iSCSI Gateways by Status
+              unit: gateways
+              chart_type: line
+              dimensions:
+                - name: up
+                - name: down
+            - name: ceph.cluster_physical_capacity_utilization
+              description: Ceph Cluster Physical Capacity Utilization
+              unit: percent
+              chart_type: area
+              dimensions:
+                - name: utilization
+            - name: ceph.cluster_physical_capacity_usage
+              description: Ceph Cluster Physical Capacity Usage
+              unit: bytes
+              chart_type: stacked
+              dimensions:
+                - name: avail
+                - name: used
+            - name: ceph.cluster_objects_count
+              description: Ceph Cluster Objects
+              unit: objects
+              chart_type: line
+              dimensions:
+                - name: objects
+            - name: ceph.cluster_objects_by_status_distribution
+              description: Ceph Cluster Objects by Status
+              unit: percent
+              chart_type: stacked
+              dimensions:
+                - name: healthy
+                - name: misplaced
+                - name: degraded
+                - name: unfound
+            - name: ceph.cluster_pools_count
+              description: Ceph Cluster Pools
+              unit: pools
+              chart_type: line
+              dimensions:
+                - name: pools
+            - name: ceph.cluster_pgs_count
+              description: Ceph Cluster 
Placement Groups
+              unit: pgs
+              chart_type: line
+              dimensions:
+                - name: pgs
+            - name: ceph.cluster_pgs_by_status_count
+              description: Ceph Cluster Placement Groups by Status
+              unit: pgs
+              chart_type: stacked
+              dimensions:
+                - name: clean
+                - name: working
+                - name: warning
+                - name: unknown
+            - name: ceph.cluster_pgs_per_osd_count
+              description: Ceph Cluster Placement Groups per OSD
+              unit: pgs
+              chart_type: line
+              dimensions:
+                - name: per_osd
+        - name: osd
+          description: These metrics refer to the Object Storage Daemon (OSD).
+          labels:
+            - name: fsid
+              description: A unique identifier of the cluster.
+            - name: osd_uuid
+              description: OSD UUID.
+            - name: osd_name
+              description: OSD name.
+            - name: device_class
+              description: OSD device class.
+          metrics:
+            - name: ceph.osd_status
+              description: Ceph OSD Status
+              unit: status
+              chart_type: line
+              dimensions:
+                - name: up
+                - name: down
+                - name: in
+                - name: out
+            - name: ceph.osd_space_usage
+              description: Ceph OSD Space Usage
+              unit: bytes
+              chart_type: stacked
+              dimensions:
+                - name: avail
+                - name: used
+            - name: ceph.osd_io
+              description: Ceph OSD IO
+              unit: bytes/s
+              chart_type: area
+              dimensions:
+                - name: read
+                - name: written
+            - name: ceph.osd_iops
+              description: Ceph OSD IOPS
+              unit: ops/s
+              chart_type: line
+              dimensions:
+                - name: read
+                - name: write
+            - name: ceph.osd_latency
+              description: Ceph OSD Latency
+              unit: milliseconds
+              chart_type: line
+              dimensions:
+                - name: commit
+                - name: apply
+        - name: pool
+          description: These metrics refer to the Pool.
+          labels:
+            - name: fsid
+              description: A unique identifier of the cluster.
+            - name: pool_name
+              description: Pool name.
+          metrics:
+            - name: ceph.pool_space_utilization
+              description: Ceph Pool Space Utilization
+              unit: percent
+              chart_type: area
+              dimensions:
+                - name: utilization
+            - name: ceph.pool_space_usage
+              description: Ceph Pool Space Usage
+              unit: bytes
+              chart_type: stacked
+              dimensions:
+                - name: avail
+                - name: used
+            - name: ceph.pool_objects_count
+              description: Ceph Pool Objects
+              unit: objects
+              chart_type: line
+              dimensions:
+                - name: objects
+            - name: ceph.pool_io
+              description: Ceph Pool IO
+              unit: bytes/s
+              chart_type: area
+              dimensions:
+                - name: read
+                - name: written
+            - name: ceph.pool_iops
+              description: Ceph Pool IOPS
+              unit: ops/s
+              chart_type: line
+              dimensions:
+                - name: read
+                - name: write
diff --git a/src/go/plugin/go.d/modules/ceph/testdata/config.json b/src/go/plugin/go.d/modules/ceph/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ceph/testdata/config.json
@@ -0,0 +1,20 @@
+{
+  "update_every": 123,
+  "url": "ok",
+  "body": "ok",
+  "method": "ok",
+  "headers": {
+    "ok": "ok"
+  },
+  "username": "ok",
+  "password": "ok",
+  "proxy_url": "ok",
+  "proxy_username": "ok",
+  "proxy_password": "ok",
+  "timeout": 123.123,
+  "not_follow_redirects": true,
+  "tls_ca": "ok",
+  "tls_cert": "ok",
+  "tls_key": "ok",
+  "tls_skip_verify": true
+}
diff --git a/src/go/plugin/go.d/modules/ceph/testdata/config.yaml b/src/go/plugin/go.d/modules/ceph/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ceph/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+  ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git 
a/src/go/plugin/go.d/modules/ceph/testdata/v16.2.15/api_health_minimal.json b/src/go/plugin/go.d/modules/ceph/testdata/v16.2.15/api_health_minimal.json new file mode 100644 index 000000000..9b321688e --- /dev/null +++ b/src/go/plugin/go.d/modules/ceph/testdata/v16.2.15/api_health_minimal.json @@ -0,0 +1,105 @@ +{ + "health": { + "status": "HEALTH_WARN", + "checks": [ + { + "severity": "HEALTH_WARN", + "summary": { + "message": "Reduced data availability: 1 pg inactive, 1 pg incomplete", + "count": 2 + }, + "detail": [ + { + "message": "pg 1.0 is creating+incomplete, acting [1,2147483647,2147483647,2147483647] (reducing pool mySuperPool min_size from 3 may help; search ceph.com/docs for 'incomplete')" + } + ], + "muted": false, + "type": "PG_AVAILABILITY" + } + ], + "mutes": [] + }, + "mon_status": { + "monmap": { + "mons": [ + {} + ] + }, + "quorum": [ + 0 + ] + }, + "fs_map": { + "filesystems": [], + "standbys": [] + }, + "osd_map": { + "osds": [ + { + "in": 1, + "up": 1, + "state": [ + "exists", + "up" + ] + }, + { + "in": 1, + "up": 1, + "state": [ + "exists", + "up" + ] + } + ] + }, + "scrub_status": "Inactive", + "pg_info": { + "object_stats": { + "num_objects": 6, + "num_object_copies": 6, + "num_objects_degraded": 1, + "num_objects_misplaced": 1, + "num_objects_unfound": 1 + }, + "statuses": { + "active+clean": 1, + "creating+incomplete": 1 + }, + "pgs_per_osd": 2 + }, + "mgr_map": { + "active_name": "pve-deb-work.snrdap", + "standbys": [ + { + "gid": 24118, + "name": "pve-deb-work.rjothn", + "mgr_features": 4540138314316775400 + } + ] + }, + "pools": [ + {}, + {} + ], + "df": { + "stats": { + "total_avail_bytes": 175476178944, + "total_bytes": 176085270528, + "total_used_raw_bytes": 609091584 + } + }, + "client_perf": { + "read_bytes_sec": 1, + "read_op_per_sec": 1, + "recovering_bytes_per_sec": 1, + "write_bytes_sec": 1, + "write_op_per_sec": 1 + }, + "hosts": 1, + "rgw": 1, + "iscsi_daemons": { + "up": 1, + "down": 1 + } +} diff --git a/src/go/plugin/go.d/modules/ceph/testdata/v16.2.15/api_monitor.json b/src/go/plugin/go.d/modules/ceph/testdata/v16.2.15/api_monitor.json new file mode 100644 index 000000000..e56a96cc2 --- /dev/null +++ b/src/go/plugin/go.d/modules/ceph/testdata/v16.2.15/api_monitor.json @@ -0,0 +1,315 @@ +{ + "mon_status": { + "name": "pve-deb-work", + "rank": 0, + "state": "leader", + "election_epoch": 7, + "quorum": [ + 0 + ], + "quorum_age": 315075, + "features": { + "required_con": "2449958747317026820", + "required_mon": [ + "kraken", + "luminous", + "mimic", + "osdmap-prune", + "nautilus", + "octopus", + "pacific", + "elector-pinging" + ], + "quorum_con": "4540138314316775423", + "quorum_mon": [ + "kraken", + "luminous", + "mimic", + "osdmap-prune", + "nautilus", + "octopus", + "pacific", + "elector-pinging" + ] + }, + "outside_quorum": [], + "extra_probe_peers": [], + "sync_provider": [], + "monmap": { + "epoch": 1, + "fsid": "28c31cb4-79ce-11ef-9a4d-e6007f1f06b0", + "modified": "2024-09-23T17:06:26.264498Z", + "created": "2024-09-23T17:06:26.264498Z", + "min_mon_release": 16, + "min_mon_release_name": "pacific", + "election_strategy": 1, + "disallowed_leaders: ": "", + "stretch_mode": false, + "tiebreaker_mon": "", + "removed_ranks: ": "", + "features": { + "persistent": [ + "kraken", + "luminous", + "mimic", + "osdmap-prune", + "nautilus", + "octopus", + "pacific", + "elector-pinging" + ], + "optional": [] + }, + "mons": [ + { + "rank": 0, + "name": "pve-deb-work", + "public_addrs": { + "addrvec": [ + { + "type": "v2", + "addr": "127.0.0.1:3300", + 
"nonce": 0 + }, + { + "type": "v1", + "addr": "127.0.0.1:6789", + "nonce": 0 + } + ] + }, + "addr": "127.0.0.1:6789/0", + "public_addr": "127.0.0.1:6789/0", + "priority": 0, + "weight": 0, + "crush_location": "{}", + "stats": { + "num_sessions": [ + [ + 1727428059.43674, + 9 + ], + [ + 1727428064.436794, + 9 + ], + [ + 1727428069.4371185, + 9 + ], + [ + 1727428074.437272, + 9 + ], + [ + 1727428079.437457, + 9 + ], + [ + 1727428084.4377377, + 9 + ], + [ + 1727428089.4380398, + 9 + ], + [ + 1727428094.4382815, + 9 + ], + [ + 1727428099.4385583, + 9 + ], + [ + 1727428104.4388537, + 9 + ], + [ + 1727428109.439028, + 9 + ], + [ + 1727428114.439225, + 9 + ], + [ + 1727428119.4395273, + 9 + ], + [ + 1727428124.4398105, + 9 + ], + [ + 1727428129.4401586, + 9 + ], + [ + 1727428134.4403143, + 9 + ], + [ + 1727428139.4406898, + 9 + ], + [ + 1727428144.4407866, + 9 + ], + [ + 1727428149.4410205, + 9 + ], + [ + 1727428154.4414103, + 9 + ] + ] + } + } + ] + }, + "feature_map": { + "mon": [ + { + "features": "0x3f01cfbdfffdffff", + "release": "luminous", + "num": 1 + } + ], + "osd": [ + { + "features": "0x3f01cfbdfffdffff", + "release": "luminous", + "num": 2 + } + ], + "client": [ + { + "features": "0x3f01cfbdfffdffff", + "release": "luminous", + "num": 4 + } + ], + "mgr": [ + { + "features": "0x3f01cfbdfffdffff", + "release": "luminous", + "num": 2 + } + ] + }, + "stretch_mode": false + }, + "in_quorum": [ + { + "rank": 0, + "name": "pve-deb-work", + "public_addrs": { + "addrvec": [ + { + "type": "v2", + "addr": "127.0.0.1:3300", + "nonce": 0 + }, + { + "type": "v1", + "addr": "127.0.0.1:6789", + "nonce": 0 + } + ] + }, + "addr": "127.0.0.1:6789/0", + "public_addr": "127.0.0.1:6789/0", + "priority": 0, + "weight": 0, + "crush_location": "{}", + "stats": { + "num_sessions": [ + [ + 1727428059.43674, + 9 + ], + [ + 1727428064.436794, + 9 + ], + [ + 1727428069.4371185, + 9 + ], + [ + 1727428074.437272, + 9 + ], + [ + 1727428079.437457, + 9 + ], + [ + 1727428084.4377377, + 9 + ], + [ + 1727428089.4380398, + 9 + ], + [ + 1727428094.4382815, + 9 + ], + [ + 1727428099.4385583, + 9 + ], + [ + 1727428104.4388537, + 9 + ], + [ + 1727428109.439028, + 9 + ], + [ + 1727428114.439225, + 9 + ], + [ + 1727428119.4395273, + 9 + ], + [ + 1727428124.4398105, + 9 + ], + [ + 1727428129.4401586, + 9 + ], + [ + 1727428134.4403143, + 9 + ], + [ + 1727428139.4406898, + 9 + ], + [ + 1727428144.4407866, + 9 + ], + [ + 1727428149.4410205, + 9 + ], + [ + 1727428154.4414103, + 9 + ] + ] + } + } + ], + "out_quorum": [] +} diff --git a/src/go/plugin/go.d/modules/ceph/testdata/v16.2.15/api_osd.json b/src/go/plugin/go.d/modules/ceph/testdata/v16.2.15/api_osd.json new file mode 100644 index 000000000..b5fc4d68e --- /dev/null +++ b/src/go/plugin/go.d/modules/ceph/testdata/v16.2.15/api_osd.json @@ -0,0 +1,930 @@ +[ + { + "osd": 0, + "up": 1, + "in": 1, + "weight": 1.0, + "primary_affinity": 1.0, + "last_clean_begin": 0, + "last_clean_end": 0, + "up_from": 16, + "up_thru": 23, + "down_at": 0, + "lost_at": 0, + "public_addrs": { + "addrvec": [ + { + "type": "v2", + "nonce": 390161213, + "addr": "127.0.0.1:6802" + }, + { + "type": "v1", + "nonce": 390161213, + "addr": "127.0.0.1:6803" + } + ] + }, + "cluster_addrs": { + "addrvec": [ + { + "type": "v2", + "nonce": 390161213, + "addr": "127.0.0.1:6804" + }, + { + "type": "v1", + "nonce": 390161213, + "addr": "127.0.0.1:6805" + } + ] + }, + "heartbeat_back_addrs": { + "addrvec": [ + { + "type": "v2", + "nonce": 390161213, + "addr": "127.0.0.1:6808" + }, + { + "type": "v1", + "nonce": 390161213, 
+ "addr": "127.0.0.1:6809" + } + ] + }, + "heartbeat_front_addrs": { + "addrvec": [ + { + "type": "v2", + "nonce": 390161213, + "addr": "127.0.0.1:6806" + }, + { + "type": "v1", + "nonce": 390161213, + "addr": "127.0.0.1:6807" + } + ] + }, + "state": [ + "exists", + "up" + ], + "uuid": "f78537db-9b18-4c62-a24f-a4344fc28de7", + "public_addr": "127.0.0.1:6803/390161213", + "cluster_addr": "127.0.0.1:6805/390161213", + "heartbeat_back_addr": "127.0.0.1:6809/390161213", + "heartbeat_front_addr": "127.0.0.1:6807/390161213", + "id": 0, + "osd_stats": { + "osd": 0, + "up_from": 16, + "seq": 68719494425, + "num_pgs": 2, + "num_osds": 1, + "num_per_pool_osds": 1, + "num_per_pool_omap_osds": 1, + "kb": 104853504, + "kb_used": 297392, + "kb_used_data": 296, + "kb_used_omap": 0, + "kb_used_meta": 297088, + "kb_avail": 104556112, + "statfs": { + "total": 107369988096, + "available": 107065458688, + "internally_reserved": 0, + "allocated": 303104, + "data_stored": 115778, + "data_compressed": 0, + "data_compressed_allocated": 0, + "data_compressed_original": 0, + "omap_allocated": 0, + "internal_metadata": 304218112 + }, + "hb_peers": [ + 1 + ], + "snap_trim_queue_len": 0, + "num_snap_trimming": 0, + "num_shards_repaired": 0, + "op_queue_age_hist": { + "histogram": [], + "upper_bound": 1 + }, + "perf_stat": { + "commit_latency_ms": 1.0, + "apply_latency_ms": 1.0, + "commit_latency_ns": 0, + "apply_latency_ns": 0 + }, + "alerts": [] + }, + "tree": { + "id": 0, + "device_class": "hdd", + "type": "osd", + "type_id": 0, + "crush_weight": 0.097686767578125, + "depth": 2, + "pool_weights": {}, + "exists": 1, + "status": "up", + "reweight": 1.0, + "primary_affinity": 1.0, + "name": "osd.0" + }, + "host": { + "id": -3, + "name": "pve-deb-work", + "type": "host", + "type_id": 1, + "pool_weights": {}, + "children": [ + 1, + 0 + ] + }, + "stats": { + "op_w": 1.0, + "op_in_bytes": 1.0, + "op_r": 1.0, + "op_out_bytes": 1.0, + "numpg": 2, + "stat_bytes": 107369988096, + "stat_bytes_used": 304529408 + }, + "stats_history": { + "op_w": [ + [ + 1727260221.6928742, + 0.0 + ], + [ + 1727260226.693285, + 0.0 + ], + [ + 1727260231.693595, + 0.0 + ], + [ + 1727260236.6938233, + 0.0 + ], + [ + 1727260241.6942236, + 0.0 + ], + [ + 1727260246.6945205, + 0.0 + ], + [ + 1727260251.694739, + 0.0 + ], + [ + 1727260256.6951063, + 0.0 + ], + [ + 1727260261.6954763, + 0.0 + ], + [ + 1727260266.695751, + 0.0 + ], + [ + 1727260271.6960592, + 0.0 + ], + [ + 1727260276.6963248, + 0.0 + ], + [ + 1727260281.6966574, + 0.0 + ], + [ + 1727260286.696938, + 0.0 + ], + [ + 1727260291.6973395, + 0.0 + ], + [ + 1727260296.697657, + 0.0 + ], + [ + 1727260301.6980677, + 0.0 + ], + [ + 1727260306.698276, + 0.0 + ], + [ + 1727260311.6986544, + 0.0 + ] + ], + "op_in_bytes": [ + [ + 1727260221.6928742, + 0.0 + ], + [ + 1727260226.693285, + 0.0 + ], + [ + 1727260231.693595, + 0.0 + ], + [ + 1727260236.6938233, + 0.0 + ], + [ + 1727260241.6942236, + 0.0 + ], + [ + 1727260246.6945205, + 0.0 + ], + [ + 1727260251.694739, + 0.0 + ], + [ + 1727260256.6951063, + 0.0 + ], + [ + 1727260261.6954763, + 0.0 + ], + [ + 1727260266.695751, + 0.0 + ], + [ + 1727260271.6960592, + 0.0 + ], + [ + 1727260276.6963248, + 0.0 + ], + [ + 1727260281.6966574, + 0.0 + ], + [ + 1727260286.696938, + 0.0 + ], + [ + 1727260291.6973395, + 0.0 + ], + [ + 1727260296.697657, + 0.0 + ], + [ + 1727260301.6980677, + 0.0 + ], + [ + 1727260306.698276, + 0.0 + ], + [ + 1727260311.6986544, + 0.0 + ] + ], + "op_r": [ + [ + 1727260221.6928742, + 0.0 + ], + [ + 1727260226.693285, + 0.0 + ], + [ + 
1727260231.693595, + 0.0 + ], + [ + 1727260236.6938233, + 0.0 + ], + [ + 1727260241.6942236, + 0.0 + ], + [ + 1727260246.6945205, + 0.0 + ], + [ + 1727260251.694739, + 0.0 + ], + [ + 1727260256.6951063, + 0.0 + ], + [ + 1727260261.6954763, + 0.0 + ], + [ + 1727260266.695751, + 0.0 + ], + [ + 1727260271.6960592, + 0.0 + ], + [ + 1727260276.6963248, + 0.0 + ], + [ + 1727260281.6966574, + 0.0 + ], + [ + 1727260286.696938, + 0.0 + ], + [ + 1727260291.6973395, + 0.0 + ], + [ + 1727260296.697657, + 0.0 + ], + [ + 1727260301.6980677, + 0.0 + ], + [ + 1727260306.698276, + 0.0 + ], + [ + 1727260311.6986544, + 0.0 + ] + ], + "op_out_bytes": [ + [ + 1727260221.6928742, + 0.0 + ], + [ + 1727260226.693285, + 0.0 + ], + [ + 1727260231.693595, + 0.0 + ], + [ + 1727260236.6938233, + 0.0 + ], + [ + 1727260241.6942236, + 0.0 + ], + [ + 1727260246.6945205, + 0.0 + ], + [ + 1727260251.694739, + 0.0 + ], + [ + 1727260256.6951063, + 0.0 + ], + [ + 1727260261.6954763, + 0.0 + ], + [ + 1727260266.695751, + 0.0 + ], + [ + 1727260271.6960592, + 0.0 + ], + [ + 1727260276.6963248, + 0.0 + ], + [ + 1727260281.6966574, + 0.0 + ], + [ + 1727260286.696938, + 0.0 + ], + [ + 1727260291.6973395, + 0.0 + ], + [ + 1727260296.697657, + 0.0 + ], + [ + 1727260301.6980677, + 0.0 + ], + [ + 1727260306.698276, + 0.0 + ], + [ + 1727260311.6986544, + 0.0 + ] + ] + }, + "operational_status": "working" + }, + { + "osd": 1, + "up": 1, + "in": 1, + "weight": 1.0, + "primary_affinity": 1.0, + "last_clean_begin": 0, + "last_clean_end": 0, + "up_from": 22, + "up_thru": 22, + "down_at": 0, + "lost_at": 0, + "public_addrs": { + "addrvec": [ + { + "type": "v2", + "nonce": 1173056633, + "addr": "127.0.0.1:6810" + }, + { + "type": "v1", + "nonce": 1173056633, + "addr": "127.0.0.1:6811" + } + ] + }, + "cluster_addrs": { + "addrvec": [ + { + "type": "v2", + "nonce": 1173056633, + "addr": "127.0.0.1:6812" + }, + { + "type": "v1", + "nonce": 1173056633, + "addr": "127.0.0.1:6813" + } + ] + }, + "heartbeat_back_addrs": { + "addrvec": [ + { + "type": "v2", + "nonce": 1173056633, + "addr": "127.0.0.1:6816" + }, + { + "type": "v1", + "nonce": 1173056633, + "addr": "127.0.0.1:6817" + } + ] + }, + "heartbeat_front_addrs": { + "addrvec": [ + { + "type": "v2", + "nonce": 1173056633, + "addr": "127.0.0.1:6814" + }, + { + "type": "v1", + "nonce": 1173056633, + "addr": "127.0.0.1:6815" + } + ] + }, + "state": [ + "exists", + "up" + ], + "uuid": "f5bbbe9d-e85b-419c-af5a-a57e2527cad3", + "public_addr": "127.0.0.1:6811/1173056633", + "cluster_addr": "127.0.0.1:6813/1173056633", + "heartbeat_back_addr": "127.0.0.1:6817/1173056633", + "heartbeat_front_addr": "127.0.0.1:6815/1173056633", + "id": 1, + "osd_stats": { + "osd": 1, + "up_from": 22, + "seq": 94489298171, + "num_pgs": 2, + "num_osds": 1, + "num_per_pool_osds": 1, + "num_per_pool_omap_osds": 1, + "kb": 67104768, + "kb_used": 297392, + "kb_used_data": 296, + "kb_used_omap": 0, + "kb_used_meta": 297088, + "kb_avail": 66807376, + "statfs": { + "total": 68715282432, + "available": 68410753024, + "internally_reserved": 0, + "allocated": 303104, + "data_stored": 115778, + "data_compressed": 0, + "data_compressed_allocated": 0, + "data_compressed_original": 0, + "omap_allocated": 0, + "internal_metadata": 304218112 + }, + "hb_peers": [ + 0 + ], + "snap_trim_queue_len": 0, + "num_snap_trimming": 0, + "num_shards_repaired": 0, + "op_queue_age_hist": { + "histogram": [], + "upper_bound": 1 + }, + "perf_stat": { + "commit_latency_ms": 1.0, + "apply_latency_ms": 1.0, + "commit_latency_ns": 0, + "apply_latency_ns": 0 + 
}, + "alerts": [] + }, + "tree": { + "id": 1, + "device_class": "ssd", + "type": "osd", + "type_id": 0, + "crush_weight": 0.0625, + "depth": 2, + "pool_weights": {}, + "exists": 1, + "status": "up", + "reweight": 1.0, + "primary_affinity": 1.0, + "name": "osd.1" + }, + "host": { + "id": -3, + "name": "pve-deb-work", + "type": "host", + "type_id": 1, + "pool_weights": {}, + "children": [ + 1, + 0 + ] + }, + "stats": { + "op_w": 1.0, + "op_in_bytes": 1.0, + "op_r": 1.0, + "op_out_bytes": 1.0, + "numpg": 2, + "stat_bytes": 68715282432, + "stat_bytes_used": 304529408 + }, + "stats_history": { + "op_w": [ + [ + 1727260228.751263, + 0.0 + ], + [ + 1727260233.7515125, + 0.0 + ], + [ + 1727260238.7518487, + 0.0 + ], + [ + 1727260243.752178, + 0.0 + ], + [ + 1727260248.752556, + 0.0 + ], + [ + 1727260253.7527573, + 0.0 + ], + [ + 1727260258.7530267, + 0.0 + ], + [ + 1727260263.753484, + 0.0 + ], + [ + 1727260268.753807, + 0.0 + ], + [ + 1727260273.754063, + 0.0 + ], + [ + 1727260278.7543082, + 0.0 + ], + [ + 1727260283.7546039, + 0.0 + ], + [ + 1727260288.754978, + 0.0 + ], + [ + 1727260293.7552564, + 0.0 + ], + [ + 1727260298.755653, + 0.0 + ], + [ + 1727260303.7559133, + 0.0 + ], + [ + 1727260308.7562194, + 0.0 + ], + [ + 1727260313.7565064, + 0.0 + ] + ], + "op_in_bytes": [ + [ + 1727260228.751263, + 0.0 + ], + [ + 1727260233.7515125, + 0.0 + ], + [ + 1727260238.7518487, + 0.0 + ], + [ + 1727260243.752178, + 0.0 + ], + [ + 1727260248.752556, + 0.0 + ], + [ + 1727260253.7527573, + 0.0 + ], + [ + 1727260258.7530267, + 0.0 + ], + [ + 1727260263.753484, + 0.0 + ], + [ + 1727260268.753807, + 0.0 + ], + [ + 1727260273.754063, + 0.0 + ], + [ + 1727260278.7543082, + 0.0 + ], + [ + 1727260283.7546039, + 0.0 + ], + [ + 1727260288.754978, + 0.0 + ], + [ + 1727260293.7552564, + 0.0 + ], + [ + 1727260298.755653, + 0.0 + ], + [ + 1727260303.7559133, + 0.0 + ], + [ + 1727260308.7562194, + 0.0 + ], + [ + 1727260313.7565064, + 0.0 + ] + ], + "op_r": [ + [ + 1727260228.751263, + 0.0 + ], + [ + 1727260233.7515125, + 0.0 + ], + [ + 1727260238.7518487, + 0.0 + ], + [ + 1727260243.752178, + 0.0 + ], + [ + 1727260248.752556, + 0.0 + ], + [ + 1727260253.7527573, + 0.0 + ], + [ + 1727260258.7530267, + 0.0 + ], + [ + 1727260263.753484, + 0.0 + ], + [ + 1727260268.753807, + 0.0 + ], + [ + 1727260273.754063, + 0.0 + ], + [ + 1727260278.7543082, + 0.0 + ], + [ + 1727260283.7546039, + 0.0 + ], + [ + 1727260288.754978, + 0.0 + ], + [ + 1727260293.7552564, + 0.0 + ], + [ + 1727260298.755653, + 0.0 + ], + [ + 1727260303.7559133, + 0.0 + ], + [ + 1727260308.7562194, + 0.0 + ], + [ + 1727260313.7565064, + 0.0 + ] + ], + "op_out_bytes": [ + [ + 1727260228.751263, + 0.0 + ], + [ + 1727260233.7515125, + 0.0 + ], + [ + 1727260238.7518487, + 0.0 + ], + [ + 1727260243.752178, + 0.0 + ], + [ + 1727260248.752556, + 0.0 + ], + [ + 1727260253.7527573, + 0.0 + ], + [ + 1727260258.7530267, + 0.0 + ], + [ + 1727260263.753484, + 0.0 + ], + [ + 1727260268.753807, + 0.0 + ], + [ + 1727260273.754063, + 0.0 + ], + [ + 1727260278.7543082, + 0.0 + ], + [ + 1727260283.7546039, + 0.0 + ], + [ + 1727260288.754978, + 0.0 + ], + [ + 1727260293.7552564, + 0.0 + ], + [ + 1727260298.755653, + 0.0 + ], + [ + 1727260303.7559133, + 0.0 + ], + [ + 1727260308.7562194, + 0.0 + ], + [ + 1727260313.7565064, + 0.0 + ] + ] + }, + "operational_status": "working" + } +] diff --git a/src/go/plugin/go.d/modules/ceph/testdata/v16.2.15/api_pool_stats.json b/src/go/plugin/go.d/modules/ceph/testdata/v16.2.15/api_pool_stats.json new file mode 100644 index 000000000..12e1f4999 
--- /dev/null +++ b/src/go/plugin/go.d/modules/ceph/testdata/v16.2.15/api_pool_stats.json @@ -0,0 +1,1923 @@ +[ + { + "pool": 1, + "pool_name": "mySuperPool", + "flags": 32769, + "flags_names": "hashpspool,creating", + "type": "erasure", + "size": 4, + "min_size": 3, + "crush_rule": "erasure-code", + "peering_crush_bucket_count": 0, + "peering_crush_bucket_target": 0, + "peering_crush_bucket_barrier": 0, + "peering_crush_bucket_mandatory_member": 2147483647, + "object_hash": 2, + "pg_autoscale_mode": "on", + "pg_num": 1, + "pg_placement_num": 1, + "pg_placement_num_target": 32, + "pg_num_target": 32, + "pg_num_pending": 1, + "last_pg_merge_meta": { + "ready_epoch": 0, + "last_epoch_started": 0, + "last_epoch_clean": 0, + "source_pgid": "0.0", + "source_version": "0'0", + "target_version": "0'0" + }, + "auid": 0, + "snap_mode": "selfmanaged", + "snap_seq": 0, + "snap_epoch": 0, + "pool_snaps": [], + "quota_max_bytes": 2147483648, + "quota_max_objects": 0, + "tiers": [], + "tier_of": -1, + "read_tier": -1, + "write_tier": -1, + "cache_mode": "none", + "target_max_bytes": 0, + "target_max_objects": 0, + "cache_target_dirty_ratio_micro": 400000, + "cache_target_dirty_high_ratio_micro": 600000, + "cache_target_full_ratio_micro": 800000, + "cache_min_flush_age": 0, + "cache_min_evict_age": 0, + "erasure_code_profile": "default", + "hit_set_params": { + "type": "none" + }, + "hit_set_period": 0, + "hit_set_count": 0, + "use_gmt_hitset": true, + "min_read_recency_for_promote": 0, + "min_write_recency_for_promote": 0, + "hit_set_grade_decay_rate": 0, + "hit_set_search_last_n": 0, + "grade_table": [], + "stripe_width": 8192, + "expected_num_objects": 0, + "fast_read": false, + "options": {}, + "application_metadata": [], + "create_time": "2024-09-23T17:27:53.650381+0000", + "last_change": "18", + "last_force_op_resend": "0", + "last_force_op_resend_prenautilus": "0", + "last_force_op_resend_preluminous": "0", + "removed_snaps": "[]", + "pg_status": { + "creating+incomplete": 1 + }, + "stats": { + "stored": { + "latest": 1, + "rate": 0.0, + "rates": [ + [ + 1727260045.5636568, + 0.0 + ], + [ + 1727260050.5730257, + 0.0 + ], + [ + 1727260055.56124, + 0.0 + ], + [ + 1727260060.564262, + 0.0 + ], + [ + 1727260065.5657525, + 0.0 + ], + [ + 1727260070.563676, + 0.0 + ], + [ + 1727260105.6695807, + 0.0 + ], + [ + 1727260165.6647935, + 0.0 + ], + [ + 1727260212.0559795, + 0.0 + ] + ] + }, + "stored_data": { + "latest": 0, + "rate": 0.0, + "rates": [ + [ + 1727260045.5636568, + 0.0 + ], + [ + 1727260050.5730257, + 0.0 + ], + [ + 1727260055.56124, + 0.0 + ], + [ + 1727260060.564262, + 0.0 + ], + [ + 1727260065.5657525, + 0.0 + ], + [ + 1727260070.563676, + 0.0 + ], + [ + 1727260105.6695807, + 0.0 + ], + [ + 1727260165.6647935, + 0.0 + ], + [ + 1727260212.0559795, + 0.0 + ] + ] + }, + "stored_omap": { + "latest": 0, + "rate": 0.0, + "rates": [ + [ + 1727260045.5636568, + 0.0 + ], + [ + 1727260050.5730257, + 0.0 + ], + [ + 1727260055.56124, + 0.0 + ], + [ + 1727260060.564262, + 0.0 + ], + [ + 1727260065.5657525, + 0.0 + ], + [ + 1727260070.563676, + 0.0 + ], + [ + 1727260105.6695807, + 0.0 + ], + [ + 1727260165.6647935, + 0.0 + ], + [ + 1727260212.0559795, + 0.0 + ] + ] + }, + "objects": { + "latest": 1, + "rate": 0.0, + "rates": [ + [ + 1727260045.5636568, + 0.0 + ], + [ + 1727260050.5730257, + 0.0 + ], + [ + 1727260055.56124, + 0.0 + ], + [ + 1727260060.564262, + 0.0 + ], + [ + 1727260065.5657525, + 0.0 + ], + [ + 1727260070.563676, + 0.0 + ], + [ + 1727260105.6695807, + 0.0 + ], + [ + 1727260165.6647935, + 
0.0 + ], + [ + 1727260212.0559795, + 0.0 + ] + ] + }, + "kb_used": { + "latest": 0, + "rate": 0.0, + "rates": [ + [ + 1727260045.5636568, + 0.0 + ], + [ + 1727260050.5730257, + 0.0 + ], + [ + 1727260055.56124, + 0.0 + ], + [ + 1727260060.564262, + 0.0 + ], + [ + 1727260065.5657525, + 0.0 + ], + [ + 1727260070.563676, + 0.0 + ], + [ + 1727260105.6695807, + 0.0 + ], + [ + 1727260165.6647935, + 0.0 + ], + [ + 1727260212.0559795, + 0.0 + ] + ] + }, + "bytes_used": { + "latest": 1, + "rate": 0.0, + "rates": [ + [ + 1727260045.5636568, + 0.0 + ], + [ + 1727260050.5730257, + 0.0 + ], + [ + 1727260055.56124, + 0.0 + ], + [ + 1727260060.564262, + 0.0 + ], + [ + 1727260065.5657525, + 0.0 + ], + [ + 1727260070.563676, + 0.0 + ], + [ + 1727260105.6695807, + 0.0 + ], + [ + 1727260165.6647935, + 0.0 + ], + [ + 1727260212.0559795, + 0.0 + ] + ] + }, + "data_bytes_used": { + "latest": 0, + "rate": 0.0, + "rates": [ + [ + 1727260045.5636568, + 0.0 + ], + [ + 1727260050.5730257, + 0.0 + ], + [ + 1727260055.56124, + 0.0 + ], + [ + 1727260060.564262, + 0.0 + ], + [ + 1727260065.5657525, + 0.0 + ], + [ + 1727260070.563676, + 0.0 + ], + [ + 1727260105.6695807, + 0.0 + ], + [ + 1727260165.6647935, + 0.0 + ], + [ + 1727260212.0559795, + 0.0 + ] + ] + }, + "omap_bytes_used": { + "latest": 0, + "rate": 0.0, + "rates": [ + [ + 1727260045.5636568, + 0.0 + ], + [ + 1727260050.5730257, + 0.0 + ], + [ + 1727260055.56124, + 0.0 + ], + [ + 1727260060.564262, + 0.0 + ], + [ + 1727260065.5657525, + 0.0 + ], + [ + 1727260070.563676, + 0.0 + ], + [ + 1727260105.6695807, + 0.0 + ], + [ + 1727260165.6647935, + 0.0 + ], + [ + 1727260212.0559795, + 0.0 + ] + ] + }, + "percent_used": { + "latest": 1.0, + "rate": 0.0, + "rates": [ + [ + 1727260045.5636568, + 0.0 + ], + [ + 1727260050.5730257, + 0.0 + ], + [ + 1727260055.56124, + 0.0 + ], + [ + 1727260060.564262, + 0.0 + ], + [ + 1727260065.5657525, + 0.0 + ], + [ + 1727260070.563676, + 0.0 + ], + [ + 1727260105.6695807, + 0.0 + ], + [ + 1727260165.6647935, + 0.0 + ], + [ + 1727260212.0559795, + 0.0 + ] + ] + }, + "max_avail": { + "latest": 83265085440, + "rate": 0.0, + "rates": [ + [ + 1727260045.5636568, + 0.0 + ], + [ + 1727260050.5730257, + 0.0 + ], + [ + 1727260055.56124, + 0.0 + ], + [ + 1727260060.564262, + 0.0 + ], + [ + 1727260065.5657525, + 0.0 + ], + [ + 1727260070.563676, + 0.0 + ], + [ + 1727260105.6695807, + 0.0 + ], + [ + 1727260165.6647935, + 0.0 + ], + [ + 1727260212.0559795, + 0.0 + ] + ] + }, + "quota_objects": { + "latest": 0, + "rate": 0.0, + "rates": [ + [ + 1727260045.5636568, + 0.0 + ], + [ + 1727260050.5730257, + 0.0 + ], + [ + 1727260055.56124, + 0.0 + ], + [ + 1727260060.564262, + 0.0 + ], + [ + 1727260065.5657525, + 0.0 + ], + [ + 1727260070.563676, + 0.0 + ], + [ + 1727260105.6695807, + 0.0 + ], + [ + 1727260165.6647935, + 0.0 + ], + [ + 1727260212.0559795, + 0.0 + ] + ] + }, + "quota_bytes": { + "latest": 2147483648, + "rate": 0.0, + "rates": [ + [ + 1727260045.5636568, + 0.0 + ], + [ + 1727260050.5730257, + 0.0 + ], + [ + 1727260055.56124, + 0.0 + ], + [ + 1727260060.564262, + 0.0 + ], + [ + 1727260065.5657525, + 0.0 + ], + [ + 1727260070.563676, + 0.0 + ], + [ + 1727260105.6695807, + 0.0 + ], + [ + 1727260165.6647935, + 0.0 + ], + [ + 1727260212.0559795, + 0.0 + ] + ] + }, + "dirty": { + "latest": 0, + "rate": 0.0, + "rates": [ + [ + 1727260045.5636568, + 0.0 + ], + [ + 1727260050.5730257, + 0.0 + ], + [ + 1727260055.56124, + 0.0 + ], + [ + 1727260060.564262, + 0.0 + ], + [ + 1727260065.5657525, + 0.0 + ], + [ + 1727260070.563676, + 0.0 + ], + [ + 
1727260105.6695807, + 0.0 + ], + [ + 1727260165.6647935, + 0.0 + ], + [ + 1727260212.0559795, + 0.0 + ] + ] + }, + "rd": { + "latest": 1, + "rate": 0.0, + "rates": [ + [ + 1727260045.5636568, + 0.0 + ], + [ + 1727260050.5730257, + 0.0 + ], + [ + 1727260055.56124, + 0.0 + ], + [ + 1727260060.564262, + 0.0 + ], + [ + 1727260065.5657525, + 0.0 + ], + [ + 1727260070.563676, + 0.0 + ], + [ + 1727260105.6695807, + 0.0 + ], + [ + 1727260165.6647935, + 0.0 + ], + [ + 1727260212.0559795, + 0.0 + ] + ] + }, + "rd_bytes": { + "latest": 1, + "rate": 0.0, + "rates": [ + [ + 1727260045.5636568, + 0.0 + ], + [ + 1727260050.5730257, + 0.0 + ], + [ + 1727260055.56124, + 0.0 + ], + [ + 1727260060.564262, + 0.0 + ], + [ + 1727260065.5657525, + 0.0 + ], + [ + 1727260070.563676, + 0.0 + ], + [ + 1727260105.6695807, + 0.0 + ], + [ + 1727260165.6647935, + 0.0 + ], + [ + 1727260212.0559795, + 0.0 + ] + ] + }, + "wr": { + "latest": 1, + "rate": 0.0, + "rates": [ + [ + 1727260045.5636568, + 0.0 + ], + [ + 1727260050.5730257, + 0.0 + ], + [ + 1727260055.56124, + 0.0 + ], + [ + 1727260060.564262, + 0.0 + ], + [ + 1727260065.5657525, + 0.0 + ], + [ + 1727260070.563676, + 0.0 + ], + [ + 1727260105.6695807, + 0.0 + ], + [ + 1727260165.6647935, + 0.0 + ], + [ + 1727260212.0559795, + 0.0 + ] + ] + }, + "wr_bytes": { + "latest": 1, + "rate": 0.0, + "rates": [ + [ + 1727260045.5636568, + 0.0 + ], + [ + 1727260050.5730257, + 0.0 + ], + [ + 1727260055.56124, + 0.0 + ], + [ + 1727260060.564262, + 0.0 + ], + [ + 1727260065.5657525, + 0.0 + ], + [ + 1727260070.563676, + 0.0 + ], + [ + 1727260105.6695807, + 0.0 + ], + [ + 1727260165.6647935, + 0.0 + ], + [ + 1727260212.0559795, + 0.0 + ] + ] + }, + "compress_bytes_used": { + "latest": 0, + "rate": 0.0, + "rates": [ + [ + 1727260045.5636568, + 0.0 + ], + [ + 1727260050.5730257, + 0.0 + ], + [ + 1727260055.56124, + 0.0 + ], + [ + 1727260060.564262, + 0.0 + ], + [ + 1727260065.5657525, + 0.0 + ], + [ + 1727260070.563676, + 0.0 + ], + [ + 1727260105.6695807, + 0.0 + ], + [ + 1727260165.6647935, + 0.0 + ], + [ + 1727260212.0559795, + 0.0 + ] + ] + }, + "compress_under_bytes": { + "latest": 0, + "rate": 0.0, + "rates": [ + [ + 1727260045.5636568, + 0.0 + ], + [ + 1727260050.5730257, + 0.0 + ], + [ + 1727260055.56124, + 0.0 + ], + [ + 1727260060.564262, + 0.0 + ], + [ + 1727260065.5657525, + 0.0 + ], + [ + 1727260070.563676, + 0.0 + ], + [ + 1727260105.6695807, + 0.0 + ], + [ + 1727260165.6647935, + 0.0 + ], + [ + 1727260212.0559795, + 0.0 + ] + ] + }, + "stored_raw": { + "latest": 0, + "rate": 0.0, + "rates": [ + [ + 1727260045.5636568, + 0.0 + ], + [ + 1727260050.5730257, + 0.0 + ], + [ + 1727260055.56124, + 0.0 + ], + [ + 1727260060.564262, + 0.0 + ], + [ + 1727260065.5657525, + 0.0 + ], + [ + 1727260070.563676, + 0.0 + ], + [ + 1727260105.6695807, + 0.0 + ], + [ + 1727260165.6647935, + 0.0 + ], + [ + 1727260212.0559795, + 0.0 + ] + ] + }, + "avail_raw": { + "latest": 166530172973, + "rate": 0.0, + "rates": [ + [ + 1727260045.5636568, + 0.0 + ], + [ + 1727260050.5730257, + 0.0 + ], + [ + 1727260055.56124, + 0.0 + ], + [ + 1727260060.564262, + 0.0 + ], + [ + 1727260065.5657525, + 0.0 + ], + [ + 1727260070.563676, + 0.0 + ], + [ + 1727260105.6695807, + 0.0 + ], + [ + 1727260165.6647935, + 0.0 + ], + [ + 1727260212.0559795, + 0.0 + ] + ] + } + } + }, + { + "pool": 2, + "pool_name": "device_health_metrics", + "flags": 1, + "flags_names": "hashpspool", + "type": "replicated", + "size": 2, + "min_size": 1, + "crush_rule": "replicated_rule", + "peering_crush_bucket_count": 0, + 
"peering_crush_bucket_target": 0, + "peering_crush_bucket_barrier": 0, + "peering_crush_bucket_mandatory_member": 2147483647, + "object_hash": 2, + "pg_autoscale_mode": "on", + "pg_num": 1, + "pg_placement_num": 1, + "pg_placement_num_target": 1, + "pg_num_target": 1, + "pg_num_pending": 1, + "last_pg_merge_meta": { + "ready_epoch": 0, + "last_epoch_started": 0, + "last_epoch_clean": 0, + "source_pgid": "0.0", + "source_version": "0'0", + "target_version": "0'0" + }, + "auid": 0, + "snap_mode": "selfmanaged", + "snap_seq": 0, + "snap_epoch": 0, + "pool_snaps": [], + "quota_max_bytes": 0, + "quota_max_objects": 0, + "tiers": [], + "tier_of": -1, + "read_tier": -1, + "write_tier": -1, + "cache_mode": "none", + "target_max_bytes": 0, + "target_max_objects": 0, + "cache_target_dirty_ratio_micro": 400000, + "cache_target_dirty_high_ratio_micro": 600000, + "cache_target_full_ratio_micro": 800000, + "cache_min_flush_age": 0, + "cache_min_evict_age": 0, + "erasure_code_profile": "", + "hit_set_params": { + "type": "none" + }, + "hit_set_period": 0, + "hit_set_count": 0, + "use_gmt_hitset": true, + "min_read_recency_for_promote": 0, + "min_write_recency_for_promote": 0, + "hit_set_grade_decay_rate": 0, + "hit_set_search_last_n": 0, + "grade_table": [], + "stripe_width": 0, + "expected_num_objects": 0, + "fast_read": false, + "options": { + "pg_num_max": 32, + "pg_num_min": 1 + }, + "application_metadata": [ + "mgr_devicehealth" + ], + "create_time": "2024-09-24T10:00:22.967240+0000", + "last_change": "25", + "last_force_op_resend": "0", + "last_force_op_resend_prenautilus": "0", + "last_force_op_resend_preluminous": "0", + "removed_snaps": "[]", + "pg_status": { + "active+clean": 1 + }, + "stats": { + "stored": { + "latest": 1, + "rate": 0.0, + "rates": [ + [ + 1727260045.5636568, + 0.0 + ], + [ + 1727260050.5730257, + 0.0 + ], + [ + 1727260055.56124, + 0.0 + ], + [ + 1727260060.564262, + 0.0 + ], + [ + 1727260065.5657525, + 0.0 + ], + [ + 1727260070.563676, + 0.0 + ], + [ + 1727260105.6695807, + 0.0 + ], + [ + 1727260165.6647935, + 0.0 + ], + [ + 1727260212.0559795, + 0.0 + ] + ] + }, + "stored_data": { + "latest": 1, + "rate": 0.0, + "rates": [ + [ + 1727260045.5636568, + 0.0 + ], + [ + 1727260050.5730257, + 0.0 + ], + [ + 1727260055.56124, + 0.0 + ], + [ + 1727260060.564262, + 0.0 + ], + [ + 1727260065.5657525, + 0.0 + ], + [ + 1727260070.563676, + 0.0 + ], + [ + 1727260105.6695807, + 0.0 + ], + [ + 1727260165.6647935, + 0.0 + ], + [ + 1727260212.0559795, + 0.0 + ] + ] + }, + "stored_omap": { + "latest": 0, + "rate": 0.0, + "rates": [ + [ + 1727260045.5636568, + 0.0 + ], + [ + 1727260050.5730257, + 0.0 + ], + [ + 1727260055.56124, + 0.0 + ], + [ + 1727260060.564262, + 0.0 + ], + [ + 1727260065.5657525, + 0.0 + ], + [ + 1727260070.563676, + 0.0 + ], + [ + 1727260105.6695807, + 0.0 + ], + [ + 1727260165.6647935, + 0.0 + ], + [ + 1727260212.0559795, + 0.0 + ] + ] + }, + "objects": { + "latest": 3, + "rate": 0.0, + "rates": [ + [ + 1727260045.5636568, + 0.0 + ], + [ + 1727260050.5730257, + 0.0 + ], + [ + 1727260055.56124, + 0.0 + ], + [ + 1727260060.564262, + 0.0 + ], + [ + 1727260065.5657525, + 0.0 + ], + [ + 1727260070.563676, + 0.0 + ], + [ + 1727260105.6695807, + 0.0 + ], + [ + 1727260165.6647935, + 0.0 + ], + [ + 1727260212.0559795, + 0.0 + ] + ] + }, + "kb_used": { + "latest": 0, + "rate": 0.0, + "rates": [ + [ + 1727260045.5636568, + 0.0 + ], + [ + 1727260050.5730257, + 0.0 + ], + [ + 1727260055.56124, + 0.0 + ], + [ + 1727260060.564262, + 0.0 + ], + [ + 1727260065.5657525, + 0.0 + ], + [ + 
1727260070.563676, + 0.0 + ], + [ + 1727260105.6695807, + 0.0 + ], + [ + 1727260165.6647935, + 0.0 + ], + [ + 1727260212.0559795, + 0.0 + ] + ] + }, + "bytes_used": { + "latest": 1, + "rate": 0.0, + "rates": [ + [ + 1727260045.5636568, + 0.0 + ], + [ + 1727260050.5730257, + 0.0 + ], + [ + 1727260055.56124, + 0.0 + ], + [ + 1727260060.564262, + 0.0 + ], + [ + 1727260065.5657525, + 0.0 + ], + [ + 1727260070.563676, + 0.0 + ], + [ + 1727260105.6695807, + 0.0 + ], + [ + 1727260165.6647935, + 0.0 + ], + [ + 1727260212.0559795, + 0.0 + ] + ] + }, + "data_bytes_used": { + "latest": 0, + "rate": 0.0, + "rates": [ + [ + 1727260045.5636568, + 0.0 + ], + [ + 1727260050.5730257, + 0.0 + ], + [ + 1727260055.56124, + 0.0 + ], + [ + 1727260060.564262, + 0.0 + ], + [ + 1727260065.5657525, + 0.0 + ], + [ + 1727260070.563676, + 0.0 + ], + [ + 1727260105.6695807, + 0.0 + ], + [ + 1727260165.6647935, + 0.0 + ], + [ + 1727260212.0559795, + 0.0 + ] + ] + }, + "omap_bytes_used": { + "latest": 0, + "rate": 0.0, + "rates": [ + [ + 1727260045.5636568, + 0.0 + ], + [ + 1727260050.5730257, + 0.0 + ], + [ + 1727260055.56124, + 0.0 + ], + [ + 1727260060.564262, + 0.0 + ], + [ + 1727260065.5657525, + 0.0 + ], + [ + 1727260070.563676, + 0.0 + ], + [ + 1727260105.6695807, + 0.0 + ], + [ + 1727260165.6647935, + 0.0 + ], + [ + 1727260212.0559795, + 0.0 + ] + ] + }, + "percent_used": { + "latest": 1.0, + "rate": 0.0, + "rates": [ + [ + 1727260045.5636568, + 0.0 + ], + [ + 1727260050.5730257, + 0.0 + ], + [ + 1727260055.56124, + 0.0 + ], + [ + 1727260060.564262, + 0.0 + ], + [ + 1727260065.5657525, + 0.0 + ], + [ + 1727260070.563676, + 0.0 + ], + [ + 1727260105.6695807, + 0.0 + ], + [ + 1727260165.6647935, + 0.0 + ], + [ + 1727260212.0559795, + 0.0 + ] + ] + }, + "max_avail": { + "latest": 83265085440, + "rate": 0.0, + "rates": [ + [ + 1727260045.5636568, + 0.0 + ], + [ + 1727260050.5730257, + 0.0 + ], + [ + 1727260055.56124, + 0.0 + ], + [ + 1727260060.564262, + 0.0 + ], + [ + 1727260065.5657525, + 0.0 + ], + [ + 1727260070.563676, + 0.0 + ], + [ + 1727260105.6695807, + 0.0 + ], + [ + 1727260165.6647935, + 0.0 + ], + [ + 1727260212.0559795, + 0.0 + ] + ] + }, + "quota_objects": { + "latest": 0, + "rate": 0.0, + "rates": [ + [ + 1727260045.5636568, + 0.0 + ], + [ + 1727260050.5730257, + 0.0 + ], + [ + 1727260055.56124, + 0.0 + ], + [ + 1727260060.564262, + 0.0 + ], + [ + 1727260065.5657525, + 0.0 + ], + [ + 1727260070.563676, + 0.0 + ], + [ + 1727260105.6695807, + 0.0 + ], + [ + 1727260165.6647935, + 0.0 + ], + [ + 1727260212.0559795, + 0.0 + ] + ] + }, + "quota_bytes": { + "latest": 0, + "rate": 0.0, + "rates": [ + [ + 1727260045.5636568, + 0.0 + ], + [ + 1727260050.5730257, + 0.0 + ], + [ + 1727260055.56124, + 0.0 + ], + [ + 1727260060.564262, + 0.0 + ], + [ + 1727260065.5657525, + 0.0 + ], + [ + 1727260070.563676, + 0.0 + ], + [ + 1727260105.6695807, + 0.0 + ], + [ + 1727260165.6647935, + 0.0 + ], + [ + 1727260212.0559795, + 0.0 + ] + ] + }, + "dirty": { + "latest": 0, + "rate": 0.0, + "rates": [ + [ + 1727260045.5636568, + 0.0 + ], + [ + 1727260050.5730257, + 0.0 + ], + [ + 1727260055.56124, + 0.0 + ], + [ + 1727260060.564262, + 0.0 + ], + [ + 1727260065.5657525, + 0.0 + ], + [ + 1727260070.563676, + 0.0 + ], + [ + 1727260105.6695807, + 0.0 + ], + [ + 1727260165.6647935, + 0.0 + ], + [ + 1727260212.0559795, + 0.0 + ] + ] + }, + "rd": { + "latest": 1, + "rate": 0.0, + "rates": [ + [ + 1727260045.5636568, + 0.0 + ], + [ + 1727260050.5730257, + 0.0 + ], + [ + 1727260055.56124, + 0.0 + ], + [ + 1727260060.564262, + 0.0 + ], + 
[ + 1727260065.5657525, + 0.0 + ], + [ + 1727260070.563676, + 0.0 + ], + [ + 1727260105.6695807, + 0.0 + ], + [ + 1727260165.6647935, + 0.0 + ], + [ + 1727260212.0559795, + 0.0 + ] + ] + }, + "rd_bytes": { + "latest": 1, + "rate": 0.0, + "rates": [ + [ + 1727260045.5636568, + 0.0 + ], + [ + 1727260050.5730257, + 0.0 + ], + [ + 1727260055.56124, + 0.0 + ], + [ + 1727260060.564262, + 0.0 + ], + [ + 1727260065.5657525, + 0.0 + ], + [ + 1727260070.563676, + 0.0 + ], + [ + 1727260105.6695807, + 0.0 + ], + [ + 1727260165.6647935, + 0.0 + ], + [ + 1727260212.0559795, + 0.0 + ] + ] + }, + "wr": { + "latest": 3, + "rate": 0.0, + "rates": [ + [ + 1727260045.5636568, + 0.0 + ], + [ + 1727260050.5730257, + 0.0 + ], + [ + 1727260055.56124, + 0.0 + ], + [ + 1727260060.564262, + 0.0 + ], + [ + 1727260065.5657525, + 0.0 + ], + [ + 1727260070.563676, + 0.0 + ], + [ + 1727260105.6695807, + 0.0 + ], + [ + 1727260165.6647935, + 0.0 + ], + [ + 1727260212.0559795, + 0.0 + ] + ] + }, + "wr_bytes": { + "latest": 6144, + "rate": 0.0, + "rates": [ + [ + 1727260045.5636568, + 0.0 + ], + [ + 1727260050.5730257, + 0.0 + ], + [ + 1727260055.56124, + 0.0 + ], + [ + 1727260060.564262, + 0.0 + ], + [ + 1727260065.5657525, + 0.0 + ], + [ + 1727260070.563676, + 0.0 + ], + [ + 1727260105.6695807, + 0.0 + ], + [ + 1727260165.6647935, + 0.0 + ], + [ + 1727260212.0559795, + 0.0 + ] + ] + }, + "compress_bytes_used": { + "latest": 0, + "rate": 0.0, + "rates": [ + [ + 1727260045.5636568, + 0.0 + ], + [ + 1727260050.5730257, + 0.0 + ], + [ + 1727260055.56124, + 0.0 + ], + [ + 1727260060.564262, + 0.0 + ], + [ + 1727260065.5657525, + 0.0 + ], + [ + 1727260070.563676, + 0.0 + ], + [ + 1727260105.6695807, + 0.0 + ], + [ + 1727260165.6647935, + 0.0 + ], + [ + 1727260212.0559795, + 0.0 + ] + ] + }, + "compress_under_bytes": { + "latest": 0, + "rate": 0.0, + "rates": [ + [ + 1727260045.5636568, + 0.0 + ], + [ + 1727260050.5730257, + 0.0 + ], + [ + 1727260055.56124, + 0.0 + ], + [ + 1727260060.564262, + 0.0 + ], + [ + 1727260065.5657525, + 0.0 + ], + [ + 1727260070.563676, + 0.0 + ], + [ + 1727260105.6695807, + 0.0 + ], + [ + 1727260165.6647935, + 0.0 + ], + [ + 1727260212.0559795, + 0.0 + ] + ] + }, + "stored_raw": { + "latest": 0, + "rate": 0.0, + "rates": [ + [ + 1727260045.5636568, + 0.0 + ], + [ + 1727260050.5730257, + 0.0 + ], + [ + 1727260055.56124, + 0.0 + ], + [ + 1727260060.564262, + 0.0 + ], + [ + 1727260065.5657525, + 0.0 + ], + [ + 1727260070.563676, + 0.0 + ], + [ + 1727260105.6695807, + 0.0 + ], + [ + 1727260165.6647935, + 0.0 + ], + [ + 1727260212.0559795, + 0.0 + ] + ] + }, + "avail_raw": { + "latest": 166530172973, + "rate": 0.0, + "rates": [ + [ + 1727260045.5636568, + 0.0 + ], + [ + 1727260050.5730257, + 0.0 + ], + [ + 1727260055.56124, + 0.0 + ], + [ + 1727260060.564262, + 0.0 + ], + [ + 1727260065.5657525, + 0.0 + ], + [ + 1727260070.563676, + 0.0 + ], + [ + 1727260105.6695807, + 0.0 + ], + [ + 1727260165.6647935, + 0.0 + ], + [ + 1727260212.0559795, + 0.0 + ] + ] + } + } + } +] diff --git a/src/go/plugin/go.d/modules/chrony/charts.go b/src/go/plugin/go.d/modules/chrony/charts.go index 37a6fa3e6..00f3d0534 100644 --- a/src/go/plugin/go.d/modules/chrony/charts.go +++ b/src/go/plugin/go.d/modules/chrony/charts.go @@ -20,10 +20,8 @@ const ( prioRefMeasurementTime prioLeapStatus prioActivity - //prioNTPPackets - //prioCommandPackets - //prioNKEConnections - //prioClientLogRecords + prioNTPPackets + prioCommandPackets ) var charts = module.Charts{ @@ -216,105 +214,42 @@ var ( } ) -//var serverStatsVer1Charts = 
module.Charts{ -// ntpPacketsChart.Copy(), -// commandPacketsChart.Copy(), -// clientLogRecordsChart.Copy(), -//} -// -//var serverStatsVer2Charts = module.Charts{ -// ntpPacketsChart.Copy(), -// commandPacketsChart.Copy(), -// clientLogRecordsChart.Copy(), -// nkeConnectionChart.Copy(), -//} -// -//var serverStatsVer3Charts = module.Charts{ -// ntpPacketsChart.Copy(), -// commandPacketsChart.Copy(), -// clientLogRecordsChart.Copy(), -// nkeConnectionChart.Copy(), -//} -// -//var serverStatsVer4Charts = module.Charts{ -// ntpPacketsChart.Copy(), -// commandPacketsChart.Copy(), -// clientLogRecordsChart.Copy(), -// nkeConnectionChart.Copy(), -//} +var serverStatsCharts = module.Charts{ + ntpPacketsChart.Copy(), + commandPacketsChart.Copy(), +} -// ServerStats charts -//var ( -// ntpPacketsChart = module.Chart{ -// ID: "ntp_packets", -// Title: "NTP packets", -// Units: "packets/s", -// Fam: "client requests", -// Ctx: "chrony.ntp_packets", -// Type: module.Stacked, -// Priority: prioNTPPackets, -// Dims: module.Dims{ -// {ID: "ntp_packets_received", Name: "received", Algo: module.Incremental}, -// {ID: "ntp_packets_dropped", Name: "dropped", Algo: module.Incremental}, -// }, -// } -// commandPacketsChart = module.Chart{ -// ID: "command_packets", -// Title: "Command packets", -// Units: "packets/s", -// Fam: "client requests", -// Ctx: "chrony.command_packets", -// Type: module.Stacked, -// Priority: prioCommandPackets, -// Dims: module.Dims{ -// {ID: "command_packets_received", Name: "received", Algo: module.Incremental}, -// {ID: "command_packets_dropped", Name: "dropped", Algo: module.Incremental}, -// }, -// } -// nkeConnectionChart = module.Chart{ -// ID: "nke_connections", -// Title: "NTS-KE connections", -// Units: "connections/s", -// Fam: "client requests", -// Ctx: "chrony.nke_connections", -// Type: module.Stacked, -// Priority: prioNKEConnections, -// Dims: module.Dims{ -// {ID: "nke_connections_accepted", Name: "accepted", Algo: module.Incremental}, -// {ID: "nke_connections_dropped", Name: "dropped", Algo: module.Incremental}, -// }, -// } -// clientLogRecordsChart = module.Chart{ -// ID: "client_log_records", -// Title: "Client log records", -// Units: "records/s", -// Fam: "client requests", -// Ctx: "chrony.client_log_records", -// Type: module.Stacked, -// Priority: prioClientLogRecords, -// Dims: module.Dims{ -// {ID: "client_log_records_dropped", Name: "dropped", Algo: module.Incremental}, -// }, -// } -//) +var ( + ntpPacketsChart = module.Chart{ + ID: "ntp_packets", + Title: "NTP packets", + Units: "packets/s", + Fam: "client requests", + Ctx: "chrony.ntp_packets", + Type: module.Line, + Priority: prioNTPPackets, + Dims: module.Dims{ + {ID: "ntp_packets_received", Name: "received", Algo: module.Incremental}, + {ID: "ntp_packets_dropped", Name: "dropped", Algo: module.Incremental}, + }, + } + commandPacketsChart = module.Chart{ + ID: "command_packets", + Title: "Command packets", + Units: "packets/s", + Fam: "client requests", + Ctx: "chrony.command_packets", + Type: module.Line, + Priority: prioCommandPackets, + Dims: module.Dims{ + {ID: "command_packets_received", Name: "received", Algo: module.Incremental}, + {ID: "command_packets_dropped", Name: "dropped", Algo: module.Incremental}, + }, + } +) -//func (c *Chrony) addServerStatsCharts(stats *serverStats) { -// var err error -// -// switch { -// case stats.v1 != nil: -// err = c.Charts().Add(*serverStatsVer1Charts.Copy()...) -// case stats.v2 != nil: -// err = c.Charts().Add(*serverStatsVer2Charts.Copy()...) 
-// case stats.v3 != nil: -// err = c.Charts().Add(*serverStatsVer3Charts.Copy()...) -// case stats.v4 != nil: -// err = c.Charts().Add(*serverStatsVer4Charts.Copy()...) -// default: -// err = errors.New("unknown stats chart") -// } -// -// if err != nil { -// c.Warning(err) -// } -//} +func (c *Chrony) addServerStatsCharts() { + if err := c.Charts().Add(*serverStatsCharts.Copy()...); err != nil { + c.Warning(err) + } +} diff --git a/src/go/plugin/go.d/modules/chrony/chrony.go b/src/go/plugin/go.d/modules/chrony/chrony.go index 0bdd3183c..cfe3067c8 100644 --- a/src/go/plugin/go.d/modules/chrony/chrony.go +++ b/src/go/plugin/go.d/modules/chrony/chrony.go @@ -5,13 +5,12 @@ package chrony import ( _ "embed" "errors" + "fmt" "sync" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" - - "github.com/facebook/time/ntp/chrony" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" ) //go:embed "config_schema.json" @@ -29,38 +28,32 @@ func New() *Chrony { return &Chrony{ Config: Config{ Address: "127.0.0.1:323", - Timeout: web.Duration(time.Second), + Timeout: confopt.Duration(time.Second), }, - charts: charts.Copy(), - addStatsChartsOnce: &sync.Once{}, - newClient: newChronyClient, + charts: charts.Copy(), + addServerStatsChartsOnce: &sync.Once{}, + newConn: newChronyConn, } } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - Address string `yaml:"address" json:"address"` - Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + Address string `yaml:"address" json:"address"` + Timeout confopt.Duration `yaml:"timeout,omitempty" json:"timeout"` } -type ( - Chrony struct { - module.Base - Config `yaml:",inline" json:""` +type Chrony struct { + module.Base + Config `yaml:",inline" json:""` - charts *module.Charts - addStatsChartsOnce *sync.Once + charts *module.Charts + addServerStatsChartsOnce *sync.Once - client chronyClient - newClient func(c Config) (chronyClient, error) - } - chronyClient interface { - Tracking() (*chrony.ReplyTracking, error) - Activity() (*chrony.ReplyActivity, error) - ServerStats() (*serverStats, error) - Close() - } -) + exec chronyBinary + + conn chronyConn + newConn func(c Config) (chronyConn, error) +} func (c *Chrony) Configuration() any { return c.Config @@ -68,8 +61,12 @@ func (c *Chrony) Configuration() any { func (c *Chrony) Init() error { if err := c.validateConfig(); err != nil { - c.Errorf("config validation: %v", err) - return err + return fmt.Errorf("config validation: %v", err) + } + + var err error + if c.exec, err = c.initChronycBinary(); err != nil { + c.Warningf("chronyc binary init failed: %v (serverstats metrics collection is disabled)", err) } return nil @@ -78,7 +75,6 @@ func (c *Chrony) Init() error { func (c *Chrony) Check() error { mx, err := c.collect() if err != nil { - c.Error(err) return err } if len(mx) == 0 { @@ -105,8 +101,8 @@ func (c *Chrony) Collect() map[string]int64 { } func (c *Chrony) Cleanup() { - if c.client != nil { - c.client.Close() - c.client = nil + if c.conn != nil { + c.conn.close() + c.conn = nil } } diff --git a/src/go/plugin/go.d/modules/chrony/chrony_test.go b/src/go/plugin/go.d/modules/chrony/chrony_test.go index 407724e75..dc380c207 100644 --- a/src/go/plugin/go.d/modules/chrony/chrony_test.go +++ b/src/go/plugin/go.d/modules/chrony/chrony_test.go @@ -155,11 +155,13 @@ func TestChrony_Collect(t *testing.T) { 
prepare func() *Chrony expected map[string]int64 }{ - "tracking: success, activity: success": { + "tracking: success, activity: success, serverstats: success": { prepare: func() *Chrony { return prepareChronyWithMock(&mockClient{}) }, expected: map[string]int64{ "burst_offline_sources": 3, "burst_online_sources": 4, + "command_packets_dropped": 1, + "command_packets_received": 652, "current_correction": 154872, "frequency": 51051185607, "last_offset": 3095, @@ -167,6 +169,8 @@ func TestChrony_Collect(t *testing.T) { "leap_status_insert_second": 1, "leap_status_normal": 0, "leap_status_unsynchronised": 0, + "ntp_packets_dropped": 1, + "ntp_packets_received": 1, "offline_sources": 2, "online_sources": 8, "ref_measurement_time": 63793323616, @@ -219,12 +223,13 @@ func TestChrony_Collect(t *testing.T) { c := test.prepare() require.NoError(t, c.Init()) + c.exec = &mockChronyc{} _ = c.Check() - collected := c.Collect() - copyRefMeasurementTime(collected, test.expected) + mx := c.Collect() + copyRefMeasurementTime(mx, test.expected) - assert.Equal(t, test.expected, collected) + assert.Equal(t, test.expected, mx) }) } } @@ -232,13 +237,32 @@ func TestChrony_Collect(t *testing.T) { func prepareChronyWithMock(m *mockClient) *Chrony { c := New() if m == nil { - c.newClient = func(_ Config) (chronyClient, error) { return nil, errors.New("mock.newClient error") } + c.newConn = func(_ Config) (chronyConn, error) { return nil, errors.New("mock.newClient error") } } else { - c.newClient = func(_ Config) (chronyClient, error) { return m, nil } + c.newConn = func(_ Config) (chronyConn, error) { return m, nil } } return c } +type mockChronyc struct{} + +func (m *mockChronyc) serverStats() ([]byte, error) { + data := ` +NTP packets received : 1 +NTP packets dropped : 1 +Command packets received : 652 +Command packets dropped : 1 +Client log records dropped : 1 +NTS-KE connections accepted: 1 +NTS-KE connections dropped : 1 +Authenticated NTP packets : 1 +Interleaved NTP packets : 1 +NTP timestamps held : 1 +NTP timestamp span : 0 +` + return []byte(data), nil +} + type mockClient struct { errOnTracking bool errOnActivity bool @@ -246,7 +270,7 @@ type mockClient struct { closeCalled bool } -func (m *mockClient) Tracking() (*chrony.ReplyTracking, error) { +func (m *mockClient) tracking() (*chrony.ReplyTracking, error) { if m.errOnTracking { return nil, errors.New("mockClient.Tracking call error") } @@ -271,7 +295,7 @@ func (m *mockClient) Tracking() (*chrony.ReplyTracking, error) { return &reply, nil } -func (m *mockClient) Activity() (*chrony.ReplyActivity, error) { +func (m *mockClient) activity() (*chrony.ReplyActivity, error) { if m.errOnActivity { return nil, errors.New("mockClient.Activity call error") } @@ -287,31 +311,7 @@ func (m *mockClient) Activity() (*chrony.ReplyActivity, error) { return &reply, nil } -func (m *mockClient) ServerStats() (*serverStats, error) { - if m.errOnServerStats { - return nil, errors.New("mockClient.ServerStats call error") - } - - reply := serverStats{ - v3: &chrony.ServerStats3{ - NTPHits: 10, - NKEHits: 10, - CMDHits: 10, - NTPDrops: 1, - NKEDrops: 1, - CMDDrops: 1, - LogDrops: 1, - NTPAuthHits: 10, - NTPInterleavedHits: 10, - NTPTimestamps: 0, - NTPSpanSeconds: 0, - }, - } - - return &reply, nil -} - -func (m *mockClient) Close() { +func (m *mockClient) close() { m.closeCalled = true } diff --git a/src/go/plugin/go.d/modules/chrony/client.go b/src/go/plugin/go.d/modules/chrony/client.go index 233e78f19..f07f4902a 100644 --- a/src/go/plugin/go.d/modules/chrony/client.go 
+++ b/src/go/plugin/go.d/modules/chrony/client.go @@ -10,55 +10,40 @@ import ( "github.com/facebook/time/ntp/chrony" ) -func newChronyClient(c Config) (chronyClient, error) { - conn, err := net.DialTimeout("udp", c.Address, c.Timeout.Duration()) +type chronyConn interface { + tracking() (*chrony.ReplyTracking, error) + activity() (*chrony.ReplyActivity, error) + close() +} + +func newChronyConn(cfg Config) (chronyConn, error) { + conn, err := net.DialTimeout("udp", cfg.Address, cfg.Timeout.Duration()) if err != nil { return nil, err } - client := &simpleClient{ + client := &chronyClient{ conn: conn, - client: &chrony.Client{Connection: &connWithTimeout{ - Conn: conn, - timeout: c.Timeout.Duration(), - }}, + client: &chrony.Client{ + Connection: &connWithTimeout{ + Conn: conn, + timeout: cfg.Timeout.Duration(), + }, + }, } return client, nil } -type connWithTimeout struct { - net.Conn - timeout time.Duration -} - -func (c *connWithTimeout) Read(p []byte) (n int, err error) { - if err := c.Conn.SetReadDeadline(c.deadline()); err != nil { - return 0, err - } - return c.Conn.Read(p) -} - -func (c *connWithTimeout) Write(p []byte) (n int, err error) { - if err := c.Conn.SetWriteDeadline(c.deadline()); err != nil { - return 0, err - } - return c.Conn.Write(p) -} - -func (c *connWithTimeout) deadline() time.Time { - return time.Now().Add(c.timeout) -} - -type simpleClient struct { +type chronyClient struct { conn net.Conn client *chrony.Client } -func (sc *simpleClient) Tracking() (*chrony.ReplyTracking, error) { +func (c *chronyClient) tracking() (*chrony.ReplyTracking, error) { req := chrony.NewTrackingPacket() - reply, err := sc.client.Communicate(req) + reply, err := c.client.Communicate(req) if err != nil { return nil, err } @@ -67,13 +52,14 @@ func (sc *simpleClient) Tracking() (*chrony.ReplyTracking, error) { if !ok { return nil, fmt.Errorf("unexpected reply type, want=%T, got=%T", &chrony.ReplyTracking{}, reply) } + return tracking, nil } -func (sc *simpleClient) Activity() (*chrony.ReplyActivity, error) { +func (c *chronyClient) activity() (*chrony.ReplyActivity, error) { req := chrony.NewActivityPacket() - reply, err := sc.client.Communicate(req) + reply, err := c.client.Communicate(req) if err != nil { return nil, err } @@ -82,90 +68,36 @@ func (sc *simpleClient) Activity() (*chrony.ReplyActivity, error) { if !ok { return nil, fmt.Errorf("unexpected reply type, want=%T, got=%T", &chrony.ReplyActivity{}, reply) } + return activity, nil } -type serverStats struct { - v1 *chrony.ServerStats - v2 *chrony.ServerStats2 - v3 *chrony.ServerStats3 - v4 *chrony.ServerStats4 +func (c *chronyClient) close() { + if c.conn != nil { + _ = c.conn.Close() + c.conn = nil + } } -func (sc *simpleClient) ServerStats() (*serverStats, error) { - req := chrony.NewServerStatsPacket() +type connWithTimeout struct { + net.Conn + timeout time.Duration +} - reply, err := sc.client.Communicate(req) - if err != nil { - return nil, err +func (c *connWithTimeout) Read(p []byte) (n int, err error) { + if err := c.Conn.SetReadDeadline(c.deadline()); err != nil { + return 0, err } + return c.Conn.Read(p) +} - var stats serverStats - - switch v := reply.(type) { - case *chrony.ReplyServerStats: - stats.v1 = &chrony.ServerStats{ - NTPHits: v.NTPHits, - CMDHits: v.CMDHits, - NTPDrops: v.NTPDrops, - CMDDrops: v.CMDDrops, - LogDrops: v.LogDrops, - } - case *chrony.ReplyServerStats2: - stats.v2 = &chrony.ServerStats2{ - NTPHits: v.NTPHits, - NKEHits: v.NKEHits, - CMDHits: v.CMDHits, - NTPDrops: v.NTPDrops, - NKEDrops: 
v.NKEDrops, - CMDDrops: v.CMDDrops, - LogDrops: v.LogDrops, - NTPAuthHits: v.NTPAuthHits, - } - case *chrony.ReplyServerStats3: - stats.v3 = &chrony.ServerStats3{ - NTPHits: v.NTPHits, - NKEHits: v.NKEHits, - CMDHits: v.CMDHits, - NTPDrops: v.NTPDrops, - NKEDrops: v.NKEDrops, - CMDDrops: v.CMDDrops, - LogDrops: v.LogDrops, - NTPAuthHits: v.NTPAuthHits, - NTPInterleavedHits: v.NTPInterleavedHits, - NTPTimestamps: v.NTPTimestamps, - NTPSpanSeconds: v.NTPSpanSeconds, - } - case *chrony.ReplyServerStats4: - stats.v4 = &chrony.ServerStats4{ - NTPHits: v.NTPHits, - NKEHits: v.NKEHits, - CMDHits: v.CMDHits, - NTPDrops: v.NTPDrops, - NKEDrops: v.NKEDrops, - CMDDrops: v.CMDDrops, - LogDrops: v.LogDrops, - NTPAuthHits: v.NTPAuthHits, - NTPInterleavedHits: v.NTPInterleavedHits, - NTPTimestamps: v.NTPTimestamps, - NTPSpanSeconds: v.NTPSpanSeconds, - NTPDaemonRxtimestamps: v.NTPDaemonRxtimestamps, - NTPDaemonTxtimestamps: v.NTPDaemonTxtimestamps, - NTPKernelRxtimestamps: v.NTPKernelRxtimestamps, - NTPKernelTxtimestamps: v.NTPKernelTxtimestamps, - NTPHwRxTimestamps: v.NTPHwRxTimestamps, - NTPHwTxTimestamps: v.NTPHwTxTimestamps, - } - default: - return nil, fmt.Errorf("unexpected reply type, want=ReplyServerStats, got=%T", reply) +func (c *connWithTimeout) Write(p []byte) (n int, err error) { + if err := c.Conn.SetWriteDeadline(c.deadline()); err != nil { + return 0, err } - - return &stats, nil + return c.Conn.Write(p) } -func (sc *simpleClient) Close() { - if sc.conn != nil { - _ = sc.conn.Close() - sc.conn = nil - } +func (c *connWithTimeout) deadline() time.Time { + return time.Now().Add(c.timeout) } diff --git a/src/go/plugin/go.d/modules/chrony/collect.go b/src/go/plugin/go.d/modules/chrony/collect.go index 1a3a286fc..c95b1b8aa 100644 --- a/src/go/plugin/go.d/modules/chrony/collect.go +++ b/src/go/plugin/go.d/modules/chrony/collect.go @@ -3,19 +3,32 @@ package chrony import ( + "bufio" + "bytes" + "errors" "fmt" + "strconv" + "strings" "time" ) const scaleFactor = 1000000000 +const ( + // https://github.com/mlichvar/chrony/blob/7daf34675a5a2487895c74d1578241ca91a4eb70/ntp.h#L70-L75 + leapStatusNormal = 0 + leapStatusInsertSecond = 1 + leapStatusDeleteSecond = 2 + leapStatusUnsynchronised = 3 +) + func (c *Chrony) collect() (map[string]int64, error) { - if c.client == nil { - client, err := c.newClient(c.Config) + if c.conn == nil { + client, err := c.newConn(c.Config) if err != nil { return nil, err } - c.client = client + c.conn = client } mx := make(map[string]int64) @@ -26,28 +39,20 @@ func (c *Chrony) collect() (map[string]int64, error) { if err := c.collectActivity(mx); err != nil { return mx, err } - //if strings.HasPrefix(c.Address, "/") { - // TODO: Allowed only through the Unix domain socket (requires "_chrony" group membership). - // See https://github.com/facebook/time/blob/18207c5d8ddc7242e8d4192985898b6dbe66932c/cmd/ntpcheck/checker/chrony.go#L38 - // ^^ For some reason doesn't work, Chrony doesn't respond. Additional configuration needed? 
- //if err := c.collectServerStats(mx); err != nil { - // return mx, err - //} - //} + if c.exec != nil { + if err := c.collectServerStats(mx); err != nil { + c.Warning(err) + c.exec = nil + } else { + c.addServerStatsChartsOnce.Do(c.addServerStatsCharts) + } + } return mx, nil } -const ( - // https://github.com/mlichvar/chrony/blob/7daf34675a5a2487895c74d1578241ca91a4eb70/ntp.h#L70-L75 - leapStatusNormal = 0 - leapStatusInsertSecond = 1 - leapStatusDeleteSecond = 2 - leapStatusUnsynchronised = 3 -) - func (c *Chrony) collectTracking(mx map[string]int64) error { - reply, err := c.client.Tracking() + reply, err := c.conn.tracking() if err != nil { return fmt.Errorf("error on collecting tracking: %v", err) } @@ -76,7 +81,7 @@ func (c *Chrony) collectTracking(mx map[string]int64) error { } func (c *Chrony) collectActivity(mx map[string]int64) error { - reply, err := c.client.Activity() + reply, err := c.conn.activity() if err != nil { return fmt.Errorf("error on collecting activity: %v", err) } @@ -90,56 +95,42 @@ func (c *Chrony) collectActivity(mx map[string]int64) error { return nil } -//func (c *Chrony) collectServerStats(mx map[string]int64) error { -// stats, err := c.client.ServerStats() -// if err != nil { -// return fmt.Errorf("error on collecting server stats: %v", err) -// } -// -// switch { -// case stats.v4 != nil: -// mx["ntp_packets_received"] = int64(stats.v4.NTPHits) -// mx["ntp_packets_dropped"] = int64(stats.v4.NTPDrops) -// mx["command_packets_received"] = int64(stats.v4.CMDHits) -// mx["command_packets_dropped"] = int64(stats.v4.CMDDrops) -// mx["client_log_records_dropped"] = int64(stats.v4.LogDrops) -// mx["nke_connections_accepted"] = int64(stats.v4.NKEHits) -// mx["nke_connections_dropped"] = int64(stats.v4.NKEDrops) -// mx["authenticated_ntp_packets"] = int64(stats.v4.NTPAuthHits) -// mx["interleaved_ntp_packets"] = int64(stats.v4.NTPInterleavedHits) -// case stats.v3 != nil: -// mx["ntp_packets_received"] = int64(stats.v3.NTPHits) -// mx["ntp_packets_dropped"] = int64(stats.v3.NTPDrops) -// mx["command_packets_received"] = int64(stats.v3.CMDHits) -// mx["command_packets_dropped"] = int64(stats.v3.CMDDrops) -// mx["client_log_records_dropped"] = int64(stats.v3.LogDrops) -// mx["nke_connections_accepted"] = int64(stats.v3.NKEHits) -// mx["nke_connections_dropped"] = int64(stats.v3.NKEDrops) -// mx["authenticated_ntp_packets"] = int64(stats.v3.NTPAuthHits) -// mx["interleaved_ntp_packets"] = int64(stats.v3.NTPInterleavedHits) -// case stats.v2 != nil: -// mx["ntp_packets_received"] = int64(stats.v2.NTPHits) -// mx["ntp_packets_dropped"] = int64(stats.v2.NTPDrops) -// mx["command_packets_received"] = int64(stats.v2.CMDHits) -// mx["command_packets_dropped"] = int64(stats.v2.CMDDrops) -// mx["client_log_records_dropped"] = int64(stats.v2.LogDrops) -// mx["nke_connections_accepted"] = int64(stats.v2.NKEHits) -// mx["nke_connections_dropped"] = int64(stats.v2.NKEDrops) -// mx["authenticated_ntp_packets"] = int64(stats.v2.NTPAuthHits) -// case stats.v1 != nil: -// mx["ntp_packets_received"] = int64(stats.v1.NTPHits) -// mx["ntp_packets_dropped"] = int64(stats.v1.NTPDrops) -// mx["command_packets_received"] = int64(stats.v1.CMDHits) -// mx["command_packets_dropped"] = int64(stats.v1.CMDDrops) -// mx["client_log_records_dropped"] = int64(stats.v1.LogDrops) -// default: -// return errors.New("invalid server stats reply") -// } -// -// //c.addStatsChartsOnce.Do(func() { c.addServerStatsCharts(stats) }) -// -// return nil -//} +func (c *Chrony) collectServerStats(mx 
map[string]int64) error { + bs, err := c.exec.serverStats() + if err != nil { + return fmt.Errorf("error on collecting server stats: %v", err) + } + + sc := bufio.NewScanner(bytes.NewReader(bs)) + var n int + + for sc.Scan() { + key, value, ok := strings.Cut(sc.Text(), ":") + if !ok { + continue + } + + key, value = strings.TrimSpace(key), strings.TrimSpace(value) + + switch key { + case "NTP packets received", + "NTP packets dropped", + "Command packets received", + "Command packets dropped": + if v, err := strconv.ParseInt(value, 10, 64); err == nil { + key = strings.ToLower(strings.ReplaceAll(key, " ", "_")) + mx[key] = v + n++ + } + } + } + + if n == 0 { + return errors.New("no server stats metrics found in the response") + } + + return nil +} func boolToInt(v bool) int64 { if v { diff --git a/src/go/plugin/go.d/modules/chrony/config_schema.json b/src/go/plugin/go.d/modules/chrony/config_schema.json index 5de10a822..5b7c6f069 100644 --- a/src/go/plugin/go.d/modules/chrony/config_schema.json +++ b/src/go/plugin/go.d/modules/chrony/config_schema.json @@ -27,7 +27,6 @@ "required": [ "address" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/chrony/exec.go b/src/go/plugin/go.d/modules/chrony/exec.go new file mode 100644 index 000000000..c6792d846 --- /dev/null +++ b/src/go/plugin/go.d/modules/chrony/exec.go @@ -0,0 +1,46 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package chrony + +import ( + "context" + "fmt" + "os/exec" + "time" + + "github.com/netdata/netdata/go/plugins/logger" +) + +type chronyBinary interface { + serverStats() ([]byte, error) +} + +func newChronycExec(ndsudoPath string, timeout time.Duration, log *logger.Logger) *chronycExec { + return &chronycExec{ + Logger: log, + ndsudoPath: ndsudoPath, + timeout: timeout, + } +} + +type chronycExec struct { + *logger.Logger + + ndsudoPath string + timeout time.Duration +} + +func (e *chronycExec) serverStats() ([]byte, error) { + ctx, cancel := context.WithTimeout(context.Background(), e.timeout) + defer cancel() + + cmd := exec.CommandContext(ctx, e.ndsudoPath, "chronyc-serverstats") + e.Debugf("executing '%s'", cmd) + + bs, err := cmd.Output() + if err != nil { + return nil, fmt.Errorf("error on '%s': %v", cmd, err) + } + + return bs, nil +} diff --git a/src/go/plugin/go.d/modules/chrony/init.go b/src/go/plugin/go.d/modules/chrony/init.go index 828112c9d..2ad63ec67 100644 --- a/src/go/plugin/go.d/modules/chrony/init.go +++ b/src/go/plugin/go.d/modules/chrony/init.go @@ -4,6 +4,12 @@ package chrony import ( "errors" + "fmt" + "net" + "os" + "path/filepath" + + "github.com/netdata/netdata/go/plugins/pkg/executable" ) func (c *Chrony) validateConfig() error { @@ -12,3 +18,30 @@ func (c *Chrony) validateConfig() error { } return nil } + +func (c *Chrony) initChronycBinary() (chronyBinary, error) { + host, _, err := net.SplitHostPort(c.Address) + if err != nil { + return nil, err + } + + // 'serverstats' allowed only through the Unix domain socket + if !isLocalhost(host) { + return nil, nil + } + + ndsudoPath := filepath.Join(executable.Directory, "ndsudo") + + if _, err := os.Stat(ndsudoPath); err != nil { + return nil, fmt.Errorf("ndsudo executable not found: %v", err) + } + + chronyc := newChronycExec(ndsudoPath, c.Timeout.Duration(), c.Logger) + + return chronyc, nil +} + +func isLocalhost(host string) bool { + ip := net.ParseIP(host) + return host == "localhost" || (ip != nil && ip.IsLoopback()) +} diff --git 
a/src/go/plugin/go.d/modules/chrony/integrations/chrony.md b/src/go/plugin/go.d/modules/chrony/integrations/chrony.md index e9b9454d9..6ef6cf183 100644 --- a/src/go/plugin/go.d/modules/chrony/integrations/chrony.md +++ b/src/go/plugin/go.d/modules/chrony/integrations/chrony.md @@ -23,7 +23,10 @@ Module: chrony This collector monitors the system's clock performance and peers activity status + It collects metrics by sending UDP packets to chronyd using the Chrony communication protocol v6. +Additionally, for data collection jobs that connect to localhost Chrony instances, it collects serverstats metrics (NTP packets, command packets received/dropped) by executing the 'chronyc serverstats' command. + This collector is supported on all platforms. @@ -80,6 +83,8 @@ Metrics: | chrony.ref_measurement_time | ref_measurement_time | seconds | | chrony.leap_status | normal, insert_second, delete_second, unsynchronised | status | | chrony.activity | online, offline, burst_online, burst_offline, unresolved | sources | +| chrony.ntp_packets | received, dropped | packets/s | +| chrony.command_packets | received, dropped | packets/s | @@ -101,8 +106,8 @@ No action required. The configuration file name for this integration is `go.d/chrony.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/chrony/metadata.yaml b/src/go/plugin/go.d/modules/chrony/metadata.yaml index 18f9152e6..b7842aff5 100644 --- a/src/go/plugin/go.d/modules/chrony/metadata.yaml +++ b/src/go/plugin/go.d/modules/chrony/metadata.yaml @@ -20,8 +20,11 @@ modules: most_popular: false overview: data_collection: - metrics_description: This collector monitors the system's clock performance and peers activity status - method_description: It collects metrics by sending UDP packets to chronyd using the Chrony communication protocol v6. + metrics_description: | + This collector monitors the system's clock performance and peers activity status + method_description: | + It collects metrics by sending UDP packets to chronyd using the Chrony communication protocol v6. + Additionally, for data collection jobs that connect to localhost Chrony instances, it collects serverstats metrics (NTP packets, command packets received/dropped) by executing the 'chronyc serverstats' command. 
supported_platforms: include: [] exclude: [] @@ -206,3 +209,19 @@ modules: - name: burst_online - name: burst_offline - name: unresolved + - name: chrony.ntp_packets + availability: [] + description: NTP packets + unit: packets/s + chart_type: line + dimensions: + - name: received + - name: dropped + - name: chrony.command_packets + availability: [] + description: Command packets + unit: packets/s + chart_type: line + dimensions: + - name: received + - name: dropped diff --git a/src/go/plugin/go.d/modules/clickhouse/clickhouse.go b/src/go/plugin/go.d/modules/clickhouse/clickhouse.go index 3e34f7261..ab37d826c 100644 --- a/src/go/plugin/go.d/modules/clickhouse/clickhouse.go +++ b/src/go/plugin/go.d/modules/clickhouse/clickhouse.go @@ -5,10 +5,12 @@ package clickhouse import ( _ "embed" "errors" + "fmt" "net/http" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -26,12 +28,12 @@ func init() { func New() *ClickHouse { return &ClickHouse{ Config: Config{ - HTTP: web.HTTP{ - Request: web.Request{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{ URL: "http://127.0.0.1:8123", }, - Client: web.Client{ - Timeout: web.Duration(time.Second), + ClientConfig: web.ClientConfig{ + Timeout: confopt.Duration(time.Second), }, }, }, @@ -42,8 +44,8 @@ func New() *ClickHouse { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - web.HTTP `yaml:",inline" json:""` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + web.HTTPConfig `yaml:",inline" json:""` } type ( @@ -68,14 +70,12 @@ func (c *ClickHouse) Configuration() any { func (c *ClickHouse) Init() error { if err := c.validateConfig(); err != nil { - c.Errorf("config validation: %v", err) - return err + return fmt.Errorf("config validation: %v", err) } httpClient, err := c.initHTTPClient() if err != nil { - c.Errorf("init HTTP client: %v", err) - return err + return fmt.Errorf("init HTTP client: %v", err) } c.httpClient = httpClient @@ -88,7 +88,6 @@ func (c *ClickHouse) Init() error { func (c *ClickHouse) Check() error { mx, err := c.collect() if err != nil { - c.Error(err) return err } diff --git a/src/go/plugin/go.d/modules/clickhouse/clickhouse_test.go b/src/go/plugin/go.d/modules/clickhouse/clickhouse_test.go index c3defbda7..29dfdacb3 100644 --- a/src/go/plugin/go.d/modules/clickhouse/clickhouse_test.go +++ b/src/go/plugin/go.d/modules/clickhouse/clickhouse_test.go @@ -58,8 +58,8 @@ func TestClickHouse_Init(t *testing.T) { "fail when URL not set": { wantFail: true, config: Config{ - HTTP: web.HTTP{ - Request: web.Request{URL: ""}, + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{URL: ""}, }, }, }, @@ -243,25 +243,14 @@ func TestClickHouse_Collect(t *testing.T) { mx := click.Collect() require.Equal(t, test.wantMetrics, mx) + if len(test.wantMetrics) > 0 { - testMetricsHasAllChartsDims(t, click, mx) + module.TestMetricsHasAllChartsDims(t, click.Charts(), mx) } }) } } -func testMetricsHasAllChartsDims(t *testing.T, click *ClickHouse, mx map[string]int64) { - for _, chart := range *click.Charts() { - if chart.Obsolete { - continue - } - for _, dim := range chart.Dims { - _, ok := mx[dim.ID] - assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) - } - } -} - func prepareCaseOk(t *testing.T) (*ClickHouse, func()) { t.Helper() srv := httptest.NewServer(http.HandlerFunc( 
diff --git a/src/go/plugin/go.d/modules/clickhouse/collect.go b/src/go/plugin/go.d/modules/clickhouse/collect.go index 8bb756528..2000f9e21 100644 --- a/src/go/plugin/go.d/modules/clickhouse/collect.go +++ b/src/go/plugin/go.d/modules/clickhouse/collect.go @@ -9,6 +9,8 @@ import ( "net/http" "net/url" "slices" + + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) const precision = 1000 @@ -38,18 +40,10 @@ func (c *ClickHouse) collect() (map[string]int64, error) { return mx, nil } -func (c *ClickHouse) doOKDecodeCSV(req *http.Request, assign func(column, value string, lineEnd bool)) error { - resp, err := c.httpClient.Do(req) - if err != nil { - return fmt.Errorf("error on HTTP request '%s': %v", req.URL, err) - } - defer closeBody(resp) - - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("'%s' returned HTTP status code: %d", req.URL, resp.StatusCode) - } - - return readCSVResponseData(resp.Body, assign) +func (c *ClickHouse) doHTTP(req *http.Request, assign func(column, value string, lineEnd bool)) error { + return web.DoHTTP(c.httpClient).Request(req, func(body io.Reader) error { + return readCSVResponseData(body, assign) + }) } func readCSVResponseData(reader io.Reader, assign func(column, value string, lineEnd bool)) error { @@ -87,10 +81,3 @@ func readCSVResponseData(reader io.Reader, assign func(column, value string, lin func makeURLQuery(q string) string { return url.Values{"query": {q}}.Encode() } - -func closeBody(resp *http.Response) { - if resp != nil && resp.Body != nil { - _, _ = io.Copy(io.Discard, resp.Body) - _ = resp.Body.Close() - } -} diff --git a/src/go/plugin/go.d/modules/clickhouse/collect_system_async_metrics.go b/src/go/plugin/go.d/modules/clickhouse/collect_system_async_metrics.go index 79b7e0ffd..4106e98f3 100644 --- a/src/go/plugin/go.d/modules/clickhouse/collect_system_async_metrics.go +++ b/src/go/plugin/go.d/modules/clickhouse/collect_system_async_metrics.go @@ -22,7 +22,7 @@ where ` func (c *ClickHouse) collectSystemAsyncMetrics(mx map[string]int64) error { - req, _ := web.NewHTTPRequest(c.Request) + req, _ := web.NewHTTPRequest(c.RequestConfig) req.URL.RawQuery = makeURLQuery(querySystemAsyncMetrics) want := map[string]float64{ @@ -35,7 +35,7 @@ func (c *ClickHouse) collectSystemAsyncMetrics(mx map[string]int64) error { var metric string var n int - err := c.doOKDecodeCSV(req, func(column, value string, lineEnd bool) { + err := c.doHTTP(req, func(column, value string, lineEnd bool) { switch column { case "metric": metric = value diff --git a/src/go/plugin/go.d/modules/clickhouse/collect_system_disks.go b/src/go/plugin/go.d/modules/clickhouse/collect_system_disks.go index 4b9829bf6..5c6c5ffe6 100644 --- a/src/go/plugin/go.d/modules/clickhouse/collect_system_disks.go +++ b/src/go/plugin/go.d/modules/clickhouse/collect_system_disks.go @@ -26,7 +26,7 @@ type diskStats struct { } func (c *ClickHouse) collectSystemDisks(mx map[string]int64) error { - req, _ := web.NewHTTPRequest(c.Request) + req, _ := web.NewHTTPRequest(c.RequestConfig) req.URL.RawQuery = makeURLQuery(querySystemDisks) seen := make(map[string]*diskStats) @@ -42,7 +42,7 @@ func (c *ClickHouse) collectSystemDisks(mx map[string]int64) error { var name string - err := c.doOKDecodeCSV(req, func(column, value string, lineEnd bool) { + err := c.doHTTP(req, func(column, value string, lineEnd bool) { switch column { case "name": name = value diff --git a/src/go/plugin/go.d/modules/clickhouse/collect_system_events.go b/src/go/plugin/go.d/modules/clickhouse/collect_system_events.go index 
de3c33a1e..6b103eacf 100644 --- a/src/go/plugin/go.d/modules/clickhouse/collect_system_events.go +++ b/src/go/plugin/go.d/modules/clickhouse/collect_system_events.go @@ -18,14 +18,14 @@ FROM ` func (c *ClickHouse) collectSystemEvents(mx map[string]int64) error { - req, _ := web.NewHTTPRequest(c.Request) + req, _ := web.NewHTTPRequest(c.RequestConfig) req.URL.RawQuery = makeURLQuery(querySystemEvents) px := "events_" var event string var n int - err := c.doOKDecodeCSV(req, func(column, value string, lineEnd bool) { + err := c.doHTTP(req, func(column, value string, lineEnd bool) { switch column { case "event": event = value diff --git a/src/go/plugin/go.d/modules/clickhouse/collect_system_metrics.go b/src/go/plugin/go.d/modules/clickhouse/collect_system_metrics.go index 26891f808..8bfafe3d3 100644 --- a/src/go/plugin/go.d/modules/clickhouse/collect_system_metrics.go +++ b/src/go/plugin/go.d/modules/clickhouse/collect_system_metrics.go @@ -18,14 +18,14 @@ FROM ` func (c *ClickHouse) collectSystemMetrics(mx map[string]int64) error { - req, _ := web.NewHTTPRequest(c.Request) + req, _ := web.NewHTTPRequest(c.RequestConfig) req.URL.RawQuery = makeURLQuery(querySystemMetrics) px := "metrics_" var metric string var n int - err := c.doOKDecodeCSV(req, func(column, value string, lineEnd bool) { + err := c.doHTTP(req, func(column, value string, lineEnd bool) { switch column { case "metric": metric = value diff --git a/src/go/plugin/go.d/modules/clickhouse/collect_system_parts.go b/src/go/plugin/go.d/modules/clickhouse/collect_system_parts.go index 3e9dc6ac2..e60589278 100644 --- a/src/go/plugin/go.d/modules/clickhouse/collect_system_parts.go +++ b/src/go/plugin/go.d/modules/clickhouse/collect_system_parts.go @@ -34,7 +34,7 @@ type tableStats struct { } func (c *ClickHouse) collectSystemParts(mx map[string]int64) error { - req, _ := web.NewHTTPRequest(c.Request) + req, _ := web.NewHTTPRequest(c.RequestConfig) req.URL.RawQuery = makeURLQuery(querySystemParts) seen := make(map[string]*tableStats) @@ -51,7 +51,7 @@ func (c *ClickHouse) collectSystemParts(mx map[string]int64) error { var database, table string - err := c.doOKDecodeCSV(req, func(column, value string, lineEnd bool) { + err := c.doHTTP(req, func(column, value string, lineEnd bool) { switch column { case "database": database = value diff --git a/src/go/plugin/go.d/modules/clickhouse/collect_system_processes.go b/src/go/plugin/go.d/modules/clickhouse/collect_system_processes.go index 53698ea6c..cf3ec1858 100644 --- a/src/go/plugin/go.d/modules/clickhouse/collect_system_processes.go +++ b/src/go/plugin/go.d/modules/clickhouse/collect_system_processes.go @@ -16,10 +16,10 @@ FROM ` func (c *ClickHouse) collectLongestRunningQueryTime(mx map[string]int64) error { - req, _ := web.NewHTTPRequest(c.Request) + req, _ := web.NewHTTPRequest(c.RequestConfig) req.URL.RawQuery = makeURLQuery(queryLongestQueryTime) - return c.doOKDecodeCSV(req, func(column, value string, lineEnd bool) { + return c.doHTTP(req, func(column, value string, lineEnd bool) { if column == "value" { if v, err := strconv.ParseFloat(value, 64); err == nil { mx["LongestRunningQueryTime"] = int64(v * precision) diff --git a/src/go/plugin/go.d/modules/clickhouse/config_schema.json b/src/go/plugin/go.d/modules/clickhouse/config_schema.json index 8b0129ece..591615610 100644 --- a/src/go/plugin/go.d/modules/clickhouse/config_schema.json +++ b/src/go/plugin/go.d/modules/clickhouse/config_schema.json @@ -105,7 +105,6 @@ "required": [ "url" ], - "additionalProperties": false, 
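Throughout the ClickHouse module the hand-rolled `doOKDecodeCSV`/`closeBody` pair is replaced by the shared `web.DoHTTP(...)` helper, which owns status checking and body draining and hands the response body to a callback. A simplified, standard-library stand-in for that flow is sketched below; the `doHTTP` and `readCSVWithNames` names are illustrative, and the real helper in the go.d `web` package has a richer API.

```go
package main

import (
	"encoding/csv"
	"fmt"
	"io"
	"net/http"
	"strings"
)

// doHTTP performs the request, enforces a 200 response, hands the body to
// process, and always drains and closes the body so the connection is reusable.
func doHTTP(client *http.Client, req *http.Request, process func(io.Reader) error) error {
	resp, err := client.Do(req)
	if err != nil {
		return fmt.Errorf("request to '%s' failed: %w", req.URL, err)
	}
	defer func() {
		_, _ = io.Copy(io.Discard, resp.Body)
		_ = resp.Body.Close()
	}()

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("'%s' returned HTTP status %d", req.URL, resp.StatusCode)
	}
	return process(resp.Body)
}

// readCSVWithNames feeds each (column, value) pair of a CSVWithNames payload
// to assign, signalling lineEnd on the last column of every row — the same
// callback shape the ClickHouse collectors use.
func readCSVWithNames(r io.Reader, assign func(column, value string, lineEnd bool)) error {
	cr := csv.NewReader(r)
	header, err := cr.Read()
	if err != nil {
		return err
	}
	for {
		row, err := cr.Read()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		for i, value := range row {
			assign(header[i], value, i == len(row)-1)
		}
	}
}

func main() {
	// The parsing step stands alone, so it can be shown without a live server.
	data := "metric,value\nMemoryTracking,123\nTCPConnection,4\n"
	_ = readCSVWithNames(strings.NewReader(data), func(column, value string, lineEnd bool) {
		fmt.Println(column, value, lineEnd)
	})
}
```

Centralizing the drain-and-close logic is the point of the refactor: every module previously carried its own `closeBody`, and forgetting the `io.Copy(io.Discard, ...)` step silently disables HTTP keep-alive.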
"patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/clickhouse/init.go b/src/go/plugin/go.d/modules/clickhouse/init.go index 4b8ce3e4f..4d73129b3 100644 --- a/src/go/plugin/go.d/modules/clickhouse/init.go +++ b/src/go/plugin/go.d/modules/clickhouse/init.go @@ -17,5 +17,5 @@ func (c *ClickHouse) validateConfig() error { } func (c *ClickHouse) initHTTPClient() (*http.Client, error) { - return web.NewHTTPClient(c.Client) + return web.NewHTTPClient(c.ClientConfig) } diff --git a/src/go/plugin/go.d/modules/clickhouse/integrations/clickhouse.md b/src/go/plugin/go.d/modules/clickhouse/integrations/clickhouse.md index c4f1384c0..3e0ada214 100644 --- a/src/go/plugin/go.d/modules/clickhouse/integrations/clickhouse.md +++ b/src/go/plugin/go.d/modules/clickhouse/integrations/clickhouse.md @@ -203,8 +203,8 @@ No action required. The configuration file name for this integration is `go.d/clickhouse.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/cockroachdb/cockroachdb.go b/src/go/plugin/go.d/modules/cockroachdb/cockroachdb.go index 32d13fa78..088954f0f 100644 --- a/src/go/plugin/go.d/modules/cockroachdb/cockroachdb.go +++ b/src/go/plugin/go.d/modules/cockroachdb/cockroachdb.go @@ -5,9 +5,11 @@ package cockroachdb import ( _ "embed" "errors" + "fmt" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -33,12 +35,12 @@ func init() { func New() *CockroachDB { return &CockroachDB{ Config: Config{ - HTTP: web.HTTP{ - Request: web.Request{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{ URL: "http://127.0.0.1:8080/_status/vars", }, - Client: web.Client{ - Timeout: web.Duration(time.Second), + ClientConfig: web.ClientConfig{ + Timeout: confopt.Duration(time.Second), }, }, }, @@ -47,8 +49,8 @@ func New() *CockroachDB { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - web.HTTP `yaml:",inline" json:""` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + web.HTTPConfig `yaml:",inline" json:""` } type CockroachDB struct { @@ -66,14 +68,12 @@ func (c *CockroachDB) Configuration() any { func (c *CockroachDB) Init() error { if err := c.validateConfig(); err != nil { - c.Errorf("error on validating config: %v", err) - return err + return fmt.Errorf("error on validating config: %v", err) } prom, err := c.initPrometheusClient() if err != nil { - c.Error(err) - return err + return fmt.Errorf("error on initializing prometheus client: %v", err) } c.prom = prom @@ -88,7 +88,6 @@ func (c *CockroachDB) Init() error { func (c *CockroachDB) Check() error { mx, err := c.collect() if err != nil { - c.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/cockroachdb/cockroachdb_test.go 
b/src/go/plugin/go.d/modules/cockroachdb/cockroachdb_test.go index 886b65fab..f6a537705 100644 --- a/src/go/plugin/go.d/modules/cockroachdb/cockroachdb_test.go +++ b/src/go/plugin/go.d/modules/cockroachdb/cockroachdb_test.go @@ -56,7 +56,7 @@ func TestCockroachDB_Init_ReturnsFalseIfConfigURLIsNotSet(t *testing.T) { func TestCockroachDB_Init_ReturnsFalseIfClientWrongTLSCA(t *testing.T) { cdb := prepareCockroachDB() - cdb.Client.TLSConfig.TLSCA = "testdata/tls" + cdb.ClientConfig.TLSConfig.TLSCA = "testdata/tls" assert.Error(t, cdb.Init()) } @@ -221,9 +221,11 @@ func TestCockroachDB_Collect(t *testing.T) { "valcount": 124081, } - collected := cdb.Collect() - assert.Equal(t, expected, collected) - testCharts(t, cdb, collected) + mx := cdb.Collect() + + assert.Equal(t, expected, mx) + + module.TestMetricsHasAllChartsDims(t, cdb.Charts(), mx) } func TestCockroachDB_Collect_ReturnsNilIfNotCockroachDBMetrics(t *testing.T) { @@ -254,23 +256,6 @@ func TestCockroachDB_Collect_ReturnsNilIfReceiveResponse404(t *testing.T) { assert.Nil(t, cdb.Collect()) } -func testCharts(t *testing.T, cdb *CockroachDB, collected map[string]int64) { - ensureCollectedHasAllChartsDimsVarsIDs(t, cdb, collected) -} - -func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, c *CockroachDB, collected map[string]int64) { - for _, chart := range *c.Charts() { - for _, dim := range chart.Dims { - _, ok := collected[dim.ID] - assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) - } - for _, v := range chart.Vars { - _, ok := collected[v.ID] - assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) - } - } -} - func prepareCockroachDB() *CockroachDB { cdb := New() cdb.URL = "http://127.0.0.1:38001/metrics" diff --git a/src/go/plugin/go.d/modules/cockroachdb/config_schema.json b/src/go/plugin/go.d/modules/cockroachdb/config_schema.json index 51b94f6a6..f808c68d5 100644 --- a/src/go/plugin/go.d/modules/cockroachdb/config_schema.json +++ b/src/go/plugin/go.d/modules/cockroachdb/config_schema.json @@ -105,7 +105,6 @@ "required": [ "url" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/cockroachdb/init.go b/src/go/plugin/go.d/modules/cockroachdb/init.go index 7558e9952..3d9549218 100644 --- a/src/go/plugin/go.d/modules/cockroachdb/init.go +++ b/src/go/plugin/go.d/modules/cockroachdb/init.go @@ -4,9 +4,9 @@ package cockroachdb import ( "errors" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) func (c *CockroachDB) validateConfig() error { @@ -17,9 +17,9 @@ func (c *CockroachDB) validateConfig() error { } func (c *CockroachDB) initPrometheusClient() (prometheus.Prometheus, error) { - client, err := web.NewHTTPClient(c.Client) + client, err := web.NewHTTPClient(c.ClientConfig) if err != nil { return nil, err } - return prometheus.New(client, c.Request), nil + return prometheus.New(client, c.RequestConfig), nil } diff --git a/src/go/plugin/go.d/modules/cockroachdb/integrations/cockroachdb.md b/src/go/plugin/go.d/modules/cockroachdb/integrations/cockroachdb.md index 52e27a87e..9388a4336 100644 --- a/src/go/plugin/go.d/modules/cockroachdb/integrations/cockroachdb.md +++ b/src/go/plugin/go.d/modules/cockroachdb/integrations/cockroachdb.md @@ -154,8 +154,8 @@ No action required. The configuration file name for this integration is `go.d/cockroachdb.conf`. 
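The per-module `ensureCollectedHasAllChartsDimsVarsIDs` copies are consolidated into a shared `module.TestMetricsHasAllChartsDims` helper. Roughly, such a helper walks every non-obsolete chart and asserts that each dimension has a collected value; the sketch below uses simplified stand-ins for the `module` chart types rather than the real signature.

```go
package sketch

import "testing"

// Simplified stand-ins for the module package's chart types.
type Dim struct{ ID string }

type Chart struct {
	ID       string
	Obsolete bool
	Dims     []Dim
}

// TestMetricsHasAllChartsDims asserts that every dimension of every
// non-obsolete chart has a corresponding entry in the collected metrics.
func TestMetricsHasAllChartsDims(t *testing.T, charts []Chart, mx map[string]int64) {
	t.Helper()
	for _, chart := range charts {
		if chart.Obsolete {
			continue
		}
		for _, dim := range chart.Dims {
			if _, ok := mx[dim.ID]; !ok {
				t.Errorf("collected metrics have no data for dim '%s' of chart '%s'", dim.ID, chart.ID)
			}
		}
	}
}
```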
-You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/consul/collect.go b/src/go/plugin/go.d/modules/consul/collect.go index 3033e046e..628bde1b9 100644 --- a/src/go/plugin/go.d/modules/consul/collect.go +++ b/src/go/plugin/go.d/modules/consul/collect.go @@ -3,10 +3,9 @@ package consul import ( - "encoding/json" "fmt" - "io" "net/http" + "slices" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -68,44 +67,23 @@ func (c *Consul) isServer() bool { return c.cfg.Config.Server } -func (c *Consul) doOKDecode(urlPath string, in interface{}, statusCodes ...int) error { - req, err := web.NewHTTPRequestWithPath(c.Request, urlPath) +func (c *Consul) client(statusCodes ...int) *web.Client { + return web.DoHTTP(c.httpClient).OnNokCode(func(resp *http.Response) (bool, error) { + return slices.Contains(statusCodes, resp.StatusCode), nil + }) +} + +func (c *Consul) createRequest(urlPath string) (*http.Request, error) { + req, err := web.NewHTTPRequestWithPath(c.RequestConfig, urlPath) if err != nil { - return fmt.Errorf("error on creating request: %v", err) + return nil, fmt.Errorf("failed to create '%s' request: %w", urlPath, err) } if c.ACLToken != "" { req.Header.Set("X-Consul-Token", c.ACLToken) } - resp, err := c.httpClient.Do(req) - if err != nil { - return fmt.Errorf("error on request to %s : %v", req.URL, err) - } - - defer closeBody(resp) - - codes := map[int]bool{http.StatusOK: true} - for _, v := range statusCodes { - codes[v] = true - } - - if !codes[resp.StatusCode] { - return fmt.Errorf("%s returned HTTP status %d", req.URL, resp.StatusCode) - } - - if err = json.NewDecoder(resp.Body).Decode(&in); err != nil { - return fmt.Errorf("error on decoding response from %s : %v", req.URL, err) - } - - return nil -} - -func closeBody(resp *http.Response) { - if resp != nil && resp.Body != nil { - _, _ = io.Copy(io.Discard, resp.Body) - _ = resp.Body.Close() - } + return req, nil } func boolToInt(v bool) int64 { diff --git a/src/go/plugin/go.d/modules/consul/collect_autopilot.go b/src/go/plugin/go.d/modules/consul/collect_autopilot.go index e73ce9b25..7ae98e439 100644 --- a/src/go/plugin/go.d/modules/consul/collect_autopilot.go +++ b/src/go/plugin/go.d/modules/consul/collect_autopilot.go @@ -25,11 +25,16 @@ type autopilotHealth struct { } func (c *Consul) collectAutopilotHealth(mx map[string]int64) error { + req, err := c.createRequest(urlPathOperationAutopilotHealth) + if err != nil { + return err + } + var health autopilotHealth // The HTTP status code will indicate the health of the cluster: 200 is healthy, 429 is unhealthy. 
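Consul's autopilot health endpoint intentionally answers HTTP 429 when the cluster is unhealthy, so the new `client(statusCodes ...int)` builds a `web.Client` whose `OnNokCode` hook admits extra status codes. A standard-library sketch of the same idea follows; the `getJSON` helper is illustrative and the token value is hypothetical.

```go
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"slices"
)

// getJSON decodes the response into dst, treating 200 plus any explicitly
// allowed status codes (e.g. 429 from /v1/operator/autopilot/health) as success.
func getJSON(client *http.Client, req *http.Request, dst any, okCodes ...int) error {
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer func() {
		_, _ = io.Copy(io.Discard, resp.Body)
		_ = resp.Body.Close()
	}()

	if resp.StatusCode != http.StatusOK && !slices.Contains(okCodes, resp.StatusCode) {
		return fmt.Errorf("'%s' returned HTTP status %d", req.URL, resp.StatusCode)
	}
	return json.NewDecoder(resp.Body).Decode(dst)
}

func main() {
	req, _ := http.NewRequest(http.MethodGet, "http://127.0.0.1:8500/v1/operator/autopilot/health", nil)
	req.Header.Set("X-Consul-Token", "my-acl-token") // hypothetical ACL token

	var health struct{ Healthy bool }
	err := getJSON(http.DefaultClient, req, &health, http.StatusTooManyRequests)
	fmt.Println(health, err)
}
```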
// https://github.com/hashicorp/consul/blob/c7ef04c5979dbc311ff3c67b7bf3028a93e8b0f1/agent/operator_endpoint.go#L325 - if err := c.doOKDecode(urlPathOperationAutopilotHealth, &health, http.StatusTooManyRequests); err != nil { + if err := c.client(http.StatusTooManyRequests).RequestJSON(req, &health); err != nil { return err } diff --git a/src/go/plugin/go.d/modules/consul/collect_checks.go b/src/go/plugin/go.d/modules/consul/collect_checks.go index 88ea4612b..fd9e7026b 100644 --- a/src/go/plugin/go.d/modules/consul/collect_checks.go +++ b/src/go/plugin/go.d/modules/consul/collect_checks.go @@ -18,9 +18,14 @@ type agentCheck struct { } func (c *Consul) collectChecks(mx map[string]int64) error { + req, err := c.createRequest(urlPathAgentChecks) + if err != nil { + return err + } + var checks map[string]*agentCheck - if err := c.doOKDecode(urlPathAgentChecks, &checks); err != nil { + if err := c.client().RequestJSON(req, &checks); err != nil { return err } diff --git a/src/go/plugin/go.d/modules/consul/collect_config.go b/src/go/plugin/go.d/modules/consul/collect_config.go index 14c77067f..493a7a6e4 100644 --- a/src/go/plugin/go.d/modules/consul/collect_config.go +++ b/src/go/plugin/go.d/modules/consul/collect_config.go @@ -46,9 +46,14 @@ type consulConfig struct { } func (c *Consul) collectConfiguration() error { + req, err := c.createRequest(urlPathAgentSelf) + if err != nil { + return err + } + var cfg consulConfig - if err := c.doOKDecode(urlPathAgentSelf, &cfg); err != nil { + if err := c.client().RequestJSON(req, &cfg); err != nil { return err } diff --git a/src/go/plugin/go.d/modules/consul/collect_net_rtt.go b/src/go/plugin/go.d/modules/consul/collect_net_rtt.go index 80330d23c..1dce8d371 100644 --- a/src/go/plugin/go.d/modules/consul/collect_net_rtt.go +++ b/src/go/plugin/go.d/modules/consul/collect_net_rtt.go @@ -23,9 +23,14 @@ type nodeCoordinates struct { } func (c *Consul) collectNetworkRTT(mx map[string]int64) error { + req, err := c.createRequest(urlPathCoordinateNodes) + if err != nil { + return err + } + var coords []nodeCoordinates - if err := c.doOKDecode(urlPathCoordinateNodes, &coords); err != nil { + if err := c.client().RequestJSON(req, &coords); err != nil { return err } diff --git a/src/go/plugin/go.d/modules/consul/config_schema.json b/src/go/plugin/go.d/modules/consul/config_schema.json index a716e15e4..135667dd3 100644 --- a/src/go/plugin/go.d/modules/consul/config_schema.json +++ b/src/go/plugin/go.d/modules/consul/config_schema.json @@ -111,7 +111,6 @@ "required": [ "url" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/consul/consul.go b/src/go/plugin/go.d/modules/consul/consul.go index 6389d0650..13ab25d52 100644 --- a/src/go/plugin/go.d/modules/consul/consul.go +++ b/src/go/plugin/go.d/modules/consul/consul.go @@ -5,11 +5,13 @@ package consul import ( _ "embed" "errors" + "fmt" "net/http" "sync" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" @@ -33,12 +35,12 @@ func init() { func New() *Consul { return &Consul{ Config: Config{ - HTTP: web.HTTP{ - Request: web.Request{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{ URL: "http://127.0.0.1:8500", }, - Client: web.Client{ - Timeout: web.Duration(time.Second), + ClientConfig: web.ClientConfig{ + Timeout: 
confopt.Duration(time.Second), }, }, }, @@ -50,9 +52,9 @@ func New() *Consul { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - web.HTTP `yaml:",inline" json:""` - ACLToken string `yaml:"acl_token,omitempty" json:"acl_token"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + web.HTTPConfig `yaml:",inline" json:""` + ACLToken string `yaml:"acl_token,omitempty" json:"acl_token"` } type Consul struct { @@ -79,21 +81,18 @@ func (c *Consul) Configuration() any { func (c *Consul) Init() error { if err := c.validateConfig(); err != nil { - c.Errorf("config validation: %v", err) - return err + return fmt.Errorf("config validation: %v", err) } httpClient, err := c.initHTTPClient() if err != nil { - c.Errorf("init HTTP client: %v", err) - return err + return fmt.Errorf("init HTTP client: %v", err) } c.httpClient = httpClient prom, err := c.initPrometheusClient(httpClient) if err != nil { - c.Errorf("init Prometheus client: %v", err) - return err + return fmt.Errorf("init Prometheus client: %v", err) } c.prom = prom @@ -103,7 +102,6 @@ func (c *Consul) Init() error { func (c *Consul) Check() error { mx, err := c.collect() if err != nil { - c.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/consul/consul_test.go b/src/go/plugin/go.d/modules/consul/consul_test.go index ccc9f99be..ded6bde4f 100644 --- a/src/go/plugin/go.d/modules/consul/consul_test.go +++ b/src/go/plugin/go.d/modules/consul/consul_test.go @@ -75,8 +75,8 @@ func TestConsul_Init(t *testing.T) { "fail when URL not set": { wantFail: true, config: Config{ - HTTP: web.HTTP{ - Request: web.Request{URL: ""}, + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{URL: ""}, }, }, }, diff --git a/src/go/plugin/go.d/modules/consul/init.go b/src/go/plugin/go.d/modules/consul/init.go index 4ba5b86ea..9f19decdd 100644 --- a/src/go/plugin/go.d/modules/consul/init.go +++ b/src/go/plugin/go.d/modules/consul/init.go @@ -19,13 +19,13 @@ func (c *Consul) validateConfig() error { } func (c *Consul) initHTTPClient() (*http.Client, error) { - return web.NewHTTPClient(c.Client) + return web.NewHTTPClient(c.ClientConfig) } const urlPathAgentMetrics = "/v1/agent/metrics" func (c *Consul) initPrometheusClient(httpClient *http.Client) (prometheus.Prometheus, error) { - r, err := web.NewHTTPRequest(c.Request.Copy()) + r, err := web.NewHTTPRequest(c.RequestConfig.Copy()) if err != nil { return nil, err } @@ -34,7 +34,7 @@ func (c *Consul) initPrometheusClient(httpClient *http.Client) (prometheus.Prome "format": []string{"prometheus"}, }.Encode() - req := c.Request.Copy() + req := c.RequestConfig.Copy() req.URL = r.URL.String() if c.ACLToken != "" { diff --git a/src/go/plugin/go.d/modules/consul/integrations/consul.md b/src/go/plugin/go.d/modules/consul/integrations/consul.md index 3a364bfd4..55a1bbf59 100644 --- a/src/go/plugin/go.d/modules/consul/integrations/consul.md +++ b/src/go/plugin/go.d/modules/consul/integrations/consul.md @@ -202,8 +202,8 @@ Required **only if authentication is enabled**. The configuration file name for this integration is `go.d/consul.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
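A pattern repeated across every module in this commit: `Init()` no longer logs and returns the same error (`c.Errorf(...); return err`) but returns a single wrapped error and leaves logging to the framework, so each failure is reported exactly once. A minimal sketch of the resulting shape; the `Module` type is illustrative, and the `%w` verb (the diff itself uses `%v`) is an optional extra that keeps the cause inspectable via `errors.Is`.

```go
package sketch

import (
	"errors"
	"fmt"
)

type Module struct{ URL string }

func (m *Module) validateConfig() error {
	if m.URL == "" {
		return errors.New("URL not set")
	}
	return nil
}

// Init returns one wrapped error instead of logging it first; the plugin
// framework logs whatever Init returns, so the failure is not duplicated.
func (m *Module) Init() error {
	if err := m.validateConfig(); err != nil {
		return fmt.Errorf("config validation: %w", err)
	}
	return nil
}
```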
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -223,7 +223,7 @@ The following options can be defined globally: update_every, autodetection_retry | url | Server URL. | http://localhost:8500 | yes | | acl_token | ACL token used in every request. | | no | | max_checks | Checks processing/charting limit. | | no | -| max_filter | Checks processing/charting filter. Uses [simple patterns](/src/libnetdata/simple_pattern/README.md). | | no | +| max_filter | Checks processing/charting filter. Uses [simple patterns](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md). | | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | | proxy_url | Proxy URL. | | no | diff --git a/src/go/plugin/go.d/modules/coredns/collect.go b/src/go/plugin/go.d/modules/coredns/collect.go index d6137b181..c801c56bc 100644 --- a/src/go/plugin/go.d/modules/coredns/collect.go +++ b/src/go/plugin/go.d/modules/coredns/collect.go @@ -7,9 +7,10 @@ import ( "fmt" "strings" - "github.com/blang/semver/v4" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm" + + "github.com/blang/semver/v4" ) const ( @@ -174,9 +175,9 @@ func (cd *CoreDNS) collectSummaryRequests(mx *metrics, raw prometheus.Series) { // continue // } // -// setRequestDuration(&mx.Summary.Request, value, le) +// setRequestDuration(&mx.Summary.RequestConfig, value, le) // } -// processRequestDuration(&mx.Summary.Request) +// processRequestDuration(&mx.Summary.RequestConfig) //} func (cd *CoreDNS) collectSummaryRequestsPerType(mx *metrics, raw prometheus.Series) { @@ -290,10 +291,10 @@ func (cd *CoreDNS) collectPerServerRequests(mx *metrics, raw prometheus.Series) // mx.PerServer[server] = &requestResponse{} // } // -// setRequestDuration(&mx.PerServer[server].Request, value, le) +// setRequestDuration(&mx.PerServer[server].RequestConfig, value, le) // } // for _, s := range mx.PerServer { -// processRequestDuration(&s.Request) +// processRequestDuration(&s.RequestConfig) // } //} @@ -433,10 +434,10 @@ func (cd *CoreDNS) collectPerZoneRequests(mx *metrics, raw prometheus.Series) { // mx.PerZone[zone] = &requestResponse{} // } // -// setRequestDuration(&mx.PerZone[zone].Request, value, le) +// setRequestDuration(&mx.PerZone[zone].RequestConfig, value, le) // } // for _, s := range mx.PerZone { -// processRequestDuration(&s.Request) +// processRequestDuration(&s.RequestConfig) // } //} diff --git a/src/go/plugin/go.d/modules/coredns/config_schema.json b/src/go/plugin/go.d/modules/coredns/config_schema.json index d5f87912b..18dc678f8 100644 --- a/src/go/plugin/go.d/modules/coredns/config_schema.json +++ b/src/go/plugin/go.d/modules/coredns/config_schema.json @@ -177,7 +177,6 @@ "required": [ "url" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/coredns/coredns.go b/src/go/plugin/go.d/modules/coredns/coredns.go index c91af7d15..7a29ee85f 100644 --- a/src/go/plugin/go.d/modules/coredns/coredns.go +++ b/src/go/plugin/go.d/modules/coredns/coredns.go @@ 
-5,10 +5,12 @@ package coredns import ( _ "embed" "errors" + "fmt" "time" + "github.com/netdata/netdata/go/plugins/pkg/matcher" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" @@ -29,12 +31,12 @@ func init() { func New() *CoreDNS { return &CoreDNS{ Config: Config{ - HTTP: web.HTTP{ - Request: web.Request{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{ URL: "http://127.0.0.1:9153/metrics", }, - Client: web.Client{ - Timeout: web.Duration(time.Second), + ClientConfig: web.ClientConfig{ + Timeout: confopt.Duration(time.Second), }, }, }, @@ -46,7 +48,7 @@ func New() *CoreDNS { type Config struct { UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - web.HTTP `yaml:",inline" json:""` + web.HTTPConfig `yaml:",inline" json:""` PerServerStats matcher.SimpleExpr `yaml:"per_server_stats,omitempty" json:"per_server_stats"` PerZoneStats matcher.SimpleExpr `yaml:"per_zone_stats,omitempty" json:"per_zone_stats"` } @@ -74,14 +76,12 @@ func (cd *CoreDNS) Configuration() any { func (cd *CoreDNS) Init() error { if err := cd.validateConfig(); err != nil { - cd.Errorf("config validation: %v", err) - return err + return fmt.Errorf("config validation: %v", err) } sm, err := cd.initPerServerMatcher() if err != nil { - cd.Error(err) - return err + return fmt.Errorf("init per_server_stats: %v", err) } if sm != nil { cd.perServerMatcher = sm @@ -89,8 +89,7 @@ func (cd *CoreDNS) Init() error { zm, err := cd.initPerZoneMatcher() if err != nil { - cd.Error(err) - return err + return fmt.Errorf("init per_zone_stats: %v", err) } if zm != nil { cd.perZoneMatcher = zm @@ -98,8 +97,7 @@ func (cd *CoreDNS) Init() error { prom, err := cd.initPrometheusClient() if err != nil { - cd.Error(err) - return err + return fmt.Errorf("init prometheus client: %v", err) } cd.prom = prom @@ -109,7 +107,6 @@ func (cd *CoreDNS) Init() error { func (cd *CoreDNS) Check() error { mx, err := cd.collect() if err != nil { - cd.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/coredns/init.go b/src/go/plugin/go.d/modules/coredns/init.go index e2b888bb6..2cb46240c 100644 --- a/src/go/plugin/go.d/modules/coredns/init.go +++ b/src/go/plugin/go.d/modules/coredns/init.go @@ -5,7 +5,7 @@ package coredns import ( "errors" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher" + "github.com/netdata/netdata/go/plugins/pkg/matcher" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -32,9 +32,9 @@ func (cd *CoreDNS) initPerZoneMatcher() (matcher.Matcher, error) { } func (cd *CoreDNS) initPrometheusClient() (prometheus.Prometheus, error) { - client, err := web.NewHTTPClient(cd.Client) + client, err := web.NewHTTPClient(cd.ClientConfig) if err != nil { return nil, err } - return prometheus.New(client, cd.Request), nil + return prometheus.New(client, cd.RequestConfig), nil } diff --git a/src/go/plugin/go.d/modules/coredns/integrations/coredns.md b/src/go/plugin/go.d/modules/coredns/integrations/coredns.md index 549e2d8d9..2f485188a 100644 --- a/src/go/plugin/go.d/modules/coredns/integrations/coredns.md +++ b/src/go/plugin/go.d/modules/coredns/integrations/coredns.md @@ -137,8 +137,8 @@ No action required. 
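The companion rename is `web.Duration` → `confopt.Duration` for all timeout options. The value of such a wrapper is accepting both a bare number of seconds and a human-readable duration string in job configs. Below is a self-contained sketch of a duration option that decodes either JSON form; the actual `confopt` implementation may differ and also covers YAML.

```go
package main

import (
	"encoding/json"
	"fmt"
	"strconv"
	"time"
)

// Duration is a time.Duration that decodes from either a number of
// seconds (2) or a Go duration string ("2s", "150ms").
type Duration time.Duration

func (d *Duration) UnmarshalJSON(data []byte) error {
	// A bare number is interpreted as seconds.
	if n, err := strconv.ParseFloat(string(data), 64); err == nil {
		*d = Duration(time.Duration(n * float64(time.Second)))
		return nil
	}
	// Otherwise expect a quoted Go duration string.
	var s string
	if err := json.Unmarshal(data, &s); err != nil {
		return err
	}
	v, err := time.ParseDuration(s)
	if err != nil {
		return err
	}
	*d = Duration(v)
	return nil
}

func (d Duration) Duration() time.Duration { return time.Duration(d) }

func main() {
	var cfg struct {
		Timeout Duration `json:"timeout"`
	}
	for _, raw := range []string{`{"timeout": 2}`, `{"timeout": "150ms"}`} {
		if err := json.Unmarshal([]byte(raw), &cfg); err != nil {
			panic(err)
		}
		fmt.Println(cfg.Timeout.Duration()) // 2s, then 150ms
	}
}
```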
The configuration file name for this integration is `go.d/coredns.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -177,7 +177,7 @@ The following options can be defined globally: update_every, autodetection_retry Metrics of servers matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [matcher](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/matcher#supported-format). +- Pattern syntax: [matcher](https://github.com/netdata/netdata/tree/master/src/go/pkg/matcher#supported-format). - Syntax: ```yaml @@ -195,7 +195,7 @@ per_server_stats: Metrics of zones matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [matcher](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/matcher#supported-format). +- Pattern syntax: [matcher](https://github.com/netdata/netdata/tree/master/src/go/pkg/matcher#supported-format). - Syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/coredns/metadata.yaml b/src/go/plugin/go.d/modules/coredns/metadata.yaml index e128ab546..e52a99288 100644 --- a/src/go/plugin/go.d/modules/coredns/metadata.yaml +++ b/src/go/plugin/go.d/modules/coredns/metadata.yaml @@ -70,7 +70,7 @@ modules: detailed_description: | Metrics of servers matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) - - Pattern syntax: [matcher](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/matcher#supported-format). + - Pattern syntax: [matcher](https://github.com/netdata/netdata/tree/master/src/go/pkg/matcher#supported-format). - Syntax: ```yaml @@ -89,7 +89,7 @@ modules: detailed_description: | Metrics of zones matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) - - Pattern syntax: [matcher](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/matcher#supported-format). + - Pattern syntax: [matcher](https://github.com/netdata/netdata/tree/master/src/go/pkg/matcher#supported-format). 
- Syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/couchbase/collect.go b/src/go/plugin/go.d/modules/couchbase/collect.go index 6027ac918..364748827 100644 --- a/src/go/plugin/go.d/modules/couchbase/collect.go +++ b/src/go/plugin/go.d/modules/couchbase/collect.go @@ -3,10 +3,7 @@ package couchbase import ( - "encoding/json" "fmt" - "io" - "net/http" "net/url" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" @@ -112,41 +109,18 @@ func (cb *Couchbase) addDimToChart(chartID string, dim *module.Dim) { } func (cb *Couchbase) scrapeCouchbase() (*cbMetrics, error) { - req, err := web.NewHTTPRequestWithPath(cb.Request, urlPathBucketsStats) + req, err := web.NewHTTPRequestWithPath(cb.RequestConfig, urlPathBucketsStats) if err != nil { return nil, err } req.URL.RawQuery = url.Values{"skipMap": []string{"true"}}.Encode() ms := &cbMetrics{} - if err := cb.doOKDecode(req, &ms.BucketsBasicStats); err != nil { + if err := web.DoHTTP(cb.httpClient).RequestJSON(req, &ms.BucketsBasicStats); err != nil { return nil, err } - return ms, nil -} - -func (cb *Couchbase) doOKDecode(req *http.Request, in interface{}) error { - resp, err := cb.httpClient.Do(req) - if err != nil { - return fmt.Errorf("error on HTTP request '%s': %v", req.URL, err) - } - defer closeBody(resp) - - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("'%s' returned HTTP status code: %d", req.URL, resp.StatusCode) - } - - if err := json.NewDecoder(resp.Body).Decode(in); err != nil { - return fmt.Errorf("error on decoding response from '%s': %v", req.URL, err) - } - return nil -} -func closeBody(resp *http.Response) { - if resp != nil && resp.Body != nil { - _, _ = io.Copy(io.Discard, resp.Body) - _ = resp.Body.Close() - } + return ms, nil } func indexDimID(name, metric string) string { diff --git a/src/go/plugin/go.d/modules/couchbase/config_schema.json b/src/go/plugin/go.d/modules/couchbase/config_schema.json index 6ef455a97..caae8903e 100644 --- a/src/go/plugin/go.d/modules/couchbase/config_schema.json +++ b/src/go/plugin/go.d/modules/couchbase/config_schema.json @@ -105,7 +105,6 @@ "required": [ "url" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/couchbase/couchbase.go b/src/go/plugin/go.d/modules/couchbase/couchbase.go index 8ef880c2c..1edc09343 100644 --- a/src/go/plugin/go.d/modules/couchbase/couchbase.go +++ b/src/go/plugin/go.d/modules/couchbase/couchbase.go @@ -5,10 +5,12 @@ package couchbase import ( _ "embed" "errors" + "fmt" "net/http" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -29,12 +31,12 @@ func init() { func New() *Couchbase { return &Couchbase{ Config: Config{ - HTTP: web.HTTP{ - Request: web.Request{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{ URL: "http://127.0.0.1:8091", }, - Client: web.Client{ - Timeout: web.Duration(time.Second), + ClientConfig: web.ClientConfig{ + Timeout: confopt.Duration(time.Second), }, }, }, @@ -43,8 +45,8 @@ func New() *Couchbase { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - web.HTTP `yaml:",inline" json:""` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + web.HTTPConfig `yaml:",inline" json:""` } type Couchbase struct { @@ -64,21 +66,18 @@ func (cb *Couchbase) Configuration() any { func (cb *Couchbase) Init() error { err := cb.validateConfig() 
if err != nil { - cb.Errorf("check configuration: %v", err) - return err + return fmt.Errorf("check configuration: %v", err) } httpClient, err := cb.initHTTPClient() if err != nil { - cb.Errorf("init HTTP client: %v", err) - return err + return fmt.Errorf("init HTTP client: %v", err) } cb.httpClient = httpClient charts, err := cb.initCharts() if err != nil { - cb.Errorf("init charts: %v", err) - return err + return fmt.Errorf("init charts: %v", err) } cb.charts = charts @@ -88,7 +87,6 @@ func (cb *Couchbase) Init() error { func (cb *Couchbase) Check() error { mx, err := cb.collect() if err != nil { - cb.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/couchbase/couchbase_test.go b/src/go/plugin/go.d/modules/couchbase/couchbase_test.go index b28c8e8fe..2877a59b8 100644 --- a/src/go/plugin/go.d/modules/couchbase/couchbase_test.go +++ b/src/go/plugin/go.d/modules/couchbase/couchbase_test.go @@ -47,8 +47,8 @@ func TestCouchbase_Init(t *testing.T) { "fails on unset 'URL'": { wantFail: true, config: Config{ - HTTP: web.HTTP{ - Request: web.Request{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{ URL: "", }, }, @@ -57,8 +57,8 @@ func TestCouchbase_Init(t *testing.T) { "fails on invalid URL": { wantFail: true, config: Config{ - HTTP: web.HTTP{ - Request: web.Request{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{ URL: "127.0.0.1:9090", }, }, @@ -166,10 +166,10 @@ func TestCouchbase_Collect(t *testing.T) { cb, cleanup := test.prepare(t) defer cleanup() - collected := cb.Collect() + mx := cb.Collect() - assert.Equal(t, test.wantCollected, collected) - ensureCollectedHasAllChartsDimsVarsIDs(t, cb, collected) + assert.Equal(t, test.wantCollected, mx) + module.TestMetricsHasAllChartsDims(t, cb.Charts(), mx) }) } } @@ -222,19 +222,3 @@ func prepareCouchbaseConnectionRefused(t *testing.T) (*Couchbase, func()) { return cb, func() {} } - -func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, cb *Couchbase, collected map[string]int64) { - for _, chart := range *cb.Charts() { - if chart.Obsolete { - continue - } - for _, dim := range chart.Dims { - _, ok := collected[dim.ID] - assert.Truef(t, ok, "chart '%s' dim '%s': no dim in collected", dim.ID, chart.ID) - } - for _, v := range chart.Vars { - _, ok := collected[v.ID] - assert.Truef(t, ok, "chart '%s' dim '%s': no dim in collected", v.ID, chart.ID) - } - } -} diff --git a/src/go/plugin/go.d/modules/couchbase/init.go b/src/go/plugin/go.d/modules/couchbase/init.go index 196e6998c..b25771a3e 100644 --- a/src/go/plugin/go.d/modules/couchbase/init.go +++ b/src/go/plugin/go.d/modules/couchbase/init.go @@ -25,14 +25,14 @@ func (cb *Couchbase) initCharts() (*Charts, error) { } func (cb *Couchbase) initHTTPClient() (*http.Client, error) { - return web.NewHTTPClient(cb.Client) + return web.NewHTTPClient(cb.ClientConfig) } func (cb *Couchbase) validateConfig() error { if cb.URL == "" { return errors.New("URL not set") } - if _, err := web.NewHTTPRequest(cb.Request); err != nil { + if _, err := web.NewHTTPRequest(cb.RequestConfig); err != nil { return err } return nil diff --git a/src/go/plugin/go.d/modules/couchbase/integrations/couchbase.md b/src/go/plugin/go.d/modules/couchbase/integrations/couchbase.md index b53dc940c..e50957a26 100644 --- a/src/go/plugin/go.d/modules/couchbase/integrations/couchbase.md +++ b/src/go/plugin/go.d/modules/couchbase/integrations/couchbase.md @@ -93,8 +93,8 @@ No action required. The configuration file name for this integration is `go.d/couchbase.conf`. 
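The couchbase `validateConfig` keeps its fail-fast shape after the rename: an empty URL is rejected outright, and anything else must survive request construction. A stripped-down version of the same check using only `net/http`; the `Config` type here is a stand-in for the module's real config.

```go
package main

import (
	"errors"
	"fmt"
	"net/http"
)

type Config struct{ URL string }

// validateConfig mirrors the collectors' pattern: fail fast if the URL is
// empty or cannot be turned into an *http.Request.
func validateConfig(cfg Config) error {
	if cfg.URL == "" {
		return errors.New("URL not set")
	}
	if _, err := http.NewRequest(http.MethodGet, cfg.URL, nil); err != nil {
		return fmt.Errorf("invalid URL '%s': %w", cfg.URL, err)
	}
	return nil
}

func main() {
	fmt.Println(validateConfig(Config{URL: ""}))                        // URL not set
	fmt.Println(validateConfig(Config{URL: "127.0.0.1:9090"}))          // no scheme: request construction fails
	fmt.Println(validateConfig(Config{URL: "http://127.0.0.1:8091"}))   // nil
}
```

Doing the request-construction check in `Init()` matches the test cases above, where a scheme-less `127.0.0.1:9090` is expected to fail before any collection is attempted.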
-You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/couchdb/collect.go b/src/go/plugin/go.d/modules/couchdb/collect.go index 21b38fb3a..1902da4d8 100644 --- a/src/go/plugin/go.d/modules/couchdb/collect.go +++ b/src/go/plugin/go.d/modules/couchdb/collect.go @@ -120,40 +120,46 @@ func (cdb *CouchDB) scrapeCouchDB() *cdbMetrics { } func (cdb *CouchDB) scrapeNodeStats(ms *cdbMetrics) { - req, _ := web.NewHTTPRequestWithPath(cdb.Request, fmt.Sprintf(urlPathOverviewStats, cdb.Config.Node)) + req, _ := web.NewHTTPRequestWithPath(cdb.RequestConfig, fmt.Sprintf(urlPathOverviewStats, cdb.Config.Node)) var stats cdbNodeStats - if err := cdb.doOKDecode(req, &stats); err != nil { + + if err := cdb.client().RequestJSON(req, &stats); err != nil { cdb.Warning(err) return } + ms.NodeStats = &stats } func (cdb *CouchDB) scrapeSystemStats(ms *cdbMetrics) { - req, _ := web.NewHTTPRequestWithPath(cdb.Request, fmt.Sprintf(urlPathSystemStats, cdb.Config.Node)) + req, _ := web.NewHTTPRequestWithPath(cdb.RequestConfig, fmt.Sprintf(urlPathSystemStats, cdb.Config.Node)) var stats cdbNodeSystem - if err := cdb.doOKDecode(req, &stats); err != nil { + + if err := cdb.client().RequestJSON(req, &stats); err != nil { cdb.Warning(err) return } + ms.NodeSystem = &stats } func (cdb *CouchDB) scrapeActiveTasks(ms *cdbMetrics) { - req, _ := web.NewHTTPRequestWithPath(cdb.Request, urlPathActiveTasks) + req, _ := web.NewHTTPRequestWithPath(cdb.RequestConfig, urlPathActiveTasks) var stats []cdbActiveTask - if err := cdb.doOKDecode(req, &stats); err != nil { + + if err := cdb.client().RequestJSON(req, &stats); err != nil { cdb.Warning(err) return } + ms.ActiveTasks = stats } func (cdb *CouchDB) scrapeDBStats(ms *cdbMetrics) { - req, _ := web.NewHTTPRequestWithPath(cdb.Request, urlPathDatabases) + req, _ := web.NewHTTPRequestWithPath(cdb.RequestConfig, urlPathDatabases) req.Method = http.MethodPost req.Header.Add("Accept", "application/json") req.Header.Add("Content-Type", "application/json") @@ -170,20 +176,22 @@ func (cdb *CouchDB) scrapeDBStats(ms *cdbMetrics) { req.Body = io.NopCloser(bytes.NewReader(body)) var stats []cdbDBStats - if err := cdb.doOKDecode(req, &stats); err != nil { + + if err := cdb.client().RequestJSON(req, &stats); err != nil { cdb.Warning(err) return } + ms.DBStats = stats } -func findMaxMQSize(MessageQueues map[string]interface{}) int64 { +func findMaxMQSize(MessageQueues map[string]any) int64 { var maxSize float64 for _, mq := range MessageQueues { switch mqSize := mq.(type) { case float64: maxSize = math.Max(maxSize, mqSize) - case map[string]interface{}: + case map[string]any: if v, ok := mqSize["count"].(float64); ok { maxSize = math.Max(maxSize, v) } @@ -193,10 +201,11 @@ func findMaxMQSize(MessageQueues map[string]interface{}) int64 { } func (cdb *CouchDB) pingCouchDB() error { - req, _ := web.NewHTTPRequest(cdb.Request) + req, _ := web.NewHTTPRequest(cdb.RequestConfig) var info struct{ Couchdb string } - if err 
:= cdb.doOKDecode(req, &info); err != nil { + + if err := cdb.client().RequestJSON(req, &info); err != nil { return err } @@ -207,30 +216,17 @@ func (cdb *CouchDB) pingCouchDB() error { return nil } -func (cdb *CouchDB) doOKDecode(req *http.Request, in interface{}) error { - resp, err := cdb.httpClient.Do(req) - if err != nil { - return fmt.Errorf("error on HTTP request '%s': %v", req.URL, err) - } - defer closeBody(resp) - - // TODO: read resp body, it contains reason - // ex.: {"error":"bad_request","reason":"`keys` member must exist."} (400) - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("'%s' returned HTTP status code: %d", req.URL, resp.StatusCode) - } - - if err := json.NewDecoder(resp.Body).Decode(in); err != nil { - return fmt.Errorf("error on decoding response from '%s': %v", req.URL, err) - } - return nil -} - -func closeBody(resp *http.Response) { - if resp != nil && resp.Body != nil { - _, _ = io.Copy(io.Discard, resp.Body) - _ = resp.Body.Close() - } +func (cdb *CouchDB) client() *web.Client { + return web.DoHTTP(cdb.httpClient).OnNokCode(func(resp *http.Response) (bool, error) { + var msg struct { + Error string `json:"error"` + Reason string `json:"reason"` + } + if err := json.NewDecoder(resp.Body).Decode(&msg); err == nil && msg.Error != "" { + return false, fmt.Errorf("error '%s', reason '%s'", msg.Error, msg.Reason) + } + return false, nil + }) } func merge(dst, src map[string]int64, prefix string) { diff --git a/src/go/plugin/go.d/modules/couchdb/config_schema.json b/src/go/plugin/go.d/modules/couchdb/config_schema.json index 0df439b07..4862ce37d 100644 --- a/src/go/plugin/go.d/modules/couchdb/config_schema.json +++ b/src/go/plugin/go.d/modules/couchdb/config_schema.json @@ -117,7 +117,6 @@ "url", "node" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/couchdb/couchdb.go b/src/go/plugin/go.d/modules/couchdb/couchdb.go index 56563ec7b..95abc9c08 100644 --- a/src/go/plugin/go.d/modules/couchdb/couchdb.go +++ b/src/go/plugin/go.d/modules/couchdb/couchdb.go @@ -5,11 +5,13 @@ package couchdb import ( _ "embed" "errors" + "fmt" "net/http" "strings" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -30,12 +32,12 @@ func init() { func New() *CouchDB { return &CouchDB{ Config: Config{ - HTTP: web.HTTP{ - Request: web.Request{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{ URL: "http://127.0.0.1:5984", }, - Client: web.Client{ - Timeout: web.Duration(time.Second * 2), + ClientConfig: web.ClientConfig{ + Timeout: confopt.Duration(time.Second * 2), }, }, Node: "_local", @@ -44,10 +46,10 @@ func New() *CouchDB { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - web.HTTP `yaml:",inline" json:""` - Node string `yaml:"node,omitempty" json:"node"` - Databases string `yaml:"databases,omitempty" json:"databases"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + web.HTTPConfig `yaml:",inline" json:""` + Node string `yaml:"node,omitempty" json:"node"` + Databases string `yaml:"databases,omitempty" json:"databases"` } type CouchDB struct { @@ -68,23 +70,20 @@ func (cdb *CouchDB) Configuration() any { func (cdb *CouchDB) Init() error { err := cdb.validateConfig() if err != nil { - cdb.Errorf("check configuration: %v", err) - return err + return fmt.Errorf("check configuration: 
%v", err) } cdb.databases = strings.Fields(cdb.Config.Databases) httpClient, err := cdb.initHTTPClient() if err != nil { - cdb.Errorf("init HTTP client: %v", err) - return err + return fmt.Errorf("init HTTP client: %v", err) } cdb.httpClient = httpClient charts, err := cdb.initCharts() if err != nil { - cdb.Errorf("init charts: %v", err) - return err + return fmt.Errorf("init charts: %v", err) } cdb.charts = charts @@ -93,13 +92,11 @@ func (cdb *CouchDB) Init() error { func (cdb *CouchDB) Check() error { if err := cdb.pingCouchDB(); err != nil { - cdb.Error(err) return err } mx, err := cdb.collect() if err != nil { - cdb.Error(err) return err } diff --git a/src/go/plugin/go.d/modules/couchdb/couchdb_test.go b/src/go/plugin/go.d/modules/couchdb/couchdb_test.go index 99b7825fd..d4bd570ca 100644 --- a/src/go/plugin/go.d/modules/couchdb/couchdb_test.go +++ b/src/go/plugin/go.d/modules/couchdb/couchdb_test.go @@ -63,15 +63,15 @@ func TestCouchDB_Init(t *testing.T) { "URL not set": { wantFail: true, config: Config{ - HTTP: web.HTTP{ - Request: web.Request{URL: ""}, + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{URL: ""}, }}, }, "invalid TLSCA": { wantFail: true, config: Config{ - HTTP: web.HTTP{ - Client: web.Client{ + HTTPConfig: web.HTTPConfig{ + ClientConfig: web.ClientConfig{ TLSConfig: tlscfg.TLSConfig{TLSCA: "testdata/tls"}, }, }}, @@ -357,35 +357,19 @@ func TestCouchDB_Collect(t *testing.T) { cdb, cleanup := prepareCouchDB(t, test.prepare) defer cleanup() - var collected map[string]int64 + var mx map[string]int64 for i := 0; i < 10; i++ { - collected = cdb.Collect() + mx = cdb.Collect() } - assert.Equal(t, test.wantCollected, collected) + assert.Equal(t, test.wantCollected, mx) if test.checkCharts { - ensureCollectedHasAllChartsDimsVarsIDs(t, cdb, collected) + module.TestMetricsHasAllChartsDims(t, cdb.Charts(), mx) } }) } } -func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, cdb *CouchDB, collected map[string]int64) { - for _, chart := range *cdb.Charts() { - if chart.Obsolete { - continue - } - for _, dim := range chart.Dims { - _, ok := collected[dim.ID] - assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) - } - for _, v := range chart.Vars { - _, ok := collected[v.ID] - assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) - } - } -} - func prepareCouchDB(t *testing.T, createCDB func() *CouchDB) (cdb *CouchDB, cleanup func()) { t.Helper() cdb = createCDB() diff --git a/src/go/plugin/go.d/modules/couchdb/init.go b/src/go/plugin/go.d/modules/couchdb/init.go index 65e555749..159450b9b 100644 --- a/src/go/plugin/go.d/modules/couchdb/init.go +++ b/src/go/plugin/go.d/modules/couchdb/init.go @@ -17,14 +17,14 @@ func (cdb *CouchDB) validateConfig() error { if cdb.Node == "" { return errors.New("'node' not set") } - if _, err := web.NewHTTPRequest(cdb.Request); err != nil { + if _, err := web.NewHTTPRequest(cdb.RequestConfig); err != nil { return err } return nil } func (cdb *CouchDB) initHTTPClient() (*http.Client, error) { - return web.NewHTTPClient(cdb.Client) + return web.NewHTTPClient(cdb.ClientConfig) } func (cdb *CouchDB) initCharts() (*Charts, error) { diff --git a/src/go/plugin/go.d/modules/couchdb/integrations/couchdb.md b/src/go/plugin/go.d/modules/couchdb/integrations/couchdb.md index 5e7f578cc..48bf45ef2 100644 --- a/src/go/plugin/go.d/modules/couchdb/integrations/couchdb.md +++ b/src/go/plugin/go.d/modules/couchdb/integrations/couchdb.md @@ -101,8 +101,8 @@ No action 
required. The configuration file name for this integration is `go.d/couchdb.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/couchdb/metrics.go b/src/go/plugin/go.d/modules/couchdb/metrics.go index 4d2f02679..31ad7a432 100644 --- a/src/go/plugin/go.d/modules/couchdb/metrics.go +++ b/src/go/plugin/go.d/modules/couchdb/metrics.go @@ -182,7 +182,7 @@ type cdbNodeSystem struct { ProcessCount float64 `stm:"process_count" json:"process_count"` InternalReplicationJobs float64 `stm:"internal_replication_jobs" json:"internal_replication_jobs"` - MessageQueues map[string]interface{} `json:"message_queues"` + MessageQueues map[string]any `json:"message_queues"` } type cdbDBStats struct { diff --git a/src/go/plugin/go.d/modules/dmcache/charts.go b/src/go/plugin/go.d/modules/dmcache/charts.go index c77f3d878..30899aa47 100644 --- a/src/go/plugin/go.d/modules/dmcache/charts.go +++ b/src/go/plugin/go.d/modules/dmcache/charts.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux + package dmcache import ( diff --git a/src/go/plugin/go.d/modules/dmcache/collect.go b/src/go/plugin/go.d/modules/dmcache/collect.go index eae961b73..8bfaaae17 100644 --- a/src/go/plugin/go.d/modules/dmcache/collect.go +++ b/src/go/plugin/go.d/modules/dmcache/collect.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux + package dmcache import ( diff --git a/src/go/plugin/go.d/modules/dmcache/config_schema.json b/src/go/plugin/go.d/modules/dmcache/config_schema.json index 4428b4d1b..8d5d0af85 100644 --- a/src/go/plugin/go.d/modules/dmcache/config_schema.json +++ b/src/go/plugin/go.d/modules/dmcache/config_schema.json @@ -19,7 +19,6 @@ "default": 2 } }, - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/dmcache/dmcache.go b/src/go/plugin/go.d/modules/dmcache/dmcache.go index 9f3844b15..11fd9b2a7 100644 --- a/src/go/plugin/go.d/modules/dmcache/dmcache.go +++ b/src/go/plugin/go.d/modules/dmcache/dmcache.go @@ -1,14 +1,17 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux + package dmcache import ( _ "embed" "errors" + "fmt" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" ) //go:embed "config_schema.json" @@ -28,7 +31,7 @@ func init() { func New() *DmCache { return &DmCache{ Config: Config{ - Timeout: web.Duration(time.Second * 2), + Timeout: confopt.Duration(time.Second * 2), }, charts: &module.Charts{}, devices: make(map[string]bool), @@ -36,8 +39,8 @@ func New() *DmCache { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + Timeout confopt.Duration `yaml:"timeout,omitempty" 
json:"timeout"` } type ( @@ -63,8 +66,7 @@ func (c *DmCache) Configuration() any { func (c *DmCache) Init() error { dmsetup, err := c.initDmsetupCLI() if err != nil { - c.Errorf("dmsetup exec initialization: %v", err) - return err + return fmt.Errorf("dmsetup exec initialization: %v", err) } c.exec = dmsetup @@ -74,7 +76,6 @@ func (c *DmCache) Init() error { func (c *DmCache) Check() error { mx, err := c.collect() if err != nil { - c.Error(err) return err } diff --git a/src/go/plugin/go.d/modules/dmcache/dmcache_test.go b/src/go/plugin/go.d/modules/dmcache/dmcache_test.go index 218ae044c..62e7c521c 100644 --- a/src/go/plugin/go.d/modules/dmcache/dmcache_test.go +++ b/src/go/plugin/go.d/modules/dmcache/dmcache_test.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux + package dmcache import ( @@ -192,21 +194,11 @@ func TestLVM_Collect(t *testing.T) { mx := dmcache.Collect() assert.Equal(t, test.wantMetrics, mx) - assert.Len(t, *dmcache.Charts(), test.wantCharts) - testMetricsHasAllChartsDims(t, dmcache, mx) - }) - } -} -func testMetricsHasAllChartsDims(t *testing.T, dmcache *DmCache, mx map[string]int64) { - for _, chart := range *dmcache.Charts() { - if chart.Obsolete { - continue - } - for _, dim := range chart.Dims { - _, ok := mx[dim.ID] - assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) - } + assert.Len(t, *dmcache.Charts(), test.wantCharts, "wantCharts") + + module.TestMetricsHasAllChartsDims(t, dmcache.Charts(), mx) + }) } } diff --git a/src/go/plugin/go.d/modules/dmcache/doc.go b/src/go/plugin/go.d/modules/dmcache/doc.go new file mode 100644 index 000000000..8fe5a4a44 --- /dev/null +++ b/src/go/plugin/go.d/modules/dmcache/doc.go @@ -0,0 +1,3 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package dmcache diff --git a/src/go/plugin/go.d/modules/dmcache/exec.go b/src/go/plugin/go.d/modules/dmcache/exec.go index 1cd11be31..a933867fe 100644 --- a/src/go/plugin/go.d/modules/dmcache/exec.go +++ b/src/go/plugin/go.d/modules/dmcache/exec.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux + package dmcache import ( diff --git a/src/go/plugin/go.d/modules/dmcache/init.go b/src/go/plugin/go.d/modules/dmcache/init.go index 229972da7..f25c06ab4 100644 --- a/src/go/plugin/go.d/modules/dmcache/init.go +++ b/src/go/plugin/go.d/modules/dmcache/init.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux + package dmcache import ( diff --git a/src/go/plugin/go.d/modules/dmcache/integrations/dmcache_devices.md b/src/go/plugin/go.d/modules/dmcache/integrations/dmcache_devices.md index ac61311b9..a8f9dfc5a 100644 --- a/src/go/plugin/go.d/modules/dmcache/integrations/dmcache_devices.md +++ b/src/go/plugin/go.d/modules/dmcache/integrations/dmcache_devices.md @@ -95,8 +95,8 @@ No action required. The configuration file name for this integration is `go.d/dmcache.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/dnsdist/collect.go b/src/go/plugin/go.d/modules/dnsdist/collect.go index 9b860abf4..6b2b0a22e 100644 --- a/src/go/plugin/go.d/modules/dnsdist/collect.go +++ b/src/go/plugin/go.d/modules/dnsdist/collect.go @@ -3,10 +3,6 @@ package dnsdist import ( - "encoding/json" - "fmt" - "io" - "net/http" "net/url" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm" @@ -36,41 +32,16 @@ func (d *DNSdist) collectStatistic(collected map[string]int64, statistics *stati } func (d *DNSdist) scrapeStatistics() (*statisticMetrics, error) { - req, err := web.NewHTTPRequestWithPath(d.Request, urlPathJSONStat) + req, err := web.NewHTTPRequestWithPath(d.RequestConfig, urlPathJSONStat) if err != nil { return nil, err } req.URL.RawQuery = url.Values{"command": []string{"stats"}}.Encode() - var statistics statisticMetrics - if err := d.doOKDecode(req, &statistics); err != nil { + var stats statisticMetrics + if err := web.DoHTTP(d.httpClient).RequestJSON(req, &stats); err != nil { return nil, err } - return &statistics, nil -} - -func (d *DNSdist) doOKDecode(req *http.Request, in interface{}) error { - resp, err := d.httpClient.Do(req) - if err != nil { - return fmt.Errorf("error on HTTP request '%s': %v", req.URL, err) - } - defer closeBody(resp) - - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("'%s' returned HTTP status code: %d", req.URL, resp.StatusCode) - } - - if err := json.NewDecoder(resp.Body).Decode(in); err != nil { - return fmt.Errorf("error on decoding response from '%s': %v", req.URL, err) - } - - return nil -} - -func closeBody(resp *http.Response) { - if resp != nil && resp.Body != nil { - _, _ = io.Copy(io.Discard, resp.Body) - _ = resp.Body.Close() - } + return &stats, nil } diff --git a/src/go/plugin/go.d/modules/dnsdist/config_schema.json b/src/go/plugin/go.d/modules/dnsdist/config_schema.json index a71faaa04..717d775b9 100644 --- a/src/go/plugin/go.d/modules/dnsdist/config_schema.json +++ b/src/go/plugin/go.d/modules/dnsdist/config_schema.json @@ -105,7 +105,6 @@ "required": [ "url" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/dnsdist/dnsdist.go b/src/go/plugin/go.d/modules/dnsdist/dnsdist.go index fd0d8a381..70e497033 100644 --- a/src/go/plugin/go.d/modules/dnsdist/dnsdist.go +++ b/src/go/plugin/go.d/modules/dnsdist/dnsdist.go @@ -5,10 +5,12 @@ package dnsdist import ( _ "embed" "errors" + "fmt" "net/http" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -29,12 +31,12 @@ func init() { func New() *DNSdist { return &DNSdist{ Config: Config{ - HTTP: web.HTTP{ - Request: web.Request{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{ URL: "http://127.0.0.1:8083", }, - Client: web.Client{ - Timeout: web.Duration(time.Second), + ClientConfig: web.ClientConfig{ + Timeout: confopt.Duration(time.Second), }, }, }, @@ -42,8 +44,8 @@ func New() *DNSdist { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - web.HTTP `yaml:",inline" json:""` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + web.HTTPConfig `yaml:",inline" json:""` } type DNSdist struct { @@ -62,21 +64,18 @@ func (d *DNSdist) Configuration() any { func (d *DNSdist) Init() error { err := d.validateConfig() if err != nil { - 
d.Errorf("config validation: %v", err) - return err + return fmt.Errorf("config validation: %v", err) } client, err := d.initHTTPClient() if err != nil { - d.Errorf("init HTTP client: %v", err) - return err + return fmt.Errorf("init HTTP client: %v", err) } d.httpClient = client cs, err := d.initCharts() if err != nil { - d.Errorf("init charts: %v", err) - return err + return fmt.Errorf("init charts: %v", err) } d.charts = cs @@ -86,7 +85,6 @@ func (d *DNSdist) Init() error { func (d *DNSdist) Check() error { mx, err := d.collect() if err != nil { - d.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/dnsdist/dnsdist_test.go b/src/go/plugin/go.d/modules/dnsdist/dnsdist_test.go index 18212c79d..372ddbe4d 100644 --- a/src/go/plugin/go.d/modules/dnsdist/dnsdist_test.go +++ b/src/go/plugin/go.d/modules/dnsdist/dnsdist_test.go @@ -3,12 +3,12 @@ package dnsdist import ( - "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" "net/http" "net/http/httptest" "os" "testing" + "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/tlscfg" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" @@ -48,19 +48,19 @@ func TestDNSdist_Init(t *testing.T) { "fails on unset URL": { wantFail: true, config: Config{ - HTTP: web.HTTP{ - Request: web.Request{URL: ""}, + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{URL: ""}, }, }, }, "fails on invalid TLSCA": { wantFail: true, config: Config{ - HTTP: web.HTTP{ - Request: web.Request{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{ URL: "http://127.0.0.1:38001", }, - Client: web.Client{ + ClientConfig: web.ClientConfig{ TLSConfig: tlscfg.TLSConfig{TLSCA: "testdata/tls"}, }, }, @@ -189,32 +189,16 @@ func TestDNSdist_Collect(t *testing.T) { defer cleanup() require.NoError(t, dist.Init()) - collected := dist.Collect() + mx := dist.Collect() - assert.Equal(t, test.wantCollected, collected) + assert.Equal(t, test.wantCollected, mx) if len(test.wantCollected) > 0 { - ensureCollectedHasAllChartsDimsVarsIDs(t, dist, collected) + module.TestMetricsHasAllChartsDims(t, dist.Charts(), mx) } }) } } -func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, dist *DNSdist, collected map[string]int64) { - for _, chart := range *dist.Charts() { - if chart.Obsolete { - continue - } - for _, dim := range chart.Dims { - _, ok := collected[dim.ID] - assert.Truef(t, ok, "chart '%s' dim '%s': no dim in collected", dim.ID, chart.ID) - } - for _, v := range chart.Vars { - _, ok := collected[v.ID] - assert.Truef(t, ok, "chart '%s' dim '%s': no dim in collected", v.ID, chart.ID) - } - } -} - func preparePowerDNSdistV151() (*DNSdist, func()) { srv := preparePowerDNSDistEndpoint() ns := New() diff --git a/src/go/plugin/go.d/modules/dnsdist/init.go b/src/go/plugin/go.d/modules/dnsdist/init.go index d331da928..5f8075251 100644 --- a/src/go/plugin/go.d/modules/dnsdist/init.go +++ b/src/go/plugin/go.d/modules/dnsdist/init.go @@ -15,7 +15,7 @@ func (d *DNSdist) validateConfig() error { return errors.New("URL not set") } - if _, err := web.NewHTTPRequest(d.Request); err != nil { + if _, err := web.NewHTTPRequest(d.RequestConfig); err != nil { return err } @@ -23,7 +23,7 @@ func (d *DNSdist) validateConfig() error { } func (d *DNSdist) initHTTPClient() (*http.Client, error) { - return web.NewHTTPClient(d.Client) + return web.NewHTTPClient(d.ClientConfig) } func (d *DNSdist) initCharts() (*module.Charts, error) { diff --git 
a/src/go/plugin/go.d/modules/dnsdist/integrations/dnsdist.md b/src/go/plugin/go.d/modules/dnsdist/integrations/dnsdist.md index 934245a57..72c970316 100644 --- a/src/go/plugin/go.d/modules/dnsdist/integrations/dnsdist.md +++ b/src/go/plugin/go.d/modules/dnsdist/integrations/dnsdist.md @@ -101,8 +101,8 @@ For collecting metrics via HTTP, you need to [enable the built-in webserver](htt The configuration file name for this integration is `go.d/dnsdist.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/dnsmasq/config_schema.json b/src/go/plugin/go.d/modules/dnsmasq/config_schema.json index 79396b364..4a5525026 100644 --- a/src/go/plugin/go.d/modules/dnsmasq/config_schema.json +++ b/src/go/plugin/go.d/modules/dnsmasq/config_schema.json @@ -39,7 +39,6 @@ "address", "protocol" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/dnsmasq/dnsmasq.go b/src/go/plugin/go.d/modules/dnsmasq/dnsmasq.go index 2d2112c05..a623b26b6 100644 --- a/src/go/plugin/go.d/modules/dnsmasq/dnsmasq.go +++ b/src/go/plugin/go.d/modules/dnsmasq/dnsmasq.go @@ -5,10 +5,11 @@ package dnsmasq import ( _ "embed" "errors" + "fmt" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/miekg/dns" ) @@ -29,7 +30,7 @@ func New() *Dnsmasq { Config: Config{ Protocol: "udp", Address: "127.0.0.1:53", - Timeout: web.Duration(time.Second), + Timeout: confopt.Duration(time.Second), }, newDNSClient: func(network string, timeout time.Duration) dnsClient { @@ -42,10 +43,10 @@ func New() *Dnsmasq { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - Address string `yaml:"address" json:"address"` - Protocol string `yaml:"protocol,omitempty" json:"protocol"` - Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + Address string `yaml:"address" json:"address"` + Protocol string `yaml:"protocol,omitempty" json:"protocol"` + Timeout confopt.Duration `yaml:"timeout,omitempty" json:"timeout"` } type ( @@ -70,21 +71,18 @@ func (d *Dnsmasq) Configuration() any { func (d *Dnsmasq) Init() error { err := d.validateConfig() if err != nil { - d.Errorf("config validation: %v", err) - return err + return fmt.Errorf("config validation: %v", err) } client, err := d.initDNSClient() if err != nil { - d.Errorf("init DNS client: %v", err) - return err + return fmt.Errorf("init DNS client: %v", err) } d.dnsClient = client charts, err := d.initCharts() if err != nil { - d.Errorf("init charts: %v", err) - return err + return fmt.Errorf("init charts: %v", err) } d.charts = charts @@ -94,7 +92,6 @@ func (d *Dnsmasq) Init() error { func (d *Dnsmasq) Check() error { mx, err := d.collect() if err != nil { - d.Error(err) return err } if 
len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/dnsmasq/dnsmasq_test.go b/src/go/plugin/go.d/modules/dnsmasq/dnsmasq_test.go index b3d54ac9c..423aa2daf 100644 --- a/src/go/plugin/go.d/modules/dnsmasq/dnsmasq_test.go +++ b/src/go/plugin/go.d/modules/dnsmasq/dnsmasq_test.go @@ -152,32 +152,16 @@ func TestDnsmasq_Collect(t *testing.T) { dnsmasq := test.prepare() require.NoError(t, dnsmasq.Init()) - collected := dnsmasq.Collect() + mx := dnsmasq.Collect() - assert.Equal(t, test.wantCollected, collected) + assert.Equal(t, test.wantCollected, mx) if len(test.wantCollected) > 0 { - ensureCollectedHasAllChartsDimsVarsIDs(t, dnsmasq, collected) + module.TestMetricsHasAllChartsDims(t, dnsmasq.Charts(), mx) } }) } } -func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, dnsmasq *Dnsmasq, collected map[string]int64) { - for _, chart := range *dnsmasq.Charts() { - if chart.Obsolete { - continue - } - for _, dim := range chart.Dims { - _, ok := collected[dim.ID] - assert.Truef(t, ok, "chart '%s' dim '%s': no dim in collected", dim.ID, chart.ID) - } - for _, v := range chart.Vars { - _, ok := collected[v.ID] - assert.Truef(t, ok, "chart '%s' dim '%s': no dim in collected", v.ID, chart.ID) - } - } -} - func prepareOKDnsmasq() *Dnsmasq { dnsmasq := New() dnsmasq.newDNSClient = func(network string, timeout time.Duration) dnsClient { diff --git a/src/go/plugin/go.d/modules/dnsmasq/integrations/dnsmasq.md b/src/go/plugin/go.d/modules/dnsmasq/integrations/dnsmasq.md index d5c358a29..fae8811c2 100644 --- a/src/go/plugin/go.d/modules/dnsmasq/integrations/dnsmasq.md +++ b/src/go/plugin/go.d/modules/dnsmasq/integrations/dnsmasq.md @@ -89,8 +89,8 @@ No action required. The configuration file name for this integration is `go.d/dnsmasq.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
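The test rewrites above all converge on one shared helper, `module.TestMetricsHasAllChartsDims`. Its body isn't shown in this patch; the sketch below reconstructs a plausible implementation from the per-module `ensureCollectedHasAllChartsDimsVarsIDs` copies being deleted, with the signature taken from the call sites (`Charts`, `Dims`, and `Vars` are the existing `agent/module` types):

```go
package module

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// TestMetricsHasAllChartsDims asserts that every dimension and variable
// of every non-obsolete chart has a matching key in the collected metrics.
func TestMetricsHasAllChartsDims(t *testing.T, charts *Charts, mx map[string]int64) {
	for _, chart := range *charts {
		if chart.Obsolete {
			continue
		}
		for _, dim := range chart.Dims {
			_, ok := mx[dim.ID]
			assert.Truef(t, ok, "chart '%s' dim '%s': no dim in collected metrics", chart.ID, dim.ID)
		}
		for _, v := range chart.Vars {
			_, ok := mx[v.ID]
			assert.Truef(t, ok, "chart '%s' var '%s': no var in collected metrics", chart.ID, v.ID)
		}
	}
}
```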
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/dnsmasq_dhcp/charts.go b/src/go/plugin/go.d/modules/dnsmasq_dhcp/charts.go index bcef8aa3f..f5be46ae7 100644 --- a/src/go/plugin/go.d/modules/dnsmasq_dhcp/charts.go +++ b/src/go/plugin/go.d/modules/dnsmasq_dhcp/charts.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux + package dnsmasq_dhcp import ( diff --git a/src/go/plugin/go.d/modules/dnsmasq_dhcp/collect.go b/src/go/plugin/go.d/modules/dnsmasq_dhcp/collect.go index 6de2fa215..df5fd5b8f 100644 --- a/src/go/plugin/go.d/modules/dnsmasq_dhcp/collect.go +++ b/src/go/plugin/go.d/modules/dnsmasq_dhcp/collect.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux + package dnsmasq_dhcp import ( diff --git a/src/go/plugin/go.d/modules/dnsmasq_dhcp/config_schema.json b/src/go/plugin/go.d/modules/dnsmasq_dhcp/config_schema.json index f51a3b2a2..0347cbbf2 100644 --- a/src/go/plugin/go.d/modules/dnsmasq_dhcp/config_schema.json +++ b/src/go/plugin/go.d/modules/dnsmasq_dhcp/config_schema.json @@ -37,7 +37,6 @@ "leases_path", "conf_path" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/dnsmasq_dhcp/dhcp.go b/src/go/plugin/go.d/modules/dnsmasq_dhcp/dhcp.go index de56723f7..eb1b24a4e 100644 --- a/src/go/plugin/go.d/modules/dnsmasq_dhcp/dhcp.go +++ b/src/go/plugin/go.d/modules/dnsmasq_dhcp/dhcp.go @@ -1,10 +1,13 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux + package dnsmasq_dhcp import ( _ "embed" "errors" + "fmt" "net" "time" @@ -67,12 +70,10 @@ func (d *DnsmasqDHCP) Configuration() any { func (d *DnsmasqDHCP) Init() error { if err := d.validateConfig(); err != nil { - d.Errorf("config validation: %v", err) - return err + return fmt.Errorf("config validation: %v", err) } if err := d.checkLeasesPath(); err != nil { - d.Errorf("leases path check: %v", err) - return err + return fmt.Errorf("leases path check: %v", err) } return nil @@ -81,7 +82,6 @@ func (d *DnsmasqDHCP) Init() error { func (d *DnsmasqDHCP) Check() error { mx, err := d.collect() if err != nil { - d.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/dnsmasq_dhcp/dhcp_test.go b/src/go/plugin/go.d/modules/dnsmasq_dhcp/dhcp_test.go index a5774ae4a..1224d5e32 100644 --- a/src/go/plugin/go.d/modules/dnsmasq_dhcp/dhcp_test.go +++ b/src/go/plugin/go.d/modules/dnsmasq_dhcp/dhcp_test.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux + package dnsmasq_dhcp import ( diff --git a/src/go/plugin/go.d/modules/dnsmasq_dhcp/doc.go b/src/go/plugin/go.d/modules/dnsmasq_dhcp/doc.go new file mode 100644 index 000000000..4b221bb16 --- /dev/null +++ b/src/go/plugin/go.d/modules/dnsmasq_dhcp/doc.go @@ -0,0 +1,3 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package dnsmasq_dhcp diff --git a/src/go/plugin/go.d/modules/dnsmasq_dhcp/init.go b/src/go/plugin/go.d/modules/dnsmasq_dhcp/init.go index 6c74674a3..7019e46cc 100644 --- a/src/go/plugin/go.d/modules/dnsmasq_dhcp/init.go +++ b/src/go/plugin/go.d/modules/dnsmasq_dhcp/init.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux + package dnsmasq_dhcp import "errors" diff --git a/src/go/plugin/go.d/modules/dnsmasq_dhcp/integrations/dnsmasq_dhcp.md b/src/go/plugin/go.d/modules/dnsmasq_dhcp/integrations/dnsmasq_dhcp.md index 751ebf089..44e146eb0 100644 --- a/src/go/plugin/go.d/modules/dnsmasq_dhcp/integrations/dnsmasq_dhcp.md 
+++ b/src/go/plugin/go.d/modules/dnsmasq_dhcp/integrations/dnsmasq_dhcp.md @@ -32,7 +32,9 @@ By default, it uses: -This collector is supported on all platforms. +This collector is only supported on the following platforms: + +- Linux This collector only supports collecting metrics from a single instance of this integration. @@ -116,8 +118,8 @@ No action required. The configuration file name for this integration is `go.d/dnsmasq_dhcp.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/dnsmasq_dhcp/metadata.yaml b/src/go/plugin/go.d/modules/dnsmasq_dhcp/metadata.yaml index 13b73336c..5a377f6ad 100644 --- a/src/go/plugin/go.d/modules/dnsmasq_dhcp/metadata.yaml +++ b/src/go/plugin/go.d/modules/dnsmasq_dhcp/metadata.yaml @@ -31,7 +31,7 @@ modules: - `/etc/dnsmasq.d` to find additional configurations. method_description: "" supported_platforms: - include: [] + include: [Linux] exclude: [] multi_instance: false additional_permissions: diff --git a/src/go/plugin/go.d/modules/dnsmasq_dhcp/parse_configuration.go b/src/go/plugin/go.d/modules/dnsmasq_dhcp/parse_configuration.go index 5ef29f28e..2bbaadac9 100644 --- a/src/go/plugin/go.d/modules/dnsmasq_dhcp/parse_configuration.go +++ b/src/go/plugin/go.d/modules/dnsmasq_dhcp/parse_configuration.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux + package dnsmasq_dhcp import ( diff --git a/src/go/plugin/go.d/modules/dnsquery/config_schema.json b/src/go/plugin/go.d/modules/dnsquery/config_schema.json index cfa6f3a14..c35932b5a 100644 --- a/src/go/plugin/go.d/modules/dnsquery/config_schema.json +++ b/src/go/plugin/go.d/modules/dnsquery/config_schema.json @@ -105,7 +105,6 @@ "servers", "network" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/dnsquery/dnsquery.go b/src/go/plugin/go.d/modules/dnsquery/dnsquery.go index 408b08ee8..a77a8b6fa 100644 --- a/src/go/plugin/go.d/modules/dnsquery/dnsquery.go +++ b/src/go/plugin/go.d/modules/dnsquery/dnsquery.go @@ -4,10 +4,11 @@ package dnsquery import ( _ "embed" + "fmt" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/miekg/dns" ) @@ -29,7 +30,7 @@ func init() { func New() *DNSQuery { return &DNSQuery{ Config: Config{ - Timeout: web.Duration(time.Second * 2), + Timeout: confopt.Duration(time.Second * 2), Network: "udp", RecordTypes: []string{"A"}, Port: 53, @@ -44,14 +45,14 @@ func New() *DNSQuery { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"` - Domains []string `yaml:"domains" json:"domains"` - Servers []string `yaml:"servers" json:"servers"` - Network string `yaml:"network,omitempty" json:"network"` - RecordType string `yaml:"record_type,omitempty" 
json:"record_type"` - RecordTypes []string `yaml:"record_types,omitempty" json:"record_types"` - Port int `yaml:"port,omitempty" json:"port"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + Timeout confopt.Duration `yaml:"timeout,omitempty" json:"timeout"` + Domains []string `yaml:"domains" json:"domains"` + Servers []string `yaml:"servers" json:"servers"` + Network string `yaml:"network,omitempty" json:"network"` + RecordType string `yaml:"record_type,omitempty" json:"record_type"` + RecordTypes []string `yaml:"record_types,omitempty" json:"record_types"` + Port int `yaml:"port,omitempty" json:"port"` } type ( @@ -77,21 +78,18 @@ func (d *DNSQuery) Configuration() any { func (d *DNSQuery) Init() error { if err := d.verifyConfig(); err != nil { - d.Errorf("config validation: %v", err) - return err + return fmt.Errorf("config validation: %v", err) } rt, err := d.initRecordTypes() if err != nil { - d.Errorf("init record type: %v", err) - return err + return fmt.Errorf("init record type: %v", err) } d.recordTypes = rt charts, err := d.initCharts() if err != nil { - d.Errorf("init charts: %v", err) - return err + return fmt.Errorf("init charts: %v", err) } d.charts = charts diff --git a/src/go/plugin/go.d/modules/dnsquery/dnsquery_test.go b/src/go/plugin/go.d/modules/dnsquery/dnsquery_test.go index a9f55d6e4..3da622def 100644 --- a/src/go/plugin/go.d/modules/dnsquery/dnsquery_test.go +++ b/src/go/plugin/go.d/modules/dnsquery/dnsquery_test.go @@ -9,7 +9,7 @@ import ( "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/miekg/dns" "github.com/stretchr/testify/assert" @@ -47,7 +47,7 @@ func TestDNSQuery_Init(t *testing.T) { Network: "udp", RecordTypes: []string{"A"}, Port: 53, - Timeout: web.Duration(time.Second), + Timeout: confopt.Duration(time.Second), }, }, "success when using deprecated record_type": { @@ -58,7 +58,7 @@ func TestDNSQuery_Init(t *testing.T) { Network: "udp", RecordType: "A", Port: 53, - Timeout: web.Duration(time.Second), + Timeout: confopt.Duration(time.Second), }, }, "fail with default": { @@ -73,7 +73,7 @@ func TestDNSQuery_Init(t *testing.T) { Network: "udp", RecordTypes: []string{"A"}, Port: 53, - Timeout: web.Duration(time.Second), + Timeout: confopt.Duration(time.Second), }, }, "fail when servers not set": { @@ -84,7 +84,7 @@ func TestDNSQuery_Init(t *testing.T) { Network: "udp", RecordTypes: []string{"A"}, Port: 53, - Timeout: web.Duration(time.Second), + Timeout: confopt.Duration(time.Second), }, }, "fail when network is invalid": { @@ -95,7 +95,7 @@ func TestDNSQuery_Init(t *testing.T) { Network: "gcp", RecordTypes: []string{"A"}, Port: 53, - Timeout: web.Duration(time.Second), + Timeout: confopt.Duration(time.Second), }, }, "fail when record_type is invalid": { @@ -106,7 +106,7 @@ func TestDNSQuery_Init(t *testing.T) { Network: "udp", RecordTypes: []string{"B"}, Port: 53, - Timeout: web.Duration(time.Second), + Timeout: confopt.Duration(time.Second), }, }, } diff --git a/src/go/plugin/go.d/modules/dnsquery/init.go b/src/go/plugin/go.d/modules/dnsquery/init.go index 5899a27b2..a19f85646 100644 --- a/src/go/plugin/go.d/modules/dnsquery/init.go +++ b/src/go/plugin/go.d/modules/dnsquery/init.go @@ -5,6 +5,7 @@ package dnsquery import ( "errors" "fmt" + "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" "github.com/miekg/dns" diff --git 
a/src/go/plugin/go.d/modules/dnsquery/integrations/dns_query.md b/src/go/plugin/go.d/modules/dnsquery/integrations/dns_query.md index b081a7bbc..6b4b003d8 100644 --- a/src/go/plugin/go.d/modules/dnsquery/integrations/dns_query.md +++ b/src/go/plugin/go.d/modules/dnsquery/integrations/dns_query.md @@ -98,8 +98,8 @@ No action required. The configuration file name for this integration is `go.d/dns_query.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/docker/config_schema.json b/src/go/plugin/go.d/modules/docker/config_schema.json index bd48c9126..5cf7250ee 100644 --- a/src/go/plugin/go.d/modules/docker/config_schema.json +++ b/src/go/plugin/go.d/modules/docker/config_schema.json @@ -33,7 +33,6 @@ "required": [ "address" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/docker/docker.go b/src/go/plugin/go.d/modules/docker/docker.go index 88890b9fe..0a808a5e6 100644 --- a/src/go/plugin/go.d/modules/docker/docker.go +++ b/src/go/plugin/go.d/modules/docker/docker.go @@ -9,8 +9,8 @@ import ( "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/dockerhost" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" "github.com/docker/docker/api/types" typesContainer "github.com/docker/docker/api/types/container" @@ -34,7 +34,7 @@ func New() *Docker { return &Docker{ Config: Config{ Address: docker.DefaultDockerHost, - Timeout: web.Duration(time.Second * 2), + Timeout: confopt.Duration(time.Second * 2), CollectContainerSize: false, }, @@ -47,10 +47,10 @@ func New() *Docker { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - Address string `yaml:"address" json:"address"` - Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"` - CollectContainerSize bool `yaml:"collect_container_size" json:"collect_container_size"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + Address string `yaml:"address" json:"address"` + Timeout confopt.Duration `yaml:"timeout,omitempty" json:"timeout"` + CollectContainerSize bool `yaml:"collect_container_size" json:"collect_container_size"` } type ( @@ -90,7 +90,6 @@ func (d *Docker) Init() error { func (d *Docker) Check() error { mx, err := d.collect() if err != nil { - d.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/docker/integrations/docker.md b/src/go/plugin/go.d/modules/docker/integrations/docker.md index cb5452530..f3fe8aafe 100644 --- a/src/go/plugin/go.d/modules/docker/integrations/docker.md +++ b/src/go/plugin/go.d/modules/docker/integrations/docker.md @@ -121,8 +121,8 @@ No action required. The configuration file name for this integration is `go.d/docker.conf`. 
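The `web.Duration` → `confopt.Duration` migration repeats across every module in this patch, but only the call sites are shown. Assuming the new type keeps the old one's shape, a minimal sketch consistent with the `confopt.Duration(time.Second * 2)` constructions and the `conf.Timeout.Duration()` accessor seen in these hunks would be:

```go
// Package confopt sketch — illustrative only; the upstream type very
// likely also implements YAML/JSON (un)marshalling, omitted here.
package confopt

import "time"

// Duration is a time.Duration wrapper used in module Config structs.
type Duration time.Duration

// Duration converts the option back to a plain time.Duration,
// matching call sites such as conf.Timeout.Duration().
func (d Duration) Duration() time.Duration { return time.Duration(d) }
```

Construction remains a plain conversion, e.g. `Timeout: confopt.Duration(2 * time.Second)`.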
-You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/docker_engine/config_schema.json b/src/go/plugin/go.d/modules/docker_engine/config_schema.json index 1e40bb585..fa6d6c0c0 100644 --- a/src/go/plugin/go.d/modules/docker_engine/config_schema.json +++ b/src/go/plugin/go.d/modules/docker_engine/config_schema.json @@ -105,7 +105,6 @@ "required": [ "url" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/docker_engine/docker_engine.go b/src/go/plugin/go.d/modules/docker_engine/docker_engine.go index 4f50ecb43..7cc7b3aa0 100644 --- a/src/go/plugin/go.d/modules/docker_engine/docker_engine.go +++ b/src/go/plugin/go.d/modules/docker_engine/docker_engine.go @@ -5,9 +5,11 @@ package docker_engine import ( _ "embed" "errors" + "fmt" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -26,12 +28,12 @@ func init() { func New() *DockerEngine { return &DockerEngine{ Config: Config{ - HTTP: web.HTTP{ - Request: web.Request{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{ URL: "http://127.0.0.1:9323/metrics", }, - Client: web.Client{ - Timeout: web.Duration(time.Second), + ClientConfig: web.ClientConfig{ + Timeout: confopt.Duration(time.Second), }, }, }, @@ -39,8 +41,8 @@ func New() *DockerEngine { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - web.HTTP `yaml:",inline" json:""` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + web.HTTPConfig `yaml:",inline" json:""` } type DockerEngine struct { @@ -59,14 +61,12 @@ func (de *DockerEngine) Configuration() any { func (de *DockerEngine) Init() error { if err := de.validateConfig(); err != nil { - de.Errorf("config validation: %v", err) - return err + return fmt.Errorf("config validation: %v", err) } prom, err := de.initPrometheusClient() if err != nil { - de.Error(err) - return err + return fmt.Errorf("init prometheus client: %v", err) } de.prom = prom @@ -76,7 +76,6 @@ func (de *DockerEngine) Init() error { func (de *DockerEngine) Check() error { mx, err := de.collect() if err != nil { - de.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/docker_engine/docker_engine_test.go b/src/go/plugin/go.d/modules/docker_engine/docker_engine_test.go index 1734f1829..9c998df5b 100644 --- a/src/go/plugin/go.d/modules/docker_engine/docker_engine_test.go +++ b/src/go/plugin/go.d/modules/docker_engine/docker_engine_test.go @@ -56,13 +56,13 @@ func TestDockerEngine_Init(t *testing.T) { config: New().Config, }, "empty URL": { - config: Config{HTTP: web.HTTP{Request: web.Request{URL: ""}}}, + config: Config{HTTPConfig: web.HTTPConfig{RequestConfig: web.RequestConfig{URL: ""}}}, wantFail: true, }, 
"nonexistent TLS CA": { - config: Config{HTTP: web.HTTP{ - Request: web.Request{URL: "http://127.0.0.1:9323/metrics"}, - Client: web.Client{TLSConfig: tlscfg.TLSConfig{TLSCA: "testdata/tls"}}}}, + config: Config{HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{URL: "http://127.0.0.1:9323/metrics"}, + ClientConfig: web.ClientConfig{TLSConfig: tlscfg.TLSConfig{TLSCA: "testdata/tls"}}}}, wantFail: true, }, } @@ -253,29 +253,15 @@ func TestDockerEngine_Collect(t *testing.T) { for i := 0; i < 10; i++ { _ = pulsar.Collect() } - collected := pulsar.Collect() + mx := pulsar.Collect() - require.NotNil(t, collected) - require.Equal(t, test.expected, collected) - ensureCollectedHasAllChartsDimsVarsIDs(t, pulsar, collected) + require.NotNil(t, mx) + require.Equal(t, test.expected, mx) + module.TestMetricsHasAllChartsDims(t, pulsar.Charts(), mx) }) } } -func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, dockerEngine *DockerEngine, collected map[string]int64) { - t.Helper() - for _, chart := range *dockerEngine.Charts() { - for _, dim := range chart.Dims { - _, ok := collected[dim.ID] - assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) - } - for _, v := range chart.Vars { - _, ok := collected[v.ID] - assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) - } - } -} - func prepareClientServerV17050CE(t *testing.T) (*DockerEngine, *httptest.Server) { t.Helper() srv := httptest.NewServer(http.HandlerFunc( diff --git a/src/go/plugin/go.d/modules/docker_engine/init.go b/src/go/plugin/go.d/modules/docker_engine/init.go index 5610af9a9..dbac37fcc 100644 --- a/src/go/plugin/go.d/modules/docker_engine/init.go +++ b/src/go/plugin/go.d/modules/docker_engine/init.go @@ -4,9 +4,9 @@ package docker_engine import ( "errors" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) func (de *DockerEngine) validateConfig() error { @@ -17,9 +17,9 @@ func (de *DockerEngine) validateConfig() error { } func (de *DockerEngine) initPrometheusClient() (prometheus.Prometheus, error) { - client, err := web.NewHTTPClient(de.Client) + client, err := web.NewHTTPClient(de.ClientConfig) if err != nil { return nil, err } - return prometheus.New(client, de.Request), nil + return prometheus.New(client, de.RequestConfig), nil } diff --git a/src/go/plugin/go.d/modules/docker_engine/integrations/docker_engine.md b/src/go/plugin/go.d/modules/docker_engine/integrations/docker_engine.md index eaba917e7..97dafd417 100644 --- a/src/go/plugin/go.d/modules/docker_engine/integrations/docker_engine.md +++ b/src/go/plugin/go.d/modules/docker_engine/integrations/docker_engine.md @@ -99,8 +99,8 @@ To enable built-in Prometheus exporter, follow the [official documentation](http The configuration file name for this integration is `go.d/docker_engine.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/dockerhub/apiclient.go b/src/go/plugin/go.d/modules/dockerhub/apiclient.go index f0da897f8..cf702aeb5 100644 --- a/src/go/plugin/go.d/modules/dockerhub/apiclient.go +++ b/src/go/plugin/go.d/modules/dockerhub/apiclient.go @@ -3,9 +3,7 @@ package dockerhub import ( - "encoding/json" "fmt" - "io" "net/http" "net/url" "path" @@ -22,13 +20,13 @@ type repository struct { LastUpdated string `json:"last_updated"` } -func newAPIClient(client *http.Client, request web.Request) *apiClient { +func newAPIClient(client *http.Client, request web.RequestConfig) *apiClient { return &apiClient{httpClient: client, request: request} } type apiClient struct { httpClient *http.Client - request web.Request + request web.RequestConfig } func (a apiClient) getRepository(repoName string) (*repository, error) { @@ -37,34 +35,17 @@ func (a apiClient) getRepository(repoName string) (*repository, error) { return nil, fmt.Errorf("error on creating http request : %v", err) } - resp, err := a.doRequestOK(req) - defer closeBody(resp) - if err != nil { - return nil, err - } - var repo repository - if err := json.NewDecoder(resp.Body).Decode(&repo); err != nil { - return nil, fmt.Errorf("error on parsing response from %s : %v", req.URL, err) + if err := web.DoHTTP(a.httpClient).RequestJSON(req, &repo); err != nil { + return nil, err } return &repo, nil } -func (a apiClient) doRequestOK(req *http.Request) (*http.Response, error) { - resp, err := a.httpClient.Do(req) - if err != nil { - return nil, fmt.Errorf("error on request: %v", err) - } - - if resp.StatusCode != http.StatusOK { - return resp, fmt.Errorf("%s returned HTTP status %d", req.URL, resp.StatusCode) - } - return resp, nil -} - func (a apiClient) createRequest(urlPath string) (*http.Request, error) { req := a.request.Copy() + u, err := url.Parse(req.URL) if err != nil { return nil, err @@ -72,12 +53,6 @@ func (a apiClient) createRequest(urlPath string) (*http.Request, error) { u.Path = path.Join(u.Path, urlPath) req.URL = u.String() - return web.NewHTTPRequest(req) -} -func closeBody(resp *http.Response) { - if resp != nil && resp.Body != nil { - _, _ = io.Copy(io.Discard, resp.Body) - _ = resp.Body.Close() - } + return web.NewHTTPRequest(req) } diff --git a/src/go/plugin/go.d/modules/dockerhub/config_schema.json b/src/go/plugin/go.d/modules/dockerhub/config_schema.json index 7998516f4..ced42368e 100644 --- a/src/go/plugin/go.d/modules/dockerhub/config_schema.json +++ b/src/go/plugin/go.d/modules/dockerhub/config_schema.json @@ -121,7 +121,6 @@ "url", "repositories" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/dockerhub/dockerhub.go b/src/go/plugin/go.d/modules/dockerhub/dockerhub.go index 37cf64960..c4135793b 100644 --- a/src/go/plugin/go.d/modules/dockerhub/dockerhub.go +++ b/src/go/plugin/go.d/modules/dockerhub/dockerhub.go @@ -5,9 +5,11 @@ package dockerhub import ( _ "embed" "errors" + "fmt" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -28,12 +30,12 @@ func init() { func New() *DockerHub { return &DockerHub{ Config: Config{ - HTTP: web.HTTP{ - Request: web.Request{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{ URL: "https://hub.docker.com/v2/repositories", }, - Client: web.Client{ - Timeout: 
web.Duration(time.Second * 2), + ClientConfig: web.ClientConfig{ + Timeout: confopt.Duration(time.Second * 2), }, }, }, @@ -41,9 +43,9 @@ func New() *DockerHub { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - web.HTTP `yaml:",inline" json:""` - Repositories []string `yaml:"repositories" json:"repositories"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + web.HTTPConfig `yaml:",inline" json:""` + Repositories []string `yaml:"repositories" json:"repositories"` } type DockerHub struct { @@ -59,14 +61,12 @@ func (dh *DockerHub) Configuration() any { func (dh *DockerHub) Init() error { if err := dh.validateConfig(); err != nil { - dh.Errorf("config validation: %v", err) - return err + return fmt.Errorf("config validation: %v", err) } client, err := dh.initApiClient() if err != nil { - dh.Error(err) - return err + return fmt.Errorf("init api client: %v", err) } dh.client = client @@ -76,7 +76,6 @@ func (dh *DockerHub) Init() error { func (dh *DockerHub) Check() error { mx, err := dh.collect() if err != nil { - dh.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/dockerhub/init.go b/src/go/plugin/go.d/modules/dockerhub/init.go index 7e502a5a7..609337dce 100644 --- a/src/go/plugin/go.d/modules/dockerhub/init.go +++ b/src/go/plugin/go.d/modules/dockerhub/init.go @@ -4,6 +4,7 @@ package dockerhub import ( "errors" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -18,9 +19,9 @@ func (dh *DockerHub) validateConfig() error { } func (dh *DockerHub) initApiClient() (*apiClient, error) { - client, err := web.NewHTTPClient(dh.Client) + client, err := web.NewHTTPClient(dh.ClientConfig) if err != nil { return nil, err } - return newAPIClient(client, dh.Request), nil + return newAPIClient(client, dh.RequestConfig), nil } diff --git a/src/go/plugin/go.d/modules/dockerhub/integrations/docker_hub_repository.md b/src/go/plugin/go.d/modules/dockerhub/integrations/docker_hub_repository.md index 72c171d6a..f6ef3eafb 100644 --- a/src/go/plugin/go.d/modules/dockerhub/integrations/docker_hub_repository.md +++ b/src/go/plugin/go.d/modules/dockerhub/integrations/docker_hub_repository.md @@ -91,8 +91,8 @@ No action required. The configuration file name for this integration is `go.d/dockerhub.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
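The dockerhub `apiClient` above — like dnsdist earlier and elasticsearch below — drops its private `doOKDecode`/`closeBody` pair in favor of `web.DoHTTP(client).RequestJSON(req, &v)`. The helper's body isn't part of this patch; the sketch below reproduces the behavior the deleted code implemented and which the shared helper presumably centralizes:

```go
package web // illustrative sketch, not the upstream source

import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
)

// requestJSON mirrors the deleted doOKDecode/closeBody pairs: do the
// request, require 200 OK, decode JSON into in, and always drain and
// close the body so the underlying connection can be reused.
func requestJSON(client *http.Client, req *http.Request, in any) error {
	resp, err := client.Do(req)
	if err != nil {
		return fmt.Errorf("error on HTTP request '%s': %v", req.URL, err)
	}
	defer func() {
		_, _ = io.Copy(io.Discard, resp.Body)
		_ = resp.Body.Close()
	}()

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("'%s' returned HTTP status code: %d", req.URL, resp.StatusCode)
	}
	if err := json.NewDecoder(resp.Body).Decode(in); err != nil {
		return fmt.Errorf("error on decoding response from '%s': %v", req.URL, err)
	}
	return nil
}
```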
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/dovecot/client.go b/src/go/plugin/go.d/modules/dovecot/client.go index 245d1743f..b7b5fa2c6 100644 --- a/src/go/plugin/go.d/modules/dovecot/client.go +++ b/src/go/plugin/go.d/modules/dovecot/client.go @@ -16,10 +16,8 @@ type dovecotConn interface { func newDovecotConn(conf Config) dovecotConn { return &dovecotClient{conn: socket.New(socket.Config{ - Address: conf.Address, - ConnectTimeout: conf.Timeout.Duration(), - ReadTimeout: conf.Timeout.Duration(), - WriteTimeout: conf.Timeout.Duration(), + Address: conf.Address, + Timeout: conf.Timeout.Duration(), })} } diff --git a/src/go/plugin/go.d/modules/dovecot/config_schema.json b/src/go/plugin/go.d/modules/dovecot/config_schema.json index cf99b6939..01e139b46 100644 --- a/src/go/plugin/go.d/modules/dovecot/config_schema.json +++ b/src/go/plugin/go.d/modules/dovecot/config_schema.json @@ -28,7 +28,6 @@ "required": [ "address" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/dovecot/dovecot.go b/src/go/plugin/go.d/modules/dovecot/dovecot.go index ee3d62399..65383c6c6 100644 --- a/src/go/plugin/go.d/modules/dovecot/dovecot.go +++ b/src/go/plugin/go.d/modules/dovecot/dovecot.go @@ -8,7 +8,7 @@ import ( "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" ) //go:embed "config_schema.json" @@ -26,7 +26,7 @@ func New() *Dovecot { return &Dovecot{ Config: Config{ Address: "127.0.0.1:24242", - Timeout: web.Duration(time.Second * 1), + Timeout: confopt.Duration(time.Second * 1), }, newConn: newDovecotConn, charts: charts.Copy(), @@ -34,9 +34,9 @@ func New() *Dovecot { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - Address string `yaml:"address" json:"address"` - Timeout web.Duration `yaml:"timeout" json:"timeout"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + Address string `yaml:"address" json:"address"` + Timeout confopt.Duration `yaml:"timeout" json:"timeout"` } type Dovecot struct { @@ -55,8 +55,7 @@ func (d *Dovecot) Configuration() any { func (d *Dovecot) Init() error { if d.Address == "" { - d.Error("config: 'address' not set") - return errors.New("address not set") + return errors.New("config: 'address' not set") } return nil @@ -65,7 +64,6 @@ func (d *Dovecot) Init() error { func (d *Dovecot) Check() error { mx, err := d.collect() if err != nil { - d.Error(err) return err } diff --git a/src/go/plugin/go.d/modules/dovecot/integrations/dovecot.md b/src/go/plugin/go.d/modules/dovecot/integrations/dovecot.md index 8b45e2de0..884b817bb 100644 --- a/src/go/plugin/go.d/modules/dovecot/integrations/dovecot.md +++ b/src/go/plugin/go.d/modules/dovecot/integrations/dovecot.md @@ -105,8 +105,8 @@ To enable `old_stats` plugin, see [Old Statistics](https://doc.dovecot.org/confi The configuration file name for this integration is `go.d/dovecot.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/elasticsearch/collect.go b/src/go/plugin/go.d/modules/elasticsearch/collect.go index 4f46f1088..561ba83b3 100644 --- a/src/go/plugin/go.d/modules/elasticsearch/collect.go +++ b/src/go/plugin/go.d/modules/elasticsearch/collect.go @@ -3,12 +3,10 @@ package elasticsearch import ( - "encoding/json" "errors" "fmt" - "io" "math" - "net/http" + "slices" "strconv" "strings" "sync" @@ -165,10 +163,10 @@ func (es *Elasticsearch) scrapeNodesStats(ms *esMetrics) { p = urlPathLocalNodeStats } - req, _ := web.NewHTTPRequestWithPath(es.Request, p) + req, _ := web.NewHTTPRequestWithPath(es.RequestConfig, p) var stats esNodesStats - if err := es.doOKDecode(req, &stats); err != nil { + if err := web.DoHTTP(es.httpClient).RequestJSON(req, &stats); err != nil { es.Warning(err) return } @@ -177,10 +175,10 @@ func (es *Elasticsearch) scrapeNodesStats(ms *esMetrics) { } func (es *Elasticsearch) scrapeClusterHealth(ms *esMetrics) { - req, _ := web.NewHTTPRequestWithPath(es.Request, urlPathClusterHealth) + req, _ := web.NewHTTPRequestWithPath(es.RequestConfig, urlPathClusterHealth) var health esClusterHealth - if err := es.doOKDecode(req, &health); err != nil { + if err := web.DoHTTP(es.httpClient).RequestJSON(req, &health); err != nil { es.Warning(err) return } @@ -189,10 +187,10 @@ func (es *Elasticsearch) scrapeClusterHealth(ms *esMetrics) { } func (es *Elasticsearch) scrapeClusterStats(ms *esMetrics) { - req, _ := web.NewHTTPRequestWithPath(es.Request, urlPathClusterStats) + req, _ := web.NewHTTPRequestWithPath(es.RequestConfig, urlPathClusterStats) var stats esClusterStats - if err := es.doOKDecode(req, &stats); err != nil { + if err := web.DoHTTP(es.httpClient).RequestJSON(req, &stats); err != nil { es.Warning(err) return } @@ -201,11 +199,11 @@ func (es *Elasticsearch) scrapeClusterStats(ms *esMetrics) { } func (es *Elasticsearch) scrapeLocalIndicesStats(ms *esMetrics) { - req, _ := web.NewHTTPRequestWithPath(es.Request, urlPathIndicesStats) + req, _ := web.NewHTTPRequestWithPath(es.RequestConfig, urlPathIndicesStats) req.URL.RawQuery = "local=true&format=json" var stats []esIndexStats - if err := es.doOKDecode(req, &stats); err != nil { + if err := web.DoHTTP(es.httpClient).RequestJSON(req, &stats); err != nil { es.Warning(err) return } @@ -214,13 +212,12 @@ func (es *Elasticsearch) scrapeLocalIndicesStats(ms *esMetrics) { } func (es *Elasticsearch) getClusterName() (string, error) { - req, _ := web.NewHTTPRequest(es.Request) + req, _ := web.NewHTTPRequest(es.RequestConfig) var info struct { ClusterName string `json:"cluster_name"` } - - if err := es.doOKDecode(req, &info); err != nil { + if err := web.DoHTTP(es.httpClient).RequestJSON(req, &info); err != nil { return "", err } @@ -231,30 +228,6 @@ func (es *Elasticsearch) getClusterName() (string, error) { return info.ClusterName, nil } -func (es *Elasticsearch) doOKDecode(req *http.Request, in interface{}) error { - resp, err := es.httpClient.Do(req) - if err != nil { - return fmt.Errorf("error on HTTP request '%s': %v", req.URL, err) - } - defer closeBody(resp) - - if resp.StatusCode != 
http.StatusOK { - return fmt.Errorf("'%s' returned HTTP status code: %d", req.URL, resp.StatusCode) - } - - if err := json.NewDecoder(resp.Body).Decode(in); err != nil { - return fmt.Errorf("error on decoding response from '%s': %v", req.URL, err) - } - return nil -} - -func closeBody(resp *http.Response) { - if resp != nil && resp.Body != nil { - _, _ = io.Copy(io.Discard, resp.Body) - _ = resp.Body.Close() - } -} - func convertIndexStoreSizeToBytes(size string) int64 { var num float64 switch { @@ -289,15 +262,9 @@ func boolToInt(v bool) int64 { } func removeSystemIndices(indices []esIndexStats) []esIndexStats { - var i int - for _, index := range indices { - if strings.HasPrefix(index.Index, ".") { - continue - } - indices[i] = index - i++ - } - return indices[:i] + return slices.DeleteFunc(indices, func(stats esIndexStats) bool { + return strings.HasPrefix(stats.Index, ".") + }) } func merge(dst, src map[string]int64, prefix string) { diff --git a/src/go/plugin/go.d/modules/elasticsearch/config_schema.json b/src/go/plugin/go.d/modules/elasticsearch/config_schema.json index 230993b05..905b837cf 100644 --- a/src/go/plugin/go.d/modules/elasticsearch/config_schema.json +++ b/src/go/plugin/go.d/modules/elasticsearch/config_schema.json @@ -135,7 +135,6 @@ "required": [ "url" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/elasticsearch/elasticsearch.go b/src/go/plugin/go.d/modules/elasticsearch/elasticsearch.go index 22280f2dd..7ff1cfcfd 100644 --- a/src/go/plugin/go.d/modules/elasticsearch/elasticsearch.go +++ b/src/go/plugin/go.d/modules/elasticsearch/elasticsearch.go @@ -5,11 +5,13 @@ package elasticsearch import ( _ "embed" "errors" + "fmt" "net/http" "sync" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -30,12 +32,12 @@ func init() { func New() *Elasticsearch { return &Elasticsearch{ Config: Config{ - HTTP: web.HTTP{ - Request: web.Request{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{ URL: "http://127.0.0.1:9200", }, - Client: web.Client{ - Timeout: web.Duration(time.Second * 2), + ClientConfig: web.ClientConfig{ + Timeout: confopt.Duration(time.Second * 2), }, }, ClusterMode: false, @@ -56,7 +58,7 @@ func New() *Elasticsearch { type Config struct { UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - web.HTTP `yaml:",inline" json:""` + web.HTTPConfig `yaml:",inline" json:""` ClusterMode bool `yaml:"cluster_mode" json:"cluster_mode"` DoNodeStats bool `yaml:"collect_node_stats" json:"collect_node_stats"` DoClusterHealth bool `yaml:"collect_cluster_health" json:"collect_cluster_health"` @@ -86,14 +88,12 @@ func (es *Elasticsearch) Configuration() any { func (es *Elasticsearch) Init() error { err := es.validateConfig() if err != nil { - es.Errorf("check configuration: %v", err) - return err + return fmt.Errorf("check configuration: %v", err) } httpClient, err := es.initHTTPClient() if err != nil { - es.Errorf("init HTTP client: %v", err) - return err + return fmt.Errorf("init HTTP client: %v", err) } es.httpClient = httpClient @@ -103,7 +103,6 @@ func (es *Elasticsearch) Init() error { func (es *Elasticsearch) Check() error { mx, err := es.collect() if err != nil { - es.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/elasticsearch/elasticsearch_test.go 
b/src/go/plugin/go.d/modules/elasticsearch/elasticsearch_test.go index ca3aa526a..315b46db8 100644 --- a/src/go/plugin/go.d/modules/elasticsearch/elasticsearch_test.go +++ b/src/go/plugin/go.d/modules/elasticsearch/elasticsearch_test.go @@ -3,12 +3,12 @@ package elasticsearch import ( - "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" "net/http" "net/http/httptest" "os" "testing" + "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/tlscfg" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" @@ -57,8 +57,8 @@ func TestElasticsearch_Init(t *testing.T) { }, "all stats": { config: Config{ - HTTP: web.HTTP{ - Request: web.Request{URL: "http://127.0.0.1:38001"}, + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{URL: "http://127.0.0.1:38001"}, }, DoNodeStats: true, DoClusterHealth: true, @@ -68,8 +68,8 @@ func TestElasticsearch_Init(t *testing.T) { }, "only node_stats": { config: Config{ - HTTP: web.HTTP{ - Request: web.Request{URL: "http://127.0.0.1:38001"}, + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{URL: "http://127.0.0.1:38001"}, }, DoNodeStats: true, DoClusterHealth: false, @@ -80,15 +80,15 @@ func TestElasticsearch_Init(t *testing.T) { "URL not set": { wantFail: true, config: Config{ - HTTP: web.HTTP{ - Request: web.Request{URL: ""}, + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{URL: ""}, }}, }, "invalid TLSCA": { wantFail: true, config: Config{ - HTTP: web.HTTP{ - Client: web.Client{ + HTTPConfig: web.HTTPConfig{ + ClientConfig: web.ClientConfig{ TLSConfig: tlscfg.TLSConfig{TLSCA: "testdata/tls"}, }, }}, @@ -96,8 +96,8 @@ func TestElasticsearch_Init(t *testing.T) { "all API calls are disabled": { wantFail: true, config: Config{ - HTTP: web.HTTP{ - Request: web.Request{URL: "http://127.0.0.1:38001"}, + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{URL: "http://127.0.0.1:38001"}, }, DoNodeStats: false, DoClusterHealth: false, @@ -636,40 +636,13 @@ func TestElasticsearch_Collect(t *testing.T) { mx = es.Collect() } - //m := mx - //l := make([]string, 0) - //for k := range m { - // l = append(l, k) - //} - //sort.Strings(l) - //for _, value := range l { - // fmt.Println(fmt.Sprintf("\"%s\": %d,", value, m[value])) - //} - //return - assert.Equal(t, test.wantCollected, mx) assert.Len(t, *es.Charts(), test.wantCharts) - ensureCollectedHasAllChartsDimsVarsIDs(t, es, mx) + module.TestMetricsHasAllChartsDims(t, es.Charts(), mx) }) } } -func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, es *Elasticsearch, collected map[string]int64) { - for _, chart := range *es.Charts() { - if chart.Obsolete { - continue - } - for _, dim := range chart.Dims { - _, ok := collected[dim.ID] - assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) - } - for _, v := range chart.Vars { - _, ok := collected[v.ID] - assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) - } - } -} - func prepareElasticsearch(t *testing.T, createES func() *Elasticsearch) (es *Elasticsearch, cleanup func()) { t.Helper() srv := prepareElasticsearchEndpoint() diff --git a/src/go/plugin/go.d/modules/elasticsearch/init.go b/src/go/plugin/go.d/modules/elasticsearch/init.go index f87b594f8..6de37551b 100644 --- a/src/go/plugin/go.d/modules/elasticsearch/init.go +++ b/src/go/plugin/go.d/modules/elasticsearch/init.go @@ -16,12 +16,12 @@ func (es *Elasticsearch) validateConfig() error { if !(es.DoNodeStats 
|| es.DoClusterHealth || es.DoClusterStats || es.DoIndicesStats) { return errors.New("all API calls are disabled") } - if _, err := web.NewHTTPRequest(es.Request); err != nil { + if _, err := web.NewHTTPRequest(es.RequestConfig); err != nil { return err } return nil } func (es *Elasticsearch) initHTTPClient() (*http.Client, error) { - return web.NewHTTPClient(es.Client) + return web.NewHTTPClient(es.ClientConfig) } diff --git a/src/go/plugin/go.d/modules/elasticsearch/integrations/elasticsearch.md b/src/go/plugin/go.d/modules/elasticsearch/integrations/elasticsearch.md index ab6f7d00d..79dde2971 100644 --- a/src/go/plugin/go.d/modules/elasticsearch/integrations/elasticsearch.md +++ b/src/go/plugin/go.d/modules/elasticsearch/integrations/elasticsearch.md @@ -193,8 +193,8 @@ No action required. The configuration file name for this integration is `go.d/elasticsearch.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/elasticsearch/integrations/opensearch.md b/src/go/plugin/go.d/modules/elasticsearch/integrations/opensearch.md index 9426ada75..46e3ddf14 100644 --- a/src/go/plugin/go.d/modules/elasticsearch/integrations/opensearch.md +++ b/src/go/plugin/go.d/modules/elasticsearch/integrations/opensearch.md @@ -193,8 +193,8 @@ No action required. The configuration file name for this integration is `go.d/elasticsearch.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/envoy/config_schema.json b/src/go/plugin/go.d/modules/envoy/config_schema.json index 7073337dd..666380944 100644 --- a/src/go/plugin/go.d/modules/envoy/config_schema.json +++ b/src/go/plugin/go.d/modules/envoy/config_schema.json @@ -105,7 +105,6 @@ "required": [ "url" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/envoy/envoy.go b/src/go/plugin/go.d/modules/envoy/envoy.go index 194acf17f..eca5f4581 100644 --- a/src/go/plugin/go.d/modules/envoy/envoy.go +++ b/src/go/plugin/go.d/modules/envoy/envoy.go @@ -5,9 +5,11 @@ package envoy import ( _ "embed" "errors" + "fmt" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -26,12 +28,12 @@ func init() { func New() *Envoy { return &Envoy{ Config: Config{ - HTTP: web.HTTP{ - Request: web.Request{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{ URL: "http://127.0.0.1:9091/stats/prometheus", }, - Client: web.Client{ - Timeout: web.Duration(time.Second), + ClientConfig: web.ClientConfig{ + Timeout: confopt.Duration(time.Second), }, }, }, @@ -48,8 +50,8 @@ func New() *Envoy { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - web.HTTP `yaml:",inline" json:""` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + web.HTTPConfig `yaml:",inline" json:""` } type Envoy struct { @@ -74,14 +76,12 @@ func (e *Envoy) Configuration() any { func (e *Envoy) Init() error { if err := e.validateConfig(); err != nil { - e.Errorf("config validation: %v", err) - return err + return fmt.Errorf("config validation: %v", err) } prom, err := e.initPrometheusClient() if err != nil { - e.Errorf("init Prometheus client: %v", err) - return err + return fmt.Errorf("init Prometheus client: %v", err) } e.prom = prom @@ -91,7 +91,6 @@ func (e *Envoy) Init() error { func (e *Envoy) Check() error { mx, err := e.collect() if err != nil { - e.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/envoy/envoy_test.go b/src/go/plugin/go.d/modules/envoy/envoy_test.go index cbda31f9a..cfcafe10a 100644 --- a/src/go/plugin/go.d/modules/envoy/envoy_test.go +++ b/src/go/plugin/go.d/modules/envoy/envoy_test.go @@ -3,12 +3,12 @@ package envoy import ( - "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" "net/http" "net/http/httptest" "os" "testing" + "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" "github.com/stretchr/testify/assert" @@ -50,8 +50,8 @@ func TestEnvoy_Init(t *testing.T) { "fail when URL not set": { wantFail: true, config: Config{ - HTTP: web.HTTP{ - Request: web.Request{URL: ""}, + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{URL: ""}, }, }, }, @@ -504,24 +504,11 @@ func TestEnvoy_Collect(t *testing.T) { mx := envoy.Collect() require.Equal(t, test.wantMetrics, mx) - ensureCollectedHasAllChartsDimsVarsIDs(t, envoy, mx) + module.TestMetricsHasAllChartsDims(t, envoy.Charts(), mx) }) } } -func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, envoy *Envoy, mx map[string]int64) { - for _, chart := range *envoy.Charts() { - for _, dim := range chart.Dims { - _, ok := mx[dim.ID] - 
assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) - } - for _, v := range chart.Vars { - _, ok := mx[v.ID] - assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) - } - } -} - func prepareCaseEnvoyConsulDataplaneStats() (*Envoy, func()) { srv := httptest.NewServer(http.HandlerFunc( func(w http.ResponseWriter, r *http.Request) { diff --git a/src/go/plugin/go.d/modules/envoy/init.go b/src/go/plugin/go.d/modules/envoy/init.go index 8eba65d95..1a9a5aa58 100644 --- a/src/go/plugin/go.d/modules/envoy/init.go +++ b/src/go/plugin/go.d/modules/envoy/init.go @@ -17,10 +17,10 @@ func (e *Envoy) validateConfig() error { } func (e *Envoy) initPrometheusClient() (prometheus.Prometheus, error) { - httpClient, err := web.NewHTTPClient(e.Client) + httpClient, err := web.NewHTTPClient(e.ClientConfig) if err != nil { return nil, err } - return prometheus.New(httpClient, e.Request), nil + return prometheus.New(httpClient, e.RequestConfig), nil } diff --git a/src/go/plugin/go.d/modules/envoy/integrations/envoy.md b/src/go/plugin/go.d/modules/envoy/integrations/envoy.md index 3865ca529..0a1523c08 100644 --- a/src/go/plugin/go.d/modules/envoy/integrations/envoy.md +++ b/src/go/plugin/go.d/modules/envoy/integrations/envoy.md @@ -140,8 +140,8 @@ No action required. The configuration file name for this integration is `go.d/envoy.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/example/README.md b/src/go/plugin/go.d/modules/example/README.md deleted file mode 100644 index 934dfd108..000000000 --- a/src/go/plugin/go.d/modules/example/README.md +++ /dev/null @@ -1,80 +0,0 @@ - - -# Example module - -An example data collection module. Use it as an example writing a new module. - -## Charts - -This module produces example charts with random values. Number of charts, dimensions and chart type is configurable. - -## Configuration - -Edit the `go.d/example.conf` configuration file using `edit-config` from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md), which is typically at `/etc/netdata`. - -```bash -cd /etc/netdata # Replace this path with your Netdata config directory -sudo ./edit-config go.d/example.conf -``` - -Disabled by default. Should be explicitly enabled -in [go.d.conf](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/config/go.d.conf). - -```yaml -# go.d.conf -modules: - example: yes -``` - -Here is an example configuration with several jobs: - -```yaml -jobs: - - name: example - charts: - num: 3 - dimensions: 5 - - - name: hidden_example - hidden_charts: - num: 3 - dimensions: 5 -``` - ---- - -For all available options, see the Example -collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/config/go.d/example.conf). 
- -## Troubleshooting - -To troubleshoot issues with the `example` collector, run the `go.d.plugin` with the debug option enabled. The output -should give you clues as to why the collector isn't working. - -- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on - your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. - - ```bash - cd /usr/libexec/netdata/plugins.d/ - ``` - -- Switch to the `netdata` user. - - ```bash - sudo -u netdata -s - ``` - -- Run the `go.d.plugin` to debug the collector: - - ```bash - ./go.d.plugin -d -m example - ``` diff --git a/src/go/plugin/go.d/modules/example/charts.go b/src/go/plugin/go.d/modules/example/charts.go deleted file mode 100644 index 71ecafdb4..000000000 --- a/src/go/plugin/go.d/modules/example/charts.go +++ /dev/null @@ -1,59 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package example - -import ( - "fmt" - "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" -) - -var chartTemplate = module.Chart{ - ID: "random_%d", - Title: "A Random Number", - Units: "random", - Fam: "random", - Ctx: "example.random", -} - -var hiddenChartTemplate = module.Chart{ - ID: "hidden_random_%d", - Title: "A Random Number", - Units: "random", - Fam: "random", - Ctx: "example.random", - Opts: module.Opts{ - Hidden: true, - }, -} - -func newChart(num, ctx, labels int, typ module.ChartType) *module.Chart { - chart := chartTemplate.Copy() - chart.ID = fmt.Sprintf(chart.ID, num) - chart.Type = typ - if ctx > 0 { - chart.Ctx += fmt.Sprintf("_%d", ctx) - } - for i := 0; i < labels; i++ { - chart.Labels = append(chart.Labels, module.Label{ - Key: fmt.Sprintf("example_name_%d", i), - Value: fmt.Sprintf("example_value_%d_%d", num, i), - }) - } - return chart -} - -func newHiddenChart(num, ctx, labels int, typ module.ChartType) *module.Chart { - chart := hiddenChartTemplate.Copy() - chart.ID = fmt.Sprintf(chart.ID, num) - chart.Type = typ - if ctx > 0 { - chart.Ctx += fmt.Sprintf("_%d", ctx) - } - for i := 0; i < labels; i++ { - chart.Labels = append(chart.Labels, module.Label{ - Key: fmt.Sprintf("example_name_%d", i), - Value: fmt.Sprintf("example_value_%d_%d", num, i), - }) - } - return chart -} diff --git a/src/go/plugin/go.d/modules/example/collect.go b/src/go/plugin/go.d/modules/example/collect.go deleted file mode 100644 index b72d3c252..000000000 --- a/src/go/plugin/go.d/modules/example/collect.go +++ /dev/null @@ -1,47 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package example - -import ( - "fmt" - - "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" -) - -func (e *Example) collect() (map[string]int64, error) { - collected := make(map[string]int64) - - for _, chart := range *e.Charts() { - e.collectChart(collected, chart) - } - return collected, nil -} - -func (e *Example) collectChart(collected map[string]int64, chart *module.Chart) { - var num int - if chart.Opts.Hidden { - num = e.Config.HiddenCharts.Dims - } else { - num = e.Config.Charts.Dims - } - - for i := 0; i < num; i++ { - name := fmt.Sprintf("random%d", i) - id := fmt.Sprintf("%s_%s", chart.ID, name) - - if !e.collectedDims[id] { - e.collectedDims[id] = true - - dim := &module.Dim{ID: id, Name: name} - if err := chart.AddDim(dim); err != nil { - e.Warning(err) - } - chart.MarkNotCreated() - } - if i%2 == 0 { - collected[id] = e.randInt() - } else { - collected[id] = -e.randInt() - } - } -} diff --git a/src/go/plugin/go.d/modules/example/config_schema.json 
b/src/go/plugin/go.d/modules/example/config_schema.json deleted file mode 100644 index 328773f6d..000000000 --- a/src/go/plugin/go.d/modules/example/config_schema.json +++ /dev/null @@ -1,177 +0,0 @@ -{ - "jsonSchema": { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "Example collector configuration.", - "type": "object", - "properties": { - "update_every": { - "title": "Update every", - "description": "Data collection interval, measured in seconds.", - "type": "integer", - "minimum": 1, - "default": 1 - }, - "charts": { - "title": "Charts configuration", - "type": [ - "object", - "null" - ], - "properties": { - "type": { - "title": "Chart type", - "description": "The type of all charts.", - "type": "string", - "enum": [ - "line", - "area", - "stacked" - ], - "default": "line" - }, - "num": { - "title": "Number of charts", - "description": "The total number of charts to create.", - "type": "integer", - "minimum": 0, - "default": 1 - }, - "contexts": { - "title": "Number of contexts", - "description": "The total number of unique contexts.", - "type": "integer", - "minimum": 0, - "default": 0 - }, - "dimensions": { - "title": "Number of dimensions", - "description": "The number of dimensions each chart will have.", - "type": "integer", - "minimum": 1, - "default": 4 - }, - "labels": { - "title": "Number of labels", - "description": "The number of labels each chart will have.", - "type": "integer", - "minimum": 0, - "default": 0 - } - }, - "required": [ - "type", - "num", - "contexts", - "dimensions", - "labels" - ] - }, - "hidden_charts": { - "title": "Hidden charts configuration", - "type": [ - "object", - "null" - ], - "properties": { - "type": { - "title": "Chart type", - "description": "The type of all charts.", - "type": "string", - "enum": [ - "line", - "area", - "stacked" - ], - "default": "line" - }, - "num": { - "title": "Number of charts", - "description": "The total number of charts to create.", - "type": "integer", - "minimum": 0, - "default": 0 - }, - "contexts": { - "title": "Number of contexts", - "description": "The total number of unique contexts.", - "type": "integer", - "minimum": 0, - "default": 0 - }, - "dimensions": { - "title": "Number of dimensions", - "description": "The number of dimensions each chart will have.", - "type": "integer", - "minimum": 1, - "default": 4 - }, - "labels": { - "title": "Number of labels", - "description": "The number of labels each chart will have.", - "type": "integer", - "minimum": 0, - "default": 0 - } - }, - "required": [ - "type", - "num", - "contexts", - "dimensions", - "labels" - ] - } - }, - "required": [ - "charts" - ], - "additionalProperties": false, - "patternProperties": { - "^name$": {} - } - }, - "uiSchema": { - "uiOptions": { - "fullPage": true - }, - "charts": { - "type": { - "ui:widget": "radio", - "ui:options": { - "inline": true - } - } - }, - "hidden_charts": { - "type": { - "ui:widget": "radio", - "ui:options": { - "inline": true - } - } - }, - "ui:flavour": "tabs", - "ui:options": { - "tabs": [ - { - "title": "Base", - "fields": [ - "update_every" - ] - }, - { - "title": "Charts", - "fields": [ - "charts" - ] - }, - { - "title": "Hidden charts", - "fields": [ - "hidden_charts" - ] - } - ] - } - } -} diff --git a/src/go/plugin/go.d/modules/example/example.go b/src/go/plugin/go.d/modules/example/example.go deleted file mode 100644 index 2ca0ad976..000000000 --- a/src/go/plugin/go.d/modules/example/example.go +++ /dev/null @@ -1,110 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package 
example - -import ( - _ "embed" - "math/rand" - - "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" -) - -//go:embed "config_schema.json" -var configSchema string - -func init() { - module.Register("example", module.Creator{ - JobConfigSchema: configSchema, - Defaults: module.Defaults{ - UpdateEvery: module.UpdateEvery, - Priority: module.Priority, - Disabled: true, - }, - Create: func() module.Module { return New() }, - Config: func() any { return &Config{} }, - }) -} - -func New() *Example { - return &Example{ - Config: Config{ - Charts: ConfigCharts{ - Num: 1, - Dims: 4, - }, - HiddenCharts: ConfigCharts{ - Num: 0, - Dims: 4, - }, - }, - - randInt: func() int64 { return rand.Int63n(100) }, - collectedDims: make(map[string]bool), - } -} - -type ( - Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - Charts ConfigCharts `yaml:"charts" json:"charts"` - HiddenCharts ConfigCharts `yaml:"hidden_charts" json:"hidden_charts"` - } - ConfigCharts struct { - Type string `yaml:"type,omitempty" json:"type"` - Num int `yaml:"num" json:"num"` - Contexts int `yaml:"contexts" json:"contexts"` - Dims int `yaml:"dimensions" json:"dimensions"` - Labels int `yaml:"labels" json:"labels"` - } -) - -type Example struct { - module.Base // should be embedded by every module - Config `yaml:",inline"` - - randInt func() int64 - charts *module.Charts - collectedDims map[string]bool -} - -func (e *Example) Configuration() any { - return e.Config -} - -func (e *Example) Init() error { - err := e.validateConfig() - if err != nil { - e.Errorf("config validation: %v", err) - return err - } - - charts, err := e.initCharts() - if err != nil { - e.Errorf("charts init: %v", err) - return err - } - e.charts = charts - return nil -} - -func (e *Example) Check() error { - return nil -} - -func (e *Example) Charts() *module.Charts { - return e.charts -} - -func (e *Example) Collect() map[string]int64 { - mx, err := e.collect() - if err != nil { - e.Error(err) - } - - if len(mx) == 0 { - return nil - } - return mx -} - -func (e *Example) Cleanup() {} diff --git a/src/go/plugin/go.d/modules/example/example_test.go b/src/go/plugin/go.d/modules/example/example_test.go deleted file mode 100644 index 26b3ec9c8..000000000 --- a/src/go/plugin/go.d/modules/example/example_test.go +++ /dev/null @@ -1,351 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package example - -import ( - "os" - "testing" - - "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -var ( - dataConfigJSON, _ = os.ReadFile("testdata/config.json") - dataConfigYAML, _ = os.ReadFile("testdata/config.yaml") -) - -func Test_testDataIsValid(t *testing.T) { - for name, data := range map[string][]byte{ - "dataConfigJSON": dataConfigJSON, - "dataConfigYAML": dataConfigYAML, - } { - require.NotNil(t, data, name) - } -} - -func TestExample_ConfigurationSerialize(t *testing.T) { - module.TestConfigurationSerialize(t, &Example{}, dataConfigJSON, dataConfigYAML) -} - -func TestNew(t *testing.T) { - // We want to ensure that module is a reference type, nothing more. - - assert.IsType(t, (*Example)(nil), New()) -} - -func TestExample_Init(t *testing.T) { - // 'Init() bool' initializes the module with an appropriate config, so to test it we need: - // - provide the config. - // - set module.Config field with the config. - // - call Init() and compare its return value with the expected value. 
- - // 'test' map contains different test cases. - tests := map[string]struct { - config Config - wantFail bool - }{ - "success on default config": { - config: New().Config, - }, - "success when only 'charts' set": { - config: Config{ - Charts: ConfigCharts{ - Num: 1, - Dims: 2, - }, - }, - }, - "success when only 'hidden_charts' set": { - config: Config{ - HiddenCharts: ConfigCharts{ - Num: 1, - Dims: 2, - }, - }, - }, - "success when 'charts' and 'hidden_charts' set": { - config: Config{ - Charts: ConfigCharts{ - Num: 1, - Dims: 2, - }, - HiddenCharts: ConfigCharts{ - Num: 1, - Dims: 2, - }, - }, - }, - "fails when 'charts' and 'hidden_charts' set, but 'num' == 0": { - wantFail: true, - config: Config{ - Charts: ConfigCharts{ - Num: 0, - Dims: 2, - }, - HiddenCharts: ConfigCharts{ - Num: 0, - Dims: 2, - }, - }, - }, - "fails when only 'charts' set, 'num' > 0, but 'dimensions' == 0": { - wantFail: true, - config: Config{ - Charts: ConfigCharts{ - Num: 1, - Dims: 0, - }, - }, - }, - "fails when only 'hidden_charts' set, 'num' > 0, but 'dimensions' == 0": { - wantFail: true, - config: Config{ - HiddenCharts: ConfigCharts{ - Num: 1, - Dims: 0, - }, - }, - }, - } - - for name, test := range tests { - t.Run(name, func(t *testing.T) { - example := New() - example.Config = test.config - - if test.wantFail { - assert.Error(t, example.Init()) - } else { - assert.NoError(t, example.Init()) - } - }) - } -} - -func TestExample_Check(t *testing.T) { - // 'Check() bool' reports whether the module is able to collect any data, so to test it we need: - // - provide the module with a specific config. - // - initialize the module (call Init()). - // - call Check() and compare its return value with the expected value. - - // 'test' map contains different test cases. - tests := map[string]struct { - prepare func() *Example - wantFail bool - }{ - "success on default": {prepare: prepareExampleDefault}, - "success when only 'charts' set": {prepare: prepareExampleOnlyCharts}, - "success when only 'hidden_charts' set": {prepare: prepareExampleOnlyHiddenCharts}, - "success when 'charts' and 'hidden_charts' set": {prepare: prepareExampleChartsAndHiddenCharts}, - } - - for name, test := range tests { - t.Run(name, func(t *testing.T) { - example := test.prepare() - require.NoError(t, example.Init()) - - if test.wantFail { - assert.Error(t, example.Check()) - } else { - assert.NoError(t, example.Check()) - } - }) - } -} - -func TestExample_Charts(t *testing.T) { - // We want to ensure that initialized module does not return 'nil'. - // If it is not 'nil' we are ok. - - // 'test' map contains different test cases. - tests := map[string]struct { - prepare func(t *testing.T) *Example - wantNil bool - }{ - "not initialized collector": { - wantNil: true, - prepare: func(t *testing.T) *Example { - return New() - }, - }, - "initialized collector": { - prepare: func(t *testing.T) *Example { - example := New() - require.NoError(t, example.Init()) - return example - }, - }, - } - - for name, test := range tests { - t.Run(name, func(t *testing.T) { - example := test.prepare(t) - - if test.wantNil { - assert.Nil(t, example.Charts()) - } else { - assert.NotNil(t, example.Charts()) - } - }) - } -} - -func TestExample_Cleanup(t *testing.T) { - // Since this module has nothing to clean up, - // we want just to ensure that Cleanup() not panics. 
- - assert.NotPanics(t, New().Cleanup) -} - -func TestExample_Collect(t *testing.T) { - // 'Collect() map[string]int64' returns collected data, so to test it we need: - // - provide the module with a specific config. - // - initialize the module (call Init()). - // - call Collect() and compare its return value with the expected value. - - // 'test' map contains different test cases. - tests := map[string]struct { - prepare func() *Example - wantCollected map[string]int64 - }{ - "default config": { - prepare: prepareExampleDefault, - wantCollected: map[string]int64{ - "random_0_random0": 1, - "random_0_random1": -1, - "random_0_random2": 1, - "random_0_random3": -1, - }, - }, - "only 'charts' set": { - prepare: prepareExampleOnlyCharts, - wantCollected: map[string]int64{ - "random_0_random0": 1, - "random_0_random1": -1, - "random_0_random2": 1, - "random_0_random3": -1, - "random_0_random4": 1, - "random_1_random0": 1, - "random_1_random1": -1, - "random_1_random2": 1, - "random_1_random3": -1, - "random_1_random4": 1, - }, - }, - "only 'hidden_charts' set": { - prepare: prepareExampleOnlyHiddenCharts, - wantCollected: map[string]int64{ - "hidden_random_0_random0": 1, - "hidden_random_0_random1": -1, - "hidden_random_0_random2": 1, - "hidden_random_0_random3": -1, - "hidden_random_0_random4": 1, - "hidden_random_1_random0": 1, - "hidden_random_1_random1": -1, - "hidden_random_1_random2": 1, - "hidden_random_1_random3": -1, - "hidden_random_1_random4": 1, - }, - }, - "'charts' and 'hidden_charts' set": { - prepare: prepareExampleChartsAndHiddenCharts, - wantCollected: map[string]int64{ - "hidden_random_0_random0": 1, - "hidden_random_0_random1": -1, - "hidden_random_0_random2": 1, - "hidden_random_0_random3": -1, - "hidden_random_0_random4": 1, - "hidden_random_1_random0": 1, - "hidden_random_1_random1": -1, - "hidden_random_1_random2": 1, - "hidden_random_1_random3": -1, - "hidden_random_1_random4": 1, - "random_0_random0": 1, - "random_0_random1": -1, - "random_0_random2": 1, - "random_0_random3": -1, - "random_0_random4": 1, - "random_1_random0": 1, - "random_1_random1": -1, - "random_1_random2": 1, - "random_1_random3": -1, - "random_1_random4": 1, - }, - }, - } - - for name, test := range tests { - t.Run(name, func(t *testing.T) { - example := test.prepare() - require.NoError(t, example.Init()) - - collected := example.Collect() - - assert.Equal(t, test.wantCollected, collected) - ensureCollectedHasAllChartsDimsVarsIDs(t, example, collected) - }) - } -} - -func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, e *Example, collected map[string]int64) { - for _, chart := range *e.Charts() { - if chart.Obsolete { - continue - } - for _, dim := range chart.Dims { - _, ok := collected[dim.ID] - assert.Truef(t, ok, - "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) - } - for _, v := range chart.Vars { - _, ok := collected[v.ID] - assert.Truef(t, ok, - "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) - } - } -} - -func prepareExampleDefault() *Example { - return prepareExample(New().Config) -} - -func prepareExampleOnlyCharts() *Example { - return prepareExample(Config{ - Charts: ConfigCharts{ - Num: 2, - Dims: 5, - }, - }) -} - -func prepareExampleOnlyHiddenCharts() *Example { - return prepareExample(Config{ - HiddenCharts: ConfigCharts{ - Num: 2, - Dims: 5, - }, - }) -} - -func prepareExampleChartsAndHiddenCharts() *Example { - return prepareExample(Config{ - Charts: ConfigCharts{ - Num: 2, - Dims: 5, - }, - HiddenCharts: ConfigCharts{ - 
Num: 2, - Dims: 5, - }, - }) -} - -func prepareExample(cfg Config) *Example { - example := New() - example.Config = cfg - example.randInt = func() int64 { return 1 } - return example -} diff --git a/src/go/plugin/go.d/modules/example/init.go b/src/go/plugin/go.d/modules/example/init.go deleted file mode 100644 index f159c4b53..000000000 --- a/src/go/plugin/go.d/modules/example/init.go +++ /dev/null @@ -1,63 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package example - -import ( - "errors" - "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" -) - -func (e *Example) validateConfig() error { - if e.Config.Charts.Num <= 0 && e.Config.HiddenCharts.Num <= 0 { - return errors.New("'charts->num' or `hidden_charts->num` must be > 0") - } - if e.Config.Charts.Num > 0 && e.Config.Charts.Dims <= 0 { - return errors.New("'charts->dimensions' must be > 0") - } - if e.Config.HiddenCharts.Num > 0 && e.Config.HiddenCharts.Dims <= 0 { - return errors.New("'hidden_charts->dimensions' must be > 0") - } - return nil -} - -func (e *Example) initCharts() (*module.Charts, error) { - charts := &module.Charts{} - - var ctx int - v := calcContextEvery(e.Config.Charts.Num, e.Config.Charts.Contexts) - for i := 0; i < e.Config.Charts.Num; i++ { - if i != 0 && v != 0 && ctx < (e.Config.Charts.Contexts-1) && i%v == 0 { - ctx++ - } - chart := newChart(i, ctx, e.Config.Charts.Labels, module.ChartType(e.Config.Charts.Type)) - - if err := charts.Add(chart); err != nil { - return nil, err - } - } - - ctx = 0 - v = calcContextEvery(e.Config.HiddenCharts.Num, e.Config.HiddenCharts.Contexts) - for i := 0; i < e.Config.HiddenCharts.Num; i++ { - if i != 0 && v != 0 && ctx < (e.Config.HiddenCharts.Contexts-1) && i%v == 0 { - ctx++ - } - chart := newHiddenChart(i, ctx, e.Config.HiddenCharts.Labels, module.ChartType(e.Config.HiddenCharts.Type)) - - if err := charts.Add(chart); err != nil { - return nil, err - } - } - - return charts, nil -} - -func calcContextEvery(charts, contexts int) int { - if contexts <= 1 { - return 0 - } - if contexts > charts { - return 1 - } - return charts / contexts -} diff --git a/src/go/plugin/go.d/modules/example/testdata/config.json b/src/go/plugin/go.d/modules/example/testdata/config.json deleted file mode 100644 index af06e85ac..000000000 --- a/src/go/plugin/go.d/modules/example/testdata/config.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "update_every": 123, - "charts": { - "type": "ok", - "num": 123, - "contexts": 123, - "dimensions": 123, - "labels": 123 - }, - "hidden_charts": { - "type": "ok", - "num": 123, - "contexts": 123, - "dimensions": 123, - "labels": 123 - } -} diff --git a/src/go/plugin/go.d/modules/example/testdata/config.yaml b/src/go/plugin/go.d/modules/example/testdata/config.yaml deleted file mode 100644 index a5f6556fd..000000000 --- a/src/go/plugin/go.d/modules/example/testdata/config.yaml +++ /dev/null @@ -1,13 +0,0 @@ -update_every: 123 -charts: - type: "ok" - num: 123 - contexts: 123 - dimensions: 123 - labels: 123 -hidden_charts: - type: "ok" - num: 123 - contexts: 123 - dimensions: 123 - labels: 123 diff --git a/src/go/plugin/go.d/modules/exim/config_schema.json b/src/go/plugin/go.d/modules/exim/config_schema.json index 6561ea34f..ed98b2f8a 100644 --- a/src/go/plugin/go.d/modules/exim/config_schema.json +++ b/src/go/plugin/go.d/modules/exim/config_schema.json @@ -19,7 +19,6 @@ "default": 2 } }, - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/exim/exim.go 
b/src/go/plugin/go.d/modules/exim/exim.go index f3c3e6e78..ae0cb73ea 100644 --- a/src/go/plugin/go.d/modules/exim/exim.go +++ b/src/go/plugin/go.d/modules/exim/exim.go @@ -5,10 +5,11 @@ package exim import ( _ "embed" "errors" + "fmt" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" ) //go:embed "config_schema.json" @@ -28,15 +29,15 @@ func init() { func New() *Exim { return &Exim{ Config: Config{ - Timeout: web.Duration(time.Second * 2), + Timeout: confopt.Duration(time.Second * 2), }, charts: charts.Copy(), } } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + Timeout confopt.Duration `yaml:"timeout,omitempty" json:"timeout"` } type Exim struct { @@ -55,8 +56,7 @@ func (e *Exim) Configuration() any { func (e *Exim) Init() error { exim, err := e.initEximExec() if err != nil { - e.Errorf("exim exec initialization: %v", err) - return err + return fmt.Errorf("exim exec initialization: %v", err) } e.exec = exim @@ -66,7 +66,6 @@ func (e *Exim) Init() error { func (e *Exim) Check() error { mx, err := e.collect() if err != nil { - e.Error(err) return err } diff --git a/src/go/plugin/go.d/modules/exim/integrations/exim.md b/src/go/plugin/go.d/modules/exim/integrations/exim.md index 78f45683c..837f43b3c 100644 --- a/src/go/plugin/go.d/modules/exim/integrations/exim.md +++ b/src/go/plugin/go.d/modules/exim/integrations/exim.md @@ -88,8 +88,8 @@ No action required. The configuration file name for this integration is `go.d/exim.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/fail2ban/charts.go b/src/go/plugin/go.d/modules/fail2ban/charts.go index 3015c7388..28d40b99e 100644 --- a/src/go/plugin/go.d/modules/fail2ban/charts.go +++ b/src/go/plugin/go.d/modules/fail2ban/charts.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux + package fail2ban import ( diff --git a/src/go/plugin/go.d/modules/fail2ban/collect.go b/src/go/plugin/go.d/modules/fail2ban/collect.go index 8ca413c3b..b7778b2ff 100644 --- a/src/go/plugin/go.d/modules/fail2ban/collect.go +++ b/src/go/plugin/go.d/modules/fail2ban/collect.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux + package fail2ban import ( diff --git a/src/go/plugin/go.d/modules/fail2ban/config_schema.json b/src/go/plugin/go.d/modules/fail2ban/config_schema.json index 7fd0d91af..a6cfc7ced 100644 --- a/src/go/plugin/go.d/modules/fail2ban/config_schema.json +++ b/src/go/plugin/go.d/modules/fail2ban/config_schema.json @@ -19,7 +19,6 @@ "default": 2 } }, - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/fail2ban/doc.go b/src/go/plugin/go.d/modules/fail2ban/doc.go new file mode 100644 index 000000000..caab6b754 --- /dev/null +++ b/src/go/plugin/go.d/modules/fail2ban/doc.go @@ -0,0 +1,3 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package fail2ban diff --git a/src/go/plugin/go.d/modules/fail2ban/exec.go b/src/go/plugin/go.d/modules/fail2ban/exec.go index b3037a6cf..3828461ad 100644 --- a/src/go/plugin/go.d/modules/fail2ban/exec.go +++ b/src/go/plugin/go.d/modules/fail2ban/exec.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux + package fail2ban import ( diff --git a/src/go/plugin/go.d/modules/fail2ban/fail2ban.go b/src/go/plugin/go.d/modules/fail2ban/fail2ban.go index 45dcb6e2e..1d19eeb2e 100644 --- a/src/go/plugin/go.d/modules/fail2ban/fail2ban.go +++ b/src/go/plugin/go.d/modules/fail2ban/fail2ban.go @@ -1,14 +1,17 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux + package fail2ban import ( _ "embed" "errors" + "fmt" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" ) //go:embed "config_schema.json" @@ -28,7 +31,7 @@ func init() { func New() *Fail2Ban { return &Fail2Ban{ Config: Config{ - Timeout: web.Duration(time.Second * 2), + Timeout: confopt.Duration(time.Second * 2), }, charts: &module.Charts{}, discoverEvery: time.Minute * 5, @@ -37,8 +40,8 @@ func New() *Fail2Ban { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + Timeout confopt.Duration `yaml:"timeout,omitempty" json:"timeout"` } type ( @@ -70,8 +73,7 @@ func (f *Fail2Ban) Configuration() any { func (f *Fail2Ban) Init() error { f2bClientExec, err := f.initFail2banClientCliExec() if err != nil { - f.Errorf("fail2ban-client exec initialization: %v", err) - return err + return fmt.Errorf("fail2ban-client exec initialization: %v", err) } f.exec = f2bClientExec @@ -81,7 +83,6 @@ func (f *Fail2Ban) Init() error { func (f *Fail2Ban) Check() error { mx, err := f.collect() if err != nil { - f.Error(err) return err } diff --git a/src/go/plugin/go.d/modules/fail2ban/fail2ban_test.go 
b/src/go/plugin/go.d/modules/fail2ban/fail2ban_test.go index ae84959bd..ada39fdfa 100644 --- a/src/go/plugin/go.d/modules/fail2ban/fail2ban_test.go +++ b/src/go/plugin/go.d/modules/fail2ban/fail2ban_test.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux + package fail2ban import ( @@ -170,30 +172,16 @@ func TestFail2Ban_Collect(t *testing.T) { mx := f2b.Collect() assert.Equal(t, test.wantMetrics, mx) + if len(test.wantMetrics) > 0 { - assert.Len(t, *f2b.Charts(), len(jailChartsTmpl)*2) - testMetricsHasAllChartsDims(t, f2b, mx) + assert.Len(t, *f2b.Charts(), len(jailChartsTmpl)*2, "wantCharts") + + module.TestMetricsHasAllChartsDims(t, f2b.Charts(), mx) } }) } } -func testMetricsHasAllChartsDims(t *testing.T, f2b *Fail2Ban, mx map[string]int64) { - for _, chart := range *f2b.Charts() { - if chart.Obsolete { - continue - } - for _, dim := range chart.Dims { - _, ok := mx[dim.ID] - assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) - } - for _, v := range chart.Vars { - _, ok := mx[v.ID] - assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) - } - } -} - func prepareMockOk() *mockFail2BanClientCliExec { return &mockFail2BanClientCliExec{ statusData: dataStatus, diff --git a/src/go/plugin/go.d/modules/fail2ban/init.go b/src/go/plugin/go.d/modules/fail2ban/init.go index ab963616c..e2209c1ee 100644 --- a/src/go/plugin/go.d/modules/fail2ban/init.go +++ b/src/go/plugin/go.d/modules/fail2ban/init.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux + package fail2ban import ( diff --git a/src/go/plugin/go.d/modules/fail2ban/integrations/fail2ban.md b/src/go/plugin/go.d/modules/fail2ban/integrations/fail2ban.md index 0b9679256..890899948 100644 --- a/src/go/plugin/go.d/modules/fail2ban/integrations/fail2ban.md +++ b/src/go/plugin/go.d/modules/fail2ban/integrations/fail2ban.md @@ -26,7 +26,9 @@ This collector tracks two main metrics for each jail: currently banned IPs and a -This collector is supported on all platforms. +This collector is only supported on the following platforms: + +- Linux This collector only supports collecting metrics from a single instance of this integration. @@ -101,8 +103,8 @@ There are no alerts configured by default for this integration. The configuration file name for this integration is `go.d/fail2ban.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/fail2ban/metadata.yaml b/src/go/plugin/go.d/modules/fail2ban/metadata.yaml index 922b4e5ad..d4e919f46 100644 --- a/src/go/plugin/go.d/modules/fail2ban/metadata.yaml +++ b/src/go/plugin/go.d/modules/fail2ban/metadata.yaml @@ -30,7 +30,7 @@ modules: This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management. 
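The fail2ban hunks above follow a platform-gating pattern: every implementation file gains a `//go:build linux` constraint, while the new tag-free `doc.go` keeps the package compiling (as an empty package) on other platforms. A minimal sketch of one gated file:

```go
//go:build linux

// Files carrying this constraint (charts.go, collect.go, exec.go,
// fail2ban.go, init.go, and fail2ban_test.go in the hunks above) are
// compiled only on Linux; doc.go has no constraint, so the package
// still exists on every other platform.
package fail2ban
```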
method_description: "" supported_platforms: - include: [] + include: [Linux] exclude: [] multi_instance: false additional_permissions: diff --git a/src/go/plugin/go.d/modules/filecheck/config_schema.json b/src/go/plugin/go.d/modules/filecheck/config_schema.json index c64bb941f..859f3e5a9 100644 --- a/src/go/plugin/go.d/modules/filecheck/config_schema.json +++ b/src/go/plugin/go.d/modules/filecheck/config_schema.json @@ -107,7 +107,6 @@ ] } }, - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/filecheck/filecheck.go b/src/go/plugin/go.d/modules/filecheck/filecheck.go index 8d19c7c64..a6d0dfecd 100644 --- a/src/go/plugin/go.d/modules/filecheck/filecheck.go +++ b/src/go/plugin/go.d/modules/filecheck/filecheck.go @@ -4,11 +4,12 @@ package filecheck import ( _ "embed" + "fmt" "time" + "github.com/netdata/netdata/go/plugins/pkg/matcher" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" ) //go:embed "config_schema.json" @@ -28,7 +29,7 @@ func init() { func New() *Filecheck { return &Filecheck{ Config: Config{ - DiscoveryEvery: web.Duration(time.Minute * 1), + DiscoveryEvery: confopt.Duration(time.Minute * 1), Files: filesConfig{}, Dirs: dirsConfig{CollectDirSize: false}, }, @@ -40,10 +41,10 @@ func New() *Filecheck { type ( Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - DiscoveryEvery web.Duration `yaml:"discovery_every,omitempty" json:"discovery_every"` - Files filesConfig `yaml:"files" json:"files"` - Dirs dirsConfig `yaml:"dirs" json:"dirs"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + DiscoveryEvery confopt.Duration `yaml:"discovery_every,omitempty" json:"discovery_every"` + Files filesConfig `yaml:"files" json:"files"` + Dirs dirsConfig `yaml:"dirs" json:"dirs"` } filesConfig struct { Include []string `yaml:"include" json:"include"` @@ -80,21 +81,18 @@ func (f *Filecheck) Configuration() any { func (f *Filecheck) Init() error { err := f.validateConfig() if err != nil { - f.Errorf("config validation: %v", err) - return err + return fmt.Errorf("config validation: %v", err) } ff, err := f.initFilesFilter() if err != nil { - f.Errorf("files filter initialization: %v", err) - return err + return fmt.Errorf("files filter initialization: %v", err) } f.filesFilter = ff df, err := f.initDirsFilter() if err != nil { - f.Errorf("dirs filter initialization: %v", err) - return err + return fmt.Errorf("dirs filter initialization: %v", err) } f.dirsFilter = df diff --git a/src/go/plugin/go.d/modules/filecheck/filecheck_test.go b/src/go/plugin/go.d/modules/filecheck/filecheck_test.go index 43024b0bc..76777c854 100644 --- a/src/go/plugin/go.d/modules/filecheck/filecheck_test.go +++ b/src/go/plugin/go.d/modules/filecheck/filecheck_test.go @@ -244,21 +244,11 @@ func TestFilecheck_Collect(t *testing.T) { mx := fc.Collect() copyModTime(test.wantCollected, mx) + assert.Equal(t, test.wantCollected, mx) - testMetricsHasAllChartsDims(t, fc, mx) - }) - } -} -func testMetricsHasAllChartsDims(t *testing.T, fc *Filecheck, mx map[string]int64) { - for _, chart := range *fc.Charts() { - if chart.Obsolete { - continue - } - for _, dim := range chart.Dims { - _, ok := mx[dim.ID] - assert.Truef(t, ok, "mx metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) - } + 
module.TestMetricsHasAllChartsDims(t, fc.Charts(), mx) + }) } } diff --git a/src/go/plugin/go.d/modules/filecheck/init.go b/src/go/plugin/go.d/modules/filecheck/init.go index 20b30964f..2168e2af8 100644 --- a/src/go/plugin/go.d/modules/filecheck/init.go +++ b/src/go/plugin/go.d/modules/filecheck/init.go @@ -5,7 +5,7 @@ package filecheck import ( "errors" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher" + "github.com/netdata/netdata/go/plugins/pkg/matcher" ) func (f *Filecheck) validateConfig() error { diff --git a/src/go/plugin/go.d/modules/filecheck/integrations/files_and_directories.md b/src/go/plugin/go.d/modules/filecheck/integrations/files_and_directories.md index ed131a125..f5bf1892d 100644 --- a/src/go/plugin/go.d/modules/filecheck/integrations/files_and_directories.md +++ b/src/go/plugin/go.d/modules/filecheck/integrations/files_and_directories.md @@ -113,8 +113,8 @@ No action required. The configuration file name for this integration is `go.d/filecheck.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/fluentd/apiclient.go b/src/go/plugin/go.d/modules/fluentd/apiclient.go index 1c6bf85a9..8996dec45 100644 --- a/src/go/plugin/go.d/modules/fluentd/apiclient.go +++ b/src/go/plugin/go.d/modules/fluentd/apiclient.go @@ -3,9 +3,7 @@ package fluentd import ( - "encoding/json" "fmt" - "io" "net/http" "net/url" "path" @@ -40,13 +38,13 @@ func (p pluginData) hasBufferTotalQueuedSize() bool { return p.BufferTotalQueuedSize != nil } -func newAPIClient(client *http.Client, request web.Request) *apiClient { +func newAPIClient(client *http.Client, request web.RequestConfig) *apiClient { return &apiClient{httpClient: client, request: request} } type apiClient struct { httpClient *http.Client - request web.Request + request web.RequestConfig } func (a apiClient) getPluginsInfo() (*pluginsInfo, error) { @@ -55,32 +53,14 @@ func (a apiClient) getPluginsInfo() (*pluginsInfo, error) { return nil, fmt.Errorf("error on creating request : %v", err) } - resp, err := a.doRequestOK(req) - defer closeBody(resp) - if err != nil { - return nil, err - } - var info pluginsInfo - if err = json.NewDecoder(resp.Body).Decode(&info); err != nil { - return nil, fmt.Errorf("error on decoding response from %s : %v", req.URL, err) + if err := web.DoHTTP(a.httpClient).RequestJSON(req, &info); err != nil { + return nil, fmt.Errorf("error on decoding request : %v", err) } return &info, nil } -func (a apiClient) doRequestOK(req *http.Request) (*http.Response, error) { - resp, err := a.httpClient.Do(req) - if err != nil { - return nil, fmt.Errorf("error on request: %v", err) - } - - if resp.StatusCode != http.StatusOK { - return resp, fmt.Errorf("%s returned HTTP status %d", req.URL, resp.StatusCode) - } - return resp, nil -} - func (a apiClient) createRequest(urlPath string) (*http.Request, error) { req := a.request.Copy() u, err := url.Parse(req.URL) @@ -92,10 +72,3 @@ func (a apiClient) createRequest(urlPath 
string) (*http.Request, error) { req.URL = u.String() return web.NewHTTPRequest(req) } - -func closeBody(resp *http.Response) { - if resp != nil && resp.Body != nil { - _, _ = io.Copy(io.Discard, resp.Body) - _ = resp.Body.Close() - } -} diff --git a/src/go/plugin/go.d/modules/fluentd/config_schema.json b/src/go/plugin/go.d/modules/fluentd/config_schema.json index 037420f74..c680d79e4 100644 --- a/src/go/plugin/go.d/modules/fluentd/config_schema.json +++ b/src/go/plugin/go.d/modules/fluentd/config_schema.json @@ -105,7 +105,6 @@ "required": [ "url" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/fluentd/fluentd.go b/src/go/plugin/go.d/modules/fluentd/fluentd.go index 467edaac8..548015e27 100644 --- a/src/go/plugin/go.d/modules/fluentd/fluentd.go +++ b/src/go/plugin/go.d/modules/fluentd/fluentd.go @@ -5,10 +5,12 @@ package fluentd import ( _ "embed" "errors" + "fmt" "time" + "github.com/netdata/netdata/go/plugins/pkg/matcher" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -26,12 +28,12 @@ func init() { func New() *Fluentd { return &Fluentd{ Config: Config{ - HTTP: web.HTTP{ - Request: web.Request{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{ URL: "http://127.0.0.1:24220", }, - Client: web.Client{ - Timeout: web.Duration(time.Second), + ClientConfig: web.ClientConfig{ + Timeout: confopt.Duration(time.Second), }, }}, activePlugins: make(map[string]bool), @@ -40,9 +42,9 @@ func New() *Fluentd { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - web.HTTP `yaml:",inline" json:""` - PermitPlugin string `yaml:"permit_plugin_id,omitempty" json:"permit_plugin_id"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + web.HTTPConfig `yaml:",inline" json:""` + PermitPlugin string `yaml:"permit_plugin_id,omitempty" json:"permit_plugin_id"` } type Fluentd struct { @@ -63,21 +65,18 @@ func (f *Fluentd) Configuration() any { func (f *Fluentd) Init() error { if err := f.validateConfig(); err != nil { - f.Error(err) - return err + return fmt.Errorf("invalid config: %v", err) } pm, err := f.initPermitPluginMatcher() if err != nil { - f.Error(err) - return err + return fmt.Errorf("init permit_plugin_id: %v", err) } f.permitPlugin = pm client, err := f.initApiClient() if err != nil { - f.Error(err) - return err + return fmt.Errorf("init api client: %v", err) } f.apiClient = client @@ -90,7 +89,6 @@ func (f *Fluentd) Init() error { func (f *Fluentd) Check() error { mx, err := f.collect() if err != nil { - f.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/fluentd/init.go b/src/go/plugin/go.d/modules/fluentd/init.go index 6ee71c0a6..2e3d53260 100644 --- a/src/go/plugin/go.d/modules/fluentd/init.go +++ b/src/go/plugin/go.d/modules/fluentd/init.go @@ -5,7 +5,7 @@ package fluentd import ( "errors" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher" + "github.com/netdata/netdata/go/plugins/pkg/matcher" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -26,10 +26,10 @@ func (f *Fluentd) initPermitPluginMatcher() (matcher.Matcher, error) { } func (f *Fluentd) initApiClient() (*apiClient, error) { - client, err := web.NewHTTPClient(f.Client) + client, err := web.NewHTTPClient(f.ClientConfig) if err != 
nil { return nil, err } - return newAPIClient(client, f.Request), nil + return newAPIClient(client, f.RequestConfig), nil } diff --git a/src/go/plugin/go.d/modules/fluentd/integrations/fluentd.md b/src/go/plugin/go.d/modules/fluentd/integrations/fluentd.md index b4740a77a..fbcea02c2 100644 --- a/src/go/plugin/go.d/modules/fluentd/integrations/fluentd.md +++ b/src/go/plugin/go.d/modules/fluentd/integrations/fluentd.md @@ -92,8 +92,8 @@ To enable monitor agent, follow the [official documentation](https://docs.fluent The configuration file name for this integration is `go.d/fluentd.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/freeradius/config_schema.json b/src/go/plugin/go.d/modules/freeradius/config_schema.json index 7e1a3a4e9..2ad175a5a 100644 --- a/src/go/plugin/go.d/modules/freeradius/config_schema.json +++ b/src/go/plugin/go.d/modules/freeradius/config_schema.json @@ -41,7 +41,6 @@ "port", "secret" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/freeradius/freeradius.go b/src/go/plugin/go.d/modules/freeradius/freeradius.go index e3c995b5e..46aa06195 100644 --- a/src/go/plugin/go.d/modules/freeradius/freeradius.go +++ b/src/go/plugin/go.d/modules/freeradius/freeradius.go @@ -5,11 +5,12 @@ package freeradius import ( _ "embed" "errors" + "fmt" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/freeradius/api" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" ) //go:embed "config_schema.json" @@ -29,17 +30,17 @@ func New() *FreeRADIUS { Address: "127.0.0.1", Port: 18121, Secret: "adminsecret", - Timeout: web.Duration(time.Second), + Timeout: confopt.Duration(time.Second), }, } } type Config struct { - UpdateEvery int `yaml:"update_every" json:"update_every"` - Address string `yaml:"address" json:"address"` - Port int `yaml:"port" json:"port"` - Secret string `yaml:"secret" json:"secret"` - Timeout web.Duration `yaml:"timeout" json:"timeout"` + UpdateEvery int `yaml:"update_every" json:"update_every"` + Address string `yaml:"address" json:"address"` + Port int `yaml:"port" json:"port"` + Secret string `yaml:"secret" json:"secret"` + Timeout confopt.Duration `yaml:"timeout" json:"timeout"` } type ( @@ -60,8 +61,7 @@ func (f *FreeRADIUS) Configuration() any { func (f *FreeRADIUS) Init() error { if err := f.validateConfig(); err != nil { - f.Errorf("config validation: %v", err) - return err + return fmt.Errorf("config validation: %v", err) } f.client = api.New(api.Config{ @@ -77,7 +77,6 @@ func (f *FreeRADIUS) Init() error { func (f *FreeRADIUS) Check() error { mx, err := f.collect() if err != nil { - f.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/freeradius/freeradius_test.go b/src/go/plugin/go.d/modules/freeradius/freeradius_test.go 
index 58e2dce59..21ad25517 100644 --- a/src/go/plugin/go.d/modules/freeradius/freeradius_test.go +++ b/src/go/plugin/go.d/modules/freeradius/freeradius_test.go @@ -117,10 +117,10 @@ func TestFreeRADIUS_Collect(t *testing.T) { "proxy-acct-dropped-requests": 33, "proxy-acct-unknown-types": 34, } - collected := freeRADIUS.Collect() + mx := freeRADIUS.Collect() - assert.Equal(t, expected, collected) - ensureCollectedHasAllChartsDimsVarsIDs(t, freeRADIUS, collected) + assert.Equal(t, expected, mx) + module.TestMetricsHasAllChartsDims(t, freeRADIUS.Charts(), mx) } func TestFreeRADIUS_Collect_ReturnsNilIfClientStatusReturnsError(t *testing.T) { @@ -134,19 +134,6 @@ func TestFreeRADIUS_Cleanup(t *testing.T) { New().Cleanup() } -func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, f *FreeRADIUS, collected map[string]int64) { - for _, chart := range *f.Charts() { - for _, dim := range chart.Dims { - _, ok := collected[dim.ID] - assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) - } - for _, v := range chart.Vars { - _, ok := collected[v.ID] - assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) - } - } -} - func newOKMockClient() *mockClient { return &mockClient{} } diff --git a/src/go/plugin/go.d/modules/freeradius/integrations/freeradius.md b/src/go/plugin/go.d/modules/freeradius/integrations/freeradius.md index 59b124f7e..57f5a5653 100644 --- a/src/go/plugin/go.d/modules/freeradius/integrations/freeradius.md +++ b/src/go/plugin/go.d/modules/freeradius/integrations/freeradius.md @@ -102,8 +102,8 @@ To enable status server, follow the [official documentation](https://wiki.freera The configuration file name for this integration is `go.d/freeradius.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/gearman/client.go b/src/go/plugin/go.d/modules/gearman/client.go index dff9a1be4..c42b2b9bd 100644 --- a/src/go/plugin/go.d/modules/gearman/client.go +++ b/src/go/plugin/go.d/modules/gearman/client.go @@ -19,10 +19,8 @@ type gearmanConn interface { func newGearmanConn(conf Config) gearmanConn { return &gearmanClient{conn: socket.New(socket.Config{ - Address: conf.Address, - ConnectTimeout: conf.Timeout.Duration(), - ReadTimeout: conf.Timeout.Duration(), - WriteTimeout: conf.Timeout.Duration(), + Address: conf.Address, + Timeout: conf.Timeout.Duration(), })} } diff --git a/src/go/plugin/go.d/modules/gearman/config_schema.json b/src/go/plugin/go.d/modules/gearman/config_schema.json index dd5d3a0b8..9e0285a2b 100644 --- a/src/go/plugin/go.d/modules/gearman/config_schema.json +++ b/src/go/plugin/go.d/modules/gearman/config_schema.json @@ -28,7 +28,6 @@ "required": [ "address" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/gearman/gearman.go b/src/go/plugin/go.d/modules/gearman/gearman.go index e1780a95c..027cc4c3a 100644 --- a/src/go/plugin/go.d/modules/gearman/gearman.go +++ b/src/go/plugin/go.d/modules/gearman/gearman.go @@ -8,7 +8,7 @@ import ( "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" ) //go:embed "config_schema.json" @@ -26,7 +26,7 @@ func New() *Gearman { return &Gearman{ Config: Config{ Address: "127.0.0.1:4730", - Timeout: web.Duration(time.Second * 1), + Timeout: confopt.Duration(time.Second * 1), }, newConn: newGearmanConn, charts: summaryCharts.Copy(), @@ -36,9 +36,9 @@ func New() *Gearman { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - Address string `yaml:"address" json:"address"` - Timeout web.Duration `yaml:"timeout" json:"timeout"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + Address string `yaml:"address" json:"address"` + Timeout confopt.Duration `yaml:"timeout" json:"timeout"` } type Gearman struct { @@ -60,8 +60,7 @@ func (g *Gearman) Configuration() any { func (g *Gearman) Init() error { if g.Address == "" { - g.Error("config: 'address' not set") - return errors.New("address not set") + return errors.New("config: 'address' not set") } return nil @@ -70,7 +69,6 @@ func (g *Gearman) Init() error { func (g *Gearman) Check() error { mx, err := g.collect() if err != nil { - g.Error(err) return err } diff --git a/src/go/plugin/go.d/modules/gearman/integrations/gearman.md b/src/go/plugin/go.d/modules/gearman/integrations/gearman.md index 0a97a4cd4..929aaa695 100644 --- a/src/go/plugin/go.d/modules/gearman/integrations/gearman.md +++ b/src/go/plugin/go.d/modules/gearman/integrations/gearman.md @@ -110,8 +110,8 @@ No action required. The configuration file name for this integration is `go.d/gearman.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/geth/config_schema.json b/src/go/plugin/go.d/modules/geth/config_schema.json index 00b3071d0..f117d6ec0 100644 --- a/src/go/plugin/go.d/modules/geth/config_schema.json +++ b/src/go/plugin/go.d/modules/geth/config_schema.json @@ -105,7 +105,6 @@ "required": [ "url" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/geth/geth.go b/src/go/plugin/go.d/modules/geth/geth.go index 6448965f5..aace9cd1c 100644 --- a/src/go/plugin/go.d/modules/geth/geth.go +++ b/src/go/plugin/go.d/modules/geth/geth.go @@ -5,9 +5,11 @@ package geth import ( _ "embed" "errors" + "fmt" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -26,12 +28,12 @@ func init() { func New() *Geth { return &Geth{ Config: Config{ - HTTP: web.HTTP{ - Request: web.Request{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{ URL: "http://127.0.0.1:6060/debug/metrics/prometheus", }, - Client: web.Client{ - Timeout: web.Duration(time.Second), + ClientConfig: web.ClientConfig{ + Timeout: confopt.Duration(time.Second), }, }, }, @@ -40,8 +42,8 @@ func New() *Geth { } type Config struct { - web.HTTP `yaml:",inline" json:""` - UpdateEvery int `yaml:"update_every" json:"update_every"` + web.HTTPConfig `yaml:",inline" json:""` + UpdateEvery int `yaml:"update_every" json:"update_every"` } type Geth struct { @@ -59,14 +61,12 @@ func (g *Geth) Configuration() any { func (g *Geth) Init() error { if err := g.validateConfig(); err != nil { - g.Errorf("error on validating config: %g", err) - return err + return fmt.Errorf("error on validating config: %v", err) } prom, err := g.initPrometheusClient() if err != nil { - g.Error(err) - return err + return fmt.Errorf("error on initializing prometheus client: %v", err) } g.prom = prom @@ -76,7 +76,6 @@ func (g *Geth) Check() error { mx, err := g.collect() if err != nil { - g.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/geth/init.go b/src/go/plugin/go.d/modules/geth/init.go index da908560e..cfa06fdea 100644 --- a/src/go/plugin/go.d/modules/geth/init.go +++ b/src/go/plugin/go.d/modules/geth/init.go @@ -15,10 +15,10 @@ func (g *Geth) validateConfig() error { } func (g *Geth) initPrometheusClient() (prometheus.Prometheus, error) { - client, err := web.NewHTTPClient(g.Client) + client, err := web.NewHTTPClient(g.ClientConfig) if err != nil { return nil, err } - return prometheus.New(client, g.Request), nil + return prometheus.New(client, g.RequestConfig), nil } diff --git a/src/go/plugin/go.d/modules/geth/integrations/go-ethereum.md b/src/go/plugin/go.d/modules/geth/integrations/go-ethereum.md index 86f830529..d315a8011 100644 --- a/src/go/plugin/go.d/modules/geth/integrations/go-ethereum.md +++ b/src/go/plugin/go.d/modules/geth/integrations/go-ethereum.md @@ -102,8 +102,8 @@ No action required.
The configuration file name for this integration is `go.d/geth.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/haproxy/config_schema.json b/src/go/plugin/go.d/modules/haproxy/config_schema.json index 6a794145e..3b077e38f 100644 --- a/src/go/plugin/go.d/modules/haproxy/config_schema.json +++ b/src/go/plugin/go.d/modules/haproxy/config_schema.json @@ -105,7 +105,6 @@ "required": [ "url" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/haproxy/haproxy.go b/src/go/plugin/go.d/modules/haproxy/haproxy.go index 0e3f9f3d1..27c64e270 100644 --- a/src/go/plugin/go.d/modules/haproxy/haproxy.go +++ b/src/go/plugin/go.d/modules/haproxy/haproxy.go @@ -5,9 +5,11 @@ package haproxy import ( _ "embed" "errors" + "fmt" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -26,12 +28,12 @@ func init() { func New() *Haproxy { return &Haproxy{ Config: Config{ - HTTP: web.HTTP{ - Request: web.Request{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{ URL: "http://127.0.0.1:8404/metrics", }, - Client: web.Client{ - Timeout: web.Duration(time.Second), + ClientConfig: web.ClientConfig{ + Timeout: confopt.Duration(time.Second), }, }, }, @@ -43,8 +45,8 @@ func New() *Haproxy { } type Config struct { - web.HTTP `yaml:",inline" json:""` - UpdateEvery int `yaml:"update_every" json:"update_every"` + web.HTTPConfig `yaml:",inline" json:""` + UpdateEvery int `yaml:"update_every" json:"update_every"` } type Haproxy struct { @@ -65,14 +67,12 @@ func (h *Haproxy) Configuration() any { func (h *Haproxy) Init() error { if err := h.validateConfig(); err != nil { - h.Errorf("config validation: %v", err) - return err + return fmt.Errorf("config validation: %v", err) } prom, err := h.initPrometheusClient() if err != nil { - h.Errorf("prometheus client initialization: %v", err) - return err + return fmt.Errorf("prometheus client initialization: %v", err) } h.prom = prom @@ -82,7 +82,6 @@ func (h *Haproxy) Init() error { func (h *Haproxy) Check() error { mx, err := h.collect() if err != nil { - h.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/haproxy/haproxy_test.go b/src/go/plugin/go.d/modules/haproxy/haproxy_test.go index 80a733ffb..1cf915cf0 100644 --- a/src/go/plugin/go.d/modules/haproxy/haproxy_test.go +++ b/src/go/plugin/go.d/modules/haproxy/haproxy_test.go @@ -47,15 +47,15 @@ func TestHaproxy_Init(t *testing.T) { }, "fails on unset 'url'": { wantFail: true, - config: Config{HTTP: web.HTTP{ - Request: web.Request{}, + config: Config{HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{}, }}, }, "fails on invalid TLSCA": { wantFail: true, config: Config{ - HTTP: web.HTTP{ - Client: web.Client{ + 
HTTPConfig: web.HTTPConfig{ + ClientConfig: web.ClientConfig{ TLSConfig: tlscfg.TLSConfig{TLSCA: "testdata/tls"}, }, }}, @@ -173,11 +173,11 @@ func TestHaproxy_Collect(t *testing.T) { h, cleanup := test.prepare(t) defer cleanup() - ms := h.Collect() + mx := h.Collect() - assert.Equal(t, test.wantCollected, ms) + assert.Equal(t, test.wantCollected, mx) if len(test.wantCollected) > 0 { - ensureCollectedHasAllChartsDimsVarsIDs(t, h, ms) + module.TestMetricsHasAllChartsDims(t, h.Charts(), mx) } }) } @@ -245,19 +245,3 @@ func prepareCaseConnectionRefused(t *testing.T) (*Haproxy, func()) { return h, func() {} } - -func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, h *Haproxy, ms map[string]int64) { - for _, chart := range *h.Charts() { - if chart.Obsolete { - continue - } - for _, dim := range chart.Dims { - _, ok := ms[dim.ID] - assert.Truef(t, ok, "chart '%s' dim '%s': no dim in collected", dim.ID, chart.ID) - } - for _, v := range chart.Vars { - _, ok := ms[v.ID] - assert.Truef(t, ok, "chart '%s' dim '%s': no dim in collected", v.ID, chart.ID) - } - } -} diff --git a/src/go/plugin/go.d/modules/haproxy/init.go b/src/go/plugin/go.d/modules/haproxy/init.go index 0922a9b2d..4f697c423 100644 --- a/src/go/plugin/go.d/modules/haproxy/init.go +++ b/src/go/plugin/go.d/modules/haproxy/init.go @@ -14,19 +14,19 @@ func (h *Haproxy) validateConfig() error { if h.URL == "" { return errors.New("'url' is not set") } - if _, err := web.NewHTTPRequest(h.Request); err != nil { + if _, err := web.NewHTTPRequest(h.RequestConfig); err != nil { return err } return nil } func (h *Haproxy) initPrometheusClient() (prometheus.Prometheus, error) { - httpClient, err := web.NewHTTPClient(h.Client) + httpClient, err := web.NewHTTPClient(h.ClientConfig) if err != nil { return nil, err } - prom := prometheus.NewWithSelector(httpClient, h.Request, sr) + prom := prometheus.NewWithSelector(httpClient, h.RequestConfig, sr) return prom, nil } diff --git a/src/go/plugin/go.d/modules/haproxy/integrations/haproxy.md b/src/go/plugin/go.d/modules/haproxy/integrations/haproxy.md index 1619b9d70..6c27bed09 100644 --- a/src/go/plugin/go.d/modules/haproxy/integrations/haproxy.md +++ b/src/go/plugin/go.d/modules/haproxy/integrations/haproxy.md @@ -107,8 +107,8 @@ To enable PROMEX addon, follow the [official documentation](https://github.com/h The configuration file name for this integration is `go.d/haproxy.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
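The gearman, geth, and haproxy hunks above (and most modules below) apply one mechanical rename: web.HTTP becomes web.HTTPConfig, web.Request becomes web.RequestConfig, web.Client becomes web.ClientConfig, and web.Duration moves to confopt.Duration. Assembled from pieces shown in these hunks, a post-patch module config is shaped like this (module name and default URL are illustrative):

```go
package haproxy

import (
	"time"

	"github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt"
	"github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
)

// Config embeds the renamed web.HTTPConfig. Note that the YAML/JSON tags
// are untouched, so on-disk configuration files keep working; only the
// Go type names change.
type Config struct {
	web.HTTPConfig `yaml:",inline" json:""`
	UpdateEvery    int `yaml:"update_every" json:"update_every"`
}

func defaultConfig() Config {
	return Config{
		HTTPConfig: web.HTTPConfig{
			RequestConfig: web.RequestConfig{URL: "http://127.0.0.1:8404/metrics"},
			ClientConfig:  web.ClientConfig{Timeout: confopt.Duration(time.Second)},
		},
	}
}
```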
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/hddtemp/client.go b/src/go/plugin/go.d/modules/hddtemp/client.go index b89be10a2..d289e3a8a 100644 --- a/src/go/plugin/go.d/modules/hddtemp/client.go +++ b/src/go/plugin/go.d/modules/hddtemp/client.go @@ -3,42 +3,47 @@ package hddtemp import ( + "time" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/socket" ) -func newHddTempConn(conf Config) hddtempConn { - return &hddtempClient{conn: socket.New(socket.Config{ - Address: conf.Address, - ConnectTimeout: conf.Timeout.Duration(), - ReadTimeout: conf.Timeout.Duration(), - WriteTimeout: conf.Timeout.Duration(), - })} +type hddtempConn interface { + queryHddTemp() (string, error) } -type hddtempClient struct { - conn socket.Client -} - -func (c *hddtempClient) connect() error { - return c.conn.Connect() +func newHddTempConn(conf Config) hddtempConn { + return &hddtempClient{ + address: conf.Address, + timeout: conf.Timeout.Duration(), + } } -func (c *hddtempClient) disconnect() { - _ = c.conn.Disconnect() +type hddtempClient struct { + address string + timeout time.Duration } func (c *hddtempClient) queryHddTemp() (string, error) { var i int var s string - err := c.conn.Command("", func(bytes []byte) bool { + + cfg := socket.Config{ + Address: c.address, + Timeout: c.timeout, + } + + err := socket.ConnectAndRead(cfg, func(bs []byte) bool { if i++; i > 1 { return false } - s = string(bytes) + s = string(bs) return true + }) if err != nil { return "", err } + return s, nil } diff --git a/src/go/plugin/go.d/modules/hddtemp/collect.go b/src/go/plugin/go.d/modules/hddtemp/collect.go index f5c75db04..8e0ebf486 100644 --- a/src/go/plugin/go.d/modules/hddtemp/collect.go +++ b/src/go/plugin/go.d/modules/hddtemp/collect.go @@ -17,15 +17,7 @@ type diskStats struct { } func (h *HddTemp) collect() (map[string]int64, error) { - conn := h.newHddTempConn(h.Config) - - if err := conn.connect(); err != nil { - return nil, err - } - - defer conn.disconnect() - - msg, err := conn.queryHddTemp() + msg, err := h.conn.queryHddTemp() if err != nil { return nil, err } diff --git a/src/go/plugin/go.d/modules/hddtemp/config_schema.json b/src/go/plugin/go.d/modules/hddtemp/config_schema.json index 2858fbe02..45b7c12f6 100644 --- a/src/go/plugin/go.d/modules/hddtemp/config_schema.json +++ b/src/go/plugin/go.d/modules/hddtemp/config_schema.json @@ -28,7 +28,6 @@ "required": [ "address" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/hddtemp/hddtemp.go b/src/go/plugin/go.d/modules/hddtemp/hddtemp.go index ac283d6ee..312854e75 100644 --- a/src/go/plugin/go.d/modules/hddtemp/hddtemp.go +++ b/src/go/plugin/go.d/modules/hddtemp/hddtemp.go @@ -5,10 +5,11 @@ package hddtemp import ( _ "embed" "errors" + "fmt" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" ) //go:embed "config_schema.json" @@ -26,40 +27,31 @@ func New() *HddTemp { return &HddTemp{ Config: Config{ Address: "127.0.0.1:7634", - Timeout: web.Duration(time.Second * 1), + Timeout: confopt.Duration(time.Second * 1), }, - newHddTempConn: newHddTempConn, - charts: &module.Charts{}, - disks: make(map[string]bool), - disksTemp: make(map[string]bool), + charts: &module.Charts{}, + disks: make(map[string]bool), + disksTemp: make(map[string]bool), } } type Config struct { - UpdateEvery int 
`yaml:"update_every" json:"update_every"` - Address string `yaml:"address" json:"address"` - Timeout web.Duration `yaml:"timeout" json:"timeout"` + UpdateEvery int `yaml:"update_every" json:"update_every"` + Address string `yaml:"address" json:"address"` + Timeout confopt.Duration `yaml:"timeout" json:"timeout"` } -type ( - HddTemp struct { - module.Base - Config `yaml:",inline" json:""` +type HddTemp struct { + module.Base + Config `yaml:",inline" json:""` - charts *module.Charts + charts *module.Charts - newHddTempConn func(Config) hddtempConn + conn hddtempConn - disks map[string]bool - disksTemp map[string]bool - } - - hddtempConn interface { - connect() error - disconnect() - queryHddTemp() (string, error) - } -) + disks map[string]bool + disksTemp map[string]bool +} func (h *HddTemp) Configuration() any { return h.Config @@ -67,17 +59,17 @@ func (h *HddTemp) Configuration() any { func (h *HddTemp) Init() error { if h.Address == "" { - h.Error("config: 'address' not set") - return errors.New("address not set") + return fmt.Errorf("config: 'address' not set") } + h.conn = newHddTempConn(h.Config) + return nil } func (h *HddTemp) Check() error { mx, err := h.collect() if err != nil { - h.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/hddtemp/hddtemp_test.go b/src/go/plugin/go.d/modules/hddtemp/hddtemp_test.go index d20d79edb..0194bb0c9 100644 --- a/src/go/plugin/go.d/modules/hddtemp/hddtemp_test.go +++ b/src/go/plugin/go.d/modules/hddtemp/hddtemp_test.go @@ -82,7 +82,7 @@ func TestHddTemp_Cleanup(t *testing.T) { "after check": { prepare: func() *HddTemp { hdd := New() - hdd.newHddTempConn = func(config Config) hddtempConn { return prepareMockAllDisksOk() } + hdd.conn = prepareMockAllDisksOk() _ = hdd.Check() return hdd }, @@ -90,7 +90,7 @@ func TestHddTemp_Cleanup(t *testing.T) { "after collect": { prepare: func() *HddTemp { hdd := New() - hdd.newHddTempConn = func(config Config) hddtempConn { return prepareMockAllDisksOk() } + hdd.conn = prepareMockAllDisksOk() _ = hdd.Collect() return hdd }, @@ -123,10 +123,6 @@ func TestHddTemp_Check(t *testing.T) { wantFail: false, prepareMock: prepareMockAllDisksSleep, }, - "err on connect": { - wantFail: true, - prepareMock: prepareMockErrOnConnect, - }, "unexpected response": { wantFail: true, prepareMock: prepareMockUnexpectedResponse, @@ -140,8 +136,7 @@ func TestHddTemp_Check(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { hdd := New() - mock := test.prepareMock() - hdd.newHddTempConn = func(config Config) hddtempConn { return mock } + hdd.conn = test.prepareMock() if test.wantFail { assert.Error(t, hdd.Check()) @@ -219,10 +214,6 @@ func TestHddTemp_Collect(t *testing.T) { "disk_ata-WDC_WD10EARS-00Y5B1_WD-WCAV5R693922_temp_sensor_status_unk": 0, }, }, - "err on connect": { - prepareMock: prepareMockErrOnConnect, - wantDisconnect: false, - }, "unexpected response": { prepareMock: prepareMockUnexpectedResponse, wantDisconnect: true, @@ -236,32 +227,16 @@ func TestHddTemp_Collect(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { hdd := New() - mock := test.prepareMock() - hdd.newHddTempConn = func(config Config) hddtempConn { return mock } + hdd.conn = test.prepareMock() mx := hdd.Collect() assert.Equal(t, test.wantMetrics, mx) - assert.Len(t, *hdd.Charts(), test.wantCharts) - assert.Equal(t, test.wantDisconnect, mock.disconnectCalled) - testMetricsHasAllChartsDims(t, hdd, mx) - }) - } -} -func testMetricsHasAllChartsDims(t *testing.T, hdd *HddTemp, 
mx map[string]int64) { - for _, chart := range *hdd.Charts() { - if chart.Obsolete { - continue - } - for _, dim := range chart.Dims { - _, ok := mx[dim.ID] - assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) - } - for _, v := range chart.Vars { - _, ok := mx[v.ID] - assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) - } + assert.Len(t, *hdd.Charts(), test.wantCharts, "wantCharts") + + module.TestMetricsHasAllChartsDims(t, hdd.Charts(), mx) + }) } } @@ -277,12 +252,6 @@ func prepareMockAllDisksSleep() *mockHddTempConn { } } -func prepareMockErrOnConnect() *mockHddTempConn { - return &mockHddTempConn{ - errOnConnect: true, - } -} - func prepareMockUnexpectedResponse() *mockHddTempConn { return &mockHddTempConn{ hddTempLine: "Lorem ipsum dolor sit amet, consectetur adipiscing elit.", @@ -296,21 +265,8 @@ func prepareMockEmptyResponse() *mockHddTempConn { } type mockHddTempConn struct { - errOnConnect bool errOnQueryHddTemp bool hddTempLine string - disconnectCalled bool -} - -func (m *mockHddTempConn) connect() error { - if m.errOnConnect { - return errors.New("mock.connect() error") - } - return nil -} - -func (m *mockHddTempConn) disconnect() { - m.disconnectCalled = true } func (m *mockHddTempConn) queryHddTemp() (string, error) { diff --git a/src/go/plugin/go.d/modules/hddtemp/integrations/hdd_temperature.md b/src/go/plugin/go.d/modules/hddtemp/integrations/hdd_temperature.md index 3d5f3e71a..645002a93 100644 --- a/src/go/plugin/go.d/modules/hddtemp/integrations/hdd_temperature.md +++ b/src/go/plugin/go.d/modules/hddtemp/integrations/hdd_temperature.md @@ -99,8 +99,8 @@ Install `hddtemp` using your distribution's package manager. The configuration file name for this integration is `go.d/hddtemp.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
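The hddtemp client rewrite above pairs with the gearman change earlier: socket.Config collapses ConnectTimeout/ReadTimeout/WriteTimeout into a single Timeout, and the stateful connect/command/disconnect client gives way to a one-shot socket.ConnectAndRead, which (judging by the call site) dials, hands each read chunk to the callback, and disconnects once the callback returns false. Condensed from the new client:

```go
package hddtemp

import (
	"time"

	"github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/socket"
)

// queryHddTemp performs a single query against the hddtemp daemon.
// Condensed from the new client in this patch.
func queryHddTemp(address string, timeout time.Duration) (string, error) {
	var (
		i int
		s string
	)

	cfg := socket.Config{
		Address: address,
		Timeout: timeout,
	}

	err := socket.ConnectAndRead(cfg, func(bs []byte) bool {
		if i++; i > 1 {
			return false // hddtemp sends its whole report in the first read
		}
		s = string(bs)
		return true
	})
	if err != nil {
		return "", err
	}
	return s, nil
}
```

Dropping the stateful connection also simplifies the tests above: the mock no longer needs connect/disconnect bookkeeping, and the "err on connect" cases disappear.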
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/hdfs/client.go b/src/go/plugin/go.d/modules/hdfs/client.go deleted file mode 100644 index 3c43348be..000000000 --- a/src/go/plugin/go.d/modules/hdfs/client.go +++ /dev/null @@ -1,69 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package hdfs - -import ( - "encoding/json" - "fmt" - "io" - "net/http" - - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" -) - -func newClient(httpClient *http.Client, request web.Request) *client { - return &client{ - httpClient: httpClient, - request: request, - } -} - -type client struct { - httpClient *http.Client - request web.Request -} - -func (c *client) do() (*http.Response, error) { - req, err := web.NewHTTPRequest(c.request) - if err != nil { - return nil, fmt.Errorf("error on creating http request to %s : %v", c.request.URL, err) - } - - // req.Header.Add("Accept-Encoding", "gzip") - // req.Header.Set("User-Agent", "netdata/go.d.plugin") - - return c.httpClient.Do(req) -} - -func (c *client) doOK() (*http.Response, error) { - resp, err := c.do() - if err != nil { - return nil, err - } - - if resp.StatusCode != http.StatusOK { - return resp, fmt.Errorf("%s returned %d", c.request.URL, resp.StatusCode) - } - return resp, nil -} - -func (c *client) doOKWithDecodeJSON(dst interface{}) error { - resp, err := c.doOK() - defer closeBody(resp) - if err != nil { - return err - } - - err = json.NewDecoder(resp.Body).Decode(dst) - if err != nil { - return fmt.Errorf("error on decoding response from %s : %v", c.request.URL, err) - } - return nil -} - -func closeBody(resp *http.Response) { - if resp != nil && resp.Body != nil { - _, _ = io.Copy(io.Discard, resp.Body) - _ = resp.Body.Close() - } -} diff --git a/src/go/plugin/go.d/modules/hdfs/collect.go b/src/go/plugin/go.d/modules/hdfs/collect.go index 6ac022b87..c6e505817 100644 --- a/src/go/plugin/go.d/modules/hdfs/collect.go +++ b/src/go/plugin/go.d/modules/hdfs/collect.go @@ -9,12 +9,17 @@ import ( "strings" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) func (h *HDFS) collect() (map[string]int64, error) { - var raw rawJMX - err := h.client.doOKWithDecodeJSON(&raw) + req, err := web.NewHTTPRequest(h.RequestConfig) if err != nil { + return nil, fmt.Errorf("failed to create HTTP request: %v", err) + } + + var raw rawJMX + if err := web.DoHTTP(h.httpClient).RequestJSON(req, &raw); err != nil { return nil, err } @@ -28,9 +33,13 @@ func (h *HDFS) collect() (map[string]int64, error) { } func (h *HDFS) determineNodeType() (nodeType, error) { - var raw rawJMX - err := h.client.doOKWithDecodeJSON(&raw) + req, err := web.NewHTTPRequest(h.RequestConfig) if err != nil { + return "", fmt.Errorf("failed to create HTTP request: %v", err) + } + + var raw rawJMX + if err := web.DoHTTP(h.httpClient).RequestJSON(req, &raw); err != nil { return "", err } @@ -69,40 +78,33 @@ func (h *HDFS) collectRawJMX(raw rawJMX) *metrics { } func (h *HDFS) collectNameNode(mx *metrics, raw rawJMX) { - err := h.collectJVM(mx, raw) - if err != nil { + if err := h.collectJVM(mx, raw); err != nil { h.Debugf("error on collecting jvm : %v", err) } - err = h.collectRPCActivity(mx, raw) - if err != nil { + if err := h.collectRPCActivity(mx, raw); err != nil { h.Debugf("error on collecting rpc activity : %v", err) } - err = h.collectFSNameSystem(mx, raw) - if err != nil { + if err := h.collectFSNameSystem(mx, raw); err != nil { h.Debugf("error on 
collecting fs name system : %v", err) } } func (h *HDFS) collectDataNode(mx *metrics, raw rawJMX) { - err := h.collectJVM(mx, raw) - if err != nil { + if err := h.collectJVM(mx, raw); err != nil { h.Debugf("error on collecting jvm : %v", err) } - err = h.collectRPCActivity(mx, raw) - if err != nil { + if err := h.collectRPCActivity(mx, raw); err != nil { h.Debugf("error on collecting rpc activity : %v", err) } - err = h.collectFSDatasetState(mx, raw) - if err != nil { + if err := h.collectFSDatasetState(mx, raw); err != nil { h.Debugf("error on collecting fs dataset state : %v", err) } - err = h.collectDataNodeActivity(mx, raw) - if err != nil { + if err := h.collectDataNodeActivity(mx, raw); err != nil { h.Debugf("error on collecting datanode activity state : %v", err) } } @@ -192,7 +194,7 @@ func (h *HDFS) collectDataNodeActivity(mx *metrics, raw rawJMX) error { return nil } -func writeJSONTo(dst interface{}, src interface{}) error { +func writeJSONTo(dst, src any) error { b, err := json.Marshal(src) if err != nil { return err diff --git a/src/go/plugin/go.d/modules/hdfs/config_schema.json b/src/go/plugin/go.d/modules/hdfs/config_schema.json index 528cc4dbf..26e33e1d0 100644 --- a/src/go/plugin/go.d/modules/hdfs/config_schema.json +++ b/src/go/plugin/go.d/modules/hdfs/config_schema.json @@ -105,7 +105,6 @@ "required": [ "url" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/hdfs/hdfs.go b/src/go/plugin/go.d/modules/hdfs/hdfs.go index 44b5840bb..668330e8d 100644 --- a/src/go/plugin/go.d/modules/hdfs/hdfs.go +++ b/src/go/plugin/go.d/modules/hdfs/hdfs.go @@ -5,9 +5,12 @@ package hdfs import ( _ "embed" "errors" + "fmt" + "net/http" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -24,12 +27,12 @@ func init() { func New() *HDFS { config := Config{ - HTTP: web.HTTP{ - Request: web.Request{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{ URL: "http://127.0.0.1:9870/jmx", }, - Client: web.Client{ - Timeout: web.Duration(time.Second), + ClientConfig: web.ClientConfig{ + Timeout: confopt.Duration(time.Second), }, }, } @@ -40,8 +43,8 @@ func New() *HDFS { } type Config struct { - web.HTTP `yaml:",inline" json:""` - UpdateEvery int `yaml:"update_every" json:"update_every"` + web.HTTPConfig `yaml:",inline" json:""` + UpdateEvery int `yaml:"update_every" json:"update_every"` } type ( @@ -49,7 +52,7 @@ type ( module.Base Config `yaml:",inline" json:""` - client *client + httpClient *http.Client nodeType } @@ -66,17 +69,15 @@ func (h *HDFS) Configuration() any { } func (h *HDFS) Init() error { - if err := h.validateConfig(); err != nil { - h.Errorf("config validation: %v", err) - return err + if h.URL == "" { + return errors.New("URL is required but not set") } - cl, err := h.createClient() + httpClient, err := web.NewHTTPClient(h.ClientConfig) if err != nil { - h.Errorf("error on creating client : %v", err) - return err + return fmt.Errorf("failed to create HTTP client: %v", err) } - h.client = cl + h.httpClient = httpClient return nil } @@ -84,19 +85,19 @@ func (h *HDFS) Init() error { func (h *HDFS) Check() error { typ, err := h.determineNodeType() if err != nil { - h.Errorf("error on node type determination : %v", err) - return err + return fmt.Errorf("error on node type determination : %v", err) } h.nodeType = typ mx, err := h.collect() if err != nil { - 
h.Error(err) return err } + if len(mx) == 0 { return errors.New("no metrics collected") } + return nil } @@ -113,12 +114,8 @@ func (h *HDFS) Charts() *Charts { func (h *HDFS) Collect() map[string]int64 { mx, err := h.collect() - if err != nil { h.Error(err) - } - - if len(mx) == 0 { return nil } @@ -126,7 +123,7 @@ func (h *HDFS) Collect() map[string]int64 { } func (h *HDFS) Cleanup() { - if h.client != nil && h.client.httpClient != nil { - h.client.httpClient.CloseIdleConnections() + if h.httpClient != nil { + h.httpClient.CloseIdleConnections() } } diff --git a/src/go/plugin/go.d/modules/hdfs/hdfs_test.go b/src/go/plugin/go.d/modules/hdfs/hdfs_test.go index d24e50bb6..994bfe5f3 100644 --- a/src/go/plugin/go.d/modules/hdfs/hdfs_test.go +++ b/src/go/plugin/go.d/modules/hdfs/hdfs_test.go @@ -47,7 +47,7 @@ func TestHDFS_Init(t *testing.T) { func TestHDFS_InitErrorOnCreatingClientWrongTLSCA(t *testing.T) { job := New() - job.Client.TLSConfig.TLSCA = "testdata/tls" + job.ClientConfig.TLSConfig.TLSCA = "testdata/tls" assert.Error(t, job.Init()) } diff --git a/src/go/plugin/go.d/modules/hdfs/init.go b/src/go/plugin/go.d/modules/hdfs/init.go deleted file mode 100644 index 1159ab73b..000000000 --- a/src/go/plugin/go.d/modules/hdfs/init.go +++ /dev/null @@ -1,25 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package hdfs - -import ( - "errors" - - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" -) - -func (h *HDFS) validateConfig() error { - if h.URL == "" { - return errors.New("url not set") - } - return nil -} - -func (h *HDFS) createClient() (*client, error) { - httpClient, err := web.NewHTTPClient(h.Client) - if err != nil { - return nil, err - } - - return newClient(httpClient, h.Request), nil -} diff --git a/src/go/plugin/go.d/modules/hdfs/integrations/hadoop_distributed_file_system_hdfs.md b/src/go/plugin/go.d/modules/hdfs/integrations/hadoop_distributed_file_system_hdfs.md index e37ccde0c..2b39cd249 100644 --- a/src/go/plugin/go.d/modules/hdfs/integrations/hadoop_distributed_file_system_hdfs.md +++ b/src/go/plugin/go.d/modules/hdfs/integrations/hadoop_distributed_file_system_hdfs.md @@ -120,8 +120,8 @@ No action required. The configuration file name for this integration is `go.d/hdfs.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
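The deleted hdfs client.go above hand-rolled request building, status checking, and JSON decoding; the new collect.go reduces that to two calls. A sketch of the resulting fetch path, with a generic destination standing in for the module's rawJMX type:

```go
package hdfs

import (
	"fmt"
	"net/http"

	"github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
)

// fetchJSON mirrors the post-patch collect path: build the request from
// the module's RequestConfig, then perform it and decode the JSON body.
// web.DoHTTP(...).RequestJSON presumably subsumes the non-200 status
// check and the body draining that the deleted client did by hand.
func fetchJSON(client *http.Client, cfg web.RequestConfig, dst any) error {
	req, err := web.NewHTTPRequest(cfg)
	if err != nil {
		return fmt.Errorf("failed to create HTTP request: %v", err)
	}
	return web.DoHTTP(client).RequestJSON(req, dst)
}
```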
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/hpssa/config_schema.json b/src/go/plugin/go.d/modules/hpssa/config_schema.json index 788d7685e..f9182e00e 100644 --- a/src/go/plugin/go.d/modules/hpssa/config_schema.json +++ b/src/go/plugin/go.d/modules/hpssa/config_schema.json @@ -19,7 +19,6 @@ "default": 2 } }, - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/hpssa/hpssa.go b/src/go/plugin/go.d/modules/hpssa/hpssa.go index 1245f477f..c3d556e38 100644 --- a/src/go/plugin/go.d/modules/hpssa/hpssa.go +++ b/src/go/plugin/go.d/modules/hpssa/hpssa.go @@ -5,10 +5,11 @@ package hpssa import ( _ "embed" "errors" + "fmt" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" ) //go:embed "config_schema.json" @@ -28,7 +29,7 @@ func init() { func New() *Hpssa { return &Hpssa{ Config: Config{ - Timeout: web.Duration(time.Second * 2), + Timeout: confopt.Duration(time.Second * 2), }, charts: &module.Charts{}, seenControllers: make(map[string]*hpssaController), @@ -39,8 +40,8 @@ func New() *Hpssa { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + Timeout confopt.Duration `yaml:"timeout,omitempty" json:"timeout"` } type ( @@ -67,12 +68,11 @@ func (h *Hpssa) Configuration() any { } func (h *Hpssa) Init() error { - ssacliExec, err := h.initSsacliExec() + ssacli, err := h.initSsacliExec() if err != nil { - h.Errorf("ssacli exec initialization: %v", err) - return err + return fmt.Errorf("ssacli exec initialization: %v", err) } - h.exec = ssacliExec + h.exec = ssacli return nil } @@ -80,7 +80,6 @@ func (h *Hpssa) Init() error { func (h *Hpssa) Check() error { mx, err := h.collect() if err != nil { - h.Error(err) return err } diff --git a/src/go/plugin/go.d/modules/hpssa/hpssa_test.go b/src/go/plugin/go.d/modules/hpssa/hpssa_test.go index a3e90d2a7..001e62ca4 100644 --- a/src/go/plugin/go.d/modules/hpssa/hpssa_test.go +++ b/src/go/plugin/go.d/modules/hpssa/hpssa_test.go @@ -352,8 +352,10 @@ func TestHpssa_Collect(t *testing.T) { mx := hpe.Collect() assert.Equal(t, test.wantMetrics, mx) - assert.Len(t, *hpe.Charts(), test.wantCharts) - testMetricsHasAllChartsDims(t, hpe, mx) + + assert.Len(t, *hpe.Charts(), test.wantCharts, "wantCharts") + + module.TestMetricsHasAllChartsDims(t, hpe.Charts(), mx) }) } } @@ -412,19 +414,3 @@ func (m *mockSsacliExec) controllersInfo() ([]byte, error) { } return m.infoData, nil } - -func testMetricsHasAllChartsDims(t *testing.T, hpe *Hpssa, mx map[string]int64) { - for _, chart := range *hpe.Charts() { - if chart.Obsolete { - continue - } - for _, dim := range chart.Dims { - _, ok := mx[dim.ID] - assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) - } - for _, v := range chart.Vars { - _, ok := mx[v.ID] - assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) - } - } -} diff --git a/src/go/plugin/go.d/modules/hpssa/integrations/hpe_smart_arrays.md b/src/go/plugin/go.d/modules/hpssa/integrations/hpe_smart_arrays.md index 47fe74739..961248f85 100644 --- a/src/go/plugin/go.d/modules/hpssa/integrations/hpe_smart_arrays.md +++ 
b/src/go/plugin/go.d/modules/hpssa/integrations/hpe_smart_arrays.md @@ -168,8 +168,8 @@ See [official installation instructions](https://support.hpe.com/connect/s/softw The configuration file name for this integration is `go.d/ssacli.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/httpcheck/collect.go b/src/go/plugin/go.d/modules/httpcheck/collect.go index fa0c96bc3..9ba55c317 100644 --- a/src/go/plugin/go.d/modules/httpcheck/collect.go +++ b/src/go/plugin/go.d/modules/httpcheck/collect.go @@ -25,9 +25,9 @@ const ( ) func (hc *HTTPCheck) collect() (map[string]int64, error) { - req, err := web.NewHTTPRequest(hc.Request) + req, err := web.NewHTTPRequest(hc.RequestConfig) if err != nil { - return nil, fmt.Errorf("error on creating HTTP requests to %s : %v", hc.Request.URL, err) + return nil, fmt.Errorf("error on creating HTTP requests to %s : %v", hc.RequestConfig.URL, err) } if hc.CookieFile != "" { @@ -40,7 +40,7 @@ func (hc *HTTPCheck) collect() (map[string]int64, error) { resp, err := hc.httpClient.Do(req) dur := time.Since(start) - defer closeBody(resp) + defer web.CloseBody(resp) var mx metrics @@ -176,14 +176,6 @@ func (hc *HTTPCheck) readCookieFile() error { return nil } -func closeBody(resp *http.Response) { - if resp == nil || resp.Body == nil { - return - } - _, _ = io.Copy(io.Discard, resp.Body) - _ = resp.Body.Close() -} - func durationToMs(duration time.Duration) int { return int(duration) / (int(time.Millisecond) / int(time.Nanosecond)) } diff --git a/src/go/plugin/go.d/modules/httpcheck/config_schema.json b/src/go/plugin/go.d/modules/httpcheck/config_schema.json index 82ffc7cb5..99b879f74 100644 --- a/src/go/plugin/go.d/modules/httpcheck/config_schema.json +++ b/src/go/plugin/go.d/modules/httpcheck/config_schema.json @@ -178,7 +178,6 @@ "url", "status_accepted" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/httpcheck/httpcheck.go b/src/go/plugin/go.d/modules/httpcheck/httpcheck.go index 1c7b6b1c0..75b4dddeb 100644 --- a/src/go/plugin/go.d/modules/httpcheck/httpcheck.go +++ b/src/go/plugin/go.d/modules/httpcheck/httpcheck.go @@ -5,11 +5,13 @@ package httpcheck import ( _ "embed" "errors" + "fmt" "net/http" "regexp" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -30,9 +32,9 @@ func init() { func New() *HTTPCheck { return &HTTPCheck{ Config: Config{ - HTTP: web.HTTP{ - Client: web.Client{ - Timeout: web.Duration(time.Second), + HTTPConfig: web.HTTPConfig{ + ClientConfig: web.ClientConfig{ + Timeout: confopt.Duration(time.Second), }, }, AcceptedStatuses: []int{200}, @@ -45,7 +47,7 @@ func New() *HTTPCheck { type ( Config struct { UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - web.HTTP `yaml:",inline" json:""` + web.HTTPConfig `yaml:",inline" 
json:""` AcceptedStatuses []int `yaml:"status_accepted" json:"status_accepted"` ResponseMatch string `yaml:"response_match,omitempty" json:"response_match"` CookieFile string `yaml:"cookie_file,omitempty" json:"cookie_file"` @@ -80,30 +82,26 @@ func (hc *HTTPCheck) Configuration() any { func (hc *HTTPCheck) Init() error { if err := hc.validateConfig(); err != nil { - hc.Errorf("config validation: %v", err) - return err + return fmt.Errorf("config validation: %v", err) } hc.charts = hc.initCharts() httpClient, err := hc.initHTTPClient() if err != nil { - hc.Errorf("init HTTP client: %v", err) - return err + return fmt.Errorf("init HTTP client: %v", err) } hc.httpClient = httpClient re, err := hc.initResponseMatchRegexp() if err != nil { - hc.Errorf("init response match regexp: %v", err) - return err + return fmt.Errorf("init response match regexp: %v", err) } hc.reResponse = re hm, err := hc.initHeaderMatch() if err != nil { - hc.Errorf("init header match: %v", err) - return err + return fmt.Errorf("init header match: %v", err) } hc.headerMatch = hm @@ -113,7 +111,7 @@ func (hc *HTTPCheck) Init() error { hc.Debugf("using URL %s", hc.URL) hc.Debugf("using HTTP timeout %s", hc.Timeout.Duration()) - hc.Debugf("using accepted HTTP statuses %v", hc.AcceptedStatuses) + hc.Debugf("using accepted HTTPConfig statuses %v", hc.AcceptedStatuses) if hc.reResponse != nil { hc.Debugf("using response match regexp %s", hc.reResponse) } @@ -124,7 +122,6 @@ func (hc *HTTPCheck) Init() error { func (hc *HTTPCheck) Check() error { mx, err := hc.collect() if err != nil { - hc.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/httpcheck/httpcheck_test.go b/src/go/plugin/go.d/modules/httpcheck/httpcheck_test.go index 9ae0cf4ed..43bc3a1e4 100644 --- a/src/go/plugin/go.d/modules/httpcheck/httpcheck_test.go +++ b/src/go/plugin/go.d/modules/httpcheck/httpcheck_test.go @@ -3,13 +3,14 @@ package httpcheck import ( - "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" "net/http" "net/http/httptest" "os" "testing" "time" + "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" "github.com/stretchr/testify/assert" @@ -42,8 +43,8 @@ func TestHTTPCheck_Init(t *testing.T) { "success if url set": { wantFail: false, config: Config{ - HTTP: web.HTTP{ - Request: web.Request{URL: "http://127.0.0.1:38001"}, + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{URL: "http://127.0.0.1:38001"}, }, }, }, @@ -54,16 +55,16 @@ func TestHTTPCheck_Init(t *testing.T) { "fail when URL not set": { wantFail: true, config: Config{ - HTTP: web.HTTP{ - Request: web.Request{URL: ""}, + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{URL: ""}, }, }, }, "fail if wrong response regex": { wantFail: true, config: Config{ - HTTP: web.HTTP{ - Request: web.Request{URL: "http://127.0.0.1:38001"}, + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{URL: "http://127.0.0.1:38001"}, }, ResponseMatch: "(?:qwe))", }, @@ -495,7 +496,7 @@ func prepareSuccessCase() (*HTTPCheck, func()) { func prepareTimeoutCase() (*HTTPCheck, func()) { httpCheck := New() httpCheck.UpdateEvery = 1 - httpCheck.Timeout = web.Duration(time.Millisecond * 100) + httpCheck.Timeout = confopt.Duration(time.Millisecond * 100) srv := httptest.NewServer(http.HandlerFunc( func(w http.ResponseWriter, r *http.Request) { diff --git a/src/go/plugin/go.d/modules/httpcheck/init.go 
b/src/go/plugin/go.d/modules/httpcheck/init.go index a4a3ae27d..3dc38c823 100644 --- a/src/go/plugin/go.d/modules/httpcheck/init.go +++ b/src/go/plugin/go.d/modules/httpcheck/init.go @@ -8,8 +8,8 @@ import ( "net/http" "regexp" + "github.com/netdata/netdata/go/plugins/pkg/matcher" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -27,7 +27,7 @@ func (hc *HTTPCheck) validateConfig() error { } func (hc *HTTPCheck) initHTTPClient() (*http.Client, error) { - return web.NewHTTPClient(hc.Client) + return web.NewHTTPClient(hc.ClientConfig) } func (hc *HTTPCheck) initResponseMatchRegexp() (*regexp.Regexp, error) { diff --git a/src/go/plugin/go.d/modules/httpcheck/integrations/http_endpoints.md b/src/go/plugin/go.d/modules/httpcheck/integrations/http_endpoints.md index b94735dee..787a04530 100644 --- a/src/go/plugin/go.d/modules/httpcheck/integrations/http_endpoints.md +++ b/src/go/plugin/go.d/modules/httpcheck/integrations/http_endpoints.md @@ -105,8 +105,8 @@ No action required. The configuration file name for this integration is `go.d/httpcheck.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -129,7 +129,7 @@ The following options can be defined globally: update_every, autodetection_retry | headers_match | This option defines a set of rules that check for specific key-value pairs in the HTTP headers of the response. | [] | no | | headers_match.exclude | This option determines whether the rule should check for the presence of the specified key-value pair or the absence of it. | no | no | | headers_match.key | The exact name of the HTTP header to check for. | | yes | -| headers_match.value | The [pattern](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/matcher#supported-format) to match against the value of the specified header. | | no | +| headers_match.value | The [pattern](https://github.com/netdata/netdata/tree/master/src/go/pkg/matcher#supported-format) to match against the value of the specified header. | | no | | cookie_file | Path to cookie file. See [cookie file format](https://everything.curl.dev/http/cookies/fileformat). | | no | | timeout | HTTP request timeout. | 1 | no | | username | Username for basic HTTP authentication. | | no | @@ -201,7 +201,7 @@ jobs: ##### With `header_match` -Example configurations with `header_match`. See the value [pattern](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/matcher#supported-format) syntax. +Example configurations with `header_match`. See the value [pattern](https://github.com/netdata/netdata/tree/master/src/go/pkg/matcher#supported-format) syntax.
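One more helper absorbed into pkg/web in this patch: httpcheck's collect.go above now defers web.CloseBody(resp), and identical local closeBody helpers are deleted from hdfs, icecast, and ipfs elsewhere in the patch. Judging by those removed bodies, the shared helper amounts to:

```go
package web

import (
	"io"
	"net/http"
)

// CloseBody drains and closes a response body so the HTTP client can
// reuse the underlying TCP connection. Reconstructed from the per-module
// helpers this patch deletes; the exact upstream body is an assumption.
func CloseBody(resp *http.Response) {
	if resp != nil && resp.Body != nil {
		_, _ = io.Copy(io.Discard, resp.Body)
		_ = resp.Body.Close()
	}
}
```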
    Config diff --git a/src/go/plugin/go.d/modules/httpcheck/metadata.yaml b/src/go/plugin/go.d/modules/httpcheck/metadata.yaml index f34993b5e..13a30218f 100644 --- a/src/go/plugin/go.d/modules/httpcheck/metadata.yaml +++ b/src/go/plugin/go.d/modules/httpcheck/metadata.yaml @@ -94,7 +94,7 @@ modules: default_value: "" required: true - name: headers_match.value - description: "The [pattern](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/matcher#supported-format) to match against the value of the specified header." + description: "The [pattern](https://github.com/netdata/netdata/tree/master/src/go/pkg/matcher#supported-format) to match against the value of the specified header." default_value: "" required: false - name: cookie_file @@ -188,7 +188,7 @@ modules: - 200 - 204 - name: With `header_match` - description: Example configurations with `header_match`. See the value [pattern](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/matcher#supported-format) syntax. + description: Example configurations with `header_match`. See the value [pattern](https://github.com/netdata/netdata/tree/master/src/go/pkg/matcher#supported-format) syntax. config: | jobs: # The "X-Robots-Tag" header must be present in the HTTP response header, diff --git a/src/go/plugin/go.d/modules/icecast/collect.go b/src/go/plugin/go.d/modules/icecast/collect.go index 102ad31e5..9986fdc60 100644 --- a/src/go/plugin/go.d/modules/icecast/collect.go +++ b/src/go/plugin/go.d/modules/icecast/collect.go @@ -3,10 +3,7 @@ package icecast import ( - "encoding/json" "fmt" - "io" - "net/http" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -68,40 +65,15 @@ func (ic *Icecast) collectServerStats(mx map[string]int64) error { } func (ic *Icecast) queryServerStats() (*serverStats, error) { - req, err := web.NewHTTPRequestWithPath(ic.Request, urlPathServerStats) + req, err := web.NewHTTPRequestWithPath(ic.RequestConfig, urlPathServerStats) if err != nil { return nil, err } var stats serverStats - - if err := ic.doOKDecode(req, &stats); err != nil { + if err := web.DoHTTP(ic.httpClient).RequestJSON(req, &stats); err != nil { return nil, err } return &stats, nil } - -func (ic *Icecast) doOKDecode(req *http.Request, in interface{}) error { - resp, err := ic.httpClient.Do(req) - if err != nil { - return fmt.Errorf("error on HTTP request '%s': %v", req.URL, err) - } - defer closeBody(resp) - - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("'%s' returned HTTP status code: %d", req.URL, resp.StatusCode) - } - - if err := json.NewDecoder(resp.Body).Decode(in); err != nil { - return fmt.Errorf("error on decoding response from '%s': %v", req.URL, err) - } - return nil -} - -func closeBody(resp *http.Response) { - if resp != nil && resp.Body != nil { - _, _ = io.Copy(io.Discard, resp.Body) - _ = resp.Body.Close() - } -} diff --git a/src/go/plugin/go.d/modules/icecast/config_schema.json b/src/go/plugin/go.d/modules/icecast/config_schema.json index 3abda6e75..368518fbd 100644 --- a/src/go/plugin/go.d/modules/icecast/config_schema.json +++ b/src/go/plugin/go.d/modules/icecast/config_schema.json @@ -105,7 +105,6 @@ "required": [ "url" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/icecast/icecast.go b/src/go/plugin/go.d/modules/icecast/icecast.go index e999421f7..289781fec 100644 --- a/src/go/plugin/go.d/modules/icecast/icecast.go +++ b/src/go/plugin/go.d/modules/icecast/icecast.go @@ -5,10 +5,12 @@ package icecast 
import ( _ "embed" "errors" + "fmt" "net/http" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -26,12 +28,12 @@ func init() { func New() *Icecast { return &Icecast{ Config: Config{ - HTTP: web.HTTP{ - Request: web.Request{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{ URL: "http://127.0.0.1:8000", }, - Client: web.Client{ - Timeout: web.Duration(time.Second * 1), + ClientConfig: web.ClientConfig{ + Timeout: confopt.Duration(time.Second * 1), }, }, }, @@ -42,8 +44,8 @@ func New() *Icecast { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - web.HTTP `yaml:",inline" json:""` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + web.HTTPConfig `yaml:",inline" json:""` } type Icecast struct { @@ -63,14 +65,12 @@ func (ic *Icecast) Configuration() any { func (ic *Icecast) Init() error { if ic.URL == "" { - ic.Error("URL not set") return errors.New("url not set") } - client, err := web.NewHTTPClient(ic.Client) + client, err := web.NewHTTPClient(ic.ClientConfig) if err != nil { - ic.Error(err) - return err + return fmt.Errorf("creating http client: %w", err) } ic.httpClient = client @@ -83,7 +83,6 @@ func (ic *Icecast) Init() error { func (ic *Icecast) Check() error { mx, err := ic.collect() if err != nil { - ic.Error(err) return err } diff --git a/src/go/plugin/go.d/modules/icecast/icecast_test.go b/src/go/plugin/go.d/modules/icecast/icecast_test.go index 40132986d..6879d802d 100644 --- a/src/go/plugin/go.d/modules/icecast/icecast_test.go +++ b/src/go/plugin/go.d/modules/icecast/icecast_test.go @@ -52,8 +52,8 @@ func TestIcecast_Init(t *testing.T) { "fail when URL not set": { wantFail: true, config: Config{ - HTTP: web.HTTP{ - Request: web.Request{URL: ""}, + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{URL: ""}, }, }, }, diff --git a/src/go/plugin/go.d/modules/icecast/integrations/icecast.md b/src/go/plugin/go.d/modules/icecast/integrations/icecast.md index 9ff06a4dd..2a5545039 100644 --- a/src/go/plugin/go.d/modules/icecast/integrations/icecast.md +++ b/src/go/plugin/go.d/modules/icecast/integrations/icecast.md @@ -92,8 +92,8 @@ Needs at least Icecast version >= 2.4.0 The configuration file name for this integration is `go.d/icecast.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/init.go b/src/go/plugin/go.d/modules/init.go index 8271a70ee..d71cf56a2 100644 --- a/src/go/plugin/go.d/modules/init.go +++ b/src/go/plugin/go.d/modules/init.go @@ -7,9 +7,12 @@ import ( _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/adaptecraid" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/ap" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/apache" + _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/apcupsd" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/beanstalk" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/bind" + _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/boinc" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/cassandra" + _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/ceph" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/chrony" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/clickhouse" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/cockroachdb" @@ -28,7 +31,6 @@ import ( _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/dovecot" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/elasticsearch" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/envoy" - _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/example" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/exim" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/fail2ban" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/filecheck" @@ -53,6 +55,7 @@ import ( _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/logind" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/logstash" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/lvm" + _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/maxscale" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/megacli" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/memcached" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/mongodb" @@ -60,13 +63,16 @@ import ( _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/mysql" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/nginx" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/nginxplus" + _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/nginxunit" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/nginxvts" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/nsd" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/ntpd" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/nvidia_smi" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/nvme" + _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/openldap" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/openvpn" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/openvpn_status_log" + _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/oracledb" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/pgbouncer" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/phpdaemon" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/phpfpm" @@ -87,25 +93,31 @@ import ( _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/rethinkdb" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/riakkv" _ 
"github.com/netdata/netdata/go/plugins/plugin/go.d/modules/rspamd" + _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/samba" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/scaleio" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/sensors" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/smartctl" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/snmp" + _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/spigotmc" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/squid" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/squidlog" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/storcli" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/supervisord" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/systemdunits" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/tengine" + _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/testrandom" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/tomcat" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/tor" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/traefik" + _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/typesense" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/unbound" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/upsd" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/uwsgi" + _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/varnish" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/vcsa" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/vernemq" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/vsphere" + _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/w1sensor" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/weblog" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/whoisquery" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/windows" diff --git a/src/go/plugin/go.d/modules/intelgpu/config_schema.json b/src/go/plugin/go.d/modules/intelgpu/config_schema.json index ac8183421..6ca920bfe 100644 --- a/src/go/plugin/go.d/modules/intelgpu/config_schema.json +++ b/src/go/plugin/go.d/modules/intelgpu/config_schema.json @@ -17,7 +17,6 @@ "type": "string" } }, - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/intelgpu/integrations/intel_gpu.md b/src/go/plugin/go.d/modules/intelgpu/integrations/intel_gpu.md index 696746601..bcd744c33 100644 --- a/src/go/plugin/go.d/modules/intelgpu/integrations/intel_gpu.md +++ b/src/go/plugin/go.d/modules/intelgpu/integrations/intel_gpu.md @@ -110,8 +110,8 @@ Install `intel-gpu-tools` using your distribution's package manager. The configuration file name for this integration is `go.d/intelgpu.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/intelgpu/intelgpu.go b/src/go/plugin/go.d/modules/intelgpu/intelgpu.go index 8e98c688d..b18d72080 100644 --- a/src/go/plugin/go.d/modules/intelgpu/intelgpu.go +++ b/src/go/plugin/go.d/modules/intelgpu/intelgpu.go @@ -5,7 +5,7 @@ package intelgpu import ( _ "embed" "errors" - + "fmt" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" ) @@ -58,8 +58,7 @@ func (ig *IntelGPU) Configuration() any { func (ig *IntelGPU) Init() error { topExec, err := ig.initIntelGPUTopExec() if err != nil { - ig.Error(err) - return err + return fmt.Errorf("init intelgpu top exec: %v", err) } ig.exec = topExec @@ -70,7 +69,6 @@ func (ig *IntelGPU) Init() error { func (ig *IntelGPU) Check() error { mx, err := ig.collect() if err != nil { - ig.Error(err) return err } diff --git a/src/go/plugin/go.d/modules/ipfs/collect.go b/src/go/plugin/go.d/modules/ipfs/collect.go index 6bd0b128a..0a88ae616 100644 --- a/src/go/plugin/go.d/modules/ipfs/collect.go +++ b/src/go/plugin/go.d/modules/ipfs/collect.go @@ -3,10 +3,7 @@ package ipfs import ( - "encoding/json" "fmt" - "io" - "net/http" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -125,13 +122,13 @@ func (ip *IPFS) collectPinLs(mx map[string]int64) error { } func (ip *IPFS) queryStatsBandwidth() (*ipfsStatsBw, error) { - req, err := web.NewHTTPRequestWithPath(ip.Request, urlPathStatsBandwidth) + req, err := web.NewHTTPRequestWithPath(ip.RequestConfig, urlPathStatsBandwidth) if err != nil { return nil, err } var stats ipfsStatsBw - if err := ip.doOKDecode(req, &stats); err != nil { + if err := web.DoHTTP(ip.httpClient).RequestJSON(req, &stats); err != nil { return nil, err } @@ -143,13 +140,13 @@ func (ip *IPFS) queryStatsBandwidth() (*ipfsStatsBw, error) { } func (ip *IPFS) querySwarmPeers() (*ipfsSwarmPeers, error) { - req, err := web.NewHTTPRequestWithPath(ip.Request, urlPathSwarmPeers) + req, err := web.NewHTTPRequestWithPath(ip.RequestConfig, urlPathSwarmPeers) if err != nil { return nil, err } var stats ipfsSwarmPeers - if err := ip.doOKDecode(req, &stats); err != nil { + if err := web.DoHTTP(ip.httpClient).RequestJSON(req, &stats); err != nil { return nil, err } @@ -157,13 +154,13 @@ func (ip *IPFS) querySwarmPeers() (*ipfsSwarmPeers, error) { } func (ip *IPFS) queryStatsRepo() (*ipfsStatsRepo, error) { - req, err := web.NewHTTPRequestWithPath(ip.Request, urlPathStatsRepo) + req, err := web.NewHTTPRequestWithPath(ip.RequestConfig, urlPathStatsRepo) if err != nil { return nil, err } var stats ipfsStatsRepo - if err := ip.doOKDecode(req, &stats); err != nil { + if err := web.DoHTTP(ip.httpClient).RequestJSON(req, &stats); err != nil { return nil, err } @@ -171,39 +168,15 @@ func (ip *IPFS) queryStatsRepo() (*ipfsStatsRepo, error) { } func (ip *IPFS) queryPinLs() (*ipfsPinsLs, error) { - req, err := web.NewHTTPRequestWithPath(ip.Request, urlPathPinLs) + req, err := web.NewHTTPRequestWithPath(ip.RequestConfig, urlPathPinLs) if err != nil { return nil, err } var stats ipfsPinsLs - if err := ip.doOKDecode(req, &stats); err != nil { + if err := web.DoHTTP(ip.httpClient).RequestJSON(req, &stats); err != nil { return nil, err } return &stats, nil } - -func (ip *IPFS) doOKDecode(req *http.Request, in interface{}) error { - resp, err := ip.httpClient.Do(req) - if err != nil { - return fmt.Errorf("error on HTTP request '%s': %v", req.URL, err) - } - defer closeBody(resp) - - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("'%s' 
returned HTTP status code: %d", req.URL, resp.StatusCode) - } - - if err := json.NewDecoder(resp.Body).Decode(in); err != nil { - return fmt.Errorf("error on decoding response from '%s': %v", req.URL, err) - } - return nil -} - -func closeBody(resp *http.Response) { - if resp != nil && resp.Body != nil { - _, _ = io.Copy(io.Discard, resp.Body) - _ = resp.Body.Close() - } -} diff --git a/src/go/plugin/go.d/modules/ipfs/config_schema.json b/src/go/plugin/go.d/modules/ipfs/config_schema.json index ce4921c3e..c440d5c66 100644 --- a/src/go/plugin/go.d/modules/ipfs/config_schema.json +++ b/src/go/plugin/go.d/modules/ipfs/config_schema.json @@ -115,7 +115,6 @@ "required": [ "url" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/ipfs/integrations/ipfs.md b/src/go/plugin/go.d/modules/ipfs/integrations/ipfs.md index 4357b8665..44a37ed9a 100644 --- a/src/go/plugin/go.d/modules/ipfs/integrations/ipfs.md +++ b/src/go/plugin/go.d/modules/ipfs/integrations/ipfs.md @@ -110,8 +110,8 @@ No action required. The configuration file name for this integration is `go.d/ipfs.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/ipfs/ipfs.go b/src/go/plugin/go.d/modules/ipfs/ipfs.go index 0caed8d9b..8509595b9 100644 --- a/src/go/plugin/go.d/modules/ipfs/ipfs.go +++ b/src/go/plugin/go.d/modules/ipfs/ipfs.go @@ -5,10 +5,12 @@ package ipfs import ( _ "embed" "errors" + "fmt" "net/http" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -26,13 +28,13 @@ func init() { func New() *IPFS { return &IPFS{ Config: Config{ - HTTP: web.HTTP{ - Request: web.Request{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{ URL: "http://127.0.0.1:5001", Method: http.MethodPost, }, - Client: web.Client{ - Timeout: web.Duration(time.Second * 1), + ClientConfig: web.ClientConfig{ + Timeout: confopt.Duration(time.Second * 1), }, }, QueryRepoApi: false, @@ -43,10 +45,10 @@ func New() *IPFS { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - web.HTTP `yaml:",inline" json:""` - QueryPinApi bool `yaml:"pinapi" json:"pinapi"` - QueryRepoApi bool `yaml:"repoapi" json:"repoapi"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + web.HTTPConfig `yaml:",inline" json:""` + QueryPinApi bool `yaml:"pinapi" json:"pinapi"` + QueryRepoApi bool `yaml:"repoapi" json:"repoapi"` } type IPFS struct { @@ -64,14 +66,12 @@ func (ip *IPFS) Configuration() any { func (ip *IPFS) Init() error { if ip.URL == "" { - ip.Error("URL not set") return errors.New("url not set") } - client, err := web.NewHTTPClient(ip.Client) + client, err := web.NewHTTPClient(ip.ClientConfig) if err != nil { - ip.Error(err) - return err + return fmt.Errorf("http client init: %w", err) } ip.httpClient = 
client @@ -93,7 +93,6 @@ func (ip *IPFS) Init() error { func (ip *IPFS) Check() error { mx, err := ip.collect() if err != nil { - ip.Error(err) return err } diff --git a/src/go/plugin/go.d/modules/ipfs/ipfs_test.go b/src/go/plugin/go.d/modules/ipfs/ipfs_test.go index 5e353a1bc..f4274d7eb 100644 --- a/src/go/plugin/go.d/modules/ipfs/ipfs_test.go +++ b/src/go/plugin/go.d/modules/ipfs/ipfs_test.go @@ -54,8 +54,8 @@ func TestIPFS_Init(t *testing.T) { "fail when URL not set": { wantFail: true, config: Config{ - HTTP: web.HTTP{ - Request: web.Request{URL: ""}, + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{URL: ""}, }, }, }, @@ -165,29 +165,14 @@ func TestIPFS_Collect(t *testing.T) { mx := ipfs.Collect() require.Equal(t, test.wantMetrics, mx) + if len(test.wantMetrics) > 0 { - testMetricsHasAllChartsDims(t, ipfs, mx) + module.TestMetricsHasAllChartsDims(t, ipfs.Charts(), mx) } }) } } -func testMetricsHasAllChartsDims(t *testing.T, ipfs *IPFS, mx map[string]int64) { - for _, chart := range *ipfs.Charts() { - if chart.Obsolete { - continue - } - for _, dim := range chart.Dims { - _, ok := mx[dim.ID] - assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) - } - for _, v := range chart.Vars { - _, ok := mx[v.ID] - assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) - } - } -} - func prepareCaseOkDefault(t *testing.T) (*IPFS, func()) { t.Helper() srv := httptest.NewServer(http.HandlerFunc( diff --git a/src/go/plugin/go.d/modules/isc_dhcpd/charts.go b/src/go/plugin/go.d/modules/isc_dhcpd/charts.go index a8b3581ea..102487011 100644 --- a/src/go/plugin/go.d/modules/isc_dhcpd/charts.go +++ b/src/go/plugin/go.d/modules/isc_dhcpd/charts.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly + package isc_dhcpd import ( diff --git a/src/go/plugin/go.d/modules/isc_dhcpd/collect.go b/src/go/plugin/go.d/modules/isc_dhcpd/collect.go index 08716a108..c5ba6c327 100644 --- a/src/go/plugin/go.d/modules/isc_dhcpd/collect.go +++ b/src/go/plugin/go.d/modules/isc_dhcpd/collect.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly + package isc_dhcpd import ( diff --git a/src/go/plugin/go.d/modules/isc_dhcpd/config_schema.json b/src/go/plugin/go.d/modules/isc_dhcpd/config_schema.json index a34e79c70..25ca14ad4 100644 --- a/src/go/plugin/go.d/modules/isc_dhcpd/config_schema.json +++ b/src/go/plugin/go.d/modules/isc_dhcpd/config_schema.json @@ -57,7 +57,6 @@ "leases_path", "pools" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/isc_dhcpd/doc.go b/src/go/plugin/go.d/modules/isc_dhcpd/doc.go new file mode 100644 index 000000000..12e346017 --- /dev/null +++ b/src/go/plugin/go.d/modules/isc_dhcpd/doc.go @@ -0,0 +1,3 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package isc_dhcpd diff --git a/src/go/plugin/go.d/modules/isc_dhcpd/init.go b/src/go/plugin/go.d/modules/isc_dhcpd/init.go index d103a223c..be7e7122a 100644 --- a/src/go/plugin/go.d/modules/isc_dhcpd/init.go +++ b/src/go/plugin/go.d/modules/isc_dhcpd/init.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly + package isc_dhcpd import ( diff --git a/src/go/plugin/go.d/modules/isc_dhcpd/integrations/isc_dhcp.md b/src/go/plugin/go.d/modules/isc_dhcpd/integrations/isc_dhcp.md index 
4607c1a5a..7a6fa049d 100644 --- a/src/go/plugin/go.d/modules/isc_dhcpd/integrations/isc_dhcp.md +++ b/src/go/plugin/go.d/modules/isc_dhcpd/integrations/isc_dhcp.md @@ -26,7 +26,10 @@ This collector monitors ISC DHCP lease usage by reading the DHCP client lease da -This collector is supported on all platforms. +This collector is only supported on the following platforms: + +- Linux +- BSD This collector supports collecting metrics from multiple instances of this integration, including remote instances. @@ -103,8 +106,8 @@ No action required. The configuration file name for this integration is `go.d/isc_dhcpd.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/isc_dhcpd/isc_dhcpd.go b/src/go/plugin/go.d/modules/isc_dhcpd/isc_dhcpd.go index 1733cb221..7fef82a74 100644 --- a/src/go/plugin/go.d/modules/isc_dhcpd/isc_dhcpd.go +++ b/src/go/plugin/go.d/modules/isc_dhcpd/isc_dhcpd.go @@ -1,10 +1,13 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly + package isc_dhcpd import ( _ "embed" "errors" + "fmt" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" @@ -65,21 +68,18 @@ func (d *DHCPd) Configuration() any { func (d *DHCPd) Init() error { err := d.validateConfig() if err != nil { - d.Errorf("config validation: %v", err) - return err + return fmt.Errorf("config validation: %v", err) } pools, err := d.initPools() if err != nil { - d.Errorf("ip pools init: %v", err) - return err + return fmt.Errorf("ip pools init: %v", err) } d.pools = pools charts, err := d.initCharts(pools) if err != nil { - d.Errorf("charts init: %v", err) - return err + return fmt.Errorf("charts init: %v", err) } d.charts = charts @@ -92,7 +92,6 @@ func (d *DHCPd) Init() error { func (d *DHCPd) Check() error { mx, err := d.collect() if err != nil { - d.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/isc_dhcpd/isc_dhcpd_test.go b/src/go/plugin/go.d/modules/isc_dhcpd/isc_dhcpd_test.go index 24540ea2f..0d9b67386 100644 --- a/src/go/plugin/go.d/modules/isc_dhcpd/isc_dhcpd_test.go +++ b/src/go/plugin/go.d/modules/isc_dhcpd/isc_dhcpd_test.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly + package isc_dhcpd import ( @@ -226,32 +228,16 @@ func TestDHCPd_Collect(t *testing.T) { dhcpd := test.prepare() require.NoError(t, dhcpd.Init()) - collected := dhcpd.Collect() + mx := dhcpd.Collect() - assert.Equal(t, test.wantCollected, collected) - if len(collected) > 0 { - ensureCollectedHasAllChartsDimsVarsIDs(t, dhcpd, collected) + assert.Equal(t, test.wantCollected, mx) + if len(mx) > 0 { + module.TestMetricsHasAllChartsDims(t, dhcpd.Charts(), mx) } }) } } -func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, dhcpd *DHCPd, collected map[string]int64) { - for _, chart := range *dhcpd.Charts() { - if chart.Obsolete { - continue - } - for _, dim := range 
chart.Dims { - _, ok := collected[dim.ID] - assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) - } - for _, v := range chart.Vars { - _, ok := collected[v.ID] - assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) - } - } -} - func prepareDHCPdLeasesNotExists() *DHCPd { dhcpd := New() dhcpd.Config = Config{ diff --git a/src/go/plugin/go.d/modules/isc_dhcpd/metadata.yaml b/src/go/plugin/go.d/modules/isc_dhcpd/metadata.yaml index 09eee81d0..14efca465 100644 --- a/src/go/plugin/go.d/modules/isc_dhcpd/metadata.yaml +++ b/src/go/plugin/go.d/modules/isc_dhcpd/metadata.yaml @@ -35,7 +35,7 @@ modules: description: "" multi_instance: true supported_platforms: - include: [] + include: [Linux, BSD] exclude: [] setup: prerequisites: diff --git a/src/go/plugin/go.d/modules/isc_dhcpd/parse.go b/src/go/plugin/go.d/modules/isc_dhcpd/parse.go index cb4161745..176c7e577 100644 --- a/src/go/plugin/go.d/modules/isc_dhcpd/parse.go +++ b/src/go/plugin/go.d/modules/isc_dhcpd/parse.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly + package isc_dhcpd import ( diff --git a/src/go/plugin/go.d/modules/k8s_kubelet/config_schema.json b/src/go/plugin/go.d/modules/k8s_kubelet/config_schema.json index 16f9029a6..380d568af 100644 --- a/src/go/plugin/go.d/modules/k8s_kubelet/config_schema.json +++ b/src/go/plugin/go.d/modules/k8s_kubelet/config_schema.json @@ -105,7 +105,6 @@ "required": [ "url" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/k8s_kubelet/init.go b/src/go/plugin/go.d/modules/k8s_kubelet/init.go index 803cd984c..1d125d442 100644 --- a/src/go/plugin/go.d/modules/k8s_kubelet/init.go +++ b/src/go/plugin/go.d/modules/k8s_kubelet/init.go @@ -26,10 +26,10 @@ func (k *Kubelet) initAuthToken() string { } func (k *Kubelet) initPrometheusClient() (prometheus.Prometheus, error) { - httpClient, err := web.NewHTTPClient(k.Client) + httpClient, err := web.NewHTTPClient(k.ClientConfig) if err != nil { return nil, err } - return prometheus.New(httpClient, k.Request), nil + return prometheus.New(httpClient, k.RequestConfig), nil } diff --git a/src/go/plugin/go.d/modules/k8s_kubelet/integrations/kubelet.md b/src/go/plugin/go.d/modules/k8s_kubelet/integrations/kubelet.md index d92f82be7..d3b37c148 100644 --- a/src/go/plugin/go.d/modules/k8s_kubelet/integrations/kubelet.md +++ b/src/go/plugin/go.d/modules/k8s_kubelet/integrations/kubelet.md @@ -124,8 +124,8 @@ No action required. The configuration file name for this integration is `go.d/k8s_kubelet.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
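The hunks around here all follow one rename: web.HTTP becomes web.HTTPConfig, web.Request/web.Client become web.RequestConfig/web.ClientConfig, and timeouts move from web.Duration to confopt.Duration. A minimal sketch of the migrated config shape is below; the Config struct is a stand-in for the per-module ones (kubelet, ipfs, kubeproxy), the import paths are the ones used in this diff, and the snippet assumes the go.d tree this patch produces. The shell steps for editing the kubelet configuration then follow.

```go
// Minimal sketch (not a complete module) of the renamed config types:
// web.HTTPConfig embeds web.RequestConfig and web.ClientConfig, and the
// timeout is a confopt.Duration. Field promotion keeps cfg.URL and
// cfg.Timeout accessible exactly as before the rename.
package main

import (
	"fmt"
	"time"

	"github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt"
	"github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
)

// Config mirrors the shape the migrated modules use: update_every plus an
// inlined web.HTTPConfig.
type Config struct {
	UpdateEvery    int `yaml:"update_every,omitempty" json:"update_every"`
	web.HTTPConfig `yaml:",inline" json:""`
}

func main() {
	cfg := Config{
		HTTPConfig: web.HTTPConfig{
			RequestConfig: web.RequestConfig{URL: "http://127.0.0.1:10255/metrics"},
			ClientConfig:  web.ClientConfig{Timeout: confopt.Duration(time.Second)},
		},
	}
	fmt.Println(cfg.URL, time.Duration(cfg.Timeout))
}
```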
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/k8s_kubelet/kubelet.go b/src/go/plugin/go.d/modules/k8s_kubelet/kubelet.go index 19fb9dd9e..c0e2e8030 100644 --- a/src/go/plugin/go.d/modules/k8s_kubelet/kubelet.go +++ b/src/go/plugin/go.d/modules/k8s_kubelet/kubelet.go @@ -5,9 +5,11 @@ package k8s_kubelet import ( _ "embed" "errors" + "fmt" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -30,13 +32,13 @@ func init() { func New() *Kubelet { return &Kubelet{ Config: Config{ - HTTP: web.HTTP{ - Request: web.Request{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{ URL: "http://127.0.0.1:10255/metrics", Headers: make(map[string]string), }, - Client: web.Client{ - Timeout: web.Duration(time.Second), + ClientConfig: web.ClientConfig{ + Timeout: confopt.Duration(time.Second), }, }, TokenPath: "/var/run/secrets/kubernetes.io/serviceaccount/token", @@ -48,9 +50,9 @@ func New() *Kubelet { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - web.HTTP `yaml:",inline" json:""` - TokenPath string `yaml:"token_path,omitempty" json:"token_path"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + web.HTTPConfig `yaml:",inline" json:""` + TokenPath string `yaml:"token_path,omitempty" json:"token_path"` } type Kubelet struct { @@ -70,19 +72,17 @@ func (k *Kubelet) Configuration() any { func (k *Kubelet) Init() error { if err := k.validateConfig(); err != nil { - k.Errorf("config validation: %v", err) - return err + return fmt.Errorf("config validation: %v", err) } prom, err := k.initPrometheusClient() if err != nil { - k.Error(err) - return err + return fmt.Errorf("init prometheus client: %v", err) } k.prom = prom if tok := k.initAuthToken(); tok != "" { - k.Request.Headers["Authorization"] = "Bearer " + tok + k.RequestConfig.Headers["Authorization"] = "Bearer " + tok } return nil @@ -91,7 +91,6 @@ func (k *Kubelet) Init() error { func (k *Kubelet) Check() error { mx, err := k.collect() if err != nil { - k.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/k8s_kubelet/kubelet_test.go b/src/go/plugin/go.d/modules/k8s_kubelet/kubelet_test.go index d55ee31a3..1d46b54d7 100644 --- a/src/go/plugin/go.d/modules/k8s_kubelet/kubelet_test.go +++ b/src/go/plugin/go.d/modules/k8s_kubelet/kubelet_test.go @@ -54,12 +54,12 @@ func TestKubelet_Init_ReadServiceAccountToken(t *testing.T) { job.TokenPath = "testdata/token.txt" assert.NoError(t, job.Init()) - assert.Equal(t, "Bearer "+string(dataServiceAccountToken), job.Request.Headers["Authorization"]) + assert.Equal(t, "Bearer "+string(dataServiceAccountToken), job.RequestConfig.Headers["Authorization"]) } func TestKubelet_InitErrorOnCreatingClientWrongTLSCA(t *testing.T) { job := New() - job.Client.TLSConfig.TLSCA = "testdata/tls" + job.ClientConfig.TLSConfig.TLSCA = "testdata/tls" assert.Error(t, job.Init()) } diff --git a/src/go/plugin/go.d/modules/k8s_kubeproxy/config_schema.json b/src/go/plugin/go.d/modules/k8s_kubeproxy/config_schema.json index f5d2d3424..03aeba9e6 100644 --- a/src/go/plugin/go.d/modules/k8s_kubeproxy/config_schema.json +++ b/src/go/plugin/go.d/modules/k8s_kubeproxy/config_schema.json @@ -105,7 +105,6 @@ "required": [ "url" ], - "additionalProperties": false, 
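Two patterns recur in the Go and JSON hunks above and below. First, Init() and Check() stop logging before returning (the removed k.Error(err) calls): errors are now wrapped with context via fmt.Errorf and logged once by the caller. Second, the config schemas drop `"additionalProperties": false`, so unknown keys no longer hard-fail validation. A compact sketch of the first pattern follows, with a stand-in runner in place of the real go.d job scheduler. Note the diff itself mostly formats with %v; %w is used here only to show that the verb also controls errors.Is/errors.As unwrapping.

```go
// Sketch of the revised error contract (the runner below is a stand-in for
// the real go.d scheduler): Init returns a wrapped error instead of logging
// it itself, so each failure is reported exactly once, with context.
package main

import (
	"errors"
	"fmt"
	"log"
)

var errURLNotSet = errors.New("url not set")

func initModule(url string) error {
	if url == "" {
		// Before this patch: m.Error(err); return err -> logged twice.
		// After: wrap and return; the single caller decides how to log.
		return fmt.Errorf("config validation: %w", errURLNotSet)
	}
	return nil
}

func main() {
	if err := initModule(""); err != nil {
		log.Printf("init failed: %v", err) // one log line, full context
	}
}
```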
"patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/k8s_kubeproxy/init.go b/src/go/plugin/go.d/modules/k8s_kubeproxy/init.go index 93e4427e3..8b775f531 100644 --- a/src/go/plugin/go.d/modules/k8s_kubeproxy/init.go +++ b/src/go/plugin/go.d/modules/k8s_kubeproxy/init.go @@ -17,10 +17,10 @@ func (kp *KubeProxy) validateConfig() error { } func (kp *KubeProxy) initPrometheusClient() (prometheus.Prometheus, error) { - httpClient, err := web.NewHTTPClient(kp.Client) + httpClient, err := web.NewHTTPClient(kp.ClientConfig) if err != nil { return nil, err } - return prometheus.New(httpClient, kp.Request), nil + return prometheus.New(httpClient, kp.RequestConfig), nil } diff --git a/src/go/plugin/go.d/modules/k8s_kubeproxy/integrations/kubeproxy.md b/src/go/plugin/go.d/modules/k8s_kubeproxy/integrations/kubeproxy.md index bfeb00b54..a67393946 100644 --- a/src/go/plugin/go.d/modules/k8s_kubeproxy/integrations/kubeproxy.md +++ b/src/go/plugin/go.d/modules/k8s_kubeproxy/integrations/kubeproxy.md @@ -91,8 +91,8 @@ No action required. The configuration file name for this integration is `go.d/k8s_kubeproxy.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/k8s_kubeproxy/kubeproxy.go b/src/go/plugin/go.d/modules/k8s_kubeproxy/kubeproxy.go index 3c9848431..c6ea19bdf 100644 --- a/src/go/plugin/go.d/modules/k8s_kubeproxy/kubeproxy.go +++ b/src/go/plugin/go.d/modules/k8s_kubeproxy/kubeproxy.go @@ -5,9 +5,11 @@ package k8s_kubeproxy import ( _ "embed" "errors" + "fmt" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -30,12 +32,12 @@ func init() { func New() *KubeProxy { return &KubeProxy{ Config: Config{ - HTTP: web.HTTP{ - Request: web.Request{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{ URL: "http://127.0.0.1:10249/metrics", }, - Client: web.Client{ - Timeout: web.Duration(time.Second), + ClientConfig: web.ClientConfig{ + Timeout: confopt.Duration(time.Second), }, }, }, @@ -44,8 +46,8 @@ func New() *KubeProxy { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - web.HTTP `yaml:",inline" json:""` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + web.HTTPConfig `yaml:",inline" json:""` } type KubeProxy struct { @@ -63,14 +65,12 @@ func (kp *KubeProxy) Configuration() any { func (kp *KubeProxy) Init() error { if err := kp.validateConfig(); err != nil { - kp.Errorf("config validation: %v", err) - return err + return fmt.Errorf("config validation: %v", err) } prom, err := kp.initPrometheusClient() if err != nil { - kp.Error(err) - return err + return fmt.Errorf("init prometheus client: %v", err) } kp.prom = prom @@ -80,7 +80,6 @@ func (kp *KubeProxy) Init() error { func (kp *KubeProxy) Check() 
error { mx, err := kp.collect() if err != nil { - kp.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/k8s_state/charts.go b/src/go/plugin/go.d/modules/k8s_state/charts.go index 471d12577..cfc7e027f 100644 --- a/src/go/plugin/go.d/modules/k8s_state/charts.go +++ b/src/go/plugin/go.d/modules/k8s_state/charts.go @@ -43,6 +43,7 @@ const ( prioPodMemLimitsUsed prioPodCondition prioPodPhase + prioPodStatusReason prioPodAge prioPodContainersCount prioPodContainersState @@ -106,6 +107,7 @@ var podChartsTmpl = module.Charts{ podMemLimitsUsedChartTmpl.Copy(), podConditionChartTmpl.Copy(), podPhaseChartTmpl.Copy(), + podStatusReasonChartTmpl.Copy(), podAgeChartTmpl.Copy(), podContainersCountChartTmpl.Copy(), podContainersStateChartTmpl.Copy(), @@ -247,15 +249,24 @@ var ( }, } // condition - nodeConditionsChartTmpl = module.Chart{ - IDSep: true, - ID: "node_%s.condition_status", - Title: "Condition status", - Units: "status", - Fam: "node condition", - Ctx: "k8s_state.node_condition", - Priority: prioNodeConditions, - } + nodeConditionsChartTmpl = func() module.Chart { + chart := module.Chart{ + IDSep: true, + ID: "node_%s.condition_status", + Title: "Condition status", + Units: "status", + Fam: "node condition", + Ctx: "k8s_state.node_condition", + Priority: prioNodeConditions, + } + for _, v := range nodeConditionStatuses { + chart.Dims = append(chart.Dims, &module.Dim{ + ID: "node_%s_cond_" + v, + Name: v, + }) + } + return chart + }() nodeSchedulabilityChartTmpl = module.Chart{ IDSep: true, ID: "node_%s.schedulability", @@ -426,24 +437,6 @@ func (ks *KubeState) removeNodeCharts(ns *nodeState) { } } -func (ks *KubeState) addNodeConditionToCharts(ns *nodeState, cond string) { - id := fmt.Sprintf(nodeConditionsChartTmpl.ID, replaceDots(ns.id())) - c := ks.Charts().Get(id) - if c == nil { - ks.Warningf("chart '%s' does not exist", id) - return - } - dim := &module.Dim{ - ID: fmt.Sprintf("node_%s_cond_%s", ns.id(), strings.ToLower(cond)), - Name: cond, - } - if err := c.AddDim(dim); err != nil { - ks.Warning(err) - return - } - c.MarkNotCreated() -} - var ( podCPURequestsUsedChartTmpl = module.Chart{ IDSep: true, @@ -523,6 +516,24 @@ var ( {ID: "pod_%s_phase_pending", Name: "pending"}, }, } + podStatusReasonChartTmpl = func() module.Chart { + chart := module.Chart{ + IDSep: true, + ID: "pod_%s.status_reason", + Title: "Status reason", + Units: "status", + Fam: "pod status", + Ctx: "k8s_state.pod_status_reason", + Priority: prioPodStatusReason, + } + for _, v := range podStatusReasons { + chart.Dims = append(chart.Dims, &module.Dim{ + ID: "pod_%s_status_reason_" + v, + Name: v, + }) + } + return chart + }() podAgeChartTmpl = module.Chart{ IDSep: true, ID: "pod_%s.age", @@ -681,24 +692,42 @@ var ( {ID: "pod_%s_container_%s_state_terminated", Name: "terminated"}, }, } - containersStateWaitingChartTmpl = module.Chart{ - IDSep: true, - ID: "pod_%s_container_%s.state_waiting_reason", - Title: "Container waiting state reason", - Units: "state", - Fam: "container waiting reason", - Ctx: "k8s_state.pod_container_waiting_state_reason", - Priority: prioPodContainerWaitingStateReason, - } - containersStateTerminatedChartTmpl = module.Chart{ - IDSep: true, - ID: "pod_%s_container_%s.state_terminated_reason", - Title: "Container terminated state reason", - Units: "state", - Fam: "container terminated reason", - Ctx: "k8s_state.pod_container_terminated_state_reason", - Priority: prioPodContainerTerminatedStateReason, - } + containersStateWaitingChartTmpl = func() module.Chart { + 
chart := module.Chart{ + IDSep: true, + ID: "pod_%s_container_%s.state_waiting_reason", + Title: "Container waiting state reason", + Units: "state", + Fam: "container waiting reason", + Ctx: "k8s_state.pod_container_waiting_state_reason", + Priority: prioPodContainerWaitingStateReason, + } + for _, v := range containerWaitingStateReasons { + chart.Dims = append(chart.Dims, &module.Dim{ + ID: "pod_%s_container_%s_state_waiting_reason_" + v, + Name: v, + }) + } + return chart + }() + containersStateTerminatedChartTmpl = func() module.Chart { + chart := module.Chart{ + IDSep: true, + ID: "pod_%s_container_%s.state_terminated_reason", + Title: "Container terminated state reason", + Units: "state", + Fam: "container terminated reason", + Ctx: "k8s_state.pod_container_terminated_state_reason", + Priority: prioPodContainerTerminatedStateReason, + } + for _, v := range containerTerminatedStateReasons { + chart.Dims = append(chart.Dims, &module.Dim{ + ID: "pod_%s_container_%s_state_terminated_reason_" + v, + Name: v, + }) + } + return chart + }() ) func (ks *KubeState) newContainerCharts(ps *podState, cs *containerState) *module.Charts { @@ -728,42 +757,6 @@ func (ks *KubeState) addContainerCharts(ps *podState, cs *containerState) { } } -func (ks *KubeState) addContainerWaitingStateReasonToChart(ps *podState, cs *containerState, reason string) { - id := fmt.Sprintf(containersStateWaitingChartTmpl.ID, replaceDots(ps.id()), cs.name) - c := ks.Charts().Get(id) - if c == nil { - ks.Warningf("chart '%s' does not exist", id) - return - } - dim := &module.Dim{ - ID: fmt.Sprintf("pod_%s_container_%s_state_waiting_reason_%s", ps.id(), cs.name, reason), - Name: reason, - } - if err := c.AddDim(dim); err != nil { - ks.Warning(err) - return - } - c.MarkNotCreated() -} - -func (ks *KubeState) addContainerTerminatedStateReasonToChart(ps *podState, cs *containerState, reason string) { - id := fmt.Sprintf(containersStateTerminatedChartTmpl.ID, replaceDots(ps.id()), cs.name) - c := ks.Charts().Get(id) - if c == nil { - ks.Warningf("chart '%s' does not exist", id) - return - } - dim := &module.Dim{ - ID: fmt.Sprintf("pod_%s_container_%s_state_terminated_reason_%s", ps.id(), cs.name, reason), - Name: reason, - } - if err := c.AddDim(dim); err != nil { - ks.Warning(err) - return - } - c.MarkNotCreated() -} - var discoveryStatusChart = module.Chart{ ID: "discovery_discoverers_state", Title: "Running discoverers state", diff --git a/src/go/plugin/go.d/modules/k8s_state/cluster_meta.go b/src/go/plugin/go.d/modules/k8s_state/cluster_meta.go index e7eb809cc..842b5f8d5 100644 --- a/src/go/plugin/go.d/modules/k8s_state/cluster_meta.go +++ b/src/go/plugin/go.d/modules/k8s_state/cluster_meta.go @@ -3,11 +3,14 @@ package k8s_state import ( + "errors" "fmt" "io" "net/http" "time" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -21,7 +24,7 @@ func (ks *KubeState) getKubeClusterID() string { } func (ks *KubeState) getKubeClusterName() string { - client := http.Client{Timeout: time.Second} + client := &http.Client{Timeout: time.Second} n, err := getGKEKubeClusterName(client) if err != nil { ks.Debugf("error on getting GKE cluster name: %v", err) @@ -29,7 +32,7 @@ func (ks *KubeState) getKubeClusterName() string { return n } -func getGKEKubeClusterName(client http.Client) (string, error) { +func getGKEKubeClusterName(client *http.Client) (string, error) { id, err := doMetaGKEHTTPReq(client, "http://metadata/computeMetadata/v1/project/project-id") if err != nil { 
return "", err @@ -46,39 +49,30 @@ func getGKEKubeClusterName(client http.Client) (string, error) { return fmt.Sprintf("gke_%s_%s_%s", id, loc, name), nil } -func doMetaGKEHTTPReq(client http.Client, url string) (string, error) { +func doMetaGKEHTTPReq(client *http.Client, url string) (string, error) { req, err := http.NewRequest(http.MethodGet, url, nil) if err != nil { return "", err } req.Header.Add("Metadata-Flavor", "Google") - resp, err := client.Do(req) - if err != nil { - return "", err - } - defer closeHTTPRespBody(resp) - if resp.StatusCode != http.StatusOK { - return "", fmt.Errorf("'%s' returned HTTP status code %d", url, resp.StatusCode) - } + var resp string - bs, err := io.ReadAll(resp.Body) - if err != nil { - return "", err - } - - s := string(bs) - if s == "" { - return "", fmt.Errorf("an empty response from '%s'", url) - } + if err := web.DoHTTP(client).Request(req, func(body io.Reader) error { + bs, rerr := io.ReadAll(body) + if rerr != nil { + return rerr + } - return s, nil -} + if resp = string(bs); len(resp) == 0 { + return errors.New("empty response") + } -func closeHTTPRespBody(resp *http.Response) { - if resp != nil && resp.Body != nil { - _, _ = io.Copy(io.Discard, resp.Body) - _ = resp.Body.Close() + return nil + }); err != nil { + return "", err } + + return resp, nil } diff --git a/src/go/plugin/go.d/modules/k8s_state/collect.go b/src/go/plugin/go.d/modules/k8s_state/collect.go index 081a0fdf1..12eb07ab8 100644 --- a/src/go/plugin/go.d/modules/k8s_state/collect.go +++ b/src/go/plugin/go.d/modules/k8s_state/collect.go @@ -5,7 +5,7 @@ package k8s_state import ( "errors" "fmt" - "strings" + "slices" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" @@ -15,6 +15,48 @@ import ( const precision = 1000 +var ( + podStatusReasons = []string{ + "Evicted", + "NodeAffinity", + "NodeLost", + "Shutdown", + "UnexpectedAdmissionError", + "Other", + } + + containerWaitingStateReasons = []string{ + "ContainerCreating", + "CrashLoopBackOff", + "CreateContainerConfigError", + "CreateContainerError", + "ErrImagePull", + "ImagePullBackOff", + "InvalidImageName", + "PodInitializing", + "Other", + } + containerTerminatedStateReasons = []string{ + "Completed", + "ContainerCannotRun", + "DeadlineExceeded", + "Error", + "Evicted", + "OOMKilled", + "Other", + } +) + +var ( + nodeConditionStatuses = []string{ + "Ready", + "DiskPressure", + "MemoryPressure", + "NetworkUnavailable", + "PIDPressure", + } +) + func (ks *KubeState) collect() (map[string]int64, error) { if ks.discoverer == nil { return nil, errors.New("nil discoverer") @@ -32,6 +74,7 @@ func (ks *KubeState) collect() (map[string]int64, error) { ks.kubeClusterID = ks.getKubeClusterID() ks.kubeClusterName = ks.getKubeClusterName() + if chart := ks.Charts().Get(discoveryStatusChart.ID); chart != nil { chart.Labels = []module.Label{ {Key: labelKeyClusterID, Value: ks.kubeClusterID, Source: module.LabelSourceK8s}, @@ -68,7 +111,7 @@ func (ks *KubeState) collectKubeState(mx map[string]int64) { func (ks *KubeState) collectPodsState(mx map[string]int64) { now := time.Now() for _, ps := range ks.state.pods { - // Skip cronjobs (each of them is a unique container because name contains hash) + // Skip cronjobs (each of them is a unique container because the name contains hash) // to avoid overwhelming Netdata with high cardinality metrics. 
// Related issue https://github.com/netdata/netdata/issues/16412 if ps.controllerKind == "Job" { @@ -80,6 +123,7 @@ func (ks *KubeState) collectPodsState(mx map[string]int64) { ks.removePodCharts(ps) continue } + if ps.new { ps.new = false ks.addPodCharts(ps) @@ -106,12 +150,14 @@ func (ks *KubeState) collectPodsState(mx map[string]int64) { ns.stats.podsPhaseRunning += boolToInt(ps.phase == corev1.PodRunning) ns.stats.podsPhaseSucceeded += boolToInt(ps.phase == corev1.PodSucceeded) ns.stats.podsPhaseFailed += boolToInt(ps.phase == corev1.PodFailed) + for _, cs := range ps.initContainers { ns.stats.initContainers++ ns.stats.initContStateRunning += boolToInt(cs.stateRunning) ns.stats.initContStateWaiting += boolToInt(cs.stateWaiting) ns.stats.initContStateTerminated += boolToInt(cs.stateTerminated) } + for _, cs := range ps.containers { ns.stats.containers++ ns.stats.contStateRunning += boolToInt(cs.stateRunning) @@ -131,6 +177,17 @@ func (ks *KubeState) collectPodsState(mx map[string]int64) { mx[px+"phase_succeeded"] = boolToInt(ps.phase == corev1.PodSucceeded) mx[px+"phase_pending"] = boolToInt(ps.phase == corev1.PodPending) mx[px+"age"] = int64(now.Sub(ps.creationTime).Seconds()) + + for _, v := range podStatusReasons { + mx[px+"status_reason_"+v] = 0 + } + if v := ps.statusReason; v != "" { + if !slices.Contains(podStatusReasons, v) { + v = "Other" + } + mx[px+"status_reason_"+v] = 1 + } + mx[px+"cpu_requests_used"] = ps.reqCPU mx[px+"cpu_limits_used"] = ps.limitCPU mx[px+"mem_requests_used"] = ps.reqMem @@ -142,6 +199,7 @@ func (ks *KubeState) collectPodsState(mx map[string]int64) { mx[px+"init_containers_state_running"] = 0 mx[px+"init_containers_state_waiting"] = 0 mx[px+"init_containers_state_terminated"] = 0 + for _, cs := range ps.initContainers { mx[px+"init_containers_state_running"] += boolToInt(cs.stateRunning) mx[px+"init_containers_state_waiting"] += boolToInt(cs.stateWaiting) @@ -150,6 +208,7 @@ func (ks *KubeState) collectPodsState(mx map[string]int64) { mx[px+"containers_state_running"] = 0 mx[px+"containers_state_waiting"] = 0 mx[px+"containers_state_terminated"] = 0 + for _, cs := range ps.containers { if cs.new { cs.new = false @@ -165,19 +224,25 @@ func (ks *KubeState) collectPodsState(mx map[string]int64) { mx[ppx+"state_terminated"] = boolToInt(cs.stateTerminated) mx[ppx+"readiness"] = boolToInt(cs.ready) mx[ppx+"restarts"] = cs.restarts - for _, r := range cs.stateWaitingReasons { - if r.new { - r.new = false - ks.addContainerWaitingStateReasonToChart(ps, cs, r.reason) + + for _, v := range containerWaitingStateReasons { + mx[ppx+"state_waiting_reason_"+v] = 0 + } + if v := cs.waitingReason; v != "" { + if !slices.Contains(containerWaitingStateReasons, v) { + v = "Other" } - mx[ppx+"state_waiting_reason_"+r.reason] = boolToInt(r.active) + mx[ppx+"state_waiting_reason_"+v] = 1 + } + + for _, v := range containerTerminatedStateReasons { + mx[ppx+"state_terminated_reason_"+v] = 0 } - for _, r := range cs.stateTerminatedReasons { - if r.new { - r.new = false - ks.addContainerTerminatedStateReasonToChart(ps, cs, r.reason) + if v := cs.terminatedReason; v != "" { + if !slices.Contains(containerTerminatedStateReasons, v) { + v = "Other" } - mx[ppx+"state_terminated_reason_"+r.reason] = boolToInt(r.active) + mx[ppx+"state_terminated_reason_"+v] = 1 } } } @@ -198,12 +263,11 @@ func (ks *KubeState) collectNodesState(mx map[string]int64) { px := fmt.Sprintf("node_%s_", ns.id()) - for typ, cond := range ns.conditions { - if cond.new { - cond.new = false - 
ks.addNodeConditionToCharts(ns, typ) - } - mx[px+"cond_"+strings.ToLower(typ)] = condStatusToInt(cond.status) + for _, v := range nodeConditionStatuses { + mx[px+"cond_"+v] = 0 + } + for _, v := range ns.conditions { + mx[px+"cond_"+string(v.Type)] = condStatusToInt(v.Status) } mx[px+"age"] = int64(now.Sub(ns.creationTime).Seconds()) diff --git a/src/go/plugin/go.d/modules/k8s_state/config_schema.json b/src/go/plugin/go.d/modules/k8s_state/config_schema.json index ae66d7cb5..77e7bbf56 100644 --- a/src/go/plugin/go.d/modules/k8s_state/config_schema.json +++ b/src/go/plugin/go.d/modules/k8s_state/config_schema.json @@ -12,7 +12,6 @@ "default": 1 } }, - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/k8s_state/discover_kubernetes.go b/src/go/plugin/go.d/modules/k8s_state/discover_kubernetes.go index 5d435871a..4f13fee0a 100644 --- a/src/go/plugin/go.d/modules/k8s_state/discover_kubernetes.go +++ b/src/go/plugin/go.d/modules/k8s_state/discover_kubernetes.go @@ -132,7 +132,7 @@ func (d *kubeDiscovery) setupDiscoverers(ctx context.Context) []discoverer { } } -func enqueue(queue *workqueue.Type, obj interface{}) { +func enqueue(queue *workqueue.Typed[any], obj any) { key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) if err != nil { return diff --git a/src/go/plugin/go.d/modules/k8s_state/discover_node.go b/src/go/plugin/go.d/modules/k8s_state/discover_node.go index 1d91436c8..ec7a774e9 100644 --- a/src/go/plugin/go.d/modules/k8s_state/discover_node.go +++ b/src/go/plugin/go.d/modules/k8s_state/discover_node.go @@ -16,11 +16,12 @@ func newNodeDiscoverer(si cache.SharedInformer, l *logger.Logger) *nodeDiscovere panic("nil node shared informer") } - queue := workqueue.NewWithConfig(workqueue.QueueConfig{Name: "node"}) + queue := workqueue.NewTypedWithConfig(workqueue.TypedQueueConfig[any]{Name: "node"}) + _, _ = si.AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { enqueue(queue, obj) }, - UpdateFunc: func(_, obj interface{}) { enqueue(queue, obj) }, - DeleteFunc: func(obj interface{}) { enqueue(queue, obj) }, + AddFunc: func(obj any) { enqueue(queue, obj) }, + UpdateFunc: func(_, obj any) { enqueue(queue, obj) }, + DeleteFunc: func(obj any) { enqueue(queue, obj) }, }) return &nodeDiscoverer{ @@ -34,17 +35,17 @@ func newNodeDiscoverer(si cache.SharedInformer, l *logger.Logger) *nodeDiscovere type nodeResource struct { src string - val interface{} + val any } func (r nodeResource) source() string { return r.src } func (r nodeResource) kind() kubeResourceKind { return kubeResourceNode } -func (r nodeResource) value() interface{} { return r.val } +func (r nodeResource) value() any { return r.val } type nodeDiscoverer struct { *logger.Logger informer cache.SharedInformer - queue *workqueue.Type + queue *workqueue.Typed[any] readyCh chan struct{} stopCh chan struct{} } diff --git a/src/go/plugin/go.d/modules/k8s_state/discover_pod.go b/src/go/plugin/go.d/modules/k8s_state/discover_pod.go index 53e9ceb92..eac652d2b 100644 --- a/src/go/plugin/go.d/modules/k8s_state/discover_pod.go +++ b/src/go/plugin/go.d/modules/k8s_state/discover_pod.go @@ -16,11 +16,12 @@ func newPodDiscoverer(si cache.SharedInformer, l *logger.Logger) *podDiscoverer panic("nil pod shared informer") } - queue := workqueue.NewWithConfig(workqueue.QueueConfig{Name: "pod"}) + queue := workqueue.NewTypedWithConfig(workqueue.TypedQueueConfig[any]{Name: "pod"}) + _, _ = si.AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj 
interface{}) { enqueue(queue, obj) }, - UpdateFunc: func(_, obj interface{}) { enqueue(queue, obj) }, - DeleteFunc: func(obj interface{}) { enqueue(queue, obj) }, + AddFunc: func(obj any) { enqueue(queue, obj) }, + UpdateFunc: func(_, obj any) { enqueue(queue, obj) }, + DeleteFunc: func(obj any) { enqueue(queue, obj) }, }) return &podDiscoverer{ @@ -34,17 +35,17 @@ func newPodDiscoverer(si cache.SharedInformer, l *logger.Logger) *podDiscoverer type podResource struct { src string - val interface{} + val any } func (r podResource) source() string { return r.src } func (r podResource) kind() kubeResourceKind { return kubeResourcePod } -func (r podResource) value() interface{} { return r.val } +func (r podResource) value() any { return r.val } type podDiscoverer struct { *logger.Logger informer cache.SharedInformer - queue *workqueue.Type + queue *workqueue.Typed[any] readyCh chan struct{} stopCh chan struct{} } diff --git a/src/go/plugin/go.d/modules/k8s_state/integrations/kubernetes_cluster_state.md b/src/go/plugin/go.d/modules/k8s_state/integrations/kubernetes_cluster_state.md index 5f5e36f87..8d2aaf767 100644 --- a/src/go/plugin/go.d/modules/k8s_state/integrations/kubernetes_cluster_state.md +++ b/src/go/plugin/go.d/modules/k8s_state/integrations/kubernetes_cluster_state.md @@ -80,7 +80,7 @@ Metrics: | k8s_state.node_allocatable_mem_limits_used | limits | bytes | | k8s_state.node_allocatable_pods_utilization | allocated | % | | k8s_state.node_allocatable_pods_usage | available, allocated | pods | -| k8s_state.node_condition | a dimension per condition | status | +| k8s_state.node_condition | Ready, DiskPressure, MemoryPressure, NetworkUnavailable, PIDPressure | status | | k8s_state.node_schedulability | schedulable, unschedulable | state | | k8s_state.node_pods_readiness | ready | % | | k8s_state.node_pods_readiness_state | ready, unready | pods | @@ -118,6 +118,7 @@ Metrics: | k8s_state.pod_mem_limits_used | limits | bytes | | k8s_state.pod_condition | pod_ready, pod_scheduled, pod_initialized, containers_ready | state | | k8s_state.pod_phase | running, failed, succeeded, pending | state | +| k8s_state.pod_status_reason | Evicted, NodeAffinity, NodeLost, Shutdown, UnexpectedAdmissionError, Other | status | | k8s_state.pod_age | age | seconds | | k8s_state.pod_containers | containers, init_containers | containers | | k8s_state.pod_containers_state | running, waiting, terminated | containers | @@ -148,8 +149,8 @@ Metrics: | k8s_state.pod_container_readiness_state | ready | state | | k8s_state.pod_container_restarts | restarts | restarts | | k8s_state.pod_container_state | running, waiting, terminated | state | -| k8s_state.pod_container_waiting_state_reason | a dimension per reason | state | -| k8s_state.pod_container_terminated_state_reason | a dimension per reason | state | +| k8s_state.pod_container_waiting_state_reason | ContainerCreating, CrashLoopBackOff, CreateContainerConfigError, CreateContainerError, ErrImagePull, ImagePullBackOff, InvalidImageName, PodInitializing, Other | state | +| k8s_state.pod_container_terminated_state_reason | Completed, ContainerCannotRun, DeadlineExceeded, Error, Evicted, OOMKilled, Other | state | @@ -171,8 +172,8 @@ No action required. The configuration file name for this integration is `go.d/k8s_state.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/k8s_state/kube_state.go b/src/go/plugin/go.d/modules/k8s_state/kube_state.go index 26962928e..1247aa4dc 100644 --- a/src/go/plugin/go.d/modules/k8s_state/kube_state.go +++ b/src/go/plugin/go.d/modules/k8s_state/kube_state.go @@ -80,8 +80,7 @@ func (ks *KubeState) Configuration() any { func (ks *KubeState) Init() error { client, err := ks.initClient() if err != nil { - ks.Errorf("client initialization: %v", err) - return err + return fmt.Errorf("init k8s client: %v", err) } ks.client = client @@ -94,15 +93,12 @@ func (ks *KubeState) Init() error { func (ks *KubeState) Check() error { if ks.client == nil || ks.discoverer == nil { - ks.Error("not initialized job") return errors.New("not initialized") } ver, err := ks.client.Discovery().ServerVersion() if err != nil { - err := fmt.Errorf("failed to connect to K8s API server: %v", err) - ks.Error(err) - return err + return fmt.Errorf("failed to connect to K8s API server: %v", err) } ks.Infof("successfully connected to the Kubernetes API server '%s'", ver) diff --git a/src/go/plugin/go.d/modules/k8s_state/kube_state_test.go b/src/go/plugin/go.d/modules/k8s_state/kube_state_test.go index cf52c08b6..be7e42723 100644 --- a/src/go/plugin/go.d/modules/k8s_state/kube_state_test.go +++ b/src/go/plugin/go.d/modules/k8s_state/kube_state_test.go @@ -51,7 +51,7 @@ func TestKubeState_Init(t *testing.T) { wantFail: false, prepare: func() *KubeState { ks := New() - ks.newKubeClient = func() (kubernetes.Interface, error) { return fake.NewSimpleClientset(), nil } + ks.newKubeClient = func() (kubernetes.Interface, error) { return fake.NewClientset(), nil } return ks }, }, @@ -87,7 +87,7 @@ func TestKubeState_Check(t *testing.T) { wantFail: false, prepare: func() *KubeState { ks := New() - ks.newKubeClient = func() (kubernetes.Interface, error) { return fake.NewSimpleClientset(), nil } + ks.newKubeClient = func() (kubernetes.Interface, error) { return fake.NewClientset(), nil } return ks }, }, @@ -95,7 +95,7 @@ func TestKubeState_Check(t *testing.T) { wantFail: true, prepare: func() *KubeState { ks := New() - client := &brokenInfoKubeClient{fake.NewSimpleClientset()} + client := &brokenInfoKubeClient{fake.NewClientset()} ks.newKubeClient = func() (kubernetes.Interface, error) { return client, nil } return ks }, @@ -133,7 +133,7 @@ func TestKubeState_Cleanup(t *testing.T) { doCollect: false, prepare: func() *KubeState { ks := New() - ks.newKubeClient = func() (kubernetes.Interface, error) { return fake.NewSimpleClientset(), nil } + ks.newKubeClient = func() (kubernetes.Interface, error) { return fake.NewClientset(), nil } return ks }, }, @@ -142,7 +142,7 @@ func TestKubeState_Cleanup(t *testing.T) { doCollect: false, prepare: func() *KubeState { ks := New() - ks.newKubeClient = func() (kubernetes.Interface, error) { return fake.NewSimpleClientset(), nil } + ks.newKubeClient = func() (kubernetes.Interface, error) { return fake.NewClientset(), nil } return ks }, }, @@ -151,7 +151,7 @@ func TestKubeState_Cleanup(t *testing.T) { doCollect: true, prepare: func() *KubeState { ks := New() - ks.newKubeClient = 
func() (kubernetes.Interface, error) { return fake.NewSimpleClientset(), nil } + ks.newKubeClient = func() (kubernetes.Interface, error) { return fake.NewClientset(), nil } return ks }, }, @@ -192,7 +192,7 @@ func TestKubeState_Collect(t *testing.T) { }{ "Node only": { create: func(t *testing.T) testCase { - client := fake.NewSimpleClientset( + client := fake.NewClientset( newNode("node01"), ) @@ -213,11 +213,11 @@ func TestKubeState_Collect(t *testing.T) { "node_node01_alloc_pods_allocated": 0, "node_node01_alloc_pods_available": 110, "node_node01_alloc_pods_util": 0, - "node_node01_cond_diskpressure": 0, - "node_node01_cond_memorypressure": 0, - "node_node01_cond_networkunavailable": 0, - "node_node01_cond_pidpressure": 0, - "node_node01_cond_ready": 1, + "node_node01_cond_DiskPressure": 0, + "node_node01_cond_MemoryPressure": 0, + "node_node01_cond_NetworkUnavailable": 0, + "node_node01_cond_PIDPressure": 0, + "node_node01_cond_Ready": 1, "node_node01_schedulability_schedulable": 1, "node_node01_schedulability_unschedulable": 0, "node_node01_containers": 0, @@ -240,12 +240,15 @@ func TestKubeState_Collect(t *testing.T) { "node_node01_pods_readiness_ready": 0, "node_node01_pods_readiness_unready": 0, } + copyAge(expected, mx) + assert.Equal(t, expected, mx) assert.Equal(t, len(nodeChartsTmpl)+len(baseCharts), len(*ks.Charts()), ) + module.TestMetricsHasAllChartsDims(t, ks.Charts(), mx) } return testCase{ @@ -257,47 +260,86 @@ func TestKubeState_Collect(t *testing.T) { "Pod only": { create: func(t *testing.T) testCase { pod := newPod("node01", "pod01") - client := fake.NewSimpleClientset( + client := fake.NewClientset( pod, ) step1 := func(t *testing.T, ks *KubeState) { mx := ks.Collect() expected := map[string]int64{ - "discovery_node_discoverer_state": 1, - "discovery_pod_discoverer_state": 1, - "pod_default_pod01_age": 3, - "pod_default_pod01_cpu_limits_used": 400, - "pod_default_pod01_cpu_requests_used": 200, - "pod_default_pod01_mem_limits_used": 419430400, - "pod_default_pod01_mem_requests_used": 209715200, - "pod_default_pod01_cond_containersready": 1, - "pod_default_pod01_cond_podinitialized": 1, - "pod_default_pod01_cond_podready": 1, - "pod_default_pod01_cond_podscheduled": 1, - "pod_default_pod01_container_container1_readiness": 1, - "pod_default_pod01_container_container1_restarts": 0, - "pod_default_pod01_container_container1_state_running": 1, - "pod_default_pod01_container_container1_state_terminated": 0, - "pod_default_pod01_container_container1_state_waiting": 0, - "pod_default_pod01_container_container2_readiness": 1, - "pod_default_pod01_container_container2_restarts": 0, - "pod_default_pod01_container_container2_state_running": 1, - "pod_default_pod01_container_container2_state_terminated": 0, - "pod_default_pod01_container_container2_state_waiting": 0, - "pod_default_pod01_containers": 2, - "pod_default_pod01_containers_state_running": 2, - "pod_default_pod01_containers_state_terminated": 0, - "pod_default_pod01_containers_state_waiting": 0, - "pod_default_pod01_init_containers": 1, - "pod_default_pod01_init_containers_state_running": 0, - "pod_default_pod01_init_containers_state_terminated": 1, - "pod_default_pod01_init_containers_state_waiting": 0, - "pod_default_pod01_phase_failed": 0, - "pod_default_pod01_phase_pending": 0, - "pod_default_pod01_phase_running": 1, - "pod_default_pod01_phase_succeeded": 0, + "discovery_node_discoverer_state": 1, + "discovery_pod_discoverer_state": 1, + "pod_default_pod01_age": 3, + "pod_default_pod01_cond_containersready": 1, + 
"pod_default_pod01_cond_podinitialized": 1, + "pod_default_pod01_cond_podready": 1, + "pod_default_pod01_cond_podscheduled": 1, + "pod_default_pod01_container_container1_readiness": 1, + "pod_default_pod01_container_container1_restarts": 0, + "pod_default_pod01_container_container1_state_running": 1, + "pod_default_pod01_container_container1_state_terminated": 0, + "pod_default_pod01_container_container1_state_terminated_reason_Completed": 0, + "pod_default_pod01_container_container1_state_terminated_reason_ContainerCannotRun": 0, + "pod_default_pod01_container_container1_state_terminated_reason_DeadlineExceeded": 0, + "pod_default_pod01_container_container1_state_terminated_reason_Error": 0, + "pod_default_pod01_container_container1_state_terminated_reason_Evicted": 0, + "pod_default_pod01_container_container1_state_terminated_reason_OOMKilled": 0, + "pod_default_pod01_container_container1_state_terminated_reason_Other": 0, + "pod_default_pod01_container_container1_state_waiting": 0, + "pod_default_pod01_container_container1_state_waiting_reason_ContainerCreating": 0, + "pod_default_pod01_container_container1_state_waiting_reason_CrashLoopBackOff": 0, + "pod_default_pod01_container_container1_state_waiting_reason_CreateContainerConfigError": 0, + "pod_default_pod01_container_container1_state_waiting_reason_CreateContainerError": 0, + "pod_default_pod01_container_container1_state_waiting_reason_ErrImagePull": 0, + "pod_default_pod01_container_container1_state_waiting_reason_ImagePullBackOff": 0, + "pod_default_pod01_container_container1_state_waiting_reason_InvalidImageName": 0, + "pod_default_pod01_container_container1_state_waiting_reason_Other": 0, + "pod_default_pod01_container_container1_state_waiting_reason_PodInitializing": 0, + "pod_default_pod01_container_container2_readiness": 1, + "pod_default_pod01_container_container2_restarts": 0, + "pod_default_pod01_container_container2_state_running": 1, + "pod_default_pod01_container_container2_state_terminated": 0, + "pod_default_pod01_container_container2_state_terminated_reason_Completed": 0, + "pod_default_pod01_container_container2_state_terminated_reason_ContainerCannotRun": 0, + "pod_default_pod01_container_container2_state_terminated_reason_DeadlineExceeded": 0, + "pod_default_pod01_container_container2_state_terminated_reason_Error": 0, + "pod_default_pod01_container_container2_state_terminated_reason_Evicted": 0, + "pod_default_pod01_container_container2_state_terminated_reason_OOMKilled": 0, + "pod_default_pod01_container_container2_state_terminated_reason_Other": 0, + "pod_default_pod01_container_container2_state_waiting": 0, + "pod_default_pod01_container_container2_state_waiting_reason_ContainerCreating": 0, + "pod_default_pod01_container_container2_state_waiting_reason_CrashLoopBackOff": 0, + "pod_default_pod01_container_container2_state_waiting_reason_CreateContainerConfigError": 0, + "pod_default_pod01_container_container2_state_waiting_reason_CreateContainerError": 0, + "pod_default_pod01_container_container2_state_waiting_reason_ErrImagePull": 0, + "pod_default_pod01_container_container2_state_waiting_reason_ImagePullBackOff": 0, + "pod_default_pod01_container_container2_state_waiting_reason_InvalidImageName": 0, + "pod_default_pod01_container_container2_state_waiting_reason_Other": 0, + "pod_default_pod01_container_container2_state_waiting_reason_PodInitializing": 0, + "pod_default_pod01_containers": 2, + "pod_default_pod01_containers_state_running": 2, + "pod_default_pod01_containers_state_terminated": 0, + 
"pod_default_pod01_containers_state_waiting": 0, + "pod_default_pod01_cpu_limits_used": 400, + "pod_default_pod01_cpu_requests_used": 200, + "pod_default_pod01_init_containers": 1, + "pod_default_pod01_init_containers_state_running": 0, + "pod_default_pod01_init_containers_state_terminated": 1, + "pod_default_pod01_init_containers_state_waiting": 0, + "pod_default_pod01_mem_limits_used": 419430400, + "pod_default_pod01_mem_requests_used": 209715200, + "pod_default_pod01_phase_failed": 0, + "pod_default_pod01_phase_pending": 0, + "pod_default_pod01_phase_running": 1, + "pod_default_pod01_phase_succeeded": 0, + "pod_default_pod01_status_reason_Evicted": 0, + "pod_default_pod01_status_reason_NodeAffinity": 0, + "pod_default_pod01_status_reason_NodeLost": 0, + "pod_default_pod01_status_reason_Other": 0, + "pod_default_pod01_status_reason_Shutdown": 0, + "pod_default_pod01_status_reason_UnexpectedAdmissionError": 0, } + copyAge(expected, mx) assert.Equal(t, expected, mx) @@ -305,6 +347,7 @@ func TestKubeState_Collect(t *testing.T) { len(podChartsTmpl)+len(containerChartsTmpl)*len(pod.Spec.Containers)+len(baseCharts), len(*ks.Charts()), ) + module.TestMetricsHasAllChartsDims(t, ks.Charts(), mx) } return testCase{ @@ -317,7 +360,7 @@ func TestKubeState_Collect(t *testing.T) { create: func(t *testing.T) testCase { node := newNode("node01") pod := newPod(node.Name, "pod01") - client := fake.NewSimpleClientset( + client := fake.NewClientset( node, pod, ) @@ -325,78 +368,117 @@ func TestKubeState_Collect(t *testing.T) { step1 := func(t *testing.T, ks *KubeState) { mx := ks.Collect() expected := map[string]int64{ - "discovery_node_discoverer_state": 1, - "discovery_pod_discoverer_state": 1, - "node_node01_age": 3, - "node_node01_alloc_cpu_limits_used": 400, - "node_node01_alloc_cpu_limits_util": 11428, - "node_node01_alloc_cpu_requests_used": 200, - "node_node01_alloc_cpu_requests_util": 5714, - "node_node01_alloc_mem_limits_used": 419430400, - "node_node01_alloc_mem_limits_util": 11428, - "node_node01_alloc_mem_requests_used": 209715200, - "node_node01_alloc_mem_requests_util": 5714, - "node_node01_alloc_pods_allocated": 1, - "node_node01_alloc_pods_available": 109, - "node_node01_alloc_pods_util": 909, - "node_node01_cond_diskpressure": 0, - "node_node01_cond_memorypressure": 0, - "node_node01_cond_networkunavailable": 0, - "node_node01_cond_pidpressure": 0, - "node_node01_cond_ready": 1, - "node_node01_schedulability_schedulable": 1, - "node_node01_schedulability_unschedulable": 0, - "node_node01_containers": 2, - "node_node01_containers_state_running": 2, - "node_node01_containers_state_terminated": 0, - "node_node01_containers_state_waiting": 0, - "node_node01_init_containers": 1, - "node_node01_init_containers_state_running": 0, - "node_node01_init_containers_state_terminated": 1, - "node_node01_init_containers_state_waiting": 0, - "node_node01_pods_cond_containersready": 1, - "node_node01_pods_cond_podinitialized": 1, - "node_node01_pods_cond_podready": 1, - "node_node01_pods_cond_podscheduled": 1, - "node_node01_pods_phase_failed": 0, - "node_node01_pods_phase_pending": 0, - "node_node01_pods_phase_running": 1, - "node_node01_pods_phase_succeeded": 0, - "node_node01_pods_readiness": 100000, - "node_node01_pods_readiness_ready": 1, - "node_node01_pods_readiness_unready": 0, - "pod_default_pod01_age": 3, - "pod_default_pod01_cpu_limits_used": 400, - "pod_default_pod01_cpu_requests_used": 200, - "pod_default_pod01_mem_limits_used": 419430400, - "pod_default_pod01_mem_requests_used": 209715200, - 
"pod_default_pod01_cond_containersready": 1, - "pod_default_pod01_cond_podinitialized": 1, - "pod_default_pod01_cond_podready": 1, - "pod_default_pod01_cond_podscheduled": 1, - "pod_default_pod01_container_container1_readiness": 1, - "pod_default_pod01_container_container1_restarts": 0, - "pod_default_pod01_container_container1_state_running": 1, - "pod_default_pod01_container_container1_state_terminated": 0, - "pod_default_pod01_container_container1_state_waiting": 0, - "pod_default_pod01_container_container2_readiness": 1, - "pod_default_pod01_container_container2_restarts": 0, - "pod_default_pod01_container_container2_state_running": 1, - "pod_default_pod01_container_container2_state_terminated": 0, - "pod_default_pod01_container_container2_state_waiting": 0, - "pod_default_pod01_containers": 2, - "pod_default_pod01_containers_state_running": 2, - "pod_default_pod01_containers_state_terminated": 0, - "pod_default_pod01_containers_state_waiting": 0, - "pod_default_pod01_init_containers": 1, - "pod_default_pod01_init_containers_state_running": 0, - "pod_default_pod01_init_containers_state_terminated": 1, - "pod_default_pod01_init_containers_state_waiting": 0, - "pod_default_pod01_phase_failed": 0, - "pod_default_pod01_phase_pending": 0, - "pod_default_pod01_phase_running": 1, - "pod_default_pod01_phase_succeeded": 0, + "discovery_node_discoverer_state": 1, + "discovery_pod_discoverer_state": 1, + "node_node01_age": 3, + "node_node01_alloc_cpu_limits_used": 400, + "node_node01_alloc_cpu_limits_util": 11428, + "node_node01_alloc_cpu_requests_used": 200, + "node_node01_alloc_cpu_requests_util": 5714, + "node_node01_alloc_mem_limits_used": 419430400, + "node_node01_alloc_mem_limits_util": 11428, + "node_node01_alloc_mem_requests_used": 209715200, + "node_node01_alloc_mem_requests_util": 5714, + "node_node01_alloc_pods_allocated": 1, + "node_node01_alloc_pods_available": 109, + "node_node01_alloc_pods_util": 909, + "node_node01_cond_DiskPressure": 0, + "node_node01_cond_MemoryPressure": 0, + "node_node01_cond_NetworkUnavailable": 0, + "node_node01_cond_PIDPressure": 0, + "node_node01_cond_Ready": 1, + "node_node01_containers": 2, + "node_node01_containers_state_running": 2, + "node_node01_containers_state_terminated": 0, + "node_node01_containers_state_waiting": 0, + "node_node01_init_containers": 1, + "node_node01_init_containers_state_running": 0, + "node_node01_init_containers_state_terminated": 1, + "node_node01_init_containers_state_waiting": 0, + "node_node01_pods_cond_containersready": 1, + "node_node01_pods_cond_podinitialized": 1, + "node_node01_pods_cond_podready": 1, + "node_node01_pods_cond_podscheduled": 1, + "node_node01_pods_phase_failed": 0, + "node_node01_pods_phase_pending": 0, + "node_node01_pods_phase_running": 1, + "node_node01_pods_phase_succeeded": 0, + "node_node01_pods_readiness": 100000, + "node_node01_pods_readiness_ready": 1, + "node_node01_pods_readiness_unready": 0, + "node_node01_schedulability_schedulable": 1, + "node_node01_schedulability_unschedulable": 0, + "pod_default_pod01_age": 3, + "pod_default_pod01_cond_containersready": 1, + "pod_default_pod01_cond_podinitialized": 1, + "pod_default_pod01_cond_podready": 1, + "pod_default_pod01_cond_podscheduled": 1, + "pod_default_pod01_container_container1_readiness": 1, + "pod_default_pod01_container_container1_restarts": 0, + "pod_default_pod01_container_container1_state_running": 1, + "pod_default_pod01_container_container1_state_terminated": 0, + 
"pod_default_pod01_container_container1_state_terminated_reason_Completed": 0, + "pod_default_pod01_container_container1_state_terminated_reason_ContainerCannotRun": 0, + "pod_default_pod01_container_container1_state_terminated_reason_DeadlineExceeded": 0, + "pod_default_pod01_container_container1_state_terminated_reason_Error": 0, + "pod_default_pod01_container_container1_state_terminated_reason_Evicted": 0, + "pod_default_pod01_container_container1_state_terminated_reason_OOMKilled": 0, + "pod_default_pod01_container_container1_state_terminated_reason_Other": 0, + "pod_default_pod01_container_container1_state_waiting": 0, + "pod_default_pod01_container_container1_state_waiting_reason_ContainerCreating": 0, + "pod_default_pod01_container_container1_state_waiting_reason_CrashLoopBackOff": 0, + "pod_default_pod01_container_container1_state_waiting_reason_CreateContainerConfigError": 0, + "pod_default_pod01_container_container1_state_waiting_reason_CreateContainerError": 0, + "pod_default_pod01_container_container1_state_waiting_reason_ErrImagePull": 0, + "pod_default_pod01_container_container1_state_waiting_reason_ImagePullBackOff": 0, + "pod_default_pod01_container_container1_state_waiting_reason_InvalidImageName": 0, + "pod_default_pod01_container_container1_state_waiting_reason_Other": 0, + "pod_default_pod01_container_container1_state_waiting_reason_PodInitializing": 0, + "pod_default_pod01_container_container2_readiness": 1, + "pod_default_pod01_container_container2_restarts": 0, + "pod_default_pod01_container_container2_state_running": 1, + "pod_default_pod01_container_container2_state_terminated": 0, + "pod_default_pod01_container_container2_state_terminated_reason_Completed": 0, + "pod_default_pod01_container_container2_state_terminated_reason_ContainerCannotRun": 0, + "pod_default_pod01_container_container2_state_terminated_reason_DeadlineExceeded": 0, + "pod_default_pod01_container_container2_state_terminated_reason_Error": 0, + "pod_default_pod01_container_container2_state_terminated_reason_Evicted": 0, + "pod_default_pod01_container_container2_state_terminated_reason_OOMKilled": 0, + "pod_default_pod01_container_container2_state_terminated_reason_Other": 0, + "pod_default_pod01_container_container2_state_waiting": 0, + "pod_default_pod01_container_container2_state_waiting_reason_ContainerCreating": 0, + "pod_default_pod01_container_container2_state_waiting_reason_CrashLoopBackOff": 0, + "pod_default_pod01_container_container2_state_waiting_reason_CreateContainerConfigError": 0, + "pod_default_pod01_container_container2_state_waiting_reason_CreateContainerError": 0, + "pod_default_pod01_container_container2_state_waiting_reason_ErrImagePull": 0, + "pod_default_pod01_container_container2_state_waiting_reason_ImagePullBackOff": 0, + "pod_default_pod01_container_container2_state_waiting_reason_InvalidImageName": 0, + "pod_default_pod01_container_container2_state_waiting_reason_Other": 0, + "pod_default_pod01_container_container2_state_waiting_reason_PodInitializing": 0, + "pod_default_pod01_containers": 2, + "pod_default_pod01_containers_state_running": 2, + "pod_default_pod01_containers_state_terminated": 0, + "pod_default_pod01_containers_state_waiting": 0, + "pod_default_pod01_cpu_limits_used": 400, + "pod_default_pod01_cpu_requests_used": 200, + "pod_default_pod01_init_containers": 1, + "pod_default_pod01_init_containers_state_running": 0, + "pod_default_pod01_init_containers_state_terminated": 1, + "pod_default_pod01_init_containers_state_waiting": 0, + 
"pod_default_pod01_mem_limits_used": 419430400, + "pod_default_pod01_mem_requests_used": 209715200, + "pod_default_pod01_phase_failed": 0, + "pod_default_pod01_phase_pending": 0, + "pod_default_pod01_phase_running": 1, + "pod_default_pod01_phase_succeeded": 0, + "pod_default_pod01_status_reason_Evicted": 0, + "pod_default_pod01_status_reason_NodeAffinity": 0, + "pod_default_pod01_status_reason_NodeLost": 0, + "pod_default_pod01_status_reason_Other": 0, + "pod_default_pod01_status_reason_Shutdown": 0, + "pod_default_pod01_status_reason_UnexpectedAdmissionError": 0, } + copyAge(expected, mx) assert.Equal(t, expected, mx) @@ -404,6 +486,7 @@ func TestKubeState_Collect(t *testing.T) { len(nodeChartsTmpl)+len(podChartsTmpl)+len(containerChartsTmpl)*len(pod.Spec.Containers)+len(baseCharts), len(*ks.Charts()), ) + module.TestMetricsHasAllChartsDims(t, ks.Charts(), mx) } return testCase{ @@ -417,7 +500,7 @@ func TestKubeState_Collect(t *testing.T) { ctx := context.Background() node := newNode("node01") pod := newPod(node.Name, "pod01") - client := fake.NewSimpleClientset( + client := fake.NewClientset( node, pod, ) @@ -443,11 +526,11 @@ func TestKubeState_Collect(t *testing.T) { "node_node01_alloc_pods_allocated": 0, "node_node01_alloc_pods_available": 110, "node_node01_alloc_pods_util": 0, - "node_node01_cond_diskpressure": 0, - "node_node01_cond_memorypressure": 0, - "node_node01_cond_networkunavailable": 0, - "node_node01_cond_pidpressure": 0, - "node_node01_cond_ready": 1, + "node_node01_cond_DiskPressure": 0, + "node_node01_cond_MemoryPressure": 0, + "node_node01_cond_NetworkUnavailable": 0, + "node_node01_cond_PIDPressure": 0, + "node_node01_cond_Ready": 1, "node_node01_schedulability_schedulable": 1, "node_node01_schedulability_unschedulable": 0, "node_node01_containers": 0, @@ -481,6 +564,7 @@ func TestKubeState_Collect(t *testing.T) { len(podChartsTmpl)+len(containerChartsTmpl)*len(pod.Spec.Containers), calcObsoleteCharts(*ks.Charts()), ) + module.TestMetricsHasAllChartsDims(t, ks.Charts(), mx) } return testCase{ @@ -495,7 +579,7 @@ func TestKubeState_Collect(t *testing.T) { node := newNode("node01") podOrig := newPod(node.Name, "pod01") podOrig.Spec.NodeName = "" - client := fake.NewSimpleClientset( + client := fake.NewClientset( node, podOrig, ) @@ -506,7 +590,7 @@ func TestKubeState_Collect(t *testing.T) { for _, c := range *ks.Charts() { if strings.HasPrefix(c.ID, "pod_") { ok := isLabelValueSet(c, labelKeyNodeName) - assert.Falsef(t, ok, "chart '%s' has not empty %s label", c.ID, labelKeyNodeName) + assert.Falsef(t, ok, "chart '%s' has no empty %s label", c.ID, labelKeyNodeName) } } } @@ -535,7 +619,7 @@ func TestKubeState_Collect(t *testing.T) { node := newNode("node01") pod1 := newPod(node.Name, "pod01") pod2 := newPod(node.Name, "pod02") - client := fake.NewSimpleClientset( + client := fake.NewClientset( node, pod1, ) @@ -547,109 +631,186 @@ func TestKubeState_Collect(t *testing.T) { step2 := func(t *testing.T, ks *KubeState) { mx := ks.Collect() expected := map[string]int64{ - "discovery_node_discoverer_state": 1, - "discovery_pod_discoverer_state": 1, - "node_node01_age": 4, - "node_node01_alloc_cpu_limits_used": 800, - "node_node01_alloc_cpu_limits_util": 22857, - "node_node01_alloc_cpu_requests_used": 400, - "node_node01_alloc_cpu_requests_util": 11428, - "node_node01_alloc_mem_limits_used": 838860800, - "node_node01_alloc_mem_limits_util": 22857, - "node_node01_alloc_mem_requests_used": 419430400, - "node_node01_alloc_mem_requests_util": 11428, - 
"node_node01_alloc_pods_allocated": 2, - "node_node01_alloc_pods_available": 108, - "node_node01_alloc_pods_util": 1818, - "node_node01_cond_diskpressure": 0, - "node_node01_cond_memorypressure": 0, - "node_node01_cond_networkunavailable": 0, - "node_node01_cond_pidpressure": 0, - "node_node01_cond_ready": 1, - "node_node01_schedulability_schedulable": 1, - "node_node01_schedulability_unschedulable": 0, - "node_node01_containers": 4, - "node_node01_containers_state_running": 4, - "node_node01_containers_state_terminated": 0, - "node_node01_containers_state_waiting": 0, - "node_node01_init_containers": 2, - "node_node01_init_containers_state_running": 0, - "node_node01_init_containers_state_terminated": 2, - "node_node01_init_containers_state_waiting": 0, - "node_node01_pods_cond_containersready": 2, - "node_node01_pods_cond_podinitialized": 2, - "node_node01_pods_cond_podready": 2, - "node_node01_pods_cond_podscheduled": 2, - "node_node01_pods_phase_failed": 0, - "node_node01_pods_phase_pending": 0, - "node_node01_pods_phase_running": 2, - "node_node01_pods_phase_succeeded": 0, - "node_node01_pods_readiness": 100000, - "node_node01_pods_readiness_ready": 2, - "node_node01_pods_readiness_unready": 0, - "pod_default_pod01_age": 4, - "pod_default_pod01_cpu_limits_used": 400, - "pod_default_pod01_cpu_requests_used": 200, - "pod_default_pod01_mem_limits_used": 419430400, - "pod_default_pod01_mem_requests_used": 209715200, - "pod_default_pod01_cond_containersready": 1, - "pod_default_pod01_cond_podinitialized": 1, - "pod_default_pod01_cond_podready": 1, - "pod_default_pod01_cond_podscheduled": 1, - "pod_default_pod01_container_container1_readiness": 1, - "pod_default_pod01_container_container1_restarts": 0, - "pod_default_pod01_container_container1_state_running": 1, - "pod_default_pod01_container_container1_state_terminated": 0, - "pod_default_pod01_container_container1_state_waiting": 0, - "pod_default_pod01_container_container2_readiness": 1, - "pod_default_pod01_container_container2_restarts": 0, - "pod_default_pod01_container_container2_state_running": 1, - "pod_default_pod01_container_container2_state_terminated": 0, - "pod_default_pod01_container_container2_state_waiting": 0, - "pod_default_pod01_containers": 2, - "pod_default_pod01_containers_state_running": 2, - "pod_default_pod01_containers_state_terminated": 0, - "pod_default_pod01_containers_state_waiting": 0, - "pod_default_pod01_init_containers": 1, - "pod_default_pod01_init_containers_state_running": 0, - "pod_default_pod01_init_containers_state_terminated": 1, - "pod_default_pod01_init_containers_state_waiting": 0, - "pod_default_pod01_phase_failed": 0, - "pod_default_pod01_phase_pending": 0, - "pod_default_pod01_phase_running": 1, - "pod_default_pod01_phase_succeeded": 0, - "pod_default_pod02_age": 4, - "pod_default_pod02_cpu_limits_used": 400, - "pod_default_pod02_cpu_requests_used": 200, - "pod_default_pod02_mem_limits_used": 419430400, - "pod_default_pod02_mem_requests_used": 209715200, - "pod_default_pod02_cond_containersready": 1, - "pod_default_pod02_cond_podinitialized": 1, - "pod_default_pod02_cond_podready": 1, - "pod_default_pod02_cond_podscheduled": 1, - "pod_default_pod02_container_container1_readiness": 1, - "pod_default_pod02_container_container1_restarts": 0, - "pod_default_pod02_container_container1_state_running": 1, - "pod_default_pod02_container_container1_state_terminated": 0, - "pod_default_pod02_container_container1_state_waiting": 0, - "pod_default_pod02_container_container2_readiness": 1, - 
"pod_default_pod02_container_container2_restarts": 0, - "pod_default_pod02_container_container2_state_running": 1, - "pod_default_pod02_container_container2_state_terminated": 0, - "pod_default_pod02_container_container2_state_waiting": 0, - "pod_default_pod02_containers": 2, - "pod_default_pod02_containers_state_running": 2, - "pod_default_pod02_containers_state_terminated": 0, - "pod_default_pod02_containers_state_waiting": 0, - "pod_default_pod02_init_containers": 1, - "pod_default_pod02_init_containers_state_running": 0, - "pod_default_pod02_init_containers_state_terminated": 1, - "pod_default_pod02_init_containers_state_waiting": 0, - "pod_default_pod02_phase_failed": 0, - "pod_default_pod02_phase_pending": 0, - "pod_default_pod02_phase_running": 1, - "pod_default_pod02_phase_succeeded": 0, + "discovery_node_discoverer_state": 1, + "discovery_pod_discoverer_state": 1, + "node_node01_age": 4, + "node_node01_alloc_cpu_limits_used": 800, + "node_node01_alloc_cpu_limits_util": 22857, + "node_node01_alloc_cpu_requests_used": 400, + "node_node01_alloc_cpu_requests_util": 11428, + "node_node01_alloc_mem_limits_used": 838860800, + "node_node01_alloc_mem_limits_util": 22857, + "node_node01_alloc_mem_requests_used": 419430400, + "node_node01_alloc_mem_requests_util": 11428, + "node_node01_alloc_pods_allocated": 2, + "node_node01_alloc_pods_available": 108, + "node_node01_alloc_pods_util": 1818, + "node_node01_cond_DiskPressure": 0, + "node_node01_cond_MemoryPressure": 0, + "node_node01_cond_NetworkUnavailable": 0, + "node_node01_cond_PIDPressure": 0, + "node_node01_cond_Ready": 1, + "node_node01_containers": 4, + "node_node01_containers_state_running": 4, + "node_node01_containers_state_terminated": 0, + "node_node01_containers_state_waiting": 0, + "node_node01_init_containers": 2, + "node_node01_init_containers_state_running": 0, + "node_node01_init_containers_state_terminated": 2, + "node_node01_init_containers_state_waiting": 0, + "node_node01_pods_cond_containersready": 2, + "node_node01_pods_cond_podinitialized": 2, + "node_node01_pods_cond_podready": 2, + "node_node01_pods_cond_podscheduled": 2, + "node_node01_pods_phase_failed": 0, + "node_node01_pods_phase_pending": 0, + "node_node01_pods_phase_running": 2, + "node_node01_pods_phase_succeeded": 0, + "node_node01_pods_readiness": 100000, + "node_node01_pods_readiness_ready": 2, + "node_node01_pods_readiness_unready": 0, + "node_node01_schedulability_schedulable": 1, + "node_node01_schedulability_unschedulable": 0, + "pod_default_pod01_age": 4, + "pod_default_pod01_cond_containersready": 1, + "pod_default_pod01_cond_podinitialized": 1, + "pod_default_pod01_cond_podready": 1, + "pod_default_pod01_cond_podscheduled": 1, + "pod_default_pod01_container_container1_readiness": 1, + "pod_default_pod01_container_container1_restarts": 0, + "pod_default_pod01_container_container1_state_running": 1, + "pod_default_pod01_container_container1_state_terminated": 0, + "pod_default_pod01_container_container1_state_terminated_reason_Completed": 0, + "pod_default_pod01_container_container1_state_terminated_reason_ContainerCannotRun": 0, + "pod_default_pod01_container_container1_state_terminated_reason_DeadlineExceeded": 0, + "pod_default_pod01_container_container1_state_terminated_reason_Error": 0, + "pod_default_pod01_container_container1_state_terminated_reason_Evicted": 0, + "pod_default_pod01_container_container1_state_terminated_reason_OOMKilled": 0, + "pod_default_pod01_container_container1_state_terminated_reason_Other": 0, + 
"pod_default_pod01_container_container1_state_waiting": 0, + "pod_default_pod01_container_container1_state_waiting_reason_ContainerCreating": 0, + "pod_default_pod01_container_container1_state_waiting_reason_CrashLoopBackOff": 0, + "pod_default_pod01_container_container1_state_waiting_reason_CreateContainerConfigError": 0, + "pod_default_pod01_container_container1_state_waiting_reason_CreateContainerError": 0, + "pod_default_pod01_container_container1_state_waiting_reason_ErrImagePull": 0, + "pod_default_pod01_container_container1_state_waiting_reason_ImagePullBackOff": 0, + "pod_default_pod01_container_container1_state_waiting_reason_InvalidImageName": 0, + "pod_default_pod01_container_container1_state_waiting_reason_Other": 0, + "pod_default_pod01_container_container1_state_waiting_reason_PodInitializing": 0, + "pod_default_pod01_container_container2_readiness": 1, + "pod_default_pod01_container_container2_restarts": 0, + "pod_default_pod01_container_container2_state_running": 1, + "pod_default_pod01_container_container2_state_terminated": 0, + "pod_default_pod01_container_container2_state_terminated_reason_Completed": 0, + "pod_default_pod01_container_container2_state_terminated_reason_ContainerCannotRun": 0, + "pod_default_pod01_container_container2_state_terminated_reason_DeadlineExceeded": 0, + "pod_default_pod01_container_container2_state_terminated_reason_Error": 0, + "pod_default_pod01_container_container2_state_terminated_reason_Evicted": 0, + "pod_default_pod01_container_container2_state_terminated_reason_OOMKilled": 0, + "pod_default_pod01_container_container2_state_terminated_reason_Other": 0, + "pod_default_pod01_container_container2_state_waiting": 0, + "pod_default_pod01_container_container2_state_waiting_reason_ContainerCreating": 0, + "pod_default_pod01_container_container2_state_waiting_reason_CrashLoopBackOff": 0, + "pod_default_pod01_container_container2_state_waiting_reason_CreateContainerConfigError": 0, + "pod_default_pod01_container_container2_state_waiting_reason_CreateContainerError": 0, + "pod_default_pod01_container_container2_state_waiting_reason_ErrImagePull": 0, + "pod_default_pod01_container_container2_state_waiting_reason_ImagePullBackOff": 0, + "pod_default_pod01_container_container2_state_waiting_reason_InvalidImageName": 0, + "pod_default_pod01_container_container2_state_waiting_reason_Other": 0, + "pod_default_pod01_container_container2_state_waiting_reason_PodInitializing": 0, + "pod_default_pod01_containers": 2, + "pod_default_pod01_containers_state_running": 2, + "pod_default_pod01_containers_state_terminated": 0, + "pod_default_pod01_containers_state_waiting": 0, + "pod_default_pod01_cpu_limits_used": 400, + "pod_default_pod01_cpu_requests_used": 200, + "pod_default_pod01_init_containers": 1, + "pod_default_pod01_init_containers_state_running": 0, + "pod_default_pod01_init_containers_state_terminated": 1, + "pod_default_pod01_init_containers_state_waiting": 0, + "pod_default_pod01_mem_limits_used": 419430400, + "pod_default_pod01_mem_requests_used": 209715200, + "pod_default_pod01_phase_failed": 0, + "pod_default_pod01_phase_pending": 0, + "pod_default_pod01_phase_running": 1, + "pod_default_pod01_phase_succeeded": 0, + "pod_default_pod01_status_reason_Evicted": 0, + "pod_default_pod01_status_reason_NodeAffinity": 0, + "pod_default_pod01_status_reason_NodeLost": 0, + "pod_default_pod01_status_reason_Other": 0, + "pod_default_pod01_status_reason_Shutdown": 0, + "pod_default_pod01_status_reason_UnexpectedAdmissionError": 0, + "pod_default_pod02_age": 
4, + "pod_default_pod02_cond_containersready": 1, + "pod_default_pod02_cond_podinitialized": 1, + "pod_default_pod02_cond_podready": 1, + "pod_default_pod02_cond_podscheduled": 1, + "pod_default_pod02_container_container1_readiness": 1, + "pod_default_pod02_container_container1_restarts": 0, + "pod_default_pod02_container_container1_state_running": 1, + "pod_default_pod02_container_container1_state_terminated": 0, + "pod_default_pod02_container_container1_state_terminated_reason_Completed": 0, + "pod_default_pod02_container_container1_state_terminated_reason_ContainerCannotRun": 0, + "pod_default_pod02_container_container1_state_terminated_reason_DeadlineExceeded": 0, + "pod_default_pod02_container_container1_state_terminated_reason_Error": 0, + "pod_default_pod02_container_container1_state_terminated_reason_Evicted": 0, + "pod_default_pod02_container_container1_state_terminated_reason_OOMKilled": 0, + "pod_default_pod02_container_container1_state_terminated_reason_Other": 0, + "pod_default_pod02_container_container1_state_waiting": 0, + "pod_default_pod02_container_container1_state_waiting_reason_ContainerCreating": 0, + "pod_default_pod02_container_container1_state_waiting_reason_CrashLoopBackOff": 0, + "pod_default_pod02_container_container1_state_waiting_reason_CreateContainerConfigError": 0, + "pod_default_pod02_container_container1_state_waiting_reason_CreateContainerError": 0, + "pod_default_pod02_container_container1_state_waiting_reason_ErrImagePull": 0, + "pod_default_pod02_container_container1_state_waiting_reason_ImagePullBackOff": 0, + "pod_default_pod02_container_container1_state_waiting_reason_InvalidImageName": 0, + "pod_default_pod02_container_container1_state_waiting_reason_Other": 0, + "pod_default_pod02_container_container1_state_waiting_reason_PodInitializing": 0, + "pod_default_pod02_container_container2_readiness": 1, + "pod_default_pod02_container_container2_restarts": 0, + "pod_default_pod02_container_container2_state_running": 1, + "pod_default_pod02_container_container2_state_terminated": 0, + "pod_default_pod02_container_container2_state_terminated_reason_Completed": 0, + "pod_default_pod02_container_container2_state_terminated_reason_ContainerCannotRun": 0, + "pod_default_pod02_container_container2_state_terminated_reason_DeadlineExceeded": 0, + "pod_default_pod02_container_container2_state_terminated_reason_Error": 0, + "pod_default_pod02_container_container2_state_terminated_reason_Evicted": 0, + "pod_default_pod02_container_container2_state_terminated_reason_OOMKilled": 0, + "pod_default_pod02_container_container2_state_terminated_reason_Other": 0, + "pod_default_pod02_container_container2_state_waiting": 0, + "pod_default_pod02_container_container2_state_waiting_reason_ContainerCreating": 0, + "pod_default_pod02_container_container2_state_waiting_reason_CrashLoopBackOff": 0, + "pod_default_pod02_container_container2_state_waiting_reason_CreateContainerConfigError": 0, + "pod_default_pod02_container_container2_state_waiting_reason_CreateContainerError": 0, + "pod_default_pod02_container_container2_state_waiting_reason_ErrImagePull": 0, + "pod_default_pod02_container_container2_state_waiting_reason_ImagePullBackOff": 0, + "pod_default_pod02_container_container2_state_waiting_reason_InvalidImageName": 0, + "pod_default_pod02_container_container2_state_waiting_reason_Other": 0, + "pod_default_pod02_container_container2_state_waiting_reason_PodInitializing": 0, + "pod_default_pod02_containers": 2, + "pod_default_pod02_containers_state_running": 2, + 
"pod_default_pod02_containers_state_terminated": 0, + "pod_default_pod02_containers_state_waiting": 0, + "pod_default_pod02_cpu_limits_used": 400, + "pod_default_pod02_cpu_requests_used": 200, + "pod_default_pod02_init_containers": 1, + "pod_default_pod02_init_containers_state_running": 0, + "pod_default_pod02_init_containers_state_terminated": 1, + "pod_default_pod02_init_containers_state_waiting": 0, + "pod_default_pod02_mem_limits_used": 419430400, + "pod_default_pod02_mem_requests_used": 209715200, + "pod_default_pod02_phase_failed": 0, + "pod_default_pod02_phase_pending": 0, + "pod_default_pod02_phase_running": 1, + "pod_default_pod02_phase_succeeded": 0, + "pod_default_pod02_status_reason_Evicted": 0, + "pod_default_pod02_status_reason_NodeAffinity": 0, + "pod_default_pod02_status_reason_NodeLost": 0, + "pod_default_pod02_status_reason_Other": 0, + "pod_default_pod02_status_reason_Shutdown": 0, + "pod_default_pod02_status_reason_UnexpectedAdmissionError": 0, } + copyAge(expected, mx) assert.Equal(t, expected, mx) @@ -661,6 +822,7 @@ func TestKubeState_Collect(t *testing.T) { len(baseCharts), len(*ks.Charts()), ) + module.TestMetricsHasAllChartsDims(t, ks.Charts(), mx) } return testCase{ diff --git a/src/go/plugin/go.d/modules/k8s_state/metadata.yaml b/src/go/plugin/go.d/modules/k8s_state/metadata.yaml index 7617b297f..aa247a8f9 100644 --- a/src/go/plugin/go.d/modules/k8s_state/metadata.yaml +++ b/src/go/plugin/go.d/modules/k8s_state/metadata.yaml @@ -141,7 +141,11 @@ modules: unit: status chart_type: line dimensions: - - name: a dimension per condition + - name: Ready + - name: DiskPressure + - name: MemoryPressure + - name: NetworkUnavailable + - name: PIDPressure - name: k8s_state.node_schedulability description: Schedulability unit: state @@ -271,6 +275,17 @@ modules: - name: failed - name: succeeded - name: pending + - name: k8s_state.pod_status_reason + description: Status reason + unit: status + chart_type: line + dimensions: + - name: Evicted + - name: NodeAffinity + - name: NodeLost + - name: Shutdown + - name: UnexpectedAdmissionError + - name: Other - name: k8s_state.pod_age description: Age unit: seconds @@ -347,10 +362,24 @@ modules: unit: state chart_type: line dimensions: - - name: a dimension per reason + - name: ContainerCreating + - name: CrashLoopBackOff + - name: CreateContainerConfigError + - name: CreateContainerError + - name: ErrImagePull + - name: ImagePullBackOff + - name: InvalidImageName + - name: PodInitializing + - name: Other - name: k8s_state.pod_container_terminated_state_reason description: Container terminated state reason unit: state chart_type: line dimensions: - - name: a dimension per reason + - name: Completed + - name: ContainerCannotRun + - name: DeadlineExceeded + - name: Error + - name: Evicted + - name: OOMKilled + - name: Other diff --git a/src/go/plugin/go.d/modules/k8s_state/resource.go b/src/go/plugin/go.d/modules/k8s_state/resource.go index cabd41a67..4cd42b55a 100644 --- a/src/go/plugin/go.d/modules/k8s_state/resource.go +++ b/src/go/plugin/go.d/modules/k8s_state/resource.go @@ -11,7 +11,7 @@ import ( type resource interface { source() string kind() kubeResourceKind - value() interface{} + value() any } type kubeResourceKind uint8 @@ -21,7 +21,7 @@ const ( kubeResourcePod ) -func toNode(i interface{}) (*corev1.Node, error) { +func toNode(i any) (*corev1.Node, error) { switch v := i.(type) { case *corev1.Node: return v, nil @@ -32,7 +32,7 @@ func toNode(i interface{}) (*corev1.Node, error) { } } -func toPod(i interface{}) (*corev1.Pod, 
error) { +func toPod(i any) (*corev1.Pod, error) { switch v := i.(type) { case *corev1.Pod: return v, nil diff --git a/src/go/plugin/go.d/modules/k8s_state/state.go b/src/go/plugin/go.d/modules/k8s_state/state.go index 72bac88ee..61566a5f1 100644 --- a/src/go/plugin/go.d/modules/k8s_state/state.go +++ b/src/go/plugin/go.d/modules/k8s_state/state.go @@ -19,9 +19,8 @@ func newKubeState() *kubeState { func newNodeState() *nodeState { return &nodeState{ - new: true, - labels: make(map[string]string), - conditions: make(map[string]*nodeStateCondition), + new: true, + labels: make(map[string]string), } } @@ -36,9 +35,7 @@ func newPodState() *podState { func newContainerState() *containerState { return &containerState{ - new: true, - stateWaitingReasons: make(map[string]*containerStateReason), - stateTerminatedReasons: make(map[string]*containerStateReason), + new: true, } } @@ -60,16 +57,10 @@ type ( allocatableCPU int64 allocatableMem int64 allocatablePods int64 - conditions map[string]*nodeStateCondition + conditions []corev1.NodeCondition stats nodeStateStats } - nodeStateCondition struct { - new bool - // https://kubernetes.io/docs/concepts/architecture/nodes/#condition - //typ corev1.NodeConditionType - status corev1.ConditionStatus - } nodeStateStats struct { reqCPU int64 limitCPU int64 @@ -129,7 +120,8 @@ type ( condPodInitialized corev1.ConditionStatus condPodReady corev1.ConditionStatus // https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase - phase corev1.PodPhase + phase corev1.PodPhase + statusReason string initContainers map[string]*containerState containers map[string]*containerState @@ -138,28 +130,22 @@ type ( func (ps podState) id() string { return ps.namespace + "_" + ps.name } -type ( - containerState struct { - new bool - - name string - uid string - - podName string - nodeName string - namespace string - - ready bool - restarts int64 - stateRunning bool - stateWaiting bool - stateTerminated bool - stateWaitingReasons map[string]*containerStateReason - stateTerminatedReasons map[string]*containerStateReason - } - containerStateReason struct { - new bool - reason string - active bool - } -) +type containerState struct { + new bool + + name string + uid string + + podName string + nodeName string + namespace string + + ready bool + restarts int64 + stateRunning bool + stateWaiting bool + stateTerminated bool + + waitingReason string + terminatedReason string +} diff --git a/src/go/plugin/go.d/modules/k8s_state/update_node_state.go b/src/go/plugin/go.d/modules/k8s_state/update_node_state.go index 80f5c26c8..57a43ab66 100644 --- a/src/go/plugin/go.d/modules/k8s_state/update_node_state.go +++ b/src/go/plugin/go.d/modules/k8s_state/update_node_state.go @@ -36,12 +36,5 @@ func (ks *KubeState) updateNodeState(r resource) { } ns.unSchedulable = node.Spec.Unschedulable - - for _, c := range node.Status.Conditions { - if v, ok := ns.conditions[string(c.Type)]; !ok { - ns.conditions[string(c.Type)] = &nodeStateCondition{new: true, status: c.Status} - } else { - v.status = c.Status - } - } + ns.conditions = node.Status.Conditions } diff --git a/src/go/plugin/go.d/modules/k8s_state/update_pod_state.go b/src/go/plugin/go.d/modules/k8s_state/update_pod_state.go index 16b0f433b..eafe0aaec 100644 --- a/src/go/plugin/go.d/modules/k8s_state/update_pod_state.go +++ b/src/go/plugin/go.d/modules/k8s_state/update_pod_state.go @@ -78,15 +78,7 @@ func (ks *KubeState) updatePodState(r resource) { } ps.phase = pod.Status.Phase - - for _, cs := range ps.containers { - for _, r := 
range cs.stateWaitingReasons { - r.active = false - } - for _, r := range cs.stateTerminatedReasons { - r.active = false - } - } + ps.statusReason = pod.Status.Reason for _, cntr := range pod.Status.ContainerStatuses { cs, ok := ps.containers[cntr.Name] @@ -108,23 +100,10 @@ func (ks *KubeState) updatePodState(r resource) { cs.stateTerminated = cntr.State.Terminated != nil if cntr.State.Waiting != nil { - reason := cntr.State.Waiting.Reason - r, ok := cs.stateWaitingReasons[reason] - if !ok { - r = &containerStateReason{new: true, reason: reason} - cs.stateWaitingReasons[reason] = r - } - r.active = true + cs.waitingReason = cntr.State.Waiting.Reason } - if cntr.State.Terminated != nil { - reason := cntr.State.Terminated.Reason - r, ok := cs.stateTerminatedReasons[reason] - if !ok { - r = &containerStateReason{new: true, reason: reason} - cs.stateTerminatedReasons[reason] = r - } - r.active = true + cs.terminatedReason = cntr.State.Terminated.Reason } } diff --git a/src/go/plugin/go.d/modules/lighttpd/apiclient.go b/src/go/plugin/go.d/modules/lighttpd/apiclient.go deleted file mode 100644 index 1686272cd..000000000 --- a/src/go/plugin/go.d/modules/lighttpd/apiclient.go +++ /dev/null @@ -1,170 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package lighttpd - -import ( - "bufio" - "fmt" - "io" - "net/http" - "strconv" - "strings" - - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" -) - -const ( - busyWorkers = "BusyWorkers" - idleWorkers = "IdleWorkers" - - busyServers = "BusyServers" - idleServers = "IdleServers" - totalAccesses = "Total Accesses" - totalkBytes = "Total kBytes" - uptime = "Uptime" - scoreBoard = "Scoreboard" -) - -func newAPIClient(client *http.Client, request web.Request) *apiClient { - return &apiClient{httpClient: client, request: request} -} - -type apiClient struct { - httpClient *http.Client - request web.Request -} - -func (a apiClient) getServerStatus() (*serverStatus, error) { - req, err := web.NewHTTPRequest(a.request) - - if err != nil { - return nil, fmt.Errorf("error on creating request : %v", err) - } - - resp, err := a.doRequestOK(req) - - defer closeBody(resp) - - if err != nil { - return nil, err - } - - status, err := parseResponse(resp.Body) - - if err != nil { - return nil, fmt.Errorf("error on parsing response from %s : %v", req.URL, err) - } - - return status, nil -} - -func (a apiClient) doRequestOK(req *http.Request) (*http.Response, error) { - resp, err := a.httpClient.Do(req) - if err != nil { - return nil, fmt.Errorf("error on request : %v", err) - } - if resp.StatusCode != http.StatusOK { - return resp, fmt.Errorf("%s returned HTTP status %d", req.URL, resp.StatusCode) - } - return resp, nil -} - -func parseResponse(r io.Reader) (*serverStatus, error) { - s := bufio.NewScanner(r) - var status serverStatus - - for s.Scan() { - parts := strings.Split(s.Text(), ":") - if len(parts) != 2 { - continue - } - key, value := strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]) - - switch key { - default: - case busyWorkers, idleWorkers: - return nil, fmt.Errorf("found '%s', apache data", key) - case busyServers: - status.Servers.Busy = mustParseInt(value) - case idleServers: - status.Servers.Idle = mustParseInt(value) - case totalAccesses: - status.Total.Accesses = mustParseInt(value) - case totalkBytes: - status.Total.KBytes = mustParseInt(value) - case uptime: - status.Uptime = mustParseInt(value) - case scoreBoard: - status.Scoreboard = parseScoreboard(value) - } - } - - return &status, nil -} - -func parseScoreboard(value 
string) *scoreboard { - // Descriptions from https://blog.serverdensity.com/monitor-lighttpd/ - // - // “.” = Opening the TCP connection (connect) - // “C” = Closing the TCP connection if no other HTTP request will use it (close) - // “E” = hard error - // “k” = Keeping the TCP connection open for more HTTP requests from the same client to avoid the TCP handling overhead (keep-alive) - // “r” = ReadAsMap the content of the HTTP request (read) - // “R” = ReadAsMap the content of the HTTP request (read-POST) - // “W” = Write the HTTP response to the socket (write) - // “h” = Decide action to take with the request (handle-request) - // “q” = Start of HTTP request (request-start) - // “Q” = End of HTTP request (request-end) - // “s” = Start of the HTTP request response (response-start) - // “S” = End of the HTTP request response (response-end) - // “_” Waiting for Connection (NOTE: not sure, copied the description from apache score board) - - var sb scoreboard - for _, s := range strings.Split(value, "") { - switch s { - case "_": - sb.Waiting++ - case ".": - sb.Open++ - case "C": - sb.Close++ - case "E": - sb.HardError++ - case "k": - sb.KeepAlive++ - case "r": - sb.Read++ - case "R": - sb.ReadPost++ - case "W": - sb.Write++ - case "h": - sb.HandleRequest++ - case "q": - sb.RequestStart++ - case "Q": - sb.RequestEnd++ - case "s": - sb.ResponseStart++ - case "S": - sb.ResponseEnd++ - } - } - - return &sb -} - -func mustParseInt(value string) *int64 { - v, err := strconv.ParseInt(value, 10, 64) - if err != nil { - panic(err) - } - return &v -} - -func closeBody(resp *http.Response) { - if resp != nil && resp.Body != nil { - _, _ = io.Copy(io.Discard, resp.Body) - _ = resp.Body.Close() - } -} diff --git a/src/go/plugin/go.d/modules/lighttpd/collect.go b/src/go/plugin/go.d/modules/lighttpd/collect.go index 84c88af45..30c3273f0 100644 --- a/src/go/plugin/go.d/modules/lighttpd/collect.go +++ b/src/go/plugin/go.d/modules/lighttpd/collect.go @@ -4,22 +4,29 @@ package lighttpd import ( "fmt" + "io" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) func (l *Lighttpd) collect() (map[string]int64, error) { - status, err := l.apiClient.getServerStatus() - + req, err := web.NewHTTPRequest(l.RequestConfig) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to create HTTP request: %v", err) } - mx := stm.ToMap(status) + var status *serverStatus + var perr error - if len(mx) == 0 { - return nil, fmt.Errorf("nothing was collected from %s", l.URL) + if err := web.DoHTTP(l.httpClient).Request(req, func(body io.Reader) error { + if status, perr = parseResponse(body); perr != nil { + return perr + } + return nil + }); err != nil { + return nil, err } - return mx, nil + return stm.ToMap(status), nil } diff --git a/src/go/plugin/go.d/modules/lighttpd/config_schema.json b/src/go/plugin/go.d/modules/lighttpd/config_schema.json index 32700b3b2..1b8c92fb8 100644 --- a/src/go/plugin/go.d/modules/lighttpd/config_schema.json +++ b/src/go/plugin/go.d/modules/lighttpd/config_schema.json @@ -105,7 +105,6 @@ "required": [ "url" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/lighttpd/init.go b/src/go/plugin/go.d/modules/lighttpd/init.go deleted file mode 100644 index 0923262c3..000000000 --- a/src/go/plugin/go.d/modules/lighttpd/init.go +++ /dev/null @@ -1,29 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package lighttpd - -import ( - "errors" - "fmt" - "strings" 
- - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" -) - -func (l *Lighttpd) validateConfig() error { - if l.URL == "" { - return errors.New("url not set") - } - if !strings.HasSuffix(l.URL, "?auto") { - return fmt.Errorf("bad URL '%s', should ends in '?auto'", l.URL) - } - return nil -} - -func (l *Lighttpd) initApiClient() (*apiClient, error) { - client, err := web.NewHTTPClient(l.Client) - if err != nil { - return nil, err - } - return newAPIClient(client, l.Request), nil -} diff --git a/src/go/plugin/go.d/modules/lighttpd/integrations/lighttpd.md b/src/go/plugin/go.d/modules/lighttpd/integrations/lighttpd.md index bcf434fc5..9a49615be 100644 --- a/src/go/plugin/go.d/modules/lighttpd/integrations/lighttpd.md +++ b/src/go/plugin/go.d/modules/lighttpd/integrations/lighttpd.md @@ -101,8 +101,8 @@ To enable status support, see the [official documentation](https://redmine.light The configuration file name for this integration is `go.d/lighttpd.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/lighttpd/lighttpd.go b/src/go/plugin/go.d/modules/lighttpd/lighttpd.go index 1b17833e9..ce0091915 100644 --- a/src/go/plugin/go.d/modules/lighttpd/lighttpd.go +++ b/src/go/plugin/go.d/modules/lighttpd/lighttpd.go @@ -5,9 +5,13 @@ package lighttpd import ( _ "embed" "errors" + "fmt" + "net/http" + "strings" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -24,27 +28,31 @@ func init() { func New() *Lighttpd { return &Lighttpd{Config: Config{ - HTTP: web.HTTP{ - Request: web.Request{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{ URL: "http://127.0.0.1/server-status?auto", }, - Client: web.Client{ - Timeout: web.Duration(time.Second * 2), + ClientConfig: web.ClientConfig{ + Timeout: confopt.Duration(time.Second * 2), }, }, - }} + }, + charts: charts.Copy(), + } } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - web.HTTP `yaml:",inline" json:""` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + web.HTTPConfig `yaml:",inline" json:""` } type Lighttpd struct { module.Base Config `yaml:",inline" json:""` - apiClient *apiClient + charts *module.Charts + + httpClient *http.Client } func (l *Lighttpd) Configuration() any { @@ -52,17 +60,18 @@ func (l *Lighttpd) Configuration() any { } func (l *Lighttpd) Init() error { - if err := l.validateConfig(); err != nil { - l.Errorf("config validation: %v", err) - return err + if l.URL == "" { + return errors.New("URL is required but not set") + } + if !strings.HasSuffix(l.URL, "?auto") { + return fmt.Errorf("bad URL '%s', should ends in '?auto'", l.URL) } - client, err := l.initApiClient() + httpClient, err := web.NewHTTPClient(l.ClientConfig) if err != nil { - l.Error(err) - return err + return fmt.Errorf("failed to create http 
client: %v", err) } - l.apiClient = client + l.httpClient = httpClient l.Debugf("using URL %s", l.URL) l.Debugf("using timeout: %s", l.Timeout.Duration()) @@ -73,22 +82,22 @@ func (l *Lighttpd) Init() error { func (l *Lighttpd) Check() error { mx, err := l.collect() if err != nil { - l.Error(err) return err } + if len(mx) == 0 { return errors.New("no metrics collected") } + return nil } func (l *Lighttpd) Charts() *Charts { - return charts.Copy() + return l.charts } func (l *Lighttpd) Collect() map[string]int64 { mx, err := l.collect() - if err != nil { l.Error(err) return nil @@ -98,7 +107,7 @@ func (l *Lighttpd) Collect() map[string]int64 { } func (l *Lighttpd) Cleanup() { - if l.apiClient != nil && l.apiClient.httpClient != nil { - l.apiClient.httpClient.CloseIdleConnections() + if l.httpClient != nil { + l.httpClient.CloseIdleConnections() } } diff --git a/src/go/plugin/go.d/modules/lighttpd/lighttpd_test.go b/src/go/plugin/go.d/modules/lighttpd/lighttpd_test.go index 05c7504ee..9df5a90a3 100644 --- a/src/go/plugin/go.d/modules/lighttpd/lighttpd_test.go +++ b/src/go/plugin/go.d/modules/lighttpd/lighttpd_test.go @@ -41,7 +41,6 @@ func TestLighttpd_Init(t *testing.T) { job := New() require.NoError(t, job.Init()) - assert.NotNil(t, job.apiClient) } func TestLighttpd_InitNG(t *testing.T) { diff --git a/src/go/plugin/go.d/modules/lighttpd/metrics.go b/src/go/plugin/go.d/modules/lighttpd/metrics.go deleted file mode 100644 index 6c39d2d06..000000000 --- a/src/go/plugin/go.d/modules/lighttpd/metrics.go +++ /dev/null @@ -1,33 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package lighttpd - -type ( - serverStatus struct { - Total struct { - Accesses *int64 `stm:"accesses"` - KBytes *int64 `stm:"kBytes"` - } `stm:"total"` - Servers struct { - Busy *int64 `stm:"busy_servers"` - Idle *int64 `stm:"idle_servers"` - } `stm:""` - Uptime *int64 `stm:"uptime"` - Scoreboard *scoreboard `stm:"scoreboard"` - } - scoreboard struct { - Waiting int64 `stm:"waiting"` - Open int64 `stm:"open"` - Close int64 `stm:"close"` - HardError int64 `stm:"hard_error"` - KeepAlive int64 `stm:"keepalive"` - Read int64 `stm:"read"` - ReadPost int64 `stm:"read_post"` - Write int64 `stm:"write"` - HandleRequest int64 `stm:"handle_request"` - RequestStart int64 `stm:"request_start"` - RequestEnd int64 `stm:"request_end"` - ResponseStart int64 `stm:"response_start"` - ResponseEnd int64 `stm:"response_end"` - } -) diff --git a/src/go/plugin/go.d/modules/lighttpd/status.go b/src/go/plugin/go.d/modules/lighttpd/status.go new file mode 100644 index 000000000..a81eb98ed --- /dev/null +++ b/src/go/plugin/go.d/modules/lighttpd/status.go @@ -0,0 +1,146 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package lighttpd + +import ( + "bufio" + "fmt" + "io" + "strconv" + "strings" +) + +const ( + busyWorkers = "BusyWorkers" + idleWorkers = "IdleWorkers" + + busyServers = "BusyServers" + idleServers = "IdleServers" + totalAccesses = "Total Accesses" + totalkBytes = "Total kBytes" + uptime = "Uptime" + scoreBoard = "Scoreboard" +) + +type ( + serverStatus struct { + Total struct { + Accesses *int64 `stm:"accesses"` + KBytes *int64 `stm:"kBytes"` + } `stm:"total"` + Servers struct { + Busy *int64 `stm:"busy_servers"` + Idle *int64 `stm:"idle_servers"` + } `stm:""` + Uptime *int64 `stm:"uptime"` + Scoreboard *scoreboard `stm:"scoreboard"` + } + scoreboard struct { + Waiting int64 `stm:"waiting"` + Open int64 `stm:"open"` + Close int64 `stm:"close"` + HardError int64 `stm:"hard_error"` + KeepAlive int64 `stm:"keepalive"` + Read 
int64 `stm:"read"` + ReadPost int64 `stm:"read_post"` + Write int64 `stm:"write"` + HandleRequest int64 `stm:"handle_request"` + RequestStart int64 `stm:"request_start"` + RequestEnd int64 `stm:"request_end"` + ResponseStart int64 `stm:"response_start"` + ResponseEnd int64 `stm:"response_end"` + } +) + +func parseResponse(r io.Reader) (*serverStatus, error) { + s := bufio.NewScanner(r) + var status serverStatus + + for s.Scan() { + parts := strings.Split(s.Text(), ":") + if len(parts) != 2 { + continue + } + key, value := strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]) + + switch key { + default: + case busyWorkers, idleWorkers: + return nil, fmt.Errorf("found '%s', looks like apache data", key) + case busyServers: + status.Servers.Busy = mustParseInt(value) + case idleServers: + status.Servers.Idle = mustParseInt(value) + case totalAccesses: + status.Total.Accesses = mustParseInt(value) + case totalkBytes: + status.Total.KBytes = mustParseInt(value) + case uptime: + status.Uptime = mustParseInt(value) + case scoreBoard: + status.Scoreboard = parseScoreboard(value) + } + } + + return &status, nil +} + +func parseScoreboard(value string) *scoreboard { + // Descriptions from https://blog.serverdensity.com/monitor-lighttpd/ + // + // “.” = Opening the TCP connection (connect) + // “C” = Closing the TCP connection if no other HTTP request will use it (close) + // “E” = hard error + // “k” = Keeping the TCP connection open for more HTTP requests from the same client to avoid the TCP handling overhead (keep-alive) + // “r” = Read the content of the HTTP request (read) + // “R” = Read the content of the HTTP request (read-POST) + // “W” = Write the HTTP response to the socket (write) + // “h” = Decide action to take with the request (handle-request) + // “q” = Start of HTTP request (request-start) + // “Q” = End of HTTP request (request-end) + // “s” = Start of the HTTP request response (response-start) + // “S” = End of the HTTP request response (response-end) + // “_” = Waiting for Connection (NOTE: not sure, copied the description from apache score board) + + var sb scoreboard + for _, s := range strings.Split(value, "") { + switch s { + case "_": + sb.Waiting++ + case ".": + sb.Open++ + case "C": + sb.Close++ + case "E": + sb.HardError++ + case "k": + sb.KeepAlive++ + case "r": + sb.Read++ + case "R": + sb.ReadPost++ + case "W": + sb.Write++ + case "h": + sb.HandleRequest++ + case "q": + sb.RequestStart++ + case "Q": + sb.RequestEnd++ + case "s": + sb.ResponseStart++ + case "S": + sb.ResponseEnd++ + } + } + + return &sb +} + +func mustParseInt(value string) *int64 { + v, err := strconv.ParseInt(value, 10, 64) + if err != nil { + panic(err) + } + return &v +} diff --git a/src/go/plugin/go.d/modules/litespeed/charts.go b/src/go/plugin/go.d/modules/litespeed/charts.go index b7309f287..febfd1e74 100644 --- a/src/go/plugin/go.d/modules/litespeed/charts.go +++ b/src/go/plugin/go.d/modules/litespeed/charts.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly + package litespeed import "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" diff --git a/src/go/plugin/go.d/modules/litespeed/collect.go b/src/go/plugin/go.d/modules/litespeed/collect.go index a68cf119c..715140ac1 100644 --- a/src/go/plugin/go.d/modules/litespeed/collect.go +++ b/src/go/plugin/go.d/modules/litespeed/collect.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly +
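The `//go:build` constraint just added to litespeed's charts.go and collect.go pairs with the new untagged doc.go introduced further down in this patch: the tagged files compile only on the listed platforms, while doc.go keeps the package (and its import path) resolvable everywhere else. A minimal sketch of the two-file pattern; the comments are illustrative, not from the patch:

```go
// doc.go — intentionally carries no build constraint, so the package
// always exists and tooling can resolve the import path on any platform.
package litespeed
```

```go
// charts.go / collect.go / litespeed.go — compiled only on supported
// platforms; elsewhere the module's init() never runs, so the collector
// is simply never registered.
//go:build linux || freebsd || openbsd || netbsd || dragonfly

package litespeed
```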
package litespeed import ( diff --git a/src/go/plugin/go.d/modules/litespeed/config_schema.json b/src/go/plugin/go.d/modules/litespeed/config_schema.json index 2ec13468f..24aa1fa95 100644 --- a/src/go/plugin/go.d/modules/litespeed/config_schema.json +++ b/src/go/plugin/go.d/modules/litespeed/config_schema.json @@ -21,7 +21,6 @@ "required": [ "reports_dir" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/litespeed/doc.go b/src/go/plugin/go.d/modules/litespeed/doc.go new file mode 100644 index 000000000..c6987cdb2 --- /dev/null +++ b/src/go/plugin/go.d/modules/litespeed/doc.go @@ -0,0 +1,3 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package litespeed diff --git a/src/go/plugin/go.d/modules/litespeed/integrations/litespeed.md b/src/go/plugin/go.d/modules/litespeed/integrations/litespeed.md index 96858fdab..9d4cf2444 100644 --- a/src/go/plugin/go.d/modules/litespeed/integrations/litespeed.md +++ b/src/go/plugin/go.d/modules/litespeed/integrations/litespeed.md @@ -25,7 +25,10 @@ Examine Litespeed metrics for insights into web server operations. Analyze reque The collector uses the statistics under /tmp/lshttpd to gather the metrics. -This collector is supported on all platforms. +This collector is only supported on the following platforms: + +- Linux +- BSD This collector only supports collecting metrics from a single instance of this integration. @@ -93,8 +96,8 @@ No action required. The configuration file name for this integration is `go.d/litespeed.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/litespeed/litespeed.go b/src/go/plugin/go.d/modules/litespeed/litespeed.go index f57c0eed5..5dbef374b 100644 --- a/src/go/plugin/go.d/modules/litespeed/litespeed.go +++ b/src/go/plugin/go.d/modules/litespeed/litespeed.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly + package litespeed import ( @@ -62,7 +64,6 @@ func (l *Litespeed) Init() error { func (l *Litespeed) Check() error { mx, err := l.collect() if err != nil { - l.Error(err) return err } diff --git a/src/go/plugin/go.d/modules/litespeed/litespeed_test.go b/src/go/plugin/go.d/modules/litespeed/litespeed_test.go index 576609dca..10a6b1be7 100644 --- a/src/go/plugin/go.d/modules/litespeed/litespeed_test.go +++ b/src/go/plugin/go.d/modules/litespeed/litespeed_test.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly + package litespeed import ( @@ -128,29 +130,14 @@ func TestLitespeed_Collect(t *testing.T) { mx := lite.Collect() assert.Equal(t, test.wantMetrics, mx) + if len(test.wantMetrics) > 0 { - testMetricsHasAllChartsDims(t, lite, mx) + module.TestMetricsHasAllChartsDims(t, lite.Charts(), mx) } }) } } -func testMetricsHasAllChartsDims(t *testing.T, lite *Litespeed, mx map[string]int64) { - for _, chart := range *lite.Charts() { - if chart.Obsolete { - continue - } - for _, dim := range chart.Dims { - _, ok := mx[dim.ID] - assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) - } - for _, v := range chart.Vars { - _, ok := mx[v.ID] - assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) - } - } -} - func prepareLitespeedOk() *Litespeed { lite := New() lite.ReportsDir = "testdata" diff --git a/src/go/plugin/go.d/modules/litespeed/metadata.yaml b/src/go/plugin/go.d/modules/litespeed/metadata.yaml index 1c7957532..46525e8eb 100644 --- a/src/go/plugin/go.d/modules/litespeed/metadata.yaml +++ b/src/go/plugin/go.d/modules/litespeed/metadata.yaml @@ -25,7 +25,7 @@ modules: metrics_description: "Examine Litespeed metrics for insights into web server operations. Analyze request rates, response times, and error rates for efficient web service delivery." method_description: "The collector uses the statistics under /tmp/lshttpd to gather the metrics." 
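The litespeed_test.go hunk above is one of several in this patch that delete a module-local dims checker in favor of the shared `module.TestMetricsHasAllChartsDims(t, charts, mx)`. Judging from the local copy it replaces, the shared helper plausibly reduces to the following (a reconstruction for orientation, not the actual `module` package source):

```go
package module

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// TestMetricsHasAllChartsDims — inferred from the per-module copies this
// patch deletes: every dimension and variable of every non-obsolete chart
// must have a matching key in the collected metrics map.
func TestMetricsHasAllChartsDims(t *testing.T, charts *Charts, mx map[string]int64) {
	for _, chart := range *charts {
		if chart.Obsolete {
			continue
		}
		for _, dim := range chart.Dims {
			_, ok := mx[dim.ID]
			assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID)
		}
		for _, v := range chart.Vars {
			_, ok := mx[v.ID]
			assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID)
		}
	}
}
```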
supported_platforms: - include: [] + include: [Linux, BSD] exclude: [] multi_instance: false additional_permissions: diff --git a/src/go/plugin/go.d/modules/logind/charts.go b/src/go/plugin/go.d/modules/logind/charts.go index 61fa0490c..ccc686857 100644 --- a/src/go/plugin/go.d/modules/logind/charts.go +++ b/src/go/plugin/go.d/modules/logind/charts.go @@ -1,7 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later //go:build linux -// +build linux package logind diff --git a/src/go/plugin/go.d/modules/logind/collect.go b/src/go/plugin/go.d/modules/logind/collect.go index 1f22478b1..dab2ce59e 100644 --- a/src/go/plugin/go.d/modules/logind/collect.go +++ b/src/go/plugin/go.d/modules/logind/collect.go @@ -1,7 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later //go:build linux -// +build linux package logind diff --git a/src/go/plugin/go.d/modules/logind/config_schema.json b/src/go/plugin/go.d/modules/logind/config_schema.json index 0a8618538..0ec050df5 100644 --- a/src/go/plugin/go.d/modules/logind/config_schema.json +++ b/src/go/plugin/go.d/modules/logind/config_schema.json @@ -19,7 +19,6 @@ "default": 1 } }, - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/logind/connection.go b/src/go/plugin/go.d/modules/logind/connection.go index b97387acf..5afa0431c 100644 --- a/src/go/plugin/go.d/modules/logind/connection.go +++ b/src/go/plugin/go.d/modules/logind/connection.go @@ -1,7 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later //go:build linux -// +build linux package logind diff --git a/src/go/plugin/go.d/modules/logind/integrations/systemd-logind_users.md b/src/go/plugin/go.d/modules/logind/integrations/systemd-logind_users.md index 3450ff669..b34417e34 100644 --- a/src/go/plugin/go.d/modules/logind/integrations/systemd-logind_users.md +++ b/src/go/plugin/go.d/modules/logind/integrations/systemd-logind_users.md @@ -26,7 +26,9 @@ This collector monitors number of sessions and users as reported by the `org.fre -This collector is supported on all platforms. +This collector is only supported on the following platforms: + +- Linux This collector supports collecting metrics from multiple instances of this integration, including remote instances. @@ -89,8 +91,8 @@ No action required. The configuration file name for this integration is `go.d/logind.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
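The logind hunks above drop the legacy `// +build linux` comment while keeping `//go:build linux`. Since Go 1.17, `//go:build` is the canonical constraint syntax; the old comment form existed only so pre-1.17 toolchains could see the same constraint, so once the minimum supported toolchain is newer, the duplicate line is dead weight. Before and after, for reference:

```go
// Before: both spellings had to be present and kept in sync by gofmt.
//go:build linux
// +build linux

package logind
```

```go
// After: with a Go 1.17+ toolchain the //go:build line alone is authoritative.
//go:build linux

package logind
```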
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/logind/logind.go b/src/go/plugin/go.d/modules/logind/logind.go index ff2866349..7e5a5bb0f 100644 --- a/src/go/plugin/go.d/modules/logind/logind.go +++ b/src/go/plugin/go.d/modules/logind/logind.go @@ -1,7 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later //go:build linux -// +build linux package logind @@ -11,7 +10,7 @@ import ( "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" ) //go:embed "config_schema.json" @@ -31,7 +30,7 @@ func init() { func New() *Logind { return &Logind{ Config: Config{ - Timeout: web.Duration(time.Second), + Timeout: confopt.Duration(time.Second), }, newLogindConn: func(cfg Config) (logindConnection, error) { return newLogindConnection(cfg.Timeout.Duration()) @@ -41,8 +40,8 @@ func New() *Logind { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + Timeout confopt.Duration `yaml:"timeout,omitempty" json:"timeout"` } type Logind struct { @@ -66,7 +65,6 @@ func (l *Logind) Init() error { func (l *Logind) Check() error { mx, err := l.collect() if err != nil { - l.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/logind/logind_test.go b/src/go/plugin/go.d/modules/logind/logind_test.go index 21cbba871..65755408b 100644 --- a/src/go/plugin/go.d/modules/logind/logind_test.go +++ b/src/go/plugin/go.d/modules/logind/logind_test.go @@ -1,7 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later //go:build linux -// +build linux package logind diff --git a/src/go/plugin/go.d/modules/logind/metadata.yaml b/src/go/plugin/go.d/modules/logind/metadata.yaml index 792a515fe..503db34fe 100644 --- a/src/go/plugin/go.d/modules/logind/metadata.yaml +++ b/src/go/plugin/go.d/modules/logind/metadata.yaml @@ -25,7 +25,7 @@ modules: This collector monitors number of sessions and users as reported by the `org.freedesktop.login1` DBus API. 
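The `web.Duration` to `confopt.Duration` moves in the logind hunk above (and in lighttpd and logstash elsewhere in this patch) are mechanical renames, but call sites such as `cfg.Timeout.Duration()` hint at the type's shape. A minimal sketch, assuming a thin `time.Duration` wrapper; the real confopt type presumably also handles YAML/JSON unmarshalling of values like `2s`, which is omitted here:

```go
package confopt // illustrative sketch, not the actual package source

import "time"

// Duration wraps time.Duration so configs can carry timeout options;
// only the accessor the call sites above rely on is shown.
type Duration time.Duration

func (d Duration) Duration() time.Duration { return time.Duration(d) }
```

Under that assumption, both `Timeout: confopt.Duration(time.Second)` in the constructor and `cfg.Timeout.Duration()` at the connection site line up with the hunks above.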
method_description: "" supported_platforms: - include: [] + include: [Linux] exclude: [] multi_instance: true additional_permissions: diff --git a/src/go/plugin/go.d/modules/logstash/collect.go b/src/go/plugin/go.d/modules/logstash/collect.go index ff506d640..ad7d41dd8 100644 --- a/src/go/plugin/go.d/modules/logstash/collect.go +++ b/src/go/plugin/go.d/modules/logstash/collect.go @@ -3,10 +3,7 @@ package logstash import ( - "encoding/json" "fmt" - "io" - "net/http" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" @@ -45,47 +42,16 @@ func (l *Logstash) updateCharts(pipelines map[string]pipelineStats) { } func (l *Logstash) queryNodeStats() (*nodeStats, error) { - req, err := web.NewHTTPRequestWithPath(l.Request, urlPathNodeStatsAPI) + req, err := web.NewHTTPRequestWithPath(l.RequestConfig, urlPathNodeStatsAPI) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to create HTTP request: %w", err) } var stats nodeStats - if err := l.doWithDecode(&stats, req); err != nil { + if err := web.DoHTTP(l.httpClient).RequestJSON(req, &stats); err != nil { return nil, err } return &stats, nil } - -func (l *Logstash) doWithDecode(dst interface{}, req *http.Request) error { - l.Debugf("executing %s '%s'", req.Method, req.URL) - resp, err := l.httpClient.Do(req) - if err != nil { - return err - } - defer closeBody(resp) - - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("%s returned %d status code (%s)", req.URL, resp.StatusCode, resp.Status) - } - - content, err := io.ReadAll(resp.Body) - if err != nil { - return fmt.Errorf("error on reading response from %s : %v", req.URL, err) - } - - if err := json.Unmarshal(content, dst); err != nil { - return fmt.Errorf("error on parsing response from %s : %v", req.URL, err) - } - - return nil -} - -func closeBody(resp *http.Response) { - if resp != nil && resp.Body != nil { - _, _ = io.Copy(io.Discard, resp.Body) - _ = resp.Body.Close() - } -} diff --git a/src/go/plugin/go.d/modules/logstash/config_schema.json b/src/go/plugin/go.d/modules/logstash/config_schema.json index c08d136f1..9b251f495 100644 --- a/src/go/plugin/go.d/modules/logstash/config_schema.json +++ b/src/go/plugin/go.d/modules/logstash/config_schema.json @@ -105,7 +105,6 @@ "required": [ "url" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/logstash/integrations/logstash.md b/src/go/plugin/go.d/modules/logstash/integrations/logstash.md index 0ca751ebf..cdd3d18ed 100644 --- a/src/go/plugin/go.d/modules/logstash/integrations/logstash.md +++ b/src/go/plugin/go.d/modules/logstash/integrations/logstash.md @@ -114,8 +114,8 @@ No action required. The configuration file name for this integration is `go.d/logstatsh.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
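The logstash collect.go hunk above collapses the hand-rolled `doWithDecode`/`closeBody` pair into a single `web.DoHTTP(l.httpClient).RequestJSON(req, &stats)` call. Judging from the deleted code, the boilerplate such a helper centralizes looks roughly like this (a sketch under those assumptions, not the `pkg/web` implementation):

```go
package webutil // illustrative name, not the actual pkg/web source

import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
)

// requestJSON reproduces what the deleted doWithDecode/closeBody pair did
// by hand: run the request, require 200 OK, decode the JSON body, and
// always drain and close the body so the keep-alive connection is reusable.
func requestJSON(client *http.Client, req *http.Request, dst any) error {
	resp, err := client.Do(req)
	if err != nil {
		return fmt.Errorf("error on request: %v", err)
	}
	defer func() {
		_, _ = io.Copy(io.Discard, resp.Body) // drain any remaining bytes
		_ = resp.Body.Close()
	}()

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("%s returned %d status code (%s)", req.URL, resp.StatusCode, resp.Status)
	}
	if err := json.NewDecoder(resp.Body).Decode(dst); err != nil {
		return fmt.Errorf("error on parsing response from %s: %v", req.URL, err)
	}
	return nil
}
```

Centralizing this removes a copy of the same drain/close/status-check logic from each module, which is exactly the shape of the deletions in this patch.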
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/logstash/logstash.go b/src/go/plugin/go.d/modules/logstash/logstash.go index 3ee95594e..f15b144ae 100644 --- a/src/go/plugin/go.d/modules/logstash/logstash.go +++ b/src/go/plugin/go.d/modules/logstash/logstash.go @@ -5,10 +5,12 @@ package logstash import ( _ "embed" "errors" + "fmt" "net/http" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -26,12 +28,12 @@ func init() { func New() *Logstash { return &Logstash{ Config: Config{ - HTTP: web.HTTP{ - Request: web.Request{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{ URL: "http://localhost:9600", }, - Client: web.Client{ - Timeout: web.Duration(time.Second), + ClientConfig: web.ClientConfig{ + Timeout: confopt.Duration(time.Second), }, }, }, @@ -41,8 +43,8 @@ func New() *Logstash { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - web.HTTP `yaml:",inline" json:""` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + web.HTTPConfig `yaml:",inline" json:""` } type Logstash struct { @@ -62,14 +64,12 @@ func (l *Logstash) Configuration() any { func (l *Logstash) Init() error { if l.URL == "" { - l.Error("config validation: 'url' cannot be empty") - return errors.New("url not set") + return errors.New("config: 'url' cannot be empty") } - httpClient, err := web.NewHTTPClient(l.Client) + httpClient, err := web.NewHTTPClient(l.ClientConfig) if err != nil { - l.Errorf("init HTTP client: %v", err) - return err + return fmt.Errorf("init HTTP client: %v", err) } l.httpClient = httpClient @@ -82,7 +82,6 @@ func (l *Logstash) Init() error { func (l *Logstash) Check() error { mx, err := l.collect() if err != nil { - l.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/logstash/logstash_test.go b/src/go/plugin/go.d/modules/logstash/logstash_test.go index 166d39815..4afc82636 100644 --- a/src/go/plugin/go.d/modules/logstash/logstash_test.go +++ b/src/go/plugin/go.d/modules/logstash/logstash_test.go @@ -3,12 +3,12 @@ package logstash import ( - "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" "net/http" "net/http/httptest" "os" "testing" + "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" "github.com/stretchr/testify/assert" @@ -49,8 +49,8 @@ func TestLogstash_Init(t *testing.T) { "fail when URL not set": { wantFail: true, config: Config{ - HTTP: web.HTTP{ - Request: web.Request{URL: ""}, + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{URL: ""}, }, }, }, @@ -180,25 +180,12 @@ func TestLogstash_Collect(t *testing.T) { require.Equal(t, test.wantMetrics, mx) if len(test.wantMetrics) > 0 { assert.Equal(t, test.wantNumOfCharts, len(*ls.Charts())) - ensureCollectedHasAllChartsDimsVarsIDs(t, ls, mx) + module.TestMetricsHasAllChartsDims(t, ls.Charts(), mx) } }) } } -func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, ls *Logstash, mx map[string]int64) { - for _, chart := range *ls.Charts() { - for _, dim := range chart.Dims { - _, ok := mx[dim.ID] - assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) - } - for _, v := range chart.Vars { - _, ok := mx[v.ID] - assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, 
chart.ID) - } - } -} - func caseValidResponse(t *testing.T) (*Logstash, func()) { t.Helper() srv := httptest.NewServer(http.HandlerFunc( diff --git a/src/go/plugin/go.d/modules/lvm/charts.go b/src/go/plugin/go.d/modules/lvm/charts.go index 8d2f0fa19..d602b5c05 100644 --- a/src/go/plugin/go.d/modules/lvm/charts.go +++ b/src/go/plugin/go.d/modules/lvm/charts.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || netbsd + package lvm import ( diff --git a/src/go/plugin/go.d/modules/lvm/collect.go b/src/go/plugin/go.d/modules/lvm/collect.go index 8f57a1a80..e413e5fb0 100644 --- a/src/go/plugin/go.d/modules/lvm/collect.go +++ b/src/go/plugin/go.d/modules/lvm/collect.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || netbsd + package lvm import ( diff --git a/src/go/plugin/go.d/modules/lvm/config_schema.json b/src/go/plugin/go.d/modules/lvm/config_schema.json index 1e0788074..f45c1129f 100644 --- a/src/go/plugin/go.d/modules/lvm/config_schema.json +++ b/src/go/plugin/go.d/modules/lvm/config_schema.json @@ -19,7 +19,6 @@ "default": 2 } }, - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/lvm/doc.go b/src/go/plugin/go.d/modules/lvm/doc.go new file mode 100644 index 000000000..2355ba610 --- /dev/null +++ b/src/go/plugin/go.d/modules/lvm/doc.go @@ -0,0 +1,3 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package lvm diff --git a/src/go/plugin/go.d/modules/lvm/exec.go b/src/go/plugin/go.d/modules/lvm/exec.go index 66863a051..5c3c0ee75 100644 --- a/src/go/plugin/go.d/modules/lvm/exec.go +++ b/src/go/plugin/go.d/modules/lvm/exec.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || netbsd + package lvm import ( @@ -11,6 +13,10 @@ import ( "github.com/netdata/netdata/go/plugins/logger" ) +type lvmCLI interface { + lvsReportJson() ([]byte, error) +} + func newLVMCLIExec(ndsudoPath string, timeout time.Duration, log *logger.Logger) *lvmCLIExec { return &lvmCLIExec{ Logger: log, diff --git a/src/go/plugin/go.d/modules/lvm/init.go b/src/go/plugin/go.d/modules/lvm/init.go index 5c4db1add..1d9d16816 100644 --- a/src/go/plugin/go.d/modules/lvm/init.go +++ b/src/go/plugin/go.d/modules/lvm/init.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || netbsd + package lvm import ( diff --git a/src/go/plugin/go.d/modules/lvm/integrations/lvm_logical_volumes.md b/src/go/plugin/go.d/modules/lvm/integrations/lvm_logical_volumes.md index 1d76c3635..14750afd3 100644 --- a/src/go/plugin/go.d/modules/lvm/integrations/lvm_logical_volumes.md +++ b/src/go/plugin/go.d/modules/lvm/integrations/lvm_logical_volumes.md @@ -26,7 +26,10 @@ This collector monitors the health of LVM logical volumes. It relies on the [`lv -This collector is supported on all platforms. +This collector is only supported on the following platforms: + +- Linux +- NetBSD This collector only supports collecting metrics from a single instance of this integration. @@ -99,8 +102,8 @@ No action required. The configuration file name for this integration is `go.d/lvm.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/lvm/lvm.go b/src/go/plugin/go.d/modules/lvm/lvm.go index c6754e06a..90a1d7a3f 100644 --- a/src/go/plugin/go.d/modules/lvm/lvm.go +++ b/src/go/plugin/go.d/modules/lvm/lvm.go @@ -1,14 +1,17 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || netbsd + package lvm import ( _ "embed" "errors" + "fmt" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" ) //go:embed "config_schema.json" @@ -28,7 +31,7 @@ func init() { func New() *LVM { return &LVM{ Config: Config{ - Timeout: web.Duration(time.Second * 2), + Timeout: confopt.Duration(time.Second * 2), }, charts: &module.Charts{}, lvmThinPools: make(map[string]bool), @@ -36,25 +39,20 @@ func New() *LVM { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + Timeout confopt.Duration `yaml:"timeout,omitempty" json:"timeout"` } -type ( - LVM struct { - module.Base - Config `yaml:",inline" json:""` +type LVM struct { + module.Base + Config `yaml:",inline" json:""` - charts *module.Charts + charts *module.Charts - exec lvmCLI + exec lvmCLI - lvmThinPools map[string]bool - } - lvmCLI interface { - lvsReportJson() ([]byte, error) - } -) + lvmThinPools map[string]bool +} func (l *LVM) Configuration() any { return l.Config @@ -63,8 +61,7 @@ func (l *LVM) Configuration() any { func (l *LVM) Init() error { lvmExec, err := l.initLVMCLIExec() if err != nil { - l.Errorf("lvm exec initialization: %v", err) - return err + return fmt.Errorf("init lvm exec: %v", err) } l.exec = lvmExec @@ -74,7 +71,6 @@ func (l *LVM) Init() error { func (l *LVM) Check() error { mx, err := l.collect() if err != nil { - l.Error(err) return err } diff --git a/src/go/plugin/go.d/modules/lvm/lvm_test.go b/src/go/plugin/go.d/modules/lvm/lvm_test.go index a3c072837..f7f290b5c 100644 --- a/src/go/plugin/go.d/modules/lvm/lvm_test.go +++ b/src/go/plugin/go.d/modules/lvm/lvm_test.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || netbsd + package lvm import ( diff --git a/src/go/plugin/go.d/modules/lvm/metadata.yaml b/src/go/plugin/go.d/modules/lvm/metadata.yaml index 46d036946..7689d9de2 100644 --- a/src/go/plugin/go.d/modules/lvm/metadata.yaml +++ b/src/go/plugin/go.d/modules/lvm/metadata.yaml @@ -28,7 +28,7 @@ modules: This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management. 
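The diffs above also move the `lvmCLI` interface out of `lvm.go` and next to its implementation in `exec.go`. A minimal sketch of why a one-method interface like this is useful — the mock type and sample JSON below are hypothetical illustrations, not part of the module:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// lvmCLI matches the one-method interface the diff relocates into exec.go.
type lvmCLI interface {
	lvsReportJson() ([]byte, error)
}

// mockLVM is a hypothetical test double: satisfying lvmCLI lets the
// collector logic be exercised without ndsudo or a real `lvs` binary.
type mockLVM struct{ data []byte }

func (m *mockLVM) lvsReportJson() ([]byte, error) { return m.data, nil }

func main() {
	var cli lvmCLI = &mockLVM{data: []byte(`{"report": []}`)}

	bs, err := cli.lvsReportJson()
	if err != nil {
		panic(err)
	}

	var report map[string]any
	if err := json.Unmarshal(bs, &report); err != nil {
		panic(err)
	}
	fmt.Println(report) // prints: map[report:[]]
}
```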
      method_description: ""
      supported_platforms:
-       include: []
+       include: [Linux, NetBSD]
        exclude: []
      multi_instance: false
      additional_permissions:
diff --git a/src/go/plugin/go.d/modules/maxscale/README.md b/src/go/plugin/go.d/modules/maxscale/README.md
new file mode 120000
index 000000000..9202ef1cf
--- /dev/null
+++ b/src/go/plugin/go.d/modules/maxscale/README.md
@@ -0,0 +1 @@
+integrations/maxscale.md
\ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/maxscale/charts.go b/src/go/plugin/go.d/modules/maxscale/charts.go
new file mode 100644
index 000000000..b22fd4b6a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/maxscale/charts.go
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package maxscale
+
+import (
+    "fmt"
+    "strings"
+
+    "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+    prioPollEvents = module.Priority + iota
+
+    prioSessions
+    prioZombies
+
+    prioServerState
+    prioServerConnections
+
+    prioThreadsByState
+
+    prioCurrentFDs
+
+    prioQCCacheEfficiency
+    prioQCCacheOperations
+
+    prioUptime
+)
+
+var charts = module.Charts{
+    pollEventsChart.Copy(),
+    currentSessionsChart.Copy(),
+    currentZombieConnectionsChart.Copy(),
+    threadsByStateChart.Copy(),
+    currentFDsChart.Copy(),
+    qcCacheEfficiencyChart.Copy(),
+    qcCacheOperationsChart.Copy(),
+    uptimeChart.Copy(),
+}
+
+var (
+    pollEventsChart = module.Chart{
+        ID: "poll_events",
+        Title: "Poll Events",
+        Units: "events/s",
+        Fam: "poll events",
+        Ctx: "maxscale.poll_events",
+        Priority: prioPollEvents,
+        Dims: module.Dims{
+            {ID: "threads_reads", Name: "reads", Algo: module.Incremental},
+            {ID: "threads_writes", Name: "writes", Algo: module.Incremental},
+            {ID: "threads_accepts", Name: "accepts", Algo: module.Incremental},
+            {ID: "threads_errors", Name: "errors", Algo: module.Incremental},
+            {ID: "threads_hangups", Name: "hangups", Algo: module.Incremental},
+        },
+    }
+
+    currentSessionsChart = module.Chart{
+        ID: "current_sessions",
+        Title: "Current Sessions",
+        Units: "sessions",
+        Fam: "sessions",
+        Ctx: "maxscale.current_sessions",
+        Priority: prioSessions,
+        Dims: module.Dims{
+            {ID: "threads_sessions", Name: "sessions"},
+        },
+    }
+    currentZombieConnectionsChart = module.Chart{
+        ID: "current_zombie_connections",
+        Title: "Current Zombie Connections",
+        Units: "connections",
+        Fam: "sessions",
+        Ctx: "maxscale.current_zombie_connections",
+        Priority: prioZombies,
+        Dims: module.Dims{
+            {ID: "threads_zombies", Name: "zombie"},
+        },
+    }
+
+    threadsByStateChart = func() module.Chart {
+        chart := module.Chart{
+            ID: "threads_by_state",
+            Title: "Threads Count by State",
+            Units: "threads",
+            Fam: "threads",
+            Ctx: "maxscale.threads_by_state",
+            Priority: prioThreadsByState,
+            Type: module.Stacked,
+        }
+        for _, v := range threadStates {
+            chart.Dims = append(chart.Dims, &module.Dim{
+                ID: "threads_state_" + v,
+                Name: strings.ToLower(v),
+            })
+        }
+        return chart
+    }()
+
+    currentFDsChart = module.Chart{
+        ID: "current_file_descriptors",
+        Title: "Current Managed File Descriptors",
+        Units: "fds",
+        Fam: "fds",
+        Ctx: "maxscale.current_fds",
+        Priority: prioCurrentFDs,
+        Dims: module.Dims{
+            {ID: "threads_current_fds", Name: "managed"},
+        },
+    }
+
+    qcCacheEfficiencyChart = module.Chart{
+        ID: "qc_cache_efficiency",
+        Title: "QC Cache Efficiency",
+        Units: "requests/s",
+        Fam: "qc cache",
+        Ctx: "maxscale.qc_cache_efficiency",
+        Priority: prioQCCacheEfficiency,
+        Type: module.Stacked,
+        Dims: module.Dims{
+            {ID: "threads_qc_cache_hits", Name: "hits", Algo: module.Incremental},
+ {ID: "threads_qc_cache_misses", Name: "misses", Algo: module.Incremental}, + }, + } + qcCacheOperationsChart = module.Chart{ + ID: "qc_cache_operations", + Title: "QC Cache Operations", + Units: "operations/s", + Fam: "qc cache", + Ctx: "maxscale.qc_cache_operations", + Priority: prioQCCacheOperations, + Type: module.Stacked, + Dims: module.Dims{ + {ID: "threads_qc_cache_inserts", Name: "inserts", Algo: module.Incremental}, + {ID: "threads_qc_cache_evictions", Name: "evictions", Algo: module.Incremental}, + }, + } + + uptimeChart = module.Chart{ + ID: "uptime", + Title: "Uptime", + Units: "seconds", + Fam: "uptime", + Ctx: "maxscale.uptime", + Priority: prioUptime, + Dims: module.Dims{ + {ID: "uptime"}, + }, + } +) + +var serverChartsTmpl = module.Charts{ + serverStateChartTmpl.Copy(), + serverCurrentConnectionsChartTmpl.Copy(), +} + +var ( + serverStateChartTmpl = func() module.Chart { + chart := module.Chart{ + ID: "server_%s_state", + Title: "Server State", + Units: "state", + Fam: "servers", + Ctx: "maxscale.server_state", + Priority: prioServerState, + } + for _, v := range serverStates { + chart.Dims = append(chart.Dims, &module.Dim{ + ID: "server_%s_state_" + v, + Name: strings.ToLower(cleanChartID(v)), + }) + } + return chart + }() + serverCurrentConnectionsChartTmpl = module.Chart{ + ID: "server_%s_current_connections", + Title: "Server Current Connections", + Units: "connections", + Fam: "servers", + Ctx: "maxscale.server_current_connections", + Priority: prioServerConnections, + Dims: module.Dims{ + {ID: "server_%s_connections", Name: "connections"}, + }, + } +) + +func (m *MaxScale) addServerCharts(id, addr string) { + srvCharts := serverChartsTmpl.Copy() + + for _, chart := range *srvCharts { + chart.ID = fmt.Sprintf(chart.ID, id) + chart.ID = cleanChartID(chart.ID) + chart.Labels = []module.Label{ + {Key: "server", Value: id}, + {Key: "address", Value: addr}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, id) + } + } + + if err := m.Charts().Add(*srvCharts...); err != nil { + m.Warning(err) + } +} + +func (m *MaxScale) removeServerCharts(id string) { + px := fmt.Sprintf("server_%s_", id) + px = cleanChartID(px) + + for _, chart := range *m.Charts() { + if strings.HasPrefix(chart.ID, px) { + chart.MarkRemove() + chart.MarkNotCreated() + } + } +} + +func cleanChartID(id string) string { + r := strings.NewReplacer(".", "_", " ", "_") + return r.Replace(id) +} diff --git a/src/go/plugin/go.d/modules/maxscale/collect.go b/src/go/plugin/go.d/modules/maxscale/collect.go new file mode 100644 index 000000000..6c3994573 --- /dev/null +++ b/src/go/plugin/go.d/modules/maxscale/collect.go @@ -0,0 +1,141 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package maxscale + +import ( + "fmt" + "net" + "strconv" + "strings" + "unicode" + + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" +) + +const ( + urlPathMaxscale = "/maxscale" + urlPathMaxscaleThreads = "/maxscale/threads" + urlPathServers = "/servers" +) + +func (m *MaxScale) collect() (map[string]int64, error) { + mx := make(map[string]int64) + + if err := m.collectMaxScaleGlobal(mx); err != nil { + return nil, err + } + if err := m.collectMaxScaleThreads(mx); err != nil { + return nil, err + } + if err := m.collectServers(mx); err != nil { + return nil, err + } + + return mx, nil +} + +func (m *MaxScale) collectMaxScaleGlobal(mx map[string]int64) error { + req, err := web.NewHTTPRequestWithPath(m.RequestConfig, urlPathMaxscale) + if err != nil { + return fmt.Errorf("failed to create HTTP request: 
%v", err) + } + + var resp maxscaleGlobalResponse + + if err := web.DoHTTP(m.httpClient).RequestJSON(req, &resp); err != nil { + return err + } + + if resp.Data == nil { + return fmt.Errorf("invalid response from '%s': missing expected MaxScale data", req.URL) + } + + mx["uptime"] = resp.Data.Attrs.Uptime + + return nil +} + +func (m *MaxScale) collectMaxScaleThreads(mx map[string]int64) error { + req, err := web.NewHTTPRequestWithPath(m.RequestConfig, urlPathMaxscaleThreads) + if err != nil { + return fmt.Errorf("failed to create HTTP request: %v", err) + } + + var resp maxscaleThreadsResponse + + if err := web.DoHTTP(m.httpClient).RequestJSON(req, &resp); err != nil { + return err + } + + for _, r := range resp.Data { + st := r.Attrs.Stats + mx["threads_reads"] += st.Reads + mx["threads_writes"] += st.Writes + mx["threads_errors"] += st.Errors + mx["threads_hangups"] += st.Hangups + mx["threads_accepts"] += st.Accepts + mx["threads_sessions"] += st.Sessions + mx["threads_zombies"] += st.Zombies + mx["threads_current_fds"] += st.CurrentDescriptors + mx["threads_total_fds"] += st.TotalDescriptors + mx["threads_qc_cache_inserts"] += st.QCCache.Inserts + mx["threads_qc_cache_evictions"] += st.QCCache.Evictions + mx["threads_qc_cache_hits"] += st.QCCache.Hits + mx["threads_qc_cache_misses"] += st.QCCache.Misses + for _, v := range threadStates { + mx["threads_state_"+v] = 0 + } + mx["threads_state_"+st.State]++ + } + + return nil +} + +func (m *MaxScale) collectServers(mx map[string]int64) error { + req, err := web.NewHTTPRequestWithPath(m.RequestConfig, urlPathServers) + if err != nil { + return fmt.Errorf("failed to create HTTP request: %v", err) + } + + var resp serversResponse + + if err := web.DoHTTP(m.httpClient).RequestJSON(req, &resp); err != nil { + return err + } + + seen := make(map[string]bool) + + for _, r := range resp.Data { + if r.ID == "" { + continue + } + + seen[r.ID] = true + + if !m.seenServers[r.ID] { + m.seenServers[r.ID] = true + addr := net.JoinHostPort(r.Attrs.Params.Address, strconv.Itoa(int(r.Attrs.Params.Port))) + m.addServerCharts(r.ID, addr) + } + + px := fmt.Sprintf("server_%s_", r.ID) + + mx[px+"connections"] = r.Attrs.Statistics.Connections + + for _, v := range serverStates { + mx[px+"state_"+v] = 0 + } + for _, v := range strings.FieldsFunc(r.Attrs.State, unicode.IsSpace) { + mx[px+"state_"+v] = 1 + } + } + + for id := range m.seenServers { + if !seen[id] { + delete(m.seenServers, id) + m.removeServerCharts(id) + } + } + + return nil +} diff --git a/src/go/plugin/go.d/modules/maxscale/config_schema.json b/src/go/plugin/go.d/modules/maxscale/config_schema.json new file mode 100644 index 000000000..9dc0eecc3 --- /dev/null +++ b/src/go/plugin/go.d/modules/maxscale/config_schema.json @@ -0,0 +1,184 @@ +{ + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "MaxScale collector configuration.", + "type": "object", + "properties": { + "update_every": { + "title": "Update every", + "description": "Data collection interval, measured in seconds.", + "type": "integer", + "minimum": 1, + "default": 1 + }, + "url": { + "title": "URL", + "description": "The URL of the MaxScale HTTP API endpoint.", + "type": "string", + "default": "http://127.0.0.1:8989", + "format": "uri" + }, + "timeout": { + "title": "Timeout", + "description": "The timeout in seconds for the HTTP request.", + "type": "number", + "minimum": 0.5, + "default": 1 + }, + "not_follow_redirects": { + "title": "Not follow redirects", + "description": "If set, the client will 
not follow HTTP redirects automatically.", + "type": "boolean" + }, + "username": { + "title": "Username", + "description": "The username for basic authentication.", + "type": "string", + "default": "admin", + "sensitive": true + }, + "password": { + "title": "Password", + "description": "The password for basic authentication.", + "type": "string", + "default": "mariadb", + "sensitive": true + }, + "proxy_url": { + "title": "Proxy URL", + "description": "The URL of the proxy server.", + "type": "string" + }, + "proxy_username": { + "title": "Proxy username", + "description": "The username for proxy authentication.", + "type": "string", + "sensitive": true + }, + "proxy_password": { + "title": "Proxy password", + "description": "The password for proxy authentication.", + "type": "string", + "sensitive": true + }, + "headers": { + "title": "Headers", + "description": "Additional HTTP headers to include in the request.", + "type": [ + "object", + "null" + ], + "additionalProperties": { + "type": "string" + } + }, + "tls_skip_verify": { + "title": "Skip TLS verification", + "description": "If set, TLS certificate verification will be skipped.", + "type": "boolean" + }, + "tls_ca": { + "title": "TLS CA", + "description": "The path to the CA certificate file for TLS verification.", + "type": "string", + "pattern": "^$|^/" + }, + "tls_cert": { + "title": "TLS certificate", + "description": "The path to the client certificate file for TLS authentication.", + "type": "string", + "pattern": "^$|^/" + }, + "tls_key": { + "title": "TLS key", + "description": "The path to the client key file for TLS authentication.", + "type": "string", + "pattern": "^$|^/" + }, + "body": { + "title": "Body", + "type": "string" + }, + "method": { + "title": "Method", + "type": "string" + } + }, + "required": [ + "url" + ], + "patternProperties": { + "^name$": {} + } + }, + "uiSchema": { + "uiOptions": { + "fullPage": true + }, + "body": { + "ui:widget": "hidden" + }, + "method": { + "ui:widget": "hidden" + }, + "timeout": { + "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)." + }, + "username": { + "ui:widget": "password" + }, + "proxy_username": { + "ui:widget": "password" + }, + "password": { + "ui:widget": "password" + }, + "proxy_password": { + "ui:widget": "password" + }, + "ui:flavour": "tabs", + "ui:options": { + "tabs": [ + { + "title": "Base", + "fields": [ + "update_every", + "url", + "timeout", + "not_follow_redirects" + ] + }, + { + "title": "Auth", + "fields": [ + "username", + "password" + ] + }, + { + "title": "TLS", + "fields": [ + "tls_skip_verify", + "tls_ca", + "tls_cert", + "tls_key" + ] + }, + { + "title": "Proxy", + "fields": [ + "proxy_url", + "proxy_username", + "proxy_password" + ] + }, + { + "title": "Headers", + "fields": [ + "headers" + ] + } + ] + } + } +} diff --git a/src/go/plugin/go.d/modules/maxscale/integrations/maxscale.md b/src/go/plugin/go.d/modules/maxscale/integrations/maxscale.md new file mode 100644 index 000000000..b77bf65ec --- /dev/null +++ b/src/go/plugin/go.d/modules/maxscale/integrations/maxscale.md @@ -0,0 +1,276 @@ + + +# MaxScale + + + + + +Plugin: go.d.plugin +Module: maxscale + + + +## Overview + +This collector monitors the activity and performance of MaxScale servers. + + +It sends HTTP requests to the MaxScale [REST API](https://mariadb.com/kb/en/maxscale-24-02rest-api/). + + +This collector is supported on all platforms. 
+ +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +The collector can automatically detect MaxScale instances running on: + +- localhost that are listening on port 8989 +- within Docker containers + +> **Note that the MaxScale REST API requires a username and password**. +> While Netdata can automatically detect MaxScale instances and create data collection jobs, these jobs will fail unless you provide the necessary credentials. + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per MaxScale instance + +These metrics refer to the monitored MaxScale instance. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| maxscale.poll_events | reads, writes, accepts, errors, hangups | events/s | +| maxscale.current_sessions | sessions | sessions | +| maxscale.current_zombie_connections | zombie | connections | +| maxscale.threads_by_state | active, draining, dormant | threads | +| maxscale.current_fds | managed | fds | +| maxscale.qc_cache_efficiency | hits, misses | requests/s | +| maxscale.qc_cache_operations | inserts, evictions | operations/s | +| maxscale.uptime | uptime | seconds | + +### Per server + +These metrics refer to the MariaDB server. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| server | Server ID. | +| address | Server address. | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| maxscale.server_state | master, slave, running, down, maintenance, draining, drained, relay_master, binlog_relay, synced | state | +| maxscale.server_current_connections | connections | connections | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/maxscale.conf`. + + +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/maxscale.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +
    Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 1 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | The URL of the MaxScale HTTP API endpoint. | http://127.0.0.1:8989 | yes | +| timeout | HTTP request timeout. | 1 | no | +| username | Username for basic HTTP authentication. | admin | no | +| password | Password for basic HTTP authentication. | mariadb | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +
    + +#### Examples + +##### Basic + +A basic example configuration. + +```yaml +jobs: + - name: local + url: http://127.0.0.1:8989 + username: admin + password: mariadb + +``` +##### HTTPS with self-signed certificate + +MaxScale with enabled HTTPS and self-signed certificate. + +
    Config
+
+```yaml
+jobs:
+  - name: local
+    url: https://127.0.0.1:8989
+    username: admin
+    password: mariadb
+    tls_skip_verify: yes
+
+```
+
    + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Collecting metrics from local and remote instances. + + +
    Config + +```yaml +jobs: + - name: local + url: http://127.0.0.1:8989 + username: admin + password: mariadb + + - name: remote + url: http://192.0.2.1:8989 + username: admin + password: mariadb + +``` +
    + + + +## Troubleshooting + +### Debug Mode + +**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature. + +To troubleshoot issues with the `maxscale` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m maxscale + ``` + +### Getting Logs + +If you're encountering problems with the `maxscale` collector, follow these steps to retrieve logs and identify potential issues: + +- **Run the command** specific to your system (systemd, non-systemd, or Docker container). +- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem. + +#### System with systemd + +Use the following command to view logs generated since the last Netdata service restart: + +```bash +journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep maxscale +``` + +#### System without systemd + +Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name: + +```bash +grep maxscale /var/log/netdata/collector.log +``` + +**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues. 
+ +#### Docker Container + +If your Netdata runs in a Docker container named "netdata" (replace if different), use this command: + +```bash +docker logs netdata 2>&1 | grep maxscale +``` + + diff --git a/src/go/plugin/go.d/modules/maxscale/maxscale.go b/src/go/plugin/go.d/modules/maxscale/maxscale.go new file mode 100644 index 000000000..09dc133ca --- /dev/null +++ b/src/go/plugin/go.d/modules/maxscale/maxscale.go @@ -0,0 +1,115 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package maxscale + +import ( + _ "embed" + "errors" + "fmt" + "net/http" + "time" + + "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("maxscale", module.Creator{ + JobConfigSchema: configSchema, + Create: func() module.Module { return New() }, + Config: func() any { return &Config{} }, + }) +} + +func New() *MaxScale { + return &MaxScale{ + Config: Config{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{ + URL: "http://127.0.0.1:8989", + Username: "admin", + Password: "mariadb", + }, + ClientConfig: web.ClientConfig{ + Timeout: confopt.Duration(time.Second * 1), + }, + }, + }, + charts: charts.Copy(), + seenServers: make(map[string]bool), + } +} + +type Config struct { + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + web.HTTPConfig `yaml:",inline" json:""` +} + +type MaxScale struct { + module.Base + Config `yaml:",inline" json:""` + + charts *module.Charts + + httpClient *http.Client + + seenServers map[string]bool +} + +func (m *MaxScale) Configuration() any { + return m.Config +} + +func (m *MaxScale) Init() error { + if m.URL == "" { + return errors.New("URL required but not set") + } + + httpClient, err := web.NewHTTPClient(m.ClientConfig) + if err != nil { + return fmt.Errorf("failed initializing http client: %w", err) + } + m.httpClient = httpClient + + m.Debugf("using URL %s", m.URL) + m.Debugf("using timeout: %s", m.Timeout) + + return nil +} + +func (m *MaxScale) Check() error { + mx, err := m.collect() + if err != nil { + return err + } + + if len(mx) == 0 { + return errors.New("no metrics collected") + } + + return nil +} + +func (m *MaxScale) Charts() *module.Charts { + return m.charts +} + +func (m *MaxScale) Collect() map[string]int64 { + mx, err := m.collect() + if err != nil { + m.Error(err) + return nil + } + + return mx +} + +func (m *MaxScale) Cleanup() { + if m.httpClient != nil { + m.httpClient.CloseIdleConnections() + } +} diff --git a/src/go/plugin/go.d/modules/maxscale/maxscale_test.go b/src/go/plugin/go.d/modules/maxscale/maxscale_test.go new file mode 100644 index 000000000..8395493d5 --- /dev/null +++ b/src/go/plugin/go.d/modules/maxscale/maxscale_test.go @@ -0,0 +1,284 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package maxscale + +import ( + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" +) + +var ( + dataConfigJSON, _ = os.ReadFile("testdata/config.json") + dataConfigYAML, _ = os.ReadFile("testdata/config.yaml") + + dataVer24MaxScale, _ = os.ReadFile("testdata/v24.02.3/maxscale.json") + dataVer24MaxScaleThreads, _ = os.ReadFile("testdata/v24.02.3/maxscale_threads.json") + 
dataVer24Servers, _ = os.ReadFile("testdata/v24.02.3/servers.json") +) + +func Test_testDataIsValid(t *testing.T) { + for name, data := range map[string][]byte{ + "dataConfigJSON": dataConfigJSON, + "dataConfigYAML": dataConfigYAML, + "dataVer24MaxScale": dataVer24MaxScale, + "dataVer24MaxScaleThreads": dataVer24MaxScaleThreads, + "dataVer24Servers": dataVer24Servers, + } { + require.NotNil(t, data, name) + } +} + +func TestMaxScale_ConfigurationSerialize(t *testing.T) { + module.TestConfigurationSerialize(t, &MaxScale{}, dataConfigJSON, dataConfigYAML) +} + +func TestMaxScale_Init(t *testing.T) { + tests := map[string]struct { + wantFail bool + config Config + }{ + "success with default": { + wantFail: false, + config: New().Config, + }, + "fail when URL not set": { + wantFail: true, + config: Config{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{URL: ""}, + }, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + ms := New() + ms.Config = test.config + + if test.wantFail { + assert.Error(t, ms.Init()) + } else { + assert.NoError(t, ms.Init()) + } + }) + } +} + +func TestMaxScale_Check(t *testing.T) { + tests := map[string]struct { + wantFail bool + prepare func(t *testing.T) (nu *MaxScale, cleanup func()) + }{ + "success on valid response": { + wantFail: false, + prepare: caseOk, + }, + "fail on unexpected JSON response": { + wantFail: true, + prepare: caseUnexpectedJsonResponse, + }, + "fail on invalid data response": { + wantFail: true, + prepare: caseInvalidDataResponse, + }, + "fail on connection refused": { + wantFail: true, + prepare: caseConnectionRefused, + }, + "fail on 404 response": { + wantFail: true, + prepare: case404, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + ms, cleanup := test.prepare(t) + defer cleanup() + + if test.wantFail { + assert.Error(t, ms.Check()) + } else { + assert.NoError(t, ms.Check()) + } + }) + } +} + +func TestMaxScale_Charts(t *testing.T) { + assert.NotNil(t, New().Charts()) +} + +func TestMaxScale_Collect(t *testing.T) { + tests := map[string]struct { + prepare func(t *testing.T) (nu *MaxScale, cleanup func()) + wantNumOfCharts int + wantMetrics map[string]int64 + }{ + "success on valid response": { + prepare: caseOk, + wantNumOfCharts: len(charts) + len(serverChartsTmpl)*1, + wantMetrics: map[string]int64{ + "server_kek_connections": 0, + "server_kek_state_Binlog Relay": 0, + "server_kek_state_Down": 1, + "server_kek_state_Drained": 0, + "server_kek_state_Draining": 0, + "server_kek_state_Maintenance": 0, + "server_kek_state_Master": 0, + "server_kek_state_Relay Master": 0, + "server_kek_state_Running": 0, + "server_kek_state_Slave": 0, + "server_kek_state_Synced": 0, + "threads_accepts": 0, + "threads_current_fds": 3, + "threads_errors": 0, + "threads_hangups": 0, + "threads_qc_cache_evictions": 0, + "threads_qc_cache_hits": 0, + "threads_qc_cache_inserts": 0, + "threads_qc_cache_misses": 0, + "threads_reads": 68359, + "threads_sessions": 0, + "threads_state_Active": 1, + "threads_state_Dormant": 0, + "threads_state_Draining": 0, + "threads_total_fds": 3, + "threads_writes": 0, + "threads_zombies": 0, + "uptime": 61298, + }, + }, + "fail on unexpected JSON response": { + prepare: caseUnexpectedJsonResponse, + wantMetrics: nil, + }, + "fail on invalid data response": { + prepare: caseInvalidDataResponse, + wantMetrics: nil, + }, + "fail on connection refused": { + prepare: caseConnectionRefused, + wantMetrics: nil, + }, + "fail on 404 response": { + prepare: 
case404, + wantMetrics: nil, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + ms, cleanup := test.prepare(t) + defer cleanup() + + _ = ms.Check() + + mx := ms.Collect() + + require.Equal(t, test.wantMetrics, mx) + + if len(test.wantMetrics) > 0 { + assert.Equal(t, test.wantNumOfCharts, len(*ms.Charts()), "want charts") + + module.TestMetricsHasAllChartsDims(t, ms.Charts(), mx) + } + }) + } +} + +func caseOk(t *testing.T) (*MaxScale, func()) { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case urlPathMaxscale: + _, _ = w.Write(dataVer24MaxScale) + case urlPathMaxscaleThreads: + _, _ = w.Write(dataVer24MaxScaleThreads) + case urlPathServers: + _, _ = w.Write(dataVer24Servers) + default: + w.WriteHeader(http.StatusNotFound) + } + })) + ms := New() + ms.URL = srv.URL + require.NoError(t, ms.Init()) + + return ms, srv.Close +} + +func caseUnexpectedJsonResponse(t *testing.T) (*MaxScale, func()) { + t.Helper() + resp := ` +{ + "elephant": { + "burn": false, + "mountain": true, + "fog": false, + "skin": -1561907625, + "burst": "anyway", + "shadow": 1558616893 + }, + "start": "ever", + "base": 2093056027, + "mission": -2007590351, + "victory": 999053756, + "die": false +} +` + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write([]byte(resp)) + })) + ms := New() + ms.URL = srv.URL + require.NoError(t, ms.Init()) + + return ms, srv.Close +} + +func caseInvalidDataResponse(t *testing.T) (*MaxScale, func()) { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write([]byte("hello and\n goodbye")) + })) + ms := New() + ms.URL = srv.URL + require.NoError(t, ms.Init()) + + return ms, srv.Close +} + +func caseConnectionRefused(t *testing.T) (*MaxScale, func()) { + t.Helper() + ms := New() + ms.URL = "http://127.0.0.1:65001" + require.NoError(t, ms.Init()) + + return ms, func() {} +} + +func case404(t *testing.T) (*MaxScale, func()) { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + })) + ms := New() + ms.URL = srv.URL + require.NoError(t, ms.Init()) + + return ms, srv.Close +} diff --git a/src/go/plugin/go.d/modules/maxscale/metadata.yaml b/src/go/plugin/go.d/modules/maxscale/metadata.yaml new file mode 100644 index 000000000..618b27b8d --- /dev/null +++ b/src/go/plugin/go.d/modules/maxscale/metadata.yaml @@ -0,0 +1,272 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-maxscale + plugin_name: go.d.plugin + module_name: maxscale + monitored_instance: + name: MaxScale + link: https://mariadb.com/kb/en/maxscale/ + categories: + - data-collection.database-servers + icon_filename: maxscale.svg + related_resources: + integrations: + list: [] + alternative_monitored_instances: [] + info_provided_to_referring_integrations: + description: "" + keywords: + - maria + - mariadb + - maxscale + - database + - db + most_popular: false + overview: + data_collection: + metrics_description: | + This collector monitors the activity and performance of MaxScale servers. + method_description: | + It sends HTTP requests to the MaxScale [REST API](https://mariadb.com/kb/en/maxscale-24-02rest-api/). 
+      default_behavior:
+        auto_detection:
+          description: |
+            The collector can automatically detect MaxScale instances running on:
+
+            - localhost that are listening on port 8989
+            - within Docker containers
+
+            > **Note that the MaxScale REST API requires a username and password**.
+            > While Netdata can automatically detect MaxScale instances and create data collection jobs, these jobs will fail unless you provide the necessary credentials.
+        limits:
+          description: ""
+        performance_impact:
+          description: ""
+      additional_permissions:
+        description: ""
+      multi_instance: true
+      supported_platforms:
+        include: []
+        exclude: []
+    setup:
+      prerequisites:
+        list: []
+      configuration:
+        file:
+          name: go.d/maxscale.conf
+        options:
+          description: |
+            The following options can be defined globally: update_every, autodetection_retry.
+          folding:
+            title: Config options
+            enabled: true
+          list:
+            - name: update_every
+              description: Data collection frequency.
+              default_value: 1
+              required: false
+            - name: autodetection_retry
+              description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+              default_value: 0
+              required: false
+            - name: url
+              description: The URL of the MaxScale HTTP API endpoint.
+              default_value: http://127.0.0.1:8989
+              required: true
+            - name: timeout
+              description: HTTP request timeout.
+              default_value: 1
+              required: false
+            - name: username
+              description: Username for basic HTTP authentication.
+              default_value: "admin"
+              required: false
+            - name: password
+              description: Password for basic HTTP authentication.
+              default_value: "mariadb"
+              required: false
+            - name: proxy_url
+              description: Proxy URL.
+              default_value: ""
+              required: false
+            - name: proxy_username
+              description: Username for proxy basic HTTP authentication.
+              default_value: ""
+              required: false
+            - name: proxy_password
+              description: Password for proxy basic HTTP authentication.
+              default_value: ""
+              required: false
+            - name: method
+              description: HTTP request method.
+              default_value: GET
+              required: false
+            - name: body
+              description: HTTP request body.
+              default_value: ""
+              required: false
+            - name: headers
+              description: HTTP request headers.
+              default_value: ""
+              required: false
+            - name: not_follow_redirects
+              description: Redirect handling policy. Controls whether the client follows redirects.
+              default_value: false
+              required: false
+            - name: tls_skip_verify
+              description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+              default_value: false
+              required: false
+            - name: tls_ca
+              description: Certification authority that the client uses when verifying the server's certificates.
+              default_value: ""
+              required: false
+            - name: tls_cert
+              description: Client TLS certificate.
+              default_value: ""
+              required: false
+            - name: tls_key
+              description: Client TLS key.
+              default_value: ""
+              required: false
+        examples:
+          folding:
+            title: Config
+            enabled: true
+          list:
+            - name: Basic
+              description: A basic example configuration.
+              folding:
+                enabled: false
+              config: |
+                jobs:
+                  - name: local
+                    url: http://127.0.0.1:8989
+                    username: admin
+                    password: mariadb
+            - name: HTTPS with self-signed certificate
+              description: MaxScale with enabled HTTPS and self-signed certificate.
+              config: |
+                jobs:
+                  - name: local
+                    url: https://127.0.0.1:8989
+                    username: admin
+                    password: mariadb
+                    tls_skip_verify: yes
+            - name: Multi-instance
+              description: |
+                > **Note**: When you define multiple jobs, their names must be unique.
+ + Collecting metrics from local and remote instances. + config: | + jobs: + - name: local + url: http://127.0.0.1:8989 + username: admin + password: mariadb + + - name: remote + url: http://192.0.2.1:8989 + username: admin + password: mariadb + troubleshooting: + problems: + list: [] + alerts: [] + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: global + description: These metrics refer to the monitored MaxScale instance. + labels: [] + metrics: + - name: maxscale.poll_events + description: Poll Events + unit: events/s + chart_type: line + dimensions: + - name: reads + - name: writes + - name: accepts + - name: errors + - name: hangups + - name: maxscale.current_sessions + description: Current Sessions + unit: sessions + chart_type: line + dimensions: + - name: sessions + - name: maxscale.current_zombie_connections + description: Current Zombie Connections + unit: connections + chart_type: line + dimensions: + - name: zombie + - name: maxscale.threads_by_state + description: Threads Count by State + unit: threads + chart_type: stacked + dimensions: + - name: active + - name: draining + - name: dormant + - name: maxscale.current_fds + description: Current Managed File Descriptors + unit: fds + chart_type: line + dimensions: + - name: managed + - name: maxscale.qc_cache_efficiency + description: QC Cache Efficiency + unit: requests/s + chart_type: stacked + dimensions: + - name: hits + - name: misses + - name: maxscale.qc_cache_operations + description: QC Cache Operations + unit: operations/s + chart_type: stacked + dimensions: + - name: inserts + - name: evictions + - name: maxscale.uptime + description: Uptime + unit: seconds + chart_type: line + dimensions: + - name: uptime + - name: server + description: These metrics refer to the MariaDB server. + labels: + - name: server + description: Server ID. + - name: address + description: Server address. 
+          metrics:
+            - name: maxscale.server_state
+              description: Server State
+              unit: state
+              chart_type: line
+              dimensions:
+                - name: master
+                - name: slave
+                - name: running
+                - name: down
+                - name: maintenance
+                - name: draining
+                - name: drained
+                - name: relay_master
+                - name: binlog_relay
+                - name: synced
+            - name: maxscale.server_current_connections
+              description: Server Current Connections
+              unit: connections
+              chart_type: line
+              dimensions:
+                - name: connections
diff --git a/src/go/plugin/go.d/modules/maxscale/restapi.go b/src/go/plugin/go.d/modules/maxscale/restapi.go
new file mode 100644
index 000000000..724941301
--- /dev/null
+++ b/src/go/plugin/go.d/modules/maxscale/restapi.go
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package maxscale
+
+// https://mariadb.com/kb/en/maxscale-24-02rest-api/
+
+type maxscaleGlobalResponse struct {
+    Data *struct {
+        Attrs struct {
+            Params struct {
+                Threads int64 `json:"threads"`
+                Passive bool `json:"passive"`
+            } `json:"parameters"`
+            Uptime int64 `json:"uptime"`
+        } `json:"attributes"`
+    } `json:"data"`
+}
+
+// https://github.com/mariadb-corporation/MaxScale/blob/f72af9927243f59f2b20cc835273dbe6dc158623/server/core/routingworker.cc#L3021
+// https://github.com/mariadb-corporation/MaxScale/blob/f72af9927243f59f2b20cc835273dbe6dc158623/maxctrl/lib/show.js#L471
+type maxscaleThreadsResponse struct {
+    Data []struct {
+        ID string `json:"id"`
+        Attrs struct {
+            Stats struct {
+                State string `json:"state"`
+                Reads int64 `json:"reads"`
+                Writes int64 `json:"writes"`
+                Errors int64 `json:"errors"`
+                Hangups int64 `json:"hangups"`
+                Accepts int64 `json:"accepts"`
+                Sessions int64 `json:"sessions"`
+                Zombies int64 `json:"zombies"`
+                CurrentDescriptors int64 `json:"current_descriptors"`
+                TotalDescriptors int64 `json:"total_descriptors"`
+                QCCache struct {
+                    Size int64 `json:"size"`
+                    Inserts int64 `json:"inserts"`
+                    Hits int64 `json:"hits"`
+                    Misses int64 `json:"misses"`
+                    Evictions int64 `json:"evictions"`
+                } `json:"query_classifier_cache"`
+            } `json:"stats"`
+        } `json:"attributes"`
+    } `json:"data"`
+}
+
+// https://github.com/mariadb-corporation/MaxScale/blob/f72af9927243f59f2b20cc835273dbe6dc158623/server/core/routingworker.cc#L3064
+var threadStates = []string{
+    "Active",
+    "Draining",
+    "Dormant",
+}
+
+type serversResponse struct {
+    Data []struct {
+        ID string `json:"id"`
+        Type string `json:"type"`
+        Attrs struct {
+            Params struct {
+                Address string `json:"address"`
+                Port int `json:"port"`
+            } `json:"parameters"`
+            State string `json:"state"`
+            Statistics struct {
+                Connections int64 `json:"connections"`
+            } `json:"statistics"`
+        } `json:"attributes"`
+    } `json:"data"`
+}
+
+// https://github.com/mariadb-corporation/MaxScale/blob/f72af9927243f59f2b20cc835273dbe6dc158623/system-test/maxtest/src/maxscales.cc#L43
+var serverStates = []string{
+    "Master",
+    "Slave",
+    "Running",
+    "Down",
+    "Maintenance",
+    "Draining",
+    "Drained",
+    "Relay Master",
+    "Binlog Relay",
+    "Synced",
+}
diff --git a/src/go/plugin/go.d/modules/maxscale/testdata/config.json b/src/go/plugin/go.d/modules/maxscale/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/maxscale/testdata/config.json
@@ -0,0 +1,20 @@
+{
+  "update_every": 123,
+  "url": "ok",
+  "body": "ok",
+  "method": "ok",
+  "headers": {
+    "ok": "ok"
+  },
+  "username": "ok",
+  "password": "ok",
+  "proxy_url": "ok",
+  "proxy_username": "ok",
+  "proxy_password": "ok",
+  "timeout": 123.123,
+  "not_follow_redirects": true,
+
"tls_ca": "ok", + "tls_cert": "ok", + "tls_key": "ok", + "tls_skip_verify": true +} diff --git a/src/go/plugin/go.d/modules/maxscale/testdata/config.yaml b/src/go/plugin/go.d/modules/maxscale/testdata/config.yaml new file mode 100644 index 000000000..8558b61cc --- /dev/null +++ b/src/go/plugin/go.d/modules/maxscale/testdata/config.yaml @@ -0,0 +1,17 @@ +update_every: 123 +url: "ok" +body: "ok" +method: "ok" +headers: + ok: "ok" +username: "ok" +password: "ok" +proxy_url: "ok" +proxy_username: "ok" +proxy_password: "ok" +timeout: 123.123 +not_follow_redirects: yes +tls_ca: "ok" +tls_cert: "ok" +tls_key: "ok" +tls_skip_verify: yes diff --git a/src/go/plugin/go.d/modules/maxscale/testdata/v24.02.3/maxscale.json b/src/go/plugin/go.d/modules/maxscale/testdata/v24.02.3/maxscale.json new file mode 100644 index 000000000..d615f55cb --- /dev/null +++ b/src/go/plugin/go.d/modules/maxscale/testdata/v24.02.3/maxscale.json @@ -0,0 +1,124 @@ +{ + "data": { + "attributes": { + "activated_at": "Wed, 23 Oct 2024 18:39:44 GMT", + "commit": "d77a737db314a3dab3811a1e4b81e20ac2aedbe9", + "config_sync": null, + "parameters": { + "admin_audit": false, + "admin_audit_exclude_methods": [], + "admin_audit_file": "/var/log/maxscale/admin_audit.csv", + "admin_auth": true, + "admin_enabled": true, + "admin_gui": true, + "admin_host": "0.0.0.0", + "admin_jwt_algorithm": "auto", + "admin_jwt_issuer": "maxscale", + "admin_jwt_key": null, + "admin_jwt_max_age": "86400000ms", + "admin_log_auth_failures": true, + "admin_oidc_url": null, + "admin_pam_readonly_service": null, + "admin_pam_readwrite_service": null, + "admin_port": 8989, + "admin_readonly_hosts": "%", + "admin_readwrite_hosts": "%", + "admin_secure_gui": false, + "admin_ssl_ca": null, + "admin_ssl_cert": null, + "admin_ssl_key": null, + "admin_ssl_version": "MAX", + "admin_verify_url": null, + "auth_connect_timeout": "10000ms", + "auth_read_timeout": "10000ms", + "auth_write_timeout": "10000ms", + "auto_tune": [], + "cachedir": "/var/cache/maxscale", + "config_sync_cluster": null, + "config_sync_db": "mysql", + "config_sync_interval": "5000ms", + "config_sync_password": null, + "config_sync_timeout": "10000ms", + "config_sync_user": null, + "connector_plugindir": "/usr/lib64/maxscale/plugin", + "datadir": "/var/lib/maxscale", + "debug": null, + "dump_last_statements": "never", + "execdir": "/usr/bin", + "key_manager": "none", + "language": "/var/lib/maxscale", + "libdir": "/usr/lib64/maxscale", + "load_persisted_configs": true, + "local_address": null, + "log_debug": false, + "log_info": false, + "log_notice": true, + "log_throttling": { + "count": 10, + "suppress": 10000, + "window": 1000 + }, + "log_warn_super_user": false, + "log_warning": true, + "logdir": "/var/log/maxscale", + "max_auth_errors_until_block": 10, + "max_read_amount": 0, + "maxlog": true, + "module_configdir": "/etc/maxscale.modules.d", + "ms_timestamp": false, + "passive": false, + "persist_runtime_changes": true, + "persistdir": "/var/lib/maxscale/maxscale.cnf.d", + "piddir": "/var/run/maxscale", + "query_classifier_cache_size": 3779569459, + "query_retries": 1, + "query_retry_timeout": "5000ms", + "rebalance_period": "0ms", + "rebalance_threshold": 20, + "rebalance_window": 10, + "retain_last_statements": 0, + "secretsdir": null, + "session_trace": 0, + "session_trace_match": null, + "skip_name_resolve": false, + "sql_mode": "default", + "syslog": false, + "threads": 1, + "threads_max": 256, + "users_refresh_interval": "0ms", + "users_refresh_time": "30000ms", + "writeq_high_water": 
65536, + "writeq_low_water": 1024 + }, + "process_datadir": "/var/lib/maxscale/data20", + "started_at": "Wed, 23 Oct 2024 18:39:44 GMT", + "system": { + "machine": { + "cores_available": 16, + "cores_physical": 16, + "cores_virtual": 16.0, + "memory_available": 25197129728, + "memory_physical": 25197129728 + }, + "maxscale": { + "query_classifier_cache_size": 3779569459, + "threads": 1 + }, + "os": { + "machine": "x86_64", + "nodename": "pve-deb-work", + "release": "6.1.0-25-amd64", + "sysname": "Linux", + "version": "#1 SMP PREEMPT_DYNAMIC Debian 6.1.106-3 (2024-08-26)" + } + }, + "uptime": 61298, + "version": "24.02.3" + }, + "id": "maxscale", + "type": "maxscale" + }, + "links": { + "self": "http://localhost:8989/v1/maxscale/" + } +} diff --git a/src/go/plugin/go.d/modules/maxscale/testdata/v24.02.3/maxscale_threads.json b/src/go/plugin/go.d/modules/maxscale/testdata/v24.02.3/maxscale_threads.json new file mode 100644 index 000000000..f8e68fdbc --- /dev/null +++ b/src/go/plugin/go.d/modules/maxscale/testdata/v24.02.3/maxscale_threads.json @@ -0,0 +1,51 @@ +{ + "data": [ + { + "attributes": { + "stats": { + "accepts": 0, + "avg_event_queue_length": 1, + "current_descriptors": 3, + "errors": 0, + "hangups": 0, + "listening": true, + "load": { + "last_hour": 0, + "last_minute": 0, + "last_second": 0 + }, + "max_event_queue_length": 1, + "max_exec_time": 0, + "max_queue_time": 0, + "memory": { + "query_classifier": 0, + "sessions": 0, + "total": 0, + "zombies": 0 + }, + "query_classifier_cache": { + "evictions": 0, + "hits": 0, + "inserts": 0, + "misses": 0, + "size": 0 + }, + "reads": 68359, + "sessions": 0, + "state": "Active", + "total_descriptors": 3, + "writes": 0, + "zombies": 0 + } + }, + "id": "0", + "links": { + "self": "http://localhost:8989/v1/threads/0/" + }, + "type": "threads" + } + ], + "links": { + "self": "http://localhost:8989/v1/maxscale/threads/" + } +} diff --git a/src/go/plugin/go.d/modules/maxscale/testdata/v24.02.3/servers.json b/src/go/plugin/go.d/modules/maxscale/testdata/v24.02.3/servers.json new file mode 100644 index 000000000..a6c053193 --- /dev/null +++ b/src/go/plugin/go.d/modules/maxscale/testdata/v24.02.3/servers.json @@ -0,0 +1,198 @@ +{ + "data": [ + { + "attributes": { + "parameters": { + "address": "127.0.0.1", + "disk_space_threshold": null, + "extra_port": 0, + "max_routing_connections": 0, + "monitorpw": null, + "monitoruser": null, + "persistmaxtime": "0ms", + "persistpoolmax": 0, + "port": 3306, + "priority": 0, + "private_address": null, + "proxy_protocol": false, + "rank": "primary", + "replication_custom_options": null, + "socket": null, + "ssl": false, + "ssl_ca": null, + "ssl_cert": null, + "ssl_cert_verify_depth": 9, + "ssl_cipher": null, + "ssl_key": null, + "ssl_verify_peer_certificate": false, + "ssl_verify_peer_host": false, + "ssl_version": "MAX" + }, + "replication_lag": -1, + "source": { + "file": "/var/lib/maxscale/maxscale.cnf.d/kek.cnf", + "type": "runtime" + }, + "state": "Down", + "statistics": { + "active_operations": 0, + "adaptive_avg_select_time": "0ns", + "connection_pool_empty": 0, + "connections": 0, + "failed_auths": 0, + "max_connections": 0, + "max_pool_size": 0, + "persistent_connections": 0, + "response_time_distribution": { + "read": { + "distribution": [ + { + "count": 0, + "time": "0.000001", + "total": 0 + }, + { + "count": 0, + "time": "0.000010", + "total": 0 + }, + { + "count": 0, + "time": "0.000100", + "total": 0 + }, + { + "count": 0, + "time": "0.001000", + "total": 0 + }, + { + "count": 0, + "time": 
"0.010000", + "total": 0 + }, + { + "count": 0, + "time": "0.100000", + "total": 0 + }, + { + "count": 0, + "time": "1.000000", + "total": 0 + }, + { + "count": 0, + "time": "10.000000", + "total": 0 + }, + { + "count": 0, + "time": "100.000000", + "total": 0 + }, + { + "count": 0, + "time": "1000.000000", + "total": 0 + }, + { + "count": 0, + "time": "10000.000000", + "total": 0 + }, + { + "count": 0, + "time": "100000.000000", + "total": 0 + } + ], + "operation": "read", + "range_base": 10 + }, + "write": { + "distribution": [ + { + "count": 0, + "time": "0.000001", + "total": 0 + }, + { + "count": 0, + "time": "0.000010", + "total": 0 + }, + { + "count": 0, + "time": "0.000100", + "total": 0 + }, + { + "count": 0, + "time": "0.001000", + "total": 0 + }, + { + "count": 0, + "time": "0.010000", + "total": 0 + }, + { + "count": 0, + "time": "0.100000", + "total": 0 + }, + { + "count": 0, + "time": "1.000000", + "total": 0 + }, + { + "count": 0, + "time": "10.000000", + "total": 0 + }, + { + "count": 0, + "time": "100.000000", + "total": 0 + }, + { + "count": 0, + "time": "1000.000000", + "total": 0 + }, + { + "count": 0, + "time": "10000.000000", + "total": 0 + }, + { + "count": 0, + "time": "100000.000000", + "total": 0 + } + ], + "operation": "write", + "range_base": 10 + } + }, + "reused_connections": 0, + "routed_packets": 0, + "total_connections": 0 + }, + "uptime": 0, + "version_string": "" + }, + "id": "kek", + "links": { + "self": "http://localhost:8989/v1/servers/kek/" + }, + "relationships": {}, + "type": "servers" + } + ], + "links": { + "self": "http://localhost:8989/v1/servers/" + } +} diff --git a/src/go/plugin/go.d/modules/megacli/charts.go b/src/go/plugin/go.d/modules/megacli/charts.go index c479d5677..455620022 100644 --- a/src/go/plugin/go.d/modules/megacli/charts.go +++ b/src/go/plugin/go.d/modules/megacli/charts.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly + package megacli import ( diff --git a/src/go/plugin/go.d/modules/megacli/collect.go b/src/go/plugin/go.d/modules/megacli/collect.go index c4e74b78b..91d7d2a72 100644 --- a/src/go/plugin/go.d/modules/megacli/collect.go +++ b/src/go/plugin/go.d/modules/megacli/collect.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly + package megacli import ( diff --git a/src/go/plugin/go.d/modules/megacli/collect_bbu.go b/src/go/plugin/go.d/modules/megacli/collect_bbu.go index 33b048e64..251c6b601 100644 --- a/src/go/plugin/go.d/modules/megacli/collect_bbu.go +++ b/src/go/plugin/go.d/modules/megacli/collect_bbu.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly + package megacli import ( diff --git a/src/go/plugin/go.d/modules/megacli/collect_phys_drives.go b/src/go/plugin/go.d/modules/megacli/collect_phys_drives.go index 71d4546e3..aec0e4cce 100644 --- a/src/go/plugin/go.d/modules/megacli/collect_phys_drives.go +++ b/src/go/plugin/go.d/modules/megacli/collect_phys_drives.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly + package megacli import ( diff --git a/src/go/plugin/go.d/modules/megacli/config_schema.json b/src/go/plugin/go.d/modules/megacli/config_schema.json index 6eb36519d..f1cbd8492 100644 --- a/src/go/plugin/go.d/modules/megacli/config_schema.json +++ b/src/go/plugin/go.d/modules/megacli/config_schema.json @@ 
-19,7 +19,6 @@ "default": 2 } }, - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/megacli/doc.go b/src/go/plugin/go.d/modules/megacli/doc.go new file mode 100644 index 000000000..9df1136d2 --- /dev/null +++ b/src/go/plugin/go.d/modules/megacli/doc.go @@ -0,0 +1,3 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package megacli diff --git a/src/go/plugin/go.d/modules/megacli/exec.go b/src/go/plugin/go.d/modules/megacli/exec.go index 846952b25..266a8b4a5 100644 --- a/src/go/plugin/go.d/modules/megacli/exec.go +++ b/src/go/plugin/go.d/modules/megacli/exec.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly + package megacli import ( diff --git a/src/go/plugin/go.d/modules/megacli/init.go b/src/go/plugin/go.d/modules/megacli/init.go index 78b7bf482..384836c72 100644 --- a/src/go/plugin/go.d/modules/megacli/init.go +++ b/src/go/plugin/go.d/modules/megacli/init.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly + package megacli import ( diff --git a/src/go/plugin/go.d/modules/megacli/integrations/megacli_megaraid.md b/src/go/plugin/go.d/modules/megacli/integrations/megacli_megaraid.md index d1efa7df1..3f7508dd6 100644 --- a/src/go/plugin/go.d/modules/megacli/integrations/megacli_megaraid.md +++ b/src/go/plugin/go.d/modules/megacli/integrations/megacli_megaraid.md @@ -33,7 +33,10 @@ Executed commands: -This collector is supported on all platforms. +This collector is only supported on the following platforms: + +- Linux +- BSD This collector only supports collecting metrics from a single instance of this integration. @@ -147,8 +150,8 @@ No action required. The configuration file name for this integration is `go.d/megacli.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
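The platform note above reflects the `//go:build linux || freebsd || openbsd || netbsd || dragonfly` constraint this patch adds to every megacli source and test file. A minimal sketch of the pattern (file header only, as it appears across the hunks; the trailing comment is mine):

```go
// SPDX-License-Identifier: GPL-3.0-or-later

//go:build linux || freebsd || openbsd || netbsd || dragonfly

package megacli

// Files carrying this tag compile only on Linux and the BSDs; the new,
// unconstrained doc.go keeps the package resolvable on other platforms.
```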
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/megacli/megacli.go b/src/go/plugin/go.d/modules/megacli/megacli.go index 41abd7a12..2a2d2de94 100644 --- a/src/go/plugin/go.d/modules/megacli/megacli.go +++ b/src/go/plugin/go.d/modules/megacli/megacli.go @@ -1,14 +1,17 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly + package megacli import ( _ "embed" "errors" + "fmt" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" ) //go:embed "config_schema.json" @@ -28,7 +31,7 @@ func init() { func New() *MegaCli { return &MegaCli{ Config: Config{ - Timeout: web.Duration(time.Second * 2), + Timeout: confopt.Duration(time.Second * 2), }, charts: &module.Charts{}, adapters: make(map[string]bool), @@ -38,8 +41,8 @@ func New() *MegaCli { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + Timeout confopt.Duration `yaml:"timeout,omitempty" json:"timeout"` } type ( @@ -68,8 +71,7 @@ func (m *MegaCli) Configuration() any { func (m *MegaCli) Init() error { lvmExec, err := m.initMegaCliExec() if err != nil { - m.Errorf("megacli exec initialization: %v", err) - return err + return fmt.Errorf("init megacli exec: %v", err) } m.exec = lvmExec @@ -79,7 +81,6 @@ func (m *MegaCli) Init() error { func (m *MegaCli) Check() error { mx, err := m.collect() if err != nil { - m.Error(err) return err } diff --git a/src/go/plugin/go.d/modules/megacli/megacli_test.go b/src/go/plugin/go.d/modules/megacli/megacli_test.go index 4991a28ce..6b583d6e4 100644 --- a/src/go/plugin/go.d/modules/megacli/megacli_test.go +++ b/src/go/plugin/go.d/modules/megacli/megacli_test.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly + package megacli import ( diff --git a/src/go/plugin/go.d/modules/megacli/metadata.yaml b/src/go/plugin/go.d/modules/megacli/metadata.yaml index da5f4fefa..999654dcd 100644 --- a/src/go/plugin/go.d/modules/megacli/metadata.yaml +++ b/src/go/plugin/go.d/modules/megacli/metadata.yaml @@ -33,7 +33,7 @@ modules: - `megacli -AdpBbuCmd -aAll -NoLog` method_description: "" supported_platforms: - include: [] + include: [Linux, BSD] exclude: [] multi_instance: false additional_permissions: diff --git a/src/go/plugin/go.d/modules/memcached/client.go b/src/go/plugin/go.d/modules/memcached/client.go index 679e3eb0f..b6bab9bf8 100644 --- a/src/go/plugin/go.d/modules/memcached/client.go +++ b/src/go/plugin/go.d/modules/memcached/client.go @@ -11,10 +11,8 @@ import ( func newMemcachedConn(conf Config) memcachedConn { return &memcachedClient{conn: socket.New(socket.Config{ - Address: conf.Address, - ConnectTimeout: conf.Timeout.Duration(), - ReadTimeout: conf.Timeout.Duration(), - WriteTimeout: conf.Timeout.Duration(), + Address: conf.Address, + Timeout: conf.Timeout.Duration(), })} } diff --git a/src/go/plugin/go.d/modules/memcached/config_schema.json b/src/go/plugin/go.d/modules/memcached/config_schema.json index f92a8eee9..67ac2f84e 100644 --- a/src/go/plugin/go.d/modules/memcached/config_schema.json +++ b/src/go/plugin/go.d/modules/memcached/config_schema.json @@ -28,7 +28,6 @@ "required": [ "address" ], - 
"additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/memcached/integrations/memcached.md b/src/go/plugin/go.d/modules/memcached/integrations/memcached.md index 1e653902f..86fb38c6a 100644 --- a/src/go/plugin/go.d/modules/memcached/integrations/memcached.md +++ b/src/go/plugin/go.d/modules/memcached/integrations/memcached.md @@ -106,8 +106,8 @@ No action required. The configuration file name for this integration is `go.d/memcached.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/memcached/memcached.go b/src/go/plugin/go.d/modules/memcached/memcached.go index bd6039aee..a823a9d3b 100644 --- a/src/go/plugin/go.d/modules/memcached/memcached.go +++ b/src/go/plugin/go.d/modules/memcached/memcached.go @@ -8,7 +8,7 @@ import ( "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" ) //go:embed "config_schema.json" @@ -26,7 +26,7 @@ func New() *Memcached { return &Memcached{ Config: Config{ Address: "127.0.0.1:11211", - Timeout: web.Duration(time.Second * 1), + Timeout: confopt.Duration(time.Second * 1), }, newMemcachedConn: newMemcachedConn, charts: charts.Copy(), @@ -34,9 +34,9 @@ func New() *Memcached { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - Address string `yaml:"address" json:"address"` - Timeout web.Duration `yaml:"timeout" json:"timeout"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + Address string `yaml:"address" json:"address"` + Timeout confopt.Duration `yaml:"timeout" json:"timeout"` } type ( @@ -62,8 +62,7 @@ func (m *Memcached) Configuration() any { func (m *Memcached) Init() error { if m.Address == "" { - m.Error("config: 'address' not set") - return errors.New("address not set") + return errors.New("config: 'address' not set") } return nil @@ -72,7 +71,6 @@ func (m *Memcached) Init() error { func (m *Memcached) Check() error { mx, err := m.collect() if err != nil { - m.Error(err) return err } diff --git a/src/go/plugin/go.d/modules/mongodb/config_schema.json b/src/go/plugin/go.d/modules/mongodb/config_schema.json index fc5c42eff..69586f1c5 100644 --- a/src/go/plugin/go.d/modules/mongodb/config_schema.json +++ b/src/go/plugin/go.d/modules/mongodb/config_schema.json @@ -64,7 +64,6 @@ "required": [ "uri" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/mongodb/documents.go b/src/go/plugin/go.d/modules/mongodb/documents.go index 5c95e952e..b95d2018d 100644 --- a/src/go/plugin/go.d/modules/mongodb/documents.go +++ b/src/go/plugin/go.d/modules/mongodb/documents.go @@ -20,7 +20,7 @@ type documentServerStatus struct { Tcmalloc *documentTCMallocStatus `bson:"tcmalloc" stm:"tcmalloc"` Locks *documentLocks `bson:"locks" stm:"locks"` WiredTiger 
*documentWiredTiger `bson:"wiredTiger" stm:"wiredtiger"` - Repl interface{} `bson:"repl"` + Repl any `bson:"repl"` } type ( diff --git a/src/go/plugin/go.d/modules/mongodb/integrations/mongodb.md b/src/go/plugin/go.d/modules/mongodb/integrations/mongodb.md index e47c3865d..0f22134bc 100644 --- a/src/go/plugin/go.d/modules/mongodb/integrations/mongodb.md +++ b/src/go/plugin/go.d/modules/mongodb/integrations/mongodb.md @@ -248,8 +248,8 @@ Create a read-only user for Netdata in the admin database. The configuration file name for this integration is `go.d/mongodb.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/mongodb/metadata.yaml b/src/go/plugin/go.d/modules/mongodb/metadata.yaml index ae013539f..6ab6f5bb1 100644 --- a/src/go/plugin/go.d/modules/mongodb/metadata.yaml +++ b/src/go/plugin/go.d/modules/mongodb/metadata.yaml @@ -104,7 +104,7 @@ modules: Metrics of databases matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) - - Pattern syntax: [matcher](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/matcher#supported-format). + - Pattern syntax: [matcher](https://github.com/netdata/netdata/tree/master/src/go/pkg/matcher#supported-format). 
- Syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/mongodb/mongodb.go b/src/go/plugin/go.d/modules/mongodb/mongodb.go index 7b8550251..d8b962bff 100644 --- a/src/go/plugin/go.d/modules/mongodb/mongodb.go +++ b/src/go/plugin/go.d/modules/mongodb/mongodb.go @@ -5,12 +5,13 @@ package mongo import ( _ "embed" "errors" + "fmt" "sync" "time" + "github.com/netdata/netdata/go/plugins/pkg/matcher" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" ) //go:embed "config_schema.json" @@ -28,7 +29,7 @@ func New() *Mongo { return &Mongo{ Config: Config{ URI: "mongodb://localhost:27017", - Timeout: web.Duration(time.Second), + Timeout: confopt.Duration(time.Second), Databases: matcher.SimpleExpr{ Includes: []string{}, Excludes: []string{}, @@ -50,7 +51,7 @@ func New() *Mongo { type Config struct { UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` URI string `yaml:"uri" json:"uri"` - Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"` + Timeout confopt.Duration `yaml:"timeout,omitempty" json:"timeout"` Databases matcher.SimpleExpr `yaml:"databases,omitempty" json:"databases"` } @@ -76,13 +77,11 @@ func (m *Mongo) Configuration() any { func (m *Mongo) Init() error { if err := m.verifyConfig(); err != nil { - m.Errorf("config validation: %v", err) - return err + return fmt.Errorf("config validation: %v", err) } if err := m.initDatabaseSelector(); err != nil { - m.Errorf("init database selector: %v", err) - return err + return fmt.Errorf("init database selector: %v", err) } return nil @@ -91,7 +90,6 @@ func (m *Mongo) Init() error { func (m *Mongo) Check() error { mx, err := m.collect() if err != nil { - m.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/mongodb/mongodb_test.go b/src/go/plugin/go.d/modules/mongodb/mongodb_test.go index 835ea20e2..fdbf8816c 100644 --- a/src/go/plugin/go.d/modules/mongodb/mongodb_test.go +++ b/src/go/plugin/go.d/modules/mongodb/mongodb_test.go @@ -9,8 +9,8 @@ import ( "testing" "time" + "github.com/netdata/netdata/go/plugins/pkg/matcher" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" diff --git a/src/go/plugin/go.d/modules/monit/collect.go b/src/go/plugin/go.d/modules/monit/collect.go index 580aa6d99..642363d31 100644 --- a/src/go/plugin/go.d/modules/monit/collect.go +++ b/src/go/plugin/go.d/modules/monit/collect.go @@ -6,8 +6,6 @@ import ( "encoding/xml" "errors" "fmt" - "io" - "net/http" "net/url" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" @@ -74,44 +72,18 @@ func (m *Monit) collectStatus(mx map[string]int64) error { } func (m *Monit) fetchStatus() (*monitStatus, error) { - req, err := web.NewHTTPRequestWithPath(m.Request, urlPathStatus) + req, err := web.NewHTTPRequestWithPath(m.RequestConfig, urlPathStatus) if err != nil { return nil, err } req.URL.RawQuery = urlQueryStatus var status monitStatus - if err := m.doOKDecode(req, &status); err != nil { + if err := web.DoHTTP(m.httpClient).RequestXML(req, &status, func(d *xml.Decoder) { + d.CharsetReader = charset.NewReaderLabel + }); err != nil { return nil, err } return &status, nil } - -func (m *Monit) doOKDecode(req *http.Request, in interface{}) error { - resp, err := 
m.httpClient.Do(req) - if err != nil { - return fmt.Errorf("error on HTTP request '%s': %v", req.URL, err) - } - defer closeBody(resp) - - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("'%s' returned HTTP status code: %d", req.URL, resp.StatusCode) - } - - dec := xml.NewDecoder(resp.Body) - dec.CharsetReader = charset.NewReaderLabel - - if err := dec.Decode(in); err != nil { - return fmt.Errorf("error on decoding XML response from '%s': %v", req.URL, err) - } - - return nil -} - -func closeBody(resp *http.Response) { - if resp != nil && resp.Body != nil { - _, _ = io.Copy(io.Discard, resp.Body) - _ = resp.Body.Close() - } -} diff --git a/src/go/plugin/go.d/modules/monit/config_schema.json b/src/go/plugin/go.d/modules/monit/config_schema.json index 4d23760b3..ec8c2556c 100644 --- a/src/go/plugin/go.d/modules/monit/config_schema.json +++ b/src/go/plugin/go.d/modules/monit/config_schema.json @@ -107,7 +107,6 @@ "required": [ "url" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/monit/integrations/monit.md b/src/go/plugin/go.d/modules/monit/integrations/monit.md index 8d3739ac4..e04a75368 100644 --- a/src/go/plugin/go.d/modules/monit/integrations/monit.md +++ b/src/go/plugin/go.d/modules/monit/integrations/monit.md @@ -100,8 +100,8 @@ See [Syntax for TCP port](https://mmonit.com/monit/documentation/monit.html#TCP- The configuration file name for this integration is `go.d/monit.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
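The monit hunk above drops the hand-rolled `doOKDecode` in favor of `web.DoHTTP(...).RequestXML(...)`, keeping only the charset hook. The decoder setup it preserves is plain stdlib plus `golang.org/x/net/html/charset`; a self-contained sketch of that part (the `server` type and sample payload are illustrative, not from the module):

```go
package main

import (
	"encoding/xml"
	"fmt"
	"strings"

	"golang.org/x/net/html/charset"
)

type server struct {
	Name string `xml:"name"`
}

func main() {
	// Monit may serve non-UTF-8 XML, so the decoder gets a charset-aware
	// reader, the same hook collect.go now installs via RequestXML.
	dec := xml.NewDecoder(strings.NewReader(`<server><name>monit</name></server>`))
	dec.CharsetReader = charset.NewReaderLabel

	var s server
	if err := dec.Decode(&s); err != nil {
		fmt.Println("decode:", err)
		return
	}
	fmt.Println(s.Name) // monit
}
```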
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/monit/monit.go b/src/go/plugin/go.d/modules/monit/monit.go index d0fe90b14..4cb596882 100644 --- a/src/go/plugin/go.d/modules/monit/monit.go +++ b/src/go/plugin/go.d/modules/monit/monit.go @@ -5,10 +5,12 @@ package monit import ( _ "embed" "errors" + "fmt" "net/http" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -26,14 +28,14 @@ func init() { func New() *Monit { return &Monit{ Config: Config{ - HTTP: web.HTTP{ - Request: web.Request{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{ URL: "http://127.0.0.1:2812", Username: "admin", Password: "monit", }, - Client: web.Client{ - Timeout: web.Duration(time.Second), + ClientConfig: web.ClientConfig{ + Timeout: confopt.Duration(time.Second), }, }, }, @@ -43,8 +45,8 @@ func New() *Monit { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - web.HTTP `yaml:",inline" json:""` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + web.HTTPConfig `yaml:",inline" json:""` } type Monit struct { @@ -64,14 +66,12 @@ func (m *Monit) Configuration() any { func (m *Monit) Init() error { if m.URL == "" { - m.Error("config: monit url is required but not set") - return errors.New("config: missing URL") + return fmt.Errorf("config: monit url is required but not set") } - httpClient, err := web.NewHTTPClient(m.Client) + httpClient, err := web.NewHTTPClient(m.ClientConfig) if err != nil { - m.Errorf("init HTTP client: %v", err) - return err + return fmt.Errorf("init HTTP client: %v", err) } m.httpClient = httpClient @@ -84,7 +84,6 @@ func (m *Monit) Init() error { func (m *Monit) Check() error { mx, err := m.collect() if err != nil { - m.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/monit/monit_test.go b/src/go/plugin/go.d/modules/monit/monit_test.go index 7735dcdc2..5dbe73e8a 100644 --- a/src/go/plugin/go.d/modules/monit/monit_test.go +++ b/src/go/plugin/go.d/modules/monit/monit_test.go @@ -49,8 +49,8 @@ func TestMonit_Init(t *testing.T) { "fail when URL not set": { wantFail: true, config: Config{ - HTTP: web.HTTP{ - Request: web.Request{URL: ""}, + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{URL: ""}, }, }, }, diff --git a/src/go/plugin/go.d/modules/mysql/config_schema.json b/src/go/plugin/go.d/modules/mysql/config_schema.json index 20bb265c0..d17f808a3 100644 --- a/src/go/plugin/go.d/modules/mysql/config_schema.json +++ b/src/go/plugin/go.d/modules/mysql/config_schema.json @@ -33,7 +33,6 @@ "required": [ "dsn" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/mysql/integrations/mariadb.md b/src/go/plugin/go.d/modules/mysql/integrations/mariadb.md index b10e84b2a..d08014aee 100644 --- a/src/go/plugin/go.d/modules/mysql/integrations/mariadb.md +++ b/src/go/plugin/go.d/modules/mysql/integrations/mariadb.md @@ -233,8 +233,8 @@ be able to gather statistics without being able to alter or affect operations in The configuration file name for this integration is `go.d/mysql.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/mysql/integrations/mysql.md b/src/go/plugin/go.d/modules/mysql/integrations/mysql.md index f4f8a423a..6c10ada56 100644 --- a/src/go/plugin/go.d/modules/mysql/integrations/mysql.md +++ b/src/go/plugin/go.d/modules/mysql/integrations/mysql.md @@ -233,8 +233,8 @@ be able to gather statistics without being able to alter or affect operations in The configuration file name for this integration is `go.d/mysql.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/mysql/integrations/percona_mysql.md b/src/go/plugin/go.d/modules/mysql/integrations/percona_mysql.md index 2c967e229..712c8d0b4 100644 --- a/src/go/plugin/go.d/modules/mysql/integrations/percona_mysql.md +++ b/src/go/plugin/go.d/modules/mysql/integrations/percona_mysql.md @@ -233,8 +233,8 @@ be able to gather statistics without being able to alter or affect operations in The configuration file name for this integration is `go.d/mysql.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
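One behavioral detail in the mysql hunk above: the DSN shown in logs is now masked with `x` characters instead of `*` before `FormatDSN` re-serializes it. A self-contained sketch of that logic against the real `go-sql-driver/mysql` API (the `maskDSN` helper name is mine, not the module's):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/go-sql-driver/mysql"
)

// maskDSN mirrors the safeDSN logic in mysql.go: parse the DSN,
// overwrite each password byte with 'x', and re-serialize for logging.
func maskDSN(dsn string) (string, error) {
	cfg, err := mysql.ParseDSN(dsn)
	if err != nil {
		return "", fmt.Errorf("error on parsing DSN: %v", err)
	}
	cfg.Passwd = strings.Repeat("x", len(cfg.Passwd))
	return cfg.FormatDSN(), nil
}

func main() {
	masked, err := maskDSN("netdata:s3cret@tcp(localhost:3306)/")
	if err != nil {
		panic(err)
	}
	fmt.Println(masked) // netdata:xxxxxx@tcp(localhost:3306)/
}
```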
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/mysql/mysql.go b/src/go/plugin/go.d/modules/mysql/mysql.go index 1e11de39e..dd7d16e2e 100644 --- a/src/go/plugin/go.d/modules/mysql/mysql.go +++ b/src/go/plugin/go.d/modules/mysql/mysql.go @@ -6,16 +6,17 @@ import ( "database/sql" _ "embed" "errors" + "fmt" "strings" "sync" "time" - "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" - "github.com/blang/semver/v4" "github.com/go-sql-driver/mysql" _ "github.com/go-sql-driver/mysql" + + "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" ) //go:embed "config_schema.json" @@ -33,7 +34,7 @@ func New() *MySQL { return &MySQL{ Config: Config{ DSN: "root@tcp(localhost:3306)/", - Timeout: web.Duration(time.Second), + Timeout: confopt.Duration(time.Second), }, charts: baseCharts.Copy(), @@ -55,10 +56,10 @@ func New() *MySQL { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - DSN string `yaml:"dsn" json:"dsn"` - MyCNF string `yaml:"my.cnf,omitempty" json:"my.cnf"` - Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + DSN string `yaml:"dsn" json:"dsn"` + MyCNF string `yaml:"my.cnf,omitempty" json:"my.cnf"` + Timeout confopt.Duration `yaml:"timeout,omitempty" json:"timeout"` } type MySQL struct { @@ -105,24 +106,21 @@ func (m *MySQL) Init() error { if m.MyCNF != "" { dsn, err := dsnFromFile(m.MyCNF) if err != nil { - m.Error(err) return err } m.DSN = dsn } if m.DSN == "" { - m.Error("dsn not set") - return errors.New("dsn not set") + return errors.New("config: dsn not set") } cfg, err := mysql.ParseDSN(m.DSN) if err != nil { - m.Errorf("error on parsing DSN: %v", err) - return err + return fmt.Errorf("error on parsing DSN: %v", err) } - cfg.Passwd = strings.Repeat("*", len(cfg.Passwd)) + cfg.Passwd = strings.Repeat("x", len(cfg.Passwd)) m.safeDSN = cfg.FormatDSN() m.Debugf("using DSN [%s]", m.DSN) @@ -133,7 +131,6 @@ func (m *MySQL) Init() error { func (m *MySQL) Check() error { mx, err := m.collect() if err != nil { - m.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/mysql/mysql_test.go b/src/go/plugin/go.d/modules/mysql/mysql_test.go index 300f8dabe..33c6239a9 100644 --- a/src/go/plugin/go.d/modules/mysql/mysql_test.go +++ b/src/go/plugin/go.d/modules/mysql/mysql_test.go @@ -1666,23 +1666,17 @@ func TestMySQL_Collect(t *testing.T) { } } -func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, mySQL *MySQL, collected map[string]int64) { - for _, chart := range *mySQL.Charts() { +func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, mySQL *MySQL, mx map[string]int64) { + module.TestMetricsHasAllChartsDimsSkip(t, mySQL.Charts(), mx, func(chart *module.Chart, _ *module.Dim) bool { if mySQL.isMariaDB { // https://mariadb.com/kb/en/server-status-variables/#connection_errors_accept if mySQL.version.LT(semver.Version{Major: 10, Minor: 0, Patch: 4}) && chart.ID == "connection_errors" { - continue + return true } } - for _, dim := range chart.Dims { - _, ok := collected[dim.ID] - assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) - } - for _, v := range chart.Vars { - _, ok := collected[v.ID] - assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) - } - } + 
return false + + }) } func copyProcessListQueryDuration(dst, src map[string]int64) { diff --git a/src/go/plugin/go.d/modules/nginx/apiclient.go b/src/go/plugin/go.d/modules/nginx/apiclient.go deleted file mode 100644 index 53d9f2245..000000000 --- a/src/go/plugin/go.d/modules/nginx/apiclient.go +++ /dev/null @@ -1,168 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package nginx - -import ( - "bufio" - "fmt" - "io" - "net/http" - "regexp" - "strconv" - "strings" - - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" -) - -const ( - connActive = "connActive" - connAccepts = "connAccepts" - connHandled = "connHandled" - requests = "requests" - requestTime = "requestTime" - connReading = "connReading" - connWriting = "connWriting" - connWaiting = "connWaiting" -) - -var ( - nginxSeq = []string{ - connActive, - connAccepts, - connHandled, - requests, - connReading, - connWriting, - connWaiting, - } - tengineSeq = []string{ - connActive, - connAccepts, - connHandled, - requests, - requestTime, - connReading, - connWriting, - connWaiting, - } - - reStatus = regexp.MustCompile(`^Active connections: ([0-9]+)\n[^\d]+([0-9]+) ([0-9]+) ([0-9]+) ?([0-9]+)?\nReading: ([0-9]+) Writing: ([0-9]+) Waiting: ([0-9]+)`) -) - -func newAPIClient(client *http.Client, request web.Request) *apiClient { - return &apiClient{httpClient: client, request: request} -} - -type apiClient struct { - httpClient *http.Client - request web.Request -} - -func (a apiClient) getStubStatus() (*stubStatus, error) { - req, err := web.NewHTTPRequest(a.request) - if err != nil { - return nil, fmt.Errorf("error on creating request : %v", err) - } - - resp, err := a.doRequestOK(req) - defer closeBody(resp) - if err != nil { - return nil, err - } - - status, err := parseStubStatus(resp.Body) - if err != nil { - return nil, fmt.Errorf("error on parsing response : %v", err) - } - - return status, nil -} - -func (a apiClient) doRequestOK(req *http.Request) (*http.Response, error) { - resp, err := a.httpClient.Do(req) - if err != nil { - return resp, fmt.Errorf("error on request : %v", err) - } - - if resp.StatusCode != http.StatusOK { - return resp, fmt.Errorf("%s returned HTTP status %d", req.URL, resp.StatusCode) - } - - return resp, err -} - -func closeBody(resp *http.Response) { - if resp != nil && resp.Body != nil { - _, _ = io.Copy(io.Discard, resp.Body) - _ = resp.Body.Close() - } -} - -func parseStubStatus(r io.Reader) (*stubStatus, error) { - sc := bufio.NewScanner(r) - var lines []string - - for sc.Scan() { - lines = append(lines, strings.Trim(sc.Text(), "\r\n ")) - } - - parsed := reStatus.FindStringSubmatch(strings.Join(lines, "\n")) - - if len(parsed) == 0 { - return nil, fmt.Errorf("can't parse '%v'", lines) - } - - parsed = parsed[1:] - - var ( - seq []string - status stubStatus - ) - - switch len(parsed) { - default: - return nil, fmt.Errorf("invalid number of fields, got %d, expect %d or %d", len(parsed), len(nginxSeq), len(tengineSeq)) - case len(nginxSeq): - seq = nginxSeq - case len(tengineSeq): - seq = tengineSeq - } - - for i, key := range seq { - strValue := parsed[i] - if strValue == "" { - continue - } - value := mustParseInt(strValue) - switch key { - default: - return nil, fmt.Errorf("unknown key in seq : %s", key) - case connActive: - status.Connections.Active = value - case connAccepts: - status.Connections.Accepts = value - case connHandled: - status.Connections.Handled = value - case requests: - status.Requests.Total = value - case connReading: - status.Connections.Reading = value - case 
connWriting: - status.Connections.Writing = value - case connWaiting: - status.Connections.Waiting = value - case requestTime: - status.Requests.Time = &value - } - } - - return &status, nil -} - -func mustParseInt(value string) int64 { - v, err := strconv.ParseInt(value, 10, 64) - if err != nil { - panic(err) - } - return v -} diff --git a/src/go/plugin/go.d/modules/nginx/collect.go b/src/go/plugin/go.d/modules/nginx/collect.go index 459570ae5..0eb98cc7c 100644 --- a/src/go/plugin/go.d/modules/nginx/collect.go +++ b/src/go/plugin/go.d/modules/nginx/collect.go @@ -3,13 +3,28 @@ package nginx import ( + "fmt" + "io" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) func (n *Nginx) collect() (map[string]int64, error) { - status, err := n.apiClient.getStubStatus() - + req, err := web.NewHTTPRequest(n.RequestConfig) if err != nil { + return nil, fmt.Errorf("failed to create HTTP request to '%s': %w'", n.URL, err) + } + + var status *stubStatus + var perr error + + if err := web.DoHTTP(n.httpClient).Request(req, func(body io.Reader) error { + if status, perr = parseStubStatus(body); perr != nil { + return perr + } + return nil + }); err != nil { return nil, err } diff --git a/src/go/plugin/go.d/modules/nginx/config_schema.json b/src/go/plugin/go.d/modules/nginx/config_schema.json index 25fead781..df6f47ff9 100644 --- a/src/go/plugin/go.d/modules/nginx/config_schema.json +++ b/src/go/plugin/go.d/modules/nginx/config_schema.json @@ -105,7 +105,6 @@ "required": [ "url" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/nginx/integrations/nginx.md b/src/go/plugin/go.d/modules/nginx/integrations/nginx.md index 6d8338a10..a13494c81 100644 --- a/src/go/plugin/go.d/modules/nginx/integrations/nginx.md +++ b/src/go/plugin/go.d/modules/nginx/integrations/nginx.md @@ -102,8 +102,8 @@ Configure [ngx_http_stub_status_module](https://nginx.org/en/docs/http/ngx_http_ The configuration file name for this integration is `go.d/nginx.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/nginx/metrics.go b/src/go/plugin/go.d/modules/nginx/metrics.go deleted file mode 100644 index 66e6a160e..000000000 --- a/src/go/plugin/go.d/modules/nginx/metrics.go +++ /dev/null @@ -1,34 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package nginx - -type stubStatus struct { - Connections struct { - // The current number of active client connections including Waiting connections. - Active int64 `stm:"active"` - - // The total number of accepted client connections. - Accepts int64 `stm:"accepts"` - - // The total number of handled connections. - // Generally, the parameter value is the same as accepts unless some resource limits have been reached. - Handled int64 `stm:"handled"` - - // The current number of connections where nginx is reading the request header. 
- Reading int64 `stm:"reading"` - - // The current number of connections where nginx is writing the response back to the client. - Writing int64 `stm:"writing"` - - // The current number of idle client connections waiting for a request. - Waiting int64 `stm:"waiting"` - } `stm:""` - Requests struct { - // The total number of client requests. - Total int64 `stm:"requests"` - - // Note: tengine specific - // The total requests' response time, which is in millisecond - Time *int64 `stm:"request_time"` - } `stm:""` -} diff --git a/src/go/plugin/go.d/modules/nginx/nginx.go b/src/go/plugin/go.d/modules/nginx/nginx.go index 4a8e77439..178a4a137 100644 --- a/src/go/plugin/go.d/modules/nginx/nginx.go +++ b/src/go/plugin/go.d/modules/nginx/nginx.go @@ -5,9 +5,12 @@ package nginx import ( _ "embed" "errors" + "fmt" + "net/http" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -25,27 +28,31 @@ func init() { func New() *Nginx { return &Nginx{ Config: Config{ - HTTP: web.HTTP{ - Request: web.Request{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{ URL: "http://127.0.0.1/stub_status", }, - Client: web.Client{ - Timeout: web.Duration(time.Second * 1), + ClientConfig: web.ClientConfig{ + Timeout: confopt.Duration(time.Second * 1), }, }, - }} + }, + charts: charts.Copy(), + } } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - web.HTTP `yaml:",inline" json:""` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + web.HTTPConfig `yaml:",inline" json:""` } type Nginx struct { module.Base Config `yaml:",inline" json:""` - apiClient *apiClient + charts *module.Charts + + httpClient *http.Client } func (n *Nginx) Configuration() any { @@ -54,17 +61,14 @@ func (n *Nginx) Configuration() any { func (n *Nginx) Init() error { if n.URL == "" { - n.Error("URL not set") - return errors.New("url not set") + return errors.New("nginx URL required but not set") } - client, err := web.NewHTTPClient(n.Client) + httpClient, err := web.NewHTTPClient(n.ClientConfig) if err != nil { - n.Error(err) - return err + return fmt.Errorf("failed initializing http client: %w", err) } - - n.apiClient = newAPIClient(client, n.Request) + n.httpClient = httpClient n.Debugf("using URL %s", n.URL) n.Debugf("using timeout: %s", n.Timeout) @@ -75,18 +79,18 @@ func (n *Nginx) Init() error { func (n *Nginx) Check() error { mx, err := n.collect() if err != nil { - n.Error(err) return err } + if len(mx) == 0 { return errors.New("no metrics collected") - } + return nil } func (n *Nginx) Charts() *Charts { - return charts.Copy() + return n.charts } func (n *Nginx) Collect() map[string]int64 { @@ -100,7 +104,7 @@ func (n *Nginx) Collect() map[string]int64 { } func (n *Nginx) Cleanup() { - if n.apiClient != nil && n.apiClient.httpClient != nil { - n.apiClient.httpClient.CloseIdleConnections() + if n.httpClient != nil { + n.httpClient.CloseIdleConnections() } } diff --git a/src/go/plugin/go.d/modules/nginx/nginx_test.go b/src/go/plugin/go.d/modules/nginx/nginx_test.go index 255ea384c..6cdf65985 100644 --- a/src/go/plugin/go.d/modules/nginx/nginx_test.go +++ b/src/go/plugin/go.d/modules/nginx/nginx_test.go @@ -45,7 +45,6 @@ func TestNginx_Init(t *testing.T) { job := New() require.NoError(t, job.Init()) - assert.NotNil(t, job.apiClient) } func TestNginx_Check(t *testing.T) { diff --git 
a/src/go/plugin/go.d/modules/nginx/status.go b/src/go/plugin/go.d/modules/nginx/status.go new file mode 100644 index 000000000..e2d0cdc76 --- /dev/null +++ b/src/go/plugin/go.d/modules/nginx/status.go @@ -0,0 +1,147 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package nginx + +import ( + "bufio" + "fmt" + "io" + "regexp" + "strconv" + "strings" +) + +const ( + connActive = "connActive" + connAccepts = "connAccepts" + connHandled = "connHandled" + requests = "requests" + requestTime = "requestTime" + connReading = "connReading" + connWriting = "connWriting" + connWaiting = "connWaiting" +) + +var ( + nginxSeq = []string{ + connActive, + connAccepts, + connHandled, + requests, + connReading, + connWriting, + connWaiting, + } + tengineSeq = []string{ + connActive, + connAccepts, + connHandled, + requests, + requestTime, + connReading, + connWriting, + connWaiting, + } + + reStatus = regexp.MustCompile(`^Active connections: ([0-9]+)\n[^\d]+([0-9]+) ([0-9]+) ([0-9]+) ?([0-9]+)?\nReading: ([0-9]+) Writing: ([0-9]+) Waiting: ([0-9]+)`) +) + +type stubStatus struct { + Connections struct { + // The current number of active client connections including Waiting connections. + Active int64 `stm:"active"` + + // The total number of accepted client connections. + Accepts int64 `stm:"accepts"` + + // The total number of handled connections. + // Generally, the parameter value is the same as accepts unless some resource limits have been reached. + Handled int64 `stm:"handled"` + + // The current number of connections where nginx is reading the request header. + Reading int64 `stm:"reading"` + + // The current number of connections where nginx is writing the response back to the client. + Writing int64 `stm:"writing"` + + // The current number of idle client connections waiting for a request. + Waiting int64 `stm:"waiting"` + } `stm:""` + Requests struct { + // The total number of client requests. 
+ Total int64 `stm:"requests"` + + // Note: tengine specific + // The total requests' response time, which is in millisecond + Time *int64 `stm:"request_time"` + } `stm:""` +} + +func parseStubStatus(r io.Reader) (*stubStatus, error) { + sc := bufio.NewScanner(r) + var lines []string + + for sc.Scan() { + lines = append(lines, strings.Trim(sc.Text(), "\r\n ")) + } + + parsed := reStatus.FindStringSubmatch(strings.Join(lines, "\n")) + + if len(parsed) == 0 { + return nil, fmt.Errorf("can't parse '%v'", lines) + } + + parsed = parsed[1:] + + var ( + seq []string + status stubStatus + ) + + switch len(parsed) { + default: + return nil, fmt.Errorf("invalid number of fields, got %d, expect %d or %d", len(parsed), len(nginxSeq), len(tengineSeq)) + case len(nginxSeq): + seq = nginxSeq + case len(tengineSeq): + seq = tengineSeq + } + + for i, key := range seq { + strValue := parsed[i] + if strValue == "" { + continue + } + value := mustParseInt(strValue) + switch key { + default: + return nil, fmt.Errorf("unknown key in seq : %s", key) + case connActive: + status.Connections.Active = value + case connAccepts: + status.Connections.Accepts = value + case connHandled: + status.Connections.Handled = value + case requests: + status.Requests.Total = value + case connReading: + status.Connections.Reading = value + case connWriting: + status.Connections.Writing = value + case connWaiting: + status.Connections.Waiting = value + case requestTime: + status.Requests.Time = &value + } + } + + return &status, nil +} + +func mustParseInt(value string) int64 { + v, err := strconv.ParseInt(value, 10, 64) + if err != nil { + panic(err) + } + return v +} diff --git a/src/go/plugin/go.d/modules/nginxplus/config_schema.json b/src/go/plugin/go.d/modules/nginxplus/config_schema.json index fd4c38ef1..ac07d53d8 100644 --- a/src/go/plugin/go.d/modules/nginxplus/config_schema.json +++ b/src/go/plugin/go.d/modules/nginxplus/config_schema.json @@ -105,7 +105,6 @@ "required": [ "url" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/nginxplus/integrations/nginx_plus.md b/src/go/plugin/go.d/modules/nginxplus/integrations/nginx_plus.md index 9ebb4b195..0e99deafb 100644 --- a/src/go/plugin/go.d/modules/nginxplus/integrations/nginx_plus.md +++ b/src/go/plugin/go.d/modules/nginxplus/integrations/nginx_plus.md @@ -279,8 +279,8 @@ To configure API, see the [official documentation](https://docs.nginx.com/nginx/ The configuration file name for this integration is `go.d/nginxplus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
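For context on the `parseStubStatus` parser moved into `nginx/status.go` above: the `stub_status` page is a fixed plain-text format, and the regex in that file matches exactly this shape, with tengine inserting an optional fifth counter (request time) on the counters line. A representative nginx payload:

```text
Active connections: 291
server accepts handled requests
 16630948 16630948 31070465
Reading: 6 Writing: 179 Waiting: 106
```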
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/nginxplus/nginx_http_api_query.go b/src/go/plugin/go.d/modules/nginxplus/nginx_http_api_query.go index b54cd142a..fd2269c4d 100644 --- a/src/go/plugin/go.d/modules/nginxplus/nginx_http_api_query.go +++ b/src/go/plugin/go.d/modules/nginxplus/nginx_http_api_query.go @@ -3,10 +3,8 @@ package nginxplus import ( - "encoding/json" "errors" "fmt" - "io" "net/http" "sync" @@ -46,10 +44,10 @@ type nginxMetrics struct { } func (n *NginxPlus) queryAPIVersion() (int64, error) { - req, _ := web.NewHTTPRequestWithPath(n.Request, urlPathAPIVersions) + req, _ := web.NewHTTPRequestWithPath(n.RequestConfig, urlPathAPIVersions) var versions nginxAPIVersions - if err := n.doWithDecode(&versions, req); err != nil { + if err := n.doHTTP(req, &versions); err != nil { return 0, err } @@ -61,10 +59,10 @@ func (n *NginxPlus) queryAPIVersion() (int64, error) { } func (n *NginxPlus) queryAvailableEndpoints() error { - req, _ := web.NewHTTPRequestWithPath(n.Request, fmt.Sprintf(urlPathAPIEndpointsRoot, n.apiVersion)) + req, _ := web.NewHTTPRequestWithPath(n.RequestConfig, fmt.Sprintf(urlPathAPIEndpointsRoot, n.apiVersion)) var endpoints []string - if err := n.doWithDecode(&endpoints, req); err != nil { + if err := n.doHTTP(req, &endpoints); err != nil { return err } @@ -89,9 +87,9 @@ func (n *NginxPlus) queryAvailableEndpoints() error { if hasHTTP { endpoints = endpoints[:0] - req, _ = web.NewHTTPRequestWithPath(n.Request, fmt.Sprintf(urlPathAPIEndpointsHTTP, n.apiVersion)) + req, _ = web.NewHTTPRequestWithPath(n.RequestConfig, fmt.Sprintf(urlPathAPIEndpointsHTTP, n.apiVersion)) - if err := n.doWithDecode(&endpoints, req); err != nil { + if err := n.doHTTP(req, &endpoints); err != nil { return err } @@ -114,9 +112,9 @@ func (n *NginxPlus) queryAvailableEndpoints() error { if hasStream { endpoints = endpoints[:0] - req, _ = web.NewHTTPRequestWithPath(n.Request, fmt.Sprintf(urlPathAPIEndpointsStream, n.apiVersion)) + req, _ = web.NewHTTPRequestWithPath(n.RequestConfig, fmt.Sprintf(urlPathAPIEndpointsStream, n.apiVersion)) - if err := n.doWithDecode(&endpoints, req); err != nil { + if err := n.doHTTP(req, &endpoints); err != nil { return err } @@ -167,11 +165,11 @@ func (n *NginxPlus) queryMetrics() *nginxMetrics { } func (n *NginxPlus) queryNginxInfo(ms *nginxMetrics) { - req, _ := web.NewHTTPRequestWithPath(n.Request, fmt.Sprintf(urlPathAPINginx, n.apiVersion)) + req, _ := web.NewHTTPRequestWithPath(n.RequestConfig, fmt.Sprintf(urlPathAPINginx, n.apiVersion)) var v nginxInfo - if err := n.doWithDecode(&v, req); err != nil { + if err := n.doHTTP(req, &v); err != nil { n.endpoints.nginx = !errors.Is(err, errPathNotFound) n.Warning(err) return @@ -181,11 +179,11 @@ func (n *NginxPlus) queryNginxInfo(ms *nginxMetrics) { } func (n *NginxPlus) queryConnections(ms *nginxMetrics) { - req, _ := web.NewHTTPRequestWithPath(n.Request, fmt.Sprintf(urlPathAPIConnections, n.apiVersion)) + req, _ := web.NewHTTPRequestWithPath(n.RequestConfig, fmt.Sprintf(urlPathAPIConnections, n.apiVersion)) var v nginxConnections - if err := n.doWithDecode(&v, req); err != nil { + if err := n.doHTTP(req, &v); err != nil { n.endpoints.connections = !errors.Is(err, errPathNotFound) n.Warning(err) return @@ -195,11 +193,11 @@ func (n *NginxPlus) queryConnections(ms *nginxMetrics) { } func (n *NginxPlus) querySSL(ms *nginxMetrics) { - req, _ := web.NewHTTPRequestWithPath(n.Request, fmt.Sprintf(urlPathAPISSL, n.apiVersion)) + req, _ := 
web.NewHTTPRequestWithPath(n.RequestConfig, fmt.Sprintf(urlPathAPISSL, n.apiVersion)) var v nginxSSL - if err := n.doWithDecode(&v, req); err != nil { + if err := n.doHTTP(req, &v); err != nil { n.endpoints.ssl = !errors.Is(err, errPathNotFound) n.Warning(err) return @@ -209,11 +207,11 @@ func (n *NginxPlus) querySSL(ms *nginxMetrics) { } func (n *NginxPlus) queryHTTPRequests(ms *nginxMetrics) { - req, _ := web.NewHTTPRequestWithPath(n.Request, fmt.Sprintf(urlPathAPIHTTPRequests, n.apiVersion)) + req, _ := web.NewHTTPRequestWithPath(n.RequestConfig, fmt.Sprintf(urlPathAPIHTTPRequests, n.apiVersion)) var v nginxHTTPRequests - if err := n.doWithDecode(&v, req); err != nil { + if err := n.doHTTP(req, &v); err != nil { n.endpoints.httpRequest = !errors.Is(err, errPathNotFound) n.Warning(err) return @@ -223,11 +221,11 @@ func (n *NginxPlus) queryHTTPRequests(ms *nginxMetrics) { } func (n *NginxPlus) queryHTTPServerZones(ms *nginxMetrics) { - req, _ := web.NewHTTPRequestWithPath(n.Request, fmt.Sprintf(urlPathAPIHTTPServerZones, n.apiVersion)) + req, _ := web.NewHTTPRequestWithPath(n.RequestConfig, fmt.Sprintf(urlPathAPIHTTPServerZones, n.apiVersion)) var v nginxHTTPServerZones - if err := n.doWithDecode(&v, req); err != nil { + if err := n.doHTTP(req, &v); err != nil { n.endpoints.httpServerZones = !errors.Is(err, errPathNotFound) n.Warning(err) return @@ -237,11 +235,11 @@ func (n *NginxPlus) queryHTTPServerZones(ms *nginxMetrics) { } func (n *NginxPlus) queryHTTPLocationZones(ms *nginxMetrics) { - req, _ := web.NewHTTPRequestWithPath(n.Request, fmt.Sprintf(urlPathAPIHTTPLocationZones, n.apiVersion)) + req, _ := web.NewHTTPRequestWithPath(n.RequestConfig, fmt.Sprintf(urlPathAPIHTTPLocationZones, n.apiVersion)) var v nginxHTTPLocationZones - if err := n.doWithDecode(&v, req); err != nil { + if err := n.doHTTP(req, &v); err != nil { n.endpoints.httpLocationZones = !errors.Is(err, errPathNotFound) n.Warning(err) return @@ -251,11 +249,11 @@ func (n *NginxPlus) queryHTTPLocationZones(ms *nginxMetrics) { } func (n *NginxPlus) queryHTTPUpstreams(ms *nginxMetrics) { - req, _ := web.NewHTTPRequestWithPath(n.Request, fmt.Sprintf(urlPathAPIHTTPUpstreams, n.apiVersion)) + req, _ := web.NewHTTPRequestWithPath(n.RequestConfig, fmt.Sprintf(urlPathAPIHTTPUpstreams, n.apiVersion)) var v nginxHTTPUpstreams - if err := n.doWithDecode(&v, req); err != nil { + if err := n.doHTTP(req, &v); err != nil { n.endpoints.httpUpstreams = !errors.Is(err, errPathNotFound) n.Warning(err) return @@ -265,11 +263,11 @@ func (n *NginxPlus) queryHTTPUpstreams(ms *nginxMetrics) { } func (n *NginxPlus) queryHTTPCaches(ms *nginxMetrics) { - req, _ := web.NewHTTPRequestWithPath(n.Request, fmt.Sprintf(urlPathAPIHTTPCaches, n.apiVersion)) + req, _ := web.NewHTTPRequestWithPath(n.RequestConfig, fmt.Sprintf(urlPathAPIHTTPCaches, n.apiVersion)) var v nginxHTTPCaches - if err := n.doWithDecode(&v, req); err != nil { + if err := n.doHTTP(req, &v); err != nil { n.endpoints.httpCaches = !errors.Is(err, errPathNotFound) n.Warning(err) return @@ -279,11 +277,11 @@ func (n *NginxPlus) queryHTTPCaches(ms *nginxMetrics) { } func (n *NginxPlus) queryStreamServerZones(ms *nginxMetrics) { - req, _ := web.NewHTTPRequestWithPath(n.Request, fmt.Sprintf(urlPathAPIStreamServerZones, n.apiVersion)) + req, _ := web.NewHTTPRequestWithPath(n.RequestConfig, fmt.Sprintf(urlPathAPIStreamServerZones, n.apiVersion)) var v nginxStreamServerZones - if err := n.doWithDecode(&v, req); err != nil { + if err := n.doHTTP(req, &v); err != nil { 
n.endpoints.streamServerZones = !errors.Is(err, errPathNotFound) n.Warning(err) return @@ -293,11 +291,11 @@ func (n *NginxPlus) queryStreamServerZones(ms *nginxMetrics) { } func (n *NginxPlus) queryStreamUpstreams(ms *nginxMetrics) { - req, _ := web.NewHTTPRequestWithPath(n.Request, fmt.Sprintf(urlPathAPIStreamUpstreams, n.apiVersion)) + req, _ := web.NewHTTPRequestWithPath(n.RequestConfig, fmt.Sprintf(urlPathAPIStreamUpstreams, n.apiVersion)) var v nginxStreamUpstreams - if err := n.doWithDecode(&v, req); err != nil { + if err := n.doHTTP(req, &v); err != nil { n.endpoints.streamUpstreams = !errors.Is(err, errPathNotFound) n.Warning(err) return @@ -307,11 +305,11 @@ func (n *NginxPlus) queryStreamUpstreams(ms *nginxMetrics) { } func (n *NginxPlus) queryResolvers(ms *nginxMetrics) { - req, _ := web.NewHTTPRequestWithPath(n.Request, fmt.Sprintf(urlPathAPIResolvers, n.apiVersion)) + req, _ := web.NewHTTPRequestWithPath(n.RequestConfig, fmt.Sprintf(urlPathAPIResolvers, n.apiVersion)) var v nginxResolvers - if err := n.doWithDecode(&v, req); err != nil { + if err := n.doHTTP(req, &v); err != nil { n.endpoints.resolvers = !errors.Is(err, errPathNotFound) n.Warning(err) return @@ -324,38 +322,17 @@ var ( errPathNotFound = errors.New("path not found") ) -func (n *NginxPlus) doWithDecode(dst interface{}, req *http.Request) error { +func (n *NginxPlus) doHTTP(req *http.Request, dst any) error { n.Debugf("executing %s '%s'", req.Method, req.URL) - resp, err := n.httpClient.Do(req) - if err != nil { - return err - } - defer closeBody(resp) - - if resp.StatusCode == http.StatusNotFound { - return fmt.Errorf("%s returned %d status code (%w)", req.URL, resp.StatusCode, errPathNotFound) - } - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("%s returned %d status code (%s)", req.URL, resp.StatusCode, resp.Status) - } - - content, err := io.ReadAll(resp.Body) - if err != nil { - return fmt.Errorf("error on reading response from %s : %v", req.URL, err) - } - if err := json.Unmarshal(content, dst); err != nil { - return fmt.Errorf("error on parsing response from %s : %v", req.URL, err) - } - - return nil -} + cl := web.DoHTTP(n.httpClient).OnNokCode(func(resp *http.Response) (bool, error) { + if resp.StatusCode == http.StatusNotFound { + return false, errPathNotFound + } + return false, nil + }) -func closeBody(resp *http.Response) { - if resp != nil && resp.Body != nil { - _, _ = io.Copy(io.Discard, resp.Body) - _ = resp.Body.Close() - } + return cl.RequestJSON(req, dst) } func (n *nginxMetrics) empty() bool { diff --git a/src/go/plugin/go.d/modules/nginxplus/nginxplus.go b/src/go/plugin/go.d/modules/nginxplus/nginxplus.go index f737e6819..dbdf45901 100644 --- a/src/go/plugin/go.d/modules/nginxplus/nginxplus.go +++ b/src/go/plugin/go.d/modules/nginxplus/nginxplus.go @@ -5,10 +5,12 @@ package nginxplus import ( _ "embed" "errors" + "fmt" "net/http" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -26,12 +28,12 @@ func init() { func New() *NginxPlus { return &NginxPlus{ Config: Config{ - HTTP: web.HTTP{ - Request: web.Request{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{ URL: "http://127.0.0.1", }, - Client: web.Client{ - Timeout: web.Duration(time.Second * 1), + ClientConfig: web.ClientConfig{ + Timeout: confopt.Duration(time.Second * 1), }, }, }, @@ -42,8 +44,8 @@ func New() *NginxPlus { } type Config struct { - UpdateEvery 
int `yaml:"update_every,omitempty" json:"update_every"` - web.HTTP `yaml:",inline" json:""` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + web.HTTPConfig `yaml:",inline" json:""` } type NginxPlus struct { @@ -79,14 +81,12 @@ func (n *NginxPlus) Configuration() any { func (n *NginxPlus) Init() error { if n.URL == "" { - n.Error("config validation: 'url' can not be empty'") - return errors.New("url not set") + return errors.New("config: 'url' can not be empty'") } - client, err := web.NewHTTPClient(n.Client) + client, err := web.NewHTTPClient(n.ClientConfig) if err != nil { - n.Errorf("init HTTP client: %v", err) - return err + return fmt.Errorf("init HTTP client: %v", err) } n.httpClient = client @@ -96,7 +96,6 @@ func (n *NginxPlus) Init() error { func (n *NginxPlus) Check() error { mx, err := n.collect() if err != nil { - n.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/nginxplus/nginxplus_test.go b/src/go/plugin/go.d/modules/nginxplus/nginxplus_test.go index 2628cc688..9b2e57391 100644 --- a/src/go/plugin/go.d/modules/nginxplus/nginxplus_test.go +++ b/src/go/plugin/go.d/modules/nginxplus/nginxplus_test.go @@ -77,8 +77,8 @@ func TestNginxPlus_Init(t *testing.T) { "fail when URL not set": { wantFail: true, config: Config{ - HTTP: web.HTTP{ - Request: web.Request{URL: ""}, + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{URL: ""}, }, }, }, @@ -463,7 +463,9 @@ func TestNginxPlus_Collect(t *testing.T) { require.Equal(t, test.wantMetrics, mx) if len(test.wantMetrics) > 0 { assert.Equalf(t, test.wantNumOfCharts, len(*nginx.Charts()), "number of charts") - ensureCollectedHasAllChartsDimsVarsIDs(t, nginx, mx) + module.TestMetricsHasAllChartsDimsSkip(t, nginx.Charts(), mx, func(chart *module.Chart, _ *module.Dim) bool { + return chart.ID == uptimeChart.ID + }) } }) } @@ -578,19 +580,3 @@ func caseConnectionRefused(t *testing.T) (*NginxPlus, func()) { return nginx, func() {} } - -func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, n *NginxPlus, mx map[string]int64) { - for _, chart := range *n.Charts() { - if chart.ID == uptimeChart.ID { - continue - } - for _, dim := range chart.Dims { - _, ok := mx[dim.ID] - assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) - } - for _, v := range chart.Vars { - _, ok := mx[v.ID] - assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) - } - } -} diff --git a/src/go/plugin/go.d/modules/nginxunit/README.md b/src/go/plugin/go.d/modules/nginxunit/README.md new file mode 120000 index 000000000..023710453 --- /dev/null +++ b/src/go/plugin/go.d/modules/nginxunit/README.md @@ -0,0 +1 @@ +integrations/nginx_unit.md \ No newline at end of file diff --git a/src/go/plugin/go.d/modules/nginxunit/charts.go b/src/go/plugin/go.d/modules/nginxunit/charts.go new file mode 100644 index 000000000..aec012cfe --- /dev/null +++ b/src/go/plugin/go.d/modules/nginxunit/charts.go @@ -0,0 +1,59 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package nginxunit + +import ( + "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" +) + +const ( + prioRequestsRate = module.Priority + iota + prioConnectionsRate + prioConnectionsCurrent +) + +var charts = module.Charts{ + requestsRateChart.Copy(), + connectionsRateChart.Copy(), + connectionsCurrentChart.Copy(), +} + +var requestsRateChart = module.Chart{ + ID: "requests", + Title: "Requests", + Units: "requests/s", + Fam: "requests", + Ctx: "nginxunit.requests_rate", + 
Priority: prioRequestsRate, + Dims: module.Dims{ + {ID: "requests_total", Name: "requests", Algo: module.Incremental}, + }, +} + +var connectionsRateChart = module.Chart{ + ID: "connections_rate", + Title: "Connections", + Units: "connections/s", + Fam: "connections", + Ctx: "nginxunit.connections_rate", + Priority: prioConnectionsRate, + Type: module.Stacked, + Dims: module.Dims{ + {ID: "connections_accepted", Name: "accepted", Algo: module.Incremental}, + {ID: "connections_closed", Name: "closed", Algo: module.Incremental}, + }, +} + +var connectionsCurrentChart = module.Chart{ + ID: "connections_current", + Title: "Current Connections", + Units: "connections", + Fam: "connections", + Ctx: "nginxunit.connections_current", + Priority: prioConnectionsCurrent, + Type: module.Stacked, + Dims: module.Dims{ + {ID: "connections_active", Name: "active"}, + {ID: "connections_idle", Name: "idle"}, + }, +} diff --git a/src/go/plugin/go.d/modules/nginxunit/collect.go b/src/go/plugin/go.d/modules/nginxunit/collect.go new file mode 100644 index 000000000..c415945c8 --- /dev/null +++ b/src/go/plugin/go.d/modules/nginxunit/collect.go @@ -0,0 +1,59 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package nginxunit + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" +) + +const ( + urlPathStatus = "/status" +) + +// https://unit.nginx.org/statusapi/ +type nuStatus struct { + Connections *struct { + Accepted int64 `json:"accepted" stm:"accepted"` + Active int64 `json:"active" stm:"active"` + Idle int64 `json:"idle" stm:"idle"` + Closed int64 `json:"closed" stm:"closed"` + } `json:"connections" stm:"connections"` + Requests struct { + Total int64 `json:"total" stm:"total"` + } `json:"requests" stm:"requests"` +} + +func (n *NginxUnit) collect() (map[string]int64, error) { + req, err := web.NewHTTPRequestWithPath(n.RequestConfig, urlPathStatus) + if err != nil { + return nil, fmt.Errorf("failed to create HTTP request to '%s': %v", n.URL, err) + } + + var status nuStatus + + wc := web.DoHTTP(n.httpClient).OnNokCode(func(resp *http.Response) (bool, error) { + var msg struct { + Error string `json:"error"` + } + if json.NewDecoder(resp.Body).Decode(&msg) == nil && msg.Error != "" { + return false, errors.New(msg.Error) + } + return false, nil + }) + + if err := wc.RequestJSON(req, &status); err != nil { + return nil, err + } + + if status.Connections == nil { + return nil, errors.New("unexpected response: no connections available") + } + + return stm.ToMap(status), nil +} diff --git a/src/go/plugin/go.d/modules/nginxunit/config_schema.json b/src/go/plugin/go.d/modules/nginxunit/config_schema.json new file mode 100644 index 000000000..1de5a118a --- /dev/null +++ b/src/go/plugin/go.d/modules/nginxunit/config_schema.json @@ -0,0 +1,182 @@ +{ + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "NGINX Unit collector configuration.", + "type": "object", + "properties": { + "update_every": { + "title": "Update every", + "description": "Data collection interval, measured in seconds.", + "type": "integer", + "minimum": 1, + "default": 1 + }, + "url": { + "title": "URL", + "description": "The URL of the NGINX Unit HTTP Control API.", + "type": "string", + "default": "http://127.0.0.1:8000", + "format": "uri" + }, + "timeout": { + "title": "Timeout", + "description": "The timeout in seconds for the HTTP request.", + "type": "number", + "minimum": 0.5, + 
"default": 1 + }, + "not_follow_redirects": { + "title": "Not follow redirects", + "description": "If set, the client will not follow HTTP redirects automatically.", + "type": "boolean" + }, + "username": { + "title": "Username", + "description": "The username for basic authentication.", + "type": "string", + "sensitive": true + }, + "password": { + "title": "Password", + "description": "The password for basic authentication.", + "type": "string", + "sensitive": true + }, + "proxy_url": { + "title": "Proxy URL", + "description": "The URL of the proxy server.", + "type": "string" + }, + "proxy_username": { + "title": "Proxy username", + "description": "The username for proxy authentication.", + "type": "string", + "sensitive": true + }, + "proxy_password": { + "title": "Proxy password", + "description": "The password for proxy authentication.", + "type": "string", + "sensitive": true + }, + "headers": { + "title": "Headers", + "description": "Additional HTTP headers to include in the request.", + "type": [ + "object", + "null" + ], + "additionalProperties": { + "type": "string" + } + }, + "tls_skip_verify": { + "title": "Skip TLS verification", + "description": "If set, TLS certificate verification will be skipped.", + "type": "boolean" + }, + "tls_ca": { + "title": "TLS CA", + "description": "The path to the CA certificate file for TLS verification.", + "type": "string", + "pattern": "^$|^/" + }, + "tls_cert": { + "title": "TLS certificate", + "description": "The path to the client certificate file for TLS authentication.", + "type": "string", + "pattern": "^$|^/" + }, + "tls_key": { + "title": "TLS key", + "description": "The path to the client key file for TLS authentication.", + "type": "string", + "pattern": "^$|^/" + }, + "body": { + "title": "Body", + "type": "string" + }, + "method": { + "title": "Method", + "type": "string" + } + }, + "required": [ + "url" + ], + "patternProperties": { + "^name$": {} + } + }, + "uiSchema": { + "ui:flavour": "tabs", + "ui:options": { + "tabs": [ + { + "title": "Base", + "fields": [ + "update_every", + "url", + "timeout", + "not_follow_redirects" + ] + }, + { + "title": "Auth", + "fields": [ + "username", + "password" + ] + }, + { + "title": "TLS", + "fields": [ + "tls_skip_verify", + "tls_ca", + "tls_cert", + "tls_key" + ] + }, + { + "title": "Proxy", + "fields": [ + "proxy_url", + "proxy_username", + "proxy_password" + ] + }, + { + "title": "Headers", + "fields": [ + "headers" + ] + } + ] + }, + "uiOptions": { + "fullPage": true + }, + "body": { + "ui:widget": "hidden" + }, + "method": { + "ui:widget": "hidden" + }, + "timeout": { + "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)." + }, + "username": { + "ui:widget": "password" + }, + "proxy_username": { + "ui:widget": "password" + }, + "password": { + "ui:widget": "password" + }, + "proxy_password": { + "ui:widget": "password" + } + } +} diff --git a/src/go/plugin/go.d/modules/nginxunit/integrations/nginx_unit.md b/src/go/plugin/go.d/modules/nginxunit/integrations/nginx_unit.md new file mode 100644 index 000000000..881c535fd --- /dev/null +++ b/src/go/plugin/go.d/modules/nginxunit/integrations/nginx_unit.md @@ -0,0 +1,262 @@ + + +# NGINX Unit + + + + + +Plugin: go.d.plugin +Module: nginxunit + + + +## Overview + +This collector monitors the activity and performance of NGINX Unit servers, and collects metrics such as the number of connections, their status, and client requests. 
+ + +It sends HTTP requests to the NGINX Unit [Status API](https://unit.nginx.org/statusapi/). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +The collector can automatically detect NGINX Unit instances running on: + +- localhost that are listening on port 8000 +- within Docker containers + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per NGINX Unit instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| nginxunit.requests_rate | requests | requests/s | +| nginxunit.connections_rate | accepted, closed | connections/s | +| nginxunit.connections_current | active, idle | connections | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Enable HTTP Control API + +See [Control API](https://unit.nginx.org/controlapi/#configuration-api) documentation. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/nginxunit.conf`. + + +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/nginxunit.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +
    Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 1 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | The URL of the NGINX Unit HTTP Control API. | http://127.0.0.1:8000 | yes | +| timeout | HTTP request timeout. | 1 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +
    + +#### Examples + +##### Basic + +A basic example configuration. + +```yaml +jobs: + - name: local + url: http://127.0.0.1:8000 + +``` +##### HTTP authentication + +Basic HTTP authentication. + +
    Config + +```yaml +jobs: + - name: local + url: http://127.0.0.1:8000 + username: username + password: password + +``` +
    + +##### HTTPS with self-signed certificate + +NGINX Unit with HTTPS enabled and a self-signed certificate. + +
    Config + +```yaml +jobs: + - name: local + url: https://127.0.0.1:8000 + tls_skip_verify: yes + +``` +
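For context, `tls_skip_verify: yes` disables certificate chain and hostname verification in the underlying Go HTTP client. A rough equivalent in plain Go (illustrative only, not this module's client setup):

```go
package main

import (
	"crypto/tls"
	"net/http"
)

func main() {
	// Roughly what tls_skip_verify: yes does: accept any server certificate,
	// including self-signed ones, by skipping chain and hostname checks.
	client := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	_ = client // use client.Get(...) as usual
}
```

Because this accepts any certificate, it is only appropriate for self-signed setups like the one above.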
    + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Collecting metrics from local and remote instances. + + +
    Config + +```yaml +jobs: + - name: local + url: http://127.0.0.1:8000 + + - name: remote + url: http://192.0.2.1:8000 + +``` +
    + + + +## Troubleshooting + +### Debug Mode + +**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature. + +To troubleshoot issues with the `nginxunit` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m nginxunit + ``` + +### Getting Logs + +If you're encountering problems with the `nginxunit` collector, follow these steps to retrieve logs and identify potential issues: + +- **Run the command** specific to your system (systemd, non-systemd, or Docker container). +- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem. + +#### System with systemd + +Use the following command to view logs generated since the last Netdata service restart: + +```bash +journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep nginxunit +``` + +#### System without systemd + +Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name: + +```bash +grep nginxunit /var/log/netdata/collector.log +``` + +**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues. + +#### Docker Container + +If your Netdata runs in a Docker container named "netdata" (replace if different), use this command: + +```bash +docker logs netdata 2>&1 | grep nginxunit +``` + + diff --git a/src/go/plugin/go.d/modules/nginxunit/metadata.yaml b/src/go/plugin/go.d/modules/nginxunit/metadata.yaml new file mode 100644 index 000000000..b44f96111 --- /dev/null +++ b/src/go/plugin/go.d/modules/nginxunit/metadata.yaml @@ -0,0 +1,207 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-nginxunit + plugin_name: go.d.plugin + module_name: nginxunit + monitored_instance: + name: NGINX Unit + link: https://unit.nginx.org/ + categories: + - data-collection.web-servers-and-web-proxies + icon_filename: nginx.svg + related_resources: + integrations: + list: [] + alternative_monitored_instances: [] + info_provided_to_referring_integrations: + description: "" + keywords: + - nginx + - unit + - web + - appserver + - http + most_popular: false + overview: + data_collection: + metrics_description: | + This collector monitors the activity and performance of NGINX Unit servers, and collects metrics such as the number of connections, their status, and client requests. + method_description: | + It sends HTTP requests to the NGINX Unit [Status API](https://unit.nginx.org/statusapi/). 
+ default_behavior: + auto_detection: + description: | + The collector can automatically detect NGINX Unit instances running on: + + - localhost that are listening on port 8000 + - within Docker containers + limits: + description: "" + performance_impact: + description: "" + additional_permissions: + description: "" + multi_instance: true + supported_platforms: + include: [] + exclude: [] + setup: + prerequisites: + list: + - title: Enable HTTP Control API + description: | + See [Control API](https://unit.nginx.org/controlapi/#configuration-api) documentation. + configuration: + file: + name: go.d/nginxunit.conf + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. + folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 1 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + - name: url + description: The URL of the NGINX Unit HTTP Control API. + default_value: http://127.0.0.1:8000 + required: true + - name: timeout + description: HTTP request timeout. + default_value: 1 + required: false + - name: username + description: Username for basic HTTP authentication. + default_value: "" + required: false + - name: password + description: Password for basic HTTP authentication. + default_value: "" + required: false + - name: proxy_url + description: Proxy URL. + default_value: "" + required: false + - name: proxy_username + description: Username for proxy basic HTTP authentication. + default_value: "" + required: false + - name: proxy_password + description: Password for proxy basic HTTP authentication. + default_value: "" + required: false + - name: method + description: HTTP request method. + default_value: GET + required: false + - name: body + description: HTTP request body. + default_value: "" + required: false + - name: headers + description: HTTP request headers. + default_value: "" + required: false + - name: not_follow_redirects + description: Redirect handling policy. Controls whether the client follows redirects. + default_value: false + required: false + - name: tls_skip_verify + description: Server certificate chain and hostname validation policy. Controls whether the client performs this check. + default_value: false + required: false + - name: tls_ca + description: Certification authority that the client uses when verifying the server's certificates. + default_value: "" + required: false + - name: tls_cert + description: Client TLS certificate. + default_value: "" + required: false + - name: tls_key + description: Client TLS key. + default_value: "" + required: false + examples: + folding: + title: Config + enabled: true + list: + - name: Basic + description: A basic example configuration. + folding: + enabled: false + config: | + jobs: + - name: local + url: http://127.0.0.1:8000 + - name: HTTP authentication + description: Basic HTTP authentication. + config: | + jobs: + - name: local + url: http://127.0.0.1:8000 + username: username + password: password + - name: HTTPS with self-signed certificate + description: NGINX Unit with HTTPS enabled and a self-signed certificate. + config: | + jobs: + - name: local + url: https://127.0.0.1:8000 + tls_skip_verify: yes + - name: Multi-instance + description: | + > **Note**: When you define multiple jobs, their names must be unique. 
+ + Collecting metrics from local and remote instances. + config: | + jobs: + - name: local + url: http://127.0.0.1:8000 + + - name: remote + url: http://192.0.2.1:8000 + troubleshooting: + problems: + list: [] + alerts: [] + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: global + description: These metrics refer to the entire monitored application. + labels: [] + metrics: + - name: nginxunit.requests_rate + description: Requests + unit: requests/s + chart_type: line + dimensions: + - name: requests + - name: nginxunit.connections_rate + description: Connections + unit: connections/s + chart_type: stacked + dimensions: + - name: accepted + - name: closed + - name: nginxunit.connections_current + description: Current Connections + unit: connections + chart_type: stacked + dimensions: + - name: active + - name: idle diff --git a/src/go/plugin/go.d/modules/nginxunit/nginxunit.go b/src/go/plugin/go.d/modules/nginxunit/nginxunit.go new file mode 100644 index 000000000..cd6bb3886 --- /dev/null +++ b/src/go/plugin/go.d/modules/nginxunit/nginxunit.go @@ -0,0 +1,110 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package nginxunit + +import ( + _ "embed" + "errors" + "fmt" + "net/http" + "time" + + "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("nginxunit", module.Creator{ + JobConfigSchema: configSchema, + Create: func() module.Module { return New() }, + Config: func() any { return &Config{} }, + }) +} + +func New() *NginxUnit { + return &NginxUnit{ + Config: Config{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{ + URL: "http://127.0.0.1:8000", + }, + ClientConfig: web.ClientConfig{ + Timeout: confopt.Duration(time.Second * 1), + }, + }, + }, + charts: charts.Copy(), + } +} + +type Config struct { + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + web.HTTPConfig `yaml:",inline" json:""` +} + +type NginxUnit struct { + module.Base + Config `yaml:",inline" json:""` + + charts *module.Charts + + httpClient *http.Client +} + +func (n *NginxUnit) Configuration() any { + return n.Config +} + +func (n *NginxUnit) Init() error { + if n.URL == "" { + return errors.New("URL required but not set") + } + + httpClient, err := web.NewHTTPClient(n.ClientConfig) + if err != nil { + return fmt.Errorf("failed initializing http client: %w", err) + } + n.httpClient = httpClient + + n.Debugf("using URL %s", n.URL) + n.Debugf("using timeout: %s", n.Timeout) + + return nil +} + +func (n *NginxUnit) Check() error { + mx, err := n.collect() + if err != nil { + return err + } + + if len(mx) == 0 { + return errors.New("no metrics collected") + } + + return nil +} + +func (n *NginxUnit) Charts() *module.Charts { + return n.charts +} + +func (n *NginxUnit) Collect() map[string]int64 { + mx, err := n.collect() + if err != nil { + n.Error(err) + return nil + } + + return mx +} + +func (n *NginxUnit) Cleanup() { + if n.httpClient != nil { + n.httpClient.CloseIdleConnections() + } +} diff --git a/src/go/plugin/go.d/modules/nginxunit/nginxunit_test.go b/src/go/plugin/go.d/modules/nginxunit/nginxunit_test.go new file mode 100644 index 000000000..31c259d61 --- /dev/null +++ b/src/go/plugin/go.d/modules/nginxunit/nginxunit_test.go @@ -0,0 +1,254 @@ +// SPDX-License-Identifier: 
GPL-3.0-or-later + +package nginxunit + +import ( + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" +) + +var ( + dataConfigJSON, _ = os.ReadFile("testdata/config.json") + dataConfigYAML, _ = os.ReadFile("testdata/config.yaml") + + dataVer1291Status, _ = os.ReadFile("testdata/v1.29.1/status.json") +) + +func Test_testDataIsValid(t *testing.T) { + for name, data := range map[string][]byte{ + "dataConfigJSON": dataConfigJSON, + "dataConfigYAML": dataConfigYAML, + "dataVer1291Status": dataVer1291Status, + } { + require.NotNil(t, data, name) + + } +} + +func TestNginxUnit_ConfigurationSerialize(t *testing.T) { + module.TestConfigurationSerialize(t, &NginxUnit{}, dataConfigJSON, dataConfigYAML) +} + +func TestNginxUnit_Init(t *testing.T) { + tests := map[string]struct { + wantFail bool + config Config + }{ + "success with default": { + wantFail: false, + config: New().Config, + }, + "fail when URL not set": { + wantFail: true, + config: Config{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{URL: ""}, + }, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + nu := New() + nu.Config = test.config + + if test.wantFail { + assert.Error(t, nu.Init()) + } else { + assert.NoError(t, nu.Init()) + } + }) + } +} + +func TestNginxUnit_Check(t *testing.T) { + tests := map[string]struct { + wantFail bool + prepare func(t *testing.T) (nu *NginxUnit, cleanup func()) + }{ + "success on valid response": { + wantFail: false, + prepare: caseOk, + }, + "fail on unexpected JSON response": { + wantFail: true, + prepare: caseUnexpectedJsonResponse, + }, + "fail on invalid data response": { + wantFail: true, + prepare: caseInvalidDataResponse, + }, + "fail on connection refused": { + wantFail: true, + prepare: caseConnectionRefused, + }, + "fail on 404 response": { + wantFail: true, + prepare: case404, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + nu, cleanup := test.prepare(t) + defer cleanup() + + if test.wantFail { + assert.Error(t, nu.Check()) + } else { + assert.NoError(t, nu.Check()) + } + }) + } +} + +func TestNginxUnit_Charts(t *testing.T) { + assert.NotNil(t, New().Charts()) +} + +func TestNginxUnit_Collect(t *testing.T) { + tests := map[string]struct { + prepare func(t *testing.T) (nu *NginxUnit, cleanup func()) + wantNumOfCharts int + wantMetrics map[string]int64 + }{ + "success on valid response": { + prepare: caseOk, + wantNumOfCharts: len(charts), + wantMetrics: map[string]int64{ + "connections_accepted": 1, + "connections_active": 1, + "connections_closed": 1, + "connections_idle": 1, + "requests_total": 1, + }, + }, + "fail on unexpected JSON response": { + prepare: caseUnexpectedJsonResponse, + wantMetrics: nil, + }, + "fail on invalid data response": { + prepare: caseInvalidDataResponse, + wantMetrics: nil, + }, + "fail on connection refused": { + prepare: caseConnectionRefused, + wantMetrics: nil, + }, + "fail on 404 response": { + prepare: case404, + wantMetrics: nil, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + nu, cleanup := test.prepare(t) + defer cleanup() + + _ = nu.Check() + + mx := nu.Collect() + + require.Equal(t, test.wantMetrics, mx) + + if len(test.wantMetrics) > 0 { + assert.Equal(t, test.wantNumOfCharts, len(*nu.Charts()), "want charnu") + + 
module.TestMetricsHasAllChartsDims(t, nu.Charts(), mx) + } + }) + } +} + +func caseOk(t *testing.T) (*NginxUnit, func()) { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case urlPathStatus: + _, _ = w.Write(dataVer1291Status) + default: + w.WriteHeader(http.StatusNotFound) + } + })) + nu := New() + nu.URL = srv.URL + require.NoError(t, nu.Init()) + + return nu, srv.Close +} + +func caseUnexpectedJsonResponse(t *testing.T) (*NginxUnit, func()) { + t.Helper() + resp := ` +{ + "elephant": { + "burn": false, + "mountain": true, + "fog": false, + "skin": -1561907625, + "burst": "anyway", + "shadow": 1558616893 + }, + "start": "ever", + "base": 2093056027, + "mission": -2007590351, + "victory": 999053756, + "die": false +} +` + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write([]byte(resp)) + })) + nu := New() + nu.URL = srv.URL + require.NoError(t, nu.Init()) + + return nu, srv.Close +} + +func caseInvalidDataResponse(t *testing.T) (*NginxUnit, func()) { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write([]byte("hello and\n goodbye")) + })) + nu := New() + nu.URL = srv.URL + require.NoError(t, nu.Init()) + + return nu, srv.Close +} + +func caseConnectionRefused(t *testing.T) (*NginxUnit, func()) { + t.Helper() + nu := New() + nu.URL = "http://127.0.0.1:65001" + require.NoError(t, nu.Init()) + + return nu, func() {} +} + +func case404(t *testing.T) (*NginxUnit, func()) { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + })) + nu := New() + nu.URL = srv.URL + require.NoError(t, nu.Init()) + + return nu, srv.Close +} diff --git a/src/go/plugin/go.d/modules/nginxunit/testdata/config.json b/src/go/plugin/go.d/modules/nginxunit/testdata/config.json new file mode 100644 index 000000000..984c3ed6e --- /dev/null +++ b/src/go/plugin/go.d/modules/nginxunit/testdata/config.json @@ -0,0 +1,20 @@ +{ + "update_every": 123, + "url": "ok", + "body": "ok", + "method": "ok", + "headers": { + "ok": "ok" + }, + "username": "ok", + "password": "ok", + "proxy_url": "ok", + "proxy_username": "ok", + "proxy_password": "ok", + "timeout": 123.123, + "not_follow_redirects": true, + "tls_ca": "ok", + "tls_cert": "ok", + "tls_key": "ok", + "tls_skip_verify": true +} diff --git a/src/go/plugin/go.d/modules/nginxunit/testdata/config.yaml b/src/go/plugin/go.d/modules/nginxunit/testdata/config.yaml new file mode 100644 index 000000000..8558b61cc --- /dev/null +++ b/src/go/plugin/go.d/modules/nginxunit/testdata/config.yaml @@ -0,0 +1,17 @@ +update_every: 123 +url: "ok" +body: "ok" +method: "ok" +headers: + ok: "ok" +username: "ok" +password: "ok" +proxy_url: "ok" +proxy_username: "ok" +proxy_password: "ok" +timeout: 123.123 +not_follow_redirects: yes +tls_ca: "ok" +tls_cert: "ok" +tls_key: "ok" +tls_skip_verify: yes diff --git a/src/go/plugin/go.d/modules/nginxunit/testdata/v1.29.1/status.json b/src/go/plugin/go.d/modules/nginxunit/testdata/v1.29.1/status.json new file mode 100644 index 000000000..5b1fe2416 --- /dev/null +++ b/src/go/plugin/go.d/modules/nginxunit/testdata/v1.29.1/status.json @@ -0,0 +1,12 @@ +{ + "connections": { + "accepted": 1, + "active": 1, + "idle": 1, + "closed": 1 + }, + "requests": { + "total": 1 + }, + "applications": {} +} diff --git a/src/go/plugin/go.d/modules/nginxvts/collect.go 
b/src/go/plugin/go.d/modules/nginxvts/collect.go index 02fe7cb65..609eec428 100644 --- a/src/go/plugin/go.d/modules/nginxvts/collect.go +++ b/src/go/plugin/go.d/modules/nginxvts/collect.go @@ -3,11 +3,6 @@ package nginxvts import ( - "encoding/json" - "fmt" - "io" - "net/http" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -18,7 +13,7 @@ func (vts *NginxVTS) collect() (map[string]int64, error) { return nil, nil } - collected := make(map[string]interface{}) + collected := make(map[string]any) vts.collectMain(collected, ms) vts.collectSharedZones(collected, ms) vts.collectServerZones(collected, ms) @@ -26,16 +21,16 @@ func (vts *NginxVTS) collect() (map[string]int64, error) { return stm.ToMap(collected), nil } -func (vts *NginxVTS) collectMain(collected map[string]interface{}, ms *vtsMetrics) { +func (vts *NginxVTS) collectMain(collected map[string]any, ms *vtsMetrics) { collected["uptime"] = (ms.NowMsec - ms.LoadMsec) / 1000 collected["connections"] = ms.Connections } -func (vts *NginxVTS) collectSharedZones(collected map[string]interface{}, ms *vtsMetrics) { +func (vts *NginxVTS) collectSharedZones(collected map[string]any, ms *vtsMetrics) { collected["sharedzones"] = ms.SharedZones } -func (vts *NginxVTS) collectServerZones(collected map[string]interface{}, ms *vtsMetrics) { +func (vts *NginxVTS) collectServerZones(collected map[string]any, ms *vtsMetrics) { if !ms.hasServerZones() { return } @@ -45,37 +40,13 @@ func (vts *NginxVTS) collectServerZones(collected map[string]interface{}, ms *vt } func (vts *NginxVTS) scapeVTS() (*vtsMetrics, error) { - req, _ := web.NewHTTPRequest(vts.Request) + req, _ := web.NewHTTPRequest(vts.RequestConfig) var total vtsMetrics - - if err := vts.doOKDecode(req, &total); err != nil { + if err := web.DoHTTP(vts.httpClient).RequestJSON(req, &total); err != nil { vts.Warning(err) return nil, err } - return &total, nil -} - -func (vts *NginxVTS) doOKDecode(req *http.Request, in interface{}) error { - resp, err := vts.httpClient.Do(req) - if err != nil { - return fmt.Errorf("error on HTTP request '%s': %v", req.URL, err) - } - defer closeBody(resp) - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("'%s' returned HTTP status code: %d", req.URL, resp.StatusCode) - } - - if err := json.NewDecoder(resp.Body).Decode(in); err != nil { - return fmt.Errorf("error on decoding response from '%s': %v", req.URL, err) - } - return nil -} - -func closeBody(resp *http.Response) { - if resp != nil && resp.Body != nil { - _, _ = io.Copy(io.Discard, resp.Body) - _ = resp.Body.Close() - } + return &total, nil } diff --git a/src/go/plugin/go.d/modules/nginxvts/config_schema.json b/src/go/plugin/go.d/modules/nginxvts/config_schema.json index ef6a1d237..58124a57e 100644 --- a/src/go/plugin/go.d/modules/nginxvts/config_schema.json +++ b/src/go/plugin/go.d/modules/nginxvts/config_schema.json @@ -104,7 +104,6 @@ "required": [ "url" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/nginxvts/init.go b/src/go/plugin/go.d/modules/nginxvts/init.go index 2e738e4d1..09142ea75 100644 --- a/src/go/plugin/go.d/modules/nginxvts/init.go +++ b/src/go/plugin/go.d/modules/nginxvts/init.go @@ -15,14 +15,14 @@ func (vts *NginxVTS) validateConfig() error { return errors.New("URL not set") } - if _, err := web.NewHTTPRequest(vts.Request); err != nil { + if _, err := web.NewHTTPRequest(vts.RequestConfig); err != nil { return err } return nil } func (vts 
*NginxVTS) initHTTPClient() (*http.Client, error) { - return web.NewHTTPClient(vts.Client) + return web.NewHTTPClient(vts.ClientConfig) } func (vts *NginxVTS) initCharts() (*module.Charts, error) { diff --git a/src/go/plugin/go.d/modules/nginxvts/integrations/nginx_vts.md b/src/go/plugin/go.d/modules/nginxvts/integrations/nginx_vts.md index 59918b39e..98899bed2 100644 --- a/src/go/plugin/go.d/modules/nginxvts/integrations/nginx_vts.md +++ b/src/go/plugin/go.d/modules/nginxvts/integrations/nginx_vts.md @@ -102,8 +102,8 @@ To configure nginx-vts, see the [https://github.com/vozlt/nginx-module-vts#insta The configuration file name for this integration is `go.d/nginxvts.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/nginxvts/nginxvts.go b/src/go/plugin/go.d/modules/nginxvts/nginxvts.go index 56868ff0a..dc2335c82 100644 --- a/src/go/plugin/go.d/modules/nginxvts/nginxvts.go +++ b/src/go/plugin/go.d/modules/nginxvts/nginxvts.go @@ -5,10 +5,12 @@ package nginxvts import ( _ "embed" "errors" + "fmt" "net/http" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -29,12 +31,12 @@ func init() { func New() *NginxVTS { return &NginxVTS{ Config: Config{ - HTTP: web.HTTP{ - Request: web.Request{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{ URL: "http://localhost/status/format/json", }, - Client: web.Client{ - Timeout: web.Duration(time.Second), + ClientConfig: web.ClientConfig{ + Timeout: confopt.Duration(time.Second), }, }, }, @@ -42,8 +44,8 @@ func New() *NginxVTS { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - web.HTTP `yaml:",inline" json:""` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + web.HTTPConfig `yaml:",inline" json:""` } type NginxVTS struct { @@ -69,20 +71,18 @@ func (vts *NginxVTS) Cleanup() { func (vts *NginxVTS) Init() error { err := vts.validateConfig() if err != nil { - vts.Errorf("check configuration: %v", err) - return err + return fmt.Errorf("config: %v", err) } httpClient, err := vts.initHTTPClient() if err != nil { - vts.Errorf("init HTTP client: %v", err) + return fmt.Errorf("init HTTP client: %v", err) } vts.httpClient = httpClient charts, err := vts.initCharts() if err != nil { - vts.Errorf("init charts: %v", err) - return err + return fmt.Errorf("init charts: %v", err) } vts.charts = charts @@ -92,7 +92,6 @@ func (vts *NginxVTS) Init() error { func (vts *NginxVTS) Check() error { mx, err := vts.collect() if err != nil { - vts.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/nginxvts/nginxvts_test.go b/src/go/plugin/go.d/modules/nginxvts/nginxvts_test.go index f4c110372..e5cada4f7 100644 --- a/src/go/plugin/go.d/modules/nginxvts/nginxvts_test.go +++ b/src/go/plugin/go.d/modules/nginxvts/nginxvts_test.go @@ 
-54,15 +54,15 @@ func TestNginxVTS_Init(t *testing.T) { "URL not set": { wantFail: true, config: Config{ - HTTP: web.HTTP{ - Request: web.Request{URL: ""}, + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{URL: ""}, }}, }, "invalid TLSCA": { wantFail: true, config: Config{ - HTTP: web.HTTP{ - Client: web.Client{ + HTTPConfig: web.HTTPConfig{ + ClientConfig: web.ClientConfig{ TLSConfig: tlscfg.TLSConfig{TLSCA: "testdata/tls"}, }, }}, @@ -170,32 +170,16 @@ func TestNginxVTS_Collect(t *testing.T) { vts, cleanup := test.prepare(t) defer cleanup() - collected := vts.Collect() + mx := vts.Collect() - assert.Equal(t, test.wantCollected, collected) + assert.Equal(t, test.wantCollected, mx) if test.checkCharts { - ensureCollectedHasAllChartsDimsVarsIDs(t, vts, collected) + module.TestMetricsHasAllChartsDims(t, vts.Charts(), mx) } }) } } -func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, vts *NginxVTS, collected map[string]int64) { - for _, chart := range *vts.Charts() { - if chart.Obsolete { - continue - } - for _, dim := range chart.Dims { - _, ok := collected[dim.ID] - assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) - } - for _, v := range chart.Vars { - _, ok := collected[v.ID] - assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) - } - } -} - func prepareNginxVTS(t *testing.T, createNginxVTS func() *NginxVTS) (vts *NginxVTS, cleanup func()) { t.Helper() vts = createNginxVTS() diff --git a/src/go/plugin/go.d/modules/nsd/charts.go b/src/go/plugin/go.d/modules/nsd/charts.go index aed4f3098..0da1652db 100644 --- a/src/go/plugin/go.d/modules/nsd/charts.go +++ b/src/go/plugin/go.d/modules/nsd/charts.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly || darwin + package nsd import ( diff --git a/src/go/plugin/go.d/modules/nsd/collect.go b/src/go/plugin/go.d/modules/nsd/collect.go index d07341df3..4609b9ddc 100644 --- a/src/go/plugin/go.d/modules/nsd/collect.go +++ b/src/go/plugin/go.d/modules/nsd/collect.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly || darwin + package nsd import ( diff --git a/src/go/plugin/go.d/modules/nsd/config_schema.json b/src/go/plugin/go.d/modules/nsd/config_schema.json index d49107c71..8d8301413 100644 --- a/src/go/plugin/go.d/modules/nsd/config_schema.json +++ b/src/go/plugin/go.d/modules/nsd/config_schema.json @@ -19,7 +19,6 @@ "default": 2 } }, - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/nsd/doc.go b/src/go/plugin/go.d/modules/nsd/doc.go new file mode 100644 index 000000000..2ba41e59f --- /dev/null +++ b/src/go/plugin/go.d/modules/nsd/doc.go @@ -0,0 +1,3 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package nsd diff --git a/src/go/plugin/go.d/modules/nsd/exec.go b/src/go/plugin/go.d/modules/nsd/exec.go index b05082f3c..d58cbe152 100644 --- a/src/go/plugin/go.d/modules/nsd/exec.go +++ b/src/go/plugin/go.d/modules/nsd/exec.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly || darwin + package nsd import ( diff --git a/src/go/plugin/go.d/modules/nsd/init.go b/src/go/plugin/go.d/modules/nsd/init.go index 63843caba..f451d5d42 100644 --- a/src/go/plugin/go.d/modules/nsd/init.go +++ b/src/go/plugin/go.d/modules/nsd/init.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: 
GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly || darwin + package nsd import ( diff --git a/src/go/plugin/go.d/modules/nsd/integrations/nsd.md b/src/go/plugin/go.d/modules/nsd/integrations/nsd.md index 745b872d7..64e5161eb 100644 --- a/src/go/plugin/go.d/modules/nsd/integrations/nsd.md +++ b/src/go/plugin/go.d/modules/nsd/integrations/nsd.md @@ -28,7 +28,11 @@ Executed commands: -This collector is supported on all platforms. +This collector is only supported on the following platforms: + +- Linux +- BSD +- macOS This collector only supports collecting metrics from a single instance of this integration. @@ -100,8 +104,8 @@ No action required. The configuration file name for this integration is `go.d/nsd.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/nsd/metadata.yaml b/src/go/plugin/go.d/modules/nsd/metadata.yaml index a31aa38af..0474061d4 100644 --- a/src/go/plugin/go.d/modules/nsd/metadata.yaml +++ b/src/go/plugin/go.d/modules/nsd/metadata.yaml @@ -15,7 +15,7 @@ modules: - dns related_resources: integrations: - list: [] + list: [ ] info_provided_to_referring_integrations: description: "" most_popular: false @@ -32,8 +32,8 @@ modules: - `nsd-control stats_noreset` method_description: "" supported_platforms: - include: [] - exclude: [] + include: [ Linux, BSD, macOS ] + exclude: [ ] multi_instance: false additional_permissions: description: "" @@ -46,7 +46,7 @@ modules: description: "" setup: prerequisites: - list: [] + list: [ ] configuration: file: name: go.d/nsd.conf @@ -78,18 +78,18 @@ modules: update_every: 5 # Collect NSD statistics every 5 seconds troubleshooting: problems: - list: [] - alerts: [] + list: [ ] + alerts: [ ] metrics: folding: title: Metrics enabled: false description: "" - availability: [] + availability: [ ] scopes: - name: global description: These metrics refer to the entire monitored application. 
- labels: [] + labels: [ ] metrics: - name: nsd.queries description: Queries diff --git a/src/go/plugin/go.d/modules/nsd/nsd.go b/src/go/plugin/go.d/modules/nsd/nsd.go index fae0f67f3..6cff8dac8 100644 --- a/src/go/plugin/go.d/modules/nsd/nsd.go +++ b/src/go/plugin/go.d/modules/nsd/nsd.go @@ -1,14 +1,17 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly || darwin + package nsd import ( _ "embed" "errors" + "fmt" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" ) //go:embed "config_schema.json" @@ -28,15 +31,15 @@ func init() { func New() *Nsd { return &Nsd{ Config: Config{ - Timeout: web.Duration(time.Second * 2), + Timeout: confopt.Duration(time.Second * 2), }, charts: charts.Copy(), } } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + Timeout confopt.Duration `yaml:"timeout,omitempty" json:"timeout"` } type Nsd struct { @@ -55,8 +58,7 @@ func (n *Nsd) Configuration() any { func (n *Nsd) Init() error { nsdControl, err := n.initNsdControlExec() if err != nil { - n.Errorf("nsd-control exec initialization: %v", err) - return err + return fmt.Errorf("nsd-control exec initialization: %v", err) } n.exec = nsdControl @@ -66,7 +68,6 @@ func (n *Nsd) Init() error { func (n *Nsd) Check() error { mx, err := n.collect() if err != nil { - n.Error(err) return err } diff --git a/src/go/plugin/go.d/modules/nsd/nsd_test.go b/src/go/plugin/go.d/modules/nsd/nsd_test.go index 24f38b512..2345dd423 100644 --- a/src/go/plugin/go.d/modules/nsd/nsd_test.go +++ b/src/go/plugin/go.d/modules/nsd/nsd_test.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly || darwin + package nsd import ( diff --git a/src/go/plugin/go.d/modules/nsd/stats_counters.go b/src/go/plugin/go.d/modules/nsd/stats_counters.go index 8ebe706a5..80d77b558 100644 --- a/src/go/plugin/go.d/modules/nsd/stats_counters.go +++ b/src/go/plugin/go.d/modules/nsd/stats_counters.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly || darwin + package nsd // Docs: https://nsd.docs.nlnetlabs.nl/en/latest/manpages/nsd-control.html?highlight=elapsed#statistics-counters diff --git a/src/go/plugin/go.d/modules/ntpd/config_schema.json b/src/go/plugin/go.d/modules/ntpd/config_schema.json index f4d763b82..8d03fd325 100644 --- a/src/go/plugin/go.d/modules/ntpd/config_schema.json +++ b/src/go/plugin/go.d/modules/ntpd/config_schema.json @@ -33,7 +33,6 @@ "required": [ "address" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/ntpd/integrations/ntpd.md b/src/go/plugin/go.d/modules/ntpd/integrations/ntpd.md index c0094c524..ec907208d 100644 --- a/src/go/plugin/go.d/modules/ntpd/integrations/ntpd.md +++ b/src/go/plugin/go.d/modules/ntpd/integrations/ntpd.md @@ -122,8 +122,8 @@ No action required. The configuration file name for this integration is `go.d/ntpd.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/ntpd/ntpd.go b/src/go/plugin/go.d/modules/ntpd/ntpd.go index 011624681..8bff01cc1 100644 --- a/src/go/plugin/go.d/modules/ntpd/ntpd.go +++ b/src/go/plugin/go.d/modules/ntpd/ntpd.go @@ -9,8 +9,8 @@ import ( "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/iprange" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) //go:embed "config_schema.json" @@ -28,7 +28,7 @@ func New() *NTPd { return &NTPd{ Config: Config{ Address: "127.0.0.1:123", - Timeout: web.Duration(time.Second), + Timeout: confopt.Duration(time.Second), CollectPeers: false, }, charts: systemCharts.Copy(), @@ -39,10 +39,10 @@ func New() *NTPd { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - Address string `yaml:"address" json:"address"` - Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"` - CollectPeers bool `yaml:"collect_peers" json:"collect_peers"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + Address string `yaml:"address" json:"address"` + Timeout confopt.Duration `yaml:"timeout,omitempty" json:"timeout"` + CollectPeers bool `yaml:"collect_peers" json:"collect_peers"` } type ( @@ -75,14 +75,12 @@ func (n *NTPd) Configuration() any { func (n *NTPd) Init() error { if n.Address == "" { - n.Error("config validation: 'address' can not be empty") - return errors.New("address not set") + return errors.New("config: 'address' can not be empty") } txt := "0.0.0.0 127.0.0.0/8" r, err := iprange.ParseRanges(txt) if err != nil { - n.Errorf("error on parsing ip range '%s': %v", txt, err) return fmt.Errorf("error on parsing ip range '%s': %v", txt, err) } @@ -94,7 +92,6 @@ func (n *NTPd) Init() error { func (n *NTPd) Check() error { mx, err := n.collect() if err != nil { - n.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/nvidia_smi/config_schema.json b/src/go/plugin/go.d/modules/nvidia_smi/config_schema.json index 46b48095d..7dabd2bae 100644 --- a/src/go/plugin/go.d/modules/nvidia_smi/config_schema.json +++ b/src/go/plugin/go.d/modules/nvidia_smi/config_schema.json @@ -34,7 +34,6 @@ "required": [ "binary_path" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/nvidia_smi/integrations/nvidia_gpu.md b/src/go/plugin/go.d/modules/nvidia_smi/integrations/nvidia_gpu.md index 620c09639..9ae71db20 100644 --- a/src/go/plugin/go.d/modules/nvidia_smi/integrations/nvidia_gpu.md +++ b/src/go/plugin/go.d/modules/nvidia_smi/integrations/nvidia_gpu.md @@ -63,7 +63,8 @@ Labels: | Label | Description | |:-----------|:----------------| -| uuid | GPU id (e.g. 00000000:00:04.0) | +| uuid | GPU uuid (e.g. GPU-27b94a00-ed54-5c24-b1fd-1054085de32a) | +| index | GPU index (nvidia_smi typically orders GPUs by PCI bus ID) | | product_name | GPU product name (e.g. 
NVIDIA A100-SXM4-40GB) | Metrics: @@ -95,7 +96,7 @@ Labels: | Label | Description | |:-----------|:----------------| -| uuid | GPU id (e.g. 00000000:00:04.0) | +| uuid | GPU uuid (e.g. GPU-27b94a00-ed54-5c24-b1fd-1054085de32a) | | product_name | GPU product name (e.g. NVIDIA A100-SXM4-40GB) | | gpu_instance_id | GPU instance id (e.g. 1) | @@ -126,8 +127,8 @@ No action required. The configuration file name for this integration is `go.d/nvidia_smi.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -145,7 +146,7 @@ The following options can be defined globally: update_every, autodetection_retry | update_every | Data collection frequency. | 10 | no | | autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | | binary_path | Path to nvidia_smi binary. The default is "nvidia_smi" and the executable is looked for in the directories specified in the PATH environment variable. | nvidia_smi | no | -| timeout | nvidia_smi binary execution timeout. | 2 | no | +| timeout | The maximum duration, in seconds, to wait for an `nvidia-smi` command to complete. This setting applies differently based on the collector's mode. **Loop Mode:** In loop mode, the timeout primarily determines how long to wait for the initial `nvidia-smi` execution. If the initial query takes longer than the timeout, the collector may report an error. For systems with multiple GPUs, the initial load time can sometimes be significant (e.g., 5-10 seconds). **Regular Mode:** If the collector is in regular mode, the timeout specifies how long to wait for each individual `nvidia-smi` execution. | 10 | no | | loop_mode | When enabled, `nvidia-smi` is executed continuously in a separate thread using the `-l` option. | yes | no |
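To make the two modes concrete: in regular mode each collection cycle spawns one `nvidia-smi` run, and the timeout bounds that single run; in loop mode a long-lived `nvidia-smi -l` process streams output, so the timeout mainly bounds the first read. A minimal, hypothetical Go sketch of the regular-mode deadline (the flags shown are illustrative, not this collector's actual invocation):

```go
package main

import (
	"context"
	"fmt"
	"os/exec"
	"time"
)

func main() {
	// Each invocation gets its own deadline: a hung or slow query is killed
	// instead of blocking the collection cycle indefinitely.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	out, err := exec.CommandContext(ctx, "nvidia-smi", "-q", "-x").Output() // flags are illustrative
	if err != nil {
		fmt.Println("nvidia-smi failed or timed out:", err)
		return
	}
	fmt.Printf("got %d bytes of output\n", len(out))
}
```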
    diff --git a/src/go/plugin/go.d/modules/nvidia_smi/nvidia_smi.go b/src/go/plugin/go.d/modules/nvidia_smi/nvidia_smi.go index 3f89df05a..041d90fd9 100644 --- a/src/go/plugin/go.d/modules/nvidia_smi/nvidia_smi.go +++ b/src/go/plugin/go.d/modules/nvidia_smi/nvidia_smi.go @@ -5,10 +5,11 @@ package nvidia_smi import ( _ "embed" "errors" + "runtime" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" ) //go:embed "config_schema.json" @@ -28,8 +29,10 @@ func init() { func New() *NvidiaSmi { return &NvidiaSmi{ Config: Config{ - Timeout: web.Duration(time.Second * 10), - LoopMode: true, + Timeout: confopt.Duration(time.Second * 10), + // Disable loop mode on Windows due to go.d.plugin's non-graceful exit + // which can leave `nvidia_smi` processes running indefinitely. + LoopMode: !(runtime.GOOS == "windows"), }, binName: "nvidia-smi", charts: &module.Charts{}, @@ -40,10 +43,10 @@ func New() *NvidiaSmi { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"` - BinaryPath string `yaml:"binary_path" json:"binary_path"` - LoopMode bool `yaml:"loop_mode,omitempty" json:"loop_mode"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + Timeout confopt.Duration `yaml:"timeout,omitempty" json:"timeout"` + BinaryPath string `yaml:"binary_path" json:"binary_path"` + LoopMode bool `yaml:"loop_mode,omitempty" json:"loop_mode"` } type NvidiaSmi struct { @@ -65,9 +68,11 @@ func (nv *NvidiaSmi) Configuration() any { func (nv *NvidiaSmi) Init() error { if nv.exec == nil { + if runtime.GOOS == "windows" && nv.LoopMode { + nv.LoopMode = false + } smi, err := nv.initNvidiaSmiExec() if err != nil { - nv.Error(err) return err } nv.exec = smi @@ -79,7 +84,6 @@ func (nv *NvidiaSmi) Init() error { func (nv *NvidiaSmi) Check() error { mx, err := nv.collect() if err != nil { - nv.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/nvme/charts.go b/src/go/plugin/go.d/modules/nvme/charts.go index 08e215ec8..5059f3a52 100644 --- a/src/go/plugin/go.d/modules/nvme/charts.go +++ b/src/go/plugin/go.d/modules/nvme/charts.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly + package nvme import ( @@ -237,13 +239,16 @@ var ( } ) -func (n *NVMe) addDeviceCharts(device string) { +func (n *NVMe) addDeviceCharts(devicePath, model string) { + device := extractDeviceFromPath(devicePath) + charts := deviceChartsTmpl.Copy() for _, chart := range *charts { chart.ID = fmt.Sprintf(chart.ID, device) chart.Labels = []module.Label{ {Key: "device", Value: device}, + {Key: "model_number", Value: model}, } for _, dim := range chart.Dims { dim.ID = fmt.Sprintf(dim.ID, device) @@ -255,7 +260,9 @@ func (n *NVMe) addDeviceCharts(device string) { } } -func (n *NVMe) removeDeviceCharts(device string) { +func (n *NVMe) removeDeviceCharts(devicePath string) { + device := extractDeviceFromPath(devicePath) + px := fmt.Sprintf("device_%s", device) for _, chart := range *n.Charts() { diff --git a/src/go/plugin/go.d/modules/nvme/collect.go b/src/go/plugin/go.d/modules/nvme/collect.go index 1cc942395..aaa6b14b5 100644 --- a/src/go/plugin/go.d/modules/nvme/collect.go +++ b/src/go/plugin/go.d/modules/nvme/collect.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux 
|| freebsd || openbsd || netbsd || dragonfly + package nvme import ( @@ -43,59 +45,60 @@ func (n *NVMe) collectNVMeDevice(mx map[string]int64, devicePath string) error { return fmt.Errorf("exec nvme smart-log for '%s': %v", devicePath, err) } - device := extractDeviceFromPath(devicePath) - - mx["device_"+device+"_temperature"] = int64(float64(parseValue(stats.Temperature)) - 273.15) // Kelvin => Celsius - mx["device_"+device+"_percentage_used"] = parseValue(stats.PercentUsed) - mx["device_"+device+"_available_spare"] = parseValue(stats.AvailSpare) - mx["device_"+device+"_data_units_read"] = parseValue(stats.DataUnitsRead) * 1000 * 512 // units => bytes - mx["device_"+device+"_data_units_written"] = parseValue(stats.DataUnitsWritten) * 1000 * 512 // units => bytes - mx["device_"+device+"_host_read_commands"] = parseValue(stats.HostReadCommands) - mx["device_"+device+"_host_write_commands"] = parseValue(stats.HostWriteCommands) - mx["device_"+device+"_power_cycles"] = parseValue(stats.PowerCycles) - mx["device_"+device+"_power_on_time"] = parseValue(stats.PowerOnHours) * 3600 // hours => seconds - mx["device_"+device+"_unsafe_shutdowns"] = parseValue(stats.UnsafeShutdowns) - mx["device_"+device+"_media_errors"] = parseValue(stats.MediaErrors) - mx["device_"+device+"_num_err_log_entries"] = parseValue(stats.NumErrLogEntries) - mx["device_"+device+"_controller_busy_time"] = parseValue(stats.ControllerBusyTime) * 60 // minutes => seconds - mx["device_"+device+"_warning_temp_time"] = parseValue(stats.WarningTempTime) * 60 // minutes => seconds - mx["device_"+device+"_critical_comp_time"] = parseValue(stats.CriticalCompTime) * 60 // minutes => seconds - mx["device_"+device+"_thm_temp1_trans_count"] = parseValue(stats.ThmTemp1TransCount) - mx["device_"+device+"_thm_temp2_trans_count"] = parseValue(stats.ThmTemp2TransCount) - mx["device_"+device+"_thm_temp1_total_time"] = parseValue(stats.ThmTemp1TotalTime) // seconds - mx["device_"+device+"_thm_temp2_total_time"] = parseValue(stats.ThmTemp2TotalTime) // seconds - - mx["device_"+device+"_critical_warning_available_spare"] = boolToInt(parseValue(stats.CriticalWarning)&1 != 0) - mx["device_"+device+"_critical_warning_temp_threshold"] = boolToInt(parseValue(stats.CriticalWarning)&(1<<1) != 0) - mx["device_"+device+"_critical_warning_nvm_subsystem_reliability"] = boolToInt(parseValue(stats.CriticalWarning)&(1<<2) != 0) - mx["device_"+device+"_critical_warning_read_only"] = boolToInt(parseValue(stats.CriticalWarning)&(1<<3) != 0) - mx["device_"+device+"_critical_warning_volatile_mem_backup_failed"] = boolToInt(parseValue(stats.CriticalWarning)&(1<<4) != 0) - mx["device_"+device+"_critical_warning_persistent_memory_read_only"] = boolToInt(parseValue(stats.CriticalWarning)&(1<<5) != 0) + dev := extractDeviceFromPath(devicePath) + + mx["device_"+dev+"_temperature"] = int64(float64(parseValue(stats.Temperature)) - 273.15) // Kelvin => Celsius + mx["device_"+dev+"_percentage_used"] = parseValue(stats.PercentUsed) + mx["device_"+dev+"_available_spare"] = parseValue(stats.AvailSpare) + mx["device_"+dev+"_data_units_read"] = parseValue(stats.DataUnitsRead) * 1000 * 512 // units => bytes + mx["device_"+dev+"_data_units_written"] = parseValue(stats.DataUnitsWritten) * 1000 * 512 // units => bytes + mx["device_"+dev+"_host_read_commands"] = parseValue(stats.HostReadCommands) + mx["device_"+dev+"_host_write_commands"] = parseValue(stats.HostWriteCommands) + mx["device_"+dev+"_power_cycles"] = parseValue(stats.PowerCycles) + mx["device_"+dev+"_power_on_time"] = 
parseValue(stats.PowerOnHours) * 3600 // hours => seconds + mx["device_"+dev+"_unsafe_shutdowns"] = parseValue(stats.UnsafeShutdowns) + mx["device_"+dev+"_media_errors"] = parseValue(stats.MediaErrors) + mx["device_"+dev+"_num_err_log_entries"] = parseValue(stats.NumErrLogEntries) + mx["device_"+dev+"_controller_busy_time"] = parseValue(stats.ControllerBusyTime) * 60 // minutes => seconds + mx["device_"+dev+"_warning_temp_time"] = parseValue(stats.WarningTempTime) * 60 // minutes => seconds + mx["device_"+dev+"_critical_comp_time"] = parseValue(stats.CriticalCompTime) * 60 // minutes => seconds + mx["device_"+dev+"_thm_temp1_trans_count"] = parseValue(stats.ThmTemp1TransCount) + mx["device_"+dev+"_thm_temp2_trans_count"] = parseValue(stats.ThmTemp2TransCount) + mx["device_"+dev+"_thm_temp1_total_time"] = parseValue(stats.ThmTemp1TotalTime) // seconds + mx["device_"+dev+"_thm_temp2_total_time"] = parseValue(stats.ThmTemp2TotalTime) // seconds + + mx["device_"+dev+"_critical_warning_available_spare"] = boolToInt(parseValue(stats.CriticalWarning)&1 != 0) + mx["device_"+dev+"_critical_warning_temp_threshold"] = boolToInt(parseValue(stats.CriticalWarning)&(1<<1) != 0) + mx["device_"+dev+"_critical_warning_nvm_subsystem_reliability"] = boolToInt(parseValue(stats.CriticalWarning)&(1<<2) != 0) + mx["device_"+dev+"_critical_warning_read_only"] = boolToInt(parseValue(stats.CriticalWarning)&(1<<3) != 0) + mx["device_"+dev+"_critical_warning_volatile_mem_backup_failed"] = boolToInt(parseValue(stats.CriticalWarning)&(1<<4) != 0) + mx["device_"+dev+"_critical_warning_persistent_memory_read_only"] = boolToInt(parseValue(stats.CriticalWarning)&(1<<5) != 0) return nil } func (n *NVMe) listNVMeDevices() error { - devices, err := n.exec.list() + devList, err := n.exec.list() if err != nil { return fmt.Errorf("exec nvme list: %v", err) } + n.Debugf("found %d NVMe devices (%v)", len(devList.Devices), devList.Devices) + seen := make(map[string]bool) - for _, v := range devices.Devices { - device := extractDeviceFromPath(v.DevicePath) - seen[device] = true - if !n.devicePaths[v.DevicePath] { - n.devicePaths[v.DevicePath] = true - n.addDeviceCharts(device) + for _, dev := range devList.Devices { + path := dev.DevicePath + seen[path] = true + if !n.devicePaths[path] { + n.devicePaths[path] = true + n.addDeviceCharts(path, dev.ModelNumber) } } for path := range n.devicePaths { - device := extractDeviceFromPath(path) - if !seen[device] { - delete(n.devicePaths, device) - n.removeDeviceCharts(device) + if !seen[path] { + delete(n.devicePaths, path) + n.removeDeviceCharts(path) } } diff --git a/src/go/plugin/go.d/modules/nvme/config_schema.json b/src/go/plugin/go.d/modules/nvme/config_schema.json index 179a24ab1..8c5b6350a 100644 --- a/src/go/plugin/go.d/modules/nvme/config_schema.json +++ b/src/go/plugin/go.d/modules/nvme/config_schema.json @@ -20,7 +20,6 @@ } }, "required": [], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/nvme/doc.go b/src/go/plugin/go.d/modules/nvme/doc.go new file mode 100644 index 000000000..5c8715207 --- /dev/null +++ b/src/go/plugin/go.d/modules/nvme/doc.go @@ -0,0 +1,3 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package nvme diff --git a/src/go/plugin/go.d/modules/nvme/exec.go b/src/go/plugin/go.d/modules/nvme/exec.go index 8c1281a2f..6b3d0bdb0 100644 --- a/src/go/plugin/go.d/modules/nvme/exec.go +++ b/src/go/plugin/go.d/modules/nvme/exec.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux 
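A quick aside on the arithmetic in `collectNVMeDevice` above: `nvme smart-log` reports temperature in Kelvin, data units as thousands of 512-byte blocks, and power-on time in hours, so the collector normalizes them to Celsius, bytes, and seconds. A minimal sketch of the same conversions (the raw values below are hypothetical examples, not real device output):

```go
// Sketch of the unit conversions used in collectNVMeDevice above.
// The raw values are made up for illustration.
package main

import "fmt"

func main() {
	tempKelvin := int64(310)       // nvme smart-log reports temperature in Kelvin
	dataUnitsRead := int64(100000) // one data unit = 1000 * 512 bytes
	powerOnHours := int64(12)      // reported in hours

	fmt.Println(float64(tempKelvin) - 273.15) // => 36.85 Celsius
	fmt.Println(dataUnitsRead * 1000 * 512)   // => 51200000000 bytes (~51.2 GB)
	fmt.Println(powerOnHours * 3600)          // => 43200 seconds
}
```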
|| freebsd || openbsd || netbsd || dragonfly + package nvme import ( @@ -12,10 +14,10 @@ import ( type nvmeDeviceList struct { Devices []struct { - DevicePath string `json:"DevicePath"` - UsedBytes nvmeNumber `json:"UsedBytes"` - PhysicalSize nvmeNumber `json:"PhysicalSize"` - SectorSize nvmeNumber `json:"SectorSize"` + DevicePath string `json:"DevicePath"` + Firmware string `json:"Firmware"` + ModelNumber string `json:"ModelNumber"` + SerialNumber string `json:"SerialNumber"` } } diff --git a/src/go/plugin/go.d/modules/nvme/init.go b/src/go/plugin/go.d/modules/nvme/init.go index 7196208e8..5dd8105ce 100644 --- a/src/go/plugin/go.d/modules/nvme/init.go +++ b/src/go/plugin/go.d/modules/nvme/init.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly + package nvme import ( diff --git a/src/go/plugin/go.d/modules/nvme/integrations/nvme_devices.md b/src/go/plugin/go.d/modules/nvme/integrations/nvme_devices.md index 9a93c11d0..a8c776439 100644 --- a/src/go/plugin/go.d/modules/nvme/integrations/nvme_devices.md +++ b/src/go/plugin/go.d/modules/nvme/integrations/nvme_devices.md @@ -26,7 +26,10 @@ This collector monitors the health of NVMe devices. It relies on the [`nvme`](ht -This collector is supported on all platforms. +This collector is only supported on the following platforms: + +- Linux +- BSD This collector supports collecting metrics from multiple instances of this integration, including remote instances. @@ -63,6 +66,7 @@ Labels: | Label | Description | |:-----------|:----------------| | device | NVMe device name | +| model_number | NVMe device model | Metrics: @@ -138,8 +142,8 @@ Include the following option in your `docker run` command or add the device mapp The configuration file name for this integration is `go.d/nvme.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/nvme/metadata.yaml b/src/go/plugin/go.d/modules/nvme/metadata.yaml index 98f35af65..2a43d8896 100644 --- a/src/go/plugin/go.d/modules/nvme/metadata.yaml +++ b/src/go/plugin/go.d/modules/nvme/metadata.yaml @@ -27,7 +27,7 @@ modules: This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management. 
method_description: "" supported_platforms: - include: [] + include: [Linux, BSD] exclude: [] multi_instance: true additional_permissions: @@ -120,6 +120,8 @@ modules: labels: - name: device description: NVMe device name + - name: model_number + description: NVMe device model metrics: - name: nvme.device_estimated_endurance_perc description: Estimated endurance diff --git a/src/go/plugin/go.d/modules/nvme/nvme.go b/src/go/plugin/go.d/modules/nvme/nvme.go index b1b22f594..78dc66958 100644 --- a/src/go/plugin/go.d/modules/nvme/nvme.go +++ b/src/go/plugin/go.d/modules/nvme/nvme.go @@ -1,14 +1,17 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly + package nvme import ( _ "embed" "errors" + "fmt" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" ) //go:embed "config_schema.json" @@ -28,7 +31,7 @@ func init() { func New() *NVMe { return &NVMe{ Config: Config{ - Timeout: web.Duration(time.Second * 2), + Timeout: confopt.Duration(time.Second * 2), }, charts: &module.Charts{}, @@ -39,8 +42,8 @@ func New() *NVMe { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + Timeout confopt.Duration `yaml:"timeout,omitempty" json:"timeout"` } type ( @@ -70,8 +73,7 @@ func (n *NVMe) Configuration() any { func (n *NVMe) Init() error { nvmeExec, err := n.initNVMeCLIExec() if err != nil { - n.Errorf("init nvme-cli exec: %v", err) - return err + return fmt.Errorf("init nvme-cli exec: %v", err) } n.exec = nvmeExec @@ -81,7 +83,6 @@ func (n *NVMe) Init() error { func (n *NVMe) Check() error { mx, err := n.collect() if err != nil { - n.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/nvme/nvme_test.go b/src/go/plugin/go.d/modules/nvme/nvme_test.go index 2009f789c..9ddbedc50 100644 --- a/src/go/plugin/go.d/modules/nvme/nvme_test.go +++ b/src/go/plugin/go.d/modules/nvme/nvme_test.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly + package nvme import ( diff --git a/src/go/plugin/go.d/modules/openldap/README.md b/src/go/plugin/go.d/modules/openldap/README.md new file mode 120000 index 000000000..45f36b9b9 --- /dev/null +++ b/src/go/plugin/go.d/modules/openldap/README.md @@ -0,0 +1 @@ +integrations/openldap.md \ No newline at end of file diff --git a/src/go/plugin/go.d/modules/openldap/charts.go b/src/go/plugin/go.d/modules/openldap/charts.go new file mode 100644 index 000000000..0dc6a17c7 --- /dev/null +++ b/src/go/plugin/go.d/modules/openldap/charts.go @@ -0,0 +1,141 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package openldap + +import ( + "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" +) + +const ( + prioCurrentConnections = module.Priority + iota + prioTotalConnections + prioBytesSent + prioEntries + prioReferrals + prioOperations + prioOperationsByType + prioWaiters +) + +var charts = module.Charts{ + currentConnectionsChart.Copy(), + connectionsChart.Copy(), + + bytesSentChart.Copy(), + referralsSentChart.Copy(), + entriesSentChart.Copy(), + + operationsChart.Copy(), + operationsByTypeChart.Copy(), + + waitersChart.Copy(), +} + +var ( + currentConnectionsChart = module.Chart{ + ID: 
"current_connections", + Title: "Current Connections", + Units: "connections", + Fam: "connections", + Ctx: "openldap.current_connections", + Priority: prioCurrentConnections, + Type: module.Line, + Dims: module.Dims{ + {ID: "current_connections", Name: "active"}, + }, + } + connectionsChart = module.Chart{ + ID: "connections", + Title: "Connections", + Units: "connections/s", + Fam: "connections", + Ctx: "openldap.connections", + Priority: prioTotalConnections, + Type: module.Line, + Dims: module.Dims{ + {ID: "total_connections", Name: "connections", Algo: module.Incremental}, + }, + } + + bytesSentChart = module.Chart{ + ID: "bytes_sent", + Title: "Traffic", + Units: "bytes/s", + Fam: "activity", + Ctx: "openldap.traffic", + Priority: prioBytesSent, + Type: module.Area, + Dims: module.Dims{ + {ID: "bytes_sent", Name: "sent", Algo: module.Incremental}, + }, + } + entriesSentChart = module.Chart{ + ID: "entries_sent", + Title: "Entries", + Units: "entries/s", + Fam: "activity", + Ctx: "openldap.entries", + Priority: prioEntries, + Type: module.Line, + Dims: module.Dims{ + {ID: "entries_sent", Name: "sent", Algo: module.Incremental}, + }, + } + referralsSentChart = module.Chart{ + ID: "referrals_sent", + Title: "Referrals", + Units: "referrals/s", + Fam: "activity", + Ctx: "openldap.referrals", + Priority: prioReferrals, + Type: module.Line, + Dims: module.Dims{ + {ID: "referrals_sent", Name: "sent", Algo: module.Incremental}, + }, + } + + operationsChart = module.Chart{ + ID: "operations", + Title: "Operations", + Units: "operations/s", + Fam: "operations", + Ctx: "openldap.operations", + Priority: prioOperations, + Type: module.Line, + Dims: module.Dims{ + {ID: "completed_operations", Name: "completed", Algo: module.Incremental}, + {ID: "initiated_operations", Name: "initiated", Algo: module.Incremental}, + }, + } + operationsByTypeChart = module.Chart{ + ID: "operations_by_type", + Title: "Operations by Type", + Units: "operations/s", + Fam: "operations", + Ctx: "openldap.operations_by_type", + Priority: prioOperationsByType, + Type: module.Stacked, + Dims: module.Dims{ + {ID: "completed_bind_operations", Name: "bind", Algo: module.Incremental}, + {ID: "completed_search_operations", Name: "search", Algo: module.Incremental}, + {ID: "completed_unbind_operations", Name: "unbind", Algo: module.Incremental}, + {ID: "completed_add_operations", Name: "add", Algo: module.Incremental}, + {ID: "completed_delete_operations", Name: "delete", Algo: module.Incremental}, + {ID: "completed_modify_operations", Name: "modify", Algo: module.Incremental}, + {ID: "completed_compare_operations", Name: "compare", Algo: module.Incremental}, + }, + } + waitersChart = module.Chart{ + ID: "waiters", + Title: "Waiters", + Units: "waiters/s", + Fam: "operations", + Ctx: "openldap.waiters", + Priority: prioWaiters, + Type: module.Line, + Dims: module.Dims{ + {ID: "read_waiters", Name: "read", Algo: module.Incremental}, + {ID: "write_waiters", Name: "write", Algo: module.Incremental}, + }, + } +) diff --git a/src/go/plugin/go.d/modules/openldap/client.go b/src/go/plugin/go.d/modules/openldap/client.go new file mode 100644 index 000000000..4af5f5fb4 --- /dev/null +++ b/src/go/plugin/go.d/modules/openldap/client.go @@ -0,0 +1,83 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package openldap + +import ( + "net" + + "github.com/go-ldap/ldap/v3" + + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/tlscfg" +) + +type ldapConn interface { + connect() error + disconnect() error + search(*ldap.SearchRequest) 
(*ldap.SearchResult, error) +} + +func newLdapConn(cfg Config) ldapConn { + return &ldapClient{Config: cfg} +} + +type ldapClient struct { + Config + + conn *ldap.Conn +} + +func (c *ldapClient) search(req *ldap.SearchRequest) (*ldap.SearchResult, error) { + return c.conn.Search(req) +} + +func (c *ldapClient) connect() error { + opts, err := c.connectOpts() + if err != nil { + return err + } + + conn, err := ldap.DialURL(c.URL, opts...) + if err != nil { + return err + } + + if c.Password == "" { + err = conn.UnauthenticatedBind(c.Username) + } else { + err = conn.Bind(c.Username, c.Password) + } + if err != nil { + _ = conn.Close() + return err + } + + c.conn = conn + + return nil +} + +func (c *ldapClient) connectOpts() ([]ldap.DialOpt, error) { + d := &net.Dialer{ + Timeout: c.Timeout.Duration(), + } + + opts := []ldap.DialOpt{ldap.DialWithDialer(d)} + + tlsConf, err := tlscfg.NewTLSConfig(c.TLSConfig) + if err != nil { + return nil, err + } + if tlsConf != nil { + opts = append(opts, ldap.DialWithTLSConfig(tlsConf)) + } + + return opts, nil +} + +func (c *ldapClient) disconnect() error { + defer func() { c.conn = nil }() + if c.conn != nil { + return c.conn.Close() + } + return nil +} diff --git a/src/go/plugin/go.d/modules/openldap/collect.go b/src/go/plugin/go.d/modules/openldap/collect.go new file mode 100644 index 000000000..de8ac41a8 --- /dev/null +++ b/src/go/plugin/go.d/modules/openldap/collect.go @@ -0,0 +1,55 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package openldap + +import ( + "github.com/go-ldap/ldap/v3" +) + +func (l *OpenLDAP) collect() (map[string]int64, error) { + if l.conn == nil { + conn, err := l.establishConn() + if err != nil { + return nil, err + } + l.conn = conn + } + + mx := make(map[string]int64) + + if err := l.collectMonitorCounters(mx); err != nil { + l.Cleanup() + return nil, err + } + if err := l.collectOperations(mx); err != nil { + l.Cleanup() + return nil, err + } + + return mx, nil +} + +func (l *OpenLDAP) doSearchRequest(req *ldap.SearchRequest, fn func(*ldap.Entry)) error { + resp, err := l.conn.search(req) + if err != nil { + return err + } + + for _, entry := range resp.Entries { + if len(entry.Attributes) != 0 { + fn(entry) + } + } + + return nil +} + +func (l *OpenLDAP) establishConn() (ldapConn, error) { + conn := l.newConn(l.Config) + + if err := conn.connect(); err != nil { + return nil, err + } + + return conn, nil +} diff --git a/src/go/plugin/go.d/modules/openldap/collect_mon_counters.go b/src/go/plugin/go.d/modules/openldap/collect_mon_counters.go new file mode 100644 index 000000000..be96b3c03 --- /dev/null +++ b/src/go/plugin/go.d/modules/openldap/collect_mon_counters.go @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package openldap + +import ( + "strconv" + + "github.com/go-ldap/ldap/v3" +) + +const ( + attrMonitorCounter = "monitorCounter" +) + +func (l *OpenLDAP) collectMonitorCounters(mx map[string]int64) error { + req := newLdapMonitorCountersSearchRequest() + + dnMetricMap := map[string]string{ + "cn=Current,cn=Connections,cn=Monitor": "current_connections", + "cn=Total,cn=Connections,cn=Monitor": "total_connections", + "cn=Bytes,cn=Statistics,cn=Monitor": "bytes_sent", + "cn=Referrals,cn=Statistics,cn=Monitor": "referrals_sent", + "cn=Entries,cn=Statistics,cn=Monitor": "entries_sent", + "cn=Write,cn=Waiters,cn=Monitor": "write_waiters", + "cn=Read,cn=Waiters,cn=Monitor": "read_waiters", + } + + return l.doSearchRequest(req, func(entry *ldap.Entry) { + metric := dnMetricMap[entry.DN] + if metric == 
"" { + l.Debugf("skipping entry '%s'", entry.DN) + return + } + + s := entry.GetAttributeValue(attrMonitorCounter) + if s == "" { + l.Debugf("entry '%s' does not have attribute '%s'", entry.DN, attrMonitorCounter) + return + } + + v, err := strconv.ParseInt(s, 10, 64) + if err != nil { + l.Debugf("failed to parse entry '%s' value '%s': %v", entry.DN, s, err) + return + } + + mx[metric] = v + }) +} + +func newLdapMonitorCountersSearchRequest() *ldap.SearchRequest { + return ldap.NewSearchRequest( + "cn=Monitor", + ldap.ScopeWholeSubtree, + ldap.NeverDerefAliases, + 0, + 0, + false, + "(objectclass=monitorCounterObject)", + []string{attrMonitorCounter}, + nil, + ) +} diff --git a/src/go/plugin/go.d/modules/openldap/collect_operations.go b/src/go/plugin/go.d/modules/openldap/collect_operations.go new file mode 100644 index 000000000..09593d842 --- /dev/null +++ b/src/go/plugin/go.d/modules/openldap/collect_operations.go @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package openldap + +import ( + "strconv" + + "github.com/go-ldap/ldap/v3" +) + +const ( + attrMonitorOpInitiated = "monitorOpInitiated" + attrMonitorOpCompleted = "monitorOpCompleted" +) + +func (l *OpenLDAP) collectOperations(mx map[string]int64) error { + req := newLdapOperationsSearchRequest() + + dnMetricMap := map[string]string{ + "cn=Bind,cn=Operations,cn=Monitor": "bind_operations", + "cn=Unbind,cn=Operations,cn=Monitor": "unbind_operations", + "cn=Add,cn=Operations,cn=Monitor": "add_operations", + "cn=Delete,cn=Operations,cn=Monitor": "delete_operations", + "cn=Modify,cn=Operations,cn=Monitor": "modify_operations", + "cn=Compare,cn=Operations,cn=Monitor": "compare_operations", + "cn=Search,cn=Operations,cn=Monitor": "search_operations", + } + + return l.doSearchRequest(req, func(entry *ldap.Entry) { + metric := dnMetricMap[entry.DN] + if metric == "" { + l.Debugf("skipping entry '%s'", entry.DN) + return + } + + attrs := map[string]string{ + "initiated": attrMonitorOpInitiated, + "completed": attrMonitorOpCompleted, + } + + for prefix, attr := range attrs { + s := entry.GetAttributeValue(attr) + if s == "" { + l.Debugf("entry '%s' does not have attribute '%s'", entry.DN, attr) + continue + } + v, err := strconv.ParseInt(s, 10, 64) + if err != nil { + l.Debugf("failed to parse entry '%s' value '%s': %v", entry.DN, s, err) + continue + } + + mx[prefix+"_"+metric] = v + mx[prefix+"_operations"] += v + } + }) +} + +func newLdapOperationsSearchRequest() *ldap.SearchRequest { + return ldap.NewSearchRequest( + "cn=Operations,cn=Monitor", + ldap.ScopeWholeSubtree, + ldap.NeverDerefAliases, + 0, + 0, + false, + "(objectclass=monitorOperation)", + []string{attrMonitorOpInitiated, attrMonitorOpCompleted}, + nil, + ) +} diff --git a/src/go/plugin/go.d/modules/openldap/config_schema.json b/src/go/plugin/go.d/modules/openldap/config_schema.json new file mode 100644 index 000000000..362028157 --- /dev/null +++ b/src/go/plugin/go.d/modules/openldap/config_schema.json @@ -0,0 +1,109 @@ +{ + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "OpenLDAP collector configuration.", + "type": "object", + "properties": { + "update_every": { + "title": "Update every", + "description": "Data collection interval, measured in seconds.", + "type": "integer", + "minimum": 1, + "default": 1 + }, + "timeout": { + "title": "Timeout", + "description": "Timeout for establishing a connection and communication (reading and writing) in seconds.", + "type": "number", + "minimum": 0.5, + "default": 2 + }, + 
"url": { + "title": "URL", + "description": "LDAP server URL.", + "type": "string", + "default": "ldap://127.0.0.1:389" + }, + "username": { + "title": "DN", + "description": "The distinguished name (DN) of the user authorized to view the monitor database.", + "type": "string", + "default": "" + }, + "password": { + "title": "Password", + "description": "The password associated with the user identified by the DN.", + "type": "string", + "default": "" + }, + "tls_skip_verify": { + "title": "Skip TLS verification", + "description": "If set, TLS certificate verification will be skipped.", + "type": "boolean" + }, + "tls_ca": { + "title": "TLS CA", + "description": "The path to the CA certificate file for TLS verification.", + "type": "string", + "pattern": "^$|^/" + }, + "tls_cert": { + "title": "TLS certificate", + "description": "The path to the client certificate file for TLS authentication.", + "type": "string", + "pattern": "^$|^/" + }, + "tls_key": { + "title": "TLS key", + "description": "The path to the client key file for TLS authentication.", + "type": "string", + "pattern": "^$|^/" + } + }, + "required": [ + "url", + "username" + ], + "patternProperties": { + "^name$": {} + } + }, + "uiSchema": { + "uiOptions": { + "fullPage": true + }, + "ui:flavour": "tabs", + "ui:options": { + "tabs": [ + { + "title": "Base", + "fields": [ + "update_every", + "url", + "timeout", + "username", + "password" + ] + }, + { + "title": "TLS", + "fields": [ + "tls_skip_verify", + "tls_ca", + "tls_cert", + "tls_key" + ] + } + ] + }, + "timeout": { + "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)." + }, + "username": { + "ui:placeholder": "cn=admin,dc=example,dc=com" + }, + "password": { + "ui:widget": "password" + } + } +} diff --git a/src/go/plugin/go.d/modules/openldap/integrations/openldap.md b/src/go/plugin/go.d/modules/openldap/integrations/openldap.md new file mode 100644 index 000000000..ccdf5a756 --- /dev/null +++ b/src/go/plugin/go.d/modules/openldap/integrations/openldap.md @@ -0,0 +1,228 @@ + + +# OpenLDAP + + + + + +Plugin: go.d.plugin +Module: openldap + + + +## Overview + +This collector monitors OpenLDAP metrics about connections, operations, referrals and more. + + +It gathers the metrics using the [go-ldap](https://github.com/go-ldap/ldap) module and the [Monitor backend](https://www.openldap.org/doc/admin24/monitoringslapd.html) of OpenLDAP. + + +This collector is only supported on the following platforms: + +- Linux + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +The collector cannot auto-detect OpenLDAP instances, because credential configuration is required. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per OpenLDAP instance + +These metrics refer to the entire monitored application. + +This scope has no labels. 
+ +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| openldap.current_connections | active | connections | +| openldap.connections | connections | connections/s | +| openldap.traffic | sent | bytes/s | +| openldap.entries | sent | entries/s | +| openldap.referrals | sent | referrals/s | +| openldap.operations | completed, initiated | operations/s | +| openldap.operations_by_type | bind, search, unbind, add, delete, modify, compare | operations/s | +| openldap.waiters | write, read | waiters/s | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Enable the OpenLDAP Monitor Backend. + +Follow the instructions from https://www.openldap.org/doc/admin24/monitoringslapd.html to activate the monitoring interface. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/openldap.conf`. + + +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/openldap.conf +``` +#### Options + +The following options can be defined globally: update_every. + +
    Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 1 | no | +| timeout | Timeout for establishing a connection and communication (reading and writing) in seconds. | 2 | no | +| url | LDAP server URL. | ldap://127.0.0.1:389 | yes | +| username | The distinguished name (DN) of the user authorized to view the monitor database. | | yes | +| password | The password associated with the user identified by the DN. | | yes | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +
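A note on reading the metrics listed earlier: most dimensions use Netdata's `Incremental` algorithm, i.e. the agent charts the per-second delta of the raw monitor counters rather than their absolute values. A toy illustration of that calculation (all numbers are made up):

```go
// Toy illustration of an Incremental dimension: the charted value is
// delta/elapsed, not the raw counter. All numbers are made up.
package main

import "fmt"

func main() {
	prev, curr := int64(1200), int64(1230) // total_connections at t and t+10s
	elapsed := int64(10)                   // seconds between collections

	rate := float64(curr-prev) / float64(elapsed)
	fmt.Printf("connections/s: %.1f\n", rate) // => 3.0
}
```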
+ +#### Examples + +##### Basic + +A basic example configuration. + +```yaml +jobs: + - name: local + url: ldap://localhost:389 + username: cn=netdata,dc=example,dc=com + password: secret + +``` +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Collecting metrics from local and remote instances. + + +```yaml +jobs: + - name: local + url: ldap://localhost:389 + username: cn=netdata,dc=example,dc=com + password: secret + + - name: remote + url: ldap://192.0.2.1:389 + username: cn=netdata,dc=example,dc=com + password: secret + +``` + + +## Troubleshooting + +### Debug Mode + +**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature. + +To troubleshoot issues with the `openldap` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m openldap + ``` + +### Getting Logs + +If you're encountering problems with the `openldap` collector, follow these steps to retrieve logs and identify potential issues: + +- **Run the command** specific to your system (systemd, non-systemd, or Docker container). +- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem. + +#### System with systemd + +Use the following command to view logs generated since the last Netdata service restart: + +```bash +journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep openldap +``` + +#### System without systemd + +Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name: + +```bash +grep openldap /var/log/netdata/collector.log +``` + +**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues. + +#### Docker Container + +If your Netdata runs in a Docker container named "netdata" (replace if different), use this command: + +```bash +docker logs netdata 2>&1 | grep openldap +``` + + diff --git a/src/go/plugin/go.d/modules/openldap/metadata.yaml b/src/go/plugin/go.d/modules/openldap/metadata.yaml new file mode 100644 index 000000000..8d5b9c1a5 --- /dev/null +++ b/src/go/plugin/go.d/modules/openldap/metadata.yaml @@ -0,0 +1,192 @@ +plugin_name: go.d.plugin +modules: + - meta: + plugin_name: go.d.plugin + module_name: openldap + monitored_instance: + name: OpenLDAP + link: https://www.openldap.org/ + categories: + - data-collection.authentication-and-authorization + icon_filename: openldap.svg + related_resources: + integrations: + list: [] + info_provided_to_referring_integrations: + description: "" + keywords: + - openldap + - RBAC + - Directory access + most_popular: false + overview: + data_collection: + metrics_description: | + This collector monitors OpenLDAP metrics about connections, operations, referrals and more.
+ method_description: | + It gathers the metrics using the [go-ldap](https://github.com/go-ldap/ldap) module and the [Monitor backend](https://www.openldap.org/doc/admin24/monitoringslapd.html) of OpenLDAP. + supported_platforms: + include: ["Linux"] + exclude: [] + multi_instance: true + additional_permissions: + description: "" + default_behavior: + auto_detection: + description: "The collector cannot auto-detect OpenLDAP instances, because credential configuration is required." + limits: + description: "" + performance_impact: + description: "" + setup: + prerequisites: + list: + - title: Enable the OpenLDAP Monitor Backend. + description: | + Follow the instructions from https://www.openldap.org/doc/admin24/monitoringslapd.html to activate the monitoring interface. + configuration: + file: + name: go.d/openldap.conf + options: + description: | + The following options can be defined globally: update_every. + folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 1 + required: false + - name: timeout + description: Timeout for establishing a connection and communication (reading and writing) in seconds. + default_value: 2 + required: false + - name: url + description: LDAP server URL. + default_value: ldap://127.0.0.1:389 + required: true + - name: username + description: The distinguished name (DN) of the user authorized to view the monitor database. + default_value: "" + required: true + - name: password + description: The password associated with the user identified by the DN. + default_value: "" + required: true + - name: tls_skip_verify + description: Server certificate chain and hostname validation policy. Controls whether the client performs this check. + default_value: false + required: false + - name: tls_ca + description: Certification authority that the client uses when verifying the server's certificates. + default_value: "" + required: false + - name: tls_cert + description: Client TLS certificate. + default_value: "" + required: false + - name: tls_key + description: Client TLS key. + default_value: "" + required: false + examples: + folding: + title: "" + enabled: false + list: + - name: Basic + description: A basic example configuration. + config: | + jobs: + - name: local + url: ldap://localhost:389 + username: cn=netdata,dc=example,dc=com + password: secret + - name: Multi-instance + description: | + > **Note**: When you define multiple jobs, their names must be unique. + + Collecting metrics from local and remote instances. + config: | + jobs: + - name: local + url: ldap://localhost:389 + username: cn=netdata,dc=example,dc=com + password: secret + + - name: remote + url: ldap://192.0.2.1:389 + username: cn=netdata,dc=example,dc=com + password: secret + troubleshooting: + problems: + list: [] + alerts: [] + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: global + description: "These metrics refer to the entire monitored application."
+ labels: [] + metrics: - name: openldap.current_connections + description: Current Connections + unit: "connections" + chart_type: line + dimensions: + - name: active + - name: openldap.connections + description: Connections + unit: "connections/s" + chart_type: line + dimensions: + - name: connections + - name: openldap.traffic + description: Traffic + unit: "bytes/s" + chart_type: area + dimensions: + - name: sent + - name: openldap.entries + description: Entries + unit: "entries/s" + chart_type: line + dimensions: + - name: sent + - name: openldap.referrals + description: Referrals + unit: "referrals/s" + chart_type: line + dimensions: + - name: sent + - name: openldap.operations + description: Operations + unit: "operations/s" + chart_type: line + dimensions: + - name: completed + - name: initiated + - name: openldap.operations_by_type + description: Operations by Type + unit: "operations/s" + chart_type: stacked + dimensions: + - name: bind + - name: search + - name: unbind + - name: add + - name: delete + - name: modify + - name: compare + - name: openldap.waiters + description: Waiters + unit: "waiters/s" + chart_type: line + dimensions: + - name: write + - name: read diff --git a/src/go/plugin/go.d/modules/openldap/openldap.go b/src/go/plugin/go.d/modules/openldap/openldap.go new file mode 100644 index 000000000..8c6b08dff --- /dev/null +++ b/src/go/plugin/go.d/modules/openldap/openldap.go @@ -0,0 +1,114 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package openldap + +import ( + _ "embed" + "errors" + "time" + + "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/tlscfg" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("openldap", module.Creator{ + JobConfigSchema: configSchema, + Defaults: module.Defaults{ + UpdateEvery: 1, + }, + Create: func() module.Module { return New() }, + Config: func() any { return &Config{} }, + }) +} + +func New() *OpenLDAP { + return &OpenLDAP{ + Config: Config{ + URL: "ldap://127.0.0.1:389", + Timeout: confopt.Duration(time.Second * 2), + }, + + newConn: newLdapConn, + + charts: charts.Copy(), + } + +} + +type Config struct { + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + URL string `yaml:"url" json:"url"` + Timeout confopt.Duration `yaml:"timeout,omitempty" json:"timeout"` + Username string `yaml:"username" json:"username"` + Password string `yaml:"password" json:"password"` + tlscfg.TLSConfig `yaml:",inline" json:""` +} + +type OpenLDAP struct { + module.Base + Config `yaml:",inline" json:""` + + charts *module.Charts + + conn ldapConn + newConn func(Config) ldapConn +} + +func (l *OpenLDAP) Configuration() any { + return l.Config +} + +func (l *OpenLDAP) Init() error { + if l.URL == "" { + return errors.New("empty LDAP server url") + } + if l.Username == "" { + return errors.New("empty LDAP username") + } + + return nil +} + +func (l *OpenLDAP) Check() error { + mx, err := l.collect() + if err != nil { + return err + } + + if len(mx) == 0 { + return errors.New("no metrics collected") + } + + return nil +} + +func (l *OpenLDAP) Charts() *module.Charts { + return l.charts +} + +func (l *OpenLDAP) Collect() map[string]int64 { + mx, err := l.collect() + if err != nil { + l.Error(err) + } + + if len(mx) == 0 { + return nil + } + + return mx +} + +func (l *OpenLDAP) Cleanup() { + if l.conn != nil { + if err := l.conn.disconnect(); err != 
nil { + l.Warningf("error disconnecting ldap client: %v", err) + } + l.conn = nil + } +} diff --git a/src/go/plugin/go.d/modules/openldap/openldap_test.go b/src/go/plugin/go.d/modules/openldap/openldap_test.go new file mode 100644 index 000000000..aa624bfda --- /dev/null +++ b/src/go/plugin/go.d/modules/openldap/openldap_test.go @@ -0,0 +1,363 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package openldap + +import ( + "errors" + "fmt" + "os" + "testing" + + "github.com/go-ldap/ldap/v3" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" +) + +var ( + dataConfigJSON, _ = os.ReadFile("testdata/config.json") + dataConfigYAML, _ = os.ReadFile("testdata/config.yaml") +) + +func Test_testDataIsValid(t *testing.T) { + for name, data := range map[string][]byte{ + "dataConfigJSON": dataConfigJSON, + "dataConfigYAML": dataConfigYAML, + } { + assert.NotNil(t, data, name) + } +} + +func TestOpenLDAP_ConfigurationSerialize(t *testing.T) { + module.TestConfigurationSerialize(t, &OpenLDAP{}, dataConfigJSON, dataConfigYAML) +} + +func TestOpenLDAP_Init(t *testing.T) { + tests := map[string]struct { + config Config + wantFail bool + }{ + "fails with default config": { + wantFail: true, + config: New().Config, + }, + "fails if URL not set": { + wantFail: true, + config: func() Config { + conf := New().Config + conf.URL = "" + return conf + }(), + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + oldap := New() + oldap.Config = test.config + + if test.wantFail { + assert.Error(t, oldap.Init()) + } else { + assert.NoError(t, oldap.Init()) + } + }) + } +} + +func TestOpenLDAP_Cleanup(t *testing.T) { + tests := map[string]struct { + prepare func() *OpenLDAP + }{ + "not initialized": { + prepare: func() *OpenLDAP { + return New() + }, + }, + "after check": { + prepare: func() *OpenLDAP { + oldap := New() + oldap.newConn = func(Config) ldapConn { return prepareMockOk() } + _ = oldap.Check() + return oldap + }, + }, + "after collect": { + prepare: func() *OpenLDAP { + oldap := New() + oldap.newConn = func(Config) ldapConn { return prepareMockOk() } + _ = oldap.Collect() + return oldap + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + oldap := test.prepare() + + assert.NotPanics(t, oldap.Cleanup) + }) + } +} + +func TestOpenLDAP_Charts(t *testing.T) { + assert.NotNil(t, New().Charts()) +} + +func TestOpenLDAP_Check(t *testing.T) { + tests := map[string]struct { + prepareMock func() *mockOpenLDAPConn + wantFail bool + }{ + "success case": { + wantFail: false, + prepareMock: prepareMockOk, + }, + "err on connect": { + wantFail: true, + prepareMock: prepareMockErrOnConnect, + }, + "err on search": { + wantFail: true, + prepareMock: prepareMockErrOnSearch, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + oldap := New() + mock := test.prepareMock() + oldap.newConn = func(Config) ldapConn { return mock } + + if test.wantFail { + assert.Error(t, oldap.Check()) + } else { + assert.NoError(t, oldap.Check()) + } + }) + } +} + +func TestOpenLDAP_Collect(t *testing.T) { + tests := map[string]struct { + prepareMock func() *mockOpenLDAPConn + wantMetrics map[string]int64 + disconnectBeforeCleanup bool + disconnectAfterCleanup bool + }{ + "success case": { + prepareMock: prepareMockOk, + disconnectBeforeCleanup: false, + disconnectAfterCleanup: true, + wantMetrics: map[string]int64{ + "bytes_sent": 1, + "completed_add_operations": 
1, + "completed_bind_operations": 1, + "completed_compare_operations": 1, + "completed_delete_operations": 1, + "completed_modify_operations": 1, + "completed_operations": 7, + "completed_search_operations": 1, + "completed_unbind_operations": 1, + "current_connections": 1, + "entries_sent": 1, + "initiated_add_operations": 1, + "initiated_bind_operations": 1, + "initiated_compare_operations": 1, + "initiated_delete_operations": 1, + "initiated_modify_operations": 1, + "initiated_operations": 7, + "initiated_search_operations": 1, + "initiated_unbind_operations": 1, + "read_waiters": 1, + "referrals_sent": 1, + "total_connections": 1, + "write_waiters": 1, + }, + }, + "err on connect": { + prepareMock: prepareMockErrOnConnect, + disconnectBeforeCleanup: false, + disconnectAfterCleanup: false, + }, + "err on search": { + prepareMock: prepareMockErrOnSearch, + disconnectBeforeCleanup: true, + disconnectAfterCleanup: true, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + oldap := New() + mock := test.prepareMock() + oldap.newConn = func(Config) ldapConn { return mock } + + mx := oldap.Collect() + + require.Equal(t, test.wantMetrics, mx) + + if len(test.wantMetrics) > 0 { + module.TestMetricsHasAllChartsDims(t, oldap.Charts(), mx) + } + + assert.Equal(t, test.disconnectBeforeCleanup, mock.disconnectCalled, "disconnect before cleanup") + oldap.Cleanup() + assert.Equal(t, test.disconnectAfterCleanup, mock.disconnectCalled, "disconnect after cleanup") + }) + } +} + +func prepareMockOk() *mockOpenLDAPConn { + return &mockOpenLDAPConn{ + dataSearchMonCounters: &ldap.SearchResult{ + Entries: []*ldap.Entry{ + { + DN: "cn=Current,cn=Connections,cn=Monitor", + Attributes: []*ldap.EntryAttribute{ + {Name: attrMonitorCounter, Values: []string{"1"}}, + }, + }, + { + DN: "cn=Total,cn=Connections,cn=Monitor", + Attributes: []*ldap.EntryAttribute{ + {Name: attrMonitorCounter, Values: []string{"1"}}, + }, + }, + { + DN: "cn=Bytes,cn=Statistics,cn=Monitor", + Attributes: []*ldap.EntryAttribute{ + {Name: attrMonitorCounter, Values: []string{"1"}}, + }, + }, + { + DN: "cn=Referrals,cn=Statistics,cn=Monitor", + Attributes: []*ldap.EntryAttribute{ + {Name: attrMonitorCounter, Values: []string{"1"}}, + }, + }, + { + DN: "cn=Entries,cn=Statistics,cn=Monitor", + Attributes: []*ldap.EntryAttribute{ + {Name: attrMonitorCounter, Values: []string{"1"}}, + }, + }, + { + DN: "cn=Write,cn=Waiters,cn=Monitor", + Attributes: []*ldap.EntryAttribute{ + {Name: attrMonitorCounter, Values: []string{"1"}}, + }, + }, + { + DN: "cn=Read,cn=Waiters,cn=Monitor", + Attributes: []*ldap.EntryAttribute{ + {Name: attrMonitorCounter, Values: []string{"1"}}, + }, + }, + }, + }, + dataSearchMonOperations: &ldap.SearchResult{ + Entries: []*ldap.Entry{ + { + DN: "cn=Bind,cn=Operations,cn=Monitor", + Attributes: []*ldap.EntryAttribute{ + {Name: attrMonitorOpInitiated, Values: []string{"1"}}, + {Name: attrMonitorOpCompleted, Values: []string{"1"}}, + }, + }, + { + DN: "cn=Unbind,cn=Operations,cn=Monitor", + Attributes: []*ldap.EntryAttribute{ + {Name: attrMonitorOpInitiated, Values: []string{"1"}}, + {Name: attrMonitorOpCompleted, Values: []string{"1"}}, + }, + }, + { + DN: "cn=Add,cn=Operations,cn=Monitor", + Attributes: []*ldap.EntryAttribute{ + {Name: attrMonitorOpInitiated, Values: []string{"1"}}, + {Name: attrMonitorOpCompleted, Values: []string{"1"}}, + }, + }, + { + DN: "cn=Delete,cn=Operations,cn=Monitor", + Attributes: []*ldap.EntryAttribute{ + {Name: attrMonitorOpInitiated, Values: []string{"1"}}, + 
{Name: attrMonitorOpCompleted, Values: []string{"1"}}, + }, + }, + { + DN: "cn=Modify,cn=Operations,cn=Monitor", + Attributes: []*ldap.EntryAttribute{ + {Name: attrMonitorOpInitiated, Values: []string{"1"}}, + {Name: attrMonitorOpCompleted, Values: []string{"1"}}, + }, + }, + { + DN: "cn=Compare,cn=Operations,cn=Monitor", + Attributes: []*ldap.EntryAttribute{ + {Name: attrMonitorOpInitiated, Values: []string{"1"}}, + {Name: attrMonitorOpCompleted, Values: []string{"1"}}, + }, + }, + { + DN: "cn=Search,cn=Operations,cn=Monitor", + Attributes: []*ldap.EntryAttribute{ + {Name: attrMonitorOpInitiated, Values: []string{"1"}}, + {Name: attrMonitorOpCompleted, Values: []string{"1"}}, + }, + }, + }, + }, + } +} + +func prepareMockErrOnConnect() *mockOpenLDAPConn { + return &mockOpenLDAPConn{ + errOnConnect: true, + } +} + +func prepareMockErrOnSearch() *mockOpenLDAPConn { + return &mockOpenLDAPConn{ + errOnSearch: true, + } +} + +type mockOpenLDAPConn struct { + errOnConnect bool + disconnectCalled bool + + dataSearchMonCounters *ldap.SearchResult + dataSearchMonOperations *ldap.SearchResult + errOnSearch bool +} + +func (m *mockOpenLDAPConn) connect() error { + if m.errOnConnect { + return errors.New("mock.connect() error") + } + return nil +} + +func (m *mockOpenLDAPConn) disconnect() error { + m.disconnectCalled = true + return nil +} + +func (m *mockOpenLDAPConn) search(req *ldap.SearchRequest) (*ldap.SearchResult, error) { + if m.errOnSearch { + return nil, errors.New("mock.search() error") + } + + switch req.BaseDN { + case "cn=Monitor": + return m.dataSearchMonCounters, nil + case "cn=Operations,cn=Monitor": + return m.dataSearchMonOperations, nil + default: + return nil, fmt.Errorf("mock.search(): unknown BaseDN: %s", req.BaseDN) + } +} diff --git a/src/go/plugin/go.d/modules/openldap/testdata/config.json b/src/go/plugin/go.d/modules/openldap/testdata/config.json new file mode 100644 index 000000000..2c69135fd --- /dev/null +++ b/src/go/plugin/go.d/modules/openldap/testdata/config.json @@ -0,0 +1,11 @@ +{ + "update_every": 123, + "url": "ok", + "timeout": 123.123, + "username": "ok", + "password": "ok", + "tls_ca": "ok", + "tls_cert": "ok", + "tls_key": "ok", + "tls_skip_verify": false +} diff --git a/src/go/plugin/go.d/modules/openldap/testdata/config.yaml b/src/go/plugin/go.d/modules/openldap/testdata/config.yaml new file mode 100644 index 000000000..97014066d --- /dev/null +++ b/src/go/plugin/go.d/modules/openldap/testdata/config.yaml @@ -0,0 +1,8 @@ +update_every: 123 +url: "ok" +timeout: 123.123 +username: "ok" +password: "ok" +tls_ca: "ok" +tls_cert: "ok" +tls_key: "ok" diff --git a/src/go/plugin/go.d/modules/openvpn/client/client_test.go b/src/go/plugin/go.d/modules/openvpn/client/client_test.go index d40f6ea1b..d1257e877 100644 --- a/src/go/plugin/go.d/modules/openvpn/client/client_test.go +++ b/src/go/plugin/go.d/modules/openvpn/client/client_test.go @@ -11,6 +11,7 @@ import ( "testing" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/socket" + "github.com/stretchr/testify/assert" ) diff --git a/src/go/plugin/go.d/modules/openvpn/config_schema.json b/src/go/plugin/go.d/modules/openvpn/config_schema.json index 8bbda1fd4..3de21408e 100644 --- a/src/go/plugin/go.d/modules/openvpn/config_schema.json +++ b/src/go/plugin/go.d/modules/openvpn/config_schema.json @@ -64,7 +64,6 @@ "required": [ "address" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/openvpn/init.go b/src/go/plugin/go.d/modules/openvpn/init.go index 
563edbaa6..76fc66614 100644 --- a/src/go/plugin/go.d/modules/openvpn/init.go +++ b/src/go/plugin/go.d/modules/openvpn/init.go @@ -3,8 +3,8 @@ package openvpn import ( + "github.com/netdata/netdata/go/plugins/pkg/matcher" "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/openvpn/client" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/socket" ) @@ -21,10 +21,8 @@ func (o *OpenVPN) initPerUserMatcher() (matcher.Matcher, error) { func (o *OpenVPN) initClient() *client.Client { config := socket.Config{ - Address: o.Address, - ConnectTimeout: o.Timeout.Duration(), - ReadTimeout: o.Timeout.Duration(), - WriteTimeout: o.Timeout.Duration(), + Address: o.Address, + Timeout: o.Timeout.Duration(), } return &client.Client{Client: socket.New(config)} } diff --git a/src/go/plugin/go.d/modules/openvpn/integrations/openvpn.md b/src/go/plugin/go.d/modules/openvpn/integrations/openvpn.md index 612d5eaab..e56f4d12d 100644 --- a/src/go/plugin/go.d/modules/openvpn/integrations/openvpn.md +++ b/src/go/plugin/go.d/modules/openvpn/integrations/openvpn.md @@ -115,8 +115,8 @@ It is disabled to not break other tools which use `Management Interface`. The configuration file name for this integration is `go.d/openvpn.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/openvpn/metadata.yaml b/src/go/plugin/go.d/modules/openvpn/metadata.yaml index 49360b2fd..c69e34d0c 100644 --- a/src/go/plugin/go.d/modules/openvpn/metadata.yaml +++ b/src/go/plugin/go.d/modules/openvpn/metadata.yaml @@ -84,7 +84,7 @@ modules: Metrics of users matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) - - Pattern syntax: [matcher](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/matcher#supported-format). + - Pattern syntax: [matcher](https://github.com/netdata/netdata/tree/master/src/go/pkg/matcher#supported-format). 
- Syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/openvpn/openvpn.go b/src/go/plugin/go.d/modules/openvpn/openvpn.go index 52bada3ee..ac3619705 100644 --- a/src/go/plugin/go.d/modules/openvpn/openvpn.go +++ b/src/go/plugin/go.d/modules/openvpn/openvpn.go @@ -6,11 +6,11 @@ import ( _ "embed" "time" + "github.com/netdata/netdata/go/plugins/pkg/matcher" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/openvpn/client" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/socket" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) //go:embed "config_schema.json" @@ -28,7 +28,7 @@ func New() *OpenVPN { return &OpenVPN{ Config: Config{ Address: "127.0.0.1:7505", - Timeout: web.Duration(time.Second), + Timeout: confopt.Duration(time.Second), }, charts: charts.Copy(), @@ -39,7 +39,7 @@ func New() *OpenVPN { type Config struct { UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` Address string `yaml:"address" json:"address"` - Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"` + Timeout confopt.Duration `yaml:"timeout,omitempty" json:"timeout"` PerUserStats matcher.SimpleExpr `yaml:"per_user_stats,omitempty" json:"per_user_stats"` } @@ -69,13 +69,11 @@ func (o *OpenVPN) Configuration() any { func (o *OpenVPN) Init() error { if err := o.validateConfig(); err != nil { - o.Error(err) return err } m, err := o.initPerUserMatcher() if err != nil { - o.Error(err) return err } o.perUserMatcher = m @@ -89,14 +87,12 @@ func (o *OpenVPN) Init() error { func (o *OpenVPN) Check() error { if err := o.client.Connect(); err != nil { - o.Error(err) return err } defer func() { _ = o.client.Disconnect() }() ver, err := o.client.Version() if err != nil { - o.Error(err) o.Cleanup() return err } diff --git a/src/go/plugin/go.d/modules/openvpn/openvpn_test.go b/src/go/plugin/go.d/modules/openvpn/openvpn_test.go index d81747ceb..863081204 100644 --- a/src/go/plugin/go.d/modules/openvpn/openvpn_test.go +++ b/src/go/plugin/go.d/modules/openvpn/openvpn_test.go @@ -6,9 +6,9 @@ import ( "os" "testing" + "github.com/netdata/netdata/go/plugins/pkg/matcher" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/openvpn/client" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/socket" "github.com/stretchr/testify/assert" diff --git a/src/go/plugin/go.d/modules/openvpn_status_log/config_schema.json b/src/go/plugin/go.d/modules/openvpn_status_log/config_schema.json index db3af2cc8..368354443 100644 --- a/src/go/plugin/go.d/modules/openvpn_status_log/config_schema.json +++ b/src/go/plugin/go.d/modules/openvpn_status_log/config_schema.json @@ -58,7 +58,6 @@ "required": [ "address" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/openvpn_status_log/init.go b/src/go/plugin/go.d/modules/openvpn_status_log/init.go index f2e6bee37..c603736bf 100644 --- a/src/go/plugin/go.d/modules/openvpn_status_log/init.go +++ b/src/go/plugin/go.d/modules/openvpn_status_log/init.go @@ -4,7 +4,8 @@ package openvpn_status_log import ( "errors" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher" + + "github.com/netdata/netdata/go/plugins/pkg/matcher" ) func (o *OpenVPNStatusLog) 
validateConfig() error { diff --git a/src/go/plugin/go.d/modules/openvpn_status_log/integrations/openvpn_status_log.md b/src/go/plugin/go.d/modules/openvpn_status_log/integrations/openvpn_status_log.md index 9a5b56663..96a7c33b3 100644 --- a/src/go/plugin/go.d/modules/openvpn_status_log/integrations/openvpn_status_log.md +++ b/src/go/plugin/go.d/modules/openvpn_status_log/integrations/openvpn_status_log.md @@ -106,8 +106,8 @@ No action required. The configuration file name for this integration is `go.d/openvpn_status_log.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/openvpn_status_log/metadata.yaml b/src/go/plugin/go.d/modules/openvpn_status_log/metadata.yaml index 8636de63b..1c1a0839d 100644 --- a/src/go/plugin/go.d/modules/openvpn_status_log/metadata.yaml +++ b/src/go/plugin/go.d/modules/openvpn_status_log/metadata.yaml @@ -71,7 +71,7 @@ modules: details: | Metrics of users matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) - - Pattern syntax: [matcher](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/matcher#supported-format). + - Pattern syntax: [matcher](https://github.com/netdata/netdata/tree/master/src/go/pkg/matcher#supported-format). 
- Syntax: ```yaml per_user_stats: diff --git a/src/go/plugin/go.d/modules/openvpn_status_log/openvpn.go b/src/go/plugin/go.d/modules/openvpn_status_log/openvpn.go index 7b2914df9..61ad11031 100644 --- a/src/go/plugin/go.d/modules/openvpn_status_log/openvpn.go +++ b/src/go/plugin/go.d/modules/openvpn_status_log/openvpn.go @@ -5,9 +5,10 @@ package openvpn_status_log import ( _ "embed" "errors" + "fmt" + "github.com/netdata/netdata/go/plugins/pkg/matcher" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher" ) //go:embed "config_schema.json" @@ -53,14 +54,12 @@ func (o *OpenVPNStatusLog) Configuration() any { func (o *OpenVPNStatusLog) Init() error { if err := o.validateConfig(); err != nil { - o.Errorf("error on validating config: %v", err) - return err + return fmt.Errorf("error on validating config: %v", err) } m, err := o.initPerUserStatsMatcher() if err != nil { - o.Errorf("error on creating 'per_user_stats' matcher: %v", err) - return err + return fmt.Errorf("error on creating 'per_user_stats' matcher: %v", err) } if m != nil { o.perUserMatcher = m @@ -72,7 +71,6 @@ func (o *OpenVPNStatusLog) Init() error { func (o *OpenVPNStatusLog) Check() error { mx, err := o.collect() if err != nil { - o.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/openvpn_status_log/openvpn_test.go b/src/go/plugin/go.d/modules/openvpn_status_log/openvpn_test.go index f3d852d5a..36ac45e5b 100644 --- a/src/go/plugin/go.d/modules/openvpn_status_log/openvpn_test.go +++ b/src/go/plugin/go.d/modules/openvpn_status_log/openvpn_test.go @@ -7,8 +7,8 @@ import ( "strings" "testing" + "github.com/netdata/netdata/go/plugins/pkg/matcher" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" diff --git a/src/go/plugin/go.d/modules/oracledb/README.md b/src/go/plugin/go.d/modules/oracledb/README.md new file mode 120000 index 000000000..a75e3611e --- /dev/null +++ b/src/go/plugin/go.d/modules/oracledb/README.md @@ -0,0 +1 @@ +integrations/oracle_db.md \ No newline at end of file diff --git a/src/go/plugin/go.d/modules/oracledb/charts.go b/src/go/plugin/go.d/modules/oracledb/charts.go new file mode 100644 index 000000000..4df42b741 --- /dev/null +++ b/src/go/plugin/go.d/modules/oracledb/charts.go @@ -0,0 +1,363 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package oracledb + +import ( + "fmt" + "strings" + + "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" +) + +const ( + prioSessionsCount = module.Priority + iota + prioActiveSessionsCount + prioSessionsUtilization + + prioCurrentLogons + prioLogonsRate + + prioTablespaceUtilization + prioTablespaceUsage + + prioDatabaseWaitTimeRatio + prioSqlServiceResponseTime + prioWaitClassWaitTime + prioEnqueueTimeouts + + prioActivities + + prioDiskIO + prioDiskIOPS + + prioDiskSorts + + prioTableScans + + prioCacheHitRatio + prioGlobalCacheBlocks +) + +var globalCharts = module.Charts{ + sessionsCountChart.Copy(), + averageActiveSessionsCountChart.Copy(), + sessionsUtilizationChart.Copy(), + + currentLogonsChart.Copy(), + logonsRateChart.Copy(), + + databaseWaitTimeRatioChart.Copy(), + sqlServiceResponseTimeChart.Copy(), + enqueueTimeoutsChart.Copy(), + + activityChart.Copy(), + + diskIOChart.Copy(), + diskIOPSChart.Copy(), + + sortsChart.Copy(), + + tableScansChart.Copy(), + + cacheHitRatioChart.Copy(), + 
globalCacheBlocksChart.Copy(), +} + +var ( + sessionsCountChart = module.Chart{ + ID: "sessions", + Title: "Sessions", + Units: "sessions", + Fam: "sessions", + Ctx: "oracledb.sessions", + Type: module.Line, + Priority: prioSessionsCount, + Dims: module.Dims{ + {ID: "Session Count", Name: "sessions", Div: precision}, + }, + } + averageActiveSessionsCountChart = module.Chart{ + ID: "average_active_sessions", + Title: "Average Active Sessions", + Units: "sessions", + Fam: "sessions", + Ctx: "oracledb.average_active_sessions", + Type: module.Line, + Priority: prioActiveSessionsCount, + Dims: module.Dims{ + {ID: "Average Active Sessions", Name: "active", Div: precision}, + }, + } + sessionsUtilizationChart = module.Chart{ + ID: "sessions_utilization", + Title: "Sessions Limit %", + Units: "percent", + Fam: "sessions", + Ctx: "oracledb.sessions_utilization", + Type: module.Area, + Priority: prioSessionsUtilization, + Dims: module.Dims{ + {ID: "Session Limit %", Name: "session_limit", Div: precision}, + }, + } + + currentLogonsChart = module.Chart{ + ID: "current_logons", + Title: "Current Logons", + Units: "logons", + Fam: "logons", + Ctx: "oracledb.current_logons", + Type: module.Line, + Priority: prioCurrentLogons, + Dims: module.Dims{ + {ID: "logons current", Name: "logons"}, + }, + } + logonsRateChart = module.Chart{ + ID: "logons_rate", + Title: "Logons", + Units: "logons/s", + Fam: "logons", + Ctx: "oracledb.logons", + Type: module.Line, + Priority: prioLogonsRate, + Dims: module.Dims{ + {ID: "logons cumulative", Name: "logons", Algo: module.Incremental}, + }, + } + + databaseWaitTimeRatioChart = module.Chart{ + ID: "database_wait_time_ratio", + Title: "Database Wait Time Ratio", + Units: "percent", + Fam: "performance", + Ctx: "oracledb.database_wait_time_ratio", + Type: module.Area, + Priority: prioDatabaseWaitTimeRatio, + Dims: module.Dims{ + {ID: "Database Wait Time Ratio", Name: "db_wait_time", Div: precision}, + }, + } + sqlServiceResponseTimeChart = module.Chart{ + ID: "sql_service_response_time", + Title: "SQL Service Response Time", + Units: "seconds", + Fam: "performance", + Ctx: "oracledb.sql_service_response_time", + Type: module.Line, + Priority: prioSqlServiceResponseTime, + Dims: module.Dims{ + {ID: "SQL Service Response Time", Name: "sql_resp_time", Div: precision * 100}, + }, + } + enqueueTimeoutsChart = module.Chart{ + ID: "enqueue_timeouts", + Title: "Enqueue Timeouts", + Units: "timeouts/s", + Fam: "performance", + Ctx: "oracledb.enqueue_timeouts", + Type: module.Line, + Priority: prioEnqueueTimeouts, + Dims: module.Dims{ + {ID: "enqueue timeouts", Name: "enqueue", Algo: module.Incremental}, + }, + } + + diskIOChart = module.Chart{ + ID: "disk_io", + Title: "Disk IO", + Units: "bytes/s", + Fam: "disk", + Ctx: "oracledb.disk_io", + Type: module.Area, + Priority: prioDiskIO, + Dims: module.Dims{ + {ID: "physical read bytes", Name: "read", Algo: module.Incremental}, + {ID: "physical write bytes", Name: "written", Mul: -1, Algo: module.Incremental}, + }, + } + + diskIOPSChart = module.Chart{ + ID: "disk_physical_iops", + Title: "Disk IOPS", + Units: "operations/s", + Fam: "disk", + Ctx: "oracledb.disk_iops", + Type: module.Line, + Priority: prioDiskIOPS, + Dims: module.Dims{ + {ID: "physical reads", Name: "read", Algo: module.Incremental}, + {ID: "physical writes", Name: "write", Mul: -1, Algo: module.Incremental}, + }, + } + + sortsChart = module.Chart{ + ID: "sorts", + Title: "Sorts", + Units: "sorts/s", + Fam: "sorts", + Ctx: "oracledb.sorts", + Type: module.Line, + 
Priority: prioDiskSorts, + Dims: module.Dims{ + {ID: "sorts (memory)", Name: "memory", Algo: module.Incremental}, + {ID: "sorts (disk)", Name: "disk", Algo: module.Incremental}, + }, + } + + tableScansChart = module.Chart{ + ID: "table_scans", + Title: "Table Scans", + Units: "scans/s", + Fam: "table scans", + Ctx: "oracledb.table_scans", + Type: module.Line, + Priority: prioTableScans, + Dims: module.Dims{ + {ID: "table scans (short tables)", Name: "short_table", Algo: module.Incremental}, + {ID: "table scans (long tables)", Name: "long_table", Algo: module.Incremental}, + }, + } + + cacheHitRatioChart = module.Chart{ + ID: "cache_hit_ratio", + Title: "Cache Hit Ratio", + Units: "percent", + Fam: "cache", + Ctx: "oracledb.cache_hit_ratio", + Type: module.Line, + Priority: prioCacheHitRatio, + Dims: module.Dims{ + {ID: "Buffer Cache Hit Ratio", Name: "buffer", Div: precision}, + {ID: "Cursor Cache Hit Ratio", Name: "cursor", Div: precision}, + {ID: "Library Cache Hit Ratio", Name: "library", Div: precision}, + {ID: "Row Cache Hit Ratio", Name: "row", Div: precision}, + }, + } + globalCacheBlocksChart = module.Chart{ + ID: "global_cache_blocks", + Title: "Global Cache Blocks", + Units: "blocks/s", + Fam: "cache", + Ctx: "oracledb.global_cache_blocks", + Type: module.Line, + Priority: prioGlobalCacheBlocks, + Dims: module.Dims{ + {ID: "Global Cache Blocks Corrupted", Name: "corrupted", Algo: module.Incremental, Div: precision}, + {ID: "Global Cache Blocks Lost", Name: "lost", Algo: module.Incremental, Div: precision}, + }, + } +) + +var ( + activityChart = module.Chart{ + ID: "activity", + Title: "Activities", + Units: "events/s", + Fam: "activity", + Ctx: "oracledb.activity", + Type: module.Line, + Priority: prioActivities, + Dims: module.Dims{ + {ID: "parse count (total)", Name: "parse", Algo: module.Incremental}, + {ID: "execute count", Name: "execute", Algo: module.Incremental}, + {ID: "user commits", Name: "user_commits", Algo: module.Incremental}, + {ID: "user rollbacks", Name: "user_rollbacks", Algo: module.Incremental}, + }, + } +) + +var waitClassChartsTmpl = module.Charts{ + waitClassWaitTimeChartTmpl.Copy(), +} + +var ( + waitClassWaitTimeChartTmpl = module.Chart{ + ID: "wait_class_%s_wait_time", + Title: "Wait Class Wait Time", + Units: "milliseconds", + Fam: "performance", + Ctx: "oracledb.wait_class_wait_time", + Type: module.Line, + Priority: prioWaitClassWaitTime, + Dims: module.Dims{ + {ID: "wait_class_%s_wait_time", Name: "wait_time", Div: precision}, + }, + } +) + +var tablespaceChartsTmpl = module.Charts{ + tablespaceUtilizationChartTmpl.Copy(), + tablespaceUsageChartTmpl.Copy(), +} + +var ( + tablespaceUtilizationChartTmpl = module.Chart{ + ID: "tablespace_%s_utilization", + Title: "Tablespace Utilization", + Units: "percent", + Fam: "tablespace", + Ctx: "oracledb.tablespace_utilization", + Type: module.Area, + Priority: prioTablespaceUtilization, + Dims: module.Dims{ + {ID: "tablespace_%s_utilization", Name: "utilization", Div: precision}, + }, + } + tablespaceUsageChartTmpl = module.Chart{ + ID: "tablespace_%s_usage", + Title: "Tablespace Usage", + Units: "bytes", + Fam: "tablespace", + Ctx: "oracledb.tablespace_usage", + Type: module.Stacked, + Priority: prioTablespaceUsage, + Dims: module.Dims{ + {ID: "tablespace_%s_avail_bytes", Name: "avail"}, + {ID: "tablespace_%s_used_bytes", Name: "used"}, + }, + } +) + +func (o *OracleDB) addTablespaceCharts(tablespace string) { + charts := tablespaceChartsTmpl.Copy() + + for _, chart := range *charts { + chart.ID = 
cleanChartId(fmt.Sprintf(chart.ID, tablespace)) + chart.Labels = []module.Label{ + {Key: "tablespace", Value: tablespace}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, tablespace) + } + } + + if err := o.Charts().Add(*charts...); err != nil { + o.Warningf("failed to add tablespace '%s' charts: %v", tablespace, err) + } +} + +func (o *OracleDB) removeTablespaceChart(tablespace string) {} + +func (o *OracleDB) addWaitClassCharts(waitClass string) { + charts := waitClassChartsTmpl.Copy() + + for _, chart := range *charts { + chart.ID = cleanChartId(fmt.Sprintf(chart.ID, waitClass)) + chart.Labels = []module.Label{ + {Key: "wait_class", Value: waitClass}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, waitClass) + } + } + + if err := o.Charts().Add(*charts...); err != nil { + o.Warningf("failed to add wait class '%s' charts: %v", waitClass, err) + } +} + +func cleanChartId(id string) string { + r := strings.NewReplacer(" ", "_", ".", "_") + return r.Replace(id) +} diff --git a/src/go/plugin/go.d/modules/oracledb/collect.go b/src/go/plugin/go.d/modules/oracledb/collect.go new file mode 100644 index 000000000..08ab77e45 --- /dev/null +++ b/src/go/plugin/go.d/modules/oracledb/collect.go @@ -0,0 +1,107 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package oracledb + +import ( + "context" + "database/sql" + "fmt" + "time" +) + +const precision = 1000 + +func (o *OracleDB) collect() (map[string]int64, error) { + if o.db == nil { + if err := o.openConnection(); err != nil { + return nil, fmt.Errorf("failed to open connection: %v", err) + } + } + + mx := make(map[string]int64) + + // TODO: https://www.oracle.com/technical-resources/articles/schumacher-analysis.html + + if err := o.collectSysMetrics(mx); err != nil { + return nil, fmt.Errorf("failed to collect system metrics: %v", err) + } + if err := o.collectSysStat(mx); err != nil { + return nil, fmt.Errorf("failed to collect activities: %v", err) + } + if err := o.collectWaitClass(mx); err != nil { + return nil, fmt.Errorf("failed to collect wait time: %v", err) + } + if err := o.collectTablespace(mx); err != nil { + return nil, fmt.Errorf("failed to collect tablespace: %v", err) + } + + return mx, nil +} + +func (o *OracleDB) doQuery(query string, assign func(column, value string, lineEnd bool) error) error { + ctx, cancel := context.WithTimeout(context.Background(), o.Timeout.Duration()) + defer cancel() + + rows, err := o.db.QueryContext(ctx, query) + if err != nil { + return err + } + defer func() { _ = rows.Close() }() + + columns, err := rows.Columns() + if err != nil { + return err + } + + vs := makeValues(len(columns)) + + for rows.Next() { + if err := rows.Scan(vs...); err != nil { + return err + } + for i, l := 0, len(vs); i < l; i++ { + if err := assign(columns[i], valueToString(vs[i]), i == l-1); err != nil { + return err + } + } + } + + return rows.Err() +} + +func (o *OracleDB) openConnection() error { + db, err := sql.Open("oracle", o.DSN) + if err != nil { + return fmt.Errorf("error on sql open: %v", err) + } + + db.SetConnMaxLifetime(10 * time.Minute) + + ctx, cancel := context.WithTimeout(context.Background(), o.Timeout.Duration()) + defer cancel() + + if err := db.PingContext(ctx); err != nil { + _ = db.Close() + return fmt.Errorf("error on pinging: %v", err) + } + + o.db = db + + return nil +} + +func makeValues(size int) []any { + vs := make([]any, size) + for i := range vs { + vs[i] = &sql.NullString{} + } + return vs +} + +func valueToString(value any) string { + v, ok := 
value.(*sql.NullString) + if !ok || !v.Valid { + return "" + } + return v.String +} diff --git a/src/go/plugin/go.d/modules/oracledb/collect_sysmetric.go b/src/go/plugin/go.d/modules/oracledb/collect_sysmetric.go new file mode 100644 index 000000000..d9c69128d --- /dev/null +++ b/src/go/plugin/go.d/modules/oracledb/collect_sysmetric.go @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package oracledb + +import ( + "fmt" + "strconv" +) + +const querySysMetrics = ` +SELECT + METRIC_NAME, + VALUE +FROM + v$sysmetric +WHERE + METRIC_NAME IN ( + 'Session Count', + 'Session Limit %', + 'Average Active Sessions', + 'Buffer Cache Hit Ratio', + 'Cursor Cache Hit Ratio', + 'Library Cache Hit Ratio', + 'Row Cache Hit Ratio', + 'Global Cache Blocks Corrupted', + 'Global Cache Blocks Lost', + 'Database Wait Time Ratio', + 'SQL Service Response Time' + ) + AND + intsize_csec + = (SELECT max(intsize_csec) FROM sys.v_$sysmetric) +` + +func (o *OracleDB) collectSysMetrics(mx map[string]int64) error { + q := querySysMetrics + o.Debugf("executing query: %s", q) + + var name, val string + + return o.doQuery(q, func(column, value string, lineEnd bool) error { + switch column { + case "METRIC_NAME": + name = value + case "VALUE": + val = value + } + if lineEnd { + v, err := strconv.ParseFloat(val, 64) + if err != nil { + return fmt.Errorf("could not parse metric '%s' value '%s': %w", name, val, err) + } + mx[name] = int64(v * precision) + + } + return nil + }) +} diff --git a/src/go/plugin/go.d/modules/oracledb/collect_sysstat.go b/src/go/plugin/go.d/modules/oracledb/collect_sysstat.go new file mode 100644 index 000000000..97f6f7439 --- /dev/null +++ b/src/go/plugin/go.d/modules/oracledb/collect_sysstat.go @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package oracledb + +import ( + "fmt" + "strconv" +) + +const querySysStat = ` +SELECT + name, + value +FROM + v$sysstat +WHERE + name IN ( + 'enqueue timeouts', + 'table scans (long tables)', + 'table scans (short tables)', + 'sorts (disk)', + 'sorts (memory)', + 'physical write bytes', + 'physical read bytes', + 'physical writes', + 'physical reads', + 'logons cumulative', + 'logons current', + 'parse count (total)', + 'execute count', + 'user commits', + 'user rollbacks' + ) +` + +func (o *OracleDB) collectSysStat(mx map[string]int64) error { + q := querySysStat + o.Debugf("executing query: %s", q) + + var name, val string + + return o.doQuery(q, func(column, value string, lineEnd bool) error { + switch column { + case "NAME": + name = value + case "VALUE": + val = value + } + if lineEnd { + v, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return fmt.Errorf("could not parse activity '%s' value '%s': %w", name, val, err) + } + mx[name] = v + } + return nil + }) +} diff --git a/src/go/plugin/go.d/modules/oracledb/collect_tablespace.go b/src/go/plugin/go.d/modules/oracledb/collect_tablespace.go new file mode 100644 index 000000000..bc54ff239 --- /dev/null +++ b/src/go/plugin/go.d/modules/oracledb/collect_tablespace.go @@ -0,0 +1,106 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package oracledb + +import ( + "fmt" + "strconv" +) + +const queryTablespace = ` +SELECT + f.tablespace_name, + f.autoextensible, + SUM(f.bytes) AS allocated_bytes, + SUM(f.maxbytes) AS max_bytes, + (SUM(f.bytes) - COALESCE(SUM(fs.free_bytes), 0)) AS used_bytes +FROM + dba_data_files f +LEFT JOIN + ( + SELECT + tablespace_name, + SUM(bytes) AS free_bytes + FROM + dba_free_space + GROUP BY + tablespace_name + ) fs + ON f.tablespace_name = 
fs.tablespace_name +GROUP BY + f.tablespace_name, f.autoextensible +` + +func (o *OracleDB) collectTablespace(mx map[string]int64) error { + q := queryTablespace + o.Debugf("executing query: %s", q) + + var ts struct { + name string + autoExtent bool + allocBytes float64 + maxBytes float64 + usedBytes float64 + } + + seen := make(map[string]bool) + + err := o.doQuery(q, func(column, value string, lineEnd bool) error { + var err error + + switch column { + case "TABLESPACE_NAME": + ts.name = value + case "AUTOEXTENSIBLE": + ts.autoExtent = value == "YES" + case "ALLOCATED_BYTES": + ts.allocBytes, err = strconv.ParseFloat(value, 64) + case "MAX_BYTES": + ts.maxBytes, err = strconv.ParseFloat(value, 64) + case "USED_BYTES": + ts.usedBytes, err = strconv.ParseFloat(value, 64) + } + if err != nil { + return fmt.Errorf("could not parse column '%s' value '%s': %w", column, value, err) + } + + if lineEnd { + seen[ts.name] = true + + limit := ts.allocBytes + if ts.autoExtent { + limit = ts.maxBytes + } + + px := fmt.Sprintf("tablespace_%s_", ts.name) + + mx[px+"max_size_bytes"] = int64(limit) + mx[px+"used_bytes"] = int64(ts.usedBytes) + mx[px+"avail_bytes"] = int64(limit - ts.usedBytes) + mx[px+"utilization"] = 0 + if limit > 0 { + mx[px+"utilization"] = int64(ts.usedBytes / limit * 100 * precision) + } + } + + return nil + }) + if err != nil { + return err + } + + for name := range seen { + if !o.seenTablespaces[name] { + o.seenTablespaces[name] = true + o.addTablespaceCharts(name) + } + } + for name := range o.seenTablespaces { + if !seen[name] { + delete(o.seenTablespaces, name) + o.removeTablespaceChart(name) + } + } + + return nil +} diff --git a/src/go/plugin/go.d/modules/oracledb/collect_wait_class.go b/src/go/plugin/go.d/modules/oracledb/collect_wait_class.go new file mode 100644 index 000000000..fa706fedb --- /dev/null +++ b/src/go/plugin/go.d/modules/oracledb/collect_wait_class.go @@ -0,0 +1,60 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package oracledb + +import ( + "fmt" + "strconv" +) + +const queryWaitClass = ` +SELECT + n.wait_class AS wait_class, + round(m.time_waited / m.intsize_csec, 3) AS wait_time +FROM + v$waitclassmetric m, + v$system_wait_class n +WHERE + m.wait_class_id = n.wait_class_id + AND n.wait_class != 'Idle' +` + +func (o *OracleDB) collectWaitClass(mx map[string]int64) error { + q := queryWaitClass + o.Debugf("executing query: %s", q) + + seen := make(map[string]bool) + var wclass, wtime string + + err := o.doQuery(q, func(column, value string, lineEnd bool) error { + switch column { + case "WAIT_CLASS": + wclass = value + case "WAIT_TIME": + wtime = value + } + if lineEnd { + seen[wclass] = true + + v, err := strconv.ParseFloat(wtime, 64) + if err != nil { + return fmt.Errorf("could not parse class '%s' value '%s': %w", wclass, wtime, err) + } + mx["wait_class_"+wclass+"_wait_time"] = int64(v * precision) + } + + return nil + }) + if err != nil { + return err + } + + for name := range seen { + if !o.seenWaitClasses[name] { + o.seenWaitClasses[name] = true + o.addWaitClassCharts(name) + } + } + + return nil +} diff --git a/src/go/plugin/go.d/modules/oracledb/config_schema.json b/src/go/plugin/go.d/modules/oracledb/config_schema.json new file mode 100644 index 000000000..4e86891b6 --- /dev/null +++ b/src/go/plugin/go.d/modules/oracledb/config_schema.json @@ -0,0 +1,47 @@ +{ + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "OracleDB collector configuration.", + "type": "object", + "properties": { + "update_every": { + 
"title": "Update every", + "description": "Data collection interval, measured in seconds.", + "type": "integer", + "minimum": 1, + "default": 1 + }, + "dsn": { + "title": "DSN", + "description": "Oracle server Data Source Name specifying the connection details.", + "type": "string", + "default": "" + }, + "timeout": { + "title": "Timeout", + "description": "Query timeout, in seconds.", + "type": "number", + "minimum": 0.5, + "default": 1 + } + }, + "required": [ + "dsn" + ], + "patternProperties": { + "^name$": {} + } + }, + "uiSchema": { + "uiOptions": { + "fullPage": true + }, + "dsn": { + "ui:help": "Format is `oracle://username:password@host:port/service`.", + "ui:placeholder": "oracle://username:password@host:port/service?param1=value1&...¶mN=valueN" + }, + "timeout": { + "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)." + } + } +} diff --git a/src/go/plugin/go.d/modules/oracledb/init.go b/src/go/plugin/go.d/modules/oracledb/init.go new file mode 100644 index 000000000..c722546c0 --- /dev/null +++ b/src/go/plugin/go.d/modules/oracledb/init.go @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package oracledb + +import ( + "errors" + "net/url" + "strings" + + goora "github.com/sijms/go-ora/v2" +) + +func (o *OracleDB) validateDSN() (string, error) { + if o.DSN == "" { + return "", errors.New("dsn required but not set") + } + if _, err := goora.ParseConfig(o.DSN); err != nil { + return "", err + } + + u, err := url.Parse(o.DSN) + if err != nil { + return "", err + } + + if u.User == nil { + return u.String(), nil + } + + var user, pass string + if user = u.User.Username(); user != "" { + user = strings.Repeat("x", len(user)) + } + if pass, _ = u.User.Password(); pass != "" { + pass = strings.Repeat("x", len(pass)) + } + + u.User = url.UserPassword(user, pass) + + return u.String(), nil +} diff --git a/src/go/plugin/go.d/modules/oracledb/integrations/oracle_db.md b/src/go/plugin/go.d/modules/oracledb/integrations/oracle_db.md new file mode 100644 index 000000000..1c64dc367 --- /dev/null +++ b/src/go/plugin/go.d/modules/oracledb/integrations/oracle_db.md @@ -0,0 +1,299 @@ + + +# Oracle DB + + + + + +Plugin: go.d.plugin +Module: oracledb + + + +## Overview + +This collector monitors the health and performance of Oracle DB servers and collects general statistics, replication and user metrics. + + +It establishes a connection to the Oracle DB instance via a TCP or UNIX socket and extracts metrics from the following database tables: + +- `v$sysmetric` +- `v$sysstat` +- `v$waitclassmetric` +- `v$system_wait_class` +- `dba_data_files` +- `dba_free_space` + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +The collector can automatically detect Oracle DB instances running on: + +- Localhost, listening on port 1521 +- Within Docker containers + +> **Note**: Oracle DB requires a username and password. While Netdata can automatically discover Oracle DB instances and create data collection jobs, these jobs will fail unless you provide the correct credentials. + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. 
+ +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Oracle DB instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| oracledb.sessions | session | sessions | +| oracledb.average_active_sessions | active | sessions | +| oracledb.sessions_utilization | session_limit | percent | +| oracledb.current_logons | logons | logons | +| oracledb.logons | logons | logons/s | +| oracledb.database_wait_time_ratio | db_wait_time | percent | +| oracledb.sql_service_response_time | sql_resp_time | seconds | +| oracledb.enqueue_timeouts | enqueue | timeouts/s | +| oracledb.disk_io | read, written | bytes/s | +| oracledb.disk_iops | read, write | operations/s | +| oracledb.sorts | memory, disk | sorts/s | +| oracledb.table_scans | short_table, long_table | scans/s | +| oracledb.cache_hit_ratio | buffer, cursor, library, row | percent | +| oracledb.global_cache_blocks | corrupted, lost | blocks/s | +| oracledb.activity | parse, execute, user_commits, user_rollbacks | events/s | + +### Per tablespace + +These metrics refer to the Tablespace. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| tablespace | Tablespace name. | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| oracledb.tablespace_utilization | utilization | percent | +| oracledb.tablespace_usage | avail, used | bytes | + +### Per wait class + +These metrics refer to the [Wait Class](https://docs.oracle.com/en/database/oracle/oracle-database/19/refrn/classes-of-wait-events.html). + +Labels: + +| Label | Description | +|:-----------|:----------------| +| wait_class | [Wait Class name](https://docs.oracle.com/en/database/oracle/oracle-database/19/refrn/classes-of-wait-events.html). | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| oracledb.wait_class_wait_time | wait_time | milliseconds | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Create a read-only user for Netdata + +Follow the official instructions for your Oracle RDBMS to create a read-only user for Netdata. A typical approach looks like this: + +Connect to your Oracle database with an administrative user and execute: + +```sql +CREATE USER netdata IDENTIFIED BY <password>; + +GRANT CONNECT TO netdata; +GRANT SELECT_CATALOG_ROLE TO netdata; +``` + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/oracledb.conf`. + + +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/oracledb.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +
    Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 1 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| dsn | Oracle server DSN (Data Source Name). Format is `oracle://username:password@host:port/service?param1=value1&...&paramN=valueN`. | | yes | +| timeout | Query timeout in seconds. | 1 | no | +
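+The DSN is an ordinary URL, so reserved characters in the username or password (such as `@`, `/` or `&`) must be percent-encoded. As a rough sketch (not part of this integration; the credentials shown are made up), Go's standard library can build a correctly escaped DSN:
+
+```go
+// Illustrative sketch only: build a DSN of the documented form with
+// net/url, so special characters in the credentials are escaped.
+package main
+
+import (
+	"fmt"
+	"net/url"
+)
+
+func main() {
+	u := url.URL{
+		Scheme: "oracle",
+		User:   url.UserPassword("netdata", "p@ss/word"), // escaped on String()
+		Host:   "127.0.0.1:1521",
+		Path:   "/XE",
+	}
+	// Prints something like: oracle://netdata:p%40ss%2Fword@127.0.0.1:1521/XE
+	fmt.Println(u.String())
+}
+```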
    + +#### Examples + +##### TCP socket + +An example configuration. + +
    Config + +```yaml +jobs: + - name: local + dsn: oracle://netdata:secret@127.0.0.1:1521/XE + +``` +
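+Before creating a job, you may want to verify the DSN from a shell. The following standalone sketch (illustrative only; it assumes the same `go-ora` driver the collector uses is available) opens and pings the database roughly the way the collector's `openConnection` does:
+
+```go
+// Sketch: verify the DSN works before pointing the collector at it.
+// Mirrors openConnection in collect.go: sql.Open with the "oracle"
+// driver (registered by go-ora), then a ping with a timeout.
+package main
+
+import (
+	"context"
+	"database/sql"
+	"log"
+	"time"
+
+	_ "github.com/sijms/go-ora/v2" // registers the "oracle" driver
+)
+
+func main() {
+	db, err := sql.Open("oracle", "oracle://netdata:secret@127.0.0.1:1521/XE")
+	if err != nil {
+		log.Fatalf("sql open: %v", err)
+	}
+	defer func() { _ = db.Close() }()
+
+	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+	defer cancel()
+
+	if err := db.PingContext(ctx); err != nil {
+		log.Fatalf("ping failed: %v", err)
+	}
+	log.Println("connection OK")
+}
+```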
    + +##### TLS connection (TCPS) + +An example configuration for a TLS connection. + +
    Config + +```yaml +jobs: + - name: local + dsn: 'oracle://netdata:secret@127.0.0.1:1521/XE?ssl=true&ssl verify=true' + +``` +
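+The space inside `ssl verify` appears to be go-ora's own spelling of that option, so keep the DSN quoted. If in doubt, a DSN can be pre-checked the same way the collector's `Init` validates it, via `goora.ParseConfig` (sketch; assumes the go-ora module is available):
+
+```go
+// Sketch: pre-validate a TCPS DSN exactly like validateDSN in init.go,
+// by asking go-ora to parse it before handing it to a job.
+package main
+
+import (
+	"log"
+
+	goora "github.com/sijms/go-ora/v2"
+)
+
+func main() {
+	dsn := "oracle://netdata:secret@127.0.0.1:1521/XE?ssl=true&ssl verify=true"
+	if _, err := goora.ParseConfig(dsn); err != nil {
+		log.Fatalf("invalid DSN: %v", err)
+	}
+	log.Println("DSN parses OK")
+}
+```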
    + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Local and remote instances. + + +
    Config + +```yaml +jobs: + - name: local + dsn: oracle://netdata:secret@127.0.0.1:1521/XE + + - name: remote + dsn: oracle://netdata:secret@203.0.113.0:1521/XE + +``` +
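+If a job connects but no charts appear, it can help to run a trimmed version of one of the collector's queries by hand with the read-only user created above (illustrative sketch; adjust the DSN to your instance):
+
+```go
+// Sketch: run a trimmed version of the collector's v$sysmetric query
+// manually, to confirm the netdata user can read the system views.
+package main
+
+import (
+	"database/sql"
+	"fmt"
+	"log"
+
+	_ "github.com/sijms/go-ora/v2"
+)
+
+func main() {
+	db, err := sql.Open("oracle", "oracle://netdata:secret@127.0.0.1:1521/XE")
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer func() { _ = db.Close() }()
+
+	rows, err := db.Query("SELECT metric_name, value FROM v$sysmetric WHERE metric_name = 'Session Count'")
+	if err != nil {
+		log.Fatal(err) // e.g. ORA-00942 here suggests missing SELECT_CATALOG_ROLE
+	}
+	defer func() { _ = rows.Close() }()
+
+	for rows.Next() {
+		var name string
+		var value float64
+		if err := rows.Scan(&name, &value); err != nil {
+			log.Fatal(err)
+		}
+		fmt.Printf("%s = %v\n", name, value)
+	}
+	if err := rows.Err(); err != nil {
+		log.Fatal(err)
+	}
+}
+```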
    + + + +## Troubleshooting + +### Debug Mode + +**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature. + +To troubleshoot issues with the `oracledb` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m oracledb + ``` + +### Getting Logs + +If you're encountering problems with the `oracledb` collector, follow these steps to retrieve logs and identify potential issues: + +- **Run the command** specific to your system (systemd, non-systemd, or Docker container). +- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem. + +#### System with systemd + +Use the following command to view logs generated since the last Netdata service restart: + +```bash +journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep oracledb +``` + +#### System without systemd + +Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name: + +```bash +grep oracledb /var/log/netdata/collector.log +``` + +**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues. + +#### Docker Container + +If your Netdata runs in a Docker container named "netdata" (replace if different), use this command: + +```bash +docker logs netdata 2>&1 | grep oracledb +``` + + diff --git a/src/go/plugin/go.d/modules/oracledb/metadata.yaml b/src/go/plugin/go.d/modules/oracledb/metadata.yaml new file mode 100644 index 000000000..fbfa6221d --- /dev/null +++ b/src/go/plugin/go.d/modules/oracledb/metadata.yaml @@ -0,0 +1,271 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-oracledb + plugin_name: go.d.plugin + module_name: oracledb + monitored_instance: + name: Oracle DB + link: https://www.oracle.com/database/ + categories: + - data-collection.database-servers + icon_filename: oracle.svg + related_resources: + integrations: + list: [] + info_provided_to_referring_integrations: + description: "" + keywords: + - database + - oracle + - sql + most_popular: false + overview: + multi_instance: true + data_collection: + metrics_description: | + This collector monitors the health and performance of Oracle DB servers and collects general statistics, replication and user metrics. + method_description: | + It establishes a connection to the Oracle DB instance via a TCP or UNIX socket and extracts metrics from the following database tables: + + - `v$sysmetric` + - `v$sysstat` + - `v$waitclassmetric` + - `v$system_wait_class` + - `dba_data_files` + - `dba_free_space` + default_behavior: + auto_detection: + description: | + The collector can automatically detect Oracle DB instances running on: + + - Localhost, listening on port 1521 + - Within Docker containers + + > **Note**: Oracle DB requires a username and password. 
While Netdata can automatically discover Oracle DB instances and create data collection jobs, these jobs will fail unless you provide the correct credentials. + limits: + description: "" + performance_impact: + description: "" + additional_permissions: + description: "" + supported_platforms: + include: [] + exclude: [] + setup: + prerequisites: + list: + - title: Create a read-only user for Netdata + description: | + Follow the official instructions for your Oracle RDBMS to create a read-only user for Netdata. A typical approach looks like this: + + Connect to your Oracle database with an administrative user and execute: + + ```sql + CREATE USER netdata IDENTIFIED BY <password>; + + GRANT CONNECT TO netdata; + GRANT SELECT_CATALOG_ROLE TO netdata; + ``` + configuration: + file: + name: go.d/oracledb.conf + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. + folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 1 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + - name: dsn + description: Oracle server DSN (Data Source Name). Format is `oracle://username:password@host:port/service?param1=value1&...&paramN=valueN`. + default_value: "" + required: true + - name: timeout + description: Query timeout in seconds. + default_value: 1 + required: false + examples: + folding: + title: Config + enabled: true + list: + - name: TCP socket + description: An example configuration. + config: | + jobs: + - name: local + dsn: oracle://netdata:secret@127.0.0.1:1521/XE + - name: TLS connection (TCPS) + description: An example configuration for a TLS connection. + config: | + jobs: + - name: local + dsn: 'oracle://netdata:secret@127.0.0.1:1521/XE?ssl=true&ssl verify=true' + - name: Multi-instance + description: | + > **Note**: When you define multiple jobs, their names must be unique. + + Local and remote instances. + config: | + jobs: + - name: local + dsn: oracle://netdata:secret@127.0.0.1:1521/XE + + - name: remote + dsn: oracle://netdata:secret@203.0.113.0:1521/XE + troubleshooting: + problems: + list: [] + alerts: [] + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: global + description: These metrics refer to the entire monitored application.
+ labels: [] + metrics: + - name: oracledb.sessions + description: Sessions + unit: sessions + chart_type: line + dimensions: + - name: session + - name: oracledb.average_active_sessions + description: Average Active Sessions + unit: sessions + chart_type: line + dimensions: + - name: active + - name: oracledb.sessions_utilization + description: Sessions Limit % + unit: percent + chart_type: area + dimensions: + - name: session_limit + - name: oracledb.current_logons + description: Current Logons + unit: logons + chart_type: line + dimensions: + - name: logons + - name: oracledb.logons + description: Logons + unit: logons/s + chart_type: line + dimensions: + - name: logons + - name: oracledb.database_wait_time_ratio + description: Database Wait Time Ratio + unit: percent + chart_type: area + dimensions: + - name: db_wait_time + - name: oracledb.sql_service_response_time + description: SQL Service Response Time + unit: seconds + chart_type: line + dimensions: + - name: sql_resp_time + - name: oracledb.enqueue_timeouts + description: Enqueue Timeouts + unit: timeouts/s + chart_type: line + dimensions: + - name: enqueue + - name: oracledb.disk_io + description: Disk IO + unit: bytes/s + chart_type: area + dimensions: + - name: read + - name: written + - name: oracledb.disk_iops + description: Disk IOPS + unit: operations/s + chart_type: line + dimensions: + - name: read + - name: write + - name: oracledb.sorts + description: Sorts + unit: sorts/s + chart_type: line + dimensions: + - name: memory + - name: disk + - name: oracledb.table_scans + description: Table Scans + unit: scans/s + chart_type: line + dimensions: + - name: short_table + - name: long_table + - name: oracledb.cache_hit_ratio + description: Cache Hit Ratio + unit: percent + chart_type: line + dimensions: + - name: buffer + - name: cursor + - name: library + - name: row + - name: oracledb.global_cache_blocks + description: Global Cache Blocks + unit: blocks/s + chart_type: line + dimensions: + - name: corrupted + - name: lost + - name: oracledb.activity + description: Activities + unit: events/s + chart_type: line + dimensions: + - name: parse + - name: execute + - name: user_commits + - name: user_rollbacks + - name: tablespace + description: These metrics refer to the Tablespace. + labels: + - name: tablespace + description: Tablespace name. + metrics: + - name: oracledb.tablespace_utilization + description: Tablespace Utilization + unit: percent + chart_type: area + dimensions: + - name: utilization + - name: oracledb.tablespace_usage + description: Tablespace Usage + unit: bytes + chart_type: stacked + dimensions: + - name: avail + - name: used + - name: wait class + description: These metrics refer to the [Wait Class](https://docs.oracle.com/en/database/oracle/oracle-database/19/refrn/classes-of-wait-events.html). + labels: + - name: wait_class + description: '[Wait Class name](https://docs.oracle.com/en/database/oracle/oracle-database/19/refrn/classes-of-wait-events.html).' 
+ metrics: + - name: oracledb.wait_class_wait_time + description: Wait Class Wait Time + unit: milliseconds + chart_type: line + dimensions: + - name: wait_time diff --git a/src/go/plugin/go.d/modules/oracledb/oracledb.go b/src/go/plugin/go.d/modules/oracledb/oracledb.go new file mode 100644 index 000000000..744fcd742 --- /dev/null +++ b/src/go/plugin/go.d/modules/oracledb/oracledb.go @@ -0,0 +1,109 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package oracledb + +import ( + "database/sql" + _ "embed" + "errors" + "fmt" + "time" + + "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("oracledb", module.Creator{ + JobConfigSchema: configSchema, + Create: func() module.Module { return New() }, + Config: func() any { return &Config{} }, + }) +} + +func New() *OracleDB { + return &OracleDB{ + Config: Config{ + Timeout: confopt.Duration(time.Second * 2), + }, + charts: globalCharts.Copy(), + seenTablespaces: make(map[string]bool), + seenWaitClasses: make(map[string]bool), + } +} + +type Config struct { + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + DSN string `yaml:"dsn" json:"dsn"` + Timeout confopt.Duration `yaml:"timeout,omitempty" json:"timeout"` +} + +type OracleDB struct { + module.Base + Config `yaml:",inline" json:""` + + // runtime state, kept out of the user-facing Config + charts *module.Charts + + db *sql.DB + + publicDSN string // with hidden username/password + + seenTablespaces map[string]bool + seenWaitClasses map[string]bool +} + +func (o *OracleDB) Configuration() any { + return o.Config +} + +func (o *OracleDB) Init() error { + dsn, err := o.validateDSN() + if err != nil { + return fmt.Errorf("invalid oracle DSN: %w", err) + } + + o.publicDSN = dsn + + return nil +} + +func (o *OracleDB) Check() error { + mx, err := o.collect() + if err != nil { + return fmt.Errorf("failed to collect metrics [%s]: %w", o.publicDSN, err) + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + + return nil +} + +func (o *OracleDB) Charts() *module.Charts { + return o.charts +} + +func (o *OracleDB) Collect() map[string]int64 { + mx, err := o.collect() + if err != nil { + o.Error(fmt.Sprintf("failed to collect metrics [%s]: %s", o.publicDSN, err)) + } + + if len(mx) == 0 { + return nil + } + + return mx +} + +func (o *OracleDB) Cleanup() { + if o.db != nil { + if err := o.db.Close(); err != nil { + o.Errorf("cleanup: error on closing connection [%s]: %v", o.publicDSN, err) + } + o.db = nil + } +} diff --git a/src/go/plugin/go.d/modules/oracledb/oracledb_test.go b/src/go/plugin/go.d/modules/oracledb/oracledb_test.go new file mode 100644 index 000000000..93efd4780 --- /dev/null +++ b/src/go/plugin/go.d/modules/oracledb/oracledb_test.go @@ -0,0 +1,317 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package oracledb + +import ( + "bufio" + "bytes" + "database/sql/driver" + "errors" + "fmt" + "os" + "strings" + "testing" + + "github.com/DATA-DOG/go-sqlmock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" +) + +var ( + dataConfigJSON, _ = os.ReadFile("testdata/config.json") + dataConfigYAML, _ = os.ReadFile("testdata/config.yaml") + + dataVer2130XESysMetric, _ = os.ReadFile("testdata/v21.3.0-xe/sysmetric.txt") + dataVer2130XESysStat, _ = os.ReadFile("testdata/v21.3.0-xe/sysstat.txt") + dataVer2130XETablespace, _ = 
os.ReadFile("testdata/v21.3.0-xe/tablespace.txt") + dataVer2130XEWaitClass, _ = os.ReadFile("testdata/v21.3.0-xe/wait_class.txt") +) + +func Test_testDataIsValid(t *testing.T) { + for name, data := range map[string][]byte{ + "dataConfigJSON": dataConfigJSON, + "dataConfigYAML": dataConfigYAML, + "dataVer2130XESysMetric": dataVer2130XESysMetric, + "dataVer2130XESysStat": dataVer2130XESysStat, + "dataVer2130XETablespace": dataVer2130XETablespace, + "dataVer2130XEWaitClass": dataVer2130XEWaitClass, + } { + require.NotNil(t, data, name) + if !strings.HasPrefix(name, "dataConfig") { + _, err := prepareMockRows(data) + require.NoError(t, err, fmt.Sprintf("prepare mock rows: %s", name)) + } + } +} + +func TestOracleDB_ConfigurationSerialize(t *testing.T) { + module.TestConfigurationSerialize(t, &OracleDB{}, dataConfigJSON, dataConfigYAML) +} + +func TestOracleDB_Charts(t *testing.T) { + assert.NotNil(t, New().Charts()) +} + +func TestOracleDB_Init(t *testing.T) { + tests := map[string]struct { + config Config + wantFail bool + }{ + "empty DSN": { + config: Config{DSN: ""}, + wantFail: true, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + ora := New() + ora.Config = test.config + + if test.wantFail { + assert.Error(t, ora.Init()) + } else { + assert.NoError(t, ora.Init()) + } + }) + } +} + +func TestOracleDB_Cleanup(t *testing.T) { + tests := map[string]func(t *testing.T) (ora *OracleDB, cleanup func()){ + "db connection not initialized": func(t *testing.T) (ora *OracleDB, cleanup func()) { + return New(), func() {} + }, + "db connection initialized": func(t *testing.T) (ora *OracleDB, cleanup func()) { + db, mock, err := sqlmock.New() + require.NoError(t, err) + + mock.ExpectClose() + ora = New() + ora.db = db + cleanup = func() { _ = db.Close() } + + return ora, cleanup + }, + } + + for name, prepare := range tests { + t.Run(name, func(t *testing.T) { + ora, cleanup := prepare(t) + defer cleanup() + + assert.NotPanics(t, ora.Cleanup) + assert.Nil(t, ora.db) + }) + } + +} + +func TestOracleDB_Check(t *testing.T) { + tests := map[string]struct { + prepareMock func(t *testing.T, m sqlmock.Sqlmock) + wantFail bool + }{ + "success on all queries": { + wantFail: false, + prepareMock: func(t *testing.T, m sqlmock.Sqlmock) { + mockExpect(t, m, querySysMetrics, dataVer2130XESysMetric) + mockExpect(t, m, querySysStat, dataVer2130XESysStat) + mockExpect(t, m, queryWaitClass, dataVer2130XEWaitClass) + mockExpect(t, m, queryTablespace, dataVer2130XETablespace) + }, + }, + "fail if any query fails": { + wantFail: true, + prepareMock: func(t *testing.T, m sqlmock.Sqlmock) { + mockExpectErr(m, querySysMetrics) + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + db, mock, err := sqlmock.New( + sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual), + ) + require.NoError(t, err) + ora := New() + ora.DSN = "oracle://user:pass@127.0.0.1:32001/XE" + ora.db = db + defer func() { _ = db.Close() }() + + require.NoError(t, ora.Init()) + + test.prepareMock(t, mock) + + if test.wantFail { + assert.Error(t, ora.Check()) + } else { + assert.NoError(t, ora.Check()) + } + assert.NoError(t, mock.ExpectationsWereMet()) + }) + } +} + +func TestOracleDB_Collect(t *testing.T) { + tests := map[string]struct { + prepareMock func(t *testing.T, m sqlmock.Sqlmock) + wantCharts int + wantMetrics map[string]int64 + }{ + "success on all queries": { + prepareMock: func(t *testing.T, m sqlmock.Sqlmock) { + mockExpect(t, m, querySysMetrics, dataVer2130XESysMetric) + 
mockExpect(t, m, querySysStat, dataVer2130XESysStat) + mockExpect(t, m, queryWaitClass, dataVer2130XEWaitClass) + mockExpect(t, m, queryTablespace, dataVer2130XETablespace) + }, + wantCharts: len(globalCharts) + len(tablespaceChartsTmpl)*4 + len(waitClassChartsTmpl)*10, + wantMetrics: map[string]int64{ + "Average Active Sessions": 93, + "Buffer Cache Hit Ratio": 100000, + "Cursor Cache Hit Ratio": 377385, + "Database Wait Time Ratio": 0, + "Global Cache Blocks Corrupted": 0, + "Global Cache Blocks Lost": 0, + "Library Cache Hit Ratio": 98779, + "Row Cache Hit Ratio": 99640, + "SQL Service Response Time": 247, + "Session Count": 142000, + "Session Limit %": 7274, + "enqueue timeouts": 229, + "execute count": 4066130, + "logons cumulative": 8717, + "logons current": 93, + "parse count (total)": 1251128, + "physical read bytes": 538132480, + "physical reads": 65690, + "physical write bytes": 785661952, + "physical writes": 95906, + "sorts (disk)": 0, + "sorts (memory)": 220071, + "table scans (long tables)": 998, + "table scans (short tables)": 798515, + "tablespace_SYSAUX_avail_bytes": 215023616, + "tablespace_SYSAUX_max_size_bytes": 912261120, + "tablespace_SYSAUX_used_bytes": 697237504, + "tablespace_SYSAUX_utilization": 76429, + "tablespace_SYSTEM_avail_bytes": 5898240, + "tablespace_SYSTEM_max_size_bytes": 1415577600, + "tablespace_SYSTEM_used_bytes": 1409679360, + "tablespace_SYSTEM_utilization": 99583, + "tablespace_UNDOTBS1_avail_bytes": 114032640, + "tablespace_UNDOTBS1_max_size_bytes": 125829120, + "tablespace_UNDOTBS1_used_bytes": 11796480, + "tablespace_UNDOTBS1_utilization": 9375, + "tablespace_USERS_avail_bytes": 2424832, + "tablespace_USERS_max_size_bytes": 5242880, + "tablespace_USERS_used_bytes": 2818048, + "tablespace_USERS_utilization": 53750, + "user commits": 16056, + "user rollbacks": 2, + "wait_class_Administrative_wait_time": 0, + "wait_class_Application_wait_time": 0, + "wait_class_Commit_wait_time": 0, + "wait_class_Concurrency_wait_time": 0, + "wait_class_Configuration_wait_time": 0, + "wait_class_Network_wait_time": 0, + "wait_class_Other_wait_time": 0, + "wait_class_Scheduler_wait_time": 0, + "wait_class_System I/O_wait_time": 4, + "wait_class_User I/O_wait_time": 0, + }, + }, + "fail if any query fails": { + prepareMock: func(t *testing.T, m sqlmock.Sqlmock) { + mockExpectErr(m, querySysMetrics) + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + db, mock, err := sqlmock.New( + sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual), + ) + require.NoError(t, err) + ora := New() + ora.DSN = "oracle://user:pass@127.0.0.1:32001/XE" + ora.db = db + defer func() { _ = db.Close() }() + + require.NoError(t, ora.Init()) + + test.prepareMock(t, mock) + + mx := ora.Collect() + + require.Equal(t, test.wantMetrics, mx) + if len(test.wantMetrics) > 0 { + assert.Equal(t, test.wantCharts, len(*ora.Charts()), "wantCharts") + module.TestMetricsHasAllChartsDims(t, ora.Charts(), mx) + } + + assert.NoError(t, mock.ExpectationsWereMet()) + }) + } +} + +func mockExpect(t *testing.T, mock sqlmock.Sqlmock, query string, rows []byte) { + mockRows, err := prepareMockRows(rows) + require.NoError(t, err) + mock.ExpectQuery(query).WillReturnRows(mockRows).RowsWillBeClosed() +} + +func mockExpectErr(mock sqlmock.Sqlmock, query string) { + mock.ExpectQuery(query).WillReturnError(fmt.Errorf("mock error (%s)", query)) +} + +func prepareMockRows(data []byte) (*sqlmock.Rows, error) { + if len(data) == 0 { + return sqlmock.NewRows(nil), nil + } + + r := 
bytes.NewReader(data) + sc := bufio.NewScanner(r) + + var numColumns int + var rows *sqlmock.Rows + + for sc.Scan() { + line := strings.TrimSpace(sc.Text()) + if line == "" || strings.HasPrefix(line, "-") { + continue + } + + parts := strings.Split(line, "|") + for i, v := range parts { + parts[i] = strings.TrimSpace(v) + } + + if rows == nil { + numColumns = len(parts) + rows = sqlmock.NewRows(parts) + continue + } + + if len(parts) != numColumns { + return nil, fmt.Errorf("prepareMockRows(): columns != values (%d/%d)", numColumns, len(parts)) + } + + values := make([]driver.Value, len(parts)) + for i, v := range parts { + values[i] = v + } + rows.AddRow(values...) + } + + if rows == nil { + return nil, errors.New("prepareMockRows(): nil rows result") + } + + return rows, sc.Err() +} diff --git a/src/go/plugin/go.d/modules/oracledb/testdata/config.json b/src/go/plugin/go.d/modules/oracledb/testdata/config.json new file mode 100644 index 000000000..ed8b72dcb --- /dev/null +++ b/src/go/plugin/go.d/modules/oracledb/testdata/config.json @@ -0,0 +1,5 @@ +{ + "update_every": 123, + "dsn": "ok", + "timeout": 123.123 +} diff --git a/src/go/plugin/go.d/modules/oracledb/testdata/config.yaml b/src/go/plugin/go.d/modules/oracledb/testdata/config.yaml new file mode 100644 index 000000000..caff49039 --- /dev/null +++ b/src/go/plugin/go.d/modules/oracledb/testdata/config.yaml @@ -0,0 +1,3 @@ +update_every: 123 +dsn: "ok" +timeout: 123.123 diff --git a/src/go/plugin/go.d/modules/oracledb/testdata/v21.3.0-xe/sysmetric.txt b/src/go/plugin/go.d/modules/oracledb/testdata/v21.3.0-xe/sysmetric.txt new file mode 100644 index 000000000..811fd4133 --- /dev/null +++ b/src/go/plugin/go.d/modules/oracledb/testdata/v21.3.0-xe/sysmetric.txt @@ -0,0 +1,13 @@ +METRIC_NAME | VALUE +---------------------------------------------------------------- ---------- +Buffer Cache Hit Ratio | 100 +Cursor Cache Hit Ratio | 377.385159 +Global Cache Blocks Corrupted | 0 +Global Cache Blocks Lost | 0 +SQL Service Response Time | .24775561 +Database Wait Time Ratio | 0 +Row Cache Hit Ratio | 99.6407723 +Library Cache Hit Ratio | 98.7795576 +Session Limit % | 7.27459016 +Session Count | 142 +Average Active Sessions | .093455298 diff --git a/src/go/plugin/go.d/modules/oracledb/testdata/v21.3.0-xe/sysstat.txt b/src/go/plugin/go.d/modules/oracledb/testdata/v21.3.0-xe/sysstat.txt new file mode 100644 index 000000000..920fda0ea --- /dev/null +++ b/src/go/plugin/go.d/modules/oracledb/testdata/v21.3.0-xe/sysstat.txt @@ -0,0 +1,17 @@ +NAME | VALUE +---------------------------------------------------------------- ---------- +logons cumulative | 8717 +logons current | 93 +user commits | 16056 +user rollbacks | 2 +enqueue timeouts | 229 +physical reads | 65690 +physical read bytes | 538132480 +physical writes | 95906 +physical write bytes | 785661952 +table scans (short tables) | 798515 +table scans (long tables) | 998 +parse count (total) | 1251128 +execute count | 4066130 +sorts (memory) | 220071 +sorts (disk) | 0 diff --git a/src/go/plugin/go.d/modules/oracledb/testdata/v21.3.0-xe/tablespace.txt b/src/go/plugin/go.d/modules/oracledb/testdata/v21.3.0-xe/tablespace.txt new file mode 100644 index 000000000..556472e10 --- /dev/null +++ b/src/go/plugin/go.d/modules/oracledb/testdata/v21.3.0-xe/tablespace.txt @@ -0,0 +1,6 @@ +TABLESPACE_NAME | AUT | ALLOCATED_BYTES | MAX_BYTES | USED_BYTES +------------------------------ --- --------------- ---------- ---------- +SYSTEM | YES | 1415577600 | 3.4360E+10 | 1409679360 +SYSAUX | YES | 912261120 | 
3.4360E+10 | 697237504 +UNDOTBS1 | YES | 125829120 | 3.4360E+10 | 11796480 +USERS | YES | 5242880 | 3.4360E+10 | 2818048 diff --git a/src/go/plugin/go.d/modules/oracledb/testdata/v21.3.0-xe/wait_class.txt b/src/go/plugin/go.d/modules/oracledb/testdata/v21.3.0-xe/wait_class.txt new file mode 100644 index 000000000..4416570e8 --- /dev/null +++ b/src/go/plugin/go.d/modules/oracledb/testdata/v21.3.0-xe/wait_class.txt @@ -0,0 +1,12 @@ +WAIT_CLASS | WAIT_TIME +---------------------------------------------------------------- ---------- +Other | 0 +Application | 0 +Configuration | 0 +Administrative | 0 +Concurrency | 0 +Commit | 0 +Network | 0 +User I/O | 0 +System I/O | .004 +Scheduler | 0 diff --git a/src/go/plugin/go.d/modules/pgbouncer/config_schema.json b/src/go/plugin/go.d/modules/pgbouncer/config_schema.json index d8d08bc51..386e9b507 100644 --- a/src/go/plugin/go.d/modules/pgbouncer/config_schema.json +++ b/src/go/plugin/go.d/modules/pgbouncer/config_schema.json @@ -28,7 +28,6 @@ "required": [ "dsn" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/pgbouncer/integrations/pgbouncer.md b/src/go/plugin/go.d/modules/pgbouncer/integrations/pgbouncer.md index 1b5e6e719..17fe0ec99 100644 --- a/src/go/plugin/go.d/modules/pgbouncer/integrations/pgbouncer.md +++ b/src/go/plugin/go.d/modules/pgbouncer/integrations/pgbouncer.md @@ -150,8 +150,8 @@ To create the `netdata` user: The configuration file name for this integration is `go.d/pgbouncer.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/pgbouncer/pgbouncer.go b/src/go/plugin/go.d/modules/pgbouncer/pgbouncer.go index fbe554dc3..2c03f5a7f 100644 --- a/src/go/plugin/go.d/modules/pgbouncer/pgbouncer.go +++ b/src/go/plugin/go.d/modules/pgbouncer/pgbouncer.go @@ -6,10 +6,11 @@ import ( "database/sql" _ "embed" "errors" + "fmt" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/blang/semver/v4" _ "github.com/jackc/pgx/v4/stdlib" @@ -29,7 +30,7 @@ func init() { func New() *PgBouncer { return &PgBouncer{ Config: Config{ - Timeout: web.Duration(time.Second), + Timeout: confopt.Duration(time.Second), DSN: "postgres://postgres:postgres@127.0.0.1:6432/pgbouncer", }, charts: globalCharts.Copy(), @@ -41,9 +42,9 @@ func New() *PgBouncer { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - DSN string `yaml:"dsn" json:"dsn"` - Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + DSN string `yaml:"dsn" json:"dsn"` + Timeout confopt.Duration `yaml:"timeout,omitempty" json:"timeout"` } type PgBouncer struct { @@ -69,8 +70,7 @@ func (p *PgBouncer) Configuration() any { func (p *PgBouncer) Init() error { err := p.validateConfig() if err != nil { - p.Errorf("config validation: %v", err) - return err + return fmt.Errorf("config validation: %v", err) } return nil @@ -79,7 +79,6 @@ func (p *PgBouncer) Init() error { func (p *PgBouncer) Check() error { mx, err := p.collect() if err != nil { - p.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/phpdaemon/client.go b/src/go/plugin/go.d/modules/phpdaemon/client.go deleted file mode 100644 index bc54265d3..000000000 --- a/src/go/plugin/go.d/modules/phpdaemon/client.go +++ /dev/null @@ -1,77 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package phpdaemon - -import ( - "encoding/json" - "fmt" - "io" - "net/http" - - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" -) - -type decodeFunc func(dst interface{}, reader io.Reader) error - -func decodeJson(dst interface{}, reader io.Reader) error { return json.NewDecoder(reader).Decode(dst) } - -func newAPIClient(httpClient *http.Client, request web.Request) *client { - return &client{ - httpClient: httpClient, - request: request, - } -} - -type client struct { - httpClient *http.Client - request web.Request -} - -func (c *client) queryFullStatus() (*FullStatus, error) { - var status FullStatus - err := c.doWithDecode(&status, decodeJson, c.request) - if err != nil { - return nil, err - } - - return &status, nil -} - -func (c *client) doWithDecode(dst interface{}, decode decodeFunc, request web.Request) error { - req, err := web.NewHTTPRequest(request) - if err != nil { - return fmt.Errorf("error on creating http request to %s : %v", request.URL, err) - } - - resp, err := c.doOK(req) - defer closeBody(resp) - if err != nil { - return err - } - - if err = decode(dst, resp.Body); err != nil { - return fmt.Errorf("error on parsing response from %s : %v", req.URL, err) - } - - return nil -} - -func (c *client) doOK(req *http.Request) (*http.Response, error) { - resp, err := c.httpClient.Do(req) - if err != nil { - return resp, fmt.Errorf("error on request : %v", err) - } - - if resp.StatusCode != http.StatusOK { - return resp, 
fmt.Errorf("%s returned HTTP status %d", req.URL, resp.StatusCode) - } - - return resp, err -} - -func closeBody(resp *http.Response) { - if resp != nil && resp.Body != nil { - _, _ = io.Copy(io.Discard, resp.Body) - _ = resp.Body.Close() - } -} diff --git a/src/go/plugin/go.d/modules/phpdaemon/collect.go b/src/go/plugin/go.d/modules/phpdaemon/collect.go index 9be718ea9..2185f18a6 100644 --- a/src/go/plugin/go.d/modules/phpdaemon/collect.go +++ b/src/go/plugin/go.d/modules/phpdaemon/collect.go @@ -2,18 +2,61 @@ package phpdaemon -import "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm" +import ( + "fmt" -func (p *PHPDaemon) collect() (map[string]int64, error) { - s, err := p.client.queryFullStatus() + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" +) + +// https://github.com/kakserpom/phpdaemon/blob/master/PHPDaemon/Core/Daemon.php +// see getStateOfWorkers() + +type fullStatus struct { + // Alive is sum of Idle, Busy and Reloading + Alive int64 `json:"alive" stm:"alive"` + Shutdown int64 `json:"shutdown" stm:"shutdown"` + + // Idle that the worker is not in the middle of execution valuable callback (e.g. request) at this moment of time. + // It does not mean that worker not have any pending operations. + // Idle is sum of Preinit, Init and Initialized. + Idle int64 `json:"idle" stm:"idle"` + // Busy means that the worker is in the middle of execution valuable callback. + Busy int64 `json:"busy" stm:"busy"` + Reloading int64 `json:"reloading" stm:"reloading"` + + Preinit int64 `json:"preinit" stm:"preinit"` + // Init means that worker is starting right now. + Init int64 `json:"init" stm:"init"` + // Initialized means that the worker is in Idle state. + Initialized int64 `json:"initialized" stm:"initialized"` + + Uptime *int64 `json:"uptime" stm:"uptime"` +} +func (p *PHPDaemon) collect() (map[string]int64, error) { + req, err := web.NewHTTPRequest(p.RequestConfig) if err != nil { + return nil, fmt.Errorf("failed to create HTTP request to '%s': %w", p.URL, err) + } + + var st fullStatus + + if err := web.DoHTTP(p.httpClient).RequestJSON(req, &st); err != nil { return nil, err } // https://github.com/kakserpom/phpdaemon/blob/master/PHPDaemon/Core/Daemon.php // see getStateOfWorkers() - s.Initialized = s.Idle - (s.Init + s.Preinit) + st.Initialized = st.Idle - (st.Init + st.Preinit) + + mx := stm.ToMap(st) + + p.once.Do(func() { + if _, ok := mx["uptime"]; ok { + _ = p.charts.Add(uptimeChart.Copy()) + } + }) - return stm.ToMap(s), nil + return mx, nil } diff --git a/src/go/plugin/go.d/modules/phpdaemon/config_schema.json b/src/go/plugin/go.d/modules/phpdaemon/config_schema.json index a154aaa59..77f7d52a5 100644 --- a/src/go/plugin/go.d/modules/phpdaemon/config_schema.json +++ b/src/go/plugin/go.d/modules/phpdaemon/config_schema.json @@ -105,7 +105,6 @@ "required": [ "url" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/phpdaemon/init.go b/src/go/plugin/go.d/modules/phpdaemon/init.go deleted file mode 100644 index ec9925b7a..000000000 --- a/src/go/plugin/go.d/modules/phpdaemon/init.go +++ /dev/null @@ -1,27 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package phpdaemon - -import ( - "errors" - - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" -) - -func (p *PHPDaemon) validateConfig() error { - if p.URL == "" { - return errors.New("url not set") - } - if _, err := web.NewHTTPRequest(p.Request); err != nil { - return err - 
} - return nil -} - -func (p *PHPDaemon) initClient() (*client, error) { - httpClient, err := web.NewHTTPClient(p.Client) - if err != nil { - return nil, err - } - return newAPIClient(httpClient, p.Request), nil -} diff --git a/src/go/plugin/go.d/modules/phpdaemon/integrations/phpdaemon.md b/src/go/plugin/go.d/modules/phpdaemon/integrations/phpdaemon.md index 11445455f..12fb5f407 100644 --- a/src/go/plugin/go.d/modules/phpdaemon/integrations/phpdaemon.md +++ b/src/go/plugin/go.d/modules/phpdaemon/integrations/phpdaemon.md @@ -164,8 +164,8 @@ class FullStatusRequest extends Generic { The configuration file name for this integration is `go.d/phpdaemon.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/phpdaemon/metrics.go b/src/go/plugin/go.d/modules/phpdaemon/metrics.go deleted file mode 100644 index 1be3c0be3..000000000 --- a/src/go/plugin/go.d/modules/phpdaemon/metrics.go +++ /dev/null @@ -1,33 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package phpdaemon - -// https://github.com/kakserpom/phpdaemon/blob/master/PHPDaemon/Core/Daemon.php -// see getStateOfWorkers() - -// WorkerState represents phpdaemon worker state. -type WorkerState struct { - // Alive is sum of Idle, Busy and Reloading - Alive int64 `stm:"alive"` - Shutdown int64 `stm:"shutdown"` - - // Idle that the worker is not in the middle of execution valuable callback (e.g. request) at this moment of time. - // It does not mean that worker not have any pending operations. - // Idle is sum of Preinit, Init and Initialized. - Idle int64 `stm:"idle"` - // Busy means that the worker is in the middle of execution valuable callback. - Busy int64 `stm:"busy"` - Reloading int64 `stm:"reloading"` - - Preinit int64 `stm:"preinit"` - // Init means that worker is starting right now. - Init int64 `stm:"init"` - // Initialized means that the worker is in Idle state. - Initialized int64 `stm:"initialized"` -} - -// FullStatus FullStatus. 
-type FullStatus struct { - WorkerState `stm:""` - Uptime *int64 `stm:"uptime"` -} diff --git a/src/go/plugin/go.d/modules/phpdaemon/phpdaemon.go b/src/go/plugin/go.d/modules/phpdaemon/phpdaemon.go index d9af10591..95af04a20 100644 --- a/src/go/plugin/go.d/modules/phpdaemon/phpdaemon.go +++ b/src/go/plugin/go.d/modules/phpdaemon/phpdaemon.go @@ -5,9 +5,13 @@ package phpdaemon import ( _ "embed" "errors" + "fmt" + "net/http" + "sync" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -25,12 +29,12 @@ func init() { func New() *PHPDaemon { return &PHPDaemon{ Config: Config{ - HTTP: web.HTTP{ - Request: web.Request{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{ URL: "http://127.0.0.1:8509/FullStatus", }, - Client: web.Client{ - Timeout: web.Duration(time.Second), + ClientConfig: web.ClientConfig{ + Timeout: confopt.Duration(time.Second), }, }, }, @@ -39,8 +43,8 @@ func New() *PHPDaemon { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - web.HTTP `yaml:",inline" json:""` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + web.HTTPConfig `yaml:",inline" json:""` } type PHPDaemon struct { @@ -48,8 +52,9 @@ type PHPDaemon struct { Config `yaml:",inline" json:""` charts *Charts + once sync.Once - client *client + httpClient *http.Client } func (p *PHPDaemon) Configuration() any { @@ -57,17 +62,15 @@ func (p *PHPDaemon) Configuration() any { } func (p *PHPDaemon) Init() error { - if err := p.validateConfig(); err != nil { - p.Error(err) - return err + if p.URL == "" { + return errors.New("phpDaemon URL is required but not set") } - c, err := p.initClient() + httpClient, err := web.NewHTTPClient(p.ClientConfig) if err != nil { - p.Error(err) - return err + return fmt.Errorf("failed to initialize http client: %w", err) } - p.client = c + p.httpClient = httpClient p.Debugf("using URL %s", p.URL) p.Debugf("using timeout: %s", p.Timeout) @@ -78,17 +81,13 @@ func (p *PHPDaemon) Init() error { func (p *PHPDaemon) Check() error { mx, err := p.collect() if err != nil { - p.Error(err) return err } + if len(mx) == 0 { return errors.New("no metrics collected") } - if _, ok := mx["uptime"]; ok { - _ = p.charts.Add(uptimeChart.Copy()) - } - return nil } @@ -98,7 +97,6 @@ func (p *PHPDaemon) Charts() *Charts { func (p *PHPDaemon) Collect() map[string]int64 { mx, err := p.collect() - if err != nil { p.Error(err) return nil @@ -108,7 +106,7 @@ func (p *PHPDaemon) Collect() map[string]int64 { } func (p *PHPDaemon) Cleanup() { - if p.client != nil && p.client.httpClient != nil { - p.client.httpClient.CloseIdleConnections() + if p.httpClient != nil { + p.httpClient.CloseIdleConnections() } } diff --git a/src/go/plugin/go.d/modules/phpdaemon/phpdaemon_test.go b/src/go/plugin/go.d/modules/phpdaemon/phpdaemon_test.go index e9e35af6d..73099ff19 100644 --- a/src/go/plugin/go.d/modules/phpdaemon/phpdaemon_test.go +++ b/src/go/plugin/go.d/modules/phpdaemon/phpdaemon_test.go @@ -39,7 +39,6 @@ func TestPHPDaemon_Init(t *testing.T) { job := New() require.NoError(t, job.Init()) - assert.NotNil(t, job.client) } func TestPHPDaemon_Check(t *testing.T) { diff --git a/src/go/plugin/go.d/modules/phpfpm/client.go b/src/go/plugin/go.d/modules/phpfpm/client.go index 4e8e8cec8..0789dd5b8 100644 --- a/src/go/plugin/go.d/modules/phpfpm/client.go +++ b/src/go/plugin/go.d/modules/phpfpm/client.go @@ -53,11 
+53,11 @@ type client interface { type httpClient struct { client *http.Client - req web.Request + req web.RequestConfig dec decoder } -func newHTTPClient(c *http.Client, r web.Request) (*httpClient, error) { +func newHTTPClient(c *http.Client, r web.RequestConfig) (*httpClient, error) { u, err := url.Parse(r.URL) if err != nil { return nil, err @@ -77,25 +77,15 @@ func newHTTPClient(c *http.Client, r web.Request) (*httpClient, error) { func (c *httpClient) getStatus() (*status, error) { req, err := web.NewHTTPRequest(c.req) if err != nil { - return nil, fmt.Errorf("error on creating HTTP request: %v", err) - } - - resp, err := c.client.Do(req) - if err != nil { - return nil, fmt.Errorf("error on HTTP request to '%s': %v", req.URL, err) - } - defer func() { - _, _ = io.Copy(io.Discard, resp.Body) - _ = resp.Body.Close() - }() - - if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("%s returned HTTP status %d", req.URL, resp.StatusCode) + return nil, fmt.Errorf("failed to create HTTP request: %v", err) } st := &status{} - if err := c.dec(resp.Body, st); err != nil { - return nil, fmt.Errorf("error parsing HTTP response from '%s': %v", req.URL, err) + + if err := web.DoHTTP(c.client).Request(req, func(body io.Reader) error { + return c.dec(body, st) + }); err != nil { + return nil, err } return st, nil diff --git a/src/go/plugin/go.d/modules/phpfpm/collect.go b/src/go/plugin/go.d/modules/phpfpm/collect.go index 08a3b9f61..fa48c72c1 100644 --- a/src/go/plugin/go.d/modules/phpfpm/collect.go +++ b/src/go/plugin/go.d/modules/phpfpm/collect.go @@ -49,7 +49,7 @@ func hasIdleProcesses(processes []proc) bool { type accessor func(p proc) int64 func statProcesses(m map[string]int64, processes []proc, met string, acc accessor) { - var sum, count, min, max int64 + var sum, count, minv, maxv int64 for _, proc := range processes { if proc.State != "Idle" { continue @@ -59,14 +59,14 @@ func statProcesses(m map[string]int64, processes []proc, met string, acc accesso sum += val count += 1 if count == 1 { - min, max = val, val + minv, maxv = val, val continue } - min = int64(math.Min(float64(min), float64(val))) - max = int64(math.Max(float64(max), float64(val))) + minv = int64(math.Min(float64(minv), float64(val))) + maxv = int64(math.Max(float64(maxv), float64(val))) } - m["min"+met] = min - m["max"+met] = max + m["min"+met] = minv + m["max"+met] = maxv m["avg"+met] = sum / count } diff --git a/src/go/plugin/go.d/modules/phpfpm/config_schema.json b/src/go/plugin/go.d/modules/phpfpm/config_schema.json index 81b4005af..5d1f6cbd1 100644 --- a/src/go/plugin/go.d/modules/phpfpm/config_schema.json +++ b/src/go/plugin/go.d/modules/phpfpm/config_schema.json @@ -122,7 +122,6 @@ "type": "string" } }, - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/phpfpm/init.go b/src/go/plugin/go.d/modules/phpfpm/init.go index 5615012f0..191f61f1e 100644 --- a/src/go/plugin/go.d/modules/phpfpm/init.go +++ b/src/go/plugin/go.d/modules/phpfpm/init.go @@ -25,14 +25,14 @@ func (p *Phpfpm) initClient() (client, error) { } func (p *Phpfpm) initHTTPClient() (*httpClient, error) { - c, err := web.NewHTTPClient(p.Client) + c, err := web.NewHTTPClient(p.ClientConfig) if err != nil { return nil, fmt.Errorf("create HTTP client: %v", err) } p.Debugf("using HTTP client: url='%s', timeout='%s'", p.URL, p.Timeout) - return newHTTPClient(c, p.Request) + return newHTTPClient(c, p.RequestConfig) } func (p *Phpfpm) initSocketClient() (*socketClient, error) { diff --git 
a/src/go/plugin/go.d/modules/phpfpm/integrations/php-fpm.md b/src/go/plugin/go.d/modules/phpfpm/integrations/php-fpm.md index 1839d00d6..5daea9f7d 100644 --- a/src/go/plugin/go.d/modules/phpfpm/integrations/php-fpm.md +++ b/src/go/plugin/go.d/modules/phpfpm/integrations/php-fpm.md @@ -95,8 +95,8 @@ Uncomment the `pm.status_path = /status` variable in the `php-fpm` config file. The configuration file name for this integration is `go.d/phpfpm.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/phpfpm/phpfpm.go b/src/go/plugin/go.d/modules/phpfpm/phpfpm.go index 76057c8f9..0da4c6c44 100644 --- a/src/go/plugin/go.d/modules/phpfpm/phpfpm.go +++ b/src/go/plugin/go.d/modules/phpfpm/phpfpm.go @@ -5,9 +5,11 @@ package phpfpm import ( _ "embed" "errors" + "fmt" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -25,12 +27,12 @@ func init() { func New() *Phpfpm { return &Phpfpm{ Config: Config{ - HTTP: web.HTTP{ - Request: web.Request{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{ URL: "http://127.0.0.1/status?full&json", }, - Client: web.Client{ - Timeout: web.Duration(time.Second), + ClientConfig: web.ClientConfig{ + Timeout: confopt.Duration(time.Second), }, }, FcgiPath: "/status", @@ -39,11 +41,11 @@ func New() *Phpfpm { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - web.HTTP `yaml:",inline" json:""` - Socket string `yaml:"socket,omitempty" json:"socket"` - Address string `yaml:"address,omitempty" json:"address"` - FcgiPath string `yaml:"fcgi_path,omitempty" json:"fcgi_path"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + web.HTTPConfig `yaml:",inline" json:""` + Socket string `yaml:"socket,omitempty" json:"socket"` + Address string `yaml:"address,omitempty" json:"address"` + FcgiPath string `yaml:"fcgi_path,omitempty" json:"fcgi_path"` } type Phpfpm struct { @@ -60,8 +62,7 @@ func (p *Phpfpm) Configuration() any { func (p *Phpfpm) Init() error { c, err := p.initClient() if err != nil { - p.Errorf("init client: %v", err) - return err + return fmt.Errorf("init client: %v", err) } p.client = c @@ -71,7 +72,6 @@ func (p *Phpfpm) Init() error { func (p *Phpfpm) Check() error { mx, err := p.collect() if err != nil { - p.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/pihole/collect.go b/src/go/plugin/go.d/modules/pihole/collect.go index c9e6d8451..7d43da678 100644 --- a/src/go/plugin/go.d/modules/pihole/collect.go +++ b/src/go/plugin/go.d/modules/pihole/collect.go @@ -4,6 +4,7 @@ package pihole import ( "encoding/json" + "errors" "fmt" "io" "net/http" @@ -131,7 +132,7 @@ func (p *Pihole) queryMetrics(pmx *piholeMetrics, doConcurrently bool) { } func (p *Pihole) querySummary(pmx *piholeMetrics) { - req, err := 
web.NewHTTPRequestWithPath(p.Request, urlPathAPI) + req, err := web.NewHTTPRequestWithPath(p.RequestConfig, urlPathAPI) if err != nil { p.Error(err) return @@ -143,7 +144,7 @@ func (p *Pihole) querySummary(pmx *piholeMetrics) { }.Encode() var v summaryRawMetrics - if err = p.doWithDecode(&v, req); err != nil { + if err = p.doHTTP(req, &v); err != nil { p.Error(err) return } @@ -152,7 +153,7 @@ func (p *Pihole) querySummary(pmx *piholeMetrics) { } func (p *Pihole) queryQueryTypes(pmx *piholeMetrics) { - req, err := web.NewHTTPRequestWithPath(p.Request, urlPathAPI) + req, err := web.NewHTTPRequestWithPath(p.RequestConfig, urlPathAPI) if err != nil { p.Error(err) return @@ -164,7 +165,7 @@ func (p *Pihole) queryQueryTypes(pmx *piholeMetrics) { }.Encode() var v queryTypesMetrics - err = p.doWithDecode(&v, req) + err = p.doHTTP(req, &v) if err != nil { p.Error(err) return @@ -174,7 +175,7 @@ func (p *Pihole) queryQueryTypes(pmx *piholeMetrics) { } func (p *Pihole) queryForwardedDestinations(pmx *piholeMetrics) { - req, err := web.NewHTTPRequestWithPath(p.Request, urlPathAPI) + req, err := web.NewHTTPRequestWithPath(p.RequestConfig, urlPathAPI) if err != nil { p.Error(err) return @@ -186,7 +187,7 @@ func (p *Pihole) queryForwardedDestinations(pmx *piholeMetrics) { }.Encode() var v forwardDestinations - err = p.doWithDecode(&v, req) + err = p.doHTTP(req, &v) if err != nil { p.Error(err) return @@ -196,7 +197,7 @@ func (p *Pihole) queryForwardedDestinations(pmx *piholeMetrics) { } func (p *Pihole) queryAPIVersion() (int, error) { - req, err := web.NewHTTPRequestWithPath(p.Request, urlPathAPI) + req, err := web.NewHTTPRequestWithPath(p.RequestConfig, urlPathAPI) if err != nil { return 0, err } @@ -207,7 +208,7 @@ func (p *Pihole) queryAPIVersion() (int, error) { }.Encode() var v piholeAPIVersion - err = p.doWithDecode(&v, req) + err = p.doHTTP(req, &v) if err != nil { return 0, err } @@ -215,32 +216,24 @@ func (p *Pihole) queryAPIVersion() (int, error) { return v.Version, nil } -func (p *Pihole) doWithDecode(dst interface{}, req *http.Request) error { - resp, err := p.httpClient.Do(req) - if err != nil { - return err - } - defer closeBody(resp) - - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("%s returned %d status code", req.URL, resp.StatusCode) - } - - content, err := io.ReadAll(resp.Body) - if err != nil { - return fmt.Errorf("error on reading response from %s : %v", req.URL, err) - } +func (p *Pihole) doHTTP(req *http.Request, dst any) error { + return web.DoHTTP(p.httpClient).Request(req, func(body io.Reader) error { + content, err := io.ReadAll(body) + if err != nil { + return fmt.Errorf("failed to read response: %v", err) + } - // empty array if unauthorized query or wrong query - if isEmptyArray(content) { - return fmt.Errorf("unauthorized access to %s", req.URL) - } + // empty array if unauthorized query or wrong query + if isEmptyArray(content) { + return errors.New("unauthorized access") + } - if err := json.Unmarshal(content, dst); err != nil { - return fmt.Errorf("error on parsing response from %s : %v", req.URL, err) - } + if err := json.Unmarshal(content, dst); err != nil { + return fmt.Errorf("failed to decode JSON response: %v", err) + } - return nil + return nil + }) } func isEmptyArray(data []byte) bool { @@ -248,13 +241,6 @@ func isEmptyArray(data []byte) bool { return len(data) == len(empty) && string(data) == empty } -func closeBody(resp *http.Response) { - if resp != nil && resp.Body != nil { - _, _ = io.Copy(io.Discard, resp.Body) - _ = resp.Body.Close() - } -} 
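The pihole change above, like the phpdaemon and phpfpm changes earlier in this patch, converges on one call shape: `web.DoHTTP(client).Request(req, callback)` (or `RequestJSON`), which centralizes the status-code check and the drain-and-close that each module previously carried in its own `closeBody` helper. Below is a minimal sketch of such a callback-style helper; every name in it (`webx`, `doer`, `doHTTP`, `request`) is invented for illustration and is not taken from Netdata's actual `web` package.

```go
// Sketch only: a callback-style HTTP helper in the shape of
// web.DoHTTP(client).Request(req, fn) as seen in this patch.
// Package and identifier names are assumptions for illustration.
package webx

import (
	"fmt"
	"io"
	"net/http"
)

type doer struct{ client *http.Client }

func doHTTP(c *http.Client) *doer { return &doer{client: c} }

// request executes req and hands the response body to fn. It owns the
// status-code check and the drain-and-close, so callers only decode.
func (d *doer) request(req *http.Request, fn func(body io.Reader) error) error {
	resp, err := d.client.Do(req)
	if err != nil {
		return fmt.Errorf("HTTP request to '%s' failed: %w", req.URL, err)
	}
	defer func() {
		_, _ = io.Copy(io.Discard, resp.Body) // drain so the connection can be reused
		_ = resp.Body.Close()
	}()

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("'%s' returned HTTP status %d", req.URL, resp.StatusCode)
	}
	return fn(resp.Body)
}
```

With a helper like this, a collector's error handling shrinks to the decode callback, which is exactly the shape the pihole `doHTTP` wrapper above takes.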
- func boolToInt(b bool) int64 { if !b { return 0 diff --git a/src/go/plugin/go.d/modules/pihole/config_schema.json b/src/go/plugin/go.d/modules/pihole/config_schema.json index 14523a2e8..e05922fb3 100644 --- a/src/go/plugin/go.d/modules/pihole/config_schema.json +++ b/src/go/plugin/go.d/modules/pihole/config_schema.json @@ -111,7 +111,6 @@ "required": [ "url" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/pihole/init.go b/src/go/plugin/go.d/modules/pihole/init.go index bd5d952cc..8cf2c8ad7 100644 --- a/src/go/plugin/go.d/modules/pihole/init.go +++ b/src/go/plugin/go.d/modules/pihole/init.go @@ -21,7 +21,7 @@ func (p *Pihole) validateConfig() error { } func (p *Pihole) initHTTPClient() (*http.Client, error) { - return web.NewHTTPClient(p.Client) + return web.NewHTTPClient(p.ClientConfig) } func (p *Pihole) getWebPassword() string { diff --git a/src/go/plugin/go.d/modules/pihole/integrations/pi-hole.md b/src/go/plugin/go.d/modules/pihole/integrations/pi-hole.md index 290dfcb03..1203ae4f3 100644 --- a/src/go/plugin/go.d/modules/pihole/integrations/pi-hole.md +++ b/src/go/plugin/go.d/modules/pihole/integrations/pi-hole.md @@ -103,8 +103,8 @@ No action required. The configuration file name for this integration is `go.d/pihole.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/pihole/pihole.go b/src/go/plugin/go.d/modules/pihole/pihole.go index 9c93d0512..72951b227 100644 --- a/src/go/plugin/go.d/modules/pihole/pihole.go +++ b/src/go/plugin/go.d/modules/pihole/pihole.go @@ -5,11 +5,13 @@ package pihole import ( _ "embed" "errors" + "fmt" "net/http" "sync" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -30,12 +32,12 @@ func init() { func New() *Pihole { return &Pihole{ Config: Config{ - HTTP: web.HTTP{ - Request: web.Request{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{ URL: "http://127.0.0.1", }, - Client: web.Client{ - Timeout: web.Duration(time.Second * 5), + ClientConfig: web.ClientConfig{ + Timeout: confopt.Duration(time.Second * 5), }, }, SetupVarsPath: "/etc/pihole/setupVars.conf", @@ -48,9 +50,9 @@ func New() *Pihole { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - web.HTTP `yaml:",inline" json:""` - SetupVarsPath string `yaml:"setup_vars_path" json:"setup_vars_path"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + web.HTTPConfig `yaml:",inline" json:""` + SetupVarsPath string `yaml:"setup_vars_path" json:"setup_vars_path"` } type Pihole struct { @@ -72,14 +74,12 @@ func (p *Pihole) Configuration() any { func (p *Pihole) Init() error { if err := p.validateConfig(); err != nil { - p.Errorf("config validation: %v", err) - return err + return fmt.Errorf("config validation: %v", err) } httpClient, err := p.initHTTPClient() if err != nil { - p.Errorf("init http client: %v", err) - return err + return fmt.Errorf("init http client: %v", err) } p.httpClient = httpClient @@ -96,7 +96,6 @@ func (p *Pihole) Init() error { func (p *Pihole) Check() error { mx, err := p.collect() if err != nil { - p.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/pihole/pihole_test.go b/src/go/plugin/go.d/modules/pihole/pihole_test.go index 86b17b623..b6325e590 100644 --- a/src/go/plugin/go.d/modules/pihole/pihole_test.go +++ b/src/go/plugin/go.d/modules/pihole/pihole_test.go @@ -60,8 +60,8 @@ func TestPihole_Init(t *testing.T) { "fail when URL not set": { wantFail: true, config: Config{ - HTTP: web.HTTP{ - Request: web.Request{URL: ""}, + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{URL: ""}, }, }, }, diff --git a/src/go/plugin/go.d/modules/pika/config_schema.json b/src/go/plugin/go.d/modules/pika/config_schema.json index 885cbed0f..f71a0e6cc 100644 --- a/src/go/plugin/go.d/modules/pika/config_schema.json +++ b/src/go/plugin/go.d/modules/pika/config_schema.json @@ -51,7 +51,6 @@ "required": [ "address" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/pika/init.go b/src/go/plugin/go.d/modules/pika/init.go index b51152952..af2947816 100644 --- a/src/go/plugin/go.d/modules/pika/init.go +++ b/src/go/plugin/go.d/modules/pika/init.go @@ -8,7 +8,7 @@ import ( "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/tlscfg" - "github.com/go-redis/redis/v8" + "github.com/redis/go-redis/v9" ) func (p *Pika) validateConfig() error { diff --git a/src/go/plugin/go.d/modules/pika/integrations/pika.md b/src/go/plugin/go.d/modules/pika/integrations/pika.md index 
04a2b329c..8e4dbd6e2 100644 --- a/src/go/plugin/go.d/modules/pika/integrations/pika.md +++ b/src/go/plugin/go.d/modules/pika/integrations/pika.md @@ -111,8 +111,8 @@ No action required. The configuration file name for this integration is `go.d/pika.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/pika/pika.go b/src/go/plugin/go.d/modules/pika/pika.go index 705c3db49..785ff9f51 100644 --- a/src/go/plugin/go.d/modules/pika/pika.go +++ b/src/go/plugin/go.d/modules/pika/pika.go @@ -6,14 +6,15 @@ import ( "context" _ "embed" "errors" + "fmt" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/tlscfg" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" "github.com/blang/semver/v4" - "github.com/go-redis/redis/v8" + "github.com/redis/go-redis/v9" ) //go:embed "config_schema.json" @@ -31,7 +32,7 @@ func New() *Pika { return &Pika{ Config: Config{ Address: "redis://@localhost:9221", - Timeout: web.Duration(time.Second), + Timeout: confopt.Duration(time.Second), }, collectedCommands: make(map[string]bool), @@ -40,9 +41,9 @@ func New() *Pika { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - Address string `yaml:"address" json:"address"` - Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + Address string `yaml:"address" json:"address"` + Timeout confopt.Duration `yaml:"timeout,omitempty" json:"timeout"` tlscfg.TLSConfig `yaml:",inline" json:""` } @@ -73,21 +74,18 @@ func (p *Pika) Configuration() any { func (p *Pika) Init() error { err := p.validateConfig() if err != nil { - p.Errorf("config validation: %v", err) - return err + return fmt.Errorf("config validation: %v", err) } pdb, err := p.initRedisClient() if err != nil { - p.Errorf("init redis client: %v", err) - return err + return fmt.Errorf("init redis client: %v", err) } p.pdb = pdb charts, err := p.initCharts() if err != nil { - p.Errorf("init charts: %v", err) - return err + return fmt.Errorf("init charts: %v", err) } p.charts = charts @@ -97,7 +95,6 @@ func (p *Pika) Init() error { func (p *Pika) Check() error { mx, err := p.collect() if err != nil { - p.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/pika/pika_test.go b/src/go/plugin/go.d/modules/pika/pika_test.go index 940619255..c2cf30f79 100644 --- a/src/go/plugin/go.d/modules/pika/pika_test.go +++ b/src/go/plugin/go.d/modules/pika/pika_test.go @@ -11,7 +11,7 @@ import ( "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/tlscfg" - "github.com/go-redis/redis/v8" + "github.com/redis/go-redis/v9" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -187,11 +187,11 @@ func TestPika_Collect(t 
*testing.T) { t.Run(name, func(t *testing.T) { pika := test.prepare(t) - ms := pika.Collect() + mx := pika.Collect() - assert.Equal(t, test.wantCollected, ms) + assert.Equal(t, test.wantCollected, mx) if len(test.wantCollected) > 0 { - ensureCollectedHasAllChartsDimsVarsIDs(t, pika, ms) + module.TestMetricsHasAllChartsDims(t, pika.Charts(), mx) ensureCollectedCommandsAddedToCharts(t, pika) ensureCollectedDbsAddedToCharts(t, pika) } @@ -226,22 +226,6 @@ func preparePikaWithRedisMetrics(t *testing.T) *Pika { return pika } -func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, pika *Pika, ms map[string]int64) { - for _, chart := range *pika.Charts() { - if chart.Obsolete { - continue - } - for _, dim := range chart.Dims { - _, ok := ms[dim.ID] - assert.Truef(t, ok, "chart '%s' dim '%s': no dim in collected", dim.ID, chart.ID) - } - for _, v := range chart.Vars { - _, ok := ms[v.ID] - assert.Truef(t, ok, "chart '%s' dim '%s': no dim in collected", v.ID, chart.ID) - } - } -} - func ensureCollectedCommandsAddedToCharts(t *testing.T, pika *Pika) { for _, id := range []string{ chartCommandsCalls.ID, diff --git a/src/go/plugin/go.d/modules/ping/config_schema.json b/src/go/plugin/go.d/modules/ping/config_schema.json index 1168e3388..5a5ef3a15 100644 --- a/src/go/plugin/go.d/modules/ping/config_schema.json +++ b/src/go/plugin/go.d/modules/ping/config_schema.json @@ -58,7 +58,7 @@ }, "interface": { "title": "Interface", - "description": "The name of the network interface whose IP address will be used as the source for sending ping packets.", + "description": "The network device name (e.g., `eth0`, `wlan0`) used as the source for ICMP echo requests.", "type": "string", "default": "" } @@ -66,7 +66,6 @@ "required": [ "hosts" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/ping/init.go b/src/go/plugin/go.d/modules/ping/init.go index 62d78c8e6..df38990e6 100644 --- a/src/go/plugin/go.d/modules/ping/init.go +++ b/src/go/plugin/go.d/modules/ping/init.go @@ -30,7 +30,7 @@ func (p *Ping) initProber() (prober, error) { conf := pingProberConfig{ privileged: p.Privileged, packets: p.SendPackets, - iface: p.Interface, + ifaceName: p.Interface, interval: p.Interval.Duration(), deadline: deadline, } diff --git a/src/go/plugin/go.d/modules/ping/integrations/ping.md b/src/go/plugin/go.d/modules/ping/integrations/ping.md index db97288b0..8cab9d9da 100644 --- a/src/go/plugin/go.d/modules/ping/integrations/ping.md +++ b/src/go/plugin/go.d/modules/ping/integrations/ping.md @@ -25,22 +25,27 @@ This module measures round-trip time and packet loss by sending ping messages to There are two operational modes: -- privileged (send raw ICMP ping, default). Requires - CAP_NET_RAW [capability](https://man7.org/linux/man-pages/man7/capabilities.7.html) or root privileges: - > **Note**: set automatically during Netdata installation. +- **Privileged** (send raw ICMP ping, default). Requires the necessary permissions ([CAP_NET_RAW](https://man7.org/linux/man-pages/man7/capabilities.7.html) on Linux, `setuid` bit on other systems). - ```bash - sudo setcap CAP_NET_RAW=eip /usr/libexec/netdata/plugins.d/go.d.plugin - ``` + These permissions are **automatically** set during Netdata installation. However, if you need to set them manually: + - set `CAP_NET_RAW` (Linux only). + ```bash + sudo setcap CAP_NET_RAW=eip /usr/libexec/netdata/plugins.d/go.d.plugin + ``` + - set `setuid` bit (Other OS). 
+ ```bash + sudo chmod 4750 /usr/libexec/netdata/plugins.d/go.d.plugin + ``` -- unprivileged (send UDP ping, Linux only). - Requires configuring [ping_group_range](https://www.man7.org/linux/man-pages/man7/icmp.7.html): +- **Unprivileged** (send UDP ping, Linux only). Requires configuring [ping_group_range](https://www.man7.org/linux/man-pages/man7/icmp.7.html): + + This configuration is **not set automatically** and requires manual configuration. ```bash sudo sysctl -w net.ipv4.ping_group_range="0 2147483647" ``` - To persist the change add `net.ipv4.ping_group_range=0 2147483647` to `/etc/sysctl.conf` and - execute `sudo sysctl -p`. + + To persist the change add `net.ipv4.ping_group_range=0 2147483647` to `/etc/sysctl.conf` and execute `sudo sysctl -p`. @@ -119,8 +124,8 @@ No action required. The configuration file name for this integration is `go.d/ping.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/ping/metadata.yaml b/src/go/plugin/go.d/modules/ping/metadata.yaml index 8686d103b..8dfd2fff2 100644 --- a/src/go/plugin/go.d/modules/ping/metadata.yaml +++ b/src/go/plugin/go.d/modules/ping/metadata.yaml @@ -22,25 +22,30 @@ modules: data_collection: metrics_description: | This module measures round-trip time and packet loss by sending ping messages to network hosts. - + There are two operational modes: - - - privileged (send raw ICMP ping, default). Requires - CAP_NET_RAW [capability](https://man7.org/linux/man-pages/man7/capabilities.7.html) or root privileges: - > **Note**: set automatically during Netdata installation. - - ```bash - sudo setcap CAP_NET_RAW=eip /usr/libexec/netdata/plugins.d/go.d.plugin - ``` - - - unprivileged (send UDP ping, Linux only). - Requires configuring [ping_group_range](https://www.man7.org/linux/man-pages/man7/icmp.7.html): - + + - **Privileged** (send raw ICMP ping, default). Requires the necessary permissions ([CAP_NET_RAW](https://man7.org/linux/man-pages/man7/capabilities.7.html) on Linux, `setuid` bit on other systems). + + These permissions are **automatically** set during Netdata installation. However, if you need to set them manually: + - set `CAP_NET_RAW` (Linux only). + ```bash + sudo setcap CAP_NET_RAW=eip /usr/libexec/netdata/plugins.d/go.d.plugin + ``` + - set `setuid` bit (Other OS). + ```bash + sudo chmod 4750 /usr/libexec/netdata/plugins.d/go.d.plugin + ``` + + - **Unprivileged** (send UDP ping, Linux only). Requires configuring [ping_group_range](https://www.man7.org/linux/man-pages/man7/icmp.7.html): + + This configuration is **not set automatically** and requires manual configuration. + ```bash sudo sysctl -w net.ipv4.ping_group_range="0 2147483647" ``` - To persist the change add `net.ipv4.ping_group_range=0 2147483647` to `/etc/sysctl.conf` and - execute `sudo sysctl -p`. + + To persist the change add `net.ipv4.ping_group_range=0 2147483647` to `/etc/sysctl.conf` and execute `sudo sysctl -p`. 
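Both modes come down to a single switch in the pro-bing library this collector builds on (the `SetPrivileged` call visible in the prober change later in this patch). A minimal standalone sketch of an unprivileged UDP ping follows; it assumes only the pro-bing API already used in this patch, and the target host and packet count are placeholders.

```go
// Sketch: an unprivileged UDP ping via github.com/prometheus-community/pro-bing,
// the library this collector uses. Host and counts are arbitrary examples.
package main

import (
	"fmt"
	"time"

	probing "github.com/prometheus-community/pro-bing"
)

func main() {
	pinger, err := probing.NewPinger("192.0.2.1")
	if err != nil {
		panic(err)
	}
	pinger.Count = 5
	pinger.Timeout = 10 * time.Second
	// false => "unprivileged" UDP ping: needs net.ipv4.ping_group_range to
	// cover the process's group (see above). true => raw ICMP ping, which
	// needs CAP_NET_RAW or root.
	pinger.SetPrivileged(false)

	if err := pinger.Run(); err != nil {
		panic(err)
	}
	stats := pinger.Statistics()
	fmt.Printf("sent=%d recv=%d loss=%.1f%% avg=%s\n",
		stats.PacketsSent, stats.PacketsRecv, stats.PacketLoss, stats.AvgRtt)
}
```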
method_description: "" supported_platforms: include: [] @@ -84,6 +89,10 @@ modules: description: "Allows configuration of DNS resolution. Supported options: ip (select IPv4 or IPv6), ip4 (select IPv4), ip6 (select IPv6)." default_value: "ip" required: false + - name: interface + description: "The network device name (e.g., `eth0`, `wlan0`) used as the source for ICMP echo requests." + default_value: "" + required: false - name: privileged description: Ping packets type. "no" means send an "unprivileged" UDP ping, "yes" - raw ICMP ping. default_value: true @@ -121,7 +130,7 @@ modules: - name: Multi-instance description: | > **Note**: When you define multiple jobs, their names must be unique. - + Multiple instances. config: | jobs: @@ -129,7 +138,7 @@ modules: hosts: - 192.0.2.0 - 192.0.2.1 - + - name: example2 packets: 10 hosts: diff --git a/src/go/plugin/go.d/modules/ping/ping.go b/src/go/plugin/go.d/modules/ping/ping.go index 9d1ef929f..b105395d2 100644 --- a/src/go/plugin/go.d/modules/ping/ping.go +++ b/src/go/plugin/go.d/modules/ping/ping.go @@ -5,11 +5,12 @@ package ping import ( _ "embed" "errors" + "fmt" "time" "github.com/netdata/netdata/go/plugins/logger" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" probing "github.com/prometheus-community/pro-bing" ) @@ -34,7 +35,7 @@ func New() *Ping { Network: "ip", Privileged: true, SendPackets: 5, - Interval: web.Duration(time.Millisecond * 100), + Interval: confopt.Duration(time.Millisecond * 100), }, charts: &module.Charts{}, @@ -44,13 +45,13 @@ func New() *Ping { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - Hosts []string `yaml:"hosts" json:"hosts"` - Network string `yaml:"network,omitempty" json:"network"` - Privileged bool `yaml:"privileged" json:"privileged"` - SendPackets int `yaml:"packets,omitempty" json:"packets"` - Interval web.Duration `yaml:"interval,omitempty" json:"interval"` - Interface string `yaml:"interface,omitempty" json:"interface"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + Hosts []string `yaml:"hosts" json:"hosts"` + Network string `yaml:"network,omitempty" json:"network"` + Privileged bool `yaml:"privileged" json:"privileged"` + SendPackets int `yaml:"packets,omitempty" json:"packets"` + Interval confopt.Duration `yaml:"interval,omitempty" json:"interval"` + Interface string `yaml:"interface,omitempty" json:"interface"` } type ( @@ -77,14 +78,12 @@ func (p *Ping) Configuration() any { func (p *Ping) Init() error { err := p.validateConfig() if err != nil { - p.Errorf("config validation: %v", err) - return err + return fmt.Errorf("config validation: %v", err) } pr, err := p.initProber() if err != nil { - p.Errorf("init prober: %v", err) - return err + return fmt.Errorf("init prober: %v", err) } p.prober = pr diff --git a/src/go/plugin/go.d/modules/ping/prober.go b/src/go/plugin/go.d/modules/ping/prober.go index 70c31dcde..4b769d662 100644 --- a/src/go/plugin/go.d/modules/ping/prober.go +++ b/src/go/plugin/go.d/modules/ping/prober.go @@ -3,9 +3,7 @@ package ping import ( - "errors" "fmt" - "net" "time" "github.com/netdata/netdata/go/plugins/logger" @@ -14,32 +12,22 @@ import ( ) func newPingProber(conf pingProberConfig, log *logger.Logger) prober { - var source string - if conf.iface != "" { - if addr, err := getInterfaceIPAddress(conf.iface); err != nil { - log.Warningf("error getting interface 
'%s' IP address: %v", conf.iface, err) - } else { - log.Infof("interface '%s' IP address '%s', will use it as the source", conf.iface, addr) - source = addr - } - } - return &pingProber{ - network: conf.network, - privileged: conf.privileged, - packets: conf.packets, - source: source, - interval: conf.interval, - deadline: conf.deadline, - Logger: log, + network: conf.network, + interfaceName: conf.ifaceName, + privileged: conf.privileged, + packets: conf.packets, + interval: conf.interval, + deadline: conf.deadline, + Logger: log, } } type pingProberConfig struct { network string + ifaceName string privileged bool packets int - iface string interval time.Duration deadline time.Duration } @@ -47,12 +35,12 @@ type pingProberConfig struct { type pingProber struct { *logger.Logger - network string - privileged bool - packets int - source string - interval time.Duration - deadline time.Duration + network string + interfaceName string + privileged bool + packets int + interval time.Duration + deadline time.Duration } func (p *pingProber) ping(host string) (*probing.Statistics, error) { @@ -64,16 +52,17 @@ func (p *pingProber) ping(host string) (*probing.Statistics, error) { return nil, fmt.Errorf("DNS lookup '%s' : %v", host, err) } - pr.Source = p.source pr.RecordRtts = false pr.Interval = p.interval pr.Count = p.packets pr.Timeout = p.deadline + pr.InterfaceName = p.interfaceName pr.SetPrivileged(p.privileged) pr.SetLogger(nil) if err := pr.Run(); err != nil { - return nil, fmt.Errorf("pinging host '%s' (ip %s): %v", pr.Addr(), pr.IPAddr(), err) + return nil, fmt.Errorf("pinging host '%s' (ip '%s' iface '%s'): %v", + pr.Addr(), pr.IPAddr(), pr.InterfaceName, err) } stats := pr.Statistics() @@ -82,30 +71,3 @@ func (p *pingProber) ping(host string) (*probing.Statistics, error) { return stats, nil } - -func getInterfaceIPAddress(ifaceName string) (ipaddr string, err error) { - iface, err := net.InterfaceByName(ifaceName) - if err != nil { - return "", err - } - - addresses, err := iface.Addrs() - if err != nil { - return "", err - } - - // FIXME: add IPv6 support - var v4Addr string - for _, addr := range addresses { - if ipnet, ok := addr.(*net.IPNet); ok && ipnet.IP.To4() != nil { - v4Addr = ipnet.IP.To4().String() - break - } - } - - if v4Addr == "" { - return "", errors.New("ipv4 addresses not found") - } - - return v4Addr, nil -} diff --git a/src/go/plugin/go.d/modules/portcheck/README.md b/src/go/plugin/go.d/modules/portcheck/README.md index 4bee556ef..daf2b1aac 120000 --- a/src/go/plugin/go.d/modules/portcheck/README.md +++ b/src/go/plugin/go.d/modules/portcheck/README.md @@ -1 +1 @@ -integrations/tcp_endpoints.md \ No newline at end of file +integrations/tcp-udp_endpoints.md \ No newline at end of file diff --git a/src/go/plugin/go.d/modules/portcheck/charts.go b/src/go/plugin/go.d/modules/portcheck/charts.go index 6797f00a6..99224b63d 100644 --- a/src/go/plugin/go.d/modules/portcheck/charts.go +++ b/src/go/plugin/go.d/modules/portcheck/charts.go @@ -13,54 +13,103 @@ const ( prioCheckStatus = module.Priority + iota prioCheckInStatusDuration prioCheckLatency + + prioUDPCheckStatus + prioUDPCheckInStatusDuration ) -var chartsTmpl = module.Charts{ - checkStatusChartTmpl.Copy(), - checkInStateDurationChartTmpl.Copy(), - checkConnectionLatencyChartTmpl.Copy(), +var tcpPortChartsTmpl = module.Charts{ + tcpPortCheckStatusChartTmpl.Copy(), + tcpPortCheckInStateDurationChartTmpl.Copy(), + tcpPortCheckConnectionLatencyChartTmpl.Copy(), } -var checkStatusChartTmpl = module.Chart{ - ID: 
"port_%d_status", - Title: "TCP Check Status", - Units: "boolean", - Fam: "status", - Ctx: "portcheck.status", - Priority: prioCheckStatus, - Dims: module.Dims{ - {ID: "port_%d_success", Name: "success"}, - {ID: "port_%d_failed", Name: "failed"}, - {ID: "port_%d_timeout", Name: "timeout"}, - }, +var udpPortChartsTmpl = module.Charts{ + udpPortCheckStatusChartTmpl.Copy(), + udpPortCheckInStatusDurationChartTmpl.Copy(), } -var checkInStateDurationChartTmpl = module.Chart{ - ID: "port_%d_current_state_duration", - Title: "Current State Duration", - Units: "seconds", - Fam: "status duration", - Ctx: "portcheck.state_duration", - Priority: prioCheckInStatusDuration, - Dims: module.Dims{ - {ID: "port_%d_current_state_duration", Name: "time"}, - }, +var ( + tcpPortCheckStatusChartTmpl = module.Chart{ + ID: "port_%d_status", + Title: "TCP Check Status", + Units: "boolean", + Fam: "status", + Ctx: "portcheck.status", + Priority: prioCheckStatus, + Dims: module.Dims{ + {ID: "tcp_port_%d_success", Name: "success"}, + {ID: "tcp_port_%d_failed", Name: "failed"}, + {ID: "tcp_port_%d_timeout", Name: "timeout"}, + }, + } + tcpPortCheckInStateDurationChartTmpl = module.Chart{ + ID: "port_%d_current_state_duration", + Title: "Current State Duration", + Units: "seconds", + Fam: "status duration", + Ctx: "portcheck.state_duration", + Priority: prioCheckInStatusDuration, + Dims: module.Dims{ + {ID: "tcp_port_%d_current_state_duration", Name: "time"}, + }, + } + tcpPortCheckConnectionLatencyChartTmpl = module.Chart{ + ID: "port_%d_connection_latency", + Title: "TCP Connection Latency", + Units: "ms", + Fam: "latency", + Ctx: "portcheck.latency", + Priority: prioCheckLatency, + Dims: module.Dims{ + {ID: "tcp_port_%d_latency", Name: "time"}, + }, + } +) + +var ( + udpPortCheckStatusChartTmpl = module.Chart{ + ID: "udp_port_%d_check_status", + Title: "UDP Port Check Status", + Units: "status", + Fam: "status", + Ctx: "portcheck.udp_port_status", + Priority: prioUDPCheckStatus, + Dims: module.Dims{ + {ID: "udp_port_%d_open_filtered", Name: "open/filtered"}, + {ID: "udp_port_%d_closed", Name: "closed"}, + }, + } + udpPortCheckInStatusDurationChartTmpl = module.Chart{ + ID: "udp_port_%d_current_status_duration", + Title: "UDP Port Current Status Duration", + Units: "seconds", + Fam: "status duration", + Ctx: "portcheck.udp_port_status_duration", + Priority: prioUDPCheckInStatusDuration, + Dims: module.Dims{ + {ID: "udp_port_%d_current_status_duration", Name: "time"}, + }, + } +) + +func (pc *PortCheck) addTCPPortCharts(port *tcpPort) { + charts := newPortCharts(pc.Host, port.number, tcpPortChartsTmpl.Copy()) + + if err := pc.Charts().Add(*charts...); err != nil { + pc.Warning(err) + } } -var checkConnectionLatencyChartTmpl = module.Chart{ - ID: "port_%d_connection_latency", - Title: "TCP Connection Latency", - Units: "ms", - Fam: "latency", - Ctx: "portcheck.latency", - Priority: prioCheckLatency, - Dims: module.Dims{ - {ID: "port_%d_latency", Name: "time"}, - }, +func (pc *PortCheck) addUDPPortCharts(port *udpPort) { + charts := newPortCharts(pc.Host, port.number, udpPortChartsTmpl.Copy()) + + if err := pc.Charts().Add(*charts...); err != nil { + pc.Warning(err) + } } -func newPortCharts(host string, port int) *module.Charts { - charts := chartsTmpl.Copy() +func newPortCharts(host string, port int, charts *module.Charts) *module.Charts { for _, chart := range *charts { chart.Labels = []module.Label{ {Key: "host", Value: host}, diff --git a/src/go/plugin/go.d/modules/portcheck/check_tcp_port.go 
b/src/go/plugin/go.d/modules/portcheck/check_tcp_port.go new file mode 100644 index 000000000..6d41575d4 --- /dev/null +++ b/src/go/plugin/go.d/modules/portcheck/check_tcp_port.go @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package portcheck + +import ( + "time" +) + +const ( + tcpPortCheckStateSuccess = "success" + tcpPortCheckStateTimeout = "timeout" + tcpPortCheckStateFailed = "failed" +) + +type tcpPort struct { + number int + status string + statusChangeTs time.Time + latency int +} + +func (pc *PortCheck) checkTCPPort(port *tcpPort) { + start := time.Now() + + addr := pc.address(port.number) + conn, err := pc.dialTCP("tcp", addr, pc.Timeout.Duration()) + + dur := time.Since(start) + + defer func() { + if conn != nil { + _ = conn.Close() + } + }() + + if err != nil { + if v, ok := err.(interface{ Timeout() bool }); ok && v.Timeout() { + pc.setTcpPortCheckState(port, tcpPortCheckStateTimeout) + } else { + pc.setTcpPortCheckState(port, tcpPortCheckStateFailed) + } + return + } + + pc.setTcpPortCheckState(port, tcpPortCheckStateSuccess) + port.latency = durationToMs(dur) +} + +func (pc *PortCheck) setTcpPortCheckState(port *tcpPort, state string) { + if port.status != state { + port.status = state + port.statusChangeTs = time.Now() + } else if port.statusChangeTs.IsZero() { + port.statusChangeTs = time.Now() + } +} diff --git a/src/go/plugin/go.d/modules/portcheck/check_udp_port.go b/src/go/plugin/go.d/modules/portcheck/check_udp_port.go new file mode 100644 index 000000000..b61831f9f --- /dev/null +++ b/src/go/plugin/go.d/modules/portcheck/check_udp_port.go @@ -0,0 +1,167 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package portcheck + +import ( + "errors" + "fmt" + "net" + "time" + + "golang.org/x/net/icmp" + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) + +const ( + udpPortCheckStateOpenFiltered = "open_filtered" + udpPortCheckStateClosed = "closed" +) + +type udpPort struct { + number int + status string + statusChangeTs time.Time + + err error +} + +func (pc *PortCheck) checkUDPPort(port *udpPort) { + port.err = nil + + timeout := time.Duration(max(float64(100*time.Millisecond), float64(pc.Timeout.Duration())*0.7)) + addr := pc.address(port.number) + + open, err := pc.scanUDP(addr, timeout) + if err != nil { + pc.Warningf("UDP port check failed for '%s': %v", addr, err) + port.err = err + return + } + + state := udpPortCheckStateOpenFiltered + if !open { + state = udpPortCheckStateClosed + } + + pc.setUDPPortCheckState(port, state) +} + +func (pc *PortCheck) setUDPPortCheckState(port *udpPort, state string) { + if port.status != state { + port.status = state + port.statusChangeTs = time.Now() + } else if port.statusChangeTs.IsZero() { + port.statusChangeTs = time.Now() + } +} + +func scanUDPPort(address string, timeout time.Duration) (bool, error) { + // With this scan type, we send 0-byte UDP packets to the port on the target system. + // Receipt of an ICMP Destination Unreachable message signifies the port is closed; + // otherwise it is assumed open (timeout). + // This is equivalent to the "closed"/"open/filtered" states reported by nmap. 
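+ // The flow below: resolve the target address, open a UDP socket to it, open an ICMP listener, send an empty datagram, then wait. A Destination Unreachable reply whose embedded UDP header carries our source port means the port is closed; a read timeout means open/filtered. Opening the ICMP listener needs elevated privileges, which is why collect() disables UDP checks when it sees a "listen: operation not permitted" error.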
+ + raddr, err := net.ResolveUDPAddr("udp", address) + if err != nil { + return false, fmt.Errorf("failed to resolve UDP address: %w", err) + } + + network, icmpNetwork, icmpProto := getUDPNetworkParams(raddr.IP) + + udpConn, err := net.DialUDP(network, nil, raddr) + if err != nil { + return false, fmt.Errorf("failed to open UDP connection to '%s': %w", raddr.String(), err) + } + defer func() { _ = udpConn.Close() }() + + icmpConn, err := icmp.ListenPacket(icmpNetwork, "") + if err != nil { + return false, fmt.Errorf("failed to listen for ICMP packets: %w", err) + } + defer func() { _ = icmpConn.Close() }() + + if _, err = udpConn.Write([]byte{}); err != nil { + return false, fmt.Errorf("failed to send UDP packet: %w", err) + } + + return readICMPResponse(icmpConn, udpConn, icmpProto, timeout) +} + +func readICMPResponse(icmpConn *icmp.PacketConn, udpConn *net.UDPConn, icmpProto int, timeout time.Duration) (bool, error) { + buff := make([]byte, 1500) + + if err := icmpConn.SetReadDeadline(time.Now().Add(timeout)); err != nil { + return false, fmt.Errorf("failed to set read deadline on ICMP connection: %w", err) + } + + localPort := uint16(udpConn.LocalAddr().(*net.UDPAddr).Port) + + for { + n, _, err := icmpConn.ReadFrom(buff) + if err != nil { + if errors.Is(err, net.ErrClosed) { + return false, fmt.Errorf("ICMP connection closed unexpectedly") + } + var netErr net.Error + if errors.As(err, &netErr) && netErr.Timeout() { + return true, nil // Timeout means no ICMP response, assume port is open + } + return false, fmt.Errorf("failed to read ICMP packet: %w", err) + } + + if n == 0 { + continue + } + + msg, err := icmp.ParseMessage(icmpProto, buff[:n]) + if err != nil { + return false, fmt.Errorf("failed to parse ICMP message: %w", err) + } + + if msg.Type != ipv4.ICMPTypeDestinationUnreachable && msg.Type != ipv6.ICMPTypeDestinationUnreachable { + continue + } + + body, ok := msg.Body.(*icmp.DstUnreach) + if !ok { + continue + } + + srcPort, err := extractSourcePort(msg.Type, body.Data) + if err != nil { + return false, err + } + + if srcPort == localPort { + return false, nil // Received ICMP Destination Unreachable, port is closed + } + } +} + +func getUDPNetworkParams(ip net.IP) (network, icmpNetwork string, icmpProto int) { + if ip.To4() != nil { + return "udp4", "ip4:icmp", 1 + } + return "udp6", "ip6:ipv6-icmp", 58 +} + +func extractSourcePort(msgType icmp.Type, data []byte) (uint16, error) { + const udpHeaderLen = 8 + var headerLen, minLen int + + switch msgType { + case ipv4.ICMPTypeDestinationUnreachable: + headerLen, minLen = ipv4.HeaderLen, ipv4.HeaderLen+udpHeaderLen + case ipv6.ICMPTypeDestinationUnreachable: + headerLen, minLen = ipv6.HeaderLen, ipv6.HeaderLen+udpHeaderLen + default: + return 0, fmt.Errorf("unexpected ICMP message type: %v", msgType) + } + + if len(data) < minLen { + return 0, fmt.Errorf("ICMP message too short: want %d got %d", minLen, len(data)) + } + + return (uint16(data[headerLen]) << udpHeaderLen) | uint16(data[headerLen+1]), nil +} diff --git a/src/go/plugin/go.d/modules/portcheck/collect.go b/src/go/plugin/go.d/modules/portcheck/collect.go index dab45ec41..f9bd43ce0 100644 --- a/src/go/plugin/go.d/modules/portcheck/collect.go +++ b/src/go/plugin/go.d/modules/portcheck/collect.go @@ -3,75 +3,95 @@ package portcheck import ( + "errors" "fmt" + "net" + "strconv" + "strings" "sync" "time" ) -type checkState string - -const ( - checkStateSuccess checkState = "success" - checkStateTimeout checkState = "timeout" - checkStateFailed checkState = "failed" -) - 
func (pc *PortCheck) collect() (map[string]int64, error) { wg := &sync.WaitGroup{} - for _, p := range pc.ports { + for _, port := range pc.tcpPorts { wg.Add(1) - go func(p *port) { pc.checkPort(p); wg.Done() }(p) + port := port + go func() { defer wg.Done(); pc.checkTCPPort(port) }() + } + + if pc.doUdpPorts { + for _, port := range pc.udpPorts { + wg.Add(1) + port := port + go func() { defer wg.Done(); pc.checkUDPPort(port) }() + } } + wg.Wait() mx := make(map[string]int64) - for _, p := range pc.ports { - mx[fmt.Sprintf("port_%d_current_state_duration", p.number)] = int64(p.inState) - mx[fmt.Sprintf("port_%d_latency", p.number)] = int64(p.latency) - mx[fmt.Sprintf("port_%d_%s", p.number, checkStateSuccess)] = 0 - mx[fmt.Sprintf("port_%d_%s", p.number, checkStateTimeout)] = 0 - mx[fmt.Sprintf("port_%d_%s", p.number, checkStateFailed)] = 0 - mx[fmt.Sprintf("port_%d_%s", p.number, p.state)] = 1 + now := time.Now() + + for _, p := range pc.tcpPorts { + if !pc.seenTcpPorts[p.number] { + pc.seenTcpPorts[p.number] = true + pc.addTCPPortCharts(p) + } + + px := fmt.Sprintf("tcp_port_%d_", p.number) + + mx[px+"current_state_duration"] = int64(now.Sub(p.statusChangeTs).Seconds()) + mx[px+"latency"] = int64(p.latency) + mx[px+tcpPortCheckStateSuccess] = 0 + mx[px+tcpPortCheckStateTimeout] = 0 + mx[px+tcpPortCheckStateFailed] = 0 + mx[px+p.status] = 1 } - return mx, nil -} + if pc.doUdpPorts { + for _, p := range pc.udpPorts { + if p.err != nil { + if isListenOpNotPermittedError(p.err) { + pc.doUdpPorts = false + break + } + continue + } -func (pc *PortCheck) checkPort(p *port) { - start := time.Now() - conn, err := pc.dial("tcp", fmt.Sprintf("%s:%d", pc.Host, p.number), pc.Timeout.Duration()) - dur := time.Since(start) + if !pc.seenUdpPorts[p.number] { + pc.seenUdpPorts[p.number] = true + pc.addUDPPortCharts(p) + } - defer func() { - if conn != nil { - _ = conn.Close() - } - }() - - if err != nil { - v, ok := err.(interface{ Timeout() bool }) - if ok && v.Timeout() { - pc.setPortState(p, checkStateTimeout) - } else { - pc.setPortState(p, checkStateFailed) + px := fmt.Sprintf("udp_port_%d_", p.number) + + mx[px+"current_status_duration"] = int64(now.Sub(p.statusChangeTs).Seconds()) + mx[px+udpPortCheckStateOpenFiltered] = 0 + mx[px+udpPortCheckStateClosed] = 0 + mx[px+p.status] = 1 } - return } - pc.setPortState(p, checkStateSuccess) - p.latency = durationToMs(dur) + + return mx, nil } -func (pc *PortCheck) setPortState(p *port, s checkState) { - if p.state != s { - p.inState = pc.UpdateEvery - p.state = s - } else { - p.inState += pc.UpdateEvery - } +func (pc *PortCheck) address(port int) string { + // net.JoinHostPort expects literal IPv6 address, it adds [] + host := strings.Trim(pc.Host, "[]") + return net.JoinHostPort(host, strconv.Itoa(port)) } func durationToMs(duration time.Duration) int { return int(duration) / (int(time.Millisecond) / int(time.Nanosecond)) } + +func isListenOpNotPermittedError(err error) bool { + // icmp.ListenPacket failed (socket: operation not permitted) + var opErr *net.OpError + return errors.As(err, &opErr) && + opErr.Op == "listen" && + strings.Contains(opErr.Error(), "operation not permitted") +} diff --git a/src/go/plugin/go.d/modules/portcheck/config_schema.json b/src/go/plugin/go.d/modules/portcheck/config_schema.json index 025b78f85..47fc7378f 100644 --- a/src/go/plugin/go.d/modules/portcheck/config_schema.json +++ b/src/go/plugin/go.d/modules/portcheck/config_schema.json @@ -22,10 +22,11 @@ "host": { "title": "Network host", "description": "The IP address or 
domain name of the network host.", - "type": "string" + "type": "string", + "default": "127.0.0.1" }, "ports": { - "title": "Ports", + "title": "TCP ports", "description": "A list of ports to monitor for TCP service availability and response time.", "type": [ "array", @@ -36,20 +37,56 @@ "type": "integer", "minimum": 1 }, - "minItems": 1, + "uniqueItems": true + }, + "udp_ports": { + "title": "UDP ports", + "description": "A list of ports to monitor for UDP service availability.", + "type": [ + "array", + "null" + ], + "items": { + "title": "Port", + "type": "integer", + "minimum": 1 + }, "uniqueItems": true } }, "required": [ - "host", - "ports" + "host" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } }, "uiSchema": { + "ui:flavour": "tabs", + "ui:options": { + "tabs": [ + { + "title": "Base", + "fields": [ + "update_every", + "timeout", + "host" + ] + }, + { + "title": "TCP", + "fields": [ + "ports" + ] + }, + { + "title": "UDP", + "fields": [ + "udp_ports" + ] + } + ] + }, "uiOptions": { "fullPage": true }, @@ -61,6 +98,10 @@ }, "ports": { "ui:listFlavour": "list" + }, + "udp_ports": { + "ui:help": "The collector sends 0-byte UDP packets to each port on the target system. If an ICMP Destination Unreachable message is received, the port is considered closed. Otherwise, it is assumed to be open or filtered (if no response is received within the timeout). This approach is similar to the behavior of the `close`/`open/filtered` states reported by `nmap`. However, note that the `open/filtered` state is a best-effort determination, as the collector does not actually exchange data with the application on the target system.", + "ui:listFlavour": "list" } } } diff --git a/src/go/plugin/go.d/modules/portcheck/init.go b/src/go/plugin/go.d/modules/portcheck/init.go index 17b402340..1f71ba366 100644 --- a/src/go/plugin/go.d/modules/portcheck/init.go +++ b/src/go/plugin/go.d/modules/portcheck/init.go @@ -6,44 +6,27 @@ import ( "errors" "net" "time" - - "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" ) -type dialFunc func(network, address string, timeout time.Duration) (net.Conn, error) - -type port struct { - number int - state checkState - inState int - latency int -} +type dialTCPFunc func(network, address string, timeout time.Duration) (net.Conn, error) func (pc *PortCheck) validateConfig() error { if pc.Host == "" { - return errors.New("'host' parameter not set") + return errors.New("missing required parameter: 'host' must be specified") } - if len(pc.Ports) == 0 { - return errors.New("'ports' parameter not set") + if len(pc.Ports) == 0 && len(pc.UDPPorts) == 0 { + return errors.New("missing required parameters: at least one of 'ports' (TCP) or 'udp_ports' (UDP) must be specified") } return nil } -func (pc *PortCheck) initCharts() (*module.Charts, error) { - charts := module.Charts{} - - for _, port := range pc.Ports { - if err := charts.Add(*newPortCharts(pc.Host, port)...); err != nil { - return nil, err - } - } - - return &charts, nil -} - -func (pc *PortCheck) initPorts() (ports []*port) { +func (pc *PortCheck) initPorts() (tcpPorts []*tcpPort, udpPorts []*udpPort) { for _, p := range pc.Ports { - ports = append(ports, &port{number: p}) + tcpPorts = append(tcpPorts, &tcpPort{number: p}) } - return ports + for _, p := range pc.UDPPorts { + udpPorts = append(udpPorts, &udpPort{number: p}) + } + + return tcpPorts, udpPorts } diff --git a/src/go/plugin/go.d/modules/portcheck/integrations/tcp-udp_endpoints.md 
b/src/go/plugin/go.d/modules/portcheck/integrations/tcp-udp_endpoints.md new file mode 100644 index 000000000..41eb5c4fa --- /dev/null +++ b/src/go/plugin/go.d/modules/portcheck/integrations/tcp-udp_endpoints.md @@ -0,0 +1,327 @@ + + +# TCP/UDP Endpoints + + + + + +Plugin: go.d.plugin +Module: portcheck + + + +## Overview + +Collector for monitoring service availability and response time. It can be used to check if specific ports are open or reachable on a target system. + +It supports both TCP and UDP protocols over IPv4 and IPv6 networks. + +| Protocol | Check Description | +|----------|-----------------------------------------------------------------------------------------------------------------------------| +| TCP | Attempts to establish a TCP connection to the specified ports on the target system. | +| UDP | Sends a 0-byte UDP packet to the specified ports on the target system and analyzes ICMP responses to determine port status. | + +Possible TCP statuses: + +| TCP Status | Description | +|------------|-------------------------------------------------------------| +| success | Connection established successfully. | +| timeout | Connection timed out after waiting for configured duration. | +| failed | An error occurred during the connection attempt. | + +Possible UDP statuses: + +| UDP Status | Description | +|---------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| open/filtered | No response received within the configured timeout. This status indicates the port is either open or filtered, but the exact state cannot be determined definitively. | +| closed | Received an ICMP Destination Unreachable message, indicating the port is closed. | + + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per TCP endpoint + +These metrics refer to the TCP endpoint. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| host | The hostname or IP address of the target system, as specified in the configuration. | +| port | The TCP port being monitored, as defined in the 'ports' configuration parameter. | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| portcheck.status | success, failed, timeout | boolean | +| portcheck.state_duration | time | seconds | +| portcheck.latency | time | ms | + +### Per UDP endpoint + +These metrics refer to the UDP endpoint. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| host | The hostname or IP address of the target system, as specified in the configuration. | +| port | The UDP port being monitored, as defined in the 'udp_ports' configuration parameter. 
| + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| portcheck.udp_port_status | open/filtered, closed | status | +| portcheck.udp_port_status_duration | time | seconds | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ portcheck_service_reachable ](https://github.com/netdata/netdata/blob/master/src/health/health.d/portcheck.conf) | portcheck.status | TCP host ${label:host} port ${label:port} liveness status | +| [ portcheck_connection_timeouts ](https://github.com/netdata/netdata/blob/master/src/health/health.d/portcheck.conf) | portcheck.status | percentage of timed-out TCP connections to host ${label:host} port ${label:port} in the last 5 minutes | +| [ portcheck_connection_fails ](https://github.com/netdata/netdata/blob/master/src/health/health.d/portcheck.conf) | portcheck.status | percentage of failed TCP connections to host ${label:host} port ${label:port} in the last 5 minutes | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/portcheck.conf`. + + +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/portcheck.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +
    Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 5 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| host | Remote host address in IPv4, IPv6 format, or DNS name. | | yes | +| ports | Target TCP ports. Must be specified in numeric format. | | no | +| udp_ports | Target UDP ports. Must be specified in numeric format. | | no | +| timeout | Check timeout in seconds (applies to both TCP connection attempts and UDP response waits). | 2 | no | + +
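Since `ports` and `udp_ports` are independent options, one job can check both protocols at once. A minimal sketch (host and port numbers are illustrative):

```yaml
jobs:
  - name: local
    host: 127.0.0.1
    ports:        # TCP checks
      - 22
    udp_ports:    # UDP checks
      - 53
```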
    + +#### Examples + +##### Check TCP ports (IPv4) + +An example configuration. + +
    Config + +```yaml +jobs: + - name: local + host: 127.0.0.1 + ports: + - 22 + - 23 + +``` +
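The `host` option accepts a DNS name as well as an IP address, so the same check can target a remote machine by name. A sketch, with a placeholder hostname:

```yaml
jobs:
  - name: remote
    host: example.com   # placeholder DNS name, resolved when the connection is attempted
    ports:
      - 443
```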
    + +##### Check TCP ports (IPv6) + +An example configuration. + +
    Config + +```yaml +jobs: + - name: local + host: "[2001:DB8::1]" + ports: + - 80 + - 8080 + +``` +
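Judging by the collector's `address()` helper, which strips surrounding brackets before rebuilding the address with `net.JoinHostPort`, the brackets around an IPv6 literal appear to be optional. A sketch assuming that behavior:

```yaml
jobs:
  - name: local
    host: "2001:DB8::1"   # unbracketed IPv6; address() re-adds the [] itself
    ports:
      - 80
```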
    + +##### Check UDP ports (IPv4) + +An example configuration. + +
    Config + +```yaml +jobs: + - name: local + host: 127.0.0.1 + udp_ports: + - 3120 + - 3121 + +``` +
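Because a UDP port is reported as open/filtered only when no ICMP reply arrives before the timeout expires, raising `timeout` trades a slower check for fewer spurious open/filtered readings on slow links. A sketch with illustrative values:

```yaml
jobs:
  - name: local
    host: 127.0.0.1
    timeout: 5      # seconds; the default is 2
    udp_ports:
      - 123
```

Note that the UDP check listens for ICMP replies; per the collector code, if opening the ICMP listener is not permitted, UDP checks are disabled for the job.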
    + +##### Check UDP ports (IPv6) + +An example configuration. + +
    Config + +```yaml +jobs: + - name: local + host: "[::1]" + udp_ports: + - 3120 + - 3121 + +``` +
    + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Multiple instances. + + +
    Config + +```yaml +jobs: + - name: server1 + host: 127.0.0.1 + ports: + - 22 + - 23 + + - name: server2 + host: 203.0.113.10 + ports: + - 22 + - 23 + +``` +
    + + + +## Troubleshooting + +### Debug Mode + +**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature. + +To troubleshoot issues with the `portcheck` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m portcheck + ``` + +### Getting Logs + +If you're encountering problems with the `portcheck` collector, follow these steps to retrieve logs and identify potential issues: + +- **Run the command** specific to your system (systemd, non-systemd, or Docker container). +- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem. + +#### System with systemd + +Use the following command to view logs generated since the last Netdata service restart: + +```bash +journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep portcheck +``` + +#### System without systemd + +Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name: + +```bash +grep portcheck /var/log/netdata/collector.log +``` + +**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues. + +#### Docker Container + +If your Netdata runs in a Docker container named "netdata" (replace if different), use this command: + +```bash +docker logs netdata 2>&1 | grep portcheck +``` + + diff --git a/src/go/plugin/go.d/modules/portcheck/integrations/tcp_endpoints.md b/src/go/plugin/go.d/modules/portcheck/integrations/tcp_endpoints.md deleted file mode 100644 index 9259afd3b..000000000 --- a/src/go/plugin/go.d/modules/portcheck/integrations/tcp_endpoints.md +++ /dev/null @@ -1,252 +0,0 @@ - - -# TCP Endpoints - - - - - -Plugin: go.d.plugin -Module: portcheck - - - -## Overview - -This collector monitors TCP services availability and response time. - - - - -This collector is supported on all platforms. - -This collector supports collecting metrics from multiple instances of this integration, including remote instances. - - -### Default Behavior - -#### Auto-Detection - -This integration doesn't support auto-detection. - -#### Limits - -The default configuration for this integration does not impose any limits on data collection. - -#### Performance Impact - -The default configuration for this integration is not expected to impose a significant performance impact on the system. - - -## Metrics - -Metrics grouped by *scope*. - -The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. - - - -### Per tcp endpoint - -These metrics refer to the TCP endpoint. 
- -Labels: - -| Label | Description | -|:-----------|:----------------| -| host | host | -| port | port | - -Metrics: - -| Metric | Dimensions | Unit | -|:------|:----------|:----| -| portcheck.status | success, failed, timeout | boolean | -| portcheck.state_duration | time | seconds | -| portcheck.latency | time | ms | - - - -## Alerts - - -The following alerts are available: - -| Alert name | On metric | Description | -|:------------|:----------|:------------| -| [ portcheck_service_reachable ](https://github.com/netdata/netdata/blob/master/src/health/health.d/portcheck.conf) | portcheck.status | TCP host ${label:host} port ${label:port} liveness status | -| [ portcheck_connection_timeouts ](https://github.com/netdata/netdata/blob/master/src/health/health.d/portcheck.conf) | portcheck.status | percentage of timed-out TCP connections to host ${label:host} port ${label:port} in the last 5 minutes | -| [ portcheck_connection_fails ](https://github.com/netdata/netdata/blob/master/src/health/health.d/portcheck.conf) | portcheck.status | percentage of failed TCP connections to host ${label:host} port ${label:port} in the last 5 minutes | - - -## Setup - -### Prerequisites - -No action required. - -### Configuration - -#### File - -The configuration file name for this integration is `go.d/portcheck.conf`. - - -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). - -```bash -cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata -sudo ./edit-config go.d/portcheck.conf -``` -#### Options - -The following options can be defined globally: update_every, autodetection_retry. - - -
    Config options - -| Name | Description | Default | Required | -|:----|:-----------|:-------|:--------:| -| update_every | Data collection frequency. | 5 | no | -| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | -| host | Remote host address in IPv4, IPv6 format, or DNS name. | | yes | -| ports | Remote host ports. Must be specified in numeric format. | | yes | -| timeout | HTTP request timeout. | 2 | no | - -
    - -#### Examples - -##### Check SSH and telnet - -An example configuration. - -
    Config - -```yaml -jobs: - - name: server1 - host: 127.0.0.1 - ports: - - 22 - - 23 - -``` -
    - -##### Check webserver with IPv6 address - -An example configuration. - -
    Config - -```yaml -jobs: - - name: server2 - host: "[2001:DB8::1]" - ports: - - 80 - - 8080 - -``` -
    - -##### Multi-instance - -> **Note**: When you define multiple jobs, their names must be unique. - -Multiple instances. - - -
    Config - -```yaml -jobs: - - name: server1 - host: 127.0.0.1 - ports: - - 22 - - 23 - - - name: server2 - host: 203.0.113.10 - ports: - - 22 - - 23 - -``` -
    - - - -## Troubleshooting - -### Debug Mode - -**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature. - -To troubleshoot issues with the `portcheck` collector, run the `go.d.plugin` with the debug option enabled. The output -should give you clues as to why the collector isn't working. - -- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on - your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. - - ```bash - cd /usr/libexec/netdata/plugins.d/ - ``` - -- Switch to the `netdata` user. - - ```bash - sudo -u netdata -s - ``` - -- Run the `go.d.plugin` to debug the collector: - - ```bash - ./go.d.plugin -d -m portcheck - ``` - -### Getting Logs - -If you're encountering problems with the `portcheck` collector, follow these steps to retrieve logs and identify potential issues: - -- **Run the command** specific to your system (systemd, non-systemd, or Docker container). -- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem. - -#### System with systemd - -Use the following command to view logs generated since the last Netdata service restart: - -```bash -journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep portcheck -``` - -#### System without systemd - -Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name: - -```bash -grep portcheck /var/log/netdata/collector.log -``` - -**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues. - -#### Docker Container - -If your Netdata runs in a Docker container named "netdata" (replace if different), use this command: - -```bash -docker logs netdata 2>&1 | grep portcheck -``` - - diff --git a/src/go/plugin/go.d/modules/portcheck/metadata.yaml b/src/go/plugin/go.d/modules/portcheck/metadata.yaml index c0ccfde1d..1bee8a9af 100644 --- a/src/go/plugin/go.d/modules/portcheck/metadata.yaml +++ b/src/go/plugin/go.d/modules/portcheck/metadata.yaml @@ -5,7 +5,7 @@ modules: plugin_name: go.d.plugin module_name: portcheck monitored_instance: - name: TCP Endpoints + name: TCP/UDP Endpoints link: "" icon_filename: globe.svg categories: @@ -20,7 +20,29 @@ modules: overview: data_collection: metrics_description: | - This collector monitors TCP services availability and response time. + Collector for monitoring service availability and response time. It can be used to check if specific ports are open or reachable on a target system. + + It supports both TCP and UDP protocols over IPv4 and IPv6 networks. + + | Protocol | Check Description | + |----------|-----------------------------------------------------------------------------------------------------------------------------| + | TCP | Attempts to establish a TCP connection to the specified ports on the target system. | + | UDP | Sends a 0-byte UDP packet to the specified ports on the target system and analyzes ICMP responses to determine port status. | + + Possible TCP statuses: + + | TCP Status | Description | + |------------|-------------------------------------------------------------| + | success | Connection established successfully. | + | timeout | Connection timed out after waiting for configured duration. 
| + | failed | An error occurred during the connection attempt. | + + Possible UDP statuses: + + | UDP Status | Description | + |---------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------| + | open/filtered | No response received within the configured timeout. This status indicates the port is either open or filtered, but the exact state cannot be determined definitively. | + | closed | Received an ICMP Destination Unreachable message, indicating the port is closed. | method_description: "" supported_platforms: include: [] @@ -61,9 +83,13 @@ modules: default_value: "" required: true - name: ports - description: Remote host ports. Must be specified in numeric format. + description: Target TCP ports. Must be specified in numeric format. default_value: "" - required: true + required: false + - name: udp_ports + description: Target UDP ports. Must be specified in numeric format. + default_value: "" + required: false - name: timeout - description: HTTP request timeout. + description: Check timeout in seconds (applies to both TCP connection attempts and UDP response waits). default_value: 2 required: false @@ -73,24 +99,42 @@ modules: title: Config enabled: true list: - - name: Check SSH and telnet + - name: Check TCP ports (IPv4) description: An example configuration. config: | jobs: - - name: server1 + - name: local host: 127.0.0.1 ports: - 22 - 23 - - name: Check webserver with IPv6 address + - name: Check TCP ports (IPv6) description: An example configuration. config: | jobs: - - name: server2 + - name: local host: "[2001:DB8::1]" ports: - 80 - 8080 + - name: Check UDP ports (IPv4) + description: An example configuration. + config: | + jobs: + - name: local + host: 127.0.0.1 + udp_ports: + - 3120 + - 3121 + - name: Check UDP ports (IPv6) + description: An example configuration. + config: | + jobs: + - name: local + host: "[::1]" + udp_ports: + - 3120 + - 3121 - name: Multi-instance description: | > **Note**: When you define multiple jobs, their names must be unique. @@ -132,13 +176,13 @@ modules: description: "" availability: [] scopes: - - name: tcp endpoint + - name: TCP endpoint description: These metrics refer to the TCP endpoint. labels: - name: host - description: host + description: The hostname or IP address of the target system, as specified in the configuration. - name: port - description: port + description: The TCP port being monitored, as defined in the 'ports' configuration parameter. metrics: - name: portcheck.status description: TCP Check Status @@ -160,3 +204,24 @@ modules: chart_type: line dimensions: - name: time + - name: UDP endpoint + description: These metrics refer to the UDP endpoint. + labels: + - name: host + description: The hostname or IP address of the target system, as specified in the configuration. + - name: port + description: The UDP port being monitored, as defined in the 'udp_ports' configuration parameter.
+ metrics: + - name: portcheck.udp_port_status + description: UDP Port Check Status + unit: status + chart_type: line + dimensions: + - name: open/filtered + - name: closed + - name: portcheck.udp_port_status_duration + description: UDP Port Current Status Duration + unit: seconds + chart_type: line + dimensions: + - name: time diff --git a/src/go/plugin/go.d/modules/portcheck/portcheck.go b/src/go/plugin/go.d/modules/portcheck/portcheck.go index 3a6da78ac..04614d6da 100644 --- a/src/go/plugin/go.d/modules/portcheck/portcheck.go +++ b/src/go/plugin/go.d/modules/portcheck/portcheck.go @@ -4,11 +4,13 @@ package portcheck import ( _ "embed" + "errors" + "fmt" "net" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" ) //go:embed "config_schema.json" @@ -28,17 +30,26 @@ func init() { func New() *PortCheck { return &PortCheck{ Config: Config{ - Timeout: web.Duration(time.Second * 2), + Timeout: confopt.Duration(time.Second * 2), }, - dial: net.DialTimeout, + charts: &module.Charts{}, + + dialTCP: net.DialTimeout, + + scanUDP: scanUDPPort, + doUdpPorts: true, + + seenUdpPorts: make(map[int]bool), + seenTcpPorts: make(map[int]bool), } } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - Host string `yaml:"host" json:"host"` - Ports []int `yaml:"ports" json:"ports"` - Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + Host string `yaml:"host" json:"host"` + Ports []int `yaml:"ports" json:"ports"` + UDPPorts []int `yaml:"udp_ports,omitempty" json:"udp_ports"` + Timeout confopt.Duration `yaml:"timeout,omitempty" json:"timeout"` } type PortCheck struct { @@ -47,9 +58,15 @@ type PortCheck struct { charts *module.Charts - dial dialFunc + dialTCP dialTCPFunc + scanUDP func(address string, timeout time.Duration) (bool, error) - ports []*port + tcpPorts []*tcpPort + seenTcpPorts map[int]bool + + udpPorts []*udpPort + seenUdpPorts map[int]bool + doUdpPorts bool } func (pc *PortCheck) Configuration() any { @@ -58,27 +75,28 @@ func (pc *PortCheck) Configuration() any { func (pc *PortCheck) Init() error { if err := pc.validateConfig(); err != nil { - pc.Errorf("config validation: %v", err) - return err - } - - charts, err := pc.initCharts() - if err != nil { - pc.Errorf("init charts: %v", err) - return err + return fmt.Errorf("config validation: %v", err) } - pc.charts = charts - pc.ports = pc.initPorts() + pc.tcpPorts, pc.udpPorts = pc.initPorts() pc.Debugf("using host: %s", pc.Host) - pc.Debugf("using ports: %v", pc.Ports) - pc.Debugf("using TCP connection timeout: %s", pc.Timeout) + pc.Debugf("using ports: tcp %v udp %v", pc.Ports, pc.UDPPorts) + pc.Debugf("using connection timeout: %s", pc.Timeout) return nil } func (pc *PortCheck) Check() error { + mx, err := pc.collect() + if err != nil { + return err + } + + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } @@ -95,6 +113,7 @@ func (pc *PortCheck) Collect() map[string]int64 { if len(mx) == 0 { return nil } + return mx } diff --git a/src/go/plugin/go.d/modules/portcheck/portcheck_test.go b/src/go/plugin/go.d/modules/portcheck/portcheck_test.go index 86a2c9679..373d44de6 100644 --- a/src/go/plugin/go.d/modules/portcheck/portcheck_test.go +++ b/src/go/plugin/go.d/modules/portcheck/portcheck_test.go @@ -40,7 +40,7 @@ func TestPortCheck_Init(t *testing.T) { 
job.Host = "127.0.0.1" job.Ports = []int{39001, 39002} assert.NoError(t, job.Init()) - assert.Len(t, job.ports, 2) + assert.Len(t, job.tcpPorts, 2) } func TestPortCheck_InitNG(t *testing.T) { job := New() @@ -53,7 +53,7 @@ func TestPortCheck_InitNG(t *testing.T) { } func TestPortCheck_Check(t *testing.T) { - assert.NoError(t, New().Check()) + assert.Error(t, New().Check()) } func TestPortCheck_Cleanup(t *testing.T) { @@ -65,7 +65,6 @@ func TestPortCheck_Charts(t *testing.T) { job.Ports = []int{1, 2} job.Host = "localhost" require.NoError(t, job.Init()) - assert.Len(t, *job.Charts(), len(chartsTmpl)*len(job.Ports)) } func TestPortCheck_Collect(t *testing.T) { @@ -74,92 +73,92 @@ func TestPortCheck_Collect(t *testing.T) { job.Host = "127.0.0.1" job.Ports = []int{39001, 39002} job.UpdateEvery = 5 - job.dial = testDial(nil) + job.dialTCP = testDial(nil) require.NoError(t, job.Init()) require.NoError(t, job.Check()) - copyLatency := func(dst, src map[string]int64) { + copyLatencyDuration := func(dst, src map[string]int64) { for k := range dst { - if strings.HasSuffix(k, "latency") { + if strings.HasSuffix(k, "latency") || strings.HasSuffix(k, "duration") { dst[k] = src[k] } } } expected := map[string]int64{ - "port_39001_current_state_duration": int64(job.UpdateEvery), - "port_39001_failed": 0, - "port_39001_latency": 0, - "port_39001_success": 1, - "port_39001_timeout": 0, - "port_39002_current_state_duration": int64(job.UpdateEvery), - "port_39002_failed": 0, - "port_39002_latency": 0, - "port_39002_success": 1, - "port_39002_timeout": 0, + "tcp_port_39001_current_state_duration": int64(job.UpdateEvery * 2), + "tcp_port_39001_failed": 0, + "tcp_port_39001_latency": 0, + "tcp_port_39001_success": 1, + "tcp_port_39001_timeout": 0, + "tcp_port_39002_current_state_duration": int64(job.UpdateEvery * 2), + "tcp_port_39002_failed": 0, + "tcp_port_39002_latency": 0, + "tcp_port_39002_success": 1, + "tcp_port_39002_timeout": 0, } - collected := job.Collect() - copyLatency(expected, collected) + mx := job.Collect() + copyLatencyDuration(expected, mx) - assert.Equal(t, expected, collected) + assert.Equal(t, expected, mx) expected = map[string]int64{ - "port_39001_current_state_duration": int64(job.UpdateEvery) * 2, - "port_39001_failed": 0, - "port_39001_latency": 0, - "port_39001_success": 1, - "port_39001_timeout": 0, - "port_39002_current_state_duration": int64(job.UpdateEvery) * 2, - "port_39002_failed": 0, - "port_39002_latency": 0, - "port_39002_success": 1, - "port_39002_timeout": 0, + "tcp_port_39001_current_state_duration": int64(job.UpdateEvery) * 3, + "tcp_port_39001_failed": 0, + "tcp_port_39001_latency": 0, + "tcp_port_39001_success": 1, + "tcp_port_39001_timeout": 0, + "tcp_port_39002_current_state_duration": int64(job.UpdateEvery) * 3, + "tcp_port_39002_failed": 0, + "tcp_port_39002_latency": 0, + "tcp_port_39002_success": 1, + "tcp_port_39002_timeout": 0, } - collected = job.Collect() - copyLatency(expected, collected) + mx = job.Collect() + copyLatencyDuration(expected, mx) - assert.Equal(t, expected, collected) + assert.Equal(t, expected, mx) - job.dial = testDial(errors.New("checkStateFailed")) + job.dialTCP = testDial(errors.New("checkStateFailed")) expected = map[string]int64{ - "port_39001_current_state_duration": int64(job.UpdateEvery), - "port_39001_failed": 1, - "port_39001_latency": 0, - "port_39001_success": 0, - "port_39001_timeout": 0, - "port_39002_current_state_duration": int64(job.UpdateEvery), - "port_39002_failed": 1, - "port_39002_latency": 0, - "port_39002_success": 
0, - "port_39002_timeout": 0, + "tcp_port_39001_current_state_duration": int64(job.UpdateEvery), + "tcp_port_39001_failed": 1, + "tcp_port_39001_latency": 0, + "tcp_port_39001_success": 0, + "tcp_port_39001_timeout": 0, + "tcp_port_39002_current_state_duration": int64(job.UpdateEvery), + "tcp_port_39002_failed": 1, + "tcp_port_39002_latency": 0, + "tcp_port_39002_success": 0, + "tcp_port_39002_timeout": 0, } - collected = job.Collect() - copyLatency(expected, collected) + mx = job.Collect() + copyLatencyDuration(expected, mx) - assert.Equal(t, expected, collected) + assert.Equal(t, expected, mx) - job.dial = testDial(timeoutError{}) + job.dialTCP = testDial(timeoutError{}) expected = map[string]int64{ - "port_39001_current_state_duration": int64(job.UpdateEvery), - "port_39001_failed": 0, - "port_39001_latency": 0, - "port_39001_success": 0, - "port_39001_timeout": 1, - "port_39002_current_state_duration": int64(job.UpdateEvery), - "port_39002_failed": 0, - "port_39002_latency": 0, - "port_39002_success": 0, - "port_39002_timeout": 1, + "tcp_port_39001_current_state_duration": int64(job.UpdateEvery), + "tcp_port_39001_failed": 0, + "tcp_port_39001_latency": 0, + "tcp_port_39001_success": 0, + "tcp_port_39001_timeout": 1, + "tcp_port_39002_current_state_duration": int64(job.UpdateEvery), + "tcp_port_39002_latency": 0, + "tcp_port_39002_success": 0, + "tcp_port_39002_timeout": 1, + "tcp_port_39002_failed": 0, } - collected = job.Collect() - copyLatency(expected, collected) + mx = job.Collect() + copyLatencyDuration(expected, mx) - assert.Equal(t, expected, collected) + assert.Equal(t, expected, mx) } -func testDial(err error) dialFunc { +func testDial(err error) dialTCPFunc { return func(_, _ string, _ time.Duration) (net.Conn, error) { return &net.TCPConn{}, err } } diff --git a/src/go/plugin/go.d/modules/portcheck/testdata/config.json b/src/go/plugin/go.d/modules/portcheck/testdata/config.json index a69a6ac38..1f5fc5f30 100644 --- a/src/go/plugin/go.d/modules/portcheck/testdata/config.json +++ b/src/go/plugin/go.d/modules/portcheck/testdata/config.json @@ -4,5 +4,8 @@ "ports": [ 123 ], + "udp_ports": [ + 123 + ], "timeout": 123.123 } diff --git a/src/go/plugin/go.d/modules/portcheck/testdata/config.yaml b/src/go/plugin/go.d/modules/portcheck/testdata/config.yaml index 72bdfd549..82ef63c34 100644 --- a/src/go/plugin/go.d/modules/portcheck/testdata/config.yaml +++ b/src/go/plugin/go.d/modules/portcheck/testdata/config.yaml @@ -2,4 +2,6 @@ update_every: 123 host: "ok" ports: - 123 +udp_ports: + - 123 timeout: 123.123 diff --git a/src/go/plugin/go.d/modules/postfix/config_schema.json b/src/go/plugin/go.d/modules/postfix/config_schema.json index da416f14b..3cbdb9a40 100644 --- a/src/go/plugin/go.d/modules/postfix/config_schema.json +++ b/src/go/plugin/go.d/modules/postfix/config_schema.json @@ -28,7 +28,6 @@ "required": [ "binary_path" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/postfix/integrations/postfix.md b/src/go/plugin/go.d/modules/postfix/integrations/postfix.md index 503a8c66d..94bbdb913 100644 --- a/src/go/plugin/go.d/modules/postfix/integrations/postfix.md +++ b/src/go/plugin/go.d/modules/postfix/integrations/postfix.md @@ -91,8 +91,8 @@ No action required. The configuration file name for this integration is `go.d/postfix.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/postfix/postfix.go b/src/go/plugin/go.d/modules/postfix/postfix.go index 3622811ee..16e29bbea 100644 --- a/src/go/plugin/go.d/modules/postfix/postfix.go +++ b/src/go/plugin/go.d/modules/postfix/postfix.go @@ -5,10 +5,11 @@ package postfix import ( _ "embed" "errors" + "fmt" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" ) //go:embed "config_schema.json" @@ -29,16 +30,16 @@ func New() *Postfix { return &Postfix{ Config: Config{ BinaryPath: "/usr/sbin/postqueue", - Timeout: web.Duration(time.Second * 2), + Timeout: confopt.Duration(time.Second * 2), }, charts: charts.Copy(), } } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"` - BinaryPath string `yaml:"binary_path,omitempty" json:"binary_path"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + Timeout confopt.Duration `yaml:"timeout,omitempty" json:"timeout"` + BinaryPath string `yaml:"binary_path,omitempty" json:"binary_path"` } type ( @@ -61,14 +62,12 @@ func (p *Postfix) Configuration() any { func (p *Postfix) Init() error { if err := p.validateConfig(); err != nil { - p.Errorf("config validation: %s", err) - return err + return fmt.Errorf("config validation: %s", err) } pq, err := p.initPostqueueExec() if err != nil { - p.Errorf("postqueue exec initialization: %v", err) - return err + return fmt.Errorf("postqueue exec initialization: %v", err) } p.exec = pq @@ -78,7 +77,6 @@ func (p *Postfix) Init() error { func (p *Postfix) Check() error { mx, err := p.collect() if err != nil { - p.Error(err) return err } diff --git a/src/go/plugin/go.d/modules/postgres/config_schema.json b/src/go/plugin/go.d/modules/postgres/config_schema.json index 42bff329b..7347becad 100644 --- a/src/go/plugin/go.d/modules/postgres/config_schema.json +++ b/src/go/plugin/go.d/modules/postgres/config_schema.json @@ -91,7 +91,6 @@ "required": [ "dsn" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/postgres/init.go b/src/go/plugin/go.d/modules/postgres/init.go index e2bbecc16..35c791ae2 100644 --- a/src/go/plugin/go.d/modules/postgres/init.go +++ b/src/go/plugin/go.d/modules/postgres/init.go @@ -5,7 +5,7 @@ package postgres import ( "errors" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher" + "github.com/netdata/netdata/go/plugins/pkg/matcher" ) func (p *Postgres) validateConfig() error { diff --git a/src/go/plugin/go.d/modules/postgres/integrations/postgresql.md b/src/go/plugin/go.d/modules/postgres/integrations/postgresql.md index 4f2a91101..48dab8dc9 100644 --- a/src/go/plugin/go.d/modules/postgres/integrations/postgresql.md +++ b/src/go/plugin/go.d/modules/postgres/integrations/postgresql.md @@ -267,7 +267,7 @@ GRANT pg_monitor TO netdata; ``` After creating the new user, restart the Netdata agent with `sudo systemctl restart netdata`, or -the 
[appropriate method](/docs/netdata-agent/start-stop-restart.md) for your +the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/start-stop-restart.md) for your system. @@ -279,8 +279,8 @@ system. The configuration file name for this integration is `go.d/postgres.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -299,7 +299,7 @@ The following options can be defined globally: update_every, autodetection_retry | autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | | dsn | Postgres server DSN (Data Source Name). See [DSN syntax](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING). | postgres://postgres:postgres@127.0.0.1:5432/postgres | yes | | timeout | Query timeout in seconds. | 2 | no | -| collect_databases_matching | Databases selector. Determines which database metrics will be collected. Syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/matcher#simple-patterns-matcher). | | no | +| collect_databases_matching | Databases selector. Determines which database metrics will be collected. Syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/go/pkg/matcher#simple-patterns-matcher). | | no | | max_db_tables | Maximum number of tables in the database. Table metrics will not be collected for databases that have more tables than max_db_tables. 0 means no limit. | 50 | no | | max_db_indexes | Maximum number of indexes in the database. Index metrics will not be collected for databases that have more indexes than max_db_indexes. 0 means no limit. | 250 | no | diff --git a/src/go/plugin/go.d/modules/postgres/metadata.yaml b/src/go/plugin/go.d/modules/postgres/metadata.yaml index aacd19adb..7cdf4c7b7 100644 --- a/src/go/plugin/go.d/modules/postgres/metadata.yaml +++ b/src/go/plugin/go.d/modules/postgres/metadata.yaml @@ -98,7 +98,7 @@ modules: default_value: 2 required: false - name: collect_databases_matching - description: Databases selector. Determines which database metrics will be collected. Syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/matcher#simple-patterns-matcher). + description: Databases selector. Determines which database metrics will be collected. Syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/go/pkg/matcher#simple-patterns-matcher). 
default_value: "" required: false - name: max_db_tables diff --git a/src/go/plugin/go.d/modules/postgres/postgres.go b/src/go/plugin/go.d/modules/postgres/postgres.go index 57491039a..be437048f 100644 --- a/src/go/plugin/go.d/modules/postgres/postgres.go +++ b/src/go/plugin/go.d/modules/postgres/postgres.go @@ -6,13 +6,14 @@ import ( "database/sql" _ "embed" "errors" + "fmt" "sync" "time" + "github.com/netdata/netdata/go/plugins/pkg/matcher" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/metrics" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" "github.com/jackc/pgx/v5/stdlib" _ "github.com/jackc/pgx/v5/stdlib" @@ -32,7 +33,7 @@ func init() { func New() *Postgres { return &Postgres{ Config: Config{ - Timeout: web.Duration(time.Second * 2), + Timeout: confopt.Duration(time.Second * 2), DSN: "postgres://postgres:postgres@127.0.0.1:5432/postgres", XactTimeHistogram: []float64{.1, .5, 1, 2.5, 5, 10}, QueryTimeHistogram: []float64{.1, .5, 1, 2.5, 5, 10}, @@ -58,14 +59,14 @@ func New() *Postgres { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - DSN string `yaml:"dsn" json:"dsn"` - Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"` - DBSelector string `yaml:"collect_databases_matching,omitempty" json:"collect_databases_matching"` - XactTimeHistogram []float64 `yaml:"transaction_time_histogram,omitempty" json:"transaction_time_histogram"` - QueryTimeHistogram []float64 `yaml:"query_time_histogram,omitempty" json:"query_time_histogram"` - MaxDBTables int64 `yaml:"max_db_tables" json:"max_db_tables"` - MaxDBIndexes int64 `yaml:"max_db_indexes" json:"max_db_indexes"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + DSN string `yaml:"dsn" json:"dsn"` + Timeout confopt.Duration `yaml:"timeout,omitempty" json:"timeout"` + DBSelector string `yaml:"collect_databases_matching,omitempty" json:"collect_databases_matching"` + XactTimeHistogram []float64 `yaml:"transaction_time_histogram,omitempty" json:"transaction_time_histogram"` + QueryTimeHistogram []float64 `yaml:"query_time_histogram,omitempty" json:"query_time_histogram"` + MaxDBTables int64 `yaml:"max_db_tables" json:"max_db_tables"` + MaxDBIndexes int64 `yaml:"max_db_indexes" json:"max_db_indexes"` } type ( @@ -105,14 +106,12 @@ func (p *Postgres) Configuration() any { func (p *Postgres) Init() error { err := p.validateConfig() if err != nil { - p.Errorf("config validation: %v", err) - return err + return fmt.Errorf("config validation: %v", err) } sr, err := p.initDBSelector() if err != nil { - p.Errorf("config validation: %v", err) - return err + return fmt.Errorf("config validation: %v", err) } p.dbSr = sr @@ -125,7 +124,6 @@ func (p *Postgres) Init() error { func (p *Postgres) Check() error { mx, err := p.collect() if err != nil { - p.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/postgres/postgres_test.go b/src/go/plugin/go.d/modules/postgres/postgres_test.go index 95652458f..8b9ab6694 100644 --- a/src/go/plugin/go.d/modules/postgres/postgres_test.go +++ b/src/go/plugin/go.d/modules/postgres/postgres_test.go @@ -12,8 +12,8 @@ import ( "strings" "testing" + "github.com/netdata/netdata/go/plugins/pkg/matcher" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - 
"github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher" "github.com/DATA-DOG/go-sqlmock" "github.com/stretchr/testify/assert" diff --git a/src/go/plugin/go.d/modules/powerdns/authoritativens.go b/src/go/plugin/go.d/modules/powerdns/authoritativens.go index b9c02b86f..508de1f39 100644 --- a/src/go/plugin/go.d/modules/powerdns/authoritativens.go +++ b/src/go/plugin/go.d/modules/powerdns/authoritativens.go @@ -5,10 +5,12 @@ package powerdns import ( _ "embed" "errors" + "fmt" "net/http" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -26,12 +28,12 @@ func init() { func New() *AuthoritativeNS { return &AuthoritativeNS{ Config: Config{ - HTTP: web.HTTP{ - Request: web.Request{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{ URL: "http://127.0.0.1:8081", }, - Client: web.Client{ - Timeout: web.Duration(time.Second), + ClientConfig: web.ClientConfig{ + Timeout: confopt.Duration(time.Second), }, }, }, @@ -39,8 +41,8 @@ func New() *AuthoritativeNS { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - web.HTTP `yaml:",inline" json:""` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + web.HTTPConfig `yaml:",inline" json:""` } type AuthoritativeNS struct { @@ -59,21 +61,18 @@ func (ns *AuthoritativeNS) Configuration() any { func (ns *AuthoritativeNS) Init() error { err := ns.validateConfig() if err != nil { - ns.Errorf("config validation: %v", err) - return err + return fmt.Errorf("config validation: %v", err) } client, err := ns.initHTTPClient() if err != nil { - ns.Errorf("init HTTP client: %v", err) - return err + return fmt.Errorf("init HTTP client: %v", err) } ns.httpClient = client cs, err := ns.initCharts() if err != nil { - ns.Errorf("init charts: %v", err) - return err + return fmt.Errorf("init charts: %v", err) } ns.charts = cs @@ -83,7 +82,6 @@ func (ns *AuthoritativeNS) Init() error { func (ns *AuthoritativeNS) Check() error { mx, err := ns.collect() if err != nil { - ns.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/powerdns/authoritativens_test.go b/src/go/plugin/go.d/modules/powerdns/authoritativens_test.go index d506c9778..a4242a0b6 100644 --- a/src/go/plugin/go.d/modules/powerdns/authoritativens_test.go +++ b/src/go/plugin/go.d/modules/powerdns/authoritativens_test.go @@ -50,19 +50,19 @@ func TestAuthoritativeNS_Init(t *testing.T) { "fails on unset URL": { wantFail: true, config: Config{ - HTTP: web.HTTP{ - Request: web.Request{URL: ""}, + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{URL: ""}, }, }, }, "fails on invalid TLSCA": { wantFail: true, config: Config{ - HTTP: web.HTTP{ - Request: web.Request{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{ URL: "http://127.0.0.1:38001", }, - Client: web.Client{ + ClientConfig: web.ClientConfig{ TLSConfig: tlscfg.TLSConfig{TLSCA: "testdata/tls"}, }, }, @@ -244,32 +244,16 @@ func TestAuthoritativeNS_Collect(t *testing.T) { defer cleanup() require.NoError(t, ns.Init()) - collected := ns.Collect() + mx := ns.Collect() - assert.Equal(t, test.wantCollected, collected) + assert.Equal(t, test.wantCollected, mx) if len(test.wantCollected) > 0 { - ensureCollectedHasAllChartsDimsVarsIDs(t, ns, collected) + module.TestMetricsHasAllChartsDims(t, ns.Charts(), mx) } }) } } -func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, ns 
*AuthoritativeNS, collected map[string]int64) { - for _, chart := range *ns.Charts() { - if chart.Obsolete { - continue - } - for _, dim := range chart.Dims { - _, ok := collected[dim.ID] - assert.Truef(t, ok, "chart '%s' dim '%s': no dim in collected", dim.ID, chart.ID) - } - for _, v := range chart.Vars { - _, ok := collected[v.ID] - assert.Truef(t, ok, "chart '%s' dim '%s': no dim in collected", v.ID, chart.ID) - } - } -} - func preparePowerDNSAuthoritativeNSV430() (*AuthoritativeNS, func()) { srv := preparePowerDNSAuthoritativeNSEndpoint() ns := New() diff --git a/src/go/plugin/go.d/modules/powerdns/collect.go b/src/go/plugin/go.d/modules/powerdns/collect.go index c2831e0f2..32bcad2cf 100644 --- a/src/go/plugin/go.d/modules/powerdns/collect.go +++ b/src/go/plugin/go.d/modules/powerdns/collect.go @@ -3,11 +3,7 @@ package powerdns import ( - "encoding/json" "errors" - "fmt" - "io" - "net/http" "strconv" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" @@ -65,36 +61,12 @@ func (ns *AuthoritativeNS) collectStatistics(collected map[string]int64, statist } func (ns *AuthoritativeNS) scrapeStatistics() ([]statisticMetric, error) { - req, _ := web.NewHTTPRequestWithPath(ns.Request, urlPathLocalStatistics) + req, _ := web.NewHTTPRequestWithPath(ns.RequestConfig, urlPathLocalStatistics) - var statistics statisticMetrics - if err := ns.doOKDecode(req, &statistics); err != nil { + var stats statisticMetrics + if err := web.DoHTTP(ns.httpClient).RequestJSON(req, &stats); err != nil { return nil, err } - return statistics, nil -} - -func (ns *AuthoritativeNS) doOKDecode(req *http.Request, in interface{}) error { - resp, err := ns.httpClient.Do(req) - if err != nil { - return fmt.Errorf("error on HTTP request '%s': %v", req.URL, err) - } - defer closeBody(resp) - - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("'%s' returned HTTP status code: %d", req.URL, resp.StatusCode) - } - - if err := json.NewDecoder(resp.Body).Decode(in); err != nil { - return fmt.Errorf("error on decoding response from '%s': %v", req.URL, err) - } - return nil -} - -func closeBody(resp *http.Response) { - if resp != nil && resp.Body != nil { - _, _ = io.Copy(io.Discard, resp.Body) - _ = resp.Body.Close() - } + return stats, nil } diff --git a/src/go/plugin/go.d/modules/powerdns/config_schema.json b/src/go/plugin/go.d/modules/powerdns/config_schema.json index 2ec6565c1..4b59d7e43 100644 --- a/src/go/plugin/go.d/modules/powerdns/config_schema.json +++ b/src/go/plugin/go.d/modules/powerdns/config_schema.json @@ -105,7 +105,6 @@ "required": [ "url" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/powerdns/init.go b/src/go/plugin/go.d/modules/powerdns/init.go index 0819459fe..85675eeb4 100644 --- a/src/go/plugin/go.d/modules/powerdns/init.go +++ b/src/go/plugin/go.d/modules/powerdns/init.go @@ -14,14 +14,14 @@ func (ns *AuthoritativeNS) validateConfig() error { if ns.URL == "" { return errors.New("URL not set") } - if _, err := web.NewHTTPRequest(ns.Request); err != nil { + if _, err := web.NewHTTPRequest(ns.RequestConfig); err != nil { return err } return nil } func (ns *AuthoritativeNS) initHTTPClient() (*http.Client, error) { - return web.NewHTTPClient(ns.Client) + return web.NewHTTPClient(ns.ClientConfig) } func (ns *AuthoritativeNS) initCharts() (*module.Charts, error) { diff --git a/src/go/plugin/go.d/modules/powerdns/integrations/powerdns_authoritative_server.md 
b/src/go/plugin/go.d/modules/powerdns/integrations/powerdns_authoritative_server.md index b4060a613..18550d6a2 100644 --- a/src/go/plugin/go.d/modules/powerdns/integrations/powerdns_authoritative_server.md +++ b/src/go/plugin/go.d/modules/powerdns/integrations/powerdns_authoritative_server.md @@ -104,8 +104,8 @@ Follow [HTTP API](https://doc.powerdns.com/authoritative/http-api/index.html#ena The configuration file name for this integration is `go.d/powerdns.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/powerdns/metrics.go b/src/go/plugin/go.d/modules/powerdns/metrics.go index 3efa2c980..09f5f5e45 100644 --- a/src/go/plugin/go.d/modules/powerdns/metrics.go +++ b/src/go/plugin/go.d/modules/powerdns/metrics.go @@ -8,6 +8,6 @@ type ( statisticMetric struct { Name string Type string - Value interface{} + Value any } ) diff --git a/src/go/plugin/go.d/modules/powerdns_recursor/collect.go b/src/go/plugin/go.d/modules/powerdns_recursor/collect.go index 784093ccf..27c979e0a 100644 --- a/src/go/plugin/go.d/modules/powerdns_recursor/collect.go +++ b/src/go/plugin/go.d/modules/powerdns_recursor/collect.go @@ -3,11 +3,7 @@ package powerdns_recursor import ( - "encoding/json" "errors" - "fmt" - "io" - "net/http" "strconv" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" @@ -65,36 +61,12 @@ func (r *Recursor) collectStatistics(collected map[string]int64, statistics stat } func (r *Recursor) scrapeStatistics() ([]statisticMetric, error) { - req, _ := web.NewHTTPRequestWithPath(r.Request, urlPathLocalStatistics) + req, _ := web.NewHTTPRequestWithPath(r.RequestConfig, urlPathLocalStatistics) - var statistics statisticMetrics - if err := r.doOKDecode(req, &statistics); err != nil { + var stats statisticMetrics + if err := web.DoHTTP(r.httpClient).RequestJSON(req, &stats); err != nil { return nil, err } - return statistics, nil -} - -func (r *Recursor) doOKDecode(req *http.Request, in interface{}) error { - resp, err := r.httpClient.Do(req) - if err != nil { - return fmt.Errorf("error on HTTP request '%s': %v", req.URL, err) - } - defer closeBody(resp) - - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("'%s' returned HTTP status code: %d", req.URL, resp.StatusCode) - } - - if err := json.NewDecoder(resp.Body).Decode(in); err != nil { - return fmt.Errorf("error on decoding response from '%s': %v", req.URL, err) - } - return nil -} - -func closeBody(resp *http.Response) { - if resp != nil && resp.Body != nil { - _, _ = io.Copy(io.Discard, resp.Body) - _ = resp.Body.Close() - } + return stats, nil } diff --git a/src/go/plugin/go.d/modules/powerdns_recursor/config_schema.json b/src/go/plugin/go.d/modules/powerdns_recursor/config_schema.json index 1b76938ce..4179f86b9 100644 --- a/src/go/plugin/go.d/modules/powerdns_recursor/config_schema.json +++ b/src/go/plugin/go.d/modules/powerdns_recursor/config_schema.json @@ -105,7 +105,6 @@ "required": [ "url" ], - "additionalProperties": false, 
"patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/powerdns_recursor/init.go b/src/go/plugin/go.d/modules/powerdns_recursor/init.go index cadc6d2c2..7e8bb357b 100644 --- a/src/go/plugin/go.d/modules/powerdns_recursor/init.go +++ b/src/go/plugin/go.d/modules/powerdns_recursor/init.go @@ -14,14 +14,14 @@ func (r *Recursor) validateConfig() error { if r.URL == "" { return errors.New("URL not set") } - if _, err := web.NewHTTPRequest(r.Request); err != nil { + if _, err := web.NewHTTPRequest(r.RequestConfig); err != nil { return err } return nil } func (r *Recursor) initHTTPClient() (*http.Client, error) { - return web.NewHTTPClient(r.Client) + return web.NewHTTPClient(r.ClientConfig) } func (r *Recursor) initCharts() (*module.Charts, error) { diff --git a/src/go/plugin/go.d/modules/powerdns_recursor/integrations/powerdns_recursor.md b/src/go/plugin/go.d/modules/powerdns_recursor/integrations/powerdns_recursor.md index 68a3da0a9..efb055f90 100644 --- a/src/go/plugin/go.d/modules/powerdns_recursor/integrations/powerdns_recursor.md +++ b/src/go/plugin/go.d/modules/powerdns_recursor/integrations/powerdns_recursor.md @@ -107,8 +107,8 @@ Follow [HTTP API](https://doc.powerdns.com/recursor/http-api/index.html#enabling The configuration file name for this integration is `go.d/powerdns_recursor.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/powerdns_recursor/metrics.go b/src/go/plugin/go.d/modules/powerdns_recursor/metrics.go index a7fbd63c1..1f6c476a4 100644 --- a/src/go/plugin/go.d/modules/powerdns_recursor/metrics.go +++ b/src/go/plugin/go.d/modules/powerdns_recursor/metrics.go @@ -13,6 +13,6 @@ type ( statisticMetric struct { Name string Type string - Value interface{} + Value any } ) diff --git a/src/go/plugin/go.d/modules/powerdns_recursor/recursor.go b/src/go/plugin/go.d/modules/powerdns_recursor/recursor.go index 4b9c3e72f..99c5e97f2 100644 --- a/src/go/plugin/go.d/modules/powerdns_recursor/recursor.go +++ b/src/go/plugin/go.d/modules/powerdns_recursor/recursor.go @@ -5,10 +5,12 @@ package powerdns_recursor import ( _ "embed" "errors" + "fmt" "net/http" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -26,12 +28,12 @@ func init() { func New() *Recursor { return &Recursor{ Config: Config{ - HTTP: web.HTTP{ - Request: web.Request{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{ URL: "http://127.0.0.1:8081", }, - Client: web.Client{ - Timeout: web.Duration(time.Second), + ClientConfig: web.ClientConfig{ + Timeout: confopt.Duration(time.Second), }, }, }, @@ -39,8 +41,8 @@ func New() *Recursor { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - web.HTTP `yaml:",inline" json:""` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + web.HTTPConfig `yaml:",inline" json:""` } type Recursor struct { @@ -59,21 +61,18 @@ func (r *Recursor) Configuration() any { func (r *Recursor) Init() error { err := r.validateConfig() if err != nil { - r.Errorf("config validation: %v", err) - return err + return fmt.Errorf("config validation: %v", err) } client, err := r.initHTTPClient() if err != nil { - r.Errorf("init HTTP client: %v", err) - return err + return fmt.Errorf("init HTTP client: %v", err) } r.httpClient = client cs, err := r.initCharts() if err != nil { - r.Errorf("init charts: %v", err) - return err + return fmt.Errorf("init charts: %v", err) } r.charts = cs @@ -83,7 +82,6 @@ func (r *Recursor) Init() error { func (r *Recursor) Check() error { mx, err := r.collect() if err != nil { - r.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/powerdns_recursor/recursor_test.go b/src/go/plugin/go.d/modules/powerdns_recursor/recursor_test.go index 09475e223..1cf3561ea 100644 --- a/src/go/plugin/go.d/modules/powerdns_recursor/recursor_test.go +++ b/src/go/plugin/go.d/modules/powerdns_recursor/recursor_test.go @@ -50,19 +50,19 @@ func TestRecursor_Init(t *testing.T) { "fails on unset URL": { wantFail: true, config: Config{ - HTTP: web.HTTP{ - Request: web.Request{URL: ""}, + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{URL: ""}, }, }, }, "fails on invalid TLSCA": { wantFail: true, config: Config{ - HTTP: web.HTTP{ - Request: web.Request{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{ URL: "http://127.0.0.1:38001", }, - Client: web.Client{ + ClientConfig: web.ClientConfig{ TLSConfig: tlscfg.TLSConfig{TLSCA: "testdata/tls"}, }, }, @@ -279,32 +279,16 @@ func TestRecursor_Collect(t *testing.T) { defer cleanup() require.NoError(t, recursor.Init()) - collected := recursor.Collect() + mx := recursor.Collect() - assert.Equal(t, 
test.wantCollected, collected) + assert.Equal(t, test.wantCollected, mx) if len(test.wantCollected) > 0 { - ensureCollectedHasAllChartsDimsVarsIDs(t, recursor, collected) + module.TestMetricsHasAllChartsDims(t, recursor.Charts(), mx) } }) } } -func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, rec *Recursor, collected map[string]int64) { - for _, chart := range *rec.Charts() { - if chart.Obsolete { - continue - } - for _, dim := range chart.Dims { - _, ok := collected[dim.ID] - assert.Truef(t, ok, "chart '%s' dim '%s': no dim in collected", dim.ID, chart.ID) - } - for _, v := range chart.Vars { - _, ok := collected[v.ID] - assert.Truef(t, ok, "chart '%s' dim '%s': no dim in collected", v.ID, chart.ID) - } - } -} - func preparePowerDNSRecursorV431() (*Recursor, func()) { srv := preparePowerDNSRecursorEndpoint() recursor := New() diff --git a/src/go/plugin/go.d/modules/prometheus/charts.go b/src/go/plugin/go.d/modules/prometheus/charts.go index c78f9b1b0..bdddc623d 100644 --- a/src/go/plugin/go.d/modules/prometheus/charts.go +++ b/src/go/plugin/go.d/modules/prometheus/charts.go @@ -41,7 +41,7 @@ func (p *Prometheus) addGaugeChart(id, name, help string, labels labels.Labels) for _, lbl := range labels { chart.Labels = append(chart.Labels, module.Label{ - Key: lbl.Name, + Key: p.labelName(lbl.Name), Value: apostropheReplacer.Replace(lbl.Value), }, ) @@ -84,7 +84,7 @@ func (p *Prometheus) addCounterChart(id, name, help string, labels labels.Labels for _, lbl := range labels { chart.Labels = append(chart.Labels, module.Label{ - Key: lbl.Name, + Key: p.labelName(lbl.Name), Value: apostropheReplacer.Replace(lbl.Value), }, ) @@ -154,7 +154,7 @@ func (p *Prometheus) addSummaryCharts(id, name, help string, labels labels.Label for _, chart := range charts { for _, lbl := range labels { chart.Labels = append(chart.Labels, module.Label{ - Key: lbl.Name, + Key: p.labelName(lbl.Name), Value: apostropheReplacer.Replace(lbl.Value), }) } @@ -222,7 +222,7 @@ func (p *Prometheus) addHistogramCharts(id, name, help string, labels labels.Lab for _, chart := range charts { for _, lbl := range labels { chart.Labels = append(chart.Labels, module.Label{ - Key: lbl.Name, + Key: p.labelName(lbl.Name), Value: apostropheReplacer.Replace(lbl.Value), }) } @@ -241,6 +241,13 @@ func (p *Prometheus) application() string { return p.Name } +func (p *Prometheus) labelName(lblName string) string { + if p.LabelPrefix == "" { + return lblName + } + return p.LabelPrefix + "_" + lblName +} + func getChartTitle(name, help string) string { if help == "" { return fmt.Sprintf("Metric \"%s\"", name) diff --git a/src/go/plugin/go.d/modules/prometheus/config_schema.json b/src/go/plugin/go.d/modules/prometheus/config_schema.json index 2df96b049..3cfcd0f54 100644 --- a/src/go/plugin/go.d/modules/prometheus/config_schema.json +++ b/src/go/plugin/go.d/modules/prometheus/config_schema.json @@ -34,6 +34,11 @@ "description": "If an endpoint does not return at least one metric with the specified prefix, the data is not processed.", "type": "string" }, + "label_prefix": { + "title": "Label prefix", + "description": "An optional prefix that will be added to all labels of all charts. 
If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name).", + "type": "string" + }, "app": { "title": "Application", "description": "If set, this value will be used in the chart context as 'prometheus.{app}.{metric_name}'.", @@ -205,7 +210,6 @@ "required": [ "url" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/prometheus/init.go b/src/go/plugin/go.d/modules/prometheus/init.go index afb92af32..4171e9fdc 100644 --- a/src/go/plugin/go.d/modules/prometheus/init.go +++ b/src/go/plugin/go.d/modules/prometheus/init.go @@ -7,7 +7,7 @@ import ( "fmt" "os" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher" + "github.com/netdata/netdata/go/plugins/pkg/matcher" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -20,12 +20,12 @@ func (p *Prometheus) validateConfig() error { } func (p *Prometheus) initPrometheusClient() (prometheus.Prometheus, error) { - httpClient, err := web.NewHTTPClient(p.Client) + httpClient, err := web.NewHTTPClient(p.ClientConfig) if err != nil { return nil, fmt.Errorf("init HTTP client: %v", err) } - req := p.Request.Copy() + req := p.RequestConfig.Copy() if p.BearerTokenFile != "" { token, err := os.ReadFile(p.BearerTokenFile) if err != nil { diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/4d_server.md b/src/go/plugin/go.d/modules/prometheus/integrations/4d_server.md index 479fbe132..fdffb6364 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/4d_server.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/4d_server.md @@ -94,8 +94,8 @@ Install [4D Server exporter](https://github.com/ThomasMaul/Prometheus_4D_Exporte The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/8430ft_modem.md b/src/go/plugin/go.d/modules/prometheus/integrations/8430ft_modem.md index d5087d8c1..a0e31f32f 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/8430ft_modem.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/8430ft_modem.md @@ -94,8 +94,8 @@ Install [8430FT Exporter](https://github.com/dernasherbrezon/8430ft_exporter) by The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/a10_acos_network_devices.md b/src/go/plugin/go.d/modules/prometheus/integrations/a10_acos_network_devices.md index 886572d83..d179bb483 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/a10_acos_network_devices.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/a10_acos_network_devices.md @@ -94,8 +94,8 @@ Install [A10-Networks Prometheus Exporter](https://github.com/a10networks/Promet The configuration file name for this integration is `go.d/prometheus.conf`. 
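At the code level, the `label_prefix` feature added in the `charts.go` hunk above reduces to a single helper: an empty prefix leaves label names untouched, while a non-empty prefix is joined to the original name with an underscore. A minimal self-contained sketch of that behavior (the `main` driver and sample values are illustrative, not from the patch):

```go
package main

import "fmt"

// labelName mirrors the helper added to the prometheus collector in this
// patch: when the configured prefix is empty, chart label keys pass through
// unchanged; otherwise each key becomes "<prefix>_<name>".
func labelName(labelPrefix, name string) string {
	if labelPrefix == "" {
		return name
	}
	return labelPrefix + "_" + name
}

func main() {
	fmt.Println(labelName("", "instance"))     // instance
	fmt.Println(labelName("prom", "instance")) // prom_instance
}
```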
-You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/airthings_waveplus_air_sensor.md b/src/go/plugin/go.d/modules/prometheus/integrations/airthings_waveplus_air_sensor.md index d6353d5c4..dcf5961ba 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/airthings_waveplus_air_sensor.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/airthings_waveplus_air_sensor.md @@ -94,8 +94,8 @@ Install [Waveplus Radon Sensor Exporter](https://github.com/jeremybz/waveplus_ex The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/akamai_edge_dns_traffic.md b/src/go/plugin/go.d/modules/prometheus/integrations/akamai_edge_dns_traffic.md index d61275eb6..1d52099aa 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/akamai_edge_dns_traffic.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/akamai_edge_dns_traffic.md @@ -94,8 +94,8 @@ Install [Akamai Edge DNS Traffic Exporter](https://github.com/akamai/akamai-edge The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). 
- Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/akamai_global_traffic_management.md b/src/go/plugin/go.d/modules/prometheus/integrations/akamai_global_traffic_management.md index 6c1dbbf3a..872274e29 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/akamai_global_traffic_management.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/akamai_global_traffic_management.md @@ -94,8 +94,8 @@ Install [Akamai Global Traffic Management Metrics Exporter](https://github.com/a The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/akami_cloudmonitor.md b/src/go/plugin/go.d/modules/prometheus/integrations/akami_cloudmonitor.md index 480892401..d300f46f4 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/akami_cloudmonitor.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/akami_cloudmonitor.md @@ -94,8 +94,8 @@ Install [Cloudmonitor exporter](https://github.com/ExpressenAB/cloudmonitor_expo The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/alamos_fe2_server.md b/src/go/plugin/go.d/modules/prometheus/integrations/alamos_fe2_server.md index 1f5552ac6..0b000cfd5 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/alamos_fe2_server.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/alamos_fe2_server.md @@ -94,8 +94,8 @@ Install [Alamos FE2 Exporter](https://github.com/codemonauts/prometheus-fe2-expo The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. 
If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/alibaba_cloud.md b/src/go/plugin/go.d/modules/prometheus/integrations/alibaba_cloud.md index 51a5203fe..8aaf602c1 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/alibaba_cloud.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/alibaba_cloud.md @@ -94,8 +94,8 @@ Install [Alibaba Cloud Exporter](https://github.com/aylei/aliyun-exporter) by fo The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). 
- Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/altaro_backup.md b/src/go/plugin/go.d/modules/prometheus/integrations/altaro_backup.md index c5200c889..e643d0c4d 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/altaro_backup.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/altaro_backup.md @@ -94,8 +94,8 @@ Install [Altaro Backup Exporter](https://github.com/raph2i/altaro_backup_exporte The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/amd_cpu_&_gpu.md b/src/go/plugin/go.d/modules/prometheus/integrations/amd_cpu_&_gpu.md index 0eb582743..f4c0f7d13 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/amd_cpu_&_gpu.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/amd_cpu_&_gpu.md @@ -94,8 +94,8 @@ Install [AMD SMI Exporter](https://github.com/amd/amd_smi_exporter) by following The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
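The `recursor.go` hunk earlier in this patch also changes how `Init()` reports failures: instead of logging through `r.Errorf` and then returning the bare error, each failure is returned once, wrapped with context via `fmt.Errorf`, leaving logging to the caller. A minimal sketch of that pattern, with a hypothetical `validate` step standing in for the module's real initializers:

```go
package main

import (
	"errors"
	"fmt"
	"log"
)

var errBadURL = errors.New("URL is required")

// validate is a hypothetical stand-in for the module's validateConfig step.
func validate(url string) error {
	if url == "" {
		return errBadURL
	}
	return nil
}

// initStep returns the failure wrapped with context in a single statement,
// rather than logging it and returning it separately as the old code did.
func initStep(url string) error {
	if err := validate(url); err != nil {
		return fmt.Errorf("config validation: %v", err)
	}
	return nil
}

func main() {
	if err := initStep(""); err != nil {
		log.Println(err) // config validation: URL is required
	}
}
```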
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/andrews_&_arnold_line_status.md b/src/go/plugin/go.d/modules/prometheus/integrations/andrews_&_arnold_line_status.md index 52d282bab..ec773815c 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/andrews_&_arnold_line_status.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/andrews_&_arnold_line_status.md @@ -94,8 +94,8 @@ Install [Andrews & Arnold line status exporter](https://github.com/daveio/aaisp- The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. 
| | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/apache_airflow.md b/src/go/plugin/go.d/modules/prometheus/integrations/apache_airflow.md index 5a5d15074..fc19537c0 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/apache_airflow.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/apache_airflow.md @@ -94,8 +94,8 @@ Install [Airflow exporter](https://github.com/shalb/airflow-exporter) by followi The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/apache_flink.md b/src/go/plugin/go.d/modules/prometheus/integrations/apache_flink.md index 325b15d67..a356371a9 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/apache_flink.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/apache_flink.md @@ -94,8 +94,8 @@ Install [Apache Flink Metrics Reporter](https://github.com/matsumana/flink_expor The configuration file name for this integration is `go.d/prometheus.conf`. 
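Putting the option table into practice, a job in `go.d/prometheus.conf` that sets the new `label_prefix` option might look like the following sketch (job name, endpoint, and prefix are hypothetical):

```yaml
jobs:
  - name: my_exporter                  # hypothetical job name
    url: http://127.0.0.1:9090/metrics # hypothetical scrape endpoint
    label_prefix: prom                 # labels become e.g. prom_instance
    timeout: 10                        # seconds; matches the documented default
```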
-You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/apicast.md b/src/go/plugin/go.d/modules/prometheus/integrations/apicast.md index 7c36df053..0e1854187 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/apicast.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/apicast.md @@ -94,8 +94,8 @@ Install [APIcast](https://github.com/3scale/apicast) by following the instructio The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. 
| 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/apple_time_machine.md b/src/go/plugin/go.d/modules/prometheus/integrations/apple_time_machine.md index e3a916ebc..95e3b850b 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/apple_time_machine.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/apple_time_machine.md @@ -94,8 +94,8 @@ Install [Apple Time Machine Exporter](https://github.com/znerol/prometheus-timem The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). 
- Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/arm_hwcpipe.md b/src/go/plugin/go.d/modules/prometheus/integrations/arm_hwcpipe.md index 14a4386f4..a7f48a775 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/arm_hwcpipe.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/arm_hwcpipe.md @@ -94,8 +94,8 @@ Install [ARM HWCPipe Exporter](https://github.com/ylz-at/arm-hwcpipe-exporter) b The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/aruba_devices.md b/src/go/plugin/go.d/modules/prometheus/integrations/aruba_devices.md index c848873b2..21ef08263 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/aruba_devices.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/aruba_devices.md @@ -94,8 +94,8 @@ Install [Aruba Exporter](https://github.com/slashdoom/aruba_exporter) by followi The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
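Each of these documents points to the selector README for pattern syntax. As a sketch of the option's shape, assuming the `allow`/`deny` list form described in that README: the `allow` patterns are OR-ed together and the `deny` patterns are negated, matching the "Logic" line above (the pattern names are placeholders):

```yaml
selector:
  allow:
    - pattern1
    - pattern2
  deny:
    - pattern3
    - pattern4
```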
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/arvancloud_cdn.md b/src/go/plugin/go.d/modules/prometheus/integrations/arvancloud_cdn.md index 81bcbd70a..a43455042 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/arvancloud_cdn.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/arvancloud_cdn.md @@ -94,8 +94,8 @@ Install [ArvanCloud exporter](https://github.com/arvancloud/ar-prometheus-export The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/audisto.md b/src/go/plugin/go.d/modules/prometheus/integrations/audisto.md index 81c450889..1871b51c4 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/audisto.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/audisto.md @@ -94,8 +94,8 @@ Install [Audisto exporter](https://github.com/ZeitOnline/audisto_exporter) by fo The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/authlog.md b/src/go/plugin/go.d/modules/prometheus/integrations/authlog.md index 86f20e30b..605a48ef3 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/authlog.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/authlog.md @@ -94,8 +94,8 @@ Install [AuthLog Exporter](https://github.com/woblerr/authlog_exporter) by follo The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/aws_ec2_compute_instances.md b/src/go/plugin/go.d/modules/prometheus/integrations/aws_ec2_compute_instances.md index c31b72dc1..1bcd8f19b 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/aws_ec2_compute_instances.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/aws_ec2_compute_instances.md @@ -94,8 +94,8 @@ Install [AWS EC2 Exporter](https://github.com/O1ahmad/aws_ec2_exporter) by follo The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. 
If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/aws_ec2_spot_instance.md b/src/go/plugin/go.d/modules/prometheus/integrations/aws_ec2_spot_instance.md index 908624b4c..a7759fa67 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/aws_ec2_spot_instance.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/aws_ec2_spot_instance.md @@ -94,8 +94,8 @@ Install [AWS EC2 Spot Exporter](https://github.com/patcadelina/ec2-spot-exporter The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). 
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/aws_ecs.md b/src/go/plugin/go.d/modules/prometheus/integrations/aws_ecs.md
index aed1877b8..731aed66d 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/aws_ecs.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/aws_ecs.md
@@ -94,8 +94,8 @@ Install [AWS ECS exporter](https://github.com/bevers222/ecs-exporter) by followi
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/aws_health_events.md b/src/go/plugin/go.d/modules/prometheus/integrations/aws_health_events.md
index dd1d4bc6a..c9d215d45 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/aws_health_events.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/aws_health_events.md
@@ -94,8 +94,8 @@ Install [AWS Health Exporter](https://github.com/vladvasiliu/aws-health-exporter
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/aws_instance_health.md b/src/go/plugin/go.d/modules/prometheus/integrations/aws_instance_health.md
index 82da72d23..e03ad1b2e 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/aws_instance_health.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/aws_instance_health.md
@@ -94,8 +94,8 @@ Install [AWS instance health exporter](https://github.com/bobtfish/aws-instance-
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/aws_quota.md b/src/go/plugin/go.d/modules/prometheus/integrations/aws_quota.md
index 67970fdf8..300f37323 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/aws_quota.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/aws_quota.md
@@ -94,8 +94,8 @@ Install [aws_quota_exporter](https://github.com/emylincon/aws_quota_exporter) by
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/aws_rds.md b/src/go/plugin/go.d/modules/prometheus/integrations/aws_rds.md
index acd1e7101..8ffffed51 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/aws_rds.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/aws_rds.md
@@ -94,8 +94,8 @@ Install [rds_exporter](https://github.com/percona/rds_exporter) by following the
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/aws_s3_buckets.md b/src/go/plugin/go.d/modules/prometheus/integrations/aws_s3_buckets.md
index e4628d718..67ab9022a 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/aws_s3_buckets.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/aws_s3_buckets.md
@@ -94,8 +94,8 @@ Install [AWS S3 Exporter](https://github.com/ribbybibby/s3_exporter) by followin
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/aws_sqs.md b/src/go/plugin/go.d/modules/prometheus/integrations/aws_sqs.md
index b2760e205..41be2d97e 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/aws_sqs.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/aws_sqs.md
@@ -94,8 +94,8 @@ Install [AWS SQS Exporter](https://github.com/jmal98/sqs-exporter) by following
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/azure_ad_app_passwords.md b/src/go/plugin/go.d/modules/prometheus/integrations/azure_ad_app_passwords.md
index 1f1ce0a85..88d6497ba 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/azure_ad_app_passwords.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/azure_ad_app_passwords.md
@@ -94,8 +94,8 @@ Install [Azure App Secrets monitor](https://github.com/vladvasiliu/azure-app-sec
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/azure_application.md b/src/go/plugin/go.d/modules/prometheus/integrations/azure_application.md
index 55f124658..ac8c58830 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/azure_application.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/azure_application.md
@@ -94,8 +94,8 @@ Install [Azure Monitor exporter](https://github.com/RobustPerception/azure_metri
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/azure_elastic_pool_sql.md b/src/go/plugin/go.d/modules/prometheus/integrations/azure_elastic_pool_sql.md
index 0fa89bff2..943ef7a36 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/azure_elastic_pool_sql.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/azure_elastic_pool_sql.md
@@ -94,8 +94,8 @@ Install [Azure Elastic SQL Exporter](https://github.com/benclapp/azure_elastic_s
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/azure_resources.md b/src/go/plugin/go.d/modules/prometheus/integrations/azure_resources.md
index c63e0ad1d..faaed6214 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/azure_resources.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/azure_resources.md
@@ -94,8 +94,8 @@ Install [Azure Resources Exporter](https://github.com/FXinnovation/azure_metrics
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/azure_service_bus.md b/src/go/plugin/go.d/modules/prometheus/integrations/azure_service_bus.md
index c1a641aaa..c5fa99c5b 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/azure_service_bus.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/azure_service_bus.md
@@ -94,8 +94,8 @@ Install [Azure Service Bus Exporter](https://github.com/marcinbudny/servicebus_e
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/azure_sql.md b/src/go/plugin/go.d/modules/prometheus/integrations/azure_sql.md
index 98a933eb6..0f6d5b01c 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/azure_sql.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/azure_sql.md
@@ -94,8 +94,8 @@ Install [Azure SQL exporter](https://github.com/iamseth/azure_sql_exporter) by f
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/bigquery.md b/src/go/plugin/go.d/modules/prometheus/integrations/bigquery.md
index a76ff8fb3..cc17f055a 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/bigquery.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/bigquery.md
@@ -94,8 +94,8 @@ Install [BigQuery Exporter](https://github.com/m-lab/prometheus-bigquery-exporte
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
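The two limits in the options table work together: `max_time_series` (default 2000) drops the whole endpoint when exceeded, while `max_time_series_per_metric` (default 200) only skips the offending metric. A sketch of raising both for a large endpoint (the values and job details are illustrative):

```yaml
jobs:
  - name: my_exporter
    url: http://127.0.0.1:9090/metrics
    max_time_series: 5000            # above this, the endpoint's data is not processed at all
    max_time_series_per_metric: 500  # above this, only the individual metric is skipped
```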
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/bird_routing_daemon.md b/src/go/plugin/go.d/modules/prometheus/integrations/bird_routing_daemon.md
index 43318c4c5..c4392a0ee 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/bird_routing_daemon.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/bird_routing_daemon.md
@@ -94,8 +94,8 @@ Install [Bird Routing Daemon Exporter](https://github.com/czerwonk/bird_exporter
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/blackbox.md b/src/go/plugin/go.d/modules/prometheus/integrations/blackbox.md
index d37019b6d..7918feddd 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/blackbox.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/blackbox.md
@@ -94,8 +94,8 @@ Install [Blackbox exporter](https://github.com/prometheus/blackbox_exporter) by
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/bobcat_miner_300.md b/src/go/plugin/go.d/modules/prometheus/integrations/bobcat_miner_300.md
index c00ccaa7d..9cc67f97e 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/bobcat_miner_300.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/bobcat_miner_300.md
@@ -94,8 +94,8 @@ Install [Bobcat Exporter](https://github.com/pperzyna/bobcat_exporter) by follow
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/borg_backup.md b/src/go/plugin/go.d/modules/prometheus/integrations/borg_backup.md
index 67a175340..8b8dec6a5 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/borg_backup.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/borg_backup.md
@@ -94,8 +94,8 @@ Install [Borg backup exporter](https://github.com/k0ral/borg-exporter) by follow
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/bosh.md b/src/go/plugin/go.d/modules/prometheus/integrations/bosh.md
index c8fc354f3..f2b6c0b3b 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/bosh.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/bosh.md
@@ -94,8 +94,8 @@ Install [BOSH exporter](https://github.com/bosh-prometheus/bosh_exporter) by fol
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/bpftrace_variables.md b/src/go/plugin/go.d/modules/prometheus/integrations/bpftrace_variables.md
index 76ed9a2f0..52a3752d6 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/bpftrace_variables.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/bpftrace_variables.md
@@ -94,8 +94,8 @@ Install [bpftrace exporter](https://github.com/andreasgerstmayr/bpftrace_exporte
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/bungeecord.md b/src/go/plugin/go.d/modules/prometheus/integrations/bungeecord.md
index cebba3d2f..29978c9ef 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/bungeecord.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/bungeecord.md
@@ -94,8 +94,8 @@ Install [BungeeCord Prometheus Exporter](https://github.com/weihao/bungeecord-pr
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
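The `timeout`, `username`, and `password` rows in the same options table cover exporters that sit behind basic HTTP authentication. A sketch (the endpoint and credentials are placeholders):

```yaml
jobs:
  - name: my_exporter
    url: https://127.0.0.1:9090/metrics
    timeout: 20          # seconds; the table's default is 10
    username: monitor    # basic HTTP authentication
    password: secret
```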
 Only metrics matching the selector will be collected.

 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:

 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/cadvisor.md b/src/go/plugin/go.d/modules/prometheus/integrations/cadvisor.md
index a40221af5..b5c54dae1 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/cadvisor.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/cadvisor.md
@@ -94,8 +94,8 @@ Install [cAdvisor](https://github.com/google/cadvisor) by following the instruct

 The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:

 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/celery.md b/src/go/plugin/go.d/modules/prometheus/integrations/celery.md
index 2cb4e8219..96b014892 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/celery.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/celery.md
@@ -94,8 +94,8 @@ Install [Celery Exporter](https://github.com/ZeitOnline/celery_redis_prometheus)

 The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:

 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/certificate_transparency.md b/src/go/plugin/go.d/modules/prometheus/integrations/certificate_transparency.md
index b741f95ff..38ead0780 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/certificate_transparency.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/certificate_transparency.md
@@ -95,8 +95,8 @@ Install [ct-exporter](https://github.com/Hsn723/ct-exporter) by following the in

 The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -118,6 +118,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -138,7 +139,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:

 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/checkpoint_device.md b/src/go/plugin/go.d/modules/prometheus/integrations/checkpoint_device.md
index 4d63f806e..f089696f6 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/checkpoint_device.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/checkpoint_device.md
@@ -94,8 +94,8 @@ Install [Checkpoint exporter](https://github.com/RespiroConsulting/CheckPointExp

 The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:

 ```yaml
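
For context, the `selector` option that these hunks re-link takes allow/deny lists of patterns. A minimal sketch of the option syntax, using the placeholder patterns from the Logic line above (the exact layout is assumed from the linked selector README):

```yaml
selector:
  allow:
    - pattern1
    - pattern2
  deny:
    - pattern3
    - pattern4
```

Patterns under `allow` are ORed together and patterns under `deny` are negated, matching the documented logic `(pattern1 OR pattern2) AND !(pattern3 or pattern4)`.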
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/chia.md b/src/go/plugin/go.d/modules/prometheus/integrations/chia.md
index 158b6990e..3fc726413 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/chia.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/chia.md
@@ -94,8 +94,8 @@ Install [Chia Exporter](https://github.com/chia-network/chia-exporter) by follow

 The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:

 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/christ_elektronik_clm5ip_power_panel.md b/src/go/plugin/go.d/modules/prometheus/integrations/christ_elektronik_clm5ip_power_panel.md
index 71f6460f3..b0e5cb5a5 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/christ_elektronik_clm5ip_power_panel.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/christ_elektronik_clm5ip_power_panel.md
@@ -94,8 +94,8 @@ Install [Christ Elektronik CLM5IP Exporter](https://github.com/christmann/clm5ip

 The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:

 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/cilium_agent.md b/src/go/plugin/go.d/modules/prometheus/integrations/cilium_agent.md
index 77369adaa..951588f0f 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/cilium_agent.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/cilium_agent.md
@@ -94,8 +94,8 @@ Install [Cilium Agent](https://github.com/cilium/cilium) by following the instru

 The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:

 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/cilium_operator.md b/src/go/plugin/go.d/modules/prometheus/integrations/cilium_operator.md
index 4083f7b0b..38d3fb30e 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/cilium_operator.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/cilium_operator.md
@@ -94,8 +94,8 @@ Install [Cilium Operator](https://github.com/cilium/cilium) by following the ins

 The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:

 ```yaml
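
To make the new `label_prefix` row added across these files concrete, here is a minimal sketch of a `go.d/prometheus.conf` job using it (the job name and endpoint URL are placeholders):

```yaml
jobs:
  - name: my_exporter
    url: http://127.0.0.1:9090/metrics
    # With label_prefix set, a scraped label such as `instance`
    # is exposed on charts as `prom_instance`.
    label_prefix: prom
```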
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/cilium_proxy.md b/src/go/plugin/go.d/modules/prometheus/integrations/cilium_proxy.md
index cfffa6299..7a9aeb638 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/cilium_proxy.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/cilium_proxy.md
@@ -94,8 +94,8 @@ Install [Cilium Proxy](https://github.com/cilium/proxy) by following the instruc

 The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:

 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/cisco_aci.md b/src/go/plugin/go.d/modules/prometheus/integrations/cisco_aci.md
index 9766e88d1..c15c0a3d9 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/cisco_aci.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/cisco_aci.md
@@ -94,8 +94,8 @@ Install [Cisco ACI Exporter](https://github.com/RavuAlHemio/prometheus_aci_expor

 The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:

 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/citrix_netscaler.md b/src/go/plugin/go.d/modules/prometheus/integrations/citrix_netscaler.md
index e6b704031..8596a3a05 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/citrix_netscaler.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/citrix_netscaler.md
@@ -94,8 +94,8 @@ Install [Citrix NetScaler Exporter](https://github.com/rokett/Citrix-NetScaler-E

 The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:

 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/clamav_daemon.md b/src/go/plugin/go.d/modules/prometheus/integrations/clamav_daemon.md
index ea0398be5..932a57ae3 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/clamav_daemon.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/clamav_daemon.md
@@ -94,8 +94,8 @@ Install [ClamAV daemon stats exporter](https://github.com/sergeymakinen/clamav_e

 The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:

 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/clamscan_results.md b/src/go/plugin/go.d/modules/prometheus/integrations/clamscan_results.md
index 4cc488b1c..c74f77581 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/clamscan_results.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/clamscan_results.md
@@ -94,8 +94,8 @@ Install [clamscan-exporter](https://github.com/FortnoxAB/clamscan-exporter) by f

 The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:

 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/clash.md b/src/go/plugin/go.d/modules/prometheus/integrations/clash.md
index 23b80bd30..e511fdfb9 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/clash.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/clash.md
@@ -94,8 +94,8 @@ Install [Clash exporter](https://github.com/elonzh/clash_exporter) by following

 The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:

 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/cloud_foundry.md b/src/go/plugin/go.d/modules/prometheus/integrations/cloud_foundry.md
index 2d1b36c25..bb194aad7 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/cloud_foundry.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/cloud_foundry.md
@@ -94,8 +94,8 @@ Install [Cloud Foundry exporter](https://github.com/bosh-prometheus/cf_exporter)

 The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:

 ```yaml
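
For reference, the `edit-config` step that these truncated `bash` hunks cut short typically continues with the script invocation itself (the exact invocation is assumed from the standard install layout shown in the context line):

```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
sudo ./edit-config go.d/prometheus.conf
```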
- Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/cloud_foundry_firehose.md b/src/go/plugin/go.d/modules/prometheus/integrations/cloud_foundry_firehose.md index d6405b416..8fdfd6e03 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/cloud_foundry_firehose.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/cloud_foundry_firehose.md @@ -94,8 +94,8 @@ Install [Cloud Foundry Firehose exporter](https://github.com/bosh-prometheus/fir The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/cloudflare_pcap.md b/src/go/plugin/go.d/modules/prometheus/integrations/cloudflare_pcap.md index 2c1c479a4..6eb75106e 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/cloudflare_pcap.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/cloudflare_pcap.md @@ -94,8 +94,8 @@ Install [Cloudflare exporter](https://github.com/wehkamp/docker-prometheus-cloud The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/cloudwatch.md b/src/go/plugin/go.d/modules/prometheus/integrations/cloudwatch.md index 816c0450e..7b7a8905d 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/cloudwatch.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/cloudwatch.md @@ -94,8 +94,8 @@ Install [CloudWatch exporter](https://github.com/prometheus/cloudwatch_exporter) The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. 
If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/clustercontrol_cmon.md b/src/go/plugin/go.d/modules/prometheus/integrations/clustercontrol_cmon.md index c69cb434c..c5edda39a 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/clustercontrol_cmon.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/clustercontrol_cmon.md @@ -94,8 +94,8 @@ Install [CMON Exporter](https://github.com/severalnines/cmon_exporter) by follow The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). 
- Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/collectd.md b/src/go/plugin/go.d/modules/prometheus/integrations/collectd.md index 972146881..76b3f0f92 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/collectd.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/collectd.md @@ -94,8 +94,8 @@ Install [Collectd exporter](https://github.com/prometheus/collectd_exporter) by The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/concourse.md b/src/go/plugin/go.d/modules/prometheus/integrations/concourse.md index ce7baff4b..94015fd8c 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/concourse.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/concourse.md @@ -94,8 +94,8 @@ To configure the built-in Prometheus exporter, follow the [official documentatio The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/craftbeerpi.md b/src/go/plugin/go.d/modules/prometheus/integrations/craftbeerpi.md index f4dae54c5..4210ab590 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/craftbeerpi.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/craftbeerpi.md @@ -94,8 +94,8 @@ Install [CraftBeerPi exporter](https://github.com/jo-hannes/craftbeerpi_exporter The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/crowdsec.md b/src/go/plugin/go.d/modules/prometheus/integrations/crowdsec.md index a59069dd3..f7ec5bd8d 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/crowdsec.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/crowdsec.md @@ -94,8 +94,8 @@ To configure the built-in Prometheus exporter, follow the [official documentatio The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/crypto_exchanges.md b/src/go/plugin/go.d/modules/prometheus/integrations/crypto_exchanges.md index a56ed0db5..649a7f121 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/crypto_exchanges.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/crypto_exchanges.md @@ -94,8 +94,8 @@ Install [Crypto exporter](https://github.com/ix-ai/crypto-exporter) by following The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/cryptowatch.md b/src/go/plugin/go.d/modules/prometheus/integrations/cryptowatch.md index 554910783..d21b2c3b0 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/cryptowatch.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/cryptowatch.md @@ -94,8 +94,8 @@ Install [Cryptowat Exporter](https://github.com/nbarrientos/cryptowat_exporter) The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. 
If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/custom_exporter.md b/src/go/plugin/go.d/modules/prometheus/integrations/custom_exporter.md index 9d309e624..00ddedf00 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/custom_exporter.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/custom_exporter.md @@ -94,8 +94,8 @@ Install [Custom Exporter](https://github.com/orange-cloudfoundry/custom_exporter The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). 
 - Option syntax:

 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/cvmfs_clients.md b/src/go/plugin/go.d/modules/prometheus/integrations/cvmfs_clients.md
index b283f220c..23c1bf696 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/cvmfs_clients.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/cvmfs_clients.md
@@ -94,8 +94,8 @@ Install [CVMFS exporter](https://github.com/guilbaults/cvmfs-exporter) by follow

 The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:

 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/ddwrt_routers.md b/src/go/plugin/go.d/modules/prometheus/integrations/ddwrt_routers.md
index e0b898fbf..677aeda8f 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/ddwrt_routers.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/ddwrt_routers.md
@@ -94,8 +94,8 @@ Install [ddwrt-collector](https://github.com/camelusferus/ddwrt_collector) by fo

 The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:

 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/dell_emc_ecs_cluster.md b/src/go/plugin/go.d/modules/prometheus/integrations/dell_emc_ecs_cluster.md
index 6d268ca64..c1b116d10 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/dell_emc_ecs_cluster.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/dell_emc_ecs_cluster.md
@@ -94,8 +94,8 @@ Install [Dell EMC ECS Exporter](https://github.com/paychex/prometheus-emcecs-exp

 The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:

 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/dell_emc_isilon_cluster.md b/src/go/plugin/go.d/modules/prometheus/integrations/dell_emc_isilon_cluster.md
index 5f29528ad..d65caef94 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/dell_emc_isilon_cluster.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/dell_emc_isilon_cluster.md
@@ -94,8 +94,8 @@ Install [Dell EMC Isilon Exporter](https://github.com/paychex/prometheus-isilon-

 The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:

 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/dell_emc_xtremio_cluster.md b/src/go/plugin/go.d/modules/prometheus/integrations/dell_emc_xtremio_cluster.md
index fe7285234..2fb7ef892 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/dell_emc_xtremio_cluster.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/dell_emc_xtremio_cluster.md
@@ -94,8 +94,8 @@ Install [Dell/EMC XtremIO Exporter](https://github.com/cthiel42/prometheus-xtrem

 The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:

 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/dell_powermax.md b/src/go/plugin/go.d/modules/prometheus/integrations/dell_powermax.md
index 200e2f049..88d7ff82e 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/dell_powermax.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/dell_powermax.md
@@ -94,8 +94,8 @@ Install [PowerMax Exporter](https://github.com/kckecheng/powermax_exporter) by f

 The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:

 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/dependency-track.md b/src/go/plugin/go.d/modules/prometheus/integrations/dependency-track.md
index 22d41e643..ad19b5fa8 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/dependency-track.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/dependency-track.md
@@ -94,8 +94,8 @@ Install [Dependency-Track Exporter](https://github.com/jetstack/dependency-track

 The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:

 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/digitalocean.md b/src/go/plugin/go.d/modules/prometheus/integrations/digitalocean.md
index 8978434c2..bfeb133db 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/digitalocean.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/digitalocean.md
@@ -94,8 +94,8 @@ Install [DigitalOcean Exporter](https://github.com/metalmatze/digitalocean_expor

 The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:

 ```yaml
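For reference, the two options these hunks document repeatedly compose as follows in a `go.d/prometheus.conf` job. This is a minimal sketch, not part of the patch: the job name, URL, prefix, and patterns are hypothetical; only `label_prefix` (the row these hunks add) and `selector` (whose link they rewrite) come from the documented options.

```yaml
jobs:
  - name: example_prometheus_job        # hypothetical job name
    url: http://127.0.0.1:9090/metrics  # hypothetical endpoint
    # With label_prefix set, a scraped label such as `region` is exposed
    # as `myexp_region` (prefix, underscore, original name).
    label_prefix: myexp
    # Selector logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
    selector:
      allow:
        - pattern1
        - pattern2
      deny:
        - pattern3
        - pattern4
```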
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/discourse.md b/src/go/plugin/go.d/modules/prometheus/integrations/discourse.md
index adffe3fc3..139c3ce7a 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/discourse.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/discourse.md
@@ -94,8 +94,8 @@ Install [Discourse Exporter](https://github.com/discourse/discourse-prometheus)

 The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:

 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/dmarc.md b/src/go/plugin/go.d/modules/prometheus/integrations/dmarc.md
index 2d02e75a9..a67a71470 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/dmarc.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/dmarc.md
@@ -94,8 +94,8 @@ Install [dmarc-metrics-exporter](https://github.com/jgosmann/dmarc-metrics-expor

 The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:

 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/dnsbl.md b/src/go/plugin/go.d/modules/prometheus/integrations/dnsbl.md
index e79517968..20fe2d376 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/dnsbl.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/dnsbl.md
@@ -94,8 +94,8 @@ Install [dnsbl-exporter](https://github.com/Luzilla/dnsbl_exporter/) by followin

 The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:

 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/dutch_electricity_smart_meter.md b/src/go/plugin/go.d/modules/prometheus/integrations/dutch_electricity_smart_meter.md
index cf2dabd7b..35db3c116 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/dutch_electricity_smart_meter.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/dutch_electricity_smart_meter.md
@@ -94,8 +94,8 @@ Install [P1Exporter - Dutch Electricity Smart Meter Exporter](https://github.com

 The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:

 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/dynatrace.md b/src/go/plugin/go.d/modules/prometheus/integrations/dynatrace.md
index 96e3969d6..505defd44 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/dynatrace.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/dynatrace.md
@@ -94,8 +94,8 @@ Install [Dynatrace Exporter](https://github.com/Apside-TOP/dynatrace_exporter) b

 The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:

 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/eaton_ups.md b/src/go/plugin/go.d/modules/prometheus/integrations/eaton_ups.md
index c6c1823c8..83b1da1ac 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/eaton_ups.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/eaton_ups.md
@@ -94,8 +94,8 @@ Install [Prometheus Eaton UPS Exporter](https://github.com/psyinfra/prometheus-e

 The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:

 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/elgato_key_light_devices..md b/src/go/plugin/go.d/modules/prometheus/integrations/elgato_key_light_devices..md
index b4bc8d5d6..e337e0736 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/elgato_key_light_devices..md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/elgato_key_light_devices..md
@@ -94,8 +94,8 @@ Install [Elgato Key Light exporter](https://github.com/mdlayher/keylight_exporte

 The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:

 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/energomera_smart_power_meters.md b/src/go/plugin/go.d/modules/prometheus/integrations/energomera_smart_power_meters.md
index 74764ae52..7256bf643 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/energomera_smart_power_meters.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/energomera_smart_power_meters.md
@@ -94,8 +94,8 @@ Install [energomera-exporter Energomera electricity meter exporter](https://gith

 The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:

 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/eos.md b/src/go/plugin/go.d/modules/prometheus/integrations/eos.md
index b2e3d590a..e65f3062b 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/eos.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/eos.md
@@ -94,8 +94,8 @@ Install [EOS exporter](https://github.com/cern-eos/eos_exporter) by following th

 The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:

 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/etcd.md b/src/go/plugin/go.d/modules/prometheus/integrations/etcd.md
index b24d6b241..b2a1933ec 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/etcd.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/etcd.md
@@ -90,8 +90,8 @@ No action required.

 The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -113,6 +113,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -133,7 +134,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:

 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/excel_spreadsheet.md b/src/go/plugin/go.d/modules/prometheus/integrations/excel_spreadsheet.md
index 6039ee832..85745aa07 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/excel_spreadsheet.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/excel_spreadsheet.md
@@ -94,8 +94,8 @@ Install [Excel Exporter](https://github.com/MarcusCalidus/excel-exporter) by fol

 The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:

 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/fastd.md b/src/go/plugin/go.d/modules/prometheus/integrations/fastd.md
index 2442dff82..cbc9b546c 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/fastd.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/fastd.md
@@ -94,8 +94,8 @@ Install [Fastd Exporter](https://github.com/freifunk-darmstadt/fastd-exporter) b

 The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:

 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/fortigate_firewall.md b/src/go/plugin/go.d/modules/prometheus/integrations/fortigate_firewall.md
index b89853a99..01cdef64a 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/fortigate_firewall.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/fortigate_firewall.md
@@ -94,8 +94,8 @@ Install [fortigate_exporter](https://github.com/bluecmd/fortigate_exporter) by f

 The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:

 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/freebsd_nfs.md b/src/go/plugin/go.d/modules/prometheus/integrations/freebsd_nfs.md
index cf60803ad..a6f0ac3bd 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/freebsd_nfs.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/freebsd_nfs.md
@@ -94,8 +94,8 @@ Install [FreeBSD NFS Exporter](https://github.com/Axcient/freebsd-nfs-exporter)

 The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:

 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/freebsd_rctl-racct.md b/src/go/plugin/go.d/modules/prometheus/integrations/freebsd_rctl-racct.md
index bfe6e9e93..a5f10f83f 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/freebsd_rctl-racct.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/freebsd_rctl-racct.md
@@ -94,8 +94,8 @@ Install [FreeBSD RCTL Exporter](https://github.com/yo000/rctl_exporter) by follo

 The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:

 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/freifunk_network.md b/src/go/plugin/go.d/modules/prometheus/integrations/freifunk_network.md
index 847e305d1..195152f8e 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/freifunk_network.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/freifunk_network.md
@@ -94,8 +94,8 @@ Install [Freifunk Exporter](https://github.com/xperimental/freifunk-exporter) by

 The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/freifunk_network.md b/src/go/plugin/go.d/modules/prometheus/integrations/freifunk_network.md
index 847e305d1..195152f8e 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/freifunk_network.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/freifunk_network.md
@@ -94,8 +94,8 @@ Install [Freifunk Exporter](https://github.com/xperimental/freifunk-exporter) by

 The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:

 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/fritzbox_network_devices.md b/src/go/plugin/go.d/modules/prometheus/integrations/fritzbox_network_devices.md
index 0158b0ba6..dfbef9d80 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/fritzbox_network_devices.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/fritzbox_network_devices.md
@@ -94,8 +94,8 @@ Install [Fritzbox exporter](https://github.com/pdreker/fritz_exporter) by follow

 The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:

 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/frrouting.md b/src/go/plugin/go.d/modules/prometheus/integrations/frrouting.md
index 5f492a475..d3327e081 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/frrouting.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/frrouting.md
@@ -94,8 +94,8 @@ Install [FRRouting Exporter](https://github.com/tynany/frr_exporter) by followin

 The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:

 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/gcp_gce.md b/src/go/plugin/go.d/modules/prometheus/integrations/gcp_gce.md
index 34c6d7673..0443874ce 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/gcp_gce.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/gcp_gce.md
@@ -94,8 +94,8 @@ Install [GCP GCE Exporter](https://github.com/O1ahmad/gcp-gce-exporter) by follo

 The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:

 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/gcp_quota.md b/src/go/plugin/go.d/modules/prometheus/integrations/gcp_quota.md
index 85959b677..40324024a 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/gcp_quota.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/gcp_quota.md
@@ -94,8 +94,8 @@ Install [GCP Quota Exporter](https://github.com/mintel/gcp-quota-exporter) by fo

 The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:

 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/generic_command_line_output.md b/src/go/plugin/go.d/modules/prometheus/integrations/generic_command_line_output.md
index 27f1cb647..d5fe88dbe 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/generic_command_line_output.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/generic_command_line_output.md
@@ -94,8 +94,8 @@ Install [Generic Command Line Output Exporter](https://github.com/MarioMartReq/g

 The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:

 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/generic_storage_enclosure_tool.md b/src/go/plugin/go.d/modules/prometheus/integrations/generic_storage_enclosure_tool.md
index ac8f74a43..06a0bf469 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/generic_storage_enclosure_tool.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/generic_storage_enclosure_tool.md
@@ -94,8 +94,8 @@ Install [jbod - Generic storage enclosure tool](https://github.com/Gandi/jbod-rs

 The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:

 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/github_api_rate_limit.md b/src/go/plugin/go.d/modules/prometheus/integrations/github_api_rate_limit.md
index 548430349..9aca46747 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/github_api_rate_limit.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/github_api_rate_limit.md
@@ -95,8 +95,8 @@ Install [GitHub API rate limit Exporter](https://github.com/lunarway/github-rate

 The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -118,6 +118,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -138,7 +139,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:

 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/github_repository.md b/src/go/plugin/go.d/modules/prometheus/integrations/github_repository.md
index f96fc527a..c90c49282 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/github_repository.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/github_repository.md
@@ -94,8 +94,8 @@ Install [GitHub Exporter](https://github.com/githubexporter/github-exporter) by

 The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:

 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/gitlab_runner.md b/src/go/plugin/go.d/modules/prometheus/integrations/gitlab_runner.md
index 6982b7a59..9180c7921 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/gitlab_runner.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/gitlab_runner.md
@@ -94,8 +94,8 @@ To configure the built-in Prometheus exporter, follow the [official documentatio

 The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:

 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/gobetween.md b/src/go/plugin/go.d/modules/prometheus/integrations/gobetween.md
index 7ea5ec62c..aefdc9674 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/gobetween.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/gobetween.md
@@ -90,8 +90,8 @@ No action required.

 The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -113,6 +113,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -133,7 +134,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:

 ```yaml
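The `label_prefix` row that these hunks add to each option table is a per-job setting. A minimal sketch of how it would be set in a `go.d/prometheus.conf` job, assuming a placeholder job name and scrape endpoint:

```yaml
jobs:
  - name: example_exporter               # placeholder job name
    url: http://127.0.0.1:9090/metrics   # placeholder scrape endpoint
    label_prefix: exp                    # labels become exp_<original name>
```

With this set, a scraped label such as `instance` would be stored as `exp_instance`, matching the `prefix_name` formatting described in the new table row.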
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/google_cloud_platform.md b/src/go/plugin/go.d/modules/prometheus/integrations/google_cloud_platform.md
index 50fad9263..b70df347d 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/google_cloud_platform.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/google_cloud_platform.md
@@ -94,8 +94,8 @@ Install [Google Cloud Platform Exporter](https://github.com/DazWilkin/gcp-export

 The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:

 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/google_pagespeed.md b/src/go/plugin/go.d/modules/prometheus/integrations/google_pagespeed.md
index a3a3ecefe..186a4fddc 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/google_pagespeed.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/google_pagespeed.md
@@ -94,8 +94,8 @@ Install [Pagespeed exporter](https://github.com/foomo/pagespeed_exporter) by fol

 The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:

 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/google_stackdriver.md b/src/go/plugin/go.d/modules/prometheus/integrations/google_stackdriver.md
index ef8fc5734..7b80aaa2e 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/google_stackdriver.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/google_stackdriver.md
@@ -94,8 +94,8 @@ Install [Google Stackdriver exporter](https://github.com/prometheus-community/st

 The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:

 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/gpsd.md b/src/go/plugin/go.d/modules/prometheus/integrations/gpsd.md
index 68a588515..397c64c06 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/gpsd.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/gpsd.md
@@ -94,8 +94,8 @@ Install [gpsd exporter](https://github.com/natesales/gpsd-exporter) by following

 The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:

 ```yaml
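The `cd` line kept as context in every hunk falls back from `/etc/netdata` (native packages) to `/opt/netdata/etc/netdata` (static builds). In the shipped documents the fenced block continues with the `edit-config` invocation, along the lines of:

```bash
# Enter the Netdata config directory, whichever install layout is present.
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
sudo ./edit-config go.d/prometheus.conf
```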
- Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/grafana.md b/src/go/plugin/go.d/modules/prometheus/integrations/grafana.md index 2c0baa395..38372a671 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/grafana.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/grafana.md @@ -90,8 +90,8 @@ No action required. The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -113,6 +113,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -133,7 +134,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/graylog_server.md b/src/go/plugin/go.d/modules/prometheus/integrations/graylog_server.md index 8888ae210..adfa77363 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/graylog_server.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/graylog_server.md @@ -94,8 +94,8 @@ To configure the built-in Prometheus exporter, follow the [official documentatio The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/gtp.md b/src/go/plugin/go.d/modules/prometheus/integrations/gtp.md index edd3b3a56..d64ce2e16 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/gtp.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/gtp.md @@ -94,8 +94,8 @@ Install [GTP Exporter](https://github.com/wmnsk/gtp_exporter) by following the i The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/halon.md b/src/go/plugin/go.d/modules/prometheus/integrations/halon.md index 3a288e53b..d872d2d48 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/halon.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/halon.md @@ -94,8 +94,8 @@ Install [Halon exporter](https://github.com/tobiasbp/halon_exporter) by followin The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/hana.md b/src/go/plugin/go.d/modules/prometheus/integrations/hana.md index 75d84fef6..13a1fcfde 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/hana.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/hana.md @@ -94,8 +94,8 @@ Install [HANA Exporter](https://github.com/jenningsloy318/hana_exporter) by foll The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/hashicorp_vault_secrets.md b/src/go/plugin/go.d/modules/prometheus/integrations/hashicorp_vault_secrets.md index c619344d4..fc45d285b 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/hashicorp_vault_secrets.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/hashicorp_vault_secrets.md @@ -94,8 +94,8 @@ Install [Vault Assessment Prometheus Exporter](https://github.com/tomtom-interna The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. 
If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/hasura_graphql_server.md b/src/go/plugin/go.d/modules/prometheus/integrations/hasura_graphql_server.md index d95a9199b..bdbde3b8f 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/hasura_graphql_server.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/hasura_graphql_server.md @@ -95,8 +95,8 @@ Install [Hasura Exporter](https://github.com/zolamk/hasura-exporter) by followin The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -118,6 +118,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -138,7 +139,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). 
- Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/hdsentinel.md b/src/go/plugin/go.d/modules/prometheus/integrations/hdsentinel.md index 1daad64a5..ac9996856 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/hdsentinel.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/hdsentinel.md @@ -94,8 +94,8 @@ Install [HDSentinel Exporter](https://github.com/qusielle/hdsentinel-exporter) b The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/helium_hotspot.md b/src/go/plugin/go.d/modules/prometheus/integrations/helium_hotspot.md index 6ce0d3348..223f04f02 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/helium_hotspot.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/helium_hotspot.md @@ -94,8 +94,8 @@ Install [Helium hotspot exporter](https://github.com/tedder/helium_hotspot_expor The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/helium_hotspot.md b/src/go/plugin/go.d/modules/prometheus/integrations/helium_hotspot.md
index 6ce0d3348..223f04f02 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/helium_hotspot.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/helium_hotspot.md
@@ -94,8 +94,8 @@ Install [Helium hotspot exporter](https://github.com/tedder/helium_hotspot_expor
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/helium_miner_validator.md b/src/go/plugin/go.d/modules/prometheus/integrations/helium_miner_validator.md
index a8fdb2814..eab5c5e53 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/helium_miner_validator.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/helium_miner_validator.md
@@ -94,8 +94,8 @@ Install [Helium miner (validator) exporter](https://github.com/tedder/miner_expo
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/hhvm.md b/src/go/plugin/go.d/modules/prometheus/integrations/hhvm.md
index 4201947be..51e86902f 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/hhvm.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/hhvm.md
@@ -95,8 +95,8 @@ Install [HHVM Exporter](https://github.com/wikimedia/operations-software-hhvm_ex
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -118,6 +118,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -138,7 +139,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/hitron_cgn_series_cpe.md b/src/go/plugin/go.d/modules/prometheus/integrations/hitron_cgn_series_cpe.md
index 069062f61..710af1c4b 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/hitron_cgn_series_cpe.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/hitron_cgn_series_cpe.md
@@ -94,8 +94,8 @@ Install [Hitron CGNV4 exporter](https://github.com/yrro/hitron-exporter) by foll
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/hitron_coda_cable_modem.md b/src/go/plugin/go.d/modules/prometheus/integrations/hitron_coda_cable_modem.md
index c62b7b24a..2d82d8b63 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/hitron_coda_cable_modem.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/hitron_coda_cable_modem.md
@@ -94,8 +94,8 @@ Install [Hitron CODA Cable Modem Exporter](https://github.com/hairyhenderson/hit
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/homebridge.md b/src/go/plugin/go.d/modules/prometheus/integrations/homebridge.md
index ca56a7647..5be7eaad8 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/homebridge.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/homebridge.md
@@ -94,8 +94,8 @@ Install [Homebridge Prometheus Exporter](https://github.com/lstrojny/homebridge-
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/homey.md b/src/go/plugin/go.d/modules/prometheus/integrations/homey.md
index b17aae574..dbd15582c 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/homey.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/homey.md
@@ -94,8 +94,8 @@ Install [Homey Exporter](https://github.com/rickardp/homey-prometheus-exporter)
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/honeypot.md b/src/go/plugin/go.d/modules/prometheus/integrations/honeypot.md
index 28fdf70b2..f95c18db4 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/honeypot.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/honeypot.md
@@ -94,8 +94,8 @@ Install [Intrinsec honeypot_exporter](https://github.com/Intrinsec/honeypot_expo
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/hp_ilo.md b/src/go/plugin/go.d/modules/prometheus/integrations/hp_ilo.md
index 54de557cb..753734e72 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/hp_ilo.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/hp_ilo.md
@@ -94,8 +94,8 @@ Install [HP iLO Metrics Exporter](https://github.com/infinityworks/hpilo-exporte
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/huawei_devices.md b/src/go/plugin/go.d/modules/prometheus/integrations/huawei_devices.md
index 2f1e95733..734ed7e6b 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/huawei_devices.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/huawei_devices.md
@@ -94,8 +94,8 @@ Install [Huawei Hilink exporter](https://github.com/eliecharra/hilink-exporter)
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
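For context on the option tables these hunks touch: the docs note that `update_every` and `autodetection_retry` can also be set globally, with per-job overrides. A minimal sketch of that layout, with illustrative assumed values:

```yaml
update_every: 10           # assumed global collection interval, in seconds
autodetection_retry: 60    # assumed global re-detection interval, in seconds

jobs:
  - name: example          # hypothetical job
    url: http://127.0.0.1:9090/metrics
    timeout: 5             # overrides the table's default of 10 for this job only
```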
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/hubble.md b/src/go/plugin/go.d/modules/prometheus/integrations/hubble.md
index 36bd86d69..366f32457 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/hubble.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/hubble.md
@@ -94,8 +94,8 @@ To configure the built-in Prometheus exporter, follow the [official documentatio
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/ibm_aix_systems_njmon.md b/src/go/plugin/go.d/modules/prometheus/integrations/ibm_aix_systems_njmon.md
index 5a4499e6a..b370f44b2 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/ibm_aix_systems_njmon.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/ibm_aix_systems_njmon.md
@@ -94,8 +94,8 @@ Install [NJmon](https://github.com/crooks/njmon_exporter) by following the instr
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/ibm_cryptoexpress_cex_cards.md b/src/go/plugin/go.d/modules/prometheus/integrations/ibm_cryptoexpress_cex_cards.md
index f32cdd0c4..cdde59ae1 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/ibm_cryptoexpress_cex_cards.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/ibm_cryptoexpress_cex_cards.md
@@ -94,8 +94,8 @@ Install [IBM Z CEX Device Plugin Prometheus Exporter](https://github.com/ibm-s39
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/ibm_mq.md b/src/go/plugin/go.d/modules/prometheus/integrations/ibm_mq.md
index d41219bbb..548c7f8c9 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/ibm_mq.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/ibm_mq.md
@@ -94,8 +94,8 @@ Install [MQ Exporter](https://github.com/agebhar1/mq_exporter) by following the
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/ibm_spectrum.md b/src/go/plugin/go.d/modules/prometheus/integrations/ibm_spectrum.md
index edffab950..8eaaa148d 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/ibm_spectrum.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/ibm_spectrum.md
@@ -94,8 +94,8 @@ Install [IBM Spectrum Exporter](https://github.com/topine/ibm-spectrum-exporter)
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/ibm_spectrum_virtualize.md b/src/go/plugin/go.d/modules/prometheus/integrations/ibm_spectrum_virtualize.md
index 5d3dab9e7..00233cecd 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/ibm_spectrum_virtualize.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/ibm_spectrum_virtualize.md
@@ -94,8 +94,8 @@ Install [spectrum_virtualize_exporter](https://github.com/bluecmd/spectrum_virtu
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/ibm_z_hardware_management_console.md b/src/go/plugin/go.d/modules/prometheus/integrations/ibm_z_hardware_management_console.md
index 5cca9c2ae..2ec9932ec 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/ibm_z_hardware_management_console.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/ibm_z_hardware_management_console.md
@@ -94,8 +94,8 @@ Install [IBM Z HMC Exporter](https://github.com/zhmcclient/zhmc-prometheus-expor
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
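Similarly, the `max_time_series` and `max_time_series_per_metric` limits from the option table can be raised for a single large endpoint. A sketch under the same illustrative assumptions:

```yaml
jobs:
  - name: big_endpoint                 # hypothetical job
    url: http://127.0.0.1:9090/metrics
    max_time_series: 5000              # raise the global cap above the 2000 default
    max_time_series_per_metric: 500    # raise the per-metric cap above the 200 default
```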
- Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/influxdb.md b/src/go/plugin/go.d/modules/prometheus/integrations/influxdb.md index 817144efb..5ab11ffdd 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/influxdb.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/influxdb.md @@ -94,8 +94,8 @@ Install [InfluxDB exporter](https://github.com/prometheus/influxdb_exporter) by The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/iota_full_node.md b/src/go/plugin/go.d/modules/prometheus/integrations/iota_full_node.md index 74ba5a3ef..90eea4d07 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/iota_full_node.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/iota_full_node.md @@ -94,8 +94,8 @@ Install [IOTA Exporter](https://github.com/crholliday/iota-prom-exporter) by fol The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/ipmi_by_soundcloud.md b/src/go/plugin/go.d/modules/prometheus/integrations/ipmi_by_soundcloud.md index 52966c728..7f3960249 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/ipmi_by_soundcloud.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/ipmi_by_soundcloud.md @@ -94,8 +94,8 @@ Install [SoundCloud IPMI Exporter (querying IPMI externally, blackbox-exporter s The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. 
| | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/iqair_airvisual_air_quality_monitors.md b/src/go/plugin/go.d/modules/prometheus/integrations/iqair_airvisual_air_quality_monitors.md index 9e2ed89a5..514854e81 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/iqair_airvisual_air_quality_monitors.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/iqair_airvisual_air_quality_monitors.md @@ -94,8 +94,8 @@ Install [IQair Exporter](https://github.com/Packetslave/iqair_exporter) by follo The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). 
- Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/jarvis_standing_desk.md b/src/go/plugin/go.d/modules/prometheus/integrations/jarvis_standing_desk.md index cd392a297..96a23e54c 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/jarvis_standing_desk.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/jarvis_standing_desk.md @@ -94,8 +94,8 @@ Install [Jarvis Standing Desk Exporter](https://github.com/hairyhenderson/jarvis The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/jenkins.md b/src/go/plugin/go.d/modules/prometheus/integrations/jenkins.md index 203ae3d69..533805099 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/jenkins.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/jenkins.md @@ -94,8 +94,8 @@ Install [Jenkins exporter](https://github.com/simplesurance/jenkins-exporter) by The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/jenkins.md b/src/go/plugin/go.d/modules/prometheus/integrations/jenkins.md
index 203ae3d69..533805099 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/jenkins.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/jenkins.md
@@ -94,8 +94,8 @@ Install [Jenkins exporter](https://github.com/simplesurance/jenkins-exporter) by
 The configuration file name for this integration is `go.d/prometheus.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/jetbrains_floating_license_server.md b/src/go/plugin/go.d/modules/prometheus/integrations/jetbrains_floating_license_server.md
index cde4e22a6..add838438 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/jetbrains_floating_license_server.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/jetbrains_floating_license_server.md
@@ -94,8 +94,8 @@ Install [JetBrains Floating License Server Export](https://github.com/mkreu/jetb
 The configuration file name for this integration is `go.d/prometheus.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/jmx.md b/src/go/plugin/go.d/modules/prometheus/integrations/jmx.md
index 6813a8087..15c10e93b 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/jmx.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/jmx.md
@@ -94,8 +94,8 @@ Install [JMX Exporter](https://github.com/prometheus/jmx_exporter) by following
 The configuration file name for this integration is `go.d/prometheus.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/jolokia.md b/src/go/plugin/go.d/modules/prometheus/integrations/jolokia.md
index 187b40be1..c710b527d 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/jolokia.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/jolokia.md
@@ -94,8 +94,8 @@ Install [jolokia_exporter](https://github.com/aklinkert/jolokia_exporter) by fol
 The configuration file name for this integration is `go.d/prometheus.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 ```yaml
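As a sketch of the selector form implied by the Logic line above, assuming the allow/deny lists described in the linked selector README (the pattern names are placeholders):

```yaml
# (pattern1 OR pattern2) AND !(pattern3 or pattern4)
selector:
  allow:
    - pattern1
    - pattern2
  deny:
    - pattern3
    - pattern4
```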
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/journald.md b/src/go/plugin/go.d/modules/prometheus/integrations/journald.md
index 0d016ad21..59f834555 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/journald.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/journald.md
@@ -94,8 +94,8 @@ Install [journald-exporter](https://github.com/dead-claudia/journald-exporter) b
 The configuration file name for this integration is `go.d/prometheus.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/kafka.md b/src/go/plugin/go.d/modules/prometheus/integrations/kafka.md
index fb328f740..4ffa97f8f 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/kafka.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/kafka.md
@@ -94,8 +94,8 @@ Install [Kafka Exporter](https://github.com/danielqsj/kafka_exporter/) by follow
 The configuration file name for this integration is `go.d/prometheus.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/kafka_connect.md b/src/go/plugin/go.d/modules/prometheus/integrations/kafka_connect.md
index c28c90f49..aa50ebcec 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/kafka_connect.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/kafka_connect.md
@@ -94,8 +94,8 @@ Install [Kafka Connect exporter](https://github.com/findelabs/kafka-connect-expo
 The configuration file name for this integration is `go.d/prometheus.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 ```yaml
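Pulling several rows of the options table together, a hedged sketch of a single job in `go.d/prometheus.conf`; the endpoint and credentials are placeholders, and the numeric values simply echo the documented defaults:

```yaml
jobs:
  - name: example
    url: http://127.0.0.1:9090/metrics
    timeout: 10                      # documented default
    username: myuser                 # basic HTTP auth, placeholder
    password: mypassword             # placeholder
    max_time_series: 2000            # documented default
    max_time_series_per_metric: 200  # documented default
```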
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/kafka_consumer_lag.md b/src/go/plugin/go.d/modules/prometheus/integrations/kafka_consumer_lag.md
index 6003d3af9..123d65f28 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/kafka_consumer_lag.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/kafka_consumer_lag.md
@@ -94,8 +94,8 @@ Install [Kafka Consumer Lag Monitoring](https://github.com/omarsmak/kafka-consum
 The configuration file name for this integration is `go.d/prometheus.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/kafka_zookeeper.md b/src/go/plugin/go.d/modules/prometheus/integrations/kafka_zookeeper.md
index cbf799ca3..f3415005a 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/kafka_zookeeper.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/kafka_zookeeper.md
@@ -94,8 +94,8 @@ Install [Kafka ZooKeeper Exporter](https://github.com/cloudflare/kafka_zookeeper
 The configuration file name for this integration is `go.d/prometheus.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/kannel.md b/src/go/plugin/go.d/modules/prometheus/integrations/kannel.md
index a2264e9d9..eb58ded27 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/kannel.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/kannel.md
@@ -94,8 +94,8 @@ Install [Kannel Exporter](https://github.com/apostvav/kannel_exporter) by follow
 The configuration file name for this integration is `go.d/prometheus.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/keepalived.md b/src/go/plugin/go.d/modules/prometheus/integrations/keepalived.md
index aeb0d99b0..ed19aaa7b 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/keepalived.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/keepalived.md
@@ -94,8 +94,8 @@ Install [Keepalived Exporter](https://github.com/gen2brain/keepalived_exporter)
 The configuration file name for this integration is `go.d/prometheus.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/kubernetes_cluster_cloud_cost.md b/src/go/plugin/go.d/modules/prometheus/integrations/kubernetes_cluster_cloud_cost.md
index 759ce0cbe..865b116b8 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/kubernetes_cluster_cloud_cost.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/kubernetes_cluster_cloud_cost.md
@@ -94,8 +94,8 @@ Install [Kubernetes Cloud Cost Exporter](https://github.com/agilestacks/korral)
 The configuration file name for this integration is `go.d/prometheus.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/lagerist_disk_latency.md b/src/go/plugin/go.d/modules/prometheus/integrations/lagerist_disk_latency.md
index 73019995c..9614c8339 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/lagerist_disk_latency.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/lagerist_disk_latency.md
@@ -94,8 +94,8 @@ Install [Lagerist Disk latency exporter](https://github.com/Svedrin/lagerist) by
 The configuration file name for this integration is `go.d/prometheus.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/ldap.md b/src/go/plugin/go.d/modules/prometheus/integrations/ldap.md
index 705d1e198..eefdb63cd 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/ldap.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/ldap.md
@@ -94,8 +94,8 @@ Install [LDAP Exporter](https://github.com/titisan/ldap_exporter) by following t
 The configuration file name for this integration is `go.d/prometheus.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/linode.md b/src/go/plugin/go.d/modules/prometheus/integrations/linode.md
index eff67ae75..a74147a82 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/linode.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/linode.md
@@ -94,8 +94,8 @@ Install [Linode Exporter](https://github.com/DazWilkin/linode-exporter) by follo
 The configuration file name for this integration is `go.d/prometheus.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/loki.md b/src/go/plugin/go.d/modules/prometheus/integrations/loki.md
index 002634a10..784e42887 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/loki.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/loki.md
@@ -94,8 +94,8 @@ Install [loki](https://github.com/grafana/loki) according to its documentation.
 The configuration file name for this integration is `go.d/prometheus.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/lustre_metadata.md b/src/go/plugin/go.d/modules/prometheus/integrations/lustre_metadata.md
index 2fe27331e..b69bc2481 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/lustre_metadata.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/lustre_metadata.md
@@ -94,8 +94,8 @@ Install [Cluster Exporter](https://github.com/GSI-HPC/prometheus-cluster-exporte
 The configuration file name for this integration is `go.d/prometheus.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/lynis_audit_reports.md b/src/go/plugin/go.d/modules/prometheus/integrations/lynis_audit_reports.md
index 47b87c2d3..6c94b79f0 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/lynis_audit_reports.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/lynis_audit_reports.md
@@ -94,8 +94,8 @@ Install [lynis_exporter](https://github.com/MauveSoftware/lynis_exporter) by fol
 The configuration file name for this integration is `go.d/prometheus.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/machbase.md b/src/go/plugin/go.d/modules/prometheus/integrations/machbase.md
index 23e928296..3c30dd9dd 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/machbase.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/machbase.md
@@ -94,8 +94,8 @@ Install [Machbase Exporter](https://github.com/MACHBASE/prometheus-machbase-expo
 The configuration file name for this integration is `go.d/prometheus.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/maildir.md b/src/go/plugin/go.d/modules/prometheus/integrations/maildir.md
index a7c106e83..b4d1fadca 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/maildir.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/maildir.md
@@ -94,8 +94,8 @@ Install [mailexporter](https://github.com/cherti/mailexporter) by following the
 The configuration file name for this integration is `go.d/prometheus.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/meilisearch.md b/src/go/plugin/go.d/modules/prometheus/integrations/meilisearch.md
index 60cad4a91..2aa331502 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/meilisearch.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/meilisearch.md
@@ -94,8 +94,8 @@ Install [Meilisearch Exporter](https://github.com/scottaglia/meilisearch_exporte
 The configuration file name for this integration is `go.d/prometheus.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/memcached_community.md b/src/go/plugin/go.d/modules/prometheus/integrations/memcached_community.md
index 45acae167..890525533 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/memcached_community.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/memcached_community.md
@@ -94,8 +94,8 @@ Install [Memcached exporter](https://github.com/prometheus/memcached_exporter) b
 The configuration file name for this integration is `go.d/prometheus.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/meraki_dashboard.md b/src/go/plugin/go.d/modules/prometheus/integrations/meraki_dashboard.md
index 28626195a..35f58489e 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/meraki_dashboard.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/meraki_dashboard.md
@@ -94,8 +94,8 @@ Install [Meraki dashboard data exporter using API](https://github.com/TheHolm/me
 The configuration file name for this integration is `go.d/prometheus.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/mesos.md b/src/go/plugin/go.d/modules/prometheus/integrations/mesos.md
index c1f7cd0ee..85f752ecd 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/mesos.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/mesos.md
@@ -94,8 +94,8 @@ Install [Mesos exporter](http://github.com/mesosphere/mesos_exporter) by followi
 The configuration file name for this integration is `go.d/prometheus.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/mikrotik_devices.md b/src/go/plugin/go.d/modules/prometheus/integrations/mikrotik_devices.md
index 8d846fd26..6e7668db4 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/mikrotik_devices.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/mikrotik_devices.md
@@ -94,8 +94,8 @@ Install [nshttpd/mikrotik-exporter, swoga/m](https://github.com/swoga/mikrotik-e
 The configuration file name for this integration is `go.d/prometheus.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
- Option syntax:

```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/mikrotik_routeros_devices.md b/src/go/plugin/go.d/modules/prometheus/integrations/mikrotik_routeros_devices.md
index e988add25..a23756a9b 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/mikrotik_routeros_devices.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/mikrotik_routeros_devices.md
@@ -94,8 +94,8 @@ Install [RouterOS exporter](https://github.com/welbymcroberts/routeros_exporter)

The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
| fallback_type | Time series selector (filter). | | no |
| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
| timeout | HTTP request timeout. | 10 | no |
| username | Username for basic HTTP authentication. | | no |
| password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
- Option syntax:

```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/minecraft.md b/src/go/plugin/go.d/modules/prometheus/integrations/minecraft.md
index f8649bbcb..4c437be04 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/minecraft.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/minecraft.md
@@ -94,8 +94,8 @@ Install [Minecraft Exporter](https://github.com/sladkoff/minecraft-prometheus-ex

The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
| fallback_type | Time series selector (filter). | | no |
| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
| timeout | HTTP request timeout. | 10 | no |
| username | Username for basic HTTP authentication. | | no |
| password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
- Option syntax:

```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/modbus_protocol.md b/src/go/plugin/go.d/modules/prometheus/integrations/modbus_protocol.md
index f6266cd43..d11e43b53 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/modbus_protocol.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/modbus_protocol.md
@@ -94,8 +94,8 @@ Install [modbusrtu_exporter](https://github.com/dernasherbrezon/modbusrtu_export

The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
| fallback_type | Time series selector (filter). | | no |
| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
| timeout | HTTP request timeout. | 10 | no |
| username | Username for basic HTTP authentication. | | no |
| password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
- Option syntax:

```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/mogilefs.md b/src/go/plugin/go.d/modules/prometheus/integrations/mogilefs.md
index becc6c194..94d3c05be 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/mogilefs.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/mogilefs.md
@@ -94,8 +94,8 @@ Install [MogileFS Exporter](https://github.com/KKBOX/mogilefs-exporter) by follo

The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
| fallback_type | Time series selector (filter). | | no |
| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
| timeout | HTTP request timeout. | 10 | no |
| username | Username for basic HTTP authentication. | | no |
| password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
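The selector linked above combines allow and deny lists; a minimal sketch of the shape the `- Option syntax:` bullet below introduces, with placeholder patterns taken as an assumption from the selector README rather than from this patch:

```yaml
selector:
  allow:
    # collect only series whose metric name matches one of these patterns
    - pattern1
    - pattern2
  deny:
    # then drop any series matching these patterns
    - pattern3
    - pattern4
```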
- Option syntax:

```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/monnit_sensors_mqtt.md b/src/go/plugin/go.d/modules/prometheus/integrations/monnit_sensors_mqtt.md
index 05517f39f..33d747910 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/monnit_sensors_mqtt.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/monnit_sensors_mqtt.md
@@ -94,8 +94,8 @@ Install [Monnit Sensors MQTT Exporter WIP](https://github.com/braxton9460/monnit

The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
| fallback_type | Time series selector (filter). | | no |
| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
| timeout | HTTP request timeout. | 10 | no |
| username | Username for basic HTTP authentication. | | no |
| password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
- Option syntax:

```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/mosquitto.md b/src/go/plugin/go.d/modules/prometheus/integrations/mosquitto.md
index 115dde093..9986311b4 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/mosquitto.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/mosquitto.md
@@ -94,8 +94,8 @@ Install [mosquitto exporter](https://github.com/sapcc/mosquitto-exporter) by fol

The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
| fallback_type | Time series selector (filter). | | no |
| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
| timeout | HTTP request timeout. | 10 | no |
| username | Username for basic HTTP authentication. | | no |
| password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
- Option syntax:

```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/mp707_usb_thermometer.md b/src/go/plugin/go.d/modules/prometheus/integrations/mp707_usb_thermometer.md
index f032dcfb6..b4a215878 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/mp707_usb_thermometer.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/mp707_usb_thermometer.md
@@ -94,8 +94,8 @@ Install [MP707 exporter](https://github.com/nradchenko/mp707_exporter) by follow

The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
| fallback_type | Time series selector (filter). | | no |
| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
| timeout | HTTP request timeout. | 10 | no |
| username | Username for basic HTTP authentication. | | no |
| password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
- Option syntax:

```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/mqtt_blackbox.md b/src/go/plugin/go.d/modules/prometheus/integrations/mqtt_blackbox.md
index 2f6e6ca57..962011865 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/mqtt_blackbox.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/mqtt_blackbox.md
@@ -94,8 +94,8 @@ Install [MQTT Blackbox Exporter](https://github.com/inovex/mqtt_blackbox_exporte

The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
| fallback_type | Time series selector (filter). | | no |
| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
| timeout | HTTP request timeout. | 10 | no |
| username | Username for basic HTTP authentication. | | no |
| password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
- Option syntax:

```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/mtail.md b/src/go/plugin/go.d/modules/prometheus/integrations/mtail.md
index e44f88d4c..2a206599e 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/mtail.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/mtail.md
@@ -94,8 +94,8 @@ Install [mtail](https://github.com/google/mtail) by following the instructions m

The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
| fallback_type | Time series selector (filter). | | no |
| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
| timeout | HTTP request timeout. | 10 | no |
| username | Username for basic HTTP authentication. | | no |
| password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
- Option syntax:

```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/naemon.md b/src/go/plugin/go.d/modules/prometheus/integrations/naemon.md
index 208777b95..813109e4e 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/naemon.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/naemon.md
@@ -94,8 +94,8 @@ Install [Naemon / Nagios Exporter](https://github.com/Griesbacher/Iapetos) by fo

The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
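Under the standard install layout these hunks assume, the edit typically looks like this sketch (`edit-config` ships in the Netdata config directory; `go.d/prometheus.conf` is the file named above):

```bash
# enter the Netdata config directory (native or static install)
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
# open the collector configuration with the bundled helper
sudo ./edit-config go.d/prometheus.conf
```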

```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
| fallback_type | Time series selector (filter). | | no |
| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
| timeout | HTTP request timeout. | 10 | no |
| username | Username for basic HTTP authentication. | | no |
| password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
- Option syntax:

```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/nagios.md b/src/go/plugin/go.d/modules/prometheus/integrations/nagios.md
index bdd669c76..865b97d57 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/nagios.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/nagios.md
@@ -95,8 +95,8 @@ Install [Nagios exporter](https://github.com/wbollock/nagios_exporter) by follow

The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -118,6 +118,7 @@ The following options can be defined globally: update_every, autodetection_retry
| fallback_type | Time series selector (filter). | | no |
| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
| timeout | HTTP request timeout. | 10 | no |
| username | Username for basic HTTP authentication. | | no |
| password | Password for basic HTTP authentication. | | no |
@@ -138,7 +139,7 @@ The following options can be defined globally: update_every, autodetection_retry
This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
- Option syntax:

```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/nature_remo_e_lite_devices.md b/src/go/plugin/go.d/modules/prometheus/integrations/nature_remo_e_lite_devices.md
index c102e4a7c..0c0e56b49 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/nature_remo_e_lite_devices.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/nature_remo_e_lite_devices.md
@@ -94,8 +94,8 @@ Install [Nature Remo E series Exporter](https://github.com/kenfdev/remo-exporter

The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
| fallback_type | Time series selector (filter). | | no |
| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
| timeout | HTTP request timeout. | 10 | no |
| username | Username for basic HTTP authentication. | | no |
| password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
- Option syntax:

```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/netapp_ontap_api.md b/src/go/plugin/go.d/modules/prometheus/integrations/netapp_ontap_api.md
index 80e4dce3b..8fa72eb1f 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/netapp_ontap_api.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/netapp_ontap_api.md
@@ -94,8 +94,8 @@ Install [Netapp ONTAP API Exporter](https://github.com/sapcc/netapp-api-exporter

The configuration file name for this integration is `go.d/prometheus.conf`.
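A minimal job definition for this file, exercising the options these hunks touch, might look like the following sketch (the URL and prefix are placeholders, not values from the patch):

```yaml
jobs:
  - name: local
    url: http://127.0.0.1:9090/metrics
    # with label_prefix set, a label named "instance" would be exposed as "exporter_instance"
    label_prefix: exporter
    # the defaults documented in the options table
    max_time_series: 2000
    max_time_series_per_metric: 200
```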
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
| fallback_type | Time series selector (filter). | | no |
| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
| timeout | HTTP request timeout. | 10 | no |
| username | Username for basic HTTP authentication. | | no |
| password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
- Option syntax:

```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/netapp_solidfire.md b/src/go/plugin/go.d/modules/prometheus/integrations/netapp_solidfire.md
index a15aef5fb..5433b0352 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/netapp_solidfire.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/netapp_solidfire.md
@@ -94,8 +94,8 @@ Install [NetApp Solidfire Exporter](https://github.com/mjavier2k/solidfire-expor

The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
| fallback_type | Time series selector (filter). | | no |
| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
| timeout | HTTP request timeout. | 10 | no |
| username | Username for basic HTTP authentication. | | no |
| password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
- Option syntax:

```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/netatmo_sensors.md b/src/go/plugin/go.d/modules/prometheus/integrations/netatmo_sensors.md
index 8420a5fe0..b04ca4dc0 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/netatmo_sensors.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/netatmo_sensors.md
@@ -94,8 +94,8 @@ Install [Netatmo exporter](https://github.com/xperimental/netatmo-exporter) by f

The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
| fallback_type | Time series selector (filter). | | no |
| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
| timeout | HTTP request timeout. | 10 | no |
| username | Username for basic HTTP authentication. | | no |
| password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
- Option syntax:

```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/netflow.md b/src/go/plugin/go.d/modules/prometheus/integrations/netflow.md
index 0b23e39b0..f5ff536de 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/netflow.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/netflow.md
@@ -94,8 +94,8 @@ Install [netflow exporter](https://github.com/paihu/netflow_exporter) by followi

The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
| fallback_type | Time series selector (filter). | | no |
| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
| timeout | HTTP request timeout. | 10 | no |
| username | Username for basic HTTP authentication. | | no |
| password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
- Option syntax:

```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/netmeter.md b/src/go/plugin/go.d/modules/prometheus/integrations/netmeter.md
index 97c9893d3..b9402564a 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/netmeter.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/netmeter.md
@@ -94,8 +94,8 @@ Install [NetMeter Exporter](https://github.com/ssbostan/netmeter-exporter) by fo

The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
| fallback_type | Time series selector (filter). | | no |
| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
| timeout | HTTP request timeout. | 10 | no |
| username | Username for basic HTTP authentication. | | no |
| password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
- Option syntax:

```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/new_relic.md b/src/go/plugin/go.d/modules/prometheus/integrations/new_relic.md
index 9ca6b4c8a..2698cd360 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/new_relic.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/new_relic.md
@@ -94,8 +94,8 @@ Install [New Relic exporter](https://github.com/jfindley/newrelic_exporter) by f

The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
| fallback_type | Time series selector (filter). | | no |
| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
| timeout | HTTP request timeout. | 10 | no |
| username | Username for basic HTTP authentication. | | no |
| password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
- Option syntax:

```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/nextcloud_servers.md b/src/go/plugin/go.d/modules/prometheus/integrations/nextcloud_servers.md
index 9e61c6be8..ec0d1b7ce 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/nextcloud_servers.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/nextcloud_servers.md
@@ -94,8 +94,8 @@ Install [Nextcloud exporter](https://github.com/xperimental/nextcloud-exporter)

The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
| fallback_type | Time series selector (filter). | | no |
| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
| timeout | HTTP request timeout. | 10 | no |
| username | Username for basic HTTP authentication. | | no |
| password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
- Option syntax:

```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/nextdns.md b/src/go/plugin/go.d/modules/prometheus/integrations/nextdns.md
index 3d5bc0a6d..b1cf0a821 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/nextdns.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/nextdns.md
@@ -94,8 +94,8 @@ Install [nextdns-exporter](https://github.com/raylas/nextdns-exporter) by follow

The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
| fallback_type | Time series selector (filter). | | no |
| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
| timeout | HTTP request timeout. | 10 | no |
| username | Username for basic HTTP authentication. | | no |
| password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
- Option syntax:

```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/nftables.md b/src/go/plugin/go.d/modules/prometheus/integrations/nftables.md
index acce8b8af..a634a40b9 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/nftables.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/nftables.md
@@ -94,8 +94,8 @@ Install [nftables_exporter](https://github.com/Sheridan/nftables_exporter) by fo

The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
| fallback_type | Time series selector (filter). | | no |
| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
| timeout | HTTP request timeout. | 10 | no |
| username | Username for basic HTTP authentication. | | no |
| password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
- Option syntax:

```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/nrpe_daemon.md b/src/go/plugin/go.d/modules/prometheus/integrations/nrpe_daemon.md
index e3a03e356..10dcf8ecd 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/nrpe_daemon.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/nrpe_daemon.md
@@ -94,8 +94,8 @@ Install [NRPE exporter](https://github.com/canonical/nrpe_exporter) by following

The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
| fallback_type | Time series selector (filter). | | no |
| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
| timeout | HTTP request timeout. | 10 | no |
| username | Username for basic HTTP authentication. | | no |
| password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
- Option syntax:

```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/nsx-t.md b/src/go/plugin/go.d/modules/prometheus/integrations/nsx-t.md
index 4e670ba56..ce3527330 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/nsx-t.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/nsx-t.md
@@ -94,8 +94,8 @@ Install [NSX-T Exporter](https://github.com/jk8s/nsxt_exporter) by following the

The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
| fallback_type | Time series selector (filter). | | no |
| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
| timeout | HTTP request timeout. | 10 | no |
| username | Username for basic HTTP authentication. | | no |
| password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.

- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
- Option syntax:

```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/nvml.md b/src/go/plugin/go.d/modules/prometheus/integrations/nvml.md
index 54bb3f1fb..74a399ebe 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/nvml.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/nvml.md
@@ -94,8 +94,8 @@ Install [NVML exporter](https://github.com/oko/nvml-exporter-rs) by following th

The configuration file name for this integration is `go.d/prometheus.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/obs_studio.md b/src/go/plugin/go.d/modules/prometheus/integrations/obs_studio.md index 254833af5..bcac047e6 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/obs_studio.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/obs_studio.md @@ -94,8 +94,8 @@ Install [OBS Studio Exporter](https://github.com/lukegb/obs_studio_exporter) by The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/odbc.md b/src/go/plugin/go.d/modules/prometheus/integrations/odbc.md index d128b647b..2cabeb711 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/odbc.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/odbc.md @@ -94,8 +94,8 @@ Install [ODBC Exporter](https://github.com/MACHBASE/prometheus-odbc-exporter) by The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/open_vswitch.md b/src/go/plugin/go.d/modules/prometheus/integrations/open_vswitch.md index c8d24a876..586c4b554 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/open_vswitch.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/open_vswitch.md @@ -94,8 +94,8 @@ Install [Open vSwitch Exporter](https://github.com/digitalocean/openvswitch_expo The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
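The `label_prefix` row that these hunks add is easier to read with a concrete job. A minimal sketch, assuming the option is set per job in `go.d/prometheus.conf` (job name and URL are illustrative):

```yaml
jobs:
  - name: example
    url: http://127.0.0.1:9090/metrics
    # An original label "region" would be exposed as "prom_region"
    # (the prefix, an underscore, then the original label name).
    label_prefix: prom
```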
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/openhab.md b/src/go/plugin/go.d/modules/prometheus/integrations/openhab.md
index 52a2ac94d..8e697bb9d 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/openhab.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/openhab.md
@@ -94,8 +94,8 @@ Install [OpenHAB exporter](https://github.com/pdreker/openhab_exporter) by follo
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/openldap_community.md b/src/go/plugin/go.d/modules/prometheus/integrations/openldap_community.md
index c1a547211..f13e82251 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/openldap_community.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/openldap_community.md
@@ -94,8 +94,8 @@ Install [OpenLDAP Metrics Exporter](https://github.com/tomcz/openldap_exporter)
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/openrc.md b/src/go/plugin/go.d/modules/prometheus/integrations/openrc.md
index bc5dfa902..6c34d3437 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/openrc.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/openrc.md
@@ -94,8 +94,8 @@ Install [openrc-exporter](https://git.sr.ht/~tomleb/openrc-exporter) by followin
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/openrct2.md b/src/go/plugin/go.d/modules/prometheus/integrations/openrct2.md
index 7995839b1..3c5625e3a 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/openrct2.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/openrct2.md
@@ -94,8 +94,8 @@ Install [OpenRCT2 Prometheus Exporter](https://github.com/terinjokes/openrct2-pr
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
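The `timeout`, `username`, and `password` options from the tables above map onto a job definition like this sketch (endpoint and credentials are placeholders):

```yaml
jobs:
  - name: secured
    url: https://203.0.113.10:9090/metrics
    timeout: 20         # HTTP request timeout in seconds (default 10)
    username: netdata   # basic HTTP authentication
    password: s3cret
```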
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/openroadm_devices.md b/src/go/plugin/go.d/modules/prometheus/integrations/openroadm_devices.md
index d1e23dc3c..f8cf26bb5 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/openroadm_devices.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/openroadm_devices.md
@@ -94,8 +94,8 @@ Install [OpenROADM NETCONF Exporter WIP](https://github.com/utdal/openroadm_expo
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/openstack.md b/src/go/plugin/go.d/modules/prometheus/integrations/openstack.md
index 874cf5ce7..68db8ea80 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/openstack.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/openstack.md
@@ -94,8 +94,8 @@ Install [Openstack exporter](https://github.com/CanonicalLtd/prometheus-openstac
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/openvas.md b/src/go/plugin/go.d/modules/prometheus/integrations/openvas.md
index 09681ae7e..9c699f298 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/openvas.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/openvas.md
@@ -94,8 +94,8 @@ Install [OpenVAS exporter](https://github.com/ModeClearCode/openvas_exporter) by
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/openweathermap.md b/src/go/plugin/go.d/modules/prometheus/integrations/openweathermap.md
index 624478e2b..f4becd579 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/openweathermap.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/openweathermap.md
@@ -94,8 +94,8 @@ Install [OpenWeatherMap Exporter](https://github.com/Tenzer/openweathermap-expor
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/oracle_db_community.md b/src/go/plugin/go.d/modules/prometheus/integrations/oracle_db_community.md
index ab59c3181..f5eaa97f2 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/oracle_db_community.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/oracle_db_community.md
@@ -94,8 +94,8 @@ Install [Oracle DB Exporter](https://github.com/iamseth/oracledb_exporter) by fo
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
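For the two limits repeated in the tables above, a sketch of raising them for a large endpoint (values are illustrative):

```yaml
jobs:
  - name: large_endpoint
    url: http://127.0.0.1:9090/metrics
    max_time_series: 5000            # endpoints returning more series are not processed (default 2000)
    max_time_series_per_metric: 500  # metrics with more series are skipped (default 200)
```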
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/otrs.md b/src/go/plugin/go.d/modules/prometheus/integrations/otrs.md
index 8eadb3410..3056f7089 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/otrs.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/otrs.md
@@ -94,8 +94,8 @@ Install [OTRS Exporter](https://github.com/JulianDroste/otrs_exporter) by follow
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/patroni.md b/src/go/plugin/go.d/modules/prometheus/integrations/patroni.md
index e4fe20123..cc43ece55 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/patroni.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/patroni.md
@@ -94,8 +94,8 @@ Install [Patroni Exporter](https://github.com/gopaytech/patroni_exporter) by fol
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/personal_weather_station.md b/src/go/plugin/go.d/modules/prometheus/integrations/personal_weather_station.md
index af1482067..bce7faa20 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/personal_weather_station.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/personal_weather_station.md
@@ -94,8 +94,8 @@ Install [Personal Weather Station Exporter](https://github.com/JohnOrthoefer/pws
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/pgbackrest.md b/src/go/plugin/go.d/modules/prometheus/integrations/pgbackrest.md
index 19c60d95a..efcb9c103 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/pgbackrest.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/pgbackrest.md
@@ -94,8 +94,8 @@ Install [pgBackRest Exporter](https://github.com/woblerr/pgbackrest_exporter) by
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/pgpool-ii.md b/src/go/plugin/go.d/modules/prometheus/integrations/pgpool-ii.md
index a7cfd941f..33bbedd23 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/pgpool-ii.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/pgpool-ii.md
@@ -94,8 +94,8 @@ Install [Pgpool-II Exporter](https://github.com/pgpool/pgpool2_exporter) by foll
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/philips_hue.md b/src/go/plugin/go.d/modules/prometheus/integrations/philips_hue.md
index 47dd77b0e..7f3ad1073 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/philips_hue.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/philips_hue.md
@@ -94,8 +94,8 @@ Install [Philips Hue Exporter](https://github.com/aexel90/hue_exporter) by follo
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/pimoroni_enviro+.md b/src/go/plugin/go.d/modules/prometheus/integrations/pimoroni_enviro+.md
index 12b5719c5..642643f09 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/pimoroni_enviro+.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/pimoroni_enviro+.md
@@ -94,8 +94,8 @@ Install [Pimoroni Enviro+ Exporter](https://github.com/terradolor/prometheus-env
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
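A sketch of the `fallback_type` option, assuming it assigns a Prometheus type to untyped metrics matched by name pattern, as in the collector's metadata (patterns are illustrative):

```yaml
jobs:
  - name: example
    url: http://127.0.0.1:8080/metrics
    fallback_type:
      counter:
        - '*_total'
        - '*_sum'
```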
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/pingdom.md b/src/go/plugin/go.d/modules/prometheus/integrations/pingdom.md
index 758b80eff..6fceb96f0 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/pingdom.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/pingdom.md
@@ -94,8 +94,8 @@ Install [Pingdom Exporter](https://github.com/veepee-oss/pingdom_exporter) by fo
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/podman.md b/src/go/plugin/go.d/modules/prometheus/integrations/podman.md
index 346e765cf..98dd88154 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/podman.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/podman.md
@@ -94,8 +94,8 @@ Install [PODMAN exporter](https://github.com/containers/prometheus-podman-export
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/powerpal_devices.md b/src/go/plugin/go.d/modules/prometheus/integrations/powerpal_devices.md
index cc7b681ee..c2c2c5d7c 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/powerpal_devices.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/powerpal_devices.md
@@ -94,8 +94,8 @@ Install [Powerpal Exporter](https://github.com/aashley/powerpal_exporter) by fol
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/proftpd.md b/src/go/plugin/go.d/modules/prometheus/integrations/proftpd.md
index f92612383..a4ce5bf9f 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/proftpd.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/proftpd.md
@@ -94,8 +94,8 @@ Install [ProFTPD Exporter](https://github.com/transnano/proftpd_exporter) by fol
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/prometheus_endpoint.md b/src/go/plugin/go.d/modules/prometheus/integrations/prometheus_endpoint.md
index 18bbd9d0a..7d206ecdf 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/prometheus_endpoint.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/prometheus_endpoint.md
@@ -90,8 +90,8 @@ No action required.
 
 The configuration file name for this integration is `go.d/prometheus.conf`.
 
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -113,6 +113,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -133,7 +134,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 
 ```yaml
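After editing `go.d/prometheus.conf`, a complete minimal job for a generic Prometheus endpoint reduces to a name and a URL (the URL is illustrative); the remaining options all have defaults:

```yaml
jobs:
  - name: local
    url: http://127.0.0.1:9090/metrics
```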
-You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -113,6 +113,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -133,7 +134,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/proxmox_ve.md b/src/go/plugin/go.d/modules/prometheus/integrations/proxmox_ve.md index ad4bdfe63..93cae3e48 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/proxmox_ve.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/proxmox_ve.md @@ -94,8 +94,8 @@ Install [Proxmox VE Exporter](https://github.com/prometheus-pve/prometheus-pve-e The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. 
| 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/radio_thermostat.md b/src/go/plugin/go.d/modules/prometheus/integrations/radio_thermostat.md index 8004e7ff1..a74b8601c 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/radio_thermostat.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/radio_thermostat.md @@ -94,8 +94,8 @@ Install [Radio Thermostat Exporter](https://github.com/andrewlow/radio-thermosta The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). 
- Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/radius.md b/src/go/plugin/go.d/modules/prometheus/integrations/radius.md index 22e2567e6..d75072203 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/radius.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/radius.md @@ -94,8 +94,8 @@ Install [RADIUS exporter](https://github.com/devon-mar/radius-exporter) by follo The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/rancher.md b/src/go/plugin/go.d/modules/prometheus/integrations/rancher.md index 945813b1d..ee9e6904d 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/rancher.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/rancher.md @@ -94,8 +94,8 @@ Install [Rancher Exporter](https://github.com/infinityworksltd/prometheus-ranche The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/raritan_pdu.md b/src/go/plugin/go.d/modules/prometheus/integrations/raritan_pdu.md index 2781c3af8..019409c34 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/raritan_pdu.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/raritan_pdu.md @@ -94,8 +94,8 @@ Install [Raritan PDU Exporter](https://github.com/psyinfra/prometheus-raritan-pd The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/redis_queue.md b/src/go/plugin/go.d/modules/prometheus/integrations/redis_queue.md index d3fb16d4d..2cf171540 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/redis_queue.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/redis_queue.md @@ -94,8 +94,8 @@ Install [Python RQ Exporter](https://github.com/mdawar/rq-exporter) by following The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/ripe_atlas.md b/src/go/plugin/go.d/modules/prometheus/integrations/ripe_atlas.md index 7aa35e8d5..0b8d3ca4d 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/ripe_atlas.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/ripe_atlas.md @@ -94,8 +94,8 @@ Install [RIPE Atlas Exporter](https://github.com/czerwonk/atlas_exporter) by fol The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/sabnzbd.md b/src/go/plugin/go.d/modules/prometheus/integrations/sabnzbd.md index 3c98fa9e1..8d1c8350b 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/sabnzbd.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/sabnzbd.md @@ -94,8 +94,8 @@ Install [SABnzbd Exporter](https://github.com/msroest/sabnzbd_exporter) by follo The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). 
| | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/salicru_eqx_inverter.md b/src/go/plugin/go.d/modules/prometheus/integrations/salicru_eqx_inverter.md index b7c5b46c3..b747078a8 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/salicru_eqx_inverter.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/salicru_eqx_inverter.md @@ -94,8 +94,8 @@ Install [Salicru EQX inverter](https://github.com/alejandroscf/prometheus_salicr The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). 
- Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/sense_energy.md b/src/go/plugin/go.d/modules/prometheus/integrations/sense_energy.md index 837d30ceb..29117391d 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/sense_energy.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/sense_energy.md @@ -94,8 +94,8 @@ Install [Sense Energy exporter](https://github.com/ejsuncy/sense_energy_promethe The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/sentry.md b/src/go/plugin/go.d/modules/prometheus/integrations/sentry.md index ae878cedf..765b507c4 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/sentry.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/sentry.md @@ -94,8 +94,8 @@ Install [Sentry Exporter](https://github.com/snakecharmer/sentry_exporter) by fo The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/servertech.md b/src/go/plugin/go.d/modules/prometheus/integrations/servertech.md index d287fb65b..7cae7d20d 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/servertech.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/servertech.md @@ -94,8 +94,8 @@ Install [ServerTech Exporter](https://github.com/tynany/servertech_exporter) by The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/shell_command.md b/src/go/plugin/go.d/modules/prometheus/integrations/shell_command.md index dec29a66c..4a833f214 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/shell_command.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/shell_command.md @@ -94,8 +94,8 @@ Install [Command runner exporter](https://github.com/tomwilkie/prom-run) by foll The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/shelly_humidity_sensor.md b/src/go/plugin/go.d/modules/prometheus/integrations/shelly_humidity_sensor.md index baf6fa58f..741fe0a68 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/shelly_humidity_sensor.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/shelly_humidity_sensor.md @@ -94,8 +94,8 @@ Install [Shelly Exporter](https://github.com/aexel90/shelly_exporter) by followi The configuration file name for this integration is `go.d/prometheus.conf`. 
-You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/sia.md b/src/go/plugin/go.d/modules/prometheus/integrations/sia.md index 6fe4a3684..96ad462a3 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/sia.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/sia.md @@ -94,8 +94,8 @@ Install [Sia Exporter](https://github.com/tbenz9/sia_exporter) by following the The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. 
| 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/siemens_s7_plc.md b/src/go/plugin/go.d/modules/prometheus/integrations/siemens_s7_plc.md index c6aec71e2..000a9a812 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/siemens_s7_plc.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/siemens_s7_plc.md @@ -94,8 +94,8 @@ Install [Siemens S7 PLC exporter](https://github.com/MarcusCalidus/s7-plc-export The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). 
- Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/site_24x7.md b/src/go/plugin/go.d/modules/prometheus/integrations/site_24x7.md index 8faefa53e..07f7e5599 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/site_24x7.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/site_24x7.md @@ -94,8 +94,8 @@ Install [site24x7 Exporter](https://github.com/svenstaro/site24x7_exporter) by f The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/slurm.md b/src/go/plugin/go.d/modules/prometheus/integrations/slurm.md index 00d27ca19..1b8be6581 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/slurm.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/slurm.md @@ -94,8 +94,8 @@ Install [slurm exporter](https://github.com/vpenso/prometheus-slurm-exporter) by The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/sma_inverters.md b/src/go/plugin/go.d/modules/prometheus/integrations/sma_inverters.md index f739362eb..01cd20db5 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/sma_inverters.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/sma_inverters.md @@ -94,8 +94,8 @@ Install [sma-exporter](https://github.com/dr0ps/sma_inverter_exporter) by follow The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/smart_meters_sml.md b/src/go/plugin/go.d/modules/prometheus/integrations/smart_meters_sml.md index 1201475a5..6d834aa31 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/smart_meters_sml.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/smart_meters_sml.md @@ -94,8 +94,8 @@ Install [SML Exporter](https://github.com/mweinelt/sml-exporter) by following th The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/smartrg_808ac_cable_modem.md b/src/go/plugin/go.d/modules/prometheus/integrations/smartrg_808ac_cable_modem.md index 1dadc3d85..ea1f42ac3 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/smartrg_808ac_cable_modem.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/smartrg_808ac_cable_modem.md @@ -94,8 +94,8 @@ Install [smartrg808ac_exporter](https://github.com/AdamIsrael/smartrg808ac_expor The configuration file name for this integration is `go.d/prometheus.conf`. 
-You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/softether_vpn_server.md b/src/go/plugin/go.d/modules/prometheus/integrations/softether_vpn_server.md index 30fd7cb64..dd79923fa 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/softether_vpn_server.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/softether_vpn_server.md @@ -94,8 +94,8 @@ Install [SoftEther Exporter](https://github.com/dalance/softether_exporter) by f The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. 
| 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/solar_logging_stick.md b/src/go/plugin/go.d/modules/prometheus/integrations/solar_logging_stick.md index 35c78085e..09fb37725 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/solar_logging_stick.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/solar_logging_stick.md @@ -94,8 +94,8 @@ Install [Solar logging stick exporter](https://gitlab.com/bhavin192/lsx-exporter The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). 
- Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/solaredge_inverters.md b/src/go/plugin/go.d/modules/prometheus/integrations/solaredge_inverters.md index 266f2d05c..c102db244 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/solaredge_inverters.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/solaredge_inverters.md @@ -94,8 +94,8 @@ Install [SolarEdge Exporter](https://github.com/dave92082/SolarEdge-Exporter) by The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/solis_ginlong_5g_inverters.md b/src/go/plugin/go.d/modules/prometheus/integrations/solis_ginlong_5g_inverters.md index d0d0658f5..14053594f 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/solis_ginlong_5g_inverters.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/solis_ginlong_5g_inverters.md @@ -94,8 +94,8 @@ Install [Solis Exporter](https://github.com/candlerb/solis_exporter) by followin The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/sonic_nos.md b/src/go/plugin/go.d/modules/prometheus/integrations/sonic_nos.md index 455f14fbf..921b035a7 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/sonic_nos.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/sonic_nos.md @@ -94,8 +94,8 @@ Install [SONiC Exporter](https://github.com/kamelnetworks/sonic_exporter) by fol The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). 
| | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/spacelift.md b/src/go/plugin/go.d/modules/prometheus/integrations/spacelift.md index ab83110bb..61df3e4a6 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/spacelift.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/spacelift.md @@ -94,8 +94,8 @@ Install [Spacelift Exporter](https://github.com/spacelift-io/prometheus-exporter The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). 
- Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/speedify_cli.md b/src/go/plugin/go.d/modules/prometheus/integrations/speedify_cli.md index beed0bd1a..75bdf3ca5 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/speedify_cli.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/speedify_cli.md @@ -94,8 +94,8 @@ Install [Speedify Exporter](https://github.com/willshen/speedify_exporter) by fo The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/sphinx.md b/src/go/plugin/go.d/modules/prometheus/integrations/sphinx.md index 1116f91e0..33e4da740 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/sphinx.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/sphinx.md @@ -94,8 +94,8 @@ Install [Sphinx Exporter](https://github.com/foxdalas/sphinx_exporter) by follow The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
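Every hunk boundary above cuts the `edit-config` snippet off after its first line. For reference while reviewing, the full sequence these docs point to is, as a sketch under the standard Netdata install layout:

```bash
# enter the Netdata config directory (native install path, else static-build path)
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
# open go.d/prometheus.conf in an editor; edit-config copies the stock file here first if needed
sudo ./edit-config go.d/prometheus.conf
```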
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/sql_database_agnostic.md b/src/go/plugin/go.d/modules/prometheus/integrations/sql_database_agnostic.md index 6a0a523c6..3ca6e0ecb 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/sql_database_agnostic.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/sql_database_agnostic.md @@ -94,8 +94,8 @@ Install [SQL Exporter](https://github.com/free/sql_exporter) by following the in The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. 
| | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/ssh.md b/src/go/plugin/go.d/modules/prometheus/integrations/ssh.md index 7ffe9b203..7feb454d8 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/ssh.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/ssh.md @@ -94,8 +94,8 @@ Install [SSH Exporter](https://github.com/Nordstrom/ssh_exporter) by following t The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/ssl_certificate.md b/src/go/plugin/go.d/modules/prometheus/integrations/ssl_certificate.md index 2c1d519a8..7d841a1e1 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/ssl_certificate.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/ssl_certificate.md @@ -94,8 +94,8 @@ Install [SSL Certificate exporter](https://github.com/ribbybibby/ssl_exporter) b The configuration file name for this integration is `go.d/prometheus.conf`. 
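Every hunk in this patch adds the same `label_prefix` row to the options table. A minimal sketch of how that option would sit in a `go.d/prometheus.conf` job (job name and URL are hypothetical); per the new row, each label name becomes the prefix, an underscore, then the original name:

```yaml
jobs:
  - name: my_exporter                   # hypothetical job name
    url: http://127.0.0.1:9090/metrics  # hypothetical endpoint
    label_prefix: myexp                 # a label "instance" is exposed as "myexp_instance"
```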
-You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/starlink_spacex.md b/src/go/plugin/go.d/modules/prometheus/integrations/starlink_spacex.md index b48f32c9a..362e1cff3 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/starlink_spacex.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/starlink_spacex.md @@ -94,8 +94,8 @@ Install [Starlink Exporter (SpaceX)](https://github.com/danopstech/starlink_expo The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. 
| 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/starwind_vsan_vsphere_edition.md b/src/go/plugin/go.d/modules/prometheus/integrations/starwind_vsan_vsphere_edition.md index 9b7409b83..f075aac51 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/starwind_vsan_vsphere_edition.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/starwind_vsan_vsphere_edition.md @@ -94,8 +94,8 @@ Install [Starwind vSAN Exporter](https://github.com/evoicefire/starwind-vsan-exp The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). 
- Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/statuspage.md b/src/go/plugin/go.d/modules/prometheus/integrations/statuspage.md index 6038729dc..41591f749 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/statuspage.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/statuspage.md @@ -94,8 +94,8 @@ Install [StatusPage Exporter](https://github.com/vladvasiliu/statuspage-exporter The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/steam.md b/src/go/plugin/go.d/modules/prometheus/integrations/steam.md index 44b346593..aab02075d 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/steam.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/steam.md @@ -94,8 +94,8 @@ Install [A2S Exporter](https://github.com/armsnyder/a2s-exporter) by following t The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
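The options table repeated in these hunks also lists `timeout`, `username`, and `password`. A hedged sketch of a job combining them, with hypothetical values:

```yaml
jobs:
  - name: my_exporter                    # hypothetical job name
    url: https://127.0.0.1:9090/metrics  # hypothetical endpoint
    timeout: 10                          # HTTP request timeout in seconds (the table's default)
    username: metrics                    # basic HTTP authentication credentials
    password: secret
```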
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/storidge.md b/src/go/plugin/go.d/modules/prometheus/integrations/storidge.md index 48a320ce6..886f5cd3a 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/storidge.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/storidge.md @@ -94,8 +94,8 @@ Install [Storidge exporter](https://github.com/Storidge/cio-user-docs/blob/maste The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/stream.md b/src/go/plugin/go.d/modules/prometheus/integrations/stream.md index fb21cb4da..e47f629eb 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/stream.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/stream.md @@ -94,8 +94,8 @@ Install [Stream exporter](https://github.com/carlpett/stream_exporter) by follow The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/strongswan.md b/src/go/plugin/go.d/modules/prometheus/integrations/strongswan.md index ffddfb022..9952e2422 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/strongswan.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/strongswan.md @@ -94,8 +94,8 @@ Install [strongSwan/IPSec/vici Exporter](https://github.com/jlti-dev/ipsec_expor The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/sunspec_solar_energy.md b/src/go/plugin/go.d/modules/prometheus/integrations/sunspec_solar_energy.md index 552c5583b..93081267f 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/sunspec_solar_energy.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/sunspec_solar_energy.md @@ -94,8 +94,8 @@ Install [Sunspec Solar Energy Exporter](https://github.com/inosion/prometheus-su The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. 
If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/suricata.md b/src/go/plugin/go.d/modules/prometheus/integrations/suricata.md index d5bdd01b5..a828e5233 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/suricata.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/suricata.md @@ -94,8 +94,8 @@ Install [Suricata Exporter](https://github.com/corelight/suricata_exporter) by f The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). 
- Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/synology_activebackup.md b/src/go/plugin/go.d/modules/prometheus/integrations/synology_activebackup.md index b558bbf92..de0b43980 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/synology_activebackup.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/synology_activebackup.md @@ -94,8 +94,8 @@ Install [Synology ActiveBackup Exporter](https://github.com/codemonauts/activeba The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/sysload.md b/src/go/plugin/go.d/modules/prometheus/integrations/sysload.md index 369a43020..01b31bafc 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/sysload.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/sysload.md @@ -94,8 +94,8 @@ Install [Sysload Exporter](https://github.com/egmc/sysload_exporter) by followin The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/t-rex_nvidia_gpu_miner.md b/src/go/plugin/go.d/modules/prometheus/integrations/t-rex_nvidia_gpu_miner.md index 55b26bf9c..e1516426e 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/t-rex_nvidia_gpu_miner.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/t-rex_nvidia_gpu_miner.md @@ -94,8 +94,8 @@ Install [T-Rex NVIDIA GPU Miner Exporter](https://github.com/dennisstritzke/trex The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. 
If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/tacacs.md b/src/go/plugin/go.d/modules/prometheus/integrations/tacacs.md index 5d3534393..c2c807eeb 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/tacacs.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/tacacs.md @@ -94,8 +94,8 @@ Install [TACACS Exporter](https://github.com/devon-mar/tacacs-exporter) by follo The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). 
- Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/tado_smart_heating_solution.md b/src/go/plugin/go.d/modules/prometheus/integrations/tado_smart_heating_solution.md index ece7fb677..d4de23d49 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/tado_smart_heating_solution.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/tado_smart_heating_solution.md @@ -94,8 +94,8 @@ Install [Tado Exporter](https://github.com/eko/tado-exporter) by following the i The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/tankerkoenig_api.md b/src/go/plugin/go.d/modules/prometheus/integrations/tankerkoenig_api.md index 01eb6557a..a7693559b 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/tankerkoenig_api.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/tankerkoenig_api.md @@ -94,8 +94,8 @@ Install [Tankerkoenig API Exporter](https://github.com/lukasmalkmus/tankerkoenig_e The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/tesla_powerwall.md b/src/go/plugin/go.d/modules/prometheus/integrations/tesla_powerwall.md index c24163111..5258e5bda 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/tesla_powerwall.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/tesla_powerwall.md @@ -94,8 +94,8 @@ Install [Tesla Powerwall Exporter](https://github.com/foogod/powerwall_exporter) The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. 
If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). - Option syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/tesla_vehicle.md b/src/go/plugin/go.d/modules/prometheus/integrations/tesla_vehicle.md index 56617affd..a19e928d0 100644 --- a/src/go/plugin/go.d/modules/prometheus/integrations/tesla_vehicle.md +++ b/src/go/plugin/go.d/modules/prometheus/integrations/tesla_vehicle.md @@ -94,8 +94,8 @@ Install [Tesla exporter](https://github.com/wywywywy/tesla-prometheus-exporter) The configuration file name for this integration is `go.d/prometheus.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry | fallback_type | Time series selector (filter). | | no | | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no | | timeout | HTTP request timeout. | 10 | no | | username | Username for basic HTTP authentication. | | no | | password | Password for basic HTTP authentication. | | no | @@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) -- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md). +- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md). 
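The new `label_prefix` option added throughout these files behaves as the table row describes: every chart label gets the prefix plus an underscore. A minimal sketch, with a hypothetical job name and prefix:

```yaml
jobs:
  - name: powerwall            # hypothetical job name
    url: http://127.0.0.1:9090/metrics
    label_prefix: pw           # a label named "mode" would be exposed as "pw_mode"
```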
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/tesla_wall_connector.md b/src/go/plugin/go.d/modules/prometheus/integrations/tesla_wall_connector.md
index 8e3c0e901..7d932812a 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/tesla_wall_connector.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/tesla_wall_connector.md
@@ -94,8 +94,8 @@ Install [Tesla Wall Connector Exporter](https://github.com/benclapp/tesla_wall_c
 The configuration file name for this integration is `go.d/prometheus.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/tp-link_p110.md b/src/go/plugin/go.d/modules/prometheus/integrations/tp-link_p110.md
index 5dd150413..307c070c6 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/tp-link_p110.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/tp-link_p110.md
@@ -94,8 +94,8 @@ Install [TP-Link P110 Exporter](https://github.com/ijohanne/prometheus-tplink-p1
 The configuration file name for this integration is `go.d/prometheus.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/traceroute.md b/src/go/plugin/go.d/modules/prometheus/integrations/traceroute.md
index 0896fd9ca..90d1e6654 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/traceroute.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/traceroute.md
@@ -94,8 +94,8 @@ Install [traceroute exporter](https://github.com/jeanfabrice/prometheus-tcptrace
 The configuration file name for this integration is `go.d/prometheus.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/twincat_ads_web_service.md b/src/go/plugin/go.d/modules/prometheus/integrations/twincat_ads_web_service.md
index e276e598d..8ce7d27ea 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/twincat_ads_web_service.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/twincat_ads_web_service.md
@@ -94,8 +94,8 @@ Install [TwinCAT ADS Web Service exporter](https://github.com/MarcusCalidus/twin
 The configuration file name for this integration is `go.d/prometheus.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 ```yaml
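The two limits in the options table guard against high-cardinality endpoints. A minimal sketch showing them set to their documented defaults (the job name is hypothetical):

```yaml
jobs:
  - name: big_exporter               # hypothetical
    url: http://127.0.0.1:9100/metrics
    max_time_series: 2000            # default; endpoints above this are not processed
    max_time_series_per_metric: 200  # default; metrics above this are skipped
```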
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/twitch.md b/src/go/plugin/go.d/modules/prometheus/integrations/twitch.md
index f08f81bd9..3e66d0aad 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/twitch.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/twitch.md
@@ -94,8 +94,8 @@ Install [Twitch exporter](https://github.com/damoun/twitch_exporter) by followin
 The configuration file name for this integration is `go.d/prometheus.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/ubiquiti_ufiber_olt.md b/src/go/plugin/go.d/modules/prometheus/integrations/ubiquiti_ufiber_olt.md
index 810ebbea3..be66ca546 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/ubiquiti_ufiber_olt.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/ubiquiti_ufiber_olt.md
@@ -94,8 +94,8 @@ Install [ufiber-exporter](https://github.com/swoga/ufiber-exporter) by following
 The configuration file name for this integration is `go.d/prometheus.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/uptimerobot.md b/src/go/plugin/go.d/modules/prometheus/integrations/uptimerobot.md
index 9c6b5395a..288096289 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/uptimerobot.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/uptimerobot.md
@@ -94,8 +94,8 @@ Install [Uptimerobot Exporter](https://github.com/wosc/prometheus-uptimerobot) b
 The configuration file name for this integration is `go.d/prometheus.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 ```yaml
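The `timeout`, `username`, and `password` rows in the options table combine as in this minimal sketch (job name, host, and credentials are hypothetical):

```yaml
jobs:
  - name: protected_exporter    # hypothetical
    url: https://203.0.113.10:9090/metrics
    timeout: 10                 # seconds (the documented default)
    username: netdata           # basic HTTP authentication
    password: secret
```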
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/vault_pki.md b/src/go/plugin/go.d/modules/prometheus/integrations/vault_pki.md
index a7d11cd16..e1f53b1b3 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/vault_pki.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/vault_pki.md
@@ -94,8 +94,8 @@ Install [Vault PKI Exporter](https://github.com/aarnaud/vault-pki-exporter) by f
 The configuration file name for this integration is `go.d/prometheus.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/vertica.md b/src/go/plugin/go.d/modules/prometheus/integrations/vertica.md
index 8463d713f..7e9fdb053 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/vertica.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/vertica.md
@@ -94,8 +94,8 @@ Install [vertica-prometheus-exporter](https://github.com/vertica/vertica-prometh
 The configuration file name for this integration is `go.d/prometheus.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/vscode.md b/src/go/plugin/go.d/modules/prometheus/integrations/vscode.md
index 5fcffca01..1de536283 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/vscode.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/vscode.md
@@ -94,8 +94,8 @@ Install [VSCode Exporter](https://github.com/guicaulada/vscode-exporter) by foll
 The configuration file name for this integration is `go.d/prometheus.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/warp10.md b/src/go/plugin/go.d/modules/prometheus/integrations/warp10.md
index e9e60dea6..f412192fb 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/warp10.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/warp10.md
@@ -94,8 +94,8 @@ Install [Warp10 Exporter](https://github.com/centreon/warp10-sensision-exporter)
 The configuration file name for this integration is `go.d/prometheus.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 ```yaml
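The `fallback_type` row documents a selector for series whose type the exporter does not declare. A minimal sketch; the `counter`/`gauge` keys are assumed from the `FallbackType.Counter`/`FallbackType.Gauge` fields used by the Go changes later in this patch, and the metric patterns are hypothetical:

```yaml
jobs:
  - name: untyped_exporter        # hypothetical
    url: http://127.0.0.1:9101/metrics
    fallback_type:
      counter:
        - "*_total"               # treat untyped *_total series as counters
      gauge:
        - "vendor_temp"           # hypothetical untyped metric collected as a gauge
```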
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/xiaomi_mi_flora.md b/src/go/plugin/go.d/modules/prometheus/integrations/xiaomi_mi_flora.md
index 51314b8b2..aa70d41d3 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/xiaomi_mi_flora.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/xiaomi_mi_flora.md
@@ -94,8 +94,8 @@ Install [MiFlora / Flower Care Exporter](https://github.com/xperimental/flowerca
 The configuration file name for this integration is `go.d/prometheus.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/xmpp_server.md b/src/go/plugin/go.d/modules/prometheus/integrations/xmpp_server.md
index eacae8393..1df6de867 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/xmpp_server.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/xmpp_server.md
@@ -94,8 +94,8 @@ Install [XMPP Server Exporter](https://github.com/horazont/xmpp-blackbox-exporte
 The configuration file name for this integration is `go.d/prometheus.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/yourls_url_shortener.md b/src/go/plugin/go.d/modules/prometheus/integrations/yourls_url_shortener.md
index 6b84c5ee6..e756f47f6 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/yourls_url_shortener.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/yourls_url_shortener.md
@@ -94,8 +94,8 @@ Install [YOURLS exporter](https://github.com/just1not2/prometheus-exporter-yourl
 The configuration file name for this integration is `go.d/prometheus.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/zerto.md b/src/go/plugin/go.d/modules/prometheus/integrations/zerto.md
index 3d316461f..f4f562855 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/zerto.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/zerto.md
@@ -94,8 +94,8 @@ Install [Zerto Exporter](https://github.com/claranet/zerto-exporter) by followin
 The configuration file name for this integration is `go.d/prometheus.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/zulip.md b/src/go/plugin/go.d/modules/prometheus/integrations/zulip.md
index 91e652c47..7e5c5b569 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/zulip.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/zulip.md
@@ -94,8 +94,8 @@ Install [Zulip Exporter](https://github.com/brokenpip3/zulip-exporter) by follow
 The configuration file name for this integration is `go.d/prometheus.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/zyxel_gs1200-8.md b/src/go/plugin/go.d/modules/prometheus/integrations/zyxel_gs1200-8.md
index 4f0b43431..ecea956ca 100644
--- a/src/go/plugin/go.d/modules/prometheus/integrations/zyxel_gs1200-8.md
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/zyxel_gs1200-8.md
@@ -94,8 +94,8 @@ Install [Zyxel GS1200 Exporter](https://github.com/robinelfrink/gs1200-exporter)
 The configuration file name for this integration is `go.d/prometheus.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
 ```bash
 cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -117,6 +117,7 @@ The following options can be defined globally: update_every, autodetection_retry
 | fallback_type | Time series selector (filter). | | no |
 | max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
 | max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| label_prefix | An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name). | | no |
 | timeout | HTTP request timeout. | 10 | no |
 | username | Username for basic HTTP authentication. | | no |
 | password | Password for basic HTTP authentication. | | no |
@@ -137,7 +138,7 @@ The following options can be defined globally: update_every, autodetection_retry
 This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
 - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
-- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
 - Option syntax:
 ```yaml
diff --git a/src/go/plugin/go.d/modules/prometheus/metadata.yaml b/src/go/plugin/go.d/modules/prometheus/metadata.yaml
index fee2b820b..1a2a88c36 100644
--- a/src/go/plugin/go.d/modules/prometheus/metadata.yaml
+++ b/src/go/plugin/go.d/modules/prometheus/metadata.yaml
@@ -113,6 +113,10 @@ modules:
             description: Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped.
             default_value: 200
             required: false
+          - name: label_prefix
+            description: "An optional prefix that will be added to all labels of all charts. If set, the label names will be automatically formatted as `prefix_name` (the prefix followed by an underscore and the original name)."
+            default_value: ""
+            required: false
           - name: timeout
             description: HTTP request timeout.
             default_value: 10
diff --git a/src/go/plugin/go.d/modules/prometheus/prometheus.go b/src/go/plugin/go.d/modules/prometheus/prometheus.go
index b3f97fbd3..110b1a63a 100644
--- a/src/go/plugin/go.d/modules/prometheus/prometheus.go
+++ b/src/go/plugin/go.d/modules/prometheus/prometheus.go
@@ -5,10 +5,12 @@ package prometheus
 import (
     _ "embed"
     "errors"
+    "fmt"
     "time"
 
+    "github.com/netdata/netdata/go/plugins/pkg/matcher"
     "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
-    "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher"
+    "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt"
     "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
     "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus/selector"
     "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
@@ -31,9 +33,9 @@ func init() {
 func New() *Prometheus {
     return &Prometheus{
         Config: Config{
-            HTTP: web.HTTP{
-                Client: web.Client{
-                    Timeout: web.Duration(time.Second * 10),
+            HTTPConfig: web.HTTPConfig{
+                ClientConfig: web.ClientConfig{
+                    Timeout: confopt.Duration(time.Second * 10),
                 },
             },
             MaxTS: 2000,
@@ -46,9 +48,10 @@ func New() *Prometheus {
 type Config struct {
     UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
-    web.HTTP `yaml:",inline" json:""`
+    web.HTTPConfig `yaml:",inline" json:""`
     Name string `yaml:"name,omitempty" json:"name"`
     Application string `yaml:"app,omitempty" json:"app"`
+    LabelPrefix string `yaml:"label_prefix,omitempty" json:"label_prefix"`
     BearerTokenFile string `yaml:"bearer_token_file,omitempty" json:"bearer_token_file"`
     Selector selector.Expr `yaml:"selector,omitempty" json:"selector"`
     ExpectedPrefix string `yaml:"expected_prefix,omitempty" json:"expected_prefix"`
@@ -81,28 +84,24 @@ func (p *Prometheus) Configuration() any {
 func (p *Prometheus) Init() error {
     if err := p.validateConfig(); err != nil {
-        p.Errorf("validating config: %v", err)
-        return err
+        return fmt.Errorf("validating config: %v", err)
     }
     prom, err := p.initPrometheusClient()
     if err != nil {
-        p.Errorf("init prometheus client: %v", err)
-        return err
+        return fmt.Errorf("init prometheus client: %v", err)
     }
     p.prom = prom
     m, err := p.initFallbackTypeMatcher(p.FallbackType.Counter)
     if err != nil {
-        p.Errorf("init counter fallback type matcher: %v", err)
-        return err
+        return fmt.Errorf("init counter fallback type matcher: %v", err)
     }
     p.fallbackType.counter = m
     m, err = p.initFallbackTypeMatcher(p.FallbackType.Gauge)
     if err != nil {
-        p.Errorf("init counter fallback type matcher: %v", err)
-        return err
+        return fmt.Errorf("init counter fallback type matcher: %v", err)
     }
     p.fallbackType.gauge = m
@@ -112,7 +111,6 @@ func (p *Prometheus) Init() error {
 func (p *Prometheus) Check() error {
     mx, err := p.collect()
     if err != nil {
-        p.Error(err)
         return err
     }
     if len(mx) == 0 {
diff --git a/src/go/plugin/go.d/modules/prometheus/prometheus_test.go b/src/go/plugin/go.d/modules/prometheus/prometheus_test.go
index 5a5475cc9..1dc397110 100644
--- a/src/go/plugin/go.d/modules/prometheus/prometheus_test.go
+++ b/src/go/plugin/go.d/modules/prometheus/prometheus_test.go
@@ -42,13 +42,13 @@ func TestPrometheus_Init(t *testing.T) {
     }{
         "non empty URL": {
             wantFail: false,
-            config: Config{HTTP: web.HTTP{Request: web.Request{URL: "http://127.0.0.1:9090/metric"}}},
+            config: Config{HTTPConfig: web.HTTPConfig{RequestConfig: web.RequestConfig{URL: "http://127.0.0.1:9090/metric"}}},
         },
         "invalid selector syntax": {
             wantFail: true,
             config: Config{
-                HTTP: web.HTTP{Request: web.Request{URL: "http://127.0.0.1:9090/metric"}},
-                Selector: selector.Expr{Allow: []string{`name{label=#"value"}`}},
+                HTTPConfig: web.HTTPConfig{RequestConfig: web.RequestConfig{URL: "http://127.0.0.1:9090/metric"}},
+                Selector: selector.Expr{Allow: []string{`name{label=#"value"}`}},
             },
         },
         "default": {
diff --git a/src/go/plugin/go.d/modules/prometheus/testdata/config.json b/src/go/plugin/go.d/modules/prometheus/testdata/config.json
index 2e9b2e138..75d7e9ba3 100644
--- a/src/go/plugin/go.d/modules/prometheus/testdata/config.json
+++ b/src/go/plugin/go.d/modules/prometheus/testdata/config.json
@@ -19,6 +19,7 @@
   "tls_skip_verify": true,
   "name": "ok",
   "app": "ok",
+  "label_prefix": "ok",
   "bearer_token_file": "ok",
   "selector": {
     "allow": [
diff --git a/src/go/plugin/go.d/modules/prometheus/testdata/config.yaml b/src/go/plugin/go.d/modules/prometheus/testdata/config.yaml
index 37a411b9a..d7ab417ec 100644
--- a/src/go/plugin/go.d/modules/prometheus/testdata/config.yaml
+++ b/src/go/plugin/go.d/modules/prometheus/testdata/config.yaml
@@ -17,6 +17,7 @@ tls_key: "ok"
 tls_skip_verify: yes
 name: "ok"
 app: "ok"
+label_prefix: "ok"
 bearer_token_file: "ok"
 selector:
   allow:
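Taken together, the prometheus module changes above wire the new option from YAML (`label_prefix`) through `Config.LabelPrefix` into chart labels. A minimal sketch of a complete job mirroring the keys exercised by the test fixtures (all values hypothetical, the token path included):

```yaml
jobs:
  - name: myapp
    app: myapp
    url: https://127.0.0.1:9090/metrics
    tls_skip_verify: yes
    label_prefix: myapp
    bearer_token_file: /var/run/secrets/token   # hypothetical path
    selector:
      allow:
        - myapp_*
```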
b/src/go/plugin/go.d/modules/proxysql/integrations/proxysql.md @@ -170,8 +170,8 @@ No action required. The configuration file name for this integration is `go.d/proxysql.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/proxysql/proxysql.go b/src/go/plugin/go.d/modules/proxysql/proxysql.go index fc4677b1d..63ed0618d 100644 --- a/src/go/plugin/go.d/modules/proxysql/proxysql.go +++ b/src/go/plugin/go.d/modules/proxysql/proxysql.go @@ -6,12 +6,13 @@ import ( "database/sql" _ "embed" "errors" - _ "github.com/go-sql-driver/mysql" "sync" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" + + _ "github.com/go-sql-driver/mysql" ) //go:embed "config_schema.json" @@ -29,7 +30,7 @@ func New() *ProxySQL { return &ProxySQL{ Config: Config{ DSN: "stats:stats@tcp(127.0.0.1:6032)/", - Timeout: web.Duration(time.Second), + Timeout: confopt.Duration(time.Second), }, charts: baseCharts.Copy(), @@ -43,9 +44,9 @@ func New() *ProxySQL { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - DSN string `yaml:"dsn" json:"dsn"` - Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + DSN string `yaml:"dsn" json:"dsn"` + Timeout confopt.Duration `yaml:"timeout,omitempty" json:"timeout"` } type ProxySQL struct { @@ -66,7 +67,6 @@ func (p *ProxySQL) Configuration() any { func (p *ProxySQL) Init() error { if p.DSN == "" { - p.Error("dsn not set") return errors.New("dsn not set") } @@ -78,7 +78,6 @@ func (p *ProxySQL) Init() error { func (p *ProxySQL) Check() error { mx, err := p.collect() if err != nil { - p.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/pulsar/config_schema.json b/src/go/plugin/go.d/modules/pulsar/config_schema.json index b4bc8b45f..aedffec2d 100644 --- a/src/go/plugin/go.d/modules/pulsar/config_schema.json +++ b/src/go/plugin/go.d/modules/pulsar/config_schema.json @@ -105,7 +105,6 @@ "required": [ "url" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/pulsar/init.go b/src/go/plugin/go.d/modules/pulsar/init.go index f165327a5..c98c3c1df 100644 --- a/src/go/plugin/go.d/modules/pulsar/init.go +++ b/src/go/plugin/go.d/modules/pulsar/init.go @@ -5,7 +5,7 @@ package pulsar import ( "errors" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher" + "github.com/netdata/netdata/go/plugins/pkg/matcher" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -18,12 +18,12 @@ func (p *Pulsar) validateConfig() error { } func (p *Pulsar) initPrometheusClient() (prometheus.Prometheus, error) { - client, err := web.NewHTTPClient(p.Client) + client, err := 
web.NewHTTPClient(p.ClientConfig) if err != nil { return nil, err } - return prometheus.New(client, p.Request), nil + return prometheus.New(client, p.RequestConfig), nil } func (p *Pulsar) initTopicFilerMatcher() (matcher.Matcher, error) { diff --git a/src/go/plugin/go.d/modules/pulsar/integrations/apache_pulsar.md b/src/go/plugin/go.d/modules/pulsar/integrations/apache_pulsar.md index 8538fbf9c..feafdfce8 100644 --- a/src/go/plugin/go.d/modules/pulsar/integrations/apache_pulsar.md +++ b/src/go/plugin/go.d/modules/pulsar/integrations/apache_pulsar.md @@ -148,8 +148,8 @@ No action required. The configuration file name for this integration is `go.d/pulsar.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/pulsar/pulsar.go b/src/go/plugin/go.d/modules/pulsar/pulsar.go index aa5ac35fc..16235feac 100644 --- a/src/go/plugin/go.d/modules/pulsar/pulsar.go +++ b/src/go/plugin/go.d/modules/pulsar/pulsar.go @@ -5,11 +5,13 @@ package pulsar import ( _ "embed" "errors" + "fmt" "sync" "time" + "github.com/netdata/netdata/go/plugins/pkg/matcher" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -31,12 +33,12 @@ func init() { func New() *Pulsar { return &Pulsar{ Config: Config{ - HTTP: web.HTTP{ - Request: web.Request{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{ URL: "http://127.0.0.1:8080/metrics", }, - Client: web.Client{ - Timeout: web.Duration(time.Second * 5), + ClientConfig: web.ClientConfig{ + Timeout: confopt.Duration(time.Second * 5), }, }, TopicFilter: matcher.SimpleExpr{ @@ -54,9 +56,9 @@ func New() *Pulsar { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - web.HTTP `yaml:",inline" json:""` - TopicFilter matcher.SimpleExpr `yaml:"topic_filter,omitempty" json:"topic_filter"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + web.HTTPConfig `yaml:",inline" json:""` + TopicFilter matcher.SimpleExpr `yaml:"topic_filter,omitempty" json:"topic_filter"` } type Pulsar struct { @@ -81,21 +83,18 @@ func (p *Pulsar) Configuration() any { func (p *Pulsar) Init() error { if err := p.validateConfig(); err != nil { - p.Errorf("config validation: %v", err) - return err + return fmt.Errorf("config validation: %v", err) } prom, err := p.initPrometheusClient() if err != nil { - p.Error(err) - return err + return fmt.Errorf("init prometheus client: %v", err) } p.prom = prom m, err := p.initTopicFilerMatcher() if err != nil { - p.Error(err) - return err + return fmt.Errorf("init topic filter: %v", err) } p.topicFilter = m @@ -105,7 +104,6 @@ func (p *Pulsar) Init() error { func (p *Pulsar) Check() error { mx, err := p.collect() if err != nil { - p.Error(err) 
return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/pulsar/pulsar_test.go b/src/go/plugin/go.d/modules/pulsar/pulsar_test.go index 330656156..cf5423786 100644 --- a/src/go/plugin/go.d/modules/pulsar/pulsar_test.go +++ b/src/go/plugin/go.d/modules/pulsar/pulsar_test.go @@ -9,8 +9,8 @@ import ( "strings" "testing" + "github.com/netdata/netdata/go/plugins/pkg/matcher" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/tlscfg" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" @@ -54,22 +54,22 @@ func TestPulsar_Init(t *testing.T) { config: New().Config, }, "empty topic filter": { - config: Config{HTTP: web.HTTP{Request: web.Request{URL: "http://127.0.0.1:8080/metric"}}}, + config: Config{HTTPConfig: web.HTTPConfig{RequestConfig: web.RequestConfig{URL: "http://127.0.0.1:8080/metric"}}}, }, "bad syntax topic filer": { config: Config{ - HTTP: web.HTTP{Request: web.Request{URL: "http://127.0.0.1:8080/metrics"}}, + HTTPConfig: web.HTTPConfig{RequestConfig: web.RequestConfig{URL: "http://127.0.0.1:8080/metrics"}}, TopicFilter: matcher.SimpleExpr{Includes: []string{"+"}}}, wantFail: true, }, "empty URL": { - config: Config{HTTP: web.HTTP{Request: web.Request{URL: ""}}}, + config: Config{HTTPConfig: web.HTTPConfig{RequestConfig: web.RequestConfig{URL: ""}}}, wantFail: true, }, "nonexistent TLS CA": { - config: Config{HTTP: web.HTTP{ - Request: web.Request{URL: "http://127.0.0.1:8080/metric"}, - Client: web.Client{TLSConfig: tlscfg.TLSConfig{TLSCA: "testdata/tls"}}}}, + config: Config{HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{URL: "http://127.0.0.1:8080/metric"}, + ClientConfig: web.ClientConfig{TLSConfig: tlscfg.TLSConfig{TLSCA: "testdata/tls"}}}}, wantFail: true, }, } @@ -171,11 +171,11 @@ func TestPulsar_Collect(t *testing.T) { for i := 0; i < 10; i++ { _ = pulsar.Collect() } - collected := pulsar.Collect() + mx := pulsar.Collect() - require.NotNil(t, collected) - require.Equal(t, test.expected, collected) - ensureCollectedHasAllChartsDimsVarsIDs(t, pulsar, collected) + require.NotNil(t, mx) + require.Equal(t, test.expected, mx) + module.TestMetricsHasAllChartsDims(t, pulsar.Charts(), mx) }) } } @@ -212,19 +212,6 @@ func TestPulsar_Collect_RemoveAddNamespacesTopicsInRuntime(t *testing.T) { } } -func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, pulsar *Pulsar, collected map[string]int64) { - for _, chart := range *pulsar.Charts() { - for _, dim := range chart.Dims { - _, ok := collected[dim.ID] - assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) - } - for _, v := range chart.Vars { - _, ok := collected[v.ID] - assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) - } - } -} - func prepareClientServerStdV250Namespaces(t *testing.T) (*Pulsar, *httptest.Server) { t.Helper() srv := httptest.NewServer(http.HandlerFunc( diff --git a/src/go/plugin/go.d/modules/puppet/collect.go b/src/go/plugin/go.d/modules/puppet/collect.go index a1b95e09c..5fc5ef759 100644 --- a/src/go/plugin/go.d/modules/puppet/collect.go +++ b/src/go/plugin/go.d/modules/puppet/collect.go @@ -3,10 +3,7 @@ package puppet import ( - "encoding/json" "fmt" - "io" - "net/http" "net/url" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm" @@ -31,7 +28,7 @@ func (p *Puppet) collect() (map[string]int64, error) { } func (p *Puppet) queryStatsService() 
(*statusServiceResponse, error) { - req, err := web.NewHTTPRequestWithPath(p.Request, urlPathStatusService) + req, err := web.NewHTTPRequestWithPath(p.RequestConfig, urlPathStatusService) if err != nil { return nil, err } @@ -39,7 +36,7 @@ func (p *Puppet) queryStatsService() (*statusServiceResponse, error) { req.URL.RawQuery = urlQueryStatusService var stats statusServiceResponse - if err := p.doOKDecode(req, &stats); err != nil { + if err := web.DoHTTP(p.httpClient).RequestJSON(req, &stats); err != nil { return nil, err } @@ -49,27 +46,3 @@ func (p *Puppet) queryStatsService() (*statusServiceResponse, error) { return &stats, nil } - -func (p *Puppet) doOKDecode(req *http.Request, in interface{}) error { - resp, err := p.httpClient.Do(req) - if err != nil { - return fmt.Errorf("error on HTTP request '%s': %v", req.URL, err) - } - defer closeBody(resp) - - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("'%s' returned HTTP status code: %d", req.URL, resp.StatusCode) - } - - if err := json.NewDecoder(resp.Body).Decode(in); err != nil { - return fmt.Errorf("error on decoding response from '%s': %v", req.URL, err) - } - return nil -} - -func closeBody(resp *http.Response) { - if resp != nil && resp.Body != nil { - _, _ = io.Copy(io.Discard, resp.Body) - _ = resp.Body.Close() - } -} diff --git a/src/go/plugin/go.d/modules/puppet/config_schema.json b/src/go/plugin/go.d/modules/puppet/config_schema.json index 92cbcb87f..780f502de 100644 --- a/src/go/plugin/go.d/modules/puppet/config_schema.json +++ b/src/go/plugin/go.d/modules/puppet/config_schema.json @@ -105,7 +105,6 @@ "required": [ "url" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/puppet/integrations/puppet.md b/src/go/plugin/go.d/modules/puppet/integrations/puppet.md index 23e85dc4d..1b8a7089a 100644 --- a/src/go/plugin/go.d/modules/puppet/integrations/puppet.md +++ b/src/go/plugin/go.d/modules/puppet/integrations/puppet.md @@ -94,8 +94,8 @@ No action required. The configuration file name for this integration is `go.d/puppet.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
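[Editor's note] The per-module `doOKDecode`/`closeBody` helpers removed above (and again in the rabbitmq, riakkv, and rspamd modules below) are consolidated into the shared `web` package. Below is a minimal sketch of the replacement call pattern, inferred from the call sites in this patch; the `serverStatus` type and the `/status` path are illustrative placeholders, not part of the change:

```go
package example

import (
	"net/http"

	"github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
)

// serverStatus is a placeholder response type for this sketch.
type serverStatus struct {
	State string `json:"state"`
}

func queryStatus(httpClient *http.Client, cfg web.RequestConfig) (*serverStatus, error) {
	// Build the request from the job's RequestConfig, joining the
	// given path onto the configured base URL.
	req, err := web.NewHTTPRequestWithPath(cfg, "/status")
	if err != nil {
		return nil, err
	}

	var status serverStatus
	// RequestJSON performs the request, rejects non-OK status codes,
	// decodes the JSON body, and drains/closes the body: everything
	// the removed doOKDecode/closeBody pair used to do by hand.
	if err := web.DoHTTP(httpClient).RequestJSON(req, &status); err != nil {
		return nil, err
	}
	return &status, nil
}
```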
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/puppet/puppet.go b/src/go/plugin/go.d/modules/puppet/puppet.go index e6eb7b058..fbbe59e09 100644 --- a/src/go/plugin/go.d/modules/puppet/puppet.go +++ b/src/go/plugin/go.d/modules/puppet/puppet.go @@ -5,10 +5,12 @@ package puppet import ( _ "embed" "errors" + "fmt" "net/http" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -26,12 +28,12 @@ func init() { func New() *Puppet { return &Puppet{ Config: Config{ - HTTP: web.HTTP{ - Request: web.Request{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{ URL: "https://127.0.0.1:8140", }, - Client: web.Client{ - Timeout: web.Duration(time.Second * 1), + ClientConfig: web.ClientConfig{ + Timeout: confopt.Duration(time.Second * 1), }, }, }, @@ -40,8 +42,8 @@ func New() *Puppet { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - web.HTTP `yaml:",inline" json:""` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + web.HTTPConfig `yaml:",inline" json:""` } type Puppet struct { @@ -59,14 +61,12 @@ func (p *Puppet) Configuration() any { func (p *Puppet) Init() error { if p.URL == "" { - p.Error("URL not set") return errors.New("url not set") } - client, err := web.NewHTTPClient(p.Client) + client, err := web.NewHTTPClient(p.ClientConfig) if err != nil { - p.Error(err) - return err + return fmt.Errorf("create http client: %v", err) } p.httpClient = client @@ -79,7 +79,6 @@ func (p *Puppet) Init() error { func (p *Puppet) Check() error { mx, err := p.collect() if err != nil { - p.Error(err) return err } diff --git a/src/go/plugin/go.d/modules/puppet/puppet_test.go b/src/go/plugin/go.d/modules/puppet/puppet_test.go index 7c80a638a..fafadffb7 100644 --- a/src/go/plugin/go.d/modules/puppet/puppet_test.go +++ b/src/go/plugin/go.d/modules/puppet/puppet_test.go @@ -48,8 +48,8 @@ func TestPuppet_Init(t *testing.T) { "fail when URL not set": { wantFail: true, config: Config{ - HTTP: web.HTTP{ - Request: web.Request{URL: ""}, + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{URL: ""}, }, }, }, @@ -151,29 +151,14 @@ func TestPuppet_Collect(t *testing.T) { mx := puppet.Collect() require.Equal(t, test.wantMetrics, mx) + if len(test.wantMetrics) > 0 { - testMetricsHasAllChartsDims(t, puppet, mx) + module.TestMetricsHasAllChartsDims(t, puppet.Charts(), mx) } }) } } -func testMetricsHasAllChartsDims(t *testing.T, puppet *Puppet, mx map[string]int64) { - for _, chart := range *puppet.Charts() { - if chart.Obsolete { - continue - } - for _, dim := range chart.Dims { - _, ok := mx[dim.ID] - assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) - } - for _, v := range chart.Vars { - _, ok := mx[v.ID] - assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) - } - } -} - func prepareCaseOkDefault(t *testing.T) (*Puppet, func()) { t.Helper() srv := httptest.NewServer(http.HandlerFunc( diff --git a/src/go/plugin/go.d/modules/rabbitmq/collect.go b/src/go/plugin/go.d/modules/rabbitmq/collect.go index 70b2aa033..0e9141760 100644 --- a/src/go/plugin/go.d/modules/rabbitmq/collect.go +++ b/src/go/plugin/go.d/modules/rabbitmq/collect.go @@ -3,10 +3,7 @@ package rabbitmq import ( - "encoding/json" "fmt" - "io" - "net/http" "path/filepath" 
"github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm" @@ -44,8 +41,13 @@ func (r *RabbitMQ) collect() (map[string]int64, error) { } func (r *RabbitMQ) collectOverviewStats(mx map[string]int64) error { + req, err := web.NewHTTPRequestWithPath(r.RequestConfig, urlPathAPIOverview) + if err != nil { + return fmt.Errorf("failed to create overview stats request: %w", err) + } + var stats overviewStats - if err := r.doOKDecode(urlPathAPIOverview, &stats); err != nil { + if err := web.DoHTTP(r.httpClient).RequestJSON(req, &stats); err != nil { return err } @@ -65,22 +67,32 @@ func (r *RabbitMQ) collectNodeStats(mx map[string]int64) error { return nil } + req, err := web.NewHTTPRequestWithPath(r.RequestConfig, filepath.Join(urlPathAPINodes, r.nodeName)) + if err != nil { + return fmt.Errorf("failed to create node stats request: %w", err) + } + var stats nodeStats - if err := r.doOKDecode(filepath.Join(urlPathAPINodes, r.nodeName), &stats); err != nil { + if err := web.DoHTTP(r.httpClient).RequestJSON(req, &stats); err != nil { return err } for k, v := range stm.ToMap(stats) { mx[k] = v } - mx["proc_available"] = int64(stats.ProcTotal - stats.ProcUsed) + mx["proc_available"] = stats.ProcTotal - stats.ProcUsed return nil } func (r *RabbitMQ) collectVhostsStats(mx map[string]int64) error { + req, err := web.NewHTTPRequestWithPath(r.RequestConfig, urlPathAPIVhosts) + if err != nil { + return fmt.Errorf("failed to create vhosts stats request: %w", err) + } + var stats []vhostStats - if err := r.doOKDecode(urlPathAPIVhosts, &stats); err != nil { + if err := web.DoHTTP(r.httpClient).RequestJSON(req, &stats); err != nil { return err } @@ -112,8 +124,13 @@ func (r *RabbitMQ) collectVhostsStats(mx map[string]int64) error { } func (r *RabbitMQ) collectQueuesStats(mx map[string]int64) error { + req, err := web.NewHTTPRequestWithPath(r.RequestConfig, urlPathAPIQueues) + if err != nil { + return fmt.Errorf("failed to create queues stats request: %w", err) + } + var stats []queueStats - if err := r.doOKDecode(urlPathAPIQueues, &stats); err != nil { + if err := web.DoHTTP(r.httpClient).RequestJSON(req, &stats); err != nil { return err } @@ -143,35 +160,3 @@ func (r *RabbitMQ) collectQueuesStats(mx map[string]int64) error { return nil } - -func (r *RabbitMQ) doOKDecode(urlPath string, in interface{}) error { - req, err := web.NewHTTPRequestWithPath(r.Request, urlPath) - if err != nil { - return fmt.Errorf("error on creating request: %v", err) - } - - r.Debugf("doing HTTP %s to '%s'", req.Method, req.URL) - resp, err := r.httpClient.Do(req) - if err != nil { - return fmt.Errorf("error on request to %s: %v", req.URL, err) - } - - defer closeBody(resp) - - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("%s returned HTTP status %d (%s)", req.URL, resp.StatusCode, resp.Status) - } - - if err = json.NewDecoder(resp.Body).Decode(&in); err != nil { - return fmt.Errorf("error on decoding response from %s: %v", req.URL, err) - } - - return nil -} - -func closeBody(resp *http.Response) { - if resp != nil && resp.Body != nil { - _, _ = io.Copy(io.Discard, resp.Body) - _ = resp.Body.Close() - } -} diff --git a/src/go/plugin/go.d/modules/rabbitmq/config_schema.json b/src/go/plugin/go.d/modules/rabbitmq/config_schema.json index defa70142..1aaf387a7 100644 --- a/src/go/plugin/go.d/modules/rabbitmq/config_schema.json +++ b/src/go/plugin/go.d/modules/rabbitmq/config_schema.json @@ -113,7 +113,6 @@ "required": [ "url" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git 
a/src/go/plugin/go.d/modules/rabbitmq/integrations/rabbitmq.md b/src/go/plugin/go.d/modules/rabbitmq/integrations/rabbitmq.md index e4c9df588..67aa47fca 100644 --- a/src/go/plugin/go.d/modules/rabbitmq/integrations/rabbitmq.md +++ b/src/go/plugin/go.d/modules/rabbitmq/integrations/rabbitmq.md @@ -145,8 +145,8 @@ To enable see [Management Plugin](https://www.rabbitmq.com/management.html#getti The configuration file name for this integration is `go.d/rabbitmq.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/rabbitmq/rabbitmq.go b/src/go/plugin/go.d/modules/rabbitmq/rabbitmq.go index 74805dab7..3b160d389 100644 --- a/src/go/plugin/go.d/modules/rabbitmq/rabbitmq.go +++ b/src/go/plugin/go.d/modules/rabbitmq/rabbitmq.go @@ -5,10 +5,12 @@ package rabbitmq import ( _ "embed" "errors" + "fmt" "net/http" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -26,14 +28,14 @@ func init() { func New() *RabbitMQ { return &RabbitMQ{ Config: Config{ - HTTP: web.HTTP{ - Request: web.Request{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{ URL: "http://localhost:15672", Username: "guest", Password: "guest", }, - Client: web.Client{ - Timeout: web.Duration(time.Second), + ClientConfig: web.ClientConfig{ + Timeout: confopt.Duration(time.Second), }, }, CollectQueues: false, @@ -45,9 +47,9 @@ func New() *RabbitMQ { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - web.HTTP `yaml:",inline" json:""` - CollectQueues bool `yaml:"collect_queues_metrics" json:"collect_queues_metrics"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + web.HTTPConfig `yaml:",inline" json:""` + CollectQueues bool `yaml:"collect_queues_metrics" json:"collect_queues_metrics"` } type ( @@ -74,14 +76,12 @@ func (r *RabbitMQ) Configuration() any { func (r *RabbitMQ) Init() error { if r.URL == "" { - r.Error("'url' can not be empty") - return errors.New("url not set") + return errors.New("config: url not set") } - client, err := web.NewHTTPClient(r.Client) + client, err := web.NewHTTPClient(r.ClientConfig) if err != nil { - r.Errorf("init HTTP client: %v", err) - return err + return fmt.Errorf("init HTTP client: %v", err) } r.httpClient = client @@ -94,7 +94,6 @@ func (r *RabbitMQ) Init() error { func (r *RabbitMQ) Check() error { mx, err := r.collect() if err != nil { - r.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/rabbitmq/rabbitmq_test.go b/src/go/plugin/go.d/modules/rabbitmq/rabbitmq_test.go index 7c4fe719e..083f9d1ec 100644 --- a/src/go/plugin/go.d/modules/rabbitmq/rabbitmq_test.go +++ b/src/go/plugin/go.d/modules/rabbitmq/rabbitmq_test.go @@ -55,8 +55,8 @@ func TestRabbitMQ_Init(t *testing.T) { "fail when URL not set": { wantFail: true, config: Config{ - HTTP: web.HTTP{ - 
Request: web.Request{URL: ""}, + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{URL: ""}, }, }, }, diff --git a/src/go/plugin/go.d/modules/redis/collect.go b/src/go/plugin/go.d/modules/redis/collect.go index 026164672..f464c3aa9 100644 --- a/src/go/plugin/go.d/modules/redis/collect.go +++ b/src/go/plugin/go.d/modules/redis/collect.go @@ -7,9 +7,10 @@ import ( "context" "errors" "fmt" - "github.com/blang/semver/v4" "regexp" "strings" + + "github.com/blang/semver/v4" ) const precision = 1000 // float values multiplier and dimensions divisor diff --git a/src/go/plugin/go.d/modules/redis/config_schema.json b/src/go/plugin/go.d/modules/redis/config_schema.json index c57b06ac0..90ab49b97 100644 --- a/src/go/plugin/go.d/modules/redis/config_schema.json +++ b/src/go/plugin/go.d/modules/redis/config_schema.json @@ -67,7 +67,6 @@ "required": [ "address" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/redis/init.go b/src/go/plugin/go.d/modules/redis/init.go index 8190be778..47b19f61a 100644 --- a/src/go/plugin/go.d/modules/redis/init.go +++ b/src/go/plugin/go.d/modules/redis/init.go @@ -8,7 +8,7 @@ import ( "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/tlscfg" - "github.com/go-redis/redis/v8" + "github.com/redis/go-redis/v9" ) func (r *Redis) validateConfig() error { diff --git a/src/go/plugin/go.d/modules/redis/integrations/redis.md b/src/go/plugin/go.d/modules/redis/integrations/redis.md index 52dfbf8f2..10003d0f8 100644 --- a/src/go/plugin/go.d/modules/redis/integrations/redis.md +++ b/src/go/plugin/go.d/modules/redis/integrations/redis.md @@ -128,8 +128,8 @@ No action required. The configuration file name for this integration is `go.d/redis.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
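[Editor's note] The redis module's driver moves from `github.com/go-redis/redis/v8` to `github.com/redis/go-redis/v9`, as the import changes just above (init.go) and below (redis.go) show. The client construction and `Ping` API used by the collector are the same in both major versions, so the change is essentially an import-path swap. A minimal sketch, with an illustrative address:

```go
package example

import (
	"context"

	"github.com/redis/go-redis/v9" // previously: github.com/go-redis/redis/v8
)

func pingRedis(ctx context.Context) error {
	// NewClient and Options are unchanged between v8 and v9;
	// only the module path differs.
	rdb := redis.NewClient(&redis.Options{
		Addr: "127.0.0.1:6379",
	})
	defer func() { _ = rdb.Close() }()

	// Ping returns a *StatusCmd in both versions.
	return rdb.Ping(ctx).Err()
}
```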
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/redis/redis.go b/src/go/plugin/go.d/modules/redis/redis.go index 954205e1e..11eeec9bd 100644 --- a/src/go/plugin/go.d/modules/redis/redis.go +++ b/src/go/plugin/go.d/modules/redis/redis.go @@ -6,16 +6,17 @@ import ( "context" _ "embed" "errors" + "fmt" "sync" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/metrics" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/tlscfg" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" "github.com/blang/semver/v4" - "github.com/go-redis/redis/v8" + "github.com/redis/go-redis/v9" ) //go:embed "config_schema.json" @@ -33,7 +34,7 @@ func New() *Redis { return &Redis{ Config: Config{ Address: "redis://@localhost:6379", - Timeout: web.Duration(time.Second), + Timeout: confopt.Duration(time.Second), PingSamples: 5, }, @@ -46,11 +47,11 @@ func New() *Redis { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - Address string `yaml:"address" json:"address"` - Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"` - Username string `yaml:"username,omitempty" json:"username"` - Password string `yaml:"password,omitempty" json:"password"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + Address string `yaml:"address" json:"address"` + Timeout confopt.Duration `yaml:"timeout,omitempty" json:"timeout"` + Username string `yaml:"username,omitempty" json:"username"` + Password string `yaml:"password,omitempty" json:"password"` tlscfg.TLSConfig `yaml:",inline" json:""` PingSamples int `yaml:"ping_samples" json:"ping_samples"` } @@ -86,21 +87,18 @@ func (r *Redis) Configuration() any { func (r *Redis) Init() error { err := r.validateConfig() if err != nil { - r.Errorf("config validation: %v", err) - return err + return fmt.Errorf("config validation: %v", err) } rdb, err := r.initRedisClient() if err != nil { - r.Errorf("init redis client: %v", err) - return err + return fmt.Errorf("init redis client: %v", err) } r.rdb = rdb charts, err := r.initCharts() if err != nil { - r.Errorf("init charts: %v", err) - return err + return fmt.Errorf("init charts: %v", err) } r.charts = charts @@ -110,7 +108,6 @@ func (r *Redis) Init() error { func (r *Redis) Check() error { mx, err := r.collect() if err != nil { - r.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/redis/redis_test.go b/src/go/plugin/go.d/modules/redis/redis_test.go index e295f0f97..597fde1dd 100644 --- a/src/go/plugin/go.d/modules/redis/redis_test.go +++ b/src/go/plugin/go.d/modules/redis/redis_test.go @@ -12,7 +12,7 @@ import ( "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/tlscfg" - "github.com/go-redis/redis/v8" + "github.com/redis/go-redis/v9" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -298,13 +298,13 @@ func TestRedis_Collect(t *testing.T) { t.Run(name, func(t *testing.T) { rdb := test.prepare(t) - ms := rdb.Collect() + mx := rdb.Collect() - copyTimeRelatedMetrics(ms, test.wantCollected) + copyTimeRelatedMetrics(mx, test.wantCollected) - assert.Equal(t, test.wantCollected, ms) + assert.Equal(t, test.wantCollected, mx) if len(test.wantCollected) > 0 { - ensureCollectedHasAllChartsDimsVarsIDs(t, rdb, ms) + 
module.TestMetricsHasAllChartsDims(t, rdb.Charts(), mx) ensureCollectedCommandsAddedToCharts(t, rdb) ensureCollectedDbsAddedToCharts(t, rdb) } @@ -338,23 +338,6 @@ func prepareRedisWithPikaMetrics(t *testing.T) *Redis { } return rdb } - -func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, rdb *Redis, ms map[string]int64) { - for _, chart := range *rdb.Charts() { - if chart.Obsolete { - continue - } - for _, dim := range chart.Dims { - _, ok := ms[dim.ID] - assert.Truef(t, ok, "chart '%s' dim '%s': no dim in collected", dim.ID, chart.ID) - } - for _, v := range chart.Vars { - _, ok := ms[v.ID] - assert.Truef(t, ok, "chart '%s' dim '%s': no dim in collected", v.ID, chart.ID) - } - } -} - func ensureCollectedCommandsAddedToCharts(t *testing.T, rdb *Redis) { for _, id := range []string{ chartCommandsCalls.ID, diff --git a/src/go/plugin/go.d/modules/rethinkdb/config_schema.json b/src/go/plugin/go.d/modules/rethinkdb/config_schema.json index 9a84aeca4..c751ca092 100644 --- a/src/go/plugin/go.d/modules/rethinkdb/config_schema.json +++ b/src/go/plugin/go.d/modules/rethinkdb/config_schema.json @@ -40,7 +40,6 @@ "required": [ "address" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/rethinkdb/integrations/rethinkdb.md b/src/go/plugin/go.d/modules/rethinkdb/integrations/rethinkdb.md index 3cc116e40..d9e5c0d3e 100644 --- a/src/go/plugin/go.d/modules/rethinkdb/integrations/rethinkdb.md +++ b/src/go/plugin/go.d/modules/rethinkdb/integrations/rethinkdb.md @@ -114,8 +114,8 @@ No action required. The configuration file name for this integration is `go.d/rethinkdb.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
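[Editor's note] The duplicated per-module test helpers deleted throughout this patch (`ensureCollectedHasAllChartsDimsVarsIDs`, `testMetricsHasAllChartsDims`) are replaced by a single shared assertion, `module.TestMetricsHasAllChartsDims`. A sketch of its use in a collector test, matching the call sites above; `prepareCollector` is a hypothetical fixture standing in for each module's own setup code, so this is not compilable as-is:

```go
package example

import (
	"testing"

	"github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
)

func TestCollector_Collect(t *testing.T) {
	collr := prepareCollector(t) // hypothetical per-module fixture

	mx := collr.Collect()

	// The shared helper replaces the hand-rolled loops over
	// chart.Dims and chart.Vars: it asserts that mx contains a
	// value for every dimension and variable of every chart.
	module.TestMetricsHasAllChartsDims(t, collr.Charts(), mx)
}
```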
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/rethinkdb/rethinkdb.go b/src/go/plugin/go.d/modules/rethinkdb/rethinkdb.go index ccde593de..5eab80c33 100644 --- a/src/go/plugin/go.d/modules/rethinkdb/rethinkdb.go +++ b/src/go/plugin/go.d/modules/rethinkdb/rethinkdb.go @@ -8,7 +8,7 @@ import ( "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" ) //go:embed "config_schema.json" @@ -26,7 +26,7 @@ func New() *Rethinkdb { return &Rethinkdb{ Config: Config{ Address: "127.0.0.1:28015", - Timeout: web.Duration(time.Second * 1), + Timeout: confopt.Duration(time.Second * 1), }, charts: clusterCharts.Copy(), @@ -36,11 +36,11 @@ func New() *Rethinkdb { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - Address string `yaml:"address" json:"address"` - Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"` - Username string `yaml:"username,omitempty" json:"username"` - Password string `yaml:"password,omitempty" json:"password"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + Address string `yaml:"address" json:"address"` + Timeout confopt.Duration `yaml:"timeout,omitempty" json:"timeout"` + Username string `yaml:"username,omitempty" json:"username"` + Password string `yaml:"password,omitempty" json:"password"` } type ( @@ -63,8 +63,7 @@ func (r *Rethinkdb) Configuration() any { func (r *Rethinkdb) Init() error { if r.Address == "" { - r.Error("address is not set") - return errors.New("address is not set") + return errors.New("config: address is not set") } return nil } @@ -72,7 +71,6 @@ func (r *Rethinkdb) Init() error { func (r *Rethinkdb) Check() error { mx, err := r.collect() if err != nil { - r.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/rethinkdb/rethinkdb_test.go b/src/go/plugin/go.d/modules/rethinkdb/rethinkdb_test.go index f23c49747..1e5b52894 100644 --- a/src/go/plugin/go.d/modules/rethinkdb/rethinkdb_test.go +++ b/src/go/plugin/go.d/modules/rethinkdb/rethinkdb_test.go @@ -154,12 +154,12 @@ func TestRethinkdb_Collect(t *testing.T) { prepare func() *Rethinkdb wantMetrics map[string]int64 wantCharts int - skipChart func(chart *module.Chart) bool + skipChart func(chart *module.Chart, dim *module.Dim) bool }{ "success on valid response": { prepare: prepareCaseOk, wantCharts: len(clusterCharts) + len(serverChartsTmpl)*3, - skipChart: func(chart *module.Chart) bool { + skipChart: func(chart *module.Chart, dim *module.Dim) bool { return strings.HasPrefix(chart.ID, "server_0f74c641-af5f-48d6-a005-35b8983c576a") && !strings.Contains(chart.ID, "stats_request_status") }, diff --git a/src/go/plugin/go.d/modules/riakkv/collect.go b/src/go/plugin/go.d/modules/riakkv/collect.go index 0b3be9438..d44655da6 100644 --- a/src/go/plugin/go.d/modules/riakkv/collect.go +++ b/src/go/plugin/go.d/modules/riakkv/collect.go @@ -3,10 +3,7 @@ package riakkv import ( - "encoding/json" "errors" - "fmt" - "io" "net/http" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm" @@ -31,44 +28,24 @@ func (r *RiakKv) collect() (map[string]int64, error) { } func (r *RiakKv) getStats() (*riakStats, error) { - req, err := web.NewHTTPRequest(r.Request) + req, err := web.NewHTTPRequest(r.RequestConfig) if err != nil { return nil, err } var stats riakStats - if err := r.doOKDecode(req, &stats); err != nil { + if err 
:= r.client().RequestJSON(req, &stats); err != nil { return nil, err } return &stats, nil } -func (r *RiakKv) doOKDecode(req *http.Request, in interface{}) error { - resp, err := r.httpClient.Do(req) - if err != nil { - return fmt.Errorf("error on HTTP request '%s': %v", req.URL, err) - } - defer closeBody(resp) - - if resp.StatusCode != http.StatusOK { - msg := fmt.Sprintf("'%s' returned HTTP status code: %d", req.URL, resp.StatusCode) +func (r *RiakKv) client() *web.Client { + return web.DoHTTP(r.httpClient).OnNokCode(func(resp *http.Response) (bool, error) { if resp.StatusCode == http.StatusNotFound { - msg = fmt.Sprintf("%s (riak_kv_stat is not enabled)", msg) + return false, errors.New("riak_kv_stat is not enabled") } - return errors.New(msg) - } - - if err := json.NewDecoder(resp.Body).Decode(in); err != nil { - return fmt.Errorf("error on decoding response from '%s': %v", req.URL, err) - } - - return nil -} - -func closeBody(resp *http.Response) { - if resp != nil && resp.Body != nil { - _, _ = io.Copy(io.Discard, resp.Body) - _ = resp.Body.Close() - } + return false, nil + }) } diff --git a/src/go/plugin/go.d/modules/riakkv/config_schema.json b/src/go/plugin/go.d/modules/riakkv/config_schema.json index 402c2c106..07d77df90 100644 --- a/src/go/plugin/go.d/modules/riakkv/config_schema.json +++ b/src/go/plugin/go.d/modules/riakkv/config_schema.json @@ -105,7 +105,6 @@ "required": [ "url" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/riakkv/integrations/riak_kv.md b/src/go/plugin/go.d/modules/riakkv/integrations/riak_kv.md index 872736277..09f1352a4 100644 --- a/src/go/plugin/go.d/modules/riakkv/integrations/riak_kv.md +++ b/src/go/plugin/go.d/modules/riakkv/integrations/riak_kv.md @@ -118,8 +118,8 @@ See the RiakKV [configuration reference](https://docs.riak.com/riak/kv/2.2.3/dev The configuration file name for this integration is `go.d/riakkv.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
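[Editor's note] riakkv is the one module in this patch that needs non-default handling of failed responses, so its `client()` above layers an `OnNokCode` hook onto the shared client to turn a 404 into a domain-specific error. A sketch of the pattern as that call site implies it; the exact hook semantics (in particular the meaning of the boolean result) are assumed from this single usage:

```go
package example

import (
	"errors"
	"net/http"

	"github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
)

func newStatsClient(httpClient *http.Client) *web.Client {
	// OnNokCode is invoked when the response status is not OK,
	// letting the module attach context before the request fails.
	return web.DoHTTP(httpClient).OnNokCode(func(resp *http.Response) (bool, error) {
		if resp.StatusCode == http.StatusNotFound {
			// Riak's /stats endpoint returns 404 when riak_kv_stat is disabled.
			return false, errors.New("riak_kv_stat is not enabled")
		}
		return false, nil // fall through to the default non-OK error
	})
}
```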
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/riakkv/riakkv.go b/src/go/plugin/go.d/modules/riakkv/riakkv.go index 64aeda1c1..d8b684273 100644 --- a/src/go/plugin/go.d/modules/riakkv/riakkv.go +++ b/src/go/plugin/go.d/modules/riakkv/riakkv.go @@ -5,11 +5,13 @@ package riakkv import ( _ "embed" "errors" + "fmt" "net/http" "sync" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -33,13 +35,13 @@ func init() { func New() *RiakKv { return &RiakKv{ Config: Config{ - HTTP: web.HTTP{ - Request: web.Request{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{ // https://docs.riak.com/riak/kv/2.2.3/developing/api/http/status.1.html URL: "http://127.0.0.1:8098/stats", }, - Client: web.Client{ - Timeout: web.Duration(time.Second), + ClientConfig: web.ClientConfig{ + Timeout: confopt.Duration(time.Second), }, }, }, @@ -49,8 +51,8 @@ func New() *RiakKv { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - web.HTTP `yaml:",inline" json:""` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + web.HTTPConfig `yaml:",inline" json:""` } type RiakKv struct { @@ -69,14 +71,12 @@ func (r *RiakKv) Configuration() any { func (r *RiakKv) Init() error { if r.URL == "" { - r.Errorf("url required but not set") - return errors.New("url not set") + return errors.New("config: url not set") } - httpClient, err := web.NewHTTPClient(r.Client) + httpClient, err := web.NewHTTPClient(r.ClientConfig) if err != nil { - r.Errorf("init HTTP client: %v", err) - return err + return fmt.Errorf("init HTTP client: %v", err) } r.httpClient = httpClient @@ -89,7 +89,6 @@ func (r *RiakKv) Init() error { func (r *RiakKv) Check() error { mx, err := r.collect() if err != nil { - r.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/riakkv/riakkv_test.go b/src/go/plugin/go.d/modules/riakkv/riakkv_test.go index de4e24092..1b9ccf888 100644 --- a/src/go/plugin/go.d/modules/riakkv/riakkv_test.go +++ b/src/go/plugin/go.d/modules/riakkv/riakkv_test.go @@ -49,8 +49,8 @@ func TestRiakKv_Init(t *testing.T) { "fail when URL not set": { wantFail: true, config: Config{ - HTTP: web.HTTP{ - Request: web.Request{URL: ""}, + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{URL: ""}, }, }, }, diff --git a/src/go/plugin/go.d/modules/rspamd/collect.go b/src/go/plugin/go.d/modules/rspamd/collect.go index ecbe4a034..caa785eb4 100644 --- a/src/go/plugin/go.d/modules/rspamd/collect.go +++ b/src/go/plugin/go.d/modules/rspamd/collect.go @@ -3,10 +3,7 @@ package rspamd import ( - "encoding/json" "fmt" - "io" - "net/http" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" @@ -50,13 +47,13 @@ func (r *Rspamd) collect() (map[string]int64, error) { } func (r *Rspamd) queryRspamdStats() (*rspamdStats, error) { - req, err := web.NewHTTPRequestWithPath(r.Request, "/stat") + req, err := web.NewHTTPRequestWithPath(r.RequestConfig, "/stat") if err != nil { return nil, err } var stats rspamdStats - if err := r.doOKDecode(req, &stats); err != nil { + if err := web.DoHTTP(r.httpClient).RequestJSON(req, &stats); err != nil { return nil, err } @@ -66,27 +63,3 @@ func (r *Rspamd) queryRspamdStats() (*rspamdStats, error) { return &stats, nil } - -func (r *Rspamd) doOKDecode(req 
*http.Request, in interface{}) error { - resp, err := r.httpClient.Do(req) - if err != nil { - return fmt.Errorf("error on HTTP request '%s': %v", req.URL, err) - } - defer closeBody(resp) - - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("'%s' returned HTTP status code: %d", req.URL, resp.StatusCode) - } - - if err := json.NewDecoder(resp.Body).Decode(in); err != nil { - return fmt.Errorf("error on decoding response from '%s': %v", req.URL, err) - } - return nil -} - -func closeBody(resp *http.Response) { - if resp != nil && resp.Body != nil { - _, _ = io.Copy(io.Discard, resp.Body) - _ = resp.Body.Close() - } -} diff --git a/src/go/plugin/go.d/modules/rspamd/config_schema.json b/src/go/plugin/go.d/modules/rspamd/config_schema.json index c7b866d87..eba2ec8ee 100644 --- a/src/go/plugin/go.d/modules/rspamd/config_schema.json +++ b/src/go/plugin/go.d/modules/rspamd/config_schema.json @@ -105,7 +105,6 @@ "required": [ "url" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/rspamd/integrations/rspamd.md b/src/go/plugin/go.d/modules/rspamd/integrations/rspamd.md index fe0949422..c60fd2b43 100644 --- a/src/go/plugin/go.d/modules/rspamd/integrations/rspamd.md +++ b/src/go/plugin/go.d/modules/rspamd/integrations/rspamd.md @@ -93,8 +93,8 @@ No action required. The configuration file name for this integration is `go.d/rspamd.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
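[Editor's note] Alongside the `web.HTTP` to `web.HTTPConfig` renames, every module's `Timeout` option in this patch migrates from `web.Duration` to `confopt.Duration`. A minimal sketch of the new type in a module config, based on the patterns above; the `Config` struct here is illustrative, not any specific module's:

```go
package example

import (
	"time"

	"github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt"
)

type Config struct {
	UpdateEvery int              `yaml:"update_every,omitempty" json:"update_every"`
	Timeout     confopt.Duration `yaml:"timeout,omitempty" json:"timeout"`
}

func defaultConfig() Config {
	return Config{
		// confopt.Duration wraps time.Duration so the value can be
		// set from YAML/JSON job configs; the conversion sets a default.
		Timeout: confopt.Duration(2 * time.Second),
	}
}

func timeout(cfg Config) time.Duration {
	// Duration() unwraps the configured value, as the samba module's
	// init code below does with s.Timeout.Duration().
	return cfg.Timeout.Duration()
}
```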
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/rspamd/rspamd.go b/src/go/plugin/go.d/modules/rspamd/rspamd.go index 0a5c4ffe5..77131b3db 100644 --- a/src/go/plugin/go.d/modules/rspamd/rspamd.go +++ b/src/go/plugin/go.d/modules/rspamd/rspamd.go @@ -5,10 +5,12 @@ package rspamd import ( _ "embed" "errors" + "fmt" "net/http" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -26,12 +28,12 @@ func init() { func New() *Rspamd { return &Rspamd{ Config: Config{ - HTTP: web.HTTP{ - Request: web.Request{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{ URL: "http://127.0.0.1:11334", }, - Client: web.Client{ - Timeout: web.Duration(time.Second * 1), + ClientConfig: web.ClientConfig{ + Timeout: confopt.Duration(time.Second * 1), }, }, }, @@ -40,8 +42,8 @@ func New() *Rspamd { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - web.HTTP `yaml:",inline" json:""` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + web.HTTPConfig `yaml:",inline" json:""` } type Rspamd struct { @@ -59,14 +61,12 @@ func (r *Rspamd) Configuration() any { func (r *Rspamd) Init() error { if r.URL == "" { - r.Error("URL not set") - return errors.New("url not set") + return errors.New("config: url not set") } - client, err := web.NewHTTPClient(r.Client) + client, err := web.NewHTTPClient(r.ClientConfig) if err != nil { - r.Error(err) - return err + return fmt.Errorf("create http client: %v", err) } r.httpClient = client @@ -79,7 +79,6 @@ func (r *Rspamd) Init() error { func (r *Rspamd) Check() error { mx, err := r.collect() if err != nil { - r.Error(err) return err } diff --git a/src/go/plugin/go.d/modules/rspamd/rspamd_test.go b/src/go/plugin/go.d/modules/rspamd/rspamd_test.go index 0c8cc8e5b..a05658eae 100644 --- a/src/go/plugin/go.d/modules/rspamd/rspamd_test.go +++ b/src/go/plugin/go.d/modules/rspamd/rspamd_test.go @@ -48,8 +48,8 @@ func TestRspamd_Init(t *testing.T) { "fail when URL not set": { wantFail: true, config: Config{ - HTTP: web.HTTP{ - Request: web.Request{URL: ""}, + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{URL: ""}, }, }, }, @@ -156,29 +156,14 @@ func TestRspamd_Collect(t *testing.T) { mx := rsp.Collect() require.Equal(t, test.wantMetrics, mx) + if len(test.wantMetrics) > 0 { - testMetricsHasAllChartsDims(t, rsp, mx) + module.TestMetricsHasAllChartsDims(t, rsp.Charts(), mx) } }) } } -func testMetricsHasAllChartsDims(t *testing.T, rsp *Rspamd, mx map[string]int64) { - for _, chart := range *rsp.Charts() { - if chart.Obsolete { - continue - } - for _, dim := range chart.Dims { - _, ok := mx[dim.ID] - assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) - } - for _, v := range chart.Vars { - _, ok := mx[v.ID] - assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) - } - } -} - func prepareCaseOk(t *testing.T) (*Rspamd, func()) { t.Helper() srv := httptest.NewServer(http.HandlerFunc( diff --git a/src/go/plugin/go.d/modules/samba/README.md b/src/go/plugin/go.d/modules/samba/README.md new file mode 120000 index 000000000..3b63bbab6 --- /dev/null +++ b/src/go/plugin/go.d/modules/samba/README.md @@ -0,0 +1 @@ +integrations/samba.md \ No newline at end of file diff --git a/src/go/plugin/go.d/modules/samba/charts.go 
b/src/go/plugin/go.d/modules/samba/charts.go new file mode 100644 index 000000000..e87768faf --- /dev/null +++ b/src/go/plugin/go.d/modules/samba/charts.go @@ -0,0 +1,124 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package samba + +import ( + "fmt" + "strings" + + "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" +) + +const ( + prioSyscallCalls = module.Priority + iota + prioSyscallTransferredData + + prioSmb2CallCalls + prioSmb2CallTransferredData +) + +var ( + syscallCallsChartTmpl = module.Chart{ + ID: "syscall_%s_calls", + Title: "Syscalls Count", + Units: "calls/s", + Fam: "syscalls", + Ctx: "samba.syscall_calls", + Priority: prioSyscallCalls, + Type: module.Line, + Dims: module.Dims{ + {ID: "syscall_%s_count", Name: "syscalls", Algo: module.Incremental}, + }, + } + syscallTransferredDataChartTmpl = module.Chart{ + ID: "syscall_%s_transferred_data", + Title: "Syscall Transferred Data", + Units: "bytes/s", + Fam: "syscalls", + Ctx: "samba.syscall_transferred_data", + Priority: prioSyscallTransferredData, + Type: module.Area, + Dims: module.Dims{ + {ID: "syscall_%s_bytes", Name: "transferred", Algo: module.Incremental}, + }, + } + + smb2CallCallsChartTmpl = module.Chart{ + ID: "smb2_call_%s_calls", + Title: "SMB2 Calls Count", + Units: "calls/s", + Fam: "smb2 calls", + Ctx: "samba.smb2_call_calls", + Priority: prioSmb2CallCalls, + Type: module.Line, + Dims: module.Dims{ + {ID: "smb2_%s_count", Name: "smb2", Algo: module.Incremental}, + }, + } + smb2CallTransferredDataChartTmpl = module.Chart{ + ID: "smb2_call_%s_transferred_data", + Title: "SMB2 Call Transferred Data", + Units: "bytes/s", + Fam: "smb2 calls", + Ctx: "samba.smb2_call_transferred_data", + Priority: prioSmb2CallTransferredData, + Type: module.Area, + Dims: module.Dims{ + {ID: "smb2_%s_inbytes", Name: "in", Algo: module.Incremental}, + {ID: "smb2_%s_outbytes", Name: "out", Algo: module.Incremental, Mul: -1}, + }, + } +) + +func (s *Samba) addCharts(mx map[string]int64) { + for k := range mx { + if name, ok := extractCallName(k, "syscall_", "_count"); ok { + s.addSysCallChart(name, syscallCallsChartTmpl.Copy()) + } else if name, ok := extractCallName(k, "syscall_", "_bytes"); ok { + s.addSysCallChart(name, syscallTransferredDataChartTmpl.Copy()) + } else if name, ok := extractCallName(k, "smb2_", "_count"); ok { + s.addSmb2CallChart(name, smb2CallCallsChartTmpl.Copy()) + // all smb2* metrics have inbytes and outbytes + s.addSmb2CallChart(name, smb2CallTransferredDataChartTmpl.Copy()) + } + } +} + +func (s *Samba) addSysCallChart(syscall string, chart *module.Chart) { + chart = chart.Copy() + chart.ID = fmt.Sprintf(chart.ID, syscall) + chart.Labels = []module.Label{ + {Key: "syscall", Value: syscall}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, syscall) + } + + if err := s.Charts().Add(chart); err != nil { + s.Warning(err) + } +} + +func (s *Samba) addSmb2CallChart(smb2Call string, chart *module.Chart) { + chart = chart.Copy() + chart.ID = fmt.Sprintf(chart.ID, smb2Call) + chart.Labels = []module.Label{ + {Key: "smb2call", Value: smb2Call}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, smb2Call) + } + + if err := s.Charts().Add(chart); err != nil { + s.Warning(err) + } +} + +func extractCallName(s, prefix, suffix string) (string, bool) { + if !(strings.HasPrefix(s, prefix) && strings.HasSuffix(s, suffix)) { + return "", false + } + name := strings.TrimPrefix(s, prefix) + name = strings.TrimSuffix(name, suffix) + return name, true +} diff --git 
a/src/go/plugin/go.d/modules/samba/collect.go b/src/go/plugin/go.d/modules/samba/collect.go new file mode 100644 index 000000000..9dddc1e95 --- /dev/null +++ b/src/go/plugin/go.d/modules/samba/collect.go @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package samba + +import ( + "bufio" + "bytes" + "errors" + "strconv" + "strings" +) + +func (s *Samba) collect() (map[string]int64, error) { + bs, err := s.exec.profile() + if err != nil { + return nil, err + } + + mx := make(map[string]int64) + + if err := s.collectSmbStatusProfile(mx, bs); err != nil { + return nil, err + } + + s.once.Do(func() { + s.addCharts(mx) + }) + + return mx, nil +} + +func (s *Samba) collectSmbStatusProfile(mx map[string]int64, profileData []byte) error { + sc := bufio.NewScanner(bytes.NewReader(profileData)) + + for sc.Scan() { + line := strings.TrimSpace(sc.Text()) + + switch { + case strings.HasPrefix(line, "syscall_"): + case strings.HasPrefix(line, "smb2_"): + default: + continue + } + + key, value, ok := strings.Cut(line, ":") + if !ok { + s.Debugf("failed to parse line: '%s'", line) + continue + } + + key, value = strings.TrimSpace(key), strings.TrimSpace(value) + + if !(strings.HasSuffix(key, "count") || strings.HasSuffix(key, "bytes")) { + continue + } + + v, err := strconv.ParseInt(value, 10, 64) + if err != nil { + s.Debugf("failed to parse value in '%s': %v", line, err) + continue + } + + mx[key] = v + } + + if len(mx) == 0 { + return errors.New("unexpected smbstatus profile response: no metrics found") + } + + return nil +} diff --git a/src/go/plugin/go.d/modules/samba/config_schema.json b/src/go/plugin/go.d/modules/samba/config_schema.json new file mode 100644 index 000000000..600a88d14 --- /dev/null +++ b/src/go/plugin/go.d/modules/samba/config_schema.json @@ -0,0 +1,34 @@ +{ + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Samba collector configuration.", + "type": "object", + "properties": { + "update_every": { + "title": "Update every", + "description": "Data collection interval, measured in seconds.", + "type": "integer", + "minimum": 1, + "default": 10 + }, + "timeout": { + "title": "Timeout", + "description": "Timeout for executing the binary, specified in seconds.", + "type": "number", + "minimum": 0.5, + "default": 2 + } + }, + "patternProperties": { + "^name$": {} + } + }, + "uiSchema": { + "uiOptions": { + "fullPage": true + }, + "timeout": { + "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)." 
+ } + } +} diff --git a/src/go/plugin/go.d/modules/samba/exec.go b/src/go/plugin/go.d/modules/samba/exec.go new file mode 100644 index 000000000..e35ade504 --- /dev/null +++ b/src/go/plugin/go.d/modules/samba/exec.go @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package samba + +import ( + "context" + "fmt" + "os/exec" + "time" + + "github.com/netdata/netdata/go/plugins/logger" +) + +type smbStatusBinary interface { + profile() ([]byte, error) +} + +func newSmbStatusBinary(ndsudoPath string, timeout time.Duration, log *logger.Logger) smbStatusBinary { + return &smbStatusExec{ + Logger: log, + ndsudoPath: ndsudoPath, + timeout: timeout, + } +} + +type smbStatusExec struct { + *logger.Logger + + ndsudoPath string + timeout time.Duration +} + +func (e *smbStatusExec) profile() ([]byte, error) { + ctx, cancel := context.WithTimeout(context.Background(), e.timeout) + defer cancel() + + cmd := exec.CommandContext(ctx, e.ndsudoPath, "smbstatus-profile") + + e.Debugf("executing '%s'", cmd) + + bs, err := cmd.Output() + if err != nil { + return nil, fmt.Errorf("error on '%s': %v", cmd, err) + } + + return bs, nil +} diff --git a/src/go/plugin/go.d/modules/samba/init.go b/src/go/plugin/go.d/modules/samba/init.go new file mode 100644 index 000000000..a932b5696 --- /dev/null +++ b/src/go/plugin/go.d/modules/samba/init.go @@ -0,0 +1,23 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package samba + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/netdata/netdata/go/plugins/pkg/executable" +) + +func (s *Samba) initSmbStatusBinary() (smbStatusBinary, error) { + ndsudoPath := filepath.Join(executable.Directory, "ndsudo") + if _, err := os.Stat(ndsudoPath); err != nil { + return nil, fmt.Errorf("ndsudo executable not found: %v", err) + + } + + smbStatus := newSmbStatusBinary(ndsudoPath, s.Timeout.Duration(), s.Logger) + + return smbStatus, nil +} diff --git a/src/go/plugin/go.d/modules/samba/integrations/samba.md b/src/go/plugin/go.d/modules/samba/integrations/samba.md new file mode 100644 index 000000000..9b1195bd5 --- /dev/null +++ b/src/go/plugin/go.d/modules/samba/integrations/samba.md @@ -0,0 +1,240 @@ + + +# Samba + + + + + +Plugin: go.d.plugin +Module: samba + + + +## Overview + +This collector monitors Samba syscalls and SMB2 calls. It relies on the [`smbstatus`](https://www.samba.org/samba/docs/current/man-html/smbstatus.1.html) CLI tool but avoids directly executing the binary. Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment. This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management. +Executed commands: +- `smbstatus -P` + + + + +This collector is supported on all platforms. + +This collector only supports collecting metrics from a single instance of this integration. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per syscall + +These metrics refer to the Syscall. 
+ +Labels: + +| Label | Description | +|:-----------|:----------------| +| syscall | Syscall name | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| samba.syscall_calls | syscalls | calls/s | +| samba.syscall_transferred_data | transferred | bytes/s | + +### Per smb2call + +These metrics refer to the SMB2 Call. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| smb2call | SMB2 call name | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| samba.smb2_call_calls | smb2 | calls/s | +| samba.smb2_call_transferred_data | in, out | bytes/s | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Verifying and Enabling Profiling for SMBd + +1. **Check for Profiling Support** + + Before enabling profiling, it's important to verify if `smbd` was compiled with profiling capabilities. Run the following command as root user (using `sudo`) to check: + + ```bash + $ sudo smbd --build-options | grep WITH_PROFILE + WITH_PROFILE + ``` + + If the command outputs `WITH_PROFILE`, profiling is supported. If not, you'll need to recompile `smbd` with profiling enabled (refer to Samba documentation for specific instructions). + +2. **Enable Profiling** + + Once you've confirmed profiling support, you can enable it using one of the following methods: + + - **Command-Line Option** + Start smbd with the `-P 1` option when invoking it directly from the command line. + - **Configuration File** + Modify the `smb.conf` configuration file located at `/etc/samba/smb.conf` (the path might vary slightly depending on your system). Add the following line to the `[global]` section: + + ```bash + smbd profiling level = count + ``` +3. **Restart the Samba service** + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/samba.conf`. + + +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/samba.conf +``` +#### Options + +The following options can be defined globally: update_every. + + +
    Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| timeout | smbstatus binary execution timeout. | 2 | no | + +
    + +#### Examples + +##### Custom update_every + +Allows you to override the default data collection interval. + +
    Config + +```yaml +jobs: + - name: samba + update_every: 5 # Collect statistics every 5 seconds + +``` +
    + + + +## Troubleshooting + +### Debug Mode + +**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature. + +To troubleshoot issues with the `samba` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m samba + ``` + +### Getting Logs + +If you're encountering problems with the `samba` collector, follow these steps to retrieve logs and identify potential issues: + +- **Run the command** specific to your system (systemd, non-systemd, or Docker container). +- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem. + +#### System with systemd + +Use the following command to view logs generated since the last Netdata service restart: + +```bash +journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep samba +``` + +#### System without systemd + +Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name: + +```bash +grep samba /var/log/netdata/collector.log +``` + +**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues. + +#### Docker Container + +If your Netdata runs in a Docker container named "netdata" (replace if different), use this command: + +```bash +docker logs netdata 2>&1 | grep samba +``` + + diff --git a/src/go/plugin/go.d/modules/samba/metadata.yaml b/src/go/plugin/go.d/modules/samba/metadata.yaml new file mode 100644 index 000000000..fcd4a73f7 --- /dev/null +++ b/src/go/plugin/go.d/modules/samba/metadata.yaml @@ -0,0 +1,153 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-samba + plugin_name: go.d.plugin + module_name: samba + monitored_instance: + name: Samba + link: "https://www.samba.org/samba/" + icon_filename: 'samba.svg' + categories: + - data-collection.storage-mount-points-and-filesystems + keywords: + - samba + - smb + - file sharing + related_resources: + integrations: + list: [] + info_provided_to_referring_integrations: + description: "" + most_popular: false + overview: + data_collection: + metrics_description: > + This collector monitors Samba syscalls and SMB2 calls. + It relies on the [`smbstatus`](https://www.samba.org/samba/docs/current/man-html/smbstatus.1.html) CLI tool but avoids directly executing the binary. + Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment. + This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management. 
+
+          Executed commands:
+
+          - `smbstatus -P`
+      method_description: ""
+      supported_platforms:
+        include: []
+        exclude: []
+      multi_instance: false
+      additional_permissions:
+        description: ""
+      default_behavior:
+        auto_detection:
+          description: ""
+        limits:
+          description: ""
+        performance_impact:
+          description: ""
+    setup:
+      prerequisites:
+        list:
+          - title: "Verifying and Enabling Profiling for smbd"
+            description: |
+              1. **Check for Profiling Support**
+
+                 Before enabling profiling, it's important to verify whether `smbd` was compiled with profiling capabilities. Run the following command as the root user (using `sudo`) to check:
+
+                 ```bash
+                 $ sudo smbd --build-options | grep WITH_PROFILE
+                 WITH_PROFILE
+                 ```
+
+                 If the command outputs `WITH_PROFILE`, profiling is supported. If not, you'll need to recompile `smbd` with profiling enabled (refer to the Samba documentation for specific instructions).
+
+              2. **Enable Profiling**
+
+                 Once you've confirmed profiling support, you can enable it using one of the following methods:
+
+                 - **Command-Line Option**
+                   Start `smbd` with the `-P 1` option when invoking it directly from the command line.
+                 - **Configuration File**
+                   Modify the `smb.conf` configuration file located at `/etc/samba/smb.conf` (the path might vary slightly depending on your system). Add the following line to the `[global]` section:
+
+                   ```bash
+                   smbd profiling level = count
+                   ```
+              3. **Restart the Samba service**
+    configuration:
+      file:
+        name: go.d/samba.conf
+      options:
+        description: |
+          The following options can be defined globally: update_every.
+        folding:
+          title: Config options
+          enabled: true
+        list:
+          - name: update_every
+            description: Data collection frequency, in seconds.
+            default_value: 10
+            required: false
+          - name: timeout
+            description: Timeout for executing the `smbstatus` binary, in seconds.
+            default_value: 2
+            required: false
+      examples:
+        folding:
+          title: Config
+          enabled: true
+        list:
+          - name: Custom update_every
+            description: Allows you to override the default data collection interval.
+            config: |
+              jobs:
+                - name: samba
+                  update_every: 5  # Collect statistics every 5 seconds
+    troubleshooting:
+      problems:
+        list: []
+    alerts: []
+    metrics:
+      folding:
+        title: Metrics
+        enabled: false
+      description: ""
+      availability: []
+      scopes:
+        - name: syscall
+          description: These metrics refer to the Syscall.
+          labels:
+            - name: syscall
+              description: Syscall name
+          metrics:
+            - name: samba.syscall_calls
+              description: Syscalls Count
+              unit: calls/s
+              chart_type: line
+              dimensions:
+                - name: syscalls
+            - name: samba.syscall_transferred_data
+              description: Syscalls Transferred Data
+              unit: bytes/s
+              chart_type: area
+              dimensions:
+                - name: transferred
+        - name: smb2call
+          description: These metrics refer to the SMB2 Call.
+ labels: + - name: smb2call + description: SMB2 call name + metrics: + - name: samba.smb2_call_calls + description: SMB2 Calls Count + unit: calls/s + chart_type: line + dimensions: + - name: smb2 + - name: samba.smb2_call_transferred_data + description: SMB2 Call Transferred Data + unit: bytes/s + chart_type: area + dimensions: + - name: in + - name: out diff --git a/src/go/plugin/go.d/modules/samba/samba.go b/src/go/plugin/go.d/modules/samba/samba.go new file mode 100644 index 000000000..5e444d065 --- /dev/null +++ b/src/go/plugin/go.d/modules/samba/samba.go @@ -0,0 +1,99 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package samba + +import ( + _ "embed" + "errors" + "fmt" + "sync" + "time" + + "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("samba", module.Creator{ + JobConfigSchema: configSchema, + Defaults: module.Defaults{ + UpdateEvery: 10, + }, + Create: func() module.Module { return New() }, + Config: func() any { return &Config{} }, + }) +} + +func New() *Samba { + return &Samba{ + Config: Config{ + Timeout: confopt.Duration(time.Second * 2), + }, + charts: &module.Charts{}, + once: &sync.Once{}, + } +} + +type Config struct { + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + Timeout confopt.Duration `yaml:"timeout,omitempty" json:"timeout"` +} + +type Samba struct { + module.Base + Config `yaml:",inline" json:""` + + charts *module.Charts + once *sync.Once + + exec smbStatusBinary +} + +func (s *Samba) Configuration() any { + return s.Config +} + +func (s *Samba) Init() error { + smbStatus, err := s.initSmbStatusBinary() + if err != nil { + return fmt.Errorf("smbstatus exec initialization: %v", err) + } + s.exec = smbStatus + + return nil +} + +func (s *Samba) Check() error { + mx, err := s.collect() + if err != nil { + return err + } + + if len(mx) == 0 { + return errors.New("no metrics collected") + } + + return nil +} + +func (s *Samba) Charts() *module.Charts { + return s.charts +} + +func (s *Samba) Collect() map[string]int64 { + mx, err := s.collect() + if err != nil { + s.Error(err) + } + + if len(mx) == 0 { + return nil + } + + return mx +} + +func (s *Samba) Cleanup() {} diff --git a/src/go/plugin/go.d/modules/samba/samba_test.go b/src/go/plugin/go.d/modules/samba/samba_test.go new file mode 100644 index 000000000..bf56401bb --- /dev/null +++ b/src/go/plugin/go.d/modules/samba/samba_test.go @@ -0,0 +1,339 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package samba + +import ( + "errors" + "os" + "testing" + + "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + dataConfigJSON, _ = os.ReadFile("testdata/config.json") + dataConfigYAML, _ = os.ReadFile("testdata/config.yaml") + + dataSmbStatusProfile, _ = os.ReadFile("testdata/smbstatus-profile.txt") +) + +func Test_testDataIsValid(t *testing.T) { + for name, data := range map[string][]byte{ + "dataConfigJSON": dataConfigJSON, + "dataConfigYAML": dataConfigYAML, + "dataSmbStatusProfile": dataSmbStatusProfile, + } { + require.NotNil(t, data, name) + + } +} + +func TestSamba_Configuration(t *testing.T) { + module.TestConfigurationSerialize(t, &Samba{}, dataConfigJSON, dataConfigYAML) +} + +func TestSamba_Init(t *testing.T) { + tests := map[string]struct { + config Config + wantFail bool + }{ + "fails if 
failed to locate ndsudo": { + wantFail: true, + config: New().Config, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + smb := New() + smb.Config = test.config + + if test.wantFail { + assert.Error(t, smb.Init()) + } else { + assert.NoError(t, smb.Init()) + } + }) + } +} + +func TestSamba_Cleanup(t *testing.T) { + tests := map[string]struct { + prepare func() *Samba + }{ + "not initialized exec": { + prepare: func() *Samba { + return New() + }, + }, + "after check": { + prepare: func() *Samba { + smb := New() + smb.exec = prepareMockOk() + _ = smb.Check() + return smb + }, + }, + "after collect": { + prepare: func() *Samba { + smb := New() + smb.exec = prepareMockOk() + _ = smb.Collect() + return smb + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + smb := test.prepare() + + assert.NotPanics(t, smb.Cleanup) + }) + } +} + +func TestSambaCharts(t *testing.T) { + assert.NotNil(t, New().Charts()) +} + +func TestSamba_Check(t *testing.T) { + tests := map[string]struct { + prepareMock func() *mockSmbStatusBinary + wantFail bool + }{ + "success case": { + prepareMock: prepareMockOk, + wantFail: false, + }, + "error on exec": { + prepareMock: prepareMockErr, + wantFail: true, + }, + "empty response": { + prepareMock: prepareMockEmptyResponse, + wantFail: true, + }, + "unexpected response": { + prepareMock: prepareMockUnexpectedResponse, + wantFail: true, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + smb := New() + mock := test.prepareMock() + smb.exec = mock + + if test.wantFail { + assert.Error(t, smb.Check()) + } else { + assert.NoError(t, smb.Check()) + } + }) + } +} + +func TestSamba_Collect(t *testing.T) { + tests := map[string]struct { + prepareMock func() *mockSmbStatusBinary + wantMetrics map[string]int64 + wantCharts int + }{ + "success case": { + prepareMock: prepareMockOk, + wantCharts: 53 /*syscall count*/ + 8 /*syscall bytes*/ + (19 * 2), /*smb2calls count and bytes*/ + wantMetrics: map[string]int64{ + "smb2_break_count": 0, + "smb2_break_inbytes": 0, + "smb2_break_outbytes": 0, + "smb2_cancel_count": 0, + "smb2_cancel_inbytes": 0, + "smb2_cancel_outbytes": 0, + "smb2_close_count": 0, + "smb2_close_inbytes": 0, + "smb2_close_outbytes": 0, + "smb2_create_count": 0, + "smb2_create_inbytes": 0, + "smb2_create_outbytes": 0, + "smb2_find_count": 0, + "smb2_find_inbytes": 0, + "smb2_find_outbytes": 0, + "smb2_flush_count": 0, + "smb2_flush_inbytes": 0, + "smb2_flush_outbytes": 0, + "smb2_getinfo_count": 0, + "smb2_getinfo_inbytes": 0, + "smb2_getinfo_outbytes": 0, + "smb2_ioctl_count": 0, + "smb2_ioctl_inbytes": 0, + "smb2_ioctl_outbytes": 0, + "smb2_keepalive_count": 0, + "smb2_keepalive_inbytes": 0, + "smb2_keepalive_outbytes": 0, + "smb2_lock_count": 0, + "smb2_lock_inbytes": 0, + "smb2_lock_outbytes": 0, + "smb2_logoff_count": 0, + "smb2_logoff_inbytes": 0, + "smb2_logoff_outbytes": 0, + "smb2_negprot_count": 0, + "smb2_negprot_inbytes": 0, + "smb2_negprot_outbytes": 0, + "smb2_notify_count": 0, + "smb2_notify_inbytes": 0, + "smb2_notify_outbytes": 0, + "smb2_read_count": 0, + "smb2_read_inbytes": 0, + "smb2_read_outbytes": 0, + "smb2_sesssetup_count": 0, + "smb2_sesssetup_inbytes": 0, + "smb2_sesssetup_outbytes": 0, + "smb2_setinfo_count": 0, + "smb2_setinfo_inbytes": 0, + "smb2_setinfo_outbytes": 0, + "smb2_tcon_count": 0, + "smb2_tcon_inbytes": 0, + "smb2_tcon_outbytes": 0, + "smb2_tdis_count": 0, + "smb2_tdis_inbytes": 0, + "smb2_tdis_outbytes": 0, + "smb2_write_count": 0, + 
"smb2_write_inbytes": 0, + "smb2_write_outbytes": 0, + "syscall_asys_fsync_bytes": 0, + "syscall_asys_fsync_count": 0, + "syscall_asys_getxattrat_bytes": 0, + "syscall_asys_getxattrat_count": 0, + "syscall_asys_pread_bytes": 0, + "syscall_asys_pread_count": 0, + "syscall_asys_pwrite_bytes": 0, + "syscall_asys_pwrite_count": 0, + "syscall_brl_cancel_count": 0, + "syscall_brl_lock_count": 0, + "syscall_brl_unlock_count": 0, + "syscall_chdir_count": 0, + "syscall_chmod_count": 0, + "syscall_close_count": 0, + "syscall_closedir_count": 0, + "syscall_createfile_count": 0, + "syscall_fallocate_count": 0, + "syscall_fchmod_count": 0, + "syscall_fchown_count": 0, + "syscall_fcntl_count": 0, + "syscall_fcntl_getlock_count": 0, + "syscall_fcntl_lock_count": 0, + "syscall_fdopendir_count": 0, + "syscall_fntimes_count": 0, + "syscall_fstat_count": 0, + "syscall_fstatat_count": 0, + "syscall_ftruncate_count": 0, + "syscall_get_alloc_size_count": 0, + "syscall_get_quota_count": 0, + "syscall_get_sd_count": 0, + "syscall_getwd_count": 0, + "syscall_lchown_count": 0, + "syscall_linkat_count": 0, + "syscall_linux_setlease_count": 0, + "syscall_lseek_count": 0, + "syscall_lstat_count": 0, + "syscall_mkdirat_count": 0, + "syscall_mknodat_count": 0, + "syscall_open_count": 0, + "syscall_openat_count": 0, + "syscall_opendir_count": 0, + "syscall_pread_bytes": 0, + "syscall_pread_count": 0, + "syscall_pwrite_bytes": 0, + "syscall_pwrite_count": 0, + "syscall_readdir_count": 0, + "syscall_readlinkat_count": 0, + "syscall_realpath_count": 0, + "syscall_recvfile_bytes": 0, + "syscall_recvfile_count": 0, + "syscall_renameat_count": 0, + "syscall_rewinddir_count": 0, + "syscall_seekdir_count": 0, + "syscall_sendfile_bytes": 0, + "syscall_sendfile_count": 0, + "syscall_set_quota_count": 0, + "syscall_set_sd_count": 0, + "syscall_stat_count": 0, + "syscall_symlinkat_count": 0, + "syscall_telldir_count": 0, + "syscall_unlinkat_count": 0, + }, + }, + "error on exec": { + prepareMock: prepareMockErr, + wantMetrics: nil, + }, + "empty response": { + prepareMock: prepareMockEmptyResponse, + wantMetrics: nil, + }, + "unexpected response": { + prepareMock: prepareMockUnexpectedResponse, + wantMetrics: nil, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + smb := New() + mock := test.prepareMock() + smb.exec = mock + + mx := smb.Collect() + + assert.Equal(t, test.wantMetrics, mx) + + if len(test.wantMetrics) > 0 { + assert.Len(t, *smb.Charts(), test.wantCharts, "want charts") + module.TestMetricsHasAllChartsDims(t, smb.Charts(), mx) + } + }) + } +} + +func prepareMockOk() *mockSmbStatusBinary { + return &mockSmbStatusBinary{ + data: dataSmbStatusProfile, + } +} + +func prepareMockErr() *mockSmbStatusBinary { + return &mockSmbStatusBinary{ + err: true, + } +} + +func prepareMockEmptyResponse() *mockSmbStatusBinary { + return &mockSmbStatusBinary{} +} + +func prepareMockUnexpectedResponse() *mockSmbStatusBinary { + return &mockSmbStatusBinary{ + data: []byte(` +Lorem ipsum dolor sit amet, consectetur adipiscing elit. +Nulla malesuada erat id magna mattis, eu viverra tellus rhoncus. +Fusce et felis pulvinar, posuere sem non, porttitor eros. 
+`), + } +} + +type mockSmbStatusBinary struct { + err bool + data []byte +} + +func (m *mockSmbStatusBinary) profile() ([]byte, error) { + if m.err { + return nil, errors.New("mock.profile() error") + } + return m.data, nil +} diff --git a/src/go/plugin/go.d/modules/samba/testdata/config.json b/src/go/plugin/go.d/modules/samba/testdata/config.json new file mode 100644 index 000000000..291ecee3d --- /dev/null +++ b/src/go/plugin/go.d/modules/samba/testdata/config.json @@ -0,0 +1,4 @@ +{ + "update_every": 123, + "timeout": 123.123 +} diff --git a/src/go/plugin/go.d/modules/samba/testdata/config.yaml b/src/go/plugin/go.d/modules/samba/testdata/config.yaml new file mode 100644 index 000000000..25b0b4c78 --- /dev/null +++ b/src/go/plugin/go.d/modules/samba/testdata/config.yaml @@ -0,0 +1,2 @@ +update_every: 123 +timeout: 123.123 diff --git a/src/go/plugin/go.d/modules/samba/testdata/smbstatus-profile.txt b/src/go/plugin/go.d/modules/samba/testdata/smbstatus-profile.txt new file mode 100644 index 000000000..989b8fc1b --- /dev/null +++ b/src/go/plugin/go.d/modules/samba/testdata/smbstatus-profile.txt @@ -0,0 +1,451 @@ +**** SMBD loop **************************************************************** +connect_count: 0 +disconnect_count: 0 +idle_count: 0 +idle_time: 0 +cpu_user_time: 0 +cpu_system_time: 0 +request_count: 0 +push_sec_ctx_count: 0 +push_sec_ctx_time: 0 +set_sec_ctx_count: 0 +set_sec_ctx_time: 0 +set_root_sec_ctx_count: 0 +set_root_sec_ctx_time: 0 +pop_sec_ctx_count: 0 +pop_sec_ctx_time: 0 +**** System Calls ************************************************************* +syscall_opendir_count: 0 +syscall_opendir_time: 0 +syscall_fdopendir_count: 0 +syscall_fdopendir_time: 0 +syscall_readdir_count: 0 +syscall_readdir_time: 0 +syscall_seekdir_count: 0 +syscall_seekdir_time: 0 +syscall_telldir_count: 0 +syscall_telldir_time: 0 +syscall_rewinddir_count: 0 +syscall_rewinddir_time: 0 +syscall_mkdirat_count: 0 +syscall_mkdirat_time: 0 +syscall_closedir_count: 0 +syscall_closedir_time: 0 +syscall_open_count: 0 +syscall_open_time: 0 +syscall_openat_count: 0 +syscall_openat_time: 0 +syscall_createfile_count: 0 +syscall_createfile_time: 0 +syscall_close_count: 0 +syscall_close_time: 0 +syscall_pread_count: 0 +syscall_pread_time: 0 +syscall_pread_idle: 0 +syscall_pread_bytes: 0 +syscall_asys_pread_count: 0 +syscall_asys_pread_time: 0 +syscall_asys_pread_idle: 0 +syscall_asys_pread_bytes: 0 +syscall_pwrite_count: 0 +syscall_pwrite_time: 0 +syscall_pwrite_idle: 0 +syscall_pwrite_bytes: 0 +syscall_asys_pwrite_count: 0 +syscall_asys_pwrite_time: 0 +syscall_asys_pwrite_idle: 0 +syscall_asys_pwrite_bytes: 0 +syscall_lseek_count: 0 +syscall_lseek_time: 0 +syscall_sendfile_count: 0 +syscall_sendfile_time: 0 +syscall_sendfile_idle: 0 +syscall_sendfile_bytes: 0 +syscall_recvfile_count: 0 +syscall_recvfile_time: 0 +syscall_recvfile_idle: 0 +syscall_recvfile_bytes: 0 +syscall_renameat_count: 0 +syscall_renameat_time: 0 +syscall_asys_fsync_count: 0 +syscall_asys_fsync_time: 0 +syscall_asys_fsync_idle: 0 +syscall_asys_fsync_bytes: 0 +syscall_stat_count: 0 +syscall_stat_time: 0 +syscall_fstat_count: 0 +syscall_fstat_time: 0 +syscall_lstat_count: 0 +syscall_lstat_time: 0 +syscall_fstatat_count: 0 +syscall_fstatat_time: 0 +syscall_get_alloc_size_count: 0 +syscall_get_alloc_size_time: 0 +syscall_unlinkat_count: 0 +syscall_unlinkat_time: 0 +syscall_chmod_count: 0 +syscall_chmod_time: 0 +syscall_fchmod_count: 0 +syscall_fchmod_time: 0 +syscall_fchown_count: 0 +syscall_fchown_time: 0 +syscall_lchown_count: 0 
+syscall_lchown_time: 0 +syscall_chdir_count: 0 +syscall_chdir_time: 0 +syscall_getwd_count: 0 +syscall_getwd_time: 0 +syscall_fntimes_count: 0 +syscall_fntimes_time: 0 +syscall_ftruncate_count: 0 +syscall_ftruncate_time: 0 +syscall_fallocate_count: 0 +syscall_fallocate_time: 0 +syscall_fcntl_lock_count: 0 +syscall_fcntl_lock_time: 0 +syscall_fcntl_count: 0 +syscall_fcntl_time: 0 +syscall_linux_setlease_count: 0 +syscall_linux_setlease_time: 0 +syscall_fcntl_getlock_count: 0 +syscall_fcntl_getlock_time: 0 +syscall_readlinkat_count: 0 +syscall_readlinkat_time: 0 +syscall_symlinkat_count: 0 +syscall_symlinkat_time: 0 +syscall_linkat_count: 0 +syscall_linkat_time: 0 +syscall_mknodat_count: 0 +syscall_mknodat_time: 0 +syscall_realpath_count: 0 +syscall_realpath_time: 0 +syscall_get_quota_count: 0 +syscall_get_quota_time: 0 +syscall_set_quota_count: 0 +syscall_set_quota_time: 0 +syscall_get_sd_count: 0 +syscall_get_sd_time: 0 +syscall_set_sd_count: 0 +syscall_set_sd_time: 0 +syscall_brl_lock_count: 0 +syscall_brl_lock_time: 0 +syscall_brl_unlock_count: 0 +syscall_brl_unlock_time: 0 +syscall_brl_cancel_count: 0 +syscall_brl_cancel_time: 0 +syscall_asys_getxattrat_count: 0 +syscall_asys_getxattrat_time: 0 +syscall_asys_getxattrat_idle: 0 +syscall_asys_getxattrat_bytes: 0 +**** ACL Calls **************************************************************** +get_nt_acl_count: 0 +get_nt_acl_time: 0 +get_nt_acl_at_count: 0 +get_nt_acl_at_time: 0 +fget_nt_acl_count: 0 +fget_nt_acl_time: 0 +fset_nt_acl_count: 0 +fset_nt_acl_time: 0 +**** Stat Cache *************************************************************** +statcache_lookups_count: 0 +statcache_misses_count: 0 +statcache_hits_count: 0 +**** SMB Calls **************************************************************** +SMBmkdir_count: 0 +SMBmkdir_time: 0 +SMBrmdir_count: 0 +SMBrmdir_time: 0 +SMBopen_count: 0 +SMBopen_time: 0 +SMBcreate_count: 0 +SMBcreate_time: 0 +SMBclose_count: 0 +SMBclose_time: 0 +SMBflush_count: 0 +SMBflush_time: 0 +SMBunlink_count: 0 +SMBunlink_time: 0 +SMBmv_count: 0 +SMBmv_time: 0 +SMBgetatr_count: 0 +SMBgetatr_time: 0 +SMBsetatr_count: 0 +SMBsetatr_time: 0 +SMBread_count: 0 +SMBread_time: 0 +SMBwrite_count: 0 +SMBwrite_time: 0 +SMBlock_count: 0 +SMBlock_time: 0 +SMBunlock_count: 0 +SMBunlock_time: 0 +SMBctemp_count: 0 +SMBctemp_time: 0 +SMBmknew_count: 0 +SMBmknew_time: 0 +SMBcheckpath_count: 0 +SMBcheckpath_time: 0 +SMBexit_count: 0 +SMBexit_time: 0 +SMBlseek_count: 0 +SMBlseek_time: 0 +SMBlockread_count: 0 +SMBlockread_time: 0 +SMBwriteunlock_count: 0 +SMBwriteunlock_time: 0 +SMBreadbraw_count: 0 +SMBreadbraw_time: 0 +SMBreadBmpx_count: 0 +SMBreadBmpx_time: 0 +SMBreadBs_count: 0 +SMBreadBs_time: 0 +SMBwritebraw_count: 0 +SMBwritebraw_time: 0 +SMBwriteBmpx_count: 0 +SMBwriteBmpx_time: 0 +SMBwriteBs_count: 0 +SMBwriteBs_time: 0 +SMBwritec_count: 0 +SMBwritec_time: 0 +SMBsetattrE_count: 0 +SMBsetattrE_time: 0 +SMBgetattrE_count: 0 +SMBgetattrE_time: 0 +SMBlockingX_count: 0 +SMBlockingX_time: 0 +SMBtrans_count: 0 +SMBtrans_time: 0 +SMBtranss_count: 0 +SMBtranss_time: 0 +SMBioctl_count: 0 +SMBioctl_time: 0 +SMBioctls_count: 0 +SMBioctls_time: 0 +SMBcopy_count: 0 +SMBcopy_time: 0 +SMBmove_count: 0 +SMBmove_time: 0 +SMBecho_count: 0 +SMBecho_time: 0 +SMBwriteclose_count: 0 +SMBwriteclose_time: 0 +SMBopenX_count: 0 +SMBopenX_time: 0 +SMBreadX_count: 0 +SMBreadX_time: 0 +SMBwriteX_count: 0 +SMBwriteX_time: 0 +SMBtrans2_count: 0 +SMBtrans2_time: 0 +SMBtranss2_count: 0 +SMBtranss2_time: 0 +SMBfindclose_count: 0 +SMBfindclose_time: 0 
+SMBfindnclose_count: 0 +SMBfindnclose_time: 0 +SMBtcon_count: 0 +SMBtcon_time: 0 +SMBtdis_count: 0 +SMBtdis_time: 0 +SMBnegprot_count: 0 +SMBnegprot_time: 0 +SMBsesssetupX_count: 0 +SMBsesssetupX_time: 0 +SMBulogoffX_count: 0 +SMBulogoffX_time: 0 +SMBtconX_count: 0 +SMBtconX_time: 0 +SMBdskattr_count: 0 +SMBdskattr_time: 0 +SMBsearch_count: 0 +SMBsearch_time: 0 +SMBffirst_count: 0 +SMBffirst_time: 0 +SMBfunique_count: 0 +SMBfunique_time: 0 +SMBfclose_count: 0 +SMBfclose_time: 0 +SMBnttrans_count: 0 +SMBnttrans_time: 0 +SMBnttranss_count: 0 +SMBnttranss_time: 0 +SMBntcreateX_count: 0 +SMBntcreateX_time: 0 +SMBntcancel_count: 0 +SMBntcancel_time: 0 +SMBntrename_count: 0 +SMBntrename_time: 0 +SMBsplopen_count: 0 +SMBsplopen_time: 0 +SMBsplwr_count: 0 +SMBsplwr_time: 0 +SMBsplclose_count: 0 +SMBsplclose_time: 0 +SMBsplretq_count: 0 +SMBsplretq_time: 0 +SMBsends_count: 0 +SMBsends_time: 0 +SMBsendb_count: 0 +SMBsendb_time: 0 +SMBfwdname_count: 0 +SMBfwdname_time: 0 +SMBcancelf_count: 0 +SMBcancelf_time: 0 +SMBgetmac_count: 0 +SMBgetmac_time: 0 +SMBsendstrt_count: 0 +SMBsendstrt_time: 0 +SMBsendend_count: 0 +SMBsendend_time: 0 +SMBsendtxt_count: 0 +SMBsendtxt_time: 0 +SMBinvalid_count: 0 +SMBinvalid_time: 0 +**** Trans2 Calls ************************************************************* +Trans2_open_count: 0 +Trans2_open_time: 0 +Trans2_findfirst_count: 0 +Trans2_findfirst_time: 0 +Trans2_findnext_count: 0 +Trans2_findnext_time: 0 +Trans2_qfsinfo_count: 0 +Trans2_qfsinfo_time: 0 +Trans2_setfsinfo_count: 0 +Trans2_setfsinfo_time: 0 +Trans2_qpathinfo_count: 0 +Trans2_qpathinfo_time: 0 +Trans2_setpathinfo_count: 0 +Trans2_setpathinfo_time: 0 +Trans2_qfileinfo_count: 0 +Trans2_qfileinfo_time: 0 +Trans2_setfileinfo_count: 0 +Trans2_setfileinfo_time: 0 +Trans2_fsctl_count: 0 +Trans2_fsctl_time: 0 +Trans2_ioctl_count: 0 +Trans2_ioctl_time: 0 +Trans2_findnotifyfirst_count: 0 +Trans2_findnotifyfirst_time: 0 +Trans2_findnotifynext_count: 0 +Trans2_findnotifynext_time: 0 +Trans2_mkdir_count: 0 +Trans2_mkdir_time: 0 +Trans2_session_setup_count: 0 +Trans2_session_setup_time: 0 +Trans2_get_dfs_referral_count: 0 +Trans2_get_dfs_referral_time: 0 +Trans2_report_dfs_inconsistancy_count: 0 +Trans2_report_dfs_inconsistancy_time: 0 +**** NT Transact Calls ******************************************************** +NT_transact_create_count: 0 +NT_transact_create_time: 0 +NT_transact_ioctl_count: 0 +NT_transact_ioctl_time: 0 +NT_transact_set_security_desc_count: 0 +NT_transact_set_security_desc_time: 0 +NT_transact_notify_change_count: 0 +NT_transact_notify_change_time: 0 +NT_transact_rename_count: 0 +NT_transact_rename_time: 0 +NT_transact_query_security_desc_count: 0 +NT_transact_query_security_desc_time: 0 +NT_transact_get_user_quota_count: 0 +NT_transact_get_user_quota_time: 0 +NT_transact_set_user_quota_count: 0 +NT_transact_set_user_quota_time: 0 +**** SMB2 Calls *************************************************************** +smb2_negprot_count: 0 +smb2_negprot_time: 0 +smb2_negprot_idle: 0 +smb2_negprot_inbytes: 0 +smb2_negprot_outbytes: 0 +smb2_sesssetup_count: 0 +smb2_sesssetup_time: 0 +smb2_sesssetup_idle: 0 +smb2_sesssetup_inbytes: 0 +smb2_sesssetup_outbytes: 0 +smb2_logoff_count: 0 +smb2_logoff_time: 0 +smb2_logoff_idle: 0 +smb2_logoff_inbytes: 0 +smb2_logoff_outbytes: 0 +smb2_tcon_count: 0 +smb2_tcon_time: 0 +smb2_tcon_idle: 0 +smb2_tcon_inbytes: 0 +smb2_tcon_outbytes: 0 +smb2_tdis_count: 0 +smb2_tdis_time: 0 +smb2_tdis_idle: 0 +smb2_tdis_inbytes: 0 +smb2_tdis_outbytes: 0 +smb2_create_count: 0 
+smb2_create_time: 0 +smb2_create_idle: 0 +smb2_create_inbytes: 0 +smb2_create_outbytes: 0 +smb2_close_count: 0 +smb2_close_time: 0 +smb2_close_idle: 0 +smb2_close_inbytes: 0 +smb2_close_outbytes: 0 +smb2_flush_count: 0 +smb2_flush_time: 0 +smb2_flush_idle: 0 +smb2_flush_inbytes: 0 +smb2_flush_outbytes: 0 +smb2_read_count: 0 +smb2_read_time: 0 +smb2_read_idle: 0 +smb2_read_inbytes: 0 +smb2_read_outbytes: 0 +smb2_write_count: 0 +smb2_write_time: 0 +smb2_write_idle: 0 +smb2_write_inbytes: 0 +smb2_write_outbytes: 0 +smb2_lock_count: 0 +smb2_lock_time: 0 +smb2_lock_idle: 0 +smb2_lock_inbytes: 0 +smb2_lock_outbytes: 0 +smb2_ioctl_count: 0 +smb2_ioctl_time: 0 +smb2_ioctl_idle: 0 +smb2_ioctl_inbytes: 0 +smb2_ioctl_outbytes: 0 +smb2_cancel_count: 0 +smb2_cancel_time: 0 +smb2_cancel_idle: 0 +smb2_cancel_inbytes: 0 +smb2_cancel_outbytes: 0 +smb2_keepalive_count: 0 +smb2_keepalive_time: 0 +smb2_keepalive_idle: 0 +smb2_keepalive_inbytes: 0 +smb2_keepalive_outbytes: 0 +smb2_find_count: 0 +smb2_find_time: 0 +smb2_find_idle: 0 +smb2_find_inbytes: 0 +smb2_find_outbytes: 0 +smb2_notify_count: 0 +smb2_notify_time: 0 +smb2_notify_idle: 0 +smb2_notify_inbytes: 0 +smb2_notify_outbytes: 0 +smb2_getinfo_count: 0 +smb2_getinfo_time: 0 +smb2_getinfo_idle: 0 +smb2_getinfo_inbytes: 0 +smb2_getinfo_outbytes: 0 +smb2_setinfo_count: 0 +smb2_setinfo_time: 0 +smb2_setinfo_idle: 0 +smb2_setinfo_inbytes: 0 +smb2_setinfo_outbytes: 0 +smb2_break_count: 0 +smb2_break_time: 0 +smb2_break_idle: 0 +smb2_break_inbytes: 0 +smb2_break_outbytes: 0 diff --git a/src/go/plugin/go.d/modules/scaleio/client/client.go b/src/go/plugin/go.d/modules/scaleio/client/client.go index 698b2d174..cc07580b8 100644 --- a/src/go/plugin/go.d/modules/scaleio/client/client.go +++ b/src/go/plugin/go.d/modules/scaleio/client/client.go @@ -74,7 +74,7 @@ Relationships: */ // New creates new ScaleIO client. -func New(client web.Client, request web.Request) (*Client, error) { +func New(client web.ClientConfig, request web.RequestConfig) (*Client, error) { httpClient, err := web.NewHTTPClient(client) if err != nil { return nil, err @@ -88,7 +88,7 @@ func New(client web.Client, request web.Request) (*Client, error) { // Client represents ScaleIO client. 
type Client struct { - Request web.Request + Request web.RequestConfig httpClient *http.Client token *token } @@ -105,7 +105,7 @@ func (c *Client) Login() error { } req := c.createLoginRequest() resp, err := c.doOK(req) - defer closeBody(resp) + defer web.CloseBody(resp) if err != nil { return err } @@ -128,7 +128,7 @@ func (c *Client) Logout() error { c.token.unset() resp, err := c.do(req) - defer closeBody(resp) + defer web.CloseBody(resp) return err } @@ -136,7 +136,7 @@ func (c *Client) Logout() error { func (c *Client) APIVersion() (Version, error) { req := c.createAPIVersionRequest() resp, err := c.doOK(req) - defer closeBody(resp) + defer web.CloseBody(resp) if err != nil { return Version{}, err } @@ -160,7 +160,7 @@ func (c *Client) Instances() (Instances, error) { return instances, err } -func (c *Client) createLoginRequest() web.Request { +func (c *Client) createLoginRequest() web.RequestConfig { req := c.Request.Copy() u, _ := url.Parse(req.URL) u.Path = path.Join(u.Path, "/api/login") @@ -168,7 +168,7 @@ func (c *Client) createLoginRequest() web.Request { return req } -func (c *Client) createLogoutRequest() web.Request { +func (c *Client) createLogoutRequest() web.RequestConfig { req := c.Request.Copy() u, _ := url.Parse(req.URL) u.Path = path.Join(u.Path, "/api/logout") @@ -177,7 +177,7 @@ func (c *Client) createLogoutRequest() web.Request { return req } -func (c *Client) createAPIVersionRequest() web.Request { +func (c *Client) createAPIVersionRequest() web.RequestConfig { req := c.Request.Copy() u, _ := url.Parse(req.URL) u.Path = path.Join(u.Path, "/api/version") @@ -186,7 +186,7 @@ func (c *Client) createAPIVersionRequest() web.Request { return req } -func (c *Client) createSelectedStatisticsRequest(query []byte) web.Request { +func (c *Client) createSelectedStatisticsRequest(query []byte) web.RequestConfig { req := c.Request.Copy() u, _ := url.Parse(req.URL) u.Path = path.Join(u.Path, "/api/instances/querySelectedStatistics") @@ -200,7 +200,7 @@ func (c *Client) createSelectedStatisticsRequest(query []byte) web.Request { return req } -func (c *Client) createInstancesRequest() web.Request { +func (c *Client) createInstancesRequest() web.RequestConfig { req := c.Request.Copy() u, _ := url.Parse(req.URL) u.Path = path.Join(u.Path, "/api/instances") @@ -209,7 +209,7 @@ func (c *Client) createInstancesRequest() web.Request { return req } -func (c *Client) do(req web.Request) (*http.Response, error) { +func (c *Client) do(req web.RequestConfig) (*http.Response, error) { httpReq, err := web.NewHTTPRequest(req) if err != nil { return nil, fmt.Errorf("error on creating http request to %s: %v", req.URL, err) @@ -217,7 +217,7 @@ func (c *Client) do(req web.Request) (*http.Response, error) { return c.httpClient.Do(httpReq) } -func (c *Client) doOK(req web.Request) (*http.Response, error) { +func (c *Client) doOK(req web.RequestConfig) (*http.Response, error) { resp, err := c.do(req) if err != nil { return nil, err @@ -228,7 +228,7 @@ func (c *Client) doOK(req web.Request) (*http.Response, error) { return resp, err } -func (c *Client) doOKWithRetry(req web.Request) (*http.Response, error) { +func (c *Client) doOKWithRetry(req web.RequestConfig) (*http.Response, error) { resp, err := c.do(req) if err != nil { return nil, err @@ -246,22 +246,15 @@ func (c *Client) doOKWithRetry(req web.Request) (*http.Response, error) { return resp, err } -func (c *Client) doJSONWithRetry(dst interface{}, req web.Request) error { +func (c *Client) doJSONWithRetry(dst any, req web.RequestConfig) error { 
resp, err := c.doOKWithRetry(req) - defer closeBody(resp) + defer web.CloseBody(resp) if err != nil { return err } return json.NewDecoder(resp.Body).Decode(dst) } -func closeBody(resp *http.Response) { - if resp != nil && resp.Body != nil { - _, _ = io.Copy(io.Discard, resp.Body) - _ = resp.Body.Close() - } -} - func checkStatusCode(resp *http.Response) error { // For all 4xx and 5xx return codes, the body may contain an apiError // instance with more specifics about the failure. diff --git a/src/go/plugin/go.d/modules/scaleio/client/client_test.go b/src/go/plugin/go.d/modules/scaleio/client/client_test.go index 02e1988b0..c8da45fdf 100644 --- a/src/go/plugin/go.d/modules/scaleio/client/client_test.go +++ b/src/go/plugin/go.d/modules/scaleio/client/client_test.go @@ -13,7 +13,7 @@ import ( ) func TestNew(t *testing.T) { - _, err := New(web.Client{}, web.Request{}) + _, err := New(web.ClientConfig{}, web.RequestConfig{}) assert.NoError(t, err) } @@ -110,7 +110,7 @@ func prepareSrvClient(t *testing.T) (*httptest.Server, *Client) { Instances: testInstances, Statistics: testStatistics, }) - client, err := New(web.Client{}, web.Request{ + client, err := New(web.ClientConfig{}, web.RequestConfig{ URL: srv.URL, Username: testUser, Password: testPassword, diff --git a/src/go/plugin/go.d/modules/scaleio/config_schema.json b/src/go/plugin/go.d/modules/scaleio/config_schema.json index 97aea7faf..bfd14d7d4 100644 --- a/src/go/plugin/go.d/modules/scaleio/config_schema.json +++ b/src/go/plugin/go.d/modules/scaleio/config_schema.json @@ -105,7 +105,6 @@ "required": [ "url" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/scaleio/integrations/dell_emc_scaleio.md b/src/go/plugin/go.d/modules/scaleio/integrations/dell_emc_scaleio.md index 36d022526..bee56ca27 100644 --- a/src/go/plugin/go.d/modules/scaleio/integrations/dell_emc_scaleio.md +++ b/src/go/plugin/go.d/modules/scaleio/integrations/dell_emc_scaleio.md @@ -143,8 +143,8 @@ No action required. The configuration file name for this integration is `go.d/scaleio.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/scaleio/scaleio.go b/src/go/plugin/go.d/modules/scaleio/scaleio.go index d32ccbffe..98bc110c3 100644 --- a/src/go/plugin/go.d/modules/scaleio/scaleio.go +++ b/src/go/plugin/go.d/modules/scaleio/scaleio.go @@ -5,10 +5,12 @@ package scaleio import ( _ "embed" "errors" + "fmt" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/scaleio/client" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -26,12 +28,12 @@ func init() { func New() *ScaleIO { return &ScaleIO{ Config: Config{ - HTTP: web.HTTP{ - Request: web.Request{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{ URL: "https://127.0.0.1", }, - Client: web.Client{ - Timeout: web.Duration(time.Second), + ClientConfig: web.ClientConfig{ + Timeout: confopt.Duration(time.Second), }, }, }, @@ -41,8 +43,8 @@ func New() *ScaleIO { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - web.HTTP `yaml:",inline" json:""` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + web.HTTPConfig `yaml:",inline" json:""` } type ( @@ -71,14 +73,12 @@ func (s *ScaleIO) Configuration() any { func (s *ScaleIO) Init() error { if s.Username == "" || s.Password == "" { - s.Error("username and password aren't set") - return errors.New("username and password aren't set") + return errors.New("config: username and password aren't set") } - c, err := client.New(s.Client, s.Request) + c, err := client.New(s.ClientConfig, s.RequestConfig) if err != nil { - s.Errorf("error on creating ScaleIO client: %v", err) - return err + return fmt.Errorf("error on creating ScaleIO client: %v", err) } s.client = c @@ -90,12 +90,10 @@ func (s *ScaleIO) Init() error { func (s *ScaleIO) Check() error { if err := s.client.Login(); err != nil { - s.Error(err) return err } mx, err := s.collect() if err != nil { - s.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/scaleio/scaleio_test.go b/src/go/plugin/go.d/modules/scaleio/scaleio_test.go index bb906333e..66752ebc8 100644 --- a/src/go/plugin/go.d/modules/scaleio/scaleio_test.go +++ b/src/go/plugin/go.d/modules/scaleio/scaleio_test.go @@ -53,7 +53,7 @@ func TestScaleIO_Init_ErrorOnCreatingClientWrongTLSCA(t *testing.T) { job := New() job.Username = "username" job.Password = "password" - job.Client.TLSConfig.TLSCA = "testdata/tls" + job.ClientConfig.TLSConfig.TLSCA = "testdata/tls" assert.Error(t, job.Init()) } @@ -298,9 +298,11 @@ func TestScaleIO_Collect(t *testing.T) { "system_total_iops_write": 617200, } - collected := scaleIO.Collect() - assert.Equal(t, expected, collected) - testCharts(t, scaleIO, collected) + mx := scaleIO.Collect() + + assert.Equal(t, expected, mx) + + testCharts(t, scaleIO, mx) } func TestScaleIO_Collect_ConnectionRefused(t *testing.T) { @@ -317,7 +319,7 @@ func testCharts(t *testing.T, scaleIO *ScaleIO, collected map[string]int64) { t.Helper() ensureStoragePoolChartsAreCreated(t, scaleIO) ensureSdcChartsAreCreated(t, scaleIO) - ensureCollectedHasAllChartsDimsVarsIDs(t, scaleIO, collected) + module.TestMetricsHasAllChartsDims(t, scaleIO.Charts(), collected) } func ensureStoragePoolChartsAreCreated(t *testing.T, scaleIO *ScaleIO) { @@ -336,19 +338,6 @@ func ensureSdcChartsAreCreated(t *testing.T, scaleIO *ScaleIO) { } } -func 
ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, scaleIO *ScaleIO, collected map[string]int64) { - for _, chart := range *scaleIO.Charts() { - for _, dim := range chart.Dims { - _, ok := collected[dim.ID] - assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) - } - for _, v := range chart.Vars { - _, ok := collected[v.ID] - assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) - } - } -} - func prepareSrvMockScaleIO(t *testing.T) (*httptest.Server, *client.MockScaleIOAPIServer, *ScaleIO) { t.Helper() const ( diff --git a/src/go/plugin/go.d/modules/sensors/README.md b/src/go/plugin/go.d/modules/sensors/README.md index 4e92b0882..c22195a32 120000 --- a/src/go/plugin/go.d/modules/sensors/README.md +++ b/src/go/plugin/go.d/modules/sensors/README.md @@ -1 +1 @@ -integrations/linux_sensors_lm-sensors.md \ No newline at end of file +integrations/linux_sensors.md \ No newline at end of file diff --git a/src/go/plugin/go.d/modules/sensors/charts.go b/src/go/plugin/go.d/modules/sensors/charts.go index 05081e1ad..bfb17bfd4 100644 --- a/src/go/plugin/go.d/modules/sensors/charts.go +++ b/src/go/plugin/go.d/modules/sensors/charts.go @@ -1,154 +1,519 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux + package sensors import ( "fmt" "strings" + "github.com/netdata/netdata/go/plugins/pkg/matcher" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/sensors/lmsensors" ) const ( - prioSensorTemperature = module.Priority + iota - prioSensorVoltage - prioSensorCurrent - prioSensorPower - prioSensorFan - prioSensorEnergy - prioSensorHumidity + prioTemperatureSensorInput = module.Priority + iota + prioTemperatureSensorAlarm + + prioVoltageSensorInput + prioVoltageSensorAverage + prioVoltageSensorAlarm + + prioFanSensorInput + prioFanSensorAlarm + + prioCurrentSensorInput + prioCurrentSensorAverage + prioCurrentSensorAlarm + + prioPowerSensorInput + prioPowerSensorAverage + prioPowerSensorAlarm + + prioEnergySensorInput + + prioHumiditySensorInput + + prioIntrusionSensorAlarm ) -var sensorTemperatureChartTmpl = module.Chart{ - ID: "sensor_chip_%s_feature_%s_subfeature_%s_temperature", - Title: "Sensor temperature", - Units: "Celsius", - Fam: "temperature", - Ctx: "sensors.sensor_temperature", - Type: module.Line, - Priority: prioSensorTemperature, - Dims: module.Dims{ - {ID: "sensor_chip_%s_feature_%s_subfeature_%s", Name: "temperature", Div: precision}, - }, -} - -var sensorVoltageChartTmpl = module.Chart{ - ID: "sensor_chip_%s_feature_%s_subfeature_%s_voltage", - Title: "Sensor voltage", - Units: "Volts", - Fam: "voltage", - Ctx: "sensors.sensor_voltage", - Type: module.Line, - Priority: prioSensorVoltage, - Dims: module.Dims{ - {ID: "sensor_chip_%s_feature_%s_subfeature_%s", Name: "voltage", Div: precision}, - }, -} - -var sensorCurrentChartTmpl = module.Chart{ - ID: "sensor_chip_%s_feature_%s_subfeature_%s_current", - Title: "Sensor current", - Units: "Amperes", - Fam: "current", - Ctx: "sensors.sensor_current", - Type: module.Line, - Priority: prioSensorCurrent, - Dims: module.Dims{ - {ID: "sensor_chip_%s_feature_%s_subfeature_%s", Name: "current", Div: precision}, - }, -} - -var sensorPowerChartTmpl = module.Chart{ - ID: "sensor_chip_%s_feature_%s_subfeature_%s_power", - Title: "Sensor power", - Units: "Watts", - Fam: "power", - Ctx: "sensors.sensor_power", - Type: module.Line, - Priority: prioSensorPower, - Dims: module.Dims{ - {ID: 
"sensor_chip_%s_feature_%s_subfeature_%s", Name: "power", Div: precision}, - }, -} - -var sensorFanChartTmpl = module.Chart{ - ID: "sensor_chip_%s_feature_%s_subfeature_%s_fan", - Title: "Sensor fan speed", - Units: "RPM", - Fam: "fan", - Ctx: "sensors.sensor_fan_speed", - Type: module.Line, - Priority: prioSensorFan, - Dims: module.Dims{ - {ID: "sensor_chip_%s_feature_%s_subfeature_%s", Name: "fan", Div: precision}, - }, -} - -var sensorEnergyChartTmpl = module.Chart{ - ID: "sensor_chip_%s_feature_%s_subfeature_%s_energy", - Title: "Sensor energy", - Units: "Joules", - Fam: "energy", - Ctx: "sensors.sensor_energy", - Type: module.Line, - Priority: prioSensorEnergy, - Dims: module.Dims{ - {ID: "sensor_chip_%s_feature_%s_subfeature_%s", Name: "energy", Div: precision}, - }, -} - -var sensorHumidityChartTmpl = module.Chart{ - ID: "sensor_chip_%s_feature_%s_subfeature_%s_humidity", - Title: "Sensor humidity", - Units: "percent", - Fam: "humidity", - Ctx: "sensors.sensor_humidity", - Type: module.Area, - Priority: prioSensorHumidity, - Dims: module.Dims{ - {ID: "sensor_chip_%s_feature_%s_subfeature_%s", Name: "humidity", Div: precision}, - }, -} - -func (s *Sensors) addSensorChart(sn sensorStats) { - var chart *module.Chart - - switch sensorType(sn) { - case sensorTypeTemp: - chart = sensorTemperatureChartTmpl.Copy() - case sensorTypeVoltage: - chart = sensorVoltageChartTmpl.Copy() - case sensorTypePower: - chart = sensorPowerChartTmpl.Copy() - case sensorTypeHumidity: - chart = sensorHumidityChartTmpl.Copy() - case sensorTypeFan: - chart = sensorFanChartTmpl.Copy() - case sensorTypeCurrent: - chart = sensorCurrentChartTmpl.Copy() - case sensorTypeEnergy: - chart = sensorEnergyChartTmpl.Copy() - default: - return +var temperatureSensorChartsTmpl = module.Charts{ + temperatureSensorInputChartTmpl.Copy(), + temperatureSensorAlarmChartTmpl.Copy(), +} + +var voltageSensorChartsTmpl = module.Charts{ + voltageSensorInputChartTmpl.Copy(), + voltageSensorAverageChartTmpl.Copy(), + voltageSensorAlarmChartTmpl.Copy(), +} + +var fanSensorChartsTmpl = module.Charts{ + fanSensorInputChartTmpl.Copy(), + fanSensorAlarmChartTmpl.Copy(), +} + +var currentSensorChartsTmpl = module.Charts{ + currentSensorInputChartTmpl.Copy(), + currentSensorAverageChartTmpl.Copy(), + currentSensorAlarmChartTmpl.Copy(), +} + +var powerSensorChartsTmpl = module.Charts{ + powerSensorInputChartTmpl.Copy(), + powerSensorAverageChartTmpl.Copy(), + powerSensorAlarmChartTmpl.Copy(), +} + +var energySensorChartsTmpl = module.Charts{ + energySensorInputChartTmpl.Copy(), +} + +var humiditySensorChartsTmpl = module.Charts{ + humiditySensorInputChartTmpl.Copy(), +} + +var intrusionSensorChartsTmpl = module.Charts{ + intrusionSensorAlarmChartTmpl.Copy(), +} + +var ( + temperatureSensorInputChartTmpl = module.Chart{ + ID: "%s_%s_temperature", + Title: "Sensor Temperature", + Units: "Celsius", + Fam: "temperature", + Ctx: "sensors.chip_sensor_temperature", + Type: module.Line, + Priority: prioTemperatureSensorInput, + Dims: module.Dims{ + {ID: "chip_%s_sensor_%s_input", Name: "input", Div: precision}, + }, + } + temperatureSensorAlarmChartTmpl = module.Chart{ + ID: "%s_%s_temperature_alarm", + Title: "Temperature Sensor Alarm", + Units: "status", + Fam: "temperature", + Ctx: "sensors.chip_sensor_temperature_alarm", + Type: module.Line, + Priority: prioTemperatureSensorAlarm, + Dims: module.Dims{ + {ID: "chip_%s_sensor_%s_alarm_clear", Name: "clear"}, + {ID: "chip_%s_sensor_%s_alarm_triggered", Name: "triggered"}, + }, + } +) + +var ( + 
voltageSensorInputChartTmpl = module.Chart{ + ID: "%s_%s_voltage", + Title: "Sensor Voltage", + Units: "Volts", + Fam: "voltage", + Ctx: "sensors.chip_sensor_voltage", + Type: module.Line, + Priority: prioVoltageSensorInput, + Dims: module.Dims{ + {ID: "chip_%s_sensor_%s_input", Name: "input", Div: precision}, + }, + } + voltageSensorAverageChartTmpl = module.Chart{ + ID: "%s_%s_voltage_average", + Title: "Sensor Voltage Average", + Units: "Volts", + Fam: "voltage", + Ctx: "sensors.chip_sensor_voltage_average", + Type: module.Line, + Priority: prioVoltageSensorAverage, + Dims: module.Dims{ + {ID: "chip_%s_sensor_%s_average", Name: "average", Div: precision}, + }, + } + voltageSensorAlarmChartTmpl = module.Chart{ + ID: "%s_%s_voltage_alarm", + Title: "Voltage Sensor Alarm", + Units: "status", + Fam: "voltage", + Ctx: "sensors.chip_sensor_voltage_alarm", + Type: module.Line, + Priority: prioVoltageSensorAlarm, + Dims: module.Dims{ + {ID: "chip_%s_sensor_%s_alarm_clear", Name: "clear"}, + {ID: "chip_%s_sensor_%s_alarm_triggered", Name: "triggered"}, + }, + } +) + +var ( + fanSensorInputChartTmpl = module.Chart{ + ID: "%s_%s_fan", + Title: "Sensor Fan", + Units: "RPM", + Fam: "fan", + Ctx: "sensors.chip_sensor_fan", + Type: module.Line, + Priority: prioFanSensorInput, + Dims: module.Dims{ + {ID: "chip_%s_sensor_%s_input", Name: "input", Div: precision}, + }, + } + fanSensorAlarmChartTmpl = module.Chart{ + ID: "%s_%s_fan_alarm", + Title: "Fan Sensor Alarm", + Units: "status", + Fam: "fan", + Ctx: "sensors.chip_sensor_fan_alarm", + Type: module.Line, + Priority: prioFanSensorAlarm, + Dims: module.Dims{ + {ID: "chip_%s_sensor_%s_alarm_clear", Name: "clear"}, + {ID: "chip_%s_sensor_%s_alarm_triggered", Name: "triggered"}, + }, + } +) + +var ( + currentSensorInputChartTmpl = module.Chart{ + ID: "%s_%s_current", + Title: "Sensor Current", + Units: "Amperes", + Fam: "current", + Ctx: "sensors.chip_sensor_current", + Type: module.Line, + Priority: prioCurrentSensorInput, + Dims: module.Dims{ + {ID: "chip_%s_sensor_%s_input", Name: "input", Div: precision}, + }, + } + currentSensorAverageChartTmpl = module.Chart{ + ID: "%s_%s_current_average", + Title: "Sensor Current Average", + Units: "Amperes", + Fam: "current", + Ctx: "sensors.chip_sensor_current_average", + Type: module.Line, + Priority: prioCurrentSensorAverage, + Dims: module.Dims{ + {ID: "chip_%s_sensor_%s_average", Name: "average", Div: precision}, + }, + } + currentSensorAlarmChartTmpl = module.Chart{ + ID: "%s_%s_current_alarm", + Title: "Sensor Alarm", + Units: "status", + Fam: "current", + Ctx: "sensors.chip_sensor_current_alarm", + Type: module.Line, + Priority: prioCurrentSensorAlarm, + Dims: module.Dims{ + {ID: "chip_%s_sensor_%s_alarm_clear", Name: "clear"}, + {ID: "chip_%s_sensor_%s_alarm_triggered", Name: "triggered"}, + }, + } +) + +var ( + powerSensorInputChartTmpl = module.Chart{ + ID: "%s_%s_power", + Title: "Sensor Power", + Units: "Watts", + Fam: "power", + Ctx: "sensors.chip_sensor_power", + Type: module.Line, + Priority: prioPowerSensorInput, + Dims: module.Dims{ + {ID: "chip_%s_sensor_%s_input", Name: "input", Div: precision}, + }, + } + powerSensorAverageChartTmpl = module.Chart{ + ID: "%s_%s_power_average", + Title: "Sensor Power Average", + Units: "Watts", + Fam: "power", + Ctx: "sensors.chip_sensor_power_average", + Type: module.Line, + Priority: prioPowerSensorAverage, + Dims: module.Dims{ + {ID: "chip_%s_sensor_%s_average", Name: "average", Div: precision}, + }, + } + powerSensorAlarmChartTmpl = module.Chart{ + ID: 
"%s_%s_power_alarm", + Title: "Power Sensor Alarm", + Units: "status", + Fam: "current", + Ctx: "sensors.chip_sensor_power_alarm", + Type: module.Line, + Priority: prioPowerSensorAlarm, + Dims: module.Dims{ + {ID: "chip_%s_sensor_%s_alarm_clear", Name: "clear"}, + {ID: "chip_%s_sensor_%s_alarm_triggered", Name: "triggered"}, + }, + } +) + +var ( + energySensorInputChartTmpl = module.Chart{ + ID: "%s_%s_energy", + Title: "Sensor Energy", + Units: "Joules", + Fam: "energy", + Ctx: "sensors.chip_sensor_energy", + Type: module.Line, + Priority: prioEnergySensorInput, + Dims: module.Dims{ + {ID: "chip_%s_sensor_%s_input", Name: "input", Div: precision}, + }, + } +) + +var ( + humiditySensorInputChartTmpl = module.Chart{ + ID: "%s_%s_humidity", + Title: "Sensor Humidity", + Units: "percent", + Fam: "humidity", + Ctx: "sensors.chip_sensor_humidity", + Type: module.Line, + Priority: prioHumiditySensorInput, + Dims: module.Dims{ + {ID: "chip_%s_sensor_%s_input", Name: "input", Div: precision}, + }, + } +) + +var ( + intrusionSensorAlarmChartTmpl = module.Chart{ + ID: "%s_%s_intrusion_alarm", + Title: "Sensor Intrusion Alarm", + Units: "status", + Fam: "intrusion", + Ctx: "sensors.chip_sensor_intrusion_alarm", + Type: module.Line, + Priority: prioIntrusionSensorAlarm, + Dims: module.Dims{ + {ID: "chip_%s_sensor_%s_alarm_clear", Name: "clear"}, + {ID: "chip_%s_sensor_%s_alarm_triggered", Name: "triggered"}, + }, + } +) + +func (s *Sensors) updateCharts(chips []*lmsensors.Chip) { + seen := make(map[string]bool) + + for _, chip := range chips { + for _, sn := range chip.Sensors.Voltage { + key := chip.UniqueName + "_" + sn.Name + seen[key] = true + if !s.seenSensors[key] { + s.seenSensors[key] = true + s.addVoltageCharts(chip, sn) + } + } + for _, sn := range chip.Sensors.Fan { + key := chip.UniqueName + "_" + sn.Name + seen[key] = true + if !s.seenSensors[key] { + s.seenSensors[key] = true + s.addFanCharts(chip, sn) + } + } + for _, sn := range chip.Sensors.Temperature { + key := chip.UniqueName + "_" + sn.Name + seen[key] = true + if !s.seenSensors[key] { + s.seenSensors[key] = true + s.addTemperatureCharts(chip, sn) + } + } + for _, sn := range chip.Sensors.Current { + key := chip.UniqueName + "_" + sn.Name + seen[key] = true + if !s.seenSensors[key] { + s.seenSensors[key] = true + s.addCurrentCharts(chip, sn) + } + } + for _, sn := range chip.Sensors.Power { + key := chip.UniqueName + "_" + sn.Name + seen[key] = true + if !s.seenSensors[key] { + s.seenSensors[key] = true + s.addPowerCharts(chip, sn) + } + } + for _, sn := range chip.Sensors.Energy { + key := chip.UniqueName + "_" + sn.Name + seen[key] = true + if !s.seenSensors[key] { + s.seenSensors[key] = true + s.addEnergyCharts(chip, sn) + } + } + for _, sn := range chip.Sensors.Humidity { + key := chip.UniqueName + "_" + sn.Name + seen[key] = true + if !s.seenSensors[key] { + s.seenSensors[key] = true + s.addHumidityCharts(chip, sn) + } + } + for _, sn := range chip.Sensors.Intrusion { + key := chip.UniqueName + "_" + sn.Name + seen[key] = true + if !s.seenSensors[key] { + s.seenSensors[key] = true + s.addIntrusionCharts(chip, sn) + } + } + } + + for key := range s.seenSensors { + if !seen[key] { + delete(s.seenSensors, key) + s.removeSensorChart(key) + } + } +} + +func (s *Sensors) addTemperatureCharts(chip *lmsensors.Chip, sn *lmsensors.TemperatureSensor) { + charts := temperatureSensorChartsTmpl.Copy() + + if sn.Input == nil { + _ = charts.Remove(temperatureSensorInputChartTmpl.ID) + } + if sn.Alarm == nil { + _ = 
charts.Remove(temperatureSensorAlarmChartTmpl.ID) + } + + s.addCharts(charts, chip.UniqueName, chip.SysDevice, sn.Name, sn.Label) +} + +func (s *Sensors) addVoltageCharts(chip *lmsensors.Chip, sn *lmsensors.VoltageSensor) { + charts := voltageSensorChartsTmpl.Copy() + + if sn.Input == nil { + _ = charts.Remove(voltageSensorInputChartTmpl.ID) + } + if sn.Average == nil { + _ = charts.Remove(voltageSensorAverageChartTmpl.ID) + } + if sn.Alarm == nil { + _ = charts.Remove(voltageSensorAlarmChartTmpl.ID) + } + + s.addCharts(charts, chip.UniqueName, chip.SysDevice, sn.Name, sn.Label) +} + +func (s *Sensors) addFanCharts(chip *lmsensors.Chip, sn *lmsensors.FanSensor) { + charts := fanSensorChartsTmpl.Copy() + + if sn.Input == nil { + _ = charts.Remove(fanSensorInputChartTmpl.ID) + } + if sn.Alarm == nil { + _ = charts.Remove(fanSensorAlarmChartTmpl.ID) + } + + s.addCharts(charts, chip.UniqueName, chip.SysDevice, sn.Name, sn.Label) +} + +func (s *Sensors) addCurrentCharts(chip *lmsensors.Chip, sn *lmsensors.CurrentSensor) { + charts := currentSensorChartsTmpl.Copy() + + if sn.Input == nil { + _ = charts.Remove(currentSensorInputChartTmpl.ID) + } + if sn.Average == nil { + _ = charts.Remove(currentSensorAverageChartTmpl.ID) + } + if sn.Alarm == nil { + _ = charts.Remove(currentSensorAlarmChartTmpl.ID) + } + + s.addCharts(charts, chip.UniqueName, chip.SysDevice, sn.Name, sn.Label) +} + +func (s *Sensors) addPowerCharts(chip *lmsensors.Chip, sn *lmsensors.PowerSensor) { + charts := powerSensorChartsTmpl.Copy() + + if sn.Input == nil { + _ = charts.Remove(powerSensorInputChartTmpl.ID) + } + if sn.Average == nil { + _ = charts.Remove(powerSensorAverageChartTmpl.ID) + } + if sn.Alarm == nil { + _ = charts.Remove(powerSensorAlarmChartTmpl.ID) + } + + s.addCharts(charts, chip.UniqueName, chip.SysDevice, sn.Name, sn.Label) +} + +func (s *Sensors) addEnergyCharts(chip *lmsensors.Chip, sn *lmsensors.EnergySensor) { + charts := energySensorChartsTmpl.Copy() + + if sn.Input == nil { + _ = charts.Remove(energySensorInputChartTmpl.ID) + } + + s.addCharts(charts, chip.UniqueName, chip.SysDevice, sn.Name, sn.Label) +} + +func (s *Sensors) addHumidityCharts(chip *lmsensors.Chip, sn *lmsensors.HumiditySensor) { + charts := humiditySensorChartsTmpl.Copy() + + if sn.Input == nil { + _ = charts.Remove(humiditySensorInputChartTmpl.ID) } - chip, feat, subfeat := snakeCase(sn.chip), snakeCase(sn.feature), snakeCase(sn.subfeature) + s.addCharts(charts, chip.UniqueName, chip.SysDevice, sn.Name, sn.Label) +} + +func (s *Sensors) addIntrusionCharts(chip *lmsensors.Chip, sn *lmsensors.IntrusionSensor) { + charts := intrusionSensorChartsTmpl.Copy() + + if sn.Alarm == nil { + _ = charts.Remove(intrusionSensorAlarmChartTmpl.ID) + } + + s.addCharts(charts, chip.UniqueName, chip.SysDevice, sn.Name, sn.Label) +} + +func (s *Sensors) addCharts(charts *module.Charts, chipUniqueName, chipSysDevice, snName, snLabel string) { + if len(*charts) == 0 { + return + } - chart.ID = fmt.Sprintf(chart.ID, chip, feat, subfeat) - chart.Labels = []module.Label{ - {Key: "chip", Value: sn.chip}, - {Key: "feature", Value: sn.feature}, + if lbl := s.relabel(chipUniqueName, snName); lbl != "" { + snLabel = lbl } - for _, dim := range chart.Dims { - dim.ID = fmt.Sprintf(dim.ID, chip, feat, subfeat) + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, chipUniqueName, snName) + chart.ID = cleanChartId(chart.ID) + chart.Labels = []module.Label{ + {Key: "chip", Value: chipSysDevice}, + {Key: "chip_id", Value: chipUniqueName}, + {Key: 
"sensor", Value: snName}, + {Key: "label", Value: snLabel}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, chipUniqueName, snName) + } } - if err := s.Charts().Add(chart); err != nil { + if err := s.Charts().Add(*charts...); err != nil { s.Warning(err) } } func (s *Sensors) removeSensorChart(px string) { + px = cleanChartId(px) + for _, chart := range *s.Charts() { if strings.HasPrefix(chart.ID, px) { chart.MarkRemove() @@ -157,3 +522,33 @@ func (s *Sensors) removeSensorChart(px string) { } } } + +func (s *Sensors) relabel(chipUniqueName, snName string) string { + for _, rv := range s.Relabel { + if rv.Chip == "" { + return "" + } + + mr, err := matcher.NewSimplePatternsMatcher(rv.Chip) + if err != nil { + s.Debugf("failed to create simple pattern matcher from '%s': %v", rv.Chip, err) + return "" + } + + if !mr.MatchString(chipUniqueName) { + return "" + } + + for _, sv := range rv.Sensors { + if sv.Name == snName { + return sv.Label + } + } + } + return "" +} + +func cleanChartId(id string) string { + r := strings.NewReplacer(" ", "_", ".", "_") + return strings.ToLower(r.Replace(id)) +} diff --git a/src/go/plugin/go.d/modules/sensors/collect.go b/src/go/plugin/go.d/modules/sensors/collect.go index 46e900ad0..e8181a775 100644 --- a/src/go/plugin/go.d/modules/sensors/collect.go +++ b/src/go/plugin/go.d/modules/sensors/collect.go @@ -1,179 +1,184 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux + package sensors import ( - "bufio" - "bytes" "errors" "fmt" - "strconv" - "strings" -) - -type sensorStats struct { - chip string - feature string - subfeature string - value string -} - -func (s *sensorStats) String() string { - return fmt.Sprintf("chip:%s feat:%s subfeat:%s value:%s", s.chip, s.feature, s.subfeature, s.value) -} -const ( - sensorTypeTemp = "temperature" - sensorTypeVoltage = "voltage" - sensorTypePower = "power" - sensorTypeHumidity = "humidity" - sensorTypeFan = "fan" - sensorTypeCurrent = "current" - sensorTypeEnergy = "energy" + "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/sensors/lmsensors" ) const precision = 1000 func (s *Sensors) collect() (map[string]int64, error) { - bs, err := s.exec.sensorsInfo() - if err != nil { - return nil, err + if s.sc == nil { + return nil, errors.New("sysfs scanner is not initialized") } - if len(bs) == 0 { - return nil, errors.New("empty response from sensors") - } - - sensors, err := parseSensors(bs) + chips, err := s.sc.Scan() if err != nil { return nil, err } - if len(sensors) == 0 { - return nil, errors.New("no sensors found") + + if len(chips) == 0 { + return nil, errors.New("no chips found on the system") } mx := make(map[string]int64) - seen := make(map[string]bool) - for _, sn := range sensors { - // TODO: Most likely we need different values depending on the type of sensor. 
- if !strings.HasSuffix(sn.subfeature, "_input") { - s.Debugf("skipping non input sensor: '%s'", sn) - continue + for _, chip := range chips { + for _, sn := range chip.Sensors.Voltage { + writeVoltage(mx, chip, sn) } - - v, err := strconv.ParseFloat(sn.value, 64) - if err != nil { - s.Debugf("parsing value for sensor '%s': %v", sn, err) - continue + for _, sn := range chip.Sensors.Fan { + writeFan(mx, chip, sn) } - - if sensorType(sn) == "" { - s.Debugf("can not find type for sensor '%s'", sn) - continue + for _, sn := range chip.Sensors.Temperature { + writeTemperature(mx, chip, sn) } - - if minVal, maxVal, ok := sensorLimits(sn); ok && (v < minVal || v > maxVal) { - s.Debugf("value outside limits [%d/%d] for sensor '%s'", int64(minVal), int64(maxVal), sn) - continue + for _, sn := range chip.Sensors.Current { + writeCurrent(mx, chip, sn) } - - key := fmt.Sprintf("sensor_chip_%s_feature_%s_subfeature_%s", sn.chip, sn.feature, sn.subfeature) - key = snakeCase(key) - if !s.sensors[key] { - s.sensors[key] = true - s.addSensorChart(sn) + for _, sn := range chip.Sensors.Power { + writePower(mx, chip, sn) } - - seen[key] = true - - mx[key] = int64(v * precision) - } - - for k := range s.sensors { - if !seen[k] { - delete(s.sensors, k) - s.removeSensorChart(k) + for _, sn := range chip.Sensors.Energy { + writeEnergy(mx, chip, sn) + } + for _, sn := range chip.Sensors.Humidity { + writeHumidity(mx, chip, sn) + } + for _, sn := range chip.Sensors.Intrusion { + writeIntrusion(mx, chip, sn) } } + s.updateCharts(chips) + return mx, nil } -func snakeCase(n string) string { - return strings.ToLower(strings.ReplaceAll(n, " ", "_")) +func writeVoltage(mx map[string]int64, chip *lmsensors.Chip, sn *lmsensors.VoltageSensor) { + px := sensorPrefix(chip.UniqueName, sn.Name) + + mx[px+"read_time"] = sn.ReadTime.Milliseconds() + writeMetricAlarm(mx, px, sn.Alarm) + writeMetric(mx, px+"input", sn.Input) + writeMetric(mx, px+"average", sn.Average) + writeMetric(mx, px+"min", sn.Min) + writeMetric(mx, px+"max", sn.Max) + writeMetric(mx, px+"lcrit", sn.CritMin) + writeMetric(mx, px+"crit", sn.CritMax) + writeMetric(mx, px+"lowest", sn.Lowest) + writeMetric(mx, px+"highest", sn.Highest) } -func sensorLimits(sn sensorStats) (minVal float64, maxVal float64, ok bool) { - switch sensorType(sn) { - case sensorTypeTemp: - return -127, 1000, true - case sensorTypeVoltage: - return -400, 400, true - case sensorTypeCurrent: - return -127, 127, true - case sensorTypeFan: - return 0, 65535, true - default: - return 0, 0, false - } +func writeFan(mx map[string]int64, chip *lmsensors.Chip, sn *lmsensors.FanSensor) { + px := sensorPrefix(chip.UniqueName, sn.Name) + + mx[px+"read_time"] = sn.ReadTime.Milliseconds() + writeMetricAlarm(mx, px, sn.Alarm) + writeMetric(mx, px+"input", sn.Input) + writeMetric(mx, px+"min", sn.Min) + writeMetric(mx, px+"max", sn.Max) + writeMetric(mx, px+"target", sn.Target) } -func sensorType(sn sensorStats) string { - switch { - case strings.HasPrefix(sn.subfeature, "temp"): - return sensorTypeTemp - case strings.HasPrefix(sn.subfeature, "in"): - return sensorTypeVoltage - case strings.HasPrefix(sn.subfeature, "power"): - return sensorTypePower - case strings.HasPrefix(sn.subfeature, "humidity"): - return sensorTypeHumidity - case strings.HasPrefix(sn.subfeature, "fan"): - return sensorTypeFan - case strings.HasPrefix(sn.subfeature, "curr"): - return sensorTypeCurrent - case strings.HasPrefix(sn.subfeature, "energy"): - return sensorTypeEnergy - default: - return "" - } +func writeTemperature(mx 
map[string]int64, chip *lmsensors.Chip, sn *lmsensors.TemperatureSensor) { + px := sensorPrefix(chip.UniqueName, sn.Name) + + mx[px+"read_time"] = sn.ReadTime.Milliseconds() + writeMetricAlarm(mx, px, sn.Alarm) + writeMetric(mx, px+"input", sn.Input) + writeMetric(mx, px+"min", sn.Min) + writeMetric(mx, px+"max", sn.Max) + writeMetric(mx, px+"lcrit", sn.CritMin) + writeMetric(mx, px+"crit", sn.CritMax) + writeMetric(mx, px+"emergency", sn.Emergency) + writeMetric(mx, px+"lowest", sn.Lowest) + writeMetric(mx, px+"highest", sn.Highest) } -func parseSensors(output []byte) ([]sensorStats, error) { - var sensors []sensorStats +func writeCurrent(mx map[string]int64, chip *lmsensors.Chip, sn *lmsensors.CurrentSensor) { + px := sensorPrefix(chip.UniqueName, sn.Name) + + mx[px+"read_time"] = sn.ReadTime.Milliseconds() + writeMetricAlarm(mx, px, sn.Alarm) + writeMetric(mx, px+"max", sn.Max) + writeMetric(mx, px+"min", sn.Min) + writeMetric(mx, px+"lcrit", sn.CritMin) + writeMetric(mx, px+"crit", sn.CritMax) + writeMetric(mx, px+"input", sn.Input) + writeMetric(mx, px+"average", sn.Average) + writeMetric(mx, px+"lowest", sn.Lowest) + writeMetric(mx, px+"highest", sn.Highest) +} - sc := bufio.NewScanner(bytes.NewReader(output)) +func writePower(mx map[string]int64, chip *lmsensors.Chip, sn *lmsensors.PowerSensor) { + px := sensorPrefix(chip.UniqueName, sn.Name) + + mx[px+"read_time"] = sn.ReadTime.Milliseconds() + writeMetricAlarm(mx, px, sn.Alarm) + writeMetric(mx, px+"average", sn.Average) + writeMetric(mx, px+"average_highest", sn.AverageHighest) + writeMetric(mx, px+"average_lowest", sn.AverageLowest) + writeMetric(mx, px+"average_max", sn.AverageMax) + writeMetric(mx, px+"average_min", sn.AverageMin) + writeMetric(mx, px+"input", sn.Input) + writeMetric(mx, px+"input_highest", sn.InputHighest) + writeMetric(mx, px+"input_lowest", sn.InputLowest) + writeMetric(mx, px+"accuracy", sn.Accuracy) + writeMetric(mx, px+"cap", sn.Cap) + writeMetric(mx, px+"cap_max", sn.CapMax) + writeMetric(mx, px+"cap_min", sn.CapMin) + writeMetric(mx, px+"max", sn.Max) + writeMetric(mx, px+"crit", sn.CritMax) +} - var chip, feat string +func writeEnergy(mx map[string]int64, chip *lmsensors.Chip, sn *lmsensors.EnergySensor) { + px := sensorPrefix(chip.UniqueName, sn.Name) - for sc.Scan() { - text := sc.Text() - if text == "" { - chip, feat = "", "" - continue - } + mx[px+"read_time"] = sn.ReadTime.Milliseconds() + writeMetric(mx, px+"input", sn.Input) +} - switch { - case strings.HasPrefix(text, " ") && chip != "" && feat != "": - parts := strings.Split(text, ":") - if len(parts) != 2 { - continue - } - subfeat, value := strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]) - sensors = append(sensors, sensorStats{ - chip: chip, - feature: feat, - subfeature: subfeat, - value: value, - }) - case strings.HasSuffix(text, ":") && chip != "": - feat = strings.TrimSpace(strings.TrimSuffix(text, ":")) - default: - chip = text - feat = "" - } +func writeHumidity(mx map[string]int64, chip *lmsensors.Chip, sn *lmsensors.HumiditySensor) { + px := sensorPrefix(chip.UniqueName, sn.Name) + + mx[px+"read_time"] = sn.ReadTime.Milliseconds() + writeMetric(mx, px+"input", sn.Input) +} + +func writeIntrusion(mx map[string]int64, chip *lmsensors.Chip, sn *lmsensors.IntrusionSensor) { + px := sensorPrefix(chip.UniqueName, sn.Name) + + mx[px+"read_time"] = sn.ReadTime.Milliseconds() + writeMetricAlarm(mx, px, sn.Alarm) +} + +func writeMetric(mx map[string]int64, key string, value *float64) { + if value != nil { + mx[key] = int64(*value * 
precision) + } +} + +func writeMetricAlarm(mx map[string]int64, px string, value *bool) { + if value != nil { + mx[px+"alarm_clear"] = boolToInt(!*value) + mx[px+"alarm_triggered"] = boolToInt(*value) } +} - return sensors, nil +func sensorPrefix(chip, sensor string) string { + return fmt.Sprintf("chip_%s_sensor_%s_", chip, sensor) +} + +func boolToInt(b bool) int64 { + if b { + return 1 + } + return 0 } diff --git a/src/go/plugin/go.d/modules/sensors/config_schema.json b/src/go/plugin/go.d/modules/sensors/config_schema.json index 6c12ca9b8..018de0118 100644 --- a/src/go/plugin/go.d/modules/sensors/config_schema.json +++ b/src/go/plugin/go.d/modules/sensors/config_schema.json @@ -11,24 +11,71 @@ "minimum": 1, "default": 10 }, - "binary_path": { - "title": "Binary path", - "description": "Path to the `sensors` binary.", - "type": "string", - "default": "/usr/bin/sensors" - }, - "timeout": { - "title": "Timeout", - "description": "Timeout for executing the binary, specified in seconds.", - "type": "number", - "minimum": 0.5, - "default": 2 + "relabel": { + "title": "Update Labels", + "description": " This configuration can be used to update existing sensor labels or add labels to sensors that don't have them.", + "type": [ + "array", + "null" + ], + "items": { + "title": "", + "description": "", + "type": [ + "object", + "null" + ], + "properties": { + "chip": { + "title": "Chip", + "description": "[Pattern](https://github.com/netdata/netdata/tree/master/src/go/pkg/matcher#simple-patterns-matcher) to match the `chip_id` label.", + "type": "string" + }, + "sensors": { + "title": "Relabel", + "description": "A list of sensors to be relabeled for the specified chip. The sensor name will be matched against the `sensor` label value.", + "type": [ + "array", + "null" + ], + "items": { + "title": "Sensor", + "description": "", + "type": [ + "object", + "null" + ], + "properties": { + "name": { + "title": "Name", + "description": "", + "type": "string" + }, + "label": { + "title": "New label", + "description": "", + "type": "string" + } + }, + "required": [ + "name", + "label" + ] + }, + "minItems": 1, + "uniqueItems": true + } + }, + "required": [ + "chip", + "sensors" + ] + }, + "uniqueItems": true } }, "required": [ - "binary_path" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } @@ -37,11 +84,37 @@ "uiOptions": { "fullPage": true }, - "binary_path": { - "ui:help": "If an absolute path is provided, the collector will use it directly; otherwise, it will search for the binary in directories specified in the PATH environment variable." + "relabel": { + "items": { + "chip": { + "ui:placeholder": "it8688-*" + }, + "sensors": { + "ui:listFlavour": "list", + "items": { + "name": { + "ui:placeholder": "For example, temp1, in1, or voltage1." + } + } + } + } }, - "timeout": { - "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)." 
+ "ui:flavour": "tabs", + "ui:options": { + "tabs": [ + { + "title": "Base", + "fields": [ + "update_every" + ] + }, + { + "title": "Relabel Sensors", + "fields": [ + "relabel" + ] + } + ] } } } diff --git a/src/go/plugin/go.d/modules/sensors/doc.go b/src/go/plugin/go.d/modules/sensors/doc.go new file mode 100644 index 000000000..9b672fd89 --- /dev/null +++ b/src/go/plugin/go.d/modules/sensors/doc.go @@ -0,0 +1,3 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package sensors diff --git a/src/go/plugin/go.d/modules/sensors/exec.go b/src/go/plugin/go.d/modules/sensors/exec.go deleted file mode 100644 index c386ddd7d..000000000 --- a/src/go/plugin/go.d/modules/sensors/exec.go +++ /dev/null @@ -1,41 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package sensors - -import ( - "context" - "fmt" - "os/exec" - "time" - - "github.com/netdata/netdata/go/plugins/logger" -) - -func newSensorsCliExec(binPath string, timeout time.Duration) *sensorsCliExec { - return &sensorsCliExec{ - binPath: binPath, - timeout: timeout, - } -} - -type sensorsCliExec struct { - *logger.Logger - - binPath string - timeout time.Duration -} - -func (e *sensorsCliExec) sensorsInfo() ([]byte, error) { - ctx, cancel := context.WithTimeout(context.Background(), e.timeout) - defer cancel() - - cmd := exec.CommandContext(ctx, e.binPath, "-A", "-u") - e.Debugf("executing '%s'", cmd) - - bs, err := cmd.Output() - if err != nil { - return nil, fmt.Errorf("error on '%s': %v", cmd, err) - } - - return bs, nil -} diff --git a/src/go/plugin/go.d/modules/sensors/init.go b/src/go/plugin/go.d/modules/sensors/init.go deleted file mode 100644 index 6753693da..000000000 --- a/src/go/plugin/go.d/modules/sensors/init.go +++ /dev/null @@ -1,38 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package sensors - -import ( - "errors" - "os" - "os/exec" - "strings" -) - -func (s *Sensors) validateConfig() error { - if s.BinaryPath == "" { - return errors.New("no sensors binary path specified") - } - return nil -} - -func (s *Sensors) initSensorsCliExec() (sensorsCLI, error) { - binPath := s.BinaryPath - - if !strings.HasPrefix(binPath, "/") { - path, err := exec.LookPath(binPath) - if err != nil { - return nil, err - } - binPath = path - } - - if _, err := os.Stat(binPath); err != nil { - return nil, err - } - - sensorsExec := newSensorsCliExec(binPath, s.Timeout.Duration()) - sensorsExec.Logger = s.Logger - - return sensorsExec, nil -} diff --git a/src/go/plugin/go.d/modules/sensors/integrations/linux_sensors.md b/src/go/plugin/go.d/modules/sensors/integrations/linux_sensors.md new file mode 100644 index 000000000..ccbb6b2f0 --- /dev/null +++ b/src/go/plugin/go.d/modules/sensors/integrations/linux_sensors.md @@ -0,0 +1,249 @@ + + +# Linux Sensors + + + + + +Plugin: go.d.plugin +Module: sensors + + + +## Overview + +This collector gathers real-time system sensor statistics using the [sysfs](https://www.kernel.org/doc/Documentation/hwmon/sysfs-interface) interface. + +Supported sensors: + +- Temperature +- Voltage +- Fan +- Current +- Power +- Energy +- Humidity +- Intrusion + + + + +This collector is only supported on the following platforms: + +- Linux + +This collector only supports collecting metrics from a single instance of this integration. + + +### Default Behavior + +#### Auto-Detection + +Automatically discovers and exposes all available sensors on the system through the [sysfs](https://www.kernel.org/doc/Documentation/hwmon/sysfs-interface) interface. 
+ + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per sensor + +These metrics refer to the system sensor. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| chip | The path to the sensor's chip device, excluding the /sys/devices prefix. This provides a unique identifier for the physical hardware component. | +| chip_id | A unique identifier for the sensor's chip, formatted as `chipName-busType-hash`. | +| sensor | The name of the specific sensor within the chip device. This provides a direct identifier for the individual measurement point. | +| label | A label provided by the kernel driver to indicate the intended use or purpose of the sensor. | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| sensors.chip_sensor_temperature | input | Celsius | +| sensors.chip_sensor_temperature_alarm | clear, triggered | status | +| sensors.chip_sensor_voltage | input | Volts | +| sensors.chip_sensor_voltage_average | average | Volts | +| sensors.chip_sensor_voltage_alarm | clear, triggered | status | +| sensors.chip_sensor_fan | input | RPM | +| sensors.chip_sensor_fan_alarm | clear, triggered | status | +| sensors.chip_sensor_current | input | Amperes | +| sensors.chip_sensor_current_average | average | Amperes | +| sensors.chip_sensor_current_alarm | clear, triggered | status | +| sensors.chip_sensor_power | input | Watts | +| sensors.chip_sensor_power_average | average | Watts | +| sensors.chip_sensor_power_alarm | clear, triggered | status | +| sensors.chip_sensor_energy | input | Joules | +| sensors.chip_sensor_humidity | input | percent | +| sensors.chip_sensor_intrusion_alarm | clear, triggered | status | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/sensors.conf`. + + +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/sensors.conf +``` +#### Options + +The following options can be defined globally: update_every. + + +
    Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| relabel | A list used to update existing sensor labels or add labels to sensors that don't have them. | [] | no | +| relabel[].chip | [Pattern](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md#simple-patterns) to match the `chip_id` label value. | | no | +| relabel[].sensors | A list of sensors to be relabeled for the specified chip. | [] | no | +| relabel[].sensors[].name | The exact sensor name (e.g., `temp1`, `in1`, or `voltage1`). | | no | +| relabel[].sensors[].label | The new label value for the sensor. | | no | +
    + +#### Examples + +##### Custom update_every + +Allows you to override the default data collection interval. + +
    Config + +```yaml +jobs: + - name: sensors + update_every: 5 # Collect sensor statistics every 5 seconds + +``` +
    + +##### Renaming labels + +Allows you to override existing sensor labels or add new ones. + +
    Config + +```yaml +jobs: + - name: sensors + relabel: + - chip: as99127f-* + sensors: + - name: temp1 + label: Mobo Temp + - name: temp2 + label: CPU0 Temp + +``` +
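+ +##### Matching several chips + +Since `chip` accepts Netdata simple patterns, a single rule can cover several chips at once. A sketch, assuming NVMe drives that expose chip ids like `nvme-pci-<hash>`: + +
Config + +```yaml +jobs: + - name: sensors + relabel: + - chip: nvme-pci-* # illustrative pattern, not a real chip id + sensors: + - name: temp1 + label: NVMe Composite Temp + +``` +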
    + + + +## Troubleshooting + +### Debug Mode + +**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature. + +To troubleshoot issues with the `sensors` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m sensors + ``` + +### Getting Logs + +If you're encountering problems with the `sensors` collector, follow these steps to retrieve logs and identify potential issues: + +- **Run the command** specific to your system (systemd, non-systemd, or Docker container). +- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem. + +#### System with systemd + +Use the following command to view logs generated since the last Netdata service restart: + +```bash +journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep sensors +``` + +#### System without systemd + +Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name: + +```bash +grep sensors /var/log/netdata/collector.log +``` + +**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues. + +#### Docker Container + +If your Netdata runs in a Docker container named "netdata" (replace if different), use this command: + +```bash +docker logs netdata 2>&1 | grep sensors +``` + + diff --git a/src/go/plugin/go.d/modules/sensors/integrations/linux_sensors_lm-sensors.md b/src/go/plugin/go.d/modules/sensors/integrations/linux_sensors_lm-sensors.md deleted file mode 100644 index d5e948c42..000000000 --- a/src/go/plugin/go.d/modules/sensors/integrations/linux_sensors_lm-sensors.md +++ /dev/null @@ -1,215 +0,0 @@ - - -# Linux Sensors (lm-sensors) - - - - - -Plugin: go.d.plugin -Module: sensors - - - -## Overview - -This collector gathers real-time system sensor statistics, including temperature, voltage, current, power, fan speed, energy consumption, and humidity, utilizing the [sensors](https://linux.die.net/man/1/sensors) binary. - - - - -This collector is supported on all platforms. - -This collector only supports collecting metrics from a single instance of this integration. - - -### Default Behavior - -#### Auto-Detection - -The following type of sensors are auto-detected: - -- temperature -- fan -- voltage -- current -- power -- energy -- humidity - - -#### Limits - -The default configuration for this integration does not impose any limits on data collection. - -#### Performance Impact - -The default configuration for this integration is not expected to impose a significant performance impact on the system. - - -## Metrics - -Metrics grouped by *scope*. - -The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. - - - -### Per sensor - -These metrics refer to the sensor. 
- -Labels: - -| Label | Description | -|:-----------|:----------------| -| chip | The hardware component responsible for the sensor monitoring. | -| feature | The specific sensor or monitoring point provided by the chip. | - -Metrics: - -| Metric | Dimensions | Unit | -|:------|:----------|:----| -| sensors.sensor_temperature | temperature | Celsius | -| sensors.sensor_voltage | voltage | Volts | -| sensors.sensor_current | current | Amperes | -| sensors.sensor_power | power | Watts | -| sensors.sensor_fan_speed | fan | RPM | -| sensors.sensor_energy | energy | Joules | -| sensors.sensor_humidity | humidity | percent | - - - -## Alerts - -There are no alerts configured by default for this integration. - - -## Setup - -### Prerequisites - -#### Install lm-sensors - -- Install `lm-sensors` using your distribution's package manager. -- Run `sensors-detect` to detect hardware monitoring chips. - - - -### Configuration - -#### File - -The configuration file name for this integration is `go.d/sensors.conf`. - - -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). - -```bash -cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata -sudo ./edit-config go.d/sensors.conf -``` -#### Options - -The following options can be defined globally: update_every. - - -
    Config options - -| Name | Description | Default | Required | -|:----|:-----------|:-------|:--------:| -| update_every | Data collection frequency. | 10 | no | -| binary_path | Path to the `sensors` binary. If an absolute path is provided, the collector will use it directly; otherwise, it will search for the binary in directories specified in the PATH environment variable. | /usr/bin/sensors | yes | -| timeout | Timeout for executing the binary, specified in seconds. | 2 | no | - -
    - -#### Examples - -##### Custom binary path - -The executable is not in the directories specified in the PATH environment variable. - -
    Config - -```yaml -jobs: - - name: sensors - binary_path: /usr/local/sbin/sensors - -``` -
    - - - -## Troubleshooting - -### Debug Mode - -**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature. - -To troubleshoot issues with the `sensors` collector, run the `go.d.plugin` with the debug option enabled. The output -should give you clues as to why the collector isn't working. - -- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on - your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. - - ```bash - cd /usr/libexec/netdata/plugins.d/ - ``` - -- Switch to the `netdata` user. - - ```bash - sudo -u netdata -s - ``` - -- Run the `go.d.plugin` to debug the collector: - - ```bash - ./go.d.plugin -d -m sensors - ``` - -### Getting Logs - -If you're encountering problems with the `sensors` collector, follow these steps to retrieve logs and identify potential issues: - -- **Run the command** specific to your system (systemd, non-systemd, or Docker container). -- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem. - -#### System with systemd - -Use the following command to view logs generated since the last Netdata service restart: - -```bash -journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep sensors -``` - -#### System without systemd - -Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name: - -```bash -grep sensors /var/log/netdata/collector.log -``` - -**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues. - -#### Docker Container - -If your Netdata runs in a Docker container named "netdata" (replace if different), use this command: - -```bash -docker logs netdata 2>&1 | grep sensors -``` - - diff --git a/src/go/plugin/go.d/modules/sensors/lmsensors/LICENSE.md b/src/go/plugin/go.d/modules/sensors/lmsensors/LICENSE.md new file mode 100644 index 000000000..031350dc8 --- /dev/null +++ b/src/go/plugin/go.d/modules/sensors/lmsensors/LICENSE.md @@ -0,0 +1,10 @@ +MIT License +=========== + +Copyright (C) 2016 Matt Layher + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/src/go/plugin/go.d/modules/sensors/lmsensors/README.md b/src/go/plugin/go.d/modules/sensors/lmsensors/README.md new file mode 100644 index 000000000..e1ed52bbb --- /dev/null +++ b/src/go/plugin/go.d/modules/sensors/lmsensors/README.md @@ -0,0 +1,4 @@ +lmsensors +========= + +Modified version of [mdlayher/lmsensors](https://github.com/mdlayher/lmsensors). \ No newline at end of file diff --git a/src/go/plugin/go.d/modules/sensors/lmsensors/doc.go b/src/go/plugin/go.d/modules/sensors/lmsensors/doc.go new file mode 100644 index 000000000..cc6bf2d7d --- /dev/null +++ b/src/go/plugin/go.d/modules/sensors/lmsensors/doc.go @@ -0,0 +1,2 @@ +// Package lmsensors provides access to Linux monitoring sensors data, such as temperatures, voltage, and fan speeds. +package lmsensors diff --git a/src/go/plugin/go.d/modules/sensors/lmsensors/fs.go b/src/go/plugin/go.d/modules/sensors/lmsensors/fs.go new file mode 100644 index 000000000..d0027fcd2 --- /dev/null +++ b/src/go/plugin/go.d/modules/sensors/lmsensors/fs.go @@ -0,0 +1,44 @@ +package lmsensors + +import ( + "io/fs" + "os" + "path/filepath" + "strings" +) + +// A filesystem is an interface to a filesystem, used for testing. +type filesystem interface { + ReadDir(name string) ([]fs.DirEntry, error) + ReadFile(filename string) (string, error) + Readlink(name string) (string, error) + Stat(name string) (os.FileInfo, error) + WalkDir(root string, walkFn fs.WalkDirFunc) error +} + +// A systemFilesystem is a filesystem which uses operations on the host filesystem. +type systemFilesystem struct{} + +func (s *systemFilesystem) ReadDir(name string) ([]fs.DirEntry, error) { + return os.ReadDir(name) +} + +func (s *systemFilesystem) ReadFile(filename string) (string, error) { + b, err := os.ReadFile(filename) + if err != nil { + return "", err + } + return strings.TrimSpace(string(b)), nil +} + +func (s *systemFilesystem) Readlink(name string) (string, error) { + return os.Readlink(name) +} + +func (s *systemFilesystem) Stat(name string) (os.FileInfo, error) { + return os.Stat(name) +} + +func (s *systemFilesystem) WalkDir(root string, walkFn fs.WalkDirFunc) error { + return filepath.WalkDir(root, walkFn) +} diff --git a/src/go/plugin/go.d/modules/sensors/lmsensors/parse.go b/src/go/plugin/go.d/modules/sensors/lmsensors/parse.go new file mode 100644 index 000000000..cfd1afb78 --- /dev/null +++ b/src/go/plugin/go.d/modules/sensors/lmsensors/parse.go @@ -0,0 +1,364 @@ +package lmsensors + +import ( + "fmt" + "strconv" + "strings" + "time" + "unicode" +) + +type ( + rawSensors map[string]map[string]rawValue // e.g. [temp1][input] + + rawValue struct { + value string + readTime time.Duration + } +) + +// parseSensors parses all Sensors from an input raw data slice, produced during a filesystem walk. 
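+// Feature files follow the hwmon convention "<type><N>_<subfeature>" (e.g. "temp1_input"), +// so the sensor type is derived from the feature name prefix before its first digit.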
+func parseSensors(rawSns rawSensors) (*Sensors, error) { + var sensors Sensors + + for name, values := range rawSns { + typ := name + if i := strings.IndexFunc(name, unicode.IsDigit); i > 0 { + typ = name[:i] + } + + switch typ { + case "in": + sn := &VoltageSensor{Name: name} + if err := parseVoltage(sn, values); err != nil { + return nil, fmt.Errorf("voltage sensor '%s': %w", name, err) + } + sensors.Voltage = append(sensors.Voltage, sn) + case "fan": + sn := &FanSensor{Name: name} + if err := parseFan(sn, values); err != nil { + return nil, fmt.Errorf("fan sensor '%s': %w", name, err) + } + sensors.Fan = append(sensors.Fan, sn) + case "temp": + sn := &TemperatureSensor{Name: name} + if err := parseTemperature(sn, values); err != nil { + return nil, fmt.Errorf("temperature sensor '%s': %w", name, err) + } + sensors.Temperature = append(sensors.Temperature, sn) + case "curr": + sn := &CurrentSensor{Name: name} + if err := parseCurrent(sn, values); err != nil { + return nil, fmt.Errorf("current sensor '%s': %w", name, err) + } + sensors.Current = append(sensors.Current, sn) + case "power": + sn := &PowerSensor{Name: name} + if err := parsePower(sn, values); err != nil { + return nil, fmt.Errorf("power sensor '%s': %w", name, err) + } + sensors.Power = append(sensors.Power, sn) + case "energy": + sn := &EnergySensor{Name: name} + if err := parseEnergy(sn, values); err != nil { + return nil, fmt.Errorf("energy sensor '%s': %w", name, err) + } + sensors.Energy = append(sensors.Energy, sn) + case "humidity": + sn := &HumiditySensor{Name: name} + if err := parseHumidity(sn, values); err != nil { + return nil, fmt.Errorf("humidity sensor '%s': %w", name, err) + } + sensors.Humidity = append(sensors.Humidity, sn) + case "intrusion": + sn := &IntrusionSensor{Name: name} + if err := parseIntrusion(sn, values); err != nil { + return nil, fmt.Errorf("intrusion sensor '%s': %w", name, err) + } + sensors.Intrusion = append(sensors.Intrusion, sn) + default: + continue + } + } + + return &sensors, nil +} + +func parseVoltage(s *VoltageSensor, values map[string]rawValue) error { + const div = 1e3 // raw in millivolts + + for name, val := range values { + var err error + + s.ReadTime += val.readTime + v := val.value + + switch name { + case "label": + s.Label = v + case "alarm": + s.Alarm = ptr(v != "0") + case "min": + s.Min, err = parseFloat(v, div) + case "lcrit": + s.CritMin, err = parseFloat(v, div) + case "max": + s.Max, err = parseFloat(v, div) + case "crit": + s.CritMax, err = parseFloat(v, div) + case "input": + s.Input, err = parseFloat(v, div) + case "average": + s.Average, err = parseFloat(v, div) + case "lowest": + s.Lowest, err = parseFloat(v, div) + case "highest": + s.Highest, err = parseFloat(v, div) + } + if err != nil { + return fmt.Errorf("subfeature '%s' value '%s': %v", name, v, err) + } + } + + return nil +} + +func parseFan(s *FanSensor, values map[string]rawValue) error { + const div = 1 // raw in revolution/min (RPM) + + for name, val := range values { + var err error + + s.ReadTime += val.readTime + v := val.value + + switch name { + case "input": + s.Input, err = parseFloat(v, div) + case "min": + s.Min, err = parseFloat(v, div) + case "max": + s.Max, err = parseFloat(v, div) + case "target": + s.Target, err = parseFloat(v, div) + case "alarm": + s.Alarm = ptr(v != "0") + case "label": + s.Label = v + } + if err != nil { + return fmt.Errorf("subfeature '%s' value '%s': %v", name, v, err) + } + } + + return nil +} + +func parseTemperature(s *TemperatureSensor, values
map[string]rawValue) error { + const div = 1000 // raw in milli degree Celsius + + for name, val := range values { + var err error + + s.ReadTime += val.readTime + v := val.value + + switch name { + case "max": + s.Max, err = parseFloat(v, div) + case "min": + s.Min, err = parseFloat(v, div) + case "input": + s.Input, err = parseFloat(v, div) + case "crit": + s.CritMax, err = parseFloat(v, div) + case "emergency": + s.Emergency, err = parseFloat(v, div) + case "lcrit": + s.CritMin, err = parseFloat(v, div) + case "lowest": + s.Lowest, err = parseFloat(v, div) + case "highest": + s.Highest, err = parseFloat(v, div) + case "alarm": + s.Alarm = ptr(v != "0") + case "type": + t, err := strconv.Atoi(v) + if err != nil { + return err + } + s.TempTypeRaw = t + case "label": + s.Label = v + } + if err != nil { + return fmt.Errorf("subfeature '%s' value '%s': %v", name, v, err) + } + } + + return nil +} + +func parseCurrent(s *CurrentSensor, values map[string]rawValue) error { + const div = 1e3 // raw in milli ampere + + for name, val := range values { + var err error + + s.ReadTime += val.readTime + v := val.value + + switch name { + case "max": + s.Max, err = parseFloat(v, div) + case "min": + s.Min, err = parseFloat(v, div) + case "lcrit": + s.CritMin, err = parseFloat(v, div) + case "crit": + s.CritMax, err = parseFloat(v, div) + case "input": + s.Input, err = parseFloat(v, div) + case "average": + s.Average, err = parseFloat(v, div) + case "lowest": + s.Lowest, err = parseFloat(v, div) + case "highest": + s.Highest, err = parseFloat(v, div) + case "alarm": + s.Alarm = ptr(v != "0") + case "label": + s.Label = v + } + if err != nil { + return fmt.Errorf("subfeature '%s' value '%s': %v", name, v, err) + } + } + + return nil +} + +func parsePower(s *PowerSensor, values map[string]rawValue) error { + const div = 1e6 // raw in microWatt + + for name, val := range values { + var err error + + s.ReadTime += val.readTime + v := val.value + + switch name { + case "label": + s.Label = v + case "alarm": + s.Alarm = ptr(v != "0") + case "average": + s.Average, err = parseFloat(v, div) + case "average_highest": + s.AverageHighest, err = parseFloat(v, div) + case "average_lowest": + s.AverageLowest, err = parseFloat(v, div) + case "average_max": + s.AverageMax, err = parseFloat(v, div) + case "average_min": + s.AverageMin, err = parseFloat(v, div) + case "input": + s.Input, err = parseFloat(v, div) + case "input_highest": + s.InputHighest, err = parseFloat(v, div) + case "input_lowest": + s.InputLowest, err = parseFloat(v, div) + case "accuracy": + v = strings.TrimSuffix(v, "%") + s.Accuracy, err = parseFloat(v, 1) + case "cap": + s.Cap, err = parseFloat(v, div) + case "cap_max": + s.CapMax, err = parseFloat(v, div) + case "cap_min": + s.CapMin, err = parseFloat(v, div) + case "max": + s.Max, err = parseFloat(v, div) + case "crit": + s.CritMax, err = parseFloat(v, div) + } + if err != nil { + return fmt.Errorf("subfeature '%s' value '%s': %v", name, v, err) + } + } + + return nil +} + +func parseEnergy(s *EnergySensor, values map[string]rawValue) error { + const div = 1e6 // raw in microJoule + + for name, val := range values { + var err error + + s.ReadTime += val.readTime + v := val.value + + switch name { + case "label": + s.Label = v + case "input": + s.Input, err = parseFloat(v, div) + } + if err != nil { + return fmt.Errorf("subfeature '%s' value '%s': %v", name, v, err) + } + } + + return nil +} + +func parseHumidity(s *HumiditySensor, values map[string]rawValue) error { + const div = 1e3 // raw in 
milli percent + + for name, val := range values { + var err error + + s.ReadTime += val.readTime + v := val.value + + switch name { + case "label": + s.Label = v + case "input": + s.Input, err = parseFloat(v, div) + } + if err != nil { + return fmt.Errorf("subfeature '%s' value '%s': %v", name, v, err) + } + } + + return nil +} + +func parseIntrusion(s *IntrusionSensor, values map[string]rawValue) error { + for name, val := range values { + s.ReadTime += val.readTime + v := val.value + + switch name { + case "label": + s.Label = v + case "alarm": + s.Alarm = ptr(v != "0") + } + } + + return nil +} + +func parseFloat(s string, div float64) (*float64, error) { + f, err := strconv.ParseFloat(s, 64) + if err != nil { + return nil, err + } + + return ptr(f / div), nil +} + +func ptr[T any](v T) *T { return &v } diff --git a/src/go/plugin/go.d/modules/sensors/lmsensors/scanner.go b/src/go/plugin/go.d/modules/sensors/lmsensors/scanner.go new file mode 100644 index 000000000..8d010613f --- /dev/null +++ b/src/go/plugin/go.d/modules/sensors/lmsensors/scanner.go @@ -0,0 +1,228 @@ +package lmsensors + +import ( + "encoding/hex" + "errors" + "fmt" + "io/fs" + "os" + "path/filepath" + "strings" + "time" + + "github.com/cloudflare/cfssl/scan/crypto/sha1" + + "github.com/netdata/netdata/go/plugins/logger" +) + +// New creates a new Scanner. +func New() *Scanner { + return &Scanner{ + fs: &systemFilesystem{}, + } +} + +// A Scanner scans for Devices, so data can be read from their Sensors. +type Scanner struct { + *logger.Logger + + fs filesystem +} + +// Scan scans for Devices and their Sensors. +func (sc *Scanner) Scan() ([]*Chip, error) { + paths, err := sc.detectDevicePaths() + if err != nil { + return nil, err + } + + sc.Debugf("sysfs scanner: found %d paths", len(paths)) + + var chips []*Chip + + for _, path := range paths { + sc.Debugf("sysfs scanner: scanning %s", path) + + var chip Chip + + rawSns := make(rawSensors) + + des, err := sc.fs.ReadDir(path) + if err != nil { + return nil, err + } + + for _, de := range des { + if !de.Type().IsRegular() || shouldSkip(de.Name()) { + continue + } + + filePath := filepath.Join(path, de.Name()) + + now := time.Now() + content, err := sc.fs.ReadFile(filePath) + if err != nil { + sc.Debugf("sysfs scanner: failed to read '%s': %v", filePath, err) + continue + } + since := time.Since(now) + + sc.Debugf("sysfs scanner: reading file '%s' took %s", filePath, since) + + if de.Name() == "name" { + chip.Name = content + continue + } + + // Sensor names in format "sensor#_foo", e.g. "temp1_input" + feat, subfeat, ok := strings.Cut(de.Name(), "_") + if !ok { + continue + } + + // power average_max can be unknown (https://github.com/netdata/netdata/issues/18805) + if content == "unknown" && subfeat != "label" { + continue + } + + if _, ok := rawSns[feat]; !ok { + rawSns[feat] = make(map[string]rawValue) + } + + rawSns[feat][subfeat] = rawValue{value: content, readTime: since} + } + + sensors, err := parseSensors(rawSns) + if err != nil { + return nil, fmt.Errorf("sysfs scanner: failed to parse (device '%s', path '%s'): %v", chip.Name, path, err) + } + + if sensors != nil { + chip.Sensors = *sensors + } + + chip.SysDevice = getDevicePath(path) + chip.UniqueName = fmt.Sprintf("%s-%s-%s", chip.Name, getBusType(chip.SysDevice), getHash(chip.SysDevice)) + + chips = append(chips, &chip) + } + + return chips, nil +} + +// detectDevicePaths performs a filesystem walk to paths where devices may reside on Linux. 
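+// Each /sys/class/hwmon/hwmon* entry is a symlink: its destination is used directly +// when it contains a "name" file, otherwise the destination's "device" link is followed +// and checked for a "name" file instead.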
+func (sc *Scanner) detectDevicePaths() ([]string, error) { + const lookPath = "/sys/class/hwmon" + + var paths []string + + err := sc.fs.WalkDir(lookPath, func(path string, de os.DirEntry, err error) error { + if err != nil { + return err + } + + if de.Type()&os.ModeSymlink == 0 { + return nil + } + + dest, err := sc.fs.Readlink(path) + if err != nil { + return err + } + + dest = filepath.Join(lookPath, filepath.Clean(dest)) + + // Symlink destination has a file called name, meaning a sensor exists here and data can be retrieved + fi, err := sc.fs.Stat(filepath.Join(dest, "name")) + if err != nil && !errors.Is(err, fs.ErrNotExist) { + return err + } + if err == nil && fi.Mode().IsRegular() { + paths = append(paths, dest) + return nil + } + + // Symlink destination has another symlink called device, which can be read and used to retrieve data + device := filepath.Join(dest, "device") + fi, err = sc.fs.Stat(device) + if err != nil { + if !errors.Is(err, fs.ErrNotExist) { + return err + } + return nil + } + + if fi.Mode()&os.ModeSymlink != 0 { + return nil + } + + device, err = sc.fs.Readlink(device) + if err != nil { + return err + } + + dest = filepath.Join(dest, filepath.Clean(device)) + + // Symlink destination has a file called name, meaning a sensor exists here and data can be retrieved + if _, err := sc.fs.Stat(filepath.Join(dest, "name")); err != nil { + if !errors.Is(err, fs.ErrNotExist) { + return err + } + return nil + } + + paths = append(paths, dest) + + return nil + }) + + return paths, err +} + +func getDevicePath(path string) string { + devPath, err := filepath.EvalSymlinks(filepath.Join(path, "device")) + if err != nil { + devPath = path + if i := strings.Index(devPath, "/hwmon"); i > 0 { + devPath = devPath[:i] + } + } + return strings.TrimPrefix(devPath, "/sys/devices/") +} + +func getHash(devPath string) string { + hash := sha1.Sum([]byte(devPath)) + return hex.EncodeToString(hash[:])[:8] +} + +func getBusType(devPath string) string { + devPath = filepath.Join("/", devPath) + devPath = strings.ToLower(devPath) + + for _, v := range []string{"i2c", "isa", "pci", "spi", "virtual", "acpi", "hid", "mdio", "scsi"} { + if strings.Contains(devPath, "/"+v) { + return v + } + } + return "unk" +} + +// shouldSkip indicates if a given filename should be skipped during the filesystem walk operation. 
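+// Skipped entries are power-management and driver attributes (e.g. "uevent", +// "modalias", "runtime_*") that never hold sensor readings.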
+func shouldSkip(file string) bool { + if strings.HasPrefix(file, "runtime_") { + return true + } + + switch file { + case "async": + case "autosuspend_delay_ms": + case "control": + case "driver_override": + case "modalias": + case "uevent": + default: + return false + } + + return true +} diff --git a/src/go/plugin/go.d/modules/sensors/lmsensors/scanner_test.go b/src/go/plugin/go.d/modules/sensors/lmsensors/scanner_test.go new file mode 100644 index 000000000..44d4a7fc5 --- /dev/null +++ b/src/go/plugin/go.d/modules/sensors/lmsensors/scanner_test.go @@ -0,0 +1,523 @@ +package lmsensors + +import ( + "fmt" + "io/fs" + "os" + "path/filepath" + "slices" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestScanner_Scan(t *testing.T) { + var tests = map[string]struct { + fs filesystem + wantDevices []*Chip + }{ + "Power sensor": { + wantDevices: []*Chip{ + { + Name: "power_meter", + UniqueName: "power_meter-acpi-b37a4ed3", + SysDevice: "LNXSYSTM:00/device:00/ACPI0000:00", + Sensors: Sensors{ + Power: []*PowerSensor{ + { + Name: "power1", + Label: "some_label", + Alarm: ptr(false), + Average: ptr(345.0), + AverageHighest: ptr(345.0), + AverageLowest: ptr(345.0), + AverageMin: ptr(345.0), + Input: ptr(345.0), + InputHighest: ptr(345.0), + InputLowest: ptr(345.0), + Accuracy: ptr(34.5), + Cap: ptr(345.0), + CapMax: ptr(345.0), + CapMin: ptr(345.0), + Max: ptr(345.0), + CritMax: ptr(345.0), + }, + }, + }}, + }, + fs: &memoryFilesystem{ + symlinks: map[string]string{ + "/sys/class/hwmon/hwmon0": "../../devices/LNXSYSTM:00/device:00/ACPI0000:00/hwmon/hwmon0", + "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/hwmon/hwmon0/device": "../../../ACPI0000:00", + }, + files: []memoryFile{ + {name: "/sys/class/hwmon", de: &memoryDirEntry{isDir: true}}, + {name: "/sys/class/hwmon/hwmon0", de: &memoryDirEntry{mode: os.ModeSymlink}}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00", de: &memoryDirEntry{isDir: true}}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/hwmon/hwmon0/name", err: os.ErrNotExist}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/hwmon/hwmon0/device", de: &memoryDirEntry{}}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/name", val: "power_meter"}, + + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/power1_label", val: "some_label"}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/power1_alarm", val: "0"}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/power1_average", val: "345000000"}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/power1_average_highest", val: "345000000"}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/power1_average_lowest", val: "345000000"}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/power1_average_max", val: "unknown"}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/power1_average_min", val: "345000000"}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/power1_input", val: "345000000"}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/power1_input_highest", val: "345000000"}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/power1_input_lowest", val: "345000000"}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/power1_accuracy", val: "34.5%"}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/power1_cap", val: "345000000"}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/power1_cap_max", val: "345000000"}, + {name: 
"/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/power1_cap_min", val: "345000000"}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/power1_max", val: "345000000"}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/power1_crit", val: "345000000"}, + }, + }, + }, + "Temperature sensor": { + wantDevices: []*Chip{ + { + Name: "temp_meter", + UniqueName: "temp_meter-pci-e3b89088", + SysDevice: "pci0000:80/0000:80:01.4/0000:81:00.0/nvme/nvme0", + Sensors: Sensors{ + Temperature: []*TemperatureSensor{ + { + Name: "temp1", + Label: "some_label", + Alarm: ptr(false), + TempTypeRaw: 1, + Input: ptr(42.0), + Max: ptr(42.0), + Min: ptr(42.0), + CritMin: ptr(42.0), + CritMax: ptr(42.0), + Emergency: ptr(42.0), + Lowest: ptr(42.0), + Highest: ptr(42.0), + }, + }, + }}, + }, + fs: &memoryFilesystem{ + symlinks: map[string]string{ + "/sys/class/hwmon/hwmon0": "../../devices/pci0000:80/0000:80:01.4/0000:81:00.0/nvme/nvme0/hwmon0", + "/sys/devices/pci0000:80/0000:80:01.4/0000:81:00.0/nvme/nvme0/hwmon0/device": "../../../0000:81:00.0", + }, + files: []memoryFile{ + {name: "/sys/class/hwmon", de: &memoryDirEntry{isDir: true}}, + {name: "/sys/class/hwmon/hwmon0", de: &memoryDirEntry{mode: os.ModeSymlink}}, + {name: "/sys/devices/pci0000:80/0000:80:01.4/0000:81:00.0/nvme/nvme0/hwmon0", de: &memoryDirEntry{isDir: true}}, + {name: "/sys/devices/pci0000:80/0000:80:01.4/0000:81:00.0/nvme/hwmon/hwmon0/name", err: os.ErrNotExist}, + {name: "/sys/devices/pci0000:80/0000:80:01.4/0000:81:00.0/nvme/hwmon/hwmon0/device", de: &memoryDirEntry{}}, + {name: "/sys/devices/pci0000:80/0000:80:01.4/0000:81:00.0/nvme/nvme0/hwmon0/name", val: "temp_meter"}, + + {name: "/sys/devices/pci0000:80/0000:80:01.4/0000:81:00.0/nvme/nvme0/hwmon0/temp1_label", val: "some_label"}, + {name: "/sys/devices/pci0000:80/0000:80:01.4/0000:81:00.0/nvme/nvme0/hwmon0/temp1_alarm", val: "0"}, + {name: "/sys/devices/pci0000:80/0000:80:01.4/0000:81:00.0/nvme/nvme0/hwmon0/temp1_type", val: "1"}, + {name: "/sys/devices/pci0000:80/0000:80:01.4/0000:81:00.0/nvme/nvme0/hwmon0/temp1_max", val: "42000"}, + {name: "/sys/devices/pci0000:80/0000:80:01.4/0000:81:00.0/nvme/nvme0/hwmon0/temp1_min", val: "42000"}, + {name: "/sys/devices/pci0000:80/0000:80:01.4/0000:81:00.0/nvme/nvme0/hwmon0/temp1_input", val: "42000"}, + {name: "/sys/devices/pci0000:80/0000:80:01.4/0000:81:00.0/nvme/nvme0/hwmon0/temp1_crit", val: "42000"}, + {name: "/sys/devices/pci0000:80/0000:80:01.4/0000:81:00.0/nvme/nvme0/hwmon0/temp1_emergency", val: "42000"}, + {name: "/sys/devices/pci0000:80/0000:80:01.4/0000:81:00.0/nvme/nvme0/hwmon0/temp1_lcrit", val: "42000"}, + {name: "/sys/devices/pci0000:80/0000:80:01.4/0000:81:00.0/nvme/nvme0/hwmon0/temp1_lowest", val: "42000"}, + {name: "/sys/devices/pci0000:80/0000:80:01.4/0000:81:00.0/nvme/nvme0/hwmon0/temp1_highest", val: "42000"}, + }, + }, + }, + "Voltage sensor": { + wantDevices: []*Chip{ + { + Name: "voltage_meter", + UniqueName: "voltage_meter-acpi-b37a4ed3", + SysDevice: "LNXSYSTM:00/device:00/ACPI0000:00", + Sensors: Sensors{ + Voltage: []*VoltageSensor{ + { + Name: "in1", + Label: "some_label", + Alarm: ptr(false), + Input: ptr(42.0), + Average: ptr(42.0), + Min: ptr(42.0), + Max: ptr(42.0), + CritMin: ptr(42.0), + CritMax: ptr(42.0), + Lowest: ptr(42.0), + Highest: ptr(42.0), + }, + }, + }}, + }, + fs: &memoryFilesystem{ + symlinks: map[string]string{ + "/sys/class/hwmon/hwmon0": "../../devices/LNXSYSTM:00/device:00/ACPI0000:00/hwmon/hwmon0", + "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/hwmon/hwmon0/device": 
"../../../ACPI0000:00", + }, + files: []memoryFile{ + {name: "/sys/class/hwmon", de: &memoryDirEntry{isDir: true}}, + {name: "/sys/class/hwmon/hwmon0", de: &memoryDirEntry{mode: os.ModeSymlink}}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00", de: &memoryDirEntry{isDir: true}}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/hwmon/hwmon0/name", err: os.ErrNotExist}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/hwmon/hwmon0/device", de: &memoryDirEntry{}}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/name", val: "voltage_meter"}, + + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/in1_label", val: "some_label"}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/in1_alarm", val: "0"}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/in1_input", val: "42000"}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/in1_average", val: "42000"}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/in1_min", val: "42000"}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/in1_max", val: "42000"}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/in1_lcrit", val: "42000"}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/in1_crit", val: "42000"}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/in1_lowest", val: "42000"}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/in1_highest", val: "42000"}, + }, + }, + }, + "Fan sensor": { + wantDevices: []*Chip{ + { + Name: "fan_meter", + UniqueName: "fan_meter-acpi-b37a4ed3", + SysDevice: "LNXSYSTM:00/device:00/ACPI0000:00", + Sensors: Sensors{ + Fan: []*FanSensor{ + { + Name: "fan1", + Label: "some_label", + Alarm: ptr(false), + Input: ptr(42.0), + Min: ptr(42.0), + Max: ptr(42.0), + Target: ptr(42.0), + }, + }, + }}, + }, + fs: &memoryFilesystem{ + symlinks: map[string]string{ + "/sys/class/hwmon/hwmon0": "../../devices/LNXSYSTM:00/device:00/ACPI0000:00/hwmon/hwmon0", + "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/hwmon/hwmon0/device": "../../../ACPI0000:00", + }, + files: []memoryFile{ + {name: "/sys/class/hwmon", de: &memoryDirEntry{isDir: true}}, + {name: "/sys/class/hwmon/hwmon0", de: &memoryDirEntry{mode: os.ModeSymlink}}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00", de: &memoryDirEntry{isDir: true}}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/hwmon/hwmon0/name", err: os.ErrNotExist}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/hwmon/hwmon0/device", de: &memoryDirEntry{}}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/name", val: "fan_meter"}, + + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/fan1_label", val: "some_label"}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/fan1_alarm", val: "0"}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/fan1_input", val: "42"}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/fan1_min", val: "42"}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/fan1_max", val: "42"}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/fan1_target", val: "42"}, + }, + }, + }, + "Energy sensor": { + wantDevices: []*Chip{ + { + Name: "energy_meter", + UniqueName: "energy_meter-acpi-b37a4ed3", + SysDevice: "LNXSYSTM:00/device:00/ACPI0000:00", + Sensors: Sensors{ + Energy: []*EnergySensor{ + { + Name: "energy1", + Label: "some_label", + Input: ptr(42.0), + }, + }, + }}, + }, + fs: &memoryFilesystem{ + symlinks: map[string]string{ + "/sys/class/hwmon/hwmon0": 
"../../devices/LNXSYSTM:00/device:00/ACPI0000:00/hwmon/hwmon0", + "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/hwmon/hwmon0/device": "../../../ACPI0000:00", + }, + files: []memoryFile{ + {name: "/sys/class/hwmon", de: &memoryDirEntry{isDir: true}}, + {name: "/sys/class/hwmon/hwmon0", de: &memoryDirEntry{mode: os.ModeSymlink}}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00", de: &memoryDirEntry{isDir: true}}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/hwmon/hwmon0/name", err: os.ErrNotExist}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/hwmon/hwmon0/device", de: &memoryDirEntry{}}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/name", val: "energy_meter"}, + + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/energy1_label", val: "some_label"}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/energy1_alarm", val: "0"}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/energy1_input", val: "42000000"}, + }, + }, + }, + "Current sensor": { + wantDevices: []*Chip{ + { + Name: "current_meter", + UniqueName: "current_meter-acpi-b37a4ed3", + SysDevice: "LNXSYSTM:00/device:00/ACPI0000:00", + Sensors: Sensors{ + Current: []*CurrentSensor{ + { + Name: "curr1", + Label: "some_label", + Alarm: ptr(false), + Max: ptr(42.0), + Min: ptr(42.0), + CritMin: ptr(42.0), + CritMax: ptr(42.0), + Input: ptr(42.0), + Average: ptr(42.0), + Lowest: ptr(42.0), + Highest: ptr(42.0), + }, + }, + }}, + }, + fs: &memoryFilesystem{ + symlinks: map[string]string{ + "/sys/class/hwmon/hwmon0": "../../devices/LNXSYSTM:00/device:00/ACPI0000:00/hwmon/hwmon0", + "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/hwmon/hwmon0/device": "../../../ACPI0000:00", + }, + files: []memoryFile{ + {name: "/sys/class/hwmon", de: &memoryDirEntry{isDir: true}}, + {name: "/sys/class/hwmon/hwmon0", de: &memoryDirEntry{mode: os.ModeSymlink}}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00", de: &memoryDirEntry{isDir: true}}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/hwmon/hwmon0/name", err: os.ErrNotExist}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/hwmon/hwmon0/device", de: &memoryDirEntry{}}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/name", val: "current_meter"}, + + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/curr1_label", val: "some_label"}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/curr1_alarm", val: "0"}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/curr1_max", val: "42000"}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/curr1_min", val: "42000"}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/curr1_lcrit", val: "42000"}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/curr1_crit", val: "42000"}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/curr1_input", val: "42000"}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/curr1_average", val: "42000"}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/curr1_lowest", val: "42000"}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/curr1_highest", val: "42000"}, + }, + }, + }, + "Humidity sensor": { + wantDevices: []*Chip{ + { + Name: "humidity_meter", + UniqueName: "humidity_meter-acpi-b37a4ed3", + SysDevice: "LNXSYSTM:00/device:00/ACPI0000:00", + Sensors: Sensors{ + Humidity: []*HumiditySensor{ + { + Name: "humidity1", + Label: "some_label", + Input: ptr(42.0), + }, + }, + }}, + }, + fs: &memoryFilesystem{ + symlinks: map[string]string{ + "/sys/class/hwmon/hwmon0": 
"../../devices/LNXSYSTM:00/device:00/ACPI0000:00/hwmon/hwmon0", + "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/hwmon/hwmon0/device": "../../../ACPI0000:00", + }, + files: []memoryFile{ + {name: "/sys/class/hwmon", de: &memoryDirEntry{isDir: true}}, + {name: "/sys/class/hwmon/hwmon0", de: &memoryDirEntry{mode: os.ModeSymlink}}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00", de: &memoryDirEntry{isDir: true}}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/hwmon/hwmon0/name", err: os.ErrNotExist}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/hwmon/hwmon0/device", de: &memoryDirEntry{}}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/name", val: "humidity_meter"}, + + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/humidity1_label", val: "some_label"}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/humidity1_alarm", val: "0"}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/humidity1_input", val: "42000"}, + }, + }, + }, + "Intrusion sensor": { + wantDevices: []*Chip{ + { + Name: "intrusion_meter", + UniqueName: "intrusion_meter-acpi-b37a4ed3", + SysDevice: "LNXSYSTM:00/device:00/ACPI0000:00", + Sensors: Sensors{ + Intrusion: []*IntrusionSensor{ + { + Name: "intrusion1", + Label: "some_label", + Alarm: ptr(false), + }, + }, + }}, + }, + fs: &memoryFilesystem{ + symlinks: map[string]string{ + "/sys/class/hwmon/hwmon0": "../../devices/LNXSYSTM:00/device:00/ACPI0000:00/hwmon/hwmon0", + "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/hwmon/hwmon0/device": "../../../ACPI0000:00", + }, + files: []memoryFile{ + {name: "/sys/class/hwmon", de: &memoryDirEntry{isDir: true}}, + {name: "/sys/class/hwmon/hwmon0", de: &memoryDirEntry{mode: os.ModeSymlink}}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00", de: &memoryDirEntry{isDir: true}}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/hwmon/hwmon0/name", err: os.ErrNotExist}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/hwmon/hwmon0/device", de: &memoryDirEntry{}}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/name", val: "intrusion_meter"}, + + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/intrusion1_label", val: "some_label"}, + {name: "/sys/devices/LNXSYSTM:00/device:00/ACPI0000:00/intrusion1_alarm", val: "0"}, + }, + }, + }, + } + for name, test := range tests { + t.Run(name, func(t *testing.T) { + sc := New() + sc.fs = test.fs + + devices, err := sc.Scan() + require.NoError(t, err) + + for _, dev := range devices { + for _, sn := range dev.Sensors.Voltage { + require.NotZerof(t, sn.ReadTime, "zero read time: [voltage] dev '%s' sensor '%s'", dev.Name, sn.Name) + sn.ReadTime = 0 + } + for _, sn := range dev.Sensors.Fan { + require.NotZerof(t, sn.ReadTime, "zero read time: [fan] dev '%s' sensor '%s'", dev.Name, sn.Name) + sn.ReadTime = 0 + } + for _, sn := range dev.Sensors.Temperature { + require.NotZerof(t, sn.ReadTime, "zero read time: [temp] dev '%s' sensor '%s'", dev.Name, sn.Name) + sn.ReadTime = 0 + } + for _, sn := range dev.Sensors.Current { + require.NotZerof(t, sn.ReadTime, "zero read time: [curr] dev '%s' sensor '%s'", dev.Name, sn.Name) + sn.ReadTime = 0 + } + for _, sn := range dev.Sensors.Power { + require.NotZerof(t, sn.ReadTime, "zero read time: [power] dev '%s' sensor '%s'", dev.Name, sn.Name) + sn.ReadTime = 0 + } + for _, sn := range dev.Sensors.Energy { + require.NotZerof(t, sn.ReadTime, "zero read time: [energy] dev '%s' sensor '%s'", dev.Name, sn.Name) + sn.ReadTime = 0 + } + for _, sn := range 
dev.Sensors.Humidity { + require.NotZerof(t, sn.ReadTime, "zero read time: [humidity] dev '%s' sensor '%s'", dev.Name, sn.Name) + sn.ReadTime = 0 + } + for _, sn := range dev.Sensors.Intrusion { + require.NotZerof(t, sn.ReadTime, "zero read time: [intrusion] dev '%s' sensor '%s'", dev.Name, sn.Name) + sn.ReadTime = 0 + } + } + + require.Equal(t, test.wantDevices, devices) + }) + } +} + +type memoryFilesystem struct { + symlinks map[string]string + files []memoryFile +} + +func (m *memoryFilesystem) ReadDir(name string) ([]fs.DirEntry, error) { + if !slices.ContainsFunc(m.files, func(file memoryFile) bool { return file.name == name }) { + return nil, fmt.Errorf("readdir: dir %s not in memory", name) + } + var des []fs.DirEntry + for _, v := range m.files { + if strings.HasPrefix(v.name, name) { + des = append(des, &memoryDirEntry{name: filepath.Base(v.name), isDir: false}) + } + } + return des, nil +} + +func (m *memoryFilesystem) ReadFile(filename string) (string, error) { + for _, f := range m.files { + if f.name == filename { + return f.val, nil + } + } + + return "", fmt.Errorf("readfile: file %q not in memory", filename) +} + +func (m *memoryFilesystem) Readlink(name string) (string, error) { + if l, ok := m.symlinks[name]; ok { + return l, nil + } + + return "", fmt.Errorf("readlink: symlink %q not in memory", name) +} + +func (m *memoryFilesystem) Stat(name string) (os.FileInfo, error) { + for _, f := range m.files { + if f.name == name { + de := f.de + if de == nil { + de = &memoryDirEntry{} + } + info, _ := de.Info() + return info, f.err + } + } + + return nil, fmt.Errorf("stat: file %q not in memory", name) +} + +func (m *memoryFilesystem) WalkDir(root string, walkFn fs.WalkDirFunc) error { + if _, err := m.Stat(root); err != nil { + return err + } + + for _, f := range m.files { + if !strings.HasPrefix(f.name, root) { + continue + } + + de := f.de + if de == nil { + de = &memoryDirEntry{} + } + + if err := walkFn(f.name, de, nil); err != nil { + return err + } + } + + return nil +} + +type memoryFile struct { + name string + val string + de fs.DirEntry + err error +} + +type memoryDirEntry struct { + name string + mode os.FileMode + isDir bool +} + +func (fi *memoryDirEntry) Name() string { return fi.name } +func (fi *memoryDirEntry) Type() os.FileMode { return fi.mode } +func (fi *memoryDirEntry) IsDir() bool { return fi.isDir } +func (fi *memoryDirEntry) Info() (fs.FileInfo, error) { return fi, nil } +func (fi *memoryDirEntry) Sys() any { return nil } +func (fi *memoryDirEntry) Size() int64 { return 0 } +func (fi *memoryDirEntry) Mode() os.FileMode { return fi.Type() } +func (fi *memoryDirEntry) ModTime() time.Time { return time.Now() } diff --git a/src/go/plugin/go.d/modules/sensors/lmsensors/sensor.go b/src/go/plugin/go.d/modules/sensors/lmsensors/sensor.go new file mode 100644 index 000000000..9e8e067ca --- /dev/null +++ b/src/go/plugin/go.d/modules/sensors/lmsensors/sensor.go @@ -0,0 +1,177 @@ +package lmsensors + +import ( + "time" +) + +// A Chip is a physical or virtual device which may have zero or more Sensors. +type Chip struct { + Name string + UniqueName string + SysDevice string + Sensors Sensors +} + +type Sensors struct { + // https://www.kernel.org/doc/Documentation/hwmon/sysfs-interface + Voltage []*VoltageSensor + Fan []*FanSensor + Temperature []*TemperatureSensor + Current []*CurrentSensor + Power []*PowerSensor + Energy []*EnergySensor + Humidity []*HumiditySensor + Intrusion []*IntrusionSensor +} + +// A VoltageSensor is a Sensor that detects voltage. 
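+// Raw sysfs readings are in millivolts and are scaled to Volts by the parser; +// a nil field means the driver does not expose the corresponding attribute.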
+type VoltageSensor struct { + ReadTime time.Duration + + Name string + Label string + + Alarm *bool + + Input *float64 + Average *float64 + + Lowest *float64 + Highest *float64 + + Min *float64 + Max *float64 + CritMin *float64 + CritMax *float64 +} + +// A FanSensor is a Sensor that detects fan speeds in rotations per minute. +type FanSensor struct { + ReadTime time.Duration + + Name string + Label string + + Alarm *bool + + Input *float64 + Min *float64 + Max *float64 + Target *float64 +} + +// A TemperatureSensor is a Sensor that detects temperatures in degrees Celsius. +type TemperatureSensor struct { + ReadTime time.Duration + + Name string + Label string + TempTypeRaw int + + Alarm *bool + + Input *float64 + Lowest *float64 + Highest *float64 + Min *float64 + Max *float64 + CritMin *float64 + CritMax *float64 + Emergency *float64 +} + +func (s TemperatureSensor) TempType() string { + switch s.TempTypeRaw { + case 1: + return "CPU embedded diode" + case 2: + return "3904 transistor" + case 3: + return "thermal diode" + case 4: + return "thermistor" + case 5: + return "AMD AMDSI" + case 6: + return "Intel PECI" + default: + return "" + } +} + +// A CurrentSensor is a Sensor that detects current in Amperes. +type CurrentSensor struct { + ReadTime time.Duration + + Name string + Label string + + Alarm *bool + + Input *float64 + Lowest *float64 + Highest *float64 + Min *float64 + Max *float64 + CritMin *float64 + CritMax *float64 + + Average *float64 +} + +// A PowerSensor is a Sensor that reports electrical power consumption in watts. +type PowerSensor struct { + ReadTime time.Duration + + Name string + Label string + + Alarm *bool + + Input *float64 + InputLowest *float64 + InputHighest *float64 + Cap *float64 + CapMin *float64 + CapMax *float64 + Max *float64 + CritMax *float64 + + Average *float64 + AverageMin *float64 + AverageMax *float64 + AverageLowest *float64 + AverageHighest *float64 + + Accuracy *float64 +} + +// An EnergySensor is a Sensor that detects energy consumption in microjoules. +type EnergySensor struct { + ReadTime time.Duration + + Name string + Label string + + Input *float64 +} + +// A HumiditySensor is a Sensor that detects humidity in milli-percent. +type HumiditySensor struct { + ReadTime time.Duration + + Name string + Label string + + Input *float64 +} + +// An IntrusionSensor is a Sensor that detects when the machine's chassis has been opened. +type IntrusionSensor struct { + ReadTime time.Duration + + Name string + Label string + + Alarm *bool +} diff --git a/src/go/plugin/go.d/modules/sensors/metadata.yaml b/src/go/plugin/go.d/modules/sensors/metadata.yaml index 5ea94f398..58b251766 100644 --- a/src/go/plugin/go.d/modules/sensors/metadata.yaml +++ b/src/go/plugin/go.d/modules/sensors/metadata.yaml @@ -5,7 +5,7 @@ modules: plugin_name: go.d.plugin module_name: sensors monitored_instance: - name: Linux Sensors (lm-sensors) + name: Linux Sensors link: https://hwmon.wiki.kernel.org/lm_sensors icon_filename: "microchip.svg" categories: @@ -19,6 +19,7 @@ modules: - fan - energy - humidity + - intrusion related_resources: integrations: list: [] @@ -27,13 +28,22 @@ modules: most_popular: false overview: data_collection: - metrics_description: > - This collector gathers real-time system sensor statistics, - including temperature, voltage, current, power, fan speed, energy consumption, and humidity, - utilizing the [sensors](https://linux.die.net/man/1/sensors) binary. 
+ metrics_description: | + This collector gathers real-time system sensor statistics using the [sysfs](https://www.kernel.org/doc/Documentation/hwmon/sysfs-interface) interface. + + Supported sensors: + + - Temperature + - Voltage + - Fan + - Current + - Power + - Energy + - Humidity + - Intrusion method_description: "" supported_platforms: - include: [] + include: [Linux] exclude: [] multi_instance: false additional_permissions: @@ -41,26 +51,14 @@ modules: default_behavior: auto_detection: description: | - The following type of sensors are auto-detected: - - - temperature - - fan - - voltage - - current - - power - - energy - - humidity + Automatically discovers and exposes all available sensors on the system through the [sysfs](https://www.kernel.org/doc/Documentation/hwmon/sysfs-interface) interface. limits: description: "" performance_impact: description: "" setup: prerequisites: - list: - - title: Install lm-sensors - description: | - - Install `lm-sensors` using your distribution's package manager. - - Run `sensors-detect` to detect hardware monitoring chips. + list: [] configuration: file: name: go.d/sensors.conf @@ -75,25 +73,49 @@ modules: description: Data collection frequency. default_value: 10 required: false - - name: binary_path - description: Path to the `sensors` binary. If an absolute path is provided, the collector will use it directly; otherwise, it will search for the binary in directories specified in the PATH environment variable. - default_value: /usr/bin/sensors - required: true - - name: timeout - description: Timeout for executing the binary, specified in seconds. - default_value: 2 + - name: relabel + description: A list used to update existing sensor labels or add labels to sensors that don't have them. + default_value: "[]" + required: false + - name: relabel[].chip + description: "[Pattern](/src/libnetdata/simple_pattern/README.md#simple-patterns) to match the `chip_id` label value." + default_value: "" + required: false + - name: relabel[].sensors + description: A list of sensors to be relabeled for the specified chip. + default_value: "[]" + required: false + - name: relabel[].sensors[].name + description: The exact sensor name (e.g., `'temp1'`, `'in1'`, `'voltage1'`). + default_value: "" + required: false + - name: relabel[].sensors[].label + description: The new label value for the sensor. + default_value: "" required: false examples: folding: title: Config enabled: true list: - - name: Custom binary path - description: The executable is not in the directories specified in the PATH environment variable. + - name: Custom update_every + description: Allows you to override the default data collection interval. + config: | + jobs: + - name: sensors + update_every: 5 # Collect sensors statistics every 5 seconds + - name: Renaming labels + description: Allows you to override/add labels. config: | jobs: - name: sensors - binary_path: /usr/local/sbin/sensors + relabel: + - chip: as99127f-* + sensors: + - name: temp1 + label: Mobo Temp + - name: temp2 + label: CPU0 Temp troubleshooting: problems: list: [] @@ -106,52 +128,116 @@ modules: availability: [] scopes: - name: sensor - description: These metrics refer to the sensor. + description: These metrics refer to the system sensor. labels: - name: chip - description: The hardware component responsible for the sensor monitoring. - - name: feature - description: The specific sensor or monitoring point provided by the chip. + description: The path to the sensor's chip device, excluding the /sys/devices prefix. 
This provides a unique identifier for the physical hardware component. + - name: chip_id + description: A unique identifier for the sensor's chip, formatted as `chipName-busType-hash`. + - name: sensor + description: The name of the specific sensor within the chip device. This provides a direct identifier for the individual measurement point. + - name: label + description: A label provided by the kernel driver to indicate the intended use or purpose of the sensor. metrics: - - name: sensors.sensor_temperature - description: Sensor temperature + - name: sensors.chip_sensor_temperature + description: Sensor Temperature unit: Celsius chart_type: line dimensions: - - name: temperature - - name: sensors.sensor_voltage - description: Sensor voltage + - name: input + - name: sensors.chip_sensor_temperature_alarm + description: Temperature Sensor Alarm + unit: status + chart_type: line + dimensions: + - name: clear + - name: triggered + - name: sensors.chip_sensor_voltage + description: Sensor Voltage unit: Volts chart_type: line dimensions: - - name: voltage - - name: sensors.sensor_current - description: Sensor current + - name: input + - name: sensors.chip_sensor_voltage_average + description: Sensor Voltage Average + unit: Volts + chart_type: line + dimensions: + - name: average + - name: sensors.chip_sensor_voltage_alarm + description: Voltage Sensor Alarm + unit: status + chart_type: line + dimensions: + - name: clear + - name: triggered + - name: sensors.chip_sensor_fan + description: Sensor Fan + unit: RPM + chart_type: line + dimensions: + - name: input + - name: sensors.chip_sensor_fan_alarm + description: Fan Sensor Alarm + unit: status + chart_type: line + dimensions: + - name: clear + - name: triggered + - name: sensors.chip_sensor_current + description: Sensor Current unit: Amperes chart_type: line dimensions: - - name: current - - name: sensors.sensor_power - description: Sensor power + - name: input + - name: sensors.chip_sensor_current_average + description: Sensor Current Average + unit: Amperes + chart_type: line + dimensions: + - name: average + - name: sensors.chip_sensor_current_alarm + description: Current Sensor Alarm + unit: status + chart_type: line + dimensions: + - name: clear + - name: triggered + - name: sensors.chip_sensor_power + description: Sensor Power unit: Watts chart_type: line dimensions: - - name: power - - name: sensors.sensor_fan_speed - description: Sensor fan speed - unit: RPM + - name: input + - name: sensors.chip_sensor_power_average + description: Sensor Power Average + unit: Watts + chart_type: line + dimensions: + - name: average + - name: sensors.chip_sensor_power_alarm + description: Power Sensor Alarm + unit: status chart_type: line dimensions: - - name: fan - - name: sensors.sensor_energy - description: Sensor energy + - name: clear + - name: triggered + - name: sensors.chip_sensor_energy + description: Sensor Energy unit: Joules chart_type: line dimensions: - - name: energy - - name: sensors.sensor_humidity - description: Sensor humidity + - name: input + - name: sensors.chip_sensor_humidity + description: Sensor Humidity unit: percent - chart_type: area + chart_type: line + dimensions: + - name: input + - name: sensors.chip_sensor_intrusion_alarm + description: Intrusion Sensor Alarm + unit: status + chart_type: line dimensions: - - name: humidity + - name: clear + - name: triggered diff --git a/src/go/plugin/go.d/modules/sensors/sensors.go b/src/go/plugin/go.d/modules/sensors/sensors.go index 379d44deb..c52cbe947 100644 --- 
a/src/go/plugin/go.d/modules/sensors/sensors.go +++ b/src/go/plugin/go.d/modules/sensors/sensors.go @@ -1,14 +1,15 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux + package sensors import ( _ "embed" "errors" - "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" + "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/sensors/lmsensors" ) //go:embed "config_schema.json" @@ -27,19 +28,21 @@ func init() { func New() *Sensors { return &Sensors{ - Config: Config{ - BinaryPath: "/usr/bin/sensors", - Timeout: web.Duration(time.Second * 2), - }, - charts: &module.Charts{}, - sensors: make(map[string]bool), + Config: Config{}, + charts: &module.Charts{}, + seenSensors: make(map[string]bool), } } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"` - BinaryPath string `yaml:"binary_path" json:"binary_path"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + Relabel []struct { + Chip string `yaml:"chip" json:"chip"` + Sensors []struct { + Name string `yaml:"name" json:"name"` + Label string `yaml:"label" json:"label"` + } `yaml:"sensors,omitempty" json:"sensors"` + } `yaml:"relabel,omitempty" json:"relabel"` } type ( @@ -49,12 +52,12 @@ type ( charts *module.Charts - exec sensorsCLI + sc sysfsScanner - sensors map[string]bool + seenSensors map[string]bool } - sensorsCLI interface { - sensorsInfo() ([]byte, error) + sysfsScanner interface { + Scan() ([]*lmsensors.Chip, error) } ) @@ -63,17 +66,9 @@ func (s *Sensors) Configuration() any { } func (s *Sensors) Init() error { - if err := s.validateConfig(); err != nil { - s.Errorf("config validation: %s", err) - return err - } - - sensorsExec, err := s.initSensorsCliExec() - if err != nil { - s.Errorf("sensors exec initialization: %v", err) - return err - } - s.exec = sensorsExec + sc := lmsensors.New() + sc.Logger = s.Logger + s.sc = sc return nil } @@ -81,7 +76,6 @@ func (s *Sensors) Init() error { func (s *Sensors) Check() error { mx, err := s.collect() if err != nil { - s.Error(err) return err } diff --git a/src/go/plugin/go.d/modules/sensors/sensors_test.go b/src/go/plugin/go.d/modules/sensors/sensors_test.go index a370d7500..0e7ee089c 100644 --- a/src/go/plugin/go.d/modules/sensors/sensors_test.go +++ b/src/go/plugin/go.d/modules/sensors/sensors_test.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux + package sensors import ( @@ -8,6 +10,7 @@ import ( "testing" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/sensors/lmsensors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -16,18 +19,12 @@ import ( var ( dataConfigJSON, _ = os.ReadFile("testdata/config.json") dataConfigYAML, _ = os.ReadFile("testdata/config.yaml") - - dataSensorsTemp, _ = os.ReadFile("testdata/sensors-temp.txt") - dataSensorsTempInCurrPowerFan, _ = os.ReadFile("testdata/sensors-temp-in-curr-power-fan.txt") ) func Test_testDataIsValid(t *testing.T) { for name, data := range map[string][]byte{ "dataConfigJSON": dataConfigJSON, "dataConfigYAML": dataConfigYAML, - - "dataSensorsTemp": dataSensorsTemp, - "dataSensorsTempInCurrPowerFan": dataSensorsTempInCurrPowerFan, } { require.NotNil(t, data, name) @@ -43,17 +40,9 @@ func TestSensors_Init(t *testing.T) { config Config wantFail bool }{ - "fails if 'binary_path' is not set": 
{ - wantFail: true, - config: Config{ - BinaryPath: "", - }, - }, - "fails if failed to find binary": { - wantFail: true, - config: Config{ - BinaryPath: "sensors!!!", - }, + "success with default config": { + wantFail: false, + config: New().Config, }, } @@ -83,7 +72,7 @@ func TestSensors_Cleanup(t *testing.T) { "after check": { prepare: func() *Sensors { sensors := New() - sensors.exec = prepareMockOkOnlyTemp() + sensors.sc = prepareMockScannerOk() _ = sensors.Check() return sensors }, @@ -91,7 +80,7 @@ func TestSensors_Cleanup(t *testing.T) { "after collect": { prepare: func() *Sensors { sensors := New() - sensors.exec = prepareMockOkTempInCurrPowerFan() + sensors.sc = prepareMockScannerOk() _ = sensors.Collect() return sensors }, @@ -113,36 +102,23 @@ func TestSensors_Charts(t *testing.T) { func TestSensors_Check(t *testing.T) { tests := map[string]struct { - prepareMock func() *mockSensorsCLIExec + prepareMock func() *mockScanner wantFail bool }{ - "only temperature": { - wantFail: false, - prepareMock: prepareMockOkOnlyTemp, - }, - "temperature and voltage": { + "multiple sensors": { wantFail: false, - prepareMock: prepareMockOkTempInCurrPowerFan, + prepareMock: prepareMockScannerOk, }, - "error on sensors info call": { + "error on scan": { wantFail: true, - prepareMock: prepareMockErr, - }, - "empty response": { - wantFail: true, - prepareMock: prepareMockEmptyResponse, - }, - "unexpected response": { - wantFail: true, - prepareMock: prepareMockUnexpectedResponse, + prepareMock: prepareMockScannerErr, }, } for name, test := range tests { t.Run(name, func(t *testing.T) { sensors := New() - mock := test.prepareMock() - sensors.exec = mock + sensors.sc = test.prepareMock() if test.wantFail { assert.Error(t, sensors.Check()) @@ -155,154 +131,367 @@ func TestSensors_Check(t *testing.T) { func TestSensors_Collect(t *testing.T) { tests := map[string]struct { - prepareMock func() *mockSensorsCLIExec - wantMetrics map[string]int64 - wantCharts int + prepareScanner func() *mockScanner + wantMetrics map[string]int64 + wantCharts int }{ - "only temperature": { - prepareMock: prepareMockOkOnlyTemp, - wantCharts: 24, - wantMetrics: map[string]int64{ - "sensor_chip_bnxt_en-pci-6200_feature_temp1_subfeature_temp1_input": 80000, - "sensor_chip_bnxt_en-pci-6201_feature_temp1_subfeature_temp1_input": 81000, - "sensor_chip_k10temp-pci-00c3_feature_tccd1_subfeature_temp3_input": 58250, - "sensor_chip_k10temp-pci-00c3_feature_tccd2_subfeature_temp4_input": 60250, - "sensor_chip_k10temp-pci-00c3_feature_tccd3_subfeature_temp5_input": 57000, - "sensor_chip_k10temp-pci-00c3_feature_tccd4_subfeature_temp6_input": 57250, - "sensor_chip_k10temp-pci-00c3_feature_tccd5_subfeature_temp7_input": 57750, - "sensor_chip_k10temp-pci-00c3_feature_tccd6_subfeature_temp8_input": 59500, - "sensor_chip_k10temp-pci-00c3_feature_tccd7_subfeature_temp9_input": 58500, - "sensor_chip_k10temp-pci-00c3_feature_tccd8_subfeature_temp10_input": 61250, - "sensor_chip_k10temp-pci-00c3_feature_tctl_subfeature_temp1_input": 62000, - "sensor_chip_k10temp-pci-00cb_feature_tccd1_subfeature_temp3_input": 54000, - "sensor_chip_k10temp-pci-00cb_feature_tccd2_subfeature_temp4_input": 55500, - "sensor_chip_k10temp-pci-00cb_feature_tccd3_subfeature_temp5_input": 56000, - "sensor_chip_k10temp-pci-00cb_feature_tccd4_subfeature_temp6_input": 52750, - "sensor_chip_k10temp-pci-00cb_feature_tccd5_subfeature_temp7_input": 53500, - "sensor_chip_k10temp-pci-00cb_feature_tccd6_subfeature_temp8_input": 55250, - 
"sensor_chip_k10temp-pci-00cb_feature_tccd7_subfeature_temp9_input": 53000, - "sensor_chip_k10temp-pci-00cb_feature_tccd8_subfeature_temp10_input": 53750, - "sensor_chip_k10temp-pci-00cb_feature_tctl_subfeature_temp1_input": 57500, - "sensor_chip_nouveau-pci-4100_feature_temp1_subfeature_temp1_input": 51000, - "sensor_chip_nvme-pci-0100_feature_composite_subfeature_temp1_input": 39850, - "sensor_chip_nvme-pci-6100_feature_composite_subfeature_temp1_input": 48850, - "sensor_chip_nvme-pci-8100_feature_composite_subfeature_temp1_input": 39850, - }, - }, "multiple sensors": { - prepareMock: prepareMockOkTempInCurrPowerFan, - wantCharts: 19, + prepareScanner: prepareMockScannerOk, + wantCharts: 24, wantMetrics: map[string]int64{ - "sensor_chip_acpitz-acpi-0_feature_temp1_subfeature_temp1_input": 88000, - "sensor_chip_amdgpu-pci-0300_feature_edge_subfeature_temp1_input": 53000, - "sensor_chip_amdgpu-pci-0300_feature_fan1_subfeature_fan1_input": 0, - "sensor_chip_amdgpu-pci-0300_feature_junction_subfeature_temp2_input": 58000, - "sensor_chip_amdgpu-pci-0300_feature_mem_subfeature_temp3_input": 57000, - "sensor_chip_amdgpu-pci-0300_feature_vddgfx_subfeature_in0_input": 787, - "sensor_chip_amdgpu-pci-6700_feature_edge_subfeature_temp1_input": 60000, - "sensor_chip_amdgpu-pci-6700_feature_ppt_subfeature_power1_input": 8144, - "sensor_chip_amdgpu-pci-6700_feature_vddgfx_subfeature_in0_input": 1335, - "sensor_chip_amdgpu-pci-6700_feature_vddnb_subfeature_in1_input": 973, - "sensor_chip_asus-isa-0000_feature_cpu_fan_subfeature_fan1_input": 5700000, - "sensor_chip_asus-isa-0000_feature_gpu_fan_subfeature_fan2_input": 6600000, - "sensor_chip_bat0-acpi-0_feature_in0_subfeature_in0_input": 17365, - "sensor_chip_k10temp-pci-00c3_feature_tctl_subfeature_temp1_input": 90000, - "sensor_chip_nvme-pci-0600_feature_composite_subfeature_temp1_input": 33850, - "sensor_chip_nvme-pci-0600_feature_sensor_1_subfeature_temp2_input": 48850, - "sensor_chip_nvme-pci-0600_feature_sensor_2_subfeature_temp3_input": 33850, - "sensor_chip_ucsi_source_psy_usbc000:001-isa-0000_feature_curr1_subfeature_curr1_input": 0, - "sensor_chip_ucsi_source_psy_usbc000:001-isa-0000_feature_in0_subfeature_in0_input": 0, + "chip_chip0-pci-xxxxxxxx_sensor_curr1_alarm_clear": 1, + "chip_chip0-pci-xxxxxxxx_sensor_curr1_alarm_triggered": 0, + "chip_chip0-pci-xxxxxxxx_sensor_curr1_average": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_curr1_crit": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_curr1_highest": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_curr1_input": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_curr1_lcrit": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_curr1_lowest": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_curr1_max": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_curr1_min": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_curr1_read_time": 0, + "chip_chip0-pci-xxxxxxxx_sensor_curr2_crit": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_curr2_highest": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_curr2_input": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_curr2_lcrit": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_curr2_lowest": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_curr2_max": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_curr2_min": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_curr2_read_time": 0, + "chip_chip0-pci-xxxxxxxx_sensor_energy1_input": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_energy1_read_time": 0, + "chip_chip0-pci-xxxxxxxx_sensor_energy2_input": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_energy2_read_time": 0, + "chip_chip0-pci-xxxxxxxx_sensor_fan1_alarm_clear": 1, + 
"chip_chip0-pci-xxxxxxxx_sensor_fan1_alarm_triggered": 0, + "chip_chip0-pci-xxxxxxxx_sensor_fan1_input": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_fan1_max": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_fan1_min": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_fan1_read_time": 0, + "chip_chip0-pci-xxxxxxxx_sensor_fan1_target": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_fan2_input": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_fan2_max": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_fan2_min": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_fan2_read_time": 0, + "chip_chip0-pci-xxxxxxxx_sensor_fan2_target": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_humidity1_input": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_humidity1_read_time": 0, + "chip_chip0-pci-xxxxxxxx_sensor_humidity2_input": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_humidity2_read_time": 0, + "chip_chip0-pci-xxxxxxxx_sensor_in1_alarm_clear": 1, + "chip_chip0-pci-xxxxxxxx_sensor_in1_alarm_triggered": 0, + "chip_chip0-pci-xxxxxxxx_sensor_in1_average": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_in1_crit": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_in1_highest": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_in1_input": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_in1_lcrit": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_in1_lowest": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_in1_max": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_in1_min": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_in1_read_time": 0, + "chip_chip0-pci-xxxxxxxx_sensor_in2_crit": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_in2_highest": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_in2_input": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_in2_lcrit": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_in2_lowest": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_in2_max": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_in2_min": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_in2_read_time": 0, + "chip_chip0-pci-xxxxxxxx_sensor_intrusion1_alarm_clear": 1, + "chip_chip0-pci-xxxxxxxx_sensor_intrusion1_alarm_triggered": 0, + "chip_chip0-pci-xxxxxxxx_sensor_intrusion1_read_time": 0, + "chip_chip0-pci-xxxxxxxx_sensor_intrusion2_alarm_clear": 0, + "chip_chip0-pci-xxxxxxxx_sensor_intrusion2_alarm_triggered": 1, + "chip_chip0-pci-xxxxxxxx_sensor_intrusion2_read_time": 0, + "chip_chip0-pci-xxxxxxxx_sensor_power1_accuracy": 34500, + "chip_chip0-pci-xxxxxxxx_sensor_power1_alarm_clear": 1, + "chip_chip0-pci-xxxxxxxx_sensor_power1_alarm_triggered": 0, + "chip_chip0-pci-xxxxxxxx_sensor_power1_average": 345000, + "chip_chip0-pci-xxxxxxxx_sensor_power1_average_highest": 345000, + "chip_chip0-pci-xxxxxxxx_sensor_power1_average_lowest": 345000, + "chip_chip0-pci-xxxxxxxx_sensor_power1_average_max": 345000, + "chip_chip0-pci-xxxxxxxx_sensor_power1_average_min": 345000, + "chip_chip0-pci-xxxxxxxx_sensor_power1_cap": 345000, + "chip_chip0-pci-xxxxxxxx_sensor_power1_cap_max": 345000, + "chip_chip0-pci-xxxxxxxx_sensor_power1_cap_min": 345000, + "chip_chip0-pci-xxxxxxxx_sensor_power1_crit": 345000, + "chip_chip0-pci-xxxxxxxx_sensor_power1_input": 345000, + "chip_chip0-pci-xxxxxxxx_sensor_power1_input_highest": 345000, + "chip_chip0-pci-xxxxxxxx_sensor_power1_input_lowest": 345000, + "chip_chip0-pci-xxxxxxxx_sensor_power1_max": 345000, + "chip_chip0-pci-xxxxxxxx_sensor_power1_read_time": 0, + "chip_chip0-pci-xxxxxxxx_sensor_power2_accuracy": 34500, + "chip_chip0-pci-xxxxxxxx_sensor_power2_average_highest": 345000, + "chip_chip0-pci-xxxxxxxx_sensor_power2_average_lowest": 345000, + "chip_chip0-pci-xxxxxxxx_sensor_power2_average_max": 345000, + "chip_chip0-pci-xxxxxxxx_sensor_power2_average_min": 
345000, + "chip_chip0-pci-xxxxxxxx_sensor_power2_cap": 345000, + "chip_chip0-pci-xxxxxxxx_sensor_power2_cap_max": 345000, + "chip_chip0-pci-xxxxxxxx_sensor_power2_cap_min": 345000, + "chip_chip0-pci-xxxxxxxx_sensor_power2_crit": 345000, + "chip_chip0-pci-xxxxxxxx_sensor_power2_input": 345000, + "chip_chip0-pci-xxxxxxxx_sensor_power2_input_highest": 345000, + "chip_chip0-pci-xxxxxxxx_sensor_power2_input_lowest": 345000, + "chip_chip0-pci-xxxxxxxx_sensor_power2_max": 345000, + "chip_chip0-pci-xxxxxxxx_sensor_power2_read_time": 0, + "chip_chip0-pci-xxxxxxxx_sensor_temp1_alarm_clear": 1, + "chip_chip0-pci-xxxxxxxx_sensor_temp1_alarm_triggered": 0, + "chip_chip0-pci-xxxxxxxx_sensor_temp1_crit": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_temp1_emergency": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_temp1_highest": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_temp1_input": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_temp1_lcrit": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_temp1_lowest": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_temp1_max": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_temp1_min": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_temp1_read_time": 0, + "chip_chip0-pci-xxxxxxxx_sensor_temp2_crit": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_temp2_emergency": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_temp2_highest": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_temp2_input": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_temp2_lcrit": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_temp2_lowest": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_temp2_max": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_temp2_min": 42000, + "chip_chip0-pci-xxxxxxxx_sensor_temp2_read_time": 0, }, }, - "error on sensors info call": { - prepareMock: prepareMockErr, - wantMetrics: nil, - }, - "empty response": { - prepareMock: prepareMockEmptyResponse, - wantMetrics: nil, - }, - "unexpected response": { - prepareMock: prepareMockUnexpectedResponse, - wantMetrics: nil, + "error on scan": { + prepareScanner: prepareMockScannerErr, + wantMetrics: nil, }, } for name, test := range tests { t.Run(name, func(t *testing.T) { sensors := New() - mock := test.prepareMock() - sensors.exec = mock + sensors.sc = test.prepareScanner() var mx map[string]int64 + for i := 0; i < 10; i++ { mx = sensors.Collect() } assert.Equal(t, test.wantMetrics, mx) + assert.Len(t, *sensors.Charts(), test.wantCharts) - testMetricsHasAllChartsDims(t, sensors, mx) - }) - } -} -func testMetricsHasAllChartsDims(t *testing.T, sensors *Sensors, mx map[string]int64) { - for _, chart := range *sensors.Charts() { - if chart.Obsolete { - continue - } - for _, dim := range chart.Dims { - _, ok := mx[dim.ID] - assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) - } - for _, v := range chart.Vars { - _, ok := mx[v.ID] - assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) - } + if len(test.wantMetrics) > 0 { + module.TestMetricsHasAllChartsDims(t, sensors.Charts(), mx) + } + }) } } -func prepareMockOkOnlyTemp() *mockSensorsCLIExec { - return &mockSensorsCLIExec{ - sensorsInfoData: dataSensorsTemp, +func prepareMockScannerOk() *mockScanner { + return &mockScanner{ + scanData: mockChips(), } } -func prepareMockOkTempInCurrPowerFan() *mockSensorsCLIExec { - return &mockSensorsCLIExec{ - sensorsInfoData: dataSensorsTempInCurrPowerFan, +func prepareMockScannerErr() *mockScanner { + return &mockScanner{ + errOnScan: true, } } -func prepareMockErr() *mockSensorsCLIExec { - return &mockSensorsCLIExec{ - errOnSensorsInfo: true, - } +type 
mockScanner struct { + errOnScan bool + scanData []*lmsensors.Chip } -func prepareMockUnexpectedResponse() *mockSensorsCLIExec { - return &mockSensorsCLIExec{ - sensorsInfoData: []byte(` -Lorem ipsum dolor sit amet, consectetur adipiscing elit. -Nulla malesuada erat id magna mattis, eu viverra tellus rhoncus. -Fusce et felis pulvinar, posuere sem non, porttitor eros. -`), +func (m *mockScanner) Scan() ([]*lmsensors.Chip, error) { + if m.errOnScan { + return nil, errors.New("mock.scan() error") } + return m.scanData, nil } -func prepareMockEmptyResponse() *mockSensorsCLIExec { - return &mockSensorsCLIExec{} -} - -type mockSensorsCLIExec struct { - errOnSensorsInfo bool - sensorsInfoData []byte -} - -func (m *mockSensorsCLIExec) sensorsInfo() ([]byte, error) { - if m.errOnSensorsInfo { - return nil, errors.New("mock.sensorsInfo() error") +func ptr[T any](v T) *T { return &v } + +func mockChips() []*lmsensors.Chip { + return []*lmsensors.Chip{ + { + Name: "chip0", + UniqueName: "chip0-pci-xxxxxxxx", + SysDevice: "pci0000:80/0000:80:01.4/0000:81:00.0/nvme/nvme0", + Sensors: lmsensors.Sensors{ + Voltage: []*lmsensors.VoltageSensor{ + { + Name: "in1", + Label: "some_label1", + Alarm: ptr(false), + Input: ptr(42.0), + Average: ptr(42.0), + Min: ptr(42.0), + Max: ptr(42.0), + CritMin: ptr(42.0), + CritMax: ptr(42.0), + Lowest: ptr(42.0), + Highest: ptr(42.0), + }, + { + Name: "in2", + Label: "some_label2", + Input: ptr(42.0), + Min: ptr(42.0), + Max: ptr(42.0), + CritMin: ptr(42.0), + CritMax: ptr(42.0), + Lowest: ptr(42.0), + Highest: ptr(42.0), + }, + }, + Fan: []*lmsensors.FanSensor{ + { + Name: "fan1", + Label: "some_label1", + Alarm: ptr(false), + Input: ptr(42.0), + Min: ptr(42.0), + Max: ptr(42.0), + Target: ptr(42.0), + }, + { + Name: "fan2", + Label: "some_label2", + Input: ptr(42.0), + Min: ptr(42.0), + Max: ptr(42.0), + Target: ptr(42.0), + }, + }, + Temperature: []*lmsensors.TemperatureSensor{ + { + Name: "temp1", + Label: "some_label1", + Alarm: ptr(false), + TempTypeRaw: 1, + Input: ptr(42.0), + Max: ptr(42.0), + Min: ptr(42.0), + CritMin: ptr(42.0), + CritMax: ptr(42.0), + Emergency: ptr(42.0), + Lowest: ptr(42.0), + Highest: ptr(42.0), + }, + { + Name: "temp2", + Label: "some_label2", + TempTypeRaw: 1, + Input: ptr(42.0), + Max: ptr(42.0), + Min: ptr(42.0), + CritMin: ptr(42.0), + CritMax: ptr(42.0), + Emergency: ptr(42.0), + Lowest: ptr(42.0), + Highest: ptr(42.0), + }, + }, + Current: []*lmsensors.CurrentSensor{ + { + Name: "curr1", + Label: "some_label1", + Alarm: ptr(false), + Max: ptr(42.0), + Min: ptr(42.0), + CritMin: ptr(42.0), + CritMax: ptr(42.0), + Input: ptr(42.0), + Average: ptr(42.0), + Lowest: ptr(42.0), + Highest: ptr(42.0), + }, + { + Name: "curr2", + Label: "some_label2", + Max: ptr(42.0), + Min: ptr(42.0), + CritMin: ptr(42.0), + CritMax: ptr(42.0), + Input: ptr(42.0), + Lowest: ptr(42.0), + Highest: ptr(42.0), + }, + }, + Power: []*lmsensors.PowerSensor{ + { + Name: "power1", + Label: "some_label1", + Alarm: ptr(false), + Average: ptr(345.0), + AverageHighest: ptr(345.0), + AverageLowest: ptr(345.0), + AverageMax: ptr(345.0), + AverageMin: ptr(345.0), + Input: ptr(345.0), + InputHighest: ptr(345.0), + InputLowest: ptr(345.0), + Accuracy: ptr(34.5), + Cap: ptr(345.0), + CapMax: ptr(345.0), + CapMin: ptr(345.0), + Max: ptr(345.0), + CritMax: ptr(345.0), + }, + { + Name: "power2", + Label: "some_label2", + AverageHighest: ptr(345.0), + AverageLowest: ptr(345.0), + AverageMax: ptr(345.0), + AverageMin: ptr(345.0), + Input: ptr(345.0), + InputHighest: ptr(345.0), + 
InputLowest: ptr(345.0), + Accuracy: ptr(34.5), + Cap: ptr(345.0), + CapMax: ptr(345.0), + CapMin: ptr(345.0), + Max: ptr(345.0), + CritMax: ptr(345.0), + }, + }, + Energy: []*lmsensors.EnergySensor{ + { + Name: "energy1", + Label: "some_label1", + Input: ptr(42.0), + }, + { + Name: "energy2", + Label: "some_label2", + Input: ptr(42.0), + }, + }, + Humidity: []*lmsensors.HumiditySensor{ + { + Name: "humidity1", + Label: "some_label1", + Input: ptr(42.0), + }, + { + Name: "humidity2", + Label: "some_label2", + Input: ptr(42.0), + }, + }, + Intrusion: []*lmsensors.IntrusionSensor{ + { + Name: "intrusion1", + Label: "some_label1", + Alarm: ptr(false), + }, + { + Name: "intrusion2", + Label: "some_label2", + Alarm: ptr(true), + }, + }, + }, + }, } - - return m.sensorsInfoData, nil } diff --git a/src/go/plugin/go.d/modules/sensors/testdata/config.json b/src/go/plugin/go.d/modules/sensors/testdata/config.json index 095713193..703489961 100644 --- a/src/go/plugin/go.d/modules/sensors/testdata/config.json +++ b/src/go/plugin/go.d/modules/sensors/testdata/config.json @@ -1,5 +1,14 @@ { "update_every": 123, - "timeout": 123.123, - "binary_path": "ok" + "relabel": [ + { + "chip": "ok", + "sensors": [ + { + "name": "ok", + "label": "ok" + } + ] + } + ] } diff --git a/src/go/plugin/go.d/modules/sensors/testdata/config.yaml b/src/go/plugin/go.d/modules/sensors/testdata/config.yaml index baf3bcd0b..a966a249e 100644 --- a/src/go/plugin/go.d/modules/sensors/testdata/config.yaml +++ b/src/go/plugin/go.d/modules/sensors/testdata/config.yaml @@ -1,3 +1,6 @@ update_every: 123 -timeout: 123.123 -binary_path: "ok" +relabel: + - chip: "ok" + sensors: + - name: "ok" + label: "ok" diff --git a/src/go/plugin/go.d/modules/sensors/testdata/sensors-temp-in-curr-power-fan.txt b/src/go/plugin/go.d/modules/sensors/testdata/sensors-temp-in-curr-power-fan.txt deleted file mode 100644 index a38c7ab4e..000000000 --- a/src/go/plugin/go.d/modules/sensors/testdata/sensors-temp-in-curr-power-fan.txt +++ /dev/null @@ -1,72 +0,0 @@ -asus-isa-0000 -cpu_fan: - fan1_input: 5700.000 -gpu_fan: - fan2_input: 6600.000 -nvme-pci-0600 -Composite: - temp1_input: 33.850 - temp1_max: 83.850 - temp1_min: -40.150 - temp1_crit: 87.850 - temp1_alarm: 0.000 -Sensor 1: - temp2_input: 48.850 - temp2_max: 65261.850 - temp2_min: -273.150 -Sensor 2: - temp3_input: 33.850 - temp3_max: 65261.850 - temp3_min: -273.150 -amdgpu-pci-6700 -vddgfx: - in0_input: 1.335 -vddnb: - in1_input: 0.973 -edge: - temp1_input: 60.000 -PPT: - power1_average: 5.088 - power1_input: 8.144 -BAT0-acpi-0 -in0: - in0_input: 17.365 -ucsi_source_psy_USBC000:001-isa-0000 -in0: - in0_input: 0.000 - in0_min: 0.000 - in0_max: 0.000 -curr1: - curr1_input: 0.000 - curr1_max: 0.000 -k10temp-pci-00c3 -Tctl: - temp1_input: 90.000 -amdgpu-pci-0300 -vddgfx: - in0_input: 0.787 -fan1: - fan1_input: 0.000 - fan1_min: 0.000 - fan1_max: 4900.000 -edge: - temp1_input: 53.000 - temp1_crit: 100.000 - temp1_crit_hyst: -273.150 - temp1_emergency: 105.000 -junction: - temp2_input: 58.000 - temp2_crit: 100.000 - temp2_crit_hyst: -273.150 - temp2_emergency: 105.000 -mem: - temp3_input: 57.000 - temp3_crit: 105.000 - temp3_crit_hyst: -273.150 - temp3_emergency: 110.000 -PPT: - power1_average: 29.000 - power1_cap: 120.000 -acpitz-acpi-0 -temp1: - temp1_input: 88.000 diff --git a/src/go/plugin/go.d/modules/sensors/testdata/sensors-temp.txt b/src/go/plugin/go.d/modules/sensors/testdata/sensors-temp.txt deleted file mode 100644 index decc7ee39..000000000 --- 
a/src/go/plugin/go.d/modules/sensors/testdata/sensors-temp.txt +++ /dev/null @@ -1,81 +0,0 @@ -k10temp-pci-00cb -Tctl: - temp1_input: 57.500 -Tccd1: - temp3_input: 54.000 -Tccd2: - temp4_input: 55.500 -Tccd3: - temp5_input: 56.000 -Tccd4: - temp6_input: 52.750 -Tccd5: - temp7_input: 53.500 -Tccd6: - temp8_input: 55.250 -Tccd7: - temp9_input: 53.000 -Tccd8: - temp10_input: 53.750 - -bnxt_en-pci-6201 -temp1: - temp1_input: 81.000 - -nvme-pci-6100 -Composite: - temp1_input: 48.850 - temp1_max: 89.850 - temp1_min: -20.150 - temp1_crit: 94.850 - temp1_alarm: 0.000 - -nvme-pci-0100 -Composite: - temp1_input: 39.850 - temp1_max: 89.850 - temp1_min: -20.150 - temp1_crit: 94.850 - temp1_alarm: 0.000 - -nouveau-pci-4100 -temp1: - temp1_input: 51.000 - temp1_max: 95.000 - temp1_max_hyst: 3.000 - temp1_crit: 105.000 - temp1_crit_hyst: 5.000 - temp1_emergency: 135.000 - temp1_emergency_hyst: 5.000 - -k10temp-pci-00c3 -Tctl: - temp1_input: 62.000 -Tccd1: - temp3_input: 58.250 -Tccd2: - temp4_input: 60.250 -Tccd3: - temp5_input: 57.000 -Tccd4: - temp6_input: 57.250 -Tccd5: - temp7_input: 57.750 -Tccd6: - temp8_input: 59.500 -Tccd7: - temp9_input: 58.500 -Tccd8: - temp10_input: 61.250 - -bnxt_en-pci-6200 -temp1: - temp1_input: 80.000 - -nvme-pci-8100 -Composite: - temp1_input: 39.850 - temp1_max: 89.850 - temp1_min: -20.150 - temp1_crit: 94.850 - temp1_alarm: 0.000 diff --git a/src/go/plugin/go.d/modules/smartctl/charts.go b/src/go/plugin/go.d/modules/smartctl/charts.go index 461f73501..d261106c7 100644 --- a/src/go/plugin/go.d/modules/smartctl/charts.go +++ b/src/go/plugin/go.d/modules/smartctl/charts.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly + package smartctl import ( diff --git a/src/go/plugin/go.d/modules/smartctl/collect.go b/src/go/plugin/go.d/modules/smartctl/collect.go index b76d0998e..735bd0f72 100644 --- a/src/go/plugin/go.d/modules/smartctl/collect.go +++ b/src/go/plugin/go.d/modules/smartctl/collect.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly + package smartctl import ( diff --git a/src/go/plugin/go.d/modules/smartctl/config_schema.json b/src/go/plugin/go.d/modules/smartctl/config_schema.json index afe7ce1a9..1fe04caf4 100644 --- a/src/go/plugin/go.d/modules/smartctl/config_schema.json +++ b/src/go/plugin/go.d/modules/smartctl/config_schema.json @@ -82,7 +82,6 @@ } } }, - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/smartctl/doc.go b/src/go/plugin/go.d/modules/smartctl/doc.go new file mode 100644 index 000000000..514e64a44 --- /dev/null +++ b/src/go/plugin/go.d/modules/smartctl/doc.go @@ -0,0 +1,3 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package smartctl diff --git a/src/go/plugin/go.d/modules/smartctl/exec.go b/src/go/plugin/go.d/modules/smartctl/exec.go index 94974c0d3..631cd7a24 100644 --- a/src/go/plugin/go.d/modules/smartctl/exec.go +++ b/src/go/plugin/go.d/modules/smartctl/exec.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly + package smartctl import ( diff --git a/src/go/plugin/go.d/modules/smartctl/init.go b/src/go/plugin/go.d/modules/smartctl/init.go index 6d3731a18..0a6046ffb 100644 --- a/src/go/plugin/go.d/modules/smartctl/init.go +++ b/src/go/plugin/go.d/modules/smartctl/init.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || 
freebsd || openbsd || netbsd || dragonfly + package smartctl import ( @@ -8,7 +10,7 @@ import ( "path/filepath" "github.com/netdata/netdata/go/plugins/pkg/executable" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher" + "github.com/netdata/netdata/go/plugins/pkg/matcher" ) func (s *Smartctl) validateConfig() error { diff --git a/src/go/plugin/go.d/modules/smartctl/integrations/s.m.a.r.t..md b/src/go/plugin/go.d/modules/smartctl/integrations/s.m.a.r.t..md index b9eb9f368..2410593fd 100644 --- a/src/go/plugin/go.d/modules/smartctl/integrations/s.m.a.r.t..md +++ b/src/go/plugin/go.d/modules/smartctl/integrations/s.m.a.r.t..md @@ -33,7 +33,10 @@ Executed commands: -This collector is supported on all platforms. +This collector is only supported on the following platforms: + +- Linux +- BSD This collector only supports collecting metrics from a single instance of this integration. @@ -147,8 +150,8 @@ Install `smartmontools` version 7.0 or later using your distribution's package m The configuration file name for this integration is `go.d/smartctl.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/smartctl/metadata.yaml b/src/go/plugin/go.d/modules/smartctl/metadata.yaml index e748e82ae..14214c4ef 100644 --- a/src/go/plugin/go.d/modules/smartctl/metadata.yaml +++ b/src/go/plugin/go.d/modules/smartctl/metadata.yaml @@ -34,7 +34,7 @@ modules: - `smartctl --json --all {deviceName} --device {deviceType} --nocheck {powerMode}` method_description: "" supported_platforms: - include: [] + include: [Linux, BSD] exclude: [] multi_instance: false additional_permissions: diff --git a/src/go/plugin/go.d/modules/smartctl/scan.go b/src/go/plugin/go.d/modules/smartctl/scan.go index d904ca289..14eb8b120 100644 --- a/src/go/plugin/go.d/modules/smartctl/scan.go +++ b/src/go/plugin/go.d/modules/smartctl/scan.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly + package smartctl import ( diff --git a/src/go/plugin/go.d/modules/smartctl/smart_device.go b/src/go/plugin/go.d/modules/smartctl/smart_device.go index 280281aad..b43fadc21 100644 --- a/src/go/plugin/go.d/modules/smartctl/smart_device.go +++ b/src/go/plugin/go.d/modules/smartctl/smart_device.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly + package smartctl import ( diff --git a/src/go/plugin/go.d/modules/smartctl/smartctl.go b/src/go/plugin/go.d/modules/smartctl/smartctl.go index 36f390a37..206e936b5 100644 --- a/src/go/plugin/go.d/modules/smartctl/smartctl.go +++ b/src/go/plugin/go.d/modules/smartctl/smartctl.go @@ -1,15 +1,18 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly + package smartctl import ( _ "embed" "errors" + "fmt" "time" + "github.com/netdata/netdata/go/plugins/pkg/matcher" 
"github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/tidwall/gjson" ) @@ -31,9 +34,9 @@ func init() { func New() *Smartctl { return &Smartctl{ Config: Config{ - Timeout: web.Duration(time.Second * 5), - ScanEvery: web.Duration(time.Minute * 15), - PollDevicesEvery: web.Duration(time.Minute * 5), + Timeout: confopt.Duration(time.Second * 5), + ScanEvery: confopt.Duration(time.Minute * 15), + PollDevicesEvery: confopt.Duration(time.Minute * 5), NoCheckPowerMode: "standby", DeviceSelector: "*", }, @@ -47,9 +50,9 @@ func New() *Smartctl { type ( Config struct { UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"` - ScanEvery web.Duration `yaml:"scan_every,omitempty" json:"scan_every"` - PollDevicesEvery web.Duration `yaml:"poll_devices_every,omitempty" json:"poll_devices_every"` + Timeout confopt.Duration `yaml:"timeout,omitempty" json:"timeout"` + ScanEvery confopt.Duration `yaml:"scan_every,omitempty" json:"scan_every"` + PollDevicesEvery confopt.Duration `yaml:"poll_devices_every,omitempty" json:"poll_devices_every"` NoCheckPowerMode string `yaml:"no_check_power_mode,omitempty" json:"no_check_power_mode"` DeviceSelector string `yaml:"device_selector,omitempty" json:"device_selector"` ExtraDevices []ConfigExtraDevice `yaml:"extra_devices,omitempty" json:"extra_devices"` @@ -93,21 +96,18 @@ func (s *Smartctl) Configuration() any { func (s *Smartctl) Init() error { if err := s.validateConfig(); err != nil { - s.Errorf("config validation error: %s", err) - return err + return fmt.Errorf("config validation: %s", err) } sr, err := s.initDeviceSelector() if err != nil { - s.Errorf("device selector initialization: %v", err) - return err + return fmt.Errorf("device selector initialization: %v", err) } s.deviceSr = sr smartctlExec, err := s.initSmartctlCli() if err != nil { - s.Errorf("smartctl exec initialization: %v", err) - return err + return fmt.Errorf("smartctl exec initialization: %v", err) } s.exec = smartctlExec @@ -117,7 +117,6 @@ func (s *Smartctl) Init() error { func (s *Smartctl) Check() error { mx, err := s.collect() if err != nil { - s.Error(err) return err } diff --git a/src/go/plugin/go.d/modules/smartctl/smartctl_test.go b/src/go/plugin/go.d/modules/smartctl/smartctl_test.go index 7c56605f6..46d8bffa7 100644 --- a/src/go/plugin/go.d/modules/smartctl/smartctl_test.go +++ b/src/go/plugin/go.d/modules/smartctl/smartctl_test.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly + package smartctl import ( @@ -9,7 +11,7 @@ import ( "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -357,8 +359,8 @@ func TestSmartctl_Collect(t *testing.T) { } mock := test.prepareMock() smart.exec = mock - smart.ScanEvery = web.Duration(time.Microsecond * 1) - smart.PollDevicesEvery = web.Duration(time.Microsecond * 1) + smart.ScanEvery = confopt.Duration(time.Microsecond * 1) + smart.PollDevicesEvery = confopt.Duration(time.Microsecond * 1) var mx map[string]int64 for i := 0; i < 10; i++ { @@ -366,25 +368,11 @@ func 
TestSmartctl_Collect(t *testing.T) { } assert.Equal(t, test.wantMetrics, mx) - assert.Len(t, *smart.Charts(), test.wantCharts) - testMetricsHasAllChartsDims(t, smart, mx) - }) - } -} -func testMetricsHasAllChartsDims(t *testing.T, smart *Smartctl, mx map[string]int64) { - for _, chart := range *smart.Charts() { - if chart.Obsolete { - continue - } - for _, dim := range chart.Dims { - _, ok := mx[dim.ID] - assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) - } - for _, v := range chart.Vars { - _, ok := mx[v.ID] - assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) - } + assert.Len(t, *smart.Charts(), test.wantCharts, "wantCharts") + + module.TestMetricsHasAllChartsDims(t, smart.Charts(), mx) + }) } } diff --git a/src/go/plugin/go.d/modules/snmp/charts.go b/src/go/plugin/go.d/modules/snmp/charts.go index dd31f1cc7..5079f3779 100644 --- a/src/go/plugin/go.d/modules/snmp/charts.go +++ b/src/go/plugin/go.d/modules/snmp/charts.go @@ -162,7 +162,8 @@ func (s *SNMP) addNetIfaceCharts(iface *netInterface) { for _, chart := range *charts { chart.ID = fmt.Sprintf(chart.ID, cleanIfaceName(iface.ifName)) chart.Labels = []module.Label{ - {Key: "sysName", Value: s.sysName}, + {Key: "vendor", Value: s.sysInfo.organization}, + {Key: "sysName", Value: s.sysInfo.name}, {Key: "ifDescr", Value: iface.ifDescr}, {Key: "ifName", Value: iface.ifName}, {Key: "ifType", Value: ifTypeMapping[iface.ifType]}, @@ -190,7 +191,8 @@ func (s *SNMP) removeNetIfaceCharts(iface *netInterface) { func (s *SNMP) addSysUptimeChart() { chart := uptimeChart.Copy() chart.Labels = []module.Label{ - {Key: "sysName", Value: s.sysName}, + {Key: "vendor", Value: s.sysInfo.organization}, + {Key: "sysName", Value: s.sysInfo.name}, } if err := s.Charts().Add(chart); err != nil { s.Warning(err) diff --git a/src/go/plugin/go.d/modules/snmp/collect.go b/src/go/plugin/go.d/modules/snmp/collect.go index 24cc49dbc..92ea1175c 100644 --- a/src/go/plugin/go.d/modules/snmp/collect.go +++ b/src/go/plugin/go.d/modules/snmp/collect.go @@ -3,32 +3,29 @@ package snmp import ( - "errors" "fmt" - "log/slog" - "sort" + "slices" "strings" - "github.com/netdata/netdata/go/plugins/logger" + "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/vnodes" + "github.com/google/uuid" "github.com/gosnmp/gosnmp" ) -const ( - oidSysUptime = "1.3.6.1.2.1.1.3.0" - oidSysName = "1.3.6.1.2.1.1.5.0" - rootOidIfMibIfTable = "1.3.6.1.2.1.2.2" - rootOidIfMibIfXTable = "1.3.6.1.2.1.31.1.1" -) - func (s *SNMP) collect() (map[string]int64, error) { - if s.sysName == "" { - sysName, err := s.getSysName() + if s.sysInfo == nil { + si, err := s.getSysInfo() if err != nil { return nil, err } - s.sysName = sysName + + s.sysInfo = si s.addSysUptimeChart() + + if s.CreateVnode { + s.vnode = s.setupVnode(si) + } } mx := make(map[string]int64) @@ -43,7 +40,7 @@ func (s *SNMP) collect() (map[string]int64, error) { } } - if len(s.oids) > 0 { + if len(s.customOids) > 0 { if err := s.collectOIDs(mx); err != nil { return nil, err } @@ -52,271 +49,45 @@ func (s *SNMP) collect() (map[string]int64, error) { return mx, nil } -func (s *SNMP) getSysName() (string, error) { - resp, err := s.snmpClient.Get([]string{oidSysName}) - if err != nil { - return "", err - } - if len(resp.Variables) == 0 { - return "", errors.New("no system name") - } - return pduToString(resp.Variables[0]) -} - -func (s *SNMP) collectSysUptime(mx map[string]int64) error { - resp, err := s.snmpClient.Get([]string{oidSysUptime}) - if err != 
nil { - return err - } - if len(resp.Variables) == 0 { - return errors.New("no system uptime") - } - v, err := pduToInt(resp.Variables[0]) - if err != nil { - return err +func (s *SNMP) walkAll(rootOid string) ([]gosnmp.SnmpPDU, error) { + if s.snmpClient.Version() == gosnmp.Version1 { + return s.snmpClient.WalkAll(rootOid) } - - mx["uptime"] = v / 100 // the time is in hundredths of a second - - return nil + return s.snmpClient.BulkWalkAll(rootOid) } -func (s *SNMP) collectNetworkInterfaces(mx map[string]int64) error { - if s.checkMaxReps { - ok, err := s.adjustMaxRepetitions() - if err != nil { - return err - } - - s.checkMaxReps = false - - if !ok { - s.collectIfMib = false - - if len(s.oids) == 0 { - return errors.New("no IF-MIB data returned") - } - - s.Warning("no IF-MIB data returned") - return nil - } - } - - ifMibTable, err := s.walkAll(rootOidIfMibIfTable) - if err != nil { - return err - } - - ifMibXTable, err := s.walkAll(rootOidIfMibIfXTable) - if err != nil { - return err - } - - if len(ifMibTable) == 0 && len(ifMibXTable) == 0 { - s.Warning("no IF-MIB data returned") - s.collectIfMib = false - return nil - } - - for _, i := range s.netInterfaces { - i.updated = false +func (s *SNMP) setupVnode(si *sysInfo) *vnodes.VirtualNode { + if s.Vnode.GUID == "" { + s.Vnode.GUID = uuid.NewSHA1(uuid.NameSpaceDNS, []byte(s.Hostname)).String() } - pdus := make([]gosnmp.SnmpPDU, 0, len(ifMibTable)+len(ifMibXTable)) - pdus = append(pdus, ifMibTable...) - pdus = append(pdus, ifMibXTable...) + hostnames := []string{s.Vnode.Hostname, si.name, "snmp-device"} + i := slices.IndexFunc(hostnames, func(s string) bool { return s != "" }) - for _, pdu := range pdus { - i := strings.LastIndexByte(pdu.Name, '.') - if i == -1 { - continue - } - - idx := pdu.Name[i+1:] - oid := strings.TrimPrefix(pdu.Name[:i], ".") - - iface, ok := s.netInterfaces[idx] - if !ok { - iface = &netInterface{idx: idx} - } + s.Vnode.Hostname = fmt.Sprintf("%s(%s)", hostnames[i], s.Hostname) - switch oid { - case oidIfIndex: - iface.ifIndex, err = pduToInt(pdu) - case oidIfDescr: - iface.ifDescr, err = pduToString(pdu) - case oidIfType: - iface.ifType, err = pduToInt(pdu) - case oidIfMtu: - iface.ifMtu, err = pduToInt(pdu) - case oidIfSpeed: - iface.ifSpeed, err = pduToInt(pdu) - case oidIfAdminStatus: - iface.ifAdminStatus, err = pduToInt(pdu) - case oidIfOperStatus: - iface.ifOperStatus, err = pduToInt(pdu) - case oidIfInOctets: - iface.ifInOctets, err = pduToInt(pdu) - case oidIfInUcastPkts: - iface.ifInUcastPkts, err = pduToInt(pdu) - case oidIfInNUcastPkts: - iface.ifInNUcastPkts, err = pduToInt(pdu) - case oidIfInDiscards: - iface.ifInDiscards, err = pduToInt(pdu) - case oidIfInErrors: - iface.ifInErrors, err = pduToInt(pdu) - case oidIfInUnknownProtos: - iface.ifInUnknownProtos, err = pduToInt(pdu) - case oidIfOutOctets: - iface.ifOutOctets, err = pduToInt(pdu) - case oidIfOutUcastPkts: - iface.ifOutUcastPkts, err = pduToInt(pdu) - case oidIfOutNUcastPkts: - iface.ifOutNUcastPkts, err = pduToInt(pdu) - case oidIfOutDiscards: - iface.ifOutDiscards, err = pduToInt(pdu) - case oidIfOutErrors: - iface.ifOutErrors, err = pduToInt(pdu) - case oidIfName: - iface.ifName, err = pduToString(pdu) - case oidIfInMulticastPkts: - iface.ifInMulticastPkts, err = pduToInt(pdu) - case oidIfInBroadcastPkts: - iface.ifInBroadcastPkts, err = pduToInt(pdu) - case oidIfOutMulticastPkts: - iface.ifOutMulticastPkts, err = pduToInt(pdu) - case oidIfOutBroadcastPkts: - iface.ifOutBroadcastPkts, err = pduToInt(pdu) - case oidIfHCInOctets: - 
iface.ifHCInOctets, err = pduToInt(pdu) - case oidIfHCInUcastPkts: - iface.ifHCInUcastPkts, err = pduToInt(pdu) - case oidIfHCInMulticastPkts: - iface.ifHCInMulticastPkts, err = pduToInt(pdu) - case oidIfHCInBroadcastPkts: - iface.ifHCInBroadcastPkts, err = pduToInt(pdu) - case oidIfHCOutOctets: - iface.ifHCOutOctets, err = pduToInt(pdu) - case oidIfHCOutUcastPkts: - iface.ifHCOutUcastPkts, err = pduToInt(pdu) - case oidIfHCOutMulticastPkts: - iface.ifHCOutMulticastPkts, err = pduToInt(pdu) - case oidIfHCOutBroadcastPkts: - iface.ifHCOutMulticastPkts, err = pduToInt(pdu) - case oidIfHighSpeed: - iface.ifHighSpeed, err = pduToInt(pdu) - case oidIfAlias: - iface.ifAlias, err = pduToString(pdu) - default: - continue - } + labels := make(map[string]string) - if err != nil { - return fmt.Errorf("OID '%s': %v", pdu.Name, err) - } - - s.netInterfaces[idx] = iface - iface.updated = true + for k, v := range s.Vnode.Labels { + labels[k] = v } - - for _, iface := range s.netInterfaces { - if iface.ifName == "" { - continue - } - - typeStr := ifTypeMapping[iface.ifType] - if s.netIfaceFilterByName.MatchString(iface.ifName) || s.netIfaceFilterByType.MatchString(typeStr) { - continue - } - - if !iface.updated { - delete(s.netInterfaces, iface.idx) - if iface.hasCharts { - s.removeNetIfaceCharts(iface) - } - continue - } - if !iface.hasCharts { - iface.hasCharts = true - s.addNetIfaceCharts(iface) - } - - px := fmt.Sprintf("net_iface_%s_", iface.ifName) - mx[px+"traffic_in"] = iface.ifHCInOctets * 8 / 1000 // kilobits - mx[px+"traffic_out"] = iface.ifHCOutOctets * 8 / 1000 // kilobits - mx[px+"ucast_in"] = iface.ifHCInUcastPkts - mx[px+"ucast_out"] = iface.ifHCOutUcastPkts - mx[px+"mcast_in"] = iface.ifHCInMulticastPkts - mx[px+"mcast_out"] = iface.ifHCOutMulticastPkts - mx[px+"bcast_in"] = iface.ifHCInBroadcastPkts - mx[px+"bcast_out"] = iface.ifHCOutBroadcastPkts - mx[px+"errors_in"] = iface.ifInErrors - mx[px+"errors_out"] = iface.ifOutErrors - mx[px+"discards_in"] = iface.ifInDiscards - mx[px+"discards_out"] = iface.ifOutDiscards - - for _, v := range ifAdminStatusMapping { - mx[px+"admin_status_"+v] = 0 - } - mx[px+"admin_status_"+ifAdminStatusMapping[iface.ifAdminStatus]] = 1 - - for _, v := range ifOperStatusMapping { - mx[px+"oper_status_"+v] = 0 - } - mx[px+"oper_status_"+ifOperStatusMapping[iface.ifOperStatus]] = 1 + if si.descr != "" { + labels["sysDescr"] = si.descr } - - if logger.Level.Enabled(slog.LevelDebug) { - ifaces := make([]*netInterface, 0, len(s.netInterfaces)) - for _, nif := range s.netInterfaces { - ifaces = append(ifaces, nif) - } - sort.Slice(ifaces, func(i, j int) bool { return ifaces[i].ifIndex < ifaces[j].ifIndex }) - for _, iface := range ifaces { - s.Debugf("found %s", iface) - } + if si.contact != "" { + labels["sysContact"] = si.contact } - - return nil -} - -func (s *SNMP) adjustMaxRepetitions() (bool, error) { - orig := s.Config.Options.MaxRepetitions - maxReps := s.Config.Options.MaxRepetitions - - for { - v, err := s.walkAll(oidIfIndex) - if err != nil { - return false, err - } - - if len(v) > 0 { - if orig != maxReps { - s.Infof("changed 'max_repetitions' %d => %d", orig, maxReps) - } - return true, nil - } - - if maxReps > 5 { - maxReps = max(5, maxReps-5) - } else { - maxReps-- - } - - if maxReps <= 0 { - return false, nil - } - - s.Debugf("no IF-MIB data returned, trying to decrese 'max_repetitions' to %d", maxReps) - s.snmpClient.SetMaxRepetitions(uint32(maxReps)) + if si.location != "" { + labels["sysLocation"] = si.location } -} + // FIXME: vendor should be 
obtained from sysDescr, org should be used as a fallback + labels["vendor"] = si.organization -func (s *SNMP) walkAll(rootOid string) ([]gosnmp.SnmpPDU, error) { - if s.snmpClient.Version() == gosnmp.Version1 { - return s.snmpClient.WalkAll(rootOid) + return &vnodes.VirtualNode{ + GUID: s.Vnode.GUID, + Hostname: s.Vnode.Hostname, + Labels: labels, } - return s.snmpClient.BulkWalkAll(rootOid) } func pduToString(pdu gosnmp.SnmpPDU) (string, error) { @@ -330,6 +101,12 @@ func pduToString(pdu gosnmp.SnmpPDU) (string, error) { return strings.ToValidUTF8(string(bs), "�"), nil case gosnmp.Counter32, gosnmp.Counter64, gosnmp.Integer, gosnmp.Gauge32: return gosnmp.ToBigInt(pdu.Value).String(), nil + case gosnmp.ObjectIdentifier: + v, ok := pdu.Value.(string) + if !ok { + return "", fmt.Errorf("ObjectIdentifier is not a string but %T", pdu.Value) + } + return strings.TrimPrefix(v, "."), nil default: return "", fmt.Errorf("unsupported type: '%v'", pdu.Type) } @@ -355,41 +132,3 @@ func pduToInt(pdu gosnmp.SnmpPDU) (int64, error) { // } // return strings.Join(parts, ":"), nil //} - -func (s *SNMP) collectOIDs(mx map[string]int64) error { - for i, end := 0, 0; i < len(s.oids); i += s.Options.MaxOIDs { - if end = i + s.Options.MaxOIDs; end > len(s.oids) { - end = len(s.oids) - } - - oids := s.oids[i:end] - resp, err := s.snmpClient.Get(oids) - if err != nil { - s.Errorf("cannot get SNMP data: %v", err) - return err - } - - for i, oid := range oids { - if i >= len(resp.Variables) { - continue - } - - switch v := resp.Variables[i]; v.Type { - case gosnmp.Boolean, - gosnmp.Counter32, - gosnmp.Counter64, - gosnmp.Gauge32, - gosnmp.TimeTicks, - gosnmp.Uinteger32, - gosnmp.OpaqueFloat, - gosnmp.OpaqueDouble, - gosnmp.Integer: - mx[oid] = gosnmp.ToBigInt(v.Value).Int64() - default: - s.Debugf("skipping OID '%s' (unsupported type '%s')", oid, v.Type) - } - } - } - - return nil -} diff --git a/src/go/plugin/go.d/modules/snmp/collect_custom_oids.go b/src/go/plugin/go.d/modules/snmp/collect_custom_oids.go new file mode 100644 index 000000000..59cb5c759 --- /dev/null +++ b/src/go/plugin/go.d/modules/snmp/collect_custom_oids.go @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package snmp + +import ( + "github.com/gosnmp/gosnmp" +) + +func (s *SNMP) collectOIDs(mx map[string]int64) error { + for i, end := 0, 0; i < len(s.customOids); i += s.Options.MaxOIDs { + if end = i + s.Options.MaxOIDs; end > len(s.customOids) { + end = len(s.customOids) + } + + oids := s.customOids[i:end] + resp, err := s.snmpClient.Get(oids) + if err != nil { + s.Errorf("cannot get SNMP data: %v", err) + return err + } + + for i, oid := range oids { + if i >= len(resp.Variables) { + continue + } + + switch v := resp.Variables[i]; v.Type { + case gosnmp.Boolean, + gosnmp.Counter32, + gosnmp.Counter64, + gosnmp.Gauge32, + gosnmp.TimeTicks, + gosnmp.Uinteger32, + gosnmp.OpaqueFloat, + gosnmp.OpaqueDouble, + gosnmp.Integer: + mx[oid] = gosnmp.ToBigInt(v.Value).Int64() + default: + s.Debugf("skipping OID '%s' (unsupported type '%s')", oid, v.Type) + } + } + } + + return nil +} diff --git a/src/go/plugin/go.d/modules/snmp/collect_if_mib.go b/src/go/plugin/go.d/modules/snmp/collect_if_mib.go new file mode 100644 index 000000000..584e0eb19 --- /dev/null +++ b/src/go/plugin/go.d/modules/snmp/collect_if_mib.go @@ -0,0 +1,251 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package snmp + +import ( + "errors" + "fmt" + "log/slog" + "sort" + "strings" + + "github.com/netdata/netdata/go/plugins/logger" + + "github.com/gosnmp/gosnmp" +) 
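+// Note: "1.3.6.1.2.1.2.2" is IF-MIB's ifTable (basic interface state and 32-bit counters) and "1.3.6.1.2.1.31.1.1" is its ifXTable extension (interface names, aliases and 64-bit counters); collectNetworkInterfaces below walks both subtrees on every collection cycle.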
+ +const ( + rootOidIfMibIfTable = "1.3.6.1.2.1.2.2" + rootOidIfMibIfXTable = "1.3.6.1.2.1.31.1.1" +) + +func (s *SNMP) collectNetworkInterfaces(mx map[string]int64) error { + if s.checkMaxReps { + ok, err := s.adjustMaxRepetitions() + if err != nil { + return err + } + + s.checkMaxReps = false + + if !ok { + s.collectIfMib = false + + if len(s.customOids) == 0 { + return errors.New("no IF-MIB data returned") + } + + s.Warning("no IF-MIB data returned") + return nil + } + } + + ifMibTable, err := s.walkAll(rootOidIfMibIfTable) + if err != nil { + return err + } + + ifMibXTable, err := s.walkAll(rootOidIfMibIfXTable) + if err != nil { + return err + } + + if len(ifMibTable) == 0 && len(ifMibXTable) == 0 { + s.Warning("no IF-MIB data returned") + s.collectIfMib = false + return nil + } + + for _, i := range s.netInterfaces { + i.updated = false + } + + pdus := make([]gosnmp.SnmpPDU, 0, len(ifMibTable)+len(ifMibXTable)) + pdus = append(pdus, ifMibTable...) + pdus = append(pdus, ifMibXTable...) + + for _, pdu := range pdus { + i := strings.LastIndexByte(pdu.Name, '.') + if i == -1 { + continue + } + + idx := pdu.Name[i+1:] + oid := strings.TrimPrefix(pdu.Name[:i], ".") + + iface, ok := s.netInterfaces[idx] + if !ok { + iface = &netInterface{idx: idx} + } + + switch oid { + case oidIfIndex: + iface.ifIndex, err = pduToInt(pdu) + case oidIfDescr: + iface.ifDescr, err = pduToString(pdu) + case oidIfType: + iface.ifType, err = pduToInt(pdu) + case oidIfMtu: + iface.ifMtu, err = pduToInt(pdu) + case oidIfSpeed: + iface.ifSpeed, err = pduToInt(pdu) + case oidIfAdminStatus: + iface.ifAdminStatus, err = pduToInt(pdu) + case oidIfOperStatus: + iface.ifOperStatus, err = pduToInt(pdu) + case oidIfInOctets: + iface.ifInOctets, err = pduToInt(pdu) + case oidIfInUcastPkts: + iface.ifInUcastPkts, err = pduToInt(pdu) + case oidIfInNUcastPkts: + iface.ifInNUcastPkts, err = pduToInt(pdu) + case oidIfInDiscards: + iface.ifInDiscards, err = pduToInt(pdu) + case oidIfInErrors: + iface.ifInErrors, err = pduToInt(pdu) + case oidIfInUnknownProtos: + iface.ifInUnknownProtos, err = pduToInt(pdu) + case oidIfOutOctets: + iface.ifOutOctets, err = pduToInt(pdu) + case oidIfOutUcastPkts: + iface.ifOutUcastPkts, err = pduToInt(pdu) + case oidIfOutNUcastPkts: + iface.ifOutNUcastPkts, err = pduToInt(pdu) + case oidIfOutDiscards: + iface.ifOutDiscards, err = pduToInt(pdu) + case oidIfOutErrors: + iface.ifOutErrors, err = pduToInt(pdu) + case oidIfName: + iface.ifName, err = pduToString(pdu) + case oidIfInMulticastPkts: + iface.ifInMulticastPkts, err = pduToInt(pdu) + case oidIfInBroadcastPkts: + iface.ifInBroadcastPkts, err = pduToInt(pdu) + case oidIfOutMulticastPkts: + iface.ifOutMulticastPkts, err = pduToInt(pdu) + case oidIfOutBroadcastPkts: + iface.ifOutBroadcastPkts, err = pduToInt(pdu) + case oidIfHCInOctets: + iface.ifHCInOctets, err = pduToInt(pdu) + case oidIfHCInUcastPkts: + iface.ifHCInUcastPkts, err = pduToInt(pdu) + case oidIfHCInMulticastPkts: + iface.ifHCInMulticastPkts, err = pduToInt(pdu) + case oidIfHCInBroadcastPkts: + iface.ifHCInBroadcastPkts, err = pduToInt(pdu) + case oidIfHCOutOctets: + iface.ifHCOutOctets, err = pduToInt(pdu) + case oidIfHCOutUcastPkts: + iface.ifHCOutUcastPkts, err = pduToInt(pdu) + case oidIfHCOutMulticastPkts: + iface.ifHCOutMulticastPkts, err = pduToInt(pdu) + case oidIfHCOutBroadcastPkts: + iface.ifHCOutBroadcastPkts, err = pduToInt(pdu) + case oidIfHighSpeed: + iface.ifHighSpeed, err = pduToInt(pdu) + case oidIfAlias: + iface.ifAlias, err = pduToString(pdu) + default: + continue
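+ // Columns not mapped above are skipped; a conversion error from the
+ // cases above is checked right after this switch and aborts the
+ // collection with the offending OID.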
+ } + + if err != nil { + return fmt.Errorf("OID '%s': %v", pdu.Name, err) + } + + s.netInterfaces[idx] = iface + iface.updated = true + } + + for _, iface := range s.netInterfaces { + if iface.ifName == "" { + continue + } + + typeStr := ifTypeMapping[iface.ifType] + if s.netIfaceFilterByName.MatchString(iface.ifName) || s.netIfaceFilterByType.MatchString(typeStr) { + continue + } + + if !iface.updated { + delete(s.netInterfaces, iface.idx) + if iface.hasCharts { + s.removeNetIfaceCharts(iface) + } + continue + } + if !iface.hasCharts { + iface.hasCharts = true + s.addNetIfaceCharts(iface) + } + + px := fmt.Sprintf("net_iface_%s_", iface.ifName) + mx[px+"traffic_in"] = iface.ifHCInOctets * 8 / 1000 // kilobits + mx[px+"traffic_out"] = iface.ifHCOutOctets * 8 / 1000 // kilobits + mx[px+"ucast_in"] = iface.ifHCInUcastPkts + mx[px+"ucast_out"] = iface.ifHCOutUcastPkts + mx[px+"mcast_in"] = iface.ifHCInMulticastPkts + mx[px+"mcast_out"] = iface.ifHCOutMulticastPkts + mx[px+"bcast_in"] = iface.ifHCInBroadcastPkts + mx[px+"bcast_out"] = iface.ifHCOutBroadcastPkts + mx[px+"errors_in"] = iface.ifInErrors + mx[px+"errors_out"] = iface.ifOutErrors + mx[px+"discards_in"] = iface.ifInDiscards + mx[px+"discards_out"] = iface.ifOutDiscards + + for _, v := range ifAdminStatusMapping { + mx[px+"admin_status_"+v] = 0 + } + mx[px+"admin_status_"+ifAdminStatusMapping[iface.ifAdminStatus]] = 1 + + for _, v := range ifOperStatusMapping { + mx[px+"oper_status_"+v] = 0 + } + mx[px+"oper_status_"+ifOperStatusMapping[iface.ifOperStatus]] = 1 + } + + if logger.Level.Enabled(slog.LevelDebug) { + ifaces := make([]*netInterface, 0, len(s.netInterfaces)) + for _, nif := range s.netInterfaces { + ifaces = append(ifaces, nif) + } + sort.Slice(ifaces, func(i, j int) bool { return ifaces[i].ifIndex < ifaces[j].ifIndex }) + for _, iface := range ifaces { + s.Debugf("found %s", iface) + } + } + + return nil +} + +func (s *SNMP) adjustMaxRepetitions() (bool, error) { + orig := s.Config.Options.MaxRepetitions + maxReps := s.Config.Options.MaxRepetitions + + for { + v, err := s.walkAll(oidIfIndex) + if err != nil { + return false, err + } + + if len(v) > 0 { + if orig != maxReps { + s.Infof("changed 'max_repetitions' %d => %d", orig, maxReps) + } + return true, nil + } + + if maxReps > 5 { + maxReps = max(5, maxReps-5) + } else { + maxReps-- + } + + if maxReps <= 0 { + return false, nil + } + + s.Debugf("no IF-MIB data returned, trying to decrease 'max_repetitions' to %d", maxReps) + s.snmpClient.SetMaxRepetitions(uint32(maxReps)) + } +} diff --git a/src/go/plugin/go.d/modules/snmp/collect_sys_info.go b/src/go/plugin/go.d/modules/snmp/collect_sys_info.go new file mode 100644 index 000000000..b171d3b69 --- /dev/null +++ b/src/go/plugin/go.d/modules/snmp/collect_sys_info.go @@ -0,0 +1,93 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package snmp + +import ( + "errors" + "fmt" + "strings" + + "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/snmp/entnum" +) + +const ( + rootOidMibSystem = "1.3.6.1.2.1.1" + oidSysDescr = "1.3.6.1.2.1.1.1.0" + oidSysObject = "1.3.6.1.2.1.1.2.0" + oidSysUptime = "1.3.6.1.2.1.1.3.0" + oidSysContact = "1.3.6.1.2.1.1.4.0" + oidSysName = "1.3.6.1.2.1.1.5.0" + oidSysLocation = "1.3.6.1.2.1.1.6.0" +) + +type sysInfo struct { + descr string + contact string + name string + location string + + organization string +} + +func (s *SNMP) getSysInfo() (*sysInfo, error) { + pdus, err := s.snmpClient.WalkAll(rootOidMibSystem) + if err != nil { + return nil, err + } + + si := &sysInfo{ + organization:
"Unknown", + } + + r := strings.NewReplacer("\n", "\\n", "\r", "\\r") + + for _, pdu := range pdus { + oid := strings.TrimPrefix(pdu.Name, ".") + + switch oid { + case oidSysDescr: + if si.descr, err = pduToString(pdu); err == nil { + si.descr = r.Replace(si.descr) + } + case oidSysObject: + var sysObj string + if sysObj, err = pduToString(pdu); err == nil { + si.organization = entnum.LookupBySysObject(sysObj) + s.Debugf("device sysObject '%s', organization '%s'", sysObj, si.organization) + } + case oidSysContact: + si.contact, err = pduToString(pdu) + case oidSysName: + si.name, err = pduToString(pdu) + case oidSysLocation: + si.location, err = pduToString(pdu) + } + if err != nil { + return nil, fmt.Errorf("OID '%s': %v", pdu.Name, err) + } + } + + if si.name == "" { + return nil, errors.New("no system name") + } + + return si, nil +} + +func (s *SNMP) collectSysUptime(mx map[string]int64) error { + resp, err := s.snmpClient.Get([]string{oidSysUptime}) + if err != nil { + return err + } + if len(resp.Variables) == 0 { + return errors.New("no system uptime") + } + v, err := pduToInt(resp.Variables[0]) + if err != nil { + return err + } + + mx["uptime"] = v / 100 // the time is in hundredths of a second + + return nil +} diff --git a/src/go/plugin/go.d/modules/snmp/config.go b/src/go/plugin/go.d/modules/snmp/config.go index 631c47d39..740244320 100644 --- a/src/go/plugin/go.d/modules/snmp/config.go +++ b/src/go/plugin/go.d/modules/snmp/config.go @@ -2,10 +2,14 @@ package snmp +import "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/vnodes" + type ( Config struct { UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` Hostname string `yaml:"hostname" json:"hostname"` + CreateVnode bool `yaml:"create_vnode,omitempty" json:"create_vnode"` + Vnode vnodes.VirtualNode `yaml:"vnode,omitempty" json:"vnode"` Community string `yaml:"community,omitempty" json:"community"` User User `yaml:"user,omitempty" json:"user"` Options Options `yaml:"options,omitempty" json:"options"` diff --git a/src/go/plugin/go.d/modules/snmp/config_schema.json b/src/go/plugin/go.d/modules/snmp/config_schema.json index 8deb4f6c8..82de07a22 100644 --- a/src/go/plugin/go.d/modules/snmp/config_schema.json +++ b/src/go/plugin/go.d/modules/snmp/config_schema.json @@ -21,6 +21,42 @@ "type": "string", "default": "public" }, + "create_vnode": { + "title": "Create", + "description": "If set, the collector will create a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes) for this SNMP device, which will appear as a separate Node in Netdata.", + "type": "boolean" + }, + "vnode": { + "title": "Configuration", + "description": "", + "type": [ + "object", + "null" + ], + "properties": { + "guid": { + "title": "GUID", + "description": "A unique identifier for the Virtual Node. If not set, a GUID will be automatically generated from the device's IP address.", + "type": "string" + }, + "hostname": { + "title": "Hostname", + "description": "The hostname that will be used for the Virtual Node. If not set, the device's hostname will be used.", + "type": "string" + }, + "labels": { + "title": "Labels", + "description": "Additional key-value pairs to associate with the Virtual Node.", + "type": [ + "object", + "null" + ], + "additionalProperties": { + "type": "string" + } + } + } + }, "network_interface_filter": { "title": "Network interface filter", "description": "Configuration for filtering specific network interfaces. 
If left empty, no interfaces will be filtered. You can filter interfaces by name or type using [simple patterns](/src/libnetdata/simple_pattern/README.md#simple-patterns).", @@ -312,7 +348,6 @@ "community", "options" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } @@ -397,6 +432,13 @@ "community" ] }, + { + "title": "Vnode", + "fields": [ + "create_vnode", + "vnode" + ] + }, { "title": "Options", "fields": [ diff --git a/src/go/plugin/go.d/modules/snmp/entnum/enterprise-numbers.txt b/src/go/plugin/go.d/modules/snmp/entnum/enterprise-numbers.txt new file mode 100644 index 000000000..0e00d1ab0 --- /dev/null +++ b/src/go/plugin/go.d/modules/snmp/entnum/enterprise-numbers.txt @@ -0,0 +1,249594 @@ +PRIVATE ENTERPRISE NUMBERS + +(last updated 2024-09-10) + +SMI Network Management Private Enterprise Codes: + +Prefix: iso.org.dod.internet.private.enterprise (1.3.6.1.4.1) + +This file is https://www.iana.org/assignments/enterprise-numbers.txt + +Decimal +| Organization +| | Contact +| | | Email +| | | | +0 + Reserved + Internet Assigned Numbers Authority + iana&iana.org +1 + NxNetworks + Michael Kellen + OID.Admin&NxNetworks.com +2 + IBM (https://w3.ibm.com/standards ) + Glenn Daly + gdaly&us.ibm.com +3 + Carnegie Mellon + Mark Poepping + host-master&andrew.cmu.edu +4 + Unix + Keith Sklower + sklower&okeeffe.berkeley.edu +5 + ACC + Art Berggreen + art&SALT.ACC.COM +6 + TWG + John Lunny + jlunny&eco.twg.com +7 + CAYMAN + Beth Miaoulis + beth&cayman.com +8 + PSI + Marty Schoffstahl + schoff&NISC.NYSER.NET +9 + ciscoSystems + Dave Jones + davej&cisco.com +10 + NSC + John Lyman + lyman&network.com +11 + Hewlett-Packard + Harry Lynch + harry.lynch&hp.com +12 + Epilogue + Karl Auerbach + karl&cavebear.com +13 + U of Tennessee + Jeffrey Case + case&CS.UTK.EDU +14 + BBN Technologies + Matthew Gillen + matthew.gillen&raytheon.com +15 + Xylogics, Inc. + Jim Barnes + barnes&xylogics.com +16 + Timeplex + Laura Bridge + laura&uunet.UU.NET +17 + Canstar + Sanand Patel + sanand&HUB.TORONTO.EDU +18 + Wellfleet + Sharon Chisholm + schishol&nortelnetworks.com +19 + TRW + Eric Jung + eric.jung&trw.com +20 + MIT + Jeffrey I. Shiller + jis&mit.edu +21 + EON + Michael Waters + ---none--- +22 + Fibronics + Jakob Apelblat + jakob&fibronics.co.il +23 + Novell + Steve Bostock + steveb&novell.com +24 + Spider Systems + Peter Reid + peter&spider.co.uk +25 + NSFNET + Hans-Werner Braun + HWB&MCR.UMICH.EDU +26 + Hughes LAN Systems + Keith McCloghrie + KZM&HLS.COM +27 + Intergraph + Guy Streeter + guy&guy.bll.ingr.com +28 + Interlan + Bruce Taber + taber&europa.InterLan.COM +29 + Vitalink Communications + Bill Anderson + ---none--- +30 + Ulana + Bill Anderson + wda&MITRE-BEDFORD.ORG +31 + NSWC + Matthew J. Curry + currymj&nswccd.navy.mil +32 + Santa Cruz Operation + Keith Reynolds + keithr&SCO.COM +33 + MRV Communications, In-Reach Product Division + Faith Szydlo + fszydlo&itouchcom.com +34 + Cray + Hunaid Engineer + hunaid&OPUS.CRAY.COM +35 + Nortel Networks + Glenn Waters + gww&nortelnetworks.com +36 + DEC + Ron Bhanukitsiri + rbhank&DECVAX.DEC.COM +37 + Touch + Brad Benson + ---none--- +38 + Network Research Corp. 
+ Bill Versteeg + bvs&NCR.COM +39 + Baylor College of Medicine + Stan Barber + SOB&BCM.TMC.EDU +40 + NMFECC-LLNL + Steven Hunter + hunter&CCC.MFECC.LLNL.GOV +41 + SRI + David Wolfe + ctabka&TSCA.ISTC.SRI.COM +42 + Sun Microsystems + Dennis Yaro + yaro&SUN.COM +43 + 3Com + Jeremy Siegel + jzs&NSD.3Com.COM +44 + CMC + Dave Preston + ---none--- +45 + SynOptics + Sharon Chisholm + schishol&nortelnetworks.com +46 + Cheyenne Software + Reijane Huai + sibal&CSD2.NYU.EDU +47 + Prime Computer + Mike Spina + WIZARD%enr.prime.com&RELAY.CS.NET +48 + MCNC/North Carolina + Data Network Ken Whitfield + ken&MCNC.ORG +49 + Chippcom + John Cook + cook&chipcom.com +50 + Optical Data Systems + Josh Fielk + ---none--- +51 + gated + Sue Hares + snmp&nexthop.com +52 + Enterasys Networks Inc. + Charles N. McTague + cmctague&enterasys.com +53 + Apollo Computers + Jeffrey Buffun + jbuffum&APOLLO.COM +54 + DeskTalk Systems, Inc. + David Kaufman + ---none--- +55 + SSDS + Ron Strich + ---none--- +56 + Castle Rock Computing + John Sancho + ---none--- +57 + MIPS Computer Systems + Charles Marker II + marker&MIPS.COM +58 + TGV, Inc. + Ken Adelman + Adelman&TGV.COM +59 + Silicon Graphics, Inc. + Michel Bourget + snmp_admin&sgi.com +60 + University of British Columbia + Hari Mailvaganam + hari.mailvaganam&ubc.ca +61 + Merit + Bill Norton + wbn&MERIT.EDU +62 + NetEdge + Dave Minnich + dave_minnich&netedge.com +63 + Apple Computer, Inc. + Gary LaVoy + ianaoid&apple.com +64 + Gandalf + Henry Kaijak + ---none--- +65 + Dartmouth College + Scott Rea + Scott.Rea&dartmouth.edu +66 + David Systems + Kathryn de Graaf + degraaf&davidsys.com +67 + Reuter + Bob Zaniolo + ---none--- +68 + Cornell + Laurie Collinsworth + ljc1&cornell.edu +69 + Michael Sabo + L. Michael Sabo + michael.sabo&dbnetworks.com +70 + Locus Computing Corp. + Arthur Salazar + lcc.arthur&SEAS.UCLA.EDU +71 + NASA + Philip Posey + Philip.E.Posey&nasa.gov +72 + Retix + Alex Martin + ---none--- +73 + Boeing + John O'Meara + mib_contact&bandit.ns.cs.boeing.com +74 + AT&T + Domain Administrator + att-domains&att.com +75 + Ungermann-Bass + Didier Moretti + ---none--- +76 + Digital Analysis Corporation + Skip Koppenhaver + stubby!skip&uunet.UU.NET +77 + LAN Manager + Doug Karl + KARL-D&OSU-20.IRCC.OHIO-STATE.EDU +78 + LogMatrix Inc (formerly 'OpenService Inc.') + Greg Moberg + techsupport&logmatrix.com +79 + Fujitsu Services + Steve Atherton + steve.atherton&uk.fujitsu.com +80 + Auspex Systems, Inc + Marc D. Behr + mbehr&auspex.com +81 + Lannet Company + Efrat Ramati + ---none--- +82 + Network Computing Devices + Dave Mackie + lupine!djm&UUNET.UU.NET +83 + Raycom Systems + Bruce Willins + ---none--- +84 + Pirelli Focom Ltd. + Sam Lau + ---none--- +85 + Datability Software Systems + Larry Fischer + lfischer&dss.com +86 + Network Application Technology + Jim Kinder + jkinder&nat.com +87 + Institute of Telematics, Karlsruhe Institute of Technology (KIT) + Roland Bless + roland.bless&kit.edu +88 + New York University + Jimmy Kyriannis + jimmy.kyriannis&nyu.edu +89 + RND + Rina Nethaniel + ---none--- +90 + InterCon Systems Corporation + Amanda Walker + AMANDA&INTERCON.COM +91 + Coral Network Corporation + Jason Perreault + jason&coral.com +92 + Webster Computer Corporation + Robert R. Elz + kre&munnari.oz.au +93 + Frontier Technologies Corporation + Prakash Ambegaonkar + ---none--- +94 + Nokia + Petri Piira + petri.piira&nokia.com +95 + Rockwell Automation, Inc. (formerly 'Allen-Bradley Company') + Amir S. 
Ansari + asansari&ra.rockwell.com +96 + CERN + Frédéric Hemmer + Frederic.Hemmer&cern.ch +97 + Sigma Network Systems, Inc. + Ken Virgile + signet!ken&xylogics.COM +98 + Emerging Technologies, Inc. + Dennis E. Baasch + etinc!dennis&uu.psi.com +99 + SNMP Research + Jeffrey Case + case&SNMP.COM +100 + Ohio State University + Shamim Ahmed + ahmed&nisca.ircc.ohio-state.edu +101 + Ultra Network Technologies Julie + Dmytryk + Julie_Dmytryk.MKT&usun.ultra.com +102 + Microcom + Josh Kitchens + jkitchens&microcom.tv +103 + Lockheed Martin + David Rageth + david.a.rageth&lmco.com +104 + Micro Technology + Mike Erlinger + mike&lexcel.com +105 + Process Software Corporation + Bernie Volz + VOLZ&PROCESS.COM +106 + EMC Data General Division + Rene Fontaine + fontaine_rene&emc.com +107 + Bull Company + Alain BOUCHET + alain.bouchet&bull.net +108 + Emulex Corporation + Jeff Freeman + ---none--- +109 + Warwick University Computing Services + Israel Drori + raanan&techunix.technion.ac.il +110 + NetScout Systems, Inc. (formerly 'Network General Corporation') + Ashwani Singhal + ashwani.Singhal&netscout.com +111 + Oracle + John Priest + john.priest&oracle.com +112 + Control Data Corporation + Nelluri L. Reddy + reddy&uc.msc.umn.edu +113 + Hughes Aircraft Company + Keith McCloghrie + KZM&HLS.COM +114 + Synernetics, Inc. + Jas Parmar + jas&synnet.com +115 + Mitre + Bede McCall + bede&mitre.org +116 + Hitachi, Ltd. + Hirotaka Usuda + ---none--- +117 + Telebit + Mark S. Lewis + mlewis&telebit.com +118 + Salomon Technology Services + Paul Maurer II + ---none--- +119 + NEC Corporation + Yoshiyuki Akiyama + kddlab!ccs.mt.nec.co.jp!y-akiyam&uunet.uu.net +120 + Fibermux + Michael Sung + msung&ccrelay.fibermux.com +121 + FTP Software Inc. + Stev Knowles + stev&vax.ftp.com +122 + Sony + Takashi Hagiwara + Hagiwara&Sm.Sony.Co.Jp +123 + Newbridge Networks Corporation + James Watt + james&newbridge.com +124 + Racal-Datacom + Frank DaCosta + frank_dacosta&usa.racal.com +125 + CR SYSTEMS + Soren H. Sorensen + ---none--- +126 + DSET Corporation + Dan Shia + dset!shia&uunet.UU.NET +127 + Computone + Nick Hennenfent + nick&computone.com +128 + Tektronix, Inc. + Dennis Thomas + dennist&tektronix.TEK.COM +129 + Interactive Systems Corporation + Steve Alexander + stevea&i88.isc.com +130 + Banyan Systems Inc. + Deepak Taneja + eepak=Taneja%Eng%Banyan&Thing.banyan.com +131 + Sintrom Datanet Limited + ---none--- + ---none--- +132 + Bell Canada + Mark Fabbi + markf&gpu.utcs.utoronto.ca +133 + Olicom Enterprise Products Inc. + Claus Tondering + cto&olicom.dk +134 + Rice University + Paul Engle + pengle&rice.edu +135 + OnStream Networks + Annie Dang + annie&onstream.com +136 + Concurrent Computer Corporation + Pablo Ongini + pablo.ongini&ccur.com +137 + Basser + Paul O'Donnell + paulod&cs.su.oz.au +138 + Luxcom + ---none--- + ---none--- +139 + Artel + Jon Ziegler + Ziegler&Artel.com +140 + Independence Technologies, Inc.(ITI) + Gerard Berthet + gerard&indetech.com +141 + NetScout Systems, Inc. (formerly 'Frontier Software Development') + Ashwani Singhal + Ashwani.Singhal&netscout.com +142 + Digital Computer Limited + Osamu Fujiki + ---none--- +143 + Eyring, Inc. + Ron Holt + ron&Eyring.COM +144 + Case Communications + Andrew Saoulis + andys&casecomms.com +145 + Penril DataComm, Inc. + Keith Hogan + keith%penril&uunet.uu.net +146 + American Airlines, Inc.
+ Dan Glass + dan.glass&aa.com +147 + Sequent Computer Systems + Louis Fernandez + lfernandez&sequent.com +148 + Bellcore + Kaj Tesink + kaj&nvuxr.cc.bellcore.com +149 + Concord Communications + Terry Stader + tstader&concord.com +150 + University of Washington + Richard J. Letts + netops&uw.edu +151 + Develcon + Sheri Mayhew + zaphod!sherim&herald.usask.ca +152 + Solarix Systems + Paul Afshar + paul&solar1.portal.com +153 + Unifi Communications Corp. + Yigal Hochberg + yigal&unifi.com +154 + Roadnet + Dale Shelton + ---none--- +155 + Network Systems Corp. + Nadya K. El-Afandi + nadya&khara.network.com +156 + ENE (European Network Engineering) + Peter Cox + ---none--- +157 + Dansk Data Elektronik A/S + Per Bech Hansen + pbh&dde.dk +158 + Morningstar, Inc. + Ryan Johnson + ryan.johnson&morningstar.com +159 + Dupont EOP + Oscar Rodriguez + ---none--- +160 + Legato Systems, Inc. + Jon Kepecs + kepecs&Legato.COM +161 + Motorola + Joe Schaeffer + internic&motorola.com +162 + European Space Agency (ESA) + ESANIC + esanic&esa.int +163 + Aethis sa/nv + Thomas Grootaers + Thomas.Grootaers&aethis.be +164 + Rad Data Communications Ltd. + Raphael Drai + raphael_d&rad.com +165 + Tom + Thomas Lewis + thomaslewis&0xaa55.me +166 + Shiva Corporation + John Shriver + jas&shiva.com +167 + Fujikura America + Debbie Reed + ---none--- +168 + Xlnt Designs INC (XDI) + Mike Anello + mike&xlnt.com +169 + Tandem Computers + Rex Davis + ---none--- +170 + BICC + David A. Brown + fzbicdb&uk.ac.ucl +171 + D-Link Systems, Inc. + Henry P. Nagai + ---none--- +172 + AMP, Inc. + Rick Downs + ---none--- +173 + Netlink + Mauro Zallocco + ---none--- +174 + C. Itoh Electronics + Larry Davis + ---none--- +175 + Sumitomo Electric Industries (SEI) + Kent Tsuno + tsuno&sumitomo.com +176 + DHL Systems, Inc. + Veselin Terzic + vterzic&systems.dhl.com +177 + Sonus Networks, Inc. + Information Security + infosec&sonusnet.com +178 + APTEC Computer Systems + Larry Burton + ssds!larryb&uunet.UU.NET +179 + Schneider & Koch & Co, Datensysteme GmbH + Thomas Ruf + tom&rsp.de +180 + Hill Air Force Base + Russell G. Wilson + rwilson&oodis01.af.mil +181 + Kentrox + Engineering MIB Administrator + snmp&kentrox.com +182 + Japan Radio Co. + Nagayuki Kojima + nkojima&lab.nihonmusen.co.jp +183 + Versitron + Matt Harris + ---none--- +184 + Telecommunication Systems + Hugh Lockhart + ---none--- +185 + Interphase + Peter S. Wang + pwang&iphase.com +186 + Toshiba Corporation + Mike Asagami + toshiba&mothra.nts.uci.edu +187 + Clearpoint Research Corp. + Frank Kastenholz + kasten&asherah.clearpoint.com +188 + Ascom + Hector Davie + oid-admin&ascom.ch +189 + Fujitsu America + Deryk Bukowski + dbukowski&fujitsu.com +190 + NovaQuest InfoSystems + Dale Cabell + dalec&novaquest.com +191 + NCR + Tracye Lord + tracye.lord&ncr.com +192 + Dr. Materna GmbH + Torsten Beyer + tb&Materna.de +193 + Ericsson AB + David Partain + david.partain&ericsson.com +194 + Metaphor Computer Systems + Paul Rodwick + ---none--- +195 + Patriot Partners + Paul Rodwick + ---none--- +196 + The Software Group Limited (TSG) + Ragnar Paulson + tsgfred!ragnar&uunet.UU.NET +197 + Kalpana, Inc. + Anil Bhavnani + ---none--- +198 + University of Waterloo + Network Services + ns-tech&ist.uwaterloo.ca +199 + CCL/ITRI + Ming-Perng Chen + N100CMP0%TWNITRI1.BITNET&CUNYVM.CUNY.EDU +200 + Coeur Postel + Professor Kynikos + Special Consultant +201 + RYOSEI, Ltd. + Yasuma Kanamori + kanamori.yasuma&ryosys.com +202 + SMC + Lance Sprung + smcorg&meantinc.com +203 + Crescendo Communication, Inc. 
+ Prem Jain + prem&cres.com +204 + Douglas Goodall, Consultant + Douglas Goodall + doug&goodall.com +205 + Intecom + Patrick Deloulay + pdelou&intecom.com +206 + Victoria University of Wellington + Laurie Ellims + laurie.ellims&vuw.ac.nz +207 + Allied Telesis, Inc. + Hermin Anggawijaya + hermin.anggawijaya&alliedtelesis.co.nz +208 + Cray Communications A/S + Hartvig Ekner + hj&craycom.dk +209 + Protools + Glen Arp + ---none--- +210 + NIPPON TELEGRAPH AND TELEPHONE CORPORATION + Tatsuya Miyagi + netadmin&ml.hco.ntt.co.jp +211 + Fujitsu Limited + Kei Nakata + nakata.kei&fujitsu.com +212 + Network Peripherals Inc. + Creighton Chong + cchong&fastnet.com +213 + Netronix, Inc. + Jacques Roth + ---none--- +214 + University of Wisconsin Madison + Keith Hazelton + hazelton&doit.wisc.edu +215 + NetWorth, Inc. + Craig Scott + ---none--- +216 + Tandberg Data A/S + Harald Hoeg + haho%huldra.uucp&nac.no +217 + Technically Elite Concepts, Inc. + Russell S. Dietz + Russell_Dietz&Mcimail.com +218 + Labtam Australia Pty. Ltd. + Michael Podhorodecki + michael&labtam.oz.au +219 + Republic Telcom Systems, Inc. + Steve Harris + rtsc!harris&boulder.Colorado.edu +220 + ADI Systems, Inc. + Paul Liu + ---none--- +221 + Microwave Bypass Systems, Inc. + Tad Artis + ---none--- +222 + Pyramid Technology Corp. + Richard Rein + rein&pyramid.com +223 + Unisys_Corp + Grae Crofoot + grae.crofoot&unisys.com +224 + LANOPTICS LTD., Israel + Israel Drori + raanan&techunix.technion.ac.il +225 + NKK Corporation + J. Yoshida + ---none--- +226 + CODIMA Technologies Ltd + Dave Barratt + dbarratt&codimatech.com +227 + Acals + Patrick Cheng + pcheng&dill.ind.trw.com +228 + ASTEC, Inc. + Hiroshi Fujii + fujii&astec.co.jp +229 + Delmarva Power + John K. Scoggin, Jr. + scoggin&delmarva.com +230 + Telematics International, Inc. + Kevin Smith + ---none--- +231 + Fujitsu Technology Solutions GmbH (formerly 'Fujitsu Siemens Computers') + Detlef Rothe + detlef.rothe&ts.fujitsu.com +232 + Compaq + ---none--- + ---none--- +233 + NetManage, Inc. + William Dunn + netmanage&cup.portal.com +234 + NC State University + NC State Information Technology Division + admin&ncsu.edu +235 + Empirical Tools and Technologies + Karl Auerbach + karl&empirical.com +236 + Samsung Electronics Co., LTD. + Won Jong, Yang + wjyang&samsung.com +237 + Takaoka Electric Mfg. Co., Ltd. + Hidekazu Hagiwara + hagiwara&takaoka.takaoka-electric.co.jp +238 + NxNetworks + Michael Kellen + OID.Admin&NxNetworks.com +239 + WINDATA + Bob Rosenbaum + ---none--- +240 + RC International A/S + Carl H. Dreyer + chd&rci.dk +241 + Netexp Research + Henk Boetzkes + into henk&boetzkes.org +242 + Internode Systems Pty Ltd + Simon Hackett + simon&ucs.adelaide.edu.au +243 + netCS Informationstechnik GmbH + Oliver Korfmacher + okorf&bunt.netcs.com +244 + Lantronix + Greg Wheeler + gregw&lantronix.com +245 + Avatar Consultants + Kory Hamzeh + ames!avatar.com!kory&harvard.harvard.edu +246 + Furukawa Electoric Co. Ltd. + Shoji Fukutomi + kddlab!polo.furukawa.co.jp!fuku&uunet.UU.NET +247 + ND SatCom - Gesellschaft für SatellitenkommunikationssystemembH + Rüdiger Osten + ruediger.osten&ndsatcom.de +248 + Richard Hirschmann GmbH & Co. + Heinz Nisi + mia&intsun.rus.uni-stuttgart.de +249 + G2R Inc. + Khalid Hireche + ---none--- +250 + University of Michigan + Robert Klingsten + robkli&umich.edu +251 + Netcomm, Ltd. + W.R. 
Maynard-Smith + ---none--- +252 + Sable Technology Corporation + Rodney Thayer + rodney&sabletech.com +253 + Xerox + Fonda Lix Pallone + Fonda_Lix_Pallone.PARC&Xerox.Com +254 + Conware Computer Consulting GmbH + Michael Sapich + sapich&conware.de +255 + Compatible Systems Corp. + John Gawf + gawf&compatible.com +256 + Scitec Communications Systems Ltd. + Stephen Lewis + ---none--- +257 + Transarc Corporation + Pat Barron + Pat_Barron&TRANSARC.COM +258 + Matsushita Electric Industrial Co., Ltd. + Nob Mizuno + mizuno&isl.mei.co.jp +259 + ACCTON Technology + Don Rooney + ---none--- +260 + Star-Tek, Inc. + Carl Madison + carl&startek.com +261 + ADC Codenoll Technology Corporation + Dutt Bulusu and Michael Coden + bulusud&worldnet.att.net +262 + Formation, Inc. + Carl Marcinik + ---none--- +263 + Seiko Instruments, Inc. + (SII) + Yasuyoshi Watanabe ---none--- +264 + RCE (Reseaux de Communication d'Entreprise S.A.) + Etienne Baudras-Chardigny + ---none--- +265 + Xenocom, Inc. + Sean Welch + welch&raven.ulowell.edu +266 + Nexans Deutschland Industries + Hubert Theissen + hubert.theissen&nexans.com +267 + Systech Computer Corporation + Eric Lighthart + eric&systech.com +268 + Visual + Brian O'Shea + bos&visual.com +269 + CSC Airline Solutions Denmark A/S + Kasper Ibsen + kibsen&csc.com +270 + Zenith Electronics Corporation + David Lin + ---none--- +271 + Telia Company AB + Joni Rapanen + cainfo&telia.fi +272 + BinTec Communications GmbH + Elisabeth Hertlein + lies&BinTec.DE +273 + EUnet Germany + Marc Sheldon + ms&Germany.EU.net +274 + PictureTel Corporation + Oliver Jones + oj&pictel.com +275 + Michigan State University + DNS Technical Support + dnstech&msu.edu +276 + GTE Government Systems - Network Management Organization + Grant Gifford + gifford_grant&nmo.gtegsc.com +277 + Cascade Communications Corp. + Chikong Shue + alpo!chi&uunet.uu.net +278 + APRESIA Systems, Ltd. (formerly 'Hitachi Cable, Ltd.') + SUGAWARA, Shingo + shingo.sugawara.gm&hitachi-metals.com +279 + Olivetti + Marco Framba + framba&orc.olivetti.com +280 + Vitacom Corporation + Parag Rastogi + parag&cup.portal.com +281 + INMOS + Graham Hudspith + gwh&inmos.co.uk +282 + AIC Systems Laboratories Ltd. + Glenn Mansfield + glenn&aic.co.jp +283 + Cameo Communications, Inc. + Alan Brind + ---none--- +284 + Diab Data AB + Mats Lindstrom + mli&diab.se +285 + Olicom A/S + Lars Povlsen + krus&olicom.dk +286 + Digital-Kienzle Computersystems + Hans Jurgen Dorr + ---none--- +287 + CSELT(Centro Studi E Laboratori Telecomunicazioni) + Paolo Coppo + coppo&cz8700.cselt.stet.it +288 + Electronic Data Systems + EDS NNAM + hostmaster&eds.com +289 + Brocade Communications Systems, Inc. (formerly 'McData Corporation') + Scott Kipp + skipp&brocade.com +290 + L3Harris Corporation + L3Harris SNMP Admins + harris-snmp&l3harris.com +291 + Technology Dynamics, Inc. + Chip Standifer + TDYNAMICS&MCIMAIL.COM +292 + DATAHOUSE Information Systems Ltd. + Kim Le + ---none--- +293 + Teltrend (NZ) Limited + Tony van der Peet + Tony.vanderPeet&teltrend.co.nz +294 + Texas Instruments + Michael Line + a0216664&ti.com +295 + PlainTree Systems Inc. + Paul Chefurka + chefurka&plntree.UUCP +296 + Hedemann Software Development + Stefan Hedemann + 100015.2504&compuserve.com +297 + FUJIFILM Business Innovation Corp. 
+ Takatomo Wakibayashi + takatomo.wakibayashi.jt&fujifilm.com +298 + Asante Technology + Hsiang Ming Ma + ---none--- +299 + Stanford University + Bruce Vincent + bvincent&stanford.edu +300 + Digital Link + Thinh Nguyen + thinh_nguyen&dl.com +301 + Raylan Corporation + Mark S. Lewis + mlewis&telebit.com +302 + Commscraft + Len Rochford + len.rochford&commscraft.com +303 + Hughes Communications, Inc. + David Whitefield + David.Whitefield&hughes.com +304 + Farallon Computing, Inc. + Sam Roberts + sroberts&farallon.com +305 + GE Information Services + Steve Bush + sfb&ncoast.org +306 + Gambit Computer Communications + Zohar Seigal + ---none--- +307 + Livingston Enterprises, Inc. + Steve Willens + steve&livingston.com +308 + Star Technologies + Jim Miner + miner&star.com +309 + Micronics Computers Inc. + Darren Croke + dc&micronics.com +310 + Basis, Inc. + Heidi Stettner + heidi&mtxinu.COM +311 + Microsoft + Paul Russell + paulr&microsoft.com +312 + US West Advance Technologies + Donna Hopkins + dmhopki&uswat.uswest.com +313 + University College London + Tom Lodge + t.lodge&cs.ucl.ac.uk +314 + Eastman Kodak Company + W. James Colosky + w.colosky&kodak.com +315 + Network Resources Corporation + Kathy Weninger + ---none--- +316 + Atlas Telecom + Bruce Kropp + ktxc8!bruce&uunet.UU.NET +317 + Bridgeway + Umberto Vizcaino + ---none--- +318 + American Power Conversion Corp. + Peter C. Yoest + apc!yoest&uunet.uu.net +319 + DOE Atmospheric Radiation Measurement Project + Matt Macduff + matt.macduff&pnl.gov +320 + VerSteeg CodeWorks + Bill Versteeg + bvs&NCR.COM +321 + Verilink Corp + Bill Versteeg + bvs&NCR.COM +322 + Sybus Corportation + Mark T. Dauscher + mdauscher&sybus.com +323 + Tekelec + Sidney Antommarchi + santomm2&tekelec.com +324 + NASA Ames Research Center + Andrew Kjell Nielsen + andrew.nielsen&arc.nasa.gov +325 + Simon Fraser University + Robert Urquhart + quipu&sfu.ca +326 + Fore Systems, Inc. + Eric Cooper + ecc&fore.com +327 + Centrum Communications, Inc. + Vince Liu + ---none--- +328 + NeXT Computer, Inc. + Lennart Lovstrand + Lennart_Lovstrand&NeXT.COM +329 + Netcore, Inc. + Skip Morton + ---none--- +330 + Northwest Digital Systems + Brian Dockter + ---none--- +331 + Andrew Corporation + Ted Tran + ---none--- +332 + Digi International + Harald Remmert + harald.remmert&digi.com +333 + Computer Network Technology + Mike Morandi + mike_morandi&cnt.com +334 + Lotus Development Corp. + Bill + Flanagan bflanagan&lotus.com +335 + MICOM Communication Corporation + Donna Beatty + SYSAD&prime.micom.com +336 + ASCII Corporation + Toshiharu Ohno + tony-o&ascii.co.jp +337 + PUREDATA Research + Tony Baxter + tony&puredata.com +338 + NTT DATA + Yasuhiro Kohata + kohata&rd.nttdata.jp +339 + Siemens Industry Inc. + Bala Marimuthu + marimuthu.bala&siemens.com +340 + Kendall Square Research (KSR) + Dave Hudson + tdh&uunet.UU.NET +341 + ORNL + Gary Haney + hny&ornl.gov +342 + Network Innovations, Inc. + Pete Grillo + pete&ni2.com +343 + Intel Corporation + Adam Kaminski + adam.kaminski&intel.com +344 + Compuware Corporation + Ling Thio + ling_thio&compuware.com +345 + Epson Research Center + Richard Schneider + rschneid&epson.com +346 + Fibernet + George Sandoval + ---none--- +347 + Seagate Technology + Jon Wolfe + jon.a.wolfe&seagate.com +348 + American Express Company + Fred Gibbins + oidadmin&aexp.com +349 + Compu-Shack + Tomas Vocetka + OPLER%CSEARN.bitnet&CUNYVM.CUNY.EDU +350 + Parallan Computer, Inc.
+ Charles Dulin + ---none--- +351 + Stratacom + Clyde Iwamoto + cki&strata.com +352 + Open Networks Engineering, Inc. + Russ Blaesing + rrb&one.com +353 + ATM Forum + Keith McCloghrie + KZM&HLS.COM +354 + SSD Management, Inc. + Bill Rose + ---none--- +355 + Automated Network Management, Inc. + Carl Vanderbeek + ---none--- +356 + Magnalink Communications Corporation + David E. Kaufman + --none +357 + Kasten Chase Applied Research + Garry McCracken + pdxgmcc&rvax.kasten.on.ca +358 + Skyline Technology, Inc. + Don Weir + ---none--- +359 + Nu-Mega Technologies, Inc. + Patrick Klos + patrickk&numega.com +360 + Morgan Stanley & Co. International PLC + Jake Scott (PEN Managers) + oids&morganstanley.com +361 + Integrated Business Network + Michael Bell + ---none--- +362 + L & N Technologies, Ltd. + Steve Loring + ---none--- +363 + Cincinnati Bell Information Systems, Inc. + Deron Meranda + dmeranda&cbis.COM +364 + RAMA Technologies + Chris Avis + c.avis&oscomwa.com.au +365 + GM Labs, LLC. + Grayson Martin + gm&graysonmartin.net +366 + Datapoint Corporation + Lee Ziegenhals + lcz&sat.datapoint.com +367 + RICOH Co. Ltd. + Toshio Watanbe + watanabe&godzilla.rsc.spdd.ricoh.co.jp +368 + Axis Communications AB + Martin Gren + martin&axis.se +369 + Pacer Software + Wayne Tackabury + wft&pacersoft.com +370 + 3COM/Axon + Robin Iddon + Robin_Iddon&3mail.3com.com +371 + Alebra Technologies, Inc. + Harold Stevenson + harold.stevenson&alebra.com +372 + GSI + Etienne Demailly + etienne.demailly&gsi.fr +373 + Tatung Co., Ltd. + Chih-Yi Chen + TCCISM1%TWNTTIT.BITNET&pucc.Princeton.EDU +374 + DIS Research Ltd. + Ray Compton + rayc&command.com +375 + Quotron Systems, Inc. + Richard P. Stubbs + richard&atd.quotron.com +376 + Dassault Electronique + Olivier J. Caleff + caleff&dassault-elec.fr +377 + Corollary, Inc. + James L. Gula + gula&corollary.com +378 + SEEL, Ltd. + Ken Ritchie + ---none--- +379 + Lexcel + Mike Erlinger + mike&lexcel.com +380 + pier64 + Bill Parducci + bill&pier64.com +381 + OST + A. Pele + ---none--- +382 + Megadata Pty Ltd. + Andrew McRae + andrew&megadata.mega.oz.au +383 + LLNL Livermore Computer Center + Richard Mark + rmark&llnl.gov +384 + Dynatech Communications + Graham Welling + s8000!gcw&uunet.uu.net +385 + Symplex Communications Corp. + Cyrus Azar + ---none--- +386 + Tribe Computer Works + Ken Fujimoto + fuji&tribe.com +387 + Taligent, Inc. + Lorenzo Aguilar + lorenzo&taligent.com +388 + Symbol Technologies, Inc. + Carl Mower + cmower&symbol.com +389 + Lancert + Mark Hankin + ---none--- +390 + Alantec + Paul V. Fries + pvf&alantec.com +391 + Ridgeback Solutions + Errol Ginsberg + bacchus!zulu!errol&uu2.psi.com +392 + Metrix, Inc. + D. Venkatrangan + venkat&metrix.com +393 + Symantec Corporation + Paul Sangster + Paul_Sangster&Symantec.com +394 + NRL Communication Systems Branch + R.K. Nair + nair&itd.nrl.navy.mil +395 + I.D.E. Corporation + Rob Spade + ---none--- +396 + Panasonic Corporation Eco Solutions Company (formerly 'Panasonic Electric Works Co., Ltd.') + Hiroshige Nakatani + nakatani.hiroshige&jp.panasonic.com +397 + MegaPAC + Ian George + ---none--- +398 + Tyco Electronics + Dave Atkinson + dave.atkinson&tycoelectronics.com +399 + Hitachi Computer Products (America), Inc. + Masha Golosovker + masha&hicomb.hi.com +400 + METEO FRANCE + Remy Giraud + Remy.Giraud&meteo.fr +401 + PRC Inc. + Jim Noble + noble_jim&prc.com +402 + Wal-Mart Stores, Inc. + Wal-Mart Webmaster + webmaster&wal-mart.com +403 + Nissin Electric Company, Ltd. 
+ Aki Komatsuzaki + (408) 737-0274 +404 + Distributed Support Information Standard + Mike Migliano + mike&uwm.edu +405 + SMDS Interest Group (SIG) + Elysia C. Tan + ecmt1&sword.bellcore.com +406 + SolCom Systems Ltd. + Hugh Evans + 0506 873855 +407 + Bell Atlantic + Colin deSa + socrates!bm5ld15&bagout.BELL-ATL.COM +408 + Advanced Multiuser Technologies Corporation + ---none--- + ---none--- +409 + Mitsubishi Electric Corporation + Yoshitaka Ogawa + ogawa&nkai.cow.melco.co.jp +410 + C.O.L. Systems, Inc. + Frank Castellucci + (914) 277-4312 +411 + University of Auckland + Nevil Brownlee + n.brownlee&aukuni.ac.nz +412 + Distributed Management Task Force (DMTF) + Raymond C. Williams + Raymond_Williams&tivoli.com +413 + Klever Computers, Inc.Tom Su + ---none--- + kci&netcom.com +414 + Amdahl Corporation + Steve Young + sy&uts.admahl.com +415 + JTEC Pty, Ltd. + Edward Groenendaal + eddyg&jtec.com.au +416 + Matra Communcation + Hong-Loc Nguyen + (33.1) 34.60.85.25 +417 + HAL Computer Systems + Michael A. Petonic + petonic&hal.com +418 + Lawrence Berkeley National Laboratory + Greg Haverkamp + gahaverkamp&lbl.gov +419 + Dale Computer Corporation + Dean Craven + 1-800-336-7483 +420 + University of Tuebingen + Heinrich Abele + heinrich.abele&uni-tuebingen.de +421 + Bytex Corporation + Mary Ann Burt + bytex!ws054!maryann&uunet.UU.NET +422 + Cogwheel, Inc. + Brian Ellis + bri&Cogwheel.COM +423 + Lanwan Technologies + Thomas Liu + (408) 986-8899 +424 + Thomas-Conrad Corporation + Karen Boyd + (512)-836-1935 +425 + TxPort + Bill VerSteeg + bvs&ver.com +426 + Compex, Inc. + Andrew Corlett + BDA&ORION.OAC.UCI.EDU +427 + Evergreen Systems, Inc. + Bill Grace + (415) 897-8888 +428 + HNV, Inc. + James R. Simons + jrs&denver.ssds.COM +429 + UTStarcom Incorporated + Bill Vroman + bill.vroman&utstar.com +430 + Canada Post Corporation + Walter Brown + +1 613 722-8843 +431 + Open Systems Solutions, Inc. + David Ko + davidk&ossi.com +432 + Toronto Stock Exchange + Paul Kwan + (416) 947-4284 +433 + Mamakos\TransSys Consulting + Louis A. Mamakos + louie&transsys.com +434 + EICON + Vartan Narikian + vartan&eicon.qc.ca +435 + Jupiter Systems + Russell Leefer + rml&jupiter.com +436 + SSTI + Philip Calas + (33) 61 44 19 51 +437 + Grand Junction Networks + Randy Ryals + randyr&grandjunction.com +438 + Pegasus Solutions, Inc. + John Beckner + sysadm&pegs.com +439 + Edward D. Jones and Company + John Caruso + (314) 851-3422 +440 + Amnet, Inc. + Richard Mak + mak&amnet.COM +441 + Chase Research + Kevin Gage + ---none--- +442 + BMC Software + Eugene Golovinsky + egolovin&bmc.com +443 + Gateway Communications, Inc. + Ed Fudurich + ---none--- +444 + Peregrine Systems + Eric Olinger + eric&peregrine.com +445 + Daewoo Telecom + SeeYoung Oh + oco&scorpio.dwt.co.kr +446 + Norwegian Telecom Research + Paul Hoff + paalh&brage.nta.no +447 + WilTel + David Oldham + david.oldham&wiltel.com +448 + Ericsson-Camtec + Satish Popat + ---none--- +449 + Codex + Thomas McGinty + ---none--- +450 + Basis + Heidi Stettner + heidi&mtxinu.COM +451 + AGE Logic + Syd Logan + syd&age.com +452 + INDE Electronics + Gordon Day + gday&inde.ubc.ca +453 + Isode Limited + Steve Kille + Steve.Kille&isode.com +454 + J.I. Case + Mike Oswald + mike&helios.uwsp.edu +455 + Trillium + Jeff Lawrence + j_lawrence&trillium.com +456 + Bacchus Inc. + Errol Ginsberg + bacchus!zulu!errol&uu2.psi.com +457 + MCC + Doug Rosenthal + rosenthal&mcc.com +458 + Stratus Computer + ---none--- + ---none--- +459 + Quotron + Richard P. 
Stubbs + richard&atd.quotron.com +460 + Beame & Whiteside + Carl Beame + beame&ns.bws.com +461 + Cellular Technical Services + Keith Gregoire + keith&celtech.com +462 + Shore Microsystems, Inc. + Gordon Elam + (309) 229-3009 +463 + Telecommunications Techniques Corp. + Brenda Hawkins + hawkinb&ttc.com +464 + DNPAP (Technical University Delft) + Jan van Oorschot + bJan.vOorschot&dnpap.et.tudelft.nl +465 + Plexcom, Inc. + Bruce Miller + (805) 522-3333 +466 + Tylink + Stavros Mohlulis + (508) 285-0033 +467 + Brookhaven Laboratory + John Bigrow + big&bnl.gov +468 + Computer Communication Systems + Gerard Laborde + Gerard.Laborde&sp1.y-net.fr +469 + Norand Corporation + Joseph Dusio + dusioj&norand.com +470 + MUX-LAP + Philippe Labrosse + 514-735-2741 +471 + Premisys Communications, Inc + Harley Frazee + harley&premisys.com +472 + Bell South Telecommunications + Johnny Walker + 205-988-7105 +473 + J. Stainsbury PLC + Steve Parker + 44-71-921-7550 +474 + Manage Operations + Jim Corrigan + corrigan&ManageOperations.net +475 + Wandel and Goltermann Technologies + ---none--- + walter&wg.com +476 + Vertiv (formerly 'Emerson Computer Power') + Phil Ulrich + Phil.Ulrich&vertivco.com +477 + Network Software Associates + Leslie Santiago + SANTIAGL&netsoft.com +478 + Procter and Gamble + Peter Marshall + 513-983-1100x5988 +479 + Meridian Technology Corporation + Kenneth B. Denson + kdenson&magic.meridiantc.com +480 + QMS, Inc. + Bill Lott + lott&imagen.com +481 + Network ExpressTom Jarema + ---none--- + ITOH&MSEN.COM +482 + LANcity Corporation + Pam Yassini + pam&lancity.com +483 + Dayna Communications, Inc. + Sanchaita Datta + datta&signus.utah.edu +484 + kn-X Ltd. + Sam Lau + 44 943 467007 +485 + Sync Research, Inc. + Alan Bartky + (714) 588-2070 +486 + PremNet + Ken Huang + HuangK&rimail.interlan.com +487 + SIAC + Peter Ripp + (212) 383-9061 +488 + New York Stock Exchange + Peter Ripp + (212) 383-9061 +489 + American Stock Exchange + Peter Ripp + (212) 383-9061 +490 + FCR Software, Inc. + Brad Parker + brad&fcr.com +491 + National Medical Care, Inc. + Robert Phelan + (617) 466-9850 +492 + DCS Dialog Communication Systems Aktiengesellschaft Berlin + Frank Rogall + fr&dcs.de +493 + NorTele + Bjorn Kvile + +47 2 48 89 90 +494 + Madge Networks, Inc. + Duncan Greatwood + dgreatwo&madge.mhs.compuserve.com +495 + Memotec Communications + Michel Turcott + turcotm&memotec.com +496 + ON + Nick Hennenfent + nicholas&cton.com +497 + Leap Technology, Inc. + George Economou + ---none--- +498 + General DataComm, Inc. + William Meltzer + meltzer&gdc.com +499 + ACE Communications, Ltd. + Danny On + 972-3-570-1423 +500 + ADP + Barry Miracle + barry_miracle&adp.com +501 + European Agency of Digital Trust (formerly 'Programa SPRITEL') + Julian Inza + julian.inza&eadtrust.eu +502 + Adacom + Aial Haorch + 972-4-899-899 +503 + Metrodata Ltd + Nick Brown + 100022.767&compuserve.com +504 + Ericsson AB - 4G5G (formerly 'Ellemtel Telecommunication Systems Laboratories') + Jack Song + jack.song&ericsson.com +505 + Arizona Public Service + Duane Booher + DBOOHER&APSC.COM +506 + NETWIZ, Ltd., + Emanuel Wind + eumzvir&techunix.technion.ac.il +507 + Science and Engineering Research Council (SERC) + Paul Kummer + P.Kummer&daresbury.ac.uk +508 + 508 Credit Suisse First Boston - Watcher + Thomas P Wood + thomas.wood&csfb.com +509 + Hadax Electronics Inc. + Marian Kramarczyk + 73477.2731&compuserve.com +510 + VTKK + Markku Lamminluoto + lamminluoto&vtkes1.vtkk.fi +511 + North Hills Israel Ltd. 
+ Carmi Cohen + carmi&north.hellnet.org +512 + TECSIEL + R. Burlon + sr&teculx.tecsiel.it +513 + Bayerische Motoren Werke (BMW) AG + Ronny Ehrlich + Ronny.Ehrlich&bmwgroup.com +514 + CNET Technologies + Nelson Su + 408-954-8000 +515 + MCI + Jim Potter + jim.potter&mci.com +516 + Human Engineering AG (HEAG) + Urs Brunner + ubrunner&clients.switch.ch +517 + FileNet Corporation + Joe Raby + raby&filenet.com +518 + Kongsberg Gruppen ASA (formerly 'NFT-Ericsson') + Helge Andre Sundt + helge.sundt&kongsberg.com +519 + Dun & Bradstreet + Mark Sugarman + sugarmanm&dnb.com +520 + Intercomputer Communications + Brian Kean + 513-745-0500x244 +521 + Defense Intelligence Agency + Barry Atkinson + DIA-DMS&DDN-CONUS.DDN.MIL +522 + Telesystems SLW Inc. + Joe Magony + 416-441-9966 +523 + APT Communications + David Kloper + 301-831-1182 +524 + Delta Airlines + Jim Guy + 404-715-2948 +525 + California Microwave + Kevin Braun + 408-720-6520 +526 + Avid Technology Inc + Bob Craig + bob.craig&avid.com +527 + Integro Advanced Computer Systems + Pascal Turbiez + +33-20-08-00-40 +528 + RPTI + Chris Shin + 886-2-918-3006 +529 + Ascend Communications Inc. + Matthias Bannach + mbannach&ascend.com +530 + Eden Computer Systems Inc. + Louis Brando + 305-591-7752 +531 + Kawasaki-Steel Corp + Tomoo Watanabe + nrd&info.kawasaki-steel.co.jp +532 + Systems Management Infrasture, Barclays Bank PLC + Colin Walls + colin.walls&barclays.co.uk +533 + B.U.G., Inc. + Isao Tateishi + tateishi&bug.co.jp +534 + Eaton Corporation + Tom Brennan + ThomasJBrennan&eaton.com +535 + Superconducting Supercollider Lab. + Carl W. Kalbfleisch + cwk&irrational.ssc.gov +536 + Triticom + Barry Trent + btrent&triticom.com +537 + Universal Instruments Corp. + Tom Dinnel + BA06791%BINGVAXA.bitnet&CUNYVM.CUNY.EDU +538 + Information Resources, Inc. + Jeff Gear + jjg&infores.com +539 + Westell Inc. (formerly 'Kentrox') + Ken Huffman + snmp&westell.com +540 + Crypto AG + Roland Luthi + luthi&iis.ethz.ch +541 + Infinite Networks, Ltd. + Sean Harding + +44 923 710 277 +542 + Tangram Enterprise Solutions, Inc. + Steve Kuekes + skuekes&tangram.com +543 + Alebra Technologies, Inc. + Harold Stevenson + harold.stevenson&alebra.com +544 + Equinox Systems, Inc. + Monty Norwood + 1-800-275-3500 x293 +545 + Hayes Microcomputer Products + Joe Pendergrass + jpendergrass&hayes.com +546 + Empire Technologies Inc. + Cheryl Krupczak + cheryl&cc.gatech.edu +547 + Glaxochem, Ltd. + Andy Wilson + 0229 52261547 +548 + Software Professionals, Inc + Gordon Vickers + gordon&netpartners.com +549 + Agent Technology, Inc. + Ibi Dhilla + idhilla&genesis.nred.ma.us +550 + Dornier GMBH + Arens Heinrech + 49-7545-8 ext 9337 +551 + Telxon Corporation + Frank Ciotti + frankc&teleng.telxon.com +552 + Entergy Corporation + Louis Cureau + 504-364-7630 +553 + GarrettCom, Inc (formerly 'Garrett Communications') + Rajesh Kukreja + rajesh&garrettcom.com +554 + Agile Networks, Inc. + Dave Donegan + ddonegan&agile.com +555 + Larscom + Sameer Jayakar + 415-969-7572 +556 + Stock Equipment + Karl Klebenow + 216-543-6000 +557 + ITT Corporation + Kevin M. McCauley + kmm&vaxf.acdnj.itt.com +558 + Universal Data Systems, Inc. + Howard Cunningham + 70400.3671&compuserve.com +559 + Sonix Communications, Ltd. + David Webster + +44 285 641 651 +560 + Paul Freeman Associates, Inc. + Pete Wilson + pwilson&world.std.com +561 + John S. Barnes, Corp. + Michael Lynch + 704-878-4107 +562 + Northern Telecom, Ltd. 
+ Sharon Chisholm + schishol&nortelnetworks.com +563 + CAP Debris + Patrick Preuss + ppr&lfs.hamburg.cap-debris.de +564 + Telco Systems NAC + Harry Hirani + Harry&telco-nac.com +565 + Tosco Refining Co + Fred Sanderson + 510-602-4358 +566 + Russell Info Sys + Atul Desai + 714-362-4040 +567 + University of Salford + Richard Letts + R.J.Letts&salford.ac.uk +568 + NetQuest Corp. + Jerry Jacobus + netquest&tigger.jvnc.net +569 + Armon Networking Ltd. + Yigal Jacoby + yigal&armon.hellnet.org +570 + IA Corporation + Didier Fort + Didier.Fort&lia.com +571 + AU-System Communicaton AB + Torbjorn Ryding + 8-7267572 +572 + GoldStar Information & Communications, Ltd. + Soo N. Kim + ksn&giconet.gsic.co.kr +573 + SECTRA AB + Tommy Pedersen + tcp&sectra.se +574 + ONEAC Corporation + Bill Elliot + ONEACWRE&AOL.COM +575 + Tree Technologies + Michael Demjanenko + (716) 688-4640 +576 + General Dynamics Mission Systems + Charlie Limoges + charlie.limoges&gd-ms.com +577 + Geneva Software, Inc. + Andy Denenberg + andyd&genevasoft.com +578 + Interlink Computer Sciences, Inc. + Fred Bohle + fab&md.interlink.com +579 + Bridge Information Systems, Inc. + Stephen Harvey + (314) 567-8482 +580 + Leeds and Northrup Australia (LNA) Nigel Cook + nigelc&lna.oz.au + ---none--- +581 + CSG Systems International (formerly 'Intec Telecom Systems') + Michael Harvey + MIchael.Harvey&csgi.com +582 + Newport Systems Solutions, Inc. + Pauline Chen + paulinec&cisco.com +583 + azel Corporation + Narender Reddy Vangati + vnr&atrium.com +584 + ROBOTIKER + Maribel Narganes + maribel&teletek.es +585 + PeerLogic Inc. + Keith Richardson + registrar&peerlogic.com +586 + Digital Transmittion Systems + Bill VerSteeg + bvs&ver.com +587 + Far Point Communications + Bill VerSteeg + bvs&ver.com +588 + Xircom + Bill VerSteeg + bvs&ver.com +589 + Mead Data Central + Stephanie Bowman + steph&meaddata.com +590 + Royal Bank of Canada + N. Lim + (416) 348-5197 +591 + Advantis, Inc. + Janet Brehm + 813 878-4298 +592 + Chemical Banking Corp. + Paul McDonnell + pmcdonnl&world.std.com +593 + Eagle Technology + Ted Haynes + (408) 441-4043 +594 + BT + Tim Oldham + tim.oldham&bt.com +595 + Radix BV + P. Groenendaal + project2&radix.nl +596 + TAINET Communication System Corp. + Joseph Chen + +886-2-6583000 (R.O.C.) +597 + Comtek Services Inc. + Steve Harris + (703) 506-9556 +598 + Fair Isaac Corporation + Cert Admin + certadmin&fairisaac.com +599 + AST Research Inc. + Bob Beard + bobb&ast.com +600 + Soft*Star s.r.l. Ing. Enrico Badella + softstar&pol88a.polito.it + ---none--- +601 + Bancomm + Joe Fontes + jwf&bancomm.com +602 + Trusted Information Systems, Inc. + James M. Galvin + galvin&tis.com +603 + Harris & Jeffries, Inc. + Deepak Shahane + deepak&hjinc.com +604 + Axel Technology Corp. + Henry Ngai + (714) 455-1688 +605 + NetTest Inc. + David Hardy + dave.hardy&nettest.com +606 + CAP debis + Patrick Preuss + +49 40 527 28 366 +607 + Lachman Technology, Inc. + Steve Alexander + stevea&lachman.com +608 + Galcom Networking Ltd. + Zeev Greenblatt + galnet&vax.trendline.co.il +609 + BAZIS + M. van Luijt + martin&bazis.nl +610 + SYNAPTEL + Eric Remond + remond&synaptel.fr +611 + Investment Management Services, Inc. + J.
Laurens Troost + rens&stimpys.imsi.com +612 + Taiwan Telecommunication Lab + Dennis Tseng + LOUIS%TWNMOCTL.BITNET&pucc.Princeton.EDU +613 + Anagram Corporation + Michael Demjanenko + (716) 688-4640 +614 + Univel + John Nunneley + jnunnele&univel.com +615 + University of California, San Diego + Arthur Bierer + abierer&ucsd.edu +616 + CompuServe + Ed Isaacs, Brian Biggs + SYSADM&csi.compuserve.com +617 + Telstra - OTC Australia + Peter Hanselmann + peterhan&turin.research.otc.com.au +618 + Westinghouse Electric Corp. + Ananth Kupanna + ananth&access.digex.com +619 + DGA Ltd. + Tom L. Willis + twillis&pintu.demon.co.uk +620 + Elegant Communications Inc. + Robert Story + Robert.Story&Elegant.COM +621 + Experdata + Claude Lubin + +33 1 41 28 70 00 +622 + Unisource Business Networks Sweden AB + Goran Sterner + gsr&tip.net +623 + Molex, Inc. + Steven Joffe + molex&mcimail.com +624 + Quay Financial Software + Mick Fleming + mickf&quay.ie +625 + VMX Inc. + Joga Ryali + joga&vmxi.cerfnet.com +626 + Hypercom, Inc. + Noor Chowdhury + (602) 548-2113 +627 + University of Guelph + Kent Percival + Percival&CCS.UoGuelph.CA +628 + DIaLOGIKa + Juergen Jungfleisch + 0 68 97 9 35-0 +629 + NBASE Switch Communication + Sergiu Rotenstein + 75250.1477&compuserve.com +630 + Anchor Datacomm B.V. + Erik Snoek + sdrierik&diamond.sara.nl +631 + PACDATA + John Reed + johnr&hagar.pacdata.com +632 + University of Colorado + Evi Nemeth + evi&cs.colorado.edu +633 + Tricom Communications Limited + Robert Barrett + 0005114429&mcimail.com +634 + Santix Software GmbH + Michael Santifaller + santi%mozart&santix.guug.de +635 + Encore Networks, Inc. + Colin P. Roper + croper&encorenetworks.com +636 + Georgia Institute of Technology + George P. Burdell + gpb&oit.gatech.edu +637 + Nokia (formerly 'Alcatel-Lucent') + Mehrab Syed + Mehrab.Syed&alcatel-lucent.com +638 + GTECH + Brian Ruptash + bar&gtech.com +639 + UNOCAL Corporation + Peter Ho + ho&unocal.com +640 + First Pacific Network + Randy Hamilton + 408-703-2763 +641 + Lexmark International + Don Wright + don&lexmark.com +642 + Qnix Computer + Sang Weon, Yoo + swyoo&qns.qnix.co.kr +643 + Jigsaw Software Concepts (Pty) Ltd. + Willem van Biljon + wvb&itu2.sun.ac.za +644 + Eastern Research Inc. + Nevio Poljak + npoljak&erinc.com +645 + nsgdata.com Inc + Graham C. Welling + gwelling&nsgdata.com +646 + SEIKO Communication Systems, Inc. + Lyn T. Robertson + ltr&seikotsi.com +647 + Unified Management + Andy Barnhouse + (612) 561-4944 +648 + RADLINX Ltd. + Ady Lifshes + ady%rndi&uunet.uu.net +649 + Microplex Systems Ltd. + Fred Fierling + fff&microplex.com +650 + Trio Information Systems AB + Marten Karlsson + snmp&trio.se +651 + Phoenix Microsystems + Bill VerSteeg + bvs&ver.com +652 + Distributed Systems International, Inc. + Ron Mackey + rem&dsiinc.com +653 + Evolving Systems, Inc. + Maqsood Pasha + iana&evolving.com +654 + SAT GmbH + Walter Eichelburg + 100063.74&compuserve.com +655 + CeLAN Technology, Inc. + Mark Liu + 886--35-772780 +656 + Landmark Systems Corp. + Steve Sonnenberg + steves&socrates.umd.edu +657 + Netone Systems Co., Ltd. + YongKui Shao + syk&new-news.netone.co.jp +658 + Loral Data Systems + Jeff Price + jprice&cps070.lds.loral.com +659 + Cellware Broadband Technology + Michael Roth + mike&cellware.de +660 + MuSys Corporation + Gaylord Miyata + miyata&musys.com +661 + IMC Networks Corp. + Jerry Roby + (714) 724-1070 +662 + Octel Communications Corp. + Alan Newman + (408) 321-5182 +663 + RIT Technologies Ltd.
+ Ghiora Drori + drori&dcl.hellnet.org +664 + Adtran + Jeff Wells + 205-971-8000 +665 + Netvion, Inc. + Ray Caruso + ray.caruso&netvion.com +666 + Oki Electric Industry Co., Ltd. + Naoki Hayashi + hayashi753&oki.com +667 + Specialix International + Jeremy Rolls + jeremyr&specialix.co.uk +668 + INESC (Instituto de Engenharia de Sistemas e Computadores) + Pedro Ramalho Carlos + prc&inesc.pt +669 + Globalnet Communications + Real Barriere + (514) 651-6164 +670 + Product Line Engineer SVEC Computer Corp. + Rich Huang + msumgr&enya.cc.fcu.edu.tw +671 + Printer Systems Corp. + Bill Babson + bill&prsys.com +672 + Contec Micro Electronics USA + David Sheih + (408) 434-6767 +673 + Unix Integration Services + Chris Howard + chris&uis.com +674 + Dell Inc. + David L. Douglas + david_l_douglas&dell.com +675 + Whittaker Electronic Systems + Michael McCune + mccune&cerf.net +676 + QPSX Communications + David Pascoe + davidp&qpsx.oz.au +677 + Loral WDl + Mike Aronson + Mike_Aronson&msgate.wdl.loral.com +678 + Federal Express Corp. + Randy Hale + (901) 369-2152 +679 + E-COMMS Inc. + Harvey Teale + (206) 857-3399 +680 + Software Clearing House + Tom Caris + ca&sch.com +681 + Antlow Computers Ltd. + C. R. Bates + 44-635-871829 +682 + Emcom Corp. + Mike Swartz + emcom&cerf.net +683 + Extended Systems, Inc. + Al Youngwerth + alberty&tommy.extendsys.com +684 + Sola Electric + Mike Paulsen + (708) 439-2800 +685 + Esix Systems, Inc. + Anthony Chung + esix&esix.tony.com +686 + 3M/MMM + Tony Grosso + agrosso&mmm.com +687 + Cylink Corp. + Ed Chou + ed&cylink.com +688 + Znyx Advanced Systems Division, Inc. + Alan Deikman + aland&netcom.com +689 + Texaco, Inc. + Jeff Lin + linj&Texaco.com +690 + McCaw Cellular Communication Corp. + Tri Phan + tri.phan&mccaw.com +691 + ASP Computer Product Inc. + Elise Moss + 71053.1066&compuserve.com +692 + HiPerformance Systems + Mike Brien + +27-11-806-1000 +693 + Regionales Rechenzentrum Erlangen + Frank Tröger + verzeichnisdienst&rrze.uni-erlangen.de +694 + SAP AG + Dr. Uwe Hommel + +49 62 27 34 0 +695 + ElectroSpace System Inc. + Dr. Joseph Cleveland + e03353&esitx.esi.org +696 + Unassigned + ---none--- + ---none--- +697 + MultiPort Corporation + Reuben Sivan + rsivan&multiport.com +698 + Combinet, Inc. + Samir Sawhney + samir&combinet.com +699 + TSCC + Carl Wist + carlw&tscc.com +700 + Teleos Communications Inc. + Bill Nayavich + wln&teleoscom.com +701 + Alta Research + Jack Moyer + ian&altarsrch.com +702 + Independence Blue Cross + Bill Eshbach + esh&ibx.com +703 + ADACOM Station Interconnectivity Ltd. + Itay Kariv + +9 72 48 99 89 9 +704 + MIROR Systems + Frank Kloes + +27 12 911 0003 +705 + Merlin Gerin + Adam Stolinski + (714) 557-1637 x249 +706 + Owen-Corning Fiberglas + Tom Mann + mann.td&ocf.compuserve.com +707 + Talking Networks Inc. + Terry Braun + tab&lwt.mtxinu.com +708 + Cubix Corporation + Rebekah Marshall + (702) 883-7611 +709 + Formation Inc. + Bob Millis + bobm&formail.formation.com +710 + Lannair Ltd. + Pablo Brenner + pablo&lannet.com +711 + LightStream Corp. + Chris Chiotasso + chris&lightstream.com +712 + LANart Corp. + Doron I. Gartner + doron&lanart.com +713 + University of Stellenbosch + Andries Nieuwoudt + apcn&sun.ac.za +714 + Wyse Technology + Bill Rainey + bill&wyse.com +715 + DSC Communications Corp. 
+ Colm Bergin + cbergin&cpdsc.com +716 + NetEc + Thomas Krichel + NetEc&netec.mcc.ac.uk +717 + Breltenbach Software Engineering GmbH + Hilmar Tuneke + tuneke&namu01.gwdg.de +718 + Victor Company of Japan,Limited + Atsushi Sakamoto + 101176.2703&compuserve.com +719 + Japan Direx Corporation + Teruo Tomiyama + +81 3 3498 5050 +720 + NECSY Network Control Systems S.p.A. Piero Fiozzo + fip&necsy.it + ---none--- +721 + ISDN Systems Corp. + Jeff Milloy + p00633&psilink.com +722 + Zero-One Technologies, Ltd. + Curt Chen + + 88 62 56 52 32 33 +723 + Radix Technologies, Inc. + Steve Giles + giless&delphi.com +724 + National Institute of Standards and Technology + Jim West + west&mgmt3.ncsl.nist.gov +725 + Digital Technology Inc. + Chris Gianattasio + gto&lanhawk.com +726 + Castelle Corp. + Waiming Mok + wmm&castelle.com +727 + Memotec Inc. + Aster Fleury + Aster.Fleury&memotec.com +728 + Showa Electric Wire & Cable Co., Ltd. + Robert O'Grady + kfn&tanuki.twics.co.jp +729 + SpectraGraphics + Jack Hinkle + hinkle&spectra.com +730 + Connectware Inc. + Rick Downs + rxd4&acsysinc.com +731 + Wind River Systems + Emily Hipp + hipp&wrs.com +732 + RADWAY International Ltd. + Doron Kolton + 0005367977&mcimail.com +733 + System Management ARTS, Inc. + Yuri Rabover + yuri&smarts.com +734 + Persoft, Inc. + Steven M. Entine + entine&pervax.persoft.com +735 + Xnet Technology Inc. + Esther Chung + estchung&xnet-tech.com +736 + Unison-Tymlabs + Dean Andrews + ada&unison.com +737 + Micro-Matic Research + Patrick Lemli + 73677.2373&compuserve.com +738 + B.A.T.M. Advance Technologies + Nahum Killim + bcrystal&actcom.co.il +739 + University of Copenhagen + Kim Høglund + shotokan&diku.dk +740 + Network Security Systems, Inc. + Carleton Smith + rpitt&nic.cerf.net +741 + JNA Telecommunications + Sean Cody + seanc&jna.com.au +742 + Encore Computer Corporation + Tony Shafer + tshafer&encore.com +743 + Central Intelligence Agency + David Wheelock + davidw&ucia.gov +744 + ISC (GB) Limited + Mike Townsend + miket&cix.compulink.co.uk +745 + Digital Communication Associates + Ravi Shankar + shankarr&dca.com +746 + CyberMedia Inc. + Unni Warrier + unni&cs.ucla.edu +747 + Distributed Systems International, Inc. + Ron Mackey + rem&dsiinc.com +748 + Peter Radig + Peter Radig + peter&radig.de +749 + Vicorp Interactive Systems + Phil Romine + phil&vis.com +750 + Inet Inc. + Bennie Lopez + brl&inetinc.com +751 + Argonne National Lab + Linda Winkler + winkler&mcs.anl.gov +752 + Teklogix + Lee Fryer-Davis + lfryerda&teklogix.com +753 + North Western University + Phil Draughon + jpd&nwu.edu +754 + Astarte Fiber Networks + James Garnett + garnett&catbelly.com +755 + Diederich & Associates, Inc. + Douglas Capitano + dlcapitano&delphi.com +756 + Florida Power Corporation + Bob England + rengland&fpc.com +757 + Ingres Corporation + Raymond Fan + ray.fan&ingres.com +758 + Open Network Enterprise + Spada Stefano + +39 39 245-8101 +759 + The Home Depot + Allen Thomas + art01&homedepot.com +760 + Pan Dacom Telekommunikations + Jens Andresen + +49 40 644 09 71 +761 + NetTek + Steve Kennedy + steve&gbnet.com +762 + Karlnet Corp. + Doug Kall + kbridge&osu.edu +763 + Efficient Networks, Inc. + Stephen Egbert + egbert&efficient.com +764 + Fiberdata + Jan Fernquist + +46 828 8383 +765 + Lanser + Emil Smilovici + (514) 485-7104 +766 + Ericsson Denmark A/S, Telebit Division + Peder Chr. Nørgaard + Peder.Chr.Norgaard&ericsson.com +767 + QoSCom + Hans Lackner + Hans.Lackner&qoscom.de +768 + Network Computing Inc.
+ Fredrik Noon + fnoon&ncimail.mhs.compuserve.com +769 + Walgreens Company + Denis Renaud + (708) 317-5054 (708) 818-4662 +770 + Internet Initiative Japan Inc. + Toshiharu Ohno + tony-o&iij.ad.jp +771 + GP van Niekerk Ondernemings + Gerrit van Niekerk + gvanniek&dos-lan.cs.up.ac.za +772 + Queen's University Belfast + Patrick McGleenon + p.mcgleenon&ee.queens-belfast.ac.uk +773 + Securities Industry Automation Corporation + Chiu Szeto + cszeto&prism.poly.edu +774 + SYNaPTICS + David Gray + david&synaptics.ie +775 + Data Switch Corporation + Joe Welfeld + jwelfeld&dasw.com +776 + Telindus Distribution + Karel Van den Bogaert + kava&telindus.be +777 + MAXM Systems Corporation + Gary Greathouse + ggreathouse&maxm.com +778 + Fraunhofer Gesellschaft + FRBB-Support + support&berlin.fhg.de +779 + EQS Business Services + Ken Roberts + kroberts&esq.com +780 + CNet Technology Inc. + Repus Hsiung + idps17&shts.seed.net.tw +781 + Datentechnik GmbH + Harald Jicha + +43 1 50100 1264 +782 + Network Solutions, LLC + Donald E. Bynum + dbynum&networksolutions.com +783 + Viaman Software + Vikram Duvvoori + info&viman.com +784 + Schweizerische Bankgesellschaft Zuerich + Roland Bernet + Roland.Bernet&zh014.ubs.ubs.ch +785 + University of Twente - TIOS + Aiko Pras + pras&cs.utwente.nl +786 + Simplesoft Inc. + Sudhir Pendse + sudhir&netcom.com +787 + Stony Brook, Inc. + Ken Packert + p01006&psilink.com +788 + Unified Systems Solutions, Inc. + Steven Morgenthal + smorgenthal&attmail.com +789 + Network Appliance Corporation + Brian Pawlowski + xdl-iana&netapp.com +790 + Ornet Data Communication Technologies Ltd. + Haim Kurz + haim&ornet.co.il +791 + Computer Associates International + Glenn Gianino + giagl01&usildaca.cai.com +792 + Wireless Incorporated + James Kahkoska + jkahkoska&wire-less-inc.com +793 + NYNEX Science & Technology + Lily Lau + llau&nynexst.com +794 + Commercial Link Systems + Wiljo Heinen + wiljo&freeside.cls.de +795 + Adaptec Inc. + Tom Battle + tab&lwt.mtxinu.com +796 + Softswitch + Charles Springer + cjs&ssw.com +797 + Link Technologies, Inc. + Roy Chu + royc&wyse.com +798 + IIS + Olry Rappaport + iishaifa&attmail.com +799 + Mobile Solutions Inc. + Dale Shelton + dshelton&srg.srg.af.mil +800 + Xylan Corp. + Burt Cyr + burt&xylan.com +801 + Airtech Software Forge Limited + Callum Paterson + tsf&cix.compulink.co.uk +802 + National Semiconductor + Brian Marley + brian.marley&nsc.com +803 + Video Lottery Technologies + Angelo Lovisa + ange&awd.cdc.com +804 + National Semiconductor Corp + Waychi Doo + wcd&berlioz.nsc.com +805 + Applications Management Corp + Terril (Terry) Steichen + Steichen tjs&washington.ssds.com +806 + Travelers Insurance Company + Eric Miner + ustrv67v&ibmmail.com +807 + Taiwan International Standard Electronics Ltd. + B. J. Chen + bjchen&taisel.com.tw +808 + US Patent and Trademark Office + Jimmy Orona + jimmy.orona&uspto.gov +809 + Hynet, Ltd. + Amir Fuhrmann + amf&teleop.co.il +810 + Aydin, Corp. + Rick Veher + (215) 657-8600 +811 + ADDTRON Technology Co., Ltd. 
+ Tommy Tasi + +8 86-2-4514507 +812 + Fannie Mae + David King + s4ujdk&fnma.com +813 + MultiNET Services + Hubert Martens + martens&multinet.de +814 + GECKO mbH + Holger Dopp + hdo&gecko.de +815 + Memorex Telex + Mike Hill + hill&raleng.mtc.com +816 + Advanced Communications Networks (ACN) SA + Antoine Boss +41 38 247434 + ---none--- +817 + Telekurs AG + Thomas Blunschi + thomas.blunschi&payserv.telekurs.com +818 + IMV Victron bv + Theo Versteeg + theo&victron.nl +819 + CF6 Company + Francois Caron + +331 4696 0060 +820 + Walker Richer and Quinn Inc. + Rebecca Higgins + rebecca&elmer.wrq.com +821 + Saturn Systems + Paul Parker + paul_parker&parker.fac.cs.cmu.edu +822 + Mitsui Marine and Fire Insurance Co. Ltd. + Kijuro Ikeda +813 5389 8111 + ---none--- +823 + Loop Telecommunication International, Inc. + Charng-Show Li +886 35 787 696 + ---none--- +824 + Telenex Corporation + James Krug + (609) 866-1100 +825 + Bus-Tech, Inc. + Tyler Dunlap + dunlap&bustech.com +826 + ATRIE + Fred B.R. Tuang + cmp&fddi3.ccl.itri.org.tw +827 + Gallagher & Robertson A/S + Arild Braathen + arild&gar.no +828 + Networks Northwest, Inc. + John J. Hansen + jhansen&networksnw.com +829 + Conner Peripherials + Richard Boyd + rboyd&mailserver.conner.com +830 + Elf Antar France + P. Noblanc + +33 1 47 44 45 46 +831 + Lloyd Internetworking + Glenn McGregor + glenn&lloyd.com +832 + Datatec Industries, Inc. + Chris Wiener + cwiener&datatec.com +833 + TAICOM + Scott Tseng + cmp&fddi3.ccl.itri.org.tw +834 + Brown's Operating System Services Ltd. + Alistair Bell + alistair&browns.co.uk +835 + MiLAN Technology Corp. + Gopal Hegde + gopal&milan.com +836 + NetEdge Systems, Inc. + Dave Minnich + Dave_Minnich&netedge.com +837 + NetFrame Systems + George Mathew + george_mathew&netframe.com +838 + Xedia Corporation + Colin Kincaid + colin%madway.uucp&dmc.com +839 + Pepsi + Niraj Katwala + niraj&netcom.com +840 + Tricord Systems, Inc. + Mark Dillon + mdillon&tricord.mn.org +841 + Proxim Wireless, Inc + Cor van de Water + IANA&proxim.com +842 + Applications Plus, Inc. + ----- no contact + ---none--- +843 + Pacific Bell + Aijaz Asif + saasif&srv.PacBell.COM +844 + Scorpio Communications + Sharon Barkai + sharon&supernet.com +845 + TPS-Teleprocessing Systems + Manfred Gorr + gorr&tpscad.tps.de +846 + Technology Solutions Company + Niraj Katwala + niraj&netcom.com +847 + Computer Site Technologies + Tim Hayes + (805) 967-3494 +848 + NetPort Software + John Bartas + jbartas&sunlight.com +849 + Alon Systems + Menachem Szus + 70571.1350&compuserve.com +850 + Tripp Lite + Lawren Markle + 72170.460&compuserve.com +851 + NetComm Limited + Paul Ripamonti + paulri&msmail.netcomm.pronet.com +852 + Precision Systems, Inc.(PSI) + Fred Griffin + cheryl&empiretech.com +853 + Objective Systems Integrators + Ed Reeder + Ed.Reeder&osi.com +854 + Simpact, Inc. + Ron Tabor + rtabor&simpact.com +855 + Systems Enhancement Corporation + Steve Held + 71165.2156&compuserve.com +856 + Information Integration, Inc. + Gina Sun + iiii&netcom.com +857 + CETREL S.C. + Jacques Flesch + flesch&cetrel.lu +858 + Platinum Technology, Inc. + Theodore J. Collins III + ted.collins&vtdev.mn.org +859 + Olivetti North America + Tom Purcell + tomp&mail.spk.olivetti.com +860 + WILMA + Nikolaus Schaller + hns&ldv.e-technik.tu-muenchen.de +861 + Thomson Financial + Sam Narang + sam.narang&thomson.com +862 + Total Peripherals Inc. + Mark Ustik + (508) 393-1777 +863 + SunNetworks Consultant + John Brady + jbrady&fedeast.east.sun.com +864 + Arkhon Technologies, Inc. 
+ Joe Wang + rkhon&nic.cerf.net +865 + Computer Sciences Corporation + Dorian Smith + dsmith33&csc.com +866 + Philips Communication d'Entreprise Claude Lubin + +331412870 00 + ---none--- +867 + Katron Technologies Inc. + Robert Kao + +88 627 991 064 +868 + Transition Engineering Inc. + Hemant Trivedi + hemant&transition.com +869 + Altos Engineering Applications, Inc. + Wes Weber or Dave Erhart + altoseng&netcom.com +870 + Nicecom Ltd. + Arik Ramon + arik&nicecom.nice.com +871 + Fiskars/Deltec + Carl Smith + (619) 291-2973 +872 + AVM GmbH + Andreas Stockmeier + stocki&avm-berlin.de +873 + Comm Vision + Richard Havens + (408) 923 0301 x22 +874 + Institute for Information Industry + Peter Pan + peterpan&pdd.iii.org.tw +875 + Legent Corporation + Gary Strohm + gstrohm&legent.com +876 + Network Automation + Doug Jackson + +64 6 285 1711 +877 + EView Technology + Mike Davidson + mdavidson&eview-tech.com +878 + Coman Data Communications Ltd. + Zvi Sasson + coman&nms.cc.huji.ac.il +879 + Skattedirektoratet + Karl Olav Wroldsen + +47 2207 7162 +880 + Client-Server Technologies + Timo Metsaportti + timo&itf.fi +881 + Societe Internationale de Telecommunications Aeronautiques + Chuck Noren + chuck.noren&es.atl.sita.int +882 + Maximum Strategy Inc. + Paul Stolle + pstolle&maxstrat.com +883 + Integrated Systems, Inc. + SysAdmin + psos-net&isi.com +884 + E-Systems + Hai H. Nguyen + hai_nguyen_sy&fallschurch.esys.com +885 + RELTEC Corporation + Hung Ma + mah&reu.relteccorp.com +886 + Summa Four Inc. + Paul Nelson + (603) 625-4050 +887 + J & L Information Systems + Rex Jackson + (818) 709-1778 +888 + Forest Computer Inc. + Dave Black + dave&forest.com +889 + Palindrome Corp. + Jim Gast + jgast&palindro.mhs.compuserve.com +890 + ZyXEL Communications Corp. + Harry Chou + howie&csie.nctu.edu.tw +891 + Network Managers (UK) Ltd, + Mark D Dooley + mark&netmgrs.co.uk +892 + Sensible Office Systems Inc. + Pat Townsend + (712) 276-0034 +893 + Informix Software + Anthony Daniel + anthony&informix.com +894 + Dynatek Communications + Howard Linton + (703) 490-7205 +895 + Versalynx Corp. + Dave Fisler + (619) 536-8023 +896 + Potomac Scheduling Communications Company + David Labovitz + del&access.digex.net +897 + Sybase, Inc + David Clegg + davec&sybase.com +898 + DiviCom Inc. + Eyal Opher + eyal&divi.com +899 + Datus elektronische Informationssysteme GmbH + Hubert Mertens + marcus&datus.uucp +900 + Matrox Electronic Systems Limited + Peter Michelakis + licenseadm&matrox.com +901 + Digital Products, Inc. + Ross Dreyer + rdreyer&digprod.com +902 + Scitex Corp.Ltd. + Yoav Chalfon + yoav_h&ird.scitex.com +903 + RAD Vision + Oleg Pogorelik + radvis&vax.trendline.co.il +904 + Tran Network Systems + Bill Hamlin + billh&revco.com +905 + Scorpion Logic + Sean Harding + +09 2324 5672 +906 + Inotech Inc.Eric Jacobs + ---none--- + ejacobs&inotech.com +907 + Controlled Power Co. + Yu Chin + 76500,3160&compuserve.com +908 + ABB Inc. (formerly 'Elsag Bailey Incorporated') + Matthew Miller + Matthew.Miller&us.abb.com +909 + J.P. Morgan + Chung Szeto + szeto_chung&jpmorgan.com +910 + Clear Communications Corp. + Kurt Hall + khall&clear.com +911 + General Technology Inc. + Perry Rockwell + (407) 242-2733 +912 + Adax Inc. + Jory Gessow + jory&adax.com +913 + Mtel Technologies, Inc. + Jon Robinson + 552-3355&mcimail.com +914 + Underscore, Inc. + Joseph K. Martin + jkm&underscore.com +915 + SerComm Corp. 
+ Ben Lin + +8 862-577-5400 +916 + Allegiance Corporation + Ray Klemmer + klemmerr&allegiance.net +917 + Tellus Technology + Ron Cimorelli + (510) 498-8500 +918 + Continuous Electron Beam Accelerator Facility + Paul Banta + banta&cebaf.gov +919 + Canoga Perkins + Margret Siska + (818) 718-6300 +920 + R.I.S Technologies + Fabrice Lacroix + +33 7884 6400 +921 + INFONEX Corp. + Kazuhiro Watanabe + kazu&infonex.co.jp +922 + WordPerfect Corp. + Douglas Eddy + eddy&wordperfect.com +923 + NRaD + Russ Carleton + roccor&netcom.com +924 + Hong Kong Telecommunications Ltd. + K. S. Luk + kar-shun.luk&pccw.com +925 + Signature Systems + Doug Goodall + goodall&crl.com +926 + Alpha Technologies, Inc. + Bill Crawford + engineering&alpha.com +927 + PairGain Technologies, Inc. + Ken Huang + kenh&pairgain.com +928 + Sonic Systems + Sudhakar Ravi + sudhakar&sonicsys.com +929 + Steinbrecher Corp. + Kary Robertson + krobertson&delphi.com +930 + Centillion Networks, Inc. + Derek Pitcher + derek&lanspd.com +931 + Network Communication Corp. + Tracy Clark + ncc!central!tracyc&netcomm.attmail.com +932 + Sysnet A.S. + Carstein Seeberg + case&sysnet.no +933 + Telecommunication Systems Lab + Gerald Maguire + maguire&it.kth.se +934 + QMI + Scott Brickner + Scott_Brickner.QMI-DEV&FIDO.qmi.mei.com +935 + Phoenixtec Power Co., Ltd. + An-Hsiang Tu + +8 862 646 3311 +936 + Hirakawa Hewtech Corp. + H. Ukaji + lde02513&niftyserve.or.jp +937 + No Wires Needed B.V. + Arnoud Zwemmer + arnoud&nwn.nl +938 + Primary Access + Kerstin Lodman + lodman&priacc.com +939 + FD Software AS + Dag Framstad + dag.framstad&fdsw.no +940 + g.a.m.s. edv-dienstleistungen + Vinzenz Grabner + zen&gams.net +941 + Nemesys Research Ltd. + Michael Dixon + mjd&nemesys.co.uk +942 + Pacific Communication Sciences, Inc. + (PSCI) + Yvonne Kammer mib-contact&pcsi.com +943 + Level One Communications, Inc. + Moshe Kochinski + moshek&level1.com +944 + Intellimon Software, LLC. + Andrew Dimmick + adimmick&home.com +945 + Accenture (formerly 'Andersen Consulting') + Greg Tilford + greg.a.tilford&accenture.com +946 + Bay Technologies Pty Ltd. + Paul Simpson + pauls&baytech.com.au +947 + Integrated Network Corp. + Daniel Joffe + wandan&integnet.com +948 + CyberPro International + Jeff Davison + jdavison&digital.net +949 + Wang Laboratories Inc. + Pete Reilley + pvr&wiis.wang.com +950 + Polaroid Corp. + Sari Germanos + sari&temerity.polaroid.com +951 + Sunrise Sierra + Gerald Olson + (510) 443-1133 +952 + Silcon Group + Bjarne Bonvang + +45 75 54 22 55 +953 + Coastcom + Peter Doleman + pdoleman&coastcom.com +954 + 4th DIMENSION SOFTWARE Ltd. + Thomas Segev/Ely Hofner + autumn&zeus.datasrv.co.il +955 + SEIKO SOLUTIONS Inc. + Toshikazu Tanaka + toshikazu.tanaka&seiko-sol.co.jp +956 + PERFORM + Pierre Germain + pgermain&perform.fr +957 + TV/COM International + Jean Tellier + (619) 675-1376 +958 + Network Integration, Inc. + Scott C. Lemon + slemon&nii.mhs.compuserve.com +959 + Sola Electric, A Unit of General Signal + Bruce Rhodes + 72360,2436&compuserve.com +960 + Gradient Technologies, Inc. + Geoff Charron + geoff&gradient.com +961 + Tokyo Electric Co., Ltd. + A. Akiyama + +81 558 76 9606 +962 + Codonics, Inc. + Joe Kulig + jjk&codonics.com +963 + Delft Technical University + Mark Schenk + m.schenk&ced.tudelft.nl +964 + Carrier Access Corp. + Technical Support + tech-support&carrieraccess.com +965 + eoncorp + Barb Wilson + wilsonb&eon.com +966 + Naval Undersea Warfare Center + Thomas L. 
Eilert + eilerttl&npt.nuwc.navy.mil +967 + AWA Limited + Mike Williams + +61 28 87 71 11 +968 + Distinct Corp. + Tarcisio Pedrotti + tarci&distinct.com +969 + National Technical University of Athens + Theodoros Karounos + karounos&phgasos.ntua.gr +970 + BGS Systems, Inc. + Amr Hafez + amr&bgs.com +971 + AT&T Wireless (McCaw Wireless Data) + Paul Martin + paul.martin&attws.com +972 + Bekaert + Koen De Vleeschauwer + kdv&bekaert.com +973 + Epic Data Inc. + Russ Beinder + russ.beinder&epicdata.com +974 + Prodigy Services Co. + Ed Ravin + elr&wp.prodigy.com +975 + First Pacific Networks (FPN) + Randy Hamilton + randy&fpn.com +976 + Xylink Ltd. + Bahman Rafatjoo + 100117.665&compuserve.com +977 + Relia Technologies Corp. + Fred Chen + fredc&relia1.relia.com.tw +978 + Legacy Storage Systems Inc. + James Hayes + james&lss-chq.mhs.compuserve.com +979 + Digicom, SPA + Claudio Biotti + +39 3312 0 0122 +980 + Ark Telecom + Alan DeMars + alan&arktel.com +981 + National Security Agency (NSA) + Cynthia Beighley + maedeen&romulus.ncsc.mil +982 + Southwestern Bell Corporation (AT&T) (formerly 'Southwestern Bell Corporation') + Jeremy Monroe + att-domains&att.com +983 + Virtual Design Group, Inc. + Chip Standifer + 70650.3316&compuserve.com +984 + Rhone Poulenc + Olivier Pignault + +33 1348 2 4053 +985 + Swiss Bank Corporation + Neil Todd + toddn&gb.swissbank.com +986 + ATEA N.V. + Walter van Brussel + p81710&banyan.atea.be +987 + Computer Communications Specialists, Inc. + Carolyn Zimmer + cczimmer&crl.com +988 + Object Quest, Inc. + Michael L. Kornegay + mlk&bir.com +989 + DCL System International, Ltd. + Gady Amit + gady-a&dcl-see.co.il +990 + SOLITON SYSTEMS K.K. + Masayuki Yamai + +81 33356 6091 +991 + U S Software + Richard Ames + richard&ussw.com +992 + Systems Research and Applications Corporation + Todd Herr + herrt&smtplink.sra.com +993 + University of Florida + Identity and Access Management + iam&it.ufl.edu +994 + Dantel, Inc. + John Litster + (209) 292-1111 +995 + Multi-Tech Systems, Inc. + Dale Martenson + (612) 785-3500 x519 +996 + Softlink Ltd. + Moshe Leibovitch + moshe&softlink.com +997 + ProSum + Christian Bucari + +33.1.4590.6231 +998 + March Systems Consultancy, Ltd. + Ross Wakelin + r.wakelin&march.co.uk +999 + EasyNet, Inc. + Nancy M Robinson + easynet&companycontacts.net +1000 + Internet Assigned Numbers Authority + Authority + iana&iana.org +1001 + PECO Energy Co. + Rick Rioboli + u002rdr&peco.com +1002 + United Parcel Service + Steve Pollini + spollini&ups.com +1003 + Storage Dimensions, Inc. + Michael Torhan + miketorh&xstor.com +1004 + ITV Technologies, Inc. + Jacob Chen + itv&netcom.com +1005 + TCPSI + Victor San Jose + Victor.Sanjose&sp1.y-net.es +1006 + Promptus Communications, Inc. + Paul Fredette + (401) 683-6100 +1007 + Norman Data Defense Systems + Kristian A. Bognaes + norman&norman.no +1008 + Pilot Network Services, Inc. + Rob Carrade + carrade&pilot.net +1009 + Integrated Systems Solutions Corporation + Chris Cowan + cc&austin.ibm.com +1010 + SISRO + Kamp Alexandre + 100074.344&compuserve.com +1011 + NetVantage + Kevin Bailey + speed&kaiwan.com +1012 + Marconi + Scott Mansfield + scott.mansfield&marconi.com +1013 + SURECOM + Mike S. T. Hsieh + +886.25.92232 +1014 + Royal Hong Kong Jockey Club + Edmond Lee + 100267.3660&compuserve.com +1015 + Gupta + Howard Cohen + hcohen&gupta.com +1016 + Tone Software Corporation + Neil P. Harkins + (714) 991-9460 +1017 + Opus Telecom + Pace Willisson + pace&blitz.com +1018 + Cogsys Ltd. 
+ Ryllan Kraft + ryllan&ryllan.demon.co.uk +1019 + Komatsu, Ltd. + Akifumi Katsushima + +81 463.22.84.30 +1020 + ROI Systems, Inc + Michael Wong + (801) 942-1752 +1021 + Lightning Instrumentation SA + Mike O'Dowd + odowd&lightning.ch +1022 + TimeStep Corp. + Stephane Lacelle + slacelle&newbridge.com +1023 + INTELSAT + Jason Winans + janon.winans&intelsat.com +1024 + Network Research Corporation Japan, Ltd. + Tsukasa Ueda + 100156.2712&compuserve.com +1025 + Relational Development, Inc. + Steven Smith + rdi&ins.infonet.net +1026 + Emerald Systems, Corp. + Robert A. Evans Jr. + (619) 673-2161 x5120 +1027 + Mitel, Corp. + Andy Brezinsky + andy.brezinsky&mitel.com +1028 + Software AG + Peter Cohen + sagpc&sagus.com +1029 + MillenNet, Inc. + Manh Do + (510) 770-9390 +1030 + NK-EXA Corp. + Ken'ichi Hayami + hayami&dst.nk-exa.co.jp +1031 + BMC Software + Eugene Golovinsky + egolovin&bmc.com +1032 + StarFire Enterprises, Inc. + Kelsi Compton + kelsi&StarFire.com +1033 + Hybrid Networks, Inc. + Doug Muirhead + dougm&hybrid.com +1034 + Quantum Software GmbH + Thomas Omerzu + omerzu&quantum.de +1035 + Openvision Technologies Limited + Andrew Lockhart + alockhart&openvision.co.uk +1036 + Healthcare Communications, Inc.(HCI) + Larry Streepy + streepy&healthcare.com +1037 + SAIT Systems + Hai Dotu + +3223.7053.11 +1038 + SAGEMCOM SAS + COZZI Didier + didier.cozzi&sagemcom.com +1039 + CompuSci Inc. + John M. McNally + jmcnally&sechq.com +1040 + Aim Technology + Ganesh Rajappan + ganeshr&aim.com +1041 + CIESIN + Kalpesh Unadkat + kalpesh&ciesin.org +1042 + Systems & Technologies International + Howard Smith + ghamex&aol.com +1043 + Israeli Electric Company (IEC) Yoram Harlev + yoram&yor.iec.co.il + ---none--- +1044 + Phoenix Wireless Group, Inc. + Gregory M Buchanan + buchanan&pwgi.com +1045 + SWL + Bill Kight + wkight&grci.com +1046 + nCUBE + Greg Thompson + gregt&ncube.com +1047 + Cerner, Corp. + Dennis Avondet + (816) 221.1024 +1048 + Andersen Consulting + Mark Lindberg + mlindber&andersen.com +1049 + Windstream Communications + Rick Frey + snmp-admin&ops.windstream.net +1050 + Acer + Jay Tao + jtao&Altos.COM +1051 + Cedros + Juergen Haakert + +49.2241.9701.80 +1052 + AirAccess + Ido Ophir + 100274.365&compuserve.com +1053 + Expersoft Corporation + David Curtis + curtis&expersoft.com +1054 + Eskom + Sanjay Lakhani + h00161&duvi.eskom.co.za +1055 + SBE, Inc. + Vimal Vaidya + vimal&sbei.com +1056 + SS8 + Peter Baak + peter.baak&ss8.com +1057 + American Computer and Electronics, Corp. + Tom Abraham + tha&acec.com +1058 + Syndesis Limited + Wil Macaulay + wil&syndesis.com +1059 + Isis Distributed Systems, Inc. + ---none--- + ---none--- +1060 + Priority Call Management + Greg Schumacher + gregs&world.std.com +1061 + Koelsch & Altmann GmbH + Christian Schreyer + 100142.154&compuserve.com +1062 + WIPRO INFOTECH Ltd. + Chandrashekar Kapse + kapse&wipinfo.soft.net +1063 + Controlware + Uli Blatz + ublatz&cware.de +1064 + Mosaic Software + W.van Biljon + willem&mosaic.co.za +1065 + Canon Information Systems + Victor Villalpando + vvillalp&cisoc.canon.com +1066 + AOL Inc. + Bill Burns + oid-admin&aol.net +1067 + Whitetree Network Technologies, Inc. + Carl Yang + cyang&whitetree.com +1068 + Northrop Grumman / Xetron + System Admin + cincyit&ngc.com +1069 + Target Concepts, Inc. + Bill Price + bprice&tamu.edu +1070 + DMH Software + Yigal Hochberg + 72144.3704&compuserve.com +1071 + Innosoft International, Inc.
+ Jeff Allison + jeff&innosoft.com +1072 + Controlware GmbH + Adolfo Lucha + adolfo.lucha&controlware.de +1073 + Telecommunications Industry Association (TIA) Mike + Youngberg + mikey&synacom.com +1074 + Boole & Babbage + Rami Rubin + rami&boole.com +1075 + System Engineering Support, Ltd. + Vince Taylor + +44 454.614.638 +1076 + SURF + Bas Zoetekouw + Bas.Zoetekouw&surf.nl +1077 + OpenConnect Systems, Inc. + Mark Rensmeyer + mrensme&oc.com +1078 + PDTS (Process Data Technology and Systems) + Martin Gutenbrunner + admin-snmp-oid&NOSPAMpdts.at +1079 + Cornet, Inc. + Nat Kumar + (703) 658-3400 +1080 + NetStar, Inc. + John K. Renwick + jkr&netstar.com +1081 + Semaphore Communications, Corp. + Jimmy Soetarman + (408) 980-7766 +1082 + Casio Computer Co., Ltd. + Shouzo Ohdate + ohdate&casio.co.jp +1083 + CSIR + Frikkie Strecker + fstreck&marge.mikom.csir.co.za +1084 + APOGEE Communications + Olivier Caleff + caleff&apogee-com.fr +1085 + Information Management Company + Michael D. Liss + mliss&imc.com +1086 + Wordlink, Inc. + Mike Aleckson + (314) 878-1422 +1087 + PEER + Avinash S. Rao + arao&cranel.com +1088 + Telstra Corp + Craig Bateman + Craig.Bateman&team.telstra.com +1089 + Net X, Inc. + Sridhar Kodela + techsupp&netx.unicomp.net +1090 + PNC PLC + Gordon Tees + +44 716.061.200 +1091 + DanaSoft, Inc. + Michael Pierce + mpierce&danasoft.com +1092 + Yokogawa-Hewlett-Packard + Hisao Ogane + hisao&yhp.hp.com +1093 + Citem + Manfred R. Siegl + m.siegl&citem.at +1094 + Link Telecom, Ltd. + Michael Smith + michael&ska.com +1095 + Xirion bv + Frans Schippers + frans&xirion.nl +1096 + Centigram Communications, Corp. + Mike Nguyen + michael.nguyen&centigram.com +1097 + Gensym Corp. + Greg Stanley + gms&gensym.com +1098 + Apricot Computers, Ltd. + Paul Bostock + paulb&apricot.co.uk +1099 + CANAL+ + Clément Calvier + clement.calvier&canal-plus.com +1100 + Cambridge Technology Partners + Peter Wong + pwong&ctp.com +1101 + MoNet Systems, Inc. + Frank Jiang + fjiang&irvine.dlink.com +1102 + Metricom, Inc. + Harold E. Austin + austin&metricom.com +1103 + Xact, Inc + Keith Wiles + keith&iphase.com +1104 + Brave Software, Inc. + Marshall T. Rose + mrose&brave.com +1105 + NetCell Systems, Inc. + Frank Jiang + fjiang&irvine.dlink.com +1106 + Uni-QLennart Norlanderlennart.norlander&uniq.se + or + mib&uniq.se +1107 + DISA Space Systems Development Division + William Reed + reedw&cc.ims.disa.mil +1108 + INTERSOLV + Gary Greenfield + Gary_Greenfield&intersolv.com +1109 + Vela Research, Inc. + Ajoy Jain + cheryl&empiretech.com +1110 + Tetherless Access, Inc. + Richard Fox + kck&netcom.com +1111 + Magistrat Wien, AT + Michael Gsandtner + gsa&adv.magwien.gv.at +1112 + Franklin Telecom, Inc. + Mike Parkhurst + mikes&fdihq.com +1113 + EDA Instruments, Inc. + Alex Chow + alexc&eda.com +1114 + EFI Electronics, Corporation + Tim Bailey + efiups&ix.netcom.com +1115 + GMD + Ferdinand Hommes + Ferdinand.Hommes&gmd.de +1116 + Voicetek, Corp + Joe Micozzi + jam&voicetek.com +1117 + Avanti Technology, Inc. + Steve Meyer, Sr. + stevem&avanti-tech.com +1118 + ATLan LTD + Emanuel Wind + ew&actcom.co.il +1119 + Lehman Brothers + Information Security + internic&lehman.com +1120 + LAN-hopper Systems, Inc. + Jim Baugh + 76227.307&compuserve.com +1121 + Web-Systems + Cecile Mulder + web&aztec.co.za +1122 + Piller GmbH + Stephan Leschke + 100063.3642&compuserve.com +1123 + Engenio Information Technologies, Inc. + MSW Architecture team (Sean Olson) + mswarch&netapp.com +1124 + NetSpan, Corp.
+ Lawrence Halcomb + 214-690-8844 +1125 + Nielsen Media Research + Andrew R. Reese + reesear&msmail.dun.nielsen.com +1126 + Sterling Software + Greg Rose + Greg_Rose&sydney.sterling.com +1127 + Applied Network Technology, Inc. + Abbot Gilman + gilman&antech.com +1128 + Union Pacific Railroad + Ed Hoppe + emhoppe&notes.up.com +1129 + Tec Corporation + Tomoaki Suzuki + nab00570&niftyserve.or.jp +1130 + Datametrics Systems, Corporation Karl S. + Friedrich + friedrich&datametrics.com +1131 + Intersection Development Corporation Michael + McCrary + mikem43190&aol.com +1132 + BACS Limited, GB + Eric Bishop + eric.bishop&bacs.co.uk +1133 + Engage Communication + Peter Gibson + peterg&cruzio.com +1134 + Fastware, S.A. + Christian Berge + +33 4748 0616 +1135 + LONGSHINE Electronics Corp. + C.T. Tseng + via&tpts1.seed.net.tw +1136 + BOW Networks, Inc. + David Eastcott + david.eastcott&bownetworks.com +1137 + emotion, Inc. + Jesus Ortiz + jesus_ortiz&emotion.com +1138 + Rautaruukki steel factory, Information systems + Raine Haapasaari + rhaapasa&ratol.fi +1139 + EMC Corp + Rene Fontaine + rene.fontaine&emc.com +1140 + University of West England + Tom Johnson + tom-x&csd.uwe.ac.uk +1141 + Com21 + Randy Miyazaki + randy&com21.com +1142 + Compression Tehnologies Inc. + Paul Wilson + paul&compression.com +1143 + Buslogic Inc. + Janakiraman Gopalan + janaki&buslogic.com +1144 + Firefox Corporation + John Severs + johns&firefox.co.uk +1145 + Mercury Communications Ltd + David Renshaw + ag13&cityscape.co.uk +1146 + COMPUTER PROTOCOL MALAYSIA SDN. BHD. + Ronald Khoo + ronald&cpm.com.my +1147 + Institute for Information Industry + Shein-Tung Wu + hunter&netrd.net.tw +1148 + Pacific Electric Wire & Cable Co. Ltd. + Cheng Chen + tony&tpts1.seed.net.tw +1149 + MPR Teltech Ltd + Chris Sullivan + sullivan&mprott.ott.mpr.ca +1150 + P-COM, Inc + Joe Shiran + joesh&netcom.com +1151 + Anritsu Corporation + Manabu Usami + usami&accpd1.anritsu.co.jp +1152 + SPYRUS + Russ Housley + housley&spyrus.com +1153 + NeTpower, Inc. + Mark Davoren + markd&netpower.com +1154 + Diehl ISDN GmbH + Larry Butler + lrb&diehl.de +1155 + CARNet + Nevenko Bartolincic + Nevenko.Bartolincic&CARNet.hr +1156 + AS-TECH + Jean Pierre Joerg + +33 6770 8926 +1157 + SG2 Innovation et Produits + Pascal Donnart + bcouderc&altern.com +1158 + CellAccess Technology, Inc. + Steve Krichman + cati&netcom.com +1159 + Bureau of Meteorology + Paul Hambleton + paul.hambleton&bom.gov.au +1160 + ELTRAX + T. Max Devlin + mdevlin&eltrax.com +1161 + Thames Water Utilities Limited + Derek Manning + +44 1734 591159 +1162 + Micropolis, Corp. + Jerry Sorcsek + jerome_sorcsek&microp.com +1163 + Integrated Systems Technology + William Marshall + marshall&kingcrab.nrl.navy.mil +1164 + Brite Voice Systems, Inc. + John Morrison + john.morrison&brite.com +1165 + Associated Grocer + Michael Zwarts + (206) 764-7506 +1166 + General Instrument + Fred Gotwald + fgotwald&gi.com +1167 + Stanford Telecom + Luther Edwards + ledwards&fuji.sed.stel.com +1168 + ICOM Informatique + Jean-Luc Collet + 100074,36&compuserve.com +1169 + MPX Data Systems Inc. + Bill Hayes + bhayes&mpx.com +1170 + Syntellect + Kevin Newsom + kevin&syntellect.com +1171 + Polyhedra Ltd (formerly 'Perihelion Technology Ltd') + Nigel Day + nigel.day&polyhedra.com +1172 + Shoppers Drug Mart + Ian McMaster + imcmaster&shoppersdrugmart.ca +1173 + Apollo Travel Services Judith Williams-Murphy + judyats&cscns.com + ---none--- +1174 + Time Warner Cable, Inc.
+ George Sarosi + george.sarosi&twcable.com +1175 + American Technology Labs Inc. + Laura Payton + (301) 695-1547 +1176 + Dow Jones & Company, Inc. + John Ruccolo + (609) 520 5505 +1177 + FRA + Per Hansson + Per.Hansson&fra.se +1178 + Equitable Life Assurance Society + Barry Rubin + 75141,1531&compuserve.com +1179 + Smith Barney Inc. + James A. LaFleur + (212) 723-3919 +1180 + Compact Data Ltd + Stephen Ades + sa&compactdata.co.uk +1181 + I.Net Communications + Stephane Appleton + +33 1607 20205 +1182 + Yamaha Corporation + Ryota Hirose + hirose&soundnet.yamaha.co.jp +1183 + Illinois State University + ISU Admin + i-admin&ilstu.edu +1184 + RADGuard Ltd. + omer karp + omer&radguard.co.il +1185 + Calypso Software Systems, Inc. + Paul J. LaFrance + lafrance&calsof.com +1186 + ACT Networks Inc. + Joseph P. Tokarski + joet&acti-ct.com +1187 + Kingston Communications + Nick Langford + +49 0127 9600016 +1188 + Incite + Susan M. Sauter + ssauter&intecom.com +1189 + VVNET, Inc. + C. M. Heard + heard&pobox.com +1190 + Ontario Hydro + Bruce A Nuclear + robc&flute.candu.aecl.ca +1191 + CS-Telecom + Bertrand Velle + bertrand.velle&csee-com.fr +1192 + ICTV + Ellen Fratzke + efratzke&ictv.com +1193 + CORE International Inc. + Bill Cloud + (407) 997-6033 +1194 + Mibs4You + David T. Perkins + dperkins&scruznet.com +1195 + ITK + Jan Elliger + jan.elliger&itk.de +1196 + Network Integrity, Inc. + Mark Fox + mfox&netint.com +1197 + BlueLine Software, Inc. + Paul K. Moyer + moyer002&gold.tc.umn.edu +1198 + Migrant Computing Services,Inc. + Gil Nardo + gil&netcom.com +1199 + Linklaters & Paines + Suheil Shahryar + sshahrya&landp.co.uk +1200 + EJV Partners, L.P. + Shean-Guang Chang + schang&ejv.com +1201 + Guardeonic Solutions Ltd. + Pearse Kennedy + pearse.kennedy&guardeonic.com +1202 + VARCOM Corporation + Prathibha Boregowda or Judy Smith + pboregowda&varcom.com or jsmith&varcom.com +1203 + Equitel + Marcelo Raseira + m.raseira.sulbbs%ttbbs&ibase.org.br +1204 + The Southern Company + George Ellenburg + gellenbr&southernco.com +1205 + Dataproducts Corporation + Ron Bergman + rbergma&dpc.com +1206 + National Electrical Manufacturers Association (NEMA) + Bruce J. Schopp + bru_schopp&nema.org +1207 + RISCmanagement, Inc. + Roger Hale + roger&riscman.com +1208 + GVC Corporation + Timon Sloane + timon&timonWare.com +1209 + timonWare Inc. + Timon Sloane + timon&timonWare.com +1210 + Capital Resources Computer Corporation + Jeff Lee + jeff&capres.com +1211 + Storage Technology Corporation + Dominique Ambach + Dominique_Ambach&stortek.com +1212 + Tadiran Telecom TTL. + Gal Ben-Yair + Gal.ben-yair&tadirantele.com +1213 + NCP + Reiner Walter + rwa&ncp.de +1214 + Operations Control Systems (OCS) + Christine Young + cyoung&ocsinc.com +1215 + The NASDAQ Stock Market Inc. + Hibbard Smith + (203) 385-4580 +1216 + Tiernan Communications, Inc. + Girish Chandran + girish&tiernan.com +1217 + Goldman, Sachs Company + Steven Polinsky + polins&gsco.com +1218 + Advanced Telecommunications Modules Ltd + William Stoye + wrs&atml.co.uk +1219 + Phoenix Data Communications + Michel Robidoux + phoenix&cam.org +1220 + Quality Consulting Services + Alan Boutelle + alanb&quality.com +1221 + MILAN + Deh-Min Wu + wu&fokus.gmd.de +1222 + Instrumental Inc. + Henry Newman + hsn&instrumental.com +1223 + Yellow Technology Services Inc. + Martin Kline + (913)344-5341 +1224 + Mier Communications Inc. + Edwin E. Mier + ed&mier.com +1225 + Cable Services Group Inc. + Jack Zhi + j.zhi&gonix.gonix.com +1226 + Forte Networks Inc. 
+ Mark Copley + mhc&fortenet.com +1227 + American Management Systems, Inc. + Robert Lindsay + robert_lindsay&mail.amsinc.com +1228 + Choice Hotels Intl. + Robert Peters + robert&sunnet.chotel.com +1229 + SEH Computertechnik GmbH + Rainer Ellerbrake + r.ellerbrake&seh.de +1230 + McAFee Associates Inc. + Perry Smith + pcs&cc.mcafee.com +1231 + Network Intelligent Inc. + Bob Bessin + (415) 494-6473 +1232 + Luxcom Technologies, Inc. + Tony Szanto + (631) 825-3788 +1233 + ITRON Inc. + Roger Cole + rogersc&itron-ca.com +1234 + Linkage Software Inc. + Brian Kress + briank&linkage.com +1235 + Spardat AG + Wolfgang Mader + mader&telecom.at +1236 + VeriFone Inc. + Alejandro Chirife + alejandro_c1&verifone.com +1237 + Revco D.S., Inc. + Paul Winkeler + paulw&revco.com +1238 + HRB Systems, Inc. + Craig R. Watkins + crw&icf.hrb.com +1239 + Litton Fibercom + Mark Robison + robison&fibercom.com +1240 + Silex Technology America, Inc. (formerly 'XCD, Incorporated') + Lee Aydelotte + laydelotte&silexamerica.com +1241 + ProsjektLeveranser AS + Rolf Frydenberg + rolff&kinfix.no +1242 + Halcyon Inc. + Mark Notten + mnotten&swi.com +1243 + SBB + Michel Buetschi + michel.buetschi&sbb.ch +1244 + LeuTek + W. Kruck + (0711) 790067 +1245 + Zeitnet, Inc + Mario Garakani + mario.garakani&zeitnet.com +1246 + Visual Networks, Inc. + Tom Nisbet + nisbet&po.mctec.com +1247 + Coronet Systems + Ling Thio + ling_thio&compuware.com +1248 + SEIKO EPSON CORPORATION + Nagahashi Toshinori + nagahasi&hd.epson.co.jp +1249 + DnH Technologies + Aleksandar Simic + aasimic&mobility.com +1250 + Deluxe Data + Mike Clemens + mclemens&execpc.com +1251 + Michael A. Okulski Inc. + Mike Okulski + mike&okulski.com +1252 + Saber Software Corporation + David Jackson + (214) 361-8086 +1253 + Mission Systems, Inc. + Mark Lo Chiano + p00231&psilink.com +1254 + Siemens Plessey Electronics Systems + Terry Atkinson + terence.atkinson&p1.sps.co.uk +1255 + Applied Communications Inc, + Al Doney + /s=doneya/o=apcom/p=apcom.oma/admd=telemail/c=us/&sprint.com +1256 + Transaction Technology, Inc. + Bill Naylor + naylor&tti.com +1257 + HST Ltd + Ricardo Moraes Akaki + ricardo.araki&mpcbbs.ax.apc.org +1258 + Michigan Technological University Onwuka + Uchendu + ouchendu&mtu.edu +1259 + Next Level Communications + James J. Song + jsong&nlc.com +1260 + Instinet Corp. + John Funchion + funchion&instinet.com +1261 + Analog & Digital Systems Ltd. + Brijesh Patel + jay&ads.axcess.net.in +1262 + Ansaldo Trasporti SpA + Giovanni Sorrentino + mibadm&ansaldo.it +1263 + ECCI + Scott Platenberg + scottp&ecci.com +1264 + Imatek Corporation + Charlie Slater + cslater&imatek.com +1265 + PTT Telecom bv + Heine Maring + marin002&telecom.ptt.nl +1266 + Data Race, Inc. + Lee Ziegenhals + lcz&datarace.com +1267 + Network Safety Group, Inc. + Les Biffle + les&networksafety.com +1268 + Application des Techniques Nouvelles en Electronique + Michel Ricart + mricart&dialup.francenet.fr +1269 + MFS Communications Company + Steve Feldman + feldman&mfsdatanet.com +1270 + Information Services Division + Phil Draughon + jpd&is.rpslmc.edu +1271 + Ciena Corporation + Wes Jones + wjones&ciena.com +1272 + Fujitsu Nexion + Bill Anderson + anderson&nexen.com +1273 + Standard Networks, Inc + Tony Perri + tony&stdnet.com +1274 + Scientific Research Corporation + James F. Durkin + jdurkin&scires.com +1275 + micado SoftwareConsult GmbH + Markus Michels + Markus_Michels.MICADO&notes.compuserve.com +1276 + Concert Management Services, Inc.
+ Jim McWalters + CONCERT/RSMPO02/mcwaltj%Concert_-_Reston_1&mcimail.com +1277 + University of Delaware + Emilio Recio + emrecio&udel.edu +1278 + Bias Consultancy Ltd. + Marc Wilkinson + marc&bias.com +1279 + Micromuse Inc. + Rob Cowart + rcowart&micromuse.com +1280 + Translink Systems + Richard Fleming + richard&finboro.demon.co.uk +1281 + PI-NET + Kirk Trafficante + pinet&netcom.com +1282 + Amber Wave Systems + Bruce Kling + bkling&amberwave.com +1283 + Superior Electronics Group Inc. + Bob Feather + seggroup&packet.net +1284 + Network Telemetrics Inc + Jonathan Youngman + jyoungman&telemtrx.com +1285 + BSW-Data + P.P. Stander + philip&bsw.co.za +1286 + ECI Telecom Ltd. + Yuval Ben-Haim + yuval&ecitele.com +1287 + BroadVision + Chuck Price + cprice&broadvision.com +1288 + ALFA, Inc. + Jau-yang Chen + cjy&alfa.com.tw +1289 + TELEFONICA SISTEMAS, S.A. + Enrique Le More + elemore&ts.es +1290 + Image Sciences, Inc. + Al Marmora + ajm&sail.iac.net +1291 + MITSUBISHI ELECTRIC INFORMATION NETWORK CORPORATION (MIND) + CHIKAO IMAMICHI + imamichi&mind.melco.co.jp +1292 + Central Flow Management Unit + Ramesh Rao + ramesh.rao&eurocontrol.be +1293 + Woods Hole Oceanographic Institution + Andrew R. Maffei + amaffei&whoi.edu +1294 + Raptor Systems, Inc. + Alan Kirby + akirby&raptor.com +1295 + TeleLink Technologies Inc. + Dean Neumann + dneum&telelink.com +1296 + First Virtual Corporation + K.D. Bindra + kd&fvc.com +1297 + Network Services Group + Graham King + ukking&aol.com +1298 + SilCom Manufacturing Technology Inc. + Brian Munshaw + brian.munshaw&canrem.com +1299 + NETSOFT Inc. + Tim Su + paullee&cameonet.cameo.com.tw +1300 + Fidelity Investments + AhLek Chin + ahlek.chin&fmr.com +1301 + Telrad Telecommunications + Eli Greenberg + greenberg&moon.elex.co.il +1302 + VERITAS Software Corp. + Marcus Porterfield + marcus.porterfield&veritas.com +1303 + LeeMah DataCom Security Corporation + Cedric Hui + chui&cs.umb.edu +1304 + Racemi, Inc. + Luis P Caamano + lpc&racemi.com +1305 + USAir, Inc + Loren Cain + loren&usair.com +1306 + Jet Propulsion Laboratory + Paul Springer + pls&jpl.nasa.gov +1307 + ABIT Co + Matjaz Vrecko + vrecko&abit.co.jp +1308 + Dataplex Pty. Ltd. + Warwick Freeman + wef&dataplex.com.au +1309 + Creative Interaction Technologies, Inc. + Dave Neal + daven&ashwin.com +1310 + AimNet Solutions + Bill Myerson + wmyerson&aimnetsolutions.com +1311 + Unassigned + Returned 2004-03-18 + ---none--- +1312 + Klos Technologies, Inc. + Patrick Klos + klos&klos.com +1313 + ACOTEC + Martin Streller + mst&acotec.de +1314 + Datacomm Management Sciences Inc. + Dennis Vane + 70372.2235&compuserve.com +1315 + MG-SOFT d.o.o. + Matjaz Vrecko + matjaz&mg-soft.si +1316 + Plessey Tellumat SA + Eddie Theart + etheart&plessey.co.za +1317 + PaineWebber, Inc. + Sean Coates + coates&pwj.com +1318 + DATASYS Ltd. + Michael Kodet + kodet&syscae.cz +1319 + QVC Inc. + John W. Mehl + John_Mehl&QVC.Com +1320 + IPL Systems + Kevin Fitzgerald + kdf&bu.edu +1321 + Pacific Micro Data, Inc. + Larry Sternaman + mloomis&ix.netcom.com +1322 + DeskNet Systems, Inc + Ajay Joseph + ajay&desknet.com +1323 + TC Technologies + Murray Cockerell + murrayc&tctech.com.au +1324 + Racotek, Inc. + Baruch Jamilly + (612) 832-9800 +1325 + CelsiusTech AB + Leif Amnefelt + leam&celsiustech.se +1326 + Xing Technology Corp. + Jon Walker + jwalker&xingtech.com +1327 + dZine n.v. + Dirk Ghekiere + 100273,1157&compuserve.com +1328 + Electronic merchant Services, Inc. + James B. Moore + JBM&SCEMS.COM +1329 + Linmor Information Systems Management, Inc.
+ Thomas Winkler + thomas.winkler&linmor.com +1330 + ABL Canada Inc. + Marc Johnston + marc.johnston&abl.ca +1331 + University of Coimbra + Fernando P. L. Boavida Fernandes + boavida&mercurio.uc.pt +1332 + Iskratel, Ltd., Telecommunications Systems + Ante Juros + juros&iskratel.si +1333 + ISA Co.,Ltd. + Koji Yanagihara + koji&isa-j.co.jp +1334 + CONNECT, Inc. + Greg Kling + greg&connectrf.com +1335 + Digital Video + Tom Georges + tom.georges&antec.com +1336 + InterVoice, Inc. + Brian Spraetz + bspraetz&intervoice.com +1337 + Liveware Tecnologia a Servico a Ltda + Fabio Minoru Tanada + tanada&lvw.ftpt.br +1338 + Precept Software, Inc. + Karl Auerbach + karl&precept.com +1339 + Heroix Corporation + Sameer J. Apte + sja&sja.heroix.com +1340 + Holland House B.V. + Johan Harmsen + johan&holhouse.nl +1341 + Dedalus Engenharia S/C Ltda + Philippe de M. Sevestre + dedalus.engenharia&dialdata.com.br +1342 + GEC ALSTHOM I.T. + Terry McCracken + terrym&nsg.com.au +1343 + Deutsches Elektronen-Synchrotron + Kars Ohrenberg + Kars.Ohrenberg&desy.de +1344 + Avotus Corporation + Ed Vannatter + ed.vannatter&avotus.com +1345 + Dacoll Ltd + Dan McDougall + dan&stonelaw.demon.co.uk +1346 + NetCorp Inc. + Yanick Dumas + Yanick&NetCorp.com +1347 + KYOCERA Corporation + Shinji Mochizuki + SUPERVISOR&KYOCERA.CCMAIL.COMPUSERVE.COM +1348 + The Longaberger Company + George Haller + 75452.376&compuserve.com +1349 + ILEX + J.Dominique GUILLEMET + dodo&ilex.remcomp.fr +1350 + Conservation Through Innovation, Limited + Doug Hibberd + dhibberd&cti-ltd.com +1351 + SeeBeyond Technology Corporation + Pete Wenzel + pete&seebeyond.com +1352 + Multex Systems, Inc. + Alex Rosin + alexr&multexsys.com +1353 + Gambit Communications, Inc. + Uwe Zimmermann + gambit&gti.com +1354 + Central Data Corporation + Jeff Randall + randall&cd.com +1355 + CompuCom Systems, Inc. + Timothy J. Perna + tperna&compucom.com +1356 + Generex Systems GMBH + F.Blettenberger + 100334.1263&compuserve.com +1357 + Periphonics Corporation + John S. Muller + john&peri.com +1358 + Freddie Mac + Bruce E. Cochran + Bruce_Cochran&freddiemac.com +1359 + Digital Equipment bv + Henk van Steeg + henk.van.steeg&uto.mts.dec.com +1360 + PhoneLink plc + Nick James + Nickj&Phonelink.com +1361 + Voice-Tel Enterprises, Inc. + Jay Parekh + vnet&ix.netcom.com +1362 + AUDILOG + Laurent EYRAUD + eyraud&audilog.fr +1363 + SanRex Corporation + Carey O'Donnell + sanrex&aol.com +1364 + Chloride + Jean Phillippe Gallon + 33-1-60-82-04-04 +1365 + GA Systems Ltd + Garth Eaglesfield + geaglesfield&gasystems.co.uk +1366 + Microdyne Corporation + Evan Wheeler + wheelere&mcdy.com +1367 + Boston College + Eileen Shepard + eileen&bc.edu +1368 + Orange (formerly 'France Telecom') + Olivier Dubuisson + Olivier.Dubuisson&orange.com +1369 + Stonesoft Corp + Jukka Maki-Kullas + juke&stone.fi +1370 + A. G. Edwards & Sons, Inc. + Mike Benoist + benoisme&hqnmon1.agedwards.com +1371 + Attachmate Corp. + Brian L. Henry + brianhe&atm.com +1372 + LSI Logic + Gary Bridgewater + gjb&lsil.com +1373 + interWAVE Communications, Inc. + Bruce Nelson + bruce&iwv.com +1374 + mdl-Consult + Marc De Loore + marcd&mdl.be +1375 + Frobbit AB (formerly 'Firma PAF') + Patrik Fältström + info&frobbit.se +1376 + Nashoba Networks Inc + Rich Curran + rcurran&nashoba.com +1377 + Comedia Information AB + Rickard Schoultz + schoultz&comedia.se +1378 + Harvey Mudd College + Mike Erlinger + mike&cs.hmc.edu +1379 + First National Bank of Chicago + Mark J.
Conroy + mark.conroy&fnb.sprint.com +1380 + Department of National Defence (Canada) + Larry Bonin + burke&alex.disem.dnd.ca +1381 + CBM Technologies, Inc. + George Grenley + grenley&aol.com +1382 + InterProc Inc. + Frank W. Hansen + fhansen&noghri.cycare.com +1383 + Glenayre R&D Inc. + Joseph Tosey + jtosey&glenayre.com +1384 + Telenet GmbH Kommunikationssysteme + Mr. H. Uebelacker + uebelacker&muc.telenet.de +1385 + Softlab GmbH + Martin Keller + kem&softlab.de +1386 + Storage Computer Corporation + William R. Funk, III + funk&world.std.com +1387 + CellStack Systems Ltd + Afzal Haider + ahaider&cellstack.com +1388 + Viewgate Networks + Dominic Fisk + Dominic.Fisk&viewgate.com +1389 + Simple Network Magic Corporation + Daris A Nevil + dnevil&snmc.com +1390 + Stallion Technologies Pty Ltd + Christopher Biggs + chris&stallion.oz.au +1391 + Loan System + Yann Guernion + 100135.426&compuserve.com +1392 + DLR - Deutsche Forschungsanstalt fuer Luft- und Raumfahrt e.V. + Mr. Klaus Bernhardt + klaus.bernhardt&dlr.de +1393 + ICRA, Inc. + Michael R. Wade + MWADE&ibm.com +1394 + Probita + Steve Johnson + johnson&probita.com +1395 + NEXOR Ltd + Colin Robbins + c.robbins&nexor.co.uk +1396 + American Internation Facsimile Products + Tom Denny + denny&aifp.com +1397 + Tellabs + Stuart Barr + stuart.barr&tellabs.com +1398 + DATAX + Casier Fred + 100142.2571&compuserve.com +1399 + IntelliSys Corporation + Pauline Sha + 76600.114&compuserve.com +1400 + Sandia National Laboratories + Diana Eichert + deicher&sandia.gov +1401 + Synerdyne Corp. + Dan Burns + 310-453-0404 +1402 + UNICOM Electric, Inc. + Christopher Lin + jlo&interserv.com +1403 + Central Design Systems Inc. + Bala Parthasarathy + bala&cdsi.com +1404 + The Silk Road Group, Ltd. + Tim Bass + bass&silkroad.com +1405 + Positive Computing Concepts + Russel Duncan + 100026.1001&compuserve.com +1406 + First Data Resources + Bobbi Durbin + bdurbin&marlton.1dc.com +1407 + INETCO Systems Limited + Paul A. Girone + paul_girone&inetco.com +1408 + NTT Mobile Communications Network Inc. + Hideaki Nishio + nishio&trans.nttdocomo.co.jp +1409 + Target Stores + Tim Hadden + tim_hadden&target.e-mail.com +1410 + Advanced Peripherals Technologies, Inc. + Yoshio Kurishita + kurishi&mb.tokyo.infoweb.or.jp +1411 + Juniper Networks/Funk Software + Kenneth Culbert + kculbert&juniper.net +1412 + DunsGate, a Dun and Bradstreet Company + David Willen + WILLENDC&acm.org +1413 + AFP + Christophe MONGARDIEN + mykeeper&afp.com +1414 + Communications & Power Industries, Satcom Division + TJ Boswell + Tajuana.Boswell&gd-ms.com +1415 + The Williams Companies, Inc. + Josh Garrett + CertificateAdministrator&Williams.com +1416 + ASP Technologies, Inc. + Phil Hutchinson + VantageASP&aol.com +1417 + Philips Communication Systems + Jan Maat + Jan.Maat&philips.com +1418 + Dataprobe Inc. + David Weiss + dweiss&dataprobe.com +1419 + ASTROCOM Corp. + DONALD A. LUCAS + 612-378-7800 +1420 + CSTI(Communication Systems Technology, Inc.) + Ronald P.Ward + rward&csti-md.com +1421 + Sprint + Chuck Irvine + chuck.irvine&mail.sprint.com +1422 + Syntax + Joseph A. Dudar + joe&syntax.com +1423 + LIGHT-INFOCON + Mr. Santos Farias + Katyusco&cgsoft.softex.br +1424 + Performance Technology, Inc. + Lewis Donzis + lew&perftech.com +1425 + CXR + Didier ANA + didier.ana&cxr.fr +1426 + Amir Technology Labs + Derek Palma + dpalma&atlabs.com +1427 + ISOCOR + Marcel DePaolis + marcel&isocor.com +1428 + Array Technology Corportion + Mark Schnorbeger + postmaster&arraytech.com +1429 + Scientific-Atlanta, Inc. 
+ Tamsen Pechman + Tamsen.Pechman&SciAtl.com +1430 + GammaTech, Inc. + Benny N. Ormson + ormson&ionet.net +1431 + Telkom SA Limited + Victor Wilson + wilsonvm&telkom.co.za +1432 + CIREL SYSTEMES + Isabelle REGLEY + 100142.443&compuserve.com +1433 + Redflex Limited Australia + Eric Phan + epyl&mulga.cs.mu.oz.au +1434 + Hermes - Enterprise Messaging LTD + Shaul Marcus + shaul&hermes.co.il +1435 + Acacia Networks Inc. + Steve DesRochers + sdesrochers&acacianet.com +1436 + NATIONAL AUSTRALIA BANK Ltd. + Mr. Lindsay Hall + lindsay&nabaus.com.au +1437 + SineTec Technology Co.,Ltd. + Louis Fu + louis&rd.sinetec.com.tw +1438 + Applied Innovation Inc. + Engineering MIB Administrator + snmp&aiinet.com +1439 + Arizona State University + Hosoon Ku + Hosoon.Ku&asu.edu +1440 + Xionics Document Technologies, Inc. + Robert McComiskie + rmccomiskie&xionics.com +1441 + Southern Information System Inc. + Dr.Ruey-der Lou + idps74&shts.seed.net.tw +1442 + Nebula Consultants Inc. + Peter Schmelcher + nebula&telus.net +1443 + SITRE, SA + PEDRO CALERO RODRIGUEZ + sitre&gapd.id.es +1444 + Paradigm Technology Ltd + Roland Heymanns + roland&paradigm.co.nz +1445 + Telub AB + Morgan Svensson + morgan.svensson&telub.se +1446 + Virginia Polytechnic Institute and State University + Phil Benchoff + hostmaster&vt.edu +1447 + Martis Oy + Seppo Hirviniemi + Seppo.Hirviniemi&martis.fi +1448 + ISKRA TRANSMISSION + Lado Morela + Lado.Morela&guest.arnes.si +1449 + QUALCOMM Incorporated + Frank Quick + fquick&qualcomm.com +1450 + AOL / Netscape Communications Corp. + Bill Burns + oid-admin&aol.net +1451 + BellSouth Wireless, Inc. + Chris Hamilton + hamilton.chris&bwi.bls.com +1452 + NUKO Information Systems, Inc. + Rajesh Raman + nuko&netcom.com +1453 + IPC Information Systems, Inc. + Kenneth Lockhart + lockhark&ipc.com +1454 + Estudios y Proyectos de Telecomunicacion, S.A. + Bruno Alonso Plaza + 100746.3074&compuserve.com +1455 + Winstar Wireless + Bob Hannan + bhannan&winstar.com +1456 + Terayon Corp. + Amir Fuhrmann + amir&terayon.com +1457 + CyberGuard CorporationDavid Rhein + ---none--- + David.Rhein&mail.cybg.com +1458 + AMCC + Todd Martin + todd.martin&amcc.com +1459 + Jupiter Technology, Inc. + Bill Kwan + billk&jti.com +1460 + Delphi Internet Services + Diego Cassinera + diego&newscorp.com +1461 + Kesmai Corporation + Diego Cassinera + diego&newscorp.com +1462 + Compact Devices, Inc. + John Bartas + jbartas&devices.com +1463 + OPTIQUEST + ERIK WILLEY + optiques&wdc.net +1464 + Loral Defense Systems-Eagan + Marvin Kubischta + mkubisch&eag.unisysgsg.com +1465 + OnRamp Technologies + Carl W. Kalbfleisch + cwk&onramp.net +1466 + Mark Wahl + Mark Wahl + M.Wahl&isode.com +1467 + Loran International Technologies, Inc. + David Schenkel + schenkel&loran.com +1468 + S & S International PLC + Paul Gartside + pgartside&sands.uk.com +1469 + Atlantech Technologies Ltd. + Robin A Hill + robinh&atlantec.demon.co.uk +1470 + IN-SNEC + Patrick Lamourette + fauquet&calvanet.calvacom.fr +1471 + Melita International Corporation + Bob Scott + rescott&melita.com +1472 + Sharp Laboratories of America + Randy Turner + turnerr&sharpsla.com +1473 + Groupe Decan + Nicolas lacouture + (33)78-64-31-00 +1474 + Spectronics Micro Systems Limited + David Griffiths + davidg&spectronics.co.uk +1475 + varetis COMMUNICATIONS GmbH + Alexander Osterloh + Alexander.Osterloh&varetis.de +1476 + ION Networks, Inc. + Zoran Lazarevic + Zoran.Lazarevic&ion-networks.com +1477 + Telegate GlobalAccess Technology Ltd.
+ Amir Wassermann + Daeg&zeus.datasrv.co.il +1478 + Merrill Lynch & Co., Inc. + Robert F. Marano + rmarano&ml.com +1479 + JCPenney Co., Inc. + Edward Cox + ecox&jcpenney.com +1480 + The Torrington Company + Robert Harwood + harwood&hydra.torrington.com +1481 + GS-ProActive + Giovanni Sciavicco + giovanni_sciavicco&yahoo.com +1482 + Synamedia + Nick Fielibert + nfielibert&synamedia.com +1483 + vortex Computersysteme GmbH + Vitus Jensen + vitus&vortex.de +1484 + DataFusion Systems (Pty) Ltd + Mr. H Dijkman + dijkman&stb.dfs.co.za +1485 + Allen & Overy + Aaron Gibbs + aaron.gibbs&AllenOvery.com +1486 + Atlantic Systems Group + Roy Nicholl + Roy.Nicholl&ASG.unb.ca +1487 + Kongsberg Informasjonskontroll AS + Paal Hoff + ph&inko.no +1488 + ELTECO a.s. + Ing. Miroslav Jergus + elteco&uvt.utc.sk +1489 + Schlumberger Limited + Matthew D. Smith + hostmaster&slb.com +1490 + CNI Communications Network International GmbH + Dr. Michael Bauer + michael.Bauer&cni.net +1491 + M&C Systems, Inc. + Seth A. Levy + mcsys&ix.netcom.com +1492 + OM Systems International (OMSI) Mats + Andersson + mats.andersson&om.se +1493 + DAVIC (Digital Audio-Visual Council) Richard + Lau + cll&nyquist.bellcore.com +1494 + ISM GmbH + Bernd Richte + brichter&ism.mhs.compuserve.com +1495 + E.F. Johnson Co. + Dan Bown + dbown&efjohnson.com +1496 + Baranof Software, Inc. + Ben Littauer + littauer&baranof.com +1497 + University of Texas Houston + William A. Weems + wweems&oac.hsc.uth.tmc.edu +1498 + Ukiah Software Solutions/EDS/HDS + Tim Landers + tlanders&hds.eds.com +1499 + STERIA + Christian Jamin + c.jamin&X400.steria.fr +1500 + ATI Australia Pty Limited + Peter Choquenot + pchoq&jolt.mpx.com.au +1501 + The Aerospace Corporation Michael + Erlinger + erlinger&aero.org +1502 + Orckit Communications Ltd. + Nimrod Ben-Natan + nimrod&orckit.co.il +1503 + Tertio Limited + James Ho + jamesho&tertio.co.uk +1504 + Frequentis Comsoft GmbH (formerly 'Comsoft Solutions GmbH') + Frank Kulasik + frank.kulasik&frequentis.com +1505 + Power Center Software LLC + Jay Whitney + jw&powercenter.com +1506 + Technologic, Inc. + Perry Flinn + perry&tlogic.com +1507 + Vertex Data Science Limited + Norman Fern + norman_fern&htstamp.demon.co.uk +1508 + ESIGETEL + Nader Soukouti + soukouti&esigetel.fr +1509 + Illinois Business Training Center Weixiong + Ho + wxho&nastg.gsfc.nasa.gov +1510 + Arris Networks, Inc. + Eric Peterson + epeterson&casc.com +1511 + TeamQuest Corporation + Jon Hill + jdh&teamquest.com +1512 + Sentient Networks + Jeffrey Price + jprice&sentientnet.com +1513 + Skyrr hf. + Helgi Helgason + helgi.helgason&skyrr.is +1514 + Tecnologia y Gestion de la Innovacion + Manuel Lopez-Martin + mlm&tgi.es +1515 + Connector GmbH + Matthias Reinwarth + Matthias.Reinwarth&connector.de +1516 + Kaspia Systems, Inc. + Jeff Yarnell + jeffya&kaspia.com +1517 + SmithKline Beecham + Campbell White + 0181-975-3030 +1518 + NetCentric Corp. + Gilbert Benghiat + gilbert.benghiat&netcentric.com +1519 + ATecoM GmbH + Michael Joost + joost&atecom.de +1520 + Citibank Canada + Mike Rothwell + 416-941-6007 +1521 + MMS (Matra Marconi Space) + PLANCHOU Fabrice + planchou&mms.matra-espace.fr +1522 + Intermedia Communications, Inc. + Ray Tumi + RMTUMI&intermedia.com +1523 + School of Computer Science, University Science of Malaysia + Mr. Sureswaran Ramadass + sures&cs.usm.my +1524 + University of Limerick + Mr.
Brian Adley + brian.adley&ul.ie +1525 + ACTANE + Jean Vincent + actane&pacwan.mm-soft.fr +1526 + Collaborative Information Technology Research Institute(CITRI) + Nam Hong Cheng + hong&catt.citri.edu.au +1527 + Intermedium A/S + Peter Shorty + intermed&inet.uni-c.dk +1528 + ANS CO+RE Systems, Inc. + Dennis Shiao + shiao&ans.net +1529 + UUNET Technologies, Inc. + Jim Potter + jim.potter&mci.com +1530 + Telesciences, Inc. + Hitesh Patel + h.patel&telesciences.com +1531 + QSC Audio Products + Ron Neely + RON_NEELY&qscaudio.com +1532 + Australian Department of Employment, Education and Training + Peter McMahon + peter_mcmahon&vnet.ibm.com +1533 + Network Media Communications Ltd. + Martin Butterworth + mb&netmc.com +1534 + Sodalia + Giovanni Cortese + cortese&sodalia.sodalia.it +1535 + Innovative Concepts, Inc. + Andy Feldstein + andy&innocon.com +1536 + Japan Computer Industry Inc. + Yuji Sasaki + kyagi&po.iijnet.or.jp +1537 + Telogy Networks, Inc. + Oren D. Eisner + oeisner&telogy.com +1538 + Merck & Company, Inc. + Timothy Chamberlin + tim_chamberlin&merck.com +1539 + GeoTel Communications Corporation + Jerry Stern + jerrys&geotel.com +1540 + Sun Alliance (UK) + Peter Lancaster + +44 1403 234437 +1541 + AG Communication Systems + Pratima Shah + shahp&agcs.com +1542 + Pivotal Networking, Inc. + Francis Huang + pivotal&netcom.com +1543 + TSI TelSys Inc. + Jay Costenbader + mib-info&tsi-telsys.com +1544 + Harmonic Systems Incorporated + Timon Sloane + timon&timonware.com +1545 + ASTRONET Corporation + Chester Brummett + cbrummet&astronet.mea.com +1546 + Frontec + Erik Steinholtz + Erik.Steinholtz&sth.frontec.se +1547 + NetVision + Anne Gowdy + gowdy&ix.netcom.com +1548 + FlowPoint Corporation + Philippe Roger + roger&flowpoint.com +1549 + Allied Data Technologies + Peter Kuiper + peter&tron.nl +1550 + Nuera Communication Inc. + Kuogee Hsieh + kgh&pcsi.cirrus.com +1551 + Radnet Ltd. + Osnat Cogan + radnet&radmail.rad.co.il +1552 + Océ Technologies BV + Rob van den Tillaart + standards&oce.com +1553 + Air France + Chantal NEU + neuch&airfrance.fr +1554 + Communications & Power Engineering, Inc. + Ken Dayton + kd&compwr.com +1555 + Charter Systems + Michael Williams + mwilliams&charter.com +1556 + Sonus Networks, Inc. + Information Security + infosec&sonusnet.com +1557 + Paragon Networks International + Joseph Welfeld + jwelfeld&howl.com +1558 + Skog-Data AS + Kent Aune + ka&skogdata.no +1559 + mitec a/s + Arne-Jørgen Auberg + mitec&sn.no +1560 + THOMSON-CSF / Departement Reseaux d'Entreprise + Eric BOUCHER + eric.boucher&rcc.thomson.fr +1561 + Ipsilon Networks, Inc. + Joe Wei + jwei&ipsilon.com +1562 + Kingston Technology Company + Barry Man + barry_man&kingston.com +1563 + Harmonic Lightwaves + Abi Shilon + abi&harmonic.co.il +1564 + InterActive Digital Solutions + Rajesh Raman + rraman&sgi.com +1565 + Coactive Aesthetics, Inc. + Dan Hennage + dan&coactive.com +1566 + Tech Data Corporation + Michael Brave + mbrave&techdata.com +1567 + Z-Com + Huang Hsuang Wang + hhwang&center.zcom.com.tw +1568 + COTEP + Didier VECTEN + 100331.1626&COMPUSERVE.COM +1569 + Raytheon Company + Dean DeFreitas + pki&raytheon.com +1570 + Telesend Inc. + Craig Sharper + csharper&telesend.com +1571 + NCC + Nathan Guedalia + natig&ncc.co.il +1572 + Forte Software, Inc.
+ Geoff Puterbaugh + geoff&forte.com +1573 + McAfee (formerly 'Secure Computing Corporation') + Paul Meyer + Paul_Meyer&mcafee.com +1574 + BEZEQ + Tom Lorber + ltomy&dialup.netvision.net.il +1575 + TU Braunschweig + Jan-Marc Pilawa + noc&tu-bs.de +1576 + Stac Inc. + Laurence Church + lchurch&stac.com +1577 + StarNet Communications + Christopher Shepherd + belgo&winternet.com +1578 + Universidade do Minho + Dr Vasco Freitas + vf&di.uminho.pt +1579 + Department of Computer Science, University of Liverpool + Dave Shield + D.T.Shield&csc.liv.ac.uk +1580 + Tekram Technology, Ltd. + Mr. Joseph Kuo + jkuo&tekram.com.tw +1581 + RATP + Pierre MARTIN + pma&ratp.fr +1582 + Rainbow Diamond Limited + Frank O'Dwyer + fod&brd.ie +1583 + Magellan Communications, Inc + Paul Stone + paul&milestone.com +1584 + Bay Networks Incorporated + Steven P. Onishi + sonishi&BayNetworks.com +1585 + Quantitative Data Systems (QDS) + Joe R. Lindsay Jr. + JLindsay&QDS.COM +1586 + ESYS Limited + Mark Gamble + mgamble&esys.co.uk +1587 + Switched Network Technologies (SNT) + Bob Breitenstein + bstein&sntc.com +1588 + Brocade Communications Systems, Inc. + Scott Kipp + skipp&brocade.com +1589 + Computer Resources International A/S (CRI) + Ole Rieck Sorensen + ors&cri.dk +1590 + Luchtverkeersleiding Nederland + Antony Verheijen + administrative&lvnl.nl +1591 + GTIL + Yakov Roitman + yakovr&pst.co.il +1592 + XactLabs Corporation + Bill Carroll + billc&xactlabs.com +1593 + Quest Software, Inc. (formerly 'NetPro Computing, Inc.') + Iana Administrator + iana&quest.com +1594 + TELESYNC + Fred R Stearns + frs&mindspring.com +1595 + ewt breitbandnetze gmbh + Thomas Anders + thomas.anders&ewt-bn.de +1596 + INS GmbH + Andreas Frackowiak + af&ins.de +1597 + Distributed Processing Technology + Joseph A. Ballard + ballard&dpt.com +1598 + Tivoli Systems Inc. + Greg Kattawar + greg.kattawar&tivoli.com +1599 + Network Management Technologies + Mark Hammett + mhammett&nmt.com.au +1600 + SIRTI + Mr. Angelo ZUCCHETI + A.Zucchetti&sirti.it +1601 + TASKE Technology Inc. + Dennis Johns + dennis&taske.com +1602 + CANON Inc. + Masatoshi Otani + otani&cptd.canon.co.jp +1603 + Systems and Synchronous, Inc. + Dominic D. Ricci + ddr&ssinc.com +1604 + XFER International + Fred Champlain + fred&xfer.com +1605 + Scandpower A/S + Bjorn Brevig + Bjorn.Brevig&halden.scandpower.no +1606 + Consultancy & Projects Group srl + Monica Lausi + monica&cpg.it +1607 + STS Technologies, Inc. + Scott Chaney + ststech&icon-stl.net +1608 + Mylex Corporation + Vytla Chandramouli + mouli&mylex.com +1609 + CRYPTOCard Corporation + Tony Walker + tony&cryptocard.com +1610 + LXE, Inc. + Don Hall + dhh2347&lxe.com +1611 + BDM International, Inc. + John P. Crouse + jcrouse&bdm.com +1612 + Spacenet Inc. + Alan Schneider + alan.schneider&spacenet.com +1613 + Datanet GmbH + Mr. Juraj Synak + juraj&datanet.co.at +1614 + Opcom, Inc. + Terry O'Neal + opcomeng&swbell.net +1615 + Mlink Internet Inc. + Patrick Bernier + pat&4p.com +1616 + SR-Telecom Inc. + Michael A. Antonelli + michael_antonelli&srtelecom.com +1617 + Net Partners Inc. + Deepak Khosla + dkhosla&npartners.com +1618 + Peek Traffic - Transyt Corp. + Robert De Roche + Robert2161&aol.com +1619 + Comverse Information Systems + Carlo San Andres + Carlo_San_Andres&Comverse.com +1620 + Data Comm for Business, Inc. + John McCain + jmccain&dcbnet.com +1621 + CYBEC Pty. Ltd. + Jia Dong HUANG + 100240.3004&compuserve.com +1622 + Mitsui Knowledge Industry Co.,Ltd. + Daisuke Kumada + kumada&pc.mki.co.jp +1623 + Tech Laboratories, Inc. 
+ Pierre Bergeron + pierrebergeron&techlabsinc.com +1624 + Blockade Systems Corp. + Fino Napoleone + fino.napoleone&blockade.com +1625 + Nixu Oy + Tuomas Mettanen + sysadmin-root&nixu.com +1626 + Australian Software Innovations (Services) Pty. Ltd. + Andrew Campbell + andrew&asi.oz.au +1627 + Omicron Telesystems Inc. + Martin Gadbois + mgadb&ibm.net +1628 + DEMON Internet Ltd. + Ashley Burston + ashleyb&demon.net +1629 + PB Farradyne, Inc. + Alan J. Ungar + UngarA&farradyne.com +1630 + Telos Corporation + Sharon Sutherlin + sharon.sutherlin&telos.com +1631 + Manage Information Technologies + Kim N. Le + 72124.2250&compuserve.com +1632 + Harlow Butler Broking Services Ltd. + Kevin McCarthy + +44 171 407 5555 x 5246 +1633 + Eurologic Systems Ltd + Brian Meagher + eurologic&attmail.com +1634 + Telco Research Corporation + Bizhan Ghavami + ghavami&telcores.com +1635 + Mercedes-Benz AG + Philipp Weber + philipp.wp.weber&mercedes-benz.com +1636 + HOB GmbH & Co. KG - HOB Germany + Richard Wunderlich + richard.wunderlich&hob.de +1637 + NOAA + Ken Sragg + ksragg&sao.noaa.gov +1638 + Cornerstone Software + Jennifer Parent + JenniferParent&corsof.com +1639 + Wink Communications + Dave Brubeck + dave.brubeck&wink.com +1640 + Thomson Electronic Information Resources (TEIR) + John Roberts + jroberts&teir.com +1641 + Saab Traffic Management + R. Maan + remco.maan&saabgroup.com +1642 + KPMG + Richard Ellis + richard.ellis&kpmg.co.uk +1643 + Loral Federal Systems + Mike Gulden + mgulden&lfs.loral.com +1644 + S.I.A.- Societa Interbancaria per l'Automazione + Fiorenzo Claus + claus&sia.it +1645 + United States Cellular Corp. + Tim Schaffer + USCDLISIESENTERPRISESECURITY-IdentityAccessMgmt&uscellular.com +1646 + AMPER DATOS S.A. + Angel Miguel Herrero + 34-1-8040909 +1647 + Carelcomp Forest Oy + Rauno Hujanen + Rauno.Hujanen&im.ccfo.carel.fi +1648 + Open Environment Australia + Geoff Bullen + gbullen&jarrah.com.au +1649 + Integrated Telecom Technology, Inc. + Suresh Rangachar + suresh&igt.com +1650 + Langner Gesellschaft fuer Datentechnik mbH + Heiko Bobzin + hb&langner.com +1651 + Wayne State University + Juan J. Richardson + cc5107&wayne.edu +1652 + SICC (SsangYong Information & Communications Corp.) + Mi-Young, Lee + traum&toody.sicc.co.kr +1653 + THOMSON - CSF + De Zaeytydt + 33 1 69 33 00 47 +1654 + Teleconnect Dresden GmbH + Andreas Hanßke + hana&teleconnect.de +1655 + Panorama Software Inc. + Bill Brookshire + bbrooksh&pansoft.com +1656 + CompuNet Systemhaus GmbH + Heiko Vogeler + hvo&compunet.de +1657 + JAPAN TELECOM CO.,LTD. + Seiji Kuroda + kuroda&japan-telecom.co.jp +1658 + TechForce Corporation + Mark Dauscher + mdauscher&techforce.com +1659 + Granite Systems Inc. + Michael Fine + mfine&arp.com +1660 + Bit Incorporated + Tom Alexander + talex&bitinc.com +1661 + Companhia de Informatica do Parana - Celepar + Armando Rech Filho + armando&lepus.celepar.br +1662 + Rockwell International Corporation + Mary Vogel + mcvogel&corp.rockwell.com +1663 + Ancor Communications + Bently H. Preece + benp&ancor.com +1664 + Royal Institute of Technology, Sweden (KTH) + Rickard Schoultz + staff&kth.se +1665 + SUNET, Swedish University Network + Rickard Schoultz + staff&sunet.se +1666 + Sage Instruments, Inc. + jack craig + jackc&sageinst.com +1667 + Candle Corporation + Dannis Yang + Dannis_Yang&Candle.Com +1668 + CSO GmbH + Andreas Kientopp + 100334.274&compuserve.com +1669 + M3i Systems Inc.
+ Louis St-Pierre + lstpier&m3isystems.qc.ca +1670 + CREDINTRANS + Pascal BON + BON&credintrans.fr +1671 + ADVA Optical Networking Ltd. + Alistair Swales + aswales&advaoptical.com +1672 + Pierce & Associates + Fred Pierce + fred&hdhntr.com +1673 + RTS Wireless + Mike Kovalenko + mikekov&rtswireless.com +1674 + R.I.C. Electronics + Andrew Philip + 102135.1051&compuserve.com +1675 + Amoco Corporation + Tim Martin + tlmartin&amoco.com +1676 + Qualix Group, Inc. + Takeshi Suganuma + tk&qualix.com +1677 + Sahara Networks, Inc. + Thomas K Johnson + johnson&saharanet.com +1678 + Hyundai Electronics Industries Co.,Ltd. + Ha-Young OH + hyoh&super5.hyundai.co.kr +1679 + RICH, Inc. + Yuri Salkinder + yuri.salkinder&richinc.com +1680 + Amati Communications Corp. + Mr. Gail Cone + gpc&amati.com +1681 + P.H.U. RysTECH + Rafal Fagas + apc&silter.silesia.ternet.pl +1682 + Data Labs Inc. + Raul Montalvo + raul.montalvo&datalabsinc.com +1683 + Occidental Petroleum Corporation + Howard D. Heerwagen + admin_oxy&oxy.com +1684 + Rijnhaave Internet Services + Thierry van Herwijnen + t.vanherwijnen&rijnhaave.net +1685 + Lynx Real-Time Systems, Inc. + Ganesan Vivekanandan + ganesan&lynx.com +1686 + QA IT Services Ltd + Eric G Smith + Eric.G.Smith&qa.com +1687 + SofTouch Systems, Inc. + Kody Mason + 405-947-8080 +1688 + Sonda S.A. + Hermann von Borries + h_vborries&sonda.cl +1689 + McCormick Nunes Company + Charles Craft + chucksea&mindspring.com +1690 + Umeå Universitet + Roland Hedberg + Roland.Hedberg&umdac.umu.se +1691 + NetiQ Corporation + Ching-Fa Hwang + cfh&netiq.com +1692 + Starlight Networks + Jim Nelson + jimn&starlight.com +1693 + Informacion Selectiva S.A. de C.V. ( Infosel ) + Francisco Javier Reyna Castillo + freyna&infosel.com.mx +1694 + HCL Technologies Limited + Ms. Bindu Dandapani + bindud&hcl.in +1695 + Maryville Data Systems, Inc + Bernard W. Favara + bernie.favara&maryville.com +1696 + EtherCom Corp + Nafis Ahmad + nafis&ethercom.com +1697 + MultiCom Software + Ari Hallikainen + ari.hallikainen&multicom.fi +1698 + BEA Systems Ltd. + Garth Eaglesfield + geaglesfield&beasys.co.uk +1699 + Advanced Technology Ltd. + Yuda Sidi + atlsidi&inter.net.il +1700 + Mobil Oil + Oscar Masters + Oscar_Masters&email.mobil.com +1701 + Arena Consulting Limited + Colin Haxton + Colin.Haxton&arena.co.nz +1702 + Netsys International (Pty) Ltd + Wayne Botha + wayne&inetsys.alt.za +1703 + Titan Information Systems Corp. + Edgar St.Pierre + edgar&titan.com +1704 + Cogent Data Technologies + Wade Andrews + wadea&cogentdata.com +1705 + Reliasoft Corporation + Chao-Li Tarng + chaoli&reliasoft.com +1706 + Midland Business Systems, Inc. + Bryan Letcher + bletcher&ccmailgw.str.com +1707 + Optimal Networks + Don Wagner + donw&optimal.com +1708 + Gresham Computing plc + Tony Churchill + tchurchill&gresham.co.uk +1709 + Leidos, Inc. (formerly 'SAIC') + John Brady (Network Information Center) + nic&net.saic.com +1710 + Acclaim Communications + Pratima Janakir + janakir&acclaiminc.com +1711 + BISS Limited + David Bird + dbird&biss.co.uk +1712 + Caravelle Inc. + Trent Antille + tech&caravelle.com +1713 + Diamond Lane Communications Corporation + Bill Hong + hong&dlcc.com +1714 + Infortrend Technology, Inc. + Michael Schnapp + michael&infortrend.com.tw +1715 + Ardatis N.V (formerly 'Orda-B N.V.') + Tom Lauwereins + tom.lauwereins&ardatis.com +1716 + Ariel Corporation + Allan Chu + allan.chu&ariel.com +1717 + Datalex Communications Ltd. + David Tracey + d_tracey&datalex.ie +1718 + Server Technology Inc. + Brian P.
Auclair + brian&servertech.com +1719 + Unimax Systems Corporation + Bill Sparks + bsparks&unimax.com +1720 + DeTeMobil GmbH + Olaf Geschinske + Olaf.Geschinske&ms.DeTeMobil.de +1721 + INFONOVA GmbH + Ing. Alois Hofbauer + alois.hofbauer&infonova.telecom.at +1722 + Kudelski SA + Eric Chaubert + chaubert&nagra-kudelski.ch +1723 + Pronet GmbH + Juergen Littwin + jl&pronet.de +1724 + Westell, Inc. + Rodger D. Higgins + rhiggins&westell.com +1725 + Nupon Computing, Inc. + Tim Lee + tim&nupon.com +1726 + Cianet Ind e Com Ltda (Cianet Networking) + Norberto Dias + ndias&cianet.ind.br +1727 + Aumtech of Virginia (amteva) + Deepak Patil + dpatil&amteva.com +1728 + CheongJo data communication, Inc. + HyeonJae Choi + cyber&cdi.cheongjo.co.kr +1729 + Genesys Telecommunications Laboratories Inc.(Genesys Labs.) + Igor Neyman + igor&genesyslab.com +1730 + Progress Software + Andrew Neumann + aneumann&progress.com + anilv&bedford.progress.com +1731 + ERICSSON FIBER ACCESS + George Lin + gglin&rides.raynet.com +1732 + Open Access Pty Ltd + Richard Colley + richardc&oa.com.au +1733 + Sterling Commerce + Dale Moberg + dale_moberg&stercomm.com +1734 + Predictive Systems Inc. + Adam Steckelman + asteckelman&predictive.com +1735 + Architel Systems Corporation + Natalie Chew + n.chew&architel.com +1736 + QWEST NMS + Joel Lutz + joel.lutz&qwest.com +1737 + Eclipse Technologies Inc. + Alex Holland + alexh&eclipse-technologies.com +1738 + Navy + Ryan Huynh + huynhr&manta.nosc.mil +1739 + Bindi Technologies, Pty Ltd + Tim Potter + bindi&ozemail.com.au +1740 + Hallmark Cards Inc. + Kevin Leonard + kleonard&hallmark.com +1741 + Object Design, Inc. + George M. Feinberg + gmf&odi.com +1742 + Unassigned + ---none--- + ---none--- +1743 + Zenith Data Systems (ZDS) + Daniel G. Peters + dg.peters&zds.com +1744 + Gobi Corp. + Kenneth Hart + khart&cmp.com +1745 + Universitat de Barcelona + Ricard de Mingo + ricardo&ub.es +1746 + Institute for Simulation and Training (IST) + Seng Tan + stan&ist.ucf.edu +1747 + US Agency for International Development + Ken Roko + kroko&usaid.gov +1748 + Tut Systems, Inc. + Mark Miller + markm&tutsys.com +1749 + AnswerZ Pty Ltd (Australia) + Bernie Ryan + bernier&answerz.com.au +1750 + H.Bollmann Manufacturers Ltd (HBM) + Klaus Bollmann + mallen&hbmuk.com +1751 + Lucent Technologies + Richard Bantel + richard.bantel&lucent.com +1752 + phase2 networks Inc. + Jeffrey Pickering + p01152&psilink.com +1753 + Unify Corporation + Bill Bonney + sa&unify.com +1754 + Gadzoox Microsystems Inc. + Kim Banker + 408-399-4877 +1755 + Network One, Inc. + David Eison + deison&faxjet.com +1756 + MuLogic b.v. + Jos H.J. Beck + jos_beck&euronet.nl +1757 + Optical Microwave Networks, Inc. + Joe McCrate + elaw&omnisj.com +1758 + SITEL, Ltd. + Boris Jurkovic + jurkovic&iskratel.si +1759 + Cerg Finance + Philippe BONNEAU + 101605.1403&compuserve.com +1760 + American Internet Corporation + Brad Parker + brad&american.com +1761 + PLUSKOM GmbH + Norbert Zerrer + zerrer&ibm.net +1762 + Dept. of Communications, Graz University of Technology + Dipl.-Ing. Thomas Leitner + tom&finwds01.tu-graz.ac.at +1763 + EarthLink Inc. + Kihoon Jeoung + kihoonj&corp.earthlink.net +1764 + Real Soft, Inc + Rajan Desai + rajan&realsoftinc.com +1765 + Apex Voice Communications, Inc. + Osvaldo Gold + osvaldo&apexvoice.com +1766 + National DataComm Corporation + Ms. Changhua Chiang + 101400.242&compuserve.com +1767 + Telenor Conax AS + Aasmund Skomedal + Asmund.Skomedal&oslo.conax.telenor.no +1768 + Patton Electronics Company + William B.
Clery III + benson&ari.net +1769 + Digital Fairway Corporation + Jan de Visser + jdevisser&digitalfairway.com +1770 + BroadBand Technologies, Inc. + Keith R. Schomburg + krs&bbt.com +1771 + Myricom + Chris Lee + clee&myri.com +1772 + DecisionOne + Doug Green + douggr&world.std.com +1773 + Tandberg Television + Olav Nybo + saxebol&sn.no +1774 + AUDITEC SA + Pascal CHEVALIER + pascalch&dialup.remcomp.fr +1775 + PC Magic + Tommy Cheng + TommyCheng&compuserve.com +1776 + Koninklijke Philips Electronics NV + Philips Intellectual Property & Standards (Section DOMAIN NAMES) + ips.domain&philips.com +1777 + ORIGIN + Bob Rossiter + Robert.M.Rossiter&nl.cis.philips.com +1778 + CSG Systems + Gordon Saltz + gsaltz&probe.net +1779 + Alphameric Technologies Ltd + Mr Tim Raby + 00034.1074&compuserve.com +1780 + NCR Austria + Michael Ostendorf + Michael.Ostendorf&Austria.NCR.COM + ---none--- +1781 + ChuckK, Inc. + Chuck Koleczek + chuckk&well.net +1782 + PowerTV, Inc. + David Ma + dma&powertv.com +1783 + webMethods + Andrew Mastropietro + amastrop&webmethods.com +1784 + Enron Capitol & Trade Resources + Steven R. Lovett + slovett&ect.enron.com +1785 + ORBCOMM + Todd Hara + thara&orbcomm.net +1786 + Jw direct shop + pavel deng + ivb00285&192.72.158.10 +1787 + B.E.T.A. + Brian Mcgovern + mcgovern&spoon.beta.com +1788 + Healtheon + Marco Framba + framba&hscape.com +1789 + Integralis Ltd. + Andy Harris + Andy.Harris&Integralis.co.uk +1790 + Folio Corporation + Eric Isom + eisom&folio.com +1791 + ECTF + Joe Micozzi + jam&voicetek.com +1792 + WebPlanet + Ray Taft + Ray_Taft&webplanet.com +1793 + nStor Corporation + Bret Jones + Bret.Jones&4dmg.net +1794 + Deutsche Bahn AG + Ralf Ziegler + ralf.ziegler&bku.db.de +1795 + Paradyne + Kevin Baughman + klb&eng.paradyne.com +1796 + Nastel Technologies, Inc. + Krish Shetty + nastel&nyc.pipeline.com +1797 + Metaphase Technology, Inc. + Michael Engbrecht + Michael.Engbrecht&metaphasetech.com +1798 + Zweigart & Sawitzki + Mr. Andreas Georgii + 100316.2050&compuserve.com +1799 + PIXEL + Mauro Ieva + mieva&mbox.vol.it +1800 + WaveAccess Inc. + Yoram Feldman + yoram&waveaccess.com +1801 + The SABRE Group + Richard Buentello + richb&fastlane.net +1802 + Redland Technology Corp. + Kody Mason + kody&ionet.net +1803 + PBS + Seton R. Droppers + droppers&pbs.org +1804 + Consensus Development Corporation + Christopher Allen + consensus&consensus.com +1805 + SAGEM SA + DESRAYAUD Andre + 33 1 30 73 70 20 +1806 + I-Cube Inc. + Sundar Rajan + sundar&icube.com +1807 + INTRACOM S.A (HELLENIC TELECOMMUNICATION AND ELECTRONICS INDUSTRY) + N.B Pronios + npro&intranet.gr +1808 + Aetna, Inc. + Lee Kirk + KirkL&Aetna.com +1809 + Dow Jones Markets, Inc. + Geri Cluc + geri&fx.com +1810 + Czech Railways s.o. CIT + Jaroslav Militky + +42 2 24213223 + bernard&cit.cdrail.cz +1811 + Scan-Matic A/S + Svein Moholt +47 37 05 95 00 + svein&scanmatic.no +1812 + DECISION Europe + Joel CHOTARD + (33) 51 41 41 89 + decision&calva.net +1813 + VTEL Corporation + Bill Black + bblack&vtel.com +1814 + Bloomberg, L.P. + Franko Rahimi + frahimi&bny18.bloomberg.com +1815 + Verint Systems, Inc (formerly Witness Systems, Inc) + Marc Calahan + marc.calahan&verint.com +1816 + Rose-Hulman Institute of Technology + Lans H.
Carstensen + Lans.H.Carstensen&rose-hulman.edu +1817 + Aether Technologies + Mark Levy + mlevy&aethertech.com +1818 + Infonet Software Solutions + David Hauck + 604 436 2922 (x234) + hauck&vancouver.osiware.bc.ca +1819 + CSTI (Compagnie des Signaux / Technologies Informatiques) + Mr Camille Breeus + +33 72 35 84 97 + breeus&csti.fr +1820 + LEROY MERLIN + RIGAULT Alain + ---none--- + lmreseau&calva.net +1821 + Total Entertainment Network + Will Coertnik + will&tenetwork.com +1822 + Open Port Technology + Jeffrey Nowland + jnowland&openport.com +1823 + Mikroelektronik Anwendungszentrum Hamburg GmbH + Zbynek Bazanowski + ba&maz-hh.de +1824 + International Management Consulting, Inc. + Mohammad Feizipour + mfeizipour&imci.net +1825 + Fore Systems, Inc. + Dan Nydick + dnydick&fore.com +1826 + MTech Systems + Timothy J. Madden + www-tmadden&aol.com +1827 + RxSoft Ltd. + Timothy Madden + www-tmadden194&aol.com +1828 + Dept. Computer Studies, Loughborough University + Jon Knight + jon&net.lut.ac.uk +1829 + Beta80 S.p.A. + Flavio Gatti + attif&beta80.it +1830 + Galiso Incorporated + Lindsey Lewis + lindsey&montrose.net +1831 + S2 Systems, Inc. + Shu Dong + Shu_Dong&stratus.com +1832 + Optivision, Inc. + Ciro Aloisio Noronha Jr. + ciro&optivision.com +1833 + Countrywide Home Loans + Jon Codispoti + jon_codispoti&countrywide.com +1834 + OA Laboratory Co., Ltd. + Jun Kumakura + kumakura&oalab.co.jp +1835 + SDX Business Systems Ltd + Mike Davison + davison&sdxbsl.com +1836 + West End Systems Corp. + Paul Noseworthy + paul_noseworthy&qmail.newbridge.com +1837 + DK Digital Media + Sid Kalin + kalin&dkdigital.com +1838 + Westel + Jacob Heitz + westelws&iinet.net.au +1839 + Fujitsu Service Limited + Jara Kandhola + jara.kandhola&uk.fujitsu.com +1840 + Inmarsat + Steve Cox + steve.cox&inmarsat.com +1841 + TIMS Technology Ltd + Oliver Goh + go&tims.ch +1842 + CallWare Technologies + Adam Christensen + achriste&callware.com +1843 + NextLink, L.L.C. + Randy Scheets + rscheets&nextlink.net +1844 + TurnQuay Solutions Limited + Roger Thomas + 100014.123&compuserve.com +1845 + Accusort Systems Inc + Wayne J Klein + wjklein&accusort.com +1846 + Deutscher Bundestag + Thomas Mattern + thomas.mattern&bundestag.de +1847 + Joint Research Centre + ---none--- + rui.meneses&jrc.it +1848 + FaxSav + Neil Martin + nim&digitran.com +1849 + Chevy Chase Applications Design + Bryan Chastel de Boinville + ccappdesign&prodigy.com +1850 + Bank Brussel Lambert (BBL) + Mr. Lieven Merckx + lmr&bbl.be +1851 + OutBack Resource Group, Inc. + Jim Pickering + Jrp&outbackinc.com +1852 + Screen Subtitling Systems Ltd + Paul Collins + paul.collins&screen.subtitling.com +1853 + Cambridge Parallel Processing Ltd + Richard Hellier + rlh&cppuk.co.uk +1854 + Boston University + Charles von Lichtenberg + chuckles&bu.edu +1855 + News Digital Systems Ltd + Eli Gurvitz + egurvitz&ndc.co.il +1856 + NuTek 2000, Inc. + Anthony J. Brooks + brooksa&usa.pipeline.com +1857 + Overland Mobile Communication AB + Goran Sander + goran.sander&axon.se +1858 + Axon IT AB + Goran Sander + goran.sander&axon.se +1859 + Gradient Medical Systems + Goran Sander + goran.sander&axon.se +1860 + WaveSpan Corporation + Roberto Marcoccia + roberto&wavespan.com +1861 + Net Research, Inc. + Derek Palma + dpalma&netcom.com +1862 + Browncroft Community Church + Paul R.
Austin + austin&sdsp.mc.xerox.com +1863 + Net2Net Corporation + Ralph Beck + beck&net2net.com +1864 + US Internet + Reed Wade + rwade&usit.net +1865 + Absolute Time + Terry Osterdock + dpalma&netcom.com +1866 + VPNet + Idris Kothari + ikothari&vpnet.com +1867 + NTech + Troy Nolen + tnolen&ntechltd.com +1868 + Nippon Unisoft Corporation + Jinnosuke Nakatani + nak&jusoft.co.jp +1869 + Optical Transmission Labs, Inc. + Niraj Gupta + niraj&syngroup.com +1870 + CyberCash, Inc. + Andrew Jackson + jackson&cybercash.com +1871 + NetSpeed, Inc. + Robert C. Taylor + rtaylor&netspeed.com +1872 + Alteon Networks, Inc. + Sharon Chisholm + schishol&nortelnetworks.com +1873 + Internet Middleware Corporation + Peter Danzig + danzig&netcache.com +1874 + ISOnova GmbH + Matthias Weigel + 101511.327&compuserve.com +1875 + Amiga IOPS Project + Niall Teasdale + aip-mib&hedgehog.demon.co.uk +1876 + Softbank Services Group + Paul Hebert + paulh&sbservices.com +1877 + Sourcecom Corporation + Tet Tran + tet&sourcecom.com +1878 + Telia Promotor AB + Mr Rikard Bladh + Rikard.G.Bladh&Telia.se +1879 + HeliOss Communications, Inc. + Larry Fisher + 71340.2603&compuserve.com +1880 + Optical Access International, Inc. + Matt Bowen + bowen&oai.com +1881 + MMC Networks, Inc. + Sanjeev Shalia + sshalia&mmcnet.com +1882 + Lanyon Ltd. + Alan Stiemens + alan.stiemens&lanyon.com +1883 + Rubico + Heinrich Schlechter + heine&rubico.com +1884 + Quantum Telecom Solutions, Inc. + Michael Flaster + flaster&qts.com +1885 + Archinet + Loh Chin Yean + chinyean&hk.super.net +1886 + i-cubed Ltd. + Douglas J. Berry + dberry&i-cubed.co.uk +1887 + Albis Technologies Ltd. (formerly 'Siemens Switzerland Ltd.') + Thomas Glaus + thomas.glaus&albistechnologies.com +1888 + GigaLabs, Inc. + Simon Fok + sfok&netcom.com +1889 + MET Matra-Ericsson + Francois Gauthie + metfgar&met.fr +1890 + Red Lion Controls (JBM Electronics) + Denis Aull + Engineering&RedLion.net +1891 + OPTIM Systems, Inc. + Mr. Sunil Meht + smehta&access.digex.net +1892 + Software Brewery + David Foster + dfoster&ccnet.com +1893 + WaveLinQ + Kim Luong + kluong&mcimail.com +1894 + Siemens ICN + Anne Robb + anne.robb&icn.siemens.com +1895 + IEX Corporation + Paul B. Westerfield + pbw&iex.com +1896 + TrueTime + Mark Elliot + elliot&nbn.com +1897 + HT Communications Inc. + Vaughn Nemecek + vnemecek&htcomm.com +1898 + Avantcomp Oy + Juha Luoma + Juha.Luoma&avantcomp.fi +1899 + InfoVista + Yann Le Helloco + ylehelloco&infovista.com +1900 + Openwave Systems, Inc. + Seetharaman Ramasubraman + seetharaman.ramasubramani&openwave.com +1901 + Sea Wonders + Ed Wiser + ewiser&dp-2-30.iglou.net +1902 + HeadStart Enterprise + Rick Manzanares + rickmanz&msn.com +1903 + B-SMART Inc. + Neil Peters + info&b-smart.com +1904 + ISMA Ltd + Stephen Dunne + sdun&isma.co.uk +1905 + 3DV Technology, Inc. + Charles A. Dellacona + charlie&dddv.com +1906 + StarCom Technologies Inc. + Jon Fatula + jon&starcomtech.com +1907 + L.L.Bean + Chuck McAllister + chuck.mcallister&llbean.com +1908 + NetIcs Inc. + Dahai Ding + ding&netics-inc.com +1909 + Infratec plus GmbH + Michael Groeger + mgroeger&infratec-plus.de +1910 + 3edges + Daniel Drucker + dmd&3e.org +1911 + GISE mbH + Volkmar Brisse / Hans-Jurgen Laub + brisse&gise.com + laub&gise.com +1912 + lan & pc services + Juan A. Fernandez + lanpc&erols.com +1913 + RedPoint Software Corporation + Tim S.
Woodall + tim&redpt.com +1914 + Atempo Inc + Fabrice Clara + fabrice.clara&atempo.com +1915 + I-95-CC + JOERG "NU" ROSENBOHM + JOSENBOHM&FARRADYNE.COM +1916 + Extreme Networks + Gary W. Hanning + ghanning&extremenetworks.com +1917 + Village of Rockville Centre + John Peters + rvc&li.net +1918 + Swichtec Power Systems + Adrian Jackson + ajackson&swichtec.co.nz +1919 + Deutscher Wetterdienst + Dietmar Glaser + lanadm&dwd.d400.de +1920 + Bluebird Software + Linda Kasparek + ljk&bluebird.zipnet.net +1921 + Svaha Interactive Media, Inc. + Matthew Baya + mbaya&pobox.com +1922 + Sully Solutions + Alan Sullivan + sully&frontiernet.net +1923 + Blue Line + J. Ortiz + blueline12&msn.com +1924 + Castleton Network Systems Corp + Glen Tracey + tracey&castleton.com + Lawrence Lou + llou&castleton.com +1925 + Visual Edge Software Ltd. + Daniel M. Foody + dan&vedge.com +1926 + NetGuard Technologies, Inc. + Stu Selig + stuselig&msn.com +1927 + SoftSell, Inc. + John R. Murray + John_and_Elaine&msn.com +1928 + MARNE SOFTWARE + JAMES R. CLOWERS + marne1&tac-wa3-16.ix.netcom.com +1929 + Cadia Networks, Inc. + Cheryl Scattaglia + cscattaglia&cadia.com +1930 + Milton + Michael Milton + MHPMilton&msn.com +1931 + Del Mar Solutions, Inc. + Tim Flagg + timf&Delmarsol.COM +1932 + KUMARAN SYSTEMS + G. CHANDRASEKHAR + smart&kumaran.com +1933 + Equivalence + Craig Southeren + equival&ozemail.com.au +1934 + Homewatch International, Inc. + H. J. McElroy + hw corp&aol.com +1935 + John Rivers + john rivers + jar&clarkston.com +1936 + Remark Services, Inc. + H. J. McElroy + mkt mac&aol.com +1937 + Deloitte & Touche Consulting Group + David Reed + dreed&dttus.com +1938 + Flying Penguin Productions + Ronald J. Fitzherbert + ron&penguin.net +1939 + The Matrix + Yasha Harari + harari&erols.com +1940 + Eastern Computers, Inc. + Simon Zhu + simonz&ecihq.com +1941 + Princeton BioMedica Inc. + Walter Kang + prinbiomed&aol.com +1942 + SanCom Technology, Inc. + Gene Huang + jye&travelin.com +1943 + National Computing Centre Ltd. + Dermot Dwyer + dermot&ncc.co.uk +1944 + Aval Communications + Larry Gadallah + gadallahl&aval.com +1945 + WORTEC SearchNet CO. + D.C. Dhabolt + wortec&netins.net +1946 + Dogwood Media + Dave Cornejo + dave&dogwood.com +1947 + Allied Domecq + David Nichol + DNICHOL.Allied&dial.pipex.com +1948 + Telesoft Russia + Verbitsky Alexandr + verbitsk&tlsoft.ru +1949 + UTStarcom, Inc. + Ruchir Godura + godura&utstar.com +1950 + comunit + Bjoern Kriews + bkr&comunit.com +1951 + Traffic Sofware Ltd + John Toohey + johnt&traffic.is +1952 + Qualop Systems Corp + Simon Chen + schen&qualop.com +1953 + Vinca Corporation + Al Mudrow + al&vinca.co +1954 + AMTEC spa + Giovanni SANTONI + amtec&interbusiness.it +1955 + GRETACODER Data Systems AG + Kevin Smyth + ksmyth&pax.eunet.ch +1956 + KMSystems, Inc. + Roy Chastain + roy&kmsys.com +1957 + GEVA + Burkhard Kaas + bkaas&GEVA.de +1958 + Red Creek Communications, Inc. + Ramesh Kamath + yin&best.com +1959 + BORG Technology Inc. + Ralph Magnan + ralph&borgtech.com +1960 + Concord Electronics + Greg Hanks + GHanks&concord-elex.com +1961 + Richard Ricci DDS + Richard Ricci + RRicci201&aol.com +1962 + Link International Corp. + Joshua Kim + linkpr&bora.dacom.co.kr +1963 + Intermec Technologies Corp.
+ Joseph Dusio + Joe.Dusio&Intermec.Com +1964 + OPTIMUM Data AG + Robert Mantl + optdata&ibm.net +1965 + DMCNW + Cherice Jobmann + CJOBMANN&INNOVAWIRELESS.COM +1966 + Perle Systems Limited + Moti Renkosinski + mrenkosinski&perle.com +1967 + inktomi corporation + eric hollander + hh&inktomi.com +1968 + TELE-TV Systems, L.P. + Emmanuel D. Ericta + damannix&TELE-TV.com +1969 + Fritz-Haber-Institut + Heinz Junkes + junkes&fhi-berlin.mpg.de +1970 + mediaone.net + Ed Pimentel + epimntl&mail.mediaone.net +1971 + SeaChange International + Peter H. Burgess + PeterB&204.213.65.53 +1972 + CASTON Corporation + Rodney L Caston Sr + caston&premier.net +1973 + Local Net + Vincent Palmieri + palmieri&local.net +1974 + JapanNet + KIYOFUSA FUJII + kfujii&japannet.or.jp +1975 + Nabisco + Ken Christ + ChristK&nabisco.com + Carolyn Sitnik + sitnikC&nabisco.com +1976 + micrologica GmbH + Axel Brauns + brauns&micrologica.de +1977 + Network Harmoni, Inc. + Mike Schulze + mike&networkharmoni.com +1978 + SITA ADS + Fulko Hew + fulko&wecan.com +1979 + Global Maintech Corporation + Kent Rieger + krieger&globalmt.com +1980 + Tele2 AB + Hans Engren + hans&swip.net +1981 + EMC CLARiiON Advanced Storage Solutions + Rene Fontaine + fontaine_rene&emc.com +1982 + ITS Corporation + Tim Greer + tgreer&itscorp.com +1983 + CleverSoft, Inc. + Debbie Glidden + dglidden&cleversoft.com +1984 + The Perseus Group, Inc. + Alan Mikhak + amikhak&aol.com +1985 + Joe's WWW Pages + Joe Burke + burke&northweb.com +1986 + Everything Internet Store + SHAWN TEDD + stedder&ica.net +1987 + Numara Software, Inc + Tony Thomas + tony.thomas&numarasoftware.com +1988 + Lycoming County PA + Richard Karp + rkarp&pennet.net +1989 + Statens Institutions styrelse SiS + Jimmy Haller + Jimmy.Haller&mailbox.swipnet.se +1990 + INware Solutions Inc. + Mario Godin + mgodin&inware.com +1991 + Brocade Communication Systems, Inc. (formerly 'Foundry Networks, Inc.') + Scott Kipp + skipp&brocade.com +1992 + Deutsche Bank + Michael Doyle + mib-admin&list.db.com +1993 + Xyratex + Richard Harris + rharris&uk.xyratex.com +1994 + Bausch Datacom B.V. + Ron Verheijen + RVerheijen&bausch.nl +1995 + Advanced Radio Telecom (ART) + Craig Eide + craige&artelecom.com +1996 + Copper Mountain Communications Inc. + Bhola Ray + bray&cmtn.com +1997 + PlaNet Software Inc. + Steve Curtis + scurtis&planetsoftware.com +1998 + Carltan Computer Corporation + Danilo L. Signo + carltan&gateway.portalinc.com +1999 + Littva Mitchell, Inc. + Edward P. Mitchell + mitchell&interaccess.com +2000 + TIBCO Inc. + Ed Shnayder + shnayder&tibco.com +2001 + Oki Data Corporation + Iguchi Satoru + Iguchi&okidata.co.jp +2002 + GoTel + Craig Goss + cgoss&infi.net +2003 + Adobe Systems Incorporated + Steve Zilles + szilles&adobe.com +2004 + Sentricity + Gregg Welker + gnjal&pacificnet.net +2005 + Aeroports De Paris + Eric Barnier + eric.barnier&adp.fr +2006 + ECONZ Ltd + Tim Mew + tim&econz.co.nz +2007 + TELDAT, S.A. + Eduardo Robles Esteban + teldat&offcampus.es +2008 + Offset Info Service srl + Enrico Talin + etalin&tradenet.it +2009 + A. J. Boggs & Company + Richard Vissers + rvissers&ajboggs.com +2010 + Stale Odegaard AS + Stale Odegaard + stale&odegaard.no +2011 + HUAWEI Technology Co.,Ltd + Zhao Lwu + zl&writeme.com +2012 + nVent, Schroff GmbH + Dietmar Mann + Dietmar.Mann&nVent.com +2013 + Rehabilitation Institute of Chicago + Angie Hoelting + a-hoelting&nwu.edu +2014 + ADC Telecommunications, Inc.
+ John Caughron + john_caughron&adc.com +2015 + SYSTOR AG + Urs Studer + Urs.Studer&SYSTOR.Com +2016 + GraIyMage, Inc. + Eric Gray + eric.gray&nh.ultranet.com +2017 + Symicron Computer Communications Ltd. + M.Powell + cmp&symicron.com +2018 + Scandorama AB + Peter Andersen + andersen&scandorama.se +2019 + I-NET + Wesley McClure + wes_mcclure&ccmail.inet.com +2020 + Xland, Ltd. + Boris A. Gribovsky + boris&xland.ru +2021 + U.C. Davis, ECE Dept. + Tom Arons + arons&ece.ucdavis.edu +2022 + CANARY COMMUNICATIONS, Inc. + JIM MCSEATON + jmcseaton&canarycom.com +2023 + NetGain + Niklas Hellberg + niklas.hellberg&netgain.se +2024 + West Information Publishing Group + Joseph R. Prokott + jprokott&westpub.com +2025 + Deutsche Bundesbank + Alexander Ulbricht + pki-services&bundesbank.de +2026 + Broadxent, Inc + Kok Chin Chang + kokchinc&broadxent.com +2027 + Gauss Interprise AG + Michael Schlottmann + michael.schlottmann&opentext.com +2028 + Aldiscon + Michael Mc Mahon + michaelm&aldiscon.ie +2029 + Vivid Image + Andrea L. Fiedler + andrea&vividimage.com +2030 + AfriQ*Access, Inc. + Tierno S. Bah + tsbah&afriq.net +2031 + Reliant Networks Corporation + Steven Fancher + Steven&Fancher.com +2032 + Mavenir Systems (formerly 'airwide solutions') + Nick Worth + nick.worth&mavenir.com +2033 + McKinney Lighting & Sound + Justin Barbalace + justin&204.49.136.10 +2034 + Whole Systems Design, Inc. + Peter J. Weyland + Peter&Look.net +2035 + O'Reilly Media, Inc. + Dean Roman + nic-tc&oreilly.com +2036 + Quantum Corporation + Carsten H Prigge + carsten.prigge&quantum.com +2037 + Ernst and Young LLP + Marcus Oh + marcus.oh&ey.com +2038 + Teleware Oy + Thomas Aschan + thomas&teleware.fi +2039 + Fiducia Informationszentrale AG + Ian Williams + nic&fiducia.de +2040 + Kinetics, Inc. + David S. Melnik + David.Melnik&KineticsUSA.com +2041 + EMCEE Broadcast Products + Frank Curtis + ENGR1&MAIL.MICROSERVE.NET +2042 + Clariant Corporation + Gerald Hammonds + Gerald.Hammonds&clariant.com +2043 + IEEE 802.5 + Robert D Love + rdlove&vnet.ibm.com +2044 + Open Development Corporation + Jeff Gibson + jgibson&opendev.com +2045 + RFG Systems + Ramon Ferreris + nomar1&ix.netcom.com / nomar1&aol.com +2046 + Aspect Telecommunications + Richard Ney + richard.ney&aspect.com +2047 + Leo & Associates + Leo Hau + leohau1&ibm.net +2048 + SoftLinx, Inc. + Mark Ellison + ellison&world.std.com +2049 + Generale Bank + Edwin GORIS + egoris&gbank.be +2050 + Windward Technologies Inc. + Ray Drueke + rueke&windwardtech.com +2051 + NetSolve, Inc. + Gary Vandenberg + vandeng&netsolve.com +2052 + Xantel + Mark E. Fogle + mefogle&xantel.com +2053 + arago, Institut fuer komplexes Datenmanagement GmbH + Joerg Hegermann + hegermann&arago.de +2054 + Kokusai Denshin Denwa Co., Ltd + Yasutomo Miyake + ys-miyake&kdd.co.jp +2055 + GILLAM-SATEL + J. MATHIEU + gillam&interpac.be +2056 + MOEBIUS SYSTEMS + MICHAEL CLARK + MYTHIA&WHYTEL.COM +2057 + Financial Internet Technology + Klaus Amelung + ka&fit.dk +2058 + MARC Systems + Steven C Johnson + scj&MARCSYS.COM +2059 + Bova Gallery + Martin Raymond + bovazone&earthlink.net +2060 + OSx Telecomunicacoes + Emmanuel Caldas + efcaldas&nutecnet.com.br +2061 + Telecom Solutions + Mark S. Smith + msmith&telecom.com +2062 + CyberIQ Systems + Lawrence Ho + lho&cyberiqsys.com +2063 + Ardent Communications Corporation + Chao-Li Tarng + cltarng&ardentcom.com +2064 + Aware, Inc.
+ Ellis Wong + ewong&aware.com +2065 + Racal Radio Limited + E.P Thornberry + 101346.3100&compuserve.com +2066 + Control Resources Corporation + Sadhan Mandal + 102263.2101&compuserve.com +2067 + Advanced Fibre Communications (AFC) + Richard D. Nichols + richard.nichols&fibre.com +2068 + Elproma Electronica B.V. + Kees Onneweer + r&d&elproma.nl +2069 + MTA SZTAKI + Gabor Kiss + kissg&sztaki.hu +2070 + Consensys Computers Inc + Eric Mah + eric&consensys.com +2071 + Jade Digital Research Co. + William L. Cassidy + wcassidy&baobei.com +2072 + Byte This Interactive Pty.Ltd. + Mike Cornelius + mike&bytethis.com.au +2073 + Financial Network Technologies Inc. + Duncan Harrod + NTDH&AOL.COM +2074 + BROKAT Informationssysteme GmbH + A.Schlumpberger + aschlum&brokat.de +2075 + MediaWise Networks + Jim Kinder + jkinder&mediawisenetworks.com +2076 + Future Software + Products Division + support&future.futsoft.com +2077 + Commit Information Systems + Peter Manders + mandep&commit.nl +2078 + Virtual Access Ltd + Gerry Keogh + gerry.keogh&VirtualAccess.com +2079 + JDS FITEL Inc. + Dr A.G.Self + arthur_self&jdsfitel.com +2080 + IPM DATACOM + Emilio Tufano + braccose&mbox.vol.it +2081 + StarBurst Communications Corporation + Kevin McKenna + kmckenna&starburstcom.com +2082 + Tollgrade Communications, Inc. + Jim Ostrosky + jostrosky&tollgrade.com +2083 + Orange Services US + Frank Drake + fdrake&orange.us +2084 + GS Yuasa International Ltd. + Toshiyuki Miyamoto + toshiyuki.miyamoto&jp.gs-yuasa.com +2085 + Isolation Systems Limited + Erica Liu + liue&isolation.com +2086 + AVIDIA Systems, Inc. + David Jenkins + engineering&avidia.com +2087 + Cidera-Mainstream Services + Kym Hogan + khogan&cidera-mainstream.com +2088 + Radstone Technology Plc + Paul Goffin + goffin&radstone.co.uk +2089 + Philips Business Communications + Helmut Wörz + woerz.philips&t-online.de +2090 + FMS Services + Karl Schwartz + schwartz&max2e.netropolis.net +2091 + Supernova Communications + Charles Tumlinson + c.tumlinson&telescan.com +2092 + Murphy & Murphy Real Estate + Bill Murphy + billmurf&nh.ultranet.com +2093 + Multi-Platform Information Systems + Thomas Mullaney + thomasm&token.net +2094 + Allegro Consultants, Inc. + Stan Sieler + sieler&allegro.com +2095 + AIAB + Lennart Asklund + asklu&algonet.se +2096 + Preview Multimedia Services + Brian Foster + Preview&Farmline.com +2097 + Access Beyond + Ed Brencovich + ebrencovich&accessbeyond.com +2098 + SunBurst Technology, Inc. + C.J. Stoddard/ Bob Mahler + sunybod&teleport.com +2099 + sotas + Yongchae Kim / Van Anderson + cmum&aol.com +2100 + CyberSouls Eternal Life Systems Inc. + Richard Reid + rreid&cybersouls.com +2101 + HANWHA CORP./TELECOM + YOUNG-SIK LEE + yslee&dmc.htc.hanwha.co.kr +2102 + COMET TELECOMMUNICATIONS INC + Anthony Fernandes + comet&ppp200.inet-on.net +2103 + CARY SYSTEMS, Inc. + Jane Liu + liu&carysys.com +2104 + Peerless Systems Corp + Frank Hernandez + f-hernandez&peerless.com +2105 + Adicom Wireless, Inc + Rick Chen + rchen&adicomw.com +2106 + High Technology Software Corp + Ken Lowrie + Ken&hitecsoft.com +2107 + Lynk + Tamar Krupnik, Avi Oron + Lynk&boscom.com, Oavi&boscom.com +2108 + Robin's Limousine + Robin Miller + roblimo&primenet.com +2109 + Secant Network Tech + Nathan H. Hillery + hillery&secantnet.com +2110 + Orion Pictures Corporation + Kevin Gray + kgray&orionpictures.com +2111 + Global Village Communication, Inc. + Emanoel Daryoush + emanoel&globalvillage.com +2112 + ioWave, Inc.
+ Bruce Flanders + ioWave&aol.com +2113 + Signals and Semaphores + Bobby Young + b.young&ix.netcom.com +2114 + Mayo Foundation + Thomas B. Fisk + fisk&mayo.com +2115 + KRONE AG + Wolfgang Kraft + 106005.1072&compuserve.com +2116 + Computer Networking Resources, Inc + Joe Rumolo + jrumolo&mindspring.com +2117 + Telenetworks + Mike Sanders + ms&tn.com +2118 + Staffordshire University + Andrew J. Sherwood + A.J.Sherwood&staffs.ac.uk +2119 + Broadband Networks Inc. + Michael Sanderson + msanderson&bni.ca +2120 + Federal Aviation Administration + Alan Hayes + alan_hayes&faa.dot.gov +2121 + Technical Communications Corporation + John Maher + jmaher&tccsecure.com +2122 + REZO+ + Artur Silveira da Cunha + ASilveira&rezo.com +2123 + GrafxLab, Inc. + Anthony Anthamatten + elvis&memphisonline.com +2124 + Savant Corp + Andy Bruce + AndyBruce&msn.com +2125 + COMTEC SYSTEMS CO.,LTD. + DEOK-HYOENG HAN + hansony&maru.comtec.co.kr +2126 + Satcom Media + Martin Miller + martinm&satmedia.com +2127 + Sonus Networks, Inc. + Information Security + infosec&sonusnet.com +2128 + TPG Network + Bill Leckey + tpgnrd&tpgi.com.au +2129 + CNJ Incorporated + Doan Van Hay Gilbert + doanvhay&usa.net +2130 + Greenbrier & Russel + Ron Phillips + ronp&gr.com +2131 + mainnet + Khaled Soussi + khaled&mainnet.net +2132 + Comnet Datensysteme + Holger Zimmerman + hzimmermann&cnd.de + (backup: Michael Lemke + 100341.417&CompuServe.COM) +2133 + Novadigm, Inc. + Phil Burgard + philb&novadigm.com +2134 + Alfatech, Inc. + Satoru Usami + KHA04261&niftyserve.or.jp +2135 + Financial Sciences Corporation + Gary Makhija + makhijag&fisci.com +2136 + Electronics For Imaging, Inc. + Eugene Chen + eugene.chen&eng.efi.com +2137 + Casabyte + Ben Castrogiovanni + ben&casabyte.com +2138 + AssureNet Pathways, Inc. + Michael Rosselli + miker&anpi.com +2139 + Alexander LAN, Inc. + Scott Penziner + scott&alexander.com +2140 + Gill-Simpson + Thomas M. McGhan + tmcghan&gill-simpson.com +2141 + MCNS, L.P. + Michelle Kuska + kuska.michelle&tci.com +2142 + Future Systems, Inc. + Kim Moon-Jeong + mjkim&future.co.kr +2143 + IMGIS + Joe Lindsay + joe&lindsay.net +2144 + Skywire Corporation + Oscar Pearce + opearce&skywire.com +2145 + Irdeto B. V. + Hans Dekker + hdekker&irdeto.com +2146 + Peasantworks + Stan Gafner + sharongafner&msn.com +2147 + Onion Peel Software + Steve Giles + sgiles&ops.com +2148 + PS Partnership + Thomas Lee + tfl&psp.co.uk +2149 + IRdg, Inc. + Ryan Campbell + rcampbell&ipost.net +2150 + SDS Ltd. + Dave Soteros + dsoteros&direct.ca +2151 + Promus Hotel Corporation + Oscar Pearce + 76106.1541&compuserve.com + ---none--- +2152 + Cavid Lawrence Center + Ed Bernosky + eddy&dlcmhc.com +2153 + Insider Technologies Ltd + Paul Hancock + 706001.1542&compuserve.com +2154 + Berkeley Networks + Bob Thomas + bob.thomas&berkeleynet.com +2155 + Infonautics Corporation + Mike Schwankl + mschwankl&infonautics.com +2156 + Easy Software + Leif Hagman + leif&easysoft.se +2157 + CESG + Derek Mariott + derek&caprof.demon.co.uk +2158 + SALIX Technologies, Inc. + Stephen Scheirey + sscheire&salixtech.com +2159 + Essential Communications + Marck Doppke + marck&esscom.com +2160 + University of Hawaii + David Lassner + david&hawaii.edu +2161 + Foxtel Management Pty + Keith Cohen + Cohenk&foxtel.com.au +2162 + ZOHO Corporation (formerly 'Advent Network Management') + Rajkumar Balasubramanian + raj&zohocorp.com +2163 + Vayris, S.A. + Jose Ramon Salvador Collado + vayris&ptv.es +2164 + Telecom Multimedia Systems, Inc.
+ Marc Church + mchurch&telecommm.com +2165 + Guardall Ltd. + Nick Banyard + banyard&guardall.co.uk +2166 + WKK SYSTEMS, Inc. + W. K. Kessler + wkk&wkk.com +2167 + Prominet Corporation + Steve Horowitz + witz&prominet.com +2168 + LMC Lan Management Consulting GmbH + Georg Kieferl + gk&lmc.de +2169 + Lewis Enterprise + Robert Lewis + yaa&cci-internet.com +2170 + Teles AG + Peter Schoenberger + ps&teles.de +2171 + PCSI (Phoenix Control) + Wayne Edward Conrad + wconrad&pcsiaz.com +2172 + Fourth Wave Designs, Inc. + Gene Litt + glitt&4wave.com +2173 + MediaGate, Inc. + Brett Francis + brett.francis&mediagate.com +2174 + Interactive Online Services, Inc. + Adam Laufer + adam613&aol.com +2175 + Mutek Transcom Ltd. + Alan Benn + abenn&mdco.demon.co.uk +2176 + University of Dortmund, IRB + Gerd Sokolies + gs&informatik.uni-dortmund.de +2177 + Network Diagnostic Clinic + Ray Sprong + netdiag&mc.net +2178 + TSI - Telecom Systems Ltd. + Giora Josua + giorajo&netvision.net.il +2179 + WireSpeed Comm. Corp. + Brooke Johnson + brookej&wirespeed.com +2180 + Versanet Communications, Inc. + Mark Hall + (909) 860-7968 +2181 + EUnet Communications Services BV + Koen De Vleeschauwer + noc&EU.net +2182 + pow communications + Harry Perkins + harry.perkins&pow.com +2183 + AMCommunications Inc. + William F. Kinsley + williamk&amcomm.com +2184 + Open Architecture Systems Integration Solutions (OASIS),Inc. + Bob Altman + BAltman&oasis-inc.com +2185 + NetPartner s.r.o. + Milan Soukup + mylan&login.cz +2186 + Vina Technologies + Bob Luxenberg + bobl&vina-tech.com +2187 + Reserved + RFC-pti-pen-registration-10 + ---none--- +2188 + Reserved + RFC-pti-pen-registration-10 + ---none--- +2189 + Deutsches Klimarechenzentrum GmbH + Lutz Brunke + brunke&dkrz.de +2190 + ABSYSS + Claude-Aime MOTONGANE + motongane&absyss.fr + Marc BENOLIEL + mbenoliel&absyss.fr +2191 + Quadrophonics, Inc. + Dennis Warner + denniswa&vegas.quik.com +2192 + Hypercore Technology Inc. + Ken Sailor + sailor&sask.trlabs.ca +2193 + OBTK, Inc., dba Network Designs Corporation + David Kirsch + dkirsch&networkdesigns.com +2194 + VOIS Corporation + William Dietrich + info&voiscorp.com +2195 + IXO S.A. + Fabrice Lacroix + fabrice&ixo.fr +2196 + Macro4 Open Systems Ltd. + David J. Newcomb + david.newcomb&macro4.com +2197 + RSA Security + John G. Brainard + jbrainard&rsasecurity.com +2198 + NextWave Wireless Inc. + Bob Kryger + bkryger&nextwavetel.com +2199 + Pisces Consultancy + Dr Anil K Dhiri + webmaster&pathit.com +2200 + TPS Call Sciences, Inc (TPS) + Paul L. Mowatt + plm&jamaica.tpsinc.com +2201 + ICONSULT + Hakan Tandogan + hakan&iconsult.com +2202 + Third Point Systems + Richard Parker + richard_parker&thirdpoint.com +2203 + MAS Technology Ltd. + Stephen Cheng + SCheng&mas.co.nz +2204 + Advanced Logic Research, Inc.(ALR) + Cameron Spears + cameron&alr.com +2205 + Documentum, Inc. + Lalith G.Subramanian + lalith&documentum.com +2206 + Siemens Business Communication Systems, Inc. + Beejan Beheshti + Beejan.Beheshti&siemenscom.com +2207 + Telmax Communications Corp. + Welson C. Lin + WelsonLin&Juno.com +2208 + Zypcom, Inc. + Karl Zorzi + Zypcom&tdl.com +2209 + Remote Sense + KC Glaser + kc&remotesense.com +2210 + OOTek Corporation + Keith Sarbaugh + keith&ootek.com +2211 + eSoft, Inc. + Phil Becker + phil&esoft.net +2212 + anydata limited + Fred Youhanaie + fred&anydata.co.uk +2213 + Data Fellows Ltd. + Santeri Kangas + snmp-development&datafellows.com +2214 + Productions Medialog Inc. + Andre G.
Cote + medialog&ppp35.point-net.com +2215 + Inovamerci, Lda + Marco Pinto + mapi&mail.telepac.pt +2216 + OKITEC + Tsutomu Chigama + chigama&otec.co.jp +2217 + Vertex Networks Inc. + Frank Huang + frankh&vertex-networks.com +2218 + Pulse Communications + Ben Tetteh + bentet&pulse.com +2219 + CXA Communications Ltd. + Jarrod Hollingworth + jarrod&cxa.com.au +2220 + IDD Information Service + Alice Gossmann + ayg&iddis.com +2221 + Atlas Computer Equipment, Inc. + Kathy Rayburn + kathyr&ace360.com +2222 + Syntegra + Mib administrator + snmp.mib&syntegra.com +2223 + CCC Information Services + John Marland + jmarland&cccis.com +2224 + W. Quinn Associates + Najaf Husain + nhusain&wquinn.com +2225 + Broadcom Eireann Research Ltd. + Michael Slevin + ms&broadcom.ie +2226 + Risk Management Services llc + K. Krishna Murthi + rmsllc&gto.net.om +2227 + Watkins-Johnson Company + Jagat Shah + jagat.shah&wj.com +2228 + Eric E. Westbrook + Eric E. Westbrook + eric&westbrook.com +2229 + Martinho-Davis Systems Inc. + Stephen Davis + sdavis&metaware.ca +2230 + XYPOINT Corporation + Kurt White + kwhite&xypoint.com +2231 + Innovat Communications, Inc. + Michael Lin + mlin&innovat-comm.com +2232 + Charleswood & Co. + Jan Henriksson + vci.55400jh&memo.volvo.se +2233 + ID Software AS + Truls Hjelle + truls.hjelle&idgruppen.no +2234 + Telia AB + Mats Mellstrand + mats&telia.net +2235 + Exploration Enterprises, Inc. + Eric E. Westbrook + westbrook&eei.net +2236 + Daimler-Benz Aerospace AG + Wolfgang Mueggenburg + Wolfgang.Mueggenburg&ri.dasa.de +2237 + Xara Networks Ltd. + Simon Evans + spse&xara.net +2238 + The FreeBSD Project + Poul-Henning Kamp + phk&FreeBSD.ORG +2239 + World Merchandise Exchange (WOMEX) Ltd. + Michael Connolly + connolly&womex.com +2240 + lysis + arno streuli + arno&lysis.ch +2241 + CFL Research + Clem Lundie + clundie&ix.netcom.com +2242 + NET-TEL Computer Systems Limited + Andrew Gabriel + Andrew.Gabriel&net-tel.co.uk +2243 + Sattel Communications + Phillip Soltan + P.Soltan&sattelcomm.com +2244 + Promatory Communications Inc. + Plamen Minev + plamen&promatory.com +2245 + Catalogic Software Inc. (formerly 'Syncsort, Inc.') + Chi Shih Chang + cchang&catalogicsoftware.com +2246 + LloydsTSB Group Plc + Peter Byrne + byrnep&lloyds-bank.co.uk +2247 + IT Consultancy Engineering Management Group Ltd. + Mr Vano Porchkhidze + vano&kheta.ge +2248 + LITE-ON COMMUNICATIONS Corp. + Jason Yu + jasonyu&lccr1.ltnlcc.com.tw +2249 + The New Millennium + Robert Garrard + garrard1&charlotte.infi.net +2250 + Quatraco Yugoslavia + Milos Eric + meric&EUnet.yu +2251 + BR Business Systems + John Wiseman + jnw&rail.co.uk +2252 + WheelGroup Corporation + Jonathan Beakley + beakley&wheelgroup.com +2253 + Ultimate Technology, Inc. + Sasha Ostojic + sasha&UltimateTech.com +2254 + Delta Electronics, Inc. + David Chen + dei-david-chen&delta.com.tw +2255 + Waffle Productions + Michael Page + mpage9&mail.idt.net +2256 + Korea Internet + j.w.lee + ing&internet-shopping.com +2257 + Selex Communications Limited (formerly 'BAE SYSTEMS') + Phil Clark + phil.m.clark&selex-comms.com +2258 + THOMSON BROADCAST SYSTEMS + Alain PUILLANDRE + puillandrea&tcetbs1.thomson.fr +2259 + Workflow Automation Company Ltd. + Caesar Cheng + caesar&204.225.174.150 +2260 + Associated RT, Inc. + Joe Sheehan + jsheehan&agrp.com +2261 + DRS Codem Systems + Robert Hauck + hauck&drs-cs.com +2262 + RIGHT TIME WATCH CENTER + FELIX ZALTSBERG + fzrtime&dimensional.com +2263 + Advanced-Vision Technologies, Inc.
+ Hsiang-Ming Ma + hsiangm&server.yam.com +2264 + Applied Intelligence Group + Dana French + dfrench&aig.vialink.com +2265 + Acorn Computers Ltd. + John Farrell + jfarrell&acorn.co.uk +2266 + Tempest Consulting Inc. + Francis Cianfrocca + francis&tempestsoft.com +2267 + Digital Sound Corporation + Jim Crook + jxc&dsc.com +2268 + Fastlan Solutions, Inc. + Jim McNaul + jmcnaul&fastlan.com +2269 + Ordinox Network, Inc. + Yvon Belec + ybelec&ordinox.com +2270 + Telinc Corporation + Tony Barbaro + testlinktb&aol.com +2271 + DRS Consulting Group + Darren Starr + ElmerFudd&activepages.com +2272 + Rapid City Communication + Sharon Chisholm + schishol&nortelnetworks.com +2273 + Invisible Fence Sales Company + George Ewing + gedit&ix.netcom.com +2274 + Troika Management Services + William McLean + xbill&sprynet.com +2275 + VXtreme Inc. + David Herron + dherron&vxtreme.com +2276 + CryptSoft Pty Ltd + Tim Hudson + tjh&cryptsoft.com +2277 + Brooktrout Technology + Ron Ching + rching&bng.com +2278 + GRASS mbH + Ingo Eibich-Meyer + GRASSmbH&t-online.de +2279 + EPiCon Inc. + Michael Khalandovsky + mlk&epicon.com +2280 + SAD Trasporto Locale S.p.a + Maurizio Cachia + mau&sad.it +2281 + Giganet Ltd + Eitan Birati + eitan&giganet.co.il +2282 + INCAA Informatica Italia srl + Henry van Uffelen + uffelen&mail.skylink.it +2283 + Vermont Firmware Corporation + David Garen + dgaren&vtfirmware.com +2284 + Automated Concepts + Blaine King + blaine&cotton.com +2285 + Flash Networks Ltd + Albert Berlovitch + albert&flash-networks.com +2286 + Oracom Inc. + Marc Pierrat + marcp&oracom.com +2287 + Shell Information Technology International Inc. + Rogier Carper + rogier-l.carper&shell.com +2288 + Black Pigs of Death + Thomas Boden + tbboden&sprynet.com +2289 + N3ERZ + Scott Smith + riverofcode&yahoo.com +2290 + Technology Rendezvous Inc. + Srinivasa Addepalli + srao&trinc.com +2291 + ZapNet! Inc. + Michael Chalkley + mikech&iproute.com +2292 + Premier Technologies + Fausto Marasco + fmarasco&premier.com.au +2293 + Tennyson Technologies + George Benko + GBenko&tennyson.com.au +2294 + Dot Hill Systems + Gary Dunlap + gdunlap&dothill.com +2295 + DH Technology, Inc. + John Tarbotton + jtarbott&cogsol.com +2296 + DAGAZ Technologies, Inc. + Vibhash Desai + desaiv&integnet.com +2297 + Ganymede Software Inc. + Vik Chandra + vc&ganymedesoftware.com +2298 + Tele-Communications Inc. + Andy Kersting + droid&ndtc.tci.com +2299 + FreeGate Corporation + Ken Mayer + kmayer&freegate.net +2300 + MainControl Inc. + Alexander Dorman + dorman&maincontrol.com +2301 + Luminate Software Corp. + Leo Liou + leol&luminate.com +2302 + K2Net + Vergel Blake + vergelb&ktwonet.com +2303 + Aurora Communications Pty. Ltd. + John Larsen + johnl&netcomm.com.au +2304 + LANscape Limited + Ricard Kelly + kellyr&lanscape.co.nz +2305 + Gateway Technologies Inc. + Jay Hogg + j_hogg&compuserve.com +2306 + Zergo Limited + Chris Trobridge + ctrob&cix.compulink.co.uk +2307 + C4U Solutions + Noreen Rucinski + NoreenSR&msn.com +2308 + BOLL Engineering AG + Thomas Boll + tb&boll.ch +2309 + Internet Mail Consortium + Paul Hoffman + phoffman&imc.org +2310 + College of Mathematics and Science - Univ. of Central Oklahoma + Bill McDaniel + mcdaniel&aix1.ucok.edu +2311 + Institute for Applied Supercomputing - CSUSB + Yasha Karant + karant&ias.csusb.edu +2312 + Red Hat + Richard Monk + rmonk&redhat.com +2313 + Legal & General Assurance Society Ltd. + Steve Reid + gblegjff&ibmmail.com +2314 + Fire Networks Inc.
+ Steven Gordon + sgordon&firenetworks.com +2315 + icti + Ron Nau + ron.nau&icti.com +2316 + Internet Communication Security + Lai Zit Seng + lzs&pobox.com +2317 + TALX Corporation + Bryan D. Garcia + bgarcia&talx.com +2318 + Repeater Technologies Inc. + Ron Earley + rge&ix.netcom.com +2319 + Aumtech Inc. + Jeff Shmerler + jeff&aumtechinc.com +2320 + EuroSInet + John Horton + j.horton&imc.exec.nhs.uk +2321 + ke Kommunikations-Elektronik + Martin Weigelt + weigelt&kecam-han.de +2322 + Starvision Multimedia Corp. + Srdjan Knezevic + sknezevi&starvision.com +2323 + Alcatel Telecom ASD + Marc De Vries + VRIESM&btmaa.bel.alcatel.be +2324 + AVAL DATA Corporation + Yuichi Maruyama + jaavl106&infos.or.jp +2325 + Pacific Northwest National Laboratory + S. Cullen Tollbom + Cullen.Tollbom&pnl.gov +2326 + Tortoise Software Systems + Frank Flaherty + Fxf52&aol.com +2327 + Verio, Inc. + Carl W. Kalbfleisch + cwk&verio.net +2328 + ArrayComm, Inc. + Marc Goldburg + marcg&arraycomm.com +2329 + DST Systems Inc. + Russell Petree + rpetree&dstsystems.com +2330 + Vision Service Plan + Kevin Parrish + kevinp&isd.vsp.com +2331 + Best Buy + Beth Singer + beth.singer&bestbuy.com +2332 + Shared Network Services (SNS) + Norman Brie + norm_brie&sns.ca +2333 + BBC + Brandon Butterworth + hostmaster&bbc.co.uk +2334 + Packeteer Inc. + Robert Purvy + bpurvy&packeteer.com +2335 + Applied Digital Access + Jeff Swann + jeff.swann&ada.com +2336 + HIS Technologies + Nick de Smith + nick.desmith&histech.com +2337 + DNE Technologies, Inc. + Martin Maloney + mmaloney&dne.com +2338 + Vertical Networks, Inc. + Paul Petronelli + plp&palmcorp.com +2339 + CSoft Ltd. + Nedelcho Stanev + decho&iname.com +2340 + National Grocers + Henry Fong + hfong&ngco.com +2341 + Reliance Computer Corp. + Sujith Arramreddy + sujith&rccorp.com +2342 + AK-NORD EDV Vertriebsges mbH + Michael Weber + AK-NORD&t-online.de +2343 + Financial Technologies International + Rich Bemindt + bemindt&ftintl.com +2344 + SpaceWorks, Inc. + Mitchell Song + mcs&spaceworks.com +2345 + Torrent Networking Technologies Corp. + Rohit Dube + rohit&torrentnet.com +2346 + CTI + Warren Baxley + warrenb&cfer.com +2347 + Datastream International + Abbas Foroughi + aforoughi&datastream.com + ---none--- +2348 + Killion Inc. + Bob Newkirk + bnewkirk&iname.com +2349 + Mission Critical Software, Inc. + Von Jones + vjones&mcsnotes.missioncritical.com +2350 + Data Research and Applications, Inc. + Dan Duckworth + dduckwor&dra-hq.com +2351 + Resonate Inc. + Ed Liu + zliu&resonateinc.com +2352 + Ericsson, Inc. (formerly 'RedBack Networks') + Michael Thatcher + michael.thatcher&ericsson.com +2353 + Nexware Corporation + Hunghsin Kuo + hkuo&nexware.com +2354 + ADC Wireless Systems + Ruby Zhang + rubyz&pcssolutions.com +2355 + ITIS + Franck Dupin + technic&itis.galeode.fr +2356 + LANCOM Systems + Udo Brocker + udo.brocker&lancom-systems.de +2357 + PSIMED Corporation + Freda Hoo + fhoo&msn.com +2358 + TDT AG + Patrick Kirschenhofer + support&tdt.de +2359 + T.I.P. Group S.A. + Olivier Mascia + om&tipgroup.com +2360 + Redlink + Daniel R. Lindsay + dlindsay&usa.net +2361 + Japan Information Engineering Co, Ltd. + Hidehiko Sakashita + sakasita&jiec.co.jp +2362 + Richter Systems Development, Inc.
+ James Lehmer + lehmer&co.richtersystems.com +2363 + Eurocontrol MAS UAC + Erik Van Releghem + erik.van-releghem&eurocontrol.be +2364 + Konica Corporation + Chiharu Kobayashi + c.koby&konica.co.jp +2365 + Viacom + Edward Lalor + snmp&viacom.com +2366 + XIOtech Corporation + Randy Maas + randym&xiotech.com +2367 + IMS Gesellschaft fuer Informations- und Managementsysteme mbH + Mr. Arndt Hoppe + infoserv&ims.wes.eunet.de +2368 + Softworks + Robert Rogers + mrrogers&softworkscc.com +2369 + MobileWare Corporation + Bill Sager + bsager&mobileware.com +2370 + Memco Software Ltd. + Zakie Mashiah + zakie&memco.co.il +2371 + Advanced TechCom, Inc. + Tim Jennings + TJENNINGS&atiradio.com +2372 + Bedford Associates, Inc. + Jose Badia + jose.badia&bedford.com +2373 + CyberWizard, Inc. + Randy Shane + randy123&airmail.net +2374 + SMART Technologies, Inc. + Craig Dunn + cdunn&smartdna.com +2375 + Concentric Network Corporation + Malik Khan + malik&concentric.net +2376 + The SNMP WorkShop + Thomas R. Cikoski + snmpshop&ix.netcom.com +2377 + Reltec Corp + Greg Saltis + GS4611&llp.relteccorp.com +2378 + Nera + Helge Lund + helu&networks.nera.no +2379 + Nations Bank + James Moskalik + jimmo&crt.com +2380 + Integrated Design Techniques Limited + Robin Jefferson + robin&idtuk.com +2381 + OpenLink Software, Inc. + Cees A. de Groot + cg&pobox.com +2382 + NetReality, Inc. + Ilan Raab + iraab&nreality.com +2383 + Imation Corp. + Jim Albers + jwalbers&imation.com +2384 + SIBIS Ltd. + Dr. Batyr Karryev + batyr&tm.synapse.ru +2385 + SHARP Corporation + Akira Saitoh + saitoh&trl.mkhar.sharp.co.jp +2386 + Desktop Data, Inc. + Fred Yao + fred.yao&desktopdata.com +2387 + Telecom Device K.K. + Tomoyuki Takayama + takayama&tcd.co.jp +2388 + Captech Communication Inc. + Alain Martineau + martino&francomedia.qc.ca +2389 + Performance Telecom Corp. + Glenn Burdett + gsb&performance.com +2390 + Com'X + Mickael Badar + mbadar&comx.fr +2391 + Karim + Kim Inok + iokim&www.karim.co.kr +2392 + Systems Integration Group + Duncan Hare + Duncan.Hare&iname.com +2393 + Witcom Innovative Radio Systems + Yakov Roitman + yakovr&wit-com.com +2394 + S.F. Software + Sam Roberts + sroberts&farallon.com +2395 + MARBEN Italia S.p.A. + Marco Mondini + mmondini&marben.it +2396 + ActivCard, Inc. + Dominic Fedronic + fedronic&activcard.fr +2397 + Cognos, Inc. + Paul Renaud + Paul.Renaud&Cognos.COM +2398 + Eagle Traffic Control Systems + W.L.(Bud) Kent + kentb&eagletcs.com +2399 + Netwave + Diane Heckman + dheckman&netwave-wireless.com +2400 + Hemet.net + Greg Reed + leadman&207.137.47.189 +2401 + NBX Corporation + Kevin Short + kshort&nbxcorp.com +2402 + Al-Bader Shipping & Gen. Cont. Co. + Domnic Faia + absckt&ncc.moc.kw + ---none--- +2403 + @Home Network + Christopher A. Dorsey + dorsey&home.net +2404 + Primeur + Loris Talpo + primeurne.ma&primeur.com +2405 + ILTS Inc. + Ralph M. Johnson + rmjohnson&itshqcb.com +2406 + Computer Generation, Inc. + Gary Aviv + gary&compgen.com +2407 + Mouton Noir Enterprises Inc. + Brandon Sussman + bsussman&xtdl.com +2408 + Baystate Sound & Recording + William Arnold Jr. + wpa.bsr&worldnet.att.net +2409 + Metapath Corporation + Peter Larsen + plarsen&metapath.com +2410 + Tajeet Gourmet Food Manufacturing + Randy Ghotra + RGHOTRA&tst43-max1.tstonramp.com +2411 + Telcel + Ken Ogura + ogura&telcel.net.ve +2412 + Intertrader Ltd + Rachel Willmer + rachel&intertrader.com +2413 + Maxtronics + Joseph Braun + braunj&dial.eunet.ch +2414 + Spiderplant + Reed Wade + wade&spiderplant.com +2415 + Software.com, Inc.
+ Santiago Parede + santi&software.com +2416 + Adherent Systems Ltd + Mike Sales + mjs&adherent.com +2417 + Korfmacher + O. Korfmacher + ok&netcs.com +2418 + Svenska EDIT AB + Claes Berg + Claes.Berg&edit.se +2419 + MLM5000 + Chase + CHASE1208&aol.com +2420 + INIT + Dirk Stocksmeier + Dirk.Stocksmeier&init.de +2421 + Teltone Corporation + Mike Balch + mbalch&teltone.com +2422 + Faircross Computers + Brian van Luipan + brian&mbvl.demon.co.uk +2423 + Carycom (H.K.) LTD + Cary Leung + cary&cary.com +2424 + Dominio Publico Internet, S.L. + Antonio Noguera + anoguera&redestb.es +2425 + bkr - Network Systems + Bjoern Kriews + bkr&jumper.org +2426 + Mariposa Technology, Inc. + John Osanitsch + josanitsch&mariposa-atm.com +2427 + Brocade Communications Systems, Inc. (formerly 'NuView Inc.') + Scott Kipp + skipp&brocade.com +2428 + Uninett + Alf Hansen + Alf.Hansen&uninett.no +2429 + A.C.E. + Prikhodiko Dimitri + direction&ace.ci +2430 + Oy Comptel Ab + Mikael Pesonius + Mikael.Pesonius&comptel.fi +2431 + mms Communication GmbH + Frank Heinzius + fhs&mms.de +2432 + Sortova Consulting Group, Inc. + Tarus Balog + tarus&sortova.com +2433 + ENVOY Corporation + Art Fogel + art.fogel&envoy-neic.com +2434 + Metron Technology Limited + Charles Reilly + charles&metron.co.uk +2435 + Brother Industries, Ltd. + Takami Takeuchi + takami.takeuchi&brother.co.jp +2436 + NetCom Systems, Inc. + Niten Ved + nved&netcom-sys.com +2437 + Kapsch AG. + Gerhard HUDECEK + hudecek&kapsch.net +2438 + Shomiti Systems + David Colodny + dcolodny&shomiti.com +2439 + Computerm Corporation + Bill Elliott + belliott&computerm.com +2440 + Efficient IP + Jean Yves Bisiaux + jyb&efficientip.com +2441 + CertiSoft Tecnologia Ltda. + Eduardo Rosemberg de Moura + eduardor&spacenet.com.br +2442 + EDI Enterprises, Inc. + Michael Dorin + mike&chaski.com +2443 + CCII Systems (Pty) Ltd + Hyram Serretta + HSE&CCII.co.za +2444 + Connetix, Inc. + Jonathan P. Baker + jbaker&connetix.com +2445 + TUNIX Open System Consultants + Leo Willems + leo&tunix.nl +2446 + GNP Computers + Tom Buscher + tbuscher&gnp.com +2447 + Intercope International + Jens Klose + jklose&intercope.com +2448 + NXT + Steve McLaughlin + smclaughin&nxt.com +2449 + Pan Dacom Forcom Telekommunikationssysteme GmbH + A. Jenne + PanDacom.Forcom&t-online.de +2450 + Auco, Inc. + Nick Webb + nwebb&auco.com +2451 + Tecnotree (formerly 'Tecnomen') + Kari Haapala + kari.haapala&tecnotree.com +2452 + Helax AB + Tobias Johansson + tobias.johansson&helax.se +2453 + Omtool Ltd. + Larry Klingler + KLINGLER&omtool.com +2454 + G-connect + Israel Goldside + israel.goldshide&g-connect.co.il +2455 + Dynamic Mutual Funds + Ed Heffernan + eheffernan&dynamic.ca +2456 + Antec Network Technologies + Duane Bacon + duane.bacon&antec.com +2457 + Premiere Promotions + Maureen Joy + maureenjoy&usa.net +2458 + LANQuest + Sylvia Siu + ssiu&lanquest.com +2459 + Guardian Bank p. Zagreb + Igor Onipko + guardbank&aol.com +2460 + ihlas net + Lutfi Tekin + lutfi&ihlas.net.tr +2461 + WAVTrace + Cherice Jobmann + cjobmann&mmwave.com +2462 + VIGGEN Corporation + Ken Vaughn, Joerg Rosenbohm + kvaughn&mail.viggen.com, nu&mail.viggen.com +2463 + SAIF ALI CO., Ltd. + ABDULKADER SALEH ABDULLAH + abdul&mozart.inet.co.th +2464 + CARYNET Information Center + Cary Leung + cary&cary.net +2465 + Application Telematiques, Numeriques et Reseaux (ATNR) + Michel RICART + mricart&atnr.fr +2466 + Channelmatic-LIMT, Inc. + Michael Wells + MikeOct51&aol.com +2467 + ArrowPoint Communications Inc. 
+ Steven Colby + scolby&arrowpoint.com +2468 + Ingrasys + Andy Chung + AndyChung&ingrasys.com +2469 + Netbuilding + Rudy Van Ginneken + rudyvg&netbuilding.be +2470 + Personal & Confidential Klaus + Peter Blaesing + kpb&pccom.demon.co.uk +2471 + Comsys International B.V. + Ronald Elderhorst + rdm&comsys.nl +2472 + Advance Telecommunication Krisada Arjinpattara + beer&sd1.sd.ac.th + ---none--- +2473 + GateKey Solutions, Inc. + Thomas Mullaney + thomasm&gatekey.com +2474 + Avici Systems, Inc. + Patrick Gili + pgili&avici.com +2475 + Sierra Technology, Inc.John Fischerjfischer&stisierra.com + Joshua Yoo + jyoo&stisierra.com +2476 + Encanto Networks Inc. + Robb Kane + rkane&encanto.com +2477 + Mount Olive College + Dan Colligan + dpcolligan&152.37.105.140 +2478 + FUJITSU ACCESS LIMITED + Tatsuyuki Muramatsu + ttmura&access.fujitsu.com +2479 + EDS GmbH + Matthias Lange + lanm&teleconnect.de +2480 + Jyra Research Inc. + Moray Goodwin + moray&jyra.com +2481 + Summit Communications + Reiney Brown + reiney&sumcom.net +2482 + Ministry of Transport, Public Works and Water Management + Mr. Sil Dekker + S.Dekker&mdi.rws.minvenw.nl +2483 + WinNet MCS Inc. + Tmima Koren + tmimak&winnet-corp.com +2484 + ICG Communications + Gideon Wober + gideon_wober&icgcomm.com +2485 + CrossLink Internet Services + Michael Shields + shields&crosslink.net +2486 + Cygnus Computer Associates, Ltd. + Marc Zimmermann + marcz&interlog.com +2487 + Phoenix Technologies Ltd. + Ian Anderson + ian_anderson&phoenix.com +2488 + Internetclub + CHMURA SHLOMO + internetclub&clubmember.org +2489 + CV. MITRA ADI PRANATA Ir. Fx Wahyu Hartono + map_wahyu&hotmail.com + ---none--- +2490 + Vixel Corporation + Tom Sweet + tsweet&seattle.vixel.com +2491 + Atmosphere Networks Inc. + Luis Roa + lroa&altamar.com +2492 + Montana Tel-Net + Kevin Kerr + kk&mttn.net +2493 + JCP Computer Services Ltd. + Jonathan Sowler + jonathan&jcp.co.uk +2494 + Inter Clear Service Ltd. + Jonathan Sowler + jonathan&jcp.co.uk +2495 + Internet Systems Consortium, Inc. + Ray Bellis + ray&isc.org +2496 + LightSpeed International, Inc. + David Turvene + DTurvene&lsiinc.com +2497 + GammaGraphX, Inc. + Bihari Srinivasan + bihari&ggx.com +2498 + iManage Inc. + Aseem Parikh + parikh&hotmail.com +2499 + Internet Security Systems + Don Hall + dhall&iss.net +2500 + Vienna Systems Corporation + Brian Baker + bbaker&viennasys.com +2501 + Yago Systems, Inc. + Shantanu R. Kothavale + sk&yagosys.com +2502 + LunarWave Communications + Mike Dugas + mad&lunarwave.net +2503 + Bangkok Pattaya Hospital + Satit Viddayakorn + satit&bgh.co.th +2504 + Roke Manor Research Limited + Rob Britton + rob.britton&roke.co.uk +2505 + New Oak Communications, Inc. + Sharon Chisholm + schishol&nortelnetworks.com +2506 + Bug Free Development + Jim Philippou + 71171.3015&compuserve.com +2507 + ARC Technologies Group, Inc. + David Campbell + Dave.Campbell&worldnet.att.net +2508 + Internet Dynamics, Inc. + Larry Lipstone + lrl&interdyn.com +2509 + Aviat Networks + Martin Howard + Martin.Howard&Aviatnet.com +2510 + Bear Mountain Software + Thomas Dignan + tdignan&bearmtnsw.com +2511 + AccessLAN Communications,Inc. + Hiren Desai + hdesai&accesslan.com +2512 + Crossroads Systems, Inc. + Cathy Everitt + ceveritt&Crossroads.com +2513 + CR2A-DI + Herve Thomas + hthomas&cr2a-di.fr +2514 + Mantra Communications Inc. + Suresh Rangachar + s.rangachar&worldnet.att.net +2515 + DiscoverNet + Neil Abeynayake + neil&discover.net +2516 + VocalTec Communications Ltd. 
+ Yoav Eilat + Yoav_Eilat&vocaltec.com +2517 + Riversoft Limited + Philip Tee + phil&riversoft.com +2518 + Phaos Technology Corp. + Joel Fan + jfan&phaos.com +2519 + POWEREDCOM, Inc. + Hisashi Nakagawa + hi-nakagawa&poweredcom.net +2520 + Internet Systems Inc. + Nayef E. Rashwan + rashwan&qatar.net.qa +2521 + ComConsult + Stefan Kreicker + stefan.kreicker&comconsult.de +2522 + Osicom Technologies + John Long + jlong&osicom.com +2523 + Hitron Technology Inc. + David Cheng + swell&hc.ht.net.tw +2524 + Rabenstein Enterprises + Jamie Rabenstein + jamie&rabenstein.com +2525 + AT Sistemas, C.A. + Alejandro Trigueros + atsis&telcel.net.ve +2526 + iPass Inc. + Jay Farhat + jfarhat&ipass.com +2527 + InterLinear Technology Inc + George Detlefsen + drgeorge&ilt.com +2528 + World One Telecom Ltd + George Detlefsen + drgeorge&w1t.com +2529 + Quadritek Systems, Inc. + David Cross + dcross&quadritek.com +2530 + Syseca + Frederic Koch + frederic.koch&syseca.thomson.fr +2531 + NetSpeak Corporation + Glenn Harter + glenn&netspeak.com +2532 + OpNet Inc. + Jeff Weisberg + snmp&op.net +2533 + MRM Consulting + Michael R. MacFaden + mrm&acm.org +2534 + TNSys-Trading Net System + Marco Antonio Reginatto + reginato&tnsys.com.br +2535 + JCMT + azrin + azrin&post1.com +2536 + Endeavour Hills Computer Services + Phillip Morgan + pjm&diamond.fox.net.au +2537 + Diversified Technology, Inc. + Albert Bolian + ajb&teclink.net +2538 + Lateral Management Limited + Bob Berryman + bobb&es.co.nz +2539 + Proxy Software Systems Ltd. + Yossi Cohen-Shahar + yossi&proxy.co.il +2540 + Combox Ltd. + Uri Bendelac + bendelac&combox.co.il +2541 + Spectrix Corporation + Matt Heneghan + mheneghan&spectrixcorp.com +2542 + Electronics and Telecommunications Research Institute + DongIl Seo + bluesea&etri.re.kr +2543 + Arlotto Comnet, Inc. + Mark Liu + markliu&address.com.tw +2544 + ADVA AG Optical Networking + Andreas Klar + aklar&advaoptical.com +2545 + NewTec GmbH Systementwicklung und Beratung Harald + Molle + molle&newtec.de +2546 + PVT a.s. - pvt.net + Michal Muhlapchr + michalm&pvt.net +2547 + Catholic University of Pelotas + Luiz Fernando Tavares Meirelles + lftm&amadeus.ucpel.tche.br +2548 + Cryptonym Corporation + Andrew D. Fernandes + andrew&cryptonym.com +2549 + Aker Consultoria e Informatica Rodrigo Ormonde + ormonde&cnt.org.br + ---none--- +2550 + ELVIS-PLUS + Mark Koshelev + marc&elvis.ru +2551 + Telegyr Systems + Marc-Etienne Walser + marc-etienne.walser&ch.telegyr.com +2552 + Netegrity, Inc. + Vadim Lander + vlander&netegrity.com +2553 + Cardinal Network, New Zealand Ltd + Martyn Leadley + mleadley&cardnet.co.nz +2554 + Micro Integrated Innnovations Paul + Stewart + paul_stewart&eee.org +2555 + JayaTek Sdn. Bhd. + Wai-Sun Chia + waisun&pc.jaring.my +2556 + Central Electronic Industry + Sang Ho . LEE + shlee&inote.com +2557 + Transcend Access Systems, Inc. + David Peters + daye&compuserve.com +2558 + Outreach Communications Corp. 
+ Chris Fowler + fowler&outreach.com +2559 + BocaTel + Sean McEnroe + smcenroe&bocatel.com +2560 + AT&T GNMC Amsterdam + Mike Wessling + mikew&att.nl +2561 + Teamphone.com Ltd, + Alan Stokes + A.Stokes&Teamphone.com +2562 + SBB Software Beratung GmbH + Wolfgang Dall + dall&sbb.at +2563 + Comstat DataComm Corporation + Larry Kent + lkent&comstat.com +2564 + The Network Technology Group + Albert Holt + alberth&ntgi.com +2565 + Avery Dennison + Matthew Domanowski + domanowskimatt&averydennison.com +2566 + ROHDE & SCHWARZ GmbH & Co.KG + Andreas Rau + hostmaster&rohde-schwarz.com +2567 + Datamedia SA + Bruno Bosqued + bbosqued&datamedia.fr +2568 + Integrix, Inc. + Jeff Zheng + jzheng&integrix.com +2569 + Telenor Novit AS + Atle Ruud + Atle.Ruud&novit.no +2570 + Prefered Communications + Alan Sisisky + security&safari.net +2571 + Mu'Tah University + Tarawneh Mokhled Suliman + mokhled&center.mutah.edu.jo +2572 + Network TeleSystems, Inc. + Lewis Greer + lewis&nts.com +2573 + Decision-Science Applications,Inc. Simeon Fitch + sfitch&dsava.com + ---none--- +2574 + Concentricity, LLC + Ted Eiles + tedeiles&concentricity.com +2575 + Artiza Networks Inc. + Tatsuya Yamaura + yamaura_tatsuya&artiza.co.jp +2576 + ComputerShare Systems Limited + Peter Brew + peter.brew&computershare.com.au +2577 + EDR Technologies + Mike Helton + helton&edrtech.com +2578 + AbirNet + Lev Kantorovich + lev&abirnet.com +2579 + Trikota, Inc. + David Barmann + dbarmann&trikota.com +2580 + Diebold Company of Canada Limited Rajan Raman + rajan&rdstasks.com + ---none--- +2581 + Precise Connectivity Solutions + Tal Grynbaum + tal_g&precisesoft.co.il +2582 + ANS Communications + Alex Kit + kit&ans.net +2583 + Hydro-Quebec TransEnergie + Alain Martineau + Martineau.Alain&hydro.qc.ca +2584 + RadioLAN, Inc. + Welson Lin + wlin&radiolan.com +2585 + Youth Opportunities Upheld, Inc. + Mark Merchant + merchantm&youinc.org +2586 + Teracom AB + Per Ruottinen + pru&teracom.se +2587 + Freemont Avenue Software, Inc. + Jim Livermore + jim&lsli.com +2588 + Positron Fiber Systems + Eric Paquet + epaquet&positronfiber.com +2589 + Chuo Electronics, Co., Ltd. + Masaya Kikuchi + kikuchi&cec.co.jp +2590 + Minolta Co., Ltd. + Takahiro Fujii + t-fujii&mol.minolta.co.jp +2591 + Radyne Corporation + Paul Wilson + paw&netwrx.net +2592 + NSI Software + David Demlow + DDemlow&NSISW.COM +2593 + Exstream PC + Warren Evans + wceace&aol.com +2594 + Simulation Laboritories Inc. + Hassan Ashok + csti&erols.com +2595 + WebTV Networks, Inc. + Jeff Allen + jra&corp.webtv.net +2596 + Credit Management Solutions, Inc. + Gregory Wright + greg_wright&cmsinc.com +2597 + Chisholm Technologies Inc. + Steve Bergeron + stevieb&chistech.com +2598 + WonderNet International Corp + Raymond Lee + raymond&wonder.net.tw +2599 + Percpetics Corporation + Robb Eads + rweads&usit.net +2600 + Distributed Systems Logic, Inc. + Robb Eads + rweads&usit.net +2601 + US West !nterprise Networking Services + Peter Schow + pschow&advtech.uswest.com +2602 + Intrasoft Corporation + Gene Dragotta + dragotta&bit-net.com +2603 + Allot Communications + Rich Waterman + rich&allot.com +2604 + Sophos Plc + Richard Baldry + rjb&sophos.com +2605 + TaylorMade-Math + Alice Ward + carloloc&ix.netcom.com +2606 + Rittal-Werk Rudolf Loh GmbH & Co.KG + . Strackbein + info&rittal.de +2607 + LAN International, Inc. + Hans Karlsson + hans.karlsson&lanint.com +2608 + Precise Software Solutions + Elias Yoni + yoni&precisesoft.co.il +2609 + New Prime Inc.
+ Jim Peterson + JPETER01&mail.orion.org +2610 + DataHaven Project, Inc. + Sean Goller + wipeout&dhp.com +2611 + Interspeed + Skip Carlson + skip&interspeed.com +2612 + MPI Tech a/s (formerly 'i-data international a-s') + Paul Gerelle + info&mpitech.com +2613 + Accelerated Networks, Inc. + Sean Finlay + sean&acceleratednetworks.com +2614 + Forschungszentrum Karlsruhe GmbH + Torsten Neck + neck&iai.fzk.de +2615 + ixMicro + Jianxin (Joe) Li + joe.li&ixmicro.com +2616 + CAO Diffusion + Khalil Rabehi-Isard + gep&caodiff.com +2617 + Computer Communications Consulting, Inc. + Anil Parthasarathy + pap&orca.overthe.net +2618 + Tracewell Systems, Inc. + David Keene + davidkeene&compuserve.com +2619 + Advanced Internet Management, Inc. + Deb Dutta Ganguly + ddg&iname.com +2620 + Check Point Software Technologies Ltd + Gonen Fink + gonen&CheckPoint.com +2621 + Martin Zwernemann + Martin Zwernemann + martin&zwernemann.de +2622 + Amarex Technology, Inc. + Gordon Flayter + gordon&amarex.com +2623 + ASUSTek Computer Inc. + James Hsu + james_hsu&asus.com.tw +2624 + Wave Wireless Networking + Philip Decker + iana&wavewireless.com +2625 + FCI Telecommunications Corporation + Eden Akhavi + eden.akhavi&fci.co.uk +2626 + Entuity Limited (formerly 'Prosum Ltd') + Lee Walker + iana&entuity.com +2627 + TCAM Systems (UK) Ltd + Fabrice Franceschi + Fabrice_Franceschi&stratus.com +2628 + Natural MicroSystems + Edwin Jacques + epj&nmss.com +2629 + City of Wauwatosa + Cheryl Chaney + chaneyc&dreamland.chaney.net +2630 + The Esys Corporation + Mark Miller + Mark.Miller&esys.ca +2631 + Altvater Airdata Systems GmbH Peter Haaf + peter.haaf&altvater.com + ---none--- +2632 + PT Wiryamas Sinar Palapa + fatahuddin djauzak + kanyet&pekanbaru.indo.net.id +2633 + Compucentre + Jeff Williams + jwilliams&cti.ca +2634 + Western Telematic, Inc. + Anthony Barrera + anthonyb&wti.com +2635 + ADTX + Chris Jan Cortes + chrisjan&adtex.com.ph +2636 + Juniper Networks, Inc. + Jeff Cheng + jc&juniper.net +2637 + Aptis Communications, Inc. + Dave McCool + dave&aptis.com +2638 + Bstpromark + Bill Boscarino + BBOSCARI&bproelm.mhs.compuserve.com +2639 + EdgePoint Networks, Inc. + Jacob Hsu + Jacob&edgepoint.com +2640 + AIMetrix Incorporated + David Tanel + dtanel&aimetrix.com +2641 + Arctunn Consulting + Brad Horner + arctunn&hotmail.com +2642 + Computel Electronica S.A.Jose + Barbetta + barbetta&centroin.com.br +2643 + FlowWise Networks Inc. + Chi Chong + cchong&flowwise.com +2644 + Synaptyx Corporation + Gregory Smith + gregsmith&synaptyx.com +2645 + First Union National Bank + Sia Yiu + sia.yiu&capmark.funb.com +2646 + Kommunikator GmbH + Simon Ney + Simon.Ney&kommunikator.de +2647 + C2S (Communication Systeme Service) + Eric Juin + 100434.140&compuserve.com +2648 + Siligom + Juan Jose Portela Zardetto + juanjo&siligom.com +2649 + Radcom Ltd. + Yoav Banin + yoav&radcom.co.il +2650 + Go Ahead Software, Inc. + Peter Gravestock + peter&goahead.com +2651 + Space Connection NV + Danny De Roover + danny&space-connection.be +2652 + Merck-Medco Managed Care LLC + Adam Lichtenstein + adam_lichtenstein&merck.com +2653 + City Com BV + Robert Doetsch + rdo&citycom.nl +2654 + R&S BICK Mobilfunk GmbH Andreas + Helmer + Andreas_Helmer&rsbick.de +2655 + Kepler Software, Ltd. + Raz Gordon + Raz&KeplerSoft.com +2656 + Banque Paribas + Jeremy Hodsman + jeremy_hodsman&paribas.com +2657 + Zitech Net + Ebbe Petersen/Bo Quist Besser + Swebservice&zitech.dk +2658 + Century Analysis Inc.
+ Jeff Wygal + jeffw&cainc.com +2659 + Talent Development GmbH Hans-Georg + Daun + hansgdaun&talentdev.ch +2660 + CopperCom Inc. + Bruno Sartirana + bruno&coppercom.com +2661 + Yutaka Electric Mfg. Co. Ltd. + Masao Tanabe + mstanabe&ppp.bekkoame.or.jp +2662 + SBF-Bourse De Paris + Yves Rouchou + Yves.Rouchou&bourseparis.com +2663 + Economatica + Gustavo Exel + gustavo&economatica.com.br +2664 + GVN Technologies + Karl Schlenther + karl_schlenther&gvntech.com +2665 + Olsy UK + Able Frenandes + abel.fernandes&olsy.co.uk +2666 + Room 42 Software, LLC + Linda Fisher + linda&room42.com +2667 + Cirilium + Terry Gin + terry_gin&cirilium.com +2668 + Tavve Software Co. + Anthony Edwards + tavve&mindspring.com +2669 + Solari di Udine + A. Candussio + fids.tech&solariud.it +2670 + NetVenture, Inc. + Jeffrey Cleary + registrar&easy-registry.com +2671 + Connected Systems Group + Robert J. DuWors + rjd&csgroup.com +2672 + Corporate Software & Technologies, INT, Inc. + Kent Tse + oid-admin&cst.ca +2673 + Fibex Systems + James Song + jsong&fibex.com +2674 + Claude Jardine Design + P.A. Wessels + cjardine&iafrica.com +2675 + Net Marketing, Inc. + Rankin Johnson + rankin&radiks.net +2676 + IBP, Inc. + Jim Weier + jweier&ibpinc.com +2677 + RD6 Inc. + Michel Hetu + mhetu&rd6.ca +2678 + MassMedia Communications Inc. + Ken Wa + kenward&erols.com +2679 + Nexans Suisse SA. + Niksa Vucinic + niksa.vucinic&nexans.com +2680 + Peak Audio, Inc. + Kevin Gross + Keving&peakaudio.com +2681 + Sia Yiu + Sia Yiu + sia.yiu&funb.com +2682 + DPS Inc. + Marshall DenHarting + sales&dpstele.com +2683 + Callisto Software + Bob Daley + RDaley&callisto.com +2684 + ViaVideo Communications, Inc. + Earl Manning + emanning&viavideo.com +2685 + Sequel Technology Corporation + Alan Chedalawada + achedalawada&sequeltech.com +2686 + Wi-LAN Inc. + Rashed Haydar + rashedh&wi-lan.com +2687 + Network System Technologies, Inc. + Paul Petronelli + plp&palmcorp.com +2688 + Center Technology + Mark Lo Chiano + MarkLoChiano&centertechnology.com +2689 + Coby Roberts + Coby Roberts + CROBERTS&geometric.com +2690 + Netronix Inc. + Lee Chang + lee&aten.com +2691 + Network Computer, Incorporated + Doug McBride + dougm&nc.com +2692 + WebWeaving + Dirk-Willem van Gulik + dirkx&webweaving.org +2693 + Institut Jozef Stefan + Borka Jerman-Blazic + borka&e5.ijs.si +2694 + Eldat Communication Ltd. + Oren Rosenfeld + orenr&eldat.com +2695 + MetaCommunications. Inc. + Branislav Meandziija + bran&metacomm.com +2696 + Digital Video Broadcasting (DVB) + Peter MacAvock + MacAvock&dvb.org +2697 + Bayly Communications Inc. + Graham Clarkson + engineering&bayly.com +2698 + Poznan Supercomputing and Networking Center - POZMAN + Pawel Wolniewicz + pawelw&man.poznan.pl +2699 + Printer Working Group + Jeffrey Schnitzer + admin&pwg.org +2700 + DIRECTV + Andrew Piecka + apiecka&directv.com +2701 + Argon Networks Inc. + Ken Chapman + kchapman&argon-net.com +2702 + WACOS Inc. + Spero Koulouras + spero&utstar.com +2703 + Object Zone AB + Paul Panotzki + paul&objectzone.se +2704 + Unassigned + Returned 2017-07-12 + ---none--- +2705 + SECUDE IT Security GmbH + Stephan André + stephan.andre&secude.com +2706 + Institute for Applied Information Processing and Communications,Graz University of Technology + Peter Lipp + plipp&iaik.tu-graz.ac.at +2707 + International Network Services + Steve Waldbusser + stevew&INS.COM +2708 + JNR Systems + Rick Geesen + rgg&jnrsystems.com +2709 + Congreve Computing Ltd.
+ Malcolm Sparks + malcolm&congreve.com +2710 + Northrop Grumman - Surveillance and Battle Management Systems + Doug White + whitedo2&mail.northgrum.com +2711 + Littlewoods Stores, Ltd. + Mike Ryan + mike.ryan&littlewoods-stores.co.uk +2712 + ICE-TEL TLCA + Keld Knudsen + Keld.Knudsen&uni-c.dk +2713 + Mauswerks, Inc. + Brian Topping + topping&mauswerks.com +2714 + Dep. of Signal Theory and Communications - UPC + Tibold Balogh + tibold&hpgrc3.upc.es +2715 + Zapex Technologiesn Inc. + Glenn Arbitaylo + glenn&zapex.com +2716 + Glueck & Kanja Technology AG + Christoph Fausak + cfausak&glueckkanja.de +2717 + Alcatel Telspace + Didier Jorand + jorand&telspace.alcatel.fr +2718 + Intercall + Alain Chateau + interca1&club-internet.fr +2719 + Townsend Analytics Ltd. + Ryan Pierce + rpierce&taltrade.com +2720 + NorCom Informationstechnologie und Unternehmensberatung GmbH + Ulf Lindquist + lindquist&norcom.de +2721 + News Internet Services + Stuart Garner + stuartmg&newscorp.com +2722 + Georgia Tech Research Institute + Information Systems Department + iana.oid.mgmt&gtri.gatech.edu +2723 + Guerrilla Mail, Inc. + Michael Cohen + gmail&concentric.net +2724 + Atmosera, Inc. + Scott Harvey + iana-admins&atmosera.com +2725 + Art Technology Group, Inc. + Fumi Matsumoto + fm&atg.com +2726 + Capital One Financial Corp. + William Franklin + will.franklin&capitalone.com +2727 + SFA, Inc. + Janes Amos + jamos&sfa.com +2728 + Packard Bell NEC, Inc. + Juh-Horng Lu + j.lu&neccsd.com +2729 + Empire Net + Chris Cappuccio + chrisc&empnet.com +2730 + Ottosen + Steen Ottosen + steen&ottosen.com +2731 + Dialogdesign + Sven Nielsen + sn&dialogdesign.com +2732 + Innovative Data Technology + S. Nichols + nichols&idtalston.com +2733 + Group 2000 Nederland b.v. + A. Bonetta + andreab&group.nl +2734 + Digital Lightwave, Inc. + Xavier Lujan + xlujan&lightwave.com +2735 + MIBS-R-US + Steven Johnson + steven_c_johnson&hp.com +2736 + EtherWAN Systems, Inc. + Mitch Yang + mitch&etherwan.com +2737 + Cordless Technology A/S + Adreas Szameit + andreas.szameit&detewe.de +2738 + Punjab Communications Ltd.(PunCom) + Rajeev Mohal + isd&puncom.com +2739 + Tanstaafl! Consulting + Henning Schmiedehausen + hps&tanstaafl.de +2740 + Artevea + James Beattie + james.beattie&artevea.com +2741 + Calirnet Systems, Inc. + David Chen + dchen&clarinetsys.com +2742 + Manage.com + Ashwani Dhawan + ashwani&manage.com +2743 + RFL Electronics, Inc. + William Higinbotham + RFLENG&nac.net +2744 + Sarnoff Real Time Corporation + James Armstrong + jba&srtc.com +2745 + LANCAST, Inc. + Mark Webber + Webber&LANCAST.net +2746 + Martin Communications + Andrew Martin + andrew&martin.com.au +2747 + Dirig Software, Inc. + Paul J. LaFrance + pjl&dirig.com +2748 + ICL Retail Systems Europe + Neil Roberts + nroberts&iclretail.icl.com +2749 + Aptia, Inc. + Wade Ju + wju&aptia.com +2750 + Vecima Networks Inc. (formerly 'WaveCom Electronics Inc.') + Laird Froese + external.registrations&vecima.com +2751 + Globalcast Communications Inc. + Naveen Rastogi + naveen&gcast.com +2752 + McComm International bv + Hans van de Pol + hpol&mccomm.nl +2753 + ARGO Data Resource Corporation Thomas + Koncz + tkoncz&argodata.com +2754 + Excel Switching Corporation + Norman St. Pierre + nstpierre&excelswitching.com +2755 + Palomar Communications, Inc. + Jason Rhubottom + jrhubottom&ptc.palpro.com +2756 + NetStart, Inc. + George Hill + george&netstartinc.com +2757 + SmartCommerce Solutions + Scott Guthery + sguthery&tiac.net +2758 + Universal Micro Applications, Inc. + J.H.
Estes + joele&uma.com +2759 + SNS Consultants + Jannie van Zyl + jannie&snscon.co.za +2760 + Enhanced Messaging System, Inc. + John Jackson + jjackson&emsg.com +2761 + Informatica S.p.A. + Carlo Tordella + carlo.tordella&informatica-spa.it +2762 + Netgame Ltd. + Lifshitz Yossi + yossi&ngweb.netgame.co.il +2763 + IntelliNet Technologies, Inc. + Joe Pakrasi + j.pakrasi&intellinet-tech.com +2764 + Acxiom Corporation + Keith Gregory + kgrego&acxiom.com +2765 + Dafur GmbH + Thomas Spitzer + TS_Dafuer&classic.msn.com +2766 + Platform Computing Corporation + Khalid Ahmed + ahmedk&platform.com +2767 + Automotive Products plc + Chris Hill + hillcf&apgroup.co.uk +2768 + RandD Computer Services Razvan + Dumitrescu + randd&rogers.wave.ca +2769 + Knuerr AG + Christian Keil + knuerr.de&t-online.de +2770 + Eurotel Praha s.r.o. + Martin Zampach + martin_zampach&eurotel.cz +2771 + Inlab Software GmbH + Thomas Obermair + obermair&acm.org +2772 + Intersolve Technologies + Andre Moen + amoen&euronet.nl +2773 + Redstone Communications, Inc. + Jason Perreault + jperreault&redstonecom.com +2774 + Algorithmic Research Ltd. + Yoram Mizrachi + yoram&arx.com +2775 + AGT International, Inc. + John Cachat + jcachat&agti.com +2776 + Fourthtrack Systems + Bob Barrett + market&fourthtrack.com +2777 + Flextel S.p.a. + Francesca Bisson + fbisson&flextel.it +2778 + WarpSpeed Computers + Chris Graham + chrisg&warpspeed.com.au +2779 + 21C3 + Chris Graham + chrisg&21c3.com.au +2780 + Neo Networks Inc. + Hemant Trivedi + hemant&neonetworks.com +2781 + Technical University of Madrid (UPM) + David Larrabeiti + dlarra&dit.upm.es +2782 + BOM Computer Services Ltd. + Bartley O'Malley + bartley&bom.uk.com +2783 + Control Systems International + Kenny Garrison + kennyg1&airmail.net +2784 + bbcom Broadband Communications GmbH & Co. KG + Gerald Schreiber + g.schreiber&bbcom-hh.de +2785 + Tecnopro SA + Hugo A. Pagola + hpagola&tecnopro.com.ar +2786 + Politecnico di Torino + Antonio Lioy + lioy&polito.it +2787 + ING Group + F. Roos + Fred.m.Roos&ing.com +2788 + Wytec Incorporated Dave + Downey + ddowney&wytecinternational.com +2789 + Mauro Enterprise + Douglas Mauro + doug&mauro.com +2790 + RoadRunner + Douglas Mauro + opsmail1&nycap.rr.com +2791 + Deterministic Networks, Inc. + Daljit Singh + daljit&juno.com +2792 + Sprint PCS + Robert Rowland + rrowla01&sprintspectrum.com +2793 + Interactive Intelligence + Jeff Swartz + JeffS&inter-intelli.com +2794 + JAYCOR + Jonathan Anspach + janspach&lsf.kirtland.af.mil +2795 + Edify Corporation + Daniel Yeung + daniely&edify.com +2796 + Fox IT Ltd + Ed Moon + Ed.Moon&foxit.net +2797 + University of Pennsylvania + John O'Brien + obrienjw&upenn.edu +2798 + Metawave Communications Corp. + Alex Bobotek + alexb&metawave.com +2799 + Enterprise Solutions Ltd + Neil Cook + neil.cook&esltd.com +2800 + CBL GmbH + Stefan Kirsch + info&cbl.de +2801 + ADP Dealer Services + Phil Parker + Phil.Parker&ds.adp.dk +2802 + EFKON + Peter Gruber + efkon.electronics&styria.com +2803 + SICAN GmbH + Frank Christ + christ&sican.de +2804 + KeyTrend Inc. + Tim Chang + tim&keytrend.com.tw +2805 + ACC TelEnterprises + James FitzGibbon + james&ican.net +2806 + EBA + Patrick Kara + Patrick&EBA.NET +2807 + Teleware Co., Ltd. + Mangeun Ryu + mryu&tware.co.kr +2808 + eFusion, Inc. + Kevin Brinkley + kevin_brinkley&efusion.com +2809 + Participants Trust Company + Neil Hixon + 74223.1750&compuserve.com +2810 + PeopleSoft, Inc. 
+ Doyle White + dwhite&peoplesoft.com +2811 + Entrata Communication + Kuogee Hsieh + khsieh&entrata.com +2812 + Musics.com + Terry James + NetAdmin&Musics.com +2813 + First Telecom plc. + Linda Jackson + yy81&dial.pipex.com +2814 + Telesnap GmbH + Thomas Januschke + tjanuschke&Telesnap.de +2815 + Newpoint Technologies, Inc. + Gary Barrett + Gary.Barrett&kratosdefense.com +2816 + T&E + Tom Jenkins + jenkintl&jnpcs.com +2817 + Disney Regional Entertainment, Inc. + Bill Redmann + bill&wdi.disney.com +2818 + Ramp Networks, Inc. + Sri Bathina + sri&rampnet.com +2819 + Open Software Associates + Adam Frey + adam&osa.com.au +2820 + Procom Technology + James Leonard + jleonard&procom.com +2821 + University of Notre Dame (Office of Information Technology) + Ray Storer + rstorer&nd.edu +2822 + Arquitectura Animada + Juan Cieri + cieri&cvtci.com.ar +2823 + Sumbha Holograms & Packaging Systems Ltd. + Pratap Bhama + stumbha&hotmail.com +2824 + The A Consulting Team, Inc. + Jeffrey Singer + jsinger&tact.com +2825 + WorldGate Communications, Inc. + Bruce Bauman + bbauman&wgate.com +2826 + TOA Electronics Ltd. + Hidenobu Kawakami + hkawakami&toadp.co.jp +2827 + Sytex Systems Corp. + Vincent J. DiPippo + vdipippo&sytexcorp.com +2828 + Zell Distributors + Michael Zelkowski + bldr&ts014d02.det-mi.concentric.net +2829 + YRless Internet Corporation + Jody Mehring + jody&yrless.com +2830 + HALO Technologies + Brian Boyd + brian&halocorp.com +2831 + Beijing Univ. of Posts & Telecom., Training Center + Huang Leijun + zhuxn&bupt.edu.cn +2832 + Virtual Data Systems, Inc. + Virgil Mocanu + virgil&virtualdatasystems.com +2833 + NetDox, Inc. + Elliott Krieter + ekrieter&netdox.com +2834 + Expert Computer Service Jim Mac + Farlane + webmaster&207.34.83.130 +2835 + Dictaphone + Bob Kiraly + bkir&dictaphone.com +2836 + Unex Technology Corporation + Shih-Chun Wei + edwei&ms11.hinet.net +2837 + Global Mobility Systems, Inc. + Peter Hartmaier + PeterH&gmswireless.com +2838 + TFM Associates + Stephen Diercouff + sgd&tfm.com +2839 + Teleran Technologies, L.P. Carmen Randazzo + crandazzo&teleran.com + ---none--- +2840 + Digital Telecommunications, Inc. + Song Liu + songliu&dxc.com +2841 + KB Internet Ltd. + Paul Kalish + root&shell.kindbud.org +2842 + Agri Datalog + Frank Neulichedl + frank&websmile.com +2843 + Braid Systems Limited + James Allen + james.allen&braid.co.uk +2844 + Newsnet ITN + Nigel Watson + nigel&newsnet.com.au +2845 + JTCS + Joe Tomkowitz + joet&jtcs.net +2846 + KEYCORP Pty. Ltd. + Peter Achelles + pachelles&keycorp.com.au +2847 + GTE Internetworking + Bill Funk + bfunk&bbn.com +2848 + Royalblue Technologies plc Trevor Goff + Trevor.Goff&royalblue.com + ---none--- +2849 + U&R Consultores Argentina + Marcelo Utard + mutard&uyr.com.ar +2850 + Tevycom Fapeco S.A. + Marcelo Utard + mutard&uyr.com.ar +2851 + Polaris Communications + Debra Hollenback + deb&polariscomm.com +2852 + Competitive Automation, Inc. + Berry Kercheval + berry&join.com +2853 + IDEXX Laboratories, Inc. + George Rusak + winsystems&idexx.com +2854 + Network Computing Technologies, Inc. 
+ Martin Cooley + martinc&ncomtech.com +2855 + Axxcelera Broadband Wireless + Tony Masters + tmasters&axxcelera.com +2856 + Cableware Electronics + Lee Dusbabek + cableware&aol.com +2857 + Network Power and Light + Douglas Hart + hart&npal.com +2858 + Clarent Corporation + Chris Brazdziunas + crb&verso.com +2859 + Kingston - SCL + Alan Fair + alan.fair&kscl.com +2860 + netVest + David Deist + dmdeist&netvest.com +2861 + VSN systemen BV + Martien Poels + info&OpenTSP.com +2862 + Northwest Consulting Services Randy + Scheets + randy&freerange.com +2863 + Thomson Inc. + David Jeffries + JeffriesDa&tce.com +2864 + Digitel S/A Industria Eletronica + Andre Baggio + baggio&digitel.com.br +2865 + Nortel Networks - Optical Metro + Tao Liu + traceyli&nortelnetworks.com +2866 + Technical Insights + Brett Grothoff + Brettg&erols.com +2867 + NKF Electronics + N. Wielage/P. de Konick + nick.wielage&dlf1.nkf.nl +2868 + Glasshouse Business Networks B.V. + J.K. Jongkind + j.k.jongkind&glasshouse.nl +2869 + VSI Enterprises + Michael L. Miller + mike.miller&vsin.com +2870 + E-TECH, Inc. + Craig Chen + craig_chen&e-tech.com.tw +2871 + UltraDNS + Steve De Jong + steve.dejong&neustar.com +2872 + Unisource Business Networks Nederland bv + Frank de Lange + frank.de.lange&inet.unisource.nl +2873 + AGENTics + Amir Kolsky + amir&agentics.com +2874 + OTC Telecom Inc. + Matthew Wang + mwang&ezylink.com +2875 + G.U.I.Dev. International Inc. + Lee Sutton + lsutton&guidev.com +2876 + Cothern Computer Systems + Dominic Tynes + dominict&ccslink.com +2877 + Arbinet Communications Inc. + Ryan Douglas + rdouglas&arbinet.com +2878 + FaxForward Canada, Ltd. + Yong Shao + yshao&cls.passport.ca +2879 + Sonus Networks, Inc. + Information Security + infosec&sonusnet.com +2880 + IPHighway Ltd. + Rina Nathaniel + rina&IPHighway.com +2881 + Clarion + Koichi Kato + kato&tlab.clarion.co.jp +2882 + Pepperl+Fuchs Comtrol + Kambiz Taghizadeh + ktaghizadeh&us.pepperl-fuchs.com +2883 + Coherent Communications Systems Inc + Chris Roller + croller&coherent.com +2884 + ADS Networks + K.Srinivas Aithal + adsvij&giasbg01.vsnl.net.in +2885 + Chicago-Soft Ltd. + Richard Hammond + hammond&chicago-soft.com +2886 + Netbalance + John Mullins + john_mullins&bigfoot.com +2887 + AS Proekspert + Andrus Suitsu + andrus&proexpert.ee +2888 + Adfm Multimedia Internet + Didier Mermet + adfm&adfm.com +2889 + Praxis International Inc. + Rick Goodman + rick_goodman&praxisint.com +2890 + Solectek Corporation + Larry Butler + lbutler&solectek.com +2891 + NanoSpace, Inc. + Peter Kaminski + kaminski&nanospace.com +2892 + KAPS, Inc. + Adhish Desai + adesai&cyberenet.net +2893 + Computer Associates, Italy + Mauro Ieva + ievma01&cai.com +2894 + Mainsail Networks, Inc. + Yuri Sharonin + yuri.sharonin&mainsailnet.com +2895 + EDS, SSMC-Tools Support and Development + Michael Stollery + michael.stollery&eds.com +2896 + Breece Hill Technologies Inc. + Timothy Sesow + TSesow&BreeceHill.com +2897 + AT&T Capital Corp Ernest + Shawn Cooney + shawn_cooney&attcapital.com +2898 + HighGround Systems, Inc Thomas Bakerman + tbakerman&highground.com + ---none--- +2899 + Omnia Communications, Inc. + Richard Dietz + rdietz&omnia.com +2900 + Mer Telemanagement Solutions + Eran Shapiro + erans&tabs.co.il +2901 + Replicase, Inc. 
+ Cadir Lee + cadir&replicase.com +2902 + Microlog Corporation + Ly Peang-Meth + lyp&mlog.com +2903 + Smartways Technology Limited + Tracey Hill + tracey.hill&smartways.net +2904 + Computer Services + Gene Wills + gene&snet.net +2905 + Trumpet Software International Pty Ltd + Peter Tattam + peter&trumpet.com +2906 + Rsi Solutions Ltd. + Barrie Lynch + bl&rsi.co.uk +2907 + C-Cor Electronics + Dovel Myers + dtm&c-cor.com +2908 + Castle Networks, Inc. + Technical Support + support&CastleNetworks.com +2909 + Nexabit Networks, LLC + James J. Halpin + jhalpin&nexabit.com +2910 + General Electric Company + Internet Registrations + domain.admin&ge.com +2911 + Objective Software Services, Inc. + Ron Morasse + morasse&ossinc.com +2912 + Ameristar Technologies Corp. + Richard Neill + rich&ameristar.com +2913 + Hycor Biomedical, Inc. + Trevor Hammonds + trevor&royal.net +2914 + Fellesdata AS + Knut Frang/Alf Witzoe + knut.frang&fellesdata.no +2915 + Network Engines, Inc. + Manager + SNMP.Manager&NetworkEngines.Com +2916 + Daimler-Benz AG + Joachim Schlette + joachim.schlette&str.daimler-benz.com +2917 + Data Interface Systems Corp + Diego Alfarache + diego&di3270.com +2918 + Symmetry Communications Systems, Inc. + Yun-Chian Cheng + yunchianc&symmetrycomm.com +2919 + Rambus Corp. + Nancy Saputra + nsaputra&RAMBUS.COM +2920 + Will-Do Information Services + William Daugherty + william&will-do.com +2921 + Swiss Pharma Contract Ltd. + Derek Brandt + dbrandt&pharmacontract.ch +2922 + I-O Corporation + Gary Meacham + gary.meacham&iocorp.com +2923 + Formula Consultants Inc. + Fritz Lorenz + florenz&formula.com +2924 + Star TV (Satellite Television Asia Region Ltd) + Andrew Lamond + andrewl&startv.com +2925 + Cyclades Corporation + Marcio Saito + marcio&cyclades.com +2926 + Sonoma Systems, Inc. + Chirs Hawkinson + chrish&sonoma-systems.com +2927 + Jacksonville Electric Authority + Robert Raesemann + raesrc&jea.com +2928 + Net Insight AB + Martin Carlsson + martin.carlsson&netinsight.se +2929 + Quallaby + Stephane Combemorel + stephane&quallaby.com +2930 + ValiCert, Inc. + Ambarish Malpani + ambarish&valicert.com +2931 + GADC Networks + David Bird + DavidB&gadc.co.uk +2932 + TERMA Elektronik AS + Ole Rieck Sorensen + ors&terma.com +2933 + Floware System Solutions Ltd. + Amir Freund + amir&floware.com +2934 + Citicorp + Russell Jansch + russell.jansch&citicorp.com +2935 + Quantum Corp. (formerly 'Pathlight Technology Inc.') + Carsten Prigge + carsten.prigge&quantum.com +2936 + Prominence Dot Com, Inc. + Jim McNee + jim&prominence.com +2937 + Deutsche Telekom AG + Peter Krupp + Peter.Krupp&telekom.de +2938 + Proginet Corporation + Thomas Bauer + tom&proginet.com +2939 + InfoExpress, Inc. + Stacey Lum + lum&infoexpress.com +2940 + Argent Software + Jeff Lecko + aqm_jso&argent-nt.com +2941 + ReadyCom Inc. + Matt Posner + mposner&readycom.com +2942 + COCOM A/S + Susanne Osted + sos&cocom.dk +2943 + ObjTech Software + Mario Leboute + leboute&pro.via-rs.com.br +2944 + Top Layer Networks, Inc. + Michael Paquette + paquette&toplayer.com +2945 + TdSoft Communications Software Ltd. + Konstantin Rubinsky + kosta&tdsoft.com +2946 + SWITCH + Simon Leinen + noc&switch.ch +2947 + Best Power - A Division of General Signal Power Systems + Brian Young + brian.young&bestpower.gensig.com +2948 + TeleSuite Corporation + Gary Thompson + jgaryt&sprintmail.com +2949 + Global Quest Consaltance + Sirish + challagullas&usa.net +2950 + Ampersand, Inc. 
+ Mark Atwood + m.atwood&ampersand.com +2951 + Nentec + Klaus Becker + becker&nentec.de +2952 + T&E Soft + Stefan Finzel + Stefan.G.R.Finzel&T-Online.de +2953 + Imedia + Max Shkud + snmp&imedia.com +2954 + Universitaet Bielefeld, Technische Fakultaet + Sascha Frey, TechFak Support + support&TechFak.Uni-Bielefeld.DE +2955 + PSINet UK Ltd. + Ben Rogers + bdr&uk.psi.com +2956 + InfoLibria, Inc. + David Yates + dyates&infolibria.com +2957 + Ericsson Communications Ltd. + Mark Henson + mark.henson&ericsson.com +2958 + Secure Network Solutions Ltd.Brendan Simon + & Geoff McGowan + bsimon&randata.com.au +2959 + Workstation Solutions, Inc. + James Ward + jimw&worksta.com +2960 + National Landscape Assn. Inc. + James Koester + jamesk11&prodigy.nrt +2961 + DIALOGS Software GmbH + Ralf Schlueter + schlueter&dialogs.de +2962 + Netwise AB + Lennart Bauer + lennart.bauer&netwise.se +2963 + Security Dynamics Technology, Inc. + Andrew Nash + anash&securitydynamics.com +2964 + Zeta Communications Ltd. + John Vrabel + john&zetacom.demon.co.uk +2965 + Fujikura Solutions Ltd. (formerly 'Syscom Ltd.') + Tomoyuki Ide + tomoyuki.ide&jp.fujikura.com +2966 + Digital ChoreoGraphics + DV Black + dcg&softcafe.net +2967 + CableData Inc. + Ragan Wilkinson + ragan_wilkinson&cabledata.com +2968 + Allen Telecom Systems + Seeta Hariharan + harihas&ats-forest.com +2969 + Charles Craft + Charles Craft + chucksea&mindspring.com +2970 + Sunstone Enterprises + J. Marc Stonebraker + sunstone&mci2000.com +2971 + Corecess Inc. + Son Yeong Bong + ybson&medialincs.co.kr +2972 + Network Alchemy, Inc. + L. Stuart Vance + vance&network-alchemy.com +2973 + Integral Access, Inc. + Jeff Wake + jeffwake&integralaccess.com +2974 + IP Metrics Software, Inc. + Brett Dolecheck + Dolecheck&IPMetrics.com +2975 + Notarius T.S.I.N. Inc. + Suzanne Thibodeau + thibodsu&notarius.com +2976 + Sphairon Technologies GmbH (formerly 'Philips Multimedia Network Systems GmbH') + Michael Sladek + michael.sladek&sphairon.com +2977 + Teubner and Associates, Inc. + K. Legako + kathy&teubner.com +2978 + ImageCom Ltd. + Paul Carter + paul&imagecom.co.uk +2979 + Waverider Communications Inc. + Paul Wilson + pwilson&waverider.com +2980 + ENT - Empresa Nacional de Telecomunicacoes, S.A. + Miguel Gentil Gomes + Miguel.Gomes&ent.efacec.pt +2981 + Duke Energy + Paul Edmunds + pedmunds&duke-energy.com +2982 + Deutsches Patentamt + Oswald Rausch + dpanet&deutsches-patentamt.de +2983 + SEMA Group GmbH, TS-V + Dirk Schmalenbach + dirk&sema.de +2984 + Keycode Style Ltd. + Andrew MacLean + andy&keycode.demon.co.uk +2985 + Bay Systems Consulting, Inc. + Asim Mughal + asim&baysyst.com +2986 + Qiangjin Corp.Ltd. + Wenyu + wenyu&gicom2000.com +2987 + IPivot + Cary Jardin + cjardin&servnow.com +2988 + Consultronics Development Ltd. + Zolton Varga + zvarga&gw.cdk.bme.hu +2989 + University of North London + Matthew Mower + m.mower&unl.ac.uk +2990 + Illuminata, Inc. + Jonathan Eunice + jonathan&illuminata.com +2991 + Enterprise IT, Inc. + Peter Nadasi + peter.nadasi&frends.com +2992 + CyberTel, Inc.Jonathan Chu + jcchu&mail.monmouth.com + poget&mail.ntis.com.tw +2993 + ConvergeNet Technologies, Inc. + Rick Lagueux + raljr&ix.netcom.com +2994 + Teligent + Conny Larsson + conny&teligent.se +2995 + AcuComm, Inc. + Li Song + li.song&acucomm.com +2996 + SpectraWorks Inc. + Shamit Bal + shamit&spectraworks.com +2997 + RedTitan + Peter Henry + pete&redtitan.com +2998 + Wire-Tap, Inc.
+ Thomas Anderson + iana-pen&wire-tap.net +2999 + American Family Insurance + Dan Tadysak + dtadysak&amfam.com +3000 + IDB Systems, a Division of WorldCom Inc. + John Isaacson + John.Isaacson&IDBSYSTEMS.COM +3001 + BAILO + Mamadou TRAORE + bailo&africaonline.co.ci +3002 + ADAXIS Group + Thomas Dale Kerby + adaxis&juno.com +3003 + Packet Engines Inc. + J.J. DeBarros + jjd&packetengines.com +3004 + Softwire Corporation + Dan Sifter + dan.sifter&softwire.com +3005 + TDS (Telecoms Data Systems) + Jean-Louis Guibourg + tds&tds.fr +3006 + HCI Technologies + Diwakar H. Prabhakar + diwakar&mnsinc.com +3007 + TOPCALL International + Frans Bouwmeester + bm&topcall.co.at +3008 + LogMatrix Inc (formerly 'Open Service') + Chris Boldiston + techsupport&logmatrix.com +3009 + SYNCLAYER Inc. + Yuichi Hasegawa + software&synclayer.co.jp +3010 + university of aizu + Yoshinari Sato + sato&ccss1051.u-aizu.ac.jp +3011 + VideoServer, Inc. + George W. Kajos + gkajos&videoserver.com +3012 + Space & Telecommunications Systems Pte. Ltd. + Steven Alexander + ssacsd&rad.net.id +3013 + Bicol Infonet System,Inc. + Riad B. Hammad + rbh&202.167.21.68 +3014 + MediaSoft Telecom + Barry Turner + bturner&mediasoft.ca +3015 + Netpro Computing, Inc. + Corbin Glowacki + corbing&netpro.com +3016 + OzEmail Pty Ltd + David Woolley + david.woolley&team.ozemail.com.au +3017 + Arcxel Technologies, Inc. + Stuart Berman + sberman&arcxel.com +3018 + EnterNet Corporation + Radhakrishnan R. Nair + Nair1&aol.com +3019 + Jones Waldo Holbrook McDonough + Forrest Tessen + jlr&jonesWaldo.com +3020 + University Access + Tyler Clark + tyler&afn.org +3021 + Sendit AB + John Wu + john&sendit.se +3022 + Telecom Sciences Corporation Limited + William Berrie + wberrie&telsci.co.uk +3023 + Quality Quorm, Inc. + Aleksey Romanov + qqi&world.std.com +3024 + Grapevine Systems Inc + Rick Stein + rstein&grapevinesystems.com +3025 + The Panda Project, Inc. + Gary E. Miller + gem&pandaproject.com +3026 + Mission Control Development + David L. Barker + dave&missionctl.com +3027 + IONA Technologies Ltd + Chris McCauley + cmcauley&iona.com +3028 + Dialogic Corporation + Richard Hubbard + hubbardr&dialogic.com +3029 + Digital Data Security + Peter Gutmann + pgut001&cs.auckland.ac.nz +3030 + ISCNI + Dennis Oszuscik + denniso&cninews.com +3031 + dao Consulting, LLC + Number Resources + numbers&daoConsulting.com +3032 + Beaufort Memorial Hospital + Mary Crouch + mjc&hargray.com +3033 + Informationstechnik Dr. Hansmeyer + Jochen Hansmeyer + cjh&krypton.de +3034 + URMET SUD s.p.a. + Claudio Locuratolo + MC8858&MCLINK.IT +3035 + Avesta Technologies Inc + Johnson Lu + jclu&avestatech.com +3036 + Hyundai Electronics America + Tom Tofigh + ttofigh&va.hea.com +3037 + DMV Ltd + Anthony Platt + a.platt&dmv.co.uk +3038 + Fax International, Inc. + Dave Christiansen + dchristi&faxint.com +3039 + MidAmerican Energy Company (MEC) + Michael Hindman + mshindman&midamerican.com +3040 + Bellsouth.net + Jeff Smith + smith&bellsouth.net +3041 + Assured Access Technology, Inc. + Ben Tseng + tseng&AssuredAccess.com +3042 + Logicon - Eagle Technology + John Lambert + jlambert&logicon.com +3043 + Frequentis GmbH + Florian Cernik + iana-admin&frequentis.com +3044 + ISIS 2000 + Debbie Cook + dcook&isis2k.com +3045 + james e. gray, atty + jim gray + jgray&grapevinenet.com +3046 + Jamaica Cable T.V. & Internet Services + Elias azan + barney&jol_ppp4.jol.com.jm +3047 + Information Technology Consultants Pty. Ltd. 
+ Les Pall + les.pall&itc.oz.au +3048 + LinickGrp.com + Roger Dextor + LinickGrp&worldnet.att.net +3049 + Yankee Know-How + Carl Nilson + agcarl&aol.com +3050 + SeAH group + HyeonJae Choi + cyber&snet.co.kr +3051 + Cinco Networks, Inc. + Dean Au + deana&cinco.com +3052 + Asentria Corporation + Paul Renton + pen&asentria.com +3053 + Genie Telecommunication Inc. + Anderson Kao + klk&genie.genie.com.tw +3054 + Ixia Communications + Steve Lord + stevel&ixiacom.com +3055 + Transmeta Corporation + MIS Role Account + mis&transmeta.com +3056 + Systemsoft Corp. + Dudley Nostrand + dnostrand&systemsoft.com +3057 + Compumatic + Paul Slootweg + paul.slootweg&gmail.com +3058 + T-Systems + Meint Carstensen + Meint.Carstensen&t-systems.com +3059 + Sisler Promotions, Inc. + Matt Weidner + mweidner&law.fsu.edu +3060 + ice-man refrigeration + Larry + temp111444&aol.com +3061 + Listing Service Solutions, Inc. + Scott Nixon + dnixon&ipass.net +3062 + Jovian Networks + Thomas Mullaney + tpm&jovian.net +3063 + Elebra Com. Dados + Rafael Vagner de Boni + eledado&ibm.net +3064 + Safetran Systems + Michael Van Hulle + mike.van.hulle&safetran.com +3065 + Video Network Communications, Inc. + Mathew Reno + Mathew_Reno&vnci.net +3066 + Phasecom Ltd. + Shmuel Rynar + shirr&phasecom.co.il +3067 + Eurocontrol + Richard Beck + Richard.Beck&eurocontrol.fr +3068 + SilverStream Software Inc. + Sande Gilda + sgilda&silverstream.com +3069 + Cownet + Josh Ferguson + fergo&usa.net +3070 + World Access, Inc. + Gerald Olson + geraldo&waxs.com +3071 + Virtual Line L.L.P. + Steve McConnell + stevemc&vline.net +3072 + Integrated Concepts L.L.P. + Steve McConnell + stevemc&vline.net +3073 + Exabyte Corporation + Bruce Eifler + brucee&exabyte.com +3074 + Interactive Media Corporation + George Howitt + george_howitt&interactivemedia.com +3075 + NetCore Systems, Inc. + Kwabena Akufo + kakufo&netcoresys.com +3076 + Altiga Networks, Inc. + Todd Short + tshort&altiga.com +3077 + National Center for Supercomputing Applications + Jeff Terstriep + jefft&ncsa.uiuc.edu +3078 + EMASS Inc. + Thomas Messer + thomasm&emass.com +3079 + PRIMA Telematic + Marc Lachapelle + marcl&prima.ca +3080 + BackWeb Technologies + Lior Hass + lior&backweb.com +3081 + NTP Software + Bruce Backa + bbacka&ntpsoftware.com +3082 + PBS A/S + Christian Ravn + chr&notes.pbs.dk +3083 + W.Quinn Associates, Inc. + Eric Liu + eliu&wquinn.com +3084 + QUZA (Racal-Integralis) + Ian Rawlings + Ian.Rawlings&uk.quza.com +3085 + Cosine Communications + Lianghwa Jou + ljou&cosinecom.com +3086 + PipeLinks Inc. + Fraser Street + Fraser_Street&pipelinks.com +3087 + WaiLAN Communications, Inc. + Richard Hu + hu&wailan.com +3088 + Axent Technologies + Sandeep Kumar + skumar&axent.com +3089 + SPAWAR + Ron Broersma + ron&spawar.navy.mil +3090 + Airsys ATM S.A. + Benoit Magneron + Benoit.B.M.MAGNERON&fr.airsysatm.thomson-csf.com +3091 + Whiter Morn Software, Inc. + Jon Salmon + pike&mtco.com +3092 + ENTV + Ali Kaidi + Legisnet&ist.cerist.dz +3093 + CyberTAN Technology, Inc. + Jerry Wu + jerrywu3&ms19.hinet.net +3094 + Frilot, Patridge, Kohnke & Clements, L.C. + Mark Robinson + MRobinso&fpkc.com +3095 + FirstSense Software, Inc. + Scott Marlow + smarlow&firstsense.com +3096 + StarVox, Inc. + Chaojung Li + chaol&starvox.com +3097 + WatchGuard Technologies Inc.
+ Phillip Dillinger + phil&watchguard.com +3098 + MTI Technology Corporation + Rich Ramos + rich&chi.mti.com +3099 + Lumbrera + Willy Gomez + wgomez&lumbrera.com.gt +3100 + CELOGIC + Laurent Degardin + ldegardin&celogic.com +3101 + Experian Information Solutions Inc. + Clara Hsieh + clara.hsieh&experian.com +3102 + Kansai Electric Co., Ltd. + Takashi Matsumuro + matsumurot&kansai-elec.co.jp +3103 + Innet + JeoungHyun Lee + jhlee&innet.co.kr +3104 + Thales Deutschland GmbH + Bernhard Nowara + Bernhard.Nowara&thalesgroup.com +3105 + Vodafone Sweden + Per Assarsson + per.assarsson.removethis&vodafone.se +3106 + LCI International, Inc. + Larry Hymer + hymerl&lci.net +3107 + City of Los Angeles + David Patterson + dpatterson&la911.com +3108 + G2 Networks + Allison Parsons + aparsons&g2networks.com +3109 + TradeWeb LLC. + Cyber Security + cybersecurity&tradeweb.com +3110 + Rafael + Avy Strominger + avys&rafael.co.il +3111 + Crystal Group Inc. + Brian Grimm + brian.grimm&crystalrugged.com +3112 + C-bridge Internet Solutions + Ron Bodkin + rjbodkin&c-bridge.com +3113 + Phase Forward + Gilbert Benghiat + gilbert.benghiat&phaseforward.com +3114 + WONOA + H. Zolty + zoltyh&dnt.dialog.com +3115 + Dialog + H. Zolty + zoltyh&dnt.dialog.com +3116 + NICE CTI Systems UK Ltd. + Eric Roger + eric.roger_at_nice.com&ANTISPAM +3117 + E*TRADE Group Inc. + Alan Cima + acima&etrade.com +3118 + Juno Online Services, Inc. + Juno Domain Administration + dns&juno.net +3119 + DnB ASA + Adne Hestenes + adne.hestenes&DnB.no +3120 + Cintel Technologies, Inc. + Ryu Cheol + ryuch&cintel.co.kr +3121 + Tele1024 Denmark + Thomas Secher + thomas.secher&tele1024.dk +3122 + Interlink Network Group, Inc. + Russell Carleton + roccor&inc-g.com +3123 + L. Richards' Enterprises, Inc. + Lawrence Richards + LRomeil&msn.com +3124 + Media Communications Eur AB + Oscar Jacobsson + oscar&medcom.se +3125 + Rocx Software Corp. + Mark Thibault + mthibault&rocx.com +3126 + Ardax Systems, Inc. + Aram Gharakhanian + aramg&ardax.com +3127 + Pluris, Inc. + Raghu Yadavalli + raghu&pluris.com +3128 + OAZ Communications + Ajay Batheja + ajay&oaz.com +3129 + Advanced Switching Communications, Inc. + Ralph Wolman + rwolman&asc1.com +3130 + GreatLink Networks, Inc. + Chenchen Ku + chenchen&greatlink.net +3131 + Aydin Telecom + Harold Gilje + hgilje&aydin.com +3132 + NetKit Inc. + Sam Stavro + netkit&ix.netcom.com +3133 + IDP + Despatin Jean + despatin&idp.fr +3134 + TTM Nederland + Robert Dijkman Dulkes + Info&ttmnl.com +3135 + Labouchere + Robert Dijkman Dulkes + R.DijkmanDulkes&Labouchere.nl +3136 + Comtrend Corporation + Frank Chuang + frankc&comtrend.com +3137 + Berbee Information Networks Corp. + Jim Berbee + snmpmgr&binc.net +3138 + Wireless Online, Inc. + Alex Segalovitz + alexsega&shani.net +3139 + LIFFE + Brian Power + power_b&hotmail.com +3140 + Celo Communications AB + Neil Costigan + neil&celocom.se +3141 + Mark IV Industries Ltd.(F-P Electronics Division) + Stephen Galbraith + sgalbrai&fpelectronics.com +3142 + Leitch Technology International Incorporated + Steve Sulte + Steve.Sulte&mars.leitch.com +3143 + Chalcroft International + Barry Chalcroft + barryc&gstis.net +3144 + Clarity Wireless Inc. 
+ Joseph Raja + jraja&clarity-wireless.com +3145 + C-C-C Technology Ltd + Paul Moore + pmoore&cccgroup.co.uk +3146 + FREQUENTIS Network Systems GmbH + Rainer Bittermann + Rainer.Bittermann&frqnet.de +3147 + Daewoo Electronics + Taeseung Lim + lts&phoenix.dwe.co.kr +3148 + France Caraibe Mobiles + Patrick Raimond + mis&fcm.gp +3149 + Winchester Systems Inc. + Jim Picard + jpicard&winsys.com +3150 + SWD + Uwe Wannags + uwannags&swd.de +3151 + Automotive Industry Action Group (AIAG) + Mike Prusak + mprusak&aiag.org +3152 + Orion Technologies Inc. + Tim Pushor + timp&orion.ab.ca +3153 + DirectoryNET, Inc. + Michael Gladden + mgladden&directorynet.com +3154 + Kisan Telecom Co., LTD + Man Hee Lee + lmh99&kisantel.co.kr +3155 + Concord-Eracom + Huub van Vliet + hvanvliet&concord-eracom.nl +3156 + Secant + Amit Kapoor + amit&netcom.com +3157 + NetraCorp, LLC + Shane Kinsch + shane.kinsch&netracorp.com +3158 + MASPRO DENKOH Corp. + Toshiyuki Maeshima + rd2mahhs&maspro.co.jp +3159 + Utimaco GmbH + Dieter Bong + dieter.bong&utimaco.com +3160 + Financial Information System Center (FISC) + Thomson Hsu + thomson_hsu&mail.fisc.org.tw +3161 + Xybx Inc. + Denis Robbie + robbied&rogers.wave.ca +3162 + Relational Data Systems + David Chang + dchang&fsautomation.com +3163 + M&T Clear Solutions Inc. + Michael Levitian + levitian&home.com +3164 + ARBED S.A. + Marc Michels + marc.michels&tradearbed.com +3165 + Cap Gemini Telecom + Chaouki Ayadi + cayadi&capgemini.fr +3166 + Westek Technology Ltd John Tucker + jtucker&westek-technology.co.uk + ---none--- +3167 + NICE Systems Ltd. + Avi Shai + avi&nice.com +3168 + INC S.A. + Paul Retter + retter&silis.lu +3169 + Silis Sarl + Paul Retter + retter&silis.lu +3170 + InterWorking Labs, Inc. + Karl Auerbach + karl&iwl.com +3171 + Ikon Systems, Inc. + Vincent French + vince&ikonsystems.net +3172 + GTE Intelligent Network Services + Matthew Ward + mward&admin.gte.net +3173 + Turnstone Systems, Inc. + Pawan Singh + psingh&turnstonesystems.com +3174 + Tasman Networks, Inc. + Madhusudhan K Srinivasan + madhu&tasmannetworks.com +3175 + WebTrends Corporation + Victor Lu + victorl&webtrends.com +3176 + Werner Training and Consulting, Inc. + Brad Werner + brad&wernerconsulting.com +3177 + IVC, Inc. + Brad Crittenden + bac&ivc.com +3178 + Blue Cross and Blue Shield of Florida + Dave Lentsch + hostmaster&bcbsfl.com +3179 + Level8 Systems + Gregory Nicholls + gnicholls&level8.com +3180 + RESCOM A/S + Klaus Vink + kv&rescom.dk +3181 + MICROSENS GmbH & Co. KG + Hannes Bauer + hbauer&microsens.de +3182 + Unihold Technologies + Ricardo Figueira + ricardof&ust.co.za +3183 + Wired for Management + Ramin Neshati + ramin.neshati&intel.com +3184 + Raymond and Lae Engineering, Inc. + Donald Raymond + dmraymon&rletech.com +3185 + Parapsco Designs Ltd. + Paul Strawbridge + paul&patapsco.demon.co.uk +3186 + TouchNet Information Systems, Inc. + Mark Stockmyer + mstockmyer&touchnet.com +3187 + FUZZY! Informatik GmbH + Joerg Meyer + joerg.meyer&fuzzy-online.de +3188 + Sunny Comm. Inc. + Shufen Zhang + sfzhang99&hotmail.com +3189 + DSD Computing + Darrell Dortch + darrell&dsdcomputing.com +3190 + Caja de Ahorros del Mediterraneo + Manuel Berna + mberna&cam.es +3191 + Dynetcom Guernsey Ltd. + Jim Travers + jimt&dyn.guernsey.net +3192 + Tachyon, Inc.
+ Tom McCann + tmccann&tachyon.net +3193 + Silent Communications + Claudio Naldi + info&silent.ch +3194 + EFFNET AB + Mathias Engan + snmp&effnet.se +3195 + AUDI AG + Stefan Bacher + stefan.bacher&audi.de +3196 + Side by Side GmbH + Armin Luginbuehl + a.lugin&sidebyside.ch +3197 + Vodacom South Africa + Helene Cloete + cloeteh&vodacom.co.za +3198 + Volamp LtdW.G. Saich+44(0) 1252 724055 + David Lunn + David.Lunn&btinternet.com +3199 + Shasta Networks + Anthony Alles + aalles&shastanets.com +3200 + Applied Resources, Inc. + Rick Kerbel + rkerbel&pobox.com +3201 + LANCOME + Renato Senta + fnathurm&hotmail.com +3202 + Spar Aerospace Limited + Luc Pinsonneault + lpinsonn&spar.ca +3203 + GlaxoWellcome Inc. + David Mccord + dkm10789&glaxowellcome.com +3204 + A.T.I. System Co., Ltd. + David Min + ktmin&www.ati.co.kr +3205 + EXODUS Communications Inc. + Ramesh Gopinath + gopinath&exodus.net +3206 + Assured Digital, Inc. + Scott Hilton + shilton&assured-digital.com +3207 + Web@venture + Eric Van Camp + sky73916&toaster.skynet.be +3208 + Athens University of Economics and Business + Theodore Apostolopoulos + thodoros&aueb.gr +3209 + Dynarc AB + Jakob Ellerstedt + jakob&dynarc.se +3210 + VOLKSWAGEN AG + Andreas Krengel + penmaster&volkswagen.de +3211 + Allgon AB + Hakan Grenabo + hakan.grenabo&allgon.se +3212 + Crestron Electronics, Inc. + John Pavlik + jpavlik&crestron.com +3213 + TRANSICIEL + Planche Philippe + sammour&transiciel.com +3214 + SAN People (Pty) Ltd. + James Keen + james&san.co.za +3215 + Network Instruments, LLC + Roman Oliynyk + roman&netinst.com +3216 + Texas Networking, Inc. + Michael Douglass + mikedoug&texas.net +3217 + Pini Computer Trading + Remo Pini + rp&rpini.com +3218 + XLN-t + Guido de Cuyper + g_de_cuyper&glo.be +3219 + Silicomp + Francois Doublet + fdo&silicomp.com +3220 + Signet Systems Pty Ltd + Charles Moore + cmoore&signet.org.au +3221 + Ohkura Electric Co., Ltd. + Takeshi Yamaguchi + tyama&ohkura.co.jp +3222 + New Elite Technologies, Inc. + Yaung K. Lu + yklu&neti.com.tw +3223 + TXCOM + Frederic Paille + fr.paille&txcom.fr +3224 + NetScreen Technologies, Inc. + Jay Fu + jay&netscreen.com +3225 + Sycamore Networks + Bill Sears + bill&sycamorenet.com +3226 + France Connexion Ingenierie + Thibault Dangreaux + Thibault.Dangreaux&.com +3227 + NetLeader, Inc. + Shane Thomas + shane&netleader.com +3228 + Tekmar Sistemi s.r.l. + Andrea Sarneri + lab&tekmar.it +3229 + QUT - DSTC + Dean Povey + qut-domain-admin&qut.edu.au +3230 + Runtop Inc. + David Wang + dwang&mail.runtop.com.tw +3231 + L-3 Communications + Tom Schmitt or Paul Mehrlich + ADMIN.OID&L-3Com.com +3232 + Eumetsat + Michael Schick + michael.schick&eumetsat.int +3233 + TongGong High New Technology Development Company + Ni Gui Qiang + niguiqiang&njiceatm.ice.edu.cn +3234 + Trifolium, Inc. + Bennett Groshong + bennett&trifolium.com +3235 + Zenon N.S.P. + Victor L. Belov + scream&zenon.net +3236 + ERCOM + Daniel Braun + netmgt&ercom.fr +3237 + SDC + Kjeld Bak + kbak&sdc.dk +3238 + Los Angeles Web + Troy Korjuslommi + tk&software.gyw.com +3239 + Florence on Line s.r.l. + Mike HAgen + mhagen&fol.it +3240 + Escalate Networks + Mark Carroll + mcarroll&escalate.net +3241 + TranNexus + Bradley Walton + brad.walton&transnexus.com +3242 + Brigham Young University + Frank Sorenson + frank&byu.net +3243 + ConNova Systems AB + Stefan Asp + stas&connova.se +3244 + Voxtron Flanders NV + Marc Bau + marc.bau&voxtron.com +3245 + Yomi Software Ltd. + Janne Hankaankorpi + janne.hankaankorpi&yomi.com +3246 + Mirapoint, Inc. 
+ Mark Lovell + mlovell&mirapoint.com +3247 + Colorbus + Ian Holland + ian&colorbus.com +3248 + DPB S.A. + Pablo Barros Martinez + pbarros&dpb.com.ar +3249 + StarGuide Digital Networks, Inc. + Roz Roberts + rroberts&starguidedigital.com +3250 + Telinet Technologies, LLC. Donald + Hopper + donald.hopper&telinet.com +3251 + Authentica Security Technologies, Inc. + Brad Brower + bbrower&bigfoot.com +3252 + Hologram Systems Ltd. + M. Western + matthew&hologram-systems.co.uk +3253 + TranSystem, Inc. + Ming Chung, Wang + ilovelu&alumni.nctu.edu.tw +3254 + R.R.C. Exports + Challagulla Sirish Kumar + challagullas&usa.net +3255 + Lakeside Software, Inc. + Mike Schumacher + mike&LakesideSoftware.com +3256 + Channel 100 + Al Channing + achannin&pcis.net +3257 + MAGMA, Inc. + Daniel Zivkovic + daniel&magmainc.on.ca +3258 + LANSource Technologies Inc. + Christopher Wells + chris_wells&lansource.com +3259 + INCAA Datacom BV + Peter de Bruin + PDB&incaa.nl +3260 + GlobeSet, Inc. + Jim Chou + jhc&globeset.com +3261 + Martin Pestana + Martin Pestana + martinp&netverk.com.ar +3262 + Acuson Corporation + Pete Sheill + psheill&acuson.com +3263 + Drake Automation Limited + Ian Kerr + ikerr&kvs.com +3264 + Kerr Vayne Systems Ltd. + Ian Kerr + ikerr&kvs.com +3265 + KSquared Consulting + Ken Key + key&network-alchemy.com +3266 + HSBC Group + Paul Ready + paul.k.ready&hsbcgroup.com +3267 + IronBridge Networks, Inc. + David Waitzman + djw&ironbridgenetworks.com +3268 + Real Time Logic Inc. + Gary Barrett + Gary.Barrett&kratosdefense.com +3269 + TelServe + Neal Packard + nealp&telserve.com +3270 + UNIQUEST-Korea + SangHo Lee & David Lee + david&uqk.uniquest.co.kr +3271 + StockPower, Inc. + Rick Wesson + rhw&stockpower.com +3272 + Yontem Computer & Electronics + Ersin Gulacti + yontem&orion.net.tr +3273 + nwe GmbH + Thomas Friedrich + tom&nwe.de +3274 + The Information Systems Manager Inc. + Peg Harp + pharp&perfman.com +3275 + Kosmos Image S.r.l. + Tiberio Menecozzi + t.menecozzi&kosmos.it +3276 + Taihan Electric Wire Co., Ltd. + Kim Hwajoon + hjkim&tecnet.co.kr +3277 + Telspec + Miguel Martinez Rodriguez + miguel.martinez&telspec.es +3278 + C-COM Corporation + S.C. Lin + sclin&mail.c-com.com.tw +3279 + Adlex Corp. + Mariusz Piontas + mpiontas&adlex.com +3280 + CCI Europe + Jan Buchholdt + jab&cci.dk +3281 + SMS Enterprises + Matt Tucker + mtucker&smscreations.com +3282 + Vicom Systems, Inc. + Jian Liu + JianL&vicom.com +3283 + International Software Solutions + Reat Christophe + creat&iss2you.com +3284 + OASIS Consortium + Tadhg O'Meara + tadhg&net-cs.ucd.ie +3285 + NOVA Telecommunications, Inc. + Gil Tadmor + gtadmor&novatelecom.com +3286 + Nera Satcom AS + Roar Mosand + rm&nera.no +3287 + Proactive Networks, Inc. + Minh Do + mdo&proactivenet.com +3288 + Jacobs Rimell Limited + Paulo Pinto + paulo.pinto&jacobsrimell.com +3289 + Cryptomathic A/S + Anette Byskov + abyskov&cryptomathic.dk +3290 + AppliScope + Igor Dolinsek + igor&result.si +3291 + Simac Techniek NV + Jan Vet + Jan.Vet&simac.nl +3292 + Earthmen Technology + Annabel Newnham + annabel&earthmen.com +3293 + Biffsters International + Tom Spindler + dogcow&BIFF.NET +3294 + Digitronic + Robert Martens + rm&digitronic.de +3295 + Internet Multifeed Co. + Katsuyasu Toyama + tech-c&mfeed.ad.jp +3296 + Argosy Research Inc. + Jett Chen + jett&email.gcn.net.tw +3297 + NxNetworks + Michael Kellen + OID.Admin&NxNetworks.com +3298 + MQSoftware, Inc. 
+ Craig Ching + craig&mqsoftware.com +3299 + Altair Data System + Paolo Zangheri + paolo.zangheri&altair.it +3300 + Telsis Limited + Steve Hight + steve.hight&telsis.co.uk +3301 + IMPACT + Pierre Mandon + p.mandon&mail.dotcom.fr +3302 + SMI Computersysteme GmbH Nicole + Schwermath + nisch&smi-glauchau.de +3303 + IDM, Ltd. + Vladimir Kozlov + Vladimir.Kozlov&idm.ru +3304 + WinVista Corp. + Mark Hennessy + mhennessy&winvista.com +3305 + Splitrock Services, Inc. + Robert Ollerton + rollerton&Splitrock.net +3306 + Vail Systems Incorporated + Dave Fruin + david&vailsys.com +3307 + zed5 + Erik Kline + erik&alum.mit.edu +3308 + Ensemble Solutions, Inc. + Frank Spies + F.Spies&EnsembleSolutions.com +3309 + Nomadix + Ken Caswell + ken&nomadix.com +3310 + Jett International Inc. + Chen Kuo-Tsan + jett&mx1.misnet.net +3311 + Crocodial Communications Entwicklungsgesellschaft mbH + Benjamin Riefenstahl + benny&crocodial.de +3312 + Consulting Informatico de Cantabria S.L. + Marta Gonzalez Rodriguez + mtgonzalez&cic-sl.es +3313 + Broadcast Services + David Coffman + dcoffman&cyberramp.net +3314 + Bergstresser Associates + Philip Bergstresser + phil&bergstresser.org +3315 + KingStar Computer Ltd. + Jesse Kuang + kjx&poboxes.com +3316 + Micro Logic Systems + Damien Raczy + raczy&mls.nc +3317 + Port Community Rotterdam + J. v. Groningen + cargocard&port-it.nl +3318 + Computer & Competence GmbH + Tim Themann + tim&comp-comp.com +3319 + GNOME project + Jochen Friedrich + snmp&gnome.org +3320 + Shanghai Baud Data Communication Development Corp. + Lin Bin + bdcom&public.sta.net.cn +3321 + Teledata Communication Ltd. + Avi Berger + berger&teledata.co.il +3322 + Ipswitch, Inc. + Roger Greene + roger&ipswitch.com +3323 + Microwave Networks Incorporated + Robert Gulde + robertg&microwavenetworks.com +3324 + Call Technologies, Inc. + Justin Anderson + janderso&calltec.com +3325 + Vocalis Ltd. + Alan Milne + alan.milne&vocalis.com +3326 + Bergen Data Consulting + Oddbjorn Steffensen + oddbjorn&oddbjorn.bdc.no +3327 + CA Technologies, Inc. + John Bird + john.bird&ca.com +3328 + Indus River Networks, Inc. + Bradford Kemp + bradkemp&indusriver.com +3329 + NewCom Technologies, Inc. + Bill Goetz + billg&ricochet.net +3330 + PartnerGroup + helpdesk&partnergroup.com + ---none--- +3331 + DeTeWe - Deutsche Telephonwerke Aktiengesellschaft & Co. + Andre Schmidt + andre.schmidt&detewe.de +3332 + RCX System + Dragos Pop + dragos&cs.ubbcluj.ro +3333 + Auburn University + Doug Hughes + Doug.Hughes&eng.auburn.edu +3334 + Cap'Mediatel + Vianney Rancurel + rancurel&capmedia.fr +3335 + HAHT Software + Michael Kelley + michaelk&haht.com +3336 + UTBF + Sean Cheng + simex&hotmail.com +3337 + Chicago Police Department - Data Systems Division + Richard Ramos + rich.ramos&prodigy.net +3338 + MORA Technological Services + Enrique Mora + enrique.mora&moralogic.com +3339 + JHC + John Healy + john&idigital.net +3340 + OpenTV Inc. + Vahid Koussari + vahid&opentv.com +3341 + SwitchSoft Systems, Inc. + Lynn LeBaron + llebaron&sssys.com +3342 + MachOne Communications Inc. + Thomas Obenhuber + thomas&machone.net +3343 + Philips Digital Video Systems Harry + Koiter + koiterh&ce.philips.nl +3344 + Helsinki Televisio Oy + Pekka Laakkonen + pekka.laakkonen&helsinkimedia.fi +3345 + Nemetschek SE + Michel Kohler + mkohler&nemetschek.com +3346 + Vocom + Wang LiPing + wlpwlp&usa.net +3347 + Hitachi Kokusai Electric Inc.
+ Kazuko.Suzuki + suzuki.kazuko&h-kokusai.com +3348 + Reliable Network Solutions + Werner Vogels + vogels&rnets.com +3349 + Vogo Networks + Nate Waddoups + nathan&connectsoft.com +3350 + beusen + Stephan Witt + witt&beusen.de +3351 + Overland Data, Inc. + Robert Kingsley + bkingsley&overlanddata.com +3352 + Go2 Technologies, Inc. + Anthony Molinaro + anthonym&goto.com +3353 + TransMedia Communications, Inc. + Eric Yang + eyang&trsmedia.com +3354 + InnoMedia, Inc. + Jacek Minko + jminko&InnoMedia.com +3355 + Orkit FI + Michele Hallak + michele&orckit.com +3356 + WebMaster, Incorporated + David Schwartz + davids&webmaster.com +3357 + Software & Management Associates, Inc. + Buddy Horne + unet&smainc.com +3358 + Researcher + Sunny Gupta + sunny&ca.ibm.com +3359 + Cygnus Global Consulting + Eric Jung + ejung&milehigh.net +3360 + Columbine JDS Systems Inc. + Michael Ledwich + mledwich&CJDS.COM +3361 + Intraplex + Jeffrey Merrow + jmerrow&gatesair.com +3362 + Selta S.p.A. + Danilo Dealberti + d.dealberti&selta.it +3363 + Southern New England Telecommunications + Timothy Peterson + Timothy.Peterson&snet.com +3364 + Baltic Oil Ltd. + Sergey Gavrilov + sergey&otenet.gr +3365 + MailWizard Incorporated + Tom Johnson + tj&mailwizard.com +3366 + Da Vinci Systems cc + Tom Theron + davinci&pixie.co.za +3367 + NMS Research + David + ktmin&hanimail.com +3368 + KimSungEun Co., Ltd. + Sung Eun Kim + sekim&chollian.net +3369 + Genicom Corporation + Jerry Podojil + jpodojil&genicom.com +3370 + Trango Software Corporation + Terry Voth + tvoth&trangosoft.com +3371 + SungEun Systems + Sung-Eun Kim + sekim&chollian.net +3372 + COVE Sistemas, S.L. + Jose Carles + carles&cove.es +3373 + SIAE Microelettronica S.p.A. + Andrea Pirotta + siaemi&siaemic.it +3374 + Cybertek Corp. + Greg Willis + gregwillis&cybertek.com +3375 + F5 Labs, Inc. + Tom Kee Ryan Kearny + r.kearny&f5.com +3376 + Valencia Systems + John Tracy + jtracy&valenciasystems.com +3377 + HKC Communications, Inc. + Wyatt Kenoly + hkccomm&msn.com +3378 + Plant Equipment Inc. + Donald Scott + dscott&peinc.com +3379 + HT Industrial Co. + Dandy + didan&ggg.net +3380 + Fuelling & Partner + Hueckinghaus + fp&fp.do.uunet.de +3381 + Atreve Software, Inc. + Gerry Seaward + gerry&atreve.com +3382 + Venturi Wireless + John Hardin + snmp&venturiwireless.com +3383 + South East Water Limited + Darren O'Connor + doconnor&sewl.com.au +3384 + WAM!NET + Jeff Konz + jkonz&wamnet.com +3385 + University of Leicester + Matthew Newton + mcn4&leicester.ac.uk +3386 + 21st Century Net + Rudolf Meyer + Rudolf.Meyer&21st-century.net +3387 + Intellivoice, Inc + Race Vanderdecken + rvanderdecken&intellivoice.com +3388 + Integral Partners + Daniel W. Schaenzer + dschaenzer&iisol.com +3389 + Novotec Computers GmbH + Martin Schroedl + schroedl&novotec.com +3390 + Marathon Technologies Corporation + Mark Pratt + pratt&marathontechnologies.com +3391 + Software Technologies Group, Inc. + Chris Herzog + zog&stg.com +3392 + Quvintheumn Foundation + S. Lars G Ahlen + slg.ahlen.qf&uppsala.mail.telia.com +3393 + SandS International + John C. Scaduto Sr. + JoeSideri&aol.com +3394 + NeTrue Communications + Russ Glenn + rglenn&netrue.com +3395 + Certicom Corp. + John Goyo + jgoyo&certicom.com +3396 + DICOS GmbH Kommunikationssysteme + Stephan Hesse + s.hesse&dicos.de +3397 + Border Blues Productions + Phillip Dyer + seuart&iamerica.net +3398 + Fieldbus Foundation + David Glanzer + dglanzer&fieldbus.org +3399 + Olencom Electronics Ltd. 
+ Evgeny Olentuch + admin&olencom.com +3400 + Alacrity Communications Inc. + Frank Guan + fguan&alacritycom.com +3401 + McAfee Inc. (formerly 'Network Associates, Inc.') + Brandon Conway + itsecurity&mcafee.com +3402 + Magicom Integrated Solutions + Yossi Appleboum + yossia&magicom.co.il +3403 + Marimba, Inc. + Senthilvasan Supramaniam + senthil&marimba.com +3404 + Adicom + Lukes Richard, Ing. + adicom&adicom.cz +3405 + Expand Networks Inc. + Einam Schonberg + standards&infit.com +3406 + EIS Corporation + Rodney Thayer + rodney&unitran.com +3407 + compu-DAWN, Inc. + Samir Patel + dynasty&unix.asb.com +3408 + Nylcare Health Plans + Darnel Lyles + LYLESD&corporate.nylcare.com +3409 + Z-Tel Communications, Inc. + Jeff Jones + jdjones&Z-TEL.com +3410 + Land-5 Corporation + Larry Dickson + ldickson&land-5.com +3411 + J. Slivko's Web Design Consulting + Jonathan Slivko + JSlivko&WildNet.Org +3412 + SanCastle Technologies Inc. + Yaron + yaronch&internet-zahav.net +3413 + Radiotel + Issac Shapira + isaac&radiotel.co.il +3414 + VoiceStream Wireless, Inc. + Trey Valenta + trey.valenta&voicestream.com +3415 + Mobile Telephone Networks + Eugene Pretorius + pretor_e&mtn.co.za +3416 + Neto Corporation + Julian Chang + tcchang&neto.net +3417 + CacheFlow Inc. + Gary Sager + gary.sager&cacheflow.com +3418 + Interactive Channel Technologies, Inc. + Adam Tran + adamt&cableshare.com +3419 + DERA + Dave Myles + djmyles&dera.gov.uk +3420 + Rossiyskiy Kredit Bank + Ruslan Polyansky + ruslan&roscredit.ru +3421 + Performance Reporting Services Ltd + Hash Valabh + hash&prs.co.nz +3422 + Network Aware, Inc. + Subodh Nijsure + subodh&networkaware.com +3423 + Project 25 + Craig Jorgensen + jorgensen&sisna.com +3424 + Evident Software, Inc. (formerly 'Apogee Networks, Inc.') + Ivan Ho + iho&evidentsoftware.com +3425 + Amsdell Inc. + Benedict Chan + bchan&amsdell.com +3426 + Tokyo Denshi Sekei K.K. + Akihiro Fujinoki + fuji&tds.co.jp +3427 + MicroJuris, Inc. + Fernando Lloveras + lloverasf&microjuris.com +3428 + Computer Associates TCG Software + Kalyan Dakshit + kalyan_d1&catsglobal.com +3429 + GenNet Technology Co., Ltd. + Tomy Chen + tomy&gennet.com.tw +3430 + Microtronix Datacom Ltd. + Ken Hill + khill&microtronix.com +3431 + Western DataCom Co., Inc. + Jeff Sweitzer + jeff&western-data.com +3432 + Tellium, Inc. + Y. Alysha Cheng + acheng&tellium.com +3433 + Goldencom Technologies, Inc. + John Yu + johnyu&goldencom.com +3434 + Leightronix, Inc. + David Leighton + dleighton&leightronix.com +3435 + Porta Systems Ltd + Paul Wragg + Paul_A_Wragg&csi.com +3436 + Brivida, Inc. + Tom Bohannon + tab&brivida.com +3437 + PitchonPe + Pninat Yanay + a_com&netvision.net.il +3438 + Missouri FreeNet + J.A. Terranson + sysadmin&mfn.org +3439 + Braintree Communications Pty Ltd + Peter Mason + peter.mason&braintree.com.au +3440 + Borealis Technology + Will Wood + wwood&brls.com +3441 + South Carolina State Ports Authority (SCSPA) + Ken Rigsby + krigsby&scspa.com +3442 + Advantech Inc. + Alexander Mazur + mazur&advantech.ca +3443 + United Healthcare + Scott Danielson + scott_j_danielson&uhc.com +3444 + egnite GmbH + Ute Kipp + ute.kipp&egnite.de +3445 + Radiant communications Corp. + David Mandell + dmandell&rccfiber.com +3446 + Ridge Technologies Dave + Holzer + dave.holzer&ridgetechnologies.com +3447 + JGI, Inc. + Yoshiaki Kawabe + kwb&rz.jgi.co.jp +3448 + Rivkin Science & Technology, Inc. + David Rivkin + david.rivkin&sciandtech.com +3449 + Fisher Berkeley Corp. + Scott Amundson + scotta&ccnet.com +3450 + Ardence, Inc.
+ Clark Jarvis + cjarvis&ardence.com +3451 + Vita Nuova Limited + Dr. Charles Forsyth + charles&vitanuova.com +3452 + MDSI Mobile Data Solutions Inc. + Paul Lui + plui&mdsi.ca +3453 + AAE Systems, Inc. + Network Administrator + mis&aaesys.com +3454 + ELVIS-PLUS + Mark Koshelev + marc&elvis.ru +3455 + Internet Freaks Luxembourg a.s.b.l.Department Technique + et Informatiqe + dti&ifl.lu +3456 + Adtech, Inc. + Mike Gouveia + mgouveia&adtech-inc.com +3457 + Advanced Intelligent Networks Corp. + David Roland + dsr&sohonet.net +3458 + Transaction Network Services, Inc. + Celeste Lipford + clipford&tnsi.com +3459 + COM:ON Communication Systems GmbH + Dirk Leber + d.leber&com-on.de +3460 + Telecommunications Specialists Pte Ltd + Desmond Ee + desmondee&pacific.net.sg +3461 + Inferentia SPA + Roberto Gilberti + Roberto.Gilberti&inferentia.it +3462 + Makonin Consulting Corp. + Stephen Makonin + stephen&makonin.com +3463 + Toucan Technology Ltd. + Mark Rawlings + Mark.Rawlings&toucan.ie +3464 + Gimlet Management Consultants Ltd + Kenny Robb + kenny_robb&gimlet.co.uk +3465 + Sanyo Denki Co., Ltd. + Akihiro Tsukada + Akihiro_Tsukada&sanyodenki.co.jp +3466 + Optical Networks, Inc. + Dan Tian + oni_contact&opticworks.com +3467 + NORCOM Networks Corporation + Bruce Robinson + brucer&norcom.net +3468 + GTE Interactive + Steve Bryer + steve.bryer&gsc.gte.com +3469 + Schumann Unternehmensberatung AG + Thomas Heckers + Thomas.Heckers&Schumann-AG.DE +3470 + ATM R&D Center of BUPT + Liu Fang + liufang&bupt.edu.cn +3471 + Bear Stearns & Company, Inc. + Daniel Sferas + dsferas&bear.com +3472 + Telamon, Inc. + Ross Scroggs + ross&telamon.com +3473 + Microgate Corporation + Paul Fulghum + paulkf&microgate.com +3474 + Fujitu ICL Espana S.A. + Francisco Santiandreu Lopez + FSLOPEZ&MAIL.FUJITSU.ES +3475 + Network Concepts + Greg Obleshchuk + greg.obleshchuk&nwc.com.au +3476 + Arepa Inc. + Mark Ellison + ellison&ieee.org +3477 + Dorado Software + Mark Pope + mpope&doradosoftware.com +3478 + Spectra Logic + John Maxson + johnmax&spectralogic.com +3479 + ViewTouch, Inc. + Gene Mosher + gene&viewtouch.com +3480 + VIEWS Net, Inc. + Michael Nowlin + mike&viewsnet.com +3481 + Himel Technology + Boris Panteleev + bpanteleev&himel.com +3482 + Ton & Lichttechnik + Michael Sorg + tonlicht&compuserve.com +3483 + Mariner Networks + Sol Guggenheim + sag&odetics.com +3484 + Alaska Textiles, Inc. + Dana Martens + dana&7x.com +3485 + Alaska Cleaners, Inc. + Dana Martens + dana&7x.com +3486 + Wellsprings Holdings, LLC + Dana Martens + dana&7x.com +3487 + Allure of Alaska + Dana Martens + dana&7x.com +3488 + SevenX + Dana Martens + dana&7x.com +3489 + Denali Sites, Inc. + Dana Martens + dana&denalisites.com +3490 + United Systems Base + Matthew Moyer + mmoyer&usbase.org +3491 + CDConsultants Inc. + Edgard Lopez + CDConsultants&hotmail.com +3492 + Comdisco, Inc. + Kenneth Gradowski + kegrabowski&comdisco.com +3493 + Broadband Access Systems, Inc. + Tavit Ohanian + tavit&basystems.com +3494 + Convergent Networks, Inc. + Eric Lin + elin&convergentNet.com +3495 + National Laboratory for Applied Network Research + Duane Wessels + wessels&ircache.net +3496 + Web-Resumes + Ralph Rasmussen + ideas&mill.net +3497 + Virtual Vendor Inc. + Peter Odehnal + petero&virtual-vendor.com +3498 + BusinessBuilder Technologies Inc. + Peter Odehnal + petero&biz-serv.com +3499 + Cyber Server Park Inc.
+ Peter Odehnal + petero&biz-serv.com +3500 + COMSAT Laboratories + Subramanian Vijayarangam + rangam&ntd.comsat.com +3501 + Vodafone Value Added Services Ltd + Neil Taberner + neil.taberner&vas.vodafone.co.uk +3502 + J & A Services + Jose Gutierrez + Joe&jgutz.com +3503 + Blue Lance, Inc. + Peter Thomas + pthomas&bluelance.com +3504 + Sandvik Coromant + Claes Engman + claes.engman&sandvik.com +3505 + Virtual Virgin Islands, Inc. + Hal Borns + HalBornsHB150&VirtualVI.com +3506 + PageTek + Bryan Reece + reece&pagetek.net +3507 + e-Net, Inc. + Austin Bingham + abingham&austin.datatelephony.com +3508 + NEST + Pietro Brunati + pbrunati&iol.it +3509 + Capital Holdings Ltd + Ury Segal + ury&cs.huji.ac.il +3510 + TWO-WAY LAUNDRY + William + WJK11&WORLDNET.ATT.NET +3511 + SkyStream, Inc. + Ed Hodapp + ed&skystream.com +3512 + Portal Software, Inc. + Majid Mohazzab + majid&corp.portal.com +3513 + Reserved + RFC-pti-pen-registration-10 + ---none--- +3514 + VStream Incorporated + Charlie Wanek + cwanek&vstream.com +3515 + Joanneum Research GesmbH + Vjekoslav Matic + vjekoslav.matic&joanneum.ac.at +3516 + Cybernetica + Arne Ansper + arne&cyber.ee +3517 + Tieto Technology A/S, Denmark + Jorgen Richter + jri&tt-tech.dk +3518 + Pressler Inc. + Chet Pressler + chet&pressler.com +3519 + amplify.net, Inc. + Raymond Hou + rhou&amplifynet.com +3520 + TPS (La Television Par Satellite) Denis + Vergnaud + dvergnau&tps.fr +3521 + Atlas Technologies, Inc. + Brian Miller + bmiller&atlas-tech.com +3522 + Biodata GmbH + Stephan Scholz + s.scholz&biodata.de +3523 + Netco GmbH + Anett Zuber + anett.zuber&netco.de +3524 + Continium + Alejandro Gil Sanda + asanda&arnet.com.ar +3525 + SilverBack Technologies + Buddy Bruger + bud&silverbacktech.com +3526 + ITC GmbH + Joerg Stangl + jstangl&itc-germany.com +3527 + IntraSoft, Inc. + John Cheeseman + john_cheeseman&keyvision.com +3528 + ESP, LLC + Jim Ziegler + jczjcz&ibm.net +3529 + AVT Corporation + Doug Murray + dmurray&avtc.com +3530 + Research In Motion Ltd. + Allan Lewis + alewis&rim.net +3531 + Orange DK + Arnt Christensen + arc&orange.dk +3532 + Meisei System Service Company + Nobuaki Nishimoto + t51917&meiji-life.co.jp +3533 + Acies Sistemas S/C Ltda. + Roberto Parra + rparra&acies.com.br +3534 + CTAM Pty. Ltd. + Peter Sim + psim&scs.wow.aust.com +3535 + Hutchison Avenue Software Corp. + Colin Bradley + colin&hasc.com +3536 + Globus + Carl Kessleman + carl&ISI.EDU +3537 + AirFiber, Inc. + Eric Shoquist + eshoquist&airfiberinc.com +3538 + Europe Connection Ltd + Thomas Wiegt + tag55&dial.pipex.com +3539 + Unassigned + ---none--- + ---none--- +3540 + Conelly International, Inc. + Jim Sluder + bricpu&teleport.com +3541 + Bindview Development Corp. + Sridhar Balaji + sbalaji&bindview.com +3542 + Galea Network Security + Daniel Letendre + dletendr&galea.com +3543 + Abilis gmbh + Lino Predella + predella&abilis.net +3544 + Baycorp ID Services Ltd. + David Young + david.young&baycorpid.com +3545 + Maddox Broadcast Ltd. + Jon Taylor + jont&maddox.co.uk +3546 + Acute Communications Corporation + David Chang + davidyc&accton.com +3547 + Tollbridge Technologies + Arun Mahajan + arun&tollbridgetech.com +3548 + Oresis Communications + John Lloyd + jlloyd&oresis.com +3549 + MLI Enterprises + Mark Lewis + lewisma&swbell.net +3550 + Allstor Software Limited + Simon Copeland + Simon&allstor-sw.co.uk +3551 + Spring Tide Networks, Inc. + Bob Power + bpower&springtidenet.com +3552 + EES Technology Ltd.
+ John Cooper + john&eestech.com +3553 + CSP AG + Martin Walther + mwalther&csp.de +3554 + SAS Institue Inc + Doug Bradley + dobrad&wnt.sas.com +3555 + NetLock Ltd. + Katalin Szűcs + szucs.katalin&netlock.hu +3556 + GENO-RZ GmbH + Herr Medovic + Zdenko_Medovic&genorz.de +3557 + MS3.net + Michael Stoddard + mstoddar&hotmail.com +3558 + BGS Systemplanung AG + Ulrich Muller + ulrich.mueller&bgs-ag.de +3559 + The Digital Schoolhouse + Network Operations Center + noc&tds.edu +3560 + Sphere Logic Corporation + Jerry Iwanski + jerryi&spherelogic.com +3561 + Broadband Forum (previously 'DSL Forum') + Assigned Numbers Manager + help&broadband-forum.org +3562 + Jim Marsden Development (formerly 'Selway Moore Limited') + Jim Marsden + jim&jmdev.org +3563 + National Network Data Services + William Hemmle + whemmle&multinetconnections.net +3564 + Ciphernet + Dimitri Vekris + dv&interlog.com +3565 + Grolier Interactive Europe On Line Groupe + Laurent T. Abiteboul + lta&t-online.fr +3566 + Midnight Technologies + Kyle Unice + kyle&midnighttech.com +3567 + Scott Supply Service, Inc. + Brett Scott + blscott&scottnet.com +3568 + Service Co LLC + Richard Newcomb + rnewcomb&twcny.rr.com +3569 + Electronic Payment Services, Inc. + Jim Cole + jcole&netEPS.com +3570 + Tait Limited + Anthony Lister + anthony.lister&taitradio.com +3571 + Gift-Trek Malaysia Sdn. Bhd. + Susan Chooi Meng Kuen + susancho&tm.net.my +3572 + HanA Systems, Inc. + Kim Keon-Hyeong + hyeong&hanasys.co.kr +3573 + South African Networking People (Pty) Ltd + James Keen + james&san.co.za +3574 + ORSYP SA + Laure Faure + laure.faure&orsyp.com +3575 + RKB Onsite Computer Service + Robert Breton + BOBBRETON&HOTMAIL.COM +3576 + MCI + Jim Potter + jim.potter&mci.com +3577 + Himachal Futuristic Communications Limited + Dr. Balram + balram&hfcl.com +3578 + PixStream Incorporated + Don Bowman + don&pixstream.com +3579 + Hurley + Tim Hurley + hurleyt&mindspring.com +3580 + Bell Emergis + Andrew Fernandes + andrew&fernandes.org +3581 + Seagate Technology + Jon Wolfe + jon.a.wolfe&seagate.com +3582 + LSI Logic + Ken Wisniewski + kenw&lsil.com +3583 + JetCell, Inc. + Randy Theobald + randyt&jetcell.com +3584 + Pacific Fiberoptics, Inc. + Niraj Gupta + niraj&pacfiber.com +3585 + Omnisec AG + Peter Lips + lips&omnisec.ch +3586 + Diebold, Incorporated + Jan Bredon + bredonj&diebold.com +3587 + TIW Systems, Inc. + Mike Huang + huang&tiw.com +3588 + NovoGroup Oyj + Minna Karkkainen + minna.karkkainen&novogroup.com +3589 + SoGot + W. Gothier + hwg&gmx.net +3590 + IA Information Systems AG + Matthias Mueller + mm&ia96.de +3591 + R.W. Shore + R.W. Shore + rws228&gmail.com +3592 + Drägerwerk AG & Co. KGaA (formerly 'Draeger Medizintechnik GmbH') + Harald Schurack + harald.schurack&draeger.com +3593 + Alcatel Sistemas de Informacion + Juan Cuervas-Mons + cuervas&alcatel.es +3594 + LJL Enterprises, Inc. + Larry Layten + larry&ljl.com +3595 + BC TEL Advanced Communications + Robert Lee + robert_lee&bctel.net +3596 + CMLTechnologies Inc. + Moise Gaspard + mgaspard&cmltech.com +3597 + WildThings + William Pantano + wjpantano&hotmail.com +3598 + Dixie Cake + Timothy Stafford + jtstafford&WEBTV.NET +3599 + Type & Graphics Pty Limited + Raif Naffah + raif&fl.net.au +3600 + Teltronics, Inc. + Peter G. Tuckerman + ptuckerman&teltronics.com +3601 + C.R. 
McGuffin Consulting Services + Craig McGuffin + RMcGuffin&CRMcG.COM +3602 + International Datacasting Corporation + Heather McGregor + hmcgregor&intldata.ca +3603 + Westpac Banking Corporation + Jack Szewczyk + jszewczyk&westpac.com.au +3604 + XYPI MEDIANET PVT. Ltd. + Adityakumar Atre + xypi&hotmail.com +3605 + Nesser & Nesser Consulting + Philip Nesser + pjnesser&nesser.com +3606 + Incognito Software Systems Inc. + Andre Kostur + akostur&incognito.com +3607 + Cerent Corporation + Chris Eich + chris.eich&cerent.com +3608 + The Tillerman Group + Rodney Thayer + rodney&tillerman.nu +3609 + Cequs Inc. + Peter Bachman + peterb&cequs.com +3610 + Ryan Net Works + John Ryan + john&cybertrace.com +3611 + Foo Chicken, Ltd + Brett McCormick + brett&chicken.org +3612 + Marcel Enterprises + Roy Ferdinand + RFerdinand&proxy.aol.com +3613 + Rubicon Technologies, Inc. + Rodney Hocutt + rhocutt&rubicon-tech.com +3614 + Altor plc + Giles Martin + giles.martin&altor.co.uk +3615 + SoftWell Performance AB + Tomas Ruden + tomas.ruden&softwell.se +3616 + United Resource Economic & Trading Center C., + Cui Lisheng + cui&public.east.cn.net +3617 + SurfControl plc + Hywel Morgan + Hywel.Morgan&surfcontrol.com +3618 + Flying Crocodile, Inc + Andy Edmond + president&mail.flyingcroc.com +3619 + ProxyMed, Inc. + Kiran Sanghi + ksanghi&proxymed.com +3620 + Transact Systems, Inc. + Claudio Mendoza + claudio&e-transact.com +3621 + Nuance Communications + Mark Klenk + mklenk&nuance.com +3622 + GEFM + Hermann Maurer + hermann.maurer&db.com +3623 + Systemintegrering AB + Pelle Arvinder + pelle&systemintegrering.se +3624 + Enator Communications AB + Ake Englund + ake.englund&enator.se +3625 + iHighway.net, Inc. + John Brown + jmbrown&ihighway.net +3626 + Dipl.-Ing. (FH) Markus Drechsler + Markus Drechsler + Info&Drechsler-Soft.de +3627 + Criptolab + Jorge Davila + jdavila&fi.upm.es +3628 + Tietokesko Ltd + Harri Hietanen + harri.hietanen&kesko.fi +3629 + Atos Origin + Wolfgang Klein + wolfgang.klein&atosorigin.com +3630 + DeltaKabel Telecom cv + M. Vermeiden + MVermeid&DKT.NL +3631 + Bridgewater Systems Corp. + Mark Jones + mjones&bridgewatersystems.com +3632 + MaxComm Technologies Inc. + Baktha Muralidharan + muralidb&maxcommtech.com +3633 + iD2 Technologies AB + Hakan Persson + hakan.persson&iD2tech.com +3634 + Allied Riser Communications Inc. + Scott Matthews + smatthews&arcmail.com +3635 + Wavesat Telecom, Inc. + Yvon Belec + ybelec&videotron.ca +3636 + dpa Deutsche Presse-Agentur GmbH + Marco Ladermann + ladermann&dpa.de +3637 + Power & Data Technology, Inc. + Michael Williams + mwilliams&powerdatatech.com +3638 + IntelliReach Corporation + Greg + greg&intellireach.com +3639 + WM-data + Per Hagero + pehae&wmdata.com +3640 + DataPath, Inc. + Adam Kirkley + adam.kirkley&datapath.com +3641 + Netaphor Software, Inc. + Rakesh Mahajan + rmahajan&netaphor.com +3642 + CryptoConsult + Ulrich Latzenhofer + latz&crypto.at +3643 + DIRTSA + Omar + jacy&tab1.telmex.net.mx +3644 + Carden Enterprise Ltd + Anthony Carden + acarden&voicenet.com +3645 + SONZ Ltd + Andrew Jordon Prast + andrew&prast.net +3646 + ASKEY Computer Corp. + Jeff Kao + jkao&askey.com +3647 + RaidTec, Inc. + Douglas Hart + douglas&gw.r16a.com +3648 + Harcourt Brace & Company + Jay Goldbach + sysadm&hbicg.com +3649 + Rollins Technology Inc. 
+ Matt Rollins + matt&networkcomputer.net +3650 + NetOps Corp + John Deuel + kink&netops.com +3651 + Know IT AB + Claes Berg + Claes.Berg&knowit.se +3652 + Pan Dacom Direkt GmbH (formerly 'Pan Dacom Networking AG') + Michael Lindner + lindner&pandacomdirekt.de +3653 + Cirque Networks, Inc. + Kevin Hoff + kevin.hoff&cirque-networks.com +3654 + NaviNet + Jeffrey Johnson + itcambridge&navinet.net +3655 + Germanischer Lloyd AG + Stefan Christiansen + smc&germanlloyd.org +3656 + FORCE Computers GmbH + Jens Wiegand + jewi&Force.DE +3657 + Ericsson Wireless LAN Systems + Kjell Jansson + kjell.jansson&era.ericsson.se +3658 + Dalian F.T.Z. TianYang Int'l Trade Co., Ltd. + Raymond Wang + sinotyw&pub.dl.lnpta.net.cn +3659 + Ethercity Designs and Hosting Solutions + Joey Barrett + grinan&prodigy.net +3660 + G3M Corporation + Greg Campbell + Greg&G3M.com +3661 + Secure Data Access Inc. + Jose Perea + RACCOM&AOL.COM +3662 + QWES.com Incorporated + Robert MacDonald + bmacdonald&qwes.com +3663 + Megaxess + Joonbum Byun + jbyun&atanetwork.net +3664 + Polygon + Buck Caldwell + buck_c&polygon.com +3665 + Netoids Inc. + Sridhar Ramachandran + sridhar&netoids.com +3666 + Acriter Software B.V. + Cees de Groot + cg&acriter.com +3667 + InteleNet Communications + Mark Nagel + nagel&intelenet.net +3668 + Control Module Inc.(CMI) + David Horan + DHoran&ControlMod.com +3669 + Aveo Inc. + Diego Cordovez + dcordovez&aveo.com +3670 + MD PREI + Igor Ovcharenko + igori&mo.msk.ru +3671 + Picazo Communication Inc. + James Davidson + james&picazo.com +3672 + Scottsdale Securities, Inc. + Mike Tully + mtully&alpha.scottsave.com +3673 + WebManage Technologies, Inc. + Krishna Mangipudi + krishna&webmanage.com +3674 + Infoclubindia + B.P.Mukherji + bpm&infoclubindia.com +3675 + Connor Plumbing & Heating + Michael Connor + mjconnor&eznet.net +3676 + Sentryl Software, Inc. + Eric Green + egreen&sentryl.com +3677 + Engetron - Engenharia Eletronica Ind. e Com. Ltda. + Wilton Padrao + wpadrao&engetron.com.br +3678 + Icaro + Cyd Delgado + cyd&nutecnet.com.br +3679 + Unity Health + Terry Penn + pennts&stlo.smhs.com +3680 + Parity Software Dev. Corp. + Bob Edgar + BobE&ParitySoftware.com +3681 + David D. Hartman, CPA + David Hartman + hartman&eramp.net +3682 + ComGates Communications Ltd. + Danny Bukshpan + dbukshpan&comgates.com +3683 + Honeywell Oy, Varkaus + Pekka Salpakari + Pekka.Salpakari&honeywell.fi +3684 + InterWorld Corp. + Arthur Yeo + ArthurY&InterWorld.com +3685 + Sento Pty Ltd + Richard Volzke + richardv&sento.com.au +3686 + Wicks By Julie + Julie Richardson + Duo&Defnet.com +3687 + HSD - Hardware Software Development GmbH + Ing. Markus Huemer + markus.huemer&hsd.at +3688 + Morpho e-documents (formerly 'Sagem Orga GmbH') + Hanno Dietrich + med.oid&morpho.com +3689 + New Technology Development, Inc. + Rich Lyman + rich&gordian.com +3690 + TIAA-CREF + Michael Smith + ms&gf.org +3691 + Team2it-CopyLeft S.r.l. + Enrico Gardenghi + garden&team2it.com +3692 + Intuit + Doug Small + Doug_Small&intuit.com +3693 + Hakusan Corporation + Yuka Hirata + hirata&datamark.co.jp +3694 + Thyssen Informatik GmbH + Hannes Loehr + loehr&tic.thyssen.com +3695 + Chromatis Networks Inc. 
+ Roni Rosen + roni&chromatis.com +3696 + MicroProdigy + Earl Tyler + etyler&netscape.net +3697 + Quantum Corporation + Carsten H Prigge + carsten.prigge&quantum.com +3698 + Saraide + Joe Ireland + joe.ireland&saraide.com +3699 + Network Technologies Inc + Carl Jagatich + intermux.lara&ntigo.com +3700 + Stellar One Corporation + Jack Cook + jackc&stellar.com +3701 + TurboNet Communications + Bob Himlin + rhimlin&turbonet-comm.com +3702 + Printrak International Inc. + Tom Gruschus + gruschus&printrak.com +3703 + CyberFax Inc. + Claire Genin + cgenin&hotmail.com +3704 + Advanced Micro Devices, Inc + James Last + james.last&amd.com +3705 + ICET SpA + Gerardo Tomasi + g.tomasi&ieee.org +3706 + ADiTel Telekommunikation Network GmbH + Peter Adler + pa&cybertron.at +3707 + ADiT Holding GmbH + Peter Adler + pa&cybertron.at +3708 + ADLER DATA Software GmbH + Peter Adler + pa&cybertron.at +3709 + Teracom Telematica Ltda. + Tassilo Luiz Kalberer Pires + tassilo&datacom-tel.com +3710 + LANmetrix Pty Ltd + Brett Airey + brett&lanmetrix.co.za +3711 + SINETICA + David Hill + dkhill&dial.pipex.com +3712 + GigaNet Incorporated + Peter Desnoyers + pjd&giganet.com +3713 + Voxent Systems Ltd + David Fullbrook + david.fullbrook&voxent.com +3714 + BellSouth Wireless Data, L.P. + Chris Wiesner + cwiesner&bellsouthwd.com +3715 + Teleste Corporation + Matti Susi + matti.susi&teleste.com +3716 + Brand Communications Limited + Peter Vince + peterv&brandcomms.com +3717 + GeNUA mbH + Konstantin Agouros + Konstantin.Agouros&GeNUA.DE +3718 + Philips Broadband Networks + Jim Reynolds + jreynolds&iname.com +3719 + Exmicro + Ted Baginski + ted_baginski&hotmail.com +3720 + Visiqn + Rich Lyman + rich&gordian.com +3721 + OTONET s.a.r.l. + Elisabeth Cochard + e.cochard&otonet-lab.com +3722 + Vulkan-Com Ltd. + Leonid Goldblat + han&mmtel.msk.su +3723 + Ankey + Victor Makarov + victor.makarov&ankey.ru +3724 + Interactive Communications Systems + Owen Walcher + owalcher&icstelephony.com +3725 + AC&E Ltd + Tyre Nelson + tnelson&aceltd.com +3726 + enCommerce, Incorporated + James Harwood + james&encommerce.com +3727 + Western Multiplex + Herman Lee + hlee&mux.glenayre.com +3728 + REALM Information Technoligies, Inc. + Scott Smyth + ssmyth&realminfo.com +3729 + Nokia (formerly 'Alcatel-Lucent') + Michael Anthony + michael.anthony&nokia.com +3730 + Westport Technologies + Neil Lefebvre + nlefebvre&westporttech.com +3731 + The SABRE Group + Anthony J. Sealy + anthony_sealy&sabre.com +3732 + Calamp Wireless Networks Inc (formerly 'Dataradio Inc.') + Pierre Olivier + polivier&calamp.com +3733 + Datakom Austria DI Erich + Rescheneder + erich.rescheneder&datakom.at +3734 + Security-7 Ltd. + Tal Rosen + tal&security7.com +3735 + Telesafe AS + Petter J. Brudal + petter.brudal&telesafe.no +3736 + Goodfield Corp. + Alexander Webb + ceo&unitedstates.com +3737 + Pleiades Communications, Inc. + Susan L. Pirzchalski + slp&pleiadescom.com +3738 + StreamSoft, Inc. + Sanjay Lokare + slokare&streamsoft.com +3739 + The Eighteenth Software Co.,Ltd. + Michihide Hotta + sim&remus.dti.ne.jp +3740 + Aquila Technologies Group, Inc + Michel J Hoy + mhoy&aquilagroup.com +3741 + Foliage Software Systems + Steven Morlock + smorloc&foliage.com +3742 + VIATechnologies,Inc + Saten Shih + saten&via.com.tw +3743 + P.D. 
Systems International Ltd + Ian Stuchbury + ian&pdsi.demon.co.uk +3744 + DATEV eG + Dietmar Sengenleitner + Dietmar.Sengenleitner&datev.de +3745 + ClustRa AS + Oystein Grovlen + oystein.grovlen&clustra.com +3746 + Swisscom AG + Nico Wieland + nico.wieland&swisscom.com +3747 + AS Yegen + Aleksei Sujurov + alex&anet.ee +3748 + Bank of America + James W. Burton + james.w.burton&bankamerica.com +3749 + TeleHub Communication Corp + Thevi Sundaralingam + tsundaralingam&telehub.com +3750 + Iscape Software + Jukka Vaisanen + jukka.vaisanen&iscape.fi +3751 + Dragon Industries + Michael Storroesten + ms&dragon.no +3752 + Thales Norway AS + Erna Margrete Korslien + erna.korslien&no.thalesgroup.com +3753 + Aitek S.r.L. + Ernesto Troiano + et&aitek.it +3754 + Crag Technologies + Dave Holzer + dholzer&cragtech.com +3755 + ATOP Technologies, Inc. + David Huang + david&atop.com.tw +3756 + Julien Daniel + Julien Daniel + tazdevil&total.net +3757 + PT. Usaha Mediantara Intranet + Harry + harry&spot.net.id +3758 + Core Networks, Inc + Chris Thornhill + enterprise_contact&cjt.ca +3759 + OMEGA Micro Systems + William O'Neill + omegamic&vianet.on.ca +3760 + Content Technologies Ltd + Andy Harris + andy.harris&mimesweeper.com +3761 + HAGER-ELECTRONICS GmbH + Peter-Michael Hager + Hager&Dortmund.net +3762 + Kwangwoon University + Kuk-Hyun Cho + khcho&infotel.kwangwoon.ac.kr +3763 + Veramark + Jim Gulley + jgulley&veramark.com +3764 + Quantum Corporation (formerly 'Advanced Digital Information Corporation') + Carsten Prigge + carsten.prigge&quantum.com +3765 + StrategicLink Consulting + Tim Cahill + cahillt&strategiclink.com +3766 + Hannibal Teknologies + Richard White II + richard&hannibaltek.com +3767 + Pan-International Industrial Corp. + George Huang + georgeh&mail.panpi.com.tw +3768 + Department of Veterans Affairs + Jason Miller + vaitengineeringcisidm&va.gov +3769 + Banyan Networks Pvt. Ltd. + L.N. Rajaram + raja&banyan.tenet.res.in +3770 + MCK Communications + Richard Ozer + oz&mck.com +3771 + ko6yd + Dane Westvik + ko6yd&jps.net +3772 + POS Resources Inc. + Dane Westvik + dwestvik&posr.com +3773 + Siara Systems + Jianxin (Joe) Li + joe&siara.com +3774 + Wavelink + Roy Morris + rmorris&pin-corp.com +3775 + AGFA Corporation + John Saba + john.saba.b&us.agfa.com +3776 + Millenium Solutions + Jeremy Adorna + jadorna&csom.umn.edu +3777 + HydraWEB Technologies + Seth Robertson + seth&hydraweb.com +3778 + CP Eletronica Industrial S/A + Mario Magalhaes + leboute&pro.via-rs.com.br +3779 + Kingmax Technology Inc. + Paul Lee + Rd&kingmax.com.tw +3780 + Level 3 Communications, Inc. + Teri Blackstock + Teri.Blackstock&Level3.com +3781 + WXN, Inc. + Matt Wixson + mwixson&wxn.com +3782 + University of North Texas + Philip Baczewski + baczewski&UNT.EDU +3783 + EMR Corporation + Slade Grove + slade&emr.net +3784 + Speakerbus Ltd. + Brian Philpotts + brian.philpotts&speakerbus.co.uk +3785 + Cirrus Logic + Mike Press + mpress&crystal.cirrus.com +3786 + Highland Technology Group, Inc. + Adam Mitchell + adamm&mindspring.com +3787 + Russel Lane & Associates, Inc. + Russel Lane + russel&rlane.com +3788 + Talktyme Technologies Inc + Elzbieta Klimczyk + talktyme&talktyme.com +3789 + Wire Terminator (WT) + Yoram Har-Lev + harlev&walla.co.il +3790 + Hamamatsu Photonics K.K. Kazuhiko + Wakamori + wakamori&crl.hpk.co.jp +3791 + TeleComp, Inc. + Angel Gomez + angel&trdcusa.com +3792 + LOGEC Systems + Neil McKinnon + ntm&tusc.com.au +3793 + Lanier Worldwide, Inc. 
+ Antonio del Valle + adelvall&lanier.com +3794 + Midas Communication Technologies Private Limited + R. Balajee + rbala&midas.tenet.res.in +3795 + Enact Inc. + Dan Dzina Jr. + ddzina&enactinc.com +3796 + imt Information Management Technology AG + Thomas Gusset + thomas.gusset&imt.ch +3797 + BENQ Corporation + Andy Huang + andythuang&benq.com +3798 + Jinny Paging + Georges Yazbek + gyazbek&jinny.com.lb +3799 + Live Networking Inc. + Russ Carleton + roccor&livenetworking.com +3800 + Unisource Italia S.p.A. + Davide Moroni + noc&unisource.it +3801 + Agranat Systems, Inc. + Kenneth Giusti + giusti&agranat.com +3802 + Softamed + Bernard Schaballie + bernard.schaballie&softamed.com +3803 + Praxon + Chris Aiuto + chris&praxon.com +3804 + Standard Chartered Bank (Treasury) + Mark Pearson + mark_pearson&stanchart.com +3805 + Longhai Yongchuan Foods Co., Ltd. + Shuying Su + lhycspgs&public.zzptt.fj.cn +3806 + Shiron Satellite Communications(1996) Ltd. + Andrey Shkinev + andreys&shiron.com +3807 + Wuhan Research Institute of Posts and Telecommunications + Chen Bing + chenbmail&163.net +3808 + Cyber Power System Inc. + Barts Lin + global.service&cyberpower.com +3809 + Cyras Systems Inc + Shirish Sandesara + ssandesara&cyras.com +3810 + NetLine + Jean-Marc Odinot + Jean-Marc.Odinot&NetLine.fr +3811 + SpectraSoft Inc. + Yirong Li + yirong.li&spectrasoft.com +3812 + Anda Networks, Inc. + Ray Jamp + rjamp&andanets.com +3813 + Ellacoya Networks, Inc. + Kurt Dobbins + kurtdobbins&ellacoya.com +3814 + CallNet Communications, Inc. + Mukesh Sundaram + mukesh&callnetcomm.com +3815 + Control Solutions, Inc. + Jim Hogenson + jimhogenson&csimn.com +3816 + Nominet UK + Geoffrey Sisson + geoff&nominet.org.uk +3817 + Monfox, Inc. + Stefan King + sking&monfox.com +3818 + MetraTech Corp. + Kevin Fitzgerald + kevin&metratech.com +3819 + OptiSystems Solutions Ltd. + Boris Goldberg + bgoldberg&optisystems.com +3820 + Ziga Corporation + Steven Knight + knight&ziga.com +3821 + Indian Valley Enterpriseses Inc. + Thomas Roberts + roberts&andassoc.com +3822 + Edimax Technology Co., Ltd. + Peter Pan + peter&edimax.com.tw +3823 + Touchbase Communications + Richard Hall + teedoff98&aol.com +3824 + Attune Networks + Lior Horn + lior_horn&attune-networks.com +3825 + Advanced Network & Services, Inc. + Bill Cerveny + cerveny&advanced.org +3826 + Nextpoint Networks, Inc. + Marat Vaysman + vaysman&nextpoint.com +3827 + Moscow Central Depository + Andrew Vostropiatov + andrv&mcd.ru +3828 + STG Inc. + Tom Gueth + TGueth&compuserve.com +3829 + Imaging Technologies Corporation + Jeff Johnson + jjohnson&imagetechcorp.com +3830 + Acision + Gertjan van Wingerde + gertjan.van.wingerde&acision.com +3831 + Oblix Inc. + Prakash Ramamurthy + prakash&oblix.com +3832 + Taylored Solutions + Kent Taylor + Kent&TayloredSolutions.com +3833 + Schneider Electric + Dennis Dube + dennis.dube&us.schneider-electric.com +3834 + Novartis Pharma AG + Fabrice Musy + Fabrice.musy&novartis.com +3835 + ALPS Electric + Yuichiro Sawame + sawame&alps.co.jp +3836 + Terese Brown Real Estate + Terese Brown + trebrown&capecod.net +3837 + HBOC Imaging Solutions Group + Don Ruby + druby&imnet.com +3838 + Gasper Corporation + Mark Marratta + mmarratta&gasper-corp.com +3839 + NeoWave Inc. + DuckMoon Kang + dmkang&NeoWave.co.kr +3840 + Globe Institute of Technology + Ali Daneshmand + adaneshmand&hotmail.com +3841 + Flycast Communications Corp. + Steve Heyman + sheyman&flycast.com +3842 + lkis + Areifudin + lkis&indosat.net.id +3843 + Pyderion Contact Technologies Inc. 
+ Ron Stone + rstone&ottawa.com +3844 + Graham Technology plc + Alexander Hoogerhuis + alexh&gtnet.com +3845 + Citrix Systems Inc. + Bill Powell + snmp&citrix.com +3846 + QMaster Software Solutions, Inc. + Grant Gilmour + grant&qmaster.com +3847 + Ensemble Communications Incorporated + Jason William Hershberger + jason&ensemblecom.com +3848 + Northchurch Communications, Inc. + Matt Guertin + matt&northc.com +3849 + Object Integration, Inc. + Brad Klein + bradK&obji.com +3850 + Xnet Communications GmbH + Christian Mock + chrimo&xdsnet.de +3851 + Optika Inc. + Doug Telford + DTelford&optika.com +3852 + Soft-Inter Technologies + Stephan Malric + malric&softinter.com +3853 + ViaGate Technologies + Allan Lawrence + lawrenca&viagate.com +3854 + KCP, Inc. + Brad Klein + brad&kcpinc.com +3855 + Elastic Networks + Glenn Trimble + gtrimble&elastic.com +3856 + Siebel Systems + Daniel Sternbergh + dsternbergh&siebel.com +3857 + Sage Research Facility + Charles Thurber + Charles&Thurber.org +3858 + Capricon Engineers + Amit Kapoor + amitk&tande.com +3859 + VXL Instruments Ltd + Shelly Varghese + shellyv&vxl.co.in +3860 + First International Computer, Inc. + C.-H. Kevin Liu + kevinliu&rd.fic.com.tw +3861 + Fujitsu Network Communications, Inc. + Corey Sharman + corey.sharman&fnc.fujitsu.com +3862 + Royal Bank of Scotland + Gwyllym Jones + jonesgt&rbos.co.uk +3863 + Canadian Marconi Company + Luc Germain + lgermain&mtl.marconi.ca +3864 + InTalk, Inc. + Simon Black + simonblack&intalk.com +3865 + Thorne, West + Dan Wasserman + danogma&aol.com +3866 + Global Net Center + Carl Suarez + csuarez&initiative-one.net +3867 + Presence Technology GmbH+Co.KGMichael + Staubermann + admin&pt-online.de +3868 + Concentrix + Karsten Huele + karsten.huele&concentrix.com +3869 + IntelliLogic Networks, Inc. + Hilton Keats + hkeats&intellilogic.com +3870 + Internet Business Emporium + Dorrien Le Roux + ibe&thesouth.co.za +3871 + Ditech Corporation + Alex Kurzhanskiy + AKurzhanskiy&DitechCorp.com +3872 + Miranda Technologies Inc. + Tom Montgomery + tmontgom&miranda.com +3873 + QLogic + Chuck Micalizzi + c_micalizzi&qlc.com +3874 + InfoValue Computing, Inc. + Philip Hwang + phwang&infovalue.com +3875 + Metro Computing Consultants, Inc. + Michael Cash + info&metrocomputing.com +3876 + ARINC Incorporated + Jim Bradbury + jbrad&arinc.com +3877 + First American National Bank + Mark Neill + Mark.Neill&fanb.com +3878 + Real Software Company + Steve Coles + aascolsa&rdg.ac.uk +3879 + Taiwan Telecommunication Industry Co., Ltd. + Michael C.C. Liou + daml&ttic01.tatung.com.tw +3880 + Wireless Information Transfer Systems + Eric Christensen + Eric_Christensen-P27660&email.mot.com +3881 + Telefonaktiebolaget LM Ericsson + Tomas Rahkonen + tomas.rahkonen&lme.ericsson.se +3882 + Pacom Systems Pty Ltd + Steve Barton + steveb&pacomsystems.com +3883 + Next plc + Mike Rankin + mrankin&next.co.uk +3884 + Phobos Corporation + Rory Cejka + rcejka&phobos.com +3885 + Lifeline Systems Inc.
+ Rick Wasserboehr + rwasserboehr&lifelinesys.com +3886 + MiMax Information + Liu Yongxiang + liuyx&comp.nus.edu.sg +3887 + Elder Enterprises + Alex + RElder1&aol.com +3888 + Iapetus Software + Michael Nelson + mikenel&iapetus.com +3889 + CE Infosys GmbH + Stefan Ritter + sales&ce-infosys.com +3890 + Across Wireless AB + Lars Johansson + lars.johansson&acrosswireless.com +3891 + Chicago Board of Trade + Albert Anders + aand44&info.cbot.com +3892 + ATEB + Phil Vice + pvice&syngate.ateb.com +3893 + Parks Comunicacoes Digitais + Giovani Nardi + gnardi&parks.com.br +3894 + Pitney Bowes + Kevin Bodie + bodieke&pb.com +3895 + Advent Communications Ltd + Robert Davies + robert.davies&advent-comm.co.uk +3896 + Automated Integrated Solutions, Inc. + Roger Gaulin + rgaulin&aissoftware.com +3897 + Edison Technology Solutions + Joseph Pumilio + jpumilio&edisontec.com +3898 + Mitsubishi Telecommunications Network Division + Aung Htay + ahtay&mtnd.com +3899 + South China Morning Post Publishers Ltd + Ivan Wang + ivanwang&scmp.com +3900 + Raster Solutions Pty. Ltd. + David Keeffe + avid&raster.onthe.net.au +3901 + Managed Messaging, LLC + Tom Johnson + tj&terramar.net +3902 + Zhongxing Telecom Co.,ltd. (abbr. ZTE) + Zhang Jiaming + zhang.jiaming&mail.zte.com.cn +3903 + Tornado Development, Inc. + Ryan Kim + ryan&tems.com +3904 + Xlink Internet Service GmbH + Heiko Rupp + hwr&xlink.net +3905 + Telenordia Internet + Dennis Wennstrom + dew&algonet.se +3906 + Data Communication Technology Research Institute + Zhang Zhiheng + sjsbmbbb&public3.bta.net.cn +3907 + California Independent System Operator + Steve Dougherty + sdougherty&caiso.com +3908 + GSP + Michael Kartashov + mike&vgts.ru +3909 + Nodes, Inc. + Kevin White + klw&nodes.com +3910 + Railtrack PLC + Andy Nott + mitchdj&globalnet.co.uk +3911 + Glasner Consulting + Luke Glasner + lglasner&student.umass.edu +3912 + GWcom, Inc. + C.W. Chung + cwchung&gwcom.com +3913 + Array Telecom Corp. + Mark Scott + Mark.Scott&arraytel.com +3914 + TCOSoft, Inc. + Steve Ross + info&tcosoft.com +3915 + Teknis Electronics + Stephen Lechowicz + teknis&teknis.net +3916 + Neo-Core, Inc. + Richard Moore + rmoore&neocore.com +3917 + V-Bits, Inc. + Raymond Tam + raymond_tam&v-bits.com +3918 + Watson Wyatt Worldwide + Phil Grigson + phil_grigson&watsonwyatt.com +3919 + Monterey Networks, Inc. + Bhadresh Trivedi + btrivedi&montereynets.com +3920 + CSNet Consulting, Inc. + Chip Sutton + chip&cs-net.com +3921 + Aplion Networks, Inc. + Deepak Ottur + dottur&aplion.com +3922 + WidePoint Cybersecurity Solutions Corporation + Caroline Godfrey + admin&orc.com +3923 + Intrak, Inc. + Frank Fullen + ffullen&intrak.com +3924 + Policy Management Systems Corp. + David Wallace + root&pmsc.com +3925 + Encompass Enterprise Management Consultants + Kevin Austin + kda2&msn.com +3926 + NewSouth Communications Corp. + Tracy Cooper + tcooper&newsouth.com +3927 + WarpSpeed Communications + Jay Riddell + jayr&warpspeed.net +3928 + Sandwich Wireless Communications, Inc. + Anthony Taves + tony&snd.softfarm.com +3929 + NEITH Creative Beauty + Djed Wade + djed&concentric.net +3930 + NTT Electronics Corporation + Masakazu Sato + m-sato&yoko.nel.co.jp +3931 + EasyAccess + David Tung + dtung&netscape.com +3932 + Lara Technology, Inc. 
+ James Washburn + jwashburn&laratech.com +3933 + NEXO + Richard Tai + richardtai&nexo.com.tw +3934 + Net-Wise Communications Ltd + Daniel Fedak + dfedak&net-wise.co.uk +3935 + Centro Cantonale d'Informatica + Lorenza Rusca Picchetti + lorenza.rusca&ti.ch +3936 + SINTECA + Volker Rast + vrast&hotmail.com +3937 + EMS Technologies Canada Ltd. + Luc Pinsonneault + lpinsonn&spar.ca +3938 + Deccan Technologies, Inc.Vijay Burgula/Vik Jang + ices&nyct.net + softsolinc&worldnet.att.net +3939 + Internet Devices, Inc. + Rodney Thayer + rodney&internetdevices.com +3940 + Ninety.De + Christian Kruetzfeldt + ckruetze&foni.net +3941 + Santak Electronics Co. Ltd. + Huang Fei + huangfei&sc.stk.com.cn +3942 + Infinet LLC (formerly 'Aqua Project Group') + Dmitry Okorokov + dmitry&infinetwireless.com +3943 + Deva.net + Albert Hui + avatar&deva.net +3944 + Data Solutions Group + William Theiss + wtheiss&baims.com +3945 + Sylantro Systems + James Logajan + Jim.Logajan&Sylantro.com +3946 + Conklin Corporation + LuJack Ewell + lewell&conklincorp.com +3947 + Inverse Network Technology + Matt Burdick + burdick&inversenet.com +3948 + Sonus Networks, Inc. + Information Security + infosec&sonusnet.com +3949 + Intrinsix Corporation + Mason Gibson + mason&vadm.com +3950 + TierTwo Systems + John Dick + jdick&tier2.com +3951 + Visionik A/S + Peter Holst Andersen + p.h.andersen&visionik.dk +3952 + Caly Corporation + Amir Fuhrmann + amir&calynet.com +3953 + Albert Ackermann GmbH + Co KG + Oliver Lehm + o.lehm&ackermann.com +3954 + Mitra Imaging Inc. + Wallace Gaebel + Wallace&mitra.com +3955 + Linksys + Greg LaPolla + GLapolla&Linksys.com +3956 + Envox + Denis Kotlar + denis.kotlar&envox.com +3957 + Globalarchitect + Kiewwen Pau + kpauarcht&erols.com +3958 + Solution Associates + Jordan Rechis + rechis&ti.com +3959 + InterCall Communications & Consulting + Joe Cho + joecho&intercallco.com +3960 + Government Technology Solutions + Jeff Deitz + jdeitz&gtsisgsa.com +3961 + Corona Networks + Sam Hancock + sam&coronanetworks.com +3962 + MGL Groupe AUBAY + Joubert Jean-Paul + jpjoubert&mgl-aubay.com +3963 + R.C. Wright & Associates + Paul Webb + paulwebb&rcwa.com.au +3964 + Danet GmbH + Harald Rieder + Harald.Rieder&danet.de +3965 + Spacebridge Networks Corporation + Alain Gauthier + agauthier&spacebridge.com +3966 + Allianz Elementar Versicherungs-Aktiengesellschaften + Roland Mayer + roland.mayer&allianz-elementar.at +3967 + Bosch Sicherheitssysteme Engineering GmbH + Konrad Simon + konrad.simon&de.bosch.com +3968 + Webline Communications Corp. + Jeffrey Anuszczyk + Jeff.Anuszczyk&webline.com +3969 + RealNetworks, Inc. + Craig Robinson + crobinson&real.com +3970 + FragRage Network + Bertil Nilsson + bertil&frage.net +3971 + IMECOM + Farnoux + FarnouxN&BigFoot.com +3972 + Apsion + Pete Oliveira + pete&apsion.com +3973 + Quest Software Inc. + Eugene Pugin + legal&quest.com +3974 + Custom Internetworking Inc. + Jeremy Guthrie + jguthrie&cinet.net +3975 + Boston Communications Group, Inc. + Vivek Marya + Vivek_Marya&boscomm.net +3976 + LinkData Solutions (Pty) Limited + Veit Roth + veit&linkdataSA.com +3977 + Broadband Networks, Inc. + Keith Kendall + kkendall&bnisolutions.com +3978 + Ingenieurbuero fuer Telekommunikations- undSoftware-Systemloesungen + Arno-Can Uestuensoez + acue&i4p.de +3979 + AsGa Microeletronica S.A. + Rege Scarabucci + rege&asga.com.br +3980 + Home Account Network, Inc. + Doug Heathcoat + dheathco&homeaccount.com +3981 + Troika Networks, Inc.
+ Benjamin Kuo + benk&troikanetworks.com +3982 + FaxNET Corporation + Brian Dowling + bdowling&faxnet.net +3983 + Video Networks, Inc + Steve Normann + snormann&vninet.com +3984 + EBSCO Publishing + David Newman + dnewman&epnet.com +3985 + Avery Computer Systems + Michael Avery + averycs&aol.com +3986 + HighwayMaster + Greg Shetler + gshetle&highwaymaster.com +3987 + Concept Webcd Services Pvt. Ltd. + Jatin Rawal + jesco&bom3.vsnl.net.in +3988 + Finecom Co., Ltd. + Namsuk Baek + nsbaek&fine.finecom.co.kr +3989 + DigiComm Corporation + Paul Grimshaw + paulgq&intap.net +3990 + Innovative Technology Software Systems Ltd + Denham Coates-Evelyn + innoteknol&aol.com +3991 + Reticom + Daniel Crowl + dancrowl&reticom.com +3992 + Ohmega Electronic Products Ltd. + Alan Breeze + alanbreeze.ohmega&onyxnet.co.uk +3993 + Perfect Order + William Hathaway + wdh&poss.com +3994 + Virtual Resources Communications Inc. + Whai Lim + wlim&vrcomm.com +3995 + VASCO Data Security International, Inc. + John Im + jci&vasco.com +3996 + Open Systems AG + Raphael Thoma + iana&open.ch +3997 + ImproWare AG + Patrick Guelat + patrick.guelat&imp.ch +3998 + Cherus + Dmitri Tseitline + tsdima&cherus.msk.ru +3999 + dydx + James Lyell + james&dydx.com +4000 + Hi-net Research Group + Kazushige Obara + obara&geo.bosai.go.jp +4001 + KADAK Products Ltd. + Douglas Seymour + amxsales&kadak.com +4002 + Banco del Buen Ayre + Alejandro H. Gil Sanda + asanda&bba.buenayre.com.ar +4003 + George Mason University + Brian Davidson + bdavids1&gmu.edu +4004 + Aloha Networks Inc. + Tom Houweling + tom&alohanet.com +4005 + Tundo Corporation + Michael Bar-Joseph + michaelb&tundo.com +4006 + Tundo Communication and Telephony Ltd. + Michael Bar-Joseph + michaelb&tundo.com +4007 + Cable & Wireless Communications plc + Brian Norris + brian.norris&cwcom.co.uk +4008 + Ardebil Pty Ltd + Tzu-Pei Chen + tchen&ardebil.com.au +4009 + Telephonics Corporation + Abid Khan + khan&telephonics.com +4010 + Dorsai Technology + Simon Dunford + dorsai&cableinet.co.uk +4011 + CNUT Archetype Ltd + Phil Hayes + phil.hayes&cnut.co.uk +4012 + Selectica, Inc. + Brian Knoth + bknoth&selectica.com +4013 + KPMG LLP ICE Telecom-SVO + Eduardo Navarrete + enavarrete&kpmg.com +4014 + StarBurst Software + Li Chou + li.chou&starburstsoftware.com +4015 + Computer Configurations Holdings + Mike Brien + mikeb&configs.co.za +4016 + Iperia, Incorporated + Eric Martin + emartin&iperia.com +4017 + Logisistem Srl + Paolo Palma + paolo.palma&logisistem.it +4018 + Security First Technologies, Inc. + Garner Andrews + garner.andrews&s1.com +4019 + APIS Software GmbH + Juergen Schwibs + Juergen.Schwibs&apissoft.com +4020 + Coop Genossenschaft Switzerland + UNIX Solutions + unix.solutions&coop.ch +4021 + Ensim Corporation + Shaw Chuang + shaw&ensim.net +4022 + AEC Ltd. + Daniel Cvrcek + dan&aec.cz +4023 + Imran + Imran Anwar + imran&imran.com +4024 + Avantel S.A. + Carlos Robles + crobles&avantel.com.mx +4025 + Lexitech, Inc. + Stephen MarcAurele + stevem&lexitech.com +4026 + Internet Access AG + Marc Liyanage + webmaster2&access.ch +4027 + GTE Laboratories Incorporated + Jonathan Klotzbach + jklotzbach&gte.com +4028 + Decision Networks + Ted Rohling + tedr&instructors.net +4029 + NSI Communications Systems Inc. + Daniel Richard + drichard&nsicomm.com +4030 + Mitsubishi Electric Automation, Inc.- UPS division + David Garner + dgarner&sechq.com +4031 + Orillion USA, Inc.
+ Thang Lu + thanglu&orillion.com +4032 + DataKinetics Ltd + Gareth Kiely + gkiely&datakinetics.co.uk +4033 + Signal Core + Gene Yu + gene&signalcore.com +4034 + IntraNet, Inc. + Jonathan Edwards + edwards&intranet.com +4035 + Ignitus Communications + Mahesh Ganmukhi + maheshg&lucent.com +4036 + Enator Telub AB + Bengt Nilsson + bengt.nilsson&enator.se +4037 + Presbyterian Church (USA) + William Hovingh + whovingh&ctr.pcusa.org +4038 + Softov Advanced Systems Ltd. + Sam Bercovici + sam&softov.co.il +4039 + Mayan Networks Corporation + K. Ramesh Babu + ramesh.babu&mayannetworks.com +4040 + MicroSignals Group + Futaya Yamazaki + yamazaki&ilt.com +4041 + Nomura International Plc + Yogesh Patel / Geoff Alders + hostmaster&nomura.co.uk +4042 + DigiCommerce Ltd. + Raphael Salomon + raphael&digicommerce.com +4043 + QNX Software Systems Ltd. + Dave Brown + dabrown&qnx.com +4044 + Eloquence Limited + Ian Rogers + ian&eloquence.co.uk +4045 + Thomson-CSF Communications + Pierre Petit + Pierre.PETIT&tcc.thomson-csf.com +4046 + SCORT + Jeff Maury + jfmaury&scort.com +4047 + Hunan Computer CO., Ltd. + Jinmei Lv + hcfrd&public.cs.hn.cn +4048 + Kinko's, Inc. + Karl Miller + karl.miller&kinkos.com +4049 + Opus Comunicacao de Dados + Felipe Salgado + felipe&opus.com.br +4050 + Infuntainment Limited + Ashton Pereira + Benhur&bogo.co.uk +4051 + SowsEar Solution Design Group + Larry Bartz + larrybartz&netscape.net +4052 + Vanguard Security Technologies Ltd Raviv Karnieli + raviv&vguard.com + ---none--- +4053 + Switchcore + Stephan Korsback + stephan.korsback&switchcore.com +4054 + Abatis Systems Corporation + Philippe Fajeau + pfajeau&abatissys.com +4055 + Nimbus Software AS + Carstein Seeberg + case&nimsoft.no +4056 + StreamCORE + Francois Tete + francois.tete&stream-core.com +4057 + ControlNet, Inc. + Vivek + vivek&controlnet.co.in +4058 + Generali IT-Solutions GmbH + Klaus Ablinger + klaus.ablinger&generali.at +4059 + Broadlogic + Sridhar Sikha + sridhar.sikha&broadlogic.com +4060 + JFAX.COM + Felipe Hervias + fhervias&jfax.com +4061 + Technology Control Services + Jonathan Silver + jonathan.silver&techcontrol.com +4062 + Astral Point Communications, Inc. + Steven Sherry + ssherry&astralpoint.com +4063 + ASC Technologies AG (formerly 'ASC Telecom AG') + Peter Schmitt + p.schmitt&asc.de +4064 + Elma Oy Electronic Trading + Mikko Ahonen + mikko.ahonen&elma.net +4065 + InfraServ GmbH & Co Gendorf KG + Bernhard Eberhartinger + bernhard.eberhartinger&gendorf.de +4066 + Carioli Consulting Inc. + Maurizio Carioli + carioli&netscape.net +4067 + Fivemere Ltd. + Richard Horne + rhorne&cabletron.com +4068 + Pathway Inc. + Jerry Fragapane + jerry_fragapane&pathway-inc.com +4069 + Ellison Software Consulting, Inc. + Mark Ellison + ellison&ieee.org +4070 + US West Internet Services + Paul Lundgren + paul&uswest.net +4071 + PRAIM S.p.A. + Beppe Platania + beppep&praim.com +4072 + Qeyton Systems AB + Per Svensson + Per.Svensson&qeyton.se +4073 + TeleDiffusion de France + Serge Marcy + serge.marcy&c2r.tdf.fr +4074 + Krutulis Enterprises + Joe Krutulis + joekltap&ibm.net +4075 + Sedona Networks + Susheel Jalali + susheel&sedonanetworks.com +4076 + Novera Software, Inc. + Michael Frey + mfrey&novera.com +4077 + The Limited, Inc. + Herb Berger + hberger&limited.com +4078 + Symon Communications + Raymond Rogers/Keith Roller + SNMP_admin&symon.com +4079 + 24/7 Media, Inc. + Rani Chouha + rani&riddler.com +4080 + Archangels Realty, Inc. 
+ Manny Caballero + manny&myarchangels.com +4081 + Zelea + Michael Allan + mallan&pathcom.com +4082 + Timko Enterprises + Steve Timko + steve&timko.com +4083 + Spark New Zealand (formerly 'Telecom New Zealand') + IAM Team + iamteam&spark.co.nz +4084 + Trilogy Development Group + David Meeker + noc&trilogy.com +4085 + D.I.B. Ges. fuer Standortbetreiberdienste mbH + K. Bothmann + dibbothm&dbmail.debis.de +4086 + Ericsson Research Montreal (LMC) + Stephane Desrochers + lmcstde&lmc.ericsson.se +4087 + USWeb/CKS + David Smith + dsmith&uswebcks.com +4088 + aku awekku & co. + babu + aalang6&tl-79-92.tm.net.my +4089 + SALUtel + Alex Rabey + arabey&erols.com +4090 + Consorte Tele AS + Baard Bugge + baard&consorte.no +4091 + Infitel Italia srl + Angelo Primavera + a.primavera&infitel.it +4092 + Computel Standby BV + Sander Steffann + s.steffann&computel.nl +4093 + Merlot Communications, Inc. + Tom Coryat + tcoryat&merlotcom.com +4094 + Quality Tank & Construction Co. Inc. + Arthur Koch + box1&qualitytank.com +4095 + Axent Technologies + David Heath + dheath&axent.com +4096 + Thales e-Security + Callum Paterson + callum.paterson&thales-esecurity.com +4097 + Elan Text to Speech + Cedric Buzay + cbuzay&elan.fr +4098 + Signaal Communications + O.G. Hooijen + o_hooijen&signaal.nl +4099 + Tristrata Inc. + Aziz Ahmad + aziz&tristrata.com +4100 + Wavetek Wandel Goltermann + Pierre Monfort + pierre.monfort&wago.de +4101 + John Hancock Financial Services + Laura Glowick + lglowick&gateway-1.jhancock.com +4102 + SHYM Technology Inc. + Henry Tumblin + tumblin&shym.com +4103 + CNN + David C. Snyder + David.Snyder&turner.com +4104 + Redwood Technology B.V. + Leen Droogendijk + leen&redwood.nl +4105 + Stargus, Inc. + Jason Schnitzer + jason&stargus.com +4106 + Astrophysikalisches Institut Potsdam + Andre Saar + ASaar&aip.de +4107 + Beijing Telecom Administration, China + Bin Zou + zoubin&axp.nmefc.gov.cn +4108 + Serendip + Robert Barrett + bobbarrett&earthlink.net +4109 + Durango Security Group + Kelly Edward Gibbs + e_gibbs&hotmail.com +4110 + Softstart Services Inc. + Thomas Jones-Low + tjoneslo&softstart.com +4111 + Westell (UK) Ltd + Darren Wray + darren.wray&westell.co.uk +4112 + Tunitas Group + Bill Pankey + tunitas&earthlink.net +4113 + Tenor Networks, Inc. + Caralyn Brown + cbrown&tenornetworks.com +4114 + T10.net + Tom Nats + admin&t10.net +4115 + Arris Interactive LLC + Robert Wynn + robert.wynn&arris-i.com +4116 + InfoInterActive Inc. + Trent MacDougall + Trent.MacDougall&InfoInterActive.Com +4117 + Entertainment Systems Technology + James Murray + murray&entsystech.com +4118 + Tesla Liptovsky Hradok a.s. + Ing. Tibor Racko + racko&teslalh.sk +4119 + Remote Management Systems Pty Ltd + Michael Cannard + mcannard&rmsystems.com +4120 + Sonik Technologies Corp. + David Fulthorp + david&sonik.com +4121 + Digital Chicago.net + Jaidev Bhola + jbhola&bcschicago.com +4122 + The University of Texas Health Science Center at Houston + Barry R Ribbeck + support&uth.tmc.edu +4123 + Hitachi America Ltd + Roy Pillay + roy.pillay&hal.hitachi.com +4124 + Unify Consulting Group, Inc. + Derrick Arias + derrick&unifygroup.com +4125 + Siemens AG ICP + Kornelius Nehling + kornelius.nehling&pdb.siemens.de +4126 + LogMatrix Inc (formerly 'Open Service') + Chris Boldiston + techsupport&logmatrix.com +4127 + Mercury Computer Systems, Inc. + Yan Xiao + yxiao&mc.com +4128 + ARM Ltd. 
+ Jon Brawn + jbrawn&arm.com +4129 + Mnaccari@consulting + Michael Naccari + Mnaccari_consulting&worldnet.att.net +4130 + Microwave Data Systems + Robert Broccolo + bbroccolo&mdsroc.com +4131 + Tridium + Bill Smith + bsmith&tridium.com +4132 + Connecticut Hospital Association + Wing-Yan Leung + Leung&chime.org +4133 + philipjeddycpas + Alun Hughson + hughson&bendigo.net.au +4134 + Philip J. Eddy & Partners Pty Ltd + Alun Hughson + hughson&bendigo.net.au +4135 + NiceTec GmbH + Joerg Paulus + tech&nicetec.de +4136 + UQAM | Université du Québec à Montréal + Stéphane Talbot + talbot.stephane&uqam.ca +4137 + GAURI Info-Comm.Inc. + Se Youn Ban + syban&www.gauri.co.kr +4138 + Sychron Ltd + Vasil Vasilev + vasil&sychron.com +4139 + WapIT Ltd. + Mikael Gueck + mikael.gueck&wapit.com +4140 + Computer & Telephony Systems AB + Rickard Ekeroth + rickard.ekeroth&cts.se +4141 + IMR Worldwide Pty Ltd + Matthew Donald + mdonald&imrworldwide.com +4142 + FDS Networks Limited + Ivan Chan + ichan&hk.super.net +4143 + M&I Data Services + Alexander Senkevitch + alex.senkevitch&midata.com +4144 + PricewaterhouseCoopers-FP5 + Dean Scotson + dean.z.scotson&uk.pwcglobal.com +4145 + Cristie Data Products Limited + Arumugam + aru&cristie.com +4146 + GlobalSign NV/SA + GlobalSign Product Management + pm&globalsign.com +4147 + Zuercher Kantonalbank + Roman Bischoff + roman.bischoff&zkb.ch +4148 + DeWitt, Ross &Stevens, S.C. + Suzanne Arbet + sea&dewittross.com +4149 + Accrue Software, Inc. + Bob Page + bob.page&accrue.com +4150 + Northwestern Mutual + Chris Eaton + chriseaton&northwesternmutual.com +4151 + iMPath Networks Inc. + Hussein Said + hsaid&impathnetworks.com +4152 + Two Way TV + Jason Malaure + jmalaure&twowaytv.co.uk +4153 + Simple Networks + Kevin Schmidt + kjs&vagrant.org +4154 + Versa Technology, Inc. + Calvin Hsieh + calvinh&versatek.com +4155 + Cemoc Ltd + Richard Day + richard&cemoc.co.uk +4156 + New Access Communications, Inc. + Kamran Ghane + kghane&new-access.com +4157 + BCE Emergis + Nicolas Viau + nicolas.viau&emergis.com +4158 + Milman Consulting + Harris Milman + harris&singlepointsys.com +4159 + Electronic Theatre Controls, Inc. + John Freeborg + jfreeborg&etcconnect.com +4160 + VIVE Synergies Inc. + Xiangfeng Liu + xfliu&vive.com +4161 + Qtera Corporation + Wei Li + li&qtera.com +4162 + Input Output Inc. + Kim Le + Kim_Le&i-o.com +4163 + Quarry Technologies + Mark Duffy + mduffy&quarrytech.com +4164 + Reserved + RFC-pti-pen-registration-10 + ---none--- +4165 + SE Electronics + Jasper Rijnsburger + Jasper.Rijnsburger&Salland.com +4166 + The Mercy Foundation + Robert Lindsay + webmaster&themercyfoundation.org +4167 + Markus Lauer IT Consulting + Markus Lauer + pen-contact&lauerit.de +4168 + Systems Management Specialists + Todd Jackson + toddj&smsnet.com +4169 + Syzygy Solutions + Jeff Lowcock + Jeff.Lowcock&syzygysolutions.com +4170 + University of Wisconsin-Milwaukee + Jeffrey Olenchek + jeff&uwm.edu +4171 + BOSUNG Data Communication Co. + Jong-Hyuck Sun + sunny34&hotmail.com +4172 + Narae Information & Communication Enterprise + H.S. Hwang + hhs&naraeinfo.com +4173 + VIERLING Communication S.A.S. + Eric Levenez + eric.levenez&vierling-group.com +4174 + Critical Devices, Inc. + Andrew Levi + alevi&aztecsystems.com +4175 + Tessler's Nifty Tools + Gary Tessler + GaryTessler&NiftyTools.com +4176 + Newton Solutions + Duncan Walker + duncan&Personal2.freeserve.co.uk +4177 + Redding Enterprises + J.D. 
Redding + reddi&carrollsweb.com +4178 + Aspect Software + Maxim Tseitlin + mtseitlin&AspectSoft.com +4179 + Commercial Technologies Corp + H. Sublett + sublett&ct-corp.com +4180 + Talkstar.Com Inc. + Wendell Brown + comments&talkstar.com +4181 + Generic Telecom Ltd. + Alexandre Dulaunoy + adulau&unix.be.EU.org +4182 + Leningrad Nuclear Power Plant + Vladimir Polechtchouk + pve&laes.sbor.ru +4183 + Hammer Technologies + Albert Seeley + aseeley&hammer.com +4184 + SMS - Shared Medical Systems, Inc. + Mike Flannery + Mike.Flannery&smed.com +4185 + Boostworks + Fabrice Clara + fclara&boostworks.com +4186 + AKA Consulting, Inc. + Rex Powell + aqalliance&worldnet.att.net +4187 + Storage Area Networks Ltd + Nigel Squibb + n.squibb&san.com +4188 + Realize Communications + Michael Ginn + vendor13&realize.com +4189 + EGIS K.K. + Vanessa Cornu + egis&twics.com +4190 + ETRADE Securities + Allen Lung + alung&etrade.com +4191 + SIGOS Systemintegration GmbH + Martin Loehlein + Martin.Loehlein&sigos.de +4192 + Tennessee Valley Authority + Philip Stewart + pstewart0&tva.gov +4193 + VUT BRNO, faculty of EE and CS + Petr Penas + xpenas00&stud.fee.vutbr.cz +4194 + Leonia plc + Tapio Paronen + tapio.paronen&tietoleonia.fi +4195 + TeleSoft Inc. + Vinod Chandran + vinod&indts.com +4196 + Siemens AG Automation & Drives + Harald Thrum + harald.thrum&khe.siemens.de +4197 + Cap Gemini Denmark A/S + Jesper Goertz + jesper.goertz&capgemini.dk +4198 + VPN Consortium + Paul Hoffman + paul.hoffman&vpnc.org +4199 + Cellnet Technology, Inc. + Jim Coburn + jim.coburn&cellnet.com +4200 + Fast Search & Transfer + Oystein Haug Olsen + Oystein.Haug.Olsen&fast.no +4201 + Tonna Electronique + Gilles BOUR + g.bour&tonna.com +4202 + Samwoo Telecommunications Co., Ltd. + Kwang-Sung Kim + raider&neptune.samwoo.co.kr +4203 + The OpenLDAP Foundation + Kurt Zeilenga + Kurt&OpenLDAP.Org +4204 + Adtec Co., Ltd. + Park Kyung Ran + orchid&adtec.co.kr +4205 + Durak Unlimited + Sean Dreilinger + sean&durak.org +4206 + MITA Industrial Co., Ltd. + Tokimune Nagayama + tokky&mita.co.jp +4207 + Daydream Promotions + Robert Gladson + buster&daydreampromotions.com +4208 + Tantivy Communications, Inc. + Andy Stein + astein&tantivy.com +4209 + CDVS Inc. + Bob Woodard + bwoodard&ican.net +4210 + Assumption University + Wanchat Chesdavanijkul + khunwanchat&youvegotmail.net +4211 + Corillian + Jeff Grossman + jeffg&corillian.com +4212 + SPM - Systementwicklung und Projektmanagement GmbH + Piet Quade + office&spm.de +4213 + Infrax Inc. + Benoit Dicaire + Benoit.Dicaire&Infrax.Com +4214 + Sunquest Information Systems, Inc. + Michael Buchanan + Michael.Buchanan&sunquest.com +4215 + SilkRoad, Inc. + Oliver Severin + oliver.severin&silkroadcorp.com +4216 + Triton Network Systems + Jeff Truong + jtruong&triton-network.com +4217 + Opalis + Laurent Domenech + laurent&opalis.com +4218 + Gecko Software Limited + Tony Smith + tony&geckoware.com +4219 + regioconnect GmbH + Michael Rueve + rueve&regioconnect.net +4220 + ARCANVS + Kepa Zubeldia + kepa&arcanvs.com +4221 + Soundscaipe + Chuck Shea + chuckshea&aol.com +4222 + OnDisplay Incorporated + Patrick McMahon + patrick&ondisplay.com +4223 + Milgo Solutions, Inc. + Frank DaCosta + frank_dacosta&milgo.com +4224 + John H. Harland Company + John Payne + jpayne&harland.net +4225 + Peach Networks + Paz Meoded + yossia&magicom.co.il +4226 + Composit Communications + Avi Philosoph + yossia&magicom.co.il +4227 + Sixtra Chile S.A. + Douglas Conley + dconley&sixbell.cl +4228 + DASCOM, Inc. 
+ Michael Powell + powell&dascom.com +4229 + Westfair Foods Ltd. + David Elving + delving&westfair.ca +4230 + Pirouette, Inc. + Julian Kamil + julian&us.net +4231 + OB Telematics + Kwaku Okyere-Boateng + okyere1&mdx.ac.uk +4232 + CTX Opto-Electronics Corp. + Router Hsu + Router&ctxopto.com.tw +4233 + Dalian Daxian Network System Co.ltd + Shalei + dxgh&public.dalian.cngb.com +4234 + Pragma Ltda. + Gregorio Alejandro Patino Zabala + gpatino&pragma.com.co +4235 + CultureShock Multimedia + Quintin Woo + quintin&cshock.com +4236 + Spectel Ltd. + John McBride + jmcbride&spectel.ie +4237 + Busby Software + Bob Busby + rbusby&cis.ksu.edu +4238 + Media Station Inc. + Laurence Kirchmeier + laurie&mediastation.com +4239 + Kommunedata A/S + Jette Dahl Bauer + jdb&kmd.dk +4240 + Vodafone Information Systems GmbH + Carsten Queren + carsten.queren&vodafone.com +4241 + Peakstone Corporation + Derek Palma + dpalma&peakstone.com +4242 + The Clorox Company + Paul Rarey + Paul.Rarey&Clorox.com +4243 + NPO Infoservice + Vladimir Vodolazkiy + vvv&remount.cs.msu.su +4244 + Cinnabar Networks Inc. + Stephen Klump + oidhound&cinnabar.ca +4245 + Posten SDS AS + Bard Prestholt + bard.prestholt&sds.no +4246 + Comverse Network Systems (CNS) + Roni Avni + Roni_Avni&icomverse.com +4247 + The University of Edinburgh + Dave Graham + hostmaster&ed.ac.uk +4248 + Interconexion Electrica S.A. + Carlos Albeto Gomez Pineda + cagomez&redglobal.net +4249 + NATEKS Ltd. + Alex Rousnak + alex&nateks.ru +4250 + H.I.T. Industries Sales Ltd + A. Tawil + ait2334&aol.com +4251 + System Design Repair + Tuoc N. Bui + tuckyb&sdrep.com +4252 + High Speed Access + Jim Pang + JimP&hsacorp.net +4253 + LuxN, Inc. + Mark Cvitkovich + markc&luxn.com +4254 + arvato systems GmbH + Holger Simons + IANA&arvato-systems.de +4255 + Perfecto Technologies + Eytan Segal + eytan.segal&perfectotech.com +4256 + Kipling Information Technology AB + Reine Beck + reine.beck&kipling.se +4257 + Cyberstation, Inc. + Dan Walters + djw&cyberstation.net +4258 + Open4Rent + Dusty Hunt + goodlovn97&aol.com +4259 + FDP + Jack O'Neill + jack&germanium.xtalwind.net +4260 + 3rd Millennium Consulting + Michael Naccari + Millennium_Consulting&worldnet.att.net +4261 + Globol Solutions + Naveed Anwar + NaveedAnwar&consultant.com +4262 + Jean-Claude Metal Craft + Jean E. Powell Sr. + hoza&bellsouth.net +4263 + Dr. Andreas Muller, Beratung und Entwicklung + Dr. Andreas Muller + afm&othello.ch +4264 + World Ramp, Inc. + Rob McKinney + rob&worldramp.net +4265 + Tachion Network Technologies Inc. + Cheenu Srinivasan + cheenu&tachion.com +4266 + FernUniversitaet Hagen + Carsten Schippang + carsten.schippang&fernuni-hagen.de +4267 + Transcend, Inc. + Craig Watkins + crw+nic&transcend.com +4268 + BancTec Computer and Network Services + Sam James + jamessp&sce.com +4269 + WorldPort Communications, Inc. + Russell Cook + russell.cook&wrdp.com +4270 + EDV-Beratung Blechschmidt + Robert Hoehndorf + robert.hoehndorf&it-systems.de +4271 + Nevex Software Technologies Inc. + Irving Reid + irving&nevex.com +4272 + Exact Solutions, Inc. + Anand Mohanadoss + anand&exact-solutions.com +4273 + LEROY AUTOMATIQUE INDUSTRIELLE + Olivier Barthel + Olivier.Barthel&leroy-autom.com +4274 + Pangolin UK Ltd + Helen Pownall + helen_pownall&pangolin.co.uk +4275 + Duke University + Suzanne P. Maupin + suzanne.maupin&duke.edu +4276 + PCS Innovations Inc. + Robert Leclair + robert.leclair&pcsinnov.com +4277 + Telocity Communications, Inc. + Creighton Chong + cchong&telocity.net +4278 + Yahoo! 
+ Derek Balling + dballing&yahoo-inc.com +4279 + Sirocco Systems + Douglas Uhl + Douglas.Uhl&siroccosystems.com +4280 + ARtem GmbH + Michael Marsanu/Catrinel Catrinescu + mma&artem.de + cca&artem.de +4281 + Assumption + Wanchat Chesdavanijkul + khunwanchat&hotmail.com +4282 + KenCast Inc. + Kamen Ralev + kralev&kencast.com +4283 + Dual-Zentrum GmbH + Axel Simroth + asimroth&dual-zentrum.de +4284 + Norweb Internett Tjenester + Ronny Berntzen + ronny&norweb.no +4285 + Ruslan Communications + Dmitry Shibayev, Alexandr Gorbachev + dmitry&ruslan-com.ru + alex&ruslan-com.ru +4286 + e-Security, Inc. + Christopher Wilson + chris.wilson&esecurityinc.com +4287 + Philips Consumer Electronics + Wim Pasman + w.pasman&pbc.be.philips.com +4288 + Forge Research Pty Ltd + David Taylor + DavidTaylor&forge.com.au +4289 + VBrick Systems, Inc. + Richard Phillips + shaggy&aya.yale.edu +4290 + Logic Innovations, Inc. + Leon Havin + lhavin&logici.com +4291 + Chordiant Software Inc. + Joe Tumminaro + joe&chordiant.com +4292 + Entera, Inc. + John Bell + johnbell&entera.com +4293 + Bensons + Charles + mailman&sandbach.force9.co.uk +4294 + Salbu (Pty) Ltd + Mark Larsen + mark&larsenhome.com +4295 + Kalki Communication Technologies Pvt Ltd + Prasanth Gopalakrishnan + prasanth&kalkitech.com +4296 + Cenfor S.L. + Francisco Mora + fmoras&interbook.net +4297 + Finisar Corporation + Patrick Wong + pwong&finisar.com +4298 + ETC (Excellence in Technology Consulting) + Raymond Wm. Morgan + ETC1792&aol.com +4299 + Open Solution Providers + Erik Meinders + erik&osp.nl +4300 + Inmon Corp. + Peter Phaal + pp&ricochet.net +4301 + UniServe Consulting Limited + Bernard Ingram + contact&uniserveconsulting.com +4302 + Cybex Computer Products Corporation + Paul Durden + Paul.Durden&cybex.com +4303 + CamART + Aparimana + ap&camart.co.uk +4304 + CSIRO - Div. of Animal Health + Dave Maurer + david.maurer&dah.csiro.au +4305 + University of Maryland + Bruce Crabill + bruce&umd.edu +4306 + JMDEL Systems + Kevin Castner + kcc&jmdel.com +4307 + Office Connect, Inc. + Kevin Castner + kevinc&officeconnect.com +4308 + Consejo Superior de Camaras + Ramiro Munoz Munoz + ramirom&camerdata.es +4309 + IP Technologies + Jeffrey Elkins + jeff&iptec.net +4310 + Unique Computer Services, Inc. + Francis Santitoro + fts&unique-inc.com +4311 + Equinix + Diarmuid Flynn + flynn&equinix.com +4312 + VITA Systems, Inc. + David Wu + davidw&vitasystems.com +4313 + Allayer Technologies + Yongbum Kim + ybkim&allayer.com +4314 + Xilinx, Inc. + John Donovan + john.donovan&xilinx.com +4315 + XACCT Technologies, Ltd. + Yuval Tal + yuvalt&xacct.com +4316 + Brandeis University + Rich Graves + hostmaster&brandeis.edu +4317 + Javelin Technology Corp. + Warren Golla + war&javelintech.com +4318 + Edixia + Francoise Nevoux + f.nevoux&edixia.fr +4319 + Ennovate Networks, Inc + Lazarus Vekiarides + laz&ennovatenetworks.com +4320 + Freshwater Software, Inc. + Pete Welter + pete&freshtech.com +4321 + Riverbed Technologies + David Liu + dliu&riverbedtech.com +4322 + Murata Machinery, Ltd. + Yoshifumi Tanimoto + ytanimoto&muratec.co.jp +4323 + Quantum Bridge + Marat Vaysman + mvaysman&quantumbridge.com +4324 + SAEJIN T&M Co., Ltd. + Seo,Hee-Kyung + icarus&sjtm.co.kr +4325 + Aperto Networks + Welson Lin + wlin&apertonet.com +4326 + Crown International + Bruce Vander Werf + bvanderwerf&crownintl.com +4327 + Trading WorldCom + Seth Longo + longo&trading-world.com +4328 + M.I. Systems, K.K. + Takatoshi Ikeda + ikeda&misystems.co.jp +4329 + Siemens AG + Mrs. 
Daniela Gajdosik + daniela.gajdosik&siemens.com +4330 + PMC-Sierra Inc. + Paul Chefurka + Paul_Chefurka&pmc-sierra.com +4331 + Aventail Corporation + Mrs. Reshma Jadhav + rjadhav&aventail.com +4332 + Institute of Systems & information Technologies/KYUSHU + Yuji SUGA + suga&k-isit.or.jp +4333 + Insight Technology, Inc. + Masaya Ishikawa + mishikaw&insight-tec.co.jp +4334 + Ampersand Chantilly Communications + Paul Lonsdale + tdocs&idirect.com +4335 + TechWorld Incorporated + Paul Lonsdale + tdocs&idirect.com +4336 + Expertech Pty Ltd + Caevan Sachinwalla + caevan&expertech.com.au +4337 + RadiSys Corp. + Judi Linger + Judi.Linger&RadiSys.com +4338 + Case Corporation + Charles Berry + cberry&casecorp.com +4339 + Rhode Island Economic Development Corporation + Tarek Farid + tfarid&riedc.com +4340 + Bacteriophage Lambda + Vibha Sazawal + vibha&cs.washington.edu +4341 + Spider Internet Services + Rob Ladouceur + rob&tec.puv.fi +4342 + USHealth Real Estate + Gary Jabara + gjabara&ushealth.org +4343 + Boundless Technologies + Mark Dennison + mark.dennison&boundless.com +4344 + Post-Industrial Training Institute + Bill Reed + WDReed&compuserve.com +4345 + Thomas & Betts + Bob Shaeffer + bob_shaeffer&tnb.com +4346 + Phoenix Contact GmbH & Co. + Frank Schewe + fschewe&phoenixcontact.com +4347 + MessageWise Inc. + David Jones + djones&messagewise.com +4348 + Domino Computers Nigeria Ltd + Woye Adeyemi + domino&micro.com.ng +4349 + LXCO Technologies AG + Juergen Frank + jfrank&lxco.com +4350 + Maxpert AG + Wolfgang Erb + Wolfgang.Erb&maxpert.de +4351 + Network Systems Group + Danilin Michael + info&nsg-ru.com +4352 + Urgle + Derek Olsen + dolsen&gstis.net +4353 + Builders Network Ltd + Colin + look&netlet.netkonect.co.uk +4354 + NetDragon Ltd + Robert Waters + icann&rwater.globalnet.co.uk +4355 + RapidStream, Inc. + James Lin + james_lin&rapidstream.com +4356 + inform solutions GmbH + Andre Beckers + andre.beckers&scutum-group.com +4357 + Coteng + Hans Verrijcken + Hans.Verrijcken&Coteng.com +4358 + Ziatech Corporation + Jason Varley + jason_varley&ziatech.com +4359 + TelGen Corporation + James Trithart + trithart&telgen.com +4360 + Tumbleweed Communications + Jesus Ortiz + Jesus.Ortiz&tumbleweed.com +4361 + Amgen, Inc. + Yalda Mirzai + ymirzai&amgen.com +4362 + Nylorac Software, Inc. + Albert Fitzpatrick + ajf&oid.nylorac.com +4363 + University of Bristol + Julius Clayton + Julius.Clayton&bristol.ac.uk +4364 + BV Solutions Group + Ram Rathan/Terry Nance + rathanr&bvsg.com +4365 + Myowngig + F. Taylor + comst&earthlink.net +4366 + Locus Corp. + Jaeyoung Heo + jyheo&locus.co.kr +4367 + Electronic Laboratory Services CC + Tony Kempe + kempet&elab.co.za +4368 + H.A.N.D. GmbH + Klaus Kaltwasser + kaltwasser&hand-gmbh.de +4369 + Brocade Communications Systems, Inc. (formerly 'McDATA,Inc') + Scott Kipp + skipp&brocade.com +4370 + Tokyo DisneySea + Joseph C. Hu + joseph.hu&disney.com +4371 + Digital United Inc. + Ching-Wei Lin + cwlin&mozart.seed.net.tw +4372 + Softlink s.r.o. + Peter Volny + Peter.Volny&softlink.cz +4373 + Rivere Corporation + Herve Rivere + rivere&prodigy.net +4374 + Motive Communications, Inc. + Jerry Frain + jerry&motive.com +4375 + DT Research, Inc + Jason Lin + jason.lin&usa.net +4376 + Nettech Systems, Inc. 
+ Tatiana Landa + tanya&nettechrf.com +4377 + X-Point Communications + David Hoerl + dfh&home.com +4378 + ---none--- + ---none--- + ---none--- +4379 + Alien Internet Services + Simon Butcher + simonb&alien.net.au +4380 + Elipse Software + Alexandre Balestrin Correa + abc&elipse.com.br +4381 + Astracon Inc. + Bryan Benke + bryan.benke&astracon.com +4382 + Aladdin Knowledge Systems Ltd. + Shimon Lichter + shimonl&aks.com +4383 + Glassey.com + Todd Glassey + todd.glassey&Glassey.COM +4384 + Meridianus + Todd Glassey + todd.glassey&meridianus.com +4385 + Stime.org WG + Michael McNeil + Michael.McNeil&STime.ORG +4386 + TDC A/S + Kristen Nielsen + krn&tdc.dk +4387 + Ubique Ltd. + Yaron Yogev + yaron&ubique.com +4388 + Alcatel Altech Telecoms + Tinus Viljoen + tviljoen&alcatel.altech.co.za +4389 + Sys-Dis + Sebastien David + sdavid&sysdis.fr +4390 + Kemper Insurance + Cindy Weng + cweng&kemperinsurance.com +4391 + Texas A&M University + Division of Information Technology + identity&tamu.edu +4392 + Northbrook Services, Inc. + S. Lane Pierce + lpierce&nbservices.com +4393 + Pentacom Ltd. + Eldad Bar-Eli + eldadb&penta-com.com +4394 + SoftFx + Maikel Maasakkers + M.A.M.Maasakkers&stud.tue.nl +4395 + Unified Technologies Sverige HB + Daniel Sorlov + daniel&unified-technologies.com +4396 + The Open Group + Shane McCarron + s.mccarron&opengroup.org +4397 + OPT Technologies Limited + Steve Brandom + steve.brandom&ctgholdings.co.uk +4398 + B&L Associates, Inc. + Thomas Julian + tjulian&bandl.com +4399 + Johnson Controls, Inc. + Clark Bobert + Clark.L.Bobert&jci.com +4400 + Cypress Corporation + Paul Vagnozzi + pvagnozzi&cypressdelivers.com +4401 + MoonVine + Christine Tomlinson + chris&moonvine.org +4402 + NetPredict, Inc + Jonathan Wilcox + Jonathan.Wilcox&netpredict.com +4403 + Visual Brain Ltd S.a.r.l. + George M. Doujaji + vbrain&vbrain.com.lb +4404 + Tekelec + Francois Cetre + francois.cetre&tekelec.com +4405 + Ansid Inc. + Daniel Niederhauser + daniel.niederhauser&ansid.ch +4406 + Toyo Information Systems Co., Ltd. + Yasushi Okamura + yokamura&kingston.tis.co.jp +4407 + Dracom Ltd. + Shen Zhenyu + Dracom&Public1.sta.net.cn +4408 + EDSL + Alex Oberlander + alexo&edsl.com +4409 + Campus Pipeline, Inc. + Jan Nielsen + jnielsen&campuspipeline.com +4410 + Earth Star Group + Gary Ellis + gerardo5&flash.net +4411 + Swinburne.com + Robert Briede + 104619&wilbur.ld.swin.edu.au +4412 + Wrox Press + John Franklin + johnf&wrox.com +4413 + Broadcom Limited + Broadcom Assigned Numbers and Naming Authority + mibs.pdl&broadcom.com +4414 + Scandinavian Softline Technology Oy + Kari Kailamaki + kari.kailamaki&softline.fi +4415 + Florida Department of Law Enforcement + James L. Geuin + jimgeuin&fdle.state.fl.us +4416 + Starfire Experts Ltd + Michael Bennett + mjb&world.std.com +4417 + Alidian Networks, Inc. + Derek Pitcher + dpitcher&terabitnetworks.com +4418 + MegaSys Computer Technologies + Doug Woronuk + doug.woronuk&megasys.com +4419 + Sony Online Entertainment + Mark Kortekaas + mis&station.sony.com +4420 + Westica Limited + Eugene Crozier + eugenec&westica.co.uk +4421 + Santera Systems Inc. + Cathy Fulton + cathy.fulton&santera.com +4422 + GTE I.T. + Kevin Mathis + kevin.mathis&telops.gte.com +4423 + Garnet Systems Co., Ltd. + Dong Hee Lee + leedong&garnets.com +4424 + Rapid Logic + Kedron Wolcott + kedron&rapidlogic.com +4425 + Meta Gymnastics, Inc. 
+ De Kang Deng + TomDeng&metagym.com +4426 + Fujitsu Australia Software Technology Pty Ltd + Robert Dowe + bob&fast.fujitsu.com.au +4427 + Pironet Intranet AG + Robert Stupp + rstupp&piro.net +4428 + Supercomputing Systems AG + Martin Frey + frey&scs.ch +4429 + MegaChips Corporation + Shigenori Motooka + motooka&megachips.co.jp +4430 + Silicon Automation Systems (India) Ltd + Santosh Xavier + santosh&sasi.com +4431 + Netia + Eric Cocquerez + e.cocquerez&netia.fr +4432 + Apani Networks + Neal Taylor + ianareg&apani.com +4433 + Strategic Financial Planning + Sam DeLuca + sam_deluca&hotmail.com +4434 + Bluestone Software Inc. + Susan Lindeboom + susan&bluestone.com +4435 + Suedtiroler Informatik AG + Klaus Vonmetz + sysadmin&provinz.bz.it +4436 + Mission Critical + Pierre De Boeck + pde&miscrit.be +4437 + Canadian Imperial Bank of Commerce + Jack Dickie + Jack.Dickie&CIBC.com +4438 + Göteborg Energi AB + Peter Karlsson + peter.karlsson&goteborgenergi.se +4439 + EnZane Enterprise + Terry Doherty + terren&desupernet.net +4440 + Purdue University + Rob Stanfield + iamo&purdue.edu +4441 + GE Capital Fleet Services + Thomas Cooper + thomas.cooper&fleet.gecapital.com +4442 + KARA + Wendy + wendy&aol.com +4443 + Ned Boddie & Assoc. + Ned Boddie + nb&myhq.org +4444 + SAINCO + Javier Amores + fjag&sainco.abengoa.com +4445 + INTER s.a.r.l. + Tufic Najjar + tufic&inter.net.lb +4446 + Prairie Development, Inc. + Jeffrey Muller + jeffm&prairiedev.com +4447 + Rochester Institute of Technology + Michael Young + Michael.Young&rit.edu +4448 + E-Lock Technologies, Inc. + Ray Langford + ray&elock.com +4449 + SSH Communications Security, Inc. + Rodney Thayer + rodney&ipsec.com +4450 + iC-Consult + Roland Fichtl + Fichtl&ic-consult.de +4451 + MORION + Nikolai Korelin + support&pi.ccl.ru +4452 + Telenor 4tel + Jan Ivar Nymo + jan-ivar.nymo&telenor.com +4453 + Infonet Services Corp. + Clark Rudder + Clark_Rudder&infonet.com +4454 + Gottfried Web and Computer Consulting + Hal Gottfried + hal&gottfried.com +4455 + I-Bus Corporation + Frank MacLachian + frankm&ibus.com +4456 + AWI (formerly 'Qualimetrics') + Neal Dillman + ndillman&allweatherinc.com +4457 + O ROCK Outerwear + B. Maddigan + bmaddigan&yahoo.com +4458 + Radwin Ltd. + Shumel Vagner + shmuel_v&rad.co.il +4459 + Industree B.V. + Jan Vet + Jan.Vet&industree.nl +4460 + FirstWorld Communications + Dean Franklin + dean.franklin&firstworld.com +4461 + OpenNetwork Technologies + Randy Sturgill + rsturgill&pobox.com +4462 + SVM Microwaves, s.r.o. + Jiri Smitka + smitka&icom.cz +4463 + TaoNet + Maccucari Carlo + c.mammucari&taonet.it +4464 + MPB Communications Inc. + Support + support&mpbc.ca +4465 + ViewCast.com + Kevin Conley + KevinC&dfw.viewcast.com +4466 + Harmonic Video Network (formerly 'Tadiran Scopus') + Merav Ben-Elia + merav.ben-elia&harmonicinc.com +4467 + FibroLan + Israel Stein + yossia&magicom.co.il +4468 + Telkoor-QPS + Beny Steinfeld + yossia&magicom.co.il +4469 + Diversinet Corp. + Stephen Klump + oidauth&dvnet.com +4470 + TeleDream Inc. + B.C. Kim + bckim&teledream.co.kr +4471 + Network Security Wizards + Ron Gula + rgula&securitywizards.com +4472 + MONTAGE IT Services Inc. + Peter Lui-Hing + peter.lui-hing&montage.ca +4473 + Opto 22 + Kevin Kuhns + kkuhns&opto22.com +4474 + PaxComm + Kim Hwa Joon + joon21&paxcomm.com +4475 + Rainbow Software Solutions, Inc. 
+ Arlen Hall + arlen&rainbowsoftware.com +4476 + Lightrealm + Erik Anderson + eanderson&lightrealm.com +4477 + Infocom Systems Services + Rajesh Nandan + rajesh&infocomsystems.com +4478 + Alacritech + Richard Blackborow + richard&alacritech.com +4479 + SpectraPoint Wireless LLC + Markus Weber + mweber&BoschTelecomInc.com +4480 + FastForward Networks, Inc. + Bill DeStein + bill&ffnet.com +4481 + CIA Europe + Jacques Pernet + jacques.pernet&skynet.be +4482 + RWE AG + Mr. Dietrich + timo.dietrich&RWE.DE +4483 + IBI Co., Ltd. + Lee Pan-Jung + ibi3&ibi.net +4484 + Pacific Softworks, Inc. + Leonard Gomelsky + leonard&pacificsw.com +4485 + Dataport Communications + Paul Ramos + paul&applehill.net +4486 + Verio Web Hosting + Jennifer Johnson + jenny&iserver.com +4487 + Johnson & Johnson NCS + Bob Rudis + brudis&ncsus.jnj.com +4488 + MediaHouse Software Inc + Peter Cooper + pcooper&mediahouse.com +4489 + Sierra Networks, Inc. + Zeta Division + Lisa Moyer + lisam&zeta-sni.com +4490 + POLYGON Consultants in Informatics Ltd. + Zoltan Kolb + kolb&polygon.hu +4491 + Cable Television Laboratories, Inc. + Jean-Francois Mule + jf.mule&cablelabs.com +4492 + SolutionSoft Systems, Inc. + Eric Bruno + ebruno&solution-soft.com +4493 + UniRel Sistemi srl + Mauro Fantechi + mauro.fantechi&unirelsistemi.it +4494 + Novartis AG + Eric Luijer + eric.luijer&novartis.com +4495 + Taima Corp. + Owen Peterson + opeterso&taima.net +4496 + Siemens Canada Ltd. + Roland Quandieu + roland.quandieu&innovation.siemens.ca +4497 + Avail Networks, Inc. + Don Zick + dzick&nei.com +4498 + NetQoS, Inc. + Cathy Fulton + fulton&netqos.com +4499 + Safefunds.com + Jere Horwitz + jvh&jvhinc.com +4500 + Jordan Tech + Rashid Ahmed + RASHID&JORDAN.COM.CO +4501 + EforNet Corporation + David Zucker + diz&earthlink.net +4502 + playbeing.org + Bert Driehuis + driehuis&playbeing.org +4503 + Corporate Information Technologies + Lawrence Cruciana + lawrence&corp-infotech.com +4504 + Seamless Kludge Internetworking Labs Ltd + Craig Haney + craig&seamless.kludge.net +4505 + Caltex Australia Petroleum Pty Ltd + Rodd Jefferson + rjeffers&caltex.com.au +4506 + Channels Measurement Services + Dawie de Villiers + dawie&channels.co.za +4507 + The Miami Herald + Ricardo de la Fuente + lafuente&herald.com +4508 + Geeks Like Us + Shane O'Donnell + shaneo&cyberdude.com +4509 + Nakayo Telecommunications, Inc. + Seiji Takano + takano&itl.nyc.co.jp +4510 + Dracom + Wang Xiang + iamwangxiang&netease.com +4511 + Concord-Eracom Computer Security GmbH + Matthias Gaertner + mgaertner&concord-eracom.de +4512 + Sofreavia + Patrick Eidel + eidelp&sofreavia.fr +4513 + Terawave Communications, Inc. + Anatoly Kaplan + akaplan&terawave.com +4514 + Bank America + James Moskalik + jimmo&crt.com +4515 + PacketLight Networks Ltd. + Omri Viner + Omri_Viner&packetlight.com +4516 + SIAS + Antonio Storino + as&sias.it +4517 + Helius, Inc. + Jack Thomasson + jkt&Helius.COM +4518 + KMZ Consulting Group, Inc. + Kerry Carlin + kcarlin&shrike.depaul.edu +4519 + VERO Electronics Ltd. + Barry Maidment + bmaidment&apw-enclosures.com +4520 + Joohong Information and Communications + Seong Chan Jeon + scjeon&joohong.co.kr +4521 + Global ADSI Solutions, Inc. + Gary Steinmetz + gary.steinmetz&gladsis.com +4522 + Ontario Power Generation + Ken Strauss + ken.r.strauss&ontariopowergeneration.com +4523 + eXaLink Ltd. + Yoram Mizrachi + yoram&exalink.com +4524 + StorageSoft, Inc. + Doug Anderson + douga&storagesoft.com +4525 + Micron Technology, Inc. 
+ Robert Clayton + rclayton&micron.com +4526 + Netgear + Michael Shields + mshields&netgearinc.com +4527 + zeitgeist y2k01 Ltd. + Mark Weitzel + oidiana&zy2k01.com +4528 + 8x8 Incorporated + Chanan Shamir + chanans&8x8.com +4529 + Internet Service Dept, WorldTelecom Plc + Kenny Du + kenny.du&pmail.net +4530 + Tunbridge Wells Equitable Friendly Society Ltd + Nick Wickens + nick_wickens&twefs.co.uk +4531 + ON Technology Corporation + Robert Smokey Montgomery + smontgom&on.com + ---none--- +4532 + GVCTW Corporation + Susan Wang + suwang&gvc.com +4533 + Atcomm Corporation + Barry Dame + bdame&atcomm.com +4534 + onebox.com + Ross Dargahi + rossd&onebox.com +4535 + Javelinx Corporation + Warren Golla + war&javelintech.com +4536 + Digitech + Tom Quinlan + tquinlan&digitechinc.com +4537 + Planex Communications Inc. + Kyoko Ito + kito&plane.co.jp +4538 + Easybuy + Carleton Glover + slim&gateway.net +4539 + RemarQ Communities, Inc. + Robert Sparks + bsparks&remarq.com +4540 + Intelect Network Technologies Inc. + Weijun Lee + wlee&intelectinc.com +4541 + OutReach Technologies, Inc. + Rob Trainer + rtrainer&outreachtech.com +4542 + Alerting Specifications Forum + Steven Williams + steven.d.williams&intel.com +4543 + Digitellum, Inc. + Vanessa Irmarosa + digitellum&uswest.net +4544 + Gjensidige Forsikring + Roman Jost + roman.jost&gjensidige.no +4545 + Atlantis Software Inc + Rick Gordon + rick&atlantissoftware.com +4546 + AST Engineering Services, Inc. + George Krasovec + gkrasovec&astes.com +4547 + ATTO Technology, Inc. + David Cuddihy + dcuddihy&attotech.com +4548 + QuickStart Consulting Inc. + Michael Walsh + mww&warwick.net +4549 + WebGear, Inc. + Chris Stacey + chris.stacey&webgear.com +4550 + The Japan Electrical Manufacturers' Association + Hiroshi Inoue + hiroshi_inoue&jema-net.or.jp +4551 + Empirix, Inc + Jim Washburn + jwashburn&empirix.com +4552 + Wayport, Inc. + Jim Thompson + jim&wayport.net +4553 + NextCom K.K. + Masaki Takano + takano&nextcom.co.jp +4554 + Trisol Technologies + Russ Campbell + apep&host-209-215-54-15.pbi.bellsouth.net +4555 + Socomec Sicon Ups + Pancheri Ivo + csu&sicon-ups.com +4556 + Scali + Anders Liverud + al&scali.no +4557 + Qwest + Walt Haberland + walt.haberland&qwest.com +4558 + Euromove s.r.o. + Stanislav Parnicky + parnicky&euromove.sk +4559 + NVision + Stuart Antcliff + santcliff&nvision.co.uk +4560 + Shebang Networking + A. Lueckerath + al&shebang.org +4561 + OpenDOF Project, Inc. (formerly 'Panasonic Electric Works Laboratory of America, Inc./SLC Lab') + Bryant Eastham + protocol&opendof.org +4562 + Centermark Engineering LC + Tim Bowman + tim&cmark.com +4563 + Syllogi, Inc. + Harold E. Austin, Jr. + haustin&diversenet.com +4564 + Diverse Networks, Inc. + Harold E. Austin, Jr. + haustin&diversenet.com +4565 + Reserved + RFC-pti-pen-registration-10 + ---none--- +4566 + Cedelbank + Paul Rees + prees&cedelglobalservices.com +4567 + Cedel Global Services + Paul Rees + prees&cedelglobalservices.com +4568 + Cedel International + Paul Rees + prees&cedelglobalservices.com +4569 + Ensigma Ltd + Sue Brace + S.Brace&ensigma.com +4570 + NetEnterprise, Inc. + J. Toth + jtoth&netenterprise.com +4571 + JMCS, Inc. + J. Toth + jtoth&jmcs.com +4572 + Daedalus Corporation + J. Toth + jtoth&dcorp.com +4573 + SecureSoft Inc. + Jaeyeong Lee + jylee&securesoft.co.kr +4574 + Compu-Alvarado + Juan Alvarado + juan_alvarado&hotmail.com +4575 + MANi Network Co., Ltd. + Inho Lee + ihlee&maninet.co.kr +4576 + Corporacion ZIGOR S.A. 
+ Jeronimo Quesada + software&zigor.com +4577 + Internet Research + Daisuke Kato + webmaster&advan.net +4578 + SSE Telecom + David Peavey + david.peavey&sset.com +4579 + Vest Internett + Ragnar Kjorstad + post&vestdata.no +4580 + Diversified Business Group + Peter Lindsay + peter_lindsay&progressive.com +4581 + Seeburger GmbH + Maik Thraenert + m.thraenert&seeburger.de +4582 + World Telecom plc + Kenny Du + kenny.du&pmail.net +4583 + NetStar + Daniel Harrison + dharrison&netstarnetworks.com +4584 + Headhunters London Limited + Roy Huxley + ROY.HUXLEY&BTINTERNET.COM +4585 + Eel Valley Internet + Gregory Baird + gbaird&eelvalley.net +4586 + Enterprise Consulting Group + Mark Griffith + mdg&ec-group.com +4587 + Diamond Multimedia Systems, Inc. + Glenn Smith + glenns&diamondmm.com +4588 + Critical Path, Inc. + Tristan Horn + tristan+snmp&cp.net +4589 + DATAP Division of TCEnet Inc. + Quentin Shaw + qshaw&datap.ca +4590 + Zoom Telephonics, Inc. + Hume Vance + humev&zoomtel.com +4591 + SpaceCom Systems, Inc. + Wayne Van Sickle + wayne&spacecom.com +4592 + Frontier Communications + David Weiss + daw&frontierdev.com +4593 + SAET I.S. S.p.A. + Flavio Molinelli + fmoli&show.it +4594 + Saritel S.p.A. + Roberto Paris + paris&saritel.it +4595 + IS Production + Bernard Dugas + bernard.dugas&is-production.com +4596 + Videoframe Systems + Graeme Little + glittle&videoframesystems.com +4597 + Fiberview Technologies Inc. + Robert Y.H. Li + robertl&fiberview.com +4598 + JCampus + James Wallace + jwallace&jcampus.org +4599 + MIMSOFT + Miroslav Jergus + mimsoft&pobox.sk +4600 + Reserved + RFC-pti-pen-registration-10 + ---none--- +4601 + Cybertime Informatik GmbH + Rico Pajarola + pajarola&cybertime.ch +4602 + BEA Systems + Tom Eliason + tom.eliason&beasys.com +4603 + TERS Ltd. + Eugene Mikhailov + jhn&aha.ru +4604 + Beca Carter Hollings & Ferner Ltd + Mike Beamish + mbeamish&beca.co.nz + ---none--- +4605 + Toronto School of Business + Tran Tan + tsbk&golden.net +4606 + Information Security Agency Ltd. + Alexey Kuksenko + akuksenko&kit.kz +4607 + Software Shelf Technologies + Fernando Chapa + fernando&chapa.com +4608 + Harco Technology Ltd + Stuart Harvey + stuart.harvey&harco.co.uk +4609 + Seamless Technologies, Inc. + Robert Kane + rkane&seamlessti.com +4610 + Strategic Technologies + David Hustace + david.hustace&stratech.com +4611 + Digital Wireless Corporation + Greg Ratzel + gratzel&digiwrls.com +4612 + Baker Street Technologies Inc. + David Neto + neto&bakerstreettech.com +4613 + Sphere Communications Inc + Dave Niesman + dniesman&spherecom.com +4614 + Luminous Networks, Inc + Peter Jones + pjones&luminous.com +4615 + I-O Data Device, Inc. + Yoshinari Araki + yaraki&iodata.co.jp +4616 + ComputerJobs.com + Patrick McAndrew + patrick.mcandrew&computerjobs.com +4617 + MARCOMPUTER + Roberto Marconi + j.fiorani&wnt.it +4618 + ARMILLAIRE TECHNOLOGIES + Michael Wolf + mwolf&armillaire.com +4619 + e!reminder.com + Andrew Goldberg + cbarnett&nh.ultranet.com +4620 + Progressive Systems, Inc. + Ge' Weijers + ge&Progressive-Systems.Com +4621 + NSTOP Technologies Inc. + Claude Arbour + claude.arbour&nstop-tech.com +4622 + Legian Consultancy & Network Services + Rene Stoutjesdijk + r.stoutjesdijk&legian.nl +4623 + Lifix Systems Oy + Bjorn Andersson + bjorn&lifix.fi +4624 + Training for Tomorrow + James Smith + jimmydarts&aol.com +4625 + Mazone Systems + Michael Anderson + mikea&mazone.com +4626 + WestLB + Jonas Koch + jonas.koch&westlb.de +4627 + SAET IS s.r.l. 
+ Flavio Molinelli + fmoli&show.it +4628 + Xtra On-Line + Matt Reynolds + mreynolds&xol.com +4629 + Veraz Networks Inc. (formerly 'ipVerse') + Wing Lee + wlee&veraznet.com +4630 + FileTek, Inc. + Eugene Furtman + elf&filetek.com +4631 + homeloandotcom + Dan Draper + dandraper&msn.com +4632 + FreeMarkets + Bob Monroe + bmonroe&freemarkets.com +4633 + CQOS, Inc. + Andrew Corlett + bda001&concentric.net +4634 + VCON Telecommunications Ltd. + Tzvi Kasten + zvik&vcon.co.il +4635 + The VE Group + Atul Elhence + atul&ve-group.com +4636 + Intrust Software + Fernando Chapa + fernando&chapa.com +4637 + RGE, Inc. + Rex Walker + snmp-admin&rge-inc.com +4638 + SGI Soluciones Globales Internet + Antonio Cabanas Adame + acabanas&esegi.es +4639 + TETRAGONE S.A. + Gilles Parmantier + gparmant&tetragone.fr +4640 + Eckerd College + Edmund Gallizzi + gallizzi&eckerd.edu +4641 + Tellabs Inc (ADP) + Annie Dang + annie.dang&tellabs.com +4642 + Bel. Studio H. Sager + Hermann Sager + bel&daddeldu.de +4643 + nworks + Greg Stephens + greg&nworks.net +4644 + Wincom Technology Inc. + Kyong-min Shin + wincom2&wintelecom.com +4645 + Data Ductus AB + Stefan Wallin + stefan.wallin&dataductus.se +4646 + NetConvergence, Inc. + Andrew Chew + mollusc&mindspring.com +4647 + Internet Chess Club + Doug Luce + doug&chessclub.com +4648 + The Grateful Net + Bryan Levin + grateful&grateful.net +4649 + CPlane, Inc. + Diego Vinas + diego&cplane.com +4650 + Marc August International + Marc August + marc&marcaugust.com +4651 + Aztec Radiomedia + Gilles Misslin + gilles&aztecland.com +4652 + Technical University of Ilmenau + Herr Ritschel + thomas.springer&rz.tu-ilmenau.de +4653 + Precise Software Technologies Inc. + Sebastian Silgardo + sebast&psti.com +4654 + OCLC Online Computer Library Center, Inc. + Lora Chappelear-Pearson + chappele&oclc.org +4655 + TeleCheck International Inc. + Luis Ossorio + Luis.Ossorio&TeleCheck.com +4656 + Banco de Galicia y Buenos Aires + Carlos Eugenio Pace + eugenio.pace&bancogalicia.com.ar +4657 + Goodall Secure Services + Douglas W. Goodall + doug&goodall.com +4658 + Entertainment International, Inc. + Peter Bjorklund + peter&t-con.com +4659 + Teco Image Systems Co., Ltd. + Marconi Huang + marconi&tecoimage.com.tw +4660 + RCMS Ltd + Andrew Liles + andrew.liles&rcms.com +4661 + Spazio R&D + Lorenzo Buonomo + spazio.ds&primeur.com +4662 + Frank Matthiess + Frank Matthiess + frank&matthiess.de +4663 + decor metall GmbH + CO. KG + Frank Matthiess + Frank.Matthiess&decor-metall.de +4664 + Ark Research Corporation + James Bergsten + bergsten&arkres.com +4665 + Performance Design Limited + Michael Browning + mbrowning&pdltd.com +4666 + Itchigo Communications GmbH + Bodo Rueskamp + iana+spam&itchigo.com +4667 + Telperion Network Systems + Hayoung OH + dongle99&yahoo.com +4668 + Turning Point Technologies + Kevin Farrington + kfarrington&tptc.com +4669 + Muro Enterprises, Inc. + Christopher Muro + christopher&muro.org +4670 + National Computational Science Alliance + Randy Butler + rbutler&ncsa.uiuc.edu +4671 + Advanced Telecom Systems, Inc. + Kenneth Lai + kdl&dynres.com +4672 + US Healthcare PKI + Kepa Zubeldia + Kepa.Zubeldia&envoy.com +4673 + Wave Research N.V. + Jan Van Riel + jvanriel&waveresearch.com +4674 + Crunch Technologies BV + Ton Plooy + tonp&crunchtech.com +4675 + WK Audiosystems BV + N.A. Coesel + nctnico&cistron.nl +4676 + Healthaxis.com Inc. 
+ Larry Weber + lweber&healthaxis.com +4677 + Concert Technologies + Ricardo Yokoyama + ricardo&concertech.com +4678 + Information Developers Unlimited + Lisa Andrews + andrews&ecrc.org +4679 + OPICOM + Myungeon Kim + matty&203.243.253.142 +4680 + Telecommunications Systems Group - UCL + Jonathan Robinson + pants&ee.ucl.ac.uk +4681 + dvg Hannover Datenverarbeitungsgesellschaft mbH + Markus Moeller + markus.moeller&dvg.de +4682 + Linux-HA Project + Alan Robertson + alanr&unix.sh +4683 + Trading Technologies International, Inc. + Network Operations + networkops&tradingtechnologies.com +4684 + Ambit Microsystems Corporation + Willy Chang + willy.chang&ambit.com.tw +4685 + TONTRU Information Industry Group Co. Ltd. + Mao Yan + newtru&public1.ptt.js.cn +4686 + VegaStream + Mike Cohen + mikec&vegastream.com +4687 + Digitro Tecnologia Ltda + Milton Joao de Espindula + snmp-contact&digitro.com.br +4688 + Luimes Computer Consulting + Mark Luimes + mark&luimes.ca +4689 + Urbis.Net Ltd + Alex Tronin + at&urbis.net +4690 + MBC Europe, B.V. + Frits Obers + mbce&mbc.nl +4691 + VAW Aluminum Technologie GmbH + Markus Rogawski + rogawski.markus&vaw.de +4692 + Digital Technics, LP + Dr. Mikailov + mikailov&digtech.com +4693 + Maxtor Corp., + Marcia Tsuchiya + marcia_tsuchiya&maxtor.com +4694 + Willamette University + Casey Feskens + cfeskens&willamette.edu +4695 + Extricity Software + Ted Bashor + bashor&extricity.com +4696 + WEBB Studios + Densel Webb III + wollf&netzero.com +4697 + ATLANTEL + Jerome Girard + j.girard&atlantel.fr +4698 + Connectivity Software Systems + Don Reeve + dreeve&csusa.com +4699 + Burning Door + Eric Lunt + eric&burningdoor.com +4700 + InternetPirate.com + Jeffrey Winter + jgwinter&earthlink.net +4701 + Syskoplan GmbH + Heiko Giesebrecht + heiko.giesebrecht&syskoplan.de +4702 + SpeechWorks International, Inc. + Mark Eastley + mark.eastley&speechworks.com +4703 + Sanford C. Bernstein & Co. Inc. + John Talbot + talbotjr&bernstein.com +4704 + Visual Media Technologies, Inc. + Jason Prondak + jprondak&visualmedia.com +4705 + Gabriel Communications + Jonathan Gettys + jgettys&gabrielcom.net +4706 + Zero7.com + Eric Lozes + eric&Zero7.com +4707 + Aldea Internet, S.A. de C.V. + Javier Rodriguez + arturo&aldea.com.mx +4708 + iMedium Inc + John Case + john.case&imedium.com +4709 + Oxydian S.A. + Boisard Sebastien + boisard&oxydian.com +4710 + Safelayer S.A. + Jordi Buch + jbt&safelayer.com +4711 + Mail.com + Brendan Flood + bflood&staff.mail.com +4712 + Entropic Ltd + Anthony Bearon + Anthony.Bearon&entropic.co.uk +4713 + WhereNet, Inc. + Alan Freier + afreier&wherenet.com +4714 + Centerpoint Broadband Technologies + Mario Gallotta + mgallotta&cptbt.com +4715 + Advice Netbusiness Ltda + Nelson Pedrozo + nelson&domain.com.br +4716 + Arbortext + John Dreystadt + jdreysta&arbortext.com +4717 + Media Management Consulting + Grojean + grojean&aol.com +4718 + MDL Information Systems + Jeff Younker + jeff&mdli.com +4719 + Montagnaleader s.c.a.r.l. + Flavio Righini + flavio&ten.it +4720 + Lunatech Research + Bart Schuller + schuller+iana&lunatech.com +4721 + Cositel Inc. + Denis Martel + dmartel&cositel.com +4722 + Jacksonville University + Dennis Dormady + dwd&ju.edu +4723 + Mockingbird Networks + Martin Walsh + mwalsh&mbird.com +4724 + TechnoSoft + Sinisa Sremac + sremac&eunet.yu +4725 + Bestnet Internet Inc + Eric Weigel + ericw&bestnet.org +4726 + Capital Computer Services, Inc. + Jim Butler + jim-butler&msn.com +4727 + Langtang JV Company + Dr. 
Rayamajhi I + iswar&ishonch.uz +4728 + NSI Technology + Shin Hosubv + oreo&nsit.co.kr +4729 + Crannog Software + Eamonn McGonigle + eamonn&crannog-software.com +4730 + epita + Sadirac + rn&epita.fr +4731 + Socketware, Inc. + Steven Sparks + sparks&socketware.com +4732 + CVF + Yann Le Doare + yledoare&cvf.fr +4733 + Middlesex University + David Webb + d.webb&mdx.ac.uk +4734 + Zarak Systems Corporation + Ken Hesky + khesky&zarak.com +4735 + SOMA Networks, Inc. + Chris Davis + cdavis&somanetworks.com +4736 + Appliant, Inc. + Brian Marsh + marsh&appliant.com +4737 + Crosswalk.com, Inc. + Steven Sedlmeyer + ssedlmeyer&crosswalk.com +4738 + Shanghai E-way Computer Network Technology Ltd. + Sun + e_wayer&yahoo.com +4739 + OLDE Discount Corporation + Albert Tobey + atobey&olde.com +4740 + VoteHere + Jim Adler + jim&votehere.net +4741 + Amber Networks, Inc + Jack Yang + jyang&ambernetworks.com +4742 + Operational Technologies Services, Inc. + Michael Gariazzo + mgariazz&ots-inc.com +4743 + NextNet + Vladimir Kelman + kelmanv&nextnetworks.com +4744 + Internalnetwork + Jim O'Brien + jamesobrien&mindspring.com +4745 + DigiSAFE Pte Ltd + Ee Thiam Chai + eetc&cet.st.com.sg +4746 + PT Inovacao + Jorge Concalves + jgoncal&cet.pt +4747 + Service Technique de la Navigation Aerienne + Pont Thierry + PONT_Thierry&stna.dgac.fr +4748 + DoBiT nv + Marc Bruers + mbruers&dobit.com +4749 + e-Plaza + Gerardo Martinez Zuniga + gmartine&iteso.mx +4750 + Lykon Consulting + Mingzhe Lu + mingzhe_lu&yahoo.com +4751 + SARL K1 + Franck Lefevre + franck&k1info.com +4752 + Crescent Networks + Linsey O'Brien + lbob&crescentnets.com +4753 + MontaVista Software, Inc. + Joseph Green + jgreen&mvista.com +4754 + Symas Corp. + Howard Chu + hyc&symas.com +4755 + Directory Works + Alexis Bor + alexis.bor&directoryworks.com +4756 + CTC Union Technologies Co., Ltd. + Thomas Fan + ctcu&ms2.hinet.net +4757 + IBS + Steffen Reithermann + sr&IBS.de +4758 + AnIX Group Ltd + Anthony Roberts + roberta&anix.co.uk +4759 + Peco II, Inc. + Gregory Ratliff + gratcliff&peco2.com +4760 + Viditec, Inc. + Jason Lai + laijc&viditec.com +4761 + NuDesign Technologies Inc. + Brian Munshaw + contact&ndt-inc.com +4762 + GRIC Communication Inc + Wilson Tse + wilsont&gric.com +4763 + Teddybear Computer Services + Karen Kenworthy + karenk&teddybear.com +4764 + Global Crossing + IP Software Development + ipsd&gblx.net +4765 + Tomorrow Factory + Ron Hitchens + ron&tomorrowfactory.com +4766 + Hochschule Heilbronn + Florian Kronberger + florian.kronberger&hs-heilbronn.de +4767 + TrafficMaster PLC + Nathan Grant + nathan.grant&trafficmaster.co.uk +4768 + E.ON SE (formerly 'E.ON AG') + Florian Dietrich, Peter Marschall + pki&eon.com +4769 + IBM Corporation + Victor Sample + vsample&us.ibm.com +4770 + Ol'e Communications, Inc. + Dustin Doo + dtu&olecomm.com +4771 + Narus Inc + Stas Khirman + stask&narus.com +4772 + CyberSource Corporation + Roger Hayes + rhayes&cybersource.com +4773 + RealNames Corporation + Yves Arrouye + yves&realnames.com +4774 + Netpliance.net + Richard Buckman + richard.buckman&netpliance.net +4775 + Network ICE + Robert Graham + snmp&networkice.com +4776 + Knight Fisk Ltd + Ian Wakeling + Ian.Wakeling&KnightFisk.com +4777 + Cuperus Consultants + Bart Cuperus + b.cuperus&cuperus.nl +4778 + Biscom, Inc. + Tomas L. Keri + tkeri&biscom.com +4779 + Bay Technical Associates + Alex North + anorth&baytechdcd.com +4780 + VADEM + Jeff Saxton + jsaxton&vadem.com +4781 + E.piphany, Inc. + Chad Whipkey + whipkey&epiphany.com +4782 + 3Cube, Inc. 
+ Yuri Rabover + yurir&3Cube.com +4783 + CrosStor Software + Tony Schene + tony.schene&crosstor.com +4784 + March Networks + Pierre Doiron + pdoiron&marchnetworks.com +4785 + Appian Communications, Inc. + Ren Yonghong + ren&appiancom.com +4786 + Sierra Partners + Stephen Ells + sells&chw.edu + sae&foothill.net +4787 + Shanghai Holdfast Online Information + Jason Fang + locky_ymc&yahoo.com +4788 + D-Trust GmbH + Andreas Ziska + a.ziska&d-trust.net +4789 + Telica, Inc. + Rick Evans + revans&telica.com +4790 + SecureLogix Corporation + Stephen Johns + stephen.johns&securelogix.com +4791 + Dresdner Bank AG + Axel Dalz + Axel.Dalz&Dresdner-Bank.de +4792 + Wavefront + David Ladiges + david&wavefront.cc +4793 + Levi, Ray & Shoup, Inc. + Dennis Grim + dgrim&lrs.com +4794 + eCoin, Inc. + Horng-Twu Lihn + slihn&ecoin.net +4795 + Unified Productions, Inc. + Jason Yardi + unified&west.net +4796 + Joe Chapa + Joe Chapa + joechapa&joechapa.com +4797 + City Group Inc. + Brad Austin + AUSCO&HOME.COM +4798 + Vigil Technologies Ltd. + Mor Schlesinger + mor&vigiltech.com +4799 + Leaselogix, Inc. + Charles Corson + cccbttb&aol.com +4800 + Jensley Pty Ltd + Darren Collins + dmc&infoxchange.net.au +4801 + Compass Corporate Systems, Inc. + David Lethe + david.lethe&compass-corp.com +4802 + Systematic Software Engineering A/S + Jesper Gravgaard + jgj&systematic.dk +4803 + POWWOW + Michael Castello + webmaster&MarinaDelRey.com +4804 + Castello Cities Internet Network + David Castello + david&palmsprings.com +4805 + INOVA Corporation + Brit Minor + bminor&inovacorp.com +4806 + Rosslea Associates LLC + Eileen Graydon + eileen_graydon&rosslea-associates.com +4807 + Control Z Corporation + Seiji Eguchi + eguchi&czc.co.jp +4808 + Net-star Technology Corporation + Jackie Chang + jackie&net-star.com.tw +4809 + BSW Telecoms + Bryan Booth + bryan.booth&co.za +4810 + Bloemenveiling Holland + Marco van Katwijk + m.katwijk&bvh.nl +4811 + Network Flight Recorder, Inc. + Marcus Ranum + mjr&nfr.net +4812 + shanghai radio communication equipment manufacture company(srcem) + Feng Dai + fengdai&263.net +4813 + GlenEvin + Kevin Castner + kcc&glenevin.com +4814 + Alex Temex Multimedia S.A. + Nicolas Thareau + nthareau&alex.fr +4815 + H.B. Fuller Company + Todd Meadows + Todd.Meadows&hbfuller.com +4816 + Pacific Gas & Electric Company + Eric Vo + EXVQ&pge.com +4817 + Innovative Technologies & Consulting, Inc. + Milan Habijanac + milan&itcamerica.com +4818 + Sinclair Internetworking Services + Keith Sinclair + keith&sinclair.org.au +4819 + RMS Technology Integration, Inc. + Phil Draughon + jpd&rmsbus.com +4820 + Quicknet Technologies, Inc. + Greg Herlein + gherlein&quicknet.net +4821 + SN2AI + Bruno Barbieri + bbarbieri&aai.fr +4822 + Fial Computer Inc. + Ron Fial + ron&fial.com +4823 + Shanghai HuaLong Information Technology Development Center + Chen Jianqiang + cjq95&yahoo.com +4824 + DSL Communications + Jason Tang + jtang&dsl-com.com +4825 + Golden Screens Interactive Technologies, Inc. + Ran Livnat or Ishai Biran + ishai&gsit.com +4826 + The European Clearing House + Paul Rees + prees&cedelglobalservices.com +4827 + Interoute Telecommunications Inc + Shridar Iyengar + siyengar&interouteusa.com +4828 + Intelidata Technologies Corp. + Scott Vivian + svivian&intelidata.com +4829 + A to Z Pest Control + Randall Miller + wdirep&msn.com +4830 + Global Media Corp. + Sophia Xu + sxu&globalmedia.com +4831 + BANCHILE + Jose Antonio Muena Barria + jmuena&banchile.cl +4832 + Network Phenomena, LLC. 
+ Daniel Lewis + dnllewis&netscape.com +4833 + SDNI Inc. + Ivo Gorinov + ivo&sdni.com +4834 + Factum Electronics AB + Lars Boberg + Lars.Boberg&factel.se +4835 + OPNET Technologies Co., Ltd. + Melody Tsai + and&opnet.com.tw +4836 + LHS Telekom GmbH & Co. KG + Martin Schlothauer + Martin.Schlothauer&lhsgroup.com +4837 + trrrippleRRRdesigns + Kathi Tomlinson + tomkat&u.washington.edu +4838 + New Image Company + Gregory Tang + tang&www.newimage.com.tw +4839 + 2Wire, Inc. + Randy Turner + rturner&2Wire.com +4840 + Bedet Information Technologies + W.T. Bedet + wbedet&compuserve.com +4841 + iFace.com + Alex Vishnev + alex&iface.com +4842 + SecureAgent + Brent Johnson + r.brent.johnson&mail.securenotes.com +4843 + Amazon.com Inc. + Alan O'Leary + alano&amazon.com +4844 + NeoPoint, Inc. + Dwight Newton + dnewton&neopoint.com +4845 + Miralink Corp + Bill Shelton + bills&miralink.com +4846 + Lucent INS + Matthias Bannach + mbannach&lucent.com +4847 + Vikram Kulkarni + Vikram Kulkarni + vikramkulkarni&gmail.com +4848 + Interphiz Ltd + Sam Corn + sam&interphiz.com +4849 + Dipl. Phys. Peer Stritzinger GmbH + Peer Stritzinger + peer&stritzinger.com +4850 + Eddie George Limited + Eddie George + software.engineering&egl.com +4851 + KRONE Telecell GmbH + Holger Vogel + vogel&telecell.de +4852 + CMA Small Systems AB + Alexey Ryskov + Alexey.Ryskov&cma.ru +4853 + Syndeo Corporation + Matt Rhoades + matt&syndeocorp.com +4854 + Mk1 Design + Mark Chapman + mk1design&cs.com +4855 + AddPac Technology Co., Ltd. + Youngsik Lee + yslee&addpac.com +4856 + Veraluz International Corporation + Phillam Sera Jose + phillam&i-manila.com.ph +4857 + Cisco's Etc. + Emma Johnson + cjoh1103&bellsouth.net +4858 + Fortech + Dainis Strupis + dainis&fortech.lv +4859 + GEMS + Will Ballantyne + Will.Ballantyne&gems1.gov.bc.ca +4860 + boo.com Group LTD + Scot Elliott + scot.elliott&boo.com +4861 + PowerCom Technologies Inc + Katta Veeraiah + veera_katta&216.61.195.17 + ---none--- +4862 + Redwood Marketing + Curtis Ruck + ruckc&crosswinds.net +4863 + Gothaer Versicherungsbank VVaG + Axel Laemmert + Axel.Laemmert&Gothaer.de +4864 + JOH-DATA A/S + Roger Kristiansen + roger&norgesgruppen.no +4865 + ERG Group + Bill Wong + bwong&erggroup.com +4866 + Moseley Associate Inc. + Jamal Hamdani + info&moseleysb.com +4867 + Viet Marketing + Ngo Quyen + ngoquyen&trungtam.com +4868 + Nextra (Schweiz) AG + Rolf Gartmann + rolf&nextra.ch +4869 + SIT Europe + Jean-Charles Oualid + jean-charles&sit.fr +4870 + Fritz Egger GmbH & Co + Johan Memelink + johan.memelink&egger.com +4871 + DiscoveryCom + Rich Gautreaux + rgautreaux&discoverycom.com +4872 + Bouygues Telecom + Stephane Jeanjean + sjeanjea&bouyguestelecom.fr +4873 + Seay Systems, Inc. + Vernon Hurdle + vernon&seaysystems.com +4874 + Juniper Networks/Unisphere + John Scano + jscano&juniper.net +4875 + DoubleClick Inc. + Tom Klempay + tklempay&doubleclick.net +4876 + Eyestreet Software + Charlie Hill + chill&eyestreet.com +4877 + Salon Press + Serge Stikhin + postmaster&salon.ru +4878 + Village Networks, Inc. + Yong-Qing Cheng + cheng&vill.com +4879 + SecureMethods, Inc. + Ken Walker + ken.walker&securemethods.com +4880 + Standard & Poors Compustat + Kevin Nervick + kevin_nervick&standardandpoors.com +4881 + Ruijie Networks Co., Ltd. (formerly 'Start Network Technology Co., Ltd.') + Zhengrong Yu + yuzr&ruijie.com.cn +4882 + Root, Inc. 
+ Naoto Shimazaki + shimaz-n&root-hq.com +4883 + Saatchi & Saatchi + Alessandra dell'andrea + a.dellandrea&saatchi.it +4884 + Protek Ltd + Richard Sizeland + rsizeland&protek.com +4885 + Photon Technology Co., Ltd. + James Kou + jameskou&yahoo.com +4886 + Westwave Communications + Jean-Marc Serre + jean-marc.serre&westwave.com +4887 + IQ Wireless GmbH + Holger Vogel + holger.vogel&iq-wireless.com +4888 + Multidata GmbH + Willfried Dersch + w.dersch&www.multidata.de +4889 + Inflow + Mark Anderson + manderson&inflow.com +4890 + Skinners Computer Center + Ted Skinner + tskinner&max1-p121.Bayou.COM +4891 + Network Address Solutions + Jerry Roy + jroy&flashcom.com +4892 + The Chinese University of Hong Kong + S.T. Wong + st-wong&cuhk.edu.hk +4893 + ATSHAW Technologies + Allyn Tyler-Shaw + atshaw&verio.net +4894 + Kleinwort Benson Ltd. + Nigel Southgate + nigel.southgate&dresdnerkb.com +4895 + Woodwind Communications Systems Inc. + Brian Hardy + bhardy&wcsinc.com +4896 + TeleSoft International, Inc. + Charles Summers + CKSummers&acm.org +4897 + DoBusinessOnline Services + Ken Netherland + ken&dobusinessonline.com +4898 + Time Inc. + Leon Misiukiewicz + leon_misiukiewicz&timeinc.com +4899 + Walker Systems Corporation + Oleksandr Kravchenko + okravchenko&walkersys.com +4900 + Conexant Systems + Terry Rodman + terry.rodman&conexant.com +4901 + USAA + Carl Mehner + usaadomains&usaa.com +4902 + Beijing Huaguang Electronics Co., Ltd. + Liang Wenjing + liangwj&hg.com.cn +4903 + GCC Technologies Inc. + Michael Fryar + mfryar&gcctech.com +4904 + SDF + Viktor Leijon + viktor&sdf.se +4905 + WebDialogs, Inc + Mike Melo + mmelo&webdialogs.com +4906 + Edgix Corporation + Mark Hurst + mhurst&edgix.com +4907 + AppWorx Corporation + George Del Busto + gdelbusto&router.appworx.com +4908 + ATS, Advanced Technology Solutions S.A. + Julio Riviere + jriviere&ats.com.ar +4909 + Experts Exchange + Steve Miller + steve&experts-exchange.com +4910 + Ubizen + Hugo Embrechts + Hugo.Embrechts&ubizen.com +4911 + pcOrder.com, Inc. + Ron Rudd + ron.rudd&pcorder.com +4912 + HolisticMeta, LLC (formerly 'One World Information System') + Roy Roebuck + royroebuck&holisticmeta.com +4913 + Reserved + RFC-pti-pen-registration-10 + ---none--- +4914 + hole-in-the.net + Joe Warren-Meeks + joe&hole-in-the.net +4915 + Sipher Internet Technology Ltd. + +44 1494 765335 + sipher&sipher.co.uk + ---none--- +4916 + Blacksound SA + Mamadou M'Baye + mamadou&blacksound.com +4917 + Sociedad Estatal de Loterias y Apuestas de Estado + Julio Sánchez Fernández + oidregistry&selae.es +4918 + Taurusent Technologies + Linford D. Hayes + taurusen&flinthills.com +4919 + Luminate Software Corporation + David Korz + dkorz&luminate.com +4920 + Boston Globe + Richard Farrell + farrell&globe.com +4921 + Network Solutions + Brad McMillen + bradtm&internic.net +4922 + Telcordia Technologies, Inc. + Kaj Tesink + kaj&research.telcordia.com +4923 + AudioCodes + Chai Forsher + chai-f&audiocodes.com +4924 + SAN Valley Systems, Inc. + Allison Parsons + allison&sanvalley.com +4925 + Zuma Networks + Rueben Silvan + rsivan&zumanetworks.com +4926 + TouchTunes Digital Jukebox + Eugen Pavica + paveu&touchtunes.com +4927 + time4you GmbH + Sven Doerr + doerr&time4you.de +4928 + Xrosstech, Inc. + Jihno Chun + jhchun&xrosstech.com +4929 + LAN Crypto + Julia Kopytina + lanc&aha.ru +4930 + Concord Technologies + Athir Nuaimi + anuaimi&ca.concordfax.com +4931 + Standard & Poor's Corp. 
+ Martin Niederer + martin_niederer&standardandpoors.com +4932 + Foglight Software + Michael Tsou + mtsou&foglight.com +4933 + Shunra Software Ltd. + Benny Daon + benny&shunra.com +4934 + WebDialogs, Inc + Mike Melo + mmelo&webdialogs.com +4935 + Media5 Corporation / M5 Technologies + Jerome Lagasse + iana&media5corp.com +4936 + First American Financial Corporation John + Thuener + jthuener&firstam.com +4937 + Stormbreaker Network Services + David Britt + dbritt&cdm.com.au +4938 + Daeyoung Electronic Ind.CO., Ltd. + Jin-Hak Yeom + yeomjh&hanmail.net +4939 + Procter & Gamble + Patryk Szudrowicz + szudrowicz.p&pg.com +4940 + Converg Media + Shane Kinsch + shane.kinsch&converg.com +4941 + Echelon Corporation + Chris Stanfield + standfield&echelon.com +4942 + Liberty Press & Letter Service Joseph De Silvis + Libertyink&AOL.com + ---none--- +4943 + Novell GmbH + Alexander Adam + Alexander_Adam&Novell.com +4944 + Future Networks, Inc. + Michael Rand + michael.rand&future-networks.com +4945 + Logicon, Inc. + Jerry N. Baker + jbaker&logicon.com +4946 + Psychedelic Illuminations Magazine + Ron Piper + ronnipiper&hotmail.com +4947 + Grass Valley USA, LLC + Cody Nelson + cody.nelson&grassvalley.com +4948 + Migros + Simon Hauri + netadmin-datacenter&mgb.ch +4949 + Fortress Technologies + Bill McIntosh + bmcintoch&fortresstech.com +4950 + Luxor Software Inc. + Walid Bakr + WalidB&LuxorSoft.com +4951 + State Farm Insurance + Brian L. Detweiler + brian.l.detweiler.gc2k&statefarm.com +4952 + Thinking Objects GmbH + Markus Klingspor + markus.klingspor&to.com +4953 + Tecnet Teleinformatica Ltda. + Paulo Eduardo Macagnani + macagnani&tecnet.ind.br +4954 + Wrox Press Itd + Jeremy Beacock + jeremyb&mail.wrox.co.uk +4955 + Asgard Technologies, Inc + Vivian Pecus + vipecus&aol.com +4956 + GRAPHICS FIVE + KEYHAN + KEYHAN&WE-24-130-9-85.we.medioane.net +4957 + CNet Computer Systeme Netzwerk GmbH + +49.335.68339.90 + bernd&pflugrad.de +4958 + TerraLink Technologies, LLC + Keith A. Tuson + tuson&terralinktech.com +4959 + U Force, Inc. + Scott Francis + scott.francis&uforce.com +4960 + Chromisys + Evan McGinnis + evan&chromisys.com +4961 + Ardent Technologies + Dean C. Wang + dwang&ardentek.com +4962 + Artel Video Systems, Inc. + Ed Shober + eshober&artel.com +4963 + @manage + Derrick Arias + derrick&amanage.com +4964 + W.B. Love Enterprises Inc. + Walter L. Shaw Jr. + waltlove&ix.netcom.com +4965 + Greenwich Mean Time + Alec Bray + apb&gmt-2000.com +4966 + AACom + Eric Dillman + edillmann&AAcom.fr +4967 + Starwood Hotels & Resorts + Scott Baughman + scott.baughman&starwoodhotels.com +4968 + Universtiy of North Texas (unofficial) + Kevin W. Mullet + kwm&unt.edu +4969 + Park Air Systems Ltd. + Martin Brunt + m.brunt&uk.parkairsystems.com +4970 + National Grid for Learning + Nathan Chandler + nathan&ngfl.gov.uk +4971 + Anglers Club + Joyce Yaffe + joyceyaffe&aol.com +4972 + Los Alamos National LaboratoryGiri + Raichur + graichur&lanl.gov +4973 + SetNet Corporation + Nicolas Fodor + nfodor&setnet.com +4974 + eATM + Jimmy Wang + wangjc&wellsfargo.com +4975 + INTERVU Inc.
+ Chuck Norris + vgale&intervu.net +4976 + AGENT++ + Frank Fock + fock&agentpp.com +4977 + Tiesse S.p.A + Ombretta Miraglio + o.miraglio.tiesse&iol.it +4978 + Direkcija RS za poslovno informacijsko sredisce + Igor Milavec + igor.milavec&l-sol.si +4979 + Licer Solutions + Igor Milavec + igor.milavec&l-sol.si +4980 + Oxymium + Manuel Guesdon + mguesdon&oxymium.net +4981 + RiverDelta Networks + Thor Kirleis + thor&riverdelta.com +4982 + Persistence Software Inc. + Olivier Caudron + caudron&persistence.com +4983 + InnoMediaLogic Inc. + Francois Morel + francois.morel&iml-cti.com +4984 + Pinnacle Systems + Jacob Gsoedl + jgsoedl&pinnaclesys.com +4985 + Vigilant Networks + Ashwin Kovummal + ashwin&lecroy.com +4986 + KB/Tel + Lucero Maria Ayala Lugo + aclmal&hotmail.com +4987 + Simpler Networks Inc. + Serge Blais + serge.blais&simplernetworks.com +4988 + Synkro AS + Ole Petter Ronningen + ole&synkro.no +4989 + Connect Austria GmbH + Zahari Tassev + zahari.tassev&one.at +4990 + TTI Telecom + Shlomo Cwang + scwang&tti-telecom.com +4991 + Stonebridge Technologies, Inc.S.E. + John Harris + jharris&tacticsus.com +4992 + Thyssen Krupp Information Systems GmbHHaynes + Lohr + loehr&tkis.thyssenkrupp.com +4993 + CTU Prague + Milan Sova + sova&fel.cvut.cz +4994 + CUT-THROAT TRAVEL OUTLET + Mel Cohen + cuthraot&sirius.com +4995 + Universtiy of California, Berkeley + Robert Reid + robreid&socrates.berkeley.edu +4996 + Forest Networks LLC + Mark Penrose + mark.penrose&forestnetworks.com +4997 + Inetd.Com Consulting + Joe Elliott + joe&inetd.com +4998 + Cadant Inc. + Yung Nguyen + ynguyen&cadant.com +4999 + Reserved + RFC-pti-pen-registration-10 + ---none--- +5000 + Personal Business + Matias Guerrero + perbus2&hotpop.com +5001 + LogicMedia + Rudolph Balaz + rudy&logicmedia.com +5002 + RC Networks + Jim Sung + jsung&rcnets.com +5003 + AudioCodes LTD + Oren Kudevitzki + orenku&audiocodes.com +5004 + Predictive Networks + Rich Zimmerman + zimm&predictivenetworks.com +5005 + NCvision + Stern Rita + rita&ncvision.com +5006 + Vishwnet India + Mahesh Kulkarni + shri.mahesh&yahoo.co.in +5007 + Effective Computer Solutions, Inc. + Wilhelm Horrix + EffectiveComputerSolutions&compuserve.com +5008 + drugstore.com + Trey Valenta + trey&drugstore.com +5009 + Schiano + Andrea Schiano + schiano&mcn.mc +5010 + Splash Technology, Inc. + Minhhai Nguyen + Minhhai.Nguyen&splashtech.com +5011 + Scaldis + Geert Bosuil Gommeren + geert.gommeren&rug.ac.be +5012 + Lottomatica spa + Alberto Simili + simili&lottomatica.it +5013 + Maverick Internet Technology + Arie Joosse + ariejoosse&usa.net +5014 + Viacast + Alan Haines + ahaines&idinets.com +5015 + Verbind, Inc. + Director of IT + it&verbind.com +5016 + Oregon State University + Bill Ayres + ayres&nws.orst.edu +5017 + University of Akron + Keith Hunt + keith&uakron.edu +5018 + Ameritrade Technology Group + Dale Botkin + dbotkin&ameritrade.com +5019 + eBusiness Interactive + Adam Barclay + adam&ebinteractive.com.au +5020 + sekwang eng. co. + choi jin young + cjy1004&kornet.net +5021 + Television Internacional S.A. de C.V. + Eduardo Gutierrez + etejeda&INTERCABLE.NET +5022 + Equipe Communications Corporation + Chris Pecina + cpecina&equipecom.com +5023 + Bit by Bit Solutions, Inc. + David Richardson + superior&bitbybitsolutions.com +5024 + 3Domes, Inc. 
+ Minainyo Sekibo + mina&3domes.com +5025 + OPUS 2 Revenue Technologies + Dave Gray + dgray&opus2.com +5026 + Aspiro AB + Peter Kjellberg + peter.kjellberg&aspiro.com +5027 + Orebro Kommun + Carl Bergudden + clbn&orebro.se +5028 + Lightbridge + Michael Arena + arena&lightbridge.com +5029 + Comma Soft AG + Robert Draken + Robert.Draken&comma-soft.com +5030 + University of Ulm + Karl Gaissmaier + karl.gaissmaier&uni-ulm.de +5031 + Recovery + Frederic Rouyre + rfr&inter-land.net +5032 + Norscan Instruments Ltd + Daniel Goertzen + goertzen&norscan.com +5033 + eCharge Corporation + Alan Chedalawada + achedalawada&echarge.com +5034 + LogicalSolutions.net + Jim Salviski + jim&logicalsolutions.net +5035 + Inter-National Research Institute + Randy Proctor + mrp&inri.com +5036 + Input Software + Tom Bridgwater + tbridgwater&inputsw.com +5037 + Bri-link Technologies Inc. + Andy Huang + andyhuang&brilink.com.tw +5038 + T&S Software Associates Inc. + Michael Thayer + mthayer&ts-software.com +5039 + Xstreamis plc + Ian Moir + ian.moir&xstreamis.com +5040 + Wiesemann & Theis GmbH + Klaus Immig + info&wut.de +5041 + Menicx International Co. Ltd. + Erik Reid + erik&menicx.com.tw +5042 + Broadwing Inc. + Ashok Kumar Padhy + apadhy&broadwing.com +5043 + Micro Focus International Ltd + Henry Szabranski + henry.szabranski&microfocus.com +5044 + Velocity Software Systems Ltd. + Leslie Mulder + lesm&velocity.ca +5045 + Bithop Systems, Inc. + Sohail Shaikh + sohail.shaikh&lexis-nexis.com +5046 + CS SI + Marc Milan + marc.milan&cssystemes.cie-signaux.fr +5047 + Kimley-Horn and Associates + David Robison + DRobison&kimley-horn.com +5048 + Kudale Inc. + Jack Kudale + info&kudaleinc.com +5049 + Equifax Inc. + Larry Laughlin + hostmaster&equifax.com +5050 + Nordmark NorLan Consult + Tore Nordmark + tonord&online.no +5051 + Brix Networks + Scott Harvell + sharvell&brixnet.com +5052 + Intermine Pty Ltd + Scott McCallum + scott&intermine.com.au +5053 + Agilent Technologies + Florence Claros + pdl-agilent-iana-oid&agilent.com +5054 + will + koyongje + koyongje&mail.will.co.kr +5055 + Eolring + Yoann Noisette + y.noisette&eolring.fr +5056 + Frank Lima + Frank Lima + ylaser&bellatlantic.net +5057 + Gifford Hesketh + Gifford Hesketh + gifford&telebot.net +5058 + Avistar Systems + Chris Lauwers + lauwers&avistar.com +5059 + Carmona Engineering Services + Ron Carmona + rcarmona&earthlink.net +5060 + Singapore Press Holdings Ltd + Kow Kee Nge + kowkn&asia1.com.sg +5061 + Swisskey Ltd + Juerg Spoerndli + jspoerndli&swisskey.ch +5062 + DFN Directory Services + Peter Gietz + peter.gietz&directory.dfn.de +5063 + Telesta + Jimmy Spets + jimmy.spets&telesta.com +5064 + Deutsche Post AG + Thomas Gierg + T.Gierg&DeutschePost.de +5065 + PrivateExpress.com + Patrick Day + pday&privateexpress.com +5066 + NetVision, Inc. + Jay Adams + jadams&netvision.com +5067 + Open Society Fund - BH + Tomo Radovanovic + tomo&soros.org.ba +5068 + Jewsih Community of Bosnia and Herzegovina + Tomo Radovanovic + tomo&soros.org.ba +5069 + Call Connect + Tim Slighter + slighter&callconnect.com +5070 + Ganna Construction, Inc. + Abdul Iscandari + gannaeng&aol.com +5071 + HIQ Networks + Janet Soung + jsoung&hiqnetworks.com +5072 + Ditech Communications Corporation + Serge Stepanoff + sstepanoff&ditechcom.com +5073 + knOwhere, Inc. + Russ White + rwhite&knowherestore.com +5074 + Miva Corporation + James Woods + jwoods&miva.com.au +5075 + CNL CentralNet GmbH + Oliver Marugg + noc&centralnet.ch +5076 + LongView International, Inc.
+ Tracy Tondro + ttondro&lvi.com +5077 + Clicknet Software + Kenny Bright + kbright&clicknet.com +5078 + Media Vision Computer Technologies + David Sataty + davids&ncc.co.il +5079 + Crosskeys Systems Corporation + Scott Searcy + scott.searcy&crosskeys.com +5080 + Power Systems + Gary Roseland + gr57&earthlink.net +5081 + Empowerment Group, Inc + J.D. Wegner + jd&emgp.com +5082 + More Magic Software MMS Oy + Mika P. Nieminen + Mika&moremagic.com +5083 + Daktronics + Brian Iwerks + biwerks&daktronics.com +5084 + SierraCom + Denise Chasse + dchasse&sierracom.com +5085 + SmartMove + Wim De Munck + Wim.Demunck&smartmove.be +5086 + ICS Advent + Steve Potocny + spotocny&icsadvent.com +5087 + Great Dragon Telecom(Group) + Liu Bin + Henry_liu&ri.gdt.com.cn +5088 + Guy Cole + Guy Cole + guycole&gmail.com +5089 + Clavister AB + Peter Emanuelsson + registry&clavister.com +5090 + Carumba + Jauder Ho + jauderho&carumba.com +5091 + Norske Troll AS + Stein Vrale + stein&trolldom.com +5092 + INFORMATIONSTECHNOLOGIE AUSTRIA GES. M.B.H. + alfred Reibenschuh + alfred.reibenschuh&it-austria.com +5093 + SDF Enterprise + Scott D. Florez + damien7&flash.net +5094 + The University of Tulsa + Jared Housh + jared-housh&utulsa.edu +5095 + Credit Suisse Group + Paul Kalma (CANA Manager) + admin.cana&csg.ch +5096 + Computer Science and Engineering, CUHK + Wong Yin Bun Terence + wongyb&cse.cuhk.edu.hk +5097 + Rock Marketing + Diego Rodriguez + diego&portland.quik.com +5098 + OPUSWAVE Networks, Inc. + Abid Inam + ainam&opuswave.com +5099 + Reserved + RFC-pti-pen-registration-10 + ---none--- +5100 + Tricast Multimedia + Jesse Whitehorn + jwhitethorn&hotmail.com +5101 + Novalabs + Alessandro Morelli + alex&novalabs.net +5102 + Integro A.C.S. + Francois Compagne + fcompagne&integro-acs.fr +5103 + Foxcom Ltd. + Yoav Wechsler + yoavw&foxcom.com +5104 + SarangNet + Young-jae Chang + robocop&sarang.net +5105 + Datang Telecom Technology CO., LTD + youping Hu + huyouping&163.net +5106 + elephant project + Wolfgang Schell + wolfgang.schell&elephant-project.de +5107 + Pinkl Consulting Services + Thomas J Pinkl + tpinkl&voicenet.com +5108 + Modius Inc. + Scott Brown + scott.brown&modius.com +5109 + CHINA GREAT DRAGON TELECOMMUNICATION(GROUP) CO., LTD + Liu Bin + Henry_liu&ri.gdt.com.cn +5110 + Health Business Systems, Inc. + Thomas J Pinkl + tom&hbsrx.com +5111 + Medea Corporation + Roger S. Mabon + Rmabon&pacbell.net +5112 + Corvia Networks, Inc. + Reva Bailey + reva&corvia.com +5113 + gridware + Joachim Gabler + J.Gabler&gridware.de WWW: http://www.gridware.de +5114 + Future fibre Technologies + Peter Nunn + pnunn&fft.com.au +5115 + PowerCom Technology Co., Ltd. + Wei-En Wang + wewang&powercom.com.tw +5116 + IBM, NUMA-Q Division + David Arndt + daa&sequent.com +5117 + Kaelin Colclasure + Kaelin Colclasure + kaelin&acm.org +5118 + Dantel,Inc. + Mark R. Huggett + mhuggett&dantel.com +5119 + SYCOR GmbH + Michael Kunze + michael.kunze&sycor.de +5120 + EMF Home Inspection Inc. + Mark Fredenberg + EMFinspection&Hotmail.com +5121 + League Scoring + Karl Rullman + Karl_Rullman&Yahoo.com +5122 + Everest eCommerce, Inc. + Kaelin Colclasure + kaelin&everest.com +5123 + Lucent Tech. Taiwan Telco. + Sky Wang + skyw&tw.lucent.com +5124 + Phonetic Systems Ltd. + Nir Halperin + tshtofblat&PhoneticSystems.com +5125 + Celestica Power + Ken Clark + kclark&clestica.com +5126 + Symtrex Inc. 
+ Robert Hocking + rhocking&symtrex.com +5127 + Western Digital Corporation + Shola Agbelemose + shola.agbelemose&wdc.com +5128 + Saitama University Far Laboratory + Hassan Hajji + hajji&cit.ics.saitama-u.ac.jp +5129 + Macquarie University + Simon Kissane + simon.kissane&mq.edu.au +5130 + Omron Canada Inc. + Denis Pimentel + denis_pimentel&omron.com +5131 + lotz.de + Christian lotz + cl&lotz.de +5132 + Mammut Net + Klaus J. Koch + Klaus.Koch&mammut.net +5133 + Halfdome Systems, Inc. + Andrew Koo + andykoo&halfdome-ift.com +5134 + IP Unity + Tony Ma + tony&ipunity.com +5135 + CyberSafe Corporation + Dave Arnold + craig.hotchkiss&cybersafe.com +5136 + Gruner + Jahr AG & Co KG (formerly 'Electronic Media Service') + Thomas Doschke + OIDTeam&guj.de +5137 + DFC, Inc. + Jade Meskill + jmeskill&forchrist.org +5138 + Easynet Group Plc + Sven Verluyten + oid&be.easynet.net +5139 + ARESCOM, Inc. + Alfred Ma + alfred&arescom.com +5140 + Compudisk Systems Ltd. + Alf Hardy + alf.hardy&virgin.net +5141 + Hart Edwards Corporation, Inc. + Christopher Hart + hart&hartedwards.com +5142 + IVANS + Chris Van Sant + chris.van.sant&ivans.com +5143 + Cereva Networks Inc. + Beth Miaoulis + beth_miaoulis&cereva.com +5144 + Reserved + RFC-pti-pen-registration-10 + ---none--- +5145 + ICT electronics SA + Joan-Lluis Montore Parera + jlm&ict.es +5146 + Eclipsys Corporation + Fred Reimer + Fred.Reimer&eclipsys.com +5147 + MICEX + Alexander Ovchinnikov + ovchinkv&micex.com +5148 + cTc Computer Technik Czakay GmbH + Woodisch + wodisch&ctc.de +5149 + Managed Object Solutions, Inc. + E. Adam Cusson + ecusson&mos-inc.com +5150 + Opsware + Dave Jagoda + dj&opsware.com +5151 + NetGain, LLC + Richard Huddleston + richard&netgainllc.com +5152 + Cable AML, Inc. + Javier Olivan + jolivan&cableaml.com +5153 + The University of Akron + Keith Hunt + keith&uakron.edu +5154 + Incyte Genomics + Brett Lemoine + bl&incyte.com +5155 + CS & S GH computer System Tech. Co., Ltd. + Qigang Zhu + zhu_qigang +5156 + newproductshowroom.com + Marc August + marcaugust&iname.com +5157 + The University of Queensland + Dr. Rodney G. McDuff + mcduff&its.uq.edu.au +5158 + CompuTECH Services + Harry A. Smith + smithha&hotmail.com +5159 + Ultra d.o.o. + Kristijan Gresak + kristijan.gresak&ultra.si +5160 + DAIN Telecom Co., Ltd + Dong-Suk Yun + kseom&203.248.101.130 +5161 + Morehead State University + Iroshan Navaratne + i.navaratne&moreheadstate.edu +5162 + Societe Europeenne des Satellites + Alan Kuresevic + Alan_Kuresevic&ses-astra.com +5163 + Digital Marketplace, Inc. + Neal Taylor + ntjr&dmcom.net +5164 + Cygnet Technologies, Inc. + Tim Michals + tim&cygnetinc.com +5165 + Sassafras Software Inc. + Mark Valence + mark&sassafras.com +5166 + Mercom Systems, Inc. + Steve Danis + steve.danis&mercom.com +5167 + Orchestream Ltd. + Benedict Enweani + benweani&orchestream.com +5168 + Levitte Programming + Richard Levitte + levitte&lp.se +5169 + NET CONSULTING S.R.L. + Nicola di Fant + nidifant&tin.it +5170 + Aegis Data Systems, Inc. + Mark Stingley + chief&aegisdata.com +5171 + WhizBang! Labs + Dan Rapp + drapp&whizbang.com +5172 + Protocom Development Systems + Jason Hart + support&serversystems.com +5173 + Sonus Networks, Inc. + Information Security + infosec&sonusnet.com +5174 + PrivateExpress .com + Mok Ku + mok&privateexpress.com +5175 + Lindsay Electronics + Dan Kolis + ipmanager&hq.lindsayelec.com +5176 + 2Win Information Systems, Inc. + Alex Lee + alee&2win.com +5177 + Private Express Technologies Pte, Ltd. 
+ Wong Chee Hong + cheehong&privateexpress.com.sg +5178 + Telephony Experts, Inc. + James Sandahl + jsandahl&telephonyexperts.com +5179 + Arima Computer Corp. + Norman Hung + norman&rioworks.com +5180 + The DocSpace Company Inc. + Nash Stamenkovic + nstamenkovic&docspace.com +5181 + Firat Universites + Abdulkadyr Yonar + ayonar&firat.edu.tr +5182 + I Theta Corp. + Miles Whitener + mbw&i-theta.com +5183 + ---none--- + ---none--- + ---none--- +5184 + C&N Touristic AG + Edgar Scheuermann + edgar.scheuermann&cun.de +5185 + Sungmi Telecom Electronics Co., Ltd. + Jinsoo Yoo + jsyoo&sungmi.co.kr +5186 + ---none--- + ---none--- + ---none--- +5187 + Bytware, Inc. + Michael Grant + iana&bytware.com +5188 + BITHOP SYSTEMS, Inc. + Sohail Shaikh + sohail.shaikh&firewall5.lexis-nexis.com +5189 + TELEFONICA I+D + Javier Marcos Iglesias + jmarcos&tid.es +5190 + Organic + Henry Goldwire + henry&organic.com +5191 + DEKRA AG + Wolfgang Hiller + wolfgang.hiller&edv.dekra.de +5192 + Gotham Networks + George W. Kajos + gkajos&gothamnetworks.com +5193 + Chemical Abstracts Service + James R. Schrock + jschrock&cas.org +5194 + Okanagan Spring Brewery + Michael F. Hertel + mhertel&okspring.com +5195 + AdRem Software + Tomasz Kunicki + tkunicki&adrem.com.pl +5196 + E-Tech, Inc. + Jone Yi + jone_yi&e-tech.com.tw +5197 + Startup .com + Katell Pleven + katell49&aol.com +5198 + "Universita`" degli Studi di Roma "Tor Vergata" + Lorenzo M. Catucci + catucci&ccd.uniroma2.it +5199 + Odetics ITS + Ken Vaughn + klv&odetics.com +5200 + EnFlex Corp. + John W. Markham + jmarkham&enflex.net +5201 + Reserved + RFC-pti-pen-registration-10 + ---none--- +5202 + Aalto University (formerly 'Helsinki University of Technology') + Niko Suominen + niko.suominen&aalto.fi +5203 + The Naqvi Group + Shams Naqvi + SSNAQVI&aol.com +5204 + ClickNet Software Corporation + Eric Solberg + esolberg&clicknet.com +5205 + Ruby Tech Corp. + Pete lai + pete&mail.rubytech.com.tw +5206 + Voltaire + Tzahi Oved + tzahio&voltaire.com +5207 + USA-NET CORPORATIONS + Dirk Hilbig + net-ops&usanet.com +5208 + UPMACS Communications Inc. + Peter Berg + peter&upmacs.com +5209 + Profound Rational Organization(PRO), Internatioal + Pae Choi + pro&bsc.net +5210 + Linkline/Freegates + Frederick Houdmont + fh&freegates.be +5211 + NCvision Ltd. + Stern Rita + rita&ncvision.com +5212 + Placeware, Inc. + Mike Dixon + mdixon&placeware.com +5213 + Greylink, Inc. + Edgar Tu + edgar&keymail.com +5214 + Athens Chamber + George Koukoulas + gkouk&acci.gr +5215 + United Connections + Andrew Liu + andrew&amh.linuxhk.com +5216 + Senior Informatica Ltda. + Benedicto de Souza Franco Jr. + bene&senior.psi.br +5217 + SOFHA GmbH + Christoph Oeters + coeters&sofha.de +5218 + Networks Experts, Inc. + Richard Hornbaker + RichardH&NetworkExperts.com +5219 + BeVocal Inc. + Mikael Berner + mikael&bevocal.com +5220 + World Telecom Labs + E P Thornberry + pthornberry&harmonix.co.uk +5221 + Wireless Systems International + Andrew Chilcott + ajc&wsi.co.uk +5222 + Dantel Inc. + Mark R. Huggett + mhuggett&dantel.com +5223 + Korea Electronics Technology Institute + Song Byoungchul + songbc&203.253.128.2 +5224 + Hitachi Data Systems (Europe) Ltd. + John Murray + John.Murray&hds.com +5225 + Universidad de Cantabria- ATC + Estela Martinez + estela&atc.unican.es +5226 + Harmonix Limited + Patrick Thornberry + www.harmonix.co.uk +5227 + MELCO Inc. + Toshinari Nakai + nakai&melcoinc.co.jp +5228 + Littlefeet Inc. + Samuel K. 
Sun + ssun&littlefeet-inc.com +5229 + Big Fish Communications + Thomas Johnson + tj&bigfishcom.com +5230 + ComRoesGroup + Felix B. Adegboyega + felix&ComRoes.com +5231 + Oswego State University + Brandon Brockway + brockway&oswego.edu +5232 + Counterpane Internet Security + Jon Callas + callas&counterpane.com +5233 + Mercury Interactive Corp. + Ido Sarig + isarig&merc-int.com +5234 + Shenzhen SED Info. Tech. Corp. + He, Bing + szsed&public.szptt.net.cn +5235 + Persistent Systems Private Limited + Shiv Kumar Harikrishna + shiv&pspl.co.in +5236 + Great Dragon Telecom(Group) + Zhou Ming + zhouming&ri.gdt.com.cn +5237 + Trustis Limited + Andrew Jackson + amj&trustis.com +5238 + SOcieta GEnerale di Informatica (SOGEI SPA) + Dr. Ciro Maddaloni + cmaddaloni&sogei.it +5239 + FESTE + Oscar Conesa + oconesa&feste.org +5240 + RAND + Michael Misumi + misumi&rand.org +5241 + TECHMATH AG + Friedel Van Megan + vanmegen&medien.tecmath.de +5242 + Envilogg AB + Marcelo Tapia + marcelo&envilogg.se +5243 + ICIS, School of EEE,Nayang Technological University + S. Rohit + PR766125&ntu.edu.sg +5244 + Casey's Baja Tours + Casey Hamlin + casey&200.33.413.229 +5245 + Quarterstone Communications Inc.(QCI) + Dave Lankester + dlankester&quarterstone.com +5246 + SS8 Networks Inc. + David Zinman + davidz&ss8networks.com +5247 + Zinnia Design + Julie Low + jlw&cs.unh.edu +5248 + Michael Brehm + Michael Brehm + mbrehm&erols.com +5249 + Warner-Lambert + Mike Olsakowski + michael.olsakowski&wl.com +5250 + Center7 + Glen Lewis + glen&center7.com +5251 + Donald E. Bynum + Donald E. Bynum + xxtcs&aol.com +5252 + CIS Technology + Johnson Lee + johnsonc&cis.com.tw +5253 + Aldebaran + Stefan Hackenthal + sh&aldebaran.de +5254 + Datang Telecom Technology CO., LTD + youping Hu + huyouping&21cn.com +5255 + EuroPKI + Antonio Lioy + lioy&polito.it +5256 + Freeonline .com. au Pty Ltd + Matthew Walker + matthew_walker&freeonline.com.au +5257 + AXUS Microsystems Inc. + Stephen Wang + hlwang&axus.com.tw +5258 + Zeta Technologies Co. Ltd + Chow Kai Ching Karen + karen&zetatran.com +5259 + MATRA SYSTEMES & INFORMATION + Philippe Gerard / Thierry Auger + gerard&matra-ms2i.fr +5260 + Digital Engineering + Mark Cullen + markc&digeng.co.uk +5261 + LUTHER DANIEL + Luther Daniel + LGDAN&WEBTV.NET +5262 + QWIKWIRE. NET + Brian Crawford + brian&investorsbeacon.com +5263 + CAIS INTERNET + Ed Van Horne + e.vanhorne&caissoft.com +5264 + Varian Medical Systems + Mark P. Morris + mmorris&oscs.varian.com +5265 + TeleDanmark Erhverv, CTIUdvikling + Henning Ebberup Peterson + heepe&tdk.dk +5266 + Johannes Gutenberg-Universitaet Mainz + Carsten Allendoerfer + Allendoerfer&Uni-Mainz.DE +5267 + Hong Kong University of Science and Technology + Lai Yiu Fai + ccyflai&ust.hk +5268 + IKEA IT CENTER AB + Anders Osling + anders.ostling&neurope.ikea.com +5269 + The Fantastic Corporation + Giuseppe Greco + giuseppe.greco&fantastic.ch +5270 + da6d Technologies + David Spanburg + da6d&spacely.net +5271 + Comverse Network Systems + Vitaly Lisovsky + vlisovsk&comversens.com +5272 + w-Trade Technologies + Igor Khomyakov + ikh&w-trade.com +5273 + Unistra Technologies + Igor Khomyakov + ikh&w-trade.com +5274 + UNIF/X + Igor Khomyakov + ikh&w-trade.com +5275 + Malibu Networks + Ross Roberts + ross&malibunetworks.com +5276 + Hearme.com + Richard Katz + richkatz&hearme.com +5277 + SarangNET + JA-HUN KU + sarang&sarang.net +5278 + Swisscom.
AG + Thomas Abegglen + thomas.abegglen&swisscom.com +5279 + 2Support Directory Solutions + Rutger van Bergen + rbergen&2support.nl +5280 + Citi + Brian Geoghegan + brian.geoghegan&ssmb.com +5281 + Graeffet Communications + Thomas Aeby + aeby&graeff.com +5282 + Connected Systems + David McClintock + dcm&connectedsystems.com +5283 + CheckFree Corporation + Kelvin Tucker + KTucker&CheckFree.com +5284 + Filanet Corporation + Jose A. Gonzalez + jose&filanet.com +5285 + Network Experts, Inc. + Richard Hornbaker + Richard&NetworkExperts.com +5286 + CALENCE, Inc. + Richard Hornbaker + RHornbaker&Forte.net +5287 + Persistent Systems Private Limited + Shridhar Shukla + shukla&pspl.co.in +5288 + A-Plus Networks, Inc. + Randy Rankin + E-mail-aplus&quik.com +5289 + VidyaWeb. com, Inc. + Alak Deb + alakd&aol.com +5290 + Guide + Per Sundberg + per.sundberg&guide.se +5291 + Bloemenveiling Aalsmeer + Frans Woudstra + frans.woudstra&vba.nl +5292 + Pirus Networks + Howard Hall + hhall&ultranet.com +5293 + HYOSUNG CORPORATION + Jaehyoung, Shim + jaehshim&hyosung.co.kr +5294 + Infinity Trade Inc + Sheri + Shamsher&idt.net +5295 + "DigitalThink Inc." + Jon Skelton + jons&digitalthink.com +5296 + IPCell Technologies, Inc. + Chuck Homola + chomola&ipcell.com +5297 + Baxworks, Inc. + James H. Baxter + jim.baxter&baxworks.com +5298 + xputer.com + Lousseief Abdelwahab + aloussei&transtec.de +5299 + WorldCast Systems (formerly 'AUDEMAT") + Frederic Allard + it&worldcastsystems.com +5300 + Jetstream Communications + Gene Yao + gyao&jetstream.com +5301 + PentaSafe, Inc. + Kurt Goolsbee + k.goolsbee&pentasafe.com +5302 + Advanced Health Technologies + Douglas Price + price&ahtech.com +5303 + IPmobile Inc. + Hong Luo + hluo&ipmobile.com +5304 + Airslide Systems Inc. + Roie Geron + roie&airslide.com +5305 + IIT KANPUR + Narayana Murthy + chitturi&iitk.ac.in +5306 + XESystems, Inc. + Scott Weller + Scott.Weller&usa.xerox.com +5307 + Ciprico, Inc. + Brad Johnson + bjohnson&ciprico.com +5308 + SiemensS.A.(Portugal) + Paulo Inacio + Paulo.Inacio&net.siemens.pt +5309 + EdelWeb SA + Peter Sylvester + contact&edelweb.fr +5310 + Depository Trust & Clearing Corporation + William Izzo + wizzo&dtcc.com +5311 + F&F Inc. + Faiz Moinuddin + ffaiz786&yahoo.com +5312 + DIGICERT SDN + Amir Suhaimi Nassan + amir&digicert.com.my +5313 + Revlis, Inc. + Bradley D. Molzen + bmolzen&revlisinc.com +5314 + Baunwall A/S + Michael Frandsen + michaelf&baunwall.dk +5315 + CCIT(Beijing Creative Century Information Technology)Co. Ltd. + Yang Taohai + thyang&chinatelecom.cninfo.net +5316 + Guide Unusual + Jan Stenvall + Jan.Stenvall&unusual.se +5317 + Adaptive Computer Development Corp. + Troy Payne + troypayne&mediaone.net +5318 + Robert Bosch GmbH + Daniel Zendler + oid&de.bosch.com +5319 + EarthWatch Inc. + D.Todd Vollmer + tvollmer&digitalglobe.com +5320 + Surety Technologies Inc. + Wes Doonan + ietf&surety.com +5321 + Burrito.org + Jason Kraft + burrito&burrito.org +5322 + PADL Software Pty Ltd + Luke Howard + lukeh&padl.com +5323 + Telrad + Hilla Hartman + hilla.hartman&telrad.co.il +5324 + Delta Networks Inc. + Jones Huang + jones.huang&delta.com.tw +5325 + Cmgi + Paul Kiely + pkiely&cmgi.com +5326 + LightChip, Inc. + Kevin Short + kshort&lightchip.com +5327 + KIM Computing + Paul Baldam + pb&kimlab.com +5328 + North Communications Inc. + Trang A. 
Nguyen + tnguyen&infonorth.com +5329 + ShoreTel, Inc (formerly 'Shoreline Teleworks') + Kevin Coyne + kcoyne&shoretel.com +5330 + Spacelink Systems + Jim Fitzgerald + jfitz&spacelink.com +5331 + OI ELECTRIC CO., Ltd. + Yasuaki Nakanishi + yasuaki_nakanishi&ooi.co.jp +5332 + Information Freeway + Vanessa Schumert + VanessaSchumert&24.25.217.87 +5333 + Celera Genomics + Jim Jordan + James.Jordan&celera.com +5334 + START + Zhang Da Rong + zhangdr&start.com.cn +5335 + Multifunction Products Association + Craig Douglas + admin&mfpa.org +5336 + BlueKite.com + James Leonard + james_leonard&bluekite.com +5337 + Ciprico, Inc. + Brad Johnson + bjohnson&ciprico.com +5338 + ELDEC Corporation + Kaz Furmanczyk + kfurmanc&eldec.com +5339 + VisioWave + Thibault Dangreaux + Thibault Dangreaux&visiowave.com +5340 + CITI, S.A. de C.V. + Marcelo Rodriguez + crodrigu&citi.com.mx +5341 + PC Network Design + Jerry Sandusky + jerry.sandusky&sharp.com +5342 + Sensormatic + Robert Venditti + robert_venditti&swhouse.com +5343 + Lone Wolf ComputingGeorge + Giles + gsgiles&inferno.lonewolfcomputing.com +5344 + Originative Solutions Ltd. + Paul Richards + paul&originative.co.uk +5345 + SUNGMI TELECOM ELECTRONICS CO., Ltd. + Seung-Ok Lim + solim&sungmi.co.kr +5346 + SSM Health Care + DNS Administrator + DNSAdmin&ssmhc.com +5347 + GE Harris Aviation Information Solutions, LLC + Tom Berrisford + thomas.berrisford&ae.ge.com +5348 + TimeSpace Radio AB + Paer Sjoeholm + paer.sjoeholm&timespace.se +5349 + Inalp Networks Inc. + Peter Egli + peter.egli&inalp.com +5350 + Infotron System Corp + Mariano Gallo + mgallo&infotron.com +5351 + ATM S.A. + Jaroslaw Kowalski + jarek&atm.com.pl +5352 + Maelstrom + Steve Ferris + steve&maelstrom.org.uk +5353 + EVS Broadcast Equipment + Benoit Michel + b.michel&evs-broadcast.com +5354 + Simplement Ltd + Tzvika Chumash + tzvika&simplement.co.il +5355 + Eland Technologies + Mark Lenahan + mlenahan&elandtech.com +5356 + Object Oriented Pty Ltd. + Daryl Wilding-Mcbride + darylw&oopl.com.au +5357 + Evans Computer Consulting + Jeffrey R. Evans + webmaster&evanscomputers.com +5358 + enhanced Global Convergence Services (eGCS) + Mary Slocum + mclocum&egcsinc.com +5359 + InnoVentry + Christian du Croix deWilde + cdewilde&innoventry.com +5360 + Haedong + Park Kaemyoung + pigangel&haedong.re.kr +5361 + Westbridge Design Ltd. + Tim Riches + timriches&compuserve.com +5362 + QASYS CORP + C. Figueroa + cristian.figueroa&cgroupsa.com +5363 + R. Smith Engineering Co. + Gregory Swofford + gs&gomr.com +5364 + Certall Finland OY + Petri Puhakainen + petri.puhakainen&certall.fi +5365 + Neartek, Inc. + Mark Spitz + Mark.Spitz&neartek.com +5366 + Charlotte's Web Networks Ltd. + Yoel ferrera + yoel&cwnt.com +5367 + Telena S.p.A. + Daniele Trapella + dtrapella&telena.it +5368 + CoreExpress + Michael Walters + michael.walters&coreexpress.net +5369 + APD Communications Limited + Peter Stensones + peter.stensones&apdcomms.co.uk +5370 + BANCO ZARAGOZANO S.A. + Banco Zaragozano + comunicaciones&bancozaragozano.es +5371 + NetSubject Canada, Inc. + J. Di Paola + j.dipaola&attcanada.net +5372 + myCFO, Inc. + Andrew Ryan + andrewr&myCFO.com +5373 + Open Telecommunications Limited + Noel O'Connor + noelo&ot.com.au +5374 + Dirigo Incorporated + Kevin Muldoon + kevinm&dirigo.com +5375 + BAE SYSTEMS - Maritime Naval Ships + Phil Church + phil.church&baesystems.com +5376 + Oleane + Fabien Tassin + fta&oleane.net +5377 + TriNexus + Homer Robson + Homer.Robson&TriNexus.com +5378 + PrairieFyre Software Inc.
+ Clarke La Prairie + clarke&prairiefyre.com +5379 + Colonial State Bank + Steve McJannet + sjannet&sbnsw.com.au +5380 + CDOT + Mohammed Rafiq K + kmdrafiq&cdotb.ernet.in +5381 + SuperNova + Ron Heusdens + ronh&supernova.nl +5382 + IRIS Technologies, Inc. + Mark Bereit + mbereit&iristech.com +5383 + iFleet Inc. + John Quinn + jquinn&ifleet.com +5384 + Tsuruki Promotions + Michiko Nagai + tsurukipro&mc.net +5385 + City-NET CZ, s.r.o. + Ivo Machulda + im&point.cz +5386 + M-S Technology Consultants + R. Mike Smith + ms4260&gte.net +5387 + HyperFeed + Ken Felix + kfelix&hyperfeed.com +5388 + Network Alchemy Ltd. + Imerio Ballarini + imeriob&networkalchemy.co.uk +5389 + Inter-Tel + Sapna Natarajan + Sapna_Natarajan&inter-tel.com +5390 + OPNET Technologies Co., Ltd. + K. T. Wu + kwu&opnet.com.tw +5391 + Tyco Submarine Systems Ltd. + Tom Kunz + tkunz&submarinesystems.com +5392 + "Online Creations", Inc. + Anil Gurnani + anil&oncr.com +5393 + Renault + Philippe JOYEZ + philippe.joyez&renault.com +5394 + Gateway Inc. + John Sarraffe + john.sarraffe&gateway.com +5395 + Laurel Networks, Inc. + Ramesh Uppuluri + ramesh&laurelnetworks.com +5396 + BigChalk.com + Jerry D. Hedden + jerry_hedden&bigchalk.com +5397 + Standard and Poor's Fund Services + Simon Churcher + simonc&micropal.com +5398 + ElephantX + Anil Gurnani + anil&oncr.com +5399 + Extremis + George Cox + gjvc&extremis.net +5400 + Evercom systems International Inc. + Peter Cehn + rnd&evercom9.com.tw +5401 + Master Soft + Alberto Pastore + alberto&pastore.cc +5402 + IDF + Yaron Zehavi + yaronz&hotmail.com +5403 + Vircom + Clive Paterson + clive.paterson&vircom.com.au +5404 + eConvergence Pty Ltd. + Steve McJannet + ifax1&hotmail.com +5405 + Start Printer equipment co. Ltd. + Jiang-xufeng + jiangxf&start.com.cn +5406 + Nick Conte, Inc. + Nicholas J. Conte, Jr. + njcontejr&msn.com +5407 + NetSupport GmbH + Gerd Bierbrauer + gbi&netsupport-gmbh.de +5408 + Intellitel Communications + Jyri Syvaoja + jyri.syvaoja&intellitel.com +5409 + East West Consulting K.K. + Conan O'Harrow + oharrow&ewc.co.jp +5410 + Les Howard + Les Howard + les&lesandchris.com +5411 + SignalSoft Corporation + Maureen O'Neill + moneill&signalsoftcorp.com +5412 + Zantaz.com, Inc. + Ramesh Gupta + rgupta&zantaz.com +5413 + PeopleWeb CommunicationsInc. + William Webber + william&live.com.au +5414 + John & Associate + Frank Chan + fchan100&netvigator.com +5415 + Fujitsu Asia Pte Ltd + Choy Kum Wah + choykw&sg.fujitsu.com +5416 + Nesral + Bo Philip Larsen + bpl&nesral.dk +5417 + ABSA Group Ltd. + Gideons Serfontein + gideons&absa.co.za +5418 + Fortis, Inc. + Dan Bozicevich + Dan.Bozicevich&us.fortis.com +5419 + Cambridge Broadband Ltd. + John Naylon + snmp&cambridgebroadband.com +5420 + Spider Technologies + Michael J. Donahue + mjd2000&email.com +5421 + Marietta Dodge Inc. + Richard J.Bishop + mdcars&bellsouth.net +5422 + RHC Enterprises Inc. + Richard J. Bishop + mdcars&bellsouth.net +5423 + McLeodUSA + Ralph Trcka + rtrcka&mcleodusa.com +5424 + Columbia Diversified Services + Naeem Igbal + naeem&cdsx.org +5425 + NetSpace Online Systems + Aris Theocharides + aris&netspace.net.au +5426 + GadLine Ltd. + Yossi Zadah + yossi&gadline.co.il +5427 + stroeder.com + Michael Ströder + michael&stroeder.com +5428 + ENDFORCE, Inc.
+ Mark Anthony Beadles + mbeadles&endforce.com +5429 + Propack Data Soft- und Hardware Entwicklungs GmbH + Michael Stroder + x_mst&propack-data.com +5430 + Masterguard GmbH + Tobias Grueninger + tobias.grueninger&masterguard.de +5431 + LM Digital + Hans Nawrath Meyer + nawrath&redlink.cl +5432 + SightPath + Mark Day + mday&sightpath.com +5433 + Netonomy + Guillaume Le Stum + gls&netonomy.com +5434 + Advanced Hi-Tech Corporation + Henry Lai + ycl&aht.com +5435 + OvisLink Corp + Span Hsu + ovislink&ms24.hinet.net +5436 + OPEN + Dipl.Ing Florek + open&ba.telecom.sk +5437 + Pensar Corporation + Richard Baxter (CTO) + rbaxter&pensar.com +5438 + Utrecht School of Arts + Gerard Ranke + gerard.ranke&kmt.hku.nl +5439 + Parallel Ltd. + Tim Moore + Tim.Moore&parallel.ltd.uk +5440 + Primeon, Ltd. + Stewart Hu + shu&primeon.com +5441 + The Timken Company + Terry A. Moore + tmoore&timken.com +5442 + New Zealand Post Limited + Ron Hooft + Ron.Hooft&nzpost.co.nz +5443 + Nekema.com + Omer Kose + omerk&nekema.com +5444 + Joe Minineri + Joe Minieri + jminieri&mindspring.com +5445 + Metrostat Technologies, Inc. + John Kevlin + John.Kevlin&Metrostat.com +5446 + Skygate Technology Ltd + Pete Chown + pc&skygate.co.uk +5447 + Aeolon Research + Michael Alyn Miller + malyn&aeolon.com +5448 + Kykink Communications Corp + Kenny Chou + kennychou&kylink.com.tw +5449 + OneNetPlus.com + Joseph Sturonas + Joe.Sturonas&OneNetPlus.com +5450 + I-Link Inc. + Rami Shmueli + rami&vianet.co.il +5451 + SEGAINTERSETTLE AG + Marcel Schuehle + marcel.schuehle&sisclear.com +5452 + Business Layers + Ziv Katzman + zivk&businesslayers.com +5453 + Intelis, Inc + Leonard Thornton + LeonardT&Intelis-inc.com +5454 + Trango Networks, LLC + Christopher A. Gustaf + chris&gotrango.com +5455 + Artemis Management Systems + Murray A. Snowden + Murray_Snowden&artemispm.com +5456 + FOCUS Online GmbH + Robert Wimmer + rwimmer&focus.de +5457 + CastleNet Technology Inc. + Eugene Chen + eugene&castlenet.com.tw +5458 + Gupta + Alok Gupta + a.dadarya&mailcity.com +5459 + SANtools, Inc + David A. Lethe + david&santools.com +5460 + BroadLink Communications, Inc. + John Whipple + john&broadlink.com +5461 + KSI Inc + Dick Mosehauer + rmosehauer&ksix.com +5462 + Resume.Com + Marc Poulin + mcp&resume.com +5463 + Eduardo Fermin + Eduardo Fermin + ejfermin&hotmail.com +5464 + Manukau Institute of Technology + Christopher Stott + chris&manukau.ac.nz +5465 + eBusiness Technologies + David Parker + dparker&ebt.com +5466 + International Biometric Society, IBS + Nanny Wermuth + nanny.wermuth&uni-mainz.de +5467 + TELEFONICA INVESTIGACION Y DESARROLLO + Javier Marcos Iglesias + jmarcos&tid.es +5468 + Actelis Networks + Edward Beili + edward.beili&actelis.com +5469 + Codebase + Kevin Lindley + kevin.lindley&codebase.demon.co.uk +5470 + Transparity Limited + Teow-Hin Ngair + sysadmin&transparity.com +5471 + Switzerland + Grogg Peter + domainmanage&post.ch +5472 + timeproof + Jorg Seidel + seidel&timeproof.de +5473 + AlgaCom + Stefan Mueller + stefan.mueller&algacom.ch +5474 + Ericsson Ahead Communications Systems GmbH + Martin Weiss + martin.weiss&aheadcom.com +5475 + Thysys Engineering + Steven Pitzl + spitzl&thysys.com +5476 + Apex Inc. + Todd Davis + todd.davis&apex.com +5477 + Netattach, Inc + Mike Young + myoung&netattach.com +5478 + Critical Path Berlin/LEM + Oliver Korfmacher + oliver.korfmacher&cp.net +5479 + Pinnacle Data Systems Inc (PDSi) + Tony Beckett + tony.beckett&pinnacle.com +5480 + T.
Sqware Incorporated + Ronald Naismith + rnaismith&tsqware.com +5481 + Agencia de Certificion Electronica + Miguel Angel Perez Acevedo + mapa&ace.es +5482 + Murakami Electro-Communication Laboratories, Inc. + Mamoru Murakami + murakami&sphere.ad.jp +5483 + Netensity, Inc. + Manlio Marquez + mmarquez&netensity.com +5484 + University of the Aegean + Thomas Spyrou + tsp&aegean.gr +5485 + The OPEN Group Ltd + Jeremy Smith + jeremy.smith&open.co.nz +5486 + China Merchants Bank + Xiong Shaojun + xsj&cmbchina.com +5487 + Multitrade Spa + Luca Ceresa + luca.ceresa&ilsole24ore.it +5488 + Temblast + Renate Pyhel + snmp&temblast.com +5489 + ALS International Ltd. + Alan Ramsbottom + acr&als.co.uk +5490 + CommNav, Inc. + Andrew Libby + alibby&perfectorder.com +5491 + UPS Manufacturing + Mr. Zampieri + e.pesente&riello-ups.com +5492 + Telephia + Andrew Northrop + anorthrop&telephia.com +5493 + Palm Computing + Fermin Soriano + fermin_soriano&palm.com +5494 + Marathon Innovations, Inc. + Wayne Franklin + waynef&marathoninnovations.com +5495 + Convergence Equipment Company + Mike Brent + mike&gxc.com +5496 + GEMPLUS + Philippe Leblanc + philippe.leblanc&gemplus.com +5497 + Trondent Development Corp. + David Wood + dwood&trondent.com +5498 + Kardinia Software + Joseph Fernandez + jfernand&kardinia.com +5499 + YhKim Co. Ltd. + So-Young Hwang + young&juno.cs.pusan.ac.kr +5500 + Gemeentelijk Havenbedrijf Rotterdam + Jouke Dijkstra + jouke.dijkstra&port.rotterdam.nl +5501 + NTT PC Communications, Inc. + Mamoru Murakami + murakami&nttpc.co.jp +5502 + Canon Finetech Nisca Inc. + Moriyoshi Inaba + inaba.moriyoshi&mail.canon +5503 + Orinda Technology Group + Min Yu + yumin&home.com +5504 + Zhone Technologies, Inc. + Allen Goldberg + agoldberg&zhone.com +5505 + Metrostat Technologies, Inc. + John Kevlin + John.Kevlin&Metrostat.com +5506 + Digital-X, Inc. + Ramesh Sharma + rsharma&digital-x.com +5507 + Tight Informatics + Dennis Mulder + dennis.mulder&port.rotterdam.nl +5508 + SWOD Org. + Dennis Mulder + dennis.mulder&port.rotterdam.nl +5509 + B&E Construction Co. Dennis + Mulder + dennis.mulder&port.rotterdam.nl +5510 + PrismTech + Steve Osselton + steve&prismtechnologies.com +5511 + syscall() Network Solutions GbR + Olaf Schreck + chakl&syscall.de +5512 + GMD FIRST + Bernd Oestmann + boe&first.gmd.de +5513 + iXL + MIS Admin + tocadmin&ixl.com +5514 + Timeline Technology Inc. + Kevin Armstrong + karmstrong&timelinetech.com +5515 + Directory Tools and Application Services, Inc. + Bruce Greenblatt + bgreenblatt&directory-applications.com +5516 + SecureWorks, Inc. + Shu Dong + sdong&secureworks.net +5517 + Rapid5 Networks + Sidney Antommarchi + sid&rapid5.com +5518 + TDS Telecom + Jim OBrien + jim.obrien&tdstelecom.com +5519 + LSITEC + Volnys Borges Bernal + volnys&lsi.usp.br +5520 + Alfred Wegener Institute for Polar and Marine Research + Siegfried Makedanz + smakedanz&AWI-Bremerhaven.DE +5521 + St. John Health System + Pamela J.Prime + pam.prime&stjohn.org +5522 + Cybernet Corporation + Shahril Ghazali + shahrilg&cybernetcorporation.org +5523 + GRCP + Jean-Pierre Gourdon + jpg&compuserve.com +5524 + Emory University + Alan Dobkin + ADobkin&Emory.Edu +5525 + SSF + Vad Osadchuk + ssf65&usa.net +5526 + Adero, Inc. + Paul Biciunas + pbiciunas&adero.com +5527 + Context Systems Group + Eolo Lucentini + elucentini&csg.it +5528 + NetBotz + John Fowler + john&netbotz.com +5529 + Neoforma.com + Girish Venkat + girish&neoforma.com +5530 + Cescom Inc. 
+ Alex Fournier + alex.fournier&cescom.ca +5531 + Mien Information Solutions + Diane Kiesel + dkiesel&mien.com +5532 + Q-Telecell GmbH + Holger Vogel + holger.vogel&iq-wireless.com +5533 + WideAwake Ltd + Patrick Knight + p.knight&wideawake.co.uk +5534 + Vogon AB + Daniel Lundqvist + daniel.lundqvist&vogon.se +5535 + 3rd Generation Partnership Project 2 (3GPP2) + Allen Long + along&cisco.com +5536 + Quintus Corporation + Kevin McKenna + kevin.mckenna&quintus.com +5537 + Comdial Corporation + Doug Whitman + dwhitman&comdial.com +5538 + Micron Tech. Information co. kr + Bum Soo Park + sworn&netian.com +5539 + Cybertek Holdings + Buyle You + ybl&cybertek.co.kr +5540 + RWTH Aachen University + Guido Bunsen + Bunsen&itc.RWTH-Aachen.DE +5541 + Paragea Communications, Inc + Laique Ahmed + lahmed&paragea.com +5542 + eOn Communications Corporation + Dave A. Kelly + dkelly&eoncc.com +5543 + INIEMP HOLDINGS CORPORATION S.L. + Alejandro Sanchez Muro + alessandro&mundivia.es +5544 + Thomson-CSF Systems Canada + Jeff Young + jdyoung&thomson-csf.ca +5545 + TANTAU Software Inc. + Sanjay Laud + Sanjay.Laud&tantau.com +5546 + MailVision Inc. + Yossi Cohen + yossi&talkmail.com +5547 + BSQUARE Corporation + Paula Tomlinson + snmp&bsquare.com +5548 + Cobalt Networks + Larry Coryell + lcoryell&cobalt.com +5549 + TimesTen Performance Software + Joe Chung + chung&timesten.com +5550 + Monggo, Inc. + Edgar Tu + edgar&keymail.com +5551 + Oscilloquartz, S.A. + Jorge Tellez + tellez&oscilloquartz.com +5552 + Air Atlanta Icelandic + Jon Agust Reynisson + jonni&atlanta.is +5553 + Macromedia eBusiness Solutions + Jex + ariadev&andromedia.com +5554 + SpotCast Communications + Eric Johnston + ejohnst&sccsi.com +5555 + Authentic8 pty Ltd + Philip Mullarkey + Philip.Mullarkey&authentic8.com +5556 + Service Factory + Torsten Jacobsson + torsten&servicefactory.com +5557 + OneMain.com + John Clinton + john.clinton&eng.onemain.com +5558 + S-Link Corporation + Seamus Gilchrist + sgilchrist&ss7-link.com +5559 + Vitria Technology, Inc. + Matthew Doar + iana-snmp&vitria.com +5560 + The Color Registry + Gwen St. Clair + webmaster&adoptacolor.com +5561 + 2nd Wave, Inc. + Chris Cowan + chris.cowan&2nd-wave.com +5562 + Redknee Inc. + Joel Hughes + joel.hughes&redknee.com +5563 + Ola Internet + Antonio Narvaez + anarvaez&olanet.es +5564 + Omega Enterprise + Dana Dixon + ddixon11&gvtc.com +5565 + Syswave Co., Ltd + Chang Pil Kim + scarface&syswave.com +5566 + VisionGlobal Network Corporation + Jess Walker + jwalker&vgnlabs.com +5567 + Riverstone Networks + Michael MacFaden + mrm&riverstonenetworks.com +5568 + Southview Technologies, Inc. + Scott Parker + scott.parker&southernview.com +5569 + Soluzioni Tecnologiche Bancarie s.r.l. + Michele Marenzoni + marenz&tin.it +5570 + Sony Pictures Entertainment + Marc-Alan Dahle + marc-alan_dahle&spe.sony.com +5571 + GetThere.Com + Mani Balasubramanian + mani&getthere.com +5572 + HoTek TechnologyCo., Ltd. + Simon Hsieh + hansome&ms1.hinet.net +5573 + Tong + Sprinna Wu + bonbear&netease.com +5574 + BankEngine Inc. + Alicia da Conceicao + alicia&bankengine.com +5575 + CertEngine Inc. + Alicia da Conceicao + alicia&certengine.com +5576 + T.I.L.L. Photonics GmbHAnselm Kruis + (Network Mangagement Department) + admin&till-photonics.de +5577 + Persimmon Development + Kevin Timm + kevindtimm&home.com +5578 + New Mexico State University + Ian Logan + ian&nmsu.edu +5579 + Mercata, Inc.
+ Shanke Liu + shankel&mercata.com +5580 + EXEJone + Gkhrakovsky + gkhrakovsky&hotmail.com +5581 + Communications Networks of Africa (GH) Ltd (NETAFRICA) + Bill Kingsley + kntb&hotmail.com +5582 + iTRUST Solutions AG + Markus Glaus + markus.glaus&itrustsolutions.com +5583 + MD Information Systems + Alexander Rovny + rovny&mdis.ru +5584 + General Bandwidth + Cuong Nguyen + cuong.nguyen&genband.com +5585 + Very Clever Software Ltd. + Mike Pellatt + M.Pellatt&vcs.co.uk +5586 + IPWireless Inc. + Andrew Williams + awilliam&ipwireless.com +5587 + Flughafen Muenchen GmbH + Harald Englert + harald.englert&munich-airport.de +5588 + Thomcast Communication, Inc.Comwave Division ("Comwave") + Carl P. Ungvarsky + cungvarsky&thomcastcom.com +5589 + Synopsys, Inc. + Hostmaster + hmaster&synopsys.com +5590 + Marimba Inc. + Simon Wynn, Engineering Manager + simon&marimba.com +5591 + SCTE + standards staff + standards&scte.org +5592 + Wilson & Sanders, Inc. + Michael Wilson + brainfried&earthlink.net +5593 + Magnum Technologies Inc. + Tim Hadden + haddent&magnum-tech.com +5594 + Koankeiso Co., Ltd. + Fumihito Sone + sone&koan.co.jp +5595 + Ingrian Systems, Inc + Glenn Chisholm + glenn&thecount.net +5596 + Tandberg ASA + Stig A. Olsen + stig.arne.olsen&tandberg.no +5597 + Meinberg + IANA Registry Administration + iana-admin&meinberg.de +5598 + Submarine Warfare Systems Centre + David Laarakkers + David.laarakkers&dao.defence.com.au +5599 + Comp Sci & Eng, 'De Montfort University' + Jonathan Hughes + jrh&dmu.ac.uk +5600 + Clearstream Services + Paul Rees + prees&cedelglobalservices.com +5601 + Clearstream Banking + Paul Rees + press&cedelglobalservices.com +5602 + T/R Systems, Inc. + Mike Barry + mbarry&trsystems.com +5603 + Capital One Financial Services + Tony Reynolds + tony.reynolds&capitalone.com +5604 + digit-safe + Allan Wind + wind&freewwweb.com +5605 + William Data Systems Ltd. + Liam Hammond + liam.hammond&willdata.com +5606 + DocuSign France + Erwann Abalea + dsfcompliance-risk-safety&docusign.com +5607 + Erwann ABALEA + Erwann ABALEA + erwann&abalea.com +5608 + Red Planet Technologies + Brant Jones + brant&redplanettechnologies.com +5609 + Smartleaf, Inc. + Daniel Hagerty + hag&smartleaf.com +5610 + Exbit TechnologyA/S + Morten Jagd Christensen + mjc&exbit.dk +5611 + vmunix.org + Torsten Blum + torstenb&vmunix.org +5612 + Korea Data Communications + yu-mi, Park + jenesys&kdcre.co.kr +5613 + tdressler.net (formerly 'SQLcompetence') + Thomas Dressler + tdressler&tdressler.net +5614 + SonyBPE + Nneka Akwule + nneka.akwule&spd.sonybpe.com +5615 + Inherit S AB + Roland Hedayat + roland&inherit.se +5616 + TEKOPS + David Beecher + dbeecher&tekops.com +5617 + Trio Communications 2000 Pty. Ltd + Andreas Mann + Andreas&trio.com.au +5618 + WareNet Inc. + Noah Campbell + develop&ware.net +5619 + Amaranth Networks Inc. + Daniel Senie + dts&senie.com +5620 + CFX Communications + Brian Caverley + cfxi&home.com +5621 + Heriot-Watt University + David Morriss + D.J.Morriss&hw.ac.uk +5622 + DreGIS GmbH + Gunter Baumann + gunter.baumann&DreGIS.com +5623 + KPMG + Steve Christensen + schristensen&kpmg.com +5624 + Enterasys Networks + Charles N. McTague + cmctague&enterasys.com +5625 + A. Gell, CxA + Allen Gell + a.gell&196.3.74.237 +5626 + Internet Barter Inc.aka Bartertrust.com + Thomas J. Ackermann + tjack&bartertrust.com +5627 + Hitachi Process Computer Engineering, Inc. + Tatsuya Kawamata + kawamata&hipro.hitachi-hipro.co.jp +5628 + X.O. 
Soft, Ltd + Sergei Kaplan + serg&xosoft.com +5629 + Continuus Software Corporation + Van Hoang + vhoang&continuus.com +5630 + ExiO Communications Inc. + Jay Hong + jhong&exio.com +5631 + Alliance Systems, Inc. + John Morrison + john.morrison&alliancesystems.com +5632 + TelePassport Hellas S.A. + Ikonomidis Kyriakos + kikonomidis&telepassport.gr +5633 + BASF Computer Services GmbH + Damian Langhamer + damian.langhamer&basf-c-s.de +5634 + Universiteit van Amsterdam + Marijke Vandecappelle + m.i.vandecappelle&uva.nl +5635 + Dale W. Liu + Dale W. Liu + dliu&pipeline.com +5636 + Dignos EDV GmbH + Kai Morich + kai.morich&dignos.com +5637 + IDN Technology Inc. + Luhai + luhai&bupt.edu.cn +5638 + PK Electronics + Lee Bong Peng + bplee&pkelectronics.com.my +5639 + Dept. Of Biology Western KY University + Maxx Christopher Lobo + maxx&linux.wku.edu +5640 + Lama Law Firm + Ciano Lama + Ciano4&aol.com +5641 + Anthem Inc. + Matt King + matt.king&anthem.com +5642 + MicroCast, Inc. + Mark Thibault + mthibault&microcast.net +5643 + University of Arizona + Todd Merritt + tmerritt&u.Arizona.EDU +5644 + PassEdge + George Peden + george&passedge.com +5645 + BowStreet Software + Michael Burati + mburati&bowstreet.com +5646 + Onyx Networks + Jim Pfleger + jpfleger&onyx.net +5647 + Emperative, Inc. + Tim McCandless + tmccand&emperative.com +5648 + L-3 Communications (PrimeWave Communications) + Muralidhar Ganga + mganga&pwcwireless.com +5649 + Webswap Inc. + Vikram D. Gaitonde + vikram&webswap.com +5650 + Merck & Co., Inc. + David Van Skiver + david_van_skiver&merck.com +5651 + Maipu Electric Industrial Co., Ltd + Zheng Xue + maipu2&mail.sc.cninfo.net +5652 + Kraig Sigman + Kraig Sigman + deadeye&laf.cioe.com +5653 + CSP + Massimo Milanesio + milanesio&csp.it +5654 + Ando Electric Corporation + Kazuki Taniya + taniya-k&ando.co.jp +5655 + P-Cube Ltd. + Rony Gotesdyner + ronyg&p-cube.com +5656 + Monmouth University + Robert Carsey + rcarsey&monmouth.edu +5657 + Universidad de La Coruna + Manuel J. Posse + mposse&udc.es +5658 + ISL, Institute of Shipping Economics and Logistics + Marc Brueckner + snmp&isl.org +5659 + CoProSys Inc. + Ales Makarov + amakarov&coprosys.cz +5660 + XI'AN DATANG TELEPHONE Corp. + Weiyuan + yw2000&263.net +5661 + T-Mobile + Sean Hinde + sean.hinde&t-mobile.co.uk +5662 + Nordic Global Inc. + Holger Kruse + kruse&nordicglobal.com +5663 + TecnoLogica Informatica + Antonio Marcos Ferreira Soares + amfs&tecnologica.com.br +5664 + Monastery of the Glorious Ascension, Inc. + Fr. Maximos Weimar + frmaximos&monastery.org +5665 + Vertical One, Inc. + Dima Ulberg + dulberg&verticalone.com +5666 + Servevcast + Philip Daly + philip&servecast.com +5667 + Teldata Computer Industries, Inc. + Derek Williams, Vic Mitchell + teldatac&mindspring.com +5668 + Mycroft Inc. + Jon Freeman + jon.freeman&mycroftinc.com +5669 + Digital Island + Maureen Byrne + mbyrne&digisle.net +5670 + Redwood Technologies Ltd. + Kevin Robertson + kpr&redwoodtech.com +5671 + Horus IT GmbH + Brigitte Jellinek + oid&horus.at +5672 + CIENA Corporation (formerly 'ONI Systems Corp.') + Terry Gasner + tgasner&ciena.com +5673 + eConvergent, Inc. + Michael Jones + michael.jones&econvergent.com +5674 + Texcel Technology Plc. + Andy McLeod + Andy.McLeod&texceltechnology.com +5675 + Genosys Technology Management Inc. + Jerry Wong + jwong&genosys.net +5676 + DataFlow/Alaska, Inc. + Eric Hutchins + eric&dataflowalaska.com +5677 + Clunix, Inc.
+ Yongjae Lee + yjlee&clunix.com +5678 + Stalker Software, Inc + Vladimir Butenko + butenko&stalker.com +5679 + EWE & EVE's Gourds & Things + Eugene & Elaine Endicott + eweeve&fidnet.com +5680 + Windsor Group + Brian Dorminey + dorminey&popmail.com +5681 + fruittm + Ari Jb Ferreira + arijbf&zipmail.com.br +5682 + Synergon Ltd. + Laszlo Vadaszi + www.synergon.hu +5683 + Reserved + RFC-pti-pen-registration-10 + ---none--- +5684 + Metro Optix, Inc. + Beecher Adams + beecher.adams&metro-optix.com +5685 + DataLink SNMP Solution + David Cardimino + dcardimino&datalinksnmp.com +5686 + A H Computer Company + Shawky Ahmed + shawky&idsc1.gov.eg +5687 + Icon Laboratories, Inc. + Alan Grau + alan_grau&icon-labs.com +5688 + StrataSource, Inc. + Mark D. Nagel + mnagel&stratasource.com +5689 + Net & Sys Co., Ltd + Hansen Hong + hansenh&hitel.net +5690 + Agri-Com Holdings + Hendri Du toit + afm&bhm.dorea.co.za +5691 + SilverPlatter Information + Kevin Stone + kstone&silverplatter.com +5692 + Kilfoil Information Technologies, Inc. + John J. Kilfoil + jk&kilfoil.com +5693 + Accordion Networks + Dharini Hiremagalur + dharini&accordionnetworks.com +5694 + Integrated Digital Solutions Limited + Trevor Turner + trevor.turner&ids.co.nz +5695 + bbq.com + Drew Riconosciuto + drew&bbq.com +5696 + Walter Graphtek GmbH + Peter Jacobi + pj&walter-graphtek.com +5697 + HanseNetTelefongesellschaft mbH + Peter Evans + evans&hansenet.com +5698 + Digitrans + Alan Gustafson + asgustafson&hotmail.com +5699 + Cornerstone Solutions Corporation + Karl Wagner + khwagner&cornerstonecorp.com +5700 + University of the West Indies + Feisal Mohammed + feisal&uwi.tt +5701 + Maple Networks, Inc. + Ravi Manghirmalani + ravi&maplenetworks.com +5702 + Touch Technology International + Youri Baudoin + ybaudoin&touchtechnology.com +5703 + NVIDIA Corporation + Mark Krueger + mkrueger&nvidia.com +5704 + CITGO Petroleum Corporation + Rick Urdahl + rurdahl&citgo.com +5705 + DTA + Don Tuer + dtaadv&ionsys.com +5706 + LGS Group Inc. + Don Tuer + Don_Tuer&lgs.com +5707 + Fiberspace Unlimited, LLC + Russ Reisner + russ&fiberspace.net +5708 + CTS Network Services + Jim Fitzgerald + jfitz&cts.com +5709 + EDS/CFSM + Robert Meddaugh + robert.meddaugh&eds.com +5710 + Wellknit + Aruna Sangli + asangli&wellknit.com +5711 + ECCS, Inc. + Dan Davis + dand&eccs.com +5712 + System Integrators, Incorporated + Richard Martin + rmartin&sii.com +5713 + Niksun Inc. + Kerry Lowe + klowe&niksun.com +5714 + Insh_Allah + Gerrit E.G. Hobbelt + Ger.Hobbelt&insh-allah.com +5715 + Enigma Enterprises + Douglas Fraser + douglas.fraser&perth.ndirect.co.uk +5716 + WebSpectrum Software Pvt. Ltd. + Rathnakumar K S + info&wsspl.com +5717 + UUcom + Matthew Whalen + dakota&uucom.com +5718 + Cellit, Inc. + Jeff Stout + jstout&cellit.com +5719 + PNC Financial Services Group + Jayme A. DiSanti + jayme.disanti&pncbank.com +5720 + iMimic Networking, Inc. + Y. Charles Hu + ychu&imimic.com +5721 + IntellOps + Mitch Shields + mshields&intellops.com +5722 + OPNET Technologies, Inc (formerly 'Altaworks Corporation') + Edward Macomber + tmacomber&opnet.com +5723 + SAMAC Software GmbH + Markus Weber + markus.weber&samac.com +5724 + Cicero Communications, Inc. + Deborah Scharfetter + deborah&scharfetter.com +5725 + Xel Communications + Marcel Wolf + wolfman&xel.com +5726 + Lyondell Chemical Company + James Epperson + james.epperson&lyondell.com +5727 + Smart Card Applications Pty Limited + Jon Hughes + jon.hughes&smartcard.com.au +5728 + K Ring Technologies + Simon P. 
Jackson + jacko&kring.co.uk +5729 + SQLI + Stephane Cachat + scachat&sqli.com +5730 + Simpson Professional Services + Stephen Simpson + steve&sybase-dba.com +5731 + DJM Enterprises + Dan Maus + dan.oid&MyNet.org +5732 + One, Inc - Plano + Kent Perrier + kent.perrier&oneco.net +5733 + Dept.3,ISCAS(Institute of Software, the Chinese Academyof Sciences + Mr. Wu Zhimei + WZM&isdn.iscas.ac.cn +5734 + FABRICA NACIONAL DE MONEDA Y TIMBRE - REAL CASA DE LA MONEDA + Victor Jimenez + vjimenez&fnmt.es +5735 + EBSnet Inc. + Sarah Richardson + sarah&ebsnetinc.com +5736 + Power Conversion Products, LLC + Brian Click + brianc&pcp.com +5737 + E-Commerce Enterprises, LLC + Gabrielle Mancuso Alexandra + Gabrielle&mail.com +5738 + Vovida Networks + Tom Maneri + tmaneri&vovida.com +5739 + Xpeed, Inc. + Sang Park + sang.park&xpeed.com +5740 + Birionic Pvt Ltd + Anil/Leekha + leekhaanil&usa.net +5741 + a2zcom + Anil/Leekha + leekhaanil&usa.net +5742 + S.S.C. 'HENGELO' + T. Smit + ssc&planet.nl +5743 + GT Group Telecom Services Corp. + George Macri + gmacri&gt.ca +5744 + TARSEC Inc. + Pascal Buchbinder + buchbinder&tarsec.com +5745 + Ericsson Nikola Tesla d.d. + Sebastijan Mrkus + sebastijan.mrkus&etk.ericsson.se +5746 + Lanex Sp. z o.o. + Dariusz Kusinski + design&lanex.lublin.pl +5747 + Bluetail AB + Martin Bjorklund + mbj&bluetail.com +5748 + Applied Expert Systems + David Cheng + davec&aesclever.com +5749 + TGS-NOPEC Geophysical Company + Rebekah Hayes + rebekah&tgsnopec.com +5750 + General Mills + Mike Meinz + Mike.Meinz&GenMills.com +5751 + Illumination Enterprises, Inc. + Christopher J. Dole + ChrisDole&IEI-Soft.com +5752 + HyperSoft, Inc. + Fred Waite + fwaite&hypersoft.net +5753 + University of Maribor + Prof. Zarko CUCEJ, Ph.D. + zarko.cucej&uni-mb.si +5754 + Clearstream International + Phil Ratcliffe + pratcliffe&clearstream.net +5755 + ITS (UK) Ltd. + Kim + domaindetectives&usa.net +5756 + Supertel , JSC + Sergei Lebedev + ls&supertel.ru +5757 + Trilithic + Gregg Rodgers + grodgers&trilithic.com +5758 + Stins Coman + Alexander Skorokhodov + askoroh&stinscoman.com +5759 + bridges.com + Craig Skelton + cskelton&bridges.com +5760 + Bell Atlantic Mobile + Eladio Gonzalez + GonzaEL&bam.com +5761 + Hannetware Inc. + Tae-Won Ham + kyungbok&hannetware.com +5762 + Interland + jskim + gstar&nownuri.net +5763 + DB POWER ELECTRONICS (P) Ltd. + S.G. Waghmare + dbhouse&vsnl.com +5764 + Jazzey GmbH + Andreas Oberhuemer + oberhuemer&jazzey.com +5765 + aXess-pro networks GmbH + Oliver Schoenfeld + Oliver.Schoenfeld&axess-pro.de +5766 + IBM Global Services + brian Bainter + bainterb&us.ibm.com +5767 + Quark, Inc. + Jeff Schreiber + jschreiber&quark.com +5768 + Polish-Japanese Institute of Information Technology + Tomasz Fornalik + fornalik&pjwstk.waw.pl +5769 + HealthMagic, Inc. + Robert Sturkie + robert.sturkie&healthmagic.com +5770 + Medepass.com, Inc. + John R. Hanson + jono&medepass.com +5771 + Cisco Systems, Inc. + Murray Mar + mmar&cisco.com +5772 + C-it + Arjen de Ronde + Arjen.de.Ronde&C-it.nl +5773 + Equiinet Ltd. + Jon Summers + jsummers&equiinet.com +5774 + Beijing Telecable Network System Ltd. + Mr. Fanbin + francis&telecable.com.cn +5775 + Advanced Technology Solutions International + Pete Ellis + pete.ellis&.atsi-uk.com +5776 + Express Scripts, Inc. + Tony Strand + astrand&express-scripts.com +5777 + Reserved + RFC-pti-pen-registration-10 + ---none--- +5778 + Universidad Autonoma de Madrid + Javier Martinez Rodriguez + Javier.Martinez&ii.uam.es +5779 + Data-GUYS Inc. + Christopher J.
Born + bornc&mindspring.com +5780 + PDA Verticals Corp. + Dave Carrigan + dave&pdaverticals.com +5781 + Jordan Network Engineering + Tom Jordan + tjordan&doit.wisc.edu +5782 + University of Wisconsin System + Jonathan J Miner + miner&doit.wisc.edu +5783 + BROADPAC Communications + Murali Sarjapuram + muralis&broadpac.com +5784 + Cogita Ltd + Allan Brearley + allan.brearley&cogita.co.uk +5785 + Intershop Communications AG + Jan Hamel + notifications&intershop.de +5786 + Mc Coy Marine Consultants + Captain R.D. Mc Coy + Boxcarbobmccoy&netscape.net +5787 + Adventist Health + Tracy McCurdy + McCurdTJ&ah.org +5788 + Softdev Corp. + Allan Schougaard + seeger&softdevcorp.com +5789 + MobileQ.com Inc. + Ayaz Somani + ayaz.somani&mobileq.com +5790 + Shawn Starr + Shawn Starr + shawn.starr&rogers.com +5791 + Compu-Mentor, Inc. + Mike Noland + mike&compu-mentor.net +5792 + Dean Gakis + Dean Gakis + dean&gakis.org +5793 + The Edge Consultants + Shiraz Shahabudeen + shiraz&edge.com.sg +5794 + OBR CTM + Christopher Bialas + snmp.admin&ctm.gdynia.pl +5795 + BPT TELBANK SA + Bernard Medrzyvcki + Bernard.Medrzycki&telbank.pl +5796 + STC Informatik + Dominik Steiner + dominik.steiner&stc-informatik.ch +5797 + Western Power Distribution + Jim Buckley + jbuckley&westernpower.co.uk +5798 + VIACCESS S.A. + Mme Laurence BECQ + laurence.becq&francetelecom.fr +5799 + Nyherji + Petur Eythorsson + petur.eythorsson&nyherji.is +5800 + Cruise Controls Pvt. Ltd.- FRESNEL + Mangesh Kale + mangeshk&cruise-controls.com +5801 + Second Opinion Software + Richard Shank + Corp&2Opinion.com +5802 + Electroline Equipment Inc. + Frederick Plante + fp&electrolinequip.com +5803 + 2AB, Inc. + Sam Lumkin + slumpkin&2ab.com +5804 + Lantern Communications + Steven Krichman + krichman&lanterncom.com +5805 + Televideo, Inc. + Chang Sae Lee + cslee&televideo.com +5806 + SK Telecom + Dongkie Leigh + galahad&sktelecom.com +5807 + Flora van Kesteren Tuinadviezen + Sander van Kesteren + svkesteren&hetnet.nl +5808 + INRANGE Technologies Corporation + Steve Zetye + steve.zetye&inrange.com +5809 + Nieden-Nsm + Andreas Nieden + Andreas.Nieden&nieden-nsm.de +5810 + Global Crossing + Barbara Roseman + ipadmin&gblx.net +5811 + The GoldParrot Corporation + Brian P. Michael + bmichael&goldparrot.com +5812 + Coriolis Networks + Anil Navkal + anil&coriolisnet.com +5813 + The OpenNMS Group, Inc. + Jeff Gehlbach + jeffg&opennms.com +5814 + GCI Communications Corp + GCI Registrar + registrar&gci.com +5815 + Optima Tele.com, Inc. + Ralf Doewich + ralf.doewich&optimatele.com +5816 + Lockstep Systems + Karl Forster + kforster&lockstep.com +5817 + High Precision Record Company Limited + Aiyaret Birden + ben&hpr.net +5818 + Cherry Cyber Net + Aiyaret Birden + ben&birden.com +5819 + X.Net Ltda + Nain Esmelyn Daza R + xnetltda&col3.telecom.com.co +5820 + Flyforms + Kefu Zhou + kefuzhou&hotmail.com +5821 + Orangesoft, Inc. + Watanabe Naoaki + kitarou&orangesoft.co.jp +5822 + Reynolds and Reynolds Pty Ltd + Gregor Scott + gregors&reynolds.com.au +5823 + nRose + Bob Wang + moonriver21&yahoo.com +5824 + Nikkai Electronic, Inc. + Michael Ho + mikeho&attglobal.net +5825 + Native Networks + Michal Weinbaum + michal&nativenetworks.com +5826 + MIND CTI Ltd. + Raanan Grinwald + graanan&mindcti.com +5827 + HERMES SoftLab + Boris Gasperin (IT Manager) + it-mgr&hermes.si +5828 + SpaceNet Communication AB + Peter Carlsson + peter.carlsson&spacenet.se +5829 + AMCC Switching Corp. 
+ Peter Benschop + pbenschop&amcc.com +5830 + XcelleNet + Jessica Landisman + Jessica.Landisman&xcellenet.com +5831 + Cleondris GmbH + Fredi Hinz + noc&cleondris.ch +5832 + Horgan.net + Sean Horgan + seanhorgan&onebox.com +5833 + Tailyn Communication Company + Jenny Hsu + jennyhsu&tailyn.com.tw +5834 + Identikey ltd + Michael Daniell + mdaniell&identikey.com +5835 + Newtec Cy + Luc Claeys + luc.claeys&newtec.be +5836 + TOYO COMMUNICATION EQUIPMENT CO., LTD + Junichi wakai + wakai&toyocom.co.jp +5837 + Nedcor Limited + Ian Gemmell + iang&nedcor.com +5838 + Cabletime Ltd + Mr. Ian Reed + i.reed&cabletime.com +5839 + Bioplasma Andaluza S.L. + Santos Hernandez + shg&scentex.com +5840 + Aravox Technologies, Inc. + Charles Rissmeyer + crissmeyer&aravox.com +5841 + Netfish Technologies + Lucy Phan + lphan&netfish.com +5842 + Cisco Systems + Lucy Phan + luphan&cisco.com +5843 + FiberLogic Communications + Foxes C.H. Hung + foxes&fiberlogic.com +5844 + LG Space Engineering Corp + Louis Granados + rogerdsl&swbell.net +5845 + Cyberus Online Inc. + Roy Hooper + rhooper&cyberus.ca +5846 + NetCentrex + Bernard Jannes + bj&netcentrex.net +5847 + Knowledge Design + Yuki Yamazaki + yyamazaki&knowd.co.jp +5848 + Rissa Solutions Corp. + Eliina Vornanen + eliina.vornanen&rissasolutions.com +5849 + Signal-COM + Helen Nikonova + signal&gin.ru +5850 + H.S. Leader Srl + Luca Paoli/ Massimo Melani + hsleader&hsleader.net +5851 + eWindowShop.com + Casey Lee + cplee&ewshop.com +5852 + Oscar Jacobsson + Oscar Jacobsson + oscar&celocom.se +5853 + Bell Technology + Walter Ungureanu + walter.ungureanu&bell.ca +5854 + Bell Network Solutions + Mike Scott + michael.scott&bell.ca +5855 + John Deere (dba Deere & Company) + Kirk Denison + DenisonKirkR&johndeere.com +5856 + Vianet Technologies, Inc. + Clara S. Johnson + sjohnson&vianet.com +5857 + VirtualWorkout + Taryn Kelly + taryn.c.kelly&us.pwcglobal.com +5858 + Cyclone Commerce, Inc. + Jhon Honce + honce&cyclonecommerce.com +5859 + EFA Software Services Ltd. + Geoff Kratz + gkratz&efasoftware.com +5860 + TeleNet Inc. + David C. Diniz Jr. + dcdiniz&aol.com +5861 + Mitsubishi Heavy Industries, Ltd + Yoshiaki Sonoda + yoshiaki&csl.ngsrdc.mhi.co.jp +5862 + CEYCO Internet Applications + Christian Ey + ey&inweb.de +5863 + D'ALASKA CO. + Fernando Ortega + nanty1&aol.com +5864 + REITC, LLC + Keith Sarbaugh + keith.sarbaugh&gmail.com +5865 + Parallel Networks + Vinay Mohta + vmohta&optigrab.com +5866 + London School of Economics Computer Security Research Centre + Dave Freestone + d.j.freestone&lse.ac.uk +5867 + San Joaquin Delta Community College District + Matt Rosen + mrosen&sjdccd.cc.ca.us +5868 + CRONOS Research Centre Sdn Bhd + Justin Tan + justin&extol.com.my +5869 + Deutsche Post eBusiness + Alexander Finger + a.finger&deutschepost.de +5870 + BioFone Inc. + David C. Diniz + dcdiniz&aol.com +5871 + HyperXS + Ken Huang + ken.huang&accton.com +5872 + TDC Systems + Angela Hoffman + ahoffman&kscable.com +5873 + Software Dynamics Inc. + George Gu + George&mail.sdinc.com +5874 + Tellabs MNG + Dr. Chris Roller + chris.roller&tellabs.com +5875 + China Advanced Info-Optical Network (CAINONET) + Weisheng Hu + net863&moon.bjnet.edu.cn +5876 + A2B s.r.o. 
+ Jiri Guyrkovsky + a2b&post.sk +5877 + Ruhr-Universitaet Bochum + Ute Dederek-Breuer + Ute.Dederek-Breuer&ruhr-uni-bochum.de +5878 + NICE GmbH + Siegfried Rupietta + Siegfried.Rupietta&nice.de +5879 + Unassigned + Removed 2006-10-02 + ---none--- +5880 + Opteway + Pierre Sigrist + pierre.sigrist&opteway.com +5881 + ARZ Allgemeiness Rechenzentrum GmbH + Bertram Gstraunthaler + Bertram_Gstraunthaler&arz.co.at +5882 + Inopoly Inc. + Bart Fielder + bfielder&slip.net +5883 + Zaffire, Inc. + Kamran Ghane + kghane&zaffire.com +5884 + University of Utah + Bryan Wooten + bryan.wooten&utah.edu +5885 + McGough Enterprises L.L.C. + Tim McGough + tim&themcgoughs.org +5886 + NuSpeed Internet Systems + Jim Muchow + jim.muchow&nuspeed.com +5887 + Falconnect, Inc. + Christopher Lee Falcone + clfalcone289&hotmail.com +5888 + Agiliti, Inc. + Jim Illetschko + jim.illetschko&agiliti.com +5889 + Universidad Iberoamericana GC + Ramon F. Tecolt G. + rtecolt&uiagc.pue.uia.mx +5890 + Lantop Systems LTD + Arik Ben-Dov + arik&lantop.com +5891 + Autotote Systems, Inc. + Dion Ashbee + dashbee&autotote.com +5892 + Legacy Computer Services Limited + Peter Chisnall + peter.chisnall&excite.com +5893 + PinPoint Corporation + Samuel Levy + samuel&pinpointco.com +5894 + Dreamchal Inc. + KyoungWook Cheon + praiser&dreamchal.com +5895 + At Fut AS + Jaan Pruulmann + fut&fut.ee +5896 + csse + Evens St. Hilaire + marvens&flashcom.net / evens&nyp.org +5897 + BP Amoco PLC + Andrew Haynes + haynesaj&bp.com +5898 + RITLABS S.R.L. + Max Masyutin + max&ritlabs.com +5899 + Microm Electronics + David Hudson + dave&microm-electronics.com +5900 + Schrader-Bridgeport International + Bob Holton + bholton&schrader-bridgeport.net +5901 + Nominum Inc. + Ashley Kitto + ashley.kitto&nominum.com +5902 + Maharajah Infosys + Nalin Shah + nshavalli&hotmail.com +5903 + Pace Micro Technology plc + Stewart Brodie + stewart.brodie&pace.co.uk +5904 + Walgreens + Rishi Khullar + rishi.khullar&walgreens.com +5905 + StorageNetworks + Jim McDonald + Jim.McDonald&storagenetworks.com +5906 + Travsys BV + Lex Loep + lex.loep&travsys.com +5907 + Lokasoft + Lex Loep + lex.loep&lokasoft.nl +5908 + Otelnet, Inc. + Farokh Eskafi + eskafi&otelnet.com +5909 + Skybitz Inc. + Ven Chava + vchava&skybitz.com +5910 + Availant + Donald B. Lehman + dlehman&availant.com +5911 + cbrook + Harish Rajan + info&cbrook.com +5912 + Yuasa Corporation + Takumi Kimura + takumi_kimura&yuasa-jpn.co.jp +5913 + Oliver Fehr + Oliver Fehr + Oliver.Fehr&ofehr.com +5914 + Balsa Software + Darrin Husmann + info&balsa-tech.com +5915 + Trilogic Systems + Todd Harrington + todd&trilogic.com +5916 + i-Nebula.com + Dan Powers + dan.powers&i-nebula.com +5917 + Gigalink + Sunho Kim + ggoma&gigalink.co.kr +5918 + BBL + Bruno Guillaume + bruno.guillaume&st.bbl.be +5919 + Departamento de Informatica da Fac. Ciencias da Univ. Lisboa + Nuno Miguel Neves + nneves&di.fc.ul.pt +5920 + Zurich Financial Services Group + Stefan Moser + stefan.moser&zurich.com +5921 + Sema Group AB + Goran Karlberg + goran.karlberg&got.sema.se +5922 + Real Time Monitors, Inc. + Tim Kness + TKness&rtmi.com +5923 + Internet2 + Michael J. LaHaye + mjl&internet2.edu +5924 + Xevo Corp. + Peter Ashley + pashley&xevo.com +5925 + Interactive People Unplugged AB + Patrick Lindergren + patrik&ipunplugged.com +5926 + interlink + Jeong Jeonghu + jeong&ils.interlink.co.kr +5927 + NexComm Systems, Inc. + Yanggi Jung + ygjung&nexcomm.co.kr +5928 + Zydacron, Inc. + Patrick M.
Murphy + pmurphy&Zydacron.Com +5929 + Amer.com + JB + jb3&amer.com +5930 + ImagicTV Inc. + Al Parker + aparker&imagictv.com +5931 + PRESIDEO, Inc. + Dan Twadell + DTWADELL&PRESIDEO.COM +5932 + OpenSystems.com, Inc. + Ken Reiff + kreiff&opensystems.com +5933 + Software Services + Bill Rizzi + rizzi&softserv.com +5934 + University of Louisiana at Lafayette + Patrick Landry + pml&louisiana.edu +5935 + ACADEMIA Sinica Computing Centre + JeiZhii Lee + jzlee&ascc.net +5936 + Ahhaaa AB + Johan Stenberg + Johan.Stenberg&ahhaaa.com +5937 + HypoVereinsbank + Thomas Stoertkuhl + thomas.stoertkuhl&hypovereinsbank.de +5938 + Macfarlane TeleSystems Ltd. + Paul Jones + PJones&macfar.co.uk +5939 + HALCOM d.d. + Luka Ribicic + luka.ribicic&halcom.si +5940 + Red Lion Controls (JBM Electronics Co., Inc.) + Denis Aull + Engineering&RedLion.net +5941 + Cosmocom Inc. + Rick Marlborough + rmarlborough&cosmocom.com +5942 + ThoughtShare Communications + George Myers + info&thoughtshare.com +5943 + MatchCraft, Inc. + Dorab Patel + dorab&matchcraft.com +5944 + nROSE + Anyhony Wang + tony_wang2001&yahoo.com +5945 + AtBusiness Communications Oyj + Matti Suuronen + Matti.Suuronen&atbusiness.com +5946 + MULTITEL Inc. + Daniel Fecteau + dfecteau&multitel.com +5947 + WebForce LLC + Eric Lee + ericlee&webasket.com +5948 + 3NO Systems + Henry D. Nissenbaum + henryn&3no.com +5949 + IBM, AIX Tools Team + Derry Cannon + derryc&austin.ibm.com +5950 + Imagineering Inc. + Ron Chase + ronchase&imagineeringinc.net +5951 + Netscaler + Yogendra Singh + yogendra&netscaler.com +5952 + Breakaway Solutions, Inc. + Connie Kratz + ckratz&breakaway.com +5953 + EleTel Inc. + George Krucik + george&eletel.com +5954 + Aaron Telecommunication Technology + Jiwon Lim + jwin&aaron.co.kr +5955 + ASCONA + Andreas Wolff + awolff&ascona.de +5956 + Travelport (formerly 'Worldspan') + Larry Hedrick + larry.hedrick&travelport.com +5957 + Essematica S.r.l. + Paulo Gallo + pgallo&essematica.it +5958 + KCell + John Idun + joidu&hotmail.com +5959 + Eastern Communications Co. Ltd. + Jun Shen + jshen&eastcom.com +5960 + CEFRIEL + Alberto Castelli + castelli&cefriel.it +5961 + Powec AS + Marius Tannum + mariust&powec.no +5962 + David Clunie + David Clunie + dclunie&dclunie.com +5963 + RadioScape Ltd. + Duncan Mackay + dmackay&radioscape.com +5964 + Fenestrae B.V. + Sandra Van Leeuwen + SandravL&Fenestrae.com +5965 + Cenosis + Patrick Tremblay + dgi&cenosis.com +5966 + Hummingbird Ltd. + Christopher Chin + christopher.chin&hummingbird.com +5967 + Jane Ellen Shatz, Ph. D. + Jane Ellen Shatz, Ph.D. + jshatz&CNMNetwork.com +5968 + VAMS + Adil Dzubur + dzubur&vams.com +5969 + Lightspeed Systems + Bradley White + NOC&lightspeedsystems.com +5970 + CSIRO + John Morrissey + John.Morrissey&its.csiro.au +5971 + LEAD Technologies, Inc. + Andreas Freeman + freeman&leadtools.com +5972 + AirNet Communications Corporation + Ronald P. Adkins + radkins&aircom.com +5973 + Rainbow Technologies, Inc. + Jay Cunningham + jcunningham&rainbow.com +5974 + Telecom Technologies, Inc. + Majdi Abuelbassal + majdi&ieee.org +5975 + Telchemy + Alan Clark + alan&telchemy.com +5976 + Red Hill Networks + David Terlinden + dterlinden&redhillnetworks.com +5977 + University of Texas at Dallas (UTD) + Sidney Antommarchi + lsid&hotmail.com +5978 + California State University Northridge + Pavel May + pavel.may&csun.edu +5979 + CSCare Inc. + Tomas Vocetka + tvocetka&cscare.com +5980 + CSSoftware Inc. 
+ Tomas Vocetka + tvocetka&cssoftware.com +5981 + MTG AG + Guus Gerrits + GGerrits&mtg.de +5982 + Infor + Tom Buttliere + tom.buttliere&infor.com +5983 + Descartes Systems Group Inc. + Raimond Diederik + rdiederik&descartes.com +5984 + Cedere Corporation + Michael J. Banks + mbanks&cedere.com +5985 + BioCor, L.L.C. + Dennis Loreman + dennis_loreman&biocor.com +5986 + Vpacket Communications, Inc. + Kaushik Patel + kpatel&vpacket.com +5987 + Pacific Broadband Communications + James Yee + yee&pbc.com +5988 + esutel + Jose Luis Vidal + jvidal&inictel.gob.pe +5989 + Go.com + Stacey Rosenberry + stacey.rosenberry&corp.go.com +5990 + INFORMZASCHITA + Vladimir U. Gaikovich + hotline&infosec.ru +5991 + Ntown Communications, Inc. + Jeff Parker + jparker&ntown.com +5992 + ePower Solutions, Inc. + Michael Appelmans + mappelmans&epower-inc.com +5993 + Terabeam Networks + Doug Hill + doug.hill&terabeam.com +5994 + Arcom Control Systems + Arlen Nipper + anipper&arcomcontrols.com +5995 + Everypath Inc. + Rajiv Anand + ranand&everypath.com +5996 + Communication Weaver Co., Ltd. + John Lee + coweaver&unitel.co.kr +5997 + TeamTronic p.s.c.r.l. + Umberto Salsi + salsi&mailbox.dsnet.it +5998 + CITI-DIC IT CO., LTD + Eddie Wu + p328&hotmail.com +5999 + SCITEL Industrieberatungs-GmbH + Dr. Gerhart Hlawatsch + ghl&scitel.de +6000 + CES Computer Solutions Inc. + Peter Gamitsky + pete&ceshome.com +6001 + Memorial Sloan-Kettering Cancer Center + Angel R. Deras + derasa&mskcc.org +6002 + Impresse Corporation + Jerry Soung + jsoung&impresse.com +6003 + Jasmine Networks, Inc. + David Peters + dpeters&jasminenetworks.com +6004 + SYSMATE Co., Ltd. + Yoo Young Jae + zenith&sysmate.com +6005 + City of Sydney + Sam Au + sau&cityofsydney.nsw.gov.au +6006 + The Schleutker Sites + Douglas E. Schleutker + doug&schleutker.net +6007 + Redux Communications Ltd. + Yuval Ben-Haim + yuval&reduxcom.com +6008 + Lynxus, Inc. + Mark Tippetts + bishop&lynxus.net +6009 + gcs Global Communication & Services GmbH + Peter Maurutschek + pmm&gcs-salzburg.at +6010 + SERVICECO + Wolver Alemao + wolver&serviceco.co.in +6011 + Cinta Corporation + Nik Trimble + nik.trimble&cintacom.com +6012 + PiNGPoNG.CoM + Sean Crumpler + sean.crumpler&pingpong.com +6013 + Tieturi + Sakari Kouti + sakari.kouti&tieturi.fi +6014 + National Library of Medicine + Terry Luedtke + terry_luedtke&nlm.nih.gov +6015 + Chicago Board Options Exchange + Jeremy McMillan + mcmillan&cboe.com +6016 + Technauts + Prahlad Ranganathan + prahladr&technauts.com +6017 + G2X Software + Sergey Opanasets + sopanasets&g2x.com +6018 + Excel Medical Electronics Inc. + Richard Crane + Richard.crane&excel-medical.com +6019 + Simplified Telesys, Inc. + Kevin Grahm + kgraham&simpletel.com +6020 + Lutris Technologies + Scott Pirie at Lutris Technologies (USA) or Guy Smith at Plugged In Software (AUSTRALIA) + guy&pisoftware.com +6021 + Arcadian Wireless + Mr. Cheam Tat Inn, President + cheamti&arcadianwireless.com +6022 + DFKI GmbH + Markus Bolz + isg-sb&dfki.de +6023 + Steele Raymond Solicitors + Cliff Forrest + mail&steeleraymond.co.uk +6024 + Scannex Electronics Ltd. + Ray Golding + rgolding&scannex.co.uk +6025 + Advanced Network Solutions S.p.A. + Marco Lozza + marco.lozza&ans.it +6026 + U.S. Army STRICOM + Russell Milliner + webmaster&stricom.army.mil +6027 + Force10 Networks, Inc. + Raju Shah + raju&force10networks.com +6028 + ODSI Coalition + K. 
Arvind + arvind&tenornetworks.com +6029 + Exactis.com + Lonnie Maynard + lmaynard&exactis.com +6030 + Fluke Electronics + Charles Klement + charles.klement&FLUKE.COM +6031 + Computer Problem Solving + Richard H. Gumpertz + IANA&rhg.ComputerProblemSolving.com +6032 + Stainless Steel Networks + Tim Kennedy + iana&timkennedy.net +6033 + e-talk Corporation + Steve Graff + sgraff&e-talkcorp.com +6034 + BroadJump + Vinod Nair + vnair&broadjump.com +6035 + Sensis Corporation + Paul Blust + Paul.Blust&sensis.com +6036 + Bose Corporation + Gregory Paris + gregory_paris&bose.com +6037 + Edge Networks Corp. + Gord Scarth + gscarth&edgenetworkscorp.com +6038 + Netwhistle.com + Martin Hobson + martin&netwhistle.com +6039 + Resscom Computers and Communications + Mohamad Muhsin Mahmud + resscom&tm.net.my +6040 + University of Kalmar + Pekka Rossi + pekka.rossi&hik.se +6041 + Kokua Communications + Noah Breslow + nbreslow&kokuacom.com +6042 + Southeastern Aluminum Products, Inc. + Suzanne M. Reott + seap&southeasternaluminum.com +6043 + AstroTerra Corporation + Alex Shek + shek&astroterra.com +6044 + Poyntz, Inc. + Phil Dodderidge + pdodde&poyntz.com +6045 + Synaptique Information & Technologie + Anthony Converse + aconverse&synaptique.com +6046 + Near2 Communications, Inc. + Richard Liming + rpl&near2.com +6047 + Florida Teaching Profession - NEA + Patrick Salmon + psalmon&ftp.nea.org +6048 + Exyst + Darryl Rubarth + darryl&exyst.com +6049 + Hitachi Information Systems, Ltd. + Wada Yoshiya + y-wada&hitachijoho.com +6050 + GAMATRONIC ELECTRONIC INDUSTRIES LTD + Asher Avissar + aavissar&gamatronic.co.il +6051 + Ameritrade + Rodger Devillier + rdevillier&hotmail.com +6052 + Novasonics + Gerard J. Cerchio + gjpc&in-cube.com +6053 + Eredyne Corporation + Alexander Zakharov + alexzakharov&hotmail.com +6054 + Access360 + Tony Gullotta + tgullotta&access360.com +6055 + Lees Communication + Harold Warden + harwar&ameritech.net +6056 + Rensselaer Polytechnic Institute + Mike Douglass + douglm&rpi.edu +6057 + AppNet + Andy White + andrew.white&appnet.com +6058 + Havas Interactive + Kody Dickerson + kody.dickerson&sierra.com +6059 + AdNovum Informatik AG + Bruno Kaiser + bruno.kaiser&adnovum.com +6060 + ISP Systems Pty. Ltd. + Jeremy Wright + jeremy&ispsystems.com.au +6061 + Netuitive, Inc. + JF Huard + jfhuard&netuitive.com +6062 + TEKELEC TEMEX + Jean-Pierre Boudot + jean-pierre.boudot&temex.fr +6063 + Axent Technologies, Inc.(Utah) + Brett Cutler + bcutler&axent.com +6064 + JK microsystems + Kelly Hall + khall&jkmicro.com +6065 + ZUniversity.com + Lenny Shuhala + lshuhala&zuniversity.com +6066 + Occam Networks, Inc. + Elie Azar + elie&occamnetworks.com +6067 + Adapcom, Inc. + Albert Ko + ako&atmxdsl.com +6068 + kc. marching + e-beug + jeni&202.183.228.236 +6069 + BlocWare, Inc. + Frederick Hunter + fredh&blocware.com +6070 + AVIV INFOCOM CO., + Ltd. + kdh93002&avivinfo.com +6071 + SIGMA Informatique + Chateau Laurent + lchateau&sigma.fr +6072 + Plan Software GmbH + Clemens Huwig + admin&plansoftware.com +6073 + Business Global Systems + Radovan Semancik + semancik&bgs.sk +6074 + Cenosis, Inc. + Patrick Tremblay + ptr&cenosis.com +6075 + Siemens AG + Bruno Krauss + bruno.krauss&erl9.siemens.de +6076 + TrelliSoft, Inc. + Ed McCrickard + mccrickard&trellisoft.com +6077 + Call-Net Technology Services Inc. 
+ Elvis Lee + elee&sprint-canada.com +6078 + CyberIntelligent Technologies + Tsani Jones + tsani.jones&onebox.com +6079 + appoconnect.com + Jamie Vachon + jvachon&sprintmail.com +6080 + BridgeWave Communications + Ed Richardson + edr&bridgewave.com +6081 + Nexsi + Taqi Hasan + taqi.hasan&nexsi.com +6082 + AReS + Danilo Formentini + ares&tread.it +6083 + AATR + Olivier Castellane + aatr&wanadoo.fr +6084 + PRISMedia Networks, Inc. + Matthew Orzen + matt&prismedia.com +6085 + Hypergene AB + Peter Eriksson + kpe&hypergene.com +6086 + New York Life Insurance Co. + Phedre Francois + Phedre_Francois&NewYorkLife.com +6087 + Mentat Inc. + Bryan Blackman + bryan&mentat.co +6088 + opNIX, Inc. + Nick Estes + nick&opnix.com +6089 + LastMile + Eric Hoedt + ehoedt&lastmile.de +6090 + DespatchBox + Barney Flint + bflint&despatchbox.com +6091 + epki + Olivier Schyns + info&e-pki.net +6092 + Agillion Corporation + Khalil Javid + kjavid&agillion.com +6093 + TESSAG AG + Andreas F. Klenz + Andreas.Klenz&tessag.com +6094 + Wapcom + Zeev Greenblatt + zeev&wapcominc.com +6095 + KHALED FAHMI + Khaled Fahmi + KHALED&FAHMI.COM +6096 + Netsecure Software + Raphael Robin + rrn&netsecuresoftware.com +6097 + State of Michigan + Beth Jurkovic + jurkovicb&state.mi.us +6098 + AvantGo + Stevan Arychuk + stevan&avantgo.com +6099 + INTELLIDEN, Incorporated + Ken Rider + Ken.Rider&intelliden.com +6100 + Covalent Technologies, Inc. + James Harter + jharter&covalent.net +6101 + Trend Micro Inc. + Oliver Guanwen Wang + guanwen&trendmicro.com +6102 + Integrated Research Ltd. + Heather Gray + Heather.Gray&ir.com +6103 + Al Rasheed + Sadi Arabi + rshd2000&hotmail.com +6104 + Cosmobridge Co., Ltd. + Rhee, Jong-ho + likeu&cosmobridge.com +6105 + Center Vlade za Informatiko + Ales Dobnikar + ales.dobnikar&gov.si +6106 + Maharaja Infosys Limited + Nalin Shah + nalin&Hotmail.com +6107 + IDN Telecom, Inc. + Lily Kuo + lily&idntelecom.com +6108 + Insurance Auto Auctions + Tim Hunnewell + thunnewell&iaai.com +6109 + ADIC + Rajesh Chawla + Rajesh.Chawla&adic.com +6110 + Atrica + Ronen Ofek + Ronen_Ofek&Atrica.com +6111 + Worldsport Networks Ltd. + Adam Nealis + anealis&worldsport.com +6112 + XINETRON CO., LTD + C.H. Chen + chchen&xinetron.com.tw +6113 + Datek Telecom S.A. + Leonard Lichi + llichi&datek.ro +6114 + ProSyst Software AG + Ivan Georgiev Datshev + i_dachev&prosyst.bg +6115 + Open Interactive Limited + Peter Harwood + peter.harwood&open-talk.co.uk +6116 + Medtegrity, Inc. + Russ Weiser, Principal Scientist + russel.weiser&digsigtrust.com +6117 + Fiber Network Engineering + Richard Fellows + rfellows&pacbell.net +6118 + Quintiles + Walter Turyn + walt.turyn&quintiles.com +6119 + IPWorks, Inc. + Bernard Volz + volz&ipworks.com +6120 + NXTV + Patrick G. Heffernan + pheffernan&nxtvinc.com +6121 + Excite@Home E-Business Services + Jon Rusho + jonr&excitehome.net +6122 + Mover S.p.A. + Antonello Giannangeli + antonellogiannangeli&libero.it +6123 + National Institute of Telecommunications + Dominik Loniewski + dominik&itl.waw.pl +6124 + Sane Solutions, LLC + Frank Faubert + frank&sane.com +6125 + ACERFI-MICRONICS + Francois Regis K. BESSALA ATEBA + acerfi&iccnet.cm +6126 + epicRealm + Dennis McGuire + dmcguire&epicrealm.com +6127 + LapLink.com + Sean Mathias + seanm&laplink.com +6128 + Wireless Planet + Mr.
Chris Hill + chris.hill&ieee.org +6129 + Bromax Communication Inc., + Jane Chang + janech&kingmax.com.tw +6130 + CVI + Ales Dobnikar + ales.dobnikar&gov.si +6131 + Starmedia Mobile + Fadi Kalach + fadi.kalach&starmedia.net +6132 + LVL7 Systems, Inc. + Steve Ledford + sledford&lvl7.com +6133 + Dead Fish Technologies + Benjamin Reed + ranger&befunk.com +6134 + Trioniq + Yan Levesque & Daniel Bindley + trioniq&trioniq.com +6135 + Chubu Telecommunications Co., Inc. + Norihisa Ichihashi + info&dc.ctc.ad.jp +6136 + idealhost.com + Kevin Rinehart + 1&idealhost.com +6137 + University of Canterbury + Brendon Wyber + brendon.wyber&canterbury.ac.nz +6138 + Trasys + Frederic Poncin + frederic.poncin&trasys.be +6139 + Healthcare Specialists, Inc. + Patrick Horine + hsi&hsinc.com +6140 + Enikia Incorporated + Vladimir Strupinsky + vstrupinsky&enikia.com +6141 + World Wide Packets + JJ DeBarros + jj.debarros&worldwidepackets.com +6142 + TF1 + Patrick Mendes + pmendes&tf1.fr +6143 + Eventlogic + Chris Buben + cbuben&eventlogic.com +6144 + SUNTECH Sp. zo.o + Slawomir Marciniak + slawek&suntech.com.pl +6145 + Cendio Systems AB + Jan Smith + smith&cendio.se +6146 + Whirlpool Corporation + Douglas Wegscheid + Douglas_E_Wegscheid&email.whirlpool.com +6147 + Alliegiance Telecom + John M. Johnson III + john.johnson&algx.com +6148 + Cidera, Inc. + Scott Lipcon + slipcon&cidera.com +6149 + London School of Economics + Jeremy Skelton + J.Skelton&lse.ac.uk +6150 + ONE Investment Group Limited + Peter J. Bone + pete&one.co.uk +6151 + Copyprint S.L. + Gustavo + grevelles&steinweb.net +6152 + Sendmail, Inc. + Randall S. Winchester + rsw&sendmail.com +6153 + JOLT Ltd. + Alex Berchoer + alex_b&jolt.co.il +6154 + Soneris Engineering + Bernhard Keller + bek&soneris.ch +6155 + FIREBIT Ltd. + Adi Aviad + adi&firebit.co.il +6156 + TTC TESLA TELEKOMUNIKACE, Ltd. + Premysl Klima + klima&ttc.cz +6157 + ICCRI BANCA FEDERALE EUROPEA SPA + Angelo Mantineo + Angelo.Mantineo&ICCRI.IT +6158 + NAGUS + Brian Lewis + brlewis&novell.com +6159 + University of Iowa + Chris Pruess + chris-pruess&uiowa.edu +6160 + Affinity Technology Group + Kenneth Dawkins + kenneth_dawkins&affi.net +6161 + OnFiber Communications, Inc. + Billy Davis + bdavis&onfiber.com +6162 + Solid Data Systems + Jeff Brown + jbrown&soliddata.com +6163 + Inara Networks, Inc. + Akbal Karlcut + akarlcut&inaranetworks.com +6164 + Kinva Network System Ltd. + Li Jia + lijia&kinva.com +6165 + Portwell Inc. + Kin Tse Hong + kthong&mail.portwell.com.tw +6166 + Interactive Enterprise + Peter Martin + pmartin&iel.ie +6167 + SPHINX + Hans W. Fell + fell&bsi.de +6168 + Banco do Brasil S/A + Alessandra Fruet + techs&bancobrasil.com.br +6169 + Serendipity Simplex + Robert Stone + iana&serendipity.cx +6170 + PartMiner, Inc. + Jerry S.Wen + jwen&partminer.com +6171 + Kidata AG + Marc Mielke + mm&kidata.de +6172 + Worldwide Entrepreneuric Enterprises, Inc. + Mrs. Ruth A. D'Agostino + weecompany&aol.com +6173 + Fisher & Paykel Industries Limited + Karl Howard + HowardK&fphcare.fp.co.nz +6174 + Fusionx + Dongmyoung Lee + itomay&fusionx.co.kr +6175 + Pines of Woodedge + Karen M. Wingate + &kwingate&synchronet.com +6176 + VIPCom + Wolfgang Socher + Wolfgang.Socher&vipcomag.de +6177 + Blitz Information Technologies Berhad + Zen Woo + zenw&blitz-com.net +6178 + NTT Advanced Technology Corporation + Tadahisa HASHIDO + jimukyoku.bka&ml.ntt-at.co.jp +6179 + R.M. 
EDIZIONI SRL + Claudio Torbinio + webmaster&netbrokers.it +6180 + CanDo.com + Rich Guyon + rguyon&CanDo.com +6181 + Creative Logic Corporation + Wendell Thompson + wendell&hiwaay.net +6182 + Encore Electronics Inc. + Jenny Chang or Kevin Huang + kevin&netronixinc.com +6183 + Sharemedia + Davis Mcpherson + davism&sharemedia.com +6184 + Mitsui & Co., Ltd. + Naofumi Tamura + n.tamura&tks.xm.mitsui.co.jp +6185 + Ghent University + Geert De Soete + Geert.DeSoete&rug.ac.be +6186 + Space CyberLink Inc. + Eunhong Park + ehpark&sdpia.com +6187 + Dartmouth-Hitchcock Medical Center + Stephen Cochran + stephen.a.cochran&hitchcock.org +6188 + University of Massachusetts Lowell + Teng-Chiang Tsao + ttsao&cs.uml.edu +6189 + Eviden Germany GmbH - Trustcenter + Thomas Beckmann + thomas.beckmann&eviden.com +6190 + Network365 Ltd + Denis Hennessy + dhennessy&network365.com +6191 + Plasmon, Inc. + Chris Lehn + clehn&plasmon.com +6192 + Environmental Monitoring Solutions + Richard Corn + rac&racc.com +6193 + CPL Systems Ltd. + Richard Corn + rac&racc.com +6194 + NetCalibrate Inc. + Leon Leong + lyleong&netcalibrate.com +6195 + Advanced Communication Research + Elric Osmont + e.osmont&acresearch.com +6196 + Atlantec Enterprise Solutions GmbH + Thomas Koch + Thomas.Koch&atlantec-es.com +6197 + Arcordia/JP Morgan Chase + Andy Doddington + andrew.doddington&jpmorgan.com +6198 + Eastern Nazarene College + Charles Owens + its-iana&enc.edu +6199 + Nyherji hf + Throstur Sigurjonsson + throstur.sigurjonsson&nyherji.is +6200 + DNPG + Dave Lyons + dlyons&dnpg.com +6201 + Asenti, Inc. + Dipak Ghosal + ghosal&cs.ucdavis.edu +6202 + Nimble, Inc. + Scott Taylor + staylor&emailknowledge.com +6203 + Topspin Networks + Edwin Tsang + edwin&topspinnet.com +6204 + SIBS - Sociedade Interbancaria de Servicos, S.A. + Jose Eduardo Pina Miranda + pm&sibs.pt +6205 + LKC-Technology + Luis Cantu + ln_cantu&hotmail.com +6206 + eCritical Inc. + Mark Whitehouse + markwhitehouse&home.com +6207 + The Polished Group SA + Waclaw Sierek + waclaw.sierek&tpg.pl +6208 + Enterprise Internet Services Limited + Steve Brewin + sbrewin&enterpriseis.co.uk +6209 + The Open University + Jamie Slee + j.slee&open.ac.uk +6210 + Mayah Communications GmbH + Jorg Rimkus + jr&mayah.com +6211 + Murex + Elias Coutya + elias.coutya&murex.fr +6212 + XM Satellite Radio, Inc. + Royce Kincaid + royce.kincaid&xmradio.com +6213 + Redline Networks, Inc. + Michel Hoche-Mong + hoche&redlinenetworks.com +6214 + SureFire Commerce Inc. + Michel Hétu + hetum&sfcommerce.com +6215 + Streaming21, Inc. + Horng-Juing Lee + hjlee&streaming21.com +6216 + Ci Technologies + Greg Roberts + GregRoberts&cit.com.au +6217 + ngkindsoftltd + ngkmurthy + leelakrishna&onebox.com +6218 + SCAN COIN + Anders Gustafsson + ag&scancoin.se +6219 + Iskra SYSEN d.d. + Davor Munda + davor.munda&sysen.si +6220 + Cyber Internet Services + Amjad H. Lone + ahlone&cyber.net.pk +6221 + BAE Systems, CNIR + Michael Perrotta + michael.perrotta&baesystems.com +6222 + STSN, Inc. 
+ Randy Tate + rtate&stsn.com +6223 + CDS Technologies + Mark Komus + mark&wilinx.com +6224 + Roxy.com + Lester Herrington + lester.herrington&roxy.com +6225 + Broadcast Towers + Scott Stubbs + scott&concealed.net +6226 + BARIELD'S INTERNET PROVIDER SERVICE + Tyrone Barfield + BARFIELD&COAM.NET +6227 + Resolve Engineering Pty Ltd + David Gregson + david_gregson&resolve.com.au +6228 + Engineering Center for Information Security,Chinese Acadamy of Sciences + Xuzhen + xuzhen&ercist.iscas.ac.cn +6229 + Tango/04 Computing Group + Jordi Lisbona + jlisbona&tango04.net +6230 + SITE Skunkworks + Martin Hallerdal + marty&site.se +6231 + Fiduciary Automation + W. E. Perry + wperry&fiduciary.com +6232 + Corinex Global Corp. + Ing.Martin Paska + martin.paska&corinex.com +6233 + UUNET EMEA + Mark Visser + mark.visser&nl.verizon.com +6234 + Paul Abrahams, Sc.D. + Paul Abrahams + abrahams&acm.org +6235 + nawilson.com + Neil A. Wilson + wilson.neil&usa.net +6236 + Crayfish Co.,Ltd. + Osamu Igasaki + osamu_iwasawa&Crayfish.CO.JP +6237 + NetAge Solutions GmbH Netzwerklösungen + Bernd Raschke + bernd.raschke&netage.de +6238 + Queen Mary, University of London + Martin Evans + dirmaster&qmul.ac.uk +6239 + TechnoTrend AG + Michael Six + michael.six&technotrend.de +6240 + Bryttan, Ltd. + J R Bryttan + net-admin&bryttan.com +6241 + Calvin College + Brian J. Baas + bbaas&ieee.org +6242 + Possm Technologies + Bruce Gillespie + bruceg&nwd.com +6243 + Weber Enterprise + Markus Weber + mw9141&flash.net +6244 + PDR/SAGN Publishing + David Pannkuk + dcpann&fuse.net +6245 + CiRCLEX Limited + Koji Kurosawa + k-kurosawa&circle.co.jp +6246 + Mythological Solutions + Philip Kizer + pck-iana&nostrum.com +6247 + Comtech EFData Corporation + Norm Konya + nkonya&comtechefdata.com +6248 + Atecs Dematic + Andreas Engel + andreas.engel&dematic.de +6249 + HCL Technologies Ltd + S.V.Rajasekar + vesubram&cisco.com +6250 + PepsiCo Business Solutions Group (PBSG) + Chris Castrianni + chris.l.castrianni&fritolay.com +6251 + Atoga Systems, Inc. + Debasish Biswas + debasish&atoga.com +6252 + SrvLoc.Org + James Kempf + james.kempf&sun.com +6253 + ipSEAL, Inc + Bobby Mohanty + mohanty&ipseal.com +6254 + TimeVision Inc. + Eric Purcell + EricP&timevision.com +6255 + Commerce Onc, Inc. + Bill Tang + btang&commerceone.com +6256 + Southwest Communications Institute + Qing Yu + qingyu163&163.net +6257 + Disappearing, Inc. + Michael Sierchio + michael&disappearing.com +6258 + Iwatsu Electric Co., Ltd. + Daichi Yagi + Yagi&iwatsu.co.jp +6259 + Sohonet Limited + Jon Ferguy + jon.ferguy&sohonet.co.uk +6260 + Reserved + RFC-pti-pen-registration-10 + ---none--- +6261 + MAKU Informationstechnik GmbH + Marco Rohleder + mrohleder&maku.de +6262 + IMC Communications + Nick Hadjiangeli + nhadjian&imc.net.au +6263 + Avstar Systems, LLC + Kevin Burton + Kevin.Burton&avstarnews.com +6264 + OpenNetwork Technologies + Steve Anderson + sanderson&opennetwork.com +6265 + gocek.com + Gary Gocek + gary&gocek.com +6266 + Everest eCommerce Inc. + Sunil Mishra + smishra&everest.com +6267 + MUSHTAQ SONS + Ijaz Ahmad + msons&brain.net.pk +6268 + Evistel + Vincent GRENET + vincent.grenet&free.fr +6269 + DiGiMATiON + David McKellar + djm&digimation.ca +6270 + iAsiaWorks + Damien O'Rourke + damien&iasiaworks.com.au +6271 + N.S.C. + Miss M. Aharoni + maayan&nsc.co.il +6272 + KPNQwest Finland + Kaj J. 
Niemi + kajtzu&kpnqwest.fi +6273 + Weir Consulting Ltd + Paul Gray + paulg&weir-consulting.co.uk +6274 + Drael Computer Corporation + Dwight Leard + DwightL&drael.com +6275 + HOB GmbH & Co. KG - HOB Germany + Richard Wunderlich + richard.wunderlich&hob.de +6276 + Mahindra British Telecom Limited + Ranvir Jatana + ranvir&mahindrabt.com +6277 + Federación de Organizaciones Privadas de Desarrollo - FOPRIDEH + Paco Alcaide Canata + fedopds&sdnhon.org.hn +6278 + Invision.com, Inc + Matt Martini + matt&invision.net +6279 + Visible Markets, Inc. + Phil Gabardi + frg&visiblemarkets.com +6280 + Coreon, Inc. + Ryan Moats + rmoats&coreon.net +6281 + Jingle Cats Music + Mike Spalla + jimpowers&deliveryman.com +6282 + SDC Communications Ltd + John Leitch + john.leitch&sdccom.co.uk +6283 + City-Net Technology Inc. + Liwei Chen + liweic&aol.com +6284 + Wireless Networks, Inc. + Cedric Berger + cberger&wireless-networks.com +6285 + Crossport Systems + Nikos Mouat + nikm&crossport.com +6286 + One Tech Telecom Co.,Ltd. + Cho, SeYil + csyil&ottelecom.com +6287 + Nextenso + Olivier Epaud + olivier.epaud&nextenso.com +6288 + Euskaltel S.A. + Alexander Rivas Bilbao + arivas&euskaltel.es +6289 + Adapcom Inc. + Dawkoon Lee + dawklee&usa.net +6290 + LightSand Communications, Inc. + Ravi Natarajan + ravin&lightsand.com +6291 + Bates College + Karen McArthur + kmcarthu&bates.edu +6292 + Leadership Technologies, Inc. + Scott Winder + scott.winder&usa.net +6293 + Kasenna, Inc. + Bob Bateman + bateman&kasenna.com +6294 + Active Telco + Azeem Butt + azeem&activetelco.com +6295 + Wilhoyt Industries + Michael Wilhoyt + wilhoyt&wilhoyt.com +6296 + DASAN Co.,LTD. + Wonhee Lee + whlee&da-san.com +6297 + Enel.it + Antonello Laise + laise.antonello&enel.it +6298 + MP3.com + Scott Kelley + scottk&mp3.com +6299 + SUNY Cortland + Scott Thomas + scottT&em.cortland.edu +6300 + Karl Miller + Karl Miller + marvin1969&aol.com +6301 + Packet Dynamics Ltd + Hugh Evans + hugh.evans&packetdynamics.com +6302 + Vertiv (formerly 'Emerson Energy Systems') + David Wilczewski + david.wilczewski&vertivco.com +6303 + Cityspace Ltd + Philip Lishman + phil.lishman&cityspace.com +6304 + fogcity + MinSuk Kim + K9235&CHOLLIAN.NET +6305 + Flextronics International + Tommie Persson + oid&flextronics.com +6306 + Dartware, LLC + William Fisher + william.w.fisher&dartware.com +6307 + Extreme Networks (formerly 'Ipanema Technologies') + Cristian Mircea + cmircea&extremenetworks.com +6308 + cholul + Will Ballantyne + will&cholul.com +6309 + Dan Nielsen Consulting + Dan Nielsen + dan&dan-nielsen.dk +6310 + KaanBaan Technologies + Anthony Graffeo + agraffeo&kaanbaan.com +6311 + Gomez Advisors, Inc. + Brian Thomas + noc&gomez.com +6312 + Pelican Security, Inc. + Ayelet Steinitz + ayelet&PelicanSecurity.com +6313 + InnovaCom, Inc. + Ken Regnier + kenr&transpeg.com +6314 + VillaMontage Systems + Balaram Donthi + bdonthi&villamontage.com +6315 + Comissão para Coordenação do Projeto SIVAM + Luiz Anésio de Miranda + anesio&cc.sivam.gov.br +6316 + Bill Fenner + Bill Fenner + bfenner&acm.org +6317 + Netfox + Lam Kam Chor + geofox&softhome.net +6318 + lesswire AG + Matthias Mahlig + mahlig&lesswire.de +6319 + Caldera Systems, Inc. + Randy Cook + randy&calderasystems.com +6320 + simonstl.com + Simon St.Laurent + simonstl&simonstl.com +6321 + Calix Networks + Abel Tong + abel.tong&calix-networks.com +6322 + Entone Technologies Ltd. + Mike Tang + mike.tang&entone.com +6323 + NewCross Technologies, Inc + Thompson Wong + twong&newxt.com +6324 + CoolAlerts, Inc. 
+ Kirk Noda + kirk&coolalerts.com +6325 + Harmonix Corporation + Shey Hakusui + shey&hxi.com +6326 + LongBoard, Inc. + Weidung Chuang + wchuang&lboard.com +6327 + Drews TC + Heinz Drews + heinz&drews.tc +6328 + WizWise Technology + E.C. vande Stadt + office&wizwise.nl +6329 + Internet Business Factory + Girish Bajaj + bajajg&usa.net +6330 + Microdot Computing Services + Andrew Smith + Andy-Smith&microdotcomputingservices.co.uk +6331 + Optimation New Zealand Limited + Seth Yates + seth.yates&optimation.co.nz +6332 + Four Seasons Produce, Inc. + Bennett Samowich + brs&fsproduce.com +6333 + Accord Networks Ltd. + Roni Even + roni_e&accord.co.il +6334 + beTRUSTed + Jay Robertson + support&betrusted.com +6335 + Adroit Controls + J.S. Nisheeth + adroit&icenet.net +6336 + Digital Fountain + David M. Kushi + kushi&digitalfountain.com +6337 + Crytaliz, Inc. + Chris Stacy + cstacy&crystaliz.com +6338 + Sportshuddle.Com + Chris Nugent + chris.nugent&sportshuddle.com +6339 + Digital China (Shanghai) Networks Ltd. + Emily Jin + jinnan&digitalchina.com +6340 + Otto Kind + Stefan Kaesler + s.kaesler&kind-ag.de +6341 + Harvard University + Marilyn M. Shesko + marilyn_shesko&harvard.edu +6342 + Hyperchip, Inc. + Preet Mohan S Ahluwalia + pahluwalia&hyperchip.com +6343 + YMCA England + Stephen Buckley + intranet&ymca.org.uk +6344 + Hunter Technology Inc. + Hyunajae Yim + jayi&htt.co.kr +6345 + Compuprint S.P.A + Paolo Tagliavini + Paolo.Tagliavini&compuprint.it +6346 + DIMAT, S.A. + Josep Salat + jsv&dimat.es +6347 + Sierra Monitor Corporation + Adam OwYoung + AOwYoung&sierramonitor.com +6348 + Liquid Digital Information Systems + Jonathan Donald + jonathan&ldis.com +6349 + Treeblossom Technologies + Jonathan Donald + jeddak&bellatlantic.net +6350 + Blue Cross Blue Shield of Michigan + Pamela Hensley + phensley&bcbsm.com +6351 + Shawn Systems, Inc. + Arthur Harris + harris&shawn.com +6352 + Ciena Corporation (formerly 'Catena Networks') + Daniel Rivaud + drivaud&ciena.com +6353 + Avisto SA + Scott Willy + scott.willy&avisto.com +6354 + Telways Communication Co.,Ltd. + David Chang + davidhsu&telways.com +6355 + Sipher Internet Tachnology + Girish Chopra + girish&sipher.co.uk +6356 + Innovative Software AG + Andreas Herbst + snmp&isg.de +6357 + B2 Systems, Inc. + David Schwartz + david_schwartz&b2systems.com +6358 + Unimark, Inc. + Steven Blitstein + ssb&unimark.com +6359 + Cyveillance Inc. + David Heath + dheath&cyveillance.com +6360 + LiveVault Corporation + Ted Hess + thess&livevault.com +6361 + Siemens Building Technologies Ltd + Steve Reid + steve.reid&gb.sibt.com +6362 + Ritter Technologie + Ralf Taegener + ralf.taegener&rittec.de +6363 + CyberSolutions GmbH + Martin Wille + snmp&cys.de +6364 + Andréa Informatique + Juan Camacho + andrea&andrea.fr +6365 + Novamedia Sistemas S.L. + Sergio Muñoz García + smg&novamedia.es +6366 + KOREA LINK + Byoung-Hyo Cheon + ddorong&korealink.ne.kr +6367 + Linkabit - A Titan Systems Company + Mark A. Cusano + mcusano&titan.com +6368 + Schmid Telecom AG + Gian Lucas Poltera + poltera&schmid-telecom.ch +6369 + Lexi-Comp, Inc. + Jason McCormick + jasonm&lexi.com +6370 + First USA Bank + Jon Austin + JonAustin&FirstUSA.com +6371 + Minerva Networks Inc + Jackie Pang + jpang&minervanetworks.com +6372 + Kresa Ranch + George L. Kresa + gkresa&yolo.net +6373 + 4DL Inc. + Youngmin Jeon + touch&4dl.com +6374 + CECI + chia-fan chung + CCF&a169.iam.ntu.edu.tw +6375 + CHINA ENGINEERING CONSULTANTS,INC.
+ STEVEN LIN + steven&ceci.org.tw +6376 + PDVSA + Lisbeth Schloeter + Lisbeth.Schloeter&intesa.com +6377 + OpenPages, Inc. + Daniel French + Daniel_French&openpages.com +6378 + Lanit + Sergey Labutin + lsm&lanit.ru +6379 + Servgate Technologies Canada Inc. + Jing Jiang + jjiang&servgatecanada.com +6380 + Isdnet + David Ponzone + dponzone&isdnet.net +6381 + burst.com + Arthur Allen + arthur.allen&burst.com +6382 + SAGA Software, Incorporated + Jonathan Arnold + Jonathan.Arnold&sagasoftware.com +6383 + OC + Dieter Kirchner + dk&oclink.de +6384 + V&S Multimedia S.A. + Ismael Mendez + ismael&vsm.es +6385 + Pfizer + Jon B Collins + jon_b_collins&groton.pfizer.com +6386 + Saturn Technologies + Bill Woloch + bwoloch&saturntech.com +6387 + Big Band Networks, Ltd + Hezi Oved + hezio&bigbandnet.com +6388 + TeleRelay + Stephan Biesbroeck + stephan.biesbroeck&telerelay.com +6389 + COM DEV International + Brett Moser + brett.moser&comdev.cc +6390 + DBCORP Information Systems Inc. + Bruce McCartney + bmccartn&dbcorp.com +6391 + Xtremesoft Inc. + Ian G. Melven + imelven&xtremesoft.com +6392 + Chiaro Networks Ltd. + Jennifer Zhao + jzhao&chiaro.com +6393 + Haliplex Pty Ltd + Anthony Merry + support&haliplex.com.au +6394 + Skelton Consulting GmbH + Charles Skelton + c.skelton&skelton.de +6395 + ProSaldo Software B.V. + J. Rump + jim&prosaldo.nl +6396 + Absolute Website Solutions + Gloria Terrill + gloriasdei&juno.com +6397 + Nayna Networks, Inc. + Carmody Quinn + carmody&nayna.com +6398 + Lindner & Pelc Consult GmbH + Olaf Lindner + lindner&lpconsult.de +6399 + Intelligent Maintenance Systems Ltd (formerly 'Edrich Integration') + Paul Edrich + paul.edrich&ims-evolve.com +6400 + Millenux GmbH + Thomas Uhl + thomas.uhl&millenux.com +6401 + JTI OF GEORGIA + CHRISTOPHER G. JOHNSON + CJOHNSON&JTECHNOLOGYOFGA.COM +6402 + OPTEL Ges. für Lasertechnik und Elektronik mbH + Ralf Magiera + Ralf.Magiera&optel.de +6403 + Universidad Blas Pascal + Alfredo Edye + aedye&ubp.edu.ar +6404 + ECRESO + Eric PERE + it&worldcastsystems.com +6405 + Activ Australia + Jonathan Downes + jonno&activ.net.au +6406 + Neomar + Patrick Hundal + hundal&neomar.com +6407 + bit-Services GmbH + Christian Voss + cvoss&dimetis.de +6408 + Mikom GmbH + Hans-Peter Kabout + kai.kabout&mikom.com +6409 + Mondial Assistance Limited + Geoff English + geoff_english&mondial-assistance.co.uk +6410 + Zoneworx, Inc. + Carl Marcinik + clmarcinik&zoneworx.com +6411 + Overland Storage, Inc. (formerly 'Quantum Corp - Snap Division') + Dan Burrows + dburrows&overlandstorage.com +6412 + HiddenMind Technology + David Zimmerman + dzimm&hiddenmind.com +6413 + I.S. Associates, Inc. + John Marshall + jpm&isassoc.com +6414 + Embry-Riddle University + Phil D'Amore + damorep&db.erau.edu +6415 + Caspian Networks, Inc. + Brian Hassink + bhassink&caspiannetworks.com +6416 + SolutionZ Pty Ltd (Australia) + Ed Tarento + etarento&solutionz.com.au +6417 + Victorian Automobile Chamber of Commerce + David Nolan + d.nolan&vacc.asn.au +6418 + The Templar Corporation + R.W. Shore + rws&templarcorp.com +6419 + Pro-bel Ltd + Malcolm Butler + malcolm.butler&pro-bel.co.uk +6420 + IN-MX + Oliver Marugg + o.marugg&in-mx.ch +6421 + H M Customs & Excise + Marian Kay + Marian.kay&hmce.gsi.gov.uk +6422 + BT + Jasper Lanek + jasper.lanek&bt.com +6423 + ObjectSpace, Inc. + Jim Canter + jcanter&objectspace.com +6424 + Viawest Internet Services + Bruce Kiefer + bkiefer&viawest.net +6425 + Branch Systems + Michael A. 
Branch + unix_wizard&yahoo.com +6426 + e-Appliance Corporation + Robert Edwards + redwards&eappliancecorp.com +6427 + Locus Dialogue + Raymond Menard + rmenard&locusdialogue.com +6428 + T-Online International AG + H. Enders + h.enders&t-online.net +6429 + ALTO + Frank Joly + franck.joly&alto.org +6430 + Ericsson Consulting GmbH + Juergen Petrias + juergen.petrias&ericsson.com +6431 + BroadSoft, Inc. + Ken McInerney + ken&broadsoft.com +6432 + Orblynx, Inc. + Jaime Vargas + jvargas&orblynx.com +6433 + Excel.Net, Inc. + Larry A. Weidig + lweidig&excel.net +6434 + WebMD + Fiaz Hossain + fiaz&webmd.net +6435 + Holistix, Inc. + Ken Dornback + ken.dornback&holistix.net +6436 + Wapme Systems AG + Stipe Tolj + tolj&wapme-systems.de +6437 + Kathrein Werke K.G. + Peter Mack + mack&kathrein.de +6438 + MKNET ADVIES + Marcel Kuiper + marcel&mknet.nl +6439 + NexTReT s.l. + Domènec Sos i Vallès + dsv&nextret.net +6440 + TradeCast, Ltd. + Manmeet Singh, CTO + msingh&tcast.com +6441 + i2 Technologies, Inc. + Padma Reddy + Padma_Reddy&i2.com +6442 + TWCT dba Daughters and Dad + Timothy W. Tucker + TWCT&aol.com +6443 + Out of our minds. + Timothy W. Tucker + TWCT&aol.com +6444 + Gearhead Group + Donis Marshall + donis&gearheadpress.com +6445 + Sportsplanet2000 + Andrew Sawtelle + iajones110&aol.com +6446 + Gaton Technologies LTD + Keren Kroglen + kerenk&post.tau.ac.il +6447 + Synaxia Networks + Jeff King + jeff.king&synaxia.com +6448 + DigiVision Tech. + Jianxin Geng + frank_gjx&shdv.com +6449 + Sectigo Limited + Rob Stradling + rob&sectigo.com +6450 + Idsec Limited + Stephen Bishop + sbishop&idsec.co.uk +6451 + Mystical Creations + Scott Beveridge + sjb&mysticalcreations.com +6452 + Advanced Network Solutions, Inc + Tom Strack + TStrack&Advnw.com +6453 + CORE COMMUNICATIONS CO., Ltd. + Gun Uk Nam + gunam&corecom.co.kr +6454 + Postel Services + Soobok lee + lsb&postel.co.kr +6455 + Technology Thesaurus Corp. + Kevin Huang + kevin&ttc-group.com.tw +6456 + NETdefence Co. Limited + Matthew Tam + mtam&netdefence.com +6457 + Eureka Soft + Moussa Chahine + moussa.chahine&eurekasoft.fr +6458 + GPr Sistemas Ltda + Rodrigo Dias Arruda Senra + gpr&gpr.com.br +6459 + Linfield College + Rob Tanner + rtanner&linfield.edu +6460 + uclick + Scott Kahler + scottk&uclick.com +6461 + Center for Internet Research + Prashant Agrawal + prashant&comp.nus.edu.sg +6462 + freiheit.com + Joerg Kirchhof + joerg&freiheit.com +6463 + VocalData, Inc + Steve Bakke + sbakke&vocaldata.com +6464 + Stevens Institute of Technology OIT + Joe Formoso + jformoso&stevens-tech.edu +6465 + Sparkworx (formerly 'Cacheon, Inc.') + Ian W. Kuvik + sparkworx&gmail.com +6466 + The Devil You Say + Timothy W. Tucker + TWCT&aol.com +6467 + Land Of Linking + Timothy W. Tucker + TWCT&aol.com +6468 + Patent Off + Timothy W. Tucker + TWCT&aol.com +6469 + Karma Kahn + Timothy W. Tucker + TWCT&aol.com +6470 + We're no dummies + Timothy W. Tucker + TWCT&aol.com +6471 + Con Temporaries + Timothy W. Tucker + TWCT&aol.com +6472 + Antiballistic + Timothy W. Tucker + TWCT&aol.com +6473 + Halls Of Ivy + Timothy W. Tucker + TWCT&aol.com +6474 + Wiffenpoof + Timothy W. Tucker + TWCT&aol.com +6475 + Home Is The Hunter + Timothy W. Tucker + TWCT&aol.com +6476 + The Craft of Elves + Timothy W. Tucker + TWCT&aol.com +6477 + I Was Misinformed + Timothy W. Tucker + TWCT&aol.com +6478 + Shot To Shingles + Timothy W. Tucker + TWCT&aol.com +6479 + Right-hand Twist + Timothy W. Tucker + TWCT&aol.com +6480 + Solomon Tech. Corp. + Peter Yen + jyan&solomon.com.tw +6481 + MAGISTRAL, Ltd.
+ Gennady S. Ratin + idcmag59&cityline.ru +6482 + ICSynergy + Martin Gee + martin.gee&icsynergy.com +6483 + Captus Networks + Mark Ontiveros + mark&captusnetworks.com +6484 + Campus Crusade for Christ, Inc. + Karl Kranich + kkranich&ccci.org +6485 + Spatial Technologies + Douglas Sellers + doug.sellers&cmgisolutions.com +6486 + Alcatel-Lucent Enterprise (previous was 'Alcatel') + Ravi Govil + ravi.govil&al-enterprise.com +6487 + YouSync + Albert Allen + aallen&yousync.com +6488 + TalentStorm Enterprises + Christopher Buzzetta + chris&talentstorm.com +6489 + Siemens Cerberus + Armin Born + armin.born&cerberus.ch +6490 + DeTeMobil Deutsche Telekom MobilNet GmbH + Jörg Rass + joerg.rass&t-mobil.de +6491 + Cytek Media Systems, Inc. + Tom Dolsky + tomtek&cytekmedia.com +6492 + Phyve + Colin Frahm + colin.frahm&phyve.com +6493 + COMTRUST + Tayseer A. Hasan + tayseer&emirates.net.ae +6494 + Kraft und Partner GmbH + Ingo Fischer + fischer&kraftup.com +6495 + SeeDragon Software + Laura Jauregui + mljm&seedragon.com +6496 + Cottons Car Superstore + Janice Cotton + jan.com&hotmail.com +6497 + NetEye Solutions + Alex Kogan + kogan.alex&telrad.co.il +6498 + Maintenance One + Frank Jones + frank.jones&m-1.co.uk +6499 + ekit.com Pty Ltd + Michael Bailey + mbailey&ekit-inc.com +6500 + VA Linux Systems + San Mehat + smehat&valinux.com +6501 + Macro 4 + John Clarkson + john.clarkson&macro4.com +6502 + refat + Refat + refatcom&usa.com +6503 + NETGEM + MARC BURY + marc.bury&netgem.com +6504 + Navini Networks + Dexter Lu + dexter&navini.com +6505 + TARGUSinfo + Jay Steiger + jaysteiger&targusinfo.com +6506 + LiveWire Communications, Inc. + Josh Fryman + fryman&lw.net +6507 + OptiMight Communications Inc. + Harry Wang + hwang1&optimight.com +6508 + Shanghai DigiVision Technology Co.,Ltd + Bruce Zhou + cable_modem&shdv.com +6509 + Antrim Studios and Software Cellar + Jimmy Russell + jprussell&earthling.net +6510 + Pioneer Corporation + Akio Fukushima + akio&orl.pioneer.co.jp +6511 + TelCom Technology + Jae Woon, Lee + telcomt&chollian.net +6512 + FamTek Professional Services, Inc. + Michael Hankinson + mike&famtek.com +6513 + Whale Communications + Shlomi Afia + shlomi&whale-com.com +6514 + Innovative Systems, L.L.C. + Shane Warren + shanew&innovsys.com +6515 + Zvolve Systems, Inc. + Tsani Jones + tjones&zvolve.com +6516 + Comprehensive Test Ban Treaty Organization + John Coyne + John.Coyne&ctbto.org +6517 + FlowServer + David Gonzalez + se04532&salleURL.edu +6518 + Systems Solutions of the Southern Tier, Inc. + Peter Kinev + pkinev&bga.com +6519 + Sabre Inc./ Sabre BTS + Casey Fahey + casey.fahey&sabre.com +6520 + Dot Hill Systems + Gary Dunlap + gdunlap&dothill.com +6521 + ChainLink Technologies + Rajat Verma + rverma&clti.com +6522 + cv cryptovision GmbH + Markus Tesche + markus.tesche&cryptovision.com +6523 + Bilboko Trading + Lewis Heath + lewisheath&netscape.net +6524 + Greenbytes GmbH + Julian F. Reschke + fax&greenbytes.de +6525 + Jasmine Networks, Inc. + George Lin + glin&jasminenetworks.com +6526 + Windward Consulting Group + Jim Kelly + jkelly&windwardcg.com +6527 + Nokia (formerly 'Alcatel-Lucent') + Jason Sterne + Jason.Sterne&nokia.com +6528 + Source One + David Steinbrecher + Steino1128&aol.com +6529 + Bundesamt für Sicherheit in der Informationstechnik + Michael Thiel + michael.thiel&bsi.bund.de +6530 + 2N spol. s r.o. + Tomas Trefny + trefny&tel-2n.cz +6531 + TeamWork Consulting + Torbjørn Mastad + torbjmas&online.no +6532 + Niobrara R&D Corp.
+ Scott Henson + shenson&niobrara.com +6533 + FaceTime Communications + Yuri Khodosh + ykhodosh&facetime.com +6534 + hp + Jung Kyun Kim + church99&truth.or.kr +6535 + TECORE, Inc. + Tara Pittman + pittman&tecore.com +6536 + NetForensics.com + Kevin Hanrahan + kevin&netforensics.com +6537 + Brown University + Steven Carmody + Steven_Carmody&brown.edu +6538 + Winford Engineering + Benjamin Bright + bbright&winfordeng.com +6539 + EdgeOn Systems, Inc. + Jaspal Singh + JSingh&EdgeOn.com +6540 + Alamo Personal Computers, Inc. + David Lozano + david&alamopc.net +6541 + Transindigo, Inc + John Field + john.field&transindigo.com +6542 + Slamdunk Networks + Ram Chinta + rchinta&slamdunknetworks.com +6543 + AmBell + Carlos Rosado + carlos&ambell.com +6544 + Fivesight Technologies + Justin Guinney + jguinney&fivesight.com +6545 + THINK iT + Stephan Schoenberger + ssc&thinkit.ch +6546 + ITOCHU Techno-Solutions Corporation + Tomokazu Ono + tomokazu.ono&ctc-g.co.jp +6547 + Network Management Solutions, Inc. + Dave LaDue + mktg&netmg.com +6548 + Wavelength Digital Limited + Dr D K Anderson + danderson&wavelength-digital.com +6549 + Axxcelera Broadband Wireless + Jamal Hamdani + info&moseleysb.com +6550 + weblicon technologies AG + Angelika Drees + angelika.drees&weblicon.net +6551 + Hotline Communications Ltd. + Stephen Klump + oidhound&bigredh.com +6552 + Virbus AG + Andreas Piesk + apiesk&virbus.de +6553 + GHAKO FISHERIES COMPANY LIMITED + MR.YUN BAEK, PARK + ybpark&ghana.com +6554 + Optima Technologies, Inc. + Eric Eason + eseason&optima-tech.com +6555 + DataFoundry.net, Inc. + Michael Douglass + mikedoug&datafoundry.net +6556 + Sigma Networks + Steve Feldman + sfeldman&sigmanetworks.com +6557 + dotRocket, Inc + James Tosh + tosh&dotrocket.com +6558 + Security Technologies Inc. + Davey Park + dhpark&stitec.com +6559 + Usha Communications Technology + Koushik Chatterjee + koushik.chatterjee&ushacomm.co.in +6560 + TelEnergy H. GmbH + Dr. Dilmagani + telenergy&t-online.de +6561 + P.S.K. AG + Bernhard Voit + bernhard.voit&psk.at +6562 + Nexland Inc. + Daniel Sultan + dsultan&nexland.com +6563 + TimeBridge Technologies + David C. Smith + dcsmith&timebridge.com +6564 + Gradient Solutions + Paschal Nee + Paschal.Nee&gradient.ie +6565 + Johns Hopkins Institutions + Etan Weintraub + eweintra&jhmi.edu +6566 + Metropolitan Life Insurance + Stefan Kelber + Directory_Services&metlife.com +6567 + Klimke Industry Consulting + Horst Dieter Klimke + klimke&addcom.de +6568 + Promptu Systems Corporation (formerly 'AgileTV') + Blair Fidler + bfidler&promptu.com +6569 + INVENTEC CORPORATION + JH CHYAN + chyan.jh&inventec.com +6570 + ACOMZ NETWORKS Corp. + Younkwan, Lee + younkwan&acomz.com +6571 + Fusion In Tech co.,Ltd. + Yong Son + yongson&fusionintech.com +6572 + NETLINKS CO.,LTD. + Jin-seon Hwang + jinseon&netlinks.co.kr +6573 + PerVigil, Inc. + John M. Johnson III + support&pervigil.com +6574 + Synology Inc. + Justin Deng + justind&synology.com +6575 + Marriott International, Inc. + Kathy Memenza + Kathy.Memenza&marriott.com +6576 + Privada, Inc. + Josh Lubliner + josh&privada.net +6577 + Indura Corporation + Bruce Kosbab + bruce.kosbab&indurasoft.com +6578 + RedWave Networks + Rishi Grover + rgrover&redwavenet.com +6579 + Maru Networks, Inc. 
+ Eugene Oh + eugeneoh&sdpia.com +6580 + OSS Systems + Shraddha + ossi&vsnl.com +6581 + Exército Brasileiro + Cristiano da CUNHA Duarte + cunha&citex.eb.mil.br +6582 + Sigma Services Sénégal + Gade Ndiaye + alysarr&hotmail.com +6583 + Barone, Budge and Dominick + Walluce Pinkham + Walluce&jhb.bbd.co.za +6584 + Bundesrechenzentrum GmbH + Andreas Windisch + andreas.windisch&brz.gv.at +6585 + W-Phone Inc. + Jack Song + jsong&w-phone.com +6586 + Geoworks + Adam de Boor + adam&geoworks.com +6587 + Airvana Inc. + Jecko Li + jecko.li&airvananet.com +6588 + tietoenator + apa + ajith.poovathkunnel&tietoenator.com +6589 + IFA Meß-, Regel- und Elektrotechnik GmbH & Co. + Roland Leister + roland.leister&ifa-mre.de +6590 + Keipher Solutions + Keith Rowland + ker&keipher.com +6591 + Haansoft Inc. + Jerry Chung + reverse&haansoft.com +6592 + Darin Vision Co + Sergei Shinkevich + shink&darim.com +6593 + neoConsulting, LLC + Aliester Campbell + aliester&neoconsulting.com +6594 + Shijin & Co + shiji + shijin_s&yahoo.com +6595 + SA Mart.com + Conrad Classen + cclassen&sa-mart.com +6596 + Julie Julie + Julie Liaw + liawjulie&yahoo.com +6597 + MessageHero, Inc. + Michael A. Ganton + mganton&messagehero.com +6598 + Cybertron Network Solutions + Steve Kowalik + stevenk&hasnolife.com +6599 + Silver Bow Co.Ltd + Mr.Mingzhe Zhang + mingzhe&web.de +6600 + International Charities Dep. of Taihe Travel Agency,China + Fu Ying + qifei9999&yahoo.com.cn +6601 + IP Dynamics Inc. + Raymond Lee + raymondl&ipdynamics.com +6602 + Sunrise Telecom Inc. + John Sha + john_sha&sunrisetelecom.com +6603 + Vitesse Semiconductors + Ravi Gidvani + ravi&vitesse.com +6604 + Tosky Management Consulting Co.ltd + Mr.zhenyu Tang + tangxin&zb-public.sd.cninfo.net +6605 + Euroclear Operations Centre + Ignace Hemeryck + ignace.hemeryck&euroclear.com +6606 + Platypus Technology + Adrian Sheedy + adrians&platypus.com +6607 + Radiant Networks Plc + Sean Lynch + sean.lynch&radiantnetworks.co.uk +6608 + circuLAB + Henk Jonas + Henk.Jonas&circulab.com +6609 + Western New England University + Peter Szaban + pszaban&wne.edu +6610 + Aditya Systems + Shaikh R Hoque + rajibul&hotmail.com +6611 + SIGMA SERVICES SENEGAL + Gade Ndiaye + alysarr&hotmail.com +6612 + Juniper Financial Corp. + Mark Rouse + mrouse&juniper.com +6613 + Millimetrix Broadband Networks Ltd. + Yehuda Kasirer + yehudak&millimetrix.net +6614 + Telecom Technologies Inc. + Eugene Williams + eugene.williams&ttimail.com +6615 + Clairvoyant Software + Lee Atchison + lee&clairvoyant.com +6616 + Lexica, LLC + Jason Schuchert + jschuchert&lexica.net +6617 + MicroStrategy + Franck Telman + ftelman&strategy.com +6618 + Sonus Networks, Inc. + Information Security + infosec&sonusnet.com +6619 + Reserved + RFC-pti-pen-registration-10 + ---none--- +6620 + Global Telemann System Inc. + David Kim + cckskim&telemann.com +6621 + Department of Foreign Affairs and Trade (Australia) + Ravi Vasudevan + ravi.vasudevan&dfat.gov.au +6622 + secondomillennio + sauro + sauro&secondomillennio +6623 + servizi2000 + sauro + saurogolf&hotmail.com +6624 + Sauro Prosperi + sauro + saurogolf&hotmail.com +6625 + PVT, a.s. + Martin Szotkowski + oper&ica.cz +6626 + Tekotelecom S.p.A. + Stefano Parenti + info&tekotelecom.it +6627 + Telematica International B.V. + Lucas Oorschot + L.Oorschot&connect-it.ti.nl +6628 + Juaben + Kwaku Okyere + okyere&england.com +6629 + ulticom + Valerie Gastaud + valerie.gastaud&ulticom.com +6630 + Accuris Ltd + John O'Donnell + jodonnel&accuris.ie +6631 + Bowling Green State University + A.J.
Fuller + fuller&bgnet.bgsu.edu +6632 + DST Controls + Jonathan Cutting + jcutting&dstcontrols.com +6633 + Personal Genie, Inc. + Nazario Irizarry, Jr. + nirizarry&personalgenie.com +6634 + UUNET + Jim Potter + jim.potter&mci.com +6635 + Cenus Technologies, Inc. + Brad Smith + brad&eng.cenus.com +6636 + Indiana District - LCMS + Greg Zelt + gzelt&in.lcms.org +6637 + Rainbow Computer Warehouse + Wilfred Lau + wlau&isasolution.com +6638 + 3Ware Inc + Medi Montaseri + medi.montaseri&3ware.com +6639 + hhl + Hanhe + hhl888wf&china.com.cn +6640 + GMV Network, LLC + Bob Alvarez + bob&gmvnetwork.com +6641 + Netco Systems Inc. + Jaebong Lee + jblee&netcos.co.kr +6642 + scommtech + Tae seop An + greatman99&freechal.com +6643 + IPR Systems Pty Ltd + Renato Iannella + renato&iprsystems.com +6644 + OzAuthors Pty Ltd + Renato Iannella + renato&iprsystems.com +6645 + gForce Systems + R M Jacobs + rjacobs&gforce.com +6646 + Chevron + R. H. (Bob) Beach + rhbea&chevron.com +6647 + Sevio Srl + Daniele Orlandi + daniele&orlandi.com +6648 + Intramon Ltd + Chris Hansen + chansen&intramon.com +6649 + NewGrid Technology Inc. + Jongsun An + jongsun&newgrid.co.kr +6650 + The Distribution Systems Research Institute + HISAO FURUKAWA + furukawa&obn.dsri-dcc.or.jp +6651 + FIREBONE + hostmaster&alto.com + ---none--- +6652 + ID-Karta + Jan Korycan + jan.korycan&id-karta.cz +6653 + Tyan Computer Corporation + Kenus Lee + KenusL&tyan.com +6654 + Chic Technology Corp. + Oscar Yen + Osacr_Yen&chic.com.tw +6655 + Agenetics Inc + Matthew Gressek + webmaster&agenetics.com +6656 + Cauris + Harald Puhl + oid&cauris.de +6657 + Courion Corporation + Kent Welch + kwelch&courion.com +6658 + Koshy's Computer Academy, Edathua + Thomas Koshy + koshycom&vsnl.com +6659 + Alisa + Lleshi + alisa_style&hotmail.com +6660 + keraladotcom + tomichen + adhitchy&md5.vsnl.net.in +6661 + Spike Broadband Systems, Inc. + Dan Feldman + dan.feldman&spikebroadband.net +6662 + S.E.S.A. AG + Anke Koglin + ako&koeln.sesa.de +6663 + Janeva Interactive + Bert Blevins + info&janeva.com +6664 + Precidia Technologies + Elmer Horvath + elmer&precidia.com +6665 + Atreus Systems Corp. + Chris Wilson + cwilson&atreuscorp.com +6666 + innovaphone GmbH + Martin Streller + mstreller&innovaphone.com +6667 + mediumrare + Julian Priest + julian&mediumrare.net +6668 + To Be Or Not + Timothy Tucker + TWCT&aol.com +6669 + Videotron ltee + Pierre Roy + pierre_roy&videotron.com +6670 + Intelligent Instrumentation + Paul Tobey + ptobey&instrument.com +6671 + Versonix + Igor Vilenski + igor&vcsonnet.com +6672 + Proscend Communications Inc., + Swell Chen + swell&proscend.com +6673 + Align Systems Pty Ltd + Paul Shirren + webmaster&align.com.au +6674 + Mercator + K U Varghese + kuvarghese&emirates.com +6675 + News Interactive + Doug Scoular + scoulard&news.com.au +6676 + Science Dynamics Corporation + Siamak Behta + sbehta&scidyn.com +6677 + ONELINE AG + Mario Wolframm + wolframm&oneline.de +6678 + Hyperknowledge (Europe) + Simon Stirley + s.stirley&hyperknowledge.com +6679 + Surgient Networks, Inc. + Kody Mason + Kody.Mason&Surgient.com +6680 + SOUTHPAW + VINCENT D'AGUANNO + FATBSTRD&BELLATLANTIC.NET +6681 + Altea Communications Inc. + Nicolas Souty + n_souty&altea.qc.ca +6682 + CommsHouse Ltd + Andy Trayler + andy.trayler&celticon.com +6683 + AlphaTrust Corporation + Bill Brice + bill.brice&alphatrust.com +6684 + eOriginal Inc. + Ellis Peterson + ekpeterson&eoriginal.com +6685 + INOVA Diagnostics, Inc. + Chuk Gawlik + info&inovadx.com +6686 + VeCommerce (NZ) Ltd.
+ Tony Quinn + Tony_Quinn&vecommerce.co.nz +6687 + pAsia Inc. + David Lin + DavidLin&pAsia.com +6688 + National Center for Software Technology, Ministry of Information Technology, Government of India + R.Balaji + netops&sangam.ncst.ernet.in +6689 + Zolera Systems, Inc + Rich Salz + rsalz&zolera.com +6690 + OmniCluster Technologies, Inc. + Ron Valli + ron.valli&omnicluster.com +6691 + UTEL + Jean-François C. MORFIN + jefsey&wanadoo.fr +6692 + Telenisus Corporation + Weston Nicolls + wnicolls&telenisus.com +6693 + NetCertainty + Ryan W. Kasten + rwkasten&mindspring.com +6694 + P.G.P. s.a. + Stephan Leclercq + sleclercq&pgp.be +6695 + SAMS, Inc. + Chris Horrocks + chorr87752&aol.com +6696 + Niagara Mohawk Power Corporation + Andrew Gould + goulda&nimo.com +6697 + DOTCOM + Mr. Arnaud KOPP + arnaud.KOPP&dotcom.fr +6698 + Micronet srl + Claudio Ciccia + micronetnqn&micronetnqn.com +6699 + Vulcan Networks + Sidney Antommarchi + lsid&hotmail.com +6700 + FinGO + Sébastien Rodriguez + rodriguez&fingo.com +6701 + Creativ Network Technologies + Anne Vaillancourt + anne&creativnt.com +6702 + Entrust CygnaCom + Matt Cooper + mcooper&sisko.cygnacom.com +6703 + Cognitronics Corporation + Andrew Consiglio + aconsiglio&cognitronics.com +6704 + Vancouver Language Institute Ltd. + David C. Jackson + dcj&schools.bc.ca +6705 + Zygox, Inc. + Vasu Murthy + vasu&zygox.com +6706 + NOAA/PMEL/EPIC + Donald Denbo + dwd&pmel.noaa.gov +6707 + Unassigned + Returned 2020-02-24 + ---none--- +6708 + Handuru Telecom + Kim sang min + ksm&handuru.com +6709 + Scarlet Telecom BV + L. de Rooy + ldr&globalxs.nl +6710 + South Birmingham College + Phil Watts + philw&sbirmc.ac.uk +6711 + Connect One + Jakob Apelblat + jakoba&connectone.com +6712 + VOXTRONIC Technology + Ing. Roman Roiss + rr&voxtronic.com +6713 + EIZO + Philipp Liebmann + philipp.liebmann&eizo.com +6714 + Banco Santander Central Hispano + Raul Lopez Marin + rlm2&bancosantander.es +6715 + HTT Consulting + Robert G. Moskowitz + rgm&htt-consult.com +6716 + AMS Systems + Mark Gillies + mg&amslux.lu +6717 + GiantLoop Network, Inc. + Andrew King + andrew.king&giantloop.com +6718 + Exfo Fiber Optic Test Equipment + Jocelyn Cimon + jocelyn.cimon&exfo.com +6719 + TransComm Technology System, Inc + Jihua Cheng + jcheng&transcomm.com +6720 + PT. Hariff DTE + Alwi Hasan + hariff&rad.net.id +6721 + Lodbroker Pty Ltd + Nils Oman + nils.oman&lodbroker.com +6722 + comMATCH Ltd. + Dan Davidson + dan.davidson&commatch.com +6723 + Internet Names UK + Mr. Suissa + zvi&law.com +6724 + Cap Gemini Telecom France IPNS + Vincent Thinselin + vthinsel&capgemini.fr +6725 + Data Interchange Plc + Andrew Filby + andrew.filby&dip.co.uk +6726 + CellPoint Systems + Magnus Ericsson + me&cellpoint.com +6727 + WINIT s.a. + Carlos Crosetti + ccrosetti&winit.com.ar +6728 + Interlink Networks, Inc. + Wei Wang + weiwang&interlinknetworks.com +6729 + Dolby Laboratories, Inc. + Steve Oddo + sjo&dolby.com +6730 + Nextel Partners Inc. + John Goodman + john.goodman&nextelpartners.com +6731 + WIT Soundview + Sanjay Das + sdas&witsoundview.com +6732 + Intercim Inc. + Dan Edgar + edgardm&intercim.com +6733 + CMD Technology + Mark O'Dell + odell&cmd.com +6734 + WEGO, Inc + Brian J. Coan + brian&wego.com +6735 + Tenebrous + Ronan Flood + rpf&acm.org +6736 + Jinny Software Ltd + George Yazbek + george.yazbek&jinny.ie +6737 + New Era of Networks, Inc + Mike Bachand + mbachand&neonsoft.com +6738 + Riversman + Simon E.
La Rosa + iananumbers&riversman.com +6739 + Spring Yellow + 91474794437 + springyellow&operamail.com +6740 + Cal Poly State University + Dan Malone + mwadmin&calpoly.edu +6741 + Nulli Secundus Inc + David Bennett + dave&nulli.com +6742 + priceline.com Incorporated + Jonathan Taylor + jonathan.taylor&priceline.com +6743 + Sophicomm + Pierre Nadeau + pierre.nadeau&sophicomm.com +6744 + Dantron + Daniel Thomas + Fallenone&Msn.com +6745 + LASAT Networks A/S + Jacob Frost + jfr&lasat.com +6746 + Aftek Infosys Ltd. + Mahesh Vaidya + maheshv&aftek.com +6747 + TeraBurst Networks + Moshe Rozenblit + mrozenblit&teraburst.com +6748 + Up4grab. Inc + Thomas Chapman + cooling&up4grab.com +6749 + Bosung Hi-Net Co., Ltd + Seok-Hwan Kim + president&hi-net.co.kr +6750 + Gascom + Vladimir Dubinkin + dubinkin&gascom.ru +6751 + Dolphin Interconnect Solutions AS + Hugo Kohmann + hugo&dolphinics.no +6752 + Axtronics Inc. + Pedro Ting + pedro&iavista.com +6753 + SysControl AG + Simon Poole + simon.poole&syscontrol.ch +6754 + EzGov + Chris Wilkenloh + cwilkenloh&ezgov.com +6755 + Rush Presbyterian St. Lukes Medical Center + Douglas C. Wojcik + dwojcik&rush.edu +6756 + Unixguy.org + Douglas C. Wojcik + dwojcik&rush.edu +6757 + Grid + Carl Kesselman + carl&isi.edu +6758 + SimplySay Inc. + Lee Walker + lwalker&simplysay.com +6759 + Allcharge + Boris Temkin + borist&allcharge.com +6760 + Virginia Tech + Ronald Jarrell + jarrell&vt.edu +6761 + Axonn Wireless Solutions + Nathan Teske + NathanT&Axonn.com +6762 + Cambridge Positioning Systems Limited + Martyn Lancaster + martyn.lancaster&cursor-system.com +6763 + Metakernel Network Laboratories + Alexandre Dulaunoy + adulau&metakernel.com +6764 + Imagine Solutions + Tom Guyton, Systems Engineer + tom_guyton&imaginesolutions.com +6765 + ML Hightower, Inc. + J.J. Hageman + jhageman&hightower.com +6766 + CPNI + Danny Chouinard + hostmaster&cpnicore.com +6767 + RLX Technologies, Inc. + David Blankenship + david.blankenship&rlx.com +6768 + Pixelmetrix Corporation Pte + Martin Janzen + janzen&pixelmetrix.com +6769 + Dynamic Imaging Corporation + Gabriel Duque + gduque&dimaging.com +6770 + OpenCon Communication Systems + Kamal Motwani + registration&opencon.com +6771 + CyberPath Inc. + Sean Blinn + registration&cyberpathinc.com +6772 + eBSure Inc. + Motty Alon + motty.alon&ebsure.com +6773 + Engineering International Corporation + Joel E. Steiger + jsteiger&eic-us.com +6774 + University of Virginia + James Jokl + jaj&virginia.edu +6775 + Cablevision + John Pomeroy + john&cv.net +6776 + bmypc + Keshet Ohad-Ziv + ohadziv&bmypc.com +6777 + TriGem InfoComm + Ho Sung, Ko + hsko&tgicc.com +6778 + AboCom Systems, Inc. + SHIH, CHIH-I + trees&abocom.com.tw +6779 + CHL NETHERLANDS B.V. + Mr R. Lens + marketing&chl.nl +6780 + Linux Communications AS + Ørnulf Nielsen + on&lincom.no +6781 + Vast + Glady + gladwin_r&hotmail.com +6782 + Ciena (formerly 'Akara Inc.') + Randy Jones + rjones&ciena.com +6783 + Tidepoint + Ed DeBus + ed.debus&tidepoint.net +6784 + JP Systems + Scott Lopez + scott.lopez&jpsystems.com +6785 + Delta Electronics Inc. + Chris Tsai + chris.tsai&delta.com.tw +6786 + Dreamworks SKG + Kevin Gray + kgray&dreamworks.com +6787 + NetConnect + John Horton + john.horton&netconnect.co.uk +6788 + Whitebird + Rolf Welde Skeie + rws&thewhitebird.com +6789 + OCTOGON Software Development GmbH + Martin Helmling + martin.helmling&octo-soft.de +6790 + Finjan Software + Yigal Edery + yigal&finjan.com +6791 + EXE Technologies, Inc.
+ Mike Bradshaw + mike_bradshaw&exe.com +6792 + Active+ Software + Daniel Doguet + danield&activeplus.com +6793 + TUIASI (previously 'UTIasi') + Gheorghiță BUTNARU + hostmaster&tuiasi.ro +6794 + Adhesive Software Incorporated + John Kellar + john&dhcp-169.adhesive.com +6795 + Pocono Pet Sitters + MaryElizabeth O'Hara + zach&pnpa.net +6796 + TekVision Communications + Keith Kim + keith.kim&tekvision.net +6797 + ConfigureSoft, Inc. + David J. Easter + David.Easter&configuresoft.com +6798 + MaxLinear Hispania S.L.U. + Mayte Bacete + mbacete&maxlinear.com +6799 + GlueCode, Inc. + Tracy M. Adewunmi + tadewunmi&gluecode.com +6800 + Express Rail Link sdn Bhd + Kenny Hor + macrohard1&hotmail.com +6801 + Gamma Technologies + Bratkov Yuriy + yuriy&gamma.kz +6802 + M Design Communications + Shigekazu Kitagawa + shige&m-design.com +6803 + BCI (Basic Commerce and Industries, Inc.) + Phillip Rando + PRando&bcisse.com +6804 + Weinschel Corp + Jerry Messina + jmessina&weinschel.com +6805 + PSA WORLD NET Inc. + PS.ANDERSEN + SERAF&FORUM.DK +6806 + The Pennsylvania State University + Academic Services and Emerging Technologies + root&aset.psu.edu +6807 + Eotvos Lorand University + Vonderviszt Lajos + vondervi&ludens.elte.hu +6808 + Buyers Laboratory Inc. + Len Wolak + lwolak&bellatlantic.net +6809 + Ontika Computersystems + Kai Bankett + kai.bankett&ontika.net +6810 + MUXCOM Inc. + Cheong Lee + cheong&muxcom.com +6811 + Nameflow + Darryl C Price + darryl&nameflow.com +6812 + Yoda Communications, Inc. + Raymond Tsai + raymond&yoda.com.tw +6813 + Olympia College + Muazzem + muazzem1&yahoo.com +6814 + ItchyFeet Networks + Andrew Agnew + andy&itchyfeet.net +6815 + CyberSpacia Pty Ltd + Andrew Agnew + andy&itchyfeet.net +6816 + b4 + luiggi + lolo&iponet.es +6817 + Metanate Ltd + David Lamkin + drl&shelford.org +6818 + Tripwire, Inc. + Fenil Manani + vendoralerts&tripwire.com +6819 + Musicfans, Inc. + Chris Maresca + ckm&musicfans.com +6820 + Menta Software + Yaniv Azriel + yaniv&mentasoftware.com +6821 + IMT Nord Europe + Jacques Landru + jacques.landru&imt-nord-europe.fr +6822 + University Information Services, University of Cambridge + Hostmaster + hostmaster&cam.ac.uk +6823 + Distributed Object Technologies, Inc. + Jeffrey C. Rice + jr&dotech.com +6824 + List & Niemann + Klaus Niemann + klaus.niemann&listniemann.de +6825 + iCue Systems, Inc. + Larry Perng + larryp&icuesys.com +6826 + Dark Matter Network Technologies, Inc. + Scott Apgar + sapgar&darkmatternet.com +6827 + Evertz Microsystems Ltd + Geoffrey Tam + geoff&evertz.com +6828 + Caviness Software + Pete Caviness + pete&caviness.com +6829 + RAMiX Inc. + Mark Bronson + mark&ramix.com +6830 + onehottmin Inc. + Chester Potter + onehottmin&home.com +6831 + DSS Digital Security Service GmbH + Sven-Holger Wabnitz + wabnitz&digital-security.com +6832 + VCHEQ.com Pte Ltd + Technology Team + netadmin&vcheq.com +6833 + Scarptor Network AB + Thomas Berghemmer or Martin Morey + thomas.berghemmer&scarptor.se or martin.morey&scarptor.se +6834 + sbuck networks + Stephen Buck + services&sbuck.net +6835 + chello broadband austria + David Angerer + dangerer&chello.at +6836 + Felleskjøpet Agri BA + Gunnar Andresen + gunnar.andresen&felleskjopet.no +6837 + Solcom + Andrew J Baxman + andyb&solcom.com +6838 + i.g.c. + Martin Jean + mjean&clic.net +6839 + Accelight Networks + Sebastien Bellefeuille + sebas&accelight.com +6840 + Deutsches Elektronen-Synchrotron + Kars Ohrenberg + Kars.Ohrenberg&desy.de +6841 + VYX + Filip Zawadiak + philz&vyx.net +6842 + Telect, Inc. + Daniel S.
Freepons + danf&telect.com +6843 + Global Solution + Takeshi Yasuhara + yasuhara&g-solution.com +6844 + SAT Consulting ApS + Bjarne Maschoreck + bjma&sat-automation.com +6845 + Micro Energetics Corp. + David Hicks + dhicks&nightware.com +6846 + Paulosoft + Pawel Kasprowski + pk&paulosoft.com.pl +6847 + Teleias, Inc + Rayan Zachariassen + Rayan&Teleias.com +6848 + Crossbeam Systems, Inc. + Stephen Rhen + srhen&crossbeamsys.com +6849 + Reddo Networks AB + Johan Svedberg + johan.svedberg&reddonetworks.se +6850 + MCCT Inc. + Elwin Macomber + elwin&mcct.com +6851 + XOR Inc. + Joshua Prismon + josh&xor.com +6852 + University of Colorado at Boulder + Richard A. Jones + jones&colorado.edu +6853 + Television Systems Ltd + Steve Rolls + stever&televisionsystems.ltd.uk +6854 + Régie Des Transports De Marseille + R.SOLA M.GUASCO + m.guasco&rtm.fr +6855 + WNF Consulting, Inc. + Andrew Woodward + awoodwar&wnf.com +6856 + LiquidLight + Jeff W. Li + jli&liquidlightinc.com +6857 + DigitalConvergence + Doug Davis, President Technology Group + ddavis&digitalconvergence.com +6858 + Fantasma Networks + Paul R. Boehm + pboehm&fantasma.net +6859 + Actzero Inc + Cayce Ullman + cayce&actzero.com +6860 + 曾雄文 + 曾雄文 + hunterzxw&netease.com +6861 + Druma Inc + Bapi Ahmad + bm&compuserve.com +6862 + Wapinspain.com + Ismael Mendez + ismael&wapinspain.com +6863 + ZyTrax Communications, Inc. + Ron Aitchison + raitchison&zytrax.com +6864 + Harmania Multimedia Inc + Gilles Lacombe + info&harmania.com +6865 + Elron Telesoft + Shlomo Tzidkani + shlomot&elrontelesoft.com +6866 + Lyonnaise Communication + Dominique RITOURET + dominique.ritouret&noos.com +6867 + IPOptical, Inc. + Andy Zhao + andy.zhao&ipoptical.com +6868 + Georgetown University + Charles Leonhardt + lenhardt&georgetown.edu +6869 + Steve Burton + Steve Burton + steve&sliderule.demon.co.uk +6870 + Core Software Technology + Robert A. Zawarski + zawarski&coresw.com +6871 + CERT Coordination Center + Bill Fithen + wlf&cert.org +6872 + Help/Systems, Inc. + Tom Huntington + tom.huntington&helpsystems.com +6873 + bholdcompany + Bert Verdonk + b.verdonk&bholdcompany.com +6874 + Woodwind Communications Systems, Inc + David Wheeler + dwheeler&woodwindcom.com +6875 + NewEarth Swedenborgian BBS + Michael V. David + michael&newearth.org +6876 + VMware Inc. + Michael MacFaden + mrm&vmware.com +6877 + Evergreen Internet + Jeff Huff + jhuff&evergreen.com +6878 + Black Box Corp. + David Sefzik + David.Sefzik&Blackbox.com +6879 + digiMine.com + Jay Sauls + jays&digimine.com +6880 + ---none--- + ---none--- + ---none--- +6881 + Kal + Aravinda Korala + aravinda&kal.com +6882 + Micro Electronics Inc + Anthony Dean + adean&microelectronics.com +6883 + Linux NetworX, Inc. + Clark Roundy + croundy&linuxnetworx.com +6884 + boco + jiaoli + jiao_li&263.net +6885 + Intelligent Telecommunications Inc. + Park, Yongsuk + piao1234&it.co.kr +6886 + KPN + Marc Titulaer + marc.titulaer&kpn.com +6887 + IT-Conference + Yuri Hramov + yri&itconf.ru +6888 + Millenium Installer + M. Hasan + mamatmillenium&hotmail.com +6889 + Avaya Communication + Dan Romascanu + dromasca&avaya.com +6890 + Shell Information Technology International B.V. + Rogier Carper + rogier-l.carper&shell.com +6891 + Acepta.com + Juan Carlos Pérez Aguayo + juancarlos.perez&acepta.com +6892 + Cnation + David Pisoni + hostmaster&cnation.com +6893 + TurboLinux, Inc. + Brad Midgley + brad&turbolinux.com +6894 + DataDirect Networks + Al Saraie + asaraie&datadirectnet.com +6895 + Ereo, Inc.
+ Steve Swoboda + legal&ereo.com +6896 + Mahi Networks + Tom Lowe + tlowe&mahinetworks.com +6897 + JDV + Jon Vestal + jvestal&speakeasy.org +6898 + Verance Corporation + Douglas Wong + dwong&verance.com +6899 + Nanyang Technological University + Administrator + ywengkin&singnet.com.sg +6900 + Baltic Institute of Economics and Finance + Andrew A. Soukharew + andrews&bief.ru +6901 + AETA.COM + Gerard Lamar + glamar&aeta.fr +6902 + Plisch Gmbh & Co KG + Hans-Dieter Kosch + kuehlwein&plisch.de +6903 + Visoft (Pty) Ltd + Johann Botha + jbotha&visoft.co.za +6904 + IBM, Infrastructure Support Group + Eric Hameleers + eric.hameleers&nl.ibm.com +6905 + Brocade Communication Systems, Inc. (formerly 'Rhapsody Networks Inc.') + Scott Kipp + skipp&brocade.com +6906 + Asiana Communications + Bin Li + libin&asiana.com +6907 + Broad Telecom, S.A. (Btesa) + Maribel Vimbela + m.vimbela&btesa.com +6908 + Movaz Networks + Ryan Schmidt + ryans&trusted.net +6909 + Network Security Technologies, Inc. + Elaine Harvey + eharvey&netsec.net +6910 + Inquent Technologies + Dejan Mijailovic + dejan.mijailovic&inquent.com +6911 + AUCS Communications Services + Martin van den Nieuwelaar + martin.nieuwelaar&infonet-europe.com +6912 + Morgan Stanley Dean Witter Online + Sam Shen + sam.shen&msdw.com +6913 + Bona Technology + Junpyo Hong + gigawatt&bonatech.co.kr +6914 + Timing Solutions Corporation + Scott Melton + melton&timing.com +6915 + Computable Functions Limited + Iain MacKay + Iain&computable-functions.com +6916 + Norbert Klasen + Norbert Klasen + klasen&gmx.net +6917 + Alternative Advice + Stéphane Mariel + stf&alternative-advice.com +6918 + Belle Systems A/S + Ivan S. Joergensen + isj&bellesystems.com +6919 + Cubenet s.r.l + Flavio Righini + flavio&ten.it +6920 + Think & Do Software, Inc. + Dan Bracciano + coop2&thinkndo.com +6921 + Morrison Industries + Adam Tauno Williams + adam&morrison-ind.com +6922 + Amino Communications Ltd + Dr Martin J Neville-Smith + mnsmith&aminocom.com +6923 + Shutterfly + Chris Guthrie + cguthrie&shutterfly.com +6924 + Capital Internet, LLC + Brian Beaulieu + brian&capital-internet.net +6925 + IT Communications, Kennedy Space Center (KSC) + Stephen A. Kerney + stephen.a.kerney&nasa.gov +6926 + Optical Access Inc. + Sergiu Rotenstein + srotenstein&opticalaccess.com +6927 + MAIL.COM + Santhosh Kumar Iddyamadom + siddyamadom&staff.mail.com +6928 + Mobility Next Generation + James Halls + james.iana&mobyn.com +6929 + The Root Group + Joshua Weinberg + josh_iana&rootgroup.com +6930 + Lycee Leonard de Vinci + Frederic Varni + fvarni&wanadoo.fr +6931 + Interessengemeinschaft Software-Engineering + Ralf Hägele + ralf.haegele&software-engineering.org +6932 + Internet Content Register + Mike Hawkes + mike&internet.org.uk +6933 + Reputy + Florent Guillaume + florent.guillaume&reputy.com +6934 + Corporation One of America, Inc. + Trevor G. Hammonds + trevor&royal.net +6935 + Arachne + John Lederer + john&jhml.org +6936 + VICORSOFT CORPORATION + VIJAY KUMAR BANSAL + VKBANSAL&mailnebraska.com +6937 + NetReliance, Inc. + Kevin Collins + kevin.collins&net-reliance.com +6938 + Enigma Inc. + Elan Freedy + elanf&enigma.com +6939 + Hemp.org + Shaun savage + savages&hemp.org +6940 + Tufts University + University Systems Group + usg&tufts.edu +6941 + proflash cross-media AG + Marcus Mennemeier + m.mennemeier&proflash.de +6942 + James Cook University + Jeffrey Bird + Jeffrey.Bird&jcu.edu.au +6943 + R-Alpha Ltd. + Vladimir A.
Grishin + info&alpha.ru +6944 + REDSYS (formerly 'SERMEPA') + Gonzalo Alvarez-Castellanos + galvarez&redsys.es +6945 + LAKE Communications + Mark McCarville + mark.mccarville&lakecommunications.com +6946 + Fouche Enterprises, LLC. + Roger Fouche + info&fouche.net +6947 + Central Bank of Russia, Arkhangelsk + Oleg Laptev + oleg&acb.ru +6948 + Tumsan Oy + Juhani Leskinen + juhani.leskinen&tumsan.fi +6949 + Nextra BW CSP GmbH + Jochen Scharrlach + js&mgm-net.de +6950 + Aztec + Network Management Manager + nsm&aztectech.com +6951 + Freeler BV. + Peter Zijlstra + peter.zijlstra&hq.freeler.nl +6952 + iBiomatics LLC + Drew Foglia + Drew.Foglia&ibiomatics.com +6953 + Air2Net AB + Turbo Fredriksson + turbo&air2.net +6954 + Sema Group + Lee Anne Morgan + lee.morgan&sema.be +6955 + ComSol Kommunikationssysteme GmbH + Schober Klaus + schober-klaus&t-online.de +6956 + eTopware Labs + Gabriel Sala + gab&etopware.com +6957 + Rapi10.net + Sebastiao Rocha + rocha&rapi10.net +6958 + Kent State University + Douglas M. Westfall + dougw&net.kent.edu +6959 + bluestreak + Drew Jenkins + djenkins&bluestreak.com +6960 + FELIX DANZIGER INTERNATIONAL KUNSTHANDEL + WALENCIK ADAM + AWALENCIK&AOL.COM +6961 + tele.ring Telekom Service Gmbh + Thomas Stagl + thomas.stagl&1012surf.net +6962 + The University of Sydney + Jie Gao + J.Gao&isu.usyd.edu.au +6963 + Andrews IT Engineering Ltd + Zoltan BORBELY + bozo&andrews.hu +6964 + Soft Teleware Inc. + Choi, Soo-Jin + nada&softteleware.com +6965 + RSBX + Raymond S Brand + rsbx&acm.org +6966 + The Bright Oceans Corporation + jiao li + lij&boco.com.cn +6967 + Digismart + Dhananjay Nene + dhananjay&digismart.com +6968 + Systek AS + Terje Trane + trane&systek.no +6969 + Copernicus Global Billing Services + John Bass + jbass&copernicusgbs.com +6970 + VCON Ltd. + Naji Sourani + najis&vcon.co.il +6971 + Ferimex IT spol. s r.o. + Ing. Marian Repka + repka&ferimexit.sk +6972 + Canbox + Joerg Moellenkamp + moellenkamp&msg-ag.com +6973 + Savatech Inc. + Jason Patton + jpatton&savatech.com +6974 + EarthLink Inc. + Kihoon Jeoung + kihoonj&corp.earthlink.net +6975 + WaterCove Networks, Inc. + Paul Ciarfella + pciarfella&watercove.com +6976 + HowStuffWorks.com, Inc + Igor Brezac + igor.brezac&howstuffworks.com +6977 + Resilience Corporation + Fred Marshall + fred&resilience.com +6978 + JBruce.net + Jason Bruce + jbruce&jbruce.net +6979 + KereniX + Yossi Werker + Yossi_Werker&Kerenix.com +6980 + MRC Project + Satoru YAGI + yagi&core.ntt.co.jp +6981 + interQ, Inc. + Kevin Ying + sysadmin&interq.or.jp +6982 + Erba Holding + Rosa Nicolas + nicolas.rosa&habitat.be +6983 + Sofaware Technologies + Amir Rapson + rapson&sofaware.com +6984 + Fovea, Inc. + Serge Zloto + serge&i-fovea.com +6985 + Jumpy Cat Systems, Inc. (formerly 'Prism Software, Inc.') + Peter Shiner + peter.shiner&gmail.com +6986 + Kiodex, Inc. + Richard Seroka + richard.seroka&kiodex.com +6987 + WRonline GbR + Stefan Paletta + stefanp-iana-oid&WRonline.de +6988 + University of Northern British Columbia + Nicholas Waller + operations&unbc.ca +6989 + Extel Communications Pty Ltd + David Chiam + engineering&extel.com.au +6990 + Redfern Broadband Networks + David Funk + dfunk&RedfernNetworks.com +6991 + N-Space Pty Ltd + Tim Curtin + tim&n-space.com.au +6992 + KnowNow, Inc + Rick Cobb + rcobb&knownow.com +6993 + VirtualNet + Strata R. Chalup + strata&virtual.net +6994 + The Midgard Project (Open Source) + Jean-Philippe BRUNON + jpbrunon&aurora-linux.com +6995 + c-base e.V. 
+ Felix Vogel + admin&c-base.org +6996 + WhatEverNet Computing + Jose Carlos Correia + webmaster&whatevernet.pt +6997 + Yale University + Andrew Newman + newman-andy&yale.edu +6998 + Channelogics Incorporated + DNS Administrator + dnsadmin&channelogics.com +6999 + University of Indianapolis + Computing Services + ucs&uindy.edu +7000 + NET-volution srl + Roberto Gilberti + roberto.gilberti&net-volution.com +7001 + Big Net, Inc. + Edward Kmett + ekmett&bignet.net +7002 + Sebastian Crespo + Sebastian Crespo + sacrespo&go.com +7003 + CISCO-MASBU + Amit Tropper + atropper&cisco.com +7004 + Metodo Srl + Franco Violi + fvioli&metodo.net +7005 + Kagoor Networks + Rama Eshel + rama&kagoor.com +7006 + eyeo.com + David E. Storey + David.Storey&eyeo.com +7007 + Guangzhou Thinker Communications Technology Co., Ltd. + Lu yanqing + luyq&gsta.com +7008 + Discordian Alliance For Teaching + Darren Stalder + darren&daft.com +7009 + Chemtex Engineering of India Ltd + N Henriques + nhenriques&chemtex.co.in +7010 + Oranje Consulting, Inc. + Jeff van Eek + vaneek&bellsouth.net +7011 + Prism Microsystems, Inc. + A N Ananth + ananth&prismcomm.com +7012 + Edisontel S.p.A. + Sergio Chiesa + sergioc&edisontel.it +7013 + ByteSphere LLC + Nicholas Saparoff + webmaster&bytesphere.com +7014 + Adexa Inc. + Adrian Basescu + adrian&adexa.com +7015 + Vignette Corporation + Larry Harkrider + lharkrider&vignette.com +7016 + Seneca Networks + Vipul Sharma + vsharma&senecanetworks.com +7017 + Adari TeleComp Inc + Seva Adari + Seva&Adari.Net +7018 + PT Multimedia + Joao Pedro Goncalves + joaop&ptm.pt +7019 + Polaris Networks, Inc. + Manh Do + mdo&polarisnetworks.com +7020 + Saecos Corporation + Jack Lund + jack.lund&saecos.com +7021 + SysNet S.n.c. + +39 0382 531274 + info&sys-net.it +7022 + MRZ AG + Gion Manetsch + manetsch&datacomm.ch +7023 + The Direct Connection Ltd + Stephen Clifford + stephen.clifford&dircon.net +7024 + VHB Technologies, Inc. + Darryl Torske + dtorske&vhbtech.com +7025 + Double D Investments + Dennis Doyle + DenDoy&aol.com +7026 + Sonera SmartTrust + Magnus Robertsson + magnus.robertsson&sonera.com +7027 + Onix Microsystems Inc + Charles Yang + cyang&onixmicrosys.com +7028 + Alloptic, Inc. + Bryan Shadish + bryan.shadish&alloptic.com +7029 + CUBIweb, LLC + Kyle Welsh + kyle&cubiweb.com +7030 + Ethernet, Solucoes Informaticas, lda + Nuno Loureiro + nuno&eth.pt +7031 + McAfee, Inc. (formerly 'Securify, Inc.') + Mark Hoy + mark_hoy&mcafee.com +7032 + Chartered Semiconductor Manufacturing + Justin Lim + bretwatson&charteredsemi.com +7033 + Matías Software Group + Salvador Ortiz + sog&msg.com.mx +7034 + inyourwake + Jeff Hagberg + jeffhagberg&hotmail.com +7035 + VTT Electronics + Tommi Saarinen + Tommi.Saarinen&vtt.fi +7036 + MBDyn + Pierangelo Masarati + masarati&aero.polimi.it +7037 + Ubris + Bart Den Haese + uubris&netscape.net +7038 + Tron Network Solutions + Jankok L. + ljankok&wanadoo.nl +7039 + bison GmbH + Helmut Wirth + netmaster&bison-soft.de +7040 + DataLinx Inc + Jajati K. Samal + jajati&datalinx.net +7041 + Integra + Guy Kastenbaum + kastenbaum&integra.fr +7042 + Permanent Vacations + Ralph Hightower + lynmax&logicsouth.com +7043 + Apt Computer Systems Limited + Danny Shobrook + danny&aptcompsys.com +7044 + Primal + Nathan Reed + nreed&primal.com +7045 + TBA Communications + Trey Valenta + trey&anvils.org +7046 + Tom's NET + Tom Oehser + Tom&Toms.NET +7047 + Flux Productions + Laurence Brockman + laurence&flux.ab.ca +7048 + Pico Communications, Inc.
+ Lung Yeh + lungyeh&pico.net +7049 + DataNet Development Group, Inc. + Stephen L. McMahon + stephen&mcmahon.com +7050 + Kazan State University + Petr Lopuhov + petr&ksu.ru +7051 + Epicom S.A. + Rafael Gonzalez + epicom&idecnet.com +7052 + Dipl.-Ing. Markus Leist + Markus Leist + markus&lei.st +7053 + Videon CableSystems Alberta Inc. + Laurence Brockman + l.brockman&videon.ca +7054 + Riverbed Technology (formerly 'Mazu Networks, Inc.') + Mari Kotlov + mkotlov&riverbed.com +7055 + Cox Communications, Inc. + Martin M. Cade + martin.cade&cox.com +7056 + Senterprise Inc. + Ilan Shamir + ishamir&senterprise.com +7057 + SuSE Linux AG + Ralf Haferkamp + suse-oid&suse.de +7058 + BB4 Technologies Inc + Sean MacGuire + sean&bb4.com +7059 + Complete Business Solutions - Global Health + Ralph Hightower + rhightow&cbsinc.com +7060 + Naikel Software + Naikel Aparicio + naikel&SoftHome.net +7061 + Automated Logic Corporation + Tom Samford + tsamford&automatedlogic.com +7062 + Oasis Telecommunications, Inc. + Craig J Constantine + oid&ot.com +7063 + Australian Defence Force Academy + Geoff Collin + g.collin&adfa.edu.au +7064 + UTStarcom (China) Co., Ltd Shenzhen Branch + Tom Ren + tom.ren&utstar.com +7065 + Shimin Denshi Jouhou Mou + YASUDA Yukihiro + support&poem.co.jp +7066 + Dr. Srinivas + Dr Srinivas + sriradesh&hotmail.com +7067 + TELCO TECH GmbH + Gernot Tenchio + info&telco-tech.de +7068 + CryptoPRO + Roman E. Pavlov + pre&mo.msk.ru +7069 + SFI Open Source Services + Eckard Koch + eko&sfi.ch +7070 + Narad Networks + Prasad Dorbala, Director Software Engineering + dorbalap&naradnetworks.com +7071 + Tele-Info-System + Janusz Krzysztofik + jkrzyszt&tis.icnet.pl +7072 + Ehrhart Jewelers + Jack Ehrhart + ehrunl&aol.com +7073 + r00tkit.com + Joshua Buysse + buysse&r00tkit.com +7074 + TheMagicProgrammer + Bob Butler + rbutler&themagicprogrammer.com +7075 + Lothar Maronn + Lothar Maronn + maronn&saloma.de +7076 + Australian Online Solutions + Sam Johnston + samj&ausonline.com.au +7077 + Desarrollos Digitales + Daniel Perez + danielp&feedback.net.ar +7078 + True North Communications + Richard Reid + rreid&truenorth.com +7079 + Megisto Systems, Inc. + Mark Shelor + mshelor&megisto.com +7080 + xeli.net + Andrew Liu + askliu&xeli.net +7081 + Maaslandse Unix & Linux Laboratorium (MULL) + Hugo van der Kooij + hugo&vanderkooij.org +7082 + DAIDEN CO., Ltd. + Isao Fukuda + fukuda&dyden.co.jp +7083 + Applied Generics Ltd + Joe Dixon + mjd&appliedgenerics.com +7084 + Friends Provident + Dave Benson + dave.benson&friendsprovident.co.uk +7085 + Cybercomm BV + Leon de Rooy + beheer&cybercomm.nl +7086 + GTONG Shenzhen Ltd. + John Yang + ravi&gtong.com +7087 + KMC Technologies (formerly 'Selis Networks Inc.') + Yigal Korolevski, Managing Director + ykorol&nms-guru.com +7088 + Network Systems + Vladislav Bogdanov + slava&nsys.by +7089 + Centrum Arbeidsverhoudingen + A.J.M. van der Harg + Postmaster&caop.nl +7090 + El Bit Salvaje + Javier Baliosian + javierba&adinet.com.uy +7091 + CREN + Cristino Arocho, Jr. + aromont&cren.net +7092 + ScienceXchange + John Stoddard + stoddard&sciencexchange.com +7093 + Ericsson - Geobility + Mitch De Felice + mitch&geoportal.net +7094 + Wachovia Trust + Matthew P. O'Reilly + matt.oreilly&wachovia.com +7095 + Pert + Nicolas Baumgarten + nico&pert.com.ar +7096 + JSEIC + Larry + zhu_lan&china.com +7097 + LeatherXchange, S.A.
+ Scott Dejmal + scott.dejmal&leatherxchange.com +7098 + Edison Mission Energy + Phil Moors + pmoors&edisonmission.com +7099 + Ripeco Ltd + James Turnock + james&jturnock.fsnet.co.uk +7100 + Killik & Co. + Graham Bygrave + graham.bygrave&killik.co.uk +7101 + CO3i + Russell Packer + russellp&co3i.com +7102 + IDEALX + Olivier Lemaire + olivier.lemaire&IDEALX.com +7103 + SaltFire, Inc + Toby Cabot + toby&saltfire.com +7104 + Supervertical Sistemas + Alexandre Ribeiro do Nascimento + aribeiro&supervertical.com.br +7105 + CANTV + Anibal Canelon + acanel&cantv.com.ve +7106 + Teleport Corp + Richard Uren + richard&starport.net +7107 + Geocast Network Systems + Castor Fu + castor&geocast.com +7108 + Urscheler Software + Roger Urscheler + roger.urscheler&icn.siemens.com +7109 + City University of Hongkong + Chau-man HUI + cmhui&cs.cityu.edu.hk +7110 + Kaashyap Radiant System ltd + Mr . P . F Kishore + javakishore2000&yahoo.com +7111 + Sunamerican Communication Technology Inc. + Mr.Shuangjiang Tan + tsj&sohu.com +7112 + U.S. Department of Commerce + Don Badrak + dbadrak&census.gov +7113 + YottaYotta + Javier Lozano + jlozano&yottayotta.com +7114 + Universidad de Navarra + Ignacio Coupeau + icoupeau&unav.es +7115 + book2eat.com limited + Elias Papavassilopoulos + ep104&book2eat.com +7116 + TechTrade International Management AB + Torsten Silverstolpe + ts&techtradeinter.se +7117 + Invertix, Inc + Thu Kyaw + tkyaw&invertix.com +7118 + NetContinuum, Inc + Joe Nguyen + joe&netcontinuum.com +7119 + OPNET Technologies, Inc. (formerly 'Network Physics, Inc.') + Romain Kang + rkang&opnet.com +7120 + Sentry Telecom Systems Inc. + Chris J. Veeneman + cveeneman&sentrytelecom.com +7121 + Comunicaciones Netglobalis + Marcelo Bartsch + mbartsch&netglobalis.net +7122 + Lusars.net + Chad Slaughter + slaught&lusars.net +7123 + nSOF Parallel Software Ltd. + Dan Bar Dov + dan&nsof.co.il +7124 + Stand-by + Andreas Birkner + admins&stand-by.de +7125 + Digital Creations + Christopher Petrilli + petrilli&digicool.com +7126 + Positron Public Safety Systems Inc. + Claude Chaussé + cchausse&positron.qc.ca +7127 + Nadeem Hasan + Nadeem Hasan + nhasan&nadmm.com +7128 + Worcester Polytechnic Institute + Allan E. Johannesen + aej&wpi.edu +7129 + Lancaster University + Steve Bennett + S.Bennett&lancaster.ac.uk +7130 + KiddiePot.com + Daniel O'Brien + cdim7&disinfo.com +7131 + Orient Telecom + Min-Hwan, Kim + mhkim&orientel.co.kr +7132 + ELANsat TECHNOLOGIES Inc. + Jett Chen + jett&chenx.org +7133 + Corenix System Inc. + Jett Chen + jett&corenix.com +7134 + Prosolve + Sean Mathias + seanm&prosolve.com +7135 + Comité Réseau des Universités + Christian Claveleira + Christian.Claveleira&cru.fr +7136 + SysNet + Francesco Defilippo + francesco&sys-net.it +7137 + Tunisie Telecom + Ezzeddine + aezzddine&excite.com +7138 + Allaire Corporation + Dave Gruber + dgruber&allaire.com +7139 + EchoStar Data Networks + Joseph B. Schofield + joe.schofield&echostar.com +7140 + INPG, Institut National Polytechnique de Grenoble + Claire Rubat du Merac + Claire.Rubat-du-Merac&inpg.fr +7141 + Freesoft + Brent Baccala + baccala&freesoft.org +7142 + Pingtel Corp. + Richard Schaaf + rschaaf&pingtel.com +7143 + Entrust Technologies, Inc. + James Harwood + james.harwood&ENTRUST.COM +7144 + FaceTime Communications, Inc. + Franck Mangin + franck&facetime.com +7145 + eve.com + Hiram Aguirre + hiram&eve.com +7146 + Zeus Technology Ltd + Damian Reeves + dreeves&zeus.com +7147 + Imperial Technology, Inc. 
+ Richard Major + rmajor&imperialtech.com +7148 + Unitree Software + Jason Wells + jasonw&unitree.com +7149 + Arcot Systems, Inc. + Rob Jerdonek + rob&arcot.com +7150 + INITECH, Co.,Ltd. + Kwon, YongChul + godslord&initech.com +7151 + r pdeshpande + R.P.DESHPANDE + rpdeshpande&excite.com +7152 + CAPE Technologies Ltd., + Michael Slevin + ms&capetechnologies.com +7153 + Foertsch EDV Beratung + Torsten Foertsch + torsten.foertsch&gmx.net +7154 + Intelligent Platform Management Interface forum + Thomas M. Slaight + tom.slaight&intel.com +7155 + neohaven.net + Benson Schliesser + bensons&neohaven.net +7156 + Princeton Networks + Bhavesh Patel + bhavep&yahoo.com +7157 + SC Dart IMPEX SRL + Wlad S. Hanciuta + wlad&dart.ro +7158 + 1 Global City.com, Inc. + Michael Frey + michael.frey&1globalcity.com +7159 + Allianz Technology SE + Thi Hang Nguyen + thi-hang.nguyen&allianz.de +7160 + Granada Learning + Pete Birkinshaw + peterb&granada-learning.com +7161 + Network Solutions, Inc. + Mark Kosters + markk&netsol.com +7162 + Delancey Management and Consulting + Jeff Strong + stronghj&aol.com +7163 + Fiorano Software, Inc. + Albert Holt + alberth&fiorano.com +7164 + SchiZo Information Network + David Wimsey + bits&schizo.com +7165 + SAMBA Team + Volker Lendecke + iana&samba.org +7166 + Peters Securities Co, L.P. + Scott Zionic + zionic&petersweb.com +7167 + WinterTek, LLC + Julia Winter + jkwinter01&aol.com +7168 + Eurosoft Sas + Giampaolo Tomassoni + info&eurosoft-italia.com +7169 + Engage, Inc. + Chao Feng + cfeng&ipro.com +7170 + Centell Telecommunication Company + Chen Shaoqiang + chensq&btamail.net.cn +7171 + Europesave S.A. + Francisco Cabral + francisco.cabral&europesave.com +7172 + SPARKnet S.A. + Angelos Vasdaris + angel&spark.net.gr +7173 + Jiangsu University of Science and Technology + Huang Yan + jh_jsust&sina.com +7174 + Tonic Software, Inc. + Rick Bansal + rick.bansal&tonic.com +7175 + TSP Lab (Telecommunication and Signal Processing Lab.) + Peter Tatai + tatai&bme-tel.ttt.bme.hu +7176 + orangefood.com + Robert Petersen + robert&orangefood.com +7177 + IPITEK + Roger Oakes + roakes&ipitek.com +7178 + SANJOSEDEOCOA.COM.DO + DANIEL GUERRA + GUERRADANIEL&YAHOO.COM +7179 + ARIO Data Networks + David C Chiu + dchiu&ariodata.com +7180 + Talarian Corporation + Louna Petrova + louna&talarian.com +7181 + Trendium, Inc. + Tom Zhou + tzhou&trendium.com +7182 + ypass.net + Eric Kilfoil + eric&ypass.net +7183 + Springfield Public Schools + Jacob Shaw + jshaw&sps.lane.edu +7184 + ---none--- + ---none--- + ---none--- +7185 + Latitude Communications + Daniel X. Qu + daniel.qu&latitude.com +7186 + BayPackets Inc + Sachin Jog + sachin.jog&baypackets.com +7187 + Netsystems, Inc. + Hidehiko Iwasa + iwasa&netsystems.co.jp +7188 + Curtin University of Technology + Scott Pleitner + S.Pleitner&curtin.edu.au +7189 + EMS-Global + Tim Horner + thorner&ems-global.com +7190 + Legato Systems India + PVS + subraman&legato.com +7191 + Gebrueder Weiss GmbH + Arthur Kalb + netwatch&toveno.com +7192 + ID-PRO Aktiengesellschaft + Martin Dehn + dehn&id-pro.de +7193 + Bourse Connect + Jousse Patrice + pjousse&bourse-connect.com +7194 + Stevens Gagnon Services Info-Techniques + Stevens Gagnon + sgagnon&sgsit.com +7195 + Iquilibrium, Inc.
+ Stan O'Day + soday&iquilibrium.com +7196 + Imagine Software + Yee Liu + yeel&imagine-sw.com +7197 + Intertech + Frank Fischer + fischer&intertech.de +7198 + CeLight + Wen Hwang + whwang&celight.com +7199 + 2Bridge Software + Supart Pornnapa + part&2bridge.com +7200 + Forebrick International + Supart Pornnapa + part&forebrick.com +7201 + Druker Consulting + David Druker + druker&ieee.org +7202 + Seastem + Jerome Girard + j.girard&seastem.com +7203 + Technology Builders, Inc. + John Morey + jmorey&tbi.com +7204 + PerformanceIT, Inc. + Greg Morgan + gmorgan&performanceit.com +7205 + CoreTech Consulting Group, Inc. + Ken Avery + kavery&coretech.com +7206 + Real Media, Inc + Jim Roberts + jroberts&realmedia.com +7207 + DENON DENTAL + Putz Yuriy and Yaroshyk Taras + denon&mail.lviv.ua +7208 + AFCON Software and Electronics Ltd. + Meir Saggie + meirs&fil.co.il +7209 + Societe Bryarde de Travaux Publics + Patrick Lerouge + lerouge&im3.inserm.fr +7210 + WorkSpot Inc + Jeff Ullmann + workspotoperations&workspot.com +7211 + Raiffeisen Schweiz + Martin Hagmann + martin.hagmann&raiffeisen.ch +7212 + Raiffeisen Schweiz + Martin Hagmann + martin.hagmann&raiffeisen.ch +7213 + Datria Systems + Steve Cox + steve.cox&datria.com +7214 + Virginia-Maryland Regional College of Veterinary Medicine + Micah Almond + malmond&vt.edu +7215 + ABN AMRO + Michael Arredia + michael.arredia&abnamro.com +7216 + Institut National de la Recherche Agronomique + Christophe Caron + Christophe.Caron&jouy.inra.fr +7217 + The University of Memphis + Dr. Tom Barton + tbarton&memphis.edu +7218 + HBESoftware Inc. + Marc Heckmann + heckmann&hbesoftware.com +7219 + Firedrop Inc + Zhigang Chen + zchen&firedrop.com +7220 + Shanghai Optical Networking Technology Co. Ltd. + Kai Wu + wu1&guomai.sh.cn +7221 + Handlink Technologies Inc. + Yousee Chen + yousee&handlink.com.tw +7222 + TIScover + Jacob Rief + jacob.rief&tiscover.com +7223 + ofehr.net + Oliver Fehr + Oliver.Fehr&ofehr.net +7224 + Netarchitects SA + Oliver Fehr + ofehr&netarchitects.com +7225 + Spacechecker nv + I.T. Department + tech&spacechecker.com +7226 + Micro F/X + Matt Mahoney + mattm&microfx.net +7227 + Centre Informatique National de l'Enseignement Superieur + Jean-Christophe Penalva + jean-christophe.penalva&cines.fr +7228 + The Web Foundry Limited + Lee Shakespeare + lee&webfoundry.co.uk +7229 + Vogeler Enterprise Architectures + Volkhard Vogeler + VolkhardV&Vogeler.com +7230 + Eastern Michigan University + Matthew J. Goebel + goebel&emunix.emich.edu +7231 + MidStream Technologies + Director of Engineer + dir_engineering&midstream.com +7232 + VR1 Entertainment + Les Barstow + lbarstow&vr1.com +7233 + SierraCities.com + Terry Laurenzo + tlaurenzo&nexsoft.com +7234 + KORE + Branko Lepan + blepan&kore.com +7235 + eRadlink + Michael Goodwin + mgoodwin&eradlink.com +7236 + SIMM-Comm + Sherin Ibrahim + sherin&simmcomm.ch +7237 + Aaronsen Group, Ltd. + Doug Luce + luce&aaronsen.com +7238 + OM Technology + Jonas Lindholm + Jonas.Lindholm&omgroup.com +7239 + LEGRAND + Patrick RABIER + HOSTMASTER&GRPLEG.NET +7240 + Princeton Networks Inc + Long Pham + long.pham&mail.com +7241 + Inter.net + Gaige B. Paulsen + gaige&team.inter.net +7242 + QwicNet, Inc. + Daniel P Marcellus + dmarcellus&qwicnet.com +7243 + ZIVO Wellington + Jules Anderson + jules.anderson&zivo.co.nz +7244 + Quanta Computer Inc.
+ Strong Chen + strong.chen&quantatw.com +7245 + Northern Districts In-Line Hockey Club + Lucas James + shadow1&dove.net.au +7246 + Project X Consulting Ltd + Mark Howell + mark.howell&pxcl.com +7247 + Nexsan Technologies, Ltd. + Gary Watson + gary&nexsan.com +7248 + HyperTrust + Maarten Willems + maarten.willems&hypertrust.com +7249 + Manage-IT + Arend Melissant + ame&manage-it.be +7250 + Viewgraphics + Dima Lomazov + dima&viewgraphics.com +7251 + SPC solutions s.r.o. + Pavlo Ivakhiv + spc&spc.cz +7252 + Institut Mondor de Médecine Moléculaire + Patrick Lerouge + lerouge&im3.inserm.fr +7253 + StreamCache + Farshad Abasi + farshad&moderngroove.com +7254 + Intronix Corporation + Mike Willis + mike&intronix.net +7255 + PointOne Telecommunications, Inc. + Roman Milner + rmilner&pointone.com +7256 + Geyser Networks, Inc. + James Yang + jy&geysernetworks.com +7257 + Mar-Net Ltd. + Moshe or Vladimir + avnet&Marnetwork.com +7258 + Centre national de Documentation Pedagogique + Farida azeggagh + farida.azeggagh&poste.cndp.fr +7259 + OULLIM Information Technology, Inc. + Brian Kim + double73&oullim.co.kr +7260 + Opthos Inc. + Mahesh Shah + mshah&opthos.com +7261 + InfoSpace + Adam Jacob + adam.jacob&infospace.com +7262 + DragonWave + Rod McMullin + rmcmullin&dragonwaveinc.com +7263 + Litt Productions Ltd. + Joshua Litt + president&littpro.com +7264 + ---none--- + ---none--- + ---none--- +7265 + Interactive Portal, Inc. + Eric Bartley + ericb&interactiveportal.com +7266 + Softfront + Tatsushi Fujisawa + fujisawa&softfront.co.jp +7267 + BroadLink Technologies, Inc. + Jeff Hsue + jeffhsue&gvc.com +7268 + Axiavoice Software + Nicolas Bougues + nbougues&axialys.net +7269 + University of Klagenfurt + Gerald Hochegger + gerald.hochegger&uni-klu.ac.at +7270 + Supanet Limited + Peter Duffy + peter.duffy&supanet.net.uk +7271 + TuxWarp + Wolfgang Pichler + wolfgang.pichler&tuxwarp.org +7272 + RedSiren Technologies Inc. + Jackson Chanthapannha + jackson&redsiren.com +7273 + When Pigs Fly Club + Robert Londberg + rvrmark&hotmail.com +7274 + CITY OF WILLCOX + RONALD ALLEN + Rallen&willcoxcity.org +7275 + Amsterdamse Hogeschool voor de Kunsten + Willem van den Oord + willem&ahk.nl +7276 + Parabon Computation, Inc. + Dabe Murphy + dabe&parabon.com +7277 + PolyServe,Inc. + Noelan Olson + noelan&polyserve.com +7278 + Digital Archway + Thang Ngo + tngo&digitalarchway.com +7279 + Zembu Labs + Bill Studenmund + wrstuden&zembu.com +7280 + Vodafone Network Pty Ltd Australia + Adam Spence + adam.spence&vodafone.com.au +7281 + Socix Business Internet Software Co., Ltd + Simon Chang + mail&socix.com +7282 + Superchannel ApS + Sean Treadway + seant&superchannel.org +7283 + Defence Science & Technology Agency + Casen Yap Cheng San + casenyap&starnet.gov.sg +7284 + Asita Technologies + Brendan Ryan + brendan.ryan&asitatech.com +7285 + Hutchison Ports UK Limited + Phil Doughty + doughtypm&hpuk.co.uk +7286 + betanet + Flavio Gatti + flavio.gatti&beta-net.it +7287 + Premier Programming Ltd + James Outhwaite + james.outhwaite&rbos.com +7288 + ActiveSky Inc. + Peter Robinson + peterr&activesky.com +7289 + Visual Analysis AG + Martin Stockhammer + martin.stockhammer&visualanalysis.com +7290 + Samjung Data Service LTD.,CO. + Youngbong Choe + hurd&sds.co.kr +7291 + LUTEUS SARL (formerly 'Lecointe Ludovic') + Lecointe Ludovic + support&loriotpro.com +7292 + Krone Multimedia Ges.m.b.H Co KG + Hans Peleska + j.peleska&krone.at +7293 + POWERCOM CO., Ltd.
+ VINCENT HUA + vincent&upspowercom.com.tw +7294 + Société Générale de Téléinformatique + ROGNARD + jmro&sgt.fr +7295 + plan b. solutions GmbH + Bernd Strehhuber + info&planb.de +7296 + eFORCE, Inc.- New York + Dan Brunton + dbrunton&eforceglobal.com +7297 + Adventist Care Centers + Jim Burrill + oid&adventistcare.org +7298 + Yuma Educational Consortium + Craig Sawyer + do_sawye&yumaed.org +7299 + Valence Semiconductor, Inc. + Mats Persson + MPersson&valencesemi.com +7300 + Bangladesh Consultants Ltd. + Md Salahuddin Ahmed + bcl&citechco.net +7301 + Kinzebach + Joerg Kinzebach + joerg&istac.de +7302 + Medsys + Michel Nogatchewsky + mny&inetlab.net +7303 + Byg Informatique + Michel Nogatchewsky + mny&inetlab.net +7304 + WWW. DEPARTMENTSTORE + J®p»£ + S2893142&student.thu.edu.tw +7305 + esurvey + Mr. Hans Brunila + info&esurvey.fi +7306 + Boku, Inc. + Peter + peter&boku.net +7307 + PonyExpress.net + Matthew Hoskins + matth&npgco.com +7308 + Zaantek, LLC + Ryan Gesler + ryan&zaantek.com +7309 + Argus Technologies + Doddy Wijanta + dwijanta&argus.ca +7310 + EnStor + Bart Steanes + bart.steanes&enstor.com.au +7311 + T-Systems Enterprise Services GmbH + Klaus Biedenbach + klaus.biedenbach&t-systems.com +7312 + BARCO Control Rooms + Thomas Kappler + thomas.kappler&barco.com +7313 + 1stWAVE + René Lippertz + rlippertz&1stwave.de +7314 + CodeWeavers, Inc + Jeremy White + jwhite&codeweavers.com +7315 + CAB Software + Colin Bate + SNMP&bate2bate.co.uk +7316 + Bridge Solutions AG + Rene Eigenheer + reigenheer&bridgesolutions.net +7317 + Mobile Reach Technologies, Inc. + David Hustace + dhustace&mobilereach.com +7318 + Sinia Corporation + Jagane Sundar + jagane&sinia.com +7319 + Universite de Toulon et du Var + Didier Benza + benza&univ-tln.fr +7320 + Ivron Systems Ltd + Jeremy Bruce-Smith + jeremy.smith&ivron.com +7321 + connext + www.i.am/tummodeling + fanalo&yahoo.com +7322 + Tiburon Networks, Inc + Rob Cole + robc&tiburonnet.com +7323 + Voicecom + Marko Pajula + marko&voicecom.ee +7324 + Chambre Syndicale des Banques Populaires + Christian Brossier + christian.brossier&csbp.banquepopulaire.fr +7325 + Redeye Telecommunications + Steve Oberski + sfo&deterministic.com +7326 + MCI + Jim Potter + jim.potter&mci.com +7327 + WaveSmith Networks + Bernard Farrell + bfarrell&wavesmithnetworks.com +7328 + MatrixOne, Inc. + Kevin J. Dyer + kdyer&matrixone.com +7329 + Virtual Learn Inc + Benjamin Madsen + ben.madsen&virtuallearn.com +7330 + Bundesanstalt fuer Wasserbau + Marco Reinhardt + marco.reinhardt&baw.de +7331 + DDS Projects BV + Antoine van der Ven + antoine&dds.nl +7332 + DiviTech A/S + Henrik B. Sorensen + hbs&divitech.dk +7333 + E-Cottage Industries + Charlie Ferry + Hawkeye_CNY&hotmail.com +7334 + The San Joaquin Valley Science Collective + Sean Champ + sjvsc&netscape.net +7335 + eXperts-MD + Olivier Deckmyn + olivier.deckmyn&experts-md.com +7336 + 6WIND + Guillaume Gaudonville + support&6wind.com +7337 + Lipro AG + Joachim Adler + Joachim.Adler&lipro.de +7338 + Paramhamsa Tech + Rama Buddhineni + paramhamsa&hotmail.com +7339 + GeneralSearch.com + Jim Raney + jraney&generalsearch.net +7340 + Northwestern University + Phil Tracy + IDS-NUIT&northwestern.edu +7341 + University of Alabama at Birmingham + Landy Manderson + landy&uab.edu +7342 + Omnitron Systems Technology + Steve Mood + smood&omnitron-systems.com +7343 + BroadConnex Networks, Inc. 
+ Zoneson Chen + zoneson&scci.com +7344 + Jedai Broadband Networks + Nanping Houl + nanping_houl&jedai.com +7345 + Learnsoft Corporation + Michael Kahn + mkahn&learnsoft.ca +7346 + Uni-C + Ole Nomann Thomsen + ont&uni-c.dk +7347 + Point Reyes Networks, Inc. + Alex Chen + chen&pointreyesnet.com +7348 + Torren Software Company + Dong Shuo + shuo&telekbird.com.cn +7349 + Hangzhou Torren Software Company + Dong Shuo + shuo&telekbird.com.cn +7350 + Belkamneft + Dmitry Melekhov + dm&belkam.com +7351 + TeleDanmark Development + Finn Barrett + fb&tdk.dk +7352 + Gilat Satellite Networks + Noam Green + noamgr&gilat.com +7353 + Datel Defence Limited + David Attwood + david.attwood&dateldef.co.uk +7354 + Vesper S.A. + Denis Martel + dmartel&vesper.com.br +7355 + Hass Associates + Kishore Nayak + knayak&ha-i.com +7356 + Gateway to the Web + Bryan Porter + bporter&gtw.net +7357 + scroner.com + Tony Morlan + armorlan&rconnect.com +7358 + POV Corp. + Tiago Pierezan Camargo + darksky&brturbo.com +7359 + SteelEye Technology + Bob Williamson + bob.williamson&steeleye.com +7360 + Telecom Personal + Alfredo Edye + aedye&telecompersonal.com.ar +7361 + Certificado Digital S.A. + Daniel E. Cortes + dcortes&tlc-sa.com.ar +7362 + Department Land, Air and Water Resources, University of California, Davis + Quinn Hart + qjhart&ucdavis.edu +7363 + Prominence Networks Inc. + Sid Nag + sid&prominencenet.com +7364 + IMS Health + William M. Weist + wweist&us.imshealth.com +7365 + Easily Ltd + Dominic Ijichi + dom&easily.co.uk +7366 + MarketTouch + J Wadsworth + j.wadsworth&markettouch.com +7367 + DrayTek Corporation + Tom Chen + tchen&draytek.com.tw +7368 + FalconStor, Inc. + Fenfen Shueh + fenfen&falconstor.com.tw +7369 + Suncom Communications + Fung Lo + lf&mail.suncom.com.tw +7370 + Cbeyond Communications + Scott Lee + scott.lee&cbeyond.net +7371 + INSERM + Vicente Gérard + vicente&idf.inserm.fr +7372 + Cratos Networks, Inc + Joseph Benoit + jbenoit&cratosnetworks.com +7373 + Narad Networks Inc. + Bill Bleem + bleemb&naradnetworks.com +7374 + C&D Micro Solutions + Cyd Lantz + cydl&cydav.com +7375 + iSOFT Plc + Alan Singfield + alan.singfield&isoftplc.com +7376 + Magenta Sites + Valtteri Vuorikoski + vuori&magenta.net +7377 + Ubiquity Software Corporation + Michael Doyle + mdoyle&ubiquity.net +7378 + FireSummit + Jim Flom + jflom&firesummit.com +7379 + Soliton Technologies CO., Ltd. + kccheng&soliton.com.tw +7380 + Shore Corporation + Iñaki Santamaria + isantamaria&gmail.com +7381 + OWL-Online GmbH & Co. KG + Arvid Requate + requate&Team.OWL-Online.DE +7382 + JustIT Sdn. Bhd. + Lars Boegild Thomsen + lth&cow.dk +7383 + LIBERTECH + Alain Abbas + aabbas&libertech.fr +7384 + CryptoTech + Dariusz Lewicki + dariusz&cryptotech.com.pl +7385 + Celox Networks Inc. + Jean Pierre Bordes + jpb&celoxnetworks.com +7386 + Cake Farm + Regan King + regan&cakefarm.com +7387 + Canon Information Systems Research Australia + Andrew Raphael + raphael&research.canon.com.au +7388 + Lune Networks + Nelson Junior + nelson&lunenetworks.com.br +7389 + VDSL Systems Oy + MR. Tomi Tirri + tomi.tirri&vdslsystems.com +7390 + Grisoft + Petr Prazak + prazak&grisoft.cz +7391 + Institut National des Telecommunications (INT) + +33(0)1 60 76 43 25 + Eric.Collery&int-evry.fr +7392 + Telispark + Brian Seborg + brian.seborg&telispark.com +7393 + Oasys Telecom, Inc. + Frank Hartley + fhartley&oasystel.com +7394 + CosmoCom, Inc. + Rich Turner + rturner&cosmocom.com +7395 + Beeline Networks, Inc. + Rodrigo Alonso + ralonso&beln.com +7396 + OLYMPO controls Ltd.
+ Richard Sobotka + richard.sobotka&olympo.cz +7397 + OnePage, inc + Mike Timmons + mtimmons&onepage.com +7398 + Corporation 124 + Mahesh Mehta + maheshm&corp124.com +7399 + Data Systems West + Michael Stoller + mstoller&dsw.net +7400 + EHI-INSM + Michael Stoller + mstoller&dsw.net +7401 + EkotaCentral + Paul Vaillant + paul.vaillant&ekotacentral.com +7402 + Vigor Soft Private Limited + Mr. Atul Garg + atul&vigorsoft.com +7403 + Biomed Translations Ltd. + David John Williams + biomed-translations&t-online.de +7404 + D.J. Williams Editorial Ltd. + David John Williams + biomed-translations&t-online.de +7405 + IPS d.o.o. + Dusan Banko + banko&ips.si +7406 + EFS : Etablissement Francais du Sang + Corine HAMEAU + Corine.Hameau&efs.sante.fr +7407 + Observatoire de Besancon + Francoise Gazelle + fg&obs-besancon.fr +7408 + Kunskapstv i Sverige AB + David Looberger + dlooberger&kworld.se +7409 + Jan Niehusmann + Jan Niehusmann + reg111&gondor.com +7410 + Telekurs Financial + Helmut Horn + hoh&telekurs.com +7411 + Finix + Paul Tovey + Paul.Tovey&thb.scot.nhs.uk +7412 + Angstrom Microsystems + Lalit Jain + ljain&angstrommicro.com +7413 + Computer Service Depot + Simon Ditner + simon&csdepot.com +7414 + Muffett & Co + Mark Muffett + mark&muffett.net +7415 + Berlin-Brandenburgische Akademie der Wissenschaften + Tilo Lange + lange&bbaw.de +7416 + Software Advanced Technology Lab + Maarten Spoek + spoek&satl.com +7417 + Cornell University + James W. Howell + jwh2&cornell.edu +7418 + Procket Networks + Sam Chang + schang&procket.com +7419 + Dai hoc su pham ky thuat + nguyen van long + nvlong80&hotmail.com +7420 + Taylor Company + Doug Newsom + dnewsom&taylor-company.com +7421 + Milcom Systems Corporation + Steve Simpson + ssimpson&milcom-systems.com +7422 + Currenex + Justin Fitzhugh + jfitzhugh&currenex.com +7423 + MedPlus, Inc. + Deron E. Meranda + dmeranda&medplus.com +7424 + Infosec Technologies Co., Ltd. + Dong-Wook Kim + dwkim&infosec.co.kr +7425 + Company of the Far Countries, Ltd. + Erich Morisse + e.morisse&farcountries.com +7426 + NeST Information Technologies + M I Theodore + theodore&nestinfotech.com +7427 + Yokogawa Electric Corporation + Yokogawa Electric Corporation Development Infrastructure Dept. + snmp0608&cs.jp.yokogawa.com +7428 + PowerDsine + Avi Berger + avib&powerdsine.com +7429 + BroadWeb Corporation + Chao-Chin Chou + juju&broadweb.com.tw +7430 + mediascape communications AG + Stefan Mohr + sm&mediascape.de +7431 + Altiva Solutions + Frederic Bernoud + ldap.oid&altiva.fr +7432 + Excentis + Wim De Ketelaere + wim.deketelaere&excentis.com +7433 + SITTI spa + Marco Prosperi + prosperi&sitti.it +7434 + Planetasia Ltd + Satish Sukumar + satishs&planetasia.com +7435 + T.K COMPANY + Tomio Katsui + kat4k&ginga.net +7436 + Oxir Internet Solutions + Alex Kovshov + sandy&tb1.wh.ru +7437 + Siemens ICN Italy + Daniele Behar + iana.interface&icn.siemens.it +7438 + SAFE. Inc + Jean-Marc Pigeon + jmp&safe.ca +7439 + AppStream, Inc. + Soumendra Bhakta + dev_renew&appstream.com +7440 + Omnitech Computer Corp + Luke Suchocki + suchoclu&omnitechcorp.com +7441 + CipherTrust, Inc. + William Alan Ritch + bill.ritch&ciphertrust.com +7442 + Idaho National Engineering and Environmental Laboratory + David N. Tillitt + DXT&inel.gov +7443 + EVEREX COMMUNICATION + SUSIE CHU, Ph.D + susiec&everexcomm.net +7444 + Imagine Networks + Stephen M.
Weiss + steve&imaginenetworks.com +7445 + Javelina Software + Dave Ritchie + gilk&netpro.com +7446 + Institute of Leadership and Management + Naseer Bhatti + goni&marmoset.net +7447 + Signal Technology INC + Sung Hoon Park + hoonie&signal-tec.com +7448 + Advanced Training & Consulting Services, Inc. + Brian Watrous + brianw&atcs.net +7449 + WHAM COMPUTER + OKTA RUMPAK + rumpak&mailcity.com +7450 + Framfab + Rolf Svensson + rolf.svensson&framfab.se +7451 + nSYS Technologies Co., Ltd + Chang-Woo Seo + leo&nsystech.com +7452 + Guangzhou Gaoke Communication Equipment Co., Ltd + Jiangiang Hu + 1fox&163.net +7453 + Turtlefoot International + Scott Dejmal + sdejmal&turtlefoot.com +7454 + OKsystem Ltd. + Jindrich Stepanek + stepanek&oksystem.cz +7455 + dev/consulting GmbH + Andreas Ferber + af&devcon.net +7456 + Virtual Net + S. Namèche + snameche&virtual-net.fr +7457 + ZAO EMAX + Andrey Ignatov + andrey_ignatov&e-max.ru +7458 + Clemens Benden + Clemens Benden + info&clemens-benden.de +7459 + memIQ + Till Schnupp + till.schnupp&memiq.com +7460 + XS4ALL Internet B.V. + Scott A. McIntyre + scott&xs4all.nl +7461 + Page One AG + Clemens Benden + clemens&page-one.de +7462 + Nordnorsk helsenett + Anders Baardsgaard + anders&nhn.no +7463 + white.cell,inc + Eran Kessler + eran.kessler&white-cell.com +7464 + ECMWF (European Centre for Medium-Range Weather Forecast) + Matteo Dell'Acqua + mdellacqua&ecmwf.int +7465 + WISI + Dirk Froese + Dirk.Froese&WISI.de +7466 + CLI GmbH + B.Eckstein + be&cli.de +7467 + IVU Traffic Technologies AG + B.Eckstein + eck&ivu.de +7468 + Mixbaal + Diego Barajas + dbarajas&mixbaal.com +7469 + VoiceGenie Technologies + Rob Marchand + rob&voicegenie.com +7470 + Princeton Financial Systems + John Thorpe + jthorpe&pfs.com +7471 + Discreet Communications Group + Joseph Pallante + jody&trustid.net +7472 + MShow.com + Joe Balque + jbalque&mshow.com +7473 + easyplanet Corp. + Administrator + administrator&easyplanet.com +7474 + Inabled Online + Barbara Newton + bnewton&inabled.com +7475 + Shanghai Bell Company Ltd. + Cao Liang + scecl&sbell.com.cn +7476 + Departamento General de Irrigacion + Juan Jose Ciarlante + jjo&mendoza.gov.ar +7477 + Unk SA - Argentina + Juan Jose Ciarlante + jjo&mendoza.gov.ar +7478 + NetCraft Sdn Bhd + Wei Yin, Teo + wyt&netcraft.com.my +7479 + Triad Development Group NA, LLC + Franz Bahadosingh + f.bahadosingh&triadna.com +7480 + @MySpend.Com + R.J. Bedijn + reneb&dse.nl +7481 + Talon + Diederik de Groot + ddegroot&talon.nl +7482 + Widax Corporation + David Husson + dhusson&widax-corp.com +7483 + Nokia (Formerly 'Alcatel-Lucent') + Jeff Donnelly + jeff.donnelly&nokia.com +7484 + Teleias Inc. + Mirko Karanovic + mkaranovic&teleias.com +7485 + SPS, Inc. + Hash Malik + hash.malik&spsnet.com +7486 + Embassy Systems, Ltda. + Paulo Gomide Cohn + pgcohn&embassy-systems.com.br +7487 + MEISolutions.com, Inc. + Steven Wilson + swilson&meisolutions.com +7488 + Infinitec Networks + Munzer Haque + mhaque&infinitecnetworks.com +7489 + e-HealthSign + Lori Reed-Fourquet + fourquet&ix.netcom.com +7490 + TotalWisdom Inc. + Greg McFall + mcfall&bellatlantic.net +7491 + gnumonks.org + Harald Welte + laforge&gnumonks.org +7492 + Kommunikationsnetz Franken e.V. + Harald Welte + tc&franken.de +7493 + ldap-abook + David Leigh + dleigh&sameasiteverwas.net +7494 + Rovor Technologies + T. Garlick + tgarlick&rovor.com +7495 + Advanced Web Solutions, Inc + John Dole + john&doorway.net +7496 + JB-Holding b.v. 
+ Sebastian Vogels + b.vogels&jb-holding.com +7497 + Magnolia Broadband + Alex Segalovitz + alexsega&magnoliabroadband.co.il +7498 + Gorski.net + Darrin Gorski + Darrin&Gorski.net +7499 + Pacific Century CyberWorks - Hongkong Telecom + Nick Gazo + gazo.am.nick&cwhkt.com +7500 + IICS + Stefan Drees + sdrees&acm.org +7501 + HUBER+SUHNER BKtel GmbH (HFC) + Juergen Anhorn + juergen.anhorn&hubersuhner.com +7502 + African Medical Imaging + Ben Wright + ben&ami-online.com +7503 + Henkels and McCoy + Ernest Facciolini + efacciolini&henkelsandmccoy.com +7504 + AerSoft Ltd + Joe Murphy + joe&aersoft.com +7505 + Calista Ltd. + Andy Pepperell + andy.pepperell&calista.com +7506 + Scoreline Technologies + Wayne de Villiers + wayne&scoreline.com +7507 + WayneWare + Wayne de Villiers + wayne&scoreline.com +7508 + Fuji Electric Co.,Ltd. + Yasuhiro Tanaka + tanaka-yasu&fujielectric.co.jp +7509 + Storability, Inc. + Brian Tolland + Brian.Tolland&storability.com +7510 + Luxxon Corporation + Jon Taylor + jtaylor&luxxon.com +7511 + wdt + Se-Yong Cheon + lifo&dreamwiz.com +7512 + NetEnS Corporation + Takumi Kadode + netens_it&entens.co.jp +7513 + Elimpex - Medizintechnik GesmbH. + Peter Mueller + peter.mueller&elimpex.com +7514 + St Helens And Knowsley Hospitals NHS Trust + Tony Atherton + tonyatherton&yahoo.com +7515 + P. Lancier + Alphons Wernsmann + alphons.wernsmann&lancier.de +7516 + Andy Stubbs + Andy Stubbs + andy&andystubbs.com +7517 + Acxsys Corporation + George Gorsline + ggorsline&interac.org +7518 + Chicago Mercantile Exchange + Stephen Goldman + sgooldman&cme.com +7519 + RateIntegration, Inc. + Dallas Wrege + dwrege&rateintegration.com +7520 + Harris Corporation (formerly 'M/A-COM Private Radio Systems, Inc.') + Harris Corp SNMP + Harris-SNMP&harris.com +7521 + Logicalis Australia Pty Ltd (formerly 'NetStar Networks') + Garry Thomas + gthomas&au.logicalis.com +7522 + Brains Corp + Takehito Horiuchi + horiuchi&brains.co.jp +7523 + AccessNet + Chairil + chairil&teamcast.com +7524 + RapidSolution Software GmbH + Juraj Polakovic + juraj.polakovic&rapidsolution.de +7525 + eCal Corporation + Dudley Atkinson + datkinson&ecal.com +7526 + snom technology AG + Karsten Wernicke + wernicke&snom.de +7527 + Quest Software + Doug Hussey + dhussey&quest.com +7528 + Metabox Inc + Larry Mayer + lmayer&metaboxinc.com +7529 + Netarx, Inc. + Fred Eisele or Sandy Kronenberg + phreed&netarx.com +7530 + OptXCon Inc. + Charles Marsh + cmarsh&optxcon.com +7531 + RapidWAN + Colin Cox + colin.cox&rapidwan.com +7532 + Munat, Inc. + Charles F. Munat + chas&munat.com +7533 + HONDA ELECTRON CO.,LTD + HITOSHI SASAKI + hit&honda-elc.com +7534 + Universita' degli Studi di Genova + Marco Ferrante + apm&unige.it +7535 + CapCLEAR Limited + Anthony Sharp + anthony.sharp&capclear.com +7536 + H+H Zentrum f. Rechnerkommunikation GmbH + Jens Diedrich + diedrich&hh-zfrk.com +7537 + e2 Communications + John Keene + jkeene&e2communications.com +7538 + Veriprise Wireless Company + Yong Su + yong.su&veriprise.com +7539 + QUACK.COM + Garth K. Williams + gwilliams&quack.com +7540 + ISPsoft + Edwin Park + esp&ispsoft.com +7541 + iBuilding.com + John Donovan + jdonovan&ibuilding.com +7542 + Mount Allison University + Petre Crawshaw + pcrawshaw&mta.ca +7543 + VisionShop + Mark W. Smith + mark&visionshop.com +7544 + Wirelink Co., Ltd. + Seong Jae, Lee + sjlee&wirelink.co.kr +7545 + QoSBA Systems Co., Ltd. + Dong-Min Kim + danny&qosba.co.kr +7546 + AXXESSIT ASA + Jon Erik E. 
Jensen + jon.erik.jensen&axxessit.no +7547 + RedIRIS (formerly 'CSIC/RedIRIS') + Javier Masa + javier.masa&rediris.es +7548 + Netcentric Systems Ltd. + Keith Garrett + keith.garrett&netcentricsystems.net +7549 + DataCenterDirect.com, Inc. + Gary Coulbourne + garyc&datacenterdirect.com +7550 + Delta Data Software + Christian Plazas + cplazas&deltadatasoft.com +7551 + Enatel + Denis Galiana + galiana&enatel.com +7552 + Infra Valley + Hahyung, Cho + hhcho&orgio.net +7553 + Tampere University of Technology + Martti Jokipii + Martti.Jokipii&tut.fi +7554 + Zlooper Media Perkasa + Rustam Miharja + rustamiharja&yahoo.com +7555 + Institution Saint-Joseph + Thierry GUYOT + tguyot&instit-st-jo.asso.fr +7556 + Movilogic S.A. + Daniel Perez + danielp&movilogic.com +7557 + Echomine + Chris Chen + ckchris&echomine.com +7558 + National Air Traffic Services Ltd + Suki Lall + suki.lall&nats.co.uk +7559 + Egenera, Inc + Jeremy Decker + jdecker&egenera.com +7560 + Daisy Group + Ray Tremayne + ray.tremayne&daisygroup.com +7561 + Askoo SA + Olivier Dumas + odumas&askoo.com +7562 + ICPSR (Inter-university Consortium for Political & Social Research) + John Gray + johngray&umich.edu +7563 + OneSpot, Inc. + Christopher Hoover + hostmaster&onespot.com +7564 + Clickarray Networks, Inc. + Shiow-Jen Jin + sjj&clickarray.com +7565 + USGS South Florida Information Access Project + R. B. Irvin + rbirvin&usgs.gov +7566 + CodeStream Technologies Corporation + Sherry Huang + huang&codestream.net +7567 + Portera Systems + Nick Cuccia + ncuccia&portera.com +7568 + Ircam + Julien Boissinot + Julien.Boissinot&ircam.fr +7569 + Convedia Corporation + Will Wong + willwong&convedia.com +7570 + Brience, Inc. + Steven Zhou + szhou&brience.com +7571 + SAF tehnika + Dainis Strupis + Dainis.Strupis&safequipment.com +7572 + Quest Software + Brady Orand + Brady_Orand&quest.com +7573 + NBC Internet + Steve Carlson + Steve.Carlson&nbci.com +7574 + Torque Systems, Inc + Steven Ryan + buffy&torque.com +7575 + Maharashtra Institute of Technology, Pune + Rushi Desai + rushi&mitp.ac.in +7576 + GEA IT Services GmbH + Hans Wetser + hwetser&grasso.nl +7577 + University Of Strathclyde + J R Binks + network-manager&strath.ac.uk +7578 + Energis Deutschland + J. Olaf Waitkus + owaitkus&energisd.de +7579 + University of Freiburg + Martin Walter + mawa&uni-freiburg.de +7580 + AnIX Group Limited + Derek Milliner + derek.milliner&anix.co.uk +7581 + Advent Networks, Inc. + Dan Carter + dcarter&adventnetworks.com +7582 + E-Z Data, Inc. + Raymond Jia + raymond&ez-data.com +7583 + The Eleven + Tom Legg + tlegg&the-eleven.com +7584 + Intertrust Technologies Corporation + Seacert Operations + operations&seacert.com +7585 + Genius Software Ltda + Jose Roberto Fernandes + jrfndes&geniussoftware.com.br +7586 + Parametric Technology Corporation + Kevin Jordan + kjordan&ptc.com +7587 + World Digitel Corporation + Sung-Won, Kwon + worldjk&hitel.net +7588 + WIS COMMUNICATION CO. LTD + ZhengFei + knifer&21cn.com +7589 + Memlink + Eyal Sadovsky + eyals&mem-link.com +7590 + bodachina + Precilla Luo + precilla&263.net +7591 + Divisa Informática y Telecomunicaciones S.A.
+ David Rodríguez Alfayate + drodriguez&divisait.com +7592 + Swedish Institute of Space Physics + Mats Luspa + mats.luspa&irf.se +7593 + University of Cambridge, Clinical and Biomedical Computing Unit + Graham Phillips + graham&cbcu.cam.ac.uk +7594 + Citadel LLC (formerly 'Citadel Investment Group') + Eric Lambert + hostmaster&citadel.com +7595 + RATIO Entwicklungen GmbH + Oliver Thulke + oth&ratio.de +7596 + Trellis Photonics + Yair Sa'ad + yair&Trellis-Photonics.com +7597 + NOVA LJUBLJANSKA BANKA + Rastko Skaljak, Tercelj Mladen + skaljakr&n-lb.si +7598 + KTSI + Robert Sherwood + rsherwood&ktsi.net +7599 + Rebel.com, Inc. + Roy Hooper + roy.hooper&rebel.com +7600 + eazel + Rick Willmore + rick&eazel.com +7601 + Trinity College + Richard Wraith + IT_Manager&trinity.unimelb.edu.au +7602 + Covers.de + Kai Roessel + info&covers.de +7603 + Kingfisher ICT-Services + W.H Mollenvanger + wilfredm&kingfisher.nl +7604 + MSE-iT + Thomas Maierhofer + TMaierhofer&mse-it.de +7605 + e-point S.A + Patryk Swiderski + psw&e-point.pl +7606 + The University of Alabama in Huntsville + James H. McCullars + mccullj&uah.edu +7607 + Appgate Cybersecurity, Inc. + Jamie Bodley-Scott + jamie.bodley-scott&appgate.com +7608 + Celion Networks, Inc. + Eric Nelson + eric.nelson&celion.com +7609 + SpinState + Gordon Jenkins + gordon&spinstate.com +7610 + Something 4 Ltd + Mr Corrie Griffiths + corrie.griffiths&something4.com +7611 + Federal Reserve System + Bob Nasti + bob.nasti&ids.frb.org +7612 + Digi-Data Corp + Ken Willis + kwillis&digidata.com +7613 + Panther Software, Inc. + Alex Krassel + alexkr&panthersoft.com +7614 + Isovia + Vinay Pulim + vinay.pulim&isovia.com +7615 + Universitat de Girona + Salvador Salanova Fortmann + salvador.salanova&pas.udg.es +7616 + STMICROELECTRONICS + Christian Bonato + christian.bonato&st.com +7617 + Stendahls.net AB + Henrik Segergren + henrik&stendahls.net +7618 + Adomo, Inc. + Peter Bruun + pbruun&adomo.com +7619 + Amon Technologies + Walter De Groof + degroofw&amontech.com +7620 + France Teaser + Laurent Wacrenier + lwa&teaser.fr +7621 + DISH Network L.L.C. (formerly 'EchoStar Corporation') + Dwayne Barstad + dwayne.barstad&dishnetwork.com +7622 + Mesa Systems International + Ronald E. Fortin + ref&mesasys.com +7623 + DXO Telecom Co., Ltd. + Jaehyoung, Shim + wrkshim&netian.com +7624 + akal systems + Surinder Singh + singh&eng.sun.com +7625 + ComCon GmbH + Markus Palme + markusp&comcon-gmbh.com +7626 + University of Osnabrueck + Rolf Nienhueser + Rolf.Nienhueser&Uni-Osnabrueck.DE +7627 + TeleBroad Networks + Maroof Mian + zafar&telebroadnetworks.com +7628 + Reed-Matthews, Inc. + Ed Reed + eer&oncalldba.com +7629 + 4unet + Atif Ghaffar + atif&developer.ch +7630 + Tioga Technologies + Machlev Nissim + nmachlev&tiogatech.com +7631 + INTERFACE CONCEPT + Mr. EUZEN + pheuzen&interfaceconcept.com +7632 + GNS + Jenny Kim + jenny&glonetsys.com +7633 + City University + David Vinograd + d.r.vinograd&city.ac.uk +7634 + Bungee Communications + Shlomo Yariv + shlomoy&bungee-com.com +7635 + THOMSON multimedia R&D France + Marc Picart + picartm&thmulti.com +7636 + Laponia Veiculos + Cristiano Jose Sulzbach + cris&laponia.com.br +7637 + JPM Consulting + Jan-Piet Mens + jp&mens.de +7638 + Carrier1 + Jay Tribick + jay.tribick&carrier1.net +7639 + Netverk S.A. + Gustavo Grela + gustavo.grela&netverk.com +7640 + Quantum Corp. + Carsten H Prigge + carsten.prigge&quantum.com +7641 + Astronomical Society of Edinburgh + Graham Rule + asewww&roe.ac.uk +7642 + SpiderTech Software Pvt. 
Ltd + Prashant Deshpande + sj2&hotmail.com +7643 + National Chung Cheng University + TING-CHAO HOU + tch&ee.ccu.edu.tw +7644 + EK Großeinkauf eG + Matthias Ohlemeyer + Matthias.Ohlemeyer&EK-Grosseinkauf.de +7645 + NetXL + Olivier Mary + olivier&netxl.fr +7646 + Exemus Ltd + Peter Rendle + hostmaster&exemus.com +7647 + Oregan Networks Ltd + Adrian Gartland + adrian.gartland&oregan.net +7648 + Nationwide Mutual Insurance Company + Cher West + westc1&nationwide.com +7649 + EverFile, Inc. + Joseph Chapman + joe&everfile.com +7650 + Leibniz-Rechenzentrum + Claus Wimmer + Claus.Wimmer&lrz-muenchen.de +7651 + UniSecurity + Alex Song + alex&unisecurity.com +7652 + DataCore Software Corporation + Robert Bassett + robert.bassett&datacoresoftware.com +7653 + Dashbit Incorporated + Steven Downs + ldapadmin&dashbit.com +7654 + SnoNet + Eric Aislinn + eric.aislinn&snonet.org +7655 + Sean Kelly, Independent Consultant + Sean Kelly + kelly&seankelly.biz +7656 + Bertrandt AG + Joern Lippold + joern.lippold&de.bertrandt.com +7657 + DISAT SISTEMAS TELECOMUNICACIÓN + ANTONIO JOSE ANTON + antonio.jose.anton&wanadoo.es +7658 + Webonomics GmbH + Daniel Chabrol + chabrol&webonomics.de +7659 + UESTC + Meng Zhou Hou + mzhhou&263.net +7660 + Okiok Data + Carl Benoit + cbenoit&okiok.com +7661 + HumanScan GmbH + Hagen Zurek + h.zurek&bioid.com +7662 + SOGETEK + Georges de Oliveira + admin&sogetek.fr +7663 + Taral Networks Inc. + Jennifer Kim + jkim&taralnetworks.com +7664 + mezzoNET Easy Technologie S.L. + Sven Slezak + ldap&mezzo.net +7665 + INTRIA-HP Corp + Bala Balabaskaran + bala.balabaskaran&intria.com +7666 + Provenco Group Ltd + Duncan Mackay + duncan.mackay&provenco.com +7667 + Banque Nationale du Canada + Laurent Frere + Laurent.Frere&bnc.ca +7668 + Praeter Technologies, Inc. + Scott Hebert + scott&praeter.com +7669 + Network Photonics, Inc. + Shudong Lu + slu&networkphotonics.com +7670 + Viator Incorporated + Chris Burnley + chris&viator.com +7671 + Invertix Corporation + Graham Stead + gstead&invertix.com +7672 + Fujitsu NNC Ltd./ PBN + Takeshi Matsunaga + matunaga&nnc.ts.fujitsu.co.jp +7673 + Fusion Communications Corp. + Kan Sasaki + sasaki&fusioncom.co.jp +7674 + Northern Principle Limited + Tomas Doran + tomas&principle.co.uk +7675 + SPAR Handels AG + Frank Meszaros + Frank_Meszaros&spar.de +7676 + Strawberry Software Limited + John Swan + john&strawberry-software.com +7677 + Lynx Photonic Networks Inc. + Guy Kronenthal + kguy&lynxpn.com +7678 + XYRIUS + Henri Mesquida + henri&xyrius.com +7679 + WiNag.com + Zahai Tassev + harry&oneoffice.at +7680 + Corrigent Systems Inc. + Muly Ilan + mulyi&corrigent.com +7681 + Trakonic + Michael Chapman + mike&trakonic.com +7682 + nCipher PLC + Kevin McKeogh + kevin&ncipher.com +7683 + Air Liquide Electronics + Bob Irwin + bob.irwin&airliquide.com +7684 + NexTone Communications + Rakendu Devdhar + rdevdhar&nextone.com +7685 + KPMG Consulting, LLC + Russell Martinez + russellmartinez&kpmg.com +7686 + Pêches et Océans Canada - Garde côtière + Serge Deschamps + deschampss&dfo-mpo.gc.ca +7687 + UFSC/LabSEC + Ricardo Felipe Custodio + custodio&inf.ufsc.br +7688 + Social Change Online + Wayne Browne + wbrowne&socialchange.net.au +7689 + rasimusi + Raul + blanely&aol.com +7690 + ThruPoint, Inc. + Rich Pepe + rpepe&thrupoint.net +7691 + Warp Solutions, Inc. + Lenny Primak + lprimak&warpsolutions.com +7692 + The Stoddard Group, Inc. + Peter E.
Stoddard + PEStoddard&aol.com +7693 + International Life Sciences Institute (ILSI) + Sean Bozarth + sbozarth&ilsi.org +7694 + Summit Technologies + Stephen Leung + SLEUNG&SUMMTECH.COM +7695 + Media Cloud, Inc. + Jim Baker + jim&mediacloud.com +7696 + Bach Systems, Inc. + R. Michael Judd + mike.judd&bachsystems.com +7697 + Athabasca University + Barry W. Kokotailo + merlin&athabascau.ca +7698 + Context Media, Inc. + Dave Costantino + daver&contextmedia.com +7699 + Syncwave Networks, Inc. + Ron Green + rgreen&austin.rr.com +7700 + Endeavors Technology, Inc. + Jim Lowrey + jim&magisoftware.com +7701 + GemStone Systems Inc. + Mike Culbertson + mculbert&gemstone.com +7702 + Keicho,Ltd. + Kazuhiro Yoshida + yoshi&azn.ne.jp +7703 + Ecrio Inc. + Bijal V. Mehta + bmehta&ecrio.com +7704 + Der Polizeipraesident in Berlin + Thomas Noack + zseiiic4&polizei.berlin.de +7705 + Centre For Wireless Communications + Joshua Liew + liewjoshua&cwc.nus.edu.sg +7706 + JonCol.com + Jon Collins + jon&joncol.com +7707 + Proactive Technology Ltd. + KW Pong + kwpong&proactive.com.hk +7708 + WydeBand + Oleg Pogorelik + olegp&wydeband.com +7709 + Aristotle University of Thessaloniki + Dimitris Zacharopoulos + jimmy&it.auth.gr +7710 + Agrilis (pty) Ltd + T. Kenedi + agrilis&iafrica.com +7711 + GNKC + Vlad Beliaev + vbel68&yahoo.com +7712 + SECARTIS AG + Wolfgang Christ + wolfgang.christ&secartis.com +7713 + Tomorrow Technologies + Marcus Rating + marcus.rating&tomorrow-tech.de +7714 + EVIDIAN + Marie-France Dubreuil + Marie-France.Dubreuil&evidian.com +7715 + EYE Communications AG + Martin Kobel + kobel&eye.ch +7716 + ART S.p.A. + Marco Ercoli + marco.ercoli&art-web.it +7717 + Flying J Inc. + Karl Bolingbroke + karl.bolingbroke&flyingj.com +7718 + INA + Chicca Roberto + r.chicca&gruppoina.it +7719 + Avilinks + Stephane Rolland + srolland&avilinks.com +7720 + W.I. Simonson Inc. + Ron Reed + gmman11&aol.com +7721 + caatoosee schweiz ag + Marcel Wiedemeier + marcel.wiedemeier&caatoosee.com +7722 + net-linx-ps-sii + Rajesh Ghag + ghag&sii.com +7723 + Benham.net + Darren Benham + darren&benham.net +7724 + Unassigned + ---none--- + ---none--- +7725 + Hark Tower Systems + James Armstrong + james&harksystems.com +7726 + Pointgain Corporation + Surendra Reddy + skreddy&pointgain.com +7727 + CIN-NIC + Patrick Verertbruggen + patrick.veretbruggen&socmut.be +7728 + 3R Inc. + Hyunseok Shin + hsshin&3r.co.kr +7729 + Telson + Manuel Martinez + mmh&telson-internet.com +7730 + Voxpath Networks + Sidney Antommarchi + sid&voxpath.com +7731 + HomeStore.com + Henry Gunst + henry&springstreet.com +7732 + philiplo + Philip Lo + phlo&home.com +7733 + Pulsar Technologies, Inc. + Joan M. Noto + joan.noto&pulsartech.com +7734 + ProQuent Systems Corporation + Todd Thompson + todd&proquent.com +7735 + Airbiquity Inc + Allen Hartley + ahartley&airbiquity.com +7736 + Jarna + Andrew Toy + atoy&jarna.com +7737 + Internet Photonics Inc.
+ Dhritiman Banerjee + banerjed&lucent.com +7738 + Encendra Communications + Surendra Reddy + skreddy&encendra.com +7739 + DigEncP + Fredrick Lee + flee&digencp.org +7740 + NetMonger Communications + Christopher Masto + chris+oid&netmonger.net +7741 + SNMP Technologies + Chris Avis + chris&snmptech.com +7742 + Aptira Pty Ltd (formerly 'Antithesys Pty Ltd') + Tristan Goode + pen&aptira.com +7743 + Salomon Smith Barney, Australia + Michael O'Sullivan + osullim&ssmb.com.au +7744 + SPE Systemhaus GmbH + Ronald Winter + winter&spe-systemhaus.de +7745 + AETHRA + Claudio Panini + c.panini&aethra.it +7746 + Watershed Consultants Ltd + Anthony Price + anthony.price&talk21.com +7747 + Federation + Ran Locar + ranl&FederationWeb.com +7748 + GlobeCom Interactive + Ariel Brosh + ariel&globecom-interactive.com +7749 + HICOM GmbH + Sven Heuer + heuer&hicom.de +7750 + Helicon Networks + John Anderson + janderson&HeliconNetworks.com +7751 + mDiversity, Inc. + Melanie Datta + melanie.datta&mdiversity.com +7752 + Bravara Communications, Inc. + Steve Stolz + stolz&bravara.com +7753 + ZELPOS + Ales Jurik + ajurik&volny.cz +7754 + Infosquare Corp. + Giovanni Iamonte + iamonte&infosquare.com +7755 + The Walt Disney Company + Ian Funk + ian.funk&disney.com +7756 + Netarena Communications + Michael Lum + mlum&netarena.com +7757 + Tonbu Inc + Keith Heybourne + kheybourne&tonbu.com +7758 + E-BUSINESSNETWORK.COM + Surendra Reddy + skreddy&pointgain.com +7759 + EENADU.COM + Surendra Reddy + skreddy&pointgain.com +7760 + ANYSCAPE.COM + Surendra Reddy + skreddy&pointgain.com +7761 + POINTGO.COM + Surendra Reddy + skreddy&pointgain.com +7762 + EBIZXML.COM + Surendra Reddy + skreddy&pointgain.com +7763 + POINTGAIN.NET + Surendra Reddy + skreddy&pointgain.com +7764 + EBUSINESSXCHANGE.COM + Surendra Reddy + skreddy&pointgain.com +7765 + SYSDYNAMICS.COM + Surendra Reddy + skreddy&pointgain.com +7766 + BUSINESSACROSS.COM + Surendra Reddy + skreddy&pointgain.com +7767 + EBUSINESSOPENWORLD.COM + Surendra Reddy + skreddy&pointgain.com +7768 + E-BUSINESSOPENWORLD.COM + Surendra Reddy + skreddy&pointgain.com +7769 + E-BUSINESSOPENWORLD.NET + Surendra Reddy + skreddy&pointgain.com +7770 + EBUSINESSOPENWORLD.NET + Surendra Reddy + skreddy&pointgain.com +7771 + EBUSINESSACROSS.COM + Surendra Reddy + skreddy&pointgain.com +7772 + EBIZDYNAMICS.COM + Surendra Reddy + skreddy&pointgain.com +7773 + E-BUSINESSDYNAMICS.COM + Surendra Reddy + skreddy&pointgain.com +7774 + EBUSINESSX.COM + Surendra Reddy + skreddy&pointgain.com +7775 + E-DYNAMICS.COM + Surendra Reddy + skreddy&pointgain.com +7776 + E-DYNAMICS.NET + Surendra Reddy + skreddy&pointgain.com +7777 + E-DYNAMICS.ORG + Surendra Reddy + skreddy&pointgain.com +7778 + Excess Trade Zone, Inc. + Jerry Wen + jwen&partminer.com +7779 + InfoBlox Inc. + Stuart Bailey + sbailey&infoblox.com +7780 + Mirametric + Ritchie Young + ritchie.young&mirametric.com +7781 + Conservatoire National des Arts et Metiers + Jean-Paul Monteil + monteil&cnam.fr +7782 + Greek School Network + Limperis Antonis + limperis&cti.gr +7783 + Riello S.p.A. + Ict Structures + ict.structures&riellogroup.com +7784 + Lattis Enterprise Management Ltd + Jason Turner + jason.turner&lattis.co.uk +7785 + Rivne Communication Technologies Ltd. + Liubomir Ferents + postmaster&rct.net.ua +7786 + IBCnet Ltd.
+ Sandor Dobos + itgroup&ibcnet.hu +7787 + frontsite AG + Alexander Behr + alexander.behr&frontsite.de +7788 + Liebregts + Janus Liebregts + janus&liebregts.nl +7789 + LPM Online + Brad Welker + noc&lpmonline.net +7790 + Lombardia Informatica S.p.A. (formerly 'LISIT - Lombardia Integrata Servizi Infotelematici per il Territorio') + Luigi Bongiorni + luigi.bongiorni&lispa.it +7791 + Conseil General de la Loire + Jean-Marc Faure + jean-marc.faure&cg42.fr +7792 + incshopkorea + Jackson Kang + ohmylove&mail.co.kr +7793 + HMS + Walter Schinnerl + wschinn&iicm.edu +7794 + Infinity Tel-Data Inc + Weston Bustraan + weston&itdonline.net +7795 + Custom Software Solutions + Marc Rivers + mrivers&speedfactory.net +7796 + BowdenGS Technologies, LLC + Jeffrey "Jesse" James Bowden + bowdenclan&earthlink.net +7797 + Aculeus, Inc. + Jay Bolster + jay_bolster&msn.com +7798 + Toko Electric Co., Ltd. + Motonori Kawasaki + kawasaki&rd.tokodenki.co.jp +7799 + The PacificRoot + Bradley D. Thornton + tallship&tallship.net +7800 + Premier Network Co., Ltd + Kwanho Park + khpark&premiernet.co.kr +7801 + Radionet Oy + Marko Nieminen + mage&radionet.fi +7802 + 110 Limited + Samir Patel + samir.patel&one-ten.com +7803 + Terra Networks Brasil S/A + Werner Michels + werner.michels&corp.terra.com.br +7804 + elcan Corp. + Werner Oppenheimer + oppen&erols.com +7805 + NetNation Communications Inc. + Lynden Lindahl + noc&netnation.com +7806 + Techsar + Dilip Nayak + dnayak&techsar.com +7807 + Stoneware, Inc. + Tony Thompson + Tony.Thompson&stone-ware.com +7808 + Rearden Steel Technologies, Inc. + Scott Foster + hostmaster&reardensteel.com +7809 + Phil Pearl (formerly 'Finch Computer Services') + Phil Pearl + phil&pearl-family.com +7810 + Seek Systems + Steve Kahle + skahle&seeksystems.com +7811 + 40 Street Records + Amen Moja Ra + mojaray2k&yahoo.com +7812 + Pfadi Limmattal + Markus Grieder + mgrieder&gmx.ch +7813 + Cogetec HSI + Gerard Dufaux + g.dufaux&cogetec.com +7814 + Infinity Tel-Data Inc. + Weston Bustraan + weston&itdonline.net +7815 + Gilmore House Associates Ltd + David Mullins + david&gilmorehouse.com +7816 + Wissenschaftladen Dortmund e.V. + Sascha Gresk + ldap&free.de +7817 + Chiphead Consulting, Inc. + Wayne A. Hogue II + chiphead&chiphead.net +7818 + Rohit Communications Pte. Ltd. + Santhosh Kumar Pilakkat + santhoshkp&ieee.org +7819 + Opensource Consult + Sascha Gresk + sascha.gresk&opensource-consult.de +7820 + Configate Inc. + Irene Mikhlin + irene&configate.com +7821 + University of Silesia + Maciej Uhlig + muhlig&us.edu.pl +7822 + Athena Security + Wayne Pierce + Pierce&AthenaSecurity.Com +7823 + Clickaction + Tony Hayes + antonioh&clickaction.com +7824 + LG Electronics Inc. + Sunghee Lee + kamangee&wm.lge.co.kr +7825 + Cognizant Technology Solutions + Mohammad Fayazur Rahaman + mfayazur&chn.cts-corp.com +7826 + KPNQwest Austria GmbH. + Alexander Mayrhofer + noc&KPNQwest.at +7827 + Sofadex-Puratos + Mr DENIS + medenis&iam.net.ma +7828 + Commsbloke.com + Nick Baldwin + nick&commsbloke.com +7829 + Axon Digital Design B.V. + Christiaan Simons + rdsw&axon.tv +7830 + Service Info-Techno Inc. + Marc Laflamm + craml&hotmail.com +7831 + Dataflex Design Communications Limited + Stuart Peskett + stuart.peskett&dataflex.co.uk +7832 + ICSA Labs + Darren Hartman + dhartman&icsalabs.com +7833 + daveb.net + Dave Bailey + dave&daveb.net +7834 + Wissenschaftsladen Dortmund e.V. + Frank Nord + ldap&free.de +7835 + Hope Transportation + eisa osman + issa_osman&hotmail.com +7836 + Nuark Co., Ltd.
+ Jang SangBae + sbjang&nuark.co.kr +7837 + Sumtel Communications, Inc. + Henry Lin + hsinchuwalker&sinamail.com +7838 + UTOMI AG + Jürgen Ringler + jringler&utomi.de +7839 + Telia UK Limited + Richard Moon + rmoon&telia.co.uk +7840 + Zelax + Sergey Suhman + suhman&zelax.ru +7841 + Skynet Ltd. + Ilya V Kotusev + il&sky.ru +7842 + Université Paris 12 - Val de Marne + Patrick Rousselot + Patrick.Rousselot&univ-paris12.fr +7843 + Callera Ltd + Jonathan Webb + jonathan.webb&callera.net +7844 + KGB Entertainment + Jurg van Vliet + jurg&kgbe.net +7845 + Cipher + Boyko Sergey + sales&cipher.kiev.ua +7846 + ETRALI + Stephane Payaud + edelcamp&sinfor.fr +7847 + The Evangelical Lutheran Good Samaritan Society + Kevin Lehan + klehan&good-sam.com +7848 + Staatsbetrieb Saechsische Informatik Dienste + Marcus Knappe + svn&sid.sachsen.de +7849 + CommerceOne Auction Services + Anil Madan + anil.madan&commerceone.com +7850 + Penford Corporation + Dennis Glatting + dennis.glatting&software-munitions.com +7851 + Software Munitions + Dennis Glatting + oid&pki2.com +7852 + CyberElves + Simon Lockington + simon.lockington&cyberelves.com +7853 + Teknoids + Elmer Masters + elmer&teknoids.net +7854 + nazit + Hwan Yun + fhlove&hanmail.net +7855 + ISI-CNR + Mario Cannataro + cannataro&si.deis.unical.it +7856 + REALTRONIC + Laurent TEXIER + ltexier&realtronic.fr +7857 + XenoSite + G.J. Moed + gjmoed&xenosite.net +7858 + Outcome Technology Associates, Inc. + Sean McLinden, MD + mclinden&informed.net +7859 + NetMount + Alex Oberlander + alexo&netmount.com +7860 + Datatone Ltd + Dave Humphreys + dave&datatone.co.uk +7861 + U4EA Technologies Ltd. + Richard Williamson + richard.williamson&u4eatech.com +7862 + Herrns PC + Alf Lovbo + alf&herrn.com +7863 + FreeW3, Inc. + Robert J. Hammond + bob&cmitech.com +7864 + Megadot Pty Ltd + Mitko Stoyanov + mstoyan&megadot.com.au +7865 + Litnet NOC + Marius Urkis + marius&litnet.lt +7866 + Corinthian Engineering Pty Ltd + Richard Perini + rpp&ci.com.au +7867 + Mailvision + Diego Besprosvan + maria&mailvision.com +7868 + Universite de Sherbrooke + Christian Houde + christian.houde&courrier.usherb.ca +7869 + Byzantium Solutions Ltd + Mr J.A.Woodforth + jon.woodforth&byzantium.com +7870 + Polska Platforma Internetowa + Maciek + maciek&profnet.pl +7871 + NICsys GbR + Volker Holfeld + volker.holfeld&nicsys.de +7872 + Arrista Technologies + Wayne Schellekens + wschellekens&arrista.com +7873 + Info-Onscreen Software Systems Pty. Ltd. + Anthony Turco + anthony.turco&info-onscreen.com.au +7874 + Highland Injection Molding Inc. + Jon Stokkeland + stoker&highland-plastics.com +7875 + Shibaura Institute of Technology, Science Information Center + Tsutomu Hoshino + hoshino&sic.shibaura-it.ac.jp +7876 + Statens vegvesen + Svein Olav Bjerkeset + hostmaster&vegvesen.no +7877 + Basefarm AS + Rein Tollevik + rein&basefarm.no +7878 + The University of the South Pacific + Simon Greaves + Simon.Greaves&usp.ac.fj +7879 + T-TeleSec + Detlef Dienst + detlef.dienst&telekom.de +7880 + Peco II Inc. + Tom Dannemiller + tdannemiller&peco2.com +7881 + Cognitas Technologies, Inc. + Santiago Marra + santiago.marra&cognitas.com +7882 + City of Prescott IT Department + Chris Noe + noec&pr.erau.edu +7883 + amirk.com + Amir Khosrowshahi + amirk&amirk.com +7884 + Francis W. Parker School + Arek Dreyer + arek&arekdreyer.com +7885 + Arek Dreyer + Arek Dreyer + pen&arekdreyer.com +7886 + K Application + Bruce Kilgore + bruce&kapplication.com +7887 + nCUBE, Corp.
+ Dave Spurbeck + dspurbeck&ncube.com +7888 + Iamba Technologies Ltd. + Yuval Ben-Haim + Yuval_Ben-Haim&iamba.com +7889 + Lingnan University + W. M. Cheung + waiman&ln.edu.hk +7890 + Arrow Electronics, Inc. + Philip Lembo + plembo&arrow.com +7891 + Confederation of Norwegian Business and Industry + Lars Uppheim + lars.uppheim&nho.no +7892 + Lannert.de + Detlef Lannert + dl&lannert.de +7893 + Yacom Internet Factory S.A + Lorenzo Planas + lplanas&ya.com +7894 + VanCo.net s.r.o. + Pavel Uhliar + uhliar&vanco.cz +7895 + Amethon Solutions + Paul Nicholls + paul1&amethon.com +7896 + Rietumu Banka + Sergej Bondar + bsb&rietumu.lv +7897 + Knowledge Base BV + Mark van der Snel + jaac&worldonline.nl +7898 + Openet Telecom Ltd. + Niall Couse + snmp&openet-telecom.com +7899 + Cap Gemini, portugal + Joao Bernardo + joao.bernardo&capgemini.pt +7900 + Neuberger & Hughes GmbH + Florian Klein + fk&n-h.com +7901 + Capiro + Doan Veitia + doan&capiro.vcl.sld.cu +7902 + alberta wellnet + Luda Pogrebinsky + luda.pogrebinsky&albertawellnet.org +7903 + EcoProg Ltd. + Smirnova Tatiana V. + smirnova&ecoprog.ru +7904 + Greenwich Capital Markets + Michael Zanga + zangam&gcm.com +7905 + National Training Institute, Inc. + Derrick Hamner + derrickh&ntiusa.com +7906 + Brainworkers Srl + Luca de Marinis + loop23&inwind.it +7907 + Alliance Race Car Builders + Kevin T. Anderson + bloomquis&45.com +7908 + TelSoft Solutions, Inc + Omer Azmon + oazmon&telsoft-solutions.com +7909 + American Communication Technologies International, Inc. + Clayton E. Cramer + CCramer&acticorp.com +7910 + CSTS, Inc. + Eric Einem + eric&csts.com +7911 + Dreamintek Co.,Ltd + Kyungjae Han + econo&dreamintek.com +7912 + Staffware PLC + Chris Arnold + carnold&staffware.com +7913 + Space Hellas S.A. + Theodore J. Soldatos + noc&space.gr +7914 + qmail-ldap project + Andre Oppermann + opi&nrg4u.com +7915 + Cetevo AB + Pertti Palokangas + pertti&cetevo.com +7916 + PAGE + JAVIER ORDEN + jorden&pagetelecom.com +7917 + Area de Ciencias de la Computación e Inteligencia Artificial + David Cabrero Souto + cabrero&uvigo.es +7918 + Electrosonic Ltd + Michael Scuffham + mscuffham&electrosonic-uk.com +7919 + Spyn Inc. + Alex Bewley + alex&spyn.com +7920 + National Education Association of the United States + Peter Cotterill + pcotterill&nea.org +7921 + AethyrNet Solutions + John Hoke + john&hoke.org +7922 + Spiro Kommunikation + Fredrik Hultkrantz + fjutt&blink.se +7923 + OrgaNet GmbH + Stefan Lang + operator&organet.com +7924 + Bundesamt fuer Sicherheit in der Informationstechnik + Michael Thiel + Michael.Thiel&bsi.bund.de +7925 + SPECS GmbH Surface Analysis and Computer Technology + Bernhard Otto + otto&specs.de +7926 + University of Antwerp + Wim Holemans + wim.holemans&ua.ac.be +7927 + Splendid Internet GmbH + Jens Kintrup + jkintrup&splendid.de +7928 + Wysdom Inc. + Steven Chen + schen&wysdom.com +7929 + HBOS plc + Paul Mayes + PaulMayes&hbosplc.com +7930 + Telecom Argentina + Claudio Lapidus + clapidus&ta.telecom.com.ar +7931 + SMCC + S. Maloney / M + DBA&Tampabay.rr.com +7932 + AmeriNet, Incorporated + H. Hammond + DBA&Debit-It.com +7933 + Promise Technology Inc.
+ Ram Natarajan + ramn&promise.com +7934 + Shanghai JiaoTong University + Shanghai JiaoTong University + zhenyuw&263.net +7935 + IVT + Michael Roberts + miroberts&ivtweb.com +7936 + YANGJAE MICRO + YoungSam-Yu + young&yjmicro.co.kr +7937 + inPACT technologies + Peter Mee + peter&inpact-tech.co.uk +7938 + IEE + Charles Lopes + Charles.Lopes&iee.lu +7939 + Liscon GmbH + Gerhard Schaden + ges&liscon.com +7940 + Celeritas + Kenny Krupnik + kenny&celeritas.co.il +7941 + Tecnica 6000 S.L. + Luis Moral + t6000&t6000.es +7942 + Xtempus + Steve Woodcock + steve.woodcock&xtempus.com +7943 + Home Wireless Networks, Inc. + Dan Wilding + dwilding&homewireless.com +7944 + PennyLan + Jean-Christophe DROUET + jc.drouet&pennylan.com +7945 + UMB Bank, n.a. + Dan Darrow + daniel.darrow&umb.com +7946 + FlashPoint Technology, Inc. + Jan Pope + pope&flashpoint.com +7947 + MSP Technologies + Zaki Alam + zaki&mspt.com +7948 + DASIR ENTERPRISES + GEORGE STEINBERG + georgepq&email.com +7949 + Broadcloud Communications, Inc. + Jim Chou + jchou&broadcloud.com +7950 + Overture Networks + Prayson Pate + prayson.pate&overturenetworks.com +7951 + CNCTC + Liu GuoBiao + wwwlgb&163.net +7952 + Too Corporation + Kenji Kusaka + kusaka&too.co.jp +7953 + Majitek + Andrew Birch + administrator&majitek.com +7954 + Siemens Business Services AS + Kaare Smith + kaare.smith&sbs.siemens.no +7955 + Alfaskop AB + Andreas Stenmark + andreas.stenmark&alfaskop.se +7956 + Initio IT-løsninger AS + Øyvind Møll + oyvindmo&initio.no +7957 + Lares Technology + Gregory Holt + greg&lares.com +7958 + Efficient Channel Coding, Inc. + Jim Kapcio + jkapcio&eccincorp.com +7959 + Gabriel Roman + Gabriel Roman + gabrielroman2000&yahoo.com +7960 + FreeRein + John Chu + JohnChu&FreeRein.com +7961 + Harry L. Styron, Attorney at Law + Harry L. Styron + hlstyron&wcrklaw.com +7962 + Not Another Corporation, Inc. + Norman J. Harman Jr. + njharman&knoggin.com +7963 + Tebie + Qingfeng Pan + checksum&tebie.com +7964 + Master Comunicacao e Marketing Ltda. + Cass Surek + cass&master.com.br +7965 + Ingenieurbuero Tartsch + Michael Tartsch + oid-reg-iana&michael-tartsch.de +7966 + SPY internetworking + Dustin Sallings + dustin+iana&spy.net +7967 + serveroperator.com (formerly 'Tekbabble') + Adam Burford + iana&serveroperator.com +7968 + Spearhead Technologies Ltd. + Boaz Greenberg + boaz&spearhead.net +7969 + BeComm + Brady Montz + bradym&becomm.com +7970 + Athelon Corporation + Aris Athens + aris.iana&athelon.com +7971 + Tølveguten Lars Bahner + Lars Bahner + lars&bahner.com +7972 + XingTang Communication Technology Co.,Ltd. + Qiuyun Li + liqiuyun&sina.com +7973 + TELEFONICA DE ESPAÑA + Juan Carlos Galiana Zaragoza + juancarlos.galianazaragoza&telefonica.es +7974 + TURBOMECA + Jean-Paul Benninger + administrateur.messagerie&turbomeca.fr +7975 + Ark e-Management Ltd. + Martin Stephen Pegler + mpegler&ark-online.net +7976 + Brotsman & Dreger, Inc.
+ Edward Dreger + eddy_iana_spam&brotsman.com +7977 + VidSoft GmbH + Sascha Kuemmel + kuemmel&vidsoft.de +7978 + France Telecom Hosting + Jean-Paul Poiret + JeanPaul.Poiret&fth.net +7979 + Storigen Systems + Paul John Biciunas + paul.biciunas&storigen.com +7980 + Sistemas y Aplicaciones de Telecomunicaciones S.A. + Jose Luis Marques Bosch + jlmarques&satelsa.com +7981 + Interbind + Dan Rosner + drosner&interbind.com +7982 + The Simon Shapiro Foundation + Simon Shapiro + shimon&simon-shapiro.com +7983 + HPEB (Health technology Planning & Evaluation Board) + Ae Kyung Kwon + help&hpeb.re.kr +7984 + Ticket Master/CitySearch Corp. + Eric Rickerson + eric&citysearch.com +7985 + University of Catania + Dr. Enrico Commis + Enrico.Commis&unict.it +7986 + ip.access + Neil Piercy + Neil.Piercy&ipaccess.com +7987 + Universite de Pau et des Pays de l'Adour + Michel Beheregaray + michel.beheregaray&univ-pau.fr +7988 + Université Nancy 2 + Vincent Mathieu + reseau&univ-nancy2.fr +7989 + IMAG + Pierre Laforgue + Pierre.Laforgue&imag.fr +7990 + EOI Technologies, Inc. + Sean Patrick Daly + dalys&eoitech.com +7991 + eLaw.com Inc + Jeffrey T. Jones + jjones&elaw.com +7992 + PRFD + Riccardo Rosso + prfd&libero.it +7993 + Factorit S.p.A + Maurizio Biletta + maurizio.biletta&factorit.it +7994 + Continuous Computing Corp. + Robert Cagle + rcagle&ccpu.com +7995 + Snell & Wilcox Ltd + Sandy Kellagher + snmp.support&snellwilcox.com +7996 + Electric Power Research Institute (EPRI) + James T. L. Kou + Jameskou&yahoo.com +7997 + Ball Corporation + Curt Hastings + chastings&ball.com +7998 + MailFriday + Mona He + mhe&inxight.com +7999 + TP Internet + Pawel Terczynski + pawel.terczynski&tpi.pl +8000 + Ipace Inc. + Alex Lee + alex&ipace.com +8001 + Qitek + Paul Yang + paul&qitek.com.tw +8002 + Metamerge AS + Bjørn Stadheim + bjorn&metamerge.com +8003 + Swisscom IP-Plus + Daniel Matuschek + daniel&ip-plus.net +8004 + MID GmbH + Michael Buchau + m.buchau&mid.de +8005 + Physics Department, Queen Mary, University of London + Alex Martin + a.j.martin&qmw.ac.uk +8006 + EINSTEINet GmbH + Dierk Lucyga + dierk.lucyga&einsteinet.de +8007 + threllis GmbH + Tobias Hintze + KYoznqfGDZv5I&threllis.de +8008 + Pactolus Communication Software, Inc. + Jeff Gibson + jgibson&pactolus.com +8009 + iSecuritas + Mark West + markw&iSecuritas.com +8010 + ExaNet + Anat Eyal + anat&exanet-storage.com +8011 + McKee Foods Corporation + Charles Leeds + charles_leeds&mckee.com +8012 + Hyperdrive Sistemas Ltda + Mateus Cordeiro Inssa + mateus&hyperdrive.com.br +8013 + Nextlevel.com Internet Productions Inc. + System Administrator + admin&nextlevel.com +8014 + Stonefly, Inc + Jeff Byers + jbyers&stonefly.com +8015 + Airocon + George Shung + gshung&ultranet.com +8016 + Ravensbourne College of Design & Communication + Miles Metcalfe + m.metcalfe&rave.ac.uk +8017 + Primus Canada + Joe Hohertz + jhohertz&primus.ca +8018 + Hogeschool van Arnhem en Nijmegen + Jeroen Langestraat + J.Langestraat&han.nl +8019 + The Stellar Ice Company + Sean C. McLaughlin + seanm&intersurf.com +8020 + Ecole Normale Supérieure de Lyon + Jean-Louis Moisy + Jean-Louis.Moisy&ens-lyon.fr +8021 + G. A. Enterprises + Jim Bradbury + jim&bradbury.org +8022 + Senets Broadband, Inc. + Kevin McCarthy + kmccarthy&senets.com +8023 + InfoCruiser Inc.
+ Chris Bringard + chris&infocruiser.com +8024 + QuoVadis Limited + Walter Cooke + wcooke&quovadis.bm +8025 + Friendlyworks + Doug Lee + dlee&friendlyworks.com +8026 + Rockbrook Systems Group + Doug Lee + dlee&rockbrook.com +8027 + binHOST.com, Inc. + Justin B. Newman + justin.newman&binhost.com +8028 + cast + Ma Shengcun + msc72&263.net +8029 + DCTC + feifei + ffei928&mail1.sjtu.edu.cn +8030 + Prakash Cotton Mills Ltd. + Rohit Jalan + rohitj&prakashcotton.com +8031 + University Library of Kassel, Germany + Michael Plate + plate&hrz.uni-kassel.de +8032 + Agenda d.o.o. + Matjaz Godec + gody&agenda.si +8033 + Almende + Kees Klop + kees&almende.com +8034 + Rohill Technologies B.V. + Bert Bouwers + e.bouwers&rohill.nl +8035 + RY Enterprises + ryasuda&yahoo.com + ryasuda&yahoo.com +8036 + USA.NET + Jens Moller + Jens.Moller&corp.usa.net +8037 + COLOMAR Group, Inc + Scott Guthrie + scott&colomar.com +8038 + HomeBase Work Solutions + Joy Smathers + jsmathers&infocast-corp.com +8039 + silly-ants + Wang Qiang + wq_john&email.com +8040 + Thunder Systems, Inc. + Ray Leblond + rayl&thundersystems.com +8041 + Hostway Corporation + Jim Cusick + jim&hostway.com +8042 + eCELL Technologies, Inc. + Michael S. Cohen + mscohen&bellatlantic.net +8043 + ProActiveMonitoring, Inc. + Billy Ray Wilson + brwilson&mindspring.com +8044 + M.J. DuChene & Associates + Mike DuChene + mduchene&mjduchene.net +8045 + Glen Lewis + Glen Lewis + glen&glenlewis.com +8046 + Utah Java User Group + Glen Lewis + glen&utahjava.org +8047 + OSEDU + Glen Lewis + glen&osedu.org +8048 + Avatar Internet Services + Chris Darrow + cdarrow&avatar-isp.com +8049 + NetAccess, Inc. + Ryan Tucker + rtucker&netacc.net +8050 + RISS-Telecom + Dmitry Frolov + frolov&riss-telecom.ru +8051 + Michael McCafferty + Michael McCafferty + mikemcc&silver.he.net +8052 + TesserNet Systems Inc. + Ray Fong + ray&tessernet.com +8053 + Islington ave. + Islington ave. + isln-ave&catnet.ne.jp +8054 + The Scan Shop Pty. Ltd. + Daniel Morriss + dmorriss&projectx.com.au +8055 + Optilink Technologies Company Limited + Liu Ying + liuying_vienal&yahoo.com +8056 + SHENYANG NEU-ALPINE SOFTWARE CO.,LTD. + Shi Lei + shil&neu-alpine.com +8057 + CESNET + CESNET masters team + masters&cesnet.cz +8058 + MessageVine + Yair Halevi + spock&messagevine.com +8059 + Net To Net Technologies + Ronald Fotino + rfotino&nettonettech.com +8060 + Armstrong Atlantic State University + Thomas Eason + iana-contact&armstrong.edu +8061 + Multipoint Communications Ltd + John Kennett + john.kennett&multipoint.co.uk +8062 + ColorMax Interactive + Derek O'Dell + director&colormaxinteractive.com +8063 + M@innet Communication Ltd. + Rami Refaeli + rami&mainnet.co.il +8064 + Global Technology Solutions + Walt Greenberg + wgreenberg&gtsinc.net +8065 + Uni-Ter Underwriting Management Corporation + John Robinson + jrobinson&uni-ter.com +8066 + WatchMark Corporation + Sharon Borough + sharon.borough&watchmark.com +8067 + MCI + Jim Potter + jim.potter&mci.com +8068 + Buffalo Rock Company + David Mackay + dmackay&buffalorock.com +8069 + Rozz Enterprises + Hostmaster&Rozz.com + Hostmaster&Rozz.com +8070 + ZeroHype Technologies, Inc. + Don Papp + donp&zerohype.com +8071 + Sandstorm Enterprises, Inc.
Enterprise MIB Administrator + enterprise-mib-admin&sandstorm.net +8072 + net-snmp + Wes Hardaker + hardaker&users.sourceforge.net +8073 + Lumos Technologies Inc + Arun Nair + arun&lumos.com +8074 + Box Solutions Corporation + Erik Reid + erik&box-sol.com +8075 + Lateral Concepts and Designs + Darren Hitchman + dhitchman&lcd.com.au +8076 + Siemens PSE Austria + Markus Öllinger + markus.oellinger&siemens.at +8077 + ChamberSign France + Pierre-Yves Nicholas + pierre-yves.nicolas&chambersign.tm.fr +8078 + INU Consultancy + Karl Lovink + karl&inu.nl +8079 + ensoport.com + Kevin Hildebrand + kevin&hq.ensoport.com +8080 + Fastweb S.p.A. + Antonio Carlini + antonio.carlini&fastweb.it +8081 + Magnum Solutions Ltd + Dave Cross + dave&mag-sol.com +8082 + SEPATON, Inc. + Miklos Sandorfi + msandorfi&sepaton.com +8083 + Packet Design + Kim Dimick + dimick&packetdesign.com +8084 + tangali.eu.org + A.J. Tangali + ajtangali&yahoo.com +8085 + IronStream Inc + Paul Koning + pkoning&ironstream.com +8086 + NUASIS Corporation + Dan Rich + dan.rich&nuasis.com +8087 + TierNext + Glen Lewis + glen&tiernext.com +8088 + DCSI + Jim Rowan + jmr&computing.com +8089 + Covad Communications + Geoffrey Parsons + geoffrey.parsons&bluestar.net +8090 + Enterprise Computing Services + Daniel Watkins + daniel&ecs-trade.com +8091 + IP Soft, Inc. + Bill Russell + Bill.Russell&IP-Soft.NET +8092 + Elgo Maribor d.o.o. + Matjaz Godec + matjaz.godec&elgo.si +8093 + Aliroo Ltd. + Ram Cohen + ram&aliroo.com +8094 + DreamArts Corp. + Kensuke Ishida + kensuke&dreamarts.co.jp +8095 + INELCOM Ingeniería Electrónica Comercial S.A + Juan José García + jjgarcia&inelcom.com +8096 + The Voice Technology Forum + Sanjyot Bharathan + theweb&india.com +8097 + Servicom 2000, S. L. + Scott Prater + scott.prater&servicom2000.com +8098 + Morat Games Ltd + Mark Cheverton + ennui&morat.net +8099 + Alex Wilson Coldstream Ltd + Anil Amarakoon + awcl&awcoldstream.com +8100 + Softricity, Inc. + Shane Marshall + smarshall&softricity.com +8101 + Zambeel, Inc. + Gaurang Mehta + gaurang&zambeel.com +8102 + Mark Morgan Enterprises + Mark Morgan + markimorgan65&hotmail.com +8103 + Winstechnet Co.,Ltd + Mi-boon Hyeon + clarinse&hanmail.net +8104 + Sun Microsystems Professional Services + Peter Charpentier + Peter.Charpentier&sweden.sun.com +8105 + Appilog + Muli Assa + muli.assa&appilog.com +8106 + LUXMATE Controls GmbH + Georg Künz + kuenzge&luxmate.co.at +8107 + Kabira + Eric Tamke + tamke&kabira.com +8108 + VHI + John Murphy + john.murphy&vhi.ie +8109 + Price Express + Lelik P. Korchagin + lelik&price.ru +8110 + Message Central plc. + James Royan + james.royan&msgc.com +8111 + Pacific Trade & Technology + Jeffrey G Johnson + jeffj&pactrade-tech.com +8112 + Green Shield Canada + David Drouillard + ddrouill&greenshield.ca +8113 + Bellanet International Secretariat + Kevin McCann + kmccann&bellanet.org +8114 + Naval Operational Logistics Support Center + Raymond Reyes + raymond.c.reyes.ctr&navy.mil +8115 + Birch Telecom + Randy Dickinson + trance&birch.net +8116 + Danware Data A/S + Thomas Eskesen + te&danware.dk +8117 + NAFEM (North American Association of Food Equipment Manufacturers) + Dave Vuicich + dvuicich&crescor.com +8118 + Capitol Systems Corporation + Dean Corn + deanc&capitolsystems.com +8119 + Mission Valley Regional Occupational Program + David Powell + dpowell&mvrop.org +8120 + Transvirtual Technologies, Inc. + Mark Garey + mark&transvirtual.com +8121 + Ridgeway Systems & Software Ltd.
Jonathan Black + jblack&ridgeway-sys.com +8122 + KB Impuls Service GmbH + Alexander Efimov + admin&data.com.ru +8123 + Kompetenznetz Maligne Lymphome + Dr. Barbara Heller + barbara.heller&imise.uni-leipzig.de +8124 + Carrefour + Samuel Piau + samuel_piau&carrefour.com +8125 + Internet Services + Szymon Kosmala + hostmaster&is.com.pl +8126 + My Docs Online, Inc + David Motes + david&mydocsonline.com +8127 + Effinity Labs + Mike Heath + heathm&effinitylabs.com +8128 + Boco + Jiao Li + lij&boco.com.cn +8129 + JadeBird HuaGuang Ltd Co. + Zhao Xuegang + zhao.one&yeah.net +8130 + Heimann Biometric Systems GmbH + Ralph Lessmann + r.lessmann&hbs-jena.com +8131 + Glory Telecom. Co.,Ltd. + Chen-Po-An + mis&glorytel.com.tw +8132 + Nasik Welding Products Pvt.Ltd + (no contact name) + sheetaljagota&yahoo.com +8133 + FSSC-London + Martin Croker + martin.croker&accenture.com +8134 + Insitel S.A. + David Alejandro Saldaña Orjuela + dsaldana&insitel.com +8135 + Andong National University + Young-Wook Cha + ywcha&andong.ac.kr +8136 + Centrul de Pregatire in Informatica + Florin Izvoranu + florin&cpi.ro +8137 + Palisade Systems + Rose Gorrell + gorrellr&palisadesys.com +8138 + De Roeck Software Engineering BVBA + Nick De Roeck + de.roeck.software&skynet.be +8139 + Aaxion Software Corporation + Timothy Stout + stout&aaxion.com +8140 + C2 Technologies, Inc. + Christopher Broyles + broyles&c2-tech.com +8141 + Kashpureff Boling Laboratories, Inc. + Eugene Kashpureff + ekashp&kblabs.com +8142 + HCA The Healthcare Company + Jason A. Barnett + jason.barnett&hcahealthcare.com +8143 + ICTV + Ellen Fratzke + efratzke&ictv.com +8144 + Fiberxon (China) Corp., Ltd + Tony Xu + tony.xu&fiberxon.com +8145 + University of Salzburg + Andreas Maier + andi&cosy.sbg.ac.at +8146 + Snakegully + Darryl Luff + darryll&snakegully.nu +8147 + Mercury Telecommunications + Matt Camp + mcamp&quicksilver.co.nz +8148 + Niragongo Technologies Ltd. + Roy Amir + roya&niragongo.com +8149 + Generalitat Valenciana + Joaquin Galeano + galeano_joa&gva.es +8150 + SECARON AG + Michael Spreng + spreng&secaron.de +8151 + Worldweb.net + Duane Dinschel + ddinschel&worldweb.net +8152 + Visto Corporation + Jennifer Yiu + jyiu&visto.com +8153 + jGuild International ltd + Yannick Menager + ymenager&jguild.com +8154 + Riser Management Systems + Mike Kelly + mkelly&riser.com +8155 + Transcept, Inc. + Karl T. Cooper + kcooper&transcept.com +8156 + AgencyWorks + Hongwei Zhang + hongwei.zhang&agencyworks.com +8157 + SynCom Network, Inc. + Wei-Lee Chen + wlc&mail.syncom.com.tw +8158 + MSP-LUB + Lubomir Jancovic + JncvcL&aol.com +8159 + SHIN SATELLITE PUBLIC COMPANY LIMITED. + Pho Zanaka + phoz&thaicom.net +8160 + Digital Platform Support Services + Arthur Tew + atew&mweb.co.za +8161 + Star Home GMBH + Danny Parchack + danny_parchack&starhome.com +8162 + DEXIA + Patrick De Winne + patrick.dewinne&dexia.be +8163 + Double D Electronics Ltd + S. Davies + shd&ddelec.co.uk +8164 + Starent Networks + Daniel Walton + dwalton&starentnetworks.com +8165 + boutemy.com + Yves Boutemy + yves&boutemy.com +8166 + Viag Interkom + Joerg Herrmann + joerg.herrmann&viaginterkom.de +8167 + Transversal + Richard Watts + rrw&transversal.com +8168 + Nectar.com + Jacques A. Vidrine + n&nectar.com +8169 + Carroll Lutheran Village + Christopher A. Seward Sr.
+ cseward&clvillage.org +8170 + Monitor Group + Paul Kiely + pkiely&monitor.com +8171 + Oni Solutions + Ricardo Sousa + lan&meganet.pt +8172 + Johns Hopkins University, MSEL-LCS + Elliot Metsger + emetsger&jhu.edu +8173 + INDRA Sistemas + A. Chazarra + achazarra&indra.es +8174 + Netburner + Paul Breed + Paul&Netburner.com +8175 + Carrier9 Networks + Sukanta Ganguly + sganguly&carrier9.com +8176 + AstroDesign,Inc. + Toshiaki Ogawa + togawa&astrodesign.co.jp +8177 + HCL Communications + Clive Taylor + clive&snmp.co.uk +8178 + Just Innovative Software GmbH + Martin Eigel + eigel&justis.de +8179 + Communications Laboratories Telekommunikations Dienstleistungs-Ges.m.b.H. + Bodo Rueskamp + iana+spam&pinda.de +8180 + Spirea + Kim Laraqui + kim&spirea.com +8181 + Commarco GmbH (formerly 'Scholz & Friends Dresden GmbH') + Matthias Peplow + matthias.peplow&s-f.com +8182 + The Boston Bit Co + Matthew McNeely + matthew&bostonbit.com +8183 + NetSource America Inc. + Jeff Konz + jkonz&netsourceamerica.com +8184 + Inspira S.L. + Jose Lopez-Serrano + jlopez&inspira.es +8185 + Online Creation, Inc. + Monica Gupta + monica&oncr.com +8186 + California Institute of Technology + Robert Logan + bob&its.caltech.edu +8187 + Sharinga Networks Inc. + Pete Yandell + pete&sharinga.com +8188 + AXA Technology Services Switzerland AG (formerly 'Winterthur Versicherungen') + Thomas Burri + wana.admin&axa.ch +8189 + Gerichhausen, Adomeit & Terstappen GmbH + Christoph Adomeit + ado&niederrhein.de +8190 + 3M Deutschland GmbH (formerly 'Quante') + Lars Schrix + lschrix&mmm.com +8191 + glutam.at + Rupert Roesler-Schmidt + rupee&glutam.at +8192 + Symbiont Networks, Inc. + Andrew Scholnick + AScholnick&symbiontnetworks.com +8193 + Telemetry Technologies + David Taylor + dtaylor&telemetrytech.net +8194 + Targetnet.com Inc. + James FitzGibbon + james&targetnet.com +8195 + Effinity Net, LC + Mike Heath + heathm&effinitylabs.com +8196 + Teldar Corporation + Kevin Musick + kmusick&teldar.com +8197 + LearningSoft Corp. + Sunil Gupta + sg&learningsoft.net +8198 + eBuilt, Inc. + Bill Pier + ops&ebuilt.com +8199 + Instituto Tecnológico Autónomo de México + Cristian Martínez + cfuga&itam.mx +8200 + UNNATI COMPUTERS + Vikas S. Bhagwat + unncomp&bol.net +8201 + XCelaron Pty Ltd + Chris Avis + chris&xcelaron.com +8202 + Toro Assicurazioni S.p.A. + Franco Lanfranco + f.lanfranco&toroassicurazioni.it +8203 + LPG Innovations + Harri Mauria + harri.mauria&lpg.fi +8204 + Cymtec Systems, Inc. + Michael Mester + mmester&cymtec.com +8205 + Unassigned + Returned 2003-04-30 + ---none--- +8206 + Centro Regional de Investigaciones Basicas y Aplicadas de Bahia-Blanca + Carlos Matrangolo + symatran&criba.edu.ar +8207 + Integratus, Inc. + Kevin M. O'Leary + oleary&integratus.com +8208 + Best Western International + Amy Petersen + petersa&bestwestern.com +8209 + Informio + Duncan Fisher + dfisher&informio.com +8210 + Centegy + Pearl Parker + pearl_parker&centegy.com +8211 + ShenZhen TCL Communication Technology Co.,Ltd. + Anyin Zhu + zhuanna&263.net +8212 + Harbour Networks Co. + Li Yinan + lijg&harbournetworks.com +8213 + Faculty of Medicine, University of Sydney + Chris Albone + yvain&gmp.usyd.edu.au +8214 + Atomica + Nachman Palanker + nachmanp&atomica.com +8215 + Authentified + Henry Jean-luc + jlhenry&IDEALX.com +8216 + Cyzen Tech.,Co.LTD + Sang-Hyun Kim + kimppong&cyzen.net +8217 + SLAonline.com Ltd + Cliff Chapman + c.chapman&slaonline.com +8218 + EC - Erdoelchemie GmbH + T.
Haeger + thomas.haeger&innovene.com +8219 + Ecomda + Joachim Gjesdal + joachim.gjesdal&ecomda.com +8220 + TCL Holdings Co., Ltd. + Yang Xuchun + yxuchun&sina.com +8221 + BAE SYSTEMS, Integrated Defense Solutions + Mike McNair + michael.mcnair&baesystems.com +8222 + Matthew White + Matthew White + mwhite42&yahoo.com +8223 + Atlas Commerce + Kevin Reester + reester.kevin&atlascommerce.com +8224 + LeasedMinds + Graham Wooden + graham&leasedminds.com +8225 + Open Connect AG + Romeo Benzoni + rob&opencon.ch +8226 + GigaNews.com, Inc. + Michael Douglass + mikedoug&giganews.com +8227 + CyberObject Corp + Edward Ma + mjp&cyberobject.com +8228 + Jalan Network Services, Inc. + Matthew Marlowe + ops&jalan.com +8229 + Phil Systems + Padmaja + pmudras&phil.com.sg +8230 + RING! ROSA Products Bv + Francis Butet + fgbutet&ringrosa.com +8231 + Lineo + Hugo Delchini + Hugo.Delchini&lineo.com +8232 + Technische Hochschule Mittelhessen (formerly 'Fachhochschule Giessen-Friedberg') + Sven Hartge + its&thm.de +8233 + iBasis, Inc. + Rick Genter + rgenter&ibasis.net +8234 + ECET International + Mike Gilbert + mike.gilbert&ecet.com +8235 + CENIT AG + Christian Brüssow + c.bruessow&cenit.com +8236 + White Horse Interactive + Pae Choi + paec&whitehorse.com +8237 + NCC GmbH + Gerd Magerhans + gm&ncc-gmbh.de +8238 + Lutheran Brotherhood + Michael Nadzieja + nadzieja.mike&luthbro.com +8239 + Peribit Networks + Thiagarajan Hariharan + hariharan&pobox.com +8240 + Path 1 + Ben Leong + bleong&path1.net +8241 + Securant Technologies + Steven Bazyl + sbazyl&securant.com +8242 + Security Portal, Inc + Matt Fisher + mfisher&securityportal.com +8243 + iPass, Inc. + Jim Underwood + junderwo&ipass.com +8244 + Brocade Communications Systems, Inc. (formerly 'McDATA Corp.') + Scott Kipp + skipp&brocade.com +8245 + Vsovt Ltd. + Eyal Sayag + eyals&vsoft.com +8246 + Cistron Internet Services + Michel Onstein + beheer&cistron.nl +8247 + University of Rostock + Jörg Zerbe + joerg.zerbe&rz.uni-rostock.de +8248 + Turin Networks Inc + Glen Baker + gbaker&turinnetworks.com +8249 + Verein für Computergenealogie e.V. + Jesper Zedlitz + compgen&genealogy.net +8250 + Shire.Net LLC + Chad Leigh + chad&shire.net +8251 + IIGA Co.,Ltd. + Kentaro Fujinuma + fuji&ask.iiga.co.jp +8252 + Digital Strategies, Inc. + Tom Strickland + tstrick&digitalstrategies.com +8253 + AWOLart.com + Art Vossberg + art&AWOLart.com +8254 + Aleph-Null, Inc. + Matthew Butcher + mbutcher1&qwest.net +8255 + Tejas Networks + Kumar SivaRajan + kumar&tejasnetworks.com +8256 + Solution - The Computer People eK + Andreas Lindenblatt + azrael&solution.de +8257 + Southern Ural State University + Anton Voronin + anton&urc.ac.ru +8258 + kuk-Systemprogrammierung + Kay Kaul + kuk&kuk.net +8259 + Banco de Sabadell, S.A. + Pol Navarro + navarrop&bancsabadell.com +8260 + Intrexa Corp + Richard Palmer + richard.palmer&intrexa.com +8261 + MediaCast + Franck Gaubert + franck.gaubert&mediacast.tv +8262 + Wavion + Yuval Shoham + yuval&wavion.com +8263 + Jamby BV + Taco Kampstra + taco&jamby.net +8264 + Recreativos Franco, S.A. + María José Gallardo Herreros + mjose&rfranco.com +8265 + Entrada Networks, Inc. + Russell Forte + rforte&entradanet.com +8266 + Intelnet S.A. 
+ David Beamonte + dsarasa&intelnet.es +8267 + Arula Systems + Prasanna Nageswar + prasanna&arula.com +8268 + Mark Tippetts + Mark Tippetts + bishop&kalima.org +8269 + CJSC Malva + Andrey Fisunenko + contacts&malva.com.ua +8270 + MJL Technology + Oh-Kyu Kwon + okkwon&mjl.com +8271 + Fachhochschule Vorarlberg + Egon Niederacher + niederacher&fh-vorarlberg.ac.at +8272 + Nameplanet Ltd. + Ketil Froyn + ketil&froyn.com +8273 + Standard Communications + Tim Brewer + SoftEng&stdcom.com +8274 + Interadnet + Sean Michaels + smichaels&interadnet.com +8275 + INSA Toulouse + Jean-Marie Kubek + kubek&insa-tlse.fr +8276 + Mpathix + Branko Zatezalo + bzatezalo&mpathix.com +8277 + PSC-ITSolutions.NET + Teuwanda Parker + Teuwanda&hotmail.com +8278 + St. Elisabeth GmbH + Wolfgang Barth + W.Barth&st-elisabeth.de +8279 + f3pu5.net + David P Thomas + dave&outlook.net +8280 + Fiver LightCom Co., Ltd. + Scott Hsieh + scott&FiverLightCom.com.tw +8281 + Movelife + Qi Wang + myshadow&263.net +8282 + BAE Systems Defence Pty Ltd (formerly 'Tenix Defence Systems - Systems Adelaide') + David Boschma + au.ilsupport&baesystems.com +8283 + TSMT - Magic Touch Ltd + Markku Järvinen + mta&magic.fi +8284 + ACTL + Crokaert Pierre + pct&actl.be +8285 + FlexLight + Shiomon Caspi + shimonc&flexlight-networks.com +8286 + Incirco + Anders Johansson + anders.johansson&incirco.com +8287 + Klinikum der Johannes Gutenberg-Universität Mainz + Service Center IT + newtec&unimedizin-mainz.de +8288 + Jung von Matt Werbeagentur GmbH + Stephan Budach + stephan.budach&jvm.de +8289 + Universita' degli Studi di Napoli "Federico II" + Francesco Palmieri + fpalmieri&unina.it +8290 + Certus Technology, Inc. + Jeff Kennedy + jkennedy&certustech.com +8291 + DigiStamp, Inc. + Rick Borgers + rick.borgers&digistamp.com +8292 + Mt. Holyoke College + Ron Peterson + rpeterso&mtholyoke.edu +8293 + Bang Networks, Inc. + Chris Ranch + cranch&bangnetworks.com +8294 + infoman + Brian Bortz + brian&infoman.co.il +8295 + BWA + Willie Alayza + Alayza&breezecom.com +8296 + Dialogos3 s.r.l. + Reinhard Spisser + Reinhard.Spisser&dialogos3.com +8297 + IrisOnLine.com BV + Dhr A.Brussee + a.brussee&irisonline.com +8298 + v/h Nictoglobe + Dhr ing. A.G.A.M.Jacobs + andreas.jacobs&nictoglobe.com +8299 + secunet Security Networks AG + Joerg Marx + joerg.marx&secunet.com +8300 + State of Wisconsin - Network Access Management + Christopher Stotesbery + Christopher.Stotesbery&wisconsin.gov +8301 + Technische Universitaet Darmstadt + Dr. J.Ohrnberger + oid&hrz.tu-darmstadt.de +8302 + EdeNET Communications, Inc. + Steve Burke + steveburke&edenet.com +8303 + Gelco Information Network + Ryan Rager + ryan_rager&gelco.com +8304 + Atomic Blue Bear + Michael Lea + mlea-iana&atomicbluebear.org +8305 + OARnet + Albert School + aschool&oar.net +8306 + Go Local Consulting, Inc. + Igor Fedulov + igor&outlook.net +8307 + tecnetdev + Kevin J. Lamse + kevinl&pssg.com +8308 + GTran Inc. + Larry Gadallah + larry&gtran.com +8309 + Virtela Communications + Jonathan Leech + jleech&virtela.net +8310 + Trak Systems + Ashley Jeffs + ashley&trak.com.au +8311 + Police Information Technology Organization + Simon Gray + simon.gray&pito.pnn.police.uk +8312 + OskolNET JSC + Vladimir Krymov + krymov&oskolnet.ru +8313 + JARDiX AG + Daniel Raedel + dr&jardix.de +8314 + AVL Emission Test Systems GmbH (formerly 'PEUS Systems GmbH') + Michael Speck + Michael.Speck&avl.com +8315 + Minacom Labs Inc.
+ Patrice Gagnon + pgagnon&minacom.com +8316 + Staffwell + Oleg Levchenko + oleg.levchenko&staffwell.com +8317 + FiberCycle Networks + Tom Milner + tom&fibercycle.com +8318 + Twisted Pair Solutions, Inc. + Shaun Botha + shaun.botha&twistpair.com +8319 + Blue Wave Systems + Brian Carr + bcarr&bluews.com +8320 + Vonova Corporation + Michael Stricklin + strickli&vonova.com +8321 + Entidad Acreditadora Ley 19.799 - Gobierno de Chile + Jaime Gré Zegers + jgre&minecon.cl +8322 + Education Networks of America + Robert Francis + rfrancis&ena.com +8323 + Gemteq Software + M. David Minnigerode + minniger&gemteq.com +8324 + The University of Western Australia + Grahame Bowland + grahame&ucs.uwa.edu.au +8325 + Wanadoo Spain + Carles Xavier Munyoz Baldó + carles.munyoz&ctv-jet.com +8326 + Unreal Technology spol. s r.o. + Rostislav Opocensky + orbis&unreal.cz +8327 + IUFM Reims + Mario Dragone + Mario.Dragone&reims.iufm.fr +8328 + GyD Iberica + José Manuel López González + jmanuel.lopez&es.gi-de.com +8329 + iPromoGroup Ltd. + Lubes Haidamaka + lubes&ipromogroup.com +8330 + Omnexus N.V., Amsterdam, Wädenswil Branch + Thorne King + thorne.king&omnexus.com +8331 + Matti Valtuustoon + Arto Mutanen + arto&mattivaltuustoon.com +8332 + université d'Evry Val d'Essonne + Defrance Alain + a.defrance&univ-evry.fr +8333 + GROW.co,.Ltd + Tsuyoshi Yoshizawa + sales&e-0ffice.com +8334 + dynamicsoft Inc. + Srivatsa K Srinivasan + ssrinivasan&dynamicsoft.com +8335 + Université des Sciences Sociales de Toulouse + Fabrice Prigent + Fabrice.Prigent&univ-tlse1.fr +8336 + Superior Consultant Holdings Corporation + Carlo Gagliardi + carlo_gagliardi&superiorconsultant.com +8337 + Hybricon Corp. + Gerry Cahill + gcahill&hybricon.com +8338 + Phonetics, Inc. + Robert Douglass + bdouglass&sensaphone.com +8339 + Young & Laramore + Ben Turney + ben41&hotmail.com +8340 + Image Computing Incorporated + Gary J. Dobek + dobekgj&erols.com +8341 + Utility Data Systems, Inc. + Charles Porter + cporter&udsinc.com +8342 + NTRU Cryptosystems + Ari Singer + asinger&ntru.com +8343 + NEXTRA Czech Republic s.r.o. + Andrew Zhilenko + system&nextra.cz +8344 + Apparate Networks + Jeremy Greene + jeremy&apparatenet.com +8345 + Health Net, Inc. + Ted Wilkinson + ted.wilkinson&health.net +8346 + Université de Limoges - S.C.I. + Laine Jean Pierre + laine&unilim.fr +8347 + TechnoCage, Inc + Caskey Dickson + caskey-iana&technocage.com +8348 + Data Avenue, Inc. + Manish Dharwadker + manish&dataavenue.com +8349 + ConnectScape, Inc. + Manish Dharwadker + manish&connectscape.com +8350 + T-Systems Hungary Kft. + Krisztian Steber + Krisztian.Steber&t-systems.co.hu +8351 + I-Land Internet Services + Chris Kennedy + ckennedy&iland.net +8352 + Pronym S.A.R.L. + M. Ornato + ornato&pronym.com +8353 + Excelsior Systems Limited + Mr. P. J. Grice + pjgrice&excelsys.co.uk +8354 + GIB + Mr. P. J. Grice + pjgrice&glintbill.com +8355 + sarfata's net + Thomas Sarlandie + sarfata&altern.org +8356 + Visual Revolutions + J. Arturo Avila Salazar. + arturo&studiogdl.com +8357 + Mangosoft Corporation + Jim Plummer + jimp&mangosoft.com +8358 + TELOS Technology Inc. + Steve Williams + williams&telostech.com +8359 + IP blue + David Sullivan + dsullivan&ipblue.com +8360 + Mirant Corporation + B. Madderra + bob.madderra&mirant.com +8361 + Portail des IUFM + Kerherve Gweltaz + kerherve&bretagne.iufm.fr +8362 + mSAFE + Rafi Kiel + rkiel&msafe.com +8363 + Store-O-Matic, Inc.
+ Jose Antonio Esquivel + aesquivel&store-o-matic.com +8364 + Lichen Hosting + Dan Mingus + srf3&dana.ucc.nau.edu +8365 + Advanced Information Management Solutions Pty Ltd + Christopher Knight + noc&aims.com.au +8366 + Fiberlink Communications Corp + Ryan Hope + rhope&fiberlink.com +8367 + NationNet.Com + Joe Lesko + admin&nationnet.com +8368 + ServerWerks Inc + Todd Glassey + todd.glassey&serverwerks.cc +8369 + NTIkorea + William Kim + ugie&postech.ac.kr +8370 + NextSet + SaiPrashanth Reddy + sreddi&nextset.com +8371 + AGENDA + Hiroyuki Maruta + maruta&agenda.co.jp +8372 + University of Sussex + Dave Lewney + dml&sussex.ac.uk +8373 + SEAL Systems AG & Co. KG + Stefan Leipold + snmpadmin&sealsystems.de +8374 + DVZ Datenverarbeitungszentrum Mecklenburg-Vorpommern + Ralph Rehbein + R.Rehbein&dvz-mv.de +8375 + Outercurve Technologies + Bill Whitney + bwhitney&outercurve.com +8376 + Université de Bourgogne + Jean-Christophe Basaille + Jean-Christophe.Basaille&u-bourgogne.fr +8377 + ReefEdge, Inc. + Bill Squier + snmp&reefedge.com +8378 + Texas Memory Systems + Justin Haggard + justin&texmemsys.com +8379 + BroadRiver Communications + Steven Dossett + sdossett&broadriver.com +8380 + Mentum Group + Nick Mason + nick.mason&mentumgroup.com +8381 + Certainty Solutions, Inc. + MIB Administrator + mibadmin&certaintysolutions.com +8382 + C&C Engineering, Inc. + Chang-Shik, Choi + tempest&mail.cc.co.kr +8383 + SigmaSoft, Inc. + Thorsten Lockert + tholo&sigmasoft.com +8384 + eBay, Inc. + eBay Directory Services + DirectoryServices&eBay.com +8385 + ENSAM CER de Metz + Régis Gresson + regis.gresson&metz.ensam.fr +8386 + LION bioscience AG + Matthias Helmling + matthias.helmling&lionbioscience.com +8387 + Globe Internet s.r.o. + Jiri Bramburek + jiri.bramburek&globe.cz +8388 + Techniker Krankenkasse + Patrick Agsten + p.agsten&tk-online.net +8389 + Ergon Informatik + Marc Buetikofer + airlock-iana-contact&ergon.ch +8390 + Mettler Toledo + Jeff Griffith + Jeff.Griffith&mt.com +8391 + Bicman Netologies + Bob Bicman + BobBicman&consultant.com +8392 + XMission + Nathan Haugo + nhaugo&xmission.com +8393 + GIMAS mbH + C. Hohn + christian.hohn&topmail.de +8394 + Privador AS + Hardy Viilup + hardy&privador.com +8395 + Tulane University + Tim Deeves + tim&tulane.edu +8396 + virtual earth Gesellschaft für Wissens re/prä sentation mbH + Mathias Picker + Mathias.Picker&virtual-earth.de +8397 + Vecerina + Ivan Vecerina + ivec&mail.com +8398 + University of West Florida + David Hicks + dhicks&uwf.edu +8399 + iVEA Technologies + Soukthavy Sopha + ssopha&ivea.com +8400 + Evercom Systems, Inc. + Brian Waters + bwaters&evercom.net +8401 + FoxTravel + Nik Fox + foxres&btinternet.com +8402 + ePlus, Inc. + Jeff Yost + jyost&eplus.com +8403 + ARA Network Technologies, Co, Ltd. + Yang, Chul-Woong + cwyang&aratech.co.kr +8404 + Akamba Corporation + Jack Smith + jack&akamba.com +8405 + Tohono Consulting + Thomas Hicks + hickst&theriver.com +8406 + Morningstar Systems, Inc. + Mike Oliver + mikeo&morningstarsystems.com +8407 + MyBau.Com + Niall Murray + Niall.Murray&mybau.com +8408 + InterComponentWare AG + Thomas Kock + thomas.kock&intercomponentware.com +8409 + ztevision + Ma Shengcun + cast508&address.com +8410 + Landis & Gyr Communications SAS + Dominique Hérissé + dominique.herisse&landis-gyr.com +8411 + e-Qual + Jerome Thebert + jerome.thebert&e-qual.fr +8412 + Satyam Computer Services ltd + Sudarson + sudarson_pratihar&satyam.com +8413 + GlobalNaps + Chip Ach + chip&gnaps.com +8414 + Cedacri S.p.A. 
+ Fendillo Giovanni + giovanni.fendillo&cedacri.it +8415 + CyberElan LLC + Sanjeev Hirve + shirve&cyberelan.com +8416 + Shaw Cablesystems, G.P. + Nancy Green + internet.engineering&shaw.ca +8417 + StorageApps, Inc. + Jerry Keselman + jerryk&storageapps.com +8418 + listless ennui + Karl Middlebrooks + kdm&listlessennui.com +8419 + Rockwell Collins + Benjamin Haan + Benjamin.Haan&collins.com +8420 + Digi-Sign Certification Services Limited + Jennifer Chung + jennifer.chung&dg-sign.com +8421 + VODTEL COMMUNICATIONS Inc. + Stanley Hsiao + stanleyh&vodtel.com.tw +8422 + Libritas, Inc + Scott Kaplan + scott&libritas.com +8423 + twisd AG + Klaus Reimer + hostmaster&twisd.de +8424 + ICUBE SRL + Marco Bizzarri + m.bizzarri&icube.it +8425 + AP Engines, Inc. + Jack Rourke + jrourke&apengines.com +8426 + Watlow + Leon McNutt + leon_mcnutt&watlow.com +8427 + Worldgate Networks Private Limited + Devdas Bhagat + devdas&worldgatein.net +8428 + Turtle Entertainment GmbH + Bjoern Metzdorf + bm&turtle-entertainment.de +8429 + 2NETFX (Media Now, Inc.DBA 2NETFX) + Joe Carter + jcarter&2netfx.com +8430 + TranSwitch Corporation + Ely Zavin + ely&txc.com +8431 + Broadcasting Interest Enterprise, Inc.Christopher + Adams + president&networkingmenu.com +8432 + TeraGlobal Communications + Guy Cardwell + gcardwell&teraglobal.com +8433 + 3e Technologies International Inc. + Gang Zheng + zheng&3eti.com +8434 + Fox Chase Cancer Center + Stephen A. Felicetti + sa_felicetti&fccc.edu +8435 + HyperEdge Inc. + Tom Fortmann + tfortmann&hyperedge.com +8436 + Requisite Technology + IANA Administrator + iana&requisite.com +8437 + Bolder Internet Group, Inc. + Robert J. Bubobn + rjb&bigi.com +8438 + ReFlex Communications, Inc. + Eugene Wong + eugene&reflexcomm.com +8439 + CADRE Design + Andrew Smith + asmith&cadre.com.au +8440 + Tsuda College + Etsuko Suzuki + suzuki&tsuda.ac.jp +8441 + ShangHai WaiGaoQiao Free Trade Zone network development Co.,Ltd + Yunbin Wu + sunny&netway.net.cn +8442 + Sendtek Corporation + Wien Shao + wien&sendtek.com.tw +8443 + Telisma + Gerard Musset + gmusset&telisma.com +8444 + IIE (Institut d'Informatique d'Entreprise) + Jean-Luc Kors + kors&iie.cnam.fr +8445 + Bilstone Ltd + Ian Steel + ian&bilstone.co.uk +8446 + Barclaycard + Ian Steel + ian.steel&barclaycard.co.uk +8447 + Riverglade Consultants Ltd + Julian Griffiths + j_griffiths&geocities.com +8448 + Avantis GmbH + Ralf Shippert + Ralf.Shippert&avantis.de +8449 + ePUREDATA Inc. + Charles Shong + charless&puredata.com +8450 + Cetacean Networks + Gregory Lyons + glyons&Cetacean.com +8451 + Petri Laitinen + Petri Laitinen + Petri.Laitinen&pp3.inet.fi +8452 + Guernsey.Net Ltd. + Neil de Carteret + ndc&guernsey.net +8453 + npw.net + Philipp Baer + phbaer&npw.net +8454 + Donald E Reynolds + Donald E Reynolds + donreynolds&usa.net +8455 + Old Donkey Enterprises + Robert Jordan + rjordan&bozac.com +8456 + OPTIBASE + Amir Geva + amirgeva&optibase.com +8457 + Deepend Prague + Jakub Skopal + jakub&deepend.cz +8458 + ZOOM Networks Inc. + Mary.M + mmrgood&sina.com +8459 + Shandong WeiFang Beida JadeBird Inc. + Liu Jiancheng + liujc&xinhuanet.com +8460 + University of Illinois at Chicago + Jim O'Leary + joleary&uic.edu +8461 + UFO Solutions Limited + Keith Wan + keith.wan&ufoss.com +8462 + Sonic.Net, Inc. + Nathan Patrick + np&sonic.net +8463 + PEM Electronics + Nathan Patrick + np&sonic.net +8464 + University of Wollongong IEEE Student Branch + Matthew Palmer + mjp16&ieee.uow.edu.au +8465 + LodeSoft Corp. 
+ Zhihong Mao + mao&lodesoft.com +8466 + RAWK Systems, Inc. + Dan Romike + rawksystems&att.net +8467 + La Page Trad + Laurent Daverio + daverio&cri.ensmp.fr +8468 + American Medical Association + Peter Watkins + amair&ama-assn.org +8469 + Domestic WANs + Harald Born + Harald.Born&domestic-wan.de +8470 + Learning Machines, Inc. + Ted Eiles + ted.eiles&learningmachines.com +8471 + Universidad Del Bio-Bio + German Poo + gpoo&ubiobio.cl +8472 + Lynk a division of BOS Ltd + Yehoram B.Y. + yben&boscom.com +8473 + University of Massachusetts Amherst + Daniel Blanchard + blanchard&oit.umass.edu +8474 + Saltec Powerlink + Salman Quadri + info&saltec-powerlink.com +8475 + Veilex + Roberto Soto + rsoto&veilex.com +8476 + Kyuden Infocom Company, Inc. + Osamu Kidou + kidou&qic.co.jp +8477 + CyberTron Software GmbH + Heinrich A. Thurner + h.thurner&cybertron.co.at +8478 + TongTech CO., Ltd, + ChenXu + x_ch&263.net +8479 + now.com + Lam Ka Ming Benjamin + Benjamin.KM.Lam&pccw.com +8480 + Fairleigh Dickinson University + Brian Domenick + brian&fdu.edu +8481 + HiGHKU + Michael Elmore + melmore&highku.com +8482 + Colorado State University + Randy Miotke + Randy.Miotke&colostate.edu +8483 + Trenza, Corp + Kevin C. Coram + kcc&trenza.com +8484 + Allegheny Energy + Joel Critchfield + jcritch&alleghenyenergy.com +8485 + Virtual Software, S.L. + Jose Miguel Rodriguez + jmiguel&virtualsw.es +8486 + City-Net Tech. Inc. + Chris Chang + chris_chang&citynetek.com +8487 + MightyView, Inc. + Arnie Shimo + arnie.shimo&mightyview.com +8488 + University of Alabama + John Watters + John.Watters&UA.EDU +8489 + Pico Technology Ltd + Mike Green + mike&picotech.com +8490 + Direct Commerce + Jeffrey Baker + jwb&directcommerce.com +8491 + drzyzgula.org + Robert Drzyzgula + bob&drzyzgula.org +8492 + Iscanet Internet Services + Rocco Lucia + rlucia&iscanet.com +8493 + Web Office China + Jiang Kunping + szjkp&163.net +8494 + MOSAIC SOFTWARE AG + Marco Nietz + sysadm&mosaic-ag.com +8495 + MILESYS + Vincent Moynot + vincent.moynot&milesys.fr +8496 + Facilitair bedrijf, Hogeschool van Utrecht + Rene van Valkenburg + r.vanvalkenburg&fab.hvu.nl +8497 + Deluxe Video Services Ltd + DJ Adams + dj_adams&gmx.net +8498 + Akumiitti Ltd + Kai Rauha + Kai.Rauha&akumiitti.com +8499 + Eduserv + Owen Cliffe + occ&eduserv.ac.uk +8500 + Dr. Nagler & Cie. Consult GmbH + Rainer Pruy + Rainer.Pruy&Nagler-Consult.com +8501 + ANTEL - Operations + Carlos Martinez + carlosm&antel.net.uy +8502 + Internet Park, Inc. + Nick Thompson + nick&ipark.com +8503 + Unicess Networks + Jay Park + JayPark&unicess.com +8504 + Sparzo Enterprises + Darrel Sparzo + dsparzo&home.com +8505 + SKYTEC AG + Valentin Hilbig + valentin.hilbig&skytec-ag.de +8506 + Ximian, Inc. + Chris Toshok + toshok&ximian.com +8507 + CyberTrails, LLC + Shawn Ostapuk + shawn.ostapuk&cybertrails.net +8508 + Final Piece + Jeffrey Truong + jeff&finalpiece.com +8509 + ViNAWARA Co.,Ltd. + JIwon, Jeong + eek&medialincs.com +8510 + Yunbj Co.,Ltd. + Yun Byoung-jo + yunbj&medialincs.com +8511 + ThoughtWorks, Inc. + Barrow Kwan + bhkwan&thoughtworks.com +8512 + Capgemini Telecom Media & Networks Sweden AB + Carl Bergudden + carl.bergudden&capgemini.se +8513 + Katelco + Alex Shamanaev + alex&katelco.com +8514 + Sonocomp GmbH + Klaus-Dieter Lorig + kdlorig&sonocomp.de +8515 + Deonet Co., Ltd. 
+ HyeonJae Choi + hjchoi&deonet.co.kr +8516 + Proxion Org + Andreas Herr + herr&proxion.de +8517 + APL/Software + Helge Schilling + helge.schilling&sos-berlin.com +8518 + JGiovatto + Joe Giovatto + joegio&hotmail.com +8519 + Trevalon, Inc. + Jack Heller + jack&trevalon.com +8520 + Winterlink Inc + Eric Wilkison + ericw&winterlink.net +8521 + Bytemobile, Inc. + Chye Lin Chee + chyelin&bytemobile.com +8522 + Robot Accomplice Software & IT Solutions (formerly 'Machinebuilt Software, Inc.') + Jonathan Machen + jonathan.machen&robotaccomplice.com +8523 + Petrasoft Inc + Mohamad F El-Bawab + melbawab&petrasoft-inc.com +8524 + Cybercafe + Md. Humayun Kabir Chowdhury + humayunkc&yahoo.com +8525 + Paula's Home Creations + Paula Evans + pevans914&aol.com +8526 + Consiglio Nazionale del Notariato + Pasquale Starace + webmaster&notariato.it +8527 + Harvard MIT Data Center + Leonard Wisniewski + linux&lists.hmdc.harvard.edu +8528 + HeyAnita Inc. + Mandar Wadekar + MandarW&heyanita.com +8529 + Salira Optical Network Systems, Inc. + Faye Ly + faye&SALIRA.com +8530 + MomsDesk Commerce Corporation + Gopal Krishnan + gopalk&momsdesk.com +8531 + SyncWorks + James Lorenzo + jlorenzo&uswest.net +8532 + NANYA Technology Corp. + Liu, Cheng Hsien + oscarliu&ntc.com.tw +8533 + Snapshield + Danny Schaffer + danny.schaffer&snapshield.com +8534 + HiHat Enterprise Solutions + Michael McKibben + mike&hihat.net +8535 + Nekhem Technologies + Andrea Fanfani + admin&mixad.it +8536 + Datachorus + J.Y.Koh + jykoh&datachorus.com +8537 + Turbo NetWorks Co., Ltd. + Daniel Wang or Dennis Tseng + jht&turbonetworks.com.tw +8538 + Saxonia Systems AG + Ricardo Graf + ricardo.graf&saxsys.de +8539 + Institute "Norilskproject" + Eugene A. Doudine + dudin&np.nk.nornik.ru +8540 + LCPC + Karsenti Yves + Yves.Karsenti&lcpc.fr +8541 + Interactive Media S.r.l. + Claudio Spallaccini + cspallaccini&imnet.it +8542 + Sandial Systems, Inc. + Edward G. Rolfe + erolfe&sandial.com +8543 + Global TeleLink Services, Inc. + Toush Hy + thy&gtsgateway.com +8544 + SmallBuddha.org + David Berkman + dmberkma&pacbell.net +8545 + HTL Steyr + Franz Parzer + Franz.Parzer&htl-steyr.ac.at +8546 + Coleebris + Christophe Sollet + csollet&coleebris.com +8547 + Mississippi State University + Frank Peters + fwp&its.msstate.edu +8548 + South Suburban College + John McCormack + jmccormack&ssc.cc.il.us +8549 + Cylant Technology + Cory Stone + cory&cylant.com +8550 + Hatteras Networks + Richard Williams + rwilliams&hatterasnetworks.com +8551 + Yotta Networks, Inc + Philippe Tulula + ptulula&yottanetworks.com +8552 + TechTracker.com + Ladd Van Tol + lvantol&techtracker.com +8553 + Subtheory + Steven Cannon + cannon&subtheory.com +8554 + Departement Elektrotechnik, ETH Zuerich + Simon Moser + smoser&ee.ethz.ch +8555 + Digitalwave, Inc + Kyungran Kang + krkang&digitalwave.co.kr +8556 + Bank MENATEP SPb, Samara + Sergey Kurilkin + kurilkin&samara.menatepspb.com +8557 + Globaloop Ltd. + Oded + gmahlev&globaloop.com +8558 + Gimnazija Bezigrad Ljubljana + Matjaz Filo + mali&gimb.org +8559 + Coconut + Matt Braid + matt.braid&coconut.co.uk +8560 + Cleverlance s.r.o.
+ Michal Palicka + michal.palicka&cleverlance.com +8561 + Hotsip + Patrik Granholm + patrik.granholm&hotsip.com +8562 + Platinet Communications + Guy Weintraub + guyw&platinet.com +8563 + SSE + Jouko Sormunen + Jouko.Sormunen&sse.fi +8564 + CTI2 + Ron Avriel + ron&cti2.com +8565 + Medical Research Laboratories + Evert Carton + evert.carton&mrl-crl.com +8566 + Lasse Hillerøe Petersen + Lasse Hillerøe Petersen + lhp&toft-hp.dk +8567 + Active Power, Inc. + Mark Hill + mhill&activepower.com +8568 + VirtualTek Corporation + Frank Kim + frankk&joydesk.com +8569 + iXON + CC Wong + ip&ixongroup.com +8570 + Enonic + Vetle Roeim + vro&enonic.com +8571 + ConnectSuite + Philippe Le Borgne + pleborgne&connectsuite.com +8572 + TDS Informationstechnologie AG + Thomas Gebhardt + Thomas.Gebhardt&de.tds-global.com +8573 + S4i + Ronald Kraaijer + ron&s4i.be +8574 + SEAS LLC + Patrick Adlam + padlam&purdue.edu +8575 + ManageIT Company + George Kovachev + gio-k&bynet.co.il +8576 + Flowstone, Inc. + Jeff Medcalf + medcalf&flowstone.net +8577 + Axiowave Networks + Divya Prakash + dprakash&axiowave.com +8578 + People.Com, Inc, + Suresh Dussa + sureshd123&yahoo.com +8579 + Operadora Protel, SA de CV + Eduardo Vázquez Hernández + evazquez&protel.net.mx +8580 + Tellme Networks, Inc. + Danny Howard + entservers&tellme.com +8581 + Oy Comsel System Ab Kristian + Heimonen + kristian.heimonen&comsel.com +8582 + Netgene Tech. Inc. + Jay C., Kim + jayckim&netgenetech.com +8583 + Kabelfoon BV + Hans Hoppe + beheer&kabelfoon.nl +8584 + Sorbonne + Jean-Pierre Le Moan + lemoan&siris.sorbonne.fr +8585 + Saab Deutschland GmbH + Christian Ries + christian.ries&saabgroup.com +8586 + MODCOMP GmbH + Frank Lange + langef&modcomp.de +8587 + WiredMinds Informationssysteme GmbH + Markus Mueller + Markus.Mueller&WiredMinds.de +8588 + Munich Re Insurance AG + Beate Utz + BUtz&munichre.com +8589 + Framesoft AG Software Applications + Markus Buettner + markus.buettner&framesoft.com +8590 + Okena, Inc. + Ashok Nadkarni + ashok&okena.com +8591 + ManSoft + Mark Norman + markn&btinternet.com +8592 + CGI + OnDuty Tech + noccgi.si&cgi.com +8593 + WebTone Technologies + Samuel K. Mooney + smooney&webtonetech.com +8594 + Telia eTelia A/S + Troels Larsen + trl&etelia.dk +8595 + HITRON Technology, Inc. + Susan Wang + susanwang_BL&yahoo.com.tw +8596 + NEUSOFT CO.,LTD + Shi Lei + shil&neu-alpine.com +8597 + Access Solutions International Corporation + Mr. Tim Shee + tshee&accesssolutions.com.tw +8598 + Digital Diagnostic Imaging + Doug Mansell + doug.mansell&filmlessfuture.com +8599 + Velankani Information Systems Ltd. + Watsh Rajneesh + wrajneesh&velankani.com +8600 + Linux Generation B.V. 
+ Robert van der Meulen + rvdm&lin-gen.com +8601 + BellSouth Customer Technologies + Patrick MacDonald + patrick.macdonald&corp.bellsouth.net +8602 + Qarana Solutions + Jose Luis Casas + jlcasas&qarana.com +8603 + Flarion Technologies + Patrick Hsu + p.hsu&flarion.com +8604 + Leapfrog Technologies LLC + Jimmy Cone + domains&bitstreet.net +8605 + Wireless Knowledge + Christopher Kavas + ckavas&wirelessknowledge.com +8606 + Vancouver School District # 37 + Jeff Samuelsen + jsamuels&vansd.org +8607 + Portland Internetworks + David Giller + dave&pdx.net +8608 + Cini Systems + Frank Cini + frank&cinisystems.com +8609 + Newsradar Deutschland + Martin Virtel + virtel&gmx.net +8610 + Snmp Everywhere + Arif Shouqi + shouqi&hotmail.com +8611 + VSoft + Bert Huijben + bert&vsoft.nl +8612 + Toe Technology Company Limited + David Shiu + david&toetec.com +8613 + HealthNetwork Systems + Bill Meyer + bmeyer&hns-net.com +8614 + Roxen Internet Software + Honza Petrous + hop&roxen.com +8615 + BioWare Corp. + Dave Hamel + postmaster&bioware.com +8616 + Witt Family + Wesley Witt + wesw&wittfamily.com +8617 + ATYF + Ricardo de Labra + rlabra&atyf.com +8618 + iProperty.com + Brian Chase + hostmaster&iproperty.com +8619 + David Hopwood Network Security + David Hopwood + hopwood&zetnet.co.uk +8620 + AKO + Kevin Lee + wblee&ako.net +8621 + Excido Pty Ltd + David Parrish + dparrish&excido.com +8622 + Satyam Infoway Limited + Sankaranarayanan S + sankar_s&satyam-infoway.com +8623 + RadioMobil a.s. + Marek Uher + ITWebAdmin&RadioMobil.cz +8624 + Denis Neuhart Computing + Denis Neuhart + dneuhart&aol.com +8625 + The Lifetime Healthcare Companies + Thomas Luce + thomas.luce&excellus.com +8626 + Club Internet + Laurent T. Abiteboul + lta&t-online.fr +8627 + Ailis + Klaus Reimer + k&ailis.de +8628 + Arkoon Network Security + Daniel Fages + dfages&arkoon.net +8629 + Fireclick + SNMP Administrator + snmp&fireclick.com +8630 + MPI tech + Erich Martin + emartin&mpisa.francenet.fr +8631 + MeshNetworks, Inc. + Donald Joslyn + DJoslyn&MeshNetworks.com +8632 + Winphoria Networks + Aashu Virmani + avirmani&winphoria.com +8633 + Atmos Energy Corporation + D. Dante Lorenso + dante.lorenso&atmosenergy.com +8634 + The ZaneRay Group, Inc. + Reed Gregerson + reed&zaneray.com +8635 + Rob Semenoff Enterprises + Robert Semenoff + semenoff&yahoo.com +8636 + Planet Pratt + Chris Pratt + chris&planetpratt.com +8637 + Amphus, Inc. + Jason Kurashige + jkurashige&hus.com +8638 + RouteScience Technologies, Inc. + Aspi Siganporia + aspi&speedtrak.com +8639 + Polytrust AB + Alexander Bottema + alexander.bottema&polytrust.com +8640 + Santam Limited + Network Department + root&santam.co.za +8641 + 7441.com + Willem Labuschagne + willem&7441.com +8642 + EnderUNIX + Bâkır EMRE + emre&enderunix.org +8643 + The Math Forum + Alex Vorobiev + sasha&mathforum.com +8644 + Anoto + Mattias Levin + mattias.levin&anoto.com +8645 + InfiniSwitch Corporation + Bill Anderson + banderson&infiniswitch.com +8646 + Scottish Qualifications Authority + Tony Douglas + tony.douglas&sqa.org.uk +8647 + Winbox.com + Pierre Cobbaert + cobby&winbox.com +8648 + Renaissance Technologies Corp. + Karsten Kuenne + kuenne&rentec.com +8649 + Pinnacle Technology, Inc. + Dave Johnson + davej&pinnaclet.com +8650 + Apriva, Inc. 
+ Mike Klingen + mklingen&apriva.com +8651 + Heart Institute (InCor), University of São Paulo Medical School + Sergio Shiguemi Furuie + sergio.furuie&incor.usp.br +8652 + Uppsala University + Pål Axelsson + Pal.Axelsson&its.uu.se +8653 + unassigned + ---none--- + ---none--- +8654 + INTRINsec + Laurent Genier + lgr&intrinsec.com +8655 + Apcentric Limited + PJ Worrall + pj.worrall&apcentric.com +8656 + DCM Online Limited + PJ Worrall + pj.worrall&apcentric.com +8657 + Hochschulrechenzentrum, Fachhochschule fuer Wirtschaft und Technik(FHTW) Berlin + David Lichteblau + lichtebl&fhtw-berlin.de +8658 + Empresa Nacional de Certificacion Electronica + Jorge Rojas M. + jrojas&ccs.cl +8659 + Asia e-publications Ltd. + William Chung + andy0202&netvigator.com +8660 + Telaid + Mario Tito + mtito&telaid.com +8661 + University of Utah College of Engineering CADE Lab + Steven Barrus + sbarrus&eng.utah.edu +8662 + University of Louisville + Keith Stevenson + keith.stevenson&louisville.edu +8663 + Bermuda Triangle Services Ltd + Gerrit E.G. Hobbelt + i_a&bermuda-triangle.net +8664 + Akeena, Inc. + Chris Pratt + chris&planetpratt.com +8665 + Bruce Technical Services + Scott Bruce + scott&technomystic.org +8666 + FirstPeer + Mark Boyns + boyns&firstpeer.com +8667 + Textme.org Ltd + Lee Bolding + lee.bolding&textme.org +8668 + Eli Lilly and Company + Jason . P. Grimmer + grimmer_jason_p&lilly.com +8669 + Teracom Telecomunicações Ltda + Ivan Valter Basilio + nelson&burti.com.br +8670 + Polyester Media + Roberto Grandillo + roberto&polyester.com +8671 + ETI + Fernand Lussier + fernand.lussier&eti-gaming.com +8672 + Farmers Insurance + Tom Chang + tom.chang&farmersinsurance.com +8673 + Borderware Technologies Inc. + David Bell + db&borderware.com +8674 + Java Secure + Jeff Samuelsen + oidadmin&JavaSecure.com +8675 + Autodesk, Inc. + Samir Bajaj + samir.bajaj&autodesk.com +8676 + Transparent + Warner Chan + wchan&transparentoptical.com +8677 + ISDN-Net, Inc. + Timothy Stinson + tws&isdn.net +8678 + Intrusion.com + Joel R. Brown + jbrown&intrusion.com +8679 + Atlas Copco Airpower + Meel Erik + erik.meel&atlascopco.be +8680 + Open Roads Consulting Inc. + David Robison + DRRobison&OpenRoadsConsulting.com +8681 + Free Speech Media LLC + Lynn Winebarger + webmaster&freespeech.org +8682 + UltraSecure.com + Oliver Bode + oliver&ultrasecure.com +8683 + Integrity Systems Ltd. + Ilia Bunin + ilia_b&integrity-sys.com +8684 + frd.net + Fredrik Reuterswärd + fr&frd.net +8685 + Habeebee + Fredrik Wahlberg + fredrik&habeebee.com +8686 + Personal Evaluation + Jed Voller + jed&peiasap.com +8687 + Milestone R/D Labs + Marc Rassbach + marc&milestonerdl.com +8688 + Shepherd-Express + Doug Hissom + doug&shepherd-express.com +8689 + Miller Simon McGinn and Clark + Tom McGinn + tmcginn&milbizlaw.com +8690 + IPAGEON Co.Ltd + Jonghoon, kim + p_enms&ipageon.com +8691 + Moxa Technologies Co., Ltd. + Cindy Hung + cindy_hung&moxa.com.tw +8692 + Network Programs + Madhu Bajpai + snmp&npi.stpn.soft.net +8693 + A-xell Wireless AB (formerly 'Avitec AB') + Martin Hancock + martin.hancock&axellwireless.com +8694 + University of Wolverhampton + Max Caines + Max.Caines&wlv.ac.uk +8695 + Mission Data + Paul Kieckhefer + paulk&missiondata.com +8696 + myCustoms + Jeff Flanigan + jflanigan&mycustoms.com +8697 + Raiffeisen Informatik GmbH + Hostmaster + hostmaster&r-it.at +8698 + Viasat, Inc. 
+ Nick Kominus + nick.kominus&atl.viasat.com +8699 + iWay Software + Richard Beck + dick_beck&iwaysoftware.com +8700 + World Savings + Adam Burford + aburford341&worldsavings.com +8701 + Intelligent Information Systems + Jay Lyerly + jayl&renewal-iis.com +8702 + Noncyclic Networks + Ted Tickell + tickell&noncyclic.net +8703 + Voyus Canada Inc. + Applications Development + ldap-administrator&voyus.com +8704 + Sakhalinsvyaz JSC + Dmitry Lebkov + admin&sakhalin.ru +8705 + Israel Local Authorities Data Processing Center LTD + Daniel Korem + danny&ladpc.co.il +8706 + Unassigned + Removed 2006-09-07 + ---none--- +8707 + SpiderNet Services Ltd + Ranko Zivojnovic + ranko&spidernet.net +8708 + Lumentis AB + Per Borg + per.borg&infinera.com +8709 + Landesamt für Besoldung und Versorgung NRW + Guido Sawatzky + guido.sawatzky&lbv.nrw.de +8710 + Envoy Networks + Brian Shimkin + bshimkin&envoynetworks.com +8711 + THALES Broadcast & Multimedia + Dat-son Nguyen + dat-son.nguyen&thomcast.thomson-csf.com +8712 + LANcope, Inc. + John Jerrim + jjerrim&lancope.com +8713 + Abako Media Oy + Hannu Taskinen + hannu.taskinen&abako.fi +8714 + iNOC, Inc. + Rick Smith + rick&internetnoc.com +8715 + Health Sciences Centre + Mauricio Mejia + mmejia&hsc.mb.ca +8716 + Triveni Digital, Inc. + Andrew Selder + aselder&trivenidigital.com +8717 + David Mistretta + David Mistretta + david_mistretta&yahoo.com +8718 + Gwent Consultancy + A. T. Butler + butlerat&gwent-consultancy.com +8719 + Raviant Networks, Inc. + Richard H. Gumpertz + IANA&Rick.Gumpertz.com +8720 + Zack Systems, Inc. + Cody Sherr + cody&zack.com +8721 + Ingram Technology Limited + Bernard Ingram + bingram&ingram-technology.com +8722 + etoolbelt.net + Steve Belt + sebelt&pacbell.net +8723 + Integrated Data Pty Ltd + Mr Peter Stoneley + peters&s055.aone.net.au +8724 + Human Technology + Jeehoon Song + jhsong1&humantel.com +8725 + Canberra Institute of Technology + Scott J McDonald + scott.mcdonald&cit.act.edu.au +8726 + Incisive Designs + Sean O'Grady + sean.ogrady&sheridanc.on.ca +8727 + Torry Harris Business Solutions Thirunavukarasu + S + s_thiru&thbs.com +8728 + Computer Network Information Center, Chinese Academy of Sciences + Nan Kai + nankai&sdb.ac.cn +8729 + LETEK Communications, Inc. + Lee Deugju + djlee&letek.com +8730 + Serck Controls Ltd. + Robert Bradford + rbradford&serck-controls.co.uk +8731 + Pantor Engineering AB + Ola Arvidson + ola.arvidson&pantor.com +8732 + Tokyo Institute of Technology + Katsuyoshi Iida + iida&gsic.titech.ac.jp +8733 + Ambient Computing, Inc. + Joseph Evans + evans&ambientcomputing.com +8734 + Matthias Wimmer + Matthias Wimmer + mail&matthias-wimmer.de +8735 + Quark Inc. + David Fowler + dfowler&quark.com +8736 + MetaVector Technologies + Jos Huybrighs + jos.huybrighs&metavectortech.com +8737 + SecureNet GmbH - Intranet & Internet Solutions + Knut Sander + knut.sander&secure-net.de +8738 + Pirelli Informatica S.p.A. + Marco Micci + marco.micci&pirelli.com +8739 + KAPTECH + Xavier Boemare + xboemare&kaptech.com +8740 + KOM Networks + Adrian Cho + adrianc&komnetworks.com +8741 + SonicWALL, Inc. + Susan Yan + susany&sonicwall.com +8742 + Alphion + Murali Sampath + msampath&alphion.com +8743 + RadioFrame Networks + Don Messenger + don&radioframenetworks.com +8744 + Colubris Networks Inc. 
+ Eric Perie + Eric.Perie&colubris.com +8745 + Informática para todos, SA de CV + Eduardo Vázquez Hernández + evazquez&technologist.com +8746 + Queensland University of Technology + Alan Agnew + qut-domain-admin&qut.edu.au +8747 + Libelle AG + Hans-Joachim Krueger + hkrueger&libelle.com +8748 + LOQUENDO SpA + Piovano Luciano + Luciano.Piovano&LOQUENDO.com +8749 + TELCOM ITALIA LAB + Galliano Sergio + Sergio.Galliano&CSELT.it +8750 + Bucknell University + Chris Weber + systems&bucknell.edu +8751 + Geodesic Systems + John W. Marland + jwm&geodesic.com +8752 + Kernel + Park Keun O + lastnite&dreamwiz.com +8753 + Dataflow Alaska, Inc. + Eric Hutchins/Keif Mayers + kmayers&dataflowalaska.com +8754 + XouL + Danilo Aghemo + danilo&aghemo.com +8755 + Hebrew University Computation Center + Helen Zommer + helen&cc.huji.ac.il +8756 + Celltick + Oren Zamir + orenz&celltick.com +8757 + Feather Mobile Systems + Amir Kirsh + amir&feathersys.com +8758 + Walnut Concepts + Martin Torrella + tinram&hotmail.com +8759 + LG Innotek Co., Ltd. + Seoncheol Cho + sccho&lginnotek.com +8760 + NEITech + Yang Dayong + dyyang&netcore.com.cn +8761 + Zavod za varnostne tehnologije informacijske druzbe in elektronsko + poslovanje, SETCCE Tomaz Klobucar + centre&setcce.org +8762 + SI-CA + Tomaz Klobucar + centre&setcce.org +8763 + eIQnetworks + Prakash Reddy + jreddy&sitehosting.com +8764 + KB Electronics Ltd + Daniel Hebert + d.hebert&kbe.ns.ca +8765 + Telesoft Technologies Ltd + John Townsend + jtownsend&telesoft-technologies.com +8766 + Elsag S.p.A. + Giuseppe Ghiorzi + giuseppe.ghiorzi&elsag.it +8767 + Bayour.COM + Turbo Fredriksson + turbo&bayour.com +8768 + Nevion (formerly 'Network Electronics') + Jan Helgesen + jihelgesen&nevion.com +8769 + TeraOptic Networks, Inc. + Ashok Ranganath + terry&teraoptic.com +8770 + The Emerginggroup Group + Qiqi Dong + qdong&emerginggroup.com +8771 + ARCANVS, Inc. + Todd Romney + todd.romney&arcanvs.com +8772 + e-Manufacturing Networks Inc. + Stephen Lane-Smith + splane&e-mfg.net +8773 + CHINA TEXTILE NETWORK CO.,LTD. + Liu Peng + lpeng&ml.ctei.gov.cn +8774 + Universal Scientific Industrial Co., Ltd. + Black Su + blacksu&ms.usi.com.tw +8775 + BeST (Business Execution-Solutions & Technology) Dave + Dave Bean + daveb&bestholdings.com +8776 + Zman Tikshuv + Ofer Porat + porat&zt.co.il +8777 + aQute + Peter Kriens + Peter.Kriens&aQute.se +8778 + SYAC + Piergiorgio Menia + piergiorgio.menia&com.area.trieste.it +8779 + Telespazio + Marcello De Carolis + marcello_decarolis&telespazio.it +8780 + ip-connect GmbH + Sven Flossmann + sflo&ip-connect.de +8781 + ChamberSign + Mr. Hylko Oosterloo + oosterloo&eurochambres.be +8782 + Keutel + Jochen Keutel + jochen&keutel.de +8783 + akella.org + Mani Akella + makella&warwick.net +8784 + BroadQuay Consultancy Ltd. + Greg Mitchell + iana&broadquay.com +8785 + Guay Internet + Inaki Santamaria + isantamaria&guay.com +8786 + SecureOps Inc. + Patrick Ethier + patrick&secureops.com +8787 + Ivrnet Inc. + Roland Hordos + r.hordos&ivrnet.com +8788 + CyberGate Internet Services + Randy Doran + rtdoran&valueweb.com +8789 + PreNet Corporation + Maria Webster + mwebster&prenet.net +8790 + Prisa Networks + Don Deel + don&prisa.com +8791 + ivv GmbH + Markus-Alexander Matthe + markus-alexander.matthe&ivv.de +8792 + Axes India Ltd + N.SankarNarayanan + nsn_axes&yahoo.com +8793 + Realize IT GmbH + Alain Schneble + a.s&realize.ch +8794 + eYak Inc. 
+ Margaret Hannemann + mhannemann&eyak.com +8795 + Inteliguard + Michael Donahue + mike.donahue&inteliguard.com +8796 + Gestion del Conocimiento S.A. Miquel + Bonastre + suport&gec.gecsa.com +8797 + Mannesmann Arcor AG & Co + Andreas Berger + Andreas.Berger&arcor.net +8798 + IT-Management & Solution GmbH + Ahmet Sahin + s.ahmet&itms-online.de +8799 + cypherOptics + Mauro Zallocco + mzallocc&yahoo.com +8800 + YH Consulting + Robert Ellis + r.ellis&snet.net +8801 + SenaReider + John Sturgeon + john.sturgeon&senareider.com +8802 + Yehti, Inc. + Ryan Addams + raddams&yehti.com +8803 + Thuridion + David Van Wagner + davevw&thuridion.com +8804 + Kirana Networks + Jatinder Bali + jbali&kirananetworks.com +8805 + Kessler-Huron Computer Systems + Tom Chang + khcs&hotmail.com +8806 + Gluon Networks + Bruno Rossi + bruno.rossi&gluonnetworks.com +8807 + Finestra Software + Rick Rasmussen + engnums&finestra.net +8808 + Xi'an Xinli Network System Technology Co.,Ltd + Xu Jing + xuj&xinli.com.cn +8809 + Tomorrow Software + Yangwen Chan + cywforce&163.net +8810 + Artigas Computer + Raul B. Heiduk + rh&pobox.com +8811 + Prompt2U + Orna Shechter + Orna&Prompt2U.com +8812 + ITBS OnLine + Floriano Conte + Floriano.Conte&itbs.fr +8813 + Skyline Communications + Ben. Vandenberghe + Ben.Vandenberghe&Skyline.be +8814 + Funcom NV + Hans Terje Bakke + hanstb&funcom.com +8815 + Collectively Sharper + Jonathan Henderson + jonathan.henderson&magi-europe.com +8816 + getyourcar AG + Siegmund Gorr + gorr&getyourcar.de +8817 + Websemantix + Olivier Swedor + olivier.swedor&websemantix.com +8818 + SandCherry Networks, Inc. + Stephen Earl + searl&sandcherry.com +8819 + Raytion + Markus Strickler + Markus.Strickler&raytion.com +8820 + sysTime-solutions AG + Eric Weiss + weiss&systime-solutions.de +8821 + Phi Kappa Theta Fraternity - Gamma Tau Chapter + Ivan Raikov + gte085h&prism.gatech.edu +8822 + Linuxcare, Inc. + Richard Childers + childers&linuxcare.com +8823 + I.NET S.p.A. + Marco Negri + marco.negri&inet.it +8824 + Wizzy + Andy Rabagliati + andyr&wizzy.com +8825 + DNX Communications + Weston Bustraan + weston&itdonline.net +8826 + ComNet Software Specialists,Inc + Larry Richardson + lrichardson&38.157.105.100 +8827 + Nu-kote International + Bill White + whiteb&nukote.com +8828 + General Dynamics - Mission Systems + Mike Smith + mike.smith&gd-ms.com +8829 + Axell Wireless Limited (formerly 'Aerial Facilities Limited') + Martin Hancock + martin.hancock&axellwireless.com +8830 + Xyphius Solutions Inc. + Joe Chen + joec&xyphius.com +8831 + Agora Systems Ltd + Sigurd Hogsbro + sigurd&agorasystems.co.uk +8832 + Ecominds Ltd + Sigurd Hogsbro + sigurd&ecominds.com +8833 + ISPMan + Ghaffar Atif + aghaffar&ispman.org +8834 + Cognet Corp. + Eyal Yechieli + eyal&cognet.co.il +8835 + Wizard Productions + Mike Smith + snmp&arch-mage.com +8836 + AUUG Incorporated + David Purdue + David.Purdue&auug.org.au +8837 + AscenVision + ChinMing Kuo + cmk&ascenvision.com +8838 + ACS Internet, Inc. + Royce Williams + dnstech&acsalaska.net +8839 + Microtest + Eugene Prigorodov + eprigorodov&microtest.ru +8840 + MBridge Systems Inc. + Jaeyeong Lee + jylee&mbridge.co.kr +8841 + InterCom International Communications Ltd.
+ Attila Soos + sa&intercom.hu +8842 + Clockwork Web + Mark Himsley + markh&clockworkweb.com +8843 + INACCESS NETWORKS SA + Vassilis Nellas + vnellas&inaccessnetworks.com +8844 + Diveo Broadband Networks + Rubens Gomes + rgomes&diveo.net.br +8845 + Enablence USA FTTX Networks Inc (formerly 'wave7optics') + Maria Osterholt Cown + maria.cown&enablence.com +8846 + Caramail + Galliot Guillaume + guillaume&caramail.fr +8847 + Pivotech Systems, Inc + Larry Kong + lkong&pivotech.com +8848 + Rootroute Research + OID PEN Administrator + pen8848&rootr.net +8849 + LPMD.ORG + John Cosimano + ldap&lpmd.org +8850 + Cyradis Technology Group Inc. + John David Allamby + allambyjd&cyradis.com +8851 + Ultra Enterprises + John Fleming + jf147&prism.gatech.edu +8852 + AARNet Pty Ltd + Glen Turner + glen.turner&aarnet.edu.au +8853 + A-Trust + Franz Brandl + f.brandl&a-trust.at +8854 + INTENS IT + Simon Obstbaum + so&intensit.de +8855 + Intelsis Sistemas Inteligentes S.A. Cesar + Veiga + cveiga&intelsis.es +8856 + IdecNet SA + Juan Ignacio Jimenez + natz&idecnet.com +8857 + Nauticus Networks Inc. + Karen Wise + kwise&nauticusnet.com +8858 + destef.com Software + Jason DeStefano + destef&destef.com +8859 + Private Business Inc. + Byron McClain + security&privatebusiness.com +8860 + Holcim Group Support Ltd. + Niels Carstensen + Niels.Carstensen&hmc.ch +8861 + Secorvo Security Consulting GmbH + Hans-Joachim Knobloch + hans-joachim.knobloch&secorvo.de +8862 + AppWired, Inc. + Layton Freeman + layton.freeman&appwired.com +8863 + Etiers International Inc + Tim O'Callaghan + tim&etiers.com +8864 + Dystopium.com + Matthew Gamble + mgamble&dystopium.com +8865 + FAST Search and Transfer + Morten Hermanrud + mhe&fast.no +8866 + GeoTEC Internet + Chebon Huber + staff&geotec.net +8867 + StoreAge Networking Technologies + Nelson Nahum + nnahum&store-age.com +8868 + Velos, Inc. + Rajeev Kalathil + rkalathil&velos.com +8869 + Coscend Corporation + Tobias Crawley + tobias&coscend.com +8870 + KINS(Knowlege and Information Net for Sharing) Lab. + Yoon-Won Lee + kelovon&kdb.snu.ac.kr +8871 + Youngblood Technologies + Keith Youngblood + keithyoungblood&gmail.com +8872 + Saintjo + J.Alvarez + jalvarez&instit-st-jo.asso.fr +8873 + JAL INFORMATION TECHNOLOGY CO.,LTD. + Nobuyoshi Kasahara + nobuyoshi.kasahara&jalinfotec.co.jp +8874 + Utt Technologies + James Hongzhong Zhang + hzzhang&online.sh.cn +8875 + Columbitech AB + Patrik Westin + patrik.westin&columbitech.com +8876 + willeke.com + James Willeke + jim&willeke.com +8877 + Suntail + Graham Shaw + grahams&suntail.com +8878 + Firmenich SA + Thierry Baillif + baillif&firmenich.com +8879 + Zetac Limited + Mr. Phil Hedger + phil.hedger&randomx.demon.co.uk +8880 + IDENTEC Solutions AG + Peter Schuster + pschuster&identecsolutions.com +8881 + Trust Italia S.p.a. + Libero Marconi + lmarconi&trustitalia.it +8882 + Empower Interactive Group Limited + Ciaran Flynn + ciaran&eigroup.com +8883 + Moonlight Systems + Eric Winner + eric&moonlight.com +8884 + Versant Corporation + Andreas Renner + arenner&versant.com +8885 + Internet Security One Ltd. + Zhao,Liang + zhaol&is-one.net +8886 + Beijing Raisecom Scientific & Technology Development Co., Ltd. + Gao Lei + szf&bupt.edu.cn +8887 + VASoft (Pty) Ltd. + Vaughn Gavin + pak01267&pixie.co.za +8888 + WEBGSM + Ludovic Smadja + lsmadja&webgsm.com +8889 + virtual solution AG + Dr. 
Raoul-Thomas Herborg + raoul.herborg&virtual-solution.com +8890 + Broadcast Technology Limited + Tony Carr + tony.carr&btl.uk.com +8891 + sforce.org + Thomas Kruse + tk_iana&sforce.org +8892 + Chaman Productions + Jeannin Cédric + cedricj&cybercable.fr +8893 + Mobilitec + Benny Rachlevsky Reich + benny.reich&mobilitec.com +8894 + Rivermen AB + Magnus Heino + magnus.heino&rivermen.se +8895 + Information Systems Consulting (Insyc) + Robert Brautigam + demon&lilu.csoma.elte.hu +8896 + Sistematica + Roberto Ricci + roberto_ricci&sistematica-srl.it +8897 + Alltel Information Services + Shawn McKinney + shawn.mckinney&alltel.com +8898 + Weavers Network Consulting + Andre Derraik + andre.derraik&weavers.com.br +8899 + The McGraw-Hill Companies + John Gervasio + john_gervasio&mcgraw-hill.com +8900 + Intellitactics + Paul Sop + iana&itactics.com +8901 + Monkeybagel Hardware Solutions + Benjy Feen + hardware&monkeybagel.com +8902 + GeoTEC Internet + Chebon Huber + staff&geotec.net +8903 + NetNearU CorporateTAM + Kwok-yam + tam&nnu.com +8904 + Cooper Industries + Scott Dunning + sdunning&cooperpower.com +8905 + HuntCorp Enterprises + Neil Hunt + grover&huntcorp.com.au +8906 + Solnet Pty Ltd + Neil Hunt + grover&solnet.com.au +8907 + GE CompuNet Computer AG & Co. oHG + Frank Pooth + frank.pooth&gecits-eu.com +8908 + Belgacom NV of public Law + Kristoff Bonne + kristoff&belbone.net +8909 + DICA Technologies AG + Alfred Richter + arichter&dica.de +8910 + Meitner - Soluções Internet e Sistemas de Informação, Lda + Jorge Dionisio + zaga1&yahoo.com +8911 + Appelsiini Finland Oy + Antti Lehto + antti.lehto&appelsiini.com +8912 + Alice's Registry, Inc. + Rick Wesson + rick&ar.com +8913 + Federal Linux Systems + Ron Broberg + ronbroberg&yahoo.com +8914 + Galderma Laboratories L.P. + Mark Adkins + mark.adkins&galderma.com +8915 + Ralf Bensmann + Ralf Bensmann + iana&bensmann.com +8916 + Infogate Online + Izak Cohen + izak&infogateonline.com +8917 + Strelitzia Be + Lorenzo Planas + lorenzo.planas&strelitziabe.com +8918 + J-PHONE EAST CO.,LTD. + Yoshihito Katagai + yoshihito.katagai&j-phone-east.com +8919 + Huysmans en Kuypers Automatiseringsburo + Paul Lucassen + paul&bicat.com +8920 + Bamboo MediaCasting + Meir Fuchs + meir&bamboomc.com +8921 + Nice Work Textile Corp.Ltd + Richard van Leeuwen + Richard&niceworktextile.com +8922 + RESI Informatica S.r.l. + Angelo D'Ottavi + dottavi&resi.it +8923 + Keyware + Tim Dobbelaere + tdobbelaere&keyware.com +8924 + Inforad - Com. Serv Ltda. + Roberto Moraes Dantas Filho + junior&inforad.com.br +8925 + Transora + Jon Beyer + jon.beyer&transora.com +8926 + VIP Switch Inc. + Sean Harnedy + sharnedy&vipswitch.com +8927 + Paceline Systems Corporation + Hal Rosenstock + hal&pacelinesystems.com +8928 + Asta Networks + Jared Pfost + jared&astanetworks.com +8929 + Dominion Electronics Pty Ltd + James Mcleod + jamesm&dominion.net.au +8930 + Pelago Networks + Jennie Holmes + jholmes&pelagonet.com +8931 + Cordell, Inc + Ken Woodmansee + kenw&cordell.net +8932 + WaterWare Internet Services, Inc. + George Mount + george&waterware.com +8933 + Wokup + David Degouilles + ddeg&wokup.fr +8934 + b-process + Jean-luc Henry + jlhenry&b-processs.com +8935 + Wisdom Assen BV + W.H. 
Schraal + W.H.Schraal&wisdom.nl +8936 + WebToGrid + Tiago Pereira + tiago.pereira&intelidata.pt +8937 + Thirdspace Living Ltd + Dr Dave Robinson + dave&thirdspace.tv +8938 + Cyberell Oy + Olli-Pekka Rinta-Koski + ola&cyberell.com +8939 + Frontec netSolution AB + Frode Randers + Frode.Randers&frontec.se +8940 + SAVVIS Communications + Donald E. Bertier, Jr. + donb&savvis.net +8941 + JC Enterprises + Johnny Chang + uberchang&yahoo.com +8942 + AMX Corp + Ron Barber + ron.barber&amx.com +8943 + Analog Design Autoamtion Inc. + Benson Jin + benson&analogsynthesis.com +8944 + Obfuscated Networking + Michael Conlen + meconlen&obfuscated.net +8945 + In-Q-Tel Inc. + Joshua Icore + network&in-q-tel.org +8946 + ThinAirApps + Nathanial Freitas + Nathanial.Freitas&thinairapps.com +8947 + Viacore, Inc. + Dale Cabell + dale.cabell&viacore.net +8948 + A S Promotions LTD + Scott Wiginton + swiginton&wigintoninternational.com +8949 + OuterSpace Consultants + Heath Hendrickson + heath&outerspaceconsultants.com +8950 + webBASIS, Inc. + Benjamin Templeton + bent&coresoft.com +8951 + Platys Communications + Meenakshi Ramamoorthi + meena&stargateip.com +8952 + JSC Belsvyaz + Dmitry Golovanov + dmgol&belgtts.ru +8953 + MightySun LLC + Chin H Kow + chkow&yahoo.com +8954 + VTEX Typesetting Services + Sarunas Burdulis + sarunas&vtex.lt +8955 + GORGY TIMING + Christophe Poyet + christophe.poyet&gorgy-timing.fr +8956 + ZICOM Electronic Security Systems Ltd. + Amit Sarode + amits&zicomsecure.com +8957 + Railinfrabeheer BV + P. de Kok + p.deKok&railinfrabeheer.nl +8958 + James A. Mahlen & Associates + James A. Mahlen + jmahlen&texas.net +8959 + IER Incorporated + Wayne Parker + wparker&ier-tx.com +8960 + Coree Networks Inc. + Andrew Goldstein + agoldstein&coreenetworks.com +8961 + Paradigm Secure Communications + Roger Palmer + roger.palmer&astrium-space.com +8962 + IntruVert Networks, Inc. + Srikant Vissamsetti + srikant&intruvert.com +8963 + Advanced Web Communication, division of Xecom, Incorporated + Frank Zhang + fzhang&xecom.com +8964 + Bezirksregierung Duesseldorf + Carsten Wegner + carsten.wegner&brd.nrw.de +8965 + Dff Internet & Medien + Mr. Torsten Curdt + tcurdt&dff.st +8966 + PurOptix + Cliff Emerson + cliff&puroptix.com +8967 + NASA JSC + Alice Aman + alice.l.aman1&jsc.nasa.gov +8968 + Juha Saarinen IT Writer + Juha Saarinen + juha&saarinen.org +8969 + Rio Tinto + Christopher Deeble + christopher.deeble&riotinto.com +8970 + Argogroup Interactive Ltd + Dirk Moermans + dmoermans&argogroup.com +8971 + Speed to Market Engines + Rao Nimmakayala + rao&speedtomarket.com +8972 + IP Powerhouse Ltd. + Christopher Madden + chris.madden&ippowerhouse.com +8973 + Grupo Eurociber S.A. + Angel Redondo Fernandez-Rebollos + anredondo&banesto.es +8974 + Universitaet Marburg + Karl-Heinz Ergezinger + ldap&hrz.uni-marburg.de +8975 + Soluzione Script GmbH + Bastian Ziegler + hans.baier&soluzione.de +8976 + Airia, Inc. + Carl Herbst + cherbst&airiaglobal.com +8977 + Synamics Inc. + Sunil Rananavare + srana&synamics.com +8978 + Sears Holdings Corp + Ricc Babbitt + rbabbit&searshc.com +8979 + The Real Asset Company Limited + Reece Robinson + mail&trac-group.co.nz +8980 + Collective Technologies + Uma Vaiyapuri + uma&colltech.com +8981 + Joeunsecurity + Wongi Bae + im1gi&joeunsecurity.com +8982 + Ipertrade s.r.l. + Alberto Bottacin + a.bottacin&ipertrade.com +8983 + Bivio Networks + Ron Murphy + rmurphy&bivio.net +8984 + C J SPAANS + Jasper Spaans + jasper&spaans.ds9a.nl +8985 + The Synaptic Group, Inc. + Mark G. 
Christenson + mgc&synaptic.com +8986 + Arcotect Limited + Keith Chan + keith.chan&dotcompacific.com +8987 + State Infocommunication Foundation of Estonia + Urmo Mäeorg + urxx&riks.ee +8988 + Pingworks + Christoph Lukas + lukas&pingworks.de +8989 + Naino Corporation + Marc-Olivier Méhu + Marc-Olivier.Mehu&naino.com +8990 + Redpill Linpro AS (formerly 'Linpro AS') + Erik Inge Bolsø + iana&redpill-linpro.com +8991 + MARPOSS S.p.A. + Luca Simoncini + luca.simoncini&marposs.com +8992 + Unisoft Consultants + Mohamed A. Kamara + unisoftco&usa.net +8993 + Asera Inc. + Krishna Kumar + kkumar&asera.com +8994 + Avantas Networks + Melanie Gosselin + mgosselin&avantas.com +8995 + Lo-Wang.org + Frank Clements + neumonik&ptd.net +8996 + TransactPlus, Inc. + Robert Chomentowski + rob.chomentowski&transactplus.com +8997 + ProArp Inc. + Max Schreiner + ms&e-comerce.de +8998 + Cottay + Will Cottay + will&cottay.net +8999 + TVS eTechnology Limited + Ravi Desikan + ravi&tvsetech.com +9000 + Personal Path Systems, Inc. + Tom Hagan + hagant&personalpathsystems.com +9001 + Goltier Media Group (Sitepak) + Francois Belanger + francois&sitepak.com +9002 + LXPRO.COM + Greg Bailey + gbailey&lxpro.com +9003 + slimjones. + Paul Jean Jouve + p_jouve&hotmail.com +9004 + SuSE Linux Venezuela, C.A. + Xavier E. Mármol + xmarmol&suse.de +9005 + Video Networks Limited + Brendan Hole + b.hole&videonetworks.com +9006 + Apache Consulting Ltd. + Garry Morash + gmorash&apache.ab.ca +9007 + FineGround Networks + Chao Feng + chao&fineground.com +9008 + Digital Route + Joakim Österberg + joakim.osterberg&digitalroute.com +9009 + Agni Systems Limited + Name Server Admin + nsadmin&agni.com +9010 + Liebenzell Mission + EDV-Abteilung + edv&liebenzell.org +9011 + Laminar Software, Inc. + Diane Gruseck + dgruseck&laminarsoft.com +9012 + MailVision Ltd. + Diego + diegob&mailvision.com +9013 + SpiritSoft Inc + Rob MacNeill + Robert.MacNeill&spirit-soft.com +9014 + Belenos, Inc. + Johnny Chang + jchang&belenosinc.com +9015 + World Streaming Network, Inc. + Fred Crable + fcrable& worldstreaming.net +9016 + FreeRein Corporation + John Chu + JohnC&FreeRein.com +9017 + Cogent Systems, Inc. + James Xie + jamesxie&cogentsystems.com +9018 + CQR Data Ltd. + Martin Stpehen Pegler + mpegler&ark-online.net +9019 + Cap Gemini Telecom Media & Networks Deutschland GmbH + Tobias Wermann + tobias.wermann&capgemini.de +9020 + Computer Adaptive Technologies, Inc. + Corey Ehmke + corey_ehmke&hmco.com +9021 + WideOpenWest LLC + David Walden + dwalden&wideopenwest.com +9022 + TrueSAN Networks + Bill Jang + bjang&truesan.com +9023 + Netsanity Inc. + Leo Ramos + lramos&corp.netsanity.com +9024 + Telia Internet, Inc. + M. Groesbeek + mgroesbeek&us.telia.net +9025 + NetCologne + Roland Rosenfeld + rrosenfeld&netcologne.de +9026 + adisoft systems GmbH & Co. KG + Jens Salewski + j.salewski&adisoft-systems.de +9027 + SoftNerd + William R. Buckley + wrb&softnerd.com +9028 + Lendx + Dan Zhang + dzhang&lendx.com +9029 + BSD Users Group Italia + Giacomo Cariello + jwk&bug.it +9030 + iVMG Incorporated + Christopher N. Harrell + cnh&ivmg.net +9031 + Nexter Information Technology + Young-jae Chang + nobocop&nexter.com +9032 + IVSTel + Angus North + a.north&ivstel.com +9033 + Elion Enterprises Ltd + Klemens Kasemaa + klemens.kasemaa&elion.ee +9034 + Jabber.com + Constantin Nickonov + nickonov&jabber.com +9035 + CFS Brands, LLC + Andrew Nowlin + andrewnowlin&cfsbrands.com +9036 + Jim Dutton + Jim Dutton + jimd&siu.edu +9037 + Everyday Office Inc. 
+ Richard Cowles + rcowles&everydayoffice.com +9038 + Well Diagnostics + Yngve Nyheim + yngve&welldiagnostics.com +9039 + Chengdu Ideal Telecom Technology,Inc. + Jiang Daming + mshou&sina.com +9040 + Chengdu Ideal Information Industry Co,Ltd. + Jiang Daming + mshou&sina.com +9041 + Cambridge University Engineering Department + Paul Taylor + head-of-it&eng.cam.ac.uk +9042 + Teinos + Massimo Travascio + m.travascio&teinos.com +9043 + Execulink + Erik Zweers + zweers&execulink.com +9044 + NEC Eluminant Technologies, Inc. + Sunil Unadkat + unadkats&eluminant.com +9045 + eNetSecure, Inc + Patrick Heim + patrick.heim&enetsec.com +9046 + Amecisco + Walter Yu + support&amecisco.com +9047 + Blue Cross Blue Shield of Florida + Thomas C. Graham, III + hostmaster&bcbsfl.com +9048 + Open System Consultants + Heikki Vatiainen + hvn&open.com.au +9049 + Integrity Online + John Wever + jwever&integrity.com +9050 + Telenor Link AS + Roar Pettersen + roar.pettersen&link.no +9051 + Faculdade de Ciências e Tecnologia da Universidade Nova de Lisboa + Paulo Matos + paulo.matos&fct.unl.pt +9052 + Sunbay Software AG + Sergey Starosek + star&sunbay.com +9053 + Tovaris IP LC + Michael Smith + msmith&tovaris.com +9054 + Tokheim + Mike Fox + mike.fox&tokheim.com +9055 + 9Rivers.com + Wei Wang + weiwang&9rivers.com +9056 + TelStrat International, Ltd. + Craig Lutgen + it&telstrat.com +9057 + Millennium Communication Network + Ronan Kelly + kelly&mcn-tower.com +9058 + Jamcracker + Yuriy Martsinkovskiy + yuriym&jamcracker.com +9059 + Vectrad Networks Corporation + Lee Dunbar + leed&sirosnetworks.com +9060 + RuleSpace, Inc. + James Dirksen + ops&rulespace.com +9061 + Deutscher Go-Bund + Dr. Christian Gawron + christian.g&awron.de +9062 + SRS Microsystems + Shawn Younger + syounger&srsmicro.com +9063 + iHive Technology Pte Ltd + Teng Siong, Ng + tengsiong&ihive.com +9064 + ATM ComputerSysteme GmbH + Luigi Bove + luigi.bove&atm-computer.de +9065 + Nettasking Technology (Singapore) Pte Ltd. + Wilfred Wong + wilfred.wong&nettasking.com +9066 + Global Security Technologies, Inc. + Robert Rich + rrich&gstisecurity.com +9067 + Cambia Networks + Paul Riley + priley&cambianetworks.com +9068 + Delphi Associates, Ltd. + Charles Reindorf + charles&reindorf.com +9069 + Phillip's Technical Services + Phillip Omlor + peomlor&uswest.net +9070 + Symmetricom + Dilip S. Dhanda + ddhanda&symmetricom.com +9071 + Spirian Technologies, Inc. + Joseph Sturonas + JSturonas&Spirian.com +9072 + AckleyNet + Jason Ackley + jason&ackley.net +9073 + Sunnyvale Community Players + Dan Rich + drich&sunnyvaleplayers.org +9074 + John M. Sutherland, Inc.Insurance + Stephen J. Kiernan + admin&vegamuse.org +9075 + Novex Solutions + Stephen J. Kiernan + admin&vegamuse.org +9076 + Tokyo Metallic Commuynications Corp. + Noboru Kurgouchi + kuro&metallic.co.jp +9077 + Dekart + Alexander Kutsy + joy&dekart.com +9078 + Be Connected Ltd + Lahiany Sigalit + Sigalit.Lahiany&be-connected.net +9079 + The Sanjyot Bharathan Design Association + Sanjyot Bharathan + techhead&graphic-designer.com +9080 + Babel Com Australia + Del Elson + del&babel.com.au +9081 + eachnet + Jsshi + jsshi&126.com +9082 + eXtremail + Richard González + extremail&extremail.com +9083 + eteamconsulting.com + John Grzyb + jandj&greenmount.org +9084 + Keynote Systems, Inc. + Krishna Khadloya + krishna.khadloya&keynote.com +9085 + Optical Switch Corporation + James Liou + jliou&opticalswitch.com +9086 + US Robotics + Anurag Batta + anurag_batta&usr.com +9087 + Brightmail, Inc. 
+ Mark Lim + lim&brightmail.com +9088 + Oso Grande Technologies, Inc. + Systems Support + systems&osogrande.com +9089 + New Mexico Technet, Inc. + Monte Mitzelfelt + monte&nm.net +9090 + Datagrove + Jim Hurd + jimh&datagrove.com +9091 + MICRO-STAR INT'L CO., Ltd. + Norman Hung + normanhung&msi.com.tw +9092 + Veenstra Graphic Solutions + Rick Veenstra + rick&veenstra.org +9093 + Innovate I.T. Logistics BV + Martin Bokman + martin&innovate-it.nl +9094 + Amber Archer Consulting Co., Inc. + Kyle F. Downey + kdowney&amberarcher.com +9095 + NIPPON MITSUBISHI OIL CORPORATION + Takanobu Nakajima + takanobu.nakajima&nmoc.co.jp +9096 + Zentrum für Produkt Entwicklung, ETH Zürich + Stephan Müller + stmuller&imes.mavt.ethz.ch +9097 + CDFtel + Eurico Inocêncio + emi&cdftel.pt +9098 + DW Systems + Daniel W. Brodsky + dwb&twcny.rr.com +9099 + Central Systems International, Inc. + Greg Wiktor + gw&censys.net +9100 + Mstel + Jaehee Lee + jhlee&mstel.co.kr +9101 + Optum Computing Solutions, Inc. + Steve Davis + sdavis&optum-inc.com +9102 + Telecash Kommunikations-Service GmbH + Armin Krone + Armin.Krone&Telecash.de +9103 + Yu-Shei Real Estate Co. + Tsung-yiu Lai + alexlai&yushei.com +9104 + Chu-Shen Co. Ltd + Tsung-yiu Lai + alexlai&yushei.com +9105 + MIST + Natasa Grandic + ngrandic&mistwireless.com +9106 + ELIOS Informatique + Denis Salembier + denis.salembier&elios-informatique.fr +9107 + Ferguson Enterprises, Inc. + Jediah Logiodice + it.iana-pen.admin&ferguson.com +9108 + Random Walk Computing, Inc. + Joel Scotkin + jscotkin&randomwalk.com +9109 + infogear + Amit Tropper + atropper&cisco.com +9110 + Net Asia Angeles CIty + Tristan Cusi + tristan&netasia-angeles.net +9111 + Newway Tech. Inc. + Marco Chen + marco.chen&newwayit.com +9112 + Palamedes GmbH + Hannes Reinecke + H.Reinecke&palamedes.de +9113 + Wiral Ltd + Jukka Rissanen + admin&wiral.com +9114 + YLine Web Access Services + Philip Poten + philip.poten&yline.at +9115 + Archon Technologies, Inc. + Brian Massingill + bmassingill&archon-tech.com +9116 + Ubicom, Inc + Chris Waters + chris.waters&ubicom.com +9117 + LeWiz Communications Inc. + Taliaferro Smith + TollyS&Lewiz.com +9118 + 1Ci GmbH + Ralf Bensmann + iana&1ci.de +9119 + Advanced Rotorcraft Technology, Inc. + Joe English + jenglish&flightlab.com +9120 + Narada Networks Inc. + Brian Brown + brianb&naradanet.com +9121 + Eduquip Limited + Ron Segal + ron&segal.co.nz +9122 + UPCtv Interactive + Pim van der Heijden + pvdheijden&upctv.com +9123 + Université Pierre Mendès France + Jean Guillou + Jean.Guillou&upmf-grenoble.fr +9124 + Intellitactics Inc. + Mathew Johnston + mjohnston&itactics.com +9125 + Inspiration Technology Pty Ltd + Ted Bullen + ted&instech.com.au +9126 + Aetian Networks + Arpan Shah + arpan&aetian.com +9127 + ADVANCED PERIPHERALS TECH. Inc. 
+ Toshiko Kojima + tkojima&apti.co.jp +9128 + Centre de traduction des organes de l'Union européenne + Pascal Dufour + Pascal.Dufour&cdt.eu.int +9129 + Linkvest SA + Jean-Eric Cuendet + ccsr&linkvest.com +9130 + Hemisphere Technologies + Carl Cronje + carl&hemtech.co.za +9131 + Roland Felnhofer + Roland Felnhofer + roland.felnhofer&chello.at +9132 + Hutchison OPTEL Telecom Technology CO.,Ltd + Lin Tong + lintong&optel.com.cn +9133 + Microraab Electronics + Szilard Birkas + birkas.szilard&microraab.hu +9134 + Telenet NV + System Group + systemgroup&telenet-ops.be +9135 + Heyde AG + Thomas Huch + THuch&heyde.de +9136 + ISDN Communications Ltd + Nick Hoath + nhoath&isdn-comms.co.uk +9137 + Annatel + Dave Frary + dfrary&anatel.net +9138 + AnteFacto + John Looney + john&antefacto.com +9139 + Petroleum Business Institute + Alexander Goncharov + magic&petroleum.ru +9140 + Media General, Inc. + Michael Miller + inetadmin&mediageneral.com +9141 + Riptech + Jeff Odom + jeff&riptech.com +9142 + Infopaq International A/S + Peter Thygeesn + palaka&infopaq.dk +9143 + Edmunds.com + Sergey Fokichev + sfokichev&edmunds.com +9144 + Trader Electronic Media + Michael Gorman + gorman&traderonline.com +9145 + AccessPt. Inc. + Troy Harrison + Troy.Harrison&accessptinc.com +9146 + Reporter-Times, Inc. + IS Department + tech&reportert.com +9147 + TeaLeaf Technology, Inc. + Andrew Kosolobov + akosolobov&tealeaf.com +9148 + Acme Packet + Ephraim Dobbins + edobbins&acmepacket.com +9149 + LOCUS Corporation + Tain Lee + tilee&locus.com +9150 + Metromedia Fiber Network + Steve Miller + steve&mfnx.net +9151 + Wiral Ltd. + Juha Kumpulainen + juha.kumpulainen&wiral.com +9152 + Dit Company Limited + Kaoru Mitsuhashi + mitsu&dit.co.jp +9153 + ACTIA Telecom + Nicolas FOULQUIER + si&actiatelecom.fr +9154 + Figment Technologies Inc. + Albert Chau + alberto&figment.net +9155 + BlueNetworkX + Daniel Golle + daniel.golle&bluenetworkx.de +9156 + eGurkha Pvt Ltd. + Srinivas Ramanathan + srinivas&egurkha.com +9157 + CoreComm + Matt Holtz + root&voyager.net +9158 + The Internet Group + Simon Allard + simon.allard&staff.ihug.co.nz +9159 + SHad0ws.net + Kevin Black + shad0w&shad0ws.net +9160 + DGT Sp. z o.o. + Michał Porzeziński + michael&dgt.com.pl +9161 + Universidad del Pais Vasco + Armando Ferro Vázquez + jtpfevaa&bi.ehu.es +9162 + Pädagogische Hochschule Freiburg + Stephan Lauffer + lauffer&ph-freiburg.de +9163 + GNU Radius Project + Sergey Poznyakoff + gray&gnu.org +9164 + Xware AB + Mats Nilsson + mats.nilsson&xware.se +9165 + Cabinet Remouit + Jean-Louis Remouit + remouit&aol.com +9166 + Universidad Galileo + Oscar Bonilla + obonilla&galileo.edu +9167 + ANXeBusiness Corp + Jeff Wade + wadej&anx.com +9168 + Transaccess + Pedro Sanchez Riofrio + psanches&setal.com.br +9169 + Novis Telecom, SA + Rui Cohen + rui.cohen&isp.novis.pt +9170 + Exaecon Inc. + Elahe Zafari-Kellermann + Elahe.Zafari-Kellermann&gmx.net +9171 + NetTone + phuster&nettone.com + phuster&nettone.com +9172 + HVC Technologies + Yimin Zheng + yiminzheng&hotvoice.com +9173 + Kohl's + Larry McCann + larry.mccann&kohls.com +9174 + A2V ict + Armand A. Verstappen + armand&a2vict.nl +9175 + Stream Theory, Inc + Bruce McKenzie + bruce&streamtheory.com +9176 + Open-IT + Joe Little + jlittle&open-it.org +9177 + ThinkFree.Com, Inc + Heungwook Song + hwsong&thinkfree.com +9178 + DaeSung Corp.
+ Heekyung Lee + hwsong1&netian.com +9179 + INTESABCI SISTEMI E SERVIZI + Michele Popescu + Michelepopescu&iss.bancaintesa.it +9180 + Maranti Networks, Inc + Sanjay Saxena + ssaxena&marantinetworks.com +9181 + Air Technology Systems + Henry Zektser + hzektser&air-tech.com +9182 + FileFish, Inc. + Robert Wygand + rob&filefish.com +9183 + Samba TNG + Sander Striker + striker&samba-tng.org +9184 + Alterbox + Mario Vojvoda + duke&alterbox.net +9185 + MS Integration Services Inc. + Mark Sobkow + msobkow&worldnet.att.net +9186 + Logipolis Pte Ltd + Nicholas Tan + nicholastan&logipolis.com +9187 + Eastman Chemical Company + Carson Gallimore + carsongallimore&eastman.com +9188 + Force Computers + Bharath Channakeshava + bharath.c&smartm.com +9189 + Promedico Gmbh + Bernd Juergens + juergens&promedico.com +9190 + Katanoo Technologies + Stephan Krings + skrings&katanoo.com +9191 + Burgiesoft + Jeff Meyer + root&burgiesoft.com +9192 + Pirkan Tietokone Oy + Martti Hyppänen + yukka&propelli.net +9193 + CIRPACK + Frederic Potter + fpotter&cirpack.com +9194 + AustLII + Philip Chung + philip&austlii.edu.au +9195 + AutocontControl Systems, spol. s r. o. + Jaroslav Chylek + vinklarek&autocont.cz +9196 + Algotronics + Serge Gosset + serge&algotronics.com +9197 + Lyndon State College + Stephen C. Allen + allens&mail.lsc.vsc.edu +9198 + Teletron Ltd. + Frank Kleinewoerdemann + frank&teletron.co.il +9199 + Daum Datentechnik + Klaus Daum + kdaum&t-online.de +9200 + Obermeier Software + Klaus Obermeier + klaus.obermeier&obermeier-software.de +9201 + Fasturn Inc. + Nikunj Mehta + nmehta&fasturn.com +9202 + EnsureLink + Aaron Jarson + ajarson&ensurelink.com +9203 + SeguriDATA Privada, S.A. de C.V. + Juan Gonzalez Cardoso + jgcardoso&seguridata.com +9204 + Aurora Networks, Inc. + Luis Yu + lyu&aurora.com +9205 + Moj. Net d.o.o. + Borut Mrak + borut.mrak&moj.net +9206 + DigitalSpark + Adam Strohl + iana&digitalspark.net +9207 + segNET Technology, Inc. + Adam Strohl + adams&segnet.com +9208 + Hamilcar Development + Adrian Kuepker + akuepker&pobox.com +9209 + Peace Technology, Inc. + Alex Roytman + roytmana&peacetech.com +9210 + Merlin I.T. Services Ltd + Phil Cooper + merlinit&lineone.net +9211 + Rabobank ICT + A.T. Bezemer + a.t.bezemer&rf.rabobank.nl +9212 + Wavium AB + Sten Hubendick + sten.hubendick&wavium.com +9213 + NetSeal Technologies + Ilka Pietikainen + ilkka.pietikainen&netseal.com +9214 + Kemuri Systems + Geert Vanderkelen + geert&kemuri.org +9215 + ERIDAN Informatique + Martin Philippe + pmartin&eridan-info.fr +9216 + Universidade de Évora, Departamento de Informática + Prof. Salvador Abreu + spa&di.uevora.pt +9217 + BIOBASE Biological Databases/ Biologische Datenbanken GmbH + Axel Wagner + axel.wagner&biobase.de +9218 + Skysoft Portugal + Carlos Juliao Duartenn + juliao&skysoft.pt +9219 + Juliao.com + Carlos Juliao Duartenn + juliao&juliao.com +9220 + OPENJCC + Andreas Binder + abi&openjcc.org +9221 + Shaw Communications + Enterprise Management + soc.entmgmt&sjrb.ca +9222 + Phillips Petroleum Company + james Vinatieri + jevinat&ppco.com +9223 + Roskilde University + Mads Freek Petersen + freek&ruc.dk +9224 + Lawrence University + Robert Lowe + network.manager&lawrence.edu +9225 + FJA + Fritz Scherer + net-admin&fja.com +9226 + Microtest + Ben Cohen + bcohen&microtest.com +9227 + Entrisphere, Inc.
+ Matt Greenwood + mig&entrisphere.com +9228 + Globetech + Brett Ellis + brett.ellis&globetech.ch +9229 + Totilities + Dennis Totin + dtotin&home.com +9230 + #B4mad Research Lab Organization + Stephan Dühr + duehr&b4mad.net +9231 + Adir Technologies + John Berry + jlb&adirtech.com +9232 + Norwegian Mapping Authority + Frode Arild Ekroll + frode.ekroll&statkart.no +9233 + IER SA + Serge Favier + sfavier&ier.fr +9234 + Snowshore Networks Inc + Joseph Benoit + jbenoit&snowshore.com +9235 + FCB Direct Montreal + James Connolly + jconnolly&fcbdirect.com +9236 + Fundtech Corporation + Alexander Kouznetzoff + alexanderk&fundtech.com +9237 + Viking Enterprise Solutions + Barry Haddon + Barry.Haddon&VikingEnterprise.com +9238 + Imagine Broadband Limited + Shaf Girach + shaf.girach&imaginebroadband.com +9239 + JonesKnowledge.com + Robin R. Reid + rreid&jonesknowledge.com +9240 + International MRI Accreditation Resources, LLC + David Goldhaber + dmg&forimar.com +9241 + uni-X Software AG + Kai Schwermann + schwermann&uni-x.com +9242 + Instituto de Matemática e Estatística da Universidade de São Paulo + Francisco Reverbel + reverbel&ime.usp.br +9243 + Air'Prod + Benoit de Mulder + benoit&decollage.org +9244 + PureCarbon, Inc. + Stuart Piltch + stuart&purecarbon.com +9245 + Neuromics, Inc. + Rodney Barnett + rbarnett&neuromics.com +9246 + Automated Handling Systems, Inc. + David Scott isi.edu.autohandle.com + ---none--- +9247 + Federal-Mogul Corporation + Cindy Ward + cindy_ward&fmo.com +9248 + Paktronix Systems LLC + Matthew G. Marsh + mgm&paktronix.com +9249 + Musambi Corporation + Madhu Konety + mkonety&musambi.com +9250 + Strix Systems + Gabi Abraham + gabi.abraham&strixsystems.com +9251 + TR Associates + Tony Rosa + tony_rosa&bigfoot.com +9252 + Hôtel-Dieu De Saint-Jérôme + Marc Clément + marc.clement&ssss.gouv.qc.ca +9253 + Buildscape, LLC + Juan A. Pons + sysadmin&buildscape.com +9254 + Pajunas Interactive, Inc. + Allie Micka + allie&pajunas.com +9255 + Vicinity Corporation + Mike Diehn + mdiehn&vicinity.com +9256 + LightPointe Communications + Brad Hyslop + bhyslop&lightpointe.com +9257 + Volera + Steve Duckworth + sduckworth&volera.com +9258 + XMLStrategies.net + Lou Johnson + lou&webstrategies.org +9259 + SYSPOL Co., Ltd. + Song, Young Tae + ytsong&yangjae.co.kr +9260 + ADTEC Corporation + Takanori Kakigi + kakigi&adtec.co.jp +9261 + Delphi Automotive Systems Investment (China) Holding Co.,Ltd + Gordon Wang + gordon.wang&eds.com +9262 + EAST.NET Co. Ltd. + Haixiang Gong + haixiang&public.east.net.cn +9263 + Cybernetics Technology Co., Ltd. + Mikio Fukushima + mikio&cybercom.co.jp +9264 + Caerdroia + Jeffrey K Medcalf + medcalf&caerdroia.org +9265 + TDISIE + Orekhov Pavel + opa&oniltz.da.ru +9266 + Parsoft Systems Pvt. Ltd. + Sagar Sarma + sagar&parsoftsystems.com +9267 + in-systeme GmbH + Roman Huber + roman.huber&in-systeme.com +9268 + Ruhrverband + Joachim Sommer-Littwin + jso&ruhrverband.de +9269 + MediaBroadcast GmbH + Dr. Andreas Ehbrecht + andreas.ehbrecht&media-broadcast.com +9270 + Callsys Ltd. + Valery Vybornov + vvv&callsys.co.uk +9271 + Siemens NGNI + Matthias Frank + matthias.frank&er19.siemens.de +9272 + Elektrobit Ltd.
+ Teemu Pirinen + teemu.pirinen&elektrobit.fi +9273 + Neuro Telecom + Jang Sik Park + jspark&neurotel.co.kr +9274 + access:Seven Communications GmbH + Peter Marenbach + peter.marenbach&access-7.de +9275 + GENESYS + Nisrine FAJAJ + nisrine.fajaj&genesys.com +9276 + Sema Spain NEG + Alberto Mateos + alberto.mateos&sema.es +9277 + LOGICO Smart Card Solutions AG + Stefan GOMBAR + stefan.gombar&logico.net +9278 + econia.com + Andre Doehn + andre.doehn&econia.com +9279 + Arqana Technologies Inc. + Ulf Kellermann + ulf_kellermann&argana.com +9280 + AccessLine Communications Corporation + Sen Yang + syang&accessline.com +9281 + Standard Insurance Company + Ted Steffan + tsteffan&standard.com +9282 + NOCpulse + Mike Perez + mp&nocpulse.com +9283 + Costa Rica Institute of Technology + Christian Sanabria + csanabria&itcr.ac.cr +9284 + Western Illinois University + Dan Romano + d-romano&wiu.edu +9285 + InternetCDS + Jaye Mathisen + mrcpu&internetcds.com +9286 + Maximum Throughput Inc. + Gord R. Lamb + glamb&max-t.com +9287 + Zeus Communications + Howard Wang + howard.wang&zeuscomm.com +9288 + HostPro + Cameron Jones + cameron&hostpro.com +9289 + Unirez, Inc. + Brian Boyd + bboyd&unirez.com +9290 + Firinn.org + Mark Bainter + mark-spamx&firinn.org +9291 + Rainfinity + Jim Rice + jrice&rainfinity.com +9292 + Webley System + Alexander Litvak + alexl&webley.com +9293 + Staturn Computer + Wang Yongsheng + wys205&sina.com +9294 + W4y do Brasil Ltda + Antonio Mesquita + antonio.mesquita&w4y.com.br +9295 + Workers Compensation Fund + Matt Goldsberry + mgoldsbe&wcf-utah.com +9296 + Xlight Photonics Ltd + Miki Schnarch + mikis&xlight.com +9297 + Ayeca Inc. + Nathan Ladin + nathan&ayeca.com +9298 + University of Compiègne + David Lewis + david.lewis&utc.fr +9299 + Hellas On Line + Costa Tsaousis + ktsaou&hol.gr +9300 + Hypostasis + Kit Smith + kit&hypostasis.com +9301 + Spider Software + Roy de Ruiter + help&spider.com +9302 + ESI s.r.l. + Piero Serini + p.serini&esi.it +9303 + Waystream AB (formerly 'PacketFront Network Products AB') + Fredrik Nyman + fredrik.nyman&waystream.com +9304 + Connection Technology Systems + Wesley Liao + wesley_liao&ctsystem.com +9305 + Western Michigan University + Norm Grant + norm.grant&wmich.edu +9306 + Octave Communications + Bill McKinnon + bmckinnon&ocatavecomm.com +9307 + RCL Enterprises + Robert Lilly + robert&rclenterprises.com +9308 + Oasis Technology Ltd. + David Coles + dcoles&oasis-technology.com +9309 + Urzad Miasta Pabianic + Jan Fijalkowski + fijal&um.pabianice.pl +9310 + ABITAB S.A. + Adriana Calcetto + adriana.calcetto&abitab.com.uy +9311 + DI Trute + M. Trute + mtrute&topmail.de +9312 + Truth Consulting & Technology + David Skingsley + david.skingsley&truthtech.com +9313 + DaKoMa + R. Guenther + rguenther&dakoma.com +9314 + Curtis Family + Doug Curtis + doug&curtisfam.com +9315 + CARTEL SECURITE + Benoit Lecocq + lecocq&cartel-securite.fr +9316 + NetVoyager + Mo McKinlay + mmckinlay&gnu.org +9317 + Tech2Work, Inc. + Scott Grim + sgrim&tech2work.com +9318 + DEBEKA Versicherung + Axel Meyer + axel.meyer&debeka.de +9319 + NetAktiv + Stéphane Bortzmeyer + tech&netaktiv.com +9320 + Nanning Telecom + lupeijun + lewis&rose.nn.gx.cn +9321 + Screenpeaks Ltd. + Bezalel Finkelstien + bezalel&screenpeaks.com +9322 + NetTech S.A. + Jean-Louis Pagnuco + jlp&nettech-eur.com +9323 + PC Away + Olivier Jousselin + Olivier.Jousselin&pcaway.com +9324 + Division of Cancer Studies, Birmingham UK + Dr.
Debbie Cronin + d.a.cronin&bham.ac.uk +9325 + University of California, Riverside + Andrew Tristan + andrew.tristan&ucr.edu +9326 + Lewis and Clark College + Brad Wilkin + wilkin&lclark.edu +9327 + Hygeia Corporation + Joe Garcia-Montes + j.garcia&hygeia.net +9328 + National Institutes of Health (NIH) + John Dvorak + dvorakjo&mail.nih.gov +9329 + Crusoe Communications, Inc. + Mark de Groot + degroot&crusoe.net +9330 + Selesta S.p.A. + Emilio Masserotti + masserotti&selesta.it +9331 + MaXware International AS + Thor Egil Leirtro + thor.leirtro&maxware.com +9332 + Applied SNMP + Bob Natale + bob.natale&appliedsnmp.com +9333 + Lincom Wireless + Frank Ciotti + ciottif&lincom.com +9334 + IPDR.org + Aron Heintz + aheintz&ipdr.org +9335 + PowerSoft + Eric Teutsch + erict&powersoft.ca +9336 + Publix Super Markets, Inc. + Pat Hicks + pat.hicks&publix.com +9337 + Minter Corp. + Mariano Abad + fx&mintercorp.com +9338 + Petersen Ventures LLC + Erik Petersen + cthree&home.com +9339 + Left Coast Systems Corp. + Matthew Asham + matthew&leftcoast.com +9340 + Network Infinity + Mayo Jordanov + mayo&nfy.ca +9341 + Cyberfuse Technologies, LLC + Jason Tucker + jtucker&cyberfuse.com +9342 + Penguin Computing + Philip Pokorny + ppokorny&penguincomputing.com +9343 + Comcel S.A. + Jose Carlos Jaramillo + josecarlos&comcel.com.gt +9344 + Powerwave Technologies Inc. + Ray Mansell + pwregistry&pwav.com +9345 + infinetivity, Inc. + Mark Christenson + eng&infinetivity.com +9346 + iGeek, Inc. + David L. Smith-Uchida + dave&igeekinc.com +9347 + Avaz Networks + Khalid Jamil + khalid&avaznet.com +9348 + Golden Telecom Ukraine + Vlad Patenko + vp&goldentele.com +9349 + WizLAN Ltd. + Gadi Miller + gadi&wizlan.com +9350 + Netmagic Solutions Pvt. Ltd. + Mr. Jaibalan + raghav&netmagicsolutions.com +9351 + FORTHnet S.A. + Tzormpatzakis Alexis + atzo&forthnet.gr +9352 + Conundrum Communications + Matt Pounsett + matt&conundrum.com +9353 + Universite de Metz + Yves Agostini + agostini&univ-metz.fr +9354 + Grey Interactive France + Etienne Bernard + eb&gifrance.com +9355 + Krivorozhskiy Hlebokombinat N1 + Ivan Gudym + ivan&gasoil.kiev.ua +9356 + nPassage, Inc. + Carl P. Miller + carl.miller&npassage.com +9357 + IT-Beratung Schaffert + Sebastian Schaffert + schaffer&informatik.uni-muenchen.de +9358 + Wicom Communications Ltd + Arto Keskiniva + arto.keskiniva&wicom.com +9359 + Symeko Datasystems bv + Guido Hasenbos + g.hasenbos&symeko.com +9360 + Green Light Inc. + Yehudan O. Ron + f_light&msn.com +9361 + Océ Software Laboratories Namur + Jacques Flamand + jacques.flamand&osl.be +9362 + ENSEIRB + Kadionik + kadionik&enseirb.fr +9363 + Spelio + Lev Walkin + vlm&spelio.net.ru +9364 + InterPark Incorporated + Walt Tracy + walt.tracy&intpark.com +9365 + Mindsurf Networks Inc. + Christopher Sweigart + csweigart&mindsurfnetworks.com +9366 + Cifra + Mario Korva + mario.korva&cifra.si +9367 + FASTSIGNS International, Inc + Raj Croager + raj.croager&fastsigns.com +9368 + Storage Xstreams + Robert Lee + rlee&storagexstreams.com +9369 + Micah J. Schehl + Micah J. Schehl + m.schehl&computer.org +9370 + Space Telescope Science Institute + Don Stevens-Rayburn + stevray&stsci.edu +9371 + Okolona Christian Church + Phillip Mather + pmather1&okolonacc.org +9372 + Ikadega, Inc.
+ Kevin Day + kevind&ikadega.com +9373 + Clear Ink + Cove Schneider + hostmaster&clearink.com +9374 + Hermaion.org + Gordon Pedersen + gordon&visi.com +9375 + Basler Kantonalbank + Jörn Knie-von Allmen + joern.knie&bkb.ch +9376 + CVS / Pharmacy + Russell Pierce + rpierce&cvs.com +9377 + Voyager Computer Corporation + Richard Palm + rpalm&voyagercomputer.com +9378 + Ilex Systems + Emil Assarsson + emil.assarsson&bolina.hsb.se +9379 + Eumitcom Technology Inc. + Jeffrey Chou + jeffreychou&eumitcom.com +9380 + Octogon Gesellschaft für Computer-Dienstleistungen mbH + Lupe Christoph + lupe.christoph&octogon.de +9381 + Walter Biering GmbH - Mediahaus und Grafischer Betrieb + Lupe Christoph + lupe.christoph&octogon.de +9382 + WORAH.NET + Vashist Worah + vw&worah.net +9383 + Angeles Design Systems + Tom Laverty + tlaverty&angeles.com +9384 + D2K, Inc. + Boris Shpungin + bshpungin&d2k.com +9385 + Jason Wood + Jason Wood + woodja&ieee.org +9386 + Clayton College & State University + Dan Newcombe + newcombe&mordor.clayton.edu +9387 + Compania Mea + Rain Man + rain_man1972&yahoo.com +9388 + AsiaInfo + Mark Harrison + markh&usai.asiainfo.com +9389 + Magical Fruit + Mark Harrison + markh&usai.asiainfo.com +9390 + W.T. Services, Inc. + Allen Hyer + allenh&wtrt.net +9391 + Audientia + Yves Degoyon + ydegoyon&audientia.com +9392 + Cannon Hill Anglican College + Andrew Gibson + agibson&chac.gld.edu.au +9393 + Intecs Information Ltd. + Charles Teng + charles_teng&intecs.com.tw +9394 + Visana Services AG + Beat Kneubuehl + beat.kneubueh1&visana.ch +9395 + UEC Technologies + Kavish Parbhoo + kavishp&uec.co.za +9396 + Walt Disney International + Patrick Pfundstein + patrick.pfundstein&disney.com +9397 + Vartech Solutions Inc. + Andrew Williams + rainstain&digital-galaxy.net +9398 + Tokyo Metallic Communications Corp + Akira Isogai + isogai&metallic.co.jp +9399 + CONSUL Risk Management + Dirk Wisse, Koos Lodewijkx + dirk.wisse&consul.com, + koos.lodewijkx&consul.com +9400 + Fachhochschule Konstanz + Leitung Rechenzentrum + rechenzentrum&fh-konstanz.de +9401 + AML Wireless Systems + Raymon Hung + rhung&amlwireless.com +9402 + dCrypt Ltd + Liaquat Khan + liaquat.khan&dcrypt.co.uk +9403 + Allinstant + Matt Kressel + mkressel&allinstant.com +9404 + Dewpoint Inc. + Chris Guttridge + chris.guttridge&dewpoint.com +9405 + Kingland Systems Corporation + Thomas Johnson + tom.johnson&kingland.com +9406 + Synchrologic + Greg Montgomery + greg.montgomery&synchrologic.com +9407 + Broadata Communications Inc. + Joseph Rotenberg + jrotenberg&broadatacom.com +9408 + Chester Enterprises + Daniel Chester + dan&chesternet.net +9409 + ClickServices + Steve Rice + steve&clickservices.net +9410 + Axe Online Pty. Limited + Brian Lee + brian.lee&axe.net.au +9411 + Hitwise Pty Ltd + Tim Lewis + tim.lewis&hitwise.com +9412 + Contela + Jung Hyun Kim + levy&contela.com +9413 + Operax AB + Fredrik Pettersson + iana-contact&operax.com +9414 + Digital Globe, Inc.
+ Kazushi Minagawa + oze&digital-globe.co.jp +9415 + Archimed + Olivier Walbecq + o.walbecq&archimed.fr +9416 + Unassigned + Returned 2004-03-02 + ---none--- +9417 + egg:| + Gary Henn + gary.henn&egg.com +9418 + Ivistar AG + Andreas Micklei + andreas.micklei&ivistar.de +9419 + Siemens SiNiA + Rudi Neite + rudi.neite&er19.siemens.de +9420 + Baltimore Technologies + Lisa Dunphy + ldunphy&baltimore.com +9421 + Vodafone D2 + Steffen Weichselbaum + steffen.weichselbaum&d2vodafone.de +9422 + Atanion GmbH + Bernd Bohmann + bernd.bohmann&atanion.de +9423 + Msa-Infor Sistemas e Automacao + Thomas Kosfeld + thomas&msainfor.com.fr +9424 + A&C Automacao e Controle + Alexandre Ferez + ferez&terra.com.br +9425 + Rotterdam School of Management + Jimmy C. Tseng + jtseng&fbk.eur.nl +9426 + Adigida Solutions + Richard Kalman + rkalman&adigida.com +9427 + Categoric Software + John Templer + jtempler&categoric.com +9428 + Orgenic Software + Vladimir Dozen + dozen&osw.com.ru +9429 + Mariinsky Theatre + Pavel Levshin + flicker&mariinsky.ru +9430 + Progress Telecom + Kenneth J. Smith + ksmith&progresstelecom.com +9431 + Push, Inc. + Removed 2011-02-20 + ---none--- +9432 + Vingage Corporation + Chuck Baenen + chuck.baenen&vingage.com +9433 + Vaticor Inc. + Jack Yorgey + jackyorgey&vaticor.com +9434 + Telergy Network Services + Ken Morehouse + kmorehouse&telergy.net +9435 + Boletin Oficial del Estado + David Guerrero + david&boe.es +9436 + Crysberg A/S + Leif Jensen + leif&crysberg.dk +9437 + Lancer Insurance Company + Tom Malone + tmalone&lancer-ins.com +9438 + YellowShirt Inc. + Canny Quach + cquach&yellowshirt.com +9439 + Green Point Pty Ltd + Nick Chu + chun&tpg.com.au +9440 + eko system Inc. + Jesse Kuhnert + jkuhnert&ekosystems.com +9441 + Wall Street Systems, Inc. + Lin Geng + lin.geng&wallstreetsystems.com +9442 + Atheros communications, Inc. + Jennifer Kuang + jkuang&atheros.com +9443 + Virage, Inc. + Chad West + cwest&virage.com +9444 + FireMon + Jody Brazil + jbrazil&fishnetsecurity.com +9445 + TJB + Timothy Butler + rotordyn&yahoo.com +9446 + Indiana University + David E. Jent + djent&iu.edu +9447 + Red Fern Software + Tom Redfern + thos&fonebone.net +9448 + Zyfer + Chris Law + cjl&zyfer.com +9449 + Florida State University + Greg Simpson + gregory.simpson&fsu.edu +9450 + Zyoptics Inc. + Malcolm Northcott + mnorthcott&zyoptics.com +9451 + digeo + Brian Smith + brian.smith&digeo.com +9452 + NetLabs SRL + Eduardo J. Blanco + ejbs&netlabs.com.uy +9453 + Barnardos New Zealand + Brendon Ryniker + brendonr&web.co.nz +9454 + future gate software GmbH + Ronny Bremer + rbremer&future-gate.com +9455 + Intelliclaim, Inc. + Robert M. Vittori + rvittori&intelliclaim.com +9456 + Allegro Networks + Bryan Levin + snmp&allegronetworks.com +9457 + Broadband Storage, Inc. + Keith Philpott + kphilpott&broadstor.com +9458 + TCSI, Inc + Peter Buckner + pbuck&tcsi.com +9459 + NileSOFT Ltd. + Cheon-Geun Kim + enrl&nilesoft.co.kr +9460 + Salta Monte Solutions, Inc. + Tom Poe + tompoe&source.net +9461 + y2FUN.com + Singuan Iap + iap&y2fun.com +9462 + MMLAB KTU + Jonas Punys + media&mmlab.ktu.lt +9463 + Geyer und Weinig GmbH + Axel von Dielingen + avd&gwtel.de +9464 + SIEL S.p.A. + Lionello Sacchi + lsacchi&sielups.com +9465 + Oldenburgische Landesbank AG + Ingo Stadtsholte + ingo.stadtsholte&olb.de +9466 + CILEA + Paola Tentoni + tentoni&cilea.it +9467 + iBanx B.V. 
+ Bart Selders + bart.selders&ibanx.nl +9468 + Universite de la Mediterranee (Aix Marseille 2) + Serge Portella + Serge.Portella&mediterranee.univ-mrs.fr +9469 + Starlab nv/sa + Ivo Clarysse + soggie&starlab.net +9470 + Alfa.con Team S.p.A. + Enzo Re + ere&alfaconteam.it +9471 + PortWise AB + Stefan Bogstedt + stefan.bogstedt&portwise.com +9472 + Jeje.org + Jerome Fleury + jeje&jeje.org +9473 + SafeStone Technologies PLC + Jeff Davis + jdavis&safestone.com +9474 + SIRT + Luke Guelfi + l.guelfi&sirt.tv +9475 + 365 Corporation + Malcolm Locke + malcolm.locke&365corp.com +9476 + ESRF + Staffan Olsson + ldapmaster&esrf.fr +9477 + Market Central, Inc. + Alan Gavaert + alang&mctech.com +9478 + MyLuckyWorld + Roger Mikalsen + roger&myluckyworld.com +9479 + WebCollage + Ori Sternberg + ori.sternberg&webcollage.com +9480 + Notus Key Limited + Benjamin Hille + benjamin.hille&notuskey.com +9481 + Backer Software Research + Rombout de Backer + rombout+iana&gewis.nl +9482 + ENS-Lyon + Martin Quinson + mquinson&ens-lyon.fr +9483 + PSI AG + Frank Bredenbroecker + fbredenbroecker&psi.de +9484 + ision france + Nicolas PY + npy&ision.fr +9485 + FUTUROCOM + Jean-Pierre BORG + jeanpierre.borg&futurocom.com +9486 + Powwow Germany GmbH + Jens Bollmann + jensb&powwowwater.de +9487 + saardata GmbH + Bernd Schmitt + b.schmitt&saardata.de +9488 + Circuit City Stores + Frank Smalara + domaintech&circuitcity.com +9489 + SAXA, Inc. (formerly 'Taiko Electric Works, LTD') + Yoshinari Hasegawa + hasegawa.yo&saxa.co.jp +9490 + Scorecon International + Ryan Denneny + rd&tap.net +9491 + VECTAN + Christian Beckmann + c.beckmann&vectan.de +9492 + The Cobalt Group + Marty Ahern + mahern&cobaltgroup.com +9493 + iOpen Technologies Ltd. + Richard Waid + richard&iopen.co.nz +9494 + IFILM + John Turk + turk&ifilm.com +9495 + etee2k.net + Ed Truitt + ed.truitt&etee2k.net +9496 + AT&T Enhanced Network Services + Dehui Peng + dpeng&attens.com +9497 + IDini Corporation + Swarraj Kulkarni + kulkarni&idini.com +9498 + Telekommunikation Mittleres Ruhrgebiet GmbH + Thomas Neumann + t.neumann&tmr.net +9499 + Exacube System Inc. + Byounggwon Gim + gbg&exacube.co.kr +9500 + Byte Alliance + Michael Bishop + mbishop&bytealliance.com +9501 + SVA-INTRUSION.COM Co. LTD + Sun Xian + sunxian&yahoo.com +9502 + Ross Technology Group, Inc. + Jason Mitchell + jmitchel&rtgx.com +9503 + Telemovil El Salvador + Roberto Carlos Navas + rcnavas&telemovil.com +9504 + tops.net GmbH & Co Online Publishing Services KG + Andreas Buschka + andreasbu&tops.net +9505 + bone labs GmbH + Burkhard Wiegel + b.wiegel&bonelabs.net +9506 + Real Solutions Ltd. + Brian Murphy + realsoln&ihug.co.nz +9507 + Wharton Electronics + Alastair S. Wharton + alastair&wharton.co.uk +9508 + MobileSpear Inc. + Nitzan Daube + nitzand&mobilespear.com +9509 + Enkash Payment Network + Denis Bider + enkash-oid&denisbider.com +9510 + KONSYS + Jacek Gretkiewicz + jacekg&konsys.com.pl +9511 + Vulkan Technic GmbH + Eric Hack + hack-consult&vulkantechnic.de +9512 + Online Consulting, Ltd. + Ivan Debnar + debnar&o-c.sk +9513 + Exacomm Systems + Jan van der Lugt + j.lugt&exacomm.nl +9514 + Konica Business Technologies, Inc. + Brian Spears + spearsb&konicabt.com +9515 + Sugar Creek Packing Co.
+ Todd Pugh + tpugh&sugar-creek.com +9516 + Digital World Services + Sean Corrigan + sean.corrigan&dwsco.com +9517 + Sanrise Inc + Suresh Ravoor + suresh.ravoor&sanrise.com +9518 + Pratt & Whitney + Douglas Conrath + conratda&pweh.com +9519 + Soward Network Solutions + John Soward + soward&soward.net +9520 + Phase2 Software Corp + Bill Spencer + bill&p2software.com +9521 + PED Computers + Joseph Engo + jengo&mail.com +9522 + Personeta + Ronen Wolfson + ronen&personeta.com +9523 + ST E15 + Sabine Feuchter + sabine.feuchter&icn.siemens.de +9524 + Andiamo Systems, Inc + Ian Wallis + iwallis&andiamo.com +9525 + BBC Monitoring + John Blakeley + john.blakeley&mon.bbc.co.uk +9526 + Shawmut Design & Construction + Russell Pitre + rpitre&shawmut.com +9527 + Inkras Networks Corporation + Ash Sundar + ash&inkra.com +9528 + Iyago + Neal Katz + nkatz&yahoo.com +9529 + Global Relay Inc. + Eric Parusel + eric.parusel&globalrelay.net +9530 + CQUR . COM + Duncan Matthew Stirling + duncan&cqur.com +9531 + Integra Telecom + David Stults + david.stults&integratelecom.com +9532 + Group 1 Software + Kyle Chapman + kyle_chapman&g1.com +9533 + TrueDisk + Don Buchholz + don&truedisk.com +9534 + Handrew + Jason Han + ccjhan&yahoo.com +9535 + Zurita Solutions + Brian Zurita + bz01&zurita.com +9536 + Massey University + Keith Linforth + soa&massey.ac.nz +9537 + Navox Corporation + Jacques Turcotte + jacques.turcotte&smisrd.com +9538 + StorageWay Inc. + Barry Van Hooser + bvh&storageway.com +9539 + STSN + Doug Burns + dburns&stsn.com +9540 + Altentia + Delavaux + delavaux&altentia.fr +9541 + Lokaal Netwerk Gravenstraat + B.A.M. van Zanten + Ben&van.Zanten.name +9542 + College of Engineering and Computer Science + Mike Wimple + wimple&ecs.csus.edu +9543 + Schwoo, Inc. + Jason Milletary + jason&schwoo.com +9544 + Electrical and Computer Engineering Dept., UBC + Robert Ross + admin&ece.ubc.ca +9545 + State Street Corp. + Gerard Puoplo + gpuoplo&statestreet.com +9546 + Dominet Systems + Denton Gentry + registrar&dominetsystems.com +9547 + foo Corporation + Jim Jones + jsjones&acm.org +9548 + Datachron Inc. + Clyde Davis + time&datachroninc.com +9549 + Southern Network Services, Inc. + Brian Snipes + bsnipes&southernnetwork.com +9550 + vox2vox Communications, Inc. + Manfred Koroschetz + mkoroschetz&vox2vox.com +9551 + Kompu-Art Studiu Komputerowe + Marek Habersack + grendel&vip.net.pl +9552 + Financial IQ Pty Ltd + Jye Tucker + jtucker&iqgroup.net +9553 + Automaton Ltd + Andrew Loh + andrew&automaton.com +9554 + Clear River Technologies, LLC + Dan Kuykendall + info&clearrivertech.com +9555 + AsiaInfo Technologies(China), Inc. + Jerry Jiang + jiangwr&asiainfo.com +9556 + Great Wall Broadbank Network Service Co.Lt + Gao Yu + gaoyu&gwbn.net.cn +9557 + Synso Inc. + Steve Tsai + steve&synso.com +9558 + Kent Ridge Digital Labs + Yang Lingjia + lingjia&krdl.org.sg +9559 + National Security Research Institute + Inkon Kang + firefly&etri.re.kr +9560 + SECUI + Namsik Yoon + namsik.yoon&secui.com +9561 + Netia Telekom S.A. + Piotr Roszatycki + piotr_roszatycki&netia.net.pl +9562 + Magnum Imperium Limited + Rob Hodgkinson + rob&magi.co.uk +9563 + Suffolk College + M Thomason + matthew.thomason&suffolk.ac.uk +9564 + mediaWays GmbH Internet-Services + Lars Uffmann + lars.uffmann&mediaways.net +9565 + Xerox Mobile Solutions + Andrew Everitt + andrew.everitt&gbr.xerox.com +9566 + HBO-CE + Lajos Okos + lajos&hbo.hu +9567 + Skilldeal AG + Dr. 
Guido Seifert + gseifert&skilldeal.com +9568 + DataGrid + Alex Martin + a.j.martin&qmw.ac.uk +9569 + MGF Logistique + Philippe Bezamat + phb&mgf.fr +9570 + University of Leoben/Austria + Zentraler Informatikdienst + kamenik&unileoben.ac.at +9571 + Erskine Systems Ltd + Simon T Smith + simon.smith&erskine-systems.co.uk +9572 + AlienThing.com + Jeremy Nicholas + boyalien&yahoo.com +9573 + MB-NET + Michael Bussmann + iana&mb-net.net +9574 + NCIC Clinical Trials + Mike Broekhoven + mbroekhoven&ctg.queensu.ca +9575 + Grupo de Programação na Web(WPG) + Rodrigo Prestes Machado + rpm&atlas.ucpel.tche.br +9576 + Crédit Mutuel de Bretagne + Tanguy Eric + partnre.tanguer&partnre.com +9577 + Xtelligent IT Consulting GmbH + Gottfried Scheckenbach + info&xtelligent.de +9578 + Mission Critical Linux, Inc. + Kris Corwin + corwin&MissionCriticalLinux.com +9579 + Followap + Dror + dror&followap.com +9580 + New Hemisphere Computer Services + Marcus Patton + marcuspatton&onetel.net.uk +9581 + Quad One Technologies Pvt. Ltd. + K. Sarath Chandra + sarath&quad-one.com +9582 + CCS Communications Pty. Ltd. + John Lewis + johnl&ccscomms.com.au +9583 + BBC Technology Supply Ltd + Danny Cooper + danny.cooper&bbc.co.uk +9584 + Lucent Technologies NADP + Roman Biesiada + bies&lucent.com +9585 + National Enhance Technology Corp. + Blake Hung + netsys&netsys.com.tw +9586 + Debian + Wichert Akkerman + iana&debian.org +9587 + Global Trust Authority + John Tunstall + john.tunstall&gta.multicert.org +9588 + IT Insourcing + Claude Leuchter + leuchter&it-insourcing.ch +9589 + Currenex, Inc. + Tino Tran + tino.tran&currenex.com +9590 + The MathWorks Inc. + Joe Steele + jsteele&mathworks.com +9591 + Thinking Cat Enterprises + Leslie Daigle + leslie&thinkingcat.com +9592 + Luke Crawford Enterprises + Luke Crawford + luke&prgmr.com +9593 + SysNet Informática Ltda + Alvaro Manoel Martins Pereira + alvaro&sn-sysnet.com +9594 + Tape Products Company + David Frampton + daf&tapeproducts.com +9595 + Zephion Networks + Srinivas Upadhyaya + srinivas&zephion.net +9596 + PimpCode + Marc Martinez + lastxit&pimpcode.org +9597 + Blue Silicon + Jian Zheng + jzheng&blue-silicon.com +9598 + MBNA America + Roland Trevino + roland.trevino&mbna.com +9599 + LDAPGURU.COM LLC + Rohan Pinto + rpinto&ldapguru.net +9600 + Williams Technology Consulting Services + Garth K. Williams + garth.k.williams&gmail.com +9601 + Keewaytinook Okimakanak + Adi Linden + adilinden&knet.ca +9602 + Zoftech Inc. + Dustin Norman + dustin&zoftech.com +9603 + AF Enterprises + Andrew Finlay + andrew&andrewfinlay.com +9604 + afuu + Jean Brange + jean&afuu.fr +9605 + Xyterra Computing Inc + Joe Chisolm + jchisolm&xyterra.com +9606 + Linvision B.V. + Roel van Meer + r.vanmeer&linvision.com +9607 + MSTC + Hideichi Okamune + okamune&honbu.mstc.or.jp +9608 + ntels + Oh SungHwan + shoh&ntels.com +9609 + Impaster Co. Ltd + Jun Sheng + js_tessida&yahoo.com +9610 + Milosch.net + Miles Lott + milos&speakeasy.net +9611 + Infosim + Marius Heuler + support&infosim.com +9612 + Kobe Steel, Ltd.
+ Hideo Ikeda + h-ikeda&rd.kcrl.kobelco.co.jp +9613 + nlsde + Jiangning + jiangning&nlsde.buaa.edu.cn +9614 + GoCruiseDirect.com + Steve Summerlin + ssummerlin&stertech.com +9615 + Danfoss A/S + Allan Sandbak + AS&danfoss.com +9616 + Kreatel Communications AB + Tomas Hagström + tomas.hagstrom&kreatel.se +9617 + GSX Groupware Solutions + Rodouan El Hichou + relhichou&gsx.net +9618 + Utrechtse Werkbedrijven + Rob Janssen + r.janssen&uw-werk.nl +9619 + Nextra Austria + Andreas Zoettl + andreas.zoettl&nextra.com +9620 + National Land Survey of Finland + Computing Centre + hostmaster&nls.fi +9621 + MOLO AFRIKA SPEECH TECHNOLOGIES + Charl Barnard + info&molo.co.za +9622 + QuesCom + Stephan Malric + stephan.malric&quescom.eu +9623 + Copenhagen Airports + IT Hostmaster + hostmaster&cph.dk +9624 + Radianz + Luke Yee + luke.yee&radianz.com +9625 + Utesch + Christian Utesch + christian&utesch-online.de +9626 + Alessandro Triglia + Alessandro Triglia + sandro&mclink.it +9627 + S J Shipinski Services + Stu Shipinski + stuship&mediaone.net +9628 + Groupe CASINO + David Barrat + dbarrat&groupe-casino.fr +9629 + Mastermind Technologies + Chris Mangold + cmangold&mmtmail.com +9630 + e-moveum + Fco Javier Lopez + fjlopez&e-moveum.com +9631 + eSniff, Inc. + Jack Flory + jack.flory&esniff.com +9632 + Individual + Adi Vaizovic + adi.vaizovic&zaslon-telecom.si +9633 + Calian, Ltd. + Wendy Cronin + wendy.cronin&calian.com +9634 + API Networks, Inc + Eric Rowe + eric.rowe&api-networks.com +9635 + Tropico Sistemas e Telecomunicações SA + Sileno Rocco + sileno.rocco&tropiconet.com.br +9636 + netFORMAT GmbH + Mario Beck + beck&netformat.de +9637 + Alliance & Leicester PLC + Paul Thompson + paul.thompson&alliance-leicester.co.uk +9638 + AlphaGraphics, Inc. + Greg Bailey + gbailey&alphagraphics.com +9639 + Appium AB + Thomas Tallinger + thomas.tallinger&appium.com +9640 + Astrolink International LLC + Jerry Hannon + jhannon&astrolink.com +9641 + Azusa Pacific University + James Janssen + jjanssen&apu.edu +9642 + Bibop Research, int. S.p.A. + Gianugo Rabellino + g.rabellino&bibop.it +9643 + CEULP/ULBRA + Augusto Jun Devegili + devegili&ulbra-to.br +9644 + Unassigned + Returned 2017-09-07 + ---none--- +9645 + courtade.net + Jeff Courtade + jeff&courtade.net +9646 + Eaton - Cooper Industries (Electrical) Inc. (formerly 'Cybectec') + Remi Dutil + remidutil&eaton.com +9647 + EWIP + Marius Costeschi + marius.costeschi&ewir.ro +9648 + GeekBone Co. LTD + Jimmy Cheng + jacob_d&usa.net +9649 + Hochschule Bremen + Uwe Jans + jans&hs-bremen.de +9650 + Infoclan + Vijay Kumar + vijay.c.kumar&mail.sprint.com +9651 + Kele + Wendell Murray + wendellm&kele.com +9652 + LNS SA + Mike Reilly + admin&lns.ch +9653 + Mosotech Limited + Bryan Wu + llwu&yahoo.com +9654 + NIC France + Lubrano Philippe + lubrano&nic.fr +9655 + Optidev AB + Anders Ericsson + anders.ericsson&optidev.se +9656 + Routit BV + Leon Boshuizen + llcboshuizen&routit.nl +9657 + S://Scape Communications Inc. + Colin Ryan + colinr&s-scape.ca +9658 + Solers + James Globe + jglobe&solers.com +9659 + Synapsys Ltd + Luke Hart + luken&synapsys.com +9660 + Telecel Comunicações pessoais S.A + Pedro Aleixo + aleixop&telecelvodafone.pt +9661 + Teletron INC + Sangwoo Jin + swjinjin&teletron.co.kr +9662 + TUEV-AT-TRUST Center GmbH + Stickler Gerald + stickler.gerald&tuev.or.at +9663 + UNIVERSITE LUMIERE LYON2 + Pierre Ageron + Pierre.Ageron&univ-lyon2.fr +9664 + Widgital + Kristy McKee + k&widgital.com +9665 + ZyGATE Communications Inc.
+ Astro Lin + astrolin&zygate.com.tw +9666 + Alcatel Optronics + Bernadette Leclaire + bernadette.leclaire&alcatel.fr +9667 + Austar United + Ronald Yii + ryii&austar.com.au +9668 + Talent Network Security Technology Co., Ltd. + Wanli Liu + liuwanli&263.net +9669 + University of London + Malcolm Ray + oids&ulcc.ac.uk +9670 + BROADi + bytexu + bytexu&sina.com +9671 + Time Warner + Bill Burns + oid-admin&aol.net +9672 + Apropos Technology, Inc. + Mike Schisler + mike.schisler&apropos.com +9673 + Denison University + Charlie Reitsma + reitsmac&denison.edu +9674 + Fingerhut + LANWAN Group + lanwan&fingerhut.com +9675 + Grupo Financiero Uno + Fernando Medina + fmedina&nic.pibnet.com +9676 + hottis.de + Wolfgang Hottgenroth + woho&hottis.de +9677 + Hydriads + Jose González Gómez + jgongo&mail.com +9678 + K.U. Leuven + Jan Du Caju + sst&kulnet.kuleuven.ac.be +9679 + mBalance + Eloy Nooren + eloy.nooren&mbalance.com +9680 + SelectMetrics, Inc. + Chris Lamb + hostmaster&selectmetrics.com +9681 + Mortgage Systems International + David Max + davidmax&mtgsi.com +9682 + Novadeck + Jean-Philippe Schaller + jps&novadeck.com +9683 + Plattsburgh State University + David Anderson + andersdl&plattsburgh.edu +9684 + PoweriseNet + Hu Yajun + yajun_hu&yahoo.com +9685 + Ranch Networks, Inc. + Satyen Chandragiri + satyen&ranchnetworks.com +9686 + Ruby Networks, Inc. + Payam Mirrashidi + payam&rubynetworks.com +9687 + SAProperty.com + Geoff Payne + geofflp&webmail.CO.za +9688 + Tahoe Networks, Inc. + Steve Alexander + sca&tahoenetworks.com +9689 + TBD Networks + Norbert Kiesel + nkiesel&tbdnetworks.com +9690 + Terralab + Adam Hull + adam&terralab.com +9691 + Children's Hospital Colorado (formerly 'The Children's Hospital of Denver') + Nobuo Matsushita + nobuo.matsushita&childrenscolorado.org +9692 + Tool Object + Guillaume Cardoner + gcardoner&toolobject.fr +9693 + UND School of Medicine and Health Sciences + Barry Pederson + barryp&medicine.nodak.edu +9694 + Arbor Networks + SNMP Administration + snmp&arbor.net +9695 + Encotone Ltd. + Ted Kandell + hyehosha&encotone.com +9696 + Uffizio India Software Consultants Pvt. Ltd. + Macneil Mendes + rajeshvn&rediffmail.com +9697 + Accelance + Thomas Favier + tf&accelance.fr +9698 + AIA Australia + Andrew Cosgriff + ajc&aiaa.com.au +9699 + Axis Consulting + Bill Brower + bbrower&axc.com +9700 + Boehringer-Ingelheim GmbH + Lothar Schildwaechter + schildwa&ing.boehringer-ingelheim.com +9701 + Commerce NTI + Mark Hollow + mhollow&commercenti.com +9702 + Comtech Belgium + Jean Michalski + jmichalski&comtech.be +9703 + Datafoundation Inc. + Valerii Soika + soika&datafoundation.com +9704 + Diversified Resourceful Solutions Inc. + David Verbrugge + drs&axxent.ca +9705 + ecos gmbh + Gerald Richter + richter&ecos.de +9706 + HackersLab + Byunghak Kim + bhkim&hackerslab.com +9707 + Institut National Polytechnique de Lorraine + Benoit Marchal + Benoit.Marchal&ensem.inpl-nancy.fr +9708 + Jack Morton Worldwide + Adam Hull + adam_hull&jackmorton.com +9709 + Micro Connect Pty Ltd + Craig Price + craig.price&microconnect.com.au +9710 + Monzoon Networks AG + Matthias R. Koch + support&monzoon.net +9711 + Novedia + Tuneke + tuneke&novedia.de +9712 + obzen Inc.
+ SungUng Yoo + ysu&obzen.com +9713 + Opera Software AS + Krystian Zubel + kzubel&opera.com +9714 + Ruhrsoft + Frank Bredenbröcker + frank.bredenbroecker&ruhrsoft.de +9715 + Sigma AB + Pia Westerberg-Selin + pia.selin&sigma.se +9716 + State of North Dakota + Jeff Carr + jcarr&state.nd.us +9717 + Teleca AB + Göran Johansson + goran.johansson&teleca.com +9718 + TransACT Communications + Mark Hill + mark.hill&transact.com.au +9719 + Tronicplanet Online Datendienst GmbH + Thoni Bernhard + webmaster&tronicplanet.de +9720 + Tryx + Rik Tytgat + rik.tytgat&tryx.com +9721 + VWR International + Terrence Meehan + tmeehan&vwrsp.com +9722 + Workhorse Computing + Steven Lembark + lembark&wrkhors.com +9723 + Coradiant + Frederic Dumoulin + fdumoulin&coradiant.com +9724 + Datacast Inc. + Christian Trepanier + ctrepanier&datadact.com +9725 + INFOservice + Hellmut Adolphs + hellmut_adolphs&infonet.it +9726 + Nitroba + Simson L. Garfinkel + simsong&nitroba.com +9727 + Tryllian + Y. Duppen + yigal&tryllian.com +9728 + WWF Wort + Ton GmbH e-media + Marc Pohl + marc.pohl&wwf-gruppe.de +9729 + EVERelite, Co., Ltd. + Nicholas Hsiao + nicholas.hsiao&ever.com.tw +9730 + Solutus Pty Limited + Megennis Weate + magennis.weate&solutus.com.au +9731 + 5down + Benny Morrison + benny_morrison&yahoo.com +9732 + Adrenaline Ingenierie Multimedia + Alexandre Allien + info&aim.fr +9733 + Aral Systems GmbH + Frank Baasner + frank.baasner&aral.de +9734 + Astrum Software Corporation + James Moody + james&astrumsoftware.com +9735 + Coredump Ky + Taneli Leppä + rosmo&coredump.fi +9736 + Dublin City University + Donal Hunt + sysops&dcu.ie +9737 + Fourtheye + Bob Wilkinson + bob9960&btinternet.com +9738 + Gedoplan GmbH + Stephan Zdunczyk-kohn + szk&gedoplan.de +9739 + Hobnobbers.net + Richard Stobart + richardstobart&mail.com +9740 + iBEAM Broadcasting Corporation + Stefan Hudson + shudson&ibeam.com +9741 + Keiren + Frank Keiren + frank&keiren.net +9742 + DESCOM CONSULTING, S.L. + Luis Colorado + luis.colorado&descom.es +9743 + NCHICA + W. Holt Anderson + holt&nchica.org +9744 + peoplecall.com + Francisco Olarte Sanz + folarte&peoplecall.com +9745 + Radix Controls Inc. + Marc Noel + marcn&radixcontrols.com +9746 + RFNet Technologies Pte Ltd + Tan Chin Thong + cttan&acm.org +9747 + RoamingTest Ltd + Steve Webster + swebster&roamingtest.com +9748 + ROW Software and Web Design + Kurt Weber + kmw&rowsw.com +9749 + SpectraLink Corporation + Mark Hamilton + mah&spectralink.com +9750 + The Pillars + Robert August Vincent II + sysadmin&pillars.net +9751 + Trevilon Corp. + Kenneth Vaughn + kvaughn&trevilon.com +9752 + University of Missouri + Tony Rivera + tony&missouri.edu +9753 + Vernier Networks, Inc. + Ken Klingman + kck&verniernetworks.com +9754 + Vertis DSG Chicago + Martyn Wilson + mwilson&ltcgroup.com +9755 + Webasto Informationssysteme GmbH + Markus Svec + swsadminnt&webasto.de +9756 + woerd Erdmenger & Wolter GbR + Joerg Erdmenger + joe&woerd.com +9757 + Aral Aktiengesellschaft & Co. KG + Thomas Vögel + thomas.voegel&aral.de +9758 + Metadigm Ltd + Alan McRae + amcrae&metadigm.co.uk +9759 + Aplicaciones y Consultoria, S.A. de C.V. + Javier Arturo Rodriguez + jrodriguez&agharti.org +9760 + Cellcast + Or Tal + ortal&cellcast.com +9761 + Corporate Express + Vince Taluskie + vince.taluskie&cexp.com +9762 + Datacom Systems Inc. + Andrew M. Perkins + aperkins&datacomsystems.com +9763 + Eonite, Inc. + Sean Casey + sean&eonite.com +9764 + Forschungszentrum Rossendorf e.V.
+ Nils Schmeisser + N.Schmeisser&fz-rossendorf.de +9765 + ILRYUNG TELESYS + Ohduck Kwon + haming&irts.co.kr +9766 + Joseph S Dovgan DDS MS + Joseph S Dovgan + jsdovgan&uswest.net +9767 + Lectron CO., LTD + Grant Kang + grant&lectron.com.tw +9768 + Lynchburg College + Paul Spaulding + spaulding_w&mail.lynchburg.edu +9769 + Nuance Communications + Floyd Haas + fhaas&nuance.com +9770 + OpenOffice.org + Lutz Hoeger + lho&openoffice.org +9771 + PAION Co. Ltd., + Someshwar Parate + parate&paion.com +9772 + pentasecurity + Lee Hyo Seob + tosun&pentasecurity.com +9773 + SpunHead Industries + Floyd Haas + fhaas&ix.netcom.com +9774 + Streets Online Ltd. + Andrew Schofield + andrews&infront.co.uk +9775 + TNT Software, Inc. + Steve Taylor + stevetaylor&tntsoftware.com +9776 + Wireless Network Services + Erkki Seppälä + erkki.seppala&wnsonline.net +9777 + Arelnet Ltd. + Avi Gliksberg + avig&arelnet.com +9778 + AxONLink + Marc Zerdoug + marc.zerdoug&ecitele.com +9779 + Empire State College + Bill Melvin + hostmaster&esc.edu +9780 + Entreprise des Postes et Telecommunications + Laurent Sliepen + laurent.sliepen&ept.lu +9781 + Jerntorget Sverige AB + Peter Östlund + ma&jerntorget.se +9782 + JM Family Enterprises, Inc. + Monitoring Admin + monitoringadmin&jmfamily.com +9783 + Vayusphere, Inc. + Pushpendra Mohta + pushp&vayusphere.com +9784 + icoserve information technologies + Raimund Vogl + r.vogl&icoserve.com +9785 + Praxis Technical Group, Inc. + Graeme Smecher + gsmecher&home.com +9786 + A.I.S. Angewandte Informations-Systeme GmbH + Andreas Heidemann + a.heidemann&windream.com +9787 + ActiveState Corp. + Massoud Sarrafi + massouds&activestate.com +9788 + Advisor Technologies Limited + James Unitt + james.unitt&advisortechnologies.com +9789 + Astaro AG + Dennis Koslowski + dkoslowski&astaro.de +9790 + Australia On Line + Michael Bethune + mike&ozonline.com.au +9791 + Centre de Telecomunicacions i Tecnologies de la Informacio + Jaume Diaz Serret + jdiaz&ctele.gencat.es +9792 + Clayhill KG + Christoph Grigutsch + cg&clayhill.de +9793 + CloudShield Technologies, Inc. + Neal Matthews + neal&cloudshield.com +9794 + CYTBeN + Bryce Wade + bryce&cytben.com +9795 + EBS Dealing Resources, Inc + Mike Merold + mmerold&ebsdr.com +9796 + ENX Association (formerly 'European Network Exchange') + Immo Wehrenberg + immo.wehrenberg&enx.com +9797 + EVER Sp. z o.o. + Piotr Strzelecki + pstrzelecki&ever.com.pl +9798 + Griff-IT Ltd. + Jon Griffiths + jon.griffiths&griff-it.com +9799 + Holos Software, Inc. + Frank S. Glass + fsg&holos.com +9800 + iNIT-8 + Steffo Weber + s-weber&init8.net +9801 + ISIS Frontier Communications + Kitt Diebold + isis_system&frontiernet.net +9802 + IUFM de Rouen + Nathalie Delestre + Nathalie.Delestre&rouen.iufm.fr +9803 + L-3 Communication Systems - East + Heshy Bucholz + robert.bucholz&l-3com.com +9804 + Lefthand Networks + Jose Faria + jfaria&lefthandnetworks.com +9805 + Nevada City School District + Bill MacAllister + bill.macallister&ncsd.k12.ca.us +9806 + PRIDE Industries + Bill MacAllister + bill.macallister&prideindustries.com +9807 + Prism Holding Limited + Piet de Weerdt + pietdw&prism.co.za +9808 + Rocklea Spinning Mills Pty Ltd + Grant Coffey + itdept&rocklea.com.au +9809 + Root, Int.
+ David Ulevitch + davidu&phreedom.net +9810 + Sagent + Derek Brouwer + dbrouwer&sagent.com +9811 + Secure Appliance Systems + Pascal André + andre&enetten.com +9812 + SolNet + Markus Binz + mbinz&solnet.ch +9813 + TAB Queensland Limited + Darryl Green + green&tabq.com.au +9814 + Telia IT-Service AB + Magnus Hoglund + magnus.j.hoglund&telia.se +9815 + VSpace, Inc. + Jeffrey C. Byrd + admin&vspacegroup.com +9816 + WANWALL + Alexander Finkelshtein + alex&wanwall.com +9817 + WaveNET International (Pvt) Ltd. + Vance Shipley + vance&wavenet.lk +9818 + Xwave GmbH + Marcus Spiegel + marcus.spiegel&xwave.ch +9819 + yLez Technologies Pte Ltd + Leonard Ye + leonard_ye&ylez.com +9820 + ZOX-IT + Markus Sell + marcus.sell&zox-it.com +9821 + Zoznam s.r.o. + Radoslav Volny + volny&firma.zoznam.sk +9822 + Accella Technologies Co., Ltd + Linwei + linwei&accellatek.com +9823 + Diageo + John Pooley + john.pooley&diageo.com +9824 + GenTek + Hugo Latapie + latapie&we.mediaone.net +9825 + Geospiza Inc. + Andrew Leonard + andy&geospiza.com +9826 + ImagiCode Studios + Robert Marano + rob&imagicode.com +9827 + Neocles + Leclere Alexandre + alecler&neocles.com +9828 + pgamAPD + Kip Cranford + kip.cranford&pgamapd.com +9829 + SAIT Ltd + Andrew Kouzmenko + sait-ltd&mtu-net.ru +9830 + Thunderbird, the American Graduate School of International Management + Johan Reinalda + netadmin&t-bird.edu +9831 + Vivre Inc + Piyush Srivastava + piyusrivastav&hotmail.com +9832 + solobird + Jayeul Ku + solobird&orgio.net +9833 + Legend Holdings Ltd. + Chunsheng Xu + xucs&legend.com +9834 + Agencja Uslug Informatycznych ARTCOMP + Miroslaw Olkusz + artcomp&lomza.man.pl +9835 + Alien Network + Sergey Budyanskiy + svb&alien.ru +9836 + AOK Bayern – Die Gesundheitskasse + Rico Rieger + rico.rieger&kubus-it.de +9837 + Ball State University + OID Administrator + oidadmin&bsu.edu +9838 + Broadview Networks, Inc. + Bob Hill + bhill&broadviewnet.com +9839 + CAREL + Sebastiano Fiorindo + sebastiano.fiorindo&carel.com +9840 + carreregroup + stéphane camberlin + urnammu&free.fr +9841 + Christian Dusek EDV-Dienstleistungen + Christian Dusek + iana-pen&lists.dusek.ch +9842 + ET Networks + Edward Tulupnikov + et&et.com.ru +9843 + Kai Industries + Warren Kimberlin + wkimberl&engineer.com +9844 + Kansas State University + Neil Erdwien + neil&ksu.edu +9845 + LANBIRD Technology Co., Ltd. + Jong-Min Park + chianghs&lanbird.co.kr +9846 + LIP - Laboratório de Instrumentação e Física Experimental de Partículas + Jorge Gomes + jorge&lip.pt +9847 + Listen.com + Matthew T. Darling + mdarling&listen.com +9848 + Lou Johnson + Lou Johnson + lou&xmlstrategies.net +9849 + M-Web Indonesia + Edwin Pratomo + edwin&satunet.com +9850 + NextGig + Jeremy Worrells + jworrell&nextgig.com +9851 + NTT SOFT + Satoshi Yamashita + s_yama&po.ntts.co.jp +9852 + Odigo Inc + Ilan Ravid + ravid&odigo.com +9853 + Payment Technologies + Jim Hively + jim.hively&paytec.com +9854 + Red Box Recorders Limited + Robert Jones + rjones&origin-data.co.uk +9855 + Shawsoft GmbH + Michael Shaw + mike.shaw&shawsoft.com +9856 + UCLA Communications Technology Services + Mary Latham + mlatham&cts.ucla.edu +9857 + Unicom an e-security company + Diego Martin Barreiro Fandiño + dmartin&unicomsecurity.com +9858 + University of Maryland, Baltimore County + Robert Banz + robert.banz&umbc.edu +9859 + University of Medicine and Dentistry of New Jersey + Clifford Green + green&umdnj.edu +9860 + Vizional Technologies, Inc.
+ Gregory Kaestle + gkaestle&vizional.com +9861 + Webdesign Internet Service GmbH + Andreas Fischer + af&wdis.de +9862 + Claredi Corporation + Travis Stockwell + travis.stockwell&claredi.com +9863 + DataRojahn A/S + Asbjørn Krogtoft + krogtoft&hotmail.com +9864 + J Walter Thompson Company + Chad Milam + chad.milam&jwt.com +9865 + Karmanos Cancer Institute + Jennifer Telisky + teliskyj&karmanos.org +9866 + Outokumpu Oyj + Juha Koljonen + juha.koljonen&outokumpu.com +9867 + Third Rail Americas + John Kenney + john.kenney&third-rail.net +9868 + Millinet Co., Ltd + Chang-Hyeon Choi + choitut&millinet.co.kr +9869 + Network Laboratory of Nankai University + Qiu Lin + qiulin&eyou.com +9870 + graviton, Inc. + Karyn Haaland + khaaland&graviton.com +9871 + Findlater Wine Merchants + Axel Dahl + aksel&esatclear.ie +9872 + GEZ + Frank Frost + frank.frost&gez.de +9873 + Viterra AG + Helmut Brachhaus + helmut.brachhaus&viterra.com +9874 + Åbo Akademi University + Barbro Sjöblom + babo&abo.fi +9875 + AVAILABLE SUPPLY Inc. + Elizabeth Johnson + availablesupply&gwe.net +9876 + B2I Toulouse + Francois GOAS + francois.goas&b2i-toulouse.com +9877 + California State University, East Bay + Kent C. McKinney + kent.mckinney&csueastbay.edu +9878 + CareGroup HealthCare System + Scott A. McWilliams + scott&caregroup.org +9879 + Cox Communications, Las Vegas + Steven Bruen + steve.bruen&cox.com +9880 + Data Junction Corporation + John Fogarty + jfogarty&datajunction.com +9881 + Eita Technologies, Inc. + Greg Hester + ghester&crosslogic.com +9882 + Flash Technology + Bradley T. Overton + brad.overton&americantower.com +9883 + Free Trade Online + Chen Chi-Wen + scott&fto.com.tw +9884 + Frost-EDV + Frank Frost + frank.frost&frost-edv.de +9885 + Green Packet, Inc. + Liangsheng Chen + lschen&greenpacket.com +9886 + Incendiary Networks + Jeremy Friedenthal + jeremyf&incendiary.net +9887 + Kristina Internet Business Solutions + Alejandro Rojo + arojo&kristina.com +9888 + Linux Terminal Server Project (LTSP) + James McQuillan + jam&mcquil.com +9889 + Winpresa Building Automation Technologies GmbH + Thomas Pischinger + Thomas.Pischinger&winpresa.com +9890 + Media General Inc + Chris Fanis + cfanis&mediageneral.com +9891 + Mensatec + David Font + dfont&mensatec.com +9892 + Oregon Graduate Institute of Science and Technology + David Barts + dbarts&cse.ogi.edu +9893 + Paranormal Sweden + Jonas Liljegren + jonas&paranormal.se +9894 + Polestar Corp. + Seo, YongJin + yongjin&polestar.co.kr +9895 + Project49 AG + Florian Unterkircher + florian&project49.de +9896 + R&R Associés + P. Varet + varet&iie.cnam.fr +9897 + Seranoa Networks + Sam Shen + sam.shen&seranoa.com +9898 + Shindengen Electric Mfg. Co., Ltd. + Hiroshi Minegishi + minegishi&shindengen.co.jp +9899 + Stinnes AG + Frank Valdor + frank.valdor&stinnes.de +9900 + Telekom Applied Business Sdn. Bhd. + Zulhedry Abdul Manaf + zulhedry&tab.com.my +9901 + The College of William and Mary + Chris peck + crpeck&wm.edu +9902 + The University of Chicago + Bob Bartlett + rdb&uchicago.edu +9903 + TI SQUARE Technology + Jae Sil Leem + jsleem&tisquare.com +9904 + University of Nebraska + Greg Gray + ggray&nebraska.edu +9905 + Volubill + Pierre Rondel + pierre.rondel&volubill.com +9906 + Xtera Communications Inc. + Paul Pantages + pdp&xtera.com +9907 + Akkaya Consulting GmbH + Juergen Pabel + jpabel&akkaya.de +9908 + HiTRUST.COM (HK) Incorporated Limited + Kevin Liou + kevinl&hitrust.com +9909 + Pacific Technology Services + Kevin Chien + kchien&pusa.com +9910 + Absolute Software Corp.
+ Ben Xu + benxu&absolute.com +9911 + Bantu, Inc. + Justin Fidler + sysadmin&team.bantu.com +9912 + Baraga Telephone Company + Aaron LaBeau + noc&up.net +9913 + Cerebra Integrated Technologies Ltd. + Mahesh R. + mahesh_r&cerebracomputers.com +9914 + Charter Communications + Ron Cooper + rcooper&chartercom.com +9915 + Commtouch Software Ltd + Arnon Ayal + arnon&commtouch.com +9916 + Datek Online Holdings Corp. + Jozef Skvarcek + jskvarcek&datek.com +9917 + Corecard Software + Brian Beuning + bbeuning&corecard.com +9918 + National Oceanic and Atmospheric Administration (NOAA) + Jefferson Ogata + IANA.PEN.9918&noaa.gov +9919 + earth9.com Pte Ltd + Earl Allan + earl&earth9.com +9920 + ENETCO GmbH + Stephan Meystre + welcome&enetco.ch +9921 + ERGO Versicherungsgruppe AG + Klaus Niehues + klaus.niehues&itergo.com +9922 + eSign Australia Limited + Richard Culshaw + rculshaw&esign.com.au +9923 + European Southern Observatory + Michael Naumann + michael.naumann&eso.org +9924 + Excite UK Ltd + Steven Haslam + steve.haslam&excitehome.net +9925 + Finest Webs + Frank Leprich + f.leprich&finest-webs.net +9926 + Genie Network Resource Management + Leah Bayer + leah&genienrm.com +9927 + Icehouse Net Services + Brendan Cassida + brendanc&icehouse.net +9928 + Inetrify + Marcus Adair + marcad&convineo.com +9929 + Integral Concepts, Inc. + Les Matheson + les&ivsds.com +9930 + InterNexus Plc + Peggy McNeal + peggy&inter-nexus.com +9931 + Inti + Heungkyu Lee + hklee&inti.co.kr +9932 + IPFilter + Darren Reed + darrenr&ipfilter.com.au +9933 + JAARS, Inc + Chris Sheeran + chris_sheeran&sil.org +9934 + JANUS Research Group + Scott Martin + scott.martin&janusresearch.com +9935 + Louisiana Tech University + Daniel Schales + dan&latech.edu +9936 + massconfusion.com + Palmer Sample + lsample&massconfusion.com +9937 + MaXXan Systems, Inc. + Nathan Chu + nvc&maxxan.com +9938 + Mercury Corporation + Amit Garg + amass&india.mercurykr.com +9939 + merNet Security + Mike Robbins + merobbins&yahoo.com +9940 + Nine Tiles + John Grant + j&ninetiles.com +9941 + Objective Reality Computer LLC + Nick McKinney + mckinney&oreality.com +9942 + Optimation Software Engineering + Ron Miller + ronm&optimation.com.au +9943 + Prometheon, Inc. + Ryan Nix + rnix&prometheon.net +9944 + SIAS Ltd + Pete Sykes + pete.sykes&sias.com +9945 + Siemens Business Services AB + Hans Jonsson + hans.jonsson&siemens.se +9946 + SoftSol India Limited + Srinivas B + srinivas&softsolindia.com +9947 + Swales Aerospace, Inc. + Dave Landis + dlandis&swales.com +9948 + Telekurier GesmbH & Co KG + Markus Lirnberger + markus.lirnberger&telekurier.at +9949 + Ubiquitous Corp. + Ken Kuroda + kuroken&ubiquitous.co.jp +9950 + Visionary Communications Inc. + Bryan Hawkins + bryan.hawkins&v-comm.net +9951 + Wireless M2M, LLC + Jack Lavender + jlavender&wirelessm2m.net +9952 + WuhanTIT + Shaofeng Hu + hsf&mail.hust.edu.cn +9953 + Bertelsmann Gamechannel + Marc Czichos + marc.czichos&gamechannel.de +9954 + craigbuchek.com + Craig Buchek + iana&craigbuchek.com +9955 + Crisp Hughes Evans L.L.P.
+ Nathaniel Wingfield + nwingfield&che-llp.com +9956 + NDS Surgical Imaging (formerly 'Planar Systems, Inc.') + Francis Sung + fsung&ndssi.com +9957 + Dynamic Equilibrium Pty Ltd + Ross Thomson + rthomson&dyneq.com.au +9958 + Formula/400 + Balázs Molnár + mob&formula400.hu +9959 + NODS + Reyes Patrick + reyespatrick&hotmail.com +9960 + iOpsMon (formerly 'nuisys.com') + Deepak Chohan + deepak_chohan&hotmail.com +9961 + FundsXpress Financial Network + Ken Gibson + kgibson&fundsxpress.com +9962 + VirCIO, LLP + Chris Garrigues + cwg-iana&deepeddy.com +9963 + Advanced Computer Technologies (ACT) Inc. + Suyoung Park + parksy&swplaza.co.kr +9964 + 50km Inc. + David Uhlman + iana_mail&50km.com +9965 + Amministrazione Provinciale di Grosseto + Aldo Vigetti + a.vigetti&provincia.grosseto.it +9966 + Beijing Huahuan Electronics Ltd. + Sun Minghai + sunminghai&huahuan.com +9967 + Bluesocket, Inc. + Suresh Gandhi + sgandhi&bluesocket.com +9968 + British Antarctic Survey + Jeremy Robst + jpro&bas.ac.uk +9969 + Condat AG + Jens Annighoefer + ja&condat.de +9970 + Custodix NV + Daniel Lucq + snmp&custodix.com +9971 + CyberTel, Inc. + Mat Curtis + matthew&cybertel.com +9972 + Emulive Imaging Corporation Inc. + Jory Anick + jory&emulive.com +9973 + Flexsys (UK) Ltd + Allan Latham + alatham&flexsys-group.com +9974 + Internet Transaction Services, Inc.(iTrans) + Martin Compton + martinc&itrans.com +9975 + J.W. Jensen, Inc. + Jim Jensen + jensen&computer.org +9976 + Flexsys Inc + Allan Latham + alatham&flexsys-group.com +9977 + jobpilot AG + Alexander Spannagel + spannagel&jobpilot.de +9978 + Linuxkorea, Inc. + Keun-woo Ryu + updong&linuxkorea.co.kr +9979 + Mabuse.De + Manfred Berndtgen + mb&mabuse.de +9980 + M-Web South Africa + Jeanette Greeff + jgreeff&mweb.com +9981 + NeT&Trade GmbH + Jens Thiel + jens&thiel.de +9982 + netEngine + Oliver Malang + malang&netengine.at +9983 + Netstech, Inc. + James Won-Ki Hong + jwkhong&netstech.com +9984 + Osmosislatina + Daniel Rubio + daniel&osmosislatina.com +9985 + RIC + John Wu + enigma&ric.ws +9986 + Sencore + Ben Schmid + ben.schmid&sencore.com +9987 + Signet + Gerard Haagh + gerard&signet.nl +9988 + Smart Technology Enablers, Inc. + David G. Lawrence + dlawrence&enablers.com +9989 + Stochastix + Jens Thiel + jens&thiel.de +9990 + Stonebranch, Incorporated + William Miller + trouble&stonebranch.com +9991 + synchrone + JF Meyers + jf.meyers&ibelgique.com +9992 + Terion + Jonathan Jaloszynski + jjaloszynski&terion.com +9993 + Terumo Medical Corporation + Shawn Miller + shawn.miller&terumomedical.com +9994 + The Linux Box Corporation + Matt Benjamin + matt&linuxbox.nu +9995 + ThinkEngine Networks, Inc. + Victor Novikov + vnovikov&thinkengine.net +9996 + Villanova University + John L. Center + john.center&villanova.edu +9997 + Virtual e-learning group (VEG) + Michael Schacht Hansen + msh&hi.au.dk +9998 + Wynd Communications + Erik Erlendsson + eerlendsson&wynd.com +9999 + Zerna, Koepper & Partner + Egbert Casper + cas&zkp.de +10000 + MapInfo Corporation + Dave Beckett + webcertadmin&mapinfo.com +10001 + blio Corporation + Johann L. Botha + joe&blio.net +10002 + Frogfoot Networks + Johann L. Botha + joe&frogfoot.net +10003 + IP ONE Inc. 
+ Hye-Sun Han + sunh&ipone.co.kr +10004 + johnchildress.com + John Childress + john&johnchildress.com +10005 + Pure NetworX GmbH + Werner Reisberger + wr&pure.ch +10006 + SANTON + Yue Cao + sky.yue&263.net +10007 + RF Innovations Pty Ltd + Paul Spaapen + it.support&rfinnovations.com.au +10008 + Groupe ESIEE Paris + Frank Bonnet + f.bonnet&esiee.fr +10009 + KrystalBox Technologies, Inc. + Matt Hille + mhille&krystalbox.com +10010 + Network Storage Solutions, Inc. + Douglas Donsbach + dld&nssolutions.com +10011 + Advanced Simulation Technology Inc + Patrick Gaffney + patrickg&asti-usa.com +10012 + Alternium SA + Laurent Cupial + lcupial&alternium.com +10013 + caledo GmbH + Matthias Gronkiewicz + info&caledo.de +10014 + CERIAS + Vincent Koser + vkoser&cerias.purdue.edu +10015 + Certification Centre Ltd + Kaitti Kattai + kaitti&sk.ee +10016 + Computer & Communications Innovations + Adam Herbert + aherbert&cci-us.com +10017 + CREWAVE Co., Ltd. + Sangwook Lee + leesw&crewave.com +10018 + Double Precision, Inc. + Sam Varshavchik + mrsam&courier-mta.com +10019 + Fen Systems Ltd. + Michael Brown + mbrown&fensystems.co.uk +10020 + Globe + Maarten van Steen + steen&cs.vu.nl +10021 + Good Technology, Inc. + Jeff Stewart + jstewart&good.com +10022 + Iconnect + Luigi Erardi + lgerardi&iconnect.it +10023 + IMA L.t.d. + Zdenek Hladik + hladik&ima.cz +10024 + Integrated Technology Express, Inc + Amy Chung + amy.chung&ite.com.tw +10025 + Interland, Inc. + Jeff McNeil + jmcneil&interland.com +10026 + Interloci,Inc. + Larry Stilwell + lstilwell&interloci.com +10027 + Internet Data Systems S.A. + Krzysztof Wysocki + kwysocki&ids.pl +10028 + level 42 networks + Ryan Ribary + ryan&level-42.net +10029 + Men & Mice + Sigfus Magnusson + sigfusm&menandmice.com +10030 + Mudiaga Obada + Mudiaga Obada + mudi&obada.de +10031 + NIIEFA CTC CYCLONE + Serj Kochetovsky + electom&niiefa.spb.su +10032 + ONEMEDIA Inc. + Mark Haager + mhaager&onmd.com +10033 + OpenSOS S/B + Tze-Meng Tan + tmtan&tm.net.my +10034 + OSIX AB + Mikael Appelkvist + mikael.appelkvist&osix.se +10035 + SAY Technologies, Inc. + Kawasaki Shingo + skawasaki&say-tech.co.jp +10036 + Times N Systems + Chris D. Miller + cmiller&timesn.com +10037 + Ultimate People Company Ltd + Nigel Hannam + nigel.hannam&upco.co.uk +10038 + University of Texas Health Science Center at San Antonio + Scott Mitchell + mitchell&uthscsa.edu +10039 + Videotek, Inc. + Rob Zwiebel + zwieber&videotek.com +10040 + Yonsei University + Yong-Hoon Choi + andychoi72&yahoo.co.kr +10041 + ZONE. PL s.c. + Pawel Orzechowski + tech&zone.pl +10042 + comma Flex + Brendon Maragia + sinphony80&hotmail.com +10043 + eTime Capital, Inc. + Stephen P. Berry + spb&etimecapital.com +10044 + FRB-Par + Jorge Ventura + jorge.ventura&varig.com.br +10045 + NetFormFive Informationstechnologie GmbH + Harald Evers + harald.evers&netformfive.de +10046 + Protek, S.C. + Jose B. Alos Alquezar + jbaa&prometeo.cps.unizar.es +10047 + Servocomp Ltd. + Andre Yelistratov + andre&servocomp.ru +10048 + VXL eTech (P) Limited + John George + johng&vxl.co.in +10049 + WebRelay, Inc. + Sam Nicholson + scion&webrelay.net +10050 + Dana Corporation + Bill DenBesten + bill.denbesten&dana.com +10051 + Eftia OSS Solutions Inc + Misha Sekey + hostmaster&eftia.com +10052 + Electronic Commerce Link, Inc. + Joe Felix + iana-pen&eclink.com +10053 + keys + Marcus Augustus Alzona + marcus&keys.com +10054 + NetValue S.A. + Erwan Arzur + erwan&netvalue.com +10055 + Roaring Penguin Software Inc.
+ Dianne Skoll + dfs&roaringpenguin.com +10056 + AppDancer Networks + Samuel Li + samuell&appdancer.com +10057 + EMTE Sistemas + Raul Blanquez + rblanquez&emtesistemas.com +10058 + HAURI Inc. + Jong Seung Park + pjs0722&hauri.co.kr +10059 + SANRAD + Michele Hallak-Stamler + michele&sanrad.com +10060 + SELTA S.p.A. + Lorenzo Chiapponi + lorenzo.chiapponi&selta.com +10061 + Pacific Info Tech Corp. + Liao Jiang + jiangl&pacificinfotec.com +10062 + Comsquared Systems, Inc. + David Gianndrea + dgianndrea&comsquared.com +10063 + Aaron Grosky & Associates Inc + Carilda A. Thomas + cat&the-cat.com +10064 + ASH + Hauke Johannknecht + oid&ash.de +10065 + Ironoak Software + Tammy Race + race&cs.utk.edu +10066 + Digital Multimedia Technologies + Beretta Eugenio + e.beretta&dmtonline.com +10067 + DVTEL Inc. + Gil Fishman + camera&dvtel.com +10068 + E-OfficeServices Ltd. + Ashley Unitt + ashley&newvoicemedia.com +10069 + Epsilon AB + Staffan Andersson + staffan.andersson&epsilongroup.net +10070 + Flashwave Ltd + Daniel Wilson + dan&flashwave.co.uk +10071 + GINKO AG + Roland Haenel + rh&ginko.net +10072 + GW Technologies Co., Ltd. + Zhong Zhen + zhong_zhen&yeah.net +10073 + HanseNet Telekommunikation GmbH + Sylvia Ellrich + ellrich&hansenet.com +10074 + Ikimbo + David E. Storey + dstorey&ikimbo.com +10075 + Infodyne Corporation + Infodyne Corporation + falke&infodyne.com +10076 + Institutions of the European Union + Colin Fraser + colin.fraser&cec.eu.int +10077 + MbyN Inc. + Yuri Cho + yuricho&mbyn.com +10078 + Medienprojektverein Steiermark + Harald Klein + hklein&979fm.at +10079 + Metrosource, Inc. + David Bradley + dbradley&rochester.rr.com +10080 + Microcell I5 Inc. + Charles Arsenault + security&microcelli5.com +10081 + NrjCorporation + Nicolas Rosa + nrj.post&mailcity.com +10082 + Oculus Technologies Corporation + Andrew Crum + acrum&oculustech.com +10083 + Panda Vista + John Crunk + greypanda&pandavista.com +10084 + Priority Telecom N.V. + Roger Schobben + rschobben&prioritytelecom.com +10085 + Prove IT + Daniel Prinsloo + daniel&prove-it.co.uk +10086 + Segue Software, Inc. + Wolfram Hiebl + wolfram&segue.at +10087 + SilentOne Limited + Culley Angus + culley.angus&silentone.com +10088 + Techno Valley Co. + Steve Kang + sun&tvinc.co.kr +10089 + Tek Tools, Inc. + Ken Barth + snmp&tek-tools.com +10090 + The ClueNet Project + Daniel Roesen + iana-oid&cluenet.de +10091 + Transfar + Xu Tian + xutian&mail.transfar.com +10092 + Unassigned + ---none--- + (Removed 2002-05-02) +10093 + unamite GmbH + Andreas Roedl + andreas.roedl&unamite.com +10094 + University of Novi Sad + Zora Konjovic + armuns&uns.ac.rs +10095 + Wickander & Associates + James Richardson + james.richardson&wickander.com +10096 + Baxter Healthcare Corporation + Terri Stevens + stevent&baxter.com +10097 + ECI-IP Inc. + David A. Pitts + david.pitts&jax.ecitele.com +10098 + Gonicus GmbH + Lars Scheiter + lars.scheiter&gonicus.de +10099 + humanIT + Ingo Schwab + ingo.schwab&humanit.de +10100 + onDevice Corporation + Jeff Ng + jng&ondevice.com +10101 + Mirai Espana, S.L. + Minoru Watabe + webmaster&hotelsearch.com +10102 + Alt-N Technologies + Arvel Hathcock + arvel&altn.com +10103 + Lewis Curtis Consulting + Lewis Curtis + lewisccurtis&netscape.net +10104 + eBOA + Ing. Roelof Osinga + roelof&eboa.com +10105 + Lawson Software + Michael Kellen + OID.Admin&Lawson.com +10106 + Albalá Ingenieros, S.A + Juan A. Zaratiegui + zara&albalaing.es +10107 + 1stCom Technologies Corp.
+ Bo Yang + byang&1stcomtech.com +10108 + 7 Global Ltd + Matt Rosebury + mrosebury&7global.com +10109 + Active Telecom + Julien Delaporte + jdelaporte&active-telecom.com +10110 + Agile Storage, Inc. + Gilbert Ho + gilbert&agilestorage.com +10111 + Get Connected + Jörg Hanebuth + joerg&gecosnet.de +10112 + APLcomp Oy + Martti Pitkanen + martti.pitkanen&aplcomp.fi +10113 + ApplianceWare Inc. + Craig Spurgeon + cspurgeon&applianceware.com +10114 + Artesia Technologies + Bartee Natarajan + bartee.natarajan&artesia.com +10115 + ARTFUL + Philippe Huet + phuet&artful.net +10116 + Auga ApS + Troels Blum + troels&auga.com +10117 + Axerra Networks, Inc. + Aharon Strosbert + aharon&axerra.com +10118 + BenefitsXML, Inc. + George Austin + gaustin&benefitsxml.com +10119 + BigBangwidth + Steve Hyatt + shyatt&bigbangwidth.com +10120 + BRTech + Kang-Soo Lee + odinlee&orgio.net +10121 + CLEAR Communications Ltd + Nora Bowman + nbowman&clear.co.nz +10122 + Creatia Corporation + Paul C. Bryan + pbryan&creatia.com +10123 + Crimco Consulting + James G. Crimens + jcrimens&concentric.net +10124 + CUST - Université Blaise Pascal (Clermont II) + Thierry Chich + chich&cust.univ-bpclermont.fr +10125 + cyberPIXIE, Inc. + Phillip J. Birmingham + phillip.birmingham&cyberpixie.com +10126 + DAASI International GmbH + Peter Gietz + peter.gietz&daasi.de +10127 + Desana Systems Incorporated + Changming Shin + cshih&desanasystems.com +10128 + DISA + Aleksey Malakhovskiy + adisa&dialup.ptt.ru +10129 + DiscoverNet, Inc. + Cory Visi + visi&discovernet.net +10130 + Ecole Centrale de Lyon + Luc Mariaux + luc.mariaux&ec-lyon.fr +10131 + EINK + Pehr Anderson + pehr&eink.com +10132 + EM Solutions Pty Ltd + Peter Woodhead + peter.woodhead&emsolutions.com.au +10133 + FedEx + Pat O'Neil + pat.oneil&fedex.com +10134 + FerraraLUG + Simone Piunno + admin&ferrara.linux.it +10135 + Front Porch Inc. + David Miller + dmiller&frontporch.com +10136 + godot communication technologies gmbh + Michael Steinmetz + Michael.Steinmetz&godot.de +10137 + GTMP Foundation + Alexandre Jousset + midnite&gtmp.org +10138 + i2pi + Joshua Reich + josh&traffion.com +10139 + i3sp + Matthew Watson + mattw&i3sp.com +10140 + INCO Systems, Inc. + Jerry J. Blanchard + jblanchard&incosystems.com +10141 + Infinisys Pty Ltd + Ian Smith + ian.a.smith&bigpond.com +10142 + Interdimensions + Philip J Grabner + grabner&interdim.com +10143 + Introut Consulting + Darcy Westfall + darcy.westfall&introut.com +10144 + iORMYX + Roman Nemet + romann&iormyx.com +10145 + Backman Writing Services + Justin Backman + iana&backmanwritingservices.com +10146 + Kern Automatiseringsdiensten BV + Bob van der Kamp + oid&kern.nl +10147 + Lancaster-Lebanon Intermediate Unit 13 + Kevin Myer + kevin_myer&iu13.org +10148 + Lightel Systems Corporation + Margie Patterson + margiepatterson&lightel.com +10149 + LineOne + Anthony Lee + anthony.lee&uk.tiscali.com +10150 + Logic One Incorporated + Aaron Smith + as&gofuzion.com +10151 + Multimedia University + Chin Mun, Wee + cmwee&mmu.edu.my +10152 + Muspellsheim + Kenny Austin + kenny&muspellsheim.net +10153 + Net2Phone + Joshua Walfish + josh&net2phone.com +10154 + NetPlay, Inc. + Mike Coakley + mcoakley&netplay.net +10155 + Northern Parklife Inc + Thomas Wernersson + thomas&northern.se +10156 + Novoforum + Howard Neuwirth-Hirsch + hnhirsch&novvoforum.com +10157 + Onnet Technologies Co. + Daehwan Kim + dhkim&ont.co.kr +10158 + PanAmSat + Jerry Tuite + jtuite&panamsat.com +10159 + Panasas Inc.
+ Shri Lohia + slohia&panasas.com +10160 + Penn State Outreach Information Systems + Ken Borthwick + kmb11&outreach.psu.edu +10161 + PictureIQ + Brian Hutchison + brian.hutchison&pictureiq.com +10162 + poland.com SA + Robert Gorecki + robert.gorecki&poland.com +10163 + Remedy Corporation + Rich Mayfield + richm&remedy.com +10164 + Roads and Traffic Authority (RTA) + Per-Johan Lind + per-johan_lind&rta.nsw.gov.au +10165 + Robert Williams Consulting + Robert Williams + robwilliams&onebox.com +10166 + Secure Software Services ltd + Raphael Barber + raphael&securesoft.co.uk +10167 + Siemens Communications Limited + John Richardson + john.richardson&siemenscomms.co.uk +10168 + Space Biomedical Center + Yurii Reviakin + rev&telemed.ru +10169 + Suayan Design + William Suayan + ksuayan&yahoo.com +10170 + Telemig Celular S.A. + Luiz Carlos Portugal Starling + rede.cel&telemigcelular.com.br +10171 + Telsey + Mestriner Roberto + roberto.mestriner&telsey.it +10172 + Territory Business Solutions Pty. Ltd. + Chris Coleman + ccoleman&tbs.com.au +10173 + Texil + Ian McNish + ian&ians.net +10174 + The Chubb Group of Insurance Companies + Tod Thomas + ldap&chubb.com +10175 + Traffion Technologies + Joshua Reich + josh&traffion.com +10176 + Univention + Peter H. Ganten + peter.ganten&univention.com +10177 + University of Northern Iowa + Aaron Thompson + aaron.thompson&uni.edu +10178 + Western Kentucky University + Jeppie Sumpter + jeppie.sumpter&wku.edu +10179 + Wireless Solutions + Simone Piunno + simonep&wseurope.com +10180 + Am-utils Organization + Erez Zadok + ezk&am-utils.org +10181 + Cardinal Health Inc. + Internet Administrator + internetadmin&cardinal.com +10182 + futureLAB AG + Philipp Klaus + info&futurelab.ch +10183 + Master Quality snc + Amico Sabatini + masterquality&inwind.it +10184 + QCOM TV, Inc. + Chris Heimark + chris.heimark&targettv.com +10185 + SMP + Scott Didriksen + sdidrik&rmci.net +10186 + Weinbrenner Media Consult + Philipp Weinbrenner + softlab&weinbrenner.com +10187 + Teleview + Wooksoo Shin + wshin&teleview.co.kr +10188 + Piolink, Inc + Henry Moon + support&piolink.com +10189 + Congruency, Inc. + Eyal Segal + eyal&congruency.com +10190 + AEGMIS GmbH + Georg Groner + georg.groner&aegmis.de +10191 + DivergeNet, Inc. + Paul Traskal + ptraskal&DivergeNet.com +10192 + E.ON Energie AG + Florian Dietrich, Peter Marschall + pki&eon.com +10193 + erkle + Andrew Veitch + ajv-iana&erkle.org +10194 + is:energy + Christian Müller + christian.mueller&is-energy.de +10195 + SH Soft + Simon Heaton + simon&jazzsax.freeserve.co.uk +10196 + www.thinkingmachines.com + Christopher Secrest + c_secrest&lycos.com +10197 + Vovtel Networks, Inc. + Thomas Tsai + thomas&vovtel.com +10198 + Carroll-Net, Inc. + Jim Carroll + jim&carroll.com +10199 + projecthome limited + Norbert Klamann + Norbert.Klamann&projecthome.de +10200 + i-mail-box project + Hiroshi Kakehashi + npo&mvf.biglobe.ne.jp +10201 + Eftel + Steven Wilton + techs&team.eftel.com +10202 + GFT Solutions + Michael Voegler + michael.voegler&gft.com +10203 + Winter AG + Stefan Lefnaer + stefan.lefnaer&winter-ag.at +10204 + Shanghai SCOP Photonics Technology Co. ltd + Winner Chen + chenbing&263.net +10205 + Apama + Richard Bentley + rmb&apama.com +10206 + ePIK'us Software + Sebastian Pikur + epikus&poland.com +10207 + Hilgraeve Inc. + Mike Thompson + miket&hilgraeve.com +10208 + Valnet Sado S.A. + Pedro Vasconcelos + geral&valnetsado.pt +10209 + Andes Networks, Inc. 
+ Brian Shiratsuki + hostmaster&andesnetworks.com +10210 + AXS-One + Arnie Listhaus + alisthaus&axsone.com +10211 + BENAU A/S + Carsten Joenck + caj&benau.dk +10212 + Cutler & Company + Paul Jacobson + pj&cutlerco.com.au +10213 + Meriton Networks + Ilya Kobrinsky + ilya.kobrinsky&meriton.com +10214 + ENIGMA SOI SP. Z O.O. + Artur Krystosik + akr&enigma.com.pl +10215 + FORWARD Technical Co.Ltd + WangZhiQiang + yy_wzq&21cn.com +10216 + Frantic Films + Shawn Wallbridge + swallbridge&franticfilms.com +10217 + HotDiary Inc + Manoj Joshi + manoj&hotdiary.com +10218 + Hsufarm + Gene Hsu + iana&hsufarm.com +10219 + I.D.E.A.L. Technology Corporation + Anthony L. Awtrey + tony&idealcorp.com +10220 + IFW Dresden + Thomas Fichte + t.fichte&ifw-dresden.de +10221 + IMPACT TECHNOLOGIES + Jean-Nicolas Durand + jndurand&itechno.com +10222 + InfiniCon Systems + Charles King + cking&infiniconsys.com +10223 + Leo A Daly Company + Duane Miller + mdmiller&leoadaly.com +10224 + Lufthansa Systems Infratec GmbH + Peter Boerner + peter.boerner&lhsystems.com +10225 + Marrakech Ltd. + Alan O'Kelly + aokelly&marrakech.com +10226 + MyDN + John Gray + jgray&mydn.com +10227 + Neesus Datacom + Sunil Sanwalka + sunil&neesus.com +10228 + Net & Publication Consultance GmbH + Joachim Schrod + jschrod&npc.de +10229 + Netgate s.c. + Adam Wojtkiewicz + adomas&ng.pl +10230 + Oriole Corporation + Stephane Faroult + sfaroult&oriole.com +10231 + Proficient Networks, Inc. + Allan Leinwand + allan&proficient.net +10232 + Signiant Corporation + Winston Khan + wkhan&signiant.com +10233 + Silicon Goblin Technologies + Steve Linberg + steve&silicongoblin.com +10234 + Snort.org + Glenn Mansfield Keeni + glenn&cysols.com +10235 + Startrack Communications (Australia)Pty Ltd + Rodney Kirk + rodney.kirk&startrack.com.au +10236 + Studio komputerowe AWA + Adam Wojtkiewicz + adomas&ng.pl +10237 + Sullins, Inc. + Ben Pierce + bpierc5&earthlink.net +10238 + Technische Universitaet Berlin (TU-Berlin) + Gerd Schering + ca&tu-berlin.de +10239 + Tess SA + Miguel Augusto + miguel.augusto&tess.net.br +10240 + TransNational Computer Technology + Leke osiname + leke&telecom.net.et +10241 + TRPG Association + Kevin Tseng + hytseng&trpg.org +10242 + University of Brighton + Mike Chemij + m.chemij&bton.ac.uk +10243 + Uplogix.com + James Dollar + jdollar&uplogix.com +10244 + VALIDATA + Simakov Sergey + sim&x509.ru +10245 + Valiosys SA + Nicolas Prochazka + sysadmin&valiosys.com +10246 + Virtual Education Space + T S Vreeland + tvreeland&ves.ws +10247 + CollegeUnits.com, Inc. + Chris Staszak + cstaszak&collegeunits.com +10248 + E.Central, Inc + Robert Tarrall + oid-admin&ecentral.com +10249 + CINtel Intelligent Telecom System Co.,Ltd. + Jianming Lan + lanjianming&263.net +10250 + NegativeTwenty + Kevin J. Menard, Jr. + kmenard&wpi.edu +10251 + Warlock + Yaroslav Dmitriev + yar&mi.ru +10252 + Diagnostic Laboratory Services, Inc. + Ronald Fox + rfox&dlabs.com +10253 + Napster, Inc. + Roger Smith + unixops&napster.com +10254 + Texuna Technologies Moscow + Serge Karpinski + sergey.karpinski&texunatech.com +10255 + Infoquenz IT-Dienstleistungen GmbH + Tobias Retts + trettstadt&infoquenz.de +10256 + ScioByte GmbH + Michael Neuffer + neuffer&sciobyte.de +10257 + Vocanter LLC + James Nyika + jnyika&gmail.com +10258 + Aracnet Internet Services + Sean Hanson + admin&aracnet.com +10259 + Avtec Systems, Inc. 
+ Gregg Nemesure + nemesure&avtec.com +10260 + BeamReach Networks + Charuhas V Ghatge + cghatge&beamreachnetworks.com +10261 + Cedar Point Communications + Sandeep Asija + sasija&cedarpointcom.com +10262 + CNC Systems, Inc. + Adam Meyerowitz + ameyerow&optonline.net +10263 + Custom Linux Solutions + Matthew Brichacek + mbrichacek&customlinuxsolutions.com +10264 + DATA & INFORMATION TECHNOLOGY LTD + Joyce Farnese + joycef&dandit.demon.co.uk +10265 + Deltathree, Inc + Hillel Antonov + hillela&deltathree.com +10266 + DIZ Rheinland-Pfalz + Uwe Volkmer + uwe.volkmer&diz.de +10267 + Electro Optic Systems + Dean Cording + dsc68&acay.com.au +10268 + Enterprise Commerce + Mike Tynas + mtynas&bigpond.com +10269 + ePropose + Matt Lippert + matt.lippert&epropose.com +10270 + ES-Netze + Eduard Siemens + siemens&es-netze.de +10271 + Forgent + Ken Kalinoski + ken&forgent.com +10272 + Future Instrument AB + Ingemar Ernstsson + ingemar.ernstsson&futureinstrument.se +10273 + Giant Step Productions LLC + Bart Lilje + bart&giantstep.com +10274 + HEXAFLUX-SMS + Bunel Damien + dbunel&hexaflux.com +10275 + Master a Distancia + Angel Cabello + acabello&masterd.es +10276 + MSI Network Services, Ltd. + Paul Maine + paulm&msicc.com +10277 + Multnomah Education Service District + Eric Harrison + eharrison&mesd.k12.or.us +10278 + NetAcquire Corporation + David Hauck + davidh&netacquire.com +10279 + Object Valley (Asia Pacific) Limited + Stephen Won + stephen.won&objectvalley.com +10280 + Quixotech Systems + Robert Kettles + rob&quixotech.com +10281 + SELECT Technology + Dori Seliskar + dori.seliskar&select-tech.si +10282 + SpeedKom GmbH + Markus Trinler + mt&speedkom.net +10283 + TABCORP + A.LaBrooy + labrooya&tabcorp.com.au +10284 + Thomas Jefferson University + Drew Zebrowski + drew&jefferson.edu +10285 + Trenitalia S.p.A. + Claudio Baldrati + baldrati&asamrt.interbusiness.it +10286 + Trintech Inc. + George Burne + george.burne&trintech.com +10287 + University At Albany + Rob See + robsee&csc.albany.edu +10288 + Vox Mobili + Voxmobili System + system&voxmobili.com +10289 + Yukyung Telecom Co., Ltd. + KwanSoo, Yoon + ksyoon&yutc.co.kr +10290 + Internet Business Constellation S.A. + Christen Blom-Dahl + christen&inetbc.net +10291 + j-m-f.demon.co.uk + Justin Forder + justin&j-m-f.demon.co.uk +10292 + Laissez Faire City + Samuel Jacobs + domains&lfcity.com +10293 + LINKAGE SYSTEM INTEGERATION CO.,LTD + Cao Yang + caoyang&lianchuang.com +10294 + DATA SERVICE SRL + Michele Gandolfi + info&datas.it +10295 + iSOLUTION + Dirk Bajohr + db&isolution.de +10296 + ITI Ltd. + V.S. Vijay Kumar + dcesi_bgp&itiltd.co.in +10297 + Advantech Co., Ltd. + Guider Lee + guider.lee&advantech.com.tw +10298 + Arbeitsgemeinschaft fuer total abgehobene Technologie + Alexander Talos + at&atat.at +10299 + Avantia, Inc + Charles W. Loughry + cloughry&eavantia.com +10300 + Avantron Technologies Inc. 
+ Martin-Pierre Frenette + mfrenette&avantron.com +10301 + Bluesocket Ltd + Christine Cook + ccook&bluesocket.com +10302 + Galaxy Systems, Incorporated + Sandipan Gangopadhyay + sandipan&galaxysi.com +10303 + ist isdn support technik GmbH + Halil Goektepe + halil.goektepe&ist-teltow.de +10304 + LGT Financial Services + Andreas Dittrich + andreas.dittrich&lgt.com +10305 + Magnifix Sdn Bhd + Izauddin Mohd Isa + izauddin&magnifix.com.my +10306 + MSO Technologies + John Park + admin&msotech.com +10307 + Optical Solutions Inc + Phil Friend + pfriend&opticalsolutions.com +10308 + OSGi + Peter Kriens + peter.kriens&aqute.se +10309 + SOHO Skyway + Ron Grant + engineering&sohoskyway.net +10310 + tang-IT Consulting GmbH + Marcus Thoss + marcus.thoss&tang-it.com +10311 + Kerio Technologies + Jaroslav Snajdr + jsnajdr&kerio.cz +10312 + Translogic Systems, Inc. + Wes Quinn + wes.quinn&tls-us.com +10313 + Autinform GmbH + Christian Engelmann + info&autinform.de +10314 + Provenir + Gil Milow + gmilow&provenir.com +10315 + Prima e.V. + Bodo Bellut + bb&prima.de +10316 + Rafael Ltd. + Malka Rosen + malkar&rafael.co.il +10317 + Nanum Technology + Jongseok Yoo + jsyoo&nanum.co.kr +10318 + SaskTel International Ltd. + Michael Moore + mike.moore&sasktel.sk.ca +10319 + Semantics AG + Roland Wepfer + rwe&semantics.ch +10320 + Amherst College + John W. Manly + jwmanly&amherst.edu +10321 + AUNet + Sudhakar Chandra + thaths&aunet.org +10322 + Centre For Advanced Technology + Shailendra Singh Tomar + tomar&cat.ernet.it +10323 + Convergence Network Research Ltd. + Ron Grant + iana-admin&cnresearch.com +10324 + CrabusLDAP + Philippe Lamiraux + lamira_p&epita.fr +10325 + EarthConnect Corporation + Omer Khan + ok&earthconnect.com +10326 + Ecole Nationale Superieure des Mines de Paris + Jose Marcio + martins&paris.ensmp.fr +10327 + FiberCity Networks + Jason Englander + jason&fiber-city.com +10328 + Globeflow SA + Bart Jenkins + bjenkins&globeflow.com +10329 + Integra Micro Systems (P) Ltd. + M.K. Srikant + srix&integramicro.com +10330 + KaVaDo Inc. + Yuval Ben-Itzhak + yuval&kavado.com +10331 + Lightning Data Networks + Chris Chekay + sycholic&adelphia.net +10332 + MaxHosting GBR + Sirk Johannsen + sven_sirk&gmx.net +10333 + MetaNet + Daniel Lawson + lawson&ihug.co.nz +10334 + Mischief Networking + Isaac A. Murchie + imurchie&burntsand.com +10335 + RLM Systems Pty. Ltd. + Benjamin Lyall + b.lyall&rlmststems.com.au +10336 + Summit Tech Communications + Eric Bellotti + ericb&summit-tech.ca +10337 + Valis LTD + Yaron Pikman + yaron&valis.co.il +10338 + Locale Systems + Thomas G Smith + tsmith&localesystems.com +10339 + StarRemote Wireless, Inc. + Jonas Gyllensvaan + jonas&starremote.com +10340 + Indiana State University + Christopher W. Hanna + cchanna&isugw.indstate.edu +10341 + Klinik Loewenstein gGmbH + Thomas Heiligenmann + thomas.heiligenmann&klinik-loewenstein.de +10342 + Miami University + Dirk Tepe + tepeds&muohio.edu +10343 + NetExcell + Henry Ngai + hngai&netexcell.com +10344 + Netregistry Pty Ltd + Adam Cassar + adam.cassar&netregistry.net +10345 + Newport Networks Limitd + John Holdsworth + john.holdsworth&newport-networks.com +10346 + OmegaBand, Inc. + Jim Hall + jimh&omegaband.com +10347 + Plustream, Inc. + Irina Kosinovsky + irina&stlport.com +10348 + SOFTPRO GmbH & Co. 
KG + Sven Kratochvil + skr&softpro.de +10349 + Sysgem AG + Simon Brown + support&sysgem.com +10350 + System Software Solutions + Steve Peterson + zygote&home.com +10351 + Oblivion + Edwin van Nuil + nuil&oblivion.nl +10352 + Retevisión Móvil, S.A. + Alberto González + agonzalc&amena.es +10353 + InnoCom + Omar Barreneche + ob&inno.com.uy +10354 + Brewster Academy + Jon Robertson + jon_robertson&brewsteracademy.org +10355 + ITAction + Yogesh Patel + support&itaction.co.uk +10356 + Target Revocable E-Mail Corporation + Michael Alyn Miller + malyn&trecorp.com +10357 + Roy S. Rapoport + Roy S. Rapoport + snmp&inorganic.org +10358 + Accelio Corporation + Chris Ethier + cethier&jetform.com +10359 + Adtel Software + Dhananjay Joshi + joshi&adtelsoft.com +10360 + AgesMUD Technology, Co., Ltd. + Eric Hu + DarkBringer&bigfoot.com +10361 + Bharat Heavy Electricals Limited + Pankaj A. Dadu + dadu&bhel.co.in +10362 + BluWare, Inc + Jack Williams + jwilliams&bluware.com +10363 + Catbird Networks + Chris Melville + cmelville&catbird.com +10364 + Central Manchester City Learning Center + Shaun Gilchrist + shaun&centralmanclc.com +10365 + Davox Corp. + Martin Bernier + mbernier&davox.com +10366 + Dialpad Communications + DongSeon Park + dpark&dialpad.com +10367 + donnie21 + Donnie Kang + neokids&n-top.com +10368 + Fujitsu Technology Solutions GmbH (formerly 'Fujitsu Siemens Computers') + Detlef Rothe + detlef.rothe&ts.fujitsu.com +10369 + Fujitsu Prime Software Technologies Ltd. + yukihiko Sakurai + y-sakurai&pst.fujitsu.com +10370 + Impulsesoft + Girish Hampali + hampali&impulsesoft.com +10371 + Inabyte Inc. + Myles Cagney + webmaster&inabyte.com +10372 + Intelligent Appliance Lab + Tempest Guo + tempest&chttl.com.tw +10373 + Le Reseau + Eilko Bos + eilko&reseau.nl +10374 + Lifeline Systems Inc + Ameer Sulaiman + asulaiman&lifelinesys.com +10375 + Makina Corpus + Mose + contact&makina-corpus.com +10376 + NETFRONT + Francois Demeyer + netfront&netfront.fr +10377 + OnMobile System Inc + Kiran Anandampillai + kiranma&onmobile.com +10378 + PacketVideo + Andrew Jenkins + jenkins&pv.com +10379 + Princeton Solutions Group + John Gray + jgray&princeton-solutions.com +10380 + TeleSys Software, Inc. + Sankar Chanda + schanda&telesys.com +10381 + TrendPoint Systems + Bob Hunter + bhunter&trendpoint.com +10382 + UCNET + Marc Moeller + mm&uc.net +10383 + Universite de Liege + Andre Pirard + a.pirard&ulg.ac.be +10384 + Virginia Commonwealth University + Jim Toth + jnf&vcu.edu +10385 + Guardian Telecom Ltd. + Blaine Hatch + bhatch&guardiantelecom.com +10386 + YoungWoo Telecom Co., LTD + Dong-Ha Lee + dhl&ywtc.com +10387 + Dongah Elecomm + Jin Ju Lee + jjlee&dongahelecomm.co.kr +10388 + Supportcomm Teleinformatica SA + Rodolfo Contri Rondao + rodolfo&supportcomm.com.br +10389 + Intabo + Joachim Hahn + administrator&intabo.de +10390 + DELTA, Danish Electronics, Light & Acoustics + Thorbjørn Aaboe + taa&delta.dk +10391 + Bharat Sanchar Nigam Limited + M.K.Yadav + sstomar&indoretelephones.net +10392 + Abeona Networks + Ken Rhodes + ken&abeona.com +10393 + APEH + Hámori Miklós + hamori.miklos&akp.apeh.hu +10394 + Banamex, S.A. + Alfonso Diaz Jimenez + adiaz&banamex.com +10395 + Belo Corporation + Edward E. Iwanski + eiwanski&belo.com +10396 + California State University, Office of the Chancellor + Mark Crase + mcrase&calstate.edu +10397 + CK Software GmbH (formerly 'CKSOFT, Christian Kratzer Software Entwicklung und Vertrieb') + Christian Kratzer + ck&cksoft.de +10398 + Covasoft, Inc.
+ Larry Combs + lcombs&covasoft.com +10399 + Datakey + David Corbett + davec&datakey.com +10400 + Electric Lightwave, Inc. + Stuart Craig + root&eli.net +10401 + Ideo Concepts Co. Ltd. + Neil Hambleton + neil&ic.com.hk +10402 + interdev + Lee SangBeob + sblee&interdev4u.com +10403 + Istituto Nazionale di Fisica Nucleare + Roberto Cecchini + Roberto.Cecchini&fi.infn.it +10404 + LivingLogic AG + Alois Kastner-Maresch + info&livinglogic.de +10405 + Mental Health Cooperative, Inc. + Systems Architect + dmorse&mhc-tn.org +10406 + Neural Audio, Inc. + Aaron Warner + aaron&neuralaudio.com +10407 + North Coast Software + Gregory Peddle + gpeddle&ncoastsoft.com +10408 + Percula Consulting GmbH + Jeremy Brookfield + jeremy.brookfield&percula.com +10409 + Tellurian Pty Ltd + Matthew Geddes + mgeddes&tellurian.com.au +10410 + The Falcon's Nest + Steven Alexson + steve&alexson.org +10411 + University of North Carolina at Chapel Hill + Celeste Copeland + celeste_copeland&unc.edu +10412 + aTelo, Inc. + Roman Shpount + roman&atelo.com +10413 + System Innovations, Inc. + Bob Fish + rfish&mcqassociates.com +10414 + Brauerei C. & A. Veltins GmbH & Co. + Matthias Mackenbruck + matthias.mackenbruck&veltins.de +10415 + 3GPP + Laurent Andriantsiferana + landrian&cisco.com +10416 + ABC Virtual Communications + Kailash Mohan Kayastha + kailash.kayastha&abcv.com +10417 + Amerion, LLC + Kenny Schmidt + kennys&amerion.net +10418 + Avocent Corporation + Steven W. Clark + steven.clark&avocent.com +10419 + Bactol Technical Limited + L. Thomas + bactoltechnicalltd&correspondence.co.uk +10420 + Bayerisches Landeskriminalamt Abt. IuK + Thomas Heigl + blka.sg343&baypol.bayern.de +10421 + C-CURE cvba + Vanhercke Jan + jan.vanhercke&c-cure.be +10422 + China PTIC Information Industry Corporation + Deng Danhui + ddh&rdptic.com.cn +10423 + CityXpress Corp. + Henry Deng + netadmin&cityxpress.com +10424 + Cricket Communications + Michael Shostak + mshostak&cricketcommunications.com +10425 + Electricity Generating Authority of Thailand + Surapong Singshinsuk + rdosrp&egat.or.th +10426 + Elex NV + Koen Bosmans + kbo&elex.be +10427 + Embrace Networks + Homayoun Zariv + hzariv&embracenetworks.com +10428 + Furukawa Electric LatAm S.A (formerly 'Furukawa Industrial S.A. - Curitiba') + Juvenal Costa + juvenalc&furukawa.com.br +10429 + Gelwarg.Net + Adam Gelwarg + adam&gelwarg.net +10430 + globalremote.com AG + Martin Jelenek + mj&globalremote.com +10431 + Ingenieurbuero Kittelberger GmbH + Joachim Steinruck + joachim.steinruck&ibkit.de +10432 + Mott Community College + Randy Schapel + rschapel&edtech.mcc.edu +10433 + MuTek Solution Inc. + Mark Rogers + mrogers&us.mutek.com +10434 + NIKHEF + David Groep + davidg&nikhef.nl +10435 + Omnitel + Aleksej Janchiukas + a.janciukas&omnitel.net +10436 + Oxford university + Dominic Hargreaves + oids&rt.oucs.ox.ac.uk +10437 + Peppercon AG + Christian Krause + chkr-iana&peppercon.com +10438 + Portima SC + Christophe Cloesen + christophe.cloesen&portima.com +10439 + Power Measurement Ltd + John A Tarter + john_tarter&pml.com +10440 + Rose I.T. Solutions + Robert Rose + robert&rits.com.au +10441 + Silas Technologies, Inc. 
+ Brian DeWyer + brian.dewyer&silastechnologies.com +10442 + Spider Networks + Jay Montilla + jay&spidernz.com +10443 + Three Pillars + Les Howard + les.howard&threepillars.com +10444 + tichen.net + Tianlong Chen + tchen&computer.org +10445 + Trillium Photonics + Thomas Taylor + ttaylor&trilliumphotonics.com +10446 + Trinity Expert Systems plc + Martin Kearn + martink&tesl.com +10447 + University of Mississippi + Jason Ferguson + jferguson&olemiss.edu +10448 + University of Puget Sound + Mark Young + myoung&ups.edu +10449 + Xceedium, Inc. + David Cheung + info&xceedium.com +10450 + Altamar Networks + Luis Roa + lroa&altamar.com +10451 + Boston Market Corporation + Jens Moller + jens&colomar.com +10452 + Egton Medical Information Systems Ltd + Chris Storah + cstorah&e-mis.com +10453 + IL JIN TELECOM ELEC.CO., Ltd. + Sang Moon Chung + smchung&ijte.co.kr +10454 + Optical Crossing Inc + Sudhakar Gummadi + sgummadi&opticalcrossing.com +10455 + Paradigm + Mike Chinea + mchinea&hotmail.com +10456 + PLANET Technology Corp. + Alan Huang + alan&planet.com.tw +10457 + DB Informatik Dienste GmbH + Christian Hinz + christian.hinz&bku.db.de +10458 + 2000-X Technologies, Inc. + Al Gonzalez + al.gonzalez&2000-x.com +10459 + Hexin Software + Guoxin + gx&myhexin.com +10460 + HiSolutions AG + René Grosser + email&hisolutions.com +10461 + Tixo Consulting + Peter Taylor + peter_taylor&usa.net +10462 + Atek Ltd. + Tunc Beyhan + tbeyhan&atek.net.tr +10463 + DEVK Versicherungen + Claas Vieler + sysadmhv&devk.de +10464 + elata plc + Rod French + netman&elata.com +10465 + Flypaper + Keith Smith + ksmith&flypaper.com +10466 + Holim Technology + Lee Ju Bok + jblee&hotech.net +10467 + Ingenieurbüro Jörg Geistmann + Jörg Geistmann + joerg&geistmann.com +10468 + IRTNOG + Matthew X. Economou + admin&irtnog.org +10469 + Lidcam Technology Pty Ltd + Adrian Carter + adrian&lidcam.com.au +10470 + Media Brokers International + Charles Marcus + charlesm&media-brokers.com +10471 + OpenFortress + Rick van Rein + rick&vanrein.org +10472 + Recourse Technologies, Inc. + Elvis Lim + elim&recourse.com +10473 + SEB AB + Roberth Edberg + roberth.edberg&seb.se +10474 + Servlets.Net Corporation + Tauren Mills + tauren&servlets.net +10475 + Slovak University of Agriculture + Jan Satko + jan.satko&uniag.sk +10476 + Summit Media Partners, LLC + Thomas L. Arnold + tarnold&smpllc.com +10477 + Telephony@Work Inc. + Ran Ezerzer + rezerzer&telephonyatwork.com +10478 + Transeonic Systems, Inc. + Jeff Brown + jrbrown&transeonic.com +10479 + Web Services, Inc. + William B. Uhl + buhl&web-services.net +10480 + Wily Technology + Daniel Scholnick + dscholnick&wilytech.com +10481 + AlJISR Information Services + Syed Tariq Mustafa + tariqmustafa&hotmail.com +10482 + Nortel + Sharon Chisholm + schishol&nortel.com +10483 + Winalysis Software + Steve Fullerton + support&winalysis.com +10484 + TF1 sa + Christophe HENRY + chenry&tf1.fr +10485 + About Web Services + Daniel Hanks + dhanks&about-inc.com +10486 + Aspelle Ltd + Matthew Hardy + matthew.hardy&aspelle.com +10487 + Business Information Publications + Martin Robertson + dev.ldap&bipcontracts.com +10488 + Chelsio Communications + Glenn Dasmalchi + glennd&chelsio.com +10489 + Design Combus Ltd + Joni Toivola + combus&dcombus.com +10490 + GNF + Gordon Tetlow + gordont&gnf.org +10491 + Hampshire College + Dan Parker + sa&hampshire.edu +10492 + HST High Soft Tech GmbH + Kurt Kiessling + kkiessling&hstnet.de +10493 + Innovance Networks Inc. + Leo Sun + lsun&innovance.com +10494 + InphoMatch Inc. 
+ Ven Chava + ven.chava&inphomatch.com +10495 + Interlink, Inc + Erik S. LaBianca + erik&totalcirculation.com +10496 + Link Simulation & Training + Daniel Callahan + dfcallahan&link.com +10497 + Maly's of California + Mike Cassidy + mcassidy&malyswest.com +10498 + michaeljcrawford.com + Michael Crawford + crawford&sonic.net +10499 + Ministerium des Innern Sachsen-Anhalt + Werner Weingarten + werner.weingarten&mi.lsa-net.de +10500 + Music Choice + Rob Steinberg + rsteinberg&musicchoice.com +10501 + Netman Company + Masatsugu Fujita + fujita&netman.co.jp +10502 + North Dakota University System + Dick Jacobson + Dick.Jacobson&ndsu.nodak.edu +10503 + Power Innovations International + Vilmar L. Gaertner + gaertner&power-innovations.com +10504 + Printronix, Inc. + Tami Clayton + tclayton&printronix.com +10505 + Quorus Medical Systems + Alexei Ustyuzhaninov + aiu&quorus-ms.ru +10506 + RadioShack Corporation + Jon Frazier + jon.frazier&radioshack.com +10507 + Reciproca + Corno Schraverus + corno&dds.nl +10508 + Res Mod Man + Aron Burrell + amb&rifetech.com +10509 + Rightvision + Fabrice Nouet + fabrice.nouet&rightvision.com +10510 + ScreamingMedia + Jason Zheng + jasonz&screamingmedia.com +10511 + SimpleDevices, Inc. + Hanford Choy + hanford&simpledevices.com +10512 + Spectrum Image + Clay Atkins + catkins&spcmg.com +10513 + Steltor + David Ball + davidb&steltor.com +10514 + StrongAuth, Inc. + Arshad Noor + arshad.noor&strongauth.com +10515 + Terry Nazon Inc + Terry Nazon + tnazon&aol.com +10516 + T-Nova GmbH, Deutsche Telekom + Yanrui Dong + yanrui.dong&telekom.de +10517 + Toronto POlice Service + Chris Pentleton + chrispn&sympatico.ca +10518 + TrunkNet + Oliver Schoenfeld + oschoenfeld&trunknet.de +10519 + Tsinghua Unisplendour Co., ltd + Chen Yong + cy&thunis.com +10520 + Marc Hirsch + Marc Hirsch + marc.hirsch&omnionpower.com +10521 + VoicePlanet, Inc. + Johnny Wu + johnnywu&voiceplanet.com +10522 + Voigt & Haeffner GmbH + Gunter Schmitt + gunter.schmitt&vuh.de +10523 + Wanadoo Data + Nicolas Thill + nicolas.thill&wanadoodata.com +10524 + Widevine Technologies + Glenn Morten + gmorten&widevine.com +10525 + Evangelisch Lutherische Kirche Bayern + Uli Iske + iske&elkb.de +10526 + Facultad de Ingenieria + Carols M. Martinez + carlosm&fing.edu.uy +10527 + METRObility Optical Systems, Inc. + Manu Kaycee + ManuKaycee&metrobility.com +10528 + Solid Information Technology Corp + Kyosti Laiho + kyosti.laiho&solidtech.com +10529 + Gemtek Systems Holding BV + Bin Hai + bin.hai&263.net +10530 + NordicEdge + Christer Roslund + croslund&nordicedge.se +10531 + C.T.Com + Aleksey Kukhar + ctcom&odessa.net +10532 + RifeTech Inc. + Aron Burrell + litui&litui.net +10533 + SEITENBAU GbmH + Stefan Eichenhofer + eichenhofer&seitenbau.com +10534 + SECOM Co., Ltd. + Yasuyuki Tauchi + tau&ai.isl.secom.co.jp +10535 + London Internet Exchange + Rob Holland + rob&linx.org +10536 + Simon's Rock College of Bard + David Reed + dreed&simons-rock.edu +10537 + AB Trav och Galopp + Mats Hansson + mats.hansson&konsult.atg.se +10538 + Abstrakt Design + Andreas F. Bobak + bobak.iana&abstrakt.ch +10539 + AlwaysOn Ltd + Sam Cole + scole&alwaysonvpn.com +10540 + AXL Software + Michael Lecuyer + mjl&axlradius.com +10541 + Caymasa El Sendero, SA + Jose M. Palomar + jmpalomar&elsendero.es +10542 + ChongQing GRC Telecom Co., Ltd + wuhua + tech&grcinfo.com +10543 + DeJarnette Research Systems, Inc. 
+ Jay Cothran + jcothran&dejarnette.com +10544 + Factual Data + Brad Epstein + oidadmin&factualdata.com +10545 + gentics net.solutions + Schremser Maximilian + mm.schremser&gentics.com +10546 + Inturio Property Rights Holdings Limited + William Hui Bon Hoa + william.hui&inturio.com +10547 + Matrix Applied Computing Ltd + Stuart Hodder + stu&matrix.co.nz +10548 + ENEA AB + Ilse Mueller + ilse.mueller&enea.com +10549 + NewVation + Don Stocks + dstocks&newvation.com +10550 + n-tv GmbH & Co. KG + Uwe Hunstock + uhunstock&n-tv.de +10551 + parergy + Steven Blair + steven.blair&parergy.com +10552 + Passlogix Inc. + Manung Han + manungh&passlogix.com +10553 + Q-Networks + Jan Pedersen + jp&q-networks.com +10554 + Qwest Wireless, L.L.C + Mike Hampton + mfhampt&qwest.com +10555 + Ramcar Group of Companies + Marco C. Lucena + marco.lucena&ramcargroup.com +10556 + recomp GmbH Netzwerke & Systemberatung + Oliver Reeh + oliver.reeh&recomp.de +10557 + Sherwood International Limited + Lee Ha + Lee.Ha&sherwoodinternational.com +10558 + Silogix + Jean-Paul Blanc + jean-paul_blanc&silogix-fr.com +10559 + Softeck + Terry L. Dalby + tldalby&juno.com +10560 + South Florida Water Management District + Mark Velasquez + mvelasq&sfwmd.gov +10561 + Star Valley Solutions Inc. + Richard Albertson + ralbertson&starvalleysolutions.com +10562 + Werkleitz Gesellschaft e.V. + Martin Mueller + mm&werkleitz.de +10563 + Portland Public Schools + Andy Payne + apayne&pps.k12.or.us +10564 + T.I.A. Scandinavia + Martin Andersson + d98mad&efd.lth.se +10565 + Universite de Savoie + Berard David + david.berard&univ-savoie.fr +10566 + Universiti Pendidikan Sultan Idris + Zahar Othman + zahar&upsi.edu.my +10567 + eyou.net Corp. + YingBo Qiu + qyb&eyou.net +10568 + China Sunbo Corp. + dingke + dke&zj001.com +10569 + Coker Net + Chuck Coker + chuck&coker-net.com +10570 + Manly Man Club + Chuck Coker + chuck&coker-net.com +10571 + Merck KGaA + Christine Simon + christine.simon&merck.de +10572 + 100world.com AG + Artur Schneider + artur.schneider&100world.com +10573 + Aitech Space Systems Inc. + Anthony Lai + alai&rugged.com +10574 + Carbon Based Pty Ltd. + Michael Gray + mick&carbonbased.com.au +10575 + Cumulus IT AS + Øyvind Jelstad + oyvind&cumulus.no +10576 + Cygsoft Limited + Pallav Bhatt + pallav&cygsoft.com +10577 + Eilat On-Line Ltd. + Shachar Elkayam + shachar&eilat.net +10578 + Electric Insurance Company + Pradeep K. Majumder + pmajumder&electricinsurance.com +10579 + Eswoosh + Jerald Dawson + jdawson&eswoosh.com +10580 + Handspring, Inc + David Champlin + dchamplin&handspring.com +10581 + Icomera + Martin Bergek + contact_iana&icomera.com +10582 + Linear Systems Ltd. + Tom Thorsteinson + tthor&linsys.ca +10583 + Netizen S.A. + Gabriel Contursi + gcontursi&netizen.com.ar +10584 + Hopcount Limited + Joe Abley + jabley&hopcount.ca +10585 + Paris-alesia + Francois Cattin + francois.cattin&free.fr +10586 + PennWell Corp + Michael Ritterbusch + noc&pennnet.com +10587 + PharmaPartners B.V. + Villy Kruse + vek&pharmapartners.nl +10588 + Pixelboxx GmbH + Carsten Heyl + hostmaster&pixelboxx.de +10589 + Rocketrader.com + James Horne + jhorne&rocketrader.com +10590 + Sage Information Consultants , Inc. 
+ Byron Watts + Bwatts&sageconsultants.com +10591 + Salix Training Limited + Paul Smart + paul.smart&salixtraining.co.uk +10592 + SolutionInc + Chuck Feild + cfeild&solutioninc.com +10593 + SYBCOM GmbH + Oliver Pabst + oid&sybcom.de +10594 + Transynergy + Bent Nicolaisen + bni&transynergy.net +10595 + Tyrell Software Corporation + Chuck Coker + chuckc&tyrell.com +10596 + Vignon Informatique France + Philippe Carbonnier + contact&vif.tm.fr +10597 + Alliance Capital + John Chapin + john_chapin&acml.com +10598 + Applied System Design + Lindsay Morris + lmorris&servergraph.com +10599 + Department of Families + Martin Sharry + itbis&families.qld.gov.au +10600 + Innovative Computing Laboratory + Brett Ellis + iclsys&cs.utk.edu +10601 + Jataayu Software Pvt Ltd. + Arvind + arvind_katake&hotmail.com +10602 + JIERA + Jake Jung + just1jake&hotmail.com +10603 + Kafre + Saint-Genest Gwenael + gwen&kafre.fr +10604 + Zarlink Semiconductor + Thomas Ying + thomas.ying&zarlink.com +10605 + Infodesk S. A. + Victor V. Evtushenko + vevtushenko&idsk.com +10606 + IntraCom Australia Pty Ltd + Stuart V Begg + sbegg&intracom.com.au +10607 + ACG Solutions, LLC + Garry Stanfill + gstanfil&acg-solutions.net +10608 + alpha-lab + Jochen Witte + jwitte&alpha-lab.net +10609 + Applied Science Fiction + Jim Wilson + jwilson&asf.com +10610 + Aubergiste Inc. + R.C. Poirier + rcpoirier&aubergiste.com +10611 + BLU SPA + Alberto Bianchini + alberto.bianchini&mail.blu.it +10612 + Centralny Osrodek Informatyki Gornictwa S.A. + Jacek Bochenek + jacek&coig.katowice.pl +10613 + Envivio + Bigeard + nbigeard&envivio.fr +10614 + Equity Technology Group, Inc. + Dane Foster + dfoster&equitytg.com +10615 + GartonWorks, Inc. + Ned Kirby + nkirby&gartonworks.com +10616 + GeDInfo s.c.r.l. + Fabio Ferrari + fferrari&gedinfo.com +10617 + INFO Consulting GmbH + Martin Harm + harm&info-mainz.de +10618 + InfoStream ASP + Poul H. Sørensen + mib-contact&asp.infostream.no +10619 + innuworks + Sung-Jin Lee + sjlee&innuworks.com +10620 + iSoftel Ltd + Rajesh Lingappa + rajesh_lingappa&softel.com +10621 + bvba Med. Kab. dr. Sebrechts E. + Erik Sebrechts + erik.sebrechts&advalvas.be +10622 + Karma Designs + Michael Gisbers + mgisbers&popob.de +10623 + KnowledgeCube + Holyeast Lee + holyeast&kcube.co.kr +10624 + Richard Lucassen Electronics + Richard Lucassen + lucassen&bigfoot.com +10625 + Maincube Inc. + David Priban + mail&maincube.net +10626 + Mulvey Family + Rich Mulvey + domaincontact&mulveyfamily.com +10627 + N-SOFT + Roger Zini + rzini&n-soft.com +10628 + Project 49 AG + Jochen Witte + jochen.witte&project49.de +10629 + Questus Group, Inc. + Dustin Cobb + dcobb&questusgroup.com +10630 + Scintec AG + Stefan Schwarzer + stefan.schwarzer&scintec.com +10631 + SeRIQA Networks + Amir Dori + amird&seriqa.com +10632 + Telecomet International + Ryo Sueki + ryo-sueki&telecomet.co.jp +10633 + TICOM, Inc. + Geoffrey Bennett + geoffrey&ticom.com +10634 + Trio Networks + Lawrence Bennett + larry.bennett&trionetworks.com +10635 + Tucows Inc. + Edward Gray + egray&tucow.com +10636 + University of Applied Sciences Cologne + Michael Bank + michael.bank&fh-koeln.de +10637 + Vanderbilt University Medical Center + Jon Strong + jon.strong&vumc.org +10638 + Venation + Mark Butler + mark.butler&venation.com +10639 + Unassigned + Returned 2003-03-28 + ---none--- +10640 + West Interactive Corporation + Brian Roberson + Bjroberson&west.com +10641 + White Eagle Informatics + N.O. O'Monaghan + nils&a2000.nl +10642 + Zebra Technologies Corporation + Steven P. 
King + sking&zebra.com +10643 + NC Soft + Matt Walker + mwalker&britanniamanor.com +10644 + Interactive Software Design + Tim Nichols + tnichols&is-design.com +10645 + Pandanus Ltd + Geoff Newell + pandanus&lineone.net +10646 + ADI Computer Solutions + Martin Asher + masher&adi-cs.com +10647 + Advanced Science & Technology Institute + Rene Mendoza + renem&asti.dost.gov.ph +10648 + BlazeNet Ltd. + Karunesh Sharma + karunesh&north.org +10649 + DST Innovis + L. Scott Emmons + scotte&center.uscs.com +10650 + Easter-eggs + Michaël Parienti + mparienti&easter-eggs.com +10651 + ENAC + Mounier + mounier&enac.fr +10652 + Ezitrust Limited + Donal O Mahony + donal.omahony&ezitrust.com +10653 + Fotre & Associates + Terry V. Fotre + tfotre&earthlink.net +10654 + Future Space, S.A. + Javier Domingo García + jdomingo&futurespace.es +10655 + Global Office Software Pty Ltd + Greg Wiltshire + greg.wiltshire&gos.com.au +10656 + Hardy and Associates + Jeffrey P. Hardy + linuxrlz&yahoo.com +10657 + InfoCyclone Ltd. + Michael Shurman + michaels&infocyclone.com +10658 + IP Services + Chad Ross + chad.ross&tcpipservices.com +10659 + ISR Global Telecom, Inc. + Roger Egbers + roger.egbers&isrglobal.com +10660 + KeyTrend Technology (S) Pte Ltd + Lim Siok Peng + siokpeng&transparity.com +10661 + MainBrain + Sven Wallage + sven&mainbrain.nl +10662 + Mid-Hudson Communications + Daniel Laffin + admin&mhcommunications.com +10663 + MQS + Kenji Arita + arita&mqs.fujitsu.com +10664 + Netropolis + Jakob Goldbach + jakob.goldbach&netropolis.dk +10665 + Ocyrus + Adam McGrath + adam&ocyrus.net +10666 + Organization of Residence Students + Aly Dharshi + aly.dharshi&uleth.ca +10667 + Proxima Technology Pty Ltd + Alex Napier + alex.napier&proxima-tech.com.au +10668 + riodata GmbH + Manuel Göldner + manuel.goeldner&riodata.de +10669 + San Miguel Industrial S.A. + Rafael H Mantilla + rmantilla&smi.com.pe +10670 + Solutions E.T.C. GmbH + Julian Wolff + wolff&solutionsetc.de +10671 + studio Alpha + Ichiro Kamiya + kamiya&studio-alpha.com +10672 + Swarthmore College + Adam Preset + preset&swarthmore.edu +10673 + TELEM GmbH + W. D. Oels + info&telem.de +10674 + TOP-keskus + Tero Pelander + tero.pelander&tkukoulu.fi +10675 + UNESCO + Lannaud Eric + e.lannaud&unesco.org +10676 + UniData Communication Systems, Inc + ByungHoon Lee + bbear&udcsystems.com +10677 + University of Debrecen + Kornel Ecsedi + ecsedi&unideb.hu +10678 + WorldWideWatson + Coburn Watson + cpwww&earthlink.net +10679 + Zzyzx Technologies Inc. + Glenn Geller + ggeller&zzyzxtech.com +10680 + CAPCom AG + Dung Ta Quang + taquang&capcom.de +10681 + KASYS Incorporated + Kelvin Desplanque + kasysinc&hotmail.com +10682 + UDcast + Luc Ottavj + luc.ottavj&udcast.com +10683 + Heaman System co.,ltd. + Yanbin + yanbin_s&163.net +10684 + Terabolic + Lee Hoffman + lee&terabol.com +10685 + Object Tools Limited + Iain MacKay + imm&iain-mackay.org +10686 + Thruport Technologies + Scott Bethke + kbethke&thruport.com +10687 + Townsend Communications, Inc. + Michael Klatt + iana&olympus.net +10688 + Unassigned + ---none--- + (Removed 2002-06-18) +10689 + Ahaza Systems + Tom Jackson + tomj&ahaza.com +10690 + Celltrex LTD + Rami Citrom + rcitrom&celltrex.com +10691 + Chataigner + Philippe Chataigner + philippe&chataigner.com +10692 + freenet.de AG + Marian Eichholz + postmaster&freenet.de +10693 + iTopia Inc. + Kent Thompson + kthompson&itopia.net +10694 + Measurable Solutions, Inc. + Michael Friedhoff + mfriedhoff&measurable.com +10695 + NIXC, Inc.
+ Darrin Walton + darrinw&nixc.net +10696 + Nuxeo + Florent Guillaume + fg&nuxeo.com +10697 + Optio Software, Inc. + Bobby Reid + breid&optiosoftware.com +10698 + Sanitätsbetrieb Meran + Roman Gritsch + roman.gritsch&asbmeran-o.it +10699 + SpellCaster Telecommunications Inc. + Matthew James + mdj&spellcast.com +10700 + Bulldog Technologies + Jason Hinze + jjh&cosmicflow.com +10701 + Telefonica DataCorp, S.A. + Fernando Sastre Jimenez + fernando.sastre&telefonica-data.com +10702 + Solvare + Christian Huldt + christian.huldt&solvare.com +10703 + ATnet + ATnet C/Team + c&atnet.at +10704 + Barracuda Networks AG (previous was 'phion Information Technologies') + Patsch Bernhard + bpatsch&barracuda.com +10705 + VOGT electronic AG + Volker Timper + vtimper&vogt-electronic.com +10706 + Accelerated Encryption Processing Ltd + Barry Kavanagh + barry.kavanagh&aep.ie +10707 + Adhersis + Edouard Viollet + snmp&adhersis.com +10708 + Advanced System Architectures Ltd + Simon Hall + simon.hall&asa.co.uk +10709 + ATUX + F.P.A. Kunst + frans&atux.nl +10710 + Avanade Inc + Steve Sall + stevesa&avanade.com +10711 + BAWI + Taejoon Kwon + linusben&bawi.org +10712 + Belkamneft + Dmitry Melekhov + dm&belkam.com +10713 + Chevin Ltd + Denis Laverty + denis.laverty&chevin.com +10714 + Dimension Data + Adrian Cuff + adrian.cuff&eu.didata.com +10715 + ForwardPath + Robert E Dolliver + rdolliver&forwardpath.com +10716 + Freedomland ITN SpA + Nikola Nikolic + n.nikolic&freedomland.it +10717 + Genie Telecom + Philippe Richard + p.richard&genietelecom.fr +10718 + Harris Wilder Pty Ltd + Jonathan Downes + jdownes&harriswilder.com.au +10719 + Atos Origin Nederland B.V. (aonl) + Peter Hoogendijk + Peter.Hoogendijk&AtosOrigin.com +10720 + LIMSI + Michel Lastes + Michel.Lastes&limsi.fr +10721 + Britestream Networks, Inc. + Ryan Nowakowski + ryan&britestream.com +10722 + Marque d'Or + Serge Tremblay + stremblay&marquedor.com +10723 + Mercury PS + Simon Reader + simon.reader&mercury-ps.com +10724 + Morpheus Net + Marek Isalski + iana&maz.nu +10725 + Pacific Internet Ltd + Hwee Hong Tan + noc&pacific.net.sg +10726 + Quantiva, Inc + Frank Stolze + frank&quantiva.com +10727 + Red Cientifica Peruana + Rolando Guerra + operador&rcp.net.pe +10728 + Redline Communications Inc. + Sinisa Djurkic + sdjurkic&redlinecommunications.com +10729 + Red-M Communications Ltd + Dave Wakelin + david.wakelin&red-m.com +10730 + SEE Telecom + Albert Marneffe + a.marneffe&see.be +10731 + Silicon Energy + Dale Fong + dale&siliconenergy.com +10732 + Symbidia Limited + Daniel Miao + daniel&symbidia.com +10733 + The Mercury File System + Josh + josh&mercuryfs.net +10734 + TippingPoint Technologies + Matthew D Brock + mbrock&tippingpoint.com +10735 + Unicorn Holding a.s. + David Kadlec + david.kadlec&unicorn.cz +10736 + University of Denver + Phil L. Tripp + Phil.Tripp&du.edu +10737 + Web Simulation + Riley Rainey + rrainey&websimulations.com +10738 + Comuniq Inc. + Bennett Clark + bennett&comuniq.com +10739 + DELPHIC Medical Systems + James Webster + james&delphic.co.nz +10740 + Neodev + Jean-Marc WEEGER + jm.weeger&isdnlabs.com +10741 + Vertical Inversion Systems, Inc. + Richard S. Riess + rriess&verticalinversion.com +10742 + Nozema N.V. 
+ Gerrit van de Haar + gerrit.vd.haar&nozema.nl +10743 + Riksförsäkringsverket (RFV) + Björn Blomgren + bjorn.blomgren&rfv.sfa.se +10744 + Kvarnbäcken Konsult AB + Mats Hansson + kvarnbacken&yahoo.se +10745 + CSO Lanifex GmbH + Paul Gillingwater + paul&lanifex.com +10746 + Rechenzentrum der Bundesfinanzverwaltung Frankfurt am Main + Anja Kammerzell + anja.kammerzell&rzf.bfinv.bund400.de +10747 + Rowan University + Mark Sedlock + sedlock&rowan.edu +10748 + Software Factory GmbH + Peter Bartl + bartl&sf.com +10749 + /dev/secure Pty Ltd + Cris Bailiff + c.bailiff+iana&devsecure.com +10750 + 3G-Scene Plc + Bhapinder Singh Toor + bhapinder.toor&3g-scene.com +10751 + 3Plex + Jason Pincin + jpincin&3plex.com +10752 + Artiman Oy + Pekka Eloranta + elo23&saunalahti.fi +10753 + Billing for Enterprises + Stephan Kauss + stephan.kauss&b4esys.com +10754 + CAS Tecnologia S/A + Odair Marcondes Filho + registros&castecnologia.com.br +10755 + ChipData, Inc. + Stephen Garner + sgarner&chipdata.com +10756 + Chongho Information & Communications co., LTD + lee myung suk + mslee&chongho.com +10757 + Diogenes, Inc. + Ronald Trask + ron.trask&diogenesinc.com +10758 + Elyzium Limited + Probal Sil + probal.sil&elyzium.co.uk +10759 + Energos asa + Frode Stenstrom + frode.stenstrom&energos.com +10760 + Gravity Hill Technologies, Inc. + Catherine Victor + cathy&gravityhilltech.com +10761 + Greenheck Fan + Eric Pond + eric.pond&greenheck.com +10762 + Horizon Web Graphics + Paul Wolf + sabre1205&qwest.net +10763 + HanSung Teliann, Inc + Hee-Kyung Seo + zerocool&hsteliann.com +10764 + Infotrek + David Croft + david&infotrek.co.uk +10765 + INZEN Co., Ltd. + Hyunchul Kim + kimhc&inzen.com +10766 + ISG Systems AB + Johan Winäs + johan.winas&isg.se +10767 + James Madison University + Identity Management Team + idm&jmu.edu +10768 + L'Envol Limited + Gary Weatherhead + gary.weatherhead&lenvol.co.nz +10769 + ITDZ Berlin + Regina Gensicke + regina.gensicke&itdz-berlin.de +10770 + Make-Ing.com.ar + Fernando Roda Zoppi + fernando&make-ing.com.ar +10771 + Morrison Academy + Larry Dilley + sys-tech&mca.org.tw +10772 + novalis media + Juergen Geinitz + geinitz&novalis-media.de +10773 + Pacific Northwest Networks, Inc. + Judd Rock + judd.rook&pnn.com +10774 + Perforations Ltd + Warren Deane + warren&perforations.com +10775 + PERI Formwork Systems, Inc. + Zeke Brubaker + zeke.brubaker&peri-usa.com +10776 + Reactive Network Solutions Inc + David Scott + david&reactivenetwork.com +10777 + Revicon Srl + Jan Nielsen + nielsen&revicon.com +10778 + RTCL Lab, University of Michigan + Abhijit Bose + abose&engin.umich.edu +10779 + Sparkassenversicherung Wiesbaden + Tilman Bohn + tilman&gmx.de +10780 + SWAN, s.r.o. + Peter Hudak + peter.hudak&swan.sk +10781 + Schneider Electric Buildings AB (previous was 'TAC AB') + Jonas Bulow + jonas.bulow&schneider-electric.com +10782 + TAZZ Networks + Erika Hart + eh&tazznetworks.com +10783 + The Church of Jesus Christ of Latter-Day Saints + ICS Monitoring Team + ics-eng-monitoring&ldschurch.org +10784 + Time & Frequency Solutions Ltd + David Squires + david.squires&timefreq.com +10785 + Transtech Networks + Deh-phone Hsing + dhsing&transtechnetworks.com +10786 + UNSW Physics APT + Keith Bannister + k.bannister&student.unsw.edu.au +10787 + Verity, Inc. 
+ David Wilbur + dwilbur&verity.com +10788 + Volt Information Sciences + Roger Smith + roger.smith&vde.co.uk +10789 + WebAgents GmbH + Peter Doemel + pdoemel&webagents.de +10790 + Websilicon + Iftah Bratspiess + iftah&web-silicon.com +10791 + WestOne + Ritchie Young + ritchie.young&westone.wa.gov.au +10792 + Xeline Co., Ltd. + Ki-Hyuk Yang + khyang&xeline.com +10793 + YourASP Pty Ltd + Alexander Liffers + alex&yourasp.com.au +10794 + ZTR Control System + Dennis Webster + dwebster&ztr.com +10795 + Bizz Technologies SAS + Jérôme Ramette + jerome.ramette&bizzgo.com +10796 + C.R.F. Consulting Ltd + Nik Clayton + nik&crf-consulting.co.uk +10797 + Commercial Data Systems + Jeff Chan + jeff&cdsinc.com +10798 + Commtel + Kanji T Bates + eng&ctel.net +10799 + Jungo Software Technologies Ltd. + Ron Kol + ron&jungo.com +10800 + Kinor + Roei Goldblat + roeig&yahoo.com +10801 + Rapid 7, Inc. + Tas Giakouminakis + tas&rapid7.com +10802 + RBG + Stefan Labich + stefan.labich&rbg.de +10803 + Staudinger Wels + DI. Staudinger Helmut + staudin&utanet.at +10804 + Union Bank of California + Neil Patel + neil.patel&uboc.com +10805 + Wedgetail Communications + Dean Povey + dpovey&wedgetail.com +10806 + Xebeo Communications, Inc. + Eric Dubuis + dubuis&xebeo.com +10807 + Centrale Financien Instellingen + Wiebe Hordijk + w.hordijk&cfi.nl +10808 + China Greatwall Computer Shenzhen Co., LTD + Wang Xiaopeng + wangxp&ggw.com.cn +10809 + Disha Technologies + Taizun Kanchwala + taizun&dishatech.com +10810 + Nobelmann + KyungHyun Kim + khkim97&dreamwiz.com +10811 + XiaMen Good First Optics Electricity Integration CO.,LTD + Yade Hong + apolloone&gfirst.com.cn +10812 + InfoTeCs + Feodor I. Getman + tig&infotecs.ru +10813 + Centre National de la Recherche Scientifique + Marc Dexet + marc.dexet&dsi.cnrs.fr +10814 + UNIFIED Technologies, Inc. + Ronald W. Henderson + rwh&unifiedtech.com +10815 + CyberTrader, Inc. + Dave Harding + dave&cybertrader.com +10816 + Datang Radio Communication Branch + Xiaoling Chen + cxl_k&263.net +10817 + Heraeus Holding GmbH + Martin Döring + martin.doering&heraeus.com +10818 + .windh AB + Marc Klefter + marc.klefter&windh.com +10819 + [IIX-NAP] MAHATEL, PT + Teddy A. Purwadi + teddyap&iixnetworks.net +10820 + LM Sistemas + Raul M. Fragoso + raul&lm.com.br +10821 + Absolight + Mathieu Arnold + ma&absolight.fr +10822 + ADTEC + Research and Development + philo01&adtec.co.kr +10823 + Advanced Research & Technology Ltd + Peter Vaughan + pete&ar-t.co.uk +10824 + AGAVA Software Ltd. + Vladimir Panfilovich + noc&agava.com +10825 + Alectta + Carlos Gonzalez + carlosj&alectta.com +10826 + Alice-Salomon-Fachhochschule für Sozialarbeit und Sozialpädagogik Berlin + Peter Daum + gator&asfh-berlin.de +10827 + ALVE Technology Corporation + Scott Taylor + scott&alve.com +10828 + American Data Technology, Inc. + Chris Ess + cess&localweb.com +10829 + Andover Controls Corporation + Cam Williams + WilliamsC2&AndoverControls.com +10830 + APCON, Inc. + Dennis Bauermeister + dennisb&apcon.com +10831 + At Oliver.com + Brian Olivier + brian&olivier.com +10832 + B2C2, Inc. + Augusto Cardoso + augusto&b2c2inc.com +10833 + binarycore networks + Chris Ward + cward&playdead.net +10834 + CargoBay + HJ Tsai + hjtsai&cargobay.com +10835 + cartoonviolence.net + Tobyn Baugher + trb&cartoonviolence.net +10836 + Command-e Technology Co., Ltd. + Jianshu Chou + choujs&263.net +10837 + CompHealth + David P. Boswell + dboswell&comphealth.com +10838 + Contec C.V. 
+ Marc Van den Bergh + vandenbergh.m&contec.be +10839 + DISTRIBUTEL + Paul Khavkine + paul&distributel.net +10840 + Dragon Computer Consultants Inc. + Doug Nazar + nazard&dragoninc.on.ca +10841 + Ejasent Inc. + Norman Chin + nchin&ejasent.com +10842 + EnBW Systeme Infrastruktur Support GmbH + Ralf Fischer + ralf.fischer&enbw.com +10843 + Ferret + Paul Leonovich + lpaul&enteract.com +10844 + Fidelia, Inc. + Rajib Rashid + rajib.r&fidelia.com +10845 + grandmas, inc + Oliver Soell + Oliver&grandmas.org +10846 + Impact Technologies, Inc. + Bryan S. Adams + bryana&impacttech.com +10847 + Insystek Inc. + Alex Turner + alex&insystek.com +10848 + IPfonix, Inc. + D. R. Evans + n7dr&ipfonix.com +10849 + Ithiriel Software + Chris Ess + caess&ithiriel.com +10850 + KEFTA Inc. + Jason Matthews + jason&kefta.com +10851 + Knowledge Intelligence AG + Lars Bruechert + l.bruechert&ki-ag.de +10852 + Kuulalaakeri Oy + Kai Heikola + kai.heikola&kuulalaakeri.fi +10853 + LTC Sp. z o.o. + Przemyslaw Sztoch + psztoch&finn.pl +10854 + Lviv National Polytechnical University + Adrian Pavlykevych + pam&polynet.lviv.ua +10855 + Mascon Communication Technologies + Vishwavasu Chobari + vishwa&email.masconit.com +10856 + Menlo Park Technologies + Sean Doherty + sean&mpt.ie +10857 + Mindframe Software & Services Pvt. Ltd. + Varun Varma + varun&mindsw.com +10858 + Mycon Networks + Aubrey Kilian + aubrey&mycon.co.za +10859 + myinternet Ltd + Technical Contact + mitc&myinternet.com.au +10860 + Naumann EDV + Olaf Naumann + onaumann&netcologne.de +10861 + Niels Bohr Institute + Björn S. Nilsson + nilsson&nbi.dk +10862 + NOAH Industries, Inc. + James Fegan + jfegan&noahcorp.com +10863 + Odyssey Technologies Limited + A. Gayathri + gayathri&odysseytec.com +10864 + Pacific University + Brandon M. Browning + browning&pacificu.edu +10865 + Pete Wilson, consulting software engineer + Pete Wilson + pete&pwilson.net +10866 + Philip M Lembo + Philip Lembo + philip&lembobrothers.com +10867 + playdead networks + Chris Ward + cward&playdead.net +10868 + PremiTech A/S + Henrik Sloth + hsloth&premitech.com +10869 + Datalogic Mobile, Inc. + Reid Ligon + reid.ligon&datalogic.com +10870 + Safewww r&d Ltd + Boris Yevsikov + borisy&safewww.com +10871 + Shanghai Teraband Phtonics Co., Ltd. + Zheng Dayong + dayongzheng&163.net +10872 + SkyFlow Inc + George Krucik + george.krucik&skyflow.com +10873 + Soros Foundation Kazakhstan + Ilya Shadrin + ishadrin&soros.kz +10874 + Spectrum Astro Inc + Andrew Massiah + andrew.massiah&specastro.com +10875 + SSH Communications Security Corp. + Juha Holstila + juha.holstila&ssh.com +10876 + Super Micro Computer Inc. + Roy Chen + royc&supermicro.com +10877 + System Services Sp. z o. o. + Maciej Podanowski + maciej.podanowski&s-serv.pl +10878 + Techlink Desenvolvimentos Tecnologicos Ltda + Paulo Guimaraes + paulo.g&techlink.com.br +10879 + Tenebras, LLC + Michael Sierchio + kudzu&tenebras.com +10880 + Togabi Technologies, Inc. + Ron Tabor + rtabor&togabi.com +10881 + Uffizio Software Consultants Pvt. Ltd. + Rajesh Narayan + rajesh.narayan&india.uffizio.com +10882 + Université Paris 5 + Monique Lagarde + legarde&dsi.univ-paris5.fr +10883 + Unassigned + Returned 2006-04-28 + ---none--- +10884 + Vinzant, Inc. 
+ David Vinzant + dvinzant&vinsoft.com +10885 + Virtual Monet Technologies + Frank Koenen + fkoenen&virtualmonet.com +10886 + Westland Helicopters Ltd + Graham Stone + stoneg&whl.co.uk +10887 + WizCom Ltd + Malcho + malcho&wizcom.bg +10888 + Xelus Inc + Paul Russello + paul_russello&xelus.com +10889 + AL Digital Ltd. + Bob Wilkinson + bob&aldigital.co.uk +10890 + CommWorks + Mike Borella + mike_borella&commworks.com +10891 + Dave's Network Services + David P. Boswell + dave&daves.net +10892 + Doctors.net.uk + Simon Potter + simon.potter&mess.doctors.org.uk +10893 + Iomedex Corporation + Peter Simpson + peter&med.com +10894 + Novra Technologies Inc. + Spiro Philopoulos + sphilopoulos&novra.com +10895 + Raonet Systems Inc. + Lee Nam Woo + tree&raonet.com +10896 + The Career and Education Network, Inc. + Lige Hensley + ligeh&yahoo.com +10897 + RECOMBINANTWEB + Saifi Khan + recombinantweb&vsnl.net +10898 + OOZOOD + Boujdaa Khalid + Kboujdaa&orange.fr +10899 + Consors Discount-Broker AG + Oliver Tschaeche + oliver.tschaeche&consors.de +10900 + Serco + Rod McKenzie + rod.mckenzie&cern.ch +10901 + Spinnaker Networks, Inc. + Anton W. Knaus + awk&spinnakernet.com +10902 + 3IC Inc. + Hyoung-Seok Park + hspark&3ic.co.kr +10903 + Activate + Matt Breed + mattb&activate.net +10904 + Alpine-Mayreder BauGmbH + Hans Lechner + edv&alpine.at +10905 + Arsenal Digital Solutions + Jim Kerick + jkerick&arsenaldigital.com +10906 + A-SK Computers + Sushil Kambampati + sushil&a-sk.com +10907 + Ateb Limited + Bob Gautier + rjg&ateb.co.uk +10908 + Bigsur Communications + G. Paul Ziemba + ziemba&best.com +10909 + BroadTel Communications + Souheil Abihanna + sabihanna&broadtel.com +10910 + Chateau Systems, Inc + Larry Walsh + larry.walsh&usa.net +10911 + Appello, a.s. + Tibor Jakab + tibor.jakab&appello.biz +10912 + Cincom Systems, Inc. + Leonard S. Lutomski + llutomski&cincom.com +10913 + Client Software International + Jeffrey Lee + jeffl&clientsoftware.com.au +10914 + COLT Telecommunications + John Hayday + jhayday&colt.net +10915 + ConAgra Foods, Inc. + Patrick Timmins + patrick.timmins&conagrafoods.com +10916 + Cyneta Networks + Chris Hill + chill&cynetanetworks.com +10917 + Datamax Coporation + Martyn Edwards + martyn_edwards&datamaxcorp.com +10918 + Datamission + Jeffrey Lee + support&datamission.com +10919 + dbap GmbH + Bjoern Dolkemeier + info&dbap.de +10920 + Dir Solutions bv + Erik Kaats + e.kaats&dirsolutions.nl +10921 + Drumgrange Limited + Steve Lindley + slindley&drumgrange.co.uk +10922 + ELSIS + Jonas Zaveckas + jonas.zaveckas&elsis.com +10923 + Ericsson AB - Packet Core Networks + Göran Hall + Goran.Hall&ericsson.com +10924 + ESTOS GmbH + Stefan Katschewitz + IT&estos.de +10925 + Firmseek + Miguel Cruz + miguel&firmseek.com +10926 + Food Brokers Limited + Ken Patching + netadmin&foodbrokers.co.uk +10927 + Fullsix + Sebastien Fibra + nic&fullsix.com +10928 + Fybubblan.com Consulting + Torbjorn Borg + torbjorn.borg&fybubblan.com +10929 + GCD Printlayout + Andreas Buchler + system&pl.gcd-erlangen.de +10930 + Georg-August Univeristy Goettingen (Internet-AG) + Christian Boye + cboye&goe.net +10931 + GIGA STREAM - UMTS Technologies GmbH + Clemens Dietel + cdietel&giga-stream.de +10932 + Gunadarma University + Avinanta Tarigan + avinanta&staff.gunadarma.ac.id +10933 + H.A. Technical Solutions + LeRoy D. Earl + leroy&tech-sol.com +10934 + Hamsch + Martin Hamsch + martin&hamsch.net +10935 + Horizon Digital Enterprise, Inc. 
+ Yasuhiro Sumi + yasuhiro.sumi&hde.co.jp +10936 + Ijichi Research + Dominic Ijichi + dom&ijichi.org +10937 + INFALSYS + Jorge Castellet + jcastellet&infalsys.es +10938 + International Messaging Associates Corporation + Enzo Michelangeli + em&ima.com +10939 + internet news + Ishmael Plange + ishmaelplange&yahoo.com +10940 + JD Thomlinson, Ltd. + JD Thomlinson + jthom&xnet.com +10941 + Kandalaya + Raj Mathur + raju&kandalaya.org +10942 + LifeLine Networks bv + Bastiaan Bakker + bastiaan.bakker&lifeline.nl +10943 + Linagora + Christophe Jauffret + jauffret&linagora.com +10944 + Maikis - EDV Dienstleistungen + Markus Maikis + maikis&myhq.at +10945 + Midway Games West, Inc. + William R White III + wwhite&midway.com +10946 + Mouselink Ltd + Rob Chapman + rob.chapman&mouselink.co.uk +10947 + Multi-Druck GmbH + Robert Steiner + rst&foliendruck.de +10948 + MynaNET + Erik Keith + erik&mynanet.net +10949 + Netflix.com + Michael Tripoli + mtripoli&netflix.com +10950 + Netrake Corporation + Susan Burt + susan&netrake.com +10951 + NetUSE AG + Roland Kaltefleiter + rk&netuse.de +10952 + NetZero Inc. + Thod Nguyen + thodn&corp.netzero.net +10953 + Nightlife.se + Mårten Gustafsson + crew&sudac.org +10954 + nikolici.com + Nikola Nikolic + nikola&nikolici.com +10955 + Wescan Limited + Don Pullen + donpullen&wescanltd.com +10956 + Noviforum, Ltd. + Grega Bremec + grega.bremec&gbsoft.org +10957 + noze + Stefano Noferi + stefano&noze.it +10958 + OLICORP Technologies + Chabert Sébastien + chabert&olicorp.net +10959 + Politechnika Warszawska + Tadeusz Rogowski + ldap&coi.pw.edu.pl +10960 + Qualicontrol + Jorge Tellez + jtellez&testandgo.com +10961 + Ricciardi Technologies, Inc + Kevin J Anderson + kevin.anderson&rti-world.com +10962 + Rutgers, the State University of New Jersey + Charles Hedrick + hedrick&nbcs.rutgers.edu +10963 + SENA Systems + Shekhar Jha + shekhar&senasystems.com +10964 + SMT Data A/S + Christian Kullmann + ck&smt.dk +10965 + SolidStreaming, Inc. + Robert Solfanelli + bob&solidstreaming.com +10966 + Sollie codeWorks + Arve Sollie + codeworks&mobilpost.com +10967 + Surftool Systems + Robert Tykulsker + surftool&comcast.net +10968 + Systematic Software Engineering A/S + Bo Kirkegaard + bok&systematic.dk +10969 + Thales Communications + David Thompson + mib&ch.thalesgroup.com +10970 + Threshold Networks + Dave Hecht + dhecht&thresholdnetworks.com +10971 + VPN Dynamics, Inc. + Mark Schaeffer + mark&vpndynamics.com +10972 + WIT-Software + Pedro Manuel Jordão Pereira + pereira&wit-software.com +10973 + XMS + Martin Wickman + wic&xms.se +10974 + Youngstown State University + John Dalbec + jpdalbec&ysu.edu +10975 + Zion Software, LLC + David Ferrero + david.ferrero&zion.com +10976 + Zoomtown.com + Josh Banks + soc&fuse.net +10977 + Grid-Ireland + B.A. Coghlan + coghlan&cs.tcd.ie +10978 + Kumamoto Industrial Research Institute + Takao Kawakita + tkawakit&kmt-iri.go.jp +10979 + Capitel Group + Shengli Chen + chenshl&capitel.com.cn +10980 + IQStor Networks + Marcos Delmar + mdelmar&iqstor.com +10981 + RouterD Networks Limited + Zhang Qilin + iana&routerd.com +10982 + UBS Warburg + John Goulding + john.goulding&ubsw.com +10983 + BTG, Inc. 
+ Greg Lambert + glambert&btg.com +10984 + bvba Woodstone + Dirk Bulinckx + dirk&woodstone.nu +10985 + Jive Software + Matt Tucker + info&jivesoftware.com +10986 + Beijing Polytechnic University + Sun Xiaopeng + littleroc&263.net +10987 + Eircell 2000 + Mary Hanley + noc&e-merge.ie +10988 + SoloMio Corporation + Drew Johnson + djohnson&solomio.com +10989 + Alibre Incorporated + Paul F. Williams + paulw&alibre.com +10990 + Astrum Consulting + S. William Schulz + swschulz&astrum.net +10991 + Axis Integrated + Casey Dinsmore + casey&axisintegrated.com +10992 + Babel Professional Services srl + Stefano Alessandri + stefanix&babelps.it +10993 + BabelTech A/S + Chris Larsen + vader&babeltech.dk +10994 + BISS GmbH + Jan Mazac + jwm&biss-net.com +10995 + Blue Mug + Michael Touloumtzis + miket&bluemug.com +10996 + CCSS (Europe) Ltd + Lee Bradshaw + lee&bigpond.net.au +10997 + Connection Software + Peter Burns + peter.burns&csoft.co.uk +10998 + Conversant Systems, LLC + Darryl C Price + darryl&convsys.com +10999 + COS AG / Austria + Dipl. Ing. Ulrich Scherb + ulrich.scherb&cos.at +11000 + Creative Networking Solutions + Mike Richards + mrichard&creativens.com +11001 + Dreyer Consulting + A. Dreyer + adreyer&adreyer.de +11002 + Du Maurier Ltd + Steve Manning + steve&dumaurier.co.uk +11003 + e^deltacom + David Botham + david&botham.net +11004 + EarthTrax, Inc. + Mark Hill + mhill&epicentre.net +11005 + E-Certify + Chris van sant + chris.van.sant&e-certify.com +11006 + FlashNet Computers Ltd + Craig Goodenough + craig&flashnet.co.nz +11007 + FMV, Swedish Defence Materiel Administration + Johannes Lindgren + jslin&fmv.se +11008 + Fujiwara Soft Office, Co., Ltd. + Toshiki Fujiwara + toshi-f&fusoft.co.jp +11009 + Hadmut Danisch + Hadmut Danisch + netmaster&danisch.de +11010 + Harrison Family Organization + Trevor Harrison + trevor&harrison.org +11011 + HiWAAY Information Services + Chris Adams + cmadams&hiwaay.net +11012 + ImagineAsia, Inc. + Benjamin C. Oris Jr. + benjamin.oris&imagineasia.com +11013 + LEC Information Systems + Donna Burke + dburke&lecis.com +11014 + Liberty University + Ken Hammill + khhammill&liberty.edu +11015 + Lokomo Systems AB + Patrik Johansson + patrik&lokomo.com +11016 + ManageStar.com, Inc. + David Young + david.young&managestar.com +11017 + MARA Systems AB + Henrik Nordström + hno&marasystems.com +11018 + Massillon Cable Communications + Christopher Altland + topher&sssnet.com +11019 + MENTA + Angel Herraiz Murciano + aherraiz&ctc.es +11020 + MVP Systems, Inc. + John Vottero + john&mvpsi.com +11021 + N2 Broadband, Inc. + Stephen O. Berger + sberger&n2bb.com +11022 + Omnisci Software, Inc. + Toshiki Fujiwara + toshi-f&omnisci.co.jp +11023 + University of Oxford IT Services + Michael Howe + oids&rt.oucs.ox.ac.uk +11024 + P&S DataCom Corporation + Glenn Sato + g.sato&psdatacom.com +11025 + Pagoo + Colin Weir + colinw&pagoo.com +11026 + Perfectway Corporation + Markus Weber + markus&perfectway.com +11027 + Physics Department, University of Kassel + Lars Haag + lhaag&physik.uni-kassel.de +11028 + Reliable Networks, Inc. + Dan Gurke + iana&noc.rninc.net +11029 + Reziprozitaet Consulting + Klaus J. Klein + kleink&reziprozitaet.de +11030 + RouteFree Inc + Chris Nguyen + cnguyen&routefree.com +11031 + SATEC SA + Jose Vences + sistemas&satec.es +11032 + Semandex Networks, Inc. 
+ Max Ott + max&semandex.net +11033 + SentitO Networks + Barrie Saunders + bsaunders&sentito.com +11034 + Sentor Monitoring Systems Pty Ltd + David Mittnacht + dmittnacht&sentor.cc +11035 + Sheer Genius Software + Nick Tornow + ntornow&sheergeniussoftware.com +11036 + Softax + Lukasz Engel + lukasz.engel&softax.pl +11037 + Stratyc L.L.C. + Ben Nielsen + ben.nielsen&stratyc.com +11038 + TECO SOFT España, S.A. + Francisco Rueda Hernández + francisco.rueda&tecosoft.es +11039 + Tivo, Inc. + Andy Townsley + andyt&tivo.com +11040 + Tool Banking Solutions + David Deza + dezad&tb-solutions.com +11041 + UFSM + Marcio d'Avila Scheibler + marcio&cpd.ufsm.br +11042 + ViaCLIX, Inc. + Cory Adams + ckadams&viaclix.com +11043 + VIDA Software S.L. + Rafael del Valle + rafael&vida-software.com +11044 + Vodafone, Egypt + Mohamed Maher + mohamed.maher&vodafone.com.eg +11045 + Windborne Productions, Inc. + Branson Matheson + branson&windborne.net +11046 + Zero Computing S.r.l. + Paolo Pennisi + staff&zero.it +11047 + AK-Industrieinformatik GmbH + Ulrich Pfahler + upfahler&aki-gmbh.de +11048 + billiton internetservices + Patrick Ahlbrecht + admin&billiton.de +11049 + CP Corporate Planning AG + F. Ploss + fp&corporate-planning.com +11050 + IDX Systems Corporations + Douglas Herrick + douglas_herrick&idx.com +11051 + Point Systems + Sam Barnum + sbarnum&pointsystems.com +11052 + Achmea Holding N.V. + G.P. van Dijk + gerard.van.dijk&achmea.nl +11053 + Direct Internet, Inc. + Michael Yanok + myanok&direct-internet-inc.com +11054 + Roman Seidl + Roman Seidl + rot&granul.at +11055 + InnoAce CO., Ltd. + Wonpil Kal + wpkal&innoace.com +11056 + Allmycrap.com + Jim Carey + jim&allmycrap.com +11057 + Eyefinity, Inc + Ron Jacobs + ronja&eyefinity.com +11058 + Hitachi Information & Telecommunication Engineering, Ltd. + SatoshiI Yamaguchi + satoshi.yamaguchi.cc&hitachi.com +11059 + IMPRESS SOFTWARE AG + Udo Juettner + udo.juettner&impress.com +11060 + Intervoice-Brite + Travis Berthelot + tberthel&intervoice-brite.com +11061 + Longitude, Inc. + Steven Magnell + smagnell&longitude.com +11062 + Nurnberg Scientific + Keith Nasman + keith&nurnberg.com +11063 + PingMagic Limited + James N. C. Ho + hostmaster&pingmagic.com +11064 + RHB Securities + Hariraj + hariraj&rhb.com.my +11065 + Telcoware + Jin Ho Lee + siva&telcoware.com +11066 + Teniad Technologies + William Ferris + william.ferris&tenaid.com +11067 + University of Belgrade + Marina Vermezovic + marina.vermezovic&rcub.bg.ac.rs +11068 + Webscreen Technology Ltd. + Sean Witham + sean.witham&webscreen-technology.com +11069 + Yandex + Vasily Tchekalkin + bacek&yandex-team.ru +11070 + uReach Technologies, Inc. + Sean O'Donnell + skodonnell&ureach.com +11071 + Motus Technologies + Sebastien Pouliot + spouliot&motus.com +11072 + IOTEC AB + Matts Persson + matts.persson&iotec.se +11073 + 7th-Angel + Akira Ryowa + ryowa&yo.rim.or.jp +11074 + ALTER + Radoslaw Stachowiak + tech&alter.pl +11075 + Antarix e Applications Limited + Sanjeev Gopal + sgo&antarix.net +11076 + Banesto S.A. + Angel Redondo + anredondo&banesto.es +11077 + Andrej Ota (formerly 'C.net d.o.o.') + Andrej Ota + iana&ota.si +11078 + Centralworks LLC + Andy Smith + andy&centralworks.com +11079 + EIT Ltd + Greg Malewski + gregm&eit.ltd.uk +11080 + Airwide Solutions (formerly 'First Hop') + Nick Worth + snmp&airwidesolutions.com +11081 + Hydrogen Line, Inc. + Brad G. Kyer + bard&hydrogenline.com +11082 + Illinois Central College + Mike McClelland + mmcclelland&icc.cc.il.us +11083 + Intersix Technologies S.A. 
+ Fernando Tasso + fernando.tasso&intersix.com.br +11084 + ITDevices, Inc. + Pat McFaul + patmcfaul&itdevices.com +11085 + Manojam + Manoj A.M. + manojam&yahoo.com +11086 + MindTree Consulting Pvt Ltd + Ananda Rao Ladi + lar&mindtree.com +11087 + Netforest Inc. + UEDA Hiroyuki + sysadmin&netforest.ad.jp +11088 + Netus Technologies Co., Ltd. + Calvin Roh + calvin&netustech.com +11089 + Preferred Systems + David LaBissoniere + labisso&debian.org +11090 + Propero Limited + Nick Townsend + nick.townsend&propero.net +11091 + Raze Technologies + Chris Petrick + cpetrick&razetechnologies.com +11092 + St. Boniface General Hospital + Jason Mowat + jmowat&sbgh.mb.ca +11093 + Telamon Systems + Paul Pegrum + paul.pegrum&telamon.co.uk +11094 + xpedite systems inc + Subu Ayyagari + s.ayyagari&xpedite.com +11095 + Loea Communications + Thomas Lambert + tlambert&loeacom.com +11096 + BlueArc, part of Hitachi Data Systems + Carlo Garbagnati + hnas-mib-support&hds.com +11097 + HEXAL AG + Michael Anderson + michael.anderson&hexal.de +11098 + ComTelco (North America), Inc. + David Peterson + davidpeterson&bigfoot.com +11099 + Dickerson Enginering + Jay Tolbert + jay.tolbert&dei-pe.com +11100 + Linux NetworX + Levi Pearson + levi&lnxi.com +11101 + NRI SecureTechnologies, Ltd. + Tatsumi MASHIMO + iana-info&nri-secure.co.jp +11102 + OSA Technologies Inc. + Shawn Fang + shawn.fang&osatechnologies.com +11103 + Opus International Consultants Ltd + Neil Kane + neil.kane&opus.co.nz +11104 + Passback IT Consultancy + Keith Sharp + kms&passback.co.uk +11105 + Pivot Consulting Ltd + Brian Hoy + brian.hoy&pivot.co.nz +11106 + Scene7, Inc + Stephen Lucchese + stephen&scene7.com +11107 + Ucentric Systems + Jinyou Zhang + jzhang&ucentric.com +11108 + WaveMarket, Inc. + Scott A. Hotes + scott&wavemarket.com +11109 + XDV - Verein für experimentelle Datenverarbeitung + Stefan H. Holek + stefan&xdv.org +11110 + Arkivio + Albert Leung + albert&arkvio.com +11111 + Bell Ontario Service Assurance + Bruce Nichol + bruce.nichol&bell.ca +11112 + Stephen Gould Paper co + Brian Bizzaro + bbizzaro&stephengould.com +11113 + BJC HealthCare + John Glynn + jglynn&bjc.org +11114 + Citrus communications GmbH + Marco Hentschel + mhentschel&citrus-tel.de +11115 + George Fox University + Anthony Brock + admin&georgefox.edu +11116 + Marco Hentschel + Marco Hentschel + marco.hentschel&web.de +11117 + Vereniging Open Domein + Zenon Panoussis + info&opendomein.nl +11118 + Alpha1 Communications + Marius Strom + marius&alpha1.net +11119 + 123india.com + Shuhaid + shuhaid&corp.123india.com +11120 + Advanced Software Resources, Inc. + Barry Schwartz + barry.a.schwartz&lmco.com +11121 + C3 Ltd + Colin Everitt + colin.everitt&c3ltd.co.uk +11122 + Cencosud S.A. + Luis Alberto Cruz + lcruz&cencosud.com.ar +11123 + Colruyt Group + Mark Van Poucke + netwerk&colruytgroup.com +11124 + Contenture Ltd + Karl W. Feilder + karl&feilder.com +11125 + Domani Software Services + Michael DiChiappari + mdichiappari&domanisoft.com +11126 + ESCP-EAP + Pascal Jacquin + jacquin&escp-eap.net +11127 + Fst s.r.l. + Orlando Murru + orlando.murru&fst.it +11128 + Gadbois Consulting + David Gadbois + gadbois&computer.org +11129 + Google, Inc. + Ben Laurie + benl&google.com +11130 + Hülsmann Services e.K. 
+ Ralf Hülsmann + ralf.huelsmann&huelsmann-services.de +11131 + INSIDERS Information Management GmbH + Steffen Leonhardt + s.leonhardt&im-insiders.de +11132 + Integral Solutions Corporation + Norm Mackey + dns&supportwizard.com +11133 + Jemmac Software Limited + Mark Freeman + mark.freeman&jemmac.com +11134 + Lentronics + Karlos Prokopiuk + karlos.prokopiuk&indsys.ge.com +11135 + Marian Eichhloz + Marian Eichholz + eichholz&computer.org +11136 + Meitetsu System Dev Co.Ltd + Masaharu Hemmi + hemmi&nag.navel.co.jp +11137 + Metaparadigm Pte Ltd + Michael Clark + michael&metaparadigm.com +11138 + mFormation Technologies Inc. + Lenny Rocci + lenny.rocci&mformation.com +11139 + NEOWIZ Corporation + KWI NAM CHOI + gamese&neowiz.com +11140 + Network Center, Shanghai Jiaotong University + Wenqi Li + lwq007&mail1.sjtu.edu.cn +11141 + Omneon Video Networks + Christopher Davis + chr&omneon.com +11142 + OÖ Online + Manfred Eder + m.eder&ooe.net +11143 + PCSNC + Pascal Cimon + pcimon&pcsnc.com +11144 + RadPharm + David Clunie + dclunie&radpharm.com +11145 + Siemens Financial Services GmbH + Harald Plattner + harald.plattner&sfs.siemens.de +11146 + Siemens Informations Systems Limited + Sachin Bansal + sachin.bansal&sisl.co.in +11147 + SVTO Hewlett-Packard + Gregg Pulley + gpul&fc.hp.com +11148 + Swan Systems + Sara Taillon + sara&swansystems.com +11149 + TI, d.o.o. + Miha Rozic + miha.rozic&tiol.si +11150 + Mobileum Inc. (formerly Roamware Inc) + Subhabrata Biswas + subhabrata.biswas&mobileum.com +11151 + Urschel Laboratories Incorporated + Mark Nehemiah + hostmaster&urschel.com +11152 + Vocalcom + Frederic Steppe + f.steppe&vocalcom.be +11153 + WebWear + Alexey A. Shelkovich + ashelk&nsys.by +11154 + WireX Communications, Inc. + Howard Abrams + howard&wirex.com +11155 + WVS-groep + Richard Baselier + baselier&wvs.nl +11156 + CDMEDIC + Pablo Sau + psau&cdmedic.com +11157 + MultiTech Solutions Inc. + Shekhar Sharma + ss&mulsol.com +11158 + Panservice + Giuliano Peritore + g.peritore&panservice.it +11159 + PC Dialogs Inc. + Robert Pesner + bpesner&pcdialogs.com +11160 + TDK Corporation + Hisashi Oyama + vxd&mb1.tdk.co.jp +11161 + Wistron Corporation + Jay Tao + jay_tao&wistron.com +11162 + Advanced Campus Services + Art Vandenberg + avandenberg&gsu.edu +11163 + HAL Computer Systems Ltd + Steve Rubie-Todd + stevert&hal.co.uk +11164 + PBH Planungsbuero Hammer + Hansjuergen Riess + hansjuergen.riess&wip-munich.de +11165 + Acer Mobile Networks Inc. + Chi-An Su + Casu&acermn.com.tw +11166 + ALLEWA GmbH + Kurt Artho + kurt.artho&allewa.net +11167 + Azurtec, Inc. + Dennis Loreman + dennisl&azurtec.com +11168 + Banca del Gottardo + Stefano Klett + stefano.klett&gottardo.com +11169 + Beacon Bioscience, Inc. + Andrew Kraus + akraus&beaconbioscience.com +11170 + Commsology + Neil Taylor + neiltaylor&commsology.co.uk +11171 + CRoL + Miroslav Zubcic + mvz&crol.net +11172 + Die Informatik-Werkstatt GmbH + Peter Rau + peter.rau&informatik-werkstatt.de +11173 + Directory Solutions + Aaron Arbery + aaron&arbery.com +11174 + DX Antenna Co., Ltd. + Makota Haswgawa + kaihatsu&dxantenna.co.jp +11175 + Epic Group plc + Paul Collis + paulc&epic.co.uk +11176 + Objective Pathology Services Limited (formerly 'Extelligence') + Kemp Watson + hostmaster&objectivepathology.com +11177 + Grapevine Interactive (Pty) Ltd + Mike Schronen + mike&vine.co.za +11178 + Guardian Controls International + Gareth Williams + garethwilliams2002&yahoo.co.uk +11179 + hereUare Communications, Inc. 
+ Spencer Doherty + spencerd&hereuare.com +11180 + Invisible Hand Networks + Kevin Kretz + iana-admin&invisblehand.net +11181 + Jan Schmidt DV-Beratung + Jan Schmidt + jb.schmidt&gmx.de +11182 + Kretz Industries + Kevin Kretz + iana-admin&kretz.net +11183 + Mitac International Corp. + P.C. Wang + p.c.wang&mic.com.tw +11184 + Naray Information & Communication Enterprise + Kihyun Joe + khjoe24&narayinfo.com +11185 + POSSIO AB + Massie Inoue + massie.inoue&possio.com +11186 + Rattan Information + Karen Chen + karen&rt.com.tw +11187 + SNV Netherlands Development Organisation + Cathelijne Hornstra + chornstra&snv.nl +11188 + Universitat Rovira i Virgili + Lluis Alfons Ariño + lam&si.urv.es +11189 + University of Applied Sciences Schmalkalden + Jens Adner + oidmaster&fh-schmalkalden.de +11190 + Viox Services Inc. + Joseph Eaton + joe.eaton&viox-services.com +11191 + Xiam + Hugh O'Donoghue + hugh&xiam.com +11192 + Xiph.org Foundation + Jack Moffitt + jack&xiph.org +11193 + Salica Ltd + Steve Rubie-Todd + stevert&hal.co.uk +11194 + Wireless Maingate + Jan Fredriksson + jan.fredriksson&maingate.se +11195 + VECTOR sp. z o.o. + Grazyna Wielich + g.wielich&vector.com.pl +11196 + Marcant Internet Services GmbH + Marc Delker + noc&marcant.net +11197 + A l Jazeera Satellite Channel + Iyad Shiblaq + iyad.shiblaq&aljazeera.net +11198 + Atrid Systemes + Gilles Polart-Donat + g.polart&atrid.fr +11199 + Auspice, Inc. + Dino M. DiBiaso + dibiaso&auspiceinc.com +11200 + BellSouth + Eric Sheppard + eric.sheppard&bellsouth.com +11201 + Benefit Systems, Inc + James Turner + turner&blackbear.com +11202 + Blue Sky Studios + Andrew Siegel + abs&blueskystudios.com +11203 + Cybermation Inc. + Michael Borodiansky + mborodia&cybermation.com +11204 + Entercept Security Technologies + Laura Quiroga + lquiroga&entercept.com +11205 + Environmental Systems Research Institute (ESRI) + George Daswani + gdaswani&esri.com +11206 + EPLS Enterprise Computing Pte Ltd + Alan Kang + alan.kang&epls.com +11207 + Falk eSolutions AG + Tobias Wolff + wolff&falkag.de +11208 + Global Media Systems + Roland Roth + info&gms.lu +11209 + Gold Wire Technology + Wayne F. Tackabury + wayne&goldwiretech.com +11210 + Iain Stevenson + Iain Stevenson + iain&iainstevenson.com +11211 + MobiTV Inc. (formerly 'Idetic Incorporated') + Stephane Pelchat + spelchat&mobitv.com +11212 + IDM + Mathieu Poumeyrol + poumeyrol&idm.fr +11213 + Information Services Extended Inc. + Ed Huth + eehuth&isxinc.com +11214 + Meridis Corporation + Daniel Van Mosnenck + meridis&skynet.be +11215 + Navitaire, Inc. + Mike Gallagher + mike.gallagher&navitaire.com +11216 + Neoware Systems + Ed Parks + ed.parks&neoware.com +11217 + Oceanet Technology + Sébastien Guilbaud + sguilbaud&oceanet-technology.com +11218 + Oculan Corp + Jeff Schneider + jeff&oculan.com +11219 + OpVista + Ko-Hsing Chang + kchang&opvista.com +11220 + Pirelli Cavi e Sistemi S.p.A. + Carlo Rogialli + carlo.rogialli&pirelli.com +11221 + PROCON GmbH + Ramin Motakef + rmotakef&procon.de +11222 + Rochester General Hospital + Thomas Gibaud + tom.gibaud&viahealth.org +11223 + RWT Telefony Polskie S.A. + Marcin Slyk + marcin.slyk&rwttp.com.pl +11224 + SAEBEX (Pty) Ltd + Jasper Horrell + jasper&saebex.com +11225 + SCMB + Luigi Zuccarelli + zuccarell&scmb.co.za +11226 + Skandinaviska Magasin1 AB + Mikael Brandström + mikael.brandstrom&magasin1.se +11227 + StartCorp + Sherwyn Fernando + fernando&startcorp.com +11228 + SystemBase Co,.Ltd. 
+ Hyun Myung Kim + ksmin&sysbas.com +11229 + Systemware + David Zhang + david.zhang&systemware.com +11230 + Voice Access Technologies + Jason Loveman + jloveman&voice-access.com +11231 + Wincom Systems, Inc. + Paul Ruocchio + paul.ruocchio&wincomsystems.com +11232 + Apocalypse Computing Services + David Lim + oc.cloud.labs&gmail.com +11233 + Harald Gliebe + Harald Gliebe + harald.gliebe&online.de +11234 + Hotcircles + Srinivas Nimmagadda + sriniwas_ns&hotmail.com +11235 + WhichMall.com + D. Moonfire + dmoonfire&hotmail.com +11236 + Mosakin International Corporation + Rotimi O. Gbadamosi + rotimi&mosakin.com +11237 + ITEK spf + Andrej V. Aleksandrov + research&itek.com.ua +11238 + APS Systeme + Kurt Bohn + kurt.bohn&aps-systeme.de +11239 + Bennett, Coleman & Co. Ltd. + Jatin Nansi + jatin.nansi&timesgroup.com +11240 + Caixa d'Estalvis i Pensions de Barcelona + David Soler + dsoler&lacaixa.es +11241 + Carmel Secondary School + Hui Chun Kit + ckhui&school.net.hk +11242 + Custom Electronics + Roger Banks + roger&custom-electronics.co.uk +11243 + DCWG + Patrick Patterson + ppatterson&carillonis.com +11244 + NET Control + Dejan Mijailovic + dmijailovic&primus.ca +11245 + E2open + Ari Flink + ari&e2open.com +11246 + GHZ Inc. + Maqsood Khan + ghzinc&hotmail.com +11247 + GlaxoSmithKline + Ron Chavez + ron.l.chavez&gsk.com +11248 + Greenwich Technology Partners, Inc. + Michael Friedhoff + mfriedhoff&greenwichtech.com +11249 + i-DNS.net International Inc + Maynard Kang + maynard&i-dns.net +11250 + Indiana Purdue University Ft. Wayne + John Carter + carter&ipfw.edu +11251 + Intrado Inc. + Mike Koepke + mkoepke&intrado.com +11252 + iTouch Labs + R.I.Pienaar + rip&itouchlabs.com +11253 + J.P. Stephenson Co. + John Paul Stephenson + jpstephe&swbell.net +11254 + Jydsk Consulting + Hans-Henrik Andresen + hha&unixhaj.dk +11255 + LocatioNet Systems LTD + Fridlender Yair + yair&locationet.com +11256 + Stormshield (formerly 'NETASQ') + Boris MARECHAL + boris.marechal&stormshield.eu +11257 + Net-ES + Per Jensen + pj&net-es.dk +11258 + Nogui AB + Christer Palm + palm&nogui.se +11259 + Sepulveda Systems + Eric Welsh + ewelsh&sepulvedasystems.com +11260 + Single Source oy + Mikko Syrjälahti + mikko.syrjalahti&kronodoc.fi +11261 + Tschaeche Services + Oliver Tschäche + services&tschaeche.com +11262 + Wire Card AG + Siegfried Blanz + siegfried.blanz&wirecard.com +11263 + WRnetworks + Christian Richter + crichter&wrnetworks.de +11264 + MobileArts AB + Jonas Andersson + jonas.andersson&mobilearts.se +11265 + SET Software, Inc. + Koh Kawanobe + kawanobe&setsw.co.jp +11266 + JiangSu Fujitsu Telecommunications Technology CO.,LTD. + MeiLin + meil&virgo.jftt.fujitsu.co.jp +11267 + DISSC - Presidencia del Gobierno + Alberto Bañón + dissc&dissc.presidencia.gob.es +11268 + Aastra + Mario Giacometto + mgiacometto&aastra.com +11269 + Aculab plc + Chris Brown + chris.brown&aculab.com +11270 + Adelphia Business Solutions + Somesh Batra + sbatra&adelphia.net +11271 + Aria Technology Pte Ltd + Yu-Gin Ng + yugin&aria.com.sg +11272 + Birdstep Technology + Stefano Holguin + stefano.holguin&birdstep.com +11273 + BJB Software, Inc. + Brian J. Butler + bjbutler&bjbsoftware.com +11274 + Centric Telecom Ltd + Iain Donaldson + idonaldson&centrictelecom.com +11275 + ChangingWorlds + Eamonn Newman + eamonn.newman&changingworlds.com +11276 + Commil Ltd + Ilan Yosef + ilany&commil.com +11277 + convergence integrated media gmbh + Network Administrator + admins&convergence.de +11278 + CS Engineering b.v. 
+ Andre Canrinus + develop&corp.csnet.nl +11279 + Datalab Technologia S.A. + Jesus Benavente + jbenavente&dltec.net +11280 + Digimob Ltd + Gavin Kinghall Were + gavin&digimob.com +11281 + digital bus + John Castner + johnacastner&aol.com +11282 + DKSIN + DongGon Shin + red97&hitel.net +11283 + Dobsonics Laboratories + Robert L. Dobson + bobdobson&aol.com +11284 + ESW - EXTEL SYSTEMS WEDEL + Guenther Schenk + schenk2&esw-wedel.de +11285 + FlipDog.com + Justin Wood + justin&flipdog.com +11286 + H1 + Dickert Andreas + andreas.dickert&highwayone.de +11287 + High-Availability.com + Giles Gamon + giles&high-availability.com +11288 + iCreate Software, Inc. + Joe Perez + anotherj&pacbell.net +11289 + imagine LAN Inc. + Kirk Olsen + admin&imaginelan.com +11290 + Information Services Plc. + Dimitar Nikolov + dnikolov&egvrn.net +11291 + IONA Technologies + Jaspal Sandhu + jsandhu&iona.com +11292 + La Factoría de Comunicaciones Aplicadas,S.L. + Juan Ros Florenza + hsoft&all-done.com +11293 + LocaNet + Henning Holtschneider + hh&loca.net +11294 + Mobinor AS + Rune Hamnvik + rune&mobinor.no +11295 + Cybercity + Lars Thegler + iana&cybercity.dk +11296 + Neon Software, Inc. + Michael Swan + swan&neon.com +11297 + NetNet, Inc + Jason Seymour + seymour&netnet.net +11298 + NLweb.com + Janus Liebregts + janus&liebregts.nl +11299 + On Time Systems + Ron Lee + ron&otsys.com +11300 + Passageways Travel + Jeff Dorsch + jeff.dorsch&pways.com +11301 + Pearson Education + David Bastian + david.bastian&pearsoned.com +11302 + Perimeter Technology + Todd Bullock + tbullock&ip-acd.com +11303 + PhotoniXnet Corporation + Yoshihisa Koizumi + koizumi&photonixnet.co.jp +11304 + PrinterOn Corporation + Jeremy Finn + jfinn&printeron.net +11305 + Research Institute for Particle and Nuclear Physics, Department of Biophysics + Csardi Gabor + csardi&rmki.kfki.hu +11306 + Robarts Research Institute + Hua Qian + hqian&irus.rri.ca +11307 + Solarwinds.net + BK Rogers + bk_rogers&solarwinds.net +11308 + SWAPCOM + Frederic Aguiard + frederic.aguiard&swapcom.fr +11309 + TallGrass Communications, Inc. + Brett Zamora + bzamora&tallgrasscom.com +11310 + Technique Consultants Pty Ltd + Phillip Grayling + phil&kangaroopointcliffs.com +11311 + Tubitak - UEKAE + Bilal Aydos + bilal.aydos&tubitak.gov.tr +11312 + Università di Salerno - D.I.A. + Giuseppe Persiano + giuper&dia.unisa.it +11313 + University of Calgary + Jeremy Mortis + mortis&ucalgary.ca +11314 + University of Kansas + George F. Willard III + gfwillar&ku.edu +11315 + USAconnect + Joe Eaton + usaconnect_dns&yahoo.com +11316 + ware solutions + Rob Tholl + rob&ware-solutions.com +11317 + Wieland Electric GmbH + Thomas Gick + thomas.gick&wieland-electric.com +11318 + Xerox Research + Philip Ross + philip.ross&crt.xerox.com +11319 + Alcatel (Alcatel Bell Space) + Gerard Armstrong + gerard.armstrong&alcatel.be +11320 + Communal CC GmbH + Juergen Mueller + juergen&consultant.com +11321 + ECI IP Inc. + Steven Petroskey + spetroskey&jax.ecitele.com +11322 + Integrated Support Systems, Inc. + Keith Park + keith&isscorp.com +11323 + Manpower, Inc. + Steven Dambeck + steven.dambeck&na.manpower.com +11324 + netsols.de + Jan Wagner + wagner&netsols.de +11325 + OpenDesign, Inc. + Gregory Mushen + gregmushen&hotmail.com +11326 + Blue Scooter, Inc. 
+ William Kallander + wkallander&bluescooter.com +11327 + Xi'an Jiaotong University Jump network Technology Co.,Ltd + LiShengLei + lsl&xajump.edu.cn +11328 + Rana + Christian Schlegel + rana.iana&gmx.at +11329 + LLNL-NIF + Randy J Sanchez + rsanchez&llnl.gov +11330 + Inpronetwork corporation + WonHo Kang + yjingold&hanmail.net +11331 + BetaSoft, Wojciech Kruk + Jacek Konieczny + jajcus&bnet.pl +11332 + Broadstorm Telecommunications, Inc. + Rossano Passarella + rpassarella&broadstorm.com +11333 + Media Data Systems Pte Ltd + Ho Kim Sing + hoks&globalsources.com +11334 + OLES polymedia AG + Oliver Lau + oliver.lau&oles-ag.de +11335 + Quris, Inc. + Alan Sparks + asparks&quris.com +11336 + R-Squared Technologies + Rich Ramos + iana-pen&r2tek.com +11337 + Software Workhouse of Hite Lee + Hite Lee + hite_lee&21cn.com +11338 + Success Information System Co., Ltd. + Narudom Roongsiriwong + narudom&success.net.th +11339 + Danam Communications Inc. + Kwang-il Koo + kikoo&danam.co.kr +11340 + DREGIS Dresdner Global IT-Services Gesellschaft mbH + Oliver Lau + oliver.lau&dregis.com +11341 + NoWalls + Ryan McGuire + ryan&nowalls.com +11342 + Finnet Networks Ltd. + Tuomo Rikman + tuomo.rikman&finnet.fi +11343 + Terra, Sol 3 + Ryan McGuire + tellarite&msn.com +11344 + The FreeRADIUS Server Project + Alan DeKok + aland&freeradius.org +11345 + [microstage] Ingenieurbuero Raphael Dierkes + Raphael Dierkes + r.dierkes&microstage.de +11346 + Provinzial Versicherung + Uwe karhof + uwe.karhof&provinzial.de +11347 + Academ Consulting Services + Stan Barber + sob&academ.com +11348 + Airbus + Valentin KRETZSCHMAR + valentin.kretzschmar&airbus.com +11349 + at rete ag + Stefan Gallati + stefan.gallati&atrete.ch +11350 + Bondcar.com + Graham Norbury + gnorbury&bondcar.com +11351 + Bouwens + B.T. Bouwens + bbouwens&xs4all.nl +11352 + Cognigen Corporation + Cindy Kailburn + cindy.kailburn&cognigencorp.com +11353 + Das Buero am Draht GmbH + Gerhard Lehmann + mib-admin&dasburo.com +11354 + Data Design Systems Incorporated + Ron Erlich + erlich&datadesign.com +11355 + DataVault + Jon Larssen + jonlarssen&hotmail.com +11356 + DTV - Technical Knowledge Center of Denmark + Peter Popovics + pop&dtv.dk +11357 + edeal Schroeder Maihoefer GbR + Lukas Schroeder + lukas&edeal.de +11358 + Elcommtech Corp. + Vladimir Lirner + vlirner&elcommtech.com +11359 + EML Ltd + Jim Carter + jim.carter&eml-uk.com +11360 + Excenon + Kevin Zhao + zhaoxin_gl&sohu.com +11361 + FASTER CZ + Miroslav Hrad + hrad&faster.cz +11362 + FCS Partners + Jorma Bergius + jorma.bergius&fcspartners.fi +11363 + Federation of Norwegian Agricultural Cooperatives + Eigil Bjørgum + eigil.bjorgum&felleskjopet.no +11364 + Gaeasoft Corp. + Cheol-Woo Kim + francis&gaeasoft.co.kr +11365 + Orange Slovakia + Dusan Woletz + dusan.woletz&orange.com +11366 + Goanna Graphics + Richard Hancock + rhancock&primus.com.au +11367 + Helix GmbH + W. A. Dobrjanski + admin&helix-gmbh.net +11368 + IAVista, Inc. + Frank Jiang + frank&iavista.com +11369 + LenovoEMC Ltd (formerly 'Iomega') + Shyam Sareen + shyam.sareen&lenovoemc.com +11370 + ITT CO., Ltd. + Satoshi Tsuboi + staff&itt.co.jp +11371 + jung myung telecom. co., Ltd. + Jeongman Jang + jm7315&jmtelecom.co.kr +11372 + Meret Optical Communications + Bhaswar Sarkar + bsarkar&meretoptical.com +11373 + NetVision Sp. z o.o. 
+ Marek Habersack + grendel&debian.org +11374 + OKO Bank Group + Jukka Ikaheimonen + jukka.ikaheimonen&okobank.fi +11375 + Pacific Geoscience Center + Richard Baldwin + baldwin&pgc.nrcan.gc.ca +11376 + Pedestal Software + Fernando Trias + fernando&pedestalsoftware.com +11377 + Product Technology Partners Ltd + Martin Saxon + mjs&ptpart.co.uk +11378 + Quartz Electronics Ltd. + Marc Smith + m.smith&quartzuk.com +11379 + Radiant Rt. + Tanka Robert + tanka.robert&radiant.hu +11380 + Retek Inc + Tom Cox + tom.cox&retek.com +11381 + Rogers Communications Inc + Ivan Brock + ivan.brock&rci.rogers.com +11382 + RR Enterprises + Yildiray Ozen + yildirayo&rrenterprises.on.ca +11383 + SaudiNet + Abdulsalam Abanmy + registry&saudi.net.sa +11384 + SevenLayer Services + Michael Steele + michael&netsteele.com +11385 + Spanlink Communications + Alvin Wong + alvin.wong&spanlink.com +11386 + Studer + Wolfgang Studer + wolfgang&pramach.de +11387 + T4 Consulting Group + Todd Bracken + tbracken&t4cg.com +11388 + Tarantola Labs + Carlo Tarantola + carlo&tarantola.com +11389 + TECFA (Technologie de Formation et Apprentissage) + Olivier Clavel + olivier.clavel&tecfa.unige.ch +11390 + Texas Department of Housing and Community Affairs + Eddie Rowe + eddie.rowe&tdhca.state.tx.us +11391 + The Texas GigaPOP + Stan Barber + sob&noc.gigapop.gen.tx.us +11392 + Tools4ever bv + Jacques Vriens + j.vriens&tools4ever.com +11393 + Tymlabs Management Inc + Mike Broadway + mike.broadway&rocsoftware.com +11394 + UD Technology + Shilong Zheng + szheng&udtechinc.com +11395 + Vancouver Public Library + Andre Fortin + andrefor&vpl.vancouver.bc.ca +11396 + Webex China + Hunter Wang + hunterw&hz.webex.com +11397 + Xpedio AB + Patrik Nilsson + patrik&xpedio.com +11398 + Pyx Engineering AG + Juerg Wanner + juerg&pyx.ch +11399 + Washington State University Vancouver + Chris Gruse + gruse&vancouver.wsu.edu +11400 + Dept. of Information and Communications Technology, HK Institute of Vocational Education (Tsing Yi) + Nick Urbanik + nicku&vtc.edu.hk +11401 + CONCATEL, S.L. + Diego Dal Cero + diego.dal.cero&bcn.concatel.com +11402 + MRO Software Inc. + Gary Freeman + gary.freeman&mro.com +11403 + Corente, Inc. + Sam Bendinelli (VP Engineering) + techsupport&corente.com +11404 + Seanet Corporation + Grigory Eremenko + admin&seanet.com +11405 + shanghaionline Inc. + Nick Yang + nickyang21&online.sh.cn +11406 + SANDY Group + Vladimir Dubrovin + vlad&sandy.ru +11407 + obsceneskills.com + Steven Lotito + steven.lotito&alumni.pitt.edu +11408 + Fiberhome Telecommunication Technologies Co.,LTD + Hairong Wan + whr&fhn.com.cn +11409 + adequate systems GmbH + Klaus Garms + garms&adequate-systems.com +11410 + Advanced Communication Devices Corp. + Tim Ti + tti&acdcorp.com +11411 + AOL Australia + Brandon Yap + byap&aol.net.au +11412 + HUBER+SUHNER BKtel GmbH (FTTH) + Martin Heldmann + martin.heldmann&hubersuhner.com +11413 + Cape Clear Software Ltd. + Jorgen Thelin + jorgen.thelin&capeclear.com +11414 + Carnation Technology Ltd. + Wu Deyou + huff&carnation.com.cn +11415 + CLCsoft + Edward Jang + hwjang&clcsoft.com +11416 + Allgera Corp. (formerly 'Coalsere Inc.') + Scott Ruple + sruple&allgera.com +11417 + Cynops GmbH + Martin Bartosch + m.bartosch&cynops.de +11418 + DFN-CERT Services GmbH + Reimer Karlsen-Masur + oidmaster&dfn-cert.de +11419 + FOTEL Corp. 
+ Calvin Hsieh + chsieh&prodigy.net +11420 + GHS Data Management + Chris Jackson + cjackson&ghsinc.com +11421 + Industrial Networking Solution + Barry Baker + bbaker&industrialnetworking.com +11422 + Larmouth T&PDS Ltd + John Larmouth + j.larmouth&salford.ac.uk +11423 + Lucid Vision, Inc. + Thomas D. Nadeau + tnadeau&lucidvision.com +11424 + MITSUBISHI GAS CHEMICAL COMPANY, Inc. + Hiroyuki Urabe + mail-iamd&mgc.co.jp +11425 + MobileWebSurf + Sanjay Sinha + sanjay&mobilewebsurf.com +11426 + Netfusions s.r.l. + Antonio D'Alfonso + pietro.tiberi&inwind.it +11427 + Particle Physics Group + Marc Kelly + m.p.kelly&bristol.ac.uk +11428 + Redes de Computadoras + Daniel Alejandro Ragno + dragno&fi.uba.ar +11429 + Schumacher + Lothar Schumacher + lschumacher&acm.org +11430 + Simtek Corporation + Chris Gilbert + gilbert&simtek.com +11431 + Southern Polytechnic State University + Jim Herbert + jherbert&spsu.edu +11432 + TOWER Group Network + Benjamin B. Leblond + benjamin.leblond&nz.towerlimited.com +11433 + University of West Bohemia + Jiri Sitera + sitera&civ.zcu.cz +11434 + Utah Scientific, Inc. + Ed Shober + edshober&utsci.com +11435 + WebEx Communications, Inc. + Hunter Wang + hunterw&hz.webex.com +11436 + XML Global Technologies, Inc. + Matt MacKenzie + matt&xmlglobal.com +11437 + Xymbol + Charles Frank + cfrank&xymbollab.com +11438 + Canadian Center for Remote Sensing (GeoAccess Division) + Jean-Francois Doyon + jdoyon&nrcan.gc.ca +11439 + CCLRC + Chris Seelig + c.d.seelig&rl.ac.uk +11440 + Sapros + Peter Haight + peterh&sapros.com +11441 + Goshen College + Tim Boshart + timsb&goshen.edu +11442 + InsertAds Inc. + Bob Feather + bob.feather&insertads.com +11443 + Morder Devices + Liujun + liuj&mdclsoft.com +11444 + Frederick Engineering + David Gray + dgray&fetest.com +11445 + ANOREG-BR + Arnaldo H. Viegas de Lima + arnaldo&pobox.com +11446 + Adduce Networks + Rajesh Srivastava + rajesh&balcomt.com +11447 + XiLogic, L.L.C. + Daniel Bennett + ddbennet&xilogic.com +11448 + ITology Co., Ltd + Hur Yinggull + yinggull&itology.co.kr +11449 + Tethernet, Inc. + Frederick Hunter + fredh&tethernet.com +11450 + xitec.de + Armin Röther + oid&xitec.de +11451 + SINBON ELECTRONIC CO., LTD + Lily Huang + lily&sinbon.com +11452 + Allegient Systems + Carl Eastman + ceastman&allegientsystems.com +11453 + Altadis + Jose Gomez Garcia + jgomez&altadis.com +11454 + Avencis SA + David Wonner + david.wonner&avencis.net +11455 + Aviation Management Technologies GmbH + Röther Armin + armin.roether&avitech.de +11456 + CalvaEDI S.A. + John Hughes + john&calva.com +11457 + Cimcor, Inc. + Robert E. Johnson, III + johnson.robert&cimcor.com +11458 + Computing Services Department, University of Liverpool + Ian C. Smith + i.c.smith&liverpool.ac.uk +11459 + Data Track Technology PLC + David Johnstone + diana&dewlands.com +11460 + datafront + Rick Kilcoyne + rkilcoyne&datafront.com +11461 + Deloitte & Touche + Joel Moses + jmoses&deloitte.com +11462 + DroneDAP + Zachary Smith + zach+iana&euqaz.net +11463 + EC-Founder + Wei Tao + taow&ecfounder.com +11464 + Electronic Arts + Doug Warren + dwarren&ea.com +11465 + Envoy Technologies + Bill Vanyo + billv&envoytech.com +11466 + e-Pollination Enterprise, Inc. 
+ Raymond Gao + raygao&home.com +11467 + IntermediaSP + Tomas Heredia + tomas&intermedia.com.ar +11468 + Jackson County School District 6 + David Seely + david.seely&district6.org +11469 + Kokong + Johan Troedsson + johan&kokong.com +11470 + LX networking + Joachim Rosskopf + Joachim.Rosskopf&lx-networking.de +11471 + mineit software ltd + Derek Johnston + derek&mineit.com +11472 + MoeTi Corp. + MOULIN Clément + mais&ouestil.com +11473 + Nordic Messaging Technologies AB + Stefan Norlin + stefan.norlin&nordicmessaging.se +11474 + NTT DATA COPORATION + Yuji Yamada + yamaday&nttdata.co.jp +11475 + Pro-Active + Johann Dantant + johann.d&pro-active.fr +11476 + Quest Controls, Inc. + Ed Goggin + questmail&questcontrols.com +11477 + Rasvia Systems Incorporation + Ling Tiing Puu + puu.ling&rasvia.com.tw +11478 + Scorpion Controls Inc + Andrew G Swales + aswales&ieee.org +11479 + Selso + Sébastien Lobstein + slobstein&selso.com +11480 + SunnyInd Corp + Geoff Rego + geoff&sunnyind.com +11481 + TEIMA Audiotex + José Parera Bermúdez + jparera&teima.es +11482 + Universal Traffic Management Society of Japan(UTMS) + Teruyuki Tajima + tajima&utms.or.jp +11483 + University of Illinois at Urbana-Champaign + Michael A. Grady + m-grady&uiuc.edu +11484 + viastore systems GmbH + Thorsten Sauter + t.sauter&viastore.de +11485 + Vida Network Technologies, Inc. + Larry Kong + larry_kong&yahoo.com +11486 + Vizzavi NL + R. Eygendaal + ronald.eygendaal&corp.vizzavi.net +11487 + VoxSurf Ltd. + Axel Voigt + axel.voigt&voxsurf.com +11488 + AB Computers + Alek Barsky + alekbarsky&hotmail.com +11489 + Alacritus + Roger Stager + rstager&alacritus.com +11490 + BetweenMarkets, Inc. + Craig Dunn + cdunn&betweenmarkets.com +11491 + University of Jyvaskyla + Network Administrator + hostmaster&jyu.fi +11492 + Micro Research Laboratory, Inc. + Seiichi Kaneko + kaneko&mrl.co.jp +11493 + Obourg Origny Informatique + Xavier Milliès-Lacroix + xavier.millies-lacroix&obourg-origny-info.com +11494 + Nebulon Pty. Ltd. + Paul Szego + paul.szego&nebulon.com +11495 + cubic.ch + Tim Tassonis + timtas&cubic.ch +11496 + foo.cz + Adam Buble + buble&cas.cz +11497 + Gamerz.NET Enterprises + Richard Rognlie + oid&spamblock.gamerz.net +11498 + HCCnet B.V. + Hidde Korenstra + h.korenstra&hccnet.nl +11499 + Hollmann Consulting Services + Frank Hollmann + frank.hollmann&epost.de +11500 + Infrant Technology Inc. + Wei GAo + wgao&infrant.com +11501 + IngmarNet + Ingmar Schmidt + ingmar-schmidt&gmx.de +11502 + ITU + Sébastien Castano + sebastien.castano&itu.int +11503 + METRAWARE + Jerome Monclard + jmonclard&metraware.com +11504 + NDSL, Inc. + Jerry Bastian + jbastian&cellwatch.com +11505 + Network-1 Security Solutions, Inc. + Cynthia Mills + mills&network-1.com +11506 + ---none--- + ---none--- + Unassigned (Removed 2002-05-02) +11507 + ProBusiness + Ian Clark + iclark&probusiness.com +11508 + School of Banking and Management in Cracow + Tomasz Wojdynski + tomwoj&wszib.krakow.pl +11509 + Schweizer Electronic AG + Gerold Mauch + IT10&seag.de +11510 + Service Intelligence + Sergei Agalakov + sagalakov&serviceintelligence.com +11511 + Southern Illinois University School of Medicine + Paul M Fleming + pfleming&siumed.edu +11512 + St. 
Joseph's College + George Leedle + gle2918&saintjoe.edu +11513 + Strele Informatik + Klaus Strele + kstrele&compuserve.com +11514 + Chris Abernethy + Chris Abernethy + cabernet&chrisabernethy.com +11515 + ASAP Technology SRL + Eldo Loguzzo + eldo.loguzzo&asaptechnology.com.ar +11516 + The Academy of Sciences of the Czech Republic + Adam Buble + buble&cas.cz +11517 + Trivadis AG + Tim Tassonis + tim.tassonis&trivadis.com +11518 + T-Soft Ltd. + Mike Rehder + mrehder&tsoft-tele.com +11519 + VET + P.S. Dekkers + paul&sterrenkunde.nl +11520 + Visilinx, Inc. + UNIX Department + unix&visilinx.com +11521 + Broadcast Music Incorporated + Jon Watts + noc&bmi.com +11522 + LINXTEK + DongSeak Yoon + manulsan&linxtek.com +11523 + Fujitsu Laboratories of America, Inc. + Dominic Greenwood + dpag&fla.fujitsu.com +11524 + Ch5 Finland Oy + Christian Grondahl + christian.grondahl&ch5finland.com +11525 + brain://on AG + Bernd Fix + brf&brainon.ch +11526 + GIRO LTD + Péter Kostenszky + peter.kostenszky&mail.giro.hu +11527 + Bromax Communications, Inc. + Irving Liu + irving.liu&bromax.com.tw +11528 + DVBern AG + Dragan Milic + dragan.milic&dvbern.ch +11529 + KOGA ELECTRONICS CO. + Okuno Makoto + okuno&koga.co.jp +11530 + 3G - NetWorks + Registration Services + 3gnt&3gnt.net +11531 + aha-systems + Albrecht Haug + adtjd&yahoo.de +11532 + Bond University + Stephen Kaspersen + techhelp&netactive.co.za +11533 + Chiba Industries + Mike Markley + mike&chibaindustries.com +11534 + coaXmedia + Keith L. Bernhardt + kbernhardt&coaxmedia.com +11535 + COGITIS + Daniel Alain + adaniel&cogitis.tm.fr +11536 + Cyber-Ark + Gal Cucuy + galc&cyber-ark.com +11537 + Dirección General de la Policía + Gonzalo Menendez Fraile + soporte&policia.es +11538 + Elata Ltd. + Greg Mitchell + gkm&elata.com +11539 + Essent Kabelcom + Berthold Nijp + iana&castel.nl +11540 + ETIT + SangJin Park + sj_park&etit.co.kr +11541 + Fugu Angst Productions + Russell Hay + seb&b0b.net +11542 + GuangZhou GaoKe Communication Equipment Co., Ltd. + Li Huapeng + lihuapeng&21cn.com +11543 + Internet Northwest + Dennis Peterson + dennispe&inetnw.com +11544 + Kunsthochschule Fuer Medien + Robert O'Kane + okane&khm.de +11545 + Kurtev Intergalactic + Dimiter Kurtev + dkurtev&kurtev.com +11546 + METRAWARE + Jerome Monclard + jmonclard&metraware.com +11547 + Mpower Communications Corporation + SNMP Management + snmp-mgmt&mpowercom.com +11548 + NABLA2 s.r.l. + Faglioni Giovanni + giova&faglioni.it +11549 + NETOUS TECHNOLOGIES Ltd. + W.M.Yiu + wm_yiu&netous.com +11550 + NETPIA SYSTEMS Co.,LTD + Tim Kim + tim&netpia.co.kr +11551 + Orca Orcinus, Inc. + Philip Poremba + phil&orcinus.com +11552 + page87 + Paul Rees + paulrees&page87.com +11553 + Planet Technologies NV + Dennis van Rossum + d.vanrossum&planetinternet.nl +11554 + Pomcor + Francisco Corella + francisco&pomcor.com +11555 + Rider University + Timothy Fairlie + fairlie&rider.edu +11556 + Robert Burrell Donkin + Robert Burrell Donkin + robertdonkin&mac.com +11557 + S&CI + Constant Dupuis + constant.dupuis&skynet.be +11558 + SOFICE + Hascoat Michel + system&sofice.fr +11559 + SoftGame International Pty. Ltd. + John M. Salvo Jr. + john&softgame.com.au +11560 + Tantia Technologies Inc. + Jacky Yu + jacky.yu&tantiatech.com +11561 + Topcon Positioning Systems, Inc. 
+ Alexander Davydenko + alex&javad.ru +11562 + VE2UG + Rene Barbeau + nousdeux&videotron.ca +11563 + Ahnlab, Inc + Jinyoung Park + susia&ahnlab.co.kr +11564 + CiteItWrite + Jason Patterson + jrpatterson&hotmail.com +11565 + Instituto Nacional de Estatística - PORTUGAL + DSII/SGIT + admin&ine.pt +11566 + Prologue Software + Lise Didillon + ldidillon&prologue-software.fr +11567 + DeltaLoyd Deutschland + Peter Handloser + peter.handloser&deltalloyd.de +11568 + bmbwk + DI Klemens Urban + klemens.urban&bmbwk.gv.at +11569 + handy.de Vertriebs GmbH + Robert Rauchstaedt + robert&mail.handy.de +11570 + Mages Touch + Derick W Featherstone + derickf&mages-touch.com +11571 + NTI Studio's + Darren L Featherstone + darrenf&xs4all.nl +11572 + Rasvia Systems, Inc. + Hsing Yuan + hsing.yuan&rasvia.com +11573 + rockus.at + Oliver Gerler + oliver.gerler&rockus.at +11574 + Seven-Winds + Derick W Featherstone + derickf&seven-winds.com +11575 + Solution Design Laboratory + Ken Ingram + kingram&sdl.org +11576 + Sunnycal Inc. + Ricky Sun + sunyuxi&yahoo.com +11577 + Global System Services + Didier Dupuy d Angeac + dda&gss.webstore.fr +11578 + AirZip, Inc. + Dave Coleman + dave_coleman&airzip.com +11579 + Amadeus Data Processing GmbH + Timofei Zakrewski + tzakrews&amadeus.net +11580 + Amdocs + Yossi Rozen + yossiroz&amdocs.com +11581 + bylinux.net + Francis So + francis&bylinux.net +11582 + C&I Technologies + Dongseok Yang + scoranta&cnitec.com +11583 + Cayenta, Inc. + Philip Porreca + pporreca&cayenta.com +11584 + CLASS AG + Klaus Jungbauer + Klaus.Jungbauer&class.de +11585 + ColoradoBiz.net + Quentin Perkins + quinn&quinnperkins.com +11586 + DLR e.V. + Juergen Schmidt + juergen.schmidt&dlr.de +11587 + epictet AG + Wolf-Dietrich Seidlitz + wds&epictet.de +11588 + ERA a.s. + IANA PEN contact person + iana-pen&era.aero +11589 + Exsior Data & Information Tech. Inc. + Richard Huang + rich&edit.com.tw +11590 + Fortech Ltd. + Dalibor Toman + dtoman&fortech.cz +11591 + Free Software Foundation + Sergey Poznyakoff + gray&gnu.org +11592 + HANGZHOUSUNYARD INFORMATION ENGINEERING CO.,LTD + Xinbo Yan + yanxinbo&yeah.net +11593 + IMISE + Sebastian Dietzold + dietzold&imise.uni-leipzig.de +11594 + LDCOM Networks + Sebastien Louyot + sebastien.louyot&ldcom.fr +11595 + Lightmaze AG + Christoph Gasche + c.gasche&lightmaze.com +11596 + Mediakabel + J vd Voort + jvdvoort&mediakabel.nl +11597 + MFB-Multa spol. s r.o. + Lukas Blaha + lukas.blaha&mfb.cz +11598 + MGIC - Mortgage Guaranty Insurance Corp + Denise Huempfner + Denise_Huempfner&mgic.com +11599 + mille21 + Junyong Jo + charisma&mille21.com +11600 + Neogration Inc. + Jeff Hays + jeffrey.hays&abnamro.com +11601 + Netaxs Internet Services + George Robbins + grr&netaxs.com +11602 + New York State Office of the State Comptroller + Jean Moore + jmoore&osc.state.ny.us +11603 + Nexxient Communications + Andrew Lee + andrew.lee&nexxient.com +11604 + NorduGrid + Anders Wäänänen + waananen&nbi.dk +11605 + Optical Wireless Link Inc. + Paul Tzeng + ptzeng&opticalwirelesslink.com +11606 + Pacific Broadbank Networks + Ger Vloothuis + ger&pbn.com.au +11607 + Plumtree Software + Daniil Khidekel + daniil.khidekel&plumtree.com +11608 + Pracom Pty Ltd + Thomas Price + thomas.price&pracom.com.au +11609 + PUSANWEB Ltd + Woon-uk Lee + woonuk&pusanweb.co.kr +11610 + Sandvine Incorporated + Don Bowman + don&sandvine.com +11611 + Secretaria da Fazenda do Estado de Sp + Waldemar Scudeller Jr. + wsj&wsj.com.br +11612 + STT s.r.l. 
+ Fabio Fedele + fafed&seeweb.com +11613 + Vortex Ltd + Stephen Wong + admin&vortex.com.hk +11614 + vrwg + rb + user04&vrwg.net +11615 + Webaronet Technology + Victor Wong + wongvic&hkem.com +11616 + WebPerform Group Ltd + Pete Shew + pete_shew&webperform.com +11617 + Cablevision Systems Holdings + Bill Dolan + bdolan&cablevision.com +11618 + SOLTECH CO., Ltd. + Dong-Hwa Lee + soltech&netsgo.com +11619 + PTC Solutions Ltd + Peter Blaney + pblaney&ptc.co.uk +11620 + PACE Anti-Piracy + Christopher Taylor + ctaylor&paceap.com +11621 + Agrotecnica Arpa scrl + Carlo Scarfoglio + scarfoglio&arpacoop.it +11622 + 3PARData + PhiDien Nguyen + snmp&3pardata.com +11623 + Aegis Software + Lloyd Mangnall + lloydm&aegisgrp.com +11624 + Bit-Lab PTY LTD + Nick de Sancha + nick&bitlab.com.au +11625 + Black Bear Software, LLC + James M. Turner + turner&blackbear.com +11626 + Combol GmbH + Jean-Pierre Bolengo + info&combol.ch +11627 + David Pitts + David Pitts + dpitts&mk.net +11628 + EDIPORT Telecommunication Ltd. + Attila Molnar + attila.molnar&ediport.hu +11629 + Educational Service Unit #2 + Mike Danahy + mdanahy&esu2.org +11630 + eircomnet + Donal Diamond + donal.diamond&eircom.net +11631 + EnablingTrust LLC + Bruce Kiley + bkiley&enablingtrust.com +11632 + e-Sec Tecnologia em Seguranca de Dados LTDA + Luciano da Silva Coelho + coelho&esec.com.br +11633 + Europop AG + Holger Patrick Schebek + h.schebek&europop.net +11634 + Exempla + Kevin Erickson + kerickso&exempla.net +11635 + Fiorano Software Inc. + Rishi Raj Yadav + rishi&fiorano.com +11636 + Focal Point Software, Inc. + Luigi Bai + lpb+iana&focalpoint.com +11637 + Generatio GmbH + Thomas Harmann + harmann&generatio.com +11638 + Grapes Network Services + William Brioschi + william.brioschi&grapesnet.com +11639 + Intersys Uruguay Ltda + Eduardo Roldan + eduardo&intersys.com.uy +11640 + Miritek, Inc. + Seogsoon Ahn + ssahn&miritek.com +11641 + NETOVA + Philippe Martinou + philippe.martinou&netova.net +11642 + Nextra Ensure (UK) + Peter Jestico + peter.jestico&nextra.co.uk +11643 + One Stop Consulting, Inc. + Anthony Cogan + anthony.cogan&thinkunix.com +11644 + Orbism Consulting + Dave OReilly + daveor&mobiustech.ie +11645 + Pentalog Inet + Iulia Talos + italos&pentalog.fr +11646 + PentaMedia Co., Ltd. + Harry Cho + hscho&pentamedia.com +11647 + Planetasia Ltd., + Ramapriya R.M. + ramapriyarm&planetasia.com +11648 + RemoteSite Technologies Inc. + Eric Sheffer + eric.sheffer&remotesite.com +11649 + Ross Stores, Inc. + Mike O'Connell + mike.oconnell&ros.com +11650 + Schnedermann Software-Consulting GmbH + Ekkard Schnedermann + Ekkard.Schnedermann&Schnedermann.de +11651 + SideSpace + Altaf Mohamed + altafm&speakeasy.net +11652 + Sonoma State University + Jack Ziegler + ziegler&sonoma.edu +11653 + Stack Computer Solutions + Stephen Cobham + scobham&stack.co.uk +11654 + Strott Network Solutions + Christian Strott + cstrott&snets.de +11655 + SY.O. srl + Andrea Spada + a.spada&syo.it +11656 + Synapse Systems AB + Per Bergqvist + per&synapse.se +11657 + Taiwan Telecommunication Network Services Co., Ltd. + Jung Wu + hostmaster&ttn.com.tw +11658 + Universite Blaise PASCAL + Denis Pays + denis.pays&univ-bpclermont.fr +11659 + Virtual Security Research + George D. Gal + ggal&vsecurity.com +11660 + VMS Limited + Isaac Coll + icoll&vmslimited.co.uk +11661 + Wahoo International Enterprise Co., Ltd. 
+ Lord Lee + lord_li&wahoo.com.tw +11662 + Marcel Ruff + Marcel Ruff + mr&marcelruff.info +11663 + +X Altaïr Toulouse + Christophe Garrigue + christophe.garrigue&altair.fr +11664 + base2 + Daniel Powell + daniel&base2.com.au +11665 + dacom + Sun Jung + sjung&dacom.net +11666 + Institute of Communications Engineering - University of Hanover + Michael Meincke + meincke&ant.uni-hannover.de +11667 + New York State Education Department + Mark Macutek + mmacutek&mail.nysed.gov +11668 + WRLucas + Bill Lucas + mrwrlucas&netscape.net +11669 + Milhouse Technologies USA + Arnell Milhouse + arnell_milhouse&usa.com +11670 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11671 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11672 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11673 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11674 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11675 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11676 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11677 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11678 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11679 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11680 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11681 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11682 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11683 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11684 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11685 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11686 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11687 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11688 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11689 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11690 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11691 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11692 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11693 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11694 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11695 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11696 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11697 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11698 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11699 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11700 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11701 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11702 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11703 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11704 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11705 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11706 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11707 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11708 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11709 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11710 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11711 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11712 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11713 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11714 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11715 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11716 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11717 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11718 + 
Reserved + RFC-pti-pen-registration-10 + ---none--- +11719 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11720 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11721 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11722 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11723 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11724 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11725 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11726 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11727 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11728 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11729 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11730 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11731 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11732 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11733 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11734 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11735 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11736 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11737 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11738 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11739 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11740 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11741 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11742 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11743 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11744 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11745 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11746 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11747 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11748 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11749 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11750 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11751 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11752 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11753 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11754 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11755 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11756 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11757 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11758 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11759 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11760 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11761 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11762 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11763 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11764 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11765 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11766 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11767 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11768 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11769 + Reserved + RFC-pti-pen-registration-10 + ---none--- +11770 + China Sanjiang Space Group + Jianxing Liu + liujianxing&263.net +11771 + 81 Broad Street + Graeme Defty + dgefty&attglobal.net +11772 + Adrock Software + Chris Crowe + chris&iisfaq.com +11773 + AGMODEL, NARC, JAPAN + Takuji Kiura + kiura&affrc.go.jp +11774 + ASYK S.A. 
+ Fenny Hadjistamatiou + f.hadjistamatiou&asyk.ase.gr +11775 + Atchik + Guillaume Sanchez + guillaume.sanchez&atchik.com +11776 + AviBit data processing GmbH + Tom Leitner + t.leitner&avibit.com +11777 + Ayman LLC. + Davis Mcpherson + davism&aymanllc.com +11778 + BlueBridge Technologies AG + G.Reitberger + reitberger&bluebridge.de +11779 + Casedev Active Oy + Jukka Terho + jterho&casedev.com +11780 + CBIM + Ugo Cei + u.cei&cbim.it +11781 + celtro + Shlomo Hecht + shlomo.hecht&celtro.com +11782 + Charles Industries, LTD + Larry Page + lpage&charlesindustries.com +11783 + Canon Software Information Systems Inc. + Takashi Fukui + fukui.takashi&canon-js.co.jp +11784 + Credant Technologies + Richard Brooks + rbrooks&credant.com +11785 + Critical Integration Ltd + Redvers Davies + red&criticalintegration.com +11786 + Daisy Software + Info + info&daisysoftware.com +11787 + Datagate d.o.o. + Srdjan Srdic + ssdic&net.yu +11788 + fluxx.com e-commerce GmbH + Siegfried Renner + renner&fluxx.com +11789 + ForeScout Technologies, Inc. + Asaf Lavi + tech&forescout.com +11790 + GateWay Community College + Jose Candanedo + candanedo&gwc.maricopa.edu +11791 + Giesecke and Devrient + Karsten Fischer + karsten.fischer&gdm.de +11792 + Grant County PUD + Steven R. Wilson + swilson&gcpud.org +11793 + IBM Slovakia (test) + Viera Uhrinova + viera_uhrinova&sk.ibm.com +11794 + Incentive Technologies, Inc. + Chris Martin + chrismartin&incentive-tech.com +11795 + Infoglobe + John Hermes + jhermes&infoglobe.com +11796 + InnoviData GmbH + Roland Haag + roland.haag&innovidata.com +11797 + Kaboom Enterprises + Chris Ricker + kaboom&gatech.edu +11798 + Lateral Sands Pty Ltd + Phil Sutherland + phil&lateralsands.com.au +11799 + Latis Networks, Inc + David Greenstein + dave&latis.com +11800 + Logosoft + Faruk Telibecirovic + faruk&logosoft.ba +11801 + MPSV + Jiri Nedoma + mp&mpsv.cz +11802 + Northwest Airlines, Inc. + Wendy W Lou + wendy.lou&delta.com +11803 + Ohlone Community College + Tj Webb + twebb&ohlone.cc.ca.us +11804 + PCS + Chandu + cskumar9&lycos.com +11805 + Plexus + Cristian Fernandez + cristian&plx.com +11806 + Antonveneta ABN AMRO Bank + Riccardo Fontana + riccardo.fontana&aaasgr.it +11807 + SamuelBalle.com + Samuel Balle + sb&samuelballe.com +11808 + Sifry Consulting + David L. Sifry + david-iana&sifry.com +11809 + Skobeltsyn Institute of Nuclear Physics, Moscow State University + Nikolai A. Kruglov + kruglov&lhc.sinp.msu.ru +11810 + Solesys SA + Pascal Roulin + pascal.roulin&solesys.ch +11811 + Spaces, Inc. + Mike Abbott + mikea&spacesinc.com +11812 + TDC Internet A/S + Steffen Winther Soerensen + stws&int.tele.dk +11813 + TMT Ltd. + David Ashkenazi + david_a&tmt3.com +11814 + University of Nantes + Pierre-Olivier Terrisse + terrisse&cri.univ-nantes.fr +11815 + Yellow Online Inc. + Network Administrator + netadmin&yellow.ca +11816 + yuantong technology + Zhaoxin + masterzhao&263.net +11817 + Zentrale Informatik, Universität Zürich + Cyril Stoll + lnx-linux-hosting&zi.uzh.ch +11818 + Z-Star Microelectronics Corporation + Liang Qi + cliff.liang&zsmc.com.cn +11819 + ZX Factory + Andree Toonk + a.toonk&zx.nl +11820 + Fonterra Co-operative Group + Jay Pieren + jay.pieren&nzmp.com +11821 + Jet Infosystems + Andrey Stolyarov + croco&jet.msk.su +11822 + SPSU Selinux-LDAP Team + Patrick Childers + sublime43z&msn.com +11823 + WSMicroSystems Inc. 
+ Ricky Sun + ysun&atsinai.com +11824 + Computer & Technologies International Ltd + Guo Jing Dong + guoal&163.com +11825 + Daji Workshop + Haihan Ji + daji&public.cc.jl.cn +11826 + Sistemas Tecnicos de Enseñanza Consultores + Nuria de la Fuente Teixido + STEC&STEC.INFO +11827 + ACUMEN ADVANCED TECHNOLOGIES Inc. + Nahid Keshmirshekan + admin&acumentechnologies.com +11828 + Coast Mountains School District + Steve Tonnesen + tonnesen&cmsd.bc.ca +11829 + Corvil Networks + Ciaran Mitchell + ciaran.mitchell&corvil.com +11830 + F'Arty Crafts Pty Ltd + Les Carleton + les&fartycrafts.com +11831 + Interactive Transaction Services, Inc. + Chris Hagmann + chagmann&ipin.com +11832 + Joerg Preuss + Joerg Preuss + joerg&preuss.info +11833 + nCircle Network Security + Matthew Condren + mcondren&tripwire.com +11834 + PLIVA d.d + Dobrica Pavlinusic + pcst&pliva.hr +11835 + StreamServe, Inc. + Michael Kajen + michael.kajen&streamserve.com +11836 + Swiss National Bank + Christian Schmid + christian.schmid&snb.ch +11837 + Telenor Mobile Communications + Bjorn Steine + bjorn.steine&telenor.com +11838 + Vistaar + Chandra Shekkar Kumar + ckumar&vistaar.com +11839 + WebWayOne Ltd + Graham Murray + gmurray&webwayone.co.uk +11840 + YR CONSEIL + Yann Romefort + yann.romefort&yrconseil.com +11841 + Aaltonen & Vuorela + Marko Karppinen + marko.karppinen&aaltonenvuorela.com +11842 + Anchora + Ricardo Marimon + rmarimon&alumni.standord.org +11843 + Argus Systems Group, Inc + Mikel Matthews + mikl&argus-systems.cin +11844 + Beijing Super Channel Network Limited + Huang Yuzhen + huangyuzhen&bj.tom.com +11845 + Blueair Networks + Scott Francis + sfrancis&blueairnetworks.com +11846 + CITAP + Alex Peeters + alex.peeters&citap.com +11847 + Himnebaugh Consulting, Inc. + Bruce Himebaugh + bruce&hcd.net +11848 + INLINE Corporation + James Fernandez + jfernandez&inlinecorp.com +11849 + Janus Technologies + George Saad + gush&janus-tek.com +11850 + Native Names Corporation + Ahmed Abd-Allah + cto&nativenames.net +11851 + NetEconomist + Alex Turner + sysadmin&neteconomist.com +11852 + New Productivity Initiative Inc + Paul Foley + pfoley&newproductivity.org +11853 + Public Service Company of New Mexico + Jim Baylor + jbaylor&pnm.com +11854 + Healthvision (formerly 'Quovadx, Inc.') + Jesse Pangburn + jpangburn&healthvision.com +11855 + Retirement By Design + Stephen Bounds + sbounds&rbd.net.au +11856 + TNT Logistics North America + Noli Tablada + noli_tablada&tntlogistics.com +11857 + Transmode Systems AB + Per Borg + per.borg&infinera.com +11858 + UANGEL + Bohun Tak + bhtak&uangel.com +11859 + WorldChain Inc. + Jay Zhou + jay.zhou&worldchain.com +11860 + wossname.org.uk + Graeme Mathieson + mathie+iana&wossname.org.uk +11861 + Codenet + De Pus Gorik + gorik.depus&codenet.be +11862 + Columbia University in the City of New York + Alan Croswell + alan&columbia.edu +11863 + TP-Link Systems Inc. 
+ Huo Chen + huochen&tp-link.com +11864 + Böke and Walterfang Electronic Systems Ltd + Kevin Cordery + kevin.cordery&bwesl.com +11865 + Explo-IT Research + Antonio Capani + capani&explo-it.com +11866 + GDTTI + Francisco Leong + frleong&gdtti.gov.mo +11867 + Loop Treinamento em Inform + Cláudia Viegas + cviegas&hsbc.com.br +11868 + Placenet + Conseil d'administration + adm&placenet.org +11869 + Roman Zielinski Metod och systemutveckling + Roman Zielinski + roman.zielinski&rozisys.se +11870 + Sandhills Community College + Steven Thomas + thomass&email.sandhills.cc.nc.us +11871 + SpaceNet AG + Christian Brunner + cbrunner-iana&space.net +11872 + Systemy Mikroprocesorowe + Krzysztof Blaszkowski + sysmikro&post.pl +11873 + Yolke Corporation + Russ White + me&russwhite.net +11874 + Scopus Tecnologia S. A. + Paulo S. L. M. Barreto + pbarreto&scopus.com.br +11875 + Biomet Merck BV + Christine Simon + christine.simon&merck.de +11876 + Siteseers Inc. + Mark Turner + markt&siteseers.net +11877 + 3UP Systems + Paul Harrington + pharrington&3upsystems.com +11878 + Achilles Guard, Inc d.b.a Critical Watch + Nelson W Bunker V + nelson_bunker&criticalwatch.com +11879 + Castel et Fromaget + Bernard Etienne + etienne.b&c-et-f.com +11880 + Custom Business Link, Inc. + Allan D. Jones + adjones17&networld.com +11881 + FiberSpans Corporation + Bill Hargen + bill.hargen&fiberspans.com +11882 + geschke internet consulting + Ralf Geschke + ralf&geschke.net +11883 + Ignite Communications + Andy Reid + andy&shakerlane.com +11884 + Indeed - Visual Concepts GmbH + Detlev Stalling + stalling&indeed3d.com +11885 + MMC Technology + Ki-Won Kang + kwkang&mmctech.com +11886 + Nextcard, Inc. + Thomas Arthurs + thomas.arthurs&nextcard.com +11887 + PSS Systems Inc + Senthil Supramaniam + senthil&pss-systems.com +11888 + Teamlog External + GODARD Jean-Charles + god&teamlog.fr +11889 + The Answer Group, L.L.C. + Greg Cook + gacook&webadvisers.com +11890 + The Nog Network + Tony Monroe + tmonroe+iana&nog.net +11891 + Unified Communications Pte Ltd + Tan Cheng Ho Colin + colin_tan&unifiedcomms.com +11892 + Vodafone Global Platform & Internet Services + Chris Nelson + cris.nelson&vodafone-us.com +11893 + The Pep Boys - Manny, Moe & Jack + Paul Davis + paul_davis&pepboys.com +11894 + Integrity Solutions, Inc + Joe Hruska + sysadmin&integritysi.com +11895 + Globalnet + Najeh Ben Nasrallah + najehbn&gnet.tn +11896 + 3G Lab + David Holland + oid-alloc&3glab.com +11897 + Acantho + Alessandro Fava + alessandro.fava&acantho.com +11898 + Agere Systems, Inc. + Daniel R. Borges + dborges&agere.com +11899 + Bitfone Corporation + Alex Vinogradov + avinogradov&bitfone.com +11900 + BlueSoft + Somech Ovad + ovad.somech&bluesoft-inc.com +11901 + Bonndata GmbH + Dieter Zysk + dieter.zysk&db.com +11902 + Cartel Communication Systems Inc + John Philpott + john&cartelsys.com +11903 + Case Western Reserve University + Jeffrey A. Gumpf + gumpf&cwru.edu +11904 + CirqueDigital, LLC + Sebastien Boving + seb&cirquedigital.com +11905 + Icom Inc + Hiroshi Kawasaki + hiroshi_kawasaki&icom.co.jp +11906 + IF DRAUDIMAS + Vaidas Damosevicius + vd&sampo.lt +11907 + ImaginaryRealities + Michael F. Collins, III + mfcollins3&sbcglobal.net +11908 + Instituto Cubano de Radio y Television + Carlos A. Carnero Delgado + carnero&icrt.cu +11909 + Log On America, Inc. + James Tavares + jtavares&loa.com +11910 + Michael Roettig Consulting + Michael Roettig + michael&roettig.de +11911 + Unassigned + Returned 2003-04-30 + ---none--- +11912 + NetMotion Wireless Inc. 
+ Julia Renouard + julia.renouard&nmwco.com +11913 + netsys.com + Len Rose + len&netsys.com +11914 + National Information Infrastructure Development Institute + Gabor Kiss + kissg&niif.hu +11915 + Oldring & Associates, Inc. + Mike Bridge + admin&oldring.ca +11916 + Pablix + Pablo Jadot + webmaster&pablix.de +11917 + PAR3 Communications + Rudy Munguia + rudy.munguia&par2.com +11918 + PRAJA inc + Asquith Bailey + abailey&praja.com +11919 + QCOM, Inc. + Shawn Boyce + shawn&qcominc.com +11920 + quantumBEAM Ltd. + Neil Crumpton + registration&quantumbeam.com +11921 + Ruprecht & Partner OEG + Ruprecht Erich + office&rdcs.at +11922 + SD Inc. + Darko Zivkovic + darko1&rogers.com +11923 + Spectrum Control, Inc. + Chuck Drew + drew&spectrumcontrol.com +11924 + Techsystem Technologies + Robert K. Chang + robert&techsystem.net +11925 + The IQ Group, Inc. + Stephen Clouse + stephenc&theiqgroup.com +11926 + The United World Colleges (international) + Nike Ogundana + uwcio&uwc.org +11927 + Tytrast Systems Inc. + Susumu Ueno + susumu_ueno&tytrast.com +11928 + Indievisible + Scot Bontrager + scot&indievisible.org +11929 + Veridicom + James Coughlin + softwaredev&veridicom.com +11930 + Vseobecná úverová banka, a.s. + Ivan Masný + imasny&vub.sk +11931 + Staten Island University Hospital + Vladimir Berezniker + vberezniker&siuh.edu +11932 + The Wolf Group + Angel R. Rivera + angel&wolf.com +11933 + University of Alberta + James Woodward + jaw2&ualberta.ca +11934 + Stadtsparkasse Köln + Achim Krisinger + Achim.Krisinger&sk-koeln.de +11935 + Josuya Technology Corporation + Danya Kim + admin&josuya.com +11936 + Amadeus NMC Holding, Inc. + Hostmaster + hostmaster&amadeusboston.com +11937 + ArcSight, Inc. + Hector Aguilar + haguilar&arcsight.com +11938 + Cominet + Youster Park + ykpark&cominet.co.kr +11939 + Electronic Direct, Inc. + Gregory Karges + karges&edirect.ca +11940 + Medical Coding and Compliance Solutions, L.L.C. + Karlen Bailie + doc&flashcode.com +11941 + Mssu + Ming-Shan Su + mssu&ou.edu +11942 + Muhen Software + Akira Hangai + akira&hangai.net +11943 + Net Partnets Sp. z o.o. + Marek Turczyniak + marek&roztocze.com.pl +11944 + Paulmichl NetWork's + Juergen Paulmichl + juergen.paulmichl&gmx.net +11945 + q-station.net + Chris Leung + chris&q-station.net +11946 + Quanta Network Systems Inc. + Ted Yu + ted.yu&qnstw.com +11947 + Stowarzyszenie FREEZE NET ASK + Marek Turczyniak + marek&roztocze.com.pl +11948 + Syracuse University + Bonnie Dunay + badunay&syr.edu +11949 + Ukrsotsbank + Alexander Khokhlov + mailer&cbsd.donetsk.ua +11950 + VivoCom Inc. + Ned Krtolica + ned&vivocom.com +11951 + VTechnologies Sàrl + Messerli Reynald + info&vtechnologies.ch +11952 + Zeta Broadband Inc + Kyungsik Kim + david.kim&zetabroadband.com +11953 + ZUMtOBEL AG + Martin Eberle + martin.eberle&zumtobel.com +11954 + Elegant solutions consulting Inc. + Atsushi Ikeda + aikeda&elegant.ca +11955 + BeiJing NetPower TECHNOLOGIES Inc. + LiLian + gnailil&ihw.com.cn +11956 + Mobigen Co., Ltd. + Lee Myung gue + myung&mobigen.com +11957 + ITC + Serge L. 
Fidorichev + serko&bignet.ru +11958 + Premonitia, Inc + Jonathan Rodin + jrodin&premonitia.com +11959 + Coyote Rock + Terrell Gilliland + terrell&coyoterock.com +11960 + Downlode Media + Earle Martin + earle&downlode.org +11961 + LARC-USP + Fernando Redigolo + fernando&larc.usp.br +11962 + Nevala + Jarno Nevala + jarno.nevala&iki.fi +11963 + Propel + Steve Strnage + sjs&propel.com +11964 + Technical Chamber of Greece + Nikos Panagiotopoulos + net&central.tee.gr +11965 + Thomas Monitor Systems + Thomas Jensen + tje&thomsystems.com +11966 + Alstom Power Sweden AB + Marten Martensson + marten.martensson&power.alstom.com +11967 + MLP AG + Joachim Koch + joachim.koch&mlp-ag.com +11968 + SymLABS + Antonio Navarro + antonio&symlabs.com +11969 + Systech Retail Systems Inc. + Chris Smith + csmith&srspos.com +11970 + Watch4Net Solutions + Louis-Philippe Reid + info&watch4net.com +11971 + Peter-Service Ltd. + Aleksei A. Alekseenko + alex&billng.ru +11972 + TAIS, Inc. + Tim Derby + tim.derby&tais.com +11973 + North Central College + Mike Quintero + helpdesk&noctrl.edu +11974 + alpha-CNS GmbH + Daniel Kliche + iana&alpha-cns.com +11975 + ALPHA SYSTEMS + Masayuki Tsuchida + tsuchima&alpha.co.jp +11976 + AT&T Labs Intelligent Network Analysis + Darryl Parker + parkerd&att.com +11977 + Backwoods Communications, LLC + Jeremy McNamara + jj&indie.org +11978 + Boxcar Media + Joshua Nye + josh&boxcarmedia.com +11979 + BrainStorm Business Resources, Inc. + Felix E Quevedo + fquevedo&columbus.rr.com +11980 + Bravenet Web Services, Inc + Brad Knorr + brad&bravenet.com +11981 + CAC Ltd + James Cominsky + jim.cominsky&gmail.com +11982 + chiz.net + Paul Chizzo + paul&chiz.net +11983 + CollectiveBid Systems Inc. + Jack Leong + jleong&collectivebid.com +11984 + Content Management AG + Babak Vahedipour-Kunze + snmpec&cm-ag.de +11985 + Conysis S.A. + Ricardo Cárdenes + ricardo&conysis.com +11986 + CreekPath Systems + Craig Reed + reed&creekpath.com +11987 + Dakos data & communication co., Ltd. + Lee Yong Seob + yslee&dakos.net +11988 + diCarta Inc. + Yoav Eilat + yeilat&dicarta.com +11989 + DME Corporation + Michael Caulfield + mike.caulfield&dmecorp.net +11990 + gadgeteer.org + Allen Smith + allen&gadgeteer.org +11991 + Greenpeace + Derek Au + hotline&greenpeace.org +11992 + HeliXense Pte Ltd + Yusdi Santoso + ysantoso&helixense.com +11993 + Hilbert Computing, Inc. + Gary Murphy + glm&hilbertinc.com +11994 + Hubert Krause + Hubert Krause + hukrause&gmx.de +11995 + Hybrid Solutions, Inc. + Ken Breault + ken-breault&hybridsolutionsinc.net +11996 + IBSN + PC Drew + drewpc&ibsncentral.com +11997 + iKnowTheAnswer.com + Mark Mellman + mellman&iknowtheanswer.com +11998 + JSC Desarrollos y Aplicaciones + José Eduardo Mohedano + jemohedano&jscda.com +11999 + KFKI Systems + Miklos Nemeth + mnemeth&kfkisystems.com +12000 + Ki Consulting & Solutions AB + Pekka Rossi + pekka.rossi&kiconsulting.se +12001 + Lakefield Telecom, Inc. + Nicholas Lemberger + nickl&lakefield.net +12002 + Lemur Networks, Inc. + Ryan Moats + rmoats&lemurnetworks.net +12003 + luna8 Ltd. + Sven Huster + sven.huster&luna8.com +12004 + MCK Advance Technologies + Michael Chan + michaelchan&telus.net +12005 + MediaGS, Ltd. + Robert Posel + posel&d-d.si +12006 + N.C.H. 
spa Network Computer House + Alessandro Riaudo + alessandro.riaudo&nch.it +12007 + NAMS + Iftah Bratspiess + iftah&web-silicon.com +12008 + Pacher + Christian Pacher + mib-011226&pacher.com +12009 + Regenstrief Institute for Health Care + Gunther Schadow + gschadow&regenstrief.org +12010 + Schnackelchen + Carsten Schaedlich + carsten&schnackelchen.de +12011 + SEAGULL + Wico van Helden + wvanhelden&seagull.nl +12012 + Seoul National University of Technology + Kil-Hung Lee + khlee&duck.snut.ac.kr +12013 + Softworks Group Inc. + Dewane Woodley + dewane_woodley&yahoo.com +12014 + Studio of Arts And Sciences + Roger Buck + rog&powernet.nsw.gov.au +12015 + TGA Technologies, Inc. + James Dabbs + jdabbs&tga.com +12016 + CY Cergy Paris Université + Benoit Clavel + disi-infra&ml.u-cergy.fr +12017 + VeratNET + Boris Manojlovic + steki&verat.net +12018 + Vivanet Inc. + Aiman Sawan + aiman.sawan&vivanet.qc.ca +12019 + waterford.org + Aslam Ebrahim + rmovva&waterford.org +12020 + Webware Technologies Pvt. Ltd. + Rajanikant Chirmade + rajanikant&webwaretech.com +12021 + Wesley WorldWide Solutions, Inc + Wesley Ye + wesleyye&yahoo.com +12022 + BRECIS Communications + Terry Donahue + tld&brecis.com +12023 + Unassigned + ---none--- + ---none--- +12024 + Miraesys Co., Ltd + Haeil Kim + hikim&miraesys.com +12025 + C-EISA + Wooseung Kim + kimws&c-eisa.com +12026 + Proactive Java Consultancy LLP + Mr Dyer + flicker&anduin.net +12027 + AccelaNET + Emily Backes + oid-contact&accela.net +12028 + AirWave Wireless, Inc. + Jason Luther + jason&airwave.com +12029 + Altawave Inc. + John Rose + jrose&altawave.com +12030 + ANAM Wireless Internet Solutions + Paul Keogh + paul.keogh&anam.com +12031 + BitStream Logic, Inc + HwaJoon Kim + otime&bitstreamlogic.com +12032 + Booz Allen Hamilton + Kenya Dorsey + dorsey_kenya&bah.com +12033 + Checkmate Management + Anthony Hurry + anthony_hurry&checkmate.co.uk +12034 + Czech Certification Authority s.r.o. + Miroslav Hrad + hrad&czechca.cz +12035 + EBJ Consulting + Michael Steinberg + steinmj&swbell.net +12036 + École normale supérieure - Paris + Alain Fréhel + alain.frehel&ens.fr +12037 + Euronet Worldwide, Inc. + Kenneth Vrana + kvrana&euronetworldwide.com +12038 + everyhost.com + Mark A. Hershberger + mah&everyhost.com +12039 + Exalead S.A. + Patrice Bertin + contact&exalead.com +12040 + gull house + Gary Ogasawara + garyogasawara&yahoo.com +12041 + HTL Braunau + Auer Andreas + aauer1&gmx.at +12042 + Infosys Corporation + John McGloughlin + johnmac&infosyscorp.com +12043 + Jasmap Inc. + Sean Yang + sean&jasmap.com +12044 + Lakesoft Consulting & ISP + Jonathan A. Laub + laubj&lakesoft.net +12045 + Medfusion, Inc + Vikram Natarajan + vikram&medfusion.net +12046 + Metro Information Concepts as + Andreas Lydersen + al&metroic.no +12047 + Meylan Consulting + Volnys Borges Bernal + volnys&uol.com.br +12048 + Millennium Microwave Corporation + Daniel G. 
Peters + danielp&microwave2000.com +12049 + NetPlanetEarth + Gregory Vovk + gregory&planetsky.com +12050 + Ohman IT Consult + Johan Fredrik Ohman + johanfo&ohman.no +12051 + Onix Electronic Systems, LLC + David George + david&harktech.com +12052 + RCN Telecom + Dex Patel + dex.patel&rcn.net +12053 + Skjaerlund Software + Lars Skjaerlund + lars&skjaerlund.dk +12054 + Special Applied Intelligence Corporation + Fritz Kunstler + fritz&specialai.com +12055 + SUDAC + Johan Andersson + crew&sudac.org +12056 + Tadiran Communications + Svyatoslav Ivantsiv + slava&tadcomm.com +12057 + TE Data + Taymour El Erian + t.elerian&gega.net +12058 + The YAK Corporation + Ebbe Jonsson + ebx&y2.keilaranta.net +12059 + Digital Appliance Corp. + Bob Ellis + bob.ellis&dig-app.com +12060 + TruePosition, Inc. + Mike Amarosa + mamarosa&trueposition.com +12061 + University of Western Ontario + Ed Gibson + egibson&uwo.ca +12062 + User Interface Technologies Ltd. + Niall Mansfield + snmpreg&uit.co.uk +12063 + VeriCell, Inc. + Brian W. Spolarich + bspolarich&nephrostherapeutics.com +12064 + Zurich Scudder Investments, Inc + Avi Deitcher + avi_deitcher&scudder.com +12065 + APATON + Antonio Paton + apatonpariente&hotmail.com +12066 + Daedalus Networks + Paris A. Zafiris + saphire&daedalusnetworks.com +12067 + Pajo Networks + Alexander G. Paoli + alex.paoli&pajo.com +12068 + Wrocław Centre of Networking and Supercomputing + Józef Janyszek + tomasz.kowal&pwr.wroc.pl +12069 + Corpex Internet GmbH + Philipp Gaschuetz + admin&corpex.de +12070 + WARCO + Ryan Rounkles + rrounkles&warco.com +12071 + @Home Japan Co., Ltd. + Yusuke Mineno + minenoy&home.ad.jp +12072 + Acrowave Systems Co., Ltd + Hyung-Yeol Yoon + hryoon&acrowave.com +12073 + Aer Novo + Bryan J. Kolodziej + bryan&aernovo.com +12074 + Alison Associates + Garry Argrave + Garry.Argrave&alisonassociates.com +12075 + beckhaus consulting + Markus P. Beckhaus + markus&beckhaus.com +12076 + Bonware + Markus Urponen + markus.urponen&bonware.com +12077 + Broadmind Research Corporation + Chad Attermann + chad&broadmind.com +12078 + Circle24 Technologies + Narendra Desirazu + narendra&circle24.com +12079 + dotmaudot + Maurizio Codogno + dot.maudot&gmail.com +12080 + e-peopleserve + Neal Brett + neal.brett&e-peopleserve.com +12081 + Estic & Partners bv + ing. R.E. Brouwer + brouwer&estic.com +12082 + European Computer Telecoms AG + Hamish Barney + hamish.barney&ect-telecoms.de +12083 + Getronics Solutions Italia S.p.A. + Oronzo Berlen + oronzo.berlen&getronics.com +12084 + HighDeal + Christophe Trompette + christophe.trompette&highdeal.com +12085 + KR Consulting + Richard Dilbone + rdilbone&krcee.com +12086 + MKAPIUM + Khairul Anuar Mohammad + mkapium&idigi.net.my +12087 + netdirectory.org + Marc Giannoni + marcgiannoni&earthlink.net +12088 + Newdreamnet Co. Ltd. + Dae Young Choi + dychoi&newdreamnet.com +12089 + o3sis Information Technology AG + Hajo Dieterich + sysadmins&o3sis.com +12090 + RCS (formerly 'Prophet Systems') + Scott Gudgel + sgudgel&rcsworks.com +12091 + Real FS Inc. + Sohail Husain + suheyl&attglobal.net +12092 + Rotech Healthcare Inc. + Matthew Pike + mpike&rotech.com +12093 + Severoceska energetika, a.s. + Martin Horak + horakm&mail.sce.cz +12094 + Supélec Campus de Metz + Patrick Mercier + patrick.mercier&supelec.fr +12095 + Telenor Satellite Broadcasting + Kyrre Simonsen + kyrre.simonsen&telenor.com +12096 + Telic Communications, Inc. 
+ Chad Attermann + chad&telic.net +12097 + UZA - Universitair Ziekenhuis Antwerpen + Peter Praet + peter.praet&uza.uia.ac.be +12098 + VooDoo X Solutions + Ben Maynard + bmaynard&voodoox.net +12099 + Woollen Services + David Woollen + woollen&computer.org +12100 + XecureNexus + Sang Young Oh + Syoh&xecurenexus.com +12101 + YCOM SA + Alexandre Ghisoli + support&ycom.ch +12102 + Airport Authority Hong Kong + Kenny Hui + huik&hkairport.com +12103 + 4Tier Software + Gregoire Huet + gregh&4tier.com +12104 + Asis Ltd. + David Wright + wright&asis.co.il +12105 + BISC + Junxiao Song + sunknight&371.net +12106 + BIPOP-CARIRE spa + Alessandro Casati + alessandro_casati&bipop.it +12107 + Caw Networks + Sanjay Raja + sraja&caw.com +12108 + Concordant + John Croft + john&concordant.co.uk +12109 + IT Service Omikron GmbH + Reindl Sergej + reindl&itso.de +12110 + Lifeway Christian resources + Benny Hauk + benny.hauk&lifeway.com +12111 + Mark Schnur Consulting + Mark Schnur + mkschnur&pacbell.net +12112 + NORSYS + Thomas Leveque + tleveque&norsys.fr +12113 + Zox, Inc. + LingYan Zhao + zhao_lingyan&yahoo.com +12114 + noris network AG + Ingo Kraupa + info&noris.net +12115 + Philips MP4Net + Noam Hoffer + noam.hoffer&philips.com +12116 + Advanced Reality + Derek Ruths + druths&advancedreality.com +12117 + Atmel Nantes + Laurent Menu + laurent.menu&nto.atmel.com +12118 + Centrata Inc. + Philippe Fajeau + philippe&centrata.com +12119 + City of Mesquite Texas + Stan Markham + smarkham&ci.mesquite.tx.us +12120 + Defense Supply Center Richmond (DSCR) + David Huet + dhuet&dscr.dla.mil +12121 + Fastnet Communications + Martyn Roberts + martyn.a.roberts&bt.com +12122 + Henny Penny Corp. + Tim A. Landwehr + tlandwehr&hennypenny.com +12123 + iProcess Solutions + Roberto Dominguez + rdominguez&iprocesssolutions.com +12124 + Isilon Systems + David Dunham + dwdunham&isilon.com +12125 + Mediavirtuel + Eriam Schaffter + eriam.schaffter&wanadoo.fr +12126 + Mirafor Associates Oy + Rainer Ketola + rainer.ketola&mirafor.com +12127 + Rambold + Christoph Rambold + mail&rambold.com +12128 + RoDoGu Consulting + Roberto Dominguez + rodogu&acm.org +12129 + Safeweb Ltda + Luiz Carlos Zancanella Junior + junior&safeweb.com.br +12130 + SciQuest, Inc + Alfred Iacovella + aiacovella&sciquest.com +12131 + Siegfried AG + Sven Ahrens + accounts.io&siegfried.ch +12132 + Simutronics Corporation + Jim Miller + admin&simutronics.com +12133 + Talking Blocks, Inc. + Tim Hall + tim.hall&talkingblocks.com +12134 + Tradetrans Inc + Justin Stevens + justins&pwi.com +12135 + Unique Broadband Systems + Michael Kang + michaelk&uniquesys.com +12136 + Universidad Nacional de San Luis + Fernando Aversa + aversa&unsl.edu.ar +12137 + Universite de Paris-Sorbonne Paris IV + Thierry Besancon + Thierry.Besancon&paris4.sorbonne.fr +12138 + Laboratório de Sistemas Distribuídos /CEFET-PR + Fábio Henrique Flesch + fabio&lasd.cefetpr.br +12139 + Iowa Outdoors + Tony Bibbs + tony&iowaoutdoors.org +12140 + ALCOMA, Ltd + ing.Vaclav Vacek + vaclav.vacek&alcoma.cz +12141 + Aloes + Yohann Fourteau + yohann.fourteau&wanadoo.fr +12142 + bob corbett networks + Bob Corbett + bob&bobcorbett.net +12143 + Chrono-Logic + Tony Bibbs + tony&tonybibbs.com +12144 + Consumer Contact + Liam Cunningham + liam&consumercontact.com +12145 + Core Integrated Technologies, Ltd. + Eric S. 
Stickel + eric_stickel&coreinttech.com +12146 + SFT-Service (formerly 'Dialog.SFT-Service') + Vitaly Prtosko + villy&sft.ru +12147 + Die Weltregierung + Martin Schobert + martin&weltregierung.de +12148 + ELTEK Energy AS + Harald Johnsen + harald.johnsen&eltekenergy.com +12149 + Enalur S.A. + Pablo Alsina + noc&multitel.com.uy +12150 + Finmatica S.p.A. + Giuseppe Cattaneo + g.cattaneo&finmatica.com +12151 + Fyrplus AB + Marcus Andersson + maran&fyrplus.se +12152 + Guru Labs, L.C. + Dax Kelson + dax&gurulabs.com +12153 + HPCinteractive + Eric Buckhalt + ebuckhalt&hpcpub.com +12154 + IKEA IT AB + Anders Östling + anders.ostling&neurope.ikea.com +12155 + Innovay Inc. + Soonmyung Hong + sonnet&innovay.com +12156 + JOINT-STOCK COMPANY "STC CONTACT" + Andrey Kuptsov + kontakt&bn.by +12157 + KC Micro Specialists, Inc. + Kirk Deweese + hostmaster&micro.com +12158 + Mikros Kosmos + Fotis Georgatos + Fotis.Georgatos&mikroskosmos.com +12159 + M-TEC N.V. + Koen Aerts + koen.aerts&mtecbroadband.com +12160 + NetCommplete Pty Ltd + Scott Muller + scott&netcommplete.com.au +12161 + New World Telephone Ltd + Ray Fung + ray.fung&newworldtel.com +12162 + Orsus Solutions + Alex Grivnin + alexg&orsus.com +12163 + Quebeber.com + Adolfo Gomez + dkmaster&punto-g.com +12164 + Ringling School of Art and Design + Glen Shere + gshere&ringling.edu +12165 + SANTIN E ASSOCIATI SRL + Massimo Santin + msantin&santineassociati.com +12166 + ScanPlus GmbH + Konrad Naumann + domains&scanplus.de +12167 + Sequitur Systems + Chip Yamasaki + chip&seqsys.com +12168 + server2000 internet technology gmbh + Server2000 Tech Role + tech&server2000.at +12169 + SHENZHEN MODERN COMPUTER MANUFACTURER Co.,Ltd + DuJin + duj&mcm.com.cn +12170 + SZCOM Broadband Network Technology Co.,Ltd + Xia Ke Qing + xiakq&szcom.com.cn +12171 + Tony Bibbs + Tony Bibbs + tony&tonybibbs.com +12172 + Transparent Systems + John Zoetebier + john.zoetebier&transparent.co.nz +12173 + UNICC + Ray Compton + compton&unicc.org +12174 + Consolidated Freightways + Bob Pfingsten + pfingsten.robert&cf.com +12175 + Frontier Internet, Inc. + Charlie Watts + network&frontier.net +12176 + Canon IT Solutions Inc. (formerly 'Sumitomo Metal System Solutions Co.,Ltd') + Toyoji Mano + mano.toyoji&canon-its.co.jp +12177 + Henkel AG & Co. KGaA + Simon Kubanski + simon.kubanski&henkel.com +12178 + 3va.net + A van Drie + admin&3va.net +12179 + Alacris Inc. + Conrad Bayer + cbayer&alacris.com +12180 + ALI Technologies Inc + Marcel Sutanto + msutanto&alitech.com +12181 + Allegheny Energy Global Markets + Stephen Degler + sdegler&aeglobalmarkets.com +12182 + Airbus Space and Defence (formerly 'Astrium (UK)') + Kevin Rees + kevin.a.rees&airbus.com +12183 + Baldwin Hackett & Meeks, Inc. + Scott Boje + scott.boje&bhmi.com +12184 + Bamsco + Bill Petersen + unix_support&bigfoot.com +12185 + Ben Nederland BV + Arjan Landsmeer + arjan.landsmeer&ben.nl +12186 + Compagnon AG + Jörg Puschmann + jpuschmann&compagnon.net +12187 + Direct Solutions + Olivier Gaillard + ogaillard&directsolution.com +12188 + GBase Communications + James Chen + chenj&gbasecom.com +12189 + Holoplex Technologies, Inc. 
+ Laurence Moindrot + laurence&holoplex.com +12190 + in medias res + Peter Kuehl + kuehl&in-medias-res.com +12191 + Ins-sure Services Limited + Andy Edwards + andy.edwards&ins-sure.com +12192 + Iron Hill Technology + Wm Terry Stoneman + stonemwt&ironhilltech.com +12193 + Leapstone Systems, Inc + Rajan Annadurai + rannadurai&leapstone.com +12194 + MSS Communications Design GmbH + Alexander Buchfink + ab&mss-medien.de +12195 + Noicom S.p.A. + Claudio Russo + c.russo&noicom.it +12196 + one4net GmbH + Simon Roach + simon.roach&one4net.com +12197 + ProvisionSoft + Govin Varadarajan + gvaradarajan&cmgion.com +12198 + RINET + Pam Christman + pchristman&ride.ri.net +12199 + SAGE SRL + Emilio Desimoni + edesi&sage.com.ar +12200 + Seneca Groep B.V. + Rick Bakker + rabakker&seneca.nl +12201 + University Health Network + Geoff Hallford + geoff.hallford&uhn.ca +12202 + UP3I + Christian Sack + christian.sack&ops.de +12203 + VTT Information Technology + Kari Seppanen + kari.seppanen&vtt.fi +12204 + DevonIT + Dan Melomedman + dan&devonitnet.com +12205 + Allianz Ireland PLC + Roy Madden + roy.madden&allianz.ie +12206 + QinetiQ Ltd + Adrian Jones + acjones&qinetiq.com +12207 + Acme Solutions + Massimiliano Liccardo + liccardo&acmesolutions.it +12208 + actina AG + Steffen Tschirpke + st.tschirpke&actina.de +12209 + ASTOR-PROMOCJA Poland + Aleksander Slominski + as10m&hotmail.com +12210 + Bauer Verlagsgruppe + Jens Wettermann + jwettermann&hbv.de +12211 + Biogem + Gianluca Busiello + busiello&biogem.it +12212 + BlueHaven Services Ltd + Sherman G. Havens + havens&bluehavenltd.com +12213 + CAMIS Inc. + Gavin Bee + gavin.bee&camis.com +12214 + Covenant Retirement Communities + Dan Brant + dpbrant&covenantretirement.org +12215 + CUETS + Daryle Niedermayer + niederda&cuets.ca +12216 + DGT-LAB + Mariusz Piontas + mariusz&dgt-lab.com.pl +12217 + Ecole Polytechnique de Montreal + Richard Labrie + richard.labrie&polymtl.ca +12218 + Glückert, Machelett & Partner + Mirko Zeibig + zeibig&gum.de +12219 + Hamilton and Sullivan, Ltd. + Rick Seavey + rick.seavey&hsltd.com +12220 + Institute of Environment and Development + Wei He + hewei&ied.org.cn +12221 + ITWM + Christian Peter + peter&itm.fhg.de +12222 + Kawatetsu Systems, Inc. + Hiroyuki Kawakami + kawakami&kawatetsu-systems.com +12223 + Kinetics Fluid Systems + Joe Tuazon + jtuazon&kineticsgroup.com +12224 + Knightnet + Julian Knight + oid&knightnet.org.uk +12225 + Korea Embedded Linux Laboratory + Jackson Kang + ohmylove&mail.co.kr +12226 + Max-Planck-Institut fuer Festkoerperforschung + Michael Wanitschek + m.wanitschek&fkf.mpg.de +12227 + Naumen + Eugene Prigorodov + eprigorodov&naumen.ru +12228 + Niedermayer Systems + Daryle Niedermayer + daryle&gpfn.ca +12229 + Nirvana Research + PEN Administrator + nirvana&got.net +12230 + odahoda.de + Benjamin Niemann + pink&odahoda.de +12231 + OpenAdvice IT Services GmbH + Christian Port + info&openadvice.de +12232 + Order N Ltd. + Mark Burton + markb&ordern.com +12233 + OZ Communications, Inc. + Jean-Marc Serre + jean-marc.serre&nokia.com +12234 + ProGuy.dk + Paul Fleischer + proguy&proguy.dk +12235 + Roanoke College + James R. Dalton + dalton&roanoke.edu +12236 + Sena Technologies, Inc. 
+ InSik Um + uis&sena.com +12237 + SILOGIX + Pierre Chicourrat + Pierre_Chicourrat&silogix-fr.com +12238 + Spumoni + Scott McCrory + scott&mccrory.us +12239 + Symsoft AB + Jonas Wallenius + jonas&symsoft.se +12240 + System Solutions + Brendan Bosman + brendan.bosman&syssol.ie +12241 + Szkola Glowna Handlowa + Piotr Kucharski + chopin+iana&sgh.waw.pl +12242 + teamnet GmbH + Joerg Michels + michels&teamnet.de +12243 + Vislink PLC + Lalith Panditharatne + lalith&continental-microwave.co.uk +12244 + Celestica Corporation + Chris Watson + clwatson&celestica.com +12245 + Elvin + David Arnold + davida&pobox.com +12246 + Scottish Qualifications Authority + Jonathan McCalla + jonathan.mccalla&sqa.org.uk +12247 + IDP + Pascal Bazin + bazin&idp.fr +12248 + Availix SA + Hugo Delchini + hugo.delchini&availix.fr +12249 + Escola Universitaria Politècnica de Manresa + Ramon Navarro Bosch + ramon&eupm.upc.es +12250 + Alosys SpA + Roberto Micarelli + r.micarelli&alosys.it +12251 + Avalon Net Ltd + Samuel Levin + samuel&avalon-net.co.il +12252 + BAKU Construction office + Nobuhito Ozaki + kozaki&po.ntts.co.jp +12253 + CN Solutions, LLC + Christopher Audley + audley&cnsolutionsllc.com +12254 + Compubahn, Inc + Ajit Sowdas + asowdas&compubahn.com +12255 + Connecticut Telephone + Michael Klatsky + mklatsky&cttel.com +12256 + Covere + Stephen Cawley + stephen.cawley&cna.com +12257 + Escosoft Technologies + Mamta Swaroop Sharan + mamta_sharan&escosoft-tech.com +12258 + European Technology Consultants + Ben Preston + ben.preston&etc.co.uk +12259 + Forum des Images + Bunel Stéphane + admin&forumdesimages.net +12260 + Global Telecom, Inc. + Nicholas Kass + shadow&gti.net +12261 + GlobalCenter + Travis Fitch + travis&globalcenter.net.au +12262 + Gnosys S.A.R.L + Abdallah Deeb + abdallahdeeb&yahoo.com +12263 + HDFCBANK + Rehman S H + s.h.rehman&hdfcbank.com +12264 + Imagine-IT + Sebastian Schoenwetter + seb&imagine-it.be +12265 + Indraweb + Michael Hoey + mhoey&indraweb.com +12266 + ircd + Remi Malnar + krome&unit.cc +12267 + Johns Hopkins Applied Physics Laboratory + Robert Sealock + Robert.Sealock&jhuapl.edu +12268 + Liberty Communication Services, Inc. + Daniel M O'Brien + dmobrien&lcsi.net +12269 + Maybaum + Johannes Maybaum + gjm&pryde.2053.net +12270 + Novar + Marc Griffin + marc_griffin&novarcontrols.com +12271 + Pruthvi Soft + Pruthvi + pruthvihp&yahoo.com +12272 + Sacred Bytes + Michael Wiesner + mw&sacred-bytes.com +12273 + State of Minnesota + Shari Plumb + shari.plumb&state.mn.us +12274 + Universite Joseph Fourier - Grenoble 1 + Jacques Eudes + Jacques.Eudes&ujf-grenoble.fr +12275 + University of Southampton + Cédric Devivier + c.devivier&soton.ac.uk +12276 + F5 Networks Inc + Ravi Natarajan + r.natarajan&f5.com +12277 + VarySys Technologies GmbH & Co. KG + Frank Eberle + technik&varysys.com +12278 + VTX Technologie + Joseph Toledano + toledano&vtx.ch +12279 + Warp Link GmbH + Denti Luca + admin&warplink.ch +12280 + Western Institute of Technology at Taranaki + Ian Armstrong + i.armstrong&witt.ac.nz +12281 + Yumemi, Inc. + Taiyo Watanabe + taiyo&yumemi.co.jp +12282 + Crew Co.,Ltd + Osamu Yokozeki + hiko&cmc.crew.ne.jp +12283 + PLANET internet commerce GmbH + Sebastian Lehn + net&planet-ic.de +12284 + IMAX NETWORKS (SHENZHEN) Ltd. + William Zhang + imax&imaxnetworks.com.cn +12285 + Acclamation Systems, Inc + Ryan Frantz + rfrantz&acclamation.com +12286 + Issaqua + Marcus Vogt + mgvogt&bigpond.com +12287 + UltiSat, Inc. 
+ Christopher Ovale + christopher.ovale&ultisat.com +12288 + Andern Research Labs + Alexander Guy + a7r&andern.org +12289 + Beeweeb Srl + Giampiero Recco + g.recco&beeweeb.com +12290 + Celestica International Inc. + C. K. Lung + cklung&celestica.com +12291 + Cellular Specialties, Inc. + Don Mills + dmills&cellularspecialties.com +12292 + cindercat + Robert Wozniak + wozniak&hist.umn.edu +12293 + Digital Insight + James Zappia + james.zappia&digitalinsight.com +12294 + Emblaze Systems + Orit Baharav - Yaron + orit.baharav&emblaze.com +12295 + ERCIST + Gong Wen + gongwen&ercist.iscas.ac.cn +12296 + IDDEX Corp + Andrew Quale + quale&iddex.com +12297 + Independent Storage Corporation + Carl Madison + carlm&indstorage.com +12298 + kinge & Co limited + Kinge Paul + kinge&t-online.de +12299 + KTG, Inc. + Chris Hauck + chauck&temptrakwireless.com +12300 + Mennonite.net + Michael Sherer + msherer&goshen.edu +12301 + Mountain Visions P/L + Robert Foster + rfoster&mountainvisions.com.au +12302 + netikos spa + Alessandro Sinibaldi + alessandro.sinibaldi&netikos.com +12303 + New Bridges + Marcel Witt + witt&newbridges.nl +12304 + Next Advisors + Martin Fisher + mfisher&nextadvisors.com +12305 + PetraSync Inc. + Pete McCormick + pete&statspack.com +12306 + PreWorkX (Pty) Ltd. + Andre du Toit + andre&preworx.com +12307 + Red Wagon Solutions Incorporated + Kelvin Cookshaw + webreg-iana&redwagonsolutions.com +12308 + SecLab + Ahmad Mohammad Shafiee + shafiee&hadid.sharif.edu +12309 + self-indulgence.org + Metta Moore + metta_moore&yahoo.com.au +12310 + Simple Access Inc. + Mark Silverman + mark.silverman&simple-access.com +12311 + Strategic Service Alliance + Cory J. Hill + coryh&ssal.com +12312 + Taika Technologies, Ltd + Ilpo Ruotsalainen + lonewolf&taikatech.com +12313 + Tattersalls + Marcus Vogt + mgvogt&tattersalls.com.au +12314 + Universidade Federal de Campina Grande + Jacques Philippe Sauve + jacques&dsc.ufcg.edu.br +12315 + WorldWithoutWire.com + Adam Montague + am&wwow.ca +12316 + Chuo Electroplaters' Cooperative Association + Sotaro Hikosaka + hiko&cmc.crew.ne.jp +12317 + GL-Trade + Jean-Paul Lemaire + jplemaire&gltrade.fr +12318 + Loomis Chaffee School + Jesse Cooner + jesse_cooner&loomis.org +12319 + HEIWAKOGYO + Sotaro Hikosaka + hiko&cmc.crew.ne.jp +12320 + Aspen Systems, Inc. + Steve Spring + steves&aspsys.com +12321 + Bure Equity + Support + support&bure.se +12322 + Delta Information & Communication + Hyuck Han + auriga&netsgo.com +12323 + Department of Computer Science, Indiana University + Dennis Gannon + aslom&cs.indiana.edu +12324 + Deutsche Lebens-Rettungs-Gesellschaft e.V. + Reiner Keller + keller&dlrg.de +12325 + Fraunhofer FOKUS + Björn Riemer + iana-pen&fokus.fraunhofer.de +12326 + Hermes Precisa Australia + Mario Veronese + mariov&hpa.com.au +12327 + kjsanders professional consulting engineers + Keith J Sanders + kjsanders&kjsanders.com +12328 + LawsIT Pty Ltd + Dean Law + dean&presto.net.au +12329 + Panasonic OWL + Chris Holman + chrish&owl.co.uk +12330 + plan42 gmbh + Christian Lotz + cl&plan42.de +12331 + Presto Pty Ltd + Dean Law + dean&presto.net.au +12332 + Primus Telecommunications Group, Inc. + Darshan Kaler + dkaler&primustel.com +12333 + TE / TM-tiimi + Matti Kukkonen + matti.kukkonen&tietoenator.com +12334 + universal communication platform gmbh + Mag. 
Martin Sperl + martin.sperl&ucpag.com +12335 + University of York + Gavin Atkinson + gavin.atkinson&york.ac.uk +12336 + BuyWays + Jaap Bouma + jaap&buyways.nl +12337 + Broad band network center of Tsinghua University + Lin Guangguo + lgg01&mails.tsinghua.edu.cn +12338 + Netcore Network Technology + Gao Qi + huq&netcoretec.com +12339 + Empowered Networks + Caleb Dods + caleb.dods&empowerednetworks.com +12340 + Net Facilities Group + Paul J Stevens + beheer&nfg.nl +12341 + NOVOMODO Inc. + Bob Dulude + bob&novomodo.com +12342 + Pivia, Inc. + Christopher Small + csmall&pivia.com +12343 + SK Access Devices + Gerri Koerner + gerri.koerner&skaccess.com +12344 + Teros + Charlton Sun + csun&teros.com +12345 + VWB Group + Joris Mak + j.mak&vwbintermedical.nl +12346 + Hagero + Per Hagero + per&hagero.nu +12347 + Amalgamated Systems, LLC + Ron Chinn + ron&amalgasys.com +12348 + American Radio Relay League + Jon Bloom + jbloom&arrl.org +12349 + AT Consultancy + Ad Thiers + ad.thiers&atconsultancy.nl +12350 + ATMEL Hellas, S.A.n + Effie Kirkou + ekirkou&patras.atmel.com +12351 + Barking Cow + Jeff Hackert + jchack&barkingcow.com +12352 + Central Data Services + Tyson Boellstorff + t.boellstorff&gwccnet.com +12353 + Clariden Bank + Manuel Hilty + manuel.hilty&clariden.com +12354 + Colbourn Associates Ltd + Charles Colbourn + charles&colbourn.net +12355 + CTI PET Systems, Inc + Ellen Pearson + ellen.pearson&cpspet.com +12356 + Fortinet, Inc. + Michael Xie + mxie&fortinet.com +12357 + Generic Media, Inc. + Michael Kellner + m&genericmedia.com +12358 + Imagemedical + Gabor Horvath + ghorvath&imagemedical.com +12359 + Indigo Tango Ltd. + Ewan Ferguson + ewan&indigotango.com +12360 + Instituto Español de Informática y Derecho + Luis Rodriguez + lmr&ieid.org +12361 + KGEx.com Co., Ltd + Josan Lin + josanl&kgex.com.tw +12362 + Lombard Odier & Cie + Marc BERNEY + m.berney&lombardodier.com +12363 + net-labs Systemhaus GmbH + Ulrich Eck + ueck&net-labs.de +12364 + Newbreak LLC + José Vicente Núñez Zuleta + josevnz&newbreak.com +12365 + NTT COMWARE CORPORATION + Keiichi Sasaki + sasaki.keiichi&nttcom.co.jp +12366 + Pharmacia Corporation + David Ploch + david.w.ploch&pharmacia.com +12367 + QUICK Corp. + Kazuma Fujimi + fujimi&quick.co.jp +12368 + Raesemann Enterprises, Inc. + Robert Raesemann + rob_raesemann&yahoo.com +12369 + Röhrs Stahl- und Metallbau GmbH & Co.KG + Thomas Wieckhorst + T.Wieckhorst&roehrs-soltau.de +12370 + SARD Communications Ltd. + Angelina Markovic + ninam&bitsyu.net +12371 + Scrutiny, Inc. + Dave Duchesneau + dave&scrutiny.com +12372 + TeraCloud Corporation + Frank Snow + fsnow&tcloud.com +12373 + Tetrapod Communications, Inc. + Robert McKay + robert&tetrapod.com +12374 + Texas State Credit Company + Jason Tessmann + jtessmann&e-tscc.com +12375 + Universal Access, Inc. + Jacob McNutt + jmcnutt&universalaccess.net +12376 + University of Technology Dresden, Department of Computer Science + Stefan Koepsell + sk13&inf.tu-dresden.de +12377 + Vivendi Telecom Hungary + Tripolszky Zsolt + tripy&mail.vnet.hu +12378 + WellThot Inc. + Mark Schwenk + mas&wellthot.com +12379 + Wingra Technologies + Tomas Willis + tomas.willis&wingra.com +12380 + WiWo Support + M.F. 
van Dorp + m.vandorp&wiwo.nl +12381 + Worldcom Asia Pacific + Glenn Gore + gore.glenn&wcom.com.au +12382 + UNIHUB GLOBAL NETWORK + Xiaochuan Wu + wuxc&unihub.net +12383 + Chrysalis-ITS + Chris Dunn + lunatech&chrysalis-its.com +12384 + Commissariat a l'Energie Atomique + Dominique Schmit + gestion-oid&cea.fr +12385 + Common Voices + Duncan Fisher + snmp&commonvoices.com +12386 + InteQ Corporation + Sushil Menghani + sushil&inteqnet.com +12387 + IPDirections + Bernard Gilles + gb&ipdirections.net +12388 + JustThe.net LLC + Steve Sobol + sjsobol&justthe.net +12389 + MacsDesign Studio + Jonathan Lew + jlwe&macsdesign.com +12390 + Microcell Telecommunications Inc. + Guy Garon + snmp.master&microcell.ca +12391 + Universiteit Utrecht + Kees van Eijden + k.vaneijden&ict.uu.nl +12392 + Welcat Inc. + Yasutomo Sone + sone&welcat.co.jp +12393 + Active Hotels + Judd Muir + judd&activehotels.com +12394 + Alvarion Ltd. + Shai Yaniv + shai.yaniv&alvarion.com +12395 + Centro de Informatica da Universidade de Coimbra + Mário José Bernardes + mjb&ci.uc.pt +12396 + Cintech Solutions + John F. Huber + jhuber&cintechsolutions.com +12397 + Gesellschaft fuer Raumbildsysteme mbH + Carsten Grohmann + iana&raumbildsysteme.de +12398 + E. O. Ospedali Galliera + Marco De Benedetto + debe&galliera.it +12399 + FIX Express Corp. + Frank Proietti + frankp&fixcorp.com +12400 + Gatea Ltd. + John Shaffer + johns&gatea.com +12401 + Germinus Solutions + Javier Fernández-Sanguino Peña + jfernandez&germinus.com +12402 + IDLSPM + Joseph Maldjian, MD + maldjian&wfubmc.edu +12403 + Imedia Semiconductor Corp. + Raju Joag + raju.joag&imedia.com +12404 + Indianguide.com + Devaki Deshpande + devaki.d&genisys-group.com +12405 + Matrics Inc. + Yuxin Jiang + yjiang&matricsrfid.com +12406 + Medinet IDG S.A. + Enrique Santure + esanture&medinet-igd.es +12407 + NiceShipping Technology Corporation + Genie Wang + genie&mail.niceshipping.com +12408 + Noordelijke Hogeschool Leeuwarden + F.J. Bosscha + f.j.bosscha&nhl.nl +12409 + Objectivity, Inc. + Ibrahim Sallam + ibrahim.sallam&objectivity.com +12410 + OPS + Mario Ivankovits + mario&ops.co.at +12411 + Plasmedia + Mark P. Eliasaputra + mark.eliasaputra&plasmedia.com +12412 + Portbridge Internet + Igor Brezac + igor&portbridge.com +12413 + Radio Frequency Systems + Jean-Christophe Detay + RFS.MIBSupport&rfsworld.com +12414 + SCRYPTO Systems + Merlin Pascal + pmerlin&scrypto.fr +12415 + SinnerSchrader AG + Arnim Fluegge + iana&sinnerschrader.com +12416 + Summit Place Ales + Jonathan Brandenburg + jonathan.brandenburg&lante.com +12417 + Tata Infotech Limited + Ajit Kunjal + ajit.kunjal&tatainfotech.com +12418 + THALES + Renaud Dubois + renaud.dubois&thalesgroup.com +12419 + YAMAHA CORPORATION + Nobuaki Ohashi + nobuaki_ohashi&gmx.yamaha.com +12420 + Yokogawa Blue Star India Ltd + Sreedhara P. + psreedhara&ybil.com +12421 + JWAY GROUP, Inc. + Tomoo Watanabe + tomoo&jway.com +12422 + The Co-operators + Richard Sizer + richard_sizer&cooperators.ca +12423 + Concord Telephone Company + Kevin Carter + kevin&ctc.net +12424 + Congregation of the Sacred Hearts + Joshua Harding + jharding&sscc.org +12425 + ConLingual + Ed Brady + ed&ebrady.net +12426 + E-Control Systems + Vincent Ferreria + vincent.ferreria&econtrolsystems.com +12427 + Equinox Engineering Ltd. + Ehren Wilson + ewilson&equinox-eng.com +12428 + Genie Information Co. + Chun-chi Wang + genie&mail.genie.cc +12429 + Interact, Inc. Software Systems + Glenn E. 
Riedel + glenn&iivip.com +12430 + I-Silver + Lee Blakely + leerb&isilver-inc.com +12431 + iXus Technologiedienstleistungen GmbH + Nico Lochner + offerte&ixus.de +12432 + Jippi Group Oyj + Timo Fager + timo.fager&jippiigroup.com +12433 + John Sheahan & co. + John Sheahan + security&johnsheahan.com +12434 + Knox College + Don Hickey + dhickey&knox.edu +12435 + M2 Systems Corporation + Jack Curtin + jcurtin&m2-corp.com +12436 + Navosha, Inc. + Bryce Bingham + snmp&navosha.com +12437 + Next Level Design, Inc. + Elliot Metsger + emetsger&jhu.edu +12438 + Paradigm.One Pty Ltd + Marcel Jeffermans + mjuffermans&paradigmone.com.au +12439 + Ritzlmayr Consulting + Henry Ritzlmayr + h.ritzlmayr&gmx.at +12440 + Securenet Ltd + Yoram Hacohen + yoram&securenet.co.il +12441 + SELECTED HOSTING + Jack Xie + jackxh1215&yahoo.com +12442 + TechGemini Inc + James Benedict + james.benedict&rogers.com +12443 + www.TrafficShaper.com + Bob Mahar + info&trafficshaper.com +12444 + Mangolution, Inc. + Raymond Reeves + Raymond.Reeves&mangolution.com +12445 + Teradyne + Russ Luchinske + luchinsk&ttd.teradyne.com +12446 + UNPHU + Raymond Reeves + Raymond.Reeves&mangolution.com +12447 + yc consultant + Yanic Chausse + yanic68&hotmail.com +12448 + 7TH VISION BILLING COMPANY + Serge Kozhenkov + 7th_vision&softhome.net +12449 + Audiofon Chat & Play GmbH + Burkhard Cruse + cruse&audiofon.de +12450 + Australian Department of Employment and Workplace Relations + Michael Glasson + michael.glasson&dewr.gov.au +12451 + Communication Concept GmbH + Andre Mamitzsch + a.mamitzsch&ccgmbh.de +12452 + IntraConnect GmbH + Eckehart Stamer + ecki&intraconnect.de +12453 + Alaska Power & Telephone + Matthew Schumacher + matt.s&aptalaska.net +12454 + Aria Solutions Inc + Shane Boulter + sboulter&ariasolutions.com +12455 + CaCo + Carsten Ungermann + carsten.ungermann&caco.de +12456 + CertiSur S.A. + Armando Carratala + register&certisur.com +12457 + Chunghwa Telecom Laboratories + James Chen + jamescja&cht.com.tw +12458 + Dickinson Ventures, Inc. (formerly 'GranIT Solutions, LLC') + David D. Dickinson + dave&thedickinsons.net +12459 + Global Name Registry Limited + Rue Turner + rturner&gnr.com +12460 + ISG Consultoria e Informatica + Rodrigo Miranda Terra + rodrigo&isg.com.br +12461 + Jamm + Dave Dribin + dave&dribin.org +12462 + Linuxfabrik GmbH + Stefan Schuermann + Stefan.Schuermann&linuxfabrik.de +12463 + Mendosus + Fredrik Johansson + fjn&mendosus.org +12464 + MicroDowell SpA + Elio Corbolante + eliocor&microdowell.com +12465 + MyKitchenTable.net + Drew Tomlinson + drew&mykitchentable.net +12466 + Netfective Technology + Sébastien Daunit + sdaunit&netfective.com +12467 + OneSecure, Inc. + Ajit Sancheti + asanchet&onesecure.com +12468 + Pixion Inc + John Heng + jheng&pixion.com +12469 + QAC - Quality Assurance Consult Ltda + Gustavo Prado + gustavo&qaconsulting.com.br +12470 + Rechberger + Reinhold Rechberger + reinhold&rechberger.org +12471 + Rocket Software, Inc + Joe Devlin + Joe.Devlin&rocketsoftware.com +12472 + ScapeVision AG + Giuseppe Alagia + alagia&scapevision.ch +12473 + UNILANG + Sebastien Michea + smichea&free.fr +12474 + WAC + Shachar Shmelthzman + shachar&wireapp.com +12475 + Webmotion Inc. + Scott McIntyre + scott.mcintyre&webmotion.com +12476 + Carmody Consulting, Inc. 
+ Rob Davies + robdavies&intra-connect.com +12477 + NORCE Norwegian Research Centre AS + Håvard Lygre + it-ops&norceresearch.no +12478 + Maryland Department of Transportation + Jim Moore + jmoore2&mdot.state.md.us +12479 + OpenDNS Corp + Pamela Patterson + dude&opendns.biz +12480 + Allied Worldwide + Rickard Lofberg + rickard&kontorsspecial.com +12481 + Altrick + Derick van der Merwe + vdmeraj&unjisa.ac.za +12482 + Departmento Técnico + Gotor Artajona Jaime + infosec&areatec.com +12483 + Ebel + Uwe Ebel + info&koboldmaki.de +12484 + Binary Solutions + Kenneth Vestergaard Schmidt + kvs&binarysolutions.dk +12485 + WUMAG GmbH Werk Ebersbach + Holger Huebner + huebner&wumag.de +12486 + Access Grid + Robert Olson + olson&mcs.anl.gov +12487 + Adam Communication Systems International + Tim Wright + tim.wright&adamint.co.uk +12488 + Ankor Systems + Michael McManaman + mgm7&ankorsystems.com +12489 + Arizona Mail Order Co., Inc + Network Administrator + netadmin&amo.com +12490 + B.Eckstein Systems + Bernd Eckstein + be&epost.de +12491 + BondDesk + Bill Pfeifer + bpfeifer&bonddesk.com +12492 + Cameron Net + Andrew Cameron + arcameron&nishanet.com +12493 + eList eXpress LLC + James Galvin + services+oid&elistx.com +12494 + enigmatec GmbH + Frank Rogall + f.rogall&enigmatec.de +12495 + Eskata Systems, Inc. + David Liu + david&eskata.com +12496 + goSBC + Arthur M. Gallagher + Arthur.Gallagher&goSBC.com +12497 + Holborn College + John Rowley-Guyon + johnrg&holborncollege.ac.uk +12498 + Infofiend + Ben Giddings + bg.misc&infofiend.com +12499 + IPB + Rui Pedro Lopes + rlopes&ipb.pt +12500 + ISRO Satellite Centre + Rajesh Kumar + rajesh&isac.ernet.in +12501 + KeysoftEntellect + Yves Galante + yves.galante&keysoftentellect.com +12502 + METZEMIX + Stefan Metzmacher + stefan.metzmacher&metzemix.de +12503 + MYUGLYFACE COMPUTING + Brian Sullivan + bjs&myuglyface.com +12504 + Onlight + Nic Bernstein + support&onlight.com +12505 + Photonic Bridges Inc. + Harrison Zou + hzou&photonicbridges.com +12506 + Pinpoint Networks, Inc. + Frans N. Benders + benders&pinpoint.com +12507 + PROFIS SA + Matei Conovici + cmatei&profis.ro +12508 + Scardine & Lopes Ltda + Paulo Luiz Scardine Silva + paulo&psi.com.br +12509 + SecureONE + Andrew Hatfield + support&secureone.com.au +12510 + Slackworks + Frans N. Benders + xac&slackworks.com +12511 + Telesudeste Celular + Fabio Douek + fabio.douek&telefonicacelular.com.br +12512 + University of Utah College of Mines & Earth Sciences + Daniel Trentman + trentman&mines.utah.edu +12513 + XiteMedia I.S. V.O.F. + E.W. Groot + egbert&xitemedia.nl +12514 + Abbott Laboratories + Michael H. Buselli + mib-admin&gprd.abbott.com +12515 + ORGA Kartensysteme GmbH + Syam Prasad Srikakulam + sprasad&orga.com +12516 + ARL HomeCommunications Sdn Bhd + Zon Hisham Bin Zainal Abidin + zon&homecomm.net.my +12517 + MAN Nutzfahrzeuge AG + Hans Sostak + hans-peter.sostak&mn.man.de +12518 + University of Mainz + Markus Wagner + wagner&imsd.uni-mainz.de +12519 + Activa3 + Frederic Alsina + falsina&activa3.net +12520 + ASYRES + Sébastien Durand + sdurand&asyres.fr +12521 + Avacue Ltd + Stephen Wong + admin&avacue.com +12522 + California State University, San Bernardino + Javier Torner + jtorner&csusb.edu +12523 + Credit Union Central of Manitoba + Randy Jung + randy.jung&creditunion.mb.ca +12524 + D.I.B. 
+ Kim Yoon-Hwa + reno923&dib.net +12525 + Echostar Solutions + Aaron Anderson + aaron&echostar.ca +12526 + Groupe Eyrolles + Hussein Metdaoui + hmetdaoui&eyrolles.com +12527 + HOPF Elektronik GmbH + Ruprecht & Partner OEG + office&rdcs.at +12528 + PureSight Ltd + Royi Cohen + IANA&puresight.com +12529 + IDG Communications Ltd + Juha Saarinen + juha_saarinen&idg.co.nz +12530 + Informatikdienste, ETH Zuerich + David McLaughlin + mclaughlin&id.ethz.ch +12531 + James K. Fegan Consulting + James K. Fegan + jkf82933&pegasus.cc.ucf.edu +12532 + Neoteris, Inc. + Shyam Davuluru + questions&neoteris.com +12533 + NOX CO.,LTD + Takashi Ogiwara + ogiwara&nox.co.jp +12534 + NZPhone S/A + Carlos Eduardo Ribeiro do Valle + cvalle&nztech.com.br +12535 + Schmidt & Co. (HK) Ltd. + Charles Luk + charlesluk&schmidtelectronics.com +12536 + SIMSAM Consulting Limited + Simon Barnes + simon.barnes&simsamconsulting.co.uk +12537 + Spider Eye Studios + Gary Wisniewski + iana_org&gw.spidereye.com +12538 + Telkom + Peter Brooks + peter_brooks&hp.com +12539 + The Palantir Corporation + Eric R. Medley + emedley&xylocore.com +12540 + T-Systems International + Michael Schaefer + mschaefer&t-systems.com +12541 + Universidad de Carabobo + Jose Dinuncio + jdinunci&uc.edu.ve +12542 + UNIZETO Sp. z o.o. + Krzysztof Cieplucha + kcieplucha&unizeto.pl +12543 + Virgil BV + Mark van Leeuwen + mark.van.leeuwen&virgil.nl +12544 + AEGON Nederland N.V. + Arthur Wijnschenk + awijnschenk&aegon.nl +12545 + is Industrial Services AG + Holger Gehrmann + holger.gehrmann&is-ag.com +12546 + VerySmartPeople Inc + Joop Cousteau + golf&hushmail.com +12547 + Principle Inc. + Shahriar Aghajani + aghajani&principle.com +12548 + HSH Nordbank AG + Thilo Franz + thilo.franz&hsh-nordbank.de +12549 + Air Navigation Services (ANS) of the Czech Republic + Otakar Liska + oldi&ans.cz +12550 + Association for Multidiscipline Education in the Health Sciences + Donald MacDougall + dmacdoug&usc.edu +12551 + CE+T + Greindl Jacques + j.greindl&cet.be +12552 + Corporation of Balclutha + Alan Milligan + alan&balclutha.org +12553 + Dispuut Meteoor + Michiel Kool + kool&meteoor.net +12554 + Ecclesias.net + Donald MacDougall + dmacdoug&usc.edu +12555 + Eduro Technologies, Inc. + Bill Gibbs + bgibbs&edurotech.com +12556 + EmSol LLC + Martin Green + martin_green&emsolllc.com +12557 + GuruMeditations.org + Jefferson Hunt + jhunt&gurumeditations.org +12558 + icoserve information technologies GmbH. + Markus Gspan + m.gspan&icoserve.com +12559 + INRIA + LDAP Administrator + ldap-admin&inria.fr +12560 + InterEpoch Technology, Inc. + Yuh-Rong Leu + yrleu&interepoch.com.tw +12561 + Khonraad Systems Engineering BV + A.J.G.H.M. Khonraad + postmaster&khonraad.nl +12562 + Marcom Department Shanghai Mitac Research Co. Ltd. + VL Zheng + zhengvl&mic.com.tw +12563 + megatel GmbH + Bernd Schuette + admin&megatel.de +12564 + Natverket + Magnus Beutner + beutner&algonet.se +12565 + Navaho Networks Inc + Russell Chan + russ&navahonetworks.com +12566 + NIKoil Investment Banking Group (joint stock bank) + Igor N. Kalinskov + kal_in&nikoil.ru +12567 + One5 Corporation + David W. Lewis + dwlewis&one5.com +12568 + Ranger Computers + Alan James + alan&rangercom.com +12569 + Rigel Corporation + Helen Emery + emery&rigelcorp.com +12570 + S.I.C.E. s.r.l. + Valentina Lomi + info&sicetelecom.it +12571 + Symnetics Holdings + Ralph Wong + ralph88&netvigator.com +12572 + TCD Enterprise, Inc. 
+ Tuan Ho + TQH&onebox.com +12573 + Telenor Business Solutions + Bjorn Nordbo + bn&nextra.com +12574 + TMH The Marilyn House srl + Massimiliano Sellari + maxe&tmh.it +12575 + UIC + Hubert Cheng + hubertch&uniform.com.tw +12576 + Unidirect + Samuel Gautier + s.gautier&unidirect.fr +12577 + Unique Interactive + Tom Gray + tom.gray&unique.com +12578 + Viola Systems + Jari Lahti + jari.lahti&violasystems.com +12579 + ZIGZAG Internet Service Provider + Jaroslaw Ajdukiewicz + jajdukiewicz&zab.zigzag.pl +12580 + ZIRION NETWORKS, Inc. + Takakazu Morioka + morioka&zirion.co.jp +12581 + Naruhodo Solutions + Glenn Connell + glennconnell&hotmail.com +12582 + Primerica Financial Services + Glenn Drawdy + glenn.drawdy&primerica.com +12583 + University of California, Irvine Information and Computer Science Department + Hans Wunsch + support&ics.uci.edu +12584 + Butler + R. Scott Butler + butlerrscott&mindspring.com +12585 + Alstom Holdings + John Bruylant + john.bruylant&chq.alstom.com +12586 + Manono Software, Inc. (formerly 'ApiaTech Information Technologies, Inc.') + Scott Rich + scott&manonosoft.com +12587 + 430am + Al Fleming + al&430am.com +12588 + Blueice Research AB + Thomas Holmstrom + thomas&blueiceresearch.com +12589 + Celestial Software LLC + Bill Campbell + bill_ld&celestial.com +12590 + Changi International Airport Services Pte Ltd + Jason Zhang Xiao + jason&cias.com.sg +12591 + Corus group plc. + Peter Kelder + peter.kelder&corusgroup.com +12592 + Dycec + Juan Luis López Blázquez + jllopez&dycec.com +12593 + EKL Solutions LLC + Oguzhan Eris + eris&ekls.com +12594 + Elma Ingenierie Informatique + Jean-Samuel REYNAUD + jsreynaud&elma.fr +12595 + EMS Enterprise Messaging Solutions GmbH + Armin Schlueter + armin.schlueter&emsgmbh.de +12596 + JOUVE S.A. + Erwan Gaugain + egaugain&jouve.fr +12597 + Lightwave Communications + Martin Green + marting&lantronix.com +12598 + Net Vista + Ashish Bhandari + abhandari&netvista.net +12599 + Ordyn Electronic Systems Private Limited + Sreeraj S + sreeraj&ordyn.com +12600 + Peramon Technology Ltd + Ivor Davies + ivor.davies&peramon.com +12601 + Reed College + Ben Poliakoff + benp&reed.edu +12602 + RooNetworks Inc. + Kevin Bergner + kevin&roonetworks.com +12603 + ShadowSupport, Inc. + Steve Tulloh + stulloh&shadowsupport.com +12604 + Spatial Wireless + Venkat Kadali + vkadali&spatialwireless.com +12605 + SHOM + Bruno Treguier + bruno.treguier&shom.fr +12606 + MBB Gelma GmbH + Christoph Müller + christoph.mueller&mbb-gelma.de +12607 + Advance Fiber Optics Inc + Gerald Kelly + gkelly&advancefiber.com +12608 + Ahapala + Keith Nasman + keith&ahapala.net +12609 + Ascella Technologies + Amith Varghese + amith.varghese&ascellatech.com +12610 + Aspen Business Logic Computing + Gabor Ivan + ivang&freemail.hu +12611 + Atlantique Software + Mario Matos + mario.matos&mail.com +12612 + Barco BCI + Tom De Man + tom.deman&barco.com +12613 + BroadJump + Gary Caldwell + gcaldwell&broadjump.com +12614 + CiRBA Inc. + Riyaz Somani + rsomani&cirba.com +12615 + Craig F. Donlan + Craig Donlan + donlan&acm.org +12616 + DataCorp + Lei Wei + leiwei99&yahoo.com +12617 + Espedair Systems + Jonathan King + jon.king&alphawest6.com.au +12618 + EZHi Technologies, Inc. + Kaiji Chen + kaiji_chen&ezhi.com +12619 + UFRGS + Lisandro Zambenedetti Granville + granville&inf.ufrgs.br +12620 + Future System Consulting Corp. 
+ Tooru Kusakari + kusakari.tohru&future.co.jp +12621 + GE Medical System Information Technologies SW GmbH & Co.KG + Stefanie Rempfer + stefanie.rempfer&med.ge.com +12622 + Devicescape Software, Inc. + Bhavani Devi RG + bhavani&devicescape.com +12623 + IRCI S.A. + Philippe Auphelle + pauphelle&irci.fr +12624 + Kennesaw State University + Karen Driscoll + sysop&ksumail.kennesaw.edu +12625 + LabBook, Inc + Steve Roggenkamp + steve.roggenkamp&labbook.com +12626 + Ministry of Education, Singapore + V.N. Naidu + narayanasamy_v_naidu&moe.gov.sg +12627 + Morgan Stanley S.V.S.A + Jesus Martinez + jmrubio&msdw.es +12628 + Movielink + Calvin Poon + calvin.poon&movielink.com +12629 + MSE + Rick Heile + rick.heile&compaq.com +12630 + m-solutions Ltd. + Alex Lam + alexlam&msolutions.com.hk +12631 + Nets AG + Lars Schluckebier + lars.schluckebier&nets-ag.de +12632 + Objekt Management + Martin Roob + martin&objekt-management.de +12633 + Oceanbay Internet Services + Gary Graves + oceanbay&oceanbay.com +12634 + PICMG + Richard Somes + richard.somes&fci.com +12635 + Redsonic, Inc. + Kirk Murrin + kmurrin&redsonic.com +12636 + Scripps College + Jeffrey D Sessler + jeff&scrippscollege.edu +12637 + Shazam Entertainments ltd + Tony Miller + tony.miller&shazamteam.com +12638 + Siennax + Mark Ruijter + mark.ruijter&siennax.com +12639 + Supercable + Gustavo Garcia + ggarcia&supercable.net.co +12640 + The Houston-Harris County Immunization Registry + Bryan Tang + bytang&texaschildrenshospital.org +12641 + TRIBUNAL REGIONAL DO TRABALHO da 10ª REGIÃO + Gilberto Sena Rios + gsrios&trt10.gov.br +12642 + University of Southern Mississippi + David J. Sliman + david.sliman&usm.edu +12643 + UTI Systems S.A. + Victor Dumitrescu + victor.dumitrescu&uti.ro +12644 + VISAA + Manuguru Dhayaleeswaran Vijay + vijay_dhayal&yahoo.com +12645 + Vodafone Information Technology and Technology Management + Kevin Greene + kevin.greene&vodafone-us.com +12646 + WideBand Corporation + Roger E. Billings + billings&wband.com +12647 + Lulea University of Technology + Mattias Pantzare + pantzer&dc.luth.se +12648 + Systek Information Technology Ltd. + Alex Chan + alex.chan&systekit.com +12649 + Utfors AB + Rasmus Aveskogh + rasmus.aveskogh&utfors.se +12650 + Administrative Management Group, Inc. + Abhay Chitre + abhay.chitre&amgusa.com +12651 + Alaska Department of Labor + Paul Hegg + paul_hegg&labor.state.ak.us +12652 + Eivind Olsen Datakonsulent + Eivind Olsen + eivind&gluping.no +12653 + Armitel + Kevin Chae + kevinchae&armitel.com +12654 + ATH system + Josef Horak + horak&ath.cz +12655 + Utimaco IS GmbH (formerly 'exceet Secure Solutions GmbH' and 'AuthentiDate International AG') + Pierre Kraschinski + pierre.kraschinski&utimaco.com +12656 + Avest Plc. + Maxim Kostyshin + max_kostyshin&avest.org +12657 + Birmé Consulting + Jonas Birmé + jonas&birme.se +12658 + Blue Star Sustainable Technologies Inc. + Michael Branch + unix_wizard&yahoo.com +12659 + Brazier & Welch + Bill Welch + bill&brazier.com +12660 + CacheWare, Inc. + Stephen McHenry + stephen&cacheware.com +12661 + Caroline Chisholm Catholic College + Mathew McKernan + m.mckernan&cccc.vic.edu.au +12662 + CHM Inc. + John McDonald + jmcdonald&chm.net +12663 + DocFi + Daniel Moore + dan&docfi.com +12664 + DotCall + Joseph Shanley + jshanley&link.net +12665 + Enfotec, Inc. 
+ Bob Bova + iana&enfotec.net +12666 + Etagon + Yahav Nussbaum + yahav_nussbaum&etagon.com +12667 + ETERE + Fabio Gattari + fabio.gattari&etere.com +12668 + Garuda Networks + Jackie Huen + jackie&garudanetworks.com +12669 + Hanenkamp.com + Andrew Sterling Hanenkamp + sterling&hanenkamp.com +12670 + Indigo Development Corp. + Eric Zidovec + ericz&indigodevelop.com +12671 + Institut fuer System-Management + Mathias Ohlerich + moh&landhaus-k.de +12672 + isMobile AB + Goran Lowkrantz + goran.lowkrantz&ismobile.com +12673 + iTelco Communications, Inc. + Luke Peacock + lpeacock&i-telco.com +12674 + Kaon Interactive Inc + Kevin Russo + russo&kaon.com +12675 + Linux Unlimited, LLC + Scott Sharkey + ssharkey&linuxunlimited.com +12676 + Logical + Kylie MacFadzean + kylie.macfadzean&au.logical.com +12677 + Mackeeg, Inc. + Jim Meincke + jmeincke&yahoo.com +12678 + Nankai University Chuangyuan Information Technologies Co., Ltd. + Hite Lee + lihaitao&itec.com.cn +12679 + NextFuture + Rink Springer + rink&ikuu.org +12680 + Objective Dynamics Ltd + Michael Baker + michael_baker&objectivedynamics.co.uk +12681 + Parker Abex NWL + Pete Miller + pjmiller&parker.com +12682 + PC Info Solutions + Mathew Locke + mlglobal99&hotmail.com +12683 + PreCache Inc. + Roger Leng + rleng&precache.com +12684 + Quantum Solutions + John Gormley + gormley&qsolutions.com +12685 + SCS Engineers + Jerry Keene + jkeene&scsengineers.com +12686 + SDS GmbH + Jens Burger + burger&sds-hagen.de +12687 + SiliconLogic Ltd. + Mark Duszyk + mduszyk&yahoo.com +12688 + Sohu.com Inc + KingYee + kingyee&sohu.com +12689 + SUPERONLINE INTERNATIONAL ONLINE INFORMATION AND COMMUNICATION SERVICES Inc. + Melih Ozdemir + melih.ozdemir&superonline.net +12690 + Technology Leaders, LLC + David T. Smith + David.Smith&Technology-Leaders.com +12691 + Terilogy Co., Ltd. + Yoichi Matsuura + ymatsu&terilogy.com +12692 + Think4You IT Services GmbH + Kim Laue + it-cgn&t4y-it.com +12693 + Ti.KOM Tirol Kommunikation + DI Stephan D'Costa + stephan.dcosta&tikom.at +12694 + UNIVERSITE PARIS-EST MARNE-LA-VALLEE + Centre de Ressources Informatiques + vishaal.golam&u-pem.fr +12695 + VS Vision Systems + Gruszka Damian + damian.gruszka&visionsystems.de +12696 + Xavier College + Anthony Sutton + anthony.sutton&xavier.sa.edu.au +12697 + YoungHome.Com, Inc + Darren Young + darren&younghome.com +12698 + AFRANET Co. Ltd,. + N. R. Neshaat + lir&afranet.com +12699 + Moy Corporation + Gang Gong Moy + gg&moy.com +12700 + UNC Health Care + John McNulty + jmcnulty&unch.unc.edu +12701 + Tuxnology + Craig Spurgeon + tuxnology&quiknet.com +12702 + 724 Solutions Inc + Andrew Schofield + andrew.schofield&bluewin.ch +12703 + Acelet + Wei Jiang + wei_jiang&acelet.com +12704 + ALLTEL Corporation + Timothy S Hoffman + Timothy.S.Hoffman&alltel.com +12705 + BAX Global + Raed Nashef + rnashef&baxglobal.com +12706 + BYTERAGE, INC + Marshall Sorenson + marshall&byterage.net +12707 + Connect Systems Inc + Gregory D. Newton + gregn&connectsys.com +12708 + CyberLancet Corporation + H. Jeff Tsai + jefft&cyberlancet.com +12709 + Davies, Inc. 
+ Chris Davies + mcd&daviesinc.com +12710 + Dedicated Hosting Services + Sean O'Brien + sean&dhsmail.com +12711 + Experian Limited + Jonathan Deeming + jon.deeming&experian.com +12712 + Four Corners Telecommunications Corp + Ray Moore + rcm&fourcornerstelecom.com +12713 + HighSpeed Surfing + Soo-Hang Ryu + soohang.ryu&highspeedsurfing.com +12714 + Ikosaeder + Andreas Winderl + winderl&ikosaeder.de +12715 + Indian Institute of Technology, Bombay + G Sivakumar + siva&iitb.ac.in +12716 + Infologigruppen Norr AB + Goran Lowkrantz + goran.lowkrantz&infologigruppen.se +12717 + InterSAN, Inc + Barry H. Feild + barry&intersan.net +12718 + Rheinische Friedrich-Wilhelms-Universitaet Bonn + Irina Neerfeld + oid&uni-bonn.de +12719 + Open Software Services, LLC + Joe Koberg + joe&opensoftwareservices.com +12720 + Reliant Resources + Dylan Clark + dsclark&reliant.com +12721 + Rendition Networks Incorporated + Ajay Gummadi + ajay&renditionnetworks.com +12722 + Rite Aid Corporation + Steven P. Swartz + sswartz&riteaid.com +12723 + Seisint Inc. + Glenn Puchtel + gpuchtel&seisint.com +12724 + Sensinova AB + Hans Kalldal + hans&sensinova.se +12725 + Santa Clara County + Jimmy Liang + jimmy.liang&isd.sccgov.org +12726 + Sudoeste Serviços de Telemática Ltda + Antonio Dias + hostmaster&sst.com.br +12727 + Supervise Network + Pierrick Simier + pierrick.simier&supervise-network.fr +12728 + SYSWILL + Kwon Soon-Hong + shkwon&syswill.com +12729 + Be Shaping The Future - Performance, Transformation, Digital GmbH + Martin Hans + info&be-tse.de +12730 + TECSys Development, LP + Clyde T. Poole + c_poole&tditx.com +12731 + UTI Systems S.A. + Mihai Ianciu + mihai.ianciu&uti.ro +12732 + VoIP Group Inc. + Marcelo Garbarino + mgarbarino&voipgroup.com +12733 + Savvius, a Live Action Company + Jay Botelho + jbotelho&savvius.com +12734 + Northrop Grumman Information Systems + Ron Smudz + ron.smudz&ngc.com +12735 + VFSPIH + Dirk Dons + dirk.dons&vlafo.be +12736 + Elo & Mahout & Co + Nicholas Wehrmann + mahut42&t-online.de +12737 + Algenib Software, Inc. + Gerald Hewes + gerald&algenibsoft.com +12738 + BMW of North America + Robert Brown + robert.brown&bmwna.com +12739 + dgstar co., Ltd. + Sungmo Yang + smyang&dgstar.co.kr +12740 + EqualLogic + Irina Suconick + irina&equallogic.com +12741 + ErgoIntegration AS + Atle S. Ruud + atle.ruud&ergo.no +12742 + HomeConcept + Pascal Roulin + info&homeconcept.ch +12743 + Integrated Service Lab + Pojen Hsiao + pojen&guva.com +12744 + Nakisa Inc. + Romeo de Leon + rdeleon&nakisa.com +12745 + Nordisk Språkteknologi AS + Lars-Petter Helland + lars-petter.helland&nst.as +12746 + Spinoza Technology Inc. + Josh Doubleday + josh&sp1n.com +12747 + Teamsoft Inc. + Dany Ayotte + ayotte&teamsoft.com +12748 + Telenor ASA + Jan Egil Andreassen + jan-egil.andreassen&telenor.com +12749 + UTA Telekom AG + Heinz Ekker + heinz.ekker&uta.at +12750 + xi'an huahai medical info-tech co.,ltd. + DingYuqi + dyq&huahai.com.cn +12751 + e-chiceros + Javier Moreno + ciberado&hotmail.com +12752 + Acta Technology, Inc. + Matthew Rabuzzi + matt.rabuzzi&acta.com +12753 + Biomedical Informatics Laboratory + Daniel J. Valentino + dvalentino&mednet.ucla.edu +12754 + bTurtle + S. Aeschbacher + s.aeschbacher&bturtle.ch +12755 + Cup 2000 spa + Federico Calo + federico.calo&cup2000.it +12756 + Delta Networks, Inc. + James Liu + james.sh.liu&delta.com.tw +12757 + eCubeNet.com + Atsushi Hosono + hosono&ecubenet.com +12758 + ExPet Technologies, Inc.
+ Young II Kwak + ssen76&expet.co.kr +12759 + Garban Intercapital + Kenneth Cheung + kenny.cheung&us.icap.com +12760 + h00.org + Frank Hollmann + frank.hollmann&epost.de +12761 + Hipbone, Inc. + Robert Snedegar + rws&hipbone.com +12762 + iMpacct Technology Corp. + Taisan Yang + taisan&impacct.com.tw +12763 + ISK Systems Ltd + Ian King + isking&ntlworld.com +12764 + MEDIAOCEAN + Ross Rankin + ross&mediaocean.com +12765 + Mintera Corp. + Steve Bolton + steve.bolton&mintera.com +12766 + Mobixell Networks Inc. + Asher Besserglick + asherb&mobixell.com +12767 + National Computer Helpdesk + John Harris + John.Harris&NationalComputerHelpdesk.com.au +12768 + NaviSite Inc + David Yee + dyee&navisite.com +12769 + netvigator.com + Spencer KL Wong + kwok-leung.wong&pccw.com +12770 + Nyttab Ab / SurfNet + Artur Signell + info&surfnet.fi +12771 + Palfrader + Peter Palfrader + iana&palfrader.org +12772 + PZU Zycie SA + Pawel Witan + p.witan&pzuzycie.com.pl +12773 + Resource Center + Scott Gifford + sgifford&suspectclass.com +12774 + Sengent, Inc. + Paul Denninger + pdenninger&sengent.com +12775 + Shiron Satellite Communications Ltd. + Salit Drobiner + salitd&shiron.com +12776 + The Box IT + Patrick Bosson + patrick&thebox-it.com +12777 + TNG - the net generation GmbH + Carsten Tolkmit + ctolkmit&tng.de +12778 + TranTech, Inc. + Roy Roebuck + roy_roebuck&trantech-inc.com +12779 + University of Vermont + Jim Lawson + root&uvm.edu +12780 + VOLKTEK Corporation + James Chen + jameschen&volktek.com.tw +12781 + Progeny Linux Systems + John Daily + jdaily&progeny.com +12782 + Rost+Oberndoerfer Consulting GbR + Christian Rost + cr&rocon.de +12783 + University of Maine System + Wayne T. Smith + wts&maine.edu +12784 + Teamware Group AB + Jonas Israelsson + jis&teamware.se +12785 + Amacis Ltd + Kevin Mooney + kevin_mooney&amacis.com +12786 + Automatos, Inc. + Agostinho Villela + villela&automatos.com +12787 + AxxessAnywhere + Greger V. Teigre + greger&connectivitypartners.com +12788 + BladeLogic Inc + Balaji Srinivasa + balaji&bladelogic.com +12789 + Centre for Integrated Computer Systems Research + Jennifer Guo + jguo&mss.cicsr.ubc.ca +12790 + CESKY TELECOM a.s. - Imaginet o.z. + Martin Pala + martin.pala&hq.iol.cz +12791 + Data & Control Systems Ltd + Kozakis Dyonisis + noc&dcs.com.gr +12792 + Embarcadero Technologies, Inc. + Nigel Myers + nigel.myers&embarcadero.com +12793 + EUREM GmbH + Adruni Y. Ishan + ishan&eurem.de +12794 + Frank + Florian Frank + florianfrank&gmx.de +12795 + HCC Hobbynet + Ted Schipper + ted&hobby.nl +12796 + I/O Software + Matt Clements + matt&bendenajones.com +12797 + Interactive Payer Network + Stephanie Rogerson + stephanie.rogerson&interpaynet.com +12798 + Internet Kayttajat Ikuisesti - IKI ry + Timo J. Rinne + tri&iki.fi +12799 + IPv6 Research and Development + Jaco Engelbrecht + bje&ipv6rd.net +12800 + iqdoq + Ferruh Zamangoer + ferruh.zamangoer&materna.de +12801 + Linuxmagic + Michael Spyra + m.spyra&danzas-euronet.de +12802 + Mathcom Solutions Inc + Steve Sullivan + sullivan&mathcom.com +12803 + Multitask Consultoria Ltda + Sergio Fischer + fischer&multitasknet.com.br +12804 + National Library of Australia + Mark Corbould + mcorbould&nla.gov.au +12805 + Notnet Webhosting + Theo Zourzouvillys + theo&notnet.co.uk +12806 + Peter Stamfest + Peter Stamfest + peter&stamfest.com +12807 + Rabbit Semiconductor + Stephan Hardy + shardy&zworld.com +12808 + S2IO Technologies Corp.
+ Leonid Grossman + leonid.grossman&s2io.com +12809 + San Diego Unified School District + Erik Pitti + epitti&sandi.net +12810 + Saratov State University + Paul P Komkoff Jr + i&stingr.net +12811 + SatService GmbH + Michael Ulbricht + mu&satservicegmbh.de +12812 + Trustwave Holdings, Inc. + OIDAdmin + OIDAdmin&trustwave.com +12813 + Speedwise Technologies + Ofer Yachimovitz + ofery&speedwise.com +12814 + Staatliche Studienakademie Leipzig + Ingolf Brunner + ingolf.brunner&ba-leipzig.de +12815 + Terraspring, Inc. + Benjamin Stoltz + stoltz&terraspring.com +12816 + Universität Trier + Helmut Steffes + steffes&uni-trier.de +12817 + Z-World Incorporated + Joel Baumert + jbaumert&zworld.com +12818 + The Key Centre for Human Factors and Applied Cognitive Psychology + Sarah Hollings + sysadmin&humanfactors.uq.edu.au +12819 + Banque Generale du Luxembourg + Roland Schoenauen + roland.schoenauen&bgl.lu +12820 + Electrum Information Technology Co.Ltd + Cao WeiWei + vickycao&163.net +12821 + e-Pie Entertainment & Technology Corporation(Beijing) + Tony Wang + wty&epiegame.com +12822 + icu + Lee Kye Chan + cagers96&hanmail.net +12823 + AMIT, Inc. + Rupert Li + iana-pen.mp&amit.com.tw +12824 + AndTeK GmbH + Stefan Grossberger + sgrossberger&andtek.com +12825 + ATC Information Services + Joel McCarty + jmccarty&atcis.com +12826 + AXL Performance Solutions Ltd + Steve Jepps + stevej&axl.co.uk +12827 + Barco, Inc. + Elias Chibli + elias.chibli&barco.com +12828 + Blue Heron Biotechnology Inc + IT Systems + systems&blueheronbio.com +12829 + Brady Motor Sports + Tom Brady + tjbrady&webtv.net +12830 + Central Florida Main + Christopher Clai + contiga&mpinet.net +12831 + Codemark + Neil Godfrey + neil&codemark.com +12832 + COUNCIL OF EUROPE + Samuel Chaboisseau + samuel.chabolisseau&coe.int +12833 + Covansys + Bryan Youdan + byoudan&covansys.com +12834 + CPE + Pierre Chifflier + chifflier&cpe.fr +12835 + Digitrust - Certificadora Notarial SA + Jose Luiz Brandao + brandao&digitrust.com.br +12836 + E-Advies + Emile van Bergen + pec&e-advies.info +12837 + Polytech'Lille + Xavier REDON + Xavier.Redon&polytech-lille.fr +12838 + Exis Srl + Andrea Trabucco + atrabucco&exis.it +12839 + German Gutierrez + German Gutierrez + errare_est&yahoo.com +12840 + Griffin Plaza Partners, LLC + Edwin Culp + eculp&encontacto.net +12841 + Interactive TKO Inc. + John Michelsen + john&itko.com +12842 + Mediso Ltd. + Zsolt Hegyi + hegyizs&mediso.hu +12843 + MIPTelecom + Hyuck Kun Choi + chk0705&miptel.com +12844 + Northern Michigan University + John Marra + jmarra&nmu.edu +12845 + Oak Lawn School District 229 + Jason Williams + jwilliams&olchs.org +12846 + ObjectCode + Bodo Junglas + junglas&objectcode.de +12847 + Orincon Corporation + Meng Ngov + mngov&orincon.com +12848 + Saddleback College + Mark Sierakowski + msierakowsk&saddleback.edu +12849 + SKYRIX Software AG + Bjoern Stierand + bs&skyrix.com +12850 + Smart Tech Consulting + Ronie Lima + ronie&smartech.com.br +12851 + Syncope Communication Systems GmbH + Carsten Jenner + cj&syncope.de +12852 + SYSPAK + Paul Pejman + paul&pejman.dk +12853 + Technical Toys Limited + Derek Mulcahy + derek&technicaltoys.net +12854 + tellion + Jongmin Kim + jmkim&tellion.com +12855 + Vastweb Technology Ltd. + Jonathan Essex + jonathan.essex&vastweb.co.uk +12856 + Verge Networks + Simon Horman + horms&verge.net.au +12857 + Williams College + Ashley Frost + ashley.w.frost&williams.edu +12858 + eNetrex Inc. + Son Dong Gi + dgson&enetrex.com +12859 + HIGHWAY.RU + Peter A.
Savitch + support&highway.ru +12860 + Zaryba Ltd + Gwyn Evans + gwyn&zaryba.com +12861 + Bayerische Beamten Versicherungen + Siegfried Huber + siegfried.huber&bbv.de +12862 + NEEF LAPPCOM GmbH + Axel Maertens + axel.maertens&neeflappcom.de +12863 + AirLink Technology, Inc. + Ji Young Soo + quietly&airlinktek.com +12864 + INCOGEN, Inc + Pae Choi + pae&incogen.com +12865 + Amin Group + Bryan Dumm + bryan&bcpub.com +12866 + CardEngine Inc. + Alicia da Conceicao + alicia&engine.ca +12867 + Cedar Point Communications + Thor Kirleis + tkirleis&cedarpointcom.com +12868 + CET Technologies Pte Ltd + Francis Ngian Bang Sin + ngianbs&cet.st.com.sg +12869 + Codent Networks + George Zhao + gzhao&codentnetworks.com +12870 + Danlab Electronics A/S + Torben Machon + tm&danlab.com +12871 + die netzwerker GmbH + Alexander Bauer + abu&netzwerker.de +12872 + DTE Energy + Steven M Chegash + steven.chegash&dteenergy.com +12873 + E.C.C. sa + Stefaan A Eeckels + stefaan.eeckels&ecc.lu +12874 + EFM networks Inc. + In Kim + inkim&efm-net.com +12875 + ExperShare + Gregory S. Messer + gmesser&expershare.com +12876 + Fokker Space + Joeri Bekker + j.bekker&fokkerspace.nl +12877 + FORTIS + Jean-Paul Colard + jean-paul.colard&fortisbank.com +12878 + Griffin Network Consulting + Brett Whinnen + bmw&griffin.net.au +12879 + IDNT Integrated Digital Network Technologies + Marcus Zoller + marcus.zoller&idnt.net +12880 + kiwi interaktive medien gmbh + Edward Bradburn + bradburn&kiwi.de +12881 + KnowGeeks + James Wood + dns&knowgeeks.com +12882 + LGC Wireless + Denny Yim + dyim&lgcwireless.com +12883 + Martinsson informationssystem + Niklas Back + niklas.back&martinsson.se +12884 + Moody Bible Institute + Erik A. Widholm + noc&moody.edu +12885 + MyArtic Communities + Mika Koivisto + mika.koivisto&myartic.net +12886 + National Technical Systems + Mark Tillinghast + tillinghast&earthlink.net +12887 + Netplex AB + Anders Hagman + anders.hagman&netplex.se +12888 + Prokom Bilgisayar ve Danismanlik Hizmetleri San. ve Tic. Ltd.Sti. + Sitki Kamil Karadenizli + skk&prokom.com.tr +12889 + PTC + Jay Sargent + jsargent&ptc.com +12890 + Shorty + Antoine Benoit + antoine_benoit&gmx.it +12891 + Syndicat Interhospitalier Limousin + Alain Meinieux + alain.meinieux&sil.fr +12892 + University of Patras + Victoria Daskalou + daskalou&upatras.gr +12893 + VisioWave S.A. + Jean-Claude Michelou + jcm&visiowave.com +12894 + Wmode Inc + David Cooper + david.cooper&wmode.com +12895 + Xsetup + Mark Pavlyk + info&xsetup.de +12896 + Xtria Healthcare + Jim Lawson + jlawson&xtriahc.com +12897 + ZiLOG Inc. + Murray Baker + mbaker&zilog.com +12898 + Sinter Consulting + Chris Kringel + cmk007&hotmail.com +12899 + Visogent Technologies + William Bondy + wmb&visogent.com +12900 + ITEC + Yan Ming Zhou + zhouyanming&itec.com.cn +12901 + Institute of Software,Chinese Academy of Sciences + Zhijun Liu + zjliu&otcaix.iscas.ac.cn +12902 + Advanced Network Technology Laboratories Pte Ltd + Toh Teck Kang + tohtk&antlabs.com +12903 + Bayshore Networks, Inc. + Francis Cianfrocca + francis&tempest.com +12904 + EVault, Inc. + Raymund Estrada + domainadmins&evault.com +12905 + Indian Institute of Technology Guwahati + Gobind Gaurav Bansal + gobind&iitg.ernet.in +12906 + japc + Jose Celestino + japc&co.sapo.pt +12907 + Kansai Broadband Planning Corp. + Hisato Koyama + koyama&kansai-bb.com +12908 + Litech Systems Design + Nathan Lutchansky + n143&cornell.edu +12909 + Machine Vision Products, Inc. + Michael D. 
Risser + michael&visionpro.com +12910 + Next Element + René van 't Hof + rvhof&next-element.nl +12911 + Oce NV + Peter Strous + tech-c&oce.com +12912 + PeerPro + Charles Meier + cmeier&peerpro.net +12913 + Procera Networks, Inc. + Mick Hansen + mbhansen&yahoo.com +12914 + Production Robots Engineering Ltd + Martin Lafferty + martinl&prel.co.uk +12915 + Radio Free Europe/Radio Liberty, Inc. + Swetal Jariwala + jariwalas&rferl.org +12916 + Rockland Community College + Lin Young + lyoung&sunyrockland.edu +12917 + Saber e Lazer, SA + Jose Celestino + japc&co.sapo.pt +12918 + TDI- Transistor Devices, Inc + Constantino Monroy + constantino_monroy&tdipower.com +12919 + XAVi Technologies Corporation + Gavin Ko + gavin&xavi.com.tw +12920 + Xteam Software Co, ltd + Qu Jianbing + qjb&xteamlinux.com.cn +12921 + Yeaman Associates + John Yeaman + jyeaman&metrocast.net +12922 + Your Voice S.p.A. + Marco Lanzotti + marco.lanzotti&yourvoice.com +12923 + Kigyo Zanmai Corp. + Noboru Kurogouchi + kuro&zanmai.biz +12924 + Klon/Jawor Association + Jerzy Filipowicz + klon&klon.org.pl +12925 + 3PAR Data + An Lam + an.lam&3pardata.com +12926 + Anheuser-Busch + Tim O'Day + tim.oday&anheuser-busch.com +12927 + Axiom (Cambridge) Ltd + Colin McGerty + sysadmin&mvhi.com +12928 + Clarus Systems + Kevin McGowan + kevin.mcgowan&clarussystems.com +12929 + Cliff Berg + Cliff Berg + cliff.berg&digitalfocus.com +12930 + CVS Travelhost International Pty Ltd + Izak Fourie + izakf&cvs.co.za +12931 + Dreefs GmbH Schaltgeräte & Systeme + Manuel Vazquez Lamas + manuel.vazquez&dreefs.de +12932 + FESD GmbH + David Gutman + d.gutman&fesd.de +12933 + Global Orchestra + Dadure Arnaud + info&globalorchestra.com +12934 + Hunkeler AG + Claudio Schiess + c.schiess&hunkeler.ch +12935 + i3 micro technology ab + Arne Jonsson + arne.jonsson&i3micro.com +12936 + InCert Software Corporation + Ruben E. Brown + rbrown&incert.com +12937 + Infradig Systems + Andrew Davison + andrew&infradig.com +12938 + Innovative Technology Solutions, Inc. + Pete Stevenson + pete&itsits.com +12939 + Modus (Scotland) Ltd + Paul Mitchell + pmitchell&modus-scotland.co.uk +12940 + Pedestal Networks + Faye Ly + faye&pedestalnetworks.com +12941 + Press-Data + Andre Konopka + andre.konopka&presse-data.de +12942 + SANSHA ELECTRIC MFG.CO.,LTD + Masaru Nishizuka + nisizuka&sansha.co.jp +12943 + Sengena + Nicolas Dimitrijevic + n001&sengena.com +12944 + Sockeye Networks, Inc. + Bradley Dunn + bdunn&sockeye.com +12945 + SoftProject GmbH + Martin Antes + martin.antes&softproject.de +12946 + State Services Commission + Leighton Corban + leighton.corban&ssc.govt.nz +12947 + SyntheSys Secure Technologies Inc. + Hal Fitch + hfitch&synthesysusa.com +12948 + Syred data systems + Mani Muthiah + mani&syred.com +12949 + T-Online France + Eng Ming Hung + mheng&t-online.fr +12950 + Venngo Inc. + Chris Davis + chris&venngo.com +12951 + VerizonWireless + Joe Convery + joe.convery&verizonwireless.com +12952 + warpFactor Inc. + Doug Grove + grovedc&warpfactor.com +12953 + Xandros Corporation + Matt Maynard + ldap-admin&xandros.com +12954 + WM-data SDC a/s + Leif Albjerg Nielsen + lenes&wmdatasdc.dk +12955 + Metarete s.r.l. 
+ Carlo Todeschini + tode&metarete.it +12956 + Aarhus Universitet + Allan Egesbaek + ae&adm.au.dk +12957 + BLUE MARS GmbH + Christian Bauer + christian.bauer&bluemars.de +12958 + Questerra + Thomas Endo + thomasaendo&gmail.com +12959 + am professional services + Ferdinand Hoffmann + hoffmann&s.de +12960 + Andelina Corporation + Kshemendra Paul + kshemendra&earthlink.net +12961 + Andrew Hewett Consulting + Andrew Hewett + hewett&web.de +12962 + Decru, Inc. + Nobu Fukatsu + nobu&decru.com +12963 + DIMONsoft + Masyana Besbashennaya + daemon&hotbox.ru +12964 + ftlight.net + Mattias Nordstrom + matta&ftlight.net +12965 + New Media Data Marketing, Inc. + Scott Goodman + scott&searchbc.com +12966 + Next Generation Systems, Inc. + Martin H Davis Jr + mdavis&ngs-hq.com +12967 + Petrobras - Petroleo Brasileiro S.A. + Antiogenes M. dos Santos + antiogenes&petrobras.com.br +12968 + Menno Pieters (formerly 'Stelvio') + Menno Pieters + iana-assigned-numbers&menno.pieters.cx +12969 + The Rockefeller University + Lawry Persaud + persaud&rockefeller.edu +12970 + Urban Development Co. + Mohamed Azim Ahmed + azim&udcgroup.net +12971 + bigdom.com + Eugene Ventiana + eventi&hotmail.com +12972 + Callion Electronics Co., Ltd. + Yi Zhong + zhong_yi_ta&hotmail.com +12973 + Wuhan Public Information Co.Ltd. + Shicewei + scwei&whol.com +12974 + barrigon.com + Felipe Castro + felipe.castro&wanadoo.es +12975 + brainaid + Eddie C. Dost + ecd&brainaid.de +12976 + coWlan + Philip Julius Florian Poten + philip&cowlan.at +12977 + Level II Inc. + Ron Skinner + rskinner&leveltwo.com +12978 + Michael Fromme EDV-Beratung + Michael Fromme + fromme&fromme-edv.de +12979 + Optimacy Corporation + Tom Costandine + tom.costandine&optimacy.com +12980 + Person co., Ltd. + Yamaguchi Toru + sm&zenius.co.jp +12981 + Rotterdam CS + Aad Nales + aad.nales&rotterdam-cs.com +12982 + Synergy International Ltd + Dan Richardson + dan.richardson&synergy.co.nz +12983 + TBS INTERNET + JP Donnio + tag-snmp-enterprise&tbs-internet.com +12984 + Terraplay Systems AB + Jonas Jonsson + jonas.jonsson&terraplay.com +12985 + University of St Andrews + Duncan Brannen + dbb&st-andrews.ac.uk +12986 + Verlagsgruppe Straubinger Tagblatt / Landshuter Zeitung + Roland Hebertinger + hostmaster&idowa.de +12987 + Vineyard.NET, Inc. + Eric W. Bates + iana&vineyard.net +12988 + Vistorm Limited + Rhodri Davies + mib&vistorm.com +12989 + XIP + Phil Champon + flah&phess.org +12990 + Star Communication Network Technology Co., Ltd. + WeiZhong Li + liwz&startimes.com.cn +12991 + California College of Arts & Crafts + Marvin G. Dunn + mdunn&ccac-art.edu +12992 + T&A SYSTEME GmbH + Till Bockenheimer + till.bockenheimer&ta-systeme.com +12993 + weitlandt. Communication + Klaus Stein + ks&weitlandt.com +12994 + Abisoft Ltd. + Yury Novitsky + novym&abisoft.spb.ru +12995 + Accet Network Inc + Li Wang + li.wang&accetnetwork.com +12996 + adconsys AG + Uwe Ahrendt + hostmaster&adconsys.de +12997 + Advanced Biometrics, Inc. + Alan Chedalawada + achedalawada&attbi.com +12998 + Agile Software + Francis T. Leong + francis.leong&agile.com +12999 + Altiris + Leslie Bonsteel + lbonsteel&altiris.com +13000 + Anglia Polytechnic University + I.M. Kitching + i.m.kitching&apu.ac.uk +13001 + Anuvio Technologies + Jerry Cattell + jerry.cattell&anuvio.com +13002 + Augsburg College + Brad Christ + christ&augsburg.edu +13003 + Banca I.M.I. + Alessandro Pengue + apengue&bancaimi.it +13004 + beamNet + Thomas Viehmann + beam&beamnet.de +13005 + Bitrage, Inc. 
+ Mark Halliday + mhalliday&bitrage.com +13006 + CAE-Technik Schmitt + Nikolaus Schmitt + n.schmitt&scaet.de +13007 + California Regional Intranet, Inc. + John W Davidson + cto&cari.net +13008 + CanPrint Communications Pty. Ltd. + Matthew Horoschun + mhoroschun&canprint.com.au +13009 + Coma + Fredrik Jonson + fredrik&teamcoma.dhs.org +13010 + Computer Business Sciences Ltd. + Igor Altshul + igor&talkie.co.il +13011 + Consejo General del Notariado + Fernando Fernandez Rey + soporte&notariado.org +13012 + Cramer Systems Limited + James Pullen + james.pullen&cramer.com +13013 + Denisowski Consulting + Paul Denisowski + pdenisowski&nc.rr.com +13014 + Dreamcode Software Inc. + Eric Bridgwater + eric&ericbridgwater.com +13015 + Eastern Electronics Co., Ltd + Ford Chen + ford-chen&mail.eec.com.tw +13016 + eMagic.com + Chris Frey + chris_frey&mgic.com +13017 + e-Solutionist Inc. + Joseph Trudeau + jtrudea&esolutionist.net +13018 + ETH Zurich, Department of Computer Science + Peter Bircher + peter.bircher&inf.ethz.ch +13019 + ETSI + Ultan Mulligan + pex&etsi.fr +13020 + Florida International University + Maria Rosa Drake + maria&fiu.edu +13021 + Halifax Cetelem Credit Ltd + Tom Bowman + tom.bowman&halifaxcetelem.com +13022 + handhelds.org + Nick Duffek + duffekn&handhelds.org +13023 + Hub2b + Arnaud Duchamp + aduchamp&hub2b.com +13024 + Imagistics International Inc. + Lee Crystal + Lee.Crystal&Imagistics.com +13025 + Inferno Labs + Geir Thomassen + snmpadmin&in.fer.no +13026 + IT Intergroup ApS + Johnny Schultz + js&itintergroup.com +13027 + IXOS Software AG + Markus Beck + markus.beck&ixos.de +13028 + Kuokoa Networks, Inc. + Ravi Dara + rdara&kuokoa.com +13029 + Managed Service Partners International + Robert Albertson + ralbertson&mspintl.com +13030 + Monitor724 Services Limited + Harman Ng + harmanng&monitor724.com +13031 + NextGen Business Solution, Inc. (previously 'neix,Inc.') + Takehiko Saito + saito_takehiko&nextgen.co.jp +13032 + Reach Technologies + Todd Cochran + tcochran&reachapps.com +13033 + DragonBoxSolar + Lou Hutchinson + louhutchinsonjr&icloud.com +13034 + SoftNet Systems, Inc. + Samuel Du + shdu&yahoo.com +13035 + Sonangol + Charles Crouch + charles.crouch&netquotient.com +13036 + Stradient, Inc. + Chih-Ang Chen + cachen&stradient.com +13037 + Subnetworx Corporation + Christopher R Straley + cstraley&subnetworx.com +13038 + TELE Greenland Inc. + John Siegstad + luk&tele.gl +13039 + Tennessee Technological University + Annette Littrell + abl&tntech.edu +13040 + The Horde Project + Charles Hagenbuch + chuck&horde.org +13041 + Think Dynamics + Gabriel Iszlai + giszlai&thinkdynamics.com +13042 + Trust Company of America + Kyle Moore + kmoore&trustamerica.com +13043 + University of Wisconsin - River Falls + Marlys Nelson + marlys.a.nelson&uwrf.edu +13044 + Unternehmensberatung Rahn + Frank Rahn + info&frank-rahn.de +13045 + Uptime Devices, Inc. + Jean-Paul Daemen + jpd&uptimedevices.com +13046 + VisionShare Inc + Amy Coulter + amy.coulter&visionshareinc.com +13047 + Welliver Enterprises + Bill Welliver + hww3&riverweb.com +13048 + Xorba, Inc. + Richard Alvarez + xorba&bellsouth.net +13049 + Yacc Labs Ltd. + Simon Standley + si&yacc.com +13050 + Ydilo Advanced Voice Solutions S.A. + Juan Luis Garcia Rodriguez + JuanLuis.Garcia&ydilo.com +13051 + MetaSolv Software, Inc. + Leslie Angus + langus&metasolv.com +13052 + Telefonbau Arthur Schwabe GmbH & Co KG + Dieter Fischer + dfischer&tas.de +13053 + TIS System Service Inc.
+ Tatsuo Taniguchi + guchi&tis.co.jp +13054 + Ram Corp + Garret Halpin + gar&dol.ie +13055 + ShangHai JingLun Technologies CO.,LTD. + RunBin Ma + marb.sh&jinglun.com.cn +13056 + Tai Ping Life Insurance Co, Ltd. + Xue Zhong Sheng + xuezs&tplic.com +13057 + Hays DSIA France + Gilles Lami + gilles.lami&hays-dsia.fr +13058 + National Hockey League + Grant Nodine + gnodine&nhl.com +13059 + Ahold Supermercados + Juan Antonio Ortega de Oliveira + juan-antonio.ortega&ahold.es +13060 + Airpath Wireless, Inc. + Ed Whitesell + edw&airpath.com +13061 + AlphaC srl + Alessandro Morelli + alex&alphac.it +13062 + Ascom + Bruno Vigier + bruno.vigier&ascom.fr +13063 + Cambian + Nicholas Ardlie + nardlie&cambian.com +13064 + Claymountain Solutions Oy + Mauri Sahlberg + Mauri.Sahlberg&claymountain.com +13065 + Colomsat S.A. + Moisés David Rincón D'Hoyos + admin&colomsat.net.co +13066 + Connaught Air Services + Mike Barlow + mike.barlow&cas-logistics.com +13067 + CSH + Hatano Hirokazu + tcsh&tcsh.csh.sh +13068 + CzajSOFT + Przemyslaw Wegrzyn + czajnik&czajsoft.pl +13069 + Diveo do Brasil Telecom. LTDA + Eduardo G. Coutinho + ecoutinho&diveo.net.br +13070 + Drutt Corporation + Peter Larsson + peter.larsson&drutt.com +13071 + ESDS + Vincent Negrier + vnegrier&esds.com +13072 + Gallery IP Telephony Ltd. + David Rawet + david.rawet&g-ipt.com +13073 + gate5 AG + Jan Ludewig + nnreg&gate5.de +13074 + Labyrinth Connections + Matthew Bretherton + mrb&labyrinth.net.au +13075 + My Linux ISP + Brian Walters + brian&mylinuxisp.com +13076 + MyCompany + Ivo Peksens + ivo.peksens&energo.lv +13077 + NetWolves Technologies Corporation + Steve Clark + sclark&netwolves.com +13078 + NSSoft, Inc + Neeraj Jain + nsjain2000&yahoo.com +13079 + Persay LTD + Michael Salmon + michael.salmon&persay.com +13080 + Persona Inc. + Darren Richer + dricher&personainc.ca +13081 + Pretax Systems Oy ltd + Mauri Sahlberg + mauri.sahlberg&pretax.net +13082 + Shanghai Sunrise Electronic Technology Co. Ltd. + Lizhen + lizhen&sunrise-sh.com +13083 + Signal Technology Corporation + Jeffrey Krukonis + Jeffrey.Krukonis&sigtech.com +13084 + Sonae Distribuição Brasil S.A. + Cleber De Conto Pettinelli + deconto&sonae.com.br +13085 + StreamSec + Henrick Hellstrom + henrick&streamsec.se +13086 + Yamagata Prefectural Government + Yoshikazu Hayashi + jouhou&hnc.pref.yamagata.jp +13087 + Vaelit Group. + Remi Philippe + r.philippe&vaelit.com +13088 + Adquira España + Pablo M. Perez Ayala + pmperez&adquira.com +13089 + Aristotle University of Thessaloniki, Department of Physics + Triantafillos Hantziantoniou + t.hatziantoniou&physics.auth.gr +13090 + Artifact Entertainment + Chris Gray + chrisg&artifact-entertainment.com +13091 + Bethel College and Seminary + Brent J. Nordquist + ssl-admin&bethel.edu +13092 + Cellcloud Technologies Pvt Ltd + Chirag Parekh + chirag&cellcloud.com +13093 + College of Education + Aaron D. 
Moss + tri&coe.missouri.edu +13094 + Contego Solutions LLC + Jason Loving + jloving&contego.net +13095 + Dalhousie University + Bruce Hudson + bruce.hudson&dal.ca +13096 + Dan of Steel + Dan Campbell + abuse&danofsteel.com +13097 + Dascom Technology Co.Ltd + Song Yantao + songyt&mail.dascom.com.cn +13098 + dpa AFX Wirtschaftsnachrichten GmbH + Thomas Fahle + technik&dpa-afx.de +13099 + Drybridge Consulting + Arthur Colman + colman&drybridge.com +13100 + fun communications GmbH + Achim Stahlberger + achim.stahlberger&fun.de +13101 + GETEDES + Chouki Aktouf + chouki.aktouf&getedes.com +13102 + Hauni Maschinenbau AG + Sven Kleinecke + sven.kleinecke&hauni.com +13103 + Ilevo AB + Lennart Johannesson + lennart.johannesson&ilevo.com +13104 + Infraservices Corporation + Brad Hoyt + brad&infraservicescorp.com +13105 + Ingenium Technology Srl + Pierangelo Repetti + pierangelo.repetti&ingeniumtech.it +13106 + JLF Network + Javier Ledesma + iana&jlf.ch +13107 + JMatica Srl + Mauro Bertapelle + mauro.bertapelle&jmatica.com +13108 + Linuxlab + electuz + el&linuxlab.co.kr +13109 + Liontech Co., Ltd. + Kwangsoo Lee + kslee&liontech.co.kr +13110 + Manageable Inc. + Jeff Hassell + jeff&b-manageable.com +13111 + mcgu.net Consulting + Stephen McGuinness + mcgu&mcgu.net +13112 + MDS Proteomics Inc. + Ken Bantoft + kbantoft&mdsp.com +13113 + MindTribe + Jerry Ryle + jerry&mindtribe.com +13114 + Nextra Investment Management SGR S.p.A. + Delli Zuani Emiliano + emilianodellizuani&nextrasgr.it +13115 + Open Connections + JetPin + jetpin&tm.net.my +13116 + P & J Systems Support Station + JetPin + jetpin&tm.net.my +13117 + Plat'Home Co.,Ltd. + Ichiro Kamiya + kamiya&plathome.co.jp +13118 + PointDx, Inc. + David Ahn + ahn&pointdx.com +13119 + RAmEx Ars Medica, Inc. + Ramesh Rampertab + ramesh.rampertab&ramex.com +13120 + RASilient Systems, Inc. + Starry Chan + sschan&rasilient.com +13121 + Signatron Technology Corporation + James Zagami + zagami&signatron.com +13122 + Software Research Associates, Inc. + Noriyuki Soda + inet-admin&sra.co.jp +13123 + Starbak Communications Inc. + Benjamin Pinkerton + pinkerton&starbak.net +13124 + Terra Marketing + Reed Jackson + reedjackson&mac.com +13125 + TEYTEL, S.A. + JUAN CARLOS FERNANDEZ GARACHANA + joancarles&sumi.es +13126 + themountain + Johan Larsson + lajo&ds.hj.se +13127 + Thomas Jefferson University Hospital + Drew Zebrowski + drew&jefferson.edu +13128 + Universe Software + Support + support&universesoftware.co.uk +13129 + UPMC + Jean-Luc Munier + ldapmaster&jussieu.fr +13130 + Nevion AS + Arne-Johan Martinsen + ajmartinsen&nevion.com +13131 + de Passievruchten + Erik P. Otto + epo1968&hotmail.com +13132 + Softing Europe S.A. + Maurizio Delmestri + maurizio.delmestri&softingeurope.com +13133 + Michelin + Nicolas Bertolami + nicolas.bertolami&FR.michelin.com +13134 + Founder Broadband Network Technology Co.,Ltd + Xiao Bing + xbing&founderbn.com +13135 + SJTeK + Moon Joon Ko + mjko317&lycos.co.kr +13136 + AlexS.DE Private Network + Alexander Sarreiter + alexs&alexs.de +13137 + Biocare sarl + Elie Feghali + biocare&inco.com.lb +13138 + Boiled Frog Trading Co-operative + Matthew Emmett + matt&emmett.ca +13139 + caribNav Holdings, LLC + Leighton Esdaille + lfesdail&yahoo.com +13140 + Cluster File Systems, Inc. + Peter J. Braam + braam&clusterfs.com +13141 + FDK CO.LTD + Kenji Hanakawa + kenji_hanakawa&fdk.co.jp +13142 + I M Consultancy Pty Ltd + Ismael Matos + imconsul&bigpond.net.au +13143 + I&SI S.p.A. 
+ Fabrizio Rossi + frossi&isisw.com +13144 + Jakob Hatteland Computer AS + Geir Harajuvet + geir.harajuvet&hatteland.com +13145 + Klik Systems Inc. + Gersham Charles + mail&kliksys.com +13146 + Klomp + Vincent Partington + vinny&klomp.org +13147 + Leadfly Technologies Co., Ltd. + Albert Chen + albertchen&feya.com.tw +13148 + LeanLogistics + Andy Bass + andyb&leanlogistics.com +13149 + Lee Printing, Inc. + Chris Lee + cmlee&leeprinting.com +13150 + m&m consulting intl + Kevin Mork + kevin&pfs1.biz +13151 + MB Consultants + Mark Burton + mark&the-burtons.org +13152 + Octalis S.A. + Jean-Francois Gobbers + iana-oid&octalis.com +13153 + RFSAW + William C Bonner + wbonner&rfsaw.com +13154 + RPost, Inc. + Jay Cai + jcai&rpost.com +13155 + Shimanek + Joseph Shimanek + joe&shimanek.net +13156 + SwitchPoint Networks, Inc + Kevin Crandall + kcrandall&switchpointnetworks.com +13157 + Tsinghua Unisplendour Bitway Networking Technology Co.,Ltd. + Yong Chen + cy&thunis.com +13158 + Universitaet Muenster + Michael Kamp + nic&uni-muenster.de +13159 + ZF Friedrichshafen AG + Diana Hodapp + diana.hodapp&zf.com +13160 + Cerca.com S.r.l. + Aldo Armiento + aldo&cerca.com +13161 + Quattro Software Limited + Glenn Robinson + glenn.robinson&quattroconsulting.co.uk +13162 + Software Quality Engineering + Mickey Epperson + mepperson&sqe.com +13163 + Ministry of Justice, Finland + Martti Karjalainen + martti.karjalainen&om.fi +13164 + CyberSuperStore, Inc. + Nico van Niekerk + nico&cybersuperstore.com +13165 + 9105 - 1938 Québec Inc. + Durand Christophe + christophe.durand&videotron.ca +13166 + Acamar Systems + Cheng-Lee Nee + cnee&acamarsystems.com +13167 + Oeko.neT Mueller & Brandt + Toni Mueller + support&oeko.net +13168 + Aries e-Publishing + Glenn Retsky + tradebullion&netzero.net +13169 + UNIPOWER, LLC. - IPS (formerly 'C&D Technologies, Inc.') + Cliff Murphy + cliff.murphy&unipowerco.com +13170 + caresys GmbH + Thomas Lohmueller + kontakt&caresys.ch +13171 + Cilys + Francois Maheux + francois.maheux&cilys.com +13172 + Dark Side of the Moon Software + Harold Abrker + hvb&dsms.com +13173 + DILAX Intelcom GmbH + Denny Gebel + iana&dilax.com +13174 + Eglin Air Force Base + Todd Tuckey + tuckey&eglin.af.mil +13175 + Eurofer + De Leeuw Guy + G.De_Leeuw&eurofer.be +13176 + European Bank for Reconstruction and Development + Martin Marshall + marshalm&ebrd.com +13177 + Firmaprofesional, SA + Jorge Bustos + jbustos&firmaprofesional.com +13178 + Fremont Computer Corporation + Queena Zhou + queena&tyanchina.com +13179 + Internet Information Group + Graham Maltby + admin&iig.com.au +13180 + JR Software + Janne Ruskeeniemi + janne.ruskeeniemi&kolumbus.fi +13181 + Junot Systems Inc. + Pete Slater + pslater&junotsystems.com +13182 + kapsch TrafficCom AG + Walter Pachlinger + walter.pachlinger&kapsch.net +13183 + Kevab Thebasestationcompany + Tauno Ruuska + tauno.ruuska&kevab.com +13184 + Lanscape Netzwerkdienste GmbH + Bastian Bluemel + b.bluemel&lanscape.de +13185 + Laqtib, Inc. + Abdesalam Laqtib + alaqtib&yahoo.com +13186 + Light Computing Services Ltd + Kenneth Duffill + kd&lightcomputingservices.ltd.uk +13187 + MDlink GmbH + Marcel Thranhardt + support&mdlink.de +13188 + Miami University Libraries + Andrew Farler + drew&lib.muohio.edu +13189 + MITAKE + Steven Lee + steven&mitake.com.tw +13190 + Northrop/Grumman- INRI division,NTCSS Support System + Franklin G. 
Richards + frichards&logicon.com +13191 + OneAccess + Pascal Kesteloot + pascal.kesteloot&oneaccess-net.com +13192 + PCLinX snc + Luigi Noris + gigi&pclinx.it +13193 + Portelco (Asia) Limited + Nelson Sung + nelson.sung&portelco.com +13194 + PORTSITE GmbH + Tim Reuter + reuter&portsite.de +13195 + Proditec + Florent Laudren + flaudren&proditec.fr +13196 + SBS GmbH & Co OHG CWS 41 + Markus Preller + markus.preller&cip.sbs.de +13197 + Security Crossing + Ken Graf + ken&securityxing.com +13198 + Signtrust + Eduward van der Zee + e.vanderzee&dpcom.de +13199 + SiMind Inc + Ki-Nam Choi + kchoi&simind.com +13200 + BPK Penabur + Irwan Hadi + irwanhadi&bpkpenabur.or.id +13201 + TJH Internet SP + TJ Hardman Jr + thardman&earthops.org +13202 + United States Advanced Network, Inc. + Lawence K. Newfield + lnewfield&usaninc.com +13203 + University of Pretoria + Trevor Nortje + tnortje&it.up.ac.za +13204 + VIA NET.WORKS Deutschland GmbH + Markus Warg + m.warg&vianetworks.de +13205 + WebSwimmer LLC + Jon Steer + jsteer&webswimmer.net +13206 + BSM Consulting + Brian Moyers + bsmoyers&hotmail.com +13207 + NTNU + Vidar Faltinsen + faltin&itea.ntnu.no +13208 + ARGSOFT + Gabriel Kniznik + gkniznik&argsoft.com +13209 + Aptilo Networks AB + Pontus Soderstrom + pontus.soderstrom&aptilo.com +13210 + Armstrong Group of Companies + Ed Hassler II + ehassler3&agoc.com +13211 + AT4.NET INTERNET Y COMUNICACION, SL + Antonio Mari Vallbona + hostmaster&at4.net +13212 + bTrade, Inc. + Kerri Apple + kapple&btrade.com +13213 + Centre Tecnologic de Manresa + Josep Maria Pinyol Fontseca + jm.pinyol&upc.es +13214 + Comunitel Global S.A. + Fernando Cela Diaz + fcela&comunitel.es +13215 + Cougaar + Sebastien Rosset + srosset&nai.com +13216 + Digital Age Design + Arnaud Geyskens + arnaud.geyskens&dad.be +13217 + FH Furtwangen + Claus-Peter Rohner + rohner&fh-furtwangen.de +13218 + Flowring Technology + J.J. Yang + jjyang&flowring.com +13219 + Gliwickie Stowarzyszenie Internautów + Pawel Zmyslowski + hocus&dronet.gliwice.pl +13220 + Interconnected Generation + Yves De Muyter + yves&connected.be +13221 + Kubota Systems Inc. + Naoki Kawaharasaki + naoaki&os.ksi.co.jp +13222 + Millicom Peru + Felipe Flores + fflores&millicomperu.com.pe +13223 + mwain Corp + Private Mwain + mwain&freenet.de +13224 + Net Integration Technologies + Patrick Patterson + ppatters&net-itech.com +13225 + Network Services Group, LLC + Donald Knopf + dk&networkservicesgrp.com +13226 + Network Technology Solutions + R.Vijayakumar + ntsvijay&yahoo.co.in +13227 + Officescape + James Wood + jwood&officescape.com +13228 + Redline Telecommunications SA (pty) Ltd + Wietz Joubert + wjoubert&redlinesa.com +13229 + Samad Pty Ltd + Alexander Samad + alex&samad.com.au +13230 + San Diego Supercomputer Center + Mason Katz + mjk&sdsc.edu +13231 + SchlumbergerSema Japan + Kenji Nishi + knishi&tokyo.sema.slb.com +13232 + SpottedOwlRecipes.com + Tyler Godfrey + godfreyt&att.net +13233 + Hockey Services (formerly 'Vimia GmbH') + Stephen Winnall + steve&winnall.ch +13234 + TELPRO Ltd. 
+ Bondar Igor + postmaster&telpro.ru +13235 + Teralink Communications + Kang Kyung Wan + kwkang&teralinkcom.co.kr +13236 + The Brick Warehouse Corporation + Darryl Plunkie + dplunkie&thebrick.com +13237 + US Process, Inc + Rick Kitts + rkitts&usprocess.com +13238 + Vienna University of Economics and Business Administration + Alexander Bergolth + bergolth&wu-wien.ac.at +13239 + Vodatel + Darkol Budor + budor&vodatel.hr +13240 + VoIP Pty Ltd + Eric Martin + emartin&voip.com.au +13241 + Wieland Gmeiner + Wieland Gmeiner + e8607062&stud4.tuwien.ac.at +13242 + InfoCrypt + Morgasov Ilya + infocr&aha.ru +13243 + Chip PC + Ronit Pasternak + ronit&chippc.com +13244 + Duniya Technologies, Inc. + Brian Ashburn + bashburn&duniya.net +13245 + HST + Eunki Oh + eunki75&hismartech.com +13246 + Alliente, Inc. + Todd Maeshiro + todd.maeshiro&alliente.com +13247 + City of Naantali + IT Administration + root&naantali.fi +13248 + General Electric Power Management + Maciej Goraj + maciej.goraj&indsys.ge.com +13249 + ICEM Technologies GmbH + Sebastian Berthold + sysadmin&icem.de +13250 + INECO + Hugo Calzada + hugo.calzada&ineco.es +13251 + Ingenix + Timothy Peterson + timothy.g.peterson&ingenix.com +13252 + Kodansha Ltd. + Akio Ito + digital-admin&kodansha.co.jp +13253 + ky-on + Till Toenges + tt&kyon.de +13254 + London Stock Exchange + Sean Foley + sean.foley&accenture.com +13255 + net mobile AG + James Liang + james.liang&net-m.de +13256 + Rabid Badgers + Dustin Tinklin + dustin&tao.eolith.net +13257 + Registradores de la Propiedad y Mercantil de España + Emilio J. Martinez + emilio&corpme.es +13258 + Secretaria de Educación del Distrito (Bogota D.C Colombia) + Miguel Angel Ruiz + angel&ebstudio.com +13259 + solution-x Software GmbH + Florian G. Pflug + fgp&solution-x.com +13260 + Study Area Training Center + Kenny Chen + netman&study-area.net +13261 + T-Systems ITS GmbH - DMS/PP + Sven Schroeder + sven.schroeder&t-systems.com +13262 + ZbW - Zentrum fuer berufliche Weiterbildung + Gerhard Thimm + gthimm&zbw.ch +13263 + Professional Systems Integration + Chris Marschall + marshal_chris&hotmail.com +13264 + Atmel Germany GmbH + Sandra Koehl + sandra.koehl&hno.atmel.com +13265 + Atofina + Selim Baccar + sbaccar&kernel-networks.fr +13266 + C2 Creation + Calvin Seto + calvin&c2creation.com +13267 + Urban Traffic Management and Control (UTMC) (formerly 'City of York Council') + Ian Towner + ian.towner&york.gov.uk +13268 + Clark Hill PLC + Seth L. Blumberg + sethb&clarkhill.com +13269 + Complete Network Solutions, Inc. + Chuck Moss + cmcnsiana&mossc.com +13270 + Computing Center of Northeastern Univ. + Network Center + fuzw&mail.neu.edu.cn +13271 + Current Technologies + Darrell Way + dway&currenttechnologies.com +13272 + DaniloMassa + Danilo Massa + snmp&danilomassa.it +13273 + Design IT Solutions Ltd + Ariel Rodgers + arielrodgers&yahoo.com +13274 + Digital Media Lab Corporation + Toshiaki Kasai + tkasai&dmlcorp.co.jp +13275 + Golden Square Post Production + Dave Stinson + dave&gspp.co.uk +13276 + gsta + Xu Guowang + xugw&gsta.com +13277 + IEEE 802.11 + Stuart J. Kerry + stuart&ok-brit.com +13278 + iNabling Technologies + Rati Naren + rati_naren&yahoo.com +13279 + Intergate Browser Systems, Inc. + Andre Cruz + andre&intergate.com.ph +13280 + Internet Express, Inc.
+ Steve Langasek + vorlon&netexpress.net +13281 + IPSYN + Bergamo Jean-Louis + jlb&ipsyn.net +13282 + Kaparel Corporation + Jacques Houde + jhoude&kaparel.com +13283 + Laitilan Puhelinosuuskunta + Ala-Olla Sami + sami.ala-olla&laitilanpuhelin.fi +13284 + London Stock Exchange + Richard Lister + richard.lister&accenture.com +13285 + LYCOS FRANCE + Philippe Gramoulle + philippe.gramoulle&mmania.com +13286 + MDM I&C + Minsik Kim + mskim&mdminc.net +13287 + Membrain Technologies + Robert Banz + banz&membrain.com +13288 + MIS@MHIT + Tung_Kuang Wu + tkwu&mis.mhit.edu.tw +13289 + Neoscale Systems + Sanjay Sawhney + sanjay&neoscale.com +13290 + nextSource, Inc. + Doug Schmidt + dschmidt&nextsource.com +13291 + Ogangi Corporation + Oscar Anzola + oanzola&ogangi.com +13292 + feel3 UG (haftungsbeschraenkt) (formerly 'oneShell Internet Services GmbH') + Michael Maier + info&feel3.de +13293 + OTTO International (Hong Kong) Ltd. + Albert Wong + albert.wong&ottoasia.com +13294 + ph03n1x.net - Development + Matthew W. Yucha + ph03n1x&ph03n1x.net +13295 + Portugalmail + Nuno Lopes + nuno.lopes&portugalmail.pt +13296 + SOFTINTEGRO + Oleg Shulika + oleg&softintegro.ru +13297 + Soliton Associates Limited + Mike Symes + msy&soliton.com +13298 + SYNER S.A. + Daniel Rey + daniel.rey&mcrd.ch +13299 + Syntlogo GmbH + Giovanni Baruzzi + giovanni.baruzzi&syntlogo.de +13300 + Syrén Software AB + Tomas Carlsson + tomas.carlsson&syrensoftware.se +13301 + Teksouth Corporation + Jay Winks + jay.winks&teksouth.com +13302 + The Advantage Media Group + Gregory S Baker + greg&dgmedia.net +13303 + The Gillette Company + Vincent Perrin + vincent_perrin&gillette.com +13304 + T-Systems Solutions for Research GmbH + Marius-Julian Tamas + Marius-Julian.Tamas&t-systems.com +13305 + BFH Bern University of Applied Sciences + Daniel Baumann + bfh-linux-sysadmin&lists.bfh.science +13306 + University of Montana + Roger Holtom + unixadmin&selway.umt.edu +13307 + V.R.A.M. Rt. + Péter Bagári + peter.bagari&vodafone.hu +13308 + Városmajori Gimnázium + Bence Bärnkopf + barnkopf&debyl.vmg.sulinet.hu +13309 + Visanti A/S + Michael Kristensen + miv&visanti.com +13310 + wwwolf + Tim Heap + tim&wwwolf.co.uk +13311 + Joys Online, Inc + Jun Song + jypsong&hotmail.com +13312 + Factor-TS Ltd. + Vladimir D Novikov + novikov&factor-ts.ru +13313 + ANALOG + DON BALUNOS + don.balunos&analog.com +13314 + Blank + Bastian Blank + iana&blank.eu.org +13315 + BlueCat Networks + Richard Hyatt + rhyatt&bluecatnetworks.com +13316 + Byteworks + Michael Bischof + mb&byteworks.ch +13317 + Celite Systems Inc. 
+ Andrew Kiggins + andrew.kiggins&celitesystems.com +13318 + Clarity AG + Juergen Froese + juergen.froese&clarity-ag.net +13319 + Dipartimento di Biochimica e Biotecnologie Mediche + Vittorio Lucignano + lucignano&dbbm.unina.it +13320 + Griffin Development + Thomas Griffin + tom&thegrif.net +13321 + Incard spa + Vincenzo Palazzo + vpalazzo&incard.it +13322 + Peel Teaching Assistants Association + Darryl Mabee + admin&darrylmabee.com +13323 + SoftLogic Solutions + Kevin McFall + kevinm&sls.co.nz +13324 + Web-Addr LLC + Ray Lance + rlance&web-addr.com +13325 + Associated Press + Tom Eck + teck&ap.org +13326 + Bermai + Paul Edwards + pedwards&bermai.com +13327 + htds + Kristofer Hoelzer + kh&htds.de +13328 + Apoapsis Ltd + Richard Fleming + richard&apoapsis.com +13329 + Area51 + Stephan Scheying + stephan&scheying.de +13330 + Ceyoniq Incorporated + Ted Garrett + t.garrett&ceyoniq.com +13331 + CICAIA - Universita` di Modena e Reggio Emilia + Cantaroni Roberta + roberta.cantaroni&unimore.it +13332 + Credit Union Central Alberta Limited + Allen Reid + areid&cucentral-ab.com +13333 + ClearSky Technologies, Inc. (formerly 'Data On Air') + Frank Danielson + fdanielson&csky.com +13334 + Hewett Inc. + Jeff E. Hewett + jhewett&gi.com +13335 + Internet Information Services + Dean Choate + dch4806&fibernetcc.com +13336 + IronHide Corp + Vikram D. Gaitonde + vikram&ironhide.com +13337 + Kochan und Partner + Thomas Paduch + paduch&kochan.de +13338 + Krocus Communications Oy + Mikko Koponen + mikko.koponen&helsinki.fi +13339 + Lange Software GmbH + Benno Lange + blml&lvsw.de +13340 + Lee Bradshaw + Lee Bradshaw + lee&bigpond.net.au +13341 + Live Systems Integration + Ted garrett + ted.garrett&tedgarrett.com +13342 + Unassigned + Removed 2002-10-07 + ---none--- +13343 + Networking Laboratory of DMIS, BUTE + Csaba TÓTH + toth&mit.bme.hu +13344 + Ningbo Success Information Industry CO.,LTD. + Ren ChaoHong + cyber_cactus&21cn.com +13345 + Pacificorp + Scott Kuntz + scott.kuntz&pacificorp.com +13346 + Pace France (formerly 'Philips CE STB') + Bernard Saby + bernard.saby&pace.com +13347 + Qbranch + Marten Gustafson + magu02&qbranch.se +13348 + Randall Kunkee + Randall Kunkee + randy&randallkunkee.com +13349 + RMP & Associates + Richard Neish + richardn&rmp.com.jm +13350 + SDN Online, Inc. + Jim Kimble + jkimble&zkey.com +13351 + Shanghai Mining Computer Software Co., Ltd. + Zhang Yuan + yuan.zhang&stockstar.com +13352 + SNS Solutions + Kyung-mo Kim + doublekm&snssol.co.kr +13353 + Sonofon + Karsten Thygesen + kay&sonofon.dk +13354 + SpaceTime Co., Ltd. + Eunjeong Go + gowill&ako.net +13355 + Stodge.org + Salim Fadhley + sal&stodge.org +13356 + Summit Imaging, Inc. + Darren K. Meyer + dkmeyer&summit-imaging.com +13357 + SunGard Bi-Tech + James Bennett + james.bennett&sungardbi-tech.com +13358 + Tim O Callaghan + Tim O Callaghan + timo&dspsrv.com +13359 + TM Ryder Insurance Agency, Inc. + Ralph W. 
Maddigan III + tmryder&msn.com +13360 + Trinity Convergence + Jonathan Beattie + dbrown&trinityconvergence.com +13361 + Université François Rabelais + Patrice Garnier + patrice.garnier&univ-tours.fr +13362 + University of Applied Sciences Stralsund + Marcus Linke + Marcus.Linke&fh-stralsund.de +13363 + University of Southern California + Shelley Henderson + shelley&usc.edu +13364 + ViaBridge + Jim Greer + jgreer&viabridge.com +13365 + ViewBridge Technologies, Inc + Joe Anderson + janderson&viewbridge-tech.com +13366 + W*H Interactive Ltd + Grant Taylor + gjt&whi.co.nz +13367 + Whale Queens Org + Hubert Quarantel-Colombani + lurenzu&whale-queens.org +13368 + Zand Elektronic + Dan Sandberg + dan.sandberg&medsci.uu.se +13369 + Zeta Associates Incorporated + Scott Zimmerman + msz&zai.com +13370 + 7+ Computer Networks Ltd. + Peter Darvas + darvas&plus7.hu +13371 + Qualityware Informática Ltda. + João Carlos Essenfelder Filho + joe&qwnet.com.br +13372 + UGC + Jean-Marc Weeger + jmweeger&ugc.fr +13373 + Apt Minds, LLC + Matt Midboe + matt&aptminds.com +13374 + Axitus + Hywel Jones + hywelbowdenjones&hotmail.com +13375 + Brainzsquare Inc. + Jeong Hwan Park + jhpark&brainz.co.kr +13376 + Carambole + Fredrik Steen + fredrik.steen&carambole.se +13377 + Conceptis Technologies Inc + Gord R. Lamb + glamb&conceptis.com +13378 + Concretio India Private Limited + Farooque Khan + farooquek&concretioindia.com +13379 + Crysec GmbH + Joerg Villiger + j.villiger&netprotect.ch +13380 + DOTSTAR Technology + Stephen Livezey + livezeysm&attbi.com +13381 + DRP Data + Dirk Prusok + dprusok&copper.net +13382 + INTEC International GmbH + SNMP Admin + snmp&intec-internatonal.com +13383 + iNTELEGO + Zoran Lorencic + info&intelego.net +13384 + Letins Corporation + Keynes Chiang + keynes&letins.com +13385 + Mount Saint Mary College + Arthur Emerson III + ae3&msmc.edu +13386 + Nagar + Raj Vardhan Singh + raj&shastry.com +13387 + NeuroStar Solutions + Arman Sharafshahi + armand&neurostarsolutions.com +13388 + Ticoon Technology Inc. + David Aspinall + davida&ticoon.com +13389 + Wraith Computer Systems + Benjamin C. Brodfuehrer + jolt-iana&wraithsys.net +13390 + AV Automotive Group + Ken speich + kenny&bmwofarlington.com +13391 + Callaway Golf + Paul M Vincent + paulv&callawaygolf.com +13392 + Cavena Image Products AB + Michael Collins + michael&cavena.se +13393 + Cite-SI + David Azoulay + dazoulay&cite-si.com +13394 + Colligo Networks + Nick Sawadsky + nsawadsky&colligo.com +13395 + CommerceQuest, Inc. + Edwin Fine + efine&commercequest.com +13396 + DANVILLE ASSOCIATES + Diran Ajetunmobi + danville&skannet.com +13397 + DemandTec Inc. + Mahesh Tyagarajan + mahesh.tyagarajan&demandtec.com +13398 + SicherByte GmbH Dr. Ralf Schwedler + Dr. Ralf Schwedler + schwedler&sicherbyte.com +13399 + Dune Semiconductor + Assaf Harel + assafh&dunenetworks.com +13400 + Vertiv Tech Co.,Ltd. (formerly 'Emerson Network Power Co.,Ltd.') + Yan Zebo + yan.zebo.eric&vertiv.com +13401 + ETE Software + Eric Evans + etevans&etesoftware.com +13402 + Exent Technologies Ltd. + Ami Klein + aklein&exent.com +13403 + Foton-2000 Kft. + Fekete Gyorgy + fgyuri&foton2000.hu +13404 + fSONA Communications + Brian Dewan + bdewan&fsona.com +13405 + Inside Products + Nalini Elkins + nalini_elkins&inside-products.com +13406 + Institute of Medical and Veterinary Science + Warwick Smith + warwick&imvs.sa.gov.au +13407 + Link2it Corp. 
+ Dean Wallraff + deanw&link2it.com +13408 + Macrobyte Resources + Seth Dillingham + seth&macrobyte.net +13409 + Mega System Technologies, Inc. + Samuel Peng + samuel&megatec.com.tw +13410 + National Public Radio + Susan Ator + sator&npr.org +13411 + ms Neumann-Elektronik GmbH + Detlev Hartwich + dhartwich&neumann-elektronik.com +13412 + Ophios GmbH + Thomas Meckel + meckel&ophios.com +13413 + Olabisi.com + Adedayo Olabisi + adedayo&olabisi.com +13414 + Pairlink + An Kee Min + amin&pairlink.com +13415 + Schlag&rahm GmbH + Reto Hirt + rhirt&schlagundrahm.ch +13416 + SecurityMatrix, Inc. + Dawn Hollingsworth + dawn.hollingsworth&securitymatrix.com +13417 + silicon broadcasts, Inc. + Nirmaljit Singh + nirmaljit&mailcity.com +13418 + SKKU Information and comunication lab + Ji-A Ha + jaha&songgang.skku.ac.kr +13419 + SRS SAKURA Internet Inc. + Kunihiro Tanaka + tanaka&sakura.ad.jp +13420 + Surgitec Pte Ltd + Stefan Lippstreu + sl&surgitec.net +13421 + Tango Telecom Limited + Jack Downey + jack.downey&tango.ie +13422 + Uniscope + Carlos Villegas + cav&uniscope.co.jp +13423 + universite de valenciennes + Jean-Luc Petit, Jean-Guy Avelin + Jean-Luc.Petit&univ-valenciennes.fr, + avelin&univ-valenciennes.fr +13424 + UPC Netherlands + Ronald van den Berg + rovdberg&upc.nl +13425 + Virtual Space Research + Sean Donnellan + postmaster&donnellan.de +13426 + WZab Software + Wojciech Zabolotny + wzab&acn.waw.pl +13427 + Artesyn Embedded Technologies + Colin Cameron + colin.cameron&artesyn.com +13428 + JAPAN STORAGE BATTERY CO., Ltd. + Yukio Tada + yukio_tada&gs.nippondenchi.co.jp +13429 + Queen Elizabeth School Old Students' Association + Tsoi Heung Sang + hstsoi&staff.ss.qesosa.edu.hk +13430 + Shanghai Online Bussiness Co.,Ltd. + Hong Gang + hong.gang&onlinebusiness.com.cn +13431 + A2E Ltd + Andrew Dobbing + adobbing&a2etech.com +13432 + CampusEdge Apartments + Michael Lewis + mlewis&gobcg.com +13433 + CodeZombie.com + Peter Ford + p_ford&mindspring.com +13434 + DYSER S.R.L. + Jose Fernandez + dyserfer&caoba.entelnet.bo +13435 + General Magic, Inc. + Sandy Joe + sandy_joe&genmagic.com +13436 + iControls, Inc. + Dae-Joo Kim + amglove&icontrols.co.kr +13437 + libits + Robert L. Baer + robert.baer&libits.com +13438 + Logic Eastern(I) Pvt Ltd + Gangadhar Sabat + gsabat&logiceastern.com +13439 + Melle Service GmbH + Steffen Schuetz + schuetz.steffen&melle.de +13440 + Network Telephone + Sunitha Elango + sunitha.elango&networktelephone.net +13441 + Norfolk Southern Corp + Chip Morgan + chip.morgan&nscorp.com +13442 + OpenVES + TS Vreeland + tvreeland&taconic.net +13443 + OZIS + Marcel Settels + marcel.settels&microbais.nl +13444 + Robert Bird and Partners + John Ward + johnw&robertbird.com.au +13445 + Silicon Cocoon Pty.Ltd. + Basil C.P. Borun + bborun&siliconc.com +13446 + Singapore Cable Vision Limited + Matt Ho + matt&scv.com.sg +13447 + The Training Mann + Nigel Mann + nigel-mann&partners-in-it.co.uk +13448 + Transat Technologies, Inc. + David K. Hui + dhui&transat-tech.com +13449 + trusdata.com + Song Ling Han + songling.han&trusdata.com +13450 + TT&S Tecnologia e Sistemas Ltda. + Newton Kiyotaka Miura + nmiura&ttstelecom.com.br +13451 + Network Information Center, univ.of buaa + Wei Li + liwlych&hotmail.com +13452 + My Lan Guys + Michael Klatsky + michael&mylanguys.com +13453 + Lanmix Technology Co. + George Wu + george&lanmix.com +13454 + Hays Supply Chain + Gilles Lami + gilles.lami&hays-dsia.fr +13455 + Deutsche Rentenversicherung Rechenzentrum Leipzig + Norbert H.
Kunth + norbert.kunth&rzleipzig.de +13456 + A.E.T. Europe B.V. + Haaino Beljaars + beljaars&aeteurope.nl +13457 + ARM Holdings plc + Nick Stevenson + nick.stevenson&arm.com +13458 + ATMedia GmbH + Diethelm Schlegel + schlegel&atmedia.de +13459 + CAIS + David A Hughes + da.hughes&cais.com +13460 + COMELIS + Eric Bajus + tech.dir&comelis.fr +13461 + eBsuccess Solutions Inc. + Chih Ming Liang + cmliang&ebsuccess.com +13462 + estei + Vincent Demarquez + admin&estei.fr +13463 + Fort Wayne Community Schools + Randall Wert + randy.wert&fwcs.k12.in.us +13464 + GCOM Technologies Co.,Ltd. (formerly 'GREENNET TECHNOLOGY CO.,LTD.') + Deng Yu + dengyu&szgcom.com +13465 + Ingate Systems AB + Per Cederqvist + ceder&ingate.com +13466 + Jabber, Inc. + Joe Hildebrand + jhildebrand&jabber.com +13467 + Medasys + Sebastien Bahloul + sebastien.bahloul&medasys.org +13468 + Montclair State University + Brian Kelly + brian.kelly&montclair.edu +13469 + netLibrary, Inc. + Alan Deger + adeger&netlibrary.com +13470 + SecureGUARD GmbH (formerly 'Otto Security & Software Technologie GmbH') + Martin Rummerstorfer + mrummerstorfer&secureguard.at +13471 + PowerWAN, Inc + Siddana Gouda + sgouda&powerwan.com +13472 + RTS Realtimes Systems (Deutschland) AG + Jan Fiegert + support.net&rtsgroup.net +13473 + SafeWeb, Inc. + Zach White + zwhite&safeweb.com +13474 + Sierra Systems Group Inc. + Nicholas Drayer + nicholasdrayer&sierrasystems.com +13475 + SnapTrack, Inc. + Rajeev Gautam + plp&palmcorp.com +13476 + Telemant Corp. + KD Jung + kdjung&tmn.co.kr +13477 + Shanghai HAORUN Technologies Ltd. + Zhaohua Meng + mzh&eyou.com +13478 + Clear2Talk Ltd + John Deer + john&encrypt.co.uk +13479 + dstl + John Price + jdprice&dstl.gov.uk +13480 + Anyware Technology, Inc. + Ming Huang + ming&anywareusa.com +13481 + Appload Nordic AB + Fredrik Wahlberg + fredrik.wahlberg&appload.net +13482 + ATL Telecom + Martyn Wyatt + martyn.wyatt&atltelecom.com +13483 + Aviri + Bruce Krysiak + bruce&aviri.com +13484 + Choice One Communications + Gregory Rohman + grohman&choiceonecom.com +13485 + Comindico + Matthew Waite + matthew.waite&comindico.com.au +13486 + Princeton University + OIT Network Systems + networking&princeton.edu +13487 + CorVu Corporation + Lloyd Breckenridge + lloyd&corvu.com.au +13488 + DNCP, LLC + Michael Turner + maturner&lucent.com +13489 + Eastern Goldfields Senior High School + Nicholas Robinson + nickr&egshs.wa.edu.au +13490 + Eplication + Barak Azulay + bazulay&eplication.com +13491 + EuroMACC Ltd. + Peter Szemesy + p.szemesy&euromacc.hu +13492 + First Data Merchant Services + Ken Lisagar + netadmin&firstdata.com +13493 + Global Science & Technology, Inc. + James Noles + noles&gst.com +13494 + Illinois Mathematics and Science Academy + Steve Terrell + spt&imsa.edu +13495 + INAT GmbH + Werner Krings + werner.krings&inat.de +13496 + Institute of Informatics, Slovak Academy of Sciences + Miroslav Dobrucky + dobrucky.ui&savba.sk +13497 + IT3 Consultants + Gratien D'haese + gdha&it3.be +13498 + KKE, Inc + John Hermes + hostmaster&kakde.com +13499 + Metoda S.p.A. + Giuseppe Callipo + g.callipo&lineargruppo.it +13500 + MPL AG + MPL NOC + noc&mpl.ch +13501 + Nexstar Financial Corporation + Douglas E. Lecy + dlecy&nexstar.com +13502 + OneSquared + Sam Sargeant + sam&onesquared.net +13503 + Orchestria Limited + Andrew Stickler + andrew.stickler&orchestria.com +13504 + Polkomtel S.A. 
+ Adam Pienczykowski + adam.pienczykowski&polkomtel.com.pl +13505 + PROCOS AG + Torsten Rendelmann + torsten.rendelmann&procos.com +13506 + Psionic Software, Inc. + Craig Rowland + crowland&psionic.com +13507 + Radcom Software Romania + Iulian Pavaloaia + iulian.pavaloaia&radcom.ro +13508 + SIGDCI + David Tassel + david.tassel&ville-lehavre.fr +13509 + SKY Computers + Jim Santos + santos&skycomputers.com +13510 + Somoma County Water Agency + Doug VanLeuven + doug&scwa.ca.gov +13511 + SpaceIP + Victor Klejman + klejman&attglobal.net +13512 + TeamWarrior Ltd + Matt Smuts + matt.smuts&cartezia.com +13513 + The Technology Partner + Massimo Fubini + Massimo.Fubini&ttpcompany.com +13514 + Unassigned + Removed 2011-11-02 + ---none--- +13515 + Unitech Networks .Ltd + David Wang + davidw2unitechnetworks.com +13516 + Viloke Oy + Panu Hallfors + panu.hallfors&viloke.fi +13517 + Xi'an Jiaotong University + Han Bo + bohan&mail.xjtu.edu.cn +13518 + ABN AMRO NL/CCC + Hans van Hattum + hans.van.hattum&nl.abnamro.com +13519 + Amsoft Systems India Inc + Aman Teja + aman.teja&amsoft.net +13520 + BizWebApps + Jean-Pierre Harrison + jp&selec.net +13521 + Sysnight + Jan Pedersen + jan&sysnight.dk +13522 + Account Synchronization Project + Kervin Pierre + kervin &blueprint-tech.com +13523 + Australian Industry Group + Phil Ware + aigplw&yahoo.com +13524 + Barham House Publishing, Inc. + Elizabeth Barham + soggytrousers&yahoo.com +13525 + Blarg! Online Services, Inc. + Marc Lewis + marc&blarg.net +13526 + Clickmarks Inc. + Shailesh Garg + shailesh&clickmarks.com +13527 + Digitasaru + Joseph Pingenot + ianacontact&digitasaru.net +13528 + DLESE + Mike Wright + mright&ucar.edu +13529 + DreamLAN Network Consulting Ltd. + Peter Kuo + peter&dreamlan.com +13530 + Fahnestock & Co. + Chris McElhone + cmcelhone&fahnestock.com +13531 + Commtia Systems S.A. + Raimon Casals + raimon.casals&commtia.com +13532 + Pennant Systems + Calvin Seto + calvin_seto&hotmail.com +13533 + Petr Zahradnik Computer Laboratory + Petr Zahradnik + clexpert&clexpert.cz +13534 + Proteus Mobile, Inc + Gerald Hewes + it&proteusmobile.com +13535 + IonPipe. Inc. + Eric White + eric.white&ionpipe.com +13536 + Serome Technology, Inc. + Tomy Jung + tomy&serome.co.kr +13537 + Shine Global + Stephen Woolerton + sdw&shineonline.co.nz +13538 + VideoBureau, Inc. + Anirban Chowdhuri + achowdhuri&videobureau.com +13539 + Voorhout Data Connection BV + Wim Voorhout + wim&vdcinfo.nl +13540 + Shenzhen Keybridge Communications Co.,Ltd. + Wang Buyun + buyunmail&163.net +13541 + DBV-Winterthur Versicherungen + Stephan Horn + stephan.horn&dbv-winterthur.de +13542 + Datapac s.r.o. + Roman Sladek + sladek&datapac.sk +13543 + 100 Percent IT Ltd + David Blundell + david.blundell&100percentit.com +13544 + Acopia Networks, Inc. + Mike Berger + mberger&acopianet.com +13545 + Adverb Software + Allen Gates + agates&adverb.com +13546 + Alta A/S + Stefan Barfred + postmaster&alta.net +13547 + Axiom Systems Limited + David Gandy + dgandy&axiomsystems.com +13548 + BakBone Software Inc + Richard Potter + richard.potter&bakbone.co.uk +13549 + Banque de France + Jeremy Martinville + jeremy.martinville&banque-france.fr +13550 + Celstream Technologies + Anirudh Mathuria + anirudh.mathuria&celstream.com +13551 + Cofunds LTD + Simon Bennett + simon.bennett&cofunds.co.uk +13552 + CQG, Inc + Andrew Hoying + farmers&cqg.com +13553 + Cyber Space Systems, Inc. 
+ Kalin Dikov + dikov&c-inc.com +13554 + DTN SpeedNet Services, LLC + Rick Nordman + rick&dtnspeed.net +13555 + Enterprise Business Solutions + Hugo Jimenez-Perez + hjimenez&ebs.com.mx +13556 + Filtronic plc + Mike O'Carroll + snmp&filtronic.com +13557 + FLOORgraphics, Inc. + Tobin J Edwards + toby&floorgraphics.com +13558 + Gary Reynolds + Gary Reynolds + gary&touch.asn.au +13559 + Global Technology Associates, Inc. + Paul Emerson + paul&gta.com +13560 + Grant County PUD #2 + Steve Wilson + swilson&gcpud.org +13561 + Griffith University + Jolyon Suthers + j.suthers&mailbox.gu.edu.au +13562 + Information Sciences Institute, USC + technical support + action&isi.edu +13563 + Integrated Software Technologies Inc. + Jeff Douglass + ist&att.net +13564 + Logistik World GmbH + Helmut Sailer + hsailer&lwplus.de +13565 + Lucent Technologies India Ltd + Vishnu Vardhan M Reddy + mvureddy&lucent.com +13566 + Neolytica + Ryan Dibble + rdibble&neolytica.com +13567 + UBIqube Solutions + Yves Dumarest + yves.dumarest&ubiqube.com +13568 + Neustar, Inc., a TransUnion company + Mary Barnes + mary.barnes&team.neustar +13569 + OIZ + Schuler Marc + marc.schuler&oiz.stzh.ch +13570 + Security Integration, Inc. + George A. Boitano + gboitano&securityintegration.com +13571 + TESIS SYSware GmbH + Rene Bauer + ren&tesis.de +13572 + TJEdwards + Tobin J Edwards + toby&tjedwards.com +13573 + Tran Empire Industries + Duc P Tran + ducphuc&tranempire.com +13574 + TVG Technologies Ltd. + Shalom Crown + shalom&realvision.co.il +13575 + University of Windsor + Robert Mavrinac + mavrinac&uwindsor.ca +13576 + WAGO Kontakttechnik GmbH + Stefan Zudse + stefan.zudse&wago.com +13577 + Youjin + Heechul Kim + jijisa&airheechul.com +13578 + apogee solutions + Arun Mehra + apogee&vsnl.com +13579 + LMN Associates + Lakshmanan Venkataraman + lakshmanan_v&hotmail.com +13580 + Europ Assistance France + Frederic Williams + frederic.williams&unilog.fr +13581 + TAMAGAWA UNIV. + Yuuichi Kamada + support&tamagawa.ac.jp +13582 + Nordija A/S + Kristian Sørensen + snmp&nordija.com +13583 + UCBIRL + David E. Miller + david.miller&uchsc.edu +13584 + Schering AG + Sabine Demitrowitz + sabine.demitrowitz&schering.de +13585 + Mangalore Refinery And Petrochemicals Limited + S.M. Khare + smk&mrplindia.com +13586 + Woodside Networks, Inc. + Ashok Ranganath + AshokRanganath&woodsidenet.com +13587 + Universite de Franche-Comte + Patrice Koch + patrice.koch&univ-fcomte.fr +13588 + Acegi Technology Pty Limited + Ben Alex + ben.alex&acegi.com.au +13589 + Air Traffic Control & Business Systems GmbH + Ulrich Bertsch + ulrich.bertsch&ac-b.de +13590 + Americredit Corp + Jay Lentz + jay.lentz&americredit.com +13591 + BE Intelligent Systems + Dan Crowson + dcrowson&crowson.com +13592 + Blunk Microsystems + Tim Stoutamore + stout&blunkmicro.com +13593 + CommerceFlow, Inc.
+ Inder Sabharwal + inder&commerceflow.com +13594 + Company Watch Limited + Lance French + lfrench&companywatch.net +13595 + EMKA electronic AG + Heiko Fischer + h.fischer&emka-electronic.de +13596 + Fachhochschule Bingen + Klaus Lang + lang&fh-bingen.de +13597 + FEIS, University of Hertfordshire + Matt Ross + m.g.ross&herts.ac.uk +13598 + General Dynamics Mission Systems Canada (formerly 'General Dynamics Canada') + Rene Allard + rene.allard&gd-ms.ca +13599 + Golden Triangle Online + Martin Kokkelink + iana&golden.net +13600 + greenmokey.net + Aaron Forster + omadawn&sonic.net +13601 + Hwa Chong Junior College + Chen Shiyuan + csy&hjc.edu.sg +13602 + IS4-O + Thomas Hefner + thomas.hefner&is4-o.com +13603 + KPNQwest Czechia s.r.o. + Jakub Kopecky + jakub.kopecky&kpnqwest.com +13604 + Liberate Technologies + Przemek Struminski + przemeks&liberate.com +13605 + Maverick workgroup + Wei Bohua + grayhare&public.nn.gx.cn +13606 + Nicomaque + Ronan Amicel + technique&nicomaque.org +13607 + Unassigned + Returned 2004-05-28 + ---none--- +13608 + OpenHandHome Inc + Michael Bell + mikebell90&yahoo.com +13609 + Palladium Consulting Inc. + Sebastian Good + sebastian&palladiumconsulting.com +13610 + Q Solutions + Martin Colley + qsolutions&telkomsa.net +13611 + RBC Data + Rene Bechmann + rene&rbcdata.com +13612 + Schlund + Partner AG + Rene Schumann + rene&schlund.de +13613 + StrataVerio + James Davidson + james&strataverio.com +13614 + Tartu Kõrgem Kunstikool + Lemmit Toomet + lemmit&art.tartu.ee +13615 + TH + Thomas Hefner + thomas.hefner&is4-o.com +13616 + THALES SYSTEM INTEGRATION GmbH + Ingmar Schmnidt + ingmar.schmidt&de.thalesgroup.com +13617 + Those Interactive Guys + Scott Dukes + scott&dukes.co.za +13618 + Transim Technology Corp. + Matthew Richardson + mrichardson&transim.com +13619 + UnderCoverWear Collection + Craig O'Shannessy + craig&ucw.com.au +13620 + University of Wisconsin - Eau Claire + Thomas Paine + paineta&uwec.edu +13621 + Wind Telecom + Tomasz Kiraga + t.kiraga&wind.pl +13622 + Yaina + Joerg Reuter + jreuter&yaina.de +13623 + point it GmbH + Oliver Jaenicke + contact&point-it.de +13624 + EASTCOM-BUPT INFORMATION TECHNOLOGY CO.,LTD. + Chengfei Yang + yangchengfei&ebupt.com +13625 + pohlcity.de + Christian Pohl + chris&pohlcity.de +13626 + Rastel + Dmitry Khazov + market&rastel.ru +13627 + Netfor, Inc. + Jeff Seifert + jseifert&netfor.com +13628 + SerCon GmbH + Carsten Horst + carsten.horst&sercon.de +13629 + A.G.Tech Network Service + Akihiko Gotanda + a-gota&agt.ne.jp +13630 + AFX News Ltd + Neil Fitzpatrick + neil.fitzpatrick&afxnews.com +13631 + Arax Communications + Jilkin Yuri + yuri&araxinfo.com +13632 + ATOMiX DESiGN + Henry Studenborg + hjstukenborg&mac.com +13633 + AVA Afzar Co. + Anooshiravan Merat + merata&avadom.com +13634 + B&B Computersysteme GbR + Dieter Brandmeier + Dieter.Brandmeier&bbcomputer.de +13635 + BSWS + Henning Brauer + hb-iana&bsws.de +13636 + Corestreet, Ltd. + David Engberg + dave+iana&corestreet.com +13637 + Cresent Software Products (PVT) Ltd. + Faried Nawaz + faried.nawaz&cressoft.com.pk +13638 + Expert Systems IVR (Asia) Co., Ltd. + Wilson Fan + wilson.fan&esi-asia.com +13639 + FibroTEC + Stephan Fink + stephan.fink&gmaare.migros.ch +13640 + Jon Wood + Joh Wood + jon&jellybob.co.uk +13641 + Kremlin Computing Limited + Simon Devlin + simon.devlin&kremlin-computing.com +13642 + LITAS, s.r.o. 
+ Ludovit Launer + litas&stonline.sk +13643 + MIMOS Berhad + Syahrul Sazli Shaharir + sazli&mimos.my +13644 + Netgate + Jim Thompson + jim&netgate.com +13645 + OKB MEI + Vladimir Moshkin + moshkin&okbmei.msk.su +13646 + Oninit Ltd + Paul Watson + paul&oninit.com +13647 + Ostsee-Zeitung GmbH & Co. KG + Torsten Eymann + torsten.eymann&ostsee-zeitung.de +13648 + Pointred Technologies, Inc + Rick Balocca + rbalocca&enhancelon.com +13649 + Polizei Rheinland-Pfalz + Horst Barth + horst.barth&polizei.rlp.de +13650 + Relativity + Bouten,R.J. + r.bouten&hccnet.nl +13651 + RexKramer + Sven Gerlach + sven.gerlach&rexkramer.de +13652 + ---none--- + G.E. de Jong + g.e.dejong&student.utwente.nl +13653 + Seoul Mobile Telecom, Inc. + Seong Geun Lee + leesg&seoultel.co.kr +13654 + SHENZHEN XIFENG NETWORK TECHNOLOGIES CO.,LTD + Li WeiMing + limingstart&263.net +13655 + SpaceMonster Gaming Network + Bryn Moslow + bryn&spacemonster.org +13656 + SUNNYbell Technology + sunnybelltech support + proman2k&sunnybelltech.com +13657 + Proximus Luxembourg SA + Vincent Piocel + vincent.piocel&proximus.lu +13658 + Teddy's Network + Junzo Sato + jsato&fc.kuh.kumamoto-u.ac.jp +13659 + The Worley Companies + John McCawley + jmccawley&worleyco.com +13660 + U.S. Army CECOM + William Duncan + william.duncan&mail1.monmouth.army.mil +13661 + The University of Manchester + Andy Moore + andy.moore&manchester.ac.uk +13662 + Universidad de Palermo + Alejandro Popovsky + apopov&palermo.edu.ar +13663 + VRGroup.com + Victor G. Bonilla + victor&bonilla.com +13664 + VSS KB, a.s. + Martin Petak + mpetak&vsskb.cz +13665 + Webs-are.us + Erin Fortenberry + kahn&deadbbs.com +13666 + Wolf Hill, s.r.o. + Jiri Gubik + vjisa&wolfhill.cz +13667 + Wozzle Internet Solutions + Zac DeLesDernier + zac-iana&wozzle.com +13668 + Yes Technology + SangHo Kim + mercury&yestech.co.kr +13669 + Cubic Transportation Systems, Inc. + Pradip Mistry + pradip.mistry&cubic.com +13670 + San Diego Data Processing Corporation + Bill Reynolds + breynolds&sddpc.org +13671 + TechTV + Dan Benson + danbenson&techtvcorp.com +13672 + General Dynamics - Decision Systems/Secure Products + Phil Barker + Phil.Barker&gd-decisionsystems.com +13673 + Internet Częstochowa + Piotr Steclik + master&icz.com.pl +13674 + PESA Switching Systems, Inc. + Doug Bailey + dbailey&pesa.com +13675 + bandwave internet + Benjamin Polson + bsp&bandwave.net +13676 + French Data Network + Sylvain Vallerot + bureau&fdn.org +13677 + Kb/TEL Telecomunicaciones SA de CV + Fernando Bracho + fbracho&kbtel.com +13678 + acter AG + Andreas Petralia + petralia&acter.ch +13679 + AltiGen Communications Inc. + Hui Yang + hyang&altigen.com +13680 + Aviva Solutions Inc. + Jean-Francois Levesque + jeanl&avivasolutions.com +13681 + CHILDLOCK solutions + Marcus Huenerbein + huenerbe&childlock.de +13682 + Ecole des Mines de Nantes + Laurent Menu-Kerforn + laurent.menu-kerforn&emn.fr +13683 + EFTechnologies, Inc. + Joseph Schwarz + is&eftech.net +13684 + ELPROMA Elektronika + Piotr Trojanek + trojanek&elproma.com.pl +13685 + Faculty of Mathematics and Computer Science, Nicholas Copernicus University of Torun + Rafal Metkowski + rafmet&mat.uni.torun.pl +13686 + go win + Wheib Bendjebbour + wheib&gowin.fr +13687 + Humboldt-Universitaet zu Berlin + Burckhard Schmidt + hostmaster&hu-berlin.de +13688 + Icequake Networks + Philip Thiem + ptt&umr.edu +13689 + In Town Consulting LLC + Michael Mendelson + mmendelson&intowndenver.com +13690 + Madras Computer Vertriebsges.m.b.H.
+ Daniel Himler + d.himler&madras.at +13691 + Maison d'en France + Robert Silve + robert&mtice.fr +13692 + mediface + Chul Hyun Park + licence&mediface.com +13693 + Mirage Networks, Inc + Michael J. McDaniels + mjmcdaniels&yahoo.com +13694 + Nervus Systems Ltd + Rob Macgregor + rob.macgregor&dial.pipex.com +13695 + Neurosphere Consulting + Stephen J. Scheck + sscheck&gmail.com +13696 + Ohio Wesleyan University + Kirk A. Rustin + ktrustin&owu.edu +13697 + Paul Dlug + Paul Dlug + paul&nerdlabs.com +13698 + RunCircle Ltd + Roger McCalman + snmp&runcircle.co.uk +13699 + Sarvega Inc. + Nagendra Kolluru + nkolluru&sarvega.com +13700 + Scale Eight + Brian Lanier + blanier&s8.com +13701 + Sonic Software + Matt Rothera + mrothera&sonicsoftware.com +13702 + Store Media Engineering + Mark Williams + mark&dimm.de +13703 + SunDataCom pvt.Ltd., + Krishna L Baisetti + krishna&sundatacomm.net +13704 + TNO + ing. F. (Frederik) Bonte + f.bonte&telecom.tno.nl +13705 + Universitas Indonesia + Maman Sutarman + maman&cs.ui.ac.id +13706 + Volvo Event Management + Darren Hampton + Darren.Hampton&volvooceanrace.org +13707 + wasiliana solutions + Claude Nanjo + nanjoc&wasilianasolutions.net +13708 + AIC NETWORK + Molines Jacques + tech&aic.fr +13709 + The Bank of New York + Oryst N. Kunka + okunka&bankofny.com +13710 + LVM Versicherungen + Stephan Terhorst + s.terhorst&lvm.de +13711 + Primus Telecommunications Australia Pty Ltd + Tristan Goode + netops&iprimus.com.au +13712 + ARINC (Aeronautical Radio, Inc.) Roy L. Courtney + rcourtne&arinc.com + ---none--- +13713 + Gateweaver + Chris Macwell + cmaxwell&themanor.net +13714 + Reea + Adrian Tofan + adi&reea.net +13715 + Alescere + Greg Fitzgerald + gregfitzgerald&prodisinc.com +13716 + Caja Madrid + Javier Ripoll + jripoll&cajamadrid.es +13717 + Carreker Corporation + Jerry Bowman + jbowman&carreker.com +13718 + CCITRIAD + Denise Gimarc + denise.gimarc&ccitriad.com +13719 + CEGETEL SI + Denis Liotard + smpsbo&cegetel.fr +13720 + CoreMedia AG + Rolf Watermann + rolf.watermann&coremedia.com +13721 + Corning Optical Communications + David Torres + torresdd&corning.com +13722 + deot.net + Oren Held + oren&deot.net +13723 + Division of BITL + John Crowe + j.crowe&murdoch.edu.au +13724 + Erasmus University Rotterdam + Paul Kranenburg + kranenburg&few.eur.nl +13725 + EuroNetics + Tomas Fasth + iana-contact&euronetics.com +13726 + FH Bonn-Rhein-Sieg + Steffen Kaiser + skiana&mail.inf.fh-bonn-rhein-sieg.de +13727 + Fundação CPqD - Centro de Pesquisa e Desenvolvimento em Telecomunicações + Isidro Lopes da Silva Neto + isidro&cpqd.com.br +13728 + Fundació> Aplicació + Ramon Fons + fundacio&callusdigital.org +13729 + Funktional Solutions Group + Julian Coyne + julianc&funktional.com.au +13730 + Geobot, Inc + Sharad Raisinghani + sharad&geobot.com +13731 + Huth Engineering Inc + Edward Huth + edhuth&huthengineering.com +13732 + iDirect + Brian Snyder + bsnyder&idirect.net +13733 + IMServ Europe Ltd + Geoff Hamilton + geoff.hamilton&imserv.invensys.com +13734 + Institut fuer Theorie der Elektrotechnik, Universitaet Stuttgart + Ralf Lederle + ralf.lederle&ite.uni-stuttgart.de +13735 + Integra5 Ltd. 
+ Nimrod Gal-Oz + nimrod.gal-oz&integra5.com +13736 + Manufacture des Montres Rolex SA + Raphael Colliard + raphael.colliard&rolex-bienne.ch +13737 + Mark Wilcox & Associates + Mark Wilcox + mark&mjwilcox.com +13738 + MindMatics AG + Armin Barbalata + armin.barbalata&mindmatics.de +13739 + Pacesetter Electronics + Dale Puls + dpuls&pacesetterelectronics.com +13740 + Popwire Technology AB + Martin Akesson + hostmaster&popwire.com +13741 + Proinos, INC + Thu R Kyaw + tkyaw&proinos.com +13742 + Raritan Computer, Inc. + Allen Yang + allen&raritan.com +13743 + SGTE-IES + Isaert Nicolas + nicolas.isaert&sgte-ies.com +13744 + SkyTel, Inc. + Scott Humphries + scott.humphries&wcom.com +13745 + Plixer International, Inc. (formerly 'Somix Technologies, INC') + Marc Bilodeau + marc&plixer.com +13746 + UNIversity of Detroit Mercy + William F Allmendinger + allmenwf&udmercy.edu +13747 + Virtual Health Networks, Inc. + Donald K Bruce + drbruce&hc-vita.com +13748 + Wizzbit + Onno Gort + postmaster&wizzbit.nl +13749 + Madison Consulting Inc. + Hung Pham + hung.g.pham&verizon.net +13750 + Juergen Weber Unternehmensberatung + Juergen Weber + juergen&weber-falkensee.de +13751 + AGY Therapeutics Incorporated + Scott C. Lohr + slohr&agyinc.com +13752 + Amarna Software Productions + Michael Bell + mikebell90&yahoo.com +13753 + Ascent Computing Group Inc + Varghese Samuel + vsam&ascent-inc.com +13754 + Banco Zaragozano + Antonio Gascón Dominguez + agascon&bancozaragozano.es +13755 + Carleton College + Webmasters and Sysadmins + iana-tech-contact&carleton.edu +13756 + C-LOG International + Francisco Guiomar + dns&c-log.com +13757 + COXnet + COXnet Engineers + eng-disc&coxnews.com +13758 + cr-team.de + Thomas Bilger + thomas.bilger&cr-team.de +13759 + Examen, Inc + Richard Hanschu + richardh&examen.com +13760 + Fourth House Security Inc + Basit Hussain + basit&fourthhousesecurity.com +13761 + Glencore International AG + Thomas Traxel + thomas.traxel&ch.glencore.com +13762 + Innovery + Andrea Caccia + andrea.caccia&innovery.net +13763 + Johannes Passing Softwareentwicklung + Johannes Passing + jp&baernetwork.com +13764 + Khodayss Systems Limited + Manohar J + ess&khodayss.net +13765 + Life Time Fitness, Inc. + Jud McKee + jmckee&lifetimefitness.com +13766 + LOEWE Opta GmbH + Peter Wächtler + pwaechtler&loewe-komp.de +13767 + Metalogue Communications + Mike McCallister + iana&metalogue.com +13768 + Movaris + Malay Verma + mverma&movaris.com +13769 + mozilla.org + Dan Mosedale + ldap-schema&mozilla.org +13770 + MTA Solutions + Sergie Voropay + sergeiv&mtasolutions.com +13771 + Navic Systems, Inc., d/b/a Navic Networks, Inc. + Michael Byrnes + mbyrnes&navic.tv +13772 + NPULSE Software, Inc. + Stephen Earl + searl&npulse.com +13773 + NTELOS Inc. + Landon McDowell + smicontact&ntelos.net +13774 + Onetta, Inc. + Hao Li + hli&onetta.com +13775 + Pomona College + Michael Dickerson + mikey&cs.pomona.edu +13776 + Potlatch Corporation + Ed Moses + ed.moses&potlatchcorp.com +13777 + Rakuten Inc. 
+ Neal Sato + ops-corp-pen&mail.rakuten.com +13778 + Sociedade Diginet, LdaJacques + ludovico Zacarias + jack_ludov&ebonet.net +13779 + SUMIX Corporation + Artem Malyshev + artem&sumix.com +13780 + ubernul projekt + Drew Miller + xerox&ubernul.com +13781 + Universite d'AUVERGNE + Denis Pays + denis.pays&u-clermont1.fr +13782 + University of Dortmund, Department of Mathematics + Christian Becker + christian.becker&mathematik.uni-dortmund.de +13783 + Viking Society of SA + David de Groot + david.degroot&viking.org.au +13784 + WhidbeyNet + Mike Denka + mdenk&whidbey.net +13785 + YH + Min Zhu + minzhu&nudt.edu.cn +13786 + ZP system + Won-Sik Kim + wonsikkim&zpsys.com +13787 + PageMail Inc. + Scott Tulk + scott&page.ca +13788 + Dev-Labs Ltd. + Daniel Marczisovszky + marczi&dev-labs.com +13789 + Innovations GmbH + Markus Schaertel + markus.schaertel&innovations.de +13790 + U-King Communications Corp. + Alex Huang + frank&uking.com.tw +13791 + Associação dos Estudantes da Faculdade de Ciências e Tecnologia da Universidade Nova de Lisboa + Hugo Monteiro + adm&ae.fct.unl.pt +13792 + Active Telecom + Marc Alzina + malzina&net-tone.com +13793 + Acumen Sciences, LLC. + Brett Eisenberg + beisenberg&acumen-sciences.com +13794 + Beijing Educational Information Network + Kevin Chen + kevin&bjedu.com.cn +13795 + Blue Edge Bulgaria + Dimiter Dimitrov + dimiter&blue-edge.bg +13796 + Caliari Research S.p.A. Nicola + Babieri + customer.service&caliari.it +13797 + CIRAD + Bertrand Pitollat + bertrand.pitollat&cirad.fr +13798 + Globenet + Benjamin Sonntag + benjamin&globenet.org +13799 + Internet 78 + Michael Suszko Jr. + msuszko&fast.net +13800 + IS Services + Paul Reilly + paul.reilly&tcd.ie +13801 + IUFM des Pays de la Loire + Aït Omar Saâd + saad.aitomar&paysdelaloire.iufm.fr +13802 + Mundee Internet Services + Shane Mundee + shanemundee&hotmail.com +13803 + Navajo Law Enforcement Training Academy + George John + gj86025&aol.com +13804 + Pontifical College Josephinum + Ming Li + mli&pcj.edu +13805 + Southland Christian Church + Vic Mollett + vmollett&southlandchristian.org +13806 + SysOpen Plc + Jarkko Lietolahti + jarkko.lietolahti&sysopen.fi +13807 + Tpack + Søren Heilmann + sth&tpack.net +13808 + ubernul projekt + Drew Miller + xerox&ubernul.com +13809 + Université Jean Monnet Saint Etienne + Hervé GILIBERT + criter&univ-st-etienne.fr +13810 + University of Alaska + David Bantz + david.bantz&alaska.edu +13811 + Israel Discount Bank + Michael Koenigstein + xmk&il.ibm.com +13812 + SmartPrice SA + Marco Aurelio Vilaca + operacao&smartprice.com.br +13813 + Evalesco Systems ApS + Jakob Oestergaard + joe&evalesco.com +13814 + alfa Media Partner GmbH + Tobias Haas + tobias.haas&linopress.com +13815 + Angel Iglesias, S.A. + Imanol Silva + doi&ikusi.es +13816 + AppSense Technologies + Kevin Stone + kevin.stone&appsense.com +13817 + Auswärtiges Amt + Josef Mecklenburg + josef.mecklenburg&auswaertiges-amt.de +13818 + BHI + Phil Anderson + pandersen&hotmail.com +13819 + BTI Communications Co. + Jarrod Kinsley + jkinsley&bticc.net +13820 + CESKY TELECOM, a.s. + Tomas Sekera + tomas.sekera&ct.cz +13821 + CNI/Prairienet + Brynnen Owen + owen&uiuc.edu +13822 + Command Software Systems, Inc + Helmuth E. 
Freericks + hfreericks&commandsoftware.com +13823 + Comtarsia IT Services + Stefan Pfingstner + stefan.pfingstner&comtarsia.com +13824 + Consus Ltd + Rob Lyle + rob&consus.co.uk +13825 + Crisp Hughes Evans LLP + John B Bowles + jbowles&che-llp.com +13826 + Dept of Veterans Affairs + Leslie L Henderson Jr + leslie.henderson&mail.va.gov +13827 + EndRun Technologies LLC + Bruce M. Penrod + bmpenrod&endruntechnologies.com +13828 + ETM International / Austria + Wolfgang Frühwirth + wfrueh&etm.at +13829 + Flecha de Lima Associados + Paulo Tarso Flecha de Lima + ptarso&FlechadeLima.com +13830 + GED-SOFTWARE + Mathieu Chappuis + mathieu.chappuis&ged-software.com +13831 + Geo-Marine, Inc. + Randall Mundee + smundee&geo-marine.com +13832 + The Go Daddy Group, Inc. + Network Operations Center + noc&godaddy.com +13833 + Gobierno de Canarias + Octavio Ascanio + oascsua&gobiernodecanarias.org +13834 + iMach, Ltd. + Forrest W. Christian + forrestc&imach.com +13835 + inet-logistics GmbH + Reinhold Gander + tik&inet-logistics.com +13836 + Information and Logistics Consultancy + Eddie Victor + eddiev&ilc.co.za +13837 + IntroTec + Peter Salvisberg + peter.salvisberg&introtec.ch +13838 + KadeL Data Servis s.r.o. + Jirka Svetlik + svetlik&kadel.cz +13839 + MarkitServFX (formerly 'Logicscope Realisations Ltd') + Lance White + lance.white&markitserv.com +13840 + HYTE Technologies, Inc (formerly 'Media Driver, LLC') + Matthew Pavlovich + matt&hyte.io +13841 + MetiLinx + Larry Ketchersid + lketchersid&metilinx.com +13842 + Moser-Baer AG + Andreas Scheidegger + as.mb&mobatime.com +13843 + Ned Davis Research Group + Brett Lee + root&ndr.com +13844 + North East Worcestershire College + Chris Hills + chills&ne-worcs.ac.uk +13845 + Ohio Northern University + Glen Shere + g-shere&onu.edu +13846 + PatientKeeper, Inc. + Craig Fields + iana-contact&patientkeeper.com +13847 + Pearco Services + Benjamin Collins + bcollins&granderiver.net +13848 + rnix.com + Robert C. Nix + org.iana&rnix.com +13849 + Sepro Telecom + John Healy + john.healy&seprobilling.com +13850 + Static + Soyoung Ju + lorien&static.co.kr +13851 + Swisscom (Switzerland) Ltd + Jean-Marc UIrich + jean-marc.ulrich&swisscom.com +13852 + TopoLogica + Eduardo Moscoso Rubino + moscoso&moscoso.org +13853 + Tularik + Aaron Forster + ianainfo&tularik.com +13854 + Universitaet Mannheim, Rechenzentrum + Heide Hiltscher + heide.hiltscher&rz.uni-mannheim.de +13855 + Universitaet Oldenburg + Juergen Weiss + juergen.weiss&uni-oldenburg.de +13856 + University of Michigan Health System + David Hasselbach + davidch&med.umich.edu +13857 + Ursys Pty Ltd + Patrick Duff + patrick.duff&ursys.com.au +13858 + Eltek Valere Inc. (formerly 'Valere Power Inc') + Bill Dempsey + bill.dempsey&eltekvalere.com +13859 + Voxpilot + Dave Burke + david.burke&voxpilot.com +13860 + WASKO + Andrzej Bankowski + a.bankowski&wasko.pl +13861 + Wolf IT Concepts + Felix Wolf + wolf&it-concepts.info +13862 + XTec, Incorporated + Alberto Martin + amartin&xtec.com +13863 + Airwolf Systems, Inc. + Frederick Nwokobia + nwokobia&msn.com +13864 + MAMINOS, Inc. 
+ Steve Jacks + sjacks&maminos.com +13865 + Université Charles de Gaulle - Lille 3 + Arnaud Jayet + jayet&univ-lille3.fr +13866 + ACE TIMING + Charlie LE HOANGAN + charlie.lehoangan&acetiming.com +13867 + Adfinis AG + Michael Hofer + info&adfinis.com +13868 + Bahrain Credit + Adel Darwish + darwish&bahraincredit.com.bh +13869 + Blum Capital Partners, LP + Johan Martin + jmartin&blumcapital.com +13870 + Corporate Oxygen + Rakesh Aggarwal + raggarwal&coxygen.com +13871 + Falcon Software NV + Tijl Dullers + tijl.dullers&anubex.com +13872 + Federal University of Sao Paulo + Network Group + rede&dis.epm.br +13873 + FMC + Francois Monnet + noc&eurofmc.com +13874 + goepp.net + Daniel Goepp + iana&goepp.com +13875 + Interact + David Riccitelli + david.riccitelli&interact.it +13876 + Ionic Productions LLC + Brent A. Thompson + bat&ionicproductions.com +13877 + Kanda Tsushin Kogyo Co., Ltd. + Yoshio Nagasaki + nagasaki_y&at.kanda.co.jp +13878 + Kariboo Technologies SA + Maurice Galland + maurice.galland&kariboo.com +13879 + Natural Soft S.A. + Jaume Juni Estany + jaume.juni&natural-soft.es +13880 + ADS Networks Pvt. Ltd + K. Srinivas Aithal + ksaithal&ads-networks.com +13881 + neobee.net + Viktor Varga + vile&neobee.net +13882 + Netilla Networks + Paul Douglas + paul_douglas&netilla.com +13883 + New Morning Windows + Mike Forsman + mikef&newmorningwindows.com +13884 + Pepco AS + John Berntsen + john&pepco.no +13885 + Polycom, Inc. + Matt Parker + matt.parker&polycom.com +13886 + Prosodic Communications Inc. + Ian A. Marsman + imarsman&webstove.com +13887 + Red Roby Software, Inc. + Robert DeLuca + redroby&tampabay.rr.com +13888 + South Texas Lighthouse for the Blind + John Francis Lee + johnl&sotxlighthouse.org +13889 + Spriral Designs Inc. + Dan Pringle + digitals&netone.com +13890 + Teltier Technologies Inc. + Murali Swaminathan + murali&teltier.com +13891 + The Advantage Group + Dave Cole + davec&taglim.com +13892 + University of New Hampshire + William F. Costa + Bill.Costa&unh.edu +13893 + International Truck and Engine Corporation + Regina Goeing + regina.goeing&nav-international.com +13894 + LGNsys + Sora Son + sora&lgnsys.com +13895 + LaGrange School District 102 + Al Kirkus + al&dist102.k12.il.us +13896 + Atos Origin CMS / TSP + Bernard Bosse + bernard.bosse&atosorigin.com +13897 + Advanced Technologies Lab + Josef D. Allen + josallen&cs.fsu.edu +13898 + Alexander Horsch und Thomas Balbach IT Solutions + Alexander Horsch + label&flying-stone.de +13899 + Anka Systems, Inc + Young Kim + ykim&ankasystems.com +13900 + Blue Gargoyle Consulting, LLC + Marko A. Jennings + marko&bluegargoyle.com +13901 + Broadwave Inc. + Ken Roser + ken_roser&fastmail.fm +13902 + Business Internet Trends + Sabri Berisha + sabri&bit.nl +13903 + CAMH PET Centre + Sylvain Houle + shoule&camhpet.ca +13904 + Caymas Systems + Ashwin Baskaran + baskaran&caymas.com +13905 + Cemagref + Pierre Farissier + pierre.farissier&cemagref.fr +13906 + Chantry Networks Inc. + Mark Leonard + leonam&chantrynetworks.com +13907 + Compera + Alysson Prado + alysson.prado&compera.com.br +13908 + Croatian Physical Society, Student Section + Mario Juric + majuric&fizika.org +13909 + CTTC - Centre Tecnològic de Telecomunicacions de Catalunya + Carolina Pinart Gilberga + carolina.pinart&cttc.es +13910 + Das Büro am Draht + Fabian Hahn + fh&dasburo.com +13911 + Epsilon d.o.o. 
+ Alem Orlic + alem&epsilon.hr +13912 + Everest Connections + Ryan Pool + ryan.pool&everestgt.com +13913 + EVoto Team + Sergio Avila Jiménez + sergio&evoto.org +13914 + FreakySoft + Richard Perfect + richard.perfect&freakysoft.co.nz +13915 + Harry Rüter Software Solutions + Harry Rüter + h.rueter&gmx.net +13916 + Heimat di Andrea Rota + Andrea Rota + support&heimat.it +13917 + Hostcentric + Fred Im + fred.im&hostcentric.com +13918 + Inkasbank + Dmitry Chernyak + cdl&inkasbank.ru +13919 + Instinct Internet Solutions + Peter Lowe + pgl&instinct.org +13920 + Net6 (formerly 'Magic Hosting') + Gerard Grommek + gery&net6.at +13921 + NEBS + Frank Cini + hostmaster&nebs.com +13922 + nologin + Chad Thunberg + chadth&nologin.org +13923 + Peter Verhas + Peter Verhas + peter&verhas.com +13924 + Powell's Bookstores + Paul Buder + paulb&burnside.powells.com +13925 + Real Time Productions + Peter Lowe + peter&prague.tv +13926 + Road Tech Compter Systems Ltd + Mike Rattee + mikey&roadrunner.uk.com +13927 + SCCNET SOHO Consulting + Mario Oberli + mario.oberli&sccnet.ch +13928 + TC Communications + Ben Huai + bh&tccomm.com +13929 + Teragen Pty Ltd + Sebastian Filzek + sebastian&teragen.com.au +13930 + The Aquarius Network + Aaron Angel + aangel&myrealbox.com +13931 + uberLAN Technologies + Duane Powers + duane&uberlan.net +13932 + Vogon.Net + Philip Peake + philip&vogon.net +13933 + Alstom Transport + Mikaël CAUSARD + pki_run&alstomgroup.com +13934 + Micchi + Michael Doyle + michael.doyle&micchi.co.uk +13935 + 100comm + Feng Wang + wangf&100comm.com +13936 + AEGIS Mortgage Corp + Ken Cumbus + ken.cumbus&aegismtg.com +13937 + Art Global Limited + Xinjun Wu + michael&aglworld.com +13938 + Ashton Raggatt McDougall + Cameron Maxwell + c.maxwell&a-r-m.com.au +13939 + automatX + Hannes Fremann + hg&automatx.de +13940 + COE Ltd + Sam Kingsbury + samk&coe.co.uk +13941 + Communicator Inc. + Dmitriy Fedorov + dfed&cihub.com +13942 + Candera Inc + Lai-King Mau + mau&candera.com +13943 + Datanet + Andrew Jeanmonod + aj&data.net.uk +13944 + Denali SA + Pascal Platteeuw + pascal&denali.be +13945 + Deutsche Forschungsgemeinschaft + Dominik Sack + dominik.sack&dfg.de +13946 + Educational Systems Ltd + Rob Lyle + rob&garf.co.uk +13947 + EEMA + Jane Hebson + jane.hebson&eema.org +13948 + Exostar LLC + Jeff Nigriny + security&exostar.com +13949 + FRANK-EDV-SERVICE Gesellschaft m.b.H. + Alfred Frank + office&frankedv.at +13950 + Galileo, Inc. + Norihito Shinzan + shinzan&galileo.co.jp +13951 + Gentek Marketing Inc. + Jeffrey Freedman + jfreedman&gentek.com +13952 + GLOBAL SUN TECHNOLOGY + Peter Lin + peterlin&globalsuntech.com +13953 + HelioGraph Limited + Graham Britten + graham.britten&heliograph-global.com +13954 + IT Guardian Ltd + Sang Young + wsyoung&it-guardian.net +13955 + JPConsulting + J. Paul Connolly + jpaulconnolly&yahoo.com +13956 + KATCH Network Inc + Hiroyuki Ohmiya + admin&katch.co.jp +13957 + Liquidsite, Inc. + Victor Owuor + owuor&zelotech.com +13958 + M.N. Ramos Ferreira, Electricidade e Mecânica, Lda + Joao Ferreira + score&netcabo.pt +13959 + MLA Power Systems + Neill Truter + neill&mlapower.co.za +13960 + NESTAR + You-Er Tai + u2tai&nestar.com.tw +13961 + NLIP - Dutch ISP Association + Pim van Stam + pim.van.stam&nlip.nl +13962 + QoS Labs + Victor Tapia + vtapia&qoslabs.com +13963 + R.V.R. 
Elettronica SpA + Alessandro Franceschi + afranceschi&rvr.it +13964 + SWCS Chan Pak Sha School + Anderson Tung + noc-admin&cpss.edu.hk +13965 + tpgpost + Ronald van Aalst + r.vanaalst&tpgpost.nl +13966 + University of Bath + Computing Services + support&bath.ac.uk +13967 + Vaporware + Oliver Wagner + owagner&vapor.com +13968 + Verlag Fur Neue Medien + Daniel Chabrol + chabrol&vfnm.de +13969 + VOXSIS Informática + Guido Malato + guido&voxsis.com.br +13970 + Aerostrong Science & Technology Co., Ltd. + Li Jingyu + ljingyu&public.hh.nm.cn +13971 + ChinaBizone Infomation Technology Co., Ltd. + Chung Zhong Zhang + zcz&chinabizone.com +13972 + Varga Limited + Gary Varga + iana&vargaltd.freeserve.co.uk +13973 + investir publications + Sebastien Pacreau + spacreau&investir.fr +13974 + Epigenomics AG + Robert Sander + hostmaster&epigenomics.net +13975 + Naval Global Directory Service + F. Peter Z. Cruikshank + peter.cruikshank&navy.mil +13976 + Interaktivna Consulting + V. Donchenko + info&interaktivna.com +13977 + SHAYNE ENTERPRISES + Jeff Metzger + shayne_ent&msn.com +13978 + GuardedNet, Inc. + Kevin J. Schmidt + snmp&guarded.net +13979 + Mintact Software Inc (formerly 'CompuCS Inc.') + Min Chen + min.chen&mintact.com +13980 + Platinum Web Pages + Ron Pfingsten + ron&platinumwebpages.com +13981 + INGCHEM + Peter Luby + lubyp&pobox.sk +13982 + FibreSpeed Ltd. + Michael T. Babcock + mbabcock-snmp&fibrespeed.net +13983 + WWR Development, Inc. + David Witten + wittend&wwrinc.com +13984 + Cybrick Information Systems + Richard Clark + richardc&cybrick.com +13985 + TAE KWANG INDUSTRIAL CO., Ltd. + Yong-Pil Huh + yphuh&daintelecom.com +13986 + Larry Drummond,Consulting Engineer + Larry Drummund + spud&nwcerts.com +13987 + C. Plath GmbH + Michael Gaudlitz + michael.gaudlitz&plath.de +13988 + tele-akademie der Fachhochschule Furtwangen + Joachim Saleyka + sysadmin&tele-ak.fh-furtwangen.de +13989 + Markus Heiden SAP / EC Consulting + Markus Heiden + iana&heiden-online.com +13990 + Coloma Community Schools + Justin Noack + noackje&yahoo.com +13991 + Accellence Technologies GmbH + Jochen Noelle + jochen.noelle&accellence.de +13992 + M-Net Sys + Miryawan + miryawan&telkom.net +13993 + ITWorx + Tarek Nabhan + standards&itworx.com +13994 + STN ATLAS Elektronik GmbH + Holger Behm + behm&stn-atlas.de +13995 + Amperion, Incorporated + Jay Brothwell + jay&amperion.com +13996 + kbox + Cheolman Park + ddaul&hanmail.net +13997 + Invisible Industries + Wade Mealing + wmealin1&bigpond.net +13998 + Conceptual MindWorks, Inc. + Cary Brown + cary&teamcmi.com +13999 + ItsNotA.com + Thomas MacNeil + thomasmacneil&yahoo.com +14000 + Michail Kamprianis + Michail Kamprianis + michalis.kamprianis&gmail.com +14001 + DRS Technologies Canada Ltd. (formerly 'DRS Flight Safety and Communications') + Nikola Vladisavljevic + nvladisa&drs.ca +14002 + University of Wisconsin - Whitewater + Bradley Schwoerer + schwoerb&uww.edu +14003 + Saint Mary's College + Steve Hideg + hideg&saintmarys.edu +14004 + Gap Inc. + Navin Argulkar + navin_argulkar&gap.com +14005 + Kelly + Ronan Kelly + iana-requests&haven.at +14006 + Thomas Melzer Softwareentwicklung + Thomas Melzer + tmelzer&tomesoft.de +14007 + IMPACT Science & Technology, Inc. + Charles Barker + cjbarker&impactsci.com +14008 + KT + Song Soonyong + skull&kt.co.kr +14009 + onSynergy Solutions + Daniel Hoang + danielh&onsynergy.com +14010 + Cirrus Real Time Processing + Brett Haynes + brett.havnes&cirrusrtps.com.au +14011 + Cyber-Project + Viktor V. Buiny + adm&kem.ru +14012 + net-safe Co., Ltd.
+ Ryu Yong Sik + system&net-safe.co.kr +14013 + Tata Consultancy Services + Tarun Kumar Goswami + tarun.goswami&tcs.com +14014 + TP EmiTel Sp. z o.o. + Janusz Baluka + jnbaluka&emitel.pl +14015 + Unet Ltd. + Victor O Ponomarev + vick&unet.ru +14016 + HG-France + Jean-Louis Mounier + jlmounie&club-internet.fr +14017 + Fachhochschule Technikum Wien + Alexander Nimmervoll + nimm&technikum-wien.at +14018 + Bryan Bellamy + Bryan Bellamy + bryan.bellamy&lineone.net +14019 + Betronic Design BV + H.A. Dijkstra + henkdijkstra&betronic.nl +14020 + Zencod + Sergio Loureiro + sergio.loureiro&zencod.com +14021 + Shapers + J.W. Penterman + jw&shapers.nl +14022 + Arachne (Poland) + Artur Stepien + muczachan&hell.pl +14023 + KCOM Service Provider Network (formerly 'Mistral Internet Group') + Phil Edwards + phil.edwards&kcom.com +14024 + fiscus GmbH + Georg von Lachemair + g.von-lachemair&fiscus.info +14025 + Landesversicherungsanstalt + Volker Bertermann + volker.bertermann&lva-westfalen.de +14026 + Slapd.Net + John P. Hoke + john.hoke&epresence.com +14027 + RiverNorth Systems, Inc. + Randy Bey + randy.bey&rivernorthsys.com +14028 + Real Time Engineering + Greg Garner + greg&rt-eng.com +14029 + Netzwerk-Lehmann + Thomas Lehmann + info&netzwerk-lehmann.de +14030 + Smartology vof + Remy C. Cool + remy.cool&smartology.nl +14031 + STATO MAGGIORE DIFESA + Giuseppe NOCE + scd.uis.scc.cs&cor.difesa.it +14032 + SUNY Geneseo + Kirk Anne + systems&geneseo.edu +14033 + Samart Info Media Co.,Ltd. + Panot Hunwan + panot&samarts.com +14034 + OU Artaxis + Antti Helemets + annti&artaxix.ee +14035 + CyberStar + Dan Zak + dzak&cyberstar.com +14036 + LineCom Ltd + Tamas Kuti + kuti.tamas&linecom.hu +14037 + DasW:Lab + Frank Brandewiede + brande&cryptix.de +14038 + Karns Technology + Scott Karns + scott&karnstech.com +14039 + Gagliano & Cia + Roque Gagliano + rgaglian&adinet.com.uy +14040 + Branda Tech. + Kuei-Feng Li + thinker&branda.to +14041 + Railway Information Systems Co.,Ltd + Toshikazu Masano + oid&jrnet.ne.jp +14042 + Freudenberg Haushaltsprodukte KF + Steffan Haas + steffan.haas&fhp-ww.com +14043 + Sociedad Andaluza para el Desarrollo de la Sociedad de laInformacion, S.A.U. + SADESI + soporte.funcional.correo.sadesi&juntadeandalucia.es +14044 + Kernfysisch Versneller Instituut + Peter A. Kroon + p.a.kroon&kvi.nl +14045 + iServe (Pty) Ltd. + Willem Brown + willemb&iserve.co.za +14046 + Statens Bostadsfinansierings AB + Roland Magnusson + roland.magnusson&sbab.se +14047 + ISET + David Chia + rsedc&atlantic.gse.rmit.edu.au +14048 + Lyse Energi + Oddbjorn Sorseth + oddbjorn.sorseth&lyse.no +14049 + ProactiveThought Technologies Ltd.Inc. + Scott Tarone + starone&proactivethought.com +14050 + Desert Academy at Sante Fe + Scott Karns + skarns&desertacademy.org +14051 + NetMaster Digital Security + Dana M. Epp + depp&netmaster.com +14052 + Profluent systems Australia + Brad Vonarx + snmp&profluent.com.au +14053 + Prosum Solutions, LLC + Steven F. Davis + iana&prosumsolutions.com +14054 + Fibercom + JungHee-Won + jhwon&fibercom.co.kr +14055 + IP Mobile Net, Inc. + Russell Markus + rmarkus&ipmobilenetinc.com +14056 + CEGELEC + Roland Stader + roland.stader&cegelec.com +14057 + Longhill High School + Kristyan Osborne + kris&longhill.brighton-hove.sch.uk +14058 + Excogita S.r.l. + Bruno Castellani + info&excogita.net +14059 + Oberoesterreichische Versicherung AG + Stefan Vetter + s.vetter&ooev.at +14060 + ENS GmbH + Nick Baile + nick_baile&ens.com +14061 + Conectiva S.A. 
+ Andreas Hasenack + andreas&conectiva.com.br +14062 + Escritorio Juridico Alcala Rhode & Asociados + Jose Luis Alcala Rhode + jlalcalar&cantv.net +14063 + SeaFire Networks Inc. + Kioma Valenzuela Aldecoa + kioma&seafirenet.com +14064 + UC Berkeley Residential Computing + Jennifer Ly + jenly&rescomp.berkeley.edu +14065 + Shanghai FOCI Fiber Optic Communication Equipments Inc. + yongfeng zhang + zhyfeng&eastday.com +14066 + pit.at + Peter Neumaier + pit&pit.at +14067 + allied networks GmbH + Oliver Skiebe + oliver.skiebe&alliednetworks.de +14068 + Thueringer Netkom GmbH (formerly 'TEAG Netkom GmbH') + Thomas Voigt + thomas.voigt&netkom.de +14069 + Syngenio AG + Ullrich Rieger + ullrich.rieger&syngenio.de +14070 + C3T + Sebastien Annedouche + sannodouche&c3t.fr +14071 + Bohr- und Rohrtechnik GmbH + Michael Monschein + michael.monschein&bur.at +14072 + Mistral Software Private Ltd. + Athif Allam + athif&mistralsoftware.com +14073 + SMART Technology + Joshua Giles + joshua_giles&dell.com +14074 + Shorcan Brokers Ltd. + Gord Shier + gshier&shorcan.com +14075 + Rational Software Corporation + James Washington + jwashing&rational.com +14076 + Marsh Inc. + Winston Lawrence + winston.l.lawrence&marsh.com +14077 + Esial + Samson Bisaro + samson.bisaro&esial.uhp-nancy.fr +14078 + ANECT a. s. + Daniel Fiser + dfiser&anect.com +14079 + Active Tools + Miha Vuk + miha&activetools.si +14080 + Brivo Systems, Inc. + Seth Ladd + seth&brivo.net +14081 + Armstrong World Industries + Dan Conrad + daconrad&armstrong.com +14082 + uplink coherent solutions + Rupert Roesler-Schmidt + iana-requests&uplink.at +14083 + Thomas Enterprises + Kenny Thomas + shop&kenthom.org +14084 + Cochran Consulting, Inc. + Charlie Sherman + sysadmin&divecochran.com +14085 + ADVFN.com PLC + Jonathan Tullett + jont&advfn.com +14086 + ESHA Research + Christopher Elkins + chrise&esha.com +14087 + Zultys Technologies + Amir Fuhrmann + amir.fuhrmann&zultys.com +14088 + Navis LLc + Scott Marshall + is&navis.com +14089 + City of Ontario, Oregon + Dan Stieneke + dan.stieneke&ontariooregon.org +14090 + Powerview Systems + Jeffrey Kessler + jkessler&powerviewsys.com +14091 + Hawker College + Andrew Bartlett + hostmaster&hawkerc.net +14092 + University of Rhode Island + Terry Wild + twild&uri.edu +14093 + Medibase Technologies Co., Ltd. + Dr. Weng Kong Tam + ceo&medibase.org +14094 + Sue B.V. (formerly 'Snow B.V.') + Tom Scholten and/or Jos Jansen + jos.jansen&sue.nl +14095 + TechDirection + Matthew Allison + matt&techdirection.net +14096 + Forlink Software Corporation Inc. + Chen Chuan + chenchuan&softhouse.com.cn +14097 + ITW-Informationstechnik GmbH + Franz Wagner + office&itw.at +14098 + Hotelequip Ltd. 
+ Andy Turner + andyt&hotelequip.com +14099 + Butchwax Software + John Morris + iana&butchwax.com +14100 + WLAN Alliance AB + Rickard Gunnarsson + rickard.gunnarsson&wlanalliance.com +14101 + D.A.Tsenov EA + Angelin Lalev + lalev&uni-svishtov.bg +14102 + dacore Datenbanksysteme AG + Werner Fink + werner.fink&dacore-dbs.de +14103 + ICE Systems + Emir Mameledzija + emir.mameledzija&icesystems.hr +14104 + tetronik GmbH AEN + Michael Mahler + michaelm&tetronik.com +14105 + Business interactif + Laurent Foinel + laurent.fionel&businessinteractif.fr +14106 + Syndicat Interhospitalier de Bretagne + Claudie David + claudie.david&sib.fr +14107 + WIENSTROM + Thomas Fuchs + thomas.fuchs&wienstrom.at +14108 + Odense Kommune + John Bonnerup + jobn&odense.dk +14109 + Jaguar Cars Limited + Ewen Phillips + ephilli5&jaguar.com +14110 + ORIMOS + Guy Hindell + guy.hindell&orimos.com +14111 + Oxspring Network Solutions Ltd. + Chris Andrew + candrew&oxspring.com +14112 + hzw design + Juri Paern + paern&hzw.de +14113 + MagneTek + Filippo Vernia + vernia&magnetek.it +14114 + Aepona England + Sean Cronin + sean.cronin&aepona.com +14115 + Platespin + Deepak Lal + deepakl&platespin.com +14116 + Telecore, Inc. + John Palmquist + jpalmquist&telecore-inc.com +14117 + aostar + Jing.run Xian + xianjingrun&aostar.com +14118 + Hanover College + John W. Collins + collins&hanover.edu +14119 + Netlife AG + Dr. Thomas Tautenhahn + tautenhahn&netlife.de +14120 + Letifer.org + Jeremy Jones + jeremyj&letifer.org +14121 + Stone-IT + Yvan Cottyn + technical&stone-it.be +14122 + Wireless Broadband Alliance Ltd + Bruno Tomas + bruno&wballiance.com +14123 + Expertcity Inc. + Anshuman Kanwar + anshuman&expertcity.com +14124 + UK/CA/JN Joint Astronomy Centre + Henry Stilmack + h.stilmack&jach.hawaii.edu +14125 + SENAO INTERNATIONAL CO., Ltd. + Patrick Cheng + patrickcheng&senao.com.tw +14126 + New World PCS Limited + Daniel Chan + dkhchan&nwmobile.com +14127 + OneChannel + Mike Claussen + mclaussen&onechannel.net +14128 + Programming Arts, Inc. + D Whitehorn-Umphres + wumail&progarts.com +14129 + Sevan Networks, Inc. + Andre Melkoumian + andre&sevannetworks.com +14130 + Bifco + Tarrie Joniec + tarrie.joniec&bifco.com +14131 + Ifoundry Systems + Alvin Loh + alvinl&ifoundry.com.sg +14132 + CS GROUP + Thomas ANDREJAK + thomas.andrejak&csgroup.eu +14133 + Buergernetz Weihenstephan e.V. + Marc von Jaduczynski + marc&will-mail.de +14134 + University of Szeged + Mihaly Toth-Abonyi + m.toth-abonyi&cc.u-szeged.hu +14135 + SysDM + Jim McDonald + jim.mcdonald&sysdm.com +14136 + PONTON GmbH + Jörg Eichhorn + admin&ponton.de +14137 + Karis + Jarl Jansen + jarl.jansen&karisweb.de +14138 + TGM + Markus Schnabel + markus.schnabel&tgm.ac.at +14139 + City of Nottingham Council + Richard Heggs + richard.heggs&nottinghamcity.gov.uk +14140 + Fachhochschule Lippe und Hoexter + Carsten Halm + carsten.halm&fh-luh.de +14141 + ERECA + Olivier Stockemer + ereca3&wanadoo.fr +14142 + ALPWARE + Andrei Vitiazev + info&alpware.com +14143 + FimOSSchober & Hartl OEG + Franz Schober + office&firmos.at +14144 + Malkom S.J. + Marek Marcola + root&malkom.pl +14145 + Society for Humanity and International Trancendentalism + Daniel Storm + daniel&shit.co.uk +14146 + Blackbox Consulting Corporation + James D.
Nurmi + jnurmi&blackboxcorporation.com +14147 + Fermi National Accelerator Laboratory + Igor Terekhov + terekhov&fnal.gov +14148 + Officina di idee + Riccardo Carrai + riccardo&offidee.com +14149 + Waddell & Reed Services + Mark Lonergan + mlonergan&waddell.com +14150 + Transaction Auditing Group + Andrew Dean + adean&tagaudit.com +14151 + Splentec Ltd. + James Lee + jlee&splentec.com +14152 + Dyncorp Systems and Solutions + Kirby Vaughan + kirby.vaughan&dyncorp.com +14153 + Sigma SaA + Fabio Filipponi + ffilipponi&sigmaspa.com +14154 + NTKO Network Co., Ltd. + John Tanger + dominoreg&sina.com +14155 + TIL TECHNOLOGIES + IT Manager + administrateur&til-technologies.fr +14156 + University of Reading + Andy Gatward + noc&reading.ac.uk +14157 + FHLBank Topeka + John Thompson + john.thompson&fhlbtopeka.com +14158 + Consultoria y estrategia electrónica para internet, S.L.(TestaNet) + Evelio Martinez + evelio.martinez&testanet.com +14159 + Fluent Inc. + Paul Rossman + pdr&evanston.fluent.com +14160 + Tribune Interactive + Alan Pitcher + apitcher&tribune.com +14161 + EPCNet GmbH + Jochen Dolze + jd&epcnet.de +14162 + IT Solution GmbH + Rainer Gundacker + rainer.gundacker&itsolution.at +14163 + SAGA D.C. GmbH + Jochen Grotepass + jochen.grotepass.sagadc.de +14164 + SANavigator, Inc. + Robert Pulley + rpulley&sanavigator.com +14165 + iReady + Charles Andrews + charlesa&iready.com +14166 + Saviso Consulting Ltd. + Adam Twiss + adam&saviso.com +14167 + Seattle University + Mark Young + youngma&seattleu.edu +14168 + Owl River Company + R.P. Herrold + herrold&owlriver.com +14169 + INFORMEDIA + Shashi Shekhar S + shashishekhars&yahoo.com +14170 + Australian Research Centre for Medical Engineering + James Devenish + devenish&arcme.com +14171 + Memorial University of Newfoundland + Gerard Barrington + gbarrington&mun.ca +14172 + Boca Software, Inc. + Jim Skoog + jskoog&bocasw.com +14173 + ETRI + Daeha Lee + bigsum&etri.re.kr +14174 + Softforum Co., Ltd. + Yeonyeol Jeong + yjy&softforum.com +14175 + Hospital La Fe + Ricardo Garcia Mataix + garcia_ric&gva.es +14176 + Katholieke Hogeschool Leuven + Vital Coenegrachts + vital.coenegrachts&khleuven.be +14177 + implementa GmbH + Bernd Kischnick + kisch&implementa.com +14178 + JNI Corporation + Larry Butler + lbutler&jni.com +14179 + Airespace, Inc (formerly 'Black Storm Networks') + Bhautik Doshi + bdoshi&airespace.com +14180 + DS Consulting + Dan Shinton + snmp&shinton.net +14181 + Cygnus Technologies + Egidio Cianciosi + ec&cygnus-technologies.com +14182 + GlobalLocate + Myvan Quoc + mquoc&globallocate.com +14183 + Arezzo Telecomunicazioni Srl + Giampaolo Tomassoni + g.tomassoni&ar-tel.it +14184 + Getabigger Network + David J Blackburn + admin&getabigger.net +14185 + CYBER SERVICE + Maciej Skalski + maciek&cs.net.pl +14186 + Gallant Technologies, Inc. + Michael Fowler + support&gallanttech.com +14187 + China Netcom Corporation Ltd. + Dashen Wang + dashen&sd.cn.net +14188 + Thomas K. Williams, Ltd. + Thomas K. 
Williams + koyote&goodnet.com +14189 + probusiness AG + Andreas Schiller + aschiller&probusiness.de +14190 + Western Outdoor Interactive + Keith Fernandez + keith&w-o-i.com +14191 + MNOFBB + Marcellius Smith + marcellius.smith&mnofbb.org +14192 + Granite Technologies + Kenny Dorr + kdorr&osnetwork.com +14193 + Strabon + Nader Boutros + boutros&msh-pairs.fr +14194 + Ministry of Social Development + William Young + bill.young002&msd.govt.nz +14195 + Now Software + Corey Johnson + corey&poweronsoftware.com +14196 + California State University, Monterey Bay + Isaac Davis-King + isaac_davis-king&csumb.edu +14197 + SISNEMA Informatica Ltda. + Giani Antonio Leichtweis Maldaner + giani&sisnema.com.br +14198 + PANNAWAY Technologies Incorporated + Victor Novikov + novikov_ve&pannaway.com +14199 + Accelrys Inc. + Thomas Helvey + helvey&accelrys.com +14200 + HCL Comnet Ltd. + Sanjay Saxena + sanjays&hclcomnet.co.in +14201 + Daxworld + Tommy Wilson + daxboy&mac.com +14202 + Dottech International Inc.Ltd. + Hubert Yeh + pest_yeh&dottech.com.tw +14203 + AQL + Vincent Meuric + vincent.meuric&aql.fr +14204 + Reksoft Ltd. + Michael Gusev + gusev&reksoft.ru +14205 + MAI Insurance Brokers Ltd. + Laszlo Vagasi + l.vagasi&mai-cee.com +14206 + Informatie Beheer Groep + J. T. Schuilenga + j.t.schuilenga&ib.groep.nl +14207 + Leroy Somer + Stephane Couvy + stephane.couvy&leroysomer.com +14208 + OpenWide + Benjamin Cleyet-Marrel + benjamin.cleyet-marrel&openwide.fr +14209 + Accellent + Christian Berge + christian.berge&accellent-group.com +14210 + The Storyteller's Place + Adam Strallen + avstrallen&btinternet.com +14211 + Tiffany and Company + Jose Rosario + jose.rosario&mycroftinc.com +14212 + Universitaet Muenchen + Robert Hofer + oidmaster&ifi.lmu.de +14213 + Scitech it solutions GmbH + Sven Rudolph + s.rudolph&scitech-gmbh.de +14214 + Tyco Services + Howard Alexander + halexander&tycoint.com +14215 + Interactive Visuals, Inc. + Mike Schienle + mgs&ivsoftware.com +14216 + Sendfar Technology Co., Ltd. + Robinson Tu + robinson&sendfar.com.tw +14217 + Wuestenrot Datenservice GmbH + Hubert Hoffmann + hubert.hoffmann&wueestenrot.at +14218 + Facultes Universitaires Notre-Dame de la Paix + Richard Mairesse + snmpmaster&fundp.ac.be +14219 + Hapag-Lloyd Container Linie AG + Ralf Kaemper + ralf.kaemper&hlcl.com +14220 + Akaflieg Munchen e.V. + Dominik Kreutzer + kreutzer&akaflieg-muenchen.de +14221 + Prodigy Labs + Shekhar Borgaonkar + prapandit&yahoo.com +14222 + Electric Reliability Council of Texas, Inc. + Ricc Babbitt + ricc.babbitt&ercot.com +14223 + Talos (formerly 'Sourcefire, Inc.') + Mark Felder + snmp&sourcefire.com +14224 + Software Machine + Ozair Garcia Campos Jr. + ozairjr&yahoo.com +14225 + SPL / ACT Wireless + Yvon Belec + yvon.belec&advantechamt.com +14226 + Vizafone + Matthew Dalby + matthewdalby&yahoo.com +14227 + Tampere Polytechnic + Jarmo Sorvari + jarmo.sorvari&tpu.fi +14228 + Flanders Electric + Mike Engler + ldap-oid&flanderselectric.com +14229 + University of Arkansas-Fayetteville + Elon Turner + uarktech&uark.edu +14230 + Engine GmbH & Co. KG + Thomas Warnick + tw&engine.de +14231 + The Shmoo Group + Tina Bird + tbird&shmoo.com +14232 + Exelon Corporation + Christopher Olmsted + christopher.Olmsted&exeloncorp.com +14233 + Sonag Company + James Urbas + jimu&sonag.com +14234 + Isotomic Technologies + Jay Haskins + jhaskins&isotomic.com +14235 + SpinCircuit Inc. 
+ Jagan Bhamidipati + jagan_bhamidipati&spincircuit.com +14236 + Banco de la Republica + Samuel Gutierrez + sgutieti&banrep.gov.co +14237 + Sarawak Information Systems Sdn. Bhd. + Shahmat Dahlan + shahmatd&sains.com.my +14238 + Dilithium Networks + David Jack + david.jack&dilithiumnetworks.com +14239 + SPRITEInc. + Masashi Soda + i ana&sprite.ad.jp +14240 + Top Global Technology Ltd + Achilles Newman + liuxc&chinatopglobal.com +14241 + Mentorgen, LLC + Ken Bradley + ken.bradley&mentorgen.com +14242 + AOSA Telekom GmbH & Co. + Elisabeth Jaeger + elisabeth.jaeger&aosa.at +14243 + RTS Real Time Computersoftware Ges.mbH. + Peter Goetzl + gop&rts.co.at +14244 + Bjerkeset Consulting + Svein Bjerkeset + sob&jerkeset.com +14245 + The Caudium Group + Xavier Beaudouin + kiwi&caudium.net +14246 + Natural Convergence + Yves Gilles + yves.gilles&naturalcomvergence.com +14247 + signaflex Inc. + Pascal Poudrier + pascal_poudrier&signaflex.qc.ca +14248 + Gemini Security Solutions, Inc. + Peter Hesse + jobs&geminisecurity.com +14249 + Centre Hospitalier Henri Laborit + Philippe Marasse + philippe.marasse&ch-poitiers.fr +14250 + Conseil General de la Vienne + Philippe Marasse + p-marasse&cg86.fr +14251 + Stellar Craft Inc. + Hideo Morishita + manmos&stellar.co.jp +14252 + Prime Solutions Ltd. + Ari Vaha-Erkkila + ari.vaha-erkkila&primesolutions.fi +14253 + GridXpert + Olivier Petit + oliver.petit&gridxpert.com +14254 + enhansiv + Farzin Mohammadi + farzin.mohammadi&enhansiv.com +14255 + brvenik.com + Jason Brvenik + jason&brvenik.com +14256 + Arlys S. A. + Phillipe Guillaubez + phillipe.guillaubez&arlys.com +14257 + Link Margin, Inc. + Andy Warner + andyw&linkmargin.com +14258 + Northern Trust Company + Stephen Bonine + spb&ntrs.com +14259 + Groupe ECE Paris + Christophe Kern + kern&ece.fr +14260 + Linux Systemberatung + Eric Weiss + iana-oid&eric-weiss.de +14261 + Bank Leumi LeIsrael Ltd. + Shmuel Mishali + shmuelm&bll.co.il +14262 + Dipl. Ing. Felix Beer + Felix Beer + f.beer&indec.at +14263 + Sven Meinhardt EDV-Service + Sven Meinhardt + admin&meinhardt-edv-service.de +14264 + OutPost Sentinel + Christopher Fowler + cfowler&outpostsentinel.com +14265 + itouch + Andrew Thomson + andrew.thomson&itouch.com.au +14266 + Bulletproff Networks + John Ferlito + johnf&bulletproof.it +14267 + WrjTec + RuiJun Wu + wrj5&sohu.com +14268 + Projekt Avalon + Michael Weisbach + iana&tuts.nu +14269 + Beijing Vanlink Communication Co. P.R.China Yu + Yu Wei + yuwei&vl.com.cn +14270 + Technology Hackworks, Inc. + Randy Nott + jecky1&pacbell.net +14271 + Iliad + Cedric Lecourt + hostmaster&iliad.fr +14272 + AWO Kreisverband Essen e.V. + Werner Opriel + werner.opriel&awo-essen.de +14273 + IFEN + W. Werner + w.werner&ifen.com +14274 + BDILukasz Tylski + Lukasz Tylski + lukasz.tylski&bdi.net.pl +14275 + Deutsche Bundeswehr + Michael Lensch + michaellensch&bwb.org +14276 + Osystem AB + Ola Sigurdson + hostmaster&osys.se +14277 + tylerfam.net + Craig Tyler + ctyler&tylerfam.net +14278 + DINX GmbH + Stephan Schwab + schwab&dinx.net +14279 + NetHere Internet Services, Inc. + Henry Chan + htchan&nethere.net +14280 + All Optical Networks, Inc. 
+ David Mc Coll + dbmccoll&alloptical.net +14281 + RCAT.net + Michael Weeatherlight + rcat&woofcat.com +14282 + Vertrian + Ed Frigo + marketing_vertrian&yahoo.com.br +14283 + Webraska + Antoine Martin + amartin&webraska.com +14284 + Technische Universitat Chemnitz + Guenther Fischer + fischer&hrz.tu-chemnitz.de +14285 + Cegetel SI + Denis Loitard + smpsbo&cegetel.fr +14286 + nVISIA + Stuart Schrader + sschrader&nvisia.com +14287 + Aegis:Net + Andreas Brenk + ab&aegisnet.biz +14288 + Lucterra Informatica Ltda + Luciano Terra + luciano&lucterra.com.br +14289 + A3 Design + Mars Hall + mars&A3online.com +14290 + Infocorp Computer Solutions, Inc. + Jessie Chen + jessie&infocorpnow.com +14291 + Stefan Lefnaer + Stefan Lefnaer + stefan&lefnaer.com +14292 + The Document Academy + Lars Nooden + lars.nooden&hum.uit.no +14293 + Aditel nv + Evgeniy Smoliar + sem&aditel.be +14294 + Collax GmbH + Falk Kraemer + falk.kraemer&collax.com +14295 + DOP Systems Ltd. + Diego Oliveria + diego&audisoft.com.br +14296 + Confmon Corp + Erik Dahl + edahl&mindspring.com +14297 + One.Tel.UK + Anthony Dervish + anthony.dervish&onetel.co.uk +14298 + Control Products, Inc. + Gary Olson + golson&controlproductsinc.com +14299 + SiteSuite Australasia + David Baxter + david&sitesuite.com.au +14300 + Cyber Switching Inc. + Ron Silorio + rons&cyberswitching.com +14301 + Pro Medicus Ltd. + Zane Francis + smi&promed.com.au +14302 + papiNet.org + Tom Meniga + tom.meniga&papinet.org +14303 + Nextgen, Inc. + Akio Nonaka + tech&nextgen.co.jp +14304 + ScenPro Inc. + Luke Harms + lharms&scenpro.com +14305 + Kora Institute of Science and Technology Information (KISTI) + MinYeol Lim + mylim&hpcnet.ne.kr +14306 + FOCI Fiber Optic Communications, Inc. + Chiang Feng-Shih + chiang&mailsrv.foci.com.tw +14307 + Kawamura Electric Inc. + Takuo Ito + ta-itou&kawamura.co.jp +14308 + REP + Fabrice Cetre + fcetre&repgroup.net +14309 + Technische Universitaet Clausthal, Rechenzentrum + Ralf Kalwa + kalwa&rz.tu-clausthal.de +14310 + Adjoin Solutions, Inc. + Vladimir Umansky + vumansky&adjoin.com +14311 + Technion Communications Corporation + Prashant Cherukuri + Prashant&Technion.com +14312 + Skypath Networks, Inc. + James Tavares + jtavares&skypath.com +14313 + Proceda Tecnologia e Informatica SA + Evandro Dantas Camargo + evandroc&proceda.com.br +14314 + Imaginet + Bret Otzenberger + botzenberger&imaginet.com +14315 + UBServices + J. Unterberger + j.unterberger&ubservices.de +14316 + Shanghai Huateng Software System Co., Ltd. + Xu Feng + xu_feng&huateng.com +14317 + Ratwater + Jim Maynard + ratwater&aol.com +14318 + Alstom T&D P&D + Luc Hossenlopp + luc.hossenlopp&tde.alstom.com +14319 + DDD Design GmbH + Clau Zimmerman + hostmaster&dd.de +14320 + Credit Lyonnais DSTI/DSAT/PLANET + Marco Helloco + marc.helloco&creditlyonnais.fr +14321 + Techelp Ky + Mikko Kortelainen + mikko.kortelainen&techelp.fi +14322 + Arnold Design & Informatica Ltda. + Christiano Levi Arnold + cristiano.arnold&bol.com.br +14323 + Dennis IT + Trevor Dennis + tdennis&trevordennis.com +14324 + The Written Word, Inc. + Albert Chin-A-Young + iana&thewrittenword.com +14325 + PuceBaboon + Miyoko Little + gaijin&pobox.com +14326 + Sirius Satellite Radio + Alan Pate + apate&sirusradio.com +14327 + VA Linux Systems Japan K.K. + Junjiro Okajima + jro&valinux.co.jp +14328 + JAG Productions, Inc. + Alan Gates + agates&eddinc.net +14329 + TimeCertain, LLC + Jeff Powers + jeff&timecertain.com +14330 + Shanghai Ewaytek Co., Ltd. 
+ Liewen + liewen&ewaytek.com +14331 + Beijing Topsec Network Security Technology Co., Ltd. + Wang Xue + wx&ccert.com +14332 + DeadSquid Communications + Kevin Needham + kneedham&deadsquid.com +14333 + Phonologies (India) Pvt. Ltd. + Prashant Lamba + raj&phonologies.com +14334 + Milton Keynes Council + Paul Sansom + paul.sansom&milton-keynes.gov.uk +14335 + Econnect + Jiri Reisching + jiri.reischig&ecn.cz +14336 + The Chesapeake Computer Group, Incorporated + Bruce Wahl + bruce&ccgi.com +14337 + Superintendencia de Servicios de Salud + Aureliano Chavarria + aureliano&sssalud.gov.ar +14338 + ImpSat S.A.-Colombia + Ivan Otero + iotero&impsat.net.co +14339 + Hioptel + Mark Duan + misoso&963net +14340 + LinuxTek, Inc. + Sharif Alexandre + virtualgypsy&yahoo.com +14341 + Ministry of Pacific Island Affairs + Mike Mylonas + mike.mylonas&minpac.govt.nz +14342 + Netway Networks Pty. Ltd. + Marty Richards + marty&netwaynetworks.com.au +14343 + Osprey Network Technologies, Inc. + David Morton + oidmaster&osprey.net +14344 + Electronic Commerce Technologies + Steven Cockerill + infor&e-teq.com +14345 + ETG + Stacy Minkin + stacy&eutechgroup.com +14346 + Erich Jaeger GmbH + Mark Carolan + mark.carolan&jaeger-toennies.com +14347 + RealNet Kft. + Lorant Kurthy + root&real-net.sk +14348 + Swist Group Technologies (Pty.) Ltd. + Max Clements + clementsm&swistgroup.com +14349 + Dyband Corporation + Walter Capitani + wcapitani&dyband.com +14350 + Storbit + John Reynolds + reynoldsj&hotmail.com +14351 + Applied Engineering, Inc. + Scott Moorhouse + smoorhouse&ae-solutions.com +14352 + Penn State University + Eric Reischer + emr150&psu.edu +14353 + Alessandro Bertela + Alessandro Bertela + ale.78&tin.it +14354 + Menlo Park Presbyterian Church + Aaron Mapes + amapes&mppc.org +14355 + Chapman University + John Stauffacher + stauffacher&chapman.edu +14356 + Bank of Bermuda + Robert Rodger + robert.rodger&bankofbermuda.com +14357 + WORLDPAC + John Woodell + woodie&worldpac.com +14358 + SnapGear Pty Ltd + Tom Essebier + tom&snapgear.com +14359 + Abacus + Jae Shin + newguy&iabacus.co.kr +14360 + Routrek Networks, Inc. + Shinichi Matsushita + matsushita&routrek.co.jp +14361 + Foresearch + Woo-jin Yang + uyang&popeye.snu.ac.kr +14362 + Telekom Slovenije d.d. + Srecko Mandelj + srecko.mandelj&telekom.si +14363 + Blue Oak Wireless (Pty) Ltd. + Max Clements + max&simonsberg.com +14364 + CLG + Alexandre Pachot + alexandre.pachot&clg.fr +14365 + IgH Essen GmbH + Torsten Finke + fi&igh-essen.com +14366 + Energy Research Foundation + D. Voetelink + voetelink&ecn.nl +14367 + Ironic Design, Inc. + Michael Dorman + mdorman&ironicdesign.com +14368 + PUBLIC.AT Internet + Christian Schmied + christian&schmied.cc +14369 + TSI + Bob McClure + bmcclure&tsiconnections.com +14370 + GeoTrust Inc. + Kefeng Chen + kefengc&geotrust.com +14371 + IOTAC + Jed Liu + jed-zz4572&uma.litech.org +14372 + Universidad Simon Bolivar + Patrick O'Callaghan + poc&usb.ve +14373 + MystroTV + IT Director + it&mystrotv.com +14374 + International Software Solutions + John Uhler + john.uhler&issnet.com +14375 + Wireless Services Corp. + Frederick Wamsley + 3dlvjj001&sneakemail.com +14376 + Applied Signal Technology, Inc. + James Israni + izzy&appsig.com +14377 + CORE SECURITY TECHNOLOGIES + Javier Kohen + jkohen&corest.com +14378 + lmjn.com + Michael Sheldon + msheldon&lmjn.com +14379 + VarioSecure Networks, Inc. + Tim Burress + tim&variosecure.net +14380 + KT ICOM + Jae Kyoung Ryu + yousee&kticom.com +14381 + TeleCIS, Inc.
+ Dooseok Kim + dskim&telecis.com +14382 + Superlong IT Ltd. + Alasdair Thomson + alasdair.thomson&gzunk.com +14383 + Openexpertise Pty. Ltd. + Chris Forkin + chris&openexpertise.com.au +14384 + Romsym Data + Razvan Mititeanu + razvanm&romsym.ro +14385 + RTE Software + Olivier Matrot + olivier.matrot&rte.fr +14386 + soLNet, s.r.o + Ales Horak + info&solnet.cz +14387 + Absolute Systems (PTY) Ltd + Raimund Hook + root&absolutesys.com +14388 + Toll Collect + V. Siftar + ahmet.kaskir-hoepfel&ncr.com +14389 + Spider Internet Technologies, Inc. + Roger Hardwicke + rogerch&spiderintertech.com +14390 + QRS Healthcare Solutions + Kent Rankin + krankin&qrsparadigm.com +14391 + Ace Suares + Ace Suares + iana-oid&suares.com +14392 + SI-Solucoes de Informatica + Claudio Lamberti + claudio&si.psi.br +14393 + PRODASAL Companhia de Processamento de Dados de Salvador + Henrique Wendell Sales de Abreu + henrique&salvador.ba.gov.br +14394 + New Mass Media, Inc. + Troy Lemieux + itdept&newmassmedia.com +14395 + University of Oregon + Noreen Hogan + noreenh&oregon.uoregon.edu +14396 + c4net + Byung-Mu Lee + bmlee&c4net.co.kr +14397 + ICSS + Vivid Qian + vivid&icss.com.cn +14398 + Behavioral Informatics Inc. + Emilio Recio + emrecio&netscape.net +14399 + NEC Platforms, Ltd. + Shinsuke Shiota + shiotas-pb&nec.com +14400 + VentureTechs Corp. + Miles Zhou + miles&venturetechs.com +14401 + Joint Stock Company Volgatelecom, Penza region + Michael Fomin + fmikle&penza.net +14402 + University of Geneva + Dominique Petitpierre + dominique.petitpierre&adm.unige.ch +14403 + Gymnasium Hermann-Boese-Str. + Roland Bauerschmidt + roland&hbg-bremen.de +14404 + Spediant Systems Ltd. + Einat Shlengel + einat&orckit.com +14405 + Charon Systems Inc. + Mike Shareski + mike.shareski&charon.com +14406 + United Land Services, Inc. + Michael Lee + mikelee&optonline.net +14407 + Xidus.Net + Jeremy Weatherford + xidus&xidus.net +14408 + nDosa Technologies, Inc. + Jihong Kim + jkim&ndosatech.com +14409 + DCANet + Phil Banh + iana-contact&dca.net +14410 + TSSX + Li Tao + litao&tssx.com.cn +14411 + Beijing QXCOMM Technology CO., Ltd. + Zhao Quan Wang + zqw&qxcomm.com +14412 + European Transaction Bank AG + Michael Grom + michael.grom&etb-ag.com +14413 + Comfone AG + Stefan Renfer + stefan.renfer&comfone.com +14414 + Regio Ltd. + Jaak Laineste + jkl&regio.ee +14415 + e-mobilizer + Hakan Sjogren + hasjg&wmdata.com +14416 + ntl + Andrew Buckley + andrew.buckley&ntl.com +14417 + Continentale Krankenversicherung A.G. + Manfred Dengjel + manfred.dengjel&continentale.de +14418 + Centro de Tecnologia da Informação "Luiz de Queiroz" + Fabio Rogero + frogero&usp.br +14419 + EMIGRATUS Communications Inc. + Sid Siddiqui + sid&emigratus.com +14420 + ltinetworks.com + Bradley White + brad&ltinetworks.com +14421 + Mocana Corporation + James Blaisdell + james&mocana.com +14422 + Tazmen Technologies + Chris Waters + chris&waters.co.nz +14423 + Rondosoft, Inc. + Sergey Armishev + sergey&rondosoft.com +14424 + humatix + Hugo Burm + hugob&humatix.nl +14425 + Telkonet Inc. + Eva Wang + ewang&telkonet.com +14426 + Enterprise Innovations Corporation + Jonathan Griggs + jfgriggs&enterpriseinnovations.com +14427 + NACT Telecommunications Inc. + Brian Minert + bminert&nact.com +14428 + Welstand + Krzysztof Wychowalek + info&welstand.six.pl +14429 + McLuckie and Associates + Jacob Rush + jacobr&mcluckieco.com +14430 + PoliVec Inc.
+ Bill Birnbaum + bbirnbaum&polivec.com +14431 + Universitas Kristen Krida Wacana + Irwan Budiman + irwanhadi&bpkpenabur.or.id +14432 + Reserved + Removed 2013-08-09 + ---none--- +14433 + China Financial Certification Authority + Sun Shengnan + sunshengnan&cfca.com.cn +14434 + Network Systems and Technologies + Ravindranath + raviav&yahoo.com +14435 + Vrije Universiteit Amsterdam + Hans Leidekker + hans&it.vu.nl +14436 + Quiconnect.com + Lawrence Baltz + larry.baltz&quiconnect.com +14437 + baltz.org + Larry Baltz + larry&baltz.org +14438 + PolarLake + Warren Buckley + warren.buckley&polarlake.com +14439 + Hit Internet Technologies S.p.A + Roberto Taccon + roberto.taccon&hit.it +14440 + Longship Scout Group + Chris Searle + postmaster&longship.org +14441 + Miller Zell + Andrew Boring + andrew.boring&millerzell.com +14442 + XYBASE (Malaysia) Sdn Bhd + Shahada Abubakar + shahada&xybase.com +14443 + July Systems + Ramesh Varma + ramesh&julysystems.com +14444 + Redback IT Pty Ltd + Rodney Chan + rbcc&redback.com.au +14445 + Elecs Industry Co., Ltd. + Shinji Yokoi + yokoi&elecs.co.jp +14446 + INDEO GmbH + Marian Sopko + marian.sopko&indeo.at +14447 + Spektra Group Ltd + Allan MacLean + allan.maclean&spektra.co.uk +14448 + Iatrogenix.com + Ray Deiniger + ray&iatrogenix.com +14449 + HitNet e.V. + Thomas Deselaes + thomasd&hitnet.rwth-aachen.de +14450 + Twicom + Kevin Twidle + kevin&twicom.com +14451 + Industrial Computing Ltd + William Dean + will-snmp&indcomp.co.uk +14452 + Everyone.net + Network Operations + netops&everyone.net +14453 + ISIS Networks + Ryan Hoegg + rhoegg&isisnetworks.net +14454 + Tel-Aviv University + Koby Schwartz + schwartz&aristo.tau.ac.il +14455 + IntercontinentalExchange + Greg Hamamgian + ghamamgian&intcx.com +14456 + Lemuria Asesores Informaticos C.A. + Ivan Aponte + iaponte&cantv.net +14457 + Lorex Industries, Inc. + Mu Wu + wum&lorex.com +14458 + ILIENT + Israel Lifshitz + israel&ilient.com +14459 + Hennessy Digital + John Hennessy + john&hennessy.net.au +14460 + B Digital + John Hennessy + john.hennessy&b-online.com.au +14461 + Keyware Solutions Inc. + Takayuki Sonobe + c-editor&keyware.co.jp +14462 + Psgue Corporation + Lyno Sullivan + lls02m&psgue.com +14463 + Tour2000 co., LTD + Ahn Jun-Mo + webmaster&tour2000.co.kr +14464 + ValueCommerce + Nathan Ollerenshaw + iana&valuecommerce.ne.jp +14465 + NOC Grove + Stefan Immel + si&grove.de +14466 + Heinrich-Heine-Universitaet Duesseldorf + Klaus Szymanski + szymanski&uni-duesseldorf.de +14467 + STIME + Georges Fointiat + gfointiat&mousquetaires.com +14468 + Marconi Integrated Systems + Dave Crooks + dave.crook&marconi.com +14469 + Cyclone Computer Consultants Ltd + Stuart Slade + stuartslade&hotmail.com +14470 + Plahl Karl Consult + Joachim Plahl + joachim&plahl-karl.de +14471 + Wasabi Systems + William Studenmund + wrstuden&wasabisystems.com +14472 + VIA NET.WORKS Services AG + Markus Wild + mw&viaservices.ch +14473 + Queen's University + Andrew Hooper + hooper&post.queensu.ca +14474 + NextAxiom Technology, Inc. + Dave Clifford + dave_clifford&nextaxiom.com +14475 + Ralph DeHart + Ralph DeHart + rdehart&speakeasy.net +14476 + Network Executive Software, Inc. 
+ Dave Reiland + dave.reiland&netex.com +14477 + RELIX + Markus Heller + heller&relix.de +14478 + University of Melbourne + Shane Hjorth + shane.hjorth&unimelb.edu.au +14479 + Korea National Defence University + Kwang Young Kim + kykim12&kndu.ac.kr +14480 + norisbank AG + Torsten Greiner + torsten.greiner&norisbank.de +14481 + KOBIL Systems GmbH + Markus Tak + tak&kobil.de +14482 + System 5 + Andrea Albarelli + andrea&system5.it +14483 + UNIC Internet Consulting + Rudolf Meyer + rudolf.meyer&unic.ch +14484 + Digital V6 Corp. + Jimmy Lee + jimmy.lee&digitalv6.com +14485 + Sanmina-SCI Inc + Amy Arnts + amy.arnts&sanmina-sci.com +14486 + Marlboro College + Jared Benedict + jared&marlboro.edu +14487 + Erkkila Consulting + Paul Erkkila + pee&erkkila.org +14488 + Darkmist + Ed Schaller + schallee&darkmist.net +14489 + mcgrew.net Inc. + Kelly McGrew + kelly&mcgrew.net +14490 + Ariadne Internet Services, Inc. + Dale Worley + worley&ariadne.com +14491 + Standard School District + Jeff Davis + jdavis&standard.k12.ca.us +14492 + Dialogue Technology Corporation + Jimmy Leu + jimmy&mail.dialogue.com.tw +14493 + Aurigo Software Technologies + Anupam Singh + anupam&aurgio.com +14494 + ASPedi GmbH + Kay Melchinger + kme&aspedi.com +14495 + Hans Lie + Hans Lie + hans.lie&start.no +14496 + SwedishAmerican Health System Corporation + Roger Cook + inetadmin&swedishamerican.org +14497 + Gist Communications, Inc. + Kristofer Spinka + kspinka&gist.com +14498 + Style Networks, Inc. + Kristofer Spinka + kspinka&style.net +14499 + e-Bank + Michael LeWinter + michael.lewinter&ebancllc.com +14500 + Warsaw University + ICM Networking Group + info&net.icm.edu.pl +14501 + Blue Coat Systems + Mihir Lala + mihir.lala&bluecoat.com +14502 + Sam Asher Computing Services, Inc. + Tim Oertel + t_oertel&samasher.com +14503 + Asylum Visual Effects + Kelly Bergougnoux + kelly&asylumfx.com +14504 + Packet Signal Corporation + Doug Bolling + ddjbolling&charter.net +14505 + Spiral Designs Inc. + Dan Pringle + digitals&netone.com +14506 + Ministerul Apararii Nationale + Ion Ciobanu + svsi&mapn.ro +14507 + Russian American Glass Company - RASKO Ltd + Dmitry Glushenok + glush&rasko.ru +14508 + Teknovus + Manoj Punamia + manoj.punamia&teknovus.com +14509 + Mercateo AG + Jurgen Rudolph + iana&mercateo.de +14510 + Astra Datentechnik GmbH + Georg Stenzel + annette.schoene&astra-gmbh.de +14511 + London Metropolitan University + Paul Walk + p.walk&londonmet.ac.uk +14512 + Aspivia (Pty) Ltd. + Jaun Terblanche + jterblanche&aspivia.com +14513 + Webflex + Robert Wielinga + rwielinga&bigfoot.com +14514 + DAVE s.r.l. + Stefano Dal Poz + stefano.dalpoz&dave.eu +14515 + GenoGRID + Michel Mac Wing + mmacwing&irisa.fr +14516 + Sanoma Budapest Publishing Ltd. + Zsolt Magyar + zs.magyar&sanomabp.hu +14517 + Tactel AB + Pontus Fuchs + pontus.fuchs&tactel.se +14518 + Pihana Pacific, Inc. + Michael Warne + mike.warne&pihana.com +14519 + Washington University + Dan Zweifel + pen&aismail.wustl.edu +14520 + Wolfram Research Inc. 
+ Ken Miller + netadmin&wolfram.com +14521 + H K Moore Limited + William Moore + wmoore&hkmoore.com +14522 + GVI Medical Devices + William Greathouse + william.greathouse&gvimd.com +14523 + hermit.org + Ian Smith + clint&hermit.org +14524 + Pharos Systems + Paul Reddy + paul&pharos.com +14525 + Trapeze Networks, Inc + Lol Grant + lol&trpz.com +14526 + Safenet Informatica Ltda + Fernando Albuquerque de Araujo Filho + safenet&bol.com.br +14527 + iWiring + Dan Shoop + shoop&iwing.net +14528 + Newlands College + John Topp + jtopp&newlands.school.nz +14529 + FHPWireless + Peter Sugiarto + peter.sugiarto&fhpwireless.com +14530 + Robert Baptista + Robert Baptista + robert&thebabtistas.com +14531 + Blackwell Consulting Services + John O'Leary + john.oleary&bcsinc.com +14532 + The Asylum + Sean Hanson + admin&theasylum.org +14533 + Apoteket AB + Bjorn Sjoholm + bear&europoint.se +14534 + SCHIFFKO GmbH + Axel Pruin + axel.pruin&schiffko.com +14535 + Europoint Networking + Jan Wunsche + jan&europoint.se +14536 + Inceritus + Dan Karlsson + dan.karlsson&inceritus.com +14537 + TMP Consultoria de Informatica S/C Ltda + Sergio de Souza Prallon + iana&tmp.com.br +14538 + Beijing SinceTimes Communication Co., Ltd. + Eric Zhao + gzhao&sincetimes.com.cn +14539 + realprogrammers.com + Paul Makepeace + cid&realprogrammers.com +14540 + Leszek Pisarek + Leszek Pisarek + leszek&man.poznan.pl +14541 + DeanO + Dean Rochester + rockie12&dtnspeed.net +14542 + Verizon ESG + Alan Watt + alan.watt&verizon.com +14543 + N30D + Ivan Aguilar + aguilar&n30d.com +14544 + Shanghai Posts&Telecommunications Equipment CO., Ltd. + Rison Han + jishuzx&shpte.com +14545 + ICT, Academy of Science, CHINA + Li Zonghai + lzh&ncic.ac.cn +14546 + NTO "IRE-POLUS" + D.E Kondrashin + dkondrashin&ntoire-polus.ru +14547 + Neue Zurcher Zeitung + Giovanni Pelliccia + g.pelliccia&nzz.ch +14548 + Autovalley + Dominique Garnier + dominique.garnier&autovalley.fr +14549 + Awanim + JP Speyart van Woerden + europe&awanim.com +14550 + Metaware S.p.A + C. Calisti + c.calisti&metaware.it +14551 + GSI mbH + Joerg-Uwe Diener + gsi&gsi-berlin.de +14552 + Rison Craft + Rison Han + risonhan&sina.com +14553 + DaimlerChrysler Corp. + Simon H. Wu + suw&daimlerchrysler.com +14554 + VU MIF + Vilius Alisauskas + vi&uosis.mif.vu.lt +14555 + CMGCC + Brent Calder + calder&uthscsa.edu +14556 + Graven Software + S. Edward Kelly + sekelly&gravensoftware.com +14557 + Infowave Software Inc. + Alex Cruise + acruise&infowave.com +14558 + SunGard BSR, Inc. + Jonathan Carpenter + jonathan.carpenter&sungardbsr.com +14559 + Mondru AB + Jens Jakobsen + info&mondru.com +14560 + TSCNet, Inc. + Mike Jackson + mhjack&tscnet.com +14561 + Vocent Solutions + Ian Forde + sysadmin&vocent.com +14562 + e-Project Solutions Pte Ltd + Tan Hau Jye + haujye&epsoln.com +14563 + National Information and Communication Technology Australia + Kim Holburn + kim.holburn&anu.edu.au +14564 + elbers.com + Henk Elbers + henk&elbers.com +14565 + Z-Force, Inc. + Peter Nickolov + peter&z-force.com +14566 + BMF + Heike Fehlau + heike.fehlau&bmf.bund.de +14567 + Coventry University Enterprises Ltd + Dave Boyton + d.boyton&cad.coventry.ac.uk +14568 + Weiser + Michael Weiser + michael&weiser.saale-net.de +14569 + Heidelberger Druckmaschinen AG + Dieter Meergraf + dieter.meergraf&heidelberg.com +14570 + Majentis Technologies Inc. + Gerald Brandt + gbr&majentis.com +14571 + Atomz Corporation + Ben Smith + support&atomz.com +14572 + EDIWISE + Allen Pyburn + allen.pyburn&ediwise.com +14573 + Abest Research Corp. 
+ Eric Wong + ewong&abestrch.com +14574 + Cellicium SA + Patrick Remy + support&cellicium.com +14575 + Attention Software, Inc. + Kelly Oglesby + koglesby&attentionsoftware.com +14576 + Total Network + Hugo Martinez + hmartinez&cti.com.ar +14577 + ForestExpress, LLC + John Goetzman + jgoetzman&forestexpress.com +14578 + Geac Library Solutions + Ben Fersenheim + ben.fersenheim&geac.com +14579 + Q Linux Solutions, Inc. + Michael J. Maravillo + mike.maravillo&q-linux.com +14580 + The Manticore Group + Devin Watson + dmwatson&comcast.net +14581 + ManaSoft + Jeff Barnes + jeff.barnes&mnsft.com +14582 + LUZ Engenharia Financeira + Ailton Andrade de Oliveria + ailton&luz-ef.com +14583 + TruTeq Wireless + Deon van der Merwe / Tiann van Aardt + ianasnmp&truteq.co.za +14584 + Celestix Networks + KokMeng Loh + kokmeng&celestix.com +14585 + Compass Internet + Ronald C. Rivera + ronald&compass.com.ph +14586 + Tmax soft + Hyunik Na + hina&tmax.co.kr +14587 + Technological Centre "Tushino" CB RF + Leonid Toker + tlb&cbr.ru +14588 + Neosymmetria + Aurimas Valionis + av&neosymmetria.com +14589 + Sicap Ltd + Louis-Pierre Gagnaux + louis-pierre.gagnaux&sicap.com +14590 + Passave Inc. + Ariel Maislos + ariel.maislos&passave.com +14591 + ncc network consulting GmbH + Melhem El-Achkar + mel&ncc-consulting.de +14592 + iOrange - Internet Software and Service GmbH + Bernhard Lukassen + bernhard.lukassen&iorange.net +14593 + Dyode + David Jones + djjones419&aol.com +14594 + Rudolf Bahr + Rudolf Bahr + quasi&web.de +14595 + BillReilly + William Reilly + william.j.reilly&verizon.net +14596 + Martin Wismans GmbH + Benedikt Wismans + benedikt.wismans&wisnams.com +14597 + DolphinSearch, Inc. + Kelly Roestel + kelly&dolphinsearch.com +14598 + USDS + Kay Schaefer + kay.schaefer&web.de +14599 + Corman + Tome Serge + serge.tome&corman.be +14600 + CryptAll Limited + Dermot Dunnion + ddunnion&cryptall.com +14601 + Baazee.com + Sureshbabu Nair + suresh&baazee.com +14602 + Interval Media + Jean-Marie White + jmwhite&interval.com +14603 + Cyberport.de GmbH + Silvio Krah + skrah&cyberport.de +14604 + Commvault Systems + Muath Ali + mali&commvault.com +14605 + Wesleyan University + Steve Machuga + smachuga&wesleyan.edu +14606 + Hewlett-Ward Inc. + Stephen Ward + stephen.ward&hewlett-ward.com +14607 + Futuro Technologies Inc + Jose Valverde + jose&futurotech.com +14608 + Transfinity Corp + Robert Dempsey + bdempsey&transfinity.com +14609 + Todito.com S.A. de C.V. + Elias Ortiz Lopez + elias&iscor.com.mx +14610 + Solutions First + David Kempe + david&solutionsfirst.com.au +14611 + Web-Alm GmbH + Wolfgang Ocker + weo&web-alm.net +14612 + Linköping University + Kent Engström + komm&unit.liu.se +14613 + Ecora + Debbie Glidden + debbie.glidden&ecora.com +14614 + AvantCom Corporation + Larry Simmons + lsimmons&avantcom.com +14615 + Vivato Inc. + Larry Simmons + larry_simmons&vivato.net +14616 + University of Arkansas at Little Rock + Rogers Davis + redavis1&ualr.edu +14617 + Diseños Electronicos + Juan C Galvez + jcgalvez&telesal.net +14618 + Mennen Medical + Nurit Weiss + nurit&mmi.co.il +14619 + Quadrox Inc. + Luc Vancraen + luc&quadrox.be +14620 + Saint-Gobain + Marc Pietrala + marc.pietrala&saint-gobain.com +14621 + Wendy's International + Becky Bramble + becky_bramble&wendys.com +14622 + VTLS Inc + Fred A. 
Miller + millert&vtls.com +14623 + Riverstyx Internet + Alan Evetts + alan&riverstyx.net +14624 + vergerus + Erik Lickerman + elickerman&ameritech.net +14625 + Servecomm + Janus Hao + janush&pacbell.net +14626 + Reuters + Deyan Bektchiev + deyan.bektchiev&reuters.com +14627 + Idt Corp + Serge Aleynikov + serge&hq.idt.net +14628 + ResEl + Benoit Peccatte + benoit.peccatte&enst-bretagne.fr +14629 + SourceTech AB + Bjarne Ingelsson + bjarne&sourcetech.se +14630 + Binary Systems Inc + Gerry Dubois + gerry&binary-systems.com +14631 + American Institute of Physics + Eileen Coles + ecoles&aip.org +14632 + Via Internaathional + Rodrigo Zarth + rodrigo&via-intl.com.br +14633 + Nuberry Ltd + David S Roberts + dsroberts&nuberry.co.uk +14634 + Itool Systems + Bruno Medici + bmedici&atomis.com +14635 + daviesco + Derek Davies + ddavies&ddavies.net +14636 + Ontain Corporation + Dave Edelstein + dave.edelstein&ontain.com +14637 + Solectron + Trevor MacHattie + TrevorMacHattie&solectron.com +14638 + Concurrent Reality Pty Ltd + Andrew Kent + andrew&reality.com.au +14639 + HXTP + John Havard + jhavard&hxtp.com +14640 + Wurley Solutions + Deon George + dizzy&wurley.net +14641 + Raymond James Financial + Don Richmond + drichmond&it.rjf.com +14642 + 104.com + Nobuhiro Shitoh + ns&104.com +14643 + Future Beacon, Inc. + Jim Trek + admin&futurebeacon.com +14644 + HiFlyCom Corp. + Lei Xiang Dong + lmd&hiflycom.com.cn +14645 + W-ibeda High Tech. Develop. Co. Ltd. + Yang Aihe + amyyang&w-ibeda.com +14646 + buptNKL + Zhang Lun Yong + folke&sohu.com +14647 + Rossberry Consulting + Jim Wildman + jim&rossberry.com +14648 + SurfNShop E-Commerce Inc. + Denis Gilbert + denis.gilbert&surfnshop.com +14649 + holleyism.com + Adam Holley + snmp&holleyism.com +14650 + Lead Up Software + David Horton + david_horton&acslink.net.au +14651 + T-Systems International GmbH SL NWS + Volker Görg + V.Goerg&t-online.de +14652 + University of St. Thomas + Peter Downs + padowns&stthomas.edu +14653 + Metaways Infosystems GmbH + Hermann Thaele + h.thaele&metaways.de +14654 + MUZO, a.s. + Tomas Fencl + tfencl&muzo.com +14655 + Gatespace + Björn Johansson + bjorn.johansson&gatespace.com +14656 + 4IT S.A. + Krzysztof Woznica + kw&b3.4it.pl +14657 + Universita' degli Studi di Parma + Fausto Pagani + postmaster&unipr.it +14658 + Dieter Kluenter System Beratung + Dieter Kluenter + dkluenter&gmx.de +14659 + Vividlink Detlef Ingendorf + Detlef Ingendorf + inge&vividlink.de +14660 + Fachhochschule Wiesbaden + Markus Schmid + schmid&informatik.fh-wiesbaden.de +14661 + KAV-EMB + Franz Hoheiser-Pförtner + franz.hoheiser-pfoertner&wienkav.at +14662 + Bid-Owl Bildung im Dialog Ostwestfalen-Lippe + Harald Selke + hase&upb.de +14663 + IXIF Limited + Mark Gillett + info&ixif.com +14664 + Init Systems + Brian Jonnes + brian&init.co.za +14665 + Comlab Telecommunications inc + John Ahern + jahern&comlab.com +14666 + McLeod Lake Indian Band + Keith Mastin + kmastin&mcleodlake.com +14667 + NextiraOne LLC + Jose Rodriguez + jose.rodriguez&ctilab.com +14668 + ViaSat, Inc., Acceleration Research and Technology (formerly 'Intelligent Compression Technologies') + David Lerner + dlerner&viasat.com +14669 + Systemhaus x-Team + Adalbert Michelic + am+iana&x-team.at +14670 + MTX Networks Inc. 
+ Shawn Clark + sc&mtxnet.com +14671 + GenFour Communications, LLC + Kurt Wise + kurt.wise&blueshifttelecom.com +14672 + Datamaxx Applied Technologies Inc + Tuomo Stauffer + Tuomo.Stauffer&datamaxx.com +14673 + Prefeitura Municipal de Alegrete + Júlio César de Carvalho Lopes + jccl&via-rs.net +14674 + InfoQuality Consultoria em Informática + Júlio César de Carvalho Lopes + jccl&via-rs.net +14675 + Centell Kingdom Technologies Corporation Shanghai Branch + Qianjun + lanziqian&hotmail.com +14676 + Obsidian Systems + Elardus Viljoen + elardus&obsidianlabs.com +14677 + geekhosting + F Meijer + frank&geekhosting.nl +14678 + BFW Informationssysteme GmbH + Markus Felten + Markus.Felten&bfw-informationssysteme.de +14679 + Hansard International Ltd + Kevin Luff + kevin.luff&hansard.com +14680 + Bright Grey + Jim McCann + JMCCANN&SCOTTISHLIFE.CO.UK +14681 + Tixoronet Vertriebsgesellschaft mbH + Karsten Beckmann + snmp&tixoro.net +14682 + Asopos de Vliet + Remco Nabuurs + rnabuurs&liacs.nl +14683 + Centell Kingdom Technologies Corporation Shanghai + Li Ying + liying&centellkingdom.com +14684 + System Management Network + Thomas Wahl + ThomasWahl&system-management.dk +14685 + DataPower Technology, Inc. + Eugene Kuznetsov + eugene&datapower.com +14686 + Megasoft Limited + Srilalitha GowriSankaran + srilalitha.gowrisankaran&megasoft.com +14687 + Halion Systems + John Brinnand + john_brinnand&yahoo.com +14688 + Systems Experience + Jason J. Penn + penn&netcom.com +14689 + Urbancode Software Development, Inc. + Maciej Zawadzki + mbz&urbancode.com +14690 + Greymoose + Gary Bailey + gary&greymoose.co.uk +14691 + Fingerlos + Gerald Fingerlos + gerald&fingerlos.at +14692 + Integrators AG + Patrick Spruyt + ps&integrators.ch +14693 + CISL Systems Ltd. + Juan Borrás + juan.borras&cislgroup.com +14694 + bmd wireless AG + Marc Woog + marc.woog&bmdwireless.com +14695 + Herrmannsdoerfer Software Entwicklung + Harald Herrmannsdörfer + harry&herrmannsdoerfer.de +14696 + Absolut OK + Nenad Radosavljevic + nenadr&absolutok.com +14697 + RRDtool + Tobias Oetiker + tobi+rrdtool&oetiker.ch +14698 + CSRI "Elektropribor" + Ivan Stepanov + elprib&online.ru +14699 + Hagen Software Inc. + Colin Keith + snmpadmin&hagenhosting.com +14700 + frontiertech + Shen cheng + shencheng&frontiertech.info +14701 + Persist Technologies + Gagan Bhatia + gbhatia&persistcorp.com +14702 + Neusoft Digital Medical System CO., LTD. + Huaizhi Wang + wanghz&neusoft.com +14703 + MIYAKAWA ELECTRIC WORKS LTD. + Masayoshi Soga + m.soga&msk.co.jp +14704 + PingoS e.V. + Andreas Kalbitz + felix&schulnetz.org +14705 + Province of British Columbia + Colin Kopp + ipadmin&gov.bc.ca +14706 + sFlow.org + Peter Phaal + peter.phaal&sflow.org +14707 + Tatara Systems, Inc. + Christine Ross + chris&tatarasystems.com +14708 + Graceland University + James S. Jones + jsjones&graceland.edu +14709 + Reactivity, Inc. + Jared Smith-Mickelson + jaredsm&alum.mit.edu +14710 + Taua Biomatica + Eduardo Rosemberg + eduardorosemberg&taua-biomatica.com.br +14711 + Vaka Technology Limited + Owen Wong + owen.wong&vaka.com +14712 + Sascha Growe + Sascha Growe + growe&webflash.dyndns.org +14713 + Blacksburg Electronic Village + Richard Phipps + hostmaster&bev.net +14714 + Questra Corporation + Scott Mcdermott + oidadmin&questra.com +14715 + ManyStreams Inc.
+ Jyoti Soni + jyoti&noida.manystreams.com +14716 + British Columbia Institute of Technology + Bruce Link + bruce_link&bcit.ca +14717 + sunHosting company + Alex Lam Kam Chor + alexlam&msolutions.com.hk +14718 + BKit Gruppen AB + Stefan Bystam + stefan.bystam&bkit.se +14719 + Harmony Gold Operations + Murray Bryant + murray.bryant&harmonygold.com.au +14720 + HD-info + Zoran Hrvoic + zoran&hdinfo.hr +14721 + Techem Energy Services GmbH + Sebastian Fingerloos + sebastian.fingerloos&techem.de +14722 + Peter Shillan + Peter Shillan + objectwiz&lycos.co.uk +14723 + canal systems GmbH + Mauro Canal + mcanal&canalsystems.net +14724 + Business Objects + Marcel Hassenforder + marcel.hassenforder&businessobjects.com +14725 + SaM Solutions + Alexey Yantchuk + a.yantchuk&sam-solutions.net +14726 + Server Side S.A + Alejandro Guerrieri + aguerrieri&digimedia.com.ar +14727 + Henrik Edlund + Henrik Edlund + henrik&edlund.org +14728 + Pronet sp. j. + Dariusz Mizerny + dmizerny&pronet.pl +14729 + Innerwall + Steve Carlander + scarlander&innerwall.com +14730 + Telecom Italia Lab + Galliano Sergio + Sergio.Galliano&tilab.com +14731 + Christian Kuelker + Christian Kuelker + ckuelker&rpm.uni-bielefeld.de +14732 + Nodots Development, Inc. + Ken Riley + kenr&nodots.com +14733 + YASKAWA INFORMATION SYSTEMS Corporation + WATANABE Daisuke + watanabe&ysknet.co.jp +14734 + FLOW Communications + Michael S Cox + mcox&flow.com.au +14735 + ESOO + Victor V Ismakaev + vic&mail.esoo.ru +14736 + ghip systems GmbH + Astrid Hanssen + astrid.hanssen&ghipsystems.com +14737 + Global Grid Forum + Thomas Brown + tbrown&gridforum.org +14738 + Henderson & Co + Philip Henderson + philhenderson&onetel.com +14739 + rleague.com + Chris Riediger + chris&rleague.com +14740 + TechAngle Inc. + David Hahn + dhahn&techangle.com +14741 + Catalyst International, Inc. + Toby Meehan + tmeehan&mke.catalystwms.com +14742 + Digital Multitools Inc. + Peter D. Gray + peter&dmtz.com +14743 + Wachovia Bank + Steven H. Roberts + Steven.Roberts&wachovia.com +14744 + TeleNova Research and Development Centre + Eduardo D'Avila + edavila&telenova.net +14745 + Pumpkin Networks + Eunsoo Park + espark&pumpkinnet.com +14746 + Interstorm, Inc. + Michael D. Warner + michael.warner&interstorm.com +14747 + Vision Internet Services + Andrew Veitch + ajv-iana&visn.co.uk +14748 + Computer Support Systems Pty. Ltd. + Michael Leggett + mleggett&csspl.com.au +14749 + University of Rochester + Sean Singh + sean.singh&rochester.edu +14750 + Aware Servers, Inc. + Amit Shah + shahamitj&hotmail.com +14751 + SkyWare Communications Limited + Nelson Lee + nelson&sky-ware.com +14752 + Proware Technology Corp. + Tony Chang + tony&proware.com.tw +14753 + ppyworld + Patrick Pyfferoen + patrick&ppyworld.com +14754 + Koch Mikrosysteme AG + Sebastiaan Krist + skr&kms.ch +14755 + LYNX Technik AG + Henning Schmidt + Henning.Schmidt&lynx-technik.com +14756 + ProgTec GmbH + Markus Ribi + mribi&progtec.ch +14757 + Groiss Informatics GmbH + Michael Dobrovnik + michi&groiss.com +14758 + BPS Co. Ltd. + Hersonsky Mihail + hmike&smeda.ru +14759 + Adesium Réseaux et Services + D'Aversa Pascal + pascal.d_aversa&adesium.com +14760 + Wincor Nixdorf International GmbH + Rolf Müller + rolf.mueller&wincor-nixdorf.com +14761 + Ubisecure Solutions + Petteri Stenius + petteri.stenius&ubisecure.com +14762 + intarsys consulting GmbH + Dr. Bernd Wild + bernd.wild&intarsys.de +14763 + Video-Net Systems + Gaetan Gilliot + gaetan.gilliot&video-net.co.za +14764 + MisrNet, S.A.E. 
+ Ihab Hussein + ihussein&misrnet.com.eg +14765 + Grafix Profesional S.R.L. + Ciprian Asmarandei + ciprian.asmarandei&grafix.ro +14766 + AVICONSULT + Eric PARTHUISOT + eric.parthuisot&aviconsult.fr +14767 + BWXT-PANTEX + Edward Claxton + eclaxton&pantex.com +14768 + AGMarine, Inc. + Devon Liles + devonl&agmarine.com +14769 + Sybari Software, Inc. + Joseph Barilla + jbarilla&sybari.com +14770 + Teleport Consulting and Systemmangement + Rainer Rudigier + rainer&tele.net +14771 + Computer Sciences Corporation + Bill Annocki + wannocki&csc.com +14772 + Neptune Project + Michael Kenney + mikek&apl.washington.edu +14773 + Engitech Ltd. + Andrea Bacchetta + andrea.bacchetta&engitech.ie +14774 + GNU Internet + James Golovich + james&gnuinter.net +14775 + Peak Internet + Peak Hostmaster + hostmaster&peak.org +14776 + Mystic Coders + Andrew Lombardi + andrew&mysticcoders.com +14777 + izenpe s.a. + Iñigo Barreira + inigo_barreira&hotmail.com +14778 + GEV - Ecole des Mines + Laurent Menu-Kerforn + laurent.menu-kerforn&emn.fr +14779 + e2 Technology Inc. + Yen-Shuo Su + MichaelSu&e2Tech.com +14780 + KAEVEE + Venkatesh. K + venkatesh&kaevee.com +14781 + ThoughtPort Authority of Chicacgo + Matthew Sayler + sayler&speedsite.com +14782 + Cole Innovations, Inc. + Frank R. Cole, Jr. + frank&coleinnovations.com +14783 + Asociacion para la Promocion del Arte a traves de Internet + Manuel Mollar + mollar&mobelt.com +14784 + AltaTrust + Herve Doreau + herve.doreau&alta-trust.com +14785 + Five4321 + James Ruthven + james&fusionit.com +14786 + Open Network Solutions Ltd + Les Smithson + lsmithso&hare.demon.co.uk +14787 + ZYM Linux + Zhao yongming + condor&webmaster.com.cn +14788 + William D. Petitt + Bill Petitt + wdpetitt&yahoo.com +14789 + Principal Financial Group + Craig Treptow + treptow.craig&principal.com +14790 + Statsbiblioteket + Jens Henrik Leonhard Jensen + jhlj&statsbiblioteket.dk +14791 + Nakua Technologies + Luis Rodriguez Berzosa + lrodriguez&nakua.com +14792 + Data Conseil + Michael Monserrat + mmonserrat&dataconseils.fr +14793 + System-Entwicklung Dietrich Schulten + Dietrich Schulten + info&system-entwicklung.de +14794 + Banco Herrero + David Soler + dsoler&lacaixa.es +14795 + Intellocity + Thomas Lemmons + tom&intellocity.com +14796 + AUCOS elektronische Geraete GmbH + Mr. Pelstring + franz-josef.pelstring&aucos.de +14797 + DynaStar Communications + Ed Page + epage&dynastarcom.com +14798 + NETdelivery Corporation + Andrew Diederich + andrew&netdelivery.com +14799 + StarNET Services + Marc-Andre' Husyk + MA.Husyk&starnet-services.biz +14800 + Legra Systems + Michael R. Cook + cook&legra.com +14801 + tangro software components gmbh + Hans-Ulrich Teufel + hut&tangro.de +14802 + Inder.Net + Mads Peter Bach + iana-contact&inder.net +14803 + Schubert Informationssysteme + Thomas Schubert + thomas&thomas-schubert.de +14804 + Innotrac Diagnostics Oy + Tom Javen + tom.javen&innotrac.fi +14805 + AFP7 + Klaus Pedersen + 9830&e.iha.dk +14806 + Tadiran Electronic Systems + Amir Berkovich + amirb&tadsys.com +14807 + FrIc-Net + Frank Ickstadt + frank.ickstadt&fricnet.de +14808 + The Sage Colleges + John Chao + chaoj&sage.edu +14809 + Datasul MED S.A. + Ricardo Alexandre de Oliveira + dcc2jsd&udesc.br +14810 + Advanced Digital Broadcast Ltd. + Jaroslaw Mirkowski + J.Mirkowski&adbglobal.com +14811 + Xilith LLC + Robin Berjon + robin.berjon&expway.fr +14812 + Cedars-Sinai Health Systems + Spencer L. SooHoo + spencer.soohoo&cshs.org +14813 + Fujitsu Transaction Solutions Inc. 
+ Gerry Rice + grice&ftxs.fujitsu.com +14814 + CaroTechnology BV + Jeroen Wortelboer + info&carotechnology.com +14815 + Datasul S.A. + Ricardo Alexandre de Oliveira + ricardo_oliveira&datasul.com.br +14816 + Thomas Polnik + Thomas Polnik + polnik&web.de +14817 + 4RF Communications Ltd. + Richard Laing + richard.laing&4rf.com +14818 + Voice Mobility, Inc. + Cliff McCollum + snmp&voicemobility.com +14819 + University of Cyprus + Marios Dikaiakos + grid&ucy.ac.cy +14820 + net outremer caledonie + Alain Cocconi + cocconi&net-outremer.nc +14821 + Stibo Complete A/S + Palle Jacobsen + paja&stibo.com +14822 + FaJo.de + Falk John + falk.john&fajo.de +14823 + Aruba, a Hewlett Packard Enterprise company + Aruba External Engineering Registrations + aruba-ext-eng-reg&hpe.com +14824 + Centaur Technology Inc + Eric Anderson + anderson&centtech.com +14825 + Aldata Solutions S.A. + Robert Bakic + rbakic&gold-solutions.com +14826 + Adage Networks + Jaswant Pujari + jpujari&adagenetworks.com +14827 + Reserved + RFC-pti-pen-registration-10 + ---none--- +14828 + Sonorys Technology GmbH + Günther Niedoba + Guenther.Niedoba&sonorys.at +14829 + Centra Software + Vikas Sinha + vsinha&centra.com +14830 + UPtime Systemlösungen + David Höhn + dh&uptime.at +14831 + voetter.at + Armin Voetter + armin&voetter.at +14832 + ESO Consortium + Jeff Case + case&snmp.com +14833 + Portal Solutions Technology, Inc. + Ken Geng + kgeng&1088.net +14834 + Konsec GmbH (formerly 'mediales GmbH') + Andreas Mack + registry&konsec.com +14835 + INFOTEC + Sergio Martinez + serch&infotec.com.mx +14836 + VARTA Microbattery GmbH + Thomas Huell + VARTA-certificate&varta-microbattery.com +14837 + Trenchant Consulting, LLC + Kevin Wanner + wanner&trenchantconsulting.com +14838 + National Management & Organization SA + Mr. Kostas Papageorgiou + kpapag&ethnokarta.nbg.gr +14839 + Newtech-BT Ltd + Georgi Manev + office&newtech-bt.bg +14840 + Smartwave SA + alban meunier + ameunier&smartwavesa.com +14841 + LEA (Laboratoire Europeen ADSL) + Regis Urvoy + regis.urvoy&leacom.fr +14842 + LANergy + David Webster + davidw&lanergy.com +14843 + Satnet SARL + Alain Cocconi + cocconi&satnet.nc +14844 + Upfront Systems + Roché Compaan + roche&upfrontsystems.co.za +14845 + Engineering Ingegneria Informatica S.p.A. + Sergio Raspaolo + sysman&eng.it +14846 + Padtec Optical Components and Systems + Daniel Araujo + daniel&padtec.com.br +14847 + Order of Preachers + Michael J O'Rourke + mike.orourke&op.org +14848 + better networks + Lutz Schulze + lschulze&betternetworks.de +14849 + Corporate Computer Services, Inc. + Gary Roediger + roediger&fnal.gov +14850 + Arc Solutions Limited + Muhammad Usman + umuhammad&arcsolutions.com +14851 + Storage Networking Industry Association + Arnold Jones + td&snia.org +14852 + Kinpo Electronics, Inc. + WU WU-HSIAN + stn&kinpo.com.tw +14853 + CipherQuest Ltd. + Michael Garceau + mgarceau&cipherquest.com +14854 + Beijing Hisense Digi_Tech Co.,Ltd. + JiangBin + jiangbin&hisencyber.com +14855 + CERAMISTA Recom Serviex + Marco Antonio Reginatto + reginato&tnsys.com.br +14856 + Trinitel Corporation + Eric Anderson + eric&trinitel.com +14857 + XCAT Co.,Ltd.
+ Takehiko Senoo + tseno&xcat.co.jp +14858 + Azundris Consulting + Tatjana Azundris von Nürnberg + ldap&azundris.com +14859 + Lawo AG + Stefan Mutschler + stefan.mutschler&lawo.de +14860 + GWS mbH + Ludger Fuehner + xgwsfue&gws-muenster.de +14861 + Britannia Building Society + Jack Dempsey + jack.dempsey&britannia.co.uk +14862 + Ministerio de Administraciones Públicas + Isabel Fábregas Reigosa + isabel.fabregas&map.es +14863 + ATMAVA Ltd + Joseph Lee + info&atmava.org +14864 + TNCE + Jason Tsai + jason_tsai&tnce.com.tw +14865 + Interplex Telecom + Dominic Blais + db&interplex.ca +14866 + Cottonwood Technology Group, Inc. + Marian Cantu + cantu&ctgi.com +14867 + Halcyon Software Limited + Richard Harriss + richard.harriss&halcyonsoftware.com +14868 + MAV INFORMATIKA Ltd. + Róbert Kisteleki + kistelekir&mavinformatika.hu +14869 + Saudi Telecom + Majed S. Alsubaie + mssubaie&stc.com.sa +14870 + AVAYA + Vasanth Ignaci + vignaci&avaya.com +14871 + Inter-Land.Net + Frederic Rouyre + rfr&inter-land.net +14872 + AVK-AdherSIS Inc. + Jean-Philippe Dionne + jp.dionne&adhersis.ca +14873 + Broadband services + Larry Benson + lbenson&bsi-maps.com +14874 + Rockstorm Technology AB + Jens Nilsson + jens&rockstorm.se +14875 + Leadglobe + John Wagner + jsmwagner&yahoo.com +14876 + Open Text Corporation + Jason McPeak + jmcpeak&opentext.com +14877 + Plexus Technology Ltd. + Alun Mills + alun&plexus-technology.com +14878 + vcrDev + Vasco Rocha + ei97028&fe.up.pt +14879 + woko + Frank Sykes + frank&woko.net +14880 + Systemhaus Kalkhoff + Christian Kalkhoff + info&kalkhoff.net +14881 + QunaTech + Paul Wilson + elviscious&rmci.net +14882 + smartBridges + Nimesh Parikh + Nimesh&smartbridges.com +14883 + antek networks INC. + Howard Chang + howard&antek.com.tw +14884 + Exelmon Technologies + Makis Gaitanidis + makis&exelmon.com +14885 + JSC RPE "Polygon" + Andrey Lyubimcev + alyubimcev&plgn.ru +14886 + AIR Co., Ltd. + Soumu Team + admin-g&air.co.jp +14887 + Nomura Research Institute, Ltd. + Yuzo Ishida + y-ishida&nri.co.jp +14888 + Das Werk + Marcus Herbert + m.herbert&das-werk.de +14889 + openforum.us + George Scott + glscott&openforum.us +14890 + Team-Konzept Informationstechnologien GmbH & Co KG + Peter Ritter + domadm&team-konzept.de +14891 + Lawseal + Gordon Brewster + gordonbrewster&lawscot.org.uk +14892 + Netsynt S.p.A. + Gennaro Brosco + a.borioni&netsynt.it +14893 + NOXA + Bjoern Swierczek + bsw&noxa.de +14894 + Teatica + Felipe Camargo Machado + felipecm&zipmail.com.br +14895 + Propel Software Corporation + Steve Halliburton + shallib&propel.com +14896 + Attingo + Wichert Akkerman + iana&attingo.nl +14897 + York Consulting + Brent York + brent&york-consulting.com +14898 + Access Communications + Scott Wunsch + scott.wunsch&accesscomm.ca +14899 + Town of Suffield + Rob Hepner + rhepner&suffieldtownhall.com +14900 + Integrated Concepts International + Salina Fung + salina&icil.net +14901 + Vodafone Libertel NV + Edwin vd Heuvel + edwin.van.den.heuvel&vodafone.nl +14902 + DATAmark + Xavier Trochu + xavier&datamark.fr +14903 + Digirose Technology Co., Ltd. + Wenshan Liou + wsliou&digirose.com +14904 + TwTec + Tim Woermann + tw&outerworld.de +14905 + DEVAU Lemppenau GmbH + Prof. Dr. Lemppenau + wle&bocholt.fh-ge.de +14906 + NVISION, INC. + Bonnie A. Galvin + BonnieGalvin&nvision1.com +14907 + NOC-CCE-USP + Isabel Chagas + mit&usp.br +14908 + Upcast Inc + Chip Vanek + chip&upcast.com +14909 + Igenda Software + James Shipley + jshipley&igendasoft.com +14910 + IP Solutions, Inc. 
+ Timothy Carter + tim&ipsolutionsinc.com +14911 + RFP Depot, LLC. + John Koyle + jkoyle&rfpdepot.com +14912 + Smiths Aerospace + Gerald Van Baren + gerald.vanbaren&smiths-aerospace.com +14913 + Retriever Communications + Graham Carey + Graham.Carey&retriever.com.au +14914 + Vexus Consulting Group + Mike Glover + mike&vexus.ca +14915 + ASPire Technologies(Shenzhen) Ltd + Zhang Jin + zhangjin&aspire-tech.com +14916 + Utah Interactive, Inc. + Lin Salisbury + lin&utahinteractive.org +14917 + SimpleTech + Glenn Jystad + gjystad&simpletech.com +14918 + Algar Telecom Leste + Guilherme Freitas + guilherme.freitas&atl.com.br +14919 + Singlestep Technologies + Joshua McClintock + noc&singlestep.com +14920 + Mineco + Mikel Nelson + iana&mikelnelson.net +14921 + CTW Computer + Christian Ullrich + chris+ctw&chrullrich.de +14922 + Realnode Ltd + Mattias Nordstrom + nordstrom&realnode.com +14923 + Abilene Christian University + Hab Adkins + oid_info&acu.edu +14924 + The NewSof Group, Inc. + Mark A. Garcia + dnstech&newsof.com +14925 + Wavecentral, L.L.C. + Mark A. Garcia + mag&wavecentral.com +14926 + atraia.com + R. L. Menefee + randy&scheets.com +14927 + DISC, Universidad Católica del Norte + Eric Ross + eross&disc.ucn.cl +14928 + Unipulse + Carlos Takahashi + t-takahashi&unipulse.co.jp +14929 + Onscom Co., Ltd. + Kim Tae Woo + webmaster&onscom.co.kr +14930 + Solar Telecom. Technology Inc. + Chung Hyun Jang + chjang&solartt.co.kr +14931 + Victoria University of Technology + Director of IT + helpdesk&vu.edu.au +14932 + GuangZhou TopTeam Software Technology Co., Ltd. + Jimmy Leung + jimmy&gztt.net +14933 + Chung Yuan Christian University + eritta lin + eritta&seed.net.tw +14934 + Vodafone IT (Turkey) + Murat Balkas + o2.it&vodafone.com +14935 + FS Walker Hughes Limited + Norm Almond + norm.almond&fswalkerhughes.com +14936 + Eurotux Informática, SA + Ricardo Oliveira + info&eurotux.com +14937 + Itech + j.champliaud + j.champliaud&itech.fr +14938 + Exacom, Inc. + William Haskett + whaskett&exacom.com +14939 + StyleeB LLC + Austin Conger + styleegirl&yahoo.com +14940 + BlueCross and BlueShield of North Carolina + Stacey Moore + Stacey.Moore&BCBSNC.COM +14941 + Inflection Systems + Elizabeth Dougherty + edougherty&inflectionsystems.com +14942 + Avvio Networks + Leo Goyette + lgoyette&avvionetworks.com +14943 + INNOVATION Data Processing + Thomas J Meehan + tmeehan&fdrinnovation.com +14944 + Primal Technologies + Ravindra Conway + rconway&primaltech.com +14945 + JBX Designs Inc + John Boudreaux + johnb&jbx.com +14946 + Gothenburg University + Jonas Öberg + jonas&informatik.gu.se +14947 + Kyrgyz-Russian Slavic University + Sergey Ushakov + sergey&verify.kg +14948 + Hungarian Foreign Trade Bank + Mr. Andras Hargitai + hargitai.andras&mkb.hu +14949 + North Building Technologies Ltd. + Trevor Marson + tmarson&northbt.com +14950 + Servicios Digitales de Certificacion, S.L. + Alberto Marco Montoya + mollar&mobelt.com +14951 + Efacec Energia, Máquinas e Equipamentos Eléctricos, S.A. + Ana Aleixo + ana.aleixo&efacec.com +14952 + Eyeheight Ltd. 
+ Simon Pegg + simon&eyeheight.com +14953 + Serio Ltd + Pete Williams + petew&seriosoft.com +14954 + Cingular Wireless + Judy Espejo + judy.espejo&cingular.com +14955 + CANAM + OLIVIER DELAVEAU + oid&canam.fr +14956 + NetGate SL + Pablo Raíz + wombatpal&hotmail.com +14957 + Nissan North America + Fred Goure + fred.goure&nissan-usa.com +14958 + PSC technology GmbH + Kay Schäfer + kay.schaefer&web.de +14959 + CBR Yazılım danışmanlık A.Ş + Özden Turan + ertugrul&cbr.com.tr +14960 + Undernet + Perry Lorier + isomer&undernet.org +14961 + Millennium Technology Limited + Andrew N Dowden + iana-oid&millentech.com +14962 + Ingosstrakh Ltd. + Vitaly A. Sergienko + vitalyas&ingos.ru +14963 + Telemetry & Communications Systems, Inc. + David Bendrihem + davidb&tcs.la +14964 + EFERIS + Francois Romieu + romieu.francois&eferis.com +14965 + Wideinfo Corporation + Zhenghui Zhou + zhouzhenghui&163.net +14966 + Linos Photonics GmbH & Co KG + Carsten Schaub + Schaub&Linos.DE +14967 + horsfall.org + Dave Horsfall + dave&horsfall.org +14968 + WYNIWYG + Xiao Hui LOO + xh.loo&wyniwyg.com +14969 + Columbia Ultimate + Jason Hurst + jasonh&colubs.com +14970 + CAM Internet + Jean-Simon Durand + noc&cam.org +14971 + dlh services + Donald Huebschman + dhuebsch&mac.com +14972 + Jeb + Jean-Edouard BABIN + Jeb&jeb.com.fr +14973 + Deeming.net + Jon Deeming + jon&deeming.net +14974 + Data Telecom OÜ + Marko Veelma + marko.veelma&data.ee +14975 + BRy Tecnologia S.A. + Carlos Francisco Tatara + tatara&bry.com.br +14976 + softmillennium + Mark Braksator + mbraksator&softmillennium.com +14977 + Wuerth Elektronik + Oliver Windt + oliver.windt&we-online.de +14978 + Commerzbank AG + Rainer Strobel + rainer.strobel&commerzbank.com +14979 + Core Tec Communications, LLC + John Anderson + john&gocoretec.com +14980 + Guidant Corporation + Mark Pramann + mark.pramann&guidant.com +14981 + Cogent Logic Corporation + Jeff Lawson + jeff&cogentlogic.com +14982 + iKu Systemhaus AG + Kurt Huwig + iana&huwig.de +14983 + Woaf Tech Ltd + David Flynn + davidf&woaf.net +14984 + PoCo + Bill Powell + snmp&poco.co.uk +14985 + Hsi Hu Serion High School + KoChen Hu + rex&mail.hhsh.chc.edu.tw +14986 + Nextreaming Corporation + Tomy Jung + tomy&nextreaming.com +14987 + E3Networks,LTD + Naofumi Tamura + tamura&e3net.co.jp +14988 + MikroTik + John Tully + tully&mikrotik.com +14989 + Cinesite + Technical Services + support&cinesite.com +14990 + inm magic + Danny Thuering + danny&inm-magic.de +14991 + RESA Airport Data Systems + Mikael Hallkvist + Mikael.Hallkvist&resa.fr +14992 + Stagira + Philippe Hermoso + philippe&stagira.net +14993 + IGD + Dragana Likavec + dragana&igd.fhg.de +14994 + MITEQ, Inc. + Jeff Gorman + jgorman&miteq.com +14995 + Netinary + Gérard R. MICHEL + gerard.michel&netinary.com +14996 + Stille Design + Sevo Stille + Sevo_Stille&gmx.de +14997 + SOHOware, Inc. + Gale H. Moyer + gmoyer&sohoware.com +14998 + Transportation Security Administration + James Shipley + james.shipley&tsa.dot.gov +14999 + Codesic + Steve Ingersoll + steve.ingersoll&codesic.com +15000 + Kontron Canada Inc + Benoit Robert + Benoit.Robert&ca.kontron.com +15001 + Seward Designs, Inc. + Christopher A. Seward Sr. + c.seward.sr&SewardDesigns.com +15002 + Intentional Software Corporation + Derek Simkowiak + dereks&intentsoft.com +15003 + IPonWEB Ltd + Boris Mouzykantskii + boris&iponweb.net +15004 + RuggedCom Inc. + Vesna Anusic + VesnaAnusic&ruggedcom.com +15005 + Fiba Software srl + Alexandru Taracila + alextara&netconsulting.ro +15006 + P.W.P.T. Wasko sp. z o.o.
+ Piotr Rybok + p.rybok&wasko.pl +15007 + Metro Ethernet Forum + Dan Romascanu + dromasca&avaya.com +15008 + Tilaria Inc. + Jim Tilley + jtilley&tilaria.com +15009 + Gplicity + Glenn Puchtel + gpuchtel&gplicity.com +15010 + LogiTEL Ltd + Mr. Andrew Henderson + andrew.henderson&logitel.co.uk +15011 + Revivio Incorporated + Greg Panarello + gregp&revivio.com +15012 + Bernard Madoff Investment Securities + Richard Caputo + rcaputo&madoff.com +15013 + Quantec, LLC + Michael Luevane + mikel&quantecllc.com +15014 + 8004 Limited + Architect Justus O. Okah-Avae + okahavae8004&yahoo.com +15015 + Integrated Modular Systems, Inc. + John Mazur + johnmazur&integratedmodular.com +15016 + Nettiportti OY + Matti Aarnio + matti.aarnio+oid-registry&nettiportti.fi +15017 + Johns Hopkins Singapore + Matthew Seow + matthew&jhs.com.sg +15018 + DayDay Software LTD. + Chen TiMeng + iright&163.net +15019 + Bazy i Systemy Bankowe Sp. z o.o. + Wojciech Kosmowski + koswoj&pro.bsb.com.pl +15020 + Productos Profesionales de Telecomunicacion + Daniel Fraile Vergel + daniel&protelsa.net +15021 + mobidot + J.M. Hietbrink + info&mobidot.com +15022 + Golden Gate University + Sanjeev Mohan + smohan&ggu.edu +15023 + tremium + Udo Bremer + udo.bremer&tremium.de +15024 + Tougas.NET + Damien Tougas + damien&tougas.net +15025 + Studentersamfundet i Trondhjem + Bjørn Ove Grøtan + bog&samfundet.no +15026 + centre hospitalier de roanne + Mr Pillet + christophe.pillet&ch-roanne.fr +15027 + SecCommerce Technologies AG + Tilo Kienitz + tk-iana&seccommerce.de +15028 + James Collings + Jim Collings + jcllings&tsunamicomm.net +15029 + Jareva Technologies, Inc + Peter Henriksen + phenriksen&jareva.com +15030 + Guangzhou RCOM Communication Co., Ltd + xiaoli + mpnm&mail.maipu.com +15031 + University of Melbourne, Computer Science/Grid Computing + Steve Melnikoff + stevexm&cs.mu.oz.au +15032 + INQGEN Technology Co., Ltd. + Charlie Feng + charlie.feng&inqgen.com +15033 + Realtime Embedded AB + Mats Loman + mats.loman&realtimeembedded.se +15034 + KSolutions S.p.A. + Gianugo Rabellino + networking&ksolutions.it +15035 + Partner Voxtream + Jens Fischer + jkf&voxtream.com +15036 + Logical + Niels Bech Nielsen + nbn&logical.dk +15037 + Shands Healthcare and Teaching Clinics, Inc. + Gary Bennett + gary.bennett&shands.ufl.edu +15038 + Charles Stark Draper Laboratory, Inc + Patrick M. Sharkey + psharkey&draper.com +15039 + Armedia + Jim Nasr + jnasr&armedia.com +15040 + Global Aeon Pty.Ltd. + Basil C.P. Borun + bborun&globalaeon.com +15041 + Poznan University of Technology + Tomasz Kokowski + Tomasz.Kokowski&put.poznan.pl +15042 + vsecure + Lior Ben Naon + lior&v-secure.com +15043 + Jeng-Ye Tech. Enterprise Co.,LTD. + Ren-Han Tsou + rhtsou&jytec.com +15044 + Transatel + Durand + romain.durand&transatel.com +15045 + MoneyAM Ltd + Jonathan Tullett + jonathan&moneyam.com +15046 + ClickFox, LLC + Ami Feinstein + ami&clickfox.com +15047 + Java Software Foundry + md wills + mdw&javasoftwarefoundry.com +15048 + Startek Engineering, Inc. + Kuo Meng Lu + mlkuo&mail.startek-eng.com +15049 + Seattle Central Community College + Brett McCormick + brett&nodetree.net +15050 + Waycom International + Philippe Muller + pm&waycom.net +15051 + Finkle Enterprises + Frank Koenen + fkoenen&vonworld.com +15052 + Gemini Technologies Inc. + Jason Vas Dias + jvasdias&earthlink.net +15053 + MeriTek Systems, Inc. + Shabbir Chowdhury + shabbir&meritek.com +15054 + PASS-K + Gerald L. Gay + glgay&pass.korea.army.mil +15055 + Ikegami Tsushinki Co., Ltd. 
+ Mitsuharu SATOH + mituharu&rd.ikegami.co.jp +15056 + anthonyhan.org + anthony han + anthonyhan&msn.com +15057 + MORAVIAPRESS a.s. + Radek Hnilica + Radek&Hnilica.CZ +15058 + CEISEC + David Rodriguez Fernandez + drodriguez&ceisec.com +15059 + Baptiste Malguy + Baptiste Malguy + baptiste&malguy.net +15060 + Zentic + Francois-Xavier Cormontagne + fxcormontagne&zentic.com +15061 + ServuS A.S + Faik UYGUR + faiku&servus.com.tr +15062 + Million Corporation + Katsuhiko Hino + hino&mln.co.jp +15063 + eko systems inc + Christopher Koelbl + chris&ekosystems.com +15064 + MANY sa + Nicolas Jungers + nj&many.be +15065 + Mantas, Inc. + Mona Mehta + mona_mehta&mantas.com +15066 + James Richardson Enterprises + James Richardson + james.richardson&htcinc.net +15067 + SMTMS + Sebastien MENANT + s.menant&free.fr +15068 + Beijing CAPE Computer Software Engineering Corp. + Yujian + yuj&cape.com.cn +15069 + Elasto Centro + Emiliano Beronich + elastocentro&arnet.com.ar +15070 + YESComm + Gemini Ahn + geminigem&yescomm.com +15071 + Frontier Solution Co., Ltd. + JB. Park + jeibi&frontiers.co.kr +15072 + HSB Bank AB + Lennart Jonsson + lennart.jonsson&bank.hsb.se +15073 + Synad Technologies Ltd. + Mike Moreton + mike.moreton&synad.com +15074 + Thus Plc. + Ashley Burston + ashleyb&demon.net +15075 + Icom + Richard De Falco + rdefalco&icominfo.fr +15076 + Fresenius AG + Hendrik Stahl + ad.support&fresenius-netcare.com +15077 + Thermeon Corporation + Scott Nelson + sbnelson&thermeon.com +15078 + Sysnet Telematica srl + Ezio Dozio + edozio&siweb.it +15079 + Remark! Internet Limited + Mr Reece Dylan + reece&remark.uk.com +15080 + XAIDAT + Thomas Bauer + bauer&xaidat.com +15081 + Screwage, Inc. + Michael Kaufman + walker&screwage.com +15082 + Carey International, Inc. + J. Lau + domains&ecarey.com +15083 + RentPayment.com + Daniel Chester + dan&rentpayment.com +15084 + Mobile Radius USA, Inc. + Derek Jean + djean&mobileradius.com +15085 + AFPA DSIS + Hichem MRABET + hichem.mrabet&afpa.fr +15086 + Omega-Trin Ltd. + Gennadiy Tschegolev + omega&linia.ru +15087 + projekt13 + Gregory J. Goodman + gregory&projekt13.com +15088 + AdvizeX Technologies LLC + Gregory J. Goodman + ggoodman&advizex.com +15089 + Scientific Systems Company, Inc. + Sanjeev Seereeram + Sanjeev.Seereeram&ssci.com +15090 + teamix GmbH + Oliver Kügow + ok&teamix.net +15091 + Boyd Consulting Services, LLC + David Boyd + David.Boyd&insightbb.com +15092 + Perfcap Corpoation + Prem Sinha + prem.sinha&perfcap.com +15093 + Novartis International AG + Hans Kohler + hans.kohler&pharma.novartis.com +15094 + Linetec Gmbh + Dirk Rappl + lyeburn&hotmail.com +15095 + TDC Services A/S + Jack Marquart + jamar&tdc.dk +15096 + Agència Catalana de Certificació + Jordi Masias Muntada + jmasias&oliverascoll.com +15097 + SevenSpace + Jon Greaves + jgreaves&sevenspace.com +15098 + Smithsonian Institution + Michael Press + pressm&si.edu +15099 + Loma Linda University + Larry Bishop + lbishop&univ.llu.edu +15100 + Grupo de Arquitectura y Concurrencia (GAC) + David Sanchez + david&dit.ulpgc.es +15101 + Volt Delta Resources Inc. + Daniel Riscalla + daniel.riscalla&gotocme.com +15102 + US LEC + Eric Kilfoil + sysmaster&uslec.net +15103 + EIVD + Markus Jaton + markus.jaton&eivd.ch +15104 + ModLink Networks + Donald Russell + don&modlinknetworks.com +15105 + 503 Integrated Systems + Phill Hardy + pwth503&gmail.com +15106 + Wificom Technologies Ltd + Veli-Matti Riepula + vmr&wificom.com +15107 + Siraya Inc. 
+ Chih-Peng Yang + cpyang&siraya.com +15108 + Macao Post and Telecommunications Bureau - eSignTrust (formerly 'Macao Post eSignTrust Certification Authority') + Director of CTT + cttgeral&ctt.gov.mo +15109 + Psytechnics + Xiaoyi Gu + support&psytechnics.com +15110 + Savantis Systems, Inc. + Paul Campaniello + paulc&savantis.com +15111 + Dennis Eriksson + Dennis Eriksson + Dennis.Eriksson&Ericsson.Com +15112 + Whack Productions + Bruno Connelly + bruno&whack.org +15113 + MagTek + Jeff Duncan + jeff.duncan&magtek.com +15114 + Stabilizer AB + Jan Carlsson + janne.carlsson&telia.com +15115 + Obvius LLc + Stephen Herzog + herzogs&obvius.com +15116 + Liberty IT Solutions + Wesley Chong + wesley&libertyitsolutions.com +15117 + Protection One Inc. + Ned Fleming + Ned_Fleming&wr.com +15118 + Westar Energy + Ned Fleming + Ned_Fleming&wr.com +15119 + Fidelity National Information Solutions + Jason Raneses + jraneses&fnis.com +15120 + CRCnet Wireless Network + Murray Pearson + crcnet-admin&crc.net.nz +15121 + Mentata Systems + Jon Roberts + jon&mentata.com +15122 + Interface Web Hosting + Federico Contreras + federicoac&softhome.net +15123 + Fachhochschule Oldenburg/Ostfriesland/Wilhelmshaven + Henning Früchtenicht + fruechtenicht&rz.fh-wilhelmshaven.de +15124 + Rodan Systems S.A. + Dariusz Tyszka + darek&rodan.pl +15125 + Vectura + Martyn Smith + martyn.smith&vectura.com +15126 + ---none--- + Andreas Damm + oidadmin&dwl.co.uk +15127 + CUNY/CIS + Arthur Ecock + eckcu&mail.cuny.edu +15128 + Vergata EDV-Beratung + Sergio Vergata + sergio&vergata.de +15129 + E-CARD Ltd. + Valentin Zahariev + curly&e-card.bg +15130 + Gens Software Ltd. + Radik Gens + radikg&genssoft.com +15131 + EDGEACCESS + Haysam Rachid + hrachid&edgeaccess.net +15132 + Six Continents Hotels + Steve Francis + Steve.Francis&6c.com +15133 + Illinois Institute of Technology + Mrinal Virnave + virnave&iit.edu +15134 + Christian Boesch + Christian Boesch + boesch&fhv.at +15135 + omatis + kwame + kwame&omatis.com +15136 + Sukumar Patel + Sukumar Patel + patel301&comcast.net +15137 + ENOVIA + Joe Lannom + Joe_lannom&enovia.com +15138 + Sim Freaks + Mark Castillo + markc&webfreak.com +15139 + LandTime, Inc + Eric Weitzman + eweitzman&acm.org +15140 + YoYoWeb + Thornton Prime + thornton&yoyoweb.com +15141 + Infotropic AB + Patrik Nilsson + patrik&infotropic.com +15142 + comunicando società per azioni + fabrizio trovato + fabrizio.trovato&comunicandospa.net +15143 + Synergy Business Solutions, Inc. + Vivek S. Bagal + vivek.bagal&sbsconnect.net +15144 + Binary Wave Technologies Inc. + Waichi Lo + waichilo&binarywave.ca +15145 + iReasoning Networks + Robin Lin + info&ireasoning.com +15146 + M444 Systems, Inc. + Ulrich Wiedmann + ulrich&m444.com +15147 + Hrvatske Autoceste d.o.o (HAC) + Miroslav Zubcic + mvz&hac.hr +15148 + Witteveen+Bos + J. Boone + J.Boone&witbo.nl +15149 + xantury.com + florian stahl + fstahl&xantury.com +15150 + EXOSEC + Christophe Pouillet + cpouillet&exosec.net +15151 + Spyderworks Ltd + Dr Ramen Sen + r.sen&spyderworks.co.uk +15152 + Scholl Engineering + Eric Scholl + eric_scholl&scholl.ch +15153 + DMSfactory GmbH + Jens Bieler + Jens.Bieler&dmsfactory.com +15154 + ActiVia Networks + Sacha Fosse-Parisis + snmp&ActiVia.net +15155 + IPSQUARE Inc. + Hiroshi Satoh + hsato&ip-square.com +15156 + Accusys + Paul Chu + paulchu&accusys.com.tw +15157 + dL_s + Deng Lor + deng_lor&hotmail.com +15158 + SCA + Enrico Sessler + enrico.sessler&sca.com +15159 + ultraDyne + Kent Swanson + kents&ultradyne.org +15160 + Interactive Northwest, Inc. 
+ Gary Van Gordon + gvg&interactivenw.com +15161 + Idealab + Idealab Hostmaster + hostmaster&idealab.com +15162 + Movilok Interactividad Movil S.L. + Luis del Ser + ldelser&movilok.com +15163 + NetKingCall Co., LTD. + Choi, Young-hwan + choiyh44&hotmail.com +15164 + Open Point Networks + Robert McIntyre + bmcintyre&openpoint.net +15165 + NT-SYSTEMS + Mr. PHAM + tpham&ntsystems.de +15166 + Brian Bunnell + Brian Bunnell + BrianBunnell&hotmail.com +15167 + Chapcom + Ben Brown + ben&chapcom.org +15168 + Vossloh Information Technologies Karlsfeld GmbH + Robert Michelsen + robert.michelsen&vitk.vossloh.com +15169 + IIZUKA Software Technologies + Mike Atkin + mike&iizuka.co.uk +15170 + Crinis Networks + Eric McMurry + emcmurry&crinisnetworks.com +15171 + Landmark Graphics Corporation + Nick White + nwhite&lgc.com +15172 + Supcik.net + Jacques Supcik, PhD + jacques&supcik.net +15173 + Hammerhead Systems + Loanne Cheung + lcheung&hammerheadsystems.com +15174 + Safescrypt Limited + Sharath Jeppu + SJeppu&safescrypt.com +15175 + Lonny Software + Lonny May + lonny&lonny-m.net +15176 + Advanced Info Service PLC. + Supawadee Opasnipat + supawado&ais900.com +15177 + ADMtek Incorporated + Joy Lin + joyl&admtek.com.tw +15178 + Axtion Systems Corporation + Chris Roberts + chris&axtionsystems.net +15179 + Seance Software Inc. + Jingyu Liu + jliu&seancesoft.com +15180 + City of Bloomington, IN + Daniel S. Neumeyer + neumeyed&city.bloomington.in.us +15181 + IneoQuest Technologies Inc. + Christopher Crotty + christopher.crotty&ineoquest.com +15182 + ads-tec GmbH + Armin Bossler + a.bossler&ads-tec.de +15183 + objectlab, llc + chris mollis + cmollis&objectlab.com +15184 + ITXC + Jonathon Brenner + jbrenner&itxc.com +15185 + Phantasia Broadcast Ltd. + Donald Pickett + donaldp&phantasia.net +15186 + SolumSTAR + Wolfgang Dotzauer + dotzauer&siemens.com +15187 + NYS Department of Civil Service + Marty Gardner + marty.gardner&cs.state.ny.us +15188 + Gemfor s.r.o. + Bedrich Svoboda + info&gemfor.cz +15189 + objectlab, llc + chris mollis + cmollis&objectlab.com +15190 + incNETWORKS, Inc. + Ken Roser + ken_roser&sent.com +15191 + Voice Print International, Inc. + Ryan Pfeifle + rpfeifle&vpi-corp.com +15192 + Evergreen Assurance, Inc. + Michael Chiang + mchiang&evergreenassurance.com +15193 + Tacit Networks + Pedrum Mohageri + pedrum&tacitnetworks.com +15194 + Alan Flett + Alan Flett + flett&nodanger.co.uk +15195 + privat + Andreas Rudloff + andreas.rudloff&gmx.de +15196 + Liqwid Networks + Scott Brickner + sbrickner&liqwidnet.com +15197 + HOW CO.,LTD + Koji Mizumura + mizumura&how.jp +15198 + FNET Co., Ltd + Gye Tai, Lee + dolson&f-net.co.kr +15199 + Infologic Nederland B.V. + Theo Viset + tviset&infologic.nl +15200 + Flexibix Inc. + Frank Kuo + frankkuo&attbi.com +15201 + UpSync Software India Private ltd. + Maheshwara.Hampasagar + mhampasagar&upsync.com +15202 + Oe-Consult Software GmbH + Matthias Bauernberger + matthias.bauernberger&oe-consult.at +15203 + TDC Mobile A/S + Kristen Nielsen + krn&tdc.dk +15204 + Mobilethink A/S + Kenth Hvam + domain&mobilethink.dk +15205 + Nordsan Technology Co., Ltd. + Ma Quan + quan.ma&nordsan.com +15206 + Visualpix SA + Michel Stempin + mstempin&visualpix.com +15207 + Modul 1 + Roger Lindholm + roger.lindholm&modul1.se +15208 + Ombre et Pixel + David ROBERT + david&ombrepixel.com +15209 + Precision Therapy International + Jeffery Tillotson + jeffery.tillotson&elekta.com +15210 + Shinkuro Inc. + Jeffrey Kay + jeff&shinkuro.com +15211 + North Country Internet Access + Ryan J. 
Taylor + rj&ncia.net +15212 + ITAU + Internet Itau + rgdomit&itau-unibanco.com.br +15213 + ITAUSA + Internet Itau + rgdomit&itau-unibanco.com.br +15214 + Global Network Operations Ltd. + Andy Coates + andy&NocOps.com +15215 + Agilejava.com + Wilfred Springer + wilfred&agilejava.com +15216 + White Rabbit + Jeff Goris + jeff.goris&whiterabbit.com.au +15217 + AltaVista + Bret McGinnis + bmcginnis&av.com +15218 + KSC Commercial Internet Co.,Ltd. + Jaruwat Boonmee + srvadmin&ksc.net +15219 + SEVOI Co. Ltd + David Jung + sdjung&sevoi.net +15220 + n.v. QWENTES s.a. + Mathieu Laurent + mathieu.laurent&qwentes.be +15221 + Lorus Inc. + Jim St. Onge + jws&lorusinc.com +15222 + Rastar Digital Marketing + Peter J. Robbins + probbins&rastardm.com +15223 + Andago + Carlos Lozano + clozano&andago.com +15224 + OZTECH + Ernest Rider + ozialien&cox.net +15225 + NightStar Corporation + Jonah H. Harris + jharris&nightstarcorporation.com +15226 + Etronics + Jae-Won Jeong + joj&etronics.co.kr +15227 + Venus Info Tech Inc. + Patrick Jiang + jiangpeng&venustech.com.cn +15228 + Damm Cellular Systems A/S + Dennis Boy + db&damm.dk +15229 + T-Systems Schweiz AG + Stefan Steinemann + stefan.steinemann&t-systems.ch +15230 + SecurIT BV + Michael van Dorp + mvd&securit.biz +15231 + schulz engineering + sven schulz + sven.schulz&netallied.net +15232 + COM.BOX Fotofinder GmbH + Alexander Finger + finger&fotofinder.net +15233 + Regenative Inc. + William Lee + wlee&regenative.com +15234 + SoftSwitching Technologies + Roger Hayes + rhayes&softswitch.com +15235 + Xpoint Technologies, Inc. + Howard Helbein + hhelbein&xpoint.com +15236 + R & K Global Business Services, Inc. + Michael Grubb + mgrubb&000domains.com +15237 + Intellinger Software Corporation + Thomas Wintschel + tom&intellinger.com +15238 + Collins Enterprises LLC. + Benjamin F. Collins + bcollins&mail.granderiver.net +15239 + Applico Security, Inc. + Jessie Chen + jchen&applicosecurity.com +15240 + AHB Internet Solutions + Achim Breuer + ahb&ahb.net +15241 + nimel + Meunier + om&neokraft.net +15242 + ToolCASE LLC + Jacob Gore + jacob.gore&toolcase.com +15243 + gav-lv + John Marquart + jomarqua&techie.com +15244 + Essential Systems, Inc. + Dan Gynn + dan.gynn&essensys.com +15245 + INFNIS + seung hak Lee + shlee&infnis.com +15246 + EDV Beratung & Netzwerktechnik Dipl.-Ing. Matthias Kahle + Matthias Kahle + mka&gdnsc.de +15247 + Rocksoft Limited + Ross Williams + tech&rocksoft.com +15248 + Quantel Ltd + David Lane + david.lane&quantel.com +15249 + x-cellent technologies GmbH + Stephan Tesch + Stephan.Tesch&x-cellent.com +15250 + Morse Group Ltd + Dale Douglas + dale.douglas&morse.com +15251 + Deutsche Telekom T-Com + Herbert Eisenbeis + eisenbeis&telekom.de +15252 + SB&C, Ltd. + Dale Hirt + administrator&sbcltd.com +15253 + Ceyba + Dan Fossum + dfossum&ceyba.com +15254 + Kaseya Sweden AB (formerly 'Intellipool AB') + Robert Aronsson + robert.aronsson&kaseya.com +15255 + Reliable Controls Corporation + Roland Laird + rlaird&reliable-controls.com +15256 + United Nations + Scott T Skinnider + Scott.Skinnider&UN.Org +15257 + City Computing Limited + Lackel Hung + lackel&citycomputing.com +15258 + Exers Technologies, Inc. + Kitae Kim + ktkim&exers.com +15259 + Saarländischer Rundfunk + Oliver Pabst + opabst&sr-online.de +15260 + Université Catholique de l'Ouest + Alain RAYMOND + OID.Admin&uco.fr +15261 + UniCon Software GmbH + Siegbert Teuscher + OID.Admin&unicon-ka.de +15262 + Data Display Limited.
+ Erich Muller + emuller&data-display.com +15263 + DLRG Landesverband Rheinland-Pfalz + Kai Burkhardt + kburkhardt&gmx.de +15264 + Anonymizer Inc. + Austin Hill + austin&anonymizer.com +15265 + Telelogic AB + Jeff Steele + jeff.steele&telelogic.com +15266 + Omnibus Systems Ltd + Mark Birks + mark.birks&omnibus.co.uk +15267 + Damage Prevention Systems + Martijn Moeling + martijn&xs4us.nu +15268 + Oy NetItBe Ltd + Eino Mäkitalo + support&netitbe.fi +15269 + Craniac Entertainment + Chuck Bueche + chuckb&craniac.com +15270 + Printcafe Software, Inc. + Aaron Siri + asiri&printcafe.com +15271 + Landesamt fuer Steuern (formerly 'Oberfinanzdirektion Koblenz') + Herbert Meyers + Herbert.Meyers&lfst.fin-rlp.de +15272 + MaaTec + Alexander Maack + Alexander.Maack&MaaTec.com +15273 + The Paloma Group, Inc. + David Dove + david.dove&ThePalomaGroup.Com +15274 + Paul Scherrer Institut + Roland Blaettler + roland.blaettler&psi.ch +15275 + AAE Systems + GovindaRajan + govivs&aaesys.com +15276 + BearASP + William C. Landolina + wcl&techatl.com +15277 + SoftTree Technologies, Inc. + Dmitriy Evinshteyn + devinshteyn&softtreetech.com +15278 + Oblicore Inc. + Salomon Barak + barak&oblicore.com +15279 + WFI + Doug Lawson + doug.lawson&wfinet.com +15280 + Relapixity, LLC + Troy Lemieux + tlemieux&relapixity.com +15281 + ZeOmega Infotect + Deb + dhazarika&zeomega.com +15282 + GCom + Uffe Gavnholt + uffe&gavnholt.com +15283 + Mikroliitti Oy + Pertti Heikkinen + pjh&iki.fi +15284 + POSTA SLOVENIJE + Bostjan LAVUGER + bostjan.lavuger&posta.si +15285 + iSolv Technologies cc + Jayesh Nana + jayesh&isolv.co.za +15286 + ADPM + Peter Marschall + peter.marschall&adpm.de +15287 + Joachim Bauch (formerly 'FOB Team') + Joachim Bauch + mail&joachim-bauch.de +15288 + Register SpA + Claudio Corbetta + serena.giugni&register.it +15289 + Advanced Broadband Communications Center (CCABA-UPC) + Jordi Domingo Pascual + jordi.domingo&ac.upc.es +15290 + Ordway Center for the Performing Arts + Bryce T Pier + btpier&ordway.org +15291 + Mach Technology, Inc. + Michael Chiang + mschiang&yahoo.com +15292 + American City Business Journals + Jared Watkins + jwatkins&bizjournals.com +15293 + Coderanger inc. + Noah Kantrowitz + coderanger&yahoo.com +15294 + EP Imaging Concepts, Inc. + Richard Nelson + rnelson&eugeneprint.com +15295 + Metrima + Giancarlo Bazzi + gbazzi&racine.ra.it +15296 + Plum Financial Service Pty Ltd + Terence Tsakiris + Terence.Tsakiris&plumfs.com.au +15297 + Sonera Solutions Oy + Sami Mäntyharju + sami.mantyharju&sonera.com +15298 + Volkswagen Bank GmbH + Andreas Koch + andreas.koch&vwfsag.de +15299 + Elcoteq Network Corporation + Mr.
Sami Masalin + sami.masalin&elcoteq.com +15300 + Wireless Independent Provider AB + Per Werelius + perw&wip.se +15301 + KFKI RMKI SzHK + Bedö Sándor + bsanyi&sunserv.kfki.hu +15302 + GalaxE.Solutions, Inc + Sandipan Gangopadhyay + sandipan&galaxysi.com +15303 + accelerate IT services + eric woelkerling + ericw&accelerate-it.com.au +15304 + TakeIT + Take Knol + take&takeit.nu +15305 + MBUF + Mathieu Mourez + matt&mbuf.ca +15306 + Mount Royal College + James Bourne + jbourne&mtroyal.ab.ca +15307 + Esphion Ltd + Nathan Ward + admin&esphion.com +15308 + Peace Software + Craig Bishell + craig.bishell&peace.com +15309 + e-Dynasty Limited + Peter Loo + peterloo&dynasoft.freeserve.co.uk +15310 + Exodus Communications + Jian Zhen + jian.zhen&exodus.net +15311 + misTrust Certification Authority + Bernd Rudack + bernd.rudack&mistrust.de +15312 + Jozef Stefan Institute + Janez Jezersek + Janez.Jezersek&ijs.si +15313 + REDLINK Mediendienste GmbH + Olav Roth + roth&redlink.de +15314 + IT+ A/S + Soeren Hilmer + sh&itplus.dk +15315 + Internet Business News + Giancarlo Bazzi + giancarlo.bazzi&ibn-italy.com +15316 + Hub Hill Software + A D Thomson + sandy.thomson&btinternet.com +15317 + Wells Fargo + Joachim F. Kainz + kainzjf&wellsfargo.com +15318 + Edu4 France S.A. + Jerome SIDDI + jerome.siddi&edu4.com +15319 + SkyPilot Network, Inc. + Marvin Chan + mchan&skypilot.com +15320 + Custom7 + Brad Lowe + brad&custom7.com +15321 + Transcor, Inc + Kipland Iles + kiles&etranscor.com +15322 + Echopass Corporation + Derek Burdick + Derek_Burdick&echopass.com +15323 + INP-net + Lionel Porcheron + inp-net&bde.inp-toulouse.fr +15324 + Walter E. Helmke Library + Kevin Fredrick + fredrikr&ipfw.edu +15325 + STVA + Edouard Chevtchouk + echevtchouk&stva.com +15326 + Mobicus Oy + Juho-Pekka Virolainen + jpv&mobicus.com +15327 + Halden Dataservice AS + Vegard Svanberg + vegard&svanberg.no +15328 + Svanberg Consulting + Vegard Svanberg + vegard&svanberg.no +15329 + Dalian University of Technology + Li Yingzhuang + lyz&dlut.edu.cn +15330 + KLEEGROUP + CONSTANTIN François + fconstantin&kleegroup.com +15331 + Indagon Oy + Markus Mikkolainen + markus.mikkolainen&indagon.com +15332 + Logica UK Ltd + Tony Catley + catleyt&logica.com +15333 + Global Communication Technologies,Inc. + Stilian Tcholakov + steve&globaltech-us.com +15334 + MWVCAA + Gary Winters + wintersg&mwvcaa.org +15335 + ITXC + Matthew Krokosz + mkrokosz&itxc.com +15336 + Telemet America, Inc + Alex Kotliarov + alex&taquote.com +15337 + Photronics, Inc. + Peter Tlasky + ptlasky&brk.photronics.com +15338 + PalmSource, Inc + Matthew Jordan + matthew.jordan&palmsource.com +15339 + ilse media groep b.v. + Mathijs Brands + mathijs&ilse.nl +15340 + George Furniture + George Ameller + ameller&earthlink.net +15341 + Wuhan Yangtze Communications Industry Group Co.,Ltd.,China. + Xie Qiaojun + qj.xie&ycig.com +15342 + Vitalect Technologies India Pvt. Ltd. + S. Subin + subin&vitalect-india.com +15343 + FUNDACION INASMET + JOKIN RUBIO + jokin&inasmet.es +15344 + Yomi PLC.
+ Tero Kalavainen + tero.kalavainen&yomi.com +15345 + Dansk Røde Kors + Bill Edgington + bill&mail2.redcross.dk +15346 + Viking Systems + Leon Hauck + leon&vikingsystems.com +15347 + IGH Ingenieurgesellschaft Höpfner mbH + Rene Etzweiler + rene.etzweiler&igh.com +15348 + Precision Interop, LLC + Scott Overholser + sover&precision-interop.com +15349 + Andreas Bartsch IT Solutions + Andreas Bartsch + ab&bartsch-web.de +15350 + Jatosoft LLC + Ben Gollmer + ben&jatosoft.com +15351 + ForeverLiving.com + Murray Johnston + hostmaster&foreverliving.com +15352 + University of Arkansas at Fort Smith + Brian Scott + bscott&uafortsmith.edu +15353 + Integrated Digital Solutions Inc. + Steve Marmer + steve&ids.ca +15354 + Number 1 Customer, LLC + Peter Chernin + peter&easy-as-toast.com +15355 + Integan + Mestdag Nico + nm&integan.be +15356 + BartNET + Gil DuPont + gdupont&bartnet.net +15357 + Diversified Systems Resources, LTD + Gil DuPont + gdupont&dsrglobal.com +15358 + Cygnetron, Inc. + Piyush Sinha + piyush.sinha&cygnetron.com +15359 + Multidas Technologies, Inc. + Alex Pan + apan&multidas.com +15360 + CENTRE DE SANTE MENTALE + DENIS ROUME + NOC&eurofmc.com +15361 + RPA Research Co., Ltd. + LEE WON SOO + wonsool&rpa.co.kr +15362 + Digital on net Co., Ltd. + I.K. Lee + ilee&digitalonnet.com +15363 + Hotel Interski **** + Lukas Demetz + lukas&hotel-interski.com +15364 + Sibsnet Technologies + Sibeuwou bertin + sibeuwou&yahoo.fr +15365 + INFOSTEP + Gerhard Mueller + office&infostep.at +15366 + Dunhuang Academy + Liu Gang + liugang&dha.ac.cn +15367 + ZlanTec + Zlan + zlan&163.com +15368 + UnixAG Siegen + Tjabo Kloppenburg + tjabo.kloppenburg&unix-ag.org +15369 + Cititech Australia + Michael Bochner + michael.bochner&ssmb.com.au +15370 + GIGA-BYTE TECHNOLOGY CO., LTD + Brad Huang + brad.huang&gigabyte.com.tw +15371 + Horry Telephone Cooperative + James Richardson + james.richardson&htcinc.net +15372 + Max-Planck-Institute for evolutionary Anthropology + Holger Bochmann + bochmann&eva.mpg.de +15373 + NASK + Franciszek Lewenda + F.Lewenda&nask.pl +15374 + Enfo Oy + Hannu Jauhiainen + hannu.jauhiainen&enfo.fi +15375 + HSB Systemhaus GmbH + Floegel Christian + christian.floegel&hsb-systemhaus.de +15376 + De Nayer + Bart Coosemans + bart.coosemans&denayer.wenk.be +15377 + ChiliMoon Foundation + Honza Petrous + hop&pike.ida.liu.se +15378 + CSI Piemonte + Fabio Cavallo + CA-CSIPiemonte&csi.it +15379 + Allocity + Robert W. Lennie + rlennie&aciesnetworks.com +15380 + AlterPoint, Inc. + Bryan J. Rollins + brollins&alterpoint.com +15381 + Wilkes University + Steven Reppert + reppert&wilkes.edu +15382 + Bit Hosting + Okke Timm + okke.timm&bit-hosting.de +15383 + BlueJavelin, Inc. + Frank Solensky + fsolensky&bluejavelin.net +15384 + NCARP.COM + Nuno Carpentier + nc&ncarp.com +15385 + Genasys II Spain + Alberto Bambala Arbea + abambala&genasys.com +15386 + Ibis Tecnologia e Informacao + Tales Costa + tales&ibistec.com.br +15387 + Industri Telekomunikasi Indonesia (INTI) + Aji P. Reksoprodjo + apr&inti.co.id +15388 + MagiNet Company, Ltd. + Gregory K Rutledge + greg&maginet.net +15389 + Bestway Telecom + Lim Hyuk + hlim&bwtelecom.co.kr +15390 + Silu co. + Precilla Luo + precilla&163.com +15391 + iei + libhaohua + tt&iei-china.com +15392 + OLVIKO + Anatoly Davidov + noc&olviko.ru +15393 + AvestaPolarit AB + Ronny Karlsson + ronny.karlsson&avestapolarit.com +15394 + P.T Sigma Cipta Caraka + Fajar Hartanto + fajar_hartanto&sigma.co.id +15395 + CrossML + John Matthews + john.matthews&crossml.com +15396 + Alfa & Ariss b.v. 
+ Peter Boevink + peter.boevink&alfa-ariss.com +15397 + Sandvine Corporation + Kenneth Johansson + snmp-support&sandvine.com +15398 + IC3S AG + Steffen Weinreich + admin&ic3s.de +15399 + Universite de Poitiers + Jean-Claude Ben + jean-claude.ben&univ-poitiers.fr +15400 + Post Consult International + Caoimhin Blake + caoimhin.blake&pci.ie +15401 + EUnet EDV und Internet Dienstleistungs AG + Michael Lausch + michael.lausch&eunet-ag.at +15402 + Proximity Corp + Brendan Bouffler + brendan&proximitygroup.com +15403 + T-Systems CDS GmbH + Claudia Dübbelde + Claudia.Duebbelde&t-systems.com +15404 + PenTeleData Inc + Frank Clements + frankc&corp.ptd.net +15405 + Comunicaciones NetPeople C.A. + Orlando Carvallo + alejandro&net-people.com +15406 + Axiom Integration, Inc. + Terry Inzauro + tinzauro&axiomintegration.com +15407 + Ambicom, Inc. + Alex Chen + alex_chen&ambicom.com +15408 + TrueContext Inc. + Richard Dumas + rdumas&truecontext.com +15409 + Customized Database Systems + Seth Rothman + srothman&cds-software.com +15410 + Seaway Networks + David Lapp + lapp&seawaynetworks.com +15411 + Lasipalatsin Mediakeskus Oy + Egil Silfver + egil&lasipalatsi.fi +15412 + Cotagesoft Inc. + Joyo Wijaya + jwijaya&cotagesoft.com +15413 + Oden Oden Wireless Networks Technology (Shanghai) Ltd + Tod kang + kang.tao&odenwireless.com +15414 + BillHusler.com + Bill Husler + BHusler&PacBell.net +15415 + Openet Information Technology (Shenzhen) Co.,Ltd + Jiancheng He (何建成) + hejc&openet.com.cn +15416 + Saft Power Systems + Gerard Gauthier + gerard.gauthier&alcatel.fr +15417 + IMA - Informatica de Municipios Associados + Henrique de Moraes Holschuh + hmh&ima.sp.gov.br +15418 + Forum Systems Inc. + Chad L. Cook + ccook&forumsys.com +15419 + Forest Siding Supply + Michael Mattice + mike+iana&forestsidingsupply.com +15420 + Lightsurf Technologies + Steve Phillips + sphillips&lightsurf.com +15421 + Chiemgau Gymnasium Traunstein + Stefan Schuch + schuch&stefan-schuch.de +15422 + ARTIFintelligence, LLC + Edgart F Gonzalez + egonzalez&webbox.com +15423 + NetSpace - Soluções Informáticas + José Rocha + suporte&netspace.pt +15424 + Storeimage Programs Inc. + IT Manager + gregs&storeimage.com +15425 + ImageStream Internet Solutions + Doug Hass + domreg&imagestream.com +15426 + Korea Internet Data Center + Kim, Jong-Shin + jongshin&kidc.net +15427 + Hwa-jin Seo + Hwa-jin Seo + freenickey&hotmail.com +15428 + Cybertec Pty Ltd + Chris Johns + cjohns&cybertec.com.au +15429 + Future Computing Solutions India Pvt. Ltd.
+ Gurinderjit Singh Mond + gjsingh&fcsindia.com +15430 + JIN Information Systems + Naveen + naveen&jinis.com +15431 + Xion IT Systems AG + Peter Ott + peter.ott&xion.at +15432 + Cybcon Industries + Michael Oberdorf + michael.oberdorf&cybcon-industries.de +15433 + Landespolizei Mecklenburg- Vorpommern + Schwarz, Thomas + thomas.schwarz&polmv.de +15434 + Chebucto Community Net + Jeff Warnica + jeffw&chebucto.ns.ca +15435 + worldnet21 + Jesus Garcia Rodriguez + jesus&worldnet21.es +15436 + OsiTools + Marion Maack + info&ositools.de +15437 + SSO + Markus Faltin + marfal&gmx.de +15438 + RTL Television + Ingo Rueber + ingo.rueber&rtl.de +15439 + PASS Technologie + Pierre Dittgen + pierre.dittgen&pass-tech.fr +15440 + ZPH Litex + Michal Wegrzynek + mwegrzynek&litex.pl +15441 + Karell + Karell Ste-Marie + stemarie&brainbankinc.com +15442 + Orchestel Systems + Yanjun Huang + yanjun.huang&orchestel.com +15443 + JP Sync + Paul Chen + paulkchen&yahoo.com +15444 + Saffron Solutions, Inc + Jason Parsons + registry&saffron.net +15445 + SBS + MyungKook Yang + mkyang&sbs.co.kr +15446 + CyberPower Systems, Inc. + Ming-Hsien Huang + ming&cyberpowersystems.com.tw +15447 + SEM Ltd + Iakovos Panayiotou + ssd&semltd.com.cy +15448 + DAMOVO Ceska Republika + Petr Foltyn + petr.foltyn&damovo.com +15449 + Foursys, Inc. + Don Class + dclass&foursys.net +15450 + Innominate Security Technologies AG + Lutz Jänicke + ljaenicke&innominate.com +15451 + Realitatea TV + Catalin Dinu + catalin.dinu&realitatea.tv +15452 + Geovariances + Ludovic Robinot + robinot&geovariances.fr +15453 + ABZ Nederland + A. Wollaars + AWollaars&abz.nl +15454 + Arbor AudioCommunications + J. Hendrix + jh&arbor-audio.com +15455 + Unassigned + Removed 2008-04-25 + ---none--- +15456 + La cocina + Ronald van Engelen + ronalde&xs4all.nl +15457 + Ackernet + Ulrich Ackermann + UlleAckermann&t-online.de +15458 + Mark McKenzie + Mark McKenzie + mark&errai.net +15459 + M-Systems + Amir Shinar + amir_shinar&aquanet.co.il +15460 + Orchestel Systems + Yanjun Huang + yanjun.huang&orchestel.com +15461 + Corybant + Wayne Sheppard + wayne&corybant.com +15462 + Rupprecht & Patashnick Co., Inc. + Matthew Bivans + mbivans&rpco2.com +15463 + Huron Superior Catholic District School Board + Dave O'Hare + dohare&hscdsb.on.ca +15464 + Muskoka.com + Shayne Lebrun + support&muskoka.com +15465 + University of Toronto + Russell Sutherland + russ&madhaus.cns.utoronto.ca +15466 + Peacefulhaven + Gordon Shepherd + gordon.shepherd&peacefulhaven.net +15467 + Digibel + Arjen van Drie + arjen&digibel.be +15468 + Siege.org + CJ Niemira + siege&siege.org +15469 + Internet Connection + Geo Carncross + geocar&internetconnection.net +15470 + Digital System Resources + Jeff Schuyler + jschuyler&dsrnet.com +15471 + Nucleo de Pesquisa em Redes e Sistemas Abertos + Jose Marcos F. Araujo + jmarcos&dcc.uesb.br +15472 + SOA Software, Inc. + Herman D'Costa + herman.d'costa&soa.com +15473 + Spring Worth Inc. + Noel D. Kendall + noeldkendall&hotmail.com +15474 + Lighthouse Software Group + Bill Myerly + bill&myerly.com +15475 + Zhongguang Telecommunications + Chengchao + chengchao&wri.com.cn +15476 + Fareastone telecommunication Corp. + Brad Chao + rchao&fareastone.com.tw +15477 + Bank for foreign trade + Popugaev Yuri + popugaev&vtb.ru +15478 + Höft & Wessel AG + Joachim Meyer + jm&hoeft-wessel.de +15479 + dudley.org + James Dudley + jed&dudley.org +15480 + Trillion Digital Communications + Nathan Dial + ndial&trillion21.com +15481 + Capital Channel Information Co. Ltd. 
+ Andy Poon + andy&xchannel.com.cn +15482 + Tian En + Topspinner Ace + ahlek&hotmail.com +15483 + O.C.A. S.A. + Fernando Vallarino + fernando.vallarino&oca.com.uy +15484 + Lux Technologies + Michael Fleet + michael&luxtechnologies.com +15485 + University of Otago + Simon Brady + ldap.admin&otago.ac.nz +15486 + keyon + Martin Christinat + christinat&keyon.ch +15487 + Daisy Technologies Inc. + Bojan Likar + bojan.likar&daisy-tech.com +15488 + Pure Matrix, Inc. + Alain Penders + alain&purematrix.com +15489 + Home SE AB + Hans Eric Sandström + hes&xinit.se +15490 + Prasanna Technologies + Maheswara Rao.A.V. + raoavm&yahoo.com +15491 + Protegrity Inc. + Jonas Berglin + jonas.berglin&protegrity.com +15492 + Sichuan Changhong Electric Group Co,Ltd. + Mr. Dongjing Huang + ren.fei&changhong.com +15493 + Goll Enterprises + Michael George Goll + michael&goll.com +15494 + Helium LLC + Francis Potter + iana&meadowbase.com +15495 + eHouse + Yuri Ryazantsev + yuri&ehouse.ru +15496 + LaScoala + Lucian Parvu + lucianparvu&yahoo.com +15497 + IronPort Systems, Inc. + Charles Slater + cslater&ironport.com +15498 + TESI S.p.A + Francisco Mancardi + francisco.mancardi&gruppotesi.com +15499 + The King's University College + Jacob Modayil + admin&kingsu.ca +15500 + eWings Technologies, Inc. + Max Chan + max&.ewingstech.com +15501 + M&C Laboratory Inc. + Takashi Okada + okada&mac-lab.co.jp +15502 + Animezone + Andrew Ho + andrewho&animezone.org +15503 + University of Bern + Georg Fluehmann + campusadmins.id&unibe.ch +15504 + Ministère de l'équipement, des transports, du logement, du tourisme et de la mer + Christian Paquet + SI2.DPSM&equipement.gouv.fr +15505 + Echoworx Corporation + Michael Roberts + iana-priventnum-contact&echoworx.com +15506 + Akiratech Limited + James Alderman + james.alderman&akiratech.com +15507 + TierOne OSS Technologies, Inc. + Ion Moraru + imoraru&tieroneoss.com +15508 + PacketIQ Corp. + James H. Baxter + jim.baxter&packetiq.com +15509 + Direto - Centro de Tecnologia da Informação e Comunicação do Estado do Rio Grande do Sul S.A. + Daniel Soares de Oliveira + daniel-oliveira&procergs.rs.gov.br +15510 + Rhetorical Systems, Ltd. + Alexander Gutkin + sasha&rhetoricalsystems.com +15511 + Groupe ERMEWA S.A. + Philippe Lelédy + p.leledy&ermewa.com +15512 + COMAX INC. + Komaki Takashi + komaki&comax.co.jp +15513 + Gendac + Hein Loest + hein&gendac.com +15514 + Financijska agencija + Fani Beros + leopold.eke&fina.hr +15515 + DGET + Tariq Noor Ellahi + Tariq.Ellahi&ucd.ie +15516 + SoftWorks Australia Pty Ltd + Gavin Glynn + Gavin_Glynn&softworks.com.au +15517 + Entri ltd + Andrew Kozachenko + andrew&entri.com.ua +15518 + Vocalocity, Inc. + Jeff Haynie + jhaynie&vocalocity.net +15519 + Results Computing Corporation + John K. Edwards + john&results-computing.net +15520 + actiSwitch, Inc. + Rajesh Srivastava + rajesh&actiswitch.com +15521 + signalkontor GmbH + Olav Groehn + olav.groehn&signalkontor.com +15522 + Whitman College + Michael Osterman + ostermmg&whitman.edu +15523 + Equine.com + Jeff Little + jeff&equine.com +15524 + Access Point Inc + Bob Poliachik + bob.poliachik&accesspointinc.com +15525 + Braum's Ice Cream + Paul Enright + penright&braums.com +15526 + Mixmobile + Kimmo Vayrynen + kimmo.vayrynen&mixmobile.com +15527 + ANIENIB + Emmanuel Blot + emmanuel.blot&anciens.enib.fr +15528 + Naviair + Rasmus Sindum + sin&naviair.dk +15529 + Universidad del Valle + Luis Felipe Rodriguez + felipro&univalle.edu.co +15530 + Integrated Technology S.C. 
+ Humberto Hernandez + hhernandez&itweb.com.mx +15531 + VPNChina Tech-Development Corp. + Jing weijun + jingwj&vpnchina.com +15532 + hollomey consultants gmbh + Joachim Ortner + ortner&hollomey.com +15533 + Mobile Tornado + Israel Shainert + israel&mobiletornado.com +15534 + Linup Front GmbH + Anselm Lingnau + anselm.lingnau&linupfront.de +15535 + Vereniging Milieudefensie + Zenon Panoussis + webmaster&milieudefensie.nl +15536 + PTM.com + Jose Celestino + japc&co.sapo.pt +15537 + EMK Design + Waitman C Gobble II + waitman&emkdesign.com +15538 + ASIGRA Inc. + David Farajun + dfarajun&asigra.com +15539 + Media 4 Sp. z o.o. + Jakub Jakacki + admin&md4.pl +15540 + AM Corporation + David Ford + davidford&amcorp.com.au +15541 + RH Ministarstvo gospodarstva + Ema Culi + ema.culi&mingo.hr +15542 + EMK Design + Waitman C Gobble II + waitman&emkdesign.com +15543 + Illinois Power Company + Jimmy Hu + domreg&illinoispower.com +15544 + American Mensa, Ltd. + Howard Prince + isdirector&americanmensa.org +15545 + Probix + Edmund Jay + edmund&probix.com +15546 + eleven.am + D Harmison + dh&miami.edu +15547 + Onaras AG + Ramon Amat + amat&onaras.ch +15548 + Pillar Data Systems + Shang Hua Wang + swang&pillardata.com +15549 + XCache Technologies, Inc. + Steve Work + support&xcache.com +15550 + La Sierra University + Marc Thomas + ianaoid&lasierra.edu +15551 + Object Engineering GmbH + Andi Koch + akoch&objeng.ch +15552 + Statmon Technologies Corp. + Peter Upfold + peter&statmon.com +15553 + SnarlSNMP Dynamic Web Application Monitor Developers Group + Stephan Wenzel + stephanwenzel&users.sourceforge.net +15554 + Democratic National Committee + Chuq Yang + chuq&dnc.org +15555 + Gold Coast City Council + Patrick Clancy + pclancy&goldcoast.qld.gov.au +15556 + Chuq Yang + Chuq Yang + chuq&bigfoot.com +15557 + Inflection Technologies + Steve Varley + steve.varley&inflection.com.au +15558 + Institute of Chemical Process Fundamentals + Radek Nezbeda + nezbeda&icpf.cas.cz +15559 + Orionet + Danil Pismenny + dapi&mail.ru +15560 + Yanfa + Xuelong, Li + samside&263.net +15561 + Coleman Family Camps + Christopher Masto + chris&colemanfamilycamps.com +15562 + mbrace.org + Juergen Fiedler + juergen&fiedlerfamily.net +15563 + Adtron + Chris Budd + CBudd&Adtron.com +15564 + Aselia Technologies, Inc. + Jeff Hays + jshays&interaccess.com +15565 + Great Plains Mall.net + Theodore Reph + ted&reph.org +15566 + EmberPoint Holdings Co., Ltd (formerly 'Cheetah Digital Japan Co., Ltd') + Hideki Yamashita + hideki.yamashita&emberpoint.com +15567 + Neople Ltd. + Jeongyun Lee + system&neople.co.kr +15568 + Center for Development of Information Technology - CDIT + Dinh Kim Cuong + dinhkc&cdit.com.vn +15569 + Krasnoyarsk ICC subdivision of MCC, JSC "Russian Railways" + Vladimir Panfilov + admins&krw.ru +15570 + Bayerische Landesbank + Georg Michi + Georg.Michi&blb.de +15571 + Schuler Electronics + Manfred Schuler + M.Schuler&schuler-electronics.de +15572 + Tcom&dtvro + Jong M. Sohn + jmson&tcom-dtvro.com +15573 + Apertio Ltd + Kevin Wakefield + kevin.wakefield&apertio.co.uk +15574 + E-milio Internet Services, S.L. + Ramon Corominas + rcorominas&citec.es +15575 + Chimes, Inc. + Tom Carroll + tcarroll&chimesnet.com +15576 + Pompoen B.V + Oscar Steenmann + oscar&steenmann.nl +15577 + AirPrism + Kap J Shin + kap&airprism.com +15578 + DeepRoot Linux + Abhas Abhinav + abhas&deeproot.co.in +15579 + Saeson Telecom co., ltd + Andrew Choi + hoidoori&mail.co.kr +15580 + VNIINS + Dmitry V. 
Efanov + defanov&pisem.net +15581 + Telemed Communications Services GmbH + Robert Schmoelzer + hostmaster&tmc.at +15582 + Xtradyne Technologies AG + Joerg Bartholdt + Joerg.Bartholdt&Xtradyne.com +15583 + EUREG + Stephane Bortzmeyer + bortzmeyer&eureg.org +15584 + de Werknaam + Jeroen de Kreek + Jeroen&deKreek.nl +15585 + Northern Light Technology + Tracy Williams + pekoe18&hotmail.com +15586 + Response Mechanics, Inc. + Ranny Meier + Support&RespMech.com +15587 + Humboldt State University + Peter Johnson + paj1&humboldt.edu +15588 + WetWebMedia + Jason Chodakowski + jasonc&wetwebmedia.com +15589 + IntiGate Inc. + Kyungwon Kim + kwkim&intigate.com +15590 + Catboy Technologies + Steven R. Currie + srcurrie&currieweb.com +15591 + Alex Shepard + Alex Shepard + acidic&li.com +15592 + Viking Telecom + Jens Corneliusson + jens.corneliusson&viking-telecom.com +15593 + Kore Systems GmbH + Nick Baile + nick.baile&koresystems.com +15594 + Causeway Technologies + Javeria Aijaz + javeria.aijaz&causeway.com +15595 + MEAG MUNICH ERGO AssetManagement GmbH + Dr. Roman Prikryl + RPrikryl&meag.com +15596 + SONORK S.R.L. + Sr. Bruno Ricardo Sacco + bsacco&hypernet.com.py +15597 + Avamar + Viet LaMore + snmpoid&avamar.com +15598 + Compro Technologies, Inc. + Brian Thornton + brian&comprotech.com +15599 + Groom Lake Laboratories + Todd E. Johnson + tejohnson&groomlakelabs.com +15600 + Xeta Technologies + James Middleton + jim.middleton&xeta.com +15601 + Velocity Software + Barton Robinson + barton&velocitysoftware.com +15602 + lottasophie.de + Jörg Napp + joerg&lottasophie.de +15603 + Sunhillo Corporation + Ned Gubbi + ned&sunhillo.com +15604 + Kuopio Telephone PLC + Timo Hopponen + timo.hopponen&kpy.fi +15605 + Gray Laboratories, Inc. + Michael Sparks + msparks&graylabs.com +15606 + TopPioneer Technologies Limited + Alexander Chow + alex.chow&toppioneer.com +15607 + Encentuate, Inc. + Boon K. Law + boon&encentuate.com +15608 + Anders & Rodewyk + Nils Kaczenski + NKaczenski&ar-hannover.de +15609 + Iskon Internet d.d. + Jerko Klarica + jerko.klarica&iskon.hr +15610 + Herbert Retail Ltd. + Rowan Bradley + rowan.bradley&herbertgroup.com +15611 + MIC Electronics Limited + Chandra Sekhar Vallabhaneni + chandu&micsoft.com +15612 + SoftWare Services + Tom Young + tom&twyoung.com +15613 + gen-i limited + Robert Hunter + robert.hunter&gen-i.co.nz +15614 + Rick Bauman Consulting + Rick Bauman + rick&lowcountry.net +15615 + kma, inc. + brawnski l. armstrong jr + iana&brawnski.com +15616 + Dullroar Enterprises + James Lehmer + lehmer&dullroar.com +15617 + newlimits + Henry jean-luc + hertz&securify.ddts.net +15618 + Adrenalize, Inc.
+ Corey Leong + corey&adrenalize.com +15619 + Delean Vision + Nicolas Vandenberghe + nvandenberghe&deleanvision.com +15620 + Ceiva Logic + Aaron Shively + hostmaster&ceiva.com +15621 + ZP Technologies + Simon Newton + zpt&iinet.net +15622 + APTARE + Rick Clark + rick&aptare.com +15623 + Bachblue Pty Ltd t/a AFOYI + Darryl Ross + darryl&afoyi.com +15624 + Skopeo LLC + Vanson Samuel + admin&skopeo.com +15625 + Excedent Technologies + Bill Boebel + bill&webmail.us +15626 + Campus Crusade Asia Ltd + Leslie Chiang + leslie&sccc.org.sg +15627 + Spezifikum + Malte Mueller + mm&spezifikum.de +15628 + LAMARC GmbH + Lars Eisenblatt + lars.eisenblatt&lamarc.com +15629 + IQSoft Rt + Kiss Árpád + kissa&iqsoft.hu +15630 + InfoGuard HB + Magnus Petersson + magnus.petersson&infoguard.se +15631 + Vivista Limited + Caroline Andrewes + caroline.andrewes&vivista.co.uk +15632 + prime factory Gmbh & Co KG + Michael Juergens + michael.juergens&prime-factory.de +15633 + Tonisoft + Antoine Levy-Lambert + levylambert&tiscali-dsl.de +15634 + d3.net internet - technologien gmbh + Stephan Ahrensfeld + stephan.ahrensfeld&d3.net +15635 + China PKI + Han Song Ling + pki&pki.com.cn +15636 + Althea Technical + Simon Brown + simontst&cs.toronto.edu +15637 + Spinlock Network LLC + Kurt Sauer + kurt.sauer&spinlock.com +15638 + commissaire.net + Kurt Sauer + kurt.sauer&spinlock.com +15639 + e-solutions, lda + Rui Vitória + rui&e-solutions.pt +15640 + Clear Technology, Inc. + Ray Wee + dave.tyler&clear-technology.com +15641 + Masobit SRL + Marco Masoni + marco&masobit.net +15642 + Transplace, Inc. + William Ash + william.ash&transplace.com +15643 + Hallway Software Design Corp. + Brendan McKenna + brendan&hallwaysdc.com +15644 + B Bell + B Bell + b311b-iana20210701&theotherbell.com +15645 + pixell daten & design GmbH + Arndt Lückerath, technical contact + arndt&pixell.de +15646 + Pjusarafelag Islands + Ingimar Robertsson + iar&pjus.is +15647 + thesysadmin + Murat Bicer + murat&thesysadmin.com +15648 + Atlantis Services + Paul Pathiakis + paul&pathiakis.com +15649 + VOSGAMES + Take Vos + Take.Vos&vosgames.nl +15650 + Citrenesys Corporation + Jerry Walling + jerry.walling&citrenesys.com +15651 + Seventh Moon Co., Ltd. + Jun Miyata + jun&7moon.co.jp +15652 + SHE Informationssysteme AG + Claus Bayer + claus.bayer&she.net +15653 + Intesys + Miguel de Lucas Manzano + miguel_de_lucas_manzano&hotmail.com +15654 + Max Momentum + Dirk Beele + beele&maxmomentum.de +15655 + SITADELLE + Olivier LAFONT + olivier.lafont&sitadelle.com +15656 + Computer Park Ltd + Andrew Ray + aray&computerpark.co.uk +15657 + Macrocall + Lian Ngian + lngian&hotmail.com +15658 + Fillmore Labs + Oliver Eikemeier + eikemeier&fillmore-labs.com +15659 + Alaska Airlines + Brian Ferderer + Brian.ferderer&alaskaair.com +15660 + TLM Software + Kevin Carter + bilbo&tlm.us +15661 + 0x2.net + E. de Veij + deveij&xs4all.nl +15662 + fumph.com + John Bair + jbair&fumph.com +15663 + Aswan Co. Ltd. 
+ Aswan + superaswan&yahoo.com +15664 + Aractel Networks Ltd + Ruban Selvarajah + ceo&aractelnetworks.net +15665 + Eastern Communication Technology Development CO.LTD + gaofeng + gaof&mail.eastcomtd.com +15666 + orgatech Ulrich Tiemann GmbH + Christian Nordmann + nordmann&orgatech.de +15667 + Lechner-Rau Haustechnik + Axel Rau + Axel.Rau&Chaos1.DE +15668 + ZHONGFANG Information Co.,Ltd + Smith Zhang + zzh998&hotmail.com +15669 + Lifecycle Software + John Leonardis + tauceti&optonline.net +15670 + The Internet Marketing Center + Gary Richardson + gary.richardson&marketingtips.com +15671 + Wardega Consulting + Tomek Wardega + tomek&wardega.com +15672 + Javno preduzece "Posta Srbije" (formerly 'Javno preduzece PTT saobracaja "Srbija"') + Dragan Spasic + dspasic&ptt.rs +15673 + Codeangels Solutions + Kirill Alder + kirill&codeangels.com +15674 + Valente CC + Antonio Valente + antonio&valente.cc +15675 + StoneDonut, LLC + Charles W. Loughry + cloughry&stonedonut.com +15676 + baede + daniel baede + daniel&baede.net +15677 + Thomson-Thomson + Wei Lin + wei.lin&t-t.com +15678 + Software Projects Pty Ltd + John Jeffery + JJeffery&sp.com.au +15679 + Cape Com Inc + Vernon Grabel + sysop&cape.com +15680 + Digirati + Michel Silva Machado + michel&digirati.com.br +15681 + ROSCO Associates Technology Staffing Ltd. + Roger Walker + roger&rope.net +15682 + pkiclue.com, Inc. + Rodney Thayer + rodney&pkiclue.com +15683 + Acrosonic Corporation + Anthony C Moulton + tmoulton&acrosonic.com +15684 + Macro Technology + Naekyu Dan + nkdan&macrotek.co.kr +15685 + agala.net + Frank J. Beckmann + agala.net-admin-c&barda.agala.net +15686 + Mei Communication Co. + Mei Jingping + wh_angel&263.net +15687 + Magic Software Enterprises Ltd. + Rina Vinokurov + rinav&magicsoftware.com +15688 + Practical Labs + James Copher + jec&practicallabs.com +15689 + Heinz Family Germany + Tobias Heinz + tobias&formelheinz.de +15690 + LIISP + Dongwan Shin + doshin&uncc.edu +15691 + NetPower Solutions + Bart Vantieghem + bart&netpowersolutions.net +15692 + Minnigerode + David Minnigerode + minniger&minnigerode.com +15693 + IonIdea + Vasyl Rublyov + vasyl.rublyov&ionidea.com +15694 + Silicom + Ezra Koper + ezra&silicom.co.il +15695 + A. Pomerantz & Company + Andrew Paprocki + paprocki&pomerantz.com +15696 + Bayer AG + Stephan Herbertz + stephan.herbertz.sh&bayer-ag.de +15697 + Cellon Inc + Paul M. Moriarty + paul.moriarty&cellon.com +15698 + NLoci + Luke Renn + luke&terminaltechnologies +15699 + Renesys Corporation + James Cowie + cowie&renesys.com +15700 + Henrici IT-Consulting + D. Henrici + info&henrici.biz +15701 + Schmut + Mario Theodoridis + mario&schmut.com +15702 + Computer Image Technology + David Carroll + dwcar49us&yahoo.com +15703 + Software Diversified Services + Tim Full + tfull&sdsusa.com +15704 + Cyan Worlds, Inc. + Robert J. Emanuele + rje&cyan.com +15705 + Multitalents + Tim Rice + tim&multitalents.net +15706 + IGLOO SECURITY, Inc. + Dae-Soo, Choi + dschoi&igloosec.com +15707 + silve.net + Robert Silve + robert&silve.net +15708 + Beijing Westman Automation Ltd. Co. + Yanjun Wang + yjwang&btamail.net.cn +15709 + CBOSS + Boris Gribovsky + bgribovsky&cboss.ru +15710 + Megasoft + Andrew Mikhnevich + zcho&megasoft.ru +15711 + Euro DB + Didier Dubuisson + system.team&eurodb.be +15712 + Taringold Ltd. + Brendan McKenna + mckennab&taringold.ie +15713 + Drinsama GmbH + Erich Schubert + erich.schubert&drinsama.de +15714 + Lenel Systems International, Inc.
+ Ed Gauthier + ed&lenel.com +15715 + Blue Tree Systems + Kenn Humborg + kenn&bluetree.ie +15716 + gogo6 (formerly 'Hexago') + Mikael Lind + support&gogo6.com +15717 + Cegetel Net + Stephane Clodic + stephane.clodic&cegetel.net +15718 + Loyalty Management Group Canada, Inc. + Michael Kennedy + mkennedy&loyalty.com +15719 + Mass College of Liberal Arts + Peter Allmaker + pallmaker&mcla.edu +15720 + binaryMedia + Darnell Gadberry + darnell&binmedia.com +15721 + 7COMm + Edson Watanabe + edsonw&7comm.com.br +15722 + Concare + Bent Larsen + bent.larsen&concare.dk +15723 + Holoscenes.com + Jim Irwin + jimirwin&acm.org +15724 + VisionBank Corp. + Kate Lee + kate_lee&visionbank.com +15725 + ZyFLEX Technologies, Inc. + Tommy Chen + Tommy +15726 + Klug GmbH integrierte Systeme + Manfred Pausch + mpausch&klug-is.de +15727 + hofen.org + Kai Hofen + einkauf&hofen.org +15728 + Sentry Information and Alert Systems Corp + Gary Murphy + garym&teledyn.com +15729 + SyTrust GmbH + Florian Oelmaier + oelmaier&sytrust.com +15730 + Lulu Enterprises, Inc. + Mark Turner + iana&lulu.com +15731 + EasyPax Inc. + Igor Kravchenko + igor&easypax.com +15732 + ZIV Aplicaciones y Tecnología S.A. (formerly 'uSysCom') + Miguel Ángel Álvarez Cabanes + ma.alvarez&ziv.es +15733 + Washington State University + Rick Wegner + rick&wsu.edu +15734 + AION Systems + John Brinnand + john_brinnand&yahoo.com +15735 + Secure Technology Hawaii + Duane Takamine + support&sthi.com +15736 + Emergepoint + Duane Takamine + support&sthi.com +15737 + Zozoa Inc + Clodic Stephane + sc&clodic.com +15738 + Business Network Communications + Patrik Schilt + patrik&bnc.ch +15739 + Ruzz Technology Pty Ltd + Robert Rutherford + ruzz&ruzz.com +15740 + Digital Vision Technology Ltd + Tony Choi + tony.choi&dvm.com.hk +15741 + Softerra, LLC + Kirill Kovalenko + kirill&softerra.com +15742 + DiskSites Ltd. + Gavrie Philipson + gavrie&disksites.com +15743 + VTL (UK) Ltd + Systems Administrator (Daniel Siechniewicz) + systemsadmin&viatel.com +15744 + NetRatings, Inc. + Paul Petricevich + ppetricevich&netratings.com +15745 + Schoren NMS Solutions + Craig Rendon + c.rendon&attbi.com +15746 + ZONER software, s.r.o. + Petr Herman + herman&zoner.com +15747 + Locus Portal Corporation + Tero Heinonen + tero.heinonen&locusportal.com +15748 + NetGhost Communications + Steven Palmer + steelhawk&mail.com +15749 + Ingersoll-Rand Co Inc + Benjamin L Pugh + ben_pugh&thermoking.com +15750 + University of Tromsø + Børge Brunes + borge&cc.uit.no +15751 + TRC Fiord, JSC + Alexander Amelkin + noc&fiord.ru +15752 + slamb.org + Scott Lamb + slamb&slamb.org +15753 + MuTIC S.A.S + Antonio Pavanetto + admin&mutic.com.ar +15754 + Vancouver Community Network + Steven Chan + dns-admin&vcn.bc.ca +15755 + PIVoD Technologies + Phillip Jenkins + OID.Admin&pivod.com +15756 + Wistron NeWeb Corporation + Nanping Houl + nanping_houl&wneweb.com.tw +15757 + MITSUBISHI ELECTRIC INFORMATION TECHNOLOGY CORPORATION(MDIT) + HITOSHI WATANABE + watanabe-hi&mdit.co.jp +15758 + suntek beijingyanjiuyuan + tianxiaoyan + t800519&sina.com +15759 + Greener Pastures Innovations + John Wagner + john.wagner&gpinno.com +15760 + NMSWorks Software Limited + Dr. N. Usha Rani + usha&nmsworks.co.in +15761 + ATS Elektronik GmbH + Gerald Schroth + Gerald.Schroth&ATSonline.de +15762 + Khety + Ismaël Ruau + ismael.ruau&khety.com +15763 + Lagan + Gordon Collins + gordon_collins&lagan.com +15764 + REPSOL YPF, S.A. + MANUEL DATO BRIONES + seguridadlogicacorporativa&repsolypf.com +15765 + JX Solutions Ltd. 
+ Phil Bluer + phil.bluer&jxsolutions.com +15766 + Matrix Mailing, LLC + Jon Foster + jon&matrixmailing.com +15767 + Triblen + Josef Brunner + josef.brunner&triblen.de +15768 + Ericsson Inc. (formerly 'BelAir Networks') + Ryan Greer + ryan.greer&ericsson.com +15769 + Argecy Computer Corporation + Steve Lee + stevewlee&aol.com +15770 + Zeera Networks, Inc. + Dennis Marti + dennis&zeera.net +15771 + JF Possibilities + Jon Foster + jon&jfpossibilities.com +15772 + Fachhochschule Landshut + Peter Bauer + ba&fh-landshut.de +15773 + e-BS, a.s. + Michal Příhoda + mph&e-bs.cz +15774 + DesigNET, INC. + Kimiyoshi Ohno + admin&designet.co.jp +15775 + HEIDENHAIN + Juergen Kaesberg + kaesberg&heidenhain.de +15776 + Advanced Data Integration + Mick O'Neill + mick.oneill&advdata.com.au +15777 + Openin + Stephen Francis + stephenf&openin.com.au +15778 + DaxNetwork + xiaoli + mpnm&mail.maipu.com +15779 + Novel TongFang + Huang you sheng + yshuang&novel-tongfang.com +15780 + Kiloutou + Flamant + jmflamant&kiloutou.fr +15781 + TRANSFLOW Informationslogistik GmbH + Christoph Amann + christoph.amann&transflow.com +15782 + VIA MAT MANAGEMENT AG + Christoph Amann + christoph.amann&transflow.com +15783 + HRsmart, Inc + Daniel Pruessner + daniel&hrsmart.com +15784 + ELB Consulting + Eduardo Brito + elimabrito&hotmail.com +15785 + Montrusco Bolton Investments Inc. + Tim Colby + colbyt&montruscobolton.com +15786 + NetLedger, Inc. + Ted Rice + trice&netledger.com +15787 + DGN Service GmbH + Knut Goldberg + knut.goldberg&dgn-service.de +15788 + The Elders of the Universe + Richard L. Allbery + rla&arcologies.org +15789 + Infineon Technologies AG + Roger Uebel + roger.uebel&infineon.com +15790 + Wideray Corporation + Clinton Wong + clintdw&wideray.com +15791 + La Canada Wireless Association + Bob Knight + bob&bobknight.net +15792 + Interlink Group Incorporated + Paul Chizzo + paul.chizzo&ilg.com +15793 + Pittig Software & Internet Services + Klaus Pittig + klaus&pittig.de +15794 + DeGeorge Family + David L DeGeorge + dld&degeorge.org +15795 + Vandelay Internet Services, Inc. + Josh Hogle + josh.hogle&vandelay-inc.com +15796 + ImageONE Co., Ltd. + Yanan Yin + yy&imageone.co.jp +15797 + Infosys Technologies Limited + Shiv Kumar Sharna + shivkumar_sharma&infosys.com +15798 + Rato International Communication Co + James Wung + peasantry&163.com +15799 + Cesky Mobil a.s. + Vaclav Moucha + vaclav.moucha&oskarmobil.cz +15800 + JN Data A/S + Steen Bank Johnsen + sbj&jndata.dk +15801 + Huitsing Embedded Systems + Albert Huitsing + snmp&huitsing.com +15802 + Farlep + Michael Musikhin + moderatto&cetus.farlep.net +15803 + mercatis information systems gmbh + Volker Fritzsch + volker.fritzsch&mercatis.de +15804 + DLR SISTEC + Volker Padur + Volker.Padur&dlr.de +15805 + Sweetheart Cup Company, Inc. + Scott MacLellan + smaclellan&sweetheart.com +15806 + ConnecTalk Inc. + Tim Colby + tcolby&connectalk.com +15807 + DGN Service GmbH + Knut Goldberg + knut.goldberg&dgn-service.de +15808 + DGN Service GmbH + Knut Goldberg + knut.goldberg&dgn-service.de +15809 + WNET ISP + Alexey Kleschevnikov + ka&wnet.ua +15810 + Bank Severnaya Kazna OAO + pavel Pokrovsky + paul&kazna.ru +15811 + Digital Control, LLC + Weston Bustraan + wbustraan&mac.com +15812 + Radical Corporation + Thomas George + thomas.george&radicalcorp.com +15813 + AIS.PL + Romuald Dabrowski + rdab&ais.pl +15814 + Applied Concepts + Richard L. Melton + rmelton&electronicbingo.com +15815 + NetQA + Mark Trumpold + markt&tachyon.net +15816 + Infra 911, Inc.
+ Jaewoon Lee + ds1mgc&hanmail.net +15817 + Falcon System Consulting, Inc. + Toru Isogai + isogai&falconsc.com +15818 + Stadt Braunschweig + Sven Rethmann + sven.rethmann&braunschweig.de +15819 + Cryptolog International + Alexandre Stern + alexandre.stern&cryptolog.com +15820 + CASTOR Informatique + C. MACRIDIS + castor.telecom&wanadoo.fr +15821 + FreeBSD-CN Project + LI Dong + ld&freebsdchina.org +15822 + I-ST.net GmbH + Robert Niess + niess&i-st.net +15823 + LinuxWithin.com + John Lombardo + jl&linuxwithin.com +15824 + InaSoft + Dexter Lu + dlu&inasoft.com +15825 + ClearMetrix, Inc. + Erich Morisse + emorisse&clearmetrix.com +15826 + Cryptek, Incorporated + Andrew Scholnick + AScholnick&cryptek.com +15827 + Crocker Communications, Inc. + Matthew Crocker + matthew&crocker.com +15828 + H3G Italia S.p.A. - Enterprise C.A. + Lorenzo Giltri + lorenzo.giltri&h3g.it +15829 + DIGITAL PRODUCTION SARL + François FIGAROLA + sysadmin&digital-production.fr +15830 + Robinlea + John Francis Lee + jfl&robinlea.com +15831 + DENSO IT LABORATORY, INC. + Tomokatsu Okuya + tokuya&d-itlab.co.jp +15832 + BT Ignite solutions + Alan Taylor + alan.g.taylor&bt.com +15833 + SIGEC + Francis MARTINEZ + francis.martinez&sigec.fr +15834 + Tigard-Tualatin School District + Selena Brewington + sbrewington&ttsd.k12.or.us +15835 + M&S Systems + David Ornelas + dornelas&mssystems.com +15836 + Marcor Associates, Ltd. + Stuart Marks + stu&marcorassociates.com +15837 + Investment Technology Group + Derek Ealy + ealy&itgssi.com +15838 + AKA + Alex RENAULT + renault&aka.fr +15839 + The E.W. Scripps Company + Jamey Maze + jmaze&scripps.com +15840 + GEIDAX + jeremy wilson + jeremy.wilson&ps.ge.com +15841 + Daniel Ritter + Daniel Ritter + daniel.ritter&moonx.de +15842 + Blasberg-Computer-Systeme GmbH + Hagen von Eitzen + hostmaster&blasberg-computer.de +15843 + Massenbach + Christian von Massenbach + christian.von.massenbach&gmx.net +15844 + SELEX Communications S.p.A. + Leonardo Bertorello + leonardo.bertorello&selex-comms.com +15845 + DixPER Systems + Diego Perez + diper&dixper.com.ar +15846 + IP Fabrics + Rick Marion + rick.marion&ipfabrics.com +15847 + Harvard Law School + Nathan Logus + logus&law.harvard.edu +15848 + Sensorsoft Corporation + Hari Kosaraju + md&sensorsoft.com +15849 + Evans Companies + Richard Felix + richardf&evanscompanies.com +15850 + ReShape + Sarah Clatterbuck + it-staff&reshape.com +15851 + Computerized Medical Systems + David Harfst + harfst&cms-stl.com +15852 + Legion Interactive + Damien O'Rourke + dorourke&legioninteractive.com.au +15853 + Computer Center, Hitotsubashi university + Hiroaki Nagoya + nagoya&cc.hit-u.ac.jp +15854 + RIPE NCC + Bruce Campbell + ops&ripe.net +15855 + Luleå Segelsällskap + Michael Kolmodin + michael&kolmodin.net +15856 + Open Power Network, R.Schär + Ralf Schär + ralf.schaer&opn.ch +15857 + Administracion del Principado de Asturias + Jose Maria Alonso Fernandez + jmalonso&princast.es +15858 + all-in-green.com GmbH + Yan Hackl-Feldbusch + y.hackl&all-in-green.com +15859 + City of Tampere + Leo Lehtinen + Leo.Lehtinen&tt.tampere.fi +15860 + ParTec + Jens Hauke + hauke&par-tec.com +15861 + KiNETiK GmbH + Andreas Kurzac + a.kurzac&kinetik.de +15862 + BankService Plc. + Boian Baev + bbaev&bsbg.net +15863 + Innovative Navigation + Heiko Jaberg + heiko.jaberg&innovative-navigation.de +15864 + Unicity Pty. Ltd. 
+ Jim Crumpler + Jim.Crumpler&unicity.com.au +15865 + cab Produkttechnik GmbH & Co KG + Jörg Falkenberg + j_falkenberg&cabgmbh.com +15866 + Joe Rhodes Consulting LLC + Joe Rhodes + joe&joerhodes.com +15867 + IXI Mobile (R&D) Ltd. + Amit Shachak + AmitS&ixi.com +15868 + Cybris Network Information Systems + Christopher Fernando + cybris-iana&cybris.net +15869 + Tijd Beursmedia + Afdeling AOB + aob&tijdbeursmedia.nl +15870 + Egon Technologies + James Downs + james.downs&egon.cc +15871 + Omnilux + Carter Moursund + carter&omnilux.net +15872 + IT Merge Inc. + Gavin Barnard + gavin&itmerge.com +15873 + Guardian Life Insurance Inc + Omer Kose + omer_f_kose-shahul&glic.com +15874 + MDS Proteomics A/S + Rob Rankin + rrankin&mdsp.dk +15875 + M-PLIFY S.A. + David Tonhofer, M-PLIFY S.A. + d.tonhofer&m-plify.com +15876 + Microprogram Information LD. CO. + luke shei + luke&program.com.tw +15877 + API Technologies, LLC + Scott Meeuwsen + scottm&api-tech.com +15878 + AZTECH SYSTEMS LTD + Sun Zhengke + sun.zheng.ke&cn3.aztech.com +15879 + WaveIP Ltd. + Danny Erez + dannye&waveip.com +15880 + MONEYLINE BANKING SYSTEMS + Philippe FUMANERI + fumaneri&moneyline.fr +15881 + Thomas Fahle + Thomas Fahle + info&thomas-fahle.de +15882 + smart ip + Carlos J. Garcia + cjgarcia&smartip.es +15883 + Getronics France + Pierre Chavas + pierre.chavas&getronics.com +15884 + Noble Turkey Software + Eileen Boer + louboer&earthlink.net +15885 + ESAG Energieversorgung Sachsen Ost AG + Uwe Höger + Uwe_Hoeger&esag.de +15886 + Manhattan Associates + Don Ruby + druby&manh.com +15887 + Cosini Networks, Inc + George Lin + glin&cosini.com +15888 + TAI S.r.l. + Claudio Montanari + c.montanari&tai.it +15889 + Q-Free ASA + Audun Myrhol + audun&q-free.com +15890 + Prairie Grove Telephone Co. + Steven Karp + admin&pgtc.com +15891 + Panda Restaurant Group, Inc. + William Yu + William.Yu&PandaRG.com +15892 + Openmodes Technology Group Inc. + Neil Sequeira + nsequeira&openmodes.com +15893 + Emerson Climate Technologies Retail Solutions, Inc. (formerly 'Computer Process Controls') + David Rohn + David.Rohn&Emerson.com +15894 + AstroStage Inc. + Masatoshi Nakamura + nakamura&astrostage.co.jp +15895 + 4Linux + Alessandro Kenji Urakawa + alessandro&4linux.com.br +15896 + Southern Federal University (formerly 'Rostov State University, Computer Center') + Andrey Berezovskiy + noc&r61.net +15897 + Guangzhou GaoU S & T Development Co. Ltd. + Jiming Pan + jiming_pan&hotmail.com +15898 + Polizei Brandenburg + Mike Peter + mike.peter&polizei.brandenburg.de +15899 + inubit + Stephan Kahnt + stephan.kahnt&inubit.com +15900 + OpenWeb + Jens Reinemuth + jens&openos.de +15901 + Valsimmon Technology Group + G. RIchard Raab + Richard.Raab&valsimmon.com +15902 + MEN@NET + Jan Bartels / Niels Gustaebel + jan.bartels&menatnet.de +15903 + Seebek Ingeniería S.R.L. + Marcelo Ovando + zlimit&seebek.com +15904 + Peter Eckel, System and Network Management + Peter Eckel + Peter.Eckel&Eckel-EDV.de +15905 + Defero Systems AB + Rasmus Aveskogh + rasmus&lantech.se +15906 + WLAN AG + Christoph Kowalczyk + christoph.kowalczyk&wlangroup.com +15907 + Canyon Networks + Craig Scheets + cscheets&canyon-net.com +15908 + Business Technology Associates, Inc. 
+ Dirk Huizenga + netadmin&BusinessTechnologyAssociates.com +15909 + MRX Solutions + Charles Hsiao + chsiao&mrxsolutions.com +15910 + AutoTrader.com + Larry Korb + larry.korb&autotrader.com +15911 + he chuan + he chuan + h-chuan&jaist.ac.jp +15912 + BlueCom AS + Chris Qvigstad + hostmaster&bluecom.no +15913 + Networking Support Services + Jeff Morrison + jmorrison&networkingss.com +15914 + Netreo Incorporated + James Mancini + netreo-mibs&netreo.net +15915 + High Tower Software + Jeff Wong + jwong&high-tower.com +15916 + LANDesk Software + Alan Butt + alan.butt&landesk.com +15917 + Expedient Communications + Barnaby Brown + isops&expedient.com +15918 + the MORROW group + Patrick S. Morrow + pat&themorrowgroup.com +15919 + Hitachi Storage Software Incorporated + Jean Stein + j.stein&hitachissi.com +15920 + Switch Management + Brian Strand + bstrand&switchmanagement.com +15921 + MobileAccess + Yossi Appleboum + iftah&web-silicon.com +15922 + YISHANG INNOVATION TECHNOLOGY CO.,LTD + Nick + ningkg&yishang.com.cn +15923 + ICS GmbH + Markus Heller + heller&ics.de +15924 + Kuwait Linux Company + Vineet Mehta + vineet&linux.com.kw +15925 + Cadence Design Systems + Kaijun Zhan + kzhan&cadence.com +15926 + Cartier Partners Financial Group Inc. + Derek Schenk + schenkd&cartierpartners.ca +15927 + Point Loma Nazarene University + Corey Fling + oid-admin&pointloma.edu +15928 + SNB + Egon Barfuss + egon.barfuss&snb.at +15929 + Cambodia Samart Communication + Atthaya Pumsawai + atthaya.p&hello016-gsm.com +15930 + Lucasfilm Ltd. + Robert Jordan + robert.jordan&lucasfilm.com +15931 + Center for International Rehabilitation + Mike Traum + mtraum&cirnetwork.org +15932 + Wuertele + Martin Wuertele + iana&wuertele.net +15933 + Cyanea Systems Corp. + Robert Lam + robert.lam&cyanea.com +15934 + Multip Kft. + Thomas Rottler + tom-iana&multip.hu +15935 + Centerplex + Daniel J. Wilson + djw&centerplex.net +15936 + Koreadotcom + Sunhwa Han + ellesis&koreadotcom.com +15937 + Svyazinform of Republic Mordovia Joint Stock Company + Alex Macaroff + macaroff&moris.ru +15938 + JOINT-STOCK COMPANY IMPORT-EXPORT BANK "IMPEXBANK" , Moscow + Petrov Vladimir + petrov&impexbank.ru +15939 + Coral Telecom Ltd + Rishi Saini + rishi.saini&coraltele.com +15940 + Factoria de iniciativas internet fi2, s.a + Ramón Gutiérrez + info&fi2net.com +15941 + Galena Park ISD + Greg Payne + gpayne&galenaparkisd.com +15942 + Cabildo de Gran Canaria + Esther Mª Pulido + iana&grancanaria.com +15943 + CONEXUS Credit Union + Trevor Allen + tallen&conexuscu.com +15944 + Agence de la Francophonie + Pierre OUEDRAOGO + pierre.ouedraogo&francophonie.org +15945 + Ascertia + Liaquat Khan + liaquat.khan&ascertia.com +15946 + Calypso Networks + Carl Madison + carl.madison&calypsonetworks.com +15947 + New Jersey Institute of Technology + Dean Knape + knape&njit.edu +15948 + TREVSPLACE.COM + Trevor Allen + tallen&trevsplace.com +15949 + Fábrica de Idéias + Rodrigo Severo + rodrigo&fabricadeideias.com +15950 + 4am Media, Inc. + Michael Bartosh + mbartosh&4am-media.com +15951 + Axent Global + Michael Zammit + michael&axentglobal.com.au +15952 + Advanced Computer Resources + Matt Leo + matt&acrcorp.com +15953 + Aaron Spangler + Aaron Spangler + aaron&spangler.ods.org +15954 + Filesure Pte Ltd + Benjamin Har + benjamin&file-sure.com +15955 + OmniComp Technology Services + Alan Dobkin + IANA&OmniComp.Org +15956 + InCore Technology Ltd + Bernard Guillot + bguillot&incoretech.com +15957 + Muonics, Inc.
+ Michael Kirkham + support&muonics.com +15958 + TREVSPLACE.COM + Trevor Allen + tallen&trevsplace.com +15959 + Sue's Sound cc + Mr. R. Gush + robert&suesound.co.za +15960 + eAirports + Dawie Möller + dawie&eairports.aero +15961 + Cocking and Co. Ltd. + Romilly Cocking + romilly&cocking.co.uk +15962 + ASML + Michael Grasskamp + michael.grasskamp&asml.com +15963 + INFORMA COLOMBIA + SANTIAGO ROJO + srojo&informasa.es +15964 + Instituto Nacional de Astrofísica, Optica y Electrónica + Moisés Reyes M. + moises&inaoep.mx +15965 + Public Resources Management Group, Inc. + Gordon Threlkeld + gthrelkeld&prmginc.com +15966 + Visa, International. + Larry Liu + larryliu&visa.com +15967 + NCast Corporation + H. S. Magnuski + hankm&ncast.com +15968 + Emico Vero + Konrads Smelkovs + konrads&emicovero.com +15969 + Vindigo, Inc. + Marc Poulin + mcp&vindigo.com +15970 + Aspen Networks Inc + Sajit Bhaskaran + sajit&aspen-networks.com +15971 + Advanced Biometric Controls, LLC + Tuomo Lampinen + tlampinen&actechnology.com +15972 + Disaster Kleenup International, Inc + Ken Bowman + kbowman&disasterkleenup.com +15973 + JCorporate Ltd + Sandra Cann + scann&jcorporate.com +15974 + Skywave Corporation + Nathan Le Nevez + npl&acis.com.au +15975 + MEIKYO ELECTRIC CO.,LTD. + Mitsuo Yoshida + yoshida&meikyo.co.jp +15976 + Adacom S.A. + Kostas Noussias + knousias&adacom.com +15977 + ComPol II + Janek Gasiorowski + jg&compol2.com.pl +15978 + IT=it + Joris Schouten + joriss&itisit.nl +15979 + Tomsoft + Thomas-Henning von Kamptz + tom&tomsoft.com +15980 + TrackWell Software hf + Agust Einarsson + info&trackwell.com +15981 + Anillo Networks, Inc. + Terry Porter + anillonet&aol.com +15982 + Hubbub IT Services + Vincent Trinh + vincent.trinh&hubbub.com.au +15983 + Meru Networks + Damien Toledo + dtoledo&merunetworks.com +15984 + Authenex, Inc. + Ernesto Frutos + efrutos&authenex.com +15985 + The University of Lethbridge + Jeff Oliver + jeff.oliver&uleth.ca +15986 + Binara, Inc. + Daniel Wolk + dan&binara.com +15987 + Topoz Pty Ltd + Alan Kennington + topoz.snmp&topology.org +15988 + Cymes GmbH + Arnd Ben Otto + info&cymes.net +15989 + Torsten Schneider + Torsten Schneider + ldap&tschneider.org +15990 + x9 (formerly 'MikroBitti') + Antti Nilsson + muut&antion.fi +15991 + Raso + Raso Luciano + luciano&raso.org +15992 + Tracerdigital, LLC + Joel Peach + jpeach&tracerdigital.com +15993 + Net-Scale Technologies, Inc. + Urs Muller + urs&net-scale.com +15994 + XCOM Comunicacao Segura + Artur Romao + artur.romao&isp.novis.pt +15995 + it consultant Henning Follmann + Henning Follmann + hfollmann&itcfollmann.com +15996 + RNP Rede Nacional de Ensino e Pesquisa + Eduardo C. Grizendi + eduardo.grizendi&rnp.br +15997 + Wave Systems Corp. + Mihran Mkrtchian + mmkrtchian&wavesys.com +15998 + University of Glamorgan + Kevin Sewell + khsewell&glam.ac.uk +15999 + CPS Technology Group + Paul Johansen + pjohansen&cpstg.com +16000 + Syrea s.r.l. + Sala Renato + renato.sala&syrea.it +16001 + Ministry of Finance, Czech Republic + David Hrdy + David.Hrdy&mfcr.cz +16002 + Glyphix + Erik Riffel + riffel&glyphix.com +16003 + Zschimmer GmbH + Joacim Zschimmer + info&zsch.de +16004 + Intrusec, Inc. 
+ David Meltzer + djm&intrusec.com +16005 + Durham University + David Griffith + d.e.griffith&durham.ac.uk +16006 + CodeFab Inc + Charles Swiger + chuck&codefab.com +16007 + Native Instruments + Andreas Roedl + andreas.roedl&native-instruments.de +16008 + Consolidated Communications Inc + Robert Koester + robert.koester&consolidated.com +16009 + TelemaxX Telekommunikation GmbH + Jan Rischmueller + rischmueller&telemaxx.de +16010 + Kapsch BusinessCom AG + Mario Cenc + mario.cenc&kapsch.net +16011 + State of Utah DTS/DET (formerly 'State of Utah DAS/ITS') + Forrest Nielson + fnielson&utah.gov +16012 + MEMSCAP + Khalid Shaheen + khalid.shaheen&memscap.com +16013 + byteCentric + Paul Taylor + paul.taylor&bytecentric.com +16014 + Applied Materials Inc. + Monil Naicker + monil_naicker&amat.com +16015 + Primary Objective, LLC + Daniel A. Torrey + daniel&primaryobjective.net +16016 + Simmey Limited + Robert Allan + rob&simmey.com +16017 + CEW Co Ltd + takamichi takahasi + takamichi&cew.co.jp +16018 + punker.org + Max Malkov + max&punker.org +16019 + Reitek + Giovanni Carbone + itadmin&reitek.com +16020 + Wasadata System AB + Tor Borrhed + tor&wasadata.com +16021 + cassava Enterprises + John Jacks + john.jacks&gib.cassava.net +16022 + Minicom Advanced Systems Ltd. + Ofer Weisz + Ofer.weisz&minicom.com +16023 + inetadmin + Ayumu Sanada + admin&inetadmin.com +16024 + Netzwert Aktiengesellschaft + Jim Martin + jim&netzwert.ag +16025 + CIBLIS Engenharia S/C + Philippe de M. Sevestre + psevestre&ciblis.net +16026 + SoftAplic S/C Ltda. + Edesio Costa e Silva + edesio+snmp&ieee.org +16027 + Shah-USA + Rajesh B Shah + shah&shah-usa.com +16028 + XAOS Systems + Mario Scarpa + m.scarpa&xaos.it +16029 + Westleader International Inc. + Sun, Yuxi + sun&wltechnologies.com +16030 + Hongkong Post + LAM, Yuk Chiu + enquiry&hongkongpost.gov.hk +16031 + Atmark Techno, Inc. + Yasushi SHOJI + yashi&atmark-techno.com +16032 + Uraltransbank + Nikolay Popov + Nikolay.Popov&utb.ru +16033 + Telemic Oy + Timo Rissanen + timo.rissanen&telemic.fi +16034 + Opus Hadsel + Runar Lyngmo + runar.lyngmo&melbu.vgs.no +16035 + Icom Mobile + Niall Teskey + Niall.Teskey&icommobile.com +16036 + InfoCentre Ltd. + Anatoli Routchiev + agr&infosite.ru +16037 + em|Motion GmbH + Robert Linden + rl&em-motion.com +16038 + ORAYLIS GmbH + Thomas Strehlow + T.Strehlow&oraylis.de +16039 + Automated Systems Engineering, Inc. + Dan Mullin + d.mullin&goase.com +16040 + dataparty.net + Alex Brasetvik + alex&brasetvik.com +16041 + Hogeschool Brabant + P.M. Schoot + schoot.pm&hsbrabant.nl +16042 + JMP SYSTEMS + Lee, Hyoung-Geun + cujo&jmpsystem.com +16043 + Viasec, s.r.o. + Jozef Dlhy + dlhy&viasec.sk +16044 + todo Gesellschaft fuer Informationstechnik mbH + Thomas Kuehne + tkuehne&todo.de +16045 + Ocaco Finland Oy + Antti Pennanen + antti.pennanen&ocaco.com +16046 + DSD S.A. + Sergi Sanchez + ssanchez&dsdga.com +16047 + IT Practice + Jens Bo Friis + jbf&it-practice.dk +16048 + BOFRIIS + Jens Bo Friis + bofriis&get2net.dk +16049 + Flamenco Networks, Inc. 
+ David Pawloski + dpawloski&flamenconetworks.com +16050 + Krupczak Org + Bobby Krupczak + rdk&krupczak.org +16051 + CORETEAM AB + Joakim Eberlund + joakim.eberlund&coreteam.se +16052 + Chris Rauschuber + Chris Rauschuber + crauschuber&yahoo.com +16053 + Burger King Corporation + Hector Mir + EIN&whopper.com +16054 + Resource Data Management Ltd + Alan McBride + alan&resourcedm.co.uk +16055 + Blandsite + Brandon Knitter + knitterb&blandsite.org +16056 + okazaki city office + kikakubu itsuisinka + it&city.okazaki.aichi.jp +16057 + EventGnosis, Inc. + Lars Graf + lars&eventgnosis.com +16058 + GENKEY + Andrey + genkey&col.ru +16059 + Tsinghua TongFang Software Co. Ltd. + FENG Chang + fengc&ttsoft.com.cn +16060 + Lighthouse Information Systems, Inc. + Ken Harris + kharris&lhinfo.com +16061 + Adhersis North America + Dominic Grégoire + dominic.gregoire&avk-adhersis.ca +16062 + ZINCNetworks + xiaoli + mpnm&mail.maipu.com +16063 + DC Technologies + Bene Tam + bene-tam&dctechnologies.com +16064 + Perspectix AG + William Harris + harris-web-iana&perspectix.com +16065 + softArk Solutions + Ismaël RUAU + iruau&free.fr +16066 + NEC Telenetworx,Ltd + Tetsuya Obayashi + t_obayashi&mail.ntwx.nec.co.jp +16067 + BarcoIntelligentDisplays + Lode Leroy + lode.leroy&barco.com +16068 + Repeatit AB + Magnus Bergsten + magnus.bergsten&repeatit.se +16069 + MINEFI/SP + Francois Gerbaud/Martial David + sp-administration-systeme&sp.finances.gouv.fr +16070 + Bundesamt für Finanzen + Jochen Ritter + jochen.ritter&bff.bund.de +16071 + Banque Pictet et Cie SA + Jean-Michel PULFER + jpulfer&pictet.com +16072 + Linkeo.com + David Gourdelier + support&linkeo.com +16073 + Edith Cowan University + Lee Sanders + registration&scis.ecu.edu.au +16074 + VBL Karlsruhe + Andrea Ebeling + or50a&vbl.de +16075 + Uninet, S.A. + Consuelo Sanchez + chelos&reduno.com.mx +16076 + Dominion Diagnostics + Mike Muzzy + mis&dominiondiagnostics.com +16077 + Witbe + Paul Rolland + rol&witbe.net +16078 + Mouvement Républicain et Citoyen + Emmanuel Motchane + admin&mrc-france.org +16079 + bitMaster + Giovanni Piacentini + info&bitmaster.it +16080 + Software Logistics, LLC + Kevin D. Wolf + kevinw&software-logistics.com +16081 + KTALAND + GIQUELLO Cyrille + cyrille&ktaland.com +16082 + Bechtel Corporation + Michael B. McCaffrey + mmccaffr&bechtel.com +16083 + MaineBase + Robert Roux + prsman37&yahoo.com +16084 + House of Noise + Piotr Halasa + noise&linart.pl +16085 + Telexpertise de Mexico S.A. de C.V. + Edwin Solis + esolis&txm.com.mx +16086 + MITSUMI ELECTRIC CO.,LTD + IKUO NAKAJIMA + i_nakajima&atg.mitsumi.co.jp +16087 + Zayed University + T.R.Shinoy + shinoy.thachapully&zu.ac.ae +16088 + Groats Laboratories + Brian Musker + bmusker&groatslabs.com +16089 + eXwavecom Co.,Ltd. + Young Rok Jung + yrjung&eXwavecom.com +16090 + Estweb Ltd. + Ivari Horm + admin&estweb.ee +16091 + ECR + Urban Engemyr + urban.engemyr&ecr-consulting.se +16092 + AirWalk Communications, Inc. + Jay Park + JayPark&airwalkcom.com +16093 + AW Software + Andreas Willert + willert&awillert.com +16094 + Stacken + Richard Levitte + levitte&stacken.kth.se +16095 + Fluency Voice Technology Ltd. 
+ Martin Wren-Hilton + martin.wren-hilton&fluencyvoice.com +16096 + RCS-TECHNOLOGY + Rene Stepanek + rene.stepanek&rcs-technology.at +16097 + voice robots GmbH + Lewis Graham + lewis.graham&voicerobots.de +16098 + SCA Graphic Sundsvall AB + Lars Olofsson + Lars.Olofsson&sca.com +16099 + Pinion Group, LLC + Peter Banka + peter&piniongroup.com +16100 + Iron Mountain + Christopher Twombly + christopher.twombly&imrm.com +16101 + Cadero, Inc. + Jeffrey T. Lucas + lucasjt&cadero.com +16102 + Miharu Communications Inc. + Masayuki Sugai + snmp&miharu.co.jp +16103 + Nilgiri Networks + Dr. Timothy A. Gonsalves + tag&ooty.tenet.res.in +16104 + KeyCOM Information Technology CO.,Ltd + ZhiFu Xiong + xiezhijun99&vip.sina.com +16105 + Standard Information Data Centre of Ministry of Education P.R.C + ZhiFu Xiong + xiezhijun99&vip.sina.com +16106 + ICCS + ZhiFu Xiong + xiezhijun99&vip.sina.com +16107 + Enea Data AB + Magnus Svantesson + Magnus.Svantesson&enea.se +16108 + Garderos GmbH + Hubert Euzenot + iana&garderos.com +16109 + Pfannenberg + Rudolf Malicki + rudolf.malicki&pfannenberg.com +16110 + InfoCentre Ltd. + Anatoli Routchiev + agr&infosite.ru +16111 + Beaumont Hospital + Niambh Scullion + niambh.scullion&beaumont.ie +16112 + Valentine KOUTCHERBAEV + Valentine Koutchegbaev + val&vikax.com +16113 + Network Dweebs Corporation + Jonathan A. Zdziarski + jonathan&networkdweebs.com +16114 + Qualstar Corporation + Richard Nelson + RANelson&qualstar.com +16115 + CyberSarge, Inc. + Ron Cooper + roncooper&cybersarge.com +16116 + Enforcer Group, Inc. + Masuo Gates + mgates&copweb.com +16117 + Rev D Networks + John Chen + john.chen&revdnetworks.com +16118 + 3gfp + Richard H. Chapman + hchapman-oid&3gfp.com +16119 + Catalog.com + Bill Miller + bcmiller&catalog.com +16120 + SIMCON + Karl-Heinz Oliv + Karl-Heinz.Oliv&Simcon-MT.de +16121 + Caliber Technologies & Consultancy + Mangesh Bendre + mangeshbendre&yahoo.com +16122 + Netguild + Tom McComb + tmccomb&netguild.net +16123 + University of Florence + Valdo Pasqui + valdo.pasqui&unifi.it +16124 + X32 Pty Ltd + Dan Irwin + admin&x32.com.au +16125 + NET SPACE + Daniel Kiper + dkiper&netspace.com.pl +16126 + Parliament of Finland + Antti Aaltonen + antti.aaltonen&eduskunta.fi +16127 + Fathom Technology KFT + Tibor Borzak + administrator&fathomtechnology.com +16128 + A LAWRENCE RIEDEL + A LAWRENCE RIEDEL + Larry&Riedel.org +16129 + LightHouse Training and Consulting srl + Andrea Martano + andrea.martano&sinapto.com +16130 + NEC Australia P/L + Michael Knox + michael_knox&nec.com.au +16131 + Humax Co., Ltd. + In Kyun Lee + iglee&humaxdigital.com +16132 + Pham Kim Binh Nguyen + Pham Kim Binh Nguyen + midnight&midnight.org +16133 + Artur Sabik + Artur Sabik + asabik&softsystem.pl +16134 + CIRTI de Nantes + BAJET Jean-Marc + Jean-Marc.BAJET&urssaf.fr +16135 + Coherent Light + Vincent Wallet + iana&clight.fr +16136 + FT Interactive Data + John Cawsey + john.cawsey&ftid.com +16137 + Dr. Huggle & Partner GmbH + Felix Huggle + felix&huggle.de +16138 + E*Trade Financial Inc. 
+ Duc Doan + ddoan&etrade.com +16139 + Compellent Technologies + Michael Shishkin + mshishkin&compellent.com +16140 + MozApps.org + Shaun Savage + savages&mozapps.org +16141 + A-NeT Internet Services bv + Hans Bakker + info&a-net.nl +16142 + Network Technology and Engineering Institute of Xi'an Jiaotong University + Cao Jiuxin + cjx&xanet.edu.cn +16143 + Gina & Jonathan Oberg + Jonathan Oberg + jonathan&oberg.net +16144 + KERBEROS + Alain MOUILLERON + alain.mouilleron&kerberos.fr +16145 + DResearch Digital Media Systems GmbH + M. Rothe + rothe&dresearch.de +16146 + United Bulgarian Bank + Desimira Milusheva + milusheva_d&ubb.bg +16147 + intY Ltd + Stefan Kruger + stefan&inty.net +16148 + JavaRealm Software Development Team + Sergey Skugarev + s.skugarev&javarealm.com +16149 + Del Mar Analytical + Leslie VanExel + les&dmalabs.com +16150 + White Wolf Camarilla + Jerry Spaulding + camtech&white-wolf.com +16151 + Pima Community College + Betsy Hasman + nettechcontact&pima.edu +16152 + The Cincinnati Insurance Companies + Joshua Landin + josh_landin&cinfin.com +16153 + Snert + Anthony Howe + achowe&snert.com +16154 + Network Resource Technologies Corp + Gregory Wendel + gwendel&nrtc.biz +16155 + HalcyonFlame + Ranju Mathew + ranju_mathew&hotmail.com +16156 + Feral Nerds Enterprises + Andrew Stalker + andrew_stalker&hotmail.com +16157 + RDI + Jacques GRILLOT + grillot&rdipc.com +16158 + CiTR Pty Ltd + Shane Hughes + shane.hughes&citr.com.au +16159 + UQ Business School + Andy Jones + oid-admin&business.uq.edu.au +16160 + Sphere Systems + Christopher Smith + iana&christophersmith.net +16161 + CSC - Scientific Computing Ltd + Janne Kanner + Janne.Kanner&csc.fi +16162 + RM-System Holding, a.s. + Pozgay Viktor + pozgay&rms.sk +16163 + Elettronika S.r.l. + Modugno Giuseppe + g.modugno&elettronika.it +16164 + zylk + gustavo fernanadez + gus&zylk.net +16165 + Crescendo Networks + Yiftach Shoolman + yiftach&crescendonetworks.com +16166 + University of Sheffield + Richard Gilbert + R.Gilbert&sheffield.ac.uk +16167 + Repsol Quimica + Manuel Dato Briones + seguridadlogicacorporativa&repsolypf.com +16168 + MediSync Midwest Limited + Roger Cass + rogercass&medisync.com +16169 + Teleredes + Luis Casamayou + lcasamayou&teleredes.com.uy +16170 + totaleasy + kuman + kumanc&hananet.net +16171 + Zucchetti.com srl + Maurizio Boriani + boriani&zucchetti.com +16172 + Danger Incorporated + Jeff Bush + jeffbush2002&yahoo.com +16173 + Arizona Western College + Seth Hollyman + seth.hollyman&azwestern.edu +16174 + Sensatronics LLC + Keith Wright + kwright&sensatronics.com +16175 + TRIADE Beratungsgesellschaft für Informationstechnologie mbH + Dr. Nuhn + nuhn&triade.de +16176 + Zephra Corp. + Fidel Salas + fisal&zephra.com +16177 + Westermo Teleindustri AB + Daniel Rooth + daniel.rooth&westermo.com +16178 + Elevance Health, Inc. + Kyle P. Johnson + kyle.johnson&elevancehealth.com +16179 + WAN Norway + Sten Daniel Sørsdal + sten.daniel.sorsdal&wan.no +16180 + Blue Data Networks Ltd + Graeme Carpenter + graeme.carpenter&blue-data.net +16181 + E3Switch LLC + Tad Artis + e3mib_removethispart&e3switch.com +16182 + Whitesmiths Australia Pty Ltd + Peter Marks + marksp&whitesmiths.com +16183 + Hanaro Telecom + Kong Jun Woong + kong&hanaro.com +16184 + PT. Indorama Synthetics Tbk + Bambang Budiharto + budhi&indorama.com +16185 + NM Systems Co.,Ltd. 
+ Chang Gi Yun + ylemyun&nmsys.co.kr +16186 + Peter Schneider EDV + Peter Schneider + peter&psch.de +16187 + Aedilis UAB + Donatas Cerniauskas + donatas.cerniauskas&aedilis.lt +16188 + Materials Science and Technology Dept. - Univ. of Crete GREECE + Dimitris Stefanakis + dimstef&materials.uoc.gr +16189 + Assured Communications Group Limited + Jamie Craig + jcraig&apxhub.com +16190 + Colegio Oficial de Arquitectos de Madrid + Alfonso Rodriguez + arodriguez&coam.org +16191 + NightFire Software, Inc. + Frank Seidel + fseidel&nightfire.com +16192 + BCN Associates, Inc. + John Anderson + janderson&bcnassociates.com +16193 + Eagle Mountain International Church Inc. + Dave Augustus + davea&support.kcm.org +16194 + Rancho Morado + Wendy Jacobs + no-spam&rancho-morado.net +16195 + Thomson Corp Switzerland AG + Mario Widmer + mario.widmer&dialog.com +16196 + Washington Mutual + Dave McKinnon + david.mckinnon&wamu.net +16197 + Scoobtec + Thomas Hedler + thomas.hedler&fen-net.de +16198 + Investment Company Institute + Matthew R. Briggs + mbriggs&ici.org +16199 + openForce Information Technology GesmbH + Gerhard Hipfinger + gerhard.hipfinger&openforce.at +16200 + Casero Inc. + Aaron Cullum + acullum&casero.com +16201 + TI Telecomunicazioni & Informatica + Marco Cocchis + mcocchis&tandi.it +16202 + Oakland University + Theresa Rowe + rowe&oakland.edu +16203 + Sysdata Kft. + Laszlo Baroczi + laszlo.baroczi&siemens.com +16204 + Westinghouse Electric Company + Darlene Gonta + gontads&westinghouse.com +16205 + T-Systems GCF MSY + Axel Eberhardt + Axel.Eberhardt&T-Systems.com +16206 + Mobile TeleSystems Limited liability company + Solodukho Alexander + admin&mts.by +16207 + Splicecom Ltd + Brian Frampton + brian.frampton&splicecom.com +16208 + Universidade Lusiada + Nuno Beirão + nbeirao&por.ulusiada.pt +16209 + Collaborative Network Technologies Inc. + Les Cuff + lez&colabnet.com +16210 + Delta Piktori Oy + Rami Heinisuo + rami.heinisuo&eduix.com +16211 + Certified Security Solutions + Keyfactor PKI Policy Authority + keyfactorppa&keyfactor.com +16212 + wang + mrwant + mrwant&163.com +16213 + CEN/ISSS XFS Workshop + Antonio Prevignano + PrevigA&diebold.com +16214 + Pehkonen Family + Harry Pehkonen + harry.pehkonen&hotpop.com +16215 + Elbit Systems Ltd. + Shimon Rapoport + shimonr&elbit.co.il +16216 + Trinity CC Consulting, Inc. + Dan Haligas + haligasd&adelphia.net +16217 + K-WILL Corporation + Takuma Kawamura + kawamura&kwillcorporation.com +16218 + Talkline GmbH + Jan Andres + andres.jan&talkline.de +16219 + DB Telematik GmbH + Andreas Ziegenbein + andreas.ziegenbein&db-telematik.de +16220 + LANDMAT + Bergur Heimisson + bergur&landmat.com +16221 + Strodl + Andreas Strodl + andreas&strodl.org +16222 + DONOBi, Inc. + Wayne Tucker + wtucker&donobi.com +16223 + Global Messaging Solutions, Inc. + Trey Keifer + tkeifer&gmsi1.com +16224 + HTBLuVA Wiener Neustadt + Christian Hofstaedtler + christian&hofstaedtler.com +16225 + PaeTec Communications + Scott Pause + oidadmin&paetec.com +16226 + Public Service Enterprise Group + Jeffrey S. Intravaia + jeffrey.intravaia&pseg.com +16227 + VitalStream, Inc. + Rick Stevens + rstevens&vitalstream.com +16228 + AuriQ Systems, Inc. + Hubert Fong + hfong&auriq.com +16229 + NU Informationssysteme GmbH + Thomas Witt + witt&nugmbh.de +16230 + eCornell + Matthew Willis + noc&ecornell.com +16231 + Introspect Consulting, Inc. 
+ Michael Welter + mike&introspect.com +16232 + netMANj Project + Vijayaraghavan Kalyanapasupathy + vijayaraghavan.k&acm.org +16233 + Tukcedo Services + Michel J.L. van der Kleij + michel&tukcedo.nl +16234 + Dogico + Guillaume Huet + guillaume.huet&dogico.com +16235 + insignia financial group, inc. + kwan ng + kwan.ng&iesg.com +16236 + Andrew Maldonado Consulting + Andrew Maldonado + andy&amaldonado.com +16237 + Mantis Technology LLC + Lewis A Stahl + techwiz&nymed.net +16238 + TheCartCompany.com + Brett Doyle + brett_doyle&charter.net +16239 + Corbett Systems Development, Inc. + Steve Dickey + scd&corb.net +16240 + skinoske tech + Mursel Karabiyik + mursel.karabiyik&gantek.com +16241 + butlerNetworks AS + Robert Ker + rke&butlernetworks.com +16242 + Thames Valley University + Cossy Cosmas + cossy.cosmas&bydeluxe.com +16243 + AVS Consulting + Philippe Lagente + pla&avs-consulting.com +16244 + Ecole des Mines d'Alès + Laurent PELLISSIER + Laurent.Pellissier&ema.fr +16245 + Humanor + Lauri Jutila + lauri&jutila.net +16246 + Hertz Comunicaciones + Fermin Telleria + hertztuc&arnet.com.ar +16247 + eServGlobal + Allan Lawrie + allan.lawrie&eservglobal.com +16248 + litus.at + Gerald Aigenbauer + ga&litus.at +16249 + Italtel S.p.A. + LORIO GUIDO + Guido.Lorio&italtel.it +16250 + NetPrecept Ltd. + Ross Patterson + Ross.Patterson&NetPrecept.Com +16251 + Bonuso Industries + Patrick Bonuso + pat&bonuso.com +16252 + Mätäsahon suku + Mikael Mätäsaho + mixu&matasaho.net +16253 + Mätäsahon suku + Mikael Mätäsaho + mixu&matasaho.net +16254 + Holmen Paper AB + Björn Jonsson Dannetun + bjorn.jonsson&holmenpaper.com +16255 + Action Engine Corp. + Jeff Clark + jeff&actionengine.com +16256 + Develtech + GILLES Alexandre + ag&develtech.com +16257 + The Hong Kong Polytechnic University + Catherine Chan + itkychan&polyu.edu.hk +16258 + FeelingK + YoungGwon Choi + shyoon&feelingk.com +16259 + KOWA COMPANY,LTD. RESEARCH CENTER FOR ADVANCED TECHNOLOGY + Yasushi Kikukawa + y-kikukw&kowa.co.jp +16260 + The FreeBSD China Community + LI Dong + ld&FreeBSDChina.ORG +16261 + Kopint-Datorg Rt. + Csaba Lengyel + csaba.lengyel&kopdat.hu +16262 + Repsol Petroleo + Manuel Dato Briones + seguridadlogicacorporativa&repsolypf.com +16263 + swisswebgroup gmbh + jakub svoboda + jakub.svoboda&swisswebgroup.com +16264 + Itheon + Paul Snook + paul.snook&itheon.com +16265 + Open Methods + Nigel Runnels-Moss + nigel.runnels_moss&openmethods.com +16266 + The Optym Group + Jay Mosser + jaym&optymgroup.com +16267 + American Network Communications, Inc + John Coy + jcoy&anc.net +16268 + US Linux Networks, LLC + Geoffrey M. Silver + gsilver&uslinux.net +16269 + iomart ltd + Bill Strain + bill.strain&iomart.com +16270 + GravityRock.com + Karl Sola + karl.sola&gravityrock.com +16271 + New South Wales Fire Brigades + John Simonides + john.simonides&fire.nsw.gov.au +16272 + Legend (Beijing) Limited + Liu Tao + liutaof&legend.com +16273 + Nexge technology p ltd + Ramachandran.G + ramachandran&nexge.com +16274 + TELMAT Industrie + Denis RUHLAND + ruhland&telmat-net.fr +16275 + University of Kuopio + Olavi Manninen + Olavi.Manninen&uku.fi +16276 + Communications Regulation Commission + E. Rafailov + info&crc.bg +16277 + Mobile Internet Limited + John Bourke + john.bourke&mobileinternet.com +16278 + sLAB Informationssysteme + Lars Soltau + lars.soltau&slab.de +16279 + St. 
Francis Borgia RHS + Stefan Adams + stefan&borgia.com +16280 + OLTPCorp + Stefan Jon Silverman + sjs&sjsinc.com +16281 + SYSLOG Gmbh + Harald Strassberger + harald.strassberger&syslog.at +16282 + H5 Technologies + Peter Mei + pmei&h5technologies.com +16283 + OM + Aashish Jha + hariom_jha&sify.com +16284 + QuadFore Corporation + jaap schuijt + jaap&quadfore.com +16285 + ibpsearch + xiatian + xtllee&hotmail.com +16286 + Hungarocom Ltd + Gyula Bakos + hc_bgy&axelero.hu +16287 + Swiss Federal Institute of Technology + Nick Heim + heim&id.ethz.ch +16288 + EDV-Beratung + Peter Koenig + pkoenig&aon.at +16289 + Mediva Inc. + Akikazu Takada + mediva&mediva.co.jp +16290 + SIAT + Jordi Pineiro + jpineiro&siat.es +16291 + Meeting Maker, Inc. + Andrew H. Derbyshire + ahd&meetingmaker.plus.kew.com +16292 + Susurro + Kevin Crandell + kevin&susurro.com +16293 + BRVL Technology Ltd. + Jan Bruvoll + jan&brvl.com +16294 + Applied Instruments Inc + Jeff Haas + jhaas&appliedin.com +16295 + Ormond college + Ben + ben&unicol.unimelb.edu.au +16296 + Vindicia + Brett A. Thomas + iana-contact&vindicia.com +16297 + Semnode + Wiliam Dulin + willie&semnode.com +16298 + hanbang-soft technology co.,Ltd + Danhong Zhao + danhong_zhao&163.com +16299 + corega K.K. + Masaki Matsuka + matsuka&corega.co.jp +16300 + Penguin Infotech Pvt. Ltd. (formerly 'Penguin India Linux Solutions Pvt. Ltd.') + Vishwanath T. K. + vishwanath.tk&penguin-infotech.com +16301 + Silesian University in Opava + Lukas Kubin + kubin&opf.slu.cz +16302 + intrae + Jong Chung Yee + intrae&pchome.com.tw +16303 + Sans Serif + Janos Almasi + info&sansserif.hu +16304 + EADS + Dirk Kronenfeld + dirk.kronenfeld&airbus.com +16305 + KIBS AD Skopje + Marin Piperkoski + marinp&kibs.com.mk +16306 + Repsol Distribucion + Manuel Dato Briones + seguridadlogicacorporativa&repsolypf.com +16307 + StreamScale + Andrew Heaton + andrew.heaton&streamscale.net +16308 + RHOEN-KLINIKUM AG + Ulrich Simon + edvorg.simon&rhoen-klinikum-ag.com +16309 + NTT TechnoCross Corporation (formerly 'NTT Software Corporation') + Izumi Hiroyuki + ict-wan-staff-ml&ntt-tx.co.jp +16310 + Marketline Rt. + Buglos Tamás + buglos.tamas&marketline.hu +16311 + Alatec, S.A. + Miguel Angel Moreno Menendez + mmoreno&alatec.es +16312 + Register.com + Nathaniel A.F. Clark + nclark&register.com +16313 + Valemount Networks Corp + Lonnie Nunweiler + networks&valemount.com +16314 + Bureautique Services Developpement + Philippe LAMBERT + bsd&bsd.fr +16315 + X|support + Frans H. Schippers + frans.schippers&xsupport.nl +16316 + HSBC Bank North America + James Donn + james.donn&us.hsbc.com +16317 + Quebecor World North America + David Diaz + david.diaz&quebecorworld.com +16318 + empuron + Winfried Bergmann + winfried.bergmann&empuron.de +16319 + ViaVis Mobile Solutions Inc. + John Taschereau + john&viavis.com +16320 + The Brain Room Ltd + Wez Furlong + wez&thebrainroom.com +16321 + Eleveo a.s. + Tibor Marchyn + IANA&eleveo.com +16322 + Leading Edge Telemetry, LLC + Ed Nowicki + enowicki&letllc.com +16323 + Trax Holdings + Tim Omta + websupport&filex.com +16324 + Itelsys + Fernando Marin + fmarin&itelsys.com +16325 + NESCO CO., LTD. + kazutoshi tanaka + tanaka&nesco.co.jp +16326 + iyaburo.com + Deji Ako + deji&akomolafe.com +16327 + Mark Chesney + Mark Chesney + hostmaster&chesney.net +16328 + NARI Corp. 
(Nanjing Automation Research Institute) + Yang Wenqing + yangw&naricom.com +16329 + AdiSyS Oy + Peter Koski + Peter.Koski&adisys.fi +16330 + Reutech Radar System + Derick Thiart + dthiart&rrs.co.za +16331 + cosmocode GmbH + Andreas Gohr + gohr&cosmocode.de +16332 + Longshine Technologie Europe GmbH + Thomas Grashoff + thomas&longshine.de +16333 + bitmine AB + Bjorn Lofdahl + bjorn&bitmine.se +16334 + Northrop Grumman + Mark Foley + mark.foley&trw.com +16335 + Seoul Electric Power System co.,ltd + Hoon-yong Lee + lsee&mpupower.co.kr +16336 + Alatec, S.A. + Miguel Angel Moreno Menendez + mmoreno&alatec.es +16337 + Canadian National Railway Company + Vicki Gillis + vicki.gillis&cn.ca +16338 + IPOne + John Patton + patton.consulting&rogers.com +16339 + GlobeOp Financial Services, LLC + Pankaj Jain + pjain&globeop.com +16340 + White Rock Networks + Richard Wank + rwank&whiterock.com +16341 + Biomedical Informatics Research Network (BIRN) + Mark James + mjames&ncmir.ucsd.edu +16342 + Menta Group + Scott Marquardt + scott.marquardt&menta.com +16343 + UTTC United Tri-Tech Corporation + Gilles Therriault + gillest&uttc.ca +16344 + Perfigo + Carl Schroeder + cschroeder&perfigo.com +16345 + The Anvil Organisation Ltd. + Andrew Meredith + andrew&anvil.org +16346 + Farabi Technology + Charles Machalani + cmachalani&farabi.com +16347 + IPCAST + Kim Gill Joong + gjkim&ipcast.co.kr +16348 + Shanghai Transfiber Science&Technology Co.,Ltd. + Yuan Quan + yuanquan&transf.net +16349 + ComGear + Jason Tobler Abraham + meej321&yahoo.com +16350 + TOT Corporation Public Company Limited + Mr. Tinnakorn Itsrangkul Na Ayuthaya + tinnakoi&tot.co.th +16351 + Caixa Bank S. A. + David Soler + dsoler&lacaixa.es +16352 + Newman College of HE + Philip Turner + p.turner&newman.ac.uk +16353 + Howaldtswerke - Deutsche Werft AG + Ralph Genz + Ralph.Genz&HDW.de +16354 + Gruppennest + Knut-Henrich Schubach + k_schubach&yahoo.de +16355 + MIDRAY GmbH + Wolfgang Heinemann + pen&hud.li +16356 + Transfer Limited + Meder Bakirov + m.bakirov&transfer.kg +16357 + Hain Celestial Group + Ben Church + bchurch&hain-celestial.com +16358 + United Carrier Networks + Greg Bailey + gbailey&ucn.net +16359 + Miller Samuel, Inc. + Christopher Miles + cmiles&millersamuel.com +16360 + Contraloría General de la República + Jaime Romero + jaime.romero&eds.com +16361 + Progressive Linux Consultants LLC + Todd Lyons + ldap&progressivelinux.com +16362 + Waters Network Systems + Gary E. Carlson + garyc&wtrs.com +16363 + Scholz & Volkmer GmbH + Peter Reichard + support&s-v.de +16364 + SMARTDATA + Frédéric Gobry + frederic.gobry&smartdata.ch +16365 + valantic Transaction Solutions GmbH (formerly 'Dion Transaction Solutions GmbH') + Holger Kuest + holger.kuest&fsa.valantic.com +16366 + INFORMA DEL PERU, INFORMACION ECONOMICA SA + Javier Terrón + jterron&informasa.es +16367 + saarstahl + Christian Jung + christian.jung&saarstahl.de +16368 + Sysapex Communications Inc. + Ashwin Rao + raov&sysapex.com +16369 + Clark University + Joe Kalinowski + jkalinowski&clarku.edu +16370 + DASS Consulting Group Inc. + Jake Gibbons + jgibbons&citocognito.com +16371 + tygrysek.com + Bartosz Porzezinski + bloob&tygrysek.com +16372 + T-Mobile Czech Republic a.s. 
+ Marek Uher + itwebadmin&t-mobile.cz +16373 + Federal Public service Transport & Mobility + Philippe Brackeleire + philippe.brackeleire&mobilit.fgov.be +16374 + NV Multikabel + Eric Remijn + eric.remijn&multikabel.nl +16375 + University Of Athens + Chris Stavrogiannis + chriss&noc.uoa.gr +16376 + ACOSS + Olivier DANOFFRE + olivier.danoffre&acoss.fr +16377 + IMCTech + Sang-Jin Park + jinynet9&imctech.co.kr +16378 + Sarian Systems Limited + Andy Hood + andy&sarian.co.uk +16379 + River Cities Reader + Nick Welch + nick&rcreader.com +16380 + NEXVU Technologies + Robert Eden + reden&nexvu.com +16381 + Verifiber, LLC + Mark Barton + mbarton&verifiber.com +16382 + InterBox Internet + Erwin Lubbers + noc&box.nl +16383 + HP GSE Security + Jeff Parker + jeff.t.parker&hp.com +16384 + California State Polytechnic University, Pomona + Paul B. Henson + henson&csupomona.edu +16385 + Hudli + Wolfgang Heinemann + haus&hud.li +16386 + Daxiongmao + Lionel Sausin + lionel.sausin&free.fr +16387 + OEone Corporation + Todd Kelley + toddk&oeone.com +16388 + Jörg Eichhorn + Jörg Eichhorn + joerg&joerg-eichhorn.de +16389 + Pegasus EDV-Betreuungs-GmbH + Klaus Schleicher + ks&pegasus-edv.de +16390 + VineSys Technology + Bo Li + bli&vinesystech.com +16391 + RouteOne + Chris Irving + cirving&routeone.com +16392 + Cortland Communications + Evan Webb + evanw&cortland.com +16393 + Sean Champ Enterprises + Sean Champ + schamp&users.sourceforge.net +16394 + nVent, Schroff GmbH (formerly 'Pentair Technical Products,' formerly 'Pigeon Point Systems') + Dietmar Mann + Dietmar.Mann&nVent.com +16395 + William R Sowerbutts + William R Sowerbutts + will&sowerbutts.com +16396 + justinknash.com + Justin Knash + jmk&justinknash.com +16397 + U.S. Army ALTESS + Bobby Jones + bobby.d.jones&us.army.mil +16398 + Metallect Corp. + Bill McGrane + bill.mcgrane&metallect.com +16399 + Greyhavens + Neil Hemingway + neil.hemingway&greyhavens.org.uk +16400 + Singapore Telecommunication Limited + Richard Ng Swee Siah + richardng&singtel.com +16401 + Hewlett-Packard Slovakia + Ladislav Rusnak + ladislav_rusnak&hp.com +16402 + Teleformix, LLC + Jim Croci + jcroci&teleformix.com +16403 + Brobus International, Inc. + Tamir Halperin + tamir&brobus.net +16404 + NeuralWorx + Edward Curren + ecurren&hotmail.com +16405 + CPEG.Net + Huang Chun Kiu + cckiu&ust.hk +16406 + ADWIN + OTHO FRANCOIS + f.otho&adwin.fr +16407 + Lanux Limited + Steve Kennedy + iana&lanux.com +16408 + Leo Consulting + Hamid Yousaf + hamid_yousaf&gmx.net +16409 + Futuri Ltd + Gary Ford + gary_ford&hotmail.com +16410 + NEC COMPUTERS INTERNATIONAL B.V. + Marc ENGELSTEIN + marc.engelstein&nec-computers.com +16411 + Indicia Nederland bv + Leon Schuurbiers + ict&indicia.nl +16412 + Home Health Corporation of America + William OMalley + womalley&angelfire.com +16413 + rahbany.com + David Rahbany + drahbany&earthlink.net +16414 + itsanaddiction.org + Jeremy Hasty + jhasty&itsanaddiction.org +16415 + B.Bradenahl & H.Eggers + Bernd Bradenahl + bradenahl&ib2be.de +16416 + REAL DATA S.C. 
+ Piotr Orlewicz + porlewicz&real-data.pl +16417 + TMC HealthCare + Paul Lemmons + paul.lemmons&tmcaz.com +16418 + ULYSSIS + Thomas Daniels + ulyssis&ulyssis.org +16419 + PINBOARD + Kurt Keller + Kurt&pinboard.com +16420 + SRA International + Lauren Halverson + lauren_halverson&sra.com +16421 + Christian Albrechts Universitaet + Markus Rebensburg + rebensburg&rz.uni-kiel.de +16422 + MUMPK limited partnership + TSUCHIDA Takeo + takeo&mumpk.com +16423 + ABACUS Research AG + Remo Inverardi + inverardi&abacus.ch +16424 + Alanta + Borja Prieto + borja.prieto&alanta.info +16425 + City Utilities of Springfield MO + Jason Holcomb + jholcomb&cityutilities.net +16426 + Lawrence Technological University + Edward Donley Computer Center + edcc&ltu.edu +16427 + Internet Access Facilities BV + P. Middelink + info&iaf.nl +16428 + Dial Assurance, Inc. + Hal Finkel + half&dialassurance.com +16429 + Lindorff Holding AS + Thomas Albertsen + thomas.albertsen&lindorff.com +16430 + Mutiny Limited + Dr Andy Murray + A.Murray&Mutiny.com +16431 + Engineering.MO S.p.A. (formerly 'T-Systems Italia S.p.A.') + Stefano Mason + stefano.mason&eng.it +16432 + PKI Innovations Inc. + Paul Wiebe + pwiebe&pk3i.com +16433 + Moorehead Communications + Andy Thompson + athompson&mooreheadcomm.com +16434 + Jenzabar, Inc + Andrew Katz + andrew.katz&jenzabar.net +16435 + Clearswift Corporation + Neil Burgess + neil.burgess&clearswift.com +16436 + Bookham Inc. + Joseph Olajubu + joseph.olajubu&bookham.com +16437 + Bart + bartek + tatrolaz&poczta.onet.pl +16438 + Kansas Information Consortium + Michael Cook + michael&ink.org +16439 + PNX Pty Ltd + Peter Walsh + peterw&pnx.com.au +16440 + netINS, Inc. + Jim Turner + noc&netins.net +16441 + ENST + Pierre Beyssac + pb+iana&enst.fr +16442 + ProActive A/S + Henrik Kim Christensen + hkc&proactive.dk +16443 + P&O Nedlloyd Limited + Martin Glassborow + m.glassborow&ponl.com +16444 + UPTI + Danpo Zhang + dpzhang&upti.com +16445 + Kineto Wireless + Randy Turner + rturner&kinetowireless.com +16446 + Adlink + G. Warrick + uadmin&adlink.com +16447 + epix Internet Services + Philippe Levan + levan&epix.net +16448 + Neogate Co., Ltd + Ki-tae Park + ktpark&neo-gate.co.kr +16449 + Charcoal Generation Limited + Albert Kwong + albert&charcoalgeneration.com +16450 + IT-Concepts GmbH + Frank Allwicher + info&it-concepts.biz +16451 + SMC Pneumatics (N.Z.) Limited + Tony Farrell + tfar&smc.co.nz +16452 + Infocomp Pty Ltd + Darren Morton + dmorton&infocomp.com +16453 + Beijing Municipal Local Taxation Bureau + LI LongJiang + llj&tax861.gov.cn +16454 + AXWAY + Christian MAURIERAS + cmaurieras&axway.com +16455 + DFS Deutsche Flugsicherung GmbH + Thorsten Kisters + thorsten.kisters&ast.dfs.de +16456 + GPL Eletro Eletronica S/A + Mr. Alessandro de Oliveira Santos + gpl_engenharia&yahoo.com.br +16457 + Opera Logic, Inc. + Emilio F Panighetti + emilio&operalogic.com +16458 + Gemini Mobile Technologies, Inc. + Andy Dunlap + domain-admin&geminimobile.com +16459 + Epygi Technologies Ltd. + Mario Cuello + mario.cuello&epygi.com +16460 + Convertronic GmbH + Jörg Becker + j.becker&convertronic.de +16461 + Eurofluxo-Suporte a Novas Tecnologias, Lda. + Reis Pinto + reis.pinto&eurofluxo.pt +16462 + JAVCO Consulting + James A. 
Vasil + james.vasil&gmail.com +16463 + Metavante Corporation + Larry Strickland + lawrence.strickland&metavante.com +16464 + Pangolin Software Industries Ltd + Tim Barnett + snmp&hekate.org.uk +16465 + State College Area School District + Damian Futrick + daf14&scasd.org +16466 + Sonartech Atlas Pty Ltd + Julius Malkiewicz + julius&sonartech.com.au +16467 + Gigaworks + Christian Schroeder + webmaster&gigaworks.de +16468 + Deriva GmbH + Christian Schroeder + cs&deriva.de +16469 + Coastal Wave Internet + Shad Gunderson + shadgun&coastalwave.net +16470 + KEBA AG + G. Danczul + georg&keba.co.at +16471 + SCAN ASSOCIATES SDN BHD + Nik Khairul Raja Abdullah + nik&scan-associates.net +16472 + QMedit + Maarten Coene + Maarten.Coene&qmedit.com +16473 + Terranet Ltd. + Wilhelm Farrugia + techmaltanet&maltanet.net +16474 + mencial + Ezequiel Martín Cámara + ezequielmartin&yahoo.com +16475 + Hays IMS + Peter Nicholson + Peter.Nicholson&Hays-IMS.com +16476 + awsys gmbh + Andreas Woerner + info&awsys.net +16477 + SYSLINE S.p.A. + Dario Gerosa + dario.gerosa&sysline.it +16478 + eiwei Training & Consulting + Helge Weickardt + weickardt&eiwei.de +16479 + P4 Tecnologia Ltda. + Alexandre Rocha Lima e Marcondes + alexandre&p4tecnologia.com +16480 + Ministry of Transportation of Ontario + Emmanuel Morala + emmanuel.morala&mto.gov.on.ca +16481 + JOutfitters LLC + Jason Ellinwood + Jason&ellinwoodcomputing.com +16482 + Old Dominion University + David Dandar + ddandar&odu.edu +16483 + Pure Networks + Roxanne Skelly + roxanne&purenetworks.com +16484 + Gavin Newman + Gavin Newman + gavnewman&optusnet.com.au +16485 + Advanced Multifake Systems + Andreas Gaiser + agaiser&multifake.de +16486 + Dauphin MultiMedia + Peter Nieuwpoort + info&dauphin-mm.nl +16487 + THALES NL + Heidrun Laude-Lauber + heidrun.lauber&nl.thalesgroup.com +16488 + Electralink Ltd + S. Boyce + s_boyce&hotmail.com +16489 + The Hanover Company + Joseph Frolick + jfrolick&hanover-co.com +16490 + Sentryware + Lluis Mora Hidalgo + llmora&sentryware.com +16491 + INMETRICS LTDA - EPP + Eric Daniel Mauricio + ericmau&inmetrics.com.br +16492 + Scitor Corporation + Stan Barton + sbarton&scitor.com +16493 + AutoCell Laboratories, Inc. + Larry Stefani + lstefani&autocell.com +16494 + MassMutual + Steve Erickson + serickson&massmutual.com +16495 + iafrica.com + Andrew Glen-Young + sysadmin&metropolis.co.za +16496 + ACU-RITE COMPANIES INC. + Thomas L. Joneson + tjoneson&acu-rite.com +16497 + Repsol Exploracion + Manuel Dato Briones + seguridadlogicacorporativa&repsolypf.com +16498 + MEDIUM SOFT a.s. + Ing. Antonin Vesely + antonin.vesely&mediumsoft.cz +16499 + Graburn Technology + Ron Hiller + ron&graburn.com +16500 + S.W.A.C. GmbH + Hynek Petrak + hynek&swac.cz +16501 + S&H Greenpoints + Hasan A. Charania + hcharania&greenpoints.com +16502 + LGB PhOeNiX + Lisci Gianni Battista + phoenix.burn&email.it +16503 + Prairie Fire Internet Technologies + Erik Stave + erik&pfbiz.com +16504 + SindhSoft + Shahnawaz Soomro + ssoomro&onebox.com +16505 + TeamStaff Inc + Greg Haygood + ghaygood&brightlane.com +16506 + NeXtorage, Inc. 
+ Danilo Florissi + df&NeXtorage.com +16507 + Evanz Enterprises + Peter Evans + pete&evanz.net +16508 + Progel srl + Alberto Trigari + alberto.trigari&progel.it +16509 + Association des Medecins de Saint-Hilaire + MILLET Alain + millet&omega.amsh.org +16510 + The Pepsi Bottling Group + John Fortin + john.fortin&pepsi.com +16511 + Sentor Managed Security Services AB + Kenny Jansson + iana&sentor.se +16512 + Medienzentrum Osnabrueck + Wolfgang Ebneter + eb&medienzentrum-osnabrueck.de +16513 + Monaco Interactive + Sander Kruger + s.kruger&monaco-interactive.com +16514 + UUNET SA + Mustapha Baboo + sys-admin&za.uu.net +16515 + GRNET S.A. + Tryfon Chiotis + tchiotis&grnet.gr +16516 + elipsan + Pat Knight + pknight&elipsan.com +16517 + Partners HealthCare System Inc. + Scott Rogala + srogala&partners.org +16518 + New Particles Corporation + Lance Nehring + nehring&newparticles.com +16519 + Microdasys Inc. + Peter Pendelin + peter.pendelin&microdasys.com +16520 + pg-cs + peter grotz + pegr&gmx.net +16521 + Pronto Networks + Ram P Rustagi + ram.rustagi&prontonetworks.com +16522 + Telefonica Publicidad e Informacion S.A. + Juan Angulo + jangulo&tpi.es +16523 + Eric Yeo + Eric Yeo + oid&yeo.id.au +16524 + Morty Abzug + Morty Abzug + morty&frakir.org +16525 + Unicorn + James Huang + jhuang&seed.net.tw +16526 + KYA group + Teruhito Kanazawa + tkana&kyagroup.com +16527 + Internet Pictures Corporation + John Koehler + john.koehler&ipix.com +16528 + Leadtek Research Inc. + Daniel Miao + daniel.miao&leadtek.com.tw +16529 + HCL Infosystems Limited + RABI KUMAR SAHA + rksaha&hclinsys.com +16530 + Exalt Technologies + Ibrahim Kharman + ibrahimk&exalt-tech.com +16531 + brico dépot + Laurent Courtet + lcourtet&bricodepot.com +16532 + Sparda-Datenverarbeitung eG + Markus Volk + markus.volk&sdv-it.de +16533 + Consejo General de la Abogacia Española + Maximiliano Gonzalez-Fierro + max.fierro&cgae.es +16534 + Mycom International + Philippe BONNEAU + philippe.bonneau&mycom-int.com +16535 + BitBand Technologies Ltd. + Chagai LEVAVI + chagai&bitband.com +16536 + Tiscali UK + Jeff Cadman + jeff.cadman&uk.tiscali.com +16537 + Wireless Data Services + Clive Lawrence + clive.lawrence&uk.wdsglobal.com +16538 + Progressive Electrical Services, Inc. + Randall S. Graber + RGraber&rsgnic.com +16539 + Graber Enterprises, Inc. + Randall S. Graber + RGraber&rsgnic.com +16540 + E.D.S. Pubblica Amministrazione S.p.A. + Massimiliano Pullo + massimiliano.pullo&edspa.it +16541 + Wapiti Regional Library + Don Taylor + dtaylor&panet.pa.sk.ca +16542 + Harley Systems + detha ter harmsel + dth&hs.co.za +16543 + Standard Bank of South Africa + Cathy van Rooyen + cvanrooyen&sbic.co.za +16544 + LEASFINANZ AG + Alexander Jernejcic + alexander.jernejcic&leasfinanz.at +16545 + Cardservice International + Alex Suslik + asuslik&csi-corp.com +16546 + BBHK + Gerhard Kottsieper + betreuungsbuero&kottsieper.net +16547 + Alixen + Gilles Polart Donat + info&alixen.fr +16548 + Fachschaft Elektrotechnik und Informationstechnik + Bernhard Lichtinger + ldapmaster&fs.ei.tum.de +16549 + Giddens Industries Inc + John Darrah + jhd&giddens.com +16550 + Masters of Branding + Bob Ippolito + bob&mastersofbranding.com +16551 + ComQuest Tecnologia + Ricardo Lima + ricardo&comquest.com.br +16552 + Clear Communications Corporation + Vanessa F. 
Cecich + vcecich&clear.com +16553 + MediaWorlds + Adam Weber + superhuman&myrealbox.com +16554 + MDS Reprocessing + Dave Mitchell + dave&mdsr.com +16555 + Vassar College + Richard Versace + riversace&vassar.edu +16556 + www.schenkman.com + Thomas Schenkman + thomas&schenkman.com +16557 + IRT Electronics Pty Ltd + R Broad + rbroad&irtelectronics.com +16558 + Jiangsu Yitong High-tech Co.,Ltd. + chenxiaoxing + chenxiaoxing&yitong-group.com +16559 + ALOC Bonnier A/S + Yasaman Nazardad + yasamann&aloc.dk +16560 + Dr. Töpper Datentechnik + Töpper + info&dr-t.de +16561 + Sandar TeleCast AS + Trond Elvejord + trond.el +16562 + DePaul University + John Kristoff + jtk&depaul.edu +16563 + InterSystems + Robert Davis + rdavis&intersystems.com +16564 + EVERSTREAM, INC. + JOSEPH KULIG + jkulig&everstream.com +16565 + Pine Digital Security + Patrick Oonk + patrick.oonk&pine.nl +16566 + R.T. Shin and Associates + Andrew Schwartz + aschwartz&shin-associates.com +16567 + University of Belize + Wayne Godoy + wgodoy&ub.edu.bz +16568 + FUKKEN CO.,LTD. + Kenji Kurisu + k-kurisu&fukken.co.jp +16569 + RealVision Inc. + Masatoshi Kanzaki + kanzaki&realvision.co.jp +16570 + linux-at-work.de + Lars Kneschke + lkneschke&linux-at-work.de +16571 + Hungarian Telekom Plc. (formerly 'Westel Mobile Telecommunications Company Ltd.') + Mr. Gabor Bozo + bozo.gabor&telekom.hu +16572 + Centro Tecnico per la Rete Unitaria della Pubblica Amministrazione + Claudio Petrucci + c.petrucci&ctrupa.it +16573 + ICP Europe PLC + Doug Winter + dwinter&icpeurope.net +16574 + Ontario Systems + Michael Wolfe + michael.wolfe&ontariosystems.com +16575 + Oops Org + Alex Hooper + alexh&oops.org.uk +16576 + Novso + Nicolas DEFFAYET + hostmaster&novso.com +16577 + Virtual Targets Center + Michael Day + day-ma&rdec.redstone.army.mil +16578 + geos + Michal Scendo + scendo&mops.uci.agh.edu.pl +16579 + meurisse + Renaud Meurisse + oid-admin&meurisse.info +16580 + Swiftel Communications + Alex Craven + krei&staff.it.net.au +16581 + SIIG + LEGIN Matthias + matthias.legin&adm-ulp.u-strasbg.fr +16582 + BOC ONCOLOGY CENTER + Marios Pieri + marios.pieri&bococ.org.cy +16583 + NET6a + Serge NOEL + contact&net6a.com +16584 + EVPU a.s. + Ing. Milan Gajdos + sktc101&evpu.sk +16585 + ISOLUTION S.C. + Jaroslaw FILONIK + jfilonik&isolution.pl +16586 + Pallas Athena B.V. + Bert Slof + bslof&Pallas-Athena.com +16587 + Global Locate, Inc + Alexander Usach + ausach&globallocate.com +16588 + SSI Micro, Ltd. + Erik Sejr + eriks&ssimicro.com +16589 + CNMP Networks, Inc. + Gary Ding + gding&cnpnetworks.com +16590 + Scytl Election Technologies SLU + Jordi Puiggali + Jordi.Puiggali&scytl.com +16591 + Avtec, Inc. + Michael Cooley + mcooley&avtecinc.com +16592 + Laboratory Automation Inc. + Fred Behlen + fbehlen&laitek.com +16593 + STIXO + Arnaud ASSAD + contact&stixo.com +16594 + EmbTek + Simon T Smith + simon.smith&embtek.co.uk +16595 + RE-Design + Morten Roll Karlsen + moroll&start.no +16596 + TrendDiscovery Corporation + WH Shih + shih.wh&trenddiscovery.com +16597 + Landala Nät AB + Kalle Svensson + kalle.svensson&landala.com +16598 + Ton- und Studiotechnik GmbH + Gero Enders + g.enders&ts-online.com +16599 + VasSol, Inc. + Meide Zhao + meide&vassolinc.com +16600 + Marketing Resources, Inc. + Preston Kutzner + pkutzner&mrichi.com +16601 + Intrex + Jeff Mercer + jlm43&drexel.edu +16602 + IT-Vision AG + Dieter Burger + admin&it-vision.com +16603 + AirMagnet, Inc. 
+ Chia-Chee Kuan + ckuan&airmagnet.com +16604 + OpenSSL Software Foundation + Matt Caswell + osf-contact&openssl.org +16605 + Radiance BBS + Luciano Di Lucrezia + wiz&radiance.ods.org +16606 + DELFI + Kristijonas Siaulys + kristijonas.siaulys&delfi.lt +16607 + Distra Pty Ltd + Robert Pieper + robert.pieper&distra.com +16608 + Tottori SANYO Electric Co.,Ltd. + Toshikatsu Tanimura + ttanimu&torisan.co.jp +16609 + Phalanyx + Chris Traylor + ctraylor&phalanyx.com +16610 + Telemedia Software Corp. + Tonny Chen + tonny&telemedia.com.tw +16611 + QNET + Chris Linstruth + cjl&qnet.com +16612 + Zenon GmbH + Christoph Matthaeus + christoph.matthaeus&zenon-net.de +16613 + Cross + Federico Fernandez Cruz + fecru&telefonica.net +16614 + Genome Sequencing Center + Rich Wohlstadter + rwohlsta&watson.wustl.edu +16615 + Entrada Software, Inc. + Brian Williamson + brian.williamson&entradasoftware.com +16616 + DiscipleMakers, Inc. + Brian Roberg + robergb&dm.org +16617 + Merlin Aviation Systems Ltd. + Franz J Ehrengruber + franz&iptelenet.com +16618 + SAITC + Stefan Auweiler + stefan.auweiler&saitc.de +16619 + Bandspeed, Inc. + Michael Blanchard + mblanchard&bandspeed.com +16620 + Bugalux Denmark A/S + Valentina + valentina&bugalux.com +16621 + Computer Care, Inc. + Matt Finn + matt&bitcount.com +16622 + IntelligentAgents + Robert Johnston + TheHiveBrain&msn.com +16623 + Mobile Broadcasting Corporation + Shigeo,Nitta + nitta&mbco.co.jp +16624 + Planetactive GmbH + Jan Voges + jvoges&planetactive.com +16625 + Ocado Limited + Jonathan Sartin + jonathan.sartin&ocado.com +16626 + Norkom Technologies Ltd. + Ray O'Donnell + ray_odonnell&norkom.com +16627 + Chang Industry, Inc. + Wade Mergenthal + wmergenthal&changind.com +16628 + pdxcolo.net + Mike Guthrie + mikeg&pdxcolo.net +16629 + Certeon Inc + Shawn Amershek + samershek&certeon.com +16630 + IntellAgent GmbH + Dominik Wezel + dio&intellagent.ch +16631 + Bermuda Holding + Gerrit E.G. Hobbelt + Ger.Hobbelt&bermuda-holding.com +16632 + Seton Hall University + Matt Stevenson + stevenma&shu.edu +16633 + Danamis Associates + Dan Higgins + dan&danamis.com +16634 + VoxAge Teleinformatica Ltda + Rodolfo Contri Rondao + rodolfo&voxage.com.br +16635 + S-FRiENdS + Patryk Lason + eos&spin.ict.pwr.wroc.pl +16636 + KTROAD, Ltd. + Hijiri Umemoto + hijiri&ktroad.jp +16637 + SANYCOM Technology Co.,Ltd + guanzhiyong + guanzy&sanycom.com +16638 + AVL List GmbH + Philipp Stummer + philipp.stummer&avl.com +16639 + SoftSolutions! + Roberto Abati + roberto.abati&softsolutions.it +16640 + X.net 2000 GmbH + Holger Albrecht + halbrecht&xnet2000.de +16641 + International Solar Energy Society + Leander Conradie + infosys&ises.org +16642 + The Amulet Group + Dan Johnson + dan.johnson&amuletgroup.com +16643 + EXAGO + Christian Alexander Poisel + cpoisel&exago.de +16644 + Xtensive + Thierry MICHEL + thierry&xtensive.org +16645 + San Joaquin Valley College, Inc. + Craig Wall + Craig&sjvc.edu +16646 + R. F. Systems, Inc. + Richard A. Wayman + richard.wayman&rfsystems.net +16647 + Regal CineMedia + James Rivord + JRivord&RegalCineMedia.com +16648 + Centos Prime + Chris Janton + face&centosprime.com +16649 + Softel Systems Pty Ltd + Alan Conrad + alan.conrad&softelsystems.com.au +16650 + JSC Dalcombank + Ilya Rachkov + support&dalcombank.ru +16651 + Gift of the Givers Foundation + Dr. Imtiaz Sooliman + info&giftofthegivers.co.za +16652 + ThunderGeek + John Rodger + john&thundergeek.com +16653 + Eltron + Maciej J. Pyka + pm&eltron.pl +16654 + Electralink Ltd + Mr S. 
Boyce + s_boyce&hotmail.com +16655 + Lunar Gravity Networks + Josh Kleensang + jkleensang&lunargravity.net +16656 + m3production + Pierre Boinnard + pierre.boinnard&m3production.fr +16657 + ING Direct + itdatacomsupport&ingdirect.com + itdatacomsupport&ingdirect.com +16658 + ITA Software + Philip Dixon + iana-notify&itasoftware.com +16659 + Computer Science House + Bob Miller + bobprime&csh.rit.edu +16660 + DMX Technologies + Dominic Mak + dominic&dmx.com.hk +16661 + TACteam GmbH + Wilmer Geovanny Guamán Toledo + info&tacteam.de +16662 + CFS Global Services + Ricky Melville + rmelville&c-f-s.co.uk +16663 + Westec Holding Company Ltd + Darren Boyden + darrenb&westec-holding.co.uk +16664 + ACI Europe Ltd + Darren Boyden + darrenb&westec-holding.co.uk +16665 + Estonian Business School Group + Olev Loitme + Olev.Loitme&ebs.ee +16666 + PEC Products + Peter Ulrix + peter.ulrix&pec.be +16667 + Freax Sistemas + Federico Sayd + federicosayd&hotmail.com +16668 + ATX Networks Inc. (formerly 'PCI Technologies Inc') + Peter McCormick + pmccormick&atxnetworks.com +16669 + Freeland Haynes Limited + Laurence Wilks + it.manager&freelandhaynes.co.uk +16670 + Action Technologies, Inc + Juan Olea + jmolea&actiontech.com +16671 + American Physical Society + Paul Dlug + paul&ap +16672 + AirFlow Networks + Harry Bims + harryb&airflownetworks.com +16673 + Chris Mitchell + chris mitchell + chrisrmitchell&hotmail.com +16674 + Voyage Data, Inc. + Chris Mitchell + chris.mitchell&voyagedata.com +16675 + Chace Community School + Martin Stevens + stevens&chace.enfield.sch.uk +16676 + Servibanca- Grupo BCP + Joaquim Fraga + joaquim.fraga&bcp.pt +16677 + Centre of Medical Technology + Jacek Ruminski + jwr&eti.pg.gd +16678 + inX Services + George Garvey + tmwg&inxservices.com +16679 + Sta Track Enterprise Limited + Paco Lee + paco_lee&yahoo.com +16680 + Axia SuperNet Ltd + Daniel LaChance + daniel.lachance&axia.com +16681 + Valid Solutions + Christophe Van Ginneken + Christophe.VanGinneken&valid.be +16682 + Xtrac Ltd + Mike Gooding + mike_gooding&xtrac.com +16683 + IIT GmbH + Andreas Mueller + am&iit.de +16684 + European Humanities University + Alexander Kulak + sa&ehu.by +16685 + Optinel Systems + Brian B. Kane + bkane&optinel.com +16686 + Protego Networks, Inc. + Rui Liu + ruiliu&protegonetworks.com +16687 + Cyantel + Mike Rudden + mrudden&cyantel.com +16688 + InfraSec Sweden AB + Peter Andersson + peter.andersson&infrasec.se +16689 + Retalix + Marty Ramos + marty.ramos&retalix.com +16690 + Small Electric Motors Ltd + Darren Boyden + darrenb&westec-holding.co.uk +16691 + Tekron Communication Systems Inc. + Mike Farkouh + mikefar&tekronsystems.com +16692 + Optimal Technologies + Sébastien Taylor + sebastient&otii.com +16693 + LongReach Telecommunications Pty Ltd + Katherine Hamilton + katherineh&longreach.com +16694 + Purple Sun + David Ogier + dogier&purple-sun.com +16695 + CYGNUS MICROSYSTEMS PRIVATE LIMITED + HEMANT W GHAYAL + hwg&cygnusmicro.com +16696 + Decisiv Inc. + Geetha Ravishankar + geetha&decisiv.com +16697 + The Free Software Initiative of Japan + Fumitoshi UKAI + ukai&fsij.org +16698 + SCS Entriprise Systems + Jaruwat Boonmee + jaruwat&scses.co.th +16699 + GYL + Guang Yu Liu + gyl&unforgettable.com +16700 + Southwest Airlines Co. + Matthew Kleifgen + matt.kleifgen&wnco.com +16701 + Carlson Hospitality Worldwide + William J. 
Newman + nkriptr&yahoo.com +16702 + Naval Research Laboratory + Brian Cadwell + cadwell&ccs.nrl.navy.mil +16703 + University of Westminster + Mike Knell + m.knell&westminster.ac.uk +16704 + Yaga, Inc. + Nick Hengeveld + nickh&yaga.com +16705 + Loyola Marymount University + Gary Landau + glandau&lmu.edu +16706 + NavinMail Services (India) pvt ltd + Picaso Thakkar + picaso.t&navin.com +16707 + Exelixis Deutschland GmbH + Dr. Niketan Pandit + N.Pandit&exelixis-de.com +16708 + Wuhan Wearnes Technology CO,.Ltd(China) + Liyan + yli&wtwh.com.cn +16709 + same + Michael P Forman + michael.p.forman&jpmchase.com +16710 + Screaming Genius Meta Labs + Frederick J Polsky + fjp&screaming-genius.com +16711 + basis0.net + Jason Morefield + jasonm&techie.com +16712 + ILB + Sven Andresen + sven.andresen&ilb.de +16713 + Ohliger Christoph + Ohliger Christoph + christoph.ohliger&th-rosenheim.de +16714 + Gossamer Group, LLC + Skip Walker + skip&gossamer-group.com +16715 + TRLabs Regina + Xiaoran Cao + caoran&hotmail.com +16716 + Unisyn Software, LLC + Scott Robinet + scott.robinet&unisyn.com +16717 + University of Victoria + Garry Sagert + gsagert&uvic.ca +16718 + GMV S.A. + Carlos Illana + cillana&gmv.es +16719 + SAS IRIS Technologies + Arnaud FLORENT + aflorent&iris-tech.fr +16720 + OSS Service + Frédéric Perrenoud + frederic&ossservice.com +16721 + Chronos Technology Ltd + Software Development Team + software&chronos.co.uk +16722 + Methics Oy + Lennart Löfberg + iana&methics.fi +16723 + LawBase Technologies + Steven Earl Smith + SESmith&LBTinc.net +16724 + Fitzgerald Associates + Steven Fitzgerald + scfjm&telecominstitute.com +16725 + Itech Inc. + Jerry Maiers + itech-support&itech-mke.com +16726 + Dowslake Microsystems Corporation + Xin Cheng + xin.cheng&dowslakemicro.com +16727 + THALES AVIONICS + Yves DEGUINE + yves.deguine&thales-avionics.com +16728 + Wolcott Systems Group, LLC + Eric J. Kahle + eric.kahle&wolcottgroup.com +16729 + Midling and Associates + Michael Midling + mike.midling&gte.net +16730 + Higher Ground Networks LLC + Lee Clemmer + admin&highergroundnetworks.com +16731 + Industrial Medium + Nate Shue + nate&industrialmedium.com +16732 + Guardent, Inc. + Mark Benedetto King + ben.king&guardent.com +16733 + ActiveLife Ltd. + Mingder Lu + mdlu&ce.ntu.edu.tw +16734 + Orbital Data Corporation + Allen Samuels + allen.samuels&orbitaldata.com +16735 + TechnoCom Corporation + Brett Hoye + bhoye&technocom-wireless.com +16736 + JavaLi Ltd. + Hugo Monteiro + hmmm&fct.unl.pt +16737 + NeoTool Development, LLC + Dave Shaver + shaver&spamcop.net +16738 + InterVideo Inc. + Lifeng Zhao + lifeng&intervideo.com +16739 + Reserved + RFC-pti-pen-registration-10 + ---none--- +16740 + 3C Limited + Forrest Wang + sbchan&hk3c.com +16741 + Alexander Müller Informatik + Alexander Müller + info&a-m-i.de +16742 + topteam Services GmbH + Guus Leeuw + Guus-Leeuw&gmx.de +16743 + Hessische Zentrale fuer Datenverarbeitung + Miriam Rudolph + m.rudolph&hzd.hessen.de +16744 + BLANKOM + Frank Hoeper + frank.hoeper&blankom-digital.de +16745 + Alfa-Bank OJSC + Alexander Omelchenko + aomelche&alfabank.ru +16746 + springtimesoft LTD (formerly 'norbu09.org') + Lorenz Gschwendtner + lenz&springtimesoft.com +16747 + InterCommIT b.v. + Afdeling beheer + beheer&intercommit.nl +16748 + Studentska Unie CVUT + Michal Medvecky + admin&sh.cvut.cz +16749 + Extensible Systems Ltd. + Andy Hutchinson + pen&extensible-systems.co.uk +16750 + Gee Broadcast Systems Ltd + Keith Gee + keith&geebroadcast.co.uk +16751 + INSA, S.A. 
+ David Ibañez + dibanez&insa.org +16752 + 4G Consulting Ltd. + Owen Stubbs + info&4gconsulting.co.uk +16753 + dbNet Pty Ltd + Frank Carnovale + info&dbnet.com.au +16754 + MERLIN, spol. s r.o. + Mike Roskanuk + admin&merlin.cz +16755 + DanuSys, Ltd. + Simon Kim + yjinkim&danusys.com +16756 + Juasun.net + Taarifa + taarifa&juasun.net +16757 + IT SOFTWARE + Federico Spagnolini + f.spagnolini&itsoftware.it +16758 + Lakeridge Health Corporation + Brian Harrison + bharrison&lakeridgehealth.on.ca +16759 + Central Missouri State University + Steve Walker + swalker&cmsu1.cmsu.edu +16760 + Internet Direct Inc. + Mike Nerone + mnerone&idworld.net +16761 + New Mexico Tech + Daniel Lunceford + dlunceford&admin.nmt.edu +16762 + Olson Technology, Inc. + Tom Olson + richard.wayman&rfsystems.net +16763 + EZ Web-Tech, Inc. + Paul Codding + coddingp&ezwebtech.com +16764 + IDD Works Inc. + Jovan Strika + jstrika&iddworks.com +16765 + Schmitz-IT + Harald Schmitz + mail&schmitz-it.com +16766 + SV Systems + Sam Inala + sami&svsysinc.com +16767 + Wonderline Rt. + Administrators + admins&wonderline.hu +16768 + Marine Interface Inc + Eric Breen + Breener&MarineInterface.com +16769 + SV Systems + Sam Inala + sami&svsysinc.com +16770 + Dave Dodge + Dave Dodge + dododge&smart.net +16771 + 7seas Solutions Ltd. + Tamas Stolmar + iana&7seas.hu +16772 + University of California, Santa Barbara + Scott Gilbert + scott.gilbert&isc.ucsb.edu +16773 + Sourwood Research + Matt Mozur + matt.mozur&sourwood.net +16774 + 4U S.r.l. + Clizio Merli + clizio&net4u.it +16775 + GIP-MDS + Alain ROUX + alain.roux&gip-mds.fr +16776 + mmit of ECNU + Yangyan + yyang&ms.mmit.stc.sh.cn +16777 + Oxance + Jerome Lefranc + jerome.lefranc&oxance.com +16778 + Uniteam Init + Paolo Pezzoni + paolo.pezzoni&init.it +16779 + More-Secure B.V. + Meint Post + meint&more-secure.nl +16780 + Pathway Computing, Inc + Mike Sturdee + engineering&pathwaynet.com +16781 + WASP Systems Inc. + Claude Laferriere + claferr&magma.ca +16782 + ClickCadence, LLC. + Michael Dickey + mdickey&clickcadence.com +16783 + Abacus Technical Services + Michael Hammond + MCTMike&AbacusTech.info +16784 + Olson Technology, Inc. + Tom Olson + tolson&olsontech.com +16785 + Medizinische Universität Wien + Christian Kröppl + christian.kroeppl&akh-wien.ac.at +16786 + Eosys srl + Paolo Pezzoni + eosys&vizzavi.it +16787 + T-Systems Nova International GmbH + Andreas Kelling + Andreas.Kelling&t-systems.com +16788 + Information Technology Center (KYTP), Aristotle Univ. of Thessaloniki GREECE + Athanasios E. Siachoudis + asiach&itc.auth.gr +16789 + Berklee College of Music + Philip A. Durbin + pdurbin&berklee.edu +16790 + Myrio Corporation + Sagar Gordhan + sagar.gordhan&myrio.com +16791 + BRASCON + Boguslaw Rutkowski + brascon&brascon.pl +16792 + Zapata Engineering, P.A. + Bryan Ragon + bragon&zapeng.com +16793 + University of New England + Gordon Smith + gordon.smith&une.edu.au +16794 + Savant Technologies Private Ltd + Senthil Palanisamy + senthil&savanttechnologies.com +16795 + MRO - TEK LIMITED + Mohan Kumar Perala + mohan&mro-tek.com +16796 + Institute of High Energy Physics ,Beijing,China + Weiyi Zhang + wyzhang&mail.ihep.ac.cn +16797 + InfoThuis Nieuwe Media BV + Richard van Dijk + richard.mail&infothuis.nl +16798 + Digital Technical Ltd., National Library of China + Shijun Deng + dsjzzwdy0&hotmail.com +16799 + XL Global Services Ltd. 
+ Dave Cameron + dcameron&xlgs.com +16800 + IDYLIC + BUZAY + cb&idylic.com +16801 + Open Pricer + Xavier Poinsard + xpoinsard&openpricer.com +16802 + i2Q Ltd + Benjamin Ellis + bellis&i2q.co.uk +16803 + Tetra Pak Information Management AB + Jan-Ake Ljungqvist + janake.ljungqvist&tetrapak.com +16804 + netcadabra + Doug Eckhart + doug&netcadabra.com +16805 + Validus Medical Systems, Inc + OID Manager + oid-manager&validus.com +16806 + Dot New Media Ltd + Iain Campbell + iain.campbell&dotco.co.uk +16807 + Millennium Digital Media + Ken Borgmeyer + kborgmeyer&mdm.net +16808 + hambrecht.org + Kai Hambrecht + kai&hambrecht.org +16809 + Payless Shoesource, Inc + Robert J Dunlap + bob.dunlap&payless.com +16810 + Along Software Developer WorkShop + Changgen Zou + netservant&vip.sina.com +16811 + BlueTie, Inc. + Domain Administration + domainadmin&bluetie.com +16812 + UnixIron + Michael Kukat + michael&unixiron.org +16813 + Micronet Communications,INC. + Gilbert + gilbert&micronet.info +16814 + Quinsy B.V. + R. Doets + rdoets&quinsy.nl +16815 + GlidePath BV + GCC Administrator + gcc-support&glidepath.nl +16816 + Computer Plus GmbH + Ekkehard Burkon + hostmaster&cplus.de +16817 + De La Rue International Ltd + Clive Smeardon + clive.smeardon&uk.delarue.com +16818 + United Media Company GmbH + Robert Steiner + rst&umc-web.de +16819 + Priocom Corp. + Alexander Zhukov + zhukov&priocom.com +16820 + SOC + wenbin.xie + wenbin&shenou.com +16821 + Institute for Advanced Study + Kevin Kelly + sys-admins&math.ias.edu +16822 + Scott L. H. Yuan + Scott L. H. Yuan + yuansc&netartisan.org +16823 + Abeling-IT-Design + Elmar Abeling + elmar.abeling&web.de +16824 + OTAGAKI + Shunichi Otagaki + otagaki&otagaki.jp +16825 + Network Technology + Matt Naismith + matt&onthenet.com.au +16826 + Innomedia Technologies Private Limited + A Ravi + ravi&innomedia.soft.net +16827 + Secure Systems Ltd. + Vladimirs Satohins + vsop&securesystems.lv +16828 + IntelSoft Ltd. + Sergey Tamkovich + sergeus&okclub.org +16829 + DuckCorp + Dequenes Marc + duck&duckcorp.org +16830 + Fidelity Information Services + Shawn McKinney + shawn.mckinney&fnf.com +16831 + benhall.ca + Ben Hall + ben&benhall.ca +16832 + Auchan + Malika Saadi + msaadi&auchan.com +16833 + Logicworks Inc. + Bart Grantham + bart&logicworks.net +16834 + Hospital Billing Collection Service, Ltd. (HBCS) + C T Brooks + hostmaster&hbcs.org +16835 + Dahle Consulting + Odd Rune Dahle + oddrune&dahle.info +16836 + M2Studio Ltd. + Miljan Mitrovic + mmix&m2comp.biz +16837 + Washington County + Randall Hinton + randy&washco.state.ut.us +16838 + Omron Corporation + Takehiko Seki + takehiko_seki&omron.co.jp +16839 + Rodion Software + Rodion Raskolnykov + Rodion03&yahoo.com +16840 + Shanghai E-Rom Communication Technology Co.,Ltd + huijiang Yin + yhj21&21cn.com +16841 + Ifeelnet + Jong-Sik, Woo + jswoo&ifeelnet.com +16842 + In Phase Consulting + William Edward Woody + woody&alumni.caltech.edu +16843 + BarcoVision + Koen Mannaerts + koen.mannaerts&barco.com +16844 + CreaCare GmbH + Keller Daniel + keller&creacare.com +16845 + Electricité de France - Gaz de France + Anne Tanguy + anne.tanguy&edfgdf.fr +16846 + MPEC Wroclaw S.A. + Grzegorz Nowakowski + admin&e-wro.net +16847 + Sakura Network Japan + Kenji Tanaka + tanaka_k&sakura.ac +16848 + Independent Consulting Solutions Ltd.
+ Blythe Walker + wavewalker9&yahoo.com +16849 + BeyondData.net ITS + Matthew Mulrooney + iana.org&fm.beyonddata.net +16850 + 13 Colonies Software + Tim Cronin + tim&13-colonies.com +16851 + rheingold Institut für qualitative Markt- und Medienanalysen Gesellschaft bürgerlichen Rechts + Sven Morschbach + morschbach&rheingold-online.de +16852 + Telinfos + SeongGeun Lee + leesg&telinfos.co.kr +16853 + barracuda digitale agentur GmbH + Carsten Wawer + carsten&barracuda.de +16854 + STI Healthcare, Inc. + alex medvedev + alexm&stihealthcare.com +16855 + Metromorph Softworks Ruhri/List GbR + Alexander Ruhri + technik&metromorph.de +16856 + Bradford Software Inc. + Alan Hackert + hackert&bradford-sw.com +16857 + 3 Bean + Bill Hay + bill&3bean.net +16858 + Tall Maple Systems, Inc. + Eric Tucker + et&tallmaple.com +16859 + Multimedia Games, Inc. + Channing Corn + channing&mm-games.com +16860 + Thomas Endo + Thomas Endo + thomasaendo&gmail.com +16861 + Wefi.Net IT Consulting + Werner Fischer + office&wefi.net +16862 + ICONZ Ltd + Grant Zanetti + grant.zanetti&iconz.net +16863 + JSR.COM + John Russell + john.russell&iconz.net +16864 + {M:U} Consulting + Matthias Urlichs + smurf&smurf.noris.de +16865 + ZIM Technologies International Inc. + Andrew McCue + amccue&zim.biz +16866 + Software North, Inc. + Laurence R. North + lnorth&swnorth.com +16867 + TIBCO Software, Inc. DSPG (formerly 'DataSynapse, Inc.') + Ryan Glen + dspg-pen-administrator&tibco.com +16868 + Conseil Internet & Logiciels Libres, Jérôme Alet + Jérôme Alet + alet&librelogiciel.com +16869 + ROMmon Ltd + Petri Helenius + support&rommon.com +16870 + Banco do Estado do Rio Grande do Sul,S.A. + Enzo Theobaldo De Santis + enzo_de_santis&banrisul.com.br +16871 + Tchibo Frisch-Röst-Kaffee GmbH + Frank Markmeyer + frm&tchibo.de +16872 + Collaborative Commerce Engines + William J. Flanagan + bill.flanagan&collaborativecommerce.com +16873 + future gate group + Ronny Bremer + rbremer&future-gate.com +16874 + HiTRUST Incorporated + Kevin Liou + kevinl&hitrust.com.tw +16875 + Computer Plus GmbH + Ekkehard Burkon + hostmaster&cplus.de +16876 + KungFoo Coders + Paul Wagland + iana&kungfoocoder.org +16877 + All Media Banking BV + Robin Huiser + rhuiser&allshare.nl +16878 + Oldham Metropolitan Borough Council + Andrew McCall + it.andrew.mccall&oldham.gov.uk +16879 + SCA Transforest AB + Anders Åkre + itlogistics&sca.com +16880 + zehome.com + laurent COUSTET + ed&debian-fr.net +16881 + emni GmbH + Eric Roehri + e.roehri&emni.net +16882 + OpenEAI Software Foundation + Stephen Wheat + steve&openeai.org +16883 + GKB Technology Co., Ltd + Motohiro Gochi + mgochi&gkb-t.com +16884 + (IPL) Instituto Politécnico de Lisboa + Nuno Miguel Machado Cruz + ncruz-iana&net.ipl.pt +16885 + Nortel Networks + Sharon Chisholm + schishol&nortelnetworks.com +16886 + Revelstone Technology + Thomas Ledbetter + thomas.ledbetter&revelstone.net +16887 + IPLocks Inc. + Jerry Hu + jhu&iplocks.com +16888 + Neutelligent.com + Michael Conlen + mconlen&neutelligent.com +16889 + MYIT.BIZ, Inc. + Scott Smallie + ssmallie&myit.biz +16890 + Technisys + Gabriel Maffia + gmaffia&technisys.net +16891 + Meta GmbH + Christoph Bühring + cbuehring&gmx.at +16892 + Signalisation Ver-Mac, Inc. + Lesly Bien-Aimé + Lesly.Bien-Aime&Ver-Mac.com +16893 + Enterprise Rent-A-Car + Ray Westphal + raymond.w.westphal&erac.com +16894 + Cotelligent, Inc + Steven Fines + steven.fines&cotelligent.com +16895 + J.P. Evans, Inc. 
+ John Evans + private-enterprise-number&jpevans.com +16896 + SUNGARD Front Office Solutions + DAVID COTE + david.cote&sungard.com +16897 + sdata - C. Splittgerber Datentechnik + Christoph Splittgerber + info&sdata.de +16898 + Venture Industry International Corp. + Shingo Takahashi + takahashi&vii.jp +16899 + Corvigo, Inc. + Phil White + iana-contact&corvigo.com +16900 + a metareal material + Shingo Takahashi + metareal&vii.co.jp +16901 + Mojo Networks, Inc. (formerly 'Wibhu Technologies Pvt. Ltd.') + Varun Anand + iana&mojonetworks.com +16902 + APPLETZ CO., LTD. + Tomoyuki Kano + tomo&appletz.jp +16903 + TeleCats BV + Sjoerd Boomstra + s.boomstra&telecats.nl +16904 + Aylesford Newsprint Limited + Tim Feneron + iana_admin&aylnews.com +16905 + Ginger Alliance Ltd. + Pavel Hlavnicka + pavel&gingerall.cz +16906 + Enertron LLC + Sean Millichamp + sean&enertronllc.com +16907 + Truman State University + ITS Network Manager + jmcnabb&truman.edu +16908 + Binary Ape + Pete Birkinshaw + pete&binary-ape.org +16909 + Newberg Public Schools + Jamie McParland + jamiejunk&yahoo.com +16910 + chinaunionpay + su qun + xiaosu88&hotmail.com +16911 + General Wireless Scandinavia AB + Andreas Kalin + hostmaster&generalwireless.se +16912 + Bundesanstalt Statistik Oesterreich + Peter Wesel + peter.wesel&statistik.gv.at +16913 + Open Network Solutions, Inc + James Moulton + ContactEmail +16914 + nuit.ca + eric côté + simon&nuit.ca +16915 + The Hal Lewis Group, Inc. + Richard Beyer + rbeyer&hlg.com +16916 + ma-planete + Jean-Luc Rochat + rochat&ma-planete.net +16917 + Seecago + Erwin Hogeweg + erwin&hogeweg.cc +16918 + AEIIE - ARISE + weber + weber&iie.cnam.fr +16919 + Augsburger Computer Forum e.V. + Gerhard Schmidt + estartu&augusta.de +16920 + ARTE G.E.I.E + Frederic ROTH + F-Roth&arte-tv.com +16921 + HWACOM SYSTEMS INC. + FELIX LIN + felix.lin&hwacom.com +16922 + ville d'Echirolles + Colette Duranti + c.duranti&ville-echirolles.fr +16923 + RTV Slovenija, Javni zavod + Bojan Ramsak + bojan.ramsak&rtvslo.si +16924 + British Sky Broadcasting Ltd + Jim Doak + jim.doak&bskyb.com +16925 + IGA-PEGASE + Franklin LECOINTRE + franklin.lecointre&pegase-ri.com +16926 + Systemberatung Axel Dunkel GmbH + Axel Dunkel + info&Dunkel.de +16927 + MLB Associates + Gary Thomas + gary&mlbassoc.com +16928 + Mark South West + Mark Kaplun + mark&marksw.com +16929 + University of California, Merced + Enrique Flores + ehf&ucmerced.edu +16930 + CAM Security and Connections B.V. + Richard Lucassen + iana.spamtrap&lucassen.org +16931 + Sandwell Technologies, Inc. + Paul Sandwell + paul&sandwelltech.com +16932 + DICOM Group + Alexander + info&dicomgroup.ru +16933 + ALC Computertechnik GmbH + Juergen Marquardt + marquardt&alc.de +16934 + Marian Bracinik + Marian Bracinik + bracinik&prosoft.sk +16935 + Espoo-Vantaa Institute of Technology + Jukka Veikkolainen + jukkatv&evtek.fi +16936 + Databay AG + Ralf Schenk + rs&databay.de +16937 + e-Wriedt + Dirk Wriedt + dirk&e-wriedt.de +16938 + Sensational AG + Philip Hofstetter + phofstetter&sensational.ch +16939 + Addix Internet Services GmbH + Jan-Hendrik Palic + jhp&addix.net +16940 + CryptoGram SA + Odette GROS-BONNIVARD + ogrosbonnivard&cryptogram-fr.com +16941 + Oakley, Inc. + Ryan Erwin + ldapadmin&web.oakley.com +16942 + TRX + Krzysztof Kryński + trx&trx.com.pl +16943 + Overture Networks, Inc. 
(formerly 'Ceterus Networks') + Rodney Spruell + Rodney.Spruell&overturenetworks.com +16944 + Seven Blades + Carlos Crosetti + carlos&sevenblades.com +16945 + Grupo de Usuarios de Linux de Canarias + Ricardo Cárdenes + heimy&gulic.org +16946 + Litchfield Communications, Inc. + Dave Danenberg + dave_danenberg&litchfieldcomm.com +16947 + Program Line + Serge Shikov + sshikov&microtest.ru +16948 + RHODITECH + Sylvain FERIGO + Sylvain.FERIGO-EXTERIEUR&EU.RHODIA.COM +16949 + Mobifon S.A. + cristian garabet + cristian.garabet&connex.ro +16950 + AOM Active Online Marketing GmbH + Egon TILLER + egon.tiller&aom.co.at +16951 + COGEMA LA HAGUE + Stéphane Pierrat + spierrat&cogema.fr +16952 + Eisodus Networks Pvt. Ltd + Abhijit Gadgil + gabhijit&eisodus-net.com +16953 + TECCO Software Entwicklung AG + Stefan Witzig + office&tecco.at +16954 + Software Poetry, Inc. + Sean Nolan + sean&softwarepoetry.com +16955 + modulus-systems GmbH & Co. KG + Frank Rahn + rahn&modulus-systems.de +16956 + InboxCop, Inc. + Nathan DiNiro + nate&inboxcop.com +16957 + fionet.com - Internet & Services + F. Behrens + service&fionet.com +16958 + Instituto Tecnológico y de Energías Renovables, S.A. + Pedro Gracia + pgracia&iter.rcanaria.es +16959 + Cipher Security Services, Inc. + Chad Agate + info&ciphersecurity.com +16960 + Darron Nesbitt + Darron Nesbitt + jd_nesbitt&hotmail.com +16961 + Vaisala + Aki Lilja + aki.lilja&nogarbageplease.vaisala.com +16962 + Seven Levels Consultants AB + Torgny Hofstedt + Torgny.Hofstedt&sevenlevels.se +16963 + Intuwave Ltd. + James Maidment + james.maidment&intuwave.com +16964 + nonap + Kevin Cheung + kevc&nonap.com +16965 + AUNA TLC + OID Admin + oidAdmin&auna.com +16966 + Kiff Analytical, LLC + JD Sampson + jd.sampson&kiffanalytical.com +16967 + University Texas Medical Branch Galveston + Marsha Mullins + mmullins&utmb.edu +16968 + Magnifire + Iftah Bratspiess + iftah&web-silicon.com +16969 + eyebits studios + Dale Musser + dale&eyebits.com +16970 + Coublis Ltd + Colville Wood + colville&coublis.com +16971 + Lemche Net + valdemar lemche + valdemar&lemche.net +16972 + DSL Terminal Remote Management Alliance + Wang Jian + wangjian&huawei.com +16973 + CITS Group Inc. + Myhaiylo Yevych + info&cits-group.com +16974 + WORLDMINGO + THURMAN + TMINGO&ATTBI.COM +16975 + Horst Thiele - Maschinenbau - Hydraulische Geraete GmbH + Erik Thiele + erik&thiele-hydraulik.de +16976 + S.I.D.E. Resources & Development + Frank Polet + frank.polet&side.lu +16977 + Związek Bankow Polskich + Włodzimierz Krzysztofik + wlodek&zbp.pl +16978 + Medical Insight A/S + Morten Olsen + iana-pen&medical-insight.com +16979 + M2 Ingenierie et Conseil SARL + Timo C. Metzemakers + tcm&m2ic.fr +16980 + Jacob & Sundstrom, Inc. + Matthew D. Meyer + mmeyer&jasi.com +16981 + ITG Inc. + Ye Huang + yhuang&itginc.com +16982 + Masaryk University + David Rohleder + davro&ics.muni.cz +16983 + CINECA + Nico Tranquilli + ldap&cineca.it +16984 + Exacore Consolidated Systems + Chris Webb + exacore&yahoo.co.uk +16985 + Global Technology Co.,Ltd. + Frank Wang + Global.Frank&163.com +16986 + FACTOR SPE + Serge Kravchenko + factorse&i.com.ua +16987 + Esker SA + Thibault Mangold + mangold&esker.fr +16988 + The Manufacturers Life Insurance Company + Dave Rypma + External_Keys&manulife.com +16989 + KiSP, Inc. + Lewis Prosper + lprosper&kisp.com +16990 + Print Inc.
+ Robert Woodcock + rwoodcock&printinc.com +16991 + Starsoft + Johan Forslund + johan.forslund&starsoft.se +16992 + The Halifax Herald Limited + Paul Williams + paul&herald.ns.ca +16993 + Norsec + Nick Theriault + nt120305-iana&norsec.net +16994 + Key Circle + Afshin Daghi + afshin&keycircle.com +16995 + elego Software Solutions GmbH + Olaf Wagner + info&elego.de +16996 + Aboriginal Sports and Recreation Association of BC + Gordon Ali + gordon.ali&asra.ca +16997 + Innovative Consultants, LLC + Pinda Ndaki + pinda&consult-ic.com +16998 + Intoto Inc. + Sarat Vetcha + sarat&intotoinc.com +16999 + Cortinovis S.A. + Jérôme Schell + j.schell&groupe-jc.com +17000 + Centre de Telecomunicacions i TI de la Generalitat de Catalunya + David Prats Jubany + david.prats&t-systems.es +17001 + Act Technology Co.,Ltd. + Frank Wayne + w.zf&163.com +17002 + Copsey Communications Projects + Brian Copsey + bc&copsey-comms.com +17003 + University Of Dundee + Ian Angles + i.angles&dundee.ac.uk +17004 + Israel InterUniversity Computation Center + Hank Nussbacher + hank&mail.iucc.ac.il +17005 + Mangrove Systems, Inc. + Sean Harnedy + sharnedy&mangrovesystems.com +17006 + Spherion + Manuel Jimenez + manueljimenez&spherion.com +17007 + Cool IT + Administrator + admin&coolit.fi +17008 + Clever Age SARL + Matthieu Belge + mbelge&clever-age.com +17009 + imove + Stephan Pulver + spulver&imove.ch +17010 + hopecom optic communication Ltd. + luweiwen + luweiwen&hopecom.com.cn +17011 + Metadata Systems, Inc. + Thomas K Wong + snmp&metadata-systems.com +17012 + eCareme Technologies, Inc. + Cecilia Chen + Cecilia_Chen&ecareme.com +17013 + Guaranty Technology Corp. + Jack Tsai + jacktjc&guaranty.com.tw +17014 + Izumi Cyber Networks Inc. + Takenori Higashimura + oni&izumi-si.co.jp +17015 + manage.it GmbH & Co. KG + Wolfgang Zitzelsberger + w.zitzelsberger&manage.de +17016 + Info Jure Knowledge Ltd. + Simone Merli + simone.merli&ijk-eu.com +17017 + CyberTech de Colombia Ltd. + Milton Quiroga + mquiroga&uniandes.edu.co +17018 + Silikone.com + Soren Christensen + soren&silikone.com +17019 + Heinrich Berndes Haushaltstechnik GmbH&Co. KG + IT-Service + it-support&berndes.com +17020 + IRSN + Jean-Luc Verpeaux + jean-luc.verpeaux&irsn.fr +17021 + University of Central Florida + Aaron Streimish + aarons&pegasus.cc.ucf.edu +17022 + Avista Labs Inc. + Jeremy McClintock + jeremy.mcclintock&avistalabs.com +17023 + Apptera + Babu + bpapanna&apptera.com +17024 + Jackson Local School District + Keith Obermeier + kao2jc&polarbear.stark.k12.oh.us +17025 + Rether Networks Inc. + Sheng-I Doong + sdoong&rether.com +17026 + dotNSF, Inc. + George Chiesa + chiesa&dotNSF.com +17027 + YAAO + Lu Hongjie + luhongjie&aocom.com.cn +17028 + MarketXS.com BV + Alan Broady + alan.broady&marketxs.com +17029 + baumer it-services (formerly '0mode') + Roman Baumer + ldap&baumer-its.ch +17030 + Velveo + Francois TIFFREAU + ft&eytsec.com +17031 + Cluster Labs GmbH + Lothar Feige + Lothar.Feige&Cluster-Labs.com +17032 + City of Helsinki + Pirkka Luukkonen + pirkka.luukkonen&hel.fi +17033 + Medical University of South Carolina + Richard Gadsden + gadsden&musc.edu +17034 + NHN Corp. + Richard Yoo + swyoo&nhncorp.com +17035 + Cerberian Inc + Gregory Newton + gnewton&cerberian.com +17036 + Alisys Software S.L. 
+ Carlos Carús + carlos.carus&alisys.net +17037 + Brannstroms Elektronik AB + Peter Mathiasson + pm&brannstrom.se +17038 + Federal State Unitary Enterprise "Production Company 'Start'" + Alexander Lunyov + lan&startatom.ru +17039 + Vontu, Inc + Gagan Palrecha + gagan&vontu.com +17040 + Integrits Corporation + Kari Massarene + kmassare&integrits.com +17041 + TeleDomani, Inc. + Michael Pigott + pigotm&rpi.edu +17042 + Consultores Globales Online S.L. + Oscar Maqueda + oscar&consultoresglobales.com +17043 + PC-Repair Aalestrup + Peter + peter&homedisk.net +17044 + Red Fig Ltd + Alex Tapaccos + atapaccos&redfig.com +17045 + Network Technologies Group + Nathan Gardiner + nate&nate.id.au +17046 + ErrorNet + Simon Dassow + janus&errornet.de +17047 + Eurofresh.inc + Bruce Hermes + b.hermes&eurofresh.com +17048 + Ashland Inc. + Robert Stephenson + rlstephenson&ashland.com +17049 + Eurospider Information Technology A.G. + Andreas Baumann + andreas.baumann&eurospider.com +17050 + MediaSonic + Jay Morrison + jay.morrison&mediasonic.com +17051 + Last Minute Network Limited + Monika Szyc + tech_ops&lastminute.com +17052 + A Plus Marketing + Ken Pearson + web&universityservices.net +17053 + www.slavel.ru + Yuri Nosyrev + root&slavel.ru +17054 + Severoceska plynarenska, a.s. + Vladimir Naprstek + vladimir.naprstek&scplyn.cz +17055 + MoshJahan Ltd + Network Operations Manager + netops&moshjahan.net +17056 + Fusion Network Services Corp. + FusionGOL Engineering + noc&gol.com +17057 + Victor-Sierra + Vincent Williams + n1ufg&prodigy.net +17058 + National Radio Astronomy Observatory + Allyson Polak + apolak&nrao.edu +17059 + Cookingwise + Thomas Blomseth Christiansen + thomas&cookingwise.dk +17060 + zetein.net + Thomas Mauduit + monrou_r&epita.fr +17061 + Shanghai 4U Network Tech.Co.,Ltd. + yubo + yubo&4u-china.com +17062 + ArielServices + Ariel Peinado + arielpeinado&hotmail.com +17063 + Lightship Telecom, LLC + James Cook + jcook&lightship.net +17064 + CSX Technologies + Jerry Castille + jerry_castille&csx.com +17065 + Zions Bancorporation + Cory Nemelka + cnemelka&zionsbank.com +17066 + Collaboratech, Inc. + Mark Burgess + markb&collaboratech.com +17067 + Padimax Limited + John Risby + john&padimax.co.uk +17068 + Jcast Networks Corp. + Jai H. Eu + jeu&jcastnet.com +17069 + Leucotron Equipamentos Ltda. + Dilson Frota Moraes + snmp&leucotron.com.br +17070 + Hi De Ho + Keith Hughes + reg&hideho.org +17071 + Prime Electronics & Satellitics Inc. + Double Chiang + double&pesi.com.tw +17072 + Transgate + Masahiro Ono + masahiro&transgate.org +17073 + Universitaetsklinikum Freiburg + Thomas Jancar + thomas.jancar&uniklinik-freiburg.de +17074 + Senatsverwaltung fuer Gesundheit, Soziales und Verbraucherschutz Berlin + Klaus Habeck + klaus.habeck&sengsv.verwalt-berlin.de +17075 + Edwin L. Cox School of Business + Jimi Thompson + jimi&mail.cox.smu.edu +17076 + EXCELLENCE High Tech - Georgia LTD. + Alexander Nefedov + nealexus&yahoo.com +17077 + Girit Projects Inc. + Slavtcho Nikov + slavtchon&giritprojects.com +17078 + Innovation Software Group, LLC + Joseph Phillips + admin&innovationsw.com +17079 + AppIQ, Inc. + Jonathan Hill + jon.hill&appiq.com +17080 + BeTrained, Baumgartner KEG + moritz haslhofer + moritz.haslhofer&betrained.org +17081 + M.F. van Loon + M.F. van Loon + michiel&van-loon.xs4all.nl +17082 + Oregon University System + Anthony Brock + Anthony.Brock&oregonstate.edu +17083 + COMPART MEDICAL SYSTEMS + Jacek Owczarczyk + jo&compart-med-sys.home.pl +17084 + HON HAI PRECISION IND. CO., LTD. 
+ Serena Yeh + serena.sf.yeh&foxconn.com +17085 + Beijing BU Telecom Tech co.,Ltd + zhenzhong zhang + zhangzz&bt-t.com +17086 + BIK Aschpurwis + Behrens GmbH + Florian Hars + snmp&bik-gmbh.de +17087 + GrECo International AG + Peter Jarisch + p.jarisch&together.at +17088 + sichuan wisesoft Ltd. + lijin + lijin2001&tom.com +17089 + Universität Paderborn, Zentrum IT-Dienste + Andreas Brennecke + anbr&uni-paderborn.de +17090 + Packetizer, Inc. + Paul E. Jones + paulej&packetizer.com +17091 + Ciphire Labs + Lars Eilebrecht + le+iana&leogic.com +17092 + MediaRing Limited + Pawan Pandey + pawan&mediaring.com +17093 + Ellemedia Technologies Ltd. + Kostas Vaxevanakis + vaxevana&ellemedia.com +17094 + Blueslice Networks Inc. + Yan He + yan&blueslice.com +17095 + Microchip Technology Inc. + Nilesh Rajbharti + Nilesh.Rajbharti&microchip.com +17096 + Invesco Institutional, Fixed Income Group + Ronald V. Simmons + Van_Simmons&invesco.com +17097 + Weltek Technologies Co., Ltd. + Tat-Leung Ho + ho&weltek.com.hk +17098 + Innomenta GmbH & Co. KG + Klaus Schmidt + klaus.schmidt&innomenta.de +17099 + TellusTech Ltd. + Assen Stoyanov + softa&ttechgroup.com +17100 + Fabasoft R&D GmbH + Harald Pfoser + harald.pfoser&fabasoft.com +17101 + InternetNu + W.M. Baan Hofman + w.m.baanhofman&internetnu.net +17102 + Bororos + Bororos + bororos&monetplus.cz +17103 + Biznet Information Systems and Consultancy + Haluk Aydin + haydin&biznet.com.tr +17104 + 1-800 Communications, Inc. + Scott Dallamura + sdallamura&1800communications.com +17105 + Insignia Solutions + Anne Rosset + anne.rosset&insignia.com +17106 + GlobNIX + Phil Pennock + snmp-mib&globnix.org +17107 + The Konopka Corporation + Michael Konopka + mike&konopkacorp.com +17108 + Vinci Systems, Inc. + Joe Kralowetz + jkralowetz&vincisystems.com +17109 + Chotaro Co. + Masami Nishida + nischida&chotaro.com +17110 + Institut de Recherche pour le Développement (IRD) + THOURAUD Emmanuel + thouraud&bondy.ird.fr +17111 + JMSE + Jim McClure + jmcclure&insight.rr.com +17112 + Axstone Co., Ltd. + KI-HONG BANG + soogy&axstone.com +17113 + EvoNet Computer Network Evolution GmbH + Christoph Bott + iana&evonet.de +17114 + Keyrus S.A. + Liouville David + dliouville&keyrus.com +17115 + D-APP-X Corporation + Randy Bradley + randy&d-app-x.com +17116 + GMA COMMUNICATION MANUFACTURING & MARKETING 1991 LTD. + Liron Homski + liron&gma.co.il +17117 + Advantage Sales & Marketing + Andrew Garvin + oid.admin&asmnet.com +17118 + Byram Healthcare Centers, Inc. + Derek J. Balling + sa&byramhealthcare.com +17119 + OctetString, Inc + Phil Hunt + phil.hunt&octetstring.com +17120 + Team Cool Networks + Ken Gunderson + kgunders&teamcool.net +17121 + Geronimo Group Inc. + Carl Foster + cfoster&geronimogroup.com +17122 + Apogent Technologies Inc. + Peter J. Van Bruggen + pvanbruggen&apogent.com +17123 + Tavata Software Corporation + Peter Smith + psmith&tavatasoftware.com +17124 + Psycat Group + Lars Stenberg + lars.stenberg&psycat.net +17125 + Eze Castle Integration, Inc. + William Tan + wtan&eci.com +17126 + Nova Engineering, Inc. + Bill Walburg + billw&nova-eng.com +17127 + Kilowatt Networking, Inc.
+ Kyle Obear + kobear&kilowattnetworking.com +17128 + mg2.org + Michael Gregorowicz + junk&mg2.org +17129 + Zaclick Co.,Ltd + Joo Minsok + neinhart&zaclick.com +17130 + antiquity books + Michael Goldsmith + n2ucm&verizon.net +17131 + Rectifier Technologies Pacific Pty Ltd + Paul Davis + PaulDavis&rtp.com.au +17132 + Shanghai Dare Opto-electronic Communication Co.,Ltd + pengyuanlong + pengyuanlong&cndare.com +17133 + ILWOO DATA & TECHNOLOGY Co.,LTD + WILLIAM JANG + jckj&netian.com +17134 + Good Health Network, Inc + Lori Reed-Fourquet + fourquet&ix.netcom.com +17135 + Raaijmakers + Vincent Raaijmakers + vincent&raaijmakers.cc +17136 + PLG AG + Andreas Stoer + andreas.stoer&plg.de +17137 + GradSoft Ltd. + Ruslan Shevchenko + iana&gradsoft.kiev.ua +17138 + INHOLLAND Universities + Boris van Es + boris.vanes&inholland.nl +17139 + Internet Gold + Valdemar Lemche + atterdag&co.zahav.net.il +17140 + Versatel Nederland B.V. + Aleks Mitrovic + aleksandar.mitrovic&versatel.nl +17141 + Incenti S.A. + Pawel Karwowski + karwowskip&incenti.pl +17142 + Static Bytes + Javier Loureiro + derethor&derethor.net +17143 + Georg-Herwegh-Oberschule + Michael Hecker + mhecker&gmx.com +17144 + State University of New York at Stony Brook + Sanjay Kapur + Sanjay.Kapur&Stonybrook.edu +17145 + Rusch Consultancy B.V. + Guido Rusch + postbus&rusch.nl +17146 + UVHC + Fareneau Florent + florent.fareneau&univ-valenciennes.fr +17147 + Eastern Illinois University + J. Kahn Koontz + kahn&eiu.edu +17148 + iBurst Forum + Christian John + chrisj&arraycomm.com +17149 + Lodestar Technology Ltd + John Dudmesh + john&johndudmesh.com +17150 + simatika + jorge E. Courett + courett&softhome.net +17151 + COGNICA Ltd + Scott Wichall + sdwichall&hotmail.com +17152 + creamen + mannu + mannu&creamen.net +17153 + University of Connecticut + Rohit Kumar Mehta + rohitm&engr.uconn.edu +17154 + Kittredge Archery Co. Inc. + Adam Pisoni + adam&p3mammoth.com +17155 + VoiceAge Corporation + Jean Mayrand + jean.mayrand&voiceage.com +17156 + Zone Labs, Inc + Andy Hering + ahering&zonelabs.com +17157 + Chengdu Runwang Technology Co. LTD + liushanglin + sichuancdliushanglin&yahoo.com.cn +17158 + Laurasia Computing + Michael Kennett + snmp&laurasia.com.au +17159 + Henri Maire SA + Olivier METRA + ometra&henri-maire.fr +17160 + Lund University Hospital + Lennart Bertenstam + Lennart.Bertenstam&radfys.lu.se +17161 + Covisint LLC + John Schonmeier + jschonme&covisint.com +17162 + Airversal Technology Inc + Zhou Sanqiang + zhousq&airversal.com +17163 + Riverbed Technology, Inc. + John Cho + john.cho&riverbed.com +17164 + Callcast, Inc. + Sarah Clatterbuck + sclatter&callcast.com +17165 + Advantest R&D Center, Inc. + Clayton Conrad + c.conrad&advantest-ard.com +17166 + Louisiana State University and A&M College + Emilio A. Icaza + emilio&lsu.edu +17167 + Daotec Ltd. + Robert Zapfel + robert&daotec.com +17168 + Century 21 Mike Bowman, Inc. + Dusty Matthews + dusty.matthews&c21bowman.com +17169 + Diablotin + Yannick Cadin + Yannick.Cadin&Diablotin.com +17170 + Université Henri Poincaré - Nancy 1 - + Samson Bisaro + Samson.Bisaro&esial.uhp-nancy.fr +17171 + Qindel + Benito Martinez + nito&qindel.es +17172 + Melco International Development Ltd.
+ Neil Turner + nrt&iasiatech.net +17173 + Squeeky Clean + Suzie Benkoski + akira-iana&squeekyclean.net +17174 + Iridium Systems (Hangzhou) Limited + Shuhua Qi + qsh&irsys.com.cn +17175 + shanghai letel communications Co., Ltd + Geoffrey Gu + guguolin&letelopto.com +17176 + Perm State Pedagogical University + Beloglazov Maksim + beloglazov&pspu.ac.ru +17177 + TIS Inc. + Masao Kudou + bruce&tis.co.jp +17178 + VERA Autonoom Provinciebedrijf + Jan Vannieuwenhuyse + Jan.Vannieuwenhuyse&vera.be +17179 + Diaz + Miguel Diaz + mdiaz&netcabo.pt +17180 + Altibase + Hyun Su Choi + altibase&altibase.com +17181 + Dantek Computer Systems, Inc. + Dan Haanpaa + dantekcs&charter.net +17182 + CCC Data Oy + Zoltan Homorodi + zoltan.homorodi&ccc.fi +17183 + Yang Mei Technology Co., LTD. + Thomas Chen + ym&yangmei.com.tw +17184 + ProLAN + Sergey Yuditsky + ssy&prolan.ru +17185 + Richemont Group + Johann Lallemand + johann.lallemand&richemont.com +17186 + MEDIA LINKS Co.,LTD. + SEIICHI KAWADA + kawada&medialinks.co.jp +17187 + Onaro Inc. + Roy Alon + info&onaro.com +17188 + GigaClue + David Weinrich + dweinr1&cox.net +17189 + Ingrafted Software Inc. + Dave Augustus + davea&ingraftedsoftware.com +17190 + Fulcrum Microsystems, Inc. + Bevan Bennett + bbennett&fulcrummicro.com +17191 + Universidad de La Laguna + Javier Quijada Morales + sistemas&ull.es +17192 + Emprisa Networks + Mirga Jazbutis + mjazbutis&emprisanetworks.com +17193 + dbc Digital Broadcast Consulting GmbH + Sören Hellwig + sh&dbc-broadcast.de +17194 + Whoopy Engineering Ltd. + Simon Carter + hostmaster&whoopy-eng.net +17195 + Securesoft Systems, Inc. + Roland Cozzolino + rcozzol&optonline.net +17196 + CacheLogic Ltd. + Andrew Parker + aparker&cachelogic.com +17197 + Relicore, Inc. + Gilbert Benghiat + dirdev&relicore.com +17198 + martinskafte.dk + Martin Skafte + mail&martinskafte.dk +17199 + Ministry of Fisheries - NZ + Jay Montilla + jay&spidernz.com +17200 + Optus Internet Engineering + Servers Group + servers&staff.optusnet.com.au +17201 + Digital Item + Soojin Nam + sjnam&daumcorp.com +17202 + NEC informatec systems,ltd. + Susumu Kajino + kajino&nis.nec.co.jp +17203 + Pekarek Handelsges.m.b.H + Erich N. Pekarek + oid.it-services&pekarek.at +17204 + IIM CORPORATION + Kazuki Masuda + kmasuda&iim.co.jp +17205 + ARPENT ENTERPRISES + RAVINDRA JOSHI + arpent&vsnl.net +17206 + Open Stock Company «Oskol Electrometallurgical Kombinat» + Sergey Bredikhin + bredikhin&oemk.ru +17207 + Cloverleaf Communications Inc. + Dr. Elic Yavor + eyavor&cloverleafcomm.com +17208 + Fa Nea Sjöholm + Christer Sjöholm + hcs&astrakan.hig.se +17209 + Strategic Systems Consultants Ltd. + O.K.Man + ok_man&ssc-ltd.com +17210 + GZS Gesellschaft fuer Zahlungssysteme mbH + Thorsten Kraft + tk&gzs.de +17211 + Potronics sp. z o.o. + Janusz Sobczak + jsobczak&potronics.com.pl +17212 + AWETA b.v. + Marcel van de Weert + mvdweert&aweta.nl +17213 + DAY-S.COM + Laurent ROUSSANNE + l.roussanne&day-s.com.fr +17214 + Network Designers Ltd. + Keith Morley + keith&ndl.co.uk +17215 + Institute of Linguistic Studies + Artem V. Andreev + artem&iling.nw.ru +17216 + Triplanet Partners LLC + Hazem Bennaceur + hbverdian&hotmail.com +17217 + University of Richmond + Coates Carter + ccarter&richmond.edu +17218 + Union Internationale des Chemins de Fer + Mr Bru + bru&uic.asso.fr +17219 + NsTEL Inc. + Kim Tae Hyung + thkimj&lycos.co.kr +17220 + eBenX, Inc. 
+ Mike Stinebaugh + mike.stinebaugh&ebenx.com +17221 + CHBE UBC + Darcy Westfall + darcyw.spam&chml.ubc.ca +17222 + Sierhuis Digitale Diensten + Pim Sierhuis + iana&sierhuis.com +17223 + Cena Salvatore + Cena Salvatore + cena&cenas.it +17224 + Newbury Networks + Anthony Sciola + acs&newburynetworks.com +17225 + ispi of Lincoln, Inc + Monte Ohrt + noc&ispi.net +17226 + BC5 Technologies Inc. + Bruno Couillard + bruno&bc5tech.com +17227 + DMP Initiatives + Yannick Cadin + Yannick.Cadin&Initiatives.fr +17228 + Kommando + Yannick Cadin + Yannick.Cadin&Kommando.com +17229 + Miracle Group, Ltd. + Michal Meloun + meloun&miracle.cz +17230 + UniqMinds Ltd. + Juha Lindström + iana.contact&uniqminds.com +17231 + Otto UK + Paul Zajicek + paul.zajicek&otto-uk.com +17232 + PANNOCOM Ltd. + Attila Tóth + info&pannocom.hu +17233 + ICODEX Software AG + Ralph Ortner + ralph.ortner&icodex.com +17234 + ET Enterprise Technologies srl + Giancarlo Castorina + Giancarlo.Castorina&enterprisetechnologies.it +17235 + 2e Systems GmbH + Saida Zaagoug + saida.zaagoug&2e-systems.com +17236 + Shout Telecoms Limited + David Thorne + dthorne&shout-telecoms.com +17237 + S.P.E.Sistemi e Progetti Elettronici s.a.s. di P.Prandini & C + Paolo Prandini + prandini&spe.net +17238 + NsTEL Inc. + Kim Tae Hyung + thkimj&lycos.co.kr +17239 + Plan B Email Services, LLC + Chris Bellomy + chris&planbemail.com +17240 + Spread Teleinformática Ltda + Eduardo Bortoluzzi Junior + eduardob&spread.com.br +17241 + Federal Trade Commission + Brooks Vaughn + bvaughn&ftc.gov +17242 + Mission Vi + Jason Calabrese + jasonc&missionvi.com +17243 + World Access Canada Inc. + Daniel Silivestru + dsilivestru&worldaccess.com +17244 + Briteace Enterprise Pte Ltd + K.C. Toh + 67639888&starhub.net.sg +17245 + Genentech, Inc. + Scooter Morris + scooter&gene.com +17246 + FreeSNMP + Robert Story + rstory&freesnmp.com +17247 + SBC Internet Services + Chuck M. Donaldson + cd1753&sbc.com +17248 + Gemstone Communications, Inc + Sugl Wu + sugl&gemstone-com.com +17249 + VAN Company Ltd. + Sergey Perepechayev + xxxcore&mail.ru +17250 + Yore Elektronik Yayimcilik A.S. + Özhan KARAMAN + ozhan&yore.com.tr +17251 + WeRMSPoWKe + Brendan Alderslade + brendan.alderslade&wermspowke.net +17252 + MS Global Consulting GmbH + Moheb Shafik + info&ms-global-consulting.de +17253 + Arcada Polytechnic + Harald Hannelius + hostmaster+iana&arcada.fi +17254 + Goad.US + Ed Goad + ed&goad.us +17255 + Vidyatel Ltd. + Bentzy Sagiv + bentzy&vidyatel.com +17256 + CL-NET s.r.o. + Ondrej Pejsa + noc&clnet.cz +17257 + Initial Security Limited + James Gilbert + james.gilbert&initial-security.co.uk +17258 + DRQ Sp. z o.o. + DRQ Sp. z o.o. + office&drq.pl +17259 + Manex + Vincent Keunen + vk&manex.be +17260 + Numinatek Co. 
+ Houman Ghaemi + houman&houman.ca +17261 + RJWare, Inc + Rich Johnson + rich&rjware.com +17262 + Devonshire Group, Inc + Tim Combs + timc&ygtechnologies.com +17263 + Alpes Team + BRIERE Sebastien + sebastien.briere&ac-strasbourg.fr +17264 + Mount Clemens General Hospital + Chris Ryan + cryan&mcgh.org +17265 + Escape Tech Ltd (formerly 'Escape OE') + Christos KONIDARIS + info&escapetech.eu +17266 + California Internet Inc + Ken Anderson + dnsadmin&pacific.net +17267 + Hitachi Computer Products (Europe) S.A.S + Jean-Paul FANTON + jean-paul.fanton&hicef.hitachi-eu.com +17268 + ABB Process Analytics + Bill Lenherr + bill.lenherr&us.abb.com +17269 + Hansa JSC + Dmitry Liakh + dliakh&hansa.ua +17270 + Comcast Corporation + Aaron Settles + aaron_settles&cable.comcast.com +17271 + Sunshine Village Ski & Snowboard Resort + Eric Merth + emerth&skibanff.com +17272 + akashic Inc. + Hirofumi Takada + takada&akashic.co.jp +17273 + The Rabbithole Org. + Shaun Brady + shaun&therabbithole.org +17274 + Nevis Networks Inc. + Ravi Dara + ravi.dara&nevisnetworks.com +17275 + MicroBit + Bertie Kunz + tima&cs.co.za +17276 + Colegio de Registradores de la Propiedad y Mercantiles de España + Emilio Martinez + emilio&corpme.es +17277 + TUI Nederland N.V. + W. Delamore + unix-groep&tui.nl +17278 + IZB-Soft + Florian Schneider + florian.schneider&izb-soft.de +17279 + Stowe Holdings (Pty) Ltd + Halford Dace + hal&stowe.co.za +17280 + Fadesa Inmobiliaria + José M. Fandiño + iana&fadesa.es +17281 + Luther College + Chris Stuckman + stuckmch&luther.edu +17282 + Abhai Inc. + Rajeev Atluri + rajeev&digitaldrifters.com +17283 + YACME S.r.l. + Luca Bolcioni + Luca.Bolcioni&yacme.com +17284 + UNICOM UNIversal computer COMmunication, spol. s r.o. + Radimir Rexa + unicom&e-unicom.sk +17285 + DOBS + W.J Dantuma + willem.dantuma&dobs.nl +17286 + Steuerungs- & Netzwerktechnik + Uwe Beutin + uwe&imbrium.de +17287 + Pacific Life + David Hwang + dhwang&pacificlife.com +17288 + VicSuper + Charles Edouard + charles.edouard&vicsuper.com.au +17289 + LinuxHQ + D. Schueler + wob&bermuda.ch +17290 + Key Digital Solutions Ltd. + Mihai Giurgeanu + mgiurg&xnet.ro +17291 + Mithra Biodindustry Co., Ltd + Mike Cu + ccu123&mail.mithra.com.tw +17292 + NXN Software AG + Mr. Jochen Pielage + jpielage&nxn-software.com +17293 + DavidRLee + David Lee + dlee&novell.com +17294 + Agile Teamwork, Inc. + Earl Marwil + EMarwil&AgileTeamwork.com +17295 + WestNoble + Ahmed Abrar Bhalli + bhalli&westnoble.com +17296 + Fourth Corner Systems + Scott Courtney + courtney&4th.com +17297 + Sine Nomine Associates + Scott Courtney + scourtney&sinenomine.net +17298 + BlueWater Technologies Inc + John Curtis + BlueWater&Dock.Net +17299 + vMoksha Technologies Private Limited + Sanjeev Gopal + sanjeev&vmoksha.com +17300 + Axxius b.v. + M.J. Beelen + marco.beelen&axxius.nl +17301 + AllServe Automatisering B.V. + Sevaz + Sevaz&AllServe.nl +17302 + Iconology Ltd + Stephen Warman + steve.warman&iconology.co.uk +17303 + Paramount Pictures + Gregory Jones + gregory_jones&paramount.com +17304 + CA Technologies Inc. + Aaron Flint + aaron.flint&ca.com +17305 + Aeras Networks + John Wiese + jwiese&aerasnetworks.com +17306 + Armenian e-Science Foundation (ArmeSFo) + Ara A.
Grigoryan + aagrigor&jerewan1.yerphi.am +17307 + Tribunal Regional do Trabalho da 15ª Região + Bruno Stella + brunostella&trt15.gov.br +17308 + Digimedworx + Arya Bagherpour + aryaba&hotmail.com +17309 + Saudi Arabian Monetary Agency (SAMA) + Yagoub Al-Sulaiman + y_suliman&sama-ksa.org +17310 + Vanquish International Ltd. + xiaoli + mpswitch&mail.maipu.com +17311 + Myllykoski Corp. + Vesa Erolainen + vesa.erolainen&myllykoski.com +17312 + Shizuoka System Technology + Kazuhiko Suyama + ssuuyyaammaa&po2.across.or.jp +17313 + Ratiodata GmbH + Martin Nuernberger + martin.nuernberger&ratiodata.de +17314 + Soltim + David ROUAUD + david.rouaud&soltim.com +17315 + EtherAddress.com + Wilson Mattos + wmattos&etheraddress.com +17316 + Peoria Christian School + James Anderton + janderton&peoriachristian.org +17317 + MassiveEgo + Philippe Hebert + philippe&massiveego.com +17318 + Media5 Corporation / M5 Technologies (formerly 'M5T Centre d'Excellence en Telecom Inc.') + Jerome Lagasse + iana&media5corp.com +17319 + JC Computing + James Anderton + jc_computing&hotmail.com +17320 + J&M Crowther Ltd. + Chris Crowther + chrisc&jm-crowther.co.uk +17321 + Calpine + Christopher Donohoe + cdonohoe&calpine.com +17322 + SOFTLAND + Jaroslaw Kruza + jarek&softland.net.pl +17323 + Datatekniska Byrån + Joakim Ahlén + joakim.ahlen&mediaprint.se +17324 + YDI Wireless Inc. + Gino Chartre + gchartre&ydi.com +17325 + Babastik + Brian Adams + brian&babastik.com +17326 + AC Camerfirma, S.A. + Ramiro Muñoz + ramirom&camerfirma.com +17327 + City of Northglenn + Daniel Schmick + dschmick&northglenn.org +17328 + Imenta Sweden AB + Christopher Ekstrom + christopher.ekstrom&imenta.com +17329 + School of Information Technology, Universiti Utara Malaysia + Farkhana Bawazeer + farkhana_bawazeer&yahoo.com +17330 + Zetta Systems, Inc. + John Guthrie + jguthrie&zettasystems.com +17331 + 30San Information System Co.,Ltd. + Xianghui Zhu + zhuxh&30san.com +17332 + Pete Attkins Engineering Ltd + Pete Attkins + pete2.attkins&ntlworld.com +17333 + Critical Networks, Inc. + Dick Downen + tom&criticalnetworks.com +17334 + DN-Solutions + Alexander Mueller + hosting&dn-solutions.net +17335 + UPM-Kymmene Corporation + Jouko Kiviranta + jouko.kiviranta&upm-kymmene.com +17336 + Bank of Russia + Andrey Shiryaev + ShiryaevA&gis.cbr.ru +17337 + Solace Systems, Inc. + Rob Steen + rob.steen&solacesystems.com +17338 + IBS Technology & Services + Jan Maes + jan.maes&ibsts.be +17339 + SAFA GALENICA, S.A. + Manuel Ruiz + mruiz&safa.es +17340 + Microtest Ltd + Roger Jane + roger.jane&microtest.co.uk +17341 + Fonner Consulting + David Fonner + dfonner&charter.net +17342 + Danske Gymnasieelevers Sammenslutning + Ask Holme + ask&dgsnet.dk +17343 + Edula + Scott Seong + scott&edula.com +17344 + OFSET + Hilaire Fernandes + hilaire&ofset.org +17345 + fusionOne, Inc. + Serge Melnichenko + smelnichenko&fusionone.com +17346 + AlienForceIT + Andrzej Pliszka + pliszka&verizon.net +17347 + Hilti Corporation + David Taylor (FIBM Dept.) + David.Taylor&hilti.com +17348 + USTC E-Business Technology Company + Wang Guizhu + gzwang&netkiss.com.cn +17349 + DELTA N.V.
+ Phuong Banh + pbanh&delta.nl +17350 + OnRelay Ltd + Thomas Muller + thomas.muller&onrelay.com +17351 + Simmons College + David Bruce + david.bruce&simmons.edu +17352 + FrancisScott Communications LLC + Scott Lampert + scott&francisscott.com +17353 + VisSim AS + Max Semenov + maxim.semenov&vissim.no +17354 + Zone Trading Partners + Magesh Rangaramanujam + magesh&zonetrading.com +17355 + Cirrus Multimedia Limited + Mo McKinlay + hostmaster&cirrus-multimedia.ltd.uk +17356 + RiskMetrics Group Inc. + Jason Madden + jason.madden&riskmetrics.com +17357 + Derks.IT + Jeroen Derks + jeroen&derks.it +17358 + Fulford Consulting Ltd + Clifford W Fulford + fulford&fulford.net +17359 + net9 + Jiang, Bo + sanshaoj&yahoo.com.cn +17360 + T&J Computer Systems + Tracy or Jessica Smith + grvdigger864&aol.com +17361 + Infoscience Corporation + Norio Miya + info&logstorage.com +17362 + Asia Pacific Bioinformatics Network (APBioNet) + Ong Guan Sin + guansin&bic.nus.edu.sg +17363 + SANYO DDP-BU + Katsuyoshi Takahashi + taka013561&dt.sanyo.co.jp +17364 + SITELI + BETHENOD Georges + gbethenod&siteli.fr +17365 + SISSA - ISAS Scuola Internazionale Superiore di Studi Avanzati + Piero Calucci + calucci&sissa.it +17366 + Torbay Council + Tom Brough + Tom.Brough&Torbay.Gov.Uk +17367 + Grep AS + Roy Michelsen + roy&grep.no +17368 + PIFAUT + Rémi PIFAUT + oid-iana&pifaut.com +17369 + sustainable development networking programme + Muhammad Ashraf + mm_ashraf&yahoo.com +17370 + OnRelay Ltd + Thomas Muller + thomas.muller&onrelay.com +17371 + 4DotNet oHG + Christoph Müller + cmueller&4dot.net +17372 + iFoni Cellular Services + Rudolph van Graan + snmp-admin&ifoni.com +17373 + IT Watchdogs, Inc. + Nick Tornow + ntornow&sheergeniussoftware.com +17374 + Clearfield Knowledge Solutions Ltd. + Ross Peeters + oid.admin&k.co.nz +17375 + Image Analysis,Inc. + Judd E. Reed + reed&Image-Analysis.com +17376 + TIM San Marino S.p.A. + Andrea Gabellini + andrea.gabellini&telecomitalia.sm +17377 + aesys spa + Enzo Arlati + enzo.arlati&aesys.it +17378 + Interlink Software Services Ltd + Stephen D'Arcy + sdarcy&int-link.com +17379 + University of Applied Sciences Wolfenbuettel + Thorsten Ludewig + th&fh-wolfenbuettel.de +17380 + Spazio ZeroUno SpA + Luca Russano + luca.russano&spaziozerouno.it +17381 + The Kinkaid School + Denise Zasowski + denise.zasowski&kinkaid.org +17382 + VAI bv + Albert Hooyer + albert.hooyer&vai.nl +17383 + EASY SOFTWARE + Juergen Froehlich + standardsmaster&easy.de +17384 + Contemporary Control Systems, Inc. + Bennet Levine + blevine&ccontrols.com +17385 + Appriss, Inc + Donavan Pantke + dpantke&appriss.com +17386 + iCanSP, inc + Manoj Thopcherneni + Manoj.Thopcherneni&icansp.com +17387 + Frontier Financial + Jason Balicki + kodak&frontierhomemortgage.com +17388 + Allen Systems Group INC + Bobby Brown + bbrown&asg.com +17389 + Core NAP, L.P. + Matt Knopp + matt&corenap.com +17390 + Family Video + Andrew Baker + andy&famvid.com +17391 + NOSE Applied Intelligence AG + Andreas Weder + andreas.weder&nose.ch +17392 + Abucco Technologies Inc. + Michal Stit + info&abucco.com +17393 + Allen Systems Group INC + Bobby Brown + bbrown&asg.com +17394 + BRAINBOW EDV-Beratung GmbH + Volker Funke + Volker.Funke&brainbow.de +17395 + Beijing Founder Electronic Co.,Ltd. + Dai Lin + dailin&founder.com.cn +17396 + Sapura Advance Systems Sdn Bhd + Dinesh + dinesh&sasoft.com.my +17397 + BVE des Kantons Bern + Thomas Kappeler + thomas.kappeler&bve.be.ch +17398 + HBhard, spol. s.r.o. + Jiri Balcar + hbhard&hbhard.cz +17399 + AKiT Ltd. 
+ Boris Jordanov + bjordanov&akit.biz +17400 + tourisline AG + Frank Brehm + brehm&tourisline.com +17401 + Lands' End, Inc. + Jordan Anderson + jordan.anderson&landsend.com +17402 + Sevis Systems, Inc. + Mike Ashdown + mike.ashdown&sevis.com +17403 + Oklahoma Department of Career and Technology Education + Matthew Hubbard + mhubb&okcareertech.org +17404 + WebundCo GmbH & CoKG + Karl Macku + karl.macku&webundco.com +17405 + Telecommunications Industry Association TR8dot5 + Jeff Anderson + Jeff.S.Anderson&Motorola.com +17406 + HITS + Daniel Himler + dan&hits.at +17407 + OmegaWare Systems Ltd. + Edward Rudd + eddie&omegaware.com +17408 + HICOM COMUNICAÇÃO INTEGRADA LTDA. + SERGIO TREIGER + comercial&hi.com.br +17409 + National Standardization Committee of Radio & Television + Chen Zhiguo + chenzg2000&sina.com +17410 + SOPRA GROUP + Hervé BOUVET + hcbouvet&sopragroup.com +17411 + newsky software + zhanghui + zhanghui&newsky.com.cn +17412 + GateConnect Technologies GmbH + Halil Goektepe + h.goektepe&gate-connect.com +17413 + Ministerio del Interior - España + Francisco Romero + sistemas&guzman.mir.es +17414 + vzw CDS + Jan Vanhaecht + cdsleuven&yahoo.com +17415 + PUCRS + Luiz Alfredo Baggiotto + cpd_unix&pucrs.br +17416 + Root IT + Jeroen Vriesman + linuxificator&gmail.com +17417 + Portiva Corporation + Jason Smith + jsmith&portiva.com +17418 + Mahanaxar Networks + Stephen Butler + oid&mahanaxar.net +17419 + Lincoln Investment Planning, Inc + Steve Konde + skonde&lincoln-remove-investment.com +17420 + DigiPower Manufacturing Inc. + Harvey Hsieh + harveyhsieh&digipower.com.tw +17421 + Hôpital de la ville d'Esch-sur-Alzette + Robert Klensch + ldap.contact&hvea.healthnet.lu +17422 + Real Enterprise Solutions Development B.V. + Bob Janssen + bob&res.nl +17423 + Multidata Gesellschaft für Datentechnik und Informatik mbH + Wilfried Dersch + wd&multidata.de +17424 + Chart Pilot Ltd. + Andrey Nepomnyaschih + nas&chartpilot.ru +17425 + ilink Kommunikationssysteme GmbH + Lars Henssen + lars.henssen&ilink.de +17426 + Sega.com, Inc. + Eric Fehr + eric.fehr&sega.com +17427 + ABYZ Informática LTDA + Andreas Hansen + andreash&abyz.com.br +17428 + WebDesign BK + Rolf Luettecke + it&rolf-luettecke.de +17429 + Michael Telecom AG + Rolf Luettecke + rolf.luettecke&michael-telecom.de +17430 + TCL NETWORK EQUIPMENT (SHENZHEN) CO., LTD. + shijie cheng + chengsj&tcl.com +17431 + shenzhen SDG Information Co.,Ltd. STEC Telecom Branch + TENG DA ZHI + dzteng&stec.com.cn +17432 + Georgia Perimeter College + Bill Woodall + ldapadmin&gpc.edu +17433 + pieas educational institute + shahid + shahidnazir75&yahoo.com +17434 + THERALYS + Emmanuel Olart + eolart&theralys.com +17435 + BEAC + NGON Alain Simplice + ngon&beac.int +17436 + Worlight International + Christian P. Koch + ckoch&worlight.com +17437 + Insurance Corporation of British Columbia + Michael Cameron + michael.cameron&icbc.com +17438 + Novannet, LLC + William J. Kammerer + wkammerer&novannet.com +17439 + Panasonic Avionics Corporation (formerly 'Matsushita Avionics Systems Corporation') + Louie Iturzaeta + Louie.Iturzaeta&panasonic.aero +17440 + Omnipod, Inc. + Jerry B. Altzman + ops&omnipod.com +17441 + Unholy.org + Marcelo Falero + mfalero&uruhost.com.uy +17442 + Iter8 Inc. + Andre B + oid-spamtrap&iter8.com +17443 + Gerd Koslowski + Gerd Koslowski + gerd&itechnik.de +17444 + wrevak.net + William Revak + wrevak&wrevak.net +17445 + FEAS v.o.f. + D.M.J.C.
Gehéniau + info&feas.net +17446 + Stefan Drees + Stefan Drees + sdrees&ieee.org +17447 + STATE MACHINE + Ray Childs + ray&statemachine.net +17448 + Fabrice Co. + Manager + webmaster&fabrice.co.jp +17449 + KLS Air Express Inc. + Tim Schafer + iana&klsairexpress.com +17450 + WFW.org + Emeke - A - Animadu + Admin&wfw.org +17451 + Unassigned + Returned 2005-10-19 + ---none--- +17452 + Visy Industries Holdings Pty Ltd + C. Charles Winchcombe + quinch&systems.visy.com.au +17453 + Billion Electric Co., Ltd. + Felix Cheng + felix&billion.com.tw +17454 + Integuru Inc. + Jeff Shi + integuru&aol.com +17455 + K-BEST TECHNOLOGY INC. + Andy Chang + andy&kbest.com.tw +17456 + Jigsaw Systems + Ken Vance + ken.vance&jigsaw.uklinux.net +17457 + UTi Worldwide + Leon Dunn + EITPrivateEnterpriseNumber&go2uti.com +17458 + Connect Internet Solutions PTY LTD + Harry Raaymakers + harryr&connect.com.au +17459 + Mibtree Ltd + RJ Mellor + info&mibtree.com +17460 + Revelant Technologies + Jesse Clark + jesse.clark&revelanttech.com +17461 + Pryde Systems and Software + Michael Johnston + qp123&mail.com +17462 + Akamai Technologies, Inc. + Erik Nygren + iana-poc&akamai.com +17463 + Demarc Security, Inc. + Marc Wilson + mwilson&demarc.com +17464 + O1 Communications + Casey Lee + clee&o1.com +17465 + TruSystems, Inc + Ron Pashby + Ron&TruSystemsInc.com +17466 + Wooju Communications CO.,LTD. + JayeulKoo + solobird&woojucom.com +17467 + Gamer's Valhalla Pty. Ltd. + Arron Hollis + arron&gamersvalhalla.com.au +17468 + ICONAG AG + Stefan Schorch + stefan.schorch&iconag.com +17469 + BioWisdom Ltd. + Simon Beaumont + simon.beaumont&biowisdom.com +17470 + Strålfors AB + Kim Paulsson + kim.paulsson&stralfors.se +17471 + Actona Technologies, Inc + Daniel Kaminsky + daniel&actona.com +17472 + Tetraplex Ltd. + Simon Lebrun + lebruns&tetraplex.com +17473 + Telluce Corporation + No-Wook, Kee + luscent&empal.com +17474 + CRM-Informatik GmbH + Pascal Rudin + pascal.rudin&crm-informatik.ch +17475 + Genetec Information Systems + Christian Morin + cmorin&genetec.com +17476 + V-ONE Corporation + Chris Brook + cbrook&v-one.com +17477 + Trailblazer Systems Inc. + Jason Cai + jcai&trailblazersystems.com +17478 + Nexadesic, Inc. + Sean Chittenden + iana&nexadesic.com +17479 + One Unified + Ray Burkholder + ray&oneunified.net +17480 + Choice Communications + Ray Burkholder + ray&viaccess.net +17481 + Logic Fusion + William Wong + bwong&voicenet.com +17482 + Ogmios.ca + Cameron Charlebois + cameron&ogmios.ca +17483 + Hong Kong School Net Ltd. + Tony Chan + valjean&school.net.hk +17484 + Adyoron Intelligent Systems Ltd. + Merav Ben-David + meravb&adyoron.com +17485 + HierWeb + Stephen Cooke + hierweb&earthlink.net +17486 + EDS Deutschland GmbH + Markus Wenzel + Markus.Wenzel&eds.com +17487 + Manchester Airport + Pete Burgess + peter.burgess&manairport.co.uk +17488 + GENERAL TOUCH TECHNOLOGY CO.,LTD + Alex Lee + alex&generaltouch.com +17489 + Curtis Consulting LLC + Charles Curtis + ctcurtis&curtisconsultingllc.com +17490 + DM Trucking + David Mendes + mejeda1217&aol.com +17491 + Barix AG + Johannes Rietschel + jr&barix.com +17492 + Northco.net + Zachary McGibbon + mzac&northco.net +17493 + Halliburton Company + Thang Lu + thang.lu&halliburton.com +17494 + Matthias Rath, Inc. + Robert Coleman + hostmaster&drrath.com +17495 + Gardel Software + Jim Garvin + jgarvin&gardelsoftware.com +17496 + Canadian Broadcasting Corporation + Ben Hall + ben_hall&cbc.ca +17497 + Tallinn School No. 
21 + Ivari Horm + admin&21k.ee +17498 + Kommunale Datenverarbeitung Oldenburg + Ingo Luers + luers&kdo.de +17499 + Packetsurge + Mr Joel Fox + joel.fox&packetsurge.it +17500 + Georgia Gwinnett College + Brian MacLeod + bmacleod&ggc.usg.edu +17501 + Drake University + Chris Mielke + chris.mielke&drake.edu +17502 + Newfield Automation + Julian Meacham + julian.meacham&newfieldautomation.com +17503 + TechForge Software + Doug Davis + dougied&attcanada.ca +17504 + Security Wizard + Heejune Kim + drost&securewiz.net +17505 + Wamin Optocomm Mfg.corporation + Huaimin.Huang + brian&mail.wamin.com.tw +17506 + Eulerian Technologies, Sarl + Guillaume FOUGNIES + guillaume&eulerian.com +17507 + omnidata + EL AMMARIA Mohammed + melammaria&omnidata.co.ma +17508 + Sotrange Transportes Rodoviários Ltda. + Júnior + junior&sotrange.com.br +17509 + Versatel Networks Inc. + Daniel Biage + dbiage&versatelnetworks.com +17510 + GoldenHope + GH + zhongyu&mail.goldenhope.com.cn +17511 + XtremeX Corporation + Darnel Hunter + support&xtremex.com +17512 + Flinders University + Paul Gardner-Stephen + paul&infoeng.flinders.edu.au +17513 + RVA + Daniel De Baerdemaeker + ddebaerd&rva.fgov.be +17514 + FDF, Frivilligt Drenge- og Pige-Forbund + Thor A. Lange + lange&FDF.dk +17515 + BNSF Railway Inc. + Greg Britz + gregory.britz&bnsf.com +17516 + Nighthawk Radiology Services + Jordan K + ldap&jnet.eu.org +17517 + Clementi Software + Steve Clementi + sclementi2&wi.rr.com +17518 + Comba Telecom System(Guang Zhou) Ltd. + Zhong Han,Su + su_zh&tom.com +17519 + InteliTrac, Inc. + Tianlong Chen + tchen&intelitrac.com +17520 + Easton Technologies Inc. + Miroslav Krajca + miroslav&miroslavkrajca.com +17521 + RealEast Networks + Ildar Gabdulline + ildar&realeastnetworks.com +17522 + Ozitem + Erik Ableson + erik.ableson&ozitem.com +17523 + FiberLan GmbH + Norbert R. Schmidt + n.schmidt&fiberlan.de +17524 + Ferrari electronic AG + Gido Kuechler + kuechler&ferrari-electronic.de +17525 + Amsterdam Internet Exchange B.V. + AMS-IX NOC + noc&ams-ix.net +17526 + symentis GmbH + Schema Administrator + info&symentis.com +17527 + FORMWARE GmbH + Christian Lammel + lammel&formware.de +17528 + KMS Internet Solution Providers Ltd + Hosted Services Team + serveradmin&kms.co.uk +17529 + FeldTech + Peter Feldbaumer + tech&feldtech.com +17530 + Chernigov State Technological University + Andrey Khiznyak + alf&cs.stu.cn.ua +17531 + Henrico County Public Schools + Christopher Nielsen + csnielsen&henrico.k12.va.us +17532 + BSI Informatica Ltda + Roberto Mamoro Hagihara + hagihara&bsi.com.br +17533 + Association KAZAR + Xavier Beaudouin + kiwi&kazar.net +17534 + Optelecom Inc. + Anunoy Ghosh + aghosh&optelecom.com +17535 + EGT Inc. + Chris Gordon + chris&egtinc.com +17536 + Telenity, Inc. + Ilhan Bagoren + ilhanb&telenity.com +17537 + Florence Public School District One + William Freeman + wfreeman&fsd1.org +17538 + Village Grille Family Diner + Richard Eric McCoy + rick_mccoy500&yahoo.com +17539 + MXC Software + Mike Chen + mchen&mxcsoft.com +17540 + TB-Solutions Advanced Technologies S.L. + Santiago Navarro + navarros&tb-solutions.com +17541 + Lamda Inc. + Chatziandreoglou Christos + lamda&xan.forthnet.gr +17542 + TFS Technology Inc + Hakan Westin + hakan.westin&tfstech.com +17543 + ADUNEO + Emmanuel KURTH + emmanuel.kurth&aduneo.com +17544 + BSP Partners GmbH + Michael Keller + Michael.keller&bspartners.de +17545 + Wirefree Innovision pvt ltd + Viswaranjan Maligi + ranjan&bplitl.com +17546 + Magno Comunicacoes Ltda.
+ Tiago Ribeiro + tiagor&mac.com +17547 + Centre for Good Governance + NBN Ramesh + svln&cgg.gov.in +17548 + Air2Web, Inc. + Jonathan Newton + jnewton&air2web.com +17549 + LogicLibrary Inc. + Mark Osteraas + ops&logiclibrary.com +17550 + True Blade Systems, Inc. + Eric V. Smith + postmaster&trueblade.com +17551 + Kmart Corporation + Debra Bald + ewts&kmart.com +17552 + NKI/AvL + DA Technisch Beheer + da.tb.users&nki.nl +17553 + Telefonica Data Colombia S.A. + Rodrigo Carrillo Vásquez. + rodrigo.carrillo&telefonica.com.co +17554 + PODi - the Digital Printing initiative + Dave deBronkart + DeveloperSupport&podi.org +17555 + Webster University + Webster University Network Operations + noc&webster.edu +17556 + MAXINA GmbH + Jordan Hrycaj + jordan&maxina.de +17557 + Tokyo Woman's Christian University + TANAKA Satoshi + tanaka&twcu.ac.jp +17558 + Moveo Systemy Teleinformatyczne + Marek Gutkowski + iana&moveo.pl +17559 + The Art Institute of Chicago + Raphael Jaffey + rjaffey&artic.edu +17560 + NETFORYOU CO.,LTD. + Sung Il, Hong + sihong&nforyou.com +17561 + CLink + Lei Yang + coolinside&vip.sina.com +17562 + Expect Technology + Rajabhau Potbhare + rajabhau_mp&hotmail.com +17563 + ENCODE SA + D Petropoulos + d.petropoulos&encode-sec.com +17564 + www.portocervo.cc + fabrizio scardovi + fabrizio_scardoviz&hotmail.com +17565 + Rural Servicios Informáticos S. C. + Andrés Hernández Romero + andres_hernandez_rsi&cajarural.com +17566 + EFG EUROBANK ERGASIAS A.E. + Raptopoulos Dimitrios + draptopoulos&eurobank.gr +17567 + Certimail SA + Jerome DECOURBE + jerome.decourbe&francetelecom.com +17568 + SLAT SA + Stephane BOCH + stephane.boch&slat.fr +17569 + Schranzhofer DIAS + Schranzhofer Andreas + dias&schranzhofer.net +17570 + IFS audiovisuals + Bjorn Padding + ldapmaster&ifsaudiovisueel.nl +17571 + Glensound Electronics Ltd. + Paul Grant + paul&glensound.co.uk +17572 + Mainframe Oy + Leo Erkkil + leo.erkkila&mainframe.fi +17573 + Secretaria das Finanças do Estado da Paraíba + Adrivagner Dantas + adantas&sefin.pb.gov.br +17574 + Oxford Microsystems + Kevin Short + kshort&oxmicro.com +17575 + Lebenshilfe Niederoesterreich + Hans-Peter Bernhard + h.p.bernhard&ieee.org +17576 + mahalingam & co. + Ragho Mahalingam + ragho&mahalingam.com +17577 + Ralf Bisges Informationstechnik + Ralf Bisges + R.Bisges&T-Online.de +17578 + Berlitz International, Inc. + Mr. Sean L. Power + sean.power&berlitz.com +17579 + Groove Media + Carlos Chaparro + carlos.chaparro&groove.com.co +17580 + Telefonica Data Colombia S.A. + Rodrigo Carrillo Vásquez. + rodrigo.carrillo&telefonica.com.co +17581 + Hofmann Software Engineering International + Kai Hofmann + hofmann&hofmann-int.de +17582 + Loran Technologie + Network Admin + administrator&lorantech.com +17583 + Information Technologies Australia Pty Ltd + Simon Uren + simon&itaus.com.au +17584 + United Network Communication Corp. + Qi Juanjuan + qijj&unc.com.cn +17585 + Compusys + High Performance Computing + hpc&compusys.co.uk +17586 + BANDAI NETWORKS CO., LTD. + Yoichi Nakatake + nakatake&net.bandai.co.jp +17587 + Laserbit Communications Corporation + Bonyhadi Daniel + bdaniel&laserbitcommunications.com +17588 + GLOBAL ELECTRICITY CO., LTD. 
+ Seung Ho, Kim + global76&kornet.net +17589 + AMEC SPIE Communications + Gilles Grangier + g.grangier&amecspie.com +17590 + Orbis Terrarum Networks + Robin Johnson + robbat2&orbis-terrarum.net +17591 + The Defender Association + Nathan Sandver + nathan&defender.org +17592 + Javacise Software + Jeff Rogers + jrogers&javacise.com +17593 + eath Co.,Ltd. + KIKUCHI Kousuke + kousuke&eath.co.jp +17594 + Simitel S. de R.L. de C.V. + Serge Kruppa + serge.kruppa&simitel.com +17595 + SEU + WuXiaoBin + wxb&seu.edu.cn +17596 + DigiOnline GmbH + Thomas Hebinck + thomas.hebinck&digionline.de +17597 + IntraData BV + Roel de Regt + regt&intradata.nl +17598 + Jouve SI + Loïc Duruel + lduruel&rennes.jouve.fr +17599 + Northern Alberta Institute of Technology + Daryl Allenby + daryla&nait.ab.ca +17600 + CHUQ + Yvan Fournier + yvan.fournier&chuq.qc.ca +17601 + Group One Trading, LP + Alex Klein + alex.klein&group1.com +17602 + Midnight Linux + James O'Kane + jo2y&midnightlinux.com +17603 + surespeed + Michael Van Aken + actionzero&usa.com +17604 + ScySoft Multimedia + Sebastian Mergelmeyer + techsupport&scysoft.de +17605 + Jens Bräuer + Jens Bräuer + jensb&cs.tu-berlin.de +17606 + helloWorld Inc. + PV Subramanian + pvs&helloworld-inc.com +17607 + ReDat Recording + Radek Svestka + rsvestka&redat.cz +17608 + GIE AGORA + Alexandre GAREL + garel.alexandre&agora.msa.fr +17609 + AZ Mercury Ltd. + Andrey Sharandakov + ash&azm.uz.ua +17610 + Sadhu Software Services Pvt. Ltd. + Dr. SVL Narasimham + svln_jntu&yahoo.com +17611 + eXc Software, LLC + Norm Freedman + normfree&san.rr.com +17612 + Groupe Caisse d'Epargne + Fabrice LAGET + fabrice.laget&cnce.caisse-epargne.fr +17613 + S-Access GMBH + Daniel Dällenbach + s-access&dplanet.ch +17614 + Queensland Parallel Supercomputing Foundation (QPSF) + Rajesh K. Chhabra + r.chhabra&qpsf.edu.au +17615 + Ashway Software + Andrey Sharandakov + ash&azm.uz.ua +17616 + Exclusive Flor Sales Ltd. + Miles K. Forrest + miles&promicro.ca +17617 + MessageGate, Inc. + Mark Richardson + markr&messagegate.com +17618 + Ziniti.Net + John Ziniti + jziniti&speakeasy.org +17619 + J Enterprise BV + A. S. Manzoor + suhail&planet.nl +17620 + Mould Industria de Matrizes Ltda + Carlos A. Mendel + cmendel&mould.com.br +17621 + Lightwave Access, LLC + Keith Goettert, CIO + Keith&LightwaveAccess.net +17622 + OpenServe + Bastian Winkler + b.winkler&techdivision.com +17623 + Millersville University + Mike Dulay + michael.dulay&millersville.edu +17624 + Scouting Nederland + Team Internet (Eelco Maljaars) + support&ti.scouting.nl +17625 + Brightline Technology, Inc. + James Wilson + jwilson&brightline.com +17626 + Phoenix Business Technologies Group, Inc. + Sean Hannan + sean&pbtg.com +17627 + Will Systems + Tamotsu Kanoh + kanoh&kanoh.org +17628 + Mount Hay Technology + Matt Johns + matt.johns&haytech.com.au +17629 + NISHIMU ELECTRONICS INDUSTRIES CO., LTD. + Mikihiro Jyono + mjyouno&nishimu.co.jp +17630 + Anana Ltd + Roger Parsons + roger.parsons&anana.com +17631 + Oral Diagnostic Systems + Paul F. van der Stelt + p.vdstelt&acta.nl +17632 + TPISACOM + Ming-En Chen + me.chen&ever.com.tw +17633 + Phoenix Color Corporation + John Thorhauer + jthorhauer&phoenixcolor.com +17634 + AGB UK Limited + Zoran Vasiljevic + zoran.vasiljevic&atruk.co.uk +17635 + DragonTail Online Security + David Sumner + david&dragontail.com +17636 + Novahead, Inc. + Burk Price + bprice&novahead.com +17637 + fudo.org + Peter Selby + war&fudo.org +17638 + JAJOK Ltd. 
+ Jack Zhou + zhoukun1212&tom.com +17639 + Abbeynet Srl + Luca Filigheddu + roberto.camba&abbeynet.it +17640 + Reifenberger + Michael Reifenberger + Michael&Reifenberger.com +17641 + Web Publications Pty Ltd + Ian Slinger + sysadmin&webpublications.com.au +17642 + PrintSoft Czech Republic + Slavomir Pagerka + pagerka&printsoft.cz +17643 + Shanghai Neotune Information Technology Co.,Ltd. + Tao Yu + sean.yu&neotune-co.com +17644 + Next Generation Broadband, LLC + Bruce Harrison + bruce&ngb.biz +17645 + CBCA, Inc. + Jeremy Archer + jarcher&cbca.com +17646 + Contact Energy Ltd + Lincoln Mackay + lincoln.mackay&contact-energy.co.nz +17647 + NTT Innovation Institute, Inc. (formerly 'NTT Multimedia Communications Laboratories, Inc.') + Eugene M. Kim + eugene.kim&ntti3.com +17648 + Amat-SC, Service & Consulting + Ramon Amat + ra&amat-sc.ch +17649 + PUMA AG + Christian Burger + java-developer&puma.com +17650 + ITSOFT + Piotr Zieliński + pz&euroticket.pl +17651 + necron-X + Stephan Seitz + sseitz&kist.necron-x.net +17652 + McKesson IS France + Jacques Bretagnolle + dpt_tech&mckesson.fr +17653 + Moot Hall Holdings Limited + Mark Berger + iana_sub&moothall.co.nz +17654 + Sheer Networks, Ltd. + Eli Hadad + eli_hadad&sheernetworks.com +17655 + Epsys + Hermans, Jeroen + j.hermans&epsys.nl +17656 + Tohoku Ricoh Co., Ltd. + Chiharu Toyoshima + toyo&tohoku.grp.ricoh.co.jp +17657 + @tp-net e. K. + Andreas Danner + iana.registrierung&atp-net.de +17658 + Thinklogical, Inc (formerly 'Logical Solutions Inc.') + Bill Feero + support&thinklogical.com +17659 + The Lutheran University Association, Inc (d/b/a Valparaiso University) + J Michael Yohe + OID.Admin&valpo.edu +17660 + AStA Universitaet Potsdam + Sven Friedrich + computer&asta.uni-potsdam.de +17661 + Lincoln National Corporation + Toby Gaff + domainservicealerts&lfg.com +17662 + Lucent EBS + Chris Ihling + ihlingc&lucent.com +17663 + I.C.S. Industrielle Automatisierungssysteme GmbH + Dieter W. Kuske + snmp&ics-consult.de +17664 + Watkins Contracting + Sean Noonan + noonans&watkinscontracting.com +17665 + IMAKE Software & Services + Jim McCloskey + jim.mccloskey&imake.com +17666 + New Jersey Higher Education Student Assistance Authority + Simon Lee + slee&hesaa.org +17667 + PCT-International, Inc. + Jon-En Wang, Director Optic Engineering + jwang&pctusa.net +17668 + American Medical Security + Daniel Bartlett + daniel.bartlett&eams.com +17669 + VSL Catena + Sjors Robroek + sjors&vslcatena.nl +17670 + Celliance Sarl. + Hugues Obolonsky + h.obolonsky&celliance.com +17671 + PT. Mugi Rekso Abadi (Holding) + Muhammad Reza + reza&mra.co.id +17672 + MACH Software, Inc. + Alex Chen + alex_chen&sbcglobal.net +17673 + CERT-ONERA + Guy ZANON + guy.zanon&cert.fr +17674 + VT100 + Rubin Simons + info&vt100.nl +17675 + Hill McGlynn & Associates Ltd + IT Manager + itmanager&hillmcglynn.com +17676 + nextevolution AG + Hayo Schmidt + hs&nextevolution.de +17677 + Tradition Financial Services Ltd + Mr Rob Waddell + robert.waddell&tfs-ln.co.uk +17678 + Open X Tecnologia Ltda + Renato Arrudas Ornelas + renato&openx.com.br +17679 + Chapter Eight Ltd + Joelle Nebbe-Mornod + sysadmin&chaptereight.com +17680 + Dialup USA, Inc. 
+ Gabriel Cain + gabriel&dialupusa.net +17681 + Johns Hopkins Bloomberg School of Public Health Center for Communication Programs + Kevin Zembower + isgalert&jhuccp.org +17682 + BNX Systems Corporation + Brian Mizelle + bmizelle&bnx.com +17683 + Davis Polk & Wardwell + Bin Zhao + bzhao&dpw.com +17684 + Xi Software Ltd + John Collins + jmc&xisl.com +17685 + Dunes Technologies SA + Stefan Hochuli + iana.org&dunes.ch +17686 + BF&M Insurance Group + Nick Faries + nfaries&bfm.bm +17687 + Teamwork Solutions, Inc. + Michael A Weber + mweber&teamsol.com +17688 + O'Farrell Community School + Brian F. Opitz + briano&ofarrell-mail.sandi.sandi.net +17689 + TJM Computing + Tim McArthur + tim&tjmcomp.co.za +17690 + The University Of Newcastle + Bill Pascoe + bill.pascoe&newcastle.edu.au +17691 + i-Net Device Co.Ltd. + Toru Kawata + kawata&i-netd.co.jp +17692 + Torkild Lømo + Torkild Lømo + tl01&online.no +17693 + Institut de Recherche en Communication et Cybernétique de Nantes (IRCCyN) + Denis CREUSOT + Denis.Creusot&irccyn.ec-nantes.fr +17694 + Elekta + Adam Paisley + hostmaster&elekta.com +17695 + COM.BOX WINET GmbH & Co. KG + Torsten Luettgert + t.luettgert&combox.de +17696 + GAD IT fuer Banken eG + Johann Prahm + Johann.Prahm&GAD.de +17697 + Sanderson Townend & Gilbert Ltd + Les Bessant + lesb&sandersons.com +17698 + John Lewis PLC + Network Technical Team + ntt_hitperson&johnlewis.co.uk +17699 + Japan Control Engineering, Co., Ltd. + Mugen Kawazu + m_kawazu&nihon-seigyo.co.jp +17700 + PowerWallz Network Security Inc. + Alan Chiu + achiu&powerwallz.com +17701 + BAE Systems, CNIR + Ben Kimes + george.kimes&baesystems.com +17702 + SmartPants Media, Inc. + Mark Jeweler + mark&smartpants.com +17703 + Rudjer Boskovic Institute + Matej Vela + mvela&irb.hr +17704 + Critical Telecom Corp. + Ian Meier + imeier&criticaltelecom.com +17705 + Sovelto Oy + Sakari Kouti + sakari.kouti&sovelto.fi +17706 + Stefan Kidery + Stefan Kidery + stefan&kidery.net +17707 + The Meme Factory, Inc + Karl O. Pinc + kop&meme.com +17708 + sysmango.com + Allen Lebo + allen.lebo&sysmango.com +17709 + Shanghai Telecommunications Technological Research Institute + xue lihong + xuelh&sttri.com.cn +17710 + michael j van ham + Michael Van Ham + mjvh&lords.com +17711 + Aisai communication technology CO.,LTD + Junce Ma + junce-ma&aisaicom.com +17712 + TB-Solutions Technologies Software S.L. + Miguel Angel Sarasa Lopez + sarasam&tb-solutions.com +17713 + Cambium Networks Limited (formerly 'PipingHot Networks Limited') + Antony Holmes + antony.holmes&cambiumnetworks.com +17714 + ASM Enterprises + Mark Jeweler + mark&asmenterprises.com +17715 + Mitsukoshi Infomation Service Co.,Ltd. + system tantou + jyousys&mitsukoshi.co.jp +17716 + Georgia Tech Foundation + Scott Owens + scott.owens&gtf.gatech.edu +17717 + Escription, Inc. + Kenneth Valentine + ken&escription.com +17718 + Vanu, Inc. + Jonathan Santos + jrsantos&vanu.com +17719 + Noldata Ltd. + Gustavo Lozano + glozano&noldata.com +17720 + Prompt + Ruud Baart + r.j.baart&prompt.nl +17721 + AGL Resources Inc. + Grady V. Boggs + gvboggs&aglresources.com +17722 + STEAG GmbH + Bernhard Koss + bernhard.koss&steag.com +17723 + Softel SA de CV + Fernando Bracho + fbracho&softel.net.mx +17724 + SinoCDN Ltd. + System Administrator + sys-admin&sinocdn.com +17725 + HW Termination Union + liu chenglong + liuchenglong&huawei.com +17726 + Adva Technologies Ltd. + Andrey Lebedev + andrey.lebedev&advascan.com +17727 + Wiplug + Pablo Brenner + pablo&wiplug.com +17728 + AboveCable, Inc. 
+ YiLin Xu + ylxu&abovecable.com.cn +17729 + SPUeNTRUP Software + Kai Henningsen + support&cats.ms +17730 + RedDot Solutions AG + Inka Erdwiens + inka.erdwiens&reddot.de +17731 + CQS Technology Holdings (Pty) Ltd + Jake Shepherd + jake&cqsgroup.com +17732 + SNA lab,National Dong-Hwa university ,Hualien,Taiwan + Chen, Kun-Nan + m9121039&em91.ndhu.edu.tw +17733 + Delaware State University + Bruce Pawlecki - Siemens + bruce.pawlecki&icn.siemens.com +17734 + OC Systems, Inc. + Vasya Gorshkov + vvg&ocsystems.com +17735 + Quark Communications, Inc. + Adam Guzik + aguzik&quarkcommunications.us +17736 + Trading Metrics, Inc. + Jeff Drew + jeffdrew&tradingmetrics.com +17737 + Tablus, Inc. + Aaron Reizes + aaron&tablus.com +17738 + Tracey Bernath + Tracey Bernath + tbernath&ix.netcom.com +17739 + Embedded Technologies s.r.o. + Dusan Ferbas + dferbas&etech.cz +17740 + iSecurity Technology Limited + Terence Lee + terence&cs.ust.hk +17741 + Open Enterprise Systems, Ltd. + Jason Smith + jhs&oes.co.th +17742 + Rootsr + Simon de Hartog + simon.iana&rootsr.com +17743 + M-Web Business Solutions + Warren Baker + warren&mweb.com +17744 + Dubex A/S + Sune B. Jørgensen + sbj&dubex.dk +17745 + Huntek Co., Ltd. + Xiaozhi Zhang + micblues&huntek.com +17746 + By Intertrade + Justo M. Garcia + justo&byintertrade.com +17747 + Nexagent Ltd. + Ian Corrie + icorrie&hpe.com +17748 + Card Systems Consulting SRL + Lucian Ungurean + Lucian.Ungurean&orga.ro +17749 + TGC Ltd + Ian Norman + ian&t-g-c.net +17750 + iTechnology GmbH + Sascha Teske + iana&itechnology.de +17751 + Radio Systems Ltd. + Sergey Shumov + shs&rs.net.ua +17752 + Instituto Atlântico + Victoria Matos + victoria&atlantico.com.br +17753 + MetroHealth Medical Center + David H. Johnson + monorail&cwru.edu +17754 + Alexander Pilger Consulting + Alexander Pilger + alex&cyber-defense.de +17755 + Lane Department of Computer Science and Electrical Engineering, West Virginia University + David Krovich + dkrovich&csee.wvu.edu +17756 + science + computing ag + Mitchel Timm + sc-iana&science-computing.de +17757 + Orthopedic Healthcare Northwest, P.C. + Network Administrator + postmaster&ohn.com +17758 + Trematon UK Ltd + Colin A. White + colin&orbit8.com +17759 + Synctomi, Inc. + Andrew Almquist + aalmquist&synctomi.com +17760 + Identify + Iftah Bratspiess + iftah&web-silicon.com +17761 + WiredPlace + Nicola Giacobbe + wiredplacegiacobbe&hotmail.com +17762 + Covert Security Services Ltd + Warren Mann + covertnet&onetel.com +17763 + Teske Netzwerkberatung + Sascha Teske + iana&slaxor.de +17764 + Ditelco, LLC + Philip Decker + iana_pen&ditelco.com +17765 + Tessna Private Limited + S. 
Mukund + mukund&tessna.com +17766 + PGP Corporation + Jon Callas + jon&pgp.com +17767 + Schruth, LLC + Cory Schruth + cory&schruth.com +17768 + Ecobyte Limited + Nic Rutterford + Nic.Rutterford&Ecobyte.co.uk +17769 + Werft22 AG + Andreas Trottmann + andreas.trottmann&werft22.com +17770 + Cia Metropolitano de São Paulo - Metrô + Jair Ribeiro de Souza + jrsouza&metrosp.com.br +17771 + Lingard Knowledge Software Engineering + Jonathan Lingard + jomajeliianaorg&lingard.com +17772 + Ironico Pty Ltd + Geoff Hill + geoff.hill&ironico.com.au +17773 + NEXCOM International Co.,LTD + Steven Wu + stevenwu&nexcom.com.tw +17774 + Smart Computer Systems Ltd + Rodney Walker + rodney&smartcom.co.nz +17775 + INTOREL + Srdjan Mijanovic + msergeo&intorel.com +17776 + SIN solution + Emanuel Kraus + ekraus&5hexen.de +17777 + JaserNet + Jeremias Mueller + jeremias&jeremi-ass.de +17778 + nic.at Internet Verwaltungs- und Betriebsgesellschaft m. b. H. + Michael Braunoeder + mib&nic.at +17779 + Econtec GmbH + Bernd Probst + bprobst&econtec.de +17780 + Truman Medical Centers, Inc. + Greg Wyman + greg.wyman&tmcmed.org +17781 + University of Vale do Itajai - CES Sao Jose + Marcelo Maia Sobral + sobral&sj.univali.br +17782 + Michael Parker + Michael Parker + MichaelParker&gmx.de +17783 + SeTel S.p.a. + Nicola Giacobbe + setelgiacobbe&hotmail.com +17784 + Mitsubishi Electric Business Systems Co.,Ltd. + Yasushi Yoshida + yyoshida&melb.co.jp +17785 + BONNA SABLA SA + LABARILE Vitantonio + exploit&bonnasabla.com +17786 + Frankendata GmbH & Co.KG + Abdelhakim Mahraoui + abdelhakim.mahraoui&frankendata.de +17787 + tschaufrei + Markus Frei + m_frei&swissonline.ch +17788 + EDAG GmbH & Co. KGaA + Markus Gruenkorn + nwadmin&edag.de +17789 + sitharus.com internet services + Phillip Hutchings + me&sitharus.com +17790 + IPAD Owners Association + Lynn W. Taylor + lynn&ipadowners.net +17791 + Turoks.Net + Glen F. Marshall + turok&turoks.net +17792 + Billing Concepts, Inc. + Ashley Neal Hornbeck + ashley.hornbeck&billingconcepts.com +17793 + Hong Kong Broadband Network Ltd. + Elhon Fung + elhon&hkbn.com.hk +17794 + EXARTECH International Corp. + Lin Chin-Yin + louis&exartech.com.tw +17795 + Echo Communications + John Freeman + john&echonet.com +17796 + EK Technology, Ltd. + Eskay Lee + eskay&ektechnology.com +17797 + PrimeScale AG + Juergen Maihoefner + juergen.maihoefner&primescale.net +17798 + Beijing Lingtu Software Co., LTD + HuangSong + huangsong&lingtu.com +17799 + Eurovast Ltd. + Richard Hoberman + thatrichard&blueyonder.co.uk +17800 + Telephone Company Chereda + Andriy Berestovskyy + info&chereda.net +17801 + tarent GmbH + Philipp Kirchner + p.kirchner&tarent.de +17802 + Egyptian Telephone Company + Sameh Youssef + syoussef&egyptiantelephone.com +17803 + iTechnology GmbH + Sascha Teske + iana&itechnology.de +17804 + Towers Perrin + Andy MacDonald + andy.macdonald&towers.com +17805 + Radio UNiCC e.V. + Daniel Graupner + daniel.graupner&informatik.tu-chemnitz.de +17806 + isw + Voorzitter + iana&isw.student.khleuven.be +17807 + WELLX TELECOM S.A. + Charles OZANNE + charles.ozanne&wellx.com +17808 + ACTCOM - Active Communication Ltd. + Serge Talankine + actobi&actcom.co.il +17809 + civil information security organisation + george gritsai + gritsai&hotbox.ru +17810 + GIE GETIMA + CHOCHON JACQUES + chochon.jacques&getima.fr +17811 + Anixe Systems GmbH + Joern Heissler + j.heissler&anixe.de +17812 + TLS-Technologie sp. z o.o. + Robert Maron + robmar&tls.pl +17813 + iVAST Inc. 
+ Marios Stylianou + mstylianou&ivast.com +17814 + Boolean Systems, Inc. + Paul Sullivan + paul&booleansystems.com +17815 + Pollard Banknote Ltd. + John Schulz + jschulz&pbl.ca +17816 + Federated Systems Group + Alfredo J. Horta + alfredo.horta&fds.com +17817 + Fuji Research Institute Corporation + TACHIBANA Shintaro + mibadmin&fuji-ric.co.jp +17818 + SAPPORO BREWERIES LTD. + Toyohiko Terada + Toyohiko.Terada&sapporobeer.co.jp +17819 + GIP RENATER + Dany Vandromme + vandrome&renater.fr +17820 + Foresttek Networks Limited + Kenneth Chan + kenneth&foresttek.com +17821 + Tempest spol. s r.o. + Ing. Peter Magdina + peter_magdina&tempest.sk +17822 + Easytouch Robert Redl KG (formerly 'EASYTOUCH Robert Divoky KEG') + Robert Redl + robert&robertredl.com +17823 + Arnes - Academic and Research Network of Slovenia + Rok Papež + aaa-podpora&arnes.si +17824 + Hisense Information Technology Ltd. + Jiao HaiBo + dragon_j&21cn.com +17825 + BEconnectIT GmbH + Rene Lange + office&beconnectit.de +17826 + ZerBit S.L. + Juan Carlos + zerbit&terra.es +17827 + Infostar Technology + Zhong Yu + zhong.yu&telia.com +17828 + MetaTV Inc. + David de Andrade + daviddeandrade&metatv.com +17829 + IDN Communication System Co.,Ltd + Zhongping Zhou + zhpzhou&idncn.com +17830 + SYRUS SYSTEMS + Moiseev Segrey + moiseev&syrus.ru +17831 + TwinCats Production sprl + X. Bogaert + info&twincats.be +17832 + Alphyra Nordic AB + Zhong Yu + zhong.yu&alphyra.se +17833 + All for One Systemhaus AG + Jochen Renner + jrenner&all-for-one.de +17834 + C.N.S. Systems AB + Mattias Blomqvist + mattias.blomqvist&cns.se +17835 + Magyar Telekom Nyrt. (formerly 'MATÁV Rt.') + Zoltan Domokos + domokos.zoltan&telekom.hu +17836 + Norbert Zänker & Kollegen + Hannes Schmidt + mail&hannesschmidt.de +17837 + Experimental Factory of Scientific Engineering + Novikov Oleg + oleg&ezan.ac.ru +17838 + JunCon + Armin Junginger + Junginger&web.de +17839 + Secureseal Systems Ltd. + kodjo baah + kodjo_baah&hotmail.com +17840 + KOITO INDUSTRIES, LIMITED + takayuki araki + takayuki_araki&koito-ind.co.jp +17841 + bombini.net + René Clavaux + rc&bombini.net +17842 + Ipsum Networks, Inc. + Raju Rajan + raju&ipsumnetworks.com +17843 + Concerto Software + Omar Dominguez + odominguez&concerto.com +17844 + Orion Associates + Scot Ridgway + sridgway&orionassociates.com +17845 + GeniServer Corporation + James Li + james.li&geniserver.com +17846 + Ocamar Technologies,Inc. + Tod Kang + tod.kang&ocamar.com +17847 + Mermit Business Applications Oy + Matti Kokkola + Matti.Kokkola&mermit.fi +17848 + Qamcom Research & Technology AB + Morten Mortensen + it&qamcom.se +17849 + Neuroimage Nord + Mathias Pietsch + m.pietsch&uke.uni-hamburg.de +17850 + Groupe Danone + Nadir Belarbi + nadir.belarbi&danone.com +17851 + SPRINGWAVE,Inc. + Minwoo Kim + minu&springwave.co.kr +17852 + The ProFTPD Project + TJ Saunders + tj&castaglia.org +17853 + Heilpraktikerschule Luzern Ltd. + Pascal Eugster + peugster&hpsl.ch +17854 + Twenty First Century Communications (TFCC) + Carey Hall + carey.hall&tfcci.com +17855 + zipRealty, Inc. + Derek Suzuki + dsuzuki&ziprealty.com +17856 + Titan.ium Platform, LLC + David Jackson + djackson&titaniumplatform.com +17857 + Tiscali France + Jerome Fleury + jerome.fleury&fr.tiscali.com +17858 + sonik.org + SASAKI Suguru + admin&sonik.org +17859 + Septier Communications Ltd. + Uri Savoeay + info&septier.com +17860 + Alembic, Inc. + Ron Wickersham + rjw&alembic.com +17861 + Twise Labo Inc. 
+ Masayuki Yamai + ymi&twise.co.jp +17862 + Kelly Martin + Kelly Martin + kmartin&pyrzqxgl.org +17863 + dotOpen Pty Ltd + Warrick Zedi + warrick.zedi&dotopen.com.au +17864 + Fudan GrandHorizon Information Technology, Inc. + Liao Jian + jliao&guanghua.sh.cn +17865 + eValley.VS + Jaehyup, Cho + jhcho&evalleyvs.com +17866 + GPS Applications, Inc. + Andrew Zenk + andrew&overeducated.com +17867 + C&M Fine Pack, Inc. + Jerry Richards + jerry.richards&cmfinepack.com +17868 + Codersnetwork.co.uk + Matt Hampton + iana.org&codersnetwork.co.uk +17869 + etherstack.com + Jeremy Davies + jeremy.davies&etherstack.com +17870 + Bradford Technology Limited + Andrew Hall + andyh&btl.com +17871 + NexGen City + Kevin Farley + kfarley&nexgencity.com +17872 + iDOC K.K. + Kazma Sonoda + ksonoda&idoc.co.jp +17873 + Gcom, Inc + David Grothe + dave&gcom.com +17874 + Michael F. Doyle + Michael Doyle + doymi01&gmail.com +17875 + Macromedia, Inc. + Michael Melnicki + mmelnick&macromedia.com +17876 + Factline Webservices GmbH. + Klaus Ita + klaus&worstofall.com +17877 + Unassigned + Returned 2003-08-21 + ---none--- +17878 + Journalistic, Inc. + Jason Purdy + admin&journalistic.com +17879 + Ask Software Corporation + Jyoti Das + jyoti&asksoftware.com +17880 + Robert Kurtys + Robert Kurtys + bob&glog.pl +17881 + Profil Kft + Szilagyi Attila + szilagyi.attila&profilkft.hu +17882 + Costco Wholesale Corp. + Kevin Hoffman + khoffman&costco.com +17883 + CSS Versicherung + Bruno Rohrer + bruno.rohrer&css.ch +17884 + Rocketseed + Simon Ratcliffe + sratcliffe&rocketseed.com +17885 + Agarwal Associates Ltd + Alok Agarwal + alok.agarwal&nch.it +17886 + bross ag + Oliver Nispel + oliver.nispel&bross-ag.de +17887 + Realtime Systems Limited + Mr Hemant Chadha + hchadha&rtsindia.com +17888 + Xor-Technologies + Moshe Zaltzman + moshez&xor-t.com +17889 + Porsche Informatik GmbH + Johannes Grumboeck + johannes.grumboeck&porscheinformatik.at +17890 + JSC "OSS Corporation" + Alexander Mazavin + noc&oss.ru +17891 + MidAmerica Nazarene University + IANA Number Administrator + iana&mnu.edu +17892 + Lumenaré Networks, Inc + Patrick Deloulay + pdeloulay&lumenare.com +17893 + Cellent AG + Dieter Meckelein + Dieter.Meckelein&cellent.de +17894 + Airtria + Thierry Tallagrand + thierry.tallagrand&airtria.com +17895 + Keopsys + Marc LE FLOHIC + thierry.tallagrand&airtria.com +17896 + TelePro C.S + Michael Devitt + mickdevit&aol.com +17897 + QUANTUM RADIONICS CORPORATION + DR. GREGORY M. STONE + gms&quantumradionics.com +17898 + RISS (Regional Information Sharing Systems) + Chad Hendry + chendry&risstech.riss.net +17899 + mIstA Technologies + Michael Stampach + weboffice&inode.at +17900 + Pivetal Ltd + Martin Shaw + martins&pivetal.com +17901 + Koolspan, Inc. + John Keating + jkeating&koolspan.com +17902 + Copeland Corporation + Donald Lee + dwlee&copeland-corp.com +17903 + Grouse Media + Mike Barnes + mike&grousemedia.com +17904 + boros consulting gmbh + Stefan Boros + office&boros-consulting.com +17905 + Aktion Kritischer SchülerInnen Salzburg + Sebastian Arming + sebastian.arming&aks.at +17906 + Folleville + contact&folleville.com + contact&folleville.com +17907 + Kumashiro System Frontier Co., Ltd. 
+ Taoka Fumiyoshi + taoka&ksfnet.co.jp +17908 + Tobias Punke + Tobias Punke + tpunke&web.de +17909 + Zandar Technologies + Louis Ryan + LRyan&zandar.com +17910 + Meridian VAT Processing (International) Ltd + Aoife Kavanagh + aoife.kavanagh&meridianp2p.com +17911 + Mountain Area Information Network + Rusty Holleman + rusty&main.nc.us +17912 + Jugendzentrum Kirchheim + Walter Werther + sysop&juz-kirchheim.de +17913 + MacShed + Anders Hellstrom + anders&macshed.net +17914 + Escribanía de la Fuente + Juan Angel de la Fuente + jadelaf&adinet.com.uy +17915 + Iris Media Ltd. + Vassilis Stathopoulos + hostmaster&irismedia.gr +17916 + Istituto Tecnico Commerciale Cesare Battisti + Cristiano Cumer + cristiano&mmp.it +17917 + Computer-Essence + Gary Taylor + gary&Computer-Essence.com +17918 + Smart Systems Pty. Ltd. + Richard Mullins + rmullins&smartsystems.com.au +17919 + Light Socket, Ltd. + Zoltan Ori + zoltan&ycnx.net +17920 + C-Borne Co.,Ltd. + Yasushi ABE + Yasushi.ABE&C-Borne.CO.JP +17921 + HALLE-DOT-COM Germany + Markus Laube + mlaube&servicestelle.com +17922 + PinPoint Israel Ltd. + Arie Koblenz + arie.koblenz&tppco.com +17923 + Tinext SA + MIB/SNMP + mib.snmp&tinext.net +17924 + RatNet + Jim Maynard + info&ratnet.net +17925 + Dresdner Kleinwort Wasserstein + Haran Rajadeva + GBSGlobalSchemaAdmin&drkw.com +17926 + Banco Nacional de Costa Rica + Marielos Vásquez H. + mvasquez&bncr.fi.cr +17927 + Sana Security, Inc. + Marc Hedlund + snmp-admin&sanasecurity.com +17928 + siikuls incorporated + putkowski + siikuls&bellsouth.net +17929 + Synthetic Networks Inc + Dinkar Chivaluri + dinkar_chivaluri&yahoo.com +17930 + SAGA University + Hirofumi ETO + etoh&cc.saga-u.ac.jp +17931 + Diligent Technologies Corporation + Michael Hirsch + mh&diligent.com +17932 + ZeCo GmbH + Heiko Zehner + Heiko.Zehner&zeco.de +17933 + Compulogic Limited + Bob Morris + bob.morris&compulogic.ltd.uk +17934 + Association of Teachers and Lecturers + Bernard King + bking&atl.org.uk +17935 + Videolarm, Inc. + John Kalter + jkalter&videolarm.com +17936 + Penn State Hershey Medical Center + Jeffrey S Campbell + jscampbell&hmc.psu.edu +17937 + Lollipop Learning Ltd. + Brian Bishop + brian.bishop&lollimail.com +17938 + Unassigned + Removed 2003-08-22 + ---none--- +17939 + Guangdong Province Electronic Technology Research Institute + chenlizhen + youzen&263.net +17940 + The University of Texas at Austin + William Green (ITS-TN) + net-admin&its.utexas.edu +17941 + Web Fabric LLC + Administrator + admin&mssgs.net +17942 + ISN, Inc. D/B/A inSolutions + Aaron Aycock + aaycock&insolutions.net +17943 + Progressix + Marcel v. Birgelen + marcel&progressix.com +17944 + Subrenat Expansion S.A. + Christophe GIRDAL + computing&subrenat.com +17945 + Alpha Telecom Inc. USA + Judy Lee A'Neals + janeals&alpha-tele.com +17946 + Sinema Instruction + Dan Sinema + dansinema&mac.com +17947 + Card Access Services Pty Ltd + Richard Colley + rcolley&cardaccess.com.au +17948 + LV Power (2003) Ltd + Aaron Applebaum + sale&lvpower.net +17949 + i-neda Ltd + Joseph Blackman + jblackman&i-neda.com +17950 + Endian + Raphael Vallazza + raphael&endian.it +17951 + Barnim.Net + Thomas Bez + bez&barnim.net +17952 + Scalent, Inc + Rusty Greer + rusty.greer&scalent.com +17953 + Advanced Clustering Technologies, Inc. 
+ Justin Penney + jpenney&advancedclustering.com +17954 + Voicenet Cyber Cafe + Mr.jignesh prajapati + jignesh1&india.com +17955 + Polytron Corporation + Ralph McIntire + ralph&polytron-corp.com +17956 + NetCare + Kjeld Dunweber + kjd&netcare.dk +17957 + Uniwide Technologies inc. + Sung-il Nam + oncea&uniwide.co.kr +17958 + KAIYO DENSHI KOGYO Co., Ltd. + Manabu Taniguchi + mark&kaiyo-denshi.co.jp +17959 + Accense Technology Inc. + Kohji Osamura + osa&accense.com +17960 + ETRI + Sang Hoon Park + sanghoonnom&hotmail.com +17961 + Luottokunta + Marko Parviainen + marko.parviainen&luottokunta.fi +17962 + Zandan + Jimmy Holzer + jimmy.holzer&zandan.com +17963 + CONI Servizi S.p.A. + Sandro Marrone + smarrone&informatica.coni.it +17964 + Mairie de Pierrefitte-sur-Seine + Thierry Dulieu + tdulieu&easter-eggs.com +17965 + mediaskill OHG + Manuel Rorarius + info&mediaskill.de +17966 + space2go GmbH + Manuel Rorarius + mrorarius&space2go.com +17967 + Qovia, Inc + Jason Sapp + jsapp&qovia.com +17968 + Fortress Systems, Ltd. + Ivan Makfinsky + ivan.makfinsky&fsl.com +17969 + OC4, LLC. + William Malz + will&oc4.com +17970 + jSNMP Enterprises + Jim Pickering + jpickering&jSNMP.com +17971 + Authenti-Corp + Michael Crusoe + michael&authenti-corp.com +17972 + IntraMeta Corporation + Kevin McCarthy + nospam&intrameta.com +17973 + Terravox Technologie Inc + Adam Sherman + adam&tritus.ca +17974 + JWM3, Inc. + James W. Martin III + iana.org&jwm3.com +17975 + World Radio Missionary Fellowship Inc. + Steve Grace + sgrace&hcjb.org +17976 + ISI-ESM + Mike Dooner + mike.dooner&effem.com +17977 + Rokeby Technologies Ltd. + Salvatore Ilardo + ilasa01&linux.rokeby.com +17978 + Elfwerks + Eric Gustafson + ericg-iana&elfwerks.org +17979 + HyperMedia Systems Ltd. + Amnon David + amnon&hyperms.com +17980 + Oliver Huf + Oliver Huf + oli&huf.org +17981 + Interacct Solutions(Australia) + John Muller + john.muller&interacctsolutions.com +17982 + Clickerty-Click Ltd + Leo Paalvast + leo&clickerty-click.com +17983 + OutPostal Corporation + William Malz + will.iana.org&outpostal.com +17984 + SHENZHEN COSHIP SOFTWARE CO.,LTD. + Yuan Song + yuansong&coship.com +17985 + kingsoft + dengzhenbo + dengzhenbo&kingsoft.net +17986 + Systems Union (Shanghai) Ltd. + Aaron Xu + aaron_xu&sunsystems.com +17987 + Vodafone Group Services Limited (formerly 'Cable & Wireless UK') + Howard Johnson + howard.johnson&vodafone.com +17988 + PYLON R.I.S.C. GmbH + Siegmund Gorr + siegmund.gorr&pylon.de +17989 + petujek.net + Petr Burdik + pet&petujek.net +17990 + NEC Electronics Corporation + Sachi Kubota + oid-admin&LSi.nec.co.jp +17991 + Weldun, s.r.o. + Daniel Volar + volar&weldun.sk +17992 + Net Direct Inc. + John Van Ostrand + john&netdirect.ca +17993 + Aware-Networks Ltda + Jorge Andres Torres + jtorres&aware-networks.com +17994 + Mathematical Institute + Keith A. Gillow + gillow&maths.ox.ac.uk +17995 + Asociación de Escribanos del Uruguay + Juan Angel de la Fuente + jadelafuente&aeu.org.uy +17996 + Alien Technology Corporation + David Krull + dkrull&alientechnology.com +17997 + Artis Inc. + Artis Chiang + artisjiang&giga.net.tw +17998 + Vordel Ltd. 
+ Customer Support Manager + support&vordel.com +17999 + Rapsodie + Franck Cornaz + info&rapsodie.fr +18000 + AGRICULTURAL BANK OF GREECE SA + THEODORE KOUMOUTSOS + tkoumoutsos&ate.gr +18001 + University of Cape Town + Craig Balfour + craig&its.uct.ac.za +18002 + Talisen Technologies + Jim Crismon + jcrismon&talisentech.com +18003 + Stichting Wireless Leiden (Foundation under Dutch Law) + Dirk-Willem van Gulik + dirkx&webweaving.org +18004 + Micro Forté Pty Ltd + Gareth Walters + garethw&syd.microforte.com.au +18005 + K.K. Alice + Christians, Stefan Mr. + software&kkalice.com +18006 + cognitas GmbH + Stefan Thums + stefan.thums&cognitas.de +18007 + Phonalyse BVBA + Johan De Witte + johan.dewitte&phonalyse.com +18008 + Fachhochschule Mannheim HS f. Technik u. Gestaltung + Mr. Juergen Bauer + j.bauer&fh-mannheim.de +18009 + Outram Research Ltd + Simon Baldry + Outram_research&compuserve.com +18010 + Eastern Washington University + Michael Flyger + networks&mail.ewu.edu +18011 + Apollo Communications, International + James McNalley + james&apollo-com.net +18012 + connectLOGIC + Kent A. Brown Lee + admin&connectlogic.co.nz +18013 + DBL Technologies, Ltd. + Kevin Cai + cai_ning&yahoo.com +18014 + Department of Electrical & Electronics Engineering + Mookiah Prathaban + mprathap&pdn.ac.lk +18015 + Futarque A/S + Ole Kaas + obk&futarque.com +18016 + JBoss Group, LLC + Scott Stark + scott&jboss.org +18017 + PUBLIC VOICE Lab - International Media Co-op + Rene Pfeiffer + rene&pvl.at +18018 + Northwoods Catholic School + Christopher Voltz + christopher.voltz&northwoodscatholic.org +18019 + On Q Holdings Pty Ltd + Scott Beck + scottb&onq.com.au +18020 + Pacific Retail Group + Mike White + mike.white&prg.co.nz +18021 + Construction Electronique et Telecommunication International + Jacques Greindl + j.greindl&cet.be +18022 + Covaro Networks, Inc. + Vikas Trehan + vikas&covaro.com +18023 + Tatung Science & Technology, Inc. + James Hwang + Jamesh&tsti.com +18024 + Cinergy Communications Company + Jamie Hill + oid-admin&cinergycom.com +18025 + siWan GmbH & Co. KG + Guido Schüller + OpenLDAP&siWan.de +18026 + Extron Electronics + Brian Taraci + btaraci&extron.com +18027 + Battleaxe Technologies, Inc. + Adam Israel + adam&battleaxe.net +18028 + Battleaxe Technologies, Inc. + Adam Israel + adam&battleaxe.net +18029 + Sonic Foundry, Inc. + Barry Hagan + iana-contact&sonicfoundry.com +18030 + Molecular Therapeutics, Inc. + Jonathan Moody + jonathan&molecularimaging.com +18031 + ATM Telecom + Jae-Won Jeong + jjw&atmtele.com +18032 + matieux.net + Mathieu FRANCOIS + mat&matieux.net +18033 + Viviale Ltd + Jeremy Hilton + jhilton&viviale.com +18034 + FrogNet, Inc. + Operations Group + noc&frognet.net +18035 + Netsolus.com Inc. + Bryan Ballard + ballard&netsolus.com +18036 + United Telecoms Ltd + C V Rao + cvrao&utlindia.com +18037 + Pramati Technologies Pvt Ltd + Srinivasa Rao T + srinivas&pramati.com +18038 + Schotten & Marchart OEG + Andreas W. Schotten + schotten&domainbank.at +18039 + Toyo Radiator Co., Ltd. + Andy Taki + andy&trad.co.jp +18040 + H.T.T.P. Planitis Communications Ltd. 
+ Per Granath + per&planitis.net +18041 + pdv.com Beratungs-GmbH + Stefan Joachim + stefan.joachim&pdvcom.de +18042 + Meteksan Net + Internet Systems Engineering Group + se&meteksan.net.tr +18043 + FinanzIT GmbH + Jens Fricke + jens.fricke&finanzit.com +18044 + Synelec Telecom Multimedia + Synelec R&D - Software Department + rd-soft&synelec.fr +18045 + IMFORM GmbH + Bernd Poerner + poerner&imform.de +18046 + Skellefteå kommun + Patrik Hellgren + patrik.hellgren&kommun.skelleftea.se +18047 + NeoLogica s.r.l. + Marco Sambin + info&neologica.it +18048 + SunGard Futures Systems + Pault Tuffy + sfsianainfo&sungardfutures.com +18049 + Pixel Power Ltd + David Burley + dburley&pixelpower.com +18050 + Enterprise Products Integration Pte Ltd + Edward van Leent + edward&epi-ap.com +18051 + RF Systems Nachrichtentechnik GmbH + Wilhelm Heucke-Scheller + rfsmail&rfsystems.de +18052 + stuvus Studierendenvertretung Universität Stuttgart + Sven Feyerabend + referent-it&stuvus.uni-stuttgart.de +18053 + citecs GmbH + Chris Recktenwald + iana-contact&citecs.de +18054 + Tenovis GmbH & Co. KG + Rainer Krinn + Rainer.Krinn&tenovis.com +18055 + Unassigned + Returned 2003-09-05 + ---none--- +18056 + Pioneer Digital Technologies, Inc. + Glen E. Roe + glen&pioneerdigital.com +18057 + Procondo GmbH + Markus Theißinger + theissinger&procondo.de +18058 + Optimus Inc. + Ken Winke + tex&optimus.com +18059 + B2SCorp + Alsenitd Rausseo + arausseo&b2scorp.com +18060 + The Apache Software Foundation + Alex Karasulu + akarasulu&apache.org +18061 + OCEAN DEVELOPMENT + Stefan Joachim + stefan&ocean-development.de +18062 + Jo Tankers AS + Jan Eide + jan.eide&no.jotankers.com +18063 + Technetix BV (formerly 'Tratec Telecom b.v.') + Gert Bronkhorst + Gert.Bronkhorst&technetix.com +18064 + Marek Walther + Marek Walther + Marek-Walther&gmx.de +18065 + XOU Solutions Limited + Andrew Roden + hostmaster&xousolutions.com +18066 + Planet Lauritsen + Chad S. Lauritsen + noc&planetlauritsen.com +18067 + PowerTech Information Systems AS + Frode Nordahl + frode&powertech.no +18068 + hansmi.ch + Michael Hanselmann + iana&hansmi.ch +18069 + Fulfillment Plus + Jonathan Chelton + jchelton&fulfillmentplus.com +18070 + BTI Photonic Systems + Mark Jamensky + mjamensky&btiphotonics.com +18071 + Cendura Corporation + Marco Framba + framba&cendura.com +18072 + R-MEDIA + Alexander Ruof + a.ruof&r-media.de +18073 + hinegardner.org + Jeremy Hinegardner + jjh-iana&hinegardner.org +18074 + Kass Enterprises LLC + Andrew Smith + asmith&kellc.com +18075 + Combridge Medical Information Corporation + Ziqiang Chen + zchen&cam-med.com +18076 + b1n + Marcio Ribeiro + mmr&b1n.org +18077 + Acasim Technologies, Inc. + Ming Fan + mfan&acasim.com +18078 + Unassigned + Removed 2007-04-05 + ---none--- +18079 + City I.T. (UK) Ltd + Akan Nkweini + akan6&yahoo.com +18080 + BlueBoard Ltd. + Atanas Karashenski + atanas&blueboard.biz +18081 + Numa Technology Inc. + Ting + ting&numa.com.tw +18082 + Midwest Independent Media Foundation + Derek P. Moore + derek.moore&sbcglobal.net +18083 + European Society of Cardiology + Hakim Bouras + hbouras&escardio.org +18084 + PharmXplorer + Guenther Berthold + guenther.berthold&uni-graz.at +18085 + Ciaolab Technologies Spa + Marco Stoppa + marco.stoppa&ciaolab.com +18086 + ProTelevision Technologies A/S + Kim Engedahl + admin&ProTelevision.com +18087 + Optare Solutions S.L. + Ricardo Riguera + rriguera&optaresolutions.com +18088 + Jackson + Mark Ferguson + mark.ferguson&jacksonmsc.com +18089 + Inovis Inc. 
+ Daniel Nemec + hostmaster&inovis.com +18090 + FrogNet, Inc. + Operations Group + noc&frognet.net +18091 + CaboVerde + Marcio Ribeiro + mmr&caboverde.com.br +18092 + Ssji Networks + Nicolas Pouillon + nipo&ssji.net +18093 + K-Opticom Corporation + Masahiro Teramoto + teramoto&tech.k-opti.com +18094 + Kingdom LAN Network + Wong Tsang + wongtsang&kingdom-lan.net +18095 + Beijing Neutron TeleCOM Co. Ltd. + Zhengyu Fang + zyfang&neutrontek.com +18096 + Advanced Ceramics Research, Inc. + Baruch Spence + bspence&acrtucson.com +18097 + Sevilla Project + Bas van Ulden + bvanulden&quicknet.nl +18098 + alexander stensrud + alexander stensrud + contact&stensrud.nu +18099 + Amoke Networks Pvt. Ltd. + Manas Garg + manas&amoke.com +18100 + Descom Consulting Ltd. + Remigius Stalder + remigius.stalder&descom-consulting.ch +18101 + Adaptive Mobile Security Ltd. + Brendan Dillon + bdillon&adaptivesecure.com +18102 + Leostream Corp + David Crosbie + snmp&leostream.com +18103 + Shanghai Helitech Telecommunication Co.,Ltd Neijing Technical Branch + Mr. Gong Wendong + jimmy.gong&helitechchina.com +18104 + net-worked.co.uk + Greg Dawson + gregdawson&net-worked.co.uk +18105 + Calyptix Security Corporation + Lawrence Teo + oid-admin&calyptix.com +18106 + University of Iceland Computing Services + Elias Halldor Agustsson + mib-admin&hi.is +18107 + EEG Enterprises, Inc. + Philip T. McLaughlin + philm&eegent.com +18108 + PingID Network, Inc. + David Waite + david.waite&pingid.com +18109 + NeoSpire, inc. + Sean Bruton + sbruton&neospire.net +18110 + Speakeasy, Inc. + Speakeasy NOC + noc&speakeasy.net +18111 + A.I.C. Communications, Inc. + Robert Wu + robert&aicphone.com +18112 + oDesk Corp. + Odysseas Tsatalos + otsatalos&odesk.com +18113 + Wanspot.com Inc. + Jim Mercer + jim&wanspot.ca +18114 + Twotrees Technologies, LLC + Michael Dunnell + oidcontact&twotrees.net +18115 + ICRISAT + Ram Kumar B + r.kumar&cgiar.org +18116 + Node Computing Solutions + John Rodger + jrodger&nodecs.com +18117 + HTK NetCommunication Oy (PHNet) + Kimmo Liikonen + helpdesk&phnet.fi +18118 + New Voice International AG + G. M. Brigati + smi&newvoice.ch +18119 + MONOPRIX + David DECARRIERE + ddecarriere&monoprix.fr +18120 + Hyder Consulting PLC + Chris Dando + chris.dando&hyderconsulting.com +18121 + ObjectSource Software GmbH + Patrick Shinnick + ps&objectsource.de +18122 + Universidade de Sao Paulo + Alberto Camilli + cceadmin&usp.br +18123 + Pleasant Ridge Waldorf School + John Conlon + jconlon&verticon.com +18124 + Verticon, Inc. + John Conlon + jconlon&verticon.com +18125 + End To End + Dave Robinson + drobinson&endtoend.com +18126 + Quantum Internet Services, Inc. + Justin Boswell + jboswell&qis.net +18127 + Net Consequence GbR + Peter Turczak + pt&netconsequence.com +18128 + Komos + Jun Kawai + oid-contact&komos.org +18129 + Yorkshire Building Society + Darren Mason + dbmason&ybs.co.uk +18130 + CdC-Sagax + Carlos Heras Vila + c.heras&cablescom.com +18131 + Echoraith + Allan Lloyds + contact&echoraith.net +18132 + Fundación Universidad del Norte + María Gabriela Calle Torres + mcalle&uninorte.edu.co +18133 + Genesis Networks and Communications, Inc. + Herman Strom + herman&gncom.net +18134 + CEPRI + Zhao hongbo + zhaohb&epri.ac.cn +18135 + Nortel Networks Netas + Levent Seckin + lseckin&netas.com.tr +18136 + Delsyne Software srl + Alex Balut + aleb&delsyne.ro +18137 + Wegener Communications, Inc. + Elias J. Livaditis + eliasl&wegener.com +18138 + Adwise Ltd. + Eliezer Rotshtein + laser6&walla.co.il +18139 + NTT FACILITIES,INC. 
+ Wataru Horita + mib&rd.ntt-f.co.jp +18140 + PetT Luxembourg + SCHEER Jean-Marie + jean-marie_scheer&ept.lu +18141 + RRZN, Leibniz Universitaet Hannover + Torsten Glaeser + hostmaster&rrzn.uni-hannover.de +18142 + Gruender-AV + Thomas Gruender + info&gruender-av.de +18143 + Lindenaar + Frederik Lindenaar + frederik&lindenaar.net +18144 + Michael Vistein + Michael Vistein + iana&vistein.de +18145 + Agricultural Mineral Prospectors Inc. + Peter J. Slack + pslack&sentex.ca +18146 + PS Solutions Ltd. + Danilo J. Castro Jr. + danilo&pssolucoes.com.br +18147 + TerraLuna + Steve Traugott + stevegt&TerraLuna.Org +18148 + cool.de + Thomas Brettinger + tb&cool.de +18149 + B.I. Tecnologia Ltda + Flavio Pinto Freire Jr. + flavio.freire&bitecnologia.com +18150 + Webage + Walter Alt + alt&webage.de +18151 + Pflug Datentechnik + Michael Pflug + oid-mgmt&pflug.de +18152 + ezGet.net + QuanSheng Liang + liang&ezget.net +18153 + Software Engineering GmbH + Michael Heim + Michael.Heim&gmx.com +18154 + Telnet Media Ltd + David Taylor + sysrego&telnetmedia.com +18155 + Stilpo Laboratories + David G. Smith + d.smith&stilpo.com +18156 + Great Pacific Industries Inc. + Internet Administration + internet_administration&owfg.com +18157 + Willowglen MSC Berhad + Teh Kok How + khteh&willowglen.com.my +18158 + GATSWAY DATA COMMUNICATION TECHNOLOGY CO.,LTD. + zhihua ouyang + scorpio30cn&yahoo.ca +18159 + Accordance Systems Inc. + Mike LIU + sales&accordance.com.tw +18160 + SHENZHEN GBIT ELECTRONIC TECHNICAL LTD. + Michael Zhang + szszjs&public.szptt.net.cn +18161 + iPoint Ltd. + Kirill Lebedev + info&ipoint.ru +18162 + EYME Technologies Pvt. Ltd. + Jerry P. Philip + jerry.philip&bh.ey.com +18163 + Commit; Oy + Konstantin Vdovenko + konstantin.vdovenko&commit.fi +18164 + Office of the Prime Minister + Stein A. Haakafoss + sah&smk.dep.no +18165 + VB Pros Oy / RS-Solutions + Kauko Sirén + kauko.siren&rs-solutions.net +18166 + Chalmege + Bruno Meyrieux + bruno&meyrieux.net +18167 + Integro Networks + Bruno MEYRIEUX + bruno.meyrieux&integro-networks.com +18168 + Infogate-Online Ltd + Oren Cohen Shwartz + OrenC&INFOGATEONLINE.COM +18169 + DOTFLUX + Vincent GRENIER + iana&dotflux.com +18170 + Inno.com + Patrick Snelders + Patrick.Snelders&inno.com +18171 + Torchbox Ltd + Tom Dyson + sysadmin&torchbox.com +18172 + AP-WDSL GbR + Stefan Pfetzing + info&ap-wdsl.de +18173 + Ramanam Software Distributors Private Limited + Anupam Kumar + anupam&ramanam.com +18174 + Wisconsin Department of Justice + Rhonda Thompson + thompsonrd&doj.state.wi.us +18175 + Araknos Srl + Maurizio Dal Re + info&araknos.it +18176 + Pikeville Methodist Hospital + Rusty W. Shanklin + rusty.shanklin&pikevillehospital.org +18177 + INPE + Gustavo Beltrami Rossi + rossi&lac.inpe.br +18178 + Front Range Internet, Inc. + Neil Neely + neil&frii.net +18179 + Quest Software, Inc. (formerly 'Vintela Inc.') + Eric Baurle + iana&quest.com +18180 + UNIT Ltd. + Daniel Vanco + Daniel.Vanco&unit.sk +18181 + Western Union Financial Services, Inc + William Van Glahn + bill.vanglahn&westernunion.com +18182 + Ahold Information Services + Eric Newman + eric.newman&aholdusa.com +18183 + Systems Atlanta, Inc. 
+ Richard Rutledge + rutledge&sysatl.com +18184 + Exactelis + Alain Kamuleta + alainkamuleta&hotmail.com +18185 + Rene van Rooyen + Rene van Rooyen + rene&vanrooyen.com +18186 + Awacs ltd + Michael Sutton + iana&awacs.co.nz +18187 + Linkwise Software(Shanghai) Co.,Ltd + Shi Zengwei + shizw&linghui.com +18188 + Torinet + Dais Kim + ggoma&torinet.co.kr +18189 + Direction Générale des Impôts + Eric BEAURY + bureau.si3-dai&dgi.finances.gouv.fr +18190 + dabs.com PLC + Wezley Hunter + whunter&beta.dabs.com +18191 + Audiovision + Christelle Fostiez + audio-vision&skynet.be +18192 + eSchoolOnline + Michael Abato + mabato&classroom.com +18193 + Ruban Consulting + Dima Ruban + dima-iana&rdy.com +18194 + Bewer-Enterprises + Nino Bewer + ldap&bewer-enterprises.de +18195 + Xsec Srl + Simo Sorce + info&xsec.it +18196 + Landeszahnärztekammer Sachsen + Peter Lange + lange&lzk-sachsen.de +18197 + Sony Communication Network Corporation + Yukio Yakushijin + jin&scn.co.jp +18198 + Zemilogix, LLc + Leighton Esdaille + leighton&zemilogix.com +18199 + Objective Data Storage + Dean Beilke + beilke&objectivedatastorage.com +18200 + Software Expedition + Thomas Berry + swexpedition&nc.rr.com +18201 + Bestseller A/S + Regnar Knudsen + regnar.knudsen&bestseller.com +18202 + Belatronix Kommunikation & EDV + Axel Beierlein + belatronix&web.de +18203 + TC Electronic A/S + Niels Farver + NielsF&tcelectronic.com +18204 + Lendscape, LLC (formerly 'HPD Software, LLC' and 'Computer And Software Enterprises, Inc.') + Dave Oksner + admin-oid&hpdsoftware.us +18205 + Pioneer Industries, Inc. + Gary Chen + garychen&ucla.edu +18206 + iVolve Pty Ltd + David Eagles + david.eagles&ivolve.com +18207 + ANTs software inc. + Jeffrey Spirn + jeff.spirn&antssoftware.com +18208 + TeleSym + Michael A. Carlson + michaelc&telesym.com +18209 + Midwest Radiology of Kentucky + Jason A. Taylor, R.T.(R) + midwestradiology&yahoo.com +18210 + Agilis Communication Technologies Pte Ltd + Chen Xuesong + chenxs&agilis.st.com.sg +18211 + Catalina Computers & Discount + Michael Wozniak + sales&catalinas.net +18212 + A && L soft, s.r.o. + Petr Kadlec + kadlec&alsoft.cz +18213 + Voxsant Resources, Inc. + Joel Realubit + joel&voxsant.com +18214 + Olgierd Ziolko + Olgierd Ziolko + hostmaster&eris.rpg.pl +18215 + Metro Packet Systems, Inc. + Fredrik Orava + fredrik&mpacket.com +18216 + AnyWeb AG + David Knecht + david.knecht&anyweb.ch +18217 + Profium OY + Tero Hagstrom + th-iana&profium.com +18218 + DIGORA + Laurent Dufosse + laurent.dufosse&digora.com +18219 + Salten Bredband AS + Kay Ove Kjerpeseth + kay.kjerpeseth&sks.no +18220 + W en J + joop gerritse + jjge&xs4all.nl +18221 + SY Electronics Ltd + Stephen Wright + stephen&syelectronics.co.uk +18222 + Hopitaux Universitaires de Strasbourg + Denni + laurent.denni&chru-strasbourg.fr +18223 + W-OneSys S.L. + Daniel Fraile Vergel + dfraile&w-onesys.com +18224 + Dipl.-Inf. Carsten Dumke + Carsten Dumke + cdumke&cdumke.de +18225 + guenever.net + Pedro Cuenca + pcuenca&ieee.org +18226 + imap4all B.V. + Brendan Bank + brendan&gnarst.net +18227 + OpenCA + Massimiliano Pala + project.manager&openca.org +18228 + University Of Stuttgart + Michael Stegmüller + Michael.Stegmueller&tik.uni-stuttgart.de +18229 + Sonitrol Security Systems of Hartford, Inc. + Marc Swanson + mswanson&sonitrol.net +18230 + Conectium Limited + Erasmo Zubillaga + ezubillaga&conectium.com +18231 + Clear Reach Networks, Inc. + Justin Nelson + inan&clearreach.net +18232 + Equipos Telemo E.T. S.A. 
+ Anibal Itriago + anibal&telemo.com.ve +18233 + HEC Montreal + Pierre Berard + Admin.Unix&hec.ca +18234 + NetSrv Consulting Ltd + Mr Colin Woodcock + colin.woodcock&netsrv-consulting.com +18235 + Andreas Schulze + Andreas Schulze + iana-oid&andreasschulze.de +18236 + McGill University + Ron Hall + ron.hall&mcgill.ca +18237 + IntelliVid Corporation + Systems Administrator + enterprise-number&intellivid.com +18238 + SkyLink Design + Ryan Hodgson + ryan&skylinkdesign.com +18239 + DTS, Inc. (formerly 'Digital Theater Systems, Inc') + Phil Maness + phillip.maness&dts.com +18240 + berger.de + Thomas Berger + thomasberger&t-online.de +18241 + w3design + Pascal Suter + info&w3design.ch +18242 + Wyyzzk, Inc. + Jason Winters + jason&txt.com +18243 + G&J FOSTER TECHNOLOGIES, INC. + RICHARD W. FOSTER + rfoster&ccc.edu +18244 + Matisse Networks Inc + S. Sampath + sampath&matissenetworks.com +18245 + Dishnet DSL Ltd. + Geeta Bora + geetab&ddsl.net +18246 + Degussa AG + Andre Giza + is-net-ds&degussa.com +18247 + IT&T s.r.l. + Andrea Spinelli + aspinelli&imteam.it +18248 + PaPouch elektronika + Tomas Jantac + steiger&papouch.com +18249 + Diego Alvarez + Diego Alvarez + diego.o.alvarez&gmail.com +18250 + TECHNOLOGIES RESEAUX & SOLUTIONS + Alain Patrick AINA + aalain&trstech.net +18251 + Beijing ACT Technology Co., Ltd. + Zhou Ying + zhouying&act-telecom.com +18252 + Symmetrus Systems Ltd. + Jim Marsden + jim.marsden&alertbase.com +18253 + mVerify Corporation + Mark Yedinak + mark_yedinak&mverify.com +18254 + Mimezine + Jason Bubolz + mycroft&mimezine.org +18255 + Taller de Ideas (de C. Daniel Mojoli B.) + C. Daniel Mojoli B. + cdmojoli&idea.com.py +18256 + Babcock & Wilcox Company (McDermott) + Robert Aldridge + realdridge&babcock.com +18257 + KHAMSIN Security GmbH + Tom Knienieder + knienieder&khamsin.ch +18258 + Binken.com + Laurens Binken + oid&binken.com +18259 + CBTCBT Inc. + David R. Land + drland&cbtcbt.com +18260 + Bandapart.Net + Alan Martins + alan.martins&bandapart.net +18261 + TigerByte + Clint Miller + cmiller&tigerbyte.com +18262 + Technicolor Media Asset Management + Michael Druckman + Michael.Druckman&Technicolor.Com +18263 + Sanders Technology & Design + John Sanders + jsanders&radix.net +18264 + ICT Group HHW + S. van Rijn + info&ictgroup.nl +18265 + Kestrel Technologies, Inc. + Chris Kennedy + chris&mainecoon.com +18266 + Dallas Genealogical Society + Mitch Mitchell + database&dallasgenealogy.org +18267 + NEOJAPAN, Inc. + Yousuke SASAKI + sasaki&neo.co.jp +18268 + SkyNet Network Security System Integration co.,Ltd. + Andy Yuan + snmp&sns.net.cn +18269 + Mapfre + Joaquin Gonzalez + jgl&mapfre.com +18270 + BANKSYS + Guy Van Overtveldt + guy.vanovertveldt&banksys.be +18271 + CIFOM-ESNIG + Huguenin Dominique + Dominique.Huguenin&cpln.ch +18272 + Alerta Comunicaciones SAL + J. Pablo GARCIA + jpgarcia&alertacomunicaciones.com +18273 + French Senate + Stéphane Gaubert + s.gaubert&senat.fr +18274 + Shenzhen Experiment School + Liu Qiu Ming + lqm&szsy.net +18275 + CIBERNET Corporation + Anthony Sorace + anthony&cibernet.com +18276 + Lurcher Link + Nick Gorham + nick&lurcher.org +18277 + ---none--- + ---none--- + ---none--- +18278 + Columbus Metropolitan Library + Dirk Prusok + dprusok&columbuslibrary.org +18279 + Rainer Fischer, EDV-Service + Rainer Fischer + support&rf-edv-service.net +18280 + NetTempo, Inc. + Tim Irvin + irvin+iana&nettempo.com +18281 + Bodacion Technologies, LLC. 
+ Eric Hauk + hauk&bodacion.com +18282 + Muttsoft, Inc + Robert Ambrose + rna&muttsoft.com +18283 + Paraxip Technologies + Sebastien Trottier + iana.3.strot_at_paraxip&spamgourmet.org +18284 + EXEMYS SRL + Francisco J. Remersaro + fremersaro&exemys.com +18285 + Vidiator Technology Inc. + Danny Kim + dannyk&vidiator.com +18286 + Assurent Software Inc. + R. Neil Begin + nbegin&fscinternet.com +18287 + InnoPath Software + Yuko Tanaka + ytanaka&innopath.com +18288 + RABA Technologies LLC + Paul Chakravarti + paul.chakravarti&raba.com +18289 + Router Management Solutions, Inc. + Michael Hutt + hutt&ieee.org +18290 + szeles tibor + Tiberius Szeles + szelest&t-online.de +18291 + Ligfy + Laurent Fouillé + laurent&ligfy.org +18292 + Metis Oy + Petri Riihikallio + petri.riihikallio&metis.fi +18293 + Blackhawk Internet Communications Inc. + Jason L. Nesheim + jason&bhawk.net +18294 + Cambridge University Press + Paul Canham + netadmin&cambridge.org +18295 + Applied Software Solutions, LLC + Mark Usrey + mark.usrey&appss.com +18296 + Emigrant Savings Bank + Steven De Maio + DeMaioS&emigrant.com +18297 + Symbium Corporation + David Watson + dwatson&symbium.com +18298 + drugref.org + Dr. Horst Herb + hherb&drugref.org +18299 + Institute for International Economic and Political Studies, Russian Academy of Sciences + Andrew Paschenko + andrew&nop.ru +18300 + Xoba Inc. + Mike Andrews + snmp&xoba.com +18301 + Datacard Corporation + Alla Johnson + alla_johnson&datacard.com +18302 + Metroplex Webs + Chuck Gudgel + chuck&metroplexwebs.com +18303 + Sollae Systems Co.,Ltd. + Yoon, YoungChan + ycyoon&eztcp.com +18304 + Douglas Needham + Douglas Needham + netadmin&ka8zrt.com +18305 + North American Astrophysical Observatory + Douglas Needham + hostmaster&naapo.org +18306 + Sichuan e-link Co., Ltd. + Alexandre Tsu + alexela&e-link.cn +18307 + Eadiefleet Corporation + Michael Halliday + michael&eadiefleet.com +18308 + aphona Kommunikationssysteme Entwicklungs- und VertriebsgmbH + Gerald Hoch + gerald.hoch&aphona.net +18309 + Einsle + Robert Einsle + robert&einsle.de +18310 + Heitec AG + B. Luevelsmeyer + bdluevel&heitec.net +18311 + Linzies' computers + Linzie T. Oliver + oliverlt&tre357.gotdns.com +18312 + Data Flow Systems + David Odom + dodom&koostech.com +18313 + Bodacion Technologies, LLC. + Eric Hauk + hauk&bodacion.com +18314 + Canberra Industries + Troy Anderson + tanderson&canberra.com +18315 + McCormick & Company, Inc. + Stephen Morris + stephen_morris&mccormick.com +18316 + Ordination Med.-Rat Dr. Roesler + Rupert Roesler-Schmidt + iana-link&drroesler.com +18317 + Muttsoft, Inc + Robert Ambrose + rna&muttsoft.com +18318 + Copel Telecomunicações S/A + Ednei Teruaki Iamaguti + ednei.iamaguti&copel.com +18319 + Asociación Nacional Ecuménica de Desarrollo + Cody Fauser + codigo&irk.ca +18320 + Hydrologic Consultants, Inc of Colorado + Kevin Fries + it&hcico.com +18321 + Universidad del Sagrado Corazon + Cesar G. Calderon + cgcalderon&sagrado.edu +18322 + ITaCS GmbH + Björn Schneider + bjoern.schneider&itacs.de +18323 + Mindways Software, Inc. + Stephen Blankespoor + steve&qct.com +18324 + Press Communications P/L + Trevor Press + tpress&press.com.au +18325 + Advanced Relay Corporation + Ulrich Richers + arc&advancedrelay.com +18326 + InfoGin Ltd. + Daniel Yaghil + daniel&infogin.com +18327 + 42networks AB + Per Backstrom + per.backstrom&42networks.com +18328 + OpenMIND Networks Limited + Brian Kelly + iana-openmind&sulaco.com +18329 + AMC Ltd. 
+ Jim Gilbey + jimg&amcuk.com +18330 + Electrobusiness Connections Inc. + Ibrahim Hamouda + ihamouda&electrobusiness.com +18331 + Huber S-Consulting GmbH + Gert Kössler + koessler&hscon.de +18332 + ANF Autoridad de Certificación + Florencio Diaz + fdiaz&anf.es +18333 + ANF AC Entidad de Certificación Perú S.A.C. + Florencio Diaz + fdiaz&anf.es +18334 + KONICA MINOLTA HOLDINGS, INC. + Shinya Kaku + shinya.kaku&konicaminolta.com +18335 + Melloul-Blamey Construction Inc. + Richard Bourque + richard.bourque&melloul.com +18336 + Computer Logix + Richard Bourque + richard.bourque&computerlogix.net +18337 + creedon engineering + ted creedon + tcreedon&easystreet.com +18338 + Centrum voor Wiskunde en Informatica + A. van der Klaauw + ldap-managers&cwi.nl +18339 + ANF AC MALTA, LTD. + Florencio Diaz + fdiaz&anfacmalta.com +18340 + Proquest Information and Learning + Stephen Ulicny + preroot&il.proquest.com +18341 + The Institute for Open Systems Technologies Pty Ltd + Greg Baker + gregb&ifost.org.au +18342 + CEYONIQ Technology GmbH + Stefan Niemann + s.niemann&ceyoniq.com +18343 + Performance Solutions Limited + Don MacKenzie + Don.MacKenzie&perform-sol.com +18344 + Intmain.Com + C. Larson + cal&intmain.com +18345 + Messagesoft Inc. + Jonathan Jiang, Li Wang + lwang&messagesoft.com +18346 + Sebastian Denef Computer Service + Sebastian Denef + design&denef.de +18347 + MAUSER-Werke GmbH & Co. KG + Dr. Bernhard Rohde + bernhard.rohde&mauser-group.com +18348 + Avitech International Corp. + Morris Gong + morris&avitechvideo.com +18349 + Luther Seminary + Mark Solhjem + hostmaster&luthersem.edu +18350 + Polva Central Library + Alvar Kusma + alvar&raamat.polva.ee +18351 + Velare Technologies Inc. + Serguei Mourachov + smourachov&velare.com +18352 + Sony Pictures Imageworks + Robert Brophy + brophy&imageworks.com +18353 + MAUSER-Werke GmbH & Co. KG + Dr. Bernhard Rohde + bernhard.rohde&mauser-group.com +18354 + Calyptech Pty Ltd + Gus Paolone + gus.paolone&calyptech.com +18355 + Avitech International Corp. + Morris Gong + morris&avitechvideo.com +18356 + kevinstevens.info + Kevin Stevens + contact&kevinstevens.info +18357 + Full Frontal Ingenuity LLC (formerly 'Leathern Apron Incorporated') + Paul Talbot + paul&fullfrontalingenuity.com +18358 + EPITECH + Notre RN nationnal + dacydays2003&hotmail.com +18359 + Intransa, Inc. + Walt Drummond + walt.drummond&intransa.com +18360 + INTEC Web and Genome Informatics Corporation + Hitoshi Nozaki + nozaki_hitoshi&webgen.co.jp +18361 + A3 Security Consulting Co., Ltd. + Kim jung woong + jwkim&a3sc.co.kr +18362 + Hong Kong CSL Limited + K. S. Luk + kar-shun.luk&hkcsl.com +18363 + Epok, Inc. + Perry Dillard + perry.dillard&epokinc.com +18364 + Nezabudka + Eugene Samusev + bitlz&nezabudka.ru +18365 + Konzumbank Rt. + VASARHELYI Daniel + vasarhelyid&konzumbank.hu +18366 + Applicata + Dr. Ivan Bakardzhiev + ivan&applicata.bg +18367 + Nanoteq PTY LTD + Pieter Claassens + pc&nanoteq.com +18368 + ComUnics Informatik GmbH + Henning Hoecker + oidadmin&comunics.de +18369 + Phoenix Zeppelin spol. s r.o. + Pavel Kusicka + pavel.kusicka&p-z.cz +18370 + Internet Consult SARL + Vladimir Guevezov + contact&i-consult.fr +18371 + Pickering Interfaces Ltd + Dan Roberts + dan_roberts&pickeringswitch.com +18372 + BalaBit IT Ltd. 
+ Balázs Scheidler + balazs.scheidler&balabit.hu +18373 + CSAS - Computer Systeme Arno Seidel + Arno Seidel + aseidel&gmx.li +18374 + Andri Saar + Andri Saar + andri&kalatehas.net +18375 + Ruprecht-Karls-Universitaet Heidelberg + Michaela Wirth + Michaela.Wirth&urz.uni-heidelberg.de +18376 + ORGA Systems GmbH + Marco Klein + mklein&orga-systems.com +18377 + Universita` degli Studi di Trieste + Paolo Piccoli + piccoli&units.it +18378 + Eastern Kentucky University + bob clark + bobby.clark&eku.edu +18379 + ExtraQuest, Corporation + Edward Shephard + eshephard&extraquest.com +18380 + NEXTFOR S.A. + Josu Eguileor + josu&nextfor.com +18381 + Suramericana de Seguros S.A. + Angela Bernal + angebeja&suramericana.com.co +18382 + Bank of Valletta plc + Herbert Zarb + herbert.zarb&bov.com +18383 + Enterux Solutions + Mitul Limbani + mitul&enterux.com +18384 + Reflective Solutions Ltd. + Ray McDermott + ray.mcdermott&reflective.com +18385 + Ecki Patang Org + Claes Hammer + hammer&skip.informatik.gu.se +18386 + oshiire + sho kisaragi + sho&oshiire.to +18387 + Litrius Group + Joel Wickard + jwickard&litriusgroup.com +18388 + AstraZeneca + Bo Skallefell + bo.skallefell&astrazeneca.com +18389 + Old Genie Hottabych Company + Mikhail Y. Roudin + m.roudin&hottabych.ru +18390 + MTS S.p.A. + Fabrizio Cazzulini + cazzulini_fabrizio&mtsspa.it +18391 + Learning Objects Network Inc. + Tracy Flynn + tracy.flynn&ima-gnu.com +18392 + Applied Personal Computing, Inc. + Daniel Bingham + dan&apci.net +18393 + Goblin + Tom Linton + toml&iinet.net.au +18394 + Fred Chef Inc. + Martin Groelz + mgroelz&yahoo.com +18395 + vivaxis SAS + Pierre Lang + pierre.lang&vivaxis.com +18396 + Dassault Aviation + Matthieu WILLM + matthieu.willm&dassault-aviation.com +18397 + InfoSys + Erik Campos M. + ecampos&infosys.cl +18398 + jMind Consulting Ltd. + Robert Brautigam + robert.brautigam&jmind.hu +18399 + K/P Corporation + Fred Yano + FYano&kpcorp.com +18400 + Kommandoraden + Anders Dahlqvist + anders&kommandoraden.info +18401 + Xiqa Networks + Jeff Thomas + jeff&xiqa.net +18402 + SecurityMetrics, Inc. + Brad Caldwell + brad&securitymetrics.com +18403 + City of Chicago - Office of Budget Management + Gene Chin + gchin&cityofchicago.org +18404 + United Nations Development Programme + Anton Shmagin + oid-admin&undp.org +18405 + Redbird Informatics, Inc. + Jose T. Montoya + Jose.Montoya&RBInformatics.com +18406 + Internetworking Ltd. + Geoff Campbell + Hostmaster&internetworking.co.uk +18407 + Transcendence.net + John West + jwest&transcendence.net +18408 + Association Ohana + Nicolas MASSART + nima&association-ohana.net +18409 + Planet A.S. + Ahmet Ekim + ahmet.ekim&planet.com.tr +18410 + Häme Polytechnic + Jari Kivelä + jari.kivela&hamk.fi +18411 + DataCenterTechnologies + Frank Valcke + frank.valcke&datacentertechnologies.com +18412 + Epoch Design Ltd + Matthew Lenny (IT Administrator) + itadmin&epochdesign.co.uk +18413 + EGIM + Geoffroy DESVERNAY + info&esm2.imt-mrs.fr +18414 + Imaginative IT Limited + Martin Siddall + msiddall&imaginativeit.co.uk +18415 + ELM Computer Technologies Ltd. + Chris Kwan + ckwan&elm.com.hk +18416 + Victron bva + Wim Van de Mosselaer + wimvdm&victron.be +18417 + Argon Technologies Inc. 
+ Morgan Nelson + maillist1&argontech.net +18418 + Endace Technology + Stephen Donnelly + stephen&endace.com +18419 + glaven.org + Adam Bultman + ContactEmail +18420 + cerebrasoft + Rob Butler + rob_butler&hotmail.com +18421 + tetera.org + Ryo ONODERA + ryo&tetera.org +18422 + AetherStorm.com + Jeff Rodriguez + iana&gurugeek.com +18423 + NetHarmonix, Inc. + Jim Blandford + domainadmin&netharmonix.com +18424 + MooreWare + Ray Moore + rcm&netharmonix.com +18425 + COMCO AG + Bernd Boom + bernd.boom&comco.de +18426 + ---none--- + ---none--- + ---none--- +18427 + Patrick Näf + Patrick Näf + iana.pen&herzbube.ch +18428 + American Registry for Internet Numbers + ARIN NOC + noc&arin.net +18429 + IT Consulting & Education Ltd. + Orlin Marinov + orlin&itce.com +18430 + GenerationE Technologies LLC + Michael E. Fannan Jr. + michael.fannan&generationetech.com +18431 + Stmk. Krankenanstalten GesmbH + Robert Ferk + robert.ferk&kages.at +18432 + ---none--- + ---none--- + ---none--- +18433 + deny all + Stephane CAUNES + scaunes&deny-all.com +18434 + University of Leipzig + Steffen Rettig + steffen.rettig&uni-leipzig.de +18435 + Advanced Computer Systems, ACS S.P.A. + Antonio Volono + a.vollono&acsys.it +18436 + Bell Mobility + Elie Nasser + elnasser&mobility.com +18437 + W.I.S.V. Christiaan Huygens + Adriaan de Jong + pccom&ch.tudelft.nl +18438 + ---none--- + ---none--- + ---none--- +18439 + Composite Software, Inc. + Stephen Ahuero + sahuero&compositesw.com +18440 + peerix + Atanas Argirov + aa&peerix.org +18441 + National Football League + Michael Palmer + palmerm&nfl.com +18442 + Vimatix + Jordan Sheinfeld + jordan&vimatix.com +18443 + Netsys.IT GbR + Peter Steiert + info&netsys-it.de +18444 + Sandwich.Net, LLC + James Renken + jrenken&sandwich.net +18445 + Datacard Corporation + Alla Johnson + alla_johnson&datacard.com +18446 + Ascendant Technologies, Inc. + Corporate Security Administrator + corpsec&aitva.com +18447 + Willis Consulting + Chris Willis + cwillis&chriswillis.tzo.com +18448 + Vanquish, Inc. + Paul Weiss + pgweiss&vanquish.com +18449 + Alexander Gretha + Alexander Gretha + developer&chicken.sh +18450 + SevenL Networks Inc. + Todd Berman + tberman&sevenl.net +18451 + PC-Pool Physik, TU-Berlin + Christian Hennig + tutoren&physik.tu-berlin.de +18452 + Tempod + Peter Jansson + snmpcontact&tempod.se +18453 + The City Of Calgary + Vin Bhola + vin.bhola&calgary.ca +18454 + OATSystems, Inc + Vadim Pesochinskiy + vadim&oatsystems.com +18455 + MEDICAL INFOMATION CO., LTD. + Kiyoshi Terasawa + info&mdi.co.jp +18456 + NetKlass Technology Inc. + Jerry Wu + jerry&netklass.com +18457 + European Directorate for the Quality of Medicines & HealthCare, Council of Europe (EDQM) (formerly 'European Directorate for the Quality of Medicines') + Christopher Jarvis + christopher.jarvis&edqm.eu +18458 + Image Processing Techniques Ltd + Graeme Griffiths + graeme&imageproc.com +18459 + Semafor Informatik & Energie AG + Sorin Marti + mas&semafor.ch +18460 + Powell Companies + Michael Powell + mikepowell&powellnetworking.com +18461 + Osix Inc. + Osman Erkan + hoe&ttnet.net.tr +18462 + Lifetree Convergence Ltd + S.Sadasivam + sss_7777&yahoo.com +18463 + Spektar JSC + Krassimir Slavchev + krassi&bulinfo.net +18464 + ROMATSA R.A. + Ionut POSTARU + netadmin&romatsa.ro +18465 + TradeLink L.L.C. + Alex Stade + hostmaster&trdlnk.com +18466 + Aviva Communications Inc. 
+ Gin-Pao Lu + glu&avivacommunications.com +18467 + AdvancePCS + Rick Wenban + rick.wenban&advancepcs.com +18468 + hughes network systems + John Merritt + jmerritt&hns.com +18469 + UNIS LUMIN INC. + Rachel Chang + rchang&unislumin.com +18470 + Ingenieurbuero Michael Kappler + Michael Kappler + Michael.Kappler&Kappler-edv.de +18471 + e-Scripps + Ryan Scripps + ryan&e-scripps.com +18472 + Crystal Computer Corporation (Georgia) + Earl Franklin + earl.franklin&crystalcc.com +18473 + Form-IT + Preben S. Madsen + psm&form-it.dk +18474 + Etype Co. + Andrey Cherezov + cherezov&etype.net +18475 + Research Institute of America + David Levine + david.levine&riag.com +18476 + Actinium Network Sdn Bhd + Siow Teck Seng + siowts&actinium.org +18477 + Bundesministerium des Innern + Jan-Peter Trojok + janpeter.trojok&bmi.bund.de +18478 + Wehay AB + Jonas Israelsson + jonas&wehay.com +18479 + FS-VDSL + Bernard MARTI + bernard.marti&francetelecom.com +18480 + RF-DESIGN + Ralf Mayr + rf-design&t-online.de +18481 + Trustworx GmbH + Kai Danielmeier + kai.danielmeier&trustworx.de +18482 + ARCWave Inc. + Sanjay Ravindra + sravindra&arcwaveinc.com +18483 + ROMATSA R.A. + Ionut POSTARU + netadmin&romatsa.ro +18484 + ProcoliX + Koen de Jonge + info&procolix.com +18485 + Marietta College + Michael S. Robinson + robinsom&marietta.edu +18486 + Quickshift, Inc. + Kenneth East + keast&interactivesi.com +18487 + Everbank + Robb Penoyer + tholloway&firstalliancebank.com +18488 + Interbaun Communications, Inc. + Saxon Jones + netadmin&interbaun.net +18489 + CarrierComm, Inc. + Mark Lindsey + MLindsey&CarrierComm.com +18490 + Bolsa Nacional de Valores + Rigoberto Torres + soporte&bnv.co.cr +18491 + NII Voskhod + Mikhail Vinogradov + mikle&nii.voskhod.ru +18492 + Tacoma, spol.s r.o. + Veroslav Kaplan + vkaplan-prog&tac.cz +18493 + ProcoliX + Koen de Jonge + ldap&procolix.com +18494 + S.W.I.F.T. SCRL + General Counsel, Legal Department + isabelle.vasseur&swift.com +18495 + GRAU DATA GmbH + Werner Stephan + info&graudata.com +18496 + Rolotec AG + Edwin Schwab + admin&rolotec.ch +18497 + gec UOC Group + Suport Tecnologia + suport&gec.es +18498 + A&M Consulting Co + Ramon Aguirre + aguirrr&netsolve.net +18499 + PD3 Tecnologia em Redes e Sistemas Digitais + Leonardo Pereira Santos + lsantos&pd3.com.br +18500 + Oddpost.com + Iain Lamb + oid-admin&oddpost.com +18501 + Code Fusion cc. + Stephan Buys + s.buys&codefusion.co.za +18502 + Mendel University of Agriculture and Forestry + Petr Dadák + dadak&pef.mendelu.cz +18503 + Intelli7 Inc. + Phillip H. Zakas + phillip&intelli7.com +18504 + Brainstorm Internet + Network Department + network&brainstorminternet.net +18505 + Tele-Consulting GmbH + Reto Lorenz + rlorenz&tele-consulting.com +18506 + CAcert Inc. + Philipp Gühring + philipp&cacert.org +18507 + Precise Time and Frequency, Inc. + David S. Briggs + dbriggs&ptfinc.com +18508 + MURASHITA CONSTRUCTION INDUSTRY CO., LTD. + Jyunji Ohta + ohta&murashita.co.jp +18509 + IVK Smart Software Solutions LLC + Igor Kovalenko + gogo-m&inbox.ru +18510 + Reid Enterprises + Ian Reid + ireid&freeuk.com +18511 + Centre Informatique Region Bruxelloise + Mommens Jean-Pierre + jpmommens&cirb.irisnet.be +18512 + Schaake + Christiaan Schaake + chris&schaake.nu +18513 + pete23.com + Mr Peter Windle + oidspace&pete23.com +18514 + BekArts International + Richard Beckmannflay + richard&bekarts.com +18515 + Nortech Management Ltd.
+ Simon Hodgson + simon&nortechonline.co.uk +18516 + Eclipse Networking Limited + Mark Lang + mark&eclipse.net.uk +18517 + Orbit Research Ltd + Gavin Foster + gfoster&orbitresearch.co.uk +18518 + Crea d.o.o. + Matej Trampus + matej.trampus&crea.si +18519 + Jahi Networks Inc. + Raju Datla + raju&jahinetworks.com +18520 + APSI Inc. + Bruce + bishkin&core.com +18521 + TakeNET + Vinícius Augusto Sacramento Ferreira + vinicius&takenet.com.br +18522 + Unassigned + Removed 2011-03-08 + ---none--- +18523 + Liverton Limited + Brendan Law + Brendan.Law&Liverton.com +18524 + Empresa Metalúrgica Central de Acero "José Valdes Reyes" + Victor J. Calderín Rodriguez + vjcr2&yahoo.es +18525 + robinbowes.com + Robin Bowes + robin-iana&robinbowes.com +18526 + Fisk Labs, Inc. + Dave Fisk + dave&fisklabs.com +18527 + Maersk Data Organisator A/S + Joergen Richter + jri&maerskdata.dk +18528 + Cetelem + Olivier Toche + dom-tech&cetelem.fr +18529 + Swisscom-Eurospot + Fred Brunken + registration&eurospot.com +18530 + NightCity.net + Matt Kleifgen + mkleifgen&comcast.net +18531 + DecisionPoint Applications, Inc. + Clayton Mitchell + claytonm&dpapps.com +18532 + The University of New Brunswick + Ben Steeves + bcs&unb.ca +18533 + NTH A.G. + Damir Jurica + oidadmin&nth.ch +18534 + MCI + Jim Potter + jim.potter&mci.com +18535 + Echostar Data Services + Marc Carmen + marc.carmen&echostar.com +18536 + Oolong project + Takashi Suzuki + suzuki&oolong.jp +18537 + TAM Internet service Ltd. + Mamoru Nishida + nishida&tamnet.co.jp +18538 + R.L. Phillips, Inc. + Mike Houston + mhouston&rlphillips.com +18539 + flagwireless.com + Chris Greenough + Chris.Greenough&nau.edu +18540 + Webmeesters + R. van der Steenhoven + commerce_iana&webmeesters.nl +18541 + Roland Baum System Consult + Roland Baum + rbaum&nexplosion.de +18542 + Sphinx Information Technologies Inc. + Ibrahim Hamouda + linux&sphinxinfotech.net +18543 + Innove Communications + Melvin A. Esperas + melvin&globequest.com.ph +18544 + ecofinance + Wolfgang Bachmann + wolfgang.bachmann&ecofinance.com +18545 + Joint-stock company "Trading System Administrator of Wholesale Electricity Market Transactions" + Aleksander Lashmanov + ats.iana&rosenergo.com +18546 + FAST Video Security AG + Andreas Malzahn + support&fast-security.com +18547 + Amanda Emily + Amanda Emily + aemily&colsd.org +18548 + Relationalware + Jon Pounder + jonp&relationalhost.com +18549 + Kealia Inc. + Dapeng Zhu + dapeng&kealia.com +18550 + Gemplus do Brasil + FELIPE CARASSO + felipe.carasso&gbn-br.com +18551 + Anders Bystrup IT + Anders Rostgaard Bystrup + anders_bystrup&hotmail.com +18552 + Firevue Security Systems + Jason DeStefano + support&firevue.com +18553 + Arachne Prime Inc. + Patrick Golec + iana&arachneprime.com +18554 + Xenotropic Systems + Jeremy McDermond + mcdermj&xenotropic.com +18555 + youneek organisation limited + Adrian Aitken + mib&youneek.org +18556 + Zaurum ECP project + Konstantin Boyandin + konstantin&zaurum.com +18557 + Hurray!Solution Ltd. + zhenhua zou + zhzou&hurray.com.cn +18558 + WUSHIGONG Ltd. + calayman + calayman&linux.net +18559 + Softpak International + Shahid Saeed + shahid&softpakint.com +18560 + Makedonski Telekomunikacii, MTnet + Stevco Risteski + noc&mt.net.mk +18561 + StarNIC + Md. Mahbubur Rahman + webmaster&starnic.net +18562 + Jazz Telecom, S.A. + Jose Ignacio Garcia - David Garcia + jgarcial&jazztel.com +18563 + DarkPhuture Technologies, Research & Development + Aaron J. 
Angel + Aaron.J.Angel&DarkPhuture.org +18564 + BFI-Burgenland + Karl SCHUH + k.schuh&bfi-burgenland.at +18565 + SCHUH-TV + Karl SCHUH + k.schuh&schuh-tv.at +18566 + Hogeschool Gent + Denis Amelynck + denis.amelynck&hogent.be +18567 + Epic Systems Corporation + Epic Hostmaster + hostmaster&epic.com +18568 + Service Availability Forum + Bill Swortwood + bill.swortwood&motorola.com +18569 + Ensequence, Inc. + Aslam Khader + akhader&ensequence.com +18570 + GreenPulse Limited + Ron Segal + ron&greenpulse.com +18571 + Mindhut Limited + Ron Segal + ron&mindhut.com +18572 + ORC + Paul A. Johnson + paul.johnson&nsc1.net +18573 + Educate Inc. + Allen Wooden + allen.wooden&educate.com +18574 + Hush Communications Canada Inc. + Kevin Roeder + kevin&hushmail.com +18575 + Sagem Morpho, Inc. + Jeff Sparks + jeff.sparks&morpho.com +18576 + Typosign AG + Stephan Pulver + spulver&music.ch +18577 + Contemporary Cybernetics Group, Inc. + R Martin Aherron + martin&cybernetics.com +18578 + Eardown + Scott Ware + scott&eardown.com +18579 + Clear Edge Networks LLC + Mark Lewis + mark&clearedgenetworks.com +18580 + Whetstone Software + James Whetstone + jameswhetstone&hotmail.com +18581 + SPD-Bundestagsfraktion + Fredo Sartori + sartori&spdfraktion.de +18582 + Slack Ltd. + Robert Stucke + r_stucke&yahoo.com +18583 + Vmware Carbon Black (formerly 'Bit 9 Inc.') + Tracy Camp + tcamp&carbonblack.com +18584 + Avanex Co. + Phillip Wang + phillip_wang&avanex.com +18585 + GuangDong Poson Company.Ltd + Chenxiangyun + chenxiangy&gsta.com +18586 + bbassett.net + Brian Bassett + bbassett&bbassett.net +18587 + Fortess Ltd. + Konstantin Boyandin + register&fortess.com +18588 + Mekhanika-Service, Ltd. + Igor Osin + osinig&mekhanika.ru +18589 + GlobeTOM (Pty) Ltd + Ben Hechter + ben.hechter&globetom.co.za +18590 + Sterci SA + Martinez Marc + marc.martinez&sterci.com +18591 + HORUS HARDWARE S.A. + Fernando Ramirez + fernando.ramirez&horushardware.com +18592 + Universita' degli Studi di Milano-Bicocca + Luisella Sironi + sysadmin&unimib.it +18593 + Ministry of Finance, Tax Administration of the Republic of Slovenia + Breda Hudej + Breda.Hudej&gov.si +18594 + Globalvision Media + Peter Brownell + peter&globalvision.com +18595 + Berufsbildende Schulen I - Uelzen + Ulrich Drolshagen + dr&bbs1-uelzen.de +18596 + Professional Computers Services Organization + Don Davis + don.davis&pcso.com +18597 + HELIOS Software GmbH + Martin Reinders + martin&helios.de +18598 + Firebox Internet Technologies + Scott Roeder + admin&firebox.ca +18599 + Fachhochschule Hagenberg + Wolfgang Friesenecker + wolfgang.friesenecker&fh-ooe.at +18600 + Marzek Etiketten GmbH + Mr. Martin Petraschek + petri&marzek.at +18601 + Blue Mountains Grammer School + Garry Optland + goptland&bmgs.nsw.edu.au +18602 + MediaZen Corp. + Heungkyu Lee + hklee&mediazen.co.kr +18603 + Powercn + xueshaowen + xueshaowen&powercn.com +18604 + Delta E.S., a.s. + Ivan Sajban + ivan.sajban&delta.sk +18605 + RiS Gmbh + Thomas Brandtner + brandtner&ris.at +18606 + Orbitel, Inc. + Stanislav Grozev + sgrozev&orbitel.bg +18607 + KLM Royal Dutch Airlines + Friso de Wolf + friso-de.wolf&klm.com +18608 + Synlogic AG + Kaspar von Gunten + Kaspar.VonGunten&synlogic.ch +18609 + JP.DIAS SERVIÇOS DE INFORMATICA LTD. + JAIR DIAS + jp.dias&terra.com.br +18610 + University at Buffalo + Joel W Murphy + jmurphy&buffalo.edu +18611 + Alenia Spazio S.p.A. + Giuseppe Tomasicchio + g.tomasicchio&roma.alespazio.it +18612 + smspundit.com + Ankur Shah + ashah&comcast.net +18613 + CGL Consulting + A. 
Taiwo + dns.registrar&cglcons.com +18614 + Albert White Technologies + Alex Demenschonok + alex&albertwhite.com +18615 + North American Networks Corporation + Bob Eckert, Jr. + bobjr&nanc.com +18616 + nextWLAN Corporation + Carlos Rios + crios&nextwlan.com +18617 + Timestock, Inc. + Patrick O'Sullivan + registrar&timestock.com +18618 + Publi Van Dyck N.V. + Marc Cuypers + m.cuypers&mgvd.be +18619 + web-m GbR + Oliver Goepferich + ogoe&web-m.de +18620 + Jaw Networks + Brian Ross + bross&jawnetworks.com +18621 + Hinson and Associates + Charlie Hinson + charlie&belwood.net +18622 + K-n-A Ltd. + Archie Arevalo + archie_arevalo&msn.com +18623 + micro systems + Marc Balmer + marc&msys.ch +18624 + Lehrstuhl fuer Technische Dienstleistungen + Clemens Sickinger + clemens.sickinger&wi.tum.de +18625 + UniVision (Canada) Ltd. + Matthew Ho + matthew&univisioncanada.com +18626 + UNET BV + C.C. Gondelach + gondelach&unet.nl +18627 + Bakasquared + Douglas Richard + accela&bakasquared.com +18628 + Microelectronics Technology Inc. + Carl Yang + yang_carl&mti.com.tw +18629 + Iclass Co. Ltd. + Gary Shi + garyshi&iclass.cn +18630 + Reeuenta Design Service Taiwan Ltd. + WenZheng Wu + Reeuenta&hotmail.com +18631 + DARG + Brooks Sizemore + domains&darg.net +18632 + SA SST Informatique + Stéphane Cachat + sst&cachat.org +18633 + STATER + Fred Schuit + a.schuit&stater.com +18634 + Advanced Software Production Line, S.L. + David Marín + david&aspl.es +18635 + m-otion GmbH + Thomas Hager + hager&m-otion.at +18636 + UVT s.r.o. + Yevheniy Demchenko + zheka&uvt.cz +18637 + JNS Inc. + Hiroshi Miyazaki + miya&jnsjp.com +18638 + Avedya + Stefan Praszalowicz + stefan&avedya.com +18639 + Kinnikinnick Foods Inc. + Michael Shields + michael&kinnikinnick.com +18640 + Digital Envoy, Inc + Jeff Burdette + iana&digitalenvoy.net +18641 + Royal Military College of Canada + Richard Cameron + cameron-r&rmc.ca +18642 + C&C Power, Inc. + Chris Heinz + chris&ccpower.com +18643 + Montgomery County Government + John Castner + john.castner&montgomerycountymd.gov +18644 + Daniel Skadlubowicz + Daniel Skadlubowicz + daniel&skadlubowicz.de +18645 + Cesart Creation inc. + Charle Demers + charle.demers&cesart.com +18646 + Electric Mail Co. + Bill Skrypnyk + bskrypnyk&electricmail.com +18647 + DEXTER COMMUNICATIONS, INC. + Dong-Sun Hong + sunny&dextercomm.com +18648 + HTU Graz + Hans-Peter Lackner + hans-peter.lackner&htu.tugraz.at +18649 + Magnet.ch AG + E. Pflimlin + e.pflimlin&magnet.ch +18650 + Projectiondesign AS + Rolf Gjelsvik + rolf.gjelsvik&projectiondesign.com +18651 + GLANCE AG + Felix Berger + felix.berger&glance.ch +18652 + Zentrum für Bioinformatik, Hamburg + Erik Pagel + No.Spam&pagel_At_zbh.uni-hamburg.de +18653 + Eraia srl + Giancarlo Bassetto + digarbo&sofialab.com +18654 + Scottish Police Authority (formerly 'Central Scotland Police') + Stephen McDermid + stephen.mcdermid&spsa.pnn.police.uk +18655 + IUNDS AG + Hannes Seidel + h.seidel&iunds.com +18656 + AirRunner Technologies + greg phillips + greg.phillips&airrunner.com +18657 + GoldenGate Software, Inc. + David Acquistapace + dacquistapace&goldengate.com +18658 + Xcitel Ltd. + Tsiki Rosenmann + tsikir&hotmail.com +18659 + Chung Hua University + Huai-Jen Liu + hjliu&chu.edu.tw +18660 + WebKMS + Phil Hegarty + phillip.hegarty&ntlworld.com +18661 + Elitech Information Technology Co.,Ltd.
+ cui qinglai + cql&263.net.cn +18662 + Checkmk GmbH + Alexander Wilms + alex.wilms&checkmk.com +18663 + Laakirchen Papier AG + Walter Spanlang + walter.spanlang&heinzelpaper.com +18664 + IPnP + Oleg Antoshin + oleg&ipnp.co.il +18665 + Axelero Internet Szolgáltató Rt. + Héjjas Gábor + hejjas.gabor&axelero.com +18666 + Payroll Sweden AB + Bo Strinnholm + bo.strinnholm&payroll.se +18667 + HBware + Hans Boone + Hans&HBware.com +18668 + freshmeat.net, part of OSDN, Inc. + Patrick Lenz + scoop&freshmeat.net +18669 + Memorial Hermann healthcare System + Albert Tillery + albert_tillery&mhhs.org +18670 + General Atomics + Aaron Bostwick + aaron.bostwick&ga.com +18671 + Knovative, Inc. + Kirk Friedman + kfriedman&knovative.com +18672 + Secured Computer Concepts + M. D. Parker + mdpc&panix.com +18673 + Webjorn Data & Natverkskonsult + Mattias Webjorn Eriksson + mattias&webjorn.org +18674 + telecomSoftware + Dan Deneweth + ddeneweth&telecomse.com +18675 + Strategy & Technology ltd + Paul Sweetland + pauls&layer3.co.uk +18676 + LEOFOO DEVELOPMENT CO., LTD + johnson hsu + johnson.hsu&leofoo.com.tw +18677 + Datenzentrale Baden-Wuerttemberg + Martin Riedel + m.riedel&dzbw.de +18678 + Winston Industries + Ed Hatfield + ejhatfield&winstonind.com +18679 + Behr Internet Solutions, Inc. + Marc D. Behr + marc&behrsolutions.com +18680 + Con Edison Communications + Help Desk + stuartm&electricfiber.com +18681 + Travelping GmbH + Holger Winkelmann + hw&travelping.com +18682 + Dr. Brunthaler IITech GmbH + Stefan Brunthaler + brun&drb.insel.de +18683 + CJ Ltd. + Chengjie + dearmillet&163.com +18684 + Davey Control Systems + Rob Davey + daveyr&telkomsa.net +18685 + Portal München GmbH & Co. KG + Christoph Michel + michel&portalmuenchen.net +18686 + InControl Technology Inc + Gary Paquette + gpaquette&incontroltechnology.com +18687 + Information Flow + Ulrich Boeck + U.Boeck&InformationFlow.de +18688 + IU.TV Ltd + Andrew Harrison + andy&ideasunlimited.tv +18689 + Neonetix, LLC. + Jeremy Kister + iana-snmp-request&jeremykister.com +18690 + Healthlink Limited + Dr Edwin Ng + edwin.ng&healthlink.net +18691 + Bussi + Andreas Bussjaeger + _bussi&web.de +18692 + Katana Technology, Inc. + Thomas Hazel + thazel&katana-technology.com +18693 + NDS Media Solutions + Jon Sharp + jrsharp&ndsmedia.com +18694 + isometry.net + Robin Breathe + robin&isometry.net +18695 + Soluciones Telematicas Avanzadas S.L. + Ignacio Bernal + ibernal&solumatic.com +18696 + Idiom Communications LLC + David Sharnoff + idiomMIBnumber&trust.idiom.com +18697 + Syniverse Technologies Asia Pacific Limited (formerly 'IVRS (International) Limited') + AP-ProductDevelopment, MTS + AP-ProductDevelopment&syniverse.com +18698 + Tonghua Wanghang Information & Technology Co.,Ltd. + lizhixin + lzx&thwhgf.com +18699 + DAPYXIS NETWORK LIMITED + Simon Hung + simon&dapyxis.com +18700 + ACOM CO.,LTD + Masahiko Okukawa + mokukawa&acom.co.jp +18701 + Minplan + Keun-woo Ryu + updong&minplan.net +18702 + REUTERS S.A. + Christophe Gevrey + christophe.gevrey&reuters.com +18703 + Siix d.o.o. + Tomaz Borstnar + tomaz.borstnar&siix.com +18704 + Institute of Mathematics of the Romanian Academy + Ionel Molnar + Ionel.Molnar&imar.ro +18705 + Dynamix Promotions Limited + Igor Palamarchuk + igor&vectorkiev.com +18706 + Corporacion Aceros Arequipa S.A. + Rafael Caceres + rcaceres&aasa.com.pe +18707 + Muskingum College + Lewis M. Dreblow + dreblow&muskingum.edu +18708 + Guardium Inc. + Izar Tarandach + izar_tarandach&guardium.com +18709 + INOTESKA s.r.o. 
+ Juraj Podolan + j.podolan&inoteska.sk +18710 + Kaytec Ltd. + Kiyoshi Tsujimura + kiyoshi&kaytec.co.jp +18711 + Burger Knowledge Consultancy + Bas Burger + yaa&euronet.nl +18712 + Ajin Techline co., Ltd + Kwangsoo, Kim + kimks&ajintech.com +18713 + FH JOANNEUM Gesellschaft mbH + Peter Gritsch + peter.gritsch&fh-joanneum.at +18714 + Lanzhou University of Technology + Li Zhiyuan + lizy&lut.cn +18715 + Grand Electronic Co.,Ltd. + Tongzhen Shao + tonyshao&grandtp.com +18716 + Grand Electronic Co.,Ltd. + Tongzhen Shao + tonyshao&grandtp.com +18717 + UCB SA/NV + Philippe Snoeck + philippe.snoeck&ucb-group.com +18718 + Association of Hellenic Internet Users + Gerasimos Melissaratos + gmelis&eexi.gr +18719 + O2 Ltd + David Harte + security&o2.com +18720 + FarSite Communications Limited + Dermot Smith + snmp&farsite.co.uk +18721 + EMICT Ltd. + Victor Ustinov + ustas&emict.com.ua +18722 + Universidade Federal do Parana + Elias P. Duarte Jr. + elias&inf.ufpr.br +18723 + Digicast Networks Inc + Jon Braunsma + jon&digicastnet.com +18724 + EDJ Enterprises, Inc. + Jason Gouldie + jgouldie&edj.com +18725 + Paycom Billing Services, Inc. + Clarke Retzer + clarke&paycom.net +18726 + Isala Klinieken + Jeroen Bos + j.bos&isala.nl +18727 + Apollo Group, Inc. + OID Master + oidmaster&apollogrp.edu +18728 + Unassigned + Removed 2011-12-01 + ---none--- +18729 + RB Holdings + Richard Belanger + Rich3800&aol.com +18730 + C&A srl + Raffaello Galli + r.galli&com-and.com +18731 + INGENIERIA DE SISTEMAS MULTIAGENTE, S.L. + ALBERTO J. CORDERO + INFO&ISM-CENTRAL.NET +18732 + Horner Brothers Print Group + Hamish Gibson + hamish&hbp.co.uk +18733 + Magnifire Networks + Mark Shahaf + marks&magnifire.com +18734 + Wildher ICT Solutions + Jaco Koppelaar + info&wildher.nl +18735 + Hayes Lemmerz International + Jason Baldini + jbaldini&hayes-lemmerz.com +18736 + Ertius Consulting + Rob Weir + rweir&ertius.org +18737 + Northrop Grumman + Ric Tibbetts + ric.tibbetts&ngc.com +18738 + DjE + Dominic J. eidson + noc&the-infinite.org +18739 + CraftAnalogy, Inc. + Judd Maltin + oid_manager&craftanalogy.com +18740 + J. S. Thrower and Associates Ltd. + Ron Zayac + iana&jsthrower.com +18741 + Leurck Software + Robert Leurck + rleurck&rjleurck.org +18742 + SACEM + DUCHEMIN JEROME + jerome.duchemin&sacem.fr +18743 + Entrada Internet Systems, Inc. + Weldon Harris + weldonharris&bigplanet.com +18744 + Idea Tec Sahar (ITS) Ltd + Hussein Pour Sultani + hpsultani&its.ir +18745 + ERANET srl + Andrea Girotto + andrea&era-net.it +18746 + Matsushita Electric Europe + Paul Bolton + paul.bolton&eu.panasonic.com +18747 + American Fibertek, Inc. + Jim McLaughlin + jmclaughlin&americanfibertek.com +18748 + Engelschall + Ralf S. Engelschall + rse&engelschall.com +18749 + The OpenPKG Project + Ralf S. Engelschall + openpkg&openpkg.org +18750 + solar + Bruno Póvoa + bpovoa&ig.com.br +18751 + IPDeliver Inc. + Mark Summer + mark&ipdeliver.com +18752 + Financial Engines, Inc. + David Smith + dsmith&FinancialEngines.com +18753 + Adelphia Communications + Robert Schultz + robert.schultz&adelphia.com +18754 + University of Wisconsin - Parkside + Steven Premeau + premeau&uwp.edu +18755 + MavriQ Technologies, LLC + Haim Jacobson + hj.mavriq&verizon.net +18756 + Michal Charvat + Michal Charvat + michal&lounsko.cz +18757 + United Systems Access Inc. + John Kane + iana-mgr&usacsp.com +18758 + Stordyne Corporation + Andre Mellul + andrem&stordyne.com +18759 + Netlink Technology Ltd. 
+ KM LI + system&mkpc.net +18760 + Guangdong Electronic Certification Authority + Wangzhixiong + 2244103&qq.com +18761 + Regal Cyber Limited + Tan Tse + tan&regalcyber.com +18762 + Sumtech Inc + Ed MacDonald + edmacdonald&hotmail.com +18763 + TheNetWerk + Patrick Hannah + phannah&thenetwerk.net +18764 + ---none--- + ---none--- + ---none--- +18765 + Comtel Electronics GmbH + Dmitry Tsitsilin + d.tsitsilin&comtel-online.de +18766 + Procitec GmbH + Thomas Wolf + procitec&gmx.de +18767 + Kaba Management + Consulting AG + Hans-Peter Sauter + hsauter&kgh.kaba.com +18768 + Virtual Royal Danish Air Force + Thomas Schütze + vrdaf&vrdaf.org +18769 + University Computer Center (URC) Banja Luka + Dragan Simic + dsimic&urc.bl.ac.yu +18770 + SUNCOM Systems + Stefan Undorf + sundorf&suncom.de +18771 + The Salvage Association + Kevin Cave + systems&wreckage.org +18772 + Microbus plc + Edward West + ewest&microbus.com +18773 + Internet Creation Co.,Ltd. + Sombat Chanprajakwanich + sombat&thaiclassified.com +18774 + Banca Monte dei Paschi di Siena S.p.A. + Ierardi Alessandro + alessandro.ierardi&banca.mps.it +18775 + Neuronenwerk + Network-Services + wtf&neuronenwerk.de +18776 + eAcceleration Corp + Korey Chapman + korey&eAcceleration.com +18777 + Thomas Luzat IT-Services + Thomas Luzat + thomas&luzat.com +18778 + ARCANE NETWORKS + Benoit Lecocq + lecocq&arcane-networks.com +18779 + Synergy Information Services, Inc. + Vladan Pulec + sales&synfoserv.com +18780 + ANSES + Anzaldi Sebastian + sanzaldi&anses.gov.ar +18781 + Zagamma Labs + Vladimir Linek + vinil&zagamma.cz +18782 + Rozhled.cz + Petr Bydzovsky + petr.bydzovsky&rozhled.cz +18783 + Interwoven, Inc. + Todd Scallan + tscallan&interwoven.com +18784 + Great Clips, Inc. + Vladan Pulec + vladan.pulec&greatclips.com +18785 + Collation Inc. + Johan Casier + johan.casier&collation.com +18786 + Global-Arts + Thomas Frank + uid-info&global-arts.de +18787 + Clubhaus PLC + Andy Stewart + a.stewart&clubhaus.com +18788 + MyNym + Michael Glasson + mg&netspeed.com.au +18789 + Pro QC International Ltd + johnnywu + johnnywu&proqc.com.tw +18790 + EcGuard Technology Co. Ltd. + Yan Shi + rockyangel&126.com +18791 + Chatchalerm Namwongprom + Chatchalerm Namwongprom + chat&cscoms.net +18792 + HORIBA Europe Automation Division GmbH + Robert Kloosterman + rok&head.de +18793 + Norske Skogindustrier ASA + Oystein Saursaunet + oystein.saursaunet&norskeskog.com +18794 + Uni Regensburg + Ulrich Werling + ulrich.werling&rz.uni-regensburg.de +18795 + ASPLinux + Pavel Gashev + pax&asplinux.ru +18796 + Horizon.Net S.A + Kuba Urbaniak + kuba.urbaniak&horizon.net.pl +18797 + Lunics GmbH + Lutz Badenheuer + info&lunics.de +18798 + digital design GmbH + Gert Ziegler + gert.ziegler&dides.de +18799 + Layer14 + James MacDonald + james&layer14.net +18800 + WRK Computer Systems + Bill Kaltwasser + sysadmin&wrkcs.net +18801 + Forrest Aldrich + Forrest Aldrich + forrie&forrie.com +18802 + AUCONET GmbH + Mario Apitz + hostmaster&auconet.com +18803 + MEDCOM sp. z o.o. + Wlodzimierz Frydrych + wlodekf&medcom.com.pl +18804 + Advisec AB + Magnus Lööf + magnus.loof&advisec.com +18805 + Struktuur Meedia + Anti Veeranna + anti.veeranna&automatweb.com +18806 + Calm Computer Corp.
+ Kuniharu Yamane + admin-group&calm.co.jp +18807 + MAPLE NETWORKS Co.,Ltd + KyoungSik Eom + maple&maplenetworks.co.kr +18808 + Fondation pour l'institut de hautes études internationales et du développement (formerly 'Graduate Institute of International and Development Studies') + Wilfred Gander + webmaster&graduateinstitute.ch +18809 + NetLink Consulting LLC + B. Radermacher + bwr&netlink-consulting.net +18810 + Inotera Memories Inc. + Aswan Yang + aswanyang&hotmail.com +18811 + Colgate-Palmolive Company + Jay Brown + jay_brown&colpal.com +18812 + Sageway Computer Solutions Pte Ltd + Li Hao + lihao&sageway.com.sg +18813 + DEMARINA SDN. BHD. + Adrian Lo + adrian&demarina.com +18814 + RoCNet Linux-Services + Claus Rosenberger + Claus.Rosenberger&rocnet.de +18815 + ConD GmbH + Gerd Grell + ggrell&cond.de +18816 + Andrea Fino + Andrea Fino + af&faino.org +18817 + Ethernet Powerlink Standardisation Group (EPSG) + Hans Weibel + hans.weibel&zhwin.ch +18818 + Dinsa Soluciones + Alejandro Salgado Godoy + asalgado&dinsa.es +18819 + Pardes Group SA + Marc A. Brown + marc.brown&pardes.ws +18820 + RAMCS + Ben Harper + benharper&canada.com +18821 + m-Wise UK Ltd. + Nir Nir Simionovich + nir&m-wise.com +18822 + FOTEK ,Ltd. + Liu Hai + fotek&21cn.com +18823 + COM-PAN s.c. + Wojciech Kutyła + wojciech.kutyla&com-pan.pl +18824 + av + matuura + saaki&din.or.jp +18825 + Topio, Inc. + Betty Woychowski + betty&topio.com +18826 + PSA Corporation Limited + Gan Poh Seng (ITIOD) + psgan&psa.com.sg +18827 + LOBOK - projects + R.Knotek + rkn&centrum.sk +18828 + danet GmbH + Dirk Schwarz + Dirk.Schwarz&danet.de +18829 + COM.BOX Internet Service GmbH + Tobias Gablunsky + support&cbxnet.de +18830 + Jingo Digital + Peter Clarke + peter&jingo.com +18831 + InFormaL + System Administrator + admin&informalsoftware.net +18832 + Universität Potsdam + Rolf Adams + adams&rz.uni-potsdam.de +18833 + MICROTROL SRL + Antonio Nachez + antonionachez&microtrol.com.ar +18834 + Competitionhill + Rob + rmallory&san.rr.com +18835 + Government of the District of Columbia + Paul Liderman + paul.liderman&dc.gov +18836 + Flathead Valley Community College + Rick Owens + oidmaster&fvcc.edu +18837 + Safran Trusted 4D Inc. + Ryan Johnson + Ryan.Johnson&nav-timing.safrangroup.com +18838 + AEAT + Jose M Perez + permar.dit&aeat.es +18839 + Data Consulting Group, Inc. + David Polito + david&polito.com +18840 + Milwaukee Public Schools + Janese Christie + christjs&milwaukee.k12.wi.us +18841 + Acorn Packet Solutions + Michael Skerritt + mike&acornpacket.com +18842 + Linuxlösungen Michael Rößler + Michael Rößler + mroessler&linuxloesungen.de +18843 + CPR Software LLC + Robert Patrick + cpr-iana&cprsoftware.com +18844 + Triacta Power Technologies, Inc. + Dave Perry + dperry&triactapower.com +18845 + eCartsoft.com + George Sayes + ecartsoft&yahoo.com +18846 + State of Tennessee + Michael Lopez + michael.lopez&state.tn.us +18847 + Objectpark Software GbR + D. Theisen + info&objectpark.net +18848 + UIS Abler Electronics Corp. Ltd. + louis chang + louis&pecsicon.com.tw +18849 + OneDataCentral + David Chewning + sec_collection&yahoo.com +18850 + CoSystems, Inc. + R. Srinivasan + rsrini&cosystems.com +18851 + Association DSPNet + Philippe Chevalier + chevalier&dspnet.fr.eu.org +18852 + Gerdes Aktiengesellschaft + Carl-Friedrich Braun + cfbraun&gerdes-ag.de +18853 + EURILOGIC Technologies + Christophe Chambre + cchambre&eurilogic.fr +18854 + Axiliance + Henry jean-luc + jlh&axiliance.com +18855 + Beijing Fibridge Co., Ltd.
+ Yingyong Lou + louyingyong&fibridge.com +18856 + Monitor Electronics Ltd + Vassilis Gryparis + v.gryparis&monitor-electronics.gr +18857 + dh computersysteme + Dietmar Hummel + dietmar.hummel&dhcomputersysteme.de +18858 + RDR Technologies LLC + Richard Reich + richard&rdrtech.com +18859 + PaX AG + Dirk Wachsmuth + d.wachsmuth&pax.de +18860 + Friends of the Earth International + Robert Celina + sysadmin&foei.org +18861 + Educational Standards and Certifications Inc. + Pedro Aguayo + paguayo&teachscape.com +18862 + WillMedia Corp. + Masataka Miura + miura&willmedia.co.jp +18863 + Sisters of Charity Health Service + David Roffe + shodgson&stvincents.com.au +18864 + Touring Club Suisse (TCS) + Alain MERLIERE + AMerliere&tcs.ch +18865 + Turunch Technologies + Bulent Kaytaz + bulent.kaytaz&turunch.com +18866 + White & Stover Innovations, LLC + John White + jwhite&altf4.com +18867 + Meetinghouse Data Communications + Dmitry Teleganov + dmitryt&mtghouse.com +18868 + InterNiche Technologies Inc + Atul Trivedi + atul&iniche.com +18869 + University of Helsinki + Minna Harjuniemi + minna.harjuniemi&helsinki.fi +18870 + OpsPoint + Grahame Ian Curtis + grahame&mindspring.com +18871 + FirstAttribute AG + Peter Schäfer + p.schaefer&firstattribute.com +18872 + The Groovy Corporation + Bill Schindler + bill-tgc&bitranch.com +18873 + Works Operating Company + John Zaitz + jzaitz&works.com +18874 + S & S Professionals, Inc. + Grant Simpson + gsimpson&sspros.com +18875 + Scorpion Software Corp. + Dana Epp + dana&scorpionsoft.com +18876 + TEVRON, LLC + Richard Byrne + rbyrne&tevron.com +18877 + Darwin Solutions LLC + Jordan Hotzel + hotzelj&hotmail.com +18878 + Asschem + Mr. David Curran + postmaster&asschem.com +18879 + LinuxHeaven + James Downer + james&inter-site.demon.co.uk +18880 + American Museum of Natural History + Ari Jort + arijort&amnh.org +18881 + Prometeia Srl + Stefano Bracalenti + stefano&prometeia.it +18882 + Adventist International Institute of Advanced Studies (AIIAS) + Roy Karuyan + rkaruyan&aiias.edu +18883 + Projektgroup LDAP University of Siegen + Juergen Wehren + iana.5.jwehren&spamgourmet.com +18884 + Shanghai Sansi Technology Co., Ltd. + Zhang Qian + zhangq&sansitech.com +18885 + geeks.pl + Tomasz Wojewodka + santini&poczta.wp.pl +18886 + PFU LIMITED + koji oeki + ooeki&pfu.fujitsu.com +18887 + University of Massachusetts Boston CPCS + Saul Baizman + saul.baizman&umb.edu +18888 + Guoxin Telecom System Ltd + Zhou Wenhua + zhouwenhua&guoxin.cn +18889 + "Arhangelsk Television Company" Ltd. + Konstantin Klimchev + koka&atvc.ru +18890 + Integrated Communication Technologies + Brent W. Sylvester + brent&ictx.com +18891 + The Closed Joint-Stock Company «DeltaBank» + Igor Kalinskov + Igor_Kalinskov&deltabank.ru +18892 + GEMMA Systems, spol. s r.o. + Mr. Jaromir SEDLAK + sedlakj&gemma.cz +18893 + SMS Siemag AG + Dr. Ferdinand Klaus + fkla&sms-siemag.de +18894 + Technische Fachhochschule Wildau + Jens-Volker Steinert + steinert&hrz.tfh-wildau.de +18895 + Moravska zemska knihovna + Petr Zabicka + zabak&mzk.cz +18896 + Esilog Consulting, S.L.
+ Jordi Fernandez + jordi.fernandez&esilog.com +18897 + Cellopoint International Corporation + Eric Jan + ericjan&ms2.hinet.net +18898 + Freie Universitaet Berlin (FU-Berlin) + Steffen Hofmann + fudis&fu-berlin.de +18899 + Isvara + Dan Ellis + dan&a-h.net +18900 + evosoft GmbH + Jürgen Schinker + juergen.schinker&evosoft.com +18901 + Capital Lease GmbH + Kurt Weidlich + kurt.weidlich&capital-lease.com +18902 + Aetat + Trygve Moe + tmo&adir.aetat.no +18903 + MANDOZZI ELETTRONICA S.A. + Lorenzo Morellini + lorenzo.morellini&mandozzi.ch +18904 + Propylon + Derek Higgins + derek.higgins&propylon.com +18905 + Total Card, Inc. + Tom Likely + tech&totalcardinc.com +18906 + stefi + Xavier Renard + xrenard&ressource-toi.org +18907 + Pironet NDH AG + Jürgen Jatzkowski + jjatzkowski&pironet-ndh.com +18908 + Inpriva, Inc. + Don Jorgenson + djorgenson&inpriva.com +18909 + Service Management Software + Harald Gläser + harald&t-glaeser.de +18910 + Bombardier Transportation Inc. + Paolo Valsorda + paolo.valsorda&ca.transport.bombardier.com +18911 + Professional Data Management Again Inc. + Eddie Brown + eddie.brown&pdmagain.com +18912 + N-able Technologies Inc. + Adrian Gilbert + agilbert&n-able.com +18913 + Statna pokladnica + Ladislav Rusnak + ladislav.rusnak&hp.com +18914 + Empneusis Internet Services + George B. Patelis + gpatelis&mail.gr +18915 + Nautronix Ltd + Carl Gherardi + postmaster&nautronix.com.au +18916 + Shanghai TransEngines Technologies Co.,Ltd + Hard Sun + sunxian&yahoo.com +18917 + Shanghai Futures Exchange + Wenjun ZOU + zou.wenjun&shfe.com.cn +18918 + Bringe Informationstechnik GmbH + Thomas Bätzler + hostmaster&bringe.com +18919 + Alfa21 Outsourcing, S.L. + Jacobo Tarrio Barreiro + jtarrio+iana&alfa21.com +18920 + Agencia Notarial de Certificacion + Enric Hernández + ehernandez&notariado.org +18921 + NextiraOne Deutschland GmbH + Harald Gläser + harald.glaeser&nextiraone.de +18922 + Trellis Tecnologia Ltda. + Julio Sirota + jsirota&trellis.com.br +18923 + Yamaguchi University + Media and Information Technology Center + info-cc&ml.cc.yamaguchi-u.ac.jp +18924 + Relston Consulting Limited + Peter Tyrrell + petet&relston.co.uk +18925 + Prior + Christian Prior + public&prior-I.De +18926 + JTT "Novel-IL" + Valentin A. Alekseev + V.Alekseev&novel-il.ru +18927 + tandav enterprises + Amith Kattil Prabhakaran + kpamith&yahoo.com +18928 + Areca Technology Corporation + Roland Chang + roland&areca.com.tw +18929 + Interwise, Inc. + Ori Keren + okeren&interwise.com +18930 + OXW + Thierry Sudan + contact&oxw.fr +18931 + DaveLinux + David Blomberg + dblomber&davelinux.com +18932 + Naeilnet Inc. + Jeongkyu Ryu + jkryu&naeiln.com +18933 + ActivNetworks + Serge Cuesta + serge.cuesta&activnetworks.com +18934 + Vivex GmbH + Vivex Hostmaster + office&vivex.de +18935 + Essex Electronics, Inc. + Garrett Kaufman + productdevelopment&keyless.com +18936 + National Institute of Advanced Industrial Science and Technology + Mitsuo Yokokawa + m.yokokawa&aist.go.jp +18937 + Lynk + Travis Smith + tsmith&lynksystems.com +18938 + Rubix Information Technologies, Inc. + Renny Koshy + renny.koshy&rubixinfotech.com +18939 + Retail Decisions Inc.
+ Matt Wixson + mwixson&red-usa.com +18940 + Kristopher Johnson Consulting + Kristopher Johnson + snmp&kristopherjohnson.net +18941 + Vermont State Colleges + John A Schrader + john.schrader&vsc.edu +18942 + Liquid Computing Corporation + Jonathan Bosloy + jonathan.bosloy&liquidcomputing.com +18943 + HealthPartners + Pete Anfinsen + Pete.M.Anfinsen&HealthPartners.com +18944 + nethype GmbH + Marc A. Lehmann + enterprise-number&nethype.de +18945 + Dynamic Infosystems Ltd. + Arif H. Raj + arif&dynamicinfosystems.com +18946 + Digital Species Ltd + Andy Raffle + andy&digitalspecies.com +18947 + CANOPEE SECURITY + Michael GIBON + michael.gibon&free.fr +18948 + SONATEL + Mohamed Gana Gueye + MohamedGana.GUEYE&orange-sonatel.com +18949 + Golden Eagle Enterprises Ltd + David Rodham + david.rodham&goldeneagle.co.uk +18950 + LAN Force Inc. + Samuli Kotimaki + samuli.kotimaki&lanforce.fi +18951 + Fast Lane Institute for Knowledge Transfer GmbH + Michael Steinmetz + admin&flane.de +18952 + Prudential Financial + Antonio Figueroa + antonio.figueroa&prudential.com +18953 + Altkom Akademia S.A. + Aleksander Adamowski + aleksander.adamowski&altkom.pl +18954 + Brandywine Communications + Gary Smith + garysmith&brandywinecomm.com +18955 + Digital Identity Ltd. + Stephen Ridley + oid.admin&digitalidentity.co.nz +18956 + Trusted Network Technologies, Inc. + Michael J. Slifcak + slif&trustednetworktech.com +18957 + Robert's Computer & Electrical Service + ROBERT + jkrb&optusnet.com.au +18958 + EZNETSOFT + yhkim + yhkim&eznetsoft.co.kr +18959 + sichuan normal university + Mr. He zhilong + hzl&sicnu.edu.cn +18960 + Larsen & Toubro Infotech Ltd + Karuppu Samy + ksamy&lntinfotech.com +18961 + Auditor revizijska druzba d.o.o., Ptuj + Andrej Mrsek + andrej&auditor-revizija.si +18962 + LRP + Horst Hendler + horst.hendler&lrp.de +18963 + EDN Sovintel + Evgueni Tiourine + etiourine&sovintel.net +18964 + ELEKTRONIK HENGARTNER AG + Friedrich von Salis + fsa&hengartner.ch +18965 + Netyantra Inc. + Kamal Bhambhani + kamal&netyantra.com +18966 + Unassigned + Returned 2003-12-11 + ---none--- +18967 + Oxford County Telephone and Telegraph Company + Jonathan E. Dunn + jon&oxfordnetworks.com +18968 + 012 goldenlines Ltd + Yaniv Cohen + yanivc&012.net +18969 + University of Maryland Baltimore + Kent Buckingham + kbucking&umaryland.edu +18970 + Expert SA + Moissonnier Aurélien + amoissonnier&expertsa.fr +18971 + G&S Sistemas de Información, S.L. + Victoriano Giralt + vic+iana&gssi.es +18972 + Panama Canal Authority + Gaspar Modelo Howard + gmhoward&pancanal.com +18973 + TuXic.nl + Jaap Vermaas + iana&tuxic.nl +18974 + Universal Business Matrix, LLC + Stephen Hord + steve.hord&ubmatrix.com +18975 + BRG16 + Stefan Haslinger + shaslinger&ada.rg16.asn-wien.ac.at +18976 + Badger Alarm and Control, LLC + Mary E Overby + mary&badgerac.com +18977 + NIC-IQ Ltd + Al-Dileme Adham + aldileme&spray.se +18978 + CXO Systems Inc. + Chetan Gadgil + cgadgil&cxosystems.com +18979 + EVERYWHERECOMMUNICATIONS.NET + CHARLES M. SHEPHERD + CSHEPHERD&triad.rr.com +18980 + Cypak AB + Jakob Ehrensvärd + jakob&cypak.com +18981 + Reserved + Removed 2013-02-28 + ---none--- +18982 + Link-Yug Ltd. + Victor Sheldeshov + support&linky.ru +18983 + AOK PLUS – Die Gesundheitskasse für Sachsen und Thüringen + Rico Rieger + rico.rieger&kubus-it.de +18984 + Magos Consulting, Ltd.
+ Giorgos Magos + magos&magos-consulting.com +18985 + Insinova AG + Jens Albrecht + jens.albrecht&insinova.ch +18986 + FleetBoston Financial Corporation + Ward Goodwin + ward_r_goodwin&fleet.com +18987 + Ralf Meister + Ralf Meister + ralf.meister&claranet.de +18988 + Shanghai Eastimage Equipments Co.,LTD + CAO,Guogang + caoguogang&vip.sina.com +18989 + Day Dreams And Information Technologies + Michael C. Day + res0f6ja&verizon.net +18990 + CNC a.s. + Lubomir Gelo + lgelo&cnc.sk +18991 + IOP Publishing Ltd + Peter Haworth + pmh&edison.ioppublishing.com +18992 + WEBForce GmbH + Marco Reichmuth + m.reichmuth&webforce.ch +18993 + ObjectFusion, L.L.C. + Joe Holt + joeholt&mchsi.com +18994 + Olix + David Liouville + dliouville&vraiment-pas.net +18995 + CommSpeed, LLC + Adam Towarnyckyj + adamt&commspeed.net +18996 + Globix Corporation + Clint Adams + cadams&globix.com +18997 + Ibrix Corp. + Dinesh Venkatesh + dinesh&ibrix.com +18998 + Redstone Consulting, LLC + Guy Yost + gyost&redstone-consulting.com +18999 + Health Management Corporation + Kenan Peters + kpeters&choosehmc.com +19000 + Dongyang Telecom Ltd. + Kyu-Ho, CHOI + khchoi&dyt.co.kr +19001 + Research Center of Computational Mechanics, Inc. + Masashi Kodama + system&rccm.co.jp +19002 + Sun Yat-sen (Zhongshan) University + Mr. Shang Ercong + shang&sysu.edu.cn +19003 + SENAS.NET + Usha + usha&visolve.com +19004 + CreationPoint Systems, Inc. + Mazda Marvasti + mazda&creationpoint.com +19005 + AdytumSolutions, Inc. + Duncan McGreggor + info&adytumsolutions.com +19006 + Polish Professional Publishers Ltd. + Ryszard Krakowiak + rkrakowiak&pwp.pl +19007 + 3C Systems Oy + Antti Suanto + antti.suanto&ccc.fi +19008 + GateHouse + Per Engberg + pch&gatehouse.dk +19009 + Clever IT di Ivan Raimondi + Ivan Raimondi + ivan.raimondi&cleverit.ch +19010 + Bluetop Technology Co., Ltd. + songqianli + sql&bluetop.com.cn +19011 + Jacarta Ltd. + Colin Mocock + colin&jacarta.co.uk +19012 + Net Evidence (SLM) Ltd + Richard Thomas + rmt&net-evidence.com +19013 + Sonario + Yoram Mizrachi + yoram&sonario.com +19014 + Magix s.r.o. + David Bucek + davidbucek&magix.cz +19015 + oulman.org + James Oulman + jamie&oulman.org +19016 + Scholl Consulting + Rob Scholl + rob&scholls.net +19017 + XTEND Consulting, LLC + Stephen Thompson + stephen.thompson&xtendconsult.com +19018 + Dandre + Mudi Dandan + info&dandre.hu +19019 + Coastal Carolina University + Mark Allen + mallen&coastal.edu +19020 + ryanscool + Ryan Gyure + ryan&ryanscool.com +19021 + Ensemble Designs, Inc. + David Wood + engineering&endes.com +19022 + Invocom Ltd. + John Clarke + john.clarke&invocom.com +19023 + LMU + sanjeev + sanju_slp&yahoo.co.in +19024 + Bixby Telephone Company + Dan White + sysadmin&olp.net +19025 + OSLiNK Spolka z o.o. + Maciej Swierk + maciek&oslink.pl +19026 + Dirk Gorny Unternehmensberatung + Dirk Gorny + Dirk.Gorny&GMX.de +19027 + TriAWorks, Inc. + Kurt Goolsbee + kurt.goolsbee&earthlink.net +19028 + Sputnik, Inc. + Nick Avgerinos + nicka&sputnik.com +19029 + Nittotsushinki Co.,Ltd. + hiwatashi mitsuhiro + hiwatasi&nittotsushinki.co.jp +19030 + Suva + Thomas Vaderna, IFS + vat&suva.ch +19031 + LogIn S&C GmbH + Dr. Werner Geigle + werner&login-gmbh.de +19032 + Apollis AG + Andre Siegert + andre.siegert&apollis.com +19033 + Moniforce B.V. + Henk de Koning + henk.de.koning&moniforce.com +19034 + 3SP, Investigação e Desenvolvimento de Tecnologias, Lda. + Daniel Pereira + info&3sp.pt +19035 + Omnirei s.r.l. 
+ Romano Rapallini + rrapallini&omnirei.net +19036 + Etherboot Project + Marty Connor + iana-pen&etherboot.org +19037 + Bolsa Nacional de Valores + Rigoberto Torres + soporte&bnv.co.cr +19038 + WebNet, Ltd + Vladimir E. Protaschuk + vladprot&mail.ru +19039 + National Research Center for High Performance Computers + Ji Haitao + jiht&ict.ac.cn +19040 + Trusted Computer Solutions, Inc. + James E. Maple + jmaple&tcs-sec.com +19041 + Apparent Networks Inc. + Fred Klassen + fklassen&apparentnetworks.com +19042 + ACBR Computadores Ltda. + Ricardo Quintela + rquintela&acbr.com.br +19043 + XiTrust Secure Technologies GmbH + Georg Lindsberger + georg.lindsberger&xitrust.com +19044 + JSC "Kredyt Bank (Ukrajina)" + Volodymyr Kuzhel and Ivan Lesnik + kuzhel&wucb.lviv.net +19045 + MyNetwork System Co.,Ltd + HairongWan + wanhairong&hotmail.com +19046 + Lenovo Enterprise Business Group + Joe Bolan + jbolan&lenovo.com +19047 + Magerealm Enterprises + Chris Giard + cgiard.iana&magerealm.com +19048 + Fambus + Eric Bus + iana&fambus.nl +19049 + Scalix Corporation + Andrew Palay + andy.palay&scalix.com +19050 + Conchis, LLC + Greg Rhoades + gar&conchis.net +19051 + Martin Thorpe + Martin Thorpe + iana&met24.net +19052 + Seekamp Enterprises + Scott Seekamp + sseekamp&worldnet.att.net +19053 + KUBOTEK Corporation + Koichi Otsuka + netadmin&kubotek.co.jp +19054 + Intraperson + E Venkat Ramana + e_v_rmn&yahoo.com +19055 + Naviscan PET Systems, Inc. + Irving Weinberg + inweinberg&aol.com +19056 + Access Computech Pvt Ltd. + Ketan Upadhyay + ketan&accesscomputech.com +19057 + Exavio, Inc. + Humphrey Liu + hcliu&exavio.com +19058 + BISON Systems AG + Richard Muri + richard.muri&bison-systems.ch +19059 + moreCom A/S + Mickael Fontaine + mfo&morecom.no +19060 + UXComm + George Vanecek + gvanecek&uxcomm.com +19061 + Werner Wiethege + Werner Wiethege + iana&3112.org +19062 + Tuxee Network + Frederic Jolliton + iana&tuxee.net +19063 + Alterlane + Xavier Carcelle + xavier.carcelle&alterlane.fr +19064 + Bureau Ingénierie Richard Domon SA + Stéphane PETIT + stephane.petit&bird-fr.com +19065 + PETIT-FR + Stéphane PETIT + stephane&petit-fr.net +19066 + Macnetix OHG + Dirk Wahrheit + wahrheit&macnetix.de +19067 + Universidade Católica Portuguesa + Jorge Cerol + jcerol&ci.ucp.pt +19068 + Travel Only + Douglas Hammond + djhammond&travelonly.net +19069 + xenocastle + Joerg Neikes + xenoist&web.de +19070 + Kevcom Microsolutions + Kevin Tsang + kevin&kevcom.ca +19071 + I-Assure + Barrett McGuire + barrett.mcguire&i-assure.com +19072 + Magnasync + Mariusz + cti&magnasync.com +19073 + Sundowner Trailers Inc + Chris Childress + chrisc&sundownertrailer.com +19074 + Fidelis Security Systems, Inc + Gene Savchuk + savchuk&fidelissecurity.com +19075 + Poštna banka Slovenije, d. d. + Miran Bizjak + miran.bizjak&pbs.si +19076 + xenocastle + Joerg Neikes + xenoist&web.de +19077 + Xing-Lab + Yue Xing + yxing&ualberta.ca +19078 + Fujitsu Component Limited + Naoyuki Nagao + nagao.naoyuki&fcl.fujitsu.com +19079 + Silicon Data International Co., Ltd. + Nick Chu + nickchu&silicon-data.com.tw +19080 + Tsukasa Enterprise + Etsuji Nakai + qyp02361&nifty.ne.jp +19081 + Wuhan Jetway Information Security Industry Co.,Ltd + Zhengping Liang + lzp_wd&sohu.com +19082 + Voelcker Informatik AG + Matthias Bauer + matthiasb&voelcker.com +19083 + Albanet Ltd + Mr. T. van Stratum + t.vanstratum&albanet.co.uk +19084 + Mimic Productions + Richard + richard&mimic.ca +19085 + The Davidge Group + William M.
Davidge + bill&davidge.net +19086 + Maytech Publishing Ltd + Mike Futerko + mike&maytech.net +19087 + NTx BackOffice Consulting Group GmbH + Roman Gruber + oid&ntx.at +19088 + Maersk Data SPECTIVE + Niels Putzer - System Management + npu&maerskdata.dk +19089 + Infor.org Inc. Taiwan + Wu-Shun, Wu + dannydai&mail.com +19090 + Bernhard-Riemann-Gymnasium Scharnebeck + BRGS Admin Team + admins&brgs.de +19091 + NuGenesis Technologies, Inc + Jeffrey Paquette + oidadmin&nugenesis.com +19092 + Exa Networks Ltd + Thomas Mangin + thomas.mangin&exa-networks.co.uk +19093 + Hofsvang + Bjorn Hofsvang + bjorn&hofsvang.no +19094 + peerVue LLC + Brian Batchelder + brian&peervue.com +19095 + TiL Solutions inc. + Luc Tremblay + luc.tremblay&TiLSolutions.com +19096 + info2cell.com FZ-LLC + Mohammed Ersan + ersan&info2cell.com +19097 + DefaultCity + Patrik Bodin + the&pal.pp.se +19098 + PAL Communications + Patrik Bodin + the&pal.pp.se +19099 + Whitewater Mobile LLC + Peter L. Squillante + Peter.Squillante&WhitewaterMobile.com +19100 + OHANA WIRELESS INCORPORATED + Igor Knyazev + igorknyazev&ohanawireless.com +19101 + Vouch Integrated Technologies (P) Ltd. + Santhosh B.S. + santhosh&vouchti.com +19102 + Karlsruher Lebensversicherung AG + Hans-Martin Lechner + hm.lechner&karlsruher.de +19103 + CHMS, Inc. + Sid Furst + sidf&chmsinc.com +19104 + BOUYGUES, SA + HOBERDON, Olivier + ohoberdon&bouygues.com +19105 + Fabian Fagerholm Consulting + Fabian Fagerholm + fabbe&paniq.net +19106 + Deep Eddy Internet Consulting + Chris Garrigues + cwg-iana&deepeddy.com +19107 + boojum mobile, inc + fred mcclain + mcclain&boojummobile.com +19108 + I&TC Solutions Pty. Ltd. + Ian Corner + ian.corner&itcg.com.au +19109 + PacketMotion, Inc. + Alex Chen + achen&packetmotion.com +19110 + Conduit Networks, Inc + Guy Reams + guy&conduitnetworks.com +19111 + eBdesk Ltd + Subroto + subroto&ebdesk.com +19112 + RJL Computer Consulting, LLC + Roger Leardi + roger&rjlcc.com +19113 + Server Place LTDA + Fernando Augusto M Silva + fams&linuxplace.com.br +19114 + Proximion Fiber Systems AB + Johan Widman + johan.widman&proximion.com +19115 + Bernhard-Riemann-Gymnasium Scharnebeck + BRGS Admin Team + admins&brgs.de +19116 + Antidot + Fabrice LACROIX + lacroix&antidot.net +19117 + MAMM d.o.o. + Tomislav Sereg + info&mamm.hr +19118 + Europlex Technologies Ltd. + Michael Priess + michaelp&europlex.ie +19119 + Blue Chip Technology Ltd + Robert Mortimer + rmortimer&bluechiptechnology.co.uk +19120 + 800onemail Inc. + OID Admin + oid.admin&800onemail.com +19121 + Augmentix Corporation + David McKinley + david.mckinley&augmentix.com +19122 + Yang Arts + Holly Yang Aaron + holly&yangarts.com +19123 + Virtual Charting + David L. Kern, Jr. + dave&virtualcharting.com +19124 + Pexim d.o.o. + Aleksandar Milosevic + aca&pexim.co.yu +19125 + Planar Systems, Inc. + Desmond Moleski + dez.moleski&planar.com +19126 + AECODI + Julian Inza + presidente&aecodi.org +19127 + SpamPet + Ron Allred + ron-iana&neversleep.net +19128 + KomKom Electronics + Lakutin Alexey + kernel&comcom.ru +19129 + Sanek Systems + Gordon J. Sanek + GordonSanek&Earthlink.net +19130 + MenuSiS Technologies (Pty) Ltd. + A. J . Greyling + greylina&iafrica.com +19131 + Stiftung Synanon + Joachím Gerwig + edv&synanon.de +19132 + The Cheshire Web Mill + Robert Mortimer + rmortimer&linux.fsnet.co.uk +19133 + Baycom Opoto-Electronics Technology Co., Ltd. 
+ Porter + porter&baycom.com.tw +19134 + Texocom Inc + Mahesh Sundara + mahesh&texocom.com +19135 + Darkerhosting.net + spike grobstein + spike666&mac.com +19136 + Trichord, Inc. + Mark Jeweler + jewelerm&trichord-inc.com +19137 + Sebastian Staiger Computer Services + Sebastian Staiger + OID&staiger-service.de +19138 + Bright Prospects LLC + Richard Basch + oid&ra.bright-prospects.com +19139 + Edgewater Networks, Inc. + Larry Cromwell + lcromwell&edgewaternetworks.com +19140 + Matthew R. Wilson + Matthew R. Wilson + iana&mattwilson.org +19141 + Quest Serviced Apartments + Colin Stenhouse + questit&questapartments.com.au +19142 + Carlo Strozzi (formerly 'ScriptaWorks s.r.l.') + Carlo Strozzi + carlo&strozzi.it +19143 + SecureAge Technology + Teow-Hin Ngair + teowhin&secureage.com +19144 + Arjuna + St. Widjanarko + imut_s&lycos.com +19145 + Xonix + Eugene Xonix + ugen&yahoo.com +19146 + Jubatus Corporation + Vaughn Vernon + vaughn&jubatus.com +19147 + Università degli Studi di Milano + Divisione Telecomunicazioni + divtlc&unimi.it +19148 + acticall + Emmanuel Leclercq-bayle + e.leclercqbayle&acticall.com +19149 + Innovaciones Microelectrónicas S.L. + Rafael Romay-Juárez + rafael.romay&anafocus.com +19150 + Ville d'Aulnay-sous-bois + Martins Dominique + dmartins&aulnay-sous-bois.com +19151 + WRX Slovakia s.r.o. + Tomas Zelem + zelem&wrx.sk +19152 + Camargo e Souza SC/LTDA + Paulo Sergio Lemes Queiroz + oluap&plugon.com.br +19153 + Instituto Politécnico Do Porto + Paulo Calçada + pcalcada&ipp.pt +19154 + Corporacion Nacional de Angioplastia + Antonio Huaman + cnaantonio&terra.com.pe +19155 + AirManage Networks Ltd. + Gangadharan R. Morekonda + mganges&airmanage.com +19156 + Cutting Edge + Michael Ehman + mpe&cuttedge.com +19157 + Chongqing Changsong Network Information Co.LTD + John Wang + postmaster&csnetwork.com.cn +19158 + PlumStreet, LLC + Michael Isiminger + michael.isiminger&plumstreet.net +19159 + Shelton School District + Josh Hulbert + JHulbert&sheltonschools.org +19160 + ICAT Managers, LLC + Joan Gargano + jgargano&icat.com +19161 + POSnet Services, LLC + Christopher Paluch + chris.paluch&posnetservices.com +19162 + Evolving Media Network, LLC + Daniel Stone + dan&evolvingmedia.net +19163 + insen + boudegga mehdi + boudeggamehdi&yahoo.fr +19164 + Initial City Link Limited + A K CORMACK + acormack&city-link.co.uk +19165 + Kanton Solothurn + Bader Kurt + kurt.bader&aio.so.ch +19166 + Alswille Gloabal Services + Segun Fagbemi + olusegun&alswille.com +19167 + CapMon A/S + Peter Valdemar Mørch + pm&capmon.dk +19168 + Fruno S.A. + Andres Valenciano + andres.valenciano&fruno.com +19169 + Volantis Systems Ltd + David Roberts + se&volantis.com +19170 + X-Taskforce s.r.l. + Giacomo Cariello + jwk&x-taskforce.it +19171 + Checkcare Enterprises, LLC + James Daily + james_daily&checkcare.com +19172 + PFM.Net, Inc. + Daniel Fisher + oz&pfm.net +19173 + Rex Consulting, Inc. + Chris Paul + chris.paul&sentinare.net +19174 + Gestalt, LLC + Bob Pollack + rpollack&gestalt-llc.com +19175 + oraise GmbH + Herr Gerold Winselmann + g.winselmann&oraise.de +19176 + SCOMCENTER + Wanli Mu + wlmu&scomcenter.com +19177 + Tandem Systems, Ltd. + Oleg Malkov + info&winagents.com +19178 + Skill Corporation + Christopher Cellucci + enterprisenumber&skillcorp.com +19179 + Raysis Co.,Ltd + Su Gil Lee + lsg&raysis.com +19180 + Guidewire Software, Inc. + Alan Keefer + akeefer&guidewire.com +19181 + Optimum Holding Inc. 
+ Willem Konynenberg + iana-contact&optimumqos.com +19182 + JDA Software + Bill Owen + sctnetops&jda.com +19183 + Herdt Domain Service + Andreas Herdt + iana&andreas-herdt.de +19184 + Widerthan.com + Wonchang Shin + shinwc&widerthan.com +19185 + LinuxCare Ltd. + Xeon Ling + vvww8899&yahoo.com +19186 + Xtramus Technologies + Sam Koo + sam_koo&xtramus.com +19187 + MEK + Martin E. Koch + enterprisenumber&rundumblick.de +19188 + satunol mikrosistem + Ojahan + ojahan&kpptransport.itb.ac.id +19189 + Senshu University + Toru Yoshikawa + toru&acc.senshu-u.ac.jp +19190 + Bildanalyssystem AB + Thomas Fafoutis + thomas.fafoutis&bildanalys.se +19191 + woofertom media + Tom Duke + ubiquitom&earthlink.net +19192 + Iglooz Technologies + Rohit Lodha + rohit_lodha&yahoo.com +19193 + b-next GmbH + b-next Administration + admins&b-next.de +19194 + Interbyte bvba + Tom Snauwaert + tom.snauwaert&interbyte.be +19195 + TRANSRADIO SenderSysteme Berlin AG + Alexander Reisig + a.reisig&tsb-ag.de +19196 + Agarik + Laurent Seror + software&agarik.com +19197 + Eastlink GmbH + Heino Gutschmidt + heino.gutschmidt&eastlink.de +19198 + activ-net GmbH & Co. KG + M. Wortmann + kontakt&activ-net.de +19199 + SHC Netzwerktechnik GmbH + Hans Lang + hal&shc.eu +19200 + MMG + Hans Lang + p9&mmg.de +19201 + Oyster Partners Ltd + Craig Smith + craig.smith&oyster.com +19202 + meto-logic + NiCki trinidad + nrtrinidad&eastern.com.ph +19203 + DAI-Labor + Sahin Albayrak + admins&dai-labor.de +19204 + Milano Medien GmbH + Alexander Gräf + graef&milanomedien.com +19205 + Secardeo GmbH + Ulf Moeller + ulf.moeller&secardeo.com +19206 + Hollins University + Computer Services - Carol Reed + creed&hollins.edu +19207 + Graphic Management Partners Inc. + Bill Ferris + bferris&gmprt.com +19208 + analytiq consulting gmbh + Christoph Fausak + info&analytiq.de +19209 + Caterpillar Inc. + Ken Serrine + kserrine&cat.com +19210 + Cassatt Corporation + Randy Murrish + mush&cassatt.com +19211 + TEZ Georgsberg GmbH + Stefan Lemsitzer + stefan&lemsitzer.com +19212 + University System of Maryland + Suresh Balakrishnan + suresh&usmd.edu +19213 + Raining Data Corporation + John Bramley + john.bramley&rainingdata.com +19214 + Ogilvy & Mather Deutschland GmbH (formerly 'WPP Service GmbH & Co. KG') + Marco Schirrmeister + Marco.Schirrmeister&ogilvy.com +19215 + European Organisation for Research and Treatment of Cancer AISBL / IVZW + Nedostoupof Gilles + gne&eortc.be +19216 + Heringa + E. Heringa + erwinheringa&gmail.com +19217 + iiNet Pty Ltd + Richard Staniforth + ricks&staff.iinet.net.au +19218 + Fortress Networks (Aust) Pty Ltd + Chris Leishman + cleishman&fortress.net.au +19219 + Pi Kappa Alpha - Gamma Tau + Mike Palombo + palomm&pikes-rpi.com +19220 + Local-Web AG + Gunter Burchardt + hw&local-web.net +19221 + Metropolis AG + Mike Bretz + info&metropolis-ag.de +19222 + Eurotek srl + Claudio Tonin + ctonin&eurotektel.com +19223 + Internap Network Services Corporation + John Shriver + jshriver+ietf&mail.internap.com +19224 + OSSBroadNet K.K + Shigeki Hosaka + shosaka&ossbn.co.jp +19225 + Tumel + Bulent Sagel + iftah&web-silicon.com +19226 + Titanium + Thomas KIPP + thomas.kipp&libertysurf.fr +19227 + Blackwood Medical Inc + Lee Yogel + lyogel&blackwoodmedical.com +19228 + Finish Line, Inc. + George Tetreault + ntadmin&finishline.com +19229 + CFOknows, LLC + Leonard Linde + leonard&cfoknows.com +19230 + Graphix Prose, LLC + Perry Dillard + perry&graphixprose.com +19231 + Lumenware, LLC + C. 
Andy Martin + oidadmin&lumenware.com +19232 + LSC Linux Support Center Kft. + Deim Ágoston + deim.agoston&lsc.hu +19233 + Blue Lane Technologies Inc. + Venkat Achyuta + venkat&bluelane.com +19234 + M2 + Ilia Moisseev + moisseev&web.de +19235 + Silver Diamond Services, LLP + Nickolaos Diamantis + Nick&sdspos.com +19236 + CPM SA + Cleverson Silva + admin.internet&cpm.com.br +19237 + becom Informationssysteme GmbH + Simon Fischer + simon.fischer&becom.com +19238 + Moore Systems, Inc. + David Moore + dmoore&MSWEBS.com +19239 + cfSOFTWARE, Inc. + Robert C. Wessel + rwessel&cfsoft.com +19240 + Nanshu Densetsu + Christopher Taylor + cstaylor&nanshu.com +19241 + Cemaphore Systems Inc. + Jeff Looman + jlooman&cemaphore.com +19242 + Tellusoft + Spencer Roberts + sroberts&tellusoft.com +19243 + Titus + Spencer Roberts + sroberts&titusoft.com +19244 + SafeNet Media + Spencer Roberts + sroberts&titusoft.com +19245 + CBPM Software + Jason Wang + john_wang71&hotmail.com +19246 + XWDL + Richard Lin + richard&xwdl.org +19247 + Romat Telecom Ltd. + Gilad Rom + gilad&romat.com +19248 + MANIA Research Group + Eng. Bruno Filipe Marques + bmarq&elect.estv.ipv.pt +19249 + FabianSoft di Fabiano Copeta + Fabiano Copeta + fabiansoft&libero.it +19250 + Image Systems Corporation + David Sorensen + daves&imagesystemscorp.com +19251 + KIP Information Network + Shane Jarych + sjarych&kipinfonet.com +19252 + NuPi Solutions + Nick Partsafas + n_partsafas&yahoo.com +19253 + Jaspert + Joerg Jaspert + iana&ganneff.de +19254 + Dark Blue Sea + Bill Vanderent + snmp&darkbluesea.com +19255 + Vaccius ITsec Pte LTd + Ng Pek Yong + f189a4613d29467300c3ef9c221199de&vaccius.com +19256 + iCADA GmbH + Christoph Gaitzsch + gaitzsch&icada.com +19257 + ProSyntic Ingenieurs b.v. + Caspar van Zon + c.v.zon&prosyntic.nl +19258 + La Joliverie + David Coheleach + admreseau-jol&la-joliverie.com +19259 + Ansync Inc. + Sam Miller + snmp&ansync.com +19260 + MagiQ Technologies, Inc. + Audrius Berzanskis + audrius&magiqtech.com +19261 + Minneapolis College of Art and Design + Computer Support + computer_support&mcad.edu +19262 + Dedicado + Eduardo Cota + cota&dedicado.net.uy +19263 + ET VOILA !! + Xiao Hui LOO + postmaster&etvoila.fr +19264 + Magnus Weis + Magnus Weis + magnusweis&web.de +19265 + Momentum Computer, Inc. + Harry L. White + harry&momenco.com +19266 + Landesamt für Digitalisierung, Breitband und Vermessung (formerly 'Bayerisches Landesamt für Statistik und Datenverarbeitung') + Christian Brensing + christian.brensing&ldbv.bayern.de +19267 + Ajuntament de Palma + Jose de Juan Sola + jdejuan&imi.a-palma.es +19268 + Intercomp Ltd. + Viktor Feurer + feurer.viktor&intercomp.hu +19269 + Hybrigenics SA + Mr Augustin Luton + admin&hybrigenics.com +19270 + Stora Enso Oyj + Eeva Kivelä + eeva.kivela&storaenso.com +19271 + Ministère de l'économie, des finances et de l'industrie (Minefi) + Jean Louis Ferracci + icp&dsi.finances.gouv.fr +19272 + praksys + guillaume pernot + gpernot&praksys.org +19273 + TMBNET + Laurent Fouillé + laurent&ligfy.org +19274 + Services Industriels de Geneve + Gregoire Huet + gregoire.huet&ip-man.net +19275 + Constant Data, Inc. + Justin Banks + justinb&constantdata.com +19276 + Paedagogische Hochschule Ludwigsburg + Mr. Thomas Waller + waller&ph-ludwigsburg.de +19277 + Shenandoah Solutions, Inc. 
+ Kirk Harrison + kharrison&shensol.com +19278 + INOV - INESC Inovação + Fernando Lança + redes&inov.pt +19279 + TMBNET + Laurent Fouillé + laurent&ligfy.org +19280 + China Daily + Feng Zhiqian + fzq&chinadaily.com.cn +19281 + Intrasync, LLC + Matt Miller + matt.miller&intrasync.net +19282 + Institute of Continuous Media Mechanics + Andrey Sozykin + sozykin&icmm.ru +19283 + Sibelius Academy + Jari Toropainen + jtoropai&siba.fi +19284 + TGS Telonic GmbH + Achim Althausen + a.althausen&telonic.de +19285 + BRAZILMALL NETWORK LTDA + Angelo de Andrade Abdalla + angelo_abdalla&hotmail.com +19286 + Universidad de Los Andes + Leonardo Uzcategui + leonu&ula.ve +19287 + META Industriesoftware GmbH + Boris Plivelic + bplivelic&metaind.de +19288 + ECOPETROL S.A. + Andrés Guillermo Pinzón Rueda + andres.pinzon&ecopetrol.com.co +19289 + 3Sharp + John Peltonen + johnp&3sharp.com +19290 + Just Aaron + Aaron + aaron&justaaron.com +19291 + Macrad + Lance Pysher + lpysher&mac.com +19292 + Pirel inc. + Serge Blais + sblais&pirel.com +19293 + Danriver Technologies Corporation (formerly 'Shanghai jisung information technology co.,Ltd') + Yongfeng Zhang + benson.zhang&hotmail.com +19294 + LeapComm Communication Technologies Inc. + Lily Yin + admin&leap-comm.com +19295 + Kolle, IT-Ingeniørfirma ApS + Gunnar Boye Pedersen + iana&kolle.com +19296 + Filterlogix, LLC + Brent + bstephens&filterlogix.com +19297 + DSpace Pty Ltd + Doug Mein + doug.mein&dspace.com.au +19298 + Media Cruise Solutions k.k. + Kazuhiro Takano + ktakano&mcskk.com +19299 + Turkcell Iletisim Hizmetleri A.S. + Muharrem SAR + muharrem.sar&turkcell.com.tr +19300 + Deutscher Sparkassen Verlag GmbH + Ulrich Launer + ulrich.launer&dsv-gruppe.de +19301 + Pulse Software & Consulting Inc. + Theva Markandaier + theva&pulsevoice.com +19302 + CryptCOM Securities, Inc. + Joe Stein + joestein&cryptcomsecurities.com +19303 + Zhejiang Telecom Equipment Factory + Shen Shuqi + tec&zjtec.com +19304 + BNP Paribas Arbitrage + Jean Michel GARCIA + jeanmichel.garcia&bnpparibas.com +19305 + Indio Technologies + Srini Rao + srini_r2&hotmail.com +19306 + FatPipe Networks + Sanchaita Datta + sanch&fatpipeinc.com +19307 + MDS INC. + David Law + dlaw&mds.nordion.com +19308 + Genesee Freenet + Kenny Thomas + kthomas&gfn.org +19309 + chandan + chandan mishra + chandan&hotmail.com +19310 + D.H.S. - Data, Hardware, Software spol s r.o. + Martin Tethal + mtethal&dhs.cz +19311 + Sterling Crane + Jayson David Henkel + jhenkel&sterlingcrane.ca +19312 + Archivas, Inc. + SNMP Registration + snmp&archivas.com +19313 + beu.ch + Heinz Beutler + heinz&beu.ch +19314 + Hein Roehrig IT Consulting GbR + Hein Roehrig + iana&hein.roehrig.name +19315 + Andrew Johnson + Andrew Johnson + andrew&caitlin.gsix.net +19316 + Mitsubishi Motors Corporation + Takayuki Oda + takayuki.oda&mitsubishi-motors.com +19317 + 1310369 Ontario Ltd. + Mario Stargard + mario.stargard&magma.ca +19318 + SPAN International + Igor Peonte + ipeonte&spanltd.com +19319 + CLSA Ltd + Christopher Wong + christopher.wong&clsa.com +19320 + Tomsktelecom, a branch of Sibirtelecom OJSC + Vadim Kozlov + vad&tomsknet.ru +19321 + VSnet + Olivier Crettenand + network&vsnet.ch +19322 + netiq s.r.o. + Marcel Mojzis + marcel.mojzis&netiq.sk +19323 + National Centre for Physics + Mehnaz Hafeez + hafeez&ncp.edu.pk +19324 + Rover Laboratories S.p.A. + Maurizio Zanoni + info&wawnet.biz +19325 + Eroski S. Coop. 
+ Javi Dieguez + javi_dieguez&eroski.es +19326 + Tancsics Mihaly SzSzK + Gergely Polonkai + polesz&techinfo.hu +19327 + Deutscher Skiverband + Anneser Edgar + edgar.anneser&ski-online.de +19328 + TRANSGENE SA + Menguy + menguy&transgene.fr +19329 + Telcotec Ltd. + Rónán Fleetwood + ronan&telcotec.com +19330 + R&K Engineering, Inc. + Marc Bjorklund + marc.bjorklund&rkeng.com +19331 + TECNET GMBH + Haider Awni Karomi + karoumihaider&hotmail.com +19332 + Paperlinx NZ Ltd + Andrew Pope + andrew.pope&spicerspaper.co.nz +19333 + Electro Industries/GaugeTech + Wei Wang + wwang&electroind.com +19334 + Cerzan, Inc. + Steven Camp + Steven.Camp&Cerzan.Com +19335 + Intermax BV + Rene Zeestraten + rene&intermax.nl +19336 + Signull Technologies + Mike Spenard + mikes&signull.com +19337 + Dynamx Internet Services + Justin Hammond + justin&dynam.ac +19338 + Bharti Telesoft International Pvt. Ltd. + support group + support&bhartitelesoft.com +19339 + Fachhochschule Bochum + Andreas Koch + andreas.koch&fh-bochum.de +19340 + Provincia di Reggio Emilia + Cristian Manfredini + c.manfredini&mbox.provincia.re.it +19341 + Foreningssparbanken + Robert Kingsepp + robert.kingsepp&foreningssparbanken.se +19342 + Pinuts media+science Multimedia-Agentur GmbH + Timo Fuchs + fuchs&pinuts.de +19343 + Medictyon + Juan A. Hernández + juan.hernandez&medictyon.net +19344 + United SSL Deutschland GmbH + Erik Hildenbrand + cao&unitedssl.com +19345 + Odd Element Inc. + Joel Brooks + jbrooks&oddelement.com +19346 + SMComputer EDV Service & Consulting Schmid Manfred + Manfred Schmid + info&it-schmid.com +19347 + Pyzzo Software Corporation + Chris Volkert + snmp-admin&pyzzo.com +19348 + Synacor, Inc. + Philip Seibel + pseibel&synacor.com +19349 + Lightshore + Rafael Tonin + rtonin&lightshore.com +19350 + eyevis + sylvestre legrand + sylvestre.legrand&free.fr +19351 + Ivega Corporation Pvt. Ltd. + Srinivas Krishnamurthy + srinivask&ivega.com +19352 + IP Labs GmbH + Klaus Reimer + hostmaster&iplabs.de +19353 + ScottRickman + Scott Rickman + scott_rickman&ntlworld.com +19354 + E-Force S.r.l. + Andrea Gariboldi + andrea.gariboldi&e-force.org +19355 + Sebastian Dietzold + Sebastian Dietzold + sebastian&dietzold.de +19356 + Agroportal B.V. + Willem Bolhuis + willem.bolhuis&agroportal.nl +19357 + University Medical Centre Nijmegen + P. Kuper + p.kuper&info.umcn.nl +19358 + Wildner AG + Stefan Schulze + stefan.schulze&wildner.de +19359 + TOPPAN FORMS CO.,LTD. + KojiKato + katokoji&toppan-f.co.jp +19360 + Q&R B.V. + Rutger van Bergen + r.vanbergen&anite.nl +19361 + Knowledge Media Research Center + Torsten Kurbad + t.kurbad&iwm-kmrc.de +19362 + Virtuous, Inc. + Jason Kirtland + tech&virtuous.com +19363 + Net Island Networks + Michael C. Toren + snmp&netisland.net +19364 + 4thpass Inc. + Markus Jansen + markus&4thpass.com +19365 + Nike Inc. + Aboo Balgamwalla + aboo&nike.com +19366 + Westel Systems + Scott Goeken + sgoeken&westelsystems.com +19367 + Ecolab, Inc + IS - Security + crystal.sroga&ecolab.com +19368 + Azul Systems, Inc. + Pramod Thangali + pramod&azulsystems.com +19369 + Laboratorio Imagen Radiologica + J. Quiles + mrquiles&usc.es +19370 + Telecast Fiber Systems, Inc. 
+ John King + jking&telecast-fiber.com +19371 + Lanvise + Andre Perry + iana&lanvise.com +19372 + Miami County ESC + John Hermes + jhermes&miami.k12.oh.us +19373 + PowerTel Limited + Paul Matthews + matthewsp&powertel.com.au +19374 + Orc Software + Joakim Johansson + joakim.johansson&orcsoftware.com +19375 + Max-SI + Davey Goode + hostmaster&max-si.com +19376 + Integrating the Healthcare Enterprise + Mike Henderson + mike&easterninformatics.com +19377 + Nekotec Telecom + Hermilo Vallejo + hermilo.vallejo&nekotec.com.mx +19378 + MagicCastle Communication + Jun, Moon-Seog + msh&magiccastle.co.kr +19379 + Imperva, Inc. + Amichai Shulman + shulman&webcohort.com +19380 + TAMURA Corporation + Yoshiaki Hasegawa + yoshiaki.hasegawa&tamura-ss.co.jp +19381 + Bundesanstalt fuer Finanzdienstleistungsaufsicht + Stefan Klueppel, Oliver Hennl, Ruediger Vetter, Martin Steitz + pki&bafin.de +19382 + Manzara Electronics LTD. + Stacy Minkin + stacy&glb.net +19383 + Mikromarketingas UAB + Vytautas Vakrina + info&micro.lt +19384 + VIC TOKAI CORPORATION + Takahiro Matsuura + infra&victokai.co.jp +19385 + Infinity Comunicaciones + Manel Monguilod + Manel.Monguilod&infinity.es +19386 + Martech Systems (Weymouth) Ltd. + John Baker + john.baker&martechsystems.co.uk +19387 + Laboratori Guglielmo Marconi + Michele Bergonzoni + bergonz&labs.it +19388 + DB Systems GmbH + Stephan Kurth + stephan.kurth&tlc.de +19389 + ComSign Ltd. + Zeev Shetach + Zeev&ComSign.co.il +19390 + Trend Software LLC + VASILIY GAGIN + vgagin&trend-soft.com +19391 + SASH Management, LLC + David Witt + spam_iana_oid&giftchecksolutions.com +19392 + City of Union City, Georgia + Jeffrey D. Brown + jbrown&unioncityga.org +19393 + Trident Systems Incorporated + Wayne Franklin + wayne.franklin&tridsys.com +19394 + I.T. By Design, Inc. + Gene Hovey + gene&theHoveys.com +19395 + NewLink Genetics + Brad Powers + itsupport&linkp.com +19396 + CITI - Center for Information Technology Integration + David M. Richter + richterd&citi.umich.edu +19397 + TrafficSim Co., Ltd. + Toshiya HOTTA + hotta&trafficsim.co.jp +19398 + JSC Institute of Informational Technologies + Yuriy Gorbenko + iit&iit.kharkov.ua +19399 + Xylon Computersystems + Arne Brutschy + abrutschy&xylon.de +19400 + TriggerPlus Software Ltd. + Yoav Maor + yoav&triggerplus.com +19401 + Fachhochschule Kaiserslautern Standort Zweibruecken + Helfrich Markus + helfrich&rz-zw.fh-kl.de +19402 + Robert W. Baird & Co + IT Infrastructure Services + OIDAdmin&rwbaird.com +19403 + Westbridge Technology, Inc. + Guy Hussussian + guy&westbridgetech.com +19404 + NIC Inc. + Rob Lindenbusch + rob&nicusa.com +19405 + Advanced Systems Concepts, Inc. + Benjamin Rosenberg + ben21&advsyscon.com +19406 + Adiscon GmbH + Rainer Gerhards + rgerhards&adiscon.com +19407 + Atomic9.NET + Stephen Craft + contact&atomic9.net +19408 + Zweicom Ltd.
+ Jose Chavez + jchavez&zweicom.com +19409 + Smittskyddsinstitutet + Michael Strålfors + admin&smi.ki.se +19410 + Ministerio de Economia y Hacienda + Oscar Robledo + oscar.robledo&minhac.es +19411 + Andreas Falk + Andreas Falk + privat&andreas-falk.de +19412 + TeliaSonera Mobile Networks AB + Thomas Bergerhag + thomas.bergerhag&teliasonera.com +19413 + SBIM-FMUP + Pedro Miguel + pm&med.up.pt +19414 + Kolab Project + Bernhard Reiter + bernhard&intevation.de +19415 + Docucorp International + Richard Cook + rcook&docucorp.com +19416 + Edison Schools + Karen Chang + kachang&edisonschools.com +19417 + Envoy International, LLC + Alexander Sokhin + alex&envoy-usa.com +19418 + Stampede Technologies, Inc. + Brian Morris + brian&stampede.com +19419 + Internet-Team GmbH + Marino Schwedhelm + ms&i-team.de +19420 + NodeMinder + John R. Hachey + hachey&citlink.net +19421 + Futago LLC + Jevin Sweval + jevinsweval&futagollc.com +19422 + LDAP Technologies + Luis E. Lewis + lelewis&optonline.net +19423 + Global Dial Pty Ltd + Mr Nathan Alberti + na&nathanalberti.com +19424 + ECoCoMS Ltd. + Todor Vassilev + todorv&ecocoms.com +19425 + Hinttech BV + Ake van der Meer + avdmeer&hinttech.nl +19426 + ESDG Konsult AB + Anders Svensson + anders.svensson&esdg.se +19427 + The Excel Ortho Group + Sanjot Bharatan + sanjyotb&yahoo.com +19428 + NovAG-Services Ltd. + Konstantin Filipenko + KFilipenko&kmf.com.cy +19429 + Alphawave Ltd + David Lawrence + snmp&alphawave.net +19430 + NET-2COM Ltd. + Steven Feng + fxtop&hotmail.com +19431 + JSC Kordon + Rezvanov Vasily Pavlovich + kordon&ip.rsu.ru +19432 + Virulent Software + Nicholas Holder + nrholder&gmail.com +19433 + Austin Independent School District + William Hill + whill&austin.isd.tenet.edu +19434 + Concepts et contenus + Yves Bergeron + yves.bergeron&bdeb.qc.ca +19435 + Hectec GmbH + H. Breitner + dicom_uid&hectec.de +19436 + Concordia University + Steven Winikoff + Steven.Winikoff&concordia.ca +19437 + European School of Management and Technology GmbH + Thomas Werner + werner&esmt.org +19438 + Broadweave Networks + Mark Horstmeier + markh.remove&broadweave.com +19439 + Kingstar&winning Medical Info-Tech Co.,Ltd. + kai sun + sunkai&china.com +19440 + IAD GENERAL CO.,LTD + LIUCHENGLONG + LIUCHENGLONG&HUAWEI.COM +19441 + Toroki, Inc. + Amir Fuhrmann + amir&toroki.com +19442 + PHANTOM + Robert Pawlowski + R.Pawlowski&converse.com.pl +19443 + EGATEL S.L. + José Manuel Mariño + mbasalo&egatel.es +19444 + Metaswitch Networks Ltd (formerly 'Data Connection Ltd') + Colin Tregenza Dancer + iana&metaswitch.com +19445 + Calltech Sp. z o.o. + Sebastian Smyczyński + ssmyczynski&calltech.com.pl +19446 + Cable One + Eric Billeter + ebilleter&cableone.net +19447 + Swift and Company + Joshua Hastings + joshua.hastings&swiftbrands.com +19448 + zending + C. van Meerendonk + info&zending.nu +19449 + Systinet Corp. + Petr Dvorak + petr.dvorak&systinet.com +19450 + NetShift Software Ltd + Andy Pinkard + andy.pinkard&netshift.com +19451 + NextG Networks, Inc. + Daniel Trieu + dtrieu&nextgnetworks.net +19452 + BeiJing NuQX Technology CO.,LTD + Zeng Kong + jerry&nuqx.com +19453 + Ufinity Pte. Ltd. + Lee Peng Wai + pengwai&ufinity.com +19454 + Airspan Networks Inc. + Mark Thomas + mthomas&airspan.com +19455 + Marks & Spencer PLC + Sean Catlin + sean.catlin&marks-and-spencer.com +19456 + Prediwave, Inc + Yaron Benita + yaronb&prediwave.com +19457 + Platinum Equity, LLC + Mary Ann Sigler + itservices&platinumequity.com +19458 + Application Security, Inc.
+ John Colton + jcolton&appsecinc.com +19459 + RF Applications, Inc. + Bruce R. Knox + bknox&rfapps.com +19460 + Sycamore.US + Eric Hittle + sysadmin&sycamore.us +19461 + Cendant Mortgage Corporation + Michael R Davis + michael.davis&mortgagefamily.com +19462 + Roberto Capancioni + Roberto Capancioni + roberto&capancioni.com +19463 + Division of Johns Hopkins in Singapore Limited + Seow Kok Heng + kokheng&hopkins.edu.sg +19464 + Hitachi Communication Technologies, Ltd. + Hiroaki Kasahara + hiroaki_kasahara&hitachi-com.co.jp +19465 + Franziska Buendgens + Franziska Buendgens + buendgf1&cs.man.ac.uk +19466 + PND1 + Pinda Ndaki + pinda&dining.umd.edu +19467 + GeekBone.org + Jun Sheng + chaos&geekbone.org +19468 + Ishihata industry + Kyohei Ishihata + ishihata&amy.hi-ho.ne.jp +19469 + Highnet Systems Ltd. + Arik Ben-Dov + arik&highnet-sys.com +19470 + Highnet Systems Ltd. + Arik Ben-Dov + arik&highnet-sys.com +19471 + Secgo Group Oy + Tatu Mannisto + tatu.mannisto&secgo.com +19472 + Staer Sistemi s.r.l. + Massimo Marini + massimo.marini&staersistemi.it +19473 + North American Electric Reliability Corporation + Jeff Hicks + jeff.hicks&nerc.net +19474 + CRL + Manisha + mvjagadhane&hotmail.com +19475 + Confederação Interestadual das Cooperativas Ligadas ao Sicredi - Sicredi Servicos + Fernando Henrique Cardoso + fcardoso&sicredi.com.br +19476 + Highwall Technologies, LLC + Bob Poole + bob.poole&highwalltech.com +19477 + Infocaja, S.L. + Juan Jose Roman + juan.jose.roman&infocaja.es +19478 + Rapidpacket + Todd Troxell + xtat&rapidpacket.com +19479 + KeyOn Communications, LLC + Robert McPeak + robert.mcpeak&keyon.com +19480 + litica.de - Hermann & Matejek GbR + Ulrich Matejek + uli&litica.de +19481 + BluePig + Jia Huang + iamfat&263.net +19482 + Waldmann + Karl-Heinz Waldmann + karl-heinz.waldmann&siemens.com +19483 + Aqua Systems, Inc. + Tamotsu Akama + akama&aqua-systems.co.jp +19484 + BANCO DE ESPAÑA + Miguel Ángel Peña + pki&bde.es +19485 + art of object GmbH + Andreas Winter + andreas.winter&art-of-object.de +19486 + Venali Inc + Benny Millares + benny.millares&venali.com +19487 + Lucent Technologies, Mobility IN + Enoch Wang + ewang&lucent.com +19488 + Toadico, Inc + Kelly Roestel + kelly&toadico.com +19489 + IP3 Networks, Inc. + Christopher Bradley + dev&ip3networks.com +19490 + Hiper S.A. + Alvaro Chavez + achavez&hiper.com.pe +19491 + Epeople Soluções S/C LTDA + Tiago Silva Proença + tsp&inf.ufsc.br +19492 + ADDC Infotech GmbH + Peter Kanis + kanis&addc.ch +19493 + iMEDIC GmbH + Stephan Wilczek + wilczek&imedic.de +19494 + Richard Schilling, MBA + Richard Schilling + rschi&rsmba.biz +19495 + National Informatics Company + Richard Schilling + rschi&rsmba.biz +19496 + Cognition Group, Inc. + Richard Schilling + rschi&rsmba.biz +19497 + OSS Application Consulting Centre, Taiwan + Song Huang + Song&ossacc.org +19498 + HMJ - Projets Délocalisés + M. Jacques CALAIS + jacques.calais&projet-delocalise.com +19499 + Association Inforoots + Dominique Guardiola + inforoots&inforoots.org +19500 + Government Information Technology Services + Kavin Chinawong + kavin.chinawong&gits.net.th +19501 + Trust Investment Bank, JSB + Sergey Suchinskiy + sergey.suchinskiy&trust.ru +19502 + QORVUS Systems, Inc. + Kathryn Kessey + kkessey&qorvus.net +19503 + Lockdown Networks, Inc.
+ Robert Gilde + iana&lockdownnetworks.com +19504 + Mullen + Tom Mullen + tmullen&sio.midco.net +19505 + verzeichnisdienst + Bastian Boday + hostmaster&bast.li +19506 + Christian Jahn + Christian Jahn + christian.jahn&calbe.net +19507 + haroldbeumer.com + Harold P. Beumer + ldap-schema&haroldbeumer.com +19508 + The Software Guild, Inc. + Arthur Messenger + Arthur.messenger&Att.net +19509 + Trident Microsystem INC + Jian Niu + jniu&tridentmicro.com +19510 + Center for Excellence in Telecommunications and Space + Dovel Myers Jr + dm142988&oak.cats.ohiou.edu +19511 + Larry Burton + Larry Burton + larry_burton&larryburton.com +19512 + SAHM Technologies LLC + Didier COLIN + didier.colin&sahm.ae +19513 + C3 Desenvolvimento de Sistemas Computacionais Ltd. + Claudio Cuqui + claudio&c3systems.com.br +19514 + On Demand Systems Limited Partnership + Yasuhiro Fujita + fujita&mitemite.co.jp +19515 + TREND Service GmbH + Christian Lox + hostmaster&trend-service.de +19516 + Universitaet Erfurt + Gabriele Schulz + gabriele.schulz&uni-erfurt.de +19517 + Colibria AS + Lars Myhrum + myhrum&colibria.com +19518 + Technische Universität München + Andreas Paul + rbg-system&in.tum.de +19519 + E.Novation Portal Technology B.V. + Maurice Leeflang + iana&operations.enovation.nl +19520 + Samartha Information Systems Pty. Ltd. + SMK + info&samartha.com +19521 + dataschalt e+a GmbH + Klaus Greisen + k.greisen&dataschalt.de +19522 + Aineas.net + Naoki Misumi + nao&aineas.net +19523 + Sequence Solutions + Kevin P Miller + kevin.miller&sequencesolutions.net +19524 + Rational Innovations + Robert Liesenfeld + xunil&xunil.net +19525 + Tippecanoe County Public Library + Lee Stoltz + stoltzld&tcpl.lib.in.us +19526 + BlueBoxStudio.net + Frederic BRIAND + fredbriand&blueboxstudio.net +19527 + Global Technology Ltd + Stephen Liptrott + stelip&gtships.com +19528 + Meson Fiber Optics + Michael Jenkins + jenkinsm&mesonfiberoptics.com +19529 + NetStrong Information Ltd. + Frank.luo + wluo&nstrong.com +19530 + NetMaster Ltd. + Luowei + luowei&sjtu.edu.cn +19531 + INSA de Rennes + Roderick Petetin + Roderick.Petetin&insa-rennes.fr +19532 + businessMart AG + Dominik Schramm + info&businessmart.de +19533 + TeleMessage Ltd. + Arthur Veinstein + arthur&telemessage.com +19534 + Gorenjska banka d.d. Kranj + Janez Prešern + janez.presern&gbkr.si +19535 + ACNMS + Alexis C. Villalon + alexis.c.villalon&accenture.com +19536 + Panduit Corp. + Steve Jacks + saj&panduit.com +19537 + M5 Networks, Inc. + Scott Anderson + scott&m5net.com +19538 + Add2Net, Inc. + Ripta Pasay + ripta.pasay&lunarpages.com +19539 + Country Haven Academy + Andrew Mashchak + info&countryhaven.org +19540 + NCS Consulting Inc. + Neil Sequeira + neil&ncsconsulting.com +19541 + Belgorodenergo + Denis E. Zargarov + Zargarov_DE&belgorodenergo.ru +19542 + Aalborg University Library + Joakim Recht + recht&aub.auc.dk +19543 + Wellcome Trust Sanger Institute + Infrastructure Management Team + ssg-imt&sanger.ac.uk +19544 + Weather Strategies + Gabriel Wilkins + g459&yahoo.com +19545 + Sony CISC + David Hofmann + david.hofmann&am.sony.com +19546 + MELENTANE + Samir Sow + samir.sow&melentane.com +19547 + Oplink Communications, Inc.
+ Calvin Fang + calvinf&oplink.com +19548 + FROX communication + Martin Ronner + martin.ronner&frox.com +19549 + Uni-Q Systems (formerly 'ENGREN.NET') + Hans Engren + hans.engren&uni-q.se +19550 + an Academic Network at Sao Paulo (ANSP) + Jorge Futoshi Yamamoto + yamamoto&ansp.br +19551 + Thales Naval France + Kress + lionel.kress&fr.thalesgroup.com +19552 + Alexander Kowalski + Alexander Kowalski + a.kowalski&freenet.de +19553 + Beijing Gtt Telecom Technologies Co.Ltd + GANGLIU + ugang&sina.com +19554 + PIPPKRO + Yevgeniy Tkachuk + elendil&imcs.dvgu.ru +19555 + Engim Inc + Mohan K Sonti + mib&engim.com +19556 + NextJet Technologies + James Tucker + jt&nextjet.com +19557 + Frank Holtz + Frank Holtz + oid&webkoch.de +19558 + StoneHole + Eric Emerson + esemerson&adelphia.net +19559 + Zones Inc + Ravi chhabra + ravi.chhabra&zones.com +19560 + Informationssicherheit Christian Scheucher + Christian Scheucher + christian.scheucher&scheucher.net +19561 + Maxim Integrated Products, Inc. + Jeff Putsch + putsch&mxim.com +19562 + Telena Communication + Tony Porras + aporras&telena.com +19563 + Center for Imaging Research + Kati Elfers + Kati.Elfers&UC.Edu +19564 + GOLUM + Ed Stafford + ed&linuxanthology.org +19565 + Escape Communications, Inc. + James Nadeau + jnadeau&escapecom.com +19566 + C-Scape Consulting Corp. + Stephen Weiss + sweiss&c-scape.com +19567 + ScienceLogic LLC + Richard Chart + rchart&sciencelogic.com +19568 + Global Gate Systems LLC. + Alberto Alonso + admin&ggsys.net +19569 + Grid Research Lab + Hao Chen + chen12p&uwindsor.ca +19570 + frottage.org + Rob Partington + snmp-oid&frottage.org +19571 + Micromatic Tecnologia e Sistemas Ltda + Francisco Estevam + estevam&micromatic.com.br +19572 + ecnuee + cong wei + ecnu_cong&hotmail.com +19573 + Adeptiva + Stephan February + domains&adeptiva.com +19574 + Network Gulf Information Technology + Rajesh Soman + rajesh&networkgulf.com +19575 + Sansay Inc. + max sheng + msheng&sansay.com +19576 + Slovanet a.s. + Peter Farkas + peter.farkas&slovanet.net +19577 + ACE electronics N.V. + Koenraad Lelong + k.lelong&ace-electronics.be +19578 + IPConsulting AG + Ralf Schiemann + r.schiemann&ipcag.com +19579 + Info Point + Przemyslaw Klawitter + pk&unix-spec.net +19580 + RZF Rechenzentrum der Finanzverwaltung des Landes NRW + Stefan Schmitz + Stefan.Schmitz&rzf.fin-nrw.de +19581 + TTC Telecom Ltd. + Peter Bodnar + snmp&ttc-telecom.sk +19582 + Pixelpark AG + IA-Berlin + ia.berlin&pixelpark.com +19583 + Wayfarer Transit Systems Ltd. + Ian Cooper + imcooper&wayfarer.co.uk +19584 + headissue GmbH + Jens Wilke + iana-reg&headissue.com +19585 + Laboranowitsch + Christian Laboranowitsch + christian&laboranowitsch.de +19586 + ICAN (International Communications and Navigation Ltd.) + Randal Greene + rgreene&ican.nf.net +19587 + Adtec Digital, Inc. + Andre G. Ancelin + andrea&adtecinc.com +19588 + Tecnologia Bancaria SA + Sebastiao Barone + suporte.redes&tecban.com.br +19589 + Business Link Kent Ltd + Anthony Butler + Anthony.Butler&businesslinkkent.com +19590 + Systems Solution, Inc. + Rich Wein + iana-reg&ssi-net.com +19591 + Oral Roberts University + Peter Kovaleski + pkovaleski&oru.edu +19592 + Prince Law Offices, P.C. + Warren Prince + troubleshooters&princelaw.com +19593 + Nerim + Marc Dequenes + marc.dequenes&corp.nerim.net +19594 + MediaCell, Inc. + Chet McNeill + cmcneill&mediacell.com +19595 + Alcatel Shanghai Bell Co. Ltd.
+ LIANG Ji + ji.liang&alcatel-sbell.com.cn +19596 + nhnghia + Nguyen Huu Nghia + nghianh&cardvn.net +19597 + Capnova Oy + Mikael Nylund + hostmaster&capnova.com +19598 + ianet + Sebastian Arming + ianet-pen&arming.de +19599 + GeniusBytes Software Solutions GmbH + Michael Ernst + ernst&geniusbytes.com +19600 + CTA Systemsource Inc. + Serge Belobaba + administrator&cta-systemsource.com +19601 + RGB Networks, Inc. + Yan Ding + yding&rgbnetworks.com +19602 + Custom IdM Solutions + Jimmy Yogurt + sundevils22&yahoo.com +19603 + dezcom + Andrey Tatarinov + elephantum&dezcom.mephi.ru +19604 + Secure Network Systems, LLC + Betty Pierce + b.pierce&securenetworksystems.com +19605 + Starview Technology Inc + Clark Davis + cdavis&starviewtechnology.com +19606 + Nienschanz Ltd. + Andrew Petroff + ptr&nnz.ru +19607 + Argelcom Limited + Matthew Baldwin + matt.baldwin&argelcom.com +19608 + Astea International Inc. + Vladimir Kofman + vladimirk&astea.co.il +19609 + Zytec + Barry Chang + bc&ms93.url.com.tw +19610 + BaseN Oy + Kaj J. Niemi + kaj.niemi&basen.net +19611 + trustsec IT solutions GmbH + Frank Kirschner + fk&trustsec.de +19612 + Wyncote.net + Dan Mascenik + dtm&umich.edu +19613 + Willing Minds LLC + Mark Nagel + hostmaster&willingminds.com +19614 + Business Port Systems Inc. + Yoshiyuki Takahashi + takahasi&bps-inc.co.jp +19615 + FCS India (Pvt) Ltd. + Manpreet Singh Nehra + manpreet_nehra&hotmail.com +19616 + InterCard AG + Oliver Adolph + oliver.adolph&intercard.de +19617 + Taglicht.Com Sàrl + Thomas Taglicht + www.iana.org&taglicht.com +19618 + Return Data + Fredrik Estreen + fredrik&returndata.se +19619 + Go Net + Mian Mumtaz Ali + mian.mumtaz&go.net.pk +19620 + NETDEVICES INC. + Srinivas Moturu + smoturu&netd.com +19621 + alibaba + jamesyu + james_ysh&alibaba-inc.com +19622 + Shenzhen Union Broadband Communication Co., Ltd. + Jin Qian + George_Q_King&hotmail.com +19623 + is:energy czech a.s. + Michal Mühlpachr + michal.muhlpachr&is-energy.cz +19624 + MANDA + Ralph Weichert + oid&man-da.de +19625 + Unilever PLC + Linda Millington + Linda.Millington&unilever.com +19626 + Permabit, Inc. + Jered Floyd + jered&permabit.com +19627 + NTPL + prasanna + nidasesi_prasanna&rediffmail.com +19628 + webslingerZ, Inc. + Scott Stancil + sstancil&webslingerZ.com +19629 + University of Cincinnati + Bryan Newswanger + bryan.newswanger&uc.edu +19630 + Alopa Networks Inc. + Suryaprakash Konanuru + suri&alopa.com +19631 + eDial Inc. + Jeremy Nussbaum + jeremy.nussbaum&edial.com +19632 + Rygo Technical Enterprises + Ryan Goodwin + rgoodwin&rygotech.com +19633 + BOFH Networks Oy + Kimmo Jukarainen + registry&bofhnetworks.net +19634 + THALIX + Michel Verdier + contact&thalix.com +19635 + Prager, Sealy & Co. LLC + Aaron Hathaway - Mark Stingley + IT&Prager.com +19636 + SolNet Data Service + Jesper K. Pedersen + jkp&solnet-data.dk +19637 + Oakmoon Consulting, Inc. + Steven Downs + sdowns-iana&oakmoon.com +19638 + University of Texas at Arlington + Digant C Kasundra + digant&uta.edu +19639 + Teztech, Inc. + P.J. Tezza + enterprise-numbers&teztech.com +19640 + Automated Marketing Solutions Inc. + Bartosz Zak + bzak&automs.com +19641 + Holley Communications Group + Jeff LaPorte + jeff.laporte&holleycomm.com +19642 + East Alabama Medical Center + Judson L. Bishop + judson.bishop&eamc.org +19643 + Yipes Enterprise Services Inc. + Jonathon Little + jlittle&yipes.com +19644 + Polaris Wireless Inc. + Udai Kumar + ukumar&polariswireless.com +19645 + Sioux Medical Systems B.V. + Mr. 
Addy Klos + addy.klos&sioux.nl +19646 + Computing Edge, Inc. + Kevin Burtchett + cedge&computingedge.net +19647 + Frederiksborg Amt + Lars Petersen + ianalap&hotmail.com +19648 + TransCore + John Freeman + john.freeman&transcore.com +19649 + ERP-HQ Inc. + Paul Wujek + pdw&rogers.com +19650 + etilize Inc. + Andre Kruger + akruger&etilize.com +19651 + Seven-Star Inc. + Gervan Thomas + gervan.g.thomas&uwrf.edu +19652 + National Chi Nan University, Taiwan + Ching-Huang Huang + hching&ncnu.edu.tw +19653 + Integral Technology Solutions Pty Ltd + Mr Cameron Tuesley + ctuesley&integral-techsolutions.com +19654 + Infoteria Corporation + Yoshiyuki Kitahara + ykitahara&infoteria.co.jp +19655 + University of Zululand + Sean Carte + scarte&pan.uzulu.ac.za +19656 + dreamtel + Qu Yibing + quyb&dreamtel.com.cn +19657 + PlewsNet + Jon Plews + enterprise-numbers&plews.net +19658 + Macro-System + Paweł Wichniewicz + wichniew&macro-system.com.pl +19659 + Paradigit Computers B.V. + Jeroen van Pelt + iana&paradigit.nl +19660 + CRO24 GmbH + Andrew Fowler + andrew.fowler&cro24.de +19661 + Gleiss Lutz + Stefan Schmitt + stefan.schmitt&gleisslutz.com +19662 + Telenor AB + Anders Ångström + anders.angstrom&telenor.se +19663 + Secure-Group AS + Torgeir Hansen + torgeir&secure-group.com +19664 + gr3 a/s + Andreas Jydebjerg + asj&gr3.dk +19665 + Grazer Wechselseitige Versicherung AG + Dietmar Hacker + dietmar.hacker&grawe.at +19666 + Alcatel Suomi Oy + Kari Koivula + kari.koivula&alcatel.fi +19667 + Dr. Ing. h.c. F. Porsche AG + Bert Blaha + hostmaster&porsche.de +19668 + A.S. Reiseveranstaltungs GmbH + Marc Hennings + marc.hennings&rainbowtours.de +19669 + aserva GmbH (formerly 'newthinking IT, Inhaber Alexander Scheibner') + Alexander Scheibner + alexander.scheibner&aserva.de +19670 + IT University of Copenhagen + Flemming Lindblad + sysadm&itu.dk +19671 + DMDsecure.com BV + N.J. van der Vorm + nickel&dmdsecure.com +19672 + Gratisavisa Stimuli + Louis A.S. Holbrook + louis&stimuli.nu +19673 + ByteHoard + Taymour El Erian + taymour.elerian&tedata.net.eg +19674 + Catalis Health, Inc. + Rick Ashford + rick.ashford&catalishealth.com +19675 + Lixto Software GmbH + Gilbert Hoedl + office&lixto.com +19676 + University of Houston + Enrico Cantu + ecantu&uh.edu +19677 + Unassigned + Removed 2006-08-15 + ---none--- +19678 + Reliance Infocomm + ANIL PAWAR + anil.pawar&relianceinfo.com +19679 + Prusch + Andreas Prusch + andreas&prusch.de +19680 + Dembach Goo Informatik GmbH + Manon Goo + manon&manon.de +19681 + vitadurum ag + Alberto Moya + admin&vitadurum.com +19682 + Balo n Co + Farrukh Masood + farrukh_masood2001&hotmail.com +19683 + WelLink.,Ltd + Kim, Dong Hyun + untame&hanmail.net +19684 + Town & Country Credit Corp. + Shawn Obermeyer + iana&tccredit.com +19685 + Virginia Dept. of Social Services + Don Mills + don.mills&dss.virginia.gov +19686 + net-concept + Torsten Becker + t.becker&nc-world.de +19687 + Indicative Software, Inc + Brian Atkins + brian.atkins&indicative.com +19688 + Cargo IT AG + Werner Pfundstein + wpfundstein&cargo-it.de +19689 + Evolta B.V. + Mark van Duren + vanduren&evolta.nl +19690 + Line4, Inc.
+ Mark Debusschere + markd&line-4.com +19691 + eProcess Burkina + Mahamoudou Ouedraogo + mouedraogo&ecobank.com +19692 + Hoppe Dialog GmbH + Ralph Hoppe + iana&hoppe-dialog.de +19693 + University of California Santa Cruz (UCSC) + Eric Goodman + ericg&cats.ucsc.edu +19694 + Enterprise Management Shareware + David Porter + davidp4&cox.net +19695 + projektfarm GmbH + Till Brehm + oid&projektfarm.de +19696 + Cherrypicks + Alex Erich Cheung + clcheung&cherrypicks.com +19697 + May10 Technology Inc. + Nian Liu + linuxdoors&yahoo.com.cn +19698 + Information Technology Services Department + Terence Choy + tscchoy&itsd.gov.hk +19699 + System Detection, Inc. + Brian Larkins + brian&sysd.com +19700 + Traqueur SA + Thierry Malo + thierry.malo&traqueur.fr +19701 + Mars Computer Systems Sp. J. + Andrzej Bucko + andrzej.bucko&mars.slupsk.pl +19702 + BSZ Leonberg + Rüdiger Beck + jeffbeck&web.de +19703 + Agence de l'Eau Seine Normandie + Laurent Maldonado + laurent.maldonado&atosorigin.com +19704 + NetFocus S.R.L + Carlos Gonzalez-Cadenas + gonzalezcarlos&extendforce.com +19705 + Citto Tecnologia LTDA + Volnys Borges Bernal + volnys&citto.com.br +19706 + BBned NV + Operations + contact&bbned.nl +19707 + Micran Ltd. + Yuriy Coureelo + cgg&micran.ru +19708 + Brain BroadCast sas + Maurizio Boriani + baux&member.fsf.org +19709 + TexRamp, Inc. + Logan Ashby + lashby&texramp.net +19710 + Bitbuzz Ltd + Alex French + noc&bitbuzz.com +19711 + scaryclowns.org + Jim Starwood + jim&scaryclowns.org +19712 + WebSolve, Inc. + Chris Carlson + ccarlson&websolve.com +19713 + Elfiq Inc. + Frederick parent + fparent&elfiq.com +19714 + Logic Development + Kevin Spaeth + info&logicdevelopment.net +19715 + LinkAir Communications, Inc. + Bai Lijun + Calvin&LinkAir.com.cn +19716 + Ars Lumina, Inc. + Kenneth Lee + klee&arslumina.com +19717 + Greatmark + Michael Klein + mklein&greatmark.com +19718 + Motorola Israel Ltd. - NSA + Mark Moran + Mark.Moran&motorola.com +19719 + QuaQuaNet + Guido Trotter + ultrotter&quaqua.net +19720 + Westmarsh Consulting Ltd + Pierre-Yves Dampure + pierre-yves.dampure&westmarsh.com +19721 + BlazeArts Ltd. services.hu network + Norbert Csongradi + norbert.csongradi&services.hu +19722 + CONYSER Consulting y Servicios S.L. + Jose Estevez + josemanuel.estevez&dracena.org +19723 + Broadbus Technologies, Inc. + Don Dewar + ddewar&broadbus.com +19724 + DigiDyne Inc. + Sebastien Coutu + it&digidyne.ca +19725 + D. Trust Certifikacna Autorita, a.s. + Juraj Vasko + vasko&dtca.sk +19726 + Clinical Reference Laboratory, Inc. + Keith Viken + network&crlcorp.com +19727 + Shylex Telecomunicaciones, S.L. + Jose Luis Alvarez + shylex&shylex.com +19728 + Techno-Toolsmiths + John H. Reinhardt + john.h.reinhardt&techno-toolsmiths.com +19729 + SOFTREK CORPORATION + David Otminski + dotminski&pledgemaker.com +19730 + Logic Etc, Inc. + Ron Bickers + rbickers&logicetc.com +19731 + Sinaptica Networks + Jose Miguel Guzman + jmguzman&sinaptica.cl +19732 + dragonsdawn.net + Gordon Messmer + gordon&dragonsdawn.net +19733 + HUB Consulting, Inc. + Takahiro Yonekawa + yonekawa&hub.jp +19734 + RAYCOM Co.,LTD. + Yantao Ma + mayt&raycom.com.cn +19735 + OGA SynCom Co..,LTD + Noppasoon Srinarang + jeap&oga.co.th +19736 + i-RAID + Henry or Jonson + henry&i-raid.com.tw +19737 + Kyneste S.p.A. + Gianluca Sironi + gianluca.sironi&kyneste.com +19738 + Campana + Ottavio Campana + ottavio&campana.vi.it +19739 + UZorg B.V. + G.
Krediet + info&uzorg.nl +19740 + Institut de Mathématiques et de Sciences Physiques/Université d'Abomey-Calavi + Joël Toyigbé Hounsou + jhounsou&imsp-uac.org +19741 + Residential Technology, Incorporated + Paul Franklin + paul.franklin&residentialtechnology.us +19742 + Kazeon Systems, Inc + Michael MacFaden + mrm&kazeon.com +19743 + jpwebworks Pty Ltd + Jason Phillips + office&jpwebworks.com.au +19744 + Novacoast, Inc. + Adam Gray + support&novacoast.com +19745 + netz.ooo (formerly 'Dataflake Weblications') + Jens Vagelpohl + jens&netz.ooo +19746 + Data Domain, Inc + J. Kaminar + jkaminar&datadomain.com +19747 + The Open and Free Technology Community + David B Harris + infrastructure&oftc.net +19748 + Affiliated Computer Services, Inc. TripPak SERVICES + Justin Slaughter + unixadmin&trippak.com +19749 + CISNS + Daniel Trombley + dtsh&cisns.net +19750 + Beijing YTT Telecom Technologies Co.,Ltd + gangliu + ugang&sohu.com +19751 + NOTE Torsby AB + Fredrik Källgren + fredrik.kallgren&torsby.note.se +19752 + AIS, Aplicaciones de Inteligencia Artificial, S.A. + David J. Pérez + davidj.perez&ais-int.com +19753 + Inexbee + José Cervera + jose.cervera&inexbee.com +19754 + ESG Elektroniksystem- und Logistik-GmbH + Andreas Kiewitt + Andreas.Kiewitt&esg.de +19755 + Pera International Ltd + Steve Pickard + it.manager&pera.com +19756 + Schwabing Software + Harald Strack + strack&fh-konstanz.de +19757 + Iontas + Frank Maurer + frank.maurer&iontas.com +19758 + Hafsjold Data ApS + Mogens Hafsjold + mha&hafsjold.dk +19759 + Compunetix, Inc + Scott Stern + sstern&compunetix.com +19760 + Leon County Schools + Chris Fulton + fultonc&mail.leon.k12.fl.us +19761 + Gordon Aluminum Industries, Inc. + Brian Kling + briank&gordonaluminum.com +19762 + Caminos y Canales Ltd. + Daniel Ovalle + daniel.ovalle&caminosca.com +19763 + Branch Banking and Trust Corporation + Ken Meehan + kmeehan&bbandt.com +19764 + Infinite Gravity Digital Media Ltd. + Richard Vandenberg + numbering_admin&vansys.com +19765 + Systems Engineering Associates, Inc. + Jack Lavender + jack.lavender&seconcepts.com +19766 + Wuhan University of Technology + tianbin + tianbin&mail.whut.edu.cn +19767 + Cooperativa Novanta s.c.r.l. + Silvetti Nicola + admin&radionova.it +19768 + Skybox Security Ltd. + Tal Hornstein + talh&skyboxsecurity.com +19769 + JLCX Inc + Gary Craig + brondeau023&comcast.net +19770 + SelfSigned.com + Matt White + anmwhite&yahoo.com +19771 + IBM, Tivoli Provisioning and Orchestration + Lewis Lo + lewisl&ca.ibm.com +19772 + Mobile Wisdom, Inc. + Gerard P. Stelzer + Gerard.Stelzer&Mobile-Wisdom.com +19773 + New-Global Corporation + David Burdelski + david&new-global.com +19774 + mBlox, Inc. + IT - Bill Brotherton + netalert&mblox.com +19775 + IPG Photonics Corporation + Giles Christenson + gchristenson&ipgphotonics.com +19776 + TI Paperco Inc. + Ernest Wohlfarth + ernie_wohlfarth&timeinc.com +19777 + Taylor University + Nathan Ehresman + nehresma&css.tayloru.edu +19778 + LinguaNet + Paul Tourtier + paul.tourtier&linguanet.net +19779 + BOBEK + Antonin Kral + A.Kral&bobek.cz +19780 + Claridion Inc. + Salvatore Cimmino + salvatore.cimmino&claridion.com +19781 + Odys-solutions + Benoit Nicq + benoit.nicq&odys-solutions.net +19782 + AEGEE-Utrecht + Luite van Zelst + ldap&aegee-utrecht.nl +19783 + NorthLANs Alliance, LLC + Dan Thompson + dantho&northlans.com +19784 + Little Box Solutions Inc. 
+ James Puderer + jpuderer&littlebox.ca +19785 + Grand Central Communications + Prasanta Behera + pbehera&grandcentral.com +19786 + Esteban Pizzini + Esteban Pizzini + epizzini&fibertel.com.ar +19787 + Ayub Yaqub + Ayub Yaqub + ayub&yaqub.net +19788 + LDT Communication Technology CO. LTD + Zenghai Liu + liuzenghai&163.com +19789 + Sheard and Company Pty Limited + Ross Sheard + rsheard&connexus.net.au +19790 + INSE7120 conU + Truong Ta + t_ta&ece.concordia.ca +19791 + MW EDV-Beratung + Michael Wandel + m.wandel&t-online.de +19792 + Forte IT + Vladimir Krupenin + wolodik&forte-it.ru +19793 + firstServed NV + Denis Braet + denis&firstserved.net +19794 + IP Security Consulting + Milan Pikula + milan.pikula&ipsec.info +19795 + AssetHouse Technology Ltd + Chris Norris + chris.norris&assethouse.com +19796 + Jostens Inc + Mark Schoneman + Mark.Schoneman&Jostens.com +19797 + sourceheads Information Technology GmbH (formerly 'Stefan Fiedler KEG') + Stefan Fiedler + noc-iana&sourceheads.com +19798 + FBIS + David Porter + davep&rccb.osis.gov +19799 + Jeremy McMillan + Jeremy McMillan + aphor&mac.com +19800 + Derek Balling + Derek J. Balling + dredd&megacity.org +19801 + PineApp Ltd. + Gabriel M. Mizrahi + gmizrahi&pineapp.com +19802 + Atlanta Business Software, Inc. + Bob Hollister + bhollister100&hotmail.com +19803 + EC Telecom + Ha Tae-Hyo + heartha&ectel.co.kr +19804 + The Norns Laboratories + Vitaliy Fursov + vfursov&canada.com +19805 + idiotwind.org + John T. Rose + rose&iastate.edu +19806 + SportOdds Systems Pty Limited + Peter Milburn + support&sportodds.com +19807 + AlarmsPro Inc. + Frank Knapp + fknapp&alarmspro.com +19808 + Open Cloud Ltd. + David Long + david.long&opencloud.com +19809 + GOUDRON S.A.R.L. + Olivier GOUDRON + olivier&goudron.fr +19810 + R Brooks Associates, Inc. + Mike Ricci + mdricci&rbrooks.com +19811 + WildOne Information Systems + Michael Wildpaner + mike&wildpaner.com +19812 + Hsiuping Institute of Technology + Ming Feng Yang + orson&mail.hit.edu.tw +19813 + Systems International + Dariusz Swierzewski + support&systems-international.com +19814 + Duosoft Inc + zhang baohua + bhzhang&duosoft.cn +19815 + Getronics Solutions Malaysia + Julian José Gomez + julianjose.gomez&getronics.com +19816 + Philips Medizin Systeme Boeblingen GmbH + Lars Steubesand + lars.steubesand&philips.com +19817 + Soluzioni Informatiche s.r.l. + Giacomo Carta + giacomo&solinfo.it +19818 + KIABI + Jean-François Rompais + jf.rompais&kiabi.com +19819 + Informatik Club der Universität Zürich - ICU + Markus Gerber + iana&icu.unizh.ch +19820 + Prival ODC, Inc. + Jean-Francois Brousseau + jfbrousseau&privalodc.com +19821 + Bridgeport Networks + Scott Shaffer + sshaffer&bridgeport-networks.com +19822 + Bossers & Cnossen BV + Albert Siersema + a.siersema&bnc.nl +19823 + Castle Building Centres Group Ltd. + Alex Taylor + alext&castle.ca +19824 + teleganov.net + Dmitry Teleganov + dmitry&teleganov.net +19825 + Kisslinger EDV-Beratung + Andreas Kisslinger + iana-assignment-gbx&kisslinger.net +19826 + Institut Teknologi Bandung (ITB) + Wahyu Hidayat + wahyuhid&yahoo.com +19827 + N2N Consulting Pte Ltd + See Chun Yan + chunyan.see&n2nconsulting.com +19828 + LBCN Communication Technology Co.,LTD.
+ Li Bo + bluedian&163.com +19829 + BITSTREAM + Dariusz Pietrzyk + info&bitstream.com.pl +19830 + VDL SA + Croiset Nicolas + ncroiset&vdldiffusion.com +19831 + rbb Rundfunk Berlin Brandenburg + Nick Zernecke + nick.zernecke&rbb-online.de +19832 + Rostrvm Solutions Ltd + Peter Magyar + peter.magyar&rostrvm.com +19833 + Team17 Software Ltd. + David Watson + noc&team17.com +19834 + Bulldog Communications Ltd + Chee Teoh + cheeteoh&bulldogdsl.com +19835 + Excibir Ltd. + Jeremy Buckley + jerry&excibir.co.nz +19836 + Qwest QNMS + Joseph Stelmack + Joseph.Stelmack&Qwest.com +19837 + USPFO for CA + Amy Ellis + amy.ellis&ca.ngb.army.mil +19838 + Univits International AB + Jon Olsson + sysadmin&univits.com +19839 + Synthean Inc. + Jim Zafrani + jim.zafrani&synthean.com +19840 + Avalon Networks Inc. + Joe Hetrick + hostmaster&avalon.net +19841 + Public Communications Services, Inc. + Craig Zeller + craig.zeller&teampcs.com +19842 + Pantel Service & Holding GmbH + Johannes M. Steger + jsteger&pantel.de +19843 + anarcho.com + Matt Magri + iana.org&anarcho.com +19844 + MOBIGEN Co., Ltd. + Sang-Hyun Jin + jins00&mobigen.com +19845 + Solenovo Oy + Mr. Jukka Keto + Jukka.Keto&solenovo.fi +19846 + DDI POCKET,Inc + Akihiro Ibe + a-ibe&pocket.ddi.co.jp +19847 + KU Communication Technology Co.,LTD + blueon + dayboy&21cn.com +19848 + University of Bologna + Luca Ghedini + lghedini&deis.unibo.it +19849 + NSFOCUS Ltd. + GuangXu Liu + lgxror&hotmail.com +19850 + Waggener Edstrom + Dean Chen + deanc&wagged.com +19851 + Telnet ISG + Matej Kovac + ianaorg&telnet.sk +19852 + Southern Utah University + Matt Zufelt + hostmaster&suu.edu +19853 + aapala.com + Esa Ääpälä + esa&aapala.com +19854 + UCLA EE AINS LAB + Dr. Izhak Rubin + rubin&ee.ucla.edu +19855 + AIS Management GmbH + Jan Boldt + jan.boldt&andrevis.de +19856 + Mobile TeleSystems OJSC, Komi Republic + Alex Deiter + tiamat&komi.mts.ru +19857 + Kuban-GSM CJSC + Arnold V. Skobeyev + infosecurity&kuban.mts.ru +19858 + IPC Media Limited + Sue Anderson + sue_anderson&ipcmedia.com +19859 + CGSS Guyane + Maurice Montgénie + maurice.montgenie&cgss-guyane.fr +19860 + CGS Corporate Group Service Ltd + Peter Jenka + p.jenka&cgs-ltd.com +19861 + Wake Forest University + John Borwick + borwicjh&wfu.edu +19862 + Motivity Telecom Inc. + Vance Shipley + vances&motivity.ca +19863 + Tippett Studio + xian&tippett.com + xian&tippett.com +19864 + Jostens Inc + Mark Schoneman + Mark.Schoneman&Jostens.com +19865 + NEOMONTANA ELECTRONIS + Yasen Angelov + neomontana&dir.bg +19866 + Edouard Boissonneault + Edouard Boissonneault + edatbdeb&hotmail.com +19867 + digitec GmbH + Bernd Strebel + b.strebel&digitec.de +19868 + Krafty Creations, Inc. + Joel Kraft + joel&krafty.com +19869 + Beagle Internet Pty Ltd + John Ferlito + johnf&beagle.com.au +19870 + The dot.GOD Registry, Limited + Joe Baptista + baptista&dot-god.com +19871 + AG Consulting, LLP. + Daniel Roscigno + dan&roscigno.com +19872 + TVEngineer.org + Bishop L. Ellison + bishop&tvengineer.org +19873 + MarsolsNet Inc. + Marsols + rbl&marsols.net +19874 + Distributel Communications Ltd. + Daniel Puckett + domainreg&distributel.ca +19875 + hutuworm.org Inc. + Liang Feng + hutuworm&hutuworm.org +19876 + USE + Uli Schulze-Eyssing + uli&e2gas.de +19877 + Nuernberger Versicherungsgruppe + Heinrich Stengl + Heinrich.Stengel&nuernberger.de +19878 + ITNet S.p.a. + Luigi Erardi + l.erardi&it.net +19879 + Makedonski Telekomunikacii, MTnet + Stevco Risteski + ContactEmail +19880 + Conarcom C.A. 
Sirena + Anali Contreras + analicontreras&cantv.net +19881 + AEGEE-Europe + W. van Ravesteijn + itwg-board-l&aegee.org +19882 + Radius., Ltd + Eugene Knyshev + radius&obninsk.ru +19883 + Meta4 Spain S.A. + Eduardo Fernandes / Marcos Cacabelos + eduardofer&meta4.com +19884 + Ontier + Steven W. Roth + steven.roth&ontier.com +19885 + Navimedix Inc. + Rodney Rindels + rrindels&med.com +19886 + Corio, inc + Thomas Williams + twilliams&corio.com +19887 + California School Information Services + Chris Hall + chall&csis.k12.ca.us +19888 + The Tarc Group + Hostmaster + hostmaster&tarc.net +19889 + AppTrigger + Gerry Dubois + gdubois&apptrigger.com +19890 + Global Interactive Technology Pte Ltd + Somu + support&gitpl.com.sg +19891 + bay. Innenministerium (Verfassungsschutz) + Dr. Triller + iuk&lfv.bayern.de +19892 + HONKO MFG.co.,ltd. + Nishio Hiroyuki + hnishio&honko.co.jp +19893 + Brighton Consulting Inc. + Paul C. Bryan + pbryan&bright1.com +19894 + CubeRoot + John Rowe + John&cuberoot.biz +19895 + Heolys France SARL + Christophe Crier + direction&heolys.com +19896 + udayan kumar + Udayan kumar + udayan_kumar&da-iict.org +19897 + IUT FOTSO Victor de Bandjoun, Université de Dschang + Marcellin Julius NKENLIFACK + mnkenlif&hotmail.com +19898 + LT Systems LLC + Benjamin Kingsolver + hostmaster&lt-systems.com +19899 + Thinking Systems Corporation + JB Wang + jb&thinkingsystems.com +19900 + Advanced RF Technologies, Inc. + Sadat Chowdhury + schowdhury&adrftech.com +19901 + Atkinson, Inglis & Associates + Simon Inglis + simon&atina.com.au +19902 + University of Texas Pan American + Information Security Officer + infosecurity+ianareg&panam.edu +19903 + Jharding + Joshua Harding + xjharding&elitemail.org +19904 + Sage Technology Ltd. + Ryan Tseng + cptseng&ms8.hinet.net +19905 + Beijing YTT Telecom Technologies Co.,Ltd + gangliu + ugang&sohu.com +19906 + Europacom.net Ltd + Mike Paglia + hostmaster&tcp.net.uk +19907 + Alpha Oil Ltd. + Ivan Zozulya + ion&ok.net.ua +19908 + Alcopack Group + Alexander Pytlev + apytlev&tut.by +19909 + PadzNet, Inc. + Dj Padzensky + oid-admin&padz.net +19910 + DirecTrust + Carlos González-Cadenas + gonzalezcarlos&extendforce.com +19911 + Global Velocity Inc. + Tom McLaughlin + tmclaughlin&globalvelocity.info +19912 + Phatline + Peter Sandström + peter&phatline.com +19913 + murphx Innovative Solutions Ltd + Ben Murphy + ben.murphy&murphx.com +19914 + Optus SingTel Pty Ltd. + Sion Camilleri + sion.camilleri&optus.com.au +19915 + BeiJing Gefon Network Technology Ltd. + yang dong + nomad21&sina.com +19916 + Fundación ESADE + Jordi Diaz + diazj&esade.edu +19917 + Sungard Finance + Laurent Ploix + lp.provisoire&sungard-finance.fr +19918 + Phoenix IT Services + Dean Schoolar + Dean.Schoolar&Phoenix.co.uk +19919 + IPL Information Processing Ltd + Kevin Hansard + kevin.hansard&iplbath.com +19920 + PREVX Ltd. + Penelope Smy + pen.smy&prevx.com +19921 + Gross + Marcel Gross + marcel.gross&panalpina.com +19922 + Iowa State University + John T. Rose + rose&iastate.edu +19923 + ei3 Corporation + Bruce B. Platt, Ph.D. + noc&ei3.com +19924 + Mirror Image Internet + Paul H.P. Christen + paulc&mirror-image.com +19925 + Triadigm Technology + Stephen Detwiler + steved&triadigmtech.com +19926 + Feith Systems and Software, Inc. + John Wehle + john&feith.com +19927 + Escherware b.v. + G.R.Stone + support&escherware.com +19928 + Linuxlab Ltd. + Felipe Barros Salgado + felipe&linuxlab.cl +19929 + 4X Information Technology GmbH + Joerg Kaenel + joerg.kaenel&4xIT.com +19930 + SalesCatalysts.com Ltd.
+ Chu San Bun (Stephen) + stephen.chu&salescatalysts.com +19931 + BluntSoft Ltd. + blunt + blunt_hust&yahoo.com.cn +19932 + MITTS Ltd. + Alan Attard + alan.attard&gov.mt +19933 + University of Tasmania + Brett Clifford + duty-sysadmin&utas.edu.au +19934 + Key Business Services + J Pfersich + jp&key-biz.com +19935 + Allnet GmbH + Michael Buchstaller + buchi&allnet.de +19936 + Philip Morris International Management SA + Nigel Larkin + nigel.larkin&pmintl.com +19937 + Cogent Innovators, LLC + Stefan Adams + stefan&cog-ent.com +19938 + OU College of Continuing Education + Brett Zimmerman + zim-iana&cce.ou.edu +19939 + AWARM.NET, Ltd. + Ivan Mokrotovarov + awarm&awarm.net +19940 + broadreach systems + Maurice Bard + mauricebard&broadreachsystems.com +19941 + Lind-Waldock + Tom McMahon + tmcmahon&lind-waldock.com +19942 + Jumbo Corporation + Xavier Bergade + xavierb&jumbocorporation.com +19943 + D2D Cars + Brandon Knitter + brandon&d2dcars.com +19944 + Sunwest Computers Ltd. + Brian Caverley + pen&sunwestcomputers.ca +19945 + Abilitec Limited + Damian Jones + djones&abilitec.com +19946 + ITS Pub + Horst Raedger + horst.raedger&onlinehome.de +19947 + W-IE-NE-R Power Electronics GmbH + Andreas Koester + koester&wiener-d.com +19948 + OOO Maxidom + Alexey Vekshin + alex+iana&maxidom.ru +19949 + Bellmonte + Gerardo Pirla Diaz + gpdiaz&telson.es +19950 + Sveriges Television AB (SVT) + Peter Axelsson + peter.v.axelsson&svt.se +19951 + AREA + Christian DERBOUL + Christian.Derboul&area-autoroutes.fr +19952 + Message Systems, Inc. + George Schlossnagle + george&messagesystems.com +19953 + Ironworks Consulting, LLC + Keith Long + klong&fe-works.com +19954 + CenterBoard Inc + Rohan Pinto + rpinto&ldapguru.net +19955 + Tenebris Technologies Inc. + Eric Jacksch + jacksch&tenebris.ca +19956 + Kabissa Inc. + Tobias Eigen + tobias&kabissa.org +19957 + Aurora Network Operations Management Services + Jeff Konz + jdkonz&comcast.net +19958 + nex-G Systems Pte. Ltd. + Nikhil Goel + nikhil.goel&nexg-systems.com +19959 + Lund University + Eskil Swahn + eskil.swahn&ldc.lu.se +19960 + SER Storage Technology GmbH + Klaus Eulenbach + Klaus.Eulenbach&ser.de +19961 + Central Research Institute for Machine Building + Alex Myzika + ma&mcc.rsa.ru +19962 + Netservers Ltd. + John McEleney + support&netservers.co.uk +19963 + Enigma Software Corporation + Ferenc Kokovai + kokovai.ferenc&enigma.hu +19964 + TRIBUNAL REGIONAL DO TRABALHO DA 22A. REGIÃO + Cicero Vilson + vilson&trt22.gov.br +19965 + Spotwave Wireless, Inc. + Steve Allen + steve.allen&spotwave.com +19966 + Telexy Corp. + Bong-Su Jang + ilmina&unitel.co.kr +19967 + Meta Consulting + Ben Steeves + bcs&metacon.ca +19968 + Network Intelligence Corporation + Hayim Kobi + hkobi&network-intelligence.com +19969 + Klamath Networks Corp. + Jae Kim + jkim&klamathnetworks.com +19970 + Clever Machine, Inc. + Dan Dunham + ops-iana&clevermachine.com +19971 + Tripleplay Services Ltd. + Graeme Ogilvie + info&tripleplay-services.com +19972 + David Nesting + David Nesting + david&fastolfe.net +19973 + Vader Logic, LLC + Mark James + asgard&pacbell.net +19974 + Estonian Educational and Research Network + Lauri Anton + lauri.anton&eenet.ee +19975 + ICTeam S.p.A. + Danilo Lozza + supporto&icteam.it +19976 + Alberti Holdings Pty. Ltd. + Sandro Alberti + sandro&alberti.com.au +19977 + Theta Networks, Inc. + Dawn Fang + szhu&thetanetworks.com +19978 + o2 (Germany) GmbH & Co. 
OHG + Daniel Stricharz + ContactEmail +19979 + INPES + Pascal GUINET + pascal.guinet&inpes.sante.fr +19980 + ---none--- + ---none--- + ---none--- +19981 + Venetica + Sean A. Johnson + sjohnson&venetica.com +19982 + Cap Gemini Norway + Christian Berg-Henry + christian.berg-henry&capgemini.com +19983 + Cu-nes + Takeshi Hebiishi + cu-nes&zar.att.ne.jp +19984 + Pittsburg State University + Michael J. Wheeler + mwheeler&pittstate.edu +19985 + Vigoment Software Inc. + Masatoshi Nakamura + huntor&ybb.ne.jp +19986 + void solutions + Emanuel Brzostowski + Emanuel.B&web.de +19987 + Anerist + Scott Pierce + spierce&anerist.com +19988 + Rezel + Association Rezel + root&rezel.com +19989 + Netlanta.com + Gregory C. Colin + gcolin59&earthlink.net +19990 + NetTeem, LLC + Stoyan Dimov + InformationServices&NetTeem.com +19991 + Beijing Zhengyou Networks&Communication Technology Co.,Ltd. + LiYong + liyong&zhengyou.com +19992 + Ericsson, Inc. + Paul Kuykendall + Paul.Kuykendall&ericsson.com +19993 + S2 Systems, Inc. + Kerry Cage + securitycontact&s2systems.com +19994 + Shanghai Withub General Technology Co.,Ltd. + Zhengwenli + zhengvl&sjtu.edu.cn +19995 + SPES S.r.l. + Pierluigi Brugnone + p.brugnone.spes&urmetsistemi.it +19996 + SwissQual AG + Beat Bolli + mibmaster&swissqual.com +19997 + 4D Technology (llc) + Dale Whitfield + dale&4dllc.com +19998 + Netspecs B.V. + Martijn Verspoor + software&netspecs.nl +19999 + eEpoch + Manuel Torres + manuel.torres&safelayer.com +20000 + Ing. Büro Ziegler + Felix Ziegler + fz&zie.ch +20001 + Ukraine Trust Network + Vladimir E. Protaschuk + director&utn.com.ua +20002 + RZNet AG + Rafael Rutkowski + Rafael.Rutkowski&rznet.de +20003 + PTK Centertel sp z o. o. + Michal Kociolek + michal.kociolek&centertel.pl +20004 + HEUFT SYSTEMTECHNIK GMBH + Kai Dietrich + Kai.Dietrich&heuft.com +20005 + Air Products and Chemicals, Inc. + Maury G. Robert + robertmg&airproducts.com +20006 + Nagios + Subhendu Ghosh + nagios-snmp&sghosh.org +20007 + Kenati Technologies Inc. + Sanmitra Pandharpur + sanmitra&kenati.com +20008 + Toshiba Electronics Europe GmbH + Helmut Franzke + hfranzke&tee.toshiba.de +20009 + Beaver Creek Cooperative Telephone Company + Brandon Drake + hostmaster&bctelco.com +20010 + reuter network consulting + Stefan Reuter + nic&reucon.net +20011 + System Business Consulting + Marcin Cieslak + saper&saper.info +20012 + QinetiQ Ltd + Adrian Jones + acjones&qinetiq.com +20013 + Vanguard Integrity Professionals + Ted Smykla + tsmykla&go2vanguard.com +20014 + Fernandez Industries Incorporated + Anthony Fernandez + anthony&fernandez.com +20015 + Ville d'Yverdon + Didier Wulliamoz + dwu&ylb.ch +20016 + Mountain Top Applied Solutions, Inc. + Allen F. Schell + afschell&mtapps.com +20017 + Dansk Netcenter APS + Flemming Rasmussen + fr&t26.dk +20018 + River City Software Associates, Ltd. + Noel Terranova + nterranova&rcsal.com +20019 + Antarctica Networks Ltd. + Juan Francisco Roco + support&antarcticanetworks.com +20020 + Pacific Wave Solutions, Incorporated + Bryce Bingham + bbingham&pacificwavesolutions.com +20021 + Boswell Online B.V. + Tom Mulder + tom&a1.nl +20022 + TETCO TECHNOLOGIES SA + Frederic Meyer + snmp&tetco.fr +20023 + Rezel + Association Rezel + root&rezel.com +20024 + Proxyconn, Inc. + Wojciech Siedlecki + wojciechs&proxyconn.com +20025 + Compusaurus Bt. + Janos Bali + compusaurus&axelero.hu +20026 + RedeNetwork.com + Renato S. Magalhaes + info&redenetwork.com +20027 + TDK-LAMBDA Corporation + Kazuhiro Iwai + soft.info&jp.tdk-lambda.com +20028 + Line-tec, Inc.
+ tomita sintaro + info&line-tec.co.jp +20029 + Mortara Instrument, Inc. + Barry D. Brown + barry.brown&mortara.com +20030 + ValidSoft Ltd + Michael Skells + mike.skells&validsoft.com +20031 + Qnamic AG + Michael Wirz + michael.wirz&qnamic.com +20032 + KISTER + Wilfried Kister + wilfried.kister&gmx.de +20033 + Dicr + Igor A Tarasov + iana&dicr.org +20034 + rjcdb.com + Robert John Churchill + rjc&rjcdb.com +20035 + Alexander Kellmann + Alexander Kellmann + alex&kellmann-web.de +20036 + Advanced Control Technology, Inc. + Kenneth Grob + keng&acttechnico.com +20037 + HISL Limited + Gerard Kilburn + iana_admin&his.co.uk +20038 + LithiumCorp Pty Ltd + James Wilson + jwilson&lithiumcorp.com +20039 + Shanghai Communications Technologies Center + Wang Ziping + san&scomcenter.com +20040 + GUMC + Coordinator ICT Systeembeheer + iana&ict.azg.nl +20041 + INGENION GmbH + Thomas Zehbe + tz&ingenion.de +20042 + Hitel Italia S.p.A. + Alessandro Betta + a.betta&hitel.it +20043 + Grupo 3A Recoletas + Alexis Rodríguez Castedo + alexis&3a-grupo.com +20044 + Ekinops SAS + Pascal Babin + pbabin&ekinops.fr +20045 + Corporate Express Promotional Marketing + David Brown + dave.brown&cepm-us.com +20046 + Pilgerer e.V + Marc Sztochay + msztochay&pilgerer.org +20047 + theglobe.com + John Houston + jhouston&corp.theglobe.com +20048 + Internet Broadcasting Systems + Chris Josephes + hostmaster&ibsys.com +20049 + GnuArch + James Blackwell + iana&gnuarch.org +20050 + Western United Insurance + Jarrad Winter + jwinter&westernunited.com +20051 + Unassigned + Returned 2023-03-02 + no-reply&iana.org +20052 + Intesa Sanpaolo S.p.A. + Massimo Beltramo + massimo.beltramo&intesasanpaolo.com +20053 + I.NET2 S.r.l. + Andrea Mistrali + support&inet2.it +20054 + Statistisches Bundesamt + Tim Aden + tim.aden&destatis.de +20055 + FRANCE2 + Pascal Roberts + reseau&france2.fr +20056 + ASC Program + Richard Mark + rmark&llnl.gov +20057 + Lumenos, Inc. + Ben Chatfield + BChatfield&Lumenos.com +20058 + Aramiska + Robin Vissers + r.vissers&aramiska.net +20059 + Acsera Corporation + Karthik Somasundaram + karthik&acsera.com +20060 + Pacific University + Bill Jiminez + bjiminez&pacific.edu +20061 + Seacoast Synergy, Inc. + David A Coursey + david.coursey&seacoastsynergy.com +20062 + University of Crete - Faculty of Medicine + Mr. Grigorios G. Papazoglou + grp&med.uoc.gr +20063 + vandalsWeb.com + Daniel Perez + dperez&vandalsweb.com +20064 + Tohoku Intelligent Telecommunication Co.,Inc. + Fumio Takahashi + takahashi-fm&tohknet.co.jp +20065 + Omega Consult Inc. + Chai + chaidapeng&hotmail.com +20066 + Entheos Software + Troy Farrell + troy&entheossoft.com +20067 + Karlstad University + Paul Scott + paul.scott&kau.se +20068 + Artinfor + Nicolas Dutertre + nicolas.dutertre&artinfor.fr +20069 + JDG Trading(Pty Ltd.) + Sean Paine + seanp&jdg.co.za +20070 + Telecom Italia S.p.A. + Antonio Cantoro + antonio.cantoro&telecomitalia.it +20071 + Electronic Formations + Osama Abu Elsorour + mib&eformations.net +20072 + IWKA InformationsSysteme GmbH + Patric Schirrmann + patric.schirrmann&iwka.de +20073 + Abundance Networks, LLC + Thomas Richtarich + interoperability&abundancenetworks.com +20074 + Freerun Technologies Inc. + System Administrator + swreg&freerun.com +20075 + Werthmoeller IT-Service + Martin Werthmoeller + service&werthmoeller.de +20076 + Makaera Vir 2000 Inc. + ben soo + hostmaster&soo.com +20077 + cMarket, Inc. 
+ Information Technology OID Administrator + oidadmin&cmarket.com +20078 + Escobar Enterprises + Victor Escobar + sydbarrett74&hotmail.com +20079 + SK-TECH.net + Kianusch Sayah Karadji + kianusch&sk-tech.net +20080 + MFD + Kelsey Sigurdur + iana&m-f-d.org +20081 + Alanne LLC + Adam Crosby + iana&alanne.com +20082 + ECE Projektmanagement + Wolfgang Reimann + iana&ece.de +20083 + Techno Group, Inc + Nick Maximov + maxs&techno.spb.ru +20084 + OnlineWeb Ltd. + Laszlo Merczel + info&onlineweb.hu +20085 + HyperSpace Communications, Inc. + Fritz Hesse + fhesse&ehyperspace.com +20086 + JRC ENGINEERING CO.,LTD. + MASAYOSHI YODA + yoda&jrce.co.jp +20087 + Consultix GmbH + Thomas Duda + duda&consultix-gmbh.de +20088 + Renewal Enterprises, LLC + Jonathan Keim + snmp&christianity.com +20089 + Cornerstone Consulting, LLC + John A. Thomas + John.A.Thomas&CornerstoneConsultingLLC.com +20090 + AIS Automations- und Informationssysteme GmbH + Sven Schiwek + sysop&ais-ag.de +20091 + SyAM Software, Inc. + Michael Daniele + Michael.Daniele&syamsoftware.com +20092 + Servicios de i-pro para competitividad, S.A. de C.V. + Viktor Yarmak + viktor&i-productiva.com +20093 + TrekLogic Inc. + Clement Ng + clement.ng&treklogic.com +20094 + Vaonet + Kevin Stewart + kstewart&vaonet.com +20095 + Positron Access Solutions Corp (formerly 'Aktino, Inc.') + Jean-Francois Mailhot + jfmailhot&positronaccess.com +20096 + Image Entry, Inc. + Sean Johnson + sean.johnson&imageentry.com +20097 + Plope Consulting + Chris McDonough + chrism&plope.com +20098 + Dave Hoebe + Dave Hoebe + dave&dofnet.com +20099 + Impulse Internet Services + Ted Cabeen + iis-iana&impulse.net +20100 + Spliced Networks LLC + John Buswell + johnb&splicednetworks.com +20101 + Cox Enterprises + Samuel Workman + sam.workman&coxinc.com +20102 + multi.mediale.net.ltd + Reinhard Herrmann + Reinhard.Herrmann&mediale.net +20103 + allery.org + Steven Allery + redhawk105&gci.net +20104 + Digital Evergreen, Inc. + Scott Finnerty + scott.finnerty&digitalevergreen.com +20105 + Nàh-ko's world Inc. + Christophe Truffier + ctruffier&nah-ko.org +20106 + Center for Leadership Solutions LLC + Ryan Hatch + rhatch&leadership-solutions.net +20107 + ANIXIS + Tonio Pirotta + tonio&tpis.com.au +20108 + LPC Condor Technologies + Alberto Patron + apatron&tutopia.com +20109 + Cynap Limited + Ian Shuttleworth + ishuttleworth&cy-nap.com +20110 + MediaPlan Inc. + Katsuyuki Takahashi + takahashi&media-plan.co.jp +20111 + LEADER ELECTRONICS CORP. + Itoshi YOKOYAMA + yokoyama&leader.co.jp +20112 + SMT Electronic Technology Limited + Lu Huaqiang + lhq2003&smt.cc +20113 + Vecino + Che Tran + ctran&vecino.net +20114 + Systems Management Enterprises, Inc. + Sean Ellis + info&smeinc.net +20115 + J'raxis + J'raxis 270145 + oid&jraxis.com +20116 + Janr + Alexey Trusov + alex&tke.ru +20117 + Institute of Applied Internet Technology, Inc. + Tomohide Tanaka + tomohide&netstar.co.jp +20118 + KCN Tech Co., Ltd. + Arthur W. Moon + moon&kcntech.com +20119 + EuroMarknet Internet Technológiai és Tanácsadó Ltd. + Peter Turi + turip&inf.elte.hu +20120 + VIDEOTRON Corp. + Noboru Teruyama + teruyama&videotron.co.jp +20121 + imos GmbH + Alfred Wallender + aw&imos.net +20122 + mind-n + Han Kyoung Ha + hisystem&korea.com +20123 + Sébastien Namèche Consulting + S. Namèche + sebastien&nameche.fr +20124 + Cologne Systems GmbH + Dr. 
Ernst-Udo Wallenborn + ernst-udo.wallenborn&cologne-systems.de +20125 + Vitel Software, Inc + Joseph Rossi + jrossi&vitelsoftware.com +20126 + LP Agent + Lajos KISS + kl216&hszk.bme.hu +20127 + TTYS0 (formerly 'Gutenpress') + Sean Johnson + sean&ttys0.net +20128 + SKH TKO Kei Tak Primary School + William Ng + ktp-nwy&ktp.mysch.net +20129 + SOURCECORP,Inc. + Sean Johnson + sean.johnson&imageentry.com +20130 + Advanced Digital Systems, Inc. + Carolee Nail + cnail&mi-corporation.com +20131 + Vermeer Manufacturing Company + Web Systems Administrator + websysadmin&vermeermfg.com +20132 + Azonic Systems, Inc. + Barry Man + bman&azonicsystems.com +20133 + izac.org + Benoit Izac + benoit&izac.org +20134 + Surf and Sip Inc. + Mack Nagashima + mackn.iana&surfandsip.com +20135 + Elyo Services Ltd + Martyn Forbes + martyn.forbes&elyo.co.uk +20136 + SCHMIEDER it-solutions GmbH + Holger Schmieder + schmieder&schmieder.de +20137 + codesupply.com + Schema Manager + schema&codesupply.com +20138 + Avara Technologies Pty Ltd. + Stephen Lacey + stephen.lacey&avaratechnologies.com +20139 + Lobos, Inc. + Nobuhiko Onishi + onishi&lobos.co.jp +20140 + DAVOLINK + Hyung-Jong Song + hjsong&davolink.co.kr +20141 + Axis Internet + Rick Jones + rick&axisinternet.net +20142 + QianWang Networks Co. + ZhangYongsheng + zys690803&sina.com +20143 + Uffaq Technologies Private Ltd. + Rifat Zabin + rifat&uffaq.com +20144 + Optimo Service AG + Christian Reinhard + christian.reinhard&optimo-service.com +20145 + ELVIS-PLUS + Mark Koshelev + marc&elvis.ru +20146 + Ecutel + Stephen C. Sanders + ssanders&ecutel.com +20147 + Internet NetOnRamp + Bruce Kiley + hostmaster&netonramp.com +20148 + runIT AG + Eric Ernst + eric.ernst&runit.ch +20149 + Optim Ltd Int Grp + Nikolay Mirin + nikamir&mail.ru +20150 + Zon Nederland N.V. + Johan Mulder + johan&zonnet.nl +20151 + Martin-Luther-Universitaet Halle-Wittenberg + Leonhard Knauff + knauff&urz.uni-halle.de +20152 + eurofunk Kappacher GmbH + Christian Kappacher + ck&eurofunk.com +20153 + ALCA INFORMATIQUE et TELECOM + Thierry TROTIN + thierry.trotin&free.fr +20154 + Redes de Telefonía Móvil, S.A. + David Planell Peñalver + retemsa&retemsa.com +20155 + Winterthur Ibérica, AIE + Seguridad Informática + informatica.seguridad&winterthur.es +20156 + Lowe's Companies, Inc. + Shawn Haigler + shawn.m.haigler&lowes.com +20157 + High Desert Education Service District + Wade Holmes + wadeh&hdesd.k12.or.us +20158 + Washburn University + Shawn Geil + shawn.geil&washburn.edu +20159 + Force, Incorporated + Jay Kidd + jkidd&forceinc.com +20160 + Hilton Hotels Corporation + Preston Gilchrist + preston_gilchrist&hilton.com +20161 + EWE TEL GmbH + Stephan Jaeger + stephan.jaeger&ewetel.de +20162 + Trinity Capital Corporation + Ian Mcgowan + it&trinitycapital.com +20163 + Kerna Communications Ltd. + Conor McCarthy + conor&mailsecure.kerna.ie +20164 + Subrafta Industries, Inc. + John Simpson + john.simpson&voyager.net +20165 + NIE Corporation + Osamu Noguchi + noguchi&c-nie.co.jp +20166 + DEODEO Co,.Ltd + Hisashi Nakayama + nakayama&deodeo.co.jp +20167 + Sonic Industries Inc. + Network Admin + bsheriff&sonicdrivein.com +20168 + Gesellschaft fuer wissenschaftliche Datenverarbeitung mbH Goettingen + Konrad Heuer + kheuer&gwdg.de +20169 + Urmet TLC S.p.A. + Stefano Giannini + s.giannini&urmetsistemi.it +20170 + Systemlegionen + Magnus Sjöstrand + magnus&systemlegionen.com +20171 + Burwood Group, Inc. + Mike Gent + mgent&burwood.com +20172 + CRI Advantage + Aaron D. 
Wells + awells&criadvantage.com +20173 + sd&m AG + Matthias Meyer + matthias.meyer&sdm.de +20174 + Weldata bv + Andre van der Meer + andre&weldata.nl +20175 + FOR-A Company Ltd. + George Ishizaki + georgei&for-a.co.jp +20176 + Mark Rose + Mark Rose + uid&dicomrose.com +20177 + Chrome Cyz Co Ltd + Koji Onishi + onishi&ez-networks.jp +20178 + NetShield (Pty) Ltd + Jan + jannie&netshieldsa.com +20179 + RNDSoft co.,ltd + HwanYoung,Lee + hydool&intizen.com +20180 + SONGHWASOFT + Jong-ho, Rhee + likeu&songhwasoft.com +20181 + The Reynolds and Reynolds Company + John Simpson + john_simpson&reyrey.com +20182 + Appmind Software AB + Ulrik Odeberg + ulrik.odeberg&appmind.com +20183 + William G Dempsey & Associates + William G Dempsey + dempsey&dempsey.com +20184 + Montreal University + Alain Cote + alain.cote&UMontreal.CA +20185 + Brake Parts Inc + Warren Smith + warren.smith&dana.com +20186 + Panareef Pty Ltd + Anthony Turco + info&panareef.com +20187 + Gary T. Giesen + Gary T. Giesen + ggiesen&giesen.me +20188 + PHOENIX SOFT + Jesus Bernardo Ruiz Flores + jruiz&acelerate.com +20189 + Nature Care College + Patrick Taylor (IT System Administrator) + it&naturecare.com.au +20190 + iCable System + Sanghoon Han + hantough&icablesystem.com +20191 + Throughwave, Inc. + Chaiwat Chanruang + ohh&throughwave.com +20192 + JK Technologies Corp. + Jackson M Shih + jackson&jktech.com.tw +20193 + ERZIA Technologies, SL + Esther Lopez + esther.lopez&erzia.com +20194 + Volexia Ltd. + David Kitchener + dave.kitchener&volexia.com +20195 + AL.systems GmbH + Dr. Niketan Pandit + Niketan.Pandit&al-systems.com +20196 + Rainbow Triangle, v. o. s. + Jiri Nemrava + domeny&triangle.cz +20197 + St. Olaf College + Craig D. Rice + hostmaster&stolaf.edu +20198 + Equinox Information Systems, Inc + Wayne Lowe + wlowe&equinoxis.com +20199 + SUSCERTE - Gobierno de la Republica Bolivariana de Venezuela + Jorge J. Uya + juya&mct.gov.ve +20200 + Sioss Ltd + Tim Higham + support&sioss.com +20201 + Kinnarps AB + Stefan Helmersson + stefan.helmersson&kinnarps.se +20202 + First Canadian Title Company Ltd. + Sean Bodkin + helpdesk&firstcdn.com +20203 + First Canadian Title Company Ltd. + Sean Bodkin + helpdesk&firstcdn.com +20204 + EBC + VERROUST + frederic.verroust&ebc.net +20205 + TIS Grupa d.o.o. + Lovro Seder + lovro&tis.hr +20206 + The Unwins Wine Group Ltd + Lee Mason + lmason&unwins.co.uk +20207 + Closed Joint Stock Company "PIOGLOBAL Services" + Yuri N. Veltischev + yv&pioglobal.ru +20208 + Latitude Technologies, Inc. + Dan Crysler + danc&latitudetech.net +20209 + Submersion Corporation + Jeff Davey + jeffdavey&submersion.com +20210 + Vigilant Privacy Corporation + Cliff Reeser + creeser&vigilantprivacy.com +20211 + Information and Telecommunications Technology Center + Brett Becker + bbecker&ittc.ku.edu +20212 + Q1Labs, Inc. + Bruce Artman + bruce.artman&q1labs.com +20213 + Rock Island Communications, Inc. + Michael Hall + mikeh&rockisland.com +20214 + Nordic Institute for Theoretical Physics + Petter Urkedal + admin&nordita.dk +20215 + Netkey, Inc. + Glenn Marcus + gmarcus&netkey.com +20216 + more4u gbr + Eckhard Andree + ea&more4u.de +20217 + Polystar Instruments AB + Bengt Amnehagen + snmp&polystar.se +20218 + Ajeco Oy + Kenneth Ramstedt + Kenneth.Ramstedt&ajeco.fi +20219 + BSH Hausgeraete GmbH + Stefan Jakob + stefan.jakob&bshg.com +20220 + EB Software Enk, John Weholdt + John Weholdt + john&weholdt.no +20221 + IMAGINEON oHG + Frank Moritz + moritz&imagineon.de +20222 + PROFSOFT Sp. z o.o. 
+ Jacek + profsoft&profnet.pl +20223 + Ementor Norge AS + Bjornar Bjorgum Larsen + hostmaster&comace.net +20224 + Institut Pasteur + Marc BAUDOIN + adm&pasteur.fr +20225 + Sub Service Ltd. + Alex Povolotsky + tarkhil&sub.ru +20226 + Cel*Star Guyana Inc. + Nyron Samaroo + nsamaroo&celstarguyana.net +20227 + BitPoint AG + Martin Gilch + m.gilch&bitpoint.de +20228 + Teleflex Incorporated + Anish Patel + apatel&teleflex.com +20229 + Karl Hofmann Schule Berufsbildende Schule + Markus Eiden + markus+oid&eiden.de +20230 + Luis Lewis + Luis Lewis + lelewis&optonline.net +20231 + LDAP Technology + Luis E. Lewis + luis.lewis&ldaptechnology.com +20232 + LocalNet Systems + Andreas Weisker + andreas&weisker.net +20233 + Pro-G Information Security and Research Ltd. + Dr. Can. Acar + can.acar&pro-g.com.tr +20234 + Superlativ + Roar Prip + roar&superlativ.dk +20235 + LinuxForce, Inc. + Chris Fearnley + hostmaster&LinuxForce.net +20236 + China Telecom-Guangzhou Research and Development Center + GuoMaoWen + liuchenglong&huawei.com +20237 + PJ Velzeboer + PJ Velzeboer + public&websavages.com +20238 + AMBIZ, Inc. + Sergey Smolin or Vasily Kiselevich + s.smolin&ambiz.ru +20239 + Sterrewacht Leiden + Tycho Bot + computerworkers&strw.leidenuniv.nl +20240 + CICA + Jose Manuel Sanchez Breton + breton&cica.es +20241 + Premiere Fernsehen GmbH & Co. KG + Mark Duprau, Björn Andersen + webmaster&premiere.de +20242 + Stephan Ruckelshaussen + Stephan Ruckelshaussen + s.ruckelshaussen&web.de +20243 + SNR S.A. + Yannick KEREUN + ykereun&snrm.net +20244 + Police IT Management Agency + Anne Sandberg + poliisin.tietohallintokeskus&poliisi.fi +20245 + Deverto Systems Ltd. + Huba Gaspar + deverto&deverto.com +20246 + Delta Electronics (Switzerland) AG (formerly 'Delta Energy Systems (Sweden) AB') + Adrian Pluess + adrian.pluess&deltaww.com +20247 + Bladiant Technologies, Inc. + Kevin Hardiman + khardiman&bladiant.com +20248 + Sony Computer Entertainment America, Inc. + Tom Perrine + tperrine&scea.com +20249 + Touchstone Systems, Inc. + Todd Wallace + rwallace&tstoneinc.com +20250 + GlobeRanger Corporation + Sridhar Jaganathan + sridharj&globeranger.com +20251 + BN + Bartosz Kozlowski + oid&bn.pl +20252 + North American World Trade Group + Timothy R. French + timf&nawpi.com +20253 + iologics, Inc. + Larry Harkrider + lharkrider&iologics.com +20254 + Mediatria s.r.l. + Stefano Maffulli + maffulli&mediatria.it +20255 + Xalted Information Systems Pvt. Ltd + Babu K Papanna + babuk&xaltedindia.net +20256 + Communicate Norge AS + Martin Rauan + martin.rauan&communicate.no +20257 + Azienda Ospedaliera Villascassi + Pedemonte Carlo + carlo.pedemonte&villascassi.it +20258 + Sabanci University + Osman Demirhan + sysadmin&sabanciuniv.edu +20259 + Sandprints + J. Frisbie + jf&sandprints.net +20260 + MDTelecom, Inc. + Dale Augustyn + daugustyn&mdtelecom.com +20261 + EMS Wireless + Delaney Mohr + mohr.d&ems-t.com +20262 + Odyssey Software, Inc. + Jim Sullivan + jsullivan&odysseysoftware.com +20263 + Aurisp + Waldemar Brodkorb, Lars Ehrhardt + support&aurisp.de +20264 + Muiderberg (M.H.C.) + Willem van Heemstra + WvanHeemstra&xs4all.nl +20265 + Cambridge University Faculty of Music + Mustafa Beg + mustafa.beg&mus.cam.ac.uk +20266 + OnWeb Technologies, c.a. + Fanny Y. Perez + fyperez&onweb.com.ve +20267 + Days of Wonder Inc. + Brice Figureau + brice+iana&daysofwonder.com +20268 + Chelsea Technologies Inc. + L.E. 
Lewis + llewis&chelsea-tech.com +20269 + University of Yamanashi + Masanori Hanawa + hanawa&yamanashi.ac.jp +20270 + xtendo technologies Kurt Schwedes + Kurt Schwedes + hostmaster&xtendo.de +20271 + OpsRamp, Inc. + P V Viswanatha Raju + pvvraju&opsramp.com +20272 + Creative Media GmbH + Reinhard Weismann + reinhard.weismann&creative.co.at +20273 + Kunsthistorisches Museum + Theuerkauf Dominik + dominik.theuerkauf&khm.at +20274 + Cyber Ware Ltd + Mark Godfrey + mark&cyberware.co.uk +20275 + Ex Fructu Kft. + VOROSBARANYI Zoltan + mib-snmp-27e97b&exfructu.net +20276 + Citadec Solutions OY + Miika Keskinen + mikka.keskinen&citadec.com +20277 + System-Net + Philippe GAZAGNE + direction&system-net.biz +20278 + Sakana-home.net + Stephane Kattoor + subscribe&sakana-home.net +20279 + The Polestar Group Ltd + John Chamberlain + john.chamberlain&polestar-group.com +20280 + CERNET, China Education and Research Network + Jilong WANG + wjl&cernet.edu.cn +20281 + 3TNet Experimental Network + Jilong WANG + wjl&cernet.edu.cn +20282 + Cribbins Associates + Martin Cribbins + mc&cribbinsassociates.com +20283 + KnowGate + Ivan Montoro + ivanm&knowgate.com +20284 + FAURECIA + DTI + dti&brieres.faurecia.com +20285 + E-work S.p.A + Mariano Cunietti + mariano.cunietti&e-work.it +20286 + VirusBuster Ltd. + Daniel Kunos + dkunos&virus-buster.com +20287 + Katholieke Theologische Universiteit + Nico de Groot + ndegroot&ktu.nl +20288 + TimeLink International GmbH + Thomas Barth + barth&titze.de +20289 + indatex GmbH + Klaus Wissmann + iana-contact&indatex.com +20290 + CNA INSURANCE + Don Larson + dds-lansvr&cna.com +20291 + onTimeTek Inc. + DONG JAE, HAN + martin&ontimetek.com +20292 + Dept. Network Center, China University of Geosciences + Zhou Yong + oyxw81&tom.com +20293 + Onirban Networks + Manjunath Padmanabha + manjunath&onirbannetworks.com +20294 + Caperio AB + Eric Sigurdson + eric.sigurdson&caperio.se +20295 + Euler Hermes Poland + Marcin Giedz + marcin.giedz&eulerhermes.pl +20296 + Banco Popular + Ricardo López Sánchez + rlopez&bancopopular.es +20297 + Rodos + Lorenzo Planas + lorenzo.planas&rodosengineering.com +20298 + If-Tech + Avner Zrihen + avner&if-tech.com +20299 + Sussex Community Wireless + Andrew Smalley + andrew&sussexcommunitywireless.org +20300 + InsightETE + Matt Williams + m.williams&insightete.com +20301 + IBM eServer X + Lynn Fore + sls&us.ibm.com +20302 + Medica + Michael Busse + michael.busse&medica.com +20303 + Banco Industrial e Comercial S.A. + Marcio Costa Sousa Ponte + marcio&bicbanco.com.br +20304 + Matrix Bancorp + Jason Carter + jcarter&matrixbancorp.com +20305 + DIR A/S + Jens Hilligsøe + jens&dir.dk +20306 + Kontron Transportation Austria AG (formerly 'Kapsch CarrierCom AG') + Martin Gaugitsch + martin.gaugitsch&kontron.com +20307 + Lie-Comtel + Roland Guthauser + roland.guthauser&lkw.li +20308 + Regent Group of Companies + Denis Melnikov + dekapriz&yandex.ru +20309 + CIRIL - Centre Interuniversitaire de Ressources Informatiques de Lorraine, Nancy - FRANCE + Alexandre SIMON + Reseau&ciril.fr +20310 + Nicotra Sistemi spa + Picco Maurizio + nicotra.sistemi&nicotragroup.com +20311 + Views On News + Mirza Babur Naveed Baig + paindabad&yahoo.com +20312 + Semantico Limited + Richard Padley + richardp&semantico.com +20313 + ix-tech + Juergen Schmidt + el&ix-tech.de +20314 + FabDouglas + Fabrice Daugan + fabdouglas&users.sourceforge.net +20315 + OCA "OpenCallArgentina" + Alejandro Morello + alejandro.morello&hp.com +20316 + Qosmetrics, Inc. 
+ Anand Joshi + anand&qosmetrics.com +20317 + Lissi Co. Ltd. + Yuri Zyryanov + yzyryanov&lissi.ru +20318 + Friedrich + Lochner GmbH + Volec + volec&dresden.frilo.de +20319 + University of California, San Francisco, Information Technology Services + Tom Chen + it.networkteam&ucsf.edu +20320 + CAE Inc + Louis Labelle + ianaoid&cae.com +20321 + G+G Comsoft GmbH + Thomas Schöll + ggcomsoft&t-online.de +20322 + Udo Schledorn EDV-Beratung + Udo Schledorn + uschledorn&web.de +20323 + Success Medical Computer Co., Ltd. + YL Wang + sctech2004&yahoo.com.cn +20324 + Provinzial Rheinland Versicherung AG + Mirjam Gerritzen + gerritze&provinzial.com +20325 + neska Schiffahrts- und Speditionskontor GmbH + Jörg Hörter + hoerter&it.neska.com +20326 + Education Nationale + Romero Nicolas + pole-identite&ac-orleans-tours.fr +20327 + Helsinki Polytechnic Stadia + Esa Suominen + iana&stadia.fi +20328 + Whitehead Institute for Biomedical Research + Scott McCallum + skm&wi.mit.edu +20329 + University of Maine at Farmington + Fred Brittain + brittain&maine.edu +20330 + WPS Health Insurance + Dan Maahs + dmaahs&wpsic.com +20331 + ILTC - Instituto Doris Aragon + Luiz Claudio da Silva Leao + leao&iltc.br +20332 + Bau- und Wohngenossenschaft Brachvogel + Christian Vierkant + ianaadm&brachvogel-eg.de +20333 + ASC System + Adonai Costa + adonai&ig.com.br +20334 + IDS GmbH + Thomas WIld + admin&ids-gmbh.de +20335 + Teknologisk Institut + Nils Lastein + nils.lastein&teknologisk.dk +20336 + N&TS spa + Stefano Marino + s.marino&netsw.it +20337 + Arquematica + Eduardo Cunha + edu_cunha&hotmail.com +20338 + Addamark Technologies, Inc. + LDAP Admin + ithelp&addamark.com +20339 + Daniel Kelley and Associates + Daniel Kelley + dkelley&gmp.san-jose.ca.us +20340 + Information Builders, Inc + Mike Prieto + michael_prieto&ibi.com +20341 + LifeSize Communications, Inc + Bruce Sawtelle + bsawtelle&lifesize.com +20342 + Agencia Marítima de Consignaciones, S.A. + Alvaro Rubio Delclaux + arubio&agemasa.com +20343 + Altexia + Bruno Meyrieux + bruno&meyrieux.net +20344 + Portrait Displays, Inc. + Hong Chen + hchen&portrait.com +20345 + Edgewater Computer Systems. Inc. + Vicky Liang + vicky&edgewater.ca +20346 + MetroFi, Inc. + Andreas Ott + aott&metrofi.com +20347 + Equinox Converged Solutions Ltd + Simon Mullis + simon.mullis&equinoxsolutions.com +20348 + ARCUS Software Pty Ltd + Manuel Mall + mm&arcus.com.au +20349 + Aruba S.r.l. + Andrea Cerrito + andrea.cerrito&staff.aruba.it +20350 + Milos Malik + Milos Malik + xmalik1&fi.muni.cz +20351 + European Bridge-CA + Peter Steiert + peter.steiert&teletrust.de +20352 + UNATECH.CO.,LTD + William Jang + ckjang&unatech.co.kr +20353 + Ebang Telecom Technologies HangZhou Co., Ltd + ZengHui + notwo&ebang.com.cn +20354 + Plexis Computing Limited + Mike Ray + mike&plexis.demon.co.uk +20355 + Bundesverband der Deutschen Industrie e.V. + Volker Doert + postmaster&bdi-online.de +20356 + CONTER Control de Energía, S.A. 
+ Rafa Faus + rafa.faus&conter.es +20357 + TINC Associates NV + Pascal Verlinden + pascal.verlinden&tinc.be +20358 + Concordia College + Dennis Duncan + dduncan&cord.edu +20359 + On Command Corporation + Michael Fuhr + snmp&ocv.com +20360 + Cronbank AG + Ulrich Viefhues + f.bermbach&cronbank.de +20361 + Ecole Centrale de Nantes + Patrick Guyomarc'h + Patrick.Guyomarch&ec-nantes.fr +20362 + Alcanet International Deutschland GmbH + Thomas Wild + thomas.wild&alcatel.de +20363 + Terrecablate + Riccardo Pannocchia + pannocchia&terrecablate.it +20364 + System Engineering International + William Kautter + wkautter&seipower.com +20365 + Malam Communications LTD + Tal Schoenfeld + tals&malamcom.co.il +20366 + DBI Technologies + Darcy Buskermolen + darcy&dbitech.ca +20367 + Univates + Marcone Theisen + marcone&solis.coop.br +20368 + Manzanita Systems + Greg VInes + gvines&manzanitasystems.com +20369 + MEPHARTEC s.a.r.l + Medjigbodo + admin&mephartec.com +20370 + Allware LTDA. + Denis Michell Delarze Garcia + ddelarze&allware.cl +20371 + VhaYu Technologies + Sanjay Bhatia + sanjay.bhatia&vhayu.com +20372 + Michael A Nachbaur + Michael A Nachbaur + mike&nachbaur.com +20373 + Coast to Coast Hosting + Gregory Kuhn + gkuhn&ctch.net +20374 + TOKYO ELECTRON DEVICE NAGASAKI LIMITED + Hironobu Kanegae + h_kanegae&ngs.teldevice.co.jp +20375 + ATT - AudioText Telecom AG + Manfred Bacher + bacher&attag.ch +20376 + Century Systems Inc. + kazuhiro kurosawa + kuro&centurysys.co.jp +20377 + ITMG GmbH + Development Labs + development&itmg-muenchen.de +20378 + Sabancy Telekomünikasyon Hizmetleri A.Ş. + Halis Osman Erkan + cansm&sabancitelekom.com +20379 + Specstroy-Svyaz + Alexander Shalagin + soft&proton-sss.ru +20380 + Privacy, Inc. + Jim Goss + jim.goss&privacyinc.com +20381 + ShopperTrak + Jason Kratz + jkratz&shoppertrak.com +20382 + Working Knowledge, Inc + Matthew Jording + mcdjording&workingknowledge.org +20383 + CNCLabs(CHINA NETCOM GROUP LABS) + JiWenchong + jiwenchong&rd-bta.com.cn +20384 + GAIA e.V. + Andreas Vogt + a.vogt&gaia.de +20385 + Frameworx, Inc. + Kevin Ortman + kevin.ortman&frame-wx.com +20386 + Apple Daily Publication Development Limited,Taiwan Branch + Alex Kao + akao&appledaily.com.tw +20387 + Externet Kft + Herczeg Ferenc + herczeg.ferenc&externet.hu +20388 + Streamwide SAS + Thomas BOUTON + tbouton&streamwide.com +20389 + Cape Clear Software + Seamus Donohue + seamus.donohue&capeclear.com +20390 + I&C Energo a.s. (formerly 'OT Energy Services a.s.') + Milan Beneš + mbenes&ic-energo.eu +20391 + FAKUS Elektronik GmbH + Martin Bischoff + fakus&t-online.de +20392 + Oberfinanzdirektion Berlin + Carsten Mattausch + Carsten.Mattausch&ofd.verwalt-berlin.de +20393 + Money Services, Inc. + AEGON USA Directory Services + dirsvcs&aegonusa.com +20394 + ackrium + Alain Kamuleta + alainkamuleta&hotmail.com +20395 + Sircom software + Thai DANG + thai.dang&sircomsoft.com +20396 + Erbach Associates + Christopher Erbach + chris&jadegrel.net +20397 + Telecooperation Office, University of Karlsruhe + Michael Beigl + ldap&teco.edu +20398 + Quintech Electronics and Communications Inc. + Nicholas Johnston + njohnston&quintechelectronics.com +20399 + Educational Consulting Services, Inc. + Joseph C. Felix + jfelix&iac.net +20400 + Raytech Technologies, Ltd. + Asaf Shakarchi + asaf&raytech.co.il +20401 + InterMetro Communications + Jon P. deOng + jon.deong&intermetrocomm.net +20402 + COSTRON Co., Ltd. + Kyoung-Sub, Shim + monslife&costron.com +20403 + First Communication, Inc.
+ William Gu + william_gu&fci.com.tw +20404 + PT. ELEKTRINDO NUSANTARA + DICKY KURNIAWAN + dkg&en.co.id +20405 + Omkhar, LLC + Martin Wegner + marty_wegner&yahoo.com +20406 + Naesasoft,Inc. + Xiaotang Shao + fengyun&naesasoft.com +20407 + Fuhrer Engineering AG + Martin Fuhrer + mf&fuhrer.com +20408 + SNMP Laboratories (formerly 'pysnmp') + Ilya Etingof + support&snmplabs.com +20409 + Kvinnherad Breiband AS + Erlend Moen + erlend&kband.no +20410 + Janusys Networks, Inc. + Alan Yang + alan&janusysnetworks.com +20411 + lepo + Christian Leporati + lepo&carpidiem.it +20412 + OAO Link + Alexey Shabanov + shabanov&nordost.ru +20413 + University of Exeter + Bill Edmunds + webadmin&exeter.ac.uk +20414 + Fracarro Radioindustrie S.r.l. + Bruno Genovese + sysadmin&fracarro.com +20415 + Solid AB + Christer Weinigel + hostmaster&solid.se +20416 + Fifth Third Bank + Jeffrey P. Anderson + jeff.anderson&53.com +20417 + EUROTUNNEL + A.POULAIN + adrien.poulain&eurotunnel.com +20418 + VectorMAX Corporation + Jon Rachwalski + jrachwalski&vectormax.com +20419 + Security Certificates UK ltd + Trevor Kennedy + tkennedy&securitycertificates.co.uk +20420 + Dyaptive Systems Inc. + Robert Dmitroca + rdmitroca&dyaptive.com +20421 + RangoSur S.A. + Jose Luis Beltrami + beltrami&netgate.com.uy +20422 + The Log4perl Project + Kevin M. Goess + cpan&goess.org +20423 + NSS S.A. + Gerónimo Syriani + gsyriani&iplan.com.ar +20424 + LUKA netconsult GmbH + Christoph Kampe + info&luka.de +20425 + Noea Corporation + John White + jwhite&noea.com +20426 + Tellus Group Corp. + Kevin Huang + kevin.huang&tellus.com.tw +20427 + TKD + EngTjong + EngTjong&Tkd.Co.id +20428 + ISTIA-AUTO + M. HARDOUIN Laurent + laurent.hardouin&istia.univ-angers.fr +20429 + Kaliop Interactive Media + POBEL Damien + dpobel&kaliop.com +20430 + OPTIMUM SYSTEMES INFORMATION + Gilbert WIENCEK + gwi&saur.fr +20431 + Inventa Technologies, Inc. + Scott Gray + sgray&inventa.com +20432 + iTRACS Corporation + Andrew Poole + apoole&itracs.com +20433 + Phoenix Broadband Technologies, LLC. + Mike Quelly + mquelly&comcast.net +20434 + Boca Internet Technologies, Inc. dba AlertSite + Ken Godskind + kgodskind&alertsite.com +20435 + Seenetix d.o.o. + Nemanja Lukic + noc&volomp.com +20436 + TransUnion LLC + Ben Klang + bklang&transunion.com +20437 + Func. Internet Integration + Thijs Schnitger + beheer&func.nl +20438 + CITIZEN WATCH CO.,LTD + Osamu Hinishi + hinishi&citizen.co.jp +20439 + Bodgit & Scarper + Matt Dainty + matt&bodgit-n-scarper.com +20440 + PATLITE Corporation + Junpei Nakanishi + junpei.nakanishi&patlite.co.jp +20441 + REMASYS Pty. Ltd. + Simon Kirsch + simonk&remasys.com +20442 + Samsung SDS Co., Ltd. + Lee, Hyoil + hyoil.lee&samsung.com +20443 + Open Development + David Pieper + admin&open-development.com +20444 + MAX SCHMIDT PAPIERLOGISTIK + THOMAS ARWEILER + thomas.arweiler&max-schmidt.de +20445 + Noncommercial partnership + Kilin Oleg Alekseevich + map&kodeks.net +20446 + Colegio Ingenieros Caminos Canales Puertos + Emilio Marin + emarin&ciccp.es +20447 + APX UK Ltd. + David Groves (NOC Operations) + dgroves&apxuk.co.uk +20448 + Gibr.Net + George Campbell + gilko-iana&eds.org +20449 + NetWisdom(Beijing)Technology co,ltd + Yiqiang Ding + dingyiqiang&msn.com +20450 + Indigo Corp. + Hideki Horibe + msp-info&indigo.co.jp +20451 + Identiga Karto + Luis Roberto Cordero + lcordero&identiga.com +20452 + FishNet, Inc. 
+ Michael Brennen + michael&fishnet.us +20453 + Nakina Systems + Steve Rycroft + sjr&nakinasystems.com +20454 + Middlesex Community College + Alan Keniston + oidadmin&middlesex.mass.edu +20455 + Concorde Microsystems + Stefan Siegel + ssiegel&cms-asic.com +20456 + Omnis Network, LLC + Brad Schuetz + brad&omnis.com +20457 + Incipient, Inc. + Mark A Roman + mroman&incipient.com +20458 + Schneider National, Inc. + Rezaul Abid + abidr&schneider.com +20459 + Oki Electric Industry Co., Ltd. (formerly 'OF Networks Co., Ltd.') + Hajime Kawauchi + kawauchi654&oki.com +20460 + goldenfile ltd + John Harrison + jrh&goldenfile.co.uk +20461 + Organika-Kuznetsk + Konstantin Froloff + organika&sura.ru +20462 + Multimedia Project Srl + Cristiano Maria + cristiano&aggroworld.com +20463 + Perceval + Henri-Jean Pollet + hjp&perceval.net +20464 + Mi4e AB + Richard + richard_oid&mi4e.com +20465 + Caja de Ahorros de CASTILLA-LA MANCHA + FAUSTINO VILLARRUBIA CARMONA + fvillc&ccm.es +20466 + DS Wilson Consulting + Dwight Wilson + wilson&cs.jhu.edu +20467 + SAPIENS TECHNOLOGIES Ltd + Itzhak Assaraf + itzik.a&sapiens.com +20468 + TatraMed Software s.r.o. + Milos Opalek + milos.opalek&tatramed.sk +20469 + Lycos Europe GmbH + Marc Willecke + marc.willecke&lycos-europe.com +20470 + WINGcon AG + Fritz Paul + fritz.paul&wingcon.com +20471 + Banca di Roma S.p.A. + Fabio Sciomer + fabio.sciomer&bancaroma.it +20472 + Lyra Network + Jean-luc Lledos + jluc&lyra-network.com +20473 + Parlamento de Andalucia + Jose Angel Bernal + ja.bernal&parlamento-and.es +20474 + LondonLink Ltd + Richard Smith + richard&londonlink.net +20475 + Avalanche Mobile BV + Valerian Chifu + vchifu&avmob.com +20476 + Fachhochschule Ravensburg-Weingarten + Manfred Dorner + dorner&fh-weingarten.de +20477 + Buongiorno S.p.A. + Alessandro Gatteschi + servers&buongiorno.com +20478 + Noopys Store company + Carmen + carmen_wai&yahoo.com +20479 + UMC Genomics Lab + Philip Lijnzaad + genomics-system&med.uu.nl +20480 + StrikeForce Technologies, Inc. + Michael C. Brenner + noc&sftnj.com +20481 + CUBETECH + Namkoong Sun + nksun&icubetech.com +20482 + L B Enterprises + Mistye R Eks + mistye_r_eks&yahoo.com +20483 + Ministerio de Hacienda + Lic. José Manuel Cano Carvajal + canocm&hacienda.go.cr +20484 + Sukra Helitek, Inc. + Ben Myers + dative&sukrahelitek.com +20485 + Fuchsia Open Source Solutions + Conny Brunnkvist + conny&fuchsia.se +20486 + ChyronHego Corporation (formerly 'Chyron Corporation') + Minuk Choi + minuk.choi&chyronhego.com +20487 + AXT Systems Pty Limited + Geoff Swan + geoff&axtsystems.com +20488 + DTMC Systems + Miravalles Guillermo + miravallesg&yahoo.com.ar +20489 + Geotek Design Services + George Carlson + geotekds&swbell.net +20490 + China Putian Institute of Technology + Yan Chunying + yanchy&cpit.com.cn +20491 + Teamsun Technology Co.,Ltd + KEXING QIU + qiukx&teamsun.com.cn +20492 + MTT Computer Consulting, Inc + Matthew Thoren + mthoren&mttcc.com +20493 + Koblenzer Elektrizitätswerk und Verkehrs-AG + Paul R. Schmitz + pschmitz&kevag.de +20494 + Consejo General del Poder Judicial + Belen Chamarro Lerma + belen.chamarro&cgpj.es +20495 + Marvell Semiconductor Israel, Ltd + Tanya Reitman + tanya&il.marvell.com +20496 + CellVision AS + Inge Duklaet + info&cellvision.info +20497 + Universita' Mediterranea di Reggio Calabria + Diego Raffa + diego.raffa&unirc.it +20498 + Gnoble Technologies + Kelly Revels + revelsk&bellsouth.net +20499 + Azeus Systems Ltd. 
+ John Hui + john_hui&azeus.com +20500 + Soft Link AG + Simon Rood + simon.rood&softlink.ch +20501 + Padcom + Yvilde Courtin + ycourtin&padcomusa.com +20502 + Albertsons Inc. + Michael McMurria + michael.mcmurria&albertsons.com +20503 + BearingPoint NZ Ltd + Cliff Pratt + cliff.pratt&bearingpoint.com +20504 + Franz Chladek - EDV Dienstleistungen + Franz Chladek + FChladek&connect.co.at +20505 + Astrogator + Wayne Green + wayne&astrogator.org +20506 + Huetron Co.Ltd + SahngOh Jung + sahngoh&korea.com +20507 + Luvantix Co.Ltd + SahngOh Jung + sahngoh&korea.com +20508 + Urbandale Community School District + Josh Whitver + whitverj&urbandale.k12.ia.us +20509 + Dixon Hughes PLLC + Beth Collins + bcollins&dixon-hughes.com +20510 + National Taiwan University Hospital + Huang Jian-Ming + r92922099&ntu.edu.tw +20511 + Manzanita Systems + Greg Vines + enterprise&lilapple.com +20512 + Romania Internet Security Systems + Florin MANAILA + tech&riss.ro +20513 + SARK Consultants Private Limited + Administrator + admin&sarksoft.com +20514 + Home Office Life kernel + Kazuo Takahashi + info&lifekernel.ne.jp +20515 + FJD Information Technologies AG + Frank Jorga + fj&fjd.de +20516 + Battle Eagle Entertainment, Inc. + David Warth + warthd&qwest.net +20517 + eBuild.ca Inc. + Ilya Belkin + ibelkin&ebuild.ca +20518 + Flashbit Ruf and Heide GbR + Tobias Heide + tobias&flashbit.de +20519 + Y Soft, s.r.o. + Vaclav Muchna + muchna&ysoft.cz +20520 + Cronyx Engineering + Serge Vakulenko + vak&cronyx.ru +20521 + PSDA, Inc. + Ron Plummer + rplummer&ieee.org +20522 + Javalobby.org + Matthew Schmidt + matt&javalobby.org +20523 + Tom Addis Automotive Group + Chad Mueller + chad&lakecityford.com +20524 + SunStar Systems, Inc. + Joe Schaefer + joe+iana&sunstarsys.com +20525 + Anacomp, Inc. + Steve Seremeth + sseremeth&anacomp.com +20526 + Franciscan Missionaries of Our Lady Health System, Inc. + Beverly S. Kennedy + bkennedy&fmolhs.org +20527 + Sathee Inc + Sathee + sathee&gmx.net +20528 + PARANA EM REDE SISTEMAS LTDA. + MARCELO STEGE + GERAL&PARANAEMREDE.COM.BR +20529 + AMETEK SolidState Controls Inc de Argentina + Rodolfo Recanzone + rodolfo.recanzone&ameteksci.com +20530 + Johannes Kornfellner + Johannes Kornfellner + johannes.kornfellner&chello.at +20531 + Matrix Networx + Chad Mueller + matrixnetworx&hotmail.com +20532 + DevStream Corporation + DevStream Administrator + administrator&devstream.com +20533 + 2Know-IT GmbH + Florian von Kurnatowski + Florian.von.Kurnatowski&2know-it.com +20534 + Uniklinik Ulm + Thomas Baur + thomas.baur&uniklinik-ulm.de +20535 + Latvijas Mobilais Telefons SIA + Kaspars Caune + oid&lmt.lv +20536 + Austrian Parliament + Johannes Kornfellner + johannes.kornfellner&parlament.gv.at +20537 + NIPPON EXPRESS CO.,LTD + Hideaki Sukeda + hi-sukeda&nittsu.co.jp +20538 + FIBRENETIX + Peter Green + support&fibrenetix.com +20539 + //////////fur//// + Tilman Reiff + chinchilla&fursr.com +20540 + Red Lion Controls (SIXNET) + Denis Aull + Engineering&RedLion.net +20541 + CITTIO, Inc. + Ross Fujii + rfujii&cittio.com +20542 + AirLink Communications, Inc. + Jim Baichtal + jim&airlink.com +20543 + Optovia Corporation + Hock Lim + lim&optovia.com +20544 + Sungkyunkwan University + Jae-Wan Park + jwpark&ece.skku.ac.kr +20545 + BusinessLink Advertising Ltd. 
+ Sergey Khalyapin + svk1967&mail.ru +20546 + Wissenschaftszentrum Berlin fuer Sozialforschung gGmbH + Peter Rindfuss + rindfuss&wz-berlin.de +20547 + Matthias Braun EDV + Matthias Braun + oid&browny.de +20548 + SAP Hosting + Matthias Braun + braun&sap.com +20549 + Pride S.p.A. + Mario Del Vecchio + m.delvecchio&pride.it +20550 + Red Squared plc + Paul Rathbone + snmp&red2plc.com +20551 + DDR Freak, LLC + Eugene M. Kim + blue&ddrfreak.com +20552 + Swisscom Broadcast SA + Ernst Joachim + Joachim.Ernst&swisscom.com +20553 + Wellington Management Company, LLP + Robert Gropman + rdgropman&wellington.com +20554 + kraai.org + jim kraai + jimgkraai&yahoo.com +20555 + eTelemetry, Inc. + Alan Schunemann + alan&etelemetry.com +20556 + Hastings Entertainment Inc. + Damon Massey + masseyd&hastings-ent.com +20557 + NOFware, Ltd. + Dave McGuire + mcguire&neurotica.com +20558 + University College of Oslo. Faculty of Engineering + Kyrre Begnum + kyrre&iu.hio.no +20559 + Engedi Technologies, Inc. + Jeffrey A. Carley + carley&engedi.net +20560 + Alexander Janssen Consulting + Alexander Janssen + alexander.janssen&gmail.com +20561 + LiveWave, Inc. + Shad J. Aumann + sjaumann&livewave.com +20562 + North Electric Company, Inc. + Brad Fayette + brad.fayette&northelectriccompany.com +20563 + TNC S.A. + Alfredo Ayrala + iana&uxintech.com.ar +20564 + Albuquerque Technical Vocational Institute + Andy Miesem + webmaster&tvi.edu +20565 + Oxford Wireless Networks Ltd + Peter Curran + Peter.Curran&OxfordWireless.net +20566 + Taonix + Jérôme Schell + jerome&taonix.net +20567 + TELEGRID Technologies, Inc. + Jonathan Sharret + j.sharret&telegrid.com +20568 + Citco Technology Mangement, Inc. + Chief Information Officer + it&citco.com +20569 + Inventec Enterprise System Corp. + Anthony Lo + lo.anthony&inventecesc.com +20570 + ShenZhen Yinghetong Information & Technology Co.,Ltd. + cheney + jin_shan&163.com +20571 + International Turnkey Systems + Avinash Dewangan + avinash_dewangan&alpha.its.ws +20572 + virtual-image + Vivian Zingelmann + image&virtual-image.de +20573 + Universitaetsklinikum Aachen + Oliver Kuhl + okuhl&ukaachen.de +20574 + OXYAN SOFTWARE + Benoit NIVESSE + bnivesse&oxyan.com +20575 + Nucleonet Inc + Alex Chauvin + alex&nucleonet.com +20576 + University of Wales, Bangor + Sim Barbaresi + s.barbaresi&bangor.ac.uk +20577 + schunk edv systeme gmbh + Holger Eichhorn + service&schunk.net +20578 + St Ives Plymouth Ltd + k gregory + k.gregory&stivesweb.com +20579 + BITSO Build IT Solutions GmbH + Moritz Beck + moritz.beck&bitso.de +20580 + Kiwi Enterprises + Andrew Ross + support&kiwisyslog.com +20581 + EVR Ltd. + Goldschmidt Ido + ido&everysystems.com +20582 + Ebocom, LLC + Tod Hegstrom + THegstrom&postint.com +20583 + Union Switch & Signal + Ronald J. Victorelli + rjvictorelli&switch.com +20584 + Hunan Talkweb Information System Co. Ltd. + Jingzhou Zhang + lictd&163.com +20585 + Diamond State Port Corp. + Inigo Thomas + ithomas&port.state.de.us +20586 + Century Digital Investment & Management Consulting Ltd. + Charles Feng + zhcharles&21cn.com +20587 + Rose Electronics + Peter Macourek + peter&rose.com +20588 + MaxTronic International Co., Ltd. + Mark Huang + markhuang&maxtronic.com.tw +20589 + UKeduPerson Pilot + Simon McLeish + s.mcleish&lse.ac.uk +20590 + RUBY Inc. + Robert Pragai + pragai&rubin.hu +20591 + VISTA International Ltd. + Mark Itzcovitz + mark.itzcovitz&vistacomp.com +20592 + People's Telephone Saratov, cjsc + Dmitry V. 
Korotkov + dk&cdma-saratov.ru +20593 + Guangdong Multiplication Communication Ltd Co. + Fu yonggen + fuyg&njupt.edu.cn +20594 + XSif Software + Xavier Barrier + xbarrier&xsifsoftware.com +20595 + Prefeitura Municipal de Florianopolis + Ricardo Portes + portes&pmf.sc.gov.br +20596 + TomWare s.r.l. + Roberto Guardigli + rguardigli&tomware.it +20597 + SurfCloud Ltd + Lee Curtis + dox&surfcloud.net +20598 + eReM.Studio + rm&rm.pl + rm&rm.pl +20599 + Coventry Health Care, Inc. + David Hirsh + hirsh&cvty.com +20600 + Atlab s.r.l. + Michele Bevilacqua + michele.bevilacqua&atlab.it +20601 + Thomas Jefferson High School for Science and Technology + Richard Washer + rcwasher&tjhsst.edu +20602 + Tranquillo Development + Carl Corliss + carl&corliss.name +20603 + Softhouse Informatica Ltda. + Jefferson Dumes + webmaster&softhouse.com.br +20604 + Nexior IT Services + Wouter Meijers + wouter.meijers&nexior.nl +20605 + Antepo, Inc. + Jean Louis Seguineau + jean-louis.seguineau&antepo.com +20606 + CLINIQUES UNIVERSITAIRES SAINT-LUC + MATTON JL + jean-louis.matton&uclouvain.be +20607 + University of Angers + François Kermarec + cri&univ-angers.fr +20608 + Radboud University Nijmegen + Peter Clijsters + p.clijsters&uci.kun.nl +20609 + Scalable Computing Lab + Troy Benjegerdes + admin&scl.ameslab.gov +20610 + NOAA, OAR, FSL, ITS, DSG + Patrick Hildreth + patrick.hildreth&noaa.gov +20611 + Flarepath Software Limited + Glen Conway + glen.conway&flarepath.com +20612 + Time iCR + Khenaidoo Nursimulu + knursimulu&timeicr.com +20613 + Université catholique de Louvain (UCL) + Jean-Pierre Kuypers + JPKuypers&sri.ucl.ac.be +20614 + Exeo Technologies Inc. + Claude Mally + claude.mally&exeotechnologies.com +20615 + Netli, Inc. + Lev Walkin + vlm&netli.com +20616 + CGS World Inc. + Karl Kemp + kpk&cgsworld.com +20617 + TeTeSys + Kai Tetzlaff + admin1&thetetzlaffs.de +20618 + Cinetica s.r.l. + Daniele Arduini + darduini&cinetica.it +20619 + UPC Ceska republika, a.s. + UCP Ceska republika + Administrator&upc.cz +20620 + Valox Systems Co.Ltd + SahngOh + sahngoh&korea.com +20621 + Crystal Technology Solutions Group Inc + James Brunke + jbrunke&ctsgi.com +20622 + IainG + Iain Geddes + iaingeddes&lucent.com +20623 + RSDB Holding b.v. + Arjan Eecen + papinet&rsdb.com +20624 + Nexus Telecom AG + Martin Ronner + martin.ronner&frox.com +20625 + Serviciul de Telecomunicatii Speciale + Cristian Zaiu + czaiu&stsnet.ro +20626 + Software Systems A/S + Rune Langoy + sws&sws.no +20627 + Institut National d'Histoire de l'Art + Pascal Presle + ssi&inha.fr +20628 + Qual-Pro Corporation + Darrell Shane + is&qual-pro.com +20629 + STREAMTEL + LUCA ZANETTI + luca&streamtel.com +20630 + ODIBOSS NETWORKS + REGINALD ODINAKACHI NNADIRINWA + odiboss&yahoo.com +20631 + MC110-GRUPO1 + Rene Rodrigues Veloso + rene&pos.facom.ufu.br +20632 + Barracuda Networks, Inc. + Zachary Levow + zlevow&barracudanetworks.com +20633 + Security First Networks + Terry Martin + terry.martin&sfnet.ca +20634 + Langochat + David Rigaudiere + sniper&langochat.net +20635 + IberiSign + OID Administrator + oid_admin&iberisign.com +20636 + Bendigo and District Division of General Practice Inc. + Bruce Farnell + bfarnell&bgodivgp.org.au +20637 + University of Texas of the Permian Basin + Ken Bridges + bridges_k&utpb.edu +20638 + Capital Markets CRC + Chuin Nee Ooi + cooi&cmcrc.com +20639 + Entura, LLC + Sulaiman Ahmad + sahmad&entura.org +20640 + Inomial Pty Ltd + Mark Lillywhite + mark-iana&inomial.com +20641 + Vincent Consulting Group Inc. 
+ Paul M Vincent + paul.vincent&vincentcg.com +20642 + Nexus Community + Tommy Lee + sjlee&nexus.co.kr +20643 + Prime MX + Chandrashekhar Bhosle + cnb&freedomink.org +20644 + Vaasa Polytechnic + Hannu Teulahti + hannu.teulahti&puv.fi +20645 + Poly Information Ltd. + Simon Rapoport + SimonR&PolyInformation.com +20646 + Convergenz + CH Quek + chquek&convergenz.com.sg +20647 + University of Cambridge Computer Laboratory + Ian Grant + ian.grant&cl.cam.ac.uk +20648 + PK7 + Franck Leroy + Franck.Leroy&pk7.fr +20649 + Tyco Safety Products + Ed Jones + edjones&tycoint.com +20650 + WG78 Ptb central + Peter Prohaska + pitrp&wg78.de +20651 + Computer Vision Networks, Inc. + Darrel Clute + drclute&computervisions.net +20652 + Arizona Department of Environmental Quality + David Crowfoot + crowfoot.david&ev.state.az.us +20653 + Illusions Internet Solutions + David Crowfoot + admin&illusions.com +20654 + Telegence Corporation + Edward H Lewis + elewis&telegence.com +20655 + Ikoro Digital Inc + Steven Wood + info&ikoro.com +20656 + Archer + Pawel Aksamit + paksamit&archer.pl +20657 + Rascular Technology Ltd + Roddy Pratt + roddy_iana&rascular.com +20658 + Finobra S.A. + Steven Kappel + skappel&finobra.net +20659 + Comodo Japan Inc. + Takuji Akiyama + sales&comodojapan.com +20660 + Topone Information technology Co., Ltd + YanPeizong + yanpeizong&cntopone.com +20661 + Hitachi ULSI Systems Co., Ltd. + Hidetaka Ohkubo + ookubo&hitachi-ul.co.jp +20662 + Heiko Jerke + Heiko Jerke + info&xercx.de +20663 + BV Associates + Thomas Zumbiehl + zumbiehl&bvassociates.fr +20664 + NDE Netzdesign und -entwicklung AG + Jens-U. Mozdzen + jmozdzen&mozdzen.de +20665 + Enervation GmbH + Thomas Bleckert + bleckert&enervation.com +20666 + Synchronoss Technologies Inc + Aristotle B. Allen + aristotle.allen&synchronoss.com +20667 + Prosilient Technologies AB + Mats Persson + mats&prosilient.com +20668 + Kaballero.Com LLC + Graylend Horn + graylend&kaballero.com +20669 + jelte.com + Jelte van der Hoek + inbox&jelte.com +20670 + SysCologne, Lutz Mischa Heitmüller + Lutz Mischa Heitmüller + oid&syscologne.de +20671 + Gee-Wiz Consultancy + Gabriel Faber + gabrielfaber&yahoo.com +20672 + MemoryLink Corp + Thomas A. Freeburg + tom&memorylink.com +20673 + MRO Direct, Inc + Bruce Kearns + bruce.kearns&mrodirectnet.com +20674 + Fullerton Elementary School District + Sam L. Ricchio + sam_ricchio&fsd.k12.ca.us +20675 + Reed Networks + Walter Reed + wreed&reednetworks.com +20676 + NIS4Grids + Manuel Hess + manuel&62nd.de +20677 + Pulizzi Engineering, Inc. + Joe Skorjanec + joes&pulizzi.com +20678 + Department of Computer Science, Rensselaer Polytechnic Institute + David E. Cross + crossd&cs.rpi.edu +20679 + Helmer & Zimmermann GmbH & Co.KG + Markus Zimmermann + Markus.Zimmermann&hz-bau.de +20680 + Tekmark/CSL International Solutions Inc. + Sanjay Gupta + sgupta&tgs-solutions.com +20681 + Anta Systems, Inc. + Martin Luu + mluu&antasystems.com +20682 + Campusmart Ltd. + Alfred + alfred&campusmart.net +20683 + DigiDoc AB + Kjell Jonsson + kjell.jonsson&digidoc.com +20684 + Metropolitan Health Corporate (Proprietary) Limited + Piet Theron + www_administrator&mhg.co.za +20685 + ADIES + Joseph Alain + alain.joseph&adies.ch +20686 + Pohl & Co. GmbH & Co. KG + Adam Drozdz + adam.drozdz&pohlgruppe.de +20687 + Roving Planet, Inc. + Dave Hetherington + Dave.Hetherington&RovingPlanet.com +20688 + Seiri Inc. 
+ Robert Bownes + bownes&seiri.com +20689 + Alliance Information Systems, LLC + Robert Maynard + robert&ojai.net +20690 + Harald Svab + Harald Svab + svab.h&aon.at +20691 + Techtell, Inc. + Dan Jones + scott.sipe&techtell.com +20692 + Ruffdogs + Adam Lebsack + adam&ruffdogs.com +20693 + DYNAWEB IT Services L.P. + Szabolcs Rumi + support&dynaweb.hu +20694 + Lightspeed Technologies Pte. Ltd. + Mathias Koerber + service&lightspeed.com.sg +20695 + StratusStation Group Inc. + Dwight Herren + dwight&stratusstation.com +20696 + Caixa Andorrana de Seguretat Social + Mr. Lluís Gasia Ricart + lgasia&cass.ad +20697 + ThinPrint GmbH + Bernd Trappe + Bernd.Trappe&thinprint.com +20698 + Lipetskenergo JSC + Vadim Dobroskokin + sysadmin&lipen.elektra.ru +20699 + CitiMortgage + Gavin Haslett + gavin.haslett&citigroup.com +20700 + Trend Communications Ltd + Richard Petrie + richard.petrie&trendcomms.com +20701 + Peryam & Kroll Research Corporation + John Kiney + kinej&pk-research.com +20702 + Ritchie Capital Management + Tom Schnell + oid_admin&ritchiecapital.com +20703 + eBuz Internetdienste GbR + Tom Fischer + tom.fischer&ebuz.de +20704 + muumilaakso ry + Jere Virta + jere&muumilaakso.fi +20705 + Holguin, Fahan & Associates, Inc. + Damon Hoxworth + Damon_Hoxworth&hfa.com +20706 + Vision Web Networks, LLC + Jonathan Krauss + jkrauss&vwnetworks.net +20707 + InterCerve, Inc. + Greg Gonzalez + greg&intercerve.com +20708 + SYSLAB.COM GmbH + Manfred Lang + iana&syslab.com +20709 + Fremme's SOFTWARE-Utvikling + Steinar Fremme + steinar&fremme.no +20710 + American Medical Response Inc. + Mike Hutchins + mike.hutchins&amr.net +20711 + Platform Solutions, Inc. + Natasha Galkina + natasha&platsolns.com +20712 + Paradise Datacom LLC + Geoffrey Blosser + gblosser&paradisedata.com +20713 + Noran Tel Communications Limited + Brent M. Zeiben + bzeiben&norantel.com +20714 + limitland development + Jens Luetkens + j.luetkens&limitland.de +20715 + GiK Gesellschaft fuer innovative Kommunikationssysteme mbH + Andreas Wippermann + oid&gik.de +20716 + Corbel Solutions, Inc + Lee Hundley + leeh&corbelsolutions.com +20717 + FF Network Inc + Gabor Nagy + gabor.nagy&ffnetwork.hu +20718 + American School Foundation of Monterrey, A.C. + Roberto Garcia + roberto.garcia&asfm.edu.mx +20719 + SHIN.Ltd + Francisco Lorenzo de Tuero + lacrimiroot&yahoo.com +20720 + MX Logic Inc + Shaun Bryant + sbryant&mxlogic.com +20721 + Atomicweb, LLC + Lynn Van der Veer + lynn&atomicweb.com +20722 + NetSpira Networks + Luis Pineiro + lpineiro&netspira.com +20723 + Indus International, Inc. + Geo Nordin + geo.nordin&indus.com +20724 + Csibra Bt + Gergo Csibra + csibrabt&csibra.hu +20725 + Etria, LLP + Tom von Schwerdtner + tvon&etria.com +20726 + Wilmington College + Mike Boyle + mike_boyle&wilmington.edu +20727 + Net Theatre + Zenon Panoussis + hostmaster&nettheatre.org +20728 + Spook Limited + Martin Luck + martin&spook.co.uk +20729 + Piotr Matusz + Piotr Matusz + pmatusz&sadyba.elartnet.pl +20730 + eEye Digital Security + Chris Silva + iana&eeye.com +20731 + TrueTel Communications Inc + Larry Ting + larry&truetel.com +20732 + BlueScope Steel Limited + BlueScope Head of IAM + Domains&bluescope.com +20733 + JA Davey + John Davey + jad&davey.net.au +20734 + Internet Sheriff Technology Ltd + Richard Lane + richard&isheriff.com +20735 + ANDSCO I.T.
Pty Ltd + Scott Fisher + scott.fisher&andscoit.com +20736 + RS Telematica e Tecnologia da Informacao Ltda + Heberson Sette de Almeida + rstti&brfree.com.br +20737 + Cynics at Large + Perry The Cynic + pki+mib&cynic.org +20738 + TIBBO Technology, Inc. + Dmitry Slepov (Managing Director) + dima&tibbo.com +20739 + Cortec Systems Pty Ltd + Kent Gibson + kent.gibson&cortecsystems.com +20740 + AXISSOFT Corporation + Hiroshi Hashiguchi + hashiguchi&axissoft.co.jp +20741 + Qala Singapore Pte Ltd + Lim Hong Chuan + noc&qalacom.com +20742 + Hormann Funkwerk Kölleda + Robert Jung + robert.jung&hfwk.de +20743 + ISO (International Organization for Standardization) + Alan Mackenzie + mackenzie&iso.org +20744 + ICZ a.s. + Radek Bohunsky + radek.bohunsky&i.cz +20745 + WLN Technologies + Nick Groene + Nick.Groene&Advalvas.be +20746 + DIS Data Integration Services GmbH + Joerg Mokros + joerg.mokros&arikon.de +20747 + NORTEK + Jean-Luc Guiraud + jean-luc.guiraud&nortek-tv.com +20748 + Frei GmbH + Wolfram Frei + wf&freigmbh.de +20749 + Grintek TCI (Pty) Ltd + Hennie Heyl + hheyl&grintek.com +20750 + Technolution BV + J. Molenmaker + mib_contact&technolution.nl +20751 + PERAX + Barthel + obarthel&perax.fr +20752 + SA Polyclinique de Courlancy + VERRIERE Jean-Charles + jean-charles.verriere&groupe-courlancy.com +20753 + Pharos Consulting (Pty) Ltd. + Jan Jacobs + jacobs_j&mtn.co.za +20754 + Universitätsklinikum Aachen + Helmut Driessen + hdriessen&ukaachen.de +20755 + WestGlobal Ltd. + Fintan Palmer + fpalmer&westglobal.com +20756 + het Concertgebouw NV + Bas Dekker + b.dekker&concertgebouw.nl +20757 + Prisacom s.a. + Uxio Faria + ufaria&prisacom.com +20758 + netSurity Ltd + Jason Banks + jasonb&netsurity.com +20759 + Kepler-Rominfo S.A. + Razvan Sefciuc + rsefciuc&kepler-rominfo.com +20760 + Autonomous noncommercial organization "Scientific and Training Center of Information Security of Pr + Andrey Victorovich Grishin + uncib&bk.ru +20761 + Uniklinikum Mannheim + Dr. Gerald Weisser + gerald.weisser&rad.ma.uni-heidelberg.de +20762 + Passage Consortia C/O Sony Electronics (Trustee) + Lee Pedlow + lee.pedlow&am.sony.com +20763 + Fastenal Company, Inc. + Unix Administration + is_unix_admins&fastenal.com +20764 + David Cross Technical Consulting + David E. Cross + crossd&dcrosstech.com +20765 + Rosati Kain + Stefan Adams + stefan&borgia.coim +20766 + QAI India Ltd + Raja Narayan + rajan&qaiindia.com +20767 + Vereinigte Postversicherung VVaG + Heinrich Krauel + heinrich.krauel&vpv.de +20768 + St. Mary's High School + Stefan Adams + stefan&borgia.com +20769 + Immaculate Conception School + Stefan Adams + stefan&borgia.com +20770 + ADSTRA SYSTEMS INC. + Viviana Bardea + vbardea&adstra.com +20771 + Priority Networks, Inc. + High Mobley + oid&prioritynetworks.net +20772 + Optica Technologies Inc. + Serge Rioux + serge.rioux&opticatech.com +20773 + Accenture Business Services for Utilities + Steen Lauridsen + slaurids&yahoo.com +20774 + OMT Systems (Shenzhen) Limited + Frank Cai + zhaohuicai&o-netcom.com +20775 + Aspire Information Services, LLC + Lane Bryson + lb-iana&coldhardtruth.org +20776 + INAF - Osservatorio Astronomico di Padova + Amedeo Petrella + petrella&pd.astro.it +20777 + Flexetech Solutions, Inc. 
+ Gayatri Raghupatruni + gayatri&flexetech.com +20778 + SmartWare + Amitabh Saxena + A.Saxena&latrobe.edu.au +20779 + Inova Venture Pte Ltd + David Ma + davidma08&hotmail.com +20780 + Gemeinsamenes Gebietsrechenzentrum Hagen - SG 2.5 + Heiko Schack + heiko.schack&ggrz-hagen.nrw.de +20781 + Scientific Research Institute of Applied Information Technologies + Yevhen Khyzhnyak + administrator&ndipit.com.ua +20782 + soc. coop. bilanciai + Mirco Valmori + m.valmori&coopbilanciai.it +20783 + ARGELA Yazilim ve Bilisim Teknolojileri A.S. + Huseyin Erbilgin + huseyin.erbilgin&argela.com.tr +20784 + HiTeKnowledge Limited + Justin Bowser + info&htk.co.uk +20785 + Beta Systems Software AG + Michael Feill + michael.feill&betasystems.com +20786 + Techspan System Ltd + Andy Pyrski + andy.pyrski&jarvis-uk.com +20787 + Parc Cientific de Barcelona + Miguel Angel Moruno Aparicio + sic&pcb.ub.es +20788 + Unassigned + Returned 2005-05-12 + ---none--- +20789 + PowerMeMobile.com + Elie Habib + elie&powermemobile.com +20790 + Harrington Group, Inc. + BJ Hibbert + hostmaster&hgi-fire.com +20791 + keyX.net Consulting SRL + Adrian Ciocildau + office&keyx.net +20792 + Erdmann Systemberatung + Bernhard Erdmann + hostmaster&coredumps.de +20793 + NetBridge Inc. + Jaeyoung A. Lee + iana&net-bridge.com +20794 + IT Schaller GmbH + Martin Schaller + martin.schaller&gmx.de +20795 + REALTIMEIMAGE + Sasha Ovsiankin + dwfm-se&rtimage.com +20796 + ShenZhen Roytel Technology Ltd. + George Lee + lj&roytel.com.cn +20797 + Great Dragon Infomation Technology(Group)Co.,Ltd. + Ma Zunyun + mzy2003&hotmail.com +20798 + TDN GmbH + Oliver Buchholz + Oliver.Buchholz&tdn.de +20799 + Universitaet Konstanz + Andreas Merkel + andreas.merkel&uni-konstanz.de +20800 + JSC Promsvyaz-Invest + Ernest Moshkov + erzy&sc.ru +20801 + Exxon Mobil Corporation + Internet Coordinator + corporate.internet.domain.name.coordinator&exxonmobil.com +20802 + Sanlam Ltd. + Jan de Klerk + root&sanlam.co.za +20803 + Tyler Retail Systems, Inc. + Chris Curran + ccurran&tylernet.com +20804 + Open PLC European Research Alliance (Opera) + Uwe Mietrasch + u.mietrasch&ppc-ag.de +20805 + Beijing Sunniwell BroadBand ditital technology Corp. Ltd. + Wu Feng + wufeng&sunniwell.net +20806 + Segurmatica + Jorge Lodos + lodos&segurmatica.com +20807 + Federal Bureau of Investigation + Matthew Estes + mestes1&leo.gov +20808 + Monaco Telecom + M. Hubert PHAN + h.phan&monaco-telecom.mc +20809 + Agami Systems, Inc. + Agami Systems + mib-contact&agami.com +20810 + Hamilton Consulting NSW Pty Ltd + Bruce MacLean + maclean&pacific.net.au +20811 + International Securities Exchange, LLC + John Ryan + jryan&iseoptions.com +20812 + NetMon Information Systems Ltd. + PH Chiu + phchiu&netmon.com.hk +20813 + Ipcon Informationssysteme oHG + Helmut Adams + head&ipcon.de +20814 + AMC SA + Emmanuel Dubecq + emmanuel.dubecq&amcsa.fr +20815 + Zend Technologies Ltd. + Stanislav Malyshev + stas&zend.com +20816 + Alexandru Ioan Cuza University + dr. Octavian RUSU + octavian.rusu&uaic.ro +20817 + Argos Messtechnik GmbH + Ralph Kondziella + rk&argos-messtechnik.de +20818 + SUNTEK TECHNOLOGY CO., LTD. + Daniel Qin + qyd&suntektech.com +20819 + Allenbrook, Inc. + Joe Sanderson + jsanderson&allenbrook.com +20820 + The University of Texas at San Antonio + Steven Daly + UNIX&utsa.edu +20821 + Information and Display Systems, LLC + Mitch Mitchell + mmitchell&ids-sports.com +20822 + Franwell, Inc. 
+ John Stephens + john.stephens&franwell.com +20823 + Carefx Corporation + Marc Lottman + mlottman&carefx.com +20824 + Lions Gate Software, Inc. + Ronald Olshausen + rgo&alumni.indiana.edu +20825 + Touch22 Software and Consulting + Larisa Levental + mib&touch22.com +20826 + Convera + Rajiv Dewan + svc-iana-oid-admin&convera.com +20827 + Banco Central de Costa Rica + Roy Valenciano + valencianogr&bccr.fi.cr +20828 + Schweizerische Lebensversicherungs- und Rentenanstalt + Stephan Toggweiler + oid-admin&swisslife.ch +20829 + Stadtverwaltung Dresden + Josef Müller + jmueller&dresden.de +20830 + Reflex AS + Ove Ruben R Olsen + ruben&reflex.no +20831 + CZFree.Net + Jiri Binko + jbinko&mis.de +20832 + Ryder Systems Ltd. + IT Services + itservices&rydersystems.com +20833 + Terminales de Telecomunicacion Terrestre, S.L. + Emilio Tejedor Escobar + etejedor&ttt.es +20834 + Alike Group + Frederic Segaud + fsegaud&alike-group.com +20835 + BD Digital Lab. Co., Ltd + Soonmyung Hong + sonnet&bd-lab.com +20836 + Confer Computing Consultants Company + WONG, Man Fai + confer_computing&yahoo.com.hk +20837 + eRJe.net + Robert Joosten + dakloos&internet.nl.eu.org +20838 + GaVI mbH + Andreas Legler + andreas.legler&gavi.de +20839 + Broadcast Tools, Inc. + Don Winget + support&broadcasttools.com +20840 + Thomson Technology Limited + A D Thomson + sandy.thomson&thomsontechnology.co.uk +20841 + The Community Group Inc + Matt Einson + meinson&communitydaycare.org +20842 + Arena Solutions, Inc. + Les Niles + lniles&arenasolutions.com +20843 + Philipp Strozyk + Philipp Strozyk + pstrozyk&web.de +20844 + L'OREAL + Georges Campos + gcampos&rd.loreal.com +20845 + Linux Information Systems AG + Sebastian Hetze + s.hetze&linux-ag.de +20846 + SURFsara + Michel Scheerman + michel.scheerman&surfsara.nl +20847 + Atlas Development Corporation + Russell von Blanck + rblank&atlasdev.com +20848 + Zope Corporation + Richard Liming + rpl&zope.com +20849 + OMT Systems (Shenzhen) Limited + Frank Cai + zhaohuicai&o-netcom.com +20850 + TechNerdVana + Joshua E Malinowski + theepengu1n&technerdvana.com +20851 + Groupe Bell Nordiq inc. + Durand Christophe + netadmin&telebec.com +20852 + CrimTrac + Cliff Van Lohuizen + cliff.vanlohuizen&crimtrac.gov.au +20853 + Eurorail International + Peter Dherdt + peter&eurorail.be +20854 + OTC Wireless Inc. + Alex Tsao + atsao&otcwireless.com +20855 + Scientific Technologies Corp + Steven Bergom + Steven_Bergom&stchome.com +20856 + Pattern Matched Technologies + Rudolph van Graan + registrations&patternmatched.com +20857 + Inspired Technologies Ltd + Sunny Osaje + sc_osaje&yahoo.com +20858 + Casa Systems, Inc. + mib support + mibsupport&casa-systems.com +20859 + Citco Technology Mangement, Inc. + Chief Information Officer + it&citco.com +20860 + Liberty Enterprises Inc. + Todd Wickard + twickard&libertysite.com +20861 + Dansk System Elektronik A/S + Mads Grønfeldt + mmg&dse.dk +20862 + Online Learning Australia Pty. Ltd. + Evan McLean + root&datatask.com.au +20863 + Utiba Pty Ltd + Systems Admin + sysadmin&utiba.com +20864 + Tower Technologies + Alessandro Zummo + noc-mib&towertech.it +20865 + Sri Lanka Telecom + Sanjaya Prabath + sanjayap&slt.com.lk +20866 + Eljakim Information Technology BV + E. Schrijvers + info&eljakim.nl +20867 + danfferliu Corporation + Danffer Liu + danfferliu&danfferliu.com +20868 + Helge Gudmundsen + Helge Gudmundsen + helge&sapo.pt +20869 + Frank Vercruesse + Frank Vercruesse + iana.nospam&vercruesse.de +20870 + Pufferbox Pty. Ltd. 
+ Jim Lam + jim&pufferbox.com.au +20871 + Kredietbank S.A. Luxembourg + Jean-Marc SIMONIS + jean-marc.simonis&kbl-bank.com +20872 + Sto AG + Philipp Bellhaeuser + p.bellhaeuser&stoeu.com +20873 + PROTEI Ltd + Irina Vakker + support&protei.ru +20874 + Håmsø Patentbyrå ANS + Odd Skjæveland + odd.skjaeveland&hamso.no +20875 + DevExperts LLC + Roman Tsiroulnikov + romanvt&devexperts.com +20876 + Vorarlberger Telekommunikations GesmbH + Oliver Peter + peter&vtg.at +20877 + Vine Linux + Daisuke SUZUKI + Vine&vinelinux.org +20878 + VineCaves, Ltd. + Daisuke SUZUKI + info&vinecaves.com +20879 + MBIT GmbH + Michael Nemecky + mbit-oid&mbit-gmbh.de +20880 + MagnaQuest Technologies + Ravindra + ravindra&magnaquest.net +20881 + Catenare LLC + Johan Martin + jnm&catenare.com +20882 + PCDEMANO Inc + Jorge Fernandez Diaz + zerote&gmail.com +20883 + Evangelische Landeskirche Wuerttemberg + Peter Pfrommer + Peter.Pfrommer&elk-wue.de +20884 + BDT GmbH & Co. KG + Thomas Weber + Thomas.Weber&bdt.de +20885 + Kontron Modular Computers SA (formerly 'Thales Computers') + Sophie Rousseau + sophie.rousseau&kontron.com +20886 + Dr. Glinz COVIS GmbH + Dietmar Bode + hostmaster&covis.de +20887 + SOLSOFT + Hostmaster + hostmaster&solsoft.com +20888 + SIS Spektrum s.r.o. + David Kolarczyk + vyvoj&sis-spektrum.cz +20889 + Comsys B.V. + Walter Botman + walter.botman&comsys.nl +20890 + Sebastian Kueppers Computer-, Medien- und Eventservice + Sebastian Kueppers + info&sk-medienservice.de +20891 + Star Internet Ltd. + Simon Coy + scoy&star.net.uk +20892 + CITEL Technologies Inc. + Steve Towlson + steve.towlson&citel.com +20893 + LShift Ltd + Stuart Mottram + query&lshift.net +20894 + Yirdis B.V. + T. Schipper + info&yirdis.nl +20895 + Diplomado + Ximena Ruiz + xruiz&deloitte.com +20896 + NuVox Communications, Inc. + Wiley Wimberly + wiley&nuvox.net +20897 + Determina Inc. + Warren Wu + warren&determina.com +20898 + Sungard SBI + Yossi Appleboum + yossia&web-silicon.com +20899 + Apollo Interactive, Inc. + Richard Balue + sysop&apollointeractive.com +20900 + Quantech Global Services LLC + Vasu Vuppala + vasu.vuppala&quantechglobal.com +20901 + Virtual Business Communities Inc. + Wayne Craig + wcraig&vbcglobal.com +20902 + johnstonshome.org + Simon Johnston + simon&johnstonshome.org +20903 + Children's Hospital - Boston + Jim Shattuck + james.shattuck&childrens.harvard.edu +20904 + Risolviamo + Stefano Merlo + ns&risolviamo.com +20905 + Unicible S.A. + Security Services + security.services&unicible.ch +20906 + Cubika S.A. + Walter Picone + wpicone&cubika.com +20907 + Regent College + Wan-Phek How + wphow&regent-college.edu +20908 + Emergin, Inc. + Juan Sierra + juan.sierra&emergin.com +20909 + Exempla Healthcare + Liam Schneider + schneiderl&exempla.org +20910 + PT OneHUB Technology + Ronny Haryanto + ronny.haryanto&onehub.net +20911 + twilley.org + John Twilley + jmt+iana&twilley.org +20912 + Publishing firm «Vidrodzhenia» Ltd. + Ihor Babyk + babyk&lviv.farlep.net +20913 + OJSC RTComm.RU + Kamolov Sergey + s.kamolov&rtcomm.ru +20914 + Deutsche Telekom - CSC Wuerzburg + Ralph Gessner + ralph&cscwzb.de +20915 + NATIONAL-BANK AG, Essen + Mr. Axel Singhof + axel.singhof&national-bank.de +20916 + AVTECH Software, Inc. + Richard Grundy + RickGrundy&AVTECH.com +20917 + FondsServiceBank + Guido Rendel + guido.rendel&fondsservicebank.de +20918 + Wicked Studio + Rob Nelson + rob.nelson&stuntware.com +20919 + Manobi + Raphael Bellec + public_technical_contact&manobi.net +20920 + PKWARE, Inc. 
+ Jon Harvie + jon.harvie&pkware.com +20921 + Hillstone Products Ltd + Paul Smethurst + paul&hillstone.co.uk +20922 + Pro Dimension Ltd. + Peter Jantunen + peter&prodimension.com +20923 + Myxomop + Peter Novodvorsky + nidd&myxomop.com +20924 + Proyecto OTF U.de.Chile + Rafael Catalan Diaz + rcatalan&redbanc.cl +20925 + Baker & McKenzie + Paul A. Petersen + technicalcontact&bakernet.com +20926 + Mase Technologies, LLC + Perry Schwartz + perry&mase.com +20927 + TSI Sports Incorporated + Perry Schwartz + aps&tsius.com +20928 + Tangent Systems + Philip Correia + oid&singularity.co.za +20929 + SYSGO AG + Rolf Offermanns + roffermanns&sysgo.com +20930 + René Alegría Sáez + René Alegría Sáez + ralegria&corpbanca.cl +20931 + Software Builders Exchange + Roby E. Gamboa + robygamboa&gmail.com +20932 + yobe.org + Roman Kunert + rkunert&yobe.org +20933 + Tomsk State University of Control Systems and Radioelectronics + Roman V. Mescheriakov + mrv&security.tomsk.ru +20934 + ITdesign software projects and consulting + Ronald Muenzker + ronald.muenzker&itdesign.at +20935 + Anystream, Inc. + Sebastian Fonss + sfonss&anystream.com +20936 + CHINA COMMUNICATIONS STANDARDS ASSOCIATIONS + LIUCHENGLONG + liuchenglong&huawei.com +20937 + Nimrod AS + Stian W. Arnesen + enterprisesmib&nimrod.no +20938 + ETL Systems Ltd. + Andrew Wheatley + andrew.wheatley&etlsystems.com +20939 + videoNEXT LLC + Andriy Fomenko + afomenko&videonext.com +20940 + The eBiz Shop, LLC. + Matt Allen + mallen&theebizshop.net +20941 + Palo Alto Research Center, Inc. + Glenn Durfee + gdurfee&parc.com +20942 + China Telecom-Guangzhou Research and Development Center + guomw + guomw&gsta.com +20943 + QOSMOS SA + Eric Horlait + Eric.Horlait&qosmos.fr +20944 + Modular Blade Server + Murali Sundar + murali.sundar&intel.com +20945 + East Online, LLC + Alexander Ilyushin + ilyushin&eastonline.ru +20946 + BDM Business Data Management GmbH + Bernhard Lukassen + bl&bdm-systems.com +20947 + Cox Communications San Diego + Austin Hill + austin.hill&cox.com +20948 + manzanita Inc. + Juan Perez + andresvasquez&linuxmail.org +20949 + Anadarko Petroleum Inc. + Allan Wolfe + allan_wolfe&anadarko.com +20950 + Interhack Corporation + Bill Anderson + bill+ianaoid&interhack.com +20951 + Brevient Technologies, Inc. + Andy Brezinsky + abrezinsky&brevient.com +20952 + Exabridge + Ignatius Oh + jsoh1012&empal.com +20953 + Haifa University + Arthur Shkabatur + arthur&univ.haifa.ac.il +20954 + Xtendreach Limited + Arnoud van der Wal + arnoud.vanderwal&xtendreach.com +20955 + Sunet + Andrew Hosie + sysadmin&sunet.com.au +20956 + Emerson Climate Technologies, Alco Products + Hans-Juergen Bersch + HJBersch&ecopeland.com +20957 + E-Government Solutions (UK) Ltd. + Gareth Boden + administrator&egsgroup.com +20958 + I-Nex Corporation Pty. Ltd. + Adam Gray + adam&i-nex.com.au +20959 + Siemens d.d. Hrvatska + Ivana Beli + ik-management&siemens.hr +20960 + Laboratory of Content Systems(LCS) + Alexandr E. 
Solokow + aeriman&aeriman.ru +20961 + Communication Technologies + Fawad Nazir + fawad.nazir&gmail.com +20962 + Multimedia Polska + Marek Lukaszuk + m.lukaszuk&multimedia.pl +20963 + Kattare Internet Services + Ethan Burnside + burnside&kattare.com +20964 + Camille Bauer + Thomas Keusch + thomas.keusch&camillebauer.com +20965 + HVR Consulting Services Ltd + Martin Budd + martin.budd&hvr-csl.co.uk +20966 + Jesus College, Oxford + John Ireland + computing.manager&jesus.ox.ac.uk +20967 + Anevia + Damien LUCAS + contact&anevia.com +20968 + Genomatix Software GmbH + Markus Bayerlein + bayerlein&genomatix.de +20969 + EUDATA SYSTEMS S.A. + Charles CHRISTOPH + C.Christoph&eudata.be +20970 + LISA GmbH + Hans-Peter Jansen + hp&lisa-gmbh.de +20971 + Baltnet Ltd. + Ivari Horm + admin&baltnet.ee +20972 + British Airways Plc + Matt Hudson + matt&ba.com +20973 + NextiraOne Czech s.r.o. + Ctirad Navrátil + ctirad.navratil&nextiraone.cz +20974 + American Megatrends, Inc + Kenny Chiang + kennychiang&ami.com.tw +20975 + PONTUSYS + Kang Kyung Wan + kwkang&it.co.kr +20976 + Paul Smith Computer Services + Paul Smith + paul&pscs.co.uk +20977 + Comune di Imola + Mascaro Francesco + mascaro.f&comune.imola.bo.it +20978 + DC-SatNet Ltd. + Matthew Wilson + matthew.wilson&dc-sat.net +20979 + WT Finland Ltd + Juha Kumpulainen + juha&wt.fi +20980 + tux LLC + Martin Brulisauer + hostmaster&tux-gmbh.ch +20981 + Atrocity MUD + Bruce Bye + feliks&atrocity.org +20982 + PCDEMANO Inc + Jorge Fernandez Diaz + zerote&gmail.com +20983 + IIT Madras + Prof. V.Jagadeesh Kumar + vjk&iitm.ac.in +20984 + Geonetics + George Zullich + gzullich&geonetics.net +20985 + Nethonnun ehf. + Kristinn E. Arnarsson + hostmaster&nh.is +20986 + Universidad del CEMA + Juan Manuel Calvo + jmc&cema.edu.ar +20987 + Philadelphia Computer Institute + Tao Jiang + jiang&pc-institute.com +20988 + Rackspace + Christopher Schneider + admins&rackspace.com +20989 + Systems Research and Development, Inc. + Brian Macy + bmacy&srdnet.com +20990 + Aspect Loss Prevention LLC + Joe Maier + jmaier&aspectlp.com +20991 + Family-Williams Inc. + Bruce Williams + bruce&family-williams.com +20992 + CradlePoint, Inc. + Patrick Sewall + psewall&cradlepoint.com +20993 + Camujo + Carlos Munoz + carlos&camujo.cl +20994 + Europa Communications Pty Ltd + John Young + jyoung&europa.com.au +20995 + Progress Energy + Edwin C. Goff III + itsecurity&pgnmail.com +20996 + ACCESS Co. Ltd. + Paul Canavese + access-mibs&access-us-inc.com +20997 + Lotus Technologies SRL + Franco Galián + fg&lotustech.com.ar +20998 + Honeywell International Inc + Joel Knox + joel.knox&honeywell.com +20999 + Willis Group + Matt Parkes + matt.parkes&willis.com +21000 + imetric + Marco Ibarra + mibarra&bice.cl +21001 + Stratizon Inc. + Jianbo Stancil + jstancil&stratizon.com +21002 + Parallax Networking Limited + Parmeshwar Velayudhan Nair + parm&s2000.demon.co.uk +21003 + Dynamic Network Integration, Inc. + Brian Snipes + bsnipes&dni-online.com +21004 + American Technology Integrators Corporation + Jim Woodmansee + jimw&ati-sf.com +21005 + GERMAN NETWORK GMBH + MR. CALDAROLA + IANA&GERMAN-NETWORK.COM +21006 + OpenDAS.org + Michael R. Crawford + mike&opendas.org +21007 + InoStor + James G. Sack + jsack&inostor.com +21008 + Informed Control Inc. + Mark Wahl + mark.wahl&informed-control.com +21009 + McGuire & Associates, Inc + Deane McGuire + deane&gomcguire.com +21010 + eCollege + Jared Hedman + jaredh&ecollege.com +21011 + DotNetShop + Franz Humplmair + FH&dotnet-shop.de +21012 + iHotel International Inc. 
+ Mike Schmidt + mike.schmidt&ihotel.ca +21013 + Xirrus, Inc. + Dirk Gates + dirk.gates&xirrus.com +21014 + Gamerz Hub + Jeremy Lee + gamerzhub&hotmail.com +21015 + Art of Living Foundation + Hariharan Gopalan + hari&artofliving.org +21016 + IntruGuard Devices + Mark Guinther + markg&intruguarddevices.com +21017 + IPeak Networks Inc. + Head of Development + oidregister&ipeaknetworks.com +21018 + Paxar Americas, Inc. + Jeanne Duckett + jeanne.duckett&paxar.com +21019 + River Systems, Inc + David Miller + miller392&yahoo.com +21020 + Stowarzyszenie Freeze NET Amatorska Siec Komputerowa + Kuba Tyszko + kuba&lbl.pl +21021 + Perimeter Technology Center, Inc. + Brad Thomas + bthomas&perimetercenter.com +21022 + The College Board + Doug Brown + iana&collegeboard.org +21023 + Forbrich Computer Consulting Ltd. + Hans Forbrich + Hans&ForbrichComputing.ca +21024 + Electronics corporation of india. Ltd + MUDIAM BADRINARAYANA,HEAD,INFORMATION TECHNOLOGY SERVICES DIVISION,INFORMATION TECHNOLOGY & TELECOM GROUP + badri&ecil.co.in +21025 + ICOMM TELE LTD. + J.Padma Latha + icommrnd&icommtele.com +21026 + Hauf Electric Co. + Brian Hauf + b_hauf&yahoo.com +21027 + MIP + Miloslav Grundmann + grundman&mip.ups-tlse.fr +21028 + Digital Motorworks, LP. + Jon R. Nials + jnials&digitalmotorworks.com +21029 + Giant Steps + Udi Margolin + udi&gntsteps.com +21030 + PeerApp + Joshua Kenan + shuki&peerapp.com +21031 + Helga Adam und Rocio Manzano Romero GBRmbH + Michael Adam + michael.adam&adam-manzano.de +21032 + Argent Networks ltd + Ian Ruddell + ian.ruddell&argentnetworks.com +21033 + Taikang Life Insurance Co.,Ltd. + Han Lin + hanlin&taikanglife.com +21034 + Pratama Sastriawan Systems + Joko Banu Sastriawan + sastriawan11&yahoo.com +21035 + DEV Systemtechnik GmbH + Julien Langer + iana&dev-systemtechnik.com +21036 + Ultrapower Software Co., Ltd. Beijing + Zhang Guobo + guobo&ultrapower.com.cn +21037 + CHUNG HUNG STEEL CO., LTD. + Cheng Ze Li + yl26200&chsteel.com.tw +21038 + Radio IP Software Inc. + Germain Emond + germain.emond&radio-ip.com +21039 + YuCa Tech. Corp. + He Yu + hefish&cz8.net +21040 + Fiberplex, Inc + Brian Markey + bmarkey&fiberplex.com +21041 + Banchile Corredores de Bolsa S.A. + Cristian Zenteno + cristian.zenteno&banchile.cl +21042 + PRT Systems Ltd + Paul Thornton + monitoring-help&prtsystems.net +21043 + AREVA + S PIERRAT + spierrat&cogema.fr +21044 + Mauro Calderara + Mauro Calderara + mcalderara&phys.ethz.ch +21045 + Intinor AB + Roland Axelsson + info&intinor.se +21046 + University Of Wisconsin Extension + Bruce LaBuda + bruce.labuda&uwex.edu +21047 + Banco de Credito e Inversiones + Jose Mauricio Ballivian Arauz + mballiv&bci.cl +21048 + Copenhagen Municipal + John Hansen + john.hansen&faf.kk.dk +21049 + ROMPETROL SA + Bogdan Florea + bofh&rompetrol.com +21050 + Freebits + Marcel Karras + toka&freebits.de +21051 + Init Seven AG + Marco Huggenberger + huggenberger&init7.net +21052 + Interact S.A. + Martin May + maym&interact.lu +21053 + Singlefin + John Runnels + jtrunnels&yahoo.com +21054 + Yardi Systems, Inc. + Matthew Van Gundy + matthew.vangundy&yardi.com +21055 + Sri Lanka Telecom Services Ltd. + Nuwan Dharmasena + nuwan&slts.lk +21056 + DeployLinux Consulting + Matthew Marlowe + matt&deploylinux.net +21057 + Themis Computer + jean-francois simon + jfs&themis.com +21058 + Tizor Systems + Peter Smith + peter&tizor.com +21059 + Nyvri + Oystein Steimler + os-oid&nyvri.net +21060 + Global Healthcare Exchange + Leigh Anderson + landerson&ghx.com +21061 + Netaquila Solutions Pvt. 
Ltd + Manpreet Singh Nehra + manpreet&netaquila.com +21062 + Chris Forkin Consulting + Chris Forkin + chris&forkin.com +21063 + MaqSys + Maqsood Alam + maqsoodalam_&hotmail.com +21064 + TSingTec Ltd. + jizd + jizd&163.com +21065 + SHENZHEN WLAN Research Center,CCSA + LISHOUBIN + lishbg&huawei.com +21066 + SNCF (Société Nationale des Chemins de fer Français) + Pascal MERCIER + IPNetwork.Contact&sncf.fr +21067 + Elitecore Technologies Ltd. + Ajay Iyer + ajay&elitecore.com +21068 + University of Southern Denmark + Sven Meiborg Sorensen + sms&it-service.sdu.dk +21069 + Radford Control Systems + Keith Rathband + keithr&radford-controls.com +21070 + Intentia Research & Development + Mart Muts + martmuts&gmail.com +21071 + EFS Inc. + Paul LaSalle + paul&efs.ca +21072 + Arnold Magnetics + Jean P Bourget + jbourget&arnoldmagnetics.com +21073 + webcreations.ca + Andrew Joyce + joyce&webcreations.ca +21074 + PureWave Networks, Inc. + Eric Garcia + eric&scorchedminds.com +21075 + Old Red Mill + John Zavgren + john&zavgren.com +21076 + Science Museum of Minnesota + Joel Miles + jmiles&smm.org +21077 + DigiTar + Jason J. W. Williams + williamsjj&digitarx.com +21078 + dafa + Daniel de la Fuente + dafa2002&cs.buap.mx +21079 + HAIPE + HAIPE Program Manager + haipe_po&missi.ncsc.mil +21080 + Frog Navigation Systems B.V. + Rene Jager + renej&frog.nl +21081 + Persium Inc. + Mr Behruz RUSHENAS + brushenas&sbcglobal.net +21082 + EUN Partnership a.i.s.b.l. + Jean-Noel Colin + jean-noel.colin&eun.org +21083 + Mairie de Paris + Didier Ajax + didier.ajax&paris.fr +21084 + SD Laboratories + Theron Bair + sdeath&sdeath.net +21085 + Open Alliance Sofware Libre, S.A. + Marc Munoz + marc&ingent.net +21086 + Monolith Productions + Erik De Bonte + matrixservers.snmp&lith.com +21087 + Infosia Services + James Barrett + oid&infosia.net +21088 + krix.biz Internetdienstleistungen + David Krix + info&krix.biz +21089 + Flywheel Corporation + Systems Administrator + admin&flywheelcorporation.com +21090 + Ithaka Harbors, Inc. + Lilian Wang + lwang&ithaka.org +21091 + Exinda Networks Pty Ltd + Chris Siakos + chris&exinda.com +21092 + Raindance Wireless + Darrel Bowman + db&mynetworkcompany.com +21093 + Got.Net - The Internet Connection, Inc. + Mike Roach + support&got.net +21094 + Ricoh Australia Pty Ltd + Niall McLoughlin + nmcloughlin&ricoh.com.au +21095 + Commonwealth Bank of Australia + Keith Westley + keith.westley&cba.com.au +21096 + Quantier Inc. + Albert Chang + albert_chang&QuantierTech.com +21097 + Kerr-McGee Corporation + Karen S Tompkins + ktompkins&kmg.com +21098 + IECAS(Chin. Acad. of Sci) + ZHIHUI-ZHENG + maildode&yahoo.com +21099 + Alltrix Sdn Bhd + Chau Te Han + thchau&alltrix.biz +21100 + Linwork Informática S/C Ltda + André Alexandre Gaio + aagaio&linwork.com.br +21101 + Verreau Enterprise + joseph verreau + jverreau&chartermi.net +21102 + ControlTier Software, Inc. + Charles Scott + chuck&controltier.com +21103 + Mandrakesoft + Frederic Lepied + flepied&mandrakesoft.com +21104 + Mofet Instititue + Alex Shpisman + salex&macam.ac.il +21105 + MANGO networks Inc. + Douglas Wallace + dwallace&mangonetworks.com +21106 + Shanghai Ulink Telecom Technologies Co., Ltd. + Jianhong Jia + support&ulinkcom.com +21107 + Novagem Ltd + Giles Sadler + giles.sadler&novagem.co.uk +21108 + Harting Electric GmbH & Co KG + Klaus Sperlich + Klaus.Sperlich&HARTING.com +21109 + Knowledge Zone + JJ van Gorkum + iana&knowzone.org +21110 + SANITOP-WINGENROTH GmbH & Co. 
KG + Michael Baeumker + m.baeumker&sanitop-wingenroth.de +21111 + Datalogic S.p.A + Marco Balestra + marco.balestra&it.datalogic.com +21112 + TNO Telecom + Jan Sipke van der Veen + j.s.vanderveen&telecom.tno.nl +21113 + Hashbang Consulting Ltd + Peter Farmer + peter&hblc.co.uk +21114 + Contec s.j. + Gronowski Ryszard + ryszard.gronowski&contec.com.pl +21115 + TFTEN SA + ROUEN Alain + arouen&tften.com +21116 + NoVi Pawel Zimnoch + Pawel Zimnoch + pawel.zimnoch&novi.com.pl +21117 + AMG Systems Limited + Fred Burton + fred.burton&amgsystems.co.uk +21118 + AB2R + David PHAM-VAN + david&ab2r.com +21119 + Guru, informacijske tehnologije d.o.o. + Ales Smodis + ales.smodis&guru.si +21120 + Sanofi-Aventis + Harjinder Nijjar + Harjinder.Nijjar&sanofi-aventis.com +21121 + m0n0 + christopher snow + cs&m0n0.co.uk +21122 + G.I.S. Global Information Services GmbH + Jens-Erik Hansen + Jens-E.Hansen&gis.de +21123 + Amps llc + Fabio Badilini + badilini&s-llc.com +21124 + Adesium + Marie Tourne + marie.tourne&adesium.com +21125 + dico-online + Selim Baccar + baccar_selim&yahoo.fr +21126 + vFortress Network Security Pvt. Ltd. + Subash Warrier + manoj_jain&vfortress.com +21127 + KCI Technologies, Inc. + Rob Carlson + rcarlson&kci.com +21128 + Heimetli Software AG + Peter Tellenbach + pen&heimetli.ch +21129 + Ideal Solution, LLC + Jeff Lawton + host&idealso.com +21130 + GlassHouse UK Ltd + Dominic Talbot + dtalbot&glasshouse.com +21131 + Phonesync Ltd. + David Jones + david.jones&phonesync.com +21132 + Novum Information Technology BV + Ferdinand de Bakker + ferdinand.de.bakker&novum-it.com +21133 + University of Iowa Hospitals and Clinics + Robert Heitman + robert-heitman&uiowa.edu +21134 + University of Minnesota + David Carlson + davec&umn.edu +21135 + Kimmel.biz + D. Brian Kimmel + briank&kimmel.biz +21136 + North Central University + IT Department + it&northcentral.edu +21137 + Astron Computer Corporation + Paul Lovelace + st_lovelace&tarleton.edu +21138 + Electrosys S.r.l. + Stefano Dilio + diliostefano&electrosys.it +21139 + Proofpoint, Inc. + Andy Maas + x-iana&proofpoint.com +21140 + Xernolan Consulting + Dano Carroll + dano&xernolan.org +21141 + Elemental Security, Inc. + Joseph Gow + joe&elementalsecurity.com +21142 + ITER + Hans-Werner Bartels + bartelh&itereu.de +21143 + Devis + Martin Hudson + MHudson&devis.com +21144 + Maieutica - Cooperativa de Ensino Superior, Crl + Alexandre Valente Sousa + avs&ismai.pt +21145 + Intalio, Inc. + Rodolphe Pineau + pineau&intalio.com +21146 + Wiltel Communications LLC + Bogdan Kaczmarek + bogdan.kaczmarek&wiltel.com +21147 + Innotrac Corporation + Justin Matlock + jmatlock&innotrac.com +21148 + Soleil Securities Group, Inc. + Jonathan Phillips + technology&soleilgroup.com +21149 + H Bauer Ediciones + Daniel Tarrero + dtarrero&bauer.es +21150 + XKL, LLC + Shane Hall + iana-admin&xkl.com +21151 + Priority Technologies, Inc. + Support + support&prioritytech.com +21152 + Nucor Steel Indiana + Jason McClish + jmcclish&ns-ind.com +21153 + Academy of Fine Arts + Jokke Heikkila + jokke.heikkila&kuva.fi +21154 + Harland Financial Solutions + Eric Setz + eric.setz&harlandfs.com +21155 + Chameleon Technology, Inc. + Mark Anacker + manacker&lot66.com +21156 + pipp + Armin Pipp + armin&pipp.at +21157 + RAE Internet Inc. + Michael Katz + mkatz&raeinternet.com +21158 + Linux-Online + Timofey Korolyov + tk&linux-online.ru +21159 + IPcom S.A. 
+ Juan Andrés Antoniuk + jantoniuk&ipcomsa.com.uy +21160 + Emerson Retail Services + Ozzie Gurkan + ogurkan&ersus.com +21161 + Flamingo Internet Navigators + Christopher Hicks + chicks&chicks.net +21162 + Voxiva Inc. + Portman Wills + pwills&voxiva.net +21163 + EXPERTiS Tecnología S.A. de C.V. + Procopio Villarreal + pvillarr&expertis.com.mx +21164 + Legacy Health System + Jeffrey Mills + jmills&lhs.org +21165 + Tidal Networks + Jeff Prince + prince&tidalnetworks.net +21166 + Personal Broadband Australia + Michael Keaney + michael.keaney&pba.com.au +21167 + The Correspondence School + Glen Ogilvie + glen.ogilvie&correspondence.school.nz +21168 + SCF of Arizona + Terry R. Teppo + tteppo&scfaz.com +21169 + JTHInc Computing Solutions + Josh Hogle + josh.hogle&jthinc.net +21170 + Vortice Research Development Pty Ltd + Stephen Sampson + stephens&vorticeResearchgroup.com +21171 + NRG Global, Inc. + Damion Sandidge + support&NRGglobal.com +21172 + Elan Designs + David Mathis + davem&inow.com +21173 + WSCICC + Ron Wickersham + rjw&wscicc.org +21174 + DISC, State of Kansas + John Jones + john.jones&da.state.ks.us +21175 + Xinupro, LLC + Peter DrakeUnderkoffler + iana&ratgut.com +21176 + Datalucid limited + Mike Trotman + mike.trotman&datalucid.com +21177 + St. John School + Nicholas Osterhaus + nico&nicosterhaus.com +21178 + Anteya Technology Corporation + Steven Yeh + steven&anteya.com +21179 + Beijing E-tech Technology Co., ltd + CAO Cheng + caoc&extech.com.cn +21180 + SwiDyna Technologies,Inc + frank kang + swidyna.company&msa.hinet.net +21181 + anweb + Achim Neyer + achim.neyer&anweb.de +21182 + Tonediy + Reza Iskandar Achmad + reza.iskandar&gmail.com +21183 + lightelli Ltd. + yang kaimy + diskey26&hotmail.com +21184 + AirMsg, Inc. + Michael Lee + ml&ioneda.com +21185 + Ioneda, Inc. + Michael Lee + ml&ioneda.com +21186 + Corner Stone Technology Inc + Vienne Lee Ms. + vienneli&cornerstone.com.tw +21187 + SenseStream Limited + Ernest + ernest.so&sensestream.com +21188 + eyes-works Corporation + Yasuhiro Fujii + eyes&eyes-works.com +21189 + Huon Associates Pty Ltd + Andrew Bennett + andrew.bennett&huonassociates.com +21190 + MayAZ Software Services + Charles May + brad&mayaz.net +21191 + KCodes Corporation + Chris Hsieh + chris_hsieh&kcodes.com +21192 + Goldman Sachs JBWere Pty Ltd + James Hine + iana.oid&gsjbw.com +21193 + Webvision + Alexander Pajerr + office&webvision.at +21194 + LLC Inform-mobil + Alexey I. Froloff + raorn&immo.ru +21195 + Non-Stop Systems (South Africa) (Pty) Limited + Andy Watts + andy.watts&non-stopsystems.com +21196 + TerraPages Pty. Ltd. + Derek Munneke + derek.munneke&terrapages.com +21197 + CryptoEx + Oleg A. Zhirnov + info&cryptoex.ru +21198 + RTI-Zone + Rodolphe Pineau + pineau&rti-zone.org +21199 + silkway + liangjinge + liangjinge&hotmail.com +21200 + Dr. Ruff Software GmbH + Günter Kreis + gk&ruso.de +21201 + Linuma Advanced Hosting, Inc. + James Moore + Jim&linuma.net +21202 + BT (Germany) GmbH & Co. oHG + Falk John + falk.john&bt.com +21203 + ARES Inc. + Alan Yu + alanyu&ares.com.tw +21204 + MHGS + Marianne Goeltl-Schuberth + mhgs&MHGSoft.de +21205 + Webdyn + Emmanuel Viollet + Emmanuel.Viollet&webdyn.com +21206 + MobileRRD + Wim Siebring + wim&mobilerrd.tk +21207 + Technische Universitaet Bergakademie Freiberg + Dr. 
Andreas Kluge + Andreas.Kluge&hrz.tu-freiberg.de +21208 + IKS GmbH Jena + Lutz Donnerhacke + mib&iks-jena.de +21209 + Mayr-Melnhof Packaging International GmbH + Markus Heidinger + markus.heidinger&mm-packaging.com +21210 + kairos + Kim hong hoi + hhkim&realtimetech.co.kr +21211 + Fraunhofer-Institute for Telecommunications Heinrich-Hertz-Institut + Pieter Gross + gross&hhi.fhg.de +21212 + Incito Ltd + Colin McFarlane + iana&incito.co.uk +21213 + topolis.lt + Tomas Kuliavas + tokul&users.sourceforge.net +21214 + Eidetix S.r.L. + Massimo Cesaro + info&eidetix.com +21215 + MM-Karton + Erasmus Pachta + erasmus.pachta&mm-karton.com +21216 + Telepo AB + Patrik Granholm + patrik&telepo.com +21217 + Lux Servizi di Luigi Iotti + Luigi Iotti + snmpoid&REMOVETHIS.iotti.biz +21218 + Sette Querce + Ferdinando Simonetti + ferdinando.simonetti&elsag.it +21219 + Solidex S.A. + Pawel Gorazda + pawel.gorazda&solidex.com.pl +21220 + BASF AG + Holger Petersen + holger.petersen&basf-ag.de +21221 + Nexus Advanced Technologies S.r.l. + Giovanni Costagliola + giovanni.costagliola&nexusat.it +21222 + Profline BV + Frank Peters Sengers + info&profline.nl +21223 + Akris BV + Guido Schoonheim + schoonheim&akris.nl +21224 + 4G Systeme GmbH + Christian Car + christian.car&4g-systems.biz +21225 + British Heart Foundation + Santhan Perampalam + sp&bhfshops.org.uk +21226 + Linn Products Ltd + Jason Newell + jason.newell&linn.co.uk +21227 + ABI - Associazione Bancaria Italiana + Dott. Cesare Onorati + webmaster&abi.it +21228 + Aspex Semiconductor Ltd + Jon Wilson + it-support&aspex-semi.com +21229 + Leningrad Research and Development Institute of Telecommunications + Andrey Kiselev + dron&loniis.org +21230 + Iriga Networks + André Krempf + andre.krempf&iriga-networks.com +21231 + Kugele Elektronik + Achim Kugele + ldap&ak8.de +21232 + Jung Myoung Ltd + Jeong, jun-ho + jeong_junho&hotmail.com +21233 + Peter Schmaedicke + Ptere Schmaedicke + peter&invider.com +21234 + o-n-s CO.,LTD. + Chiaya Takeshi + chiaya&o-n-s.co.jp +21235 + Nehlsen IT Training und Beratung + Harro N. Nehlsen + h.nehlsen&nehlsen-it.de +21236 + Laseur Enterprises + Gerrit Laseur + glaseur&Comcast.net +21237 + Wright Express + Benjamin Garfield + benjamin_garfield&wrightexpress.com +21238 + BrasilSat Harald S/A + Luiz Yoshio Enomoto + lye.desenvolvimento&brasilsat.com.br +21239 + Vertiv (formerly 'Geist Manufacturing, Inc') + Brad Wilson or Jesse Preuss + bwilson&geistglobal.com +21240 + PinguSolution Meierhoff/Woltmann GbR + Michael Meierhoff + Michael.Meierhoff&pingusolution.de +21241 + Chezwam + Sebastien Gross + seb+iana&chezwam.org +21242 + SimDesk Technologies Inc. + Joe Moyle + JoeMoyle&SimDesk.com +21243 + LANet Sp. z o.o. + Grzegorz Marszalek + g.marszalek&lanet.net.pl +21244 + Mt Umunhum Wireless + William Estrada + MrUmunhum&popdial.com +21245 + Macdonald Associates GmbH + Reiner Nix + reiner.nix&macd.com +21246 + Quantitative Imaging + Ronald Ligteringen + R.Ligteringen&tnw.tudelft.nl +21247 + verimatrix + Bob Kulakowski, CTO + bobk&verimatrix.com +21248 + Pixel Software Technologies + Pavel Nemirovsky + pauln&pixel-tech.com +21249 + Lightwave Solutions, Inc. + Richard Orazi + rjorazi&lightwavesolutionsinc.com +21250 + Crane River Consulting + Daniel Linder + dan&linder.org +21251 + Secure Campus Inc + Madhav Karhade + madhav&securecampus.com +21252 + FMS Internetservice Müller und Schaub GbR + Werner Dian + dian&fmsweb.de +21253 + TransOptix Inc. 
+ Joe Zhou + jzhou&transoptix.com +21254 + Arxceo Corporation + Jackie Smith Cashion + jackie&arxceo.com +21255 + SanYuHu LLC + Walter Seiler + walter.seiler&sanyuhu.com +21256 + Corporate Technologies Incorporated + Martin Cribbins + mcribbins&cptech.com +21257 + Transera Communications Inc + Gaya Vukkadala + gaya.vukkadala&transerainc.com +21258 + Tongel.org + David Maynor + maynor_d&bellsouth.net +21259 + Oregon Department of Human Services + Walt L Davis + Walt.L.Davis&state.or.us +21260 + XYPRO Technology Corporation + scott uroff + scott-nonstop&xypro.com +21261 + Aventia SL + Oscar Conesa + oconesa&aventia.com +21262 + Second Shift Inc. + Andrew Bates + abates&omeganetserv.com +21263 + Pacific College of Oriental Medicine + Travis Morgan + tmorgan&pacificcollege.edu +21264 + AFC Commercial + Drew Ferguson + andy&afccommercial.co.uk +21265 + Dark Horse Comics, Inc. + Chris Irvine + hostmaster&darkhorse.com +21266 + Jab Ltd + Jarkko Lietolahti + jarkko&jab.fi +21267 + IT-Service Lehmann + Arno Lehmann + al&its-lehmann.de +21268 + Tecnocity, S.C. + Gabriel Martin del Campo + gmartin&tecnocity.com.mx +21269 + OnSite Systems, Inc. + Catherine Io + cio&onsitesystems.com +21270 + SWAL + D. Faber + iana&swal.org +21271 + ITT Educational Services, Inc. + Walter Seiler + mike.seiler&sanyuhu.com +21272 + Thomas P. Olsen + Thomas P. Olsen + thomas&t-point.dk +21273 + MediaWeb IT + Jan Boysen + jan&mediaweb-it.net +21274 + Camiant Inc + Anrthony Machon + amachon&camiant.com +21275 + n3k Informatik Ltd + Bob Day + bob.day&n3k.co.uk +21276 + A.C.C.E.S.S. Inc. + James Fraser + jsf&rogers.com +21277 + City of Ottawa + Roger Langevin + roger.langevin&ottawa.ca +21278 + Intellambda Systems Inc. + Rachel Wang + rachelw&intellambda.com +21279 + State of Kansas + Jeremy Heit + jjh&srskansas.org +21280 + Computer Access Technology Corporation + Shlomi Krepner + shlomi&catc.com +21281 + Empresa Jornalística Caldas Júnior Ltda + Rafael Pivato + rpivato&cpovo.net +21282 + SYS/IO + Ingo Felger + if&sys.io +21283 + The Forest Industry Online Inc. + Dallas Vogels + dvogels&forestind.com +21284 + Edustructures LLC + Eric Petersen + eric&edustructures.com +21285 + Catalyst IT Ltd + Steve Wray + stevew&catalyst.net.nz +21286 + Sygate Technologies + Ravil A. Desai + rdesai&sygate.com +21287 + Mikrovlny s.r.o. + Libor Konecny + support&mikrovlny.cz +21288 + Cetacea Networks Corporation + Gary MacIsaac + gary&orcaflow.ca +21289 + grid-zero.net + Darren Edmundson + domains&grid-zero.net +21290 + GBP Software, LLC + Gaige B Paulsen + iana&gaige.net +21291 + Tributes Online Ltd + Simon Manning + harbour&funeral-link.co.nz +21292 + kaengs reich + Karsten Engelke + contact&kaeng.org +21293 + Fabric7 Systems, Inc. + Nakul Saraiya + saraiya&fabric7.com +21294 + KIKA S.A. + Denis Sacchet / Fabien Lallemand + it&kikamedical.com +21295 + Cunningham and Othen + Ian Cunningham + IanC&c-and-o.co.uk +21296 + Infinera Corp. + Sharfuddin Syed + ssyed&infinera.com +21297 + U.Z.KULeuven + Andre De Deurwaerder + netadmin&uz.kuleuven.ac.be +21298 + Odyssey Pharmaceuticals, Inc. + Leon Hairie + leon.hairie&odysseypharm.com +21299 + BigFix, Inc. + Lisa Lippincott + lisa_lippincott&bigfix.com +21300 + Watt 22 Távközléstechnikai Szolgáltató Kft. + Tóth Csaba + tcsaba&greengaialing.hu +21301 + Dialmex, L.L.C. + David Stanaway + hostmaster&dialmex.net +21302 + Wright State University + John Meyers + john.meyers&wright.ed +21303 + Periscope Ltd. 
+ David Preece + davep&pscope.net +21304 + BC Genome Sciences Centre + Mark Mayo + markmayo&bcgsc.ca +21305 + Katholischen Hochschulgemeinde Linz + Josef Hoerandtner + iana.org&jossy.net +21306 + Cyberfamily.dk + Christian Christiansen + cc&cyberfamily.dk +21307 + Tel.Pacific + Xian Zhang + xzhang&tel-pacific.com +21308 + NeoMon + Shayne Balduf + sbalduf&neomon.com +21309 + Juni Australia Pty Ltd + Mark Johnson + markjohnson&juni.com.au +21310 + Center for High Energy Physics + Kihwan Kwon + kihwan&knu.ac.kr +21311 + GammaStream Technologies, Inc. + Michael Sacket + msacket&gammastream.com +21312 + LISAsoft Pty. Ltd. + Derek Munneke + dmunneke&lisasoft.com +21313 + VRx, Inc. + Ricardo Autobahn + iana+spam+richard&vrx.net +21314 + Sizwe + Jean-Paul Bauer + jpbauer&sizwe.org +21315 + The State Key Laboratory Of Information Security of China + Lin JingQiang + linjq&lois.cn +21316 + Octtel Communication Co., Ltd. + Jason C.S. Chang + jason.chang&octtel.com.tw +21317 + ATEN INTERNATIONAL CO., LTD. + JasonPan + jasonpan&aten.com.tw +21318 + Intec NetCore, Inc, + Kuniaki Kondo + noc&inetcore.com +21319 + Symbio Group + Ken Ko + ko&symbio.com.tw +21320 + SourceMachine Develop Ltd. + Peter Qian + qian_hf&hotmail.com +21321 + CITIC Telecom 1616 Limited + Ken Tam + cktam&citic1616.com +21322 + Estonian Academy of Arts + Lauri Korts-Pärn + admin&artun.ee +21323 + Consolidated Contractors International Company S.A.L + Javed Khan + JKhan&ccc.gr +21324 + Databus Inc. + Barney Wolff + barney&databus.com +21325 + Siedlung Cuxhaven AG + Nils Stoever + n.stoever&siedlung.de +21326 + Heim & Haus GmbH & Co KG + Wolfgang Struhalla + struhalla&heimhaus.de +21327 + Westsaechsische Hochschule Zwickau + Andreas Funk + Andreas.Funk&fh-zwickau.de +21328 + Metos Oy + Joakim Berndtsson + joakim.berndtsson&metos.com +21329 + secXtreme GmbH + Christian Scheucher + christian.scheucher&sec-xtreme.com +21330 + JSC "TransTeleCom Company" + Andrey A. Galdin + cainfo&transtk.ru +21331 + RESEAUNANCE + REGNIER Pascal + support&reseaunance.com +21332 + Luciad NV + Systeembeheer + tom.laermans&luciad.com +21333 + Chongqing Putian Communication Equipment Ltd, Co. + O.Brire He + OBrire&yahoo.com.cn +21334 + ACCIO GLOBAL + xavier lleixa + tl11132&salleurl.edu +21335 + OSRAM OS GmbH + Wolfgang Schamberger + wolfgang.schamberger&osram-os.com +21336 + CAEN SpA + Stefano Coluccini + s.coluccini&caen.it +21337 + PowerSource + Tom Laermans + noc&powersource.cx +21338 + NetFX + Tom Laermans + tom&netfx.be +21339 + Fiat Auto S.p.A. + Cremasco Roberto + roby.cremasco&fiat.com +21340 + Verax Systems + Slawomir Debczynski + slawomir-debczynski&veraxsystems.com +21341 + Corner Banca SA + Oscar Armanini + unix&corner.ch +21342 + EpiSoftware Ltd. + Martin Zdila + m.zdila&episoftware.com +21343 + n3k Informatik GmbH + Rainer Maurer + rainer.maurer&n3k.de +21344 + Claranet GmbH + Christian Meutes + bsd&de.clara.net +21345 + Sachs + Peter W. 
Sachs + iana.001&peter-sachs.de +21346 + Forler IT Training & Consulting + Wolfgang Forler + wforler&forler.de +21347 + ITCnetworks + Mihai Lozoveanu + mlozo&itcnetworks.ro +21348 + Song Networks Svenska AB + Peter Eriksson + peter.eriksson&songnetworks.se +21349 + TTG Europe PLC + Andy Raffle + andy.raffle&ttg-netherlands.nl +21350 + SHS Informationssysteme AG + Network Operations + iana-oid&shs.de +21351 + Gyrus Prod SRL + Gabriel BIXADE + gabriel.bixade&gyrus.ro +21352 + Zhang Ye + Zhang Ye + yeahchang&vip.sina.com +21353 + Hospital Santa Mônica + Abrantes Araújo Silva Filho + abrantes&epidemiologia.org +21354 + Bezirksamt Lichtenberg von Berlin + Winkler, Ray + ray.winkler&ba-libg.verwalt-berlin.de +21355 + Ascential Software + Steven Totman + steven.totman&ascential.com +21356 + Austrian Grid + Willy Weisz + weisz&vcpc.univie.ac.at +21357 + antislash + camberlin stephane + sca&antislash.net +21358 + China ZLTelecom Telecommunication Equipment Co., LTD + Li Dongxia + cat995&21cn.com +21359 + OPNET Technologies, Inc + Vicki Tardif + vtardif&opnet.com +21360 + Chatsworth Products, Inc + None + SNMP-info&chatsworth.com +21361 + Catholic Healthcare West + Jim Chambers / Sheila Norcio + jim.chambers&chw.edu +21362 + IFOTEC + C. SILLANS + csillans&ifotec.com +21363 + Valhalla Media + Nic Pedersen + admin&vikingis.com +21364 + Revinetix, Inc + Stephen Jones + snmp&revinetix.com +21365 + Accelerate Consultancy Limited + Phil Davies + phil.davies&accelerate-consultancy.co.uk +21366 + Open Source Lab + Scott Kveton + kveton&osuosl.org +21367 + Connectathon.Net + Glen F. Marshall + webmaster&connectathon.net +21368 + The Cloud Networks Ltd. + Dan Martin + dan.martin&thecloud.net +21369 + Terrasat Communications, Inc. + Jose Hecht + jhecht&terrasatinc.com +21370 + Thales Navigation + Carl Chan + cchan&thalesnavigation.com +21371 + Whited.US + Matthew Whited + matt&whitedonline.com +21372 + JavaPlus + Stephen Suen (SUNRUJUN) + stephen.suen&gmail.com +21373 + netfilter/iptables project + Harald Welte + laforge&netfilter.org +21374 + Fabbricadigitale srl + Nicola Masseroni + n.masseroni&fabbricadigitale.it +21375 + Interactive Vision Mind Beacon sp. z o.o. + Przemyslaw Plaskowicki + plex&iavmb.pl +21376 + Gruppenkasper + Aiko Barz + aiko&gruppenkasper.net +21377 + Zhongyuan Institute of Technology + An Jiangze + nair_an&msn.com +21378 + American Honda Motor Co., Inc. + Joe Pace + Joe_Pace&ahm.honda.com +21379 + Shenzhen aike Co.,Ltd. + Frank Wang + Frank_wzf&hotmail.com +21380 + Guangzhou aike Co.,Ltd. + Frank Wang + w_zf&hotmail.com +21381 + Viessmann Werke GmbH & Co KG + Michael Bergen + bemi&viessmann.com +21382 + united bolta nigeria limited + ogunjimi ayotunde + kattomi&hotmail.com +21383 + bo-tech GmbH & Co. KG + Johannes Bornhold + joh&bo-tech.de +21384 + serie a - digitale medien und systeme GmbH + Kurosch Saremi + saremi&serie-a.de +21385 + Escalon Networks, Inc. + Alex Alten + alten&EscalonNetworks.com +21386 + Quiettouch Inc. + Vladimir Moshkovsky + vmoshkovsky&quiettouch.com +21387 + The John Simon Guggenheim Memorial Foundation + Michael Smith + ms&gf.org +21388 + Risetek Inc. 
+ Li liming + lilm&risetek.com +21389 + King Fahd University of Petroleum and Minerals + Mir Ahmed Ali Shajee + shajee&kfupm.edu.sa +21390 + ALAIN BRION SARL + Alain Brion + roulabille92&msn.com +21391 + Netalley Networks LLP + Joe Geldart + jgeldart&netalleynetworks.com +21392 + Tech Arabia + Mohamed Eldesoky + mohamed&eldesoky.net +21393 + CareerWeaver.net + Metodi Mladenov + metodi&careerweaver.net +21394 + aplis.cz, a.s. + Vlastislav SUCHARDA + vsucharda&aplis.cz +21395 + Project Logicaldreams + Matt Pruett + mpruett&logicaldreams.net +21396 + Brand X Software, Inc. + Chris Terry + chris.terry&district6.org +21397 + FiberZone Networks LTD. + Yossi Appleboum + yossia&web-silicon.com +21398 + Cybergate Technologies Corp + Marco Lucena + mclucena&yahoo.com +21399 + Active Circle SA + Jean-Michel Dréan + iana-contact&active-circle.com +21400 + e-geek + Xavier Ourcière + xavier.ourciere&fnac.net +21401 + DSNCON GmbH + Harald Rößler + mib&dsncon.de +21402 + Epic Energy + Andrew Campbell + AndrewCampbell&epic.com.au +21403 + A-First Technology CO.,LTD. + Chaucer Huang + chaucer&a-first.com.tw +21404 + DG Telecom Co. Ltd. + Wang ChuanDa + support&dg-telecom.com +21405 + COMSATS Internet Services + Nadeem Akhtar + rahnuma&skt.comsats.net.pk +21406 + azh GmbH + Thomas Wieschke + wieschke&azh.de +21407 + Maier + Rainer Maier + ramaier&gmx.de +21408 + Ernitec A/S + Søren Gullach + S.h.gullach&ernitec.dk +21409 + ARTAX, a. s. + Radek Drabant + drabant&artax.cz +21410 + Kennisnet + Jerry van de Leur + j.vandeleur&kennisnet.org +21411 + gocept gmbh & co. kg + Christian Theune + mail&gocept.com +21412 + Andreas Wachter + Andreas Wachter + andreas.wachter&signon-it.com +21413 + ScriptLogic Corporation + Chuck Baenen + chuck&scriptlogic.com +21414 + Batra Consulting, Inc. + Harjit S. Batra + HBatra&HBatra.com +21415 + Option nv + Axel Schollaert + a.schollaert&option.com +21416 + Centre Hospitalier de la Côte Basque + AGUERRETCHE Jean-Marc + jmaguerretche&chicb.com +21417 + tdb Software Service GmbH + Haris Sehic + hs&tdb.de +21418 + Unient Communications, a.s. + Jan Zach + support&unient.cz +21419 + TEDIAL S.L. + Pablo Perez Trabado + pablo&tedial.com +21420 + HanseMerkur Krankenversicherung a.G. + Frank Jodkuhn + Frank.Jodkuhn&hansemerkur.de +21421 + Yassine Zairi LTD. + Yassine O. Zairi + aleczairi&msn.com +21422 + CVW + Volker Wiegand + volker.wiegand&cvw.de +21423 + ConnecTerra, Inc. + Ken Traub + kt&connecterra.com +21424 + Kanagawa Prefectural Board of Education + Office of General Affairs + jouhouka.389&pref.kanagawa.jp +21425 + Institute for Automation and Control Processes (IACP) FEBRAS + Alexander Zatserkovniy + avz&dvo.ru +21426 + Hoekstra Consulting Group + Chris Hoekstra + chris_hoekstra&hotmail.com +21427 + Weisenberger Solarstromerzeugung und Vertrieb + Axel Weisenberger + info&weisenbergers.de +21428 + Aepona Ltd + Liam McEntee + liam.mcentee&aepona.com +21429 + SUNTECH Telecom Solution LTDA + Mario Wolf Jr + mariowolf&suntech.com.br +21430 + inPUT Softwareentwicklungs GmbH + Karl Nabinger + k.nabinger&input.at +21431 + SAE International + Ken Hutalski + itservices&sae.org +21432 + DigiNet Corporation + Mark A. Miller + mark&diginet.com +21433 + Interface Software, Inc. 
+ John McDonnell + jmcdonnell&interfacesoftware.com +21434 + Karolis Dautartas Personal Enterprise + Karolis Dautartas + list&public.lt +21435 + Inner Class Software Engineering Limited + David McNerney + david.mcnerney&innerclass.com +21436 + basis06 AG + Reto Burkhalter + hostmaster&basis06.com +21437 + ATEN Canada Technologies Inc. + Rebecca Hou + rebeccah&aten-ca.com +21438 + Thrunet + jongwon lee + jwlee&corp.thrunet.com +21439 + Beijing Mission Communication Co., Ltd. + Yullia Zhang + yang.zhang&eqmail.com +21440 + XIUS India Ltd. + Rajesh Sharma + rajeshs&xius.org +21441 + KBK e-vallalkozasa + Kovacs, Béla Karoly + kovacsbk&attmail.com +21442 + GAPIC (PVT) LTD + K. U. Gayan Priyanatha + pcgayan&yahoo.com +21443 + Caemar Software + Mark Chapman + mark.chapman&caemar.com +21444 + OSS Technologies (Pty) Ltd + Cedric Stevens + cedric.stevens&oss.co.za +21445 + eSilicon Corporation + Stephen Van Domelen + hostinfo&esilicon.com +21446 + ZIPPtec Systemhaus GmbH + Manfred Herrmann + manfred.herrmann&zipptec.de +21447 + Clinton Group, Inc. + Eric M. Stone + eric.stone&clinton.com +21448 + KarstadtQuelle AG + Heinz-Friedrich Hemmert + trust&itellium.com +21449 + Riversoft Integracao e Desenvolvimento de Software Ltda + Luiz Alberto C. de Lima + llima&riversoft.com.br +21450 + Radiantek + yangxiaobing + yxb&radiantek.com +21451 + Topera Technology Ltd. + Fu Dongya + fudongya&263.net +21452 + AUNA OPERADORES DE TELECOMUNICACIONES, S.A. + Cristobal Martinez Ferrus + cristobal.martinez&auna.es +21453 + GatherWorks, Inc. + Frik Strecker + frik&gatherworks.com +21454 + data voice technologies GmbH + Hannes Bauer + hbauer&fibersens.com +21455 + Sinosoft Co.,LTD + han lin + hanlin&sinosoft.com.cn +21456 + LV1871 + Michael Bierenfeld + michael.bierenfeld&lv1871.de +21457 + Demandware, Inc. + David C Boyle + dboyle&demandware.com +21458 + Clovis Solutions, Inc. + Hong Lu + honglu&clovissolutions.com +21459 + CaLviX + Daniel Dehennin + webmaster&calvix.org +21460 + Global Infotek, Inc. + Scott Ganz + command&globalinfotek.com +21461 + Cole National + Rich Kirkpatrick + richkirkpatrick&colenational.com +21462 + Toro Co + Steve Watne + steve.watne&toro.com +21463 + UZ Gent + Dirk Ketels + oidadmin&uzgent.be +21464 + Adelior SA + Mirko Van Colen + mirko.vancolen&adelior.be +21465 + Vipond Inc. + Gregory Pleau + gregory.pleau&vipond.ca +21466 + DOMISYS SA + BOUCHÉ David + bouche&domisys.com +21467 + Uber Technologies cc + Morne Lategan + info&ubertech.co.za +21468 + Vijit Consulting + David Madsen + madsen&vijit.com +21469 + Blade Fusion + Eyal Segal + eyal.segal&bladefusion.com +21470 + schematech + Bob Cullen + bob&schematech.co.uk +21471 + SimpleRezo + Clement Moulin + contact&simplerezo.com +21472 + xmled.org + Steven Legg + steven.legg&eb2bcom.com +21473 + View500 + Steven Legg + steven.legg&eb2bcom.com +21474 + Ardec International Pty. Ltd. + Duncan Sayers + duncan.sayers&ardec.com.au +21475 + Farsands Corporation Limited + Derek Hinchliffe + dhinchliffe&mpl.com.au +21476 + Academia Sinica Grid Computing Centre + Chih-Chiang Chang + ccchang&beta.wsl.sinica.edu.tw +21477 + TaiwanGrid + Chih-Chiang Chang + ccchang&beta.wsl.sinica.edu.tw +21478 + AdCoCom GmbH + Ekkehard Domning + edomning&adcocom.com +21479 + Icepage AB + Bartek Kania + bartek&icepage.se +21480 + Shenzhen Olym-tech Co., Ltd. 
+ Jack Luo + jackluo&authcyber.com +21481 + TKM Telekommunikation und Elektronik GmbH + Frank Hintsches + f.hintsches&tkm-gmbh.de +21482 + SAIPM + Reynald Lemaire + Reynald.Lemaire&saipm.com +21483 + Quest Retail Technology Pty Ltd + Paul Clarke + snmp&quest.com.au +21484 + SIGMETA GmbH + Manfred Johnen + Manfred.Johnen&sigmeta.de +21485 + UNIS Ukraine + Serg Ageyev + sa&unisorg.com.ua +21486 + Secces information technologies ltd. + Ittai Weissberg + ittai.w&secces.com +21487 + Japan Cable Laboratories + Ryozo Yamashita + info&jcl.or.jp +21488 + NC Numericable + Stephane DEGUNST + isp&ncnumericable.com +21489 + die Antwort + Stefan Daschek + noniq&noniq.at +21490 + Booth & Associates, Inc. + Rick Rambo + RamboRL&Booth-Assoc.com +21491 + Entelligencia + Stan Banash + sbanash&sbcglobal.net +21492 + jMesh Limited + Phil Bluer + phil.bluer&jmesh.com +21493 + SKODA AUTO a.s. + Daniel Cep + daniel.cep&skoda-auto.cz +21494 + 3CPLUS GmbH + Raymond Dreher + r.dreher&3cplus-inso.de +21495 + SIG IT GmbH + Mirko Waletzke + mirko.waletzke&sig.biz +21496 + nSolutions, Inc. + Gordon Zhang + gzhang&nsolutionsinc.net +21497 + Tuxeo sprl + Philippe Frycia + philippe&frycia.be +21498 + Daemonspace Inc. + LI Dong + ld&ldconfig.com +21499 + LDCONFIG + LI Dong + ld&ldconfig.com +21500 + SIC LAB S.r.l. + Fabio Cottini + fabio.cottini&siclab.it +21501 + Asdag + Ogün Bilge + o.bilge&asdag.de +21502 + Sysload Software + Yann GUERNION + ref&sysload.com +21503 + Witt Weiden GmbH + Graf Mario + mario.graf&witt-weiden.de +21504 + SipStorm, Inc. + Gerald Chatlos + gchatlos&sipstorm.com +21505 + Caton Technology(Beijing) Corporation + Danffer Liu + danfferliu&caton.com.cn +21506 + Infinity Software Development + Michael Hanna + hannam&infinity-software.com +21507 + Strategies & Solutions LLC + Sam Wozniak + swozniak&strategies-llc.com +21508 + LAAS-CNRS + Laurent Blain + Laurent.Blain&laas.fr +21509 + CanWest Global Communications Corp. + Ron Field + rfield&canwest.com +21510 + heicare GmbH + Thomas Haselwanter + thomas.haselwanter&heicare.com +21511 + Institut Laue-Langevin + Martine M. Espitallier + ldapmaster&ill.fr +21512 + Instituto Superior Técnico + Jose´Manuel Pereira + ci&ist.utl.pt +21513 + Vormetric, Inc. + Andrew Wnuk + awnuk&vormetric.com +21514 + Computer Services and Solutions, Inc. + Daniel Diachun + ddiachun&cssiky.com +21515 + Hyperion VOF + Evert Carton + evertc&hyperion-entertainment.biz +21516 + Secure Data in Motion, Inc. + Ian Elverson + elverson&sigaba.com +21517 + Ehwa Technologies Information + SHIN, DONG-MAN + cshindm&eti21.com +21518 + Queensdale Investment Corp. + Benj Carson + benjcarson+iana&digitaljunkies.ca +21519 + Detlef Woltmann IT Consult GmbH + Detlef Woltmann + dw&dewo-itconsult.de +21520 + Crossworks Co.,Ltd. + Eunjeong, Go + gowill&crossworks.co.kr +21521 + Engage Networks, Inc + Alan Gilgenbach + agilgenbach&engagenet.com +21522 + Knights of Columbus + Carlos Bonet + carlos.bonet&kofc.org +21523 + SIA European Softwarehouse + Dima Fedotov + billy&european-softwarehouse.com +21524 + Wintegra + Asaf Matan + asafm&wintegra.co.il +21525 + sydios-it-solutions GmbH + Frank G. Walzebuck + fwalzebuck&sydios.de +21526 + Equip'Trans + Thierry Dubosse + be&equiptrans.com +21527 + SLIT + M. LARDY + flardy&slit.fr +21528 + MICROSEC Ltd. + Andras Ellbogen + ellbogen&microsec.hu +21529 + 2wcom GmbH + Werner Drews + contact&2wcom.com +21530 + Champ Cargosystems S.A. 
+ Cihan Aydin + cihan.aydin&champ.aero +21531 + Adventec + Damien METZLER + dmetzler&adventec.fr +21532 + Universidad Técnica Federico Santa María + Alejandro A. Villarroel N. + alejandro.villarroel&usm.cl +21533 + Administration fiscale + Eric BEAURY + bureau.si3-dai&dgi.finances.gouv.fr +21534 + eWayDirect, Inc. + Bill Welch + billw&ewaydirect.com +21535 + QVS Software Inc. + Dennis Taylor + dwt&qvssoftware.com +21536 + MeuPC Informática e Comunicações LTDA + Fernando Barrocal + fernando.barrocal&gmail.com +21537 + KUNet + Ammar T. Al-Sayegh + ammar&kunet.com +21538 + fqdn.org + Juliane Holzt + iana-oid&juliane.holzt.de +21539 + Klahn Integration, LLC + Robert A. Klahn + robert_snmp&kint.org +21540 + Clulicon + Uli Deml + uli&die-demls.de +21541 + DNF CONTROLS + Paul Rubenstein + prubenstein&dnfcontrols.com +21542 + NET-TEK.info + Thomas Langer + thomas&net-tek.info +21543 + Bartels System GmbH + Oliver Bartels + oliver&bartels.de +21544 + Ktech Telecom + Steve Kuh + sk&ktechtelecom.com +21545 + Universidad Nacional de Salta + Diego Saravia + dsa&unsa.edu.ar +21546 + Virtual Conexions Inc + Jean-Luc Dugas + jldugas&virtualconexions.com +21547 + Logic Product Development + Erik Anderson + hostmaster&logicpd.com +21548 + Qoris, Inc. + Kenneth Lee + klee&klmn.net +21549 + Vine Systems Limited + Michael Larbi + michael.larbi&larbi.com +21550 + Millennium Pharmaceutical, Inc. + Senad Mulabegovic + senad.mulabegovic&mpi.com +21551 + IdleAire Technologies Corp. + Hostmaster + HostmasterMail&idleaire.com +21552 + XI INTERNET SERVICES + Johannes Peschke + johannes.peschke&xi-internet-services.de +21553 + Kineticode, INc. + David Wheeler + ldap&kineticode.com +21554 + Intertech Software Ltd + Rob Basto + rbasto&intertechsoftware.co.uk +21555 + VonWangelin + Daniel Von Wangelin + info&vonwangelin.com +21556 + Botho.Net + Sebastian Schulz + seb&shamoha.org +21557 + Fabio Perini North America, Inc. + Kent Maxwell + kmaxwell&fabio-perini.com +21558 + East Sea Group + Long Pham + pklong&eastseatravel.com +21559 + Parallelspace Corporation + Michael Herman + mwherman&parallelspace.net +21560 + Atlanta Help Desk + Marc Mamane + marc&mamane.org +21561 + abk-technology GmbH + Michael Adam + michael.adam&abk-technology.de +21562 + Boing.com + Geff Hanoian + boing&boing.com +21563 + Center of the System Administration LLC + Nick Gazaloff + nick&sbin.org +21564 + MMForces.de + Mathias Kaufmann + administrator&mmforces.de +21565 + R. E. Smith + Ron Smith + res485&fuse.net +21566 + WMS Industries Inc. + Ryan Schaefer + rschaefe&wmsgaming.com +21567 + Network Computing Services, Inc. + Jay Kline + jay&ahpcrc.org +21568 + nGENn GmbH + Ulrich Plate + plate&ngenn.net +21569 + COMPSIS + Rodolpho Testi + rodolpho.testi&compsisnet.com.br +21570 + Crimson Microsystems + Michael A. Thatcher + mthatcher&crimsonmicrosystems.com +21571 + Cornerstone University + Jonathan Beasley + ldap.admin&cornerstone.edu +21572 + Incompatible Time Sharing Systems + Alan Bawden + snmp&bawden.org +21573 + gb EDV Beratung + G. Beilhack + iana&beilhack.com +21574 + Active Systems Ltd. 
+ Andres Toomsalu + andres&active.ee +21575 + IRIS Télécom & Réseaux + Cédric LHERM + info&iris-telecom.fr +21576 + Vestfold Butikkdata AS + Dag Christensen + dag.christensen&vbd.no +21577 + Allgemeine Anthroposophische Gesellschaft + Angela Gavazzi + edv&goetheanum.ch +21578 + Radium Instituto de Oncologia + Josephus + josvdv&radium.com.br +21579 + HarlemLIVE + Brad Harbans + harbansb&msn.com +21580 + XDS + David Roberts + daver&xdsinc.net +21581 + Bird Technologies + Matt Loretitsch + matt&bird-technologies.com +21582 + twam systems + Tobias Mueller + iana&email.twam.info +21583 + Oy Stinghorn Ltd + Sami Vaarala + sami.vaarala&stinghorn.com +21584 + BTT Software + Nick Abbott + snmp&bttsoftware.co.uk +21585 + Apollo Medical Imaging Technology + Qing Yang + info&apollomit.com +21586 + ELITECH DATACOM CO.,LTD. + ZongZhao Chen + chen&elitech.com.cn +21587 + Cemtek Computer Solutions Ltd. + Wei Zhong + menei&21cn.com +21588 + Media Connect GmbH + Armin Sattler + info&media-connect.de +21589 + Infosila + Shibaev Dmitry + d_shibaev&mail.ru +21590 + Zeni Corporation SA + Gilbert Roulot + contact-iana&zeni.fr +21591 + Systime GmbH + Martin Brulisauer + m.brulisauer&systime.ch +21592 + Activis Polska Sp. z o.o. + Jacek Hryniewiecki + j.hryniewiecki&activis.pl +21593 + Agnes Cebulla + Agnes Cebulla + mail&agnes-cebulla.de +21594 + Dougs Computers + Doug Hanson + iana&dougscomputers.com +21595 + Kerberos Research + Jesus Edgar Zavala Siller + jezs&kerberos-research.com +21596 + Low Fat Technology, LLC + Nick Hodulik + snmp.spam&lowfattech.com +21597 + Paper Coaters Limited + Merry Tsao + merryt&papercoaters.co.nz +21598 + Net Design Studio S.L. + José Luis Arenas + jla&nds.es +21599 + TRAK Microwave Corporation + Rolando Penabade + Rpenabade&trak.com +21600 + NETIKUS.NET ltd + Ingmar Koecher + ingmar.koecher&netikus.net +21601 + Cementhorizon + Eugene Wood + iana_private_enterprise_administrator&cementhorizon.com +21602 + Turbocomm Tech. Inc. + Lobo Chang + lobo&turbocomm.com.tw +21603 + e-Lock Corporation Sdn. Bhd. + Mr. Poong Zui Yong + poongzy&elock.com.my +21604 + Fiserv + Ryan Persaud + Ryan.Persaud&Fiserv.com +21605 + Sollentuna Energi AB + Jens Örtenholm + jens.ortenholm&sollentunaenergi.se +21606 + Asclep Communications Private Ltd + Saravanan R + saravanan&asclep.com +21607 + SPAR Oesterreichische Warenhandels-AG + Martin SPERL + martin.sperl&spar.at +21608 + John G Russell (Transport) Ltd., + Ronnie Johnstone + ronnie.johnstone&johngrussell.co.uk +21609 + Syscomp GmbH + Robert Stromer + ian&syscomp.de +21610 + Quality Objects S.L. + Roberto Sánchez + rsanchez&qualityobjects.com +21611 + MDHM COMMUNICATION + Ravizza + webmaster&mdhm.fr +21612 + ise - Individuelle Software-Entwicklung GmbH + Witali Kadatzki + kadatzki&ise.de +21613 + TOYOTA Motor Marketing Europe + Koen Maes + Koen.Maes&toyota-europe.com +21614 + Multix Finland Oy + Jarmo Tulonen + tulonen&multixfin.fi +21615 + Bolton Sixth Form College + Kevin Ratcliffe + kratcliffe&bolton-sfc.ac.uk +21616 + Fraunhofer IIS + Martin Kohlmann + kln&iis.fraunhofer.de +21617 + TiFiC AB + Per Losenborg + per.losenborg&tific.com +21618 + Navecor, SA + Veronique Gaspar + veronique.gaspar&navecor.pt +21619 + Massey Wilcox Transport Ltd. + Ray Conneely + ray&masseywilcox.com +21620 + Morrow Technologies Corporation + James Morrow + ianacontact&morrowcorp.com +21621 + Perez Torres y Cia. 
+ Pedro Garcia and Jose M.Torres + jose.torres&pereztorresycia.es +21622 + Centerpoint Technologies Inc + Brian McBride + bmcbride&talkswitch.com +21623 + Serono International SA + Christian Geffcken + IT.Security&Serono.com +21624 + EcoNet.Com, Inc. + Tim Bandy + timb&econet.com +21625 + Shikoku Instrumentation CO.,LTD + Makio Hinomoto + hinomo1180&yonkei.co.jp +21626 + NetMon Information Systems Ltd. + PH Chiu + phchiu&netmon.com.hk +21627 + Embrapa Monitoramento por Satélite + Anderson S. Ferreira + anderson&cnpm.embrapa.br +21628 + Niagara LaSalle Corporation + David Sinclair + dsinclair&niag.com +21629 + IKTEK + Emmanuel Lécharny + elecharny&iktek.com +21630 + University of Fukui + Mitsuya Tanaka + office&icpc.fukui-u.ac.jp +21631 + Axero AG + Georg Lehmann + georg.lehmann&axero.net +21632 + Kayak Software Corporation + Steve Revilak + enterprise-numbers&kayak.com +21633 + eCorridor, Inc. + Gene Ames + games&ecorridor.com +21634 + Educational Community Credit Union + Gabrielle Braley + gabrielle.b&edcomcu.org +21635 + ZhongChuangXingCe Beijing,China + Chen Hongjun + chenhj&zcxc.com.cn +21636 + Tech-Nous Ltd + Iain Holder + iain&tech-nous.co.uk +21637 + PacWan + Michel Merle + nic&pacwan.net +21638 + PROD-EL PRODOTTI ELETTRONICI S.P.A. + Bernareggi Ilaria + mkt&prod-el.com +21639 + CONSIGNACIONES TORO Y BETOLAZA,S.A. + JOSE ANTONIO GONZALEZ CARREDANO + josean.gonzalez&torobe.com +21640 + O2 (UK) Ltd + Martyn Radzimierski + directory&02.net +21641 + mm Control AG + Hans Kremers + hans.kremers&mmcontrol.ch +21642 + Le Tual Consulting + Jamie Le Tual + ldap_admin&letual.net +21643 + Allianz Cia. de Seguros y Reaseguros SA + Esther Rus + esther.rus&allianz.es +21644 + Hyperworx + Jason Chodakowski + jasonc&hyperworx.com +21645 + PDV-Systeme + Nick Solf + Nick.Solf&pdv.de +21646 + Meuleman Technologies bvba + Koenraad Meuleman + koenraad&meuleman.com +21647 + Credence Systems Corporation + Dave Solbes + dave_solbes&credence.com +21648 + Mowa - Mobile Wireless Applications Ltda. + Rogério Corrêa + rogerio.correa&mowa.com.br +21649 + Access Prime + John Caldwell + john.caldwell&accessprime.com +21650 + The Integer Group + Danny Wang + dwang&integerdenver.com +21651 + FastSecure Technologies + Naftali Fasten + fastenn&earthlink.net +21652 + programmercafe + Stephen Suen (SUNRUJUN) + stephen.suen&gmail.com +21653 + Wirtualna Polska S.A. + Michal Luczak + micluc&wp-sa.pl +21654 + netnea AG + Jiri Dvorak + dvorak&netnea.com +21655 + Deadpixi.com + Rob King + numbersadmin&deadpixi.com +21656 + Iskraemeco d.d. + Milan Kozole + milan.kozole&iskraemeco.si +21657 + Abrantix AG + Thomas Cerny + thomas.cerny&abrantix.com +21658 + Kashya + Arye Shapiro + aryes&kashya.com +21659 + IsarNet AG + Andreas Perthel + andreas.perthel&isarnet.de +21660 + ADAM SOFTWARE Ingenieurbuero + Alfred Adam + iana&adam-software.de +21661 + Verilet Corporation + Rod Macpherson + rmacpherson&verilet.com +21662 + IIJ Technology Inc. + Shigeki Ohtsu + ohtsu&iij-tech.co.jp +21663 + Pazmany Peter Catholic University + Miklos Pasztor + pasztor&ppke.hu +21664 + SAPEC (Sociedad Anonima de Productos Electronicos y de Comunicacion) + Miguel Cristobal + miancris&sapec.es +21665 + VSB - Technical University of Ostrava + Radomir Orkac + oidmaster&vsb.cz +21666 + ToutProgrammer.com + Stéphane Vanpoperynghe + webmaster&toutprogrammer.com +21667 + Claymore Systems, Inc. 
+ Eric Rescorla + ekr&rtfm.com +21668 + one11.net + Thomas House + ixo&one11.net +21669 + Multiband Inc + Troy Nelson + troy.nelson&multibandusa.com +21670 + MM Invent + Matthew Madderra + mminvent&mminvent.com +21671 + Netscout Systems, Inc. (formerly 'VSS monitoring Inc.') + Jeff Morriss + jeff.morriss&netscout.com +21672 + Psyche Trading Company + Peter Brooks + peter&phmb.biz +21673 + Bei Jing Polypegasus Technology Co., Ltd. + Fan Hua + fanhua&polypegasus.com +21674 + Beijing TeleSystem Technologies Co., Ltd + Zhou Feng Zhang + z.f.zhang&telestek.com +21675 + Compunicate Technologies, Inc. + Qu Dongsheng + dsqu&cti.com.cn +21676 + SparkLAN Communications, Inc. + Derek Chen + derek.chen&sparklan.com +21677 + Shanghai Huama Information Tech.Developmenet Co.,Ltd + Tony d Lu + tony.lu&71eq.com +21678 + SCREEN SERVICE ITALIA S.p.A. + Alberto Pavesi + pavesi&screen.it +21679 + Bundesagentur für Arbeit + Holger Scheetz + it-systemhaus.vertrauensdienste&arbeitsagentur.de +21680 + MeadWestvaco Intelligent Systems + Susan Grauel + susan.grauel&meadwestvaco.com +21681 + WholeSecurity, Inc. + Scott Wu + scott.wu&wholesecurity.com +21682 + ricilake.net + Rici Lake + rici&ricilake.net +21683 + Wyrdwright, Inc. + Barry King + iana&nospam.wyrdwright.com +21684 + wayneandersen.com + Wayne Andersen + powertoaster&hotmail.com +21685 + eFunds Corp. + Gerard Nadeau + gerard_j_nadeau&efunds.com +21686 + Kyung Gee Electronics Co., Ltd. + Younkwan Lee + eric&kgelect.com +21687 + KIP America, Inc. + Phil Shipley or Phil Bubin + pshipley&kipamerica.com +21688 + Serge Co. + Serguei Safonov + serge_safonov&yahoo.com +21689 + easynet + tim weickert + tim.weickert&de.easynet.net +21690 + David Bremner + David Bremner + dbremner&gmail.com +21691 + gnuTechnologies + Gary C. New + garycnew&yahoo.com +21692 + Coradir S.A. + Norberto Nazabal + nazabal&coradir.com.ar +21693 + LINET Services GbR + Moritz Bunkus + m.bunkus&linet-services.de +21694 + BeiJing Sina Information Technology Co.,Ltd + Huang Dong + huangdong&staff.sina.com.cn +21695 + net-track GmbH + Oliver Hitz + oliver&net-track.ch +21696 + KEYMILE AG + Jon Duri Sarott + jonduri.sarott&keymile.com +21697 + Rivertree Networks Corp. + Lee, Kwangsoo + kslee&rivertreenet.com +21698 + Field Boxmore Mareen + Andy Van den Bergh + andy.vandenbergh&fieldboxmore.com +21699 + Belgocontrol + Frederic Dombier + Frederic_Dombier&belgocontrol.be +21700 + Keller & Partner GmbH + Markus Kniehl + mkniehl&kup.de +21701 + MOBOTIX AG + Jochen Kunz + Jochen.Kunz&mobotix.com +21702 + EAPLV + Dario Tongue + dtongue&paris-lavillette.archi.fr +21703 + Microhard Systems Inc. + Lihong Lei + llei&microhardcorp.com +21704 + DPS-Promatic srl + Massimo Portolani + mxp&dpspro.com +21705 + ARG Electrodesign Ltd + Andy Frank (ext 772) + Andy.Frank&arg.co.uk +21706 + Dominion Resources, Inc. + Thomas Froncek + Thomas_J_Froncek&dom.com +21707 + DECATHLON + Thomas CORJON + thomas.corjon&decathlon.com +21708 + Netsweeper Inc. + Lou Erdelyi + support&netsweeper.com +21709 + Procios Oy + Petri Lammi + petri.lammi&procios.com +21710 + agentes AG + Elmar Abeling + elmar.abeling&agentes.dyndns.org +21711 + MediSoft Egypt + Sherif ElGhatrifi + s.elghatrifi&medisoftegypt.com +21712 + Caisse de Prévoyance Sociale + Hoani CROSS + hcross&cps.pf +21713 + Vectorsite + Tim Christensen + astrocat123&vectorsite.com +21714 + Cape Byron Imports & Wholesale Pty. Ltd. + Karl J. 
Ots + kjots&capebyronimports.com.au +21715 + UFRJ + Joao Carlos Peixoto + peixoto&nce.ufrj.br +21716 + hangzhou goldway information technology CO.,Ltd + ruizhen han + rzhan8403&hotmail.com +21717 + NIPPON KOEI POWER SYSTEMS Co.,Ltd. + Takayuki Ogawa + ogawa-tk&n-koei.jp +21718 + ReestrRN + Alex Podchufarov + apodchufarov&reestrrn.ru +21719 + EmmeciSoft S.n.c. + Mario Mariotti + mario.mariotti&emmecisoft.it +21720 + Microgaming Software Systems Limited + Nico Snyman + nicos&microgaming.com +21721 + SERVICIO DE CERTIFICACION DE LOS REGISTRADORES + Manuel Lorenzo + manuel.lorenzo&scregistradores.com +21722 + Marine Corps Community Services + Spencer Hal Visick + visicksh&usmc-mccs.org +21723 + NTA CO.LTD. + ARTUR PAWLATA + office&nta.com.pl +21724 + elementec Software & Consulting + Dr. Peter Kullmann + kullmann&elementec.de +21725 + Olaf Huebner + Olaf Huebner + olaf.huebner&epost.de +21726 + HappyComm + Soo-Young, CHOI + young&happycomm.com +21727 + People's Bank of Georgia + Alexander Nefedov + nefedov&peobge.com +21728 + Synaccess Networks, Inc. + Shan Han + shan&synaccess-net.com +21729 + Somogy Informatika Kft. + Andriko Tamas + at&sominfo.hu +21730 + stSoft + BinDiPan + bdpanx&yahoo.com.cn +21731 + CryptoSoft, Ltd + Dmitry Subbotin + dsubboti&mail.ru +21732 + Sparkasse Hanauerland + Ralph Sester + ralph.sester&sparkasse-kehl.de +21733 + Altar Sp. z o.o. + Robert Urbanczyk + robert.urbanczyk&altar.com.pl +21734 + APO Consortium + David Reid + reid&snmp.com +21735 + Alber Inc. + Eddie Deveau + edeveau&alber.com +21736 + IntelliTrans Ltd + Sherril Noble + sherril.noble&intellitrans.co.uk +21737 + Gestweb S.p.A. + Luigi Belli + gigi&gestweb.com +21738 + Hong Kong Education City Ltd + Nicky Leung + hostmaster&hkedcity.net +21739 + Trusted Solutions s.r.o. + Antonin Kral + antonin.kral&trusted.cz +21740 + Persystent Technology, Inc + Thomas Sturgeon + tom.sturgeon&persystent.com +21741 + St. Margaret's Anglican Church + Ryan Rempel + ryanrempel&mac.com +21742 + uWink Inc. + Brian Yoder + brian.yoder&uwink.com +21743 + yasp.at + hermann zirknitzer + office&yasp.at +21744 + BlackRock Financial Management + Karl A Wieman + kwieman&blackrock.com +21745 + Arnprior KF + Dennis Momy + danzigger26&hotmail.com +21746 + Cheyenne Networks Inc. + George Azzi + gazzi&cheyennenetworks.com +21747 + Aldebaran Systems Ltd + Mr Graham Bradshaw + info&aldebaran.co.uk +21748 + Iggesund Paperboard AB + Lennart Mårtensson + lennart.martensson&iggesundpaperboard.com +21749 + EtherTek Circuits + Dan Pattison + sales&remotemonitoringsystems.ca +21750 + Highbridge Capital Management, LLC + Jonathan Zhukovsky + systems&hcmny.com +21751 + St. Thomas University + Stephen Moss + smoss&stu.ca +21752 + Societe Cooperative Migros Neuchatel-Fribourg + Roland Herrmann + roland.herrmann&gmnefr.migros.ch +21753 + ZKM - Zentrum f. Kunst und Medientechnologie, Karlsruhe + Florian Krupicka + florian&zkm.de +21754 + Media-Com Sp. z o.o. 
+ Radoslaw Kojdecki + radek&media-com.com.pl +21755 + Collaborative Adaptive Sensing of the Atmosphere (CASA) + Armen Babikyan + armenb&cs.umass.edu +21756 + SOFTEC sa + Denis Gervalle + support&softec.lu +21757 + Digicadd Computacao Grafica LTDA + Albener Pessoa + albener&digicadd.com.br +21758 + Slushpupie + Jay Kline + jay&slushpupie.com +21759 + All China Federation Of Industry & Commmerce + YanxiongWu + wuyx&acfic.org.cn +21760 + E-Access Broadband + Hugh Hunkin + admin&e-access.com.au +21761 + Lübecker Hafen-Gesellschaft mbH + Wulff, Erik + erik.wulff&lhg-online.de +21762 + Cornwall County Council + Stewart Elkin + selkin&cornwall.gov.uk +21763 + Claranet + David Freedman + david.freedman&uk.clara.net +21764 + Hill Systems LLC + Charles Robert Hill + robb&hill-systems.com +21765 + The Lyarama Project + Dong LI + lyarama&gmail.com +21766 + Imperial College London + Stephen Gardner + s.j.gardner&imperial.ac.uk +21767 + Synarc, Inc. + David M. Goldhaber + David.Goldhaber&SYNARC.COM +21768 + Inspired Broadcast Networks Ltd + Steve Cotton + operations&inspiredbroadcast.net +21769 + M-Tech Information Technology, Inc. + Idan Shoham + iana&mtechit.com +21770 + Wesaidso Software Engineering + Frank Molder + info&wesaidso.com +21771 + Imolko C.A. + Yohany Flores Suarez + info&imolko.com +21772 + SEB Eesti Ühispank AS + Risto Laanoja + taotlus&eyp.ee +21773 + Choice Sourcing Pty + Chris Ip + chris&sourcing.co.za +21774 + ECLASS + Adriano Silvano + asilvano&class.it +21775 + Digital Imaging Solutions + Larry Scarff + larry&scarff.org +21776 + TelcoBridges Inc. + Carl Boulanger + carl.boulanger&telcobridges.com +21777 + Nitido Inc. + Raymond Li + raymond&nitido.com +21778 + VA TECH ELIN EBG GmbH & Co + Reinhold LEITNER + Reinhold.Leitner&vatech-ict.com +21779 + jade.net + Tim Kehres + tim&kehres.com +21780 + ADURO d.o.o. + Sinisa Domislovic + oidadmin&aduro.hr +21781 + Montelnor SCL + Rafa Diaz + montelnor&montelnor.com +21782 + Chinasys Technologies Co.,Ltd. + Cao Liquan + caolq&chnsys.com.cn +21783 + ANTGroup s.r.l + Diego Maioli + d.maioli&antgroup.it +21784 + Calrec Audio Ltd + Alison Jones + alison.jones&calrec.com +21785 + VÖB-Service GmbH + Matthias Knop + matthias.knop&voeb-service.de +21786 + Boston.com + William Tessier + snmp&boston.com +21787 + Qualitas, Inc. + Bob Smith + bsmith&sudleyplace.com +21788 + Poindexter Systems, Inc. + William Bates + wbates&poindextersystems.com +21789 + Cablevision - FiberTel + Alberto Eugenio Garrido + agarrido&fibertel.net.ar +21790 + Obidobi Software + Thomas Åhlén + thomas&obidobi.net +21791 + Wave-Tech, Inc. + Marko Kranjcic + marko&wave-tech.com +21792 + BearingPoint, Inc. + Bobby Brown + bobby.brown&bearingpoint.com +21793 + Kisoku Networking + Mathieu Sauve-Frankel + msf&kisoku.net +21794 + Panthera Systems + Daniel Baumann + daniel-baumann&panthera-systems.net +21795 + Integrated Decision Systems + Terris Linenbach + terris.linenbach&idsnet.com +21796 + HW group s.r.o + Jan Rehak + Rehak&HW.cz +21797 + Googgun Technologies Inc. + Ahmed Masud + masud&googgun.com +21798 + Covergence, Inc. + William Lynch + wlynch&covergence.com +21799 + CONSULTEC + Rodolpho Testi + rdtesti&yahoo.com.br +21800 + Island Internet Services + Hugh Blandford + hugh&island.net.au +21801 + California State University Los Angeles + Henry Balta + hbalta&cslanet.calstatela.edu +21802 + Maternité Régionale A. 
Pinard + Responsable Service Informatique + e.desvigne&maternite.chu-nancy.fr +21803 + Centrepoint Technologies + Natalie Gagnon + ngagnon&talkswitch.com +21804 + 360degreeweb, Inc + Frank Liu + Frank.liu&360degreeweb.com.cn +21805 + monkeyjr.com + Henry WONG + webmaster&monkeyjr.com +21806 + Vitrado GmbH + Christian Kalkhoff + c.kalkhoff&vitrado.de +21807 + TRW Engine Components + David Blankenship + David.Blankenship&trw.com +21808 + Prairie Systems Inc. + Michael Canann + michaelc&prairiesys.com +21809 + Campusware + Robert Knuth + bobbyk&campusware.com +21810 + Yubivaza + Laurent Fasnacht + server-admin&o-t.ch +21811 + Kalion + Stefan Sinnige + ssinnige&kalion.com.au +21812 + Kofax Image Products + Robert Macias + robert_macias&kofax.com +21813 + West Japan Railway Company + Takashi Mori + t-mori&jripv6.jp +21814 + Booth Software Consulting + Ken Booth + kbooth&pipeline.com +21815 + School of Veterinary Medicine Hannover + Frank Hagenberg + frank.hagenberg&tiho-hannover.de +21816 + CDE nove tehnologije d.d. + Gregor Brecko + gregor&cde.si +21817 + Gemini Storage Company + Dan Zhou + dzhou&geministorage.com +21818 + CapTech Ventures, Inc. + Jack Cox + jcox&captechventures.com +21819 + Guernsey Customs + Tim Leung + custcert01&badger.co.uk +21820 + ChanneLynx LLC + Russell Neville + rneville&channelynx.com +21821 + Colibre + Francesco Chiechi + francesco.chiechi&colibre.it +21822 + Andrew Ducore, Independent Consultant + Andrew Ducore + aducore&wam.umd.edu +21823 + MDS Caswell + Timothy C. Schmid + Tim_Schmid&mds-caswell.com +21824 + Fortuitous Technologies Inc + Philip Carinhas + info&fortuitous.com +21825 + asystec | Adolf Systemtechnik + Dietmar Adolf + info&asystec.net +21826 + Metropipe Network Services Inc + Kenny Kaputa + k&metropipe.net +21827 + ArcanaNetworks Inc. + Unni Sreekumar + unni&arcananet.com +21828 + Meta Systems AS + Bjørn Augestad + boa&metasystems.no +21829 + Ian A. Young + Ian Young + ian&iay.org.uk +21830 + CrossTalk Göteborg AB + Per Lindgren + per.lindgren&crosstalk.se +21831 + ClearOne Communications Inc. + Derek Graham + derek.graham&clearone.com +21832 + LabOne Inc. + Brian Whitehead + brian.whitehead&labone.com +21833 + Megas Media + Alex Crivat + alex.crivat&axu.rdsnet.ro +21834 + Tufin Technologies + Ruvi Kitov + iana&tufin.com +21835 + SESCAM - Servicio de Salud de Castilla la Mancha + Ismael Moreno Fernández + imoreno&sescam.jccm.es +21836 + MRF Systems Ltd. + M. White + mw&mrfsys.co.uk +21837 + DiGIR + David A Vieglais + vieglais&ku.edu +21838 + LooseFlow Ltd + K Loose + iana&looseflow.com +21839 + ALAXALA Networks Corporation + Kenji HAYASHI + khayashi&alaxala.com +21840 + Hitachi IE Systems Co.,Ltd. + Makoto Sano + m-sano&hitachi-ie.jp +21841 + Hochschule fuer Angewandte Wissenschaften Hamburg + Andrea Albert + andrea.albert&is.haw-hamburg.de +21842 + Energy Technologies, Inc. + Paul R. Coy + snmpmanager&yahoo.com +21843 + SHENZHEN MQ TECHNOLOGIES INDUSTRIAL CO., LTD. + Gu Guanghui + dulcetgu&126.com +21844 + Durham School Services + Jerry Rolo + jrolo&durhamschoolservices.com +21845 + Trium Sistemas Informaticos SL + Sebastian Fernandez A. + oid&triumsistemas.com +21846 + Trium Systems SL + Sebastian Fernandez A. + oid&triumsystems.com +21847 + The Scott Lawson Group Ltd. 
+ Zach Lawson + zlawson&slgl.com +21848 + SLACALEK + Petr Slacalek + slacalek&volny.cz +21849 + Riggs Heinrich Media + Paul Rhodes + prhodes&golfconnoisseur.com +21850 + Intaero Sdn Bhd + Jeff Smart + jeff&intaero.com +21851 + MurrayLisook.com + Murray Lisook + murray&murraylisook.com +21852 + Norkring AS + Jan-Terje Larsen + jan-terje.larsen&telenor.com +21853 + Gesellschaft fuer Informations- und Datenverarbeitungs mbH + Martin Beier + M.Beier&gfi-bremen.de +21854 + CollabNet, Inc. + Tristan Horn + tristan&collab.net +21855 + Winterdale Computerdienste GbR + Philipp Taprogge + philipp.taprogge&winterdale.de +21856 + Modulus Systems + Arjun Sandhu + contact&modulussystems.com +21857 + Chaucer Press Limited + Graham Fear + shanon.mcneil&spicerspaper.co.nz +21858 + A.P. Woodham Ltd. + Peter Greenwood + shanon.mcneil&spicerspaper.co.nz +21859 + Leightons Paper & Printing Ltd. + Craig Penn + shanon.mcneil&spicerspaper.co.nz +21860 + Harrah's Entertainment, Inc. + Dean Benz + dbenz&harrahs.com +21861 + Mobile Cohesion + Maria McCafferty + maria.mccafferty&mobilecohesion.com +21862 + Alcasat + Nicolas Hennion + hennion&alcasat.net +21863 + INFOTEL + Patrick Allouche + patrick.allouche&infotel.com +21864 + ibte + Gerhard Thimm + admin&ibte.ch +21865 + Unitrends Software Corp + Mark Phillippi + markp&unitrends.com +21866 + FMAudit, LLC. + Kevin Tetu + kevint&gttechonline.com +21867 + ESI-HEB + marcel van haelen + mvanhaelen&heb.be +21868 + Highmark Inc. + Linda Betts + linda.betts&highmark.com +21869 + CUI Inc (formerly 'Tectrol Inc') + Joseph Abediny + jabediny&cui.com +21870 + United Cerebral Palsy of Greater Chicago + John Leonard + jleonard&ucpnet.org +21871 + Datastream Systems, Inc. + Mike Savage + darth.savage&gmail.com +21872 + WICEN (NSW) Inc + Dave Horsfall + dave&horsfall.org +21873 + XFI Corporation + Dr. Saiid Paryavi + saiid&xfi.com +21874 + Moore Gallagher ltd. + Chris Griffith + shanon.mcneil&spicerspaper.co.nz +21875 + macfadden.org + Michael MacFadden + mike&macfadden.org +21876 + metabit + J. Wilkes + iana.businesscontact&metabit.com +21877 + Multi Skilled Resources Australia + Jason Cox + jason.cox&multiskilled.com.au +21878 + MedStar Health + Jim Kuhar + Jim.L.Kuhar&MedStar.net +21879 + Associazione Culturale Inventati + Paolo De Rosa + pdr&inventati.info +21880 + Australian Experimental High Energy Physics Group + Lyle Winton + winton&physics.unimelb.edu.au +21881 + Milom, Inc. + Michael Milom + msmilom&milom.com +21882 + Justsystem Corp. + Yoshihiro Suzuki + yoshihiro_suzuki&justsystem.co.jp +21883 + Brain ltd. + jirong lin + jirong&seed.net.tw +21884 + W. Schneider+Co AG + Silvan Maechler + admin&wschneider.com +21885 + Ericsson Mobile Platforms AB + Per Ståhl + per.stahl&ericsson.com +21886 + Newel Informatique + Amir Hajjam + iana&newel.net +21887 + Scheidt & Bachmann System Technik GmbH + Ruediger Hinze + hinze.ruediger&scheidt-bachmann-st.de +21888 + Content Management License Administrator + John Hoy + admin&cm-la.com +21889 + Acsis, Inc. + Paul Cleary + pcleary&acsisinc.com +21890 + GRIB + jorge naranjo + jnaranjo&imim.es +21891 + Tibco Telecom Reseau + Moureaux + cmoureaux&tibco.fr +21892 + Ringwald Mikroelektronik GmbH + Armin Ringwald + a.ringwald&ringwald.de +21893 + phil cornes + phil cornes + phil.cornes&gmail.com +21894 + ISABEL S.A. + trust solutions (Mr. 
Huygens Frédéric) + trustsolutions&isabel.be +21895 + Delta-Soft Ltd + Alexey Slynko + slynko&tronet.ru +21896 + Niederrhein University of Applied Sciences + Franz-Josef Mueller + franz-josef.mueller&hs-niederrhein.de +21897 + i-CABLE Network Limited + Leo Chan + leo&hkcable.com.hk +21898 + SvamberkNET, s.r.o. + Jakub Vlasek + vlasek&svamberk.net +21899 + Cape City Command, LLC + Bill Mott + bmott&capecitycommand.com +21900 + Danacal Ltd. + Alister Sibbald + enquiries&danacal.com +21901 + Millward Brown UK Limited + Kevin Spicer + kevin.spicer&uk.millwardbrown.com +21902 + BAJM Internet + Max Wrzesinski + mcson&p2.bajm.com.pl +21903 + NetServices LLC + Dave Rutledge + daver&netsrvcs.com +21904 + Yoshida's Inc. + Gregg Berkholtz + hostmaster&yoshida.com +21905 + Leibniz-Institut fuer Meereswissenschaften + Dr. Ruediger Kunze + rkunze&ifm-geomar.de +21906 + March Hare Software Ltd. + Tony Hoyle + tony.hoyle&march-hare.com +21907 + Wohnprojekt Schellingstrasse GmbH + Andreas Mueller + andreas&schellingstrasse.de +21908 + NexaSoft Inc. + Han Jae Sung + castle&nexasoft.co.kr +21909 + Chaos Limited, LLC + AC Capehart + oidmaster&chaoslimited.com +21910 + Electrocomponentes S.A. + Antonio Pavanetto + app&electrocomponentes.com +21911 + Trusted Computing Group + Ned Smith + admin&trustedcomputinggroup.org +21912 + Priva Technologies, Inc. + Roger Brandt + roger.brandt&priva-tech.com +21913 + dop intelligence + Matthew Ma + mma&bluejaynetworks.com +21914 + Networx.AT - busta & neumayr oeg + BUSTA Oliver + Admin&Networx.AT +21915 + BitPusher, LLC + Michael Halligan + michael&bitpusher.com +21916 + Tehna + Denis Kochmashev + iana&tehna.ru +21917 + Staatliche Berufsschule I Bayreuth + Joachim Woelk + joachim.woelk&bs1-bt.de +21918 + Tesorion NL + R.B. Gloudemans + roel.gloudemans&tesorion.nl +21919 + Clearwire, Inc. + Katherine Beal + katherine.beal&clearwire.com +21920 + Cipherium Systems Co., Ltd. + Cadon Sheu + cadon.sheu&cipherium.com.tw +21921 + Inspiration Matters Ltd + Bryan Crotaz + bryan&inspirationmatters.com +21922 + TIE - Tecnologias de Integração Empresarial, Lda. + Antonio Sargento + antonio.sargento&tie.co.pt +21923 + Bowater, Inc. + Charles Bryson + brysonca&bowater.com +21924 + Adam Pordzik + Adam Pordzik + adampordzik&gmx.de +21925 + RTP Network Services, Inc + Joe A. Williams + jwianaorg3_re-move_me&rtpnet.net +21926 + MedOrder, Inc. + Jake Cormier + jcormier&medorder.com +21927 + intralinux + Michael Hoffmeister + m.hoffmeister&mb-ho.de +21928 + Renaissance Computing Institute + Brad Viviano + rciadmin&renci.org +21929 + Netikos Finland Oy + Veikko Kehä + veikko.keha&netikos.fi +21930 + Prihateam Networks Finland Ltd + Jarkko Priha + priha&prihateam.fi +21931 + SE46 AB + Olle Segerdahl + olle&se46.se +21932 + Tor.AT - Netzwerke-Server-Sicherheit + MERIGHI Marcus + McMer&Tor.AT +21933 + Jamie Thompson + Jamie Thompson + iana&jamie-thompson.co.uk +21934 + FIlesX + Frank Jablonski + fjablonski&filesx.com +21935 + Sigtec LTD + Malcolm Stead + malcolm&sigtec.ie +21936 + Senforce Technologies, Inc. + Peter K. Boucher + pboucher&senforce.com +21937 + Ezrez Software, Inc. + Joseph Phillips + dphillips&ezrez.com +21938 + Radio Muzyka Fakty, Sp. z o.o. 
+ Lukasz Grochal + hostmaster+iana&rmf.pl +21939 + Hitachi Energy EDS500 + Janis Kruse + janis.kruse&hitachienergy.com +21940 + Enatel Ltd + Arthur de Beun + arthur.debeun&enatel.net +21941 + Data Return, LLC + Matt Springfield + matt.springfield&datareturn.com +21942 + Rubicon Software Ltd + Philip Ross + pross&rsuk.com +21943 + Uni-Sommerfest e.V. + Claus Faerber + claus.faerber&uni-sommerfest.de +21944 + Blitzen Networks + Steve Lee + stevelee&blitzen.net +21945 + Kerberos d.o.o. + Tomislav Vrebac + tomislav.vrebac&kerberos.hr +21946 + University of Liverpool, Department of Electrical Engineering + Lindsay Jack + l.b.jack&liv.ac.uk +21947 + TintaDigital, Soluções em Tecnologias de Informação, Lda. + José Carlos Correia + jcorreia&tintadigital.com +21948 + TW TeamWare s.r.l. + Andrea Giardini + andrea.giardini&teamware.it +21949 + Istituto Nazionale di Geofisica e Vulcanologia + Stefano Pintore + pintore&ingv.it +21950 + Open Consultants + Jason Walker + jwalker&openconsultants.net +21951 + Katun Corporation + Netmaster - I.S. Department + netmaster&katun.com +21952 + Crescent Business, Inc. + Richard Moon + richardjmoon&hotmail.com +21953 + Business Computer Maintenance Limited + Ben Major + ben.major&bcm-uk.com +21954 + Transatlantic Diagnostics LLC + Michael Abramoff + michael-abramoff&uiowa.edu +21955 + CONTROL SYSTEMS Srl + Gianfranco Cortellazzi + gf.cortellazzi&controlsystems-srl.it +21956 + Megapath Cloud Company LLC + Hugo Aviles + syseng&megapath.com +21957 + Sealed Air (NZ) ltd + Anu Hedge + shanon.mcneil&spicerspaper.co.nz +21958 + Thermakraft IndustriesNZ Ltd. + Bruce Pitcaithly + shanon.mcneil&spicerspaper.co.nz +21959 + P J Papers ltd. + Jack Hayward + shanon.mcneil&spicerspaper.co.nz +21960 + Legend Communications plc + David Baggaley + david.baggaley&legendplc.com +21961 + Exeo Systems + Claude Mally + claude.mally&exeotechnologies.com +21962 + Nextel Communications Inc. + Avis Ng + avis.ng&nextel.com +21963 + Obeco GmbH + Joachim Berger + joachim.berger&obeco.de +21964 + Nesym Consulting Srl + Donadel Luca + luca.donadel&nesym.com +21965 + LTLab + Vesselin Tabakov + ltlab&bulgaria.com +21966 + Cooperativa Obrera Ltda. + Gabriel Gomiz + gomita&cooperativaobrera.com.ar +21967 + Invento Networks, Inc. + Dave Bartolini + dbartolini&inventonetworks.com +21968 + Food Services of America + John Beard + john.beard&fsafood.com +21969 + Kernway Technology Co., Ltd + Austin Shan + austin&kernway.com +21970 + AHTS + Andrew Hosie + a.hosie&ahts.com.au +21971 + Kisters AG + Bernd Kisters + bernd.kisters&kisters.de +21972 + 1mage Software Inc. + Dale Dillard + dadillard&1mage.com +21973 + CaribbeanClub + Dorian + grey&kk.kiev.ua +21974 + Boris Kulig EDV und Statistik Beratung + Boris Kulig + Boris.Kulig&lycos.de +21975 + Infraserv Gmbh & Co Hoechst KG + Patrik Toennishoff + Patrik.Toennishoff&Infraserv.com +21976 + Roberts Wesleyan College + James Field + fieldj&roberts.edu +21977 + The Sage Group PLC + David Rinker + david.rinker&sage.com +21978 + Fluid, Inc. + Mark Belanger + it&fluid.com +21979 + fks BVBA + Luk Knapen + luk.knapen&fks.be +21980 + Enran Telecom + Andrey Orekhov + korj&bt-entel.kiev.ua +21981 + Spiderbox P/L + Damien + damiencahill&spiderbox.com.au +21982 + With Mobile Technology Co., Ltd. + Lee Myoung Oh + molee&with-m.co.kr +21983 + Terrace technology, Inc. + SpaceLee + spacelee&terracetech.com +21984 + Viswis, Inc. + Harry Chang + harryc&viswis.com +21985 + Server-Center, JSC + Varaksa Andrey + andry&atlas-2.ru +21986 + Shinnyo-en Buddhist Order Co. 
+ K Koizumi + f-claire&fjb.fujitsu.com +21987 + Bugs Inc. + RL Vennik + ramon&vennik.com +21988 + ARENAL + Thierry WRZYSZCZ + thierryw&arenal.fr +21989 + Connex.cc DI Hadek GmbH + Markus Fruehwirth + admin&connex.cc +21990 + Universidad Autónoma de Baja California Sur + Guillermo González + memog&uabcs.mx +21991 + char + Andrei RUSAN + char&tuiasi.ro +21992 + Central Command Inc. + Keith Peer + kpeer&centralcommand.com +21993 + girrulat.de + Sascha Girrulat + admin&girrulat.de +21994 + Norfolk Public Schools + Adam Crosby + iana&nps.k12.va.us +21995 + Magyar Elektronikus Aláírás Szövetség / Hungarian Association for Electronic Signature + MELASZ Elnökség + elnokseg&melasz.hu +21996 + crocobox.org + Mickael Guerin + kael&xeberon.net +21997 + PowerStar Ltd. + Istvan Pal + pal.i&mail.powerstar.hu +21998 + Tiani-Spirit + Tiani Martin + office&tiani-spirit.com +21999 + PLUM Computer Consulting, Inc. + Andrew Ettinger + aettinger&plumcc.com +22000 + Pacific Laminations Co. limited + John Jenkins + shanon.mcneil&spicerspaper.co.nz +22001 + Fachhochschule Esslingen - Hochschule fuer Technik + Martin Schmid + mars&fht-esslingen.de +22002 + Michael Breuer + Michael Breuer + mike.breuer&gmail.com +22003 + mst + Harald Gröne + h.groene&gmx.net +22004 + X-MD + Andreas Balg + balg&x-md.de +22005 + GE Healthcare - IT + Mark Niggemann + mark.niggemann&ge.com +22006 + GE DIGITAL - SWS - UCOM + FRANCOIS FONTENELLE + francois.fontenelle&ge.com +22007 + WinWholesale Inc. + Michael Roe + mroe&winwholesale.com +22008 + pld-sparc64 + Paweł Boguszewski + pawel&pld-sparc64.org +22009 + IP-LABEL + ORTOLAN Lionel + lortolan&ip-label.net +22010 + Tranfa Optic Telecommunications Co., Ltd. + Yang bing + yangbing&xunfeng.com +22011 + CORE MICRO SYSTEMS INC. + Mitsuo Asai + asai&cmsinc.co.jp +22012 + silkroadtaiwan,Inc + srtuser + jaska&silkroadtaiwan.com +22013 + SGDN + DCSSI/CFSSI + cfssi&sgdn.pm.gouv.fr +22014 + China Beijing Broada Digital Technology Co.,Ltd + liudonghai(Eric Liu) + liudh&broada.com.cn +22015 + BanianTech Co.,Ltd + Techie Yang + techieyang&baniantech.com +22016 + RackWatch LLC + Larry Paniccia + lpaniccia&rackwatch.net +22017 + Claritas, Inc. + John Hoffman + jhoffman&claritas.com +22018 + RedIce SA + Christian Charette + chcharette&yahoo.com +22019 + Tutus Data AB + Per Holmer + per&tutus.se +22020 + CoreTEC GmbH + Christian Mock + cm+oid&coretec.at +22021 + WestCall Ltd + Stas Vitkovsky + admin&westcall.net +22022 + DeusXMachina + Tim Nicholls + tim&deusxmachina.com +22023 + Schaly + Wolf-Agathon Schaly + schaly_wolf-agathon&t-online.de +22024 + Inoi + Tommi Virtanen + tommi.virtanen&inoi.fi +22025 + ELTI d.o.o. + Gorazd Kuhar + gorazd.kuhar&elti.com +22026 + Texas State Technical College System + Sammy Rhodes + sammy.rhodes&systems.tstc.edu +22027 + Rock Mobile Corporation + Charmin Hsieh + register&rockmobile.com +22028 + Hauser/Flimp Programming + Todd Hauser + todd&dvnt.com +22029 + ActiveLive Technologies LLC + Joseph Kwok + joseph_k&pacbell.net +22030 + Kign Foundation + Balazs Lengyak + ldap&kign.org +22031 + Lattice Semiconductor Corporation + Jim Lewinson + hostmaster&latticesemi.com +22032 + Actelion Pharmaceuticals Ltd. + Lionel Cottin + lionel.cottin&actelion.com +22033 + Cynical Networks + Andrew Silliker + silliker&cynical.net +22034 + joztoz net + Roy Ledochowski + rledousa&yahoo.com +22035 + ECT News Network, Inc. + Daniel Bohling + sysadmin&ectnews.com +22036 + Tapsell-Ferrier Limited + Nic Ferrier + nferrier&tapsellferrier.co.uk +22037 + Elektrownia Bełchatów S.A. 
+ Zbigniew Iwaniuk + zbigniew.iwaniuk&elb.pl +22038 + Kommune42 + Matthias Muenzner + wizz&kommune42.owl.de +22039 + Mathias Wohlfarth EDV-Beratung + Mathias.Wohlfarth + Mathias.Wohlfarth&mw-eb.de +22040 + STATCON B SCHAEFER + Bertram Schaefer + consult&statcon.de +22041 + Generalstaatsanwaltschaft Berlin + Olaf Huebner + IT-Referat&gsta.verwalt-berlin.de +22042 + GITEM + Olivier RITEAU + oriteau&gitem.fr +22043 + Land-of-uz.net + Tom Anderson + tpa10&speakeasy.net +22044 + Moviltek, Inc. + Alejandro Rapoport + arapoport&moviltek.com +22045 + Electrosystems Inc. + Maxim Kuznetsov + k.max&power.rus.net +22046 + Planecom s.r.l. + Andrea Colleoni + tech&planecom.it +22047 + Qascom S.r.l. + Chris Wullems + c.wullems&qascom.com +22048 + CAJA DE AHORROS EL MONTE + IVAN PAZ CROS + ipaz&elmonte.es +22049 + Metro Group Information Technologies + Mr. Guido Schroers + guido.schroers&mgi.de +22050 + Ing. Punzenberger COPA-DATA GmbH + Guenther Haslauer + development&copadata.com +22051 + SavageS Net + Shaun Savage + savages&savages.net +22052 + Rising Technology Co., Ltd. + Aijun Jiang + jiangaj&hotmail.com +22053 + Neptune Gaming + Alex Kehr + helloimslinky&gmail.com +22054 + Three Rings Design, Inc. + Gregory Sutter + admin+iana&threerings.net +22055 + HighSpeed America, Inc. + Jonathan Allen + jallen&hsamerica.com +22056 + Prefeitura da Cidade do Rio de Janeiro + Max Moura Wolosker + mwolosker&rio.rj.gov.br +22057 + New Signature + Jason Senich + jsenich&newsignature.com +22058 + Danaher Power Solutions + Gene Marsh + marshm&anycast.net +22059 + Corp. Hostarica + Jose Hidalgo + jose&hostarica.com +22060 + Labcom Sistemas Ltda + Armando Drummond + armando.drummond&labcomsistemas.com.br +22061 + Liquid Systems + Marc MacIntyre + marcmac&liquidsys.com +22062 + G2 Switchworks + Michael Sokolewicz + msokol&g2switchworks.com +22063 + CIFE - Consorcio de Intermediación de Facturas Electrónicas + Florencio Díaz Vilches + fdiaz&anf.es +22064 + Landeskreditbank Baden-Württemberg (L-Bank) + Rolf Mayer + rolf.mayer&l-bank.de +22065 + Mobiltel AD + Naiden Nedelchev + n.nedelchev&mobiltel.bg +22066 + Mobitex Technology AB + Johanna Karlsson + johanna.karlsson&mobitex.com +22067 + ENSEA + Jean-Paul Bachmann + bachmann&ensea.fr +22068 + Tai Liang Chemical Corporation + Kumphanart Dansiri + kumphanartd&tailiang.co.th +22069 + Deuromedia Technologies + Manfred Petz + pm&deuromedia.at +22070 + LMR Robosistic Ltd. Co. + Guanxun Mu + lyxmoo&gmail.com +22071 + Associazione culturale Investici + Antonio Laquaglia + phasa&inventati.org +22072 + EDS + Dan Gonos + dan.gonos&eds.com +22073 + Cerantus Technologies + Michael Wiseman + mwiseman&cerantus.com +22074 + Vibrant Media Ltd + Toby Doig + toby&vibrantmedia.com +22075 + Global Technical Engineering Solutions Inc. + Ken Kupsch + gtesit&gtesinc.com +22076 + Televisio de Catalunya, s.a. + Jesus M. Garcia Segarra + jgarcia.q&tvcatalunya.com +22077 + Smiling Screens Inc. + Scott Stein + scottstein&gmail.com +22078 + Burlington Coat Factory Warehouse + Matt Fahrner + matt.fahrner&coat.com +22079 + Matterform Media + Daniel Lyons + webmaster&matterform.com +22080 + YACAST + LACOSTE Ludovic + llacoste&yacast.net +22081 + Guyton Networks + Nat Guyton + iana&guyton.net +22082 + IBM WPC Lab + Roy Ledochowski + rledo&us.ibm.com +22083 + okkernoot.net + N. Th. Roosen + iana&okkernoot.net +22084 + Industec Industria Metalurgica Ltda. 
+ Edmundo Valle Neto + cpd&industec.com.br +22085 + CIDE Consorcio para la Interoperabilidad de Documentos Electrónicos + Florencio Díaz Vilches + fdiaz&anf.es +22086 + Paul Poeltner + Paul Poeltner + paul.poeltner&emp.co.at +22087 + Invelica GmbH + Bodo Rueskamp + iana+spam&invelica.com +22088 + Alpine-Energie Holding AG + Silvia Schmid + edv&alpine-energie.de +22089 + Paedagogische Hochschule Zentralschweiz + Urs-Jakob Rueetschi + urs-jakob.rueetschi&phz.ch +22090 + GCommerce Inc. + Ryan Mentzer + rmentzer&gcommerceinc.com +22091 + MATRICS COMMUNICATIONS + Dominique Jeannerod + dominique.jeannerod&matrics.fr +22092 + Mazda Motor Europe GmbH + Karl Seidel + KSEIDEL&MAZDAEUR.COM +22093 + Software Systems + Pekka Rossi + pekka&rossi.se +22094 + Pulse Entertainment + Michael Tan + mtan&pulse3d.com +22095 + Baltic Online Computer GmbH + Dagobert Michelsen + dam&baltic-online.de +22096 + Image Project Inc. + Iavor Marinov + mailroom&websitepulse.com +22097 + B&W Fahrzeugentwicklung GmbH + Andre Geiger + andre.geiger&b-w-fahrzeugentwicklung.de +22098 + CHEN + Christof Chen + oidadmin&chen.de +22099 + EKF Elektronik &- Messtechnik GmbH + Andreas Schockenhoff + asc&ekf.de +22100 + Swisscom Fixnet AG + Andrejevic Andrej + andrej.andrejevic&swisscom.com +22101 + Udo Meng GmbH + Udo Meng + office&udo-meng.de +22102 + KIM Systemhaus GmbH + Jens Wegener + jwegener&kim-consult.de +22103 + Paynet Ltd + Ron Webb + webbr&paynet.co.ke +22104 + YesTurnkey Technology, Inc. + Yuh-Rong Leu + yrleu&yahoo.com.tw +22105 + Central Visayas Information Sharing Network Foundation Inc. + Ivan R. Sy Jr. + ivanjr&serv01.secure.net.ph +22106 + Vodafone SDEC + Jeff Lin + jeff.lin&vodafone.com +22107 + The Florida Lottery + Carl Cocroft + cocroftc&flalottery.com +22108 + Application Science and Technology + Frederick C. Druseikis + fdruseikis&team-ast.com +22109 + Berliner Hochschule für Technik (BHT) + Hochschulrechenzentrum + hrz&bht-berlin.de +22110 + Airedale International Air Conditioning Ltd + Controls Department + controls&airedale.com +22111 + Chilibyte Oy + Bjorn Andersson + bjorn.andersson&chilibyte.com +22112 + Vigil Security, LLC + Russell Housley + housley&vigilsec.com +22113 + Gigabeam + Robert Sutherland + bob.sutherland&gigabeam.com +22114 + Battle.net Underground + Scott Anderson + camel&clanbnu.net +22115 + Netflexity, Ltd + Max Fedorov + max&netflexity.com +22116 + Queryus + Ben van Veen + info&queryus.nl +22117 + Epiq Electronic Assembly Ltd. + Anton Stamenov + ais&epiq.com +22118 + Advance America + Robert Long + rlong&advanceamerica.net +22119 + GBST Holdings + Damien Moloney + Damien.Moloney&gbst.com +22120 + datastacks.com + Phillip Jones + iana&dsmouse.net +22121 + Krasnoyarsk State Pedagogical University + Eugene Konev + ejka&imfi.kspu.ru +22122 + ESOLUTIONPROVIDERS + MANISH DHAWAN + manish_dhawan&vsnl.com +22123 + Unimas Systems, Corp. + Miles Zhou + zzh&unimassystem.com +22124 + Networks & More! Inc. + James Punderson + jpunderson&k12usa.com +22125 + Sierra Video Systems Inc. + Michael F. Hagen + MHagen&SierraVideo.com +22126 + AVONET, s.r.o. 
+ Technical support + support&avonet.cz +22127 + Delvos + Elmar Delvos + elmar&delvos.net +22128 + RBC Capital Markets, Sydney + Wai- Kiong Choy + waikiong.choy&rbccm.com +22129 + AfterHours Upgrades + Paul Butler + pbutler&afterhoursupgrades.com +22130 + Vivatas, Inc + Patrick Jennings + iana-info&vivatas.com +22131 + ÖWD security systems GmbH & Co KG (formerly 'ÖWD time access GmbH') + Gerald Fauland + g.fauland&owd.at +22132 + Mediasyscom SAS + Mr Emmanuel BASSE + emmanuel.basse&mediasyscom.com +22133 + UWIC + Mark John + majohn&uwic.ac.uk +22134 + Eminds AB + Johan Ekblad + jka&eminds.se +22135 + Sentiva Innovation AB + Torsten Jacobsson + torsten&sentiva.se +22136 + outsmart Ltd. + ofer cohen + ofer.cohen&smart-ss7.com +22137 + Ivosh, s.r.o. + Ivo Raisr + ivosh&ivosh.net +22138 + ATS-KONVERS Ltd. + Alexandr Malikov + convers&pskov.ru +22139 + Kurchatov Institute Grid Deployment + Eygene Ryabinkin + rea&mbslab.kiae.ru +22140 + Volker Scheuber + Volker Scheuber + vscheuber&gmail.com +22141 + ProCom GmbH + Torsten Eicker + er&procom.de +22142 + Interferenza s.r.l. + Giancarlo Russo + g.russo&interferenza.com +22143 + Steatite Ltd + Gary Parfrey + sales&steatite.co.uk +22144 + InfoNotary Ltd. + Tania Javasheva + tjavasheva&infonotary.com +22145 + ViDiSys GmbH + Jürgen PETZOLD + petzold&vidisys.de +22146 + Dpt. of Philosophy, Utrecht University + Jeroen Scheerder + js&phil.uu.nl +22147 + Allpoint Security Ltd. + Barry Seward + barry&allpoint-security.com +22148 + Venda Ltd + Radoslaw Zasiadczuk + all-sys&venda.com +22149 + Systemhaus Bad Waldsee + Sven Hillebrecht + sven.hillebrecht&all-for-one.de +22150 + Wettern Network Solutions + Joern Wettern + administrator&wettern.com +22151 + Neotronic s.r.l. + Marco Finelli + m.finelli&neotronic.it +22152 + CipherSec SPRL + Frédéric d' Huart + fdh&ciphersec.com +22153 + Viveon AG + Axel Franz + axel.franz&viveon.de +22154 + Jotron Electronics a.s. + Otto Holm + otto.holm&jotron.com +22155 + Pontificia Universita Santa Croce + Jesus Toribio + toribio&pusc.it +22156 + Dynamit Nobel Kunststoff GmbH + Martin Latteier + helpdesk&dnk.de +22157 + SLU + Martin Norrsken + martin.norrsken&adm.slu.se +22158 + Resolv SARL + Xavier COLLET-MATRAT + contact&resolv.fr +22159 + EPIN Technologies (China), Ltd. + Ruili Li + ruili&epintech.com.cn +22160 + RW Comercio e Servicos Ltda. + Wagner Sartori Junior + wsartori&gruporw.com.br +22161 + John Wiggins + John Wiggins + john.e.wiggins&gmail.com +22162 + Beyond Disability, Inc. + Richard Stubbs + rdavidson&bdi.org.au +22163 + Ricoh Printing Systems, Ltd. + Masaki Masubuchi + masaki-masubuchi&rps.ricoh.co.jp +22164 + Cambridge Silicon Radio Limited + Alexander Thoukydides + alex.thoukydides&csr.com +22165 + Web Service Solutions, Inc. + Mark Swanson + mark&ScheduleWorld.com +22166 + Ocean Broadband Networks + shirley hou + shirley.hou&obroadband.com +22167 + nologic.org + Ciaran Johnston + admin&nologic.org +22168 + Crown College + Aaron Taylor + admin&crowncollege.edu +22169 + NetEngine, Inc. + David Lee + dlee&netengine1.com +22170 + FXCM + Mark Lin + mlin&fxcm.com +22171 + CeSigma - Signals & Systems + Dr Gilles NOLIBE + gno&cesigma.com +22172 + HaCon Ingenieurgesellschaft mbH + Werner Sommerfeld + wso&hacon.de +22173 + Flex Engineering + Sergei Rusakov + sergeir&termos.ru +22174 + Virtual Trip Ltd. 
+ Vangelis Mihalopoulos + admin&vtrip.net +22175 + Mitron Oy + Jarmo Ryynänen + jarmo.ryynanen&mitron.fi +22176 + asknet AG + Guillaume Canavaggio + guillaume.canavaggio&asknet.de +22177 + DFN-Verein + Marcus Pattloch + oidmaster&dfn.de +22178 + Kurs LLC + Andrey Komyagin + admin&netmechanica.com +22179 + Otsuka Corporation + TAKANORI KURAHASHI + noc&aics.ad.jp +22180 + INSCAPE DATA CORPORATION + Richard Ho + richard.ho&inscapedata.com +22181 + Avision Inc. + Ming-hsien Wu + hankwu&avision.com.tw +22182 + Cascadia BHC + Shaun Savage + savages&cascadiabhc.org +22183 + Cylant, Inc. + Brad Cervenak + snmp&cylant.com +22184 + Unassigned + Returned 2023-03-02 + no-reply&iana.org +22185 + eScholar LLC + Stuart Colvin + scolvin&escholar.com +22186 + Marketing Resource Consultants, Inc. + Adam Gutcheon + agutcheon&hartfordmag.com +22187 + Symphoniq + Arthur Chuang + awc&symphoniq.com +22188 + Correctime ltd + Adam Tresch + adam.tresch&correctime.hu +22189 + Star Technologies, LLC + Tom Willett + tom&startechllc.net +22190 + NetEffect, Inc. + John Lacombe + john.lacombe&neteffect.com +22191 + Bismarck Public Schools + Michael Cowart + cowartm&bsd-lions.net +22192 + Pictou Regional Development Commission + Libo Feng + lfeng&prdc.com +22193 + Computer Information Systems Ltd., Brest + Slava Zanko + slavaz&cis.by +22194 + Orion Systems Inc + Hal Gordon + hgordon&orionsystemsinc.net +22195 + NeoTIP SA + Frédéric FELTEN + info&neotip.com +22196 + Vistek Electronics Ltd + Nick Liebmann + nick.liebmann&vistek.tv +22197 + SOFTBANK BB Corp. + Satoru Tsurumaki + sbbrd-smi&bb.softbank.co.jp +22198 + University of St.Gallen + Thomas Koeppel + thomas.koeppel&unisg.ch +22199 + United Arab Emirates University + Elsayed Hemayed + ehemayed&uaeu.ac.ae +22200 + Sistemas Techniclite. C.A. + Ettore Pelliccioni + ettore.pelliccioni&techniclite.com +22201 + Research Center of NetWork Techology Nanjing University Of Posts And Telecommunication Inc. + Wang Han + y030737&njupt.edu.cn +22202 + Security Tostem co.,Ltd. + HIROSHI KODAIRA + sts-unicty&exc.tostem.co.jp +22203 + Informática El Corte Inglés, S.A. + Juan José Blanco + juanjose_blanco&ieci.es +22204 + Bundesversicherungsanstalt für Angestellte + R. Grunert + ronald.grunert&bfa.de +22205 + 3G Data Soluciones C.A. + Andres Castro + acastro&3g.com.ve +22206 + M-real Corporation + Bengt Wentus + bengt.wentus&m-real.com +22207 + SureWest + James Yost + j.yost&surewest.com +22208 + AGES International GmbH & Co KG + Dirk Nitka + dirk.nitka&ages.de +22209 + LJXN inc. + LinJianeng + lin_jianeng&163.com +22210 + FLsystem + sangho-cho + atrobo&empal.com +22211 + Legacy Property Investments LLC (formerly 'JMAnderson Enterprises') + John Anderson + bassooner&gmail.com +22212 + Cognio, Inc. + Neil Diener + ndiener&cognio.com +22213 + Posda + Bill Bennett + bill.bbennett&gmail.com +22214 + Explorer Post 227 + Christopher A. Hotchkiss + christopher.hotchkiss&gmail.com +22215 + Pogo Linux, Inc. + Alex Neth + snmp-admin&pogolinux.com +22216 + sighq networking + boyden Stéphane + sigmounte&mailsfp.univ-lille2.fr +22217 + Yitran Communications + Iftah Bratspiess + iftah&web-silicon.com +22218 + NAVARRO Y BORONAD, S.L. + JAIME TORRES + jaime.torres&navarroyboronad.com +22219 + BoreNet AB + Johan Johansson + johan&borenet.se +22220 + LACNIC + Juan C. 
Alonso + juancarlos&lacnic.net +22221 + Wescom GmbH + Markus Fischer + fischer&wescom.ch +22222 + Gericos + Jean-Claude CHRISTOPHE + jch&gericos.com +22223 + IntelliCal LLC + Jason Klemow + Jason&IntelliCal.net +22224 + Queensland Treasury + Prachid Tiyapanjanit + prachid.tiyapanjanit&treasury.qld.gov.au +22225 + Huazhong University of Science and Technology + shunda zhang + zhangshunda&163.com +22226 + Acamaya SA + Jeremy Charton + ldap&acamaya.com +22227 + David Mains & Co. Inc. + Matthew Mains + mrmains&dmcinc.ca +22228 + The University of Manchester Computer Society + CompSoc Admin + admin&compsoc.man.ac.uk +22229 + Heiks computerdiensten + Heiko Noordhof + heiko.noordhof&xs4all.nl +22230 + MyPoints, Inc. + Michael T. Halligan + michael.halligan&mypointscorp.com +22231 + VEL Technologies, LLC + Dmitry Wagner + snmp&veltech.com +22232 + Kyle Hamilton + Kyle Hamilton + aerowolf&gmail.com +22233 + German Overseas Institute + Michael Schroeder + admin&duei.de +22234 + DocuSign France + Erwann Abalea + dsfcompliance-risk-safety&docusign.com +22235 + Beijing WellTelecom Co.,Ltd. + zhang xiao + zhangxiao&wtf.com.cn +22236 + Ultrawaves Design, Inc. + Eric Lammerts + eric-iana.org-enterprise-numbers&ultrawaves.com +22237 + Inventronik GmbH + Dipl.-Ing. Jens Carroll + jc&inventronik.de +22238 + FOXCONN Technology Group + S.C. Lin + S.C.LIN&foxconn.com +22239 + Response Systems Corporation + Howard W. Bleiwas + howard.bleiwas&respsys.com +22240 + DATOR3 a.s. + Pavel Korensky + pavel.korensky&dator3.cz +22241 + C&A Computer Consultants Ltd. + Max Chiu + kh&caconsultant.hk +22242 + GENTOO FOUNDATION, INC. + Thomas Raschbacher + lordvan&gentoo.org +22243 + The Swatch Group Ltd + Peter Hutzli + peter.hutzli&swatchgroup.com +22244 + Ing.-Büro Otte Gmbh + H. Otte + hans.otte&ibo-gmbh.com +22245 + Phalanx + Rene Lauer + ray&phalanx.cz +22246 + HSBC Bank plc + Marie Wilson + m.v.wilson&hsbc.com +22247 + Port of Tilbury London Ltd + Steve Thorne + steve.thorne&swisslog.com +22248 + topf-sicret.org + Tim Weippert + weiti&topf-sicret.org +22249 + INSIGHT TECHNOLOGIES Co., Ltd + Jung Chung Lee + jjlee&insightstor.com +22250 + Max-Planck-Institut fuer Radioastronomie + Frank Pascher + fpascher&mpifr-bonn.mpg.de +22251 + Syderal SA + Lionel Riem + lionel.riem&syderal.ch +22252 + McAfee Inc. (formerly 'Reconnex Corporation') + Chuck Hein + Chuck_Hein&mcafee.com +22253 + Lite Speed Technologies, Inc. + George Wang + gwang&litespeedtech.com +22254 + Jerome's Furniture Warehouse + Michael LaGrasta + sysadmin&jeromes.com +22255 + StopTheGlaciers.org + Daniel Restelli + restelli&stoptheglaciers.org +22256 + Groep T Hogeschool Leuven + Dirk Fabré + dirk.fabre&groept.be +22257 + Consultation informatique Daniel Savard Inc. + Daniel Savard + dsavard&cids.ca +22258 + Enlaces Tecnologicos S.A. de C.V. + Cesar Rincon + crincon&et.com.mx +22259 + WebAvis + Jérôme Schell + jerome&myreseau.org +22260 + Scana + Doug Bryant + dbryant&scana.com +22261 + California Lutheran University + Kevin Appel + kappel&clunet.edu +22262 + OpenSides sprl + Benoit Mortier + benoit.mortier&opensides.be +22263 + The Corporation of the County of Brant + Sandra Arndt + sandra.arndt&county.brant.on.ca +22264 + Artifact Software Inc. + Ron Wheeler + rwheeler&artifact-software.com +22265 + Stewart Enterprises, Inc. 
+ Thomas Wimprine + twimprine&stei.com +22266 + Evidant Corporation + Dennis Morton + dennis.morton&evidant.com +22267 + Integral Technologies Inc + Tim Marks + tmarks&integraltech.com +22268 + Flammiger Enterprises + Joerg Flammiger + joerg&flammiger.com +22269 + Charles A Gura + Charles A Gura + iana&gura.net +22270 + Quality Comm Comércio e Serviços de Teleinformática Ltda + Celso Moreira Martins + celsomartins&qualitycomm.com.br +22271 + Nimium d.o.o. + Miroslav Zubcic + mvz&nimium.com +22272 + PAREXEL International Corporation + Holger Marschall + PKI&parexel.com +22273 + Universita' degli Studi di Ferrara + Enrico Ardizzoni + enrico&unife.it +22274 + QSAN Technology, Inc. + Charles Luoh + charles.luoh&qsan.com.tw +22275 + PNMS + yurenlong + yurl&neusoft.com +22276 + FHD Ltd. + Wanghao + wanghao&fuhaoda.com +22277 + GIquadrat mbH + Torsten Steitz + info&GIquadrat.de +22278 + Bob-paperi Ky + Tero Ahonen + tero.ahonen&bobpaperi.com +22279 + Shiflett Consulting + steve shiflett + iana&shiflett.us +22280 + Telus Mobility + Frederick Chung + FREDERICK.CHUNG&TELUS.COM +22281 + CODE INGENIERIA INFORMATICA + OSCAR COSCARON + JAVIER&CODEINF.COM +22282 + Qualys, Inc. + IT Manager + domain-admin&qualys.com +22283 + Gutemberg Medeiros Ltda + Gutemberg Santos de Medeiros + gutapps&ig.com.br +22284 + Dimensao Digital Desenvolvimento Ltd. + Fabio Iareke + fabio&dimensaodigital.com.br +22285 + Teracue + Rainer Link + support&teracue.com +22286 + SYSTEMA + Gerhard Schweinschwaller + gerhard.schweinschwaller&systema.info +22287 + TRS SpA + Stefano Muro + stefano.muro&trs.it +22288 + EXTIS GmbH + Peter Grill + pg&extis.de +22289 + Contec GmbH + Mario Ploner + mario.ploner&contec.at +22290 + SkandSoft Technologies + Surendra Kuncharela + surendra&skandsoftindia.com +22291 + FUJITSU ADVANCED SOLUTIONS LIMITED + Takahiro Yamanaka + yamanaka&fasol.fujitsu.com +22292 + Network Center of Peking University + Ma Hao + mah&pku.edu.cn +22293 + Daily Dose Language Systems, Inc. + Justin Sharp + jsharp&sharpone.net +22294 + CAR-FRESHNER Corporation + Jack Benney + jbenney&little-trees.com +22295 + Ministrstvo za obrambo Republike Slovenije + Katarina Kavsek-Biasizzo + katarina.kavsek.biasizzo&mors.si +22296 + Fazt! Networks, Ltd. + W. Boot + wim&fazt.co.uk +22297 + Allied Bank Corporation + Lemuel Tomas + lctomas&alliedbank.com.ph +22298 + Höjebromölla + Mårten Persson + marten&hojebromolla.se +22299 + Pontifical University of St. Thomas + Gabriele Giulimondi + giulim&pust.it +22300 + CableMatrix Technologies, Inc. + Zem Green + zgreen&cablemx.com +22301 + COMP Rzeszow S.A. + Piotr Stolarz + piotr.stolarz&comprzeszow.pl +22302 + iC Compas GmbH and Co KG + Andreas Netzer + netzer&ic-compas.de +22303 + RSS Solutions Inc + Brendan Reekie + bdr&rsssolutions.com +22304 + Westinghouse Savannah River Company, LLC + Tim Arnold + timothy.arnold&srs.gov +22305 + schwaberow.de + Volker Schwaberow + volker.schwaberow&web.de +22306 + McMaster University + Wayde Nie + niew&mcmaster.ca +22307 + Active Reasoning, Inc. + Arminius Mignea + arminius.mignea&activereasoning.com +22308 + RedSky Technologies Inc. + John Connell + jconnell&redskytech.com +22309 + California State Automobile Association + Jeff McEwen + IT_Security_Processes&csaa.com +22310 + ComNetMedia AG + Mark Siebert + siebert&comnetmedia.de +22311 + A.F. Blakemore and Son Ltd + Ian J. 
Kennedy + ijkennedy&afblakemore.com +22312 + Université Saint-Louis - Bruxelles (USL-B) (formerly 'Université Saint-Louis (USL-B)') + Axel Luttgens + luttgens&fusl.ac.be +22313 + SP Controls, Inc. + Josh Fitzgerald + josh&spcontrols.com +22314 + TGN-Systeme Krueger + Partner + Andreas Krueger + Krueger&TGN-Systeme.com +22315 + spot media AG + Alexander Meyer + hostmaster&spot-media.de +22316 + OnTapSolutions + Tom Coppeto + tom&ontapsolutions.com +22317 + Applied Identity, Inc. + Jamie Honnaker + root&appliedidentity.com +22318 + Anyware Video + Xavier PICAT + picatx&anywarevideo.fr +22319 + Dravske elektrarne Maribor + Srecko Rojs + srecko.rojs&dem.si +22320 + Guardian Newspapers Limited + Gary Law + gary.law&guardian.co.uk +22321 + ISAC, Inc. + Masanori Machii + machii&tech.isac.co.jp +22322 + Freecomm Corporation + Wind Dong + wind.dong&freecomm.cn +22323 + Capinfo Co, Ltd. + Yang Bingyu + yangbingyu&capinfo.com.cn +22324 + Cooperton LLC + Jeffrey A Carr + jaluka&fuse.net +22325 + Blick SA + Francois van den Berg + francoisvdb&blick.co.za +22326 + Sago S.p.A + Giorgio Cangioli + g.cangioli&sago.fi.it +22327 + Martos Ltd + Martyn Wyatt + martyn&martos-ltd.co.uk +22328 + id Quantique SA + Alexandre Pauchard + alexandre.pauchard&idquantique.com +22329 + same + Michael P Forman + michael.p.forman&jpmchase.com +22330 + BIAS Inc. + Andrew Kimpton + awk&bias-inc.com +22331 + State of Texas, Office of the Attorney General, Child Support Division + Jack Bruns + jack.bruns&cs.oag.state.tx.us +22332 + ImmediateIT + Mark Summer + mark&immediateit.com +22333 + JPotter + Jim Potter + jim&jpotter.net +22334 + Laws and Wolfe Inc. + Michael E. Laws + mike&landwinc.com +22335 + Alcorn State University + Yek Sia + sia&alcorn.edu +22336 + Hopson Advanced Embedded Engineering + Vincent M. Hopson + vince&hopson.dyndns.org +22337 + Hitachi Electronics Services Co.,Ltd + NetworsSystemGroup + nip&hitachi-densa.co.jp +22338 + Paliot + Guido A. Paliot jr. + oid&paliot.de +22339 + RHX Studio Snc + Alessandro De Zorzi + info&rhx.it +22340 + Herlein Engineering, Inc. + Greg Herlein + gherlein&herlein.com +22341 + ESO Technologies + Pierre Sangouard + psangouard&eso-tech.com +22342 + Dyndaco BVBA + Bart Duchesne + bduc&dyndaco.com +22343 + Technical Manangment + Chris Snider + chris.snider&tagtmi.com +22344 + Telcomanager Technologies + Vicente Domingues + vicente&telcomanager.com +22345 + Exalon Delft + W. de Hoog + wdehoog&exalondelft.nl +22346 + dass /IT GmbH + Joerg Steffens + info&dass-it.de +22347 + IDS Services, LLC. + Mark Creekmore + mcreekmore&idsserv.com +22348 + Braintower Technologies GmbH + Florian Wiethoff + florian.wiethoff&brain-tower.com +22349 + Premier Retail Networks + Donald Army, DIrector of IT + donald_army&prn.com +22350 + debitel AG + Frank Ennulat + netman&de.debitel.com +22351 + RedPrompt + Simon Tennant + simon&imaginator.com +22352 + Pragmeta Networks + Josh Endries + jendries&pragmeta.com +22353 + Network Connection + Stephen Fulton + sfulton&connection.ca +22354 + The Lapp Companies + Dr. Wade Woolverton + drwade&functionalmed.org +22355 + Clariton Networks Ltd. 
+ Danny Sade + dannys&clariton-networks.com +22356 + Web-Cyber & Co + Baptiste Augrain + baptiste&web-cyber.com +22357 + INET-Consulting.com, Inc + Doug Royer + Doug&INET-Consulting.com +22358 + Albeo + Mario Enriquez-Zamudio + mario.enriquez&albeo.org +22359 + Jofee Internet Services + Joe Constant + joe&jofee.com +22360 + Unzet ApS + Jack Olsen + jacko&unzet.com +22361 + University of Michigan Radiation Oncology Physics + Wayne Keranen + wkeranen&med.umich.edu +22362 + General Software, Inc. + Steve Jones + stevej&gensw.com +22363 + Ken Bass Consulting + Kenneth Bass + kbass&kenbass.com +22364 + EMARKMONITOR INC. + Casey Riley + criley&markmonitor.com +22365 + Gluesys Co. Ltd. + Gyeong-Hun Kim + kgh&gluesys.com +22366 + EasySoft + YinSheng + yyyyinsheng&tom.com +22367 + Bit Bash Labs + Michael Hubbard + mkhubbard&gmail.com +22368 + Silver Spring Networks + James Pace + pace&silverspringnet.com +22369 + Hong Kong Applied Science and Technology Research Institute Company Limited (ASTRI) + Kavitha Gopal + kavitha&astri.org +22370 + DUPLO CORPORATION + Yoshihisa Suzuki + y-suzuki&duplonet.co.jp +22371 + Business Data Solutions + Marcel Berteler + marcel.berteler&bdsolutions.co.za +22372 + Horst Reiterer + Horst Reiterer + horst&reiterer.net +22373 + ThunderTechnology Srl + Damiano Scrigni + damio&thundertechnology.com +22374 + Allegro Wireless Canada Inc. + Wayne Hammerschlag + wayneh&allegrowireless.com +22375 + Crescent Real Estate Equities, LTD. + IT Infrastructure Manager + clane&crescent.com +22376 + Masco Corporation + Mike Cantalupo + hostmaster&mascohq.com +22377 + VoicePort, LLC + Greg Dicheck + gdicheck&voiceport.net +22378 + Spaceship.com, Inc. + Matt Hudson + matt&spaceship.com +22379 + InovaWeb + Alfredo Campos Enríquez + acampos&i-novaweb.com +22380 + Vincent DEFERT + Vincent DEFERT + vincent&defert.net +22381 + Linux User Group Roma + Cristiano Paris + segretario&lugroma.org +22382 + Drazen Baic + Drazen Baic + drazen&baic.de +22383 + HCCP + Ian Brown + spam&hccp.org +22384 + Benedikt Heinen + Benedikt Heinen + iana.org&web.icemark.net +22385 + Houston Academy of Medicine-Texas Medical Center Library + Chris Young + cyoung&library.tmc.edu +22386 + ionflux.org + Jörn P. Meier + smi&ionflux.org +22387 + InSync Technology Limited + Mr B. Allan + release&insync.tv +22388 + SENTELCO + Jon Bieker + jbieker&sentelco.com +22389 + IRIS Corporation Berhad + Nrin Tan + nrin&iris.com.my +22390 + Parsek Corporation Ltd. + Ales List + list&parsek.net +22391 + Citadel Security Software, Inc. + Carl Banzhof + cbanzhof&citadel.com +22392 + Delco S.p.A. + Carlo Passet + carlo.passet&delcospa.it +22393 + CDL chair - Saarland University + Stephan Thesing + thesing&cs.uni-sb.de +22394 + Digital Fuel Ltd. + MIS team + mis&digitalfuel.com +22395 + DyLogic S.r.l. + Dario Rapisardi + support&dylogic.com +22396 + Power Media sp. z o.o. + Marek Janukowicz + marek&power.com.pl +22397 + IBAMA + Gerson Henrique Sternadt + gerson.sternadt&ibama.gov.br +22398 + Cooperativa de Computação da Unimontes + Állysson Steve Mota Lacerda + stevelacerda&gmail.com +22399 + QBT Systems, Inc. + Guido Belcic + gbelcic&qbt.com +22400 + Inside Higher Ed + Doug Lederman + doug.lederman&insidehighered.com +22401 + VoIP, Inc. 
+ Daniel Corbe + dcorbe&voipinc.com +22402 + lois.inc + jonson lee + lianhuai&mails.gscas.ac.cn +22403 + Koch, Neff & Volckmar GmbH + Holger Schmieder + schmieder&schmieder.de +22404 + SBS Technologies + Gene Juknevicius + genej&sbs.com +22405 + Fujitsu Services Finland OY + Otto Blomqvist + otto.blomqvist&fi.fujitsu.com +22406 + Adermiis + Uhlrich Philippe + philippe.uhlrich&adermiis.fr +22407 + NeoMeridian Sdn Bhd + Tim Lloyd + tim.lloyd&neomeridian.com +22408 + PrimeKey Solutions AB + Tomas Gustavsson + tomasg&primekey.se +22409 + Midland Memorial Hospital + Ron Wooten + rdwooten&midland-memorial.com +22410 + Pixel Technology + Marek Bartnikowski + admin&pixel.com.pl +22411 + Hawkis Consulting + Vidar Håkestad + vidar&hawkis.com +22412 + openenterprise.co.uk + Nick Gregory + iana&openenterprise.co.uk +22413 + TheStreet.com Inc + Alexander Lorberg + ops&thestreet.com +22414 + Pacific Lutheran University + Keith Folsom + folsomke&plu.edu +22415 + Mole Valley Farmers Ltd. + Andres Olave + andres.olave&molevalleyfarmers.com +22416 + Shelton Internet Ltd + Ben Dunham + ben.dunham&shelton.co.uk +22417 + Information Design Department of Tama Art University + Takashi Nishiuchi + iddoperator&idd.tamabi.ac.jp +22418 + Usina de Imagens Photo+Design Ltda. + José Carlos França + digital&usina-de-imagens.com.br +22419 + Relex, Inc. + Ermakov Michael V. + ermakov&relex.ru +22420 + accedian, Inc. + Dominique Bastien + dbastien&accedian.com +22421 + TSYS Prepaid, Inc. + Tim Kuchlein + tim&tsysprepaid.com +22422 + Provideo Systems Pty Ltd + Alex Kolodin + alexk&provideo.com.au +22423 + DTN (formerly Telvent Almos) + Antonio Rafael Segura + weather-systems-devops&dtn.com +22424 + INTELSOFT + Fogha Barnabas + fogha&innsof.com +22425 + Audio Processing Technology (APT) + Frederic Allard + it&worldcastsystems.com +22426 + Digital Data Communications Asia Co., Ltd. + Kay Chan + kay&ddcasia.com.tw +22427 + Sowood & Co Ltd + Kevin Campbell + kev&sowood.co.uk +22428 + RealOps, Inc. + Chris Schroeder + chris.schroeder&realops.com +22429 + Florical Systems, Inc. + Michael Kent + Michael.Kent&Florical.com +22430 + Volke Entwicklungsring GmbH + Guido Naujoks + gna&volke.de +22431 + Phoenix Interactive Design Inc + Chris Walden + mibsupport&phoenix-interactive.com +22432 + Gimlitech + Thomas Ankele + ta&gimlitech.de +22433 + Silvermedia Group + Ireneusz Wochlik + biuro&silvermedia.pl +22434 + NET TIME Corp. + Noriaki Kobayashi + hostmaster&nettime.co.jp +22435 + Beep Science AS + Stein Aanensen + stein.aanensen&beepscience.com +22436 + Sumaré Consultores Associados Ltda. + Marcos Colpaert + marcos&colpaert.com.br +22437 + ITZ Informationstechnologie GmbH + Ralf Huelsmann + ralf.huelsmann&itz-duesseldorf.de +22438 + Groxis, Inc. + Howard Rosen + howard&groxis.com +22439 + CaseNEX + Hari Narasimhamurthy + hari&casenex.com +22440 + Canadian Bank Note Company, Ltd. + John Dempsey + jdempsey&cbnco.com +22441 + Next Dimension Inc. + Gerry Kowalsky + gerry&nextdimensioninc.com +22442 + CHILE.COM S.A. + Mauricio Nuñez + mauricio&chile.com +22443 + EMETEC + Pawel Cieslowski + office&emetec.com.pl +22444 + GASCARD Partners, L.P. 
+ Anthony Mills + amills&gascard.net +22445 + Simon Holmgaard-IT + Simon Holmgaard + Simon.Holmgaard&emaus.cndo.dk +22446 + Bitdefender SRL (formerly 'SOFTWIN SRL') + Andrei Rusu + tech-admin&bitdefender.com +22447 + B.Braun Melsungen AG + Joern Lubadel + joern.lubadel&bbraun.com +22448 + Cluster-Worxx.net + Tim Korves + korves&cluster-worxx.net +22449 + SOPA + Bertrand JUGLAS + bertrand&juglas.name +22450 + servicenetz.biz + Steffen Beyer + beyer&servicenetz.biz +22451 + Custom IDEAS + Gerald Van Baren + vanbaren&cideas.com +22452 + ING-DiBa AG + Bernhard Duerl + b.duerl&ing-diba.de +22453 + H:S Hovedstadens Sygehusfællesskab + Simon Holmgaard + sh41&bbh.hosp.dk +22454 + Helmut Mauell GmbH + Thomas Fabrizi + tfa&mauell.com +22455 + Tribunal Regional do Trabalho da 4a. Regiao + Fabiano Martins + fabiano.martins&trt4.jus.br +22456 + Hebraic Hertiage Christian School of Theology + Dave Augustus + postmaster&admin.hhcst.org +22457 + Deutscher Ring LebensversicherungsAG + Joerg Henkel + certadmin&deutscherring.de +22458 + Modulus Video, Inc. + Chenchen Ku + cku&modulusvideo.com +22459 + QVidia Technologies, Inc. + Ron Fellman + rfellman&qvidia.com +22460 + Tangtop Technology Co.,Ltd + Peter Lee + peter&kvm.com.tw +22461 + JeeF Software + Jan Fedorek + iana&jeef.sk +22462 + Alternative Enterprises (HK) Limited + Philip Paeps + philip&trouble.is +22463 + heidemann.org + Andreas Heidemann + ah&heidemann.org +22464 + 01map + Gérald Fenoy + gfenoy&gmail.com +22465 + G.I.E. AXA Technology Services Belgium + Johan Limbourg + johan.limbourg&axa-tech.com +22466 + hoehmann.biz + Tobias Hoehmann + noc&hoehmann.biz +22467 + Applied Global Technologies, Inc. + Ben Atha + bena&appliedglobal.com +22468 + Red-C Optical Networking + Iftah Bratspiess + iftah&web-silicon.com +22469 + Universidad ORT Uruguay + Mr. Ernesto Silva + silva&ort.edu.uy +22470 + NetDeposit Inc. + Joe Benson + jbenson&net-deposit.net +22471 + Visible School, Inc. + William Norris + wnorris&visibleschool.com +22472 + Boingo Wireless Inc. + Engineering Dept. + sysnet&boingo.com +22473 + Teledyne Controls + Nataliya Chervonaya + nchervonaya&teledyne.com +22474 + Asylum Telecom, Ltd. + Janos Geller + jgeller&asylumtel.com +22475 + UK Broadband + Leigh Porter + leigh.porter&ukbroadband.com +22476 + Syncor Systems, Inc. + Tod Gentille + syncor_snmp&syncorsystems.com +22477 + KingHold Technology Co.,LTD + Jimmy Wang + lear&khtec.com.tw +22478 + My ERM Application + Peter Dunworth + dunworth&ix.netcom.com +22479 + Crosswalk, Inc. + Chris Hawkinson + chris.hawkinson&crosswalkinc.com +22480 + XipLink Networking + Charlie Younghusband + charlie&xiplink.com +22481 + HHS Health Options, Inc. + John Broadbent + johnb&hhs-inc.com +22482 + nc + Chlaupek Norbert + nc&chello.at +22483 + StreamShield Networks + Dave Wakelin + david.wakelin&streamshield.com +22484 + grand.central.org + Jeffrey Hutzelman + jhutz&cmu.edu +22485 + SafeNet InfoTech Pvt Ltd + Bharat Bhushan + bbhushan&safenet-inc.com +22486 + Kyushu Institute of Technology + Hitoshi Nakayama + jin&isc.kyutech.ac.jp +22487 + 2X Software Ltd + Raphael Borg Ellul Vincenti + raphael&2x.com +22488 + Teltronic S.A.U. + Carmelo Marin + cmarin&teltronic.es +22489 + Telvent Tráfico y Transporte, S.A. + Carlos Gil Aguirrebeitia + arbs&telvent.abengoa.com +22490 + Health Care Service Corporation + Wade Morris + wade_morris&bcbstx.com +22491 + Vircom, inc. + Sylvain Savignac + sylvain.savignac&vircom.com +22492 + Forensic Signature Corp. + Jacques Francoeur + jfrancoeur&trustera.com +22493 + MW 2000 S.A. 
+ Daniel Quintela + dquintela&myway.com.ar +22494 + Animaltracks.net + Andrew Beaudoin + andrewb&animaltracks.net +22495 + Chandler Gilbert Community College + Austin Godber + austin.godber&cgcmail.maricopa.edu +22496 + Digital Stream, Inc. + Ben Cooley + bcooley&dstreamtech.com +22497 + shanghai blackstone communication Ltd + zeng qiang + zeng_qiang2004&hotmail.com +22498 + Neddco Enterprises + Roderick S. Baker + rod&neddco.com +22499 + Document Processing Systems, Inc. + Paul Rakowicz + paul&documentprocessing.com +22500 + OfficeLink Plus Pty. Ltd. + Nathan Le nevez + npl&acis.com.au +22501 + UAB Skaitmeninio sertifikavimo centras + Moudrick M. Dadashov + md&ssc.lt +22502 + DaVita, Inc. + George Lin + glin&davita.com +22503 + Klaus Henske + Klaus Henske + Klaus.Henske&t-online.de +22504 + RedDevel.com + Steven P. Kalemkiewicz, Jr. + kale4272&comcast.net +22505 + Volker Englisch + Volker Englisch + hwsw&englisch.us +22506 + Incache LLC + Ninan Thomas + ninan&incache.com +22507 + Ministerio da Ciencia e Tecnologia + Eduardo Viola + eviola&mct.gov.br +22508 + Beijing FiberHome Mobile Technologies Co.,Ltd + jidongzheng + jidongzheng&21cn.com +22509 + The Potter Group Ltd + Mike Griggs + mike.griggs&pottergroup.co.uk +22510 + WhatCounts + Wilson Soong + wilson&whatcounts.com +22511 + thinkorswim group, inc. + Linwood Ma + linwood&thinkorswim.com +22512 + Personal Software Developement Company + Fanis Kalatzis + tkalatz&cc.uoi.gr +22513 + PT Sapta Sarana Komunika + Sugeng Widodo + sugeng&sskom.co.id +22514 + RVision LLC + Greg Johnston + attention&drayvision.com +22515 + Theoretic Solutions + Adam Theo + theo&theoretic.com +22516 + Castalia LLC + Joshua S. Freeman + jfreeman&gmail.com +22517 + Ernster Public Relations + Tom Ernster + ternster&mn.rr.com +22518 + Arel Communications and Software, Ltd. + Amit Tzafrir + amit&arelcom.com +22519 + Akademische Fliegergruppe Karlsruhe + Michael Ewig + vorsitzender&akaflieg.uni-karlsruhe.de +22520 + Sandpath + Bin Zhang + kevinzb&yahoo.com +22521 + Schwegman, Lundberg, Woessner, & Kluth P.A. + Thomas Ernster + ternster&slwk.com +22522 + ClearPath Networks + Robert Staats + rstaats&clearpathnet.com +22523 + Fontec Information Technology Inc. + Dave Roper + daveroper&fontec.com +22524 + Proyecto Fin de Carrera + Sergio Afonso Coderch + sergioafonso&ya.com +22525 + RMP WebWorks + Robert Paskowitz + rpaskowitz&confucius.ca +22526 + InstantServers, Inc. + Webmaster + webmaster&instantservers.com +22527 + Capella Technologies + Jean-Michel David + jmdavid&capella.coop +22528 + iPodion GmbH + Thomas Kirchtag + tkircht&iPodion.at +22529 + Kommunale Datenverarbeitungsgesellschaft mbH + Mathias Weidner + oidadmin&kdg.de +22530 + Takeda Pharmaceuticals North America, Inc. + Dennis M. Reitz + dreitz&tpna.com +22531 + Paloma Partners + Joseph Cyboski + jcyboski&paloma.com +22532 + Computer Graphics Group + Jim Boren + jboren&computergraphicsgroup.com +22533 + Symetra Financial + Diane Zormeir + diazor&safeco.com +22534 + Purdue Pharma L.P. 
+ Tichard Thompson + tichard.thompson&pharma.com +22535 + Neiman Marcus Group + Christopher Paul + chris.paul&rexconsulting.net +22536 + COPAN Systems + Brian Wang + brian.wang&copansys.com +22537 + TreeTop GbR + Conny Lichtenberg + Conny.Lichtenberg&TreeTop.DE +22538 + Reva Systems Corporation + Scott Barvick + sbarvick&revasystems.com +22539 + Champion Computer Technologies + Ken Applebaum + tech&cctupgrades.com +22540 + Oulun Lyseon lukio + Tuure Laurinolli + adm&lyseo.edu.ouka.fi +22541 + Open Acuity LLC + Mike Allred + mikejallred&gmail.com +22542 + Lex Persona + Francois Devoret + fdevoret&lex-persona.com +22543 + GOUNOT + Louis GOUNOT + iana-manager&gounot.net +22544 + Host Collective Inc. + Sean Plaice + seanp&hostcollective.com +22545 + Daudt Consulting + Christian Daudt + ip_admin&daudt.org +22546 + Qwasartech + Dominik Wezel + dio&qwasartech.com +22547 + Alfred University + Adrian Morling + morling&alfred.edu +22548 + Biapo + Adrian Morling + morling&biapo.com +22549 + Broad Net Mux Corporation + Hiroshi Ida + h-ida&bnmux.co.jp +22550 + NANOTEX CORP. + Atsushi SHIMAMOTO + shimamoto&nanotex-jp.com +22551 + MOIMSTONE Co.,LTD + Seunghee choi(Skye choi) + skyechoi&moimstone.com +22552 + Sinco Informatica + Marcos Vieira + marcos&sincoinformatica.com.br +22553 + IT Saver + Joseph Le + joseph&itsaver.com +22554 + Legion of the Bouncy Castle + David Hook + dgh&bund.com.au +22555 + Safehaus + Alex Karasulu + akarasulu&codehaus.org +22556 + JAPAN ADVANCED INSTITUTE OF SCIENCE AND TECHNOLOGY + Syuichi Kosaka + isc&jaist.ac.jp +22557 + Deutsche Börse Group + Herman Philippe + pherman.cs&clearstream.com +22558 + one4vision GmbH + Christof Allmann + iana-pen&one4vision.de +22559 + Intersys AG + Andreas Schneider + andreas.schneider&intersys.ch +22560 + Dynamic Design GmbH + Christoph Emsenhuber + christoph.emsenhuber&dynamic-design.com +22561 + Graphic Image Technologies (Pty) Ltd. + Regardt van de Vyver + regardtv&git.co.za +22562 + Trivector System AB + Bengt Persson + bengt.persson&trivector.se +22563 + ehotel AG + Arno Seidel + a.seidel&ehotel.ag +22564 + ULX Ltd. + Gabor Szentivanyi + gabor.szentivanyi&ulx.hu +22565 + Odyssée Systèmes + Benjamin Dapon-Pigatto + benjamin.dapon-pigatto&odyssee-systemes.fr +22566 + Devoteam + Olivier Gérault + olivier.gerault&devoteam.com +22567 + Instituto Politécnico de Castelo Branco + Fernando Emanuel Azevedo Reis + informatica&ipcb.pt +22568 + Redwave Technology Ltd + Phil Hughes + phil&redwavetechnology.com +22569 + Dynameeting spa + Paolo Martino + paolo.martino&dynameeting.it +22570 + Thomas Taeger Datenblatt + Thomas Taeger + taeger&datenblatt.de +22571 + AMART Logic + Jaroslaw Cichorski + info&amart.com.pl +22572 + AD.NET solutions Enrico Roga + Enrico Roga + enrico.roga&adnet-solutions.de +22573 + Infinite Software, Inc. + Bill Weisner + bdweisner&infinitesoft.com +22574 + iseg Spezialelektronik GmbH + Joachim Poethig + mib-admin&iseg-hv.de +22575 + Astro-Med, Inc. + James H. Alexander + JAlexander&astromed.com +22576 + The OpenBSD SNMP Project + Theron Bair + obsdsnmp&id-ak.com +22577 + Thomas Wollner - IT Beratung und Integration + Thomas Wollner + tw&wollner-net.de +22578 + InFarmTech + Boris B. Samorodov + bsam&ipt.ru +22579 + kaptara GmbH & Co. 
KG (formerly 'Christoph Michel IT Management') + Christoph Michel + info&kaptara.de +22580 + Martin-Baker Aircraft Company Ltd + Niki Blowfield + niki.blowfield&martin-baker.co.uk +22581 + epollux.org + Daniel Wissenmeyer + daniel.wissenmeyer&epollux.org +22582 + Group 4 Technology Ltd + Andrew Chilcott + andrew.chilcott&g4tech.co.uk +22583 + Clarkson University + Bruce Dunphey + bdunphey&clarkson.edu +22584 + Zetera Corporation + Bill Babbitt + bill.babbitt&zetera.com +22585 + Forest Laboratories, Inc. + Omar McKenzie + security&frx.com +22586 + Dotcast, Inc. + Jim Longino + snmpcontact&dotcast.com +22587 + Hjelle IT-Bistand + Bjorn Hjelle + bjornhjelle&yahoo.com +22588 + LucaS web studio + Lukasz A. Grabowski + www&lucas.net.pl +22589 + P6R, Inc. + Mark Joseph + mark&p6r.com +22590 + BHG Security Consulting + Bartholomew H. Grasso + bgrasso&nycap.rr.com +22591 + Fachhochschule Muenchen - ZaK + Computer Center + admin&fhm.edu +22592 + Informatec LTDA + Tamar Souza + tamar.souza&informatec-sp.com.br +22593 + Emily Carr College of Art and Design + David Ayre + david&eciad.ca +22594 + GoldKrush + Matt Richards + matt&goldkrush.com +22595 + Modulation Sciences Inc. + Ketan Bhaidasna + ketan&modsci.com +22596 + GCI Wireless + GCI Registrar + registrar&gci.com +22597 + eOrg + Young H. Etheridge + yhe&yhetheridge.org +22598 + MissionMode Solutions, Inc. + Mike Mitchell + mike.mitchell&missionmode.com +22599 + Informatec Comercial e Serviços Ltda. + Rodolfo Xavier + rodolfo.xavier&informatec.com.br +22600 + Harris CapRock Communications (formerly 'CapRock Communications') + Raymond P Chudzinski + rchudzin&harris.com +22601 + Tachometry Corporation + Tom Evans + support&tachometry.com +22602 + TESI Tècnica del So i la Imatge S.L. + Marc Camprodon + marc&tesi.es +22603 + skyblue.eu.com + Raymond B. Edah + registry&skyblue.eu.com +22604 + Rentokil Initial plc + Philip Venton + pventon&rentokil.com +22605 + Winkowski Sp. z o.o. + Adam Domański + adomanski&winkowski.pl +22606 + Thomas Urban + Thomas Urban + soletan&toxa.de +22607 + ChiliTech Internet Solutions, Inc. + Matt Hoppes + oid&chilitech.net +22608 + Cmed Ltd + Dr Timothy Corbett-Clark + tcorbettclark&cmedltd.com +22609 + CJ Microware and Associates + Christian G Jackson + cgjackson59&comcast.net +22610 + A10 Networks (previously 'Raksha Networks Inc.') + John Chiong + JChiong&a10networks.com +22611 + Tele Lorca + Sébastien CRAMATTE + contact&zeninteractif.com +22612 + Envirocare of Utah, LLC + Irwan Budiman + ibudiman&envirocareutah.com +22613 + City Animal Hospital Ltd. + Marc A. Mapplebeck + mmapplebeck&gmail.com +22614 + AtlanticRebel Consulting + Marc A. Mapplebeck + mmapplebeck&gmail.com +22615 + Luna[e] Project + Alexandre Jousset + Alexandre&Jousset.org +22616 + Companhia de Sistemas - Compsis Consultoria e Comercio de Informatica Ltda + Guilherme Buonfiglio de Castro Monteiro + guilherme.monteiro.domain&gmail.com +22617 + Richland Community College + James Jones + james&richland.edu +22618 + IBM/NY Hospitals + Richard Basch + rbasch&us.ibm.com +22619 + Universidade de Aveiro + Ricardo T. Martins + gc-adm&cic.ua.pt +22620 + Serveapolis + Jean-Fabrice Bobo + iana&bobo-rousselin.com +22621 + stepping stone GmbH + Michael Eichenberger + michael.eichenberger&stepping-stone.ch +22622 + Lightel Technologies Inc. 
+ Stanley Shi + stanley_shi&lighteltech.com +22623 + Excellent Rameur + Eric Thrierr + equipe&excellent-rameur.com +22624 + Source Technologies + Steve Letter + sletter&sourcetech.com +22625 + ProcessClaims + Joe Gomez + joe.gomez&processclaims.com +22626 + COMET SYSTEM, s.r.o. + Jaroslav Dohnal + dohnal&cometsystem.cz +22627 + MIKRO-KOD Ltd. + Andrij Kohan + a_kohan&mail.ru +22628 + Deutsche Angestellten-Krankenkasse + Wolfram Joost + Wolfram.Joost&dak.de +22629 + Plura Europe GmbH + Juergen Loh + jloh&plurainc.com +22630 + Istituto Nazionale per la Ricerca sul Cancro + Francesco Molina + francesco.molina&istge.it +22631 + Fachhochschule Osnabrueck + Thomas Fruend + fruend&fhos.de +22632 + TOKYO BUSINESS SOLUTION CO.,LTD. + Naoto KIHARA + net-admin&biz-sol.co.jp +22633 + Banco Mercantil do Brasil SA + Antonio Domingos de Paiva Lima + antonio.domingos&mercantil.com.br +22634 + Objectif Software + Julien VALIENTE + info&objectif-software.com +22635 + SECIT Secure IT Ltd. + Michael Hoegler + office&secit.at +22636 + bbv AG + Olaf Willuhn + admin&bbvag.de +22637 + Angel's Outpost + Diana M. Stocchi + angelsoutpost01&sbcglobal.net +22638 + Siemens PTD EA + Dr. Goetz Neumann + goetz.neumann&siemens.com +22639 + UNICO Computer Systems Pty Ltd + Greg Boug + sysadmin&unico.com.au +22640 + LuXpert + soonoh jeong + sojeong&luxpert.com +22641 + Microsystèmes S.A. + Francis CEREJA + fc&microsystemes.com +22642 + PROXID + Enterprise Number Manager + iana.manager&proxid.net +22643 + Carsales.com.au Ltd + Damien O'Rourke + damieno&carsales.com.au +22644 + Zettai.net LLC + George Donnelly + info&zettai.net +22645 + Lane County + Rhett Karr + rhett.karr&co.lane.or.us +22646 + Americom Government Services, Inc. + Matt Kipe + matthew.kipe&americom-gs.com +22647 + WRO-COM Maciej Jankowski + Michal Listos + tech&wro-com.net +22648 + TENSQUARE gmbh + Hostmaster + hostmaster&tensquare.de +22649 + Astoria Networks Inc. + Eric Wong + ewong&abestrch.com +22650 + ADS Specialists, Inc. + Gary Edwards + gedwards&ads-specialists.com +22651 + Visual Commands + Brian Cavenah + brian&visualcommands.com +22652 + Raptor Networks Technology Inc. + Morteza Rahchamani + mrahchamani&raptor-networks.com +22653 + Stoke Inc + Dzung Vu + dvu&stoke.com +22654 + Kodak Dental Systems + Tory Deron + tory.deron&kodakdental.com +22655 + Number41Media Corporation + Chris Lawder + chris&number41media.com +22656 + Intelicis Corporation + Caroline Lee + caroline.lee&intelicis.com +22657 + Exprit s.r.o. + Oldrich Holy + oldrich.holy&exprit.cz +22658 + Apria Healthcare Group + Michael Long + Mike.Long&apria.com +22659 + HHS International Ltd. + Brian Hehir + brian.hehir&hhsinternational.com +22660 + Wyoming.com + Frostie Sprout + noc&wyoming.com +22661 + Wolfram Schlich, IT Service + Wolfram Schlich + wolfram&schlich.biz +22662 + UEcomm Ltd. + Joel Macatangay + jmacatangay&uecomm.com.au +22663 + Banco do Estado de Sergipe S/A + Alessio de Oliveira Rezende + alessio&banese.com.br +22664 + Soost Software Technology + Dieter Soost + mail&soost-berlin.de +22665 + explicate.org + Ken Pizzini + mib-master&explicate.org +22666 + Hautespot Networks + Tim Harvey + tim&hautespot.net +22667 + phase5 information technology GbR + phase5 Technical Support Staff + iana&entwurfsvergabe.de +22668 + Hawaiian Electric Company + Ellsworth Fujii + ellsworth.fujii&heco.com +22669 + North West University + Pieter Enslin + hostmaster&puk.ac.za +22670 + ControlGuard Ltd. + Tetelman Chen + chent&controlguard.com +22671 + VITRONIC Dr.-Ing. 
Stein Bildverarbeitungssysteme GmbH + Michael Moser + michael.moser&vitronic.com +22672 + University of Peradeniya + Dircetor/ICSU + noc&pdn.ac.lk +22673 + GTS Telecom SRL + GTS Telecom Tech Dept + tech&gtstelecom.ro +22674 + Gamersmafia + Juan Alonso + dharana&gamersmafia.com +22675 + Prorange Ltd. + Joerg-Dieter Leinert + joerg-dieter.leinert&prorange.de +22676 + manitu GmbH + Manuel Schmitt + manuel.schmitt&manitu.de +22677 + Finsoft Ltd + Nenad Ristic + nesa&finsoft.com +22678 + Latens Systems Ltd + Joseph Edwards + snmp&latens.co.uk +22679 + Futura NT S.r.l. + Simone Passet + simone.passet&futura-nt.it +22680 + Oxford BioSignals Ltd + Marc Smith + marc.smith&oxford-biosignals.com +22681 + ARD Technology + Andrew R Dale + andy&ardtechnology.com +22682 + Vesuvius + Nicolas Camus + nicolas.camus&fr.vesuvius.com +22683 + SIGMA Chemnitz GmbH + Enrico Scholz + enrico.scholz&sigma-chemnitz.de +22684 + ACS Automotive Communication Services GmbH + Hans-Juergen Schmidt + hans.schmidt&acs.vaps.de +22685 + Fronter AS + Ola Ormset + ola.ormset&fronter.com +22686 + Azbooka Publishers Ltd. + Dmitry O Furmansky + dmitry&azbooka.spb.ru +22687 + Brink's (UK) Limited + AJ Noordende + aj.noordende&brinksinc.com +22688 + jborder.com + Jamie Border + jborder&gmail.com +22689 + Yamagata University + Sumio Okuyama + oid&yz.yamagata-u.ac.jp +22690 + Cleon Solutions s.r.l. + Andrea Cozzolino + a.cozzolino&cleonsolutions.com +22691 + APL Ltd. + Steve Stewart + steve_stewart&apl.com +22692 + Magma Soft + Georg Lehner + support&magma-soft.at +22693 + Alpha Venega Corporation + Anthony A. Baffoe + abaffoe&verizon.net +22694 + Cibicom A/S + Kristian Klinting + krkl&cibicom.dk +22695 + EPCglobal + Ted Osinski + tosinski&epcglobalus.org +22696 + Netadmin System i Sverige AB + Tor Borrhed + tor&netadmin.se +22697 + First Point Global Pty Ltd + Jan Zeilinga + jzeilinga&firstpointglobal.com +22698 + Match Lab, Inc. + Chris Huang + chrish&matchlab.com +22699 + M&T Bank Corporation + Kyle Obear + kobear&mandtbank.com +22700 + LUIS COLORADO SISTEMAS, S.L. + Luis Colorado + lc&luiscoloradosistemas.com +22701 + Hospices - CHUV + Oliver Thalmann + oliver.thalmann&chuv.ch +22702 + SC Stonet Computers SRL + Adrian Belciug + adi&stonet.ro +22703 + Ask IT's Done Ltd + Simon P Smith + iana&askitsdone.co.uk +22704 + University of Kansas Medical Center + Steve Selaya + network_development&kumc.edu +22705 + DIGICAST Inc. + Yeontack Jeong + blueminz&empal.com +22706 + DIGITAL FORECAST Corporation + Yeontack Jeong + blueminz&dfcast.co.kr +22707 + Comtechservice + Vitaliy Savynskyy + vit&cts-renta.ru +22708 + eSYS Informationssysteme GmbH + Rene Mayrhofer + rene.mayrhofer&gibraltar.at +22709 + Appistry + Samuel Charrington + iana&appistry.com +22710 + Lipman Elektronik ve Danismanlik Ltd.Sti + Mehmet Fatih Eyribicak + fatih&lipman.com.tr +22711 + SGS Societe Generale de Surveillance SA + Mr. 
BRELAZ Stephane + stephane.brelaz&sgs.com +22712 + Wielkopolski Oddzial Wojewodzki NFZ w Poznaniu + Krzysztof Tomaszewski + krzysztof.tomaszewski&nfz-poznan.pl +22713 + Università degli Studi di Cassino + Marco D'Ambrosio + m.dambrosio&unicas.it +22714 + Mobilaris AB + Andreas Sikstrom + andreas.sikstrom&mobilaris.se +22715 + CenterPoint - Connective Software Engineering GmbH + Thomas Themel + thomas.themel&cpointc.com +22716 + Modern World Internet Ltd + Leigh Brown + leigh&modern-world.net +22717 + VRT + Lieven Troch + lieven.troch&vrt.be +22718 + Centre Reseau Communication + Pierre David + Pierre.David&crc.u-strasbg.fr +22719 + Pinellas County Government - IT + Jerry Chapman + jchapman&pinellascounty.org +22720 + Metreos Corporation + J.D. Liau + jdliau&metreos.com +22721 + SIM International, Inc. + Ben Bergen + ben.bergen&sim.org +22722 + KWPark Hardware + Keith Park + parklk&earthlink.net +22723 + Cyjaya Korea + Jay Kim + joon&cyjaya.com +22724 + Store Alcala + Daniel Pérez Arias + daniel&tiendapc.com +22725 + Marko Durkovic + Marko Durkovic + marko&fs.ei.tum.de +22726 + InfiniLogic (Private) Limited + Syed Junaid Rizvi + junaid&infinilogic.com +22727 + Z&Z Services + Jacqueline Fayaud + Jfayaud&cox.net +22728 + V-Office + Andrew Klang + oid&v-office.biz +22729 + Unleash Computers Ltd + Scott Mohekey + smohekey&insightful.co.nz +22730 + TurnTide Incorporated + Tobias DiPasquale + toby&turntide.com +22731 + Data Aire,Inc. + Minh Tran + mtran&dataaire.com +22732 + Brock University + Ronald B. Ogawa + csogawa&brocku.ca +22733 + NorBelle, LLC + Robert Olivier + rolivier&norbelle.com +22734 + synetics gmbh + Andre Woesten + awoesten&synetics.de +22735 + GSC Mobile Solutions + Gerrit Laseur + Gerrit_Laseur&GSCMobileSolutions.com +22736 + Digium + Mark Spencer + markster&digium.com +22737 + SmarTone Mobile Communications Limited + Network Administrator + netinstall&ismart.net +22738 + WiNetworks + Bercovich Sabin + sabinb&winetworks.com +22739 + Network Technology Research Group + Eric A. Hall + ehall&ntrg.com +22740 + S&P Computersysteme GmbH + Christoph Leser + leser&sup-logistik.de +22741 + N-Dimension Solutions Inc. + Vincent Wan + Vincent.Wan&n-dimension.ca +22742 + Acbel Polytech Inc. + Edwin Chiang + edwin_chiang&apitech.com.tw +22743 + Linz AG + Nigl Sascha + s.nigl&linzag.at +22744 + Purple Labs S.A. + Stephane Pointu + stephane.pointu&purplelabs.com +22745 + MathAn Praha, s.r.o. + Jan Dvorak + mathan&mathan.cz +22746 + PRIORITY ELECTRONICS LTD. + BOGDAN ANTONOVICI + bantonovici&priority.mb.ca +22747 + Travelpack + David Norman + postmaster&travelpack.com +22748 + Dade Behring + Matthew McCormack + matthew_mccormack&dadebehring.com +22749 + Celebrity Resorts, Inc. + Tim Howe + tim.howe&celebrityresorts.com +22750 + Teen Living Programs, Inc. + Betty A. Bogg + bbogg&teenliving.org +22751 + HealthlineIS + Ross Linfoot + rlinfoot&healthlineis.com +22752 + PatchLink Corporation + Ammen Harper + Ammen.Harper&PatchLink.com +22753 + Convergent Media Network Ltd. + Jan Van Uytven + wyvern&crm3.com +22754 + Cyntrum Web-Technologies, Inc. + Robert Lee + robert&cyntrum.com +22755 + Terma Software Labs LLC + Atul Awate + aawate&stirlingsystems.com +22756 + Revolution Linux inc. + Jean-Michel Dault + jmdault&revolutionlinux.com +22757 + Azonic Technology Ltd. + Ryan Tseng + ryan.tseng&azonic-tech.com +22758 + zrn.ru + Andrey Maximov + info&zrn.ru +22759 + PROMAX ELECTRONICA,SA + Marc Castro + mcastro&promax.es +22760 + Sphera Corporation Ltd. 
+ Tohar Trabinovitch + tohar&sphera.com +22761 + NAVI Sp. z o.o. + Olaf Fraczyk + olaf&navi.pl +22762 + GROUPE ATLANTIC + GFC ATLANTIC + gfcatlantic&groupe-atlantic.com +22763 + Artis Group GmbH + Michael Büsch + buesch&artis-group.com +22764 + SPIDCOM Technologies SA + Etienne CHEVREAU + etienne.chevreau&spidcom.com +22765 + Augur Systems, Inc. + Chris Janicki + mib&AugurSystems.com +22766 + Hoatech Technologies Co., Ltd. + Peter Chiang + peter&hoatech.com.tw +22767 + TTC InfoAge Inc. + Michael Friedman + michael.friedman&ttcinfoage.com +22768 + Technical University of Crete + Eleftheria Petraki + epetraki&noc.tuc.gr +22769 + mur.at - Verein zur Foerderung von Netzwerkkunst + Jogi Hofmueller + noc&mur.at +22770 + MediaSputnik Ltd. + Viktor Luzin + vl&msp.ru +22771 + BlueFinger Ltd. + John Grange + john.grange&bluefinger.com +22772 + Oesterreichische Nationalbank + Joerg Spatschil + joerg.spatschil&oenb.at +22773 + John Fan + John Fan + fanjun&shaw.ca +22774 + Ensuren Corporation + Stephen Wostal + stephen.wostal&ensuren.com +22775 + Centromeric inc + Sita KrishnaKumar + centromeric&gmail.com +22776 + Covance Cardiac Safety Services + Tim Pollard + Tim.Pollard&covance.com +22777 + Ventura24 S.L. + Fortunato Navarro + fortunato.navarro&ventura24.es +22778 + Reclusive Hermit + Chris Anderson + ldapadmin&reclusivehermit.com +22779 + Carefirst BlueCross BlueShield + Grant Kissel + grant.kissel&carefirst.com +22780 + Powerline Technologies Ltd + David J Brain + david&braind.fsnet.co.uk +22781 + wenzhou telecom + miao min + miaomin&wz.zj.cn +22782 + Bridgette, Inc + Scott Smyth + scotts&cuttedge.com +22783 + Rauscher networX + Oliver Rauscher + Oliver.Rauscher&rnetx.com +22784 + pimp.org.za + Michael-John Turner + mj&turner.org.za +22785 + ICTeam AG + Ralf Heid + rheid&icteam.de +22786 + Lattelekom + Valdis Pukis + Valdis.Pukis&lattelekom.lv +22787 + internetbureau Websight + Eric Bus + iana&websight.nl +22788 + PSInd, LLC + David Picard + dpicard&psind.com +22789 + martyhill.net + Marty Hill + bmartinhill&comcast.net +22790 + S.K.M. Informatik GmbH + Dr. Heiko Voss + hvoss&skm-informatik.com +22791 + China Council for the Promotion of International Trade (CCPIT) + Guohui Xin + xinguohui&ccpit.org +22792 + Voice Technology Ind. and Com. Ltd. + Rodrigo Zenji Hida + rodrigo&voicetechnology.com.br +22793 + Stratum Communications Pty Ltd + Kean Lim + keanl&stratumcomms.com +22794 + Pagaros Pty Limited + Phil Cairns + admin&pagaros.com.au +22795 + Mississippi Valley State University + Edgar Bland, Jr + ebland&mvsu.edu +22796 + LOGSYS Inc. + Steve Yoon + steve&logsys.co.kr +22797 + openconcept gmbh + Stefan Huggenberger + stefan.huggenberger&openconcept.ch +22798 + University of Crete + Kissandrakis George + kissand&ucnet.uoc.gr +22799 + Endo7 GmbH/Srl + Stefan Haberl + contact&endo7.com +22800 + FlexDSL Telecommunications AG + Ruedi Aschwanden + ruedi.aschwanden&flexdsl.ch +22801 + Amedia Networks, Inc. + John Colton + oid&amedia.com +22802 + Telabria Ltd + Richard Strand + support&telabria.com +22803 + Psychosys Software Limited + Duncan Mackay + dm&psychosys.co.uk +22804 + Trivium Technologies Ltd. + Michael Dvoishes + michaeld&3vium.com +22805 + Utex Communications + Soren Telfer + soren&worldcall.net +22806 + Scientica Life Sciences Pvt. Ltd. + Dr. Ashutosh Pradhan + ashutosh&scientica.com +22807 + Andreas Julius + Andreas Julius + Andreas.Julius&t-online.de +22808 + Oesterreichische Aerztekammer + Ralf Fischer + r.fischer&aerztekammer.at +22809 + VLI Communications CO.,LTD. 
+ Robert Zhou + Robert&vlichina.com +22810 + Aktia Savings Bank p.l.c. + Antti Ylänne + aylanne&aktia.fi +22811 + Pfeiffer & May Grosshandel AG + Dr. Peter Borst + peter.borst&pum.de +22812 + Allscripts, LLC + Daniel Venton + daniel.venton&allscripts.com +22813 + Pest Control Data Systems, Inc. + Lawrence "Dee" Holtsclaw + pcds&usit.net +22814 + radiowave limted + alan walters + alan&aillweecave.ie +22815 + Axeda Systems Inc. + Jim Hansen + jhansen&axeda.com +22816 + Rackable Systems, Inc. + Rackable Systems Contact + iana&rackable.com +22817 + BAE SYSTEMS Australia + Brett Hales + brett.hales&baesystems.com +22818 + Naked Dwarf + Nick Loeve + matt&nakeddwarf.com.au +22819 + Chas Philly Steaks and Stix + Charlie Wylam + jabroney&evenlink.com +22820 + DATS Co. Ltd. + Katsuhiko Hirata + hirata&dats.co.jp +22821 + Saab AB + Andreas Lundgren + andreas.lundgren&saabgroup.com +22822 + MobiComp Computação Móvel S.A. + João Paulo Ribeiro + jp&mobicomp.com +22823 + Crodo Technologies Pte Ltd + Stuyvesant Lim + slim&crodo.net +22824 + Verbrugge Terminals BV + Luuk. H.P. van de Vijver + luuk.vandevijver&verbrugge.nl +22825 + Androsoft GmbH + Stefan Mauerhofer + info&androsoft.ch +22826 + DFDS A/S + Jan Kierstein + jkie&dfds.com +22827 + Certagon Corporation + Matt Parks + matt.parks&certagon.com +22828 + Emacolet Networking Services + Peter P. Benac + ppbenac&emacolet.com +22829 + Core Mobility, Inc. + Nathan Wang + nathan&coremobility.com +22830 + NUMATA Kazuya + NUMATA Kazuya + kaz&nu-chon.org +22831 + Warpera Corporation + Sophia Luo + sophial&warpera.com +22832 + POSDATA Co. Ltd. + Keunyol Park + stunner&posdata.co.kr +22833 + TMN TECHNOLOGIES Telecomunicações LTDA + IUMBINO MAGALHAES BROCARDO + magalhaes&tmn.com.br +22834 + Redfish Group Pty Ltd + Justin Clacherty + snmp&redfish-group.com +22835 + Firetide Inc + Naresh Adoni + nadoni&firetide.com +22836 + Tercina Inc. + Tim Spurway + tim.spurway&enboard.com +22837 + Rogers Police Department + Joseph S. Dorn + jdorn&rogersark.org +22838 + A1 Enterprise, Inc + Chris Day + ckdinternet&yahoo.com +22839 + ZQInteractive + Kaity Yu + kaity.yu&i-zq.com +22840 + pbk2 GmbH & Co. KG + Bernhard Krönung + horke&pbk2.de +22841 + external Business Information Services GmbH + Steffen Schoch + info&ebis.info +22842 + Scientific Games International GmbH + Kornic Djordje + sysgrp&scigames.at +22843 + ANWB B.V. + Ruud Zwart + mccmiddleware&anwb.nl +22844 + Scheede + Marc Scheede + mscheede&stud.uni-goettingen.de +22845 + IntelSight + Guillaume Denoix + guillaume.denoix&intelsight.com +22846 + YR20 GROUP INC. + Mike Hinz + mike.hinz&yr20.com +22847 + Vindaloo Communications + Christopher Hilton + chilton&vindaloo.com +22848 + IdéiaLivre + Andreas Hansen + andreash&ideialivre.net +22849 + T&F Computer und Networksystems GmbH + Manfred Seifter + technik&tf-systems.at +22850 + MSB + Mark Bennett + mark.bennett&jewellery.eclipse.co.uk +22851 + Children's Memorial Hospital + Ron Isbell + risbell&childrensmemorial.org +22852 + ENEM B.V. + H. Melgers + hans&enem.nl +22853 + GuangZhou Net Control Tech. Ltd. + Zhong Wenqing + zwq&wangkong.com +22854 + Wojewódzki Ośrodek Informatyki przy Warminsko - Mazurskim Urzędzie Wojewódzkim w Olsztynie + Tomasz Kamiński + siomax&uw.olsztyn.pl +22855 + Multisuns Corp. + Yung-Kun Lin + yklin&multisuns.com +22856 + HiStor technologies + Christophe Graulle + cgraulle&histor.fr +22857 + Reflex Security, Inc. + SNMP Admin + snmp&reflexsecurity.com +22858 + financial.com AG + Alexis Eisenhofer + ops&financial.com +22859 + Techaya Inc. 
+ David Stern + david&techaya.com +22860 + The LDAP Company + Phil Smith + asmith&aeinc.com +22861 + MDOUK + Mo Dutta + mo&mdouk.com +22862 + Winvision + Boris van Es + boris.vanes&winvision.nl +22863 + Softwarehaus Hartter + Ewald Hagenauer + ewald.hagenauer&hartter.com +22864 + ALPHA.CH AG + Stephan Hoffmann + stephan.hoffmann&alpha.ch +22865 + University of Basel + Bernd Sindlinger + bernd.sindlinger&unibas.ch +22866 + Vattenfall AB + Simon Zimmermann + simon.zimmermann&vattenfall.com +22867 + G-International Ltd + Paul Ford-Hutchinson + paul.ford-hutchinson&g-international.com +22868 + Ventana Medical Systems, Inc. + Jon Gillies + jongillies&ensynch.com +22869 + Deutsche Gesellschaft für Technische Zusammenarbeit (GTZ) GmbH + Axel Bujak + axel.bujak&gtz.de +22870 + Inter-Governmental Consultations on Asylum, Refugee and Migration Policies + Pierre GARNIER + iana&igc.ch +22871 + Autocom A/S + Kent B. Hansen + kbh&autocom.dk +22872 + Reinhard Moosauer IT Beratung + Reinhard Moosauer + office&moosauer.de +22873 + Indian Institute of Information Technology + Alok Parlikar + alok&iiita.ac.in +22874 + Syntermed, Inc. + David Cooke + ccooke&syntermed.com +22875 + Fusion Laboratories, Inc + Unix Administrator + unixadmin&fusionlabs.net +22876 + BlueCows Technical Services + Russ Woodman + russ&natcotech.com +22877 + Northern Arkansas Telephone Company, Inc. + Russ Woodman + russ&natcotech.com +22878 + Allen Corporation of America + Bill P. Fanelli + bfanelli&allencorporation.com +22879 + Affine DB Ltd. + James Clark + james&clark.nildram.co.uk +22880 + SonicDuo OSP dep. + Galina Khvan + galina.khvan&sonicduo.com +22881 + Man Investments Ltd + Grant Venner + gvenner&maninvestments.com +22882 + CJSC "UKRAINIAN MOBILE COMMUNICATIONS" + Yaroslav Galchynskyy + noc&umc.com.ua +22883 + OPENLiMiT SignCubes GmbH + Frank Jeschka + f.jeschka&signcubes.com +22884 + Western Gas Resources, Inc. + Felix David + fdavid&westerngas.com +22885 + GIP CPAGE + VIDAL Luc + lvidal&cpage.fr +22886 + Astro Strobel Kommunikationssysteme GmbH + Ralf Schmitz + r.schmitz&astro-kom.de +22887 + PC-Ware Information Technologies AG + Thomas Eichler + thomas.eichler&pc-ware.de +22888 + Interface Business GmbH + Dr. Hellfried Lohse + service&interface-business.de +22889 + Zenprise, Inc. + Curtis Heskett + cheskett&zenprise.com +22890 + Department of Applied Mathematics "U.Dini" + Paolo De Rosa + pdr&inventati.info +22891 + Future Connect (Pvt) Ltd + Asad Manzur + manzur&fc.net.pk +22892 + Jan B. Telepski Trading + Jan Telepski + jan&telepski.de +22893 + Ing. Petr Setka + Petr Setka + news&setka.cz +22894 + Unipower Corporation + Andy Page + andyp&unipower-europe.com +22895 + RF Technologies, Inc. + Wyn Gary + wgary&rft.com +22896 + Govern de les Illes Balears + GABRIEL BUADES RUBIO + bbuades&dgtic.caib.es +22897 + EPRCI + Dathan Tyler Cade + oid&eprci.net +22898 + Sericon Technology Inc. + Sander A. Smith + sas&sericontech.com +22899 + Etherstuff + Mike Hagans + mhagans&etherstuff.com +22900 + Statewide Software & Systems + Joshua Harding + josh&statewidesoftware.com +22901 + Packet411 Corporation + Bill Quinn + packet411&gmail.com +22902 + Discovery Communications Inc. + Dave Duvall + dave_duvall&discovery.com +22903 + Preferred Communications, Inc. + Hitesh Patel + hitesh&presys.com +22904 + R. L. Polk & Co. 
+ Michael Isiminger + mick_isiminger&polk.com +22905 + OAG Worldwide Ltd + Paul Powenski + ppowenski&oag.com +22906 + Medical Soft Instruments SLL + Arturo Bustos + info&medicalmsi.com +22907 + Riege Software International GmbH + Mr. Holtkamp + rsiadmin&riege.de +22908 + Magnus Informatik A/S + Peter Dahl Vestergaard + peterdv&magnus.dk +22909 + T-VIPS AS + B. Tommy Jensen + btj&t-vips.com +22910 + MaeSoft Group + Dan Allen + dan&iquest.net +22911 + EPSILON Software Assistance SA + Niggi Stirnimann + niggi.stirnimann&epsilon.ch +22912 + Comrad Medical Systems Limited + John Reynolds + johnr&comrad.co.nz +22913 + TJM Products Pty Ltd + Mark Poyser + infosys&tjm.com.au +22914 + Sonim Technologies + Prasanna Sathyanarayana + prasanna&sonimtech.com +22915 + IFX Corp + Julio Cesar Pinto + jc&ifxcorp.com +22916 + Pyramide Technologies Corp + Ole Ersoy + ole_ersoy&yahoo.com +22917 + UHS Systems Pty Ltd + Daniel Wu + daniel.wu&uhssystems.com +22918 + TeDenium Indústria Eletroeletrônica Ltda. + Eros Augusto De Brito + eros.augusto&tedenium.com +22919 + Herit Corporation + Yisook Nam + yisnam&herit.net +22920 + Windows Consulting Group, Inc. + William Boswell + bboswell&winconsultants.com +22921 + ATI Technologies Incorporated + David Raftus + david.raftus&ati.com +22922 + postlab + Michael Krämer + iana&postlab.de +22923 + moll-illner + Stefan Illner + stillner&moll-illner.org +22924 + wegewerk GmbH + Juri Maier + info&wegewerk.com +22925 + LAB-EL Elektronika Laboratoryjna + Michal Konieczny + mk&label.pl +22926 + Heinrig Impex SRL + Andrei Duhnea + a.duhnea&heinrig.ro +22927 + Aqsacom SA + Jean-Francois ZELL + jeanfrancois.zell&aqsacom.com +22928 + Seokyo Telecommunication Co., Ltd. + Mr. Kil Hwan Jang + business&seokyo.com +22929 + Schuemperlin Engineering AG + Hermann Schuemperlin + wavemail&gmx.net +22930 + SPI Dynamics, Inc. + Jimmy Pang + jpang&spidynamics.com +22931 + Parlano Inc. + Jason Bubolz + jason.bubolz&parlano.com +22932 + Nvia2 Mensajes Interactivos, C.A. + Victor Lamas Garcia + Internet&nvia2.com +22933 + itaas, Inc. + Brian Wyatt + snmpadmin&itaas.com +22934 + Rudolf Leiner GmbH + Sebastian Frei + iana&leiner.at +22935 + Beijing Univ. of Aeronautics & Astrnautics + HUANG, Tao + nic&buaa.edu.cn +22936 + Tonbrand Software + Ton Brand + info&tonbrand.nl +22937 + Message Mobile GmbH + Michael Jürgens + admin&mm-manager.de +22938 + NOS Technology AS + Cato Wæhle + Cato.Wahle&nos.no +22939 + WebInterstate Inc. + Brad Nelson + brad.nelson&webinterstate.com +22940 + Arrowspan Inc. + Tom Tang + tomtang&arrowspan.com +22941 + PDX, Inc. + Ken Hill + orders&pdxinc.com +22942 + O2 Micro + Alan Yang + alan.yang&o2micro.com +22943 + CVC Online + Josh Vogelgesang + joshv&cvconline.com +22944 + Winuel SA + Jan Wiktorowicz + jan.wiktorowicz&winuel.com.pl +22945 + KaiserAir + Neil Stevens + neil&eonweb.com +22946 + Chordcom + Mike Stallone + stallone&chordcom.com +22947 + IT-INFORMATIK + Achim Simon + achim.simon&it-informatik.de +22948 + InSerTO + SNMP + special&inserto.fr +22949 + EMAP Plc + Jeremy Bull + Jeremy.Bull&emap.com +22950 + OEG Australia + Mike O'Connor + mike&oeg.com.au +22951 + Lucid Security Corporation + Vik Phatak + vphatak&lucidsecurity.com +22952 + On2 Technologies + Daniel Campbell + iana&on2.com +22953 + EOSLINK CO., LTD + DoYeong Cho + dkoh&eoslink.com +22954 + UNIVERSAL MICROELECTRONICS CO., LTD. + Eddy Kao + eddy-kao&umec-tpe.com.tw +22955 + Azent AB + Thomas Sparr + thomas.sparr&azent.se +22956 + News International Supply Company Ltd. 
+ Benedicte Gercke + chris.wilde&newsint.co.uk +22957 + ZEN Sistemi Srl + Andrea Baldi + abaldi&zensistemi.com +22958 + Weston Digital Technologies Ltd. + Phil Richards + phil.richards&weston.co.uk +22959 + Tony Chachere's Creole Foods of Opelousas Inc + Eric G Ortego + eric&tonychachere.com +22960 + Sozotek, Inc. + John Fogarty + jfogarty&sozotek.com +22961 + SK Telesys + Youngjin Kim + fpassion&sktelesys.com +22962 + matrix.mx + Martin Tomanec + martin+iana&myspot.at +22963 + Vale Technology LTD + Edward Moloney + emoloney&moloneyodeaassociates.co.uk +22964 + Acceris Communications + Scott Hanna + scott.hanna&acceris.com +22965 + Sundance Digital, Inc. + Brij Singh + bsingh&sundig.com +22966 + COSA GmbH + Andreas Ley + iana&cosa.de +22967 + ZR InfoTech + Roland Xinlei Wang + rolandwang&zrinfo.net +22968 + ComBrio, Inc + Dave Boulos + dboulos&combrio.com +22969 + H+BEDV Datentechnik GmbH + Klaus Schleicher + kschleicher&antivir.de +22970 + Dental-On-Line SARL + Cyril Elkaim + celkaim&dental-on-line.fr +22971 + A.P. Moller - Maersk A/S + Harvey Khela + harvinder.khela&maersk.com +22972 + Wilibox + Kestutis Barkauskas + iana&wilibox.com +22973 + Greenfield Networks, Inc. + Wes Perdue + wes&greenfieldnetworks.com +22974 + Elgi Equipments Ltd + Biju Gopinath + admin&elgi.com +22975 + The NGIM Project + Sami Tolvanen + iana&ngim.org +22976 + AirInfinite, Inc. + Ryan W. Kasten + rwkasten&airinfinite.com +22977 + MedImpact Healthcare Systems + Sheila Soulia + sheila.soulia&medimpact.com +22978 + GridConnect, Inc + Edward Landell + edl&gridconnect.com +22979 + Bridgestream Inc. + Jeff Shukis + jshukis&bridgestream.com +22980 + Confio Software + Lynn Stubbs + lynnstubbs&confio.com +22981 + BPB plc + Mr Jason R Tritton + jason.tritton&bpb.com +22982 + Tectona SoftSolutions Pvt. Ltd. + Rajesh Tripathy + rajesh_tripathy&tectonas.com +22983 + SSTIC + Alain Pothin + alain.pothin&bourbon-distrib.com +22984 + ALS&TEC Ltd + Ildar Salimov + gsalimova&hotmail.com +22985 + Derdack GmbH + Ronald Czachara + czachara&derdack.com +22986 + Eurofighter Jagdflugzeug GmbH + Greg Coulthard + greg.coulthard&eurofighter.com +22987 + EVONET Belgium NV + Jos De Graeve + admin&evonet.be +22988 + University of Kentucky + Herman Collins + herman&uky.edu +22989 + Essex Property Trust, Inc + Matthew Clark + mclark&essexpropertytrust.com +22990 + Baltimore County Savings Bank, FSB + Matthew Loraditch + mloraditch&bcsb.net +22991 + Netalfa Ltd. + Attila Bognar + iana&netalfa.com +22992 + WIT, Inc. + Chris Clark + oid.admin&witinc.net +22993 + Belarc, Inc. + Dick DeFuria + enterprise-number&belarc.com +22994 + SiliconWorks Corporation + Jay Vicory + vicorjh&silicon-works.net +22995 + Axley.net + Jason Axley + core-iana-2879&axley.net +22996 + City of Peabody + Frank Nguyen + frank.nguyen&peabody-ma.gov +22997 + Ab Initio Software Corporation + Daniel Flagg + dflagg&abinitio.com +22998 + Allstream Inc + Hubert Kowalczyk + Hubert.Kowalczyk&allstream.com +22999 + FURUNO SYSTEMS Co.,Ltd. + MAKOTO FUJII + fujii&furunosystems.co.jp +23000 + Nexis s.r.l. + Cesare Fontana + cesare.fontana&nexisonline.it +23001 + Corporación Supermercados Unidos S.A + José García + jagarcia&csu.co.cr +23002 + Consentry Networks + Ravi Mulam + ravi&consentry.com +23003 + HyperWerk FHBB + Lukas Meyer + l.meyer&hyperwerk.ch +23004 + C&C Solution Technology + Diego Deboni Rossetto + diegodr&creapr.org.br +23005 + Fluidsignal Group S.A. + Fluidsignal Group Engineering Department + info&fluidsignal.com +23006 + e-Mont d.o.o. 
+ Aleksandar Milosevic + aca&pexim.co.yu +23007 + Imatic + Jan Pekar + iana&imatic.cz +23008 + WebPaws.com + Ryne Allen + ryne&magickalwinds.com +23009 + Asamnet e. V. + Thomas Krieger + iana&tom-krieger.de +23010 + Maxima Communications + Anton Martchukov + antonm&mxc.ru +23011 + OCLC PICA + Janifer Gatenby + j.gatenby&oclcpica.org +23012 + Netprovider S.A. + Juan Marti + jmarti&netprovider.cl +23013 + Salem-Keizer Public Schools + Andy Miller + miller_andy&salkeiz.k12.or.us +23014 + Genie-uk + Thomas Haslett + tom&ttc.uk.net +23015 + NewBay Software Ltd. + Keith Brady + kbrady&newbay.com +23016 + Metaphor Studio, LLC + Ran Mullins + ran&metaphorstudio.com +23017 + Ideal Technologies Inc. + LI MA + LIMA&IDEALTECHS.COM +23018 + Matrix Networks China + Yuran Xie + ryuken2050&yahoo.com +23019 + Pentacomp Systemy Informatyczne Sp. z o.o. + Pawel Jagiello + p.jagiello&pentacomp.pl +23020 + FGAN FKIE/KOM + Michael Bussmann + bus&fgan.de +23021 + Acstre Support Ltd. + Mosko Aladjem + maa&tu-sofia.bg +23022 + Net Optics Inc. + Shelton Liu + shelton&netoptics.com +23023 + THX Ltd. + Amir Fuhrmann + afuhrmann&thx.com +23024 + PT. Dinamika Mandiri + Budi Ang + bang&dm.co.id +23025 + meSSSoft + Pierpaolo Messaggio + ppmessaggio&usa.net +23026 + SuedLeasing GmbH + Marc Brenkmann + marc.brenkmann&suedleasing.de +23027 + Area Electronic System + Chun Shek + chuns&areasys.com +23028 + Sunrise Technology Co. Ltd. + Stephen Wan + wanhairong&hotmail.com +23029 + SQI, Inc. + Steve Tindle + steve&sqi-ops.com +23030 + Shing + Wai Shing Lee + wai2k&eircom.net +23031 + NSP Ltd. + Tom Hibbert + tom&nsp.co.nz +23032 + MailFrontier, Inc. + Scott Eikenberry + sde&mailfrontier.com +23033 + Leivio Technologies, Inc. + Hideyuki Mizusawa + hideyuki&leivio.com +23034 + SETIB + Yannick BOULARD + yannick.boulard&setib.fr +23035 + Labware, s.a. + Eduardo Alsina + ealsina&labware.es +23036 + Isomorphic System Research Inc. + Myron B Cheung + onyx.peridot&gmail.com +23037 + CMT Systems Inc. + Robert Schuett + schuett&cmt.net +23038 + Emphasys Software + Seth Stankowski + sstankowski&emphasysworld.com +23039 + Ambient Corporation + David Goldblatt + dgoldblatt&ambientcorp.com +23040 + Exobit Networks, Inc. + Robert Hinst + rob&exobitnetworks.com +23041 + CQ Inc. + Geoffrey Wu + geoffrey.wu&cqinc.com.tw +23042 + The Real Estate Company, Inc. + Jacob Elder + jake&trec.us +23043 + CastIS Corp. + June, Chung + jungaria&castis.com +23044 + SmallBizConcepts BV + John de Graaff + iana_registration_j&smallbizconcepts.nl +23045 + Cassee.net + Joost Cassee + joost&cassee.net +23046 + MTD Systems + Mike Griffin + mike.griffin&mtdsystems.com +23047 + Xinke (China) Information System Ltd + Zhang Han Xi + zhanghx&xkit.net +23048 + Sabik OY + Bjarne Ross Pedersen + grund&info.dk +23049 + Emsys n.v. + Francis Theys + ftheys&emsys.be +23050 + JSC Department of System Research + Alexey J Suvorov + as1999&mail.ru +23051 + SC TeSIS Logic SRL + Petru Bordeianu + petru&tesis.ro +23052 + APPI Tecnologia S.A. + Eliezio Oliveira + ebo&appi.com.br +23053 + DOSHED Corp. + Ke Ding + bigfaucet&yahoo.com.cn +23054 + Cyris Networks + Igor Ryshakov + igoruha&yahoo.com +23055 + H Zero Seven + Christian Volmering + risk&h07.org +23056 + Riedel Communications GmbH + Ruben Diez + diez.ruben&riedel.net +23057 + ISD Holland BV + Chris Holleman + chris&isd-holland.nl +23058 + Expertron Group (Pty) Ltd + Justin Schoeman + justin&expertron.co.za +23059 + NetFuel, Inc. + Dave Carnal + dave&netfuel.com +23060 + Codebench, Inc. 
+ Robert Fontana + bob.fontana&codebench.com +23061 + VirtualBridges Communications Corp + Ross Murray + rmurray&virtualbridges.com +23062 + Huize-wel.nl + Kick van der Wel + kick&huize-wel.nl +23063 + Dominion Lasercom, Inc + Mark Doucet + mark.doucet&dominionlaser.com +23064 + Kronback ApS + Bo Kronback + bk&kronback.com +23065 + TWINCLING Society + Saifi Khan + manager&twincling.org +23066 + Telmap LTD. + Yacov Habusha + yakov.habusha&telmap.com +23067 + Nanomatic Ltd. + Nedeljko Miljevic + nmiljevic&nanomatic.net +23068 + AngelGroup + Mikhail Pokidko + Mikhail.Pokidko&gmail.com +23069 + Sodexho + Kevin Beamer + Kevin.Beamer&SodexhoUSA.com +23070 + Hubris Communications Inc + Chris Owen + owenc&hubris.net +23071 + Conformiq Software Ltd. + Otto Jansson + it&conformiq.com +23072 + Australian Internet Company Pty Ltd + Lawrence Ong + oid-admin&aic.net.au +23073 + WiMetrics Corporation + Jim Flanagan + jimf&wimetrics.com +23074 + Security Conscious Inc. + Phillip Villella + phil.villella&security-conscious.com +23075 + Novatel Wireless, Inc. + Cuong Pham + cpham&nvtl.com +23076 + SP + Kevin Levie + systeembeheer&sp.nl +23077 + Keppler IT GmbH + Klaus Keppler + kk&keppler-it.de +23078 + Administration des Douanes et Impôts Indirects + HOUARI CHIHI MOHAMED + m.houari&douane.gov.ma +23079 + Kiles Consulting + Richard Kiles + richard.kiles&gmail.com +23080 + New York State Directory Services + Andrew Hagadorn + andrew.hagadorn&oft.state.ny.us +23081 + Opus One, Inc. + Joel M. Snyder + Joel.Snyder&Opus1.COM +23082 + Riedell MotorSports + Charles Riedell + cwriedell&cox.net +23083 + University of Bridgeport + Matanya Elchanani + sysadmin&bridgeport.edu +23084 + Automated Labs + Jeffrey Hulten + jhulten&gmail.com +23085 + QQ Technology, INC. + Zhixin Mu + zxmu&qqtechnology.com +23086 + Major & Minor Exims Pvt Ltd + Amol Hatwar + amol.hatwar&majornminor.com +23087 + Ross Sampson Consulting + Ross Sampson + ross&rosssampson.com +23088 + Applied Watch Technologies, LLC + Eric Maheo + eric.maheo&appliedwatch.com +23089 + Nanjing Gentech System CO., Ltd. + Alan Cui + tsuican&hotmail.com +23090 + ncfritz.net + Neil Fritz + neil.fritz&asu.edu +23091 + Nilson Group AB + Thomas Karlsson + tkn&seaside.se +23092 + gaztec services + gaz aldridge + gaz&gaztec.co.uk +23093 + ANAGRAN + Grisha Kotlyar + grisha&anagran.com +23094 + Kuehne + Nagel (AG&Co.) KG + Nils Ketelsen + nils.ketelsen&kuehne-nagel.com +23095 + ClarioNet, s.r.o. + Kamil Vratny + vratny&clarionet.cz +23096 + jminet.com + Maki Shioya + maki&jminet.com +23097 + Municipalidad de Rosario + Martin P. Degrati + mdegrati&rosario.gov.ar +23098 + ServicePilot Technologies + Bertrand MAHE + bmahe&servicepilot.com +23099 + NeTAMS + Anton Vinokurov + anton&netams.com +23100 + H&T Greenline GmbH + Jürgen Tabert + jtabert&htgreenline.de +23101 + University Hygienic Laboratory + Frank Delin + fdelin&uhl.uiowa.edu +23102 + San Mateo County Public Safety Communications + Robert Bustichi + rsb&smc911dispatch.org +23103 + SRP + Robert R. Manthey + rrmanthe&srpnet.com +23104 + Raven Systems Design, Inc. + Matthew Polak + polak&raven-systems.com +23105 + Action Systems, Inc. + Mischa Rihm + mrihm&action-systems.com +23106 + RedPhone Security, Inc. 
+ Mark Brown + admin&redphonesecurity.com +23107 + Women's Center for Radiology + Michael Brown + mbrown&womenscenterforradiology.com +23108 + RP Online Verlagsgesellschaft mbH + Thomas Pundt + thomas.pundt&rp-online.de +23109 + Netline Internet Service GmbH + Martin Kauss + martin.kauss&netline-is.de +23110 + UNYICO MIEE + Vadim Zakharikov + info&omis-miet.ru +23111 + Spun Pty Ltd + Andrew Agnew + iana_entnbr.admin&spun.net.au +23112 + University of Turku + Eino Tuominen + eino&utu.fi +23113 + LXPT + J. Gomes + iana&lxpt.net +23114 + Comstar Ltd. + Victor Kiranov + vk&comstar.ru +23115 + OMP (Observatoire Midi-Pyrenees) + Yann ROBERT + yann.robert&obs-mip.fr +23116 + iVOD Inc. + Bo Wang + boris.b.wang&gmail.com +23117 + New Zealand Ministry of Health + Craig McGeachie + craig_mcgeachie&moh.govt.nz +23118 + Telenet Systems Pvt. Ltd. + Mrs. Nita Mehta + nita&telenetsystems.com +23119 + Shevchenko Didkovskiy & Partners + Alex Samorukov + samorukov&shevdid.com +23120 + BFE Studio and Media Systems GmbH + M.Kühnapfel + mkuehnapfel&bfe-systemhaus.de +23121 + Saint-Petersburg Certification Authority + Vitaly Gryzunov + viv&nwudc.ru +23122 + Swisscom Ltd. - formely Bluewin AG + Guido Roeskens + scs.reg.iana&gmail.com +23123 + gedas deutschland GmbH + Manfred Gruner + Manfred.Gruner&gedas.de +23124 + Dickey Rural Telephone Cooperative + Brian Johnson + bjohnson&drtel.com +23125 + Spring Lake Consulting, LLC + Doug Hornyak + dhornyak&springsolve.com +23126 + BaneTele AS + Per Magne Olsen + per.magne.olsen&banetele.com +23127 + GINA-COMPUTING GmbH + Ginther Andreas + andreas.ginther&the-ginthers.net +23128 + NitroSecurity, Inc. + Paul Whittington + pwhittington&nitrosecurity.com +23129 + Macrovision Corporation + Brent K. Rolland + brolland&macrovision.com +23130 + CPN International Inc. + David Harris + root&cellbucks.com +23131 + Mithi Software Technologies private Limited + Sunil Uttam + sunil&mithi.com +23132 + Canadian Net + Dave Foster + dave&ionsys.com +23133 + IzhInformProject + Vadim V. Mayshev + mvv&infotrust.ru +23134 + Skillsunited.co.uk + Shaun Wakefield + shoobster&gmail.com +23135 + Articon Integralis AG + Patrick Schraut + patrick.schraut&integralis.de +23136 + Warweus S.r.l + Luigi Belli + gigi&warweus.com +23137 + Altinity Limited + James Peel + info&altinity.com +23138 + Reutlingen University + Jürgen Brenner + Juergen.Brenner&Reutlingen-University.DE +23139 + Software Eginerring Associated Consultants, Inc + Susan Jung + ssj&seac.bc.ca +23140 + Cybernetik.net + Kristofer Pettijohn + krishoppa&cybernetik.net +23141 + HOCHIKI CO.,LTD + Masahiko Nemoto + mnemoto&hochiki.co.jp +23142 + Akademia Swietokrzyska + Artur M. Piwko + artur.piwko&pu.kielce.pl +23143 + Peterson Packaging Oy + Marja Terenius + marja.terenius&petersonpackaging.fi +23144 + Defenxis Sdn Bhd + Syahrul Sazli Shaharir + sazli&defenxis.com +23145 + ExtendMedia Inc. + Bruce Martins + bmartins&extend.com +23146 + HOYA + Cyril GIRONDE + cyril.gironde&hoya.fr +23147 + VIMESA + Antonio Fabregues + gestion&vimesa.es +23148 + Dr. Peter Koch EDV Dienstleistungen + Dr. Peter Koch + iana.pkoch&dfgh.net +23149 + Typodata + Martin Kaeser + m.kaeser&typopharma.com +23150 + MoCoTec Mobile Communication Technologies + Alexander Klupsch + a.klupsch&mocotec.de +23151 + SeeTec Communications GmbH & Co. 
KG + Christoph Weser + christoph.weser&seetec.de +23152 + HVB Leasing GmbH + Martin Zachewicz + martin.zachewicz&hvbleasing.de +23153 + Cloanto Corporation + Takeo Sato + ts-iana&cloanto.net +23154 + AIT Austrian Institute of Technology GmbH + Andreas Zoufal + andreas.zoufal&ait.ac.at +23155 + SOFTWISE DEVELOPMENT + Dan Iulian Trutia + office&softwise.ro +23156 + Vitec Group Communications Limited + Engineering + vgc.uk&vitecgroup.com +23157 + Theobit GmbH + Lothar Feige + Lothar.Feige&Cluster-Labs.com +23158 + CCIS sprl + Jacques Petit + japetit&ccis.be +23159 + eventIS Software Solutions B.V. + Harry Koiter + harry.koiter&eventis.nl +23160 + Geneous Software AG + Sorin Costea + sorin.costea&geneous.com +23161 + Aran Technologies + Jim Donnelly + JDonnelly&arantech.com +23162 + Ecole Supérieure d'Informatique + Pierre BETTENS + pbettens&heb.be +23163 + Defensor del Pueblo + Eduardo Cunha Rodríguez + ecunha&soluziona.com +23164 + Revieworld Ltd + Ben Griffiths + ben&revieworld.com +23165 + SAFE Identity + Kyle Neuman + kyle.neuman&makeidentitysafe.com +23166 + Automatyka + Marek Hajduk + hajmar&interia.pl +23167 + Operation Mobilisation + Matt Phillips + mattp+iana&ict.om.org +23168 + Orchard View Community Network + Alan Peckham + 20050420iana&orchardview.net +23169 + Sigma Systems Canada Inc. + Brian Cappellani + Brian.Cappellani&sigma-systems.com +23170 + Verizon Card Operations + Mark Stallcup + mstallcup&verizongni.com +23171 + Servergraph + Lindsay Morris + lmorris&servergraph.com +23172 + Teletronics Technology Corp + John Roach + jroach&ttcdas.com +23173 + SubZeroNet + Klaus Alexander Seistrup + klaus&seistrup.dk +23174 + WildBlue Communications, Inc. + Aakash Sahai + asahai&wildbluecorp.com +23175 + WorkCompCentral.com, Inc. + David DePaolo, Esq., + david&workcompcentral.com +23176 + SpeedPartner GmbH + Michael Metz + hostmaster&speedpartner.de +23177 + Cirond Corporation + Tim Thompson + tthompson&cirond.com +23178 + Helix Ltd. + Pavel Yablokov + apple&tick.ru +23179 + Bilfrost Incorporated, Inc. + Jonas Gyllensvaan + jonas&bilfrost.com +23180 + TELEVES S.A. + Modesto Gomez + modgom&televes.com +23181 + Weed Instrument Company Inc. + Tarrance Graham + tgraham&weedinstrument.com +23182 + Sussman Automotive + Todd Gerbert + tgerbert&sussmanauto.com +23183 + Amnis Limited Company + Yoshihiro Kawabe + sowhat&amnis.co.jp +23184 + Applied Microsystems, Inc. + Ross Toole + ross&amicro.biz +23185 + Isensix, Inc. + David Mayer + dmayerx&isensix.com +23186 + 4A Solutions Ltd. + Andrew Holt + andrew.holt&4asolutions.co.uk +23187 + Rene Thomas Folse Inc. + Rene Thomas Folse + rene.folse&maxi.net +23188 + Maxi Net Services + Rene Thomas Folse + rene.folse&maxi.net +23189 + NETPLEX LLC + John Donagher + johnd&netplex.net +23190 + Interland, Inc + Scott Cudney + rcudney&interland.com +23191 + Nvia Gestion de Datos S.L + Victor Lamas Sanchez + victor&lagalaxia.net +23192 + Norlight Telecommunications + Richard Swagel + isinfo&norlight.com +23193 + M-Vision + Martin Miller + martin&m-vision.com +23194 + Northwest University + Nathan Henderson + network&northwestu.edu +23195 + Afar Communications Inc. + Lars Poulsen + lpoulsen&afar.net +23196 + DeveTel S.A. + Ricardo García Márquez + rgarcia&devetel.cl +23197 + HAN InfoComm + Choi, Yoo jun + roser&shinbiro.com +23198 + One Cow Standing + Nickie Buckner + nbuckner&nc.rr.com +23199 + MCGV Stack + Dean Strik + dean&stack.nl +23200 + NetFocus Technologies, Inc. + Welson Lin + Welson.Lin168&msa.hinet.net +23201 + ACI Communications, Inc. 
+ Donald C Dove + ddove&acicomms.com +23202 + zinfo.us + Qin Zhou + zcleavon&yahoo.com +23203 + tasmanstudios Ltd. + Neil Bertram + neil&tasmanstudios.co.nz +23204 + Stratacache, Inc. + Li Chou + lchou&stratacache.com +23205 + Alcormizar Inc. + Marc-Andre Gosselin + mag&alcormizar.com +23206 + Clinical DataFax Systems Inc. + Martin Renters + martin&datafax.com +23207 + BIA B.V. + Frans van Dorsselaer + frans&biabv.com +23208 + Rhodanie Systemes et Reseaux Sarl + Ion Marculescu + ionm&bluewin.ch +23209 + Union Compagnonnique + Stephane GERBAUD + stephane.gerbaud&laposte.net +23210 + Pyramid Robotics + Mr. Botto + leeism&earthlink.net +23211 + Queue Global Information Systems Corp. + David Eppert + davide&thinksecurity.net +23212 + Cuttriss Consulting + Paul Cuttriss + paul&cuttriss.com +23213 + Wilico Wireless Networking Solutions SA + Josep Cedo + josep.cedo&futurlink.com +23214 + j2anywhere.com + Alexander Hartner + support&j2anywhere.com +23215 + Netensia + S. Namèche + sebastien.nameche&netensia.fr +23216 + Bawue.Net e.V. + Andreas Thienemann + andreas&bawue.net +23217 + Chambre de Commerce et d'Industrie de Brest + PIerre-Yves NICOLAS + admin&cci-brest.fr +23218 + Betanetworks Ltd.,ShangHai, China + huangzhiming + hzm&betanetworks.com.cn +23219 + DriveTime Automotive Group, Inc. + Van Patterson + 5000&drivetime.com +23220 + ICO Ltd. + JOO WON LEE + jwl&ico.co.kr +23221 + ROCSYS Technologies Pvt.Ltd. + Priyanka + priyankaji&rocsys.com +23222 + ESI SOFTWARE + Rob Raphael + rraphael&esisoft.us +23223 + StartCom Ltd. + Eddy Nigg + eddy_nigg&startcom.org +23224 + Sutton Group Realty Services Ltd. + Benson Wong + benwong&sutton.com +23225 + Maxitel S/A + Joao Artur Gramacho + jgramacho&timmaxitel.com.br +23226 + Dolphin Software + Scott Horn + shorn&dolphinmsds.com +23227 + Kayak Interactive + Andy M + andym&kayakinteractive.com +23228 + City of Las Vegas Nevada + Erin Wells + dnsadmin&lasvegasnevada.gov +23229 + oneK Internet Solutions Inc. + Andrew Kopp + oid&1k.ca +23230 + Captara Corporation + Patrick Mebine + pmebine&captara.com +23231 + tekVizion PVS, Inc + Dan Tifrea + dtifrea&tekvizion.com +23232 + TrackAbout, Inc. + Larry Silverman + lsilverman&trackabout.com +23233 + Webdoeds + Guido Stulemeijer + g.stulemeijer&webdoeds.nl +23234 + HOPS International Inc + John Moses + jmoses&hops.com +23235 + DTI2 + Jorge Boncompte + jorge&dti2.net +23236 + Escher Group Ltd + Tim Grady + Tim.Grady&EscherGroup.com +23237 + Softier Inc. + Gery Kahn + gxk&softier.com +23238 + FCI Broadband Communications Inc. + Raymond Wu + rwu&fcibroadband.com +23239 + Aquest Systems, Incorporated + Brian Wehrung + bwehrung&aquestsystems.com +23240 + the Centers + Palmer Sample + mis&thecenters.us +23241 + Protection and Guard Service + Burlacu Liviu + burlacu.liviu&spp.ro +23242 + SDRC Inc. + Sanh Trinh + sanh.trinh&sdrcinc.net +23243 + Algorab SRL + Cristiano Tonezzer + tonezzer&algorab.com +23244 + Improbable Universe + Sean Ostermann + osty&mts.net +23245 + Westdeutsche ImmobilienBank + Mr. Koehler, Andreas + andreas.koehler&westimmobank.com +23246 + Harmonia Inc. + Aaron Reffett + areffett&harmonia.com +23247 + Wm-Data PARERE + Martin Pettersson + mrpte&wmdata.se +23248 + TRE-CE + Alexandre Oliveira + lionbatata&nikotel.com +23249 + National Center for Biotechnology Information (NCBI) + Charles L. Montour + montour&ncbi.nlm.nih.gov +23250 + fgn GmbH + Joerg Mayer + mayer&fg-networking.de +23251 + Servicios Corporativos Gefe S.A. de C.V. 
+ Titos Dragonas + tdragonas&grupogaba.com.mx +23252 + MYDOM + Schaefer, Dirk Alexander + info&mydom.ath.cx +23253 + Premier Image Corporation + Russell Mangel + russell&tymer.net +23254 + Leebel Services + Jonathan Barnes + jonathan&leebel.com.au +23255 + 1D Solutions + Evan Shelton + evan&1dsolutions.com +23256 + Wit-Sys Consulting Corporation + Gabor PARDI-KIS + gabor.pardi-kis&wit-sys.hu +23257 + OSIsoft, Inc. + Eric Tam + etam&osisoft.com +23258 + SdO Information Technology s.r.l. + Ruggero Bonghi + ruggero.bonghi&sdo.it +23259 + Stadt Frankfurt am Main + Jens Langsdorf + jens.langsdorf&stadt-frankfurt.de +23260 + Paradial AS + Håkon Zahl + hakon&paradial.com +23261 + Structured Information Management (SIM) Project + MERIGHI Marcus + SIM&Tor.AT +23262 + Obiect Soft SRL + Catalin Lichi + catalin&obs.ro +23263 + EXCELIANCE + Christophe Pouillet + cpouillet&exceliance.fr +23264 + Unicon Solutions NV + Marco Spoel + m.spoel&unicon-solutions.com +23265 + Hedgehog Computer Services + Peter Clark + peter-clark&bethel.edu +23266 + Frank Lowe Rubber & Gasket Co., Inc. + Gerry Zahn + gerryzahn&franklowe.com +23267 + Certicámara S.A. + Leonardo Maldonado + admin&certicamara.com +23268 + Sunwayworld Infomation Technology Inc. + Long Zhong + squall_zhong&msn.com +23269 + NeoNova Network Services Inc. + Brian Exelbierd + bex&neonova.net +23270 + AW Comp + Alexander N. Wittig + awi&aw-comp.de +23271 + Protium Technologies, Inc. + Richard Gawlik + adminiana&protiumtechnologies.com +23272 + Runcom Technologies Ltd. + Ofer Zimmerman + shaiko&gmail.com +23273 + Eletech S.r.l. + Giuseppe Malcangio + g.malcangio&eletech.it +23274 + Mindsphere AS + Helge Andersen + helge&mindsphere.no +23275 + Pro Help + David Vella + dvella&hotmail.com +23276 + Cooperativa dos Agricultores da Regiao de Orlandia + Marcos Paulo Serafim + mpserafim&carol.com.br +23277 + ClusterVision BV + Martijn de Vries + martijn&clustervision.com +23278 + Computer Royalties + Russell Mangel + russell&tymer.net +23279 + Picis, Inc. + Greg Cooper + greg_cooper&picis.com +23280 + DigitalFreaks.org + Chad Ziccardi + cz&digitalfreaks.org +23281 + Liaoning Mobile Communications CO.LTD + Bobby Li + helpmeboy&yahoo.com.cn +23282 + iBright + Phillip Needham + phillip&ibright.net +23283 + I/O Concepts, Inc. + Andrew Porter + domains&ioconcepts.com +23284 + Identify Technology Solutions + Jeremy Zuniga + jzuniga.iana.org&itsasolution.com +23285 + STRATUM-IP + Georges Goncalves - Stéphane Bunel + noc&stratum-ip.net +23286 + Gottfried Hamm KommunikationsSysteme + Gottfried Hamm + ghamm&ghks.de +23287 + Joint Concepts Development + J.J.J. Carels + joop.carels&3v8.net +23288 + Hands-On Security, Inc. + IANA Contact + admin&handsonsecurity.com +23289 + Dandy Connections, Inc. + Min Hwang + min&dandy.net +23290 + Shaanxi Normal University + Qiu Jianbo + qjb&snnu.edu.cn +23291 + SigValue Technologies + Kobi Alfandari + kobi&sigvalue.com +23292 + Aternity Inc. + Yair Iny + yair.iny&aternity.com +23293 + Zafin Labs + Andrew McCue + amccue&zafinlabs.com +23294 + GeFoekoM e.V. 
+ Hannes Kraus + pkxl2&pkxl2.de +23295 + Ingenium NET SRL + Bogdan Hojda + admin&ingenium.nt.ro +23296 + Yawarra Information Appliances Pty Ltd + Paul McGowan + iana&yawarra.com.au +23297 + schlittermann -- internet & unix support + Heiko Schlittermann + hs&schlittermann.de +23298 + WestLotto GmbH & Co oHG + Dr Eric Schreiber + Eric.Schreiber&westlotto.de +23299 + LRF + Per Ekstorm + per.ekstorm&lrf.se +23300 + ABEM Instrument AB + Per Hedblom + per.hedblom&abem.se +23301 + Datenverarbeitungszentrum Halle GmbH + Dr. Martin Huth + drmhuth&dvz-halle.de +23302 + LINBIT Information Technologies GmbH + Philipp Richter + philipp.richter&linbit.com +23303 + Dot Communications + System Admins + sysadmins&dot.com.au +23304 + Codan Limited + Kjell Genborg + kjell.genborg&codan.com.au +23305 + Woojyun Systec + SahngOh Jung + sahngoh&woojyun.co.kr +23306 + BeiJing Techstar Ltd. + Han Rui + hr&pps.com.cn +23307 + Machaira Enterprises Pty Ltd + David Bullock + david.bullock&machaira.com.au +23308 + NATIONAL CENTER FOR HIGH-PERFORMANCE COMPUTING + Eugene Yeh + cyeh&nchc.org.tw +23309 + AR Infotek Inc. + William Chen + williamchen&arinfotek.com.tw +23310 + OFiR a-s + Casper Angelo + root&ofir.com +23311 + Macab AB + Markus Olsson + markus&macab.se +23312 + IntelliCom Innovation AB + Jens Jakobsen + ianacontact&hms.se +23313 + Tesla a.s. + ing. Zdenek Kotalik + kotalik&tesla.cz +23314 + Plantron AB + Dirk Handzic + dirk&plantron.se +23315 + Boldon James Ltd + John Hodgkinson + john.hodgkinson&boldonjames.com +23316 + Niedersaechische Forstliche Versuchsanstalt + Jochen Eggemann + Jochen.Eggemann&nfv.gwdg.de +23317 + Heinrich Bauer Verlag KG + Daniel Siegers + daniel.siegers&bauerverlag.de +23318 + Comune di Bologna + Massimo Carnevali + massimo.carnevali&comune.bologna.it +23319 + SIRLAN Technologies SAS + Olivier Abdoun + oabdoun&sirlan.org +23320 + Orange Romania SA + Iustin Pop + iustin.pop&orange.ro +23321 + Frank Juedes EDV-Service + Frank Juedes + Frank.Juedes&edv-service-juedes.com +23322 + Aircom International + Dominic Kirsten + dominic.kirsten&aircom.co.uk +23323 + Cable & Wireless Telecommunication Services GmbH + Martin Mersberger + martinm&cw.net +23324 + swarco + Michael Rychlik + michael.rychlik&swarco.com +23325 + KOTIO + Sylvain MEILARD + sylvain.meilard&kotio.com +23326 + Association des diplômés des formations Systèmes de Télécommunications et Réseaux Informatiques + Webmaster + webmaster&wwstri.org +23327 + VAD Video-Audio_Design GmbH + Michael Strey + strey&vadgmbh.de +23328 + quadraginta-duo.de + Axel Miesen + axel.miesen&web.de +23329 + McDaniel College + Benjamin A. Koger + bkoger&mcdaniel.edu +23330 + CommScope Inc. of North Carolina + Robert Smith + rsmith&commscope.com +23331 + Johnson Bible College + Josh Kelley + josh&jbc.edu +23332 + ADCC Inc. + HanQiang + hanqiang&yeah.net +23333 + Computer Resource Team, Inc. + Craig Warman + crwarman&crtinc.com +23334 + Gorman Electronics,Inc. + Pete Kolonko + pete&gormanelectronics.com +23335 + Hampton Roads Maritime Association + Mick Drevyanko + mick&portofhamptonroads.com +23336 + VELUX A/S IT Department + Martin D. Nielsen + mdn.it&velux.com +23337 + Cybertrust + Dhiren Pankhania + dhiren.pankhania&cybertrust.com +23338 + OpenS Tecnologia e Processamento de Dados Ltda. + André Nazário de Souza + andre&opens.com.br +23339 + JUNG Analog- und Digital-Systemtechnik GmbH + P. Jung + pjung&jung-systemtechnik.de +23340 + Kedah Electronics Engineering + Eugene Chepurnykh + eugene&kedah.ru +23341 + Astute.BIZ, Inc. 
+ Lloyd Fernandes + fernsastute&patmedia.net +23342 + Pathfinder Associates LLC + LDAP Role + support&pathf.com +23343 + Invensys/Wonderware + Vijay Anand + vijay.anand&wonderware.com +23344 + AXS-One Inc. + Krishna Mangipudi + kmangipudi&axsone.com +23345 + System Control Networks + PHILLIP SALZMAN + phill&sysctl.net +23346 + Voya Financial (formerly 'CitiStreet, LLC') + Ted Tickell + Theodore.Tickell&voya.com +23347 + Infra Resource, LLC. + Rail Aliev + rail&iqchoice.com +23348 + PC KNOW HOW CENTER Brachhold GmbH + Mr. Ulrich Brachhold + u.brachhold&pkhc.biz +23349 + NTT GIN + John Heasley + heas&shrubbery.net +23350 + Vertasent, LLC + John Schlack + john.schlack&vertasent.com +23351 + Watertown Public Schools, Watertown, MA, USA + George Skuse + gskuse&watertown.k12.ma.us +23352 + Cognis Corporation + Larry Pinsky + larry.pinsky&cognis.com +23353 + Intelaware + Jon Pounder + jon&pounder.com +23354 + Niska AB + Hakan Niska + hakan&niska.com +23355 + Gosh Enterprises, Inc. + Seawon Choi + schoi&charleys.com +23356 + TRYSKEL + Mathieu ALORENT + mathieu&alorent.com +23357 + Twenty Four Nine Development Ltd + Ruben Arakelyan + ruben&tfnd.uk +23358 + Typhon SARL + Jules Vo-Dinh + root&typhon.net +23359 + Hexod + Philippe Muller + philippe.muller&gmail.com +23360 + Pacific Union College + Jon Falconer + jfalconer&puc.edu +23361 + TEConcept GmbH + Franz-Otto Witte + otto.witte&teconcept.de +23362 + Leviton Mfg Co. + Mayra Silbaugh + msilbaugh&leviton.com +23363 + Sumach GB Ltd + Martin Burlock + mburlock&freenetname.co.uk +23364 + Bryn Mawr College + Matthew Rice + mrice&brynmawr.edu +23365 + Websense, Inc. + Steve Kelley + skelley&websense.com +23366 + ValueClick, Inc. + Antonio Varni + avarni&cj.com +23367 + Host Europe GmbH (formerly 'm networking') + Malte von dem Hagen + noc&heg.com +23368 + GHK Wireless + David Knippelmier + widepipe&gmail.com +23369 + Shawn Church, Information Systems Consultant + Shawn Church + sl_church&sbcglobal.net +23370 + Joel Davis + Joel Davis + deJoelness&gmail.com +23371 + BestInfo Cyber Technology Co.,Ltd + xiaoyu + xiaoyu&bestinfo.com.cn +23372 + Bloombase + Jemmee Yung + jemmee.yung&bloombase.com +23373 + NEC Kansai, Ltd. + Yamada Satoshi + yamada83&kansai.nec.co.jp +23374 + REDtone Telecommunications Sdn. Bhd. + Hew How Chee + howchee.hew&redtone.com +23375 + Islandwide Express + Daniel Pastrana + dpastrana&islandwide.com +23376 + Hyundai Network System, Inc. + Yun, Hae-Sook + snmp&hdnetsys.co.kr +23377 + IT Solution Services Co.,Ltd. + Takeshi Inatsugu + inatsugut&it-ss.co.jp +23378 + T&C Technology + Oh, Byoung Chang + bcoh&tnctec.co.kr +23379 + HUK-COBURG VVaG + Uwe R. Dietz + uwe.dietz&huk-coburg.de +23380 + Datadec Online, S.A. + Jose Carlos Toledo + jctoledo&datadec-online.com +23381 + Die GRUENEN + Alex Seppelt + alex.seppelt&gruene.at +23382 + Swissphone Telecom AG + Harald Pfurtscheller + harald.pfurtscheller&swissphone.com +23383 + Azienda Sanitaria Locale N.4 Chiavarese + Simone Lightwood + slightwood&asl4.liguria.it +23384 + Aptus Elektronik AB + Jesper Svensson + snmp&aptus.se +23385 + GESIS Gesellschaft für Informationssysteme mit beschraenkter Haftung (formerly 'GESIS mbH') + Christian Bormann + christian.bormann&gesis.de +23386 + Pole Universitaire Leonard de Vinci + pascal valois + pascal.valois&devinci.fr +23387 + iXTS Software GmbH + Armin Orthmann + iana&ixts.de +23388 + Hypercube Systems Ltd. + Sebastian James + seb&hypercubesystems.co.uk +23389 + IZB München-Frankfurt a. M. GmbH & Co.
KG + Wolf Sommer + ZZG-IZB10010-Security_Management&izb.de +23390 + easyWAN GmbH + Thomas Bergler + t.bergler&easywan.net +23391 + Atomwide Ltd. + Phil Chapman + phil&atomwide.com +23392 + Insightix Ltd. + Edo Yahav + standards&insightix.com +23393 + LLC CTI - Center of Telephony Integration + Gleb Berezikov + G.Berezikov&cti.ru +23394 + Groupe Wesford + Nicolas MELIN + n.melin&wesford.fr +23395 + Trustedtec + Kay Petzold + kay&trustedtec.org +23396 + Neighbourhood Link + Arthur Niu + arthurn&nlinkces.com +23397 + Fine Point Technologies, Inc + Bob Carrick + bcarrick&finepoint.com +23398 + MTS Allstream Inc. + Bogdan Moldoveanu + bogdan.moldoveanu&allstream.com +23399 + The National Science Foundation + William Altmire + waltmire&nsf.gov +23400 + Midcontinent Communications + Brennan Wellman + brennan.wellman&midco.com +23401 + Sliwa.EU.org Network + Kuba Sliwa + kuba&sliwa.eu.org +23402 + XMedius Solutions Inc. (formerly 'Interstar Technologies Inc.') + Sébastien Lalonde + sebastien.lalonde&xmedius.com +23403 + Sharp HealthCare + Technical Services + technical.services&sharp.com +23404 + DAVANTEL + JOSE R. SALVADOR COLLADO + jrsalvador&davantel.com +23405 + JSC Sahalinmorsvjaz + Ilya S. Anpilov + ucsmc&smcom.ru +23406 + LDAPtive Pty Ltd + Jamie M Vachon + jamie.vachon&levelonenetworks.com +23407 + eyePower Limited + Simon Quill + simon.quill&eyepowerlimited.co.uk +23408 + Rhombus Systems Ltd. + David Hartley + dhartley&rhombus.co.uk +23409 + Torque.net inc. + Michael Harris + mharris&torque.net +23410 + Hackstrike Ltd. + Tali Gurevich + tali&hackstrike.com +23411 + ServInfo + Christian Tardif + christian.tardif&servinfo.ca +23412 + Arcontech Ltd + Jason Martin + jason&arcontech.com +23413 + Melange Corporation + Hideharu Itoh + hitoh&melange.co.jp +23414 + Ilmarinen Mutual Pension Insurance Company + Jere Matilainen + ianacontact&ilmarinen.fi +23415 + nordsys.com + Antoine Sauma + tonysauma&hotmail.com +23416 + MIT-xperts GmbH + Johannes Schmid + info&mit-xperts.com +23417 + COPITEC - Consejo Profesional de Ingeniería de Telecomunicaciones, Electrónica y Computación + Antonio FOTI + presidencia&copitec.org.ar +23418 + Gridlogix, Inc. + David Kempf + dkempf&gridlogix.com +23419 + Talk24 + Richard Smith + richard&talk24.com +23420 + APS systems AG + Martin Schmid + scm&aps-systems.ch +23421 + Altana Pharma AG + Dieter Saken + dieter.saken&altanapharma.com +23422 + Instituto Superior de Contailidade e Administracao de Coimbra + Helder Canais + hcanais&iscac.pt +23423 + Osmosys S.A. + Mariusz Martusewicz + m.martusewicz&osmosys.tv +23424 + Maximum Impression, LLC + Matt Dittbenner + matt&plauditdesign.com +23425 + QS Technologies, Inc. + Kevin Davidson + kdavidson&ntst.com +23426 + Louverturenet + Didier Ambroise + louverturenet&yahoo.fr +23427 + The Home Depot Supply MRO + Erick Nelson + erick_nelson&homedepot.com +23428 + NOC, School of Engineering, Morgan State University + Clifton Wood + cwood&eng.morgan.edu +23429 + Eurocis + Philippe Duveau + philippe.duveau&eurocis.fr +23430 + Lumigent Technologies, Inc. + Ashley A Pinto + ashleyp&lumigent.com +23431 + CAMed + Ralf D. Zwoenitzer + ralf&zwoenitzer.de +23432 + Packet Island Inc. + Praveen Kumar + info&packetisland.com +23433 + BGComp Inc. + Sheu Liang Jyi + blackguy&bgcomp.com.tw +23434 + Georgia Technology Authority + Michael Porter + mporter&gta.ga.gov +23435 + Rock Holdings Inc.
+ Keith Weinbaum + oidadmin&quickenloans.com +23436 + Mexicana de Electromecanicos SA de CV + Cesar Hernandez + chernandez&dks-key.com +23437 + Don Bosco Institute of Technology + Deepak Patil + ddpatil&donboscoit.ac.in +23438 + Beijing Wandong Medical Equipment Co. Ltd. + Mou Xiaoyong + mouxiaoyong&wdmri.com +23439 + HurdFr + Dequenes Marc + duck&hurdfr.org +23440 + Avvenu Inc. + Dave Irvine + dirvine&avvenu.com +23441 + R.A.Systems + Hayashi Yoshihide + hayashi&ras.co.jp +23442 + RockMon IT-Consulting + Jochen Ortwein + development&rockmon.de +23443 + miniprobe + Robert Parusel + info&miniprobe.de +23444 + LATECOERE + TRESSIERES Jean-Pierre + jp.tressieres&latecoere.fr +23445 + Jordan Curzon + Jordan Curzon + jordan&curzons.net +23446 + H5-Group + Ingo Boehm + ingo.boehm&h5-group.de +23447 + Opsware Inc. + Ajay Gummadi + ajay&renditionnetworks.com +23448 + Avitech AG + Peter Rudolph + Peter.Rudolph&avitech-ag.com +23449 + Tripod Technology Group, Inc. + Richard Sand + info&tripodtechnologygroup.com +23450 + W. A. Strosberg + Bill Strosberg + bill&strosberg.com +23451 + Wallenius Wilhelmsen Lines + Roger Carlsen + roger.carlsen&2wglobal.com +23452 + SunGard Availability Services + Mike Simkins + mike.simkins&sungardas.com +23453 + KungFoo + Hubert Iwaniuk + neotyk&kung-foo.kicks-ass.net +23454 + BeByte srl + Patrick Vander Linden + pvdlinden&bebyte.be +23455 + Exceptional Software Strategies, Inc. + Kevin Miller + Sysadmin&ExceptionalSoftware.com +23456 + NetCarrier Inc + Network Operations + noc&netcarrier.com +23457 + AMVESCAP PLC + Drew Knox + drew.knox&amvescap.com +23458 + National Cybernet Security LTD + liukang + liukang&ncs-cyber.com.cn +23459 + Chunghwa Telecom Co., Ltd. + Tiffany Huang + caservice&cht.com.tw +23460 + Profilium Inc. + Jean-Luc Wasmer + jlwasmer&profilium.com +23461 + In4S, Inc. + Daisuke Iritani + daisuke_iritani&in4s.co.jp +23462 + Unixconn + Maxim Bourmistrov + maxim.bourmistrov&unixconn.com +23463 + Kolporter Info S.A. + Dariusz Sznajder + Dariusz.Sznajder&kolporter.com.pl +23464 + Department of IT, University of Defense, Czech Armed Forces + Martin Hlavacek + hlavaczech&gmail.com +23465 + VIVOTEK INC. + Pony Che + pony&vivotek.com +23466 + Vision Fire and Security + Phil Robertson + philr&adpro.com.au +23467 + Shanghai INFOPLS Network Technology Co.,Ltd. , China + Wang,Xing + wangxing&infopls.net +23468 + NGC Systems Sdn Bhd + Leng Chye, Ooi + lengcai&ngc.com.my +23469 + opensoul.org + Brandon Keepers + webmaster&opensoul.org +23470 + LOGATIQUE + Eric Profichet + eric.profichet&logatique.fr +23471 + Digita Oy + Esa Maunula + esa.maunula&digita.fi +23472 + mangoART.AT - Web- und Multimediatechnik, Softwareentwicklung + STANGL Mario + Mario.Stangl&mangoART.AT +23473 + Panda Software International S.L. + Enrique Garcia + ianaid&pandasoftware.com +23474 + EITB + Iratxe Blanco + blanco_iratxe&eitb.com +23475 + Softel Group + Russ Wood + russ.wood&softel.co.uk +23476 + British Educational Communications and Technology Agency + John Chapman + john.chapman&becta.org.uk +23477 + DABiS AG + Andreas Völlmin + support&dabis.ch +23478 + Provincia di Firenze + Jurgen Assfalg + j.assfalg&provincia.fi.it +23479 + Sulake Corporation Oy + Leo Jääskeläinen + contact-iana.org&sulake.com +23480 + Degussa Bank GmbH + Oliver Schneider + oliver.schneider&degussa-bank.de +23481 + Infoaxis Ltd + stuart clark + stuart&infoaxis.biz +23482 + Altobridge Ltd.
+ Tim Moriarty + tmoriarty&altobridge.com +23483 + University of North Carolina Asheville + David Reynolds + reynolds&unca.edu +23484 + C .D.H. srl + Dario Gnaccarini + dgn&cdhsrl.it +23485 + AusCERT - Australian Computer Emergency Response Team + Viviani Paz + v.paz&auscert.org.au +23486 + NetCentrum Ltd. + IT Administration Department + provoz&netcentrum.cz +23487 + Daxten Ltd + Hitesh Varsani + info.ie&daxten.com +23488 + Technica Corporation + James Saint-Rossy + jsaint-rossy&technicacorp.com +23489 + Visplex Association + Judah Thornewill, C. R. O. + judah.thornewill&visplex.net +23490 + ENIDAN Technologies GmbH + Per Jessen + per.jessen&enidan.com +23491 + CyberTech B.V. + G.Stam + g.stam&cybertech-telecom.nl +23492 + LightCore Co., Ltd. + Yoshiyuki Watanabe + admin&lightcore.jp +23493 + Aminfu Hudima + Paul Hedderly + paul+iana&mjr.org +23494 + Mint Systems Ltd. + David O'Connor + david.oconnor&horsebridge.net +23495 + IT-Pro Peter Lechner + Peter Lechner + office&it-pro.at +23496 + Boursorama + Frédéric Lefranc + unix-fr&boursorama.fr +23497 + Lswei + Liao Shih Wei + moli.sato&msa.hinet.net +23498 + Shanghai Asia-Pacific Computer Information System Co, Ltd. + Joe M. Leaf + allah2129&163.com +23499 + Siemens Programm- und Systementwicklung GmbH & Co. KG + Mirko Jekal + mirko.jekal&siemens.com +23500 + Articy Computer System & Service + Arthur TX Niu + ou.yuan&gmail.com +23501 + SailBum Enterprises, LLC + Reggie Rodgers + iana&sailbum.com +23502 + Renew Data Corp + Jon Nials + jnials&renewdata.com +23503 + Arizona Foundation for Medical Care + Michael Setto + msetto&azfmc.com +23504 + TWENTIETH CENTURY FOX FILM CORPORATION + Joe Fuller + joe.fuller&fox.com +23505 + Programming/Design + Joel Dubiner + joel&dubiner.com +23506 + Pleiades Consulting, Inc + Jonathan Freedman + jef&pleiades.ca +23507 + salesforce.com + Alejandro Bolivar + abolivar&salesforce.com +23508 + EVI + eric vernichon + eric&vernichon.fr +23509 + BVCompuworks + Greg Varga + gvarga&bvcompuworks.com +23510 + Centro Cultural y Deportivo Tajamar + Alfredo Abad Domingo + aet&tajamar.es +23511 + TELUS + Eric Chartre + eric.chartre&telus.com +23512 + Copi Family + Craig J Copi + admin&copi.org +23513 + Westline Security Ltd. + Albert Hui + avatar&west-line.net +23514 + Michaels Stores Inc. + Rick York + yorkr&michaels.com +23515 + TEKMOB, lda + Santos Silva + santos.silva&netcabo.pt +23516 + CitiCards + Andrew Kitzman + andrew.kitzman&citigroup.com +23517 + Leviton Voice And Data + Hieu Pham + hpham&levitonvoicedata.com +23518 + Tomas Zoufaly + Tomas Zoufaly + zoufaly&slapak.net +23519 + Regal-Beloit Corporation + Scott Marquardt + scott.marquardt&rbcmtg.com +23520 + Unassigned + Removed 2014-08-12 + ---none--- +23521 + Amherst County Public Schools + David Childress + dchildress&amherst.k12.va.us +23522 + Volicon, Inc. + Idan Gazit + idan&volicon.com +23523 + MT-C S.A. + Valery Guilleaume + valery.guilleaume&mt-c.com +23524 + Weberhofer GmbH + Johannes Weberhofer + office&weberhofer.at +23525 + Aleksey Barabanov + Aleksey Barabanov + alekseybb&mail.ru +23526 + Auster Sistemas & Soluções de Informática Ltda. + Ricardo Barone + ricardo.barone&auster.com.br +23527 + NewPage Corporation + Ted Hill + Ted.Hill&NewPageCorp.com +23528 + The Laddie Group + Bob Smith + bsmith&linuxtoys.org +23529 + APP!X S.R.L. 
+ Claudio Russo + claudio.russo&appix.it +23530 + State of Alaska, Enterprise Technology Services + Luke Kreuzenstein + luke.kreuzenstein&alaska.gov +23531 + Practeo SA + Marc Hauswirth + marc&practeo.ch +23532 + PLAN Australia Pty Ltd + David Pinkerton + davidp&planaustralia.com.au +23533 + PJR.cc + Paul Robinson + me&pjr.cc +23534 + Supertronic Spa + Walter M. Oldoni + walti&supertronic.it +23535 + Holly Australia Pty Ltd + Nathan Clark + support&holly.com.au +23536 + eNClue Co., Ltd. + Cho Soon Yim + csyim&enclue.com +23537 + Shanghai Dreaming Information Technology Co., Ltd. + Eric Qiu + qjm&dreamingtech.com +23538 + Jenston Technology Corporation Ltd. + Raymond Sze + raymond&jenston.com +23539 + teamdzr + Scott Brown + dangerzone_03801&yahoo.com +23540 + Cordys Holding B.V. + Richard Quist + rquist&cordys.com +23541 + Emcore Corporation + Albert Lu + alu&emcore.com +23542 + CryptoSign + Nancy Lee Pope + webmaster&cryptosign.tk +23543 + iCONN + Jimahn Kim + jimahn&empal.com +23544 + Allen ReSearch + Rich Allen + traf&mtaonline.net +23545 + West Ridge Networks, Inc. + Navneeth Kannan + nkannan&westridgenetworks.com +23546 + Wagner and Associates Groupware Services, Inc + Brian Wagner + bwagner&wagsworld.net +23547 + IFSERCON COM s.r.l. + ifsercon com + ifsercon&gmail.com +23548 + Elber S.r.L. + Ing. Vittorio Lagomarsino + v.lago&elber.it +23549 + Toyo Networks & System Integration Co., Ltd. + Toshiaki Saitoh + macrenraku&dmy.magnus.nec.co.jp +23550 + e-World Developments Ltd + Stephen Phillips + sp&ewdev.com +23551 + ErgonomiX Software GmbH + Kerstin Funke + kerstin.funke&ergonomix.de +23552 + JPS Communications + David Nerge + dave.nerge&jps.com +23553 + Discover Financial Services, Inc. + George Leedle + georgeleedle&discoverfinancial.com +23554 + PEMI + Monika Januszewska + biuro&pemi.pl +23555 + contentteam GmbH + Robert Stupp + rstupp&contentteam.de +23556 + Centre de Gestion Ocean + Daniel NOIZET + courrier&cgocean.com +23557 + EEA communication solutions + Gabriel Lachmann + hostmaster&eea.sk +23558 + IPsoft Inc. + Mike Ghicas + support&ip-soft.net +23559 + United Information System Service Co., Ltd. + Alec Chang + alec_chang&uis.com.tw +23560 + edreams + Rodrigo Vargas Acevedo + mi7_h4&yahoo.com +23561 + Anite Deutschland GmbH & CO.KG + Mathias Wohlfarth + mwohlfarth&anite.de +23562 + Smartware Computing + Dave Smart + Dave&Smart-Family.net +23563 + DUX Inc. + Takuya kiuchi + root&dux.co.jp +23564 + Schmidt GmbH + Natalie Fisenko + natalie_fisenko&gmx.de +23565 + Panta Computer Systems + Adhir Potdar + adhir&pantasys.com +23566 + Infohit Computers d.o.o. + Mauricio Culibrk + mauricio&infohit.si +23567 + Georg-Christoph-Lichtenberg-Schule, Kassel + Klaus Fueller + KlausF&Schule.de +23568 + Noel-Plus, s.r.o. + Ludvik Vlcek + l.vlcek&noel-plus.cz +23569 + TeamWorx Productions Ltd. + Brian Pederson + brian&teamworx.net +23570 + Soliton + Ajrat Mansurov + mansaf&soliton.com.ru +23571 + IPBS-CNRS + Laurent Bardi + Laurent.Bardi&ipbs.fr +23572 + WebMagician Global + Brian Pederson + security&webmagician.com +23573 + Kids & Co g.e.V. 
+ Rene Scholz + pcwerkstatt&kids-und-co.de +23574 + TU Hamburg + Dirk Schäfer + rz.oids&tuhh.de +23575 + Wayfinder Systems AB + Pierre Larsson + iana&wayfinder.com +23576 + ADVIS Maciej Matysiak + Maciej Matysiak + administrator&advis.pl +23577 + Enfo Partner Oy + Veli-Matti Luotonen + veli-matti.luotonen&enfo.fi +23578 + MSC Vertriebs GmbH (formerly 'Smart Network Devices GmbH') + Ulrich Rahmel + urah&msc.de +23579 + Centrica plc + Richard Huntley + richard.huntley&centrica.com +23580 + Andritz Oy + Mika Metsärinne + mika.metsarinne&andritz.com +23581 + Hacousto Holland + Alex Angenent + alex.angenent&hacousto.com +23582 + ESEO + Damien Piraud + damien.piraud&wanadoo.fr +23583 + APOS Systems Inc. + Dan Clements + dclements&apos.on.ca +23584 + Logical Progression Ltd + Kevin Campbell + kevin&logicalprogression.net +23585 + Bank Technologies Centre + Aliaksandr L. Radkevich + a_radkevich&btc.minsk.by +23586 + Ralf Kirchner Systemanalyse + Ralf Kirchner + masterkir&gmx.de +23587 + PROFIcomms s.r.o. + Mr. Ales Popelka + info&proficomms.cz +23588 + FOI, Swedish Defence Research Agency + Mats Ekman + mek&foi.se +23589 + Max Zinal's Number + Max Zinal + Zlat0&mail.ru +23590 + F.I.A.S.A. + Montomoli Giovanni + fiasaced&assindustria.pr.it +23591 + CCNY + Shaoquan Lin + lin&ccny.cuny.edu +23592 + Iowa Counties Information Technology + Scott Williams + swilliams&co.marshall.ia.us +23593 + Dipl.-Ing. Heiko Boesel + Heiko Boesel + iana&heiko-boesel.de +23594 + EBI L.P. + Robert Kelly + robert.kelly&ebimed.com +23595 + Virginia Employment Commission + Christopher Nicholl + christopher.nicholl&vec.virginia.gov +23596 + pikkerton GmbH + Lothar C. Feige + Lothar.Feige&pikkerton.de +23597 + Esica Terra Inc. + Bruce Brandon Werner + bwerner&esicaterra.com +23598 + UX + Steven Brunasso + sbrunasso&gmail.com +23599 + Studio Network Solutions + Eric Newbauer + enewbauer&studionetworksolutions.com +23600 + Transora + Jon Beyer + steam&transora.com +23601 + Emcom Systems + Mark Zuckerman + markz&voicenet.com +23602 + Fulano.com.br S/A + Mauricio Silva + admin&fbiz.com.br +23603 + Optellios + Zhizhong Zhuang + info&optellios.com +23604 + GoldPocket Interactive, Inc. + Rich Schiavi + rschiavi&goldpocket.com +23605 + Applications Plus, Inc. + Steve Heggood + steve&heggood.com +23606 + DigiPen Institute of Technology + Ryan Fulcher + it&digipen.edu +23607 + Mélix + Thomas Bliesener + iana&melix.com.mx +23608 + Paul's Farm + Paul A. Johnson + paul&paulsfarm.com +23609 + Ix Appliance, Inc. + Duncan McGreggor + duncan&adytumsolutions.com +23610 + WDG s.c. + Rafal Kupka + rkupka&wdg.pl +23611 + nLight, s.r.o. + Radovan Semancik + rse&nlight.sk +23612 + Atlassian Software Systems + Nick Faiz + nick&atlassian.com +23613 + JAPAN IMAGE & NETWORK INC. + Makoto Kawahata + snmp&jini.co.jp +23614 + A. Wilton + Art Wilton + art&awilton.com +23615 + HiFX IT & MEDIA SERVICES PVT. LTD. + Siju Oommen George + netadm.pub&hifx.net +23616 + SYNCHROSOFT + LOVISA Frank + lovisa&synchrosoft.fr +23617 + Deutsches Literaturarchiv Marbach + Thomas Meyer + Thomas.Meyer&dla-marbach.de +23618 + Saima Avandero Spa + Sandra Giampedraglia + sgiampedraglia&saima.it +23619 + SERGAS + Javier Quiles + javier.quiles.delrio&sergas.es +23620 + Teltronix Ltd + Mark Otridge + mark.otridge&onetel.net +23621 + Bechtel SAIC Company LLC + Alan Claypool + alan_claypool&ymp.gov +23622 + European Patent Office + Johan Sääw + jsaeaew&epo.org +23623 + Icon Multimedia + Julian Vicente + julian&iconet.es +23624 + První certifikační autorita, a.s.
+ Petr Budiš + sales&ica.cz +23625 + Intechgral Inc. + Matt Wisner + mwisner69&comcast.net +23626 + Poggs Computer Services + Peter Hicks + peter.hicks&poggs.co.uk +23627 + Interlix + Edward Mueller + edwardam&interlix.com +23628 + California State University Dominguez Hills + Edgar Lazarian + elazarian&csudh.edu +23629 + SafeNet, Inc. (formerly 'Mykotronx, Inc.') + Bill Becker + bill.becker&safenet-inc.com +23630 + Computer Aid, Inc. + Jim Chrapowicz + jimc&compaid.com +23631 + Azalea Networks, Inc. + Xu Zou + xzou&azaleanet.com +23632 + Sneha Technologies S.L. + Jorge Borrego + support&sneha-tech.com +23633 + ipsys Communications AG + Chris Schäke + chris.schaeke&ipsys.com +23634 + TC Automatisering + Wouter Thielen + w.thielen&tcautomatisering.nl +23635 + colin.de + Colin Leitner + colin.leitner&colin.de +23636 + Lycee Ader Bernay + Patrice Treton + patrice.treton&ac-rouen.fr +23637 + Novartis Institutes og BioMedical Research Vienna GmbH & Co KG + Benjamin Almeida + benjamin.almeida&novartis.com +23638 + Logilec + Eric Proust + eric.proust&logilec.fr +23639 + ParsGroup Informationstechnologie GmbH + Ferdinand Karner + ferdinand.karner&parsgroup.at +23640 + Enercon Ltd. + Maxim Kagan + maxim&enercon.co.il +23641 + Cascade Ltd + CY Chan + chi-yuen.chan&pccw.com +23642 + Tmidia Inc. + John Lynch + jlynch&tmidia.com +23643 + bsecure.dk + Martin Boller + martin&bollers.dk +23644 + Siemens Ltd., China + Zou Yu + yu.zou&siemens.com +23645 + DAF Trucks + Stephan van Veelen + stephan.van.veelen&daftrucks.com +23646 + Cell C (Pty) Ltd + N Mojaenmore + hdap&cellc.co.za +23647 + SARL GALLET.info + Sebastien GALLET + sebastien&gallet.info +23648 + ESV, LLC + Julian I. Kamil + julian.kamil&gmail.com +23649 + Cornerturn, LLC + Kevin Pratt + khpratt&cornerturn.com +23650 + Opendicom + Jacques FAUQUEX + jacquesfauquex&gmail.com +23651 + Integrated Decisions and system Inc. + Prafulla Girgaonkar + pgirgaonkar&ideas.com +23652 + Lewes Computer Services + Nigel Mundy + nigel&spam-filter.org.uk +23653 + Cirrus Technologies Pty Ltd + David Poole + info&cirrustech.com.au +23654 + AICHI ELECTRIC CO., LTD. + Shinji Isaji + isaji.shinji&adkk.co.jp +23655 + CallDesk S.A. + Eric Granados + E.Granados&CallDesk.be +23656 + Cult Hill Innovation Limited + Peter McClelland + info&culthill.com +23657 + TEDIS, a.s. + Alena Hrivnakova + admin-ca&tedis.sk +23658 + TUBITAK ULAKBIM + Asli Zengin + asli&ulakbim.gov.tr +23659 + lars morich Kommunikationstechnik GmbH + Lars Morich + kontakt&morich-gmbh.de +23660 + DRS Technologies, Inc, C2 Systems + Richard T. Cerpa + rcerpa&drs-c4i.com +23661 + American TeleCare, Inc. + Malcolm Gates + malcolm.gates&americantelecare.com +23662 + Blue Jungle, Inc + Safdar Kureishy + safdar.kureishy&bluejungle.com +23663 + Storied Future Infrastructure + Justin L. Karr + jlkbiz&storiedfuture.com +23664 + Federal Deposit Insurance Corporation + Sonia Yadav + soyadav&fdic.gov +23665 + wiselink + Jack Jiao + jiaoli&bupt.edu.cn +23666 + BeaufortWeb + Christopher Cover + christopher&beaufortweb.com +23667 + FredNet + Sjoerd de Boer + shured&frx.nl +23668 + Kaspersky Lab ZAO + Ivan Moshkarin + snmp&kaspersky.com +23669 + Kyphon Europe + Johny Van Genechten + jvangenechten&kyphon-eu.com +23670 + Nihon Kohden Corporation + Yasuhiro Osawa + Yasuhiro_Osawa&mb2.nkc.co.jp +23671 + CIS Hollywood + Robert Minsk + iana&cishollywood.com +23672 + nTime Servicos de Informatica e Comercio Ltda + Thiago Figueiro + thiago.figueiro&nTime.com.br +23673 + Bluesoft sp z o.o. 
+ Michal Suszko + msuszko&bluesoft.net.pl +23674 + Edu Business Solutions + Paul Thormahlen + pault&edubusinesssolutions.com +23675 + Foray Technologies Inc. + Mont Rothstein + mont&foray.com +23676 + G&K Services inc. + Scott Rowe + SRowe&GKServices.com +23677 + Colonial Fiji + Isikeli Vuetaki + ivuetaki&colonial.com.au +23678 + PCTEL, Inc. + Steven Stearns + steve.stearns&pctel.com +23679 + Diligence Consulting, L.L.C. + Robert Sordillo + rsordillo&diligence-consulting.com +23680 + Eclipse SP LLC. + Konstantin Kirenko + info&eclipse-sp.com +23681 + terreActive AG + Roger Meier + iana&terreactive.ch +23682 + emuse Technologies Ltd. + Ronan Geraghty + r.geraghty&emuse-tech.com +23683 + EDV-COMPAS GmbH + Mr. Sönke Nagel + soenke.nagel&edv-compas.com +23684 + Sensitech Inc. + Tim Wile + tim.wile&sensitech.com +23685 + Godsk.net + Thomas Godsk Jørgensen + thomas&godsk.net +23686 + Softential, Inc. + Mohan Kompella + bsm&softential.com +23687 + Pobeda JSC + Nikolay D. Yurgandzhiev + koko&pobeda-ad.com +23688 + Summit Development, Ltd + Jedlik Stefan + jedlik&summitd.cz +23689 + Pervenio Ltd + Ian Deakin + ian.deakin&pervenio.net +23690 + Abstract Ltd. + Yair Badian + yairbadian&gmail.com +23691 + VIA + Lars Olof Kanngard + Lars&via.ae +23692 + IDactis + Nicolas SANTERNE + Nicolas.Santerne&idactis.com +23693 + Networks & Concepts GmbH + Joerg Hochwald + joerg.hochwald&networks-concepts.com +23694 + PMX NETWORKS, INC + JIMMY HANNAN + jimmyh&pmx-networks.com +23695 + PePLink Ltd. + Kenny Ng + kennyn&peplink.com +23696 + Nilai International College + Joe Teoh Chin Hor + joe_teoh&nilai.edu.my +23697 + hoi-polloi.org (formerly 'aspector GmbH') + Bernd R. Fix + brf&hoi-polloi.org +23698 + RELDATA Inc. + Yann Livis + ylivis&reldata.com +23699 + Zyden Software + SNMP Team + snmp&zyden.com +23700 + Internews 98 Ltd. + Atanas Bachvaroff + bachvaroff&internews-bg.com +23701 + Cannon Technologies Ltd + Mark Hirst + mark.hirst&cannontech.co.uk +23702 + SHD AG + Bernd Kossmann + inh-admin&shd.de +23703 + Agile Software (NZ) Ltd + Matthew Bagley + matthew.bagley&agilesoftware.co.nz +23704 + Calypso Capital Management + Calypso IT + it&calypsocap.com +23705 + Salient Corporation, Inc + Darrell Welty + dwelty&salient.com +23706 + GlobalPOPs + Hostmaster + hostmaster&globalpops.com +23707 + LAFARGE ALUMINATES + Philippe ESPOSITO + info&aluminates.lafarge.com +23708 + HYMATOM SA + Frédéric BERNON + frederic.bernon&hymatom.fr +23709 + Miltope Corporation + Steven H. Rines + steven.rines&miltope.com +23710 + Agincourt Computing + Steve Smith + iana-contact&aginc.net +23711 + Seventh Skill Ltd + Jon Brightwell + jonbrightwell&seventhskill.co.uk +23712 + Minnesota Office of Enterprise Technology + Art Smolecki + art.smolecki&state.mn.us +23713 + Keating Inc + James Keating + keating&woh.rr.com +23714 + Data Systems S.A.E.C.A. + Luis C. Benitez + lbenitez&data.com.py +23715 + Live Monitoring Pty Ld (formerly 'Produ-Tech Monitoring Pty Ld') + Roger Fraser + roger&livemonitoring.co.za +23716 + United States Council for Automotive Research + Daniel A. Rickert + drickert&uscar.org +23717 + Siricomm, Inc. + Bryan Wilcox + bryan.wilcox&siricomm.com +23718 + Esensors, Inc. + Networking Department + techhelp&eesensors.com +23719 + Arishi + Mike Woods + mike&arishi.com +23720 + Netifice Communications, Inc. + A. T. Stephen Thamban + athiban.thamban&netifice.com +23721 + The Ideal Computer Group Inc. 
+ Darryl Krasman + iana&idealgroup.com +23722 + CS-Soft, s.r.o + Roman Gryc + gryc&cs-soft.cz +23723 + Great Power Electrin Science and Technology stock CO.LTD + Lin_jianeng + lin.jianeng&gmail.com +23724 + Udo Neumann + Udo Neumann + iana.mg&deklinierer.net +23725 + Theserver Brasil / Multidiagnostica IT Team + Carlos Henrique B. F. N. Pereira + carlos&theserver.com.br +23726 + VOTORANTIM INVESTIMENTOS INDUSTRIAIS S.A. + Antonio Carlos M. Nogueira Filho + antonio.nogueira&vpar.com.br +23727 + Marlin Trust Management Organization L.L.C + MTMO Operations + operations&marlin-trust.com +23728 + Versio SARL + Bernard BAILLY + bernard&versio.fr +23729 + DiVitas Networks + Srinivas Athuluru + atg&verdantarch.com +23730 + OVEA + Ronnie Garcia + r.garcia&ovea.com +23731 + Telestar + zinan ren + renzinan&gmail.com +23732 + ShijiaZhuang Railway Institute + Yan shi qiang + yanshq&sjzri.edu.cn +23733 + JGC Corporation + Shigeaki Suzuki + suzuki.shigeaki&jgc.co.jp +23734 + OEBB + Gerhard Morauf + gerhard.morauf&oebb.at +23735 + Fondation RESTENA + Stefan Winter + stefan.winter&noc.restena.lu +23736 + IceWarp Ltd. + Jakub Klos + jakub&icewarp.com +23737 + Ulm University of Applied Sciences + Jürgen Sonntag + juergen.sonntag&thu.de +23738 + AWR Solution Network + Alessandro Magalhaes + amagalhaes&awrsolution.com +23739 + HUG + LENOBLE + virginie.lenoble&hcuge.ch +23740 + SynerGIS Informationssysteme GmbH + Stefan Weihs-Sedivy + s.weihs&synergis.co.at +23741 + Wogri Unlimited + Wolfgang Hennerbichler + office&wogri.at +23742 + CONNOTECH Experts-conseils inc. + Thierry Moreau + thierry.moreau&connotech.com +23743 + Kerridge Computer Company Ltd + Steve Powell + sp&kerridge.com +23744 + BITMARCK SOFTWARE GMBH (formerly 'Arbeitsgemeinschaft Informationssysteme in der gesetzlichen Krankenversicherung (ISKV)') + Ralf Waldvogel + ralf.waldvogel&bitmarck.de +23745 + Black Hen Limited + Richard Schneider + richard.schneider&blackhen.co.nz +23746 + Administration communale de Braine-l'Alleud + Jean-Francois Brison + service.informatique&braine-lalleud.be +23747 + Areste Centre Informatique + Luc Blanc + lblanc&areste.com +23748 + Edinform S.p.A + Luigi Salvatore Palese + lpalese&edinform.it +23749 + Federal State Unitary Enterprise Leningrad Radio Research and Development Institute + Roman Surma + sur&loniir.ru +23750 + new10 GmbH + Michael Hirczy + admin&new10.com +23751 + PT. TRG international + Insan Praja SW + insan&telematika.web.id +23752 + ExcellNet IT Services and Consulting + Attila Nemes + nemes.attila&freemail.hu +23753 + Infinit Productions + Sebastien Couture + sysreq&gmail.com +23754 + Mairie de Savigny-Sur-Orge + Jean-Michel LACROIX + jm-lacroix&savigny.org +23755 + Softrax Corporation + Richard Adams + radams&softrax.com +23756 + Promotora de Informaciones, S.A. + Jaime Guntín + OID_prisa&prisa.es +23757 + AR Telecom + João Frade + joao.frade&artelecom.pt +23758 + Desert Cascade + Tsani Jones + tjones&surfside.net +23759 + Szolnok Város Polgármesteri Hivatal + Mészáros Ákos + meszarosa&ph.szolnok.hu +23760 + LayerZero Power Systems, Inc. + Milind M. Bhanoo + mbhanoo&layerzero.com +23761 + CMSL + Matthew Reeves-Hairs + iana&cmsl.com +23762 + Time-Ware + Thomas Wenrich + office&timeware.at +23763 + Rent a PACS GmbH + Michael Ulbrich + mul&rentapacs.de +23764 + Atamo Pty Ltd + Peter Barrow + peter.barrow&atamo.com.au +23765 + GetNet Comunicacoes S/C LTDA + Fabiano Felix + felix&getnet.com.br +23766 + TriGeo Network Security, Inc. 
+ Michael Maloof + mmaloof&trigeo.com +23767 + Rensselaer Hartford Graduate Center, Inc + Gary Trail + gary&rh.edu +23768 + Integrated Management Resources, Inc. + John Hampton + john&imr-net.com +23769 + Infinity Technology, Inc. + Curt LeCaptain + lecaptainc&infinitytechnology.com +23770 + Andreoli Domenico + Domenico Andreoli + cavok&tiscali.it +23771 + Marakana, Inc. + Aleksandar Gargenta + sasa&marakana.com +23772 + RoutePulse Technologies + Parag Bopardikar + parag&routepulse.com +23773 + Starfire Engineering & Technologies, Inc. + Richard R. Spurlock + rrspurlock&starfire.net +23774 + jNetX Inc. + Steven A. Hollis + sah&jnetx.com +23775 + Outer TI + Gustavo Torres + gustavo&outer.com.br +23776 + RougeNetwork + Toshiyuki Fukushima + fukushima&rouge.gr.jp +23777 + Elektrarny Opatovice a.s. + Miroslav Jezek + admin&eop.cz +23778 + BEZ Systems, Inc. + Tim Vasil + tvasil&bez.com +23779 + Asklepios Group + Sebastian Wessel + s.wessel&asklepios.com +23780 + Prose Consulting Ltd. (formerly 'Freedomware UK') + Mark R. Bannister + mark&proseconsulting.co.uk +23781 + Albertslund Ungdomsboliger + J. Martin Petersen + jmp&aub.dk +23782 + ASN + Michal Wrobel + mwrobel&asn.pl +23783 + Planet Networks, Inc. + Robert Boyle + robert&planet.net +23784 + StrataLight Communications + Roberto Marcoccia + roberto&stratalight.com +23785 + CIC/CAFMICRO + PALANGA Eyouléki + vpalanga&ub.tg, pvenant&bsl.tg +23786 + DS DATA SYSTEMS GmbH + David Kibilka + dkibilka&datasystems.de +23787 + gnyrf.net + Roger Abrahamsson + roger&gnyrf.net +23788 + Matt Wright Consulting + Matt Wright + mwright&mattwright.us +23789 + GrammaTech, Inc. + Dave Capella + capella&grammatech.com +23790 + Pearce Bevill Leesburg & Moore, P.C. + Dave Jones + dave&pearcebevill.com +23791 + Adaptix + Heng PAN + hengp&adaptix.com +23792 + Axell Wireless Israel Ltd (formerly 'Dekolink Wireless Ltd') + Martin Hancock + martin.hancock&axellwireless.com +23793 + Cluster Resources + Michael Musson + musson&clusterresources.com +23794 + Significant Bits + Joel Simpson + Joel.Simpson&gmail.com +23795 + Movile (formerly 'Yavox Latin America') + Daniel La Laina + daniel.lalaina&movile.com +23796 + SunWare GmbH & Co KG + Julian Schüren + J.Schueren&sunware.de +23797 + KSIGN Co., Ltd. + Jeong-mi KIM + jenny&ksign.com +23798 + Australia Post + Wayne Mills + wayne.mills&auspost.com.au +23799 + icon Systemhaus GmbH + Natalie Fisenko + natalie.fisenko&icongmbh.de +23800 + Roca Corporación Empresarial, S.A + Enric Sola + Enric_sola&roca.net +23801 + Metrima Energi AB + Mikael Holm + mikael.holm&metrima.se +23802 + Dekabank + Markus Becker + markus.becker2&deka.de +23803 + Ecole Polytechnique + Andrey Ivanov + andrey.ivanov&polytechnique.edu +23804 + Rena Electronica B.V. + Karel Dupain + k.dupain&rena.nl +23805 + International Industrial Bank + Andrey Waulin + aw&iib.ru +23806 + Touch Clarity Ltd. + Jason Crane + jason.crane&touchclarity.com +23807 + UIB Umwelt Informatik Büro GmbH + Detlef Oertel + d.oertel&uib.de +23808 + CCM Software Services Ltd. + Rossa Mac Manamon + Rossa.MacManamon&serco.com +23809 + Bridicum Security Group A/S + Martin Hansen + mlh&bridicum.com +23810 + Rothschild Bank AG + Jan Brunschwig + jan.brunschwig&rothschildbank.com +23811 + Qbizm technologies, a.s. + Jaroslav Kortus + tech&qbizm.cz +23812 + INL SARL + Eric Leblond + inl&inl.fr +23813 + Cendant TDS + Ken Guettler + kguettler&orbitz.com +23814 + ID-Tech Servicos em Informatica Ltda. + Daniel C. 
Landi + dclandi&id-tech.com.br +23815 + Waterbury Republican American + John Anderson + janderson&rep-am.com +23816 + SlipStream Data Inc. + James McDonnell + jmcdonnell&slipstream.com +23817 + Ministerio de Economía, República Argentina + Aldo Rosemberg + iana&mecon.gov.ar +23818 + FirstEnergy Corp. + Ray Sefchik + rsefchik&firstenergycorp.com +23819 + Epoch Integration Inc. + Maithili Mavinkurve + maithili&epochintegration.com +23820 + Lateral Net Ltd. + Daniel Jordan Bambach + dan&lateral.net +23821 + Irish Centre for High End Computing + Niall Wilson + niall.wilson&nuigalway.ie +23822 + MITSUI HOME CO.,LTD. + Masayuki Yamaga + m-yamaga&mitsuihome.co.jp +23823 + Wise Technology Co.,Ltd. + James Chen + jschen&wisedata.com.tw +23824 + LAC Co., Ltd. + Kenichiro Kawase + kawase&lac.co.jp +23825 + EZconn Corporation + KY Wu + kywu&ezconn.com +23826 + HiSpeed Data, Inc. + Vinay Purohit + vp2&verizon.net +23827 + GHY International + Mark Balitsky + mark&ghy.com +23828 + MKI Systems + Alexander Hoyland + ahoyland&mkisystems.com +23829 + W&W Informatik GmbH + Ingo Huber + ingo.huber&ww-informatik.de +23830 + Blekinge Studentservice AB + Johan Forssell + j&bssab.se +23831 + Teneros, Inc. + Teneros Admin + tnsadmin&teneros.com +23832 + Aitec do Brasil Ltda + Angelim Pinto + angelim&linkconsulting.com.br +23833 + DentaQuest Ventures, Inc. + Joe Lawrence + JLawrence&greatdentalplans.com +23834 + Fachhochschule Salzburg GmbH + Andreas Strobl + domain-admin&fh-salzburg.ac.at +23835 + emFAST Inc + Sanjay Nagaraj + sanjayn&emfast.com +23836 + DS4 Laser Technology s.r.l. + Giampaolo Bellini + giampaolobellini&ds4.it +23837 + Jefferson County School District + David Hicks + dhicks&509j.net +23838 + BIATEL Systemy Komputerowe Sp. z oo. + Piotr Chomczyk + piotr.chomczyk&biatel.com.pl +23839 + GROX Networks + Dave Capella + iana-pen&grox.net +23840 + School District #57 (Prince George) + Kris Benson + kbenson&sd57.bc.ca +23841 + RAD-OP + Iftah Bratspiess + iftah&web-silicon.com +23842 + Kuban Pharm Stocks, Ltd. + Boris Samorodov + bsam&ipt.ru +23843 + Departamento de Ciência da Computação/Universidade Federal da Bahia + Guillaume Barreau + g.barreau&gmail.com +23844 + Constellation Brands, Inc. + Anthony Pochmara III + anthony.pochmara&cbrands.com +23845 + Schowalter, GbR + Christian Schowalter + admin&schowalter.org +23846 + Endress+Hauser InfoServe GmbH+Co. KG + Jan Heilbrunner + jan.heilbrunner&infoserve.endress.com +23847 + Elogic snc di Antonini Filippo + Antonini Filippo + info&elogic-net.com +23848 + Middle Kingdom - SCA, Inc. + Dave Majors + dafydd&midrealm.org +23849 + PACSGEAR, Inc. + Brian Cavanaugh + brian&pacsgear.com +23850 + Zyberit AB + Håkan Persson + hakan&zyberit.com +23851 + CCI Consulting Ltd. + Colin Campbell + netadm&ccicon.com +23852 + Seeing Machines Pty Ltd + Nick Cerneaz + nick.cerneaz&seeingmachines.com +23853 + eThinkSolutions,Inc. + Larry Stilwell + lstilwell&ethinksolutions.com +23854 + Shipbuilding SS + You Yu + you_yu324&yahoo.com.cn +23855 + Bullant Software + Chris Wooldridge + support&bullant.com.au +23856 + Imtec Imaging, LLC + Brad Vance + brad&imtec.com +23857 + NextIO, Inc + Rick Trujillo + support&nextio.com +23858 + VadaTech Inc. + Saeed Karamooz + saeed&vadatech.com +23859 + Puntoblu Srl + Francesco Cerbini + cerbini&puntoblu.it +23860 + Schubert-Unternehmensgruppe + Mario Wolff + mario.wolff&schubert-gruppe.de +23861 + redweb.cz + Petr Okurek + redweb&redweb.cz +23862 + AimValley B.V. + Gert H.
Manhoudt + gmanhoudt&aimvalley.nl +23863 + Fort Hays State University + Aaron Roe + agroe&fhsu.edu +23864 + Cascadia Information Technologies, Inc. + Julian Blake Kongslie + jblake+iana&omgwallhack.org +23865 + Synapps + Francisco Laguna + fla&synapps.de +23866 + Starnix Care Inc. + Kevin Glendenning + kevin&starnix.com +23867 + Silver Peak Systems, Inc. + George Azzi + gazzi&silver-peak.com +23868 + sis-tec + mitsuhiro nagahara + nagahara&sis-tec.co.jp +23869 + HIOKI E.E.Corporation + Takashi Nakazawa + kuri&hioki.co.jp +23870 + EMSTONE Co. + Dong-jun Lee + cometodj&emstone.com +23871 + Qno Technology Inc. + Uno_Chang + uno&qno.com.tw +23872 + Rikom d.o.o. + Tine Zorko + tine.zorko&rikom.si +23873 + MKC Michels & Kleberhoff Computer GmbH + L. Kleberhoff + l.kleberhoff&mkc-gmbh.de +23874 + ACLI Associazioni Cristiane dei Lavoratori Italiani + federico scaramuzza + federico.scaramuzza&acli.it +23875 + Gymnasium Münchenstein + Michael Weiss + michael.weiss&gymmuenchenstein.ch +23876 + PIXID SNC + Marc Mouttet + m.mouttet&pixid.fr +23877 + Grid Operation Center, Aristotle University of Thessaloniki, + Christos Kanellopoulos + goc&grid.auth.gr +23878 + Stanton Grove Ltd + Martin Phillips + mphillips&stantongrove.com +23879 + SAG Systemhaus GmbH AMS + Jörg Berning + joerg.berning&softwareag.com +23880 + Motionpath Digital Media Ltd + Robert Pitt + rob&motionpath.co.uk +23881 + Avanton Inc. + Robert Berkowitz + robert&avanton.com +23882 + Investidor Profissional Gestão de Recursos Ltda + Luiz Motta + tecnologia&invprof.com.br +23883 + Fresenius Medical Care North America + William Blackwood + william.blackwood&fmc-na.com +23884 + Fotofuxx + Thomas Fuchs + iana&fotofuxx.de +23885 + Continental City Networks Ltd + Ruban Selvarajah + continentalcitynetworks&hotmail.com +23886 + Hay Group, Inc. + Samuel C. Smith + samuel_smith&haygroup.com +23887 + Kawashima Labo Inc. + Yasuhide Takahashi + hostmaster&yasu-hide.com +23888 + Tymar Systems Development + N Z Cave-Lynch + nic&tymar.com +23889 + Daco System Elaborazione Dati S.r.l. + Alberto Panu + alberto.panu&dacosystem.it +23890 + HCL Peripherals + Balamuralikrishnan.H + balamurali&hclp.com +23891 + ErgoGroup + Knut Bjørnstad + knut.bjornstad&ergo.no +23892 + Milpower Source + Iftah Bratspiess + iftah&web-silicon.com +23893 + Klir Technologies, Inc. + John McCaskey + johnm&klir.com +23894 + Northshore School District + Ski Kacoroski + ski&nsd.org +23895 + EvilGenius Networks + Matt Addison + matt&evilgeni.us +23896 + Netus Vaizovic k.d. + Adi Vaizovic + adi&netusvaizovic.com +23897 + Trigger Software + Tarmo Randel + tarmo.randel&trigger.ee +23898 + Tesseract Corporation + Terry Kummell + terry_kummell&tesseract.com +23899 + SineRay Consulting Co.,Ltd + Yene-Haw pan + morsi&sineray.com.tw +23900 + Progetti Ambientali Integrati s.a.s. + Del Rio Matteo + delrio&osnetwork.it +23901 + Digigram + Michel Quaix + quaix&digigram.com +23902 + ABB STOTZ-KONTAKT GmbH + Christian Winnewisser + spam&de.abb.com +23903 + Cargol.net Solucions Lliures S.L.L. + Pep Turro + pep&cargol.net +23904 + The Nielsen Company (formerly 'Arbitron Inc.') + Nick Mannion + nick.mannion&nielsen.com +23905 + VOCAL Technologies, Ltd. + Tim Powell + vocal&vocal.com +23906 + Axalto Inc. + Shuh Chang + schang&axalto.com +23907 + Advantage Security, S. de R.L. de C.V. + Greg Werner + gwerner&advantage-security.com +23908 + InfoEng Organization + J. Patrick Bedell + jpb&infoeng.org +23909 + Bel Ami Reisen + Roberto Mischke + service&belami-reisen.de +23910 + iPoint-media LTD. 
+ Avi Sless + avis&ipoint-media.com +23911 + Aurus + Darius Pakusas + info&aurus.lt +23912 + Q-go + Pieter Ennes + hostmaster&q-go.com +23913 + Belle Bonfils Memorial Blood Center + Daniel J. Shafer + daniel_shafer&bonfils.org +23914 + pedro armelim unipessoal lda. + Pedro Armelim + pma&parmelim.net +23915 + SuperGeo Tech. Inc. + Jerry Huang + jerryhuang&supergeo.com.tw +23916 + Appear AS + Halvard Brennum + halvard.brennum&appear.net +23917 + Under Dusken + Erlend Hamnaberg + data&underdusken.no +23918 + JPBerlin + JPBerlin Admin + support&jpberlin.de +23919 + Corporación Tedexis C.A. + Robert Koch + rkoch&tedexis.com +23920 + Zoic Studios + Beau Gunderson + beau&zoicstudios.com +23921 + ISSP Internet-Solutions Szongott & Pertl OEG + Roman Pertl + hostmaster&issp.at +23922 + Milstein & Asoociates Inc. + Steven Milstein + steven&milstein-assoc.com +23923 + Hobnob + Drew Riconosciuto + drew&hobnob.com +23924 + readshaw.com + Neil Readshaw + neil&readshaw.com +23925 + Cacti + Ian Berry + iberry&raxnet.net +23926 + Golf-Leagues.com + Kevin Kovach + kkovach&kevinkovach.com +23927 + Ukrainian Catholic University + Oleksandr Panchuk + olex&ucu.edu.ua +23928 + MVV Energie AG + Markus Hildebrand + m.hildebrand&mvv.de +23929 + entire technologies gmbh + Michael Lippert + mlippert&entiretec.de +23930 + Inferno Enterprises + Glen Campbell + gcampbel&dante.com +23931 + Goodrich Corporation + Tim Flynn + tim.flynn&goodrich.com +23932 + TLD Solutions Ltd + Gary Wheeler + gari&techie.com +23933 + CaveraSystems + Paddy Vishnubhatt + paddyv&caverasys.com +23934 + April Nine Inc. + Luc Tourangeau + luc&aprilnine.com +23935 + Architects of VoIP GmbH + Thilo Roessler + thilo.roessler&architects-of-voip.de +23936 + china group zhejiang telecom corporation + yu chengwu + yucw&zjtelecom.com.cn +23937 + ExtricomLtd + Eran Shpak + micki&extricom.com +23938 + ANEDER BT + Ferenc Kulcsár + crusader&netbsd.hu +23939 + Avokia Inc. + Frankie Wong + frankie&avokia.com +23940 + Sysco S.p.A. + Marcello Palucci + palucci&syscospa.it +23941 + Opentaskforce + Harald Lang + opentaskforce&googlemail.com +23942 + Intelligent Mobile Solutions, Inc. + Juan Pablo Baserga + juanpablo.baserga&cyclelogic.com +23943 + Cendres+Métaux Holding SA + Hans Lerch + webmaster&cm-group.ch +23944 + DENIC eG + Jürgen Geinitz + sa-unix&denic.de +23945 + Institute of Journalism, University Dortmund + christian bossk holle + holle&ifj.fb15.uni-dortmund.de +23946 + MINISTERO DELL'INTERNO - DIREZIONE CENTRALE POLIZIA CRIMINALE + Dr. ANTONIO MAIORANO + cedsicurezzautenti&interno.it +23947 + Innovene LLC + Scott Reider + scott.reider&innovene.com +23948 + Confernet Produtos e Servicos Ltd. + Luiz Gustavo Nogara + suporte&wiz.com.br +23949 + Swyx Solutions GmbH + Uwe Sauerbrey + uwe.sauerbrey&swyx.com +23950 + Print Associates A/S + Christoph Lindemann + christoph.lindemann&printassociates.com +23951 + ExaProtect Technology + David Maciejak + dmaciejak&exaprotect.com +23952 + TJ COLLECTION Co Ltd + Druginin Vyacheslav + dv&tjcollection.ru +23953 + Australian Partnership for Advanced Computing + David Bannon + help-grid&vpac.org +23954 + TriNix AB + Jonas Björk + jonas&trinix.se +23955 + PUCCAMP - Pontificia Universidade Católica de Campinas + Carlos Roberto Schimidt + schimidt&puccampinas.edu.br +23956 + PCL Constructors Inc. 
+ Chris Palmer + oid&pcl.com +23957 + Spacelabs Medical Data + Jeff Malvern + jeff.malvern&slmd.com +23958 + 310, JSC + Stanislav Sedov + stas&310.ru +23959 + micneu + Michael Neumann + neumann&micneu.de +23960 + Acquist Incorporated + Christopher Crooker + chris&acquist.com +23961 + CertiMail + Rémi Poulet + poulet&certimail.fr +23962 + Saisung Corporation Ltd. + Oliver Pawellek + oliver.pawellek&saisung.com +23963 + Safe Host + Steven Chong + schong&safehostnet.com +23964 + T-Mobile Austria + Peter Neubauer + peter.neubauer&t-mobile.at +23965 + Detecon International GmbH + Stephan Wild + networks.debug&detecon.com +23966 + Leiner & Wolff GbR + Rüdiger Wolff + wolff&leiner-wolff.de +23967 + Tallence AG + Ronald Ebel + pen-admin&tallence.com +23968 + Netvoyager PLC + Jamil Aboulzelof + jamil&netvoyager.co.uk +23969 + arvato mobile + Lukas Loesche + lukas.loesche&arvato-mobile.de +23970 + Hamburg Südamerikanische Dampfschifffahrts-Gesellschaft KG + Stefan Werner + stefan.werner&ham.hamburgsud.com +23971 + Menzebach und Wolff IT-Consulting GbR + Andreas Wolff + aw-iana-enterprise&mw-itcon.de +23972 + Fidelity MLS Systems and Solutions + Scott Bishop + Scott.Bishop&fnf.com +23973 + Still Tripping Productions. + Shaun L. Anderson + glorisign&live.com +23974 + General Dynamics Robotic Systems + Brian Byrne + bbyrne&gdrs.com +23975 + et al Innovations Ltd + Dr Stuart Marsden + stuart&myphones.com +23976 + Zebanon Inc. + Daifu Li + cisc&263.net +23977 + My Computer Person + R. Dennis McHenry + iana.enterprise.registration&mycomputerperson.biz +23978 + Sadel S.p.a. + Daniele Guerra + snmp&sadel.it +23979 + Knoways s.r.l. + Daniele Guerra + daniele.guerra&knoways.com +23980 + STS GmbH & Co. OHG + Jochen Mueller + jochen-mueller&siemens.com +23981 + MWS + Antonio galdeano + antonio.galdeano&mws.fr +23982 + tty1.net + Thomas Pircher + tehpeh&tty1.net +23983 + Hampshire Constabulary + Simon House + simon.house&hampshire.pnn.police.uk +23984 + RyCo Electronic Systems Limited + Kevin N Cordery + kevin.cordery&rycoes.com +23985 + S.O.Ge.S.A. s.a.s. di Ugo Grassi & C. + Stefano Marocco + s.marocco&sogesa.it +23986 + ENENSYS Technologies SAS + Patrick Auffray + patrick.auffray&enensys.com +23987 + Autoridad Portuaria de Barcelona + Francesc Bonada + francesc_bonada&apb.es +23988 + Elink GmbH + Uwe Hoffmann + uhoffmann&elink.de +23989 + CableFree Networks + Stephen Patrick + info&cablefree.net +23990 + Shifa International Hospitals + Ishtiaq Ahmed + ishtiaqahmeds&yahoo.co.uk +23991 + Planeetta Internet Oy + Lauri Pitkanen + hostmaster&planeetta.net +23992 + Social Science Computing Cooperative + Dan Bongert + dbongert&ssc.wisc.edu +23993 + Linear Technology Corporation + Omar Sanchez-Felipe + osanchez&linear.com +23994 + OpenDarwin Project + Andrea Barisani + lcars&opendarwin.org +23995 + Eightball Productions + Jason A. Gullickson + jag&govner.com +23996 + Fuego Inc. + Eduardo Chiocconi + eduardoc&fuego.com +23997 + Florida Department of State + James A Crozier + canotify&dos.state.fl.us +23998 + RUF Telematik AG + Thomas Kern + iana-contact&ruf.ch +23999 + FTI Consulting, Inc + Mike Niland + mike.niland&fticonsulting.com +24000 + ABN AMRO Bank Services + Sion Camilleri + sion.camilleri&uk.abnamro.com +24001 + Fiege Logistik (Schweiz) AG + Mr Luigi Haefliger + lhaefliger&fiege.ch +24002 + Ingenieria Diez Cisneros, S.A. 
+ Juan Antonio Roman + jaroman&diezcisneros.com +24003 + Cellcom + Moti Morhayim + motimo&cellcom.co.il +24004 + Onus Petro Services + Ramchander Rao + ram_allola2&yahoo.co.in +24005 + Alexa Internet, Inc. + Dan Moniz + dan&alexa.com +24006 + Weidlinger Associates Inc. + Fabrice Guerini + fabrice&ca.wai.com +24007 + Globat.com + Romain Komorn + rkomorn&globat.com +24008 + Great Bay Software + Jason Damron + jdamron&greatbaysoftware.com +24009 + NextHop Technologies, Inc. + Michael Cook + cook&nexthop.com +24010 + ELESIGN + Shim, Kyu Nam + gyunami&elesign.com +24011 + Newtech.Co.,Ltd. + Yukio Takano + takano&newtech.co.jp +24012 + eASPNet Taiwan Inc. + Emay Liu + emayliu&easpnet.com +24013 + RTX Telecom A/S + Peter B. Mariager + pm&rtx.dk +24014 + Landesversicherungsanstalt Rheinprovinz + B. Scholze + bernd.scholze&lva-rheinprovinz.de +24015 + CODAFIX SYSTEM + IT department + support&codasystem.com +24016 + Bionix AB + Väino Vaher + vv&bionix.se +24017 + teegee + Thomas Groß + thomas.gross&teegee.de +24018 + Frederikshavn Gymnasium og HF-kursus + Orla Møller + orla.moeller&frhavn-gym.dk +24019 + CertiPath, Inc + Jeff Nigriny + support&certipath.com +24020 + Metagistics + Gene Sohn + genesohn&yahoo.com +24021 + 13 Spiders Ltd + Michael Salway + oid-admin&spidergroup.co.uk +24022 + Optelian Access Networks Corp. + Adriana Diaconu + adriana.diaconu&optelian.com +24023 + IEA Software, Inc. + Dale E. Reed Jr. + support&iea-software.com +24024 + Tecsidel, S.A. + Isidoro Legido Martínez + isidoro.legido&tecsidel.es +24025 + MVP Software, Inc. + Chris Snyder + csnyder&mvpsoft.com +24026 + Cube Route + KL Wong + kl.wong&cuberoute.com +24027 + Dexa Medica Group + Wimala Widjaja + wimala.widjaja&dexa-medica.com +24028 + Bayerwald Fenster Haustueren GmbH & Co. KG + Martin Stockinger + oidadmin&bayerwald-fenster.de +24029 + HEYFRA electronic GmbH + Christian Bornschein + c.bornschein&heyfra.de +24030 + CenterTools Software GmbH + Udo Riedel + udo.riedel&centertools.de +24031 + Nomus Comm-Systems + V. V. Prasad + vvprasad&nomus.net.in +24032 + dmstools AG + Markus Woessner + oid&dmstools.de +24033 + Peerex Ltd + Timothy Hinchcliffe + iana-oid&tim.hinchcliffe.peerex.net +24034 + The Royal Botanic Gardens, Kew + Ken Bailey + iana&kew.org +24035 + AFIP + Sebastian Guarino + sguarin&afip.gov.ar +24036 + howard.org + Bruce Howard + bruce&howard.org +24037 + Revolt Ltd. + Craig Taylor + ldap&revoltltd.org +24038 + VistaPrint USA Inc. + Jim Sokoloff + jsokoloff&vistaprint.com +24039 + Lutz Badenheuer + Lutz Badenheuer + lutz.badenheuer&t-online.de +24040 + BARREL Investment Company Ltd. + Maksim Ilichev + support&barrel.ru +24041 + M7 Software BV + Tom Schoute + tschoute&m7software.com +24042 + AMS 1769 Ltd + Brian Candler + b.candler&pobox.com +24043 + Tanel Unt + Tanel Unt + wo&bill.ee +24044 + Cthulhu Inc. + Reka Karolyi + mithos&cthul.hu +24045 + Dwerryhouse IT Consulting + Paul Dwerryhouse + paul&dwerryhouse.com.au +24046 + TMSw Corp. + Kiminori Hirose + khirose&tms-w.com +24047 + Netfabric Corporation + Joe Welfeld + jwelfeld&netfabric.net +24048 + United Devices + Ivo Janssen + ivo&ud.com +24049 + eApps + Hosting Services + support&eapps.com +24050 + Orion Systems International + Paul Kendall + paul.kendall&orionhealth.com +24051 + Seneca Software & Solar, Inc. + Rob Savoye + rob&senecass.com +24052 + Wedo Consulting + Pedro Duque + pedro.duque&wedoconsulting.com +24053 + Xspedius Communications + Jason Trahan + jason.trahan&xspedius.com +24054 + NextNine Ltd.
+ Alex Toker + atoker&nextnine.com +24055 + Govolution, Inc. + Gary Morris + gmorris&govolution.com +24056 + JDI ICT + I. Palsenberg + i.palsenberg&jdi-ict.nl +24057 + Robert D. Allen + Robert Allen + zircote&io2services.com +24058 + AltCall Pty Ltd + Nick Hoffman + nick.hoffman&altcall.com +24059 + Zuzax LLC + C Wisniewski + domains&zuzax.com +24060 + icube + Jong-Seong Park + jspark&icube.co.kr +24061 + Amtium Computer Network Communiction Tech. CO,LTD + Jisheng Lv + ljs&amtium.com +24062 + Beijer Electronics Corp. + Steven Yang + steven.yang&beijerelectronics.com +24063 + NetComm Wireless Ltd (formerly 'Call Direct Cellular Solutions Pty. Ltd') + Mike Cornelius + mike.cornelius&netcommwireless.com +24064 + Florian Hagedorn IT-Services + Florian Hagedorn + mail&florian-hagedorn.de +24065 + TelcoSI + John Wood + info&telcosi.com +24066 + Beijing WaterTek Information Technology Co.,Ltd. + Wang Xiaowei + xiaowei&watertek.com +24067 + Webraska do Brasil + Luciano Kiniti Issoe + miagi&webraska.com.br +24068 + Tier-3 Pty Ltd. + Geoff Sweeney + geoffs&tier-3.com +24069 + IP Systems Pty Ltd + Eugene Ryan + netadmin&ipsystems.com.au +24070 + AMANO Corporation + Yoshiharu Takahashi + Yoshiharu_Takahashi&amano.co.jp +24071 + Zenitel Radioteknik + Pär Åhlund + par.ahlund&zenitel.biz +24072 + Code Mercenaries Hard- und Software GmbH + Christian Lucht + lucht&codemercs.com +24073 + Abeo AS + Jan Erik Edvardsen + domains&abeo.no +24074 + ASI Controls + Paul Chapman + paul&asicontrols.com +24075 + ELIA N.V. / S.A. + Danny Alexandre + danny.alexandre&elia.be +24076 + National Institute for Public Health and the Environment + Paul van Westerlaak + Paul.van.Westerlaak&rivm.nl +24077 + Zeus Informationstechnologie GmbH + Fabian Becker + f.becker&zeus.de +24078 + ISVA-Project Team (EADS-ISR, FHG-IITB, FGAN-FOM, FGAN-FKIE) + Dr. Ralf-Peter Eule + ralf-peter.eule&eads.com +24079 + Cacanska banka a.d. Cacak + Dejan Ristic + dejan&cacanskabanka.co.yu +24080 + key-stone BV + drs H.T. Koster + koster-h&key-stone.nl +24081 + Paxfire Inc + Mike Sullivan + msullivan&paxfire.com +24082 + GTL Limited + Nitin Thakur + nitint&gtllimited.com +24083 + Securitect + Sian Liu + sianliu&securitect.com +24084 + Managed Network Solutions, Inc. + Xavier Chapa + xavier&managednetworks.com +24085 + Fitre S.p.A. + Alessandro Peverelli + alessandro.peverelli&fitre.it +24086 + Medizinische Hochschule Hannover + Ralf Weiss + weiss.ralf&mh-hannover.de +24087 + International Grid Trust Federation + David Groep + oid-manager&eugridpma.org +24088 + Diamelle inc + Suneet Shah + suneet&diamelle.com +24089 + Lexbox + Stephane Guillon + stephane.guillon&lexbox.fr +24090 + Central Library, Aristotle University of Thessaloniki + Triantafellos Hatziantoniou + clib&physics.auth.gr +24091 + SEW-EURODRIVE GmbH & Co KG + Michael Kaufmann + michael.kaufmann&sew-eurodrive.de +24092 + I-Way Computers bvba + Miguel Jamous + miguel.jamous&iway.be +24093 + JSC Format Link + Alexey Pervushin + ramses&fmshop.ru +24094 + UniBwM + Kai Freytag + kai.freytag&unibw.de +24095 + MioSoft Corporation + MioSoft Internet Number Contact + support&miosoft.com +24096 + University of Douala + Elie Martial MABO + mtadjo&yahoo.fr +24097 + Thoralf Linss + Thoralf Linss + thoralf.linss&tlits.de +24098 + Southwest Youth Collaborative + Benjamin J Doherty + bjd&swyc.org +24099 + Hosanna Christian Academy + Matthew Eastman + oid-admin&hcablazers.org +24100 + LogiSoft AR Ltd, Inc.
+ Alan Revzin + alanr&logisoftar.com +24101 + Banyan Tree Hotels & Resorts Pte Ltd + Adrian Teo + adrian.teo&banyantree.com +24102 + Aeris Enterprises Inc. + Chris Sargent + csargent&aerisinc.com +24103 + DIR Wieslawa Rozynek + Bartosz Supczinski + Bartosz.Supczinski&dir.pl +24104 + Lennar Corporation + Juan Gomez-Sanchez + Juan.Gomez-Sanchez&Lennar.com +24105 + di-Cross + Liu Yong + ly7110&gmail.com +24106 + Damovo do Brasil S/A + Manoel Messias de Souza + manoel.messias&damovo.com +24107 + xkoto Inc. + Jeff Heisz + jeff.heisz&xkoto.com +24108 + UAB "Eltera" + Danas Jazdauskas + danas&eltera.lt +24109 + Luix + Luis Lain + luislain&luislain.com +24110 + Vigilix. LLC + Randy Lee + randy.lee&vigilix.com +24111 + Optivus Technology, Inc. + Michael Baumann + baumann&optivus.com +24112 + Shorter College + Grant Carmichael + gcarmichael&shorter.edu +24113 + Netzbiotop Dresden e.V. + Frank Benkstein + frank&benkstein.net +24114 + City of Tulsa + Darren Fritz + oidregistration&ci.tulsa.ok.us +24115 + Ginix + Gert Illemann + gerti&inform.dk +24116 + 68k.se + Martin Nilsson + martin&68k.se +24117 + Idera + Shekhar Vaidya + shekhar.vaidya&idera.com +24118 + artemis-design GbR + Alexander Stintzing + astintzing&artemis-design.de +24119 + eSimone + Ettore Simone + ettore.simone&esimone.net +24120 + JSDAAV INTERNET SOLUTIONS CC + BENNIE JOUBERT + bennie.joubert&jsdaav.com +24121 + Centro Nacional de Infromación y Comunicación Educativa + Juan Pérez + juan.perez&cnice.mec.es +24122 + Brodersen Controls A/S + Ole Borgbjerg + ob&brodersencontrols.com +24123 + Sparkasse Haslach-Zell + Hans-Juergen Neumaier + it&sparkasse-haslach-zell.de +24124 + Aqon Ltd. + Frank Rabitsch + frank&aqon.com +24125 + OceanLake Asia Pte Ltd. + Narendranath Reddy + nreddy&oceanlake.com +24126 + Junxion, Inc. + Peter Polson + ppolson&junxion.com +24127 + UUHosting.Net + Donald Wilson + webmaster&uuhosting.net +24128 + Casino Arizona + Steve Tolle + stevet&casinoaz.com +24129 + International Road Dynamics Inc. + Ian Meier + ian.meier&irdinc.com +24130 + Oneformatics + P. Ennes + iana.org&spam.ennes.net +24131 + thePlatform for Media, Inc + Mark Schliemann + mark.schliemann&theplatform.com +24132 + zyp.org + Christopher P. Cashell + topher-ldap&zyp.org +24133 + Calassa Labs Ltd + Dan Lumenko + dan&calassa.com +24134 + Torex Retail Solutions GmbH + Andreas Koschinsky + andreas.koschinsky&torexretail.de +24135 + Smartcom-Bulgaria AD + Simeon Ivanov + simeon_ivanov&smartcom.bg +24136 + Schur Pack Danmark a/s + Brian Egtved + beg&schur.com +24137 + Optimum Computing Solutions + Joseph A. Otto, Jr. + joe&theottos.org +24138 + JSC TaxNet + Paul Ivanov + paul&taxnet.ru +24139 + RFK International + René Frank Kristiansen + rene-fk&rfkint.dk +24140 + VoiceObjects AG + Stefan Besling + SBesling&voiceobjects.com +24141 + T-2, d.o.o. + Matevz Turk + matevz.turk&t-2.net +24142 + Bunya Technology Pty Ltd + Brian Scott + support&bunyatech.com.au +24143 + MallNet Ltd. + Yiyi Nai + mallnet&hotmail.com +24144 + Mobillion BV + Bjorn Hijmans + beheer&mobillion.nl +24145 + ECS GmbH + Johannes Denck + pen_oid&ecs-gmbh.de +24146 + Zone Systems + Martin Zeuthen + martin.zeuthen&zonesysdev.dk +24147 + Ex Libris Ltd.
+ Uri Livnat + exlibris.admins&exlibris.co.il +24148 + DATALAB DO BRASIL LTDA + Noely Moraes + noely&datalabrasil.com.br +24149 + Milestone Systems A/S + John Blem + jb&milestone.dk +24150 + TIM BRASIL + ANA PAULA COSTA + apcosta&timbrasil.com.br +24151 + Inforsis Informatica y Formacion, SL + Ricardo Alos + inforsis&inforsis.com +24152 + OTA Management LLC + Matthew Huff + mhuff&ox.com +24153 + Defense Finance and Accounting Service + Richard Fabian + richard.fabian&dfas.mil +24154 + Moog Components Group + Shad Northrop + snorthrop&moog.com +24155 + Mission Hopitals + Anne Desotelle + anne.desotelle&msj.org +24156 + Intelliant + Franz Viaud-Murat + f.viaudmurat&intelliant.fr +24157 + Spectrum Signal Processing, Inc. + Dan Weekley + dan_weekley&spectrum-usa.com +24158 + Landstar System Holdings, Inc. + Rob Wojczyk + rwojczyk&landstar.com +24159 + Neureol Technologies Private Limited + Rajan Kasiviswanathan + rajan&neureol.com +24160 + Cool IP + Kelly Prescott + prescott&deltav.org +24161 + MLL Telecom Limited + S Haynes + s.haynes&mlltelecom.co.uk +24162 + CareWorks vof + Niels Klomp + n.klomp&careworks.nl +24163 + SUPERMEDIA Internet Provider + Piotr Meyer + pmeyer&supermedia.pl +24164 + KBOSS.hu Kft. + Verhás István + vi&kboss.hu +24165 + Digi-Sign Limited + Przemek Michalski + przemek.michalski&digi-sign.com +24166 + Centro de Estudos e Sistemas Avançados do Recife + Carlos Sampaio + carlos.sampaio&cesar.org.br +24167 + WEYCO, INC + Joseph Dickson + jdd&weyco.com +24168 + GMAC + Kimberly Stratton + kim.stratton&gm.com +24169 + Sunsetbrew Inc. + Thomas Davis + sunsetbrew&sunsetbrew.com +24170 + Motricity, Inc. + Thomas Davis + thomas.davis&motricity.com +24171 + 8e6 Technologies + Todd Jackson + tjackson&8e6.com +24172 + Tenaska Power Services, Co + Michael Bohan + itmail&tnsk.com +24173 + Rho, Inc. + Bruce Walter + bwalter&rhoworld.com +24174 + Institutional Shareholder Services, Inc. + Sean B. Moore + sean.moore&issproxy.com +24175 + SILVER SERVER GmbH + Team SysAdmin, Raimund Sacherer + nimda&sil.at +24176 + Comgate Engineering Ltd. + Tony Capel + capel&comgate.com +24177 + Tivit S.A. + Gustavo Torres + gustavo.torres&tivit.com.br +24178 + Stillwater Medical Center Authority + Mark Reed + mreed&stillwater-medical.org +24179 + Ministerie van Binnenlandse Zaken en Koninkrijksrelaties + Mr. P. IJtsma + peter.ijtsma&isc.politie.nl +24180 + Tgusta.com C.A + Francisco Salcedo + presidencia&tgusta.com +24181 + Fri-Jado B.V. + Ramon Mangelaars + ramon.mangelaars&fri-jado.nl +24182 + SRH + Beckert, Dirk + oid&srh.sachsen.de +24183 + Vimio PLC + Gothe Lindahl + gothe.lindahl&vimio.com +24184 + MEDNET Service für Ärzte AG + Uwe Eissing + uwe.eissing&mednet.de +24185 + Kvalix Automatika Kft. + Peter Forro + forro&kvalix.hu +24186 + Netsol Network Solutions Oy + Timo Virtaneva + timo.virtaneva&netsol.fi +24187 + LambdaStream + Miguel Barreiro + mbpaz&lambdastream.com +24188 + Mail Object + Pascal Voyat + pvoyat&mailobject.com +24189 + Cain Computing + Daniel J Cain Jr + daniel&CainComputing.com +24190 + Distributed Systems Services, Inc. + Scott Kantner + skantner&dsscorp.com +24191 + Digital Fugue + Geert Jansen + geert&digitalfugue.com +24192 + trial-n-error + Mario Scholz + mario.scholz&web.de +24193 + Strandbygaard + Martin Strandbygaard + martin&strandbygaard.net +24194 + Mary Kay Inc. 
+ Eric Brock + eric.brock&mkcorp.com +24195 + i5k.net + Iskandar Najmuddin + iana&i5k.net +24196 + OSEB NV + Marc Van Laer + mvl&oseb.be +24197 + Colliers Turley Martin Tucker + Mark Masson + mmasson&ctmt.com +24198 + Netcordia, Inc. + Paul Walters + support&netcordia.com +24199 + Primagraphics Ltd + Ian Hamilton + idh&primagraphics.co.uk +24200 + Quality Software Solutions Ltd. + Adnan Selimovic + support&qss.ba +24201 + Adrenio GmbH + Marcel Tuor + m.tuor&adrenio.com +24202 + MIKRONIKA + Krzysztof Marianski + kmarianski&mikronika.com.pl +24203 + Wellington Technology, Inc. + James Paul Duncan + pduncan&wellington-tech.com +24204 + Randolph Products Company + Craig A. Lampani + admin&randolphproducts.com +24205 + JUPITER TECHNOLOGY(WUXI)CO.,LTD + Kevin Huang + Huang_Kevin&Mtiw.com.cn +24206 + EGROUP Services Ltd. + Andras NAGY + andras.nagy&egroup.hu +24207 + Deimos-Space S.L. + Javier Arranz + javier&deimos-space.com +24208 + Cystelcom Sistemas S.A. + David Ramirez + dramirez&cystelcom.com +24209 + MWare CZ, s.r.o. + Pavel Machula + info&mware.cz +24210 + ComWorth Co.,Ltd. + Genki Miyamura + miyamura&comworth.co.jp +24211 + Solent Technology Ltd. + Cliff Gauntlett + cliff&solenttec.net +24212 + Conexim Australia Pty Ltd + Jonathan Thorpe + jthorpe&conexim.com.au +24213 + Oxford Health Plans, Llc. (subsidiary of UnitedHealth Group) + Mathias Rysse + mrysse&uhc.com +24214 + Mount Holyoke College Computer Science Department + Adam Goodman + agoodman&mtholyoke.edu +24215 + Reed Technology and Information Services, Inc. + Stephen Sarachman + ssarachman&reedtech.com +24216 + uptime software inc. + uptime Development Manager + info&uptimesoftware.com +24217 + j-buzz, LLC + Aakash Chopra + oid.admin&j-buzz.com +24218 + Metaweb Technologies, Inc. + David Swift + dswift&metaweb.com +24219 + nuBridges, LLC + Darin Engle + netops&nubridges.com +24220 + Marcelo Rinesi + Marcelo Rinesi + mrinesi&fibertel.com.ar +24221 + Donorware LLC + Bruce Milne + BruceMilne&DonorWare.com +24222 + Resolute Networks Ltd. + David Brief + davidb&resolutenetworks.com +24223 + Fabryka Stron Internetowych Sp. z o.o. + Konrad Lapin + tech&fsi.pl +24224 + Southern Regional Education Board + Joseph Daigle + joe.daigle&sreb.org +24225 + Formula Telecom Solutions + Liran Ravid + liranr&fts-soft.com +24226 + Braxcomm Eletrônica Ltda. + Bernardo Barreto + braxcomm&uol.com.br +24227 + Negeso Ukraine Ltd + Sergey Sholokh + sergey.sholokh&negeso.com +24228 + HOT Technology + David Veronese + dave&hot.co.nz +24229 + arvato technology GmbH + M. Redeker + marcus.redeker&bertelsmann.de +24230 + Verint Systems Canada Inc. + Alexis B. Deschamps + alexis.deschamps&verint.com +24231 + GUFI - Gruppo Utenti FreeBSD Italia + Riccardo Torrini + vic&gufi.org +24232 + 6th Sense Software, Inc. + Todd Olson + todd&6thsensesoftware.com +24233 + Ikatu + Pablo Hoffman + nospam&pablohoffman.com +24234 + VuCOMP + Brant Lewis + brant.lewis&vucomp.com +24235 + Hannum Computer Service + Tom Hannum + tom&hcscomputing.com +24236 + NStudioCorp + Stan Firstov + stan&firstov.com +24237 + ResourceChain Inc. + Jason George + jason.george&resourcechain.com +24238 + PACLABS Technology and Innovation, Inc. + Max (Skip) Arnold + mparnold&paclabstech.com +24239 + V-shine Co., Ltd + Weiwei Zheng + zhenww02&mails.tsinghua.edu.cn +24240 + TongSoft Tech. + Li Shouming + lsming&tongsoft.com.cn +24241 + Accucast, Inc. 
+ Scott Cudney + scudney&socketware.com +24242 + SimWorks International Limited + Greg Amer + gamer&simworks.biz +24243 + Amrita Institute of Medical Sciences + Nikhil Sharma + nikhilsharma&aimshospital.org +24244 + IntroMobile CO., Ltd + Rho Hyoung Seok + darkeye75&intromobile.com +24245 + OSS Corporaton + Vladimir Baranov + oss2000&oss.ru +24246 + MGP "Mosvodokanal" + Alexey Cherkasov + lexa&mosvodokanal.ru +24247 + Bettge + Lutz Bettge + Lutz&Bettge.com +24248 + Stockway Oy + Jouni Stam + jouni.stam&stockway.fi +24249 + Clinique Saint-Luc (Bouge) + Gerd Michalke + gmichalk&tiscali.be +24250 + MATRICE SYSTEM S.A.S. + Stéphane PILLON + pillon.s&matricesystem.fr +24251 + Banca24-7 S.p.A. + Enzo Paoloni + enzo.paoloni&banca247.it +24252 + ESG GmbH + Alexander Gruener + agruener&esg.de +24253 + Systems Trust Co., Ltd. + Naoki Kobori + nkobori&stcj.co.jp +24254 + C4 Solutions Oy + Teemu Mäkelä + teemu&c4solutions.fi +24255 + Outpost24 AB + Jesper Birch + birch&outpost24.com +24256 + Deutsche WertpapierService Bank AG + Ralf Hoops + ralf.hoops&dwpbank.de +24257 + Idtect + Emmanuel FAUX + efaux&idtect.com +24258 + Ministry of the Flemish Community + Wai-Kong Yu + waikong.yu&vlaanderen.be +24259 + Achilles Information Limited + David Bannister + david.bannister&achilles.com +24260 + VIELAGE + regis martin + regis.martin&ssm-pasdecalais.canssm.fr +24261 + Mobeon AB + Ronny Jonsson + ronny.jonsson&mobeon.com +24262 + TEAM S.A + Roberto Murga + rmurga&ingeteam.es +24263 + up to data professional services GmbH + Norbert Bittner + norbert.bittner&uptodata.de +24264 + Neusta GmbH + Boris Blaha + bblahaysker&neusta.de +24265 + The British Museum + Damian Matuszczyk + dmatuszczyk&thebritishmuseum.ac.uk +24266 + Infoscope Kft. + Dohányos András Balázs + balazs.dohanyos&infoscope.hu +24267 + Directory Enabled Solutions + Sean Taylor + sean&desgroup.net +24268 + Opendium Ltd + Harry Mills + h.mills&opendium.com +24269 + CR2 LTD + Cristian Costache + cristian.costache&cr2.com +24270 + S3 Srl Servizi Soluzioni Sistemi + Esposito Aldo + aldo.esposito&scubo.it +24271 + Nessus Internet Services Florian Schicker + Christian Hofstaedtler + ch&nessus.at +24272 + Università della Svizzera italiana + Mario Gay + directory&ti-edu.ch +24273 + nihilistic.org.uk + Steven Thurgood + oid&nihilistic.org.uk +24274 + Beyond Security Inc. + Noam Rathaus + support&beyondsecurity.com +24275 + IUT Verdun + FURIET + furiet&univ-nancy2.fr +24276 + Universidad de Oriente + Nelson Vicuna + nvicuna&udo.edu.ve +24277 + Tiaxa do Brasil Ltda. + Cecilia de Paula e Maia + cmaia&tiaxa.com.br +24278 + Università Campus Bio-Medico + Marco Venditti + m.venditti&unicampus.it +24279 + LDAP Administration Console (opensource project) + Jamie Beverly + jbeverly1&tampabay.rr.com +24280 + HdL Coren & Cone + Tom Hannum + tomh&hdlccpropertytax.com +24281 + Family Health International + Douglas Wilkins + dwilkins&fhi360.org +24282 + StackFrame, LLC + Gene McCulley + mcculley&stackframe.com +24283 + dns Ltd + Stuart Fraser + stuart.fraser&dns.co.uk +24284 + wesde + Karl-Heinz Welter + karl-heinz.welter&wesde.com +24285 + Novax Industries Corporation + Lei Cao + lei_c&novax.com +24286 + HumanConcepts + Piotr Smol + piotr.smol&humanconcepts.com +24287 + Telex Communication Inc. + Shawn Anderson + shawn.anderson&us.telex.com +24288 + BHMS Webhosting + Bart-Jan Vrielink + bjv&bhms-groep.nl +24289 + HotFeet GmbH + Juraj Skripsky + js&hotfeet.ch +24290 + Datorföreningen vid LU & LTH + Tomas Gradin + system&df.lth.se +24291 + E-Planit Systems, Inc. 
+ Tom Thomas + tthomas&e-planit.com +24292 + BRAKKE SCHAFNITZ INS BROKERS INC + KYLE JEROME THOMPSON + kthompson&brakkeschafnitz.com +24293 + Toyota New Zealand Ltd + Alan Way + afway&toyota.co.nz +24294 + Air Broadband Communications + Veera Hatte + veera&airbb.com +24295 + Pickwick Consulting + Stephen Primost + sprimost&cox.net +24296 + DENSO WAVE INCORPORATED + Tatsuya Yamamoto + tatsuya.yamamoto&denso-wave.co.jp +24297 + Sonic Solutions + Robert Petersen + rpetersen&roxio.com +24298 + Freudenberg Dichtungs- & Schwingungstechnik GmbH & CoKG + IT Service FCC + itservice.fcc&freudenberg-ds.com +24299 + go4teams GmbH + Daniel Tietze + daniel.tietze&go4teams.com +24300 + Wotif + Gavin Graham + technical&wotif.com +24301 + stj company + xia zhan gao + xzg302&yahoo.com.cn +24302 + Network constructor, Core Constructor , Kun Shan University + Feng-Pin Lo + abuse&mail.ksu.edu.tw +24303 + Pegacat Pty Ltd + Dr Christopher Betts + chris&pegacat.com +24304 + ArgusConnect Pty LTD + Andrew Shrosbree + andrew.s&argusconnect.com.au +24305 + INBOS Pty LTD + Gerry Marthe + gmarthe&inbos.com.au +24306 + 3 Vias Technologies + Darryl Hoch + darryl.hoch&3-vias.com +24307 + Activesec S.A. + Rodrigo Seguel + rseguel&activesec.biz +24308 + Media & Telecom Ventures + Erskine R. Curry + telecomventures&aol.com +24309 + Terrascale Technologies Inc. + Jean-Francois Brousseau + jfb&terrascale.net +24310 + Medical Information Technology, Inc. + Phil Polimeno + ppolimeno&meditech.com +24311 + SunHELP + Bill Bradford + mrbill&mrbill.net +24312 + Identity Automation, LP + Troy Moreland + troy.moreland&identityautomation.com +24313 + TiendaLinux.com + Nestor A. Diaz + nestor&tiendalinux.com +24314 + What Creek + John Birrell + jb&what-creek.com +24315 + Fatih University + emrullah kaya + ekaya&fatih.edu.tr +24316 + IT bridge.net s.r.o. + Petr Burdik + pet&itbridge.net +24317 + torrini.org + Riccardo Torrini + riccardo&torrini.org +24318 + InTouch B.V. + Adrianus Warmenhoven + adrianus&intouch.nl +24319 + VOCONS IT Consulting GmbH + Juergen Vollmar + j.vollmar&vocons.de +24320 + ELVAC + Jan Kordas + jan.kordas&elvac.eu +24321 + NetMagic Ltd. + Zalan Arpad + azalan&netmagic.hu +24322 + Petastor, Inc. + George Chiu + george_chiu&petastor.com.tw +24323 + Deephaven Ltd + Graham Powell + graham&deephaven.co.uk +24324 + Electric-Tronics Incorporated + John Garms + jgarms&e-tronics.com +24325 + OGL Computer Services Group Ltd + Adrian James + Adrian.James&ogl.co.uk +24326 + Exsys + Brice Soler + brice.soler&exsys.fr +24327 + Estacha Proyectos y Servicios + Miguel A. Arévalo + marevalo&estacha.com +24328 + Spirit AeroSystems, Inc. + Kenneth B. Frazier + kenneth.b.frazier&spiritaero.com +24329 + Lixil + Joel Johnson + mrjoel&lixil.net +24330 + In Reach Technology + Robert G. Werner + robert&inreachtech.net +24331 + Evolix + Gregory Colpart + reg&evolix.fr +24332 + Hardware Online AS + Raymond Julin + raymond&hardware.no +24333 + FH Ingolstadt + Juergen Metzger + metzger&fh-ingolstadt.de +24334 + EADS DS UK Ltd + Darren Learmonth + darren.learmonth&eads.com +24335 + Storewiz Inc. + Tzahi Shahak + Tzahis&Storewiz.com +24336 + ATM Express, Inc. + Shawn A. Wilson + shawnw&atmreports.com +24337 + Monster Cable Products Inc. + Joe Keegan + jkeegan&monstercable.com +24338 + Situs Management Inc. + Shinji KOMEDA + enterprise-number&situs.co.jp +24339 + ADLINK TECHNOLOGY INC. + Ryan Hsu + ryan.hsu&adlinktech.com +24340 + Macalester College + Ted Fines + fines&macalester.edu +24341 + Pontis Inc. 
+ Alex Sokolov + alex.sokolov&pontis.com +24342 + FIO Labs, LLC + Robert Allen + zircote&io2services.com +24343 + Rokonet Electronics LTD. + Eli Brin + eli.brin&rokonet.co.il +24344 + eSPECTRUM Internet-Solution GmbH + Andreas Markuse + mib&espectrum-gmbh.de +24345 + Xsite GmbH + Dirk Proepper + proepper&xsite.de +24346 + Adaway Consulting + Simon Adaway + simon&adaway.org.uk +24347 + Magicpot Inc. + SEKI Tetsuo + seki&magicpot.co.jp +24348 + GPTech GmbH + Frank Stein + info&gptech.de +24349 + Frank Agerholm + Frank Agerholm + franka&serverwg.de +24350 + Sensoria Corporation + Ron Dippold + rdippold&sensoria.com +24351 + Linux Network Care Inc + Sutharsan Kathirgamu + xerophyte&linuxnetworkcare.com +24352 + Alpha Centauri Software Limited + Lynden Sherriff + sidvault&alphacentauri.co.nz +24353 + Media Technology Systems Inc. + Steve Woolley + steve.woolley&mediatechnologysystems.com +24354 + secscend LLC + Chris Scheper + chris.scheper&secscend.com +24355 + Lemon I.T. Co., Ltd. + James Chen + james.chen&lemon.cx +24356 + Solinus, Inc. + Mark Maurer + mark&solinus.com +24357 + CD-Telematika a. s. + Jirí Netolický + netolicky&epos.cd.cz +24358 + Nomics + Technik + tech&nomics.de +24359 + Deutsche Rentenversicherung Mitteldeutschland + Stefan Cyriax + Stefan.Cyriax&drv-rzl.de +24360 + Transmitton Ltd + Alan Cuff + alan.cuff&transmitton.co.uk +24361 + InnovaSON + Vincent RECIPON + v.recipon&innovason.com +24362 + Duquesne University + Jeff Fine + fine&duq.edu +24363 + Super Star enterprises + Patrick North + superstarent&comcast.net +24364 + ON Semiconductor + Mike Andl + mike.andl&onsemi.com +24365 + Constellation Energy + Richard Snader + NOC&constellation.com +24366 + gui.org + Shawn Amundson + sta-xuqrdtmm&gui.org +24367 + Somerdata Ltd + Edwin Kayes + edwin.kayes&somerdata.com +24368 + Paat Software + Patalenszki Attila + pa0026&stud.unideb.hu +24369 + Medem, Inc. + Jessica Formoe + jessi.formoe&medem.com +24370 + Chang Cheng Information consultant Co., LTD + Frank Hsieh (Hsieh Chang Ming) + frank&cms.com.tw +24371 + London Business School + Richard Thompson + rthompson&london.edu +24372 + Exiis Corporation + Certificate Administrator + dave&exiis.net +24373 + information unlimited + John Martin Ungar + john.martin.ungar&dokom.net +24374 + Life Sciences Collaborative Access Team + Keith Brister + kb0508&bb5.us +24375 + Aleksandar Milivojevic + Aleksandar Milivojevic + alex&milivojevic.org +24376 + Landeshauptstadt Muenchen + Christian Lutz + itm.ibs53&muenchen.de +24377 + Nassauische Sparkasse + Joachim Uhr + joachim.uhr&naspa.de +24378 + Kizoom Ltd + Robin Stephenson + sysadmin&kizoom.com +24379 + Tracewell Power + Christoph Heilmann + cheilmann&tracewell.com +24380 + certegy + John Balog + John.Balog&certegy.com +24381 + Joachim Uhr + Joachim Uhr + joachim.uhr&uhri.de +24382 + CH2M HILL, Ltd. + Brett Ammerman + brett.ammerman&ch2m.com +24383 + Orange Cable Corporation + Andrew Leonard + andy&orangecable.com +24384 + Caerus, Inc. + Shawn Lewis + noc&caerus.net +24385 + CryptGuard + Brian Pederson + security&cryptguard.com +24386 + NSSLGlobal Technologies AS (formerly 'STM Networks, Inc') + Hans Peter Lexow + hans.peter.lexow&sat.link +24387 + Software-Engineering Fabiani + Martin Fabiani + iana&fabiani.net +24388 + Washington State Bar Association + Jeff Wilkinson + jeffw&wsba.org +24389 + Suzhou Keda Technology Company, Ltd. + Li Bo + libo&kedacom.com +24390 + IRI Communications, Inc. + Kazunori ANDO + aams-nic&iri-com.co.jp +24391 + DoremiLabs, inc. 
+ Herve MORILLON + hmorillon&doremilabs.fr +24392 + Flexagonal Systems Ltd + Matthew Shorten + matthew.shorten&flexagonal.co.uk +24393 + Office National des Forets + Maryse BIGOT + maryse.bigot&onf.fr +24394 + Concurrent Technologies Plc. + James Elliott + jelliott&cct.co.uk +24395 + Datamat S.p.a. + Vincenzo Lombardi + vincenzo.lombardi&datamat.it +24396 + Xinjiang Sailing Information Technology Co., Ltd + kechunli + kecl666&sit.com.cn +24397 + Libertas Solucoes em T.I. LTDA + Orgito Renato Luiz Araujo + orgito&gmail.com +24398 + Espirito Santo Centrais Eletricas S.A + Renato Araujo + renato.luiz.araujo&gmail.com +24399 + Telsource Corporation + Guy King + gking&telsource.com +24400 + TNS Business Solutions S.A. + Damian Martinelli + damian.martinelli&stracienta.com +24401 + Kanton Basel-Stadt + Rainer Voegtlin + Rainer.Voegtlin&bs.ch +24402 + Omgeo LLC + Rich Lutz + richard.lutz&omgeo.com +24403 + Ministerio da Cultura + Sergio Ferreira + ldap&minc.gov.br +24404 + DroidOS Project + Geoff White + info&maildroid.org +24405 + TC Software, Inc + Ahmet Taylor + ahmet&objecttcs.com +24406 + Dynamic Biometrics Ltd. + Neil Lowden + admin&dynamicbiometrics.com +24407 + OnAir USA, Inc. + Brian Clayton + brian.clayton&onair.aero +24408 + Video Technics Inc. + Scott Saturday + ssaturday&videotechnics.com +24409 + Rising System Inc. + SAITO Ryota + ryota&risingsystem.co.jp +24410 + WHEEL Sp. z o.o. + Pawel Jakub Dawidek + staff&wheel.pl +24411 + Multi Service Forum + Avri Doria + avri&acm.org +24412 + Bundesamt für Verbraucherschutz und Lebensmittelsicherheit + Jan Seidel + jan.seidel&bvl.bund.de +24413 + University of Cambridge, Department of Archaeology + David Redhouse + dir21&arch.cam.ac.uk +24414 + CORE CORPORATION + Network Solution Division + nsadmin&core.co.jp +24415 + Unimatrix-One + Jernej Kos + kostko&unimatrix-one.org +24416 + NC Interactive + Joel Comeaux + jcomeaux&ncinteractive.com +24417 + Infoweapons Corporation + Lawrence Hughes + lhughes&infoweapons.com +24418 + Infinico Corporation + Jet Takeyama + takeyama&infinico.co.jp +24419 + Red Crow Group LLC + Jeff Rizzo + oid&redcrowgroup.com +24420 + European Airlift Centre + Christian Hahn + chahn&euroairlift.org +24421 + Roweb Development + Gabriel Bogdan Rosu + gbrosu&roweb.ro +24422 + Information Management Services + Shaun Thompson + sthompso&notes.state.ne.us +24423 + Xtek Computer + Mr. Rene Hoffmann + technik&xtek.de +24424 + IDSRDL + Thomas B. Ivers + showflash&netscape.net +24425 + Monksoft + Taylor Wilson + taylor&monksoft.net +24426 + Metavize, Inc + Bill Scott + bscott&metavize.com +24427 + Coyote Point Systems, Inc + Bill Kish + kish&coyotepoint.com +24428 + Nusoft System Inc. + Rayearth Cheng + support&nusoft.com.tw +24429 + Mangold Elektronik + Joerg A. Mangold + mibcontact&mangold.net +24430 + Red Earth Systems AB + Martin Wilderoth + admin&redearth.se +24431 + ACCEL Instruments GmbH + Martin Koller + koller&accel.de +24432 + UIIP NASB + Andrei Kruhliakou + labnet&newman.bas-net.by +24433 + Yunet International D.O.O. + Milos Prodanovic + milosp&yu.net +24434 + Ohio Bureau of Workers' Compensation + Chad Miller + Chad.Miller&BWC.state.OH.US +24435 + Ubitech Systems Inc + Nila Bronkhorst + nbronkhorst&ubitech.com +24436 + Power Distribution, Inc. + Steve Richardson + srichard&pdicorp.com +24437 + Deightime, LLC + Dan Deighton + dan-iana&deightime.com +24438 + OAO RoEduNet + Octavian RUSU + octavian&iasi.roedu.net +24439 + JBG/Development Group, L.L.C.
+ Michael Rogers + mlr&jbg.com +24440 + Xsigo Systems + Arun Singh + aruns&xsigo.com +24441 + oddbit.com + Lars Kellogg-Stedman + ldap-oid-contact&oddbit.com +24442 + smartx GmbH + Marco Berger + info&smartx.ch +24443 + Colby Sas + Giacomo Olgeni + g.olgeni&colby.it +24444 + UniBridge AS + Pål Kristiansen + pal.kristiansen&unibridge.no +24445 + Carsten Heinrici + Carsten Heinrici + carsten&heinrici.net +24446 + TILIAR Services + Andreas Linder + linder&tiliar.com +24447 + St Basil's Homes + David Cartwright + dcartwright&stbasils.org.au +24448 + WEB.DE GmbH + Dirk Fuchs + hostmaster&webde.de +24449 + Ketsujin Studios + Denis Tumpic + dtumpic&ketsujin.com +24450 + AlphaGeek, Inc. + Keisuke Nishida + knishida&alphageek.jp +24451 + Transitional Data Services, Inc. + Craig Macfarlane + noc&transitionaldata.com +24452 + AXI + Peter Neefs + pnee&axi.be +24453 + Bank Hapoalim ltd. + Mr. Barak Fishman + barak.fishman&mailpoalim.co.il +24454 + Schenker-Joyau + Fabrice RODAK + fabrice.rodak&schenker-joyau.fr +24455 + Isotoma Limited + Doug Winter + doug&isotoma.com +24456 + CODA Plc + Chris Gill + chris.gill&coda.com +24457 + BWMS Soluções Móveis Ltda + Altino Pavan + altino&bewireless.com.br +24458 + Wise Telecomunicações Ltda. + Luiz Roberto Fontes Pacces + ti&wisetelecom.com.br +24459 + Saskatchewan Wheat Pool Inc. + Ross Bowman + ross.bowman&swp.com +24460 + Fortean Technologies, Inc. + Bruce Walter + walter&fortean.com +24461 + Advanced Solutions International, Inc. + Rob Wenger + rwenger&advsol.com +24462 + E-Sauce Limited + Lee Hetherington + ops&e-sauce.com +24463 + Banco de Costa Rica + Jorge Castro Zeledon + jmcastro&bancobcr.com +24464 + First Citizens Bank + Douglas Brandsen + doug.brandsen&firstcitizens.com +24465 + BELNET + Support + support&belnet.be +24466 + move.L consulting + Martin Adolfsson + snmp&movel.se +24467 + walkgame + MIS + acheng&walkgame.com +24468 + LK PRODUKT CZ a.s. + Ing. Milan Lehecka + lkprodukt&lkprodukt.cz +24469 + University of Leeds + Qin Li + q.li&leeds.ac.uk +24470 + Boston Maine Airways Corp. + Kevin Blaisdell + mis&flypanam.com +24471 + Omniphasic Institute LLC + Isaac Fischer + isaac.fischer&omniphasic.com +24472 + University of Virginia's College at Wise + Christopher Boggs + chb5s&uvawise.edu +24473 + NetMedia, Inc. + Alex Karahalios + Alex&NetMedia.com +24474 + Jacobacci + postmaster + postmaster&jacobacci.com +24475 + Optena Corporation + Surendra Reddy + skreddy&optena.com +24476 + Server-side Systems Ltd + Lee Goddard + oid&leegoddard.net +24477 + eenterphace + Julian Reich + jr&eenterphace.com +24478 + megatrade corp. + Yasuyuki Imaizumi + imaizumi&megatrade.co.jp +24479 + E.ON Business Services Czech Republic s.r.o. + Josef Ječmen + josef.jecmen&eon.com +24480 + oncampusuk + matthew venn + matt&oncampusuk.co.uk +24481 + Warsaw Data Center Sp. z o.o. + Adrian Blazej + info&wdc.pl +24482 + Light Backup Services + Sheryl Erez + info&lightbackups.com +24483 + ISO New England Inc. + Jason Qualkenbush + jqualkenbush&iso-ne.com +24484 + Advanced Broadcast Components Ltd. 
+ Ekkehard Domning + snmp&adcocom-broadcast.com +24485 + webmasters akademie GmbH + Marc Remolt + m.remolt&webmasters.de +24486 + Micromedia International + Michel LLONCH + michel.llonch&micromedia-int.com +24487 + Bulletproof Solutions Inc + Jeff Shaw + jeff.shaw&bulletproofsi.com +24488 + United Solutions International + Ochirkhuyag Lkhagva + ochirkhuyag&usi.mn +24489 + brut + Homa Brut + homabrut&gmail.com +24490 + The Phone House Telecom GmbH + Andreas Schweer + andreas.schweer&phonehouse.de +24491 + Asociación Centro de Cooperación Interbancaria + Rafael Marín + cci&asociacioncci.es +24492 + Consorzio Torino Time + Ezio Ajmar + ajmar&sia-av.it +24493 + Safend Ltd. + Adam Carmi + adam.carmi&safend.com +24494 + Extra Prof S.R.L. + Andrew Clark + aclark02&extraprof.com +24495 + comundus Unternehmensberatung GmbH + Meinrad Schwamborn + m.schwamborn&comundus.com +24496 + Western Telematic Inc. + Ken Partridge + kenp&wti.com +24497 + KMD.COM.TW + Sandy Chen + sandy&kmd.com.tw +24498 + Interprise (Pty) Ltd + Philip Correia + oid&singularity.co.za +24499 + European Aviation Safety Agency + Lee Goddard + lee.goddard&easa.eu.int +24500 + MorkoM GmbH + Guenter Morper + gmorper&morkom.org +24501 + Green Hat Consulting Ltd. + John Chewter + support&greenhatconsulting.com +24502 + Wandering Barque + Peter Lacey + placey&wanderingbarque.com +24503 + ICONICS, Inc. + Petr Balda + petr&iconics.com +24504 + CENTRÁL Mosodák Szolgáltató Részvénytársaság + Mihály Kepler + kepi&cmrt.hu +24505 + Emerging Systems + Glenn Elliott + glenn&emerging.com.au +24506 + Osaki Electric co.,ltd. + Makoto Takahashi + ma.takahashi&osaki.co.jp +24507 + Peadar Grant + Peadar Grant + peadar.grant&ucdconnect.ie +24508 + StagniNET + Stagni Paolo + paolo.stagni&gmail.com +24509 + Commonwealth of Massachusetts + Jeff Flannery + jeff.flannery&state.ma.us +24510 + SHANGHAI SIMED TECH LTD. + Shennan Jiang + simed&163.com +24511 + FlowInspect SpA + Marco Grillo + mgrillo&flowinspect.com +24512 + Beijing Hugeland Technologies co.,Ltd. + Qiu Jinzhu + qiujinzhu&hugeland.com +24513 + Finnish Game and Fisheries Research Institute + Matti Kaisla + matti.kaisla&rktl.fi +24514 + Rivulet Communications, Inc. + Ken Chapman + kchapman&rivulet.com +24515 + ROC Software Systems, Inc. + Mike Broadway + mike.broadway&rocsoftware.com +24516 + Conair Corporation + Joshua Cloud + joshua_cloud&conair.com +24517 + LTIA + Marcelo Fornazin + fornazin&fc.unesp.br +24518 + Great-West Life & Annuity Insurance Company + James Schultz + james.schultz&gwl.com +24519 + Trustwave Holdings, Inc. (formerly 'XRamp Security Services Inc.') + OIDAdmin + OIDAdmin&trustwave.com +24520 + Tokash Consulting Inc. + Keith Tokash + ktokash&hotmail.com +24521 + SupplyFX, Inc + David Trott + ldap&supplyfx.com +24522 + PT. Excelcomindo Pratama, Tbk. + Azhari Naman + azhari&xl.co.id +24523 + infeurope S.A. + Joerg Dorchain + sysadmin&infeurope.lu +24524 + RheinEnergie AG + Achim Kader + a.kader&rheinenergie.com +24525 + Twisted Pair Solutions, Inc. + Joshua Cloud + jcloud&t-pair.com +24526 + St James the Great R.C Primary & Nursery School + Simon Kelsall + simonk&stjamesthegreat.org +24527 + commax Co.,Ltd. + HYUNSEOK HAN + hshan&commax.co.kr +24528 + ProgramDuon AB + Stefan Johansson + stefan&programduon.se +24529 + Systembetreuung.com + Jens Höhne + jens.hoehne&systembetreuung.com +24530 + Universidade Federal do Amazonas + Leandro Nascimento dos Santos + tihgre&hotmail.com +24531 + Metis Communication Co., Ltd + Min Jung Lee + min&metiscomm.co.kr +24532 + Intelliguard I.T.
+ Richard Jones + richard.jones&intelliguardit.net +24533 + Akvi-Patent Co. + Papp Horvath Nandor + papp.horvath.nandor&akvipatent.hu +24534 + Gordano Ltd + Griff James + gj&gordano.com +24535 + MD Systems + Miro Dietiker + info&md-systems.ch +24536 + DAS + Hector Corredores + hector_corredores&digitel.com.ve +24537 + NNMI + Chung-Yeol Lee + morning&nnmi.kaist.ac.kr +24538 + Morning + Lee, Chung-Yeol + morning&chollian.net +24539 + IST Holdings (Pty) Ltd. + Paresh Joshi + paresh&ist.co.za +24540 + Betty TV Entwicklungs GmbH + Michael Friedel + m.friedel&betty-tv.com +24541 + Beijing GrandRole Software Co.,Ltd. + Cao Jing + caoj&grandrole.com +24542 + Albert-Einstein-Gymnasium + Jan-David Salchow + jdsalchow&web.de +24543 + Black Eyed Moon + Kent Lee + ikenticus&gmail.com +24544 + JSC "Eltel" + Yura Kardukov + rodriges&eltel.net +24545 + Ursus Technologies, Inc. + Brian Henling + brian.henling&ursus-tech.com +24546 + ServerCave, Inc. + Chris Rogers + chris&servercave.com +24547 + Linux Users Group Singapore + Michael Clark + committee&lugs.org.sg +24548 + OutSys snc + Guidotti Fabrizio + fguidotti&outsys.it +24549 + Triple-m Systems AG + Werner Bieri + sales&triple-m.ch +24550 + edoras GmbH & Co. KG + Janosch Langer + spam&edoras.de +24551 + CPU d.o.o. + Vladimir Kraljevic + vladimir.kraljevic&cpu.co.yu +24552 + GÉANT + Nicole Harris + contact&refeds.org +24553 + Prompttec Product Management GmbH + Peter Hoedl + p.hoedl&prompttec.com +24554 + Church of the Nazarene + Michael D. Fetting + it&nazarene.org +24555 + RACKMASTER SYSTEMS INC. + FAISAL AHMED + FAHMED&RACKMASTER.COM +24556 + Starwave Technologies,Inc. + Robin Jiang + PEN&starwavetech.com.cn +24557 + Obcanske sdruzeni HKfree + Jiri Syrovy + jrk&hkfree.org +24558 + Arcade ICT BV + Hans Alstein + hans.alstein&arcadenetwerken.nl +24559 + Net Tecnologies + Andrew Golubev + golubev&nt.net.ua +24560 + inode GmbH + Alexander List + alexander.list&inode.info +24561 + Celletra Ltd. + Baruch Altman + baltman&celletra.com +24562 + Bridge Technologies Co AS + Nils Jørgen Zapffe + nils.zapffe&bridgetech.tv +24563 + Kerfi AB Avdelning Norge + Hagbart Eugen Aandal-Frøystadvåg + eugen.aandal-froystadvag&kerfi.com +24564 + HOLON Corp. + Keiichi Itano + itano&mail.holondata.co.jp +24565 + Yuba Community College District + Christian Ward + cward&yccd.edu +24566 + koreanair + Jin-Yeol Roh + jyroh&hist.co.kr +24567 + YIT Corporation + Unto Eskelinen + unto.eskelinen&yit.fi +24568 + DawnSpill Ltd. + Roland Szabó + rszabo&dawnspill.hu +24569 + directnews AG + Andre Kreubel + kreubel&directnews.de +24570 + OpenWired + David Moron + david.moron&openwired.net +24571 + uniwan.be + Jean-Marc ANDRE + jean-marc&uniwan.be +24572 + Totaltel Telecommunication Ltd. + Ferenc Toth + totaltel&totaltel.hu +24573 + Massachusetts Board of Library Commissioners + Paul Kissman + paul.kissman&state.ma.us +24574 + Opsera Ltd. + Adrian Bridgett + root&opsera.com +24575 + Tranzeo Wireless Technologies Inc. 
+ Damian Wallace + feedback&tranzeo.com +24576 + HL7 Lithuania + Vytenis Punys + hl7&mmlab.ktu.lt +24577 + Tosharecomu LLC + Yuki Hamanaka + yuki.hamanaka&10share.com +24578 + IDSignet + Chen Shaopeng + chen_shaopeng&idsignet.com +24579 + Australian Style Pty Ltd + Bruce Nicholls + bruce&bottledomains.net.au +24580 + TietoEnator Broadcasting IT Oy + Esa Hillilä + esa.hillila&tietoenator.com +24581 + Zemp Informatik + Zemp Dominik + zemp.dominik&zemp-informatik.ch +24582 + credativ GmbH + Martin Zobel-Helas + martin.zobel-helas&credativ.de +24583 + Guerrier + Olivier Guerrier + olivier+iana&guerrier.com +24584 + Nagoya University + Kenji Mase + oid-admin&itc.nagoya-u.ac.jp +24585 + Cassiano Morgado de Aquino - ME + Cassiano Morgado de Aquino + cassiano.aquino&aqua.com.br +24586 + BA Systems + Eric Green + egreen&ba-sys.com +24587 + Judson College + Philip G. Guth + pguth&judsoncollege.edu +24588 + Automatic IT Corporation + Automatic IT OID Administrator + ianaoid&AutomaticIT.com +24589 + Redoc Inc. + Tim Coder + tim&redoc.com +24590 + Bristol Technology Inc. + Ken Blackwell + ken_blackwell&bristol.com +24591 + Marchex, Inc. + Adam Jacob + adam&marchex.com +24592 + Brazos County, Texas + Brazos County Information Technology + informationtechnology&co.brazos.tx.us +24593 + Think Zone S.C. + Jose Manuel Payan del Rio + support&think-zone.com +24594 + MYOB Australia E1 Pty Ltd (formerly 'Ilisys Web Hosting Pty Ltd') + Peter Hallam + peter.hallam&myob.com +24595 + safari video networks llc. + ben thielsen + bthielsen&1safari.com +24596 + Ascent Technology, Inc. + Judy Cui + JudyCui&ascent.com +24597 + NRW.BANK + Gerhard Klein + gerhard.klein&nrwbank.de +24598 + NC Cable Pty Ltd t/as Neighbourhood Cable + Wade Roberts + iana&ncmail.com.au +24599 + Downey Savings and Loan Association, F.A. + Roy Parker + RoyParker&DowneySavings.com +24600 + EDI Branch,Shanghai Telecom Corporation Ltd. + Huang Yanping + hyp&public.shedi.net.cn +24601 + Premicare AB + Tobias Johnsson + tobias&premicare.se +24602 + Siemens Com EN PSY DB + Roman Hector Abril + roman.abril&siemens.com +24603 + Peavey Electronics Corporation + Joe Peavey + joe&peavey.com +24604 + CosmosKey + Johan Akerstrom + johan.akerstrom&myrealbox.com +24605 + Owl Computing Technologies, Inc. + Jim Hope + jhope&owlcti.com +24606 + The People's Republic of Ames + Thomas L. Kula + hostmaster&tproa.net +24607 + Drystone + John Hedges + john&drystone.co.uk +24608 + Centre hospitalier de Lagny Marne la vallée + Christophe Delpierre + christophe.delpierre&ch-lagny77.fr +24609 + R. S. Software (India) Ltd. + Koushik Nath + knath&rssoftware.co.in +24610 + gemeentebestuur sint-gillis-waas + timothy de meersman + timothy.demeersman&sint-gillis-waas.be +24611 + Baptist Memorial Health Care Corporation + Brandy Reid + brandy.reid&bmhcc.org +24612 + Fujitsu Hong Kong Ltd. + Samuel Kwok + samuelk&hk.fujitsu.com +24613 + Kingston University + Simon Willis + willis&kingston.ac.uk +24614 + Jakota Design Group GmbH + Felix Richter + richter&jakota.de +24615 + b.comp GmbH + Thorsten Knabe + info&b-comp.de +24616 + CROSSPOiNT + Jesus Ruiz + jruiz&crosspoint.es +24617 + NMS Software, Inc. + Bizhan Ghavami + bghavami&netmonsvc.com +24618 + BOXALINO AG + Ammann Michael + michael.ammann&boxalino.com +24619 + MOSDAN Technology Co.,Ltd + Sunny Lee + sunny&mail.mosdan.com.tw +24620 + Tecnosis, S.A. + Javier Pacios + iana&tecnosis.net +24621 + Tazz Networks Inc. + Chad Dunn + cdunn&tazznetworks.com +24622 + netCo GmbH Hamburg + S. 
Anderson + sanderson&ntcg.de +24623 + Samsung Thales Co., Ltd. + Chang Kyoon Kim + changkyoon.kim&samsung.com +24624 + EuroCableLabs + Massimiliano Pala + m.pala&cablelabs.com +24625 + Luxcore Optronics, Inc. + Gayle Link + glink&luxcore.net +24626 + Lime Brokerage LLC + Chad Cook + ccook&limegroup.com +24627 + Vitas + Duane Ellis + duane.ellis&vitas.com +24628 + CXR Larus Corporation + Tai-Fan Li + tli&laruscorp.com +24629 + Telio AS + Bjørn Nordbø + bjorn.nordbo&telio.no +24630 + Skinkers ltd + Rob Noble + sysadmin&skinkers.com +24631 + mediaproxy pty ltd + Michael Coop + software&mediaproxy.com +24632 + CorEdge Networks, Inc. + Arvid Sorenson + arvid.sorenson&coredgenetworks.com +24633 + Toyo Radio Systems Co.,Ltd. + Hiroshi Igarashi + penpenpen&toyoradio.co.jp +24634 + Ericpol Telecom sp. z o.o. + Waldemar LENDA + Waldemar.Lenda&ericpol.pl +24635 + ALPS Electric Europa GmbH + Dominik Brosch + dominik.brosch&alps-europe.com +24636 + XPass Technologies Co., Ltd + Yongqi Li + liyongqi&sslvpn.com.cn +24637 + NETWAVE CO., LTD. + Munho Kwak + mhk&net-wave.co.kr +24638 + JotSpot Inc. + Brian Thomas + brian&jot.com +24639 + Elecom scsi + Daniele Scarcella + daniele.scarcella&elecomsc.it +24640 + Nova Technologies + David Calloway + calloway&novatechnologies.com +24641 + Allodial Solutions, Inc. + Del Miller + del&allodialsolutions.com +24642 + University of Sarajevo - Faculty of Electrical Engineering + Ernedin Zajko + ezajko&etf.unsa.ba +24643 + Cedval Info inc. + Francois Meehan + fmeehan&cedvalinfo.com +24644 + Applied Broadband, Inc. + Jason Schnitzer + jason&appliedbroadband.com +24645 + e-DMZ Security, LLC. + Jeff Zupan + jeff.zupan&e-dmzsecurity.com +24646 + Fonner + Kevin Fonner + kevin&fonner.net +24647 + "NTC-Atlas" + Alexey Nadenenko + awn&atlas.by +24648 + SL EDV-Dienstleistungen + Stefan Löhberg + post&loehberg.de +24649 + MainSkill Technologies GmbH&Co.KG + T.Dargers + iana_assignments&mainskill.com +24650 + Control Systems srl + Di Salvatore Vincenzo + v.disalvatore&cs2.it +24651 + RongJi Network Security Technology Co., Ltd + Daixiang Zhu + zhudaixiang&rongji.com +24652 + Everbee Networks + Defrance Sébastien + sdefrance&everbee.com +24653 + Studec + Patrick Boyer + patrick.boyer&studec.fr +24654 + NPO TrustGear + George Gritsai + gritsai&hotbox.ru +24655 + Applied Broadband, Inc. + Jason Schnitzer + jason&appliedbroadband.com +24656 + Pre-Emptive Limited + Ralph Bolton + ralph.bolton&pre-emptive.net +24657 + TCSYS Limited + Tim Cairnes + tpc&tcsys.co.uk +24658 + intivo + Thomas Tague + IANA&intivo.com +24659 + Caringo, Inc. + Jonathan Ring + jonathan.ring&caringo.com +24660 + Rapid Mobile (Pty) Ltd + Jan van der Watt + janvdw&rapidm.com +24661 + WM-data Infra Solutions AB + Mikael Feldtman + mifal&wmdata.com +24662 + Digital Instruments S.r.l. + Marco Genova + info&digital-instruments.it +24663 + Norventure Ltda. + Trond Karlsen + trond.karlsen&norventure.com +24664 + Delaware Technical and Community College + Robert W. Rahe + bob&hobbes.dtcc.edu +24665 + BytePark + Rogier Krieger + iana&bytepark.net +24666 + Camelcom + Istvan Czegledy + info&camelcom.hu +24667 + Atlanta Technical Consultants, LLC + Donovan Young + dyoung522&gmail.com +24668 + Quay Security Ltd. + Michael Hoegen + michael.hoegen&quaysecurity.net +24669 + Beijing Mobile Interactive Co. LTD. + Mr. Li Yongfeng + liyongfeng&m-in.cn +24670 + Gemini Observatory + Jerry Brower + jbrower&gemini.edu +24671 + Vienna University + Peter Schober + peter.schober&univie.ac.at +24672 + Watkins MFG Inc. 
+ Paul M Vincent + paul.vincent&WatkinsMFG.com +24673 + ServerEngines LLC + Kimball Brown + kimball&serverengines.com +24674 + Linzer Technikum - LITEC + Herbert Jachs + h.jachs&eduhi.at +24675 + Nordh Sistemas de Controle Ltda + Lourival José Passos Moreira + lourivalmoreira&gmail.com +24676 + Applied Voice & Speech Technologies, Inc. + Doug Murray + dmurray&avst.com +24677 + NexGen Communications, LLC. + Trung Nguyen + trung.nguyen&nexgencom.net +24678 + Oklahoma City University + Dennis Rigdon + netserv&okcu.edu +24679 + China Sports Lottery Printing(Beijing) Co. Ltd. + Zhang Yaoxin + hilsonchang&gmail.com +24680 + Bradmark Technologies, Inc. + Edward Stangler + netadmin&bradmark.com +24681 + QNAP SYSTEMS, INC + Nike Chen + nikechen&qnap.com.tw +24682 + SAT + Sang-Young Kim + sykim&satech.co.kr +24683 + Hutchison Drei Austria GmbH + Martin Aigner + martin.aigner&drei.com +24684 + Helsinki Business Polytechnic + Ivonen Jukka + hostmaster&helia.fi +24685 + Omada A/S + Morten Mygind + mn&omada.dk +24686 + PROFIBUS Nutzerorganisation e.V. + Support Centre for PROFINET and PROFIBUS + pi&profibus.com +24687 + Integrated Broadband Services, LLC + Donovan Young + dyoung&ibbsonline.com +24688 + C. & H. Euser Transport B.V. + Allaert Euser + allaert&euser.nl +24689 + WAR Ink + Mike Komer + iana&war-ink.com +24690 + Japan Communication Equipment Co.,Ltd. + Hitoshi Hiraoka + hiraoka&nitsuki.com +24691 + Beijing GoodMedia Co. LTD. + Mr. Xiao Feng + xiaofeng&m-in.cn +24692 + Ask Jeeves (Hangzhou) Limited + Wuyun Kang + wkang&askjeeves.com +24693 + Firebrick Ltd. + Adrian Kennard + iana&firebrick.ltd.uk +24694 + Technische Universiteit Eindhoven + Dean C. Strik + C.Strik&tue.nl +24695 + Prodemge - Companhia de Tecnologia da Informação do Estado de Minas Gerais + Eduardo de Paula Neves + eduardo.paula&prodemge.gov.br +24696 + Falkin Systems LLC + Rob Marano + rob&falkin.com +24697 + Canal de Isabel II + Olga Morales + omorales&cyii.es +24698 + WH Trading, LLC + Colin Kubota + whtis&yahoo.com +24699 + AGB Nielsen Media Research + Gabriele De Benedittis + gabriele.debenedittis&agbnielsen.net +24700 + Prolon Control Systems + Thomas Maltesen + tm&prolon.dk +24701 + Spitzenverbände der landwirtschaftlichen Sozialversicherung + Sven Meyer + sven.meyer&bv.lsv.de +24702 + CLS Services Ltd + IT Security + *itsecmon&cls-services.com +24703 + Sven Meyer IT-Services + Sven Meyer + s.meyer&sm-it-s.de +24704 + Landesoberbehoerde IT Baden-Wuerttemberg (BITBW) + Dr. Matthias Feuerhake + Abteilung3.Gst&bitbw.bwl.de +24705 + Clal Insurance Ltd + Baruch Oved + bar1&clal-ins.co.il +24706 + RW Consulting + Rich Williamson + w7ki&northwestradio.net +24707 + Computation Systemhaus GmbH + Bernd Ruecker + info&computation.de +24708 + University of PISA + Davide Vaghetti + davide&unipi.it +24709 + North York General Hospital + Gerry Dimnik + gdimnik&nygh.on.ca +24710 + OPSOFT s.r.o. + Stanislav Opach + support&opsoft.cz +24711 + MLK - ST informatique + Sebastien STEINER + sebastien.steiner&mlk-st.com +24712 + Waterstone Environmental Hydrology and Engineering, Inc. + Cedar Myers + ianaoid&waterstoneinc.com +24713 + Grammofonartistenes forening + Terje Klausen + terje&gramart.no +24714 + Intesis Software S.L. + Josep Ceron + jceron&intesis.com +24715 + Beijing Langhua Ltd. 
+ Shi Jinghai + shijh&langhua.cn +24716 + Start Stelcom Ltd Corp + YuCai.SU + kingsen_su&yahoo.com.cn +24717 + Berg-BSP + Guenther Berg + guenther_berg&web.de +24718 + Pierre Bruyère + Pierre Bruyère + agora&bruyere.at +24719 + Cyberview Technology Ltd + Andrew Peek + andrew.peek&cybview.com +24720 + Thales Training & Simulation Ltd + Alan Ball + alan.ball&thalesgroup.com +24721 + Svisj + Fredrik Melsom Klausen + fredrik&svisj.no +24722 + Kef Solutions Inc. + Kevin McCall + admin&kef.ca +24723 + SOS GmbH + Andreas Püschel + andreas.pueschel&sos-berlin.com +24724 + Aluria Software + Tony Carter + tcarter&aluriacorporate.com +24725 + Australian Technology Information Pty Ltd + Tim Nicholls + tnicholls&austechinfo.com.au +24726 + USHER + John C.W. Krienke + jcwk&internet2.edu +24727 + CleanOffer, Inc. + John Hampton + john&cleanoffer.com +24728 + pythea + W.J.T.Then + h.then&pythea.nl +24729 + AutoGas Systems, Inc. + David Ashby + David_Ashby&autogas.com +24730 + Audible Magic Corp. + Jim Schrempp + j_schrempp&audiblemagic.com +24731 + Beijing Kexin Technology Co. Ltd + Wu Fengliang + flwu&thinkit.cn +24732 + Universität Bayreuth - RZ + Heinrich Ney + Heinrich.Ney&uni-bayreuth.de +24733 + Summit Computer Systems, Inc. + Bob Davis + bob&scsi.com +24734 + cintech + Damien Laurey + damien.laurey&cintech.fr +24735 + Gunnebo AB + Björn Nilson + bjorn.nilson&gunnebo.com +24736 + Narcommunications + Mariano Ribichich + narcommunications&yahoo.com.ar +24737 + ION R-D Elektronika Bt. + Attila Aradi + aradi.attila&ion-rd.hu +24738 + Cablecom GmbH + Christian Laudan + iana-oid&cablecom.ch +24739 + Slaski Urzad Wojewodzki + Cisowski Jakub + oid.admin&katowice.uw.gov.pl +24740 + ARFWorks + Christian Reynolds + cyberdoc&astro.gmtc.net +24741 + MDT Inc. + Philip W. Dalrymple III + pwd&mdtsoft.com +24742 + Danish Ministry of the Environment + Carsten Hougs Lind + chl&cfk.dk +24743 + Memorex Telex Japan Ltd. + Shinichi Yoshizawa + syoshizawa&memorex.co.jp +24744 + a2 system + miguel arpa + miguelarpa&yahoo.es +24745 + TVWorks + Przemek Struminski + snmp&tvworks.com +24746 + Oberto Sausage Company + Heath Saunders + heath.saunders&oberto.com +24747 + Capella Education Company + Jeremy D. Pavleck + jeremy.pavleck&capella.edu +24748 + TPSA LTD. + Piotr Kura + kundus&gazeta.pl +24749 + Scrutable Systems, Inc. + Nathan Winant + nw&scrutable.com +24750 + Unassigned + Removed 2006-10-23 + ---none--- +24751 + Cluster-Works GbR + Martin Roesler + m.roesler&cluster-works.de +24752 + SyncIO oHG + Denis Boehme + wb&dbse.de +24753 + SSC Publications Inc. + Mitch Frazier + sysadmin&ssc.com +24754 + SIRCA Ltd + Andrew Thomson + ajt&sirca.org.au +24755 + Satellite Music Australia Pty Limited + John Rich + john&sma.net.au +24756 + PARTAD + Sergey Sorokin + sergey&partad.ru +24757 + WiMAX Forum + Ron W. Smith + ron.w.smith&intel.com +24758 + Brightwire Media + Chris Nesbitt-Smith + chris.nesbitt-smith&brightwiremedia.co.uk +24759 + Panacya Inc + Ashwath Akirekadu + ashwath.akirekadu&panacya.com +24760 + ILS Technology LLC + MIHAI VOICU + mvoicu&ilstechnology.com +24761 + Euro/IP B.V. + Marcel v. Birgelen + mbi&euro-ip.net +24762 + Smart and Final Stores Corporation + Anna Papp + anna.papp&smartandfinal.com +24763 + rrothos.com + Alexander Tabakoff + tabakoff&web.de +24764 + IRIS Corp Bhd + Chew Kok Hooi + khchew&iris.com.my +24765 + Aylus Networks, Inc. 
+ Yuhua Lu + yuhua&aylus.com +24766 + Spiceberry + Christian Jendeberg + jendeberg&hotmail.com +24767 + NOX SYSTEMS AG + Sven Sauter + sven.sauter&sauter.li +24768 + Pott & Körner + Mike Aretz + maretz&pottbre.com +24769 + Acronis, Inc. + Dennis Dyatlov + Dennis.Dyatlov&acronis.com +24770 + IZALL.COM Ltd. + Tarmo Kaljumae + tarmo&izall.com +24771 + Intercooperation + Claude Fankhauser + cfankhauser&intercooperation.ch +24772 + SolidX AB + Peter Eriksson + registry&solidx.se +24773 + SG GENIKI BANK S.A. + KONSTANTINOS KATSOULIS + kostas.katsoulis&geniki.gr +24774 + World Health Organization + Biswamber Gurubacharyab + hostmaster&who.int +24775 + Aviareto Limited + Head of Operations + registryofficials&aviareto.aero +24776 + Brasil Telecom S/A + Andre Gruszynski + agski&brasiltelecom.com.br +24777 + Ingenious Corporation Ltd. + Alan Helmore-Simpson + alanhs&ingeniouscorp.com +24778 + TenXc Wireless Inc. + Tam Duong + tamduong_ba&yahoo.ca +24779 + SeaNet Technologies, Inc. + John Burgess + john.burgess&seanet-tech.com +24780 + SBIN Systems + Sean Johnson + sjohnson&sbinsystems.com +24781 + A.F.P.B.T.P. du Calvados + Bergar Sebastien + sebastien.bergar&ccca-btp.fr +24782 + TetraNed + Edwin de Haan + e.dehaan&tetraned.nl +24783 + Wola Info S.A. + Kamil Kuć + kamil.kuc&wolainfo.com.pl +24784 + Logintas AG + Daniel Lutz + daniel.lutz&logintas.ch +24785 + Luganda Group, Inc + Natasha Cooks + natasha.cooks&gmail.com +24786 + AsteriskAD.com + Tim McQueen + mcquetm&elmore.rr.com +24787 + TU Dresden, Chair of Road Design + Bert Burgemeister + Bert.Burgemeister&tu-dresden.de +24788 + Itilon Pty Ltd + Thomas Price + snmp&itilon.com +24789 + Teletech d.o.o. + Tomi Gacnik + tomi&teletech.si +24790 + Handmade Mobile Entertainment + Systems Administrator + sysadmin&flirtomatic.com +24791 + InZon Corp + Andy Pinkerton + apinkerton&inzon.net +24792 + Red Oxygen Pty Ltd + John Hedges + john.hedges&redoxygen.com +24793 + GSTeletech Co.,Ltd. + Young Ho Song + syh&gsteletech.com +24794 + Federated Department Stores + James Dessin + jim.dessin&fds.com +24795 + blue elephant systems GmbH + Alexander Pertsch + alexander.pertsch&blue-elephant-systems.com +24796 + Bundesärztekammer + Dirk Schladweiler + dirk.schladweiler&baek.de +24797 + IMSEC GmbH + Daniel Lutz + daniel.lutz&imsec.ch +24798 + data-complex gmbh + Mr. Andreas Schmidt + service&data-complex.net +24799 + App-Line + Stéphane Benteux + sibiloproduct&app-line.com +24800 + Apogee Network Systems & Consulting, LLC + Mr. Charles Thompson + projetcs&apogeenetworksystems.com +24801 + MTI co. LTD + SHIN, Tommy (SHIN, Chang min) + netrian&mtico.co.kr +24802 + Saratoga Systems, Inc + Eric DeLaney + edelaney&saratogasystems.com +24803 + nu.org + C Vance + cjsv&nu.org +24804 + Akorri Networks, Inc. + Sanjay Raja + sraja&akorri.com +24805 + Bankoa Credit Agricole + Pierrete Larran + plarran&bankoa.es +24806 + Tern Systems Inc. + Philippe Hoornaert + philippe.hoornaert&tern.is +24807 + RISO KAGAKU CORPORATION + Hideki Akiyama + akiyama&dev.riso.co.jp +24808 + EODP Section ( For Testing Only) + Nopparat Srinim + nopparat.srinim&gits.net.th +24809 + Manic Productions + Trevor Jorgenson + pdxtechie&gmail.com +24810 + Innerspace Ltd. + Alexandre da Silveira Ilha + ilhapoa&yahoo.com +24811 + Robatech AG + A.Buetler + info&robatech.ch +24812 + tribaldi GmbH + Lutz Mueller-Hipper + info&tribaldi.com +24813 + chellomedia services Ltd + Donald Boyes + dboyes&chellomedia.com +24814 + Yinhoo Software Inc.
+ Daniel.Kitano + daniel&yinhoo.com +24815 + Golzari IT-Consultancy + Matthew Golzari + enterprise-number&golzari.nl +24816 + Asoka USA Corporation + Elsa chan + charlie.xiang&asokausa.com +24817 + TuVox Inc + Veeresh Kolluru + veeresh&tuvox.com +24818 + Micro Industries Inc. + Scott Radcliffe + scott.radcliffe&gmail.com +24819 + CARDIONICS SA + WALDURA JEAN + jean.waldura&cardionics.be +24820 + Oskarshamns Konverterings AB + Leif Ödell + leif.odell&okabconvert.com +24821 + Elbit Systems Deutschland GmbH & Co. KG + Holger Denke + holger.denke&elbitsystems-de.com +24822 + Instituto Nacional de Estadistica + Jokin Agurruza + agurruza&ine.es +24823 + Amey Datel + Ian Ward + ian.ward&amey.co.uk +24824 + NightStorm Software Systems, Inc. + Bruce A. Mallett + bam&NightStorm.com +24825 + Creighton University + Sean Kelly + smkelly&creighton.edu +24826 + Weyerhaeuser + David Treece + david.treece&weyerhaeuser.com +24827 + Bieles Stano + Bieles Stano + stano.bieles&iol.cz +24828 + Speedway Internet Service SC LTDA + Raul S. Dias + raul&dias.com.br +24829 + Estacado Systems, LLC + Eric McMurry + emcmurry&estacado.net +24830 + THOTT Solutions + Shawn Webb + shawn.webb&thott-solutions.com +24831 + Sirit Technologies Inc. + David Webb + dfwebb&sirit.com +24832 + Mi5 Inc. + Emma Chung + emmac&mi5networks.com +24833 + Eliteitminds Technologies Inc. + Mike Crawford + ali3n&eliteitminds.com +24834 + KAT Project + Simon Ratcliffe + sratcliffe&ska.ac.za +24835 + University of Ostrava + Alice Vixie + alice.vixie&osu.cz +24836 + Johannes Kapune GbR + Johannes Kapune + johannes.kapune&kapune.de +24837 + Sobha Renaissance Information Technology Private Limited + Sajith Chandran + sajith.chandran&renaissance-it.com +24838 + Danbury Health Systems + Jeff Purslow + jeff.purslow&danhosp.org +24839 + Universidade Federal Fluminense + Vinod Rebello + vinod&ic.uff.br +24840 + David Ross + David Ross + david&drosstech.com +24841 + Red [E] Entertainment Group + Judith Freiha + judith&red-e-ent-group.com +24842 + Gravitime + Stephane GERBAUD + stephane.gerbaud&laposte.net +24843 + Halier + Linus Ericsson + le7&halier.net +24844 + Petroleum Convenience Alliance for Technology Standards, Inc. + John Hervey + jhervey&pcats.org +24845 + JSC institute "Promstrojniiproject" + Igor Frolov + test&psp.aaanet.ru +24846 + Scivo Technology Co. Ltd. + Jie.Ouyang + ouyangjie&scivo.com +24847 + International Criminal Court - Cour Penale Internationale + Daniel DRUMEA + daniel.drumea&icc-cpi.int +24848 + bollcons + Oliver Bolliger + oidmaster&oli.ch +24849 + ELITNET, Ltd. + Jonas Zaveckas + jonas&elitnet.lt +24850 + Mercantile Network Brasil, Ltda. + Sidney Huber + sidney&huber.com.br +24851 + Hillsborough Community College + Paul Carroll + oid.admin&hccfl.edu +24852 + Phihong USA + Richard Frosch + rickf&phihongusa.com +24853 + Komerccentrs DATI grupa + Denis Basta + Denis.Basta&kc.lv +24854 + Pacific Star Communications + Dominic Perez + dperez&curtisswright.com +24855 + Sensonxi Inc. + Mark Rue + mrue&sensonix.com +24856 + MDS Pharma Services + Thomas Grundstrom + thomas.grundstrom&mdsinc.com +24857 + VECTORsgi + Matt Murphy + matt.murphy&vectorsgi.com +24858 + Vendetta Performance Limited + Mike Lowrie + mlowrie&vendetta.ca +24859 + Shanghai PONX technology com.,Ltd. + Ruifeng Zhang + Ruifeng_zhang&163.com +24860 + tosker.net + Mike Goggin + mettaur&gmail.com +24861 + StudyStack + John Weidner + john.weidner&studystack.com +24862 + Qualimedic.com AG + Martin v. 
Boehlen + edv&qualimedic.de +24863 + MTI Technology GmbH + Gerhard Lochmann + glochmann&de.mti.com +24864 + Link GmbH + Tom E. Wierich c/o Link GmbH + t.wierich&link-gmbh.com +24865 + Cramer Systems Ltd + James Dingwall + james.dingwall&cramer.com +24866 + Hans Turck GmbH & Co. KG + Mr. Markus Ingenerf + markus.ingenerf&turck.com +24867 + Goldsmiths College, University of London + Suzanne Payne + s.payne&gold.ac.uk +24868 + Technoface Corporation + Cornelius O'Herlihy + conchan&technoface.co.jp +24869 + KLab Inc. + SNMP MIB Management + mib&project.klab.org +24870 + ZE PAK SA + Przemysław Maleszka + maleszka.przemyslaw&zepak.com.pl +24871 + Webroot Software, Inc. + Bryan Gale + bgale&webroot.com +24872 + H-D-H + Dieter Hendricks + dh&h-d-h.de +24873 + Procesamiento Digital y Sistemas, S.L. + Jose Valladares + jvalladares&prodys.net +24874 + GO Networks, Inc. + Executive Office Manager + info&gonetworks.com +24875 + Andxor Soluzioni Informatiche S.r.l. + Alfonso De Gregorio + iana&andxor.com +24876 + Atlas Advanced Internet Solutions Ltd. + Technical Department + admin&aais.net.uk +24877 + Informatica y Comunicaciones Avanzadas, S.L. + Luis Rodrigue + luis.rodriguez&grupoica.com +24878 + Mercurio Comunicaciones + Francisco Perea + faperea&gmail.com +24879 + Gi Gesundheitsinformatik GmbH + Michael Westermann + post&ginfo.de +24880 + DIRECCION DE IMPUESTOS Y ADUANAS NACIONALES + Diego Mauricio Calderon + dcalderonp&dian.gov.co +24881 + CERIST + Aouaouche El-Maouhab + elmaouhab&wissal.dz +24882 + Alcatel Japan Ltd. + Hisu Kang + hisu.kang&alcatel.co.jp +24883 + Vertigo Media, LLC + Faisal Rahman + frahman&vertigomedia.com +24884 + Toneware Technology co. Ltd. + Huazhang Tong + tonghz&zju.edu.cn +24885 + Banca Intesa AD Beograd + Milan Markovic + milan.markovic&deltabanka.co.yu +24886 + Eigil Bjørgum + Eigil Bjørgum + eigil&bjorgum.no +24887 + Parlamento de Galicia + Jorge Remuinan-Suarez + remuinan&parlamentodegalicia.es +24888 + Malden Electronics Ltd. + Richard Reynolds + support&malden.co.uk +24889 + IB Technologies Sdn. Bhd. + Khalil Huzairi Bin Ahmad + khalil&ibtech.com.my +24890 + Extant Solutions + Andrew Hogan + andrew&extantsolutions.com +24891 + Instituto Tecnológico y de Estudios Superiores de Occidente A.C. + José Alberto Guzmán Ramírez + hostmaster&iteso.mx +24892 + Great Atlantic and Pacific Tea Company, Inc. + Christopher Horn + hornc&aptea.com +24893 + Intelligent Platforms, LLC + Todd Shelton + Todd.Shelton&emerson.com +24894 + PantheraNet + Steven Saner + ssaner&pantheranet.com +24895 + Medifacts International Inc. + Kenneth White + kwhite&medifacts.com +24896 + Microdental Laboratories + John Helmuth + helmuthj&microdental.com +24897 + Indel Industria Eletronica Ltda + Fabio Medeiros + fabsilv&yahoo.com +24898 + d&b audiotechnik GmbH & Co. KG + Gunter Coelle + it-support&dbaudio.com +24899 + Agama Technologies AB + Rolf Wilteus + support&agama.tv +24900 + Aurora Networks (GoBackTV) + Dave Baran + dbaran&aurora.com +24901 + Future Infonet + Hyun-Su Choi + solution&futureinfonet.com +24902 + Astech Corporation + horie toshio + t_horie&astech.co.jp +24903 + Corporación Politécnica Nacional de Colombia + Igor Madera Sepulveda + imadera&gmail.com +24904 + Skyrecon Systems SA + François-Philippe IL GRANDE + francois&skyrecon.com +24905 + Nextsense Ltd + Daniel Danilovski + daniel&nextsense.com +24906 + Electronic Solutions Ltd. + Chris Jones + chris.jones&e-s.co.uk +24907 + Infopact Netwerkdiensten B.V. + Support at Infopact + support&infopact.nl +24908 + Stored IQ, Inc.
+ Janos Haide + iana&storediq.com +24909 + Secure-24, LLC + Sean E. Millichamp + sean.millichamp&secure-24.com +24910 + OOO "ElitStudio" + Alex Unigovsky + admin&compot.ru +24911 + Foxbyte Ltd + Ian Normington + ian&foxbyte.co.uk +24912 + ASELSAN INC. + Ahmet Burak NASUHBEYOGLU + abnasuh&aselsan.com.tr +24913 + Cook Group Incorporated + Kyle Hopkins + kyle.hopkins&cookgroup.com +24914 + WSOFT, Lda. + Eduardo Rodrigues + geral&wsoft.pt +24915 + iG Internet Group do Brasil Ltda + Daniel Theodoro + email-ig&ig.com.br +24916 + Essent Corporation + Stephen Luisser + support&essent.com +24917 + Intermedix Corporation (formerly 'Collaborative Fusion, Inc.') + Bryan Kaplan + bryan.kaplan&intermedix.com +24918 + Radmer + Randy Radmer + radmer&gmail.com +24919 + Web Wizard Design + Max Schubert + security&webwizarddesign.com +24920 + Our Help Desk + Max Schubert + max&webscorpion.com +24921 + MAINDATA, spol. s r.o. + Mr. Dusan Statelov + statelov&maindata.info +24922 + VoX Communications, Inc. + Phil D'Amore + damorep&voxcorp.net +24923 + Shodor Education Foundation, Inc + Simon Karpen + skarpen&shodor.org +24924 + Unicentrix Solutions + Al Gonzalez + Al.Gonzalez&Unicentrix.com +24925 + Jordet + Stian Jordet + stian_web&jordet.nu +24926 + LAIKA, Inc. + Mahlon E. Smith + mahlon&laika.com +24927 + Exegy, Inc. + David Schuehler + dschuehler&exegy.com +24928 + WaterEd Australia Pty Ltd + Cameron Miller + cmiller&icewarm.com.au +24929 + vt100 digital solutions + Harald Klein + info&vt100.at +24930 + synedra information technologies GmbH + Thomas Pellizzari + t.pellizzari&synedra.com +24931 + Mixed Signals, Inc. + Sam Barone + oid-admin&mixedsignals.com +24932 + Mcomms Design + G. Simonds + info&mcommsdesign.com +24933 + angel bosch + angel bosch + muzzol&gmail.com +24934 + ISCG Ltd. + Piotr Olszewski + piotr.olszewski&iscg.pl +24935 + TetraNed + Edwin de Haan + e.dehaan&tetraned.nl +24936 + Snecma Propulsion Solide, Groupe SAFRAN + Chantal Dessaints + chantal.dessaints&snecma.fr +24937 + Universal Display and Fixtures Company + Alan Bunch + admin&udfc.com +24938 + Technolabs S.p.A. + Umberto Innocente + Umberto.Innocente&technolabs.it +24939 + qpass + Martin Sperl + admin&rt.solutions.qpass.com +24940 + Department of Computer Science, National Chiao Tung University + Jui-Nan Lin + jnlin&csie.nctu.edu.tw +24941 + EMKA Technologies SA + David RYCHEN + d.rychen&emka.fr +24942 + Kyle Fox DBA DigitalFennec Networks + Kyle Fox + kyle.fox&kayfox.org +24943 + Rigpa + Joe Mellon + joseph.mellon&wanadoo.fr +24944 + Kreuzinger IT-Trilogie + Bernd Kreuzinger + bernd&kreuzinger.de +24945 + ParaSun Technologies Inc. + Network Operations Group + noc&parasun.com +24946 + Corredoira Abogados + José E. Corredoira Rodríguez + jecr&corredoira.biz +24947 + Shanalyst Corporation + Yizhong Sha + ysha&shanalyst.com +24948 + Mitsubishi Materials Corporation + Hiroyuki Unoki + network&mmc.co.jp +24949 + Sentillion, Inc + Ron Arbo + ron.arbo&sentillion.com +24950 + Uniform Industrial Corp. + Wyllie Peng + wyllie&maruco.url.com.tw +24951 + Industrial Video and Control, LLC + Oliver Jones + ojones&ivcco.com +24952 + Cool Bananas Limited + Marshall Brown + marshall&coolbananas.co.nz +24953 + Numachi + Brian Reichert + reichert&numachi.com +24954 + Mobile Complete + Shlomi Gian + sgian&mobilecomplete.com +24955 + SyncCast + Sean Hsu + shsu&synccast.com +24956 + Innovative Electronic Designs, Inc.
+ Ken Tench + ktench&iedaudio.com +24957 + Onoclea + Pawel Sawicki + pawel.sawicki&pawel-sawicki.com +24958 + Ascent Media Group + Ken Martinek + websrvcs&ascentmedia.com +24959 + Pankerl - Media + Florian Pankerl + oid.admin&pankerl-media.de +24960 + The Sip-router Project (formerly 'iptel.org') + Jan Janak + admin&sip-router.org +24961 + Tail-f Systems AB + Martin Bjorklund + mbj&tail-f.com +24962 + Atlantic Harbour Limited + David Abensour + David.Abensour&atlanticharbour.com +24963 + SiliconVortex + Bryan Stenson + bryan.stenson&gmail.com +24964 + Shanghai Qianjin Electronic Equipment Co.,ltd + Dezhi gui + gdz198&catv-shqj.cn +24965 + Picdar Technology Limited + Andy Heather + support&picdar.com +24966 + Greenwich Hospital + Nassar Nizami + nassarn&greenhosp.org +24967 + EPCON Sp. z o.o. + Pawel Arnikowski + p.arnikowski&epcon.pl +24968 + Beijing SecuWard Information Security Technology Co.,Ltd + ChunChen Ma + macc&secuward.com +24969 + Wharfe Electronic Data Ltd + Nick Brown + nick&wharfedata.co.uk +24970 + InterWay, s.r.o. + Richard Holly + rho&interway.sk +24971 + GIDE LOYRETTE NOUEL + Patrick CHAUVIN + chauvin&gide.com +24972 + Lipman Electronic Engineering Ltd + Gilad Hirsch + giladh&lipman.co.il +24973 + Guilin Jharden Communication CO., LTD + Genzhong Liao + lgz9988&163.com +24974 + Lootah + Hamza M Sahib + hamza&lootah.com +24975 + WINGS Ltd. + Igor Starkov + info&wsoft.ru +24976 + OmegaSphere Inc. + Daniel Dent + ddent&omegasphere.net +24977 + Sxip Identity + Ian Brown + ian&sxip.com +24978 + Martin Henschke Geraetebau + Martin Henschke + martin&dr-henschke.de +24979 + madpilot.net + Guido Falsi + iana&madpilot.net +24980 + CERVICEM + DUVERNOY Thierry + tduvernoy&free.fr +24981 + Amex Information Technologies Ltd. + Aniket Jathar + aniketj&amexit.com +24982 + Fiebig+Team GmbH + Andreas Tikart + Andreas.Tikart&Fiebig-Team.de +24983 + Tod Lewin + Tod Lewin + StoneCutter&TodLewin.com +24984 + Kestral Computing Pty Ltd + Mike Rochow + miker&kestral.com.au +24985 + Xeround Systems + Iris Kaminer + iris.kaminer&xeround.com +24986 + Bjørn Ruberg konsulenttjenester + Bjørn Ruberg + bjorn&ruberg.no +24987 + Ortiva Wireless Inc. + Ortiva Support Team + support&ortivawireless.com +24988 + Singh Travels + Deep Sandhu + webmaster&singhtravels.com +24989 + Synalogic e.K. + Markus Kramer + iana-oid&synalogic.de +24990 + Bluestem Ltd. + Systems Administration + helpdesk&bluestem.co.uk +24991 + CardContact Systems GmbH + Andreas Schwier + andreas.schwier&cardcontact.de +24992 + Toko University + Olaf Fichtner + computer&mail.toko.edu.tw +24993 + MySQL, Inc. + Marc Paley + mpaley&mysql.com +24994 + nabios GmbH + Valentin Demmel + valentin.demmel&nabios.com +24995 + Integratech S.A. + Mariano Bianchi + mbianchi&integratech.com.ar +24996 + SAFE Health + Matthew Burch + matthew.burch&safehealth.com +24997 + Cirrologic Ltd + Rob Lyle + admin&cirrologic.co.uk +24998 + SBA Technologies, Inc + Hesham Elbaga + helbaga&sba-tech.com +24999 + VATSIM.net + Richard Critz + cz&vatsim.net +25000 + ConSol Consulting & Solutions Software GmbH + Network Operating Group + nog&consol.de +25001 + Codra Ingenierie Informatique + Jean-Claude Hallynck + jc.hallynck&codra.fr +25002 + VistiC Ltd. + Peter Nunn + peter.nunn&vistic.net +25003 + SQUILD + Matthias Tinnemeier + m.tinnemeier&squild.de +25004 + AEGON Magyarorszag Rt. + Laszlo Toth + tothlaszlo&aegon.hu +25005 + sevecek + Ondrej Sevecek + ondra&sevecek.com +25006 + NVision Czech Republic a.s. 
(formerly 'SITRONICS Telecom Solutions, Czech Republic a.s.') + Jan Aftanas + jaftanas&nvision-group.com +25007 + Precesamiento Digital y Sistemas S.L. + JOSE VALLADARES + JVALLADARES&PRODYS.NET +25008 + I.T.E.N.O.S. GmbH + Stefan Moeding + oidadmin&itenos.net +25009 + Sysworksoft + Patrick Garnier + patrick.garnier&sysworksoft.net +25010 + Bedag Informatique SA + Patrice Bonnet + osm.ism&bedag.ch +25011 + Cranite Systems Inc. + Vasu Murthy + vmurthy&cranite.com +25012 + Minux Bt. + Zoltán Fekete + fekete.zoltan&minux.hu +25013 + RS Consulting + Reiner Schmidt + reiner.schmidt&rs-consulting.de +25014 + Visimetrics (UK) Ltd + Dr Barry Mcdonald + mcdonald&visimetrics.com +25015 + 2PM Technologies Ltd + Andrew Ramsdale + snmp&2pmtech.co.uk +25016 + Vizrt/Ardendo AB + Mikael Wahlberg + mwa&vizrt.com +25017 + CertSign + Cristian Garabet + office&certsign.ro +25018 + Proxyconn Ukraine + Andrew Kozachenko + andrew&proxyconn.kiev.ua +25019 + ProfiTrade 90 Ltd. + Péter Dohányos + peter.dohanyos&profitrade.hu +25020 + MobilMail Ltd. + Herbert Straub + herbert.straub&mobilmail.at +25021 + Devon County Council + Robert Slack + webadmin&devon.gov.uk +25022 + Medox Exchange, Inc. + Michael Beck + mebeck38&gmail.com +25023 + Cyberspace Technology Corporation Ltd. + Hiroyuki Ikawa + ikawa&cst.co.jp +25024 + Auriga + Vincent Gaudeul + technique&auriga.fr +25025 + Concentrum, Inc. + Thomas B Winans + tom.winans&concentrum.com +25026 + Linear Equipamentos Eletrônicos S.A. + Pedro Dionísio Pereira Júnior + pedro&linear.com.br +25027 + Billerud AB + Tomas Einborn + tomas.einborn&billerud.com +25028 + WebService Sp. z o.o. + Pawel Gorazda + pawel.gorazda&webservice.pl +25029 + Antek Healthware LLC + Daniel Schipper + dschipper&antekhealthware.com +25030 + Ward-Beck Systems + Anthony P. Kuzub + APK&Ward-Beck.Systems +25031 + Network Orange Inc + Jim Skoog + jskoog&bocasw.com +25032 + DACI Comércio e Serviços de Equipamentos de Informática Ltda. + Marcos Oliveira + marcos.oliveira&daci.com.br +25033 + Naztec, Inc. + Bryan Beyer + bryan&naztec.com +25034 + Dima Ltda. + Oxiel Contreras + ocontreras&dima.com.bo +25035 + JINAN DEAN COMPUTER TECHNOLOGY CO LTD + JiWei Wang + great_wjw&163.com +25036 + Kantonsschule Romanshorn + Andreas Brunnschweiler + ksro&bluewin.ch +25037 + Novovia + Olivier GAUDE + contact&novovia.com +25038 + denver health + David Boone + David.Boone&dhha.org +25039 + vitroconnect systems GmbH (formerly 'infinity-3 GmbH') + Christian Kleinewaechter + christian.kleinewaechter&vcsys.de +25040 + Keytronix GmbH. + Marcello Presulli + m.presulli&keytronix.com +25041 + Microsol Ltd. + Adrian Kearney + adriank&microsol.ie +25042 + JDM Software BV + Frank Jong + f.jong&jdm.nl +25043 + litts.net + security + security&litts.net +25044 + CapWIN + Joe Kemp + jkemp&capwin.org +25045 + MET/Administration wallonne/Belgique + Christine Vandesteene + cvandesteene&met.wallonie.be +25046 + GlobalTouch Telecom + Paul Leonovich + pleonovich&xcastlabs.com +25047 + Fluid4 Studios + Alan Milford + oid&fluid4.net +25048 + Synchronica PLC + Richard Godfrey + richard.godfrey&synchronica.com +25049 + Opengear Inc. + Peter Hunt + support&opengear.com +25050 + IT Knows ApS + Lars Knudsen + larsgk&gmail.com +25051 + Pythagoras Software (UK) + Benjamin Donnachie + support&pythagoras.no-ip.org +25052 + Idya Bt. + Zsolt Szloboda + slobo&t-online.hu +25053 + Ruckus Wireless, Inc. + Michael Lin + mlin&ruckuswireless.com +25054 + Carillon Information Security Inc. 
+ Patrick Patterson + ppatterson&carillonis.com +25055 + Oxford ArchDigital + Andrew Larcombe + andrew&oxarchdigital.com +25056 + Hungarian Television Corporation + Gabor Peto + gabor.peto&mtv.hu +25057 + Rural Technologies Inc + Scott Mark + csmark&ruraltechinc.com +25058 + Bain Capital LLC + Jona Kee + jkee&baincapital.com +25059 + Abicom International + Mr David Edwards + dave.edwards&abicom-international.com +25060 + pbxnsip Inc. + Kevin Moroz + km&pbxnsip.com +25061 + Domaingruppe Riemann2000.de + Axel Riemann + axel&riemann2000.de +25062 + MessageOne Inc + Keta Sridhar + keta.sridhar&messageone.com +25063 + American Tribu Corp + Andres Paglayan + andres&paglayan.com +25064 + Women's Health Services + Andres Paglayan + it&whssf.org +25065 + Identity Engines Inc. + Anil Gopinath + anil.gopinath&idengines.com +25066 + PELCO + Randy Williams + rwilliams&pelco.com +25067 + G Central + Sergey Galtsev + support&gcentral.biz +25068 + Axistech + Greg Butler + gbfunk&gmail.com +25069 + LISA! Reisen GmbH + Alexander Greim + alexxx&iltempo.de +25070 + MULTICERT - Servicos de Certificacao Electronica S.A. + Jose Eduardo Pina Miranda + jose.miranda&multicert.com +25071 + Teradici Corporation + Jeff Dillabough + jdillabough&teradici.com +25072 + LFW ENTERPRISES, INC. + MARTIN WRIGHT + MWRIGHT&LFWENTERPRISES.COM +25073 + Thinking Phone Networks, Inc. + Aaron Evans + aaron&thinkingphones.com +25074 + Presbyterian Healthcare Services + Jeremy Vanderburg + jvanderb&phs.org +25075 + Inventec Multimedia & Telecom(Tianjin) Co.,Ltd. + Peter Hu + hu.xian-chen&importek.com +25076 + 45RU TRUST + Trent Lloyd + trentl&hostaway.net.au +25077 + BGC Australia Pty Ltd + Andrew Buckeridge + andrewb&bgc.com.au +25078 + MKQ Internetservice + Manuel Krebs + oinnmb&domainrobot.mkq.de +25079 + Gyanasoft Inc + Srinivasan Murari + smurari&gyanasoft.com +25080 + Beluga Software, Inc. + Jason McKee + it&belugasoftware.com +25081 + Universidad de Chile + Kristian Brinckmann + kbrinckmann&med.uchile.cl +25082 + FishNet Connect + Irik Anderson + irik&future-proof.net +25083 + Sirius Creations + David Lillie + dreamkeeperscomic&gmail.com +25084 + Argent Productions + Daniel Collins + admin&argentproductions.com +25085 + Metrocast Cablevision LLC + Aaron J Brace + abrace&metrocastcablevision.com +25086 + Sydney Missionary and Bible College Ltd. + Scott Tester + itdept&smbc.com.au +25087 + HWS + Evgeny V Illyushko + info&hws.ru +25088 + Transactional Records Access Clearinghouse + System Administrator + tracadmin&gwmail.syr.edu +25089 + DSK Bank PLC + Victoria Dimitrova + victoria.dimitrova&dskbank.bg +25090 + Ambientia Oy + Henri Sora + iana&ambientia.fi +25091 + Piranha Messtechnik GmbH & Co. KG + Rüdiger Köpke + Ruediger.Koepke&piranha-messtechnik.com +25092 + Aptic AB + Daniel Gustafsson + daniel.gustafsson&aptic.se +25093 + GreysonBaines Innovation + Bill Barry + bill&greysonbaines.com +25094 + Vonage Holdings Corp + Matthew Ragan + mdr&vonage.com +25095 + XenIT GmbH + Pascal Schneider + pascal.schneider&xenit.ch +25096 + Maimonides Medical Center + Joe Gallagher + jgallagher&maimonidesmed.org +25097 + TJX Companies + David McLure + david_mclure&tjx.com +25098 + Reflectent Software, Inc. + Norman Wright + nwright&reflectent.com +25099 + H.E.B. Company Inc. + Chris Maier + maier.chris&heb.com +25100 + Hochschule Vechta + Sven Schoeppner + sven.schoeppner&uni-vechta.de +25101 + Dataarmor Pvt Ltd. 
+ Nitin Kataria + kataria.n&gmail.com +25102 + Phyxia Networks + Network Operations + noc&phyxia.net +25103 + Netsol International Argentina S.A. + Nestor Charczuk + ncharczuk&netsolintl.com.ar +25104 + Gannett Co., Inc. + IP Administrator + ipadmin&gannett.com +25105 + Cable & Wireless Jamaica + Kevin McDowell + kevin.mcdowell&cwjamaica.com +25106 + CSE Sh.p..k. + Dr. Blerim Rexha + office&cse-ks.com +25107 + Stan Winston Studio + Alan Wollenstein + sys&stanwinston.com +25108 + iRadeon, Inc. + Don Werve + donw&iradeon.net +25109 + Wireless Edge Canada Inc. + Kevin Stadlmayer + kevin.stadlmayer&wirelessedge.ca +25110 + Vanguard Technology Corp. + Tanda Headrick + tanda&vanguard-tech.com +25111 + VODCA + Stefano Gianolini + stefano&gianolini.ch +25112 + Top Gun Drywall Supply + Caleb Walker + caleb&topgundrywall.com +25113 + Konstructiv Ltd. + Vitaly Tsaregorodtsev + admin&konstructiv.by +25114 + SSE INFONET LTD + Jie Liu + jieliu&sse.com.cn +25115 + Delbuilt(Pty)Ltd + Pieter van der Westhuizen + delbuilt&lantic.net +25116 + EBInteractive + Erick BULLIER + ebi.iana&ebinteractive.net +25117 + ID Innovative Datenverarbeitung GmbH + Robert Wolf + robert.wolf&id-software.de +25118 + StanaPhone LLC + Igor Balk + ibalk&intermedia.net +25119 + Adder Technology Limited + Ian Miller + iana-reg&adder.com +25120 + SAGUAPAC + Andres J. Mendez G. + mendez.andres&saguapac.com.bo +25121 + Temex Sync + Marc Le Meur + temex_sync&yahoo.fr +25122 + Hauk & Sasko GmbH + Thomas Welsch + welsch.public&hauk-sasko.de +25123 + Frequentis Orthogon GmbH + André Schüssler + andre.schuessler&frequentis.com +25124 + Trade-On-Technology Holdings + Jan Dijkstra + jdijkstra&lantic.net +25125 + ALWIL Software + Ondrej Vlcek + vlk&avast.com +25126 + Jackson State University + Jennifer Rawls + jennifer.a.rawls&jsums.edu +25127 + Radialink Corp + Lutful Khan + lkhan&radialink.com +25128 + BreastScreen Victoria + Jacob Seleznev + jseleznev&breastscreen.org.au +25129 + SFBC Anapharm Inc. + Martin Normand + mnormand&anapharm.com +25130 + Scouts of Greece, 10th Group of Akropoli, Thessaloniki + Christos Triantafyllidis + iana&e-scouts.gr +25131 + P-tec Co., Ltd. + Takanobu Kimizuka + kimizuka&p-tc.com +25132 + Redeal Ltd + Peter Jameson + PJameson&redeal.co.nz +25133 + HKU School of Professional and Continuing Education + Wyman Chan + wyman.chan&hkuspace.hku.hk +25134 + ComDesign Inc. + Kenji Terao + terao&comdesign.co.jp +25135 + Ministerul pentru Societatea Informațională (formerly 'Ministerul Comunicatiilor si Tehnologiei Informatiei') + Augustin Jianu + augustin.jianu&msinf.ro +25136 + Haute Ecole Arc + Romain Voumard + romain.voumard&he-arc.ch +25137 + Voronezh State Pedagogical University + Vladimir Bukhal + vbux&vspu.ac.ru +25138 + Alyseo + Yacine Kheddache + yacine&alyseo.com +25139 + Adept Internet (Pty) Ltd + Gideon le Grange + gideon&adept.co.za +25140 + Institut Catholique de Toulouse + Responsable Informatique + resp.info&ict-toulouse.asso.fr +25141 + La Poste + Frédéric Le Bastard + frederic.lebastard&laposte.fr +25142 + IP Control Systems Ltd. + Paul Anderson + paul&ip-cs.com +25143 + Damovo Deutschland Gmbh & Co. KG + Joachim Zirwes + joachim.zirwes&damovo.com +25144 + Perpustakaan Institut Pertanian Bogor + Adi Sujiwo + jiwo&ipb.ac.id +25145 + Netzwerk für Kinder- & Jugendarbeit e.V. + Holger Kaßner + holger.kassner&kijunetzwerk.de +25146 + Cyprus Research and Academic Network + Agathoclis Stylianou + secretariat&cynet.ac.cy +25147 + Internet Solutions + JC Burger + jc&is.co.za +25148 + Pivot3, Inc. 
+ Thomas Grieff + tomg&pivot3.com +25149 + National Electronics and Computer Technology Center + Suriya U-ruekolan + suriya.u-ruekolan&nectec.or.th +25150 + KK-DENSO.Co.Ltd + osamu yonezawa + yonezawa&kk-denso.co.jp +25151 + Dragontech Corporation Limited + Mr. John Chan + info&dragontechcorp.com +25152 + UpTime New Zealand Ltd. + Paul Steffensen + paul&uptime.co.nz +25153 + AvanSec + Saifi Khan + manager&avansec.com +25154 + Seeker Wireless Pty Ltd + Geoff Field + geoff&seekerwireless.com +25155 + Unisis Solutions Corporation + Chen Wang + ch.wang&unisis.cn +25156 + GECI GmbH + Martin Adler + martin.adler&geci.de +25157 + Beckhoff Automation GmbH & Co. KG + Daniel Gonzalez + d.gonzalez&beckhoff.com +25158 + Aros Magic + Arun Sagar + iana.enterpriseid&arosmagic.com +25159 + OJSC Svyazintek + Igor Kozlov + igor.kozlov&svyazintek.ru +25160 + Arackal Digital Solutions Inc + Dorin Neacsu + dneacsu&arackal.com +25161 + Telavox AB + Simon Kihlberg Wallström + simon.kihlberg.wallstrom&telavox.se +25162 + Industrias I, S.C. + Jose Julio Cidon Peon + julio.cidon&industriasi.com +25163 + Broadband Power Solutions + Marique Nicolas + n.marique&cet.be +25164 + Interpay + V.Lamers + vjl100&hotmail.com +25165 + FMN communications GmbH + Heiko Teichmueller + heiko.teichmueller&fmn.de +25166 + FH Muenster + Robin Naundorf + r.naundorf&fh-muenster.de +25167 + Genexis B.V. + M. Egmond + m.egmond&genexis.nl +25168 + Edentity Labs Ltd + Host Administration + hostmaster&edentitylabs.com +25169 + Avco Systems Ltd + Adam Barratt + netmaster-oid&avcosystems.com +25170 + AFA Systems srl + Francesco Amorosa + famorosa&afasystems.it +25171 + Armbruster IT + Stefan Armbruster + stefan&armbruster-it.de +25172 + Integra Soluciones Avanzadas, S.L. + Sean C. McCarthy + informacion&integraas.com +25173 + MET-Ministère de l'Equipement et des Transports + Christine Vandesteene + cvandesteene&met.wallonie.be +25174 + NetOp as + Erik Larsen + InternIT&netop.no +25175 + DCN + Philippe FROISSANT + Philippe.Froissant&DCN.fr +25176 + Familie Uhlig + Steffen Uhlig + steffen&familie-uhlig.net +25177 + VAS "Latvijas Pasts" + Aivars Junga + aivars.junga&pasts.lv +25178 + GÉANT (formerly 'Trans-European Research and Education Networking Association (TERENA)') + Nicole Harris + aai-is&lists.geant.org +25179 + GESA Elektronik GmbH + Martin Schweden + martin.schweden&gesa-elektronik.de +25180 + Itonis Ltd. + Antonin Kral + antonin.kral&itonis.tv +25181 + HEBUS SARL + emmanuel manent + ema&hebus.info +25182 + University of Tampere + Leena Heino + leena.heino&uta.fi +25183 + SS WorkGroup Solutions + Hendrik JA Meyer + HendrikM&ssws.co.za +25184 + Zinwave Limited + Mike Smith + msmith&zinwave.com +25185 + Banco Espírito Santo, S.A. + João Alves + jfa&bes.pt +25186 + Uwe Daube + Uwe Daube + TRA_Sniber&web.de +25187 + A.Ir.Br. Associations des Ingénieurs sortis de l'Université Libre de Bruxelles, asbl + A. Pening + airbr&ulb.ac.be +25188 + Bob Jones University + Tom Berg + tberg&bju.edu +25189 + INVIA + Edward Ficaro + eficaro&inviasolutions.com +25190 + BAE Systems Applied Intelligence + Liam Marwood + liam.marwood&baesystems.com +25191 + Smart Bear Inc + Jason Cohen + jason.cohen&smartbearsoftware.com +25192 + Positive Networks, Inc. + Sean Scott + noc&positivenetworks.net +25193 + Wireless Nomad Co-operative Inc. 
+ Steve Wilton + steve&wirelessnomad.com +25194 + Bizanga Ltd + Olivier Lemarié + ol&bizanga.com +25195 + Waha Oil Company + Abdulwart Heshek + ael_hsheek&yahoo.com +25196 + TM Software + Thorr Tjorvi Einarsson + tte&t.is +25197 + eLearningRecord + Stephen Kidney + stephen.kidney&elearningrecord.com +25198 + SmartServ Web Hosting & Design + Brandon Penglase + brandon&smart-serv.net +25199 + Kobe University + centersystem kakari + nextsys-license&istc.kobe-u.ac.jp +25200 + TurkTel Ltd. + Tanju Cataltepe + tanju&turktel.net +25201 + Uniwell Electronics Ltd. + Hu Yuxin + huyx&uniwell.com.cn +25202 + Beechler Real Estate Services + Shane Kumpf + sak&mtco.com +25203 + Pete Rushmere + Pete Rushmere + pete&rushmere.org +25204 + Prodato Integration Technology GmbH + Markus Schneider + Markus.Schneider&prodato.de +25205 + Movial Corporation + Karri Niskala + oid-admin&movial.fi +25206 + Axxess Identification Limited + Fred Van Eekeren + fred&axxessid.com +25207 + JSC "ATLAS" + Arsen Banduryan + iana&atlas.ua +25208 + CSC Scandihealth A/S + Torsten Jordt + tjordt&csc.com +25209 + CURON Inc. + Ju, Yong Hun + zoo&curon.co.kr +25210 + bizEbox + Scott McNee + scott&bizebox.com.au +25211 + Edison Electric Corp. + Mr. A. Faris + faris&eege.com +25212 + Initworks B.V. + Jop Zinkweg + info&initworks.com +25213 + Nexus Web AS + Raymond Kristiansen + raymond&nexusweb.no +25214 + Minnesota Judicial Branch + Mike Boettcher + mike.boettcher&courts.state.mn.us +25215 + Novacell Solutions Ltd + Mr Paul Brotherton + Paul.Brotherton&novacellsolutions.com +25216 + ADI Video Technologies + Nahum Aharon + nahum&adi-vt.com +25217 + Nautilus Hyosung + Chang, bo-ick + icarus&hyosung.com +25218 + httv + Olivier Bompuis + olivier.bompuis&httv.fr +25219 + RPSoft + Georges Bacha + info&rpsoft.com +25220 + sportcentric Systems + Paul Nelson + iana&sportcentric.com +25221 + Viewpoint Construction Software + Rob Humphreys + robh&viewpointcs.com +25222 + Blue Gecko Inc. + Jonathan Nicol + jnicol&bluegecko.net +25223 + Mindbreeze Software GmbH + Mr. Daniel Fallmann + daniel.fallmann&mind-breeze.com +25224 + Grupo Pochteca SA de CV + Jose Mondragon + sistemas&pochteca.com.mx +25225 + The Austin Diagnostic Clinic, P.A. + Ned Euwer + neuwer&adclinic.com +25226 + MaxMD + Philip J Grabner + +1.201.963.0005 +25227 + Ohio Department of Transportation + Spencer Wood + spencer.wood&dot.state.oh.us +25228 + NAV Canada + Alexey Ponomarev + ponomaa&navcanada.ca +25229 + Swedish Alliance for Middleware + Leif Johansson + leifj&it.su.se +25230 + The Ohio Casualty Insurance Company + Jerry Edgington + Jerry.Edgington&ocas.com +25231 + Riverforge LLC + Shawn South + shawn&riverforge.com +25232 + Linog consulting + Francois Logier + francois.logier&linog-consulting.com +25233 + ACS + Dennis Cox + Dennis.Cox&acs-inc.com +25234 + Emdeon Corporation + Matthew Kiser + mkiser&emdeon.com +25235 + Windrush Frozen Foods Ltd + Robbie Roberts + robbie.roberts&windrushfrozen.com +25236 + Invitrogen Corporation + John Merritt + john.merritt&invitrogen.com +25237 + L-3 Communications/Narda Satellite Networks + Tim Duffy + tim.duffy&l-3com.com +25238 + GreenLight Networks + Bill Uhl + bill&greenlightnet.com +25239 + Tknika + Igor Blanco + iblanco&tknika.net +25240 + Peggy A. Pugh, MD, CPC + James Gula + jlgula&papugh.com +25241 + Proximus Information Technologies + Tom Kidd + tkidd&proximus.ca +25242 + ACTORS CONSULTING + jean-michel FAVRE + actors&actorsconsulting.fr +25243 + Agglut Technologies, Inc. 
+ Vikas Ratna + vikas&agglut.com +25244 + BrightNet Oklahoma + Jackie Bates + noc&brightok.net +25245 + Order of St. Charbel + Bruce Sabalaskey + webmaster&charbelites.org +25246 + South-Russia State Technical University + Timur Burykin + 0x09&mail.ru +25247 + DANDELION C.A. + Cesar Villanueva + dandelionca&gmail.com +25248 + Solidcore Systems Inc + Chiradeep Vittal + chiradeep&solidcore.com +25249 + Dispuut Interlink + Rink Springer + rink&il.fontys.nl +25250 + AnQ Systems, Ltd. + TK Wang + tkwang&anqsystems.com +25251 + Princeton Alliance Church + David Roux + it&princetonalliance.org +25252 + vptechnologies srl + Stefano Veltri + stefano.veltri&vptech.it +25253 + MFEC Public Company Limited + Jaruwat Boonmee + jaruwat&mfec.co.th +25254 + Makena Technologies, Inc. + Bruce Benda + iana&thereinc.com +25255 + Broadsoft Systems + Gerry Pyne + gerry&broadsoft.com.au +25256 + Nable Communications, Inc. + Sunghyuk Kim + picapitt&nablecomm.com +25257 + Sipera Systems Inc + Piyush Bhatnagar + piyushb&sipera.com +25258 + Randombit Consulting + Jack Lloyd + lloyd&randombit.net +25259 + JWR Software Services Ltd + Jim Ramsay + jim_ramsay&blueyonder.co.uk +25260 + Frauerpower! + Leah Cunningham + leah&frauerpower.com +25261 + Westenberg & Kueppers GbR + Mr. Sebastian Kueppers + technik&wk-serv.de +25262 + Network Automation + Jan Ulander + j.ulander&networkautomation.se +25263 + mareco gmbh + Markus Zimmermann + Markus.Zimmermann&mareco.biz +25264 + Propolys + Aurelien Bompard + abompard&propolys.com +25265 + Dave Pusey - IT Services + Dave Pusey + info&davepusey-itservices.co.uk +25266 + FUH EOS + Dariusz Telinga + eos&post.pl +25267 + Virtual Synthesis + David McLure + dpm&virtualsynthesis.net +25268 + enfon + Hans-Peter Kaliba + office&enfon.com +25269 + CFNetTools + Eric Lackey + eric.lackey&gmail.com +25270 + MeepZor Consulting + Ken Coar + Ken.Coar&Golux.Com +25271 + Lotus Interworks, Inc. + Venkat Arvapally + varvapally&lotusinterworks.com +25272 + GRUPO XMARTS SA DE CV + Alvaro Ivan parres Peredo + arabe&xmarts.com.mx +25273 + Konsole Network + Michael Wesner + mike&konsole.net +25274 + IT2Media GmbH + Jueren Thurner (jueren.thurner&it2media.de), B.  Eschrich (bjoern.eschrich&is4it.de) + bjoern.eschrich&is4it.de +25275 + Peter Hall + Peter Hall + whiz100&hotmail.com +25276 + Gear6 + Martin Patterson + mpatterson&gear6.com +25277 + Pekao Inc. + Patryk Kurek + megido2&o2.pl +25278 + Canon Electronics Inc. + Nozomi Masao + masao&canon-elec.co.jp +25279 + Nasdaq MTS (formerly 'OMX Group') + Pamela Duffy + pamela.duffy&nasdaq.com +25280 + Market Pulse International + Paul Rainey + PaulR&marketpulse.biz +25281 + Westerstrand Urfabrik AB + Mats Sunesson + ms&westerstrand.se +25282 + MDK Baden-Württemberg + Daniel Schempf + daniel.schempf&mdkbw.de +25283 + CONSEIL GENERAL DU CHER + Directeur des systemes d'information + dir.si&departement18.fr +25284 + Taylor Made Computer Solutions Ltd + Bob Osola + bobo&tmcs.co.uk +25285 + Beheer-Net + Wietse W. Jonker + ianamail&beheer-net.nl +25286 + Open Logic Solutions Ltd. + Craig Smith + craig.smith&openlogic.co.uk +25287 + Megabit Informationstechnik GmbH + Michael Benten + technik&megabit.net +25288 + onShore Development + Eric Dodson + eric&onshored.com +25289 + Alabama A&M University + Sang Han + shan&aamu.edu +25290 + Convert Italia SpA + Marco DE CATALDO + mdecataldo&convertitalia.com +25291 + Zeacom Ltd. 
+ Peter Bonham + peter&zeacom.com +25292 + Skagit County Government + Lewis Bogan + lewisb&co.skagit.wa.us +25293 + MaxMD + Philip J Grabner + grabner&max.md +25294 + EESTEC International + Florian Friesdorf + ldapmaster&eestec.net +25295 + Affiniti + Dave Janes + david.janes&affiniti.com +25296 + VeriCenter, Inc + Jobey Smith + jsmith&vericenter.com +25297 + Hotxt Ltd + Jon Topper + jon.topper&hotxt.net +25298 + Rohm and Haas Company + Steven Kradel + skradel&rohmhaas.com +25299 + Rodic M&B-Co d.o.o. + Milan Stanic + mps&oss.co.yu +25300 + OfficeNet AS + Torstein Tauno Svendsen + torstei&officenet.no +25301 + Netauth Consulting + Mike Jackson + michael.jackson&netauth.com +25302 + Deakin University + Daniel Appleby + daniel.appleby&deakin.edu.au +25303 + subnetz.computing GmbH + Tilman Koschnick + til&subnetz.com +25304 + Demon Netherlands + Phil Pennock + iana-enterprise&nl.demon.net +25305 + Vodafone Holding GmbH, FPU-CEC + Rainer Bieniek + Rainer.Bieniek&vodafone.com +25306 + Sunmine Communications + James Goins + nvt6599qko&s5e.net +25307 + Mintel Group Ltd + Patrick van Staveren + pvanstaveren&mintel.com +25308 + MDES + Penny Allen + pallen&mdes.ms.gov +25309 + Manchester Airport (MHT) + Paul Connolly + pconnolly&flymanchester.com +25310 + Internet Society Luxembourg ASBL + System administrator + hostmaster&isoc.lu +25311 + RedeDominios.com + Renato Magalhaes + rsm&rededominios.com +25312 + NexTag Inc. + Satwant Jakher + sjakher&nextag.com +25313 + Visual Development + Chris Velazquez + chris&vdev.net +25314 + New Visual Wave Cable Communications Co., LTD. + Mark Chiang + mark&nvwtv.com.tw +25315 + E-wang information Systems Inc + Xiangming Guo + flydelong&tom.com +25316 + Karolinska Institutet + Eric Johansson + eric.johansson&ki.se +25317 + Ixanon AB + Thomas Sparr + thomas.sparr&ixanon.se +25318 + NEXCON Telecomunicaciones S.L. + Javier Leon Anta + mail&nexcon.es +25319 + AccessPt Inc. + Matt Hall + matt.hall&accessptinc.com +25320 + University of Vaasa + Hannu Hirvonen + hh&uwasa.fi +25321 + The American Board of Anesthesiology + Sal Scotto + sal.scotto&theABA.org +25322 + Alarius Systems LLC + Mike Oliver + ollie&alariussystems.com +25323 + AirPatrol Corporation + Mark Bauer + mbauer&airpatrolcorp.com +25324 + Rock Hill Telephone Company + John Johnson + John.Johnson&comporium.com +25325 + Trustserver S. L. + Bruno A. Crespo + bruno&trustserver.com +25326 + Linux-Help.org (formerly 'FurreVille') + Eric Renfro + erenfro&linux-help.org +25327 + Visible World Inc. + Erik van de Pol + erikp&visibleworld.com +25328 + Deasil Systems INC + felix sheng + felix&deasil.com +25329 + Henfield AB + Lennart Henang + lennart.henang&henfield.se +25330 + The Nation + Scott Klein + scott&thenation.com +25331 + NOW! Consulting GmbH + Martin Otto + martin.otto&now-consulting.de +25332 + CADLock, Inc. + IT Administrator + admin.it&cadlock.com +25333 + Packetframe, Inc. + Suresh Venkatraman + sureshv&packetframe.com +25334 + Ashantiplc Limited + James Pauls + info&spywaredefense.com +25335 + SEAMAX MANUFACTURING PTE.LTD + jian_zhang + dvdrw-zj&126.com +25336 + DSO National Laboratories + Ng Siew Choo + siewchoo&dso.org.sg +25337 + ChinaCache + Qingrong.Jiang + qingrong.jiang&chinacache.com +25338 + RK Systems + Robert Kline + bkline&rksystems.com +25339 + EMC TECH CO., LTD. 
+ Dong Jin, Shin + djshin&emctech.net +25340 + Tiroler Gebietskrankenkasse + Martin Klingler + martin.klingler&tgkk.sozvers.at +25341 + Immosys LLC + Bill Anderson + bill-iana&immosys.com +25342 + IRCCS Policlinico S.Matteo + Andrea Gelmetti + a.gelmetti&smatteo.pv.it +25343 + Fachhochschule Frankfurt am Main - University of Applied Sciences + Anne Sommermann + asommer&dv.fh-frankfurt.de +25344 + BlackSpider Technologies Ltd. + Matt Dainty + matthew.dainty&blackspider.com +25345 + Conseil Général du Val d'Oise + Bruno PERRIN + bruno.perrin&valdoise.fr +25346 + SAMCA + Irene Marzo + imarzo&samca.com +25347 + Tetra Networking V.O.F. + Bas van Schaik + bas&dev.tetra.nl +25348 + SJB Communications + Sacha Bernstein + sacha-iana-pe&sjbcom.com +25349 + CDSw - City Data Software, s.r.o. + Michal Bursa + admin&cdsw.cz +25350 + Haymarket Publishing Services Limited + Elaine Bevan + elaine.bevan&haynet.com +25351 + AMAG Automobil- und Motoren AG + Marc Brem + it-sicherheit&amag.ch +25352 + Kuwait Meteorological Department + Amir Al Taho + aaltaho&yahoo.com +25353 + Gibson, Dunn & Crutcher LLP + Chris Fritzsche + cfritzsche&gibsondunn.com +25354 + Travel Svcs llc + Jack Trullinger, Jr. + jtrullinger&travelsvcs.net +25355 + Shenzhen Haishuo Technologies Co.Ltd + Hao Huang + hilomenbit&gmail.com +25356 + E2E IT Solutions Pty. Ltd. + Ross Johnson + RossJohnson&e2eit.com.au +25357 + Dotstream + Slava Chernobai + chernobai&gmail.com +25358 + HUBER+SUHNER AG + Daniel Böhm + dboehm&hubersuhner.com +25359 + Perspektiv Bredband AB + Jakob Borg + jakob&perspektivbredband.se +25360 + SMACL + Dominique Pineau + d-pineau&smacl.fr +25361 + Dr. Jan-Hendrik Dörner + Dr. Jan-Hendrik Dörner + iana&doerner.net +25362 + Headroom Broadcast GmbH + Klaus Feige + feige&headroom.de +25363 + Medical Imaging Software Amsterdam + Hans Kramer + hans.kramer&xs4all.nl +25364 + Studio Tre s.r.l. + Andrea Cotti-Cometti + acc&studiotre.it +25365 + Digitum Ltd. + Vassilios Goumas + goumas&dv.fh-frankfurt.de +25366 + Iptic Systems + Adam Casto + adam&iptic.com +25367 + Mobile Streams PLC + Stuart Slade + stuarts&mobilestreams.com +25368 + Association Crans + Stephane Glondu + oid-admin&crans.org +25369 + Reliact Solutions S.L. + Dave Camino + info&reliact.com +25370 + ExtendASP LLC + Lyman Brown + lyman.brown&gmail.com +25371 + Arcitecta Pty. Ltd. + Mr. Jason Lohrey + jason.lohrey&arcitecta.com +25372 + Pegasus Consultants (SW) Ltd + Chris Smith + chris&pegasus-sw.com +25373 + Universidade Estadual do Sudoeste da Bahia + Jose Marcos Araujo + jmarcos&nprsa.uesb.br +25374 + Universität Hamburg, Regionales Rechenzentrum / Hamburg University, Computing Center + Bettina Kuhlmann + bettina.kuhlmann&rrz.uni-hamburg.de +25375 + NextSysSecure, Inc. + Larry Stilwell + lstilwell&nextsyssecure.com +25376 + Suretec Systems Ltd. + Gavin Henry + ghenry&suretecsystems.com +25377 + Bauman Moscow State Technical University + Varnava Sergey + uneex&yandex.ru +25378 + Schrodinger, Inc. + Simon Gao + gao&schrodinger.com +25379 + American Chartered Bank + Rex Choi + rchoi&americanchartered.com +25380 + Michael D'Errico + Michael D'Errico + mike-asn&pobox.com +25381 + Rx Networks Inc. + Peter H. Mueller + peter&rxnetworks.ca +25382 + Nanite Services Limited + Peter Palmer + ppalmer&naniteservices.co.uk +25383 + TiChou + Sébastien MONBRUN + tichou+oid&tichou.org +25384 + Alliant Energy + Jordan Glover + jglover&alliantenergy.com +25385 + Societe anonyme de la Communication Securisee + Henrik Stein + iana.nospam&scsecure.ch +25386 + ELETEX Co. Ltd. 
+ Norihiko Kawanaka + kawanaka.no&eletex.co.jp +25387 + xcurenet + eric Lee + zhlee&xcurenet.com +25388 + Gadget Factory Ltd. + Tatsuya Aoyagi + ta&gfx.jp +25389 + Westcoast University of Applied Sciences + Dieter Rathmann + rathmann&fh-westkueste.de +25390 + Ulrich Flamm + Ulrich Flamm + ldap&ulrich-flamm.de +25391 + SECURE-IT SA + DE WILDE Thomas + info&secure-it.fr +25392 + inabensa + Jose Luis Gomez Bocanegra + joseluis.bocanegra&inabensa.abengoa.com +25393 + Alliant Technologies + Rich Crawford + Rich.Crawford&Alliantechnologies.com +25394 + Sunflower Technologies + Jim Kernahan + laboratory&sunflowertech.com +25395 + Aquila, Inc. + Gary Cauthon + AquilaTechSupport&aquila.com +25396 + Digital Alchemists GmbH + Matthias Meier + m.meier&digital-alchemists.ch +25397 + Primetime Medical Software + Matthew Ferrante + ferrante&medicalhistory.com +25398 + GeoForschungsZentrum Potsdam + Dr. Thomas Bleek + bl&gfz-potsdam.de +25399 + RHnet Inc. + Jón Ingi Einarsson + cert&rhnet.is +25400 + InterSystems USA + Denise Hummel + dhummel&itsden.com +25401 + MU Net, Inc. + William C Unkel + BillU&munet.com +25402 + FlanTel Communications + Barry Flanagan + barry&flantel.com +25403 + ClearCanvas Inc. + Clinton Chau + clinton&clearcanvas.ca +25404 + citadel.org (the Citadel groupware project) + Art Cancro + ajc&uncensored.citadel.org +25405 + Boston Linux & Unix + John Abreau + snmp&blu.org +25406 + Iptivia Inc + Rajendran Rajan + raju&iptivia.com +25407 + Engiby sàrl + Nicolas Bovigny + nb&engiby.ch +25408 + BarNet + Michael Green + info&barnet.com.au +25409 + Superclick Networks Inc + Enrico Demarin + enrico&superclick.com +25410 + Technical Resources International + Arthur Wang + itsupport&tech-res.com +25411 + Training Team Srl + Roberto Franchi + roberto.franchi&trainingteam.com +25412 + OrcaWare Technologies + Blair Zajac + blair&orcaware.com +25413 + FH-Trier, University of Applied Sciences + Ralf Becker + beckerr&fh-trier.de +25414 + City of Henderson, Nevada + David Nelson + David.Nelson&CityOfHenderson.com +25415 + crosscert + se eun kim + sekim&crosscert.com +25416 + Active Database Team + Hyunsik Choi + advanced80&nate.com +25417 + rootKlub.org + Mr. Féng + gf&rootklub.org +25418 + INED - Institut National d'Etudes Demographiques + Marc LAMOUCHE + hostmaster&ined.fr +25419 + CompanyName + Alexandr Motorny + disel&entry.kiev.ua +25420 + maf + Miguel Ángel Ferrer + service&maferrer.net +25421 + Postini, Inc. + Michael Han + mhan&postini.com +25422 + Vilnius University + VU Hostmaster + hostmaster&vu.lt +25423 + Babs Paylink AB + Andreas Ekman + administrator&babspaylink.se +25424 + Kongsberg Norcontrol AS + Head Technology & Products Department + knc.support&kongsberg.com +25425 + Condor LebensversicherungsAG + Andreas Matthees + andreas.matthees&condor-versicherungsgruppe.de +25426 + LenSoft Ltd. + Sergey Lentsov + thelenz&scc.lg.ua +25427 + Avenda Systems, Inc. + Santhosh Cheeniyil + santhosh&avendasys.com +25428 + AGMEN + Jean-Charles PERNOT + jean-charles&pernot.org +25429 + Comarch S.A. + Jan Srzednicki + net.dept&comarch.com +25430 + Netspecialisten + Jacob Volstrup + volstrup&avanceret.dk +25431 + Ingenico + Jean-Claude Genoud + jean-claude.genoud&ingenico.com +25432 + ServersCheck BVBA + Van Laere + helpdesk&serverscheck.com +25433 + HaiVision Systems Inc. + Frederick Mruzek + fmruzek&haivision.com +25434 + Engineering School of Geneva EIG + Francois Tamone + tamone&eig.unige.ch +25435 + Great Lakes Internet, Inc. + Eliot Gable + support8&greatlakes.net +25436 + Loren Data Corp. 
+ Edison Carter + edison&ld.com +25437 + Chair MMSP of Perm State Technical University + Vychegzhanin Anton + ldap&mmsp.pstu.ru +25438 + B-one Aps + Jakob Goldbach + iana-admin&b-one.net +25439 + Digital Genesis Technologies + James M. Sella + sella&digital-genesis.com +25440 + Radiqal, LLC + Mr. Parag Amin + parag&radiqal.com +25441 + lagis Internet Service Provider GmbH + Ulrich Zehl + u.zehl&lagis.at +25442 + AIRAYA Corp + Mike Nydam + mnydam&airaya.com +25443 + louismc + Louis-Martin Carriere + louis_m_c&hotmail.com +25444 + Fastwire + Dan Young + djy&fastwire.com.au +25445 + AstralBlue + Eugene M. Kim + ab&astralblue.net +25446 + Burn Networks Pty Ltd + David Parrish + info&burn.net.au +25447 + Jireh International Inc. + Stephen Afuape + stephen&jirehinternational.com +25448 + Verari Systems, Inc + Tony Skjellum + tony&verarisoft.com +25449 + Beijing Easy Broadband Technology Co.,Ltd. + ymtang + ws214331&sohu.com +25450 + eTraintronics.com + Howell Dell + hdell&eTraintronics.com +25451 + UFPS Tymenskoy oblasti - filial FGUP "Pochta Rossii" + Sergey V. Kozlov + sergey.kozlov&tmnpost.ru +25452 + Sikom Software GmbH + Steffen Liersch + S.Liersch&sikom.de +25453 + Seaview Support Systems Pvt Ltd + System Administrator + sysadmin&svw.com +25454 + LiComm Co., Ltd. + Meong-Kyu Choi + cmk116&licomm.com +25455 + Trinodal S.L. + Tobias Nieters + info&trinodal.net +25456 + Wooribyul Telecom Co., Ltd. + Junpyo, Lee + leejp&wooribyul.co.kr +25457 + Schweizerische Bibliothek für Blinde und Sehbehinderte + Matthias Ragaz + informatik&sbszh.ch +25458 + XtraMind Technologies GmbH + Jan Timm + it&xtramind.com +25459 + BON.net Ltd + Benjamin Timms + ben-iana&bon.net +25460 + ComputerLand S.A. + Lukasz Wycislik + luke&pik.gliwice.pl +25461 + PALO ALTO NETWORKS + Dr. Anupam Bharali + paniana&gmail.com +25462 + Applied Wireless Identifications Group, Inc + Eric Huang + e.huang&awid.com +25463 + NanShanBridge Co.Ltd + ningzhang + zhangning&nsbic.com +25464 + Demarc Technology Group, LLC + Tony Morella + tony&demarctech.com +25465 + First Standards Organisation (FSO) + Nora Salam + nora&mefso.org +25466 + Asia Bank Card Union (ABCUnion.org) + Sophie Faris + sophie&abcunion.org +25467 + B-Lex Information Technologies + Arnold Hendriks + info&b-lex.com +25468 + Fortica Ltd. + Mika Suomalainen + mika.suomalainen&fortica.fi +25469 + Aliasource + Anthony Prades + oid&aliasource.fr +25470 + North Suburban Access Corp. + Ian R. Cobb + ian.cobb&ctv15.org +25471 + Gridpoint Systems + Ken Young + keny&gridpointsystems.com +25472 + Alltek Technology Corp. + Eddie Liao + eddieliao&alltek.com +25473 + Telelogos + Jean-Yves Bourdais + jybourdais&telelogos.com +25474 + Lisis NV + Wim Van Leuven + wim.van.leuven&ldc.be +25475 + Innovant Pty Ltd + Jamie Vachon + jamie.vachon&levelonenetworks.com +25476 + VideoCells Ltd. + Eran Bida + snmp&videocells.com +25477 + alpha nova BetriebsgesmbH + Rudolf Hatheyer + rudi.hatheyer&alphanova.at +25478 + Local Solutions + Denis Basta + Denis.Basta&local.lv +25479 + X-Tend + Frederic Descamps + fred&x-tend.be +25480 + Congruentix LLC + Amol Hatwar + amol&congruentix.com +25481 + Convergin Ltd. + Michael Katz + michaelkz&convergin.com +25482 + Global Village Data Solutions + Timothy Bogie + tim.bogie&gvds.com.au +25483 + iScience Surgical + Bruce Director + bdirector&isciencesurgical.com +25484 + AKDB + F. Salat + Franz-Xaver.Salat&akdb.de +25485 + Patentanwaltskanzlei Horns + Axel H Horns + horns&ipjur.net +25486 + Asset Archives, Inc. 
+ Nick Downey + ndowney&assetarchives.com +25487 + Bishop Technologies, Inc. + Joe Doyle + jdoyle&bishopit.com +25488 + V2 Telecom Ltda. + Guilherme Spina + gspina&v2telecom.com.br +25489 + Henrik Nordstrom Consulting + Henrik Nordstrom + henrik&henriknordstrom.net +25490 + Wagner Tiefkuehlprodukte GmbH + Dirk-T. Rauber + dirk.rauber&wagner-pizza.de +25491 + National Bank of the Republic of Belarus + Alexandre I Burachevsky + root&bisc.by +25492 + Ruukki + Matti Oja + matti.oja&ruukki.com +25493 + VNUnet Europe + Emanuel Schleussinger + emanuel_schleussinger&vnu.de +25494 + Ivan Dolezal + Ivan Dolezal + ivan.dolezal&vsb.cz +25495 + Optimacom Sàrl + Christophe Alexandre + optimacom.sarl&gmail.com +25496 + CEMES-CNRS + Anne Altibelli + anne.altibelli&cemes.fr +25497 + Transbit Spolka z o.o. + Wojciech Wisniewski + mariola&transbit.pl +25498 + InSecMa Solutions GmbH + Marc Schlichting + m.schlichting&insecma.de +25499 + GuideCom GmbH + Sebastian Wiehage + ldap&guidecom.de +25500 + Interkey, Inc. + Jeffrey Goldschrafe + jeffg&interkey.net +25501 + VaultLab, Inc + CTO/Chief Technical Officer + p1.iana&vaultlab.com +25502 + AccuRev, Inc. + Matt Laudato + mlaudato&accurev.com +25503 + Network Integrity Systems Inc. + Cary Murphy + carym&networkintegritysystems.com +25504 + St. Edward's University Inc + Raymond Spinhirne + rays&admin.stedwards.edu +25505 + SpaceNetwork + Juergen Mira + NOC&SpaceNetwork.de +25506 + H3C + Xiaolan Wan + wxlan&h3c.com +25507 + Japan Maritime Self-Defense Force + Tsutomu Yoshima + k-mso-cm01&msdf.mail.jda.go.jp +25508 + narraSoft Philippines, Inc. + Joe Daigle + oidrequest&jwdaigle.net +25509 + C.MER + Yan Burman + yan_b&mer.co.il +25510 + Symedia S.A. + Roman Lyubovskiy + roman&symedia.org +25511 + SPALE NETWORKS + Pascal Gloor + pascal.gloor&spale.com +25512 + simple-networks + Torsten Goße + togo&simple-networks.com +25513 + VirtualLink Technologies Pte. Ltd. + Haitao Zhao + support&vlinktech.com +25514 + yatininc + yatin umrotkar + yatin_u84&yahoo.com +25515 + LKH Villach + Andreas Sucher + andreas.sucher&lkh-vil.or.at +25516 + Verizonbusiness + Jim Potter + jim.potter&verizonbusiness.com +25517 + Criterium Soluções em Informática Ltda + Rafael Santos + rafael.santos&criterium.com.br +25518 + StorCase Technology, Inc. + Joel Tang + Joel_tang&storcase.com +25519 + Washington County School District + Jeremy Cox + jcox&washk12.org +25520 + Quadrics Ltd + Lee Porter + lee&quadrics.com +25521 + NextConnect Ltd + D Short + info&nextconnect.co.uk +25522 + Renko Technologies + Arkady Renko + document.controller&efp.net.au +25523 + Homecast Co., Ltd. + Dae-Yeon Hwang + dyhwang&homecast.net +25524 + Open Source Hardware Engineering Community (OSHEC) + Dmitry V. Belimov + dimon&oshec.org +25525 + TC&C Telecommunication and Computer Technology Ltd. + Viktor Kiss + viktor.kiss&tcandc.com +25526 + IPRocess + MELI Antoine + snmp_pen&iprocess.fr +25527 + Nebraska Public Power District + Erik Weinmeister + etweinm&nppd.com +25528 + Laboratório Nacional de Computação Científica + Luiz M. R. Gadelha Jr. + lgadelha&lncc.br +25529 + World Links + Ray Stuart + ray&world-links.org +25530 + AirDat LLC + Brian Dale + bdale&airdat.com +25531 + B2E Technologies PTY LTD + Ronald Da Encarnacao + ronnied&b2e.co.za +25532 + BCA Services Ltd. + Werner Rades + info&bca-itservices.com +25533 + IdentityForge, LLC. + Chad Cromwell + ccromwell&identityforge.com +25534 + Lusan Systems + Nikolai Lusan + nikolai&lusan.id.au +25535 + Klas Ltd. 
+ Peter Hertting + peterh&klasonline.com +25536 + Probaris Technologies, Inc. + Robert Levas + oids&probaris.com +25537 + Thomas Friedl + Thomas Friedl + friedltoolsoft&web.de +25538 + GNU Telephony + David Sugar + dyfet&gnutelephony.org +25539 + Osaka University + OID Administrator + oid-admin&ml.office.osaka-u.ac.jp +25540 + NetToolWorks, Inc. + Eric Eicke + eeicke&nettoolworks.com +25541 + JStream Technologies, Inc. + James Chen + jchen&jstream.com.tw +25542 + Ortikon Interactive Oy + Teemu Pohjolainen + teemu.pohjolainen&ortikon.com +25543 + Microbit 2.0 AB + Jonas Andersson + jonas.andersson&microbit.se +25544 + Iptune Ltd. + Vesa Haimi + info&iptune.com +25545 + EADS-CASA + Pedro Fernandez Cardador + Pedro.Fernandez&casa.eads.net +25546 + Boots Group PLC + Dr Tim Martin + tim.martin&boots.co.uk +25547 + VION + Joris te Molder + joris.te.molder&vionfood.com +25548 + OpenCraft + Karim Ratib + karim.ratib&open-craft.com +25549 + STAR Group + Daniel Geyer + daniel.geyer&star-group.net +25550 + zhihao + zhihao + zzh_1218&hotmail.com +25551 + AMEC + Bill Dishman + bill.dishman&amec.com +25552 + Poxix + Jacob Angel Munoz + me&poxix.com +25553 + Sonas Innovation Ltd + Barry Flanagan + barry&sonasi.com +25554 + WiderWeb Ltd + E Moore + ed&widerweb.co.uk +25555 + Social Science Research Council + Christopher Hernandez + hernandez&ssrc.org +25556 + Beyond 2000 Ltd + Bernard Jauregui + bj&beyond2000.co.uk +25557 + Xerox -ORG + Michael Foster + Michael.Foster&xerox.com +25558 + Infoblox, WinConnect (formerly 'Ipanto') + Eric Duchene + educhene&infoblox.com +25559 + Tivella Inc. + Alex Shekhter + alex&tivella.com +25560 + Ministerio Publico do DF e Territorios + Robson Paniago de Miranda + robsonm&mpdft.gov.br +25561 + Telecom Protection Technologies Limited + Chris Down + chris.down&tptdesigns.co.uk +25562 + Firmix Software GmbH + Vinzenz Grabner + mlzenml&firmix.at +25563 + Stadtverwaltung Duesseldorf + Michael Wadenpohl + michael.wadenpohl&stadt.duesseldorf.de +25564 + T7 e.V. + Hajo Bickenbach + hajo.bickenbach&t7-isis.org +25565 + pixality GmbH + Stefan Lippstreu + iana&pixality.com +25566 + Staatl. gepr. Inf. Markus KARG + Markus KARG + markus.karg&gmx.net +25567 + Cingular Wireless LLC + Johnny Walker + johnny.walker&cingular.com +25568 + Douglas Fast Net + Chance Newkirk + cnewkirk&douglasfast.net +25569 + Points South + Austin Brower + operations&psouth.net +25570 + Mackelprang Research + Mark Mackelprang + mark&mackelprang.com +25571 + nDosa Technologies Inc + Kwang-Bock You + kbyou&ndosatech.com +25572 + Columbia International College + Randy de Resendes + rderesendes&cic-totalcare.com +25573 + Point Clark Networks Ltd. + Peter Baldwin + peterb&pointclark.net +25574 + FXC Inc. + Zhao Fengji + zhao&fxc.jp +25575 + Bevuta + Pablo Beyen + beyen&bevuta.com +25576 + AXMEDIS Organisation + Paolo Nesi + nesi&dsi.unifi.it +25577 + Norlinx, Inc + Robert Selph + rselph&norlinx.com +25578 + Interchange Corporation + Scott Simpson + ssimpson&interchangeusa.com +25579 + Info Tech, Inc. + Network Services + sysadmin&infotechfl.com +25580 + Universidad de El Salvador + Victor Calles + victor.calles&ues.edu.sv +25581 + Musmap Team + Mathieu Parent + mathieuparent&users.sourceforge.net +25582 + Employease Inc. + Patrick Wolfe + pwolfe&employease.com +25583 + gfk + Gregor Kling + gfk&binary-delight.de +25584 + Axzona Ltd + Alan Hughes + alan&axzona.com +25585 + Infoglobal, S.A. + Prudencio Gomez + info&infoglobal.es +25586 + Unified Group Ltd + Scott Bye + sbye&unifiedgroup.co.uk +25587 + Jinitech Inc. 
+ Shawn Zhang + shawn&jinitech.com +25588 + University of Texas Center for Agile Technology + John Holder + itcontact&cat.utexas.edu +25589 + Comune di Cagliari + Pierangelo Orofino + piero.orofino&comune.cagliari.it +25590 + Tecnoworld Com. Imp. Exp. Ltda. + Alberto Sato + alberto.sato&tecnoworld.com.br +25591 + National Clearinghouse for Rehabilitation Training Materials + Kenneth Tingey + ken.tingey&usu.edu +25592 + Intercorp + David Price + david_price&intercorp.com.au +25593 + Aethernet Ltd + Kris Saxton + kris&aethernet.co.uk +25594 + Gollard + Olesov Peter + olesov&atlantis.ru +25595 + CZ.NIC, z.s.p.o. + Ondrej Sury + ondrej.sury&nic.cz +25596 + DigitalSign - Certificadora Digital + Alvaro Matos + suporte&digitalsign.pt +25597 + FireEye Inc. + Jordan Blake + jordan&fireeye.com +25598 + Gehlbach Technical Services, Inc. + Jeff Gehlbach + jeffg&gehlbachtech.com +25599 + SecureNet PL + Wojciech S. Czarnecki + registry&sec.pl +25600 + EzValidation Inc. + Asif Bhimla + ali&ezvalidation.com +25601 + FSP Computer & Netzwerke + Stefan Schaefer + stefan.schaefer&fsproductions.de +25602 + Telekom Austria AG + Karl Vavrina + karl.vavrina&telekom.at +25603 + Locube.com + Nicolas Camus + ncamus&locube.com +25604 + Agos S.p.A. + Marco Barzaghi + m.barzaghi&agosweb.it +25605 + Ingenieurbuero T. Scholz + Thorsten Scholz + iana_snmp&demugo.sytes.net +25606 + DevMetrix LLC (formerly 'USi') + Damien Stuart + dstuart&dstuart.org +25607 + Andritz AG + christian bretterhofer + christian.bretterhofer&andritz.com +25608 + Iritec + Álvaro J. Iradier + airadier&iritec.es +25609 + StreamVision + Perceval Anichini + perceval.anichini&streamvision.fr +25610 + The SEEMIS Group + Chris Higgins + chris.higgins&seemis.gov.uk +25611 + Lafayette College + Bob Bailey + baileyb&lafayette.edu +25612 + Trilliant Networks + François Guillemette + francois.guillemette&trilliantnetworks.com +25613 + CGI Concept + Marek Habersack + grendello&gmail.com +25614 + Auroras Entertainment + Dustin F. Harmon + dharmon&auroras.tv +25615 + Computer Savvy + David Sucharski + pcdocdave&tech303.com +25616 + Cardiac Science Corporation + Bryon Schultz + bschultz&cardiacscience.com +25617 + ClearCube Technology + My Tran + my.tran&clearcube.com +25618 + Omnitrol Networks, Inc. + Joe Ireland + jireland&omnitrol.com +25619 + Ultragreen + Romain GEORGES + romain&ultragreen.net +25620 + Ost-West Handelsbank AG + Mr. Stefan Berndt + stefan.berndt&owh.de +25621 + Z Microsystems + Chris Greve + snmp-num&zmicro.com +25622 + JANET(UK) (formerly 'UKERNA (United Kingdom Education and Research Networking Association)') + Mark O'Leary + mark.oleary&ja.net +25623 + OpenVAS + Tim Brown + timb&openvas.org +25624 + Kent School District + David Norton + serveradmins&kent.k12.wa.us +25625 + ChattenAssociates, Inc. + Leonid Tochinski + ltochinski&chattenassociates.com +25626 + Crufty + Simon Gerraty + sjg&crufty.net +25627 + Ohio Department of Public Safety + David Brown + dabrown&dps.state.oh.us +25628 + Perot Systems Corporation + Bradley Marrs + brad.marrs&ps.net +25629 + U.S. Environmental Protection Agency + Keith Brown + brown.keith&epa.gov +25630 + Groupe Mutuel + Christian Parvex + cparvex&groupemutuel.ch +25631 + International Newspaper Network, LLC + Dustin Ward + dward&townnews.com +25632 + Thorold Alarm + Jon Pounder + jon&pounder.com +25633 + lecentre.net + Sebastien THOMAS + prune&lecentre.net +25634 + uniqueobject + Arne Gerdes + arnegerdes&uniqueobject.com +25635 + Carlo Gavazzi Computing Solutions, Inc. + Mark J. 
Pascarelli + markpas&mupac.com +25636 + Control Alternative Solutions, Inc. + Prakash Jadeja + jad&casolinc.com +25637 + Police Bruxelles CAPITALE Ixelles - ZP 5339 + Frédéric Devos + freddevos&gmail.com +25638 + Uniadex, Ltd. + Masatsugu Fujita + masatsugu.fujita&uniadex.co.jp +25639 + KMS Systems Inc + George Mallard, P.E. + guru&crashbar.com +25640 + Real SoftService + Wang Penghui + wangpenghui&realss.com +25641 + Papeteries Hamelin + Bernardeau Gérard + gbernardeau&hamelin.fr +25642 + CHP Consulting ltd. + Andy Brook + ops&chp.co.uk +25643 + Intermediasud + Claude Combes + ccombes&e-teleport.net +25644 + liland open IT solutions GmbH + Michael Perkonigg + michael.perkonigg&liland.at +25645 + Community TV GmbH + Matthias Subik + ms&okto.tv +25646 + b.a.b-technologie GmbH + Joerg Schmitz-Linneweber + jsl&bab-tec.de +25647 + Hungarian Customs Service (VPOP) + Zoltan Riba + riba.zoltan&mail.vpop.hu +25648 + Ecole Nationale Veterinaire de Nantes + Mr Didier ROY + roy&vet-nantes.fr +25649 + Technobox, Inc. + Larry Reilly + larry.iana-enterprise&technobox.com +25650 + SunUp Design Systems, Inc. + Praveen Sharma + sysadmin&sunup.com +25651 + Exalt Communications + Herman Lee + hlee&exaltcom.com +25652 + Tasneem Electronics L.L.C + Basem Narmok + narm&go.com.jo +25653 + Oakley Networks, Inc. + Jan L. Peterson + jan.peterson&oakleynetworks.com +25654 + Psi Systems, Inc. + David Megel + dmegel&psi-sys.com +25655 + MIMS SA + Frederic Senault + fs&mims.be +25656 + Axoïde EURL + Borgogno Cécile + contact&axoide.com +25657 + NWCG + Jason Runyan + jason.runyan&usda.gov +25658 + Essentia S.p.A. + Andrea Tarasconi + andrea.tarasconi&essentia.it +25659 + PolarSat Inc. + Marc Tibout + marc.tibout&polarsat.com +25660 + Starflight Electronics + John Anderson + johna&starflightinc.com +25661 + DataPro Group Limited + Kenneth Clark + kennethc&datapro.co.za +25662 + Opway Optical Technology(Wuxi)Co.Ltd + Bai Liyong + lybai&opwaytech.com.cn +25663 + InfowareLab Co,Ltd. + ROBIN XIA + Robin.Xia&cybernaut.com.cn +25664 + Terascala, Inc. + Mike Nuss + mike&terascala.com +25665 + Essentel Inc. + Corey Gates + info&essentel.com +25666 + Telenav, INC + Jason Choe + jchoe&telenav.com +25667 + Padjen + Frederik Padjen + ldap&padjen.de +25668 + Battelle Memorial Institute + Peter Dohm + dohmp&battelle.org +25669 + Mariner Partners Inc. + Aleksandar Petrovic + aleksandar.petrovic&marinerpartners.com +25670 + California State University, Fullerton + Chuck Yang + cyang&fullerton.edu +25671 + EC Hugbunadur Ehf + Asgeir Halldorsson + asgeir&ec.is +25672 + Responsys + Mike Sherman + msherman&responsys.com +25673 + Grupo IT Deusto, S.L. + Marcos Riosalido + mriosalido&itdeusto.com +25674 + jlu.nic.Inc. + WenZiyan + wenziyan1208&163.com +25675 + Test-O-Mat + Tim Krah + mail&timkrah.de +25676 + procilon Gmbh + Torsten Rienaß + torsten.rienass&procilon.de +25677 + Action Soft + Damien Matraire + dma&action-soft.com +25678 + EMBL Heidelberg + Matthias Helmling + helmling&embl.de +25679 + SecureAxis Software + Chris Elbring + celbring&secureaxis.net +25680 + Pixen Technologies Pvt Ltd + Vijay A Ramamoorthy + mailrvijay&gmail.com +25681 + Instituto Municipal de Cultura y Juventud de Burjassot + Gabriel Prunonosa + gabriel.prunonosa&imcjb.net +25682 + Donley Consulting + Dennis Furr + denny&donleyconsulting.co.uk +25683 + Epstein Becker & Green, P.C. 
+ Mevin Essapen + MEssapen&ebglaw.com +25684 + Perot Systems Government Services + Rich Bashaw + rich.bashaw&psgs.com +25685 + Hurricane Labs LLC + Brian T Glenn + brian&hurricanelabs.com +25686 + MCTEL Monaco Telematique + Daniel Mavrakis + sms&mctel.net +25687 + Gendreau & Pelchat + Francis Gendreau + francis.gendreau&makwa.net +25688 + Caixanova + Juan Manuel Fernández López + jmfernandez&caixanova.com +25689 + alsatis + Frédéric Moulins + contact&alsatis.com +25690 + REM Probe Ltd + Kelvin Ager + Kelvin.Ager&branch.co.uk +25691 + Eyes, Japan Co. Ltd. + Masahiro Kinugawa + kinugawa&aizu.com +25692 + ANDREAS STIHL AG & Co. KG + Andreas Bombik + oidmaster&stihl.de +25693 + InterNetworX Systems Inc. + Don Fitzpatrick + dwf&InterNetworXsystems.com +25694 + DRS Surveillance Support Systems, Inc. + Tony Price + tony.price&drs-sss.com +25695 + Vestergaard IT + Peter Dahl Vestergaard + peterdv&vestergaard.it +25696 + Nedap N.V. + Evert Wonnink + evert.wonnink&nedap.com +25697 + BBK + Sonsoles Sagredo + sonsoles.sagredo&ieuskadi.com +25698 + KSB Italia S.p.A. + Nicola Bertellini + it&ksb.it +25699 + Generic Variables + eng.enrico frediani + e.frediani&usl6.toscana.it +25700 + Protokon Kft. + Nagy Attila + nagy.attila&protokon.com +25701 + GAMIC mbH + Marco Costa + costa&gamic.com +25702 + PCSofía + Hernan Berguan + pcsofia&gmail.com +25703 + CYBERSYS + Georges VIDAL + gvidal&cybersys.fr +25704 + inter-touch (Malaysia) Sdn. Bhd. + Peter Fang + pfang&inter-touch.com +25705 + Pixtree Technologies, Inc. + Sehoon Son + shson&pixtree.com +25706 + ViaScope Int. + Park, Sungsoo + day1102&scope.co.kr +25707 + Marksman + Chris Hughes + chris&hughes-net.net +25708 + Milde Software Solutions (MSS) + Marc Milde + mib&milde-online.com +25709 + Windstream Communications Inc + TJ Reece + ndssupport&windstream.com +25710 + Phoenix Worldwide Industries, Inc. + Adrian Esquivel + aesquivel&phoenixworldwide.com +25711 + Coral8, Inc. + Greg Shtilman + snmp-admin&coral8.com +25712 + The Higher Gear Group, Inc. + James Newby + ldapadmin&highergear.com +25713 + Magyar Telekom + Gaál Géza + gaal.geza1&t-com.hu +25714 + Chengdu Guyue Electronics Co.,Ltd + Yibin Hu + hyb&vip.163.com +25715 + JumpTV.com Inc. + Sinisa Djurkic + sdjurkic&radintl.com +25716 + Manuel Domínguez Hidalgo + Manuel Domínguez Hidalgo + manuel_dominguez_hidalgo&hotmail.com +25717 + Ravn Webveveriet AS + Jan Rudolph + hostmaster&ravn.no +25718 + Taction + Tom Welch + sysadmin&taction.net +25719 + Polysys Ltd. + Mr. Ferenc Polyak + ferenc.polyak&polysys.eu +25720 + Ingenieurbüro Kuhlmann + Daniel Migowski + ldap-admin&ikoffice.de +25721 + Linwave Technology + Dave Bell + dave.bell&linwave.co.uk +25722 + Integrated Financial Arrangements plc + Mr Alex Crow + acrow&integrafin.co.uk +25723 + Phoenix-Database Informationstechnologie GmbH + Friedhelm Matten + Friedhelm.Matten&phoenix-database.de +25724 + Universiti Sains Malaysia + Mr Nurul Faizal M.Shukeri + nfaizal&usm.my +25725 + SOSeth + Daniel Fasnacht + iana&sos.ethz.ch +25726 + Cyrix Technologies Limited + James Kan + james.kan&cyrixtech.com +25727 + Q9 Networks Inc. + Sarah Nordstrom + iana&q9.com +25728 + ZAO Light Communication + Shulman Ilya + ish&lightcom.ru +25729 + Radiodata GmbH + Norbert Langermann + nlangermann&radiodata.biz +25730 + ASmith Home + Andrew Smith + asmith2004&gmail.com +25731 + Web Xtreme, Inc. + Dave Conklin + webservers&webxtreme.com +25732 + Mirago plc + Philippe Preget + Philippe.Preget&Mirago.com +25733 + Cellact Ltd. 
+ Amir Dorot + amir&cellact.com +25734 + Bolignet-Aarhus + Michael Molbech + mgm&bnaa.dk +25735 + Visionee s.r.l. + Fabio Radin + fabio.radin&visionee.com +25736 + Duval County Public Schools + James Moore + moorej&duvalschools.org +25737 + Reality Communications + Karl Wagner + karl&mouse-hole.com +25738 + INFOLOG GmbH + Axel Roelker + a.roelker&infolog.de +25739 + INTERMET Ueckermuende + Andreas Pohl + edv&intermet-uede.de +25740 + Nakamura Technologies Ltd. + Sylvester Chigbu + schigbu&nakamuratech.com +25741 + Ariane Ingenierie + antoine gérardin + agerardin&ariane-ingenierie.net +25742 + ALDI Einkauf GmbH & Co. oHG + Ingo Pieper + i.pieper&aldi.com +25743 + Krontek Pty Ltd + Ian Peterkin + i.peterkin&krontek.com +25744 + Avillon Networks + Gi Sangmin + yalvara&avillon.co.kr +25745 + Smart Communications, Inc + Carlo Feliciano N. Aureus + CNAureus&smart.com.ph +25746 + ARCADIA LAB srl + Lanconelli Nico + info&arcadialab.com +25747 + YellowJacket Software, Inc. + Greg Campbell + gcamp&yellowjacketsoftware.com +25748 + Templer Wirtschaftsing.-Buero + Guido Templer + g.templer&templerconsult.com +25749 + CenGen, Inc. + Frank Renwick + frenwick&cengen.com +25750 + SAM Group, Inc. + John Poore + johnpoore&samgroup.com +25751 + Python Technology Limited + Colin Hogben + iana&pythontech.co.uk +25752 + Gallery Solo + Grazyna Tonkiel + gsiencze&gmail.com +25753 + KPH Computers + Kevin Hughes + kev&kphcomputers.com +25754 + Shanghai onLAN Communication Tech.Co.,Ltd. + Xingjian Qin + qinxingjian&onlan.com.cn +25755 + Liferay, LLC + Brian Wing Shun Chan + bchan&liferay.com +25756 + Mahidol Wittayanusorn (Public Organization) + Boonnatee S. + boonnatee_sak&yahoo.com +25757 + Minister of Interior of Saudi Arabia + Abdullah Nasser Alghannam + aghannam&nic.gov.sa +25758 + SIF Inc. + Dong Jiang + dongj&sifinc.jp +25759 + Almen Laboratories, Inc. + Michael Galperin, Ph.D. + mgalperin&almenlabs.com +25760 + Doctors Telehealth Network + Brett Robblee + brettrobblee&doctel.net +25761 + ABB Automation GmbH + Cajus Hahn + cajus.hahn&de.abb.com +25762 + PersonalOffice + Denis Volkov + denis.v.volkov&gmail.com +25763 + Happydoo SAS + Eric VERNIER + iana&happydoo.com +25764 + Exmos Ltd + Gordon Coulter + gordonc&exmos.com +25765 + Barclays Global Investors Ltd + Paul Greer + paul.greer&barclaysglobal.com +25766 + Christie Digital Systems + Ashish Kudsia + ashish.kudsia&christiedigital.com +25767 + Koya Ixis, S. C. + Luis F Blanco M + lblanco&koyastudio.com +25768 + DATAllegro, Inc. + Matt Peebles + mpeebles&datallegro.com +25769 + UTRONIX Elektronikutveckling AB + Mikael Markow + iana&utronix.se +25770 + chatelp.org + Pierre Chatel + admin&chatelp.org +25771 + Mobile 365, Inc. + Prasanna Nagaraj + prasanna.nagaraj&mobile365.com +25772 + Astha Technologies Pvt. Ltd. + Shilpa Joshi + shilpa&xnapworks.com +25773 + Wemeus + Sudhakar Gorti + svgorti&gmail.com +25774 + BCEAO + Abdoulaye Mbodj + ambodj&bceao.int +25775 + midPhase Services, Inc + Zak Boca + zboca&midphase.com +25776 + Hangzhou zhongwei electronics Ltd. 
+ jianbingqin + jianbingqin&163.com +25777 + technoms + Florent DUPONT + florent.dupont&familledupont.com +25778 + Irkutsk State Transport University (IrGUPS) + Andrey Razvodovskiy + admin&irgups.ru +25779 + MITO-Performance + Thorben Ferdinand + mib&mito-performance.com +25780 + Sanchin Consulting AB + Joakim Fallsjo + snmp&lev.sanchin.se +25781 + NetFort Technologies Limited + Sergey Lyubka + sergey.lyubka&netforttechnologies.com +25782 + MSA management solutions GmbH + Holger Berndt + holger.berndt&msa-solutions.de +25783 + Bluecom C.A. + Stein Damman + info&bluecom.com.ve +25784 + Deutsche Provinz der Salesianer Don Boscos KoeR + Hatto von Hatzfeld + hatto&salesianer.de +25785 + Universitat Politècnica de Catalunya (UPC) + Victor Huerta + hostmaster&upcnet.es +25786 + AgileSoft BVBA + Michiel Scharpé + michiel.scharpe&agilesoft.be +25787 + dox.at. Softwareentwicklungs und Dienstleistungs GmbH + Wolf Seidl + wseidl&dox.at +25788 + DAIMS Ltd. + Young-soo Kim + varchar&daims.co.kr +25789 + Digital Work Co.,Ltd. + Kouhei Aonuma + ao&digital-w.com +25790 + Sirona Dental Systems GmbH + Juergen Zimmermann + juergen.zimmermann&sirona.de +25791 + certSign.eu + Wittmer, Christian + oidmaster&certsign.eu +25792 + Leader.IT + Guido brugnara + info&leader.it +25793 + Häfele GmbH & co. KG + Geald Brunner + gerald.brunner&haefele.de +25794 + XSALTO + Renaud ZIGMANN + oid&xsalto.com +25795 + Vicarial Technology + James Lorenzo + jay.lorenzo&gmail.com +25796 + Menbit Technology Co., Ltd + Jerry Menbit + JMenbit&gmail.com +25797 + Southerland Consulting, Inc. + John Buren Southerland + john&southerland-consulting.com +25798 + Elaborated Networks GmbH + Michael Markstaller + hostmaster&elabnet.de +25799 + West Liberty Telephone Company + Andy Meader + noc&lcom.net +25800 + Ample Communications Inc. + Howard Feger + hfeger&lecomm.com +25801 + Identita Technologies Inc. + Rocky Stefano + rstefano&identita.com +25802 + Aliadis + Thibault Genessay + noc&aliadis.fr +25803 + Total System Services, Inc.. + George Perkins + gperkins&tsys.com +25804 + Scala, Inc. + Peter Cherna + peter.cherna&scala.com +25805 + Thomson Learning + Jason Spruance + jason.spruance&thomson.com +25806 + Sonnet Technologies, Inc. + Nicholas Lekkas + nick.lekkas&sonnetinc.com +25807 + heung to middle school (tin shui wai) + tang kwong shing + tang&heungto.net +25808 + eBiz Ltd. + Ben Catherall + ben&ebiz.co.uk +25809 + mobileX AG + Christian Fridgen + development&mobilexag.de +25810 + ABE Software + Matthias Doerfel + md&abesoftware.de +25811 + INLOG SA + Olivier GAYDON + olivier.gaydon&inlog.com +25812 + Home Gateway Initiative + Paolo Pastorino + paolo.pastorino&telecomitalia.it +25813 + Sai Furnitures LTD + Sai Anand + sai_anand2000&yahoo.com +25814 + Subzilla + Tim Dalsing + tdalsing&yahoo.com +25815 + Wirevox + Bill Totman + totman&gmail.com +25816 + Roland DG Corporation + Kenta Shimojima + kenta.shimojima&rolanddg.co.jp +25817 + Poseidon Networks + Nitin Bhagnari + nitinbhagnari&yahoo.com +25818 + The Sakai Foundation + Mary Miles + mmiles&umich.edu +25819 + DO|YOU|SOFT + Benoit Plessis + b.plessis&doyousoft.com +25820 + SkyBlue Technologies, Inc. + Constantine Sapuntzakis + ops-contact&skyblue-technologies.com +25821 + Consilient Technologies Corp. + Sean Hogan + sean_hogan&consilient.com +25822 + S&T Hungary Ltd. 
+ Tamas Arato + tamas.arato&snt.hu +25823 + CIO Informatique Industrielle + Christian Charreyre + christian.charreyre&cioinfoindus.fr +25824 + Dementia 7 + Frédéric Laugier + snmp&dementia7.net +25825 + Asgard's Realm + Jamin W. Collins + jcollins&asgardsrealm.net +25826 + Peter J. Leonard Small Business Consulting Services + Peter J. Leonard + pleonard&affinityhealth.org +25827 + Big Lots Stores, Inc. + Marady Prak + maradyprak&biglots.com +25828 + Texas Health and Human Services + John Roan + John.RoanIII&HHSC.State.TX.US +25829 + Beijing superlink telecommunications Co.Ltd + sun hui + sunhui&superlink.com.cn +25830 + 4Players GmbH + Christian Celler + celler&4players.de +25831 + Rhinocorps, Ltd. Co. + Freeman P. Pascal IV + fpascal&rhinocorps.com +25832 + GlobalSCAPE, Inc. + Gregory T. Hoffer + ghoffer&globalscape.com +25833 + Edward W. Sparrow Hospital Association + Robert N. Parsons + Robert.Parsons&Sparrow.org +25834 + Interoperabilidad S.A. de C.V. + Mario Delgado + mario_delgado&interopera.com.mx +25835 + Patientline UK Ltd + Keith Jacobs + keith.jacobs&patientline.co.uk +25836 + Austrian Academy of Sciences + DI Melitta Kimbacher + melitta.kimbacher&oeaw.ac.at +25837 + Electronnyi gorod Lld. + Pavel Vladimirovich Chernov + elkursk&mail.ru +25838 + Tagawa Software Engineering + Ichiro OHTA + ichiro.ohta&nifty.com +25839 + ISTANTE srl + Enrico Lorenzini + direzione&istante.info +25840 + Logcode SARL + Orjan Petersson + nospam&logcode.com +25841 + Arakelian Software, Inc. + Gregory Arakelian + iana&arakelian.com +25842 + Technica Necesse Est + Denis Tumpic + dtumpic&sympatico.ca +25843 + VoIP.co.uk + Michael Procter + michael&voip.co.uk +25844 + Seneca Technology Corporation + Hank Skalka + hank&senecatechnology.com +25845 + MIYATSU CO.,Ltd + Tsukasa Okabe + tk_okabe_1&yahoo.co.jp +25846 + AIN Manager & Company, Inc. + Matthew Wilson + mrw&esrun.com +25847 + M-Networks, LLC. + William Mandra + wmandra&m-networks.net +25848 + Bally Technologies, Inc. + Chuck DeVall + cdevall&ballytech.com +25849 + deas Deutsche Assekuranz-Makler GmbH + Frank Gruber + enterprisenumber&deas.de +25850 + Smart Tech 21, Inc. + George F. Taylor + taylorgf&aol.com +25851 + 5by5 Software Ventures Ltd. + Daniel Choy + dchoy&5by5software.com +25852 + City Computing Ltd London UK 1767817 + Steven Britton + snmp&cityc.co.uk +25853 + Rubin Informatikai ZRt. + Csaba Parádi + titkarsag&rubin.hu +25854 + Multisoft Ltd. + Peter Perenyi + snmp-mib&multisoft.hu +25855 + Net24 Limited + Nikolai Schupbach + noc&net24.net.nz +25856 + Free University of Bozen/Bolzano + Alessandro Peroni + it&unibz.it +25857 + FEMTO-ST + William Daniau + william.daniau&femto-st.fr +25858 + The Royal College of Surgeons in Ireland + Ciaran Butler + cmbutler&rcsi.ie +25859 + The aKamali Group, Inc. + David A. Beidle + dabeidle&akamali.com +25860 + A VonderHaar Networks + AARON VONDERHAAR + iana-contact&avh4.net +25861 + Tanapro GmbH + Thomas Arn + edv&tanapro.ch +25862 + Assist WiseTech S.A. + Juan C. Plaza + jcplaza&cl.assist-la.com +25863 + Brenson Pacific Technologies Ltd + Andre Thompson + andre&brepac.com +25864 + VideoPropulsion Interactive Television, Inc + Matthew Spransy + matthew&videopropulsion.com +25865 + Boise State University + Sean Jones + smackjones&gmail.com +25866 + CHU BESANCON + Vincent JANNIN + vjannin&chu-besancon.fr +25867 + i-qnet + C.Neufend + neufend&i-qnet.de +25868 + Last Mile Gear + Brian Magnuson + Support&cni.net +25869 + MIRAVID Inc. 
+ Hugo Chung + hchung&miravid.com +25870 + FHLBanks Office of Finance + Stacy Bergert + bergert&fhlb-of.com +25871 + Nextwireless. co. ltd. + SangHo Kim + smartkim&hotmail.com +25872 + EMM s.r.o. + Martin Zemlicka + zemlicka&emm.sk +25873 + BitlBee Team + Jelmer Vernooij + jelmer&samba.org +25874 + GrimIce Inc. + Osbert Morris + ozzy81&gmail.com +25875 + Ondrej Svoboda + Ondrej Svoboda + iana&svoon.net +25876 + Gnome Technologies + Andrew Blanksby + andrew&gnome-technologies.com +25877 + Portrait International, Inc. + Darren Warner + darren.warner&portraitsoftware.com +25878 + FineRF + Daniel Donia + daniel&finerf.com +25879 + Mess Creative Electronics B.V. + Michiel Ettema + mettema&xs4all.nl +25880 + AEB GmbH + Manfred Möbus + moebus&aeb.de +25881 + Objective Arts + Stephen Grant + sgrant&objectivearts.com +25882 + Impinj, Inc. + Bill Ashley + bill.ashley&impinj.com +25883 + Elspec-Ltd + Oren Cohen-Shwartz + OrenCS&elspec-ltd.com +25884 + JPK Instruments AG + Michael Haggerty + haggerty&jpk.com +25885 + TQI - Total Quality on Information + Fabiano C. de Oliveira + fabiano.oliveira&tqi.com.br +25886 + Nethit + Veikko Mustonen + ldap&nethit.fi +25887 + Centro Provinciale di Documentazione Modena + Luca Prampolini + tecnici&cedoc.mo.it +25888 + Gestion Professionnelle des Services de l'Assurance + CORTELLEZZI Philippe + philippe.cortellezzi&gpsa.fr +25889 + IFETH Ltd., U.K. + Mr. Hans J Haase + office&ifeth.org.uk +25890 + MNG Networks GmbH + Ernst Gentner + ernst.gentner&mngnetworks.de +25891 + Sms Systems + Taft Price + b_j_parker&yahoo.com +25892 + Teletronics International, Inc. + Ye Cao + ye&teletronics.com +25893 + Dexterra Inc. + Zeev Lieber + zlieber&dexterra.com +25894 + Blue Cross and Blue Shield of Louisiana + Kathy Constantin + kathy.constantin&bcbsla.com +25895 + PS Webhosting + Philipp Wagner + pw&ps-support.de +25896 + Sento Corporation + Hongyi Gao + hongyi_gao&hotmail.com +25897 + Alligacom Inc. + Dominique Danvoye + dominique.danvoye&alligacom.com +25898 + 25th-floor - de Pretis & Helmberger KEG + Andreas de Pretis + a.depretis&25th-floor.com +25899 + Kantonsschule Zug + Christian Wittenhorst + wiwi&ksz.ch +25900 + Blue Frog Solutions Inc. + Glenn Puchtel + gpuchtel&bluefrogsolutions.com +25901 + Vaquero (formerly 'Linterra') + Michael Owens + mikeowens&gmail.com +25902 + M&V Werbeagentur GmbH + Achim Vogel + a.vogel&mvwa.de +25903 + Palm Beach County + Craig Lessard + clessard&co.palm-beach.fl.us +25904 + Fresh Agencja Reklamowa Sp. z o.o. + Jaroslaw Szczepankiewicz + jszczepankiewicz&fresh.com.pl +25905 + Charles University in Prague + Michal Vocù + michal&cuni.cz +25906 + Topchiev Institute of Petrochemical Synthesis + Igor Ermakov + ive&ips.ac.ru +25907 + EMAP France + Jean-Pascal GUIHARD + jeanpascal.guihard&emapfrance.com +25908 + Haicku, S. Coop. Mad. + Irene Fernandez + irene&haicku.net +25909 + IPCMS + Fabien Muller + Fabien.Muller&ipcms.u-strasbg.fr +25910 + Live Software Solutions Ltd. + Mike Goatly + MGoatly&livesoftwaresolutions.com +25911 + ELCA Informatique SA + Erwin BORDET + sd.iana&elca.ch +25912 + PT Teleakses Solusindo (Tel-Access) + Emil Chandrawisesa + chandrawisesa&tel-access.com +25913 + Fi-Mesh Networks Private Ltd. 
+ Krishna Akella + akellak&fi-mesh.com +25914 + Solacom Technologies + Channy Tremblay + ctremblay&solacom.com +25915 + BCM Bureautique S.A.R.L + Seridj Larbi + blar&nomade.fr +25916 + progon engineering + Christian Wittenhorst + wiwi&progon.net +25917 + convey Information Systems GmbH + Bernhard Froehlich + ted&convey.de +25918 + IT-SecuConsult Ltd. + Sven Steinecke + sven.steinecke&it-secuconsult.net +25919 + NorthSpark AB + Randy Vincelette + randy.vincelette&northspark.se +25920 + Institut de Genetique et Microbiologie ( IGM ) + Mazyar IZAD PANAH + mazyar.izad&igmors.u-psud.fr +25921 + centerra GmbH + Daniel Paufler + daniel.paufler&centerra.de +25922 + teuto.net Netzdienste GmbH + Christian Gall + cg&teuto.net +25923 + C D P Communications Inc. + Carlton D. Davis + cdavis&cdpcommunications.net +25924 + CatsMuvva.Net + Nicole King + nicole&catsmuvva.net +25925 + Ready Business System + Jean-Marc BURGSTAHLER + jean-marc.burgstahler&rbs.fr +25926 + Computer and Communication (CaC) + Stefan Klatt + stefan.klatt&cac-netzwerk.de +25927 + Marketcetera LLC + Graham Miller + oidadmin&marketcetera.com +25928 + SecureNet Scientific Solutions S.A. de C.V. + Xavier Arino + xavier.arino&securenet.com.mx +25929 + JHU ACM + Albert Lee + trisk&acm.jhu.edu +25930 + ZamKor P. Sagnowski i Wspolnicy Sp. J. + Krzysztof Satola + krzysztof.satola&zamkor.pl +25931 + Datalan, a.s. + Marek Baumerth + info&datalan.sk +25932 + 8labs.com + Joshua Graham + jgraham&8labs.com +25933 + Quantum Technology Marketing Ltd + Mike Kovacevich + mike.kovacevich&quantumtm.com +25934 + Intraway Corporation + Lucas Lodeiro + lucas.lodeiro&intraway.com +25935 + TNT Express Worldwide (UK) Ltd + Darryl Armstrong + darryl.armstrong&tnt.com +25936 + echternacht new media ohg + Dirk Winter + dirk.winter&echternacht.com +25937 + NNIT A/S + Network department (Responsible: BnnK/Benny Kjærgaard) + network&nnit.com +25938 + Axonpro spol. s r.o. + Ernest Beinrohr + admins&axonpro.sk +25939 + Mainstream Technologies, s.r.o. + Petr Setka + info&mainstream.cz +25940 + Lycée Technique d'Ettelbruck + Marc TEUSCH + marc.teusch&education.lu +25941 + WiComm mmc + Vadim Chernov + office&vdm-media.info +25942 + DigiVox B.V. + Patrick Tepe + digivox.nl&hotmail.com +25943 + pixus.net + Grulert + mailvon.iana&pixus.de +25944 + Psychotronics + Florian Friesdorf + flow&mytum.de +25945 + Landwehr EDV Technik + Klaus-Dieter Landwehr + kd.landwehr&landwehr-edv.de +25946 + cintrixx technologies + Mohan Ranjith + mohanranjith&yahoo.com +25947 + Halloo Communications, Inc. + Jim Li + jyl087&gmail.com +25948 + Yellowbank + Ron Peterson + iana&yellowbank.com +25949 + China Agricultural University + Wang Xu + kevinw2008&yahoo.com.cn +25950 + FlyLady & Company, Inc. + Michael Bennett + iana-mib-oid&flylady.net +25951 + Hutman, Inc + Kevin M. Tilka + ktilka&hutman.net +25952 + EyeQ Informationstechnik & Multimedia, Reinhard Sucker & Sohn GbR + Oliver Sucker + info&eyeq.de +25953 + Getitsafe + Tobias Hansson + tobias&getitsafe.com +25954 + XERON + Douglas Kim + douglas_kim&xeron.co.jp +25955 + Observit Lda + Pedro Soares + psoares&observit.com.pt +25956 + Talk-A-Phone Co. + Yosef Klein + yklein&talkaphone.com +25957 + ATITEL LTDA + Marcos Santos + marcos&ati.com.br +25958 + Secure Networks Lda. + Pedro Almeida + palmeida&securenetworks.pt +25959 + Raia & Cia Ltda. + Rodolfo Ricci + rodolfo&drogaraia.com.br +25960 + Hour Hand Productions Inc. + Michael D'Auria + michael&hourhandproductions.com +25961 + Bizfon, Inc. 
+ Martin Abbott + mabbott&bizfon.com +25962 + Larson Technology + Brent Larson + brent&larson.name +25963 + California State University Stanislaus + Russell Inman + rinman&csustan.edu +25964 + sPearWay Ltd. + Pierre Frisch + pierre.frisch&spearway.com +25965 + Pennsylvania Legislative Data Processing Center + An Ly + axl&legis.state.pa.us +25966 + CBS Broadcasting Inc. + Ryan Don + rdon&hvc.rr.com +25967 + Packetware Inc + Srinivasa R Nallamotu + sri&packetware.com +25968 + Rabid Dog Labs + Kenneth Clark + kenneth&rabiddog.co.za +25969 + SeMarket, S.A. + J.Angel Recio + ja.recio&semarket.com +25970 + Pmovil Ltda. + Flavio Correa Prado + fprado&pmovil.com.br +25971 + SGV + heinz hoelzl + heinz.sgv&gvcc.net +25972 + SUPERCOM + Angus + Angus&supercom.com.tw +25973 + Nexge Technologies (P) Ltd + Sivalingam + siva&nexge.com +25974 + Freedom9 Inc. + Allen Hsueh + oidadmin&freedom9.com +25975 + REUNA + Juan Carlos Martínez + jcmartin&reuna.cl +25976 + Secours Catholique + J. Paul Nyame + jnyameew&capgemini.fr +25977 + Bausparkasse Schwäbisch Hall AG + Gunther Lochstampfer + gunther.lochstampfer&kreditwerk.de +25978 + EGeen + Heno Ivanov + heno.ivanov&egeeninc.com +25979 + Mavenir Systems, Inc. + Rashad Ali + info&mavenir.com +25980 + Van Shung Chong Holdings Limited + Eric LEUNG (Kai Shing) + eric.leung&vschk.com +25981 + Sapian SA + Victor Saldarriaga + vsaldarriaga&sapian.org +25982 + Hillcrest Laboratories, Inc. + Stephen Scheirey + Steve.Scheirey&hillcrestlabs.com +25983 + Telecom Design + Alexander Naidenovich + nav&teledes.ru +25984 + FAST CORPORATION + Hideo Kuwabara + H.Kuwabara&fast-corp.co.jp +25985 + JoyTV10 / S-Vox Ltd (formerly 'Rogers Broadcasting Ltd.') + Paul Muirhead + pmuirhead&s-vox.com +25986 + InterGen Services, Inc. + Craig Shrimpton + cshrimpt&intergen.com +25987 + Grupo de Investigación en Bioingeniería (GIB) CES - EAFIT + Alvin Garcia Chaves + algarcia&eafit.edu.co +25988 + AKH Wien + Gallauner Roman + roman.gallauner&akhwien.at +25989 + Automata Software Inc. + Jason Rohwedder + iana-contact&automatasoft.com +25990 + Security Compliance Corp + IANA Admin + admin&securitycompliancecorp.com +25991 + MasarLabs.com + Maurizio Sartori + masar&MasarLabs.com +25992 + ACEFHAT , A.I.E. + Juan Ramón Mesa Díaz + jrmesa&acefat.com +25993 + Aspalis SAS + Jean-Paul MALLET + jeanpaul.mallet&aspalis.com +25994 + Aspect Capital Ltd + Cheung Lo + cheung.lo&aspectcapital.com +25995 + Network-Unlimited + Eelco Nieuwstad + eelcon&xs4all.nl +25996 + ITRI + Chilung Wang + chilung&itri.org.tw +25997 + Ugly Design, Inc + Marc Paradise + marc.paradise&gmail.com +25998 + Assure Programs Pty Ltd + Scott Wilson + scottwilson&assureprograms.com.au +25999 + Junger Audio-Studiotechnik GmbH + Manfred Ottenbreit + manfred.ottenbreit&junger-audio.com +26000 + John Laesch for Congress + Jared Lash + errorlevel&gmail.com +26001 + Embedded Ventures LLC + Timothy J. Spires + tspires&frontiernet.net +26002 + Youtility Solutions, Inc. 
+ Graham Gillies + graham.gillies&youtility.com +26003 + Adflex Ltd + Darryl Hughes + dhughes&adflex.co.uk +26004 + CONNECT Software AG + Simon Erhardt + simon.erhardt&it-connect.de +26005 + Hessisches Landeskriminalamt DV-kriminalistische Beratungs- und Auswertungsstelle + Berg, Matthias + matthias.berg&sg613.de +26006 + Personal & Informatik AG + Klaus Thiele + kthiele&pi-ag.com +26007 + Evert Mouw + Evert Mouw + post&evert.net +26008 + Neosis + Ashish Disawal + ashish&neosis.in +26009 + Sebastian Pasch + Sebastian Pasch + pasch&lipa-digital.com +26010 + Hippogriff LLC + Bernd Prager + bernd&prager.ws +26011 + CEPEL + Bruno Avila Galvão + avila&cepel.br +26012 + Holmboe Consulting + Henrik Holmboe + henrik&holmboe.se +26013 + Kongsberg Seatex AS + Morten Skille + morten.skille&kongsberg.com +26014 + Legando AG + Alan Moran + alan.moran&legando.ch +26015 + Multipolar Corporation Tbk, PT + Rudy Hidajat + rudy.hidajat&multipolar.co.id +26016 + Rederi AB Transatlantic + Peter Kindbom + peter.kindbom&rabt.se +26017 + Raiffeisenbank (Bulgaria) EAD + Lazar Oleg Konyuhov + Lazar.Konyuhov&rbb-sofia.raiffeisen.at +26018 + Westminster College + Don E. Goodlin, Jr. + goodlide&westminster.edu +26019 + nantong vocational college + guoping huang + hgp&mail.ntvc.edu.cn +26020 + DePratti Consulting LLC + Patrick DePratti + pdepratti&yahoo.com +26021 + Ligos Corporation + Jim Weller + jweller&ligos.com +26022 + Kamayo + Julien Nitard + julien.nitard&m4tp.org +26023 + Fachschaft MPI, TU München + Sebastian Hanigk + shanigk&fs.tum.de +26024 + subnet - platform for media art and experimental technologies + Andreas Förster + andreas&subnet.at +26025 + Ari Voutilainen + Ari Voutilainen + ari.voutilainen&iki.fi +26026 + arm4.org + David Carter + dcarter&entertain-me.com +26027 + OpenDS.org + OpenDS Administrator + opends&dev.java.net +26028 + MetaSoft + Ilya Melamed + ilya77&gmail.com +26029 + DuroSystems Ltd. + Brett Doyle + mib-snmp&durosystems.com +26030 + Zobel Software GmbH + Boris Synak + boris.synak&zobelsoft.de +26031 + b-tree GmbH + Roland Wepfer + roland&wepfer.com +26032 + XINU technologies GmbH + Wolfgang Stanglmeier + iana&xinu.de +26033 + On The Network Co.,Ltd + Kim TaeHyung + ohsh&otnet.co.kr +26034 + Adobe Animal Hospital + Steve Poe + steve.poe&gmail.com +26035 + Alinto SA + ALINTO Network Operation Center + noc&alinto.net +26036 + Demoniak Network + Frederic Pamart + info&demoniak.ch +26037 + Dengler Engineering GmbH + Peter Schulze + peter.schulze&dengler-gmbh.de +26038 + Cecom S.A. + Jose I. Murria + jimurria&gcener.com +26039 + CESR + Hillembrand + Cedric.Hillembrand&cesr.fr +26040 + T PARTY + TAKAHIRO KONNO + thtprty&gmail.com +26041 + Optical Internetworking Forum (OIF) + Andra Kosich + akosich&oiforum.com +26042 + Music for Life Institute + Max Clements + clementsm&gmail.com +26043 + LaSer Loyalty + Sébastien Ducléroir + admin_fidelisation&e-laser.fr +26044 + Telemacro Informacoes e Servicos Ltda. 
+ Fernando Braga + fernando&telemacro.com.br +26045 + Wright Patman Congressional Federal Credit Union + Devin Calef + dcalef&congressionalfcu.org +26046 + interActive Systems GmbH + Thomas Fritzinger + noc&interactive-systems.de +26047 + Fritzinger IT-Consult + Thomas Fritzinger + noc&fritzinger-consult.de +26048 + Mo-Sys Engineering Ltd + Robert North + r_and_d_licensing&mo-sys.com +26049 + Open WWWorks + Andre Heine + info&open-wwworks.de +26050 + Ecole Centrale de Marseille + Geoffroy Desvernay + cri&egim-mrs.fr +26051 + Retep Software + Peter Mount + peter&retep.org.uk +26052 + HALYS + Gilles Deviercy + halys&laposte.net +26053 + Gutjahr GmbH + Martin Gutjahr + mg&gutjahr.de +26054 + ARASOR TECHNOLOGIES PRIVATE LIMITED + DR. K.AL SRIDHARAN + sridhar&arasor.net +26055 + NordicAware AS + H.Eugen Aandal-Frøystadvåg + euaan&nordicaware.no +26056 + Exxim Computing Corporation + Andrew Jeffries + ajeffries&exxim-cc.com +26057 + Alpheus Communications, L.P. + Tom Fraser + thomas.fraser&alpheuscommunications.com +26058 + Codebay Oy + Sami Vaarala + sami.vaarala&iki.fi +26059 + BTECH, Inc. + Bob Entwisle + bentwisle&btechinc.com +26060 + IZT Innovationszentrum Telekommunikationstechnik GmbH + Rainer Perthold + info&izt-gmbh.de +26061 + SMART Embedded Computing, Inc. (formerly 'Artesyn Embedded Technologies') + Dan Sheets + SMART-Embedded-Registrar-EC-ENG&smartembedded.com +26062 + MZL Software Development + Alan Forbes + alan&forbesfamilyonline.com +26063 + Ankhnet Informations Pvt. Ltd. + Dr. Ajay N. Khosla + drkhosla&ankhnet.net +26064 + Braga Moro S.p.A. + Adelio Abbondio + adelio.abbondio&bragamoro.com +26065 + GECO, Inc. + Ralph Crago + ralph.crago&gecoinc.com +26066 + MultiService Forum + Avri Doria + avri&acm.org +26067 + Kyland + Fan GongChen + fangongchen&kyland.com.cn +26068 + Shaanxi Key Laboratory of Satellite-Terrestrial Network Tech.R&D + Chen Ling + chenling&xjtu.edu.cn +26069 + IPG Sales Pty Ltd + Patrick Cole + pac&independent.com.au +26070 + neutralities.net + Contact Executive + contact&atomic9.net +26071 + Marcin Raciborski + Marcin Raciborski + marcin&msystem.com.pl +26072 + Beijing Huisen Networks technology Inc + Xiaodong Wu + wuxd&passart.cn +26073 + VEUSTEC SERVICOS DE INFORMATICA LTDA + Marcelo Botelho + marcelo.botelho&veus.com.br +26074 + TUSC + Chris Taylor + chris.taylor&tusc.com.au +26075 + x2s Limited + J. Coombes + jcoombes&x2s.co.uk +26076 + Metrinomics GmbH + Sven Christel + s.christel&metrinomics.de +26077 + Espelt.net + Aleix Solé Romeu + lasker&espelt.net +26078 + S4Software, Inc. + Peter Walsall + pwalsall&s4software.com +26079 + Burk Technology + Jonathan Burk + jonb&burk.com +26080 + AR Department of Information Systems + Brian Fortson + brian.fortson&arkansas.gov +26081 + Lars Kornwinkel + Lars Kornwinkel + lars.kornwinkel&web.de +26082 + CERTIRA + Jean-Paul ECOCHARD + jean-paul.ecochard&certira.cnafmail.fr +26083 + CacheGuard Technologies Ltd. (formerly 'OneTec') + Charles Tajvidi + iana&cacheguard.com +26084 + Enter srl + Mariano Cunietti + mcunietti&enter.it +26085 + Kunstuniversitaet Linz + Johannes Kremsner + johannes.kremsner&kunstuni-linz.at +26086 + PC - ANWENDUNGEN + Thomas Boscheck + boscheck&boscheck.de +26087 + Markus Meyer + Markus Meyer + mameye&zone.ch +26088 + Aspera OHG + Patrick Loijens + iana.org&aspera.com +26089 + Emanuel Haupt + Emanuel Haupt + ehaupt&critical.ch +26090 + qiiq communication inc. + Gary Zhu + gang&qiiq.com +26091 + Lafarge S.A. 
+ Rami SADI + rami.sadi&lafarge.com +26092 + ACEB Electronique + Marc MALARD + contact&aceb-elec.com +26093 + NiBweb + Nico Bille + nico&nibweb.net +26094 + McGibbon IT-services + Jeroen Haak + j.haak&xs4all.nl +26095 + Canopus Co., Ltd. + Hiroshi Inamura + hiro-i&canopus.co.jp +26096 + Synergy Neworking Ltd + Adam Crisp + adam.crisp&synergy-networking.co.uk +26097 + Linux Training NZ + Ken Lomax + ken&lomax.gen.nz +26098 + King County + Selena Tonti + stonti&kingcounty.gov +26099 + Shanghai Engineering Research Center for Broadband Technologies & Applications(B-STAR) Co., Ltd. + Gao Yihe + yhgao&b-star.cn +26100 + Cypress Integrated Systems, Inc. + Brian Donnelly + brian&cypressintegrated.com +26101 + NUPPEAD - Núcleo de Pesquisa e Projetos em Educação a Distância + Marcelo Gigliotti + admin&nuppead.unifacs.br +26102 + Advance Internet, Inc. + Bob Eckert + beckert&advance.net +26103 + WOBCOM GmbH + Sven Holz + sven.holz&wobcom.de +26104 + bellwin information Co.,LTD + juiwenliang + juiwen&bellwin.com.tw +26105 + Crummock (Scotland) Ltd + Ross McKerchar + it&crummock.com +26106 + Sentivision Polska sp. z o. o. + Jakub Gorski + kuba&sentivision.com +26107 + Cambridge Display Technology Ltd + Ellis Karim + ekarim&cdtltd.co.uk +26108 + HYDRA FUEL CELL CORPORATION + Oliver Garr + oman&pacifier.com +26109 + TPS Pakistan Pvt Ltd. + Omair Ahmed Khan + omair.khan&tpsonline.com +26110 + PrintMIB, LLC + Rick Bock + Rick&PrintMIB.com +26111 + Informi GIS A/S + Christian Junker + iana&informi.dk +26112 + ISGS + Dan Thurston + thurston&isgs.uiuc.edu +26113 + keuning Information Systems + Bernd Keuning + me&bernd-jan.com +26114 + AXIOHM + Frederic Sciortino + frederic.sciortino&axiohm.com +26115 + JSC STC "Rissa" + Nikolay Sakhno + nik&rissa.ru +26116 + Practical Economic Research Limited + Mark Smith + ProjectLynx&shaw.ca +26117 + Lux Solis, LLC + Laurence Flath + lflath&luxsolis.com +26118 + BFSt + Gus Thomas + sppsm&bfst.bund.de +26119 + Trafsys + Eugene Zaikonnikov + eugene.zaikonnikov&trafsys.no +26120 + cweiske.de + Christian Weiske + cweiske&cweiske.de +26121 + Rishi Valley School + M R K Murthy Raju + murthyraju&rishivalley.org +26122 + Zenitel Norway AS + Kjell Ove Roete + kjellove.rote&zenitelcss.com +26123 + Ministerie van de Vlaamse Gemeenschap, belfla telematica + Godfried Verhamme + Godfried.Verhamme&lin.vlaanderen.be +26124 + ComX Networks A/S + Per M. Mortensen + pmm&comx.dk +26125 + Vegacom a.s. + Petr Votava + votava&vegacom.cz +26126 + Makko Solutions, S. A. de C. V. + Jesús Alfonso Rodríguez + jarodriguez&makko.com.mx +26127 + Beijing Vorx Telecommunications Co, Ltd. + Meng Qingyang + hemameng&163.com +26128 + CommSeed Corporation + Masami Hanari + domain&commseed.net +26129 + Logitek Electronic Systems, Inc. + Tag Borland + tag&logitekaudio.com +26130 + Montserrat College of Art + Mick Maldonado + mmaldonado&montserrat.edu +26131 + Vendini Tickets + Timothy J Gerk + tgerk&vendini.com +26132 + Aegis Semiconductor, Inc. + Dave Parent + dparent&aegis-semi.com +26133 + Center for Remote Sensing Ice Sheets, University of Kansas + Thorbjorn Axelsson + thax&cresis.ku.edu +26134 + Institute for Networked Solutions + René Stanger + iana&ins.hsr.ch +26135 + Mount Airey Group, Inc. 
+ Bill Russell + russellwc&mountaireygroup.net +26136 + GENOME express + Marcel de Leeuw + m.deleeuw&genome-express.com +26137 + The University of Texas System + Paul Caskey + netadmin&utsystem.edu +26138 + INTELBRAS S/A + Adailton Adam + adailton.adam&intelbras.com.br +26139 + Samford University + Mearl Danner + jmdanner&samford.edu +26140 + Reflected Networks, Inc. + Phil Doroff + phil&reflected.net +26141 + EDI-PRO + Jean-Pierre MOMMENS + mommens&edi-pro.be +26142 + 3Way Networks Ltd + Chris Moore + snmp&3waynetworks.com +26143 + Centre d'Alt Rendiment Esportiu + Marck Collado + mcollado&car.edu +26144 + IRTE S.p.A. + Federico Brazzelli + federico.brazzelli&irte.it +26145 + Network Systems + Boris Polevoy + boris&netsys.com.ru +26146 + ZAO NPC SPECTRUM + Alexander Mishin + alexmishin&inbox.ru +26147 + Trivore Corp. + OID Master + hostmaster&trivore.com +26148 + Suburban Medical Laboratory, Inc. + Michael Nowlin + mike&smlab.com +26149 + India Mobility Research (IMR) + Pratap Sigh Ratra + pratap.ratra&indiamobility.com +26150 + FDT Manufacturing, LLC + Greg Ansley + cto&telecine.com +26151 + Win32Tools + Brandon Bernier + Brandon&Win32Tools.com +26152 + Helsinki IT Systems Consulting + Jukka Hienola + jukka.hienola&iki.fi +26153 + HighPoint Technologies, Inc. + May Hwang + mhwang&highpoint-tech.com +26154 + biaix + Joan Picanyol i Puig + hostmaster&biaix.org +26155 + Bancolombia S.A + Natalia Ramirez Escobar + nramirez&bancolombia.com +26156 + Quicksilva Ltd. + Craig Millard + itsupport&qxlva.com +26157 + Chupa Chups, S.A. + Xavier Roig Gracia + xroig&chupachups.com +26158 + Bertram Yacht Inc. + DANIELE PECA + daniele.peca&bertram.com +26159 + Euro Connect + Stephane LE COZ + stephane.lecoz&euroconnect.fr +26160 + VUMS Control Systems, a.s. + Josef Blahut + vums&vums.cz +26161 + Rocketseed + Thys Kitshoff + support&rocketseed.com +26162 + TELROS + Sergey Bybin + bybin&telros.ru +26163 + AirCUVE Co., Ltd. + You-serk Han + bluecomp&aircuve.com +26164 + Tri-vision Electronics Inc + Cam Siddiqui + qsiddiqui&aol.com +26165 + US Airways + Ron Rickard + ron.rickard&usairways.com +26166 + Nebraska Wesleyan University + Chris St. Pierre + stpierre&nebrwesleyan.edu +26167 + Blakemere Technologies Inc. + Russell Davies + russell.davies&blakemere.ca +26168 + Idologic Inc. + Jeff Gardiner + webmaster&idologic.com +26169 + eLynx Ltd. + Dusty Doris + ddoris&elynx.com +26170 + Clario Medical Imaging, Inc. + Peter McLain + pbm&clariomedical.com +26171 + Parallels Holdings Ltd (formerly 'SWsoft, Inc.') + Pavel Gashev + admin&parallels.com +26172 + Keble College + Steve Kersley + postmaster&keble.ox.ac.uk +26173 + monaghen.com + Troy Monaghen + troy&monaghen.com +26174 + SignaCert, Inc. + John Kasinger + john.kasinger&signacert.com +26175 + R&S Medizinsysteme + Janis Schuller + iana&rsmedical.de +26176 + AB Computing Ltd + Martin Bradford + mab&abcomputing.co.uk +26177 + Black Press Group Ltd. + Dave Blair + domainadmin&bcnewsgroup.com +26178 + LandQuest Services Ltd. + Lorne Chartier + lorne.chartier&landquestservices.com +26179 + Connectiva Systems + Manas Mandal + mmandal&connectivasystems.com +26180 + Queens Academic Group Ltd + Stephen Boswell + stephen&queens.ac.nz +26181 + Cohda Wireless Ltd + Peter N Pham + peter.pham&cohdawireless.com +26182 + Entelechy Systems + Kyle Warner + kwarner&entelechysys.com +26183 + Northeastern State University + William Gillen + gille001&nsuok.edu +26184 + NSP + Christian Pedaschus + cpedaschus&gmx.de +26185 + Interalia Inc. 
+ Charl Coetzee + oid.admin&interalia.com +26186 + Yocobox Soluciones Digitales + Gabriel Czerniecki + gabrielcz&yocobox.com +26187 + Scott Logic Ltd. + Tom Bentley + tbentley&scottlogic.co.uk +26188 + Switch Media + Matt Collier + matt.collier&switchmedia.co.uk +26189 + Qstreams Networks + Ben Warren + bwarren&qstreams.com +26190 + Crisp Thinking Ltd + Adam Hildreth + adam.hildreth&Crispthinking.com +26191 + Jadebird Inc. + Jackie Huen + jackie&jadebird.com +26192 + Sony Global Solutions Inc. + Toshio Hiraga + Toshio.Hiraga&jp.sony.com +26193 + Hangzhou Grancom Information & Technology Co.,Ltd. + Xu Yuanxin + yuanxinxu&163.com +26194 + Rasmus Hahn + Rasmus Hahn + rassahah&neofonie.de +26195 + Imagic Bildverarbeitung AG + Peter Felix + felix&imagic.ch +26196 + AVT Audio Video Technolgies GmbH + Wolfgang Peters + wpeters&avt-nbg.de +26197 + HITT GmbH + Raimund Vogl + R.Vogl&hitt.at +26198 + Hedmark University College + Odd Kristian Lundby + Odd.Lundby&hihm.no +26199 + KEEPIXO + MR JEROME BLANC + jerome.blanc&keepixo.com +26200 + Solid Rock IT + Inigo Kintana + inigokintana&solid-rock-it.com +26201 + UHU-Linux Kft. + Tamási János + contact&uhulinux.hu +26202 + Snipe Networks + Andy Song + asong&snipenetwork.com +26203 + I.T. Advance Consulting, Inc. + Network Administration + network.administration&itadvance.com +26204 + University of Utah Department of Medical Informatics + Aaron Kamauu + radinfo&akamauu.com +26205 + Bright Horizons Family Solutions + George Peguero + gpeguero&brighthorizons.com +26206 + On Site Network Solutions, Inc. + Luis Chanu + Luis&OnSite.Com +26207 + FT Interactive Data Inc. + Paul Andrews + Paul.Andrews&interactivedata.com +26208 + PDL BioPharma, Inc. + Luis Chanu + Luis.Chanu&PDL.Com +26209 + Louisiana Department of Health and Hospitals + Jeff Penton + jpenton&dhh.la.gov +26210 + ACM Systems + Lisa Schamaun + Lisa.Schamaun&sncorp.com +26211 + Universidade Federal de Minas Gerais + Paulo Fernando Seixas + paulofs&ufmg.br +26212 + Harmonic Technology Group, LLC + Rich Dammkoehler + rich.dammkoehler&harmonicone.com +26213 + Stahlgruber GmbH & Co KG + Andreas Jaekle + andreas.jaekle&stahlgruber.de +26214 + Afilias Canada + Michael Young + myoung&ca.afilias.info +26215 + Progensys Ltd + Jason Banks + jasonb&progensys.co.uk +26216 + Deck S.r.l. + Emilio Capparelli + emilio_k&libero.it +26217 + Systel SA + RENOUD Christophe + contact&systel-fr.com +26218 + Nova Scotia Cancer Centre + Michael Hale + Michael.Hale&cdha.nshealth.ca +26219 + Enagas S.A. + Fernando Muñoz Gonzalez + dsi.inf.sg&enagas.es +26220 + Gunter, Winston, & O'Neal Corporation + Winston O. Gunter + z1126461473&yahoo.com +26221 + BeTV Belgium + Belaid Abadja + babadja&betv.be +26222 + S&S Pamin + Sascha-David Pamin + sascha-david.pamin&web.de +26223 + Jakobs Family Enterprises + Keith Jakobs + elohir&hotmail.com +26224 + MONITORAPP Co.,Ltd. + Wonseok Tony Yang + tony7&monitorapp.com +26225 + Studio DIGITAL ART + CRETINON Pierre-Mael + info.iana&digital-art.net +26226 + Netcom Systems + Eldad Zack + eldadz&netcom.co.il +26227 + mindjail consulting Ltd. + Daniel King + contact&mindjail.net +26228 + Tricipher, Inc. + Andy Cottrell + oid&tricipher.com +26229 + Data Management S.p.A. + Richard Golding + rgolding&datamanagement.it +26230 + Kirusa Inc. 
+ Prasanna Uppaladadium + prasanna&kirusa.com +26231 + Punch Telematix + Kenny Gryp + kenny.gryp&punchtelematix.com +26232 + scimmia.net + Alessandro -oggei- Ogier + info&scimmia.net +26233 + Stephen Thompson Consulting, LLC + Stephen Thompson + stephen&stephent.net +26234 + Gate Petroleum Company + Austin McCormack + amccormack&gatepetro.com +26235 + Computer Networks Laboratory at Technical University of Kosice + Juraj Giertl + juraj.giertl&cnl.tuke.sk +26236 + Camara de Comercio y Produccion de Santo Domingo + Jenner Almanzar + jalmanzar&camarasantodomingo.do +26237 + Elders Limited + John Dell'Oso + john.delloso&elders.com.au +26238 + Consortium GARR + Gabriella Paolini + gabriella.paolini&garr.it +26239 + Bank Solidarnost + Dmitry A. Marin + marin_da&solidar.ru +26240 + Hochschule Darmstadt + Lars Seipel + lars.seipel&h-da.de +26241 + ALLCARE ADMINISTRATORS (PTY) LTD + SAM GELBART + samg&allcare.co.za +26242 + Loy & Hutz Aktiengesellschaft + Dirk Gaudian + dirk.gaudian&loyhutz.de +26243 + SIS Schul Internet Service + Pascal Schuppli + sisadmin&sis.edube.ch +26244 + Ocilion IPTV Technologies GmbH + Markus Schmidleitner + markus.schmidleitner&ocilion.com +26245 + Australia MultiDrive Development (AMD) + Rubayat Islam + bdbusiness&gmail.com +26246 + North Seattle Community College + Paul Piecuch + ppiecuch&sccd.ctc.edu +26247 + ServiceTrace e.K. + Markus Duus + mduus&servicetrace.de +26248 + die NetzWerkstatt + Dominik Meyer + dmeyer&die-netzwerkstatt.de +26249 + Transact Services LLC + Claudio Mendoza + cmendoza&gate.net +26250 + Information and Multimedia Center, Gifu University + Yuichi Uchida + imc&gifu-u.ac.jp +26251 + ZZNODE + Ding Yong + yong.ding&zznode.com +26252 + Universidad de Sonora + Carlos Lizarraga + carlos&fisica.uson.mx +26253 + Iduntec + Henrik Morell + henrik.morell&iduntec.com +26254 + PesaPoint Ltd + Hilary Ngeno + ngenoh&paynet.co.ke +26255 + Our Lady of the Lake Hospital, INC. + Walter Paine + wpaine&ololrmc.com +26256 + Fluendo S.A. + Julien Moutte + julien&fluendo.com +26257 + RT-Tech LLC + Terrell Simms + terrell.simms&gmail.com +26258 + Datum International Ltd + Lavinia Edmed + ledmed&datumplc.com +26259 + GlobalX Technologies, LLC + Jessie Williams + jwilliams&blansys.com +26260 + NIBCO INC. + Ray Storer + raymond.storer&nibco.com +26261 + MAXON CIC Corp. + Seo Kyung Deok + kdseo&maxoncic.com +26262 + NMMN New Media Markets & Networks GmbH + Arne P. Boettger + technik&nmmn.com +26263 + Douglas School District + Bert Twiggs + btwiggs&dsdk12.net +26264 + Galeries Lafayette + Sébastien Guerlet + sguerlet&galerieslafayette.com +26265 + Flawed Logic Server Management Software + Joshua Wilson + jwilson&cs.utsa.edu +26266 + LITE-ON TECHNOLOGY CORP. + Steve Kao + Steve.Kao&liteon.com +26267 + Kodiak Networks India Pvt. Ltd + Pankaj Kumar Roy + proy&kodiaknetworks.com +26268 + PositiveArt + Jonas Westphal + info&positive-art.de +26269 + inSORS Integrated Communications, Inc. 
+ Jon Swanson + webmaster&insors.com +26270 + Olivetti S.p.a + Mario Tavarelli + m.tavarelli&olivetti.com +26271 + acosta.se + Fredrik Acosta + fredrik&acosta.se +26272 + SALICRU + Antoni Triado + atriado&salicru.com +26273 + Ventia Pty Limited + Dario Scopesi + info&desknow.com +26274 + Digital Ideas Pty Ltd + Ian Holland + networkadmin&digitalideas.com.au +26275 + Indigo Stone International Ltd + Lindsay Braine + lbraine&indigostone.com +26276 + Heinrich Nirschl + Heinrich Nirschl + heinrich.nirschl&gmail.com +26277 + Voronezh State University + Andy Igoshin + noc&vsu.ru +26278 + Zarafa + Steve Hardy + info&zarafa.com +26279 + Envision Technology Partners, Inc. + Ray Seggelke + rseggelke&envisiontechnology.com +26280 + Adara Networks + Robert Guillen + rguillen&adaranet.com +26281 + The EA Group + Mark Neff + markn&eagroup.on.ca +26282 + FYI Corporation + Bob Bronar + bob.bronar&fyicorp.com +26283 + Christoph Anderegg + Christoph Anderegg + ca256&gmx.net +26284 + Ochsner Health System + Mark Neill + mneill&ochsner.org +26285 + S. I. Tech, Inc. + Ramesh D. Sheth + ramesh&sitech-bitdriver.com +26286 + Alfree Systems + Maxim Bodyansky + info&alfree.ru +26287 + Exelmind Ltd. + Lahav Savir + lahavs&exelmind.com +26288 + Kavaro Oy + Ossi Väänänen + ossi&kavaro.com +26289 + VIMPLICITY Ltd. + Gideon Feldman + gidi&vimplicity.com +26290 + Exatel S.A. + Peter Debski + piotr.debski&exatel.pl +26291 + The Revolution Group Pty Ltd + Dirk Bermingham + dirk&revolutionit.com +26292 + TechSyndicate + Anthony Brown + administrator1&nyc.rr.com +26293 + Interaktivo Risinajumu Grupa, SIA + Ingemars Asmanis + ingemars&irgrupa.lv +26294 + Endurance International Group + Mark Moseley + mmoseley&enduranceinternational.com +26295 + Cyberlog Ltd + Dennis Fedak + dfedak&cyberlog.net +26296 + Resource SiteSeeing Ltda. + Fernando Gago Gomes + fgomes&siteseeing.com.br +26297 + Wireless Broadband Services Pty. Ltd. + Shannon Wynter + oid.admin&wbs.net.au +26298 + Electionmall Technologies Inc. + Manpreet Singh Nehra + manpreet&electionmall.com +26299 + EasternGraphics GmbH + Adam Minski + admin&easterngraphics.com +26300 + Genera Oy + Timo Arpola + timo.arpola&genera.fi +26301 + Texas Digital Systems, Inc + Kevin George + kgeorge&txdigital.com +26302 + Blonder Tongue Laboratories, Inc. + Cliff Fox + cfox&blondertongue.com +26303 + CarrierIQ Inc. + Ivan Aladjoff + ivan&carrieriq.com +26304 + SNMPinfo + David T. Perkins + dperkins&snmpinfo.com +26305 + Sykora Technology Inc. + Boleslav Sykora + boles&sykora.ca +26306 + Opportunity Solutions A/S + Anton Chr. Lauridsen + alauridsen&opportunity.dk +26307 + Skyguide - swiss air navigation services ltd. + Stefan M. Strasser + stefan.strasser&skyguide.ch +26308 + Saab TransponderTech AB + Henrik Toorvald + henrik.toorvald&transpondertech.se +26309 + Draksoft Soluzioni Informatiche S.a.s. + Marco Carcano + mcarcano&draksoft.com +26310 + Departamento Municipal de Eletricidade de Poços de Caldas + Anderson Stano + adurelli&dme-pc.com.br +26311 + Andreas Kinzler + Andreas Kinzler + akinzler&gmx.de +26312 + Genius Company + BALTAZAR François + baltazaf&3il.fr +26313 + Mark Roscrow + Mark Roscrow + mib&roscrow.net +26314 + Xcontrol GmbH + Hendrik Krüger + hendrik&xcontrol.de +26315 + Asian Access Networks Pte Ltd. 
+ Malcolm Chng + support&asianaccessnet.com +26316 + Pacific States Marine Fisheries Commission + Todd Kaehler + kaehler&psmfc.org +26317 + First Choice Health Network + Greg Brown + gbrown&fchn.com +26318 + Condor Networks + Ramanathan Sanjeevan + rams&condornetworks.com +26319 + HomeCity Net Ltd. + Spark Tsai + spark.tsai&chiahsin.com.tw +26320 + Project Team s.r.l. + Daniele Asmonti + info&prjteam.com +26321 + Christian Tena + Philip Christian + philip.christian&christiantena.co.uk +26322 + Tokyo University of Marine Science and Technology + Harunori Tokunaga + tokunaga&e.kaiyodai.ac.jp +26323 + Informatica Productiva Conocimiento y Tecnologia, S.L. + Maximo Calero Sanchez + tecnologia&punto-ip.com +26324 + Wurm GmbH & Co. KG + Karsten Vossberg + oid.admin&wurm-systeme.com +26325 + GT London Ltd + David Holroyd + david.holroyd&goodtechnology.com +26326 + ADTECH AG + Christian Schroeder + schroeder&adtech.de +26327 + Kassenzahnärztliche Vereinigung Westfalen-Lippe + Herr Evelt + Michael.Evelt&zahnaerzte-wl.de +26328 + Blackfoot Telephone Cooperative, Inc. + Navan Carson / Internet Administrator + ncarson&blackfoot.com +26329 + Simpler-Webb, Inc. + Andy Webb + adea&swinc.com +26330 + South Shore Hopsital + Daniel MacNeil + daniel_macneil&sshosp.org +26331 + Trilink,Inc + koichiro kimoto + kimoto&trilink.jp +26332 + TechnoKad + Nikolay Ouchilikhin + nix&TechnoKad.ru +26333 + Brose Fahrzeugteile + Christian Gueckel + christian.gueckel&brose.com +26334 + MEGATRON DATA SERVICES GMBH + Alfred Marx + alfred.marx&megatrondataservices.com +26335 + DiViNetworks Ltd. (formerly 'IPortent Ltd.') + Yair Shapira + yair&divinetworks.com +26336 + Pawel Kraszewski + Pawel Kraszewski + pawel&kraszewscy.net +26337 + Paper Management Services Ltd. + Richard Selvey + richard_selvey&paper-man.co.uk +26338 + IWICS Inc. + David W. Mendes + david.mendes&iwics.net +26339 + Risk Laboratories, LLC + Matt Smith + msmith&risklabs.com +26340 + SKBrasil + Hans Marcus Krüger + hans&skbrasil.net +26341 + Madein.hu Bt. + Mihaly Zachar + zmihaly&madein.hu +26342 + Fujitsu System Solutions Ltd. + SATOH Shinobu + satoh_shinobu&jp.fujitsu.com +26343 + Grainmustards Co.,Ltd. + Junichi Yoshida + jyoshida&grainmustards.com +26344 + Advanced Institute of Industrial Technology + Hideki Murakoshi + hm&aiit.ac.jp +26345 + PAK ELECTRICAL APPLIANCE CO., LTD + zhou zhi cong + congli.cn&gmail.com +26346 + All-Powerful Dave + David Gee + oid&allpowerfuldave.com +26347 + acens technologies S.A. + Bea Diéguez + beatriz.dieguez&acens.com +26348 + Maenken Systems + Bjoern Maenken + maenken&maenken.de +26349 + Shyam Telecom Ltd. + Rajeev Jha + rajeev.jha&shyamtelecom.com +26350 + Beijing Agree Technology Development Ltd. + Shammy Chen + chen.xm&agree.com.cn +26351 + NILES Werkzeugmaschinen GmbH + Kurt Kaune + kurt.kaune&kapp-niles.com +26352 + Power Systems LLC + Shawn Moore + SMoore&PowersystemsLLC.com +26353 + Blue Lake Rancheria + Scott Joachim + scott.joachim&bluelakerancheria-nsn.gov +26354 + Software Concepts, Inc. + Andrew Beck + IANA&SoftwareConceptsInc.com +26355 + Fiplex Communications Inc. + Florencia de Goycoechea + florencia&fiplex.com +26356 + Vector Technology + José Orlando Castro + orlando&vector-technology.com +26357 + Commonwealth Service Delivery Agency (Centrelink Australia) + Jennifer Margrison + comms.support&centrelink.gov.au +26358 + XM Asia Pacific Pte Ltd + Sito Tuck Seng + tuckseng.sito&xm-asia.com +26359 + Xpressent, Inc. + Saroop Mathur + saroop&xpressent.com +26360 + Woodward Ventures + Richard M. 
Woodward + ldap-schema&woodward-ventures.com +26361 + Solution Innovators, LLC + Jay van Achterberg + regs&solutioninnovators.com +26362 + Wenzel Elektronik GmbH + Michael Roeper + m.roeper&wenzel-elektronik.de +26363 + Woodhead + Jerome Combarieu + jcombarieu&applicom-int.com +26364 + EID - Empresa de Investigação e Desenvolvimento de Electrónica, S.A + Bento Rações + bento&eid.pt +26365 + Critical Links SA + Antonio Alves + aalves&critical-links.com +26366 + Amano Cincinnati + Vijay Parikh + vparikh&amano.com +26367 + Nondeterministic Information Systems + Alastair Hewitt + webmaster&nodem.info +26368 + Micko Group, Inc. + Dragan Mickovic + dmickovic&gmail.com +26369 + Mattig-Schauer Ges.m.b.H + Johannes Bauer + Office&mattig-schauer.at +26370 + Bend Cable Communications, LLC + Wade Holmes + dwholmes&bendbroadband.net +26371 + School of Oriental and African Studies + Mark Douglas + postmaster&soas.ac.uk +26372 + Stadig Technologies, LLC + Paul Stadig + paul&stadigtech.com +26373 + GRI e.V + Mr. Torsten Geile + geile&oekonet-bremen.de +26374 + University of Illinois + Joseph Barnes + security&illinois.edu +26375 + Ockan + Roberto Costa Filho + rtcosta&gmail.com +26376 + Tribunal Regional Eleitoral de Santa Catarina + Luiz Angelo Daros de Luca + luizluca&tre-sc.gov.br +26377 + ACC Ingenierie et Maintenance + Dominique Chateau-Annaud + dominique.chateau&accim.com +26378 + PrettyBit Software Oy + Pauli Porkka + postmaster&prettybit.fi +26379 + I.E.P.A.L.A. + Fco. Javier Picado Ladrón de Guevara + informatica&iepala.es +26380 + buaa608 + GengYawei + geng_yawei&yahoo.com +26381 + lwIP - A Lightweight TCP/IP stack + Christiaan Simons + cc_simons&yahoo.com +26382 + Sis'ta Salsa, LLC + Deana Wright + sistasalsa&comcast.net +26383 + Pay By Touch + Yoni Livshiz + yoni.livshiz&paybytouch.com +26384 + GANDI + Stéphane Enten + noc+iana&gandi.net +26385 + BUFETE CONSULTOR DE MEXICO, S.A. DE C.V. + FELIPE BAROUSSE + fbarousse&piensa.com +26386 + Koerber Enterprise + Mathias Körber + enterprise&koerber.org +26387 + John Cantu + John Cantu + john.cantu&gmail.com +26388 + Profil GmbH + Dr. R. Hilkenbach + rh&profil-hannover.de +26389 + Factory Mutual Insurance Company + Roney R. Brunetti + roney.brunetti&fmglobal.com +26390 + Woven Systems Inc. + James Liao + jliao&wovensystems.com +26391 + Kelyan Lab S.p.A. + Federico Gianni + f.gianni&kelyanlab.it +26392 + NetClarity + Michael Su + msu&netclarity.net +26393 + CallCopy, Inc. + Tarne Tassniyom + tarne&callcopy.com +26394 + Emaze Networks S.p.A. + Alessandro Budai + oid&emaze.net +26395 + American Telecommunication Inc. + Claudio Latorre + clatorre&amtelusa.com +26396 + Rising Edge Development LLC + Kerry Woodbury + info&risingedgedev.com +26397 + Oklahoma State University + Jason Stevens + jason.stevens&okstate.edu +26398 + WasaLab Oy + Juhani Puska + wasalab&wasalab.com +26399 + Emencia SARL + Roger Fernandez + roger&emencia.com +26400 + Bundesnetzagentur + Alexander Pozsgai + Alexander.Pozsgai&BNetzA.De +26401 + Alarmtech Polska sp. z o.o. + Jerzy Tlaga + jerzy.tlaga&alarmtech.pl +26402 + ELETRONIQUE PLUS SA + OLIVIER UMUTONI NICOLAS PATRICK + umutoni680&hotmail.com +26403 + IMATech2.net + Elizabeth Kay Lucas + ekl&sprintpcs.com +26404 + SupplyScape Corporation + Virgil Goins + dgoins&supplyscape.com +26405 + The MetalFish Consultancy Ltd. + Nick Carter + metalfish&btopenworld.com +26406 + CyberDefenses, Inc. + Phil M. 
Dolbow + phil.dolbow&cyberdefenses.com +26407 + Tiger Communications Plc + peter garner + peterg&tigercomms.com +26408 + WolfVision GmbH + Manfred Schmid + manfred.schmid&wolfvision.com +26409 + ERecruiters Ltd. + Matthew King + dirmaster&time-net.co.uk +26410 + Pakamera + Karol Wozniak + snmplab&pakamera.com +26411 + Vocality International Ltd + Martin Saunders + snmp_vocality&hotmail.co.uk +26412 + Alexandr Kara + Alexandr Kara + Alexandr.Kara&seznam.cz +26413 + Golden West Internet Solutions + Tad DeVries + gwisoperations&goldenwest.com +26414 + Fenri + Jan Podany + jpodany&fenri.com +26415 + J. & W. Seligman & Co. Incorporated + William R. Benz + seligmandomain&jwseligman.com +26416 + MetaCarta, Inc. + Greg Harris + gharris&metacarta.com +26417 + Transcepta LLC + David Mannion + dmannion&transcepta.com +26418 + Netsafe Information Technology Ltd. + Yang Shen + shenyang&netsafe.com.cn +26419 + transtec AG + Jörg Ruberg + joerg.ruberg&transtec.de +26420 + clan informatik AG + Marco Mattle + marco.mattle&claninfo.ch +26421 + Daughters of Charity Services of New Orleans + David Ward + warddj1&cox.net +26422 + IABG mbH + Roland Krausser + krausser&iabg.de +26423 + Tlantic SI + Jose Fernando Possebon Junior + possebon&tlantic.com.br +26424 + syscare.net + Maciej Matysiak + contact-iana.org-mibassign&syscare.net +26425 + Layered Technologies + Michael Hale + mhale&layeredtech.com +26426 + Forget About IT Ltd. + Marco van Beek + mvanbeek&forgetaboutit.net +26427 + Kansas Department of Health and Environment - State of Kansas + Norma Jean Schaefer + njschaefer&kdhe.state.ks.us +26428 + Bedford Hospital NHS Trust + Oliver Chandler + oliver.chandler&bedfordhospital.nhs.uk +26429 + Xinwei Telecom Technology,Inc. + Zhakai Yu + yuzhakai&bj.xinwei.com.cn +26430 + E Technologies, Inc + Carilda A Thomas + cat&the-cat.com +26431 + Thai National Grid Center + Sugree Phatanapherom + sugree_ph&thaigrid.or.th +26432 + Center of Information technologies of Tatarstan Republic + Almaz Valiullin + almaz&mcrt.ru +26433 + Calyx Internet + Alexander Grendel + info&calyx.nl +26434 + Touch Sense Inc + Rodolfo Acuna + tsense.rodolfo&gmail.com +26435 + Noam Communications + Sami Lehtimäki + info&noam.fi +26436 + HSH Soft- und Hardware Vertriebs GmbH + Marko Storll + marko.storll&hsh-berlin.com +26437 + Universität zu Köln + Sebastian Hagedorn + Hagedorn&uni-koeln.de +26438 + S.V. Paradoks + H.T. van Veen + info&paradoks.utwente.nl +26439 + SFERIA SA + Wojciech Rolecki + Wojciech.Rolecki&sferia.pl +26440 + BliXem Internet Services B.V. + Jeroen Boonstra + jeroen&blixem.nl +26441 + Technikon Forschungs- und Planungsgesellschaft mbH (Ltd.) + Dr. Klaus-Michael KOCH + coordination&opentc.net +26442 + PROLIVAL + José Barbosa + jose.barbosa&prolival.fr +26443 + Cinetix Srl + Annalisa Giusto + annalisa.giusto&cinetix.it +26444 + Coroware, Inc + Martin R. Calsyn + mcalsyn&coroware.com +26445 + Special Systems and Software, Inc. + Jozef Hanus + jozef.hanus&special.sk +26446 + Ouvi Divulgacao e Marketing em Celulares Ltda. + Thomas Woillan + thomas&ouvi.com.br +26447 + Compro Computer Services, Inc + Curtis Rubel + crubel&compro.net +26448 + Variomedia AG + Marten Lehmann + lehmann&variomedia.de +26449 + Okto Tecnologia e Serviços de Informática Ltda. + Cecilia de Paula e Maia + cmaia&okto.com.br +26450 + Hubbell Inc. 
+ Shadi AbuGhazaleh + sabughaz&hubbell-premise.com +26451 + Esencis + Jiri Nemecek + info&esencis.cz +26452 + HiTB + guo yingshou + guoyingshou&hitb.com.cn +26453 + VJ Systems + Vikash Ramanlal + ramanlalv&paradise.net.nz +26454 + wealink.com limited Inc. + Wenming Mao + wmao&staff.wealink.com +26455 + Secretariat of Information Technology, Government of Haryana + Amit Mittal + amit&hry.nic.in +26456 + Budapest University of Technology and Economics (BME) + Peter Popovics + pop&eik.bme.hu +26457 + Meditec GmbH + Torsten Rottmann + trott&meditec-gmbh.com +26458 + tetrade inc. + Pascal Buchbinder + pascal.buchbinder&tetrade.ch +26459 + wavesyscom co.,Ltd + Jea-Yong Park + wavesyscom&wavesyscom.com +26460 + Alcor Communications + Chris Down + chris.down&alcor.co.uk +26461 + Knorr & Partner Ingenieure + Michael Loos + michael.loos&kpconsult.de +26462 + LANCK Telecom (Express TeleService Corp.) + Konstantin Chervyakov + k.chervyakov&lancktele.com +26463 + Wyless plc + Martin Parr + global_ti&wyless.com +26464 + Dundalk Institute of Technology + Ruairi Hickey (Computer Services) + compserv&dkit.ie +26465 + i-together Ltd + Luke Razzell + weaverluke&gmail.com +26466 + Leissner Data AB + Pontus Löfgren + registry&leissner.se +26467 + HANZO S/A + Walter Bertot + Admin&HANZO.com.br +26468 + I.T. Management and Consultation Services (ITMACS) + Dale Conway + dale&bmcc.nf.ca +26469 + AirClic Inc. + Andy Monroe + snmp&airclic.com +26470 + Media Movel Serviços Interativos LTDA + Ricardo Ferro + rferro&mediamovel.com.br +26471 + International Teams + Dann Farquhar + dann.farquhar&iteams.org +26472 + Nordic Optical Telescope Scientific Association + Sergio Armas Perez + sap&not.iac.es +26473 + Digiplug + Béatrice Pipet + beatrice.pipet&digiplug.com +26474 + Midwest Connections Inc. + Zane C. Bowers + zanecb&midwest-connections.com +26475 + GTN77 + Eric Geyres + e.geyres.gtn77&wanadoo.fr +26476 + Advanced Devices S.p.A. + Eraldo Corti + eraldo&tin.it +26477 + Uwe Perl + Uwe Perl + uwe.perl&gmx.de +26478 + SI3SI + Laurence Wronski + lwronski&si3si.com +26479 + Schenck Process Europe GmbH + Wolfgang Kaiser + w.kaiser&schenckprocess.com +26480 + infler.de + Patrick Blitz + pb&fulgor.org +26481 + Zane C. Bowers + Zane C. Bowers + vvelox&vvelox.net +26482 + Rossonet S.r.l. + Andrea Ambrosini + andrea.ambrosini&rossonet.com +26483 + JW NetSource, LLC + Jeff Whitman + jwhitman&jwnetsource.com +26484 + Cisco Systems, Inc. (formerly 'Arch Rock Corporation') + Philip Buonadonna + pbuonado&cisco.com +26485 + Cryoserver Ltd + Chris Jolly + chris.jolly&cryoserver.com +26486 + The Stanley Works + STS-NetworkTelco + STS-NetworkTelco&stanleyworks.com +26487 + WebSpace-Forum + Thomas Wendt + hostmaster&webspace-forum.de +26488 + Ender Informatics GmbH + Ender Josef + josef.ender&ender-informatics.ch +26489 + IntelliData Systems Ltd + Mike King + snmp&intellidatasystems.com +26490 + Beverli.NET + Bjarke Lindberg + bvli&beverli.net +26491 + Metzler Tech Ltd + Thomas Metzler + thomas&metzler.org +26492 + Dongyang Telecom co., LTD + Eok One, Lee + eeone&dyt.co.kr +26493 + Urzad Marszalkowski Wojewodztwa Malopolskiego + Mariusz Kowalik + mariusz.kowalik&umwm.pl +26494 + CSJGlobal. Inc. + Seong-Bok Lee + sblee&csjcorp.com +26495 + Scope + Thomas Metzler + thomas.metzler&scope.org.uk +26496 + ZipLip, Inc. + Stephen Chen + snmp&ziplip.net +26497 + BitBin LLC + David Varieur + bitbin&gmail.com +26498 + DART Industries Inc + John Stimpson + johnstimpson&tupperware.com +26499 + Memo Serviços Interativos S.A. 
+ Luiz Rocha + luizrocha&meflurbrasil.com.br +26500 + Carl Zeiss AG + Thomas Prim + prim&zeiss.de +26501 + Pari Medical Holding GmbH + Frank Schmidt + f.schmidt&pari.de +26502 + Emerging Information Systems Inc. + Alvin Lee + Alvin.Lee&naviplan.com +26503 + University of Applied Science Osnabrück + Dipl.-Ing. Ralf Neugebauer + R.Neugebauer&et.fh-osnabrueck.de +26504 + A3IP + Cyril HAENEL + cyril.haenel&a3ip.com +26505 + Zenexity + M. Leonard WEST + lwe&zenexity.fr +26506 + IT-Ideas + Joerg Frahm + ldap&it-ideas.de +26507 + TJ NET Spa + Pietro Marchionni + pietro.marchionni&tj.net +26508 + Electronic Forge + Adrian Buxton + adrian&electronicforge.net +26509 + Digital Electronics Corporation + Keiji Tani + keiji.tani&mail.digital.co.jp +26510 + Like Minded People Ltd + Jason Woodruff + jason&likemindedgroup.com +26511 + TAC Bilgisayar Hizmetleri Ic ve Dis Ticaret A.S. + Yalcin Gerek + yalcin.gerek&tacas.com.tr +26512 + GEEK Bilgisayar Muhendislik ve Sanayi LTD STI + Aykut KARA + akara&geek.com.tr +26513 + Hellenic Academic and Research Institutions CertificationAuthority + Dimitris Zacharopoulos + jimmy&it.auth.gr +26514 + Suntel Communications + Peter Nixon + peter+iana&suntel.com.tr +26515 + Tepe Teknoloji + Aydyn BAKIR + abakir&tepeteknoloji.com.tr +26516 + Innovation Designs Ltd. + Chris Stavrogiannis + chriss&noc.uoa.gr +26517 + BESECURE + Andreas Lalos + a.lalos&besecure.gr +26518 + MEG Elektronik Ltd. + M. Erdinc Gogus + erdinc&meg.com.tr +26519 + GEEK Bilgi Teknolojileri LTD STI + Aykut KARA + akara&geek.com.tr +26520 + Pierbridge Limited + Mark Newburn + mark.newburn&pierbridge.com +26521 + Pannasoft Technologies + Janaka Low + janaka&pannasoft.com +26522 + MDG InfoSec + Ray Flinkerbusch + iana.org&mdg4infosec.nl +26523 + American Qualified Plans, Inc. + Eric Storjohann + eric.storjohann&aqp.com +26524 + lyman & associates + john r. lyman + j.r.lyman&ezdev.com +26525 + Golden Dynamic Enterprises Ltd. + John Ko + net.mgt&gdeasia.com +26526 + eTouch Systems Corp + Ajay Upadhyaya + aupadhyaya&etouch.net +26527 + Ircona + Tim New + tim.new&ircona.com +26528 + Kirche fur Oberberg e.V. + Johannes Rueschel + hhhannes&web.de +26529 + CNRST + Sami Ait Ali Oulahcen + s.aitalioulahcen&cnrst.ma +26530 + Telemetry BTT + Kotov Oleg + kotovoe&btt.ru +26531 + Kodak Imaging Network, Inc + Thomas A. Bell + tbell&kodakgallery.com +26532 + eB Technologies (M) Sdn. Bhd. + Yulianto Widjasena + yulianto&eb.net.my +26533 + Nurun inc. + Carl de Billy + carl.debilly&nurun.com +26534 + think project! International (formerly 'AEC/communications GmbH') + Claus Koch + claus.koch&thinkproject.com +26535 + OMNEX Control Systems ULC + Doru George Botez + dbotez&omnexcontrols.com +26536 + Ringland Consulting LLC + James Ringland + james&ringland.net +26537 + Riorey Inc. + Joe Chisolm + jchisolm&riorey.com +26538 + Duaxes Corporation + Genta Iha + iha&duaxes.co.jp +26539 + Ionoscape Associates + Paul Sapp + paul&ionoscape.com +26540 + cudeso.be + Koen Van Impe + info&cudeso.be +26541 + matrix technology AG + Ralf Hornik + ralf.hornik&matrix.ag +26542 + JHC Plc + Andrew Love + andrew.love&jhc.co.uk +26543 + Blade Network Technologies, Inc. 
+ Cynthia Gabriel + cynthia.gabriel&bladenetwork.net +26544 + AVTOVAZ JSC + Alexander Anokhin + ava&vaz.ru +26545 + SalesForce Australia (Salmat) + Mel Boyce + mel&salesforce.com.au +26546 + CP Secure Inc + Shuang Ji + shuang_ji&cpsecure.com +26547 + Nord Data A/S + Allan Baf + ndaba&norddata.dk +26548 + Prévoir Vie Portugal + João Gonçalves + joao.goncalves&prevoir.pt +26549 + Amper + Manuel Cano + mcano&amper.es +26550 + BluePex do Brasil Tecnologia da Informação Ltda - ME + Sergio Lima + sergio&bluepex.com +26551 + Gordon Kapes, Inc. + Joseph Urbanczyk + joe_u&gkinc.com +26552 + Congreso de los Diputados + MIguel Angel Gonzalo Rozas + webmaster&congreso.es +26553 + CTI Networks, Inc. + CTI Networks Admin + admin&ctinetworks.com +26554 + ThingMagic, Inc. + Margaret Wasserman + margaret&thingmagic.com +26555 + NTT IT Co., Ltd. + Akihiro Hase + info-ins&ntt-it.co.jp +26556 + Ethernet Direct Corporation + Fengching.Chou + jacky&ethernetdirect-tw.com +26557 + Stractus + Wellerson Amarante + wellerson&stractus.com.br +26558 + Associated Network Partners, Inc. + Todd Davis + tdavis&anpisolutions.com +26559 + nyukid networks + Jeffrey Barea + nyukid&gmail.com +26560 + Jünger Audio-Studiotechnik GmbH + Hans Hübner + hans.huebner&junger-audio.com +26561 + Zynetix Ltd + Andy Odgers + andy.odgers&zynetix.com +26562 + International Telecommunication Union + Frederic Faugier + hostmaster&itu.int +26563 + TRIADO + Antoni Triado + toni&triado.com +26564 + Emerj Inc + Andy Pletch + andy&e-merj.com +26565 + Studio Technologies, Inc. + Randy Kelso + rkelso&studio-tech.com +26566 + Global Knowledge + Ryan Leathers + ryan.leathers&globalknowledge.com +26567 + Obsidian Consulting Group Pty. Ltd. + Robert Davidson + rdavidson&obsidian.com.au +26568 + Mega Solution Inc. + Tadashi Takahashi + takahashi&megasolution.jp +26569 + Idyria SARL + Richard Leys + rleys&idyria.com +26570 + Cleartone Video Ltd + Dave Turner + dave.turner&cleartone-video.com +26571 + DATANIN LTD + Nir Aran + trust&datanin.com +26572 + Optimal IdM Software, LLC + John Maring + JohnMaring&OptimalIdM.com +26573 + Dawn VME Products + Charles Linquist + clinquist&dawnvme.com +26574 + Dynamics Research Corporation + Howard S. Clayman + HClayman&drc.com +26575 + KB Toys, Inc. + Jeremy Warren + jwarren&kbtoys.com +26576 + Ian Cairns + Ian Cairns + ian&iancairns.org +26577 + Kaufcom GmbH + Donati Roberto + info&kauf.com +26578 + Credito Emiliano S.p.A + Chiriatti Stefano + schiriatti&credem.it +26579 + J. Craig Venter Institute + Rajeev K Karamchedu + unixsupport&tigr.org +26580 + Kaiser Permanente + Marc S. Hedish + marc.s.hedish&kp.org +26581 + Rustenbach.de + Toralf Rustenbach + toralf&rustenbach.de +26582 + Clerity Solutions, Inc. + Randy Holbert + randy.holbert&clerity.com +26583 + Ativa Soluções Tecnológicas Ltda + Danton Ferreira Vellenich + danton&ativasolucoes.com.br +26584 + Brooman.com, LLC + Andrew Brooman + andrew&brooman.com +26585 + Reactrix Systems, Inc. + Rob Goodman + rgoodman&reactrix.com +26586 + Fremnet + Shannon Wynter + oid.admin&fremnet.net.au +26587 + computer betting company gmbh + Thomas Gusenleitner + Thomas.Gusenleitner&com-bet.com +26588 + Nexcom Systems + Thomas Leseney + thomas.leseney&nexcom.fr +26589 + CT Company Ltd. + Leonid Khvostov + khvostov&mtu-net.ru +26590 + CLAIM GmbH & Co. KG + C. Barth + info&claim-team.de +26591 + Kynetia Networks S.L. 
+ Jose M Corsino + jose.corsino&kynetia.com +26592 + HUBER+SUHNER Polatis Ltd + Adam Hughes + adam.hughes&hubersuhner.com +26593 + littlemail + Martin Schwartz + hostmaster&mail.littlemail.de +26594 + ASOIU departament of NTUU "KPI" + Oleksiy V. Khilkevich + grim&asu.ntu-kpi.kiev.ua +26595 + Connect Tech Inc. + David Worthen + djw&connecttech.com +26596 + S-INFORMATION TECHNOLOGY CO., LTD. + In Kweon Yu + ikyou&swpt.net +26597 + Comerica Bank + Walter Seiler + mike.seiler&sanyuhu.com +26598 + SUMITOMO OSAKA CEMENT CO.,LTD. + Katsuya Akizuki + kakizuki&sits.soc.co.jp +26599 + University of Glasgow + John Watt + oid-register&glasgow.ac.uk +26600 + China Xiamen Truetel Telecommunications Technologies Co.,Ltd. + Wang Xiang + wangx&truetel.com.cn +26601 + Dunham-Bush Yantai Co. Ltd. + Mu Bo + mubo&dunham-bush.com.cn +26602 + Raceme + Christophe Boyanique + mib&raceme.org +26603 + GEDIM AG + Dr. Martin H. Ludwig + internetverwalter&gedim.de +26604 + IMA GmbH + Dr. Martin H. Ludwig + internetverwalter&imagmbh.de +26605 + PKB Integral + Alexander Pravdin + aledin&evpatoria.com.ua +26606 + Vidactive, S.L. + Alberto Villar + alberto.villar&vidactive.com +26607 + Isco International + Gary Berger + gary.berger&iscointl.com +26608 + Wavelength Corporation, Inc. + Network Operations Center + noc&wavelengthmail.com +26609 + Pentair, Inc + Joe Licata + Joe.Licata&pentair-ep.com +26610 + Ultramain Systems Inc. + Curt Wennekamp + it&ultramain.com +26611 + Exa Corporation + James Chamberlain + oidadmin&exa.com +26612 + Kyoto Sangyo University + Kohji Ozaki + ozaki&cc.kyoto-su.ac.jp +26613 + Audiomedia Ltd. + Viorel Dehelean + viorel.dehelean&audiomedia.ro +26614 + Government of Republic of Srpska + Rade Tolimir + r.tolimir&mnrvoid.vladars.net +26615 + Noviforum Ltd., Software & Media + Zvone Jagodic + root&noviforum.si +26616 + University Computing Centre - SRCE, University of Zagreb + Ivan Maric + ivan.maric&srce.hr +26617 + Mobitel d.d. + Saso Stor + Saso.Stor&mobitel.si +26618 + HighTech Passport, Ltd. + Razvan Corneliu VILT + razvan&htpassport.ro +26619 + Private Higher education establishment Autononymous non-commercial organization Regional Finance and Economy Institute + Vladimir Alexandrovich Goncharov + admin&rfei.ru +26620 + West University of Timisoara + Marinel IORDAN + miordan&rectorat.uvt.ro +26621 + Technical University of Cluj-Napoca + Cristian KLEIN + cristi&net.utcluj.ro +26622 + NJERS Communications + Krunoslav Njers + noc&njers.com +26623 + Open Sense Solutions LLC + Michael Pardee + oid.iana.org&open-sense.com +26624 + that's software Berlin + Patrick Hoffmann + p.hoffmann&berlin.de +26625 + Sopris Surfers Inc. + Ben Plimpton + bplimpton&sopris.net +26626 + cativa.net + Oliver Meyer + olm&cativa.net +26627 + VIGOS AG + Constantin Rack + info&vigos.com +26628 + BH Telecom dd Sarajevo + Emir Fazlinovic + emir.fazlinovic&bhtelecom.ba +26629 + Rumati Computers CC + Jesse Long + jpl&unknown.za.net +26630 + PBMS Solucoes Moveis em Informatica Ltda + Altino Pavan + altino&purebros.com +26631 + Waterman Group plc + Stuart Worrow + s.d.worrow&waterman-group.co.uk +26632 + Saunders Properties Pty Ltd + Daniel Thoroughgood + Daniel.Thoroughgood&staff.tsninternet.com.au +26633 + Embedded Internet Solutions, Inc. + Steven Wu + ithaha&ipanel.cn +26634 + BigTribe Corporation + Daniel Greening + greening&bigtribe.com +26635 + Squid Consulting & Integration + Wai-Sun Chia + waisun&sqci.biz +26636 + Dexon Software Inc. + Ricardo A. Ortiz + info&dexon.us +26637 + OMB Sistemas Electrónicos S.A. + JL. 
Rueda + investigacion&omb.com +26638 + Studio Hamburg Media Consult International GmbH + Nils Decker + n.decker&mci-broadcast.com +26639 + The Schneider Family Tree + Rik Schneider + oid&deranged.schneider.org +26640 + InduSoft Inc. + Lourenco Teodoro + info&indusoft.com +26641 + Yellowbrix + Rik Schneider + rschneider&yellowbrix.com +26642 + Schoelzel - IT Consulting + Walter Schoelzel + info&schoelzel-itconsulting.de +26643 + XL Vision Group + Gustavo Matsunaga + matsunaga&xlvision.com.ar +26644 + International Working Group on Taxonomic Databases (TDWG) + Ricardo Pereira + ricardo&tdwg.org +26645 + Inventive Designers n.v. + Dieter Wachters + dieter_wachters&inventivedesigners.com +26646 + Institute for Parallel Processing, BAS + Luchesar ILIEV + oid-manager&acad.bg +26647 + Banco Safra S/A + Karen Higashi + karen.higashi&safra.com.br +26648 + Avago Technologies Limited + Ray Frush + ray.frush&avagotech.com +26649 + Covidien (formerly 'Tyco Healthcare') + Joseph W. Davis + OID.Admin&covidien.com +26650 + Pandora Media Inc. + James Erickson + jerickson&pandora.com +26651 + Propus Informatica Ltda + Marlon Dutra + marlon&propus.com.br +26652 + MELCO HOLDINGS INC. + Masumitsu Hatta + it&melcoinc.co.jp +26653 + Shanghai Sunstar Telecom Co.ltd + He,Lin + helin&sunstar.com.cn +26654 + NTT resonant Inc. + hirofumi matsumoto + fm_tech&nttr.co.jp +26655 + Advantech Co., Ltd + Guider Lee + guider.lee&advantech.com.tw +26656 + Allan Associates + Edward Allan + edward&allanassoc.com +26657 + Ouba + Denis Sacchet + ouba&ouba.org +26658 + Mikro Network Solutions + Michael Brown + topo2&pacbell.net +26659 + Zero9 srl + Simonluca Landi + simonluca.landi&mplatform.it +26660 + Robot Bt. + Soós László + sooslaca&robot.hu +26661 + OpenPanel V.O.F. + Peter van Dijk + registries&panelsix.com +26662 + Alpha Company + Dmitriy Ermolin + ermolin_d&alpha-comp.ru +26663 + Solana Networks Inc. + Don Bennett + dbennett&solananetworks.com +26664 + Centralsoft.org + Tom Adelstein + tom.adelstein&gmail.com +26665 + FaceToFace Software Ltd + Chris McKay + chris&defacto.com.au +26666 + Assuria Limited + Nick Connor + nickc&assuria.com +26667 + Epper & Kranz GbR + Oliver Epper + oliver&epper.de +26668 + SixFamily.Net + David Six + david.six&sixfamily.net +26669 + Sirius Corporation plc + Duncan Gibb + duncan.gibb&siriusit.co.uk +26670 + NetEase.com, Inc. + Chun Tian + tianchun&corp.netease.com +26671 + NetSpring, Inc. + Takeshi Itoh + admin&netspring.co.jp +26672 + NANJING ZIJIN-PHOTEL COMMUNICATION EQUIPMENTS CO.,LTD + Liu Li Ai + lliai&tom.com +26673 + Wyndeham Heron Ltd + Michael Hales + mikehales&wyndeham.co.uk +26674 + Klerx EDV Beratung + Martin Klerx + Martin&klerx.de +26675 + MessageLabs Ltd + Duncan Ward + infrastructure&messagelabs.com +26676 + GSF SAS + GUERIN CORINNE + cguerin&gsf.fr +26677 + Datactics Ltd. + Andy Ferguson + sysadmin&datactics.co.uk +26678 + Canadian Space Agency + Louis-Philippe Durocher + Louis-Philippe.Durocher&space.gc.ca +26679 + EMCom sp. z o.o. + Jacek Drobiecki + j.drobiecki&emcom.pl +26680 + General Dynamics United Kingdom Limited + Jonathan Hansford + jonathan.hansford&generaldynamics.uk.com +26681 + Phi Theta Kappa International Honor Society + Larry Olin Horn + ptkdis&ptk.org +26682 + lohnet.org + Larry Olin Horn + hornlo&lohnet.org +26683 + Vinci Consulting Corp + Paul Vinciguerra + pvinci&vinciguerra.com +26684 + Comm/net Systems, Inc. 
+ Tom Osterman + tosterman&commnetsystems.com +26685 + FOSS MIBs + Chris Jones + jonesy&fossmibs.com +26686 + Softjury GmbH + Marcel Meckel + mm&softjury.de +26687 + LG Technology Inc. + Lloyd Gilroy + lgilroy&mac.com +26688 + Sino-i Technology Ltd. + Yali Ji + jiyali&myce.net.cn +26689 + CAT Telecom Public Company Limited + Padet Praosri + padet&thaipki.com +26690 + PRETTL Produktions Holding GmbH + Alexander Berger + alexander.berger&prettl.com +26691 + Reichert GmbH + Timm Reichert + tr&reichert-gmbh.com +26692 + MOBITEL SA + Jose Bourbon Ribeiro + jose.bourbon&dedic.com.br +26693 + AirPoint Co., Ltd. + Sang-ho, Kim + smartkim&airpoint.co.kr +26694 + Digitals India + D.K.Shukla + dkshukla&digitalsindia.com +26695 + Openlink Financial, Inc. + Joseph Sera + jsera&olf.com +26696 + Marvell Technology Inc + Umesh Kartha M + EXGR-iana-oid-admin&marvell.com +26697 + Arces Network, LLC + Adrian Goins + agoins&arces.net +26698 + Net Project SRL + Constantin Liviu Faciu + liviu.faciu&net-project.ro +26699 + Open Switch Software e Consultoria Ltda + Eduardo Cruz + eduardo&openswitch.com +26700 + Vulcan Materials Company + Daniel Smart + dan.smart&vul.com +26701 + Ocean Rock Corp. + Raymundo W. Perez + roskiy&paulwinston.com +26702 + Wycliffe Bible Translators of Canada Inc + Bill Cameron + computer_canada&wycliffe.ca +26703 + Pittsburgh Supercomputing Center + Michael H Lambert + lambert&psc.edu +26704 + Voice Provider Sweden AB + Per Sautermeister + per.sautermeister&voiceprovider.se +26705 + Webbertek (Blunicel Consultoria em Informatica Ltda) + Celso Kopp Webber + celso&webbertek.com.br +26706 + 4AM Lunch Inc. + Brian Hechigner + wonko&4amlunch.net +26707 + Delphi Corp. + Jim DeSantis + jim.desantis&delphi.com +26708 + Cerbercomm LTD + Or Goshen + oberon&cerbercomm.com +26709 + LogicIQ Ltd + Dr Jan Hruska + jan.hruska&logiciq.com +26710 + Elasis S.C.p.A. + Francischetti Carmine + francischetti&elasis.it +26711 + Crolox B.V. + Derk Gortemaker + iana&crolox.nl +26712 + Cutter Project Ltd + Mike Banahan + mike.banahan&cutterproject.co.uk +26713 + Stella Doradus Ltd. + Justin Collery + justin&stelladoradus.com +26714 + ERIS4 s.r.l. + Stefano Antonelli + stefano.antonelli&eris4.com +26715 + Oberlandesgericht München - IT-Stelle d. Justiz + Robert Steiner + robert.steiner&olg-m.bayern.de +26716 + Ganesh TEchnologies Ltd + Dr Georg Mueller + georg.mueller&ganesh.net +26717 + Stroeer Out-of-Home Media AG + Bellin Christian + support&stroeer.de +26718 + ELMA Kurtalj d.o.o. + Dario Maljur + dmaljur&elma.hr +26719 + Baluna GmbH + Konstantin Avdeev + kavdeev&baluna.de +26720 + Cressida Technology Ltd. + M. Kanafchian + info&cressida.info +26721 + EAB GmbH Rhein/Main + Hayo Volker Hasenfus + hayo.hasenfus&eab-rhein-main.de +26722 + S-Telecom (S-Fone) + Nguyen Sinh Chuong + chuongns&stelecom.com.vn +26723 + KUME electric Co., Ltd. + Kakehi Kozo + oidadmin&kme.co.jp +26724 + e-trees.Japan,Inc. + Kunihiko Ohnaka + ohnaka&e-trees.jp +26725 + Eiraku Electric Co.,Ltd. + Teruaki Onishi + t-onishi&eiraku.com +26726 + WireCache, Inc. + Dave Rosenberg + rose&wirecache.com +26727 + Yakshavers Incorporated + Robby Griffin + oidadmin&yakshavers.net +26728 + Transparency Software Inc. + Michael Cartsonis + michael.cartsonis&transparencysw.com +26729 + Sagittar Broadband Communication Solutions + Dr Stephen Michelson + smichels&sagittar.com +26730 + SICS AB + Mikael Nehlsen + staff&sics.se +26731 + Software Libre de Venezuela 777, C.A. 
+ Wilmer Jaramillo Murcia + wilmer&softwarelibre777.com +26732 + COMNET Computer Netzwerke Gmbh + Michael Bischinger + bischinger&comnet.at +26733 + RRD S.p.a. + Christian Besler + cbesler&rrd.tv +26734 + CMYK Energy Software Limited + Andrew Kay + andrewjkay&gmail.com +26735 + Dipl. Ing. T. Biel + Thorsten Biel + hostmaster&ing-biel.com +26736 + Oliver Weyhmüller + Oliver Weyhmüller + oliver&w9r.de +26737 + Infostream Pty ltd + Phil Manuel + support&infostream.com.au +26738 + BEA International FZE + Purushottam Kaushik + pkaushik&beainternational.com +26739 + Hans Guentner GmbH + Pacher Michael + m.pacher&guentner.de +26740 + Tein Technology S.A. + Juan Bernabeu + j.bernabeu&teintechnology.be +26741 + Etin Systems Co.,Ltd. + MyoungHo, Kim + fantasy8&etinsys.com +26742 + Astralogic cc + Tony Bartok + tony&alogic.co.za +26743 + Lean, LLC + John W Darby + johnwinstondarby&gmail.com +26744 + National Bankcard Services Inc + Raymond Hausman + rayh&nbs-inc.com +26745 + obituariesonline.ca + Dave Panter + davepanter&shaw.ca +26746 + HEBUS + Antoine JEAN + optibus&hebus.info +26747 + GoRoam Ltd + Andrew McCall + andrew.mccall&goroam.net +26748 + Print Fulfillment Services + Jared Baldridge + jbaldridge&printfulfillmentservices.com +26749 + SL Power Electronics Corp + Greg Wade + greg.wade&slpower.com +26750 + Access Integrated Technologies, Inc. + Jeff Butkovsky + jbutkovsky&accessitx.com +26751 + Visual Sciences, LLC + Akhbar Tajudeen + akhbar&visualsciences.com +26752 + Jamul Engineering + Tom Dilatush + tom&dilatush.com +26753 + Mobile Greetings, Inc. + Jason Dusek + jd&mobilegreetings.com +26754 + Asian Institute of Technology + Agus Harianto + harianto&ait.ac.th +26755 + EyeLine Communications CIS LLC + Pavel Karavanov + kpv&eyelinecom.com +26756 + MSE Matthies Softwareentwicklung GmbH + Claus Matthies + claus&matthies-six.de +26757 + Sibinco LLC + Pavel Karavanov + kpv&sibinco.ru +26758 + CECURITY.COM + Bruno Ricci + bruno.ricci&cecurity.com +26759 + SHF Communication Technologies AG + Sven Koop + koop&shf.de +26760 + 0x3f8.net + Karsten Engelke + iana&0x3f8.net +26761 + Eszterhazy Karoly College + Andras Harsaczki + harsaczki&ektf.hu +26762 + DDinformatica + Andreas Ruge + rugea&ddinformatica.nl +26763 + Cestel S.A. + José Luis Sirera + jlsirera&cestel.es +26764 + Novnet HB + Jan Gustavsson + pcpajas&gmail.com +26765 + Universitaet Bielefeld, Fakultaet fuer Mathematik + Holger Kaelberer + root&math.uni-bielefeld.de +26766 + .vantronix secure systems + Mike Belopuhov + support&vantronix.net +26767 + Alcatel-Lucent TMC (formerly 'Alcatel SOC') + Denis Poirier + Denis.Poirier&alcatel-lucent.com +26768 + Comune di Rimini + Roberto Cremasco + roberto.cremasco&comune.rimini.it +26769 + Raytheon Technologies Corporation + Dean DeFreitas + pki&raytheon.com +26770 + Virtutility Ltd + Philip Hart + network.manager&virtutility.com +26771 + Versaterm Inc. + Steve Underwood + steve.underwood&versaterm.com +26772 + The Royal Conservatoire + Paul Mourus + mous&koncon.nl +26773 + Linde IT Services + Sabrina Sommer + sabrina.sommer&linde.com +26774 + EFOTEK CO.,LTD. + Anna lee + anna&efotek.com +26775 + B2BITS Corp. + Alexander Rivkind + air&btobits.com +26776 + maintech GmbH + Christian Daniel + cd&maintech.de +26777 + Declude, Inc. + David Franco-Rocha + dfranco&declude.com +26778 + Nerdboy Services + Ben Knight + bkk&nerdboy.net.au +26779 + PPW "ARAJ" Sp. z o.o. 
+ Karol Kopacz + kkopacz&araj.pl +26780 + GMx Technologies, Inc + Sam Howard + iana&gmxtechnologies.com +26781 + Greg Cockerham + Greg Cockerham + greg.cockerham&gilbarco.com +26782 + TRI-D Systems, Inc. + Frank Cusack + frank&tri-dsystems.com +26783 + Tioga Project + Sean Champ, Chief Administrator, Tioga Project + gimmal&gmail.com +26784 + Acromate, Inc. + Eui-suk Cha + escha&acromate.com +26785 + University of Messina + Fabrizio La Rosa + fabrizio.larosa&unime.it +26786 + Siet SD Mladost - Ynet + Stefan Gula + root&ynet.sk +26787 + Sociedad de Tasación SA + Marcelo González Würtz + mgonzalez&st-tasacion.es +26788 + AbiliSoft Ltd + David Charles + dave&abilisoft.com +26789 + Webworks Sverige AB + Uno Engborg + uno&webworks.se +26790 + Gilbarco Inc. + Kenneth Ringeman + ken.ringeman&gilbarco.com +26791 + Thralling Penguin LLC + Joseph Benden + joe&thrallingpenguin.com +26792 + Promptlink Communications, Inc. + Anton Shtikhlaytner + antonsh&promptlink.com +26793 + Gawab + Ahmed El Zein + a.elzein&gawab.net +26794 + Periodik Labs LLC + Chris Hawk + chris&periodiklabs.com +26795 + Kemnetix Corporation + P Becke + pbecke&kemnetix.com +26796 + Meitai technology Co.,Ltd + Sanxi Hu + husanxi2005&163.com +26797 + Asankya Networks, Inc. + Scott Ryan + scott&asankya.com +26798 + Hippo, spol. s r. o. + Petr Chupik + chupik&hippo.cz +26799 + Rotal.com Ltd. + Gil Michlin + gilm&rotal.co.il +26800 + Jan Welte + Jan Welte + ianaoid&icorner.de +26801 + Network Chemistry, Inc + Christopher Waters + cwaters&networkchemistry.com +26802 + UNESP-FC + Leonardo Fortunato + leonardo&fc.unesp.br +26803 + DITTA DI FEO PASQUALE + PASQUALE DI FEO + PDIFEO&DFWARE.IT +26804 + Digital Opportunity Trust + Hugh Ranalli + hranalli&dotrust.org +26805 + Krebs Consulting & IT-Services GmbH & Co. KG + Jan Welte + network&krebs-itservices.de +26806 + MultiVision Communications + Yiu Fan Samuel Yau + syau&mvcinc.com +26807 + United States Postal Service + Jeffrey White + jeffrey.j.white&usps.gov +26808 + Visual Acting GmbH + Patrick Raithofer + raithofer&visualacting.de +26809 + ENTELIENCE + Philippe Le Berre + contact&entelience.com +26810 + SKIDATA AG + Steiner Christian + snmp&skidata.com +26811 + UNLP + Javier Diaz + jdiaz&unlp.edu.ar +26812 + Flexy Enterprise Solutions Ltd. + Mowfak Attia + mowfakattia&hotmail.com +26813 + Uniwersytet Gdański + Łukasz Kozłowski + luke&univ.gda.pl +26814 + BVCOM Media Corporation,Ltd. + Can Wu + can_wu&cnbvcom.com +26815 + Advanced Technologies + Nasser Sunna + advtechme&gmail.com +26816 + DTM Consultoria S/C LTDA + Marcelo Mustafa Murad + atendimento&dtmhosting.com.br +26817 + IT Professional d.o.o. + Zoran Tadic + zoran&itprofessional.org +26818 + Servicio Nacional de Meteorología e Hidrologia del Perú - SENAMHI + Richard Miguel + rmiguel&senamhi.gob.pe +26819 + Economists Incorporated + Robert Czechowski + czechowski.r&ei.com +26820 + MicroPortal.info + marcelo rojas + desarrollo.java&gmail.com +26821 + Agent Logic + Brian Lee + brian.lee&agentlogic.com +26822 + Beijing Telestone Technology Co.,Ltd. + Wang Chunxiao + wangchunxiao&telestone.com +26823 + MarzhillStudios + Jeremy Wall + jeremy&marzhillstudios.com +26824 + Conseil Général de Meurthe et Moselle + Daniel BARRET + dbarret&cg54.fr +26825 + netWerkz IT Solutions + Alex Howells + alex&netwerkz.co.uk +26826 + Eon Corporation + Herb Rose + hrose&eoncorporation.com +26827 + ICT 4 Schools + Gerben van Eck + germ&eduwijs.nl +26828 + Han Internetworks Co., Ltd. 
+ Kyu-Nam Choi + nami&haninter.net +26829 + S4 Technology Pty Ltd + Ross Mark + ross.mark&s4-technology.com +26830 + U.S. Home Systems Inc. + Alex Goff + softreq&usremodelers.com +26831 + Community High School District 155 + Mike Bell + mbell&d155.org +26832 + Jason Petsod + Jason Petsod + jason&petsod.org +26833 + Barkley Evergreen & Partners + Larry Penrod + lpenrod&beap.com +26834 + Travel & Accommodation ApS + Ronnie Mose + rm&travelaccommodation.eu +26835 + Southernprint Ltd + David Mears + david.mears&southernprint.co.uk +26836 + INVISTA S.à.r.l + Jeremy McLaughlin + Jeremy.McLaughlin&invista.com +26837 + Masaryk Hospital in Usti nad Labem + Martin Lhotsky + martin.lhotsky&mnul.cz +26838 + Haystack Professional Services + Michael Pierson + mopierson&gmail.com +26839 + Diamedx Inc. + Nikolai Bratkovski + bratkovski&gmail.com +26840 + NETWAYS GmbH + Marius Hein + info&netways.de +26841 + Soleo Communications, Inc. + Mike Thorpe + oids&soleocommunications.com +26842 + Telio AG + Torsten Grunig + grunig&tel.io +26843 + Aker Kvaerner Subsea Ltd + Alisdair Gillespie + alisdair.gillespie&akerkvaerner.com +26844 + Endeavor Information Systems Inc. + Scott Morgan + Sc.Morgan&endinfosys.com +26845 + Mark Hooper Ltd + Mark Hooper + info&mark-hooper.co.uk +26846 + Salford City Council + Peter Wild + postmaster&salford.gov.uk +26847 + COLLINS TRUCKING LLC + MARION BOSTON + collinstruckingllc&yahoo.com +26848 + Deutsche Rentenversicherung Niederbayern-Oberpfalz + Ewald Schmid + Ewald.Schmid&drv-landshut.de +26849 + Departamento de Engenharia Informática da Universidade de Coimbra + Jorge Granjal + jgranjal&dei.uc.pt +26850 + VisionOSS Ltd. + Duncan Kendall + support&visionoss.com +26851 + Pénzügyi Szervezetek Állami Felügyelete / Hungarian Financial Supervisory Authority + Mr. Istvan HORVATH + bevall&pszaf.hu +26852 + Consejo General de Colegios Oficiales de Medicos + Joan Camps Pons + jcamps&cgcom.es +26853 + Mueller & Kanduth OEG + Gerhard Mueller + iana&mko.at +26854 + Cherokee International Corporation + Daniel Bogaerts + d.bogaerts&cherokee.be +26855 + CommProve Ltd. + Alessandro Manetti + a_manett&yahoo.it +26856 + EXCELSIS Business Technology AG + Stephan Engelke + stephan.engelke&excelsisnet.com +26857 + Tocaj + Antoine Jacot-Descombes + antoine&jacot-descombes.ch +26858 + ThemSelves + Arjen Vermeer + arjen.vermeer&hetnet.nl +26859 + Compuquest, Inc. + Cliff Scheller + cs1nospam&compuquestinc.com +26860 + 6COM s.r.o. + Jan Oravec + jan.oravec&6com.sk +26861 + BM Polyco Ltd + Neal Carter + neal.carter&polyco.co.uk +26862 + Idilia Inc. + Derek Brans + derek.brans&idilia.com +26863 + CoreBridge + Said DJENNAOUI + said.djennaoui&corebridge.com +26864 + InSight Health Corp. + Louie Iturzaeta + louie&insighthealth.com +26865 + Spirit Linux + Gleydson Silva + gleydson&spiritlinux.com.br +26866 + Gigamon Systems LLC + Ted Ho + ted.ho&gigamon.com +26867 + Infitel Engineering GmbH + Wolf-Henner Ruhnau + whr&infitel.com +26868 + European Bank of development of metallurgy industry + Sergey Volkov + volkov&euromet.ru +26869 + Skype Technologies S.A. + Vishnu Vemulapalli + iana-registrations&skype.net +26870 + ArcheNova Consulting GmbH + Marcel Betschart + m.betschart&archenova.com +26871 + Tidomat AB + Mathias Henriksson + mathias.henriksson&tidomat.se +26872 + Stainsby + Erik Stainsby + erik&stainsby.ca +26873 + Aldea Global S.A. 
+ Gustavo Adolfo Lucas Cifuentes + glucas&elperiodico.com.gt +26874 + More-IT EOOD + Remko Molier + rmolier&more-it.com +26875 + University of Technology of Ho Chi Minh City + Linh Dang Hong + dhlinh&hcmut.edu.vn +26876 + ADITEC Inc. + SEUNG YOUNG, PARK + pasey&aditec.co.kr +26877 + Civic Computing + Alexios Chouchoulas + alexios&civiccomputing.com +26878 + BroadHop Inc + Ian Campbell + icampbell&broadhop.com +26879 + Nine Systems Corporation + Flint E. Barber + flintb&ninesystems.com +26880 + CPUArchitects, Inc. + Chuck Carpenter + ccarpenter1&houston.rr.com +26881 + Univesidad Tecnica Particular de Loja + Alexander Lopez + ralopez&utpl.edu.ec +26882 + Child Exploitation and Online Protection Centre + David Bryant + Dave.bryant&ceop.gov.uk +26883 + Lilla Grän Data och Elektronik + Daniel Widenfalk + Daniel&lilla-graen.c.se +26884 + QuickCircuit + Dave Cameron + dave&quickcircuit.co.nz +26885 + Compu-Share, Inc + Marvin R Crossnoe + mcrossnoe&compu-share.com +26886 + Compact Microwave Indonesia + T.M. Adham Johan + adham&ptcmi.com +26887 + Plenware Group Oy + Pauli Kuosmanen + pauli.kuosmanen&plenware.com +26888 + Caresolve C.V. + Jos Verdoold + iana-oid&caresolve.nl +26889 + NAV E.P.E. + Manuel Campanico + manuel.campanico&nav.pt +26890 + Markus Franzke Software Entwicklung + Markus Franzke + markus&bahnhof-batten.de +26891 + RCZI FORT + Anatoly Erkin + support&rczi.ru +26892 + yyovkov.net + Yovko Yovkov + yyovkov&yyovkov.net +26893 + St.-Willibrord Gymnasium + Martin Loehnertz + st.willibrord&loehnertz.de +26894 + Apparatus Inc. + John Daily + jdaily&apparatus.net +26895 + Crimean Internet Service + Alexander Yu. Solodukhin + alt&softwarium.net +26896 + Efore PLC + Markku Degerholm + markku.degerholm&efore.fi +26897 + Copa Airlines + Jorge E. Rodríguez B. + iana&copaair.com +26898 + Nippon C.A.D. Co.,Ltd. + Mitsuyuki Komata + komata&ncad.co.jp +26899 + University of Zagreb, Faculty of Science + Alen Kovac + akovac&chem.pmf.hr +26900 + Andanza Technologies S.A. + Daniel Fernandez + d.fernandez&almatech.es +26901 + No Wires Allowed Pty. Ltd. + Carlos Gomez Gallego + carlos&nowiresallowed.com +26902 + Xiamen Longtop System Co., Ltd. + Jack Show + jzxu&longtop.com +26903 + Xytac system technologies + Hideo Noda + nodah&acm.org +26904 + Kathmann Consulting, LLC + Nicholas Kathmann + nicholas.kathmann&kathmannconsulting.com +26905 + Aboundi Inc. + Frank Lai + fclai&comcast.net +26906 + Westfax Inc. + Chad Matheson + chad&westfax.com +26907 + Joe Lewis + Joe Lewis + joe&joe-lewis.com +26908 + Pictor Solutions, Inc. + Rambabu Munjuluri + rmunjuluri&yahoo.com +26909 + SUBNET Solutions Inc. + Anthony Eshpeter + anthonye&subnetsolutions.com +26910 + Studentersamfundet ved Aalborg Universitet + Anette Brix + edbdrift&s-et.aau.dk +26911 + Trend Micro Inc. (formerly 'Third Brigade Inc.') + Bob Durie + bob_durie&trendmicro.com +26912 + Cheiron + Mark Brouwer + oid&cheiron.org +26913 + Group Logic, Inc. + Douglas hill + doughill&grouplogic.com +26914 + TIME Networks Inc,. + HyoungSeok, Park + hspark&timenetworks.co.kr +26915 + Wolfpac Mobile, Inc. + Leo Alvyn Cruz + infra&wolfpac.net +26916 + 1822direkt Gesellschaft der Frankfurter Sparkasse mbH + Sven Schiwek + sysop&1822direkt.com +26917 + Netconsulting + Thomas Heiligenmann + thomas&heiligenmann.de +26918 + AF Kursor-Audit Ltd. 
+ Anton Burkun + anton.burkun&gmail.com +26919 + Danpex Corporation + Kwok Leung + wing&danpex.com +26920 + TXT e-solutions SpA + Domenico Rotondi + Domenico.Rotondi&TXTGroup.Com +26921 + MPI for Mathematics + Willy Tegast + pen-contact&mpim-bonn.mpg.de +26922 + TD2 Consulting, LLC + Trey Drake + treydrake&td2consulting.com +26923 + Baptist Health South Florida + Rodrigo Amarante + rodrigoa&baptisthealth.net +26924 + Knewco, Inc. + Kevin J Kalupson + kalupson&knewco.com +26925 + Enigma Data Solutions Ltd. + Laurence Barry + lbarry&enigmadata.com +26926 + Masstech Group, Inc + Wes Thiessen + wes.thiessen&masstechgroup.com +26927 + Extant Networks + Thomas Matysik + thomas&extant.net.nz +26928 + Aerohive Networks, Inc. + Gavin Zhu + gzhu&aerohive.com +26929 + jv consultant + jacques vincensini + contact&jvconsultant.fr +26930 + php-systems.com + Tim Wilkes + iana&php-systems.com +26931 + Reaktor Innovations Oy + Antti Puranen + antti.puranen&reaktor.fi +26932 + Visma Software ASA + Pål Bergan + pal.bergan&visma.no +26933 + Tridion B.V. + Dominique LeBlond + dominique.leblond&tridion.com +26934 + Tattile S.r.l. + Livio Bonisoli + l.bonisoli&tattile.com +26935 + Oseo + Xavier de Broca + arnaud.jumelet&ext.oseo.fr +26936 + Ville de Marseille + Claude MARCY + cmarcy&mairie-marseille.fr +26937 + Yahata Kosei Hospital + Akimichi Tatsukawa + akimichi_tatsukawa&yahoo.co.jp +26938 + Worldgroup Consulting Sdn Bhd + Erika + sinwee80&gmail.com +26939 + Tenwa Ltd. + Simon Dong + simon&tenwa.com.cn +26940 + The Economist + Michael Mann + michaelmann&economist.com +26941 + Johnstone Associates + David Johnstone + diana&dewlands.com +26942 + AIM Professional Systems Ltd + David Wood + david.wood&aim.co.uk +26943 + dieMosers.org + Reinhard Moser + reinhard.moser&inode.at +26944 + Icross Technology + dbin yoon + dbin&icrosstech.com +26945 + INTECH + Michael Pelletier + mpelletier&intechjanus.com +26946 + Aditi Technologies + Watsh Rajneesh + rajneeshw&aditi.com +26947 + MIPC Ltd. + Arnaldo Nunes do Prado + arnaldo.prado&gmail.com +26948 + Lambda Research Inc. + David Quick + dquick&lambdares.com +26949 + InterComputer Corp. + Bruce James + Bruce.James&InterComputer.com +26950 + GUST / Ninja Services + Jonathan Hunter + jh-iana-oid3&ninja.org.uk +26951 + RENAULT-NISSAN B.V. + Witold KLAUDEL + witold.klaudel&renault.com +26952 + Joint-stock company "Bank SOYUZ" + Vyacheslav Krotov + Vyacheslav.Krotov&banksoyuz.ru +26953 + Astronix Technology Inc. + Antonio Saygbe + nukantonio&yahoo.com +26954 + INdustrias Sola Basic, SA de CV + Emilio Catan + ecatan&isbmex.com +26955 + Avalon Networks LLC + Binon Gorbutt + bgorbutt&avalonnetworks.us +26956 + AG04 Innovative Solutions + Domagoj Madunić + domagoj.madunic&ag04.com +26957 + Kognita + Mr. Siniša Babić + sinisa&kognita.hr +26958 + Indu-Sol GmbH + Mr. René Heidl + heidl&indu-sol.com +26959 + www.brianmichael.org + Brian Michael + brian.michael&yahoo.com +26960 + VEM Manutenção e Engenharia S/A + Gerencia Redes + gerencia.redes&vem.aero +26961 + TantaComm Systems + Nicholas Morris + nmorris&tantacomm.com +26962 + KPMG International + Reede Taylor + dms&dipcon.com +26963 + ISEANE + Jerome JOUVENT + oid&iseane.com +26964 + van Deenen Support + Bart van Deenen + info&vandeenensupport.com +26965 + Thomas Parquette + Tom Parquette + thparque&orangemail.syr.edu +26966 + Iskoot Israel Ltd. 
+ Yehudit Nathan + Yehudit&iskoot.com +26967 + Assotiation Mir Ltd + Denis Zagirov + info&mirgroup.ru +26968 + KielNET GmbH + Sven Juergensen + sj&kielnet.de +26969 + Bisping & Bisping GmbH & Co. KG + Magnus Schmidt + ms&bisping.de +26970 + Doellken Kunststoffverarbeitung GmbH + Ingo Gansert + ssladmin&doellken.com +26971 + Kudra Technical Services + Robert Sexton + robert&kudra.com +26972 + Cimices (formerly 'Quetz Limited') + Keith Brazington + keith&quetz.co.uk +26973 + Tomato Vine, Inc. + Trevor Jorgenson + tjorgenson&tomatovine.com +26974 + Disk Bridge + Graham Wooden + graham&g-rock.net +26975 + Reserved + RFC-pti-pen-registration-10 + ---none--- +26976 + NexG Co., Ltd. + Jae Hak, Lee + pubs&nexg.net +26977 + Qball Technologies Ltd. + Maayan Turner + produpd&qballtech.net +26978 + next layer GmbH. + Andreas Schoder + noc&nextlayer.at +26979 + Grenkeleasing AG + Frank Kessel + fkessel&grenkeleasing.de +26980 + Dudley PCT + Kevin Iddles + kevin.iddles&dudley.nhs.uk +26981 + Schrack Energietechnik GmbH + Konrad Podloucky + k.podloucky&schrack.com +26982 + R2 GmbH + Alexander Rosswog + alexander.rosswog&r2-gmbh.de +26983 + RWD Technologies + Marc Ciarniello + mciarniello&rwd.com +26984 + Otto-Petersen-Haus + Philipp Lay + ng&oph.rwth-aachen.de +26985 + ENET Inc. + Tshikazu Aso + domain&ene-t.com +26986 + Southwest Research Institute + Isaac Wagner + iwagner&swri.org +26987 + Gemini Mobile Technologies, Inc. + Andy Dunlap + domain-admin&geminimobile.com +26988 + 7-Eleven, Inc + Jane Xia-Ballew + jxia01&7-11.com +26989 + INTEGRITY Ltd. + Szabolcs Gyuris + gyuris.szabolcs&integrity.hu +26990 + Hudson Digital Systems Inc. + Michael Andrews + mandrews&hudsondigitalsystems.com +26991 + Develer S.r.l. + Bernardo Innocenti + info&develer.com +26992 + ENTEL S.A. + Francisco Turrillas + fturrillas&entel.cl +26993 + Promwad Innovation Company + Petronenko Denis + petronenko&gmail.com +26994 + opensma + John Collaros + john.collaros&gmail.com +26995 + Public Joint-Stock Company "Baltiyskiy Bank" + Mikhail Kopiev + Kopiev&baltbank.ru +26996 + Qmax Communications + Lim Hong Chuan + hongchuan&qmaxcom.com +26997 + Studer Professional Audio GmbH + Simon Gysi + simon.gysi&studer.ch +26998 + Ultra Electronics (formerly 'AudioSoft Ltd.') + Ian Barrows + ian.barrows&ultra-sss.com +26999 + Grace Community Church of Howard County, Inc. + Gary Burnett + gary.burnett&gcconline.org +27000 + Magistrat Linz, AT + Hanspeter Klapf + hanspeter.klapf&mag.linz.at +27001 + optionsXpress, Inc. + Matt Connley + mconnley&optionsxpress.com +27002 + Zayo Group (formerly 'Onvoy, Inc.') + David B. Walters + dwalters&zayoms.com +27003 + Trigence Corp + Brigitte Gagne + bgagne&trigence.com +27004 + HOT Telecom Ltd. + Eyal Wax + eyal.wax&hot.net.il +27005 + Hackwerk.net + Peter H. Veerman + pveerman&hackwerk.net +27006 + Chalktime + David B. Walters + iana-snmp&chalktime.com +27007 + Luceor S.A.S. + Benoit Papillault + benoit.papillault&luceor.com +27008 + Unified Game + Stefan Penter + stefan&unifiedgame.com +27009 + Bitrail Networks + Support Center + support&bitrail.net +27010 + LBM Systems, LLC + Xavier Guzman + xguzman&lbmsys.com +27011 + Sagamore Systems, Inc + Eric Hedlund + ehedlund&sagamoresys.com +27012 + CASAudit, Inc. + Ed Goldgehn + edg&casaudit.com +27013 + TeleDirect International Inc. + Mark M. Moore + CTO&TDirect.com +27014 + CommGate Systems India Pvt Ltd + Abhik Biswas + abhik&commgate.net +27015 + East Coast Access (PTY) Ltd + Abdul Rehman Gani + abdulg&eastcoast.co.za +27016 + Alphito d.o.o. 
+ Uros Indihar + indy&alphito.si +27017 + world-direct eBusiness Solutions GmbH + Florian Sailer + florian.sailer&world-direct.at +27018 + TDC Switzerland AG + Michael Bischof + michael.bischof&sunrise.net +27019 + Envision Network Technologies Pvt. Ltd. + Vishwanath H.R. + vishwa&entl.net +27020 + Pan Asian Certificate Policy Authority Limited + Fanny Tse + fanny.tse&tradelink.com.hk +27021 + Pactiv Corp. + Marius Satas + msatas&pactiv.com +27022 + Instrumentation Technology Systems Corporation + Paul Hightower + phightower&itsamerica.com +27023 + Nexant, Inc. + Sergio Brignone + sbrignone&nexant.com +27024 + I-Connect Inc + Soheil Banifatemi + soheil&i-connect.tv +27025 + VisionAlive GmbH + Marc von Jaduczynski + marc&visionalive.de +27026 + ILIRIKA D.D. + Andrej Valencic + admin&ilirika.si +27027 + ESTEAM Software GmbH + David Matthew Bond + ldap&esteam.de +27028 + Torgservis Ltd. + Andrei Petrenko + a.petrenko&torgservis.com.ua +27029 + REH + Wolfgang Reh + reh&stsweilburg.de +27030 + Wichorus Inc. + Joseph L Raja + joseph&wichorus.com +27031 + CNS Technology Inc. + SooHyun Moon + shmoon&cnstec.com +27032 + Sobey Digital Technology Co. Ltd. + Mr.Wang Jizheng + wangjizheng&sobey.com +27033 + OK soft GmbH + Oleg Kiriljuk + oleg.kiriljuk&ok-soft-gmbh.com +27034 + Hopling Technologies B.V. + Ivo van Ling + ivanling&hopling.com +27035 + Alamon Telco, Inc. + Dennis R. Gesker + gesker&alamon.com +27036 + Mercy College + Chris Peckham + cpeckham&mercy.edu +27037 + Netmarks, Inc. + Atsuo Masaki + masaki.atsuo&netmarks.co.jp +27038 + Robot Visual Systems Gmbh + Wolfgang Guse + wolfgang.guse&robot.de +27039 + Kehitysyhteistyön palvelukeskus ry. + Juho Vuori + juho.vuori&kepa.fi +27040 + Royal Canadian Mounted Police + Paul Dube + systems_tools_support&rcmp-grc.gc.ca +27041 + KEMET Corporation + Brian Burch + brianburch&kemet.com +27042 + UNIVERSIDAD DE COLIMA + RAYMUNDO BUENROSTRO MARISCAL + aldo.cuevas&ucol.mx +27043 + Centro de Informatica e Automacao do Estado de Santa Catarina + Reinaldo Sabatini Fernandes + reinaldo&ciasc.gov.br +27044 + CLUSTERPOINT LTD. + GINTS ERNESTSONS + gints&clusterpoint.com +27045 + Santillana de Ediciones S.A. + Federico Peñaranda Nervi + federicopenaranda&gmail.com +27046 + Alex Group, spol. s r.o. + Marian Ondrovic + marian.ondrovic&alexgroup.biz +27047 + ASK Interactive UK Ltd + Andrew Keil + andrew.keil&askinteractive.net +27048 + Opteq Systems International Ltd + Nikki Cliff + nikkic&opteqint.net +27049 + Symark Software + Gyle Iverson + giverson&symark.com +27050 + Warrior Technology Services, Inc + Victor Guerrero + jvguerrero&warriortech.com +27051 + Chipidea Microelectronica S.A. + Fabio Silva + support&chipidea.com +27052 + Bay Microsystems, Inc. + Bob Smedley + bob&baymicrosystems.com +27053 + Omnitromics Pty Ltd + Paul Whitfield + support&omnitronics.com.au +27054 + Oneida Baptist Institute + James Thompson + technology&oneidaschool.org +27055 + ZANDER LLC. + Fred Patterson + fcpatterson55&gmail.com +27056 + Zednax Limited + Alex Masidlover + alex.masidlover&zednax.com +27057 + Elektrobit Automotive GmbH (formerly '3SOFT GmbH') + Markus Roesch + Markus.Roesch&Elektrobit.com +27058 + Ventelo Norge AS + Dag Stenstad + dag.stenstad&ventelo.no +27059 + SmartSol d.o.o. + Tomislav Randjic + tomislav.randjic&smartsol.co.yu +27060 + interactive instruments + Mr. 
Bernd Weidner + weidner&interactive-instruments.de +27061 + NetSENS SRL + Vasile Vladescu + operations&netsens.ro +27062 + OSTIUM-Soluções de Engenharia Lda + João Pedro Jacob + joao.jacob&ostium.pt +27063 + IMocha Consulting Sdn Bhd + Abid Altaf + abid&imocha.com.my +27064 + University of St. Thomas - Houston + Neil Gaede + neil&stthom.edu +27065 + Quagility, Inc. + Jonathan Mischo + oid&quagility.net +27066 + Rave Wireless, Inc. + Marc C. Poulin + mcp&ravewireless.com +27067 + Softinbox Inc. + Tzuoo-Hawn Yeh + tzuoohawnyeh&yahoo.com +27068 + Mooter Media Ltd + Mark Hetherington + mark.hetherington&mootermedia.com +27069 + Kaon Media Co., Ltd. + Jin, Lee + jlee&kaonmedia.com +27070 + DekTec Digital Video B.V. + Sito Dekker + sito.dekker&dektec.com +27071 + Hålogaland IKT-Senter as + Jan Åge Johnsen + janaage&hikt.no +27072 + Euro-emme s.r.l. + Paolo Signor + p.signor&euro-emme.com +27073 + Septentrio nv/sa + ing. Freddy Voorspoels + Freddy.Voorspoels&Septentrio.com +27074 + Point International + Thomas Conlon + tom.conlon&point.se +27075 + RDP B92 a.d. + Tomislav Randjic + tomislav.randjic&b92.net +27076 + Zot Inc. + Chris Edillon + jce&zot.com +27077 + Nanasoft Szoftverfejlesztő Bt. + Peter MOLNAR + molnar.peter&nanasoft.hu +27078 + lohn.us + Josemar Müller Lohn + lohn&lohn.us +27079 + AT Promotions Limited + Jason McClean + jason.mcclean&listawood.com +27080 + Sequanux + Gregory Oestreicher + greg&sequanux.org +27081 + Trimm, Inc. + Brian Burch + brianb&trimminc.com +27082 + CESMIC - Centro de Excelência em Servidores de Missão Crítica + Mauro Tapajos + camilamachado&gmail.com +27083 + Switching Technologies + John McDougald + admin&switchingtechnologies.com +27084 + Arnulf Wiedemann + Arnulf Wiedemann + arnulf&wiedemann-pri.de +27085 + Greenview Data, Inc. + Philippe Green + phil&vedit.com +27086 + Texas Transportation Institute + Brad Hoover + b-hoover&tamu.edu +27087 + Networking Systems + Brad Hoover + bjh6938&yahoo.com +27088 + Supreme Court of New Mexico + S. Azuma + sazuma&nmcourts.com +27089 + Guus Leeuw IT Solutions + Guus Leeuw jr. + leeuwg&guusleeuwit.com +27090 + Nectia SA + Claudio Pérez + cperez&nectia.com +27091 + Brilliant Telecommunications Inc. + George Zhao + george&brilliantwireless.com +27092 + Astronautics Corporation of America + Manager of Configuration Management + m.starr&astronautics.com +27093 + YUSO.JP + Yuso KANAMORI + iana&rcv.yuso.jp +27094 + RANEEN + Mohammad Alhashash + iana-oidreg&alhashash.net +27095 + Portfolio Wired + Suzanne Payne + mail&portfoliowired.co.uk +27096 + Cadet Family + Dudley Cadet + dudley&cadet.us +27097 + Aphelion Communications Inc + JIMMY FENG + jimmy&aphelions.com +27098 + SOE Software Pty Ltd + Simon Manners + simon.manners&soesoft.com +27099 + Bunda Mulia University + Edy Sulai + esulai&bundamulia.ac.id +27100 + SwiftView, Inc. + Tyler Walden + tyler&swiftview.com +27101 + EMBARC.biz, LLC + Dudley Cadet + dudley&embarc.biz +27102 + GDMC + Mark Murphy + mmurphy&wayne.edu +27103 + Romanian Space Agency (ROSA) + Cosmin NISTOR + cosmin.nistor&rosa.ro +27104 + CFT Ltd. 
+ Likhatchev Evgueni + iana_contacts&cft.ru +27105 + SoftMethod GmbH + Felix Schupp + felix.schupp&softmethod.de +27106 + Averina Software + Can Balioglu + can.balioglu&averina.com +27107 + Supporting Open Source bvba + Christof Haerens + christof.haerens&sos.be +27108 + Micro Elektronische Producten BV + Thijs de Ridder + info&mep-info.com +27109 + Barnaul State Pedagogical University + Alexander Mitrohin + swp&swp.pp.ru +27110 + Petrozavodsk State University, Computer Science Department + Vadim Ponomarev + vadim&cs.karelia.ru +27111 + Adamentium SAS + Daniel Teply + support&adamentium.com +27112 + MATHEMA Software GmbH + Michael Wiedeking + admin&mathema.de +27113 + APCO Worldwide + Jim Bullock + jbullock&apcoworldwide.com +27114 + electrolyte consulting + Steffen Voigt + iana-oid&electrolyte.de +27115 + Qbit GmbH + Michael Blaesi + snmp&qbit.de +27116 + Freesystems S.L.L. + Santiago Ramos + sramos&sitiodistinto.net +27117 + Tux-Logic + Fabrice Mur + fmur&tux-logic.com +27118 + Grontmij NV + Dr Erik H. ten Winkel + erik.tenwinkel&grontmij.com +27119 + ID90 Technologies + Alejandro Pozzi + alejandro&id90.com +27120 + Smar Equipamentos Industriais LTDA + Marcelo Barros de Almeida + barros&smar.com.br +27121 + ikirby.net + kirby zhou + kirbyzhou&hotmail.com +27122 + Shadowcat Systems Ltd. + Chris Jackson + chrisj&shadowcatsystems.co.uk +27123 + ICOP Digital, Inc. + John Drakey + jdrakey&icop.com +27124 + Kociok Beratungsgesellschaft mbH + Gerhard Kociok + kociok.edv&gmx.de +27125 + Accuity LLP + Matthew White + anmwhite&yahoo.com +27126 + Universidade Federal de Sao Carlos + Helio Crestana Guardia + helio&dc.ufscar.br +27127 + Systems and Software, Inc. + Christopher F. Whelan + chris.whelan&ssivt.com +27128 + Landeshauptstadt Hannover + Rouven Siehndel + oid.admin&hannover-stadt.de +27129 + Department of Conservation, New Zealand + Ken Walker + kbwalker&doc.govt.nz +27130 + TehnoCom Ltd. + Alexander P. Otstavnov + alcaver&74mail.ru +27131 + kk+w GmbH + Roland Kantz + rkantz&planiglobe.com +27132 + APIS IT d.o.o. + Perica Simundic + oid.admin&apis-it.hr +27133 + Hensley, Elam & Associates + Sam Elam + sam&hea.biz +27134 + Bangladesh Online Limited + Syed Samiul Wadood + admin&bol-online.com +27135 + 77 Elektronika Kft + Feher Istvan + ifeher&e77.hu +27136 + Osaka Gas Information System Research Institute Co.,Ltd. + Takashi Yahata + Yahata_Takashi&ogis-ri.co.jp +27137 + Proxicom, Inc. + Akil Franklin + akil.franklin&proxicom.com +27138 + Techno Viking + Dan Bullok + dan.iana&technoviking.com +27139 + My2do Solutions + Warren Strange + warren.strange&gmail.com +27140 + Acrodyne Industries, Inc. + Bill Soreth + iana-snmp&acrodyne.com +27141 + Applied Informatics Software Engineering GmbH + Günter Obiltschnig + guenter.obiltschnig&appinf.com +27142 + ALSTEC + Konstantin Tychkov + sit&alstec.ru +27143 + DirectPark GmbH + Florian Viertel + webmaster&directpark.de +27144 + Transact Limited + Alan Simes + alan.simes&transactgroup.net +27145 + GrahamCox.co.uk + Graham Cox + grahamcox&cwazy.co.uk +27146 + Infocentr Company Ltd. + Mihail Ohotin + ohotin&icentr.net +27147 + Mobissimo, Inc. + Hank Lee + mobissimo&mobissimo.com +27148 + ICP-SC + Gerente de Informática + icpsc&dtec.sc.gov.br +27149 + Wenzhou University + Miaobo Yao + ymbo.cn&gmail.com +27150 + Angelltech Co., Ltd. 
+ Ren Jingyan + renjy&angelltech.com +27151 + SK C&C + MyoungHwan + nait&skcc.com +27152 + Institut für Kristallzüchtung Berlin + Peter Mehlhorn + pm&ikz-berlin.de +27153 + Matzke Softwareentwicklung + Sascha Matzke + sascha&scourbase.de +27154 + Telekomunikacja Polska S.A. + Jacek Borowski + idb.oid&telekomunikacja.pl +27155 + MR Copiadoras Digitais Ltda. + Warlei Alves + warlei&mrcopiadoras.com.br +27156 + Warlei Alves + Warlei Alves + warlei&teccomonline.com.br +27157 + Quartet Financial Systems Inc. + Jean Safar + jsa&quartetfs.com +27158 + Department of Internal Affairs, New Zealand + Matt Duguid + matt.duguid&dia.govt.nz +27159 + Persistent Systems, LLC + Herbert Rubens + hrubens&persistentsystems.com +27160 + PlaSec, Inc. + Terry Neely + terry&plasecinc.com +27161 + DeGooroo, Inc. + Duc Pham + dadm2006&degooroo.com +27162 + Serveis de Salut Integrats Baix Empordà + Pere Rodríguez + prr&hosppal.es +27163 + Dolphin Technology Inc + Network Administrator + mibcontact&gmail.com +27164 + CNET Networks Asia Pacific Pte Ltd + Nikk Eh + nikk.eh&cnet.com +27165 + Universidade Tecnológica Federal do Paraná (UTFPR) + Thiago Herek + therek&utfpr.edu.br +27166 + Checchia.NET IT Consulting + Daniel Checchia + daniel&checchia.net +27167 + NIPPON ANTENNA CO.,LTD. + Osamu MINETA + o-mineta&nippon-antenna.co.jp +27168 + Edward Roper + Edward Roper + edro+iana&wanfear.net +27169 + Erlang Financial System (Pty) Ltd + Danie Schutte + danie&erlfinsys.net +27170 + ETHALON GmbH + Thomas Hackbarth + thomas.hackbarth&ethalon.de +27171 + Council of the European Union + Thierry Manté + thierry.mante&consilium.europa.eu +27172 + ArcaBit Ltd. + Grzegorz Michalek + grzesiek&arcabit.com +27173 + Software Consulting Markus Bernhardt GmbH + Markus Bernhardt + Markus.Bernhardt&scmb.de +27174 + Guenther Mair + Guenther Mair + guenther.mair&hoslo.ch +27175 + iO tech s.r.l. + Luca Salvadori + alliance&iotecno.com +27176 + Avinity Systems BV + Arnoud Zwemmer + snmp&avinity.net +27177 + Intendencia Municipal de Montevideo + Freddy Kugelmass Dodel + fkmass&informatica.imm.gub.uy +27178 + ISIB CNR + Paolo Bison + Paolo.Bison&isib.cnr.it +27179 + Ultri + Brian Winkers + bwinkers&gmail.com +27180 + Frontier Science and Technology Research Foundation, Inc. + Peter Meszynski + meszynsk&fstrf.org +27181 + Teak Technologies + Humphrey Liu + hcliu&teaktechnologies.com +27182 + CAI Networks, Inc. + Dev Manager + netadmin&cainetworks.com +27183 + Bluewave Networks Inc. + Mr. Guna Ramireddy + guna&bluewavenetworks.com +27184 + UNAVCO + Stephen Smith + ldapadmin&unavco.org +27185 + Vendorama LTD + Jason Morehouse + jm&vendorama.com +27186 + shenzhen qiaowei investment & development co.,Ltd + wang baohua + wangbh&qiaowei.net +27187 + VALIANT COMMUNICATIONS LIMITED + Inder mohan Sood + sood&valiantcom.com +27188 + SEP Software Engineering Partner GmbH + Peter Buehrer + peter.buehrer&sep.ch +27189 + DD NetService GmbH + Stefan Dreyer + ldap&ddnetservice.net +27190 + michaelbreuer.eu + Michael Breuer + mbreuer&michaelbreuer.eu +27191 + Godalming College + Daniel Ewen + danielewen&godalming.ac.uk +27192 + Soongsil University + Chang Ho-Jin + zzang70&gmail.com +27193 + Patrick Monahan (Drogheda) Ltd + Willie Lennon + wlennon&monahansdrogheda.ie +27194 + DELTA Resources Inc. + Keegan Mills + keegan&delta-va.com +27195 + Stadtwerke Duesseldorf AG + Ina Opitz + iopitz&swd-ag.de +27196 + Harder Software Ltd. 
+ Ian Harder + ianh&hardersoft.com +27197 + Heteonix + Benjamin Su + benjamin&hetronix.com.tw +27198 + Réseau pour l'Excellence en Enseignement Supérieur de l'Afrique de l'Ouest(RESAO) + NDANATCHE Essowèmlou Geraud + ndana_geraud&yahoo.fr +27199 + Sepracor Inc. + Michele Liguori + michele.liguori&sepracor.com +27200 + Energy Options, Inc + Philip Basile + pbasile&energy-options.com +27201 + Vianel + Ali Altiparmak + ali&vianel.com +27202 + Business Systems UK Ltd + Simon Bill + sbill&businesssystemsuk.com +27203 + Celesio AG + Joachim Schlette + joachim.schlette&celesio.com +27204 + Seawolf Technologies Inc. + Jinjun Xu + jason&seawolftech.com +27205 + GridApp Systems, Inc + Dan Cohen + dcohen&gridapp.com +27206 + Bitshift, Berchtold IT Solutions + Patrick Berchtold + hostmaster&bitshift.ch +27207 + SevOne Inc + Vess Bakalov + vess&sevone.com +27208 + Opinsys Oy + Veli-Matti Lintu + vml&opinsys.fi +27209 + Lendworx, Inc. + Al Gifford + webadmin&lendworx.com +27210 + Suso Technology Services, Inc. + Mark Krenz + ianareg&suso.com +27211 + Engsound Technical Enterprise Co., Ltd. + Ching-Hong Huang + ch_huang&engsound.com.tw +27212 + Routing International NV + Karel Deconinck + Karel.Deconinck&Routing-International.com +27213 + Banka Slovenije + Martin Umek + Martin.Umek&bsi.si +27214 + Holzhausen Network + Robert Haensel + robert.c.haensel&fh-stralsund.de +27215 + MAGIX AG + Karsten Stange + kstange&magix.net +27216 + Patryk Sciborek + Patryk Sciborek + patryk&sciborek.com +27217 + Digisense Inc. + Michael MacKay + standards&digisenseinc.com +27218 + GASAG Berliner Gaswerke AG + Steffen Löber + sloeber&gasag.de +27219 + Funambol, Inc. + Stefano Fornari + iana&funambol.com +27220 + EFCO Corporation Inc. + John Bodoni + jbodoni&efcocorp.com +27221 + Diebold Software Solutions Inc + Richard Harris + Richard.Harris&diebold.com +27222 + Pathway Connectivity Inc. + Gary Douglas + gdouglas&pathwayconnect.com +27223 + Alcon Technologies, Inc + Jorge Cano + jcano&alcon-tech.com +27224 + OhReally + Rob la Lau + rob&OhReally.nl +27225 + Chiris Networks, Inc. + Jesse Song + js&chiris.net +27226 + Asociación Grupo Universitario de Informática (Universidad de Valladolid) + Guillermo Rodríguez Cano + oid&gui.uva.es +27227 + SC SECPRAL COM SRL + Mihai Limbasan + mihailim&security.ro +27228 + DSP Group Inc. + Einam Schonberg + standards&dsp.co.il +27229 + Engineers Without Borders Canada + Francis Kung + francis&ewb.ca +27230 + E-Portal + Evgeny Skripa + skripa&e-portal.ru +27231 + Big Sky Computer Systems LLC. + James Hartley + james.hartley&gmail.com +27232 + BATM Advanced Communications GmbH + Markus Pestinger + mpestinger&batm.de +27233 + La-Events + Jeroen de Ruiter + la&la-events.nl +27234 + PortAuthority Technologies Inc. + Lidror Troyansky + lidror&portauthoritytech.com +27235 + Politechnika Gdanska + Adam Tlalka + adcent&pg.gda.pl +27236 + VOID.PL LTD (formerly 'Netoracle Ltd.') + Maciej Pedziwiatr + iana&void.pl +27237 + Smart Data Solutions, Inc. + Luis P. Rodriguez + luisrt&smartdatasol.com +27238 + Winbond Electronics Corp. + Chin-Wei Hsu + cwhsu2&winbond.com +27239 + USC-Satunama + Wahyu Widodo + t4mp4h&usc-satunama.org +27240 + Headnet aps + Sune Woeller + woeller&headnet.dk +27241 + IDC Informatica s.r.l. + Petrillo Giovanni + giovanni.petrillo&idc-informatica.it +27242 + Bilfinger SE (formerly 'Bilfinger Berger AG') + Quitt,Andreas + domains.bilfinger&bilfinger.com +27243 + Signal Networks Pvt. Ltd. + Anand Mallya + anandmallya&signal-networks.com +27244 + IronGate S.L. 
+ Pablo Ruiz Garcia + pablo.ruiz&iron-gate.net +27245 + Maltacom plc + Ing. Charles Galea + charles.galea&maltacom.com +27246 + Järfälla Kommun, För- och Grundskolor + Thomas Hellström + thomas&jfog.net +27247 + Radian Group Inc. + Frank Monzo + Frank.Monzo&Radian.biz +27248 + Computer Programs and Systems Inc. (CPSI) + Sean Wentworth + seanw&cpsinet.com +27249 + Picosoft Ltd. + Rob Melville + robmelville&blueyonder.co.uk +27250 + West Linn Paper Company + Penny Woods + pwoods&wlinpco.com +27251 + MedStreaming + wael elseaidy + w.elseaidy&comcast.net +27252 + Excapsa Services Inc. + Janet Post + janet.post&excapsa.ca +27253 + AusRegistry Pty Ltd + Chris Wright + chris&ausregistry.com.au +27254 + FIX Flyer LLC + George Kledaras + george&fixflyer.com +27255 + Tarkhany Regional (County) Open Joint-Stock Bank + Vladimir Privalov + lee&tarkhanybank.ru +27256 + NPF "KRUG" + Danila Chernetsov + chernetsovdv&krug2000.ru +27257 + Valimo Wireless Oy + Ilkka Pietikäinen + ilkka.pietikainen&valimo.com +27258 + NovaSecure AS + Kare Langedrag + kare&novasecure.no +27259 + BEL POWER SOLUTIONS (formerly 'Power-One') + Juerg Labhart + juerg.labhart&psbel.com +27260 + DynaPel Systems Inc. + Dr. Dieter Koller + snmp&dynapel.com +27261 + Servicios Avanzados para las Instituciones S.L. + Raul Diaz + rdiaz&viavansi.com +27262 + DANTE Ltd. + Otto Kreiter + oidmaster&dante.org.uk +27263 + Netsmart Public Health, Inc. + Kevin Davidson + kdavidson&ntst.com +27264 + Nmodel + Derek Smith + dereks_letterbox&aapt.net.au +27265 + Inlet Technologies, Inc. + Dawn Blalock + support&inlethd.com +27266 + freecode + Wijnand Modderman + wijnand&freecode.nl +27267 + Encryptanet, Inc. + Clay Cover + ccover&encryptanet.com +27268 + Hangar Eighteen Enterprises LLC + Mark McConnell + mnhangareighteen&gmail.com +27269 + Mynavi Corporation + Terutake Usui + iana-contact&mynavi.jp +27270 + KoSyS - Kockisch ITK Systems and Services + Michel Bretschneider + michel-bretschneider&itk-systems-and-services.de +27271 + Structured Software Systems Ltd + David Priestley + david.priestley&threesl.com +27272 + Envisioneering Medical Technologies + Stephen Lewis + slewis&envisioneeringmedical.com +27273 + Infinite Video Corporation + Pete Koat + prkoat&gmail.com +27274 + Fastfeed Inc. + Smruti Parida + smruti.parida&gmail.com +27275 + Alexcoder + Mohammed Habib + habib&alexcoder.com +27276 + Technovare Systems, Inc. + Srikanth Sherman + sri&technovare.com +27277 + SatPath Systems, Inc. + Herkea Jea + herkea&sat-path.com +27278 + Sindoh Co., Ltd. + Minseok Pyo + mspyo&sindoh.com +27279 + GoldenNet Technology Inc. + Howard Chang + howard&togoldennet.com.tw +27280 + Università Degli Studi di Urbino "Carlo Bo" + Claudio Caracci + caracci&uniurb.it +27281 + Netcon Systems + Eric Anton + eric.anton&netconsys.com +27282 + Realtek Semiconductor corp. + Tom Chen + tom.chen&realtek.com.tw +27283 + Xcenium Technology + Laura Redruello + snmp&xcenium.com +27284 + Daon Trusted Identity Services (formerly 'Security Biometric Clearing Network') + Patrick Osborne + pat.osborne&aaae.org +27285 + The Nisu research team + Manuel David Mollar Villanueva + mm.iana&nisu.org +27286 + Netopia, Inc. + Ken Haase + KHaase&netopia.com +27287 + University Of Perugia + Informatic Service Office + sysadm&unipg.it +27288 + MySystemAdmin + Chris Tucker + com&mysystemadmin.com +27289 + eGistics, Inc. 
+ Brad Lee + blee&egisticsinc.com +27290 + Beijing Osee Digital Technology Co.Ltd + Chenrunhai + chenrunhai&osee-dig.com +27291 + Bundesministerium für Verkehr, Bau und Stadtentwicklung + Ulf Jakubke + snmp.contact&bmvbs.bund.de +27292 + Topex S.A + Bica Dumitru + dumitru.bica&topex.ro +27293 + Melian Italia SRL + Andres Kaswalder + melianitalia&melian.com +27294 + MacSpecialist + Adam Karneboge + adam&macspecialist.com +27295 + Axel Technology srl + Paolo Garuti + paolo.garuti&axeltechnology.com +27296 + Blizzard Entertainment + Ryan Fisher + rfisher&blizzard.com +27297 + SchechterTech LLC + Harry Schechter + hjs&schechter.net +27298 + Asia Pacific Broadband Wireless Communications + Brian Lin + brianlin&aptg.com.tw +27299 + Informed Computing + Roger Henry + r.henry&inform.co.uk +27300 + TRANSCOM TECHNOLOGIES CO.,LTD + Changbin Zhang + lampzhang&163.com +27301 + galaxy.io + oidmaster galaxy.io + oidmaster&galaxy.io +27302 + Der Senator für Bildung und Wissenschaft + Wolfgang Lühmann + wolfgang.luehmann&bildung.bremen.de +27303 + Clear Value + Adrien Bourland + oidmaster&clearvalue.eu +27304 + DKT A/S + Nikolaj Hermann + nih&dkt.dk +27305 + Satis Superque Merce BV + Harrie Hazewinkel + harrie&asemantics.com +27306 + Automated Computing Machinery, S.L. + Jose San Leandro + chous&acm-sl.com +27307 + Rankin County School District + Scot Hollingsworth + scoth&rcsd.ms +27308 + Unique Communications Solutions Inc. + Brandon Arthur + brandon&unique.net +27309 + SSRC Media, LLC + Adam Hebert + adam&candorworld.com +27310 + NetIDEAS, Inc. + Daniel Cushing + dan.cushing&netideasinc.com +27311 + Seni Systems, Inc. + Judet Useinovski + judet&senisystems.com +27312 + EDA LTD. + Dimitar Angelov + mitko&edabg.com +27313 + Sintrex Integration Services Pty. Ltd. + Adrienne Kotze + adrienne.kotze&sintrex.com +27314 + Del Systems d.o.o. + Marko Medos + ds&delsystems.net +27315 + Internet Management Technology Laboratory + Jong-Hyouk Lee + hurryon&gmail.com +27316 + Schoeppner + Sven Schoeppner + sven.schoeppner&gmail.com +27317 + Extreme Engineering Solutions, Inc + Matthew Starzewski + mstarzewski&xes-inc.com +27318 + ISE Inc. + Abboud Assaf + support&i-s-e.com +27319 + Nexen Inc. + I/T Security Manager + PKI-Admin&nexeninc.com +27320 + Kingsbury Inc. + S. Ohara + baw&kingsbury.com +27321 + Ready Technology (UK) Limited + Daniel Pocock + iana.contact&readytechnology.co.uk +27322 + 21Net Ltd. + Kris Ceuppens + kris&21net.com +27323 + Kei Communication Technology Inc. + Takao Awata + info&ieee802.co.jp +27324 + Semejny Doktor + Alexander Galler + galler&kuzbass.net +27325 + Ubiquitech A/S + Lars Skjærlund + ls&ubiquitech.com +27326 + EUIT Telecomunicacion, Universidad Politécnica de Madrid + Daniel Martin + daniel.martin&upm.es +27327 + Mobizoom AG + Daniel Lusti + dl&mobizoom.com +27328 + ROSA - Dansk Rock Samråd + Rasmus Wehner + rasmus&rosa.org +27329 + Business Education Council of Niagara + Matt Watson + mwatson&becon.org +27330 + LKHP Inc. + Shinichiro Suzuki + s.suzuki&lkhp.ath.cx +27331 + JTG 99 ApS + Ilja Godau + ilja&godau.dk +27332 + Sogeti + Edouard JEANSON + edouard.jeanson&sogeti.com +27333 + Buzz Systems Ltd. + Danie Brink + danie.brink&gmail.com +27334 + Multidyne Electronics Inc. + Rick Hormigo + rickh&multidyne.com +27335 + Ritek ZRt. + Zsolt Balogh + iroda&ritek.hu +27336 + Converged Access, Inc. + Pat Cauduro + pcauduro&convergedaccess.com +27337 + CableServ Inc. + Audley Alexander + audley&cableserv.com +27338 + ATEME + Vincent CLERC + v.clerc&ateme.fr +27339 + Project Automation S.p.A. 
+ Fulvio Zanzottera + oid&p-a.it +27340 + OpenVPN Solutions LLC. + James Yonan + jim&yonan.net +27341 + Medianstrip + Mike Ryan + iana.org&reg.elision.org +27342 + Haskology Consulting + Thomas Conway + drtomc&gmail.com +27343 + "Telekom Srbija" a.d. + Information Technology Division + oid&telekom.yu +27344 + Dallmeier electronic GmbH & Co.KG + Norbert Niggemann + snmp&dallmeier-electronic.com +27345 + Alticor, Inc + Richard Mapes + itsecurity&alticor.com +27346 + VDA Elettronica spa + Leonardo Uzcudun + uzcudunl&vdavda.com +27347 + Alexandrow Webhosting + Paul Alexandrow + paul&alexandrow.org +27348 + Blue Spruce Technologies, Inc. + Charles Owens + iana&bspruce.com +27349 + Lonardi.org + Sandro Lonardi + sandro.lonardi&gmail.com +27350 + QEW Europe + David Persson + qeweurope&gmx.net +27351 + Medizinische Fakultät Carl Gustav Carus Dresden an der Technischen Universität Dresden + Thomas Kugel + thomas.kugel&uniklinikum-dresden.de +27352 + MAYWIDE TECHNOLOGY CO.,LTD. + Lydia (Li Bin) + L-suhexiang&126.com +27353 + ELTA + Pierre Dubarry + p.dubarry&elta.fr +27354 + epona.org + Ernesto de Miguel + emadruida&gmail.com +27355 + University Politechnical of Bucharest + Gorea Adrian Marius + admin&comm.pub.ro +27356 + DWESAB Engineering + Michel de Wolf + michel.de.wolf&dwesab.com +27357 + ANDROME NV + Chris Flerackers + cflerackers&androme.be +27358 + Technology Dynamics, Inc. + Aron Levy + linda&theallpower.com +27359 + HILTRON GmbH + Matthias Hayer + matthias.hayer&hiltron.de +27360 + NLS Technology LLC + Norm Siegel + nls&nlstechnology.com +27361 + Rizon Software Technology Co.,Ltd. + Quan Zhen + quanz&rizon.com.cn +27362 + Symbio Technologies, LLC + Gideon Romm + support&symbio-technologies.com +27363 + TvHead, Inc. + Cliff Mercer + cliff.mercer&tvhead.net +27364 + Axioma Inc. + Hollister Scholte + hscholte&axiomainc.com +27365 + Softech Worldwide LLC + Akhlaq Khan + akhlaq.khan&softech.us +27366 + INSIGNIA MOBILE COMMUNICATIONS,C.A. + RICARDO CARRILLO + ricardo.carrillo&insignia.com.ve +27367 + Visonys AG + Erwin Huber + iana-adm&visonys.com +27368 + Business I.T. Systems Ltd + Bryan Stone + info&bits.uk.com +27369 + Estée Lauder Inc + Greg Daggett + gdaggett&estee.com +27370 + Hennepin County + Paul Edoff + paul.edoff&co.hennepin.mn.us +27371 + AspellClark + andy aspell-clark + aspellclark&yahoo.co.uk +27372 + Progenist Systems Inc. + Rudy Huang + iana&progenist.net +27373 + Quickpoint Solutions Limited + Brandon Wong + brandon.wong.nz&gmail.com +27374 + MakeITOpen.Net + Patrice LACHANCE + ldap&MakeITOpen.Net +27375 + IT-Systemhaus Thomas Wiese + Thomas Wiese + Thomas.Wiese&gmx.info +27376 + Triple-It ApS + Stefan Andersen + stefan&triple-it.dk +27377 + Groupster + Marek Jelen + marek.jelen&gmail.com +27378 + Belden + Brad Pope + brad.pope&belden.com +27379 + NESS Consulting Ltd + Ivor Potter + ivor.potter&nessconsulting.co.uk +27380 + PCS Process Control Systems AG + Rolf Stalder + rolf.stalder&pcs-ag.com +27381 + Myndit Pty Ltd + David Garrard + david&commsnet.com.au +27382 + OmniAccess S.L. + Richard Smith + richard&omniaccess.com +27383 + Theo Benning GmbH & CoKG + Ulrich Borkers + u.borkers&benning.de +27384 + WiE GmbH - Werk für industrielle Elektronik + Jürgen Eigner + platz&werner-electronic.de +27385 + ELCON Systemtechnik GmbH + Ronny Priemer + priemer&elcon-system.de +27386 + Farmers Group Inc. + William Buthod + william.buthod&farmersinsurance.com +27387 + IHOP Corp. + Joe Meyer + joe.meyer&ihop.com +27388 + Wiremat Ltd + Karl Simon + helpdesk&wiremat.com +27389 + Splunk, Inc. 
+ Carl Jackson + carl&splunk.com +27390 + iCoGo Inc. + Virgil Dimaguila + virgil&icogo.com +27391 + Braintribe IT Technologies + Roman Kurmanowytsch + roman.kurmanowytsch&braintribe.com +27392 + SCHILLER + MEYER Didier + didier.meyer&schiller.fr +27393 + Metrovacesa, S.A. + Alberto Martin + sistemas&metrovacesa.es +27394 + NOSMadeira + David de Sousa + ds&nosmadeira.pt +27395 + AVANSI C. por A. + Diego Gil Serrano + dgil&avansi.com +27396 + Open Journal Project + Alexey Remizov + rem&ojp.ru +27397 + Praxis Balster-De Beer + Wolfgang Balster + wolfgang&balster-balsterdebeer.de +27398 + Bernd Haug + Bernd Haug + bernd.haug&gmail.com +27399 + Ross Video Limited + Jeff Allen + administrator&rossvideo.com +27400 + Randolph-Macon College + Jeff Hague + jhague&rmc.edu +27401 + Dataupia Corporation + John Chiu + jchiu&dataupia.com +27402 + TaTrad GmbH + Markus Michael + itadmin&tatrad.de +27403 + Frontier IT + Sean Mullen + sean&frontierit.com +27404 + SimpleComTools + Bill W. Crum + bcrum&simplecomtools.com +27405 + René Faigle AG + Mr. Adrian Nenning + adrian.nenning&faigle.ch +27406 + Center Communication Systems GmbH + Erich Freitag + e.freitag&centersystems.com +27407 + BUROTEC + ABAYAD Tarik + tabayad&burotec.fr +27408 + Hotelplan AG + Daniel Rüegg + sih.systemmanagement&hotelplan.ch +27409 + Mondi Packaging AG + Christian Loidolt + christian.loidolt&mondipackaging.com +27410 + SkoleSYS A/S + Jakob Simon-Gaarde + jakobsg&gmail.com +27411 + Curtis J Coleman + Curtis J Coleman + curtis-iana&spoonhenge.com +27412 + Icsoft Pty Ltd + Administrator + support&icsoft.com.au +27413 + Labs2 i Lund AB + Valdemar Mejstad + valdemar&labs2.se +27414 + Stadt Chemnitz + Uwe Barth + it-iana&stadt-chemnitz.de +27415 + SiRF Technology, Inc. + Riju Kallivalappil + rkallivalappil&sirf.com +27416 + Elvaco AB + David Vonasek + dv&elvaco.se +27417 + FAST LTA GmbH + Thorsten Staude + it&fast-lta.de +27418 + FiSEC GmbH + Dieter Vollstaedt + dieter.vollstaedt&fisec.de +27419 + University of New England + John Vedral + jvedral&une.edu +27420 + Southside Community Hospital + Kyle Hultman + khultman&sch-farmville.org +27421 + Synergy for Smart Solutions + Younes Ouadi + ouadi&eim.ae +27422 + Inter Cars S.A. + Piotr Polkowski + piotr.polkowski&intercars.com.pl +27423 + Potts Mullarkey + Christine Salvage + snmpenterprise&pottsmullarkey.com +27424 + Firstin Wireless Technology + Changhwan You + turbocpp&firstinwireless.com +27425 + Affinity Mobile, Inc. + Max Knake + mknake&affinitymobile.com +27426 + VIASOFT Ltd., Ukraine + Serg Ageyev + boss&viasoft.com.ua +27427 + Vecima Networks Inc. + Laird Froese + external.registrations&vecima.com +27428 + Intesys S.r.l. + Romano Rosponi + romano.rosponi&intesys.it +27429 + IT Service Zahner + Jens Zahner + info&it-zahner.de +27430 + Call Genie Inc + Tim Pushor + tim.pushor&callgenie.com +27431 + Steton Technology Group, Inc. + David Omer + dms&steton.com +27432 + Archalien.com + Bryan Hunwardsen + archalien&archalien.com +27433 + Centeris Corporation + John Geer + jgeer&centeris.com +27434 + Hal's Software, Inc. + Hal Finkel + half&halssoftware.com +27435 + Aozora Information Systems Co. Ltd. + Robert O'Grady + iana&aozora-is.co.jp +27436 + Jefferies & Company, Inc.
+ Dan Strzelec + DStrzelec&jefferies.com +27437 + TOA Corporation + Shigeru Sugita + sugita_shigeru&toa.co.jp +27438 + Eric Lindahl + Eric Lindahl + eric&ericlindahl.com +27439 + OpenAgentSystem + Justin Kim + jst.kim&gmail.com +27440 + Ingo Lantschner + Ingo Lantschner + ingo&binonabiso.com +27441 + PEM GmbH + Kevin Humphries + kevin.humphries&pem-systems.com +27442 + SAPERION AG + Bernhard Cygan + Bernhard.Cygan&saperion.com +27443 + yLabs SRL + Octavian Cerna + tavy&ylabs.com +27444 + Murphy Software B.V. + Ton Nijkes + ton&murphy.nl +27445 + InforMed, LLC + Ryan Frantz + ryanfrantz&informed-llc.com +27446 + PAHL.NET systems + Ole Pahl + op&pahl.net +27447 + Femtio Procent Data AB + Lars Nordberg + fpd&telia.com +27448 + The Aero Group, Inc. + Jason Nesheim + jason&aeroinc.net +27449 + Universidad de Buenos Aires + Carolina León Carri / Esteban Mocskos + mcarri&dc.uba.ar +27450 + RealOrganized, Inc. + Scott Schmitz + scott&realorganized.com +27451 + Aurcon Computer Services + Darren O'Connor + darren.oconnor&aurcon.com +27452 + Billings Clinic + Information Services + network&billingsclinic.org +27453 + LANE COMPANY + Tony Bowen + tbowen&lanecompany.com +27454 + NeoMentis Ltd + Will Daniels + mail&neomentis.com +27455 + Schlesinger Associates, Inc. + Joe Kreher + Joe&SchlesingerAssociates.com +27456 + University of Warwick + Jaroslaw Zachwieja + grok&warwick.ac.uk +27457 + Joomla! (OpenSourceMatter Inc) + Samuel Moffatt + sam.moffatt&joomla.org +27458 + Flo Healthcare + Karl Haiden + karl.haiden&flohealthcare.com +27459 + INTLNET + JFC Morfin + info&intlnet.org +27460 + Digital Maelstrom LLC + Jeff Hall + contact&digitalmaelstrom.net +27461 + Knit One Pearl One + Olivia Clarke + knitonepearlone&gmail.com +27462 + Bubble Motion Pte Ltd + Gunamony Madhan Dennis + madhan&bubblemotion.com +27463 + ThüCom GmbH + Bernd Wunder + b.wunder&thuecom.com +27464 + Projekty Bankowe Polsoft sp. z o.o. + Maciej Lewandowski + maciej.lewandowski&pbpolsoft.com.pl +27465 + Domodesk S.L. + Departamento de desarrollo + project&domotium.com +27466 + Inter IKEA Systems BV + Anders Östling + anders.ostling&inter-ikea.com +27467 + NETHINKS GmbH + Garry Glendown + hostmaster&nethinks.com +27468 + ELTA-R + Stanimir Stoyanov + stani&elta.bg +27469 + Glowlink Communications Technology Inc. + Bob Estus + ushq&glowlink.com +27470 + Kuorem IT Consulting s.l. + Alfonso Rodriguez Fernandez-Cuesta + arodriguez&kuorem.com +27471 + Kotelett Group + Chris Qvigstad + hostmaster&kotelett.no +27472 + Peak Web Consulting, Inc. + Stephen Friedl + steve&peakwebconsulting.com +27473 + Consejo de la Magistratura de la Ciudad Autonoma de Buenos Aires + Gustavo Panizzo + root&jusbaires.gov.ar +27474 + Multi Co + KAZUYA YAMAGUCHI + pen-admin&multig.jp +27475 + CiriTech Systems, Inc. + Anumele Raja + araja&ciritech.com +27476 + MicsGarage + Michael Lumley + mic&micsgarage.com +27477 + Ethrix ltd. + Avi Kadosh + avik&ethrix.com +27478 + Boekingspunt Nederland BV + Wijnand Modderman + iana-contact&booking.com +27479 + Projekt 42!
GmbH + Gerhard Strehle + gstrehle&zweiundvierzig.de +27480 + FORSIS GmbH + Steffen Poppe + steffen.poppe&forsis.de +27481 + Arzl Datentechnik + Markus Arzl + mark1&arzl.net +27482 + G2 Microsystems Pty Ltd + David Goodall + david.goodall&g2microsystems.com +27483 + OpenSIPS.ORG (formerly 'OpenSER.ORG') + Bogdan-Andrei Iancu + bogdan.iancu&voice-system.ro +27484 + Eightwire.com Limited + Daniel Hodson + daniel.hodson&eightwire.com +27485 + JSC Public Key Infrastructure + Viktor Lapin + csk&ivk.org.ua +27486 + swissgeek.org + Olivier Beytrison + olivier&swissgeek.org +27487 + Kent County Council + Mick Mason + Mick Mason&kent.gov.uk +27488 + A.T.U Handels GmbH + Bernhard Panzer + sysadminwin&de.atu.eu +27489 + Evangelisch-Lutherische Landeskirche in Braunschweig + Markus Reinmuth + edv.lka&lk-bs.de +27490 + SP Swedish National Testing and Research Institute + Per-Olof Eliasson ATa + per-olof.eliasson&sp.se +27491 + Freeport Technologies Inc. + David Tucker + dtucker&freeporttech.com +27492 + Major Hospital + Todd Goul + tgoul&majorhospital.com +27493 + Dirección de Tecnología Educativa + Guadalupe Miranda Zamora + gmiranda2&hotmail.com +27494 + Seatore Technology + Juer Lee + juer.lee&seatore.com +27495 + Optiway + Luca Berkovichi + enadmin&walla.com +27496 + MALAIWAH.COM + Michel Belleau + michel.belleau&malaiwah.com +27497 + Fiducial Staffing + Hubert FONGARNAND + informatique.internet&fiducial.fr +27498 + Innovative Technology Solutions, LLC + Leonard Danao + LDanao&itsllc.com +27499 + Operations Feedback Systems Pty. Ltd. + C. Charles Winchcombe + quinch&ofsystems.com +27500 + Dawning Ltd. + Yuan Ma + mayuan.ict&gmail.com +27501 + Seldon Systems, Inc. + Jeff Burgett + jburgett&seldonsystems.com +27502 + ARAB BANKING COPORATION + HUSAM SAOUDI + HUSAM.SAOUDI&ARABBANKING.COM +27503 + TD Computer Solutions GmbH & Co. KG + Thomas Duennemann + info&tdcs.de +27504 + Celtius Ltd + Lasse Kiviluoto + Lasse.Kiviluoto&celtius.com +27505 + KT-NET Communications GmbH + Dieter Klausner + domadmin&kt-net.at +27506 + Object Builder Software - Bulgaria + Georgi Stanchev + admins&obs.bg +27507 + Automation Control Products Inc. + Randy Cannady + randy&acpthinclient.com +27508 + TravelCenters of America + Neil Vergottini + vergottini.neil&tatravelcenters.com +27509 + Creanord Ltd. + Antti Pappila + antti.pappila&creanord.com +27510 + NetGain Systems Pte. Ltd. + Issac Dev Nirmal + issac&netgain-systems.com +27511 + Gigante Inc. + Yoshihisa Koizumi + koizumi&gigante.jp +27512 + M3 Technology Group + Nate Harris + m3support&m3tg.com +27513 + Pro-E Automation Sdn Bhd + Mr.Kanna / Mr.Magesh + kanna&pro-e.net +27514 + QTECH LLC + Basil Budko + budko&qtech.ru +27515 + Stockholms stad + Martin Dolk + martin.dolk&stadshuset.stockholm.se +27516 + NIVC AS + Alexey Ivanov + amis&amiga.org.ru +27517 + GT50 Srl (formerly 'Secure Edge Srl') + Sandro Fontana + sandro.fontana&gt50.org +27518 + ProMedica Health System + Mark Dailey + mark.dailey&promedica.org +27519 + Pozicom Technologies, Inc. + Eric Mayo + eric&pozicom.net +27520 + International Electronic Communication Analysts (IECA), Inc. + Christopher Bonatti + BonattiC&ieca.com +27521 + eMusic.com, Inc. + hgoldwire&emusic.com + hgoldwire&emusic.com +27522 + Municipio de la Ciudad de Monterrey + Sergio Camarena + sergio&monterrey.gob.mx +27523 + runlevel3 GmbH + Stephan Hantigk + X.500&runlevel3.de +27524 + TRIAX CZ s.r.o.
+ Martin Bednar + bednar&triax.cz +27525 + DET 1 AFEREG + DAVE PHILLIPS + dave.phillips&oln-afmc.af.mil +27526 + Kanazawa University + KAINS Administration + admin&kains.kanazawa-u.ac.jp +27527 + ABB Ltd + Natalie Eliasson + domain.administration&abb.com +27528 + Dragon Stock labo Inc. + Hiroshi Ito + hi_02258&yahoo.co.jp +27529 + dri, Consultoria Informática Lda + Ricardo Melo + ricardo.melo&dri.pt +27530 + decontis GmbH + Michael Hanel + no_spam&decontis.com +27531 + Planned IT Solutions, Inc. + Scott Pascoe + manager&planneditsolutions.com +27532 + Hancock Regional Hospital + Dan Allee + dallee&hancockregional.org +27533 + BKE a.s. + Jiří Raputa + jraputa&bke.cz +27534 + Widestore srl + Technical Department + tech&widestore.net +27535 + Artwork Systems nv + Pieter Van Landuyt + pvla&artwork-systems.com +27536 + SANTECLAIR + Benoît REY + brey&santeclair.fr +27537 + Educatio Public Services Non-profit Llc. + dr. Edit Polyák + edit.polyak&edukht.hu +27538 + ADTSystems s.r.o. + Petr Nyklicek + petr_nyklicek&adtsystems.cz +27539 + Pacific DataVision, Inc. + Mike Fox + mfox&pdvcorp.com +27540 + SIMPPLE S.L. + Pere Cortada + pere.cortada&simpple.com +27541 + ITS Group + Jean-Philippe Roze + jproze&itsgroup.fr +27542 + COSAT INC. + TOMIYAMA Yoshihiro + tmy&cosat.com +27543 + Gillware Data Services, LLC + Wesley Gill + wesley&gillware.com +27544 + NSK OpenSource, Inc + Mike Kilpatrick + mike_kilpatrick&nskopensource.com +27545 + NINOMIYA TOWN + EBIHARA, SHINICHI + jo-kan&town.tochigi-ninomiya.lg.jp +27546 + American University of Beirut + Samih Ajrouch + samih&aub.edu.lb +27547 + Four Batons Technology + Johnathan Mayo + johnathanm&fourbatons.com +27548 + KRS Hardware Monitoring Development + Erik Holzer + info&kraftstrom.at +27549 + TELESANTE AQUITAINE + Régis ROSE + exploitation&sante-aquitaine.org +27550 + Alchemy Group Limited + Dean Ashby + d.ashby&alchemy.co.nz +27551 + Farheap Solutions Inc. + Jared Griffith + jared.griffith&farheap.com +27552 + Nimbus, Inc. + Kang Kyung Wan + bigskang&gmail.com +27553 + Salem Academy and College + Adam Sommer + sommer&salem.edu +27554 + PureTech Systems Inc. + Monroe Thomas + monroe.thomas&soundprint.com +27555 + Rosum Corporation + Dave Gotwisner + dgotwisner&rosum.com +27556 + The PTR Group Inc + Keith Buchanan + Keith&theptrgroup.com +27557 + Botech AB + Henrik Johansson + henrik.johansson&botech.se +27558 + IHMC + James Lott + jlott&ihmc.us +27559 + Data Science Group, Inc + Christian L. Straight + clstraight&datasciencegroup.com +27560 + Alan Dick and Company Ltd + Matt Middleton + matt.middleton&alandick.com +27561 + Indeca + Luis Moreno + luis&indeca.info +27562 + daemon software + Håkan Carlsson + snmp&daemon.se +27563 + QStar Technologies, Inc. + cs&qstar.com + cs&qstar.com +27564 + UNIFAL-MG - Universidade Federal de Alfenas + Vitor Renato Alves de Brito + vitorrenato.brito&unifal-mg.edu.br +27565 + Ministerio de Industria Turismo y Comercio + Carlos Maza + iana&mityc.es +27566 + Vizrt Group + Markus Rainer + markus.rainer&vizrt.com +27567 + Baur GmbH + Martin Naumann + martinnaumann&mac.com +27568 + Kupetto + Marco Cupidi + kupo&atomictag.com +27569 + National Digital Medical Archive, Inc. + Andrew Schaefer + aschaefer&ndma.us +27570 + GiaRoCo + Roberto Giana + iana&giaroco.ch +27571 + Guenther Brunthaler EDV Dienstleistungen + Guenther Brunthaler + gb_about_gnu&gmx.net +27572 + CEOS Integradores de Sistemas C.A.
+ Oscar Pizarro + oscar.pizarro&ceos.com.ve +27573 + TechGuard Security, LLC (formerly 'the-ferret.net') + Derrick Oetting + derrick.oetting&techguardsecurity.com +27574 + 10MT Co. + wang senlin + wangsenlin&vip.sina.com +27575 + DSL Internet Corporation + Javier Aguirre + jaguirre&dsli.net +27576 + Envysion, Inc + Robert Hagens + rhagens&envysion.com +27577 + R. D. Pierce + Dick Pierce + dpierce&cartchunk.org +27578 + cartchunk.org + Dick Pierce + dpierce&cartchunk.org +27579 + Keltron Corporation + Dick Pierce + dpierce&keltroncorp.com +27580 + Integrity Soluções em Informática Sociedade Simples + Rodrigo Grumiche Silva + grumiche&integrityit.com.br +27581 + Netum Oy + Petri Lappalainen + petri.lappalainen&netum.fi +27582 + pH Group Ltd + Hari Sekhon + hari.sekhon&phgroup.com +27583 + Metronik d.o.o. + Janez Zmuc + janez.zmuc&metronik.si +27584 + Upplysningscentralen AB + Anders Heintz + anders.heintz&uc.se +27585 + Orange Nederland BV + H. Kutukcuoglu + huseyin.kutukcuoglu&orange-ftgroup.com +27586 + Altai Technologies Limited + Sammy Chau + sammyc&altaitechnologies.com +27587 + Shinto Densan Corporation + Goro Murakami + gmurakami&sdkk.co.jp +27588 + KingsIsle Entertainment, Inc. + Emil Ramirez + eramirez&kingsisle.com +27589 + ZBN (Zhengzhou Broadcasting TV Network) Co. Ltd. + Yang Xu + shiuyx-2006&yahoo.com.cn +27590 + Cardinal Information Systems Ltd. + Christian Kvikant + christian.kvikant&cardinal.fi +27591 + Bit-Side GmbH + Andreas Roedl + a.roedl&bit-side.com +27592 + Beijing NetEast Technologies Corporation Ltd + wang ting yu + w.t.y&163.com +27593 + Media Excel, Inc. + Jon Clegg + jon.clegg&mediaexcel.com +27594 + Skipper Wireless, Inc. + Sumitaka Matsumoto + sam&skipperwireless.com +27595 + NWP-Technologie Informatyczne + Przemek Michalski + przemek.michalski&nwp.pl +27596 + NONNIC + Steve Conley + ssc&sysoplink.net +27597 + CADIGS Ltd. + Maik Wittchen + mwittchen&cadigs.com +27598 + Ayuntamiento de Madrid - Policia Municipal + Fco. Javier Guerrero González + guerrerogja&munimadrid.es +27599 + BIT-Kazan Ltd. + Raskin Igor + barabanov&bit-kazan.ru +27600 + C?!., C por A + beno + sai+gar&2012.vi +27601 + Higher Institute for Applied Sciences and Technology (HIAST) + Iyad Seyd Darwish + isdarwish&hiast.edu.sy +27602 + Woodforest National Bank + Victor Chataboon + vchataboon&woodforest.com +27603 + Groove Mobile + Nicholas Saparoff + nick&groovemobile.net +27604 + Tetralogyx + Jean-Loup DARNAY + jeanloup.darnay&tetralogyx.fr +27605 + Urzad Miasta Stolecznego Warszawy + Krzysztof Teperek + netmaster&warszawa.um.gov.pl +27606 + Global NetWatch, Incorporated + Renae Mazur + renae.mazur&globalnetwatch.com +27607 + Skin and Cancer Foundation Inc. + Joe Mercuri + infotech&skincancer.asn.au +27608 + Wuhan Yangtze Optical Technology Co., Ltd + zhengzhi + zhengzhi&yotc.cn +27609 + Dash Navigation Inc. + Senthil Supramaniam + senthil&dash.net +27610 + Solarcore, Inc. + Darren E. 
Kiel + darren.kiel&solarcore.com +27611 + Traffix Systems Ltd + Ben Volkow + bvolkow&traffixsystems.com +27612 + SmartStream Technologies + Ameya Bhakay + ameya.bhakay&smartstream-stp.com +27613 + AlbaNova University Center, the Stockholm Center for Physics, Astronomy and Biotechnology + Iouri Belokopytov + yb&albanova.se +27614 + Ascom Sweden AB + Martin Kvarmo + martin.kvarmo&ascom.se +27615 + devolo + Thomas Molkenbur + thomas.molkenbur&devolo.de +27616 + Phaedrus Ltd + Roy Schofield + roy&phaedrusltd.com +27617 + PayEx Solutions AB + Pelle Johansson + pelle.johansson&payex.com +27618 + Sihl + Eika Papier AG + Thomas Moll + thomas.moll&papyrus.com +27619 + BeWAN Systems + Christophe VAN DEPUTTE + christophe.vandeputte&bewan.com +27620 + GS1 Uruguay + Martin Rivadavia + mrivadavia&gs1uy.org +27621 + National Information Technologies JSC + Bagdat Musin + musin&nit.kz +27622 + Open Communication Security S.A. + Marcelo Fiori + mfiori&opencs.com.br +27623 + Shavlik Technologies LLC + Eric Schultze + eric.schultze&shavlik.com +27624 + Metanoia Communications Inc. + WuLung Hsu + wuulong&gmail.com +27625 + UZ Brussel + vital claeys + vital.claeys&az.vub.ac.be +27626 + SK-NIC, a.s. + Patrik Krauspe + patrik.krauspe&sk-nic.sk +27627 + Ceskoslovenska obchodni banka, a.s. + DAVID Zdenek + zdavid&csob.cz +27628 + Unum Group + Jediah Logiodice + jlogiodice&unumprovident.com +27629 + Helenic Ministry of Interior Public Administration and Decentralization / Helenic Public Administra + Stasis Antonis + a.stasis&ypesdda.gov.gr +27630 + OSI ste civile etudes et recherches + Jose H. REMY + jose.remy&openosi.org +27631 + Touchstone Technologies Inc. + Robert Hicks + robert.hicks&touchstone-inc.com +27632 + MVNO Sherpa + Hugh McLenaghan + hmclenaghan&mvnosherpa.com +27633 + Eternix Ltd. + CTO + yoav.zobel&gmail.com +27634 + ABC Phones of NC Inc. + Jason DeTiberus + jason.detiberus&abcphones.com +27635 + Wholesale Communications Group P/L + Ben Jones + bjones&wcg.net.au +27636 + F. Camara Informática Ltda + Renato Cesar de Souza + rcdsouza&terra.com.br +27637 + Lateral Networks Ltd + Bob Berryman + bob&sanlogan.com +27638 + Gulf Interstate Engineering + James Delahanty + ianainfo&gie.com +27639 + Electronic Communications Network (Pty) Ltd + ECN Systems + systems&ecn.co.za +27640 + Telefonica Moviles España, S.A. + Marc Mollà Roselló + marc.mollarosello&telefonica.es +27641 + Junta de Extremadura + Rafael Arroyo Perez + comtic&juntaextremadura.net +27642 + Manuel Linsmayer + Manuel Linsmayer + ldap&expersite.com +27643 + Zeda Ltd. + Richard Lawrence + r.lawrence&zeda.co.uk +27644 + CFC Informationssysteme Entwicklungsgesellschaft m.b.H. + Thomas Zellinger + office&cfc.at +27645 + PostgreSQL Global Development Group + Dave Page + dpage&postgresql.org +27646 + Systems Engineering & Assessment Ltd + Gavin Whitelaw + gjw&sea.co.uk +27647 + Iskra Zascite d.o.o. + Bostjan Kroselj + bostjan.kroselj&iskrazascite.si +27648 + ESPCI + Emmanuel Dreyfus + info&espci.fr +27649 + Hay Systems Limited + Technical Support + support&haysystems.com +27650 + Herward Hoyer + Herward Hoyer + gua808&web.de +27651 + ProConsultant Informatique + BOURSON Thierry + tech&proconsultant.net +27652 + SUNY + David Powalyk + domainmaster&suny.edu +27653 + MyRunning.com + Josh Forester + josh.iana.org&myrunning.com +27654 + Tain Malta Ltd. 
(formerly 'Involve Ltd.') + Hans Engren + noc&tain.com +27655 + IFP + Rainer Faulstich + Rainer.Faulstich&ifp-beisch.com +27656 + HafenCity Universität Hamburg + Michael Knop + michael.knop&hcu-hamburg.de +27657 + Hugh McLenaghan + Hugh McLenaghan + hughmcl&hotmail.com +27658 + Agemarks Technologies + Allen Zhang + azhang&agemarks.com +27659 + Atol, Conseils et Développements + Meunier Laurent + l.meunier&atolcd.com +27660 + Deutsche Welle + Arno Wagner + arno.wagner&dw.com +27661 + GateSquare Co., Ltd. + Yoon Hyung Taek + yun&gate2.co.kr +27662 + PePWave Ltd. + Michael Chan + hostmaster&pepwave.com +27663 + AFP548.com + Josh Wisenbaker + macshome&afp548.com +27664 + Intuicom, Inc. + Peter Miller + snmp&intuicom.com +27665 + ISR Inc. + Russ Lohman + rlohman&spraycool.com +27666 + NF Media Inc. + Chen Ming + chenm&yeah.net +27667 + Apprion, Inc + Mike Taylor + mike.taylor&apprion.com +27668 + Olivier Molteni + Olivier Molteni + olivier&molteni.net +27669 + easyRAID + Carsten Wilde + info&easyraid.com +27670 + Tirol Kliniken GmbH. + Erwin Seyrling + erwin.seyrling&tirol-kliniken.at +27671 + ATL Systems, Inc. + Kenji Yamaguchi + atl-regist&atl-systems.co.jp +27672 + Electool Systems Kft. + Tamas Lorincze + tamas.lorincze&electool.com +27673 + QUOD Financial SA + Mickael Rouillère + mickael.rouillere&quodfinancial.com +27674 + Mobibrasil Soluçoes Interactivas pela internet + javier renedo + fgil&mobibrasil.com +27675 + Curtiss-Wright Controls, Inc. + Paralegal + ksamuelson&curtisswright.com +27676 + Motionbox, Inc + Nathan Olla + sysadmin&motionbox.com +27677 + Evolution Broadcast Pty. Ltd. + David Sabine + dsabine&evolution.com.au +27678 + Jazzio + Mo McRoberts + mo&jazzio.com +27679 + The Hyde Company + Franklin Marmon + marmon&hydeco.com +27680 + NEMO ENG + Sung Jin Kim + helloforest&daum.net +27681 + Technorati Inc + Jason Matthews + jason&broken.net +27682 + ALBA Software S.L. + José Luis Sanz Boixader + albasoft&albasoft.com +27683 + Mscorp Venezuela C.A. + Jose Luis Guerrero + jguerrero&mscorp.com +27684 + Idopte + Dimitri Langlois + dimitri.langlois&idopte.fr +27685 + Trent University + Andrew Bell + andrewbell&trentu.ca +27686 + Refactored Networks, LLC + Michael mealling + michael&refactored-networks.com +27687 + DataMirror Corporation + Victor Szabo + vszabo&datamirror.com +27688 + Keane, Inc. + Jim Ouellette + ssladmin&keane.com +27689 + Enertel Wireless BV + Joffrey van Wageningen + jwageningen&enertel-wireless.nl +27690 + Arqiva + Richard Smith + richard.smith&arqiva.com +27691 + Framework Computer Consultants Limited + Andy Dowling + andy.dowling&frameworksolutions.net +27692 + Thinware s.r.l. + Carlo Todeschini + work&thinware.it +27693 + Voith AG + Uwe Gobbers + uwe.gobbers&voith.com +27694 + UTU Inc. + BYUNG JOO KANG + bjkang&u2tech.co.kr +27695 + Meteor Mobile + Adrian Whitwham + adrian.whitwham&meteor.ie +27696 + AttoSense + Erez Mutzafi + erez&atto-sense.com +27697 + GRAVITY CIS INC + Mezentsev Alexandr + mezentsev&gravity-cis.ru +27698 + Vaal Hosting + Nico Coertzen + admin&vaalhost.za.net +27699 + Tyche Enterprises, LLC + Frank Cowell + fcowell&tyche.net +27700 + Open Diameter Project + Vinayak Hegde + hvinayak&novell.com +27701 + Origin Electric co.,Ltd. + Tomonari Koizumi + koizumtm&hq.origin.co.jp +27702 + Beijing Softtone Company, Ltd. 
+ Chen Feijie + chen.feijie&softtone.com +27703 + CABO Communications A/S + Jakob Simon-Gaarde + jakob&cabo.dk +27704 + Keisokugiken Corporation + Kazuo SAITO + ksaito&kgc.co.jp +27705 + 1984 ehf + Mordur Ingolfsson + mordur&1984.is +27706 + E-commerce-xhtml 1.1 dot com + Adam Hayden + here4u&e-commerce-xhtml.com +27707 + Siminn + Haflidi S. Magnusson + haflidi&siminn.is +27708 + ISB Brachert + Marcus Brachert + Marcus&Brachert.de +27709 + Software Applications + Walter Wriesnegger + wriesnegger&soap.at +27710 + hostNET Medien GmbH + Sebastian Jaeschke + iana&hostnet.de +27711 + 1053935 Alberta Ltd. + Sergei Agalakov + Sergei.Agalakov&gmail.com +27712 + Managing Company SB JSC + Alexander S. Efimov + alexe&sibbereg.ru +27713 + Ethon + Arne Steinkamm + info&ethon.de +27714 + Arne Steinkamm + Arne Steinkamm + arne&steinkamm.com +27715 + Posterity Technologies Co. Ltd + Sword Chen + swordchen&posteritytech.com +27716 + Minnesota State Colleges and Universities + Michael Janke + hostmaster&mnstate.us +27717 + CeDoc Modena + Luca Prampolini + tecnici&cedoc.mo.it +27718 + Selfservix IT-Service + Michael Brandstetter + brandstetter&selfservix.de +27719 + Vizioncore, Inc. + Scott Herold + sherold&vizioncore.com +27720 + SBone.DE + Bjoern A. Zeeb + oid-registry&zabbadoz.net +27721 + Dotster, Inc. + Micah McNelly + mmcnelly&dotster.com +27722 + Pennic Consulting Inc + Nick Cottrell + nick&pennic.com +27723 + Permessa Corporation (formerly 'DYS Analytics, Inc.') + Stephen Grinder + sgrinder&permessa.com +27724 + Opticomm Corporation + Matt Babineau + mbabineau&opticomm.com +27725 + Asurion + Cory Plastek + directoryservices&asurion.com +27726 + Executive Coach Inc. + Edward Lanning + edward&usacoach.net +27727 + Antartec S.A.C. + Alfredo César Zorrilla Ríos + alfredo.zorrilla&antartec.com +27728 + Datron World Communications, Inc + Art Lashbrook + alashbrook&dtwc.com +27729 + US Technology Resources LLC + Biju Gopinath + biju.gopinath&ustri.com +27730 + Leporis Corporation + Peter Zhang + peter.zhang&leporisco.com +27731 + Western Power + Derek Smith + derek.smith&westernpower.com.au +27732 + OnionSoftware, Inc. + Hee-Soon Kim + snmp&onionsoftware.com +27733 + ASW Systems s.r.o. + Michal Leinweber + lei&aswsyst.cz +27734 + Squitel + Jacques Demare + contact&squitel.com +27735 + YawaBureau s.r.l. + Site Administrator + job415&yawabureau.com +27736 + Smile Ltd. + Andrew Brainin + ab&smile-soft.com +27737 + Berendsen Group Services GmbH + Martin Röh + martin.roeh&berendsen.de +27738 + VERIT Verwaltungs- und Immobilien-Gesellschaft + Christian Jenny + christian.jenny&verit.ch +27739 + Taunis GmbH + Christoph Aigner + avisu&gmx.at +27740 + ADFC LV Berlin e.V. + Dirk Slaghekke + edv&adfc-berlin.de +27741 + AAA Carolinas + Mark Daeth + domainadmin&mailaaa.com +27742 + Flying Horse Studios + Greg Ball + gball&flyinghorsestudios.com +27743 + Daftano (formerly 'Shaney') + Davide Fiorentino + email&daftano.com +27744 + Majik Networks Inc. + Taylor Dondich + tdondich&gmail.com +27745 + 4Step Ltd. + Zsolt Kovacs + kovacs&4step.hu +27746 + The institute of science and technology + JingShen + lwshenjing&163.com +27747 + CAMINO MO + Marc Olivier CAMINO + abocomar&yahoo.fr +27748 + Canon Korea Business Solutions Inc. + Ho-jin, Kim + hojikim&canon-bs.co.kr +27749 + Stennis Institute of Government + Tim Tsai + iana&sig.msstate.edu +27750 + GoodsoftwareLab Co. Ltd.
+ Lim WangKyu + wklim&goodsw.com +27751 + Firefly FZ LLC + Thomas Groves + tom.groves&firefly-group.com +27752 + 4A Consulting AB + Marcus Jansson + snmp&4a.se +27753 + Nucleo Operacional para a Sociedade de Informação (NOSI) + Helio Varela + helio.varela&nosi.cv +27754 + Affinite Corporation + Robert Finkle + rfinkle&affinite.com +27755 + FiveRuns + Mark Reynolds + mark.reynolds&fiveruns.com +27756 + Renaissoft, Inc. + Robert LeBlanc + rjl&renaissoft.com +27757 + Solis Energy, Inc. + Duane L. Roberts + Duane&SolisEnergy.com +27758 + ServiceSoft Sdn. Bhd. + Mohamed Shazrin + shazrin&gmail.com +27759 + Sense8 UK Ltd. + Dr. Austin Osuide + austin&osuide.com +27760 + Novo Mundo Moveis e Utilidades Ltda + Frederico C Wilhelms + fredcw&novomundo.com.br +27761 + System One GmbH + Martin Hechenberger + martin.hechenberger&systemone.at +27762 + Fixe-Post + Robert Welz + welz&fixe-post.de +27763 + Accuratus Consulting, LLC. + Jack Pestaner + jpestaner&accuratusconsulting.com +27764 + DONG Energy A/S + Ole Ahleson + oleah&dongenergy.dk +27765 + Welltrans O&E Technology Co. , Ltd. + jin pan + jing111521&163.com +27766 + Grid Net, Inc. + Ray Bell + ray&grid-net.com +27767 + JavaService Consulting + SungJo Kim + sjkim&javaservice.com +27768 + Gesellschaft für Netzwerk- und Automatisierungs-Technologie mbH (N.A.T.) + Helmut Laufkoetter + helau&nateurope.com +27769 + Université de Perpignan Via Domitia + Centre de Ressources Informatiques + ldapadmin&univ-perp.fr +27770 + MIS Corporate Defence Solutions + Neal Blount + neal.blount&mis-cds.com +27771 + EVOLIS + OLIVIER Serge + solivier&evolis.com +27772 + Sidon GmbH + Esref Yetkin + eyetkin&sidon-gmbh.com +27773 + Zabiuk + John Zabiuk + johnz&nait.ca +27774 + CenturyTel, Inc. + Chris Hancock + hostmaster&centurytel.net +27775 + Site Monitoring Solutions Inc. + Trey Seastrunk + treys&site-safe.com +27776 + IDAC Ltd. + Masayuki Shigihara + mshigihara&idac.co.jp +27777 + Auptyma Corporation + Peter Utzig + peter&auptyma.com +27778 + UnState educational establishment of additional education "Educational center "Meson" + Anatoly Botcharov + a.bocharov&meson.ru +27779 + OFFRATEL + David MAJOREL + tech&lagoon.nc +27780 + Public Surplus, LLC + Luis Londono + admin&publicsurplus.com +27781 + Ministerio de Empleo y Seguridad Social (formerly 'Ministerio de Trabajo e Inmigracion') + Miguel Gendive + mgendive&meyss.es +27782 + OnFin + Alexandre VALENTIN + alexandre.valentin&on-fin.com +27783 + Mulder Innova BV + W Mulder + willem&mulderinnova.com +27784 + Sensata Technologies + Tom Connors + SensataNet&Sensata.com +27785 + Vialogy + Alan Morrissett + vadmin&vialogy.com +27786 + Five Mile Capital Partners, LLC + Gary Maier + gmaier&fivemilecapital.com +27787 + Compart AG + Stefan Urbat + sur&compart.net +27788 + eco-warehouse + Jeff Gordon + info&eco-warehouse.net +27789 + Catalina Technologies + Gary Maier + gary.maier&catalinatech.com +27790 + S5 Wireless Inc + Eliot Weitz + eweitz&s5w.com +27791 + eMobile Networks Inc. + OID Admin + oid.admin&emobilenetworks.com +27792 + Editure Ltd + Aris Theocharides + aris.theocharides&editure.com +27793 + mythofbutterfly.com + darrik mazey + contact&mythofbutterfly.com +27794 + GENETEC Corporation + Ysohihito Gotoh + snmp-enterprise&genetec.co.jp +27795 + Guardian Equipamentos Eletronicos Ltda + Carlos Alberto Falkenbach + falk&guardian.ind.br +27796 + Embedded Control Logic Corp. + Mark Roberts + mark&ecl.us +27797 + Nata-Info Ltd. + Andrey Ivanov + support&nata-info.ru +27798 + SES NEW SKIES B.V.
+ Stefan Okhuijzen + stefan.okhuijzen&ses-newskies.com +27799 + 7iD Technologies GmbH + Christian Chladil + christian.chladil&7id.at +27800 + PKE Electronics AG + Manfred Jiras + m.jiras&pke.at +27801 + Fejer Megyei Szent Gyorgy Hospital + Bagyal Tamas + tbagyal&fmkorhaz.hu +27802 + NEXPLORE + Tschirren Adrian + hostmasters&nexplore.ch +27803 + HTH Consulting GmbH + Herbert Thallauer + iana&hth.co.at +27804 + Bernard Matthews Foods Ltd. + Ian Templeman + ian.templeman&bernardmatthews.com +27805 + Compagnia Assicuratrice Unipol S.p.A + Murotti Bruna + B.Murotti&unipol.it +27806 + University of Malta + Dave Mifsud + systems&csc.um.edu.mt +27807 + Hawa AG + Gianni Belotti + belotti.gianni&hawa.ch +27808 + SKELVISION SARL + Jean-Philippe SE CHAO + jp.sechao&skelvision.com +27809 + La Mamma Ind. de Alimentos Ltda. + João Rafael Moraes Nicola + joaoraf&gmail.com +27810 + Qovo Systems + Fatih GEZEN + f.gezen&qovo.com +27811 + Ellsworth School System + Charles Liebow + cliebow&ellsworthschools.org +27812 + voipDS - Voice Over IP Directory Services + Balaji NJL + balaji&voipds.org +27813 + linux-on.com + Sergey Semenyuk + info&linux-on.com +27814 + Chiang Family + Da-Wei Chiang + todawei&gmail.com +27815 + Advanced UniByte GmbH + Michael Drueing + michael.drueing&advanced-unibyte.de +27816 + WES Power Technology Inc. + Philip Crowley + contact&wespt.com +27817 + Thorcom Systems Ltd. + Mike Tubby + support&thorcom.co.uk +27818 + Vapo Oy + Petteri Oravuo + petteri.oravuo&vapo.fi +27819 + Calaveras Internet + Cody Ritts + cr&caltel.com +27820 + 121Media Inc. + Jeffrey Paul + orgadmin&121media.com +27821 + Unihost Partnership + Sven Vollbehr + service&unihost.eu +27822 + Comtech Mobile Datacom + Wayne Sheh + cmdcit&comtechmobile.com +27823 + Marienfeld Multimedia GmbH + Philip Steinkuehler + philip.steinkuehler&marienfeld-multimedia.de +27824 + Renzoo Ltd + Mr. Denis Kotlar + dkotlar&renzoo.co.uk +27825 + Battery Force Ltd. + Christian Karg + ckarg&battery-force.co.uk +27826 + Bharat Forge Kilsta AB + Gunnar Norman + gunnar.norman&bharatforgekilsta.com +27827 + i-Solutions AB + Johan Winäs + johan.winas&i-solutions.se +27828 + St. Paul's Cardiac Electrophysiology, Ltd. + Prof M Malik + marek.malik&btinternet.com +27829 + Mira Networks (Pty) Ltd + Ruan Jonker + ruanj&miranetworks.net +27830 + Epitiro Ltd + Ben Keenan + bkeenan&epitiro.com +27831 + Itexis SARL + Simon Decarpentries + simond&itexis.com +27832 + Thomas Bauer + Thomas Bauer + thomas.bauer&hauptversammlung.at +27833 + Bizvox Consultoria e Tecnologia de Voz Ltda + Patrick Blackman + patrick.blackman&bizvox.com.br +27834 + Consumers Energy Corporation + Chip Smith + cdsmith&cmsenergy.com +27835 + Gemstar - TV Guide International, Inc. + Hostmaster + hostmaster&tvguide.com +27836 + jag-stang.ch + Nicolas Mayor + jag&jag-stang.ch +27837 + HanDreamNet + Choi Soon-Kyu + soondeng&handream.net +27838 + Host Grad Inc. + Vitaly V Velikodny + velikodny.v&hostgrad.ru +27839 + Markus Boas + Markus Boas + ryven&ryven.de +27840 + polygon + Burchenja Valentin Pavlovich + valentin&severomorsk.info +27841 + IP Networks Ltd + Otto Jansson + oid&ipnetworks.fi +27842 + Klebanov + Rob Klebanov + rklebanov&gmail.com +27843 + Internet Computing & Security Laboratory + JunHyung-Lee + junhyung&icns.khu.ac.kr +27844 + iVEC + Chris Hines + help&ivec.org +27845 + Netcube Technologies,Inc + Jaeyoung Heo + jyheo&netcube.com +27846 + Prill Tecnologia Ltda + Luis Augusto C. Sauerbronn + luis.sauerbronn&prill.com.br +27847 + VirtenSys Ltd. 
+ Ian Ormshaw + ian_ormshaw&virtensys.com +27848 + Itron GmbH (formerly 'Actaris Gaszaehlerbau GmbH') + Patric Wust + patric.wust&itron.com +27849 + American Research Institute + Sean Myers + smyers&americanri.com +27850 + LiquidXStream Systems Inc + Pierre Marcotte + pmarcotte&liquidxstream.com +27851 + Video Furnace, Inc. + Adam Yellen + adam&videofurnace.com +27852 + GUANGZHOU ZHIYUAN ELECTRONIC CO.,LTD. + Dikai Liu + ethernet.support&embedcontrol.com +27853 + mconsultancy + Kevin Montuori + montuori&gmail.com +27854 + Petr Kutalek + Petr Kutalek + petr&kutalek.cz +27855 + Interactions Corporation + A. Blake Cooper + blake&interactions.net +27856 + The Council of Australian University Directors of Information Technology (formerly 'Australian Access Federation') + Dr. Rodney G. McDuff + mcduff&its.uq.edu.au +27857 + J. Gordon Electronic Design + Kirk Wolff + kwolff&jged.com +27858 + University of Delhi + Vineet Ghildyal + vineet&du.ac.in +27859 + Rapid Information & Communication co. Ltd + kwangsik, choi + kumsim&yahoo.co.kr +27860 + DMS Group + Nikola Radovanovic + nikola.radovanovic&dmsgroup.co.yu +27861 + FILS COMMUNICATIONS LTD + STAN ONYIME + filsfamily&yahoo.com +27862 + AtNOC Corn Zauner OEG + Clemens Zauner + czauner&atnoc.net +27863 + DSR Communications Pty Ltd + Andrew Petrie + andrew&petrie.net +27864 + VOCEL, Inc. + Frans N. Benders + oidmaster&vocel.com +27865 + Tellvox S.A. + Marco Lonzetti + mlonzetti&tellvox.com.br +27866 + Logictec + James Allan + jim&logictec.co.uk +27867 + Entic Services + Anil Jangity + anilj&entic.net +27868 + Guardtime OÜ + Mike Gault + oid&guardtime.com +27869 + Microprocessador - Sistemas Digitais, SA + Joaquim Torres + joaquimtorres&microp.efacec.pt +27870 + Hooghuis lyceum + Ronald van Engelen + postmaster&hooghuislyceum.nl +27871 + shee consultants + Vitor Guerreiro + vg&shee.org +27872 + FPT Corporation + Truong Thi Thu Hien + hienttt3&fpt.com.vn +27873 + The Bridgeman Art Library Ltd + Stuart Gibson + stuart.gibson&bridgeman.co.uk +27874 + eurotel spa + Massimiliano Patriarca + massimiliano.patriarca&eurotel.it +27875 + Zenulta Ltd + Duncan Tincello + duncan.tincello&zenulta.com +27876 + ResponsFabrikken Serviços de Telecomunicações Ltda. + Fabio Garrido + fg&responsfabrikken.com.br +27877 + Blustaff S.p.A. + Francesco Fadda + f.fadda&blustaff.it +27878 + agenos GmbH + Martin Heise + M.Heise&agenos.de +27879 + CTDI Nethouse Services GmbH + Hartmut Boening + Hartmut.Boening&ctdi-nethouse.com +27880 + FreeSWITCH + Brian West + brian.west&mac.com +27881 + KCS Digital, Inc. + Watkins Chen + watkinschen&kcsdigital.com +27882 + Puryear Information Technology, LLC + Dustin Puryear + dustin&puryear-it.com +27883 + Extension7 + Dan Lacey + daniel_p_lacey&yahoo.com +27884 + Dom Finansowy QS + Mateusz Kijowski + mkijowski&dfqs.pl +27885 + National City Corporation + Joe Wright + joseph.wright&nationalcity.com +27886 + Blacknight Internet Solutions Ltd + Niall Donegan + niall&blacknight.ie +27887 + TUI-NET + Martin Heise + iana.org&L13.de +27888 + AxesNetwork Solutions inc. + Hugues Boisvert + IANAManager&axesnetwork.com +27889 + Beijing Huamei Netwave Technology Co.,Ltd. + Liu cai jun + liucaijun&huameiwave.com +27890 + Roundbox Inc. + David Hu + dhu&roundbox.com +27891 + Intercomgi Argentina S.R.L.
+ Hugo Javier Curti + hcurti&intercomgi.net +27892 + Navaneethan Shenoy + Navaneethan Shenoy + gunmetl&gmail.com +27893 + DMS + David Syzdek + oid&syzdek.net +27894 + Cube Optics AG + Carsten Marheine + marheine&cubeoptics.com +27895 + ISHD - Inline-Skaterhockey Deutschland + Stefan Gehrig + gehrig&ishd.de +27896 + Harpa Italia Srl + Cosma Damiano DE ANGELIS + mdeang&harpaitalia.it +27897 + QSI srl + Riccardo De Mattia + riccardo.demattia&qsi.it +27898 + RAILOG SPA + MASSIMO ANGELOTTI + bruna.caprioglio&railog.net +27899 + STC Raduga + Gubanov Alexander + a_gubanov&ntc-raduga.ru +27900 + IT-Optics sa + Damien Sandras + damien.sandras&it-optics.com +27901 + Pier 29 Networks CC + Graham Leggett + minfrin&sharp.fm +27902 + Iris, FGUP PKP + Andrey Bulgakov + aw_bulgakov&mail.ru +27903 + Ingenieurbuero fuer EDV und Netzwerktechnik - Stefan Hartmann + Stefan Hartmann + stefanh&hafenthal.de +27904 + Suncorp-Metway Ltd + Domain Administrator + dnsinternet&suncorp.com.au +27905 + Universitaet Wuerzburg + Rechenzentrum + warren&rz.uni-wuerzburg.de +27906 + Community4you GmbH + Tino Schwarze + registry&community4you.de +27907 + tvtv Services a branch of Sony United Kingdom Ltd. + Operations + ops&tvtv-services.com +27908 + projects4web.de + Stefan Berger + iana&projects4web.de +27909 + LogRhythm Inc. + Phillip Villella + phil.villella&logrhythm.com +27910 + Presto Networks, Inc. + John Cheng + jcheng&presto-networks.com +27911 + DDS, Inc. + Tsuguya Umeda + penum&dds.co.jp +27912 + Messaging Architects + Nick Stefan + Nick.Stefan&MessagingArchitects.COM +27913 + The Internet Group (Northland) Ltd. + Kim Shepherd + kim&igrin.co.nz +27914 + iXcall + Marco van Zoest + marco&ixcall.net +27915 + The Computer Centre for Icelandic Savings Banks + Vifill Sigurdsson + vifills&spar.is +27916 + NEC Portugal, S.A. + Ricardo Monteiro + Ricardo.Monteiro&aveiro.nec.pt +27917 + ATCO I-Tek Inc + Dave Malone + dave.malone&atcoitek.com +27918 + Wilfrid Laurier University + Carl Langford + clangford&wlu.ca +27919 + Loto-Quebec + Simon Bélanger + simon.belanger&loto-quebec.com +27920 + Viewtel Co., Ltd. + JinKyu Son + sonjk&view-tel.co.kr +27921 + Information and Mathematical Science Laboratory, Inc. + Kazuyoshi Takano + takano&imslab.co.jp +27922 + Tripoint Corporation Pty Ltd + Michael Marsden + mmarsden&tripoint.com.au +27923 + Trillian GmbH + Karlheinz Reimann + k.reimann&trillian.de +27924 + Titof3000.org + Christophe SIMON + titof3000&hotmail.com +27925 + Canon Ophthalmic Technologies Sp. z o.o. (formerly 'OPTOPOL Technology S.A.') + Patryk Woźniak + p.wozniak&canon-ot.com.pl +27926 + Future Voice Technology + Daniel Cooper + dcooper&fvt.com +27927 + Evangelische Kliniken Bonn gGmbH + Horst Aurisch + horst.aurisch&ek-bonn.de +27928 + Ayecka Communication Systems Ltd. + Avi Barda + avib&ayecka.com +27929 + tarczynski.net + Frank Tarczynski + ftarz&tarczynski.net +27930 + Justware Corporation + Jun Wu + vividy&justware.co.jp +27931 + GFI Italia SpA + Mariani Massimiliano + ma.mariani&ois.it +27932 + LignUp Corporation + Monica Pal + monica.pal&lignup.com +27933 + NOVOTECNO, S.L. 
+ Ivan Hernandez + ivan&novotecno.com +27934 + Computer Science Club of the University of Waterloo + Systems Committee + systems-committee&csclub.uwaterloo.ca +27935 + Sigil.org + Richard Kolkovich + richard&sigil.org +27936 + PD-House + Anders Mundt Due + anders.mundt.due&pd-house.dk +27937 + UTNOXIUM SL + Tomás Sánchez Villanueva + tsv&utnoxium.com +27938 + GNU Gatekeeper Project + Jan Willamowius + jan&willamowius.de +27939 + Intellectronika + Alex Mogilnikov + alx&intellectronika.ru +27940 + Stadtwerke Muenchen GmbH + Rosenmüller Martin + hoegen.e&swm.de +27941 + Elaborata Produtos e Treinamentos de Informatica Ltda. + Andre Avelino da Costa Santos + andre&elaborata.com.br +27942 + Stredni prumyslova skola, Usti nad Labem, Stara 99, p.o. + Martin Mudroch + noc&sps-ul.cz +27943 + VimpelCom Ltd. + Sergey Dorofeev + oid&beeline.ru +27944 + 3Leaf Networks + Dwayne Shows + dwayne&3leafnetworks.com +27945 + Upsys + Emiliano Castagnari + ecastag&gmail.com +27946 + thomaskoch.it + Thomas Koch + internet&thomaskoch.it +27947 + Gridline Communications Holdings Inc. + Lucas Nihlen + nihlen&gridlinecommunications.com +27948 + CPM Ltd. + Nikolay Fomichev + sitrix.camheds&gmail.com +27949 + C. Alex. North-Keys + C. Alex. North-Keys + erlkonig&talisman.org +27950 + Sterling Consulting Group, Inc. + Anthony Brock + info&sterlingcgi.com +27951 + ISGenesis, Inc. + C. Alex. North-Keys + erlkonig&isgenesis.com +27952 + Gazprombank Certification Authority + Sergey Proselkov + Sergey.Proselkov&gazprombank.ru +27953 + bitPlus GmbH + Bernd Holzinger + bholzinger&bitplus.de +27954 + Flexoft Ltd. + Grant Amaspeur + alex&flexoft.co.uk +27955 + GridNode + Thomas Yee + its&gridnode.com +27956 + OPNATEL + Nerea Azpilicueta + nerea.azpilicueta&opnatel.es +27957 + Rettig ICC + Erich Maierhofer + erich.maierhofer&rettigicc.com +27958 + weComm Ltd + Darren Bourget + darren.bourget&wecomm.com +27959 + International Power Switch ApS + Martin Johansson + martin&ipwrswitch.com +27960 + e-Business & Resilience Centre + Sebastien Bourgasser + sebastien.bourgasser&ebrc.lu +27961 + Tribunal de Justica de Santa Catarina + Gustavo Dagostin da Silva + gds11561&tj.sc.gov.br +27962 + Demonhost Inc. + Soós László + soos.laszlo&demonhost.hu +27963 + Westec InterActive Security + Sean McClanahan + sean.mcclanahan&westecnow.com +27964 + Commugen Ltd. + Eyal Sassoon + eyal&commugen.com +27965 + Scribe Technology Inc. + Martin Lanser + mlanser&scribetechnology.com +27966 + Citiway Technology Co.,Ltd + Simon Tian + simon&citiway.com.cn +27967 + APNIC Pty Ltd + Sanjaya Sanjaya + technical&apnic.net +27968 + Akonix Systems, Inc. + John Bolton + jbolton&akonix.com +27969 + Multical Ltda. + Davor Buvinic + dbuvinic&multical.cl +27970 + kryglik.com + George Kryglik + nospam&kryglik.com +27971 + China Infosec Technologies Co.,Ltd. + Meng Zhang + zhangm&infosec.com.cn +27972 + Mac Papers, Inc. + Richard Cassidey + richard.cassidey&macpapers.com +27973 + Open Finance, LLC + Nicholas Sushkin + nsushkin&openfinance.com +27974 + PONTILLO.EU + BRUNO PONTILLO + iana&pontillo.eu +27975 + Centec Networks Inc. + Alexander Liu + xliu&centecnetworks.com +27976 + GEO CONCEPT SA + M. HOUDAS Olivier + olivier.houdas&geoconcept.com +27977 + Genex + Eric Sanders + helpdesk&genex.com +27978 + Stewart Information Service Corporation + Gary Morris + Gary.Morris&stewart.com +27979 + Met Sacramento High School + Sam Gammon + samusweb&gmail.com +27980 + POSYSTECH Co., Ltd.
+ Edward Park + howdoud0&hotmail.com +27981 + Intrachaos.net + Randy Hall + randy&intrachaos.net +27982 + Servelec Technologies (formerly 'RTUnet Pty Ltd') + David Williams + david.williams&servelec-technologies.com +27983 + TechGuard Security, LLC + David Maestas + david.maestas&techguardsecurity.com +27984 + MSBC Pty Limited + Michael Janjgava + support&msbc.com.au +27985 + Swinburne University of Technology + Bob Schorer + bob&swin.edu.au +27986 + Avonaco Systems Inc. + Yunfei Zhang + zhangyf&avonaco.com +27987 + DataAccess Inc. + Shoji Kosai + manager&dacc.jp +27988 + TOPFIELD Co., Ltd. + Hyun Ko + hko&topfield.co.kr +27989 + Nepro Japan Co., Ltd. + Kim, Haksung + ip_info&nepro.jp +27990 + Ltd. AVTOR + Gennady Dyadyk + Gennady.Dyadyk&author.kiev.ua +27991 + Media Layers Inc. + Nir Klar + nir.klar&mlayers.com +27992 + Endesa Network Factory S.L.U. + Javier Oviedo + joeplc&gmail.com +27993 + Vigintos Elektronika + Kestutis Banisauskas + kestas&vigintos.com +27994 + Xunta de Galicia. Conselleria de Innovación e Industria. Dirección Xeral de Promocion Industrial e S + Antonio González Seoane + antonio.gonzalez&sociedadedainformacion.eu +27995 + FORTH CORPORATION PUBLIC COMPANY LIMITED + Mr. Prot Pattakaree + prot&forth.co.th +27996 + Entorno Digital + Systems Department + miles&entorno.es +27997 + Me-On-Tv + Roderick Groesbeek + meontvsnmp&roderick.triple-it.nl +27998 + Triple IT + Roderick Groesbeek + snmpenterprise&roderick.triple-it.nl +27999 + Trepesch GmbH + Frank Kipfmüller + frank.kipfmueller&trepesch.de +28000 + Aptivate Ltd + Chris Wilson + chris+iana.oid&qwirx.com +28001 + Tufts Associated Health Plans, Inc. + EIM Security Administration + EIM_Administration&tufts-health.com +28002 + The Source + Marcio Garcia + mgarcia&thesource.com.br +28003 + Bender Est. + Jeremy Bender + email&the-benders.com +28004 + Mobiletech AS + Sverre Marvik + sm&mobiletech.no +28005 + LissProductions + John Liss + john&Lissproductions.com +28006 + United Space Alliance + Joe Otto + joe.a.otto&usa-spaceops.com +28007 + Jasmin Buchert + Jasmin Buchert + oid10-jb&jasmin.eu.org +28008 + Canu Group LLC + Tom Danner + tdanner&canugroup.com +28009 + ActionCOACH Inc. + Peter Tiggerdine + petertiggerdine&actioncoach.com +28010 + Kumamoto Technology and Industry Foundation + Kenichiro Kozuma + ken&kmt-ti.or.jp +28011 + NETNIC CORPORATION + Daniel Mullen + domain.services&netnic.ca +28012 + ChannelSoft (Beijing) Technology Co.,Ltd + LiMingyan + channelqa&channelsoft.com +28013 + Photobucket, Inc. + Alan Sparks + asparks&photobucket.com +28014 + BROADVOX GmbH + Daniel Mullen + domain.services&broadvox.com +28015 + metrocom corporation + Daniel Mullen + domain.services&metrocom.ca +28016 + D-Cube Resource + Jan Dries + info&dcube-resource.be +28017 + tekuso H. Kaelber + Hannes Kaelber + kaelber&tekuso.de +28018 + netjfwatcher + Yoshimasa Matsumoto + matsumoto&netwatcher.jp +28019 + TradeRoot Technologies (Pty) Ltd + Paul Mouton + amy.rautenbach&traderoot.com +28020 + Tefnet + Tomasz Jezierski + snmp&tefnet.pl +28021 + trevedi it-consulting gmbh + Klaus Brockerhoff + k.brockerhoff&trevedi.de +28022 + VITALPHONE SARL + ATHANASE Jean-René + iana.ajr&vitalphone.net +28023 + VITALIX SARL + ATHANASE Jean-René + iana.ajr&vitalix.org +28024 + Pilot Systems + Alexandre Garel + contact-iana&pilotsystems.net +28025 + Capita Business Services Ltd + Richard Stone + richard.stone&capita.co.uk +28026 + Hostbasket + Hans Thienpondt + hans&hostbasket.com +28027 + Moviclips S.A. 
+ Diego Helmann + diegoh&moviclips.com +28028 + Urquhart Consultancy + Adrian Urquhart + adrian&urquhart-consultancy.com +28029 + Mera Systems, Inc. + Ilya Ustinov + ilya.ustinov&mera-systems.com +28030 + The Sidwell Company + David Larson + DLarson&sidwellco.com +28031 + Brightprofiles Resources ltd + Irem chimezie Bright + brightprofiles&yahoo.com +28032 + creat.io, s. r. o. (formerly 'Genesys, s. r. o.') + Richard Toth + toth&genesys.sk +28033 + VERISOFT CONSULTING TECNOLOGIA DA INFORMAÇÃO LTDA + Fabio Maximo + infra&verisoft.com.br +28034 + Belkin Logistics, Inc + Louise Mc Cullough + louisem&belkin.com +28035 + Venture Data L.L.C. + Frank Barney + frankb&venturedata.com +28036 + TalNet + Tal Raveh + talr&talr.co.il +28037 + Iaso Pty Ltd + Andrew Patterson + registration+iana&iaso.com.au +28038 + Triharpskel Productions + Wayne Morrison + wm-oid&triharpskel.com +28039 + Comwave Telecom Inc. + Ron Barzakay + rb&comwave.net +28040 + Technological Educational Institute (TEI) of Crete + Mr. Babis Tsatsarakis + babis&staff.teicrete.gr +28041 + Jackpot.uk.net + Jack Cleaver + jack&jackpot.uk.net +28042 + KMK Solutions + Kurt Kincaid + kurt.kincaid&kmksolutions.com +28043 + Altorian Systems Inc + Angela Mullins + info&altoriansystems.com +28044 + TECOM CO., LTD. + Michael Jeng + mjeng&taipei.tecom.com.tw +28045 + URALSIB Financial Corporation + Vladimir Makushev + ca&uralsibbank.ru +28046 + Distributed Management Sytems Ltd. + Dr Basil Philipsz + basil&casque.co.uk +28047 + Novabase srl + Marignoni Aurelio + marignoni&novabase.it +28048 + Presto - prekladatelske centrum s.r.o. + Filip Volejnik + domeny&presto.cz +28049 + ms2-GmbH + Marc Sieburg + info&ms2-gmbh.de +28050 + Oxford Computer Group (2005) Limited + James Booth + james.booth&oxfordcomputergroup.com +28051 + Atlas IT + David Barroso Pardo + networking&atlasit.com +28052 + Boomer Consulting, Inc. + Andrew Hanenkamp + andrew.hanenkamp&boomer.com +28053 + Tervela Inc. + Ernest Grella + egrella&tervela.com +28054 + Tower Cloud, Inc. + Robert England + rfe&england-net.com +28055 + Beck Datentechnik + Andreas Beck + becka-iana-oid&bedatec.de +28056 + Jojo + A. Bienvenue + mplp&free.fr +28057 + NebuAD Inc. + Ping Chen + ping&nebuad.com +28058 + SNDI (Société Nationale de Développement Informatique) + SAHOUE Jonas + jsahoue&yahoo.fr +28059 + Mohawk Software + Mark L. Woodward + markw&mohawksoft.com +28060 + Telairity, Inc + Adrian Pop + apop&telairity.com +28061 + Interzone Entertainment LLC + Brendan Ragan + brendan.ragan&interzonegames.com +28062 + Kewego SA + Network Operation Center + noc&kewego.com +28063 + SanDisk IL Ltd + Avraham Shimor + avraham.shimor&sandisk.com +28064 + OKTET Labs Ltd. + Piotr Lavrov + Piotr.Lavrov&oktetlabs.ru +28065 + TraviAustria Datenservice für Reise und Touristik GmbH & Co. Nfg. KG + Stefan Neumann + traviadmin&travi.com +28066 + Inomacomp s.r.o + Tomas Pavlovic / DUSAN NAGY + dnagy&inomacomp.sk +28067 + Softcreate Co., Ltd.
+ Takeshi Furuhata + admin&softcreate.org +28068 + connectBlue AB + Pelle Bergkvist + pelle.connectblue&hotmail.com +28069 + Universidad de Antioquia + Jefatura de Cómputo + computo&arhuaco.udea.edu.co +28070 + Illertech Datensysteme Gbr + Fischer Daniel + danfis&dessys.de +28071 + Gentiae Clinical Research + Michael Cheung + michael.cheung&gentiae.com +28072 + Engine Yard + Riki Crusha + company&engineyard.com +28073 + Grace Reformed Baptist Church + David White + ldap&grbc.net +28074 + Ricu LLC + Domain Administrator + call.ricu&yahoo.com +28075 + Support My System (UK) ltd + Gijs van Reijendam + Gijs.van.Reijendam&SupportMySystem.com +28076 + Recommind, Inc. + Igor Ebner de Carvalho + snmp-admin&recommind.com +28077 + WiseSport (Hong Kong) Limited + Clement Chow + camille.mok&wisespotgroup.com +28078 + Genos Open Source S.L. + Valenti Jove + genos&genos.es +28079 + OpenScale Technologies GmbH + Constantin Rack + constantin.rack&openscale.org +28080 + "ISG" Joint Stock Company + Vyacheslav Smirnov + admnoc&isgr.ru +28081 + New Castle Community School Corporation + Shawn Iverson + shawn&nccsc.k12.in.us +28082 + Rdesign + Remco Kleine + r.kleine&rdesign.nu +28083 + SOSDG + Andrew D Kirch + admins&sosdg.org +28084 + Polk Mechanical Company, LLC + Ty Lamb + Ty.Lamb&PolkMechanical.com +28085 + STMIK Akakom + Yudhi Kusnanto + yudhi&akakom.ac.id +28086 + Universitaet Passau + Rechenzentrum + absmeier&uni-passau.de +28087 + Albentia Systems, S.A. + Noelia Morón + nmoron&albentia.com +28088 + NEC Unified Solutions + Jaap Keuter + jaap.keuter&nec-unified.com +28089 + Sentrigo Ltd + Guy Lichtman + GuyL&Sentrigo.com +28090 + IAM Technology, Inc. + Dr. John A. Nuber + john.nuber&iamtech.com +28091 + Thelese Management + Matthew King + mdking&thelesemanagement.com +28092 + Covelight Systems, Inc. + Ricky Beam + ricky.beam&radware.com +28093 + Colombia Movil + Raul Perilla + raul.perilla&colombiamovil.com.co +28094 + Voicemail Anywhere, Inc. + Fred Radford + Fred.Radford&VoicemailAnywhere.com +28095 + JPEO JTRS + Sean Hugelmaier + srh&spawar.navy.mil +28096 + Route1 Security Corp. + Rene McIver + rene.mciver&route1.com +28097 + ACKSYS + Philippe DUPONT + philippe.dupont&acksys.fr +28098 + National Interbank Transaction Management and Exchange Co., Ltd. + Mr. Somchart Fugkeaw + somchart&pcc.co.th +28099 + Processing Center Co., Ltd. + Mr. Somchart Fugkeaw + somchart&pcc.co.th +28100 + Groupwhere Consulting, L.L.C. + Miles Lott + mlott&groupwhere.org +28101 + MIVAN KIER JOINT VENTURE LIMITED NEWPARK BUCHAREST + Costin Gusa + cgusa&mivankier.ro +28102 + baringanet gmbh + Daniel Schnyder + scd&baringanet.ch +28103 + Audionics Ltd + Phil Myers + philm&audionics.co.uk +28104 + Evorad + Kostas Karolemeas + kostas.karolemeas&evorad.com +28105 + Arcelor Bremen GmbH + Marc Djacenko + Marc.Djacenko&arcelor-bremen.com +28106 + GEUTEBRUECK + Dr. M. Döring + matthias.doering&geutebrueck.de +28107 + Liangjiang Communications System Inc. + Kehua Xiao + xiaokh&liangjiang.com +28108 + Homisco, Inc + John Peterson + jpeterson&homisco.com +28109 + Rozis BV + W. van Tongerloo + Info&rozis.nl +28110 + Near Infinity Corporation + David Singley + david.singley&nearinfinity.com +28111 + ValuePoint Networks, Inc. + Michael Edison + mredison&valuepointnet.com +28112 + Pioneer Bible Translators + Stephen Fierbaugh + stephen.fierbaugh&pbtusa.org +28113 + Thai Digital ID Co., Ltd. + Mr. Somchart Fugkeaw + somchart&thaidigitalid.com +28114 + Quies Net + Pascal S. 
de Kloe + pascal&quies.net +28115 + Shenzhen o'wonder Tech Inc + Alex Shen + alex.shen.cn&gmail.com +28116 + 4-tune GmbH + Christian Schneider + christian.schneider&4-tune.ch +28117 + Clear Memo + Ken Wiesner + operations&openfax.com +28118 + ColdSpark Inc + Paul Trout + ptrout&coldspark.com +28119 + Steelworks Technologies + Steven Lee + steelworks.net&gmail.com +28120 + Pinacono Software Studio + Chawalit Limsowan + chawalit&pinacono.com +28121 + Hyperband Networks, Inc. + Justin Hsu + jhsu&hyperbandnetworks.com +28122 + ObjectMastery Pty Ltd + Bradley Tate + omadmin&objectmastery.com +28123 + Intermodus d.o.o + Hrvoje Hladnik + hrvoje.hladnik&intermodus.net +28124 + C-Group + Paul Palacios + paul&c-group.com +28125 + TRINAPS + Gauthier DOUCHET + gauthier.douchet&trinaps.com +28126 + ERAMON GmbH + Gerd Hassler + gerd.hassler&eramon.de +28127 + COSSILYS 21 + Joel Harhellier + joel.harhellier&cossilys21.com +28128 + CIT + Ian Tighe + consulting&iantighe.com +28129 + Cleversafe , Inc. + Alan Holmes + it-admin&cleversafe.com +28130 + MICHATECH + Michael Holmetoft Hansen + MHH&C.DK +28131 + iBro + Muhammad Ibrahim Nurmansyah + ibrahim&ibro.web.id +28132 + Lumen Technologies + Todd Tomason + todd.tomason&lumen.com +28133 + INSIST + Yudhi Kusnanto + admin&insist.or.id +28134 + LOT Polish Airlines + Pawel Klosek + helpnet&lot.pl +28135 + Cominfo a.s. + Tomas Januska + tjanuska&cominfo.cz +28136 + Delta Dore SA + Peter Münster + pm&deltadore.com +28137 + NXP Semiconductors B.V. + Hauke Meyn + hauke.meyn&nxp.com +28138 + Icepeak AB + Tobias Svensson + tobias&icepeak.se +28139 + Hrvatski Telekom d.d. + Bruno Viland + bruno.viland&t.ht.hr +28140 + IT GAMES COMERCIO E SERVICOS DE INFORMATICA LTDA + Fabio Maximo + infra.itgames&gmail.com +28141 + CedarOpenAccounts + Mark Stevenson + mark.stevenson&cedaropenaccounts.com +28142 + Nautel Limited + Robert S. Martin + smartin&nautel.com +28143 + Altaigazprom + Eugene Kostenko + dek&altaigazprom.ru +28144 + Trilus d.o.o. + Marko Udvanc + Marko.Udvanc&trilus.com +28145 + origenis GmbH + Dr. Henrik Kuhn + henrik.kuhn&origenis.de +28146 + Fitzpatrick Enterprise OID + Sean Fitzpatrick + spfitzpatrick&orange.nl +28147 + Argon ST + Matt Keeton + Matt.Keeton&argonst.com +28148 + saint-paul luxembourg s.a. + Jeannot THEIS + jeannot.theis&saint-paul.lu +28149 + RBAS + Mr. Roel Werkman + roelwerkman&hotmail.com +28150 + IMS Global Learning Consortium Inc. + Lisa Mattson + lisa&imsglobal.org +28151 + BluePoint Data, Inc. + Chad Somerlot + csomerlot&bluepointdata.com +28152 + Palo Alto Software, Inc. + Alex Boone + alex&paloalto.com +28153 + Newfound Communications, Inc + Leo Liu + leo&newfoundcomm.net +28154 + Lattech Systems (Pty) Ltd + Truhann van der Poel + truhann.vanderpoel&lattech.co.za +28155 + Computer Network Solutions, LLC + Michael Koprowski + mkoprowski&computerns.com +28156 + Reveredata, LLC + Alexander Levin + alevin&reveredata.com +28157 + Octon Technology(Shanghai) Ltd. + Kent Wang + kent.wang&octon.cn +28158 + Great Software Laboratory Pvt Ltd + Atul Narkhede + atul&gs-lab.com +28159 + OOPS Development Organization + Joung Kyun Kim + joungkyun&gmail.com +28160 + Mototech Inc. + Luke Chen + luke_chen&mototech.com.tw +28161 + Anzsoft Co., Ltd. + Zhan Caibao + zhancaibao&gmail.com +28162 + Kazinvestbank + Alexander Maliyev + amaliyev&kib.kz +28163 + Beijing Institute of Technology + Ling Li + lilingv&gmail.com +28164 + think-tux + jeremy paris + jeremy.paris&gmail.com +28165 + InterpharmData Systems (Pty) Ltd. 
+ Malcolm McLean + malcolmm&interpharm.co.za +28166 + Webbed Feet + Peter van Heusden + pvh&wfeet.za.net +28167 + atip GmbH + Herbert Reininger + support&atip.de +28168 + Nextlink Technologies, LLC + Rick Burton + rick.burton&nextlinktechnologies.com +28169 + World Trade Organization + Ghassan Karam + pki&wto.org +28170 + South African National Bioinformatics Institute (SANBI) + Peter van Heusden + pvh&sanbi.ac.za +28171 + Corps Rhenania + Mr. Eike Moenkemeier + box&moenkemeier.info +28172 + Bibliotheksservice-Zentrum Baden-Wuerttemberg + Hans-Jürgen Götz + pc-service&bsz-bw.de +28173 + Advanced Control Systems Design, Inc. + Christopher Harrington + chris&acsdi.com +28174 + Solid State Logic + Marek Wojtulewski + itsupport&solid-state-logic.com +28175 + GlenTech Consulting + Matthew Dickie + admin&glentech-consulting.com +28176 + Rockport PA, LLC + Rockport Technical + webmaster&rockportllc.com +28177 + Buddhadata Consulting, Inc. + Scott C. Sosna + ssosna&buddhadata.com +28178 + Internet Software Solutions + Steve Hardt + shardt&internetsoftwaresolutions.biz +28179 + Bank Zachodni WBK S.A. + Piotr Lapinski + cpsinfo&bzwbk.pl +28180 + Cvörnjek Lagerlogistik GmbH + Siegfried Cvörnjek + Siegfried.Cvoernjek&cvr.at +28181 + Whitestein Technologies AG + Oliver C. Hoeffleur + och&whitestein.com +28182 + Mobile Visions + Grzegorz Sobanski + silk&boktor.net +28183 + MungerWare + Tim Hosking + iana&trhosking.com +28184 + Hagenuk Marinekommunikation GmbH + Heinrich Fluegel + heinrich.fluegel&hmk.atlas-elektronik.com +28185 + zzvcom + sun yantong + sunyantong&zzvcom.com +28186 + SSA Global Technologies + M.RatnaShilpa + m_shilpa&yahoo.co.in +28187 + Uptime Power Services, Inc. + Registrar + snmp&dc-power.com +28188 + Kim Minh Kaplan + Kim Minh Kaplan + kaplan&kim-minh.com +28189 + Intelivox + Chris Maxwell + Christopher_Maxwell&yahoo.com +28190 + Genoscope -- Centre National de Séquencage + Claude Scarpelli + iana&genoscope.cns.fr +28191 + JVS do Brasil LTDA + Davi Baldin + davi&jvsinfo.com.br +28192 + digitallysign + Sasa Borjanovic + sasa&borjanovic.com +28193 + WebPresso + Nik Arber + jmail&bluewin.ch +28194 + Exterity Ltd + David Peat + david&exterity.co.uk +28195 + SKY Italia s.r.l + Ciro Gaglione + ciro.gaglione&skytv.it +28196 + BroadWare Technologies, Inc. + Vinod Raju + vinod&broadware.com +28197 + Wichita State University + Mike Erickson + hostmaster&wichita.edu +28198 + SNC-Lavalin Energy Control Systems Inc. + Ferdi Di Marco + ferdi.dimarco&slecs.ca +28199 + kmel + Klaus Melchior + kmel&kmel.de +28200 + CBC Companies, Inc + DNS Admin + dns&cbc-companies.com +28201 + DBG Inc. + Dave Josephsen + dave&dbg.com +28202 + Albert Bauer KG + Udo Lembke + ulembke&abc-digital.com +28203 + Linux Kernel Austria + Oliver Falk + oliver&linux-kernel.at +28204 + Synerway + Laurent NINI + lrn&synerway.com +28205 + 7-ip Pty Ltd + Frits Klok + frits&7-ip.com +28206 + IKB Deutsche Industriebank AG + Martin Behrendt + martin.behrendt&ikb.de +28207 + Rockford IT Limited + Ben Sykes + ben.sykes&rockford-uk.com +28208 + Accurite Technologies Inc. + Richard Kelly + rkelly&accurite.com +28209 + Up-Mobile Corp + Arturo Vermolen + arturo.vermolen&up-mobile.com +28210 + SunRocket Inc. + Peter Kuebler + peter.kuebler&sunrocket.com +28211 + Emergency Response Management Services Inc. + Matt Trinneer + matt.trinneer&ermscorp.com +28212 + FARIISTA LIMITED + MR. IMTIAZ A. 
BAHADUR + imtiaz&fariista.com +28213 + Open joint-stock company "Agency for Mortgage Housing Crediting" + Evdokimov Andrew + mail&ahml.ru +28214 + KERNEOS + Philippe Esposito + info&kerneos.com +28215 + 3Roam SA + Simon Bryden + simon.bryden&3roam.com +28216 + FingerPost Ltd + chris hugh-jones + miscmail&fingerpost.co.uk +28217 + Radiant Logic PTY LTD + Dan Zoltak + ldap&radiantlogic.com.au +28218 + The MARF Research and Development Group + Serguei A. Mokhov + mokhov&users.sourceforge.net +28219 + Liam Schneider Consulting + Liam Schneider + liamschneider&hotmail.com +28220 + Brighthouse Networks + Ron Knapp + ron.knapp&mybrighthouse.com +28221 + Stabat Solutions Pty Ltd + Lucas Barbuto + lucas&stabat.com +28222 + Spectrum Communications FZE + Ashraf Karim + ashraf&spectrummea.com +28223 + Quarto Software GmbH + Christian Walter + support&quarto.at +28224 + Efficens Software Ltd. + Gil Givati + gil&efficens-software.com +28225 + OS Security Ltd + Fabio Origlia + fabio.origlia&os-security.com +28226 + Digidiensten + M. Piscaer + my&masterpe.nl +28227 + BTC AD + Valeria Boyadjieva + valeria.boyadjieva&btc.bg +28228 + WIFINET,s.r.o. + Jančo Matej + matej.janco&wifinet.sk +28229 + Pharos Communications Ltd + Spencer Rodd + iana-info&pharos-comms.com +28230 + Fidback CRM Services + Oualid Jabnoune + ojabnoune&fidback.fr +28231 + AM Fire & Electronic Services, Inc. + Daniel Miller + dmiller&amfes.com +28232 + Prince George's Community College + David Farley + siteadmin&pgcc.edu +28233 + iSERVE Ltd. + Colin Doig + colin_doig&hotmail.com +28234 + Cypress Communications, Inc. + Jeff Gehlbach + networktools&cypresscom.net +28235 + Ellie Mae, Inc. + Michael Tan + michael.tan&elliemae.com +28236 + MySpace, Inc. + Chris Bell + cbell&myspace.com +28237 + CodeRyte, Inc + Emery Ford + eford&coderyte.com +28238 + ADE + Michael Baggett + mbaggett&ademiami.org +28239 + University of the Ryukyus + Hiromitsu Syouji + admin&cc.u-ryukyu.ac.jp +28240 + Wavelength Communications, Inc. + Daniel Corbe + dcorbe&gmail.com +28241 + RidgeRun LLC + Michael Frank + mfrank&ridgerun.com +28242 + OraTel Pty (Ltd) + Jan Van Eeden + jan&oratel.co.za +28243 + Pulsar Inc. + Andrey Pichuk + apin&nplpulsar.ru +28244 + Haus am Rügendamm + Administrator + administrator&har.fh-stralsund.de +28245 + agorum Software GmbH + Oliver Schulze + oliver.schulze&agorum.com +28246 + Trusted Peer Networks, Inc + Graham Finlayson + graham&simplifymedia.com +28247 + Maxis Broadband Sdn Bhd + Larsen Barcelon + LARSENB&maxis.com.my +28248 + Newport Development Group + Christopher James + Christopher.James&NewportDevelopmentGroup.com +28249 + eonas IT-Beratung und -Entwicklung GmbH + Helmut Manck + snmpregistry&eonas.de +28250 + Enerconv S.r.l. + Mario Mariotti + mario.mariotti&enerconv.it +28251 + The Constitutionalist Party of Iran (CPI) + Bahman Zahedi + bahman.zahedi&t-online.de +28252 + Informatec + Hans-Peter Riehlein + iana&informatec.riehlein.de +28253 + Scivis wissenschaftliche Bildverarbeitung GmbH + Dr.Uwe Engeland + info&scivis.de +28254 + COBS AB + Anders Niklasson + anders.niklasson&cobs.se +28255 + Unassigned + Removed 2007-10-23 + ---none--- +28256 + InfoGuard AG + René Mürset + rene.muerset&infoguard.com +28257 + NUTRICHEM Diät+Pharma GmbH + H. Rüdiger Förster + r.foerster&nutrichem.de +28258 + EidosMedia S.p.A. 
+ Massimo Barsotti + massimo.barsotti&eidosmedia.com +28259 + Landeshauptstadt Stuttgart (formerly 'bw-trust CA') + Andreas Demand + iana-req&stuttgart.de +28260 + PROGIWEB + Olivier MARTINERIE + info&progiweb.com +28261 + ProfiForms Projekt GmbH + Andreas Ruch + andreas.ruch&profiforms.de +28262 + Chick-fil-A, Inc + Josh Figaretti + josh.figaretti&chick-fil-a.com +28263 + Total Spin Brasil Serviços de Telecomunicações Ltda + Maria Soller + maria.soller&spinmymobile.com +28264 + CLINICARE Corporation + Devin Nate + devin.nate&clinicare.com +28265 + FSB Förster SystemBeratung + H. Rüdiger Förster + r.foerster&fsb-wilhermsdorf.de +28266 + L.A. Specialties Inc. + Fred R. Goure + gouref&gmail.com +28267 + CYBERTRON CO., LTD. + Stacy Back + skback&cbtron.com +28268 + Global Red Solucoes em Software Livre Ltda. + Cleber Rodrigues + cleber.gnu&gmail.com +28269 + Institute for Theoritical Computer Science, Tsinghua University + Wei Yu + zig.wei&gmail.com +28270 + Göteborgs Hamn AB + Mats Hermansson + mats.hermansson&portgot.se +28271 + HTBLuVA Moedling + Elmar Volgger + Elmar.Volgger&htl.moedling.at +28272 + Concurrent Systems (Pty) Ltd + Braam van Heerden + oid&concurrent.co.za +28273 + Starling Advanced Communications + David Nidam + dady&starling-com.com +28274 + MC Control s.r.o. + Ing. Ondrej Pouchly + pouchly&mccontrol.cz +28275 + ioSafe, Inc. + Marc Bernasconi + marc.bernasconi&iosafe.com +28276 + Galeno + Kristoffer Sjoberg + info&galeno.se +28277 + LucasP.com + Lucas Poznanski + lucas.poznanski&gmail.com +28278 + DinnoVan + ImChang Baek + billy&dinnovan.com +28279 + Rocksteady Networks LLC + Mr. Eric White + ewhite&rocksteady.com +28280 + GHL Systems Berhad + Wong Y-Mi + ymi&ghl.com +28281 + Sling Media, Inc. + Ilya Asnis + ilya&slingmedia.com +28282 + Kronos, Inc + James Turner + jamesturner&kronos.com +28283 + MEDITECNIA INNOVA SL + Arturo Bustos + info&meditecnia.com +28284 + Estonian eHealth Foundation + Indrek Järve + indrek&e-tervis.ee +28285 + AGES - Österreichische Agentur für Gesundheit und Ernährungssicherheit GmbH + DI. Mayer Hans + hans.mayer&ages.at +28286 + Daon + Adrian Allen + aallen&daon.com +28287 + Optium Corporation + Terry Lim + tlim&optium.com +28288 + LightRail Inc. + Kelvin Su + lightrail.com&gmail.com +28289 + WhisperItLoud LLC + Mark Gregory + msngregory&hughes.net +28290 + XAware, Inc. + Kirstan Vandersluis + kirstan&xaware.com +28291 + Naval Postgraduate School Center for Network Innovation and Experimentation + Dr. Alex Bordetsky + abordets&nps.edu +28292 + Stepstone Technologies Inc. + Kathleen Duke + kathy&stepstonetech.com +28293 + Julong Sci-tech Co., Ltd. + Wangquangui + prince.wang.cn&gmail.com +28294 + Instituto Reconcavo de Tecnologia + Chad Riggle + suporte&reconcavotecnologia.org.br +28295 + Gainspan Corporation + Pankaj Vyas + pankaj.vyas&gainspan.com +28296 + von KARMAN INSTITUUT VOOR STROMINGSDYNAMICA ivzw + Raimondo Giammanco + giammacc&vki.ac.be +28297 + Lublin Technical University + Pawel Luty + p.luty&pollub.pl +28298 + Info.nl/hf b.v. + Dick de Waal + dick&info.nl +28299 + unixgarage.com + Mohamed Hussein Sayed + unxfan&acm.org +28300 + Miracle TV Corporation + Narno Dorbecker + support&mitvcorp.com +28301 + ONERA + Philippe CERONI + philippe.ceroni&onera.fr +28302 + One Commerce International Corporation + Glenn Go + data6&onecommerce.com.ph +28303 + C.R. Enterprise Business Services + Charles J. 
Reid + cjreid&sonic.net +28304 + Nexcan Solutions + Ata Haq + sales&nexcan.ca +28305 + Universitas Islam Indonesia + Wawan Indarto + wawan&fti.uii.ac.id +28306 + Foerderverein Internet Ulm/Neu-Ulm e.V. + Martin Becker + martin.becker&bn-ulm.de +28307 + PostPath, Inc. + Glen Dayton + gdayton&postpath.com +28308 + Zaragoza Network Management Research Group + Alvaro Alesanco + alesanco&unizar.es +28309 + StatRad LLC (formerly 'Stat Radiology Medical Corp') + Joe Moock + jmoock&statrad.com +28310 + Center for Computational Biology and Bioinformatics + Brandon Peters + brjpeter&iupui.edu +28311 + TECHNOGAMA Ltd. + Alexander Yeliseyev + ase&takas.lt +28312 + Penson GHCO + Chris Le Grand + clegrand&pensonghco.com +28313 + Lantic Systems A/S + Henrik Kjær Nielsen + hkn&lantic-systems.com +28314 + Ariescommerce Ltd. + Nedelin Georgiev + nedelin&audiosystems.bg +28315 + Neology (Pty) Ltd + Regardt van de Vyver + iana&neology.co.za +28316 + Branden Inc. + Filipe Brandenburger + filbranden&gmail.com +28317 + Borran Technologies Inc. + Colton Jamieson + colton&borran.com +28318 + Accelink Technologies Co.,Ltd + geng xu + geng.xu&accelink.com +28319 + IMIMOBILE Private LIMITED + Mr. Bhat S.S + ssbhat&imimobile.com +28320 + Geotechmin OOD + Alexander Spasov + a.spasov&geotechmin.com +28321 + Oniontech, Co., Ltd. + Justine Lee + mongmom&oniontech.com +28322 + THOMSON STS + Tanguy MILLET + tanguy.millet&thomson.net +28323 + Central-European International Bank Ltd. + Márta Kardos + martikardos&cib.hu +28324 + 3ple-Media BV + Costi Manda + costi.manda&3ple-media.com +28325 + Underground_8 Secure Computing GmbH + Stefan Heumader + sh&underground8.com +28326 + ko-sys + Wolfgang Kohnen + infrastruktur&ko-sys.com +28327 + Harry & David Operations, Corp. + Eric Obendrauf + eobendrauf&harryanddavid.com +28328 + Bayerischer Rundfunk + Johann Sporer + johann.sporer&brnet.de +28329 + Base Technologies, Inc. + Charles A. McCrobie + cmccrobie&basetech.com +28330 + Meshdynamics, Inc + Sriram Dayanandan + sriram&meshdynamics.com +28331 + Smiley Media, Inc. + Eric White + eric&smileymedia.com +28332 + Brink's Home Security + Terry G Phillips + terry.phillips&brinks.com +28333 + Simon Niechzial EDV Dienstleistungen + Simon Niechzial + simon&niechzial.de +28334 + Siemens A/S (formerly 'pulz8 Communications ApS') + Simon Staudt + simon.staudt&siemens.com +28335 + Xworks NZ Limited + Kevin Buckle + kevin.buckle&xworks.co.nz +28336 + Voxeo + Chris Maxwell + cmaxwell&voxeo.com +28337 + albatron S.r.l. + Roberto Ferretti + roberto.ferretti&albatron.com +28338 + Mushypea Industries + David Turner + dave&mushypea.net +28339 + Universidade Federal do Pará + Márcio Pinheiro de Aragão + aragao&ufpa.br +28340 + iseek Communications Pty. Ltd. + Stuart Low + sysadmin&iseek.com.au +28341 + Civilogix, Inc. + Elana Hinners + elana&civilogix.com +28342 + Whitley College The Baptist College of Victoria + Andrew Ross + aross&whitley.unimelb.edu.au +28343 + Rogers Wireless - OSS + Yassir Laraki + yassir.laraki&rci.rogers.com +28344 + DeskNet Inc. + Robert Kelly + robert.kelly&desknetinc.com +28345 + SFC Co.,Ltd. + Motoyoshi Hamasaki + hamasaki&imagination.co.jp +28346 + Duplex Secure Ltd + Nick Grolsen + busduplex&duplexsecure.com +28347 + SAMURAIWORKS + Masatsugu Hosoi + hosoi&samuraiworks.com +28348 + Pimp My Proxy + Sam McLane + sam&mclane.cc +28349 + PT. 
Bank Negara Indonesia, Tbk + Zaldy Suhatman + zaldy.suhatman&bni.co.id +28350 + Shenzhen First Mile Communications Ltd + LiQuan Wang + liquan.wang&firstmile.com.cn +28351 + Vimicro Corporation + Yuan Chen + chenyuan&vimicro.com +28352 + maxcom + Kim Jung-uk + maxcom1&hitel.net +28353 + Vital Images Inc. + Meera Rajaram + meera_rags&yahoo.com +28354 + BCD Travel + Michael Manning + michael.manning&bcdtravel.com +28355 + Promsvyazbank OJSC + Georgy Dudukalov + dudukalov&psbank.ru +28356 + MyWave Internetdienstleistungs AG + Christian Tremel + c.tremel&mywave.at +28357 + Research Institute of China Mobile + Cheng Liang + chengliang&chinamobile.com +28358 + AK IT Services + Andreas Kippnick + andreas&kippnick.com +28359 + Simulina GmbH + Håkan Källberg + hk&simulina.se +28360 + Impi Linux (Pty) Ltd + Stephan Buys + stephan&impilinux.co.za +28361 + Cranix Engineering Co. Ltd. + Artiom Vasiukov + vasiukoff&yandex.ru +28362 + LM2 Consulting Gmbh + Martin Friedl + office&lm2.at +28363 + Bytec Bodry Technology GmbH + Michael Spinnenhirn + michael.spinnenhirn&bytec.de +28364 + widesolutions.it srl + Palladino Costantino + info&widesolutions.it +28365 + Ing.-Buero Dr. Plesnik GmbH + Arne Handtmann + service&plesnik.de +28366 + HostBase + Thomas Fjellstrom + tfjellstrom&shaw.ca +28367 + PROCURE Personalmanagement GmbH + Andreas Schäfer + Andreas.Schaefer&fh-bielefeld.de +28368 + Green Valley B.V. + Steve Maddison + sysadmin&greenvalley.nl +28369 + InetLabs (DE) + Helmut Franzke + helmut&franzke.de +28370 + Infoteh d.o.o. + Dejan Stanic + dejan&infoteh.si +28371 + Nutel Communications, Ltd. + Hristo Trendev + htrendev&nutel.cc +28372 + Gratka Sp. z o.o. + Piotr Stolc + p.stolc&gratka.pl +28373 + Conversant Systems (Pty) Ltd + Braam van Heerden + oid&conversant.co.za +28374 + OpenChoice di Diego Zaccariotto + Diego Zaccariotto + diego.zaccariotto&openchoice.it +28375 + EB Enterprises, Inc. + Robert B. Bernier + rbernier10&comcast.net +28376 + MacroElite Corp. + Lenny Leblanc + questions&macroelite.ca +28377 + Bizztools GmbH + Teja Berg + tberg&proventus.de +28378 + Pulsewidth + Adam James + admin&pulsewidth.org.uk +28379 + Open Systems International, Inc. + Ryan Tetzlaff + rtetzlaff&osii.com +28380 + 01edge technologies + chandra nath + nath&01edge.com +28381 + Red Lion Controls (N-Tron) + Denis Aull + Engineering&RedLion.net +28382 + Intelsat Inflight LLC + Brad Schnair + bradley.schnair&intelsat.com +28383 + Open1X + Chris Hessing + chris&open1x.org +28384 + PConRails, LLC + Gregoire Gentil + gregoire&gentil.com +28385 + Columbia Weather Systems, Inc. + Brian Smucker + brian&smuckerdata.com +28386 + Fujian Fujitsu Communication Software Co., Ltd. + xu zihui + xuzh&ffcs.cn +28387 + svenux + Sven Schiwek + info&svenux.de +28388 + ITB CompuPhase + Thiadmer Riemersma + thiadmer&compuphase.com +28389 + Schneeweis + Martin Schneeweis + martin&schneeweis.at +28390 + 19pouces + Matthieu Gallet + admin&19pouces.net +28391 + Villa Centrum Pty Ltd + Tom Peltonen + tpeltonen&villacentrum.com +28392 + Eclipse Foundation, Inc. + Denis Roy + webmaster&eclipse.org +28393 + the emakers di Fabio Vallino + Fabio Vallino + fabio.vallino&theemakers.com +28394 + Monki + Matt Willsher + matt&monki.org.uk +28395 + NewMarket Corporation + Stephen Petersen + steve&newmarket.com +28396 + Edoceo, Inc. 
+ David Busby + busby&edoceo.com +28397 + BluegrassNet Development + Jonathan Yarden + jon.yarden&gmail.com +28398 + Invictus Networks Pte Ltd + Chong Yu Meng + pascalcym&yahoo.com +28399 + ACHOS.COM + Tobias Richter + hostmaster&achos.com +28400 + Christian Mayer Buero- und EDV-Systeme + Christian Mayer + oid&christianmayer.de +28401 + CirTec AG + Markus von der Heiden + markus.vdheiden&gmail.com +28402 + Conteg + Vladimir Marek + vladimir&conteg.com +28403 + Town & Country Industries + Sergiusz Stempel + iana.org&tc-alum.com +28404 + Cynosure Research + Kim Davies + kim&cynosure.com.au +28405 + Hunt Brothers of Louisiana, LLC + James Hess + jhess&huntbrothers.com +28406 + Slovenija online - SiOL internet d.o.o. + Andrej Ota + andrej.ota&siol.si +28407 + Bradley D. Brown + Bradley D. Brown + bradley&brown.name +28408 + Ramsys Zrt + Vilmos Nebehaj + vilmos.nebehaj&ramsys.hu +28409 + PHOTONICS INC. + MASAFUMI TAKAGISHI + GCH06560&nifty.com +28410 + Ippon Technologies + Bertrand Pinel + bpinel&ippon.fr +28411 + Luhansk Taras Shevchenko National Pedagogical University + Yuriy Shkandybin + yshk&lnpu.edu.ua +28412 + Comtools GmbH + Frank Vollenweider + frank&vollenweider.info +28413 + Brazilian Mercantile and Futures Exchange + Jochen Mielke de Lima + jmielke&bmf.com.br +28414 + Business System Development Pty Ltd + David Probst + david&bdevel.com.au +28415 + Dixie Valley Farm + Richard Higley + rhigley&wildblue.net +28416 + SMX + Thom Hooker + admin&smx.co.nz +28417 + MÖLLER-WEDEL GmbH + M. Schaper + m.schaper&moeller-wedel.com +28418 + Corenet Ltd. + Mikko Tepponen + mikko.tepponen&corenet.fi +28419 + Trilogic + Evariste Courjaud + evariste&trilogic.fr +28420 + Opus Software + Patrick Theys + ptheys&yahoo.com +28421 + Partido Socialista Obrero Español (PSOE) + Jaime Vazquez + jvazquez&psoe.es +28422 + Tobias Scherbaum + Tobias Scherbaum + tobias&scherbaum.info +28423 + Nucomm Inc. + John B. Payne IV + jpayne&nucomm.com +28424 + Rockshore Limited + Matt Green + matt.green&rockshore.net +28425 + Einstein Industries, Inc. + System Administrator + sysadmin&einsteinindustries.com +28426 + Saint Vincent's Hospital + Lori Reed-Fourquet + lori.fourquet&sbcglobal.net +28427 + udicom AG + Bernhard Knapp + bernhard.knapp&udicom.de +28428 + Secure64 + David Roth + david.roth&secure64.com +28429 + Novanetic, Inc. + Jonathan Gettys + jgettys&novanetic.com +28430 + Macedonian Academic And Research Network (MARNet) + Goran Muratovski + gone&ukim.edu.mk +28431 + OpenAFS + Derrick Brashear + openafs-elders&openafs.org +28432 + Navajo Technical College + Mark Trebian + mtrebian&gmail.com +28433 + jmedved + Josip Medved + jmedved&jmedved.com +28434 + NHK Integrated Technology Inc. + Shinji UEDA + sin-ueda&nhkitec.co.jp +28435 + AC&T System Co.,Ltd. 
+ Hang-Youal Yoo + hyyoo&acnt.co.kr +28436 + Carnegie Learning + Lyle Seaman + templs&carnegielearning.com +28437 + Torsten Pohl - Software-Entwicklung + Torsten Pohl + enterprise-numbers&torsten-pohl.de +28438 + rybezahl + Stephan Waldow + swaldow&gmail.com +28439 + Tradesoft Technologies Srl + Paolo Degl'Innocenti + pinnocenti&dexgate.com +28440 + Impulse LTD + Alexey Kovalkov + alexk&impulsespb.ru +28441 + Sjöland & Thyselius + Staffan Ekvall + staffan.ekvall&st.se +28442 + RPH Consulting + Rolf Petter Halle + rolf.halle&gmail.com +28443 + Global Trader + Le Roi Beukes + lbeukes&gt247.com +28444 + Diratel S.L.L + Igor Gonzalez + sat&diratel.com +28445 + Dealermade + Evan Carroll + evan&dealermade.com +28446 + Institute of Mathematics and Computer Science, University of Latvia + Baiba Kaskina + baiba&latnet.lv +28447 + Kabona AB + Bjorn Lundberg + bl&kabona.com +28448 + FCS Fair Computer Systems GmbH + Bernd Goerz + b.goerz&fair-computer.de +28449 + koski.org + Ryan Koski + ryankoski&gmail.com +28450 + Internet Texoma, Inc. + Larry Vaden + vaden&texoma.net +28451 + American Water + Robert P Schreiber + robert.schreiber&amwater.com +28452 + Arabian Horse Association + Brian Kempton + bkempt&arabianhorses.org +28453 + amasol AG + Hans Maurer + maurer&amasol.de +28454 + wh4f + Thomas Karcher + tk&wh4f.de +28455 + Trion World Network + Operations Control Center + iana&trionworld.com +28456 + QNIX Pty Ltd + Sam De Francesco + sam&qnix.net.au +28457 + NTT America Enterprise Hosting + Michael Bessey + m.bessey&ntta.com +28458 + Nokia Networks (formerly 'Nokia Siemens Networks') + Petri Piira + petri.piira&nokia.com +28459 + Rogue Engineering Inc. + Tim Kirk or Mark Walsh + mwalsh&rogue-engr.com +28460 + Benone Bitencourt + Benone Bitencourt + benoneb&gmail.com +28461 + Mutoh Industries Ltd. + Hideo Noda + nodah&acm.org +28462 + KnoxOne + Hans Verbrugge + hans&knoxone.nl +28463 + beginux.org + mark voltz + mark.voltz&gmail.com +28464 + On Air Networks + On Air Networks + ydy0411&naver.com +28465 + Echola Systems + Sankaran Ganesan + sankaran_g&echola.com +28466 + MrLane.com + Kam Lane + kam&mrlane.com +28467 + PiCell B.V. + Peter J. Boudewijns + peter&picell.com +28468 + Fabio Prina + Fabio Prina + fabio&sauro.org +28469 + Cyber Media (India) Ltd + Krishna Kumar + kkkg&cybermedia.co.in +28470 + St. Lawrence University + Rhett Thatcher + sysadmin&stlawu.edu +28471 + Bremer SE + Peter Kußmann + p.kussmann&bremerbau.de +28472 + tapirdata.com + Andreas Riedl + office&tapirdata.com +28473 + DAVOnet GmbH + Thomas Rehm + thomas.rehm&davonet.de +28474 + periscoptic perceptions + tracey plummer + tracey&periscopticperceptions.com +28475 + Akademickie Centrum Kliniczne - Szpital Akademii Medycznej w Gdansku + Marcin Stepnicki + root&ack.gdansk.pl +28476 + bwin Interactive Entertainment AG + Werner Huss + werner.huss&bwin.org +28477 + Global Tel Link + James Wink + jwink&gtl.net +28478 + Implicit Monitoring Solutions, LP + Enoch Suen + enoch.suen&implicits.com +28479 + Coremetrics + Cameron Lane + clane&coremetrics.com +28480 + Traffic.com, Inc. + Helen Holzbaur + itops&traffic.com +28481 + MicroImaging + Steven C. Leach + microimaging&gmail.com +28482 + Technospace SRL + Giosue Evangelista + giosue.evangelista&poste.it +28483 + Satileon Networks + Salit Even Shoam + salite&satileon.com +28484 + ttb-group + Matthias Spork + matthias.spork&ttb-group.de +28485 + IDTIC. C.A. 
+ Leonardo Uzcategui + luzcategui&idtic.com +28486 + PETR HUMMEL + PETR HUMMEL + humlik&seznam.cz +28487 + Digital Dynamics Inc + Anthony Machon + iana&digitaldynamics.us +28488 + ROBOC Co.,Ltd. + Itoh Daisuke + pub_iana_enterprise&roboc.com +28489 + Devcom Solutions AB + Stefan Karlsson + stefan&devcom.nu +28490 + Kosmos ry. + Teppo Jalava + tjjalava&iki.fi +28491 + mitene internet co.,ltd. + Akio Higashi + ahigashi&mitene.or.jp +28492 + MD PREI KRASKRIPT + Eugene V. Polozenko + polozenko&kraskript.ru +28493 + nexmedia Pte Ltd + Toby Phipps + operations&nexmedia.com.sg +28494 + Orbis Lumen + Mark Rushing + mark&orbislumen.net +28495 + Rodenstock GmbH + Voelkl Markus + markus.voelkl&rodenstock.com +28496 + Unassigned + Returned 2007-07-18 + ---none--- +28497 + The Cavell Group + Michael Graham + mgraham&cavellgroup.com +28498 + linuxcon + Reinhold Baldauf + info&linuxcon.de +28499 + Differitas as + John Sletten + john&differitas.no +28500 + Tagelin + Julian Dean + iana&treworthal.net +28501 + AMBISEA Technoloy Corp., Ltd + Stephane Hollande + it&ambisea.com +28502 + TeleIDEA BV + Pio Cardone + pio.cardone&teleidea.com +28503 + Unixuser + Yasser Nabi + yasser&unixuser.org.uk +28504 + Proclos + Peter Füreder + peter.fuereder&proclos.com +28505 + Faculdade Metropolitana IESB + Bruno Gimenes Pereti + pereti&ump.edu.br +28506 + OpServices Tecnologia da Informacao S/A + Paulo Ribas + paulo.ribas&opservices.com.br +28507 + Gude Analog- und Digitalsysteme GmbH + Michael Gude + info&gudeads.com +28508 + Cunhol + Joao Bras + dsi&cunhol.com +28509 + FifSource + Philippe Troin + phil&fifsource.com +28510 + John S. Connor + Pinda Ndaki + it&jsconnor.com +28511 + DowKey Microwave + Giuseppe Bondi + gbondi&dowkey.com +28512 + Intellibyte Inc. + Andrew Tunstall + atunstall&intellibyte.ca +28513 + VHV + Joerg Monpetain + jmonpetain&vhv.de +28514 + ITALIACERCA s.a. + Giosue Evangelista + giosue.evangelista&poste.it +28515 + Goerz IT-Consulting + Bernd Goerz + goerz&web.de +28516 + bitglue.com + Phil Frost + indigo&bitglue.com +28517 + 7key + Andrey Afanasiev + scum001&gmail.com +28518 + CA & PARTNERS + M. AMEKUDJI Cyriaque + acyriaque&wanadoo.fr +28519 + NEC Fukui, Ltd. + Hiroshi Sawada + hiroshi.sawada&fukui.necel.com +28520 + Nexsys Consulting Pty Ltd + Raymond Guy + rguy&cpay.com.au +28521 + Jopasana Software & Systems Ltd. + Ashwini Bomble + ashwinib&jopasana.com +28522 + CANDIDO Kommunikationselektronik + Ing. Andreas Candido + a.candido&candido-online.at +28523 + BACH systems s.r.o. + Radomír Staroštík + r.starostik&bach.cz +28524 + Syntech SW Ltd + Andy Watson + hostmaster&syntech-sw.com +28525 + Across Finance, a.s. + Milan Beňo + mbeno&across.sk +28526 + CCS Customer Communication Systems GmbH + Michael Kärcher + m-pen-iana&eons.de +28527 + Adeptra Inc. + Operations Group + ops&adeptra.com +28528 + thevisp + Philip Saxton + psaxton&gmail.com +28529 + NetSolutions Perú S.A.C. + Irenio Luis Chagua Aduviri + ichagua&gmail.com +28530 + Khypoea + Eric Welsh + ewelsh&khypoea.com +28531 + Steven Roth + Steven Roth + steve&rothskeller.net +28532 + AO DAAZ + Sergey Moshkov + sam&daaz.ru +28533 + CYAN + Matthew Nguyen + matthew.nguyen&cyanoptics.com +28534 + City of Jacksonville + Kevin Haynes + tsg&coj.net +28535 + AlarmEngine + Stefan H. King + sking&alarmengine.com +28536 + Zappos.com + Derek Fedel + dfedel&zappos.com +28537 + HEALTHSIGN SL + MANUEL CANTON + mcanton&es.wisekey.com +28538 + Federated Mutual Insurance + Matthew J. 
Stein + MJStein&fedins.com +28539 + Buf Compagnie + Administrator + admin&buf.com +28540 + Weisberg Consulting, Inc. + Matt Weisberg + matt&weisberg.net +28541 + Net Demon + Jeff W. Storey + jstorey&net-demon.com +28542 + Aleph Web Services + Thomas Haselwanter + thomas.haselwanter&aleph-webservices.com +28543 + Houston Baptist University + Zung Hoang + zhoang&hbu.edu +28544 + ChilesConsulting + Michael Chiles + michael.chiles&gmail.com +28545 + M.G InfoCom Pvt. Ltd. + Sanket Gupta + sanket&mindgenies.com +28546 + Laubheimer + Markus Laubheimer + markus&laubheimer.de +28547 + Australian National University + Ahmed EL Zein + ahmed.elzein&anu.edu.au +28548 + Caja de ahorros de Santander y Cantabria + Jose Luis Gonzalez Llano + hostmaster&altamiraonline.com +28549 + PhysiSoft + Thomas Schneider + tschneider&physisoft.de +28550 + Marand d.o.o. + Jan Gnezda + jan.gnezda&marand.si +28551 + Weird Solutions Sweden AB + Nicole Östman + nicoleo&weird-solutions.com +28552 + Avocet RT Limited + Dan Maskell + dan&zippysystems.com +28553 + Securepoint GmbH + Bastian Kummer + bastian.kummer&securepoint.de +28554 + Care2 + Karl Pietri + karl&earth.care2.com +28555 + Manheim Services Corporation + SNMP Administrators + snmpadmins&terrier.manheim.com +28556 + Last.fm Ltd. + Russ Garrett + russ&last.fm +28557 + Hillstone Networks Inc + Ning Mo + nmo&hillstonenet.com +28558 + KEYENCE CORPORATION + Hideki Ueda + soft-license&keyence.co.jp +28559 + Utah State University + Information Technology - Allen Hill + tech.contact&usu.edu +28560 + IServ + Joerg Ludwig + joerg.ludwig&iserv.eu +28561 + eHealthConnecticut + Lori Reed-Fourquet + lori.fourquet&sbcglobal.net +28562 + Fortune System Inc. + Taketora Kikuchi + OID-admin&fortunesystem.co.jp +28563 + RFNC-VNIITF + Vyacheslav Kharitonov + hvb&inbox.ru +28564 + Network Synergy Services, LLC. + Zung Hoang + zhoang&houston.rr.com +28565 + Carley IP + Jeff Carley + jeff&thecarleys.net +28566 + SCache Systems + Shivaram Narasimha Murthy + snm&scache.com +28567 + MEGLA GmbH + Volker Siebelink + vsiebelink&megla.de +28568 + MicroLink + Ovchinnikov Konstantin + kst&microlink.ru +28569 + UltraRAD Corporation + travis nuzzi + tnuzzi&ultraradcorp.com +28570 + Junta de Castilla y Leon + Alberto Martinez Arribas + dominios.internet&jcyl.es +28571 + Hafslund ASA + Aslak Poppe + aslak.poppe&hafslund.no +28572 + Auditiel + Jerome Dusautois + jdusautois&auditiel.fr +28573 + zonekey Inc. + Lei Wang + wanglei&zonekey.com.cn +28574 + ICA-NET s.r.l. + Marco Piai + admin&ica-net.it +28575 + My-Portal.gr + Zissis Sekros + zsek&my-portal.gr +28576 + Longent LLC + Rick Youngbar + ryoungbar&longent.com +28577 + Walnut Valley Unified School District + Brian Troudy + btroudy&walnutvalley.k12.ca.us +28578 + Powerland Computers + Chad Kitching + ckitching&powerlandcomputers.com +28579 + New World Restaurant Group, Inc. + Scott Morris + smorris&nwrgi.com +28580 + MoroSystems, s.r.o. + Stanislav Hybasek + info&morosystems.cz +28581 + Wireless Generation, Inc. + John Stewart + admin&wgen.net +28582 + Linux Systemhaus Schulz + Karsten Schulz + kschulz&t800.ping.de +28583 + Tangent Systems, Inc. 
+ Steve Mack + steve.mack&tangent-systems.com +28584 + Allegro Networks pty ltd + Darren Philips + darren.philips&team.allegro.com.au +28585 + Sniddle LLC + Mike French + macvoip&gmail.com +28586 + EATON Wireless + John Millsaps + johnwmillsaps&eaton.com +28587 + Mobilesoft (Aust) Pty Limited + Cameron Hutchison + chutchison&mobilesoft.com.au +28588 + Kaplan Software, LLC + Gerald Kaplan + gkaplan&kaplansoftware.com +28589 + IDERs Inc + Sami Kibria + skibria&iders.ca +28590 + EVC Inc. + Tetsuya Shiotani + tech&evc.jp +28591 + BEIJING THINKOR INFORMATION TECHNOLOGIES CO.,LTD. + peream Chen + guo_gang&126.com +28592 + Eteration A.S. + Naci Dai + naci.dai&eteration.com +28593 + SCA Timber AB + Roger Jansson + roger.jansson&sca.com +28594 + EXGEN Networks Co., Ltd. + Seiichi Onda + master&exgen.co.jp +28595 + PATRONAS Financial Systems GmbH + H. Steuer + steuer&patronas.de +28596 + CTE Digital Broadcast s.r.l. + Marco Spada + mspada&cte-elit.it +28597 + National Centre for Scientific Research "Demokritos" + Vassilis Vatikiotis + sysadmin&iit.demokritos.gr +28598 + Sambers Italia Spa + Simone Bettola + s.bettola&hantarex.it +28599 + Landmark Communications, Inc. + Randy Hall + randy.hall&pilotonline.com +28600 + Hetra Secure Solutions Corp. + Dale Cannon + dcannon&hetrasecure.com +28601 + Know It All + Alek Zdziarski + alekzdz&gmail.com +28602 + LogicBox, Inc. + Jason Goemaat + Jason.Goemaat&logicboxinc.com +28603 + MEDNETWorld.com, Inc (formerly 'MEDNET USA, Inc') + Seonho Kim + seonho.kim&mednetworld.com +28604 + Factory Creative Studio Ltd. + Kovács Balázs + pen&jonapot.hu +28605 + Logicalis GmbH (formerly 'Minters GmbH') + Hendrik Hasselberg + iana&logicalis.de +28606 + TheFind, Inc. + Alex Meyer + pen28606&thefind.com +28607 + coforum + Thomas Kalka + thomas.kalka&gmail.com +28608 + Technomonk Industries + Mike Preston + support&technomonk.com +28609 + SpongeLava Ltd. + Paul Thomas + snmp&spongelava.com +28610 + Bengt Månsson + Bengt Månsson + bengt.mansson&gmail.com +28611 + CommAgility Ltd + Paul Alan Moakes + info&commagility.com +28612 + Bountiful WiFi LLC + Brett Steinicke + brett&bountifulwifi.com +28613 + Plastek Group + Chip Lynch + clynch&plastekgroup.com +28614 + 6202110 Canada Inc. + Ryan Wan + ryan&ryanwan.name +28615 + Ondrej Vlach + Ondrej Vlach + ovlach&nanobyte.cz +28616 + Orpak Systems Ltd + Boris Ratner + bratner&orpak.com +28617 + KioriSoft, LLC + John Draughn + john.draughn&kiorisoft.com +28618 + Lightning Source + Jeff Crawford + jeff.crawford&lightningsource.com +28619 + Univeris Corporation + Derek Schenk + dschenk&univeris.com +28620 + CSI Communication Systems Inc. AG + SPRING, Simon + spring&csi.ch +28621 + HANDICAP INTERNATIONAL + david meuleman + it&handicap.be +28622 + Integrated Business Systems & Services + Terrence Sullivan + tsullivan&ibss.net +28623 + Pari Networks + Satyan Raju + raju&parinetworks.com +28624 + UNHfree.net + Dominik Strnad + info&unhfree.net +28625 + tarot.com + Ken Maupin + hostmaster&tarot.com +28626 + Laszlo Systems, Inc. + Paul van Gool + pvangool&laszlosystems.com +28627 + Hammernet + Andreas Kimpfler + andreas&kimpfler.net +28628 + Twisted Storage Inc + Chas. Wegrzyn + cwegrzyn&twistedstorage.com +28629 + everRaise.com + Cd Chen + cdchen&cdchen.idv.tw +28630 + The Atlantis Consulting + Aron Sogor + aron&theatlantis.net +28631 + GoonSwarm + aka 'Solo Drakban' + tech&goonfleet.com +28632 + EuroMedia-Service GmbH + Guido J. Mueller + guido.mueller&euromedia-service.de +28633 + DataCentr Ltd. 
+ Alex Yanchenko + yanchenko&gmail.com +28634 + ComAp, a.s. + Jan Tomandl + jan.tomandl&comap.cz +28635 + Fragnetics LLP + Kwok Yang Bin + yangbin&fragnetics.com +28636 + PHB Eletronica Ltda + Rogerio Valentim Pereira + rogerio&phb.com.br +28637 + Itadel (formerly 'DANSUPPORT') + Torben Petersen Egmose + tpe&itadel.dk +28638 + Bellomy Research, Inc. + Matt Gullett + mgullett&bellomyresearch.com +28639 + TxCore, Inc. + Tim Kuchlein + tim&txcore.com +28640 + IT Linux + Patricio Bruna + pbruna&it-linux.cl +28641 + Delinked + Michael Reynolds + michael.reynolds&sosdg.org +28642 + Proveedor de Certificados PROCERT ITFB, C. A. + Oscar Lovera + oscar.lovera&procert.net.ve +28643 + SUEIIDISS + Gustavo Perez + gustavo&hc.edu.uy +28644 + Gary Thomas + Gary Thomas + g.thomas&mat.ucsb.edu +28645 + Techila Technologies Ltd. + Teppo Tammisto + teppo.tammisto&techila.fi +28646 + 650mhz + Pavel Rubtsov + mhz650&mail.ru +28647 + EKM4 LIMITED + Martin D Bacon + m.bacon&iap.org.uk +28648 + SIATech Inc. + Bryan Oswald + Bryan&siatech.org +28649 + SIPGear AB + Urban Engemyr + urban.engemyr&sipgear.com +28650 + AxisMobile + Nadav Fried + nfried&axismobile.com +28651 + MODS + Sylwester Lunski + slunski&mods.pl +28652 + The Hartford + Alexander J Monsiegneur + amonsiegneur&thehartford.com +28653 + Physicians Group, L.L.C. + Chad Sine + chad&pgllc.com +28654 + Mobile Interactive Group + Marcus Kern + marcus&migcan.com +28655 + TIANJIN OMUX COMMUNICATION TECHNOLOGY CO.,LTD + Larry_zhao + zyf&omux.com +28656 + iPolicy Networks Limited + Mannu Kalra + mkalra&ipolicynetworks.com +28657 + Fonoklik Iletisim Hizmetleri ve Ticaret A.S. + Pınar Kapralı Gorsev + pinar&phonoclick.com +28658 + EnteGreat, Inc. + Bob Rose + bob_rose&entegreat.com +28659 + The Cooper-Cain Group, Inc + Pat Cain + pen-register&coopercain.com +28660 + Linear Acoustic Inc. + Rod Campbell + rod&linearacoustic.com +28661 + Foster + Foster Liu + fliu&gennux.com +28662 + Richard Gavenda + Richard Gavenda + richard.gavenda&net.tvtrinec.cz +28663 + Arada Systems + Ravi Puvvala + iana&aradasystems.com +28664 + GIFTCS, LLC + Noah Gift + noah.gift&gmail.com +28665 + Village-Island Co.,Ltd. + Michael VAN DORPE + michael&village-island.com +28666 + Elaxys Tecnologia + Claudio Leonel + cls&elaxys.com.br +28667 + confusatron.org + Keith Zaback + krz-iana&confusatron.org +28668 + Devmach.com Linux Support Services + Aydin Ulfer + root&devmach.com +28669 + Bycast, Inc. + Quentin Vandermerwe + pmreg&bycast.com +28670 + Blue Cacao Technologies, S.A. de C.V. + Vladimir Ernesto Diaz-Aviles + vedax&bluecacao.com +28671 + TORINS Ltd. + Alexey Tokarev + tav&torins.ru +28672 + Communication Research Labs Sweden AB + Anders Lundstrom + anders.lundstrom&crl.se +28673 + Conseil Régional de Lorraine + Xavier ESPUNAS + xavier.espunas&businessdecision.com +28674 + KIRNexus GmbH + Ewald Restle + ewald.restle&kirnexus.com +28675 + Phorm, Inc. + Tiemen Meerman + admin&phorm.com +28676 + WireGATE Technology + Zhi-Feng Dong + zfdong&wiregatetech.com +28677 + Sistemas Catastrales S.A. + Comisario Alejandro Daniel + acomisario&siscat.com.ar +28678 + VTK Gent v.z.w. + VTK Computer + computer&vtk.ugent.be +28679 + Middle Atlantic Products + Peter Skinner + pskinner&middleatlantic.com +28680 + Appalachian Regional Healthcare, Inc. + Darrell Fraliex + dfraliex&arh.org +28681 + Crystal Vision Ltd + Martin Macadie + martin&crystalvision.tv +28682 + Planetas Medios Digitales S.L. + Pablo S. 
Torralba + pstorralba&gmail.com +28683 + Xerox - Document Supplies Europe + Jarlath Mc Ardle + Jarlath.McArdle&xerox.com +28684 + KATE-KOM + Sinisa Radas + sradas&kate-kom.com +28685 + Consultem d.o.o. + Oliver Jukić + oliver.jukic&consultem.hr +28686 + The Hertz Corporation + James Bowen + pki&hertz.com +28687 + infotronic + livio penzo + info&infotronic.com +28688 + Samway Electronic SRL + Mihai Savu + mihai.savu&samway.ro +28689 + PC Solutions Net + Mobeen Azhar + moby&pcsn.net +28690 + morrisey.us + Richard Morrisey + r.morrisey&computer.org +28691 + BLStream Sp. z o.o. + Lech Karol Pawłaszek + lech.pawlaszek&blstream.com +28692 + Karmoy kommune + Steen Erik Hagland Hansen + ldap&karmoy.kommune.no +28693 + Colimbra BV + Terence Sambo + tsambo&colimbra.com +28694 + levigo holding gmbh + Oliver Bausch + o.bausch&levigo.de +28695 + Philadelphia Stock Exchange + Jonathan Wolf + jonathan.wolf&phlx.com +28696 + ASQUARE Consulting GmbH + Thomas Pajonk + Thomas.Pajonk&asquare.eu +28697 + Transcom Enhanced Services + Michael Hale + mhale&transcomus.com +28698 + Teradata Corporation + Mike Lee + mike.lee&teradata.com +28699 + Auto/Con Corp + Michael L Picio + mpicio&autoconcorp.com +28700 + O-Regan.org + Kevin O'Regan + kevin&o-regan.org +28701 + QuVIS, Inc. + Mark Hodges + mhodges&cableone.net +28702 + Charles Sturt University + Dave McDonnell + dmcdonnell&csu.edu.au +28703 + University of Nevada, Las Vegas + Don Diener + don.diener&unlv.edu +28704 + Integrated Broadcast Information Systems Ltd. + John Haselwood + john.haselwood&ibistv.co.uk +28705 + FURUNO ELECTRIC CO., LTD. + SNMP Administrator + snmp_admin&furuno.co.jp +28706 + Airservices Australia + Cyber admin + cyber&airservicesaustralia.com +28707 + National Cancer Center + Naoyuki Satoh + admin&ncc.go.jp +28708 + FUJIFILM Corporation + Taishi Matsumura + taishi.matsumura&fujifilm.com +28709 + Universidade de Brasília + Jacir Luiz Bordim + bordim&unb.br +28710 + Dwi Tunggal Putra Private Limited + Kristiadi Himawan + kristiadi_himawan&dtp.net.id +28711 + Department of Zoology, Stockholm University + Ulf Norberg + hostmaster&zoologi.su.se +28712 + META-LEVEL Software AG + Martin Bauer + martin.bauer&meta-level.de +28713 + GDC Technology Ltd + Ujval Lodha + ujval&gdc-tech.com +28714 + Slithy Toves + Andrew Kailhofer + andy&toves.com +28715 + Edfinancial Services + Gary Lapointe + glapointe&edfinancial.com +28716 + Virtual Viewing Ltd + Peter Kuma + peter.kuma&virtualviewing.co.uk +28717 + Zavod K6/4 + Almir Karic + almir&kiberpipa.org +28718 + Ministère de l'Education Nationale, Luxembourg + Centre de Technologie de l'Education, Christian Goebel + christian.goebel&cte.lu +28719 + Tarrant County + Aaron Barnes + aaronbarnes&tarrantcounty.com +28720 + Jaguar Software Development + Brian Mason + oid&jaguarsoftware.com +28721 + Affernet Pty Ltd + Doug Balmer + sysadmin&affernet.com +28722 + Network Synergy Pty Ltd + Trevor Lees + trevor&networksynergy.com.au +28723 + GBCOM + Li Xianping + iamlixianping&gmail.com +28724 + AnNeal Technology Inc. + Mr. Cal Wu + calwu&anneal.com.tw +28725 + Atonics Inc. 
+ Nehemiah Chang + nehemiah&atonics.com.tw +28726 + MFLO + Pae Wan Soo + mypws&mflo.co.kr +28727 + EFUN International Corporation + Jeremy Huang + jeremy.huang&e-fun.cc +28728 + Jerich Austria GmbH + Helmut Wieser + helmut.wieser&jerich.com +28729 + GRD + Valentyn Shapoval + valsh&microsoft.com +28730 + NiX + Norbert Buchmuller + norbi&nix.hu +28731 + Edgeware AB + Johan Rydberg + johan.rydberg&edgeware.tv +28732 + Secure Links + Paul Thresher + paul&securelinks.net +28733 + Ronexprim Srl + Mocanu Petrut + pm&ronexprim.com +28734 + Automorpheus.com Corporation + David Pratt + fairwinds&eastlink.ca +28735 + Ahoya Networks Inc + shuming chang + shuming&ahoya.com.tw +28736 + ILOG + Fabien Lelaquais + lelaquaf&ilog.fr +28737 + webapps.jp + Toru Hiramatsu + hiramatu&webapps.jp +28738 + Nippon RAD Inc. + Hoshino, Shingo. + hoshino&nippon-rad.co.jp +28739 + ika + Jochem Ippers + ippers&ika.rwth-aachen.de +28740 + Noval Networks + Juha Antila + juha.antila&novalnetworks.com +28741 + Banco Popular y de Desarrollo Comunal + Noe J. Castro Rueda + noecastro&bp.fi.cr +28742 + Horry Electric Cooperative, Inc. + Dale Johnson + administrator&horryelectric.com +28743 + Greschitz IT Security + Thomas Greschitz + office&greschitz.com +28744 + College of Medicine, University of Ibadan, Ibadan + Dr Wole Adebiyi + woleadebiyi&comui.edu.ng +28745 + Comtest Wireless S.r.l. + Matteo Prosperi + matteo.prosperi&comtestwireless.it +28746 + The Swedish National Archive of Recorded Sound and Moving Images + Linus Sjoberg + ls&slba.se +28747 + GIP AG + Philipp Dominitzki + philipp.dominitzki&gip.com +28748 + LibreStream Technologies Inc. + Kent Wotherspoon + snmp&LibreStream.com +28749 + TeliPhone inc. + Benjamin Lawetz + support&teliphone.ca +28750 + ISEDEV + Iestyn Elfick + isedev&gmail.com +28751 + 4INFO, Inc. + Jim Cooley + jcooley&4info.net +28752 + WAE Technologies, Inc. + Joshua Olson + joshua&waetech.com +28753 + EPS Co., Ltd. + Hiromichi Nosaki + h.nosaki&eps.co.jp +28754 + Vinotech + Ralph J.Mayer + oid&vinotech.net +28755 + comWare GmbH + Frank Holznagel + fh&comware-software.de +28756 + Fachhochschule Worms + Norbert Drees + pen-admin&fh-worms.de +28757 + Mu Dynamics (formerly 'Mu Security') + PEN Contact + pen-info&mudynamics.com +28758 + Centurum Inc. + Terry Hebert + HelpDesk&Centurum.com +28759 + Savant Tecnologia + Marcos Sungaila + marcos&savant.com.br +28760 + Albus-Insec + Arthur Caranta + arthur&albus-insec.com +28761 + MDS America, Inc. + Gopiballava Flaherty + gopi&mdsamerica.com +28762 + Unwire + Morten Olsen + mol&unwire.dk +28763 + CoCoZ + Julien Francoz + julien&francoz.net +28764 + Hollywood Entertainment + Jen Meneley + jenm&hlyw.com +28765 + South Bay Community Network, Inc. + Ian Kluft + ik-iana-pen&thunder.sbay.org +28766 + Tradescape, Inc. + Jian Zhang + jian&tradescape.biz +28767 + Enseo, Inc + Ed Okerson + eokerson&enseo.com +28768 + MasterCard WorldWide + Todd Telle + Todd_Telle&mastercard.com +28769 + Unison Network Labs + Zhang Wen Ming + wenming.zhang&unison.net.cn +28770 + RJ Landau Partners PLLC + Kristen M. 
Tsangaris + kmtsangaris&gmail.com +28771 + wolery.net + Oleg Lyashko + oleg.v.lyashko&gmail.com +28772 + Blueberry Consultants Ltd + Martin Green + hostmaster&bbconsult.co.uk +28773 + Papier & Recycling Logistik GmbH + Haschek Martin + edv&spex.at +28774 + Ixaris Systems Ltd + Justin Vassallo + sysadmin&ixaris.com +28775 + Expway + Laurent Le Bourhis + laurent.lebourhis&expway.fr +28776 + Neuf Cegetel + John WALTER + john.walter&neufcegetel.fr +28777 + Ignos Estudio de Ingenieria S.L. + Adrian Perez Jorge + aperez&ignos.com +28778 + Intellistream + Jani Peltonen + jani.peltonen&intellistream.co.uk +28779 + Fox-IT B.V. + Mark Revier + iana&fox-it.com +28780 + patteran, inc. + Art Allisany + ainfo&patteran.com +28781 + Bowdoin College + Michael Bowden + mbowden&bowdoin.edu +28782 + Earmark Media Services + Mark Kramer + pen&earmarkmedia.com +28783 + Venafi + Matt Riddle + it.ops&venafi.com +28784 + CER International bv + Ad van den Broek + iana.inkoop&cer.com +28785 + Me.Dium, Inc. + Scott Engstrom + sengstrom&me.dium.com +28786 + Vidiom Systems, Inc. + Tobin Wahlers + toby&vidiom.com +28787 + Flex-Networks Inc. + Karim Virjee + karim.virjee&date.com +28788 + Quarantainenet BV + Casper Joost Eyckelhof + info&quarantainenet.nl +28789 + Cory Albrecht + Cory Albrecht + coryca&gmail.com +28790 + Poslovno informacioni sistemi, d.o.o. + Milos Merdzanovic + milosmerdzanovic&pis.co.yu +28791 + SBI Japannext Co Inc + Ryan McBride + ryan.mcbride&japannext.co.jp +28792 + TotalWire S.r.l. + Alfonso De Gregorio + iana&totalwire.it +28793 + Neptuny s.r.l. + Stefano Moscetti + stefano.moscetti&neptuny.it +28794 + slashconcept GbR + Christoph Pilka + c.pilka&slashconcept.com +28795 + uPRESTO, Inc + Hae-Yeon, Hwang + hyhwang&upresto.com +28796 + Haberst Infra AS + Kristjan Koppel + it-oid&haberst.ee +28797 + SCBA Expert Service Centre + Sergey N Frolov + frolovs&mail.nnov.ru +28798 + JSC "SBERCARD" + Gritsienko Sergey + ca&sbercard.com +28799 + Thales Security Solutions and Services + Ricardo Claudino + ricardo.claudino&thalesgroup.com +28800 + QualiConsult Ltda + André Carneiro + andre.carneiro&grupoquali.com.br +28801 + FlexStar Technology, Inc + Dmitri Repkine + drepkine&flexstar.com +28802 + SMS Tecnologia Eletronica LTDA + Ricardo Seixas + rseixas&sms.com.br +28803 + ISCaD GmbH + Friedhelm Matten + post&fmatten.de +28804 + L'Occitane SA + Jerome CRUZ-MERMY + iana&loccitane.com +28805 + Visionary Networks, Inc. + Sean Whitney + sean&visionary-networks.com +28806 + Nationwide Children's Hospital (formerly 'Children's Hospital, Inc.') + Brian Baacke + Brian.Baacke&NationwideChildrens.org +28807 + Mullins Household + Matthew Mullins + mokomull&gmail.com +28808 + accom GmbH & Co. KG + Michael Portz + michael.portz&accom.de +28809 + Netwurk Labs + Jeff Ritchie + jritchie&netwurklabs.com +28810 + Desai Electronic Technology (Sichuan) Co., Ltd. + Youke Zhou + yukezhou&hotmail.com +28811 + SMS spol. s r.o. + Petr Behavka + petr.behavka&smed.cz +28812 + DIGITALK Limited + Nathan Colman + ProductDev&digitalk.com +28813 + Alberta-Pacific Forest Industries Inc. + Jay Supernault + dnsadmin&alpac.ca +28814 + HEALTHGRID + Yannick LEGRE + yannick.legre&healthgrid.org +28815 + Wedjaa + Fabio Torchetti + info&wedjaa.net +28816 + Supplee Technologies + John R. Supplee + info&supplee.com +28817 + Exploreos, Inc. + Christian Clay Campbell + operations&exploreos.com +28818 + St. John Medical Center + Andy Hight + delberth25&yahoo.com +28819 + Titan Consulting Group, Inc. 
+ Brian O'Neil + bmoneil&titanconsultinggroup.com +28820 + XUERON.COM + Xueron Nee + xueron&xueron.com +28821 + GreatWall Systems, Incorporated + Michael Burr + register&greatwallsys.com +28822 + ABE ELETTRONICA S.p.A. + Roberto Valentin + roberto.valentin&abe.it +28823 + Neta Technologies Inc. + Yap Ye, Cheng Gang + cyapye&netatechnologies.com +28824 + JPragma + Isaac Levin + info&jpragma.com +28825 + Bufete de Servicios Informaticos, SA de CV + Aaron Rivacoba Bohorquez + arivacoba&gpobsi.net +28826 + Infinite Innovation, Inc. + JD McGregor + jdmcgregor&infiniteinnovationinc.com +28827 + TLH Systems + Thomas L Hazen + TLHazen&ThomasHazen.com +28828 + Oita Computer Engineering & Consulting Ltd. + yasuyuki sato + yasuyuki&cec-ltd.co.jp +28829 + BMST Co., Ltd + Andre Song + song&bmst.net +28830 + ALCON Telecommunications Co., Ltd. + Mark Lai + marks.lai&msa.hinet.net +28831 + TurboConsult + Petr Hrebicek + petr.hrebicek&turboconsult.cz +28832 + G-cluster Ltd. + Mr. Antti Kaunisto + antti.kaunisto&g-cluster.com +28833 + LUFTHANSA TECHNIK AG + Samer Abdalla + samer.abdalla&lht.dlh.de +28834 + Surecloud + Toby Scott-Jackson + toby.scott-jackson&surecloud.com +28835 + ACXSYS Botswana + Patrick Andrew Jansen + patrick&naftec.org +28836 + BERTANA srl + Valter Foresto + progettazione&bertana.biz +28837 + MVP Health Plan + Beth McDermott + bmcdermott&mvphealthcare.com +28838 + ScreenPC + Jacques VB Voris IV + jvoris&screenpc.com +28839 + ZedX, Inc. + Joshua Sholes + systems&zedxinc.com +28840 + Avcorp Industries Inc. + Dean Sanderson + dsanderson&avcorp.com +28841 + Phönix-PACS GmbH + Martina Schlesinger + support&phoenix-pacs.de +28842 + GJAlves + Gustavo Junior Alves + gjalves&gjalves.com.br +28843 + Dekeyzer + Jonah Dekeyzer + jonah.dekeyzer&menzz.be +28844 + Channel Dynamix + IANA Administrator + iana&channeldynamix.com +28845 + Digital Ocular Networks, Inc. + Barry P. Benight + iana&digitalocular.com +28846 + Universidad de Malaga + Victoriano Giralt + victoriano&uma.es +28847 + D&S Networks + Andreas Scharein + a.scharein&ds-networks.net +28848 + Guangzhou FrameNet Telecommunication Technologies,Co,LTD + Jianyu Ke + kjyzy&126.com +28849 + Professional Products Inc + Nicholas J Suchyta + nicks&ppionline.com +28850 + Felltech Ltd + Ian McLauchlan + ianmac&felltech.com +28851 + Topnordic a/s + Johnny Boerlum + jhb&topnordic.com +28852 + Micro Innovation AG + Ivo Hengartner + ivo.hengartner&microinnovation.com +28853 + Freudenberg Hosting KG + Team Operating System and Hosting Infrastructure + hosting.osnt&freudenberg-it.com +28854 + OOO BB Systems + Alexander Baikov + baikov&bbsystems.ru +28855 + PRESCOM + Laurent Salagaras + laurent.salagaras&prescom.fr +28856 + Concurrent Thinking Ltd. + Steve Norledge + steve&concurrent-thinking.com +28857 + Working Today, Inc. + Ohad Folman + oid-manager&workingtoday.org +28858 + Public Service Mutual Insurance Company + Ken Mueller + kmueller&mcarta.com +28859 + Big Fish Games + Jeff Tanner + jeff.tanner&bigfishgames.com +28860 + GB Development + Glenn Sills + gbsills&msn.com +28861 + Proyecto Conectate al Conocimiento + Ivan Villamizar + ivanvillamizar&conectate.gob.pa +28862 + Sipcall.com Inc. + Tom Seago + operations&sipcall.com +28863 + OSS Integrators, Inc. + Dave Dickinson + dave&ossintegrators.com +28864 + Cosmofon AD + Nikola Volnarovski + support&cosmofon.com.mk +28865 + Achievo Deutschland AG + Florian Ernst + florian.ernst&achievo.de +28866 + TRENDnet, Inc. 
+ Sonny Su + s.su&trendnet.com +28867 + Ping Identity Corporation + Brian Campbell + bcampbell&pingidentity.com +28868 + Anubisnetworks + João Gouveia + joao.gouveia&anubisnetworks.com +28869 + hollaender.net + Christian Holländer + iana&hollaender.net +28870 + Hogskolan i Gavle + Anders Lordal + anders.lordal&hig.se +28871 + Péter Szűcs + Péter Szűcs + sherlock&sysop.hu +28872 + AGENCE LANDAISE POUR L'INFORMATIQUE + Guillaume BOULOM + guillaume.boulom&alpi40.org +28873 + CHIP Xonio Online GmbH + Steininger Herbert + hsteininger&chipxonio.de +28874 + Solarflare Communications Inc. + Martin Porter + mporter&solarflare.com +28875 + secom consulting + Dr. Andreas Plöger + ploeger&secom-consulting.de +28876 + Jordan & Jordan + Mark Weindling + markw&jandj.com +28877 + NRC Systems Ltd + Nick Clifford + nick&nrc.co.nz +28878 + Seikosha Inc. + Hitoshi Suzuki + hsuzuki&ksks.co.jp +28879 + Americanas.com + Denys Sene dos Santos + denys.sene&accurate.com.br +28880 + Westone Information Industry INC. + chen fuli + chen.fuli&westone.com.cn +28881 + dSigma, LLC (formerly 'dCube Technologies, LLC') + Doug Kehn + snmp&dsigma.com +28882 + Michael Eisler + Michael Eisler + email2mre-ianaoid&yahoo.com +28883 + Tomas Bata University in Zlín + LDAP Admin + ldap&utb.cz +28884 + Kucko + Szabolcs Balogh + baloghsz&cserkesznet.sk +28885 + Spolka Inzynierow SIM Sp. z o.o. + Wojciech Rusinek + wojciechr&sim.com.pl +28886 + OQ Chemicals + Thorsten Bach + thorsten.bach&oq.com +28887 + LabSET + Vincent MARTIN + vincent.martin&ulg.ac.be +28888 + UXtechnology B.V. + H. de Koning + techsupport&uxtechnology.com +28889 + NemoQ Iberica, S.A. + Jose Monge Martin + jose.monge&nemoq.net +28890 + Art Center College of Design + Jason Blackader + unix&artcenter.edu +28891 + College of Dunaujvaros (Dunaújvárosi Főiskola) + Csaba Kovács + cs.kovacs&mail.duf.hu +28892 + Exoweb + Wang Chun + wangchun&exoweb.net +28893 + Rihotec Oy + Petri Nuuttila + petri.nuuttila&riihonlahti.com +28894 + MAXI VIEW HOLDINGS LIMITED + Chiayi Hsu + chiayi_hsu&loop.com.tw +28895 + Caisse Nationale de Sécurité Sociale Maroc + Tayeb Lahjouji + tayeb.lahjouji&cnss.ma +28896 + BCC GmbH + Florian Reinholz + florian.reinholz&bcc.de +28897 + nsec + Florian Reinholz + info&n-sec.de +28898 + Securaplane Technologies + Glen Singleton + gsingleton&securaplane.com +28899 + DataStarved.net + Tim Garton + iana&datastarved.net +28900 + Yasashii Syndicate + Stacey Ell + stacey.ell&gmail.com +28901 + Endicott College + Robert Klopotoski, Jr + rklopoto&endicott.edu +28902 + AnHui University of Technology + xIaoyin Zhang + zxy&ahut.edu.cn +28903 + InterFax + ran shoklander (shoko) + shoko&interfax.net +28904 + Arius Software + Michael Neame + mike&ariussoftware.com +28905 + MeshLinx Wireless, Inc + Matt Beckwith + mbeckwith75&yahoo.com +28906 + Cluenet + Chris Breneman + chules&cluenet.org +28907 + ViASSoL (Virtual Applied Scientific Software Laboratory)(formerly 'ViSSoL (Virtual Scientific Software Laboratory)') + Dr. Stanislav Koncebovski + stanislav&chimgan.net +28908 + MUJIN Systems, Inc. + Stephen Kim + stephen.kim&mujinsystem.com +28909 + 4A-Securer + Toney Sean + toneysean&gmail.com +28910 + Ansitaly + Mariotti Mario + mario.mariotti&ansitaly.it +28911 + CompFort Meridian Polska Sp. z o.o. + Marcin Marzec + m_marzec&compfort.pl +28912 + ZURIEL Ltd. + Balazs VAMOS + bvamos&zuriel.hu +28913 + BitTorrent, Inc. 
+ Adam Fritzler + hostmaster&bittorrent.com +28914 + Alstom Signaling Operation LLC (formerly 'GE Transportation Systems Global Signaling, LLC') + Jeff Fries + jeffrey.fries&alstom.com +28915 + Turtlesystems + Russell Seymour + russell.seymour&turtlesystems.co.uk +28916 + Interop Informatica + Romulo Giordani Boschetti + romulo&interop.com.br +28917 + Comtica + Lukasz Spica + lukasz.spica&comtica.pl +28918 + InfoPrint Solutions Company + Harry Lewis + harryl&us.ibm.com +28919 + Borghesia Consulting + Vladimir Kraljevic + office&borghesia.com +28920 + blaulink GbR + Martin Burkert + martin.burkert&blaulink.de +28921 + Anchiva Systems, Inc. + Xuyang Li + xuyang&anchiva.com +28922 + Crescent Group Ltd. + Tyler Heal + t.heal&crescentyk.com +28923 + Luminator Holding, LP + Ross Sivertsen + lrs&luminatorusa.com +28924 + Megatel Industries Corp. + Zack Yu + zack&megatelindustries.com +28925 + BIGLIST Inc. + Omar Thameen + noc-pen&biglist.com +28926 + Mitteldeutscher Rundfunk + Roger Busch + roger.busch&mdr.de +28927 + First Mallorca + Sebastian Gloeckler + sebastian.gloeckler&firstmallorca.com +28928 + Taylor & Francis Group Ltd + Domain Administrator + domainadmin&tandf.co.uk +28929 + RASKAT + Vasily Zuev + vasily&raskat.ru +28930 + Kilowatt S.A. + Vicente Marcos Cartas + vmarcos&kilowattsa.biz +28931 + MiKe software&network SRL + Adrian Deac + ady&mikesnet.ro +28932 + NazerFarzan + Mehdi Mazloomi + mmazloomi&hotmail.com +28933 + Senado de España + Jose Luis Bahillo Pereira + joseluis.bahillo&senado.es +28934 + tick Trading Software AG + Jon Bright + iana_pen&tick-ts.de +28935 + Sacred Heart College + Dennis Punjabi + dennis&shc.edu.bz +28936 + Information Technology ltda + Hernan Valdes Pöo + hernan.valdes&I-Technology.cl +28937 + DASANTPS Inc. + Junhan Lee + junhlee&dasantps.com +28938 + TAS France + Franck Zoccolo + zoccolo&tasfrance.com +28939 + Governikus GmbH & Co. KG + Nils Buengener + nb&governikus.de +28940 + IMP Telekom d.d. + Miha Krejan + miha.krejan&imp-tel.si +28941 + Network Solutions Norway ASA + Fabian Falch + post&nsn.no +28942 + Amperion South East + John Dickinson + johnd&amperionse.gr +28943 + Inter Clamp Management AG + Dieter Gasser + dgasser&ch.oetiker.com +28944 + Meyer Associates, Inc + Brad Mace + bradm&callmeyer.com +28945 + nome consulting ltd + Mike Bell + iana&nome.ca +28946 + E-Band Communications Corp + Jimmy Hannan + j.hannan&ebandcom.com +28947 + Trillenium Works + Lukasz Szanca + ender&ndr.pl +28948 + VeePee + Jérôme Beaufils + iana&veepee.com +28949 + Ownage, Inc. + James Wu + admin&ownage.com +28950 + Virtual U at Union College + Ian Melnick + melnicki&vu.union.edu +28951 + IP Fjarskipti + Hive NOC + noc&iphive.is +28952 + Cypress Solutions Inc. + Frank Li + fli&cypress.bc.ca +28953 + AiNETEK CO.,Ltd. + Max Lee + info&ainetek.com +28954 + And-Or Logic + Syed Mujtaba Ahmed + programmer316&yahoo.com +28955 + 9 to 5 Magic + Ritchie Young + ritchiey&9to5magic.com.au +28956 + Allen Martin Ltd + Paul Davies + box2&allen-martin.co.uk +28957 + Institut für Graphische und Parallele Datenverarbeitung, Universität Linz + Roland Hopferwieser + rhopfer&gup.jku.at +28958 + SNAPin Software, Inc. 
+ David Pratt + dave&snapin.com +28959 + Fargo Electronics, Incorporated + Daniel Fowell + daniel.fowell&fargo.com +28960 + resolux + michel calame + iana&resolux.fr +28961 + PIAX Project + PIAX Administrator + admin&piax.org +28962 + Down to Earth Systems + Michael Trudeau + system&dtesys.com +28963 + deyeb + Li Wei + guxing203&gmail.com +28964 + futbag + Koji Takada + ktakada&tea.odn.ne.jp +28965 + KOSnet-EDV + Alexander Straschil + office&KOSnet.com +28966 + 37signals, LLC + Mark Imbriaco + mark&37signals.com +28967 + Optosecurity Inc. + Eric Pelletier + iana&optosecurity.com +28968 + Affinitic s.p.r.l + Jean-François Roche + jfroche&pyxel.be +28969 + Dana Koch + Dana Koch + dana.public&gmail.com +28970 + Digitec Systems + P. Suresh Kumar + suresh&digitec.in +28971 + Incontech Ltd + Richard Rainbow + richard.rainbow&incontech.co.uk +28972 + Keio University + Tatsumi HOSOKAWA + keio-net&itc.keio.ac.jp +28973 + Landschaftsverband Rheinland (formerly 'LVR InfoKom') + Gürkan Aydin + infokom.it.sec.serv&lvr.de +28974 + Belarusbank + Sergei Filatov + filatov&belarus-bank.by +28975 + GSI Europe SL + Operations Team + op&gsicommerce.eu +28976 + Intersys Sistemas Interactivos + Pedro Pablo Salgado + psalgado&intersys.info +28977 + NovusEdge + Michael Klobe + michael.klobe&novusedge.com +28978 + Canonical Ltd + Chris Jones + chris.jones&canonical.com +28979 + MwGhennndo + Mamoru Sakaue + sakaue.mamoru&mwghennndo.com +28980 + Leiden University, Faculty of Science + Roelof van der Kleij + rvdkleij&chem.leidenuniv.nl +28981 + TKRJasek + Emil Micek + emil.micek&tkrjasek.cz +28982 + Pachi + Pachi Neira Lamas + pachi.neira&lavoz.es +28983 + TechBase Sp. z o.o. + Filip Gasperowicz + f.gasperowicz&a2s.pl +28984 + Universidade Federal de Mato Grosso do Sul + Ronaldo Alves Ferreira + raf&dct.ufms.br +28985 + Staples, Inc. + Dot Mastakouras + dot.mastakouras&staples.com +28986 + FNMS Project + Alfred Reibenschuh + alfredreibenschuh&gmx.net +28987 + marsldap + mars hung + mars&dyinfo.com.tw +28988 + icanetix Software Systems and Consulting GmbH + Jeffrey Brendecke + jwbrendecke&icanetix.com +28989 + Sociedad Mutual "Seguro de Vida" + Natalio Cabizón + ncabizon&smsv.com.ar +28990 + PrivateMonitoring + Rene Janitschke + rene.janitschke&arcor.de +28991 + Ghz Soluções em Informatica Dracena LTDA ME + Matheus Fontes + matheus&ghz.com.br +28992 + American Century Proprietary Holdings, Inc. + Bryon McKee + Bryon_McKee&AmericanCentury.com +28993 + PCM LLC I + Brett L. Scott + bscott&phxcapital.com +28994 + LiveSquare + Brett L. Scott + blscott&livesquare.com +28995 + Dresser Wayne, Dresser Inc. + Tim Weston + tim.weston&wayne.com +28996 + JAE Enterprises + Jorge Guerra + jguerrag&unmsm.edu.pe +28997 + south china university of technology + wang xuepeng + cswangxp&sohu.com +28998 + Pine Tree Systems + Bill McMahon + mcmahon2&comcast.net +28999 + Voible Communications Ltd + Jay Fenton + jay.fenton&voible.com +29000 + Markus-Alexander Matthé + Markus-Alexander Matthé + mmatthe&gmx.net +29001 + TRISKEL TELECOM SL + JOSEP BAUCELLS BOIX + baucells&triskel-telecom.com +29002 + PGGM + Immanuel Noorman + immanuel.noorman&pggm.nl +29003 + Fibernet International + JOSE M. MARIN + fibernet&fibernet.es +29004 + La Voz de Galicia + Antonio Garcia González + antonio.garcia&lavoz.es +29005 + Joint Stock Company "Scientific & Production Enterprise "ORBITA" + Sergey P. 
Korobkov + sergey&orbita.dp.ua +29006 + Vocalink Limited + Julian Barnes + julian.barnes&mastercard.com +29007 + R&D ScanEx + Michael Rusakov + mik&scanex.com +29008 + Kaya Software, LLC + Michael Pellegrini + mpellegrini&kayasoftware.com +29009 + Mirth Corporation (formerly 'WebReach, Inc.') + Jeff Peters + jeffp&mirthcorp.com +29010 + National Bank Of Kuwait + Mr. Mustafa Gangardiwala + mustafag&nbk.com +29011 + HeBIS + Uwe Reh + reh&rz.uni-frankfurt.de +29012 + Quality Technology Services + Jerry Applebaum + gapplebaum&qualitytech.com +29013 + Teldat Sp.J. H. Kruszyński, M. Cichocki + Wojciech Znaniecki + wznaniecki&teldat.com.pl +29014 + Intellengine + Adam Murphy + amurphy0921&mac.com +29015 + Keitai Gaming (formerly 'TallTele') + Anthony Johnson + aj&keitaigaming.com +29016 + Professional Partnership Ltd + Artem Melchuk + melchuka&gmail.com +29017 + Buda + Deepak Siddananja + deepak.siddananja&gmail.com +29018 + Network Security Solutions d.o.o. + Dejan Levaja + dejan.levaja&netsec.co.yu +29019 + ACK Networks, Inc. + Hui Ning + hui.ning&acknetworks.com +29020 + Marco Aspromonti + Marco Aspromonti + marco.aspromonti&gmail.com +29021 + MUGLER AG + Michael Mueller + michael.mueller&mugler.de +29022 + Dorstewitz + Thomas Dorstewitz + thomas&dorstewitz.eu +29023 + FaMAF - Facultad de Matematica, Astronomia y Física - Universidad Nacional de Cordoba - Argentina + Yanina Iberra + iberra&hal.famaf.unc.edu.ar +29024 + OSERYS Systèmes + BEUSCART Dominique + dominique.beuscart&oserys.com +29025 + RECRO-net d.o.o. + Nevenko Bartolincic + Nevenko.Bartolincic&recro-net.hr +29026 + GUZMAN + Hannes Gruber + hg&guzman.at +29027 + NETFINITY EAD + Kliment Toshkov + kliment.toshkov&netfinity.bg +29028 + Jerry Chapman + Jerry Chapman + Jerry_W_Chapman&yahoo.com +29029 + Altierre Corporation + Michael Laws + OIDAdministrator&altierre.com +29030 + Ringier Slovakia, a.s. + Viliam Spetko + spetko&ringier.sk +29031 + ILSC + Christoph Thron + christoph.thron&ilmenauer-studentenclub.de +29032 + Creative Industries + Damien Overeem + d.overeem&creativeindustries.nl +29033 + 4DK Technologies, Inc. + Ilya Ziskind + tech&4dk.com +29034 + Common Sense IT-Consulting GmbH. + Kurt Pikl + kurt.pikl&commonsense.at +29035 + kymz online page + Kym Hames + catziyes&yahoo.com +29036 + CACI International Inc + Jim Morris + jmorris&caci.com +29037 + MooL Invest Kft. + Mihály Orosz + orosz.mihaly&mool.hu +29038 + ICEGEL Kft. + Mihály Orosz + orosz.mihaly&icegel.hu +29039 + Otis College of Art and Design + Robert S. Walters + rwalters&otis.edu +29040 + CX computers & consulting, s.r.o. + Matej Ondrusek + ondrusek&cxcom.sk +29041 + SmartOfficeBuilding + Mathias Döhle + matt&tzi.de +29042 + Toppan + Paolo Toppan + paolo&toppan.it +29043 + Peek&Cloppenburg KG + Jan Strohbehn + sphenic&nurfuerspam.de +29044 + Digital Alert Systems + Thomas Wood + wood&digitalalertsystems.com +29045 + Camp Dresser and McKee, Inc. + Jerrold Weiner + weinerjm&cdm.com +29046 + The Boston Consulting Group, Inc. + Clay Romeiser + ianaoid&bcg.com +29047 + GigaFin Networks + Vatsal Mehta + support&gigafin.com +29048 + Vianet International Ltd + Greg Johnstone + Greg.Johnstone&vianet.travel +29049 + Sichuan Xinhua Winshare Chainstore Co.,Ltd. + benrong tang + tbr&wenxuan.com.cn +29050 + Korcett Holdings, Inc. + Frank Sheiness + frank&korcett.com +29051 + DAIKON Integracion y Desarrollo S.L. + Jorge Gonzalez Villalonga + snmp&daikon.es +29052 + Defzone B.V. + C. Lemaire + info&defzone.com +29053 + Intellio Ltd. 
+ Adorjan Princz + adorjan.princz&intellio.eu +29054 + Bluebell Opticom Limited + Paul Felix McCann + paul&bluebell.tv +29055 + American International Distribution Corporation, Inc. + Blain Sadler + bsadler&aidcvt.com +29056 + logicfish.org + Mark Fisher + logicfish&gmail.com +29057 + Universitaet fuer Bodenkultur, Wien (BOKU) + System Administrators + hotline&boku.ac.at +29058 + Neology Corporation + Ron Burdette + ronb&neology-rfid.com +29059 + FAL Solutions + David Gentry + dgentry&falsolutions.com +29060 + Schnapper Vision Studios + John Reynolds + schnapper&free.net.nz +29061 + AFORE Solutions, Inc. + Samuel Effah + seffah&aforesolutions.com +29062 + HEAnet Limited + HEAnet Network Operations Centre + noc&heanet.ie +29063 + Patrick McDonnell + Patrick McDonnell + kc9ddi&arrl.net +29064 + ODDO & CIE + CAMINO Marc Olivier + mocamino&oddo.fr +29065 + vzcs + Dennis van Zuijlekom + iana-assigned-numbers&vzcs.nl +29066 + enovatia + Jens Bliemeister + jens.bliemeister&enovatia.de +29067 + The Loop Communications + Richard Watson + richard&the-loop.co.za +29068 + OpenSource Training Ralf Spenneberg + Ralf Spenneberg + iana&spenneberg.net +29069 + HartmannSoft + Lars Hartmann + lars.hartmann&hartmannsoft.de +29070 + Conversion Co., Ltd. + Naohito Toike + support&conversion.co.jp +29071 + ON DEMAND Microelectronics AG + Robert Schoen + it&odmsemi.com +29072 + Neopost + Sébastien CANTOS + s.cantos&neopost.com +29073 + Western Avionics + Garry Thuna + gthuna&westernAvionics.com +29074 + Epic Aviation, LLC + Carl Lovejoy + clovejoy&airbpaviation.com +29075 + Oncology & Hematology Associates of Southwest Indiana, P.C. + Phil Bolenbaugh + pbolenbaugh&ohaev.com +29076 + CP Sharing + Henk Dick + hdick&pridis.com +29077 + Gentgeens Lodge + Kevin Squire + gentgeen&linuxmail.org +29078 + SynapSense Corporation + Kurt Sowa + ksowa&synapsense.com +29079 + Cryptologic Inc. + Lori Welland + pem.administrator&cryptologic.com +29080 + Kiong Software + Wai Kiong Choy + waikiong.choy&kiongsoftware.com +29081 + ISI.NC + Matthias Bourillon + mb.isi.nc&gmail.com +29082 + L2C2 Technologies + Indranil Das Gupta + indradg&l2c2.co.in +29083 + PKO BP SA + Michał Wersołowski + PKI.powiadomienia&pkobp.pl +29084 + OVH + Octave KLABA + iana-pen&ovh.net +29085 + Conergy AG + Jens Haustedt + j.haustedt&conergy.de +29086 + Bethlehem Lutheran School + Bryan Hoover + InformationTechnology&blcbls.org +29087 + roullier + Christophe Ramon + cramon&roullier.net +29088 + SpinVox Ltd. + Jeremy Spykerman + oid.admin&nuance.com +29089 + Intec Software Solutions + Michael Gargan + michael&intec.ie +29090 + Sendza, Inc. + Ken Adey + kadey&sendza.com +29091 + FriendlySNMP + Friendly SNMP + iana&friendlysnmp.org +29092 + Wirama + Rob Barton + rob.barton&wirama.com +29093 + Stekfon + Alexander Lunyov + sol289&gmail.com +29094 + Aquarius Telecom Technologies + Kan Wang + kwangok&gmail.com +29095 + AeroSat Avionics LLC + Jon Watson + jwatson&aerosat.com +29096 + NewACT Ltd. + Tal Barenboimm, Director of IT + tal.barenboim&newact.com +29097 + TRIGLAV, Zdravstvena zavarovalnica, d.d. + Simon Vidmar + simon.vidmar&zdravstvena.net +29098 + DRM Digital Radio Mondiale + Anne Fechner + projectoffice&drm.org +29099 + Larimart S.p.A. + Marco Stella + marco.stella&larimart.it +29100 + Lithustech Sistemas Eletrônicos LTDA. + Amauri Luis Mocki Junior + amauri&lithus.com.br +29101 + Interstate Gas Supply + Geoff Huffman + ghuffman&igsenergy.com +29102 + Cargill Inc.
+ Federico Guerrini + federico_guerrini&cargill.com +29103 + Naropa University + Barry Townsend + barry&naropa.edu +29104 + Felix Tiede + Felix Tiede + iana&pc-tiede.de +29105 + Intelleflex Corp. + Heena Nandu + hnandu&intelleflex.com +29106 + Xangati + Xiaohong Pan + xiaohong&xangati.com +29107 + Key Solutions + Pim van Stam + pim&keysolutions.nl +29108 + Enleiten Inc. + Eric Hedberg + ehedberg&enleiten.com +29109 + Hoover Treated Wood Products, Inc. + Randall S. Kelley + rkelley&frtw.com +29110 + Woodside Fabrics Limited + Chris March + chris.march&woodsidefabrics.com +29111 + Beijing Poweron Technologies Co., Ltd. + Alvin Jiang + jiffy&tom.com +29112 + Biblioteca de Catalunya + Ramon Novoa + rnovoa&bnc.cat +29113 + Bank Pekao S.A. + Jan Andrzej Malinowski + wbb&pekao.com.pl +29114 + SecureW2 + Tom Rixom + info&securew2.com +29115 + ECCO Sko A/S + Alex Mærsk + alm&ecco.com +29116 + Instituto Politecnico de Setubal + Renato Delgado + renato_delgado&hotmail.com +29117 + Xembedded, Inc. + John H. Sayer + jsayer&xembedded.com +29118 + BOSSIO SOLUTIONS & SERVICES SL + Juan Domingo Bossio + bossio&hotmail.com +29119 + Joint Stock Company All-Russian Scientific Research Institute of Television and Radio Broadcasting + Andrew Otvetchikov + it&vniitr.ru +29120 + SYSOON SARL + Martin Dano + dano&sysoon.com +29121 + NZN + Emanuelis Norbutas + em&nuel.is +29122 + TCX Computer Technology + Hal Finney + hal.finney&gmail.com +29123 + Jalasutram, Inc. + Murali Jalasutram + mjalasutram&gmail.com +29124 + IMAGENICS Co,Ltd. + Takayuki Kono + kouno&imagenics.co.jp +29125 + Advance Interactive Technologies Pte. Ltd + Ching Kwee Chung + chingkc&ait.sg +29126 + Osterholm & Associates + Eric Osterholm + eric&osterholm.org +29127 + SV Corporation + Vinh NGUYEN + vinh&spatialvoice.com +29128 + Mindwell Technologies + Jason Kohles + admin&mindwell.com +29129 + dg-solutions + Daniel Gschliesser + dg&dg-solutions.at +29130 + Lithuanian Academy of Physical Education + System Administrator + admin&lkka.lt +29131 + PERSONAL_JG + Joyabrata Ghosh + joy.career&gmail.com +29132 + CalNet + M. de Ridder + marnix.de.ridder&rivm.nl +29133 + VoiceCollect GmbH + VoiceCollect + service&voicecollect.de +29134 + SpeedXS + Attilla de Groot + attilla&speedxs.nl +29135 + Salvadè S.r.l. + Marzio Muscionico + marzio&salvade.com +29136 + Toys R Us Corporation + Jeff LoSpinoso + jeff.lospinoso&toysrus.com +29137 + michaelgerzabek.com(R) + Michael Gerzabek + me&michaelgerzabek.com +29138 + TWIC Root + Rex Lovelady + rex.lovelady&dhs.gov +29139 + UNICOM DATA GROUP TECNOLOGIA LTDA + RENATO FERREIRA + josield&flexvision.com.br +29140 + ONEDOC AB + Olle Mårtensson + olle.martensson&gmail.com +29141 + LiveOps + Pete Fritchman + petef&liveops.com +29142 + ViewPlus Technologies, Inc. + Network Administrator + netadmin&viewplus.com +29143 + CableWorld Ltd. + Gabor Kiss + cableworld&cableworld.hu +29144 + Unassigned + Removed 2011-08-16 + ---none--- +29145 + K&P Computer GmbH + Roland Weckwerth + weckwerth&kpc.de +29146 + SwapsWire Ltd + Brian Power + brian.power&swapswire.com +29147 + Proficient Technology + Wes Shaddix + wshaddix&gmail.com +29148 + Axolotl Corp. + Nick Radov + nradov&axolotl.com +29149 + data inform srl + Marco Marocco + marco.marocco&datainform.it +29150 + COMLAB AG + Urs Roesti + urs.roesti&comlab.ch +29151 + Live Data Group, inc. + Szu-Ching Peckner + itstaff&livedatagroup.com +29152 + Palo Alto Unified School District + Isidro Pimentel + ipimentel&pausd.org +29153 + Ridgecrest Financial, Inc. 
+ Reilly Hayes + admin&ridgecrestfinancial.com +29154 + Electromagnetic Technologies Industries Inc. + Peter Cappiello + pete&etiworld.com +29155 + Tews Technologies GmbH + Uwe Tews + utews&tews.com +29156 + Byte Dynamics, SRL + Adrian Ilarion Ciobanu + cia&fatbit.pipe2.net +29157 + Bytemark Hosting + Patrick Cherry + iana&bytemark.co.uk +29158 + Alinea Software Solutions SL + J.C. Ramos + jcramos&alineasol.com +29159 + Nodak Flying Club, Inc. + Thomas Vacek + twvacek&hotmail.com +29160 + L2 Enterprises + Chris Linstruth + cjl&qnet.com +29161 + Hidden-City.NET + Dragan Stanojevic - Nevidljivi + invisible&hidden-city.net +29162 + china lottery online co. ltd. + wang senlin + wangsl&clo.com.cn +29163 + darrow.com + Darrow Cole + iana_01&darrow.com +29164 + EnabledPeople LLC + Sergey Pecherkin + highlander&linux-online.ru +29165 + Antech + Vincenzo Allia + v.allia&antech.it +29166 + NetQoSt + Martin florent + florent&netqost.net +29167 + ISTEC GmbH + Stefan Zobel + stefan.zobel&lycos.de +29168 + T-Mobile International + Stefan Ponge + stefan.ponge&t-mobile.de +29169 + LLC "Astelit" + Valentyn Shapoval + valsh&microsoft.com +29170 + Interactive Quality Services Inc. + Shan Brickey + sbrickey&iq-services.com +29171 + ESET, spol. s r.o. + Palo Luka + luka&eset.sk +29172 + Bresnan Communications, LLC. + Advanced Services - Systems Engineering + se&bresnan.com +29173 + InSpatial LLC + George Nill + george.nill&inspatial.com +29174 + rPath, Inc. + Brett Adam + sysadmin&rpath.com +29175 + phasma.nl + Remko Christ + trisooma&xs4all.nl +29176 + FIRST RABBIT GmbH + Nils Doormann + nils&first-rabbit.de +29177 + Umlautus + Jakob Saternus + jakob&uuc.se +29178 + California State University, Bakersfield + Russell Jackson + noc&csub.edu +29179 + Aginova Sàrl + Reshad Moussa + info-ch&aginova.com +29180 + Ponsse Oyj + Reijo Hynynen + reijo.hynynen&ponsse.com +29181 + Ecole de technologie superieure + Patrice Dion + Patrice.dion&etsmtl.ca +29182 + AMI dept, Sensus Metering Systems, Inc. + Eric Kidder + eric.kidder&sensus.com +29183 + 6PIXIES International + Philippe Gagnon + heelios&6pixies.com +29184 + Humor Rainbow, Inc + David Cross + dcross&okcupid.com +29185 + Digital Fountain Inc. + Mark Carey + mcarey&digitalfountain.com +29186 + Innovative IT Architects. LLC + Clint Crigger + ccrigger&iitaconsulting.com +29187 + Revelation TV + Dan Donoghue + dan&revelationtv.com +29188 + Wavex Technologies Pte Ltd + Wu Zheng + zheng.wu&wavex-tech.com +29189 + Roadside + Johan Kok + jkok&roadside.nl +29190 + Valyd Technologies Pvt Ltd + Sandeep Nekkanty + sandeepn&valytech.com +29191 + Dynatech s.r.o. + Miroslav Bartánus + bartanus&dynatech.sk +29192 + OpusVL + Stuart Mackintosh + sm&opusvl.com +29193 + Fabrix TV LTD. + Ram Ben-Yakir + ram&fabrix.tv +29194 + Shelby County Board of Education + J.D. Chaves + jdchaves&shelbyed.k12.al.us +29195 + ZeelandNet b.v. + Serge Maandag + serge&maandag.com +29196 + Lattice, L.L.C. + Majdi S. Abbas + msa&latt.net +29197 + Kaazing Corp. + Brian Albers + brian.albers&kaazing.com +29198 + PhR + Philippe Rochat + fake236&hispeed.ch +29199 + TERADA ELECTRIC WORKS CO.,LTD + KUNIYOSHI YOSHINO + kuniyoshi.yoshino&terada-ele.co.jp +29200 + UNETsystem Co., Ltd. + kenneth choi + kenneth&unetsystem.co.kr +29201 + Rawenstvo OJSC + Alexander A. Vasilyev + vaa&rawenstvo.ru +29202 + Hari Sekhon + Hari Sekhon + hpsekhon&googlemail.com +29203 + Grupo Santander + Mariano de la Cruz + seguridad_corporativa&gruposantander.com +29204 + Senex Technologies Inc.
+ Phil Ciraolo + pciraolo&senex.ca +29205 + NewMedia-NET GmbH - Division dd-wrt + Christian Scheele + chris&dd-wrt.com +29206 + vargo.us + Kevin Vargo + kevin&Vargo.us +29207 + Rescentris, Inc. + Joseph Spitzner + joe.spitzner&rescentris.com +29208 + Zandanel Informationstechnologie + Johann Zandanel + johann&zandanel.net +29209 + Hermann Ehlers Haus + Torsten Irländer + torsten&heh.uni-osnabrueck.de +29210 + KARCHER & LADWEIN Ingenieurpartnerschaft fuer Bautechnik + Thomas Maria Ladwein + ladwein&klib.de +29211 + DiOmega GmbH + Daniel Pfirrmann + info&diomega.de +29212 + HOV Services, LLC + Murat Arslan + murat.arslan&hovservices.com +29213 + ARBU + Armin Burchardt + armin&arbu.eu +29214 + Dairiki Solutions + Geoffrey T. Dairiki + dairiki&dairiki.org +29215 + Eyeball Networks Inc. + Feng Wang + feng&eyeball.com +29216 + Onode Server Project + Olaf Tiemann + olaf.tiemann&onode.de +29217 + Grant Street Group, Inc. + Web Master + webmaster&grantstreet.com +29218 + The ALPHAVICTOR® Companies + Bruce Micek + ceo&alphavictor.com +29219 + Wuille bvba + Pieter Wuille + pieter&wuille.biz +29220 + Ciputra Group + Bayu Krisnawan + krisna&ciputra.ac.id +29221 + Enforta + Denis Kochmashev + d.kochmashev&enforta.com +29222 + Weisser + Christian Weisser + ch.weisser&gmx.at +29223 + Stadtverwaltung Bornheim + Thomas Seck + iana-contact&stadt-bornheim.de +29224 + CoreTrek AS + Aslak Evang + drift&coretrek.no +29225 + Marko Karg + Marko Karg + marko&gutski.de +29226 + BERGERAT MONNOYEUR LOCATION + NGO Su Hong + s.ngo&bm-loc.fr +29227 + Criticall Limited + Stuart Mackintosh + stuartm&criticall.co.uk +29228 + Distributed Medical AB + Björn Lundmark + bjorn&distributedmedical.com +29229 + OutServ, Inc. + Anthony Quinn + aquinn&outservinc.com +29230 + Direction Générale de l'Aviation Civile + Dominique Ruiz + dominique.ruiz&aviation-civile.gouv.fr +29231 + Confédération Française Microtel Multimédia + François Lecluse + zaurusfr&2m01.net +29232 + coreIPM + Gokhan Sozmen + iana-pen&coreipm.com +29233 + MCL, Inc. + Richard P. Earl + rearl&mcl.com +29234 + Titan Publishing Group Limited + Kevin Wooff + john&paperman.net +29235 + Protey + Sergey Skvortsov + skv&protey.ru +29236 + APHP Beclere + Laurent LEPRETRE + laurent.lepretre&abc.aphp.fr +29237 + New Hampshire Employment Security + Bangaru Adabala + bangaru.adabala&nhes.nh.gov +29238 + AdBrite, Inc. + Frank Stutz + frank&adbrite.com +29239 + SYSTEMSKILLS COMPUTER TECHNICAL SERVICES LTD. + Bill Dagg + sysadmin&consealmail.com +29240 + HID Global + Scott Guthery + sguthery&hidcorp.com +29241 + Spry Hosting + Tres Wong-Godfrey + treswg&spry.com +29242 + Peabody Energy + Gregory J. Weatherford + gweatherford&peabodyenergy.com +29243 + Resolve Systems + Thanes Tantiprasut + duke&resolve-systems.com +29244 + Lime Wire LLC + Akshay Kumar + akshay&limewire.com +29245 + Commonwealth of Pennsylvania, Governor's Office of Administration + Frank Morrow + fmorrow&state.pa.us +29246 + KnowledgeTrax, LLC + L van der Feltz + lvanderfeltz&knowledgetrax.com +29247 + Secioss Corporation + Kaoru Sekiguchi + sekiguchi.kaoru&secioss.co.jp +29248 + Wanmi Telecom Technologies Co.,Ltd + danghui he + hedh0089&sina.com +29249 + Revue VTK vzw + Ruben Faelens + revue&vtk.be +29250 + EPM expert + Erdősi, Péter Máté + perdosi&chello.hu +29251 + LNM IIT + parth sarthi + parth2005&gmail.com +29252 + Server System Infrastructure (SSI) + Jim Ryan + JIM.RYAN&INTEL.COM +29253 + KBK Consultant + Kovács, Béla Károly + kovacsbk&t-email.hu +29254 + Pulsar Mobile Sp. z o. o. 
+ Łukasz Pasek + lukasz.pasek&pulsar.com.pl +29255 + HRZ Zittau/Görlitz + Andy Erd + erd02&stud.hs-zigr.de +29256 + HALLESCHE Krankenversicherung auf Gegenseitigkeit + Gert Hinz + gert.hinz&hallesche.de +29257 + Fedasil + Cyrille Bollu + cyrille.bollu&fedasil.be +29258 + Mittelrhein-Verlag GmbH + Thorsten Jungblut + tj&rhein-zeitung.net +29259 + California State University, Chico + David Fuhs + oidowner&mail.csuchico.edu +29260 + Tempest Security Intelligence + Fabio Jun Takada Chino + fabio.jun&tempest.com.br +29261 + emQbit + Nelson Castillo + nelson&emqbit.com +29262 + cybermah.com + Dana Mah + cybermah&hotmail.com +29263 + Max Planck Institute of Biochemistry + H. Lehnert + hlehnert&biochem.mpg.de +29264 + apc interactive solutions ag + Gerald Wallner + gerald.wallner&apcinteractive.net +29265 + KIRP GmbH + Siegfried Viehmann + Siegfried.Viehmann&kirp.de +29266 + xvpn + I. Garbe + iana&xvpn.de +29267 + Teleologic Learning Company + Joseph Lamoree + jlamoree&teleologic.net +29268 + Unitarian Universalist Association + Sean Hogan + shogan&uua.org +29269 + SiteXs Netzwerkloesungen & IT-Consulting GmbH + Hannes Jahn + snmp&sitexs.at +29270 + Rogers West + Jason Rogers + Rogers.JA&rogerswest.com +29271 + OBS Technology + Dave Cole + dcole&obsmail.com +29272 + Biological and Popular Culture, LLC + Allan G. Schrum + aschrum&biopop.com +29273 + GCS Software & Consulting e.U. + gerhard castellitz + gerhard.castellitz&utanet.at +29274 + BusinessFabric Inc + Srinivasa Kunamneni + srini&businessfabric.com +29275 + Autometrix Precision Cutting Systems + Caleb Callaway + caleb&autometrix.com +29276 + Exim Internet Mailer + Bernard Quatermass + bernardq&exim.org +29277 + BookieStreet, Ltd. + Andy Chantrill + andy&bookiestreet.com +29278 + Dima Technologies Co.,Ltd + Sun Le + sunle&sunlecn.net +29279 + WuXi ZhongXing Optoelectronics Technology Co., Ltd. + Xianqin Li + lixianqin&wxzte.com +29280 + Widgets + Paiboon Sriwilaijaroen + sripaiboon&hotmail.com +29281 + Trelleborgs Hamn AB + Tomas Borowiec + tomas.borowiec&port.trelleborg.se +29282 + Azuki Software + Robert Bakic + robert.bakic&gmail.com +29283 + dscnet.org + Stefano Sasso + stone&dscnet.org +29284 + Axxana + Nezer J. Zaidenberg + scipio&axxana.com +29285 + Aquanasoft + Julius Schmidt + aiju&aquanasoft.de +29286 + Imagine Communications + Lior Morad + lior&imagine-com.com +29287 + Finite Communication Inc. + Val Dodge + iana&finite.com +29288 + Altus Networks Inc + Todd Hooper + todd&altusnetworks.com +29289 + Advertising.com + Kevin Stone + kstone&advertising.com +29290 + WeGo Health, Inc + Monte Brown + monte&wegohealth.com +29291 + SECRETARIA DE LA FUNCION PUBLICA + EDDER ESPINOSA ARELLANO + EDDER&FUNCIONPUBLICA.GOB.MX +29292 + Ubiquitous Systems Ltd + Harsh Patel + harsh.patel&ubisys.co.uk +29293 + Royal Roads University + Stephen Beaudry + steve.beaudry&royalroads.ca +29294 + Meshweave + Maxime Curioni + maxime&meshweave.com +29295 + IMESC + Francisco Garcia Gonzalez + francisco.garcia&imesc.eu +29296 + IDX Company, Ltd. + Yasushi Wada + wada&idx.tv +29297 + Shenzhen vStrong technology Co.,Ltd. + Tang Cheng + tangcheng&v-strong.com +29298 + Ador Powertron Ltd. + Kiran Koralkar + khkoralkar&adorpower.com +29299 + Zarovs Stelsels Bk + Frederick Peter Eek + eekf&ieee.org +29300 + SSI Schaefer Peem GmbH + Nikolett Csirmaz + n.csirmaz&ssi-schaefer-peem.com +29301 + Braxtel Communications Inc + John Tarlton + jtarlton&braxtel.com +29302 + eSI Mobile Solutions. 
S.L.L (GENAKER) + Abraham Iglesias + abraham.iglesias&genaker.net +29303 + Telappliant Ltd. + Harry Roberts + harry&telappliant.com +29304 + Services for Business IT Ruhr GmbH + Marcel Breuer + marcel.breuer&sbi-ruhr.de +29305 + IPFIX Reverse Information Element Private Enterprise + RFC5103 Authors + ipfix-biflow&cert.org +29306 + Useful Networks + Mike Key + mike&useful-networks.com +29307 + Loki Industries + Brad Stockdale + brad.stockdale&gmail.com +29308 + RPGfiction + Richard Homonnai + richard.homonnai&rpgfiction.net +29309 + IBM Mittelstand Systeme + Ardeshir Arian + arianard&de.ibm.com +29310 + William Petersen Elektronik A/S + William Petersen + wp&wpeas.dk +29311 + Itay Be'erli + Itay Be'erli + itay.beerli&gmail.com +29312 + B&R Industrial Automation GmbH + Stefan Stemp + stefan.stemp&br-automation.com +29313 + GloboTech GmbH + Beat Bolzern + info&globotech.ch +29314 + WDV GmbH + Martin Dümig + m.duemig&wdv-gmbh.de +29315 + VAUDE Sport GmbH & Co. KG + Christian Harf + christian.harf&vaude.com +29316 + Astute Networks, Inc. + James Mai + jmai&astutenetworks.com +29317 + Global Technology Inc. + Eddy Kao + ekao&primeworldtw.com +29318 + Doquent Inc + Pawan Kumar + pk&doquent.com +29319 + NTT Communications Corporation + Makoto Harada + makoto.harada&ntt.com +29320 + regio iT aachen gmbh + Dietmar Körfer + dietmar.koerfer&regioit-aachen.de +29321 + IBL Software Engineering, Ltd. + Stefan Wimmer + stefan.wimmer&iblsoft.com +29322 + Cabinplant A/S + Steen Thyrri Sorensen + sts&cabinplant.com +29323 + Lesun Technologies Co.,Ltd + Sun Le + sunle&sunlecn.net +29324 + Voxy Communications + Yuri Arabadji + yuri.arabadji&gmail.com +29325 + Worshipworks Ministries International + Jayson Crisman + jcrisman&cityworship.net +29326 + IBM Rational + Andrew Himmer + ahimmer&us.ibm.com +29327 + barrang + Michael Chesterton + michael.chesterton&gmail.com +29328 + The Stable Group + Jef Brink + jbrink&sigmaiq.com +29329 + Trade Me Ltd + Platform Team - Matt Duguid + matt.duguid&trademe.co.nz +29330 + Cryptic Studios + Bruce Rogers + brogers&crypticstudios.com +29331 + Swan Island Networks, Inc. + Adam Montville + adam.montville&swanisland.net +29332 + Frank Meisschaert + Frank Meisschaert + Frank.Meisschaert&telenet.be +29333 + JBlade LLC + Tom Larkens + pd&jblade.com +29334 + Provider.nl + Jeroen Boonstra + jeroen&provider.nl +29335 + Zajil International Telecom K.C.S.C. + Rami AlZaid + ralzaid&zajil.com +29336 + Bombardier Transportation (Signal) Germany GmbH + Christoph Lüders + christoph.lueders&de.transport.bombardier.com +29337 + Bynari + Trey Tabner + trey&bynari.net +29338 + Kestrel Wireless, Inc. + Steve Davis + steve.davis&kestrelwireless.com +29339 + 3guppies.com + Brian Finney + domainadmin&fishtank.3guppies.com +29340 + joncaves.com + Jon Caves + joncaves&btinternet.com +29341 + Ingenieurbuero Schmid + Ernst Schmid + snmp&ejschmid.de +29342 + Browave Inc. + Herbert Chen + herbert.chen&browave.com +29343 + Grundig SAT-Systems GmbH + Martin Mollek + martin.mollek&gss.tv +29344 + ShangHai HuaBo Taifu Internet Technology Co.,Ltd + wu jian + wuj&huabo.net +29345 + Softing AG + Dirk Palme + support.automation&softing.com +29346 + UKN Group Limited + Tony Reynolds + support&ukngroup.com +29347 + Amadeus Global Travel Ltd. + Ariel Shatil + rel&amadeus.co.il +29348 + RotaBanner Next + Vadim Nesterov + nucleusv&gmail.com +29349 + mediagrids plc + Chris Tingley + ctingley&mediagrids.com +29350 + Remuda Ranch Center for Anorexia & Bulimia Inc.
+ Mitchell Fisher + mitchell.fisher&remudaranch.com +29351 + Fallibroome High School + Ant Owen + ict&fallibroome.cheshire.sch.uk +29352 + IBEX Technology Co.,Ltd. + Toshikazu Morita + morita&ibextech.jp +29353 + Brewer Science, Inc. + Ron Chinn + rchinn&brewerscience.com +29354 + Health Dialog Services Corporation + Jennifer Kendall + jcrusade&healthdialog.com +29355 + PING e.V. + Daniel Hess + dh&ping.de +29356 + Montville Enterprises LLC + Adam W. Montville + adam&montville-enterprises.com +29357 + Fenster Software + Yaacov Fenster + iana&fenster-software.com +29358 + Zlango + Yaacov Fenster + fenster&zlango.com +29359 + Industrie Dial Face S.p.A. + Massimo Macrì + massimo.macri&idf-hit.com +29360 + AMS Media Sverige AB + Stefan Halen + it-avdelningen&aemedia.com +29361 + Magma + Brian Stark + bstark&magma.com +29362 + Tomizone Limited + Paul Barnetta + paul&tomizone.com +29363 + CHCLab.net + Choong Hong Cheng + hongcheng.choong&gmail.com +29364 + Applied Electro-Magnetics P. Ltd + Ravindra D. Bankar + bankar&aemindia.com +29365 + Penta Investments, a.s. + Robert Maly + maly.robert&penta.sk +29366 + BMI spa + Umberto Buelli + nostalgia&nostalgia.it +29367 + Dreamlab Technologies AG + André Roth + ldap-master&dreamlab.net +29368 + IASC + Wanchin Lin + sales&iasc.com.tw +29369 + Tom Collins L.L.C. + Tom Collins + tc314&hotmail.com +29370 + Daniel Mandler + Daniel Mandler + daniel.mandler&web.de +29371 + CDC Arkhineo + Bruno Dillet + bruno.dillet&cdcarkhineo.fr +29372 + BidiX + BidiX + bidix&bidix.info +29373 + Dillet.net + Bruno Dillet + bruno&dillet.net +29374 + elegiac + Julien Allanos + dju&elegiac.net +29375 + Selekron Microcontrol s.l. + Miguel Angel Gil + mgil&selekron.com +29376 + Trägerverein Bürgernetz Bamberg e.V. + Thorsten Meinl + Thorsten&meinl.bnv-bamberg.de +29377 + Automate The Things, LLC + Jeremy Hahn + mail&jeremyhahn.com +29378 + Unium (formerly 'CoCo Communications Corp.') + Dennis Edwards + dedwards&uniumwifi.com +29379 + Lostar Bilgi Guvenligi A.S. + Murat Lostar + murat&lostar.com +29380 + Beekhul Holdings Pty Ltd + Mark van Kerkwyk + iana-contact&vk.net +29381 + McNeese State University + Wendell Jones + OID.PEN&mcneese.edu +29382 + Dedalus S.p.a. + Olianti Andrea + andrea.olianti&deda.com +29383 + sysview Inc + Tsay Yaw Hann + tyheric&gmail.com +29384 + PD Consulting and Security + David Rawling + djr&pdconsec.net +29385 + ANS + Krzysztof Oledzki + krzysztof.oledzki&ans.pl +29386 + Zetes 3iV CC + Timur Evdokimov + timur.evdokimov&voice.zetes.com +29387 + enovance + Nicolas Marchal + n.marchal&enovance.com +29388 + Activant Solutions + David Mellinger + david.mellinger&activant.com +29389 + LOGIKonline Inc. + Dave Friedel + dave&logikonline.com +29390 + Digital IMS + Jesse Angell + info.tech&digitalims.com +29391 + Strategic Data Pty Ltd + Adam Clarke + support&strategicdata.com.au +29392 + a3 systems GmbH + Gunter Bach + gunter.bach&a3systems.com +29393 + AETA AUDIO SYSTEMS + Serge DE JAHAM + sdejaham&aeta-audio.com +29394 + Centre Hospitalier Régional Universitaire de Lille + Guillaume DERAEDT + g-deraedt&chru-lille.fr +29395 + Pheedo, Inc. + Jay Rossiter + systems&pheedo.com +29396 + VeriSat AS + Petter Chr. Amundsen + pca&verisat.no +29397 + Engineering System Solutions + IT Administrator + admin&es2eng.com +29398 + Workscape + Christopher Apgar + hostmaster&workscape.com +29399 + SIDSA + Carlos Santos + carlos.santos&sidsa.es +29400 + OKC AG + Christoph Spindler + webmaster&okc.ch +29401 + Fullnet, Inc. 
+ John Fullington + fje&fullnet.com +29402 + HELEX + Radis Vassilis + v.radis&helex.gr +29403 + COMWAX + Clementine CREPIN + ccrepin&comwax.com +29404 + Intelligens Rendszerek Divizio Kft. + Mathe Balazs + mathe.balazs&ird.hu +29405 + Unicel do Brasil Telecomunicações LTDA + Farley A. Sousa + snmp&unicelbrasil.net +29406 + Connectbeam + Shinji Yaguma + syaguma&connectbeam.com +29407 + Portal Andres Jadan + Andres Jadan + asjm123&gmail.com +29408 + KEPCO + Moon-suk, Choi + cms96&kepri.re.kr +29409 + Innovation Wireless Inc. + Simon Hsieh + simon.hsieh&iwi.net.tw +29410 + Firesteel Technologies, Inc + Don Schwemle + donschwemle&firesteeltechnologies.com +29411 + Instituto de Tecnologia da Informação e Comunicação do Estado do Espírito Santo - PRODEST + Lívio Zanol Pereira de Souza Puppim + livio.zanol&prodest.es.gov.br +29412 + Prodea Systems + Jason Cotton + jason.cotton&prodeasystems.com +29413 + Access Layers LTD + MIB Admin + cfo2008&accesslayers.com +29414 + Opengate Data Systems + Matthew Bush + matthew.bush&opengatedata.com +29415 + Autentia Real Business Solutions + Alejandro Perez Garcia + alejandropg&autentia.com +29416 + ORBIT TECHNOLOGY GROUP + Eli Menachem + elim&orbit-ltd.co.il +29417 + Felix Schwarz Software-Entwicklung + Felix Schwarz + felix.schwarz&schwarz.eu +29418 + Tracenetwork Corporation Sdn. Bhd. + Mohd Nawawi Mohamad Jamili + nawawi&tracenetworkcorporation.com +29419 + Kapital Insurance Group + Eduard Shevtsov + Eduard.Shevtsov&ifdk-insurance.ru +29420 + West Bengal University of Technology + Prof. Ashoke R. Thakur (Vice Chancellor) + vc&wbut.ac.in +29421 + Kargo Global Inc + Florian Hoenig + fhoenig&kargo.com +29422 + Support Soluções em Informática LTDA + Renan José Schiavo + renan&support.inf.br +29423 + 2V S.r.l. + Damiano Venturin + info&2vsrl.it +29424 + Embryo Web Solutions Ltda + Janser Priori + janser.priori&embryo.com.br +29425 + Materiales Electricos y Mecanicos - MEM Ltda + Christian Pérez + c.perez&digifix.com.co +29426 + State of Mind + Patrick Koetter + patrick.koetter&state-of-mind.de +29427 + EZSAFE + liang ios + gdezsafe&gmail.com +29428 + IKOEmed + Jacob Foshee + jfoshee&ikoemed.com +29429 + SynergyRT + Hansen Chen + hansenphysics&gmail.com +29430 + HMS GmbH + Martin Hecht + mhecht&hms-dev.de +29431 + FaithFamily Academy + Chris Savage + csavage&faithfamilyacademy.org +29432 + Hurco Automation Ltd. + Bentham Yang + bentham&hacontrols.com.tw +29433 + Digma Inc. + Sergey Listopad + psychosensor&gmail.com +29434 + MRK - Media AG + Ulrich Kamke + ulrich.kamke&mrk-media.de +29435 + Wisekey ELA + Wences Basterra + wbasterr&es.wisekey.com +29436 + Central Technology Services + Sam Richards + Sam_Richards&ctsinc.biz +29437 + HNP + Enrique Diaz de la Puente + ediazpuente&telefonica.net +29438 + ARAMEX International Ltd. + Hani Barghout + hani&aramex.com +29439 + Untold Structures P/L + Pronob Pal + pronob&visualanalytics.com.au +29440 + Université Louis Pasteur - UFR de Mathématique et d'Informatique + Christophe BOCCHECIAMPE + cb&dpt-info.u-strasbg.fr +29441 + Quanzhou Normal University + Wu Weibin + abangercn&gmail.com +29442 + ARISE + Marcin Giedz + giedz&arise.pl +29443 + ShangHai RuiGao InformationTechnologies Co.,Ltd + Li Yang + yl.666666&yahoo.com.cn +29444 + Sondara Solucions + Anton Velo + techdept&sondara.com +29445 + Integra S.r.l. + Gabriele Ceranto + gabriele.ceranto&integra-web.it +29446 + Telematik Design + Dipl.-Ing. 
Klaus Zachmann + klaus.zachmann&telematik-design.at +29447 + Nordic Growth Market NGM AB + Assar Svensson + support&ngm.se +29448 + Gloucester Research Ltd + Imran Chaudhry + imranc&gresearch.co.uk +29449 + Resolvity, Inc + Hao Wu + hao.wu&resolvity.com +29450 + Digistar Telecomunicações S.A. + Odone Afonso Silva de Moraes Junior + engenharia&digistar.com.br +29451 + Interop Technologies + Interop Tech + tech&interoptechnologies.com +29452 + DigitalPersona, Inc. + Oxana Ivanisov + Oxanai&digitalpersona.com +29453 + Brendt Peter + Peter Brendt + pbrendt&aon.at +29454 + O.D.S., Inc. + Allen Brewer, Ph.D. + abrewer&odsinc.us +29455 + CRAHAY.EU + Gaetan Crahay + oidmaster&crahay.eu +29456 + modas mobile Datensysteme GmbH + Thomas Graf + tg&modas.de +29457 + Peer Fusion, Inc. + Richard Levy + richard&peerfusion.com +29458 + ACETEL + Jangmin Jin + jangmin.jin&acetel.co.kr +29459 + Gobierno de Aragón + Sergio Loras + sloras&aragon.es +29460 + Red Bee Media + Terry Downing or Simon Brand + terry.downing&redbeemedia.com +29461 + Authentity - Certificaton Entity + Eng. João Vaz Martins + jmartins&prologica.pt +29462 + Stulz GmbH Klimatechnik + Michael Knecht + knecht&stulz.de +29463 + Axigen Messaging (formerly 'GECAD Technologies') + Ioan Indreias + ioan.indreias&axigen.com +29464 + Inverse groupe conseil + Dominik Gehl + dgehl&inverse.ca +29465 + Aaron Von Gauss + Aaron Von Gauss + iana&avongauss.info +29466 + The Nation Traffic + Martha Greenberg + system&thenationtraffic.com +29467 + Fire Breathing Robot Systems + Steve Neuharth + steve.neuharth&gmail.com +29468 + Fruhen Infotec + Andreas Tietz + oid.iana&fruhen-infotec.de +29469 + Freber Enterprises + Wilfredo D. Aldana + kamandi01&comcast.net +29470 + DAX Technologies Corp + Brion Feinberg + brion&daxtechnologies.com +29471 + VibiT BVBA + Ward Viaene + info&vibit.eu +29472 + Grinning Fool Productions + Marc A. Paradise + gf&khalidine.com +29473 + Humanity Forward + Theodore Kisner + tsk&humanityforward.org +29474 + CHINA NETCOM GROUP BROADBAND SERVICE APPLICATIONS LIMITED CORPORATION FOR NATIONAL ENGINEERING LABORATORY + zhanglunyong + zhanglunyong&cnc-labs.com +29475 + ADILEC S.L. + Joan Pons + jpons&adilec.com +29476 + Viliam Trepák - NETCOM + Viliam Trepák + trepo&azet.sk +29477 + Markas-Al-Nour + Linus Gasser + ineiti&markas-al-nour.org +29478 + University of Chicago, Graduate School of Business + Darren Young + darren.young&chicagogsb.edu +29479 + hc Consulting + Frank Harenberg + frank&harenberg.ch +29480 + Barash Communication Technologies Inc. (BCTI) + Oleg Barzasekov + oleg.barzasekov&mts.tm +29481 + Cambridge Semantics Inc + Simon Martin + simon&cambridgesemantics.com +29482 + Excell Battery Company + Farrah L. Lelond + flelond&excellbattery.com +29483 + Proformatique + Marc Dequenes + mdequenes&proformatique.com +29484 + DevGuy + Terris Linenbach + terris&terris.com +29485 + SMART Technologies ULC + Linda Waldern + lwaldern&smarttech.com +29486 + Roundysoft + Virgil Dimaguila + virgil&roundysoft.com +29487 + Beijing LHWT Microelectronics Inc + ZhiZhou + zhouzhi&lhwt.com +29488 + Jeremy Jones + Jeremy Jones + jjones192&nycap.rr.com +29489 + SlitherSoft + Jeremy Jones + jjones192&nycap.rr.com +29490 + Protech s.a.s. + Andrea Girardi + a.girardi&protechgroup.it +29491 + John Wiley & Sons, Inc. 
+ Simon Finch + saefinch&yahoo.com +29492 + Pischows + Mickael Sundberg + engineering&pischows.se +29493 + Limestone College + Scott Berry + sberry&limestone.edu +29494 + State of Maine Government + Mark Kemmerle + Mark.Kemmerle&maine.gov +29495 + Siteworx, Inc. + Brad Forrester + brad&siteworx.com +29496 + Marc Suttle + Marc Suttle + marc.suttle&gmail.com +29497 + Carbon Planet Pty Ltd + Jesse Reynolds + jesse.reynolds.iana.pen&carbonplanet.com +29498 + Probin + Probin Acharya + probin_np&yahoo.com +29499 + Unassigned + Removed 2007-09-17 + ---none--- +29500 + StorageSwitch + Andy Thomson + andy.thomson&storageswitch.com +29501 + Algolith Inc. + Andrew Lee + andrew.lee&algolith.com +29502 + MDIT Innovations Inc + Slobodan Miskovic + smiskovic&mditinnovations.com +29503 + GeniNetworks + Charlie Kim + kimpd&geninetworks.com +29504 + Biometria ek. för. + Magnus Engberg + magnus.engberg&biometria.se +29505 + Resercom, L.P. + Jeremy Lujan + jlujan&resercom.com +29506 + Telepin Software + Yassin Mohamed + yassino&telepin.com +29507 + InCoDe + Peter Starchl + peter.starchl&gmail.com +29508 + Pfadfinder & Pfadfinderinnenbund Nordlicht e.V. + Jonathan Jäkel + info&pbnl.de +29509 + Naftna Industrija Srbije a.d. Novi Sad + Dragomir Zizakov + dragomir.z&nis-naftagas.co.yu +29510 + ZipTie + Ryan Kruse + rkruse&ziptie.org +29511 + HITACHI Hi-System21 Co.,Ltd. Minami-Team + Kazuya Ishida + ishida&hs21.co.jp +29512 + Integral Sistemas + Alberto Reis + areis&integral.com.br +29513 + MvpZone + Vinesh Prasanna M + admin&mvpzone.net +29514 + NewLines Ltd. + Anton Nikiforov + anton&nikiforov.ru +29515 + Korea Institute of Science and Technology (KIST) + Sangchul Ahn + prime&kist.re.kr +29516 + UUDynamics, Inc. + Yang Haihua + hhyang&uudynamics.com +29517 + AuxalNet + Ninad Shah + ninad&auxalnet.com +29518 + Drew Hess + Drew Hess + dhess-web-iana&bothan.net +29519 + Urząd Zamówień Publicznych + Grzegorz Jan Ber + admin&uzp.gov.pl +29520 + DOMOD + Michael Bouma + info&domod.nl +29521 + Omega Design, s.r.o. + Tomas Mrozek + tomas.mrozek&omegadesign.cz +29522 + Unix Systems Management Pty Ltd + Graeme Elsworthy + graeme&unixsm.com.au +29523 + IE Internet.com Limited + Ken O'Driscoll + iana&ieinternet.com +29524 + Dubai Duty Free + Faizan Billah + faizan.billah&ddf.ae +29525 + Ukrainiain industrial + Ivan Ablamskiy + ivan.ablamskiy&gmail.com +29526 + EBI consulting + BULLIER Erick + iana&ebiconsulting.fr +29527 + Spangenberg Consulting + Kai Spangenberg + spangenberg-consulting&hotmail.com +29528 + TOEL Electronics Ltd. + Lubomir Toshev + office&toel-electronics.com +29529 + CPS Technologies + Chavanel Jerome + chavanel&cps-technologies.fr +29530 + New Medical Co.,LTD. 
+ Bryan Shin + yhshin&newmedical.com.tw +29531 + K3-NETWORKS + Shinichiro Nakase + nakase&k3nw.com +29532 + Pacific West Association of Realtors + Stephen Davis + stephend&pwr.net +29533 + NSSE SSC Charleston + John Buelk Jr + jcbuelk&wareonearth.com +29534 + the hive + Brendan Clougherty + brendan&thehive.com +29535 + BKMKS.COM + Alex Protasenko + iana&bkmks.com +29536 + SKY Perfect JSAT Corporation (formerly 'Space Communications Corporation') + Hiroshi Uchiyama + iana-contact&sptvjsat.com +29537 + Velox + Kaspar Brand + iana-pen&velox.ch +29538 + Celeonet + Florent Monthel + florent.monthel&etu.utc.fr +29539 + Lockview + Paul Taylor + paul&lockview.net +29540 + B-204-1 + Emilio García + e.j.garcia&ieee.org +29541 + City of Roseville - Information Technology Department + Patrick Sullivan + CAAdministrators&roseville.ca.us +29542 + BroadcastWebs LLC + Mike Miller + mike&broadcastwebs.com +29543 + Inqnet GmbH + Michael Renner + renner&inqnet.at +29544 + University of Montenegro + Abazovic Dejan + dejoa&cg.ac.yu +29545 + Open Source Business Group + Stefan Schatz + office&osbg.at +29546 + Soft- & Hardware Beratung Ebeling + Jörg Ebeling + joerg.ebeling&shbe.net +29547 + Thomas Neumann + Thomas Neumann + th.neumann&gmail.com +29548 + 4Com GmbH + Jonas Riedel + riedel&4com.de +29549 + EDS France SAS/D3SP + Eric AZAIS + eric.azais&eds.com +29550 + Efectivos de Tecnicas, S.A. + Jorge Santos + jorge.santos&fedetec.es +29551 + Dust in the net + Olivier Migeot + olivier&migeot.org +29552 + Three Palm Software LLC + Patrick Heffernan + p.heffernan&ieee.org +29553 + Habegger AG + Jan Fischer + jfischer&habegger.ch +29554 + pouik.org + Éric de la Musse + eric&pouik.org +29555 + CMB Consulting + Cameron Brice + cbrice&cmbconsult.com +29556 + TechMind Ltd. + Sagi Woo + sagi&techmind.co.il +29557 + Titus International Inc + Stephane Charbonneau + Stephane.Charbonneau&titus.com +29558 + Titus Labs Inc + Stephane Charbonneau + Stephane.Charbonneau&titus.com +29559 + Late-Hours + Christopher M. Hosmer + Late-Hours&knology.net +29560 + SBE network solutions GmbH + Norman Meilick + nm&sbe.de +29561 + Yuston BV + Alain Schuermans + alain&yuston.com +29562 + Sophelios BVBA + Alain Schuermans + alain&sophelios.com +29563 + MITANI CORPORATION + Haruhiko Gotaishi + gotaishi&mitani-corp.co.jp +29564 + Opensquad SARL + Arnaud Brugnon + arnaud&opensquad.com +29565 + DCMSYS LLC + Dmitriy Tochilnik + dmitri&toch.us +29566 + OH San Juan de Dios. Provincia Betica + Emilio Maneiro + emaneiro&sjd.es +29567 + Central Informatics Organisation + Adlin Hisyamuddin + adlinh&cio.gov.bh +29568 + Integro Technologies Private Limited (Singapore) + System Administrator + admin&integrosys.com +29569 + Senselogic AB + Karl Eklof + karl.eklof&senselogic.se +29570 + Oak Ridge Associated Universities + Josh Haynes + Joshua.Haynes&orau.org +29571 + Mirasys + Sami Ovaska + sami.ovaska&mirasys.fi +29572 + Apotex Inc + Paul Tenaglia + ptenaglia&apotex.com +29573 + Laredo IT + Francisco Diderot + ldap&laredoit.net +29574 + PowerCore Engineering + John teBokkel + john&powercoreeng.com +29575 + Alexander Badent (none) + Alexander Badent + abadent&gmail.com +29576 + Kilgour + Chris Kilgour + chris&kilgour.org.uk +29577 + gateProtect AG, Germany + Dieter Balsen + development&gateprotect.de +29578 + STN BV + J.A.M. Oortman + info&stnbv.nl +29579 + 42media group GmbH + Markus Tamm + mt&42mediagroup.com +29580 + Viyya Technologies Inc.
+ John Bay + jbay&viyya.com +29581 + Axiomatica + Roger Neate + ldap-root&axiomatica.com +29582 + Nativ Systems Ltd + Joe Warren-Meeks + tech&lists.nativ-systems-host.com +29583 + Angelo State University + Jason Brake + security&angelo.edu +29584 + Tom Link + Tom Link + tmlink&cmu.edu +29585 + Comsec TR Pty. Ltd. + Emil Andonov + emil&totalrecallvr.com +29586 + USG People International N.V. + Marc Surinx + msurinx&usgpeople.com +29587 + Zadig Srl + Christian Deligant + cdeligant&zadig.it +29588 + EGOS! The Education Company + Mario Fuchs + mario.fuchs&egos.co.at +29589 + Wanari Kft. + György Kemény + kemeny.gyorgy&wanari.com +29590 + Lileo Scop Sarl + Olivier Cadiou + contact&lileo.fr +29591 + OpenSS7 Corporation + Brian Bidulock + bidulock&openss7.org +29592 + Corporate Big + Kristin Valinsky + kristin&corporatebig.com +29593 + Bolkhuis + Stefan Lenselink + beheer&bolkhuis.nl +29594 + San Jose State University + Network Engineers + network&sjsu.edu +29595 + Drury Hotels Corporation + Domain Administrator + domain.admin&druryhotels.com +29596 + CMP Audiovisual + Carlos Muñoz + cmp&cmpx.net +29597 + Nextfour Group Ltd + Johan Wessberg + johan.wessberg&nextfour.fi +29598 + NSI Hosting, Inc. + Chuck Tellechea + chuckt&nsihosting.net +29599 + Torokina Networks + Mr John Ahern + john.ahern&torokina.com +29600 + International Islamic University Malaysia + Abu Hurairah Abdul Manaf + hurairah&iiu.edu.my +29601 + Aricent Communication (Holdings) Ltd. + Arpna Gupta + arpna.gupta&aricent.com +29602 + ALTASIS DI CASTIGLIEGO MICHELE + MICHELE CASTIGLIEGO + michele.castigliego&altasis.it +29603 + Kenoxa GmbH + Michael Radtke + tradtke&kenoxa.de +29604 + Diverse Computing, Inc. + Michael Keller + mkeller&diversecomputing.com +29605 + CellGain + Pratik Desai + pdesai&cellgain.com +29606 + Econoprint, Inc. + Jeremy Parrish + jeremyp&msn.econoprint.com +29607 + Reverb Networks, Inc. + Cang T Nguyen + cnguyen&reverbnetworks.com +29608 + pelzel + Dieter Pelzel + dieter&pelzel.at +29609 + Apogee Labs, Inc. + Andy Grebe + andyg&apogeelabs.com +29610 + Secure Process + Robert Mann + robert&secure-process.com +29611 + Clonewars.org + Daniel Schreiner + warmaster&clonewars.org +29612 + Wi-Links + Sam Krainer + sam&wi-links.com +29613 + Ethos-Networks + Vadim Eidlin + vadim.eidlin&ethos-networks.com +29614 + Aug. Hedinger GmbH & Co. KG + Christos Zisis + czisis&hedinger.de +29615 + 29West, Inc. + David Ameiss + mib-admin&29west.com +29616 + reynald.org + Reynald Poittevin + reynald&poittevin.name +29617 + SyTech + Igor Konnov + ikonv&sytech.ru +29618 + Region Syddanmark + Michael Lykkegaard Laursen + mll&rsyd.dk +29619 + Johannes Oechsle LAN-Party Netzwerkausruester + Johannes Oechsle + service&oechsle-it.de +29620 + Gamesys Ltd + Operations Dep/Systems Administrators Team + sysadmin&gamesys.co.uk +29621 + BIOS Corporation + Seimei Matsumura + matsu&bios.co.jp +29622 + Megafon OJSC + Yury Mikhienko + Yury.Mikhienko&megafonkavkaz.ru +29623 + Banque du Liban + Zeina Aoun + zaoun&bdl.gov.lb +29624 + RIMOC GmbH + Dr. Markus Ruppel + markus.ruppel&rimoc.com +29625 + Sagi Mehadave + Sagi Mehadave + rullevpt&yahoo.com +29626 + TeleDesign PLC + Adam Dring + adam.dring&teledesign.co.uk +29627 + IDpendant GmbH + Clemens Haertling + clemens.haertling&idpendant.com +29628 + n2N Commerce + Bill Goodson + wgoodson&n2ncommerce.com +29629 + Legg Mason, Inc.
+ Todd Behr + tmbehr&leggmason.com +29630 + ONE-ANS S.p.A + Giovanni Salvia + giovanni.salvia&one-ans.it +29631 + iptego GmbH + Stefan Keller + iana-pen&iptego.de +29632 + Seko Worldwide + Chris Johnson + chris.johnson&sekoworldwide.com +29633 + Peleton Photonic Systems + Carol Tracey + info&peleton.com +29634 + Dom Inwestycyjny BRE Banku S.A. + Paweł Kaszyński + prof.gabka&yahoo.com +29635 + Dealer Information Systems Corporation + Dave Dunkin + webmaster&dis-corp.com +29636 + SecureMedia + Sean Higgins + seanh&securemedia.co.nz +29637 + TrinitySoft Co.,Ltd. + Paek, Jin-Ha + tsse&trinitysoft.co.kr +29638 + NovaTec Kommunikationstechnik GmbH + Wilfried Steffens + wist&novatec.de +29639 + The Electric Sheep Company, Inc. + Judd Maltin + oid-master&electricsheepcompany.com +29640 + AP Nederland BV + Luit Huisman + j.verheul&impactions.eu +29641 + Hostworks Group Ltd + Christian Schulze + christians&hostworks.com.au +29642 + exxact + Denis Lomov + denis.lomov&exxact.com +29643 + Winkwaves + Hans de Graaff + hans&winkwaves.com +29644 + Antony Vennard + Antony Vennard + antony+oid&vennard.ch +29645 + bothan.net + Drew Hess + dhess-web-iana&bothan.net +29646 + VisiWear AS + Nils Erik Dahlen + nils&visiwear.com +29647 + Virtual Broadband Technology, Inc. + Ben Sprachman + ben&vbbtech.com +29648 + QuoPin + Jaewon, Chung + jaewonc&quopin.com +29649 + Infoton Corporation + Ross H. Jaibaji + ross&infoton.com +29650 + Georg Simon GmbH + Stefan Rittmann + stefan.rittmann&simon-entsorgung.de +29651 + vidIP + Xavier VINCENT + xvincent&mediatvcom.com +29652 + Shook Hardy & Bacon LLP + Henry Duong + cpane&shb.com +29653 + Overstock.com Inc + Nate Auwerda + nate&overstock.com +29654 + Freedom Networks + James Jones + james&freedomnet.co.nz +29655 + Kirishi Glass Plant Ltd + Admin + admin&steklo.kirishi.ru +29656 + phaseDOT IT-Business-Solutions + Emmanuel Zaspel + Emmanuel.Zaspel&phaseDOT.net +29657 + VTG Deutschland GmbH + Reinhart Donner + 1111&vtg.com +29658 + Fundacion Integra + Jose Luis Fernández + admin&f-integra.org +29659 + Carestream Health + Andrew Mitchell + andrew.mitchell&carestream.com +29660 + AFCO Systems + Todd Lowe + tlowe&afcosystems.com +29661 + Fulcrum Analytics, Inc. + Elmer Pallarca + epallarca&fulcrm.com +29662 + cron IT GmbH + Remus Lazar + remus&cron-it.de +29663 + Embedded X Inc + Brett Bernath + brett&embeddedx.com +29664 + Packet General Networks, Inc. + Iohannes Plethon + iana&packetgeneral.com +29665 + Protractor Software, Inc. + Lei Xu + lei&protractor.com +29666 + Lunixtreme + Deny C Luchetti + dcraid&gmail.com +29667 + Destiny Solutions + Simon Shi + sq.simon&gmail.com +29668 + Kirei AB + Fredrik Ljunggren + fredrik&kirei.se +29669 + Firstlight Networks + David Lawson + david&firstlightnetworks.com +29670 + Medflow Inc. + Mike Schmidt + mschmidt&oipacs.com +29671 + Meraki Networks, Inc. + Hans Robertson + iana&meraki.net +29672 + CSK WinTechnology Corporation + Tetsuji Iwata + tetsujii&cskwin.com +29673 + M3 Design Inc. + Gray McCord + gmccord&m3designinc.com +29674 + thewolfe.net, llc. + Michael Wolfe + netsol&thewolfe.net +29675 + alemo kommunikations GmbH + Oliver Albers + oliver.albers&alemo.de +29676 + net plus tecnolodgy + Rakesh Giri + rakesh&netplus.com.np +29677 + Sensata Technologies - Power Controls WBL + Michael Maas + mike.maas&airpax.net +29678 + Absolute, Ltd. + Igor Koval + info&atta-absolute.ru +29679 + VoiSmart S.r.l. (formerly 'Espia Srl') + Matteo Brancaleoni + mbrancaleoni&voismart.it +29680 + Atlantux Consultores S.L. 
+ Enrique Zanardi + ezanardi&atlantux.com +29681 + AFPy + Olivier Grisel + ogrisel&afpy.org +29682 + DMO Technologies + David MOSCIPAN + david-iana&dmo-technologies.com +29683 + CopperEye Ltd + Stephen Reed + it_support&coppereye.com +29684 + Pioneer Magnetics Inc. + Param Panesar + ppanesar&pioneermagnetics.com +29685 + eVision Pty Ltd + Scott Caffyn + scott.caffyn&evision.com.au +29686 + Bharat Electronics Ltd. + Charu S. Tripathi + charustripathi&bel.co.in +29687 + Absolute Performance, Inc. + Derek Streeter + dstreeter&absolute-performance.com +29688 + Axstor + Alan Shepherd + oid_admin&axstor.com +29689 + Ekplek + Dameon Wagner + dameonwagner&gmail.com +29690 + Doctor Web, Ltd + Eugeny Gladkih + john&drweb.com +29691 + Silver Wave Technology Limited + George Holmes + gholmes&silverwave.biz +29692 + Globesy, s.r.o. + Zdenko Nečas + admin&globesy.sk +29693 + Cobalt Digital + Kevin Moore + kmjm&cobaltdigital.com +29694 + Tethers Unlimited, Inc. + Tyrel Newton + newton&tethers.com +29695 + REWSS A/S + Brian Wasylowich + brian.wasylowich&rewss.com +29696 + CitiFinancial Consumer Finance India Ltd. + Anil Chopra + anil.chopra&citi.com +29697 + Leeds Primary Care Trust + Information Systems and Delivery + michael.ross&leedspct.nhs.uk +29698 + NBG ID + Adrien VALLET + avallet&nbgid.com +29699 + Cetis d.d. + Bostjan Kolar + bostjan.kolar&cetis.si +29700 + Got Linux, Inc. + Linda Bissum + linda&gotlinux.biz +29701 + PMD GmbH + Mike Waldeyer + mike.waldeyer&pmd-media.de +29702 + Eckard Vossas Unternehmensberatung + Eckard Vossas + info&aims-consulting.de +29703 + Stuart Morgan + Stuart Morgan + stuart.morgan&vulnlabs.net +29704 + Carlos Perello Marin + Carlos Perello Marin + carlos&pemas.net +29705 + SNOLAB + John Roberts + jroberts&snolab.ca +29706 + Mapsolute GmbH + Jens Brey + jens.brey&mapsolute.com +29707 + Business & Engineering Systems Corp + John Cook + jcook&bnesystems.com +29708 + InfoStream Technologies + Abdul Rehman Gani + hostmaster&infostream.co.za +29709 + Argibel Servicios Digitales S.L + Esteban Barón + esteban&argibel.es +29710 + London Borough of Camden + David Nicholls + david.nicholls&camden.gov.uk +29711 + 802automation Ltd + Paul Morgan + paul.morgan&802automation.com +29712 + m-sys EDV-Dienstleistungen + Martin Menhart + mmsp&m-sys.at +29713 + LTech Information Technology Co. LTD. + Wang Jun + wangjun&ltech.cc +29714 + Kyocera Communication Systems Co.Ltd + Shinji Higashi + shinji-higashi&kccs.co.jp +29715 + Zavod za zdravstveno zavarovanje Slovenije + Andrej Zlender + andrej.zlender&zzzs.si +29716 + Concilio Networks + Pasi Kovanen + pasi.kovanen&concilionetworks.com +29717 + RealDolmen (formerly 'Dolmen Computer Applications') + Lieven Willems + Lieven.Willems&realdolmen.com +29718 + Technological Educational Institution of Athens (T.E.I. of Athens) + Dimitrios Ninos + proedr&teiath.gr +29719 + MICROPLEX Printware AG + Matthias Krusch + matthias.krusch&microplex.de +29720 + xyco technologies AG + Volker Schnabel + volker.schnabel&xyco.eu +29721 + BI-LO, LLC + Patrick Wyer + patrick.wyer&bi-lo.com +29722 + TimeCost + Marson Nicolas + info&timecost.ca +29723 + IMAX Corp. + Tod Beric + tberic&imax.com +29724 + RPC Packaging + Philippe Mees + philippe.mees&rpc-asp.com +29725 + InConcertCC + Fernando Pardo + fpardo&inconcertcc.com +29726 + Rottenbytes + Nicolas Szalay + nico&rottenbytes.info +29727 + John Zornig + John Zornig + jzornig&mac.com +29728 + Planex Technologies S.A. + Diego G. Nagy + dnagy335&hotmail.com +29729 + Logesta Gestion del Transporte S.A.
+ Antonio Gomez Gonzalez + antonio&logesta.com +29730 + MADEK, Ltd. + Alexey Shchutsky + shchutsky&madek.kiev.ua +29731 + General Dynamics Information Technology + Derek Smeds + derek.smeds&gdit.com +29732 + MTS + Gary Tock + gary.tock&mts.com +29733 + Hypertag Ltd. + Graham Tricker + gtricker&hypertag.com +29734 + Wartsila Corporation + Joachim Kjellman + iana&wartsila.com +29735 + E.T.S.V. Scintilla + Koen Zandberg + sot&scintilla.utwente.nl +29736 + Unified Business Solutions + Thad Smith + thad.smith&unifiedbusiness.com +29737 + Hollywood Center Studios + Systems Administrator + sysadmin&hollywoodcenter.com +29738 + Kerk + Matthias Kerk + matthias&tuxlife.de +29739 + GRETA Sud Normandie + Emmanuel Langlois + emmanuel.langlois&sud-normandie.greta.fr +29740 + MST Systemtechnik AG + Zdenek Sulc + sulc&mst.ch +29741 + Aruba PEC S.p.A. + Andrea Sassetti + andrea.sassetti&ca.arubapec.it +29742 + NetOne S.A. + NetOne Administration Team + sysadmin&netone.gr +29743 + digiremote + Gregor HARTWEGER + office&digiremote.at +29744 + SVP Broadcast Microwave S.L. + Juan Antonio Burgos + svp&svpbm.com +29745 + International Institute for Applied Systems Analysis + Bartosz Kozlowski + oid&iiasa.ac.at +29746 + Satro, s.r.o. + Michal Vančo + vanco&satro.sk +29747 + ExpressThought + Laurence Lu + laurence&expressthought.com +29748 + SophiaSoft + Caleb Chiu + mis&sophiasoft.com +29749 + Matthew R Chase + Matt Chase + matt&mattchase.us +29750 + KTS InfoTech Pvt Ltd + Tom Thomas + tom.thomas&ktsinfotech.com +29751 + Cubico Solutions + Aurelien Duarte + aurelien.duarte&cubico.co.za +29752 + Computer System Laboratory at CMC Faculty, Moscow State University + Nikita Youshchenko + yoush&cs.msu.su +29753 + 3Tera, Inc. + Peter Nickolov + iana-reg&3tera.com +29754 + Karl Blanderer Internetmarketing + Karl Blanderer + info&kabin.biz +29755 + SISTEER + Tarik ABAYAD + tabayad&sisteer.com +29756 + Rochester Community and Technical College + Chris Dobbins + hostmaster&roch.edu +29757 + Oficina de Cooperación Universitaria, S.A. + Servicios Técnicos + servicios.tecnicos&ocu.es +29758 + Polska Grupa Farmaceutyczna + Remigiusz Boguszewicz + remigiusz.boguszewicz&gmail.com +29759 + Engineering Solutions, Inc. + Carl Osborne + c3&engs.com +29760 + CSS Inc. + Thanh Chiem + ThanhChiem&csstel.com +29761 + GoConnect Australia Pty Ltd + Kevin Wong + kevin.wong&goconnect.com.au +29762 + DevMetrix + Damien Stuart + dstuart&devmetrix.com +29763 + ReadyLinks + Milton Johnson + milt_johnson&ready-links.com +29764 + Crypto Team + Duncan S. Wong + duncan&cityu.edu.hk +29765 + Paycor, Inc. + David J Granger + dgranger&paycor.com +29766 + Falcontrol Security GmbH + Philipp Guehring + philipp.guehring&falcontrol.com +29767 + James Hunt + James Hunt + filefrog&gmail.com +29768 + Emporia State University + Cheryl Alvarado + calvarad&emporia.edu +29769 + Accountis + Martin Pegler + martin.pegler&accountis.com +29770 + DIRECT ENERGIE + Florent GILAIN + webmaster&direct-energie.com +29771 + TrioCom Ltd. + Ing.Martin Paska + mpaska&triocom.eu +29772 + Sutter Health + Jonathan Taylor + taylorje&sutterhealth.org +29773 + Genworth Financial + Jeffrey Tchang + Jeffrey.Tchang&genworth.com +29774 + Amobee + Amit On + amit&amobee.com +29775 + WushNet LLC + Michael Ching + michaelc&wush.net +29776 + Checkphone + Benjamin CALAIS + bcs&checkphone.net +29777 + Shaw Industries, Inc. 
+ Ken Martinson + ken.martinson&shawinc.com +29778 + Honeywell Systems Group + Rajesh Gopalakrishna + Rajesh.G&honeywell.com +29779 + Zion Group Limited + Shaun Stockman + shaun.stockman&ziongroup.co.nz +29780 + Secure IT-IS + Phillip O'Donnell + info&itis.co.nz +29781 + XEyedFrog Solutions + Robert Stabler + rob.stabler&gmail.com +29782 + CSK Holdings Corporation + Osa Horii + osa.horii&csk.com +29783 + Brandenburg University of Technology + Thomas Pawell + pawell&tu-cottbus.de +29784 + Turismo Andaluz + Laura Gonzalez García + lgonzalez&andalucia.org +29785 + Ministerio de Agricultura, Pesca y Alimentacion + Rodrigo Diego de Miguel + rdiegode&mapa.es +29786 + Mediterrum SARL + Mr Pekka HEIMONEN + heimonen&mediterrum.com +29787 + T-Systems MMS Dresden + Thomas Friedland + tfri&mms-dresden.de +29788 + GZT Telkom-Telmor Sp. z o.o. + Krzysztof Czajka + iptv&telmor.pl +29789 + Red Lion Controls (BlueTree Wireless Data, Inc.) + Denis Aull + Engineering&RedLion.net +29790 + Yelp Inc. + Sean Plaice + splaice&yelp.com +29791 + Quality Nighthawk + john chen + chen&qualitynighthawk.com +29792 + IPercom + Richard Blanchet + snmp-pen-request.iana.org&rb.ipercom.com +29793 + eNVENT Technologies + Grant Smith + grant.smith&envent-tech.com +29794 + DBD Deutsche Breitband Dienste + Marc Donner + donner&dbd-breitband.de +29795 + CC Computer Consultants GmbH + Michael E. Kromer + michael.kromer&computergmbh.de +29796 + Instituto Mexicano del Seguro Social (IMSS) + Alexis Diez + alexis.diez&imss.gob.mx +29797 + Working Technologies + Jurgen van der Walt + jurgenvdw&wtech.co.za +29798 + Loma Technology Group + David Kierans + dave&lomatech.com +29799 + ex eventu GmbH + Stephan Kröger + reg&exeventu.com +29800 + Zeugma Systems Inc. + Mike Dreves + mdreves&zeugmasystems.com +29801 + DEEPSCAN GR + Spanoudakis Stefanos + s_spanoudakis&freemail.gr +29802 + Comway GmbH + Rupert Weber-Henschel + r.weber&comway.net +29803 + Helsinki Metropolia University of Applied Sciences + Jukka Veikkolainen + iana_contact&metropolia.fi +29804 + Pyroll Oy + Tero Karstikko + tero.karstikko&pyroll.com +29805 + blueocean IT-Services + Dennis Ploeger + info&blueocean-design.de +29806 + Demotera + Pierre Paysant-Le Roux + pplr&free.fr +29807 + Ville de Sevran + Stephane Gatignon + iana-req&sevran.fr +29808 + Nextlead GmbH + Sebastian Roth + sebastian.roth&gmail.com +29809 + Getnet Tecnologia + Alexandro Corrêa + alexandroc&getnet-tecnologia.com.br +29810 + Moving Bytes Communications, Systementwicklung GmbH + Peter Sprenger + sprenger&moving-bytes.de +29811 + Contaduría General de la Nación + Leonardo Amor + lamor&cgn.gub.uy +29812 + Kinetic Avionics Limited + Martin Stevens + martin.stevens&kinetic.co.uk +29813 + Prologica SGPS + Eng. Joao Vaz Martins + jmartins&prologica.pt +29814 + Gandara Center + IT Administrator + itadmin&gandaracenter.org +29815 + Gymnasium Salzgitter-Bad + Mario Lipinski + admin&gymszbad.de +29816 + IRI France + Jean-Paul Chirac + jean-paul.chirac&infores.com +29817 + Gold Mobile + Iana Administrator + iana&gold-mobile.com +29818 + National Financial Partners + Chris Mankowski + root&nfp.com +29819 + Elastiq LLC + Sean Carey + sean&densone.com +29820 + x-Gate + Sven Richter + post&sven-richter.de +29821 + SztupY dot hu + Sztupák Szilárd Zsolt + mail&sztupy.hu +29822 + The Options Clearing Corporation + Scott Mitchell + smitchell&theocc.com +29823 + CallMiner, Inc. + Kim Brown + kim.brown&callminer.com +29824 + Inprint + Ilya Skorik + ilya.skorik&inprint.ru +29825 + NeoAccel, Inc. 
+ Uday Masurekar + uday.masurekar&neoaccel.com +29826 + WiBorne, Inc. + Kevin Cheng + info&wiborne.com +29827 + Qness AB + Peter Mathiasson + iana&qness.eu +29828 + Bellgardt Embedded Systems + Andreas Bellgardt + ldap&andreas-bellgardt.com +29829 + Software Art + Remco Vossen + oid&software-art.nl +29830 + Elektro Slovenija d.o.o + Gregor Novak + gregor.novak&eles.si +29831 + Scuola Superiore S.Anna + Fabio Pagani + fabio.pagani&sssup.it +29832 + Saeil Systems Inc. + JongMin Choi + jmchoi&saeilsystems.com +29833 + GTZ India + chander bahadur thapa + neerajku79&yahoo.com +29834 + PRO-ZETA a.s. + David Cermak + prozeta&prozeta.eu +29835 + bruggmann engineering + Rico Bruggmann + bnshe&gmx.net +29836 + China Internet Network Information Center + Guonian SUN + sun&cnnic.cn +29837 + Erema - Engineering Recycling Maschinen und Anlagen GmbH + Thomas Felberbauer + t.felberbauer&erema.at +29838 + DialogueScience, Inc. + Anatoly Gladkov + gladkov&dialognauka.ru +29839 + Simoco EMEA Ltd (formerly 'Dalman Technical Services Ltd') + John Witchell + john.witchell&simocogroup.com +29840 + llrp.org + John R. Hogerhuis + registrar&llrp.org +29841 + Fat Spaniel Technologies + Ferdy Nagy + ferdy.nagy&fatspaniel.com +29842 + macmon secure gmbh (formerly 'mikado soft gmbH') + Christian Bücker + christian.buecker&macmon.eu +29843 + CargoServer AG (formerly 'ncode gmbh') + Oli Kessler + info&cargoserver.ch +29844 + Centile SA + Aymeric Renou + centilesnmp&centile.com +29845 + VCC Perfect Pictures AG + Frank Schliefer + sysadmin&vcc.de +29846 + Sparkroom, Inc. + David DeBruin + ddebruin&sparkroom.com +29847 + Nav N Go Kft + Ivan Szkiba + ivan.szkiba&navngo.com +29848 + Far Eastern State University + Anton Yu. Umnikov + umnikov&uic.dvgu.ru +29849 + KSB AG + Wilfried Kipry + wilfried.kipry&ksb.com +29850 + Netline Communications Technologies (NCT) Ltd. + Avi Asher + avi&netline.co.il +29851 + Nuclear-Whales Informatikai Bt. + Zsolt Parragi + dutow&nwi.hu +29852 + Gryphon Networks + Ross Kramer + rkramer&gryphonnetworks.com +29853 + eCare4all + Segis Ferrairó Pons + segis.fp&gmail.com +29854 + OnePacs LLC + OnePacs LLC + info&onepacs.com +29855 + T-Mobile Crna Gora d.o.o. + Dejan Stijovic + dejan.stijovic&telekom-cg.com +29856 + anapol + Petr Mifek + iana-contact&anapol.cz +29857 + Ready Set Companies + John Duncan + johnduncan&readysetcompanies.com +29858 + FCA US LLC (formerly 'Chrysler LLC') + Tony Veach + tony.veach&fcagroup.com +29859 + KJC Systems Ltd Co + Kevin Coyle + kevin.coyle&kjc.com +29860 + Octopus Technologies Ltd + Stephen Beynon + stephen&octopus-technologies.com +29861 + N-iX LLC + Ivan Pesin + sp_ipesin&n-ix.com.ua +29862 + NANCY s.r.o + Boris Pisarcik + boris&opensolaris.sk +29863 + Software Logic, Inc.
+ Scott Stumpf + scott.stumpf&softwarelogic.com +29864 + Typeria + Dominik Lisiak + admin&typeria.net +29865 + Icotera A/S + Kim Esben Jørgensen + kej&icotera.com +29866 + Method Analysis Limited + Michael Wright + mjw&methodanalysis.com +29867 + Skanska + Joachim Abrahmsen + joachim.abrahmsen&skanska.se +29868 + Business Security + Roger Eriksson + roger.eriksson&businessecurity.com +29869 + UniNet + Enrico Zanolin + enrico&uninet.co.za +29870 + EUROCOM Satellite Systems AS + Kjell Danielsen + Kjell.Danielsen&EUROCOM.no +29871 + College of Southern Maryland + David marek + davem&csmd.edu +29872 + Project Rialto + Kelvin Edmison + kelvin&projectrialto.com +29873 + OutProtect + Jim Niemann + jniemann&outprotect.com +29874 + TEL2WEB GmbH + Michael Friese + m.friese&tel2web.com +29875 + sorcellerie + Takashi Yajima + k-kukutachi&sorcellerie.jp +29876 + Antwoord voor Bedrijven (ICTU) + Johnander Jansen + johnander.jansen&ictu.nl +29877 + Allnight Computing + Michael Peck + allnight4&yahoo.com +29878 + Reduce My Fee, LLC + Robert Zuber + rob&adperk.com +29879 + Pittman Holdings + James Pittman + jamespittman&gmail.com +29880 + Schelde Naval Shipbuilding + Lennard van Vugt + Lennard.vanvugt&schelde.com +29881 + Brovic ltd. China + Bryan Su + sinceresu120&hotmail.com +29882 + Lambda Networks Ltd + Lutz Hamann + l.hamann&lambdaltd.net +29883 + Telemune Software Solutions Ltd + Rajesh Bansal + rajesh.bansal&telemune.net +29884 + MadaSoft + Martin Dahling + martin.dahling&madasoft.no +29885 + Haute Ecole Specialisee de Suisse occidentale (HES-SO) + Gremaud Marcel + marcel.gremaud&hefr.ch +29886 + beroNet GmbH + Mr. Guersel Kuecuek + info&beroNet.com +29887 + Dirección General del Territorio Marítimo + Enrique Maldonado + emaldonado&directemar.cl +29888 + SpinetiX S.A. + Diego Santa Cruz + Diego.SantaCruz&spinetix.com +29889 + Utilia + Miroslav Ther + mt&utilia.info +29890 + Work Microwave GmbH + Dr. Gerhard Mocker + support&work-gmbh.de +29891 + Bluelon + Thomas Bonde + tbo&bluelon.com +29892 + Genesilico + Tomasz Jarzynka + tomee&genesilico.pl +29893 + Jeroen Koekkoek + Jeroen Koekkoek + jtm.koekkoek&home.nl +29894 + FrenchCries! + Frank Lesniak + franklesniak&hotmail.com +29895 + Fam List Schlattstall + Christoph List + hostmaster&list-schlattstall.de +29896 + VdH Soft + Danny Van den Hende + danny&vdhsoft.be +29897 + QSent + Chris Nigh + chris.nigh&qsent.com +29898 + Acropolis Software SPRL + Philippe Detournay + philippe.detournay&acrosoft.be +29899 + Roosevelt University + Timothy Hopkins + thopkins&roosevelt.edu +29900 + EDEKA Aktiengesellschaft + Dirk Thomsen + dirk.thomsen&edeka.de +29901 + Anaheim Engineering Co., LTD. + Reiki Hattori + hattori&anaheim-eng.com +29902 + Dytecna Limited + Iain Woolley + iain.woolley&syen.co.uk +29903 + Counsel's Chambers Limited + Michael Wright + support&counselschambers.com.au +29904 + abacon products GmbH + Carina Adler + carina.adler&abacon.org +29905 + Perrit + Jeroen Koekkoek + j.koekkoek&perrit.nl +29906 + Dybala + Krzysztof Dybala + krzych&stud.ics.p.lodz.pl +29907 + Nortal AS + Erik Matt + erik.matt&nortal.com +29908 + IndigoVision Ltd + Keith Manning + k.manning&indigovision.com +29909 + Cicero Networks Limited + Lubos Zisko + Lubos.Zisko&ciceronetworks.com +29910 + Open Systems Management Ltd. + Mr. C. A. Hunt + oid&osm.co.uk +29911 + Open-Tec + Robert Houghton + Robert.Houghton&Open-Tec.com +29912 + Hx Technologies Inc. 
+ Joe Murray + joe.murray&hxti.com +29913 + Intelligent Transportation Video + Daniel Boyd + dan&intelligenttransportationvideo.com +29914 + Networkconcepts BV + John de Graaff + iana.j&networkconcepts.nl +29915 + Northwood Logic + David Rush + rushd&uwec.edu +29916 + Lightworks Technology Inc. + Jinbong Ko + jinbong&lightworks.co.kr +29917 + Guangzhou New Postcom Equipment Co.,Ltd. + Weihe Zhang + zhangweihe&newpostcom.com.cn +29918 + Uralsvjazinform Inc. + Kurenskij Evgenij + evgenij.kurenskij&samotlor.info +29919 + CJSC Orbita + Igor V. Alekseev + ca&orbitacom.ru +29920 + mySoftIT GmbH + Sascha Bitzer + info&mysoftit.de +29921 + FV Service KG + Christof Schroeder + iana.registry&freudenberg-nw.com +29922 + Starmark Trading Ltd. + Bernard Magny + bmagny&starmark.biz +29923 + Netris + Igor Perin + i.perin&netris.ru +29924 + GTD Sistemas de Informacion + Miguel A. Vilarino + miguel.vilarino&gtd.es +29925 + Multiplex Ltd. + Ivan Ivanov + i.ivanov&multiplex-bg.com +29926 + Santa Rosa S.A. + Luis C. Benitez A. + lbenitez&tocorre.com +29927 + LOHR + Bruno Barge + bruno.barge&lohr.fr +29928 + RelayHealth + Marty Smith + Marty.Smith&relayhealth.com +29929 + United States Steel Corporation + Jack Parkin + jparkin&uss.com +29930 + Koinema srl + Matteo La Rosa + mlarosa&koinema.com +29931 + Cambridge Imaging Systems + Simon Booth + simon&cambridgeimaging.co.uk +29932 + SOUND4 + Camille Gonnet + camille&sound4.biz +29933 + APOAL Inc. + Franck Zoccolo + admin&apoal.com +29934 + Hypermedia Systems Inc. (USA) + Alexi Papaleonardos + alexi.papaleonardos-iana&hypermediasystems.com +29935 + ABC Startsiden AS + Andreas Marienborg + andreas&startsiden.no +29936 + x15 + Pete Slagle + iana-pen-admin&x15.net +29937 + ellis stewart design + joseph ellis stewart + joseph.stewart&gmail.com +29938 + The Swedish Coast Guard + Bengt Nilsson + bengt.nilsson&combitech.se +29939 + Alpha Red, Inc + LR Mack McBride + network&alphared.com +29940 + Kawhai + Network Admin + info&kawhai.net +29941 + Pavlov Media, Inc + Jeremy Jones + jjones&pavlovmedia.com +29942 + Himalaya Technologies + Suresh Ramasamy + sureshdr&gmail.com +29943 + Shelton | Johns Technology Group + Aubrey Wells + aubrey&sheltonjohns.com +29944 + COARA Inc. + Yas Higashino + yas_h&elf.coara.or.jp +29945 + Chinauninet + Mang Lee + lim&lianmeng.com +29946 + Media-Saturn IT Services GmbH + Arthur Drymala + drymala&media-saturn.com +29947 + CNW Consulting Network AB + Per Dalén + per.dalen&cnw.se +29948 + Consell de Mallorca + Antoni Barcelo + abarcelo&conselldemallorca.net +29949 + Cassiopea OnLine srl + Daniele Pizzurro + daniele&cassiopeaonline.net +29950 + GMIT GmbH + Stephan Skrodzki + info&gmit-gmbh.de +29951 + DCom, spol. s r.o. + Rudolf Touzin + touzin&dcom.cz +29952 + JSC "Alliance Group Holding" + Aleksandre Bezhitashvili + hostmaster&agh.ge +29953 + deburit chemical engineering GmbH + Björn-Arne Meyn + info&deburit.de +29954 + Health Identity Management Consortium - HIMC + Seonho Kim + seonho.kim&mednet.org +29955 + Medina VoIP + Sean Harlow + sharlow&medinavoip.com +29956 + FreeWave Technologies + Jordan Fuerst + jordan&freewave.com +29957 + PCB Piezotronics Inc. + Joseph Van Slycke + jvanslycke&pcb.com +29958 + Washtenaw Community College + Rex Roof + rex&wccnet.edu +29959 + KolejNet + Pavel Kislinger + pavel.kislinger&gmail.com +29960 + specurio studio + Masamichi Decimal + deshi&specurio-studio.com +29961 + AServer NV + Henk BAERT + henk.baert&aserver.com +29962 + Tag Systems S.A.
+ Jukka Kangas + pen-admin&tagsystems.fi +29963 + Iron Council + Keisi Sciuridae + keisis&ironcouncil.net +29964 + Tribunal de Contas dos Municipios do Estado do Pará + Sérgio Cunha + sergio.cunha&tcm.pa.gov.br +29965 + Shinymark Enterprises Limited + Aaron Ho + aaron.ho&msn.com +29966 + Advanced Concept Technology Ltd. + Michael Tang + michael.tang&aconcept.info +29967 + B&R Data Systems + Bob Lynch + rwl&brenclosures.com.au +29968 + Universidade Estadual de Campinas (Unicamp) + Unicamp NOC + noc&unicamp.br +29969 + MORISHIMA.NET + Naoto Morishima + root&morishima.net +29970 + Zervina Technologies LLC + Rich Vaughn + rvaughn&zervina.com +29971 + Damogran Apathetic Solutions + Justin A. McCright + jam&damogran.org +29972 + compico + Juergen Spitzner-von der Haar + jspitzner&novell.com +29973 + GemEx Systems, Inc. + Ben Wong + benw&gemex.com +29974 + E-WEB INFORMATION Co. + Ivan Chang + ivan&e-web.com.tw +29975 + Guangzhou Great Symbol Information Technology Co.,Ltd. + Feixian Xie + sunny_mr_xie&live.cn +29976 + Nara Institute of Science and Technology + Takahiro Tsujii + tsujii&itc.naist.jp +29977 + Palm Tree Technology, Inc. + Lance Reck + lance&palmtreetechnology.com +29978 + Gotland University + Tord Berglund + tord.berglund&hgo.se +29979 + Bruker AXS GmbH + OIDMaster + OIDMaster&bruker-axs.de +29980 + Smart Com d.o.o. + Arso Savanovic + arso.savanovic&smart-com.si +29981 + Marben Products + Philippe CUER + philippe.cuer&marben-products.com +29982 + Massive Entertainment + Erik Karlsson + erikk&massive.se +29983 + Waves Audio Ltd. + Rotem Tzuk + rotemt&waves.com +29984 + Byelorussian State University of Informatics and Radioelectronics (BSUIR) + Network Department + admin&bsuir.by +29985 + DETRACOM SAS + FREZOULS Jean-Michel + jm.frezouls&detracom.fr +29986 + NET STREAMS + Valery BAUDOIN + vbaudoin&net-streams.fr +29987 + Hivemind Systems, Inc. + Clint Miller + cmiller&hivemindsystems.com +29988 + Calabrio, Inc. + Alvin Wong + alvin.wong&calabrio.com +29989 + Qwaq Inc. + Andreas Raab + andreas.raab&qwaq.com +29990 + John Smith Network Management + John Smith + snmp.stack&yahoo.co.uk +29991 + Macif + Dominique Caillaud + dcaillaud&macif.fr +29992 + PONT Technology Co.,Ltd + Hai Liu + lh&pont.cn +29993 + Net Marvellous + Koji Takemura + takemura&netmars.mobi +29994 + CONCORD GmbH + Uwe Böhme + boehmeu&concord.de +29995 + FUSAGx + Pierre Ramlot + sig&fsagx.ac.be +29996 + The Lorentz Group + Jack McKinney + nsadmin&lorentz.com +29997 + LavaNet, Inc. + Julian Cowley + system&lava.net +29998 + Kurashiki Cable Television Co.,Ltd. + YAMAKAWA Toshihiro + service&kct.co.jp +29999 + microC Design SRL + Lucian Lazarescu + l.lazarescu&microc.ro +30000 + University of Western Sydney + Robert Hazeltine + r.hazeltine&uws.edu.au +30001 + BROWAN COMMUNICATIONS INCORPORATION + Gordon Chang + gordon.chang&browan.com +30002 + FREEPP INCORPORATION + Gordon Chang + gordon.chang&freepp.com +30003 + Hiflex GmbH + Rico Leenarts + rl&hiflex.com +30004 + TerraNua Ltd + Brian Matthews + brian.matthews&terranua.com +30005 + Raybit Systems korea, Inc + john choi + autochoi&raybitsystems.co.kr +30006 + Mobile Fun + André Dal Col + ti&mfun.com.br +30007 + DOB.SK + Samuel Behan + pen-admin&mail.dob.sk +30008 + Bank Austria Real Invest GmbH + Mag. (FH) Sonja Koschina + sonja.koschina&realinvest.at +30009 + Information Industry Company + Alex Dobrynin + adobrynin&informind.ru +30010 + Embotics Corporation + Jean-Marc Seguin + jmseguin&embotics.com +30011 + Itochu Cable System Corp.
+ Masaaki Umeura + cmts&itochu-cable.co.jp +30012 + ipoque GmbH + Daniel Breest + daniel.breest&ipoque.com +30013 + Den selvejende institution Paul Bargsøe Kollegiet + Søren Bøg + nu&pbk.dk +30014 + Pecsi Tudomanyegyetem Klinikai Kozpont + Robert Pottendi + pen&kk.pte.hu +30015 + KBC Bank + Michael Van Gucht + michael.vangucht&kbc.be +30016 + Anel-Elektronik AG + Andrzej Nieduzak + info&anel-elektronik.de +30017 + TAG Aviation + Fred D. Aldana + faldana&amicharter.com +30018 + FusionIO + Michael Zappe + zapman&fusionio.com +30019 + Diversified Sales and Service, Inc. + Ryan Faircloth + ryan&dss-i.com +30020 + Silesian University of Technology + Adam Osuchowski + adam.osuchowski&polsl.pl +30021 + Mühlbauer AG + Auburger Harald + harald.auburger&muehlbauer.de +30022 + Guangdong Cirrus Sci-tech Development Co., Ltd + jie yao + goon862&163.com +30023 + ITS-Telecom + shlomo Tzidkani + shlomot&its-tel.com +30024 + In-Tuition Networks Ltd + Matthew Francis + mf&in-tuition.net +30025 + Markus Widmer + Markus Widmer + info&markus-widmer.de +30026 + Blue Caravel + Joao Violante + joao.violante&bluecaravel.com +30027 + Brainfat Networking + Michael Graves + admin&brainfat.net +30028 + Nova CATV, Inc. + Philip de Vries + philipd&novacatv.com +30029 + NENTRON Technology + HuangYongYao + HuangYongYao&hotmail.com +30030 + jamesoff.net + James Seward + james-iana&jamesoff.net +30031 + Penguinlabs LTD + Laban Mwangi + lmwangi&penguinlabs.co.ke +30032 + Beijing Xintong Huaan Technologies Co. Ltd + Ning Mo + ning.mo&gmail.com +30033 + Atabyte + Jared Bartimus + Jarediana&theaog.com +30034 + IIJ Engineering,Inc. (formerly 'Net Care,Inc.') + Naohiko Sugiyama + oid&iij-engineering.co.jp +30035 + Formosa Wireless Systems Corp. + Joy Cheng + joy&tw-wireless.com +30036 + Nomad Digital Ltd + Paul Herring + paul.herring&nomadrail.com +30037 + Company Extrim pro, Ltd. + Maxim A. Serebrov + serebrov&xrm.ru +30038 + ALMDUDLER-LIMONADE A. & S. Klein GmbH & Co KG + Stefan Neumann + postmaster&almdudler.com +30039 + Topdog-software.com + Andrew Colin Kissa + andrew&topdog-software.com +30040 + Tartu University Hospital + Kati Korm + kati.korm&kliinikum.ee +30041 + Universität Augsburg + Maria Schmaus + maria.schmaus&its.uni-augsburg.de +30042 + Dynaco Oy + Jussi Peltola + pelzi&pelzi.net +30043 + Coral Automação Municipal + Ricardo Bueno + ricardo&coral.srv.br +30044 + Walter Dorwin Teague Associates, Inc. + Daniel Chin + oid.admin&teague.com +30045 + davidfuchs.ca + David Fuchs + david&davidfuchs.ca +30046 + rh-tec Business GmbH + Sebastian Abt + sa&rh-tec.de +30047 + WorthTEK + Jeff Wilkinson + iana-pen&worthtek.com +30048 + BNC Technologies Co.,Ltd. + Lijie Jiang + jlj&bnc.com.cn +30049 + Acorn Active Media Foundation + Joshua King + josh&acornactivemedia.com +30050 + Jushri Technologies, Inc + Zhao Yu + yu.zhao&jushri.com +30051 + EDICOM (INTERCAMBIO ELECTRONICO DE DATOS Y COM SL) + JOSE VILATA + jvilata&edicom.es +30052 + Agora SA + Slawomir Mateuszczyk + slaw&gazeta.pl +30053 + MDoffice Inc + Darshak Shah + darshak&mdoffice.com +30054 + Untangle Inc. + John D. Irwin + jdi&untangle.com +30055 + BKK Essanelle + Daniel Schindler + daniel.schindler&bkk-essanelle.de +30056 + Bando Electronic Communication Co., Ltd. 
+ Yoowon Kim + ywkim&bandoec.com +30057 + AIO Systems + Yossi Appleboum + yossia&web-silicon.com +30058 + otaking.se + Tobias Franzén + pen&otaking.se +30059 + Alarislabs + Ilya Ustinov + neon&alarislabs.com +30060 + sevenstax + Tolga Tuncay + netadmin&sevenstax.com +30061 + Imprimerie Nationale + Martin LAFON + martin.lafon&saqqarah-international.fr +30062 + CRIX International + Terry Zagar + terry.zagar&ngc.com +30063 + Kantonsspital Luzern + Stefan Muri + stefan.muri&ksl.ch +30064 + Jens Kaiser + Jens Kaiser + jens&kaiser-von-kleve.de +30065 + Arista Networks, Inc. (formerly 'Arastra, Inc.') + Bill Fenner + fenner&aristanetworks.com +30066 + Swissdotnet + Steve Jacot-Guillarmod + steve.jacot-guillarmod&swissdotnet.ch +30067 + Imprint Global Pty Ltd + Bruce Haefele + bruce.haefele&imprintglobal.com +30068 + Walker Chandiok Grant Thornton + Benoy George + bge&gt-india.com +30069 + Candlelight Software + Jarvis Cochrane + jarvis&cochrane.com.au +30070 + Lufthansa Systems Indonesia + Ferdinand Neman + ferdinand.neman&lhsystems.co.id +30071 + Colby Dillion + Colby Dillion + colby.dillion&gmail.com +30072 + The Government of Brunei Darussalam + Mr. Foo Jong Ai + noc&netrust.net +30073 + WYS S.O.C Corporation. + SAM Wu + sam_wu&wys.com.tw +30074 + Beijing Sharenet Information Technology Co.,Ltd. + Liu Yongxin + lyx_sd&163.com +30075 + Humanomed + Antonio Batovanja + Antonio.Batovanja&humanomed.co.at +30076 + ATILF - Analyse et Traitement Automatique de la Langue Francaise + Zenaida TUCSNAK + zina.tucsnak&atilf.fr +30077 + clanconley.com + Ron Conley + ronconley&yahoo.com +30078 + Novella SatComs Limited + Ventura Rufino + vtrufino&novella.co.uk +30079 + Open Mobile Alliance (OMA) + Dwight Smith + dwight.smith&motorola.com +30080 + Fraport AG + Dr. Karsten Weronek + k.weronek&fraport.de +30081 + Philip Morris USA + David Brown + David.C.Brown&pmusa.com +30082 + David Computer Service + David Liang + winstonliang&hotmail.com +30083 + Intelleca + Daniel Rothmann + danier&intelleca.co.za +30084 + 1dian.com + Lixin Zhou + zhou.lixin&gmail.com +30085 + Knopad Software SRL + Dragos Novac + drnovac&gmail.com +30086 + Arinbe Technologies, Inc. + Jim Horner + jhorner&arinbe.com +30087 + NEU-INFO + Jun Chen + rainbow_jun&163.com +30088 + Wuhan Gewei Electronic Technology Co., Ltd. + Yuqiang Yao + yaoyuqiang&gemwei.com +30089 + Ak Kamal ISCC LLP + Pavel Karabidi + pkarabidi&akkamal.kz +30090 + shaikh + adil shaikh + adil.shaikh&gmail.com +30091 + SeaBright Insurance Company + Jeff Wilkinson + iana-pen&sbic.com +30092 + Zavarovalnica Maribor d.d. + Uros Kositer + uros.kositer&zav-mb.si +30093 + GEMAC Gesellschaft für Mikroelektronikanwendung Chemnitz mbH + Peter Laux + laux&gemac-chemnitz.de +30094 + Beijing US-Sino Diverse Telecom Equipment Co.,Ltd. + Tony Guan + guanxu&diverse-telecom.com +30095 + Kom-Pakt + Tomasz Gaska + tomasz.gaska&kompakt.pl +30096 + dataway GmbH + Anthony Uk + hostmaster&dataway.ch +30097 + Familie Dieter Baerwolf + Stephan Baerwolf + matrixstorm&gmx.de +30098 + BBP AG + Ian Wilcox + sysadmin&bbp.ch +30099 + MorphoTrust USA (formerly 'L-1 Identity Solutioins, Inc.') + Eric Osterberg + eosterberg&morphotrust.com +30100 + greenBytes Inc. + Paul Zuchowski + psz&getgreenbytes.com +30101 + SUNIX Co., Ltd.
+ Vincent Thai + double_thai&sunix.com.tw +30102 + team Communication Technology Management GmbH + Philip Kamenarsky + philip.kamenarsky&te-am.net +30103 + One Reel + Jeramey Crawford + jeramey&onereel.org +30104 + ERR + Alar Sing + alar.sing&err.ee +30105 + JARS OG + Johannes Simon + office&jars.at +30106 + Board of European Students of Technology + Matko Matic + itd-management&best-eu.org +30107 + SEACON TERMINALS LTD + T CLARKE + TIM&SEACON.CO.UK +30108 + Xaragua® Enterprise Corporation + John Adolphe + info&xaragua.com +30109 + CompIT SRL + Nazzareno Prinzivalli + developers&compit.it +30110 + MDEV Desenvolvimento de Software para Celular Ltda + Ricardo Walter Hildebrand + ricardo.walter&mdev.com.br +30111 + Pacific NETwork Imaging, Inc. + Mike Watters + mike-iana&nmmg.net +30112 + Network Solutions Private Limited + Venkatesan Sivasubramani + venkatesan.s&netsol.co.in +30113 + Fornova + Hai Zaar + haizaar&gmail.com +30114 + Reserved + Removed 2013-05-02 + ---none--- +30115 + Damien Degois + Damien Degois + damien&degois.info +30116 + Jims Group + Andy Zaugg + andy.zaugg&jims.net +30117 + Telefield, Inc. + Kim, Tae-Woo + twkim&telefield.com +30118 + Hostdime.com, INC + Ray Fernandez + iana&hostdime.com +30119 + UNION FENOSA, S.A. + MANUEL MARTINEZ GARCIA + mmartinezgarcia&unionfenosa.es +30120 + Nayatek S.L. + Chris Jolly + cjolly&nayatek.com +30121 + scoyo + Lukas Loesche + lukas.loesche&bertelsmann.de +30122 + MdbNet + Martin de Bruin + mdbnet&telkomsa.net +30123 + MaResCom GmbH + Steffen Opel + iana-contact&marescom.net +30124 + ABAJAX + Tyrel Newton + tyrel.newton&fatcatter.com +30125 + ANDURAS AG + Sven Anders + anders&anduras.de +30126 + LSI JAPAN CO.,LTD. + Kaoru Kusachi + tika&lsi-j.co.jp +30127 + Deft Labs Inc. + Ryan Nitz + contact&deftlabs.com +30128 + Neovest, Inc. + Aaron Heber + pen&neovest.com +30129 + Trask solutions + Svoboda Tomas + tsvoboda&trask.cz +30130 + Hyperwave GmbH + Bernhard Heidegger + pen&hyperwave.com +30131 + OMA + Fugaku OMA + oid&oma.jp +30132 + ANAC + Emanuel livramento + emanuel.livramento&anac.cv +30133 + iControl Networks + Chris DeCenzo + snmpadmin&icontrol.com +30134 + Bitscape + Axel Hinrichs + iana&bitscape.de +30135 + e-mehlbox.eu + Holger Librenz + hostmaster&e-mehlbox.eu +30136 + Sharif University of Technology + Mohsen Moeeni + moeeni&sharif.edu +30137 + SHANDONG TRANSPARENT COMMUNICATIONS CO,LTD + Duan Donghua + duan_6868&sina.com +30138 + Sparkassen Informatik GmbH & Co. KG + Klaus-Dieter Joos + Klaus-Dieter.Joos&sparkassen-informatik.de +30139 + AMS Engineering Sticht Gmbh + Schossleitner Robert + robert.schossleitner&ams-engineering.com +30140 + Advantech Czech s.r.o. + Petr Gotthard + petr.gotthard&advantech.cz +30141 + Landesbetrieb Daten und Information Rheinland-Pfalz + Holger Weil + OIDMaster&ldi.rlp.de +30142 + National Magazine Company + Alex Williams + alex&natmags.co.uk +30143 + DecisionSoft LTD + James Muscat + jrem&decisionsoft.com +30144 + Kepware Technologies + Tony Paine + tony.paine&kepware.com +30145 + Active Media Architects, Inc. + Andy Lintner + iana_contact&activema.com +30146 + Dovetail Storage, Inc. + Marc S. Hedish + marc.hedish&dovetailstorage.com +30147 + Magnus.net + Jerry Puoplo + jerryp&magnus.net +30148 + CrossWorld + Kevin Weaver + kevin.weaver&crossworld.org +30149 + TECNOVA LTDA + Francisco Javier Valdés + fvaldes&tecnova.cl +30150 + Nizhny Tagil Iron & Steel Works + Eugene Krapivin + ekrapivin&mail.ru +30151 + Zimbabwe Revenue Authority + Allen Saruchera + asaruchera&zimra.co.zw +30152 + nLogy s.r.o.
+ Kamil Srot + kamil.srot&nlogy.com +30153 + Keystone Electronic Solutions + John Eigelaar + john&kses.net +30154 + e-Vox + Drazen Zoric + drazen&e-vox.eu +30155 + OpenBSD Project + Reyk Floeter + reyk&openbsd.org +30156 + The Positive Internet Company Ltd + Paul Smeddle + pauls&positive-internet.com +30157 + Kalopa Research Limited + Dermot Tynan + dtynan&kalopa.com +30158 + EMPRESA BRASILEIRA DE TELECOMUNICACOES S A - EMBRATEL + Alessandro Fernandes Martins + amartins&embratel.net.br +30159 + Scientific Games Racing + Yulia Nikanorova + nikanorova&scigames.eu +30160 + Insomniac Games, Inc. + Cameron Mac Millan + casm&insomniacgames.com +30161 + Orbital Spuds Development, Inc. + Philip de Vries + philipd&orbitalspuds.com +30162 + Farez + Hadjali Salem + salem.hadjali&farez.fr +30163 + Johannes H Laxdal + Johannes H Laxdal + johannes&laxdal.org +30164 + Radium Network Solutions + Jonathan Coppin + jcoppin&radiumnetwork.com +30165 + Derek Belrose + Derek Belrose + derekb&realgeeky.com +30166 + Blue Ridge Networks, Inc. + Fatih Comlekoglu + Fatih&BlueRidgeNetworks.com +30167 + Tavasti Solutions Oy + Markku Tavasti + tavasti&tavastisolutions.com +30168 + dnsee + andrea denaro + a.denaro&dnsee.com +30169 + pragma:s Jan Schlosser + Jan Schlosser + jan.schlosser&pragma-s.de +30170 + Smart Health Solutions Pty Ltd + Jon Hughes + jon.hughes&smarthealth.com.au +30171 + NET SPACE Daniel Kiper + Daniel Kiper + dkiper&net-space.pl +30172 + OpenGoss Corporation + Ery Lee + ery.lee&gmail.com +30173 + Convey Plus TC + Antuan Avdioukhine + antuan&gmail.com +30174 + Deka Works + Antuan Avdioukhine + antuan&gmail.com +30175 + Jeff Knecht + Jeff Knecht + jeff_knecht&yahoo.com +30176 + Hirotoshi Hamada + Hirotoshi Hamada + Hirotoshi.Hamada&gmail.com +30177 + Geodesic Limited + Atul Chitnis + atul.chitnis&geodesic.com +30178 + Martin County School District + Lucas Parker + parkerl&martin.k12.fl.us +30179 + Cervantes S.A. + Miguel Chavez + mchavez&seguroscervantes.com +30180 + PetRays L.P. + Damon Hopkins + dhopkins&horizonradiology.com +30181 + Public Works Government Services Canada + Roger Carisse + Roger.Carisse&pwgsc.gc.ca +30182 + 3G Factory S.A.R.L + Thierry Barnier + tbarnier&3g-factory.com +30183 + KES INC. + Naomi Miyahara + miyahara&kin.co.jp +30184 + Gleim Internet, Inc. + Larry Gleim + contact&gleim.com +30185 + AWEK microdata + Michael Westermann + mw&microdata-pos.de +30186 + TRINAMIC SOFTWARE FACTORY + BARTOLOME PAYERAS + tomeu&trinamic.net +30187 + HCS GmbH + Stefan Podskubka + s.podskubka&hcs.at +30188 + MB-technology GmbH + Oliver Schäfer + oliver.schaefer&mbtech-services.com +30189 + Bravura Solutions (UK) Limited + David Carter + dcarter&bravurasolutions.com +30190 + Solera Networks + Brian Edginton + bedginton&soleranetworks.com +30191 + IPTRADE SA + Philippe Joliet + iana&iptrade-networks.com +30192 + Veraz Networks + Dutt Kalapatapu + dutt&veraznet.com +30193 + ComdataNetwork, Inc. + Steve Brenner + sbrenner&comdata.com +30194 + Egon Frerich + Egon Frerich + egon&frerich.eu +30195 + Carlson Wireless Technologies, Inc.
+ Chris Anderson + canderson&carlsonwireless.com +30196 + Smith Travel Research + Denis Caron + NetAdmin&smithtravelresearch.com +30197 + kestan.co.uk + Jonathan Leach + jrleach&kestan.eclipse.co.uk +30198 + Coherent Solutions + Maksim Belov + maxb&coherentsolutions.com +30199 + Abaca Technology Corporation + Peter Hsu + phsu&abaca.com +30200 + ePrize LLC + Sean Millichamp + sean.millichamp&eprize.com +30201 + Metaverse Republic + Nicholas Chase + nchase&earthlink.net +30202 + HCE Engineering + Mr. Armando Genovese + info&hcedesign.it +30203 + Forschungsverbund Berlin e.V. + Rudolf Bender + bender&fv-berlin.de +30204 + Faculty of Physics and Applied Informatics, University of Lodz + Marek Garbaczewski + tech&phys.uni.lodz.pl +30205 + Rohde & Schwarz Cybersecurity GmbH (formerly 'Sirrix AG security technologies') + René Fischer + rene.fischer&rohde-schwarz.com +30206 + Invisible Software 2.0 Ltd + Charles Lecklider + iana-pen&invis.net +30207 + Level 3 Solutions, LLC + Matthew Brothers + support&l3wv.com +30208 + Firecode Soluções em Tecnologia LTDA + Leandro Cavalcante Damascena + leandro.damascena&gmail.com +30209 + Universita' Ca' Foscari Venezia + Giacomini Giuseppe + gigiacom&unive.it +30210 + Logalty Servicios de Tercero de Confianza, S.L. (formerly 'Postaltrust, S.L.') + Palmira Aldeguer + Palmira.Aldeguer&garrigues.com +30211 + THE CHUNICHI SHIMBUN CO., LTD. + Norihito Kawashima + msren&chunichi.co.jp +30212 + ON telecomm + ONtel MIS + root&ontel.com.tw +30213 + Vertex Pharmaceuticals + Robert Bodajla + robert_bodajla&vrtx.com +30214 + CVSDude PTY LTD + Nikolai Lusan + servers&cvsdude.com +30215 + Serenity Information Systems + Raymond Chan + raymond.c.p.chan&gmail.com +30216 + Azolia + francois COLOMBIER + francois&azolia.fr +30217 + Osinet S.A. + Matias G. Lambert + matiaslambert&osinet.com.ar +30218 + AltaVoz S.A. + Robert Hilliard + robert&altavoz.net +30219 + Verivue, Inc + Frederick Slama + fslama&verivue.com +30220 + Wavestream Corp + Keith King + keith&wavestream.com +30221 + UnboundID Corp + UnboundID OID Administrator + oid&unboundid.com +30222 + Home Credit Finance Ukraine + Stesenko Ed + ed&agrobank.com.ua +30223 + OOe Gebietskrankenkasse + Guenther Rasch + guenther.rasch&ooegkk.at +30224 + NCC Hoorn + Niek Last + niek.last&ncchoorn.nl +30225 + Cotendo + David Drai + davidddrai&gmail.com +30226 + Mairie de Besançon - France + Alain MERCIER + informatique_systeme&besancon.fr +30227 + Channelot Ltd. + Danny Sade + danny&channelot.com +30228 + Visipia + Jesper L. Nielsen + jln&visipia.com +30229 + debian-community.org + Andreas Putzo + andreas&putzo.net +30230 + dimedis GmbH + Jörn Reder + snmp.pen&dimedis.de +30231 + morphism + Markus Pfeiffer + iana-registry&morphism.de +30232 + www.themaine.net + Bruce Higgins + bruce&themaine.net +30233 + John Muir Health + William J Halverson + William.Halverson&JohnMuirHealth.com +30234 + NBT Infra + Adalberto Branco Araujo + adalberto&nbtinfra.com +30235 + Gordon & Louis Technologies + Matt Giacomini + mgiacomi&gltech.com +30236 + Desktone Inc. + Andrew Hobgood + andrew.hobgood&desktone.com +30237 + EteVan Technologies co., Ltd. + Zhong GuangLei + eternalvangard&gmail.com +30238 + XandMail + Jean-Michel Dubois + dubois&xandmail.com +30239 + Archisys SARL + FLORENT CARRE + contact&archisys.fr +30240 + Vivesta + Pascal van Buijtene + pascal&vivesta.nl +30241 + Jeffrey Ricker LLC + Jeffrey Ricker + info&jeffreyricker.com +30242 + Info Consultoria e Informatica Ltda. 
+ Jocimar Soto de Gois + jocimar&infoconsultoria.com.br +30243 + network.co.at GmbH + Peter Zednik + peter.zednik&network.co.at +30244 + Dazzle + Vesa Auvinen + vesa.auvinen&dazzle.fi +30245 + Graham Holdings Company + Scott McClure + corpitadmin&ghco.com +30246 + Diventus GmbH + Hans-Juergen Rux + hru&diventus.com +30247 + Coopers Peele (France) SARL + Olivier Pichon + op&coopers-peele.com +30248 + Webkuteer + Srinivas Peesapaty + info&webkuteer.com +30249 + marek.priv.at + Philipp Marek + philipp&marek.priv.at +30250 + Telecom design (France) + Didier FILHOL + df&telecom-design.com +30251 + CampusLAN Software GmbH + Tobias Federl + tobias.federl&campuslan.de +30252 + Jetair N.V. + Filip Hosten + filip_hosten&jetair.be +30253 + Certisign Certificadora Digital S.A. + Anderson Farias + afarias&certisign.com.br +30254 + The Main Street America Group + Tammy Jutras + tammy.jutras&msagroup.com +30255 + County of San Bernardino + Cameron Brice + cbrice&isd.sbcounty.gov +30256 + Telecommunication Technologies Ltd. + Kaplyar Eduard + eddy&teletec.com.ua +30257 + Intrcomm Technology + Trey Tabner + trey&intrcomm.net +30258 + Xalyo Systems + Andre Wiesel + awiesel&xalyo.com +30259 + LWsystems GmbH & Co. KG + Martin Werthmoeller + info&lw-systems.de +30260 + Zenitel Netherlands BV + Niek Last + niek.last&zenitel.com +30261 + HigherGround, Inc. + Samuel Smith + ssmith&highergroundinc.com +30262 + Cacholong + Matthijs Mohlmann + matthijs&cacholong.nl +30263 + Klopf + Michael Klopf + mklopf&gmx.at +30264 + Axiometric, LLC + David Albert + david_albert&axiometric.com +30265 + Hinfox Bt. + Babcsány Péter + hinfox&hinfox.com +30266 + Inca Digital Printers ltd + Nick Trotter + nick.trotter&incadigital.com +30267 + sofinco + christophe yayon + cyayon&sofinco.fr +30268 + PENN Warehousing & Distribution, Inc. + Don Hills + Don.Hills&pennci.com +30269 + via donau Österreichische Wasserstraßengesellschaft mbH. + Paul Stiedl + paul.stiedl&via-donau.org +30270 + Microdata Tecnologia Ltda + Franco Motta + franco&microdata.com.br +30271 + Netsense + Johan Henselmans + johan&netsense.nl +30272 + Cache IQ, Inc (formerly 'Storspeed Inc') + Kris Meier + kmeier&CacheIQ.com +30273 + Sairyx + Arlen Christian Mart Cuss + celtic&sairyx.org +30274 + Greenfossil Pte Ltd + Cheong Chung Onn + chungonn&greenfossil.com +30275 + TeraCortex + Christian Hollstein + chollstein&teracortex.com +30276 + OEAMTC + Peter Burian + peter.burian&oeamtc.at +30277 + Software Cellular Network Ltd + Jonathan Oddy + jonathan.oddy&truphone.com +30278 + Peykasa Messageware Co + Fatememeh Imani Mehr + imanimehr&peykasa.com +30279 + Gladserv Limited + Brett Sheffield + brett&gladserv.com +30280 + delta4 business solutions gmbh + Thomas Wopienka + wopienka&delta4.at +30281 + AussieHQ Pty Ltd + Tim Sharpe + t.sharpe&aussiehq.com.au +30282 + Hammond Street Developments Pty. Ltd. + Peter Sawyer + peter&hsd.com.au +30283 + GRADIOR GROUP a.s. + Martin Skrasek + it&gradior.cz +30284 + Intersoft + SeungWon Son + sonsw&itsoft.co.kr +30285 + TMT SYSTEM + Tadeusz Lesiecki + lesiecki&tmtsystem.pl +30286 + Tridymedia + Guillermo Farias Diaz + gfarias&tridymedia.cl +30287 + Agito Networks Inc.
+ Sandeep Rangarajan + sandeep&agitonetworks.com +30288 + Alog Datacenters do Brasil S/A + Raphael Costa + raphael.costa&alog.com.br +30289 + Brian W Bush + Brian W Bush + account&brianwbush.info +30290 + Vangenechten Packaging + Wouter D'Haeseleer + wdhaeseleer&vangenechten.com +30291 + Sujansky & Associates, LLC + Walter Sujansky + wsujansky&sujansky.com +30292 + Monit24.pl + Tom Kuzniar + tomasz.kuzniar&monit24.pl +30293 + Collège Dunoyer de Segonzac + Charlemagne Fabrice + flash_math&yahoo.fr +30294 + InnoTrans Communications Inc. + Betty Juan + snmp&inno-trans.com +30295 + I2Net Christian Nordmann + Christian Nordmann + nordmann&i2net.de +30296 + Innovation & Support Centre + Graham Watkins + pki&isc.gov.om +30297 + The Clockwork Lab + Alan Kligman + alan.kligman&gmail.com +30298 + Industrial Control and Communication Limited + Steve Stean + steve.stean&industrialcontrol.co.uk +30299 + SEP Bulgaria JSC + Svetlozar Grancharov + sgrancharov&sepbulgaria.com +30300 + Internetworking & Broadband Consulting Co., Ltd. + Hiroyuki Kato + iana-pen&ibc21.co.jp +30301 + Invada Records Pty Ltd + Fraser Stuart + oid&arkhostings.com +30302 + NuSuara Technologies Sdn Bhd + Liu Wai Kat + wkliu&nusuara.com +30303 + IoT-Lab.org + Leo Korbee + Leo.Korbee&xs4all.nl +30304 + CHAMBRE D'AGRICULTURE DU BAS-RHIN + Zhiqiang CHEN + chen&bas-rhin.chambagri.fr +30305 + ITIS Holdings PLC + Shane Nash + servicedesk&itisholdings.com +30306 + SATEL Oy + Samuli Aura + standards.support&satel.com +30307 + Invidi Technologies + Lloyd Peterson + lloyd&invidi.com +30308 + Clearspring Technologies, Inc. + William Valadez + william&clearspring.com +30309 + Svenska Linuxföreningen + Jens Hjalmarsson + jens&se.linux.org +30310 + Remigio Conti + Remigio Conti + ianamail&shareskills.org +30311 + Cartiza Networks + James J. Halpin + jim.halpin&cartizanetworks.com +30312 + educagri + Mickaël COUTRON + mickael.coutron&agrosupdijon.fr +30313 + Indanet AG + Fabian Huegle + fabian.huegle&indanet.de +30314 + Seltec Computers Ltd. + Jonathan deBoer + jonathan&seltec.ab.ca +30315 + Papierfabrik Palm GmbH & Co. KG + Walter Thum + w.thum&papierfabrik-palm.de +30316 + Unassigned + Removed 2011-01-20 + ---none--- +30317 + First Sensor Technology GmbH + Vladimir Cvetic + vladimir.cvetic&first-sensor.com +30318 + GSoft BG + Plamen Dimitroff + plamen&gsoftbg.com +30319 + SmartPropertyManager Ltd + Paolo Fragapa + pfragapa&yahoo.co.uk +30320 + Exeltech + Raymond Sgroi + ray&exeltech.com +30321 + Unison Technologies + Igor Balk + ibalk&unison.org +30322 + International Broadband Electric Communications, Inc. (IBEC) + Cole Moody + cole.moody&ibec.net +30323 + Eigen LLC + Troy DesBiens + troy.desbiens&eigen.com +30324 + Interface Masters, Inc. + Ben Askarinam + sales&interfacemasters.com +30325 + Grin AB + Peter Björklund + peter.bjorklund&grin.se +30326 + ShoreGroup, Inc. + David Lovy + dlovy&shoregroup.com +30327 + Metro Servers Inc. + Syan Selvarajah + syan&metroservers.com +30328 + Travis Hein + Travis Hein + info&travnet.org +30329 + CONRAC GmbH + Eberhard Kurz + e.kurz&conrac.de +30330 + Atom Consultants Co.,Ltd. 
+ Tomoyasu Fujii + dev-reg&atom24.com +30331 + Octant Informatique + Alain Ganuchaud + support&octant-fr.com +30332 + eXXcellent solutions + Martin Renner + m.renner&exxcellent.de +30333 + GRIPS - Gillen & Partner + Oliver Loch + o.loch&grips-net.de +30334 + Korscient Consulting Limited + Luke Roberts + support&korscient.com +30335 + DXI Networks + David Bell + david.bell&dxi.net +30336 + Eirteic Consulting Ltd + Jan Wiktorowicz + jan.wiktorowicz&eirteic.com +30337 + Luceat Spa + Gabriele Bettoni + info&luceat.it +30338 + Smart421 + Technical Services + technicalservices&smart421.com +30339 + National Center of Digital Signature Authentication - Ministry of Information and Communications of Viet Nam + Dr. Dao Dinh Kha - Director + dinhkha&mic.gov.vn +30340 + Moderntimes + Jimmy Joong + mt&moderntimes.com.tw +30341 + Deltacast + Olivier Antoine + software-management&deltacast.tv +30342 + Pellucid Consulting LLC + James Strain + j.strain&pellucidllc.com +30343 + Middle East Education through Technology + Max Goldman + admin&meet.mit.edu +30344 + Possimo Technologies Sdn Bhd + Mohd Azlan Kenali + azlan&possimo.com +30345 + TMS, LLC + Ivan Kovalenko + kovalenko&tms.ru +30346 + IT & Design Solutions GmbH + Moritz Kobel + moritz.kobel&itds.ch +30347 + Bekatul Port + Danang Wijanarko + danang.wijanarko&gmail.com +30348 + W-I-E-S-E-R.net + Matthias Wieser + matthias.wieser&hiasl.net +30349 + Kvant-Efir + Sergey Pylypchuk + digital&kvantefir.com +30350 + Caucho Technology, Inc. + Nam Nguyen + nam&caucho.com +30351 + Philip Gladstone + Philip Gladstone + philip-iana&gladstonefamily.net +30352 + sneudert.de + Sebastian Neudert + info&sneudert.de +30353 + Symmetria Hungary Ltd. + Gabor Glavak + gabor.glavak&symmetria.hu +30354 + EPiQ Life Science AB + Mikael Fredriksson + info&epiq.se +30355 + Digital Research Services LLC + Steven Smith + admin&time-stamp.net +30356 + Domeo BV + Jeroen Veldhorst + j.h.veldhorst&avisi.nl +30357 + Unix-network + Christian Schiffler + c.schiffler&unix-network.de +30358 + Pro:Atria Ltd + Tim Adams + sales&proatria.com +30359 + Tango Networks + Patty Landgren + pattylandgren&tango-networks.com +30360 + Trustwave Holdings, Inc. + OIDAdmin + OIDAdmin&trustwave.com +30361 + Triple Play Communications + Keith Riffee + keith.riffee&3playcomm.com +30362 + Brandwidth + Bo Miller + bo&morebrandwidth.com +30363 + N.V. Devoteam Telecom & Media + Frank Verelst + frank.verelst&devoteam.com +30364 + NewMedia Publications Ltd + Ronald A Porter + info&newmediaonline.co.uk +30365 + INEO SUEZ Group - INEO SYSTRANS + Luis Lopez + luis.lopez&ineo.com +30366 + M-Tech + Byung-kwon, Lee + pungss&m-tech.co.kr +30367 + Dragonfly Technologies Pty Ltd + Branko Ninkovic + bninkovic&dragonflytechnologies.com.au +30368 + Whitewall Networks + Saravana Krishnamurthy + info&whitewallnetworks.com +30369 + Grupa Lotos S.A. + Marek Paluch + marek.paluch&grupalotos.pl +30370 + GIHAR LTD + FESCHENKO VLADIMIR IVANOVICH + vladimir-its&yandex.ru +30371 + Financial Link Sdn Bhd + Ho Sing Guan + sgho&financial-link.com.my +30372 + SANBlaze Technology, Inc. + Steve Looby + slooby&sanblaze.com +30373 + Wallix + Vincent Bernat + vbernat&wallix.com +30374 + Opencode Systems Ltd + Peter Raykov + peter.raykov&opencode.com +30375 + Innovature Labs + Prasanth Prabhakaran + sysadmin&innovaturelabs.com +30376 + Outright Solutions + A.R. 
van Grieken + info&outrightsolutions.nl +30377 + Baseblack Ltd + John Kozak + jk&baseblack.com +30378 + True Corporation PCL + Natapope Sarakhette + natapope_sar&truecorp.co.th +30379 + Southcoast Health System + Dave Burke + burked&southcoast.org +30380 + Aggienerds + William Reading + bill&aggienerds.org +30381 + FRIENDSHIP-SYSTEMS + Konrad Lorentz + lorentz&FRIENDSHIP-SYSTEMS.com +30382 + Kristoff Kiefer + Kristoff Kiefer + kiewie&kiewie.de +30383 + ViaCom Informatics Ltd. + Sandor Jager + jager.sandor&viacomkft.hu +30384 + Interjato Telecom + Eduardo Andrez de Oliveira + eduardo.andrez&gmail.com +30385 + CAVOK Software Systems GmbH + Jos Hartman + jos.hartman&cavok.eu.com +30386 + Software Service Christoph Bach + Christoph Bach + info&project.simplebill.de +30387 + Singlesnet.com + Matthew Von-Maszewski + matthew&thehive.com +30388 + nobisCum + Bernhard Czech + bcz&nobiscum.de +30389 + Hangzhou Digicast Technology Co., Ltd. + Song Jingtao + sjt&digicast.cn +30390 + City of Garden Grove + Keith Winston + keithw&ci.garden-grove.ca.us +30391 + Alex Potter Consulting + Alex Potter + alex&ap-consulting.co.uk +30392 + Globolog GmbH + Reinhard Mauer + reinhard.mauer&globolog.com +30393 + Cibenix + David Johnston + david.johnston&cibenix.com +30394 + Kyoto University + Yasuo Okabe + oid-admin&kuins.kyoto-u.ac.jp +30395 + Alhambra-Eidos + Consuelo Iglesias Pérez + consuelo.iglesias&a-e.es +30396 + Cambridge Consultants + Philip Morris + Philip.Morris&CambridgeConsultants.com +30397 + TELERAD SA + Patrice Mariotte + p.mariotte&telerad.fr +30398 + Leader Technologies + Benjamin Gordon + ben&leadertech.com +30399 + LogiSync LLC + Greg Saltis + gsaltis&logisync.com +30400 + Red Condor, Inc. + Adam Rosenstein + adam&redcondor.com +30401 + Fedora Project + Fedora Admin + admin&fedoraproject.org +30402 + Halsted Surgery + Mike Slowey + halstedm&halstedsurgery.com +30403 + Secure Computing Networks + Eric F Crist + ecrist&secure-computing.net +30404 + never.no AS + Andreas Dahl + andread&never.no +30405 + ClaimLynx, Inc + Eric F Crist + ecrist&claimlynx.com +30406 + Aubesoft Inc. + Denis Benoit + denis.benoit&aubesoft.com +30407 + CRISALID SARL + PIERRE YAGER + pierre&crisalid.com +30408 + Ulrich Boddenberg Informatique + Ulrich B. Boddenberg + ulrich&boddenberg.de +30409 + FTR Pty Ltd + Henry Hickling + hhickling&fortherecord.com +30410 + Jozef Hatala + Jozef Hatala + jh-oid&skrt.org +30411 + Parker Hannifin Corp + Christopher J. 
Nappi + cnappi&parker.com +30412 + Aeroflex Incorporated + David Hagood + iana_contact&aeroflex.com +30413 + Channel Islands Stock Exchange LBG + Donald Fraser + d.fraser&cisx.com +30414 + OBJECT Sistemas Multimidia Ltda + Wagner Correa Ramos + wagner&object.com.br +30415 + PANSKY TECHOLOGY CORPORATION CHENGDU GROUP + QI ZHANG + zhangqi&pansky.com.cn +30416 + Latenter + Marco Poli + oid-admin&latenter.com +30417 + RLG Informatica + Arthur Beltrão + arthur.beltrao&rlginformatica.com.br +30418 + Jewelry Television + Raj Ramanathan + raj.ramanathan&jtv.com +30419 + Novgorod State University + Sergey Popov + Sergey.Popov&novsu.ru +30420 + Softex Insoporated + Apurva Bhansali + apurva_bhansali&softexinc.com +30421 + NAVICON A/S + Thomas Borg Salling + support&navicon.dk +30422 + Fatwire Corporation + Francisco Braganza + braganza&fatwire.com +30423 + Centene Corporation + Nicolaas Hyatt + nicolaas.hyatt&centene.com +30424 + Oak Scientific, LLC + Mark Ping + mping&oakscientific.com +30425 + Innovative Communications Engineering + Steve Chinatti + chinatti&ice-llc.com +30426 + Universal Solutions Group, Inc. + Brian Salisbury + brian.salisbury&usgct.com +30427 + 2inches + chuck sumner + iana&a.2inches.com +30428 + Telekom Research & Development Sdn Bhd + Mohammad Harris Mokhtar + harris&tmrnd.com.my +30429 + Amnafzar Co. + AliAsghar Hedayati + info&amnafzar.com +30430 + carnero.ca + Carlos A. Carnero Delgado + carlos&carnero.ca +30431 + Telvent Environment, S.A. + Carlos Macias Jimenez + carlos.macias&telvent.abengoa.com +30432 + Open System Solutions (UK) Limited + Steve Brown + sb&opensystemsolutions.co.uk +30433 + Dah Sing Life Assurance Company Limited + Timothy Poon + timothypoon&dahsing.com +30434 + Satelcom Oy + Leif Saarela + leif.saarela&satelcom.fi +30435 + Dalarna University + Anders Nordahl + ano&du.se +30436 + Aralia Systems Ltd. + Marcelo Grossi + administrator&aralia.co.uk +30437 + Fortna, Inc + Mike Sherman + mikesherman&fortna.com +30438 + capecom GbR + Stefan Weber + sweber&chello.at +30439 + layerX Technologies + Larry Underwood + lunderwood&layerxtech.com +30440 + PROXIFLEX + Jean-Yves Linet + contact&proxiflex.fr +30441 + MATRA Systems (UK) + Matthew Dodkins + matthew.dodkins&matra.co.uk +30442 + Adaptive Spectrum and Signal Alignment, Inc. + Marc Goldburg + mgoldburg&assia-inc.com +30443 + Schooner Information Technology, Inc. + Randy S Balaban + rsb&2BitProgrammers.com +30444 + 2-Bit Programmers + Randy S Balaban + rsb&2BitProgrammers.com +30445 + Genie Technology Management + Robert Cebollero + rob&genietm.com +30446 + Mobile Technika, Inc. + Shinichiro Iwatsuru + tsuru&mobiletechnika.jp +30447 + Pete Gooch + Pete Gooch + pete.gooch&gmail.com +30448 + Sörlöv Holding + Daniel Sörlöv + daniel&sorlov.com +30449 + Institute of Psychology and Pedagogics of Development + Konstantin Kuzvesov + root&ippd.ru +30450 + Teleglobe Canada ULC + Alex Fournier + alex.fournier&vsnlinternational.com +30451 + CPEGM + Cyril Genisson + cyril.genisson&gmail.com +30452 + Xoft, Inc.
+ Bruce Director + bdirector&xoftinc.com +30453 + Kannel Software Foundation + Stipe Tolj + stolj&kannel.org +30454 + Santa Barbara City College + Devin Neiman + Neiman&sbcc.edu +30455 + Wm Morrison Supermarkets PLC + Dr Tim Nicholls + viralto&gmail.com +30456 + sugat.com.np + Sugat Sikrikar + me&sugat.com.np +30457 + CYGNET Technology BV + Rik van der Kemp + rik&cygnet-ecm.com +30458 + Thomas Duclos Software + Thomas Duclos + thomas579&centurytel.net +30459 + EchoStorm Worldwide, LLC + James Chamberlain + jchamberlain&echostorm.net +30460 + Perfomix Technologies Intl(P) Ltd + Sony Lazarus + sonylazarus&gmail.com +30461 + Radley Network Technologies CC + Joon Radley + joon&radleys.co.za +30462 + Freifunk Halle + Steven Barth + webmaster&freifunk-halle.net +30463 + Supridatta Teleinformatica Ltda. + Clovis Wichoski + clovis&supridatta.com.br +30464 + Tecnosistemas Inc. + Douglas Villalobos Viales + dvillalobos&masbanking.com +30465 + Visage Mobile, Inc + Jamuel P. Starkey + jstarkey&visagemobile.com +30466 + Privantis SA + Yves Genevier + ygenevier&gmail.com +30467 + Carlson Wagonlit Travel + James Whitlow + jwhitlow&carlsonwagonlit.com +30468 + Solid Technologies, Inc + Byung-seok Lee + bslee&st.co.kr +30469 + Synacor Inc + Geoff Franks + monitor&synacor.com +30470 + CyberTrails + Shawn Ostapuk + shawn.ostapuk&cybertrails.net +30471 + TwoPiRadian Infotech Private Limited + Indrajit Raychaudhuri + indrajit&2pirad.com +30472 + CityLink, Ltd. + Vadim Ponomarev + vadim&drevlanka.ru +30473 + Steropes Technologies Limited + Timothy Poon + timothy.poon&steropes.com +30474 + CJSC "InCore" + Dennis Agafonov + support&incore.ru +30475 + Intra2net AG + Thomas Jarosch + iana-pen&intra2net.com +30476 + BIS + Drav Sloan + dsloan&bis-internet.co.uk +30477 + gig mbh berlin + Frank Gruber + support&gig-mbh.de +30478 + Enventis + Chris Halverson + chalverson&enventis.com +30479 + GlobeStar Systems Inc. + Jason Wilson + jwilson&globestarsystems.com +30480 + DotLibre + Association DotLibre - PM Alamy + iana&dotlibre.info +30481 + HySky Communications + Michael Boone + michael_boone&hysky.com +30482 + Omnes Loquantur Limited + Neil Davies + hostmaster&omlq.co.uk +30483 + Virtual Switching Consultancy Limited + Pong Yip Nam + yn.pong&vsc.com.hk +30484 + OPTeam SA + Tomasz Trojanowski + tomek&uninet.com.pl +30485 + RTC-Siberia Co., Ltd. + Vladimir Veretennikov + otir&rustelcom.ru +30486 + thomas kobienia + thomas kobienia + tk_oid&mamps.de +30487 + Hydra Labs + Jacob Feisley + jacob&darkhorsenetworks.com +30488 + Boardlink Group Limited + Mr Michael Parton + mikeparton&hawarden.co.uk +30489 + FAMIPOW + Francois BAYART + francois&famipow.be +30490 + Dtella Labs + Jacob Feisley + admins&dtella.org +30491 + Helping Hand PC Services & Networking, Inc. + Brian Porter + brian&helpinghandpc.com +30492 + Hypertek Inc. + Dr. Martin J. Burns + burnsmarty&aol.com +30493 + Digitek spa + Matteo Fortini + m.fortini&selcomgroup.com +30494 + Microlink Telecom + Marcio Pinheiro Gomes + iana&microlink.com.br +30495 + RoundTrip Systems + Don Mileff + don&roundtripsystems.com +30496 + E-Smart Systems d.o.o. Beograd + Maksim Luka Veljović + maksim&e-smartsys.com +30497 + Beijing EaseSea Interman Technology Co., + Xiaoqiang Ye + yexq&eastseagroup.com +30498 + AeroScout + Amir Vahkover + amir.vashkover&aeroscout.com +30499 + Design 2000 Pvt Ltd + Hussain Shafee + shafee&design2000.com.mv +30500 + Forca A/S + Lasse Berget + lab&forca.dk +30501 + Institute for Networking Technology + Kirill A.
Ivanov + kir&int.spb.ru +30502 + The Norwegian Polar Institute + Bjørn Hjelle + bjorn.hjelle&npolar.no +30503 + Bastart + Axel Gembe + ago&bastart.eu.org +30504 + Punctual Software Inc. + Florin Seceleanu + florin.seceleanu&punctualsoftware.com +30505 + Swiss Life Deutschland Operations GmbH + Mark Kaus + oid-admin&swisslife.de +30506 + ooma Inc. + Samir Sharma + ssharma&ooma.com +30507 + Extreme Copper, Incorporated + Robert Nino + rob&extremecopper.com +30508 + Medavie Blue Cross + Vincent Power + vince.power&medavie.bluecross.ca +30509 + Shenzhen Leagsoft Technology Co., Ltd.. + Fang Yuwei + fangyw&leagsoft.com +30510 + Bestwiz Softwarw Development (Dalian) Co.,Ltd. + Li Shouming + lsming&dl.cn +30511 + Arts Alliance Media + Samuel Cozannet + samuel.cozannet&artsalliancemedia.com +30512 + ATEIS + Robert Seassau + r.seassau&ateis.com +30513 + Ford Motor Company + Harshet Patel + hpatel48&ford.com +30514 + Lavínia Tavares + Lavinia Tavares + laviniatavares&yahoo.com +30515 + m7 evolution + Sebastian Schneider + sebastian.schneider&m7-evolution.com +30516 + oclane + didier Belot + didier.belot&oclane.com +30517 + Zoot Enterprises, Inc. + Justin Buhler + justin.buhler&zootweb.com +30518 + CoolIT Systems Inc. + Mike Holden + mike.holden&coolitsystems.com +30519 + Wieldim + Brandon Dimcheff + bdimchef-iana&wieldim.com +30520 + IAESTE Austria + Axel Groß + axel.anai.gross&iaeste.at +30521 + Collab + Pedro Gomes + pedro.gomes&collab.pt +30522 + kludge.ca + Cory Oldford + oldfordroad&gmail.com +30523 + Taichung Veterans General Hospital + Ang-Hsuan Lin + ahlin&vghtc.gov.tw +30524 + Intersoft Electronics + Erik Moons + erik.moons&intersoft-electronics.com +30525 + H&S Ventures, LLC + Network Operations Center + noc&hsventures.org +30526 + Astec Solutions + Mario Gonzalez + mgonzalez&astec.com.mx +30527 + Mochi Media, Inc. + Matthew Dempsky + matthew&mochimedia.com +30528 + V.Saint-Denis + Vincent Saint-Denis + vincent&saint-denis.hd.free.fr +30529 + Vkernel Corporation + Beverley Reynolds + snmp&vkernel.com +30530 + INTI Electronica e Informatica + Gustavo Alessandrini + gusi&inti.gov.ar +30531 + LEANDRO DA SILVA MENDES INFORMATICA ME + Leandro da Silva Mendes + theflockers&gmail.com +30532 + Alox + YoungJu, Park + starp&alox.com +30533 + DASANCNS + Jeong Chan, Lee(DASANCNS) + jclee&dasancns.com +30534 + Tajinc.ORG LLC + Tim Spriggs + contact&tajinc.org +30535 + Alphasoft GmbH + Axel Freimuth + info&alphasoft.biz +30536 + BMTI + Yvon Jacquelin + contact&bmti.fr +30537 + Welsh Blood Service + Daniel Rainbird + daniel.rainbird&wbs.wales.nhs.uk +30538 + webLyzard + Arno Scharl + scharl&weblyzard.com +30539 + Sappi Fine Paper + Matt Oberpriller + matt.oberpriller&sappi.com +30540 + Zymeworks Inc. + Luke Cyca + luke&zymeworks.com +30541 + Exacq Technologies, Inc. + Matthew M. DeLoera + mdeloera&exacq.com +30542 + Komes Co. Ltd. + Dr. İrfan Acar + irfan.acar&komes.com.tr +30543 + QuartzNet + Alexander Boulette + alexanderfb&gmail.com +30544 + iTAS Technology Corp. + Charles Liu + charles.liu&itas.com.tw +30545 + Akademia Podlaska + Artur Krycki + kryckia&ap.siedlce.pl +30546 + JLA Ltd + Dave Atkin + datkin&jla.com +30547 + Institute of Physics of the ASCR, v. v. i. + Michael Kratky + kratky&fzu.cz +30548 + Alberta Blue Cross + Wade Fasek + wfasek&ab.bluecross.ca +30549 + Merli dr. Clizio Consultant + Clizio Merli + clizio&clizio.com +30550 + Konexxo GmbH + Sven Frommholz + hostmaster&konexxo.de +30551 + Transtruct Foundation + Noah Fontes + nfontes&transtruct.org +30552 + MicroBlade, Inc. 
+ Dan Skolnik + Dan.Skolnik&MicroBlade.us +30553 + LincWare + Eric Lenio + eric&lincware.com +30554 + Mitsubishi Space Software Co.,Ltd. + Mitsuhiro Sambe + sambe&mss.co.jp +30555 + mikhailelias.net + Mikhail Elias + iana&mikhailelias.net +30556 + King Faisal University + Hakeem Sadiq + Sadiq&Kfu.Edu.Sa +30557 + Sapphire Computer Systems (UK) Ltd + Andrew Johnson + andrew.johnson&sappsys.co.uk +30558 + Ticketcorner AG + Daniel von Allmen + daniel.vonallmen&ticketcorner.com +30559 + Krüger Network Training & Consulting + Samuel Krüger + info&kr-network.de +30560 + Pumpkin 3D + Administrator + admin&pumpkin3d.com +30561 + expact.it + Raimund Sacherer + raimund.sacherer&gmail.com +30562 + Universidad de Montemorelos, A. C. + J. David Mendoza + jdmendoza&um.edu.mx +30563 + Blackwave Inc. + Jeremy Decker + support&blackwave.tv +30564 + Algarroba + Luis Coll + lncoll&algarroba.com +30565 + GTS Nextra + Jan Stanik + hostmaster&gtsnextra.sk +30566 + Dr. Gregory M. Stone & Associates + Gregory M. Stone + gms&stone-associates.com +30567 + Port One Internet, Inc. + Ken Reiss + kreiss&portone.com +30568 + Msona Limited + Francesco Salamida + francesco.salamida&msona.co.uk +30569 + Globitel + Alaa Halawa + alaa.halawa&globitel.com +30570 + Radiator Software Oy + Heikki Vatiainen + oid&radiatorsoftware.com +30571 + Mesto Domazlice + Petr Vondras + vondras&mesto-domazlice.cz +30572 + TDC-NetDesign A/S (formerly 'NetDesign A/S') + Gregers Paludan Nakman + gpna&tdcnetdesign.dk +30573 + Photon Meissener Technologies GmbH + Frank Jaehnig + f.jaehnig&PhotonAG.com +30574 + Fixma S.L. + Luis N. Coll Aleixandre + luis.coll&fixma.es +30575 + castLabs GmbH + Martin Tews + martin.tews&castlabs.com +30576 + Wills and co Stockbrokers + Oli Comber + oli&3ait.co.uk +30577 + Opus Notion + Heath Jones + hj1980&gmail.com +30578 + T Proje Muhendislik Dis Tic Ltd. Sti. + Murat Yaran + murat.yaran&tproje.com +30579 + Arab Bank (Switzerland) + Steve Erzberger + it.operations&arabbank.ch +30580 + Peninsula Engineering Solutions Inc + Gustavo Lara + soymelvin&gmail.com +30581 + Appriver LLC + Steve Mcilwain + smcilwain&appriver.com +30582 + RSP Systems A/S + Andreas Rune Fugl + andreas&rspsystems.com +30583 + AhMuseIndustry.Net, LLC + Michael Strother + ahmuse&hotmail.com +30584 + Allegory Software, Inc. + Ron Turner + raturne&shaw.ca +30585 + SmartShare Systems + Morten Broerup + info&smartshare.dk +30586 + Xytronix Research & Design, Inc. + David Witbeck + david&xytronix.com +30587 + JumpGen Systems, LLC + Matthew Dharm + mdharm&jumpgen.com +30588 + Euphonix, Inc + Jim McTigue + jmctigue&euphonix.com +30589 + HIS Technologies (HK) Limited + Boris Chan + boris&histechs.com +30590 + Österreichische Pensionsversicherungsanstalt + Markus Paschinger + Markus.Paschinger&pva.sozvers.at +30591 + Hetzner Pty Ltd + Wynand van Dyk + wvd&hetzner.co.za +30592 + Nico Roeser + Nico Roeser + n-roeser&gmx.net +30593 + Spyderweb Consulting + Tim Aslat + admin&spyderweb.com.au +30594 + LSTC + Nathan Hallquist + nathan&lstc.com +30595 + Xenatech Co Ltd (South Korea) + Nagaraju Surulivel + snraj&xenatech.co.kr +30596 + Unioncast + Wenyue Ren + suchasplus&unioncast.tv +30597 + Kan sp. z o.o. + Tomasz Klimaszewski + tomasz.klimaszewski&kan.pl +30598 + H.C. Starck GmbH + Percy Engler + percy.engler&hcstarck.com +30599 + SimplisIP + POIRIER Laurent + lpoirier&simplisip.fr +30600 + Deltatec + Louis Plair + software-management&deltatec.be +30601 + podhart.net + Mgr. Ing. Oldrich Horak, Ph.D. + olda.horak&podhart.net +30602 + Tyson Foods, Inc.
+ Doug Fieldhouse + hostmaster&tyson.com +30603 + JXX Technologies + Jarod Watkins + jarod&jxxtech.net +30604 + Uhland + Stephan Nies + nies.stephan&googlemail.com +30605 + Landmark Digital Services + Tim Stinson + stinson.timothy&gmail.com +30606 + eTour + Ludovic LANGE + pen-iana&agisvoyages.com +30607 + Saskatchewan Blue Cross + Chad McDonald + cmcdonald&sk.bluecross.ca +30608 + FACULDADE NOVAFAPI + Manuel Gonçalves da Silva Neto + manuelg&novafapi.com.br +30609 + Temboo, Inc. + Judd Maltin + oid-manager&itemboo.com +30610 + UVT Unternehmensberatung für Verkehr und Technik GmbH + Volkmar Klos + pen.iana.reg&uvt.de +30611 + Maessa Telecontrol + Andreu Palou + apalou&maessa.com +30612 + EK3 Technologies Inc. + Dennis Michaelson + dgm&ek3.com +30613 + ATIS + Jackie Wohlgemuth + jwohlgemuth&atis.org +30614 + Expandium SAS + Rémy Chibois + remy.chibois&expandium.com +30615 + Creative Technology Ltd. + Stanley Lim + stanley_lim&ctl.creative.com +30616 + MagmaTec (Pty) Ltd + Brent Lightley + brent&magmatec.co.za +30617 + Advanced Digital Systems Inc. (ADS) + John Higgs + jh&advanceddigital.com +30618 + Rostocker Freizeitzentrum e.V. + Norman Lüttgerding + nluett&rfz-online.de +30619 + TOSHIBA MITSUBISHI-ELECTRIC INDUSTRIAL SYSTEMS CORPORATION + Hideki Taketani + TAKETANI.hideki&tmeic.co.jp +30620 + Sloka Telecom Private Limited + Upendra Ram Praturi + upendra&sloka.in +30621 + Global Holdings Group + John Betts + john&globalholdings.org +30622 + Centre des technologies de l'information de l'Etat , Luxembourg (formerly 'Centre Informatique de l'Etat , Luxembourg') + Daniel Nickels + cie.oid&ctie.etat.lu +30623 + Federal Signal Corporation, Integrated Systems + Tim Morten + tmorten&fedsig.co.uk +30624 + ALALOOP SAS + Jean-Louis Melin + jlmelin&alaloop.com +30625 + tollwerk sinnesnavigatoren + Dipl.-Ing. Joschi Kuphal + info&tollwerk.de +30626 + VeriSign + Sean Kent + skent&verisign.com +30627 + bitGate data systems GmbH + Siarhei Litvinko + Siarhei.Litvinko&bitGate.de +30628 + NIDEC CORPORATION + NANAE WATANABE + NANAE_WATANABE&notes.nidec.co.jp +30629 + ArchCyber Technology Co. Ltd. + Kidd Hsieh + kidd.hsieh&archcyber.com +30630 + AfriNIC Ltd. + Systems Administrator + sysadmin&afrinic.net +30631 + Terra + Gediminas Strazdas + gediminas.st&terraelectronics.com +30632 + CJSC Kyivstar GSM + Hostmaster of Kyivstar + hostmaster&kyivstar.net +30633 + Universitaet fuer Musik und darstellende Kunst Wien + Hermann Rothe + admin-list&mdw.ac.at +30634 + Eddahbi Karim + Eddahbi Karim + da.templar&acm.org +30635 + II. gimnazija Maribor + Mirko Pesec + mirko.pesec&druga.org +30636 + AMT DATA + Jean-Jacques Broussat + info&amtdata.com +30637 + Haagenti Group Inc. + William Waites + ww&haagenti.com +30638 + Larry Sturtz + Larry Sturtz + lws49028&yahoo.com +30639 + Smart Education sp. z.o.o + Miroslaw Kaminski + m.kaminski&smarteducation.pl +30640 + Unlimi-Tech Software Inc + Christian Charette + ccharette&utechsoft.com +30641 + AJA Video Systems, Inc. + AJA Video Systems, Inc. + ianaadmin&aja.com +30642 + AOptix Technologies, Inc. + Wendell Yin + wyin&aoptix.com +30643 + Databits + Pete Fritchman + petef&databits.net +30644 + Amir Rezghian + Amir Rezghian + arezghian&gmail.com +30645 + Entry Point, LLC + Robert Peterson + ccp&jdcinc.net +30646 + NYCLIX + Mikhail Elias + mikhail.elias&nyclix.org +30647 + Value Team + Stefano Lattanzi + stefano.lattanzi&valueteam.com +30648 + iSupportISP, LLC + Scott M. Grim + engineering&isisp.org +30649 + INetU, Inc.
+ Frank Clements + fclements&inetu.net +30650 + Para Systems, Inc. + Robert Calhoun + rcalhoun&minutemanups.com +30651 + Precognet + Edoardo Costa Sanseverino + edoardo.costa&gmail.com +30652 + GlenMarshall.us + Glen F. Marshall + admin&glenmarshall.us +30653 + Comability + Tal Schoenfeld + tal&comability.com +30654 + College of Policy Science, Ritsumeikan University + College of Policy Science + admin&ps.ritsumei.ac.jp +30655 + Diamond Valley College + Chris Lowe + chris.lowe&dvallcoll.vic.edu.au +30656 + lancehendrix.com + Steven Lance Hendrix + lance&lancehendrix.com +30657 + Eaton-Williams + Adrian Coles + adrian.coles&eaton-williams.com +30658 + BTC Business Technology Consulting AG + Stephan Backhaus + stephan.backhaus&btc-ag.com +30659 + Scilla Systems Inc. + Raimo Järvi + raimo.jarvi&scillasystems.com +30660 + Onsite Media Network Inc. + Wayne Dick + wdick&onsitemedianetwork.com +30661 + zubData + Directory Services + ds&zubdata.com +30662 + Tara Systems GmbH + Robert Sertic + Robert.Sertic&tara-systems.de +30663 + Tall Umbrella + David Maust + davidm&tallumbrella.com +30664 + P. Bryan Consulting Inc. + Paul C. Bryan + pbryan&pbryan.com +30665 + APG Algemene Pensioen Groep N.V. + Otto van Staveren + otto.van.staveren&apg.nl +30666 + Online Rewards + Chris Jacobson + chris.jacobson&online-rewards.com +30667 + Port40 + David Kempf + info&port40.com +30668 + BrControls + w.h.v.petersen + w.vanpetersen&brcontrols.com +30669 + Research for Science, Art and Technology (RFSAT) Ltd + Dr. Artur Krukowski + RTD&rfsat.com +30670 + Vololink Pty Ltd + Milan Prosenica + milan.prosenica&vololink.com +30671 + Crocus-com Company + Valentin Reshetnyak + vresh&crocuscom.com +30672 + Ancerno Ltd. + Evan Perry + webmaster&ancerno.com +30673 + EDELCA + Jose Miguel Parrella Romero + jparrella&edelca.com.ve +30674 + NTS Workspace AG + Simon Obi + noc&nts.ch +30675 + Wayne State University School of Medicine + Bobby Rose + brose&med.wayne.edu +30676 + D-TA Systems Inc. + Angsuman Rudra + abose&d-ta.com +30677 + C4i Security LLC + Scott Joachim + joachim.scott&c4isecurity.com +30678 + Nomovok Ltd + Heli Koskimäki + it&nomovok.com +30679 + Xicom Technology + Steve Trigero + steve.trigero&xicomtech.com +30680 + City of Walsenburg, Colorado + Michael Sheldon + msheldon&spisys.net +30681 + Donald Swauger Consulting + Donald William Swauger + swauger&hotmail.com +30682 + davidrobin.net + david robin + david&davidrobin.net +30683 + quadium.net + Tim Howe + vsync&quadium.net +30684 + YAZAKI Corporation + YASUO ISHIMA + ishima.y2&sys.yzk.co.jp +30685 + TSTENG + Sangwook Park + psw&tsteng.co.kr +30686 + N-iTUS + Jerome Lee + dmlee&n-itus.com +30687 + CoSolIT + Florian Frank + info&cosolit.de +30688 + ShenZhen DaShi Technology Co.,LTD + FangCheng + alanhappy&163.com +30689 + Planet Controls Pty Ltd + Steve Kelly + steve&planetcontrols.com.au +30690 + Institute of Computational Mathematics and Mathematical Geophysics SB RAS + Sergey Nechaev + nechaev&ssd.sscc.ru +30691 + Wapice Ltd + Teemu Niemi + admin&wapice.com +30692 + WS Atkins PLC + Paul Southway + paul.southway&atkinsglobal.com +30693 + global infinipool GmbH + Martin Scholl + ms&infinipool.com +30694 + FaxBack, Inc. + Mike Oliszewski + mikeo&faxback.com +30695 + Chrysler Financial + Fred Hoshowski + FJH7&chryslerfinancial.com +30696 + Deimos Engenharia, S.A. 
+ Marcos Bento + marcos.bento&deimos.com.pt +30697 + CPC (UK) + Graham Blake + graham.blake&cpcuk.co.uk +30698 + n1ety + Dean Maluski + dmaluski&n1ety.com +30699 + Ubipart Ltd + Anssi Lindqvist + support&ubipart.com +30700 + Telefonia Dialog SA + Radoslaw Zdunek + radoslaw.zdunek&dialog.net.pl +30701 + Norwegian University of Life Sciences (UMB) + Ola-Gunnar Juelsrud + ola-gunnar.juelsrud&umb.no +30702 + A2E Technologies + Leon Havin + leon&securigy.com +30703 + Oberthur Technologies + José Cervera + j.cervera&oberthurcs.com +30704 + Dipl.-Ing. Eberhard Iglhaut + Eberhard Iglhaut + mail&iglhaut.com +30705 + SoundBite Communications + Chris Marko + cmarko&soundbite.com +30706 + OpenDNS, LLC + bill fumerola + billf&opendns.com +30707 + Solaiemes + Javier Lopez Fernandez + fjlopez&solaiemes.com +30708 + Stej - Stefan & Jens Internet Services AB + Victor Jerlin + victor.jerlin&stejtech.net +30709 + NCCI Holdings, Inc + James Murray + james_murray&ncci.com +30710 + IT Soft Ltd. + Plamen Dimitroff + plamen&itsoft.bg +30711 + Coolman, Inc + James Coolman + jcoolman&comcast.net +30712 + BDO Seidman, LLP + James Coolman + jcoolman&bdo.com +30713 + Oopss.org + Thomas Martin + admin&oopss.org +30714 + Harry Wettke Consulting + Harry Wettke + info&wettke.de +30715 + Q-layer + Carl D'Halluin + carl&qlayer.com +30716 + Alston & Bird LLP + Carlos Batista + carlos.batista&alston.com +30717 + Odysseyware Inc. + Richard Porter + rporter&odysseyware.com +30718 + TSB Bank Ltd + David Clarke + david.clarke&tsbbank.co.nz +30719 + Elektroservice + Petr Odlozil + odlozil&email.cz +30720 + Etone + Zhiyong Wang + wangzhiyong&etonetech.com +30721 + Agenor d.o.o. + Visnja Ivanovic-Opacic + V.Ivanovic-Opacic&agenor.com.hr +30722 + ProSoft Technology + Matthew Sund + msund&prosoft-technology.com +30723 + FreeCode International Inc. + Preeti Baheerathan + preeti.baheerathan&freecodeint.com +30724 + Vietnam Commercial Computing and Communication + Nguyen Dinh Nam + nguyendinhnam&gmail.com +30725 + all included software gmbh + Leo von Klenze + lvk&alinso.de +30726 + fj3.us Enterprises + Jeff Doyle + jeff&fj3.us +30727 + SNIX/net International + Nick Gustafson + nick.iana&snix.com +30728 + Siltanet + Charly Rohart + charly.rohart&siltanet.fi +30729 + SLM Corporation + Domain Techadmin + domain.techadmin&salliemae.com +30730 + Seanodes + Christophe Guittenit + christophe.guittenit&seanodes.com +30731 + BODET S.A + Philippe REY + philippe.rey&bodet.com +30732 + NetSocket, Inc. + Steve Yao + syao&netsocket.com +30733 + Umpqua Bank + Don Larsen + DonLarsen&UmpquaBank.com +30734 + Florian Sailer + Florian Sailer + f.sailer&outlook.com +30735 + BLUECOARA Networks + Hideo NAKAMITSU + nomo&bluecoara.net +30736 + Kyoeisangyo corp. + Shigeki Asano + asano&kyoeisangyokk.co.jp +30737 + Servicios Tecnicos Agrupados S.A. + Luis Escribano Ulibarri + luis&stagsa.com +30738 + Optenet + Jesús Vázquez + jvazquez&optenet.com +30739 + Jason Antman + Jason Antman + jason&jasonantman.com +30740 + Assyst Gesellschaft für Automatisierung Software und Systeme mbH + Albert Cester + albert.cester&assyst-intl.com +30741 + Configo Systems GmbH + Christof Egner + cegner&configo.de +30742 + Glimmerglass Networks Inc. + Salman A. Qamar + salman&glimmerglass.com +30743 + Global Village Telecom + Diego Rossetto + drossetto&it7.com.br +30744 + Mississippi River Maritime Association + Chris Cunningham + chris&concentriamaritime.com +30745 + Open Channel Software + Larry Mills-Gahl + lmg&openchannelsoftware.com +30746 + Opensolutions GmbH & Co. 
KG + Andreas Piening + andreas.piening&opensolutions.net +30747 + THR Systems + System Administrator + admin&thr.sk +30748 + Raumtex Biehl + Frank Heß + admin&raumtex-biehl.de +30749 + Marabu EDV -Beratung und -Service GmbH + Carsten Schulze + carsten.schulze&marabu-edv.de +30750 + Mesh City Wireless + Daniel Roady + droady&meshcity.com +30751 + Pentadyne Power Corporation + Claude Kalev + claude.kalev&pentadyne.com +30752 + INFOGROUP S.p.A: + Massimiliano Panichi + m.panichi&infogroup.it +30753 + University of Tsukuba + Osamu Tatebe + tatebe&cs.tsukuba.ac.jp +30754 + Erik Abele Technology Consulting + Erik Abele + erik&eatc.de +30755 + faultinjection.net + Alex Senkevitch + alex&senkevitchs.net +30756 + CodersApple.com + Krishna Gadde + gopi.gadde&gmail.com +30757 + FAR NETWORKS SRL + Nicola Fracassi + nicola.fracassi&farnetworks.com +30758 + Comtech Systems, Inc. + Charles R. Schroeder + cschroeder&comtechsystems.com +30759 + ShowSys + Jason Pontius + jpontius&showsys.com +30760 + ELECTRO STANDARDS LABORATORIES + KEN SEPE + KSEPE&ELECTROSTANDARDS.COM +30761 + Telavista B.V. + Ward van Wanrooij + wvanwanrooij&telavista.com +30762 + Mbira Technologies + Systems Administrator + hostmaster&mbira.com +30763 + GlassFish Networks + Michael Helmeste + elf&glassfish.net +30764 + QlikTech + Johan Jeansson + johan.jeansson&qliktech.com +30765 + GAMA V2 + Hamid Toloei + hhamedto&ucsd.edu +30766 + Tomas Shulman + Tomas Shulman + tomishulman&gmail.com +30767 + Full Spectrum Inc. + Menashe Shahar + mshahar&fullspectrumnet.com +30768 + Collège de France + Cyrille Ghesquiere + cyrille.ghesquiere&college-de-france.fr +30769 + Citec S.p.A. + Alberto Storch + a.storch&citec.it +30770 + Alberto Storch + Alberto Storch + alberto&storch.it +30771 + HealthPlus of Michigan + Walter Seiler + mike.seiler&sanyuhu.com +30772 + ProCom Professional Communication & Service GmbH + Wolf-Dieter Fischer + Wolf-Dieter.Fischer&procom-communication.de +30773 + FlyCatcher + John Brooks + john&flycatcher.co.uk +30774 + Anthena Technology, Inc. + Sri Chaganty + schaganty&athena-technology.com +30775 + XControl Sistemas & Soluciones Ltd. + Ricardo Moreno + ricardo_moreno&etb.net.co +30776 + Dronas 2002, S.L.U. + Javier Martínez Martí + jmartinezm&nacex.es +30777 + Cymotec + Mirko Pilger + pilger&cymotec.de +30778 + Murilo Fujita + Murilo Fujita + murilofujita&gmail.com +30779 + Apelon, Inc + Harold Solbrig + hsolbrig&apelon.com +30780 + Charles McCrobie + Charles McCrobie + charles.mccrobie&gmail.com +30781 + Infonova Consultores + Nacho Rodríguez + nacho.rodriguez&infonova.es +30782 + HiliSoft + MU Yangzhi + muyangzhi&hilisoft.com +30783 + IceCell ehf + Andreas Fink + afink&gni.ch +30784 + Fink Consulting GmbH + Andreas Fink + afink&finkconsulting.com +30785 + Bitfrost AS + Georg Engstrand + post&bitfrost.no +30786 + Defne + Abdulhakim Unlu + abdulhakimu&yahoo.com +30787 + Global Networks Switzerland AG + Gregor Riepl + griepl&gni.ch +30788 + VScape + Kerry L. Bonin + kerry&vscape.com +30789 + Market Technologies, Inc. + Roberto Villarreal + rvillarreal&mktec.com +30790 + Cortex Informatica + Francisco J. A. Souza + fj_souza&hotmail.com +30791 + brass Media Inc. + Joel Ranck + joelranck&brassmedia.com +30792 + Huber Verlag für Neue Medien GmbH + Anno v. Heimburg + heimburg&huberverlag.de +30793 + Ingram Book Co. + Mark Mise + mark.mise&ingrambook.com +30794 + Platinumtel Communications + Tom Ross + tom&platinumtel.com +30795 + Zeltiq Aesthetics, Inc. + Corydon A. Hinton + chinton&zeltiq.com +30796 + Videon Central, Inc. 
+ Arthur Shipkowski + art&videon-central.com +30797 + elconas + Robert Heinzmann + services&elconas.de +30798 + Delta Lambda Phi National Social Fraternity + Adam J. Moore + adam.moore&dlp.org +30799 + SkyNet Community VPN + Steven Gilberd + steven.gilberd&gmail.com +30800 + Bee Ware SA + Mendoza Florent + florent.mendoza&bee-ware.net +30801 + IGUS-ITS GmbH + Frank Müller + frank.mueller&igus-its.de +30802 + FireScope, Inc. + Matt Rogers + mrogers&firescope.com +30803 + Vyatta, Inc. + Justin Fletcher + mibs&vyatta.com +30804 + Lattice Limited + Sreejith Kalyat + sreejith&latticelimited.com +30805 + Solegy LLC + James Faeldon + jfaeldon&solegysystems.com +30806 + ipcenter.at + Markus Hof + support&ipcenter.at +30807 + Franz Krainer - netal + Franz Krainer + franzk&netal.com +30808 + Sun International Management Limited + Peter Horvath + suninternational&mtnloaded.co.za +30809 + PacketOps + Tracey Bernath + tbernath&packetops.com +30810 + Universitaet Bremen - Fachbereich 3 + Christian Manal + moenoel&informatik.uni-bremen.de +30811 + Herley-CTI + Alan Kobb + administrator&Herley-CTI.com +30812 + ObjectTel, Inc. + Todd Lehmann + support&objecttel.com +30813 + Advanced Telecom Technologies + Tracey Bernath + tbernath&advancedtelecomtech.com +30814 + Institut français de recherche pour l'exploitation de la mer + Eric Bernard + Eric.Bernard&ifremer.fr +30815 + Intecna Soluciones + Pedro Salido López + psalido&intecna.es +30816 + escape studio + julien BASTIDON + julien&escape-studio.com +30817 + illuminate Solutions + Hervé Barsanti + hbarsanti&i-lluminate.com +30818 + Powerset, Inc. + Richard Hesse + richard&powerset.com +30819 + software security networks - Lukas Gradl + Lukas Gradl + iana&ssn.at +30820 + Fighting Penguin Business Systems + Kevin Fries + kfries&fightingpenguin.com +30821 + Cape Cod Sensors + Stephen Hall + sahtech2002&comcast.net +30822 + General Conference of Seventh-day Adventists + Adriano Brancher + adriano.brancher&dsa.org.br +30823 + Akimeka + John Wang + jwang&akimeka.com +30824 + SH Solutions + Steffen Heil + info&sh-solutions.de +30825 + CentraComm Communications, Ltd. + Loren Weith + noc&centracomm.net +30826 + SmartOptics AS + Göran + goran.hillebrink&smartoptics.com +30827 + RIZ-Transmitters Co.
(formerly 'RIZ ODASILJACI d.d.') + Zvonimir Lucić + zvonimir.lucic&riz.hr +30828 + Colfax Corporation + Christoph Jandek + c.jandek&allweiler.de +30829 + United Technologists Europe Limited (UTEL) + andrew anten + andrew.anten&utel.co.uk +30830 + G & L Geissendörfer & Leschinsky GmbH + Mark Eisenblätter + eisenblaetter&gl-systemhaus.de +30831 + Network Center of Tianjin University + Fudi Liu + arraynil&163.com +30832 + CompuCredit Corporation + Andrew G Whitlock + andrew.g.whitlock&compucredit.com +30833 + Clearspeed Technology plc + Andrew Lonsdale + librarian&clearspeed.com +30834 + VirtueFusion + Steven Goodliff + steven.goodliff&virtuefusion.com +30835 + Blue Box Group, LLC + Stephen Balukoff + stephen.balukoff&blueboxgrp.com +30836 + WINGServers + Pradeep Dalvi + pradeep&dalvi.co.in +30837 + Edge Velocity Corporation + Michael Larson + info&edgevelocity.com +30838 + Cash America Net of Illinois, LLC + Michael Vallaly + domainmaster&cashnetusa.com +30839 + Skidmore College + Scott Wright + swright&skidmore.edu +30840 + Replify Ltd + Wesley Darlington + wesley.darlington&replify.com +30841 + Astase + Adrien Reboisson + adrien-reboisson&astase.com +30842 + APINC (Association Pour l'Internet Non Commercial) + Herve Rousseau + herve&apinc.org +30843 + Blogreen + Romain Tartière + pen&blogreen.org +30844 + Lucas IT Services + D.J. Lucas + dj&lucasit.com +30845 + Mechron Power Systems + Nicolas Novak + jay_omega&mac.com +30846 + IPsmarx Technology Inc. + Arash Vahidnia + arash&ipsmarx.com +30847 + 9Star Research, Inc. + Simon Lee + itadmin&9starresearch.com +30848 + “SITRONICS Smart Technologies”, LLC + Eugene Shishov + Shishov&sitronics-smart.com +30849 + Magellan Netzwerke GmbH + Mike Schneider + support&magellan-net.de +30850 + The University of Hong Kong + System Services Team + group-its-system&hku.hk +30851 + Accolm + Hamidreza Khajeh Alizadeh Attar + hamid.attar&accolm.com +30852 + Knipp Medien und Kommunikation GmbH + Elmar Knipp + Elmar.Knipp&knipp.de +30853 + Mikro Værkstedet + Jakob Simon-Gaarde + jakob&mikrov.dk +30854 + Heinlein Support GmbH + Peer Heinlein + p.heinlein&heinlein-support.de +30855 + Geckologic GmbH + Steffen Schumann + steffen.schumann&geckologic.com +30856 + Bandwidth.com, Inc. + James Milko + jmilko&bandwidth.com +30857 + Data Device Corporation + Vilis Kristofs + kristofs&ddc-web.com +30858 + Mobile News Channel S.A. + Cédric PERINET + iana-pen&mnc.ch +30859 + E-Solutions + Joost Daem + jdaem&esolutions.be +30860 + Betgenius + Andrew Hodgson + sysadmin&betgenius.com +30861 + Venture Craft Networks + Shaunak Sarkar + webmaster&venturecraft.net +30862 + Nancy Universite + Vincent MATHIEU + Vincent.Mathieu&nancy-universite.fr +30863 + Samsung Electro-Mechanics Co., LTD. + Dae-Min Jang + dmjang&samsung.com +30864 + CD-adapco + Chris Hamilton + systems&uk.cd-adapco.com +30865 + Cilantro Cafe + Mohammad Mahmoud Aly + alhashash&alhashash.net +30866 + ID2go IT-solutions + Dr. Axel Schmidt + axel.schmidt&id2go.de +30867 + Volksmission entschiedener Christen e.V. + Christoph Fischer + christoph_fischer&volksmission.de +30868 + Kansas City Web Repair Co. + Brett Heroux + brett.j.heroux&hero-ux.com +30869 + Weiland Associates, Inc. + FJ Weiland + fj&effjay.com +30870 + InterRed GmbH + Heinz Kunz + system&interred.de +30871 + Luce Forward Hamilton and Scripps LLP + Network Operations + postmaster&luce.com +30872 + Uniloc USA + Casey S. Potenzone + casey&uniloc.com +30873 + Confident, Inc. 
+ Paul Kriebel + paul&confidentinc.com +30874 + Tuneology + Francis Taylor + narf&alum.mit.edu +30875 + InnoDomus + Marko Lappalainen + registrar&innodomus.com +30876 + Divmod, Inc. + Duncan McGreggor + oubiwann&divmod.com +30877 + Giogio IT + Hansjürg Wenger + pen&giogio.ch +30878 + Panacea Vision Co. LTD + Poobet Mongkolwat + poobet&panaceavision.com +30879 + Pavleck LLC + Jeremy D. Pavleck + jeremy&pavleck.net +30880 + Chaos Computer Club München e.V. + Patrick Otto + codec&muc.ccc.de +30881 + ELycée S.A.S. + Denis JOUVIN + denis.jouvin&elycee.com +30882 + Nanjing Xian Zhi Lu Technology Co.,Ltd. + Xing Xiuyi + hubertstar&gmail.com +30883 + Correlix Inc. + Boaz Bash + boaz.b&correlix.com +30884 + Covertix + Tzach Kaufmann + tkaufmann&covertix.com +30885 + Rookie Inc. + T.Hayashi + hayashi&rookie-inc.com +30886 + Sichuan Jiuzhou Electronic Technology Co.,Ltd + Lutao Zhang + satanfire345&163.com +30887 + Alfred KUHSE GmbH + Marco Anetzberger + fe_reg&kuhse.de +30888 + cittadino GmbH + Miguel Marquez Gonzales + marquez.gonzales&freenet.de +30889 + Valero Energy Corporation + Matt Moulder + matt.moulder&valero.com +30890 + Institut de Recherche en Ophtalmologie + Sylvain Bolay + it&irovision.ch +30891 + Gramant + Denis Yeldandi + dyeldandi&gramant.ru +30892 + Mirada PLC + Jaime Vallori + jaime.vallori&mirada.tv +30893 + CJSC Goodwin + Sergey Bakulin + snmp&goodwin.ru +30894 + INTRA-SYS GmbH + Frank Fahlbusch + ffa&intra-sys.de +30895 + antonis.mobi + Antonis Hadjiantonis + pen&antonis.mobi +30896 + Knitebane.net + Rich Shappard + knitebane&gmail.com +30897 + McDonald's Corporation + Michael J. Eisenberg + ITSecurity&us.mcd.com +30898 + NetworkedAssets GmbH + Joerg Mueller-Kindt + jmk&networkedassets.com +30899 + Montana State University + Ivan R. judson + ivan.judson&montana.edu +30900 + ipWorks Co., Ltd. + Tycoon Lee + master&ipworks.co.kr +30901 + UNITED INFORMATION TECHNOLOGY CO.LTD + TanYinhong + tanyh&uit.com.cn +30902 + GoBGK + Bryan Gonderinger + bjg&gobgk.com +30903 + Registru Centras + Saulius Kvedaravicius + saulius&registrucentras.lt +30904 + Radius Sweden AB + Anders Grahn + anders.grahn&radius.net +30905 + INNOVA S.A. + Nassos Tsadaris + n.tsadaris&innova-sa.gr +30906 + IFS Ing.-Vertriebsbuero für EDV Softwaretechnik GmbH + Joerg Roschke + roschke&ifs.de +30907 + Paradigm Communication Systems Ltd + Christopher Draycott + chris.draycott&paracomm.co.uk +30908 + RtVision, Inc. + Chris Frederick + support&rtvision.com +30909 + wbb wireless + wbb wireless + wbb.wireless&gmail.com +30910 + WS Live, LLC + Patrick Powers + ppowers&wslive.com +30911 + Christopher Hubbard + Christopher Hubbard + guyverix&yahoo.com +30912 + Indent Solutions LLC + Sivanaga Dantuluri + siva&indentsolutions.com +30913 + UMC ELECTRONICS CO., LTD. + Ken Sudo + k-sudo&umc.co.jp +30914 + Oliver Oehme EDV-Service GmbH + Martin Meinhold + martin.meinhold&oehmeedv.de +30915 + Beijing Shengtian Engineering Design&Consult Inc. (bjst) + xumaokui + xumaokui&163.com +30916 + TeamQuality + Andrea Spinelli + andrea.spinelli&imteam.it +30917 + jillstephens.org + Jill Stephens + jsopera&gmail.com +30918 + Catatrepa, S.A. + Ariel Fatecha + afatecha&gmail.com +30919 + Unipier Ltd.
+ Arie Gofer + arie.gofer&unipier.com +30920 + Scania Infomate + Erik Lonroth + erik.lonroth&scania.com +30921 + john horton consultancy ltd + john horton + jh_squirrel&yahoo.com +30922 + Telsasoft + Chris Lexvold + tech&telsasoft.com +30923 + Max-Planck-Institute for Mathematics in the Sciences + Rainer Kleinrensing + rainer&mis.mpg.de +30924 + Jantronics + Janet Plato + techgrrl2003&yahoo.com +30925 + Robust Solutions LLC + Pavel Labushev + p.labushev&gmail.com +30926 + flexmedia.com.tw + stephen.wang + smellychen&hotmail.com +30927 + Mainova AG + Klaus-Dieter Hollstein + soc&mainova.de +30928 + Loycon s.c. + Tomasz Madry + biuro&loycon.pl +30929 + Schuberg Philis + Roeland Kuipers + rkuipers&schubergphilis.com +30930 + Billiamware SW + William H Swortwood III + whswort&us.ibm.com +30931 + Sheldon Networks Inc + Stuart Sheldon + hostmaster&actusa.net +30932 + Epicenter Inc + David Wang + dwang&epicenterinc.com +30933 + nick125 + Nick Devito + nick&nick125.com +30934 + STECO Ltd + Constantin Stefan + cs&steco.biz +30935 + Railway Equipment Company + Robert Wagner + rwagner&rwy.com +30936 + Texas State University + Mark Hughes, Technology Resources + es-idm-admins&txstate.edu +30937 + Communications-Electronics Solutions Pty Ltd + Michael McMahon + mike.mcmahon1&c-esolutions.com.au +30938 + N8 Identity Corp. + Mike Gillan + mike.gillan&n8id.com +30939 + Embrapa Gado de Corte + Carlo César Simioli Garcia + carlo&cnpgc.embrapa.br +30940 + SC FITS SRL + Alexandru Tica + alexandru.tica&fits.ro +30941 + iFACTORY Consulting + Victor Luyt + victorl&ifactoryconsulting.co.za +30942 + Global IP Solutions, Inc. + Tina le Grand + tina.legrand&gipscorp.com +30943 + MultiQ Products AB + Gunnlaugur Jonsson + gunnlaugur.jonsson&multiq.se +30944 + PCMS Group PLC + Tony Espley + tony.espley&pcmsgroup.com +30945 + Chengdu Guangda Electronic&Telecommunication Technology Development Co., Ltd. + Wu Hengzhong + gddz&vip.163.com +30946 + Department of Computer Engg. + Mohammad Sana Akhtar + msaelectronics&gmail.com +30947 + YAMAKI ELECTRIC CORPORATION + Toshio Itoh + toshio-itoh&yamaki-ec.co.jp +30948 + Cloudmark, Inc. + Kevin San Diego + iana&cloudmark.com +30949 + Aha! Software + Peter Gallanis + support&ahasoftware.com +30950 + Transneptune + Kit La Touche + kit.la.t&gmail.com +30951 + N Green + N Green + giddleberry&gmail.com +30952 + CONSTALANT JSC + Dimitar Dimitrov + ddimitrov&constalant.com +30953 + Novotronik GmbH + Mr. Michael Grimminger + mg&novotronik.de +30954 + IDS services + Robin Schroeder + r.schroeder&ids-services.de +30955 + Flavio Poletti + Flavio Poletti + flavio&polettix.it +30956 + yTech GmbH + Dieter Hey + dieter.hey&ytech.de +30957 + WHP International SAS + Stéphane Peyrucq + s.peyrucq&whp.fr +30958 + Slavenet + Thomas Wanderer + discoboy&slavenet.org +30959 + ZhongLianTong electronics corporation, Ltd + Zhiqiang Zhang + cys0403&hotmail.com +30960 + Innotube, Inc. + John Kim + jhkim&innotube.com +30961 + RedShift Inter Networking + Phanindra Jujjavarapu + phani&redshiftnetworks.com +30962 + Denis Knauf + Denis Knauf + deac&DenKn.de +30963 + HUTON + Hong ki ho + khhong&huton.co.kr +30964 + ZLGMCU + David Zhang + software&zlgmcu.com +30965 + University "Dunarea de Jos" Galati + Adrian ISTRATE + Adrian.Istrate&ugal.ro +30966 + SHENZHEN CLEVER ELECTRONIC CO.,LTD. 
+ Debo Qin + qindebo&hotmail.com +30967 + VVAAQIS + Alexander Buslayev + foralex&mail.ru +30968 + For Sale Digital Internet-Agentur GmbH + André Lehmann + technik&for-sale-digital.de +30969 + 2MI Tecnologia + Marcos Amorim + marcos&2mi.com.br +30970 + IC Plus Corp. + Reyes Wu + reyes_wu&icplus.com.tw +30971 + ALEXON CO.,LTD. + Toshio Masuchi + toshio&alexon.co.jp +30972 + College of Lake County + John Chan + jchan&clcillinois.edu +30973 + RightsAssist, LLC. + Ray Gauss II + ray&rightsassist.com +30974 + kitASP + Ken Saigoh + Ken.Saigoh&kitASP.com +30975 + cvicse Ltd. + wei.kj + weikj_sky&126.com +30976 + Netezza Inc + John Skier + jskier&netezza.com +30977 + SPINLOCK d.o.o. + Davor Ocelic + docelic&spinlocksolutions.com +30978 + Cordium Links, LLC + Elizaberth Randall, PhD + erandall&cordiumlinks.com +30979 + E.C.O. Institut für Ökologie + Leo Unglaub + unglaub&e-c-o.at +30980 + Krazan and Associates, Inc + John Giannetti + johngiannetti&krazan.com +30981 + MagtiCom LTD + George Chelidze + gchelidze&magticom.ge +30982 + Jacobs University Bremen + Juergen Schoenwaelder + j.schoenwaelder&jacobs-university.de +30983 + Gurulabs.it + Alessandro Tatini + info&gurulabs.it +30984 + GNS Systems GmbH + Jan Niemann + jan.niemann&gns-systems.de +30985 + Gadgets & Technology Solutions LLC + Information Group + info&gadgetstechsolutions.com +30986 + Paymetric Inc. + Genady Vishnevetsky + mis&paymetric.com +30987 + Norpak Corporation + William Barry + wbarry&norpak.ca +30988 + PowerFile, Inc + Dave Loewith + dloewith&powerfile.com +30989 + Hiscox Plc + Andrew Harper + !ITSAD&O365Tribe&HISCOX.com +30990 + Food for the Hungry + Shawn Parrish + sysadmin&fh.org +30991 + PCS do Brasil Ltda + Celso Pasquini + celso.pasquini&pcsbrasil.com.br +30992 + Wrocław University of Economics + Borys Pogoreło + borys&ue.wroc.pl +30993 + Sure3 Networks, LLC + Josh Wyse + josh&sure3.com +30994 + SSB Progetti srl + Fulvio Monti + fulvio.monti&ssbprogetti.it +30995 + WebMessenger, Inc. + Boris Georgiev + contact&webmessenger.com +30996 + BlackMesh + Dan Reif + dan.reif+IANAPEN&gmail.com +30997 + Telecom South America S/A + Daniel Korndorfer + daniel&tesatelecom.com +30998 + Fikus Development Corp. + Filip Bujanic + fbujanic&fikus.com +30999 + Rohati Systems, Inc. + Kirti Prabhu + kirti&rohati.com +31000 + stelcom + chen shaowei + claris_chen-iana&yahoo.com.cn +31001 + secomicon GmbH + Florian Speidel + fs&secomicon.com +31002 + COSI + Bill Michaelson + ianapen&bill.from.net +31003 + China Pacific Insurance (Group) Company Limited + Haibin ZHANG + zhanghaibing&cpic.com.cn +31004 + Nautile SARL + AUPETIT Nicolas + contact&nautile.nc +31005 + ODS-Medical GmbH + Ralf Zwoenitzer + r.zwoenitzer&ods-medical.com +31006 + EnergoData + Akhmedov Dmitry + akhmedov-dv&energodata.ru +31007 + Ikse.net + Alexandre Dath + iana&ikse.net +31008 + Business & Decision + IT Team Contact + iana&businessdecision.com +31009 + PRECISION ELECTRONICS LTD. + Amit Kumar Mittal + amit.mittal&pel-india.com +31010 + Cymphonix Corp + Trevor Paskett + tpaskett&cymphonix.com +31011 + Enterprise Management Consulting Ltd. + Michael Maclean + michael&emcuk.com +31012 + Home Works S.p.A. + Tani Alessandro + support&homeworks.it +31013 + School District of Philadelphia + Rich Bateman + techops&phila.k12.pa.us +31014 + FXtion Limited + Simon Dowson + simon.dowson&fxtion.com +31015 + Nalco Company + PKI Administrator + pki&nalco.com +31016 + Socrata, Inc. 
+ Paul Paradise + oid-master&socrata.com +31017 + tele.sjb.partners AG + Thomas Bach + t.bach&telesjb.ch +31018 + HeLi NET iTK + Dennis Ploeger + ploeger&helinet.de +31019 + Tecal Engenharia Ltda + Ricardo Smith + rsb&tecal.com.br +31020 + Teachers Credit Union + Robert Boenne + Robert.Boenne&tcunet.com +31021 + Nstein Technologies Inc. + Mikhail Moussikhine + mikhail.moussikhine&nstein.com +31022 + Sun&seeds Solutions S.L. + Adolfo Gómez + agomez&sunandseeds.com +31023 + Veeam Software + Alexey Vasilyev + Alexey.Vasilyev&veeam.com +31024 + Swiss Reinsurance Company + Beat Meyer + beat_meyer&swissre.com +31025 + NPAI SA + Gabriel GIL + gabriel.gil&npai.fr +31026 + Gil Leduc Industries + Gabriel GIL + gabriel.gil&gli-services.fr +31027 + SyTech Corporation + Tigrane Koutoudjian + tigrane&sytechcorp.com +31028 + Onse telecom corporation + Lee Byung Hee + bhlee&onsetel.co.kr +31029 + HYUNDAI Digital Technology Co.,Ltd + Jeong, junho + jhjeong&hdt.co.kr +31030 + Voipex Limited + Adam Hill + adam&voip-x.co.uk +31031 + VO2 Labs + Jean Charles Passard + jcharles&provectio.org +31032 + jcsbk + Michael Schmidt + m.schmidt&jcs.warburg.de +31033 + Elodig + Emmanuel Guiton + egn&elodig.fr +31034 + Schleifenbauer Products BV + Alain Schuermans + alain&schleifenbauer.com +31035 + Haniriito Co.,Ltd. + Kazuma Asano + k-asano&haniriito.jp +31036 + Svyaz Engineering M JSC + Kapustin Sergey + s.kapustin&allmonitoring.ru +31037 + Luxms Inc. + Serg Shestakov + serg&luxms.com +31038 + Next Generation Creative LLC + Ben Buchwald + ben&ngcreative.com +31039 + JinniWare Software + Robert Nelson + robert&jinniware.com +31040 + Doze.net + Mike Joseph + mj&doze.net +31041 + TZ Communications Ltd + Igor Marnat + i.marnat&tz.ru +31042 + Uny IT B.V. + Richard Spoorenberg + Richard.Spoorenberg&UnyIT.nl +31043 + Eclipse Options (HK) Ltd. + Nick Gustafson + nick.gustafson&eclipseoptions.com +31044 + Alfresco Software Ltd. + IT Services + its&alfresco.com +31045 + Fry Consulting, Inc. + Peter J. Fry + iana&fryconsulting.com +31046 + Collogia Unternehmensberatung AG + Markus Stockhausen + markus.stockhausen&collogia.de +31047 + Aptilon Holdings Inc + Jason Terlecki + netops&aptilon.com +31048 + Art Of Mobile + Chris Goo + domain&artofmobile.com +31049 + Midtronics Inc. + Carolyn Lynch + CLynch&Midtronics.com +31050 + CyT Comunicaciones y Telemática S.R.L + Ezequiel Blanca + Eblanca&cytcomunicaciones.com.ar +31051 + Mojix Inc. + Christopher Jones + chris&mojix.com +31052 + Liberty Alliance + Eric Tiffany + eric&projectliberty.org +31053 + Evolved Intelligence + Paul Brickell + paul.brickell&evolvedintelligence.com +31054 + TTech + Marcus Vitek + m.vitek&ttech.at +31055 + gfsb Gesellschaft fuer Systemberatung mbH + Dr. F. Schlenker + frederik.schlenker&gfsb.de +31056 + Thunderbird Resorts + Ramiro Samaniego + rsamaniego&thunderbirdresorts.com +31057 + Ergowells + Jerry Wintrode + jwintrode&gmail.com +31058 + Teragauge + Support Group + support&teragauge.com +31059 + Known Star (Beijing) Kiosks Co.. LTD + Han Zhigang + hanzg&known.cn +31060 + Video Internet Technologies LTD. + Yuriy Bukhtiyarov + yuriy&vitcompany.com +31061 + Cheops Elektronik Handels- und Fertigungs KG + Uli Steffenhagen + steffenhagen&cheops-elektronik.de +31062 + Aeracode + Andrew Godwin + andrew&aeracode.org +31063 + Daniel Saul + Farfel MacNoogen + novkes.ro&gmail.com +31064 + Optimal Satcom, Inc. + Ahsun H. Murad + amurad&optimalsatcom.com +31065 + Toyo Electronics Corp. 
+ Takahiro Suda + t.suda&toyonics.co.jp +31066 + GSP - Loteamentos + José Santiago + ti.santiago&gsp.com.br +31067 + On Line Informatica + Felix Costa + felix&online.eti.br +31068 + Tyfon Svenska AB + Tyfon Hostmaster + hostmaster&tyfon.net +31069 + Belvok Ltd. + Sergei Gavrikov + info&belvok.com +31070 + Zentzu Inc. + René Larivière + rene.lariviere&zentzu.com +31071 + Intelinet AS + Kristian Berg + kristian.berg&intelinet.no +31072 + proQuest Computersysteme GmbH + Uli Sambeth + support&airquest.com +31073 + Radian Company + Alexander Nikitin + naa_radian&mail.ru +31074 + Electronimec, Inc + Rob Berry + rjberry&rjcontrol.com +31075 + Mobigeo + Marcin Bartoszek + mbartoszek&mobigeo.pl +31076 + Jan Fischer + Jan Fischer + jan.fischer&mac.com +31077 + Systems Neurobiology Laboratory + Chris Hiestand + chiestand&salk.edu +31078 + Nankai Densetsu CO.,LTD. + YOSHIAKI SANADA. + sanada_iana&nankai-densetsu.co.jp +31079 + Bircher ProcessControl AG + Markus Auer + markus.auer&bircher.com +31080 + Galileo International + Travelport Technology Operations + sysadmin.italy&galileo.com +31081 + Perlan Technologies Polska Sp. z o.o. + Piotr Buliński + pbulinski&perlan.com.pl +31082 + EUNETIC GmbH + Markus Koelmel + info&eunetic.eu +31083 + ALTE LEIPZIGER Versicherung a. G. + Soenke Schau + schaus&alte-leipziger.de +31084 + TechSAT GmbH + Armin Gruner + ag&techsat.com +31085 + Greyware Automation Products, Inc. + SNMP Support Group + support&greyware.com +31086 + Technology Nexus Secured Business Solutions AB + Peter Hellström + gopki&nexusgroup.com +31087 + Enfo Broadcast AS + Steve Tveit Pedersen + steve.tveit.pedersen&enfobroadcast.com +31088 + Extend Health, Inc. + Mark Stafford + mstafford&extendhealth.com +31089 + Clemson University + Jeff Bate + jeff&clemson.edu +31090 + Scale Computing, Inc. + Mike Olson + iana-contact&scalecomputing.com +31091 + Andreas Tsiotsias + Andreas Tsiotsias + andreas_tsiotsias&btconnect.com +31092 + GFI Software Ltd + Quintin Zammit + postmaster&gfi.com +31093 + Passguard + Chris Southern + southern&passguard.com +31094 + dynaTrace Software, Inc. + Ted Feyler + ted.feyler&dynatrace.com +31095 + SEC (Software Engineering Center, Chinese Academy Of Sciences) + Zheng Shaolin + slzheng&sec.ac.cn +31096 + koyo electric co,ltd + kajiwara terumitsu + kajiwara&koyo-pb.co.jp +31097 + Mouse-hole.com + Karl Wagner + karl&mouse-hole.com +31098 + PJM Interconnection, L.L.C. + George M. Fazio, Jr. + noc&pjm.com +31099 + SZ-ID b.v. + Richard Spoorenberg + R.Spoorenberg&sz.nl +31100 + Metascopic + Jason Dusek + jsn&metascopic.com +31101 + Future Spirits Co,Ltd. + Takeshi Kitada + kitada&future-s.com +31102 + u10 Networks + Philip Mulholland + phil&u10networks.com +31103 + Daniel Ellard + Daniel Ellard + ellard&gmail.com +31104 + EPRO Telecom Services Ltd. + Tim Li + tim-li&eprotel.com.hk +31105 + WMHost + Toni Walther + hostmaster&wmhost.com +31106 + 23projects.org + Torsten Grunig + tgrunig&gmail.com +31107 + Global Protocols + Kara Hale + hale&globalprotocols.com +31108 + Compriva Communications Privacy Solutions Inc. + Bill Dagg + system.admin&compriva.com +31109 + IAVANTE Foundation + Carlos González + listas1&maycar.net +31110 + UAB "BT-Grupe" + Andrius Vasauskas + andrius&bt-group.lt +31111 + Norfello Oy + Tuomas Rasila + tuomas.rasila&norfello.com +31112 + XForm Systems GmbH + Jan Winkler + j.winkler&xformsystems.de +31113 + Swemel JSC + Alexander Zakharov + zh&swemel.ru +31114 + Aetna Group S.p.A. 
+ Gianluca Semprini + gsemprini&aetnagroup.com +31115 + InfoWatch + Svetlana Ashkinazi + Svetlana.Ashkinazi&infowatch.com +31116 + Cuesta College + Grant Chesy + gchesy&cuesta.edu +31117 + Quality Bicycle Products, Inc. + Greg Sampson + greg&qbp.com +31118 + Cryptomach Ltd. + Sergey Golovashych + sg&cryptomach.com +31119 + I-node S.r.l. + Massimiliano Perantoni + admin-c&i-node.it +31120 + QiPlay + Yannick Brehon + y.brehon&qiplay.com +31121 + Techfirm, Inc. + Katsuya Matsumoto + infra-reg&techfirm.co.jp +31122 + VoIPFuture Ltd. + Thomas Koehn + snmp&voipfuture.com +31123 + WuHan Teklong Technology Co.,Ltd + Stephen Wan + wh.teklong&gmail.com +31124 + ABF Baltic AS + Juri Gurjanov + yuri&abfbaltic.ee +31125 + Ressources Informatiques + Laurent COUDROT + coudrot&r-i.fr +31126 + ASTELLIA + julien LECOEUVRE + j.lecoeuvre&astellia.com +31127 + Emerson Process Management + Bob Huba + bob.huba&emerson.com +31128 + Lead Tech Design + Samira CHAHIBI + samira.chahibi&ltdsa.com +31129 + Alaska Satellite Facility + Theron Bair + tbair&asf.alaska.edu +31130 + Basho Technologies, Inc. + Justin Sheehy + justin&basho.com +31131 + Ilove2ski + Kristin Grifin + kristin.l.griffin&gmail.com +31132 + Dictao + Julien Montagne + jmontagne&dictao.com +31133 + Triton Container Intl + Chris Bradley + cbradley&triu.com +31134 + Kinderedheart + Bob Clayton + bob&kinderedheart.com +31135 + Diglinks GmbH + Badri Pillai + badri&diglinks.com +31136 + Gestión de Seguridad Electrónica S.A. - GSE S.A. + ALVARO DE BORJA CARRERAS AMOROS + info&gse.com.co +31137 + Bildungszentrum Uster + Stephan Göldi + stephan.goeldi&bzu.ch +31138 + DrJays.com, Inc. + Kelsey Hudson + khudson&drjays.com +31139 + Viv Diwakar + Viv Diwakar + viv&vdiwakar.com +31140 + TouK Sp. z o. o. s. k. a. + Pawel Zuzelski + pen-contact&touk.pl +31141 + GEMIK + Michael Geisberger + web&gemik.com +31142 + 7signal Oy + Anton Puolakka + anton.puolakka&7signal.com +31143 + Andreas Stenius Teknikkonsult + Andreas Stenius + andreas.stenius&astekk.se +31144 + Corporación Avance + Greful Montaño + gmdead&gmail.com +31145 + Holido + Philipp Maske + philipp.maske&holido.com +31146 + MatrikonOPC + Brad Huddleston + brad.huddleston&matrikonOPC.com +31147 + RiverStar Software + John Conneely + admin101&riverstarsoftware.com +31148 + Saga d.o.o. Beograd + Sasa Antic + sasa.antic&saga.rs +31149 + South-West University + Ivo Damyanov + damianov&swu.bg +31150 + TriumStar International Co., Ltd. + Peter Liu + peterliu&triumstar.com.tw +31151 + Sixgreen + Brill Pappin + bpappin&sixgreen.com +31152 + Allegea Information Services + Jose Marcos Gomes + support&allegea.net +31153 + Aonalu + Priam Kanealii + priamk&gmail.com +31154 + ck-productions + Charles Krüger + postmaster&ck-productions.de +31155 + SeYo Solutions + Yonas Hambissa + info&seyosolutions.com +31156 + By Light Professional IT Services + Craig McManus + craig.mcmanus&by-light.com +31157 + Patersons HR & Payroll Solutions + Douglas Baigrie + douglas.baigrie&sg.patersons.net +31158 + Team Engineers + Bollavaram MohanReddy + dev&teamengineers.in +31159 + Stadtwerke Speyer GmbH + Stefan Nitsche + it-service&sws.speyer.de +31160 + DARTALIS S.A. + Joerg Stefan Folz + folz&dartalis.lu +31161 + SMT&C Co., Ltd.
+ Tycoon Lee + tycoonlee&smtnc.co.kr +31162 + EMC Electronic Media Communication SA + Cristian Fochetti + emc.mail&bluewin.ch +31163 + Ultra Electronics - Tactical Communication Systems + Andre-Claude Paulin + andre-claude.paulin&ultra-tcs.com +31164 + xweb OG + Erwin Raab + ip-admin&xweb.cc +31165 + Active Storage, Inc. + Skip Levens + skip&activestoragelabs.com +31166 + ViM Internetdienstleistungen Gmbh + Rainer Hinterberger + r.hinterberger&vim.at +31167 + Home State Bank + Ken Porter + Ken.Porter&homestatebank.com +31168 + Pleora Technologies Inc. + Igor Sales + igors&pleora.com +31169 + Medical Facilities of America + Hunter French + hunter_french&mfa.net +31170 + PNG Development Group + Glenn Randers-Pehrson + glennrp&gmail.com +31171 + Ning Inc + Theral Mackey + ops&ninginc.com +31172 + Broward Sheriff's Office + Collin Bennett + collin_bennett&sheriff.org +31173 + Alaska Department of Fish & Game + Corey Kos + corey.kos&alaska.gov +31174 + Mobile Active Pty Ltd + System Administrators + admins&mobileactive.com +31175 + Partition Tragic + Giacomo Cariello + info&partitiontragic.org +31176 + Largic Co.,Ltd. + Daisuke.Gendou + gendou&largic.jp +31177 + Quantm + Barney Flint + barney_flint&trimble.com +31178 + GroundHog Software + Warwick Smith + sysadmin&groundhog.com.au +31179 + Micro Research Ltd + B. J. Hill + bjh&micres.biz +31180 + Centre for Development of Advanced Computing + Dr. Subrata Chattopadhyay + subratac&cdacb.ernet.in +31181 + DBALab S.p.A. + Riccardo Gajo + riccardo.gajo&dbalab.it +31182 + Ville de Luxembourg + Sven Lamberty + slamberty&vdl.lu +31183 + The Logic Group Enterprises Ltd + Mohammad Rahin + mohammad.rahin&the-logic-group.com +31184 + EWE AG + Stephan Backhaus + stephan.backhaus&ewe.de +31185 + Kell Systems + Stephen Fitton + stephen.fitton&kellsystems.co.uk +31186 + mybasta.com + Dov Zamir + dov&mybasta.com +31187 + Secure Designs, Inc. + Scott Underwood + scott&firelan.net +31188 + Engel Solutions AG + Product Admin + admin&engel-solutions.com +31189 + Rhombotech LLC + Ben Sanchez + ben&rhombotech.com +31190 + Hackers' Treasure Zoo + Jason Dusek + jsn&hackerstreasurezoo.org +31191 + Alset Corporation + John Wruble + jwruble&alsetcorp.com +31192 + Hella KGaA Hueck & Co. 
+ Uwe Lutter + uwe.lutter&hella.com +31193 + DT Netsolution GmbH + Daniel Schwager + daniel.schwager&dtnet.de +31194 + Public Association "RENAM" (Research and Educational Networking Association of Moldova) + Bogatencov Peter + bogatencov&renam.md +31195 + audio data Ton- und Datentechnik GmbH + Achim Strauch + achim.strauch&audio-data.de +31196 + Sosialistisk Venstreparti + Lars Svanberg Jakobsen + lars.jakobsen&sv.no +31197 + Ousmane Ba International Business Automation + Ousmane Ba + oba&iba.net +31198 + Thomas Stein + Thomas Stein + thomas.stein&koeln.de +31199 + E-learning SA + Jerome Coignard + admin_retd&crossknowledge.com +31200 + is-ro Informations-Architektur + Lothar Rösch + info&is-ro.net +31201 + Dotcom-Monitor Inc + Technical Support + support&dotcom-monitor.com +31202 + wiredobjects + Sven Wilhelm + wilhelm&wiredobjects.eu +31203 + Jenomics GmbH + Benjamin Schiller + info&jenomics.de +31204 + I-James Technology + Etienne Bagnoud + etienne&i-james.com +31205 + Newsroom Solutions, LLC + Damian Hess + dhess&newsroomsolutions.com +31206 + DRS Test and Energy Management, LLC + Todd Lumpkin + tlumpkin&drs-tem.com +31207 + Vertica Systems, Inc + Mario Barrenechea + mario&vertica.com +31208 + Seneca College of Applied Arts & Technology + Louis Koutsovitis + louis.koutsovitis&senecac.on.ca +31209 + Lightcomm Technology + Eric Zhang + eric.zhang&lightcomm.com +31210 + Cross Technologies, Inc. + Nanalee Wegener + nanalee&crosstechnologies.com +31211 + Engineering Design Team, Inc. + Chet Britten + chet&edt.com +31212 + Senotron GmbH + Martin Fieseler + info&senotron.de +31213 + Integrat (Pty) Ltd + Herman Cremer + integrat-support&usa.net +31214 + ADIPSYS + Francois Bourdais + contact&adipsys.com +31215 + PFS + Lars G. Sander + lars.sander&gmx.net +31216 + PCM Industries + Paul C. Murdock + web&pcmindustries.net +31217 + AST + Sergio A. Martinez + ast&adinet.com.uy +31218 + UCOPIA Communications + Julien Rotrou + jrotrou&ucopia.com +31219 + kubus IT GbR + Rico Rieger + rico.rieger&kubus-it.de +31220 + Syberian + Elliott Ing + elliott&syberian.co.uk +31221 + Niggemann Innovations GmbH + Thomas Minor + iana-pen&niggemann-innovations.de +31222 + Idium AS + Vidar S. Ramdal + vidar&idium.no +31223 + Spider Financial + Mohamad EL-Bawab + mohamad.el-bawab&spiderxl.com +31224 + Horizon Semiconductors + Nikolay Assa + Nikolay.Assa&horizonsemi.com +31225 + Pulse Power and Measurement Ltd. + Jacek Batkowski + jbatkowski&ppm.co.uk +31226 + NWG Technologies, LLC + Chris Heath + cheath&nwgtechnologies.com +31227 + US Cable + Chris Allermann + callermann&uscable.com +31228 + RUSTEDCOMPUTING.COM + Matthew Herzog + matthew.herzog&gmail.com +31229 + One Convergence + Nilay Tripathi + nilay.tripathi&oneconvergence.com +31230 + Lazurit + Alexander Chekalin + achekalin&lazurit.com +31231 + Longhurst Group + Iain Burnley + iain.burnley&longhurst-group.org.uk +31232 + Ringcentral, Inc. + Alexey Oudalov + alexu&ringcentral.com +31233 + Almira Labs, S.L. + Juan J. Olmedilla Arregui + juan.olmedilla&almiralabs.com +31234 + Inter7 Internet Technologies, Inc. + Matt Brookings + pen&inter7.com +31235 + ENVALE SYSTEMS PVT. LTD. + BHUPENDRA SHAH + envalesystems&gmail.com +31236 + PartyGaming PLC + Kriek Jooste + kriekj&partygaming.com +31237 + e.sigma Technology AG + Max Bidlingmaier + mbidlingmaier&esigma-technology.com +31238 + baumann.at - concepts & sulotions + Dr. Christian Baumann + cbaumann&baumann.at +31239 + AustriaPro (e-Zustellung) + Dr. 
Christian Baumann + c.baumann&e-zustellung.at +31240 + i-move internet gmbh + Dr. Christian Baumann + c.baumann&i-move.at +31241 + Cross Industries AG + Günter Kavsek + guenter.kavsek&crossindustries.at +31242 + Foster Link Inc. + Brian Foster + brian&fosterhardware.com +31243 + Interhost AS + Asbjørn Sannes + asbjorn.sannes&interhost.no +31244 + Fresh Media Group + Andrew Burke + andrew.burke&freshmediagroup.com +31245 + Brandon Werner + Brandon Werner + brandonwerner&acm.org +31246 + Realmagic Technology Ltd. + Peter Peng + peter.penghai&163.com +31247 + pingdash AB + Pontus Engblom + pontus&pingdash.se +31248 + Field Solutions, LLC + Jeff Sussna + itops&fieldsolutions.com +31249 + Daruma Telecomunicacoes e Informatica S/A + Alexandre Pereira da Silva + a.pereira&daruma.com.br +31250 + Avnet + Brian Roller + brian.roller&avnet.com +31251 + Olé Telecom + noc&oletelecom.com.br + noc&oletelecom.com.br +31252 + ELMECH + Leszek Wolski + leszek.wolski&gmail.com +31253 + Andre Hotzler EDV-Dienstleistungen + Andre Hotzler + iana_nospam&andrehotzler.de +31254 + Fooman Limited + Kristof Ringleff + iana&fooman.co.nz +31255 + MAC Telecom Technologies Pvt Ltd + Vishwanath Patil + vpatil&macil.in +31256 + CSO srl + Piet de Jong + p.dejong&csoitalia.it +31257 + elxsi networking services + Martin Kluge + mk&elxsi.de +31258 + 3onedata Technology Co. Ltd. + Jeff Ji + jcy&3onedata.com.cn +31259 + Sisnetinfo CO., ltd. + TAEHO, KANG + admin&sisnetinfo.co.kr +31260 + Phebus + Jonathan Phebus + jonathan&pheb.us +31261 + Placid Sky Consulting + Herman Slagman + herman&placidsky.nl +31262 + HOLA S.A. + Alberto Crespo + sistemas&hola.com +31263 + Riga Technical university + Ģirts Zemītis + girts.zemitis&rtu.lv +31264 + TEGNIX + Adrián Boubeta + adrian.boubeta&tegnix.com +31265 + Queensland Studies Authority + Bernard Wright + bernard.wright&qsa.qld.edu.au +31266 + Privredna komora Srbije + Dusan Berdic + dusan.berdic&pks.rs +31267 + Zivios, LLC. + Mustafa A. Hashmi + mustafa.hashmi&emergen.biz +31268 + Nubium Systems + Dubravko Maracic + dubravko&nubiumsystems.com +31269 + Sapotek Inc. + Oscar Mondragon + info&sapotek.com +31270 + XRoads Networks + Daren French + dfrench&xroadsnetworks.com +31271 + Geni, Inc + Edward Greenberg + edg&geni.com +31272 + Ed Greenberg Technical Services Corp + Ed Greenberg + ed&edgreenberg.com +31273 + RIS Technology + Nate Johnson + info&ristech.net +31274 + Webmasterprogramm GmbH + Tom Regner + cso&webmasterprogramm.de +31275 + TTG ULUSLARARASI TELEKOMINIKASYON ILETISIM HIZMETLERI ve ELEKT.TIC.LTD.STI + Mehmet Beyaz + mehmet.beyaz&ttgint.com +31276 + DSA Volgmann + Heiko Caspers + heiko.caspers&dsa-volgmann.de +31277 + Fluidmesh Networks, Inc. + Umberto Malesci + snmp&fluidmesh.com +31278 + Alphons Tech. Co. Ltd + Hyun Jung, Cho + chj97&alphons.co.kr +31279 + Shenzhen Routdata Technology Co., Ltd + bai yugang + baiyugang&routdata.com +31280 + Premiersoft (Singapore) Pte Ltd + Yap ES + yapes&premiersoft.com.sg +31281 + sourcephotonics + Patrick Li + Patrik.Li&sourcephotonics.com.cn +31282 + Solid Access Technologies LLC + Jaroslav Belonoznik + jaroslav.belonoznik&solidaccess.com +31283 + SONOWAND AS + Per Inge Mathisen + perim&sonowand.com +31284 + Spazidigitali + Luca Mearelli + l.mearelli&spazidigitali.com +31285 + Beijing 3V Communication Technology Co.,Ltd. + ZhiXue Liu + jiaoxl216&163.com +31286 + Valora Holding AG + Ulrich Tehrani + ulrich.tehrani&valora.com +31287 + FutureIT + Itay Levi + itayl&futureitsoft.com +31288 + Partners Data Systems, Inc. 
+ Robert Kelly + robert.kelly&partnersdata.com +31289 + SARL SECOM - GMixon - Pixeliris + EMMANUEL RUIZ + emmanuel&gmixon.com +31290 + Nissan Finacial Services Co., Ltd. + IWAO SHIOYA + applicant&nissan-fs.co.jp +31291 + Minutolo + Antonio Minutolo + antonio.minutolo&arcor.de +31292 + WPC-Vogt + Stephan Vogt + vogt&wpc-vogt.de +31293 + Tata Város Polgármesteri Hivatala + Hetényi Csaba + csabiwork&tata.hu +31294 + Impetus Infotech (india) pvt. ltd. + A. K. Vyas + amitk.vyas&impetus.co.in +31295 + Aqualectra B.V. + Roland van der Veen + rvdveen&aqualectra.nl +31296 + Kynetix + Anil Kripalani + amk&kynetix.com +31297 + Dortmund University of Technology + Stefan Rapp + stefan.rapp&tu-dortmund.de +31298 + Hochschule Anhalt (FH) + Thomas Gast + admin.pen&zik.hs-anhalt.de +31299 + Elite CRM Software Ltd. + Max Yu + maxyu&elitecrm.com +31300 + Emmeskay + Michael Tiller + itservices&emmeskay.com +31301 + Litheware Pty Ltd + Tim Hudson + tjh&litheware.com +31302 + ScuttledMonkey + Terry Funk + terryfunk&gmail.com +31303 + HelloWeb + Vincenzo Farruggia + admin&helloweb.eu +31304 + Andes Servicio de Certificación Digital (SCD) S.A. + Sandra Cecilia Restrepo Martínez + sandra.restrepo&andesscd.com.co +31305 + Sure Tech (HK) Limited + Judy Zhou + judy&suretech.com.hk +31306 + Severed Head Software Ltd. + Mark Adams + severe.redhead&gmail.com +31307 + astarsoft + guoqing wang + wanggq&ec-tech.net.cn +31308 + Pacific Network Research + Rodney Thayer + rodney&pnresearch.com +31309 + QuintessenceLabs Pty Ltd + John Leiseboer + jl&quintessencelabs.com +31310 + CRYPTECHNIQ Pty Ltd + John Leiseboer + jleiseboer&cryptechniq.com +31311 + CEMA ASP + Uwe Beer + ubeer&mycema.com +31312 + Jens Axboe Linux Kernel + Jens Axboe + axboe&kernel.dk +31313 + Nets DanID A/S + Martin Thiim + mthii&nets.eu +31314 + Priva B.V. + SNMP Support Group + snmpsupport&priva.nl +31315 + Thales Alenia Space Italia + Stefano Buratti + stefano.buratti&thalesaleniaspace.com +31316 + Seed Linux + Stuart Herbert + stuart&stuartherbert.com +31317 + ADTelecom S.L. + Victor M. Palacio Tárrega + victorpalacio&adtelecom.es +31318 + Southern Symentech & Solutions Private Limited + Mohamed Siraj + siraj&symentech.net +31319 + MEO (formerly 'Portugal Telecom, SA') + José Palma + jose.a.palma&telecom.pt +31320 + Disig, a.s. + Peter Miskovic + peter.miskovic&disig.sk +31321 + Munisense BV + Joffrey van Wageningen + joffrey&munisense.com +31322 + Sierra Wireless, Inc. + Tom Kavanaugh + tkavanaugh&sierrawireless.com +31323 + eXelate Media + Shlomi Aflalo + shlomia&exelate.com +31324 + CTS Corporation + Troy Tate + troy.tate&ctscorp.com +31325 + Howard University + Tyrone E. Boyd + tboyd&howard.edu +31326 + Apollo Security Sales, Inc. + Dennis Hall + technical.admin&apollo-security.com +31327 + Greenplum + Ben Werther + bwerther&greenplum.com +31328 + Whispering Wolf Productions + J. Grizzard + hostmaster&lupine.org +31329 + Diginext B.V. + Ridouan Agarad + info&diginext.com +31330 + Comstar, Inc + Sangho Lee + shlee&comstar.co.kr +31331 + Commuture Corp + Matthew Harvey + matthew&commuture.com +31332 + Dr.Reisacher + Dr. Anton Reisacher + dr.reisacher&gmx.de +31333 + Purekernel Systems Limited + Mohamed Ghouse + mg&purekernel.co.uk +31334 + Unicon, Inc. + John A. Lewis + jlewis&unicon.net +31335 + John Antypas + John Antypas + ja&antypas.net +31336 + Neovera, Inc. 
+ Michael Hertrick + mike.hertrick&neovera.com +31337 + Union & Comstar + Sangho Lee + shlee&comstar.co.kr +31338 + Kavach Networks Private Limited + Uttam K Jaiswal + uttam&kavach.net +31339 + NPP Triada-TV LLC + Panov Ivan + panov&triadatv.ru +31340 + Jiwalu + Michael Eyang + jiwalu&gmail.com +31341 + innoSysTec GmbH + Markus Miller + markus.miller&innosystec.de +31342 + S-Terra CSP + Anton Voeykov + it&s-terra.ru +31343 + Festo AG & Co. KG + Matthias Daum + rmd&festo.de +31344 + Tripadvisor + Sean Hart + shart&tripadvisor.com +31345 + Sun Microsystems (B.C.) Inc. + Paul C. Bryan + pbryan&sun.com +31346 + Cine-tal + Scott Pillow + SPillow&cine-tal.com +31347 + Spirus Applied Learning Solutions AG + Dawid Kucinski + dkucinski&spirus.com +31348 + DONET S.A.C + Erick Martin Lavado Sarmiento + erlavado&gmail.com +31349 + Digital Wave Co.,Ltd. + Mingyan Wang + wangmy&digitalwave.cn +31350 + Government Information Management Unit, Ministry of Finance, Finland + Hannu Korkeala + hannu.korkeala&vm.fi +31351 + managedhosting.de GmbH + Heino Gutschmidt + heino.gutschmidt&managedhosting.de +31352 + Meteogroup Deutschland + Dennis Schulze + admin&meteogroup.de +31353 + iKu Systems & Services GmbH & Co. KG + Andreas Niederlaender + iana&iku-systems.de +31354 + Computa Services and Consultants Ltd + Gareth Thomson + gareth&computa.co.uk +31355 + Tuxicoman + Guy Martin + gmsoft&tuxicoman.be +31356 + Casa Brasil + Renê de Lima Barbosa + rene.lima&casabrasil.gov.br +31357 + Guoke tek,Inc. + JiaXing Zhu + hake2002&126.com +31358 + id3 Semiconductors + Xavier CHOPIN + xavier.chopin&id3.eu +31359 + Orcanthus + Xavier CHOPIN + xavier.chopin&id3.eu +31360 + revenco + cheng junnan + chengjunnan&revenco.com +31361 + conject AG + Tobias Wildgruber + tow&conject.com +31362 + FHeigl + Florian Heigl + florian.heigl&gmail.com +31363 + Sumlogic Innovations + Alex Cameron + support&sumlogic.net +31364 + BEE MediaSoft Limited + Yin Liang + yinliang&dmxtechnologies.com +31365 + MaxSP Corporation + Robert Lanning + Robert.Lanning&maxsp.com +31366 + MMJ Group, Inc + LaMont Jones + poc&mmjgroup.com +31367 + Asseco Slovakia, a.s. + Jan Bartek + jan.bartek&asseco.sk +31368 + Systancia SA + Emmanuel Théotime + support&systancia.fr +31369 + OhmForce + David ROBIN + david.robin&ohmforce.com +31370 + Unassigned + Returned 2020-01-06 + ---none--- +31371 + Communicado, Inc. + Kevin Shih + kevin.shih&communicado-inc.com +31372 + University of Southern Indiana + Travis Ennis + tlennis&usi.edu +31373 + CENELEC TC79 WG7 CCTV Surveillance Systems + Frank Rottmann + Frank.Rottmann&web.de +31374 + Plansmith Corporation + Christopher Hartman + cdhartman&plansmith.com +31375 + The Hub Partnership LLP + Nils Toedtmann + london.ict&the-hub.net +31376 + Reliable science & technology co.,Ltd + Jevy Ren + rjw&263.net +31377 + buunSoft + Martin Brändle + mb&buun.de +31378 + Vervis COMINT Services GmbH + Christoph Woellinger + snmp&vervis.de +31379 + inform solutions GmbH + Andre Beckers + abeckers&inform-solutions.de +31380 + Citec + Martin Thygesen + martin.thygesen&citec.com.au +31381 + BauWatch Technology Group + Tom Lippmann + tom.lippmann&bouwatch.nl +31382 + EDEKA Handelsgesellschaft Suedwest mbH + Meinrad Koller + oid.admin&edeka-suedwest.de +31383 + east taiya wireless telecommunication Inc. + xia zhan gao + xzg302&yahoo.com.cn +31384 + Port25 Solutions, Inc. 
+ Juan Altmayer Pizzorno + iana-pen&port25.com +31385 + Zylex Systems + Christoff van Zyl + christoffv&zylex.co.za +31386 + ARCHE Engenharia e Comercio de Sistemas de Automacao e Teleinformatica Ltda + Antonio Carlos Camargo Leite + accleite&arche.com.br +31387 + Row 44, Inc. + Chris Browne + cbrowne&row44.com +31388 + On-Net Surveillance Systems, Inc. (OnSSI) + Gadi Piran + gpiran&onssi.com +31389 + Sven Richter + Sven Richter + post&sven-richter.de +31390 + Ural State Forest Engineering University + Dr. Andrej Kravtsov + dec-usfeu&yandex.ru +31391 + Oakton Community College + John Wade + jwade&oakton.edu +31392 + Squared Financial Services Ltd. + Piotr Pryzmont + ppryzmont&squaredfinancial.com +31393 + OneBeacon Insurance + Aaron Ferreira + aferreira&onebeacon.com +31394 + Victron Energy B.V. + Jeroen Hofstee + jhofstee&victronenergy.com +31395 + The Translational Genomics Research Institute + John Forrister + jforrister&tgen.org +31396 + HPC Systems Inc. + Kenji Morimoto + morimoto&hpc.co.jp +31397 + Armor Safe Technologies, LLC + Jonathan Bosch + jbosch&armorsafe.com +31398 + Suburbia Public Access Network + Trent Anderson + domainadmin&suburbia.org.au +31399 + SCALABLE NETWORK TECHNOLOGIES + Unghee Lee + ulee&scalable-networks.com +31400 + WaveGuider Technology Ltd + chry santhemun + chrysanthemun&21cn.com +31401 + Sarbarian Software + Davi Baldin H. Tavares + davi&sarbarian.com +31402 + WINLAB + Joseph Miklojcik + jfm3&winlab.rutgers.edu +31403 + Mgame Japan Corp. + Taekjin Jun + taekjin&mgame.co.jp +31404 + Optiwave Photonics Limited + Jonnadula Ravikanth + ravik&optiwavephotonics.com +31405 + X-tec GmbH ICNS + Dittert Philipp + dittert&x-tec.de +31406 + Le Studio Vert + Sylvain Floury + suivi&lsv.fr +31407 + ON-X + Guillaume Lachenal + pen-contact&on-x.com +31408 + Cincinnati Country Day School + Jeffry A. Spain + certadmin&countryday.net +31409 + Telemetry Network System + Thomas Grace + Thomas.Grace&navy.mil +31410 + TenRoses SRL + Sysops + nsalvo&tenroses.com.ar +31411 + Edinboro University of Pa + Jonathan Blaine + jblaine&edinboro.edu +31412 + Carter and Associates + Randall L Carter + StuckIn70s&sbcglobal.net +31413 + eBiz Consulting Inc. + Walter Jia + walter_jia&hotmail.com +31414 + MMK + Dmitry Zaytsev + dimaz&mmk.ru +31415 + Nederlands Ministerie van Buitenlandse Zaken + Dirk Bundhund + dirk.bundhund&siemens.com +31416 + Edevag Consulting HB + Stefan Edevag + info&edevag.com +31417 + Tokyo University of Science + Devendra Narayan + narayan&cc.kagu.tus.ac.jp +31418 + Adexso Advanced Expert Solutions + Agustin Salguero Cruz + Agustin.Salguero&Adexso.com +31419 + Skalarit AB (formerly 'Skvader Systems AB') + Nicklas Björk + nicklas.bjork&skalarit.se +31420 + team2work GbR + Kai Zirlewagen + kai.zirlewagen&team2work.de +31421 + Coblan srl + Frank Contrepois + info&coblan.it +31422 + Retailp S.A.S. + Damien Truffaut + dtruffaut&retailp.com +31423 + Identity Technology Ltd + Jason Banks + jasonb&identitytechnology.net +31424 + Egothor + Leo Galambos + lg&hq.egothor.org +31425 + Mountain Power Inc. + Carson Yuen + cyuen&mountainpower.ca +31426 + Fitbit, Inc. + Alistair Weddell + aweddell&fitbit.com +31427 + ticketpro GmbH + Stefan Kolb + ldap-schema&ticketpro.ch +31428 + PT. 
Telemetri Indonesia + Ceda Wan + c.wan&telemetri.co.id +31429 + RealConnect (Pty) Ltd + Nick Dalton + helpdesk&realconnect.co.za +31430 + RSU GmbH + Steffen Schmidtke + s.schmidtke&rsu-online.de +31431 + CoSiSo + Richard Speelmans + richard&cosiso.nl +31432 + Courtyard Electronics Ltd + Hugh Reynolds + hugh&courtyard.co.uk +31433 + Toumaz Technology Limited + Nikolaos Kasparidis + nick.kasparidis&toumaz.com +31434 + Minerva-Korea + Ji Hoon, Jung + jjh9922&minerva-korea.com +31435 + Users, Incorporated + Carl Bennett + carlbennett&users.com +31436 + Ansley & Associates, Inc. + Greg Ansley + gja&ansley.com +31437 + Call & Call Holding S.p.A. + Ugo Paternostro + ugo.paternostro&callecall.it +31438 + phoops s.r.l. + Ugo Paternostro + ugo.paternostro&phoops.it +31439 + Correo Uruguayo + Javier Lago + javierl&correo.com.uy +31440 + Embedded Data Systems, LLC + Chay Wesley + Chay&EmbeddedDataSystems.com +31441 + Eurika srl + Paolo Adami + paolo.adami&eurika.net +31442 + JonDos GmbH + Rolf Wendolsky + certification&jondos.de +31443 + The Alberta Library + Fletcher Nichol + fnichol&thealbertalibrary.ab.ca +31444 + MAKELSAN LTD. + Umut OGUZ + uoguz&makelsan.com.tr +31445 + FatBox Inc. + Evan Borgstrom + evan&fatbox.ca +31446 + catpipe + Phil Regnauld + pr&catpipe.net +31447 + IRGA Production Company + Łukasz Zosiak + lukasz.zosiak&irga.com.pl +31448 + Prolancer Pty Ltd + Emil Andonov + sysadmin&prolancer.com.au +31449 + Perfect Line S.A. + Szymon Kowalczyk + szymon.kowalczyk&perfectline.pl +31450 + David Rourke + David Rourke + pen&drourke.co.uk +31451 + ipado - Informatiker-Partnerschaft Pauxberger und Domschke + Thomas-Michael Domschke + th.domschke&ipado.eu +31452 + VS Information Systems + Vlastimil Setka + setka&vsis.cz +31453 + Zyrion Inc. + Rajib Rashid + rajib.r&zyrion.com +31454 + towebs inc. + leonardo alminana + l_alminana&yahoo.com.ar +31455 + Bulb + Neven Stipcevic + neven.stipcevic&bulb.hr +31456 + ScapeCaster + Sean Kent + ryoohki&gmail.com +31457 + Meebo, Inc. + Paul Handly + paulh&meebo-inc.com +31458 + RAVIOLIKINGDOM.COM + Steve Antonini /Ferdinando Ferrari + fferrari_it&yahoo.it +31459 + United States Peoples Virtual Party + John Francis Lee + jfl&uspvp.org +31460 + Versatile Security Sweden AB + Joakim Thoren + Joakim.Thoren&versasec.com +31461 + sächsische Informations-Compagnie zu Berlin | Marcus Pauli + Marcus Pauli + marcus.pauli&informations-compagnie.de +31462 + Canare + Shiro Tsunai + shirou-tsunai&canare.co.jp +31463 + Itibia Technologies Inc. + Suker.Yao + suker.yao&itibia.com +31464 + EPCOM IT-Systeme, Pirker & Zauner OEG + Joachim Zauner + hostmaster&epcom.cc +31465 + Systemica Consulting + Thomas Muller + ttm&online.no +31466 + NeoConsult A/S + Thomas Larsen + info&neoconsult.com +31467 + Gravity Interactive, Inc. + Kelvin Mok + systems&gravityus.com +31468 + The Bluenote Group + Jeffrey McCamley + jmccamley&thebluenotegroup.com +31469 + Alior Bank S.A. + Tomasz Graca + tomasz.graca&alior.pl +31470 + Vector Data LLC + Skye Nott + snott&vectordata.com +31471 + SunStrom GmbH + Daniel Zwahr + daniel.zwahr&sunstrom.de +31472 + Axcera Inc. + Dimitar Staykov + dimitars&ad2007.com +31473 + NOVA Chemicals Corporation + Sandra Nanke + nankes&novachem.com +31474 + Viagenie inc. + Marc Blanchet + marc.blanchet&viagenie.ca +31475 + ITS Schmidinger GmbH + Markus Schmidinger + markus.schmidinger&its-schmidinger.de +31476 + Deviant Software + Joe Richards + deviantsoftware&gmail.com +31477 + TTC MARCONI s.r.o. 
+ Jiri Dlouhy + dlouhy&marconi.ttc.cz +31478 + Pluribus + Ronald McCormick + ramccor&pluribus.org +31479 + Grey Group + Sean McHugh + smchugh&grey.com +31480 + Universidade Federal do Rio Grande + Centro de Processamento de Dados + suporte&furg.br +31481 + ABBRIS Ltd. + Sergey Podushkin + abbris&abbris.ru +31482 + Top Level Internet Pty Ltd + Chris Jones + chrisj&toplevel.net.au +31483 + Universidad Rey Juan Carlos + Tomás Herrero + servred&urjc.es +31484 + Basset AB (formerly 'BassetLabs AB') + Mathias Andersson + mathias.andersson&bassetglobal.com +31485 + FlexSecure GmbH + Markus Ruppert + info&flexsecure.de +31486 + CGI Sweden AB (formerly 'Logica Norr AB') + Henrik Andreasson + se.certificateservice&cgi.com +31487 + The Transaction Company + Vladimir Dzhuvinov + vd&valan.net +31488 + oxylane + Thomas Sagnimorte + thomas.sagnimorte&decathlon.com +31489 + Manchester Computing Ltd + Dave Bracken + Support&ManchesterComputing.co.uk +31490 + Monnsta + Matthew King + matthew.king&monnsta.net +31491 + Cabo Telecom + Eduardo Andrez de Oliveira + adm-l&cabotelecom.com.br +31492 + BOCOM SECURITY(ASIA PACIFIC) LIMITED + HUAI DONG MO + mohd&bocom.cn +31493 + The Hospital Authority Hong Kong + Tony Chan + tchanwt&ha.org.hk +31494 + Host 17 + Steve Ingram + steve&host17.net +31495 + Combe Walden Associates Limited + Neil Cordell + neil&combe-walden.com +31496 + NetModule AG + Martin Meuli + hostmaster&netmodule.com +31497 + Haute Ecole d'Ingénierie et de Gestion du Canton de Vaud (HEIG-Vd) + Olivier Liechti + olivier.liechti&heig-vd.ch +31498 + Stiftung Sympany + Sascha Affolter + sascha.affolter&sympany.ch +31499 + Novozymes A/S + Roman Ford + rfor&novozymes.com +31500 + Lindsay Broadband Inc. + Vic Race + vrace&lindsaybroadbandinc.com +31501 + Khaledma + khaled Moustafa Ahmed + khaledmaa2000&yahoo.com +31502 + Mantaray AB + Martin Bjornstrom + martin.bjornstrom&mantaray.se +31503 + Sanitas S.A. de Seguros + Ignacio Campos Alonso + icampos&sanitas.es +31504 + Networkers AG + Rainer Schneider + schneider&networkers.de +31505 + Dirección General de Tráfico + Pedro Gimeno + pgimeno&dgt.es +31506 + Dragonfly Grzegorz Marszałek + Grzegorz Marszałek + graf0&post.pl +31507 + arcutronix Gmbh + Thomas Geffert + arcutronix-pen&arcutronix.com +31508 + Internat der Maristenbrüder Mindelheim + Michael Beham + michael&styre.de +31509 + Soapstone Networks + Steve Babineau + babs&soapstonenetworks.com +31510 + New Horizons + Jay Burkey + jayb&nhokc.com +31511 + Henry Ford Community College + Sandro Silvestri + sandro&hfcc.net +31512 + BittWare, Inc. + Ron Huizen + rhuizen&bittware.com +31513 + Prosul - Projetos, Supervisão e Planejamento LTDA. + Flavio do Carmo Junior + flaviocj&prosul.com +31514 + Software Horizons Inc. + Ramal Murali + ramal&shorizons.com +31515 + SizeIT Drift Aktiebolag + Erik Wallin + helpdesk&sizeit.se +31516 + Zen Internet Limited + Jeremy Nicholls + Jerry.Nicholls&zeninternet.co.uk +31517 + Akixa + Gerhard Scheffler + scheffler&akixa.com +31518 + International All Sports Limited + Jeff Cheah + jeff.cheah&iasbet.com +31519 + Jakko Network + Georg Jakovidis + tr-080712-pen&jakko.net +31520 + iPerfecta Japan Ltd. + Kenji Ueno + tec-info&iperfecta.net +31521 + Univar Benelux S.A. + Christian Van Simaeys + Christian.VanSimaeys&univareurope.com +31522 + Washington Apple Pi, Ltd. + Jon C. 
Thomason + jonct&tcs.wap.org +31523 + Community Health Information Collaborative + Cheryl Stephens + cstephens&medinfosystems.org +31524 + müller it gmbh + Thomas Mueller + service&muellerit.ch +31525 + NEC Laboratories Europe + Thomas Dietz + dietz&neclab.eu +31526 + Change Networks A/S + Tais M. Hansen + tais.hansen&changenetworks.dk +31527 + VMF Nord ek för + Hans Weslien + Hans.Weslien&vmfnord.se +31528 + SIEMENS S.A: IA SPAIN + Jose Antonio Corral Rubio + joseantonio.corral&siemens.com +31529 + Pensionskasse SBB + Mario Fascetti + edv.support&pksbb.ch +31530 + TSI Power Corporation + Jason Marckx + jason&tsipower.com +31531 + Immutify Limited + Bruce Skingle + Bruce.Skingle&immutify.com +31532 + Ensynch, Inc + Brad Turner + bturner&ensynch.com +31533 + ScanSafe Ltd + Jim Walker + ops&scansafe.com +31534 + Adfinis AG + Michael Hofer + info&adfinis.com +31535 + Parkeon + Thierry Machicoane + pkn-iana-pen&parkeon.com +31536 + OpenNHRP project + Timo Teräs + timo.teras&iki.fi +31537 + Gossamer Threads Inc. + Alex Krohn + alex-iana&gossamer-threads.com +31538 + Velocity Network + Justin Kruszewicz + justin.kruszewicz&velocitynetwork.net +31539 + Traveling bits, Inc. + Brian Kurle + bk&travelingbits.com +31540 + INTEC Inc. + Technologies Dept. + takada_kazuhiro&intec.co.jp +31541 + Yaamen Inc + Michael Koverman + yaamen&yaamen.net +31542 + Robert Carleton + Robert Carleton + rbc&rbcarleton.com +31543 + RIONERO + Rocco RIONERO + iana-pen-contact&rionero.com +31544 + Sistema Integrado de Defesa Social + Rui César + rui&sids.mg.gov.br +31545 + Yellowbook Inc + IT Security + penadmin&yellowbook.com +31546 + Rotary International + Chris Morley + it&rotaryhumm.com +31547 + COGITO Management Consulting + Dmitry Chernyshov + integration&cogito.ru +31548 + Media & Broadcast Technologies + Olivier Braun + OID.Admin&mediabroadcast-t.com +31549 + Shu-Te University + Po-Yu Lin + poyu.lin&gmail.com +31550 + K Franchise Vertriebssysteme Gesellschaft m.b.H. + Ernst Zechmeister-Machhart + iana&kika.com +31551 + Niechoj electronic GmbH + Christian Hoffmann + info&niechoj.de +31552 + Disforce + Bos Ruud + admin&disforce.be +31553 + Invocrown Ltd + James Fryer + jim&invocrown.com +31554 + Elvys Ltd. + Karol Rohrer + karol.rohrer&elvys.sk +31555 + VCS Aktiengesellschaft + Michael Henke + michael.henke&vcs.de +31556 + TiQ srl + Marco Cadario + marco.cadario&tiq.it +31557 + Vision Technologies Inc. + David Wilson + dwilson&visntec.com +31558 + David M Brooke + David M Brooke + mail&davidmbrooke.co.uk +31559 + pathSolutions + Tim Titus + ttitus&pathsolutions.com +31560 + ARTEC Computer GmbH + Jerry Artishdad + admin&artec.org +31561 + Telecomproduct + Demidenko Sergey + sd&tpr.ru +31562 + Backcountry.com + Ben Preston + bpreston&backcountry.com +31563 + HGST + Takashi Mine + takashi.mine&hitachigst.com +31564 + Chainzone Tech. Ind. 
+ Yanglei + yanglei&chainzone.com +31565 + Nurminen Logistics Oyj + Topi Tukiainen + Topi.Tukiainen&nurminenlogistics.com +31566 + Ytti + Saku Ytti + pem&ytti.fi +31567 + EURECA + Alexander Marugin + a_marugin&eureca.ru +31568 + Licensys Pty Ltd + Barry Kruger + bkruger&licensys.com +31569 + Branden Schneider + Branden Schneider + branden_ny&yahoo.com +31570 + Gnodal + Tony Ford + tony&gnodal.com +31571 + Narratone Pte Ltd + Joseph Loh + joseph&narratone.com +31572 + Consolidated Smart Systems + Matt Bregger + mattb&consolidatedsmart.com +31573 + Peel District School Board + Paul Kimberley + Paul.Kimberley&peelsb.com +31574 + Security Monster + Sebastiaan Mangoentinojo + sebastiaan&securitymonster.net +31575 + InnoGames GmbH + Michael Krelin + michael.krelin&innogames.de +31576 + CTC Engineering Ltd. + Mihail Borisov + michail&ctc-bg.com +31577 + 7Safe Ltd. + Jordan Hrycaj + Jordan.Hrycaj&7Safe.com +31578 + A2x Trefzer Consulting + Benedikt Trefzer + trefzer&a2x.ch +31579 + virgitech + nassif alexan + nalexan&virgitech.com +31580 + Departamento de Engenharia Eletrônica - CEFET RJ + Carolina Soares da Conceição + caca_soares&yahoo.com.br +31581 + REDDOXX GmbH + Alexander Bauer + alexander.bauer&reddoxx.com +31582 + Caixa Geral de Depósitos + Nuno Costa + nuno.costa&cgd.pt +31583 + AgilTech + Victor Salaman + salaman&elamal.com +31584 + Ben Jen Online + Ben Jen + bjen&benjenonline-inc.com +31585 + Umoe Communication + Martin E. Koch + enterprisenumber&tech.umoecom.no +31586 + Teragram, A SAS Company + Chris Heller + heller&teragram.com +31587 + Univ Community + Weiyue Liu + Weiyue.Liu&gmail.com +31588 + IceWave + Nicholas Hubbard + nicholashubbard&icewave.net +31589 + Axlon Electronics Corporation + Tony Kao + tonykao&axlon.com +31590 + MRI Cardiac Services, Inc + Dwight Ball + dwight.ball&provaimages.com +31591 + Digital Governance + Andre Esser + it-dept&digitalgovernance.co.uk +31592 + Jump Trading, LLC + Michael Wassong + mwassong&jumptrading.com +31593 + Electronic Child Health Network + Nick Seward + nick.seward&echn.ca +31594 + Elm Technology + Pedram Ghovonlou + pedram&elmtech.com.au +31595 + KGlobal Tech Co., Ltd. + Kim Yeong Su + kimch96&gmail.com +31596 + OJSC iCFO + Anton Siluyanov + it&icfo.ru +31597 + Global Real Estate And Technology Consortium, Inc. + Corey Leong + cleong&gr8c.org +31598 + Grok-A-Lot, LLC + Glen F. Marshall + admin&grok-a-lot.com +31599 + P0F Sistemske Storitve Grega Bremec s.p. + Grega Bremec + gregab&p0f.net +31600 + Beijing TechSolute Technology Co., Ltd. + Danffer Liu + danfferliu&danfferliu.com +31601 + DreamGarage Inc. + Daisuke AIBA + admin&dreamgarage.jp +31602 + Raying Technology Co., Ltd. + Wu Yuehua + wuyuehua5&gmail.com +31603 + Information Society Development Committee under the Government of the Republic of Lithuania + Vaidotas Ramonas + v.ramonas&ivpk.lt +31604 + Sunny Multimedia Co.,ltd + Wei lin + Daemon&sunnymmc.com +31605 + Ekahau Oy + Tomi Heinonen + tomi.heinonen&ekahau.com +31606 + LUMINOUS POWER TECHNOLOGIES PVT LTD + RICHIE SARASWAT + richie.saraswat&luminousindia.com +31607 + eBox technologies S.L + Javier Uruen Val + juruen&warp.es +31608 + Emaging + Ivan Havlicek + ivan.havlicek&gmail.com +31609 + Lisp + Chun Tian (binghe) + binghe.lisp&gmail.com +31610 + Noisivne + Dimy Jeannot + it&noisivne.com +31611 + California Creativity Association, Inc. 
+ Bob Burns + wm0809&californiacreativity.org +31612 + Roessner-Network-Solutions + Christian Roessner + christian&roessner-net.com +31613 + Extrawerk + Lars-Helge Wilbrandt + l.wilbrandt&extrawerk.de +31614 + Ubuntu NL + Dennis Kaarsemaker + dennis&ubuntu-nl.org +31615 + LEVELER, LLC + M. JANUSZEWSKI + info&levelerllc.com +31616 + LINUX INK + Oleg Sadov + sadov&linux-ink.ru +31617 + Celeste + Alexandre Boulanger + sysadmin&celeste.fr +31618 + Cloudpic + Mike Chang + pen&cloudpic.com +31619 + Editora Abril S.A. + Alexandre Bressane + alexandre.bressane&abril.com.br +31620 + The J.M. Smucker Company + Don Iles + don.iles&jmsmucker.com +31621 + MoCA - Multimedia over Coax Alliance + Gary Langille + Gary.Langille&echostar.com +31622 + Meddius LLC + Michael Chapman + michael.chapman&meddius.com +31623 + Information Security Corp. + Michael J. Markowitz + markowitz&infoseccorp.com +31624 + Premier Heart + Michael Graziano + admin&premierheart.com +31625 + Astron-Fortis Inc + Ville Walveranta + vwal&astronfortis.com +31626 + Lee-Dickens + Andy Needham + andy.needham&lee-dickens.co.uk +31627 + Preferred Voice, Inc. + Bern Bareis + bbareis&preferredvoice.com +31628 + Opticom Communications LLC + Susana Tinman + iana&opticom.co.il +31629 + fengyun.ltd + Yu Fei + yufeixiaoyu&gmail.com +31630 + media concept GmbH + Roland Köppe + rkoeppe&mc-engineering.de +31631 + Conseil Régional Nord - Pas de Calais + DE ZORZI Livio + l.dezorzi&nordpasdecalais.fr +31632 + Shanghai Titan Technology Co., Ltd. + Zu-xiong Qiu + qiu1111&sh-titan.com +31633 + Quickline Business AG (formerly 'EBM Telecom AG') + Benjamin Schlageter + benjamin.schlageter&business.quickline.ch +31634 + ICT Embedded b.v. + Jorg Wissink + Jorg.Wissink&ict.nl +31635 + TextFunnel + Richard Smith + operations&textfunnel.com +31636 + Shanghai Colorful Magnetic Resonance Technology Co. Ltd. + Yang Guang + gyangc&online.sh.cn +31637 + Peak Communications Ltd + Nigel Hepworth + nigel&peakcom.co.uk +31638 + Indra Systems, Inc. + Alfonso Urdaneta + aurdaneta&indra-systems.com +31639 + Litmus Logic + James Ryan + james&litmuslogic.com +31640 + 7ia + Alexander Bokovoy + registry&7ia.org +31641 + CFH Software + Kees Helminck + cfh&lansurvey.com +31642 + Eurotech S.p.A. + Mauro Barella + m.barella&eurotech.it +31643 + m-creations gmbh + Kambiz Darabi + darabi&m-creations.com +31644 + Perthro Ltd + James Beckett + snmp&perthro.co.uk +31645 + Network Critical + Sam Battaglia + sam&networkcritical.com +31646 + Virgina Department of Motor Vehicles + Christopher Nicholl + christopher.nicholl&dmv.virginia.gov +31647 + Ministerio de Defensa de la Nación + Alina Di lernia + webmaster&mindef.gov.ar +31648 + HANGZHOU DPTECH Technologies Co., Ltd. + Winnie Wan + wannylan115&sina.com +31649 + keksbude.net + Tobias Hommel + iana&keksbude.net +31650 + Feline Soul Systems + Bearcat M. Sandor + bearcat&feline-soul.net +31651 + W Key s.r.l. + Andrea Borghi + registry&wkey.it +31652 + Arcadia Consulting + Gonzalo Gómez García + gonzalo&arcadiaconsulting.es +31653 + Ing. Reinhard Hirz IT Services + Ing. Reinhard Hirz + pen&hirz.at +31654 + EM Software & Systems + Christian Nielsen + cnielsen&emss.co.za +31655 + Martek di Meucci Marco + Marco Meucci + info&martek.it +31656 + Beijing Autelan Technologies Co Ltd., + Haiyang Ni + nihy&autelan.com +31657 + Infinitus d.o.o. 
+ Bojan Šernek + bojan.sernek&infinitus-outdoor.com +31658 + LightSoftware + Luca Borghini + Lukino2000&Gmail.com +31659 + TurnovFree.net, Sdružení + Zdeněk Styblík + stybla&turnovfree.net +31660 + eSure Labs, Inc. + Verrol L. Adams + verrola&esurelabs.com +31661 + Logilab, SA + Ludovic Aubry + sysadmin&logilab.fr +31662 + TIGNET Open Source Solutions + Thomas Martin + ciscofu&gmail.com +31663 + New York State Assembly + Matt Garretson + iana&assembly.state.ny.us +31664 + Bravo Tech, Inc. + Xun Gu + info&bravotechinc.com +31665 + StrongMail Systems Inc. + Aaron Ferguson + aferguson&strongmail.com +31666 + AirCanopy Internet + Kris Kenyon + kkenyon&aircanopy.net +31667 + Ministerstwo Sprawiedliwosci + Szymon Jakubczuk + jakubczuk&ms.gov.pl +31668 + KRAFT Benjamin Jean + KRAFT Benjamin Jean + benj&bkraft.fr +31669 + Tema Networks Ltd. + Niko Jokinen + niko.jokinen&temanetworks.com +31670 + Jaworsito + Daniel Jaworski + daniel&jaworsito.pl +31671 + Anunda Technology Co, Ltd. + krit wongrujira + kwkrit&gmail.com +31672 + XORTEC + David Fortier + david.fortier&xortec.fr +31673 + Datacenter Luxembourg + Raoul Thill + sysadmin&sysadmin.dclux.com +31674 + Global Star Solutions ULC + Eddie Jin + eddie.jin&starsolutions.com +31675 + coldspot.de + Kevin Loehmann + kevin.loehmann&coldspot.de +31676 + Mantara Inc + David Arnold + sysadm-notify&mantara.com +31677 + REMEC Broadband Wireless + Cherice Jobmann + cherice.jobmann&remecbroadband.com +31678 + Tenalt d.o.o. + Jure Kodžoman + jure&tenalt.com +31679 + droidnest.org + Egor Medvedev + dsx&droidnest.org +31680 + Altor Networks + Todd Ignasiak + todd&altornetworks.com +31681 + South African Post Office + Martinus Scheffers + Martinus.Scheffers&postoffice.co.za +31682 + Audinate Pty Ltd + Aidan Williams + iana-snmp-pen&audinate.com +31683 + community engineering gmbh + Thomas Wichser + tom&tom201.net +31684 + EBM WEBSOURCING + Gaël BLONDELLE + gael.blondelle&ebmwebsourcing.com +31685 + IDG GmbH + Waldemar Cebulla, Norbert Ell + norbert_ell&idg.de +31686 + Harry Jede + Harry Jede + walk2sun&arcor.de +31687 + Lorica Solutions + David Straitiff + dstraitiff&loricasolutions.com +31688 + rtfmcomputing.com + Tyson Boellstorff + perlcat&alltel.net +31689 + editspot, LLC + Webmaster + webmaster&editspot.com +31690 + Sensorlogic, Inc + Rich Lackey + operations&sensorlogic.com +31691 + FIMMG - Federazione Italiana Medici di Medicina Generale + Lorenzo Cipriani + lorenzo.cipriani&gruppocm.it +31692 + Close Premium Finance + Infrastructure Support + mking&closepf.com +31693 + MerlinTechs + Shannon Mitchell + shannonm&merlintechs.com +31694 + Heilig Graf + Steve Pouillon + stevepouillon&telenet.be +31695 + BNYConvergex Execution Solutions LLC + Luca Mihailescu + srt&bnyconvergex.com +31696 + Haberler Enterprises Ltd + Michael Haberler + iana&mah.priv.at +31697 + Peer 1 + Scott Clark + sclark&peer1.com +31698 + Bluemoon Games + Christopher Mettin + anno.1067&t-online.de +31699 + Gymnasium Querfurt Broadcasting Channel + Christopher Mettin + cmettin&gqbc-online.com +31700 + Ice House Productions + Marshall Anschutz + oid.iana&icehousepro.us +31701 + Sonoa Systems + Ganesan Vivekanandan + ganesan&sonoasystems.com +31702 + India Logics, LLC + Eric Desgranges + eric&vcardprocessor.com +31703 + Institut D'Aéronomie Spatiale de Belgique + Arnaud Lefebvre + A.Lefebvre&aeronomie.be +31704 + ASPICON GmbH + Jochen Held + support&aspicon.de +31705 + lug.org.uk + Hugo Mills + hugo&lug.org.uk +31706 + Torrenga Engineering, Inc. + Brent A. 
Torrenga + lists&torrenga.com +31707 + Tamil IPTV Inc + Ruban Selvarajah + ruban&tamiliptv.tv +31708 + Meddiff Technologies Pvt. Ltd. (formerly 'MedSphere Technologies Pvt Ltd') + Tapesh K Agarwal + contactus&meddiff.com +31709 + Sistema FIESC + Murilo Nunes Elias + murilone&fiescnet.com.br +31710 + Torrenga Surveying LLC + Brent Torrenga + lists&torrenga.com +31711 + Mystiq Mobile Pte Ltd + Madhan Dennis + madhan&mystiqmobile.com +31712 + gpf.me.uk + Steven Carr + gpf&gpf.me.uk +31713 + Khipu Networks Ltd. + Steven Carr + steven.carr&khipu-networks.com +31714 + national center for missing and exploited children + Uriah robins + Urobins&NCMEC.org +31715 + Portware, LLC + Ary Khatchikian + mepshteyn&portware.com +31716 + Wilder & Associates + Roel Bondoc + roel&wilder.ca +31717 + Rogue Project Inc + Jamie Robillard + jrobillard&planttel.net +31718 + Krausser EDV + Eric Krausser + eric&krausser-edv.de +31719 + Programmer.com.hk + Quentin Chung + quentin.chung&programmer.com.hk +31720 + Jan Dittberner IT-Consulting & -Solutions + Jan Dittberner + jan&dittberner.info +31721 + ArgoSoft JSC + Andrei Zelenkin + andrewzelenkin&tut.by +31722 + Spellman High Voltage Electronics Corporation + David Kafrissen + david&spellmanhv.com +31723 + Communication Systems Company + Ahmed Banihammad (Company supervisour: Laurent Bouillet) + ahmad.khayyat&gmail.com +31724 + Das Labor e.V. + Daniel Otte + daniel.otte&ruhr-uni-bochum.de +31725 + E-Cert + Gonzalo Paredes + gparedes&e-certchile.cl +31726 + Rancore Technologies (P) Ltd., + Somya Mishra + somya.mishra&rancoretech.com +31727 + U.S. Department of Energy + Robert Patrick + robert.patrick&hq.doe.gov +31728 + Monografias.com + Administrador Monografias.com + administrador&monografias.com +31729 + art of defence GmbH + Alexander Meisel + alexander.meisel&artofdefence.com +31730 + MiMOMax Wireless Limited + Dennis Aberilla + dennis.aberilla&mimomax.com +31731 + Sunwave Communications Co. Ltd. + Bao Xianguo + xianguo_bao&sunwave.com.cn +31732 + DropFire, Inc. + Gregory Harman + greg&dropfire.com +31733 + KEPID AMSTECH Co., LTD. + Yong Sik, Ryu + ysryu&amstech.co.kr +31734 + Dayang Technology Development Inc. + yanjiema + admin&dayang.com.cn +31735 + ZAO NPK Microtec + Golovin Vladimir + st&microtec.ru +31736 + Filipstads kommun + Roger Nordqvist + roger.nordqvist&filipstad.se +31737 + Digital Systems Ltd + Daniel Kalchev + daniel&digsys.bg +31738 + S.C. LTHD Corporation S.R.L. + Chis-Serban Dinu-Razvan + csdr&lthd.com +31739 + penagosg + guillermo penagos + penagosg&gmail.com +31740 + University of Prince Edward Island + John Cunningham + jcunningham&upei.ca +31741 + ProLogic, Inc. + Mark Stahl + mstahl&prologic-inc.com +31742 + Avenue A | Razorfish + Steve Craft + steve.craft&aa-rf.com +31743 + eXerp ApS + Ole Husgaard + osh&exerp.com +31744 + ADESA, Inc. + Rob McBroom + rob.mcbroom&adesa.com +31745 + Centro Federal de Educação Tecnológica de Pelotas + Anderson Medeiros Gomes + amg1127&cefetrs.tche.br +31746 + Gemalto + Edson Oliveira Jr + edson.oliveira&gemalto.com +31747 + AAP Communications, LLC + Robert Williams + it&aapcomm.com +31748 + JED Capital, LLC + Robert Williams + it&jedcapital.com +31749 + Need To Know News, LLC + John Harada + it&needtoknownews.com +31750 + marcher.it + Martin Marcher + martin&marcher.it +31751 + Burleson Technology Group, LLC. 
+ Cody Burleson + cody&burlesontech.com +31752 + Omniscient Technologies + Todd Kover + kovert&omniscient.com +31753 + ALCEST'IS + Abdellah LAHOUAOUI + Abdellah.Lahouaoui&gmail.com +31754 + Triwest Healthcare Alliance + JR Ashby + jashby&triwest.com +31755 + LINK Lab.Inc. + Shiro Chiba + chiba&link-lab.co.jp +31756 + GXMU(Guangxi Medical University) + Yaowang Lin + webmaster&mainbi.com +31757 + Internet Telephony Users Association Inc. + Duane Groth + support&e164.org +31758 + Infinova LLC + Jeffrey Liu + jl&infinova.com +31759 + Stoo Networks + Tim Stewart + tim&stoo.org +31760 + C-Matic Systems Ltd + James Ashby + james.ashby&cmatic.co.uk +31761 + RFI Engineering B.V. + Michel Stam + mstam&rfi-engineering.com +31762 + Fujian Star-net Communication Co.,Ltd + Yuan Tao + taoy.msn&live.cn +31763 + Bitel Távközlés és Biztonságtechnika Kft + Koós Sándor + koos.s&bitel.hu +31764 + inmedias.it Gesellschaft für Informationstechnologie mbH + Mats Schwieger + jms&inmedias.it +31765 + Advance Display Technologies, Inc. + Jody Thomas + jody.thomas&adtimedia.com +31766 + ShowIT + Jay Templeton + jay.templeton&showit.net +31767 + Nootek + Nick Tolstokulakov + nick&nootek.ru +31768 + SHARP Electronics (Europe) GmbH + Stephan Rimpau + stephan.rimpau&sharp.eu +31769 + Kassenärztliche Vereinigung Bayerns + Helmut Zühlke + ito-unix&kvb.de +31770 + Bachmann GmbH + Wilhem Wehrle + info&bachmann.com +31771 + eGenix.com Software, Skills and Services GmbH + Marc-Andre Lemburg + info&egenix.com +31772 + International Paper + Mark S. Johnson + mark.johnson&ipaper.com +31773 + GEWI Europe GmbH & Co. KG + Hagen Geppert + hagen.geppert&gewi.com +31774 + RealTimeSites + Felipe Gutierrez + felipe&realtimesites.com +31775 + Gellings CO + C.O. Gellings + information&cgellings.nl +31776 + TheLogInn + Flip Vernooij + flip&theloginn.nl +31777 + DATAPREV - Previdencia Social + Jarbas Peixoto Júnior + jarbas.junior&gmail.com +31778 + SMHS Ltd + OID Registry Manager + oidmanager&smhs.co.uk +31779 + Innocom-arcodan + Soeren Straarup + soeren.straarup&innocom.dk +31780 + Acipia + Jonathan SEMCZYK + support&acipia.fr +31781 + TG Mess-, Steuer- und Regeltechnik GmbH + Julian Rath + rath&technikgruppe.com +31782 + SyferLock Technology Corporation + Abu Marcose + support&syferlock.com +31783 + MetroSouth Medical Center + LeAndre Jones + ldjones&metrosouthmedicalcenter.com +31784 + EndlerNET GmbH + Matthias Endler + iana-pen&endlernet.com +31785 + Aurelien Derouineau + Aurelien Derouineau + aurelien&derouineau.com +31786 + Zdravstveni dom Ljubljana + Robert Ludvik + robert.ludvik&zd-lj.si +31787 + Syren Technologies + LeAndre Jones + support&syrentec.net +31788 + QSG Verkehrstechnik GmbH + Dirk Thamer + thamer&qsg-verkehrstechnik.de +31789 + HD Software & Systeme GmbH + Dirk Thamer + dirk.thamer&hd-systeme.de +31790 + Compressus, Inc + Kelli Mierzwa + kmierzwa&compressus.com +31791 + County of Erie, New York + Michael Schenkel + michael.schenkel&erie.gov +31792 + Adways Co., Ltd. + Yusuke Watase + isys-products&adways.net +31793 + TEMIX S.p.A. + Gaetano Calabro + g.calabro&temix.it +31794 + Infinovate, Inc. + David Stahl + david.stahl&infinovate.com +31795 + Board of Regents of The University System of Georgia + Todd Watson + nic-admin&usg.edu +31796 + IdentiPHI, Inc + Cory Womacks + identiphi_iana_pen&telus.net +31797 + Neetze Online + Heiner Ohm + ho&neetze.net +31798 + Rewse Lab. 
+ Tatsunori Shibata + info&rewse.jp +31799 + TreStore GmbH + Jens Witte + jens.witte&trestore.de +31800 + VividLogic Inc + Ram Balaraman + ram&vividlogic.com +31801 + Biap, Inc. + Aaron Johnson + support&biap.com +31802 + Aquasys + George Mengelberg + george.mengelberg&aqua-sys.org +31803 + Secureware Inc + Hiroki Nogawa + nogawa&secure-ware.com +31804 + Office of State Revenue + Paul Gonsior + paul.gonsior&osr.nsw.gov.au +31805 + IBA CZ, s.r.o. + Jiri Syrovy + jiri.syrovy&ibacz.eu +31806 + VirtualTec Solutions AG + Markus Wild + info&virtualtec.ch +31807 + Universidad de Caldas + Manuel Felipe Lopez Correa + felipe&ucaldas.edu.co +31808 + Togliatti State University + Mikhail Titarenko + mihail&tltsu.ru +31809 + Josef Lux und Sohn Baumeister GmbH + Martin Milinovsky + system&luxbau.at +31810 + Cinterion GmbH + Dr. Iavor Antonov + Iavor.Antonov&cinterion.com +31811 + IKOR Products GmbH + Henry Reckewitz + support&ikor.de +31812 + United Tote Co. + Phillip R. Paradis + phillip.paradis&unitedtote.com +31813 + Dikmenoglu + Yusuf Dikmenoglu + mail&dikmenoglu.de +31814 + Amway of Australia + Craig Hamilton + a2kwebadmin&a2k.com.au +31815 + BEAR Solutions (Australasia) Pty Ltd + Wayne Richard Morrison + waynem&bearsolutions.com.au +31816 + PrintFleet Inc. + Wallace Welles + wwelles&printfleet.com +31817 + Connexis Kft. + László Vasváry + vasvary&connexis.com +31818 + Droitech eSolutions Private Limited + Dipesh Sharma + info&droitech.com +31819 + CHENGBO NETWORK TECHNOLOGY CO. ,LTD + shijun.cao (曹世军) + caoshijun&kcsji.com +31820 + Federal Prison Industries (UNICOR) + Manuel Martinez + mmartinez&central.unicor.gov +31821 + eonBIT as + Stian Strandem + snmp.pen&eonbit.com +31822 + X2B Security + Matthias Scherrer + matmei&yahoo.com +31823 + Schweitzer Engineering Laboratories, Inc. + Jason Kraft + infosec&selinc.com +31824 + Solar Technology, Inc + Tom Rothamel + tom&rothamel.us +31825 + HTW Dresden FB Informatik + Thomas Schubert + thom_schu&gmx.de +31826 + Logic United GmbH + Dominik Westner + westner&logicunited.com +31827 + Sharedband Limited + Paul Evans + paul.evans&sharedband.com +31828 + Pegasus Telecom + Juliao Braga + jb&pegasus.com.br +31829 + MIPIH + Equipe CSIM / MIPIH + csim-infrastructure&mipih.fr +31830 + deCODE genetics + Eirikur Hjartarson + Eirikur.Hjartarson&decode.is +31831 + Ad Hoc Network srl + Primo Rossi + info&adhoc.net +31832 + Los Angeles Community College District + Jorge Quinones + quinonj&email.laccd.edu +31833 + ACI Worldwide Inc + Murray Chapman + murray.chapman&aciworldwide.com +31834 + BecauseWeCan.at + Thomas Fritz + fritztho&gmail.com +31835 + John Berninger + John Berninger + john&ncphotography.com +31836 + DTS Systeme GmbH + Felix J. Ogris + hostmaster&dts-online.net +31837 + Medinformatix Incorporated + Nak Phaingdy + nak&medinformatix.com +31838 + ITech ROMANIA Ltd. + Cristian MOHR + Cristian.Mohr&ITechROMANIA.com +31839 + Gridiron Systems Inc. + David Korz + dkorz&gridironsystems.com +31840 + ChoicePoint Asset Company LLC + Vic Bancroft + Vic.Bancroft&Choicepoint.Com +31841 + University of Western Macedonia + Panagiotis Voutskidis + pvoutskidis&uowm.gr +31842 + e2E Services Limited + Mr Alan Hughes + alanhughes&e2eservices.co.uk +31843 + GUISYS CORPORATION + Mike Petersen + mbp&guisys.com +31844 + Satellite Systems Corporation + Mark Kelso + mkelso&satsyscorp.com +31845 + Dans Rock Electronics + Marlon L Kasekamp + engineering&dansrockelectronics.com +31846 + Wimba + System operations + sysops&wimba.com +31847 + Bejing XCZY networks Inc. 
+ Chen.Yaping + chenyaping&bjxczy.com +31848 + Sherman Finanial Group LLC + Bob Banasik + pki&sfg.com +31849 + Jacobsson Programutveckling + Oscar Jacobsson + oscar.jacobsson&gmail.com +31850 + Belden Solutions nv + Bart Sprengers + bart.sprengers&belden.com +31851 + WH-Netz - Verein fuer Netzwerksicherheit und Technologietransfer e.V. + Damian Philipp + kontakt-iana&wh-netz.de +31852 + Pirean Ltd. + Mike Cartwright + mike.cartwright&pirean.com +31853 + LLC Mail.Ru + Alexander Bykov + bykov&corp.mail.ru +31854 + Stix AS + Christopher Rasch-Olsen Raa + christopher&stix.no +31855 + Plusnet plc + Plusnet Network Operations Team + noc&plus.net +31856 + Wycliffe Bible Translators USA + Peter Reilly + preilly&wycliffe.org +31857 + Tirepiste + Daniel Hokka Zakrisson + daniel&hozac.com +31858 + Intellimedia Systems Ltd + Richard Hosking + hostmaster&intellimediasystems.com +31859 + HFR,Inc + Shin Myong-Ha + mhshin&hfrnet.com +31860 + Science and Technology Facilities Council + Ian Johnson + ian.johnson&stfc.ac.uk +31861 + embinet GmbH + Stephan Eisvogel + info-iana&embinet.de +31862 + Infinity Trust SRL + Chifu Valerian + vchifu&infinitytrust.ro +31863 + Australian Research Collaboration Service + Professor Anthony Williams + anthony.williams&arcs.org.au +31864 + kunden-server.org Network Services + Markus Petzsch + markus&petzsch.eu +31865 + Durchmesser + Marius Rieder + marius.rieder&durchmesser.ch +31866 + st. bernard software + Robert Scott + rscott&stbernard.com +31867 + Unassigned + ---none--- + ---none--- +31868 + The Village Group, Inc. + Wes Kussmaul + wes&authentrus.com +31869 + Empowering Media, Inc. + Lawrence Ludwig + larrylud&gmail.com +31870 + Minkamau + Hanspeter Uebelbacher + einar&bluewin.ch +31871 + MO Technologies + Mark G. Owens + owens&motech.net +31872 + Nanzan Gakuen + Yoshihiro Mizota + inet-admin&nanzan.ac.jp +31873 + Elanti Systems Inc. + Terry Dawson + terry.dawson&elantisystems.com +31874 + Alpha Networks Inc. + Celina Hsieh + Celina_Hsieh&alphanetworks.com +31875 + ShangHai XinLan Electronic Technology Co.,Ltd. + Liang Gao + gl1982&live.cn +31876 + de Koomen + Marco de Koomen + iana&dekoomen.nl +31877 + Karel Electronics + Sinan İkeda + sinan.ikeda&karel.com.tr +31878 + Chamaeleon AG + Florian Kluth + rhn&chamaeleon.de +31879 + TelASIC Communications + CJ Wang + cjwang&telasic.com +31880 + Programma Verwijsindex + M. Dell + mike.dell&ictu.nl +31881 + GGH Engineering s.r.l. + Paolo Signor + p.signor&gghsrl.com +31882 + Metrotek + Stanislav Bulov + bulov&metrotekcom.ru +31883 + Openstat (formerly 'Vega') + Anton Yuzhaninov + iana-pen&openstat.com +31884 + Kuipers Electronic Engineering + Marco de Koomen + kee&tmx.nl +31885 + ProDaM + Björn Müller + bjoern.mueller&prodam.de +31886 + Syntactic Sugar s. r. o. + Miroslav Stepanek + miroslav.stepanek&syntacticsugar.com +31887 + Schering-Plough Corporation + Sean Finnerty + sean.finnerty&spcorp.com +31888 + AdminDB.org + Herbert Specht + office&admindb.org +31889 + Apantac LLC + Thomas Tang + thomas.tang&apantac.com +31890 + Chicken and Porn + Allan Clark + allanc&chickenandporn.com +31891 + Universidade do Estado do Rio de Janeiro + Alexandre Sztajnberg + alexszt&uerj.br +31892 + T3G Technology Co.,Ltd + Yueliang Zong + zong_yl&163.com +31893 + NelNet + Hinrich Eilts + Hinrich.Eilts&nelnet.de +31894 + RPBUS LLC + Ron Svedersky + ron&rpbus.com +31895 + Chita Medias Network INC. 
+ Shinkai Masatomo + shinkai&medias.co.jp +31896 + Doxense SARL + Christophe Chevalier + chevalier&doxense.com +31897 + Knowledge Powered Solutions + Neil Howard + domains&kpsol.com +31898 + bit4id + Antonio Chello + info&bit4id.com +31899 + CS Group - Puerto Rico + Jose E. Calderon + jecalderon&csgrouppr.com +31900 + Tarleton State University + James Wiley + NetAdmin&Tarleton.edu +31901 + Backstop Solutions Group + John Skopis + jskopis&backstopsolutions.com +31902 + Ann Arbor Public Schools + Ed Cline + clinee&aaps.k12.mi.us +31903 + Global Information Technology, Inc + Huu Do + huu.do&globalit-i.com +31904 + Mandli Communications, Inc. + D.J. Nephew + djnephew&mandli.com +31905 + Springdale School District + Paul Miller + pmiller2&sdale.org +31906 + NMI InfoSecurity Solutions + Andrew T. Robinson + iana&nmi.net +31907 + Dimenzio Informatika Ltd. + Sandor Hincs + shincs&dimenzio.com +31908 + Orange Jordan + Qusai Jadoun + qusai.jadoun&orange-jtg.jo +31909 + Textiel Verhaeghe BVBA + Pieter Verhaeghe + pieter&verhaeghe-textiel.be +31910 + Haley Limited + Nicholas Cull + nicholas.cull&haley.com +31911 + Mine Site Technologies Pty Ltd + Chris Snell + c.snell&minesite.com.au +31912 + Gyventoju registro tarnyba prie Lietuvos Respublikos Vidaus reikalu ministerijos + Marija Norkeviciene + grt&vrm.lt +31913 + Smith Micro Software, Inc. + David Sperling + dsperling&smithmicro.com +31914 + AFCC Inc. + Erik Little + erik&afccinc.com +31915 + Nstrument, Inc + Craig Roffers + croffers&nstrument.net +31916 + Exatrol Corporation + David Sharp + ianasnmp&exatrol.com +31917 + Tian Di Ying Cai(SinoProfessional) Ltd. + Li Chang Geng + LRGCC&YAHOO.COM.CN +31918 + JSC „Izhevskiy Radiozavod“ + Alexander Kurko + kurko&irz.ru +31919 + Emerion.com + Philipp Marek + philipp.marek&emerion.com +31920 + PHARMDATA,s.r.o. + Michal Rezny + rezny&pharmdata.cz +31921 + STS GROUP SA + Michel Gumilar + michel.gumilar&group-sts.com +31922 + Keeneo SAS + Alberto Avanzi + alberto.avanzi&keeneo.com +31923 + Frontier Electronic Systems Corp. + Reagan Thomas + rthomas&fescorp.com +31924 + DmitryKulgavy + Dmitry Kulgavy + dkulhavy&gmail.com +31925 + Miyowa + Laurent Labatut + llabatut&miyowa.com +31926 + Siklu Communication LTD + Lior Mordechay + lior.m&siklu.com +31927 + SQdata + Norbert Thiebaud + norbert&sqdata.com +31928 + Q-Industries, Inc. + Tony Czeh + pczeh&q-industries.com +31929 + AKUA Corp + Greg Kerr + Greg.Kerr-oid&akua.com +31930 + Wiinz Ltd + Philip Bergen + philip&wiinz.com +31931 + CLERTECH.COM,INC + MANETIRONY CLERVRAIN + monti&clertech.com +31932 + 4IPNET, INC. + Cadon Sheu + mis&4ipnet.com +31933 + Powerbox Australia + Peter Keeler + design&powerbox.com.au +31934 + Getronics Pinkroccade Healthcare BV + M.G.J.M. Lenferink + marcel.lenferink&getronics.com +31935 + Younou, Inc. + Michael Duff + hostmaster&younou.com +31936 + Inventum Technologies Private Limited + Padam Jeet Singh + padam.singh&inventum.net +31937 + NAW Enterprises Inc + Trevor Davis + tdavis&new-atom.net +31938 + anderScore GmbH + Jan Luehr + jan.luehr&anderscore.com +31939 + rkirkpat.net + Ryan Kirkpatrick + penadmin&rkirkpat.net +31940 + TV1.DE + Michael Fink + iana&tv1.de +31941 + EcoliHub + Ms. 
Dawn Whitaker + dwhitaker&purdue.edu +31942 + AKRUPP Networking + Andreas Krupp + AndreasKrupp&gmx.net +31943 + Clemens Fink + Clemens Fink + iana.oid&clez.net +31944 + ish group pty ltd + Ari Maniatis + ari&ish.com.au +31945 + Advanced Powers Merchant Empires + Brian Zablocky + zabmilenko&gmail.com +31946 + Elenos Srl + Massimo Liggio + m.liggio&elenos.com +31947 + Yomu Project + Philipp Kaluza + yomu-maint&yomu.de +31948 + RC Alltech Power Systems Pvt Ltd + I.Nagaraju + rcalltechups&rcalltechups.com +31949 + Spikelops + Andrew Hepburn + spike&spikelops.com +31950 + Nextivity, Inc. + Curtis Clinton + cclinton&nextivityinc.com +31951 + Organizacion de Servicios Directos Empresarios + Juan Ignacio Narvaiz + juan.narvaiz&osde.com.ar +31952 + Miroslaw Lach + Miroslaw Lach + iana&lach.waw.pl +31953 + Markus Falb + Markus Falb + markus.falb&fasel.at +31954 + E-smile Co., Ltd. + Hideo Saito + saito&e-smile.ne.jp +31955 + Prolar Corporation + Engineering Department + abuse&prolar.net +31956 + GuangZhou E-Standard Software Technology Co.,Ltd. + Yin Zhongbin + yzbin&126.com +31957 + TSDA - Tecnologia e Soluções Digitais Aplicadas LTDA + Fernando Garcia Pina + fernando&tsda.com.br +31958 + INDUSTRONIC Industrie-Electronic GmbH & Co. KG + Wilfried Lenz + wilfried.lenz&industronic.de +31959 + EDV-Beratung Robert Velter + Robert Velter + Robert&Velter.de +31960 + Altech ISIS + Stefan Burwitz + burwitzs&altechisis.com +31961 + PubliTronic bv + Jeroen Domburg + jeroen&publitronic.nl +31962 + Flybe + Nathan Burden + nathan.burden&flybe.com +31963 + BEJING Tongtian Century Technology CO.LTD + Yang Xiaofei + xfyang1973&hotmail.com +31964 + chengdu goldtel communication (group) co.,ltd + jichanghu + jch_zxc&yahoo.com.cn +31965 + PGE Gornictwo i Energetyka S.A. + Pawel Koc + pawel.koc&bot.pl +31966 + Lampe-Batkin Associates, Inc. + Adam Batkin + adam&batkin.net +31967 + Thomas Production Company, L.L.C. + Colin Thomas + colin.thomas&thomasproduction.us +31968 + Datum Systems, Inc. + Michael Boutte + mike&datumsystems.com +31969 + Gradwell dot com Ltd + Stuart Herbert + stuart.herbert&gradwell.net +31970 + Siegel Daten- und Anwendungsservice + Stefan Siegel + iana&sdas.de +31971 + MQuest S.A. + Claudio Latorre + snmp&mquest-technologies.com +31972 + Gruppo Software Oy + Mikko Ahonen + info&grupposoftware.com +31973 + thejof.com + Jonathan Lassoff + jof&thejof.com +31974 + Cavazza Anna Sas + Giacomo Cesari + giacomocesari&tiscali.it +31975 + Cable Vision Electronics Co., Ltd. + Kevin S.H. Huang + kevinhuang&cable-vision.com.tw +31976 + INTEG Process Group, Inc. + Richard Shulkosky + rshulkosky&integpg.com +31977 + Saia-Burgess Controls AG + Schneeberger Francis + francis.schneeberger&saia-burgess.com +31978 + Forticom + Kristijonas Siaulys + kristijonas&forticom.lt +31979 + AltimatOS + Gary Greene + greeneg&altimatos.com +31980 + Melog.com + Radomir Klacza + rklacza&melog.com +31981 + Hongdian Technologies + Xiao Gong + xgong&hongdian.com +31982 + ORNL Federal Credit Union + Michael West + mwest&ornlfcu.com +31983 + FOUGEROLLE + Pascal HOYAU + pascal.hoyau&fougerolle-fr.com +31984 + Ericsson Australia Pty. Ltd. + Lindon Parker + lindon.parker&ericsson.com +31985 + Syphan Technologies + Carl Sherratt + carl&syphan.com +31986 + GOL MOBILE PRODUTOS E SERVIÇOS DE TECNOLOGIA DA INFORMAÇÃO LTDA. 
+ Reinaldo Mello + reinaldo&golmobile.com.br +31987 + Splatterladder + Torsten Sachse + proddi&splatterladder.com +31988 + duagon Germany GmbH (formerly MEN Mikro Elektronik GmbH, Nuremberg) + Ruediger Zapf + ruediger.zapf&duagon.com +31989 + EverMAX s.r.o. + Martin Zizka + zizka&evermax.cz +31990 + Continental AG + Stefan Vogel + stefan.vogel&continental-corporation.com +31991 + TOSCO CORPORATION + Toru Uetani + toru.uetani&tosco.co.jp +31992 + Oceana Sensor Module + Jens Hult + jhult&oceanasensor.com +31993 + Parinya Software + Parinya Thipchart + thipchart&gmail.com +31994 + NxGen Communications Pte Ltd + Lim Kheng Yong + khengyong.lim&nxg-c.com +31995 + National Bank of Poland + Jacek Skibinski + jacek.skibinski&nbp.pl +31996 + Brian Thomas Matthews Limited + Brian Matthews + brian&btmatthews.com +31997 + Cellusys + Daniel McTague + daniel&cellusys.com +31998 + NOVA KREDITNA BANKA MARIBOR d.d. + Andrej Radosevic + aradosevic&nkbm.si +31999 + Adcon Telemetry GmbH + Robert Ernst + r.ernst&adcon.at +32000 + Ekstrem Bir Bilgisayar + Ozcan Oksuz + oksuz&bilkent.edu.tr +32001 + Convercom AG + Marcel Haldemann + marcel.haldemann&convercom.ch +32002 + Ba-Bi Mobile Media Co. Ltd. + Yan Leung + yanl&ba-bi.com +32003 + herbstwest.de + Alvaro Aguilera + alewar&herbstwest.de +32004 + On-Waves ehf + Roch-Alexandre Nomine + roch&on-waves.com +32005 + Obra Social de Empleados Publicos de Mendoza (OSEP) + Leandro Santinelli + lsantinelli&osep.mendoza.gov.ar +32006 + Fashion Institute of Technology of the State University of New York + Bryan Gucwa + bryan_gucwa&fitnyc.edu +32007 + Innovative Labs + Joaquin Penroz Olavarría + jpenroz&grupoinl.com +32008 + Digital Rapids Corp + Brian Stevenson + brian&digital-rapids.com +32009 + OuterNet + Jamie Pugh + jamie&outer.net +32010 + Pixelworks + Nick Huang + NHuang&pixelworks.com +32011 + Kabel Deutschland GmbH + Hadmut Danisch + hadmut.danisch&kabeldeutschland.de +32012 + Penango, Inc. + Sean Leonard + oid+iana&penango.com +32013 + Intranet Solution + Paul Kumlin + paul.kumlin&itsol.fi +32014 + ESAC + Salvatore Cataldi + salvatore.cataldi&esacsrl.com +32015 + ExtraHop Networks, Inc. + Jesse Rothstein + hostmaster&extrahop.com +32016 + Latvia University of Agriculture + Aldis Berzins + aldis&llu.lv +32017 + LinQuest Corporation + Naveen Reddy + naveen.reddy&linquest.com +32018 + Law Offices Of Roger E. Naghash + Roger E. Naghash + ren&lawfirm4u.com +32019 + Intellon Corporation + Charles Maier + charles.maier&intellon.com +32020 + doudemoii + Hideo Saito + hideo&doudemoii.net +32021 + Positron Inc. + Jean-Francois Mailhot + jfmailhot&positronaccess.com +32022 + Pegatron Computer Inc. + Hausen Lo + hausen_lo&pegatroncorp.com +32023 + American Clean Air, Inc. (formerly 'XeNSiiS') + Jeff Doyle + webmaster&americancleanair.net +32024 + Applied Polymorphism + Khaled Atiyeh + khaled_Atiyeh&hotmail.com +32025 + Primal Research + Randel Anderson + anderson&primalresearch.com +32026 + Sigma ITS Co., Ltd. 
+ Yasutatsu Arai + arai-yasutatu&sigmaits.jp +32027 + Edvina AB + Olle E Johansson + info&edvina.net +32028 + Tray International Services and Administration (Pty) Ltd + Benjamin Michel + benjamin.michel&tray-international.com +32029 + The Rafferty Patent Law Firm, PC + Ryan Rafferty + mail&raffertyfirm.com +32030 + Commodity Systems + André Gentil + agentil&commodity.com.br +32031 + XITIJ INDIA + Bhavinkumar Patel + bhavin&xitij.net +32032 + DWI Technologies + Daniel Wamara + dwamara&dwitech.com +32033 + Apfelwald + Florian Buchwald + florian.buchwald&apfelwald.net +32034 + Finnsat Ltd. + Paul Kumlin + it&finnsat.fi +32035 + ISEN Consulting and Services + Ken Xing + kenxing&isenconsulting.com +32036 + Borders Group Inc. + Art Hill + networkservices&bordersgroupinc.com +32037 + Commerce Lab + Yaroslav Tarasenko + ytarasenko&gmail.com +32038 + TELZAS + Tomasz Smialkowski + tomasz.smialkowski&telzas.com.pl +32039 + Total Computer Kft. + Andras Herczig + office&rendszergazdak.hu +32040 + Tecno&Logic Consulting + Sergio Ninotti + s.ninotti&tecnoandlogic.it +32041 + Association Paris-Montagne + Georges Racinet + georges.racinet&paris-montagne.org +32042 + Virginia Community College System + Christopher Glaze + cglaze&vccs.edu +32043 + Hacklab Toronto Club Inc. + Owen Jacobson + owen.jacobson&grimoire.ca +32044 + IVOCS + Gerard Havermans + gerard.havermans&ivocs.com +32045 + Avanquest Software + Kamel Messaoudi + kmessaoudi&avanquest.com +32046 + NetMatch + Angelo Höngens + systeembeheer&netmatch.nl +32047 + IDRIX + Mounir IDRASSI + mounir.idrassi&idrix.fr +32048 + x9000.com Consulting Services Limited + Paul Prior + paulprior&x9000.com +32049 + FacilityONE + Dr Robin Alston + ralston&facilityone.com +32050 + PacketFlux Technologies, Inc. + Forrest W Christian + forrestc&packetflux.com +32051 + SAMsystems GmbH + Heribert Oechsler + h.oechsler&samsystems.de +32052 + Toronto Hydro Corporation + Kshamit Dixit + KDixit&torontohydro.com +32053 + 3Y Power Technology, Inc. + Kevin Chen + Kevinc&3ypower.com +32054 + IPSL (formerly 'IPSL/CETP') + Elisabeth Porteneuve + elisabeth.porteneuve&latmos.ipsl.fr +32055 + Service to Youth Council Inc + Information and Communication Technology + ict&syc.net.au +32056 + Vietnam Datacommunication Company (VDC) + Hung La The + hunglt&vdc.com.vn +32057 + Mercury Brain Systems + Hidetoshi YOSHIMOTO + yoshimoto&mercury-bs.com +32058 + Vietnam Posts and Telecommunications Group (VNPT) + Cuong Nguyen Xuan + cuongnx&vnpt.com.vn +32059 + Axis Network Technology Limited + Roger McCalman + snmp&axisnt.co.uk +32060 + Babelway + Mathieu Pasture + mathieu.pasture&babelway.com +32061 + VAS Latvijas Valsts radio un televizijas centrs + Kārlis Mālnieks + info&lvrtc.lv +32062 + Endersys + Omer Faruk Sen + info&endersys.com +32063 + The eID Company + Olivier LIBON + olivier.libon&eidcompany.be +32064 + state51 + Tomas Doran + t0m&state51.co.uk +32065 + jonEbird + Jon Miller + jonEbird&gmail.com +32066 + EAFB - Escola Agrotécnica Federal de Barbacena-MG + Herlon Camargo + herlon.camargo&eafb.org.br +32067 + IPaXiom Networks + Saber Fakih + saber&ipaxiom.com +32068 + Scherndl + Scherndl Bernhard + admin&scherndl.at +32069 + University of Laverne + Mark Smith + msmith&ulv.edu +32070 + Laser Interferometer Gravitational-Wave Observatory (LIGO) + Stuart Anderson + anderson&ligo.caltech.edu +32071 + Vialtus Solutions + Roeland Mertens + roeland.mertens&vialtus.com +32072 + SEKIZAWA Corp. Inc. 
+ SEKIZAWA Kazuya + k_sekizawa&nazca-net.co.jp +32073 + MesaVida Information Management + Robert Pettengil + rcp&mesavida.com +32074 + Prorail BV + Chris Verhoef + chris.verhoef&prorail.nl +32075 + XS Networks BV + Eric Veraart + e.veraart&xsnetworks.net +32076 + Connectical IT + Andrés J. Díaz + ajdiaz&connectical.com +32077 + Sumlock Electronics (NE) Ltd + John Osborne + john.osborne&sumlock.co.uk +32078 + Zentrum Servicios SA de CV + William Anderson Carcamo + william&zentrum.com.mx +32079 + ESTeem Wireless Modems Inc. + Brent Strecker + support&esteem.com +32080 + Baseline Communications + Ingvild Sorteberg + ingvild.sorteberg&baseline.no +32081 + Cytec Industries Inc. + Tariq Rahman + tariq.rahman&cytec.com +32082 + Unassigned + Removed 2010-08-26 + ---none--- +32083 + HealthBlocks, Inc. + Darin Greaham + darin&healthblocks.com +32084 + DeltaNode Ltd. + Hans Schillstrom + hans.schillstrom&deltanode.com +32085 + E.ON Business Services GmbH + Florian Dietrich, Peter Marschall + pki&eon.com +32086 + Fiberblaze + Jakob Hilmer + ftth&fiberblaze.com +32087 + x:fer GmbH + Markus Wernig + info&xfer.ch +32088 + College of Micronesia-FSM + Kenneth Girrard + kgirrard&comfsm.fm +32089 + IMS Messsysteme GmbH + Christian Thiel + christian.thiel&ims-gmbh.de +32090 + Waumovil + Alsenitd Rausseo + arausseo&waumovil.com +32091 + AES Corporation + Bill Goretkin + goretkin&goretkin.com +32092 + pragma people + Alfonso Ferrigno + a.ferrigno&gmail.com +32093 + Engineering Systems Incorporated + Sys Admin (Walter Hawkins) + sysadmin&esi-il.com +32094 + Vantage Tech Solutions + Ishwara Kedila + viked2000&yahoo.com +32095 + Hosei University Research Institute, California + Ivan Ho + ivan&huric.org +32096 + APD Technologies Inc. + Anthony D'Souza + support&apdtech.com +32097 + Reorder + Luca Bortot + l.bortot&reorder.it +32098 + Baseline IT + Darren Hemphill + darren&baselineit.co.za +32099 + Midnight Oil Consulting, INC. + Ketema Harris + ketema&midnightoilconsulting.com +32100 + Oversi Networks + Ronen Itzhaki + roneni&oversi.com +32101 + Telinea d.o.o. + Adi Vaizovic + adi.vaizovic&telinea.com +32102 + Tiaxa + Raul Tremsal + rtremsal&tiaxa.net +32103 + New York Connect + Eric Futch + efutch&nyct.net +32104 + InCharge Systems, Inc + Patrick Looby + patlooby&inchargesys.com +32105 + Chief Security Officers, SA + Fernando Cardoso + noc&cso.pt +32106 + HaDiKo e.V. + Noël Weidenhammer + vorstand&hadiko.de +32107 + SOH Systems + Scott O'Hare + sohare&suffolk.lib.ny.us +32108 + PLANAR LLC + Vladimir Kuznetsov + kuznetsov&planar.chel.ru +32109 + ADC Co.,LLC + Nevolin Anatoly + permadc&mail.ru +32110 + IPVox Sdn Bhd + Kim Lindberg + support&ip-vox.com +32111 + Digital Solutions Ltd. + Peyo Hristov + peyo&digitalsol.net +32112 + CENARIO systems GmbH (formerly 'VCS Computer-Systeme GmbH') + Christian Kirschner + iana.pen&vcs-web.de +32113 + Vocord + Alexey Kadeishvili + alexey.kadeishvili&vocord.ru +32114 + ORELIA SAS + Christophe GLEMAREC + christophe.glemarec&orelia.fr +32115 + Zweites Deutsches Fernsehen Anstalt des oeffentlichen Rechts + Tobias Botens + zvd&zdf.de +32116 + ACTIV Financial Systems, Inc. + Simon Gray + Simon.Gray&ActivFinancial.com +32117 + Unassigned + Returned 2008-10-20 + ---none--- +32118 + iWave Software + Julius Clayton + JClayton&iwavesoftware.com +32119 + tera.sys hardware inc. + Michael Habacher + m.habacher&terasys.at +32120 + Focon Electronic Systems A/S + Frank B. 
Jakobsen + SWLicense&focon.dk +32121 + Armstrong Relocation + Chris Phelan + cphelan&goarmstrong.com +32122 + Netafim Australia + Aaron Were + awere&netafim.com.au +32123 + Sidlinger Computer Corporation + Bruce D. Sidlinger + bruce&sidlinger.com +32124 + Stadtverwaltung Ludwigshafen + Stefan Hofem + stefan.hofem&ludwigshafen.de +32125 + Small Media Giant Ltd + Marek Zwiefka-Sibley + marek&smallmediagiant.com +32126 + Lietuvos archyvu departamentas prie Lietuvos Respublikos Vyriausybes + Inga Petraviciute + arch.dep&archyvai.lt +32127 + Darkware Co. + Gergely Szerovay + gergely.szerovay&gmail.com +32128 + Octotelematics S.r.l. + Mario Pandolfi + system&octotelematics.com +32129 + Innovation Center Computer Assisted Surgery (ICCAS) + Stefan Bohn + stefan.bohn&iccas.de +32130 + Data Elements for Emergency Department Systems (DEEDS) + Kevin M. Coonan, MD + kevin.coonan&gmail.com +32131 + ESPN, Inc. + Diane Larivee + ESPN.SNMP.PEN.Management&espn.com +32132 + UltraMonkey-L7 Project + Kouhei TANUMA + ultramonkey-l7-devel&lists.sourceforge.jp +32133 + nativenet.ch + Andreas Schuerch + andreas.schuerch&nativenet.ch +32134 + Zestysoft + Ian Brown + snmp&zestysoft.com +32135 + WWK Lebensversicherung a.G. + Jürgen Neumeier + juergen.neumeier&wwk.de +32136 + Hackburg + Pierre Spring + pierre.spring&caillou.ch +32137 + TECNOSTEEL + Giuseppe Basso + marketing&tecnosteel.it +32138 + Universidade Federal de Itajubá + Pablo Marques de Oliveira + pablo&unifei.edu.br +32139 + Applitron Datasystem AB + Niklas Hallqvist + niklas&appli.se +32140 + Chalmers University of Technology AB + Chalmers NIC + cth-nic&chalmers.se +32141 + University of Maryland Center for Environmental Science + Robert Sink + sinkr&cbl.umces.edu +32142 + Solid Solutions + Satoshi ARA + satoshi.ara&solids.jp +32143 + Finsignia LLC + Susan Potter + susan.potter&finsignia.com +32144 + TransHuman Design + Ryan Rawdon + ryan&soldat.pl +32145 + Allergan, Inc. + Tomasz Kozlowski + kozlowski_tomasz&allergan.com +32146 + Validian Corporation + Peter Ritchie + validian&peterRitchie.com +32147 + Rob Johnson + Rob Johnson + RBJ1128&Gmail.com +32148 + Individuali veikla + Donatas Cerniauskas + donatas&cerniauskas.com +32149 + UXCG + Corey Reynolds + corey.reynolds&uxcg.com.au +32150 + Zetron, Inc. + Clark Emerick + cemerick&zetron.com +32151 + NerdGroup + Fernando Ribeiro + fernando.ribeiro&gmail.com +32152 + Rearden Labs + Ian Buckley + ian.buckley&rearden.com +32153 + damosys ltd. + yun hyung taek + yun0512&gmail.com +32154 + iXuros Limited + Brian Andrews + info&ixuros.com +32155 + Humana, Inc. + Ajay Gupta + agupta&humana.com +32156 + TechCare + Bertrand Allo + bertrand.allo&tcare.fr +32157 + IntelMS (Pty) Ltd. + Darryl Garth Vine + support&bluebird.co.za +32158 + Alcom + Jan-Erik Eriksson + jee&alcom.ax +32159 + Ancla Internet, S.L. + Jordi Llonch + llonchj&gmail.com +32160 + Power Innovation Stromversorgungstechnik GmbH + Ulrich Carstensen + u.carstensen&powerinnovation.com +32161 + Trilogy Communications Ltd + Philip Luke + phil.luke&trilogycomms.com +32162 + Eutecert + Jakubowski Marcin + gcth&eutecert.eu +32163 + Umber Systems + Valia Osborne + vai&umbersystems.com +32164 + JWARE TECHNOLOGIES + Francois SaintLu + oid-pen&jwaretechnologies.com +32165 + WestGrid + Lixin Liu + liu&sfu.ca +32166 + Computacenter + Steven Mai + steven.mai&computacenter.com +32167 + REDCOM Laboratories, Inc + Daniel J Hazekamp + it&REDCOM.com +32168 + Fiber-Span + James Stewart + jstewart&fiber-span.com +32169 + Turkcell Teknoloji Arastirma ve Gelistirme A.S. 
+ Ender ULUSOY + ender.ulusoy&turkcellteknoloji.com.tr +32170 + MAYA Group + Christopher DeMarco + it&maya.com +32171 + SeaMicro Inc + Carl Yang + carl&seamicro.com +32172 + Prefeitura Municipal de Franco da Rocha + Rodrigo Campos + rca.campos&gmail.com +32173 + Royal Observatory Edinburgh + Horst Meyerdierks + network-manager&roe.ac.uk +32174 + Universidade Estadual do Oeste do Paraná + Ademar Feil + ademar&unioeste.br +32175 + Thieme ICT Services B.V + Ron Offerman + rofferman&ticts.nl +32176 + GlobalTelecom + Jongryong Kim + jrkim&globaltelecom.co.kr +32177 + Barossafarm + Dominic Storey + dstorey&barossafarm.com +32178 + ecotel communication ag + Dennis Kuchenbecker + dennis.kuchenbecker&nacamar.de +32179 + SANDS INDIA Pvt Ltd + Suseendran Renga Bashyam + micro&sandsindia.com +32180 + Moscow Branch of Federal Migration Service, Russia + Gennady Kudryashoff + gk&pvu.msk.ru +32181 + Amuser SpA + Technical Support + technicalsupport&amuser.it +32182 + cubro + Gabriel Guriencu + office&cubro.net +32183 + Answers.com + Derek Balling + derekb&answers.com +32184 + Instantiations Inc + Seth Hollyman + hostmaster&instantiations.com +32185 + Environmental Technology, Inc. + Chuck Gartland + cgartland&networketi.com +32186 + Prolexic Technologies + Paul Sop + paulsop&prolexic.com +32187 + Founder International Inc. + Xu Hao + xu_hao&founder.com +32188 + SkyCash Sp. z o.o. + Anton Lundberg + anton.lundberg&skycash.com +32189 + Richards-Zeta Building Intelligence + David Leimbrock + dleimbrock&richards-zeta.com +32190 + REDOUBT, Inc. + Albert Lingelbach, Jr. + alingelb&yahoo.com +32191 + Emergensys solutions inc. + Jerome Tetreault + jerome.tetreault&emergensys.net +32192 + Vanguard Rugged Storage LLC + Craig Watson + craig.watson&vanguard-rugged.com +32193 + I-New Unified Mobile Solutions AG + Mario Zsilak + mario.zsilak&i-new.com +32194 + InDorse Technologies + Daniel Pohl + daniel.pohl&indorse-tech.com +32195 + CommTel Network Solutions Pty Ltd. + Paul Ross + paulr&commtelns.com +32196 + EBFS AG + Denis Korobow + it&ebfs.eu +32197 + Bunkspeed, Inc + James Briant + jamie.briant&bunkspeed.com +32198 + Esang Technologies Co., Ltd. + Min-yeong, Lee + michel&esangtech.com +32199 + Louisiana Immersive Technologies Enterprise + Tech Support + techsupport&lite3d.com +32200 + Seven Networks, Inc. + Mika Uusitalo + mika.uusitalo&seven.com +32201 + Sonitor Technologies AS + Øystein Haug Olsen + oo&sonitor.com +32202 + Saima Sistemas, S.L. + Vaino Venermo + vaiski&saimanet.net +32203 + Pixeon + Gabriel Bianco + gabriel.bianco&pixeon.com.br +32204 + Mykola Grechukh + Mykola Grechukh + nick.grechukh&gmail.com +32205 + Twistround Ltd + Per Larsson + iana&twistround.com +32206 + Oksijun + Michael Gwin + iana&oksijun.net +32207 + U.S. Department of Transportation + Eric Baldwin + eric.baldwin&dot.gov +32208 + www.jointhegrid.com + Edward Guy Capriolo + edlinuxguru&gmail.com +32209 + Escuela Bancaria y Comercia + Victor Ilich Mendoza Celis + vi.mendoza&ebc.edu.mx +32210 + eAgency, Inc. + Boyd Machtolff + bmachtolff&eagency.com +32211 + Positech Consulting Ltd + Paul O'Sullivan + pos&positechconsulting.com +32212 + Armstrong International, Inc. + Luke Way + luke&armstronginternational.com +32213 + Luke Rules + Luke Way + iana&lukerules.com +32214 + GRETA de Bordeaux + Sébastien Monbrun + admin-a-0332820l&ac-bordeaux.fr +32215 + John Anson Co. LLC + John Anson + janson&johnanson.com +32216 + SIOS Technology, Inc. 
+ motomichi tanaka + mtanaka&sios.com +32217 + BÖWE Systec GmbH + Wolfgang Meinelt + wolfgang.meinelt&boewe-systec.com +32218 + tedman.com + Ted Leung + tedman&tedman.com +32219 + NBS srl + Silvio Fabi + s.fabi&nbsgroup.it +32220 + Esperion Therapeutics, Inc. + David Thibault + admin&esperion.com +32221 + Intera Group, Inc. + Michael E. Laws + mlaws&interainc.com +32222 + TCM Networks + Matthew Gray + matthew.gray&tcm.co.za +32223 + Shanghai Kyattinfo Inc. + Johnny Liu + johnny&kyattinfo.com +32224 + Epivalley Co., Ltd. + Byung Joo, Kang + bjkang&epivalley.com +32225 + PCCW Mobile + Thomas Ng + itstng&msn.com +32226 + Ingenieurbuero für innovative Informationstechnik + Joerg Beckmann + jb&iiit.de +32227 + ArtSoft Consult + Fulop Levente + hostmaster&artsoft-consult.ro +32228 + MERAWEX Sp. z o. o. + Witold Grabysz + merawex&merawex.com.pl +32229 + Nokia (formerly 'Novarra, Inc.') + William Boyle + william.boyle&nokia.com +32230 + Aeris Communications, Inc. + Bob Fultz + Bob.Fultz&aeris.net +32231 + hbcms + Davi Huang + support&hbcms.com +32232 + Landratsamt Muenchen + Agnes Taffertshofer + agnes.taffertshofer&lra-m.bayern.de +32233 + Vionis d.o.o. + Igor Ostriž + igor.ostriz&vionis.info +32234 + KMUX Project + Wilhelm Meier + wilhelm.meier&fh-kl.de +32235 + ProAce International Trading Ltd. + George Chan + proacehk&gmail.com +32236 + DATASPHERE S.A. + Patrick Monnerat + pm&datasphere.ch +32237 + Atea Systems Ltd. + Murray Lum + support&ateasystems.com +32238 + Venture Research Inc. + Kevin Baker + tech&ventureresearch.com +32239 + Emnico Technologies Ltd + Heydar Faramarzi + heydar&emnico.com +32240 + Clipsal China Ltd. + Keith Leng + jianqing-keith.leng&cn.schneider-electric.com +32241 + Vederie + Jozef Bombiak + JozefB&vederie.com +32242 + Comic Relief + Anthony Mizon + amizon&saviso.com +32243 + FAUSER AG + Marc Fauser + marc.fauser&fauser-ag.com +32244 + CASO - Consultores Associados de Organizações e Informática + César Alves + calves&caso.pt +32245 + PingUs Solutions oHG + Oliver Kiel + oliver.kiel&pingus.de +32246 + IBS.Datafort + Nick Perhunkov + nperhunkov&ibs.ru +32247 + NewNet Communication Technologies, LLC + Kutluk Uslu + kutluk.uslu&newnet.com +32248 + Wanzl Metallwarenfabrik GmbH + Juergen Haemmerle + Juergen.Haemmerle&wanzl.de +32249 + National Food Technology Research Centre + Patrick A. Jansen + patrick&naftec.org +32250 + San Francisco International Airport, Information Technology & Telecommunications + Daniel Gonzales + daniel.gonzales&flysfo.com +32251 + ETRI SRC + Jun Seob Lee + juns&etri.re.kr +32252 + Pavol Jozef Šafárik University in Košice + Michal Ráček + michal.racek&upjs.sk +32253 + DANU Technologies + Michael Slevin + ms1&danutech.com +32254 + Hoare Lea LLP + Hoare Lea IT Support + itsupport&hoarelea.com +32255 + J. Christof GmbH + Wolfgang Spinotti + iana&christof-group.com +32256 + Sonlinc A/S + Lars Skjærlund + lsk&sonlinc.dk +32257 + RealTime Intelligence + Winona Whitener + winona.whitener&poweredbyrex.com +32258 + WYDE Voice, LLC + Dmitriy Bondar + noc&wydevoice.com +32259 + Meyer Tool, Inc. + Michael Kinney + mikek&meyertool.com +32260 + PacketDNA Inc. + Sonny Chaiwala + sonnyc&packetdna.com +32261 + Varolii + Shayne Balduf + shayne.balduf&varolii.com +32262 + Cyber Operations, Inc. 
+ Kevin Etheridge + kevin&cyberoperations.com +32263 + Plus Net Solutions + Javier Outón + jouton&plusnetsolutions.com +32264 + National Institute of Informatics + Kazutsuna Yamaji + yamaji&nii.ac.jp +32265 + Johnny Software Studio + JianZhuo, Liu + jjkkxyz&gmail.com +32266 + Power PLUS Communications AG + Bartosz Dendor + b.dendor&ppc-ag.de +32267 + Institute of Science and Technology Austria + Core IT + coreit&ista.ac.at +32268 + Hobby Lobby Stores, Inc. + John Sargent + john.sargent&hobbylobby.com +32269 + Novantiqua multimedia + Paolo Tentori + paolot&promo.it +32270 + Elseta + Tomas Sinkunas + info&elseta.com +32271 + Zaxmy + Johan Zaxmy + oid&zaxmy.com +32272 + Sebastien Aperghis-Tramoni + Sebastien Aperghis-Tramoni + sebastien&aperghis.net +32273 + netzwerkplanet. + Christian Lox + lox&netzwerkplanet.com +32274 + Sheffield Teaching Hospitals Foundation Trust + Steven Wood + steven.wood&sth.nhs.uk +32275 + Interscan Navigation Systems + Andrew Cain + info&interscan.com.au +32276 + SiChuan Public Information Industry Co.Ltd (SCPI) + lei li + lilei5353&hotmail.com +32277 + ZTI + Pascal Lequeux + pascal.lequeux&zti.fr +32278 + RT Systems (Pty) Ltd + George Daras + george&minelight.co.za +32279 + Ryan J Thompson + Ryan Thompson + iana&ry.ca +32280 + Johnston Press Plc + Richard Grant + richard.grant&thenews.co.uk +32281 + Videoplaza + Dante Buhay + dante&videoplaza.com +32282 + DaimonLab + Damiano Scaramuzza + dscaramuzza&daimonlab.it +32283 + TBits.net GmbH + Thomas Börnert + tb&tbits.net +32284 + erefer + Frederic Rouyre + rfr&inter-land.net +32285 + Sumavision Technologies Co.,Ltd + guoqiang zhao + sumavisionzhao&126.com +32286 + ACD Elektronik GmbH + Jürgen Striegel + juergen.striegel&acd-elektronik.de +32287 + Sattvik Software & Technology Resources, Ltd. Co. + Daniel Solano Gómez + pen&sattvik.com +32288 + eg-electronic GmbH + Thomas Henkel + thenkel&eg-electronic.de +32289 + CellSpotting.com + Carl Johan Ferner + calle.ferner&cellspotting.com +32290 + Records For Living, Inc. + Simone L. Pringle + simone&RecordsForLiving.com +32291 + lingzhou NetWork + jack zhang + jackzhangcl&163.com +32292 + Florida Department of Children and Families + Larry Kenyon + Larry_Kenyon&dcf.state.fl.us +32293 + netfutura GmbH & Co. KG + Kevin Streit + kevin.streit&googlemail.com +32294 + Lewis Silkin + Nicolas Martin + nicolas.martin&lewissilkin.com +32295 + cPanel Inc + David Koston + dave&cpanel.net +32296 + Beach Horizon LLP + John Oconnell + bcm_test&yahoo.co.uk +32297 + s2company s.r.l. + Francesco Ronsivalle + francesco.ronsivalle&s2company.it +32298 + Communication Networks, LLC + Robert Gallagher + rgallagher&comnet.net +32299 + Coatesoft + Greg Coates + greg&coatesoft.com +32300 + Alcorn McBride Inc. + Hunter Olson + hunter&alcorn.com +32301 + Siemens IT Solutions and Services + Frank Wagner + f.wagner&siemens.com +32302 + Insert Data Here + Thomas James + tvjames&insertdatahere.net +32303 + Anchor Systems Pty Ltd + Anchor NOC + noc&anchor.net.au +32304 + Lojas Maia LTDA. + Julianne Martins Cavalcanti + juliannecavalcanti&lojasmaia.com.br +32305 + KAMP Netzwerkdienste GmbH + Roland Irle + ri&kamp.de +32306 + Soolbox Association + Vincent Batoufflet + contact&phantoom.net +32307 + Embedded Systems + Lukasz Panasiuk + lukasz.panasiuk&embedded-systems.com.pl +32308 + NLI Business Support as + Stian Viddal + stian.viddal&nli.no +32309 + Chittagong Online Limited + Aniruddha Barua + cto&colbd.net +32310 + Edge Technologies + Sven Wallage + support&edgetech.eu +32311 + CPI Progetti S.p.a. 
+ Marcella Ceruti + m.ceruti&cpiprogetti.it +32312 + International Hellenic Univeristy + Iraklis Kamilatos + support&ihu.edu.gr +32313 + Red Lambda, Inc. + Matt Whitlock + mwhitlock&redlambda.com +32314 + sp-its + Stephan Peijnik + sp&sp.or.at +32315 + Virtual Management Technologies + Mark Gibson + markg&vmtech.net +32316 + From2 + Hyunwoo Park + airtrain&from2.co.kr +32317 + The National Library of Norway + IKT NETT + IKT-nett&nb.no +32318 + Tom Geudens Private Enterprise Limited + Tom Geudens + tom.geudens&gmail.com +32319 + Fernau Avionics Ltd + Alasdair McFarlane + alasdair.mcfarlane&fernau.com +32320 + Supermarchés Match + Vercoutère Morgan + mvercout&supermarchesmatch.fr +32321 + eSystems, Inc. + Vivek Sawhney + contact&esystems-inc.com +32322 + Conseils Oy SimplySecure + Teemu Rissanen + teemu.rissanen&conseils.fi +32323 + Filb.de + Matthias Butz + matze&filb.de +32324 + Certus Digital, Inc. + Vickie Eigel-Danielson + vickie.eigel-danielson&certusdigital.com +32325 + BadgerNet.net + James Kinneavy + kinneavy-j&sa.ucsb.edu +32326 + NewAge Ukraine Inc. + Sergey Reshetnikov + greyshaman&gmail.com +32327 + RF Industries + Bart Voskulen + bart.voskulen&rfi.com.au +32328 + legendsec Technology Co.Ltd + Sideng WANG + wangsd&legendsec.com +32329 + Shenzhen Guanri Technology Co., Ltd. + tommy Zhao + zxf770330&163.com +32330 + Olitechs + Olivier Brizard + brizard.olivier&gmail.com +32331 + Nimbus Data Systems, Inc. + Thomas Isakovich + tom&nimbusdata.com +32332 + Siscard S.A. + Ramiro Morales + morales&siscard.com.ar +32333 + IRRINGER.DE + Siegfried Irringer + pki&irringer.de +32334 + CGNet + Christian Garling + christian&cg-networks.de +32335 + Nippon Control System Corporation (NCS) + Hiroshi Nonaka + nonaka&nippon-control-system.co.jp +32336 + Micro Ocean Technologies Sdn Bhd + Chin Yee Wee + yw_chin&mocean.com.my +32337 + ISRACARD LDT + Eran Peer + epeer&isracard.co.il +32338 + United Biscuits + Steve Hackett + steve_hackett&biscuits.com +32339 + Schweitzer GmbH - Architektur & Generalplanung + Michael Schweitzer + m.schweitzer&schweitzerplan.de +32340 + DCTI + Catalin Petrescu + catalin&dcti.ro +32341 + Gangola Designs + David Gangola + ianapen&gangola.com +32342 + XORP, Inc. + Paul Zeldin + paul.zeldin&xorp.net +32343 + Shop Direct Home Shopping Limited + Carl Gogerty + Carl.Gogerty&ShopDirect.Com +32344 + advanscope Inc. + daisuke nakamoto + d-nakamoto&catv-ads.jp +32345 + RF Monolithics, Inc. + Greg Ratzel + gratzel&rfm.com +32346 + Digi Telecommunications Sdn Bhd + Suresh Ramasamy + sureshr&digi.com.my +32347 + SiteScanner Europe AB + Peter Germundsson + peter.germundsson&sitescanner.se +32348 + Cross Country Systems + Markus Södergren + oid&cc-systems.com +32349 + iRedMail + Zhang Huangbin + michaelbibby&gmail.com +32350 + Bedework.org + Mike Douglass + douglm&rpi.edu +32351 + Genscape, Inc + Mike Frohme + mfrohme&genscape.com +32352 + Rindels Information Management Systems (RIMS) + Rodney Rindels + rodney&rindels.net +32353 + Baidu + Lin Guo + guolin&baidu.com +32354 + vertico Software GmbH + Martin Schuhmacher + ms&starface.de +32355 + KOCAK Corp + AYDIN KOCAK + aydin.kocak&turkom.com.tr +32356 + Syslogic Ltd. + Zoltan Nemeth + zoltan.nemeth&syslogic.hu +32357 + Solo Networks Inc. + Xu Bingcan + bingcanxu&126.com +32358 + Epiphan Systems Inc. 
+ Pavel Zeldin + pzeldin&epiphan.com +32359 + UNIVERSITY OF UDINE + Renato Spoletti + renato.spoletti&uniud.it +32360 + Dneprooblenergo + Vitaliy Pidodnya + hawk&mail.dp.energy.gov.ua +32361 + CAIR + Eugene Atroshkin + eugene&cair.ru +32362 + IDT SPC + BEURTON Pierre + beurton&idt-fr.com +32363 + Engineering & Software GmbH + Sandro Exner + ViTex&es-manebach.de +32364 + Universidade Federal do Estado do Rio de Janeiro UNIRIO + Ademir Dias Lima + direcao_cpd&unirio.br +32365 + Mino Wireless USA Inc + Amardeep Nikumbh + amardeep&minowireless.com +32366 + Pragmatic Data + Gunther Schadow + oid&pragmaticdata.com +32367 + Communication Automation Corporation + John Sweeney + sweeney&cacdsp.com +32368 + Faculty of Information Technology - Nguyen Tat Thanh College + Le Tri Anh + khoa.cntt.ntt&gmail.com +32369 + Uptime-IT + Daniel Frederiksen + post&uptime.dk +32370 + University of Zielona Gora + Maciej Deorowicz + m.deorowicz&wnb.uz.zgora.pl +32371 + Complex IT Solutions + Traian Petrache + traian.petrache&complexitsolutions.ro +32372 + Web Sages + James S. White + whitejs&websages.com +32373 + paternostro.org + Ugo Paternostro + ugo&paternostro.org +32374 + RidgeviewTel LLC + Vince Jordan + vjordan&ridgeviewtel.com +32375 + Secorix, Inc. + George Parsons + gparsons&secorix.com +32376 + RAM Mobile Data + Bart Lubberdink + b.lubberdink&ram.nl +32377 + archIT + Thomas Besser, Elke Spanke + archIT&uni-karlsruhe.de +32378 + TB Solutions Security S.A. + Responsable de sistemas + iana&tb-security.com +32379 + Lake Michigan Credit Union + Greg Sopcak + admin&lmcu.org +32380 + Instituto Curitiba de Informática + Estevão Thomacheski Rodrigues + erodrigues&ici.curitiba.org.br +32381 + Milwaukee County Government + Keith Parkansky + dnsmgr&milwcnty.com +32382 + Mixtur Interactive, Inc. + Mark Eissler + itservices&mixtur.com +32383 + Iojik Inc. + Marc Guastavino + iojik_iana&simonide.org +32384 + Henchmonkey + John Cater + katre&henchmonkey.org +32385 + Beijing Bohui Science & Technology Co., Ltd + Jiabin Zhang + zhangjb&bohui.cn +32386 + Proxicast, LLC + Kevin Weaver + kweaver&proxicast.com +32387 + Etisalat + Vijaya Sarathi + LSarathi&etisalat.ae +32388 + The NetBSD Foundation + Alistair Crooks + agc&NetBSD.org +32389 + MILLENNIUM ARTS Group + Andreas Bartl + iana&rz.millenniumarts.org +32390 + ID7 Ltd. + Wallace Anderson + wallace.a&id-7.com +32391 + Systemhuset Episteme AS + Vladimir Petrov + admin&episteme.no +32392 + Secure-MSP GmbH + Christian Lotz + christian.lotz&securemsp.com +32393 + Onesto Services Oy + Olli Helenius + olli&onesto.fi +32394 + Solutions and Services, a.s. + Vit Moravec + vit.moravec&saservices.cz +32395 + SalamanderSoft Ltd + Richard Willis + iana&salamandersoft.co.uk +32396 + Ohio University + Ken Bailey + bailey&ohio.edu +32397 + iRobot Corporation + Daniel Allis + dallis&irobot.com +32398 + Eaton Vance Management + Matthew McNamara + mmcnamara&eatonvance.com +32399 + Nederland Live + Joost Rohde + info&nederlandlive.nl +32400 + WaveMaker Software, Inc. + Edward J Callahan + ecallahan&wavemaker.com +32401 + AMP Financial Services New Zealand + Michael Robinson + michael_robinson&amp.co.nz +32402 + Greenson Pty Ltd + David Green + david&greenson.com.au +32403 + China fuzhou evideo + Chunchao Wang + wangcc&star-net.cn +32404 + NetHawk Oyj + Timo Ainali + timo.ainali&nethawk.fi +32405 + Selectron Systems AG + Jonathan Orditz + jonathan.orditz&selectron.ch +32406 + Koncept Sp. z o.o. 
+ Piotr Romanczuk + piotr.romanczuk&koncept-studio.com.pl +32407 + GeCOSoft Consulting GmbH + Lueder Heinemann + heinemann&gecosoft.com +32408 + Daniel Andrade Costa Silva + Daniel Andrade Costa Silva + daniacs&gmail.com +32409 + Voxel dot Net, Inc. + James W. Brinkerhoff + jwb&voxel.net +32410 + RF Code Inc + Michael Primm + mprimm&rfcode.com +32411 + Swedish Orient Line + Catarina Rockstrand + catarina.rockstrand&sollines.se +32412 + Minot Enterprises, Inc. + Michael VanHecke + mjvanhecke&minotinc.com +32413 + ECbridges, Inc. + Benjamin Madsen + bmadsen&ecbridges.com +32414 + Veracity UK + Colin McLeod + colin.mcleod&veracityusa.com +32415 + Comax BV. + David Levi + comax.bv&gmail.com +32416 + DOCOUT S.L. + Roberto Gonzalez + soporte&docout.es +32417 + Kinamik Data Integrity S.L. + Mr. Nadeem Bukhari + nbukhari&kinamik.com +32418 + Marne & Elk Horn Telephone Co + Noel Leistad + noel&metc.net +32419 + Institut Straumann AG + Manuel Piessnegger + csoe&straumann.com +32420 + Payment Processing, Inc. + Kai Wang + kwang&paypros.com +32421 + Bertholdsson + Jörgen Bertholdsson + snmp&bertholdsson.com +32422 + Universidade Federal de Viçosa + Eduardo Jaime Quirós Batres + dojai&ufv.br +32423 + Rogaland fylkeskommune (County of Rogaland) + Frode Sjovatsen + frode.sjovatsen&rogfk.no +32424 + Harry Jackson - Boozled + Harry Jackson + harry&hjackson.org +32425 + CRW Data AB + Rickard Karlsson + rickard.karlsson&crw.se +32426 + Institute for Studies in Theoretical Physics and Mathematics (IPM) + Hessamaddin Arfaei + arfaei&ipm.ir +32427 + Bornis Group + Parham Ghaffarian + info&bornisgroup.com +32428 + Sepehr S. T. Co. Ltd. + Shahriar Pourazin + pourazin&sepehrs.com +32429 + University of Isfahan + Arash Givchi + arash.givchi&gmail.com +32430 + NADAM CO.,LTD + You-hyun Yang + fromasia&na-dam.com +32431 + Yukthi Systems Pvt. Ltd + Avinash Sultanpur + avinash.s&yukthi.com +32432 + LDAP Study Union + Ruixue Cheng + Forever_Chrx&hotmail.com +32433 + NetCeler + Julien Graglia + jgraglia&netceler.com +32434 + Telenor Telecom Solutions AS + Espen Alexander Strømme + Espen-Alexander.Stromme&telenor.com +32435 + TamoSoft Ltd. + Michael Berg + support&tamos.com +32436 + Intersec + Jean-Marc Coic + jean-marc.coic&intersec.com +32437 + Obsidian Research + Jason Gunthorpe + jgunthorpe&obsidianresearch.com +32438 + PROMONT Soluções Ltda. + Mr. Nilton Cezar Carvalho + nilton&promont.com.br +32439 + ShangHai YoSee Ltd. + JunWu + wujun&ruijie.com.cn +32440 + kinopsis.net + Lorenzo Pastrana + netman&kinopsis.net +32441 + Netwell Ltd. + Evgeny Chepusov + echepusov&netwell.ru +32442 + MPS.ETI.BR + Marcos Paulo Serafim + mps&mps.eti.br +32443 + Internet Research Institute, Inc. + Dai Nishino + dai&iri.co.jp +32444 + Fibercom Technologies (ShenZhen) CO.,LTD + Yong Xie + xieyong&fibercomtec.com +32445 + Lamoree Software + Joseph Lamoree + joseph&lamoree.com +32446 + Paessler AG + Dirk Paessler + info&paessler.com +32447 + CASERIS GmbH + Frank Offermanns + Frank.Offermanns&caseris.de +32448 + Addition IT Sweden AB + Jesper Lönnqvist + jesper.lonnqvist&addition-it.se +32449 + Cuatrecasas Abogados + Jaume Echevarria + syc&cuatrecasas.com +32450 + MICRON ENGINEERING DI MANCA MASSIMO + MASSIMO MANCA + massimo.manca&micronengineering.it +32451 + B-Source SA + Claudio Crosio + claudio.crosio&b-source.ch +32452 + Damaya UK + Adam Charlton + support&damaya.co.uk +32453 + Terrapin Associates + Paul Curtis + pfc&terrapin.com +32454 + Atrato, Inc. 
+ Product Support + pen&atrato.com +32455 + E-Tag, Inc + Richard Grant + info&e-tag.name +32456 + Freebox SAS + Maxime Bizon + mbizon&freebox.fr +32457 + Iptec, Inc. + Bent T. Jensen + bjensen&iptec-inc.com +32458 + Colorado Clinical Research ECG Core Lab + Ken Barz + CCRECGCorelab&cpcmed.org +32459 + HappyEnd + Lorenzo Pastrana + lorenzo.pastrana&happyend.fr +32460 + The Digital Freehold + Lew Pitcher + lew.pitcher&digitalfreehold.ca +32461 + Brinker Pharmaconsulting + Bjoern-Arne Meyn + bjoernmeyn&brinkerpharma.de +32462 + VIP Systems Co., Ltd. + Masato Ina + ina&vips.co.jp +32463 + Coalese Corporation + Andrzej Taramina + andrzej&coalese.com +32464 + IDH GmbH + Marcel Bucher + mbucher&idh.ch +32465 + All Options International B.V. + Thijmen Klok + this.is.a.spam.magnet.please.use.the.phone&alloptions.nl +32466 + guifi.net + Fundació Privada per a la Xarxa Oberta, Lliure i Neutral guifi.net + fundacio&guifi.net +32467 + SysDesign + Ralf Dorfner + postmaster&sysdesign-edv.de +32468 + Teamnett AS + Ruslan Valiyev + rva&teamnett.no +32469 + Network Design GmbH + Ovidio Raimondi + rai&ndm.ch +32470 + Forschungszentrum Jülich + Thomas Eickermann + th.eickermann&fz-juelich.de +32471 + Daitec GmbH + Steiner Daniel + daniel&daitec.it +32472 + Tikona Digital Networks + Tarun Kumar + tarun.kumar&tikona.in +32473 + Example Enterprise Number for Documentation Use + See [RFC5612] + iana&iana.org +32474 + Kialco Group + Patrick O'Donnell + odonnell.pb&gmail.com +32475 + Ambit Communicate + Rob Mitchelmore + oid.registry&midworld.co.uk +32476 + Bell's Booking Agency + Nathan Bell + nathanbell&netzero.net +32477 + ISAccountable + David Kaufman + support&isaccountable.com +32478 + RF Window Co., Ltd. + Julian Kim (Chang-Woo Kim) + knox0210&rfwindow.com +32479 + AppEx Networks + Michael Tong + xtong&appexnetworks.com +32480 + Ollix + Olli Wang + olliwang&ollix.com +32481 + Unitronics + Benny Magrafta + benny&unitronics.com +32482 + Qnective AG (formerly 'Qporter Schweitz AG') + David Saborido + david.saborido&qnective.com +32483 + Octrooicentrum Nederland + T.A. van der Laan + beheer&octrooicentrum.nl +32484 + Escatronic GmbH + Sven Packebusch + sven.packebusch&escatronic.de +32485 + Live Interactive S.A. + Paulo Crescionini + paulo.crescionini&liveinteractive.net +32486 + DCK Worldwide + David Hirsh + dhirsh&dckww.com +32487 + League of American Orchestras + Brian Tanaka + btanaka&field15.com +32488 + The College of New Jersey + Shawn Sivy + ssivy&tcnj.edu +32489 + Alan Savage + Alan Savage + pen&alansavage.co.uk +32490 + Pringo + Harvard Young + harv&pringo.com +32491 + Beijing UniTrust Tech. Service Co.,Ltd. + Changli Zhang + tsa&tsa.cn +32492 + maas-martin.nl + Mark Maas + mark&maas-martin.nl +32493 + CPS Color Equipment S.p.A. + Giuseppe Manicardi + elisabetta.maffei&cpscolor.com +32494 + Kwasniak + Janusz Kwasniak + janusz&kwasniak.de +32495 + Screwfix Ltd + Brett Hastings + brett.hastings&screwfix.com +32496 + MindLogix + Robert Mircea + robert.mircea&mind-logix.com +32497 + Centex Service Co. LLC. + Eric Romine + emailadministrator&centex.com +32498 + Clockwork Active Media Systems, LLC + Ben Beuchler + insyte&clockwork.net +32499 + TecCon, inc. + Bill Buhler + bill.buhler&teccon.net +32500 + Virtual Computer, Inc. + Alex Vasilevsky + alex&virtualcomputer.com +32501 + Telemisis Ltd + Glen Cumming + glen.cumming&telemisis.com +32502 + Pancetera Software, Inc. + Mitch Haile + snmp&pancetera.com +32503 + Purewire Inc. 
+ Jeff Hartley + jhartley&purewire.com +32504 + Washington Division of URS Corporation + Bruce LaMont Turner + lamont.turner&wgint.com +32505 + Aptix IT SRL + Javier Kohan + jk_iana&aptix.com.ar +32506 + KOR Networks + Kevin F. O'Riordan + kfor&compsoc.com +32507 + First Community Services, Inc + Riaz Maulvi + riaz.maulvi&fcserv.com +32508 + Siemon + Frank Velleca + frank_velleca&siemon.com +32509 + luminis + Hans Bossenbroek + hans.bossenbroek&luminis.nl +32510 + Watchdata + zhiming liu + zhiming.liu&watchdata.com +32511 + Fountainhead Investments Ltd. + IANA Role Account + iana-role&U10R.net +32512 + Zen Soluciones + Sébastien Cramatte + scramatte&zensoluciones.com +32513 + Buerologistik GmbH & Co. KG + Joris Fischer + joris.fischer&buerologistik.de +32514 + JSC Effortel + Blinov A. Sergey + sergey.blinov&effortel.ru +32515 + Rebtel Networks AB + Mathias Adler + mathias.adler&rebtel.com +32516 + Gisdata + Tihana Cajic + tihana.cajic&gisdata.com +32517 + Accsys GmbH + Eike Gehler + eike.gehler&accsys.de +32518 + VirtuOz + Tim Chen + tech&virtuoz.com +32519 + Wireless Mundi S.L. + Enrique Cimadevila Lage + ecimadevila&wirelessmundi.com +32520 + Anglo European Aviation AG + Charles Orford + c.orford&angloeuropean.com +32521 + kontur networx + Sascha Dorow + s.dorow&kontur-networx.de +32522 + Creare Inc. + William H. Finger + whf&creare.com +32523 + ACN - Agència Catalana de Notícies + Alex Argelaguet + iana&acn.cat +32524 + Zaxia + Sylvain St-Pierre + sylvain.st.pierre&zaxia.com +32525 + Gold Lasso, Inc. + Richard Kilcoyne + pen-admin&goldlasso.com +32526 + Object Partners Inc. + Steve McCoole + admin&objectpartners.com +32527 + Yugma Inc. + Tom Hudak + thudak&yugma.com +32528 + Acquia, Inc + Ethan Fremen + ethan&acquia.com +32529 + Warped Communications, Inc. + William Coldwell + billc&warped.com +32530 + uid0 Networks + Mario Iseli + mario.iseli&finecom.ch +32531 + Nokeena Networks Inc. + Kumar Narayanan + kumarn&nokeena.com +32532 + Regis Corporation + Mark Anderson + mark.anderson&regiscorp.com +32533 + Xue Can + Xue Can + xuecan&gmail.com +32534 + MB Connect Line GmbH + Siegfried Mueller + mueller&mbconnectline.de +32535 + ZT Systems + ZT Systems Contact + iana&ztsystems.com +32536 + Ergobyte Informatics + George Nikolaidis + gnikolaidis&ergobyte.gr +32537 + voidmage.net + Pascal Grün + pascal&voidmage.net +32538 + NetCare Service Co., Ltd. + Ryuji Kaneko + KanekoR&e-care3.net +32539 + Cinnober Financial Technology AB + Anders Lindgren + anders.lindgren&cinnober.com +32540 + Manlight + Alain REPINGON + alain.repingon&manlight.com +32541 + Alsion + Konstantin Stepanov + milezv&yandex.ru +32542 + SGIT (formerly 'Banco Credicoop') + Diego Barrios + diego.barrios&gmail.com +32543 + Raidbr Solucoes em Informatica LTDA + William A. Knob + william.knob&raidbr.com.br +32544 + Kentucky Department of Education + Kenneth Brakefield + netadmin&education.ky.gov +32545 + Army & Air Force Exchange Service + Tyna Porter + Porterty&aafes.com +32546 + Valcom Inc. 
+ Bill Kowalkowski + bkowalkowski&valcom.com +32547 + SenSage, Inc + Eric Karlson + eric.karlson&sensage.com +32548 + University of North Carolina - General Administration + Steven Hopper + regmaster&northcarolina.edu +32549 + Govierno de la Provincia de Corrientes + Matias Soler + gnuler&gmail.com +32550 + COVETEL R.S: + Jesús Lara + jesuslara&covetel.com.ve +32551 + Valunex + Claudia Vanesa Ramirez + claudia.ramirez&valunex.com.ar +32552 + FreeIT + Heiko Bernloehr + hb7&gmx.net +32553 + Hopewiser Ltd + Philip Whineray + pdw&hopewiser.com +32554 + Vincent Wang + Vincent Wang + vincentwhc&yahoo.co.nz +32555 + RND.fr + Guillaume du Bourguet + gdubourguet&rnd.fr +32556 + Bruno Medici Consulting + Bruno Medici + iana&bmconseil.com +32557 + Hungarian Chamber of Civil Law Notaries (MOKK) + Zoltan Naszali + naszali&mokk.hu +32558 + Kleinhenz Elektronik GmbH + Simon Richter + Simon.Richter&kleinhenz.com +32559 + Paradigma Tecnologico S.L. + Emilio Jose Mena Cebrian + emena&paradigmatecnologico.com +32560 + Tsubata Engineering Co., Ltd. + Tetsuya Nishiura + t.nishiura&aios.jp +32561 + E.D.S.I. Trend Argentina S.A. + Maximiliano Cittadini + maximilianof&trendargentina.com.ar +32562 + Ambrado Inc. + Fabrice Deyber + fabrice.deyber&ambrado.com +32563 + DRK gemeinnützige Krankenhaus GmbH Sachsen + Stephan Frenzel + frenzel.stephan&drk-chemnitz.de +32564 + Asidev s.r.l. + Giacomo Bagnoli + info&asidev.com +32565 + Neissware GmbH + Harald Neiss + iana-pen&neissware.de +32566 + I2C, Industrie Innovation Conseil + Pascale Delmas + pascale.delmas&i2c-conseil.com +32567 + Alastria Networks Limited + Peter Wood + p.wood&alastria.net +32568 + Integral + Ing. Bernhard Nickel + bernhard.nickel&integral.at +32569 + Gayux + Hinfray Romain + contact&gayux.fr +32570 + Hillyton Electronic Technology Co., Ltd. + Jiang Sanyi + support&hillyton.com +32571 + Peering Portal, Inc. + donghan, kim + khan&peeringportal.com +32572 + Ciavox + Marcio Luis Gunther + mgunther&oninet.com.br +32573 + ONI-Net C&S + Marcio Luis Gunther + mgunther&oninet.com.br +32574 + DSIT - ReimsMetropole + Hamid ZINE + hamid.zine&reimsmetropole.fr +32575 + Daniel Palffy + Daniel Palffy + dpalffy&rainstorm.org +32576 + Aydayev's Investment Business Group + Eldar Aydayev + eldar&aydayev.com +32577 + Proximetry, Inc. + Walter Buga + wbuga&proximetry.com +32578 + GeekDude.com + Chuck Hein + chein.snmp&geekdude.com +32579 + Gianluca D'Andrea + Gianluca D'Andrea + gianlucadandrea&gmx.com +32580 + HDT Hanseatische Datentechnik GmbH + Michael Zastrow + michael.zastrow&hdt.de +32581 + REMOTEK CORPORATION + hwal hu + service&jplop.twbbs.org +32582 + N2 Networks + Bradley Kennedy + bk&nn2.us +32583 + Litespan Inc. + Alex Ferdman + alex&litespan.net +32584 + DesignArt Networks + Avner Aloush + avnera&designartnetworks.com +32585 + VIGITRONIC + Pascal MICOUD + contact&vigitronic.com +32586 + Instituto Venezolano de Investigaciones Cientificas + Jesus E. Quiroz S. + quirozsjesuse&gmail.com +32587 + WiKID Systems, Inc. + William Owen + nowen&wikidsystems.com +32588 + MVLogix + Markus Veutner + mv&mvlogix.de +32589 + Effinger + Markus Effinger + domains&effinger.org +32590 + Axiom Networking + Angus Jordan + angus.jordan&axiomnetworking.ca +32591 + Deltares + Bart Jan Kelter + BartJan.Kelter&Deltares.nl +32592 + Pintu + Tom Willis + Tom.Willis&Pintu.com +32593 + Crystal Media Inc. + Hank Chang + hank&crystalmedia.com.tw +32594 + Invensys Robertshaw Industrial Products + Benjamin N. Lease 
+ ben.lease&invensyscontrols.com +32595 + Nextgen Networks + Sherryn Bryden + sherry.bryden&nn.com.au +32596 + PLATH PROCITEC Suisse AG + Sascha Clavadetscher + iana&pps-ag.ch +32597 + Double Negative + James Braid + jamesb&dneg.com +32598 + Providence Life Services + John K. Hohm + jhohm&provinet.com +32599 + SUPEMIR + Ngah Moudjeu Roussos + moudjeu&yahoo.fr +32600 + ClearCrypt Inc. + Ramana Devarapalli + ramana&clearcrypt.com +32601 + GenCore Candeo, Ltd. + Phil Burks + genesis&genesisworld.com +32602 + Faculdade SEAMA + Lucien Rocha + lucien&seama.edu.br +32603 + DVS Solutions + Dave Brockman + dave&dvstn.com +32604 + FRC Component Products + Ahmet B Basagalar + ahmet&thebluezone.com +32605 + Structual Biology Research Center,High Energy Accelerator Research Organization,KEK + Honda,Nobuo + nhonda&post.kek.jp +32606 + Crystaline Infotech + Tushar Barhate + ulkal&crystalindia.com +32607 + Goldan + Tomasz Woźniak + goldan&hot.pl +32608 + Federal Home Loan Bank + Stephen Morrow + smorrow&fhlbdm.com +32609 + Development Consultants Incorporated + Kauser Kabealo + kauser&devconinc.com +32610 + Jilin University Information Technologies Co., Ltd. + Jilin University Information Technologies Co., Ltd. + yuankai_zhao&jit.com.cn +32611 + IMage Manipulation Systems Inc + Larry Linde + linde&imageman.com +32612 + MORISAKI TAKATOSI + MORISAKI TAKATOSI + t-morisaki&muj.biglobe.ne.jp +32613 + California State Legislature + Terrie Moon + terrie.moon&LC.CA.GOV +32614 + YOU Telecom India Pvt. Ltd. + HARINDRA AKBARI + harindra.akbari&youtelecom.com +32615 + ELPROC sp. z o.o. + Tomasz Lobejko + serwis&elproc.com.pl +32616 + OpenSourceXpress Ltd. + Jiang Xin + worldhello.net&gmail.com +32617 + Polytechnical Engineering College in Subotica + Dr. Szilveszter Pletl + pszilvi&vtssu.rs +32618 + Fiok i Wspólnicy Sp. z o.o. + Marcin Bis + pen&fiok.pl +32619 + Movik Networks, Inc. + Pramod Kalyanasundaram + pramod&movik.net +32620 + AnueSystems + Vivi Zhang + vzhang&anuesystems.com +32621 + Franklin University + Alex Kelly + certs&franklin.edu +32622 + CACE Technologies + Gerald Combs + info&cacetech.com +32623 + Kuni Automotive + David Valdez + david.valdez&kuniauto.com +32624 + Khomp Ind. e Com. Ltda + Rodolfo Leffa + rodolfo&khomp.com.br +32625 + GoPC Pty Ltd + Chris Hoy Poy + noc&gopc.net +32626 + Security Cube + Kashif Ali + kali&securitycube.net +32627 + Emageon, Inc. + Razvan Atanasiu + razvan.atanasiu&emageon.com +32628 + AXIOMTEK Co., Ltd. + Alex Pan + alexpanhl&gmail.com +32629 + DIGI Ltd + Akos TOROK + akos.torok&digi.hu +32630 + Trezorix + Sander van der Meulen + sander&trezorix.nl +32631 + LIYO Technology Co. Ltd. + liyotech + liyotech&liyotech.com.tw +32632 + Excluvior + Sascha Dobbelaere + sascha.dobbelaere&excluvior.eu +32633 + Ors TISZAY + Ors TISZAY + molyza&gmail.com +32634 + Bluetick, Inc. + Luis Rodriguez Chiappetta + lchiappetta&bluetickinc.com +32635 + Sisters of Mercy Health Systems + Jeff McDonald + jeff.mcdonald&mercy.net +32636 + ITS Electronics Inc + Gershon Deutsch + gdeutsch&itselectronics.com +32637 + Perceptech Inc. + Patrice Fournier + patrice.fournier&perceptech.ca +32638 + iStor Networks Inc. + Mark ODell + modell&istor.com +32639 + Tribunal Electoral del Poder Judicial de la Federación + Dirección de Seguridad Infomática + seguridad.informatica&te.gob.mx +32640 + NeoCatena Networks Inc. 
+ Lukas Grunwald + rnd&neocatena.com +32641 + Security Services Framework + Harjinder Singh Takher + harjinder.takher&gmail.com +32642 + Föreningen DIS + Bo Kleve + bok&dis.se +32643 + Krakowskie e-Centrum Informatyczne JUMP + Jakub Jelen + admin&kei.pl +32644 + BWS Consultores C.A. + Victor Medina + victor.medina&bws.com.ve +32645 + Coty Inc. + Silvano Nanni + silvano_nanni&cotyinc.com +32646 + Pneumatic Tube Products Co, Inc + Brian Knittel + brian&quarterbyte.com +32647 + Federal Defender Office + Brian Brunelle + brian_brunelle&fd.org +32648 + Vortex Technology Networks + Steve McMaster + steve-iana&h4xx0r.us +32649 + Beijing Raycomm Digital Technology Co.,LTD + Zhipeng Lu + bclzchina&yahoo.com.cn +32650 + Namtrac Kft. + Csaba Toth + info&namtrac.hu +32651 + Newtec Communications GmbH + Dirk Fleischer + pen&tellitec.be +32652 + Machine VFX + Darren Hildebrand + sysadmin&franticfilms.com +32653 + Piksel Ltd. + GIS - Global Infrastructure Design + gis-gid&piksel.com +32654 + Institut für Experimentelle Kernphysik, KIT + Thomas Kuhr + admin&ekp.uni-karlsruhe.de +32655 + Classics Animated + Adrian Hill + adrian.hill&classicsanimated.com +32656 + Hypios + Praden Florian + florian.praden&hypios.com +32657 + particle dynamics GmbH + Martin Wilck + martin.wilck&particle-dynamics.de +32658 + Iconoclast IT + Fabio Origlia + fabio.origlia&iconoclast.it +32659 + SAT-TRAKT d.o.o. + Aleksandar Pejic + aleksandar.pejic&sattrakt.com +32660 + Joachim Keltsch + Joachim Keltsch + joachim&keltsch.net +32661 + MITSUBISHI ELECTRIC ENGINEERING CO.,LTD. + Tachizaki Kenji + Tachizaki.Kenji&ma.mee.co.jp +32662 + NTTME + CHIBA SHIGERU + schiba&ntt-me.co.jp +32663 + IFIN Sistemi srl a socio unico + Antonio Taschin + antonio.taschin&ifin.it +32664 + DSR Information Technologies Ltd. + József Bodnár + bodnar&dsr.hu +32665 + Veccsa S.A. + Pablo Ybalo + pybalo&veccsa.com +32666 + Integrated Healthcare Solutions + Gary Hamilton + ghamilton&ihsolutions.org +32667 + Ultrablend LLC + Spencer Berg + sberg&ultrablend.com +32668 + IPFire.org + Michael Tremer + m.s.tremer&gmail.com +32669 + consistec Engineering & Consulting GmbH + Martin Nicolay + iana&consistec.de +32670 + Consilience Software + Howard Hsieh + support&consiliencesoftware.com +32671 + Graspi + Urs Spitzer + urs&graspi.ch +32672 + betabuild.net + Steve Moore + pen.iana.org&betabuild.net +32673 + WAB Sicherheitssysteme GmbH + Joachim Klingler + klingler&wab-sicherheitssysteme.de +32674 + GeekNode + Alexandre Legrix + alex&bragon.info +32675 + Cracow University of Economics + Bartłomiej Kołodziej + kolodzib&uek.krakow.pl +32676 + Softwarehuset.DK + Morten Stigaard Laursen + morten&softwarehuset.dk +32677 + EMI Music Ltd. + Nirav Patel + nirav.patel&emimusic.com +32678 + Framestore Ltd + Stephen Willey + stephen.willey&framestore.com +32679 + Zinc Solutions + Antoine Reid + areid&zinc-solutions.com +32680 + Rentabiliweb Group + Julien Mangeard + admin&rentabiliweb.com +32681 + Atwal Inc + jaspreet atwal + jaspreet.atwal&gmail.com +32682 + Text Team GmbH & Co. KG + Thorsten Marquardt + Marquardt&text-team.de +32683 + TOKYO KEIKI INC. + Strategic Information Systems Offiece + tonet-mgr&tokyo-keiki.co.jp +32684 + Healthtrans LLC + Scott Seekamp + sseekamp&healthtrans.com +32685 + Redefine Sp. z o.o. 
+ Łukasz Mierzwa + lukasz.mierzwa&redefine.pl +32686 + Holly Corporation + Jay Blanchard + jay.blanchard&hollycorp.com +32687 + Digital Finance SPDA + Denis Benoit + denis.benoit&digitalfinance.ca +32688 + Gigle Semiconductor + Daniel Robles + dani.robles&gigle.biz +32689 + MWJ Computing + Matthew W. Johnson + networkoperations&mwjcomputing.com +32690 + Comrex Corporation + Yeasah Pell + yeasah&comrex.com +32691 + eXMeritus Software Federal Systems, Inc + Robin Alman + support&exmeritus.com +32692 + Amcom Telecommunications Ltd + Ian Bishop + hostmaster&amnet.net.au +32693 + E-számla Zrt. + Adam Popper + adam.popper&e-szamla.hu +32694 + ComputerPride + Francis Kamuyu + kamuyu&computer-pride.co.ke +32695 + VOZPP + Andrei Nikodimov + postmaster&vozpp.vsi.ru +32696 + ZAKLAD PRODUKCYJNY TEL-KA S.C. + Piotr Zwierko + tlk&tel-ka.com.pl +32697 + ACO Serverin Ahlmann GmbH & Co. KG + Ehrhardt Petter + epetter&aco.com +32698 + SE-Elektronic GmbH + Munz, Claudia + munz&se-elektronic.de +32699 + Thales Alenia Space France + Herve Dubosq + herve.dubosq&thalesaleniaspace.com +32700 + INBUSS Informatikai Szolgáltató és Kereskedelmi Kft. + PÁLFALVI Tamás + tamas.palfalvi&inbuss.hu +32701 + Loyalty Partner GmbH + Dominik Pekarek + dominik.pekarek&loyaltypartner.com +32702 + Joint Stock Company "NPO RusBITech" + Vladimir Podobaev + rusbitech_cct&mail.ru +32703 + Centre de Recherche Henri Tudor - SANTEC + Uwe Roth + uwe.roth&tudor.lu +32704 + JSC ErshovTelecom + Stanislav Scherbakov + stok&artkamneva.ru +32705 + dSys e.K. + Andreas Becker + andreas.becker&dsys.de +32706 + Reech Capital Ltd + Alessandro Evangelista + alessandro.evangelista&sungard.com +32707 + Pi Consulting (UK) Limited + Christopher Henson + cth&piconsulting.co.uk +32708 + Dimaco Systems S.R.L. + Vassilios Zafiropoulos + vassilios.zafiropoulos&dimacosystems.com +32709 + AirTies + Beri Levi + beri&airties.com +32710 + Lojas Renner S.A. + Claiton Alexandre Vieira + claiton&lojasrenner.com.br +32711 + fides AG + Thomas Koch + koch&fides.ag +32712 + Linden Lab + Paul Heffner + heff&lindenlab.com +32713 + Flox-arts.net + Florent MONTHEL + noc&flox-arts.net +32714 + IPS MeteoStar + Steven Packard + spackard&meteostar.com +32715 + IBM Managed Security Services + Marc Mamane + mmamane&us.ibm.com +32716 + Alea Soluciones SLL + Eduardo Ferro Aldama + eferro&alea-soluciones.com +32717 + Gemintek Corporation + Jesse Huang + jesse.huang&mail.gemintek.com.tw +32718 + Southwestern University of Finance and Economics + Song Xiaohui + sxh&swufe.edu.cn +32719 + 3Nokta Bilisim Teknolojileri Ltd. + E.Kaan Adanali + yardim&3nokta.net +32720 + CVR Transport Kft + Katalin Gallwitz + katalin.gallwitz&cvrtransport.hu +32721 + RTPHYS + Erik Roelofs + admin&rtphys.net +32722 + MAASTRO + Erik Roelofs + erik.roelofs&maastro.nl +32723 + Pacemaker Cluster Project + Andrew Beekhof + andrew&beekhof.net +32724 + AIRDATEC TECNICOS, S.L. + LUIS MIGUEL PEDRAZA + lm.pedraza&airdatec.es +32725 + Trans Iceland + Anna Jonna Armannsdottir + annajonna&gmail.com +32726 + Tatis + Christhonie Geldenhuys + christhonie.geldenhuys&tatis.com +32727 + Webtech as + Jan Helge Salvesen + Jan.Helge.Salvesen&webtech.no +32728 + Grid Dynamics Consulting Services, Inc + Edward Tregubov + techsupport&griddynamics.com +32729 + EMS Satcom + Cheryl Hyslop + hyslop.c&emssatcom.com +32730 + Mobile For You - M4U + Eduardo Paredes + eduardo.paredes&m4u.com.br +32731 + obviously-nice + Heinz-M. Graesing + heinz-m&web.de +32732 + W.K. Kellogg Foundation 
+ Technology Support + TechSupport&wkkf.org +32733 + Phybridge Inc + Oliver Emmanuel + oliver.emmanuel&phybridge.com +32734 + City of Seattle + John Jacobson + John.Jacobson&seattle.gov +32735 + SecurityNet.cz s.r.o. + Arne Rusek + hukot&hukot.cz +32736 + SHD System-Haus-Dresden GmbH + Steffen Langer + steffen.langer&shd-online.de +32737 + Meghnaben Daxeshbhai Patel + Meghnaben Daxeshbhai Patel + meghna_vapi&yahoo.com +32738 + Delta Nusantara Networks Co., Ltd. + Dedy Deng + dedy&delta.net.id +32739 + Saimaan Lehtipaino Oy + Timo Pelkonen + timo.pelkonen&sanoma.fi +32740 + Lehtikanta Oy + Timo Pelkonen + timo.pelkonen&sanoma.fi +32741 + Savon Paino Oy + Timo Pelkonen + timo.pelkonen&sanoma.fi +32742 + Hämeen Paino Oy + Timo Pelkonen + timo.pelkonen&sanoma.fi +32743 + Sanomala Oy + Timo Pelkonen + timo.pelkonen&sanoma.fi +32744 + KAMEKO Bt + Mester József + mesterjoco&gmail.com +32745 + DURAG data systems + Dipl.-Ing. Holger Frantz + holger.frantz&durag-data.de +32746 + Instituto de Biología, Universidad Nacional Autónoma de México + Sergio Luis Chavarria Cisneros + schavarria&ibiologia.unam.mx +32747 + telenet AG Rhein-Main + Joachim Majunke + info-ipi&telenet-ag.de +32748 + KolosStudio + Kolos Andrew + sirkonst&kolosstudio.ru +32749 + TrustMission + Raymond de Bernis + rdb&trustmission.com +32750 + LigoWave + Steven Ho + it&ligowave.com +32751 + Solytron + Vladimir Ivanov + Vladimir_Ivanov&solytron.bg +32752 + John Will Motors + Paul Murphy + paul&johnwill.co.uk +32753 + Omniture Inc. + Network Operations Center + noc&omniture.com +32754 + Dexcel Electronics Designs Pvt Ltd + Amit Sinha + amit.sinha&dexceldesigns.com +32755 + Wesabe, Inc + Sam Quigley + sq&wesabe.com +32756 + Creative Solutions Laboratories Sp. z o.o. + Borys Stanczew + Borys.Stanczew&CSLab.com.pl +32757 + ChartConnect Inc. + Josh Plunkett + josh&chartconnect.com +32758 + ANAGKY BVBA + Willem Verbruggen + willem.verbruggen&anagky.be +32759 + PRIM'X Technologies + BINET SERGE + serge.binet&primx.eu +32760 + Koliada + Guy McIlroy + guy&koliada.com +32761 + Deliberant + Matt Hardy + mhardy&deliberant.com +32762 + AKSolutions + Hal Asbridge + halasbridge&aksolutions.com +32763 + YMAGIS + Olivier Lemaire + olivier.lemaire&ymagis.com +32764 + inTechnology PLC + Dale Douglas + dale.douglas&intechnology.com +32765 + WebHost Asia Pacific Pty Ltd + Peter Bos + peter.bos&webhostap.com +32766 + Capricode + Vesa Järvitalo + hostmaster&capricode.com +32767 + JPPA Gerenciamento e Projetos S/S LTDA. + Leonardo Agustini + lda&jppa.com.br +32768 + Educational Service Unit 6 + Administrator + vexie&esu6.org +32769 + BIZICLE + Stefan Pukallus + spukallus&yahoo.de +32770 + Hoedl-Online-Systemtechnik + Friedrich Hoedl + systemtechnik&hoedl-online.de +32771 + Fujitsu Tohoku Systems Ltd. + Kazuo Takahashi + tohoku-ipwatcher&cs.jp.fujitsu.com +32772 + Declera Ltd. 
+ Alek Paunov + alek&declera.com +32773 + Jeremy Self + Jeremy Self + finder83&gmail.com +32774 + wei fang lesheng electronic CO.,LTD + sun rui jun + srj_01&sina.com +32775 + Gigaset Communications + John Spence + Spence.John&gigaset.com +32776 + Harry J.E Day + Harry J.E Day + harry&dayfamilyweb.com +32777 + BowBridge Software + Joerg Schneider-Simon + joerg.schneider-simon&bowbridge.net +32778 + Raiffeisen Bank Hungary + Attila Klapper + attila.klapper&raiffeisen.hu +32779 + BBS AS + Jan Helge Salvesen + Jan.Helge.Salvesen&bbs.no +32780 + synergetic AG + Andreas Kippnick + a.kippnick&synergetic.ag +32781 + Groupe Chevalier + Julien Dufourcq + hostmaster&chevaliergestion.fr +32782 + Conseil Général du Morbihan + Deredec Nicolas + nicolas.deredec&cg56.fr +32783 + ECRIN SYSTEMS + Bruno BERUARD + bruno.beruard&ecrin.com +32784 + Amplia Soluciones S.L. + Javier Martinez + javier.martinez&amplia.es +32785 + PWI Consultants + Pierre Wieser + itadmin&wieser.fr +32786 + MasonRay Networks + Tim Ray + tim.ray&masonray.com +32787 + Aprend Technology + Robert Alvarez + ralvarez&aprendtech.com +32788 + Ennoris Trans + Evgenij Kravtsov + ejkrav&gmail.com +32789 + Institute of Economics, Management and Law + Timur Batyrshin + batyrshin&ieml.ru +32790 + Raz-Lee Security + Eli Spitz + eli.spitz&razlee.com +32791 + ICSMED AG + Joerg Riesmeier + icsmed-iana&riesmeier.de +32792 + Atex Group Ltd. + Keith Judson + keith.judson&atex.com +32793 + Shenick Network Systems + Kevin Glavin + kevin.glavin&shenick.com +32794 + LuTong Optoelectronic Technology Co.,Ltd + Samlee + Samleever&163.com +32795 + Westnet Ptd Ltd + Jay Turner + mta.admin&staff.westnet.com.au +32796 + i-STS Manufactuiring + Robert Heezeman + roberth&staticpower.com.au +32797 + Asempra Technologies + Parimal Puranik + iana&asempra.com +32798 + Netlinx, Inc. + Patrick H. Piper + ppiper&netlinxinc.com +32799 + www.thingall.com + Bing Zhou + zhoub1631&163.com +32800 + www.neuq.edu.cn + Bing Zhou + zhoub1631&163.com +32801 + ObjectFX + Robert Streich + robert.streich&objectfx.com +32802 + Inuk Networks + Aled Treharne + aled.treharne&inuknetworks.com +32803 + Research Institute for Linguistics, Hungarian Academy of Sciences + Csaba Oravecz + oravecz&nytud.hu +32804 + Unify Software and Solutions GmbH & Co. + Andrew Hutton + andrew.hutton&unify.com +32805 + CR Host + Jose Romeiro Filho + romeiro&romeiro.info +32806 + scientia.net + Christoph Anton Mitterer + calestyo&scientia.net +32807 + TVACE + Sang Lyoul, Lee + lyoul&tvace.co.kr +32808 + SynTech Soc. Coop. a R.L. + Franco Marchesini + franco.marchesini&gmail.com +32809 + Evalesc GmbH + Thomas Rohde + rohde&evalesc.de +32810 + Lancashire Constabulary + Chris Ayres + chris.ayres&lancashire.pnn.police.uk +32811 + Gemnet bv + Marcel Nijenhof + m.nijenhof&kpn.com +32812 + ICFO + Gonçal Badenes + goncal.badenes&icfo.es +32813 + GXPCONSULT LIMITED + Raj K. Bains + pen&gxpconsult.eu +32814 + RealTime7 Inc. + Dwight Kulkarni + dwight&realtime-7.com +32815 + Consert, Inc + Bill Boswell + bboswell&consert.com +32816 + AREYOUNET.COM + Yann Zinenberg + yzinenberg&areyounet.com +32817 + Shifted Labs + Jake Douglas + jakecdouglas&gmail.com +32818 + Tecton Limited + Andrew Wood + woody&tecton.co.uk +32819 + Infonet Network Systems + Anishkumar Kurup + anish&infonet.co.in +32820 + Mundo Linux + Vitor Rentes Pimentel + vitor_rentes_pimentel&hotmail.com +32821 + martin mollet informatik + Martin Mollet + software&mollet.ch +32822 + PGS + Paul Stenius + pauls&cs.lamar.edu +32823 + Hextra Digital, S.L.L. 
+ Xavier Rubio Jansana + xrubio&hextra-digital.com +32824 + Free Open Source Solutions Inc. + Karoly Molnar + kmolnar&eseri.net +32825 + LS cable + lee joo sam + leejusam&lscable.com +32826 + Tongji University + Lu Hai + lh&tongji.edu.cn +32827 + Siemens I MO TS + Guenther Sing + guenther.sing&siemens.com +32828 + Guntermann & Drunck GmbH Systementwicklung + Dirk Stieler + snmp-contact&gdsys.de +32829 + Marc Cain GmbH + Steffen Hessing + steffen.hessing&marc-cain.de +32830 + Questora Software, CJSC + Edward Tregubov + techsupport&griddynamics.com +32831 + IDS + Harald Härtl + snmpadmin&ids.de +32832 + Optech Fibres Limited + Jason Mellors + support&optechsupport.co.uk +32833 + Université Lille 2 Droit et Santé + François Périchon + francois.perichon&univ-lille2.fr +32834 + ANTALIS + Steve Houle + steve.houle&antalis.com +32835 + Data Driven Logistics Limited + Gary Aston + gary.aston&datadrivenlogistics.com +32836 + C2SAT communications AB + Andreas Nilsson + andreas.nilsson&c2sat.se +32837 + Lohmann GmbH&Co.KG + Edgar Ehrhardt + Edgar.Ehrhardt&lohmann-tapes.com +32838 + University of AJK CS&IT Campus Mirpur azad Kashmir + Usman Ali + 4usmanali&gmail.com +32839 + olea medical + Faycal Djeridane + faycal.djeridane&olea-medical.com +32840 + Krum Independent School District + Greg Tappert + technology&krumisd.net +32841 + Constant Contact + Jason Parsons + iana&constantcontact.com +32842 + Seidl KEG + Rainer Seidl + rs&sibris.com +32843 + Information Computing Center of Ministery of Communications and Information Technologies + Araz Mirzayev + oid-admin&rabita.az +32844 + die-rudolphs + Michael Rudolph + michael.rudolph&die-rudolphs.de +32845 + Metric Systems Corporation + John Clark + jclark&metricsystems.com +32846 + Cougaar Software, Inc. + William Drew + wdrew&cougaarsoftware.com +32847 + DataStress + Mario Huizinga + snmp&datastress.com +32848 + Arquila Limited + David Ross + david&arquila.com +32849 + Active Broadband Neworks Inc + Adam Dunstan + adam&a-bb.net +32850 + C4i Pty Ltd + Darren Armstrong + darren.armstrong&c4i.com +32851 + Aytel + Adrian Penny + adrian&aytel.co.uk +32852 + Towerstream + Chris Manigan + cmanigan&towerstream.com +32853 + Pixel8 Networks + Michael Lin + mlin&pixel8networks.com +32854 + OmniGlobe Networks, Inc. + Zhixue Liu + aliu&omniglobe.com +32855 + Total Transaction Management, LLC + Benson Lim + benson.lim&ttmsolutions.com +32856 + JL Ingram & Associates CC T/A The Hardware Junction + Johnathan Ingram + jingram&hardwarejunction.co.za +32857 + Openware + Jan Rozema + jan&openware.nl +32858 + nerdnet.ca + Colin R Hahn + colin.hahn&sasktel.net +32859 + Escuela Politécnica Nacional + Diego Rocha + diegojavier95&hotmail.com +32860 + Expert Systems (Pvt.) Ltd. + Mubbashir Ahmad + mubbashir&expertsystems.net +32861 + Shenzhen LinkPower Network Systems Co.,LTD. + William Lou + williamloucn&gmail.com +32862 + Böning IT-Service + Jörn Peter Böning + dspx&gmx.net +32863 + OmniTI Labs + Theo Schlossnagle + jesus&omniti.com +32864 + Workflow.com, LLC + Packy Hyland + oid&workflow.com +32865 + Heol Design + Philippe Irrien + pirrien&heoldesign.com +32866 + rockyluke + Antoine MILLET + antoine.millet&gmail.com +32867 + University of Economics Prague + Petr Hofmann + petr.hofmann&gmail.com +32868 + Diamante Technology Advisors, Inc. 
+ Ken Savela + ksavela&diamanteconsulting.com +32869 + Gravitas MM Ltd + Dariush Marsh-Mossadeghi + dariush&gravitas.co.uk +32870 + Fujian Sunnada Communication Co., Ltd + yun chen + 12310cy&163.com +32871 + BUPT Incowin Information Communication Technology Institute Ltd. + LiuYong + liuyong&incowin.com.cn +32872 + Royal Alberts Broadcasting Network + Subu Ayyagari + subu.ayyagari&gmail.com +32873 + KONE Corporation + Jani Hautakorpi + jani.hautakorpi&kone.com +32874 + Novatium Solutions Private Limited + Venu Gopala Raju Kanumuri + venu&novatium.com +32875 + vedicis + Francois-Frederic Ozog + ff&vedicis.com +32876 + Marian Fotul + Marian Fotul + oid.admin&fotul.sk +32877 + Radio Amateurs Against Packet Radio + Vesa Tervo + keymaster&grim.raapr.org +32878 + MediaPod SARL + Pascal JARDE + iana&mediapod.net +32879 + Eugene 4J School District + Troy Knabe + knabe&4j.lane.edu +32880 + Knerer & Lang Architekten GmbH + Alvaro Aguilera + alvaro.aguilera&herbstwest.de +32881 + BeeMobile + Mark Lever + lever&beemobileusa.com +32882 + GameAccount Global Ltd + Sasedharen Chinnathambi + schinnathambi&gameaccount.com +32883 + One23 Designs + Mark Lever + lever&one23.org +32884 + Chemnitzer Verlag und Druck GmbH & Co. KG + Peter Nawroth + peter.nawroth&freiepresse.de +32885 + H-Net AG + Marcel Kleiber + marcel.kleiber&h-net.ch +32886 + Restorepoint (formerly 'TADASoft Ltd') + Riccardo Valente + riccardo&restorepoint.com +32887 + Joseph King ICT Consultants + Yusuf Rajah + yusuf&josephking.it +32888 + Bizsensors + Soumadeep Sen + soumadeep&bizsensors.com +32889 + The College at Brockport State University of New York + Kevin Colagio + kcolagio&brockport.edu +32890 + Bigred Solutions Ltd + Stephen Solberg + stephensolberg&bigredsolutions.com +32891 + Jangwhan Kim + Jangwhan Kim + kim.jangwhan&gmail.com +32892 + caplog-x GmbH + Christian Regner + christian.regner&caplog-x.de +32893 + BJ's Deisgn & Consulting, Inc. + Colenso W Price + cw&bjsdesign.com +32894 + Touro College + Yitzchak Schaffer + yitzchas&touro.edu +32895 + BRICOM Technology Co., Ltd. + Yi Huaixun + yihuaixun&sina.com +32896 + Jarn AS + Laurence Rowe + lrowe&jarn.com +32897 + Megware Computer GmbH + Robert Hommel + robert.hommel&megware.de +32898 + Spectrum Health + Patrick J. O'Hare + informationsecurity&spectrum-health.org +32899 + Quist Ltd. + David D'Acquisto + iana&quist.ch +32900 + Dimensional Insight + Stan Zanarotti + srz&dimins.com +32901 + SAGI-B Expert Group Sp. z o.o. + Konrad Sagala + konrad.sagala&sagi-b.pl +32902 + Verizon + Christopher M. Gonzalez + Chris.Gonzalez&verizon.com +32903 + Lincoln Laboratory + Edward Kuczynski + edk&ll.mit.edu +32904 + NUMLOG + Francis GASCHET + fg&numlog.fr +32905 + Cognito Ltd + Mr A Potter + andy.potter&cognitomobile.com +32906 + Rorotika Technologies + Hugo Mokken + pen-iana&rorotika.com +32907 + NPO RTC, Ltd. + Sergey Koupreyenko + ksw&npo-rtc.ru +32908 + ADVENAGE GmbH + Dr. Lutz Grüneberg + lutz.grueneberg&advenage.com +32909 + Kinek Technologies Inc. 
+ Michael Hartery + michael.hartery&kinek.com +32910 + Impeq Technologies BV + Stefan van Zoggel + s.van.zoggel&impeq.com +32911 + Brian Raaen Network Consulting + Brian Raaen + iana-pen&brianraaen.com +32912 + Duck Creek Technologies, Inc + Johnny Ramirez + networkservices&duckcreektech.com +32913 + Electrodata Recorders Pty Ltd + Sunil Hargunani + sunilh&electrodata.com.au +32914 + Pharmacy Chain 36.6 + Alexeychenko Mikhail + mikhail.alexeychenko&gmail.com +32915 + Edinburgh's Telford College + John Yeaman + pen&ed-coll.ac.uk +32916 + Lane Community College + Thad Cowdin + cowdint&lanecc.edu +32917 + GT Apps Limited + Radim Marek + radim&laststation.net +32918 + Institut Sainte Catherine + Hénoch Hervé + h.henoch&isc84.org +32919 + Barking Iguana + Craig R Webster + craig&barkingiguana.com +32920 + ETSA + Laurence Le Floc'h + llefloch&etsa.fr +32921 + Orbis Technology Ltd + Andre Esser + it-notice&OrbisUK.com +32922 + Verathon Inc + Samir Budhdeo + samir.budhdeo&verathon.com +32923 + Alastair Carr + Alastair Carr + alastairrcarr&tiscali.co.uk +32924 + Menturagroup Ltd + Andreas Heckwolf + andreas.heckwolf&menturagroup.com +32925 + Treck Inc. + Peter Carney + pcarney&treck.com +32926 + WebOnyx + Jared Laprise + jared&webonyx.com +32927 + Unlabeled Artists Group, LLC + Chris Osborn + domain.admin&unlabeled.com +32928 + BrightHouse + Dean Robinson + dean.robinson&brighthouse.co.uk +32929 + Squarewave Computing + Robert Niederreiter + office&squarewave.at +32930 + Wolfgang Karall EDV Consulting and Training + Wolfgang Karall + office&karall-edv.at +32931 + TOTEC AMENITY LIMITED (formerly 'easynet, inc.') + Kazuhito Aono + Kazuhito_Aono&totec.co.jp +32932 + Direct Telecom Ltd + Andrey Nikolayev + nikolayev&directtel.ru +32933 + Conduct AS + Lars Preben Sorsdahl + lars.preben&conduct.no +32934 + Institute of Biophysics of the CAS, v.v.i. + Jan Kovarik + jkovarik&ibp.cz +32935 + Amalto Technologies + Bruno Grieder + bruno.grieder&amalto.com +32936 + Atelier Decora + Jannis Jacobsen + drift&decora.no +32937 + uma information technology GmbH + Daniel Doegl + daniel.doegl&uma.at +32938 + Server Racks Australia + Gordon Campbell + support&server-racks-australia.com.au +32939 + Hochschule Hannover + Jürgen Rosemeyer + support-it&hs-hannover.de +32940 + NCC AB + Henrik Liljeqvist + henrik.liljeqvist&ncc.se +32941 + Azet.sk, a.s. + Ivan Debnár + debnar&firma.azet.sk +32942 + Voltage Security + Robert Slifka + rob.slifka&voltage.com +32943 + HEXIOS + Tom Casaer + tom.casaer&hexios.be +32944 + drs4drs + Robert Pollard + rpollard&drs4drs.com +32945 + Norsk eSport DA + Einar S. Idsø + einar.oid&norsk-esport.no +32946 + Manuel Meistrowitz + Manuel Meistrowitz + manuel_meistrowitz&web.de +32947 + Nuclemed S.A. + Gustavo Coscia + gcoscia&nuclemed.com.ar +32948 + UniFirst Corporation + Brian Doiron + bdoiron&unifirst.com +32949 + SunWater + Bill Holder + bill.holder&sunwater.com.au +32950 + Meucci Solutions + Lieven De Bontridder + lieven.de.bontridder&meucci-solutions.com +32951 + Wellseeing Communication Technology Co.,Ltd + Neill Wang + xingye_wang&126.com +32952 + PheeNet Technology Corp. + Ms. Eileen Wei + eileen&pheenet.com.tw +32953 + YACOUB Automatiom GmbH + Peter Schuetz + ps&yat.de +32954 + Italiaonline S.p.A. 
+ Massimo Meregalli + massimo.meregalli&italiaonline.it +32955 + Hrvatske sume d.o.o + Ratko Pacadi + ratko.pacadi&hrsume.hr +32956 + ipct.net + Michael Linke + info&ipct.net +32957 + IN Switch Solutions + Sebastian Bello + sebastian.bello&inswitch.us +32958 + YMMV LLP + Andy Brown + snmp&ymmv.co.uk +32959 + Audit Bureau - Qatar + Mahmoud Albatarni + mahmoud&albatarni.com +32960 + Ringsted Kommune + Tobias Moelgaard Laursen + tom&ringsted.dk +32961 + KBC Consumer Finance + Radoslaw L. Zak + radoslaw.zak&zagiel.com.pl +32962 + Beijing Gefei Tech. Co., Ltd + Guo Xiaoxia + gxx&cbvt.com +32963 + Taekjin Solutions + Taekjin Jun + taekjin&mgame.co.jp +32964 + Brand Up LLC + Robert Goodyear + domain-admin&brand-up.com +32965 + REAL Solutions S.A. + Eric NOEL + eric.noel&real.lu +32966 + eSpida Limited + Paul Hanson + Paul.Hanson&espida.co.uk +32967 + Centro de Tecnologia da Informação Renato Archer + Alexandre de Almeida Duarte + alexandre.duarte&cti.gov.br +32968 + HELiX Software + Support GmbH + Raimund Hölle + raimund.hoelle&helix.de +32969 + Odin TeleSystems Inc + Hanz Johansson + hanz&odints.com +32970 + Caltha - Krzewski, Mach, Potempski Sp. J. + Rafał Krzewski + Rafal.Krzewski&caltha.pl +32971 + MUZICALL + Benjamin Kohler + sysadmin&muzicall.com +32972 + TransWorks, Inc. + Greg Rhoades + greg.rhoades&trnswrks.com +32973 + POP.PL - Internetowe Systemy Sieciowe + Slawomir Bem + admin&pop.pl +32974 + Stimulus Software + Jamie Band + jamie&stimulussoft.com +32975 + CircleSoft Llc + Gerard J. Cerchio + gjpc&circlesoft.com +32976 + HyC Américas + René Fuentes Riquelme + rene.fuentes&hyctv.com +32977 + TELoIP + Network Administrator + netadmin&teloip.com +32978 + IntelliDOT Corporation + Brandon Gilmore + bgilmore&intellidotcorp.com +32979 + XIX + Markus Juenemann + markus&juenemann.net +32980 + Hitachi Systems Engineering Services, Ltd. (formerly 'Hitachi Systems Engineering and Solutions, Ltd.') + Yasumasa Ichioka + wiseaudit.rm&ml.hitachi-systems.com +32981 + Arcadyan Technology Corporation + Thomas Lee; Tom Teng + tom_teng&arcadyan.com +32982 + Trends Telematics BV + Theo Belder + development&trends.nl +32983 + Open Computing Solutions + Todd Shadburn + tshadb&opencomputingsolutions.com +32984 + Advantage Telecom + Fedor A. Kosilov + fkosilov&adtel.ru +32985 + Philips Electronics (Israel) LTD - Philips HealthCare + Amir Rosenbloom + amir.rosenbloom&philips.com +32986 + Kommunik8 Inc. + Brad Gawne + board&kommunik8.com +32987 + Barrowa + Kelvin Carman + kelvin.carman&barrowa.com +32988 + Dott. Giulimondi Gabriele + Gabriele Giulimondi + gabriele&giulimondi.it +32989 + in-volv + Gintaras Vaira + gintaras.vaira&in-volv.com +32990 + Prahm IT-Systemdienstleistungen + Jens Prahm + j.prahm&prahm-it.de +32991 + dix.cz + Tomas Hruby + th&dix.cz +32992 + ladava.com + Matjaz Ladava + matjaz&ladava.com +32993 + OpenSAF Foundation + Henry Turko + henry&erin-services.com +32994 + Standingstone + Simon Woods + simon&standingstone.de +32995 + Vendio Services, Inc. + Lorin Scraba + netops&corp.vendio.com +32996 + GOURMET NAVIGATOR INCORPORATED + KIMURA Hiroshi + registry-tec&gnavi.co.jp +32997 + Tuijnman Professional Services + Daniel Tuijnman + daniel&velotype.nl +32998 + Shanghai DareGlobal Technologies Co.,Ltd. + Yuguang, Yang + yangyuguang&dare-tech.com +32999 + Air Transportation Advancement Program + Jeong, Woo-Cheol + wcjeong&hist.co.kr +33000 + Zeon Corporation + Shixian Ma + sxmabj&hotmail.com +33001 + Micro Talk Systems Corp. 
+ Eda Ryuichiro + eda&greentag.to +33002 + Open End AB + Anders Hammarquist + iko&openend.se +33003 + SHENZHEN GONGJIN ELECTRONICS CO.,LTD + xiaojian luo + luoxiaojian&twsz.com +33004 + Universal Scientific Industrial (Shanghai) Co., Ltd + yun zhu + yun_zhu&usish.com +33005 + Artio Oy + Urho Tamminen + info-fi&artio.net +33006 + MGB-Tech B.V.B.A. + Bruno De Maesschalck + bdm&mgb-tech.com +33007 + Schoenhofer Sales And Engineering GmbH + Dirk Fuks + dirk.fuks&schoenhofer.de +33008 + TV 2 + Robert Uglehus + rug&tv2.no +33009 + Stadtverwaltung Mannheim + Certificate Support + cert&mannheim.de +33010 + IRB Barcelona + Francisco Lozano + its&irbbarcelona.org +33011 + digital performance + Stephan Scholz + info&dp4.de +33012 + FASTFOOD-SERVICE LTD. + Dmitry Demianov + barlone&yandex.ru +33013 + NexAira, Inc. + Michael Manulis + mmanulis&nexaira.com +33014 + Network Zen + Glen Zorn + gwz&net-zen.net +33015 + Emilie Myers and Associates + Julian Myers + julian&emiliemyers.com +33016 + Ajuntament de Barcelona + M Mercedes Mestre Antoli + mmestre&bcn.cat +33017 + Mobito Inc + Ryan Humiya + services&mobitocorp.com +33018 + Pilin-Echtar.NET + JF MASSARD + jf.massard&gmail.com +33019 + Huntsville Hospital + Ryan Petraszewsky + ryanp&hhsys.org +33020 + Rebel 2000 Limited + Colin Gaunt + colin.gaunt&rebel2000.com +33021 + WFG PARTICIPAÇÕES S.A. + CARLOS ARCE + carce&xpg.com.br +33022 + Simon & Stolle GbR + Frank Stolle + f.stolle&hosting-agency.de +33023 + Eoos Technologies GmbH + Norbert Niederhuebner + nn&eoos-technologies.com +33024 + Lenovo Chinaweal System & Service Co.,Ltd + wangquanbo + wangqb&lenovo-cw.com +33025 + Atlanta Advanced Communications Holdings Limited + peter.xu + xq&axelwave.com.cn +33026 + Positive Advisory S.A. + Artur Kulinski + akulinski&positiveadvisory.pl +33027 + CAJA DE AHORROS DE GALICIA + Manuel Duran + manueld&caixagalicia.es +33028 + Prassel S.r.l. + Nicola Murino + n.murino&prassel.it +33029 + CJSC “Borisoglebsk Communication System" + Smirnoff Sergey + eto&bss.vrn.ru +33030 + Timico Ltd + Paul Sherratt + networks&timico.net +33031 + C.G.C.,a.s. + Roland Prochazka + prochazkar&cgc.sk +33032 + CTP GmbH + Martin Obermair + m.obermair&ctp-gmbh.com +33033 + Probability PLC + Marcus Gustavsson + admins&probability.co.uk +33034 + Taleo Inc. + Martin Tremblay + noc&taleo.com +33035 + InGenius Software + Ralph Curtis + Ralph&InGenius.com +33036 + A.ö. Krankenhaus der Elisabethinen Linz + IKT Abteilung + it&elisabethinen.or.at +33037 + Broadcast International + J. Dean Brederson + dean.brederson&brin.com +33038 + Fluor Corporation + Darren Albers + Darren.Albers&fluor.com +33039 + Adapt4 LLC + Bob Lawless + support&adapt4.com +33040 + BeatleNet Ltd. + Uri Mozer + uri&beatlenet.com +33041 + ZEIT Verlag Gerd Bucerius GmbH & CO KG + Ingo Paschke + ingo.paschke&zeit.de +33042 + bitrausch + Markus Ketterl + info&bitrausch.net +33043 + Shared Autonomous sYstems + Morten Hermanrud + mhe&say.no +33044 + Mantica Solutions S.L. + David Ramirez + david.ramirez&mantica-solutions.com +33045 + JinPeng + Wang Yong + wangyong.net&gmail.com +33046 + NEXTWERK IT-Services GmbH + Christopher Banck + Christopher.Banck&nextwerk.de +33047 + Freifunk Potsdam e.V. + Kai Sommer + iana&freifunk-potsdam.de +33048 + Brand + Hans-Juergen Brand + pen&brand-online.info +33049 + Mellanox Technologies LTD + Sagi Rotem + sagir&mellanox.co.il +33050 + China IWNCOMM Co., Ltd. 
+ Zheng Li + iad&iwncomm.com +33051 + flykernel + Mike Dong + mikedong&flykernel.com +33052 + Dhyan Infotech Inc., + Sai Krishnan + sai&dhyanit.com +33053 + Vialis bv. + Ferry de Bruin + ferry.de.bruin&vialis.nl +33054 + iPLON GmbH The Infranet Company + Thomas Kurz + kurz&iplon.de +33055 + Van Dijk Educatie BV + Hessel Smid + hessel.smid&vandijk.nl +33056 + Cambridge Research Systems Ltd. + Don Jackson + don.jackson&crsltd.com +33057 + CRISOL DE FRUTOS SECOS S.A.T + Jordi Tormo + jtormo&crisolfs.com +33058 + Stratus Telecommunications + Arun Dharankar + Arun.Dharankar&StratusTelecom.Com +33059 + MOBITRUM + Ray Wang + ray_wang&mobitrum.com +33060 + International Computer Science Institute + David Johnson + iana&icsi.berkeley.edu +33061 + University of Shanghai for Science & Technology + Wang Jia Lin + realeric327&hotmail.com +33062 + Netvision Telecom Inc. + Han Young Song + hysong&netvisiontel.co.kr +33063 + DVN Technology Limited + Chaplin J.F. Chen + chaplinchen&dvnholdings.com +33064 + A2B Electronics AB + Emil Ljungdahl + emil.ljungdahl&a2b.se +33065 + CertiCon a.s. + Kamil Kantar + kamil.kantar&certicon.cz +33066 + SmartSynch, Inc. + Derek Gibbs + dgibbs&smartsynch.com +33067 + Lake Cumberland District Health Department + Robert M Campbell + robertm.campbell&ky.gov +33068 + F2Ware Inc. + Helen Pai + helen&f2ware.com +33069 + Grieshaber Logistik AG + Marius Titz + mtitz&grieshaberlog.com +33070 + Alkaloid Networks LLC + Ben Klang + ben&alkaloid.net +33071 + BBMS AS + Erlend Ringstad + tech&bbms.no +33072 + subsist GmbH + Jens Graefe + jens.graefe&subsist.de +33073 + System Analysis and Information Technologies Conference + Mykhailo Makukha + sait&sait.org.ua +33074 + Oryx Mail Systems GmbH + Arnt Gulbrandsen + arnt&oryx.com +33075 + linux systeme thomas + Jens Thomas + info&linux-systeme-thomas.de +33076 + SHAROPS SASU + RAKOTOMALALA Renaud + iana&iopsthecloud.com +33077 + InformSvyazStroi, Ltd. + Aleksei Miheev + amiheev&st-host.ru +33078 + Ganymeade Systems + Dan Schaper + dschaper&ganymeade.com +33079 + EmblaCom Oy + Niclas Svahnström + niclas.svahnstrom&emblacom.com +33080 + Drexler Hard- und Software e.K. + Kai Edinger + admin&drexler-software.de +33081 + KOGAN + Kevin Cadogan + kevin.cadogan&bigpond.com +33082 + zozs.se + Linus Karlsson + iana.admin&zozs.se +33083 + Seafreeze Acquisition LLC, + Allen Williams + allenwilliams&seafreeze.com +33084 + Box and Dice Software Pty Ltd + Support + support&boxdice.com.au +33085 + Mueller Ltd. & Co. KG + Franziska Seibold + franziska.seibold&mueller.de +33086 + ims Info Management System AG + Oskar Persano + ope&ims-info.ch +33087 + International School of Stuttgart e. V. + Juergen Eichenbrenner + support&issev.de +33088 + Versatile Knowledge System + Karol Depka Pradzinski + karolrvn&verknowsys.info +33089 + Fachhochschule für öffentliche Verwaltung NRW + Martin Wolf + martin.wolf&fhoev.nrw.de +33090 + xxlboy + Florin Buzec + florin.buzec&yahoo.com +33091 + TCRP + Carlos Visser + carlos_visser&tcrpmail.com +33092 + Opsource + John Hedden + jhedden&opsource.net +33093 + LexisNexis RIAG + Philip Schwartz + philip.schwartz&lexisnexis.com +33094 + Netactive Systems Ltd + Gary R Smith + gary.smith&netactive.co.uk +33095 + HyTrust, Inc. 
+ Hemma Prafullchandra + hemma&hytrust.com +33096 + Hard To Port Productions + Stephanie Carlyle + stephanie&hardtoportproductions.com +33097 + Adligo Inc + Scott Morgan + scott&adligo.com +33098 + Gradient Effects + Kelly Bergougnoux + systems&gradientfx.com +33099 + Rudra Nevatia + Rudra Nevatia + admin&rudranevatia.com +33100 + ELITE Sistemas + Jose Luis Martin Peinado + elite&elitegrupo.com +33101 + Danube Data Center GmbH + Roland Felnhofer + admin.network&danubedc.com +33102 + Engelsburg Gymniasum + Jannes Jeising + j.jeising&webkassel.de +33103 + EmisFR + Stephane BERTHELOT + sberthelot&emisfr.com +33104 + KyaPanel + Anahuac de Paula Gil + anahuac&anahuac.biz +33105 + D-ns + Anthony Bousselier + admin&d-ns.fr +33106 + Softbooking + Alexandre Georges + alexandre.georges&softbooking.com +33107 + Meetup, Inc. + Qing Shou + qing&meetup.com +33108 + Nexen Services, Alter Way Hosting + LDAP Administrator + ldapadmin&nexenservices.com +33109 + Cumquat Information Technology BV + Jan Vissers + Jan.Vissers&cumquat.nl +33110 + ITD Network SA + Cvetelin Petkov + cpetkov&itdnet.net +33111 + dickicht.org GbR, Ragnar Nevries & Robert Waltemath + Ragnar Nevries + iana-pen&dickicht.org +33112 + Vidient Systems, Inc. + Jon Cook + joncook&vidient.com +33113 + FXhome + Toby Walsh + pen.malone&fxhome.com +33114 + Maricopa County Community College District + Norm Dye + n.dye&domail.maricopa.edu +33115 + Shamir Systems Ltd. + Ofer Dar + ofer&shamir-sys.co.il +33116 + Solvo, Ltd. + Andrew L. Shwaika + als&solvo.ru +33117 + Linuxense Information Systems Pvt. Ltd. + Rajkumar Sukumaran + raj&linuxense.com +33118 + Freescale Semiconductor + Sen Li + sen.li&freescale.com +33119 + AltaStream Controls Inc + Miroslav Ristic + mristic&altastream.com +33120 + eZ Systems + Maik Seyring + it&ez.no +33121 + Tadcom AB + Simon Percivall + simon&tadcomab.se +33122 + Vestmark, Inc. + William Porter + wporter&vestmark.com +33123 + ProTel Communications Ltd + Ravin Dimantha + ravin&protelnet.com +33124 + ASKing Co., Ltd. + Akiko Ohtsuki + iana-pen&asking.co.jp +33125 + Precise Power, Inc. + Daniel Baileys + dbaileys&precisepower.com +33126 + DataSoft Corp. + Luke Ritchie + luke.ritchie&datasoft.com +33127 + Dialog und Medien Agentur der ACS mbH + Carsten Kollmeier + kollmeier&dialog-medien.net +33128 + Semperian Capital Management + Ingmar Hupp + ingmar.hupp&semperian.co.uk +33129 + ENTEREST GmbH + Stefan Deigmüller + stefan.deigmueller&enterest.com +33130 + Virtual Instruments Corporation + Ryan Perkowski + ryanp&virtualinstruments.com +33131 + Drexel University + Paul Keenan + pen-mgr&drexel.edu +33132 + Kaiserpfalz EDV-Service + Roland Thomas Lichti + rlichti&kaiserpfalz-edv.de +33133 + Daedalus Software, Inc. + Stefano Santoro + SSantoro&DaedalusSoftware.com +33134 + LS ELECTRIC Co.,Ltd. 
+ Jinwook Hong + jwhonga&ls-electric.com +33135 + PeakSystems + Alexander Krasnov + a_krasnov&peaksystems-software.com +33136 + ELPRO VIDEOLABS srl + Renzo Parile + r_d&elprovideolabs.com +33137 + JIGAR + JIGAR BULCHANDANI + jigarbulchandani&ymail.com +33138 + TENA SDA + Bhagat Bandlamudi + bandlamudib&tena-sda.org +33139 + 5gbit.de + Andre Brandt + webmaster&5gbit.de +33140 + theBside + Kirk Bridger + kbridger&shaw.ca +33141 + e-Rank Internetdiensten + Rens Admiraal + rens&e-rank.nl +33142 + Manuel Kobashigawa + Manuel Kobashigawa + mk.sysnet&yahoo.com +33143 + Clipsal Australia Pty Ltd + Tim Bates + tim.bates&clipsal.com.au +33144 + Kitten Wranglers Unlimited + Rina Carman + rinacarman&gmail.com +33145 + Digivision Entertainment Private Limited + Parvez Ahmad Rishi + parvez.rishi&smarttv.co.in +33146 + Stiftung Nikolauspflege + Mateusz Kochmann + mateusz.kochmann&nikolauspflege.de +33147 + Olivier BONHOMME + Olivier BONHOMME + obonhomme&nerim.net +33148 + British Columbia Wireless Network Society + Matthew Asham + matthewa&bcwireless.net +33149 + Defining Technology, Inc. + Bruce Olsen + bruce.olsen&definingtech.com +33150 + silmarilli.eu + Ludovic Dupont + silmarilli.eu.oid&gmail.com +33151 + CAIL Technologies + Jim Walker + jim_walker80&yahoo.com +33152 + ASIC North, Inc. + Mike Laramie + mike.laramie&asicnorth.com +33153 + TeleDNA Communications Pvt. Ltd. + Jitendra Patil + jitupatil&teledna.com +33154 + NPO Impuls + Alexei Lystsev + impuls_ntc&mail.ru +33155 + Code Jawa + Trio Syamsul Benny + tech_support&codejawa.com +33156 + Monsternett AS + Helge Milde + helge&monsternett.no +33157 + fqdn dot Ro + Daniel Crisan + crisandaniel&gmail.com +33158 + ak obs, ltd. + Igor Artemov + info&omobus.ru +33159 + AGS INFORMATIQUE + Hamed AZOUAOU + h.azouaou&agsinformatique.fr +33160 + The Invariant Autocracy + Peter M Gerdes + hostmaster&invariant.org +33161 + www.libvoip.com + Cullen Jennings + fluffy&cisco.com +33162 + DCS Internet Pty Limited + Benjamin Wakefield + ben&team.dcsi.net.au +33163 + Gundersen Lutheran + David P. Mezera + dpmezera&gundluth.org +33164 + datahouse AG + Daniel Meister + daniel.meister&datahouse.ch +33165 + Minds + Machines + Jothan Frakes + jothan&mindsandmachines.com +33166 + Fiasko Software + Joakim Soya + jsoya&fiaskosoftware.com +33167 + Trusted Logic + Erwan David + Erwan.David&trusted-logic.com +33168 + mLife Sp. z o.o. + Andrzej Trawiński + andrzej.trawinski&mlife.pl +33169 + Via-Vox Limited + shaun gibson + shaun.gibson&via-vox.net +33170 + International Financial Data Services (Canada) Ltd. + Sarah Nordstrom + snordstrom&ifdsgroup.com +33171 + HELIKA, a.s. + Petr Kralik + petr.kralik&helika.cz +33172 + NetClean Technologies Sweden AB + Bengt Gördén + bengt.gorden&netclean.com +33173 + Annidis Health Systems Corp + Rob Andrews + roba&annidis.com +33174 + COMM-connect A/S + Steen Moeller + steen.moeller&comm-connect.com +33175 + ZIEHL industrie-elektronik GmbH+Co KG + Uwe Ziehl + u.ziehl&ziehl.de +33176 + Rambla + Wout Decré + wout&rambla.be +33177 + Radiation Oncology Victoria + Philip Yarra + admin&radoncvic.com.au +33178 + Jilin University + Hongmei Yu + hmyu&jlu.edu.cn +33179 + omod + Dan White + whitehse&gmail.com +33180 + PayLife Bank GmbH + Martin Steinbichler (Systemtechnik) + iana&paylife.at +33181 + LASELEC S.A. 
+ Olivier LARRIGAUDIERE + olivier.larrigaudiere&laselec.com +33182 + Advanced Network Engineering Consultants Limited + Christopher J Knight + anecltd&gmail.com +33183 + SystemsPromAutomatic + Anatoly Danilov + ermolaew&bk.ru +33184 + AsiaRF Ltd. + Tzong Der Lai + sales&asiarf.com +33185 + jps networks + James Shaw + iana&jpsnetworks.co.uk +33186 + Data Robotics, Inc + Rod Harrison + rod&drobo.com +33187 + Commission de la santé et de la sécurité du travail (CSST) + Francis Bugeaud + francis.bugeaud&csst.qc.ca +33188 + DANS – Data Archiving and Networked Services + Henk van den Berg + henk.van.den.berg&dans.knaw.nl +33189 + Droplet Technology Inc + Arun Mahajan + arun&droplet-tech.com +33190 + Petroleum Development Oman LLC + Sueliman Al-Harthy + suleiman&pdo.co.om +33191 + Equine Technology Pte Ltd + Dr Peter Lim Boon-Lum + peterlim09&equinetech.net +33192 + University of Žilina + Ján Janech + jan.janech&fri.uniza.sk +33193 + Powercom Consultants Pty Ltd + Nick Sonneveld + nick&powercomgroup.com +33194 + Klarna Bank AB (formerly 'Kreditor Europe AB') + Mustafa Çetinkaya + iam&klarna.com +33195 + Bimash LLP + Beibut Yerzhanov + beibut.yerzhanov&bimash.kz +33196 + VITA + John Rynearson + techdir&vita.com +33197 + WIT + M. DUFFAU Georges + georges.duffau&wit.fr +33198 + SISCA + Samuel littierre + Samuel.littierre&sis-ca.fr +33199 + "TERRATEL" LLC + Aleksandr Sobkovych + oleksandr.sobkovych&terratel.eu +33200 + Gelber Group LLC + A. Blake Cooper + bcooper&gelbergroup.com +33201 + 3F Elettronica s.n.c. + Federigi Dino + 3felettronica&3felettronica.com +33202 + ASUMO CO., LTD. + Yamagishi Makoto + yamagishi_m&asumo-inc.com +33203 + SensiMesh Pte Ltd + Wu Zheng + zheng.wu&sensimesh.com +33204 + Sprillion Technologies + Shivaram Upadhyayula + snm&sprillion.com +33205 + Elman srl + Pierluigi Fiori + p.fiori&elmansrl.it +33206 + Aqueouslife + Richard Graham + richard&aqueouslife.co.uk +33207 + CEJIL - Center for Justice and International Law + Mariano Absatz + baby&baby.com.ar +33208 + AMB Consulting SARL + Alexandre Berge + alexandre.berge&amb-consulting.com +33209 + Dich Networks Co. + Andrew Wan + andrew&dich.com.tw +33210 + Qvantel + Malyadri Beegala + malyadri.beegala&qvantel.com +33211 + Westminster College + Troy Gerber + tgerber&westminstercollege.edu +33212 + Rötzer Engineering + Torsten Schmidt + schmto&hrz.tu-chemnitz.de +33213 + Moeller GmbH + Arnd Stein + arnd.stein&moeller.net +33214 + IPBFR + Michel Pallard + pallard&ipb.fr +33215 + Swisscanto Assetmanagement AG + Stefan Liechti + stefan.liechti&swisscanto.ch +33216 + Dr. August Oetker Nahrungsmittel KG + Maik Wegnar + mwegnar&oetker.de +33217 + accessec GmbH + Sebastian Rohr + rohr&accessec.com +33218 + Polar Circle AS + Kim A. Betti + post&polar-circle.no +33219 + Causata Ltd + John Graham-Cumming + jgc&causata.com +33220 + Netfonds Bank ASA + Bjørn Nordbø + drift&netfonds.no +33221 + Insumo Systems + Steve Gibson + sgibson&insumo.com.au +33222 + INECOIEC + Omar Poch + omar&inecoiec.com.ar +33223 + Aero-Info Technologies Co.,Ltd. + Geng Wang + wanggeng&ait.cn +33224 + CJSC Rosta + Alexey Lychagin + alex&zaorosta.ru +33225 + Obelux Oy + Mr. Reijo Järnstedt + reijo.jarnstedt&obelux.fi +33226 + Inline Internet Online Dienste GmbH + Ansgar Sonntag + sonntag&inline.de +33227 + PromiNet s.r.o + Ing. 
Marek Zidek + marek.zidek&prominet.sk +33228 + AareNet AG + Daniel Jaggi + daniel.jaggi&aarenet.com +33229 + COMUNICI GmbH + Sebastian Mauer + s.mauer&comunici.com +33230 + Blueloop Ltd + Jon Gerdes + gerdesj&blueloop.net +33231 + AvL Technologies + Scott Bakken + sbakken&avltech.com +33232 + stderr.nl + Matthijs Kooijman + matthijs&stdin.nl +33233 + ALERT Life Sciences Computing, S.A. + Luis Costa + luis.costa&alert.pt +33234 + Halon Security + Jonas Falck + jonas.falck&halonsecurity.com +33235 + DataHack + Erik Lax + root&datahack.se +33236 + AMTRON GmbH + Hans-Dieter Gehlen + registration&amtron.net +33237 + Authernative, Inc. + Dipankar Das + dipankar.das&authernative.com +33238 + ReachLocal, Inc. + System Operations + sysops&reachlocal.com +33239 + Active Control Technology Inc. + Peter Baranowski + pbaranowski&activecontrol.com +33240 + Salamander Technologies, Inc. + Rob Jones + rjones&salamandertechnologies.com +33241 + CITIZEN SYSTEMS JAPAN CO., LTD. + Masaji Iwata + engineering-div&systems.citizen.co.jp +33242 + PanTek + Federico Sanchez + fedesp&gmail.com +33243 + China Broadband Wireless IP Standard Group + Liu Wei + bwips&chinabwips.org +33244 + William Demant Holding + Niklas Lagersson + nil&oticon.dk +33245 + kaf.cz + Antonin Faltynek + tonda&kaf.cz +33246 + UAB Pivot Capital Management + Saulius Bauzinskas + itservice&pivotcapital.com +33247 + Daton Securities Co., Ltd. + Yuchanghong + ychxhy&gmail.com +33248 + Radix Development Corporation CC + Andre Liebenberg + andre&radix.co.za +33249 + BI@Work S.r.l. + Mauro Aiuto + mauro.aiuto&biatwork.com +33250 + SmurfitKappa News press + Andrew Magill + andrew.magill&smurfitkappa.ie +33251 + REASON TECNOLOGIA S.A. + Conrado Seibel + conrado.seibel&reason.com.br +33252 + Flashlight Engineering & Consulting + David Hoots + dlhoots&flec.tv +33253 + MiS Inc. + Ian McMaster + ian.mcmaster&gmail.com +33254 + TimeData Corporation + Terry Martin + tmartin&timedatacorp.com +33255 + Kentucky Christian University + Gregory C. Richardson + grichardson&kcu.edu +33256 + Small Office Networks + Gena Gulchin + gena&sonetworks.com +33257 + Dienste fuer Menschen gGmbH + Hardtmann Stephan + shardtmann&abakus.de +33258 + dzhon pty. ltd. (formerly 'mosais pty. ltd.') + Rob Gillan + rob&dzhon.com +33259 + Martin DiViaio + Martin DiViaio + martindiv&yahoo.com +33260 + Triescom Co., Ltd. + Katsuhito Ozawa + katsuhito.ozawa&triescom.co.jp +33261 + Resilans AB + Bengt Gördén + bengan&resilans.se +33262 + Jäger Computergesteuerte Messtechnik GmbH + Thomas Böhne + TBoehne&ADwin.de +33263 + Raiffeisen Information Service KonsGmbH + Reinhold Trocker + ris.ais&ris.bz.it +33264 + EPI Service Ltd + Marek Stuczynski + mareks&epi-uk.com +33265 + Robert Rhea Photography + Robert Rhea + robert.rhea&robertrheaphotography.com +33266 + ISOMEDIA, Inc. + Stephen Milton + milton&isomedia.com +33267 + The SPACEPOL Corporation + Mrs. Chantal Gagnon + spacepol&spacepol.ca +33268 + ClearTech Ltda + Infraestrutura e Tecnologia + ite&cleartech.com.br +33269 + Ecessa corporation + Qui Huynh + qui&ecessa.com +33270 + Sysmap Solutions + Daniel Andrade + daniel.silva&sysmap.com.br +33271 + Baptist Care (SA) Inc + James Newport + jnewport&baptistcaresa.org.au +33272 + Med-RT, LLC + John Faltys + jfaltys&med-rt.com +33273 + Gomez & Associates + Antonio F. 
Gomez + afgcala&gmail.com +33274 + Nestle Waters North America + Ben Herman + dnsadmin&perriergroup.com +33275 + The Hutchins School + Ian MacRae + ian.macrae&hutchins.tas.edu.au +33276 + KDN (Korea Electric Power Data Network) + Jung Ho. Ryu + jhryu&kdn.com +33277 + Pfiffner Gruppe + IT Support + it&pfiffner.com +33278 + Confident Instruments, Inc. + Gilbert Baker + baker&confident-instruments.com +33279 + Mazioli LTDA-ME + Gleydson Mazioli da Silva + gleydson&mazioli.com +33280 + OWFS -- One Wire Filesystem + Paul H Alfille + paul.alfille&gmail.com +33281 + New Dream Network + Jeremy Kitchen + jeremyk&newdream.net +33282 + Eastern Oregon University + Timothy Willey + twilley&eou.edu +33283 + AN-D.cz + Adam Nemcek + adam.nemcek&an-d.cz +33284 + Macquarie Group Limited + Adam Brimo + adam.brimo&macquarie.com +33285 + NavStar Geomatics Ltd + Glen Bjorgan + glen.bjorgan&navstar.com +33286 + KIPAC + Stuart Marshall + marshall&slac.stanford.edu +33287 + Epix LLC + Bogdan Ćulibrk + hostmaster&epix.rs +33288 + GEVAS software GmbH + Thilo Schoen + software&gevas.de +33289 + Metalor + Jerome Copin + adminmaster&metalor.com +33290 + Bundesverfassungsgericht + Oswald Plaikner + it-reg&bundesverfassungsgericht.de +33291 + Microvideo Ltd + Dan Brennan + dbrennan&microvideo.co.uk +33292 + Fordham University + Bruce Portz + bportz&fordham.edu +33293 + Jenkins Shipping Company Ltd + hans van der Meyden + hans&vanmeyden.co.uk +33294 + Lincor Solutions Ltd + Yannick Servel + yannick.servel&lincor.com +33295 + Georg Utz, Inc. + Thomas Schwank + thomas.schwank&us.georgutz.com +33296 + LepomisLab, LLC + Vlad Ershov + vershov&lepomislab.com +33297 + Arion Systems Pvt. Ltd. + Swapneel Kore + swapneel&arionsys.net +33298 + Wikimedia Foundation, Inc. + Mark Bergsma + mark&wikimedia.org +33299 + Ithos USA + Michael O'Mara + omara.michael&verizon.net +33300 + Linkare TI + José Pedro Pereira + jpereira&linkare.com +33301 + Mee IT Solutions Co.,Ltd. + Jaruwat Boonmee + jaruwat&meeit.co.th +33302 + Trio Datacom + Richard Gipps + rgipps&netspace.net.au +33303 + Dannatu AG + Juergen Sprenger + juergen.sprenger&dannatu.ch +33304 + EudoxeSoft SARL + Nicolas Delisle + ndelisle.iana&eudoxesoft.com +33305 + Ark Computing + Alexios Chouchoulas + alexios-ark&bedroomlan.org +33306 + Telemaque + Tristan Mahé + tristan&telemaque.fr +33307 + St. Jude Children's Research Hospital + Will Schmied + OIDAdmin&stjude.org +33308 + DataCode srl + Massimo Lusetti + massimo&datacode.it +33309 + Aochuang Electronic Equipment Co.,Ltd. + Luo Jun + luojun&live.com +33310 + ZMC Elektronika d.o.o. + Viktor Varga + viktor.varga&zmc.rs +33311 + Everest Software International + Alexander Pastuhov + apastuhov&hotmail.com +33312 + Forenet, Inc.
+ Neco Fang + necofang&forenet.cn +33313 + SoeKul, LLC + Luke Stamm + iana&soekul.com +33314 + Servicio Nacional de Rehabilitación + Javier Badaracco + javier&snr.gov.ar +33315 + Max-Planck-Institut fuer Informatik + Dirk Raufer + ldap-adm&mpi-inf.mpg.de +33316 + Government of Saskatchewan + Myles Sartor + myles.sartor&gov.sk.ca +33317 + INCITS Technical Committee T11 + Claudio DeSanti + cds&cisco.com +33318 + AIDS Law Project + Brian Honermann + honermannb&alp.org.za +33319 + Technische Universität Kaiserslautern + Heiko Krupp + heiko.krupp&rhrk.uni-kl.de +33320 + Skarvon + Alexander Krasnov + Krasnov&serviceline.ru +33321 + Tradesmen International Inc + Charles Bushong + domains&tradesmeninternational.com +33322 + Progress Financial + David Bryson + dbryson&progressfin.com +33323 + Chaos Eternal Network + Charles Etherme + chaoseternal&gmail.com +33324 + Istochnik Ltd. + Marat N. Faizrakhmanov + maratnf&bk.ru +33325 + MSB Micro Systems + Danny Stemmet + support&msbmicro.com +33326 + PERTUS + Piotr Bylinski + p.bylinski&pertus.pl +33327 + ZIP Sistemas S.L. + Ferran Lax + f.lax&zipsl.com +33328 + NetArt Piotr Nowak + Grzegorz Piszczek + hostmaster&netart.pl +33329 + Encode Networks Svenska AB + Björn Lantz + bjorn.lantz&encode.se +33330 + Relevantum Oy + Vesa Keinänen + vesa.keinanen&relevantum.fi +33331 + Balance Network Co. ,Ltd. + Changyu Yang + ycy034&163.com +33332 + City of Edmonton + Shirley Fenton + shirley.fenton&edmonton.ca +33333 + Morningstar Corporation + IT Support + software&morningstarcorp.com +33334 + STEALTHbits Technologies, Inc. + Phil Burgard + phil.burgard&stealthbits.com +33335 + TrueOffice + Dmitry Shurupov + Dmitry.Shurupov&trueoffice.ru +33336 + Kat5Design + Ron McLeod + ron.mcleod&kat5design.com +33337 + elineis + pierre-mael Cretinon + com.iana&elineis.com +33338 + Comkom GmbH + Christian Dergovics + cdergo&comkom.at +33339 + The Charity Bus + Andy Ryan + thecharitybus&googlemail.com +33340 + Fundacio Privada Universitat Abat Oliba CEU + Responsable de sistemas + iana&uao.es +33341 + Beijing TongFang Gigamega Tech. co., Ltd. + Zhujun Qin + qinzhj82&gmail.com +33342 + u-blox AG + Mike Burgener + mike.burgener&u-blox.com +33343 + Storebrand ASA + IAM Admin + iamadmin&storebrand.no +33344 + SC Garant Industries SRL + Neacsu Marian + industries.garant&gmail.com +33345 + Branded Payment Solutions + David Kelly + itsupport&bpscards.com +33346 + Conseil Général des Pyrénées Atlantiques + BODET Thomas + thomas.bodet&cg64.fr +33347 + Papa John's International, Inc + Christopher Rodman + christopher_rodman&papajohns.com +33348 + Voidnet.biz + James Horner + humankind135&gmail.com +33349 + Mohawk College of Applied Arts and Technology + Jeff Howard + jeff.howard&mohawkcollege.ca +33350 + GALLACCI COMMUNICATIONS + Gabriele Gallacci + mailing&gallacci.com +33351 + Hospital Service Association of Northeastern Pennsylvania + Robert Budd + robert.budd&bcnepa.com +33352 + MAX Technologies + Yvon Belec + ybelec&videotron.ca +33353 + 2Ring + Jirka Novak + jiri.novak&2ring.com +33354 + Kaasjager Consultancy + Alexander Kaasjager + kalex&kalex.nl +33355 + Ozeki Informatics Ltd. + Mr. Gyula Rábai + gyula.rabai&ozeki.hu +33356 + HEADJAPAN Co.,Ltd. 
+ Enzo Fukura + contact_mib&mx.headjapan.com +33357 + TWINSEC GmbH + Thomas Kriener + thomas.kriener&twinsec.de +33358 + ApplianSys Ltd + John Robson + support&appliansys.com +33359 + VUT GmbH + Julian Backes + j.backes&vutonline.de +33360 + Kahaf (pvt) Ltd + Muhammad Shoaib + shoaib&kahaf.com +33361 + Cursum + R A Hazeleger + robert.hazeleger&cursum.nl +33362 + DataSchenk, Inc. + Jeffrey Schenk + Jeffrey.Schenk&DataSchenk.com +33363 + Duff & Phelps, LLC. + Scott Goodwin + scott.goodwin&duffandphelps.com +33364 + ginkgotek + huo ju + huoju&ginkgotek.com +33365 + Edelbluth Engineering + Juergen Edelbluth + juergen.edelbluth&edelbluth.eu +33366 + Columbia Data Products, Inc + Lance Hudson + lhudson&cdp.com +33367 + VIRTUALMGS + Myles Sartor + msartor&virtualmgs.com +33368 + VTRON TECHNOLOGIES LTD. + Zhentao Liu + liuzhentao&vtron.com +33369 + Sino-telecom Technology Co.,Ltd + Wan Renyong + wanry&sino-telecom.com +33370 + Erasmus MC Rotterdam + Maarten Bijl + m.a.bijl&erasmusmc.nl +33371 + Kelkoo + Eric VEYRET + eric.veyret&kelkoo.com +33372 + Infocare AS + Jan Terje Andersen + jan.terje.andersen&infocare.no +33373 + ZeroPoint.it + M. Shoaib Ahmed + sysadmin&zeropoint.it +33374 + SFProjects + Sebastian Fillinger + sebastian&fillinger.de +33375 + Dalee LLC + Andrey Nikolaev + andrey.nikolaev&dalee.ru +33376 + Argeon Limited + Balázs András Dohányos + dohanyos.balazs&argeon.hu +33377 + Paul Armstrong + Paul Armstrong + iana&otoh.org +33378 + Zetta, inc + Theral Mackey + tmackey&zetta.net +33379 + KLabs + Inseok Choi + ischoi&klabs.re.kr +33380 + ICS Computer Services SA + Gordan Vosicki + admin&icsa.ch +33381 + DVS Digital Video Systems AG + Konstantin Schinas + schinas&dvs.de +33382 + chillyweb + Niklas Schulze + ns&hardware-area.de +33383 + Audisoft Technologies + Jean-Pierre Harvey + jpharvey&audisoft.net +33384 + eSaturnus + Dong Hoon Van Uytsel + donghoon.vanuytsel&esaturnus.com +33385 + Feeva Technology Inc. + Don Gilletti + don.gilletti&feeva.com +33386 + Hyper9 + Ryan Kruse + rkruse&hyper9.com +33387 + SS7 Solutions + Martin Quevedo + martin.quevedo&ss7solutions.com +33388 + InCASE - Sven Mueller + Sven Mueller + iana-pen-registration&incase.de +33389 + Keyboard Monkeys Ltd. + Reiner Jung + reiner&kb-m.com +33390 + Cormedica + Marcelo O. Rodriguez + cormedica&cormedica.com.ar +33391 + OSI d.o.o. + Rudi Ponikvar + rudi.ponikvar&osi.si +33392 + Source Allies, Inc + Scott Peshak + scott&sourceallies.com +33393 + C-NET Information Technology LTD. + POOR, Laszlo + poor.laszlo&cnet.hu +33394 + Mio Software Laboratory Inc. + Naoki Nishiyama + naoki&miolab.com +33395 + Washington State Consolidated Technology Services + Scott Barringer + CTSDLPKIAdmin&watech.wa.gov +33396 + Ferpa Consultoria e Sistemas Ltda. + Fernando Hackbart + fernando&ferpaconsultoria.com.br +33397 + CreAPPtive Limited + Ian Lee + ian.lee&legend-engg.com +33398 + Traffic Management Technologies + Gerhard Lamprecht + glamprecht&tmtservices.co.za +33399 + Egyption Root certification authority + Dr. Sherif Hazem Nour el-din + snoureldin&mcit.gov.eg +33400 + Anywire Corporation + Makoto Yamashita + yamashita&anywire.jp +33401 + Servera, Inc. + Kevin Shih + kevin.shih&servera-inc.com +33402 + Quadrizen + Stephen E. Halpin + oid-wn5rw5&quadrizen.com +33403 + Radiocomp ApS + Anne Christensen + ach&radiocomp.com +33404 + Dipl.-Ing. 
Christian Lindemann + Christian Lindemann + ldm_admin&arcor.de +33405 + EISST Limited + Corrado Ronchi + cronchi&eisst.com +33406 + Keller Williams Realty Intl + Ben Mayfield + sysadmin&kw.com +33407 + I-Evolve + Justin Elze + jelze&i-evolve.com +33408 + BASIS International Ltd. + Jerry Karasz + jkaraszpen&basis.cloud +33409 + The Chronicle + Kenneth Moir + LicenseManagement&chronicle.com +33410 + FETAC + Giovanni Zaidan + gzaidan&fetac.ie +33411 + QNETIC LTD. + Tom Gleason + tom.gleason&qnetic.com +33412 + Kootenai Electric Cooperative, Inc. + Art Malin + malina&kec.com +33413 + ADSLWEB-dot-Net + Pieter de Rijk + domreg&adslweb.net +33414 + Monitis Inc. + Hovhannes Avoyan + havoyan&monitis.com +33415 + N-Com, L.P. + Brad Landis + blandis&ncom.com +33416 + wondersgroup + zhang fengchang + alain&wondersgroup.com +33417 + PiN GmbH + Kurt Harders + harders&pin-gmbh.com +33418 + Rila Feinkost-Importe GmbH und Co. KG + Kurt Harders + k.harders&rila.de +33419 + CNStreaming Technologies Co., Ltd + Li Jie + jie.li&cnstreaming.com +33420 + Voalte, Inc + Oscar Callejas + o.callejas&voalte.com +33421 + South Gloucestershire Council CYP ICT + Nick Pearce + Nick&sgcyp.org.uk +33422 + Com-Ned Netwerken B.V. + Ronald Huizer + ronald&comned.com +33423 + Safran Passenger Innovations Germany GmbH + Andreas Wolf + de.engineering&zii.aero +33424 + ITXTEND LLC + Raghunathan KV + raghukv&itxtend.com +33425 + Fortisbank NL NV + Steven Geerts + steven.geerts&nl.fortis.com +33426 + Mirae Ikorn company + Sai Thi Hien Ninh + sthninh&mikorn.com +33427 + 5V Technologies Ltd. + Hsu Chih-Ming + mein_hsu&5vtechnologies.com +33428 + TRAKCE, a.s. + Bretislav Otruba, Ing. + otruba&trakceol.cz +33429 + Mobile Experts sp. z o.o. + Adam Wos + adam.wos&mobileexperts.pl +33430 + Libra Szoftver Zrt + Mihaly Kepler + mkepler&mve.hu +33431 + Oberfinanzdirektion Karlsruhe + Robert Foerderer + robert.foerderer&ofdka.bwl.de +33432 + MAHLE International GmbH + Michael Zerrer + michael.zerrer&mahle.com +33433 + Interwetten + Otahal Helmut + it-admin&interwetten.com +33434 + SDC Sistemi di Computer + Federico Scaramuzza + pen-iana&essedici.it +33435 + dicas digital image coding GmbH + Matthias Wegener + wegener&dicas.de +33436 + Lepida spa + Maurizio Coppari + maurizio.coppari&lepida.it +33437 + Heidelberg Engineering GmbH + Michael Reutter + mreutter&heidelbergengineering.com +33438 + RIPN + Mikhail Vsevolodov + vsev&ripn.net +33439 + Centralx + Marcos Moreira + mm&centralx.com.br +33440 + Centro Brasileiro de Pesquisa e Tecnologia em Informática Médica + Marcos Moreira + mm&cpqt.com.br +33441 + Science Systems and Applications, Inc. + Owen Steinert + admin&ssaihq.com +33442 + Bernards + Jason Wise + supportdesk&bernards.com +33443 + Novus Entertainment Inc. + Norio Sakuta + norio.sakuta&novusnow.ca +33444 + obtelecom + xuxin + xuershao&hotmail.com +33445 + The London Library + Will David + will.david&londonlibrary.co.uk +33446 + Relay Station Ltd + Alexander Fisher + alex.fisher&relaystation.co.uk +33447 + Comtech Ltd. + Balázs Bagó + dev&comtech.co.hu +33448 + Chemtura Corporation + Michael Baker + michael.baker&chemtura.com +33449 + Time2Dive + michael koch + michael&time2dive.at +33450 + Facultad de Ingenieria Informatica, CUJAE + Bislandry Vejo + bvejo&ceis.cujae.edu.cu +33451 + Infomed + Maykel Moya + moya&infomed.sld.cu +33452 + Comsenz.Ltd + Joanna Zhang + zhangjuan&comsenz.com +33453 + Polylogics Consulting, Inc. + Rod Dorman + rodd&polylogics.com +33454 + Nippon Sogo Systems, Inc.
+ Kazuhiro Seo + k-seo&nssys.co.jp +33455 + BVE Solutions + Boris Eschenko + redhat&ngs.ru +33456 + DCF Technologies + Uri Margalit + uri&dcftech.com +33457 + DANET.CZ s.r.o. + Roman DAVID + rdavid&danet.cz +33458 + Colégio Santo Agostinho + César Augusto Simões Goudouris + cesar&csa.com.br +33459 + Vivisimo, Inc. + Chris Gillin + gillin&vivisimo.com +33460 + //SEIBERT/MEDIA GmbH + Torsten Rehn + trehn&seibert-media.net +33461 + DS Media Labs, Inc. + Glenn R. Martin + iana&dsmedialabs.com +33462 + Zmac1, Inc. + Dan Maus + noc.oid&zmac1.com +33463 + Limelight Networks, Inc. + Wylie Swanson + wylie&llnw.com +33464 + Leo Lab + Leo Wang + xfwang0724&hotmail.com +33465 + Unlimited Bandwidth LLC + Jim Albanese + jalbanese&ubllc.com +33466 + Kadeo Pty Ltd + Shaun Deans + shaun&kadeo.com.au +33467 + Cyprotex + David Roe + d.roe&cyprotex.com +33468 + Future Skies + Charles Hull + charles.hull&future-skies.com +33469 + Produban Servicios Informaticos Generales + David Manchado + dmanchado&produban.com +33470 + Vista Systems, Corp. + Derek Smithson + derek.smithson&vistasystems.net +33471 + Alekstra Oy + Jani Piitulainen + pen&alekstra.com +33472 + Sittig Industrie-Elektronik GmbH & Co. KG + Michael Sittig + michael.sittig&sittig.de +33473 + Unassigned + Returned 2017-01-20 + ---none--- +33474 + Regex + M. van Buytene + martijn&regex.nl +33475 + Botswana Power Corporation + Hanno van der Maas + hanno&bpc.bw +33476 + Kittelberger media solutions GmbH + René Gerisch + sysadmin&kittelberger.de +33477 + Sommer Mess-Systemtechnik + Bernd Herstelle + iana&sommer.at +33478 + aserva GmbH + Alexander Scheibner + alexander.scheibner&aserva.de +33479 + TradingScreen Inc + Jon Andrews + jon.andrews&tradingscreen.com +33480 + Canadian Automobile Association, South Central Ontario + Alex Savitsky + asav&caasco.ca +33481 + Manufacturing Resources International + John Schuch + jschuch&mri-inc.net +33482 + ManageIQ, Inc. + Oleg Barenboim + oleg.barenboim&manageiq.com +33483 + Goldflower Incorporated + Dr. Charles Cavanaugh + cdc&goldflowerinc.com +33484 + Initech s.r.o. + Petr Antonov + development&initech.cz +33485 + Caribou Software + Jeremy Phillippe + jeremy.phillippe&caribousoftware.com +33486 + Silk Information Systems, Inc. + Brandon Bates + info&silkisi.com +33487 + Packet Ship Technologies Limited + Paul Clark + paul&packetship.com +33488 + MoaningMarmot + Emmanuel Blot + emmanuel.blot&free.fr +33489 + NERO Network + Stephen Fromm + stephenf&nero.net +33490 + GeneGO Inc. + Maxim Kuznetsov + max&genego.com +33491 + Malviny International + Jiri "Malviny" Maly + malviny&post.cz +33492 + Broadex Technologies, Shanghai + Zhu Hangtian + hangtian.zhu&broadex-tech.com +33493 + Loadcom Technology Shenzhen Co., Ltd.
+ wangdadong + dolphine51&163.com +33494 + Sekoci Maju Jaya + Kunto Harjadji Baiquni + kbaiquni&esemje.com +33495 + AXPO INFORMATIK AG + Matthias Gysin + Matthias.Gysin&axpo.ch +33496 + Spurious Logic Technology Services + Ian Tarasevitsch + Ian.Tarasevitsch&gmail.com +33497 + Vahni Solutions + Cariappa Bollera Appaiah + cariappa.ba&vahnisolutions.com +33498 + Andrew Kipcharskiy + Andrew Kipcharskiy + avkipcharskiy&mail.ru +33499 + KPN HotSpots + Jan Haitjema + jan&kpnhotspots.com +33500 + Vertex Antennentechnik GmbH + SNMP Administrator + snmp&vertexant.de +33501 + Novatice Technologies + Erwan HAMON + tech&novatice.com +33502 + Comunidad Autonoma Region de Murcia + Tomas Sanchez Sandoval + tomas.sanchez&carm.es +33503 + Heavens-Above GmbH + Chirstopher Peat + chris.peat&heavens-above.com +33504 + Narodni technicka knihovna + Jakub Shanel + j.shanel&stk.cz +33505 + tw telecom, inc. + Bill Sella + bill.sella&twtelecom.com +33506 + Dassault Systemes (formerly 'Enginuity PLM LLC') + Carl P. Miller + carl.miller&3ds.com +33507 + Elster Electricity, LLC + Edward J. Beroset + edward.j.beroset&us.elster.com +33508 + Avere Systems, Inc. + Daniel S. Nydick + support&averesystems.com +33509 + Sociedad Hebraica Argentina + Leandro Fernández + leandro&hebraica.org.ar +33510 + Provincial Health Services Authority + Jason Freeman + jfreeman&phsa.ca +33511 + Radiant Technologies, Inc. + Tae Joon Ha + tjha&radiantech.net +33512 + TheMarkets.com LLC + Albert Rech + arech&themarkets.com +33513 + Arvensian + Jon Gray + jon&arvensian.co.uk +33514 + LUMANTEK + Kang Myung Soo + mskang&lumantek.com +33515 + Antonio Cunha Barbosa + Antonio Barbosa + cunha.barbosa&gmail.com +33516 + Zhuhai Jiasin Industry Co., Ltd + xiaohu yu + yuxh&jiasin.com +33517 + Scourger.nl + Richard K. Noorlandt + info&scourger.nl +33518 + Tap / Bkruse & Associates LLC + Ivan D Vasin + ivan&vacced.com +33519 + SkyMesh PTy Ltd + Brett Johnson + brett.johnson&skymesh.net.au +33520 + Modern Module Inc. + Alex Qian + alexqian&e-mmi.com +33521 + IKT Advanced Technologies s.r.o. + Jiri Smitka + jiri.smitka&ikt.cz +33522 + soit GmbH + Haik Schwebke + h.schwebke&soit.de +33523 + Sword Real Time + Stuart Banks + stuart.banks&rtel.com +33524 + measanctum + Bren Norris + bren.norris&measanctum.com +33525 + Bewigo Technologies SARL + Omar BENHAMID + omar.benhamid&bewigo.com +33526 + DonaldWilson.Info + Donald Wilson + donald&donaldwilson.info +33527 + USDA + Rich Barr + richard.barr&usda.gov +33528 + Krestfield + Darren Wilson + darren.wilson&krestfield.com +33529 + MIDASPLUS, INC. + David Stachowski + dave.stachowski&acs-inc.com +33530 + EasyWay + Wang Yongfeng + wangsir512&126.com +33531 + Dmitry Komarchev + Dmitry Komarchev + komarchev&gmail.com +33532 + Valsts Robežsardze + Sergey Lukashevich + sergejs.lukasevics&rs.gov.lv +33533 + Linkdevices Technology, Inc. + Victor Romanchuk + vroma&linkdevices.com +33534 + Systembase Limited + Mr A Steward + tech&systembase.com +33535 + Herning Kommune + Kim Skotte Larsen + itakl&herning.dk +33536 + INESC Porto + Joao Neves + Joao.Neves&inescporto.pt +33537 + BCV solutions s.r.o. 
+ Zdenek Burda + info&bcvsolutions.eu +33538 + EDX Software Design + Eduard Furrer + Eduard.Furrer&TheEDX.ch +33539 + Europcar Information Services + Mounir Chaabane + jerome.dufournet&europcar.com +33540 + Ion Beam Applications + Fabrice Deleau + fabdel&gmail.com +33541 + w3variance + Christian Hammerl + c.hammerl&w3variance.de +33542 + CBC Cologne Broadcasting Center GmbH + Günter Neitzel + guenter.neitzel&cbc.de +33543 + UHU Systems Kft. + Pozsár Balázs + info&uhusystems.com +33544 + T.Rowe Price + Mark Gogel + mark_gogel&troweprice.com +33545 + Entelec Control Systems + Kris Daniels + kris.daniels&entelec.be +33546 + Axiros GmbH + Natalia Vaisberg + info&axiros.com +33547 + Liberty Mutual Insurance Company + Liberty Communications Services + registerdomainus&libertymutual.com +33548 + Managed I.T. + Kiall Mac Innes + kiall&managedit.ie +33549 + ZnamiNet + Bartosz Radwan + biuro&znaminet.pl +33550 + BIXOLON Co., Ltd. + Chulhui, LEE + swallow&bixolon.net +33551 + David Jones + Chris Freeman + cfreeman&davidjones.com.au +33552 + HiFull Technology + Forrest Zhang + forrest&hifulltech.com +33553 + TBC Solutions + Kwek Yuan + kwekyuan&tbcsolutions.com.sg +33554 + Genetech AB + Bo Danielsson + bo.danielsson&genetech.se +33555 + RACOM s.r.o. + Ing. Marek Prasil + marek.prasil&racom.eu +33556 + Hyperthought Solutions + Scott Kelly + scott&hyperthought.com +33557 + University of Kent + IT and Library Support + helpdesk&kent.ac.uk +33558 + powertron engineering co., ltd. + Jung Ha-Jin + powertro&powertron.co.kr +33559 + AXION - Red de Banda Ancha de Andalucia S.A. + Antonio Marcos Linares Cabrera + amlinares&axion.es +33560 + Commercial Court of Tula Region + Alexander Senturin + avsenturin&tula.arbitr.ru +33561 + Express Gifts ltd + Andrew Gerrard + hostmaster&findel.co.uk +33562 + Judako AB + Johan Udd + johan.udd&judako.se +33563 + CNEA - Regional Centro + Martinez Rodrigo + rmartinez&rcentro.cnea.gov.ar +33564 + MEDIATECH + Antony Simonneau + technique&mediatech.fr +33565 + Epic Event International Pty Ltd + Ian Henderson + ian.henderson&epicevent.com +33566 + Dublin University Internet Society + Stephen Dolan + mu&netsoc.tcd.ie +33567 + Vira Realtime, Ltd. + Oleg Mityagin + o.mityagin&rlt.ru +33568 + Bakoma SA + Paweł Jankowski + pawel.jankowski&bakoma.pl +33569 + ERG S.p.A. + Danilo Greco + dgreco&erg.it +33570 + RENATA WEB SYSTEMS + Alexey V. Degtyarev + alexey&renatasystems.org +33571 + MRC Epidemiology Unit + Iain Morrison + iain.morrison&mrc-epid.cam.ac.uk +33572 + Educational Service Unit 10 + Jordan Clark + admin&esu10.org +33573 + Corps Saxo-Montania + Dennis De Matteis + admin&saxo-montania.de +33574 + waxtie + Martin Sharp + martin.sharp&waxtie.com +33575 + Inovatic d.o.o. + Boris Jakov Anic-Curko + anic&inovatic.hr +33576 + AP Router Industria Eletronica Ltda. + Guilherme F. Weidle Jr + guilherme&aprouter.com.br +33577 + CenIP + Sosa Lugones, Marcelo Martin + marsosa&cenip.com.ar +33578 + Jason B. Alonso + Jason B. Alonso + pen&hackorp.com +33579 + Fairfield County, Ohio + Andrew Michael Stemen + astemen&co.fairfield.oh.us +33580 + Anacom Eletrônica Ltda + Alexandre Rodrigues + arodrigues&anacom.com.br +33581 + OpenMind S.r.l. + Fabrizio Giustina + fabrizio.giustina&openmindonline.it +33582 + Errigal, Inc. 
+ Darren Hoffman + darren.hoffman&errigal.com +33583 + BSoft + Bart van Pelt + bvpelt&solcon.nl +33584 + OnlLine Technologies + Dr Nikos Baltas + nikos&flowgrid.com +33585 + Intehel + Xu Hailong + mail2xhl&sohu.com +33586 + Institute of Information science, Beijing Jiaotong University, China + Chongyan Xia + sunnyxcy&sohu.com +33587 + PlayBox Technology Ltd. + Ludmil Kushinov + support.bg&playbox.tv +33588 + Autek Ingenieria, SL + Manuel Sanz + manuel.sanz&autek.es +33589 + Ministarstvo unutrasnjih poslova Republike Srbije + Dragoslav Stanižan + dragoslav.stanizan&mup.gov.rs +33590 + Faltermeier + Florian Faltermeier + florian.faltermeier&gmail.com +33591 + GIE Astria + Bénédicte TAILLEBOIS + benedicte.taillebois&astria.com +33592 + Gryphon Technology Pty Ltd + Stephen Gryphon + sgryphon&gryphontechnology.biz +33593 + Errigal Inc. + Patrick Gary + patrick.gary&errigal.com +33594 + Daniel Hommel + Daniel Hommel + daniel&hommel4u.de +33595 + San Francisco State University + Joellen Fung + jfung&sfsu.edu +33596 + Adventist Health System + Kirk Ott + kirk.ott&ahss.org +33597 + Clasdix SRL + Leandro Fernández + lfernandez&clasdix.com.ar +33598 + isi-muenchen ltd. + Marc Ende + me&isi-muenchen.de +33599 + Dolf - Systems + Matej Bucar + matej&dolf-systems.com +33600 + OMNIconnect Pty Ltd + Graeme Lee + graemel&omniconnect.com.au +33601 + KIRYUNG + LEE MYUNG HEE + mh&huvent.com +33602 + northstarlabs.net + Jeff McCune + mccune.jeff&gmail.com +33603 + Infotech enterprises + Ajay Krishnan + ajaykrishnanm&infotechsw.com +33604 + SARL LES CAGOTS + Marc Mouttet + mmouttet&yahoo.fr +33605 + Soft & Control Technology s.r.o. + Jozef Nagy + register&sct.sk +33606 + HI5 Networks, Inc + Aaron Chu + achu&hi5.com +33607 + SeeChange Health + Mike Stinebaugh + mstinebaugh&seechangehealth.com +33608 + RipCode, Inc. + Nick Bozadzis + nick.bozadzis&ripcode.com +33609 + Softtek + Eduardo Bernot + eduardo.bernot&softtek.com +33610 + fastip, llc + Benjamin Black + iana&fastip.com +33611 + Transmicro + Rodrigo Deppe + deppe&transmicro.com.br +33612 + Com Hem AB + Simon Ehrlin + advisory&comhem.com +33613 + CRISPYLOGICS + Jochen Schmidt + js&crispylogics.com +33614 + DPD Polska Sp. z o.o. + Daniel Kiper + dkiper&dpd.com.pl +33615 + Cross Country Automotive Services + Jack Moore + networkrenewals&atxg.com +33616 + Ohio Supercomputer Center + Doug Johnson + djohnson&osc.edu +33617 + David Kraeutmann + David Kraeutmann + david&davidkra.net +33618 + Nito Programs + Stephen Cleary + oid.nito-programs&xoxy.net +33619 + FreeComm Data Communication Co., Ltd. + Ivans Tang + ivans_tang&163.com +33620 + CertusNet + CertusNet + zhangrt&certusnet.com.cn +33621 + Asmens dokumentu israsymo centras prie Lietuvos Respublikos vidaus reikalu ministerijos + Aldona Gudeliene + aldona.gudeliene&vrm.lt +33622 + 4Dst International Telecommunications (Pty) Ltd + Andrew Gaylard + mib&4dst.com +33623 + Pokrovskoe Ltd + Roman Osipov + RomanOsipov&gmail.com +33624 + Beijing VBON Science & Technology Development Co.,Ltd. + Xiao Kewei + keweixiao&163.com +33625 + xallaraparadigm + Marcus Griep + iana-pen&xpdm.us +33626 + Reouel + Alon Vilozny + alonsovilo&gmail.com +33627 + code-drohne.de + Andreas Zekl + accountz1&frechheit.net +33628 + Egyption Government Certification Authority + Engineer.Atef Mohamed Yassin Khallaf + govca&mof.gov.eg +33629 + t-Linux Ltd. 
+ Denis Pynkin + denis.pynkin&t-linux.by +33630 + Iridea (M) Sdn Bhd + Ishraf Ismail + ishraf&ir-idea.com +33631 + Poczta Polska + Kamil Perfecki + kamil.perfecki&katowice.poczta-polska.pl +33632 + AGNEKO + Vasiliy Gusev + gusev&agneko.com +33633 + Herman Andersson Oy + Hannu Vanninen + hannu.vanninen&hermanandersson.fi +33634 + Reale Seguros Generales + Eduardo Cunha + eduardo.cunha&reale.es +33635 + Ruby Sync + Ritchie Young or Nowhere Man + babar2k6&gmail.com +33636 + TechnoSec + Marco Pennelli + marco.pennelli&technosec.net +33637 + Walla Walla University + Paul Harvey + noc&wallawalla.edu +33638 + Voiceboard Corporation + Doug Fuller + doug&voiceboard.com +33639 + Vestas Wind Systems A/S + Morten Duus + modus&vestas.com +33640 + TCPOS SA + Ivan Salvetti + ivan.salvetti&tcpos.com +33641 + Ajimi + Bastien DOUCE + iana&ajimi.fr +33642 + Denmark - Fujitsu Services A/S + Michael Soerensen + privateenterprisenumber&fujitsu.dk +33643 + Comgroup GmbH + Markus Schmidt + markus.schmidt&comgroup.de +33644 + Sytel Limited + Garry Pearson + garryp&sytel.com +33645 + MKS Inc. + Russell Robinson + russell&mks.com +33646 + Extelia + pierre-marc pinel + pierre-marc.pinel&extelia.fr +33647 + Mashlab / Huther & Sommer GbR + Oliver Sommer + oliver&mashlab.com +33648 + Ireth + Walter Summonte + walter.summonte&ireth.net +33649 + Pramana Inc + Guru Rajan + guru&pramana.com +33650 + AdaCore SAS + AdaCore contact + iana-contact&eu.adacore.com +33651 + Sparkplug Inc + Patrick Randall + engineering&sparkplug.net +33652 + Cousteau Network Development LLC + Joop Cousteau + jcousteau&gmail.com +33653 + Opulan Technologies Corp. + Zhiheng Gan + zgan&opulan.com +33654 + Posidex Techonologies Pvt Ltd + Bhavani Shanker C + madhusudan&posidex.com +33655 + OVOCENTRUM V+V s.r.o. + Petr Tuček + ptucek&euronet-group.cz +33656 + Wardsback Brotherhood + Frédéric CORNU + fcornu&wardsback.org +33657 + Harald Würger + Harald Würger + iana&wuerger.net +33658 + V.D.S. - Video Display Systems Srl + Mauro Barella + mbarella&vds-it.com +33659 + Università di Bari + Sabino Calo + s.calo&csi.uniba.it +33660 + Lower Colorado River Authority + Paul Griffin + Paul.Griffin&lcra.org +33661 + LineRate Systems, Inc. + John Giacomoni + John.Giacomoni&LineRateSystems.com +33662 + AfroDuck Productions + Andrew Butcher + abutcher&csee.wvu.edu +33663 + Manz Automation AG + Mathias Glaser + mglaser&manz-automation.com +33664 + Conei Cia. Int. de Inversiones + Iván Baranda + ibaranda&conei.com +33665 + Liveperson + Leonid Mirsky + leonid&liveperson.com +33666 + STMIK Mikroskil + Des Dulianto + desdulianto&mikroskil.ac.id +33667 + SADA Systems Inc. - Apps Division + Nicky Parseghian + nicky.parseghian&sadasystems.com +33668 + ECtel LTD. + Eyal Zamir , Chief System Architect + eyalz&ectel.com +33669 + Kevin R. James Enterprise Services + Kevin R. James + kevin&jameses.net +33670 + Arctic Lake + Niels Buhl + niels.buhl&arcticlake.com +33671 + HESTIA FRANCE + Mr Daniel SPITZ + hestia&hestia-france.com +33672 + intecsoft GmbH + Marco Grunert + marco.grunert&intecsoft.de +33673 + Lacerda Sistemas de Energia + Enrico Caruso + enrico&lacerdasistemas.com.br +33674 + NUVICO + Royal Lee + royal.l&nuvico.com +33675 + Zydax, LLC + Mark Swayne + iana&zydax.com +33676 + Happy Jack Software LLC. + Mona Gamboa + ian&happyjacksoftware.com +33677 + HiWiFi Networks + Jie Song + js&hiwifi.net +33678 + Fähnle IT-Solutions + Matthias Fähnle + info&faehnle-it-solutions.de +33679 + The University of Waikato + Dougal Mair + maird&waikato.ac.nz +33680 + Bankart, d.o.o. 
+ Matej Bučar + matej.bucar&bankart.si +33681 + Deutsche Papier Vertriebs GmbH + Christian Amberger + AmbergerC&deutsche-papier.de +33682 + Tyze.com + Jerry Hutchings + jerry&tyze.com +33683 + Agilink Systems Corp. + Wolfgang Tolkien + wtolkien&agilink.ca +33684 + eCern Inc + Robert Stanton + stanton&ecern.com +33685 + Netgroup A/S + Hroi Sigurdsson + hs&netgroup.dk +33686 + NetVitesse S.A.R.L + Antoine Roux + admin&net-vitesse.com +33687 + NETAVIS Software GmbH + Lazar Jozsef + jlazar&netavis.net +33688 + Packet Power + Paul Bieganski + paul&packetpower.com +33689 + mediQ + Stephan Goeldi + goeldi&goeldi.com +33690 + shenzhen etoptech co.,Ltd + Genbay.Zheng + Genbay.Zheng&163.com +33691 + MOSTCOM Ltd. + Sergey Kuznetcov + ksn&moctkom.ru +33692 + Meona GmbH + Matthias Wuttke + info&meona.de +33693 + SMALS + Monitoring Office + supervision&smals.be +33694 + Knexus Research Corporation + Erskin L. Cherry + erskin.cherry&knexusresearch.com +33695 + National Oilwell Varco + Ruijing Ou + ruijing.ou&nov.com +33696 + Trulix Systems + Tim Lyons + tim.lyons&trulix.com +33697 + Klaus Becker Kopiersysteme -Service-GmbH + Sebastian Schnitzler + info&bueroit.de +33698 + ACRA CONTROL LTD. + Nikki Cranley + cranley&acracontrol.com +33699 + TAG Video Systems + Waldman Gal + galwa&tagvs.com +33700 + Saginaw Valley State University + Timothy Patterson + tjpatter&svsu.edu +33701 + Cathay Pacific Airways Ltd. + Jacky Cheng + spajacc&cathaypacific.com +33702 + Technalogix + Thomas King + thomas&technalogix.ca +33703 + ziggurat29 + David Lemley + dev&ziggurat29.com +33704 + Middle Office Solutions, LLC + Robert Tjia + rtjia&middleofficesolutions.com +33705 + Global 360 Inc + Dave Berglund + dave.berglund&global360.com +33706 + OPVISION Technology Co.,Ltd + Dai xinghe + daixinghe&163.com +33707 + United International College + Evan ZHU + evanzhu&uic.edu.hk +33708 + SPIT elektromechanica BV + Robert Boers + robert.boers&spit.nl +33709 + OnTimeLine + Jan Leinemann + Leinemann&OnTimeLine.de +33710 + Tawasul Telecom + Mohammed Al-Kout + tawasuladmin&tawasultele.com +33711 + Campanja AB + Berner Setterwall + berner.setterwall&campanja.com +33712 + CSC PYXUS-2006 + Andrey Wacker + andrey_wacker&pyxus2006.com +33713 + WB Electronics S.A. + Andrzej Asztemborski + a.asztemborski&wb.com.pl +33714 + Neuralitic Systems Inc. + Sebastien Nobert + it&neuralitic.com +33715 + Axcient, Incorporated + Dave Garnett + dave&axcient.com +33716 + Jiangsu Hengtong Photoelectric Stock Co., Ltd + Wang Chong + chenb&hengtonggroup.com +33717 + Nirmitsu Technologies Private Limited + Ganesh Gudigara + ganesh&nirmitsu.com +33718 + Lava-Tech + Ren wenyue + suchasplus&gmail.com +33719 + EBU / UER + Bajas françois + bajas&ebu.ch +33720 + ELTA Systems Ltd. + David Tayouri + dtayouri&elta.co.il +33721 + Thrane & Thrane + Finn Berthelsen + finn.berthelsen&cobham.com +33722 + MC SECURITY Co., Ltd. + Sachio Ohnishi + s.ohnishi&mcsecurity.co.jp +33723 + laurentum.de + Wolfgang Lorenz + wo.lorenz&gmail.com +33724 + EBDSoft + Francesc Guasch + frankie&ebdsoft.com +33725 + Bimeng Communications System Co.,Ltd + Tianhang Chen + chentianhang&bmcsys.com.cn +33726 + White Sands Technology + Michael A. O'Donoghue + modonoghue&whitesands.com +33727 + Caerian Inc + Simon Wheatley + scw&caerian.com +33728 + Adaptive AI, Inc. + Kirill Katsnelson + kkm&adaptiveai.com +33729 + Beijing Pannet Information Technology Co., Ltd. + Zhanchun Gao + bj-pannet&163.com +33730 + Stefan aulbach + Stefan Aulbach + info&stefan-aulbach.de +33731 + Asito B.V. 
+ Robert Boers + r.boers&asito.com +33732 + NetResults S.r.l. + Stefano Lucetti + stefano.lucetti&netresults.it +33733 + sw-k software & system engineering + Mr. Sebastian Woelk + sebastian.woelk&sw-k.net +33734 + chini.info + Davide Chini + oid&chini.info +33735 + Buzzlogic Inc. + Robert B. Carleton + bcarleton&buzzlogic.com +33736 + fylleri.is + Hlynur Kristjánsson + hlynur&fylleri.is +33737 + Soprano Design Pty Ltd + Peter Cleary + support&soprano.com.au +33738 + Animelliure + Marc Serra Romero + mad93&majomo.com +33739 + ANTRICE S.A + Ioan Petrescu + iopet&antrice.ro +33740 + Interdatanet - DET + Franco Pirri + fpirri&gmail.com +33741 + Yellowstone Soft + Hermann Betz + betz&yellowstone-soft.de +33742 + Net4Promotions + Biswajit Das + net4promotions&gmail.com +33743 + Zentrum für Soziale Innovation + Johannes Simon + simon&zsi.at +33744 + Application Solutions (Safety and Security) Ltd. + Andy Binks + abinks&asl-control.co.uk +33745 + College for Creative Studies + Matt Halcrow + mhalcrow&collegeforcreativestudies.edu +33746 + imagine that + jeff bachiochi + jeff&imaginethatnow.com +33747 + OnlineOK + Micheal Smith + xulfer&cheapbsd.net +33748 + Wilcox Technologies + Andrew Wilcox + AWilcox&Wilcox-Tech.com +33749 + VoLGA Forum + Kurt Bischinger + kurt.bischinger&t-mobile.at +33750 + Iritel A.D. Beograd + Zoran Miljkov + info&iritel.com +33751 + OBEJ di Galantini Andrea + Andrea Galantini + andrea&obej.it +33752 + Gen-Z Technologies inc. + Eric Pelletier + ericp&gardeplus.ca +33753 + Beijing CircLoop Digital Image Tech.Co.,Ltd + ZhuXuzhen + circloop&vip.163.com +33754 + Duelco A/S, Denmark + Harry Vohs + hav&duelco.dk +33755 + www.capad.de + Christian Adamski + christian.adamski&googlemail.com +33756 + keimatsu.com + Dan Keimatsu + keimatsu&keimatsu.com +33757 + Oblong Industries, Inc. + Kyle Thomas Miller + kyle&oblong.com +33758 + Inquinox, LLC + Jack McKinney + jack_mckinney&inquinox.com +33759 + Golden Micro Systems Corporation + Yukiko Suzuki + suzuki&g-micro.co.jp +33760 + Ministerstvo Finansov Chelyabinskoy oblasti + Alexander Povyshev + minfin74&mail.ru +33761 + OptimSys, s.r.o. + Pavel Cenek + cenek&optimsys.cz +33762 + Global Display Solutions SpA + Maurizio Garabunato + m.garabunato&gds.com +33763 + AD Holdings plc + Chris Blood + adh.snmp&googlemail.com +33764 + I-bridge B.V. + Luis Santos + luis.santos&ibridge.nl +33765 + Sapienza Consulting Ltd + Saso Bruzzese + saso.bruzzese&sapienzaconsulting.com +33766 + panagenda GmbH + Franz Walder + admin&panagenda.com +33767 + Korean Broadcasting System (KBS) + Sung soo, Lee + deniro33&kbs.co.kr +33768 + Beijing ZhonghongLida Technology Development Co., Ltd. + Arthur Lee + webmaster&leadal.com +33769 + Universidad de La Habana + Abel Marrero Santos + marrero&uh.cu +33770 + Telcom Research + Brian Weppler + bweppler&telcomresearch.com +33771 + Academy Consult München e.V. + Martin Klenk + it&academyconsult.de +33772 + The Portalus Group + Donald Norwood + dnorwood&portalus.com +33773 + Weather Kat + Matthew Soffen + truffle-iana&weatherkat.com +33774 + Pyro Networks + Yogendra Agarwal + yogendra.agarwal&pyrogroup.com +33775 + Moscow Metering Company + Vladimir Vodolazkiy + vvv&miccorp.ru +33776 + Orca Interactive LTD + Zevik Knobler + zevik.knobler&orcainteractive.com +33777 + Cadmos microsystems S.r.l. + Federico Lanza + f.lanza&cadmos.it +33778 + TIKLA tv + Deren Bdag + deren95&live.com.au +33779 + Stefan Seelmann + Stefan Seelmann + mail&stefan-seelmann.de +33780 + Grosvenor Technology Ltd. 
+ Stephen Woolhead + siw&gtl.biz +33781 + FreeKey::Labs s.l. + Javier Lasheras Peña + lasi&freekeylabs.com +33782 + Pictage, Inc. + Michael Brown + mbrown&pictage.com +33783 + Chinarainbow Technology CO.,LTD + zhoupeng + pengzhou&chinarainbow.com.cn +33784 + AvFinity, L.L.C. + Steve D. Perkins + sdp&avfinity.com +33785 + La Salle University + Mark Purcell + purcell&lasalle.edu +33786 + Iquall S.A. + Matias G. Lambert + matias.lambert&iquall.net +33787 + KaHo Sint Lieven + Jorn Lapon + jorn.lapon&kahosl.be +33788 + juris GmbH + juris Rechenzentrum + domain&juris.de +33789 + Medical IT + VILSON COBELLO JUNIOR + vilson.cobello&medicalit.com.br +33790 + InFocus Corporation + Kevin Thompson + kevin.thompson&infocus.com +33791 + XelNet + Raphaël Barrois + oid&xelmail.com +33792 + Noisebridge + Jonathan Lassoff + ops&noisebridge.net +33793 + inviCRO, LLC + Christian Lackas + lackas&invicro.com +33794 + Myungmin Systems, Inc. + Ashuaria Lee + ashuaria&myungmin.com +33795 + SOLIT Information Security + Joerg Folz + info&solit.lu +33796 + ROMTELECOM S.A. + Mihai Cosma + ns_sa&romtelecom.ro +33797 + ESRI Portugal + Sandro Batista + sandro.batista&esri-portugal.pt +33798 + Suefke Softwareentwicklung + Martin Suefke + ianapen.20.masuefke&recursor.net +33799 + LinkSat + Jack Thomasson + jkt&linksat.com +33800 + CARFAX INC + Larry Dorman + mohelpdesk&carfax.com +33801 + Colorquick, LLC + Philip Panelli + ppanelli&crwgraphics.com +33802 + Xensor Systems Incorporated + Kurt Van Laar + xtensor&xtensor.com +33803 + Manor AG + Urs Schmid + urs.usm.schmid&manor.ch +33804 + Chrisalys + Christopher Tarento + contact&chrisalys.org +33805 + Peak Drive Solutions Ltd + Andrew Eastham + andy-snmp-admin&peakdrive.com +33806 + UTILISE IT LIMITED + Andrew Douthwaite + hostmaster&utilise-it.co.uk +33807 + Internexo Ltda. + Danton Nunes + danton.nunes&inexo.com.br +33808 + Jim Montague Software LLC + Jim Montague + pennum&jimmontaguesoftwarellc.com +33809 + Topomaro + Roberto Giovanardi + roberto.giova&gmail.com +33810 + Masterhost, CJSC + Dmitry V Krikov + iana&masterhost.ru +33811 + Idendego Inc. + Brian Pederson + security&idendego.com +33812 + Goodus, Inc. + Yoonkwon Kim + yoonkwon.kim&goodus.com +33813 + Invenso + Brecht Yperman + brecht.yperman&invenso.com +33814 + IProNet Sistemas, S.A. + Mikel Martin + mikelmartin&ipronet.es +33815 + Archean Technologies + Julien Vermillard + jvermillard&archean.fr +33816 + PROFILAX, s.r.o. + Jozef Remenec + remenec&profilax.sk +33817 + CopyKnight Technologies Ltd + Martin North + hostmaster&copyknight.com +33818 + asco + Ralf Doeblitz + doeblitz&netzverwaltung.net +33819 + StreetGeek + Michael Farrell + michael.farrell&streetgeek.com.au +33820 + DECOIT GmbH + Andre Brandt (Systemmanagement) + systemmanagement&decoit.de +33821 + Proliphix, Inc. + Paul Ciarfella + pciarfella&proliphix.com +33822 + Computer Task Group, Inc. + Mike Beekey + michael.beekey&ctg.com +33823 + comspace GmbH & Co. KG + Holger Paschke + support&comspace.de +33824 + Spirent Communications of Rockville, Inc. + Ntinos Solos + ksolos&spirentcom.com +33825 + ECHO KEISOKUKI Co.,Ltd. + NAOKI ISHIHARA + field_2005&clock.co.jp +33826 + Lutong Network Technology Co., ltd + Ben Xu + benx&sjtu.org +33827 + Dan Hirsch + Dan Hirsch + thequux&gmail.com +33828 + NTT Plala Inc. + Dan Keimatsu + n-mail&plala.co.jp +33829 + Novel Communication Co., LTD. + ZhaoGuangLu + boyce&126.com +33830 + One97 Communication (P) Limited + Avnindra P.
Singh + avnindra.singh&one97.net +33831 + noFilis AutoID GmbH + Martin Dobler + martin.dobler&nofilis.com +33832 + Biologische Heilmittel Heel GmbH + Christian Huber + huber.christian&heel.de +33833 + Nettare Srl + Giuseppe Risi + giuseppe.risi&nettare.net +33834 + Salford + Zdenek Mejzlik + zdenek.mejzlik&salford.cz +33835 + REGS DE CATALUNYA, S.A. + Mario Lacambra Calahorra + mlacambra&gisa.cat +33836 + Alliaria, S.L. + Carles Giner + carles.giner&alliaria.com +33837 + GESTIÓ D'INFRAESTRUCTURES, S.A. + Mario Lacambra Calahorra + mlacambra&gisa.cat +33838 + Voxline Contact Center + Alex Bertuqui + abertuqui&voxline.com.br +33839 + CONFESOL + Márcio André + marcio&confesol.com.br +33840 + LEDZGO + Stephane Van Geystelen + vangeys&ledzgo.com +33841 + ArenaNet + Jared Chavez + jared&arena.net +33842 + SIBIO + Thierry Vaillant + sysadmin&sibio.fr +33843 + ProIS d.o.o. + Sasa Dautovic + office&prois.hr +33844 + Blastwave.org, Inc. + Dennis Clarke + dclarke&blastwave.org +33845 + Microlab Snc + Giulio Sfoglietti + giuliosfoglietti&microlabprogetti.com +33846 + DISA CSD MONTGOMERY + Reggie Adkins + reginald_adkins&emainc.com +33847 + maXan Pty Ltd + Grant Maxwell + grant.maxwell&maxan.com.au +33848 + Hot Solder Ltd + Iain Derrington + Iain.Derrington&hotsolder.co.uk +33849 + Universidad Autónoma de Colombia + Edward J Morales + webmaster&fuac.edu.co +33850 + Capgemini Spain SLU + Pedro J. Molina + pmolinam&capgemini.es +33851 + Combilent A/S + Brian Johansen + BRJ&combilent.com +33852 + Tractis + David Garcia + david.garcia&tractis.com +33853 + "STM" Co., Ltd + Vasily Redkin + support&ctm.ru +33854 + RP Compounds GmbH + Michael Laue + mlaue&rpcompounds.com +33855 + ADVtools + Sebastien ANDRIVET + sebastien&advtools.com +33856 + Ansgar Jonietz + Ansgar Jonietz + ansgar&jonietz.de +33857 + SECONS s.r.o. + Martin Hinner + info&secons.com +33858 + Aveco s.r.o. + Jiří Bašek + iana.adm&aveco.com +33859 + JRD + Enno Davids + enno06&metva.com +33860 + The Insitu Group + Andrew Hayes + andrew.hayes&insitu.com +33861 + CONGUIDE + Jens Hartmann + info&conguide.com +33862 + Large Binocular Telescope Observatory + Stephen Hooper + shooper&lbto.org +33863 + Mantaro Networks, Inc. + Ken McInerney + ietf&mantaro.com +33864 + Praktiker Services GmbH + Adam Bielak + PKIAdmin&praktiker.com +33865 + Atukorala Holdings Ltd. (formerly 'Squad 17 Limited') + Uditha Atukorala + info&aholdings.biz +33866 + Lionic Corp. + Roger Chien + roger.chien&lionic.com +33867 + Infrascape Technologies Limited + Imran Khan + ikhan&infrascape.com +33868 + DOSIsoft + MR Grandjean Pascal + grandjean&dosisoft.com +33869 + Kompas Xnet d.o.o. + Joze Markic + joze.markic&kompas-xnet.si +33870 + EXXOSS S.P.R.L. + Olivier Cant + olivier&exxoss.com +33871 + ZIBImed GmbH + Volker Zipfel + volker.zipfel&zibimed.de +33872 + Alpha Technologies, Inc. (formerly 'Cheetah Technologies L.P.') + Mark Bereit + mbereit&alpha.com +33873 + ARC Créations + Marc Laflutte + m.laflutte&arc-cre.com +33874 + Novita + Gregor Malensek + info&novita.si +33875 + National Bank of Ukraine + Max Kozlovsky + nbumailprog&gmail.com +33876 + Van Hameren ICT diensten + Peter van Hameren + peter&hameren.com +33877 + Publications Office + Marita Logan + opoce-liso&publications.europa.eu +33878 + Children's Miracle Network + Scott Lance + slance&childrensmiraclenetwork.org +33879 + Global IT Bilisim Hizmetleri Ltd. + Kivanc Oskay + koskay&globalit.com.tr +33880 + Isca Networks Corporation Ltd + Martin Cole + martin_cole&iscanetworks.com +33881 + ETM Electromatic Inc.
+ Wei Qin + wqin&etm-inc.com +33882 + Four Color Studio + Jason Silkey + jason&fourcolorstudio.com +33883 + The Bank of Lithuania + Rimvydas Zilinskas + rzilinskas&lb.lt +33884 + NSG + Steve Barrass + steve.barrass&nsg.com +33885 + Abhineet Singh Chauhan + Abhineet Singh Chauhan + ronin_b007&yahoo.com +33886 + UNICEF + Sean Boyd + SBoyd&unicef.org +33887 + Integrasul Solucoes em Informatica Ltda. + Edson Dino Salvati + edson&integrasul.com.br +33888 + Aqua Cooler Pty Ltd + Andrew Blackmore + andrew&aquacooler.com.au +33889 + Do-Style,INC. + Toshiaki Sakazume + toshiaki&do-style.com +33890 + KUB Ltd. + Khaustov Ivan + pavel&tomline.ru +33891 + goettert.net Internet Service + Daniel Goettert + info&goettert.net +33892 + Be IP s.a. + Steve Frécinaux + sfrecinaux&beip.be +33893 + Specialized Information Systems Ltd. + Vitaliy Verveyko + office&specinfosystems.com.ua +33894 + Aulofée + Cuir Lionel + aulofee&aulofee.com +33895 + The Joint Commission + Kurt Stehlik + kstehlik&jcaho.org +33896 + DDO Organisation sarl + Donnet, Sylvain + sdonnet&ddo.net +33897 + GSA + Raju Bhupatiraju + raju.bhupatiraju&gsa.gov +33898 + Synovel Software Technologies Pvt. Ltd. + Siva Edupuganti + sivakrishna&synovel.com +33899 + rahmn.com, llc + Michael Rahmn + michael&rahmn.com +33900 + Next Level Security Systems, Inc. + Peter Jankowski + pete&nlss.com +33901 + ezwww.ch + Thierry Bagnoud + hosting&ezwww.ch +33902 + Tiju Paul + Tiju Paul + tijupi&yahoo.com +33903 + nenunic + Yingzhe Hou + houyz&nenu.edu.cn +33904 + Sergei Butakov + Sergei Butakov + sergei&bslos.com +33905 + iVoltaire.org + Nicolas A. Bérard-Nault + nicobn&gmail.com +33906 + WANTS Inc. + Hidehiro Sasaki + sasaki07&wantsinc.jp +33907 + SECURITAS DIRECT ESPAÑA S.A.U. + Jose Antonio Aparicio Rubio + joseantonio.aparicio&securitasdirect.es +33908 + Seowonintech Co., Ltd + Jude Jung + gsjung&seowonintech.co.kr +33909 + Open Joint Stock Company Belvnesheconombank + Sergey Kazuk + kss&bveb.minsk.by +33910 + BSN Medical GmbH + Hartmut Volkmar + hartmut.volkmar&bsnmedical.com +33911 + Cerillion Technologies Limited + David Ball + david.ball&cerillion.com +33912 + FULLSEE Electronics Co., Ltd. Beijing + Scan Li + fullseetech&126.com +33913 + ClusterDB.com + Andrew Morgan + andrew&clusterdb.com +33914 + Monmouth Telecom + Mark Stevens + oid-admin&monmouth.com +33915 + Middle East Number Thirteen -Financial Services + Mohamad Amin Peelechi + m.amin.peelechi&gmail.com +33916 + Freeman Decorating Services, Inc. + Tommy Mhire + tommy.mhire&freemanco.com +33917 + Intact Financial Corporation + Richard Daigle + richard.daigle&intact.net +33918 + Top Producer Systems + Morgan Crocker + morgan.crocker&topproducersystems.com +33919 + ImmediaTV Corporation (formerly 'Magenta Video Networks') + Ciro Noronha + ciro&immediatv.com +33920 + DLT Limited (formerly 'Remote Management Systems Limited') + Darren Lucinsky + DL&DLTLimited.com +33921 + Molowa + Olivier Pallière + olivier&molowa.com +33922 + Byzoro Networks Ltd. 
+ xie yongjiu + xieyongjiu&byzoro.com +33923 + Axione + Thomas FAGART + thomas.fagart&axione.fr +33924 + Ebor Computing Pty Ltd + Bill Cumpston + snmp.info&ebor.com +33925 + Progetto 3000 srl + Alessandro Sebastianutti + snmp-admin&progetto3000.com +33926 + buzzword.org.uk + Toby Inkster + mail&tobyinkster.co.uk +33927 + Cambridge Leaning Center + Durk Kim + durkkim&gmail.com +33928 + Ibuildings + John Le Drew + john&ibuildings.com +33929 + Universitat Jaume I + Juanjo Villaplana + villapla&si.uji.es +33930 + Contineo Systems + Milton Lie + milton&contineosystems.com +33931 + IK-TECH + Joerg Weichbrodt + jw&ik-tech.de +33932 + MEDIACAST CO., LTD. + Satoru Nonaka + nonaka&mcast.co.jp +33933 + QOX,Inc + Katsumi Honda + khonda&qox.jp +33934 + XenData Limited + Mark Broadbent + xendata&xendata.com +33935 + RedFox Communications Ltd + Mark Fenwick + mark&redfox.co.uk +33936 + Sentilla Corporation + Pradip De + pradip&sentilla.com +33937 + LinuxBox.cz + Petr Kopecky + info&linuxbox.cz +33938 + Ordina + Alwin Warringa + alwin.warringa&ordina.nl +33939 + Bolt Court Consulting + Jamie Hosker + admin&boltcourtconsulting.com +33940 + Wuhan Hongxin Telecommunication Technologies Co.,Ltd + Zheng Zhi + zhengzhi&hxct.com +33941 + Business Logic Systems + Stefan Szalai Dragos + stefan.dragos&gmail.com +33942 + NakedApe + Anton Emmerfors + anton.emmerfors&bredband.net +33943 + Faculty of Electrical Engineering Banja Luka + Mihajlo Savic + badaboom&etfbl.net +33944 + Compower Systems Inc. + Juan Gil Niehorster + jgil&compowersystems.com +33945 + OHO Interactive + Chris OConnor + chris&oho.com +33946 + Vencomm + Alexandre Vézina + avezina&vencomm.ca +33947 + MAGICONLINE + Adel Mezibra + amezibra&magic.fr +33948 + Azienda USL Ferrara + Boccafogli Luca + l.boccafogli&ausl.fe.it +33949 + Geotech Ltda + Ramiro Tostes + ramirobot&gmail.com +33950 + Zilogic Systems Pvt. Ltd. + Ganesan Paramasivam + iana&zilogic.com +33951 + taunusstein.net + Christian Felsing + hostmaster&taunusstein.net +33952 + ZimZahm Productions + Thomas P. Zahm + tpzahm&gmail.com +33953 + DLT Limited + Darren Lucinsky + DL&DLTLimited.com +33954 + International School of Beijing + Damir Tejic + dtejic&isb.bj.edu.cn +33955 + Better Place + Emek Sadot + emek.sadot&betterplace.com +33956 + Oversun + Timur Izhbulatov + t.izhbulatov&mchost.ru +33957 + NCHOVY Inc. + Yang, BongYeol + xeraph&nchovy.com +33958 + Skytap + Lonnie Hutchinson + lonnieh&skytap.com +33959 + The Egyptian Company for Networks and Computer Security Services + Ossama Husain + info&snsegypt.com +33960 + Nordnet AB + Johan Tanner + johan.tanner&nordnet.se +33961 + enCentro Systems Inc + Celso Meira + celso.meira&encentro.eu +33962 + icuetv + George Kurian + gkurian&icuetv.com +33963 + OCCN Ltd. + Thomas Steinbach + pen&occn.co.uk +33964 + ParaVolve Development Corporation + James W. Brinkerhoff IV + jwb&paravolve.net +33965 + R-DATA Sp. z o.o. + Grzegorz Szczyglowski + grzegorz.szczyglowski&gmail.com +33966 + FUNAI ELECTRIC CO., LTD. + Masahiro Matsuo + matuo&funai.co.jp +33967 + The Now Factory + Paul Colgan + paul.colgan&thenowfactory.com +33968 + Puleo Electronics Inc. + Jim Montesion + snmp&puleoinc.com +33969 + Canfield Research Group + Robb Canfield + robb&canfield.com +33970 + TradeHelm, Inc. + Frank Gallagher + frank.gallagher&tradehelm.com +33971 + VanceInfo Technologies Inc.
+ Jianmin Gao + gao_jianmin&vanceinfo.com +33972 + RFPA + ROUSSELET Michel + rfpa&rfpa.com +33973 + Net Entertainment NE AB + Christofer Olofsson + chol&netent.com +33974 + Agency.com + Craig Webster + cwebster&agency.com +33975 + GnomishThoughts + Ryan McHugh + ryan.mchugh&gnomishthoughts.com +33976 + HackThisSite + Kage Konjou + admin&hackthissite.org +33977 + BLIP + Dan Granville + dan&blipcreative.com +33978 + YAGI ANTENNA INC. + mimura makoto + mimura.makoto&yagi.h-kokusai.com +33979 + Hong Kong Trade Development Council + Shirley Yik + shirley.sl.yik&hktdc.org +33980 + DDB Link + Julien LEROY + jl&ddb.fr +33981 + Tek-Ops.com + Michael Schenck + mschenck&tek-ops.com +33982 + New Internet Technologies Sp. z o. o. + Michał Kiędyś + mk&newit.pl +33983 + Total Tel International Pty Ltd + Kim Beveridge + info&ttiadvant.com +33984 + Spark::red LLC + Troy Anderson + troy&sparkred.com +33985 + MigraTech + Philippe CHAUVAT + infos&migratech.fr +33986 + Universidade Federal de Pernambuco + Jose Antonio Monteiro de Queiroz + jose.queiroz&ufpe.br +33987 + Lakstins Family, LLC + Brian Lakstins + brian&lakstins.com +33988 + Comm.SEC di Valter Foresto + Valter Foresto + valter.foresto&gmail.com +33989 + Hyette S.A. + Manuel Cabrera Caballero + manuel.cabrera&hyettemail.com +33990 + Cassandra Research Center + Matteo Slaviero + matteo.slaviero&we-cassandra.com +33991 + Beaumaris Networks + Sunil Mudholkar + sunil&bninet.tv +33992 + Louisiana Community and Technical College System + Lee Hammett + lhammett&lctcs.edu +33993 + Coalinga State Hospital + Chris Kashuba + ckashuba&csh.dmh.ca.gov +33994 + Nei til EU + Atle Solbakken + atle&neitileu.no +33995 + Pixip.net Gmbh + Michael Raab + info&pixip.net +33996 + Diateam + Guillaume Prigent + snmp&diateam.net +33997 + OneMedNet Corporation + Jeffrey Yu, MD + jeffrey.yu&onemednet.com +33998 + Agily Networks Inc. + Kirill Pertsev + kika&agily.com +33999 + Xiangmin Technology Co.,Ltd + Ran Bi + biran&xiangmin-tech.com +34000 + BYNE (formerly 'SIMB Tecnologia') + Vitor Espindola + vitor&byne.com.br +34001 + ingrifo + Kamil Kaluzny + rev&o2.pl +34002 + WriteX ltd + Victor V Kudlak + info&kentos.ru +34003 + AdRiver + Victor Kudlak + mega&adriver.ru +34004 + PETROFAC INT LTD + ABDUL ARSHAD + abdul.arshad&petrofac.com +34005 + Community Technology + George Lee + communitytechnology&yahoo.com +34006 + Number Kuus Konsultatsioonid OÜ + Raivo Metsvaht + raivo.metsvaht&kuus.ee +34007 + SKYSOFT ATM + SEBASTIEN DUX + sebastien.dux&skysoft-atm.com +34008 + Tech Mahindra + Vandana Awatramani + vandana.awatramani&techmahindra.com +34009 + YGOMI Europe Ltd. + Daniel Nanassy + Daniel.Nanassy&ygomi.com +34010 + Paraflux, Inc. + Scott Gimenez + hostmaster¶flux.com +34011 + Donet, Inc. + David Mezera + david&donet.com +34012 + Internet Doorway, Inc. + John Havard + admin&netdoor.com +34013 + CBTS + Nathan Nelson + nathan.nelson&cbts.com +34014 + B1 Systems GmbH + Tobias Wolter + towo+iana-pen&b1-systems.de +34015 + Pinghu Feihong Technology Co.,LTD + Shujian Li + lsj115&163.com +34016 + management of procurement for state needs of Krasnodar Region + Roman Osipov + romanosipov&gmail.com +34017 + telecommunication software gmbh + Markus Meixner + office&telecomsoftware.com +34018 + MAC GUFF LIGNE + Bruno MAHE + mglsys&macguff.fr +34019 + Fortenova Grupa d.d. + Dario Miculinic + admin&agrokor.hr +34020 + OpenHTTPD + Peter Müller + iana&openhttpd.net +34021 + Hosteam + Bartosz Waszak + bartosz.waszak&hosteam.pl +34022 + ITG SOLUTIONS S.A.C. 
+ Hector Zapata + hector.zapata&itgsolutions.com.pe +34023 + zumbi + gustavo panizzo + gfa&zumbi.com.ar +34024 + Department of Homeland Security + Raymond G. Melusky Jr. + Raymond.melusky&dhs.gov +34025 + Accelerated Data LLC + Kevin Cornell + kevin&xlr8edllc.com +34026 + Alphalink + Dominique Broeglin + iana&alphalink.fr +34027 + Qumu, Inc. + David Bukhan + dbukhan&qumu.com +34028 + Idaho Power Company + Shaun Skidmore + ipcosnmp&yahoo.com +34029 + TLSPU (formerly 'HaqaSoft') + Adrian Hungate + ale.hungate&gmail.com +34030 + metatux.net + Lars Buerding + mtx-oid&metatux.net +34031 + ViewStor Pty Ltd + Kean Lim + kean.lim&viewstor.net +34032 + Amateur Radio Club (ARC) ITB + Adityo Jiwandono + jiwandono&gmail.com +34033 + Unix Linux Authority Kft + Gyula Afra + hq&unixlinuxauthority.com +34034 + Active Web Solutions Ltd + Richard Prodger + richard.prodger&aws.net +34035 + IPsoTV + Maria ARNAUD + maria.arnaud&ipsotv.com +34036 + Benchmark Group + LE BARS Guillaume + lebars&benchmark.fr +34037 + European Nazarene College + J. Alan Schrock + itdirector&eunc.edu +34038 + USGN + Scott Wessels + swessels&usgn.net +34039 + Conversant Solutions Pte Ltd + Yew-Jin Chua + yewjin&conversant.com.sg +34040 + OrgLDAP Project + Timur Izhbulatov + timochka&gmail.com +34041 + unattended-gui + Mario Gzuk + gzuki&users.sourceforge.net +34042 + T-Mobile Hrvatska d.o.o. + Damir Perkov + damir.perkov&t-mobile.hr +34043 + Suffolk University ITS + Keith Hersh + khersh&suffolk.edu +34044 + Adesta LLC + Teri Shindo + tshindo&adestagroup.com +34045 + Vex + Rafael Ugolini + rafael.ugolini&vexcorp.com +34046 + Midwatch Systems Inc. + Bruce Claremont + mid1&midwatchsystems.com +34047 + THENEWPASTAWAVE.COM + FERDINANDO FERRARI + fferrari_it&yahoo.it +34048 + Novell Consulting Ukraine Ltd + Andriy Trotsenko + ATrotsenko&novell.com +34049 + Spirtech + Frederic Levy + contact&spirtech.com +34050 + Cloudhopper, Inc. + Joe Lauer + joe.lauer&cloudhopper.com +34051 + Universidade Estadual de Ponta Grossa + Luiz Gustavo Barros + luizgb&uepg.br +34052 + Mount Desert Island Biological Laboratory + Roy McMorran + mcmorran&mdibl.org +34053 + Associação Hospitalar Santa Rosália + Geraldo Lopes de Souza + ti&ahsr.com.br +34054 + Australian Nursing Federation + Will Dowling + will.dowling&anfiuwp.org.au +34055 + Vietnam National University, Hanoi + Dzung Phung Chi + dungpc&vnu.edu.vn +34056 + Tranquil Hosting + Chris Thunes + cthunes&tqhosting.com +34057 + Beehive Security + Anthony Iacobucci + steve&beehivesecurity.com +34058 + Babicom + Steve Babineau + babineau&gmail.com +34059 + NII "Vektor" + Pustozerov Andrey + pustozerov_aa&nii-vektor.ru +34060 + Nexgen Technologies + Marcel Manning + netpro25&gmail.com +34061 + COS-CRPF + yann GUILLOUX + yguilloux&cos-crpf.com +34062 + Mosart Medialab AS + Morten Larsen + morten&mosartmedialab.no +34063 + Hyves / StartPhone Ltd + Roald van Loon + noc&hyves.nl +34064 + Böcherer Angewandte Informatik + Wolfgang Böcherer + info&bai.de +34065 + Ricoh Europe PLC + Ben Robinson + ben.robinson&ricoh-europe.com +34066 + Interpublic Group of Companies, Inc. + Martin Wilkins + Martin.Wilkins&interpublic.com +34067 + OWVAT + Charles Sauerbier + charless&clarityconnect.com +34068 + GAI-Tronics Corporation + Joseph Walter + JWalter&gai-tronics.com +34069 + Information Security Services Association + Chris Spohn + cspohn&issa.org +34070 + TeachMKs + Jonathan Compton + JECompton&gmail.com +34071 + Hearst Communications Inc. + Jerry W. Talbot II + jtalbot&hearstsc.com +34072 + NBEE Embedded Systems S.L. 
+ Miguel Angel Ajo + miguelangel&ajo.es +34073 + ISS + Sherif Elian + sherif.elian&issholding.com +34074 + IDP Co.,Ltd. + Kouji Mikami + stdreg&idp-jp.com +34075 + Deltatec - Tecnologia de Sistemas Lda + José Semedo + jsemedo&deltatec.pt +34076 + Datakom Ltd + Metin Hekimoglu + datakom&datakom.com.tr +34077 + BRVZ Bau- Rechen und Verwaltungszentrum GmbH + Norbert Gaberle + it&bauholding.com +34078 + Informtekhnika & Communication, LLC + Andrey A. Ramazov + ramaz&infotek.ru +34079 + Axios Systems + Scott Leckie + scott.leckie&axiossystems.com +34080 + rtCamp Solutions Pvt. Ltd. + Rahul Bansal + rahul&rtcamp.com +34081 + INSYS Microelectronics GmbH + Markus Heider + mheider&insys-tec.de +34082 + CitySync Limited + Nico Bekooy + support&citysync.co.uk +34083 + Erayd LTD + Steven Gilberd + admin&erayd.net +34084 + Universidade Federal de Juiz de Fora + Rodrigo Costa Duarte + rodrigo.duarte&ufjf.edu.br +34085 + AWeber Communications, Inc. + Ryan Steele + ryans&aweber.com +34086 + Talari Networks + John E Dickey + jdickey&talari.com +34087 + Ultra Electronics, Nuclear Sensors & Process Instrumentation + Ernest Cisneros + ernest.cisneros&ultra-nspi.com +34088 + Visioneer Inc + John Dexter + john_dexter&visioneer.com +34089 + Newport Electronics, Inc. + Kei Ho + kho&newportus.com +34090 + TFB Technology Ltd. + Nicola Worthington + nicolaw&tfb.net +34091 + Benchlabs Limited + Will Wright + will&benchlabs.com +34092 + MyMail PLC + Lin Htun + lh&mymail.net.mm +34093 + UAB Profimus + Donatas Cerniauskas + info&profimus.lt +34094 + Fanamoj (JSC) + Morteza Razavi + info&fanamoj.com +34095 + Gymnazium Jaroslava Seiferta + Jiri Bohac + jbohac&jikos.cz +34096 + Trilogical + Asaf Tvito + asaft&trilogical.com +34097 + Racktivity + Niko Vinken + nvinken&racktivity.com +34098 + farbwahl GbR + Stefan Michalsky + stefan.michalsky&farbwahl.de +34099 + Palestine Polytechnic University + Ghannam Aljabari + galjabari&ppu.edu +34100 + Tompkins Cortland Community College + Brandon Wood + woodb&tc3.edu +34101 + Aimetis Corp + Ryan Wightman + ryan.wightman&aimetis.com +34102 + inMeta.Networks + Johannes Heyder + jh&inmeta.net +34103 + BV-Tech S.p.A. (formerly 'Bluestone S.r.l.') + Mirko Innocenti + m.innocenti&bv-tech.it +34104 + Ceton Corp + Alexander Faucher + afaucher&cetoncorp.com +34105 + Local Website Consulting + Tim Drew + tim_drew&hotmail.com +34106 + TESCOMA s.r.o. + Petr Absolon + iana&tescoma.com +34107 + SELEX ES LTD (formerly 'SELEX GALILEO') + Davina Forster + davina.forster&finmeccanica.com +34108 + IAM Solutions GmbH & Co. KG + Michael Raunft + iana.contact&iam-solutions.de +34109 + G&G Fitness Equipment Inc. + Christopher Bennett + cbennett&livefit.com +34110 + Code Blue Corporation + Mick Doran + mdoran&codeblue.com +34111 + Confamiliares de Caldas + Gustavo Adolfo Gonzalez Gutierrez + redes&confamiliares.com +34112 + NetServe365 + Joel Reed + jreed&netserve365.com +34113 + CertifiCall + Florian MAURY + contact&certificall.fr +34114 + Mitchell International + Configuration Management or Stephanie Plein + configuration.management&mitchell.com +34115 + NIW Solutions + Ian West + ian&niw.com.au +34116 + EhangCommucation & Technology Ltd. + Alexander Fox + wuzl&ehangcom.com +34117 + Information System Products Co.,Ltd. + Eitaro Washio + washio&isp21.co.jp +34118 + HL komm Telekommunikations GmbH + Matthias Branzko + it-admin&hlkomm.de +34119 + Max Planck Institute for Human Cognitive and Brain Sciences + Dr. Helmut Hayd + hayd&cbs.mpg.de +34120 + Laketec Communications Inc. 
+ Steve Foutty + sfoutty&laketec.com +34121 + Alias + Patrice Peyrano + support&alias.fr +34122 + Retrix Hosting, Inc. + Patrick Gibson + patrick&retrix.com +34123 + Akitogo Internet and Media Applications GmbH + Gunnar Lieb + admin&akitogo.com +34124 + Digital Measures + Scott Severtson + tech&digitalmeasures.com +34125 + Beyond Semiconductor d.o.o. + Mitja Pufic + mitjap&beyondsemi.com +34126 + Infocon S. A. + Francisco J. Guzman + javier_guzman&ieee.org +34127 + Allgemeiner Deutscher Fahrrad-Club Landesverband Nordrhein-Westfalen e.V. + Jan Bartels + penregistration&adfc-nrw.de +34128 + Fabri + Alexander Stanley + alexanderwstanley&gmail.com +34129 + TUBITAK BILGEM BTE + Bulent CANDAN + bulent.candan&bte.tubitak.gov.tr +34130 + ALCEA + M VO-QUI BRUNO + bvoqui&alcea.fr +34131 + United In Song + Rocky Skinner + admin&unitedinsong.com.au +34132 + First Cash, Inc + Unix Admins + unixadmins&firstcash.com +34133 + Safety Vision, L.P. + Raymond Parfett + rparfett&safetyvision.com +34134 + OpenTTD + Patric Stout + truebrain&openttd.org +34135 + Forever Home Improvement + Gary Gies + gies&google.com +34136 + VeNMSOL Technologies + VINAYAGAM Mariappan + vinayagam_m&hotmail.com +34137 + Athens Wireless Metropolitan Network (AWMN) + Socrates Panoussiou + admin&awmn.net +34138 + CER FRANCE POITOU-CHARENTES + DIDIER dominique + ddidier&86.cerfrance.fr +34139 + Vasista Systems Inc + Sharad Vasista + sharadvasista&hotmail.com +34140 + MAISONNEUVE Informatique + MAISONNEUVE Michel + maisonneuve01&orange.fr +34141 + EXPRESS + John Flores + jflores&express.com +34142 + Bank of Thailand + Ed Yampratoom + edy&bot.or.th +34143 + Netrounds Solutions AB (formerly 'Absilion AB') + Norbert Vegh + norbert.vegh&netrounds.com +34144 + Böhler Edelstahl GmbH & CoKG + Josef Scheiber + josef.scheiber&bohler-edelstahl.at +34145 + Károly Róbert College, Gyöngyös, Hungary + Kalcsó Gábor + mofli&karolyrobert.hu +34146 + Telogic Ltd. + Low Kim Seng + telogicsnmp&gmail.com +34147 + MANGO + Eduard Hernandez Vilanova + eduard.hernandez&mango.com +34148 + PRESENSE Technologies GmbH + Friedrich Delgado Friedrichs + iana&pre-sense.de +34149 + IMATRIX Corp. + Toshiyuki Hamasaki + tech&imatrix.co.jp +34150 + Syndicat IT & Internet + Niels Dettenbach + nd&syndicat.com +34151 + Peppe-Certification + Giuseppe Tumino + peppe.tumino&gmail.com +34152 + esd electronic system design gmbh + Oliver Thimm + oliver.thimm&esd.eu +34153 + Päivölän Kansanopisto + Asko Aulanko + tech&paivola.fi +34154 + Texprezzo Group BV + Ferenc Holzhauser + ferenc.holzhauser&textendo.com +34155 + Tendril Networks + Ben Hoyt + bhoyt&tendrilinc.com +34156 + Orel Analytics + Greg Sterijevski + gsteri1&hotmail.com +34157 + Instituto Nacional de Matemática Pura e Aplicada + Manoel Prazeres + prazeres&impa.br +34158 + Gouvernement de la Polynésie Française + Leonard TAVAE + leonard.tavae&informatique.gov.pf +34159 + Pelican Engineering, Inc. + Larry White + lwhite&pelicaneng.com +34160 + Certichron Inc + Todd Glassey + tglassey&certichron.com +34161 + CUSI(China unicom system integration limited corporation) + Wanchun Ye + yewc1&chinaunicom.cn +34162 + Symplified Inc. + Harout Hedeshian + hhedeshian&symplified.com +34163 + ITMemphis + David Aldridge + david&daldridge.com +34164 + Fubra Limited + Mark Sutton + servers&fubra.com +34165 + Convergence CT, Inc. + Donn Mukensnable + dmuk&convergencect.com +34166 + Stellar Switches, Inc. 
+ Donald Eastlake 3 + donald.eastlake&stellarswitches.com +34167 + Power Admin LLC + Doug Nebeker + support&poweradmin.com +34168 + Richer link co.ltd + Bill Chen + william&richerlink.com +34169 + Northsun.net + Karl Stevens + iana-reg&northsun.net +34170 + TTK S.A.S. + Stefan Balatchev + sbalatchev&ttk.fr +34171 + Wingpath Ltd. + Frank O'Gorman + frank.ogorman&wingpath.co.uk +34172 + Fixme Internet-käyttäjät ry + Santtu Pajukanta + tech&fixme.fi +34173 + PD Software s.r.o. + Pavel Dedík + pavel.dedik&pdsoftware.eu +34174 + FastOnlinePM + W. Greg Youree + wgyouree&gmail.com +34175 + Advanced Navigation & Positioning Corporation + Rob Hoeye + rhoeye&anpc.com +34176 + Hedgehog Oy Ltd + Kalle Rönkkö + kalle.ronkko&hedgehog.fi +34177 + Bond Technical Management + Michal Werner + michal&bondtm.com +34178 + Paragon Electronic Design Limited + Shane Harrison + shane.harrison¶gon.co.nz +34179 + Inland Revenue Department of Sri Lanka + Lal Silva + lal&photolk.com +34180 + Photop Technologies, Inc. + qiaozong lee + qiaozong.li&photoptech.com +34181 + DARSHAN DUDHORIA + DARSHAN DUDHORIA + DARSHAN&DUDHORIA.COM +34182 + Area SX + Massimo Battisti + info&areasx.com +34183 + Michael Karakashian (formerly 'Global Data Logistix (Pty) Ltd') + Michael Karakashian + michael&karakashian.co.za +34184 + Universität Hohenheim + Bjoern Breiner + bjoern.breiner&uni-hohenheim.de +34185 + EBANK ONLINE (BEIJING) TECHNOLOGY Co., LTD + chenzhe + chenzhe&chinabank.com.cn +34186 + TEAMCAST + Sebastien MAHEUT + support&teamcast.com +34187 + WAGNER Group GmbH + Claus-Peter Reinecke + claus-peter.reinecke&wagner.de +34188 + Klocktornet AB + Patrik Lindahl + patrik&klocktornet.com +34189 + Max Planck Institute for the science of light + Benjamin Klier + benjamin.klier&mpl.mpg.de +34190 + SSCANRFK54425 + charles sauerbier + charles.sauerbier&navy.mil +34191 + Human Factors International + Michael Adams + hostmaster&humanfactors.com +34192 + Sikjur + Christoph Kiechle + iana&sikjur.de +34193 + McKinney Identity Management Solutions, LLC + Shawn McKinney + shawn.mckinney&sbcglobal.net +34194 + Jerusalem College of Technology + avraham shir-el + avraham&jct.ac.il +34195 + ZID, Vienna University of Technology + Jan Zabloudil + jan.zabloudil&tuwien.ac.at +34196 + Catchy.net BV + Berend van der Laan + berend&catchy.net +34197 + EXANE + ALIBELLI TONY + tony.alibelli&exane.com +34198 + Pragmatic Source SARL + Farzad FARID + ffarid&pragmatic-source.com +34199 + West Virginia University + Christopher Boyer + ckboyer&mail.wvu.edu +34200 + Planet-Work + Frédéric VANNIÈRE + tech&planet-work.com +34201 + Neuberger Berman + Timothy Warren + tim.warren&nb.com +34202 + Thing in the Corner Enterprises, LLC + Wesley F. Arnold + wesley.arnold&dwwtc.com +34203 + TrustAlert + Arie Kanarie + penholder&trustalert.com +34204 + Netweb Technologies + Hemant Agrawal + hemant&netwebindia.com +34205 + Altrusoft AB + Hans Höök + hans.hook&altrusoft.se +34206 + Penguin Solutions + Florian Tischler + admin&penguin-solutions.at +34207 + tmira solutions + Miguel Angel Nieto Jimenez + manieto&tmira.com +34208 + Telefin S.p.A. 
+ Luigi Ferrarini + ferrarini&telefin.it +34209 + Vamed Management und Service GmbH & CO KG + Thomas Gallistl + domainadmin&vamed.com +34210 + Evangelische Kirche von Kurhessen-Waldeck + Torben Busse + edv.lka&ekkw.de +34211 + Hanil Display + Youngje Lee + yj0000&dreamwiz.com +34212 + Function International + John Hughes + jjhii&functioninternational.com +34213 + ooVoo, LLC + Michael Nordberg + michael.nordberg&oovoo.com +34214 + ACEnet + Craig Squires + csquires&mun.ca +34215 + Acksen Ltd + Richard Phillips + richard.phillips&acksen.com +34216 + Izmir University Of Economics + Aydin MUTLU + aydin.mutlu&ieu.edu.tr +34217 + SKY Network Television Ltd + Murray Knox + mknox&skytv.co.nz +34218 + DEW Industrial Electronics (Pty) Ltd, t/a SOS Industrial Electronics + Ron Edge + info&sos-ind.co.za +34219 + wer-kennt-wen.de + Frederik Padjen + frederik.padjen&wer-kennt-wen.de +34220 + CreAim B.V. + Rogier Pafort + pafort.rogier&creaim.nl +34221 + TELEFRANG AB + ROBERT WIIK + ROBERT.WIIK&TELEFRANG.SE +34222 + SOLTENLAR, S. L. + JUAN CARLOS FERNANDEZ GARACHANA + juancarlos&soltenlar.com +34223 + chaos Studio + Paul Adams + paul&chaos-studio.com +34224 + OpenSpirit S.A. + Pedro Morris + pmorris&openspirit.com.ar +34225 + S30LABS IT ADVANCED SYSTEMS + Fernando Marin + fmarin&s30labs.com +34226 + EION Inc + jaganbabu Rajamanickam + jagan&eion.com +34227 + Software Machine S.r.l. + Andrea Bertolotto + info&softwaremachine.eu +34228 + Radialpoint SafeCare General Partnership + Sebastien Coutu + sebastien.coutu&radialpoint.com +34229 + Codewalker Solutions + Gustavo Neto + gustavo.neto&codewalker.com.br +34230 + GCI Science & Technology Co. ,Ltd. + Xiaojun Guo + guoxiaojun&chinagci.com +34231 + Open Cobalt Project + John Dougan + jdougan&acm.org +34232 + Lucian-Alexandru Stanescu + Lucian-Alexandru Stanescu + luci&cnix.ro +34233 + Kruger + Fábio Ricardo Krüger Sartori + fabio&krugerconsultoria.com.br +34234 + Land Srl + Claudio Dosio + dosio&land.it +34235 + Cork IT + Alberto Reis + contato&corkit.com.br +34236 + Pixie + Kiall Mac Innes + info&pix.ie +34237 + AssureTec Systems, Inc. + Erich Weihrauch + IT&AssureTec.com +34238 + NewsWatch, Inc. + Masahiro Kajiura + penadmin&newswatch.co.jp +34239 + Innovating Distributed Systems S.L. + Miguel Angel Jimenez + info&idsystems.es +34240 + DARS d.d. + Matjaz Vidic + matjaz.vidic&dars.si +34241 + Competella AB + Lennart Bauer + lennart.bauer&competella.com +34242 + PerspecSys Inc. + Terry Woloszyn + admin&perspecsys.com +34243 + Rob Sims + Rob Sims + iana-z&robsims.com +34244 + Media Capital - Editora Multimedia S.A. + Bruno Furtado + bmfurtado&mediacapital.pt +34245 + Circle Software Group B.V. + Marc Bremmers + Bremmers&circlesoftware.nl +34246 + Hacktor + Ruben de Groot + ruben&hacktor.com +34247 + Stationtostation + Ruben de Groot + ruben&stationtostation.nl +34248 + Cykor Systems Ltd + Nick Tsirides + nick&cykorsystems.com +34249 + Triada d.o.o. + Jure Ferbezar + jure.ferbezar&triada.si +34250 + Arkadin + Vincent THINSELIN + v.thinselin&arkadin.com +34251 + FREESTONE INTERNATIONAL GROUP LIMITED + FRANK ZHANG + frank&freestone.hk +34252 + XPS ELETRÔNICA LTDA + Antonio Henrique Pires de Sant'Anna + antonio&xps.com.br +34253 + VIAFIRMA, S.L. + Benito Galan + bgalan&viafirma.com +34254 + Savannah Networks LLC + Scott Cudney + snmp&savannah-networks.com +34255 + Corner Bowl Software Corporation + Michael Janulaitis + michael&cornerbowl.com +34256 + HUMICOM co,. 
ltd + Jang, Tae Seok + tsjang&humicom.co.kr +34257 + Defense Systems Group, NTT DATA Corporation + Junsuke Suzuki + suzukijns&nttdata.co.jp +34258 + Otip Office + Satoru Otsubo + hatt3&otip.jp +34259 + The State Higher School of Vocational Education in Elblag + Adam Zaleski + admin&pwsz.elblag.pl +34260 + Apttsoft + Saravana Kumar.V + saravanakumaar.v&gmail.com +34261 + MeVis Medical Solutions AG + Sebastian Meyer + sebastian.meyer&mevis.de +34262 + Joinville Eau Vive + Vincent Behar + info&jevck.com +34263 + picoChip Designs Ltd + Spenser Poultney + operations&picochip.com +34264 + I.C.E. Logic Ltd. + Dmitry A. Khromov + info&icelogic.net +34265 + OMNITECH SECURITY + Pierre Gélard + pgelard&omnitech.fr +34266 + Unochapeco + Sergio Wanderley + stw&unochapeco.edu.br +34267 + Swedbank AB + Pavel Balashov + Pavel.Balashov&swedbank.ee +34268 + Martin Luther College + Aaron Spike + spikeac&mlc-wels.edu +34269 + SLA Corporation + Eric Collins + eric.collins&slacorp.com +34270 + Bamag GmbH + Rene Streuber + rene.streuber&bamag.net +34271 + Geomidi Informatica Ltda. + Marcos Ambrosio + ambrosio&geomidi.com.br +34272 + JSC NCC + Glushko Artyom + aglushko&ncc-volga.ru +34273 + TMNS B.V. + Walter Snel + wsnel&tmns.com +34274 + Sipwise GmbH + Andreas Granig + agranig&sipwise.com +34275 + Boyne Resorts + Ryan Kneeshaw + rkneeshaw&boyneresorts.com +34276 + Signaturgruppen A/S + Niels Frimodt Sørensen + niels&signaturgruppen.dk +34277 + Jens Meißner + Jens Meißner + heptalium&gmx.de +34278 + Janitza electronics GmbH + Marec Andreessen + pen-contact&janitza.de +34279 + JoutCon Oy + Antti Lautamies + antti.lautamies&joutcon.fi +34280 + Metva + Enno Davids + enno06&metva.com +34281 + Taiwan Dichen Co, Ltd. + YANG WAN SHAN + hackerkevin&gmail.com +34282 + MEON-yamaguchi + Kenichiro Miyoshi + support&stellar.meon.ne.jp +34283 + CA-Jinchen Software Co.,Ltd + Zhou Li + zhou.li&ca-jc.com +34284 + Elisa Oyj + Matti Swan + matti.swan&elisa.fi +34285 + S.C. DigiSign S.A. + Iosif Szavuj + admin&digisign.ro +34286 + Feusi Bildungszentrum AG + Heinz Dubach + informatik&feusi.ch +34287 + Raiffeisen Bank Kosovo JSC + Bujar Ibrahimi + bujar.ibrahimi&raiffeisen-kosovo.com +34288 + Centralna banka Crne Gore + Božo Cvetkovski + bozo.cvetkovski&cb-cg.org +34289 + IRE Rayos X S.A. + Jose Maria VENEROSO BENITEZ + jmvene&irerayosx.com +34290 + RS2i + Fahrid Djebbari + fdjebbari&rs2i.fr +34291 + xrow GmbH + Björn Dieding + service&xrow.de +34292 + Spring Street Storage + gabriel wolney + mgrwolney&yahoo.com +34293 + Coyote Creek + Dave Theodore + dtheodore&coyotecrk.com +34294 + C-Cron GmbH + Markus Heinze + markus.heinze&c-cron.de +34295 + Dalian Gigatec Electronics Co.,Ltd + Dalian guokun + guo_kun1977&126.com +34296 + Dalian Maritime University + Wang Renda + renda&newmail.dlmu.edu.cn +34297 + Mobistar sa/nv + Philippe RASSEL + philippe.rassel&mail.mobistar.be +34298 + ConteXtream + Harold Lowenthal + harold&contextream.com +34299 + Datavalet Technologies Inc. + RnD Department + it&datavalet.com +34300 + Croix Rouge Francaise + Manson Thomas + mt&mansonthomas.com +34301 + NEON, Inc. + Leo Bredehoft + lbredehoft&neoninc.org +34302 + NetIntegrate Solutions Inc + Chris Roylance + chris&netintegrate.ca +34303 + Skyline Network Engineering + Michael Branan + snmp&skylinenet.net +34304 + Feynman Group, Inc. + Mike Wilson + administrator&feynman.net +34305 + Developing Solutions, Inc. 
+ John Minnis + iana&developingsolutions.com +34306 + Canodus (formerly 'Decre') + Wout Decré + wout&canodus.be +34307 + Deckland + Martin Ried + MartinRied&gmx.net +34308 + Ford & Mason Ltd + Andrew Ford + A.Ford&ford-mason.co.uk +34309 + Archimedes Solutions GmbH + Marco Fagiolini + mfx&archimedes-solutions.de +34310 + UBINETSYS.CO., LTD. + Jinju, Lee + ceo&ubinetsys.com +34311 + Data Net Solutions Group, Inc. + Mark Greco + mgreco&4datanet.com +34312 + Accanto Systems S.r.l. + Silvani Lucia + lucia.silvani&accantosystems.com +34313 + iDMS - UMANN + COUROUBLE Bertrand + b.courouble&umanngroup.com +34314 + Datanomic Ltd. + Richard Evans + richard.evans&datanomic.com +34315 + Broadband United GmbH + Georg Herrmann + info&broadbandunited.de +34316 + PDSG + Igor Klacansky + iklacansky&pdsg.com +34317 + Fabric Technologies + James Waite + james.waite&fabrictechnologies.com +34318 + iEARN-USA + Jeffrey Spiro + jeffrey&us.iearn.org +34319 + Inveneo Inc. + Mark Summer + it&inveneo.org +34320 + Octo Wireless LLC + Michel Stam + mstam&octo-wireless.com +34321 + ICP DAS Co., Ltd + LiLing Huang + liling_huang&icpdas.com +34322 + Hoojima Ltd + Ofir Mano + ofir&xmousse.com +34323 + Zolfo Cooper + Faruk Patel + fpatel&zolfocooper.eu +34324 + Ministry of Public Administration of Montenegro + Dusan Polovic + dusan.polovic&mju.gov.me +34325 + Investec + Herman Young + hyoung&investec.co.za +34326 + Nokia + Petri Piira + petri.piira&nokia.com +34327 + Farmacia Valentini + Pablo Juan Valentini + pablo_valentini&hotmail.com +34328 + Tmcpcorp + Matthew Heck + customerservice&tmcpcorp.com +34329 + DOCOMO interTouch + Roseller Armirola + roseller.armirola&docomointertouch.com +34330 + Webyog Softworks Pvt Ltd + Sayan Chaliha + Sayan.Chaliha&webyog.com +34331 + Vilant Systems Oy + Anssi Kolehmainen + anssi.kolehmainen&vilant.com +34332 + Forex Club International Ltd + Aleksandr Ivanov + ivanov-av&fxclub.org +34333 + Streamit BV + Johan van der Stoel + johan&streamit.eu +34334 + Psylock GmbH + Stefan Al. Brandl + stefan.brandl&psylock.com +34335 + Javahar Private Limited + Javahar Ragothaman + javahar&gmail.com +34336 + Shenzhen Meikai Electronics Co., Ltd. + Fred Xiao + fred.xiao&meikai-cn.com +34337 + Batracomiomachia + Emilio Anzon + emilio&anzon.it +34338 + Bredbandstelefoni i Sverige AB + Olle E Johansson + noc&bbtele.net +34339 + Asesoría CTDATA + Luis Jiménez + ljjimenez&gmail.com +34340 + hoip.org + David Pérez + david&hoip.org +34341 + Globalinx + Thomas Powers + thomas.powers&globalinx.com +34342 + Prime Creation Technology Limited + Michael Tso + michael.tso&primecreation.com +34343 + Saint-Petersburg Industrial Joint-Stock Bank + Valeriy Kobak + vkobak&siab.ru +34344 + PNI Digital Media Inc. + Adam Cheal + operations&pnimedia.com +34345 + Bloodhound Technologies Inc + David Whipple + dwhipple&bloodhoundinc.com +34346 + Astral Media Radio inc. + Charle Demers + charle.demers&gmail.com +34347 + ATX Innovation, Inc. + David Lemley + dave&atxinnovation.com +34348 + AKC Solutions + Maurice Kamgaing + info&akcsolutions.com +34349 + miraclesoft beijing co.ltd + ji xiang yang + jixiangyang&miraclesoft.com.cn +34350 + Bingley Grammar School + John Ralph + john.ralph&bingleygr.ngfl.ac.uk +34351 + iseemedia Inc. + Ivan Yuan + ivany&iseemedia.com +34352 + Kamailio.ORG + Daniel-Constantin Mierla + daniel&kamailio.org +34353 + ClairMail + Ashwin Pejavar + ashwin.pejavar&clairmail.com +34354 + CGS, LLC + Terry Rossio + tcr&cgstogo.com +34355 + sol4.net + David Watson + pen&sol4.net +34356 + Inepex Ltd. 
+ Gabor Buza + gabor.buza&inepex.com +34357 + Plaid Tie Software + Brian Bowling + brian&plaidtiesoftware.com +34358 + Servicios técnico comerciales en Internet S.C.P + Francisco Monteagudo + monti&servitec.net +34359 + Nucleus Connect Pte Ltd + Lim Hong Chuan + hclim&nucleusconnect.com +34360 + Silmor.de Projects + Konrad Rosenbaum + konrad&silmor.de +34361 + Matrixware Information Services GmbH + Oliver Falk + oliver&linux-kernel.at +34362 + FH Aachen + Jörn Tomczak + tomczak&fh-aachen.de +34363 + Illallangi Enterprises + Andrew Cole + andrew.cole&illallangi.com +34364 + geinitz.info + Andreas Geinitz + andreas&geinitz.info +34365 + Beijing Eastcat Information Technology Company + Liwenhao + vovon&sohu.com +34366 + Erwin Peters Systemtechnik GmbH + Dieter Gleske + d.gleske&epsystem.de +34367 + Fire Check Consultants Pty Ltd + Laurie Odgers + l.odgers&firecheck.com.au +34368 + General Dynamics Mission Systems–Australia + Matthew Jolly + sysadmin&gd-ms.com.au +34369 + NanoSoft AG + Peter Stadler + support&nanosoft.ch +34370 + Papier- und Kartonfabrik Varel GmbH & Co KG + Thomas Bonsack + t.bonsack&pkvarel.de +34371 + A1 Systems, LLC + Sergey S. Sergeev + it&a1-systems.com +34372 + Tubedale Limited + Gerrard Whitmore + ged.whitmore&tubedale.co.uk +34373 + ESCAUX + Wim Livens + iana-pen&escaux.com +34374 + Paki Shells Internet Services + Mohsin Rizwan + mohsin&pakishells.net +34375 + Mevio + Dan Benson + ops&mevio.com +34376 + Automática y Regulación S.A. + Rubén Lepe Morales. + rlepe&auter.cl +34377 + Locaweb Ltda + Gilberto Mautner + email-tec&locaweb.com.br +34378 + q1cc.net + Markus Dangl + sky&q1cc.net +34379 + Cheers Co.,Ltd + Takashi Yoshida + info&cheersnet.jp +34380 + Puppet Labs, Inc. (formerly 'Reductive Labs, Inc.') + Luke Kanies + luke&puppetlabs.com +34381 + Inixa - Security & Communication + Julio Rilo Blanco + jrilo&inixa.com +34382 + Bigdogs Bros & Co. + Massimo Liggio + snmp.master&gmail.com +34383 + INEX/ZAMIR + David Goldstein + davidg&inexzamir.com +34384 + Sipoon kunta + Viktor Eskman + iana-pen&sipoo.fi +34385 + Hinge Software Co.,Ltd. + QingDa Yu + qingda.yu&hingesoftware.com +34386 + Intelligent Mechatronic Systems Inc. + David Campbell + dcampbell&intellimec.com +34387 + Component Engineering, Inc + Larry DeSoto + ldesoto&componentengineering.com +34388 + O'Reilly Automotive, Inc. + Dustin McIlvain + dmcilvain&oreillyauto.com +34389 + Geofront + Mike Woods + mike&geofront.co.uk +34390 + BlackGinger + Deon Borman + infotech&blackginger.tv +34391 + Owens Corning + Andy Michaelson + dn-admin&owenscorning.com +34392 + 3 Rivers Communications + Tim Hodges + tim.hodges&3rivers.coop +34393 + blau Mobilfunk GmbH + Tobias Galitzien + tg&blau.de +34394 + Whiplashcentret + Morten Nielsen + webmaster&whiplashcentret.org +34395 + OMESH Networks + Michael Lee + michael.lee&omeshnet.com +34396 + Metropolitan Government of Nashville & Davidson County + Joseph Roselli + Joseph.Roselli&nashville.gov +34397 + Cloud Scope Technologies, Inc. + Masaru Sugawara + system-admin&cloud-scope.com +34398 + LANCARD.COM inc, + Hiroki MINEMATSU + info&lancard.com +34399 + coravy. Ltd. + Stefan Saasen + admin&coravy.com +34400 + Research electronics AB + Sven-Åke Eriksson + sae&researchelectronics.se +34401 + Studio Moderna d.o.o. 
+ Tomaz Grad + tomaz.grad&studio-moderna.com +34402 + Siskiyou County + Eric Silfies + eric.silfies&co.sisqjustice.ca.us +34403 + unseregedanken.de + Jan Dennis Bungart + info&unseregedanken.de +34404 + skyenet + Laura Eckardt + iana&comsound.de +34405 + City of Cape Town + Alan Moon + Alan.Moon&capetown.gov.za +34406 + Generals Network, OZ + Marek Bečka + yuen&klacno.sk +34407 + nexus AG + Gerrit Raith + gerrit.raith&nexus-ag.de +34408 + WANET s.r.o. + Bruno Redl + wario&wanet.cz +34409 + Bebidas Gaseosas del Noroeste S.A. + Alberto Gallego Patino + alberto.gallego&begano.es +34410 + Foss-on-line + Sergej Mamontov + mmm&foss.kharkov.ua +34411 + Playtech Estonia OÜ + Valdo Kanemägi + infra.system&playtech.com +34412 + Mutina Technology S.r.l. + Stefano Torri + storri&mutinatechnology.com +34413 + Global Substation Solutions Inc. + Milos Prodanovic + milos.prodanovic&gss.eu.com +34414 + Beijing Galaxy Golden Star Technology & Development Ltd. + Li Weijun + tianyino&gmail.com +34415 + Superna Analytics Inc. + Keith MacEwen + keith.macewen&supernaanalytics.com +34416 + Ottawa Area Intermediate School District + Patrick Donlin + pdonlin&oaisd.org +34417 + eGestalt Technologies Pvt. Ltd. + Sanjay Debnath + sanjay.debnath&egestalt.com +34418 + Santa Clara University + Todd Schmitzer + tschmitzer&scu.edu +34419 + Regis University + Erich Delcamp + oidadmin®is.edu +34420 + Hochschule RheinMain - University of Applied Sciences + Ralph Weichert + oid-itc&hs-rm.de +34421 + Benoit Eletrodomesticos LTDA + Mauro Delazeri + mauro.delazeri&benoit.com.br +34422 + Kingley Health + Alexander Komarov + alias.iana&kingleyhealth.com +34423 + Jamtlands County Council - Sweden + Mikael Johnson + mikael.johnson&jll.se +34424 + Lansen Technology + Martin Hassel + martin.hassel&lansentechnology.com +34425 + Archant Limited + Peter Bailey + Peter.Bailey&archant.co.uk +34426 + Wind Telecomunicazioni S.p.a + Luca Ferrandino + luca.ferrandino&mail.wind.it +34427 + Czeh Consultants + Tony Czeh + tony&czeh.us +34428 + UAB Lema + Vidmantas Balcytis + vidma&lema.lt +34429 + Less Rain GmbH + Marcus Pauli + marcus&lessrain.com +34430 + www.fiber-space.de + Kay Schluehr + kay&fiber-space.de +34431 + ForceFive AG + Stefan Bicanic + stefan.bicanic&forcefive.de +34432 + Lescom AG + Schneider Pascal + pascal.schneider&lescom.ch +34433 + UniControls a.s. + Petr Cvachouček + cvachoucek&unicontrols.cz +34434 + HongTEC (BeiJing) Co., Ltd + zhu yuechen + zhuyuechen&hong-tec.com +34435 + North Loop Networks + Mike Corneille + mcorneille&northloopnetworks.com +34436 + TS-Associates plc + Steve Rodgers + steve&ts-a.com +34437 + Consequor Consulting AG + Frank Goenninger + frank.goenninger&consequor.de +34438 + DOHMEN, HERZOG & Partner GmbH + Frank Herzog + frank.herzog&dhp-gmbh.de +34439 + West Virginia State University + Eugene Stowers + estowers&wvstateu.edu +34440 + Velocix Limited + David Ferguson + mibs&velocix.com +34441 + NTT Basic Research Laboratories + Hirofumi Ueshima + ueshima&will.brl.ntt.co.jp +34442 + e-utile S.p.A. + Dario Dainelli + dario.dainelli&e-utile.it +34443 + Narinet Inc. + Wonkyu Park + wkpark&narinet.com +34444 + STS Studi Tecnologie Sistemi S.r.l. + Flavio Poletti + flavio.poletti&stsconsulting.it +34445 + Capgemini Nederland B.V. ( Netherlands ) + Renato Bianchessi + renato.bianchessi&capgemini.com +34446 + Hasenohr Consulting + Paul Hasenohr + consulting&hasenohr.eu +34447 + Beijing Huoohuoo Technology Co., Ltd + yadong shi + huoohuoo&gmail.com +34448 + G-Bits Network Technology Co., Ltd. 
+ Chao Xu + xuc&g-bits.com +34449 + Laguna Ventures, Inc. + Sally Smith + ssmith&lvi-usa.com +34450 + JCNetwork e.V. + Rami Baccar + rami.baccar&jcnetwork.de +34451 + OS3 srl + Fabio Rotondo + fabio.rotondo&os3.it +34452 + EDV-Service Oliver Strucken + Oliver Strucken + post&strucken.net +34453 + OPT Tahiti + Christopher LOUIS + christopher.louis&opt.pf +34454 + VIS Visual Information Systems Limited + John Bengston + john&vis-ranger.com +34455 + Primex Family of Companies + Mark Meyer + mmeyer&primexinc.com +34456 + Dashwire, Inc. + Garrett Honeycutt + noc&dashwire.com +34457 + Rex Wheeler + Rex Wheeler + rex&fuzzytiger.com +34458 + K.S.Transplaneta Ltd + Karsten Siebert + Karsten.Siebert&transplaneta.com +34459 + Network Box Corporation Ltd + Mark Webb-Johnson + mark.johnson&network-box.com +34460 + xplo.re IT Services + Michael Maier + michael.maier&xplo-re.org +34461 + Ithaxis + Patrick Marteau + p.marteau&ithaxis.fr +34462 + init AG + Martin Hase + mhase&init-ka.de +34463 + Corey Farrell + Corey Farrell + iana-reg&coreyfarrell.com +34464 + Pragma Securities + David Mechner + sysad&pragmafs.com +34465 + CyPace, Inc. + Jeremy Murtishaw + jeremy&cypace.com +34466 + polyformal System- & Formarbeit + Stefan Pampel + stefan.pampel&polyformal.de +34467 + REPOTEC CO., LTD. + Jeffrey Hsu + jeffrey&repotec.com +34468 + OSSTech Corporation + SATOH Fumiyasu + info&osstech.co.jp +34469 + LOGIT CORPORATION + takeshi yoshida + yoshida&logit.co.jp +34470 + Institute of Cybernetics at Tallinn University of Technology + Riina Maigre + riina&ioc.ee +34471 + Trust-iD BV + Marcel A. Wendt + mwendt&trust-id.nl +34472 + Hogeschool van Amsterdam + Maurik van den Haak + m.van.den.haak&hva.nl +34473 + DOKOM Gesellschaft für Telekommunikation mbH + Stefan Skuballa + requests&dokom21.de +34474 + ALT-LAN + Maxim Tabolkin + info&alt-lan.ru +34475 + Polytronics Engineering Ltd. + Eric Roman + eric&polytronicseng.com +34476 + BIA-Net GmbH + Heiko Meyn + mail&bia-net.de +34477 + K & V spol. s r.o. + Lubos Stanek + stanek.l&kavdane.cz +34478 + CMS Hasche Sigle + Thoralf Tietboehl + thoralf.tietboehl&cms-hs.com +34479 + DWEB SOLUÇÕES PARA INTERNET + AMADOR GONÇALVES + suporte&dwebhosting.net +34480 + iModules + Todd Decker + tdecker&imodules.com +34481 + World Fuel Services + Julio Pereira + gwadmin&wfscorp.com +34482 + Lambdazero IT + Stefano Fiorini + stefano.fiorini&lambdazero.it +34483 + Himalaya networks + Magesh Pat + pat&himalayanetworks.com +34484 + Toolio Systems + Al Wheeler + toolio&toolio.net +34485 + R2Meton AB + Per Lindström + Per.Lindstrom&r2m.se +34486 + Hanno.dk + Jesper Hanno Hansen + jesper&hanno.dk +34487 + Kemi Shipping Oy + Jukka Ponkala + jukka.ponkala&kemishipping.fi +34488 + Kryptiq Corporation + Matthew R. Wilson + mwilson&kryptiq.com +34489 + Nikon AG + Raimund Geiger + raimund.geiger&nikon.ch +34490 + B3Networks + Ong Kok Choong + okchoong&b3networks.com +34491 + Almaz-Antey Telecom + Dmitry Grishin + charit13&gmail.com +34492 + Aryaka Networks + Steve Jankowski + aryaka-IANA&aryaka.com +34493 + CCMIB CCB + Michael Haenchen + AFLCMC.HNCCM.CCMIB&us.af.mil +34494 + HSQ Technology + Jason Spence + jspence&hsq.com +34495 + Streetcar Ltd + Richard Smith + richard.smith&streetcar.co.uk +34496 + Cision US Inc. 
+ Brian Crocker + oid-admin.us&cision.com +34497 + LinSec Consulting + Shane Machon + shane&linsec.com.au +34498 + IRBIS-T + Gennady Gromov + gromov&shtyl.ru +34499 + Martin Merkel + Martin Merkel + martin-merkel&web.de +34500 + LAOLA1 Multimedia GmbH + Guido Rudisch + guido.rudisch&laola1.at +34501 + S. C. Null Team Impex SRL. + Diana Cionoiu + diana&null.ro +34502 + Millikin University + Craig Janssen + infotech&millikin.edu +34503 + WMI Computer + Birger Brunswiek + mib-admin&wmicomputer.de +34504 + GuangZhou Comble Information Technology Co.,Ltd. + Peter Zhou + zhou&cbetek.com +34505 + SIEGNETZ-IT GmbH + Ralf Zimmermann + technik&siegnetz.de +34506 + Twinfalls Technologies + Eric Bond + eric.bond&telus.net +34507 + Safe Consulting AS + Øystein Tusvik + oidadmin&safeinnovations.com +34508 + Wyatt Tarrant & Combs LLP + Rick Humphrey + rhumphrey&wyattfirm.com +34509 + Portland State University + Jim Stapleton + hostmaster&pdx.edu +34510 + Optimum Path Systems, Inc + Steven Webel + steven&visualdatacenter.com +34511 + Trinity County Office of Education + Robert Jackson + ctech&tcoek12.org +34512 + graphische Informationstechnik Beratungsgesellschaft mbH (grit) + Stephan Reichhelm + iana.contact&grit.de +34513 + RMT INC + John Saley + John.Saley&rmtinc.com +34514 + Australian College of Remote and Rural Medicine + David Warring + david.warring&gmail.com +34515 + Whitemice Consulting + Adam Tauno Williams + awilliam&whitemiceconsulting.com +34516 + Network Applied Communication Laboratory + Hiroshi Inoue + support&netlab.jp +34517 + Eastcom Systems Pte Ltd + Charles Madapa + charles&eastcom-systems.com +34518 + Movianto GmbH + Michael Rocktäschel + michael.rocktaeschel&movianto.com +34519 + UK Border Agency + Vincent Blake + vincent.blake&trustedborders.co.uk +34520 + Mudra Communications Pvt. Ltd. + Chirag A. Gandhi + it&mudra.com +34521 + Comlink Telecommunication Services Inc + Arshad Patel + awp&comlinkinc.com +34522 + Pinnacle Wireless + Doug Ehlers + dehlers&idsmindshare.com +34523 + BlueKrypt + Damien Giry + damien.giry&bluekrypt.com +34524 + RIS2048, Lda + Pedro Leite + pedro.leite&ris2048.pt +34525 + Yesin.NET + Erol Yesin + erol&yesin.net +34526 + Techsologic + Michael Stapleton + michael.stapleton&techsologic.com +34527 + Matrix Telecom Pvt. Ltd. + Rakesh Rathod + rakesh.rathod&matrixtelesol.com +34528 + SecureWare + Vassilis Poursalidis + poursal&csd.uoc.gr +34529 + Iuniperus S.r.l. + Filippo Solinas + netmgm&iuniperus.it +34530 + Netpioneer GmbH + Ulrich Hochholdinger + admins&netpioneer.de +34531 + landofnever.net + Dario Turchetti + support&landofnever.net +34532 + mkbdoos + L.B. Sparreboom + luuk.sparreboom&mkbdoos.nl +34533 + Compuvisor Lab + Vadim Kataev + vm&compuvisor.net +34534 + Trimble MRM + Erol Yesin + erol_yesin&trimble.com +34535 + JetTek LLC + Gregory Orsini + greg.orsini&jettekfix.com +34536 + fdXtended + Anton Van Cauteren + anton&fdxtended.com +34537 + The Moses H. Cone Memorial Hospital + Dee Davis + webadministrator&mosescone.com +34538 + s.a. D'Ieteren n.v. + Fabrice Coppens + servercorp&dieteren.be +34539 + Netcom XXI Informatica y Comunicaciones, S.L. + Emilio Hernandez + eh&netcom-xxi.com +34540 + azeti Networks GmbH + Uwe Holz + uwe.holz&azeti.net +34541 + Bematech S.A. + Joao Cadamuro Junior + joao.cadamuro&bematech.com.br +34542 + kindwind technological ltd. 
+ huang zufeng + zf.huang&kindwind.com.cn +34543 + Susquehanna International Group + John Cardenas + DeptTech-CoreInfrastructure&sig.com +34544 + Certant + Juan Heguiabehere + idm&certant.com +34545 + Cistera Networks + Ramana Guntor + rguntor&cistera.com +34546 + RedZinc Ltd + Vincent Caffrey + Vincent&Redzinc.net +34547 + Glory Global Solutions (formerly 'Talaris') + James William Long + james.long&uk.glory-global.com +34548 + Badischer Landwirtschaftlicher Hauptverband e.V. + Jürgen Liebs + Juergen.Liebs&BLHV.DE +34549 + University of Saskatchewan + Ken Glover + ken.glover&usask.ca +34550 + Austin Hughes Electronics Ltd. + Ed Chan + ed.chan&austin-hughes.com +34551 + Zettacon Systems Inc. + Malik Mazhar + mazhar&zettacon.com +34552 + IP SQUARE Softwareerzeuger GmbH + Ronan Kelly + admin&ipsquare.at +34553 + Spryware + Kenneth Felix + Ken&spryware.com +34554 + GrandPower + Sunbin + sunbin&grandpower.com.cn +34555 + Eurasian energy corporation JSC + Alexey Kramnyuk + akramnyuk&eec.enrc.com +34556 + EDAN INSTRUMENTS CO., LTD. + Wenqiang Peng + pwqlwl&gmail.com +34557 + Ministerio de Justicia + Angelines Turón + seguridad_calidad&lista.mju.es +34558 + Unassigned + ---none--- + ---none--- +34559 + Maschinenfabrik Reinhausen GmbH + Gerhard Reichl + g.reichl&reinhausen.com +34560 + Marquise Technologies + Gregory Medwed + support&marquise-tech.com +34561 + VITALATIV, s.r.o. + Vitaly Tsaregorodtsev + iana&vitalativ.com +34562 + FUHST MEHRENS + Jonas Fuhst + h.mehrens&fuhst.com +34563 + Calame + Philippe Rochat + web100&sikatizen.com +34564 + Hoseo telecom Co., Ltd. + Lee min ki + qhfleagle&hoseotel.co.kr +34565 + KVARTA SOFT EOOD + Todor Manev + tmanev&kvarta.net +34566 + Tradar Limited + Tristan Clow + tristan.clow&tradar.com +34567 + Micronetics, Inc. + Thomas Rizzo + trizzoµnetics.com +34568 + Synectics + Andy Smith + snmp&synx.com +34569 + KB "Uniastrumbank" + Mixaylin Aleksey + mixaylin&uniastrum.com +34570 + Netdeep Tecnologia + Marcos Paulo Serafim + mpserafim&netdeep.com.br +34571 + SIGNION SYSTEMS + T. PRAVEEN KUMAR + praveen&signion.com +34572 + NARI-RELAYS Electric Co., Ltd. + Liu Minghui + liumh&nari-relays.com +34573 + Finaf S.p.A + Claudio Rocchi + c.rocchi&angelini.it +34574 + Blackbit Studio + Andrea Gronchi + info&blackbit.it +34575 + Kent Imaging Inc. + Matt Cervi + matt&kentimaging.com +34576 + HS Emden/Leer + Ingo Herz + herz&technik-emden.de +34577 + Correlation Systems Ltd + Erel Rosenberg + erel&cs.co.il +34578 + OpenNMS Sans Effort (OSE) + Samuel Mutel + opennmsse&free.fr +34579 + SYM Technology, Inc. 
+ Hyunwoo Lee + jason.lee&symtechnology.com +34580 + Need Solutions + Thierry LUCON + thierry.lucon&needsolutions.fr +34581 + J3TEL + Aymeric Tiburce + qualit&j3tel.fr +34582 + Tuenti Technologies + Matías Surdi + msurdi&tuenti.com +34583 + V-Private AG + Stephan Paschke + stephan.paschke&v-private.net +34584 + unterberg-electronic Gmbh + Thomas Unterberg + thomas&unterberg-electronic.de +34585 + REWE-Informations-Systeme GmbH + Andreas Thimm + andreas.thimm&rewe-group.com +34586 + Ontario Teachers Pension Plan Board + Brett McClellan + brett_mcclellan&otpp.com +34587 + Hiive Systems + Glenn Fowler + sysadmin&hiivesystems.com +34588 + Beijing office of International Air Transport Association + Fan Aihua + fanah&iata.org +34589 + Adnovate + Joris Jansen + jjansen&adnovate.com +34590 + Novus Produtos Eletronicos Ltda + Sandro Rafael dos Santos + sandro.santos&novusautomation.com +34591 + JSC Proryv-Komplekt + Maksim Yanin + YaninMA&Proryv-komplekt.ru +34592 + Shenzhen C-Data Technology Co.,Ltd. + Alan Cui + yunliang_cui&cdatatec.com +34593 + Zhilabs + Julio César Arpírez Vega + jarpirez&zhilabs.com +34594 + Growing Opportunity Finance India Private Limited + Immanuel Jeyaraj + ijeyaraj&gopportunity.net +34595 + Xteam Network(Beijing) Co.,Ltd + zhaohai + zhaohai&18mail.cn +34596 + Casalogic A/S + Troels Hansen + th&casalogic.dk +34597 + CADT Software and Drafting, S.L. + Pascual Castellon Gadea + p.castellon&cadt.com +34598 + Cable Sense Ltd + Phil Garner + phil.garner&cablesense.net +34599 + 3SI Security Systems NV + Bart Saerens + bart_saerens&3sisecurity.com +34600 + TECSEM Tecnologia em Sistemas Embarcados + Tiago Dall'Agnol + tiago&tecsem.com.br +34601 + MongoDB + Eliot Horowitz + eliot&mongodb.org +34602 + Bomgar Corporation + Tal Guest + ProdInfo&bomgar.com +34603 + Seria AS + Jon-Eirik Pettersen + joneirik&seria.no +34604 + Fibernetics Corp + Yvon Bertrand + role.iana.pen&fibernetics.ca +34605 + SL Corporation + Michael Kairys + michael&sl.com +34606 + Wayne RESA + Jim Rarus + rarusj&resa.net +34607 + Newdaysoft Co., Ltd + Nam-Hyun , Kim + nhkim&newdaysoft.co.kr +34608 + American Society for Microbiology + Edward Kurowski + ekurowski&asmusa.org +34609 + Vine Telecom Co., Ltd + Young Hur + young&vinetel.co.kr +34610 + Grand Future Beijing Info-Tech Co., Ltd. + Sun Ligang + slg&valuex.cn +34611 + Berufsförderungswerk Oberhausen + Michael Detambel + detambel&bfw-oberhausen.de +34612 + Alteris Sp. z o.o. + Robert Frączek + robert.fraczek&alteris.pl +34613 + iKron Ltd. + Peter Timar + sysadm&ikron.hu +34614 + Technocats GmbH + John Ainhirn-Williams + john.williams&technocats.org +34615 + Magyar Államkincstár / Hungarian State Treasury + Izsa Jeno + oid.admin&allamkincstar.gov.hu +34616 + ActualWeb Soluciones Informaticas + Rafael Forcada + info&actualweb.es +34617 + RCDevs + Charly Rohart + charly.rohart&rcdevs.com +34618 + Gruppo TNT S.r.l. + Giuseppe Bonariva + beppe&gruppotnt.com +34619 + 3Dicom, S.L. 
+ Rafael Forcada + info&3dicom.es +34620 + Asuerus van Tuijl + Asuerus van Tuijl + asuerus&asuerus.nl +34621 + imacab + Bouchrahi abdelhaq + abdelhaqb&imacab.ma +34622 + Heimore Group AB + Daniel Roig + daniel.roig&heimore.com +34623 + USF1 + Frank Cusack + frank.cusack&usf1.com +34624 + Tokavuh Technologies oy + Sami Kuhmonen + sami&tokavuh.com +34625 + Actavis Group hf + Leslie Potts + lpotts&actavis.com +34626 + A-Real Consulting LLC + Alexey Guskov + lexag&mail.ru +34627 + Digifix Ltda + Christian Perez + c.perez&digifix.com.co +34628 + Citrus Networks + Noel Kelly + nkelly&citrusnetworks.net +34629 + MP Advanced Multimedia + Ignacio Alles + ia&mpadvanced.com +34630 + Tigron BVBA + Gerry Demaret + info&tigron.be +34631 + Janelia Farm Research Campus/HHMI + Eric Trautman + trautmane&janelia.hhmi.org +34632 + Cohort Technology Ltd + Keith Gilbert + technical&cohorttechnology.com +34633 + norgie.net + Mike Hemstock + mikeh&csits.net +34634 + Graduate School of Education and Information Studies + Luke Brannon + brannon&gseis.ucla.edu +34635 + Pi-Systemprogrammierungs-GmbH + Martin Trübner + martin&pi-sysprog.de +34636 + Contact Center Compliance Corporation + Alex Hisen + alexh&dnc.com +34637 + Revenue Solutions, Inc + John Walker + jwalker&rsimail.com +34638 + Memphis Network Service Ltd. + Robson Stedille + rst&memphisnetwork.com.br +34639 + Conformity Inc + Roy Kipp + rkipp&conformity-inc.com +34640 + Nodnet + Juho Ylikorpi + juho.ylikorpi&nodnet.fi +34641 + Dimac Hosting AB + Jonas Frost + jonas&duplo.org +34642 + Rugged Information Technology Equipment Corporation + Stanley Papush + stan&ritecrugged.com +34643 + Garmin International + Jason Anderson + jason.anderson&garmin.com +34644 + Potomac & Bluebonnet Technolgy LLC + Robb Hill + robb&protoping.com +34645 + Adael Wireless SARL + Mathieu Alengrin + malengrin&adael.net +34646 + HellermannTyton Data Ltd + Jason James + jason.james&htdata.co.uk +34647 + KAMOME Engineering, Inc. + Kohji Osamura + sysadmin&kamomeengineering.com +34648 + The-Box Development + Jac Kersing + info&the-box.com +34649 + netvisiontel + Moon-Jin, Yeom + mjyeom&netvisiontel.co.kr +34650 + P2Cache Pte Ltd + He Hin Hoong + hhhoong&p2cache.com +34651 + agama co,ltd + luo yaojun + luoyaojun&sina.com +34652 + IPF Technology Ltd + Stanislav Chernikov + s.chernikov&ipftechnology.com +34653 + Irisys + Neil Sumpter + neils002&irisys.co.uk +34654 + La Gentz KG + Gregor Dorfbauer + office&lagentz.at +34655 + Berthold Boeser Ingenieurbuero + Berthold Boeser + info&ib-boeser.de +34656 + ATS, Applicazione Tecnologie Speciali + Campo Marilina + marilina.campo&atsmed.it +34657 + Seifert mtm Systems (Malta) Ltd. + George Saliba + george.saliba&seifert-mtmsystems.com +34658 + Sysadmins LV + Vadims Podans + vpodans&sysadmins.lv +34659 + EPLAN Software & Service GmbH & Co. KG + Andreas Marxen + marxen.a&eplan.de +34660 + SIA Datu Sistemas + Gatis Zeiris + info&dsistemas.lv +34661 + Islandsbanki + Jon Elias Thrainsson + sys&islandsbanki.is +34662 + Ajuntament de Sant Adria de Besos + Juan Antonio Vera Nieto + javera&sant-adria.net +34663 + Lavinia Interactiva + Lluis Ribes + lribes&lavinia.tc +34664 + Trident Micro Systems + Adam Yonce + ayonce&tridentms.com +34665 + ENTEL S.A. BOLIVIA + Wilfredo Carrillo + wilfredo.carrillo.o&gmail.com +34666 + Lava Business Solutions + Adam Kolakowski + adamk&lava.pl +34667 + DSP LABS S.r.l. + Riccardo Scussat + dsplabs&dsplabs.net +34668 + Koine Servizi S.r.l. 
+ Bruno Alberto + bruno&koine-servizi.it +34669 + Encarnate, LLC + Shane Kumpf + shane&encarnate.com +34670 + Scott Shen + Scott Shen + scott.swm&gmail.com +34671 + Anoop Aryal + Anoop Aryal + anoop.aryal&gmail.com +34672 + MicroAqua Network system + Yangjiancai (James Yang) + yangjiancai8&hotmail.com +34673 + The FreeDHCP Project + Alan DeKok + aland&freedhcp.net +34674 + FLIR Systems, Inc. + Ricardo Gomez + ns.it&flir.com +34675 + blackcase.pl + Kamil Kurzyk + k.kurzyk&blackcase.pl +34676 + Vox Telecom + Dael Williamson + daelw&voxtelecom.co.za +34677 + informationpartners GmbH & Co. KG + Stefan Mayer + smayer&informationpartners.eu +34678 + Anticept + Jochen Seiler + hosting&anticept.de +34679 + forcont business technology gmbh + Frank Nowak + Frank.Nowak&forcont.com +34680 + RUAG Holding AG + Ernst Huber + ernst.huber&ruag.com +34681 + DATAGROUP IT Services Holding AG + Tobias Hüttner + security&datagroup.de +34682 + ATP Management & Technology s.r.l. + Luigi Vezzoso + lvezzoso&atpmanagement.it +34683 + Seacliff Associates, LLC + Richard Irwin + rirwin&seacliffedu.com +34684 + Rosado Beheer BV + Rui Rosado + rui&rosado.nl +34685 + Accipiter Systems, Inc. + Eric Helmsen + helmsen&accipitersystems.com +34686 + Sunet Technology LLC + Vadim Topchiy + vadim&sunet.uz +34687 + Gale + Mike Seiler + mike.seiler&sanyuhu.com +34688 + King Abdullah University of Science and Technology + Iain Georgeson + iain.georgeson&kaust.edu.sa +34689 + Tekron International Ltd + Aleks Ristich + support&tekroninternational.com +34690 + National Gypsum Company + Mike Brannon + mebrannon&nationalgypsum.com +34691 + EON Co.,Ltd + Jin-su Jang + jsjang&eonworld.com +34692 + DICOM Grid, Inc + Daniel Ostrow + dostrow&dicomgrid.com +34693 + Netsoft Lund Development AB + Jorgen Christiansson + jorgen&netsoftlund.se +34694 + eleven GmbH + Enno Cramer + iana&eleven.de +34695 + EXTENSION, Inc. + Steve Tyler + styler&ext-inc.com +34696 + RIOD Technologies + Brad Henry + j0j0&riod.ca +34697 + AfirmTrust, LLC + Kirk Hall + Kirk&mfhllc.com +34698 + Studio Caccia + Andrea Caccia + pen&studiocaccia.com +34699 + France Développement Conseil + Alexandre Allien + alexandre.allien&fdc.eu +34700 + Beijing Hongdexin Information Technology Co, Ltd. + Robert Pletscher + leiluobei&126.com +34701 + BTCentral + Ben Thomas + services&btcentral.org.uk +34702 + Parity Energy Inc. + David Brown + david.brown&parityenergy.com +34703 + Stefan Sieber + Stefan Sieber + info&home.sieber.net +34704 + H. Lundbeck A/S + Thomas Svenningsen + thsv&lundbeck.com +34705 + ZAO GU NPO "Stroytechautomatika" + Andrew Smirnoff + asu&gu-sta.ru +34706 + Aprius Inc. + Swaminathan Saikumar + swaminathan.saikumar&aprius.com +34707 + Centro Federal de Educação Tecnológica de Minas Gerais + Clever de Oliveira Júnior + clever&dri.cefetmg.br +34708 + Etrafficers, Inc. + Steven Fletcher + fletch&etrafficers.com +34709 + Lightner Engineering + Bruce D Lightner + lightner&lightner.net +34710 + Advance Communication Technologies + James Laing + jlaing&advance.ca +34711 + Jamaica Broilers Group + Owen Clashing + oclashing&jabgl.com +34712 + Fujian Newland Auto-ID Tech. Co., Ltd. + CHEN JUN + chenjun&mail.nlscan.com +34713 + lc4ever.net + Wang QuanLong + apeidou&gmail.com +34714 + OCULUS Optikgeräte GmbH + Daniel Ache + d.ache&oculus.de +34715 + ITVT GmbH + Patrick Kreuzer + support&itvt.de +34716 + Vizimax Inc. 
+ Jean-Marc Da Pozzo + jmdapozzo&vizimax.com +34717 + bhold + Ben Fontein + oid&bhold.com +34718 + FUENSO + Emiliano Almazán + support&fuenso.com +34719 + Cronon AG + Florian Heinz + fh&cronon-ag.de +34720 + mFoundry + Craig Setera + software&mfoundry.com +34721 + Magyar Waldorf Szövetség / Hungarian Waldorf Fellowship + István Haraszti + rendszergazda&waldorf.hu +34722 + FastSoft, Inc. + Rod Morison + rmorison&fastsoft.com +34723 + Conxx + Brandon Wigfield + brandon.wigfield&conxx.net +34724 + netInsight ITLabs + Alexandre Martins + amartins&netinsight.com.br +34725 + Vasc-Alert + Larry Mills-Gahl + lmg&vasc-alert.com +34726 + Works Systems + Joseph Chen + climaxl&workssys.com.cn +34727 + Burda Digital Systems GmbH + Daniel Hansert + daniel.hansert&burdadigital.de +34728 + INSA de Lyon + Frederic Jacquot + frederic.jacquot&insa-lyon.fr +34729 + OOO PS Yandex.Dengi + Andrey V. Kovalev + licensing&yamoney.ru +34730 + FRANCILIENNE D'INGENIERIE ET DE SERVICES INFORMATIQUES SAS + COQUEL, JEAN + jeancoquel&fisi.fr +34731 + Sloan Valve Company + Mark Chapin + mark.chapin&sloanvalve.com +34732 + Spb Software Inc. + Nikolay V. Krasko + n.krasko&spbsoftware.com +34733 + Zinnia Systems + Akhilesh Singh + akhilesh&zinniasystems.com +34734 + University of Macerata + Marco Principi + marco.principi&unimc.it +34735 + Cellfish + Thomas Dupont + thomas.dupont&cellfishmedia.fr +34736 + Universidade Estadual Vale do Acaraú - UVA + Pedro Roger M Vasconcelos + roger&uvanet.br +34737 + CIMNE + Felip Moll + fmoll&cimne.upc.edu +34738 + MassiveSolutions Ltd + Viktor Sovietov + victor&massivesolutions.co.uk +34739 + lihnidos.org + Damjan Sirnik + damjan&lihnidos.org +34740 + University of Ottawa + Luc Lepine + lalepine&uottawa.ca +34741 + Binet Réseau + Raphaël Barrois + oid&frankiz.polytechnique.fr +34742 + LevelMobile, Inc. + Ryan Nideffer + rnideffer&gmail.com +34743 + Teleperformance + Joshua Miller + joshua.miller&teleperformance.com +34744 + QC Technology B.V. + Eduard Karel de Jong + eduard&dejongfrz.nl +34745 + METADYS + Cyril VELTER + cyril.velter&metadys.com +34746 + Kevinsnet Limited + Kevin C. Leacock + leacockk&kevinsnet.com +34747 + Infineta Systems, Inc + Ramesh Pavadai + ramesh&infineta.com +34748 + Twitter, Inc. + Anatole Shaw + anatole+iana&twitter.com +34749 + SubscriberMail, LLC + Bob Ziuchkovski + sysadmin&subscribermail.com +34750 + Truenorthlogic + Nathan Dykman + ndykman&truenorthlogic.com +34751 + Shanghai Meridian Technologies, Co. Ltd. + Tie Yu + mrd454&gmail.com +34752 + ITDiv.com + Johan Wasserman + johan.wasserman&gmail.com +34753 + OnTime Networks AS + Øyvind Holmeier + contact&ontimenet.com +34754 + PowerTrunk Inc. + A. L. Kilbourne + akilbourne&teltronic.es +34755 + Res Ingenium s.r.l. + Placidi Fabio + fplacidi&res-ingenium.com +34756 + Inoa + Frederik Vermeulen + frederik.vermeulen&inoa.net +34757 + Gorilla Concept GmbH + Markus Schütze + schuetze&gorilla-concept.de +34758 + Univerza v Ljubljani + Anton Jagodic + anton.jagodic&uni-lj.si +34759 + Thames Valley Police + Tim Nicholls + tim.nicholls&thamesvalley.pnn.police.uk +34760 + AMiT s.r.o. + Martin Vosahlo + vosahlo&amit.cz +34761 + SSC-IT + Sven Gertsch + sven.gertsch&kssg.ch +34762 + DartIT + Boris B. 
Rudakov + support&dartit.ru +34763 + People Power Company + David Moss + dmoss&peoplepowerco.com +34764 + ProStructure Consulting + Micah McNelly + micah&prostructure.com +34765 + forVNs Group + Tran Quoc Hoai + hoaitq&gmail.com +34766 + Invities + Frederic Minot + frederic.minot&invities.com +34767 + ISPIN AG + Mario Gersbach + support&ispin.ch +34768 + S+P LION AG + Andreas Christ + andreas.christ&sp-lion.com +34769 + ACORDE TECHNOLOGIES + Jacobo Domínguez + jacobo.dominguez&acorde.com +34770 + Cardiff University + Ceri Davies + daviescm5&cf.ac.uk +34771 + Altmann Software + Juergen Altmann + juergen.altmann&altmann-software.de +34772 + Automat Limited + Philip Leung + notifications&automat.co.uk +34773 + Hx Engineering, LLC + Steve Hendrix + SteveHx&HxEngineering.com +34774 + Huawei Symantec Technologies Co.,Ltd + xiaoyong xiong + xyxiong&huaweisymantec.com +34775 + Canford Software Consultancy Limited + Paul Stiles + paul&canfordsoftware.com +34776 + TODO BPO E SOLUCOES EM TECNOLOGIA S.A. + Oswaldo Dantas + oswaldo.junior&todobpo.com.br +34777 + Capek Consulting + Jan Capek + jan-consulting&capkovi.eu +34778 + American Customer Care, Inc. + Gary Haubert + noc&americancustomercare.com +34779 + DeltaGlobal Co. LTD + Seyyed Mohammad Mohammadzadeh + mehran.m&deltaglobal.net +34780 + IT One + Pascal Stage + pascal.stage&it1.fr +34781 + Unassigned + Removed 2009-12-18 + ---none--- +34782 + dcm4che.org + Gunter Zeilinger + gunterze&gmail.com +34783 + Gordian Knot Limited + Steven Robbins + steven.r&gordian.co.uk +34784 + Hurtig DataComm + Johan Hurtig + johan_hurtig&yahoo.com +34785 + University of Craiova + Marius MARIAN + marius.marian&cs.ucv.ro +34786 + Hartwick Services + Michael J. Hartwick + hartwick&hartwick.com +34787 + Sunflower Broadband + Matthew Welch + mwelch&sunflowerbroadband.com +34788 + Ministerio de Hacienda de El Salvador + Jese Gallardo + jgallardo&mh.gob.sv +34789 + Northgate Information Solutions PLC + Phill Demoore + phill.demoore&demoore.co.uk +34790 + Vtkom d.o.o. + Nenad Opsenica + office&vtkom.com +34791 + Centre Hospitalier de Péronne + BRICHE Nicolas + nbriche&ch-peronne.fr +34792 + Gouvernement de Nouvelle-Caledonie + Eric AUVINET + reseau.dtsi&gouv.nc +34793 + Azblink + Ko-Hsing Chang + khc&azblink.com +34794 + JSC Commercial Bank "Center-invest" + Dyachkov Roman + niarbrnd¢rinvest.ru +34795 + DATA-PKS Ltd. + Svetlin Radoslavov + svetlinr&mail.bg +34796 + MBS GmbH + Nils-G. FRITZ + iana&mbs-software.de +34797 + AM-GmbH + Oliver Greschok + og&am-gmbh.de +34798 + Tribunal Regional Eleitoral do Maranhão + Jorge Carlos Amengol de Lima + jorge.amengol&tre-ma.gov.br +34799 + ACTi Corporation + MingYoung You + mingyoung.you&acti.com +34800 + Polizei Baden-Württemberg + Uwe Xaver + uwe.xaver0&lka.bwl.de +34801 + sublab e.V. + David Lamparter + equinox&local.sublab.org +34802 + HomeNet + Mike Calderwood + contact&mikecalderwood.com +34803 + Gubkin Russian State University + Andrew Pantyukhin + netadm&gubkin.ru +34804 + Wideband Semiconductor, Inc. 
+ Chuck Handley + chuck.handley&wideband.com +34805 + Zivillian Software + Bianco Veigel + bianco.veigel&zivillian.de +34806 + Optical Systems Design Pty Ltd + Xinli Zhu + xzhu&osd.com.au +34807 + Q-TECH INFORMATION CO.,LTD + wei chi + weichinjcit&gmail.com +34808 + Jiangsu broadcasting television network corporation limited + xiaojun gao + gaoxiaojun&gmail.com +34809 + WSP Global Inc + Matthew Groff + matthew.groff&wsp.com +34810 + Intermountain Healthcare + Bruce James + Bruce.James&imail.org +34811 + Stollmann E+V GmbH + Ingo Sandmeier + is&stollmann.de +34812 + Mirifice Ltd + Alex Dick + alex.dick&mirifice.com +34813 + Multilink + Jon Ciccozzi + jciccozzi&multilinkone.com +34814 + Zpend Pty. Ltd. + Chris Forkin + chris&openexpertise.com +34815 + Angry Dog Business Sytems + David Dymock + david&theangrydog.co.uk +34816 + Icon Power Solutions Pvt. Ltd. + Baljeet Singh Bhatti + baljeet&iconpowersolutions.com +34817 + Tamtron Oy + Pekka Lehikoinen + pekka.lehikoinen&tamtron.fi +34818 + CPI Corporation + Derek Chen-Becker + noc&cpicorp.com +34819 + Nacka kommun + Martin Andersson + martin.andersson&nacka.se +34820 + Satcom Technologies EMC + Robert Fitting + rfitting&emcsatcom.com +34821 + Lafayette Consolidated Government + Ben Segura + bsegura&lus.org +34822 + Calet, Inc. + Gregory Allen + gallen&mycalet.com +34823 + VersaForm Systems Corp + Joe Landau + jrl&versaform.com +34824 + Texas A&M University System + Networking & Information Security + tech&net.tamu.edu +34825 + Pareto Networks Inc. + Carl Mower + snmp&paretonetworks.com +34826 + Tayana Software Solutions Pvt Ltd + Mohan Panakam + mohan.p&tayana.in +34827 + RT-RK + Milenko Beric + milenko.beric&rt-rk.com +34828 + Yettel Magyarország Zrt. + Sándor Tihanyi + stihanyi&yettel.hu +34829 + GraphLogic Inc + Kenneth Eschrich + keschrich&graphlogic.com +34830 + Forers, s. r. o. + Petr Kutalek + info&forers.com +34831 + The Freecycle Network + Deron Beal / Richard Wallman + servicenotify&freecycle.org +34832 + Intelibs, Inc + Sohee Kim + sohee.kim&intelibs.com +34833 + DrivenDown.com + Jeff Trnka + iana&drivendown.com +34834 + BermudaTriangle.com + Jeff Trnka + iana&bermudatriangle.com +34835 + Hypercable + DUCASSE Jean-Claude + info&hypercable.fr +34836 + Denkosha Co.,Ltd. + KOIKE Shiro + koike&denkosha.co.jp +34837 + Arrive Technologies + Dan, Nguyen Duc + dannd&atvn.com.vn +34838 + Innovative SCADA Solutions Pty Ltd + Louie Wong + louie.wong&innov8scada.com +34839 + abakus IT AG + Peter Fischer + pki&abakus.de +34840 + Shanghai Huaheng Telecom Equipment Co.,Ltd + yin gen ning + anhuinyg&163.com +34841 + Gordon Food Service, Inc. + Jonathan Craig + Jon.Craig&gfs.com +34842 + TERASAKI ELECTRIC CO.,LTD + Mamoru Sogo + mamoru-sogo&terasaki.co.jp +34843 + NASCENT Technology, LLC + Scott Urban + ieee&nascent.com +34844 + Gennet S.A. + Ilias Apalodimas + apalos&gennetsa.com +34845 + Faculté des Sciences et Techniques de Tanger + Mohammed Azirar + m.azirar&fstt.ac.ma +34846 + SERTRES del Norte + Daniel Rios + juanvelez&logistecsoftware.com +34847 + Spacetime Studios, LLC + Anthony Sommers + asommers&spacetimestudios.com +34848 + Shibasoku + Tetsuya Michikawa + michkawa&shibasoku.co.jp +34849 + Security Code Ltd. + Evseev Ilya + it&securitycode.ru +34850 + EldoS Corporation + Eugene Mayevski + cto&eldos.com +34851 + Adalia Oy + Jani Soila + soila&adalia.fi +34852 + Roeften + Panos Gkikakis + contact&roeften.com +34853 + RainStor + Jonathan Teague + it&clearpace.com +34854 + indeni + Yonadav Leitersdorf + yonadav&indeni.com +34855 + Votiro Ltd. 
(formerly 'MobileTick Ltd') + Aviv Grafi + aviv&votiro.com +34856 + ALTAVIA SA + François ROUSSEAU + f.rousseau&altavia-group.com +34857 + BaiY Studio + Yang Bai + baiyang&gmail.com +34858 + Wireless Networks Association (WNA.gr) + John Haralampidis + admin&wna.gr +34859 + jgfs.net + Jakub Gawkowski + jakub.gawkowski&jgfs.net +34860 + Finalist IT Group + Sander Bos + sander.bos&finalist.com +34861 + Rajant Corporation + Marty Lamb + mlamb&roinet.com +34862 + Fundamo + Willem Visser + willemv&fundamo.com +34863 + Leopold Kostal GmbH & Co. KG + Vit Svarc + v.svarc&kostal.com +34864 + Mito Europe Ltd + Sandor Apati + admin&mitoeurope.com +34865 + World Evolved Services, LLC + Marcus Louie + mlouie&worldevolved.com +34866 + Suffolk County Council (formerly 'Customer Service Direct') + Shang Jin + Shang.Jin&suffolk.gov.uk +34867 + Universitaet Greifswald + Gordon K. Grubert + gordon.grubert&uni-greifswald.de +34868 + Winnerstek Inc. + Jae Young Lee + duri&winnerstek.com +34869 + Ian Hine Consulting + Ian Hine + ian_hine&ozemail.com.au +34870 + Megabyte Doctor (Pty) Ltd + Bame Sekwakwa + megabytedoctor&gmail.com +34871 + trovicor GmbH + Thomas Karger + iana-admin&trovicor.com +34872 + ETM Communications AB + Ulf Bergqvist + info&etmc.se +34873 + Klein & Partner KG + Jens W. Klein + jens&bluedynamics.com +34874 + Automasjon og Data as + Morten Hagland Hansen + mhh&automasjon.no +34875 + Alpikom SpA + Cristian Cappelletti + c.cappelletti&alpikom.it +34876 + Informatics Corporation of America + Alexander Saip + alex.saip&icainformatics.com +34877 + Masergy Communications, Inc. + Mike Clark + mike.clark&masergy.com +34878 + Advanstar Communications Inc. + Dan Beutler + dbeutler&advanstar.com +34879 + BlueCross BlueShield of Minnesota + Robert Hock + Robert_L_Hock&bluecrossmn.com +34880 + Cortina Systems + Andrew Mok + andrew.mok&cortina-systems.com +34881 + EVT Technologies LTD + Ram Tagher + Ram&evt-vms.com +34882 + Kolektor Group + Gorazd Šemrov + gsem&ascom.si +34883 + House Arkko + Jari Arkko + jari&arkko.com +34884 + David Wozny Limited + David Wozny + David&Wozny.org +34885 + SSI (US) Inc + Michael Wareman + mwareman&spencerstuart.com +34886 + Compagnie des Alpes + Jean-Henri ANTUNES + jean-henri.antunes&compagniedesalpes.fr +34887 + Iariss + Muré Michael + batolettre&gmail.com +34888 + Gatix + Michel Stam + mstam&gatix.net +34889 + McKnight Insight + Lawrence McKnight + lawrence.mcknight&gmail.com +34890 + Aizkraukles Banka A/S + Ivars Zaicevs + ivars.zaicevs&ab.lv +34891 + Croatia Control Ltd. + Branko Vesligaj + branko.vesligaj&crocontrol.hr +34892 + Kedros, a.s. + Juraj Hrubsa + hostmaster&kedros.sk +34893 + Leeds City Council + Alistair Fletcher + Alistair.Fletcher&Leeds.gov.uk +34894 + Global Strategies Group, Mission Systems + David Loring + david.loring&gtec-inc.com +34895 + ITaM Services + Daniel Reichelt + mail&itamservices.de +34896 + Gorilla Logic, Inc.
+ Hank Harris + admin&gorillalogic.com +34897 + Centro Brasileiro de Pesquisas Fisicas + Marita Maestrelli + marita&cbpf.br +34898 + CESEC-SFF + lirui + liruibit&hotmail.com +34899 + H5 audits + Lionel Barrère + lionel.barrere&h5audits.com +34900 + Informations Technologie Service und Consulting GmbH + Registration Coordinator + registration.coordinator&your-its.de +34901 + Fortunaglobal (Pvt) Limited + Buddhika Jayasekara + buddhika&fortunaglobal.com +34902 + IT Industry LTD + Anton Globa + a.globa&otdelit.ru +34903 + M247 Ltd + Geoff Garside + geoff.garside&m247.com +34904 + KRC OOO + Roman Sakhnov + admin&regcen.ru +34905 + G-Lab + Robert Henjes + henjes&informatik.uni-wuerzburg.de +34906 + RadiantGrid Technologies, LLC + Kirk Marple + mib&radiantgrid.com +34907 + TELEVIC N.V. + Luc Jonckheere + L.Jonckheere&televic.com +34908 + CareCloud Corp + Thomas Packert + tpackert&carecloud.com +34909 + RHD Research Ltd + Chris Crowther + chris&rhdresearch.com +34910 + Geeknet, Inc. + Uriah Welcome + uriah&geek.net +34911 + Nanjing Jiajian Network Co. Ltd. + Jiancheng Chen + jcchen&jjlink.com +34912 + PT. Nyra + Hendrik Heriyanto Gosali + hgosali&nyra.co.id +34913 + TDM Ingénierie + Loïc Mauhourat + loic.mauhourat&tdm-ing.com +34914 + Creative Consulting GmbH + Adrian Müller + handle&crecon.de +34915 + Free Software Foundation Europe e.V. + Cristian Rigamonti + rigamonti&fsfeurope.org +34916 + Walki Group Oy + John Aspnäs + john.aspnas&walki.com +34917 + Car Chase Workshop Inc. + Christopher W. Baran + chrisbaran&carchaseworkshop.com +34918 + Syncapse Corp. + Kenneth Voort + ldapadmin001&syncapse.com +34919 + Sky Wisdom Technology Limited + Thomas Chen + thomas&xray.hk +34920 + Allen Lund Company + Jason Nelson-Wolfe + jason.wolfe&allenlund.com +34921 + EvoStor Inc. + Cheryl Davis + cheryl&evostor.com +34922 + Epic Advertising Inc + Timothy Barrow + timothy.barrow&epicadvertising.com +34923 + KOOLING + Greg Hill + gaohaiyun&kooling.com.cn +34924 + Augustine Consulting + Adam Augustine + crypto&augustinec.com +34925 + Nantes Systems Private Limited + Aravinda Babu + support&nantes.net.in +34926 + Limited Liability Company "Wild Orchid" + Scherbakov Alexander + a.scherbakov&classic.ru +34927 + DL Santé + Grégory ROUSSEAU + gregory.rousseau&dlsante.fr +34928 + ACVITAS + Andrey Shvydky + andrey&acvitas.com +34929 + SURGIDEX + David Nunes + dnunes&surgidex.com +34930 + Caneris Inc + Erik Levinson + noc&caneris.com +34931 + ShangHai DynamiCode Company Limited + Barry Lv + barrylv&dynamicode.net.cn +34932 + CMS Affiliate + Cody Michael Salnave + SupportAgent&purplehayze.com +34933 + CPO Partners + Leo Romers + oid&cpo-partners.com +34934 + CirclePrinters + JANC Arnaud + arnaud.janc&circleprinters.fr +34935 + marmira + Urs Zurbuchen + incoming&marmira.com +34936 + Creowave Oy + Tommi Matila + creowave&creowave.com +34937 + UniversitaetsSpital Zurich + Jens Grundtvig + jens.grundtvig&usz.ch +34938 + Fédération française d'équitation + Emmanuel HUDE + emmanuel.hude&ffe.com +34939 + SURMEI MIHAI-EUGEN + Mihai Surmei + msurmei&gmail.com +34940 + Roman V. Kiseliov + Roman V. Kiseliov + roman.kiseliov&gmail.com +34941 + DiegoLima.org + Diego Lima + diegolima.br&gmail.com +34942 + Cambrium BV. + Johan Mulder + johan-iana&cambrium.nl +34943 + Sumisho Computer Systems Corporation + Kazuhiro Sato + k.satou&jpta.scs.co.jp +34944 + Amplus Communication PTE LTD + Ryan F. Mejia + rmejia&amplus.com.sg +34945 + Albert Ziegler GmbH & Co.
KG + Andreas Mesch + amesch&ziegler.de +34946 + Densitron Technologies Limited (formerly 'Independent Project Engineering Ltd') + Tom Hirst + tom.hirst&densitron.com +34947 + chill-n-go, Inc + William Estberg + bill&chill-n-go.com +34948 + NORMHOST + Geoffroy MARTIN + geoffroy.martin&normhost.com +34949 + Electronics Source Co.,Ltd. + Pornchai Chantachatkul + pornchai&es.co.th +34950 + Herrmann & Lenz Solutions GmbH + Peter Bekiesch + peter.bekiesch&hl-services.de +34951 + Komercijalna Banka AD Skopje + Oliver Stefanovski + pki-support&kb.com.mk +34952 + nxtControl GmbH + Gernot Kollegger + support&nxtcontrol.com +34953 + Concentric Circle Consulting + Robert Falkowitz + robert&3cs.ch +34954 + Novtis do Brasil S/A + Ricardo Barbosa Matsuno + ricardo.matsuno&novtis.com.br +34955 + Roland Gruber Softwareentwicklung + Roland Gruber + roland.gruber&rg-se.de +34956 + Shanghai Transform IT Co. Ltd + Baozhu Zhao + zhaobzhu&yahoo.com.cn +34957 + TTC Next-generation Home Network System WG + Hideo Kobayashi + kobayashi&ttc.or.jp +34958 + Logilin + Christophe Blaess + contact&logilin.fr +34959 + Ingenieurbuero Berghofer KG + Gerhard Berghofer + office&ib-berghofer.com +34960 + SuperHost.pl s.c. + Jakub Jankowski + jakub.jankowski&superhost.pl +34961 + H&S Hochfrequenztechnik GmbH + Daniel Stock + office&hs-equipment.com +34962 + Falco Networks B.V. + Berry Batist + berry&falco-networks.com +34963 + Idle Pattern Communications, LLC + Omachonu Ogali + oogali&idlepattern.com +34964 + Carbones del Cerrejon Ltd. + Jose A. Diaz + jose.a.diaz&cerrejoncoal.com +34965 + Ricardo I. Vieitez Parra + Ricardo I. Vieitez Parra + ianapen&0xj.info +34966 + Global Rail Systems, Inc + David Ruskauff + davidr&globalrailsystems.com +34967 + Sichuan Hongguan Communication Technology Co., Ltd. + li gong + myltwg&126.com +34968 + Airimba Wireless, Inc. + Charles Taylor + ctaylor&airimba.com +34969 + REGNIA,Inc. + Hisao Kato + tech&regnia.com +34970 + Institut Teknologi Nasional (ITENAS) + Wahyu Hidayat + wahyuhid&hotmail.com +34971 + Ethereal Information Technology, Ltd. + Brett Meier + netops&ethit.com +34972 + Eric Huss + Eric Huss + eric&huss.org +34973 + MIANYANG NETOP TELECOM EQUIPMENT LTD.CO. + li gang + myltwg&126.com +34974 + alentogroup.org + Sven Handre, Director of IT + admin&alentogroup.org +34975 + IPROAD, Inc. + Jay Kim + jay&iproad.co.kr +34976 + Mathew Systems Inc. + Mathew Chan + mathew.chan&mathewsystems.com +34977 + Node-Nine, Inc. + Chris Moody + postmaster&node-nine.com +34978 + Laysis Consulting Inc. + Seth Miller + seth&laysis.com +34979 + Scientis Solutions Ltd + Andi McLean + Andi.McLean&scientis.co.uk +34980 + linexus + Alexander Vogt + a.vogt&linexus.de +34981 + cramif + pailler vincent + vincent.pailler&cramif.cnamts.fr +34982 + Eye Pea Ltd. (formerly 'exaStack Limited') + Jon Fautley + jon.fautley&appstal.com +34983 + 013Netvision + Dmitry Saratsky + nevil&013netvision.co.il +34984 + ER Technology Ltda. + Juan Pablo Daza Pardo + juan.daza&er-technology.net +34985 + kylimar.com + Ben Love + penregistry&kylimar.com +34986 + Bookmans + Jon Hassen + jonh&bookmans.com +34987 + OneWire + Greg Varga + greg.varga&onewire.ca +34988 + Clearstone Central Laboratories + Franck Dufreney + franck.dufreney&clearstonelabs.com +34989 + appventure GmbH + Jens Holze + info&appventure.de +34990 + Vodafone Czech Republic a.s.
+ Ondrej Remes + ondrej.remes&vodafone.com +34991 + SooperCreations + Ivan Kluzak + ivan&sooper.com +34992 + Microlynx Systems Ltd + Ken Mouratidis + snmpsupport&microlynxsystems.com +34993 + Cymfony + Seth Simmons + ssimmons&cymfony.com +34994 + d3v Systems + Konrad Pióro + konrad.pioro&d3v-systems.eu +34995 + DBMJ Rehabilitation Services, PLLC + Richard D. Ball, MD, PhD + r.d.ball&charter.net +34996 + Richard D. Ball, MD, PhD + Richard D. Ball + r.d.ball&charter.net +34997 + Kyoto University of Education + Tsuyoshi Akiyama + ipc&kyokyo-u.ac.jp +34998 + CILogon Project + Jim Basney + info&cilogon.org +34999 + JSC «Institute of infotelecommunications» + Mr. Yuriy Sherstuk + yusher&iitc.ru +35000 + OptXware Research&Development Ltd. + András Balogh + balogh&optxware.com +35001 + NERINFORMATICA DI NERI LUCIANO (formerly 'APTASYS S.R.L.') + Luciano Neri + info&nerinformatica.it +35002 + dcux Co.Ltd. + yu liang + coolfish&dcux.com +35003 + ADP ESI + Jean-Edouard Babin + ipc.network&europe.adp.com +35004 + Bahia Software SL + Jose Ramon Varela Pernas + jramon.varela&bahiasoftware.es +35005 + IMS Nanofabrication AG + Samuel Kvasnica + samuel.kvasnica&ims.co.at +35006 + Vivio World + Szymon Nieradka + szymon&vivioworld.com +35007 + mPAY24 GmbH + Wolfgang Schaefer + wolfgang.schaefer&mpay24.com +35008 + Corporation Service Company + Randall Shutt + rshutt&cscinfo.com +35009 + Techno Factory Desenvolvimento de Software Ltda. + Osvandir Júnior + osilvajr&live.com +35010 + ATK-tehdas Oy + Ossi Väänänen + ossi&atk-tehdas.com +35011 + Toyota Finance Australia Ltd. + Justin-Rei Kurosawa + jkurosawa&tfal.com.au +35012 + StorSimple + Joel Christner + jchristner&storsimple.net +35013 + Sweet Briar College + Aaron Mahler + amahler&sbc.edu +35014 + Fenzke Netzwerktechnik GmbH + Matthias Fenzke + info&fenzke.net +35015 + PWM Project + Jason Rivard + jrivard&gmail.com +35016 + Bank-Pedersen + Niels Chr. Bank-Pedersen + iana&bank-pedersen.dk +35017 + Western Range Maintenance + Patrick Dunn + vicepresidentsteam&gmail.com +35018 + GoMidjets + Tamir Gefen + tgefen&gomidjets.com +35019 + Decho Corporation + Kevin S. Blackham + netmgr&decho.com +35020 + nangu.TV + Antonin Kral + antonin.kral&nangu.tv +35021 + TranSoft a.s. + Dusan Kudora + admin&transoft.cz +35022 + Sueddeutscher Verlag + Schorr Norbert + 9500&sueddeutscher-verlag.de +35023 + Heirich IT Dienste GmbH + Peter Heirich + pen.iana&mail.heirich.eu +35024 + Hospital Sierrallana - Servicio Cántabro de Salud + Raúl Martínez + rmartinezs&hsll.scsalud.es +35025 + Lachlan Gunn + Lachlan Gunn + lachlan.gunn&internode.on.net +35026 + PRODIST TECHNOLOGIES LTDA. + Fernando Tasso Amaral Baptista + fernando.tasso&gmail.com +35027 + liuxiangbupt.com + liuxiang + liuxiangbupt&sohu.com +35028 + netPark LLC + Joshua Williams + jwilliams&netpark.us +35029 + K.C.Innovations Co.,Ltd. + Tanarug Issadisai + tanarug&kcinovs.com +35030 + Run Technologies Co.,Ltd.Beijing + Xin Sun + sunxin&bjrun.com +35031 + GP NPF "Ratex" + Andrey Melovatsky + ratexvrn&yandex.ru +35032 + BelTechInfo + Victor Bolshakov + vbolshakov&beltech.info +35033 + Global Net Commerce, Inc.
+ Joel Chan + info&gnciwireless.com +35034 + Sentai Digital, LLC + John Franklin + franklin&sentaidigital.com +35035 + interCLICK + Andrew Katz + andrew.katz&interclick.com +35036 + ASKO-IT + Serge Duzhnikov + grey.dev&gmail.com +35037 + Onesource + Luis Cordeiro + cordeiro&onesource.pt +35038 + XYMA-SONDEO + anesto dell toro almenares + anestodta&yahoo.com +35039 + Denva ApS + Jørgen Nielsen + jon&qnet.dk +35040 + Xiamen Helios Telecom Equipment Manufacture Inc. + Stanley Zou + Stanley.Zou&heliostelecom.com +35041 + Electro Power Systems SpA + PierPaolo Cherchi + pierpaolo.cherchi&electrops.it +35042 + Sporting Index Ltd + Rob Cowie + RCowie&SportingIndex.com +35043 + Tripower + Samuel S. Ciraulo + sam.ciraulo&tripower.com +35044 + CJSC Schelkovo Agrochim + Timofey L Negrebetsky + hexedit&betaren.ru +35045 + Mathieu Malaterre Consulting + Mathieu Malaterre + mathieu.malaterre&gmail.com +35046 + Intucell + Rani Welling + rani&intucell.co.il +35047 + Sangfor Technologies Co.,Ltd. + yi feng + fengyi&sangfor.com.cn +35048 + ShenZhen GreatFirst Technology Co., Ltd. + Russell Luo + russell&greatfirst.com +35049 + Marler Media + Jon Marler + jmarler&marlermedia.com +35050 + topdog.ru.net + Dmitry Prokopchenkov + dp&topdog.ru.net +35051 + Systemnoe Modelirovanie i Analiz LLC + Vladislav Babin + vladislav.n.babin&gmail.com +35052 + Condrey Corporation + David Arnold + ldapadmin&condreycorp.com +35053 + LLC "Promo Interactive" + Vlad L. Glagolev + it&promo.ru +35054 + Jan Schampera + Jan Schampera + jan.schampera&web.de +35055 + Gameloft + Pascal Lalonde + gns-pen&gameloft.com +35056 + whiteroad + maris malic + junk&whiteroad.ca +35057 + UNESP + Gabriel A. von Winckler + winckler&ncc.unesp.br +35058 + NikB-Soft + Nikola Batchvarov + nikb&mail.bg +35059 + VarnaNet + Kaloyan Kovachev + kkovachev&varna.net +35060 + IGI Mobile, Inc. + Daniel Park + skpark&igimobile.com +35061 + Normation + Jonathan Clarke + jonathan.clarke&normation.com +35062 + AiN-Tech. Corp. + Keita Dougo + dougo&ain-tech.jp +35063 + Ukraine Property Group, Ltd. + Olga Ermolenko + info&upg.kiev.ua +35064 + Jderobot + José María Cañas + jmplaza&gsyc.es +35065 + Sinequa + Alexandre Bilger + support&sinequa.com +35066 + HLRN + Isaac Hailperin + bzadmhai&hlrn.de +35067 + mail.de GmbH + Norman Schulz + n.schulz&team.mail.de +35068 + Virgo Systems Kft. + Peter Heiner + admins&virgo.hu +35069 + Worldwide Trust Asia Pte Ltd + Stefan Jonsson + s.jonsson&wwtrust.com +35070 + Plausible Labs Cooperative, Inc. + Landon Fuller + landonf&plausiblelabs.com +35071 + Q & M Informática Ltda + Marcos Belarmino + bigmarcao&hotmail.com +35072 + NGK Houten + Maarten Bosmans + mkbosmans&gmail.com +35073 + GateWare Communications GmbH + Wolfgang Haubner + w.haubner&gateware.de +35074 + MaxMara Fashion Group s.r.l. + Marco Muccetti + noc&maxmarafashiongroup.com +35075 + BlueTech Technology Co., Ltd. + Situ Xinhong + stxh&bluetech.com.cn +35076 + synfin.de + Rene Graf + rgraf&synfin.de +35077 + Gracenote Inc. + Mohammad Ali Khavari + mkhavari&gracenote.com +35078 + i-fabrik GmbH + Nico Wagner + nico.wagner&ifabrik.de +35079 + Terminal Quequen S.A + Serguey Shevtsov + sergioshev&gmail.com +35080 + KDH + Klaus-Dieter Helbig + klaus-dieter&helbig-mail.de +35081 + Sarana Tunggal Pratama + Asep Maryana + maryana&saranatunggal.com +35082 + Amaranten (ASIA) Network Co. Ltd. 
+ Liu Feng + liufeng&amarantenasia.com +35083 + TOSS GmbH + Bernd Thinius + info&toss.de +35084 + RBS Coutts International + Zurkinden Dominik + dominik.zurkinden&rbscoutts.com +35085 + EURO-INFORMATION + Pascal BOBÉE + certif-admin&e-i.com +35086 + ReliabilityFirst Corporation + Shawn Liggett + shawn.liggett&rfirst.org +35087 + BD Consult A/S + Bent Dahl + support&bdconsult.eu +35088 + SRA International, Inc + Kay Patterson + Kay_Patterson&sra.com +35089 + Bluebird Electronics Ltd + Nigel Gardner + nigel&bluebird-electronics.co.uk +35090 + CTI + Michael G. Martinez + info&canam-technology.com +35091 + Mitec Telecom Inc + Daniel Bisson + daniel.bisson&mitectelecom.com +35092 + Made IT + Dennis Leeuw + dleeuw&made-it.com +35093 + AT Testing + Andrea Tedeschi + admin&andreatedeschi.com +35094 + iDevices Industria e Comercio de Produtos Eletronicos Ltda + Francis Munhoz Mazzaro + suporte&idevices.com.br +35095 + OFFICE24 Co.,Ltd. + Takaaki Arai + info-request&webjapan.co.jp +35096 + SmartEquip Inc. + George Gu + ggu&smartequip.com +35097 + BlueWater Communication Group + Henry Feusi + henry.feusi&blueh2ogroup.com +35098 + PICA8 Inc. + Sharad Ahlawat + apple&pica8.com +35099 + E-Mice Solutions (H.K.) Limited + Stephen Chu + stephenchu&e-mice.net +35100 + Globelinks Technologies Limited + Andy Ng + andy&btl.com.hk +35101 + SHENZHEN SHENXUN INFORMATION TECHNOLOGY DEVELOPMENT CO.,LID + RenDong Lee + SilentWoolf&hotmail.com +35102 + Guangdong Huada Integrated Technology Co.,ltd. + wangchan + wangchan&hd-os.com +35103 + Sichuan Cybercamera Information Technology Co.,Ltd + Qin Ling + qinl&cybercamera.com.cn +35104 + Oerlikon Textile + Thomas Overbeck + thomas.overbeck&oerlikon.com +35105 + Mason Brown Development, LLC + Mason Brown + masonbrown&gmail.com +35106 + Reale Mutua Assicurazioni + Alberto Re + alberto.re&realemutua.it +35107 + Prosol Group (Pty) Ltd + Grant Hart + grant.hart&prosol.co.za +35108 + tocore education + Liu Ming + king.liuming&gmail.com +35109 + Scienta Media s.r.o + Maria Burykhina + maria.burykhina&scientamedia.com +35110 + 443 Pty Ltd + Colby Pender + oid&443.com.au +35111 + NBank - Investitions und Förderbank + Christian Wahlmann + christian.wahlmann&nbank.de +35112 + Vem Sistemi SpA + Vittorio Valentini + valentini&vem.com +35113 + OpTier + Elad Schulman + elad.schulman&optier.com +35114 + Robert Wood Johnson University Hospital + Martin A Flynn + mflynn&rwjuhr.com +35115 + SqueakSoft.com, LLC + Ron Chinn + ron&squeaksoft.com +35116 + Trover Solutions + Jason P Messenger + jmessenger&hcrec.com +35117 + BamNet + Ben VanDenBerg + bamojr&gmail.com +35118 + Korestone Technologies + Chiming Huang + chiming.huang&korestone.com +35119 + Belhard Group JSC + Gubanov Alexei + GubanovAS&belhard.com +35120 + SIDN + Stephan Rütten + stephan.rutten&sidn.nl +35121 + BULiGL + Andrzej Talarczyk + Andrzej.Talarczyk&zarzad.buligl.pl +35122 + Wayob + Christian Körner + christian.koerner&wayob.de +35123 + Clear Channel Radio Digital + Douglas Cliche + douglascliche&clearchannel.com +35124 + FilesAnywhere.com + Jonathan Grubb + jonathan&filesanywhere.com +35125 + Helmut Ritter + Helmut Ritter + info&helmut-ritter.de +35126 + Vodafone Group Services GmbH + Helmut Ritter + hostmaster&vodafone-rnd.com +35127 + Japan Communication Inc. + Shinya Arai + sarai&j-com.co.jp +35128 + KWS-Electronic GmbH + Werner Blabsreiter + w.blabsreiter&kws-electronic.de +35129 + Rahn & Bodmer Co. 
+ Marcel Matter + marcel.matter&rahnbodmer.ch +35130 + fipscode + Fabiano Sidler + fabianosidler&swissonline.ch +35131 + London Underground Ltd + Michael Collyer + Michael.Collyer&MetronetRail.com +35132 + IFProject + Timofey L Negrebetsky + hexedit&ifproject.ru +35133 + Gobierno de la Republica NeuKarthago + John Stewart Diaz Di Castelo + gob.neukarthago&redesdigitales.com +35134 + 4Access Communications Company, Inc. + James Skinner + admin&4access.com +35135 + Personal Software Company + Sebastien Lefevre + seb.lefevre&gmail.com +35136 + BlueStripe Software + Maury Cupitt + mcupitt&bluestripe.com +35137 + Chris Clayson + Chris Clayson + chris&stmaggies.org +35138 + PK Verkkotaito Oy + Andreas Backlund + org-admin&eteinen.fi +35139 + AZWUL + Théotime Jurzak + jurzak&gmail.com +35140 + CableHD Radio Network + Carlton Davis + cdavis&cablehdradio.net +35141 + PSIGlenmere + Rudy Valencia + rudy.valencia&psiglenmere.com +35142 + VX4.NET + Christian Kahlo + cck&vx4.de +35143 + Helpo Systems + Claudio Henrique Fortes Felix + claudio&helpo.com.br +35144 + Power Quadrant + Robert Turner + rturner&powerquadrant.com +35145 + Gearbit + Ray Tompkins + tech08&gearbit.com +35146 + China Net Cloud + Stefan Berder + stefan.berder&chinanetcloud.com +35147 + Wishfi Pte. Ltd + Einstein Lin + elin&wishfi.com +35148 + Technological Educational Institute of Piraeus + Christina Avgerinou + noc&teipir.gr +35149 + NPO Lisco Ltd. + Valeriy Novikov + nvn&lisco.ru +35150 + inno-networks + shmulik bezalel + shmulik&inno-networks.com +35151 + Raiffeisen Bank Aval + Oleg Ivanivskyi + oleg.ivanivskyi&aval.ua +35152 + AvroRAID + Michael Stepanenko + mistic&unixway.org +35153 + Kynetics srl + Andrea Zoleo + andrea.zoleo&kynetics.it +35154 + PowerShield Ltd. + Paul Hectors + paul&pwrshield.com +35155 + IP Cube Co.,Ltd + Eitaro Washio + info&ip3.co.jp +35156 + Unbit + Roberto De Ioris + info&unbit.it +35157 + University of Manitoba + Tony Wong + atwong&umanitoba.ca +35158 + Osaka International Educational Institution + Akihiko Nishikawa + nisikawa&oiu.ac.jp +35159 + ITC MIKS, LLC + Alexander Antonenko + aga&itc-miks.com.ua +35160 + Equicom + Evgeny Kiselyov + info&equicom.dp.ua +35161 + Venturi S.R.L. + Matteo Rocco Creati + matteo.creati&gmail.com +35162 + TeleOSS Consulting Ltd. + Voravit Satitviriyakul + voravit&teleoss.com +35163 + OOO InfoTechService + Axe Ilshat + mysocks&inbox.ru +35164 + Black Earth Consulting Ltd + Matthew Seaman + m.seaman&black-earth.co.uk +35165 + Barid Al Maghrib + Dr. Ahmed KADA + kada&poste.ma +35166 + E2E Networks Private Limited + Ashish SHUKLA + ashish.shukla&e2enetworks.com +35167 + MPI Bremen + Mathias Doehle + mdoehle&mpi-bremen.de +35168 + NT Services Ltd. + Joe Lefort + joe.lefort&neovia.com +35169 + Sven Christel + Sven Christel + spam&chbln.de +35170 + Bioptic Co., Ltd. + Sanghyun CHOI + shchoi&bioptic.com +35171 + Cloudeva + Hai Yun Zhao + haiyunzhao&gmail.com +35172 + Nordex + Andreas Klaus + aklaus&nordex-online.com +35173 + Automobile Association "DreamCar" + Igor Nagorny + igor.nagorny&dream-car.ru +35174 + PBS&J + Brian Owen + bdowen&pbsj.com +35175 + TamKang University - Information Management Network Association + Jodern Yap + support&mis.im.tku.edu.tw +35176 + Asavie Technologies Ltd. 
+ Tom Maher + tmaher&asavie.com +35177 + Control4 + John Epeneter + jepeneter&control4.com +35178 + MJV Tecnologia Ltda + Kaoru Kishimoto + kaoru.ronaldo&mjv.com.br +35179 + danzuck + Daniel Zuck + iana&danzuck.ch +35180 + Mark Domansky Consulting + Mark Domansky + mark&sagewolf.com +35181 + Techubs Network + Hui Kam Choi + drchoi&techubs.com +35182 + Aggregate Knowledge, Inc + Igor Fedulov + igor&aggregateknowledge.com +35183 + Rob Haverkort BV + Rob Haverkort + rob.haverkort&rhbv.nl +35184 + Kontorsplatsen Business Group AB + Eddie Olsson + eddie.olsson&kontorsplatsen.se +35185 + PlainText s.r.o. + Matus Svrcek + svrcek&plaintext.sk +35186 + Hiromasa Nara + Hiromasa Nara + hiromasa.51&gmail.com +35187 + kunming ucrown network system (china) ltd. co + chen li + chenli&ucrown.com +35188 + Eventide + Richard Factor + Rfactor&eventide.com +35189 + Thermo Fisher Scientific Inc (formerly 'Thermo Electron LED GmbH') + John Peduzzi + john.peduzzi&thermofisher.com +35190 + Trice Imaging, Inc. (formerly 'Great Connection') + Martin Westin + martin&triceimaging.com +35191 + Applied Software Control Ltd. + David Barker + d.barker&ascman.co.uk +35192 + Miracle Tan + Miracle Tan + miracle.tan&163.com +35193 + MicroNet Video Technology Ltd., + Cang Lin + lc&vip.sina.com +35194 + Barclays Capital + ilia solovey + ilia.solovey&barclayscapital.com +35195 + Rudraksha Technology Private Limited + Dilip Srivastava + dilip.srivastava&rtpl.co.in +35196 + Demonware Limited + Miroslaw Baran + sys&demonware.net +35197 + INTEGRA Software Systems + Timothy J. Spires + tspires&pobox.com +35198 + Al-Madinah International University + Mohamad Zaman Shahri + zaman.shahri&mediu.edu.my +35199 + DynCorp International LLC + Ronald S. Merriman + scott.merriman&dyn-intl.com +35200 + ARIAL + Vladislav Zhuravlev + vzhuravlev&runway.ru +35201 + Ochanomizu University + Ayako Watanabe + netadmin&cc.ocha.ac.jp +35202 + TechniData AG + Andreas Zurell + bits-server&technidata.com +35203 + squatlabs + Alexander Meindl + webmaster&squatlabs.com +35204 + Younicos AG + Michael Friedel + friedel&younicos.com +35205 + SIAG-OMC + STEFANIA GRILLI + s.grilli&siagomc.com +35206 + AllDSP GmbH & Co. KG + Jeroen van Waterschoot + info&alldsp.com +35207 + Dynamica s.r.l + Roberto Chiappa + info&dynamica.it +35208 + Trubiquity GmbH + Achim Trieselmann + atrieselmann&trubiquity.com +35209 + Fomento de Construcciones y Contratas, S.A + Antonio Collar Alberola + ACollar&fcc.es +35210 + Bechtle AG + Thomas Rack + thomas.rack&bechtle.com +35211 + North Point Technology LLC + Bryan Bell + email&nptllc.org +35212 + dealnews.com, Inc. + Daniel Beckham + iana-oid&dealnews.com +35213 + Sandbox Solutions + Torsten Robert Kirschner + torsten.kirschner&sandbox.no +35214 + Agence pour l'Enseignement Français à l'Etranger + Alexandre Duval + alexandre.duval&diplomatie.gouv.fr +35215 + ZAO "Begun" + Alex Kuklin + akuklin&begun.ru +35216 + w-Ha + Laurent Cloarec + lcloarec&w-ha.com +35217 + Adknowledge Inc + Scott Kahler + skahler&adknowledge.com +35218 + Beemaster ICT + Jeroen Peschier + j.peschier&beemasterict.com +35219 + FttX.nu + Erkan Tuyen + e.tuyen&fttx.nu +35220 + Ioannes Co., LTD + Igor Kulakov + ioannes&yandex.ru +35221 + S.C. InterData Systems S.R.L. 
+ Adrian Pitulac + adrian&idsys.ro +35222 + NewYork-Presbyterian Hospital - Radiation Oncology + Hansen Chen + hac9005&nyp.org +35223 + Insight International Corporation + Yoji Nakanishi + Yoji.Nakanishi&insight-intl.com +35224 + Langara College + Jonathan Chui + serveradmin&langara.bc.ca +35225 + Shanghai HOWZONE Network Communication Device Co.,LTD + ann zhou + ann&how-zone.com +35226 + Bureau Software Development Corporation + John Bureau + iana2bsdc&fastmail.fm +35227 + OOO Nika Motors Holding + Petrov Vladimir + pvv&nikamotors.ru +35228 + Commsquare BVBA + Stefan Engels + stefan&commsquare.com +35229 + Intercard Services AD + Kosta Velikov + kosta.velikov&icard.bg +35230 + Generic Sweden AB + Håkan Sjöö + hakan.sjoo&generic.se +35231 + Arca Sistemi SCARL + Michele Sandonini + michele.sandonini&arcavita.it +35232 + Imaging In Motion LLC + David E. Mars + david.mars&imaginginmotion.com +35233 + WHITEBEARSOLUTIONS + Ricardo Lorenzo Rodríguez + ricardo.lorenzo&whitebearsolutions.com +35234 + Computerized Assessments & Learning, LLC + Stephen Spencer + sdspence&caltesting.org +35235 + Hargrove & Associates, Inc. + Brian Seebacher + brian&haiint.com +35236 + Florian Rupp Ingenieur + Johannes Florian Rupp + mass&frupp.de +35237 + KBJ S.A. + Lukasz Krotowski + pen.info&kbj.com.pl +35238 + Novgorod Information and Analitical Centre + Yuri Tarasov + yuri-tarassov&niac.ru +35239 + NetSieben Technologies INC. + Andrew Useckas + andrew&netsieben.com +35240 + Beehivetesting + Robert Schweighardt + sepicrob&gmx.de +35241 + Betz Stefan -- Webdesign & Computerservice + Betz Stefan + info&stefan-betz.net +35242 + Cygnus Networks GmbH + Dr. Torge Szczepanek + info&cygnusnetworks.de +35243 + Dorna Sports + Tony Aguilar + itsoftware&dorna.com +35244 + Wojo + Robert Wojciechowski + robert&wojo.net +35245 + Railcar Management, Inc + Robert Wojciechowski + robert.wojciechowski&rmiondemand.com +35246 + OS Qingdao University + Difan Zhang + alex&osqdu.org +35247 + OpenDL Pty Ltd + HONG CHEN + hchen&opendl.net +35248 + Serendio Inc + Harsha Elchuri + harsha&serendio.com +35249 + ALTAMYS - Tiers de Confiance + Pierre Emmanuel de POMPIGNAN + pedepompignan&altamys.com +35250 + Simply-Info + Sebastien DAMAY + simply-info&damay.eu +35251 + AWIND Inc + Pete Chou + ptchou&awindinc.com +35252 + SAM Electronics + Jan Lausch + hostmaster&marine-mail.com +35253 + AixSolve GmbH + Carsten Skopinski + cs&aixsolve.de +35254 + NOS Comunicações (formerly 'ZON Multimédia') + João Serras Rodrigues + joao.srodrigues&nos.pt +35255 + Mind Candy Ltd + Ryan Conway + ryan&mindcandy.com +35256 + Iondale Information Services Limited + Fabien Broquet + fabien.broquet&iondalegroup.com +35257 + Brisa Inovação e Tecnologia, SA + Joaquim José Paiva Pereira + joaquim.pereira&a-to-be.com +35258 + HEHA.ORG + Owen Hau + owen&heha.org +35259 + Televersions LLC. + Levente Tamas + levi&televersions.com +35260 + Rotkraut + Rolf Krahl + hostmaster&rotkraut.de +35261 + Norwegian Defence + Sturla Moller + sturlam&gmail.com +35262 + Stadtwerke Saarbruecken GmbH (formerly 'Versorgungs- und Verkehrsgesellschaft Saarbruecken mbH') + Bernd Kraus + bernd.kraus&sw-sb.de +35263 + Araneo Limited + Nathan Ward + nward&braintrust.co.nz +35264 + TetraStorm Technologies + Ragesh Krishna + oid.admin&tetrastorm.com +35265 + Eltex Enterprise, Ltd. + Ivan Yeryomin + ie&eltex.org +35266 + EJJE, Lda + Eduardo Correia + mibs&ejje.eu +35267 + Eten Technologies Inc. 
+ Allan Hsu + allan_hsu&etentech.com +35268 + ORESCO + Ivan V Kvasnikov + kvasnikov&oresco.ru +35269 + Siemens AG Österreich, CMT (formerly 'Siemens AG Österreich, SIS SDE SVI OSS SAC') + Werner Hornik + werner.hornik&siemens.com +35270 + Samji Eletronics Co., Ltd. + Chungsoo Shin + cavatina&samji.com +35271 + micallef.fr + David Micallef + david&micallef.fr +35272 + DEBID + Hector Cano + h.cano&debid.cat +35273 + Jah'Z Interactive + Sylvain Rossi + sylvain.rossi&gmail.com +35274 + CHU Angers + M. Vallée Eric + ErVallee&chu-angers.fr +35275 + SMARIS s.r.o. + Radomir Hornicek + rhornicek&smaris.cz +35276 + TopCoder, Inc. + Travis Haas + thaas&topcoder.com +35277 + ZZ Dats Ltd + Edzus Zeiris + edzus&zzdats.lv +35278 + Texas Communications Inc. + Jacob Bachmeyer + oidcontact&texascom.com +35279 + Singlenesia Software + John Martin + johnm&singlenesia.com +35280 + QuickPlay Media, Inc. + Torin Walker + torinw&quickplay.com +35281 + METRACOM + Philippe SOLTYSIAK + psoltysiak&metracom.fr +35282 + Community Health Network + Mark Kutin + mkutin&ecommunity.com +35283 + Pacific Blue Cross + Jason Burrows + jburrows&pac.bluecross.ca +35284 + UROEP.COM + Kim,Kihyeon + kihyeon.kim&gmail.com +35285 + Ministry of Administration and Interior - General Directorate for IT & C + Marian Augustin Răducu + marian.raducu&mai.gov.ro +35286 + Johanniter-Unfall-Hilfe e. V. - Landesverband Baden-Wuerttemberg + Dr. Robert Formanek + it&juh-bw.de +35287 + Mobile Metrics + Asa Atallah + admin&mobilemetrics.net +35288 + Deep Web GmbH & Co KG + Oliver Loch + netadmin&deepweb.de +35289 + Quative Limited + Prabhu Subramanian + prabhu.subramanian&quative.tv +35290 + ptman.name + Paul Tötterman + tech&ptman.name +35291 + Embvue + Marc-Andre Boulais + boulaisma&embvue.com +35292 + Embedded Solutions Group + Praveen Minumula + praveen&dsrminc.com +35293 + Collegium Josephinum Bonn + Stefan Hanßen + s_hansse&cojobo.net +35294 + INETJ Communications, LTD + Carl Gideon + cgideon&inetj.com +35295 + ExitPi + Ryan Edgar + random.oid-admin&exitpi.com +35296 + Cyclone Microsystems Inc. + Scott Coulter + scott.coulter&cyclone.com +35297 + orainf + Remigiusz Boguszewicz + remigiusz.boguszewicz&gmail.com +35298 + dvsAnalytics, Inc. + Rita Dearing + rdearing&dvsAnalytics.com +35299 + Universidad del Valle de Guatemala + Steve Ortiz + steve&uvg.edu.gt +35300 + REGISTRO NACIONAL DE IDENTIFICACIÓN Y ESTADO CIVIL + Ricardo Saavedra Mavila + iana&pkiep.reniec.gob.pe +35301 + Kvazar Test + Ivan V Kvasnikov + kvasnikov&gmail.com +35302 + Outblaze Limited + Howard Chui + howardc&outblaze.com +35303 + Velocity + Rich Pawly + admin&velocity.org +35304 + Procube Ltd. + Mitsuru Nakakawaji + mitsuru&procube.info +35305 + DB ELETTRONICA TELECOMUNICAZIONI SPA + FELIZIANI GUGLIELMO + fg&dbbroadcast.com +35306 + Equinox Software, Inc. + John Jones + jjones&esilibrary.com +35307 + Screwgun Logic + Tyson Sommer + register&screwgun-logic.com +35308 + Koukaam a.s. + Milan Rusek + mrusek&koukaam.se +35309 + am-cor inc. + angus macdonald + amm&am-cor.com +35310 + Gaggle.Net, Inc. + Chris Moates + cmoates&gaggle.net +35311 + ARANZ Medical Ltd + Christopher Fairbairn + chris.fairbairn&aranzmedical.com +35312 + gabosh + Oliver Bohlen + olli&gabosh.net +35313 + Spinnaker Web Design & Hosting LLC + Eric Patterson + epatterson&spinnakerweb.com +35314 + Conergos GmbH & Co. KG + Florian Hagedorn + florian.hagedorn&conergos.de +35315 + Wave Creative Technologies + Lau Kwok Hung + kh.lau&wave-creative.com.hk +35316 + Schauer Hungaria Kft. 
+ Ferenc Sárközy + f.sarkozy&schauer.hu +35317 + Kolibri Systems B.V. + Bart Cuperus + bart.cuperus&kolibri-systems.com +35318 + Kutztown University + Lisa Frye + frye&kutztown.edu +35319 + Ingenu, Inc. (formerly 'On-Ramp Wireless, Inc.') + Brandon Beam + brandon.beam&Ingenu.com +35320 + Università degli Studi di Torino + Paola Laguzzi + paola.laguzzi&unito.it +35321 + Profitcomputing s.r.o. + Daniel Vanco + daniel.vanco&profitcomputing.com +35322 + PJRC.COM, LLC + Paul Stoffregen + paul&pjrc.com +35323 + Kliniken Nordoberpfalz AG + Robert Dworschak + iana&kliniken-nordoberpfalz.ag +35324 + Edutel B.V. + Dave Hellings + noc&edutel.nl +35325 + Universität Witten/Herdecke + Christoph Peus + postmaster&uni-wh.de +35326 + University of Music and Drama Hannover + Frank Meister + frank.meister&hmt-hannover.de +35327 + SMABTP + Jean-Victor Balin + jean-victor_balin&smabtp.fr +35328 + BlueCrest Capital Management + Stephen Gibbs + stephen.gibbs&bluecrestcapital.com +35329 + S. Walter Packaging + David Tenney + dtenney&swalter.com +35330 + RRsat Global Communications Network + Oded shor + oded&rrsat.com +35331 + Faculdade Natalense para o Desenvolvimento do Rio Grande do Norte + Lineu Paiva + lineu&farn.br +35332 + Alticast Corp. + Younggi Hong + nari&alticast.com +35333 + pk0 + Shuja Khan + shuja&pk0.net +35334 + Lamda Networks + Uri Rotshtein + uri&lamdasys.com +35335 + Alion Science and Technology + Matt Jacobs + mjacobs&alionscience.com +35336 + Lime Labs, LLC + Jesse Callaway + snmp&limelabs.com +35337 + Primitives.lv + Romans Krjukovs + romans.krjukovs&gmail.com +35338 + IMFirewall Software + Bruce Geng + Bruce_Geng&imfirewall.us +35339 + Yambay Technologies Pty Ltd + Paul Vowles + paulv&yambay.com +35340 + dustOS + Mathias Kuester + abstract&dustos.org +35341 + Bundesamt f. Eich- und Vermessungswesen + Eduard Täubl + post.master&bev.gv.at +35342 + American HomePatient + Dave Morris + ahp-pki&ahom.com +35343 + Cairo University Hospitals + El-Sayed Abdallah Ayoub + drsayed&gmail.com +35344 + Thales Defence Deutschland GmbH + Axel Offt + axel.offt&de.thalesgroup.com +35345 + SRC d.o.o. + Benjamin Orazem + beno.orazem&src.si +35346 + Northern Ireland Housing Executive + Mark McNeilly + mark.mcneilly&bt.com +35347 + WuHan RenTang Information Limited + SunQunYing + whrtqa&qq.com +35348 + RTQA Medical Limited + Cheung Yu Ka + rtqa&rtqa.com +35349 + Takahashi Yusuke + Takahashi Yusuke + taka.yuu8917&gmail.com +35350 + DS-Department + Dmitry Sotarev + dmitry&sotarev.ru +35351 + Silver Sky Soft + Kiran Kumar + contact&silverskysoft.com +35352 + A&R Carton + Jörn Ehlich + joern.ehlich&ar-carton.com +35353 + Entry Point + Maciej Dobrzanski + reg.iana&entrypoint.pl +35354 + Nintendo of America Inc. + Bryan Irvine + bryair01&noa.nintendo.com +35355 + Intelliresponse Systems Inc + Jon Barnett + jon.barnett&intelliresponse.com +35356 + Telezygology, inc. + Mark Bisaillon + M.Bisaillon&tz.net +35357 + Garden State Health Systems + Peter Bates + pbates&gshsys.com +35358 + Syntervision + Greg Elmore + greg.elmore&syntervision.com +35359 + Fensom System S.L. + Francisco J. Lazur + fjlazur&fensomsystem.com +35360 + ZODIAC Data Systems GmbH + Thomas Otten + thomas.otten&zodiacaerospace.com +35361 + Newtel Engineering S.r.l. 
+ Diego Brocchi + d.brocchi&newtel-eng.com +35362 + Quill Training Systems Ltd + Richard Dawson + quilltraining&btconnect.com +35363 + University of Huddersfield + Peter Hutchison + p.j.hutchison&hud.ac.uk +35364 + Seattle Children's Hospital + Mike Kindle + Mike.Kindle&seattlechildrens.org +35365 + Schroff Technologies International, Inc + David Therrien + david.therrien&schrofftech.com +35366 + ChengDu OuRuan Corp., Ltd. + Yu Su + suyu.oursoft&gmail.com +35367 + ITL, LLC + Roberto Schipp + rschipp&itl-llc.com +35368 + doubango + Diop Mamadou + diopmamadou&yahoo.fr +35369 + 3iMedia GmbH + Frank Schmidt + swyx&3imedia.de +35370 + natnat inc. + Aldwin Panganiban + aldwin.p&gmail.com +35371 + Infocore.Inc + Hai Wang + wanghai&infocore.cn +35372 + Daniel Clark + Daniel Clark + clarkddc&gmail.com +35373 + Slovak Telekom, a.s. + Miloslav Lehotský + miloslav.lehotsky&st.sk +35374 + Universidade Federal do Espirito Santo + Hans-Jorg Andreas Schneebeli + diretor.geral&npd.ufes.br +35375 + UAB "Mano numeris" + Ričardas Pocius + ricardas.pocius&numeris.lt +35376 + SOFTWAY MEDICAL + David BARRE + dbarre&waid.fr +35377 + Funkwerk IP-Appliances GmbH + Frank Eberle + support&packetalarm.com +35378 + Antwerpse Waterwerken + Joris Mertens + jmertens&aww.be +35379 + ISiS Papyrus Software AG + Max Pucher + info&isis-papyrus.com +35380 + Rogers Broadcasting Ltd. - Vancouver Television + Nathan Pachal + nathan.pachal&rci.rogers.com +35381 + Informatikos ir rysiu departamentas prie Lietuvos Respublikos vidaus reikalu ministerijos + Alvyda Pupkoviene + alvyda.pupkoviene&vrm.lt +35382 + EuPathDB Bioinformatics Resource Center + Mark Heiges + it&apidb.org +35383 + Swissfram SRL + Jose Luis Franzen + josefranzen&gmail.com +35384 + International Social Security Association + Pascal Tetard + tetard&ilo.org +35385 + Far Systems SpA + Andrea Turso + andrea.turso&farsystems.it +35386 + Neverfail Group + Ian Brown + ibrown&neverfailgroup.com +35387 + Ning + Mike Svoboda + ops&ning.com +35388 + Beyel + Rainer Beyel + rainer&beyel.de +35389 + KIWIGRID + Joerg Eichhorn + joerg&kiwigrid.com +35390 + Fednot asbl / vzw (formerly 'Credoc services CVBA') + Dimitri Geshef + security&fednot.be +35391 + HealthTrio, LLC + Connie Lagneaux + connie.lagneaux&healthtrio.com +35392 + sepox.de + Oliver Kahr + hostmaster&sepox.de +35393 + Blue Vervet Ltd + Dan Hulsmann + dan&bluevervet.com +35394 + SuKaiTek + Barnaby J Astles + bjastles&gmail.com +35395 + PT. Prima Teknologi + Fendy Riyanto + fendy&primateknologi.com +35396 + ZAO Intercross + Elinskiy Igor + gushin&intercross.ru +35397 + x-fabric GmbH + Roman Puls + info&x-fabric.com +35398 + IDKAYA + Thierry Caminel + tcaminel&idkaya.com +35399 + CeNetMon + Yaroslav Shcherbakov + info&cenetmon.net +35400 + Noumenon, LLC + Steve Berglie + sberglie&noumenon.us +35401 + ElectroLink Srl + Mirko Pettinelli + ufficiotecnico&electrolink.com +35402 + Internet Connectivity Group, Inc + Marty Bernal + mbernal&icginfo.com +35403 + Hochschule Mittweida + Matthias Luehr + luehr&hs-mittweida.de +35404 + Beijing C&W Optical Communication Technology Co.,Ltd. + Guo Huan + gh&bcoc.com.cn +35405 + Default Deny Security + Phillip Hallam-Baker + hallam&defaultdenysecurity.com +35406 + RADiflow + Rafi Horev + rafi_h&radiflow.com +35407 + BOUYGUES IMMOBILIER + Olivier RADIX + contactiana&bouygues-immobilier.com +35408 + Ing. Roman Těšík + Roman Těšík + roman.tesik&gmail.com +35409 + AccelOps, Inc. 
+ Partha Bhattacharya + partha&accelops.net +35410 + SOLIS - Cooperativa de Soluções Livres Ltda + Niumar André Klein + niumar&solis.coop.br +35411 + Fortium Technologies Ltd + Ceri Coburn + ceri&fortiumtech.com +35412 + CZ-MAN s.r.o. + Roman Tesik + roman.tesik&gmail.com +35413 + GlideAround LLC + Paul M Seigler + pseigler&glidearound.com +35414 + Molisoft grupo empresarial S.L. + Miguel Molinos Muñoz + Miguel.Molinos&molisoft.com +35415 + Universitas Surabaya + Gde Bagus Aryana + sim&ubaya.ac.id +35416 + DHD Deubner Hoffmann Digital GmbH + Sven Hoffmann + snmp&dhd-audio.de +35417 + Randstad Deutschland + Heiko Schmidt + heiko.schmidt&de.randstad.com +35418 + icub3d + Joshua Marsh + joshua&icub3d.com +35419 + Electron, Ltd. + Gushchin Vladimir + info&electron-ru.com +35420 + rtron + Dennis Lee + kwlee&r-tron.com +35421 + ChinaScope Financial Limited + Frank Liu + it&chinascopefinancial.com +35422 + Schweizer Paraplegiker-Zentrum + Staub Bruno + bruno.staub&paranet.ch +35423 + berolina Schriftbild GmbH & Co. KG + René Steffin + r.steffin&berolina.de +35424 + Viprinet GmbH + Simon Ley + ley&viprinet.com +35425 + Mastery Technologies, Inc. + Dave Lawson + dlawson&masterytech.com +35426 + The OpenSSH Project + Damien Miller + djm&openssh.com +35427 + Data Turbine, Inc. + Scott Schoenthal + scott.schoenthal&dataturbine.com +35428 + Sterling Computer Systems + Lionel Johnson + ljohnson&sterling1.com +35429 + Limis + Fabien Costard + fabien.costard&limis.fr +35430 + IntelligentWorks Co., Ltd. + KITAMURA Hideyuki + kitamura&intelligentworks.co.jp +35431 + KAMFU Infomation & Technology Co., LTD. + Xue Lihui + xuelihui&gmail.com +35432 + Superblock, LLC + joshua stein + jcs&superblock.net +35433 + Connexion Technologies + Joel Burgess + joel.burgess&cnxntech.com +35434 + Quadrant Newmedia Corp. + Darren Widenmaier + iana.oid&quadrant.net +35435 + Anabasis Consulting Ltd. + Lawrence Weeks + lweeks&anabasis.net +35436 + 01.com, Inc. + Lawrence Weeks + hostmaster&01.com +35437 + SERPRO - Serviço Federal de Processamento de Dados + Rodrigo Hjort + rodrigo.hjort&serpro.gov.br +35438 + ekom21 - KGRZ Hessen + Carsten Scherb + carsten.scherb&ekom21.de +35439 + App B.V. + Jeff Reiffers + jeff&appsoftware.nl +35440 + Faivre et Mahon + Florian Mahon + info&faivre-et-mahon.ch +35441 + corporacion empresarial iunka + Iurii Andamasov + yuriy&iunika.net +35442 + deZem GmbH + Michael Bayer + m.bayer&dezem.de +35443 + Cat Consulting, Inc + Beth Hunt + info2&catconsulting.com +35444 + Spectralnet Sollutions, LLC + Kevin R. James + kevin&spectralnetaz.com +35445 + Information Security Service Digital United + Chia Huan Wu + willie&issdu.com.tw +35446 + Connaught FactsLine Ltd. + Gabriel Bixade + gbix73&yahoo.com +35447 + SeeByte Ltd. + Stephane Chazelas + postmaster&seebyte.com +35448 + Video Location Service + Sebastien Baguette + iana-pen&sav-dnv.be +35449 + Keytech BVBA + Cedric Ghiot + c.ghiot&keytech.be +35450 + Storagedata + Mark Ruijter + mruijter&storagedata.nl +35451 + Incenp + Damien Goutte-Gattat + dgouttegattat&incenp.org +35452 + Commercial Bank DeltaCredit + Alexander Kotkov + itsecurity&deltacredit.ru +35453 + DREAM TRAIN INTERNET, INC.
+ Hiroshi TAKITA + dti-nsg&dti.ad.jp +35454 + Niometrics Pte Ltd + Christos Ricudis + ricudis&niometrics.com +35455 + Tokyo Metropolitan Organization for Medical Research + Toshi Akazawa + ns-media&igakuken.or.jp +35456 + Westway Nominees + Justin Twiss + justin.twiss&bekkers.com.au +35457 + Infocomm Development Authority Of Singapore + Lau Chee Kiong + lau_chee_kiong&ida.gov.sg +35458 + Dossot Networks + David Dossot + david&dossot.net +35459 + LEMKO Corporation + Chris White + cwhite&lemko.com +35460 + EDAXI UG (haftungsbeschraenkt) + Jochen Schoenfeld + js&edaxi.de +35461 + Bernhard Dick + Bernhard Dick + bernhard&bdick.de +35462 + Qingdao Hisense Media Networks LTD + zhangfengxi + fengxiboy&gmail.com +35463 + EfiCode Oy + Eija Virkkala + hostmaster&eficode.fi +35464 + Blues Point Partners Pty Ltd + Peter Horne + ph&bluespointpartners.com +35465 + Bank Julius Baer & Co. Ltd. + Sebastian Kowalski + sebastian.kowalski&juliusbaer.com +35466 + Forsway Scandinavia AB + Edvin Lindqvist + edvin.lindqvist&forsway.com +35467 + OOO"SpinBrain" + Nickolay Redin + ya&spinbrain.com +35468 + Kaunas University of Technology + Marius Urkis + marius.urkis&ktu.lt +35469 + Seamless Distribution AB + Radoslaw Kozlowski + radoslaw.kozlowski&seamless.se +35470 + First-Matrix + Juan Meyer + juan&first-matrix.com +35471 + Johns Byrne Company + Brett Gustafson + brett.gustafson&johnsbyrne.com +35472 + blichmann.de + Christian Blichmann + pen-request-iana&blichmann.de +35473 + UNETCONVERGENCE + Lee Jun Yong + jylee&unetconv.co.kr +35474 + TransMIT GmbH + Michael Kröning + Kroening&transmit.de +35475 + Sprengnetter GmbH + Markus Tondorf + m.tondorf&sprengnetter.de +35476 + The Overtis Group Limited + Jeremy Barker + jeremy.barker&overtis.com +35477 + Dataram Corporation + Jason Caulkins + jcaulkins&dataram.com +35478 + The UNIX Man Consulting, LLC + Evan Cofsky + evan&theunixman.com +35479 + Academic Medical Center + Ewald H. Beekman + E.H.Beekman&amc.nl +35480 + Kutak Rock LLP + Jim Cook + postmaster&kutakrock.com +35481 + LightningIO + Ben Kelly + kibelan&gmail.com +35482 + Image Soft Oy + Matti Suuronen + matti.suuronen&imagesoft.fi +35483 + Exagate + Dogan Erdogan + dogan.erdogan&exagate.com +35484 + Alphacom LLC. + Aleksey Zhukov + drdaeman&netwi.ru +35485 + Derrick & Associates, Inc + Michael Derrick + tech&dnacorp.net +35486 + Television New Zealand Ltd + Mark Potter + noc&tvnz.co.nz +35487 + The Salvation Army USA Western Territory + Doug Neely + doug.neely&uss.salvationarmy.org +35488 + The Corosync Cluster Engine Project + Steven Charles Dake + sdake&redhat.com +35489 + RADIOFID SYSTEMS COMPANY LIMITED + Volkov Georgii + iana&radiofid.ru; gvolkov&radiofid.ru +35490 + AGEPS - APHP + Martin Hilka + martin.hilka&eps.aphp.fr +35491 + M.S. Projekt Management + Vertrieb GmbH + Peter Siebertz + mail&pmv-gmbh.de +35492 + RheinLand Versicherungen + markus schellen + markus.schellen&rheinland-versicherungen.de +35493 + EMBED-IT OG + Marcello Presulli + m.presulli&embed-it.com +35494 + act750 + Frederic Duclos + frederic.duclos&act750.com +35495 + Arjo Wiggins Chartham Limited + Malcolm Laws + malcolm.laws&arjowiggins.com +35496 + Interface Devices Ltd + Vadim Govorovski + vadim&interface-devices.com +35497 + GenLan d.o.o. + Sebastijan Sef + info&genlan.si +35498 + VDG Security BV + Robin Hermann + robin&vdg-security.com +35499 + Stratos NZ Ltd + David Liversidge + david.liversidge&stratosglobal.com +35500 + Aim co., Ltd. 
+ Takayuki Eimizu + t-eimizu&aim.ac +35501 + Access General Insurance Holdings + John Zucco + jzucco&accessgeneral.com +35502 + BIGSSS + Jakob Lenfers + lenfers&bigsss-bremen.de +35503 + CybersCube + Victor Fang + victor.fang&cyberscube.com +35504 + Eitelwein Net + Michael Eitelwein + michael&eitelwein.net +35505 + Lindenbaum GmbH + Ahmad Masrieh + iana-ext&lindenbaum.eu +35506 + Streamezzo S.A. + Cédric Gégout + stz-server&streamezzo.com +35507 + Ben Clifford + Ben Clifford + benc&hawaga.org.uk +35508 + Addiva Engineering AB + Mikael Sandberg + mikael.sandberg&addiva.se +35509 + C Squared Systems, LLC + Chris Dallaire + chris.dallaire&csquaredsystems.com +35510 + Tecnicas De Soft, S.A. + Antonio Rodriguez Fernandez + antonio-rodriguez&tecnicasdesoft.es +35511 + VirtualScopics + Colin Rhodes + colin_rhodes&virtualscopics.com +35512 + OSSEra, Inc + Fazhong Deng + david_deng&ossera.com +35513 + AixConcept GmbH + Sebastian Fillinger + sfillinger&aixconcept.de +35514 + Liquidweb Inc + Nick Cappelletti + network&liquidweb.com +35515 + KJ3 Elektronik + Johan Johansson + johan&kj3.net +35516 + Suomen Teollisuusosa Oy + Harry Flink + to-steo-from-iana-about-pen&steo.fi +35517 + Shernet + Adam Sher + adam&shernet.net +35518 + Bay Talkitec Private Limited + Krishnamoorthy Raghunath + raghu&baytalkitec.com +35519 + Maldives Monetary Authority + Shawn Rasheed + shawn&mma.gov.mv +35520 + Video Clarity + Blake Homan + blake&videoclarity.com +35521 + Entimo AG + Sven Prasse + pr&entimo.de +35522 + Comelit Group SpA + Luca Ceresoli + luca.ceresoli&comelit.it +35523 + Phillips and Jordan, Inc. + David Deal + ddeal&pandj.com +35524 + phgamers + Joshua Garcia + josh&phgamer.net +35525 + DirectStreams + Yvan BARTHÉLEMY + ybarthelemy&direct-streams.com +35526 + Foundata GmbH (formerly 'ikt.werk GbR') + Andreas Wolf + mail&foundata.net +35527 + FlexRadio Systems + Stephen Hicks + steve&flex-radio.com +35528 + ADLANTIA + Jesus Broceno + jesus.broceno&adlantia.com +35529 + Evony LLC + Kean Johnston + kean&evony.com +35530 + TJK Tietolaite Oy + Tero Kankaanpää + tero.kankaanpaa&tietolaite.com +35531 + Contato Global Solutions + Paulo Ricardo Bruck + paulobruck1&gmail.com +35532 + ACSYS BSC Sp. z o.o. + Jacek Grundland + jg&acsys.com.pl +35533 + Axecta Inc. + Thanh Chiem + ThanhChiem&axecta.com +35534 + ettex GmbH + Helmut Fritsche + fritsche&ettex.de +35535 + NanVPN + Hucheng Zhang + administrator&nanvpn.net.ru +35536 + Systems in Progress GmbH + Andreas Landgraf + alandgraf&sip.co.at +35537 + TeMeno GmbH + Volker Gebhardt + vg&temeno.de +35538 + TeleTrader Software AG + Robert Zeller + robert.zeller&teletrader.com +35539 + Micros Systems, Inc. + James T. Walsh + jwalsh&micros.com +35540 + M/s. Varun Infosys + Guntagani Vimal Kumar + vimal&varuninfosys.in +35541 + Oesterreichische Elektrizitaetswirtschafts-AG + Rudolf Mitterhuber + admin&verbund.at +35542 + Lucas sarl + Christian Lucas + cl&lucas.lu +35543 + Novaworx + Kevin Callaghan + webmaster&novaworx.com +35544 + Jas. Quinn & Son + James L. Quinn, Jr. + jquinnjr&jasquinn.com +35545 + Robot Lda. + Filipe Rosa Ferreira + layer3&robot-lda.pt +35546 + EBS S.R.L. + Rossi Massimo + massimo.rossi&tin.it +35547 + CA DiKey Ltd + Andreevskih Yuriy + pravo&distate.ru +35548 + KNI Technical Consulting Ltd. + Zsolt Kendi + kni&mail.datanet.hu +35549 + E-Disp DA + Björn T. Nöstdahl + bjorn.nostdahl&nostdahl.com +35550 + Leith Brandeland + Leith Brandeland + leith.brandeland&gmail.com +35551 + Conchus + Guy Gershoni + guy&conchus.com +35552 + COMGuide Co.,Ltd.
+ Seiji Sanagi + info&comguide.co.jp +35553 + Internet Hypermarket Ltd. + Dmitry Stremkouski + stremkouski&dostavka.ru +35554 + CorSsys + Eugene Zaitsev + zaitsev_ee&corssys.ru +35555 + Infinities Within + Alec Lanter + alec&infinities-within.net +35556 + OdysSloot + Sander van der Sloot + odyssloot&gmail.com +35557 + Nyadendis Enterprices + Felix Okoth + admin&nyadendis.com +35558 + IT-INTEGRATION - SOLUÇÕES INTEGRADAS EM TECNOLOGIA DA INFORMAÇÃO LTDA ME + Alex Sobral de Freitas + projetos&itin.com.br +35559 + Shanda Interactive Entertainment Limited + Ke Ze Zhou + zhoukeze&snda.com +35560 + Dnspod + Wang Fei + wangfei&dnspod.com +35561 + NetDialog International B.V. + Tim Ruhl + iana-pen&netdialog.eu +35562 + Service Elements + David Smith + davidsm&selements.com.au +35563 + saladisdead.com + Aaron Angel + aja&saladisdead.com +35564 + Universidad Catolica Andres Bello + Rafael Andara + randara&ucab.edu.ve +35565 + Koozyt, Inc. + Mr. Atsushi Shionozaki + snmp-admin&koozyt.com +35566 + trammell.ch + Brian Trammell + brian&trammell.ch +35567 + CME Group + Bryan Green + bryan.green&cmegroup.com +35568 + Beth Israel Deaconess Medical Center + Jacob Hesterman + hesterman&invicro.com +35569 + A-dec Inc. + James Ward + james.ward&a-dec.com +35570 + Travel Tripper LLC + Michael V Spoonauer + michael&traveltripper.com +35571 + LU-Hosting + Leo Unglaub + leo&leo-unglaub.net +35572 + Arends IT+TK + Gerrit Arends + gerrit.arends&arends-itk.net +35573 + sitel + Itay Moav + itay.moav&email.sitel.org +35574 + Dambach-Werke GmbH + Alex Gottschalck + alex.gottschalck&dambach.de +35575 + ByteSource Technology Consulting GmbH + Alexander Penev + contact&bytesource.net +35576 + Ailux S.r.l. + Giacomo Vianelli + giacomo.vianelli&ailux.eu +35577 + LivingData Gesellschaft für angewandte Informationstechnologien mbH + Michael Rieck + michael.rieck&livingdata.de +35578 + Southern Telecommunications Company + Artem Alimov + a.alimov&mail.stcompany.ru +35579 + Super Group Trading (Pty) Ltd + Adam Orpen + adam&supergrp.com +35580 + Technisys, Inc. + Tom Mirochna + tmirochna&tsys-inc.com +35581 + Linct + Thomas Andrews + iana-admin&grok.co.za +35582 + mTrust, s. r. o. + Ing. Vladimír Popík + vladimir.popik&mtrust.sk +35583 + Drizzle + Edward Konetzko + konetzed&quixoticagony.com +35584 + BANK AL MAGHRIB + FAIK Hicham + h.faik&bkam.ma +35585 + Intelcan Technosystems Inc. + Hector Nunez + hectorn&intelcan.com +35586 + Competentum + Pavel Bondarenko + iana&competentum.ru +35587 + White Birch Paper division Papier Masson + Marc Côté + marccote&papiermasson.com +35588 + ndl.kiev.ua + Alexander Tsvyashchenko + ndl&ndl.kiev.ua +35589 + Confederación Pirata (formerly 'Partido PIRATA') + Rafa Couto + rafacouto&confederacionpirata.org +35590 + Convergent Communications, Inc + Mike Knoblock + mknoblock&ccinet.us +35591 + Limited Liability Company "Mejregiongaz " + Pusev Alexsey V. + u6000025&mrg.gazprom.ru +35592 + FiberSensing S.A. + José Sá + jose.sa&fibersensing.com +35593 + Thales / Rockwell Collins + James Brokenbek + jebroken&rockwellcollins.com +35594 + iCOMcept GmbH + Sven Schoenfelder + schoenfelder&icomcept.de +35595 + distributedmatter.net + Bruno Harbulot + iana&distributedmatter.net +35596 + Innovid Co., Ltd. 
+ Jeongseok Kim + jskim&innovid.co.kr +35597 + "TeleMiks" UE + Eugene Sheffer + e.sheffer&mail.ru +35598 + PIWorks Inc + Baris Akpinar + subscription&piworks.net +35599 + Marketware + Antonio Barros de Sousa + asousa&marketware.eu +35600 + TCC R&D GmbH + Uwe Danzeglocke + danzeglocke&tcc.de +35601 + TransCanada Pipelines Ltd + Rob Sealock + rob_sealock&transcanada.com +35602 + Syndetic Pty Ltd + Ross Varnes + ross.varnes&syndetic.com.au +35603 + TIANJIN DEVISER ELECTRONICS INSTRUMENT CO.,LTD + Cao Yuliang + susan&deviser.com.cn +35604 + Compal Broadband Networks Inc. + Roy Huang + roy_huang&compalbn.com +35605 + Montani Webdevelopment + Strategie + J.A. Vroegop + jav&montani.nl +35606 + ZAO NPC "KOMPYUTERNYE TECHNOLOGII" + Alexandr Bondarenko + komtex&perm.ru +35607 + Wireless eSystems + Casey Calloway + ccalloway&irespond.com +35608 + Answer Quick + Mike Demoulin + technical&answerquick.com +35609 + AXIe Consortium, Inc. + Robert George Helsel + execdir&axiestandard.org +35610 + Itel snc + Alessandro Tomassini + atomassini&itel.it +35611 + SOFT PROJECT C.A. + Jesús Lara + jesuslarag&gmail.com +35612 + Digital Mages + Arthur Corliss + corliss&digitalmages.com +35613 + Linktrust + Neil Liu + liuhj&linktrust.com.cn +35614 + ITK-Engineering + Andreas Pfeiffer + andreas.pfeiffer&itk-engineering.de +35615 + Basilea Pharmaceutica Ltd + Dirk Wacker + itsecurity&basilea.com +35616 + Deltanet AG + Daniel Neff + daniel.neff&deltanet.ch +35617 + XtcN + Richard Lamb + slamb&xtcn.com +35618 + Beanfield Technologies Inc. + Brent Bloxam + brent&beanfield.com +35619 + Mittwald CM Service + Frank Bergmann + iana-pen&mittwald.de +35620 + William Beaumont Hospitals + Dale Thomas + drthomas&smtpgw.beaumont.edu +35621 + Patrick Kobly + Patrick Kobly + patrick&kobly.com +35622 + Akaflieg Stuttgart + Pascal Groß + pascal.grosz&me.com +35623 + stwalkerster.co.uk (formerly 'Albino Slug Studios') + Simon Walker + simon&stwalkerster.co.uk +35624 + tuxad.com + Frank W. Bergmann + iana-pen&tuxad.com +35625 + Vembu Technologies Private Limited + S. Gopal Krishnan + snmp&vembu.com +35626 + CHONGQING JINGHONG HI-TECH CO.,LTD + zonghua qin + 13629731011&163.com +35627 + Atera Networks LTD + Oren Cohen + oren&ateranetworks.com +35628 + Unitas Network GmbH + Joerg Deckert + jdeckert&unitas-network.de +35629 + PL-Grid + Grzegorz Kosicki + grzegorz.kosicki&wcss.pl +35630 + Qube Technologies Sp. z o.o. + Wojciech Kozerski + wkozerski&qube.pl +35631 + CyberTrans Japan + Iftah Bratspiess + iftah&web-silicon.com +35632 + ntop + Luca Deri + deri&ntop.org +35633 + Open Sistemas + Fernando Monera + sistemas&opensistemas.com +35634 + Karl Wörwag Lack- und Farbenfabrik GmbH & Co. KG + Klaus Fellmeth + klaus.fellmeth&woerwag.de +35635 + GFR Software Solutions AG + Claude Strübin + struebin&gfr.ch +35636 + KION GROUP GmbH + Markus Wintruff + cns&kion-ims.com +35637 + ubiqu access b.v. + Boris Goranov + boris.goranov&ubiqu.nl +35638 + Flughafen Wien AG + Gerald Schinagl + g.schinagl&viennaairport.com +35639 + Wydawnictwo Naukowe PWN S.A. + Rafal Szmigiel + rafal.szmigiel&pwn.pl +35640 + Appareo Systems, LLC + Paul Olson + polson&appareo.com +35641 + Universidad de los Andes + Andres Holguin + a-holgui&uniandes.edu.co +35642 + Paneda AS + Thomas Flølo + thomas.flolo&paneda.no +35643 + EURIX srl + Decarlini Paolo + decarlini&eurix.it +35644 + Olson Consulting + Paul Olson + paulo&olson-consulting.com +35645 + ZiChen Tech. Co. Ltd. + Kevin Zheng + kevin.z.y&163.com +35646 + Arca Technologies S.r.l. 
+ Bruno Pastore + b.pastore&arca.com +35647 + G4S Deposita (RF) (Pty) Ltd (formerly 'Deposita Systems (Pty) Ltd') + Cobus Aikman + cash360&za.g4s.com +35648 + Broadband Solutions Technology Pty Ltd + Buks Fouche + support&b-s-t.co.za +35649 + Direct Payment Solutions Limited + Robert Li + robert.li&paymentexpress.com +35650 + Piratenpartei Schweiz + Christian Häusler + haeusler.christian&mac.com +35651 + IHM P/S + Erik Hansen + eh&ihm.dk +35652 + Informatique de Sécurité + KAAG François + support.informatique&ids-assistance.com +35653 + DeltaWare Systems Inc. + John Donahoe + jdonahoe&deltaware.com +35654 + HAUT COMMISSARIAT POUR LES REFUGIES HCR (UNHCR) + Giovanni Meneghetti + meneghet&unhcr.org +35655 + Peak Solution GmbH + Andreas Hechfellner + a.hechfellner&peak-solution.de +35656 + Brian Delaney + Brian Delaney + brian&briandel.ca +35657 + PT. Bank BRI Syariah + Zaldy Suhatman + zaldy&brisyariah.co.id +35658 + Health Info Net AG + Andre Seiler + andre.seiler&hin.ch +35659 + «Baker Tilly Russaudit» Ltd. + Alexander V. Samokhin + samokhin&russaudit.ru +35660 + 3Way Solutions + Sebastián Alvarez + salvarez&3way.com.ar +35661 + NetworkIP + Logan Ashby + unixadmin&networkip.net +35662 + Optrak Distribution Software Ltd + Tim Pigden + tim.pigden&optrak.co.uk +35663 + State of Hawaii + Todd Crosby + todd.m.crosby&hawaii.gov +35664 + PI-Embedded - Building Automation Community + Janusz Piwek + info&pi-embedded.de +35665 + BroadBand Security, Inc. + Sadaaki Tanaka + pen&bbsec.co.jp +35666 + Joint Stock Commercial Bank INVESTBANK Open-end JSC + Maksim Kupriyanov + max&granbank.ru +35667 + Elimco Sistemas S.L. + Emilio de Leon Cardenas + edeleon&sistemas.elimco.com +35668 + PEAK6 Investments, L.P. + Luna Petrova + lpetrova&peak6.com +35669 + Advanced Integration Technology + Iain Brown + iain&aint.com +35670 + Alberta Health Services + Rainer Oebels + Rainer.Oebels&AlbertaHealthServices.ca +35671 + im3D S.p.A. + Angelo Leto + angelo.leto&i-m3d.com +35672 + Bitnethic Srl + Marco Bavassano + marco.bavassano&bitnethic.it +35673 + Bank Saint Petersburg + Alexandr Sukhov + aleksandr.i.suhov&bspb.ru +35674 + Johan De Wit + Johan De Wit + johan&koewacht.net +35675 + ZAP S/A Internet + Daniel Checchia + daniel.checchia&zapcorp.com.br +35676 + ComPughTerWorx + Michael Dean Pugh + mdpugh&hotmail.com +35677 + Envisionier Medical Technologies, Inc. + Jeremy Brooks + jbrooks&envisionier.com +35678 + Clayster AB + Olof Zandrén + administrator&clayster.com +35679 + GRITA + Rémi TILLY + remi.tilly&grita.fr +35680 + AdGear Technologies Inc. (formerly 'Bloom Digital Platforms') + Mina Naguib + mina.naguib&adgear.com +35681 + Zenith System Solutions + Vikram Dhaddha + vikramd&zenithss.com +35682 + Technical University of Liberec + Petr Adamec + Petr.Adamec&tul.cz +35683 + Hofmann-IT-Systeme Ingenieurbuero + Florian Hofmann + Hofmann&Hofmann-IT-Systeme.de +35684 + Proserve B.V. 
+ Edwin van Vliet + edwin&proserve.nl +35685 + HELLUG + Kargiotakis Georgios + kargig&hellug.gr +35686 + Brian Desmond Consulting, LLC + Brian Desmond + brian&briandesmond.com +35687 + Proware Technologies Co., LTD + Robin Cui + robin.cui&proware.com.cn +35688 + Hochschule Wismar - University of Applied Sciences - Technology, Business and Design + Alexander Mahler + alexander.mahler&hs-wismar.de +35689 + Betfair + Gerrard Geldenhuis + gerrard.geldenhuis&betfair.com +35690 + Indaal Information Management GmbH + Juergen Lloyd + juergen.lloyd&indaal.de +35691 + Matthias Bonn it-consult + Matthias Bonn + info&mb-itconsult.de +35692 + Headweb AB + Johan Ström + software&headweb.com +35693 + Open-Future BVBA + Johan De Wit + johan&open-future.be +35694 + Choosehelp.com + Martin Schoel + martin&choosehelp.com +35695 + SITEM S.r.l. + Alessandro Lugli + alessandro.lugli&sitemnet.it +35696 + NetScout Systems, Inc. (formerly 'Avvasi') + Michael Gallant + WATorders&netscout.com +35697 + Ystrad Mynach College of Further Education + RUSSELL TUCK + itteam&ystrad-mynach.ac.uk +35698 + Ldap.com.br + Marcelo José Xavier + marcelo.xavier&live.com +35699 + Cummins Power Generation + Greg Knowd + greg.knowd&cummins.com +35700 + Tyrian Technical Consulting + Ben Bell + nospam&tyriantech.com +35701 + phpcoms + chunguo li + lichunguo&ceopen.cn +35702 + NTO IRE-Polus, Ltd. (formerly 'Optical Components & Systems') + Alexander Gutor + sanya.gutor&gmail.com +35703 + JSC DevLab + Kolesnyk Oleksij + 5akv&ukr.net +35704 + SALUC + THIERRY BRUYERE + thierry.bruyere&saluc.com +35705 + HWS Informationssysteme GmbH + Bernd Scheurer + support&hws-gruppe.de +35706 + Guangzhou KingTeller Technology Co.,Ltd + Leo Zeng + zcw200&126.com +35707 + S&P + Steffen Löb + pki&sup-sahlmann.com +35708 + BOUYGUES CONSTRUCTION + Bruce GARNIER + B.GARNIER&bouygues-construction.com +35709 + Unassigned + Removed 2010-05-03 + ---none--- +35710 + Alcatel-Lucent, 4ESS + Eugene Koeppe + gene.koeppe&alcatel-lucent.com +35711 + Sopinspace + Gérald Sédrati-Dinet + gerald.sedrati-dinet&sopinspace.com +35712 + Master Translation and Technology Services Co.,Ltd. + Zhang Yuanji + zhangyj&mts.cn +35713 + Information Management Services + Brian Gibson + gibsonb&imsweb.com +35714 + kommtnoch.com + Andreas Hergert + andreas&kommtnoch.com +35715 + Video Gaming Technologies + Dan Milligan + dan.milligan&vgt.net +35716 + Action Without Borders + Kenneth Maupin + hostmaster&idealist.org +35717 + DI Michael Kuen + DI Michael Kuen + Michael.Kuen&xudis.com +35718 + Mississippi Gulf Coast Community College + David Besancon + david&mgccc.edu +35719 + Shopping.com + Arthur Yang + artyang&shopping.com +35720 + Infotrend Development + Gilberto Mardegan + gmardegan&infotrend.it +35721 + Institute of Corporate Law and Corporate Governance + Dmitry Mikhailov + cio&iclg.ru +35722 + Digital Nirvana + Ned Chini + nedchini&digital-nirvana.com +35723 + SENSAIR Pty Ltd + Peter Lissenburg + peter&sensair.com +35724 + Chenega Corporation + Joseph Bedard + hostmaster&chenega.com +35725 + China Beijing TV Station + sunliguo + sunliguo&btv.com.cn +35726 + China Guangdong Telepower Communication Technology Co.,Ltd. + Shawn.Hong + xxycat&163.com +35727 + First Horizon National Corporation + Preston Gilchrist + inettech&ftb.com +35728 + Scallable Technologies, Inc. 
+ Iurii Rashkovskyi + yrashk&scallable.com +35729 + Forssan Seudun Puhelin Oy / SurffiNET + Asmo Porma + hostmaster&fsp.fi +35730 + iAd GmbH + Juergen Kosel + juergen.kosel&iad-de.eu +35731 + abtis GmbH + Thorsten Weimann + info&abtis.de +35732 + Ajuntament de Benicarló + Josep V Taus Dieste + correu.informatica&ajuntamentdebenicarlo.org +35733 + VIDA Diagnostics Inc + Angela Bryant + angela&vidadiagnostics.com +35734 + Lennart Jütte + Lennart Jütte + admin&rtjuette.de +35735 + A9 SAS + Antoine Brenner + support&gymglish.com +35736 + BZCToOn'S Network / RedJuice.fr SARL + Réda Bourebaba + support&bzctoons.net +35737 + QA Cafe LLC + Joseph McEachern + support&qacafe.com +35738 + abcwxy.com + Tim Jones + tjonesyhoo&yahoo.com +35739 + Bowline Network Consulting, Inc + Kevin Thompson + kevin.thompson&bowlineconsulting.com +35740 + SACD + René LE MENER + rene.le.mener&sacd.fr +35741 + Chi-X Europe Ltd + Adam Harm + adam.harm&chi-x.com +35742 + Neurosearch, Inc. + Michael Hall + michael.hall&neurosearch-usa.com +35743 + Workonline Communications (Pty) Ltd + Ben Maddison + benmaddison&workonline.co.za +35744 + D-Media Communication Tech + Zinan Ren + renzinan&gmail.com +35745 + Marine Harvest ASA + Jan Olav Skeie + jan.olav.skeie&marineharvest.com +35746 + Passepartout sa + gianluigi nigro + gianluigi.nigro&passepartout.sm +35747 + FEDERALNOE AGENTSTVO ZhELEZNODOROZhNOGO TRANSPORTA + Vladimir Folin + admin&roszeldor.ru +35748 + Switch++ + Amir Fuhrmann + amir&switchpp.com +35749 + Alsim + Thierry LEBOURQUE + t.lebourque&alsim.com +35750 + Spring Wireless + Alvaro Anton + aanton&springwireless.com +35751 + Linux Lunatix + Stefan Marx + stefan&linux-lunatix.org +35752 + Supra net d.o.o. + Aleksandar Šerbetar + aleksandar.serbetar&supranet.hr +35753 + Les Développements Durables + Sébastien DUCOULOMBIER + iana-pen&ldd.fr +35754 + ON-AIR Systems Ltd. + Miroslav Jeras + miroslav.jeras&on-air-systems.com +35755 + P-21 GmbH + Thilo Heinermann + thilo.heinermann&p-21.de +35756 + inett GmbH + Christian Baus + christian.baus&inett.de +35757 + Sonnection B.V. + Rolf E. Sonneveld + R.E.Sonneveld&sonnection.nl +35758 + Stichting Diagnose Kanker + Rolf E. Sonneveld + R.E.Sonneveld&sonnection.nl +35759 + Welkin Sciences, LLC + Rex Shaffer + rex.shaffer&welkinsciences.com +35760 + Optical Access Networks Lab, Shanghai University + Song Yan + jylshm&126.com +35761 + Center Parcs Europe + Daan de Jongh + daan.dejongh&centerparcs.com +35762 + FMLOG, Swedish Armed Forces Logistics + David Hedlund + david.hedlund&mil.se +35763 + Groupe Laurent + GUNGOR Fatih + fatih.gungor&groupe-laurent.com +35764 + St. Lawrence College + Michael Zeleny + mzeleny&sl.on.ca +35765 + zbits Unternehmensberatung GmbH + Thomas Baumann + tbaumann&zbits.de +35766 + Killermann GdbR + Herbert Killermann + herbert.killermann&killermann.info +35767 + University of Lodz + Krzysztof Miodek + oper&uni.lodz.pl +35768 + GetWellNetwork, Inc. + Bernd Nigmann + bnigmann&getwellnetwork.com +35769 + Wellogic + Dan Powell + dan&wellogic.com +35770 + USIL Technology + Cesar Loyola + cloyola&usiltechnology.pe +35771 + Hightech Payment Systems + Radouane ELFITOURI + radouane.elfitouri&hps.ma +35772 + n@work Internet Informationssysteme GmbH + Sascha Retzki + retzki&work.de +35773 + Arcturus Networks Inc.
+ Michael Durrant + mdurrant&arcturusnetworks.com +35774 + Starline Holdings + Lucas Andrews + info&starlinepower.com +35775 + Rachitskiy Research and Development LLC + Eugene Rachitskiy + eugene&rachitskiy.com +35776 + BlueNote Communications SA + Arian Mares + arian.mares&bluenote.ro +35777 + Unique Solutions SA + Iulian Dogariu + iulian&uniques.ro +35778 + RTC-LEASING OJSC + Alexander Scherbachev + sag&rtc-leasing.ru +35779 + ican solutions private limited + vivek vinod + vivek&icanconnect.com +35780 + OAO Tatneft + Gazizov Almaz + a_gazizov&tatneft.ru +35781 + Matej Bel University Banska Bystrica + Peter Kottman + sekretariat&uakom.sk +35782 + Universidad de Extremadura + Ana Gallardo Gómez + aigallardo&unex.es +35783 + Nika Ltd + Andrej Taran + fant&nika.vin.ua +35784 + ECKD Service GmbH + Michael Moessinger + technik&eckd.de +35785 + Xcira, Inc. + Tom Fleniken + tomf&xcira.com +35786 + Continuant Inc + Aldo Febro + aldof&continuant.com +35787 + Computaris + Dariusz Gorczynski + dariusz.gorczynski&computaris.com +35788 + Xolido Systems,S.A. + Luis Carlos Ganso Mellado + administracion&xolido.com +35789 + Despegar.com, Inc. + Gustavo Randich + grandich&despegar.com +35790 + ClearCorp + Joan E. Jorden + joan&clearcorp.biz +35791 + IES Systems, Inc. + John Stacey + john.stacey&ies-us.com +35792 + Rygl + Ales Rygl + ales&rygl.net +35793 + Hirotech, Inc. + Yoshiaki Hayakawa + yhayakawa&hirotech.com +35794 + Java Verified + Martin Wrigley + martin.wrigley&orange-ftgroup.com +35795 + Actifio + Ravi Kollipara + ravi.kollipara&actifio.com +35796 + Burda:IC GmbH + Rene Henzinger + henzinger&burda-ic.com +35797 + Polska Telefonia Cyfrowa Sp. z o.o. + Tytus Buńka + tbunka&era.pl +35798 + shack e.V. + Christian Recktenwald + iana-contact&shackspace.de +35799 + Phmb Consulting + Peter Brooks + peter.h.m.brooks&gmail.com +35800 + Uniconsult + Bruno Pirajá Moyle + bruno&uniconsult.com.br +35801 + EIX Ltd + Dr Eddie Insam + edinsam&eix.co.uk +35802 + Mobango Ltd + Rocco Lucia + ops&mobango.com +35803 + Eucalyptus Systems Inc. + Steven Fitzgerald + administrator&eucalyptus.com +35804 + Allgood Networks + Guy Allgood + guyallgood&netzero.com +35805 + Athonet s.r.l. + Andrea Agosti + andrea.agosti&athonet.com +35806 + Paul Milliken + Paul Milliken + iana&pmm.me.uk +35807 + AJR Development + Allan Rogers + allan.rogers&mail.org +35808 + SAR Elektronik A.S. + Serdar Edgu + serdar.edgu&sarelektronik.com +35809 + Hosting Community + Dmitriy Kirhlarov + ext-coordinators&hostcomm.ru +35810 + CDL PMO + CALI RODRIGUEZ + AFLCMC.HNAG.CDL&us.af.mil +35811 + AMTANGEE Aktiengesellschaft + Christian Roesner + service&amtangee.com +35812 + CANCOM IT Solutions GmbH + Ginther Andreas + andreas.ginther&cancom.de +35813 + LinuxRulz + Stephan Jauernick + stephan48&linuxrulz.de +35814 + Cordier Networks + Robin Cordier + mrjk.78&gmail.com +35815 + QUADStor Systems + Shivaram Upadhyayula + shivaram.u&quadstor.com +35816 + Shenzhen HY Amplitec Technology Co.,Ltd + Yanwei Wang + info&hyamplitec.com +35817 + Giorik spa + erminio canal + prior.customers&gmail.com +35818 + Azienda Feltrina s.p. + erminio canal + prior.customers&gmail.com +35819 + Digithurst Bildverarbeitungssysteme + Raimund Jakobsmeyer + rja&digithurst.de +35820 + RCS Kladno, s.r.o. + Petr Paryzek + Petr.Paryzek&rcs-kladno.net +35821 + FBComputers s.r.l. 
+ Leonardo Centoventotto + assistenza&fbcomputers.com +35822 + Panasonic Net Solutions Co.,Ltd + 佐藤 晋司 (Sato Shinji) + sato.shinji002&jp.panasonic.com +35823 + HANGZHOU RICH INFO-TECH CO.,LTD + Wenbin Liao + archvile&sina.com +35824 + Boll und Partner Software GmbH + Tobias Roese + tobias.roese&boll-und-partner.com +35825 + INAX Corporation + Masami Uchida + domain-admin&isc.i2.inax.co.jp +35826 + THALES AIR OPERATION + Francois GIERSCH + francois.giersch&thalesgroup.com +35827 + Invengo Information Technology Co.,Ltd + johnson.guo + johnson.guo&invengo.com +35828 + Entropia e.V. + Eve Entropia + info&entropia.de +35829 + United Natural Foods, Inc. + Derec DelSignore + iana-pen&unfi.com +35830 + ROFFET.com + Nicolas Roffet + nicolas-w&roffet.com +35831 + ATECH MICROELECTRONIC SYSTEMS + Robert Went + r.went&amesystems.eu +35832 + Open Horizont.Ltd + Zoltan Markella + zoltan.markella&openhorizont.com +35833 + Deva Broadcast Ltd. + Todor Ivanov + office&devabroadcast.com +35834 + Gozaimass + Pascal VIVIEN + pen.iana.org&gozaimass.fr +35835 + iB Solution Corporation + Satoshi HOSHINO + info&ib-sol.co.jp +35836 + China BraveFly Technology Co., Ltd. + LiuZhiyong + liuzy57&hotmail.com +35837 + Mamat GmbH + Rolf Mamat + snmp&software-for-future.de +35838 + Matrixx Software Inc. + Luther Kitahata + sysadmin&MatrixxSw.com +35839 + Imagine One Technology & Management + Jeffrey Farmer + jeffrey.farmer&imagine-one.com +35840 + Gyrus ACMI, Inc. + Jonathon Oss + jonathon.oss&gyrusacmi.com +35841 + Agecodagis SARL + Sébastien Judenherc + sebastien.judenherc&agecodagis.com +35842 + RamVZ + Rouven Raudzus + Rouven.Raudzus&gmx.de +35843 + Sebastian Muszytowski + Sebastian Muszytowski + s.muszytowski&googlemail.com +35844 + Stormont-Vail HealthCare + Nathan Girard + ngirard&stormontvail.org +35845 + Heraklion Wireless + Damianos Mylonakis + danmylonakis&gmail.com +35846 + Parsons + Michael Pinkston + michael.pinkston&parsons.com +35847 + Greenbone Networks GmbH + Lukas Grunwald + info&greenbone.net +35848 + Multipolaris Ltd. + Andras Avar + andras.avar&mp-i.hu +35849 + Telefonica International Wholesale Services + Fernando Sastre + fernando.sastre&telefonica.com +35850 + Horns And Hooves + Deyev Vitaliy + holms_html&ukr.net +35851 + Abix Tecnologia + Tulio Munhoz + tulio&abix.com.br +35852 + Spire Sciences LLC + Peter Countryman + peter.countryman&spiresciences.com +35853 + Remote Enterprise Monitoring, Inc. + Jorge Ventura + jorge.araujo.ventura&gmail.com +35854 + American Modern + Richard Hartmann + richard_hartmann&amig.com +35855 + Dialog Semiconductor + Heiko Schumann + heiko.schumann&diasemi.com +35856 + Berchtold Holding GmbH + Joerg Friedrich + Joerg.Friedrich&BERCHTOLD.biz +35857 + AMARON BVBA + Nico Vannieuwenhuyze + nico&amaron.be +35858 + The Goodyear Tire & Rubber Co. + Shawn Leyden + spleyden&goodyear.com +35859 + Georgia-Pacific LLC. + JENNIFER MOORMAN + hostmaster&gapac.com +35860 + Kvadroteh Ltd. + George Kashperko + george&academy.zt.ua +35861 + CRAWFORD COMMUNICATIONS, INC + Guangbin Liu + gliu&mail.crawford.com +35862 + Mobile Integration Workgroup + Stephen Moody + iana&mobileintegration-group.com +35863 + WAMAJU LLC + Matthew Juszczak + group&wamaju.com +35864 + Ales Hakl + Ales Hakl + ales&hakl.net +35865 + Trivia Solutions B.V. + Marcel de Kock + marcel.dekock&trivia-solutions.com +35866 + Eno, Inc. 
+ Sueo Kawai + s-kawai&eno-inc.jp +35867 + Prior srl + Erminio Canal + prior.customers&gmail.com +35868 + Ater Belluno + erminio canal + prior.customers&gmail.com +35869 + STB Broadcast + Ulisses Barreto + nandopina80&yahoo.com.br +35870 + OpsWise Software Inc. + Gwyn Clay + gwyn.clay&opswise.com +35871 + Cloupia + Raju Datla + info&cloupia.com +35872 + huggla.com + Reiner Schmidt + reiner.schmidt&rs-consulting.de +35873 + JDS Uniphase Corporation + Judith Walker + Judith.Walker&jdsu.com +35874 + LiveAction + John K. Smith + jsmith&liveaction.com +35875 + JX2 Technology Pty Ltd + Zane Francis + support&jx2.com.au +35876 + WHITECODE,.LTD + Jungwon Park + jwpark&whitecode.co.kr +35877 + High Technology Industries B.V. (formerly 'LEITNER AG - S.p.A') + Dennis Kodde + admin&high-technology-investments.com +35878 + Conseil général de l'Aube + Philippe RICARD + philippe.ricard&cg10.fr +35879 + WellDoc, Inc. + Shariar Ghavami + sghavami&welldocinc.com +35880 + JSC "Mediafon" + Mindaugas Jasiulis + mindaugas.jasiulis&mediafon.lt +35881 + Ionis group + Jonathan Gnassia + jonathan.gnassia&epitech.eu +35882 + NTT DATA KYUSHU CORPORATION + Masayoshi Setoyama + setoyamam&nttdata-kyushu.co.jp +35883 + VisualSoft Private Limited + Aftab Sarwar + aftab&visualsoft-inc.com +35884 + Nethost s.r.o. + Lukas Futera + lukas.futera&nethost.cz +35885 + VZ Holding AG + Conradin Ragettli + 666&crnet.ch +35886 + Arise Telecommunications Ltd. + Gunay Mazmanoglu + gunay&arisetel.com +35887 + Lucierna SLNE + Juan Mahillo Alvarez + juan.mahillo&lucierna.com +35888 + DeadInkVinyl + David L Kinney + iana&deadinkvinyl.com +35889 + ColumbiaSoft Corporation + Tim Emerson + temerson&columbiasoft.com +35890 + Video-Flow ltd + Adi Rozenberg + adi.rozenberg&video-flow.com +35891 + punctum Gesellschaft fuer Software mbH + Dr. Georg Fischer + punctum&punctum.com +35892 + ELMEH GIU + Dino Šepac + dino.sepac&elmeh.hr +35893 + Xelmo + David Bauman + sysadmin&xelmo.com +35894 + Cognimatics AB + Erik Södervall + erik.sodervall&cognimatics.com +35895 + Slackarea Internet Services + Vincenzo Ingrosso + expert&slackarea.net +35896 + Concord Energy + Jon Storchevoy + jstorchevoy&ConcordEnergy.com +35897 + Violin Memory, Inc. + Ed Roskos + roskos&vmem.com +35898 + SYSTEREL + Francois BUSTANY + iana&systerel.fr +35899 + Prism Clinical Imaging, Inc. + Chad Neller + production&prismclinical.com +35900 + DuMont Net GmbH & Co. KG + Ralf Fischer + ralf.fischer&dumontnet.de +35901 + Al-Quds University + Marwan Rabie + marwan&alquds.edu +35902 + EasyTools (formerly 'OpenHeadend') + Christophe Massiot + cmassiot&easytools.tv +35903 + Bartonia Domain and Enterprises + Ian J. Barton + ian&bartonia.net +35904 + MacroSAN + Yinglan Shangguan + shangguan.ms&gmail.com +35905 + ACME-Cebul inc. + Tomaz Cebul + tomaz&cebul.eu +35906 + Unister + Enrico Popp + enrico.popp&unister-gmbh.de +35907 + CRV Natural + Ricardo Frei Bruel + ti&crvnatural.com.br +35908 + Embrane, Inc. + Marco Di Benedetto + marcodb&embrane.com +35909 + Horace Mann Educators Corporation + William Fellner + william.fellner&horacemann.com +35910 + overnet.qc.ca + Pascal Lalonde + pen&overnet.qc.ca +35911 + Mrezne Tehnologije Verso + Dario Bosnjak + dario.bosnjak&verso.hr +35912 + Makedonijalek + Momcilovic Dragan + draganm&makedonijalek.com.mk +35913 + Diputacion Provincial de Cáceres + Juan Carlos Manzano Pérez + juancman&dip-caceres.es +35914 + Borea, Ltd. 
+ Klemen Porenta + klemen.porenta&borea.si +35915 + SERLI + Laurent RUAUD + laurent.ruaud&serli.com +35916 + Orangehead Software + Tommy Braas + tommy&orangeheadsoftware.com +35917 + kobisun.org Surdurulebilir Sunucu Sistemleri + Kerem Erciyes + root&kobisun.org +35918 + ETON International co. ltd + Seonggyu Lim + saint&etonint.co.kr +35919 + Funkwerk plettac electronics GmbH + Ruediger Zapf + r.zapf&plettac-electronics.de +35920 + Sebastian Himberger Software + Sebastian Himberger + s.himberger&sebastian.himberger.de +35921 + Alpha-Bit GmbH + Alexander Muhl + alexander.muhl&alpha-bit.de +35922 + Backbase B.V. + Remie Bolte + ict&backbase.com +35923 + Enthusia Consulting Ltd + Olusegun S. Odujebe + oodujebe&enthusiaconsulting.com +35924 + IET-International Empire Traders + Syed Wali Hasan Zaidi, Abdullah Salauddin + projects&iet-group.com +35925 + Warsaw Stock Exchange + Paweł Czechowicz + pawel.czechowicz&wse.com.pl +35926 + Aguas de la Cuenca del Norte, S.A. + Miguel Ángel Rodríguez Fernández + mrodriguez&acuanorte.es +35927 + Softfinança S.A. + Pedro Anselmo + pedro.anselmo&softfinanca.pt +35928 + Wheaton College + Steve Hopeman + steve.hopeman&wheaton.edu +35929 + Prodata Mobility Systems NV + Peter Haijen + peter.haijen&prodatamobility.com +35930 + Radeks Medical Information Technologies Ltd + Serdar Soydemir + serdar&radeks.com +35931 + Woasis Telecommunications Ltd. + xiangdongwang + xiangdong.wang&w-oasis.com +35932 + JM Technology Inc. + Toshiaki Shiwaku + t.shiwaku&jmtech.co.jp +35933 + Semiocast + Paul Guyot + ianaoid&semiocast.com +35934 + OOO CPCR-Express + George Zavyalov + zavyalov_gg&cpcr.ru +35935 + RAU-Stromversorgungen GmbH + Werner Rau + info&rau-strom.de +35936 + Iconmobile GmbH + Anton Popov + it.adm&iconmobile.com +35937 + Unassigned + Removed 2015-03-19 + ---none--- +35938 + Omnibond Systems LLC. + Boyd Wilson + boydw&omnibond.com +35939 + Original1 GmbH + Sven Schiwek + info&original1.net +35940 + Neuhalfen + Jens Neuhalfen + iana&neuhalfen.name +35941 + Mitsubishi UFJ Securities (USA), Inc. + Kazutaka Abe + kabe&us.sc.mufg.jp +35942 + Uralchem + Dmitry Ponomarev + dmitry.ponomarev&uralchem.com +35943 + Boerse Stuttgart + Samir Gaertner + systembetrieb&boerse-stuttgart.de +35944 + Selecom + Olivier Deblock + odeblock&selecom.fr +35945 + Swid + Stephane Morucci + contact&swid.fr +35946 + SMG Co., Ltd. + Nobuyuki Maki + endosnipe&smg.co.jp +35947 + SME Soluciones + Antonio Aragón + sme-soluciones&sme-soluciones.com +35948 + Phoneytunes.com + Manish Nema + manish.nema&phoneytunes.com +35949 + Vienna Symphonic Library GmbH + Bernd Mazagg + register&vsl.co.at +35950 + eFolder, Inc. + Peter Samuelson + psamuelson&efolder.net +35951 + XConnect Global Networks + David Schwartz + dschwartz&xconnect.net +35952 + Sleevewerks BV + Hans Verbrugge + netmaster&sleevewerks.nl +35953 + Chargetek, Inc. + Ray Goodrich + ray&chargetek.com +35954 + VOD Pty Ltd + Van Lisowski + vl&vod.net.au +35955 + Niigata University + Kenji Mikawa + mikawa&cais.niigata-u.ac.jp +35956 + china wing technologies co.,ltd. + wyvern wang + wyvernw&139.com +35957 + Donjin Communication Technology Co.Ltd + Deepanker Tamta + deepanker.tamta&donjin.com +35958 + Vedekon + Mikhail Zaitsev + mz&vedekon.net +35959 + TECHWAY S.A.S + Elodie RIGAUDIERE + support&techway.fr +35960 + Drotposta Consulting Ltd + Geza Simon + sngeza&freemail.hu +35961 + Christian Arnold + Christian Arnold + christian&arnold.schlittermann.de +35962 + Pumpkin Heads Co.,Ltd. 
+ Shoichiro Sakaigawa + nwadmin&pumpkinheads.jp +35963 + k12 ITC, Inc. + Brad Sandt + sandtb&k12itc.com +35964 + BeiJing CS&S HuaTech Info Tech Ltd + LiYingyu + yingyu_li_7&163.com +35965 + Control Risks Group Ltd + Michael Cherry + Mike.Cherry&control-risks.com +35966 + MBDA Italia SPA + Matteo Calabrese + matteo.calabrese&mbda.it +35967 + AutoZone + Jared Breland + iam&autozone.com +35968 + TrustFabric + Joe Botha + joe&trustfabric.org +35969 + Vidyo, Inc. + Adi Regev + adi&vidyo.com +35970 + Planetsgroup + Edvin Gacina + egacina&planetsgroup.com +35971 + Zaha Hadid Architects + Simon Johns + simon.johns&zaha-hadid.com +35972 + Audaxis S.A. + Eric Fesler + eric&audaxis.com +35973 + Fenazari + Stefan Rådström + admin&fenazari.se +35974 + SnmpSoft Company + Sergey Turlachev + company&snmpsoft.com +35975 + Healthsense, Inc. + Dan Vatland + dan.vatland&healthsense.com +35976 + Herrmann EDV-Beratung + Sascha Herrmann + oid-reg&nvbi.de +35977 + Reddog Chili + David C. Biediger + david.biediger&gmail.com +35978 + Clearleap, Inc. + Omar DeLara + odelara&clearleap.com +35979 + Gonow Tecnologia + Rodrigo F. Trevisan + rodrigo.trevisan&gonow.com.br +35980 + vMonitor LLC + Raed Abdallah + raed&vmonitor.com +35981 + isigma asesoría tecnológica, S.L. + Chema López + clopez&isigma.es +35982 + HUMANNIX co., Ltd + Jeoung-Hyun, Seo + jhseo&humannix.com +35983 + MADS B.V. + Melvin Rook + melvin.rook&mads.com +35984 + Simbrella + Adnan Akhundov + adnan&simbrella.com +35985 + SM CNS Corporation + Murugavel Kumaravel + k.murugavel&smcns.net +35986 + KALKITECH + Tomy Devasia + tomy&kalkitech.com +35987 + Sangoma Technologies + Jean-Marc Legrand, eng. + snmp.10.jlegrand_paraxip&spamgourmet.com +35988 + Hiport srl + Antonino Iaria + noc&hiport.it +35989 + Maranatha Christian University + Wilfridus Bambang Triadi Handaya + wilfridus.bambang&eng.maranatha.edu +35990 + UK MOD DE&S + Ian Burch + desle-defstans&mod.uk +35991 + Chesapeake Systems + Terry Melton + terry&chesa.com +35992 + Christian Hain + Christian Hain + christian_hain01&gmx.net +35993 + Syrus, Ltd. + Ben Mills + bmills&syrusinfo.com +35994 + Intuix LLC + Dmitry Kohmanyuk + dk&intuix.com +35995 + quidecco.de + Isidor Zeuner + iana&quidecco.de +35996 + Datatek Applications Inc. + Susan Stultz + sstultz&datatekcorp.com +35997 + Kath. Jugend St. Bruno + Benedikt Schmitz-Rode + webmaster&stbruno.de +35998 + Universitat d'Andorra + Aleix Dorca + adorca&uda.ad +35999 + E-Flamingo LLC + Joel DeJesus + dejesus.joel&e-flamingo.jp +36000 + Host-Consultants + Felix Bartels + felix&host-consultants.de +36001 + PJB Consulting + Jakub Pjanka + jakub&pjb.com.pl +36002 + Taiji Computer Corporation Ltd. + liqi + liqi&mail.taiji.com.cn +36003 + Techlan s.r.l. + Simone Gasparini + simone.gasparini&techlan.it +36004 + Vitracom AG + Ralph Majer + majer&vitracom.de +36005 + CSE s.c.a.r.l. Consorzio Servizi Bancari + Ezio Faccioli + sisunix&csebo.it +36006 + VFT Telecom + Mr. Nguyen Thai Ha + hant&vft.com.vn +36007 + Logikos, Inc. + Stefan Kelley + skelley&logikos.com +36008 + Kelly Services, Inc. + Jim Merrill + jim.merrill&kellyservices.com +36009 + 6connect, Inc. 
+ Aaron Hughes + aaron&6connect.com +36010 + lordgandalf + Niels de Leeuw + niels&lordgandalf.nl +36011 + Commontime Ltd + Neil Whitworth + iana&commontime.com +36012 + Oxford Life Insurance Company + Stephen Maurer + Steve&oxfordlife.com +36013 + SMA Solar Technology AG + Andreas Mueller + sysadmin&sma.de +36014 + Radio Free Asia + David Baden + badend&rfa.org +36015 + SavillTech + John Savill + john&savilltech.com +36016 + PROCERGS - Rio Grande do Sul State IT Company, Brazil + Leonardo Reginin + leonardo&procergs.rs.gov.br +36017 + Huntsman Cancer Institute at the University of Utah + Dennis Berry + Dinny.Berry&hci.utah.edu +36018 + RAAF Technology + Rubin Simons + rubin&raaf.tech +36019 + die tageszeitung + Norbert Thies + norbert&taz.de +36020 + PoC Oy Ltd. + Erkka Marjakangas + erkka.marjakangas&poc.fi +36021 + IT Research Center LLP + Rinat R Yamalutdinov + yamalutdinov_r&itrc.kz +36022 + rku.it GmbH + Klaus Seeger + klaus.seeger&rku-it.de +36023 + Stichting Christelijke Hogeschool Windesheim + Johan van der Molen + ja.van.der.molen&windesheim.nl +36024 + AltaSoft s.c. + Emil Kruczek + ekruczek&altasoft.com.pl +36025 + Videopolis France + Joern Berrisch + jb&tvtrip.com +36026 + GoldZone Web + Gaëtan Trellu + gaetan.trellu&gmail.com +36027 + Methode Electronics + Rex Park + rpark&methode.com +36028 + NoconaGeek + Curtis Wood + noconageek&gmail.com +36029 + Medical Micrographics LLC + Bryan Jennings + bryan&medicalmicrographics.com +36030 + Happyserver Ltd + Michael Derringer + michael&happyserver.co.uk +36031 + Techsense Solutions Sdn Bhd + Namran Hussin + namran&techsense.com.my +36032 + Promsvjazdizajn, OOO + Maxim Krakovskiy + kmm&promsd.ru +36033 + Pak eVentures + Muhammad Raza Saeed + raza.saeed&confiz.com +36034 + Kelvin Connect Ltd + Meurig Sage + alarmsupport&kelvinconnect.com +36035 + Signe S.A. + María José Martínez + mariajose.martinez&signe.es +36036 + DNA Oy + Lasse Leppänen + Lasse.Leppanen&dna.fi +36037 + Folia a/s + Rikke Ottesen + ro&folia.dk +36038 + Adif, Administrador de Infraestructuras Ferroviarias + Rafael J. Mata Martin + rmata&adif.es +36039 + Estonian Internet Foundation + Norman Aeg + norman.aeg&eestiinternet.ee +36040 + Ibetor, S.L. + Mikel Nava + mnava&ibetor.es +36041 + KAMAZ Inc. + Oleg Soroka + soroka&kamaz.org +36042 + LIAB ApS + Mikael Dich + info&liab.dk +36043 + Athena Wireless Communications Inc. + Warren Steffen + wsteffen&athenawirelesscommunications.com +36044 + UCC Pro GmbH (formerly 'NSSR.neT') + Stefan Richter + info&UCCPro.ch +36045 + Uma Electrotechnical Services + Devendra Kumar Shukla + devendrakumarshukla&gmail.com +36046 + tdvine Co., Ltd + SahngOh Jung + sahngoh&tdvine.com +36047 + Kadme AS + Adalbert Michelic + adalbert&kadme.com +36048 + PUISSANCE + + Francis DELPECH + delpech&puissanceplus.com +36049 + GGG Kft. + Gaspar Pechy + ukobold&ggg.hu +36050 + Feed Your Head + Wolfgang Hotwagner + postmaster&feedyourhead.at +36051 + TDM Consult GmbH + Thorsten Marsen + t.d.marsen&tdm-consult.com +36052 + EdgeCast Networks, Inc. + John Scharber + john&edgecast.com +36053 + Azka National for Information Technology + Abdel Rahim Omer Ali + abd&azka.com +36054 + eVigilo Ltd + Bronya Korolyova + bronya&evigilo.net +36055 + RadLogix Pty Ltd + Mat Hudson + info&radlogix.com +36056 + AIDev LLC + Alexei Isac + alexei&aidev.org +36057 + FreeSInno Network Technology Co.,Ltd + Channer.lv + channerlv&freesinno.com +36058 + Beijing TopLink Technology Development Co., Ltd + Hu Wen + huwen&top-linkbj.com +36059 + International Electric Supply Corp. 
(IESC) + Sandeep Keni + sandeep.keni&gexpro.com +36060 + geek!daily + Jim Meyer + jim&geekdaily.org +36061 + Amonics Ltd. + Tsang Chi Wai + postmaster&amonics.com +36062 + Integrius AB + Christian Klemetsson + christian.klemetsson&integrius.com +36063 + NKIA Co.,Ltd. + sungsoo yoo + ssyoo&nkia.co.kr +36064 + Zappware + Erwin Peeters + erwin.peeters&zappware.com +36065 + Pionier - Polish Optical Internet + Paweł Wolniewicz + pawelw&man.poznan.pl +36066 + Invensys/Triconex + John Gabler + john.gabler&invensys.com +36067 + Timo Gerke + Timo Gerke + timo.gerke&alice-dsl.net +36068 + UBIMET GesmbH + DI Markus Lenger + mlenger&ubimet.com +36069 + Carbofos RU + Andrey Rasskazov + a.rasskazov&carbofos.ru +36070 + Power Standards Lab + Alex McEachern + Alex&PowerStandards.com +36071 + HerpEco LLC + Jean-Francois Cayron + jf&HerpEco.com +36072 + Antares 611 LLC + Jean-Francois Cayron + jf&antares611.com +36073 + ETECSA s.a. + Hector Manuel Jacas Joa + hector.jacas&etecsa.cu +36074 + Dark Matter Labs Inc. + Jeff MacMillan + jm&darkmatterlabs.net +36075 + TBWA\Chiat\Day + Richard Kruszewski + dns.billing&tbwachiat.com +36076 + SAPO + Marco Ramos + mramos&co.sapo.pt +36077 + Xilocore + Richard Pressler + richardp&allconnected.com +36078 + RFVISION Co.,LTD + Shin, Dong-Jin + djshin&rfvision.kr +36079 + Audaxis + Charles KOPROWSKI + cko&audaxis.com +36080 + National Information Technology and Internet Agency + Jovana Bujosevic + jovana.bujosevic&rzii.gov.rs +36081 + Clare Controls, Inc. + Jim Hanna + jim.hanna&clarecontrols.com +36082 + Electronic Tele-Communications, Inc. + Dean W. Danner + snmp-admin&etcia.com +36083 + Agility Logistics Pvt Ltd + Naresh Kumar .A + anaresh&agilitylogistics.com +36084 + Electronic Classroom Of Tomorrow + David Conrad + david.conrad&ecotoh.net +36085 + L-Card + Pavel Sukortsev + newstep&yahoo.com +36086 + lamehost.it + Marco Marzetti + marco&lamehost.it +36087 + MCIO Incorporated + Aaron Brace + abrace&mcio.org +36088 + LinowSat + Oliver Linow + admin&linowsat.de +36089 + SPARF + Christopher L. Cousins + clc-iana&sparf.net +36090 + Remote Instruments + Michelle Low + snmp.admin&remote-instruments.com +36091 + SIS Group Pty Ltd + Luke Iggleden + admin&sisgroup.com.au +36092 + NPP Ugpromavtomatizaciya + Fursenko Anton + anton&ugpa.ru +36093 + Weifang Dongsheng Electronics Co., Ltd. + gaozhiliang + gaozhiliang_qust&126.com +36094 + VVDN Technologies + Selva Muthukumar + selva.muthukumar&vvdntech.com +36095 + Insighteck + Ioan Salau + ioan.salau&insighteck.com +36096 + Adama University + Mohammed Nuru Hassen + mohammed.nuru&adama-university.net +36097 + Unixwiz.net + Stephen J. Friedl + steve&unixwiz.net +36098 + GoodForBusiness + Dominic Marks + dom&goodforbusiness.co.uk +36099 + Hangmat + Olivia Puraski + iana-pen&hangmat.org +36100 + WWF-UK + Paul Beyer + pbeyer&wwf.org.uk +36101 + South Patron (formerly 'SMK Software') + Durand Miller + info&southpatron.com +36102 + Paramatrix Technologies Pvt. Ltd. 
+ Raghu Prasad + prasad.raghu.k&gmail.com +36103 + Lan ETS + Philippe Larouche + plarouche&lanets.ca +36104 + TraceSpan Communications + Vladislav Goverdovsky + info&tracespan.com +36105 + Blue Corss Blue Shield of Massachusetts + David Querusio + David.Querusio&bcbsma.com +36106 + Wraptastic, LLC (for http://rpm5.org) + Jeff Johnson + n3npq&mac.com +36107 + Helen Marks Marketing Ltd + Dominic Marks + dom&goodforbusiness.co.uk +36108 + Cooperative Resources International + Rich Pinkall Pollei + rpollei&crinet.com +36109 + stellaware.de + Malte Starostik + info&stellaware.de +36110 + Wannet Tecnologia da Informação LTDA. + Rommel de Sene Trindade + rommel.trindade&wannet.com.br +36111 + INNOS Co., Ltd. + cheolwoong kim + support&innos.net +36112 + Perceptive Software, Inc + Michael Price + michael.price&perceptivesoftware.com +36113 + IQinVision + Rochak Sharma + rochak.sharma&iqeye.com +36114 + Automazione e Sicurezza Ferroviaria + Gianluca Sanna + gianluca.sanna&ausif.it +36115 + True Access Consulting SA + Enilton Nascimento Jr. + eniltonj&trueaccess.com.br +36116 + Akrometrix, LLC + Timothy Purdie + Tpurdie&akrometrix.com +36117 + Gustav R. Jansen + Gustav R. Jansen + gustav&umbriel.org +36118 + Offshore Systems Ltd. + Kenneth Lee + ken.lee&osigeospatial.com +36119 + Zer0.Tools IT Solutions + Christopher Gabijan + dbx0001&gmail.com +36120 + Lemnisys + Kale Blankenship + kale&lemnisys.com +36121 + Ebruit Ltd. + Becht Richard + becht.richard&ebruit.hu +36122 + Designskolen Kolding + Søren Grønning + sgi&dskd.dk +36123 + KORATEK + SEHEE JANG + asara1&hanmail.net +36124 + Vinzenz Gruppe Krankenhausbeteiligungs- und Management GmbH + Stefan Rausch-Schott + stefan.rausch-schott&vinzenzgruppe.at +36125 + secadm GmbH + Christian Niessner + iana-registry&secadm.de +36126 + CCBill + Jason Kirk + general&ccbilleu.com +36127 + The Ulyanovsk reg. Dept. of Treasury + Vladimir Olejnik + noc&simtreas.ru +36128 + MobileTech Limited + Mr. Raymond Fung + raymondf&mobiletech.com.hk +36129 + Institution des Chartreux + Raphael RIGNIER + r.rignier&leschartreux.com +36130 + INDUSTEX S.L. + Juan José Esplugues + inf.juanjo&industex.com +36131 + Orafol Europe GmbH + Mario Czarny + m.czarny&orafol.de +36132 + woernhard.NET + Maurice David Wörnhard + maurice&woernhard.net +36133 + Cartagena as + Knut Eivind Handeland + infrastructure&cartagena.no +36134 + Tristan Navikrvicius + Tristan Navikevicius + tristan.navi&gmail.com +36135 + Yet Another Linux Distro + Tristan Navikevicius + root&tristan.cz.cc +36136 + Central Bank of the UAE + Mohammed Shibili + shibili&cbuae.gov.ae +36137 + Rising Sun Pictures + Simon Malessa + snmp&rsp.com.au +36138 + CENTRON COMMUNICATIONS TECHNOLOGIES FUJIAN CO.,LTD + AnShan Wang + ashan.wang&oa.centron.com.cn +36139 + Fläkt Woods Group SA + Lars J. Sandell + lars.sandell&flaktwoods.com +36140 + Funkwerk Information Technologies Karlsfeld GmbH + Christian Trautsch + christian.trautsch&funkwerk-itk.com +36141 + Ubiquoss + Gyunam Cho + earl&ubiquoss.com +36142 + ASTREA LA INFOPISTA JURIDICA SL + Fernando M. Alamillo + fernando.alamillo&astrea.cat +36143 + dipswitch networks + ben thielsen + pen_admin&dipswitch.net +36144 + NovaSparks + Marc Battyani + info&novasparks.com +36145 + Alliance Healthcare + Manuel Ruiz + mruiz&alliance-healthcare.es +36146 + Five Colleges, Incorporated + John W. 
Manly + jwmanly&amherst.edu +36147 + Simrex Corporation + Frank Neuperger + frank&simrex.com +36148 + Webproduce Corporation + Hiroshi Nakamura + wpc-se&primestage.net +36149 + Argox Information Co., Ltd. + Rick Chen + rick_chen&argox.com +36150 + ShenZhen MingWah AoHan High Technology Corporation Ltd. + lixiang + lixiang26&tom.com +36151 + HM EI Zrt. + Papp Botond + papp.botond&hmei.hu +36152 + Kulczyk Tradex + Maciej Walczak + walczakm&kulczyktradex.com.pl +36153 + Beijing InHand Networks Technology Co,.Ltd + Bai Bin + baibin&inhand.com.cn +36154 + Factor Power Inc. + Eric Roman + eric&factorpower.com +36155 + US Army Surface Deployment and Distribution Command + Jimmy Orona III + jimmy.orona&us.army.mil +36156 + m.a.x. Informationstechnologie AG + Michael Luecke + hostmaster&max-it.de +36157 + gooseman.cz + Jakub Husak + sysadmin&gooseman.cz +36158 + CoBlitz LLC + Larry Peterson + larry&coblitz.com +36159 + Inphoria Corporation + Will Townsley + wt&inphoria.com +36160 + OR-CZ spol. s r. o. + Jan Pechanec + jpechanec&orcz.cz +36161 + Hills-Cook Outcomes Consulting Company + David H. Cook, Ph.D., M.B.A. + admin&dhcook.net +36162 + Hardata + Lucio Corral + l.corral&hardata.com +36163 + CC Services, Inc + Network Services + itsnetworkservices&countryfinancial.com +36164 + Videoswitch S.R.L. + Marcelo Javier Indarramendi + soporte&videoswitch.tv +36165 + Learning Objects, Inc. + Jessica Murray + jmurray&learningobjects.com +36166 + Cabela's + Guy Anthony De Marco + guy.demarco&cabelas.com +36167 + BEWATEC Kommunikationstechnik GmbH + Sebastian Ehlke + ehlke&bewatec.de +36168 + Jurkiewicz + Jurkiewicz Jean-Marc + jurkiewicz&orange.fr +36169 + RSG Model Works + Rodney Thayer + rsgmodelworks&gmail.com +36170 + KeeperOS + Vojtech Suk + vojta&wifipresl.cz +36171 + Host Group of Companies Pty Ltd + Paul Foote + paul&hostnetworks.com +36172 + Gorenje, d.d. + Jure Šafarič + ntadmin&gorenje.si +36173 + Bob.sh + Ken Barber + iana-pen&bob.sh +36174 + Chengdu Ethercom Information Tech.Co.Ltd + TaoWanen + taowanen&hc2008.com +36175 + Sunspeedy Technology Co.,Ltd + MuJie + mujie&sunspeedy.com +36176 + Huayuan Technology CO.,LTD + Pingli Sui + suipingli&hyetec.com +36177 + AirSage Inc + Jonathan Scott + jscott&airsage.com +36178 + Minetec Pty Ltd + Paul Morton + paul.morton&minetec.com.au +36179 + Celerity Systems (Pty) Ltd + Hugo Lombard + sysadmin&celerity.co.za +36180 + Baltic Amadeus infrastrukturos paslaugos + Nerijus Sarnas + n.sarnas&baip.lt +36181 + PM Microwave Srl + Antonio Sganzerla + antonio.sganzerla&pmmicrowave.com +36182 + ENYCA + Raquel Gómez + rgomez&enyca.es +36183 + VMK RAUSCHER + Patrick Rauscher + p.rauscher&vmk-rauscher.at +36184 + Wright Line LLC + Jonathan Fuller + jonathan.fuller&wrightline.com +36185 + LVM Works + Matt Marchione + mmarchio&earthlink.net +36186 + Hangzhou Prevail Optoelectronic Equipment Co.,LTD + XI YU XI + stanhangzhou&gmail.com +36187 + LogicFactor + Damian Boune + dboune&logicfactor.com +36188 + Plandata Datenverarbeitungsgesellschaft m.b.H. + Constantin Gruber + constantin.gruber&plandata.at +36189 + BILGI GIS + ERHAN CINAR + erhanc&bilgigis.com +36190 + Jiang Wenkai's Own LDAP + Jiang Wenkai + jiangwenkai1234&yahoo.com.cn +36191 + otop AG + Christian Gaul + christian.gaul&otop.de +36192 + xtendx AG + Stu Thompson + stu&xtendx.com +36193 + KenBIT Koenig i Wspólnicy Sp.J. + Marek Paradiuk + mparadiuk&kenbit.pl +36194 + it-proserv Projects & Services Ing. Karl Liepold + Ing. 
Karl Liepold + k.liepold&it-proserv.at +36195 + Hedera Technology + Antoine Castaing + antoine.castaing&hederatech.com +36196 + Kai-Media Co. + Young-il, Lee + yilee&kai-media.com +36197 + Movius Interactive Corporation + Jeff Woods + jeff.woods&moviuscorp.com +36198 + TheSpidersProject + Richard Cuti + TheSpidersProject&gmail.com +36199 + Open Finance + Radoslaw Burza + rburza&open.pl +36200 + Valtech + Ingomar Otter + ingomar&valtech.de +36201 + NTV Broadcasting Company + Sergey Slivin + sslivin&ntv.ru +36202 + Thomson Reuters + Twan van Beers + twan.vanbeers&thomsonreuters.com +36203 + Namirial S.p.A. + CLAUDIO GABELLINI + simone&namirial.com +36204 + Sysinforg Kft. + Tamas Flamich + tamasflamich&sysinforg.hu +36205 + xhombee studios + Bear Wadleigh + bearlinux&gmail.com +36206 + Stichting Revelation Space + Koen Martens + gmc&revspace.nl +36207 + SYNERGY INFORMATION SYSTEMS INC. + Lin Chin-Yin + george&siscomm.com.tw +36208 + Karl Storz + Naveen Yenugu + nyenugu&ksea.com +36209 + RGazdi Kft. + Tibor Szucs + sz.tibor&rgazdi.hu +36210 + Centre hospitalier de l'Université de Montréal + Guy Fournier + guy.fournier.chum&ssss.gouv.qc.ca +36211 + The Catholic University of America + Daniel Foerst + foerst&cua.edu +36212 + Research and Education Bridge Certification Authority, Inc. + Benjamin T. Wilson + ben&digicert.com +36213 + SIA "INBOKSS" + Jevgenijs Kuznecovs + nimda&inbox.lv +36214 + Galaxy Telecom Technologies Ltd + Dawei Qi + dqi&galaxytelecomms.com +36215 + Greek Academic Network - GUnet + Nikos Voutsinas + nvoutsin&gunet.gr +36216 + GoliathDNS ENK + Atle Solbakken + atle&goliathdns.no +36217 + Ladela Interactive + Serghey Trufkin + serj&ladela.com +36218 + Open Cloud Consultants, Inc + Geoffrey Engerman + pen-admin&opencloudconsultants.com +36219 + Pipe Coaters Nigeria Limited + Michele Malagnini + michele.malagnini&pipecoatersnigeria.com +36220 + FLACSO + Norberto Bensa + nbensa&flacso.org.ar +36221 + Internet Brands, Inc. + James Fuentes + jfuentes&internetbrands.com +36222 + QBS - Quality Business Software + Hubert Kario + hka&qbs.com.pl +36223 + BeST (Closed Joint-Stock Company «Belarusian Telecommunications Network») + Vitaly Garbenkov + Vitaly.Garbenkov&life.com.by +36224 + Playphone Entretenimento LTDA + Fernando de Castro Rahal + fernando.rahal&playphone.com +36225 + Secure Forward, LLC + Kevin Peterson + khp&secureforward.net +36226 + Securus Technologies Inc. + Brian Galyean + BGalyean&SecurusTech.net +36227 + bookmark.com + Jack Wu + jack&bookmark.com +36228 + Dellus communication technologies Ltd. + Mia Chan + mia&dellus.com.cn +36229 + TechStudio Solutions Pte Ltd + Jesvyn Oon + jesvyn.oon&techstudio.com.sg +36230 + Axco Insurance Information Services + Simon Bruce + simon.bruce&axcoinfo.com +36231 + semantics GmbH + Jochen Rodewald + j.rodewald&semantics.de +36232 + Stig Johnsen IKT + Stig Johnsen + stig&sjikt.no +36233 + Security Industry Supplies Pty. Ltd. 
+ Matthew Jones + mij&sisdirect.com.au +36234 + The Otto von Guericke University of Magdeburg + Nataliya Kulyk + nataliya.kulyk&ovgu.de +36235 + Livedrive Internet Ltd + Matthew Willcock + admin&livedrive.com +36236 + PointSharp + Stefan Ullgren + stefan.ullgren&pointsharp.com +36237 + Application Solutions (Electronics And Vision) Ltd + Craig Graham + cgraham&asl-vision.co.uk +36238 + oocero microsystems Unternehmergesellschaft (haftungsbeschränkt) + Paul Emmerich + paul.emmerich&mytum.de +36239 + Live Oak Technologies LLC + Theresa Wolf + Terry.Wolf&LiveOakTechnologiesLLC.com +36240 + ProBuild Holdings LLC + Frank Johnson + domainmaster&probuild.com +36241 + Bankinter + David Herraiz + dherraiz&bankinter.es +36242 + mep srl + roberto cerfogli + r.cerfogli&mepping.com +36243 + Hollan Family + Rene S. Hollan + rene&hollan.org +36244 + Linguamatics Solutions Ltd. + Terry Stebbens + od_operations&linguamatics.com +36245 + EBO Group Inc. + Zac Charlton + zcharlton&ebogroupinc.com +36246 + Ärztekammer Schleswig-Holstein + Michael Stramm + michael.stramm&aeksh.org +36247 + ACAMS AS + Kim Fredheim + mail&acams.no +36248 + HeadHunter + Vadim Novik + novik&hh.ru +36249 + Green Cloud srl + Gianmarco Gabrieli + info&greencloud.it +36250 + New York State Division of Criminal Justice Services + David R. (Rod) Watterworth + rod.watterworth&dcjs.state.ny.us +36251 + Allgoodbits.org + Duncan Hutty + dhutty&allgoodbits.org +36252 + PHARMATECHNIK GmbH & Co. KG + Wolfgang Wendefeuer + w.wendefeuer&pharmatechnik.de +36253 + Customized InformSystems, Ltd + Orlov Valentin + sysadmins&custis.ru +36254 + Brandon Associates + John Grabiec + jgrabiec&brandonassociates.com +36255 + Geek Central + Ian Reinhart Geiser + geiseri&yahoo.com +36256 + PRIMA Cinema, Inc. + Nicholas Wehr + noc&primacinema.com +36257 + Givaudan SA + Damien Lachuer + damien.lachuer&givaudan.com +36258 + ThreatGuard Inc. + Gunnar Engelbach + Support&ThreatGuard.com +36259 + Callino + Wolfgang Pichler + wpichler&callino.at +36260 + BioMarin Pharmaceutical Inc. + Alan Fung + afung&bmrn.com +36261 + BD2000 Ltd + Deepak Patel + bd2000&talk21.com +36262 + Meyertech Ltd. + Paul Grossman + paul.grossman&meyertech.co.uk +36263 + EUROP ASSISTANCE SERVICIOS INTEGRALES DE GESTION S.A. + JULIA GARCIA DE MINGO + julia.garcia&europ-assistance.es +36264 + Evaldo Gardenali + Evaldo Gardenali + evaldo.gardenali&gmail.com +36265 + MAXBERT + Miroslaw Grochowski + miroslaw.grochowski&maxbert.pl +36266 + Golden Gekko + Sebastian Dahlgren + sd&goldengekko.com +36267 + Torsten Franke + Torsten Franke + mail&torstenf.de +36268 + ComAbility + Oleg Pogorelik + oleg&comability.com +36269 + Servicios de Consultoria Independiente S.L. + Juan Domingo Sandoval Gonzalez + sandoval&sci-spain.com +36270 + JAEA + Tetsuya Kuno + ms-oid&jaea.go.jp +36271 + StreamOcean inc. + Li Cheng + dennistocker&gmail.com +36272 + Protacon Solutions Oy + Lauri Laukkarinen + lauri.laukkarinen&protacon.com +36273 + Haas Elektro GmbH + Peter Zednik + peter.zednik&network.co.at +36274 + Embedded Systems Technology Group + James Leonard + snmp&estg.com +36275 + Communitake + Ben Katz + ben&communitake.com +36276 + North Island Credit Union + Tom Tanida + ttanida&myisland.com +36277 + UOL S.A. 
+ Marcelo Herrero + l-tec-net&uolinc.com +36278 + Protality Ltd + Peter Robinson + pete.robinson&protality.co.uk +36279 + Uno Web Informação e Tecnologia Ltda + Renato Magalhães + contato&unoweb.com.br +36280 + Basda Medical Apparatus Co.,Ltd + ZhangShaobin + zhangbin&basda.com.cn +36281 + Beijing Boomsense Technology CO.,LTD. + Young Guo + guoyang&boomsense.com +36282 + Monico Monitoring inc. + Doyle Taylor + doyle&monicoinc.com +36283 + Lilee Systems, Inc. + Wayne Kao + kao&lileesystems.com +36284 + Centre Psychothérapique de l'Ain + Jean-Noël Perrimbert + info&cpa01.fr +36285 + Alliance Design WorkGroup + Mario Weißmann + mario.weissmann&a-d-w.net +36286 + MIKROKLIMA s.r.o. + Michal Dvorak + dvorak&mikroklima.cz +36287 + wTVision + Alex Fraser + alex.fraser&wtvision.com +36288 + Editora del Mar S.A. + Wilmer Aljure G. + waljure&eluniversal.com.co +36289 + AVM Informatique + BARBIER Jérôme + jerome.barbier&avm-informatique.com +36290 + VeEX Inc + Cyrille Morelle + cmorelle&veexinc.com +36291 + Signalion GmbH + Matthias Henker + matthias.henker&signalion.com +36292 + kulthauskante e.V. + Karsten Krispin + info&kulthauskante.de +36293 + Smartlink Network Systems Limited + Deepak P Chodankar + deepak.chodankar&smartlink.co.in +36294 + MSO Link + Tito Johnson + tito&msolink.com +36295 + Marubeni-Itochu Tubulars America, Inc. + David Johnson + david-johnson&mitube.com +36296 + Mentor Solutions + Ryan Geyer + rgeyer&its.jnj.com +36297 + DVM ITS + Dmitry V. Mikhailov + dvm&dvm.su +36298 + Baustem Technologies Ltd. + Xiaoli Li + ally&baustem.com +36299 + Ronix Systems + Alexander Shinkarenko + alexander&ronix.ru +36300 + Intervale JSC + Vasil Varabyou + vvorobyov&intervale.ru +36301 + URS Federal Services + Dan Bartz + dan_bartz&urscorp.com +36302 + ARTiiS GROUP a.s. + Libor Hřib + libor.hrib&artiisgroup.cz +36303 + AuthenTec Inc. + Greg Kerr + greg.kerr&authentec.com +36304 + Kandcnet Inc. + Kaoru Miyauchi + miya&kandcnet.com +36305 + WoSign CA Limited (formerly 'WoSign eCommerce Services Ltd.') + Richard Wang + ca&wosign.com +36306 + University of Teesside + UNIX Team (Bill Taylor) + iana&scm.tees.ac.uk +36307 + SONUMA + Eric DENIS + ed&sonuma.be +36308 + txtNation Ltd. + Mr Andy Pieters + A.Pieters&txtNation.com +36309 + Circadence Corporation + Andrew Thigpen + athigpen&circadence.com +36310 + Firelands Regional Medical Center + Steve Ayres + ayress&firelands.com +36311 + exthex GmbH + Peter Danner + peter.danner&exthex.com +36312 + Alert Logic, inc. + Tom Veronie + tveronie&alertlogic.com +36313 + GL Communications, Inc. + Alan Revzin + arevzin&gl.com +36314 + Regional Court in Lublin + Lukasz Glaz + hostmaster&so.lublin.pl +36315 + Joel Riedesel + Joel Riedesel + joel&joelriedesel.com +36316 + Polar Power Inc + Arthur D. Sams + SALES&POLARPOWERINC.COM +36317 + AVCOM Of Virginia, Inc. + Chris Blyseth + cblyseth&avcomofva.com +36318 + Marathon Petroleum Company + Joe Froelich + externaldomainmgt&marathonpetroleum.com +36319 + iT-CUBE SYSTEMS GmbH + Thomas Fischer + thirdlevel&it-cube.net +36320 + 4Tel Pty Ltd + Matthew Rechter + mrechter&4tel.com.au +36321 + Dirección Ejecutiva de la Magistratura + Olaf Reitmaier Veracierta + soporte_correo&tsj-dem.gob.ve +36322 + semanticlab + Albert Weichselbraun + albert&semanticlab.net +36323 + WynnIT Ltd + steve wynn + steve&wynnit.co.uk +36324 + OpenRT + Derek Dolney + nospam&dolney.com +36325 + IMAQLIQ Ltd. + Konstantin Khitsko + kkhitsko&imaqliq.com +36326 + T. Gipp + Torge Gipp + torge.gipp&gmail.com +36327 + 9Netics Inc. 
+ Fariborz "Skip" Tavakkolian + skip&9netics.com +36328 + Parvus Corporation + David Draper + ddraper&parvus.com +36329 + Ohio Farmers Insurance Company + Alan Willis + linuxadmin&westfieldgrp.com +36330 + Comwired, Inc. + Deven Phillips + deven&dns.com +36331 + Social & Scientific Systems + Kenneth White + kwhite&s-3.com +36332 + Coast360 Federal Credit Union + Cody Flis + systems&coast360fcu.com +36333 + Wilshire Media Group + Ian Geoghegan + ian&wilshiremedia.com +36334 + Dialog Information Technology + Rob Ruxton + rob_ruxton&dialog.com.au +36335 + aceway telecom co.,Ltd + Sun Hongmei + sunhm&aceway.com.cn +36336 + Arxscan Inc. + Mark Fitzsimmons + mfitzsimmons&arxscan.com +36337 + St. Antonius Ziekenhuis + TeunJan Pelgrim + t.pelgrim&antoniusziekenhuis.nl +36338 + RESI Informatik & Automation GmbH + DI Heinz-Christian Sigl + office&RESI.cc +36339 + Neurosearch A/S + Morten Nielsen + mhn&neurosearch.dk +36340 + Phonographic Performance Limited + Jason Roberts + jason.roberts&ppluk.com +36341 + Optiver + Marc Jongeneel + networks&optiver.com +36342 + Erlyvideo + Max Lapshin + info&erlyvideo.org +36343 + CoreCodec, Inc. + Kate Gray + kgray&corecodec.com +36344 + Skyfiber Inc + Wayne Walters + wayne.walters&skyfiber.com +36345 + Benchbee co., Ltd. + SeongHa Park + nexgens&msn.com +36346 + X-TRANS, Inc. + Shigeki Iwai + iwai&x-trans.jp +36347 + Prosodie + Benoît Quéval + bqueval&prosodie.com +36348 + Compass-EOS + Einat Ron + einatr&compass-eos.com +36349 + ANSEC Group, s.r.o. + Pavel Andreev + pavel.andreev&ansec.cz +36350 + New Zealand Ministry of Education + Andreas Kubisch + Andreas.Kubisch&minedu.govt.nz +36351 + CESI S.p.A. + Dino Pradelli + security&cesi.it +36352 + Just IT s.r.l. + Renato Massone + iana-pen&justit.it +36353 + Bridgewater + Lalit Tyagi, Brian Cella, Jacques Malette + it_esm&bwater.com +36354 + globo.com + Nedimar Paulo Turatti + suporte.producao&corp.globo.com +36355 + BIRUNI Grid Computing Centre + Muhammad Farhan Sjaugi + farhansj&biruni.upm.my +36356 + Zycomm Electronics Limited + Charles Price + cpwp&w3z.co.uk +36357 + NOMOS-BANK Open Joint-Stock Company + Evdokhin Gennady + pki&nomos.ru +36358 + Whole Foods Market Services, Inc + Daniel Birmingham + Daniel.Birmingham&wholefoods.com +36359 + Securus Software Ltd + Jill Britton + jill.britton&securus-software.com +36360 + ASTELNET + Andrey Ivanov + andrey&astelnet.com +36361 + APTEC, LLC + Earl Belcher, Jr. + earl.belcher&aptecllc.com +36362 + OT Bioelettronica snc + Enrico Merlo + e.merlo&otbioelettronica.it +36363 + NRT LLC + Scott Zadroga + Scott.Zadroga&nrtllc.com +36364 + Stefan Reinhold + Stefan Reinhold + development&ithron.de +36365 + OTJ Tecnologia + Paulo Sérgio Garcia + psgarcia&otj.com.br +36366 + Kaminario + Arik Kol + arik.kol&kaminario.com +36367 + E-SONDE NETWORK MONITORING, S.L. + Antonio Ruiz + antonio.ruiz&e-sonde.com +36368 + Shenzhen Hexicom Technology Co., Ltd. + luoxionghui + luoxionghui&hexicomtech.com +36369 + Cirrascale Corporation + David Leimbach + dave.leimbach&cirrascale.com +36370 + Growth Curve Technologies Ltd. + Ian Gilfillan + ian&b3accounts.com +36371 + Solium Capital Inc. + Trever Miller + trever.miller&solium.com +36372 + Flygprestanda U.S. Inc. + Alexander Lazer + alexander.lazer&flygp.us +36373 + HHO_Automation + Uwe Hanke + uhanke&hho-inh.de +36374 + MIG + Jad Nehme + jad.nehme&themig.com +36375 + Neota Logic Inc. 
+ Michael Mills + michael.mills.ny&gmail.com +36376 + VersaSuite + Mason Mitchell + mmitchell&versasuite.com +36377 + Schmidt electronic + Juergen Schmidt + info&schmidt-electronic.com +36378 + Nordic AkkaSource AB + Nils-Olof Bankell + nils-olof.bankell&jayway.com +36379 + Mesto Prostejov + David Koudelka + pki&prostejov.eu +36380 + Internet Applications Research and Development, LLC + Stuart A MacKenzie + administrator&iarad.net +36381 + TeleMate.Net Software + John O'Reilly + contact&telemate.net +36382 + Damballa, Inc. + Stephen Newman + snewman&damballa.com +36383 + VDC Display Company + Jesse Salvatore + jsalvatore&vdcds.com +36384 + Designfusion Software Sales Inc + Jonathan Brown + jbrown&designfusion.com +36385 + NMS Unlimited + Mike Worthington + nmsunlimited&gmail.com +36386 + Digia cc + Tristan Koen + tristan&digia.org +36387 + Scorpion Holding Company Limited + Omar Charles + omarcharles&scorpion-holding.com +36388 + Sanctuary Group + Colin Williams + colin.williams&sanctuary-housing.co.uk +36389 + Infospectrum India Pvt. Ltd. + Farooque Khan + farooque&info-spectrum.com +36390 + Nice Thoughts + Iwan Nijs + admin&nicethoughts.eu +36391 + Beijing JinYuanXin Technology Co.,Ltd. + Xiaoping Guo + Xiaoping.Guo&xpower.com.cn +36392 + Taxcom Ltd. + Alexander Tupitsyn + TupitsynAV&taxcom.ru +36393 + Bohemia Interactive + Jan Hlavaty + hlavac&bistudio.com +36394 + Benco Dental + Infrastructure Team + DL.NOC&benco.com +36395 + Hochschule Wilhelmshaven/Oldenburg/Elsfleth + Guido Manemann + manemann&jade-hs.de +36396 + OAO "FSK EES" MES Siberian + Temerov Alexandr Miroslavovich + temerov-am&sibmes.ru +36397 + Plurk Inc. + Ryan Lim + ryan&plurk.com +36398 + Ingenieria GBR, C.A. + Raul Gomez + rgomez&ingenieriagbr.com +36399 + 39th Studios, LLC + Curtis M Turner II + curtis&39thstudios.com +36400 + Neo Silurian Technologies (NS-Technologies) + Christopher Boulton + chris&ns-technologies.com +36401 + Dax Networks Ltd + Sudha Jagadish + peg&dax.in +36402 + Chunichi Denshi Co.,LTD. + Tatsuo Suzuki + tatsu&kcd.co.jp +36403 + sentiam, inc. + Jürgen Reinold + iana-pen&sentiam.com +36404 + Mako Networks Ltd + Murray Knox + murrayk&makonetworks.com +36405 + Department of Information System, Hanoi National university of Education, Vietnam + Dang Dinh Duong + myheeu&gmail.com +36406 + PopTarantula + Joel Blackthorne + joel&poptarantula.com +36407 + Peer Review Mediation and Arbitration + Jonathan Smith + jonathan&peerreviewboard.com +36408 + Norges Bank Investment Management + Thomas Aure + tau&nbim.no +36409 + Foyer Assurances S.A. + Mar Durbach + dum&foyer.lu +36410 + gokuraku.it + Renato Massone + iana-pen&gokuraku.it +36411 + Zhejiang Chuangyi Technology Co., Ltd + Zhang Pengyu + zhpy2000&sina.com +36412 + Scientific manufacturing firm Stalenergo Limited + Gennadiy Mogulyan + mgv&stalenergo.ru +36413 + LUMIPLAN SAS + Didier FAUCHER + didier.faucher&lumiplan.com +36414 + Espial Group Inc. + Kumanan Yogaratnam + kumanan&espial.com +36415 + INRS Custom Data Inc + Victor Plenter + vplenter&inrs.com +36416 + Harritronics LLC + Paul Harrison + paul&harritronics.com +36417 + KDE e.V + Ben Cooksley + ripe&kde.nl +36418 + Qwilt Inc. + Gaash Hazan + gaash&qwilt.com +36419 + Path Intelligence + Toby Oliver + toby&pathintelligence.com +36420 + Trinet Co., Ltd. + Mitsuhiro Nakamura + nakamura&trinet.co.jp +36421 + geniegate.com + Jamie Hoglund + pen&geniegate.com +36422 + Gaikai, Inc. + Timothy C. Arland + tca&gaikai.com +36423 + Warped Reality Studios + Guy A. 
De Marco + guy&guydemarco.com +36424 + Roberto De Duro + Roberto De Duro + r.deduro&hotmail.it +36425 + Fladi.at + Michael Fladischer + michael&fladi.at +36426 + AT Software und Rechnertechnik GmbH + Andreas Thalmayer + info&at-online.com +36427 + Soneco d.o.o. + Slavko Gajin + info&soneco.rs +36428 + Inreal Technologies Ltd + Jussi Heinonen + jussi&inreal.co.uk +36429 + TOOLS.LV SIA + Mareks Malnacs + mareks&tools.lv +36430 + Piarista Gimnázium, Vác + Sandor OROSZI + itadmin&vpg.sulinet.hu +36431 + Cobham + Mike Kee + mike.kee&cobham.com +36432 + Proteco Industria Eletrotecnica Ltda. + Marcos Gaeta + marcos&proteco.com.br +36433 + Station to Station BV + Gerben van Ek + g.vanek&stationtostation.nl +36434 + Inovit GmbH + Jochen Nickel + jochen.nickel&inovit.ch +36435 + Rathravane LLC + Peter Cardona + iana_pen&rathravane.com +36436 + Beijing GuoTie HuaChen Communication & Information Technology Co.,Ltd. + Yaoyang Wang + wyy&huachencn.com +36437 + ASAHI ELECTRONICS CO.,LTD. + Yoshihiro Inoue + inoue&aec.co.jp +36438 + Shanghai XZ-COM. Co.,Ltd. + zhu xiao hu + zhuxh&shxz-com.com +36439 + Telenetik Corporation + Roman Sergey + romansergey&telenetik.net +36440 + RICH RIGHT TECHNOLOGY LIMITED + Jason Zhang + zh-shine&163.com +36441 + G-FIT GmbH & Co. KG + Moritz Graf + snmp&g-fit.de +36442 + comune di Jesi + Alessandro Paggi + ced&comune.jesi.an.it +36443 + eikju: AG + Christoph Eicker + eicker&eikju.com +36444 + Sana IT Services GmbH Berlin-Brandenburg + Udo Leistner + u.leistner&sana-bb.de +36445 + fotocom + marc chassoulier + marc&foto.com +36446 + netkey information technology gmbh + Peter Klett + peter&netkey.at +36447 + Colliers International + Rui Fidalgo + Rui.Fidalgo&colliers.com +36448 + Verkehrsautomatisierung Berlin GmbH + Rainer Stimmerling + Rainer.Stimmerling&vaberlin.de +36449 + Computation Institute + Greg Cross + grog&ci.uchicago.edu +36450 + DFZ + Blagomir Blagoev + blagob&dfz.bg +36451 + CNP Assurances + SICARD Virginie + virginie.sicard&cnp.fr +36452 + Samara Institute of Open Education + Sergey A. Sushkov + cit&sec21.ru +36453 + Vanguardia Liberal + Rene Di Marco + rdimarco&vanguardia.com +36454 + RGI, Inc + Robert Greathouse + greathouse.robert&gmail.com +36455 + synertronixx GmbH + Dr. Matthias Blaschke + blaschke&synertronixx.de +36456 + Freiwillige Feuerwehr Strasshof + Felix Rehberger + Administrator&Feuerwehr-Strasshof.at +36457 + APL software, SL + Jesús Marín + jesus.marin&aplsoftware.com +36458 + Triumfant, Inc. + Jaye Whitmire + jwhitmire&triumfant.com +36459 + SURVISION + Alban GICQUEL + alban.gicquel&survision.fr +36460 + PangyreSoft + Ashley Pond V + ashley.pond.v&gmail.com +36461 + Squeep + Justin Wind + jwind-iana&squeep.com +36462 + 1st Source Bank + Michael Weaver + WeaverM&1stsource.com +36463 + DNW Technologies + Hsi Fang + manbaum&hotmail.com +36464 + henrich + jun wu + wujun9959&163.com +36465 + Data Center Manageability Interface Forum + Hari Ramachandran + hari.ramachandran&intel.com +36466 + H2 PowerTech (formerly 'Ballard Fuel Cell Systems' and 'IdaTech LLC') + Kevn Desrosiers + it&h2powertech.com +36467 + adesso AG + Christoph Stoppe + stoppe&adesso.de +36468 + Cornastone Infrastructure Services (Pty) Ltd + Jan Semmelink + jans&cornastone.com +36469 + Through IP Pty. Ltd. + Marco Hess + marco.hess&through-ip.com +36470 + Sofdela Inc. + Satoshi Fushimi + satoshi.fushimi&sofdela.info +36471 + Beijing SureKAM Technology Co., Ltd. + size Liu + liusize_yuexia&hotmail.com +36472 + Ilait + Kim Lundgren + registry&ilait.se +36473 + Direct One S.A. 
+ Cezar Darie + support&directone.ro +36474 + Lifespan Corporation + Paul Howard + pdhoward&lifespan.org +36475 + Fujian Etim Information & Technology Co,.Ltd. + yangxiaoyan + yxy&etim.cn +36476 + Academia de Informática Brava, Engenharia de Sistema, Lda + Ricardo Martins Sargo Garcês + rgarces&acin.pt +36477 + Ministry Of Health, Saudi Arabia + Mohammad Ibraheem Aboul Ola + ABOELELA-M&moh.gov.sa +36478 + WareValley + Inho Kim + ihkim&warevalley.com +36479 + Fastbooking + Ludovic LANGE + fastbooking.pen.iana&gmail.com +36480 + Driving Growth International GmbH + Mr. Smail Sakiri + smail.sakiri&driving-profit.de +36481 + eqqon GmbH + Wolfgang Zelenka + wolfgang.zelenka&eqqon.com +36482 + First Businesspost GmbH + Jürgen Graf + juergen.graf&first-businesspost.com +36483 + Lowe Enterprises and Destination Hotels and Resorts + Bill Hilbers + bhilbers&loweenterprises.com +36484 + JaReVo + Daniel Reichelt + daniel.reichelt&jarevo.de +36485 + nCk Research LLC + Matthew Chabot + matt.chabot&nck-research.com +36486 + Ecator Ltd + Bjorn Bergsten + ec001&ecator.com +36487 + Shanghai Xinmin Telecom Technologies Co.,Ltd + Wei Ye + rayye&foxmail.com +36488 + Brimrose Technology Corporation + Rick Winkelman + RWinkelman&brimrosetechnology.com +36489 + Geovan Salvato Borges + Geovan Salvato Borges + geovansb&gmail.com +36490 + MassHousing + James McCaughey + jmccaughey&masshousing.com +36491 + California State University, Long Beach + Steve La + sla&csulb.edu +36492 + JUNet (Jordanian Network Universities) + Anwar Al-Yousef + anwar&junet.edu.jo +36493 + IDIS Co.,Ltd. + Kiun Lee + rexos33&idis.co.kr +36494 + Fixmo Inc + Shyam Sheth + shyam&fixmo.com +36495 + Systemfive + Roger Kirchhofer + support&systemfive.com +36496 + TeamBox SARL + Patrick Bihan-Faou + patrick.bihan-faou&teambox.fr +36497 + Buzzinbees SAS + Marc Lamberton + marc.lamberton&buzzinbees.com +36498 + Asurea Insurance Services + Scott Freeland + sfreeland&asurea.com +36499 + West Penn Allegheny Health Systems + Tony Lukasavage + alukasav&wpahs.org +36500 + Behavioreal Ltd. + Alush Joni + eyal&behavioreal.com +36501 + Ever-Team Software + Ziad WAKIM + z.wakim&ever-team.com +36502 + NetBoss Technologies, Inc. + Nenad Matejic + nenad.matejic&netboss.com +36503 + waldmeisda.com + Frank Kreller + Frank&waldmeisda.com +36504 + Joint Stock Company «Arkhangelsk Sale Company» + Andrew V. Belov + abel&arsk.ru +36505 + Viatel + Pavel Smirnov + pavel.smirnov&viatel.com +36506 + Com N S Ltd. + Alon Rubinstein + alon&comns.co.il +36507 + chgauert + Christian Gauert + chgauert&gmx.de +36508 + Unica Corporation + Chris Marko + cmarko&unica.com +36509 + BeiJing testor techniqual Ltd + zhongtao Zhou + zhouzhongtao&testor.com.cn +36510 + TechnoServ A/S + Alexey Maslov + amaslov&technoserv.ru +36511 + MKB Unionbank AD + Cvetan Grigorov + GrigorovC&unionbank.bg +36512 + CITILOG + FORTHOFFER Martial + mForthoffer&citilog.com +36513 + SOGELINK SAS + Xavier BASSET + pen&dict.fr +36514 + Ipeer Internet Network + Amin Dandache + support&ednic.de +36515 + Suprema Corte de Justicia de la República Oriental del Uruguay + Ivonne Carrión + uejecutora&poderjudicial.gub.uy +36516 + Beaufort Delta Education Council + Michael Reardon + mike_reardon&bdec.learnnet.nt.ca +36517 + Estalea, L.P. 
+ Antonio Varni + systems&estalea.com +36518 + RF Technology Pty Ltd + John Dalton (per Frank Romanin) + john.rf.technology&gmail.com +36519 + bsdn + isa.wang + isa.wang03&gmail.com +36520 + SIGI + Philippe Mathieu + exploitation&sigi.lu +36521 + Global RadioData Communications Ltd + Marcus Williams + marcus.williams&grcltd.net +36522 + Simples Consultoria + Érico Andrei + erico&simplesconsultoria.com.br +36523 + SippySoft + Yura Monchak + yurun&sippysoft.com +36524 + Locus Telecommunications, Inc. + Andrew Norris + snmpadmin&locus.net +36525 + Imatia Innovation S.L. + Alejandro Ricart Varela + alejandro.ricart&imatia.com +36526 + Blackhawk Computer Services + Alex Rasmussen + TubaRasputin&gmail.com +36527 + Mark Pröhl + Mark Pröhl + mark&mproehl.net +36528 + WMZ Engenharia + Wagner Luiz Zucchi + wzucchi&lps.usp.br +36529 + BlueSpace Software + Brian Vetter + brian.vetter&bluespace.com +36530 + Fruktträdet + Jonas Eckerman + jonas&frukt.org +36531 + netford + Jin-Kyu Won + jkwon&netford.com +36532 + State Agency of Medicines of Latvia + Ingmars Briedis + ingmars.briedis&zva.gov.lv +36533 + Danaher Corporation + Eric Aarts - Danaher GS3 Team + dhr_pen&danaher.com +36534 + Bitartists + Marc Werner + mwerner&bitartists.de +36535 + Der Beck GmbH + Thomas Dietsch + thomas.dietsch&der-beck.de +36536 + Eprinsa + Manuel Castiñeira + mcasti&eprinsa.es +36537 + Beijing NeTopChip Technology Co.,LTD + Chao Zhong + chao.zhong&netopchip.com +36538 + ExperTeach Gesellschaft für Netzwerkkompetenz mbH + Muammer Arslan + helpdesk&experteach.de +36539 + smart SNMP + Matthias Haag + info&smart-snmp.org +36540 + Innofinity GmbH + Meinhard Schneider, CEO + schneider&innofinity.de +36541 + FIVES + Olivier HERMAN + olivier.herman&fivesgroup.com +36542 + St.Petersburg Out-patient department N106 + Tomashevsky Dmitry + tomashevdi&gmail.com +36543 + Sascha Silbe + Sascha Silbe + sascha-web-pen.iana.org&silbe.org +36544 + Universidade Federal do ABC + José A. de A. Gomes + jose.gomes&ufabc.edu.br +36545 + ii2P, LLC + Scott Ringer + sringer&ii2p.com +36546 + Towers Watson + Nino Evola + nino.evola&towerswatson.com +36547 + Heimatverein Elte e.V. + Klaus Paradies + technik&heimatverein-elte.de +36548 + Virtualmin Inc + Jamie Cameron + jcameron&webmin.com +36549 + Medienzentrum Kassel + Thorsten Strusch + noc&ksan.de +36550 + AnthonyVinhPham + Anthony Vinh Pham + anthonyvinhpham&gmail.com +36551 + RAADIUS LLC + Richard Hilling + richard.hilling&raadius.net +36552 + Cora Informatique + AUBRY Patrice + paubry&cora.fr +36553 + Altos Solutions + Tim Henke + thenke&altossolutions.com +36554 + Moocom Corporation + Takeshi KOMIYAMA + contact&moocom.co.jp +36555 + TimeTools Limited + Andrew Shinton + ashinton&timetools.co.uk +36556 + Triad Semiconductor, Inc. + Mike Burr + it&triadsemi.com +36557 + Positive Software sprl + Luc Claes + luc&posisoft.com +36558 + Actus Digital + Raphael Renous + raphir&actusdigital.com +36559 + Bitnet.hu Ltd. + Jozsef Czompo + czompo.jozsef&bitnet.hu +36560 + Entr'ouvert + Benjamin Dauvergne + bdauvergne&entrouvert.com +36561 + Nwork Co.,Ltd. + Tomoyasu Fujii + t_fujii&nwork.co.jp +36562 + Green Resource Inc. + Yu Xin + scaner&gmail.com +36563 + Remsdaq Limited + Les Mather + lm&remsdaq.com +36564 + Daniel Fernandes + Daniel Fernandes + danieldesenvolvedor&gmail.com +36565 + Washington State Patrol + Chris Molenda + chris.molenda&wsp.wa.gov +36566 + new frontiers software GmbH + Markus Müller + mueller&new-frontiers.de +36567 + Teva Pharmaceutical Industries Ltd. 
+ Ashkenazy Ronen + Ronen.Ashkenazy&teva.co.il +36568 + Anixter + David Hopper + david.l.hopper&anixter.com +36569 + Gridstore + Antoni Sawicki + as&gridstore.com +36570 + Gemüsering Stuttgart GmbH + Stephan Maier + st.maier&gemuesering.de +36571 + Picoware Ltd + Martin Hayes + Martin.Hayes&picoware.com +36572 + NET4GAS, s.r.o. + Michal Mühlpachr + michal.muhlpachr&net4gas.cz +36573 + Harney + Gerard Harney + oid&harney.eu +36574 + SCI Group + Ashley Jacobs + ajacobs&scigrp.com +36575 + MerLar + Robert Merchant Larson, II + robert&merlar.com +36576 + APB PRODATA LTDA. + Mr. CARL BEQUET + cb&apb.com.br +36577 + Starscriber Corporation + Michael Johnson + mjohnson&starscriber.com +36578 + Kron Telekomunikasyon Hizmetleri A.S + Mete SANSAL + info&kron.com.tr +36579 + Erste Bank Hungary Nyrt. + Akos Solymos + akos.solymos&erstebank.hu +36580 + DVG Deutsche Vertriebsgesellschaft für Publikationen und Filme mbH + Markus Nass + nass&dvg-ff.com +36581 + BSTECNOLOGIA + EDUARDO SAMPAIO + EDUARDO&BSTECNOLOGIA.NET.BR +36582 + Arduino (http://www.arduino.cc/) + Eric Gionet + lavco_eg&hotmail.com +36583 + Tom Aafloen + Tom Aafloen + iana&aafloen.com +36584 + vmkid.com + Nanger Lu + lujnan&gmail.com +36585 + Logan Technologies + David Ferris + eric.ferris&sympatico.ca +36586 + NET IDEA d.o.o. + Tomislav Bosanac + tomislav.bosanac&gmail.com +36587 + NFB Consulting + Nigel Brownett + nbrownett&nfbconsulting.com +36588 + Satellink, Inc + Lorie Goodrich + lorie.goodrich&satellink.com +36589 + Pier 1 Imports + Edward E. Iwanski + eeiwanski&pier1.com +36590 + Rb Datensysteme GmbH + Bastian Reich + reich&weblodge.de +36591 + Prefeitura Municipal de Fortaleza + Daniel Ruoso + daniel&ruoso.com +36592 + The CBORD Group, Inc. + Robert Carlson + rcc&cbord.com +36593 + Paul De Audney + Paul De Audney + pdeaudney&gmail.com +36594 + Certi Networks Sdn Bhd (formerly 'Telistar Solutions Pte Ltd') + Jovi Yeong + jovi&certinetworks.com +36595 + Ena Tecnologia, S.L. + Miguel Angel Martin Mendez + miguel.martin&ena-tecnologia.com +36596 + Dachser GmbH & Co. KG + Fariborz Moradi + fariborz.moradi&dachser.com +36597 + PRDS + Dominique Laigle + dlaigle&gmail.com +36598 + Indra Air Traffic Inc. 
+ Kevin Sivits + ksivits&indraairtraffic.com +36599 + Bremer Landesbank + Oliver Merdes + netze&bremerlandesbank.de +36600 + Nashire AG (startup) (http://www.nashire.com) + Blapp Martin + mblapp&attic.ch +36601 + OTPasswd + Tomasz Fortuna + bla&thera.be +36602 + Observium + Adam Armstrong + adama&memetic.org +36603 + ITechnical + Heiko Nardmann + verwaltung&itechnical.de +36604 + Media Service Provider Ltd + Charlie Allom + hostmaster&mediaserviceprovider.com +36605 + Hannover Rückversicherung AG + Hans-Juergen Kreutzer + hans-juergen.kreutzer&hannover-re.com +36606 + Littlehall Corporation + Mark Bannon + resources&littlehall.com +36607 + Johnny Abrahamsson + Johnny Abrahamsson + johnny.abrahamsson&gmail.com +36608 + WHTY + Sharon Zhu + zhb&whty.com.cn +36609 + Universita' del Salento + Simone Molendini + simone.molendini&unisalento.it +36610 + August Moon LLC + Dennis Chang + support&augustmoonsoftware.com +36611 + Ziborski KG + Zrin Ziborski + office.pen-iana&ziborski.net +36612 + AngloGold Ashanti + Anton van Wieringen + anton&vanwieringen.co.za +36613 + United Europe Holding OJSC + Andrew Syrov + syrov.a&united-europe.ru +36614 + itl Institut für technische Literatur AG + Marcel Rechenberg + marcel.rechenberg&itl.eu +36615 + Identity Networks + John Carter + jcarter&identitynetworks.com +36616 + Arcatem + Armand Dembele + armand.dembele&arcatem.com +36617 + RooX + Andrey Pestov + APestov&rooxteam.com +36618 + Alceo s.r.l. + Amedeo Bonini + abonini&alceo.com +36619 + Beatport, LLC + Jonathan Steffan + admin&beatport.com +36620 + 8086 Limited + Chris Burton + Support&8086.net.uk +36621 + Krithia Ltd + Peter Braidwood + peter&krithia.co.uk +36622 + WELLHAT,INC + Tatsuhiro Kasai + info&wellhat.com +36623 + Swedish Orphan Biovitrum + Petra Tesch + petra.tesch&sobi.com +36624 + Senath Pty Ltd + Kanchana Wickramasinghe + kanchanaw&senathltd.com +36625 + Allesklar.com AG + Tilman Mayer + tilman.mayer&meinestadt.de +36626 + State of Montana + Hunter Coleman + hcoleman&mt.gov +36627 + COMPLUS CZ, a.s. + Martin Krob + martin.krob&complus.cz +36628 + Inphosoft Pte Ltd + Brian Wang + brian.wang&inphosoft.com +36629 + Assens Kommune + Lars Andersen + ljand&assens.dk +36630 + 3stylestudio srl + Andrea Conti + tech&3stylestudio.com +36631 + Portalify Ltd + Hannu Aronsson + haa&portalify.com +36632 + Scalr Inc. + Sebastian Stadil + sebastian&scalr.com +36633 + Ivoclar Vivadent Corporation + Wolfgang Schmied + wolfgang.schmied&ivoclarvivadent.com +36634 + LinkORB + Joost Faassen + j.faassen&linkorb.com +36635 + Kirchliche Paedagogische Hochschule Wien/Krems + Erich Schneeweiß + erich.schneeweiss&kphvie.at +36636 + AMS Ltd. 
+ Dmitriy Volzhin + dv&ams.ru +36637 + Delaware Department of Education + Paul Pond + ppond&doe.k12.de.us +36638 + Planphoria, LLC + Saul Rosenberg + srosenberg&planphoria.com +36639 + TimedOut Services + Jon Bennell + enquiries&timedout.co.uk +36640 + Benjamin Tietz + Benjamin Tietz + benjamin&micronet24.de +36641 + Famos, LLC + Timothy Wall + twall&famos.com +36642 + FireID + Iain Wadds + iain&fireid.com +36643 + ANNAX Schweiz AG (formerly 'ANNAX passenger electronics AG') + Rico Zoss + rico.zoss&annax.ch +36644 + WiseMo A/S + Jakob Bohm + jb-iana&wisemo.com +36645 + cybertronixx consulting services GmbH + Marcus Hoeltkemeier + info&cybertronixx.de +36646 + San Diego State University + Bob Carroll + bob.carroll&alum.rit.edu +36647 + STV Electronic GmbH + Alexander Karavan + a.karavan&stv-electronic.de +36648 + Netherlands Institute for Neuroscience + Adriaan Klop + A.Klop&nin.knaw.nl +36649 + Traficon International n.v. + Johan GACHON + jg&traficon.com +36650 + Joh. Enschedé IT & Consultancy + Melle van der Klip + M.v.d.Klip&je-itc.com +36651 + Sportvision, Inc + Tim Driedger + timdriedger&sportvision.com +36652 + denkagenten GbR + Oliver Gruhn + gruhno&googlemail.com +36653 + Copenhagen Business School + Mikkel Ankjær + ma.it&cbs.dk +36654 + CHU de Nantes + Yvrenogeau Christophe + cyvrenogeau&chu-nantes.fr +36655 + Innovative Energies + Matt Jones + mattj&innovative.co.nz +36656 + Unger, Welsow & Company GmbH + Michael Steuter + support&uw-c.de +36657 + Axept AG + Werner Helbig + interneIT&axept.ch +36658 + OMGPOP + Chris Holt + chris&omgpop.com +36659 + ATRUST COMPUTER CORPORATION + Winnie Chuang + Winnie.Chuang&atrustcorp.com +36660 + PAUL Consultants e.V. + Dirk Methner + it&paul-consultants.de +36661 + MailCleaner + Olivier Diserens + dev&mailcleaner.net +36662 + MTU Friedrichshafen GmbH (formerly 'Tognum AG') + Herbert Roth + registration&tognum.com +36663 + Aish Technologies Limited + Robin Coley + robinc&aishtechnologies.com +36664 + Stadt Warburg + Christian Bierwirth + edv&warburg.de +36665 + Egress Software Technologies Ltd + Neil Larkins + neil.larkins&egress.com +36666 + Golden Frog, Inc. + Philip Molter + noc&goldenfrog.com +36667 + Orange County Container Group LLC + Arthur Dubey + it&csipaper.com +36668 + Canadian Standards Association + Prakash Nair + prakash.nair&csagroup.org +36669 + Medicom Health Interactive + Lance Linder + llinder&medicomhealth.com +36670 + Bundesministerium für Inneres - Republik Österreich + Markus PAST + markus.past&bmi.gv.at +36671 + Guangzhou Changyue Electronic Technology Ltd. + fang hui + fanghui1230&126.com +36672 + FieldView Solutions + Veera Thumuganti + vthumuganti&fieldviewsolutions.com +36673 + IP Infusion Inc + Vishwas Manral + vishwas&ipinfusion.com +36674 + Moving Impressions + Terence Bennell + tjb&moving-impressions.com +36675 + Cossette + Bernard Beaulieu + bernard.beaulieu&cossette.com +36676 + E-Prime Co., Ltd. + Doo Jin Choi + ssscdj&eprime.or.kr +36677 + Nejla + Jon Kristensen + jon.kristensen&nejla.se +36678 + Barmherzige Brüder Österreich + Matthias Frankfurt + matthias.frankfurt&bbprov.at +36679 + eo Networks S.A. + Robert Fabisiak + robert.fabisiak&eo.pl +36680 + B&W Software GmbH + Achim Schön + achim&buw-soft.de +36681 + IsoBeef GbR + Sebastian Gepperth + root&isobeef.org +36682 + Reaonix, LLC + Gerald Jackson + gerald.jackson&reaonix.com +36683 + ApexIdentity Inc. + Paul C.
Bryan + paul.bryan&apexidentity.com +36684 + NetCom Consulting GmbH + Wolfgang Herrmann + wolfgang.herrmann&netcom-consulting.de +36685 + Mozdell Inc, + Rafael Antonio Mosquera Delgado + mozdell&hotmail.com +36686 + iSencia AB + Christian Klemetsson + info&isencia.se +36687 + JaxMP L.L.C. + Stephen Hull + stephen&jaxmp.com +36688 + DOORSolutions + Zhang Ping + dqzhangp&163.com +36689 + J&T Systems + Katsuya Higuchi + info&jt-sys.co.jp +36690 + HCS - HES Cabling Systems + Ufuk Yilmaz + yilmaz&hescs.com +36691 + Dolphin Speed Networks Ltd + Bradley Kite + bradley.kite&gmail.com +36692 + Open Certification Centre + Radoslaw Ejsmont + radoslaw&ejsmont.net +36693 + SGI Japan, Ltd. + Manabu Suzuki + iana_oid&sgi.co.jp +36694 + The Revenue Department + Pornthip Yousathorn + pornthip.yo&rd.go.th +36695 + Cognitive Security, Ltd. + Martin Rehak + ianacontact&cognitive-security.com +36696 + Natural History Museum + Patrick Cunningham + p.cunningham&nhm.ac.uk +36697 + Neo Telecoms + Frederic Gabut-Deloraine + eng&neotelecoms.com +36698 + Toyon + Richard Mirard + rmirard&yahoo.com +36699 + The Engle Group + Jeff Senter + jsenter&erols.com +36700 + Defferrari Sistemas de Informática Ltda + Marco Defferrari + marco&defferrari.com.br +36701 + Cogeco Cable + David Zheng + david.zheng&cogeco.com +36702 + Cerbini + Francesco Cerbini + francesco&cerbini.eu +36703 + ABR Innovations, LLC. + Mark Pack + mark&abrinnovations.com +36704 + My Ways, Org. + Eric R Ross + eric.ross&myways.org +36705 + National Time Service Center,Chinese Academy of Sciences + Guo wei + guowei&ntsc.ac.cn +36706 + Fundación Centro Oncológico de Galicia "José Antonio Quiroga y Piñeyro" + Manuel J. Torres + informatica&cog.es +36707 + SpiderCloud Wireless, Inc. + Paul Fraley + iana&spidercloud.com +36708 + Sensus Informatika Kft. + Szabolcs Csermák + csszabolcs&srwtrade.hu +36709 + Mezeo Software Corporation + John Eastman + jeastman&mezeo.com +36710 + Anoigo Services Ltd. + Mari Heesbeen + mari.heesbeen&anoigo.nl +36711 + Micromodje Industries + Yasamin Gharib + yasamin.gharib62&gmail.com +36712 + The Cell + Daniel Bradshaw + iana-contact&the-cell.co.uk +36713 + Alepo USA + Rajesh Mhapankar + rajesh&alepo.com +36714 + jackl0phty, LLC + Gerald L. Hevener Jr. + jackl0phty&gmail.com +36715 + SEAS-NVE + Claus Larsen + ckl&seas-nve.dk +36716 + Jiu Info & Tech + Youster Park + os5070&chol.com +36717 + Studentenwerk München + Wolfgang Walter + iana&kontakt.stwm.de +36718 + Futurice Ltd + Mats Malmsten + admin&futurice.com +36719 + onedrous.com + John Chandler + admin&onedrous.com +36720 + Evax S.A.R.L. 
+ Thomas Charbonnel + iana&evax.fr +36721 + Cuker Interactive + Brian LaMere + brian&cukerinteractive.com +36722 + TSC Technologies + Corneil du Plessis + corneil&tsctech.com +36723 + ILT Innovations AB + Stefan Bernbo + registry&ilt.se +36724 + Kaplan Research and Application Prototyping + Hadriel Kaplan + hadrielk&yahoo.com +36725 + Artisan Computer Services LLC + Zack Williams + info&artisancomputer.com +36726 + Centered Logic, LLC + Subramaniam Aiylam + av&centeredlogic.com +36727 + Benchmark Systems, LLC + Eric R Ross + eric.ross.9186291637&gmail.com +36728 + Ellerines + Neil Liebenberg + neil.liebenberg&ellerines.co.za +36729 + EAS Schaltanlagen GmbH + Andreas Hirn + snmp&eas-schaltanlagen.de +36730 + TAGSYS RFID + Cyril Catalanotto + cyril.catalanotto&tagsysrfid.com +36731 + Agorabox + Marc Schlinger + marc.schlinger&agorabox.org +36732 + PDR Network + Douglas Kunz + douglas.kunz&pdr.net +36733 + ForgeRock + Ludovic Poitou + ludovic.poitou&forgerock.com +36734 + Leibniz Center for Marine Tropical Ecology (ZMT) + Christoph Lutz + christoph.lutz&zmt-bremen.de +36735 + Regione Emilia Romagna + Bucciarelli Fabio + FBucciarelli&Regione.Emilia-Romagna.it +36736 + Balance of Nature + Cedric Ebisch + cedric&balanceofnature.com +36737 + Pošta Crne Gore + Stevan Ljumovic + stevan.ljumovic&postacg.me +36738 + BLINQ Networks Inc. + Tracey Bogue + tracey.bogue&blinqnetworks.com +36739 + Christophe Garault + Christophe Garault + christophe&garault.com +36740 + Sinicnet Technology Co., Ltd. + Zhenye Liu + liuzhenye&sinicnet.com.cn +36741 + CV Rekatama Elektronika Persada + David Suryadinata + david&innovativeelectronics.com +36742 + Provincial Government of the Western Cape + Deidre Marais + Dmmarais&pgwc.gov.za +36743 + GESTICUMPRE + Abílio Fernandes + abiliofernandes&gesticumpre.pt +36744 + mcclishingramcatering service + Loretta Moton Coleman + cole506&bellsouth.net +36745 + ninjo-workstation, EuMetSys c/o Ernst Basler + Partner GmbH + Thomas Schonhof + info&ebp.de +36746 + Papendorf Software Engineering GmbH + Falk Kohlhase + info&papendorf-se.de +36747 + Vertica A/S + Peter Loft + plj&vertica.dk +36748 + Leine & Linde AB + Joel Zachrisson + j.zachrisson&leinelinde.se +36749 + EML Speech Technology GmbH + Markus Klehr + markus.klehr&eml.org +36750 + JaCoTec + Marco Jakobs + mj&jacotec.de +36751 + RPC Scandinavia + René Paw Christensen + spam1&rpc-scandinavia.dk +36752 + Datasinc + Igor Pereira Peixoto + igor&datasinc.com.br +36753 + Engineering with IT + Dave Moats + dave&engwit.com +36754 + BZK GROUP Sp. z o.o. + Paweł Jankowski + pawel.jankowski&bzkgroup.pl +36755 + stocker-schmid.ch + Arthur Stocker + arthur.stocker&gmail.com +36756 + Fossmart Pvt Limited + Janaka Wickramasinghe + janaka&fossmart.net +36757 + GuanFang Technologies Co., Ltd + Pines You + pinesyou&163.com +36758 + FORTRUST LLC + Bryon Miller + noc&fortrustllc.com +36759 + eks Engel GmbH & Co. KG + Christian Gieseler + info&eks-engel.de +36760 + Ancitel S.p.A. + Donatantonio Mazzei + mazzei&ancitel.it +36761 + Datera S.A. + Andrzej Dopierała + dopieralaa&datera.pl +36762 + Actisis GmbH + Frank Losemann + oid-support&actisis.com +36763 + Riverside Health System + Sherry Maynard + sherry.maynard&rivhs.com +36764 + Linuxstar DE + Klaus Troeger + klaus&linuxstar.de +36765 + Herff Jones Inc + Robert Montgomery + rrmontgomery&herffjones.com +36766 + Ixolit GmbH + Stefan Schlesinger + s.schlesinger&ixolit.com +36767 + Rockyou + Brad Fino + brad&rockyou.com +36768 + IT Prof Service Ltd.
+ Serg Livitin + livitin&itprofservice.ru +36769 + Originity + BARBIER Jérôme + frontonas&free.fr +36770 + Redmark + Joost ten Brink + snmpadmin&redmark.nl +36771 + Learning Unlimited, Inc. + Jason B. Alonso + jason&learningu.org +36772 + Funk-Electronic Piciorgros GmbH + Marco Jakobs + mj&piciorgros.com +36773 + SecurActive + Gilles Huguenin + admin&securactive.net +36774 + op5 AB + Aron Lander + admin&op5.com +36775 + Akera S.r.l. + Luigi Bellio + luigi.bellio&akera.it +36776 + Washington State University Extension Energy Program + James L. Colombo + colomboj&energy.wsu.edu +36777 + sedOSS AB + Jonathan Petersson + jpetersson&sedoss.com +36778 + Leap Power + Haluk Aral + haral&leap-power.com +36779 + University Corporation for Atmospheric Research + Marla Meehl + marla&ucar.edu +36780 + Format Computing + Jarosław Tabor + jarek&srv.pl +36781 + Monument Systems LLC + Chris Muller + chris.muller&healthtrio.com +36782 + Premier Wireless, Inc. + Michael Long + miklong2&gmail.com +36783 + Tasco Sales (Aust) Pty Ltd + Anthony Kemp + admin&tasco.com.au +36784 + FMS Software Ltd. + Helmuts Razdovskis + helmuts.razdovskis&fms.lv +36785 + Diino AB + Erik Gulliksson + erik.gulliksson&diino.net +36786 + TechMA s.r.l. + Alessandro Riggi + alexriggi&techma.it +36787 + Futurex, LLC + James Espinoza + jespinoza&futurex.com +36788 + Autoridade Nacional de Segurança Rodoviária + João Luís Silva Assunção + jassuncao&deetc.isel.ipl.pt +36789 + BONET Group + Peter Mihalik + peter.mihalik&bonet.sk +36790 + POWERVAR INC + BRUCE RHODES + BRUCER&POWERVAR.COM +36791 + eac easy automation components GmbH + Gerhard Wagner + gerhard.wagner&eac.biz +36792 + ZyTrust SA + José M. Fernández Anaya + jfernandez&zytrust.com +36793 + Kiwibank + Graham Pohl + graham_pohl&hotmail.com +36794 + Web4U s.r.o. + Michal Humpula + praha&web4u.cz +36795 + Allgemeine Baugesellschaft - A. Porr Aktiengesellschaft + Radmer Quirin + iana&porr.at +36796 + Norbit + Erik Ryan + erik&norbit.no +36797 + No Limits Software + Dave Cole + dave.cole&nolimitssoftware.com +36798 + Teamup Technology Limited + Kam-Wah Pong + kwpong&teamup.com.hk +36799 + Codeworx3 Pty Ltd + Larry Jones + oidsupport&codeworx.com.au +36800 + DONGGUANG APTECH ELECTRONICS CO., LTD + JIAN ZHANG + dvdrw-zj&126.com +36801 + kokii + chenchi + tardychen&kokii.com +36802 + Cazoodle, Inc. + Jeremy Jones + sysadmin&cazoodle.com +36803 + Flughafen Stuttgart GmbH + Michael Hartmann + hartmann&stuttgart-airport.com +36804 + UCN GmbH + Matthias Subik + matthias.subik&ucn.at +36805 + RideCharge Inc. + Scott Sanders + ssanders&taximagic.com +36806 + Great River Energy + Scott Hughes + shughes&grenergy.com +36807 + NetWitness + Josh Rosenthol + josh.rosenthol&netwitness.com +36808 + The Sherwin-Williams Company + Sherwin-Williams PKI + pki.admins&sherwin.com +36809 + ITERANET Ltd + Golubtsov Aleksey + cis2admin&iteranet.com +36810 + 237 Solutions Pty Ltd + Mike Lee + mike&237solutions.com.au +36811 + Consendo Systems A/S + Klaus Heigren + klaus&consendo.dk +36812 + CSP, Inc. 
+ Andrew Shieh + ashieh&cspi.com +36813 + SafeTek USA, LLC + Robert Tilley + rftilley&safetekusa.com +36814 + DEFFAYET + Nicolas DEFFAYET + hostmaster&deffayet.com +36815 + Savings Society Inc + Jeff Forsyth + jeff.forsyth&savingssociety.com +36816 + Beijing Abloomy Technologies Co., Ltd (formerly 'Beijing Yunshengfa Technologies Co., Ltd') + Ou Zhongyun + zhongyun.ou&abloomy.com.cn +36817 + Bohemia Interactive Simulations + Michael Bode + michael.bode&bisimulations.com +36818 + Accelya France SAS + Sylvain Rigou + Service.systeme&accelya.com +36819 + Yare Objects AB + Mattias Oden + mattias.oden&yare.se +36820 + Updata Infrastructure UK Limited + Ollie Anderson + ollie.anderson&updata.net +36821 + Local Matters, Inc. + Joel Maslak + jmaslak&localmatters.com +36822 + Adaffix GmbH + Martin J. Laubach + mlaubach&adaffix.com +36823 + Conference Plus, Inc. + Anthony Graczyk + agraczyk&conferenceplus.com +36824 + UFPS "Tatarstan pochtasy" - filial FGUP "Pochta Rossii" + Levkovskiy Grigory + pki&tatpochta.ru +36825 + Grenada Lake Medical Center + Sarah W Longest + sarah.longest&glmc.net +36826 + Control Zero + Nathan Malyon + oid&czero.com.au +36827 + Message Solution + leo lu + luping&messagesolution.cn +36828 + CEZ, a. s. + Ondrej Komorous + certmaster&cez.cz +36829 + LAGARDERE ACTIVE + Guy WISNIEWSKI + guy.wisniewski&lagardere-active.com +36830 + WHATEVER MOBILE GmbH + Kevin Fibich + noc&whatevermobile.com +36831 + Ratio Consulta + Stefano Picerno + stefano.picerno&ratioconsulta.it +36832 + Pocket Kings Ltd + James Beckett + snmp&pocketkings.ie +36833 + Authenware Corporation + Miguel Arrighi + miguel.arrighi&authenware.com +36834 + Omnis Cloud + Lou Picciano, MD + admin&OmnisCloud.com +36835 + Solimar Systems, Inc. + Drew Sprague + drew.sprague&solimarsystems.com +36836 + Brian Carlson + Brian Carlson + pen&briancarlson.org +36837 + Hangzhou Future Technology Co.,Ltd + ZengHui + zh&futuretel.com.cn +36838 + GK Software AG + Michael Opel + mopel&gk-software.com +36839 + Connect Informatica S.r.l. + Giancarlo Stoppani + giancarlo.stoppani&connectinformatica.it +36840 + Peter Kutting IT-Services + Yuna Kamzelak Morgenstern + kamzelak&kutting-its.de +36841 + Thor DMS + Geert Kooijmans + corp.info&thor-dms.com +36842 + CSR Ltd + Damian Strode + dstrode&csr.com.au +36843 + Valley Telegence Private Ltd + Sonal Dighe + sonal&valleytelegence.com +36844 + Sozialhilfeverband Kirchdorf an der Krems + Bernhard Winkler-Ebner + bernhard.winkler-ebner&ki.shvki.at +36845 + ABACOM-Ingenieurgesllschaft + Dipl. Ing. Lothar Feldmann + lothar.feldmann&electronic-software-shop.com +36846 + OOO "SMU OFISSTROY" + Ivan V Kvasnikov + kvasnikov&o-st.ru +36847 + International Game Technology, Inc. + Erik Petersen + erik.petersen&igt.com +36848 + CRE-DO GmbH + Guido Schnepp + guido.schnepp&cre-do.de +36849 + Hanwha Vision Co., Ltd. (formerly 'Hanwha Techwin Co., Ltd.') + Jihye Lee + jihye17.lee&hanwha.com +36850 + Dharma Drum Buddhist College (DDBC) + Simon Wiles + swiles&ddbc.edu.tw +36851 + OutputLinks, Inc. + Andy Plata + ap&outputlinks.com +36852 + Bnei Baruch + Amnon Israely + amnonbb&gmail.com +36853 + axilaris GmbH + Oliver Hintzsche + o.hintzsche&axilaris.de +36854 + BURGO GROUP SPA + Giuseppe Fiducia + pen_iana&burgo.com +36855 + Helvetia Assicurazioni SA + Lamberto Abelli + lamberto.abelli&helvetia.it +36856 + GEDIA Gebrüder Dingerkus GmbH + Rolf Geyer + support&gedia.com +36857 + Holztechnikum Kuchl + Stefan Hauer + support&holztechnikum.at +36858 + THQ Inc. 
+ Rory Prendergast + oid.admin&thq.com +36859 + Ixia (formerly 'BreakingPoint Systems') + Kris Raney + kraney&ixiacom.com +36860 + True Value Company + Eric Tuttleman + Eric.Tuttleman&truevalue.com +36861 + Pacmed International Inc + David Collett + pacmedintl&yahoo.com +36862 + Mitrol SRL + Adrian Kowal + akowal&mitrol.net +36863 + sankai + Kaiming Xu + sankai99&126.com +36864 + South Stream + Markus Anthener + helpdesk&southstream.info +36865 + delight software gmbh + Zurschmiede Lukas + l.zurschmiede&delightsoftware.com +36866 + Zott GmbH & Co. KG + Heiko Friebel + edv-rechenzentrum&zott.de +36867 + Zaměstnanci.com s.r.o. + Jan Opravil + postmaster&up-group.org +36868 + SignGuard Europe Ab + Sven Peter Hellden + sph&SIgnGuard.se +36869 + Sofium + Jari Jokinen + jari.jokinen&sofiumgroup.com +36870 + Walter Parr + Walter Parr + walter.parr&siemens.com +36871 + Daylife, Inc. + Scott Briggs + scottb&daylife.com +36872 + Tekno Telecom, L.L.C. + Donald Malesh + dmalesh&teknotelecom.com +36873 + Wideportal IT Solutions UG (haftungsbeschränkt) + Holger Lauinger + holger.lauinger&wideportal.de +36874 + Anerma Electronics b.v.b.a + Rudy Van Hecke + rudy.van.hecke&anerma.be +36875 + TOYO Corporation + Yoshiyuki Saitoh + saitohy&toyo.co.jp +36876 + Chongqing Rural Commercial Bank + Junfeng Cao + caojfeng&gmail.com +36877 + Sundale Garden Village + Richard Ham + netadmin&sundale.org.au +36878 + Insta Group Ltd. + Miia Onkalo + miia.onkalo&insta.fi +36879 + New Mexico Consortium + Andree Jacobson + andree&newmexicoconsortium.org +36880 + tolj.org system architecture + Stipe Tolj + st&tolj.org +36881 + Simon Josefsson Datakonsult AB + Simon Josefsson + simon&josefsson.org +36882 + Texas Department of Transportation + Justin Krause + Justin.Krause&txdot.gov +36883 + Modernizing Medicine, Inc. + Daniel Cane + contact&modernizingmedicine.com +36884 + ESE (Mod-Electronics Inc.) + David Pitts + dpitts&ese-web.com +36885 + CImbal Inc. + Yuri Drozd + yuri&cimbal.com +36886 + Oliver Wyman Group + Brendan Welter + brendan.welter&oliverwyman.com +36887 + ArtPlant + Maxim Pushkar + archon&mail.ru +36888 + E-Formation GmbH + Jens Bradler + jbradler&eformation.de +36889 + China Bluedon information security technology co., ltd + ZhiLiang.Shen + shenzhi&163.com +36890 + future Training & Consulting GmbH + Frank Nausedat + frank.nausedat&futuretrainings.com +36891 + Nomadic Technologies Inc. + Myongsu Choe + msc&nomadictechs.com +36892 + Axel Springer SE + Niko Alandt + niko.alandt&axelspringer.de +36893 + Takacom Corporation + Takahiro Yokota + yokota&takacom.co.jp +36894 + Compagnie Européenne des Emballages Robert Schisler + Jean-Emmanuel Hallopé + jean-emmanuel.hallope&ceeschisler.fr +36895 + Guangdong Newstart Technology Service Co.,Ltd + Wangfang + wang.fang&gd-linux.com +36896 + Quality Manager Plus AS + Per Abich + per&qmplus.net +36897 + OEConnection LLC + John Kelly + JOHN.KELLY&OECONNECTION.COM +36898 + Finning (Canada) + Clint Garden + cgarden&finning.ca +36899 + CEVA Sante Animale + Nicolas PRADIER + nicolas.pradier&ceva.com +36900 + Atricore, Inc. 
+ Sebastian Gonzalez Oyuela + sgonzalez&atricore.com +36901 + Art Is Rebellion + Steve Braun + iana-pen&artisrebellion.com +36902 + OneWaiheke + Andrew Watkins + OneWaiheke&gmail.com +36903 + Osyris + Miguel Zuniga + miguelzuniga&gmail.com +36904 + Legion Telekommunikation GmbH + Robert Schlabs + sysadmin&legion.de +36905 + MedCentral Health System + Michael Mistretta + mmistretta&medcentral.org +36906 + Institute for Internet Technologies and Applications + Andreas Steffen + andreas.steffen&hsr.ch +36907 + Luggage Pros + Matt Janssen + matt&luggagepros.com +36908 + Research Industrial Systems Engineering (RISE) Forschungs-, Entwicklungs- und Großprojektberatung GmbH + Andreas Ntaflos + andreas.ntaflos&rise-world.com +36909 + Bison IT Services AG + Jimmy Rüedi + jimmy.rueedi&bison-its.ch +36910 + Pacific Communications + Mark Shannon + ip&pacom.com.au +36911 + EEtrust + Carter Chang + changj&eetrust.com +36912 + Lenglet imprimeurs + Marie SAVARY + msavary&lenglet-imprimeurs.fr +36913 + Asociación Cooperativa Sutronix, R.L. + Luis Urdaneta + luis_urdaneta&interlink.net.ve +36914 + Square, Inc. + Robert Liesenfeld + ops&squareup.com +36915 + ALFATRUST CERTIFICATION S.A. + Calugareanu Ciprian Marius + office&alfasign.ro +36916 + Gemeente Groningen + Henk Klöpping + henk&snow.nl +36917 + Tunz.com + Thierry Coopman + thierry&tunz.com +36918 + Altran + Antero Santos + antero.santos&altran.pt +36919 + Mentana - Claimsoft GmbH + Juergen Ludyga + pen.iana&mentana.de +36920 + Yearsun Industrial Co., Ltd. + Fei Jung Kuo + simon&yearsun.com.tw +36921 + LLC SIBUR + Shubin Aleksey + noc&sibur.ru +36922 + Eurohueco S.A. + Jannigje Herzog + jherzog&arvato-print.es +36923 + schlott gruppe Aktiengesellschaft + Werner Simon + werner.simon&schlottgruppe.de +36924 + Doosan Power Systems Limited + Marcus Wrightson + mwrightson&doosanpowersystems.com +36925 + Heinrich Schmid Systemhaus GmbH & Co. KG + Alexander Lerner + a_lerner&heinrich-schmid.de +36926 + Prime Therapeutics + Ted Blakley + tblakley&primetherapeutics.com +36927 + Innovasic Semiconductor Inc + David Alsup + dalsup&innovasic.com +36928 + Technical University of Lodz Computer Center + Pawel Szychowski + pawel.szychowski&p.lodz.pl +36929 + Vitri Solutions Private Limited + Reesha Padmanabh + reesha.padmanabh&vitrisystems.com +36930 + Zoe Evangelistische Vereinigung + Patrick Zahnd + atreju&gmx.ch +36931 + Aboagye Okyere Emmanuel + Aboagye Okyere Emmanuel + eaboagyeokyere&yahoo.com +36932 + Zetetic LLC + Stephen Lombardo + sjlombardo&zetetic.net +36933 + PXL S.C. ARL + Giovanni Pico + g.pico&pxl.it +36934 + Lemco Electronics + Lempidakis Constantinos + info&lemco.gr +36935 + Kentuckiana Pride Foundation + Aaron J Angel + aaron.angel&kypride.com +36936 + Clinical Software Solutions + Bryan Jones + bryan.jones&clin1.com +36937 + Alphasoft Nederland + Henk Klöpping + henk&fortean.org +36938 + Profweb + Jonathan-Marc Lapointe + jmlapointe&profweb.qc.ca +36939 + Sentinela Security Ltda. + Vinícius da Silveira Serafim + serafim&sentinelasecurity.com.br +36940 + TLO + Jason Rowe + itsecurity&tlo.com +36941 + Solekai Systems Corporation + Kert Jans + kjans&solekai.com +36942 + KAVECO-Workflow + Kees Verduin + kees&kees-verduin.nl +36943 + Venice International University + Alessandro De Rossi + sysadmin&univiu.org +36944 + ANO "TV-Novosti" + Paul Garmashov + pgarmashov&rttv.ru +36945 + Advance Milles System Co.,Ltd. + Andy Huang + andy775211&hotmail.com +36946 + Web Technology Projects ltd. + Ivan Soshnikov + ivan&wtp.su +36947 + Multilab AS + Kai-W. 
Nessler + kai&multilab.no +36948 + OpenIT GmbH + OpenIT Admins + admins&openit.de +36949 + Inetum + Contact Registrant + registrantcontact&inetum.com +36950 + Dédalo Helicolor S.A. + Raúl Campos Muñoz + rcampos&dedalogrupografico.es +36951 + Southern Illinois Univeristy + Michael A. Shelton + admin&siu.edu +36952 + Kunming Railway Signalling And Telecommunications Department + Zhang Shaowei + z_sw&hotmail.com +36953 + VOX - Norwegian Agency for Lifelong Learning + Rolf Johannessen + rolf.johannessen&vox.no +36954 + 42 Units - IT, Ltd. + Paul Garmashov + adm&42u.it +36955 + ELSIST Srl + Sergio Bertana + sbertana&elsist.it +36956 + SEMATECH + Mark Porter + mark.porter&sematech.org +36957 + libvirt + Daniel Veillard + daniel&veillard.com +36958 + Receptive IT + Alex Ferrara + alex&receptiveit.com.au +36959 + The Cellars Group LLC + Tom Blakely + tfb&thecellarsgroup.com +36960 + TPH Acquisitions LLLP + Thomas Blakely + tblakely&thepartshouse.com +36961 + BOC GmbH + Ronny Sommer + boc&boc-de.com +36962 + Advertiser Technologies + Diogo Carlos Fernandes + diogo&advertiser.com.br +36963 + Communications Supply & Support Limited + Hugh Wills + hugh.wills&comss.co.uk +36964 + Arkodia Technologies, LLC + Pete Beazley + admin&arkodia.com +36965 + CriptoTec + Luis Fernando Pardo + luis.pardo&criptotec.es +36966 + CCDMD + Jonathan-Marc Lapointe + jmlapointe&ccdmd.qc.ca +36967 + FNet + Fabrice Bucher + contact&dafab.ch +36968 + Ocom Communications, Inc. + Yogi Yao + Yogi.Yao&Ocom.com.tw +36969 + NELLS ELECTRONIC SYSTEMS (NES) + Nells K.S + louisnells&gmail.com +36970 + FinanceScout24 GmbH + Elmar Weber + iana-pen&financescout24.de +36971 + Transcend Communication Beijing Co.,Ltd + Nancy Guan + gxy&transcendcom.cn +36972 + Banco Central de Honduras + Edy Javier Milla Reyes + edy.milla&bch.hn +36973 + Phoenix Capital + Andriy Yemets + admin&phnx.com.ua +36974 + Costain Group PLC (formerly 'Simulation Systems Ltd') + Regis Grosclaude-Galland + regis.galland&costain.com +36975 + UrgentCare Works, LLC + J. 
Pepper Garretson + pgarretson&poseidongroup.com +36976 + Critter bvba + William Leemans + wle&critter.be +36977 + Eloqua Corporation + Dave Kuyper + domain&eloqua.com +36978 + Pico Digital Inc + Cameron Esfahani + cesfahani&picodigital.com +36979 + accumio GmbH + Juergen Schmied + juergen.schmied&accumio.de +36980 + Steinle Solution-Factory GmbH + Jochen Schmidt + jochen&steinle.net +36981 + Kindersprachscreening + Andreas Bodensohn + info&kindervorsorgezentrum.com +36982 + UNESCO-IHE + Roel Noorman + r.noorman&unesco-ihe.org +36983 + Kiwilight.com Limited + Kaiting Chen + kaitocracy&gmail.com +36984 + TAO Beratungs- und Management GmbH + Leonhard Preis + administration&tao.at +36985 + Sewar Technologies + Rayed Alrashed + rrashed&wirefilter.com +36986 + Piratenpartei Oesterreichs + Leonhard Preis + technik&piratenpartei.at +36987 + OnVPS + Tres Wong-Godfrey + tres.wong-godfrey&onvps.com +36988 + host4u.at + Markus Hof + office&host4u.at +36989 + BEIJING XINGHUA BRAINRUST TECHNOLOGY CO.,LTD + Wei Zhang + 15910798928&163.com +36990 + Prinovis Nuernberg GmbH + Roland Peters + netcontrol&prinovis.com +36991 + CASTEL + Ali MAHMOUD + ali.mahmoud&castel.fr +36992 + SystemGroup + Dmitriy Kim + Dmitriy.Kim&systemgroup.com.ua +36993 + Lucid Design Group + Chris Haumesser + sysadmin&luciddg.com +36994 + Fastnet + Olivier Bourgeois + postmaster&fastnet.ch +36995 + akrido.net + Jonathan-Marc Lapointe + jmlapointe&akrido.net +36996 + Fordingbridge Computer Services + Chris Dennis + chris&fbcs.co.uk +36997 + XuZhou Medical College + Li Jian + lijian&xzmc.edu.cn +36998 + MTek Enterprises, LLC + Bradley Morris + hostservant&mtekentp.com +36999 + Instituto de Tecnologia Química e Biológica + Daniel Feliciano Branco + iana-pen&itqb.unl.pt +37000 + Joel Hatcher + Joel Hatcher + jshtux&gmail.com +37001 + MindShift Corp + Andrew K Sharicz + andrew.sharicz&mindshift.com +37002 + Diversified Control, Inc. + Lewis G. James + lewjames&divcon.net +37003 + Vertu + Kevin Quigley + kevin.quigley&vertu.com +37004 + VMTurbo + Shai Benjamin + sheyeh&vmturbo.com +37005 + Medizinische Einrichtungen des Bezirks Oberpfalz GmbH + Michael Raith + info.pki&medbo.de +37006 + Koger, Inc. + Juraj Tomasov + juraj.tomasov&kogerusa.com +37007 + LOGIWAYS + Thierry Lelegard + thierry.lelegard&logiways.com +37008 + Cleopatra + Esteve Blanch Sanmartí + esteve&esteveblanch.cat +37009 + CHIP-N LLC + Roman Eikine + roman&chip-n.ru +37010 + Saguna Networks LTD + Evgeny Mugerman + evgeny&saguna.net +37011 + KFSoftware + Mr Kieran Foot + KieranFootPrivate&hotmail.com +37012 + Bestgo.pl Sp. Z O.O. + Kamil Kurzyk + admin&bestgo.pl +37013 + eoswan + Juan Manuel Martinez + info&eoswan.com +37014 + Hanming Technology Co., Ltd + He Jianhui + flashrainy&163.com +37015 + Harms Consulting Partners Pty Ltd + Josh Levy + support&harmsconsulting.com +37016 + Unicredit S.P.A. + Francesco Oliva + francesco.oliva1&unicreditgroup.eu +37017 + NATION Technologies Group, Inc + Rod Meli + VP.Technology&nationtechnologies.com +37018 + Phabrix + Pete Fraser + pete&phabrix.com +37019 + LANOS Computer GmbH & Cie KG + Hans-Juergen Fockel + hansjuergen.fockel&lanos.de +37020 + Dichotomia + Jerome Herman + jherman&dichotomia.fr +37021 + ScandFibre Logistics AB + Mats Berlin + mats.berlin&scandfibre.se +37022 + Scientific and Production Company Granch Ltd. + Rashid N. Achilov + achilovrn&granch.ru +37023 + KazTransCom JSC + Azamat T. 
Shymbolat + a.shymbolat&kaztranscom.kz +37024 + edv neubrand + martin neubrand + neu.brand&gmx.de +37025 + Maverick Wireless Solutions Private Limited + Shyamal Kumar + shyamal.kumar&mavericknets.com +37026 + Mylab Oy + Petri Heikkilä + admin&mylab.fi +37027 + rc-tec GmbH + Christoph Reichl + c.reichl&rc-tec.at +37028 + Fernwaerme Ulm GmbH + Werner Straehle + it-service&fernwaerme-ulm.de +37029 + Technology On Demand, Inc. + Ed Mana + emana&technologyondemand.com +37030 + G4S Technology Limited + Kevin Hollingworth + kevin.hollingworth&usa.g4s.com +37031 + Studio MekTek, Inc + Vince McMullin + vincem&mektekdev.com +37032 + RR Donnelley Europe Sp. z o.o. + Krzysztof Bar + krzysztof.bar&rrd.com +37033 + Eden Rock Communications + Beesun Liou + beesun.liou&edenrockcomm.com +37034 + Goatrace Ltd + Keith Young + goatraceltd&gmail.com +37035 + Kitami Institute of Technology + Hiroshi Masui + hgmasui&mail.kitami-it.ac.jp +37036 + Geekerati + Eric Fehr + eric&regolith.net +37037 + America First Federal Credit Union + Doug Youngberg + dyoungberg&americafirst.com +37038 + ComplexNet Ltd. + Tamas Gergely Peter + info&complexnet.hu +37039 + EtherWorks Pty. Ltd. + Terry Hoppitt + dns&etherworks.com.au +37040 + GoldCard spol. s r. o. + Antonin Hanacek + hanacek&goldcard.cz +37041 + Thales Alenia Space España + Ana Yun García + ana.yungarcia&thalesaleniaspace.com +37042 + Bee Software Inc. + Jilei Xie + xjl&99jsj.com +37043 + Irkutsk regional clinical hospital + Alexander Puschenko + puschenko_aa&iokb.ru +37044 + DINEC International + Luc Van Walleghem + lvw&dinec.be +37045 + LexisNexis Univentio B.V. + Daniel Knippers + dknippers&univentio.com +37046 + Fjord Technology UG (haftungsbeschränkt) + Martin Hoppe + info&fjord-technology.com +37047 + InterCard Kartensyteme GmbH + Chris Bartl + c.bartl&intercard.org +37048 + Tetra Tech Inc. + Bill Purrier + bill.purrier&tetratech.com +37049 + UFCSPA + Klaus Engelmann + klaus&ufcspa.edu.br +37050 + Techyond Inc. + Frank Lafitte + frank.lafitte&techyond.com +37051 + Oliver Roll + Oliver Roll + mail&oliver-roll.com +37052 + and One Inc. + Makoto Dei + support&andone.co.jp +37053 + MMR Network Management Science & Technology Co., Ltd + HaoQing Shi + mr_shq&163.com +37054 + AirP Systems LLC + Randy Prakken + randy&airpsystems.com +37055 + JM Properties + Jerome Kaidor + jerry&jm-properties.com +37056 + ATM Japan, Ltd. + Ohtsuka, Mitsuo + ohtsukamo&atmj.co.jp +37057 + Quickpay Ltd. (formerly 'MPS Quickpay') + Andrew Kochetkov + a.kochetkov&quickpay.ru +37058 + thockar IT consulting + Thomas Eckardt + Thomas.Eckardt&thockar.com +37059 + Ultra Communication & Integrated Systems (formerly 'Command and Control Systems') + Mike Hall + mike.hall&ultra-sss.com +37060 + Silicon & Software Systems Ltd. + Conor O'Donoghue + conor.odonoghue&s3group.com +37061 + Sargento Foods Inc. + Tim Melis + msadmins&sargento.com +37062 + RigNet, Inc. + Mordecai Black + cai.black&rig.net +37063 + SARL AEGLE INFORMATIQUE MEDICALE + Philippe Brun + philippe.brun&aegle.fr +37064 + The University of Kitakyushu + Teruaki Egami + josen-p&kitakyu-u.ac.jp +37065 + COTEK ELECTRONIC IND.CO.,LTD + Donald Yang + donald&cotek.com.tw +37066 + OE INVESTMENTS LIMITED + Vladimir Kozhukalov + v.kozhukalov&qiwi.ru +37067 + Centrum Rozwoju Szkół Wyższych TEB Akademia sp. z o.o.
+ Szymon Winiarz + s.winiarz&teb-akademia.pl +37068 + Arizona Coppersoft + Glen Hein + ghein&azcoppersoft.com +37069 + Broadband Antenna Tracking Systems + Tim Foster + tfoster&batswireless.com +37070 + RadioOpt GmbH + Ernesto Zimmermann + ernesto.zimmermann&radioopt.com +37071 + Sofistone Oy + Arttu Laine + sofistone.oid&sofistone.com +37072 + Lantech Communications Global, Inc. + Stone Hsu + stone&lantechcom.tw +37073 + BEIJING JETSEN TECHNOLOGY CO.,LTD + wang zhen shan + wangzhenshan&jetsen.cn +37074 + Assam Electronics Development Corporation Limited (AMTRON) + Mahendra Kumar Yadava + md&amtron.in +37075 + rootsys + Riccardo Bassi + admin&rootsys.it +37076 + IPETRIK + Ing. Ivan Petrik + ivan.petrik&ipetrik.cz +37077 + Albuquerque Public Schools + Davis Lee + lee_d&aps.edu +37078 + Nor1, Inc. + Derrald Vogt + derrald.vogt&nor1.com +37079 + Wofford College + J. Bart Casey + caseyjb&wofford.edu +37080 + 3VR Security, Inc. + Marcus Marinelli + marcus&3vr.com +37081 + Wicked Trickster LLC + Jessie Rourke + iana-admin&wickedtrickster.com +37082 + Ministry of Finance + Keyura Chainapong, Prinya Pridiyanon + kyura898&mof.go.th +37083 + Blok Software Development (BlokSoft) + Tsadok Moshe Blok + m3rlin.liza&gmail.com +37084 + 9h37 + Florian Le Goff + penadmin&9h37.fr +37085 + KDS + Sébastien Kremer + skremer&kds.com +37086 + Elemental Technologies, Inc. + Greg Truax + gregt&elementaltechnologies.com +37087 + Holy Cross Energy + Tony May + itdepartment&holycross.com +37088 + Teamsourcing Cia. Ltda. + Javier Valencia + jvalencia&teamsourcing.com.ec +37089 + Mediengruppe Pressedruck Dienstleistungs-GmbH & Co. OHG + Michael Gruenzweig + michael.gruenzweig&presse-druck.de +37090 + OUVREZLABOITE + YVES MARTIN + yves.martin&ouvrezlaboite.com +37091 + Orion Virtualisation Solutions Pty Limited + Sheng Yeo + sheng.yeo&orionvm.com.au +37092 + CJ Computers + Chris Barron + webmaster&cjcomputers.dyndns-server.com +37093 + Consolidated Edison Company of New York Inc + John Lim + limj&coned.com +37094 + MadPilot Productions + Myles Eftos + myles&madpilot.com.au +37095 + GH-Informatik GmbH. + Helmut Ghirardini + helmut.ghirardini&ghi.co.at +37096 + Dongnian Networks Inc + Dan Yao + dan&dongniannetworks.com +37097 + UkSATSE + Ruslan Fesenko + rfesenko&uksatse.org.ua +37098 + Ingenieursbureau Ebatech BV + Dennis Koot + d.koot&ebatech.nl +37099 + Itautec S.A. + Fausto Murari + fausto.murari&itautec.com +37100 + Endai Worldwide + Lukasz Ochoda + lochoda&endai.com +37101 + Quelltext AG + Arndt Schönewald + pen-37101&iana20180219.mc.schoenewald.de +37102 + Wind mobile + Henry Zheng + hzheng&windmobile.ca +37103 + Infinetix Corp. + Ken Farr + kfarr&infinetix.com +37104 + Pebble Beach System Ltd + Gavin Smith + gavin.smith&pebble.tv +37105 + Secure Systems and Technologies Ltd + Mr Paul Massey + paul.massey&sst.ws +37106 + RazorThreat, Inc. + Vernon Smith + vsmith&razorthreat.com +37107 + Manitoba Telecom Services + Michael Jamieson + michael.jamieson&mtsallstream.com +37108 + DonbassInformCommunicationService + Dmitriy Krivoruchko + dk&dics.com.ua +37109 + SMS Data Products Group, Inc. + David Ishmael + dishmael&sms.com +37110 + Altera Corporation + Mukul Kumar + mkumar&altera.com +37111 + Institute for Clinical and Experimental Medicine + Jiri Amler + jiri&amler.cz +37112 + Infosol d.o.o. 
+ Damir Jurica + iana&info-sol.net +37113 + Chita State University + Evgeny Dranichnikov + dranichnikov&chitgu.ru +37114 + Christian Aid + Duncan Drury + iops&christian-aid.org +37115 + VIVA SOFT LTD + Alexander Alexandrov + alexandrov&vivasecurity.com +37116 + Instituto Nacional de Seguros + Luis Cárdenas + lcardenas&ins-cr.com +37117 + Industrial Thinking Ltd + Bill Graham + oid&industrialthinking.com +37118 + Panoramic Data + David Bond + david.bond&panoramicdata.com +37119 + Object ECM AG + Björn Abt + bjoern.abt&object.ch +37120 + Velocent Systems Inc. + Randy Johnson + rwj&velocent.com +37121 + Eiritu Equipment Technology Co.,Ltd + Hiroshi Tarumi + tarumi&eiritu.co.jp +37122 + EVOTRIX SYSTEMS + PRITAM PAUL + pritam&evotrix.com +37123 + BYTIS Ltd. + Smaliak Siarhei + cc&bytis.by +37124 + beatandmusic media GmbH + Bastian Menningen + admin&beatandmusic.com +37125 + Omega Cube + Guillaume Seigneuret + gs&omegacube.fr +37126 + syn2cat a.s.b.l. + Georges Toth + iana&hackerspace.lu +37127 + SuperLumin Networks + Douglas Merrill + doug.merrill&superlumin.com +37128 + bo.lt + Ben Smith + it&boltnet.com +37129 + Project 76 + Jamie Stallwood + snmp&project76.net +37130 + Miroslav Mrakota + Miroslav Mrakota + mirek&mrakota.cz +37131 + Unioncast Network Technology Co.,LTD,BeiJing + Jiang Fei + fei.jiang&unioncast.net +37132 + Friedrich Kuhnt GmbH + Wilfried Röben + produktion&kuhnt.de +37133 + Reverse-Logic Ltd. + Zoltan Riba + riba.zoltan&gmail.com +37134 + MackTECH GmbH + Joachim Mack + support&macktech.de +37135 + oscardijkhoff.nl + Oscar Dijkhoff + admin&oscardijkhoff.nl +37136 + Crawford and Company + Winfield Terry + win_terry&us.crawco.com +37137 + Reilly Telecom Inc. + James Reilly + jreilly&reillytele.com +37138 + tibra + omar kheir + omar.kheir&tibra.com +37139 + Deep-Secure Ltd + Simon Wiseman + Simon.Wiseman&Deep-Secure.com +37140 + Guavus Inc + Bijay Singh Bisht + bijay.singh&guavus.com +37141 + MUWA-Spelle + Mischa Schiffelbein + info&muwa-spelle.com +37142 + Actua + Helge Jensen + hej&actua.dk +37143 + in-put GbR - Das Linux-Systemhaus + Mr. Stefan-Michael Guenther + s.guenther&in-put.de +37144 + Viktor Gruber + Viktor Gruber + info&gv-its.de +37145 + MedicVision + Shai Attia + shai&medicvision.com +37146 + Unitrin Services Group + Dev Davuluri + distsys&unitrin.com +37147 + Alveole Studio + RIBEYRON Sylvain + contact&alveole-studio.com +37148 + burak özen + burak özen + burakozen2004&gmail.com +37149 + OnLineDB ltd + Arthur Sokolov + Arthur.Sokolov&OnLineDB.ru +37150 + Surtronic International B.V. + John Vermaas + jvermaas&surtronic.nl +37151 + Cross Domain Technical Forum + Boyd Fletcher + cds_snmp&nsa.gov +37152 + France Net Com + Raouf Habib + raouf.habib&francenetcom.com +37153 + srnjak.com + Grega Krajnc + grega.krajnc&srnjak.com +37154 + Junction Software Ltd + Greg Hughes + greg&junctionsoftware.co.uk +37155 + ByteActive AB + Anders Tormod + anders.tormod&byteactive.com +37156 + Hangzhou Lancable Technology Co.,ltd + Jimmy Sun + jimmy&blue-link.cn +37157 + NetentSec Inc. + ZhongYuan Guo + zhongyuan_guo&netentsec.com +37158 + cynix.org + Brian Chu + cynix&cynix.org +37159 + SCK-CEN + Luc Vandenbroucke + luc.vandenbroucke&sckcen.be +37160 + Continium Ltd. 
+ Valeria Derevenskaya + valeri&continium.biz +37161 + Mobis Ltd + George Shegunov + mobis&netplus.bg +37162 + Softperience + Radosław Smogura + rsmogura&softperience.eu +37163 + IPS - International Power Supply + Alexander Rangelov + ar&ips-group.net +37164 + IMage INTelligence (IMINT) + Rodolphe Saunier + rodolphe.saunier&free.fr +37165 + Kivanc Oskay + Kivanc Oskay + kivanc&oskay.web.tr +37166 + Seillac Co., Ltd. + Ichiro Sakada + ichiros&seillac.com +37167 + Infrafuture Inc. + Prakash Patil + prakashp&infrafuture.com +37168 + CorpGenie + Aashish Jha + jha.aashi&gmail.com +37169 + Xeirius Solutions + Prashant Singh Ahluwalia + prashant&xeirius.com +37170 + SoftwareMind + Dariusz Zbik + d.zbik&softwaremind.pl +37171 + Dipl.-Ing. (BA) Thomas Knoblauch + Thomas Knoblauch + iana-pen&netz-en.de +37172 + Wired Informática e Sistemas Lda + José Filipe Mendes Rodrigues + jrodrigues&wired-tech.com +37173 + Kluwer + Arno Broekhof + abroekhof&kluwer.nl +37174 + STEP Networks Inc + Paul Woolcox + pwoolcox&stepnetworks.com +37175 + NovoLink Communications + Fernando Lombardo + flombardo&novolink.com +37176 + Integrated Products Inc + Ray Rutledge + rayr&integratedproductsinc.com +37177 + Southern Oregon University + Chris Corcoran + netadmin&sou.edu +37178 + Alquist Consulting Ltd + Andrew Jones + andrew.jones&alquist.co.uk +37179 + Ab Ovo Nederland BV + Lex Brinkhuijsen + pensnmp&ab-ovo.com +37180 + Centric Corporation + Peter House + peter.house&centricsystems.com +37181 + IP Technology Labs + Paul Cutler + paul&iptechnologylabs.com +37182 + Viewteq Corp. + Richard Scully + richards&viewteq.com +37183 + Cipherdyne + Damien Stuart + dstuart&cipherdyne.org +37184 + Aptus Interactive Ltd + William Lewis + william&aptusinteractive.com +37185 + AMERSPORTS + Pierre GAILLY + iana-contact&amersports.com +37186 + Perfecto Mobile + Yoram Mizrachi + yoramm&perfectomobile.com +37187 + Premier Manufacturing Support Services Poland Sp. z o.o. + Tomasz Gwozdz + tomasz.gwozdz&premiermss.com.pl +37188 + eTellicom Pty Ltd + Raul Fragoso + raul&etellicom.com +37189 + Audit Grup Ltd. + Vladimir V. Losev + losev&auditgr.ru +37190 + Tecisa74 S.L. + Ignacio Cortes Soro + icortes&tecisa74.com +37191 + Androscoggin Valley Hospital + Ryan J. Taylor + ryan.taylor&avhnh.org +37192 + INSTITUTO FEDERAL DO PARÁ - IFPA + RICARDO JOSÉ CABEÇA DE SOUZA + ricardo.souza&ifpa.edu.br +37193 + REACH Global Services + Matthew W D ROBERTSON + matthew.robertson&reach.com +37194 + Gaba Corporation + Patrick Hoferer + phoferer&gaba.co.jp +37195 + connect co.Ltd + Shouichi Maruyama + s.maruyama&connect-net.co.jp +37196 + Tieline Technology + Anthony Sizer + sizer&tieline.com +37197 + Oberfinanzdirektion Niedersachsen + Ralf Möhse + ralf.moehse&ofd-z.niedersachsen.de +37198 + SoftAgency Co., Ltd. + Satoshi Tatsuoka + satoshi&softagency.co.jp +37199 + Zentrum fuer Informationstechnologie kreuznacher diakonie + Klaus Kruse + zitinfotec&kreuznacherdiakonie.de +37200 + S&L ITcompliance GmbH + Götz Schmitt + info&sul-itcompliance.de +37201 + S&L Netzwerktechnik GmbH + Götz Schmitt + info&sul.de +37202 + United Network for Organ Sharing + Blaine Hess + hessbt&unos.org +37203 + Global Telecom Tools, LLC + Chris Sibley + chris.sibley&globaltelecomtools.com +37204 + Maxxess Systems, Inc.
+ Ryan Haney + PEN&maxxess-systems.com +37205 + SAMPENSION KP LIVSFORSIKRING A/S + Henrik Ellesø + Servicedesk&sampension.dk +37206 + FEMSWISS AG + Daniel Keller + keller&femswiss.com +37207 + Astrium Services - GEO-Information division + Noel Hardy + noel.hardy&infoterra-global.com +37208 + Embedded Planet + Tim Van de Walle + vandewallet&embeddedplanet.com +37209 + San Francisco Department of Public Health + Winona Mindolovich + winona.mindolovich&sfdph.org +37210 + The Monkeysphere Project + Daniel Kahn Gillmor + dkg&fifthhorseman.net +37211 + Fontana Unified School District + David Araujo + DavidA&fusd.net +37212 + Yamaha motor solutions xiamen + chen hongbin + chen_hongbin&ymsl.com.cn +37213 + RS Automation Co.,Ltd. + Brian Chun + bhchun&rsautomation.co.kr +37214 + Aeromaritime Systembau GmbH + Michael Simon + noc&aeromaritime.de +37215 + MicroWorld Technologies Inc. + Mr. Govind Rammurthy + govind&escanav.com +37216 + Clickstream Technologies Plc + Jon Baldachin + ops&clickstream.com +37217 + Entanet International Ltd. + Jake Smith + jake&enta.net +37218 + SZM.com s.r.o. + Milan Igrini + szm&szm.com +37219 + Wandertec + Frank Koenen + frank&bikeshophub.com +37220 + cPacket Networks Inc. + Hari Miriyala + hari&cpacket.com +37221 + TerraVision Pty Ltd + Keven Weber + keven&sat.com.au +37222 + ALLCLEAR + John Lin + pen&allclear.cc +37223 + Integra-S + Astafiev Aleksey + astafiev&integra-s.com +37224 + international west trading ltd + hakan akgun + hakan_ak&hotmail.co.uk +37225 + eParadeigma, LLC + Matt Johnson + mattj83&gmail.com +37226 + Polizei Saarland + Ralf Stoll + lka-saarland-iana&polizei.slpol.de +37227 + INC Technologies + Karim E.E. Sabaa + ksabaa&inc.com.kw +37228 + HandcraftIT + Jimmy Myerscough + dns&handcraftit.biz +37229 + Applied-Intelligence GmbH + Andreas Hecker + andreas.hecker&applied-intelligence.de +37230 + Modulo Consulting + Ion Nistor + ion.nistor&modulo.ro +37231 + University of Plovdiv + Atanas Terziyski + atanas&uni-plovdiv.bg +37232 + GTS-Thaumat XXI, S.A. + Alfredo Garcia + thaumat&thaumat.com +37233 + Void Sistemas S.L + Ariel Pedrera Valdes + apedrera&void.es +37234 + Xuzhou Normal University + WeiZhang + wzhang&xznu.edu.cn +37235 + Gen Energija d.o.o. + Robert Pozun + robert.pozun&gen-energija.si +37236 + Stantech Communications Solutions, Inc. + Barry Stanton + bastanto&hotmail.com +37237 + Metabolon + Hongping Dai + hdai&metabolon.com +37238 + Bateau Limited + Graham Horne + hostmaster&bateau.co.uk +37239 + MKM Bilgisayar Danismanlik ve Egitim Hizmetleri San. ve Tic. Ltd. Sti. + Murad BASDAG + basdagm&mkm.com.tr +37240 + J. Riesmeier + Joerg Riesmeier + iana-pen&riesmeier.de +37241 + Grupo Soares da Costa SA + Américo Nascimento + americo.nascimento&soaresdacosta.pt +37242 + Zerolag Communications + Greg Strelzoff + noc&zerolag.com +37243 + Everyone Counts, Inc. + Ryan Nideffer + ryan.nideffer&everyonecounts.com +37244 + ZigBee Alliance, Inc. + Bill Chase + bchase&inventures.com +37245 + Fusion Crypto + Melvin Jeddeloh + melvin.jeddeloh&fusioncrypto.com +37246 + Zerfowski + Olaf Zerfowski + iana.org&zerfowski.de +37247 + Thales Solutions Asia + Christopher Ong + chris.ong&asia.thalesgroup.com +37248 + JoongboTech Co, LTD. + Jaeho, Min + jhmin&joongbotech.com +37249 + Amicon Ltd + Alexey Novotorzhin + novotorjin&amicon.ru +37250 + Grid2Home + Don Sturek + dsturek101&yahoo.com +37251 + eVent Medical Inc + Johnny Chan + j.chan&event-medical.com +37252 + Image Video + Bob Crowder + bcrowder&imagevideo.com +37253 + Energy Aware Technology Inc. 
+ Colby Gore + colby.gore&energy-aware.com +37254 + Intronis, Inc. + Steven Frank + sfrank&intronis.com +37255 + Mirada + Kelly Bergougnoux + kelly&mirada.com +37256 + Mendeley Ltd + Robin Stephenson + sysadmin&mendeley.com +37257 + die | brueckenschlaeger + Niklas E. Cathor + niklas&brueckenschlaeger.de +37258 + Telekomunikacije RS AD Banja Luka + Mirko Obradovic + mirko.obradovic&mtel.ba +37259 + Netic A/S + Thomas Rasmussen + info&netic.dk +37260 + XIAN ZIKING NETWORK COMMUNICATIONS CO.,LTD. + Xiu Yu Yin + xyyin&ziking.net +37261 + University of Shkodra + Sokol Lahi + sokol_lahi&yahoo.com +37262 + Sabrefin Limited + Mark Brown + mark.brown&sabrefin.co.uk +37263 + Catholic Education Office, Lismore + Michael Kemsley + support&lism.catholic.edu.au +37264 + Tularosa Communications + Jerimiah Cole + jcole&tularosa.net +37265 + Delovoy Partner Ltd. + Alexander Shevtsov + shevtsov&dpartners.ru +37266 + Schindler Elevator Ltd + Adrian Buenter + adrian.buenter&ch.schindler.com +37267 + Tecnobit + Jesus Megia + jesus.megia&tecnobit.es +37268 + Gira Giersiepen GmbH & Co. KG + Stefan Pees + registration&gira.de +37269 + Rosa-Luxemburg-Stiftung Gesellschaftsanalyse und politische Bildung e. V. + Olaf Barz + support&rosalux.de +37270 + Farell Instruments SL + Jose Luis Prieto Saens + pen&farell-i.com +37271 + COESYS s.r.l. + Leonardo Palazzolo + iana-pen&coesys.it +37272 + Westvik + Dane Westvik + danew&nosheds.com +37273 + Acision Ltd + Paul Pankhurst + paul.pankhurst&acision.com +37274 + Infratel, Inc. + Artem Bozhenov + a.bozhenov&infratel.com +37275 + Skyfire Labs, Inc. + John Metzger + jmetzger&skyfire.com +37276 + System of Modular Technologies - ICC + Mikhail Maysuradze + adm&smticc.ru +37277 + Universidad TecMilenio + Juan Jesus Salazar + jjsalazar&tecmilenio.mx +37278 + Hanselmann Consulting GmbH + Thomas Hanselmann + thomas&hanselmann.net +37279 + eco-ged + Christophe Feau + christophe.feau&eco-ged.fr +37280 + jTendo Sp. z o.o. + Andrzej Trawinski + andrzej.trawinski&jtendo.com +37281 + Signatec ltd. + Anton Charintsev + anton&signatec.ru +37282 + Barloworld Power + Ryan Sprong + rsprong&barloworldpower.com +37283 + SPF FInances + Claudio Rudi + ict.operations.unixservers&minfin.fed.be +37284 + Energy Sector Security Consortium, Inc. + Justin Clarke + justin&energysec.org +37285 + NationalField + Thomas Christ + tchrist&nationalfield.org +37286 + Congressional Budget Office + Kristen Skinner + Kristen.Skinner&cbo.gov +37287 + christopher d del riesgo + christopher d del riesgo + christopher&delriesgo.com +37288 + AXGATE CO., LTD (formerly 'AXAN Networks Co.,LTD.') + Jack H. Lee + pen-manager&axgate.com +37289 + Inco-Service Ltd. + Kozak Ivan + kozakman&gmail.com +37290 + Gridmerge Limited + Robert Cragie + robert.cragie&gridmerge.com +37291 + BRITZE Elektronik und Geraetebau GmbH + David Kirchner + david.kirchner&geyer-gruppe.de +37292 + Arcanum Communications Ltd + Richard Frith-Macdonald + richard&arcanumcommunications.com +37293 + Rusteletech Ltd. + Vladimir Lee + lee&rusteletech.ru +37294 + netzquadrat GmbH + Matthias Witte + noc&netzquadrat.de +37295 + Stadt Muenster - citeq + Stefan Schoenfelder + oidadmin&citeq.de +37296 + nanoLogika GmbH + Bastian Menningen + admin&nanologika.de +37297 + LiteCore Networks India Pvt Ltd + Rathnakumar Kayyar + rkayyar&litecore.net +37298 + PAGANI INFORMATIQUE sarl + Sébastien PAGANI + paganisebastien1&free.fr +37299 + Quanxi Technologies, LLC + J. Ryan Porter + rporter&acenetconsulting.com +37300 + Neoware s.r.o. 
+ Kamil Kantar + kamil.kantar&neoware.cz +37301 + metaVentis GmbH + Matthias Hupfer + hupfer&metaventis.com +37302 + Daniel Adam + Daniel Adam + iana&fad-net.de +37303 + Ignesco Software + Charlotte Harrison + iana-ldap&ignesco.co.uk +37304 + Stonegate Instruments Ltd + Shaun Evers + pdev&stonegate-instruments.co.uk +37305 + Beijing Utility Software Core Technology Co., Ltd + Shi Cancan + cancan.shi&gmail.com +37306 + Hintz + Thomas Hintz + thomas&familie-hintz.de +37307 + Vodéa + Nicolas Mercadier + nicolas.mercadier&vodea.com +37308 + GSMK mbH + Robert Blauenhausen + infra-support&gsmk.de +37309 + PÉTÁV Pécsi Távfűtő Kft. + Kutfej Béla + kutfej.bela&gmail.com +37310 + NetAcademia Kft. + Agoston Deim + deim.agoston&netacademia.net +37311 + Leibniz-Gymnasium Remscheid (formerly 'N@team Leibniz-Gymnasium Remscheid') + Klaus Rörig + netzwerk&leibniz-remscheid.de +37312 + ULC Systems Inc. + Seong-Heon Jeong + linebacker&ulcsystems.com +37313 + peers@play + Arno Wacker + arno.wacker&uni-due.de +37314 + 40 North LLC + Basit Mustafa + basit&40northllc.com +37315 + Greenshift + Alex Sanderson + pen.iana.org&greenshiftnetworks.com +37316 + Yuvad Technologies + Vivian Song + cpsong&yuvad.com +37317 + Ruby Rocket (Chengdu) Consulting Co., Ltd + David Wilkinson + dw&rubyrocketlink.com +37318 + Datacenter Services, Inc. + Robert Hermanns + robert.hermanns&d8acenter.com +37319 + GERNY + Stefan Gerny + mail&gerny.de +37320 + Inneasoft + Sebastien Cand + sebastien.cand&inneasoft.com +37321 + Really Helsinki Oy + Jari Partti + jari.partti&really.fi +37322 + ERNW GmbH + Rene Graf + rgraf&ernw.de +37323 + Yasashi Matsuo + Yasashi Matsuo + matsuo.yasashi&cocoa.plala.or.jp +37324 + Albany Medical Center + Joseph Hoffman + hoffmaj&mail.amc.edu +37325 + York College of Pennsylvania + Jason Kopp + dsadmin&ycp.edu +37326 + Datasat Digital Entertainment + David Eyre + david.eyre&datasatdigital.com +37327 + Student Satellite Initiative Munich SSIMUC e.V. + Martin Luelf + admin&ssimuc.de +37328 + Peninsula Regional Medical Center + Brad Taylor + brad.taylor&peninsula.org +37329 + Community Care Physicians, P.C. + Patrick Knapp + pknapp&communitycare.com +37330 + OpenMarket + Andrew Kerr + andrew.kerr&openmarket.com +37331 + Frey Textilreinigung GmbH + Albert Frey + EDV&frey-textilreinigung.de +37332 + CPS Power Systems GmbH + Jörg Becker + j.becker&cpspower.de +37333 + CADENAS GmbH + Klaus Gruber + k.gruber&cadenas.de +37334 + Norsk Hydro ASA + Nils Elverhaug + nils.jorgen.elverhaug&hydro.com +37335 + DimpleDough + Bob Kwiencien + bkwiencien&dimpledough.com +37336 + Identity Solutions Pty Ltd + Richard Begg + richard.begg&identity-solutions.com.au +37337 + RandomStorm Ltd + Andrew Gilhooley + andrew.gilhooley&randomstorm.com +37338 + C3 + Denise Stockman + denise.stockman&c3-carbon.com +37339 + Open IPTV Forum, e.V. + Nilo Mitra + nilo.mitra&ericsson.com +37340 + Teclo Networks + Ties Stuij + ties&teclo.net +37341 + Plexxi, Inc. + Ephraim Dobbins + ephraim.dobbins&plexxi.com +37342 + uCIRRUS Corp + Dave Cracknell + Dave.Cracknell&ucirrus.com +37343 + InterScot Network + Chris Maxwell + cmaxwell&interscot.net +37344 + Express TeleCom JSC + Andrey A. Konovalov + noc&extel.ru +37345 + it suits IT + Erwin van Dijk + erwin.vandijk&itsuitsit.com +37346 + Medicity + Ashish V. Shah + avshah&medicity.com +37347 + IT-Services der Oesterreichischen Sozialversicherungs GmbH + Ronald Holper + ronald.holper&itsv.at +37348 + Compact Software International SA + Antonio J. 
García Lagar + ajgarcia&c17.net +37349 + Family Trach + Jens Trach + jens.trach&k-67.de +37350 + SkyOnline Argentina + Daniel H. Perez + dperez&skyonline.net.ar +37351 + Agile Europe s. r. o. + Vladimir Sys + vladimir.sys&agile-europe.com +37352 + Ministerio de Relaciones Exteriores, Comercio Internacional y Culto + Diego Woitasen + dxw&mrecic.gov.ar +37353 + Euroports + Etienne Leneveu + etienne.leneveu&euroports.com +37354 + Arawat Inc + Rohit Joshi + msg&arawat.com +37355 + Norris Communications + Sam Norris + SAM&MrNorris.com +37356 + Deutschmann Automation GmbH & Co. KG + Dominik Litzinger + IANA_PEN&deutschmann.de +37357 + Auderis + Boleslav Bobcik + bbobcik&gmail.com +37358 + Regionalmedien Austria AG + Kurt Stieger + it-services&regionalmedien.at +37359 + PaperlinX Europe + Chris Wynia + chris.wynia&paperlinxeu.com +37360 + Hypace Technology Co.,Ltd + Grant Lee + grantlee&hypace.com +37361 + EquiVox Inc. + Adeel Nazir + adeel.nazir&equivox.net +37362 + Center Information Technologies of Amur Region + Oleg Yakovlev + ca&amur-cit.ru +37363 + Viishanke Oy + Mika Mähönen + mika.mahonen&viishanke.fi +37364 + Likewise Software + Glenn Curtis + glennc&likewise.com +37365 + AeCS Aeroclub Saar e.V. + Christian Zeitz + christian.zeitz&aeroclub-saar.de +37366 + Network Application Engineering Laboratories LTD. + Minoru Okazaki + okazaki-rd&nalab.jp +37367 + UNIVISION SRL + Giacomo Colombo + giacomo.colombo&univision.it +37368 + AND Technology Research Ltd. + Steven Kear + steven.kear&andtr.com +37369 + CyberCité + DUVERGIER Claude + iana.pen-net&cybercite.fr +37370 + Mark White + Mark White + scoooby&campus.ie +37371 + Idibri + Russell Reid + rreid&idibri.com +37372 + University of San Diego + Amol Athawale + identitysupport&sandiego.edu +37373 + MitraStar Technology Corporation + Becker Lu + Becker.Lu&mitrastar.com.tw +37374 + Brainstorm Mobile Solutions Ltd + Richard Frith-Macdonald + richard.fm&brainstorm.co.uk +37375 + DATA 5 Ingenieurbuero fuer Datentechnik GmbH + Karl Zuern + Karl.Zuern&DATA5.de +37376 + Gerhard Messer + Gerhard Messer + register&gerhardmesser.com +37377 + Chaos Creations + Søren P. Skou + sps&mud.dk +37378 + Dansk Kabel TV + Søren P. Skou + sps&danskkabeltv.dk +37379 + Charles Schwab & Co., Inc. + Darren Mar-Elia + darren.marelia&schwab.com +37380 + Computer Output Print & Internet (COPI) + Andy Plata + ap&888999COPI.com +37381 + Balidev.com + Yogi Triana + yogi.triana&gmail.com +37382 + Danateq PTE Ltd + Ian Langman + ian.langman&danateq.com +37383 + Infocom Network Limited + Rajesh Kumar Mallah, CTO + mallah&tradeindia.com +37384 + Onze Lieve Vrouw Ziekenhuis Aalst + Anthony Van der Vennet + anthony.van.der.vennet&econocom.be +37385 + Centrum Informatyki ZETO S.A. w Białymstoku + Waldemar Buraczewski + Waldemar.Buraczewski&zeto.bialystok.pl +37386 + CEGES/SOMA + Steven Langenaken + steven.langenaken&cegesoma.be +37387 + NAVUM GmbH + Alexander Schneider + Alexander.Schneider&navum.de +37388 + Lietuvos Respublikos Seimo kanceliarija + Rimas Paliusis + ripali&lrs.lt +37389 + BYO NETWORKS + Olivier Tirat + olivier.tirat&byo-networks.com +37390 + Adyton Systems AG + Franco Fichtner + franco.fichtner&adytonsystems.com +37391 + Ministry of health Republic of Macedonia + Zaklina Cagoroska + zaklina&izismk.org +37392 + PROBE Inc. + Dong Hyun Kim + dhkim&probedigital.com +37393 + Tazarv Afzar Co. + Jalal Abedinejad + abedi&tazarv.com +37394 + Farsinet + Mohsen Saeedi + iana&farsinetco.com +37395 + kaspian sanat co.
+ aflatounian ali + aflatounianali&live.com +37396 + Usługi Informatyczne ITS Mariusz Kryński + Mariusz Kryński + mrk&sed.pl +37397 + MMB Research Inc. + Mark Borins + domain.admin&mmbresearch.com +37398 + ThreatMetrix + Joel Heenan + operations&threatmetrix.com +37399 + Oceans Edge Inc + Adam McKay + adam.mckay&oceansedgeinc.com +37400 + Topsoft ZRt + Ferenc Lendvai + lendvai&topsoft.hu +37401 + FancyFon Software + Konrad Kehrer + konrad.kehrer&fancyfon.com +37402 + neverpanic.de + Clemens Lang + clemens&neverpanic.de +37403 + Marmiko IT-Solutions GmbH + Th. Meier-Brook + penmaster&marmiko.de +37404 + Rischioto Corp + Carlos L L Rischioto + carlos&rischioto.com.br +37405 + HeBei FarEast Harries comunication company + liumengyang + liumengyang.dxx&163.com +37406 + AHASWARE s.r.o. + David Misek + info&ahasware.cz +37407 + Comcraft + Laurent Schirck + laurent.schirck&comcraftfr.com +37408 + Callis Technologies + Sebastian Sensabastiano + ssensabastiano&callistech.com +37409 + Home Credit International, a. s. + Luděk Finstrle + app&homecredit.net +37410 + Opus VFX + Darren Hildebrand + darren.hildebrand&opusvfx.com +37411 + L-S-B Broadcast Technologies GmbH + Wilfried Luff + wlu&l-s-b.de +37412 + Cable Manufacturing Business, Inc + Chris Badinelli + cbadinelli&cablemanufacturing.com +37413 + Smile SA + Nicolas BOUTHORS + nicolas.bouthors&smile.fr +37414 + Tipjar L.L.C. + David Nicol + david&tipjar.com +37415 + International Radio and Electronics Corp. + Don Pettifor + dpettifor&irec1.com +37416 + Rivet Software + Jim Cleppe + jim.cleppe&rivetsoftware.com +37417 + EFM s.r.l. + Grimaldi Marco + grimaldi.marco&efmnet.it +37418 + Mobile People + Lars Bo Svenningsen + lbo&mobilePeople.com +37419 + Bonprix sp. z.o.o + Marek Zaradzki + mzaradzki&bonprix.pl +37420 + Virtual Bit di Lucio Crusca + Lucio Crusca + info&virtual-bit.com +37421 + Factum IT BV + Damian Myles + info&efactum.net +37422 + Start Norge + Audun Bjørkøy + webint&startntnu.no +37423 + Unassigned + Returned 2019-02-01 + ---none--- +37424 + MDL29 + Arnaud Yonnet + arnaud.yonnet&mdl29.net +37425 + Ericsson-LG Enterprise Co., Ltd. (formerly "LG-Ericsson Co., Ltd.") + Andrew Kihyun Kim + andrew.kihyun.kim&ericssonlg.com +37426 + Travelzen Group Ltd. + Erich Cheung + erich.cheung&travelzen.com +37427 + ENTE + Adam Szastok + a.szastok&ente.com.pl +37428 + Bürkert Werke GmbH & Co. KG + Dr. Udo Gais + licences.eu&burkert.com +37429 + Grand Canyon University + Gary Crites + dl-itvendorsupport&gcu.edu +37430 + Electronic Power And Market Sp. z o.o. + Lukasz Panasiuk + lukasz.panasiuk&epm.com.pl +37431 + Montalbano Technology SPA + Luca Adamo + ladamo&montalbanotechnology.com +37432 + Carbon Mountain LLC + John Buswell + engineering&carbonmountain.com +37433 + Custom Radio Network, Inc. + Henry Mecke + hal&customrn.com +37434 + ZenRobotics Ltd. + Paul Tötterman + admin&zenrobotics.com +37435 + Gensler IT-Support + Christoph Gensler + info&gensler.to +37436 + Maritime Broadband Inc. + Mary Ellen Kramer + mkramer&maritimebroadband.com +37437 + Softec Integrations AG + Stefan Huwiler + stefan.huwiler&softec.ch +37438 + networks direkt Gesellschaft für Informationstechnologie mbH + Nils Böckmann + nils.boeckmann&direkt-gruppe.de +37439 + Computer Network Limited + Jamie Reddell + jamie.reddell&cnlsoftware.com +37440 + Digitalwatt S.r.L + Roberto Quadrini + roberto.quadrini&digitalwatt.it +37441 + OCI Nitrogen + Frans Ortmans + frans.ortmans&ocinitrogen.com +37442 + ANFAC Autoridad de Certificación Ecuador C.A. 
+ Florencio Diaz + fdiaz&anf.es +37443 + Centina Systems + Paul Pantages + pdp&centinasystems.com +37444 + Digital Products Limited + Julien Richard + netadmin&dplcore.com +37445 + Atlantic Intertrade Ltd. + William Robert Caron + w.r.c&atlanticit.com +37446 + Andrey Klyachkin + Andrey Klyachkin + aklyachkin&gmail.com +37447 + Nimble Storage + Hector Yuen + hector&nimblestorage.com +37448 + ENBLINK + Gwak, Beom Seok + bsgwak&enblink.com +37449 + Opzoon Technology Co., Ltd. + Keane Xi + xizhuke&gmail.com +37450 + Sodrugestvo Group of Companies + Dmitriy Ilyin + it&sodru.com +37451 + Effdon Networks Ltd. + Radion Mirchevsky + radion&effdon.com +37452 + Delta Sistemi Srl + Roberto Capato + rcapato&deltasistemi.it +37453 + B-Obvious + Raz Elharar + raz&b-obvious.com +37454 + Newsweb + Vincent Batoufflet + vincent.batoufflet&newsweb.fr +37455 + Abertis Autopistas España + Carles Fuentes + carlos.fuentes&abertisautopistas.com +37456 + The New School + Peter Redhead + redheadp&newschool.edu +37457 + Medas S.R.L. + Angelo Alfano + angelo.alfano&medas-solutions.it +37458 + QualCare, Inc + Ed Merola + emerola&qualcareinc.com +37459 + Yealink + Vin Too + vintoo&yealink.com +37460 + Nova banka AD + Sasa Popravak + sasa.popravak&novabanka.com +37461 + Peakwork GmbH + Christian Garling + christian.garling&peakwork.de +37462 + COGISTICS, INC. + Robert Berquist + rberquist&gmail.com +37463 + Leitwelt GmbH + Dr. Oliver Welter + oliver.welter&leitwelt.com +37464 + Intelligrated, Inc + Mark Holland + mark.holland&intelligrated.com +37465 + DataDesign + Michael Forum + michael.forum&forumonline.dk +37466 + Digitactics, Inc. + Matthew Beebe + matt&digitactics.com +37467 + Arpeggio Software Inc + Tim McCarthy + tim.mccarthy1&yahoo.com +37468 + Educational Service Unit Coordinating Counsel + Matthew Blomstedt + admin&esucc.org +37469 + Shenzhen Communication Technology Co.Ltd + hulin Li + sct_lhl&hotmail.com +37470 + Power Internet Ltd + Peter Spikings + peter.spikings&powergroup.co.uk +37471 + QEES Aps. + Henrik Olsen + hol&qees.eu +37472 + Cuculus GmbH + René Böringer + r.boeringer&cuculus.net +37473 + Used Car Dealers Association of Ontario + Sergey Lopatin + s.lopatin&ucda.org +37474 + sedkol + John Greenfelder + zgreenfelder&gmail.com +37475 + OT Systems Limited + Kelvin Chan + admin&ot-systems.com +37476 + ViaThinkSoft + Daniel Marschall + oidra&viathinksoft.de +37477 + SIGNALIS + Claude RICHARD + claude.richard&signalis.com +37478 + The Karthikeyans + Muruganand Karthikeyan + muru&mkarthik.cz.cc +37479 + Warimax Ltd. + Michael Todd Peterson + support&warimax.com +37480 + SkySQL Ab + Kaj Arnö + kaj&skysql.com +37481 + Canyons School District + Scot McCombs + itmsengineers&canyonsdistrict.org +37482 + Stewart McKelvey + Scott Gorrill + sgorrill&smss.com +37483 + A.5 Security Consulting Group, SL + Sergi Gonzalez + sgonzalez&a5security.com +37484 + PT. Teknologi Multimedia Indonesia + Agastiya S. Mohammad + egi&pure-technology.net +37485 + Alico Graphic Web Designs + Sean Ali + sean&alicographics.com +37486 + OOO Verified Solutions + Alexander Ryzhov + admin&vs5.ru +37487 + Nicolas Ledez + Nicolas Ledez + iana&ledez.net +37488 + Ridgetop Group, Inc. + Robert Wagoner + rwagoner&ridgetopgroup.com +37489 + Scality + Dos Santos Patrick + iana&scality.com +37490 + ENCO Systems, Inc. + Eugene Novacek, P.E. + research&enco.com +37491 + FTM Systems Kft.
+ Néher Márton + neher.marton&ftmsystems.hu +37492 + Mindstorm Networks + Nick Whalen + nickw&mindstorm-networks.net +37493 + Cyberlink AG + Thomas Bader + bader&cyberlink.ch +37494 + Vincent de Lau + Vincent de Lau + vincent&delau.nl +37495 + MARS Advanced Research Systems + Robert Leslie + pen.iana.org&mars.org +37496 + ZHEJIANG DAHUA TECHNOLOGY CO.,LTD + Wang Lingli + wang_lingli&dahuatech.com +37497 + Delphi Innovations + Marshall Reeske + mreeske&gmail.com +37498 + NasuTek Global Enterprises + Michael Manley + mjmanley&ntge.net +37499 + NetSoft Distributed Solutions Pty Ltd + Keith Willis + iana&netsoft.com.au +37500 + Schmid Industrieholding + Thomas Brandner + iana&sih.co.at +37501 + Progetto Archivio S.r.l. + Massimo Nuvoli + info&progettoarchivio.com +37502 + kyberna AG + Thomas Fritz + noc&kyberna.net +37503 + Seratel Technology SAL + John Hobdell + john&seratel.com +37504 + ONETASTIC S.r.l. + Visentin Sergio + onetastic&onetastic.com +37505 + WSO2 Inc. + Prabath Siriwardana + external-relations&wso2.com +37506 + Biodec s.r.l. + Michele Finelli + m&biodec.com +37507 + Freenet Liberec, o.s. + Jakub Petržílka + kubapet&lbcfree.net +37508 + AQUAPHOR + Nikolay Kartashev + kolq&aquaphor.ru +37509 + DirectorySelfService + Mohd EQbal. + DirectorySelfService&tashawour.com +37510 + SilentSystem + Shinichi Nakamoto + naka&silentsystem.jp +37511 + Future Dynamics Interstellar + Napoleon A. Courtney + napoleoncourtney&gmail.com +37512 + Systems Corps + Michael Carey + mike&systemscorps.com +37513 + Citkomm + Marc Risse + risse&citkomm.de +37514 + Raphael Frasch GmbH + Raphael Frasch + info&frasch.de +37515 + Avajadi Solutions + Eddie Olsson + ewt&avajadi.org +37516 + Software Integration Development + C.H.W. van Zon + c.v.zon&softint.nl +37517 + Sveriges Radio AB + Henrik Sörling + Henrik.sorling&sr.se +37518 + Firewall Services + Hervé Lardin + tech&firewall-services.com +37519 + Draexlmaier Group + Mario Lohner + Lohner.Mario&DRAEXLMAIER.DE +37520 + Kassenaerztliche Vereinigung Baden-Wuerttemberg + Eike Ott + NetzwerkeundSecurity&kvbawue.de +37521 + Network Pipeline Inc. + Brad J. Wilkus + bradwilkus&networkpipeline.com +37522 + Kousec Software, Inc. + Masato Kataoka + masato.kataoka&kousec.com +37523 + Alyrica Networks Inc + Joseph Sullivan + joseph.sullivan&alyrica.net +37524 + Com1 Communication Solutions B.V. + Bas Sanders + b.sanders&com1.nl +37525 + Infovide-Matrix + Pawel Biedronski + pbiedronski&ivmx.pl +37526 + WellCentive, LLC + Andre Thenot + agthenot&wellcentive.com +37527 + Signalsky Communication Co., Ltd. + Li Chunsheng + tom&signalsky.com +37528 + Crompton Greaves Limited + Yogendra Namjoshi + yogendra.namjoshi&cgglobal.com +37529 + A and A System Co., Ltd. + Shigetaka Furukawa + moroheiya&aa-sys.co.jp +37530 + WebRadar + Daniel Hart + daniel&hart.net +37531 + Frank Moeller + Frank Moeller + frank&nx01.de +37532 + BBT Technology Corp. + PanJie + pjie131&yahoo.com.cn +37533 + Delta Electronics (Thailand) Public Co.,Ltd + Phichej Cheevanantachai + Phichej&delta.co.th +37534 + uher.info + Marek Uher + marek&uher.info +37535 + microDATA GIS, Inc. + Jeremy Jackson + jjackson&md-911.com +37536 + NRG2 B.V. + B. Sanders + bsanders&nrg2.nl +37537 + JADE + Błażej Kukliński + bkuklinski&jade.pl +37538 + Big Switch Networks + Kanzhe Jiang + kanzhe.jiang&bigswitch.com +37539 + Code One GmbH + Arne Bönninghoff + ab&codeone.tv +37540 + N3 Labs Sdn. Bhd. + Ihsan Junaidi Ibrahim + ihsan&n3labs.my +37541 + COMPUTER HI-TECH INC. 
+ Koji Suzue + suzue&cht.co.jp +37542 + Depelmaan Ltd + Alireza Khodaian + a.khodaian&depelmaan.com +37543 + Crowe Horwath LLP + Ted Dyck + ted.dyck&crowehorwath.com +37544 + UM Labs Ltd + Peter Cox + peter&um-labs.com +37545 + ACCEO Solutions inc. (formerly 'GFI Solutions Group inc.') + Christophe Dupre + christophe.dupre&acceo.com +37546 + BNP Paribas Fortis group + Patrick GUSTOT + patrick.gustot&bnpparibasfortis.com +37547 + Europapier International GmbH + Muranyi Mihnea + m.muranyi&europapier.ro +37548 + CenturyLink + Christina Ellis + chris.ellis&centurylink.com +37549 + C&T sas + Roberto Casalegno + roberto.casalegno&ct-sas.it +37550 + Flexenclosure AB + Magnus Persson + magnus.persson&esitepowersystems.com +37551 + IFM Infomaster SpA + Giampaolo Sica + giampaolo.sica&ifminfomaster.com +37552 + RAYCORE TAIWAN CO., LTD. + Jimmy Chiang + contact&raycore.com.tw +37553 + FRDLWEB + Till Wehowski + till&webfan.de +37554 + Archbold Medical Center + Lou Ellen Parker, RN + lparker&archbold.org +37555 + Enabling Technologies Company, Inc. + Mark Mercer + markm&brailler.com +37556 + Brandwatch + Luke Alexander + luke&brandwatch.com +37557 + Pathology Associates Medical Laboratories, LLC + Sean Rowe + srowe&paml.com +37558 + VM-DG Hemmert & Schuster GbR + Marco Schuster + marco&vmsoft-gbr.de +37559 + Socialcast + Patrick Kile + patrick&socialcast.com +37560 + Netscout Systems, Inc. (formerly 'Simena, LLC') + Sezen Uysal + sezen.uysal&netscout.com +37561 + Beijing KEMACOM technologies Co., Ltd. + Yongjie Zhang + eddy.jhang&gmail.com +37562 + Hangzhou Yuwan Technology Co., Ltd. + Shuyun Guo + support&itmone.com +37563 + NNS - Nigl Network Solutions + NIGL Sascha + s.nigl&nns.at +37564 + Everest Display Inc. + Michael B. F. Wu + michaelbfwu&everestdisplay.com.tw +37565 + Struck Innovative Systeme GmbH + Thorsten Fritzke + thorsten.fritzke&struck.de +37566 + Discovery Reply S.r.l. + Andrea Ceciarelli + a.ceciarelli&reply.it +37567 + Orbital ATK, Inc (formerly 'Alliant Techsystems Inc.') + Chris Helget + pki&orbitalatk.com +37568 + Commend International GmbH + Johannes Helminger + j.helminger&commend.com +37569 + Vocative Ltd. + Dmitry Glukhov + gludminick&vocative.ru +37570 + ELFF Tech + Lonny Fairfield + lfairfie&wyoming.com +37571 + KVUC + Mik Helmich + mik&kvuc.dk +37572 + Panini s.p.a. + Marco Fassiotto + marco.fassiotto&panini.com +37573 + LightSoft Research Ltd. + Alexey Leontev + alex&lightsoft.ru +37574 + Alberta Distance Learning Centre + Arlen Baker + abaker&adlc.ca +37575 + Infobip LTD + Izabel Jelenic + izabel.jelenic&infobip.com +37576 + NovelSat + Guy Cohen + guy.c&novelsat.com +37577 + Branch of LLC "Tabak-Invest" Trade Center "Korona" + Mikhail Koren + mike.it&korona.by +37578 + Samworth Brothers Ltd + thomas wing + helpdesk&ginsters.co.uk +37579 + Synetrix Holdings Limited + John Shaw-Miller + john.shaw-miller&synetrix.co.uk +37580 + Chao-Inn elementary school + Chia-Hui Chen + mis&cies.tyc.edu.tw +37581 + Mika timing GmbH + Joerg Mika + joerg.mika&mikatiming.de +37582 + Theobroma Systems Design und Consulting GmbH + Dr. Philipp Tomsich + oid-admin&theobroma-systems.com +37583 + Surescripts, LLC + Jim Martin + security&surescripts.com +37584 + Gary Hawkins + Gary Hawkins + gary.hawkins&garyhawkins.me.uk +37585 + QDIAGNOSTICA + Ayuda Imagen + ayuda.imagen&diagnosticorecoletas.com +37586 + E-sektionens Teletekniska Avdelning + Anton Landberg + anton.landberg&gmail.com +37587 + QualVu, Inc. + Rodney D. Holm + rholm&qualvu.com +37588 + QingDao Easytelecom Co., Ltd.
+ Lin Guo + easytelecom&public.qd.sd.cn +37589 + Institut de l'Ebre + Sergi Tur Badenas + stur&iesebre.com +37590 + Meyer Burger Technology AG + Jonas Reusser + j.reusser&meyerburger.ch +37591 + Reggiani + Andrea Pagni + support&reggiani.it +37592 + Ev.-Luth. Brüdergemeinde Enger e.V. + Alexander Gebel + alex&elb-enger.de +37593 + Platform-a Arastirma Gelistirme Koordinasyon Hizmetleri A. S. + Ilhan Uncuoglu + ilhan.uncuoglu&platform-a.org +37594 + Netmining LLC + Cristian Ferencz + cristian.ferencz&netmining.com +37595 + Geomarine Ltd + Ian Macdonald + ianmac51&gmail.com +37596 + Agri-hitech LLC + Shigeru Makino + mac&agri-hitech.com +37597 + Decatur Hospital Authority dba Wise Regional Health System + Joe Arispe + jarispe&wiseregional.com +37598 + CipherCloud, Inc. + Pravin Kothari + pkothari&ciphercloud.com +37599 + Meontrust Inc. + Markku Mehtälä + markku&meontrust.com +37600 + ledeuns.org + Denis FONDRAS + contact&ledeuns.org +37601 + Xech S.r.l. + Marco Tiraboschi + mtiraboschi&xech.it +37602 + Spider Software GmbH + Michael Becker + pen&spider-software.net +37603 + Meami.org + Martin Musatov + musatov&att.net +37604 + RMG Tech (Malaysia) Sdn Bhd + Jean-Yves Sireau + sysadmin&regentmarkets.com +37605 + MCX Telecom 2 Sp. z o.o. + Tomasz Sadowski + tomasz.sadowski&mcx.pl +37606 + "SM Finance",JSC + Vladimir V. Losev + losev&smfin.ru +37607 + Cascada Software & Consulting + Marcos Luna + marcos.luna&gmail.com +37608 + CareEvolution, Inc + Himabindu Bolisetty + bindu&careevolution.com +37609 + Dang Networks Inc. + Andrew Duffy + andrewd&dangnetworks.com +37610 + The TuxClan + Carel Lubbe + carel.lubbe&gmail.com +37611 + CJSC TSI Service + Ivan Yu. Malinin + miu&tsinet.ru +37612 + Kassenärztliche Vereinigung Niedersachsen + Christian Strassburg + christian.strassburg&kvn.de +37613 + ennovatis GmbH + Hartmut Freihofer + h.freihofer&ennovatis.de +37614 + Siqura + Peter de Konink + pkonink&siqura.com +37615 + V Labs Informatica Ltda + Andre Luiz de Senna + senna&igenesis.com.br +37616 + SATS Holding AB + Ludvig Nilsson + it&sats.com +37617 + Ventek Sistemas e Automação + Guilherme Namen Pimenta + guilherme&ventek.com.br +37618 + Tumblr + Fredrik Nylander + fredrik&tumblr.com +37619 + SCHIEx + Himabindu Bolisetty + bindu&careevolution.com +37620 + Alegent Health + Cathi Bishop + cathi.bishop&alegent.org +37621 + Ename, S.A. + Jose Lameira + oid&ename.pt +37622 + Chrysalis Utah, Inc. + Dustin Carroll + dustin.carroll&gochrysalis.com +37623 + Red Hound Software, Inc. + Carl Wallace + carl&redhoundsoftware.com +37624 + M & PI Software S.r.l. + Pierluigi Brugnone + pierluigi.brugnone&mpisoftware.it +37625 + Mike Johnson + Mike Johnson + m.d.johnson&kuub.org +37626 + STC AKTOR + Alexey Vorontsov + av&aktor.ru +37627 + ensemble + Mike Joe + sysadmin&ensemble.com +37628 + NETRATOR Andrzej Dopierala + Andrzej Dopierala + dopieralaa&netrator.pl +37629 + COMTEC NET + Diana Ioan + diana.ioan&tecnet.ro +37630 + Iron Technology Solutions Ltd + Regis Savry + regis&irontech.co.nz +37631 + Plasmatronics Pty Ltd + Brendan English + iana&plasmatronics.com.au +37632 + The Frontier Group + Matthew Lambie + mlambie&thefrontiergroup.com.au +37633 + Bolnisnica Golnik KOPA + Tomaz Knific + tomaz.knific&klinika-golnik.si +37634 + Achieve3000, Inc.
+ Yaakov Goldberg + yaakov.goldberg&achieve3000.com +37635 + Awarepoint + Brandon Gilmore + bgilmore&awarepoint.com +37636 + Catholic Health + Michael Brown + mbrown&chsbuffalo.org +37637 + Henry County Hospital + Louetta Dishman + ldishman&hcmhcares.org +37638 + Infinitrum Co., Ltd + David cheng + david.cheng&infinitrum.com +37639 + ECYS S.A. + Fernando Valenzuela + fdvalenzuela&ecyssa.com +37640 + Federal Cardiovascular surgery center + Nikulin Andrey + avn&krascor.ru +37641 + Neebula Systems + Shai Mohaban + shai&neebula.com +37642 + GK "SoftExpert" + Alexander Sentyurin + asentyurin&sfx-tula.ru +37643 + IBM BTO Business Consulting Services Sp. z o.o. + Lukasz Kozubal + lukasz.kozubal&pl.ibm.com +37644 + Fuzhou Ucore Information Technologies Co,.Ltd + Wensheng Hu + 13705084159&139.com +37645 + Medical Data Express + Alan Hagerman + alan.hagerman&medicaldataexpress.com +37646 + Wyoming Valley Health Care System + Luann Draht + ldraht&wvhcs.org +37647 + Ministry of Interior - UAE (Governmental Entity) + Major General Matar Salem Bin Msaeed Al Neyadi – ICT Director / Dr. Haider Khalid A.A. Al-Ameed – Senior ICT Advisor + hkameed&adpolice.gov.ae +37648 + Kalmia Technology Co, LLC + Alton Brantley + alton.brantley&gmail.com +37649 + Helium Inc. + Tracy Flynn + tflynn&helium.com +37650 + Shared Technology Services Group, Inc. + Venu Venugopal + vvenugopal&stsgi.com +37651 + JoyStem Web Services Private Limited + Mr. Chandra Sekhar & Mr. JayaChandra Prakash + sekhar&joystem.com +37652 + I-TECH + Alex McGee + camcgee&uw.edu +37653 + Lamby + Stefan Lamby + support&lamby.de +37654 + Moccu GmbH + Thomas Walter + admin&moccu.com +37655 + Far South Networks + Michael Walton + mike&farsouthnet.com +37656 + Broxtowe Borough Council + Stuart Chaffin + Stuart.Chaffin&broxtowe.gov.uk +37657 + Erskine Design + Phil Howell + dev&erskinedesign.com +37658 + Czech National Bank + Martin Podstata + certificateadmins&cnb.cz +37659 + ControlCircle Ltd. + Andy Coates + MonitoringTeam&ControlCircle.com +37660 + Unit of Research of Technologies of Information and Communication (UTIC) + Heithem Abbes + heithem.abbes&gmail.com +37661 + Transcon Electronic Systems spol. s r. o. + Martin Persich + persich&transcon.cz +37662 + Ablerex Electronic Co., Ltd. + Fred Hsu + fred&ablerex.com.tw +37663 + nGen - Niagara Interactive Media Generator + Thomas Madej + tmadej&ngen-niagara.com +37664 + Zone de Police Midi + Jacques Struelens + dir.tel&zpz5341.irisnet.be +37665 + R. W. Beckett Corporation + John Bohan + engineering&beckettenergy.com +37666 + Airebullas C.B. + Juan Gomez Melero + airebullas&airebullas.es +37667 + Office of the Auditor General of Norway + Stig Kristiansen + stig.kristiansen&riksrevisjonen.no +37668 + bit GmbH + M. Botzem + m.botzem&bit-germany.de +37669 + FONDEMI + Marco Saldivia + msaldivia&fondemi.gob.ve +37670 + Barling Bay LLC + Perry Minchew + pminchew&barlingbay.com +37671 + The Biostatistics Center - GWU + Aria Bamdad + aria&bsc.gwu.edu +37672 + Odine Solutions LLC + Bahri Azar + snmp.pen&odinesolutions.com +37673 + Estech Systems, Inc. + Brian Berger + bberger&esi-estech.com +37674 + Heineken International + Jan den Otter + Jan.denotter&heineken.com +37675 + LoveBee + Phillip O'Donnell + admin.iana&lovebee.co.nz +37676 + Sutunam Co., Ltd. + Martin PANEL + network&sutunam.com +37677 + Linuxsoft s.r.o. 
+ Ales Hakl + adh&linuxsoft.cz +37678 + Hall Research + Justin Ireland + justin&hallresearch.com +37679 + AGILiCOM + Frédéric BAHUAUD + f.bahuaud&agilicom.fr +37680 + Scott Ellis + Scott Ellis + mail&scottellis.com.au +37681 + Sinefa Pty Ltd + John Bothe + john&sinefa.com +37682 + AlexxHost + Alexey bogomolov + alexx.b84&gmail.com +37683 + TimoCom Soft- und Hardware GmbH + Marco Haack + hostmaster&timocom.com +37684 + Grupo Alsa + Pablo Alvarez Menendez + guardia&alsa.es +37685 + Online Marketing Solutions AG + Michael Junghans + notify&online-marketing-solutions.com +37686 + Lookout Mobile Security, Inc. + Brian Martin + brian.martin&lookout.com +37687 + Bethesda Memorial Hospital + Leslie Durham + leslie.durham&bethesdahealthcare.com +37688 + ACE Computer bv + Stephen Day + stephen&ace.nl +37689 + unixtastic + Stephen Day + sd&unixtastic.com +37690 + JOTATEC + Julio Cesar Gomes da Silva + julio&jotatec.com.br +37691 + Broadcast Devices Inc. + Ron Kumetz + ron&broadcast-devices.com +37692 + StringIT + Jeroen Dekkers + iana&stringit.nl +37693 + Brattleboro Memorial Hospital + Jonathan Farina + jfarina&bmhvt.org +37694 + Tsubakimoto Chain Co. + Kohei Yamada + mitaro.sales&mitaro.gr.jp +37695 + Quantum Data Systems (HK) Ltd + Leonard Siu + info&quantum.com.hk +37696 + WIKA Polska S.A. + Maciej Czyznielewski + it&wikapolska.pl +37697 + Higher One + Ian Preston + ipreston&higherone.com +37698 + Customs and Border Protection + Hari Kolli or David Kalavity + Hari.K.Kolli&cbp.dhs.gov +37699 + Implaneo Dental Clinic (formerly 'BolzWachtel Dental Clinic') + Christian Fohmann + edv&ipi-muc.de +37700 + Axed S.p.A. + Gennaro Coppola + sistemi&axed-technology.com +37701 + energie-m.de + Hans-Stefan Müller + mail&energie-m.de +37702 + Narodowy Fundusz Zdrowia - Centrala + Pawel Malara + informatyka&nfz.gov.pl +37703 + Mywindowslab.com + Laith Al Shamri + laith&live.it +37704 + Interconcept GmbH + Tobias Reibling + t.reibling&interconcept.de +37705 + CommSoft RMS LLC + Stewart Foote + sf&commsoft-rms.com +37706 + Netmedia Systems e. K. + Matthias Meinecke + info&nmsweb.de +37707 + Rocrail + Rob Versluis + info&rocrail.net +37708 + Systola Forwarding GmbH + Roman Kuznetsov + support&systola.de +37709 + S3 Satcom Ltd + John Kennett + john.kennett&s3sat.com +37710 + Dane E. Jones + Dane E. Jones + dane.jones&att.net +37711 + FileMaker Inc + Meena Rajvaidya + meena_rajvaidya&filemaker.com +37712 + Eurosistemi + Michael Lattari + michael&lattari.eu +37713 + Winextreme.org + Artem Pronichkin + artem&pronichkin.com +37714 + AppFolio, Inc. + Ross Harvey + noc&appfolio.com +37715 + FTN + Pascal Fontaine + pascal.fontaine&ftn.ch +37716 + Ville de Lausanne + Pascal Fontaine + securite.informatique&lausanne.ch +37717 + Herbert Smith LLP + Tom Maher + tom.maher&herbertsmith.com +37718 + Fresenius Vial + Franck Rakotonindrainy + franck.rakotonindrainy&fresenius-vial.fr +37719 + PT. Multi Structure + Mulyo Santoso + helpdesk&multistructure.co.id +37720 + CSR ZHUZHOU INSTITUTE CO LTD + xiaoyan jiang + jiangxy&teg.cn +37721 + OpenSys (M) Berhad + Zainun Romli + zromli&myopensys.com +37722 + Trusted Information Consulting Sp. z o.o. + Daniel Wachnik + daniel.wachnik&ticons.pl +37723 + ODIN technologies + Scott Barvick + sbarvick&odinrfid.com +37724 + Chalet Technologies Inc. 
+ James Lin + jamesl&chalettech.com +37725 + Yupiq Corp + Paul Reimer + paul.reimer&yupiq.com +37726 + Bertol, Alexander + Bertol, Alexander + alex&bertol.de +37727 + General Digital Corporation + ROBERT GREGER + iana-pen&generaldigital.com +37728 + ADV Consulting + Petr Mitrofanov + pmitrofanov&advc.ru +37729 + ATS Group GmbH + Quentin Zak + quentin&groupsecurity.net +37730 + p-rimes.net + Paul Reimer + paul&p-rimes.net +37731 + MB S.p.A. + Luca Faccin + luca.faccin&mbcrusher.com +37732 + Ymif Engineering b.v. + A. Wennen + info&ymif.nl +37733 + Civil Registry Agency of Ministry of Justice of Georgia + Zurab Magradze + zmagradze&cra.gov.ge +37734 + ASSA ABLOY AB - Group Technologies + Jens Bylehn + IANAPen-Registrar.Authority&assaabloy.com +37735 + St. Mary's Medical Center + Dave Imhoff + dimhoff&st-marys.org +37736 + PinnacleHealth System + Tony Hackman + thackman&pinnaclehealth.org +37737 + GUANGDONG DONYAN NETWORK TECHNOLOGIES CO.,LTD. + Zhihu Zhang + beyoniger&gmail.com +37738 + Apadmi Ltd + Adam Fleming + adamf&apadmi.com +37739 + Braintree Payment Solutions, LLC + Michael Vallaly + noc&getbraintree.com +37740 + Johannes Roith + Johannes Roith + johannes&jroith.de +37741 + First Step Internet, LLC + Nathan Anderson + nathana&fsr.com +37742 + Iprio Corporation + Kazuhiro Matsushita + info&iprio.co.jp +37743 + Robert Frank + Robert Frank + rwf14f&gmail.com +37744 + Centrify Corporation + Nathan Yocom + nate.yocom&centrify.com +37745 + spampig.org.uk + Barry Hueder + barry.hueder.iana_org&spampig.org.uk +37746 + Security Data S.A. + Lenin Vasquez + soporten3&securitydata.net.ec +37747 + Mercy Health Partners + Bob Stansfield + rstansfield&health-partners.org +37748 + GIAVANEERS + L. Brian McGann + brianm&giavaneers.com +37749 + Wire e Wireless World, SA + Pedro Lage Tavares + security&3w.com.pt +37750 + Quester Tangent + Rick Pearson + rpearson&questertangent.com +37751 + E.I. du Pont de Nemours and Company + Karen Galbraith + contact&dupont.com +37752 + NETS DENMARK A/S + Peter Lind Damkjaer + pldam&nets.eu +37753 + Robin Partington Architects + James Waite + james.waite&fabrictechnologies.com +37754 + Nextragen GmbH + Helge Jürgensen + helge.juergensen&nextragen.de +37755 + Radio Activity srl + Roberto Roberti + r.roberti&radioactivity-tlc.it +37756 + The Ministry of Interior of the Republic of Croatia + Marin Istvanovic + marin&mup.hr +37757 + Newton Graphics, Inc. + Tadahiro kanno + kanno&newton-graphics.co.jp +37758 + Actidata Company + Sergey Firsov + admin&telemetr.net +37759 + WXXR Network Technology Ltd. BEIJING + Neil Lin + neillin&wxxr.com.cn +37760 + Powertech Automation Solutions Pvt. Ltd + Vivek Patwardhan + vivek.patwardhan&pasl.in +37761 + Altitude Software + Joaquim Freitas + joaquim.freitas&altitude.com +37762 + Neotion R&D + Remi Verchere + rverchere&neotion.com +37763 + Bitxenio, S.L. + Miguelanxo Otero Salgueiro + miguelanxo&bitxenio.com +37764 + Portel Servicios Telemáticos + Carlos Martín + c.martin&portel.es +37765 + LHERITIER + Pierre FICHET + pfichet&lheritier-alcen.com +37766 + Beijing Banggoo Networks Co,Ltd. + James Zhang + glzhang&banggoo.cn +37767 + Gartner Inc.
+ Peter Heilig + peter.heilig&gartner.com +37768 + Bundesministerium für Arbeit und Soziales + Fridolin Smolka + fridolin.smolka&bmas.bund.de +37769 + Koramis GmbH + Michael Fess + m.fess&koramis.de +37770 + IDmachines LLC + Salvatore D'Agostino + sal&idmachines.com +37771 + Linkwise Technology Private Limited + Shirley Koh + shirley&linkwisetech.com +37772 + AERODEV + Martin Chen + chenmj&aerodev.com +37773 + OAO NESK Novorossiyskenergosbit + Kokorin Artem Vladimirovich + KokorinAV&nesk.ru +37774 + 51.6 Noord B.V. + Marcel de Kock + marcel.dekock&516noord.nl +37775 + BaoBros Studio + Winnow Cai + winnow.cai&baobros.com +37776 + Co. Ltd. "RCS Labs" + Artem Marchenko + artem.marchenko&rcslabs.ru +37777 + Cabinet Pierre Guerin + Pierre Guerin + pierre.guerin&cab-cpg.net +37778 + Northern Design (Electronics) Ltd + Paul Connor + paul.connor&ndmeter.co.uk +37779 + Nuvolas + Andre Joswig + info&nuvolas.de +37780 + MicroRIGHT Corporation + Aaron Moreau-Cook + ops&microright.com +37781 + eZuce Inc + George Niculae + george&ezuce.com +37782 + KindleIT + Rodolfo Hansen + rhansen&kitsd.com +37783 + Wolf Pond Creative LLC + Thomas Davis + tommy&wolfpond.com +37784 + IDC Solutions Pty Ltd + James Yong + jamesy&idcsolutions.com.au +37785 + Ingenieurbuero Jan F Westerkamp (IBW) + Jan F Westerkamp + oid&ib-westerkamp.de +37786 + Galtronics Telemetry, Inc. + Mike Lafferty + mike.lafferty&galtronics.com +37787 + Emirates Identity Authority + Mohamed Al Redaini + Mohamed.AlRedaini&emiratesid.ae +37788 + Syslife + Antoine Natale + a.natale&syslife.com +37789 + Novapost SAS + Dr. Jorge Tellez + jorge.tellez&novapost.fr +37790 + Gould International UK Ltd + Tim Garvin + timg&gouldinternational.co.uk +37791 + PortaOne, Inc. + Lutay Alexander + Alexander.Lutay&portaone.com +37792 + Enfuse Inc + Hong Wang + hwang&inscopeinternational.com +37793 + Squirrel Solutions Ltd + Matt Jenkins + matt&sqsol.co.uk +37794 + DVR Support Center + Jon Swatzell + engineering&pco-inc.com +37795 + Centre Hospitalier du Nord + Aloyse Gilbert + aloyse.gilbert&chdn.lu +37796 + PTS Group AG + Sören Lifka + s.lifka&ptsgroup.de +37797 + Graylog2 + Lennart Koopmann + lennart&socketfeed.com +37798 + Pcmedic + Tiago Lourenço Geada + tiago.geada&pcmedic.pt +37799 + Coloriuris A.I.E. + Pedro J. Canut + webmaster&coloriuris.net +37800 + Aster Data Systems, Inc + Dinkar Gupta + dgupta&asterdata.com +37801 + Innovactive Engineering s.r.l. + Lorenzo Maiorfi + maiorfi&innovactive.it +37802 + Sypris Solutions, Inc + Charles Timko + charles.timko&sypris.com +37803 + California State University San Marcos + Diane Petersen + kai&csusm.edu +37804 + Spirit Software Solutions + Graeme Elliott + gelliott&spiritsoftware.biz +37805 + Team Simoco Ltd. + Customer Services + customer.service&simocowireless.com +37806 + North Power Coporation (EVN NPC) + Trung Huu Truong + trungdtbk&gmail.com +37807 + Instituto Tecnológico de Canarias + Fco.
Javier Ascanio Suárez + jascsua&itccanarias.org +37808 + FIBCOM INDIA LIMITED + B.Sreenivasappa + sreenivasappa.b&fibcom.com +37809 + Wnet Wisp S.r.l + Scalisi Mihai + adranoweb&hotmail.com +37810 + PCE SYSTEMS + Leo Magee + lmagee&pcesystems.com +37811 + Glass Echidna + Aidan Steele + aidan.steele&glassechidna.com.au +37812 + Kotkamills Oy + Peter Danielsbacka + peter.danielsbacka&kotkamills.com +37813 + DSM Computer GmbH + Wolfram Ansin + w.ansin&dsm-computer.de +37814 + Infinidat LTD + Gregory Shulov + gregs&infinidat.com +37815 + Friendly Runet Foundation + Varnakov Kiril + kvarnakov&friendlyrunet.ru +37816 + skweez.net + Florian Mutter + elm&skweez.net +37817 + GMG Technologies + Gorobchenko Michael G. + gmg.inbox&gmail.com +37818 + LEGALBOX + Pierre Guerin + pierre.guerin&legal-box.com +37819 + Technical Dabblings + Glenn R. Martin + iana&technicaldabblings.com +37820 + Telecore LTD. + Anatoly Danilov + ermolaew&bk.ru +37821 + untermStrich software gmbh + Christian Koller + christian.k&untermstrich.com +37822 + Wolfplex Hackerspace ASBL + Sébastien Santoro + sebastien.santoro&wolfplex.org +37823 + Vimukti Technologies Private Limited + Rajesh Akkineni + rajesh&vimukti.com +37824 + ECOLE NATIONALE VETERINAIRE D'ALFORT + BATTAGLIA Hervé + informatique&vet-alfort.fr +37825 + Interregional Distribution Grid Company of Volga, JSC + Sergey Yu. Bozhko + sy.bozhko&mrsk-volgi.ru +37826 + ISL Internet Sicherheitsloesungen GmbH + Andreas Rieke + andreas.rieke&isl.de +37827 + WV Network for Educational Telecomputing (WVNet) + Samuel Lay + sam&mail.wvnet.edu +37828 + LIL'SYSTEMS + Yves Geunes + info&lilsystems.be +37829 + Puxlit + Xiao Di Guan + woot&puxlit.net +37830 + InCampus Pte Ltd + Liew SC + liew&incampus.com.sg +37831 + Techroutes Network + AnilRaj Thulasidas + araj&techroutes.com +37832 + Clifford Chance LLP + Stephen Thurling + stephen.thurling&cliffordchance.com +37833 + Southern New Hampshire Medical Center + Devora Moriarty + Devora.Moriarty&snhmc.org +37834 + No23 + Ivan Bischof + ivan.bischof&no23.de +37835 + Jerome Baum + Jerome Baum + jerome&jeromebaum.com +37836 + CCM Benchmark Group + Adrien Sévère + asevere&ccmbenchmark.com +37837 + mikroVOX s.r.o. + Martin Madlik + m.madlik&mikrovox.cz +37838 + Futurad S.r.l. + Lorenzo Rizzatto + l.rizzatto&futurad.it +37839 + OneSystem S.A. + Daniel Eduardo Alzate Tamayo + daniel.alzate&onesystemsa.com +37840 + Veda Advantage + Jason Pell + jason.Pell&vedaadvantage.com +37841 + Flughafen Zürich AG + Thomas Reichmuth + ictzertifikate&zurich-airport.com +37842 + Aicox Soluciones S.A. + Ithar Alhaidari + ithar&aicox.com +37843 + Adecco Group + Abid Khwaja + abid.khwaja&adeccona.com +37844 + Eastern Upper Peninsula ISD + Jay Schupp + jschupp&eup.k12.mi.us +37845 + VINCI S.A. 
+ Guy Widloecher + oid&vinci.com +37846 + MEDIAPRINT Zeitungs und Zeitschriften Verlags GesmbH + Charly Baldrian + ssladmin&mediaprint.at +37847 + Business Information Intelligence Services + Thierry Brouard + thierry.brouard&biis.biz +37848 + ORSENNA + SENCKEISEN JEAN PHILIPPE + jpsenckeisen&orsenna.fr +37849 + Aris System + Davood Firoozian + dfiroozian&arissystem.com +37850 + Slavic Gospel Association + Steve Palm + stevep&sga.org +37851 + YTY Enterprises + Steve Palm + n9yty&n9yty.com +37852 + ECCOS inzenjering + Aleksandar Dujmović + Aleksandar.Dujmovic&eccos.com.hr +37853 + OASIS Systems Pty Ltd + Jonathan Yeoh + jonathan.yeoh&oasissystems.com.au +37854 + Crozer-Keystone Health System + Marisa Niedbalski + marisa.niedbalski&crozer.org +37855 + The Reading Hospital and Medical Center + Jason Balthaser + balthaserj&readinghospital.org +37856 + Essential Enterprise Solutions Inc. + David Davies + ddavies&eesimed.com +37857 + Veolia Environmental Services - North America + Michael Glaubig + michael.glaubig&veoliaes.com +37858 + Synegen + Martin Phee + marty.phee&synegen.com +37859 + Synerline sprl + André Deblire + iana&synerline.com +37860 + Source Trading Co., Ltd. + Hui Mei Chen + ruthchen36&yahoo.com.hk +37861 + Austco Communication Systems + Stephane Benayoun + stephane.benayoun&austco.com +37862 + Suntel S.A. + Eddy Nelson + suntelsa&hotmail.com +37863 + ETMC Regional Healthcare System + Pete Range + prange&etmc.org +37864 + Cape Cod Healthcare + Peter Read + pread&capecodhealth.org +37865 + City and County of San Francisco, Department of Technology + Aaron Smith + aaron.smith&sfgov.org +37866 + Bank of New Zealand + B.A.Johnston + nz_infoman_zcidsm&bnz.co.nz +37867 + APG + Patric Lichtsteiner + patric.lichtsteiner&apg.ch +37868 + VirtualSharp Software + Alberto Gonzalez Martos + agonzalez&virtualsharp.com +37869 + BruteCO Certificate Authority + Adam Vallee + adam.vallee&bruteco.com +37870 + Sensometrix SA + Grégory Brusick + gbrusick&sensometrix.ch +37871 + Digital Vision, s.r.o. + Michal Vanco + mvanco&di-vision.sk +37872 + Neko Consulting Ltd. + Vasily Zezin + vzezin&nya.me +37873 + J4care GmbH + Michael Knapp + michael.knapp&j4care.com +37874 + CebaTech Inc. + Hank Cohen + hcohen&cebatech.com +37875 + oogle Networks LLC + Josh Kittle + josh&ooglenetworks.com +37876 + Delta Management AB + Mattias Niklasson + info&deltamanagement.se +37877 + TITNC Alexander Seifarth + Alexander Seifarth + a.seifarth&yahoo.de +37878 + WineSkills + Andrew Atkinson + andrew.atkinson&plumpton.ac.uk +37879 + NetOrg LTD + Suthagar Kathirkamathamby + suthagarht&gmail.com +37880 + Qowisio + Jean-Samuel Reynaud + js&qowisio.com +37881 + ESCA + BRAND Arnaud + abrand&esca.fr +37882 + RedeCamp Ind. Com. e Serviços de Telecomunicações S.A. + Alexandre Alves + acalves&redecamp.com.br +37883 + Vitaly Pashkov + Vitaly Pashkov + admin&fluda.net +37884 + Perfect Home AS + Bjørn Håvar Falck-Andersen + havar&perfecthome.no +37885 + Renewable Funding, LLC + Darrell Fuhriman + itstaff&renewfund.com +37886 + Coraid Inc. + Binh Le + lpbinh&coraid.com +37887 + OraPharma, Inc. + Jeff Edonick + jedonic2&orapharma.com +37888 + PROGIRIS + Louis CALENDA + louis&progiris.com +37889 + Accord Medical Management, LP DBA Nix Health Care System + Les Surrock + lsurrock&nixhealth.com +37890 + Quality Fiber and RF Inc + Jeff Oberholtzer + jeff&qfrf.com +37891 + Big Ant Studios Pty Ltd + Scott Pederick + sysadmin&bigant.com +37892 + GCT Semiconductor, Inc. + Jason J. 
Kim + jasonkim&gctsemi.com +37893 + BeyondTrust Software, Inc + Jordan Bean + jbean&beyondtrust.com +37894 + aylite + tarmo kaljumae + tarmo&aylite.com +37895 + LucidView + Tim Haak + tim&haak.co.uk +37896 + IFD Kapital + Konstantin Sidorenko + it&ifdk.com +37897 + Maxwell Management Inc. (MMI) + Will Maxwell + will&shpapa.com +37898 + Éric Larivière + Éric Larivière + ericlariviere&hotmail.com +37899 + Hahn Family + Stefan-W. Hahn + stefan.hahn&s-hahn.de +37900 + International Information Programs IR, Deptartment of State + Greg Goble + GobleGW&state.gov +37901 + Aeon Experienced Development + Henning Hucke + oidmaster&aeon-experienced.eu +37902 + Polynet Ltd. + Sándor Nagy + sandor.nagy&polynet.hu +37903 + CURUPIRA S/A + Renato Frederick + operacao&takenet.com.br +37904 + UnixServices + Thomas Müller + tmueller&unixservices.de +37905 + Beijing Jiaxun Feihong Electrical Co.,Ltd + Zhang Tao + zhangtao&jiaxun.com +37906 + Beijing WingMax Technology Co.,Ltd + jiangshan + sjiang&wingmax.net +37907 + Fusion Comm Solution Sdn Bhd + Chong Nan Bing + nanbing_chong&fusioncomms.com +37908 + A puissance 3 + Thierry Le Naour + th.ln&ap3.fr +37909 + Arantia 2010 S.L.U. + Gonzalo Berdeal + gberdeal&arantia.com +37910 + Modesat Communications + Jüri Põldre + juri.poldre&modesat.com +37911 + CDP Communications Inc. + Ted Dunlop + ted.dunlop&cdpcom.com +37912 + Locomotive Software & Consulting + Pigozzo Michele + pigozzo.michele&gmail.com +37913 + ZAO PTKB "MERCURIY" + Lubsanova Antonina + a.lubsanova&bankm.ru +37914 + TECNOTEL s.r.l. + PAOLO SARTINI + paolo.sartini&tecnotel-sistemi.it +37915 + Shenzhen Zyeeda Information Technology Co. Ltd. + Rui Tang + tangrui.cn&gmail.com +37916 + Volvo Car Corporation AB + Björn Kjellén + BKJELLEN&volvocars.com +37917 + MarkitServ + Matthew Barr + matthew.barr&markitserv.com +37918 + SAGE Dining Services + Marcus Madsen + technology&sagedining.com +37919 + NETMOON + forest.Guo + gxb&netmoon.cn +37920 + DEXUS Holdings Pty Limited + Mark Aidasani + mark.aidasani&dexus.com +37921 + Southwest Institute of Healing Arts + Michael LaBelle + mikel&swiha.edu +37922 + TeleMedia Systems, Inc. + Lance Wang + lance&telemediasystems.com +37923 + Freysteinn Alfredsson + Freysteinn Alfredsson + freysteinn&freysteinn.com +37924 + NokianTyres LLC + Alexandr Sizov + Aleksandr.Sizov&nokiantyres.com +37925 + mysql-cluster-snmp-monitor + Steven Ayre + steveayre&gmail.com +37926 + Pranveer Singh Institute of Technology + Amit K Awasthi + awasthi&psit.in +37927 + RTLGroup + Udo Kertels + domreg&rtlgroup.com +37928 + Sheffield Teaching Hospitals NHS Foundation Trust + Richard Conway + richard.conway&sth.nhs.uk +37929 + PIPS Technology Ltd + David McConnell + davidm&pipstechnology.co.uk +37930 + Virus Stopper Foundation Registered NGO No 348 -2010 + GUNTAGANI VIMAL KUMAR + virusstopperfoundation&gmail.com +37931 + Bob Greene + Bob Greene + onms.bg&gmail.com +37932 + Jack Storm + Jack Storm + blaiddeira&gmail.com +37933 + Westchester Medical Center + Andrew Minikes + pen-iana-org-Request&wcmc.com +37934 + Confident Technologies, Inc. + John Whitlock + ct.admin&confidenttech.com +37935 + Danmarks Designskole + Henrik Mathorne + hma&dkds.dk +37936 + Calendar42 + M. Boerrigter + michel&calendar42.com +37937 + Schaffhausen Communication GmbH + Bjoern Becker + bb&schaffhausen.de +37938 + Lake Hospital System, Inc. + Kimberly Krueger, HIM Director + kimberly.krueger&lakehealth.org +37939 + Reykjavik University + Jens Valur Ólason + hostmaster&ru.is +37940 + WRD Technology Co., Ltd. 
+ Cheng Yang + ycflash&gmail.com +37941 + comForte 21 GmbH + H.Horst + h.horst&comforte.com +37942 + Futuver S.A. de C.V. + Nora Erika Morales Hernández + nora.morales&futuver.com.mx +37943 + Medium Link System Technology CO., LTD + Jacky Wang + jacky&ecenter-net.net +37944 + megalo & company + Nicolas Lehuen + nlehuen&megalo-company.com +37945 + Inspur(BeiJing) Electronic Information Industry Co.,Ltd + ShuangPeng Di + dishp&inspur.com +37946 + CJSC "Dzetta" + Sergey Levin + dzetta&dzetta.com +37947 + Banco Central del Ecuador + Hernan Gonzalez Lopez + eci&bce.ec +37948 + TOS.BG + Stoyan Petkov + info&tos.bg +37949 + TenneT TSO + Elsbeth Boer + websupport&tennet.eu +37950 + v-solution + Zhiyang Lu + Zhiy.lu&v-solution.cn +37951 + DataONE + Dave Vieglais + vieglais&ku.edu +37952 + MoCA by Design + Dan Kolis + dan_kolis&yahoo.com +37953 + ControlNet International Inc. + kenlee + kenlee&connet.com.tw +37954 + Kentix GmbH + Thomas Fritz + t.fritz&kentix.de +37955 + Nierhaus + Andre Nierhaus + andre&nierhaus.ch +37956 + EMH Healthcare + Tammy Smith + tsmith&emhrhs.org +37957 + Texas Regional Medical Center + Robert D. Reeves, Jr. + robert.reeves&trmcsunnyvale.com +37958 + http://www.sentinel-engine.org + Dusten Sobotta + dusten&sentinel-engine.org +37959 + paratio.com e.K. + Carsten Logemann + ldap&paratio.com +37960 + MINIADMIN + Christian Fohmann + post&miniadmin.de +37961 + COCUS AG + Alexander Janssen + ajanssen&cocus.com +37962 + Mindbender Technologies + Brent Irwin + brent&getmbt.com +37963 + Affirmed Networks, Inc. + Ming Yang + Ming_Yang&affirmednetworks.com +37964 + CTI Digital + Matt cockayne + m.cockayne&ctidigital.com +37965 + Altoona Regional Health System + Sean Murphy + smurphy&altoonaregional.org +37966 + Perth Linux Users Group + Timothy White + committee&plug.org.au +37967 + Webra JSC + Dmitry Ishutkin + spe&webra.ru +37968 + Elma Electronic GmbH + Timo Böhr + t.boehr&elma.de +37969 + OKSEI + Ivan Fedulov + ivanfedulov&gmail.com +37970 + Global Digital Inc. + Quest Chen + quest&gdnet.com.tw +37971 + EKILIA + Philippe Rongier + contact&ekilia.com +37972 + Dyalog Ltd + Andy Shiers + andys&dyalog.com +37973 + Mountain View Systems, LLC. + James Carrion + james_carrion&hotmail.com +37974 + Winthrop University Hospital + Dennis Rock + drock&winthrop.org +37975 + imessage.de + H.-Christian Schreiber + christian.schreiber&imessage.de +37976 + MetaFORM Bilisim Iletisim ve Danismanlik Ltd Sti + Zafer Genckaya + zafer&metaformltd.com +37977 + XTO Energy, Inc + Lee Scarborough + security_administrator&xtoenergy.com +37978 + TEB + MATHIEU Stéphane + smathieu&teb-online.com +37979 + Panaram Ltd + Garry Partington + garryp&panaram.com +37980 + Bayer Business Services GmbH + Ingo Wittenberg + ingo.wittenberg&bayer.com +37981 + Solution Concept s.r.o. + Ivo Jánský + ivo.jansky&solutionconcept.cz +37982 + Netstream AG + Alexis Caceda + alexis.caceda&netstream.ch +37983 + Nux Ltd. + Michal Charvat + michal.charvat&nux.cz +37984 + GsurfNET + Alessandro Pereira + cops&gsurfnet.com +37985 + CCDI + Qiu.haoqi + qiu.haoqi&ccdi.com.cn +37986 + mCarbon Tech innovation pvt. ltd. + Sushil Naresh + sushil&mcarbon.com +37987 + The Hong Kong Country Club + Robert S. HALL + rob.hall&countryclub.hk +37988 + Jack in the Box + Brent Irwin + brent.irwin&jackinthebox.com +37989 + FASTEK ITALIA S.R.L.
+ IGINO RUSSO + fastek&fastekitalia.it +37990 + Intellique + Emmanuel Florac + eflorac&intellique.com +37991 + Letechnic Ltd + Paul HEAYS + paul.heays&letechnic.com +37992 + SYSM Systemmanagement Service & Konzepte GmbH + Ralf Affeldt + ralf.affeldt&sysm.de +37993 + Hokkaido Telecommunication Network Co., Inc. + Hayaki Nakamoto + server-admins&hotnet.co.jp +37994 + i-free + Konstantin Egorov + egorov_k&i-free.com +37995 + Initiative für Netzfreiheit + Leonhard Preis + technik&netzfreiheit.org +37996 + KTS GmbH + Gregor Kotyrba + g.kotyrba&kts-systeme.de +37997 + 4G Technology + GRENIER Gilles + gilles.grenier&4g-technology.eu +37998 + Greenwood Leflore Hospital + Marvin Pate + mpate&glh.org +37999 + TransPacket AS + Morten Rolland + mortenro&transpacket.com +38000 + DELEC Audio- und Videotechnik GmbH + Georg Klug + Georg.Klug&delec.de +38001 + DIgitata LTD + Hugo Mokken + snmp&digitata.com +38002 + Innovise IES (formerly 'Innovise ESM Software Ltd.') + Jaco de Wet + jaco.dewet&innovise.com +38003 + StorMagic Ltd + Chris Farey + chris_farey&stormagic.com +38004 + Edenred SA + xiao hui loo + xiaohui.loo&edenred.com +38005 + REFLEXE Technologies + Christopher Brown + brown&reflexe.fr +38006 + Dietrich IT-Services + Daniel Dietrich + d.dietrich&dietrich-it.com +38007 + Handy Networks + Jon Aykroyd + jon&handynetworks.com +38008 + ELECOM S.A.S + Juan Manuel Franco Jácome + juanmfranco&gmail.com +38009 + FonSee Technology Inc. + Feng Yang, Chang + jeff_chang&fonsee.com +38010 + CE Labs + Srikanth Sherman + ssherman&celabs.net +38011 + NTTM (Name to Telephone Mapping) Limited + Emanuel Peri + pen&nttm.co.uk +38012 + USL, Inc. + Harold Hallikainen + harold&uslinc.com +38013 + Ampex Data Systems + Chris Douty + Chris_Douty&ampex.com +38014 + NORICS GmbH + M. Glave + m.glave&norics.net +38015 + DUHA system s.r.o. + Pavel Brychta + pavel.brychta&duhasys.eu +38016 + Exicom Tele-Systems Limited + Ajay Kumar + kumar.ajay&exicom.in +38017 + SC EXIMTUR SRL + Mihai Limbasan + support&itsquad.ro +38018 + guideye.de + Peter Köhler + BigBoss19&web.de +38019 + Special Systems Engineering Center LLC + Maxim Goncharenko + max&ssec.ru +38020 + Franklin County Public Schools + John Vehmeier + john.vehmeier&frco.k12.va.us +38021 + Cfengine AS + Jon Henrik Bjornstad + contact&cfengine.com +38022 + HappyBooking UG (haftungsbeschränkt) + Daniel Boldura + info&happybooking.de +38023 + ultrachaos + Stefan Horst + stefan&ultrachaos.de +38024 + Logic Soft sas + Paolo Tarantola + paolo&logicsoft.it +38025 + MindArray Systems (P) Ltd + Alpesh Dhamelia + alpesh&mindarraysystems.com +38026 + ACOME + LUCAS Anne + anne.lucas&acome.fr +38027 + Ltd "InfoTechnoService" + Boris Stepchenko + boris&its.kiev.ua +38028 + DANTECH Limited + Daniel Houlbrooke + daniel&dantech.net.nz +38029 + University of South Florida + Eric Pierce + epierce&usf.edu +38030 + Conngame Co.Ltd. + Sailer Shen + tshen&conngame.com +38031 + NextGate + Edy Yang + helpdesk&nextgate.com +38032 + EFOLIA + Gabriel GIL + gabriel.gil&efolia.fr +38033 + TeamF1 + Madhusudhan Kovalmudi + madhu&teamf1.com +38034 + IHSE GmbH + Heiner Esseln + esseln&ihse.de +38035 + GS1 Sweden AB + Jeremy Morton + jm&gs1.se +38036 + ChronoPay B.V. + Valentine Zaitsev + v.zaitsev&chronopay.com +38037 + Ståle Hebæk Ødegården + Ståle Hebæk Ødegården + mail&hebaek.no +38038 + Informacijsko svetovanje, Miha Valenčič, s.p. + Miha Valenčič + miha.valencic&gmail.com +38039 + Klika d.o.o.
+ Miha Valenčič + miha.valencic&klika.si +38040 + Reliable System Services Corporation + Andrew Oldenburg + aoldenburg&rsscorp.org +38041 + Via Christi Health + Denise Johnson + Denise.Johnson&viachristi.org +38042 + Unassigned + Removed 2011-06-30 + ---none--- +38043 + Hear And See Systems OÜ + Imre Tabur + imre.tabur&eesti.ee +38044 + wu-way, inc. + Basit Mustafa + basit&wu-way.com +38045 + Shenzhen Winhap Commucations Inc. + HU ZHIQIANG + lieryou&gmail.com +38046 + Joint Stock Company UniCredit Bank + Maxim Ivlev + infosecurity&unicredit.ru +38047 + JSC STC Electron-Service + Alex Moskalenko + pen&elserv.ru +38048 + Institut des Sciences de l'Homme + DUSSURGET Frederic + fdussurget&ish-lyon.cnrs.fr +38049 + Zaehlwerk GbR + Gregor Giesen + iana&zaehlwerk.net +38050 + Lightbar Software Solutions LLC + Bart Stevens + bart&lightbar.org +38051 + Boy Scouts of America + David Gersting + dgersting&oa-c4.org +38052 + Attackplan Software + Charles Orford + iana-contact&attackplan.net +38053 + Avisit Solutions Limited + Arjen Visser + arjen.visser&avisit.co.nz +38054 + Arellia Corporation + Michael Sainsbury + msainsbury&arellia.com +38055 + Gforce Inc. + Bin LIU + binliu&greencache.com +38056 + Nason Hospital + Brian Lilly + blilly&nasonhospital.com +38057 + D&B Germany GmbH + Stephan Umbach + umbach&dnbgermany.de +38058 + Cyanide Studio + Bastien Semene + admin&cyanide-studio.com +38059 + Banking production center + Glukhova Marina + glukhova&bpcbt.com +38060 + Sempla Srl + Raniero Bonelli + it&sempla.it +38061 + Edistar Srl + Denis Gasparin + denis.gasparin&edistar.com +38062 + Marathon Oil Corporation + Rod Holmes + externaldomainmgt&marathonoil.com +38063 + Suomen Asiakastieto Oy + Jussi Saarinen + jussi.saarinen&asiakastieto.fi +38064 + SSL Corp + Leo Grove + leo.grove&ssl.com +38065 + Media Alliance, CJSC + Egor Vyscrebentsov + noc&medi-a.ru +38066 + SC Romastru Trading SRL + Mihai Limbasan + support&itsquad.ro +38067 + Cray Inc (formerly 'APPRO International, Inc.') + Jeremy Higdon + jhigdon&cray.com +38068 + Centrastate Medical Center, Inc. + Mark Handerhan + mhanderhan&centrastate.com +38069 + Cetis, Inc. + Brock Munsell + bmunsell&Cetisgroup.com +38070 + Anode + Paul C. Bryan + pbryan&anode.ca +38071 + MaximaTelecom JSC + Sergey Komarov + sk&maximatelecom.ru +38072 + Linear Photonics, LLC + Tim Naples + tnaples&lintech.com +38073 + Defense Photonics Group + Khurram Kazi + k.kazi&defensephotonics.com +38074 + Statropy Software LLC + Erik Larson + info&statropy.com +38075 + Sentaca Communications + Maciej Lopacinski + maciej.lopacinski&sentaca.com +38076 + Radical Creations + Łukasz Rżanek + lukasz.rzanek&radical.com.pl +38077 + JetPay, LLC + Zephaniah Loss-Cutler-Hull + sysadmin&jetpay.com +38078 + Nanjing Yxlink Information Technologies Co.,Ltd + Zhong Wei + zhongwei&yxlink.com +38079 + DEAC Medics S de RL de CV + Hugo Romero + hromero&deacmedics.com +38080 + Pick n Pay + Duncan Rae + drae&pnp.co.za +38081 + Rank Group + Kunal Sharma + itbackoffice&rankinteractive.com +38082 + HD Vietnam Co. + Nguyen Huyen Dieu + sau&hdvietnam.com.vn +38083 + SLG Broadcast AG + Michael Hofer + michael.hofer&slgbroadcast.com +38084 + Connecture, Inc. + Mike Cernik + mcernik&connecture.com +38085 + Solo Cup Company + Matthew Talaga + matthew.talaga&solocup.com +38086 + Kordia + Tony Halligan + noc.nz&kordia.co.nz +38087 + Real I.T.
Technicians, LLC + Victor Centross + contact&realittechs.com +38088 + Joshua Tree Software, LLC + Shawn McKinney + shawn.mckinney&jtstools.com +38089 + Prometheus Research + Daniel Ferris + dferris&prometheusresearch.com +38090 + GreenMojito.org + Aaron Hinkle + aaron.hinkle&greenmojito.org +38091 + SolidFire, Inc. + Preston Wilson + preston&solidfire.com +38092 + EtherDVB Pty Ltd + Greg Wickham + greg&etherdvb.com +38093 + wopoco.de + Administrator + rk907403&wopoco.de +38094 + Thales Optronics + Karl Farmer + karl.farmer&uk.thalesgroup.com +38095 + Cooperativa de Ensino Superior Politecnico e Universitario + Antonio Salgado + dsi&cespu.pt +38096 + CyclopusCAD s.r.l. + Francesco Fauci + info&cyclopuscad.com +38097 + Universidade Estadual de Goiás + Robson Cardoso Vieira + robsoncardoso.ti&gmail.com +38098 + Rostov region office of Federal service of state registration cadaster and cartography + Besstremyannyy Sergey + master&donjust.ru +38099 + Transglobal Secure Collaboration Participation Inc. (TSCP) + Keith Ward + support&tscp.org +38100 + CAMI Research Inc. + Erik Muench + erikm&camiresearch.com +38101 + Roalter International + Luis Roalter + luis&roalter.it +38102 + Szechenyi Istvan University + Bela Csabi + csabi&sze.hu +38103 + Dataport AöR + Sascha Graf + sascha.graf&dataport.de +38104 + ProFIX Co. + Oleksii Shcherbatiuk + Oleksii.Shcherbatiuk&profix.kiev.ua +38105 + Kit Digital + Edward Shnayder + edward.shnayder&kit-digital.com +38106 + Nebraska Furniture Mart, Inc. + Thomas Applebee + tom.applebee&nfm.com +38107 + Aviosys Inc. + Albert Liu + albert&aviosys.com +38108 + MicroXel Networks, Inc. + Fred Mo + fred.mo&microxel.com +38109 + Domtar + Marc Mathieu + marc.mathieu&domtar.com +38110 + Arkivum Ltd + Richard Lowe + Richard.Lowe&arkivum.com +38111 + SHENZHEN DINSTAR TECHNOLOGIES Co.,Ltd. + Jin Rong,Zhong + zjrtx530&126.com +38112 + Wuxi ZhongXun Technology Co.,ltd. + yedaoliang + yedaoliang&163.com +38113 + Red Lion Controls Inc. + Denis Aull + Engineering&RedLion.net +38114 + Merseytravel + Mark Evans + mark.evans&merseytravel.gov.uk +38115 + Marist College + Server Administrator + server.admins&marist.edu +38116 + ClearBUS + Jean-Marc Lefebvre + jean-marc.lefebvre&clearbus.fr +38117 + University College Dublin + John Curran + security&ucd.ie +38118 + BEI Electronics LLC + Ron Marks + pen&bdcast.com +38119 + Open Mobile Video Coalition + Anne Schelle + anne.schelle&omvc.org +38120 + UMass Memorial Medical Center + Lorena Mandozzi + lorena.mandozzi&umassmemorial.org +38121 + IOD Incorporated.com + Lenny Dexter + ldexter&iodincorporated.com +38122 + Red Wire Services, LLC + Nick Webb + nickw&redwireservices.com +38123 + kinkajougames + juan pablo gaviria + juanpablo&kinkajougames.com +38124 + Harmonic France (formerly 'Thomson Video Networks') + Patrick Gendron + patrick.gendron&harmonicinc.com +38125 + EuroDNS SA + Xavier Buck + icann&eurodns.com +38126 + COGITOP SAS + VIVERET Jean-Marie + jm.viveret&cogitop.fr +38127 + IES Doctor Balmis + Eduardo Barredo + eduardo&iesdoctorbalmis.com +38128 + Laurentian University + Marty Laferriere + mlaferriere&laurentian.ca +38129 + Akumen LTD + Istvan Csaky + csaky&akuware.com +38130 + Faith Regional Health Services + Kim Stinson + kstinson&frhs.org +38131 + RG Nets + Simon Lok + scl&rgnets.com +38132 + eTruhla s.r.o.
+ Vit Hnilica + info&etruhla.cz +38133 + Ingnitia http://www.ingnitia.com + Mikel Mugarza + mmugarza&ingnitia.com +38134 + National Document Custodians, LLC + Albert Wheeler + devadmin&documentcustodians.com +38135 + Shield Bridge Inc. + William Bai + wbai&thebais.com +38136 + In Touch Pharmaceuticals, Inc + Mitch Gaffigan + mitch&intouchpharma.com +38137 + Fusionskye + Wang Yong + wangyong&fusionskye.com +38138 + Hochschule Regensburg + Athanassios Tsakpinis + athanassios.tsakpinis&hs-regensburg.de +38139 + pro parity IT-Service + Dominik George + info&proparity.de +38140 + Imtech Marine + Pascual de la Cruz + pascual.delacruz&imtech.nl +38141 + Emich KsE - Kundenspezifische Elektronik + Marc Emich + info&emich-kse.de +38142 + BHE Bonn Hungary Electronics Ltd. + Gyula Mikó + gyula.miko&bhe-mw.eu +38143 + CloudBees, Inc. + Engineering Operations + operations&cloudbees.com +38144 + Displaydata Ltd (formerly 'ZBD Displays Limited') + Greg Beresford + greg.beresford&displaydata.com +38145 + ECG, Inc. + James Puckett + jpuckett&e-c-group.com +38146 + Access Now + Daniel Bryg + dbryg&yahoo.com +38147 + Mind Matters LLC + Deepa Abraham PhD + mobileofficepro&att.net +38148 + EMCALI E.I.C.E. E.S.P. + David Blandon Romaña + dblandon&emcali.net.co +38149 + Teknokala Ky + Timo Santasalo + timo.santasalo&teknokala.com +38150 + Altia Consultores S.A. + Luis Alberto Asturias Núñez + luis.asturias&altia.es +38151 + Invendis Technologies + Devashis Sahoo + devashis.sahoo&invendis.com +38152 + Bauerfeind AG + Daniel Scharf + daniel.scharf&bauerfeind.com +38153 + Dada S.p.A. + Fabio Coatti + sistemi&dada.eu +38154 + Autopart International, Inc. + James Russell + it&autopartintl.com +38155 + EXATEQ Ltd. + Mark Fidell + mark.fidell&exateq.com +38156 + Zepheira LLC + Zepheira PEN Contact + iana.pen&zepheira.com +38157 + Policìa Nacional de Colombìa + Natalia Andrea beltran + natalia.beltran&policia.gov.co +38158 + Open Platform Trust Services + Seiji Munetoh + seiji.munetoh&gmail.com +38159 + Unity Global Corporation + Pamela Turner + turner&unityglobalcorporation.com +38160 + Datto Inc. + Dan Fuhry + dfuhry&datto.com +38161 + W.W. Grainger, Inc. + Eleazar Flores + eleazar.flores&grainger.com +38162 + BNTPRO BILGI ve ILETISIM HIZ. LTD. STI. + Fatih BATUM + fatih&bntpro.com +38163 + Kiesel GmbH + Andrease Imhof + it&kiesel.net +38164 + Ticket Monster Inc + Doo Han Yoon + sys&tmon.co.kr +38165 + Kohlpharma GmbH + Imonikhe Imonah + serveradminwin&kohlpharma.com +38166 + nexurium.fr + Joly Bastien + joly.bastien&gmail.com +38167 + Albrecht Jung GmbH & Co. KG + André Paul + iana_admin&jung.de +38168 + The Taubman Company + Quang Nguyen + qnguyen&taubman.com +38169 + Life Image Inc + Hai Wang + hwang&lifeimage.com +38170 + LocalNet Corp. + Ed Szynaka + ln_eds&localnet.com +38171 + fischerwerke GmbH & Co. KG + Johannes Klumpp + admin-c&fischer.de +38172 + CustosMobile + Carlos Blanco + carlos.blanco&custosmobile.com +38173 + Adminia Sistemas + Javier Martín + sistemas&adminia.es +38174 + Hamilton Medical Center + Chad White + cwhite&hhcs.org +38175 + Dali Wireless, Inc. + Albert S. Lee + lee&daliwireless.com +38176 + Marken Mehrwert AG + Hajo Kliemeck + klk&mmw.ag +38177 + iCue + Ashutosh Vighne + avighne&icue.biz +38178 + Skytide, Inc + Laxman Chinnakotla + lchinnakotla&skytide.com +38179 + Bard College + David Brangaitis + dbrangaitis&bard.edu +38180 + Kangaroo Media Inc. + Pedro Costa + pcosta&fanvision.com +38181 + Trinity Health + Edward Trail + traile&trinity-health.org +38182 + CUBE. 
Corporate Release SA + Kamil Kurzyk + kkurzyk&cube-cr.pl +38183 + Pythagore FD + Frédéric Helmer + f.helmer&pythagore-fd.fr +38184 + Heidelberg Mobil International GmbH + Christian Zimmermann + christian.zimmermann&hdm-i.com +38185 + Detailresult Groep + J. van Leeuwen + jvleeuwen&detailresult.nl +38186 + Xpandia Ltd. + Ian S Gordon + ian&xpandia.co.uk +38187 + Weidmüller Interface GmbH & Co. KG + Sebastian Wolf + sebastian.wolf&weidmueller.de +38188 + StormMQ Limited + Raphael Cohn + raphael.cohn&stormmq.com +38189 + Telemedia LTD + Tamas Kanai + tamas.kanai&telemedia.hu +38190 + Magna + Tho Nguyen + Tho.Nguyen&magna.com +38191 + Memjet, Inc. + Bob Woods + bob.woods&memjet.com +38192 + Gravity R&D Limited + Gabor Vincze + administration&gravityrd.com +38193 + Yuduan Mobile Co., Ltd. + Xin Chen + hhuang&yuduanmobile.com +38194 + Naga Concept + Jean-Marc Houbiers + jmh&nagaconcept.com +38195 + Gameservers.com + Brian Rak + brak&gameservers.com +38196 + Flagship Facility Services + Jasmeet Sidhu + sidhu.j&gmail.com +38197 + Lacuna Systems, Inc. + Derek Andree + dandree&lacunasystems.com +38198 + Rincon Research Corporation + John Forinash + jmf&rincon.com +38199 + Fraser Talbot Consulting Services; L.L.C + Fraser Talbot + fraserhtalbot&ft-cs.com +38200 + Libéo + Jean-François Rousseau + info&libeo.com +38201 + Ronald Hummelink + Ronald Hummelink + ronald&hummelink.net +38202 + NETILITY GmbH & Co. KG + Arno Schenk + hostmaster&netility.de +38203 + Interactive Systems Engineering SRL + Aurel Chiriac + aurel.chiriac&intersystems.ro +38204 + Six DEE Telecom Solutions Pvt. Ltd. + Kenil Paul + kenil&6dtech.co.in +38205 + University Teleinformatic Center - UTIC, University of Sarajevo + Enes Halilovic + dnstech&utic.ba +38206 + Eloquera + Dmytro Bablinyuk + dmytro&eloquera.com +38207 + Fujian JinQianMao Electronic Technology Co.,Ltd. + ROY HUANG + jqm&fjjqm.com +38208 + meaganharris.net + Meagan Harris + meagan&meaganharris.net +38209 + VMC Systems Ltd. + Abhijit Bhalekar + abhijit.b&vmcindia.com +38210 + Xovis AG + Markus Herrli + markus.herrli&xovis.com +38211 + JSC 'Siberian Coal Energy Company' + Pashkov Sergey + Pashkov&suek.ru +38212 + Derichebourg + Nikic Milan + hotline&derichebourg.com +38213 + ACCENT-ELECTRONIC + Alexander Mashtakov + amashtakov&accent.md +38214 + Vlatacom Institute + Zoran Crnobrnja + ict-team&vlatacom.com +38215 + Hochschule Hamm-Lippstadt - University of Applied Sciences + Michael Steuter + michael.steuter&hshl.de +38216 + Blumenthal S.R.L. + Gion Blumenthal + gion&blumenthal.it +38217 + gigatec GmbH + Stephan A. Klein + servicedesk&gigatec.de +38218 + Designer Systems Ltd + David Ingleby-Oddy + david&designersystems.co.uk +38219 + Therap Services, LLC + Md. Nazrul Islam + nazrul&therapservices.net +38220 + Private Unitary Enterprise on Rendering Services "Greenwall Systems" (formerly 'IZAO Greenwall Systems') + Mikhail Kuznetsov + info&greenwall.by +38221 + IP Gorshkov Yuri Valerevich + Yuri V. Gorshkov + yvgorshkov&gmail.com +38222 + Philips North America LLC + Mike King + mike.king&philips.com +38223 + definition six, inc. + Chris Hecker + support&d6.com +38224 + Caromont Health + Mike Johnson + johnsonm&caromonthealth.org +38225 + Encell Technology, Inc. + Gordon Hwang + ghwang&encell.com +38226 + IDnow SAS + Marc NORLAIN + marc.norlain&idnow.io +38227 + Wellington and King, Inc. 
+ Charle Demers + cdemers&wellingtonking.com +38228 + PICA GmbH + Moritz Maier + moritz.maier&pica.de +38229 + GIVC + Eugene Sokolov + sokolov&givc.ru +38230 + Kostroma State Technological University + Serge Beletskiy + bs&kstu.edu.ru +38231 + Lycée Sainte Marie du Port + tedesco nicolas + ntedesco&lpegt-smp.fr +38232 + Onondaga Community College + Keith Conger + sysadmin&sunyocc.edu +38233 + ARCHE Systeme GmbH + Peter Weinstock + peter.weinstock&arche.de +38234 + TESSCO Technologies Incorporated + Michael Kaegler + kaeglerm&tessco.com +38235 + ENTERSEKT (PTY) LTD + Chris Kistner + chris&entersekt.com +38236 + Colsys s.r.o. + Jiri Merth + merth&colsys.cz +38237 + Elverson Rod and Gun Club + Doug Pratt + doug&elversonrodandgunclub.com +38238 + karnal talat private limited + jahangir hussain + hussain.jahangir61&gmail.com +38239 + OARC, Inc. + Geoffrey Sisson + geoff&dns-oarc.net +38240 + Butte-Glenn Community College District + Christian Dyckman + noc&butte.edu +38241 + Hanlong Technology Co.,Ltd. + Navy Ding + navy&hanlongtek.com +38242 + Shenzhen 33e9 E-Business Co., Ltd + zengweizhi + zengweizhi&33e9.com +38243 + Thecus Thecnology Corp. + Bonny Lin + bonny_lin&thecus.com +38244 + TOKYO RADAR CORPORATION + Masayuki Iwasawa + iwasawa&radar.co.jp +38245 + MIBUC-NET + Michael Buchmann + info&mibuc-net.de +38246 + FSC International Center GmbH + Andre de Freitas + fsc&fsc.org +38247 + Charleston Area Medical Center, Inc. + Bradley B. Young, Chief Technology Officer + brad.young&camc.org +38248 + Nebraska Heart Hospital + Oliver Banta + SoarianProjectSupport&neheart.com +38249 + Niagara Health System + Patrick Radics + licensing&niagarahealth.on.ca +38250 + Proventa AG + Clemens Bergmann + it-admins&proventa.de +38251 + Zmanda, Inc. + Ramesh Gupta + ramesh&zmanda.com +38252 + EOSSOnline Limited + Steven McArdle + Smcardle&eossonline.com +38253 + Athenta Technologies (P) Ltd., India + Jacob A Thomas + jacob.thomas&athenta.com +38254 + Resolan + Julien Francois + admin&resolan.com +38255 + Khalid Shukri IT-Beratung + Khalid Shukri + khalid.shukri&web.de +38256 + ItsOn, Inc. + David Johnson + drj&itsoninc.com +38257 + nuinno + Stefan Nunninger + stefan.nunninger&nuinno.de +38258 + WhipTail Technologies + James Candelaria + jc&whiptailtech.com +38259 + ADD-Technology SRL + Serghei Druta + Serghei.Druta&addgrup.com +38260 + 4SMS ApS + Josip Djuricic + jd&4sms.dk +38261 + Brightstone + Aleksander Adamowski + aleksander.adamowski&gmail.com +38262 + Boxcar Press, Inc + Brian Pribis + brian&boxcarpress.com +38263 + Thomas Memorial Hospital + Charles Covert + Charlie.Covert&thomaswv.org +38264 + Midwest Health Systems Data Center + Bob Lies + rlies&mhsdc.com +38265 + Intendencia de Canelones + Carlos Pirez + carlos.pirez&imcanelones.gub.uy +38266 + Chinese Bible Church of Greater Boston + Patrick Y. Lin + plin&cbcgb.org +38267 + UTS Programmers' Society + Tomislav Bozic + admin&progsoc.org +38268 + Threeline LLC + Alexander Orlov + aorlov&trxline.ru +38269 + rrbone + Dominik Bay + db&rrbone.net +38270 + inTaligent Digital Consultants + Charles A. Moorman + cam&intaligent.net +38271 + inTaligent Digital Consultants + Charles A. Moorman + cam&intaligent.net +38272 + Liberti + Rokhim Purboyono + rokhim.purboyono&liberti.co.id +38273 + AvePoint, Inc. + James Zhu + James.Zhu&avepoint.com +38274 + Safirion GmbH + Joerg Franke + franke&safirion.de +38275 + 3-IK + Matias D. Banchoff T. + matias&3-ik.com.ar +38276 + CipSoft GmbH + Thomas Kraeuter + techsupport&cipsoft.com +38277 + r-tec IT Systeme GmbH + Dr. 
Stefan Rummenhoeller + rummen&r-tec.net +38278 + Memorial Hospital of South Bend + Erek Sherwood + esherwood&memorialsb.org +38279 + Pilot Corporation of America + Rick Siciliano + OID-Admin-PCA&pilotpen.com +38280 + Millennium Information Technologies (Private) Limited + Rahal Jayawardena + rahal&millenniumit.com +38281 + 1st Setup + Michel Verbraak + info&1st-setup.nl +38282 + Italtelec International S.r.l. + Giacomo Burdi + swdepth&italint.com +38283 + MobiWork LLC + Herve RIVERE + hrivere&mobiwork.com +38284 + PacketAccess + Kevin Robinson + krobinson&goldtelecom.com +38285 + Live-PA Ltd + Paul Bentley + paul.bentley&live-pa.com +38286 + pki24.com + Stephan Callsen + info&callsen.org +38287 + D-TACQ Solutions Ltd + Peter Milne + peter.milne&d-tacq.com +38288 + Swiss public broadcasting organisation (SRG SSR) + Luciano Ramagnano + luciano.ramagnano&srgssr.ch +38289 + LeetGeek Pty Ltd + Ben Corbett + ben&leetgeek.com.au +38290 + IntelliVoice Co,Ltd. + Akiko Izawa + izawa&intellivoice.co.jp +38291 + G Data Software AG + Hendrik Flierman + Hendrik.Flierman&gdata.de +38292 + Damall Technologies Sas + Silvia Fioranelli + info&damallgroup.com +38293 + Dynamic Technical Solutions Inc. + Shawn May + shawn&d-t-s-inc.com +38294 + IOMachine LLC + Nilanjan Bhowmik + billing&iomachine.com +38295 + Pro Broadband Inc. + Xinkai Wang + xkwang&pbicn.com +38296 + Multi Sinergi Infrastruktur, PT + Trijaya Danisaputra + trijaya_danisaputra&multisinergi.co.id +38297 + ComMedia, Lda + Antonio Paneiro + info&commedia.pt +38298 + DaGo Solutions + Dave Goldstraw + technical&dago.co.uk +38299 + Dillard's Inc + Shawn Thex + Shawn.Thex&Dillards.com +38300 + GrubHub + Devon Mackay + systems&grubhub.com +38301 + hosthis.org + Jonathan THIBERVILLE + jonathan.thiberville&gmail.com +38302 + ITSUMO s.c. + Szymon Kowalczyk + pkit&itsumo.pl +38303 + Pask + Thien Pask + Tepask&gmail.com +38304 + CrossImaging Inc. + Hisayuki Naitoh + naitoh&crossimaging.jp +38305 + LybreDyne Technologies + Christopher Dobbs + crdobbs&lybredyne.net +38306 + MASIBUS AUTOMATION AND INSTRUMENTATION PVT LTD + Sumitkumar Nagar + sumitnagar&masibus.com +38307 + OIS-Net + Juerg Oehler + juerg.oehler&ois-net.org +38308 + Joint Stock Company «Banking & Financial Network» + Kovalyov Andrey + a_kovalyov&bfn.by +38309 + Cirquent GmbH + Hermann Baier + Hermann.Baier&cirquent.de +38310 + PREMIER Bankcard, LLC + Michael Baker + Michael.Baker&premierbankcard.com +38311 + OPTIM'HEXA + Jean-Charles Lopez + jc.lopez&optimexa.com +38312 + CareView Communications + Matt Clark + mclark&care-view.com +38313 + STG (Stargazer) + Maxim Mamontov + faust&stg.dp.ua +38314 + adjectivism.org + Erick Turnquist + jhujhiti&adjectivism.org +38315 + National Land Survey of Iceland + Samúel Jón Gunnarsson + sysadmin&lmi.is +38316 + SEKONIC CORPORATION + HIROSHI TAKEI + h.takei&sekonic.co.jp +38317 + JR East Mechatronics Co.Ltd. + Kiyotaka Suzuki + suzuki-kiyotaka&jrem.co.jp +38318 + Ing. 
Buero Fischer GmbH + Bernhard Fischer + fischerb&fischer-ing.de +38319 + Lantiq Deutschland GmbH + Olaf Wachendorf + olaf.wachendorf&lantiq.com +38320 + Bubblephone Ltd + Johnathan Turrall + john.t&bubblephone.com +38321 + Greatnet.de OHG + Stephan Sedlmeier + ss&greatnet.de +38322 + Strangeloop Networks + Alex McCubbin + alex.mccubbin&strangeloopnetworks.com +38323 + Stuart McGraw + Stuart McGraw + smcg4191&mtneva.com +38324 + emagsoftware + dingrong + dingrong&emagsoftware.cn +38325 + Precogen, LLC + Brian McKenna + oid&precogen.com +38326 + nepda + Nepomuk Frädrich + webmaster&nepda.de +38327 + Vokzal-infokom + Ivoilovsky Mikhail + mivoilovsky&yandex.ru +38328 + LvlUp e.V. + Olaf Rühenbeck + some.individuum&gmail.com +38329 + Three Laws of Mobility + Sridhar Venkatakrishnan + sridhar&3lm.com +38330 + Datametrix AS + Glenn Andersson + glenn&datametrix.no +38331 + Skeed Co., Ltd. + YANAGISAWA, Kentaro + k-yanagisawa&skeed.co.jp +38332 + LeonIT GbR + Maximilian Thoma + info&leonit.net +38333 + Network Solution Technologies Co., Ltd. + Minyoung Lee + mylee&nstco.co.kr +38334 + ArchOne + Roman Sergey + rs&archone.org +38335 + AVIN Networks Private Limited + Pankaj Kumar Roy + pankaj_roy&avinnetworks.com +38336 + Platte Valley Medical Center + Darrell Messsersmith + dmessersmith&pvmc.org +38337 + Power Instruments Pte Ltd. + Wang Yetao + wangyt&powerinstruments.com.sg +38338 + Suntel Ltd + Arasaratnam Umakanthan + UmakanthanA&suntel.lk +38339 + Centrum Informatyki Statystycznej + Roman Mendaluk + r.mendaluk&stat.gov.pl +38340 + Broadcast Partners + Yves Vermeersch + support&broadcastpartners.nl +38341 + AGETO Innovation GmbH + Christian Kahlo + c.kahlo&ageto.net +38342 + City of Hope + Jon Sharp + jsharp&coh.org +38343 + Wilcom Technologies + Qizhi Yi + yiqizhi&wilcom.com.cn +38344 + Maxwell Worthington + Mark Madere + mark&maxwellworthington.com +38345 + S&T Ukraine + Iurii Chernichenko + iurii.chernichenko&snt.ua +38346 + Pallas GmbH + Stephan Sachweh + helpdesk&pallas.com +38347 + Ing. Günther Vlaschits + Ing. Günther Vlaschits + office&vlaschits.at +38348 + ISDEFE Ingenieria de Sistemas para la Defensa de España S.A. + Carlos Manuel Gonzalez Rodriguez + cmgonzalez&isdefe.es +38349 + Firstcom A/S + Jon Schøpzinsky + jos&firstcom.dk +38350 + XtreemFS project + Michael Berlin + berlin&zib.de +38351 + net-lab GmbH + Andreas John + internet&net-lab.net +38352 + National Information Technology Center + Nabeel Al-Fayoumi + generaldirector&pki.jo +38353 + Weebly, Inc. 
+ Network Operations + netops&weebly.com +38354 + Princeton Consultants + Attila Mihalyi + amihalyi&princeton.com +38355 + TTTech Computertechnik AG + Dominique Riezler + dominique.riezler&tttech.com +38356 + Mike Becker + Mike Becker + mike.becker&redpeppersworld.de +38357 + ActualMed + Rafael Forcada + rafael.forcada&actualmed.com +38358 + Peak Reliability (formerly 'Western Electricity Coordinating Council') + Lyonell Keplar + lkeplar&peakrc.com +38359 + Meissner + Oliver Meissner + oliver-sd94&la-familia-grande.de +38360 + Delled Company Ltd + Delmiro Paes + delmiro&delled.com.br +38361 + Exabre Ltd + Chris Jupp + chris.jupp&exabre.com +38362 + alpha dot net Australia + James Ponza + jponza&alpha.net.au +38363 + PT Primatama Sentra Solusi + Martin Otterman + otterman&primatama.net +38364 + Netsis Technology + Edgardo Gonzales II + edgardo.gonzales&netsis.com.sg +38365 + BKW Management AG + Abidin Vejseli + abidin.vejseli&bkw.ch +38366 + Joint-Stock Bank "Master-Bank" + Aleksey Jastrebcev + snmpmaster&masterbank.ru +38367 + KIS Information Services GmbH + Joachim Kaule + joachim.kaule&kis-is.com +38368 + Radimetrics Inc. + John Adziovsky + john&radimetrics.com +38369 + ZiXi, LLC + Igor Ratner + igor&zixi.us +38370 + jetNEXUS Limited + Greg Howett + ghowett&jetnexus.com +38371 + PT. Cahaya Barumas Sejahtera + Rudiman + rudiman&cbsindustry.com +38372 + PT INPAR SAKA + Ahmad Hidayat + ahmad&inparsaka.co.id +38373 + Greenville Hospital System University Medical Center + Jerry L. Cox, MISM, CPHIMS + jcox&ghs.org +38374 + Cloudera + Matt Massie + matt&cloudera.com +38375 + Linus van Geuns + Linus van Geuns + linus&vangeuns.name +38376 + The Chester County Hospital + Ray Hess + rhess&cchosp.com +38377 + Tohojo ApS + Toke Høiland-Jørgensen + toke&tohojo.dk +38378 + FeedHenry Ltd. + James Mernin + iana.ops&feedhenry.com +38379 + Inter Media Group + Denys Vernychenko + vdv&inter.ua +38380 + AMG.net S.A. + Marek Strejczek + marek.strejczek&amg.net.pl +38381 + Unetvale Servicos e Equipamentos Ltda. EPP + Rafael Domingues + noc&unetvale.net.br +38382 + znt Zentren für Neue Technologien GmbH + Radu Mihail + m.radu&znt.de +38383 + WorldCare Clinical LLC + Patrick Chokron + pchokron&wcclinical.com +38384 + DBAPP Security + Jiang Junyong + junyong.jiang&dbappsecurity.com.cn +38385 + Svyazcom Ltd. + Ivan Zubkov + zubkov&svyazcom.ru +38386 + Coop Denmark A/S + Peter Dahl Vestergaard + peterdv&coop.dk +38387 + Smart Design sprl + Bruno Dagnelie + bdagnelie&smartdesign.be +38388 + HANGZHOU LAYBEL ELECTRONIC TECHNOLOGY CO,. LTD + wuyong + wuyong&laybel.com.cn +38389 + Verisec AB + Tony Buss + tony.buss&verisec.com +38390 + Railinc Corporation + Chad Boos + rcisecurity&railinc.com +38391 + iroïd + Tom Wagner + dev&iroid.net +38392 + University of Dayton + Steven Smith + smithstm&udayton.edu +38393 + SJX Soft + Simon J Xuereb + sjxuereb&gmail.com +38394 + Interfibra Telecomunicaciones, S.L. (formerly 'Teleyecla, S.L.') + Francisco José Bernal Fernández + fj.bernal&interfibra.es +38395 + Phillips Foods, Inc. + Bradley H. Grant + bgrant&phillipsfoods.com +38396 + Flinders Christian Community College Inc + David Tredinnick + david.tredinnick&flinders.vic.edu.au +38397 + TSB - Tiefdruck Schwann-Bagel GmbH & Co.KG + Mr. Soufian al Maziani + soufian.almaziani&tsb.de +38398 + AO TD BOVID + Vidgof Mikhail Borisovich + sn&bovid.ru +38399 + Kloeckner & Co SE + Ruediger Pohl + coit&kloeckner.de +38400 + nfon AG + Jan-Peter Koopmann + technik&nfon.net +38401 + Veridian Solutions Pty. 
Ltd + Michael Junek + michael.junek&veridian.com.au +38402 + C.I. Centro de Informacoes + Jailson Lessa Marques + jailson&infonet.com.br +38403 + vacc.ch + Hanspeter RUTSCHMANN + hp&vacc.ch +38404 + Maintainet AG + Uwe Carsten Krause + ukrause&maintainet.de +38405 + Autotalks + Alex Reicher + alex.reicher&auto-talks.com +38406 + Roscom Ltd + Harry Sheppard + it&roscom.co.uk +38407 + Chesapeake Ltd + Andrew Hardy + andrew.hardy&chesapeakecorp.com +38408 + OKIT GmbH + Olaf Krause + iana_ac&okit.de +38409 + CargoSoft LLC + Alexey Gerasimenko + admin&cargosoft.ru +38410 + Raikosoft GmbH + Joachim Kluemper + joachim.kluemper&raikosoft.com +38411 + Knology + Paul Schmidt + paul.schmidt&knology.com +38412 + Fun & RelaX GmbH + Eckart Uhlig + eckart_uhlig&hotmail.com +38413 + Lodgeistics Inc. + Justin Kaufman + jkaufman&lodgeistics.net +38414 + FusionDirectory + Benoit Mortier + contact&fusiondirectory.org +38415 + Clavius-Gymnasium Bamberg + Wolfgang Faltin + it&srv.cg.bamberg.de +38416 + Nordic ID Oy + Nikolai Raita + nikolai.raita&nordicid.com +38417 + Kybeire Ltd + John Eire + kybeire&gmail.com +38418 + UFMA - Universidade Federal do Maranhão + Diego Figueiredo + diegolcf&ufma.br +38419 + Rauland Australia Pty Ltd + Mathew McKernan + matmckernan&rauland.com.au +38420 + Yottaa Inc + Jason Parsons + noc&yottaa.com +38421 + ALL-RTP + Mohamed LALMI + lalmi&all-rtp.com +38422 + Tesync Technology Limited + Rahul Choudhary + rahul.choudhary&tesync.com +38423 + PBeaKK + Thomas Fillips + thomas.fillips&pbeakk.de +38424 + BalTstamp + Sandra Kocetkova + sandra.kocetkova&baltstamp.lt +38425 + Private University College of Education of the Diocese of Linz + Wolfgang Uebermasser + wolfgang.uebermasser&dioezese-linz.at +38426 + State company "Russian highways" + Danil Tolkalin + D.Tolkalin&russianhighways.ru +38427 + Genève Aéroport + Jerôme BOCH + it&gva.ch +38428 + SlimPay + Jan Tapuska + jan.tapuska&slimpay.net +38429 + Dryden Municipal Telephone System + Adam Thompson + athompso&dmts.biz +38430 + Irell & Manella LLP + David James + djames&irell.com +38431 + Communications & Power Industries, Satcom Division + John Overstreet + john.overstreet&cpii.com +38432 + Oberon, Inc. + Travis J. Weaver + tjw&oberonwireless.com +38433 + NetAxis Solutions + Raphael Benedet + management&netaxis.be +38434 + OpenCSI + Bruno Bonfils + bbonfils&opencsi.com +38435 + IQgroup + Daniel Abegglen + iana&iqgroup.ch +38436 + ReiniNET + Patrick Reinhart + patrick&reini.net +38437 + Vlastimil Kupsky + Vlastimil Kupsky + KupskyV&seznam.cz +38438 + GTwins + shuaiwang + wangs&gtwins.cn +38439 + Aurenz GmbH + Andreas Sander + a.sander&aurenz.de +38440 + HEROLD Business Data GmbH + Herold Operating + operating&herold.at +38441 + Blue-Sphere + Marro Pasquale + pasquale.marro&gmail.com +38442 + INRADIOS GmbH + Steffen Bittner + steffen.bittner&inradios.com +38443 + OrbiConnect GmbH & Co. KG + Benjamin Biel + oid&orbiconnect.de +38444 + Holiday Extras Limited + Kieran McGettrick + kieran.mcgettrick&holidayextras.com +38445 + VMC + Trevor Seward + trevorse&vmc.com +38446 + Enlogic Systems, LLC + Joel Greene + joel.greene&enlogic.com +38447 + Ing. Viliam Oršula - iElectro + Viliam Oršula + ielectro&ielectro.eu +38448 + NOLOGIN CONSULTING SL + Richard Rojas + richard.rojas&nologin.com.bo +38449 + Nationwide Building Society + AD Management Committee + admcmail&nationwide.co.uk +38450 + DediPower Managed Hosting Ltd. + David Joy + support&dedipower.com +38451 + LanderNet Inc. 
+ Isak Adde + addeisak&hotmail.com +38452 + QQ2 + Harry Sheppard + hcs&qq2.net +38453 + Indigo Dynamic Networks, LLC + Adam Rich + support&indigodynamic.com +38454 + Adpuntum GmbH + DI Mandl Alexander + alexander.mandl&adpunctum.com +38455 + Cyberware Inc. + Takafumi Hiraka + hiraka&exm.cyberware.co.jp +38456 + IC "ISKRA" Ltd. + Prodayvoda Vitaly + Prodayvoda_V&uckpa.ru +38457 + ecvision + Billy Chan + billychan&ecvision.com +38458 + Nachtwacht + W.P. Snel + walters&nachtwacht.org +38459 + SBIT GmbH + Mark Becker + mark.becker&sbit-gmbh.de +38460 + Gemtek Technology Co., Ltd. + Masu Lin + masu_lin&gemtek.com.tw +38461 + ragenet llc + Dan Theisen + admin&ragenet.us +38462 + Pt Magnar Nugraha + Wawan Wiratno + wawan_wiratno&yahoo.com +38463 + Data Service Integration + DESSAI Imrane + dessai.imrane&dsi-oi.com +38464 + Snoerf.org + Veli-Matti Lammentausta + velkka&snoerf.org +38465 + Avalcom Sp. z o.o. + Jacek Bilski + jacek.bilski&avalcom.pl +38466 + Max-Planck-Institut für Molekulare Genetik + Donald Buczek + buczek&molgen.mpg.de +38467 + Carl Edelmann GmbH + Frank Maurer + f.maurer&edelmann.de +38468 + Garage Center for Contemporary Culture + Alexey Bykov + gd&gccc.ru +38469 + Axtion Sdn Bhd + Lam Chang Jium + cjlam&axtion.com.my +38470 + Bison Schweiz AG + Patrick Reinhart + patrick.reinhart&bison-group.com +38471 + Multimediakontor Hamburg GmbH + Uwe Carsten Krause + ukrause&maintainet.de +38472 + Kevin Mullet + Kevin Mullet + kwm&themullets.net +38473 + Ellis Hospital + Derek Dzimiera + dzimierad&ellismedicine.org +38474 + Blackridge Technology + Charles M. Tellechea, Jr. + ctellechea&blackridge.us +38475 + Global SATCOM Technology, Inc. + Cecil Lo + cecil.lo&globalsatcom.com +38476 + prosozial GmbH + Holger Schmidt + itinfo&prosozial.de +38477 + Antaira Technologies, LLC. (formerly 'Aaxeon Technologies, LLC.') + Peter Szyszko + peter.szyszko&antaira.com +38478 + VerticalCue Design LLC + John Vrbanac + info&verticalcue.com +38479 + Seluxit + Daniel Lux + daniel&seluxit.com +38480 + ABPM + Marek Ciszynski + marekc&abpm.com.pl +38481 + M Financial Group + Preston Schram + preston.schram&mfin.com +38482 + MetaStack Solutions Ltd. + David Allsopp + administration&metastack.com +38483 + netmonitor + Sandra Puffing + office&netmonitor.at +38484 + Innerdive Solutions, LLC. + Victor Fedoseev + vvf&innerdive.com +38485 + Westchester Digestive Disease Group LLP + Dr. Peter Wayne + drpwayne&gmail.com +38486 + Eurotec Information Systems K.K. + Hideo GOTO + gotoh&eis.co.jp +38487 + Quadro Systems + Aleksander Protsenko + support&quadrosystems.com +38488 + Exceptional Innovation, LLC. + Geoffrey Carr + gcarr&life-ware.com +38489 + WD-40 Company + Matthew DiSabatino + mdisabatino&wd40.com +38490 + Nebula, Inc. + Devin Tres + iana&nebula.com +38491 + Clean Communications Ltd + Ralph Casey + rcasey&theemaillaundry.com +38492 + WorldmingoSolutions + Tai Mingo + tmingo&worldmingosolutions.com +38493 + VPT + Claudio tapia + claudio.tapia&vptsa.cl +38494 + CLINTWORLD GmbH + Florian Albrecht + florian.albrecht&clintworld.de +38495 + PERTIMM + Ludovic GENOT + alias.iana&pertimm.com +38496 + Digg, Inc. 
+ Warren Turkal + wt&digg.com +38497 + Collins Systems + Ralph Collins + ralph.collins1&gmail.com +38498 + Georgia-Cumberland Conference + Andrew LaPierre + andrew&gccsda.com +38499 + Versadial Solutions + Gene Shennikov + ianaoid&versadial.com +38500 + CareTech AB + Magnus Andersson + itenheten&caretech.se +38501 + Fundação Parque Tecnológico Itaipu + Rodrigo Renie de Braga Pinto + rodrigo&pti.org.br +38502 + BizNet Corporación Costa Rica + Harvey Villalobos León + harvey&biznetcorp.net +38503 + Advtal52 + Mariano Rodrigues Lopez + mariano&advtal52.com.ar +38504 + Skyworth + Cai Zhongpeng + caizhongpeng&skyworth.com +38505 + OJSC OC ROSNEFT + Vitaly A. Sergienko + v_sergienko&rosneft.ru +38506 + Plus Pack A/S + Martin B. Eskesen + ianaoid&pluspack.com +38507 + Depalol Gestió i Multimèdia + Eduard de Palol Pujadas + info&depalol.net +38508 + BW Broadcast Ltd + Brad Plant + info&bwbroadcast.com +38509 + DNS:NET Internet Service GmbH + Marlon Berlin + marlon.berlin&dns-net.de +38510 + Colorcon, Inc. + Jim Cassella + jcassella&colorcon.com +38511 + Toyota Motor Corporation Australia + Ashish Oliver + ashish.oliver&toyota.com.au +38512 + St Mary's College + Alex Matthews + ams&stmaryscollege.co.uk +38513 + PJSC Myronivsky Hliboproduct + Yuriy Molchanov + admin&mhp.com.ua +38514 + Fr. Sauter AG + Fritz Sauter + dnsmaster&ch.sauter-bc.com +38515 + IXC + Mykhaylo Yehorov + noc&ixc.ua +38516 + Allworx + Mark Nenni + MNenni&allworx.com +38517 + St Joseph Healthcare + Kelly Robinson + kelly.robinson&sjhhealth.com +38518 + ATMA Tecnologia Ltda. + Carlos Roberto Araujo Pinto Jr. + carlos&atmatecnologia.com.br +38519 + DeltaVision Sp. Z o.o. + Krzysztof Magosa + krzysztof.m&deltavision.pl +38520 + Semmle Ltd. + Neil Ongkingco + neil&semmle.com +38521 + Občanské sdružení Ubuntu pro Českou republiku + Ondřej Surý + admin&ubuntu.cz +38522 + Zhejiang Post + Yongsheng Fu + 106126215&qq.com +38523 + Samimrayaneh + Kaveh Fareghi + fareghi&samimrayaneh.com +38524 + StorageCraft Technology Corporation + Matt Thompson-Moltzen + matttm&storagecraft.com.au +38525 + City of Windhoek + Jacques Frye + jcf&windhoekcc.org.na +38526 + Paul Davison (consulting) + Paul Davison + pd&pauldavison.com +38527 + GeekSoc + Andrew Smillie + president&geeksoc.org +38528 + PIKA Technologies + David Clarke + david.clarke&pikatech.com +38529 + AKA Computer Solution Inc. + Matthew Draper + mdraper&akacs.com +38530 + Informationsdesign AG + Thomas Brunko + ssladmin&informationsdesign.de +38531 + ICF International + Mike Rankin + mrankin&icfi.com +38532 + GroupScape.com + Christopher Barker + saddlebear&hotmail.com +38533 + ElphoTech + Aliaksandr Hutar + athlete0&gmail.com +38534 + L-ACOUSTICS + PIGNON Christophe + christophe.pignon&l-acoustics.com +38535 + Sierra Photonics Inc. + Laurence Flath + support&sierraphotonics.com +38536 + 10ZiG Technology + Vu Nguyen + vu&10zig.com +38537 + Texas Municipal League Intergovernmental Risk Pool + Jonathan Wiest + jwiest&tmlirp.org +38538 + Sify Technologies Ltd + BIJU.Kannuvayalil Kuniyil + biju.anandan&sifycorp.com +38539 + ANKABUT (UAE Advanced Network for Research and Education) + Mohammad Mabrouk + mohammad.mabrouk&kustar.ac.ae +38540 + Phoebus Vision + zhouyiliang + zhouyiliang_sky&163.com +38541 + Concentric Cloud Solutions, LLC (formerly 'XO Interactive, LLC') + Kip Krauss + Interactive-SystemsSupport&xo.com +38542 + OpenIP + THOMAS Yoann + ythomas&openip.fr +38543 + Continental Lensa Ltd. 
+ Marcos Caballero Gallardo + mcaballero&cecchile.com +38544 + Asset Control International BV + Yves Han + yhan&asset-control.com +38545 + Advanced Medical X-Ray + Greg Crowley + Greg.AMXR&gmail.com +38546 + ORPHEUS + Eric Prost + eprost&orpheus.fr +38547 + Dignitas Technologies + Jon Watkins + jwatkins&dignitastechnologies.com +38548 + SHENZHEN UNIVERSAL INTELLISYS PTE LTD + YIN KEAN MENG + yin&ekintellisys.com +38549 + Netstor Technology Co., Ltd. + David WeiYu Chen + david&netstor.com.tw +38550 + Security Pillar Ltd + Alfonso De Gregorio + adg&securitypillar.com +38551 + Pansardata AB + Joakim Bergkvist + joakim.bergkvist&pansardata.se +38552 + Manfred Nagl - naglit.at + Manfred Nagl + manfred.nagl&naglit.at +38553 + Bureau of industrial automation LLC + Andrey Chaus + it&bpa.ru +38554 + Mark & Space Telesystems (P) LTD. + Vishal Sharma + vishal.sharma&marknspace.com +38555 + Jeppesen + Håkan Olsson + hakan.olsson&jeppesen.com +38556 + Tradesignal GmbH + Kristoffer Mittelstorb + admin&tradesignal.com +38557 + Artvin Coruh University + Rahman Duran + rduran&artvin.edu.tr +38558 + Innes Corporation Pty Ltd + Jeffrey Pages + jeff&innescorp.com.au +38559 + ITAC, IT APPLICATIONS CONSULTING S.A. + RICARDO CORTES CALDERON + info&itac.com.co +38560 + Denver International Airport (City and County of Denver) + Stephen Taylor + stephen.taylor&flydenver.com +38561 + Appleton Coated llc + Dave Van Sambeek + dvansambeek&appletoncoated.com +38562 + KokelNET + Tobias Hachmer + tobias&hachmer.de +38563 + Ryan Systems, Inc. + Dr. John M. Ryan + jryan&ryansystems.com +38564 + Mark IV Indonesia + Cahya Masdi + cahya&mark-indonesia.com +38565 + Instrumentation Technologies d.d. + Mateja Lampe + mateja.lampe&i-tech.si +38566 + FirstPower a.s. + Dmitry Kotov + kotov&firstpower.cz +38567 + Hit the Sticks, LLC + Aubrey Jones + aubrey&htssoft.com +38568 + Syvea Technologies + Seth Weith-Glushko + seth.weith-glushko&syvea.net +38569 + Uziek Pte. Ltd. + Elvin Tan + elvin&uziek.com +38570 + axn software UG (haftungsbeschränkt) + Carsten Klein + carsten.klein&axn-software.de +38571 + Health Information Technology Exchange of Connecticut + Lori Reed-Fourquet + lfourquet&hitect.org +38572 + TroSoft AB + Tomas Rook + tomas&automatisera.nu +38573 + Zucchetti Axess SpA + Alberto Pavesi + alberto.pavesi&axesstmc.com +38574 + Facultad Regional Santa Fe, Universidad Tecnológica Nacional. + Oscar A. Jara + oajara&frsf.utn.edu.ar +38575 + TeraTron GmbH + Oliver Scharmann + edv&teratron.de +38576 + Lomond Paper Ltd + Ivan Bulaev + ivan&lomond.com +38577 + Perfecta Aviation + George Zoulias + support&PerfectaAviation.com +38578 + New York City Transit Authority + David Papis + David.Papis&nyct.com +38579 + Pike Aerospace Research Corporation + Mike Sharkey + mike&pikeaero.com +38580 + i3 International inc. + Bob Hoang + bob&i3international.com +38581 + Crouse Hospital + Matt Mahoney + MattMahoney&crouse.org +38582 + Fryman and Associates, Inc + James Fryman + james&frymanandassociates.net +38583 + MetroPCS Wireless, Inc. + Rucks Yerger + ryerger&metroPCS.com +38584 + Superior Communication Solutions Inc. + Steve Sohn + Steve.Sohn&scsi-ga.com +38585 + VEG Networks + Valentin Gusarin + veg&str.bashnet.ru +38586 + DoZeener Controls + Rodney Micallef + rodney&dozeener.com +38587 + SmartKomm GmbH + Thomas Benke + benke&smartkomm.net +38588 + Tanuki Software, Ltd. 
+ Leif Mortenson + snmp-admin&tanukisoftware.com +38589 + Egyptian Universities Network, EUN + Dina Barakat + dina&eun.eg +38590 + Islamic Republic Broadcasting of Iran + Hassan Afshar + iana&irib.ir +38591 + Indusface Telecom Private Limited + Carol John Paul Mendonca + carol&indusfacetelecom.com +38592 + Cyan Technology + Matt Kern + matt.kern&cyantechnology.com +38593 + Cloud 9 Business Analytics Limited + Julian Edwards + julian&c9analytics.com +38594 + Enel PS d.o.o. + Bojan Vesic + bojan.vesic&enelps.com +38595 + Shanghai Zhongmi Communication Technology CO.,LTD. + xiangdongwang + shxdwang&21cn.com +38596 + University of Saint Joseph + Daniel Filipe G. Farinha + netadmin&usj.edu.mo +38597 + Sunfest + Chinnaswamy Subramanian Kannan + kannan&sunfest.in +38598 + Sunfest + Chinnaswamy Subramanian Kannan + kannan&sunfest.in +38599 + LTU Technologies + Yoann Queret + it&ltutech.com +38600 + kleinhans it-solutions und services + Sebastian Kleinhans + sebastian&kleinhans.org +38601 + ELIUM GmbH + Kotulla Beate + bkotulla&elium.de +38602 + S&C Electric Company + Rohit Sharma + rohit.sharma&sandc.com +38603 + Applied Systems Engineering, Inc. + Robert Krebsbach + bobk&ase-systems.com +38604 + In And Out Cologne + Nils Doormann + nils&inandout-cologne.de +38605 + Calxeda, Inc. + Steve Beatty + steve.beatty&calxeda.com +38606 + Gusbeckers Development + Dirk Gusek + dirk.gusek&gusbeckers.de +38607 + WITTENSTEIN AG + Jens Mundhenke + mundhenke.jens&wittenstein.de +38608 + Gymnázium, Plzeň + Filip Štědronský + spravce&mikulasske.cz +38609 + lipple.jp + Kunio Matsumoto + zyno&lipple.jp +38610 + PT Respati Solusi Rekatama + Dhita Yudhistira + d.yudhistira&rektronik.com +38611 + Alan Baugher + Alan Baugher + alan&baugher.us +38612 + Itrix Incorporation + Pankaj J Kachhwaha + pankaj&itrixinc.com +38613 + Computer Technologes and Systems Ltd. + Yuri Saltykov + support&voiptech.ru +38614 + Nextest Systems + Randy Nuss + randy.nuss&nextest.com +38615 + Mettle Networks, Inc + Abraham K. Jacob + abraham&mettlenetworks.com +38616 + METEL s.r.o. + Tomas Metelka + info&metel.eu +38617 + Callplus Services Limited + Nick Clifford + networksnoc&callplus.co.nz +38618 + Critical Software Ltd + Matt Burke + iana-oid&icritical.com +38619 + think digital GmbH + Michael Hauber + michael.hauber&think-digital.ch +38620 + Gamma 2000 Pte Ltd + Seow Yi Xuan + 1101091409&gamma2k.com +38621 + Merruk Technology, SARL. + Yahya Lmallas + merruk.company&gmail.com +38622 + Master Power Technologies + Liaan van der Merwe + liaan&kva.co.za +38623 + bolay.co + Sylvain Bolay + info&bolay.co +38624 + NETGRAPPLER Corporation + William Rympalski + rympalski&netgrappler.com +38625 + SILOCOM Internacional Corp. + Efraín B. Cardona + ebcardona&silocom.com +38626 + Bureau 14 SARL + Edouard Alligand + edouard.alligand&bureau14.fr +38627 + DLC Systems, Inc. + Ralph Spaulding + rspaulding&tantalus.com +38628 + LivingSocial + Val Aleksenko + val&livingsocial.com +38629 + Connamara Systems, llc + Chris Busbey + cbusbey&connamara.com +38630 + OneHealthPort + Sue Merk + smerk&onehealthport.com +38631 + Morgajel.com + Jesse Morgan + morgajel&gmail.com +38632 + Mariana Consulting, LLC + Peter Ordonez + peter.ordonez&gmail.com +38633 + VOXLIBERTUM AG + Olivier Boucard + olivier&voxlibertum.com +38634 + Hendrikx ITC B.V. + Alfred Blokland + alfred.blokland&hendrikx-itc.com +38635 + Falcon Steel Co. 
+ Daniel McClain + dmcclain&falconsteel.com +38636 + Starfish Technologies Limited + Graham Neden-Watts + graham&starfish.tv +38637 + U2 Systems + Tony Ching + tony&u2systems.com +38638 + ista International GmbH + Dr. Nicola Altan + nicola.altan&ista.com +38639 + bpost + Olivier Rombaut + olivier.rombaut&bpost.be +38640 + Novus Partners, Inc. + Tarik Jabri + tjabri&novus.com +38641 + yate + hanboyang + xglinux&163.com +38642 + Net Trans Services AS + Christian Berg-Henry + christian.berg-henry&ntrans.com +38643 + SQISOFT Inc. + Choi, Seung Beom + mirage&sqisoft.com +38644 + Armadino + Francis Tay + usnmpagent&gmail.com +38645 + OCV CJSC + Zakharov Marat + info&ocv.ru +38646 + StoniesHome + Philip Stone + philip.stone&stonys-home.net +38647 + KV-Telematik ARGE + Stephan Hoevels + stephan.hoevels&kvwl.de +38648 + DOMENY.PL Ltd + Arkadiusz Szczurowski + as&domeny.pl +38649 + Mr Panquecito + José Antonio Montiel Téllez + antunelo&live.com.mx +38650 + Western Australian Network Gaming Inc. + Will Dowling + tech&walan.org +38651 + Observatório Nacional + Aluizio de Almeida Cruz + kanter&on.br +38652 + Instituto Federal Catarinense Campus Videira + Giorge Vanz + giorge&ifc-videira.edu.br +38653 + Verlagsgruppe NEWS Ges.m.b.H + Michael Lechner + postmaster&news.at +38654 + BoostAeroSpace SAS + Romain BOTTAN + romain.bottan&eads.net +38655 + NBU SK (NATIONAL SECURITY AUTHORITY) + Ing. Peter Rybar + peter.rybar&nbusr.sk +38656 + Energinet.dk + Brian Hestehave + itplatform&energinet.dk +38657 + Ministerio de Justicia y Paz + Marianella Granados Saavedra + mgranados&mj.go.cr +38658 + Music for Humans + James Butler + james&musicforhumans.com +38659 + The Criminal Defense Group + James Butler + jbutler&thecriminaldefensegroup.com +38660 + Certipost n.v./s.a. + Guy Ramlot + guy.ramlot&certipost.com +38661 + Wuxi Thinkyare Electronic Technology Co., Ltd. + Huaxin + ThinkYare&163.com +38662 + Drachenfels Ltd + Johannes von Drachenfels + johannes&drachenfels.de +38663 + ahd GmbH & Co. KG + Thomas Drewermann + thomas.drewermann&ahd.de +38664 + Sky Area + Andrey Belogrivov + administrator&skyarea.net +38665 + PUUR it (www.puur-it.nl) + Rene Stoutjesdijk + rene&puur-it.nl +38666 + Silversands Limited + Jason Jones + jason.jones&silversands.co.uk +38667 + Mindbean + Hugh Roberts + mindbean&gmail.com +38668 + RahuNAS + Neutron Soutmun + neutron&rahunas.org +38669 + NIKKEY CO.,LTD. + KENZO.NAKAI + nakai&nikkey.co.jp +38670 + BICS - Belgacom International Carrier Services + Jofi Yance + jofi.yance&bics.com +38671 + Cellbusters LLC + George O'Dowd + george.odowd&cellbusters.com +38672 + Zaeim Electronic Industries + Hossein Hosseini + hhosseini&zaeim.com +38673 + E-merchant + George Abitbol + g.abitbol&e-merchant.com +38674 + West Virginia Health Information Network (WVHIN) + Himabindu Bolisetty + bindu&careevolution.com +38675 + NorCell Inc. + Chris Sperber + chris.sperber&cellmarkpaper.com +38676 + PDU EXPERT UK LTD + PASCA MEME LATH + pascal.lath&pduexpertuk.com +38677 + ECOSIX + Eric LAURENT-RICARD + eric&ecosix.com +38678 + Joyent, Inc. + Ben Rockwood + hostmaster&joyent.com +38679 + idcell.co.ltd + myung kwan, Lim + idcell&naver.com +38680 + Nimbula, Inc + Bo Li + bli&nimbula.com +38681 + HJBaier + Hermann Baier + hb&hjbaier.de +38682 + Mindlinx Technology Solutions + Michael Lopez + mlopez&mindlinx.net +38683 + Klinikum Ansbach + Steffen Rottler + Steffen.Rottler&Klinikum-Ansbach.de +38684 + LOGEMED + Cédrick Perron + cedrick.perron&logemed.fr +38685 + Semptian Technologies Ltd. 
+ huangxinzhu + huangxinzhu&semptian.com +38686 + CLS Argos + Eric Spessotto + equipe.systeme&cls.fr +38687 + The Online Backup Company Norway AS + Lars Bahner + support&drift.obcn.no +38688 + Educa.ch - Schweizer Medieninstitut für Bildung und Kultur Genossenschaft + Manuel Dominguez + manuel.dominguez&educa.ch +38689 + Old Mutual + Hugh Roberts + hroberts&oldmutual.com +38690 + Corepoint Health + Thomas Merritt + joe.merritt&corepointhealth.com +38691 + Quanti s.r.o. + Marek Polčák + admin&quanti.cz +38692 + ONPATH Technologies Inc + David Vitt + david.vitt&onpathtech.com +38693 + matthewv.com + Matthew Von-Maszewski + matthewv&matthewv.com +38694 + Alabama One Health Record (ALOHR) + Himabindu Bolisetty + bindu&careevolution.com +38695 + Joehl + Koeferli AG + Thomas Koeferli + tkoeferli&j-k.ch +38696 + Hacking Networked Solutions Ltd. + Max Hacking + iana.pen.registrar&hacking.co.uk +38697 + DSM Inc + Silas Moeckel + silas&dsminc-corp.com +38698 + Onebox Ticket Management + Jordi Alborch + jalborch&oneboxtickets.com +38699 + Pulsar Consulting sa + Michel Goossens + gsm&pulsar.be +38700 + Sergey Kozlov + Sergey Kozlov + skozlov&bmstu.ru +38701 + ECBIZNET, INC. + ERSKINE R. CURRY + ADMIN&ECBIZNET.ORG +38702 + Talisman Energy Inc + Jeff Pitman + jpitman&talisman-energy.com +38703 + Advancia Corporation + Rod Miller + advcorpit&advancia.com +38704 + Nvest., Inc + Tejas Viswanath + tejas&wikinvest.com +38705 + Amplidata + Wouter Van Eetvelde + ianaadmin&amplidata.com +38706 + maceghost + Gregory Mace + gregorymace&earthlink.net +38707 + Zodiac Data Systems + M. Cyrille HANSQUINE + zds_snmp_contact&zodiacaerospace.com +38708 + Shenzhen CTI Technology Co., Ltd. + Dongsheng Bai + white21cn&21cn.com +38709 + Edison Global Circuits, LLC + Jeffrey Franks + snmp-mgr&egcircuits.com +38710 + Ian Kristin + Ian Kristin + ikristin&pixelfleck.de +38711 + Dolby Medical Home Respiratory Care Ltd + John Simon + postmaster&dmhrc.co.uk +38712 + St. Joseph's Healthcare System INC + Christopher Bocchino + bocchinc&sjhmc.org +38713 + National Association of REALTORS® + Keith Garner + kgarner&realtors.org +38714 + RLH INDUSTRIES,INC. + THOMAS VO + TVO&FIBEROPTICLINK.COM +38715 + jllin agricultural university + pan he + smart_p01&jlau.edu.cn +38716 + Orbotech + Shay Bar even + sys-rnd&orbotech.com +38717 + INSIEL – Informatica per il Sistema degli Enti Locali S.p.A + Francesco Sasso + francesco.sasso&insiel.it +38718 + Zobele Instrument (Shenzhen) Co., Ltd. + William Xiong + william.xiong&zobele.com +38719 + Nextgen Innovation Labs LLC + Amit Verma + license&nil-labs.com +38720 + ENTLibre + Sami SMATI + sami.smati&atos.net +38721 + Hexaglobe + Pierre-Alexandre Entraygues + paentraygues&hexaglobe.com +38722 + Conseil Général du Calvados + Jean-Damien Bouvier + jean-damien.bouvier&calvados.fr +38723 + jusme.com + Ian Lowrey + iana-pen-1209&jusme.com +38724 + 3iSYS + Shirin Sabzali + amirally&3isysnetworks.com +38725 + Florian Wagner + Florian Wagner + florian&wagner-flo.net +38726 + bjoernskou + Brian Bjørnskou Petersen + mail&bjoernskou.dk +38727 + banan s.r.o. + Martin Carbol + carbol&banan.cz +38728 + Kiez.Net + Clemens Schrimpe + csch&kiez.net +38729 + GCCA Inc. + Shaun Chang + shaunchang&gccacloud.com +38730 + RadioAccess + Boris Bozic + boris.bozic&radioaccess.nl +38731 + Wiscada s.r.l + Filippo Formica + filippo.formica&wiscada.it +38732 + Sachsen DV GmbH + Hendrik Eilers + info&sachsendv.de +38733 + VoiceRite, Inc. 
+ Jeff Engle + jengle&voicerite.com +38734 + Padilha com + Krishna Padilha + krishna&padilha.com +38735 + Stephen Timothy Perryman + Stephen Timothy Perryman + stephen&stephenperryman.com +38736 + Hutchinson Regional Medical Center (formerly 'Promise Regional Medical Center - Hutchinson') + Mark Keeton + KeetonM&hutchregional.com +38737 + MyOwnFile BV + Dave Franken + dfranken&meddex.nl +38738 + NL Technologies + Colin Ma + cma&nltinc.com +38739 + Nebras Technology + Hesham Desouky + hesham.desouky&nebrastech.com +38740 + Attentec AB + Tobias Blom + it&attentec.se +38741 + Mobile Frosting + Mike Mackintosh + mike.mackintosh&mobilefrosting.com +38742 + Les créations Snowflakes + Claude Mally + claude.mally&gmail.com +38743 + Gruden + Filippo Vitale + filippo&gruden.com +38744 + twootton + T. Wootton + tim_wootton&yahoo.com +38745 + UNO System + Kim, Kyoung-hyun + kkh&uno-system.com +38746 + incelis + James Park + infrastructure&incelis.com +38747 + SHENZHEN SUPLET CO., LTD. + Andy luo + lion_ru&126.com +38748 + 2H ENERGY + HANIN Samuel + Samuel.Hanin&FPTINDUSTRIAL.COM +38749 + GNU Generation + Laurent Fasnacht + informatique.gnugeneration&epfl.ch +38750 + Neologic Sp. z o.o. + Bartlomiej Zarzecki + pen.administrator&neologic.pl +38751 + Marketwire + Boris Pasko + bpasko&marketwire.com +38752 + New Jersey Manufacturers Insurance Company + James New + jnew&njm.com +38753 + TECNOMEDSA + Lucas Magurno + lucasm&tecnomedsa.com.ar +38754 + Jinon Corporation + Seiji Kurata + seiji.kurata&jinoncorp.com +38755 + Českomoravské informační systémy s.r.o. + Markos Paraskevopulos + markos&cmis.cz +38756 + TNG Technology Consulting + Bernhard Bock + bernhard.bock&tngtech.com +38757 + Trapeze Group Asia Pacific Pty Ltd (formerly 'Sigtec Pty Ltd') + Kim Faint + kim.faint&trapezegroup.com.au +38758 + GrandCapital Ltd. + Vasily Alexeev + valexeev&grandcapital.net +38759 + Katholieke Hogeschool Limburg + Vandael Tim + tim.vandael&khlim.be +38760 + Veselības ekonomikas centrs (formerly 'ABC Software, Ltd') + Margreta Baltgalve + margreta.baltgalve&vec.gov.lv +38761 + OSnode + Geoffrey Rekier + geoffrey&osnode.com +38762 + Deutsche Rentenversicherung Bund + Torsten Gruchmann + ossmon&drv-bund.de +38763 + cenhare AG + Olaf Beyer + ob&censhare.com +38764 + Boscad + Bill Gunn + boscad&btinternet.com +38765 + imirhil.fr + Nicolas VINOT + aeris&imirhil.fr +38766 + Gosloto Ltd. + Alexey Alekov + alexey.alekov&gosloto.ru +38767 + Martin Diener + Martin Diener + pen.admin&mdscs.de +38768 + StreamBase Systems, Inc. + Jim Campbell + support&streambase.com +38769 + Holy Redeemer Hospital and Medical Center + Craig Heller + cheller&holyredeemer.com +38770 + AGGREGATUM + Agustin de Landa Gil + adelanda&aggregatum.com +38771 + Internet Integration, Inc. + David Rosen + david&i-3.com +38772 + Mageia.Org + Romain d'Alverny + rda&mageia.org +38773 + Visionutveckling AB + Fluff Abrahamsson + fluff.abrahamsson&visionutveckling.se +38774 + CONET Solutions GmbH + Joachim Janz + SNMP-OID-Admin&conet.de +38775 + Stream Labs + Konstantin Preis + it&streamlabs.ru +38776 + Telarix + sanath kumar + s.kumar&Telarix.com +38777 + NOU DPO UMC YARB + Ivan V Kvasnikov + i.kvasnikov&pobts.ru +38778 + Jeda Networks + David Crespi + dcrespi&jedanetworks.com +38779 + Noggin IT + Travers Carter + iana.account&noggin.com.au +38780 + Gaertner + Carolin Gärtner + webmaster&gaertner.cx +38781 + trainline + John Catlin + john.catlin&thetrainline.com +38782 + TAGHOS Tecnologia + Ricardo Nabinger Sanchez + rnsanchez&taghos.com.br +38783 + Teracom Ltd. 
+ Ognyan Dimitrov + info&teracom.cc +38784 + Systems Integration Specialists Company, Inc. + Herbert Falk + herb&sisconet.com +38785 + Dirdal-it + Kjell Dirdal + kjell_dirdal&hotmail.com +38786 + Emirates Integrated Telecommunications Company, PJSC + Hamid Boukhelifa + hamid.boukhelifa&du.ae +38787 + Nanjing Tunec Science & Technology Co., Ltd. + Li Wang + wang_wang_li&163.com +38788 + Oasis Telecommunication Technologies + Alex Sokolov + support&oasis-tech.net +38789 + National TeleConsultants LLC + David Potter + david.potter&ntc.com +38790 + EUROBANK EFG BULGARIA AD + Rossen Naydenov + rnaydenov&postbank.bg +38791 + Atos IT Solutions and Services AG + Thomas Duerrenberger + thomas.duerrenberger&atos.net +38792 + Operator Logistyczny Paliw Płynnych Sp. z o.o. + Piotr Rycombel + piotr.rycombel&olpp.pl +38793 + Universidade Federal Rural do Rio de Janeiro + Everton de Freitas Cordeiro + freitas&ufrrj.br +38794 + UQODE + Halima Naboulsi + hnaboulsi&uqode.com +38795 + ASSOCIATION MENSA FRANCE + Marcelin Da Cruz + rsi&mensa.fr +38796 + Takashi Komatsubara + Takashi Komatsubara + takashis&time-trend.com +38797 + Atlantis Computing Inc. + Vinodh Dorairajan + vinodh&atlantiscomputing.com +38798 + Compulink + Robert Rosen + rer&compulinkadvantage.com +38799 + Dream Property GmbH + Andreas Oberritter + hostmaster&dream-property.net +38800 + Ecker & Partner + Christian Dergovics + cdergo&eup.at +38801 + Pythia Technologies + Jim Potter + jim.potter&pythiatech.com +38802 + CloudWare Co + Thomas Steinig + thomas.steinig&cloudware.co +38803 + Woori-Net Inc. + Kang, DuckMoon + dmkang&woori-net.com +38804 + SEVEN PRINCIPLES AG + Klaus Dudda + klaus.dudda&7p-group.com +38805 + Envers AG + Thomas Heppner + thomas.heppner&envers.de +38806 + Victor Technologies + Christian van Ophuijsen + cvo&victortek.com +38807 + Lanka Education and Research Network + Nimal Ratnayake + learn&ac.lk +38808 + University of Miyazaki + Kenji Aoki + staff&cc.miyazaki-u.ac.jp +38809 + 4finance + Igors Bogustovs + igors.bogustovs&smscredit.lv +38810 + Anhui Asky Quantum Technology Co.,Ltd + shexiangsheng + shexiangsheng&qasky.com +38811 + Ministry of Information + ganitham sreenivasa prasad + srini&media.gov.kw +38812 + Centerm Information Company + zhengqizhi + zhengqizhi&centerm.com.cn +38813 + Computer Systems Consulting s.r.o. + Jan Dohnal + info&csc-sro.cz +38814 + IFMOD GmbH & Co. KG + Florian Mueller-Schunk + fms&ifmod.de +38815 + ETHERPOWER S.R.L. + Miguel Vinitski + miguel&etherpower.com.ar +38816 + Partnerpedia Solutions, Inc. + Jason Owens + systems&partnerpedia.com +38817 + University of Western Greece + Georgios Fragkogiannis + gfrago&uwg.gr +38818 + American National Insurance + James Mc Eniry + james.mceniry&anico.com +38819 + VCE Company, LLC + Mike Holloway + mike.holloway&vce.com +38820 + Inside Lacrosse + Wesley Craft + wcraft&insidelacrosse.com +38821 + ThirdWave B.V. + R.J.T. de Vries + rdevries&thirdwave.nl +38822 + Sport Voswinkel GmbH & Co. KG + Thomas Koch + t.koch&voswinkel.de +38823 + Grandvision + Aleksey Tsvetkov + tsvetkov_av&grandvision.ru +38824 + LYNX spol. s r.o. + Marian Benicky + marian.benicky&lynx.sk +38825 + Huntsman International LLC + David McGoohan + david_mcgoohan&huntsman.com +38826 + Liberty Life + Christo Coetzer + christocoetzer&hotmail.com +38827 + orientscience + deguiqin + ostc&orientscience.com +38828 + AllDigital, Inc. + Kon Wilms + kon&alldigital.com +38829 + Sjöfartsverket + Anders Karlström + Anders.Karlstrom&Sjofartsverket.se +38830 + Sepialine Inc. 
+ Bruce Cummins + bcummins&sepialine.com +38831 + Fédération Nationale des Tiers de Confiance + Bernard Delecroix + bernard.delecroix&fntc.org +38832 + Clearfield Hospital + Bryan Sanford + bsanford&clearfieldhosp.org +38833 + TRANSPOREON GmbH + Markus Metzger + infra-live&transporeon.com +38834 + Orad Hi Tec Systems Ltd. + Jaroslaw Weksej + j.weksej&orad.pl +38835 + Yota + Sergey Malygin + SMalygin&yotateam.com +38836 + Western Oregon University + Dave Diemer + diemerd&wou.edu +38837 + First Interstate Bank + Jason Newell + domains&fib.com +38838 + Angstrem-Telecom JSC + Alexander Treshchanovsky + akt&angtel.ru +38839 + nPhase LLC. + Jeff Layton + itops&nphase.com +38840 + Visual Click Software, Inc. + John McCann + jmccann&visualclick.com +38841 + XCEL SOLUTIONS CORP + Jit Goel + jit&xcelcorp.com +38842 + ECM Inc + David Wen + david.w&ecmhp.com +38843 + Kinoton Digital Solutions GmbH + Frank Würkner + wuerkner&kinoton.de +38844 + Astec IT Solutions Ltd. + Chris Barlow + snmp&astecsolutions.co.uk +38845 + DZ BANK AG + Peter Garben + sysmgmt&dzbank.de +38846 + Bong U.K. Ltd. + Andrew Knight + andrew.knight&bong.com +38847 + Prelert + Stephen Dodson + steve&prelert.com +38848 + Susquehanna Health + Mark Miller + mmiller&susquehannahealth.org +38849 + Intwine Energy + Dave Halasz + dhalasz&intwineenergy.com +38850 + Global MunnexTech SA de CV + David Abundiz + jg&munnex.com +38851 + Izmir Institute of High Technology + FIRAT KOCAK + firatkocak&iyte.edu.tr +38852 + XCOM AG + Jan Lange + jan.lange&xcom.de +38853 + RIA Novosti (formerly 'Federal State Unitary Enterprise Russian Information Agency News') + Andrey Belokurov + postmaster&rian.ru +38854 + Ncurity + Ju, Sung Su + ssju&ncurity.com +38855 + Nagoya Institute of Technology + Shoichi Saito + admin&nitech.ac.jp +38856 + Login People + R&D team + dev&loginpeople.com +38857 + VASONA NETWORKS + Tali Harash + tharash&vasonanetworks.com +38858 + General Micro Systems, Inc + Donald Palmer + dpalmer&gms4sbc.com +38859 + Agile Communications, Inc. + Jason Erickson + jason.erickson&agile.cc +38860 + comadot.net + Luke Comadot + iana-pen&comadot.net +38861 + Rudy Gevaert + Rudy Gevaert + rudy&webworm.org +38862 + Erstikalender.info + Andreas Neumann + andreas.neumann&tu-ilmenau.de +38863 + SiChuan Andi industry co.LTD + Liangkun Jiao + qishankai&andisat.net +38864 + Virgin Holidays Ltd + Gary Payne + infrastructure&virginholidays.com +38865 + Squadra Technologies + Anthony LaMark + anthony&squadratechnologies.com +38866 + TRIMET Aluminium SE (formerly 'TRIMET ALUMINIUM AG') + Christoph Hackstein + christoph.hackstein&trimet.de +38867 + mecodia GmbH + Tobias Birmili + it&mecodia.de +38868 + Comision Nacional de Bancos y Seguros + Luis Lupiac + llupiac&cnbs.gov.hn +38869 + Alexey S Khromov + Alexey S Khromov + zxalexis&gmail.com +38870 + Mandiant + Dave Merkel + dave.merkel&mandiant.com +38871 + Vostokcement Ltd. + Sergey Rudenko + sysadmin&vostokcement.ru +38872 + JSC "Prompribor" + N.I.Kobylkin + sales&prompribor.ru +38873 + Alex Bolgov + Alex Bolgov + bolgov2009&gmail.com +38874 + ENABIL Solutions Ltd. + Lindsay Murfin + IT.Team&enabil.com +38875 + UnifiedPost S.A. + Arjen Van Drie + hostmaster&unifiedpost.com +38876 + Mork + Bjørn Mork + bjorn&mork.no +38877 + BPM Advanced Technology Company + Fred Legros + contact&flowmind.org +38878 + s.d.i. s.p.a. + Castiglioni Lorenzo + l.castiglioni&sdiautomazione.it +38879 + tradebridge Pty Ltd + Graham Brown + graham&healthbridge.co.za +38880 + MicroAutomation + D'Artagnion R. 
Pope + dpope&microautomation.com +38881 + Abelium d.o.o. + Primoz Luksic + primoz.luksic&abelium.eu +38882 + Barron McCann Technology + Cliff Young + cliff&bemac.com +38883 + Phenomic Business Systems Ltd + David Barker + david.barker&phenomic.co.uk +38884 + Electrolux + Aleksander Brozek + oid&electrolux.com +38885 + nPulse Technologies Inc. + Bill Cantrell + bc&npulsetech.com +38886 + Societe d'exploitation de Reseaux et de Services Securises (SER2S) + Philippe Provost + gin-manager&e-rsb.com +38887 + VIS - Verein der Informatik Studierenden + Pascal Spörri + sysadmin&vis.ethz.ch +38888 + c3pb e.V. + Helge Jung + infrastruktur&c3pb.de +38889 + Okuejina + Pontus Carlsson + PontusCarlsson&live.se +38890 + Blue Shield of California + Charles Blas + charles.blas&blueshieldca.com +38891 + Jimma University + Alemayehu Lishane + alemayehu.Lishane&ju.edu.et +38892 + Axiovista S.A. + Juan J. Sierralta + juanjose.sierralta&axiovista.com +38893 + Marc Luehr + Marc Luehr + marcluehr&googlemail.com +38894 + GROB-WERKE GmbH & Co. KG + Wolfgang Rothermel + wrothermel&grobgroup.com +38895 + sig21 labs + Martin Mueller + mm&sig21.net +38896 + Findaway World + Kevin Kovach + kkovach&findawayworld.com +38897 + Safegate International AB + Markus Andersson + markus.andersson&safegate.com +38898 + La France du Nord au Sud + Laurent Ollagnier + laurent.ollagnier&dunordausud.fr +38899 + National Institute Of Weather And Atmospheric Research + Gabriella Turek + g.turek&niwa.co.nz +38900 + Fukuoka University + Tastuo Kimura + densan&adm.fukuoka-u.ac.jp +38901 + Landsvirkjun + Larus Gudmundsson + lgudmund&lv.is +38902 + NovAtel Inc. + Patrick Fenton + patrick.fenton&novatel.com +38903 + Malin Space Science Systems + Scott Davis + davis&msss.com +38904 + School Specialty, Inc. + Mark Ziesemer + Mark.Ziesemer&SchoolSpecialty.com +38905 + Timanetworks Inc. + Albee Bu + albee.bu&timanetworks.com +38906 + Goethe-Universitaet Frankfurt am Main + Prof. Dr. Udo Kebschull + basisdienste&rz.uni-frankfurt.de +38907 + Zumbox, Inc. + Gareth Greenaway + ggreenaway&zumbox.com +38908 + Aon Corporation + Kenton Morneau + kenton.morneau&aon.com +38909 + Linguistic Data Consortium + Andrew McMackin + systems&ldc.upenn.edu +38910 + Xicoy Electronica S.L. + Gaspar Espiell + gaspar&xicoy.com +38911 + McLeod Health + Sharmayne Donley + sdonley&mcleodhealth.org +38912 + psych0tik.net + Rich Healey + richo&psych0tik.net +38913 + QXTN Pty Ltd + Richard Ham + admin&qxtn.net +38914 + JNT SOUND SYSTEM AB + Richard Dahlstrom + richard&jnt.se +38915 + avono AG + Stefan Engel + admin&avono.de +38916 + Shanghai ekingstar Digital Campus Co,.Ltd. + liu hongqing + liuhongqing&ekingstar.com +38917 + Guangxi Xinhai Communication Technology Co.,Ltd + He Kai + hekai&gzhyit.com +38918 + DEK Technologies Pty Ltd + Rick Collings + ictteam&dektech.com.au +38919 + EPRONA, a.s. + Zdenek Cermak + cermak&eprona.cz +38920 + SnapTV AS + Åsmund Grammeltvedt + asmundg&snap.tv +38921 + Capetti Elettronica srl + Maurizio Bertero + maurizio.bertero&capetti.it +38922 + Hypotecni banka, a.s. + Filip Sixta + sixta.filip&hypotecnibanka.cz +38923 + iProfs + Afdeling Beheer + beheer&iprofs.nl +38924 + Damarel Systems International Ltd. 
+ John Boult + jboult&damarel.com +38925 + PROCENTEC + Paulo Silva + psilva&procentec.com +38926 + InnovAction srl + Francesco Policastrese + fpolicastrese&innovaction.it +38927 + Versile AS + Tore Skaug + toresk&versile.com +38928 + Powel Energy Management AB + Lars Lindström + lars.lindstrom&powel.se +38929 + Fugro Geos Ltd + Ian Bellamy + i.bellamy&geos.com +38930 + @Planet Ltd + Tom Bamford + tom&atplanet.co.uk +38931 + Chaz6 + Chris Hills + chaz&chaz6.com +38932 + INSTART INC + Bowei Du + bdu&instart.co +38933 + Ubilogix International, Inc. + Edgardo Aviles-Lopez + edgardo&ubilogix.com +38934 + 35x.de + Thomas Ristic + tr&35x.de +38935 + MPM sas + Emanuele Tandurella + e.tandurella&mpm-tlc.it +38936 + Enrico Rivarola + Enrico Rivarola + henry_thebuilder&yahoo.it +38937 + ResMed SAS + Roger Stradiot + roger.stradiot&resmed.fr +38938 + ISACO GmbH + Stefan Keller + iana-pen&isaco.de +38939 + Flexera Software LLC + Fraser Chapman + fchapman&flexerasoftware.com +38940 + Thales Communications and Security + Daniel LOPES + daniel.lopes&thalesgroup.com +38941 + D. E. Shaw & Co, L.P. + Richard E. Silverman + res&deshaw.com +38942 + Coca-Cola Enterprises, Inc. + Curd Stefan Zechmeister + curdzechmeister&cokecce.com +38943 + Edgar Kittner GmbH & Co.KG + Steffen Jeschke + s.jeschke&kittner.de +38944 + Kamstrup A/S + Erik B. Pedersen + ebp&kamstrup.dk +38945 + NASA Center for Climate Simulation (NCCS) + Bennett Samowich + bennett.samowich&nasa.gov +38946 + Raja Consulting Ltd + Sumit Raja + sumit.raja&raja-consulting.co.uk +38947 + IREQ (Institut de recherche d'Hydro-Quebec) + Sylvain Riendeau + riendeau.sylvain&ireq.ca +38948 + Fortior Solutions (formerly 'SureID Inc.', formerly 'Eid Passport, Inc.') + Dave Byrum + dbyrum&fortiorsolutions.com +38949 + Astronics AES + Andrew Nicolas + andrew.nicolas&astronics.com +38950 + Mars Space Flight Facility + Nick Piacentine + npiace&mars.asu.edu +38951 + Beijing Zigvine Tech. Co. Ltd. + Zhu Wangkun + zhuwk&zigvine.com +38952 + Ferox Communications S.L. + Daniel Fraile Vergel + dfraile&telnet-ri.es +38953 + coveragetools + Bas Brul + bbrul&coveragetools.com +38954 + Informatio GmbH + Holger Jakob + holger.jakob&informatio.ch +38955 + Wire and Wireless Co., Ltd. + Naoto Komatsu + tech&wi2.co.jp +38956 + HOERBIGER Deutschland Holding GmbH + Helmut Ritter + helmut.ritter&hoerbiger.com +38957 + SWISS-ARTG + Marc Balmer + marc&msys.ch +38958 + Dynamic Technologies(Asia) Ltd + Kevin Chan + kevin&dynamic-asia.com.hk +38959 + K M Medicals + Jagadeesh C Juvvala + jagdish.juvvala&imeddoc.ie +38960 + E-VAL Tecnologia em Informática Ltd. + Rafael Shoji + rafael&evaltec.com.br +38961 + A&W Networks + Dianne M. Allison + allidm&comcast.net +38962 + Oleksandr Moskalets + Oleksandr Moskalets + mos4&seznam.cz +38963 + WoodWing Software + Joris Conijn + sysman&woodwing.com +38964 + Coral Sea Enterprises, LLC + Jane Adams + info&disruptivestudies.org +38965 + TelePro, Inc. + Jim Johnson + Jim.Johnson&tpri.com +38966 + Stryker + Marcelo Cattaneo + marcelo.cattaneo&stryker.com +38967 + Resal spol. s r.o. + Alexandr Seidl + support&resal.cz +38968 + Telino + Christian Quillot + christian.quillot&telino.fr +38969 + Intellipower, Inc. + Jim Kenna + sales&intellipower.com +38970 + Vilpra, UAB + Vytautas Kašėta + admins&vilpra.lt +38971 + Guacamole Project + Michael Jumper + mike.jumper&guac-dev.org +38972 + Coordinate-System Transport Tech. Co.,Ltd + Li Peng + shenzhen_lip&sina.com +38973 + k-blue + Andreas Kruemmel + iana-pen&k-blue.de +38974 + ENDA GmbH & Co. 
KG + Torsten Lüttgert + iana&enda.eu +38975 + Production Resource Group L.L.C. + Ian Smith + ismith&prg.com +38976 + Quortus Ltd + Riki Dolby + iana-pen&quortus.com +38977 + Imayhem S.L.L. + Cecilio Pardo + cpardo&imayhem.com +38978 + Adriatic Solutions Corp. + Filip Bujanic + fbujanic&adriaticsolutions.com +38979 + SII Lille + Joffrey DEREMETZ + jderemetz&sii.fr +38980 + AGH University of Science and Technology + Szymon Sokół + szymon.sokol&agh.edu.pl +38981 + Stradus + John Bakker + info&stradus.com +38982 + Advanced Telecommunications Technology Research Sp. z o.o. + Bartlomiej Jurek + bartlomiej.jurek&attr.com.pl +38983 + CompuRoot + Alexander Verbod + Responsible_Person.Private_Enterprise_Number.PEN.OID.IT.Department&CompuRoot.Com +38984 + CampusIT + Jan Navratil + jnavratil&campusit.net +38985 + ECM2 + Erwin Bogaard + info&ecm2.nl +38986 + SOCAR Georgia + Alexander Buslayev + a.buslayev&socar.ge +38987 + Nettbuss AS + Harald Hegerberg + harald.hegerberg&nettbuss.no +38988 + Ministry of Awqaf and Islamic Affairs - Kuwait + Karim Essam El Sabaa + srid&awqaf.gov.kw +38989 + Qtree BVBA + Frederik Kaputa + frederik&qtree.be +38990 + Nova World International, LLC dba Nova Shipping + Yevgeniy Epshteyn + pen.administrator.9906f1644e0fe35dcc2f52614311c283&nova-shipping.com +38991 + Innoforma E-learning Technologies + Francisco Javier Moreno Hidalgo + sistemas&innoforma.com +38992 + SCCT, Wilhelm Wimmreuter + Wilhelm Wimmreuter + wilhelm&wimmreuter.de +38993 + South Jersey Healthcare + Julie Morris + MorrisJ&SJHS.com +38994 + San Juan Software + Drew Gislason + drewg&sanjuansw.com +38995 + Connexon Telecom Inc. + Simon Cadieux + simon.cadieux&connexon.com +38996 + Japan Aerospace Exploration Agency + Tamotsu Motoki + motoki.tamotsu&jaxa.jp +38997 + Telnetware Co.,Ltd + Jeong Sup Lee + jslee&telnetware.com +38998 + i20 + Pavel Prischepa + prischepa.p&i20.biz +38999 + Higher Light Communications + Richard A. Scott + richard.a.scott&me.com +39000 + LinuxMCE + Serge Wagener + serge&linuxmce.org +39001 + NetFlow Logic + Sasha Velednitsky + svelednitsky&netflowlogic.com +39002 + cibex gmbh + Josef Weisskopf + jw&cibex.net +39003 + Sub10 Systems Ltd. + Mark Stevens + mark.stevens&sub10systems.com +39004 + Diaphanoscope + Angelo Leto + admin&diaphanoscope.com +39005 + Total Device + Vincent Bouttier Deslandes + vincentb&totaldevicesolutions.com +39006 + VZTech Importacao Exportacao e Desenvolvimento de Tecnologia Ltda + Pedro Zorzenon Neto + it&vztech.com.br +39007 + Armada Nacional - Uruguay + Fernando CHEDA + ncheda&armada.mil.uy +39008 + Nouvelle s.r.l. + Alessandro Forghieri + staff&orion.it +39009 + hub telecom + adrien mistretta + iana&hub-telecom.net +39010 + GrupaA Sp. z o.o. + Wojciech Rusinek + wrusinek&grupaa.pl +39011 + Fresco Logic, Inc. + Eric Wittmayer + eric&frescologic.com +39012 + INNOBAND TECHNOLOGIES, INC. + CODY LIN + codyl&innoband.com +39013 + Assurity Trusted Solutions Pte Ltd + Service Desk + servicedesk&assurity.sg +39014 + Colina Participations + Roger Costandi + rcostandi&groupecolina.com +39015 + MAXXING S.A.S.U. + DEVAU Rémi + rdevau&maxxing.com +39016 + Field Electronics Limited + Lee Awcock + lee&fieldelectronics.com +39017 + Object Trading Pty Ltd + Joseph Metcalf + joseph.metcalf&objecttrading.com +39018 + Dyna Healthcare Co., Ltd. 
+ Robin Cyue + robin.cyue&dynahealthcare.com +39019 + Techinfocom, Jsc + Denis Kostousov + dkostousov&techinfocom.com +39020 + Net To Net Co., Ltd + yun, hyung taek + yun0512&gmail.com +39021 + pdv-systeme Sachsen GmbH + Norman Ziert + ziert&pdv-sachsen.net +39022 + Joint Stock Company SVYAZNOY BANK + Gritsienko Sergey + sib&svyaznoybank.ru +39023 + murtaza enterprizes + Muhammad Murtaza + m_murtaza&ymail.com +39024 + Bjango + Marc Edwards + marc&bjango.com +39025 + Velleros, Inc. + Brent Harsh + support&velleros.com +39026 + Tritux + Zouari Fourat + support&tritux.com +39027 + www.duowan.com + Bob Lin + linbobo&chinaduo.com +39028 + Enkata + Gerard Larios + it&enkata.com +39029 + Lukse + Saulius Lukse + saulius.lukse&gmail.com +39030 + onway ag + Michael Schneider + iana&onway.ch +39031 + Eastern Mennonite University + Jason Alderfer + is.info&emu.edu +39032 + Caldwell Memorial Hospital, Inc. + Beverly Canipe + bcanipe&caldwell-mem.org +39033 + Quinstar Technology, Inc. + Sage Kuno + sage&quinstar.com +39034 + nesfile.org + Cliff F. Railey + CliffFRailey&nesfile.org +39035 + Orbital Systems, Ltd. + Carl Schoeneberger + carl.s&orbitalsystems.com +39036 + iMobile3, LLC + Laszlo Hanyecz + lhanyecz&imobile3.com +39037 + Inocybe Technologies inc. + Mathieu Lemay + mlemay&inocybe.ca +39038 + COMPNET Ltd + Sergei Korolyov + sergei.korolyov&self.ru +39039 + Daniels Electronics, Ltd + Malcolm Holser + Malcolm_Holser&danelec.com +39040 + DigitalArts Inc. + Yuusuke Amano + y-amano&daj.co.jp +39041 + Enthina + Edward Curren + ecurren&enthina.com +39042 + E-ID internet strategies B.V. + C. J. Krul + sysadmin&e-id.nl +39043 + EBRO ARMATUREN GMBH + Stefan Meier + s.meier&ebro.at +39044 + idefclub.ru + Yuriy V Yagupov + project&idefclub.ru +39045 + Cubio Communications Oy + Mike Jackson + mike.jackson&cubio.com +39046 + Jimdo GmbH + Lars Fronius + lars&jimdo.com +39047 + The Foundry + Dan Alderman + dan&thefoundry.co.uk +39048 + Sayre Memorial Hospital + Eric Leitner + it&sayrehospital.org +39049 + Byte, SIA + Vadim Zayakin + byte&apollo.lv +39050 + Forschungsgemeinschaft elektronische Medien (FeM) e.V. + Michael Braun + michael.braun&fem.tu-ilmenau.de +39051 + g360apps.com + Matt Mahoney + sysadmin&g360apps.com +39052 + Vutlan sro (formerly 'Sky Control sro') + Vasily Tyazhev + vasily&vutlan.com +39053 + Rail & Sea + Wolfgang Anditsch + edv&railsea.at +39054 + NetUP Inc. + Konstantin Emelyanov + info&netup.tv +39055 + S2 Communications AB + ULF SAHLIN + noc&s2.se +39056 + Samsung Electronics America, Inc. + Eric E. Osterholm + e.osterholm&samsung.com +39057 + SCAE Co. Ltd. + Vladimir Krivenko + admin&scaegroup.com +39058 + CET Electronics + Nicola Vicino + rd&cet-electronics.com +39059 + RTE + Arnaud MAINIER + arnaud.mainier&rte-france.com +39060 + Suster.net + Gregor Suster + gregor&suster.net +39061 + Analytica + Vikram Seshadri + SVIKRAM&ANALYTICA-INDIA.COM +39062 + Mark Harlos, Software Engineering Consultant + Mark A. Harlos + mharlos&verizon.net +39063 + Delecon + Daniel Eidenskog + daniel.eidenskog&delecon.se +39064 + Eseye Ltd + Rob Coward + rcoward&eseye.com +39065 + African Institute for Mathematical Sciences + Tom Bamford + tom&aims.ac.za +39066 + FTS DVL SRL + Galina Pavlenco + b2e&dv-lab.com +39067 + Verba Technologies + Szabolcs Daroczy + support&verba.com +39068 + hostapd/wpa_supplicant project + Jouni Malinen + jm&w1.fi +39069 + Japan Ballast Co. + Masaki Ohashi + m_oohashi&softsirius.co.jp +39070 + STARK + Jens Stark + jens.stark&gmail.com +39071 + Konkurent Ltd. 
+ Art Mitskevich + art&coffeestudio.ru +39072 + Campai Business Solutions BV + Martijn van Buijtene + martijn&campai.nl +39073 + Blue Mind + Anthony Prades + oid&blue-mind.net +39074 + makabra + Miroslaw Baran + miroslaw-iana&makabra.org +39075 + HealthE Advocate LLP + Mureen Allen + mallen_md&yahoo.com +39076 + Zalaszam Kft + Nemcsics Geza + gnemcsics&zalaszam.hu +39077 + Dorlet S.A + Iker Pérez de San Román + ikerperezsanroman&dorlet.com +39078 + Vimpex GmbH + Elisabeth Freudenthaler + elisabeth.freudenthaler&vimpex.at +39079 + AdvaICT, a.s. + Martin Juřen + martin.juren&advaict.com +39080 + ITRS Group Limited + E. Morales + emorales&itrsgroup.com +39081 + ASACA Corporation + Takashi Nakagomi + nakagomi&asaca.co.jp +39082 + PhoenixNAP + John Anderson + johna&phoenixnap.com +39083 + Samodzielny Publiczny Centralny Szpital Kliniczny + Maciej Kazulak + oidadmin&spcsk.pl +39084 + Texas A&M University - Kingsville + Robert Miller + robert.miller&tamuk.edu +39085 + MUJIN Inc. + Rosen Diankov + rosen.diankov&mujin.co.jp +39086 + FriendFinder Networks, Inc. + Patrick Gillan + pgillan&ffn.com +39087 + Astra Communication Service Co., Ltd + Kridsada Compeerapap + kridsada&astra.co.th +39088 + Hochschule Deggendorf + Volker Scheuer + volker.scheuer&hdu-deggendorf.de +39089 + Krones AG + Maximilian von Zweydorff + maximilian.zweydorff&krones.com +39090 + subreport Verlag Schawe GmbH + Thomas Osenau + thomas.osenau&subreport.de +39091 + Good Link + Kaung Thant Kyaw Dare + support&darecom.net +39092 + Chongqing Cable Network Co.,Ltd + Wang Ji + wangji&cqccn.com +39093 + Basler AG + Frank Schmolla + Frank.Schmolla&baslerweb.com +39094 + Kambi + Lezgin Bakircioglu + Kambi_Infra_Tech&Kambi.com +39095 + iFlex Ltd. + Andrey Belyaev + ABelyaev&iflex.ru +39096 + Lohika ltd. + Pavlo Khromchak + pkhromchak&lohika.com +39097 + Maxeler Technologies Ltd + Rob Dimond + rob&maxeler.com +39098 + Kyanmedia Limited + Gareth Adams + gareth&kyanmedia.com +39099 + RSA SaaS + Steven Howe + steven.howe&rsa.com +39100 + The Binary Workshop + Ashwin Shankar + ashwin&thebinaryworkshop.com +39101 + Frisbie Memorial Hospital + Mike Leach + m.leach&fmhospital.com +39102 + Power Home Remodeling Group, Inc. + Jeff Levine + jlevine&powerhrg.com +39103 + LS telcom AG + Timo Weishaeupl + tweishaeupl&lstelcom.com +39104 + Universidad Central de Las Villas + Roberto Hiribarne Guedes + hiribarne&gmail.com +39105 + Technet Systems + Peter Parianos + peter&technet.com.au +39106 + ExpatJob.net GROUP + Lucas Antoine + development&expatjob.net +39107 + ULHI - Urrutiko Lanbide Heziketa Institutua + Antton Rodriguez + arodriguez&ulhi.net +39108 + Trusteer Ltd. + Shmulik Regev + webmaster&trusteer.com +39109 + Morpho Detection, Inc. + Alexei Kireev + akireev&morphodetection.com +39110 + State of Colorado Judicial Department + James Stoner + info-servers&judicial.state.co.us +39111 + Eladian Partners, LLC + James Cape + iana-pen&eladian.com +39112 + Netsoc DIT + Mark Cunningham + markcunninghamemail&gmail.com +39113 + IRT Technologies Inc. + Arkadi Potapov + info&irttechnologies.com +39114 + Ittxa + Oscar Jimenez Sainz + tercera&ittxa.com +39115 + XACK,Inc. + Kazutaka Kumamoto + penadmin&xack.co.jp +39116 + Folkwang Universität der Künste + Andreas Cieslak + cieslak&folkwang-uni.de +39117 + noax Technologies AG + Sebastian Engl + IANA-PEN&noax.com +39118 + Amadeus s.a.s + jacques Dalbera + jdalbera&amadeus.com +39119 + dm-drogerie markt GmbH + Co. 
KG + Marco Aliberti + marco.aliberti&dm-drogeriemarkt.de +39120 + itunic consulting + Bernhard Rekemeier + br01&itunic.de +39121 + ElringKlinger AG + Christian Segor + oidmaster&elringklinger.com +39122 + Framehawk Inc. + Michael Martin + michael.martin&framehawk.com +39123 + Two Degrees Mobile Ltd + Tim West + IANA_Pen&2degreesmobile.co.nz +39124 + Kaweah Delta Health Care District + Steven Larkin + itdirector&kdhcd.org +39125 + Instituto Superior de Engenharia do Porto (ISEP) + Nuno Pereira + nap&isep.ipp.pt +39126 + DCA Inc + Mike Chatterton + mikec&dcainc.com +39127 + Mercury Insurance Group + Roy Hegge + rhegge&mercuryinsurance.com +39128 + ALLIANZ LIFE INSURANCE OF NORTH AMERICA + Drew Pierce + drew.pierce&allianzlife.com +39129 + Reiknistofa i Vedurfraedi + Thor Sigurdsson + thor&belgingur.is +39130 + Century Software (M) Sdn Bhd + Mark Rees + mark&centurysoftware.com.my +39131 + Sistemas Informaticos Abiertos S.A. + Jorge Cea + jcea&sia.es +39132 + Camair-Co + Jean-Francis AHANDA + francis.ahanda&camair-co.net +39133 + Kamsoft S.A. + Bartłomiej Syryjczyk + bsyryjczyk&kamsoft.pl +39134 + Apex + Stijn Schouteten + stijn&apex-audio.be +39135 + U.S. Bancorp + Kenneth Payne + kenneth.payne&usbank.com +39136 + Adams State University (formerly 'Adams State College') + Randall Smith + rbsmith&adams.edu +39137 + ameria GmbH + Pavel Shumarov + admin-department&ameria.de +39138 + Santa Barbara County Education Office + Sharon Van Gundy + dnsadmin&sbceo.org +39139 + nPario Inc + Joerg Hallmann + joerg&npario.com +39140 + Interra Systems, Inc. + Shelly Adhikari + shelly&InterraSystems.com +39141 + Qice Technology Co. Ltd + judd wang + jdwang&qicetech.com +39142 + Nian Electronic Company + Morteza Sadr + idm&nianelectronic.com +39143 + LARS + Mariusz Kokocinski + ca&lars.pl +39144 + Nextouch, Inc. + Young D. Moon + ydmoon&ntnextouch.com +39145 + Innovative Circuit Technology Ltd. + Natalia Bolster + technical.contact&ictcorporate.com +39146 + Watchdog Software Pty Limited + Richard Giddey + support&watchdogsoftware.com.au +39147 + intratop + Thorsten Eisinger + thorsten.eisinger&intratop.de +39148 + Agjencia Kombetare e Shoqerise se Informacionit + Fisnik Kruja + fisnik.kruja&akshi.gov.al +39149 + broadAngle, LLC + Garrison Atkisson + garrison&broadangle.com +39150 + B.O.S. Software Service und Vertrieb GmbH + Jahn Fuchs + jahn.fuchs&bossoftware.de +39151 + Autorité des Marchés Financiers + Erik Ableson + e.ableson&amf-france.org +39152 + DVT - Daten-Verarbeitung-Tirol GmbH + Florian Sailer + florian.sailer&tirol.gv.at +39153 + WAYF - Where Are You From + Christian Hannested + sekretariat&wayf.dk +39154 + SwiftStack + Joe Arnold + joe&swiftstack.com +39155 + Mango DSP, Inc. + Eldor Reif - Dir. of Professional Services and Product Management + ereif&mangodspinc.com +39156 + PROSCOPE Corporation + Hideki Fujimura + fujimura&proscope.co.jp +39157 + DERMALOG IDENTIFICATION SYSTEMS GMBH + Innocents Sia + Innocents.Sia&dermalog.com +39158 + Akouto + Dominic Chorafakis + dom&akouto.com +39159 + Redpoint Software + James Rhodes + jrhodes&redpointsoftware.com.au +39160 + PointRed Telecom Ltd. + Shajee Lawrence + shajee&pointred.co +39161 + Precision Infomatic + Charan Narayanan + ncharan86&gmail.com +39162 + WEOLO + Paolo Decarlini + paolo.decarlini&nimix.it +39163 + U-BTech Solutions + Ilan Lanz + ilan&u-btech.com +39164 + Cires21 S.L. + Manuel Cardeñas Cano + mcardenas&cires21.com +39165 + Hikvision Digital Technology Co., Ltd.
+ Qi Liu + liuqi&hikvision.com +39166 + Stadt Moenchengladbach + Klaus Schauer + klaus.schauer&moenchengladbach.de +39167 + kastel.net + Juergen Dattl + jd&kastel.net +39168 + eneatec GmbH + Steffen Buschmeier + buschmeier&eneatec.com +39169 + Softronics AG + Oliver Loch + info&softronics.ch +39170 + Koario + Stéphane Reynaud + contact&koario.fr +39171 + alipay + Jian Song + jian.song&alipay.com +39172 + Paytronix Systems, Inc. + Ross Mellgren + Rmellgren&paytronix.com +39173 + Datek Wireless AS + Espen Westgaard + espen&datek.no +39174 + Club Méditerrannée S.A. + Nicolas Karageuzian + nicolas.karageuzian&clubmed.com +39175 + GoEast GmbH + Daniel Rechsteiner + hostmaster&goeast.ch +39176 + MiTek Inc. + Michael Paul + mpaul&mii.com +39177 + Leuphana University of Lueneburg + Karsten Holmberg + karsten.holmberg&leuphana.de +39178 + The IMS Company + Chris Healy + chealy&imsco-us.com +39179 + Accuoss + Rodney Rindels + rrindels&accuoss.com +39180 + MTICUBE, LLC + Rajul Shah + rshah&mticube.com +39181 + International Development Council for Academic Studies (IDCAS) + Liam Drew + rat&idcas.org +39182 + OJSC "GT-TEC Energo" + Dmitiry Dorokhov + itdept&gtenergo.ru +39183 + Layered Logic, Inc. + Brooks Bell + brooks&layeredlogic.com +39184 + xingplatform inc + jimmy gong + gongyf&xingplatform.com +39185 + System Level Solutions (India) Pvt. Ltd + Kaushal Thacker + kthacker&slscorp.com +39186 + Ministry of Foreign Affairs of Hungary + Hobok Janos + janos.hobok&mfa.gov.hu +39187 + Marmara Univesity + Huseyin Yuce + sysadmin&marmara.edu.tr +39188 + Virtual Cable S.L. + Adolfo Gómez + agomez&virtualcable.es +39189 + King Pine + David K. Gerry + dkgerry&kingpine.org +39190 + Sergey I Mihailov + Sergey I Mihailov + creating-school&mail.ru +39191 + Comfort Consulting + John Comfort + poc&comfortconsulting.com +39192 + IBSmm Engineering, spol. s r.o. + Radomir Vrbovsky + R.Vrbovsky&IBSmm.com +39193 + Vayosoft Network Technologies Ltd. + Jean Dubenko + jean&vayosoft.com +39194 + CSSS IUGS (Centre de Sante et des Services Sociaux - Institut Universitaire de Geriatrie de Sherbrooke) + Sylvain Gobeil + slgobeil.csss-iugs&ssss.gouv.qc.ca +39195 + Spawngrid, Inc. + Iurii Rashkovskyi + yrashk&spawngrid.com +39196 + AltNet (formerly 'Oxynux') + Benjamin Collet + iana-pen&alt.tf +39197 + Xitek Design Ltd + Alan Jacks + support&xitekdesign.com +39198 + Brekeke Software, Inc. + Shinichi Mitsumata + mitu&brekeke.com +39199 + EXPRESSTRAK - DATS + Vijay Pavuluri + vijayp000&gmail.com +39200 + KTB COMPUTER SERVICES Co.,Ltd + Somporn Jarusinchai + somporn.jaru&ktbcs.co.th +39201 + Axians redtoo AG (formerly 'redtoo ag') + Patrick Sczepanski + patrick.sczepanski&axians.com +39202 + ASP Alerce Tecnology SL + Alvaro Fernandez Corugedo Feilenreiter + acorugedo&aspalerce.es +39203 + Qualysoft Informatikai Zrt. + Jacint Toth + jacint.toth&qualysoft.com +39204 + QuanticEvents + Eric Fries + eric.fries&quanticevents.com +39205 + Broadnet Telecom Inc + Christian Mokbel + cmokbel&broadnet-telecom.ca +39206 + EITV - Entretenimento e Interatividade para TV Digital + Roberto Marchesini + roberto.marchesini&eitv.com.br +39207 + SparkYard, Inc. + Jonathan Layes + iana-pen-support&sparkyard.com +39208 + LivingObjects + Damien DUMEZ + damien.dumez&livingobjects.fr +39209 + Plexus Corp.
+ Scott Reynolds + hostmaster&plexus.com +39210 + Impulse Point LLC + Andrew Cohen + acohen&impulse.com +39211 + Positronics Enterprise Limited + Kelvin Chan + kelvin.chan&positronics.com +39212 + Prosyscor Ltd + Bradley Kite + support&prosyscor.com +39213 + Datakonsult Magnus Sandberg + Magnus Sandberg + oid&datakon.se +39214 + MT2IT + Joerg-Uwe Meyer + joerg-uwe.meyer&mt2it.com +39215 + Kernun + Vladimir Tulacka + iana&kernun.cz +39216 + BroadForward B.V. + Robert Groenenberg + infra&broadforward.com +39217 + Triple T Broadband PCL + Aroon Janthong + aroon.j&jasmine.com +39218 + Connection Electronics Ltd. + Victor Lau + victor&cel.hk +39219 + Overkiz + COSSERAT Hubert + h.cosserat&overkiz.com +39220 + Theoldmonk.net + Devendra Gera + iana.pen&theoldmonk.net +39221 + Pakedge Device and Software Inc + Dusan Jankov + dusan&pakedge.com +39222 + Patrick Shuff Industries + Patrick Shuff + patrick.shuff&gmail.com +39223 + ADNET Systems, Inc. + Lucas Yamanishi + lyamanishi&sesda2.com +39224 + Djipalo Junuz + Djipalo Junuz + djipalo.junuz&djipalo.com.ba +39225 + AVRO Electrics + Krzysztof Sroka + krzysztof.sroka&avro.com.pl +39226 + REWOO Technologies AG + Andreas Gall + administration&rewoo.com +39227 + GIE Vauban Humanis + Vincent Dussaucy + oid.admin&vaubanhumanis.com +39228 + Kemira Oyj + Hannu Strang + kits.infra&kemira.com +39229 + Convergence Technologies + Jeff Ehman + jehman&converge-tech.com +39230 + lenic.eu + Lukasz Kapiec + lukasz&lenic.eu +39231 + Hosting.com, Inc. + Joe Ceresini + jceresini&hosting.com +39232 + Drogentherapiezentrum LAGO + Raphael Beer + raphael.beer&gmx.de +39233 + Saint John's Health System + Jennifer Marcum + jlmarcum&sjhsnet.org +39234 + Mitroko Inc. + Dmitry Stremkouski + mitroko&gmail.com +39235 + Exelis Inc. + Paul Lafferty + paul.lafferty&exelisinc.com +39236 + TriFractal Studios (formerly 'Sacred Point Studios') + Joshua Porter + jporter&trifractalstudios.com +39237 + FW-Systeme GmbH + Reiner Onnen + onnen&fw-systeme.de +39238 + VelociData, Inc + Michael Henrichs + mhenrichs&velocidata.com +39239 + LittleJackal + Paul Kiela + iana&littlejackal.com +39240 + Atacama Large Millimeter/submillimeter Array + Marcelo Bartsch + software&alma.cl +39241 + volkszaehler.org + Gerhard Bertelsmann + info&gerhard-bertelsmann.de +39242 + Krki IT + Lafo Mamone + krki-it&hotmail.com +39243 + VNLI LTD CO. + VINCENT KIBET KIRUI/ LILIAN CHEPKORIR KIRUI + vkirui11&yahoo.com +39244 + CHINA NATIONAL SOFTWARE & SERVICE CO., LTD. 
+ Xiaohan Zang + zangxh&css.com.cn +39245 + Linkra Networks + Alessandro Greco + alessandro.greco&linkra.it +39246 + MBDA Deutschland GmbH + Tobias Mucke + tobias.mucke&mbda-systems.de +39247 + medInt Holdings LLC (formerly 'Imalogix') + David Naylor + david.naylor&mihllc.com +39248 + STEC + Swapna Yasarapu + syasarapu&stec-inc.com +39249 + Shango + Cody Crawford + cody&shango.com +39250 + Dalian Xinyu Technology, Ltd + Bill Wade + yujinjun3&126.com +39251 + NewSun Co., LTD + Yin Wenzhu + yinwz&newsun.com.cn +39252 + RioCard Tecnologia da Informacao S/A + Bruno Benayon + bruno.benayon&riocard.com +39253 + Punto Com srl + Luigi Scilimati + luigi.scilimati&puntocomsrl.com +39254 + Consecom AG + Lukas Ruf + Lukas.Ruf&consecom.com +39255 + Yakaz + Mialon Pierre-Gilles + root&yakaz.com +39256 + laorim + Thierry Bigotte + thierry&laorim.fr +39257 + Medical Records LLC + Clayton Phillips + clayton&phillipseng.com +39258 + TZ Consulting UG + Thorsten Zenker + zenker&tz-consulting.de +39259 + Geoffroy PLANQUART + Geoffroy PLANQUART + geoffroy&planquart.fr +39260 + Koeller Family + Thomas Koeller + thomas&koeller.dyndns.org +39261 + Planungsbüro Ledermann + Jakob Ledermann + jakob_ledermann&web.de +39262 + MediaMelon, Inc. + Deepa Suresh + accounts&mediamelon.com +39263 + Akademischer Verein + Moritz Neeb + moritz.neeb&akademischerverein.de +39264 + New Rock Technologies, Inc. + Hai Wang + hwang&newrocktech.com +39265 + Versino GmbH + Michael Steiner + michael.steiner&versino.ch +39266 + Runway International OÜ + Toomas Vendelin + toomas.vendelin&runway.ee +39267 + ZAT a.s. + Václav Häusler + vaclav.hausler&zat.cz +39268 + Fachhochschule Brandenburg + Thomas Bluhm + leitung-rz&fh-brandenburg.de +39269 + BASE - Gestió d'Ingressos + Cèsar Garcia i Pérez + cgarciap&base.cat +39270 + CLUB PSCO + BORDIER JEROME + jerome.bordier&sealweb.eu +39271 + Palisades Medical Center + Glenn Hunsberger + ghunsberger&palisadesmedical.org +39272 + WCR EDV GesmbH + Christoph Walzer + c.walzer&wcr-edv.com +39273 + ARC Wireless LLC + Lindsey Barlow + lbarlow&antennas.com +39274 + INNOV@TIVE IT LTD + Steve Asser + steveasser_innov&tiveit.co.uk +39275 + Martello Technologies Corporation + Niall Gallagher + niall.gallagher&martellotech.com +39276 + Intux + Bart Verwilst + info&intux.be +39277 + Cape Regional Medical Center + Kristen Sherick + ksherick&caperegional.com +39278 + ZeXtras s.r.l. + Paolo Storti + info&zextras.com +39279 + Web Devices Ltd + Stephen Liptrott + Stephen.Liptrott&atlasnet.co.uk +39280 + ARVOO Engineering B.V. + Rini van Zetten + Rini&arvoo.nl +39281 + ST Enclosures for Electronics Systems LTD + Avi Talmor + avi&st-enclosures.com +39282 + Infinet Financial Systems + Huw Williams + huw.williams&infinetfs.com +39283 + TeleWare PLC + Dr David Hodgson + David.Hodgson&teleware.com +39284 + Mountain States Health Alliance + Kevin Bowes + boweskj&msha.com +39285 + Prime Engineering Ltd + Jeff Johnson + jeff.johnson&primeeng.ca +39286 + Wargaming.net LLP + Michael Loginovsky + admins&wargaming.net +39287 + Unimed Litoral + Claudio Renato Santiago + claudio.santiago&unimedlitoral.com.br +39288 + Fimasys + Jeremie Grauer + admin&fimasys.fr +39289 + SPAUN electronic GmbH & Co. KG + Kevin Spaun + kspaun&spaun.de +39290 + Citibank, N.A. 
+ Papa Faye + papa.faye&citi.com +39291 + SugarCRM, LLC + Denise Stockman + dstockman&sugarcrm.com +39292 + Victoria's Secret + Tim Ebbers + VSDNewmediaTechSvcs&Limitedbrands.com +39293 + MARS + Fabio De Marzo + fabio.de.marzo&effem.com +39294 + Rue La La + Zach Armstrong + zarmstrong&ruelala.com +39295 + Yakima Valley Memorial Hospital + Robert J Waller + robwaller&yvmh.org +39296 + Salford Software Ltd + Paul Heaney + paul.heaney&salfordsoftware.co.uk +39297 + Beijing YUKUAN Technology Co.,Ltd + Dai xinghe + daixinghe&163.com +39298 + Banque du Bois AG + John Ainhirn-Williams + jaw&banquedubois.com +39299 + McPherson Hospital, Inc + Angela Hall + angelah&mcphersonhospital.org +39300 + Sherwood Compliance Services Ltd. + Stu Coates + stuart.coates&sherwoodcompliance.co.uk +39301 + oikaze + Yasuhisa Mine + mine&oikaze.jp +39302 + Essent NV + Marco Spoel + marco.spoel&essent.nl +39303 + Ospow + Gérald Colangelo + gcolangelo&ospow.com +39304 + Shine Data AB + Thomas Ostmans + thomas.ostmans&shinedata.se +39305 + WiVDO, Lda + Luis Figueiredo + luisfigueiredo&wivdo.com +39306 + Intellidesign + Andrew Ward + andrew&intellidesign.com.au +39307 + Clealink Technology Co., Ltd. + Takamichi Mizuhara + snmp&clealink.jp +39308 + Blinkenlichten Open Source Solutions + Rouven Sacha + info&blinkenlichten.de +39309 + Centrum Promocji i Rozwoju Inicjatyw Obywatelskich OPUS + Wojciech Pietruszewski + admin&opus.org.pl +39310 + Sønderborg Kommune + Bo Mathiasen Bladmose + hostmaster&sonderborg.dk +39311 + Wellspan + David Smith + dsmith14&wellspan.org +39312 + Brookhaven Memorial Hospital + Mary Ellen Shortell + meshortell&bmhmc.org +39313 + iFAX Solutions, Inc. + Patrice Fournier + patrice.fournier&ifax.com +39314 + Efactures (JLEM) + Laradji nacer + nacer.laradji&gmail.com +39315 + Xelion b.v. + Hans Bos + snmp.pen.registratie&xelion.nl +39316 + Savtira + Zoltan Patay + zpatay&savtira.com +39317 + Systems Solutions & Development Technologies Ltd + Firoze Muhammad Zahidur Rahman + jewel&ssd-tech.com +39318 + QUALIS AUDIO, INC. + RICHARD CABOT + snmp&qualisaudio.com +39319 + mr-brooks.com + Darren Brooks + contact&mr-brooks.com +39320 + Canada Revenue Agency + Nicolas Tolstoy + Nicolas.Tolstoy&cra-arc.gc.ca +39321 + Schooneman.net + Ivo Schooneman + ivo&schooneman.net +39322 + Champlain Valley Physicians Hospital Medical Center + Kelly Ahern + kahern&cvph.org +39323 + E2E4 TECHNOLOGIES + VENKATASIVAKUMAR BOYALAKUNTLA + shiva&e2e4tech.com +39324 + OS NEXUS, Inc + Steven Umbehocker + steve&osnexus.com +39325 + SecuAvail, Inc. + Kenji Matsuura + matsuura&secuavail.com +39326 + University of Neuchatel + Antoine Jacot-Descombes + antoine.jacot-descombes&unine.ch +39327 + ID Cyber-Identity Ltd + Adrian Mueller + info&cyber-identity.com +39328 + SeaWell Networks + Ram Ranganathan + ramr&seawellnetworks.com +39329 + Lesley University + Scott Bendekgey + sbendekg&lesley.edu +39330 + Dalian Jinghaofeng Techbology Co., Ltd + John Terry + yujinjun5&126.com +39331 + SurGATE Labs + ismail yenigul + ismail.yenigul&surgate.com +39332 + Zuercher Lehrbetriebsverband ICT + Primus Hofmann + technik&zli.ch +39333 + m3 bauprojektmanagement gmbh + Joachim Konetzki + joachim.konetzki&m3-gmbh.de +39334 + RADMOR S.A. 
+ Marcin Lewandowski + marcin.lewandowski&radmor.com.pl +39335 + basis Volume Limited + Dr Charlton Tavares + cptavares&btconnect.com +39336 + Kraus + Christian Kraus + uni&christiankraus.de +39337 + Elephant Bird Consulting + Tom Geudens + tom.geudens&hush.ai +39338 + Operate4all + Martijn Windgassen + noc&operate4all.eu +39339 + Bagsbug + Agnes FENAUX + agnes.fenaux&bagsbug.net +39340 + Kingsbrook Jewish Medical Center + Melissa Gonzalez + MGonzalez&kingsbrook.org +39341 + Soifdinfo + Kpama Frédéric + contact&soifdinfo.fr +39342 + CSF Sistemi S.r.l. + Marco De Lellis + marco.delellis&gruppocsf.com +39343 + Vennetics Limited + Ivan McShane + ivan.mcshane&vennetics.com +39344 + NextNav, LLC + Frank Patry + it&nextnav.com +39345 + ConCen + Tom Potts + yeti&concen.org +39346 + Communaute d'agglomeration Herault Mediterranee + Laurent Miserey + l.miserey&agglohm.net +39347 + LeoNux + Leon Baker + leon&leonux.co.za +39348 + Hochschule für Telekommunikaton Leipzig + Haiko Wolf + wolf&hft-leipzig.de +39349 + National Observatory of Athens + Nikolaos Milas + nmilas&noa.gr +39350 + SuperNAS + Eric de Hooge + ericdehooge&supernas.eu +39351 + Voxbone SA + Laurent JARBINET + laurent&voxbone.com +39352 + Adirondack Community College + Bryan Goodwin + goodwinb&sunyacc.edu +39353 + Biogen Idec + Aldrin Carao + Aldrin.Carao&biogenidec.com +39354 + Oceus Networks, Inc. + Kevin Stiles + kstiles&oceusnetworks.com +39355 + Jeanes Hospital + Andrew Gavin + andy.gavin&tuhs.temple.edu +39356 + Showa University + Hiromasa Inoue + itcenter&ic.showa-u.ac.jp +39357 + INSTITUTO FEDERAL DO CEARÁ - IFCE + Antonio Rodrigo dos Santos Silva + rodrigosantos&ifce.edu.br +39358 + Prospect Computer Systems Inc. (PCS) + Luke Tsang + luketsang&yahoo.ca +39359 + Supranetcom + Pierre Dubois + labo&supranetcom.com +39360 + Welotec GmbH + Jos Zenner + j.zenner&welotec.com +39361 + FraudBuster + Léonard Wauters + leonard.wauters&fraudbuster.mobi +39362 + Activanos SAS + Daniel Hennion + jeandaniel.hennion&activanos.com +39363 + Wessel Dankers + Wessel Dankers + wsl-iana&fruit.je +39364 + The Basis Group, Inc. + Dusty Little + dustin.little&groupbasis.com +39365 + Komar Consulting Inc. + Brian Komar + bkomar&komarconsulting.com +39366 + href.net + Carsten Zeumer + carsten.zeumer&href.net +39367 + Theodore Productions + Ian Maddison + ian&theoprod.fr +39368 + Aqcua Electronics + Alejandro Pasika + alex&acquaelectronics.com.ar +39369 + Silicon Valley Medical Instruments + Steve Reynolds + stever&svmii.com +39370 + Reelway GmbH + Stephan Schneider + s.schneider&reelway.de +39371 + deron Consulting GmbH + Theodoros Paraskevopoulos + Theodoros.Paraskevopoulos&deron.de +39372 + Teo Technologies, Inc. + Steve Hill + steve.hill&teotech.com +39373 + Cooltech srl + Daniele Caiti + daniele.caiti&cooltech.it +39374 + TDK Technologies, LLC + Mark Henman + mark.henman&tdktech.com +39375 + Xtronix Ltd + Jim Carter + jim&xtronix.co.uk +39376 + Gigalan + Jean Daniel Hennion + jeandaniel.hennion&groupegigalan.com +39377 + g2b Holdings Inc. 
+ Kim Eung Cheon + kecheon&okg2b.com +39378 + WAO Corporation + Shunji Hirakawa + shunji_hirakawa&wao-corp.com +39379 + Lab M, llc + Sergei Beilin + toor&lab-m.ru +39380 + Fysisch-Mathematische Faculteitsvereniging + Eric Spreen + comcie&fmf.nl +39381 + Daggerpoint Technologies + Joe Schiavone + Joe&Daggerpoint.com +39382 + JSC "ZHASO" + Maksim Sorokin + sorokin_my&zhaso.ru +39383 + Flex Valley srl + Nicola Turato + nicola.turato&flexvalley.com +39384 + CRYPTAS it-Security GmbH + DI Stefan Bumerl + dnsadmin&cryptas.com +39385 + M.M. Elektrolab + Miroslav Milutinovic + melektrolab&sbb.rs +39386 + Ultra Electronics - Advanced Tactical Systems + Adam Gaither + adam.gaither&ultra-ats.com +39387 + Alpha Direct Services + Jérôme BELLEGARDE + j.bellegarde&alpha-direct-services.com +39388 + Online Classifieds Australia + Alfie John + alfie&alfiejohn.com +39389 + Beaulaton + Laurent Beaulaton + contact&laurent-beaulaton.fr +39390 + Baycom Technology Co.,Ltd + nie qingrong + nieqingrong&baycomwireless.com +39391 + H&L Instruments, LLC + Robert J. Landman + rlandman&hlinstruments.com +39392 + jameskinnaird.ca + James Kinnaird + james&jameskinnaird.ca +39393 + Global Reach Technology Inc. + Dr Christopher Spencer + chris.spencer&globalreachtech.com +39394 + Direccion General de Escuela Nacional Preparatoria + Gabriela Cancino Ramirez + gabriela.cancino&dgenp.unam.mx +39395 + ELDIS Pardubice, s.r.o. + Pavel Kolín + snmp&eldis.cz +39396 + Zygma Inc. + R.G. Wilsher + RGW&Zygma.biz +39397 + amitego engineering GmbH (formerly 'ToolBox Solution GmbH') + Tillmann Basien + tab&amitego.com +39398 + HvS-Consulting AG + Michael Hochenrieder + hochenrieder&hvs-consulting.de +39399 + Gumilyov Eurasian National University + Vladimir Gerassimov + vg&enu.kz +39400 + Institut für angewandte Funksystemtechnik GmbH + Martin Drischler + info&iaf-bs.de +39401 + Tilburg University + Corno Vromans + lis-unix&uvt.nl +39402 + B2F Concept + Vincent Riquer + exploitation&b2f-concept.com +39403 + GEBIT Solutions GmbH + Marko Eling + marko.eling&gebit.de +39404 + Keirex Technology Inc. + Yoshihiro Mochizuki + mochizuki&keirex.com +39405 + Yieldbroker Pty Limited + Alex Samad + alex.samad&yieldbroker.com +39406 + Benu Networks, Inc. + Michael Brown + mbrown&benunets.com +39407 + Explorys + Nicolas Seritti + systems.operations&explorys.com +39408 + Clark State Community College + Andrew Deans + deansa&clarkstate.edu +39409 + b3Lateral Ltd + Nitesh Bharadia + nbharadia&b2lateral.net +39410 + Ideco + Alexey Smirnov + smirnov&ideco.ru +39411 + Ongoing Warehouse AB + Henrik Ekman + henrik.ekman&ongoingsystems.se +39412 + ALBEDO Telecom SL + Jordi Colomer + jco&albedo.biz +39413 + SD Worx VZW + Ben Nijenhuis + security&sdworx.com +39414 + Domo, Inc. + Ron Hair + ron.hair&domo.com +39415 + MicroStep-MIS spol. s r.o. + Andrej Lucny + info&microstep-mis.com +39416 + Metropolitan Transportation Authority + David Laidig + sysadmin&mtabuscis.net +39417 + Kukinto + Phil J. Laszkowicz + enquiries&kukinto.com +39418 + Liberty Healthcare System Inc + Stephen Li + sli&libertyhcs.org +39419 + NETWORK PROCESS CONTROL SYSTEMS S.L. + Jordi Ferrando Fabra + jferrando&netplc.com +39420 + Delta Information Systems, Inc.
+ Gary Thom + gthom&delta-info.com +39421 + Rising-Gods UG + Georg Schröjahr + administration&rising-gods.de +39422 + JazzHands Management System + Todd Kover + enterprisenumber&jazzhands.net +39423 + Joshua Morgan + Joshua Morgan + joshua.morgan&gmail.com +39424 + hilltonic + guhongbo + hongbo.gu&hilltonic.com +39425 + Verein für Menschen mit Körper- und Mehrfachbehinderung e.V. + Johannes Köhler + johannes.koehler&zfk-wuerzburg.de +39426 + Medical Mutual of Ohio + David Bogatek + david.bogatek&mmoh.com +39427 + Cyrrus Products Ltd + Nigel Warren + nwarren&cyrrus.co.uk +39428 + Himilco + anis hachani + hachanienis&gmail.com +39429 + G10 Transportes - LTDA + Ricardo Oliveira + ricardo.oliveira&g10.com.br +39430 + ownCloud Inc. + Arthur Schiwon + arthur.schiwon&owncloud.com +39431 + BLT Italia srl + Alfredo Bartelletti + alfredo&blt.it +39432 + devcoach GbR - Michael Willers & Partner + Michael Willers + michael.willers&devcoach.com +39433 + T8, LLC + Yurij Kapin + Info&t8.ru +39434 + Isthmus SARL + Pierre-Yves Bourguignon + pbourguignon&isthmus.fr +39435 + Sistemas Dypsa S.R.L + Fernando Romero + fernando.romero&sisdypsa.com.ar +39436 + Just Commodity Software Solutions Pte Ltd + Frank Edward + frank.edward&justcommodity.com +39437 + Six Quarks + Svetlana Bychkova + alexander&six-q.net +39438 + CB10 + C. Brooks + medievalist&gmail.com +39439 + FACOS + Daniel Wolff + daniel&facos.edu.br +39440 + JELETL + Thierry DELHAISE + thierry.delhaise&jeletl.fr +39441 + George Watson's College + Alistair Riddell + webmaster&gwc.org.uk +39442 + Fred Hutchinson Cancer Research Center + Jason Burdullis + jburdull&fhcrc.org +39443 + Banco de la Provincia del Nequén S.A. + José Mateo + jose.mateo&bpn.com.ar +39444 + Caucasus Online LLC + Arman Obosyan + hts&co.ge +39445 + Alameda County Medical Center + John Cooper + jcooper&acmedctr.org +39446 + RESEARCH CONCEPTS, INC. + ETHAN GARTRELL + egartrell&researchconcepts.com +39447 + Career Partner GmbH + Peter Bender + p.bender&careerpartner.eu +39448 + Swiss Mideast Finance Group AG + Thomas Bolliger + Administrator&swissmideast.ch +39449 + UBI sistemi e Servizi S.c.p.a + Luca Biancardi + ubiss-ictsicurezza-adm&ubiss.it +39450 + The Lampo Group, Inc. + Jim Riggs + itdepartment&daveramsey.com +39451 + Zarathustra Solucoes em Informatica + Alessandro Madruga Correia + amcorreia&zarathustra.com.br +39452 + Artemis Investment Management LLP + Robert Smith + rob.smith&artemisfunds.com +39453 + Bequant S.L. + Guillermo Lopez + admin&bequant.com +39454 + University of Scranton + Philip Erb + systems&scranton.edu +39455 + Reno A&E + Chris Mysz + chrism&renoae.com +39456 + Denis Bondar + Denis Bondar + bondario&gmail.com +39457 + pcsysteme.at IT Service Gmbh + Ing. Robert Pabeschitz + robert.pabeschitz&pcsysteme.at +39458 + Lobster GmbH + Steffen Brehme + steffen.brehme&lobster.de +39459 + StoFey GbR + Michael Kirchner + info&stofey.de +39460 + CIPHERTELECOM + Porfirio Lopez + george.lopez&datacredito.info +39461 + Arachnocypher Web Services + Greg Tracy + arachnocypher&gmail.com +39462 + Damovo Belgium nv/sa + Paolo De Luca + it.belgium&damovo.com +39463 + Adaptive Networks, Inc. 
+ Khaled Saab + saab&adaptiveNetworks.com +39464 + Solid Instance, Inc + Ron Steed + ron&solidinstance.com +39465 + ENBALA Power Networks + Xander Botha + xbotha&enbala.com +39466 + NV Nederlandse Gasunie + Peter Hofman + P.M.Hofman&gasunie.nl +39467 + RISC Software Gmbh + Wolfgang Hennerbichler + wolfgang.hennerbichler&risc-software.at +39468 + Kettering University + Winfred Harrelson + root&kettering.edu +39469 + Neosho Memorial Regional Medical Center + Gretchen Keller + gretchen_keller&nmrmc.com +39470 + MetaEmotion S.L + Diego Garcia Morate + diego.garcia&metaemotion.com +39471 + El Centro Regional Medical Center + Sylvia Chavarria + schavarria&ecrmc.org +39472 + Karsten Eberding + Karsten Eberding + karsten&eberding.eu +39473 + TGW Limited + Alixx Skevington + alixx.skevington&tgw-group.com +39474 + Kaleva Oy + Juho Rankinen + juho.rankinen&kaleva.fi +39475 + TUIR WARTA S.A. + Paweł Jankowski + pawel.jankowski&warta.pl +39476 + Antinea Software + Sébastien Delmée + contact&antineasoftware.eu +39477 + American Bureau of Shipping (ABS) + Cory Leonardi + cleonardi&eagle.org +39478 + Andi Miller Engineering Services + Andi Miller + andi&andimiller.net +39479 + sysangels e.U. + Fresel Michal + m.fresel&sysangels.com +39480 + BC Hydro + Kyle Luciak + kyle.luciak&bchydro.com +39481 + WWPass Corporation + David L. Bucciero + d.bucciero&wwpass.com +39482 + Pieceable Software, Inc. + Bob Ippolito + ops&launchcommander.com +39483 + Eneo Tecnologia S.L. + Juan Jesús Prieto Tapia + jjprieto&eneotecnologia.com +39484 + The University of Connecticut Health Center + Kathy A Noel + noel&uchc.edu +39485 + EUCAST Co., Ltd. + Shinha Kang + shkang&eu-cast.com +39486 + ChongQing TrunkSoft Technology Co., LTD. + Huayong Liu + liuhy&trunksoft.net +39487 + Wavestore Limited + Christopher Mocock + snmp&wavestore.com +39488 + Plexipi Kft. + Zsolt Varady + info&plexipi.hu +39489 + EZYield + Andrew Bailey + abailey&ezyield.com +39490 + Data Security Systems Solutions Pte Ltd + Seng Kee Tan + sengkee&ds3global.com +39491 + Eleiss + Nikunj Master + nikunjmaster&eleiss.com +39492 + Steven Denzinger + Steven Denzinger + Steven&Denzinger.info +39493 + Brückenkopf e.v. + Hanns Mattes + hanns&hannsmattes.de +39494 + comBerg Computersysteme + Andreas Alberg + info&comberg-computer.de +39495 + CHU de Charleroi + Dr André Vandenberghe + andre.vandenberghe&chu-charleroi.be +39496 + Arch Systems, Inc. + Sunao Shirakawa + techinfo&archsystem.com +39497 + EMSYS Design Inc + Bojan Keca + bojan&emsys-design.com +39498 + Peerpath + zhou jin + jin.zhou&peerpath.net +39499 + Flowmon Networks a.s. (formerly 'INVEA-TECH a.s.') + Jan Pazdera + pazdera&flowmon.com +39500 + Sumatronic AG + A. von Mentlen + andre.vonmentlen&sumatronic.ch +39501 + Bayerische Medien Technik (bmt) GmbH + Martin Dreher + info&bmt-online.de +39502 + Aaron Wolfe + Aaron Wolfe + aaron&aaronwolfe.com +39503 + Adsalsa Publicidad, S.L. + Ivan Novas Ferrin + ivan.novas&adsalsa.com +39504 + CTI Group (Holdings), Inc. + Siddhartha Rao + srao&ctigroup.com +39505 + Lingo Systems SA de CV + Juan Rubén Marrero Vizcaíno + pen&lingo.mx +39506 + Qalixa Solutions AB + Mathias Grundell + mathias.grundell&gmail.com +39507 + Kreuzer-bb networking + Johannes Kreuzer + kontakt&kreuzer-bb.de +39508 + Schmitz RZ Consult GmbH + Martin Schmitz + schmitz&schmitz-rz-consult.de +39509 + Totemo AG + Marcel Mock + support&totemo.ch +39510 + Institut de Biologie Structurale + fred metoz + frederic.metoz&ibs.fr +39511 + Schiavone Org + Izzy M. 
Inocenti + Joe&Schiavone.org +39512 + saman insurance + makan jooyani + m.jooyani&samaninsurance.com +39513 + MacAllister Software + Bill MacAllister + bill&ca-zephyr.org +39514 + Associated Engineering + Scott Flowers + flowerss&ae.ca +39515 + Jõhvi Vallavalitsus + Karel Niine + karel.niine&johvi.ee +39516 + Indisys + Adrian Real Oliva + a.real&indisys.es +39517 + Evernote Corp. + Theral Mackey + tmackey&evernote.com +39518 + KAGE Systems Ltd + Alan Evans + support&kagesys.com +39519 + HavenSec, Inc. + Edward Beuerlein + ceo&havensec.com +39520 + Qualycloud + Benoit Lecocq + benoit&qualycloud.com +39521 + xG Technology + Siddhardha Garige + sidg&xgtechnology.com +39522 + Maldivica Inc + Adam Bane + adam&maldivica.com +39523 + Aliphcom + Joel Krauska + Itvendor&jawbone.com +39524 + Nacogdoches Memorial Hospital + Jenny Clifton + cliftonj&nacmem.org +39525 + Creotech Instruments SA + Grzegorz Kasprowicz + Grzegorz.Kasprowicz&creotech.pl +39526 + LogicLore Incorporation + Prabakar Kalivaradan + prabakar.prabakar&gmail.com +39527 + Motaavi, LLC + Kaiting Chen + kaiting&motaavi.com +39528 + ADDIT LTD + Anton Sinelnikov + antal&addit.ru +39529 + Cyberside Ltd. + Nick Lowe + nick.lowe&gmail.com +39530 + 6cure SAS + Vincent Boissée + vincent.boissee&6cure.com +39531 + n-Tier construct GmbH + Dr. Rolf Dahm + dahm&n-tier.de +39532 + Quanex Building Products + Ryan Grant + ryan.grant&quanex.com +39533 + Armadillo Software + H. Eugene Latham + genel&armadillo.nu +39534 + Mobiata, LLC + Jeffrey Parker + jdparker&mobiata.com +39535 + modusoft GmbH + Sascha Ittner + sascha.ittner&modusoft.de +39536 + RBNetwork + André Keller + ak&rbnw.net +39537 + Supreme Education Council + Amal Ahmad Alkuwari + itdirector&sec.gov.qa +39538 + Bright Pattern, Inc + Sergey Menshikov + sergey.menshikov&brightpattern.com +39539 + Northwest Hospital & Medical Center + ADAM PARCHER + adam.parcher&nwhsea.org +39540 + Saphety Level - Trusted Services, S.A. + Bruno Marinho + pki&saphety.com +39541 + Blacklight IT UG + Maximilian Hess + hess&blacklight-it.com +39542 + Calgary Co-operative Association Limited + Jeff Larsen + jlarsen&calgarycoop.com +39543 + GNUstep (gnu.org) + Richard Frith-Macdonald + rfm&gnu.org +39544 + igolgi Inc. + Kumar Ramaswamy + kumar.ramaswamy&igolgi.com +39545 + Winters Broadband LLC + Brian Horn + planning&winters-broadband.com +39546 + Infoage + Martin A Flynn + martin&infoage.org +39547 + WakeMed Health and Hospitals + Jackie Cheeks + jcheeks&wakemed.org +39548 + Beijing TopShine Technology Co., Ltd. + dengji hua + dengjihua&topshinetech.com +39549 + DataCore Systems Ltd + John Reynolds + john.reynolds&datacoresl.com +39550 + KE2 Therm Solutions + Steve Roberts + admincontact&ke2therm.com +39551 + UAB SignDigi + Andrejs Čelnovs + chelnov&co.inbox.lv +39552 + Dalian Co-Edifice Video Technology Co. Ltd. + ZHUANG Xu + sanzluisgarcia&sina.com +39553 + Beijing Dynamic Power Co.,Ltd. + shiqiang + shiqiang&dpc.com.cn +39554 + D'Crypt Private Limited + Tan, Kim Sai + kim_sai&d-crypt.com +39555 + gyselroth GmbH + Raffael Sahli + sahli&gyselroth.com +39556 + University of Zurich, Institute of Mathematics + Rafael Ostertag + support&math.uzh.ch +39557 + Horoquartz + Patrick Paranthoen + patrick.paranthoen&horoquartz.fr +39558 + that's it gmbh + Andreas Woerner + woerner&thatsit-gmbh.de +39559 + derEDVdienst + Frank Leprich + info&derEDVdienst.de +39560 + Leads Technologies Limited (formerly 'Lead Innovation System Technology Corp.') + Frank, H.L.Lai + frank&leadstec.com +39561 + XSD Zrt. 
+ Adam Popper + info&xsd.hu +39562 + Greenhills IT Ltd. + Mr Martin Wheldon + martin.wheldon&greenhills-it.co.uk +39563 + Smart Messaging Solution Pte Ltd + Hanny Tidore + hanny.tidore&smsdr.com +39564 + Yunnan Technician College + Guanhua Tao + tgh328&gmail.com +39565 + Birzeit University + Raed Hindaileh + netadmin&birzeit.edu +39566 + Idealo Internet GmbH + Marc Schildt + ldap-oid&idealo.de +39567 + DJIGZO + Martijn Brinkers + martijn&djigzo.com +39568 + Liberologico Srl + Marco Bizzarri + m.bizzarri&liberologico.com +39569 + max4G, Inc. + Jeff Stern + jstern&max4g.com +39570 + POOL4TOOL AG + Georg Roesch + georg.roesch&pool4tool.com +39571 + L33 NETWORKS + Samuel MULLER + sml&l33.fr +39572 + TCPWave Inc + Patty Parcha + Patty.Parcha&tcpwave.com +39573 + Sauper Associates, Inc. + Metro Sauper + msauper&sauper.com +39574 + Guido Berhoerster + Guido Berhoerster + guido&berhoerster.name +39575 + Global Patient Identifiers, Inc. + Barry Hieb + oids&vuhid.org +39576 + ElvenStar + Michael Kennedy + security-l&elvenstar.tv +39577 + GlobeOSS Sdn. Bhd. + Redianto Satyanugroho + redianto&globeoss.com +39578 + Sven van den Heuvel GmbH + Sven van den Heuvel + svdheuvel&t-online.de +39579 + Hearst Business Media + Chris Suozzi + csuozzi&hearst.com +39580 + dual-PROJEKT Paweł Szmidt + Pawel Szmidt + pszmidt&dual-projekt.com.pl +39581 + ZPBE ENERGOPOMIAR-ELEKTRYKA + Marek Nabiałczyk + marek.nabialczyk&elektryka.com.pl +39582 + WebSatMedia Pte Ltd + Honggo Yeo + honggo&websatmedia.com +39583 + Egemin Automation + Jim Marstboom + jim.marstboom&egemin.be +39584 + Hannoversche Volksbank eG + Lars Hodann + lars.hodann&hanvb.de +39585 + Kleyling Spedition GmbH + Jona Glaubitz + edv&kleyling.de +39586 + Datasiel Spa + Maurizio Pastore + oid&datasiel.net +39587 + GuideStone Financial Resources of the Southern Baptist Convention + Jimmy Brown + GuideStone.IANA&GuideStone.org +39588 + SELECTRIC Nachrichten-Systeme GmbH + Andreas Ahland + andreas.ahland&selectric.de +39589 + eBackpack, Inc. + Doug Mayer + doug&ebackpack.com +39590 + Fundació privada i2cat + Andrea Cervera + andrea.cervera&i2cat.net +39591 + Aux Sable Liquid Products LP + Keith Nush + Keith.Nush&auxsable.com +39592 + Sermotec Communications GmbH + Harald Petrovitsch + office&sermotec.at +39593 + Suofeiya Home Collection Co., Ltd + JohnsonChen + johnson.chen&sogal.com.cn +39594 + Point Of Pay Pty Ltd + Jatinder Singh + jatinders&pointofpay.com.au +39595 + GuangZhou JoySim Network Technology Co.,Ltd. + liangwei Wu + 164254840&qq.com +39596 + Zhejiang Broadcast&Television Technology Co.,Ltd. + MeiLi Pan + ml.pan&163.com +39597 + Chongqing Chongyou Communication Technologies Co,Ltd. + ZhiKuan Li + liangyan&cqupt.edu.cn +39598 + INFACOM S.L. + Andres Espin + aespin&infacom.es +39599 + Mzinga, Inc. + Eric Kreiser + ekreiser&mzinga.com +39600 + Capital Bank + Christian Ropposch + christian.ropposch&capitalbank.at +39601 + Vector 3 S.A. + Pau Ceano + pau.c&vector3.es +39602 + Regional Medical Center at Memphis + Doug Dohmen + ddohmen&the-med.org +39603 + DTTec Consulting + Mario Teichmann-Denschlag + Mario.Teichmann-Denschlag&dttec.de +39604 + Accuenergy (CANADA) Inc. + Zhi Zhao + george.zhao&accuenergy.com +39605 + GlobeCast France + GAUDIN Thierry + thierry.gaudin&globecast.com +39606 + GoldKey Security Corporation + Thomas Eyre + teyre&goldkey.com +39607 + NX Information Services + Matthieu MICHAUD + matthieu&nxdomain.fr +39608 + Palosanto Solutions S.A. 
+ Rocio Mera + rmera&palosanto.com +39609 + next-pbx + aeran moon + wphilmoon&gmail.com +39610 + BITLANCER LLC + Matthew Juszczak + contact&bitlancer.com +39611 + Video Design Software Inc. + Wyk Parish + wyk&videodesignsoftware.com +39612 + Stantec Consulting + Semyon Chaichenets + semyon.chaichenets&stantec.com +39613 + Tambora Systems India Pvt Ltd (formerly 'Sawridge Systems India Pvt Ltd') + Pothirajan Kandasamy + pothirajan.k&tamborasystems.com +39614 + Valid + Ben van Zanten + Ben.van.Zanten&valid.nl +39615 + 2008mcitp.com + Mark Thrower + mark.thrower&2008mcitp.com +39616 + GlasgowNet + Kyle Gordon + kyle&lodge.glasgownet.com +39617 + PDT Partners, LLC + John Rau + johnrau&pdtpartners.com +39618 + Cloud Assist + Mark Wyman + mark.wyman&cloudassist.com.au +39619 + Institutul pentru Tehnologii Avansate + Aurelian Tolescu + ati&dcti.ro +39620 + JANUS srl + Gianpiero Lovat + janus&jns.it +39621 + SOPLEX Consult GmbH + Sven Kotting + tech&soplex.de +39622 + Intermas France S.A.S + Gomarin Philippe + Philippe.Gomarin&intermas-el.fr +39623 + PCS Systemtechnik GmbH + Markus Zinner + mzinner&pcs.com +39624 + herold&schönsteiner networks UG haftungsbeschränkt + Fabian Kreuzkam + fabian.kreuzkam&herold-schoensteiner.de +39625 + Kraft Foods + Bruce Portz + bruce.portz&hp.com +39626 + Hackerspace Warsaw + Tomek Dubrownik + tomek&hackerspace.pl +39627 + Rhinobee + Kevin Weare + kevin&rhinobee.com +39628 + Vegayan Systems + Surinder Singh + surinder_singh&vegayan.com +39629 + Auxilium + Boris Lenzinger + boris.lenzinger&auxilium-conseil.fr +39630 + mailplus.co.at + Johannes Schulz + office&mailplus.co.at +39631 + mocube + Alexander Mette + admin&mocube.net +39632 + Hyro Pty Ltd + David Rhee + david.rhee&hyro.com +39633 + Dos Tek Group + Zakon Kuruchbekov + pki&dostek.kg +39634 + HighGrand Technology + Pun Lam + phl&highgrand.com.cn +39635 + Beijing ZhongChuang Telecom Test Co.,LTD + Huiling Shen + happyshlshl&126.com +39636 + Comet Computer GmbH + Florian Albrechtskirchinger + albrechtskirchinger&comet.de +39637 + IMP Solutions Network Operations + Jeff Warnica + jeff.warnica&impsolutions.com +39638 + Cornerstone Backup, Inc. + Nigel Whillier + nwhillier&cornerstonebackup.com +39639 + Vigilent Corporation + Kennard White + kennardwhite&vigilent.com +39640 + Europalab Networks + Michael Schloh von Bennewitz + michael&schloh.com +39641 + SPICE DIGITAL LIMITED + Gurpreet Singh Gondara + gurpreet.singh&spicedigital.in +39642 + S3 s.r.l. + Ruben Castelletti + ruben.castelletti&s3srl.com +39643 + WedaCon Informationstechnologien GmbH + Thorsten H. Niebuhr + info&wedacon.net +39644 + The University of North Carolina at Greensboro + Robert Gorrell + idm-admin&uncg.edu +39645 + Anglo-American School of Moscow + Ilya Pekshev + iana-pen&aas.ru +39646 + Mikro Odeme Sistemleri A.S + Deniz Bektas + deniz.bektas&mikro-odeme.com +39647 + iNic AS + Bjornar Ness + bjornar&fastname.no +39648 + Enovacom + Christophe Marcel + cmarcel&enovacom.fr +39649 + General Data Technology Co. Ltd. + Yin Lina + info&gbase.cn +39650 + AEMO Ltd + Kerrod Fuller + iana.administrator&aemo.com.au +39651 + ComTecT + Werner Saathoff + saathoff&comtect.de +39652 + The William Carter Company + Richard Hunt + rich.hunt&carters.com +39653 + Imbio + Ryan Chamberlain + ryanchamberlain&imbio.com +39654 + TISSOT + Andrés M. Tissot + info&etissot.com.ar +39655 + Humbug Telecom Labs Limited + Nir Simionovich + nirs&humbuglabs.org +39656 + Nationale Nederlanden (formerly 'ING Insurance /IM') + R.E. 
Flinkerbusch + ray.flinkerbusch1&nn-group.com +39657 + Administration of the Vladimir Region + Pavel Shuba + shuba&avo.ru +39658 + TollNet a.s. + Petr Vokac + petr.vokac&tollnet.cz +39659 + NSD CO., LTD. + Kenji Yuasa + yuasa&nsd.co.jp +39660 + Tarana Wireless Inc + Rakesh Tiwari + rakesh&taranawireless.com +39661 + Allianz Managed Operations and Services SE - oneWeb + Markus Moltenbrey + extern.moltenbrey_markus&allianz.de +39662 + metafinanz Informationssysteme GmbH + Markus Moltenbrey + markus.moltenbrey&metafinanz.de +39663 + RENK AG + Helmut Wirth + helmut.wirth&renk.biz +39664 + Lingua-IT Sp. z o.o. + Marcin Suchocki + m.suchocki&lingua-it.pl +39665 + Fat Mongoose Technologies, Inc. + Alan Chan + ac&fatmongoose.com +39666 + Consult A Nerd + Rod Miller + rod&consultanerd.com +39667 + CodeMettle, LLC + Cliff Ford + cliff&codemettle.com +39668 + Iridium Digital Systems Corporation + Falcon Darkstar Momot + falcon&iridiumlinux.org +39669 + Koning Corporation + Shaohua Liu + shaohua.l&koningcorporation.com +39670 + knaute.info + Thomas Knaute + thomas&knaute.info +39671 + NEXTDC Ltd + Bob Purdon + bob.purdon&nextdc.com +39672 + Elite Automação Comércio e Serviços Ltda ME + José Luiz Vieira + contato&eliteacs.com.br +39673 + Talon Storage Solutions + Shirish H. Phatak + shirish&talonstorage.com +39674 + OctoGate GmbH + Ralph Viehhofer + ralph.viehhofer&hsm.de +39675 + Vast Array Corporation + Benjamin F. Beideman + hostmaster&vac-usa.net +39676 + Pason Systems Corp. + Abudy Charanek + abudy.charanek&pason.com +39677 + Archdata SPRL + Philippe Delhaise + phil.delhaise&gmail.com +39678 + American Domain Names LLC + Christopher M Mettin + cmettin&unclesamnames.com +39679 + JSC Tekhdiagnostika + Serge Voronin + voronin&tdiag.ru +39680 + Nozhup B.V. + Richard van Bemmelen + richard.van.bemmelen&breplu.nl +39681 + Catalyst Repository Systems + Kevin Fries + kfries&catalystsecure.com +39682 + Exegin Technologies Limited + Leslie Mulder + ljm&exegin.com +39683 + Single Touch Interactive + Mark Ramirez + mark&singletouch.net +39684 + Genteel.org + Gentarou Kimura + gentianster&gmail.com +39685 + WTG Muenster + Michael Grote + service-ms&wtg.com +39686 + Acquirente Unico + Cappadozzi Elettra + elettra.cappadozzi&acquirenteunico.it +39687 + JSC Aprelevka X-ray factory + Alexey Surnin + alexey_surnin&amico.ru +39688 + interactingstars.org + Prof. Natalia Ivanova + sysadmin&interactingstars.org +39689 + BRF + Thierry Groteclaes + thierry.groteclaes&brf.be +39690 + IPACCT Ltd. + Boian Bonev + bbonev&ipacct.com +39691 + embeddeers GmbH + Josef Werth + netadmin&embeddeers.com +39692 + Ideal Mahdban Engineering Services + Mohammad Reza Mahdavifar + Mahdavifar&mahdban.com +39693 + N-Partner + Max Tsai + max&npartnertech.com +39694 + Diamont Equipamentos Especiais Ltda + Clebe Vitorino + engenharia&diamont.com.br +39695 + FRAFOS GmbH + Stefan Sayer + stefan.sayer&frafos.com +39696 + Banglalion Communications Limited + Rezwanul Haque + rezwanul.haque&banglalionwimax.com +39697 + Intelligent Security Systems + Aluisio Figueiredo + aluisio&isscctv.com +39698 + Mianyang Netop Telecom Equipment Co.,Ltd + Tan Xiaorong + smiletann&yahoo.com.cn +39699 + Robustel Technologies + Lianfeng Cao + info&robustel.com +39700 + Sonic Boom Wellness + Eric Fox + eric&sbwell.com +39701 + Mangelal & Sons + Vikash Ku. 
Agrawal + mangelalandsons&gmail.com +39702 + Linux Solutions + Kayo Sérgio Guimarães + kayo_tec&hotmail.com +39703 + SASCO + Esteban De La Fuente Rubio + esteban&sasco.cl +39704 + Unyonsys + Bruno LEON + bruno.leon&unyonsys.com +39705 + skslater.net + Simon Slater + skslater&skslater.net +39706 + ADTEK + Arhainx Jean + arhainx&adtek.fr +39707 + Cumbria Constabulary + Andrew Douthwaite + andrew.douthwaite&cumbria.police.uk +39708 + Telent GmbH * A division of euromicron Group + Michael Manderscheid + michael.manderscheid&telent.de +39709 + Broadcast Graphics and Control Limited + Stephen Mills + stephen.mills&bgandc.com +39710 + Wentworth-Douglass Hospital + Lynda Powers + lynda.powers&wdhospital.com +39711 + Radiobolaget Gunhamn och Åsbrink AB + Tim Sjöstrand + tim&radiobolaget.com +39712 + QSD Sistemi Srl + Marco Pagnoncelli + marco.pagnoncelli&qsdsistemi.it +39713 + Abertis telecom + Angel Dominguez + angel.dominguez&abertistelecom.com +39714 + Associacio Cultural TGK + Alex Barcelo + intranet&telecogresca.com +39715 + DominionTech Computer Services + Nabil Alanbar + nabil&dominiontech.com +39716 + University of South Alabama Health System + Terry Dees-Kolenich + tdkoleni&usouthal.edu +39717 + Westerholt + Jonathan Westerholt + joniw&joniw.de +39718 + Silverflare Ltd + Jan Nielsen + jan.nielsen&silverflare.com +39719 + TCF Financial Corporation + Kevin Serafin + kserafin&tcfbank.com +39720 + Maryland Stadium Authority + Joe March + JMarch&mdstad.com +39721 + DUCSS + Ben Doyle + admin&ducss.ie +39722 + Underpin Taiwan Ltd. + Ken.Fu + ken.fu&underpin.com.tw +39723 + Logistics Energy Korea Co., Ltd. + An Byeonghwan + abh041804&naver.com +39724 + [j]karef GmbH + Matthias Münzner + matthias.muenzner&jkaref.com +39725 + JeraSoft + Andrii Zinchenko + mail&zinok.org +39726 + British National Party + Chris Barnett + webtech&bnp.org.uk +39727 + IT Ihme + Miika Pekkarinen + miika.pekkarinen&ihme.org +39728 + Bessels IT + Dick Bessels + info&bessels.it +39729 + Cilutions, Digital Media Bridge + Bill Stanton + bstanton&cilutions.com +39730 + SHANGHAI MEXON COMMUNICATION TECHNOLOGY.,LTD + Zhang jian + 806&mexontec.com +39731 + Emind Systems Ltd + Lahav Savir + info&emindsys.com +39732 + Shenzhen Smart Cube Intelligent Technology Co.,LTD + Fisher Yu + fisheryu&smart3.cn +39733 + Boss Info AG + Simon Boss + system&bossinfo.ch +39734 + wujiman.net + Martin Kraus + iana&wujiman.net +39735 + Main Street Softworks, Inc. + Darryl Wright + info&monetra.com +39736 + ACTIVE SYSTEM MANAGEMENT + Mr. Chinh Phan + c.phan&active-system.com +39737 + CommercePack + Aditya Advani + support&commercepack.com +39738 + IBS America, Inc. + Carlo Accorsi + it&ibs-us.com +39739 + Madscientistclub + David Corni + dcorni&cinci.rr.com +39740 + PSA Zeebrugge International Port nv + Louis Stevens + louis.stevens&globalpsa.com +39741 + Egnyte Inc. + Ramanathan Kavasseri + rkavasseri&egnyte.com +39742 + Société des ingénieurs Arts et Métiers + Vivien GIRARD + vivien.girard&gadz.org +39743 + Shanghai Dongzhou Lawton Telecom Technologies Co.,ltd + FengJiang + jiangfeng&dztx.com +39744 + EDESA + Javier Sanchez + javier.sanchez&edesa.com.co +39745 + OPower, Inc. + Jesse Kempf + jesse.kempf&opower.com +39746 + MEITSUDENSHI + Yuji Tsukamoto + meitu&din.or.jp +39747 + Zhejiang Uniview Technologies Co., Ltd. + wuzhaohui + wuzhaohui&cn-uniview.com +39748 + Intesyn S.r.l. 
+ Luca Lo Iacono + support&intesyn.it +39749 + Kassenärztliche Vereinigung Brandenburg + Stefan Mrosek + stefan.mrosek&kvbb.de +39750 + Union Hospital Health Group + John McKinney + jmckinney&uhhg.org +39751 + Mobintegro + Alexander Atamanov + admin&mobintegro.com +39752 + HootSuite Media Inc + Chris Maxwell + hosting&hootsuite.com +39753 + Vignold Workflow & New Media GmbH + Patrick Seiferth + pati&ooaahh.de +39754 + HAVI Logistics IS GmbH + Holger Guyens + holger.guyens&havilog.com +39755 + Saint Mondiale PTE LTD + Greg Broux + gbroux&saint-mondiale.com +39756 + St. Johns County School District + Justin Forfar + justin.forfar&stjohns.k12.fl.us +39757 + Schomäcker GmbH + Christian Pflüger + christian.pflueger&schomaecker-gmbh.com +39758 + imoxion + Yeonchan,Jung + dev&imoxion.com +39759 + LOREME + Patrick Dumoulin + technique&loreme.fr +39760 + New Cosmos Electric Co., Ltd. + Kosaku Yanagihara + yanagihara.kousaku&new-cosmos.co.jp +39761 + Galiam Capital, LLC + Austin B. Calvert + austin.calvert&galiam.com +39762 + Radio Mobile Access, Inc + Richard Bouchard + rbouchard&radiomobiles.com +39763 + Adams County School District 50 + Mark Hanson + mhanson&adams50.org +39764 + Transoft (Shanghai) Inc. + Jacky Gan + jacky_gam_2001&163.com +39765 + Keydap + Kiran Ayyagari + kayyagari&keydap.com +39766 + Null Pointer Inc. + Andrew Zeneski + andrew&andrewzeneski.com +39767 + TomTom International BV + David Bruce + david.bruce&tomtom.com +39768 + UAB Technologiju ir inovaciju centras + Aleksandras Jevsejevas + aleksandras.jevsejevas&etic.lt +39769 + Italian Grid Infrastructure (IGI) + Riccardo Brunetti + riccardo.brunetti&to.infn.it +39770 + Martin Neimeier + Martin Neimeier + neimeier&bibbernet.org +39771 + SIP Express Media Server + Raphael Coeffic + rco&iptel.org +39772 + Melchinger Systemhaus UG + Thomas Melchinger + thomas.melchinger&melchinger.net +39773 + Pannon Site Ltd. + Tamas Kiszely + tkiszely&pannonsite.hu +39774 + Criena Network + Karel W. Dingeldey + admin&criena.net +39775 + Capitol Lien Records & Research, Inc. + Ben Moudry + benm&capitollien.com +39776 + Andreas Strey + Adnreas Strey + aohrner&web.de +39777 + Simon Arlott + Simon Arlott + 16VsDHrUOzfdqHel-at-nospam-dot-yfEkR81FU9bInzV.pen.iana.simon&arlott.org +39778 + Young Master Supplies & General Enterprises Ltd + Michael Sifuni + meezy&youngmaster.co.tz +39779 + UCLA Anderson School of Managemetn + James Kim + james.kim&anderson.ucla.edu +39780 + Pikopiko Network + Anthony Low + shinji&pikopiko.org +39781 + T&W + Haiyang Zhang + haiyang.zh&gmail.com +39782 + Hangzhou I'converge Techonoloay Co.,Ltd + Ms Jane Xu + xujz&iconverge.cn +39783 + ASB Bank Limited + Ryan Cotterell + ryan.cotterell&asb.co.nz +39784 + National Jewish Health + Liam Schneider + admin&njhealth.org +39785 + DS/2 GbR + Dirk Strauss + info&ds-2.de +39786 + Travelocity.com + Jaime Diaz + jaime.diaz&travelocity.com +39787 + Suntrust Banks, Inc. + Larry Tanner + larry.tanner&suntrust.com +39788 + LogoDynamic Unit GmbH + Dr. Ernesto Schobesberger + es&ld-unit.com +39789 + Southwest Texas Regional Advisory Council + Sam Dibrell + sam&strac.org +39790 + Church & Dwight Co., Inc. + Gary Oppel + gary.oppel&churchdwight.com +39791 + Unified Microsystems + Christopher Morley + info&unifiedmicrosystems.com +39792 + YouDotNet Limited + Ben Durkin + ben&youdotnet.com +39793 + World Net + Sam Dibrell + iana.org&world-net.net +39794 + FedeRez + Olivier Caillault + admin&federez.net +39795 + Cardiothink, Inc. 
+ Lawrence Widman + pen.iana.org&cardiothink.com +39796 + BkavCA + Mr Quyen Le Cong + quyenlc&bkav.com.vn +39797 + Contract Kitting + Hennie Lombaard + hlombaard&ckza.co.za +39798 + Republički fond za zdravstveno osiguranje + Mladen Mitrović + mladen.mitrovic&rfzo.rs +39799 + TELE APOSTUAK + Antxon Alonso + sistemak&kiroljokoa.com +39800 + MTU Aero Engines GmbH + Ulrich Clara + hostmaster&mtu.de +39801 + Imagicle SpA + Christian Bongiovanni + Cb&imagicle.com +39802 + Urban Airship, Inc. + Mike Newton + eng-ops&urbanairship.com +39803 + IT-Box GmbH + Matthias Wimmer + mwimmer&it-box.de +39804 + Ctyle Corporation + Zhang Weizhong + pr&ctyle.com +39805 + Ingenious Med + Randy Astle + rastle&ingeniousmed.com +39806 + PrJSC "IC SEB LIFE UKRAINE" + Alexander Migal + OLEKSANDR.MIGAL&SEB.UA +39807 + Edgewater Wireless Systems Inc. + Natalie Gagnon + natalieg&edgewaterwireless.com +39808 + SHINE SYSTEM + Heo,wonsuck + activedesk&zenois.com +39809 + Lonely Planet + Alistair Weddell + Alistair.Weddell&lonelyplanet.com.au +39810 + KNET Co., Ltd. + SUN Guonian + sunguonian&knet.cn +39811 + ZaPOP (Pty) Ltd + Riaan Labuschagbne + riaan&zapop.com +39812 + UC RUSAL + Denis Khleborodov + denis.khleborodov&rusal.com +39813 + Public Joint-Stock Company "National Depository of Ukraine" + Taras Kraychuk + t_kraychuk&csd.ua +39814 + SOLYSTIC + Didier Tresse + Didier.Tresse&solystic.com +39815 + iphion B.V. + Martijn Grendelman + martijn&iphion.nl +39816 + Accelatis + Jonathan Berry + jberry&accelatis.com +39817 + cl0secall.net + Joshua Kocinski + iana&cl0secall.net +39818 + Anator + Fernandez Julien + julien.fernandez&anator.com +39819 + VaporPunk, Inc. + Cody Mello + cody&vaporpunk.com +39820 + Cyssea + Fred Lacombe + fred.lacombe&cyssea.com +39821 + EDINEC SRL + Eduardo Antonio Jalil + ejalil&edinec.com.ar +39822 + iSchemaView, Inc. + Roland Bammer + rbammer&stanford.edu +39823 + Consorci Administració Oberta de Catalunya + Joan A. Olivares Obis + jaolivares&aoc.cat +39824 + Smart Grid Networks + Anders Grahn + anders.grahn&smartgridnetworks.net +39825 + Public Safety canada + Pierre Plante + plantep&smtp.gc.ca +39826 + Accelerated Servers, Inc + Avi Freedman + avi&acceleratedservers.com +39827 + Celal Bayar University + Ahmet Oztuna + ahmet.oztuna&cbu.edu.tr +39828 + ATP + Lars Ditlevsen + ldi&atp.dk +39829 + Asetek + Kasper Fuglsang Jensen + kfj&asetek.com +39830 + Greeve + Auryn Hughes + auryn.hughes&greeve.co.uk +39831 + Museu de Astronomia e Ciencias Afins + Alberto Wester + albertow&mast.br +39832 + LogicMonitor Inc + Steve Francis + sfrancis&logicmonitor.com +39833 + hangzhou e-vision electronic system engineering corp. + baifanfan + ffbai&hzys.net.cn +39834 + Fata Informatica + Vincenzo Alonge + v.alonge&fatainformatica.com +39835 + JANTEQ CORPORATION + Andrew Fox + andy&janteq.com +39836 + Sindikat “Akademska solidarnost” + Andrej Dundović + kontakt&akadsolid.hr +39837 + A123 Systems, Inc. + John M. McNally + jmcnally&a123systems.com +39838 + University of Bahrain + Dr Eshaa Alkhalifa + ealkhalifa&admin.uob.bh +39839 + Gaftech + Gabriel Fournier + gabriel&gaftech.fr +39840 + ICAR CNR + Pietro Storniolo + tech-c&pa.icar.cnr.it +39841 + Danske Bank + Jørn Guldberg + jrgu&danskebank.dk +39842 + Communauté Urbaine du Grand Toulouse + OULD-AMAR Karim + Karim.OULD-AMAR&grandtoulouse.fr +39843 + Precogs Software SAS + Vlad Vasiliu + vlad.vasiliu&precogs.fr +39844 + MAPPER Lithography + Robert de Vries + robert.de.vries&mapperlithography.com +39845 + Dr. Schenk IT-Consulting GmbH + Dr. 
Andreas Schenk + info&dr-schenk-it.de +39846 + UNILINK SA + FRANCOIS QUENUM + FRANCOIS.QUENUM&UNILINK.FR +39847 + ATS GRUP LTD + Fatih Kucuklergil + fatih&atsgrup.com.tr +39848 + Voximage + Regent Beaulieu + rbeaulieu&voximage.ca +39849 + Arbiter Systems + Niles Rasmussen + nrasmussen&arbiter.com +39850 + Ross Opticians Inc. + Evelyn Ross + evelynzross&gmail.com +39851 + HLT Software GmbH + Wolfgang Wester + wolfgang.wester&hlt-software.de +39852 + Clima-Tech Coporation + Wayne Andersen + waynea&clima-tech.com +39853 + BSI Business Systems Integration AG + Daniel Buehler + bsi_pen&bsiag.com +39854 + eVolution Networks + Amit Liber + Amit&evolutionetworks.com +39855 + FCP Fritsch, Chiari & Partner ZT GmbH + Georg Fritsch + gf&fcp.at +39856 + Beijing Qibadian Information Technology CO., Ltd. + Fei Wang + fei.wang&jimii.cn +39857 + CYSM Ltd. + Chris Roberts + c.roberts&csrfm.com +39858 + Kirin-gumi + Takayuki Akiyama + akiyama&kirin-gumi.net +39859 + Clemax,Inc + Nizeyimana, Rosine + ghighi-du63000&hotmail.fr +39860 + OGS IT-Solutions + Mr. Osman Gümüs + guemues&ogs.at +39861 + Orbitz Worldwide, LLC. + James Regovich + james.regovich&orbitz.com +39862 + ComSonics,Inc. + Shirley J. Custer + shirleyc&comsonics.com +39863 + World Vision US + Adam Patterson + apatters&worldvision.org +39864 + WEBeDoctor, Inc. + Jim Starwood + jstarwood&webedoctor.com +39865 + Slabinfo E.U. + John Edisson Ortiz + jortiz&slabinfo.com.co +39866 + Advanced Network Devices + Dan Mahn + dan&digidescorp.com +39867 + Linedata + Rick Morris + it&na.linedata.com +39868 + Miami International Holdings + Manjul Rai + mrai&miami-holdings.com +39869 + Illumina, Inc + Ryan Niemes + rniemes&illumina.com +39870 + Telesis A.S. + Mumin Guler + contact&telesis.com.tr +39871 + Trans Industrias Electronicas + Sergio Jardon + sjardon&trans.com.ar +39872 + CIPFP Ausias March + Luis Garcia + lgarcia&ausiasmarch.net +39873 + Silicon Labs (formerly 'Ember Corporation') + Robert Power + robert.power&silabs.com +39874 + Asmlab Limited + Semen Makhorkin + iana&asmlab.ru +39875 + Beijing GuangXinYingKe Technology Co.,Ltd + Ming Xue + xueming&gxtech.com.cn +39876 + 7-Technologies A/S + Lars Mortensen + lmo&7t.dk +39877 + VCE Vienna Consulting Engineers + Georg Fritsch + gf&vce.at +39878 + Le Aquile Unita' Cinofile da Soccorso Ravennate + Balestri Paride + segreteria&leaquile.it +39879 + Amplitude Systemes + Julien CARLIER + jcarlier&amplitude-systemes.com +39880 + Thomas Wiedstruck - Informationstechnik + Thomas Wiedstruck + thomas&wiedstruck.de +39881 + EyesOfNetwork + LEVY Jean-Philippe + eyesofnetwork&eyesofnetwork.com +39882 + Greater Pittsburgh Glass & Door + Steven L Buzonas Jr + steve&fancyguy.com +39883 + Sysacom + Steve Légaré + slegare&sysacom.com +39884 + T-Platforms + Alexander Amelkin + noc&t-platforms.com +39885 + StorONE + Gal Turchinski + Galtur&storone.com +39886 + Kurt Kincaid Consulting + Kurt Kincaid + kurt&kurtkincaid.com +39887 + localroot.de + Sascha Bendix + hostmaster&localroot.de +39888 + Ness Computing, Inc. + Henning Schmiedehausen + henning&likeness.com +39889 + icecreek Software und Services + Christian Ey + info&icecreek.de +39890 + Beijing Forsun Technology Co.,Ltd + David Leng + 275582448&qq.com +39891 + MNUCIB VNII PVTI + Dmitry Ponomarev + dmitry.ponomarev&mnucib.ru +39892 + v-ic + Victor Ashirov + v.a.ashirov&r35.v-ic.ru +39893 + Energierversorgung Mittelrhein GmbH + Bjoern Hermes + bjoern.hermes&evm.de +39894 + Haley Technologies Inc.
dba PC PitStop + Paul Haley + mark&pc-pitstop.com +39895 + OpenX + Seth Daniel + systems&openx.org +39896 + The Cleveland Clinic Foundation + Dave Blankenship + blanked&ccf.org +39897 + Ryder System, Inc. + Sumanth Gangaraboina + enterpriseitsecurity&ryder.com +39898 + AtrilA GmbH + Andre Keller + keller&atrila.com +39899 + Virginia Hospital Center + Michelle Lardner + mlardner&virginiahospitalcenter.com +39900 + INITE SC + Diego García Bustos + diego.garcia&inite.edu.mx +39901 + Crypto4A + Bruno Couillard + bruno&crypto4a.com +39902 + inqa.be + Joachim Van den Bogaert + joachim&inqa.be +39903 + Pivotal Technologies Pty Ltd + Steve Kelly + admin&pivotaltechnologies.com.au +39904 + Sascha Wolf + Sascha Wolf + adnae&anxia.org +39905 + eA-light Arbeitsgemeinschaft der ÄKNO und ÄKWL + Viktor Krön + kroen&aekno.de +39906 + Zwissler Technologies, LLC + Rob Zwissler + rob&zwisslertech.com +39907 + LinkSquare + Zhang Haibin + zhanghaibin&linksquare.cn +39908 + china beijing koal Company + haitao wang + wanghaitao03&126.com +39909 + OJSC GUTA-BANK + Vladislaw Polukeev + polukeev.vo&gutabank.ru +39910 + Fusionex + Andy Pang + andy.pang&adv-fusionex.com +39911 + ltd Arhangelskoe predprijatie protivopozharnoj avtomatiki + Chulkin Alexander + spisprut&gmail.com +39912 + State Grid Electric Power Research Institute + Yuxin Chen + chenyuxin&sgchip.com +39913 + ELONG Corp + haitao ni + haitao.ni&corp.elong.com +39914 + ZPAS S.A. + Daniel Piegza + daniel&zpas.pl +39915 + wiba10 + Konstantin Hopf + info&wiba10.de +39916 + KeyLemon SA + Gilles Florey + info&keylemon.com +39917 + KUFP “Medaparatura”, ALC + Kostiantyn Zander + office&medaparatura.kiev.ua +39918 + Cardiocore + G.K. Meier + snmp.engineers&cardiocore.com +39919 + hybris GmbH + Joachim Kraftmayer + joachim.kraftmayer&hybris.com +39920 + wintek System + Jinseok Seo + jsseo&wintek.co.kr +39921 + UKFast.Net Ltd + Darren Taylor + darren.taylor&ukfast.co.uk +39922 + SDN SPA + Pasquale Garzillo + ced&sdn-napoli.it +39923 + Seres + Thierry Guérin + iana.docupost&seres.fr +39924 + Chase Paymentech + Paul Peterson + paul.peterson&chasepaymentech.com +39925 + Excellent S.A. + Roman Kolasiewicz + iana_pen&excellent.com.pl +39926 + Aspen Systems + Doug Morse + dmorse&aspensystems.com +39927 + Voronezh Cityhall + Vladimir Pakhomov + don&cityhall.voronezh-city.ru +39928 + Dmitry Ponomarev + Dmitry Ponomarev + dmitry&ponomarevs.ru +39929 + Dingbox + Ant Kenworthy + dns&dingbox.net +39930 + Rewired State + Steve King + steve&rewiredstate.com.au +39931 + openNetMon + Jean-Damien POGOLOTTI + jean-damien&opennetmon.net +39932 + Mindware + Alan March + alandmarch&gmail.com +39933 + Technology Space Integration Limited + Marek Stuczynski + marek.stuczynski&tsiuk.com +39934 + TopHat Software + Colm Vize + colm&tophat.ie +39935 + Transas Marine International + Dmitry Rostopshin + rost&transas.com +39936 + Cheshire Medical Center + Marie Young, Applications Systems Manager + mryoung&cheshire-med.com +39937 + Netspective + Shahid Shah + shahid.shah&netspective.com +39938 + Inca Networks Inc. + Mike Bradley + mike.bradley&incanetworks.com +39939 + Lansing Community College + John Hendzel + hendzej&lcc.edu +39940 + Oregon Scientific Global Distribution Limited + Jacky Tsoi + jackytsoi&oregonscientific.com +39941 + KUZUMI Electronics,Inc. + Motonobu Suzuki + motonobu_suzuki&kuzumi-denshi.co.jp +39942 + eWorld Technologies Ltd. + Wu Zhaohui + wzh&tomtaw.com.cn +39943 + University of Santo tomas + Mario M. 
Raagas + mmraagas&mnl.ust.edu.ph +39944 + shenzhen Catic Bit Communication Technology Co.LTD + Rao ZhiJian + raozj_bt&caticsz.com.cn +39945 + Tecblazer + Chris Cheng + chris.cheng&tecblazer.com +39946 + EV Group GmbH + Buttinger, Josef + it-policies&evgroup.com +39947 + B PLUS TV a.s. + Jiří Otisk + j.otisk&btv.cz +39948 + Ping Communication + Fredrik Gratte + fredrik&pingcom.net +39949 + Beijing Arrays Medical Imaging Technology Development Co.,td + sunlei + sunlei&arrays.com.cn +39950 + Altersign, LLC. + Vitalii Prikazchik + info&altersign.com.ua +39951 + Town Sports International + John Adams + ssladmin&town-sports.com +39952 + SecurePKI + Dennis van den Oever + d.vandenoever&securepki.com +39953 + SERVICE DEPARTEMENTAL D INCENDIE ET DE SECOURS DE LA DROME + Emmanuel JUGGERY + directeur-adjoint&sdis26.fr +39954 + RWetc. s.r.o. + Zdenek Roh + rwetc&rwetc.com +39955 + SMP Bank OJSC + Artem Bychkov + abychkov&smpbank.ru +39956 + Vale + Renato Orgito + renato.orgito&vale.com +39957 + Columbus Regional Heathcare System, Inc. + Terry Richards + terry.richards&crhs.net +39958 + Withings + Damien Lesgourgues + damien.lesgourgues&withings.com +39959 + CIRCUTOR, SA + Mr. Bernat García + bgarcia&circutor.es +39960 + cktsoi.com + Jacky Tsoi + jacky&tsoi.me +39961 + Nicira, Inc. + Rob Enns + rpe&nicira.com +39962 + Kern Scientific Co. + David L. Kern, Jr. + david&kernscientific.com +39963 + AGCO Corporation + Kenneth Peirce + kenneth.peirce&agcocorp.com +39964 + EIDEN Co,Ltd + ISAMU OHNO + i-ohno&eiden-gp.co.jp +39965 + Trans Sped + Viky-Teodora Manaila + viky.manaila&transsped.ro +39966 + Luminoso, LLC + Jason B. Alonso + jalonso&lumino.so +39967 + Ethersex Project + Stefan Siegl (project founder & maintainer) + maintainer&ethersex.de +39968 + Toya Technologies Inc. + Dustin Little + dlittle&toyatech.net +39969 + United Energy Pakistan Limited + Mohammad Abdul Qadir + qadirma&uep.com.pk +39970 + HandySolutions (CJSC) + Evgeny Nikitin + enikitin&handybank.ru +39971 + Shenzhen Skyee Optical Fiber Communication Technology Ltd. + wuminhua + wuminhua&skyee.com.cn +39972 + TUV RHEINLAND IBERICA SA + Enrique Mora + enrique.mora&es.tuv.com +39973 + Janmedia Interactive Sp. z o.o. + Bartosz Kondek + bkondek&janmedia.pl +39974 + Garland Technology LLC + Jerry Dillard + jerry.dillard&garlandtechnology.com +39975 + COGNEX + Michael Miller + mike.miller&cognex.com +39976 + Southwestern University + Todd K. Watson + tkw&southwestern.edu +39977 + Deltix, Inc. + Gene Florintsev + gflorin&deltixlab.com +39978 + Daniel Theodoro Consultoria em Informatica Ltd. + Daniel Theodoro + daniel&theodoro.com.br +39979 + ConicIT Ltd. + Tetelman Chen + chen&conicit.biz +39980 + Nocturnal Aviation + Jeff Grossman + jeffg&NocturnalAviation.net +39981 + OPT SP CO.,LTD. + Yutaka Yamada + cti-solution&optsp.co.jp +39982 + Icareus Ltd + Mikko Karppinen + support&icareus.com +39983 + Project Engineering srl + Nannini Nicola + nicola.nannini&proeng.it +39984 + Shenzhen Process Tech.Co.,Ltd. + Kelven Li + kelven11&163.com +39985 + GRZ IT Center Linz GmbH + Ralph Hosp + ralph.hosp&grz.at +39986 + Florida Department of Education + Ted Duncan + Ted.Duncan&fldoe.org +39987 + Pathmaker Group, Inc. + Jerry Castille + jerry.castille&pathmaker-group.com +39988 + Reserved + Removed 2012-07-30 + ---none--- +39989 + LINE SEÑALIZACIÓN ELECTRÓNICA S.L.
+ JESÚS JIMÉNEZ MUÑOZ + jjimenez&line.es +39990 + Center of Information Technologies of Volgograd region (http:\\citvo.ru) + Kucherenko Vladimir Alexandrovich + admin&citvo.ru +39991 + Aitia International Inc. + Endre Cseszkó + ecseszko&aitia.ai +39992 + davidhowland.com + David Howland + pen&metalliqaz.fastmail.fm +39993 + Owl Forestry + Mikko Kokkonen + mikko&owlforestry.com +39994 + Echo360 + charles mckinster + hostedops&echo360.com +39995 + Plymouth University + John Horne + snmp-admin&plymouth.ac.uk +39996 + sys4 AG + Patrick Ben Koetter + pk&sys4.de +39997 + Netsumo Ltd + Richard Smith + noc&netsumo.com +39998 + Beijing Lanxum New Technology + Jason Hu + hutiejun&lanxum.com +39999 + IDEMIA SPAIN PRODO SLU (formerly 'Prodo Telecom (An OT Company)') + Daniel Antonio Sánchez Cortés + daniel.sanchez22&idemia.com +40000 + Kapsi Internet-käyttäjät ry + Juho Juopperi + jkj&kapsi.fi +40001 + WineSOFT + Jonathan Jaeha Ahn + bglee&winesoft.co.kr +40002 + Open System Co., Ltd. + Jonson Doong + jonson&opensys.com.tw +40003 + Karjalan Paperi Oy + Juha Hartikainen + juha.hartikainen&karjalanpaperi.fi +40004 + Cargo Service Nord GmbH + André Bürger + a.buerger&csn-logistik.de +40005 + 5p. GmbH & Co. KG + Lukas Sassl + l.sassl&5partner.de +40006 + Reevex, LLC + Snehal Vashi + svashi&revventures.com +40007 + Universal Research Solutions, LLC + Matt Allen + iana&oberd.com +40008 + Virtual Connect Technologies, Inc + Benjamin Hathaway + ben.hathaway&virtualconnect.net +40009 + Oasiswork SARL + Nicolas Brisac + contact&oasiswork.fr +40010 + MSMC Informatica Ltda + Mario Dias + mario.dias&msmc.com.br +40011 + Fripost + Guilhem Moulin + guilhem&fripost.org +40012 + Hatomi Sp. z o.o. + Tomasz Macioszek + tomasz.macioszek&hatomi.pl +40013 + lintloaf.net + Juha Nyholm + oid-admin&lintloaf.net +40014 + Progress Rail Services Inspection & Information Systems + Steven Kirby + skirby&progressrail.com +40015 + Hüttner & Werfling Softwareentwicklung GbR + Stefan Werfling + info&hw-softwareentwicklung.de +40016 + MAS Technology + Shawn Peters + manager&mastechnology.ca +40017 + ARKS LLC + Alexandr V. Koshkin + dit&arks.ru +40018 + JSC Bank Vologzhanin + Dmitry Ogarkov + admins-group&bankvl.ru +40019 + Isbak A.S. + Abdullah YAKUT + ayakut&isbak.com.tr +40020 + Gutenberg Networks + Sebastien CALARD + sebastien.calard&gutenberg-networks.com +40021 + Viscount Systems Inc + David Lee + david&viscount.com +40022 + Sirius Software, Inc. + Gary Gregory + gary&sirius-software.com +40023 + LLC " NPF DUKON" + Oleg Poterpeev + root&dukon.ru +40024 + iNovo Broadband, Inc. + Jesse Still + jstill&inovobb.com +40025 + Tym3 + James Carpenter + jcarpenter&tym3.com +40026 + Société d’Informatique et de Systèmes + Sebastien FLEURY (System Administrator) + contact.iana&sis-france.com +40027 + Aki Network + Akifumi Shiroma + aki&gp7.info +40028 + TAKOTA, LTD + Shakalov Andrey + ice&doka.tc +40029 + leadtone + gexingyu + gexingyu&leadtone.com +40030 + RADOM, s.r.o. + Jaroslav Hokeš + development&radom.eu +40031 + Oblako10, Ltd. + Vadim Ponomarev + vadim&oblako10.ru +40032 + Ultra Electronics, Precision Air & Land Systems + Richard Collins + Richard.Collins&ultra-pals.com +40033 + Radio Design + Caspar Lucas + caspar.lucas&radiodesign.eu +40034 + DuraSpace + Andrew Woods + awoods&duraspace.org +40035 + CellSec Inc. 
+ Erik Dahl + admin&cellsec.com +40036 + Vertafore PLM + Josh Zook + jzook&vertafore.com +40037 + Inceptum d.o.o + Zvonimir Balog + zvonimir.balog&inceptum.hr +40038 + Livestream, LLC + Justin Stallard + devops&livestream.com +40039 + MySysAdmin.DE + Peer Dampmann + dampmann&mysysadmin.de +40040 + Stichting IFCAT Foundation + Wilco Baan Hofman + treasury&ifcat.org +40041 + ООО «Электронные Офисные Системы (проектирование и внедрение)» /OOO “Elektronnye Ofisnye Sistemy (proektirovanie i vnedrenie)/ + Alexey Kravchenko + eosreg&eos.ru +40042 + Stiftelsen Chalmers Studentbostäder + Carl-Martin Pershed + calmar&csbnet.se +40043 + NewPace Technology Development Inc. + Josh Warren + josh.branchwarren&newpace.com +40044 + PC-Doctor, Inc. + Colin Corr + spammagnet&pc-doctor.com +40045 + Nexenta Systems, Inc. + Peer Dampmann + peer.dampmann&nexenta.com +40046 + CREDIT EUROPE BANK Ltd. + Ivan Bauzhadze + ivan.bauzhadze&crediteurope.ru +40047 + Korex Networks s.r.o. + Vladimir Vydra + vvydra&korex.sk +40048 + Information Display Company + Sam Mallicoat + smallicoat&gmail.com +40049 + Red Bend + Elad Tamari + elad.tamari&redbend.com +40050 + Microtech Andrzej Sokulski + Andrzej Sokulski + info&micro-tech.com.pl +40051 + RELEASE14 + Josip Djuricic + jd&release14.org +40052 + Energie AG Oberösterreich Data GmbH + Christian Aichinger + christian.aichinger&energieag.at +40053 + Sistemas Avanzados de Diseño + Ildefonso Moreno + ildefonso&sadsl.es +40054 + 4UR + ILyong Kim + hostmaster&4ur.cc +40055 + Yuchengtech + MingRui Li + limr&yuchengtech.com +40056 + AccuBeat Ltd + Yishay Eliav + yishay.e&accubeat.com +40057 + Reserved + Removed 2012-06-26 + ---none--- +40058 + Vitaliy Tokarev + Vitaliy Tokarev + vitaliy.tokarev&gmail.com +40059 + Datagram, Inc. + Jae Kim + jae&datagram.com +40060 + Leo-Li com. + Leo Li + leo.wenlu&gmail.com +40061 + Bay Area Hospital + Brenda Curtner + brenda.curtner&bayareahospital.org +40062 + Trevor and Emily Mouat King + W. Trevor King + wking&tremily.us +40063 + Qintara, Inc. + Joseph W. Daigle + oid&qintara.com +40064 + Optima Lab srl + Daniele Arduini + admin&optimalab.it +40065 + Larch networks + Alex Cheskis + alexk&larch-networks.com +40066 + Saliens Information Systems Ltd. + Marko Pecic + mpecic&saliens.com.hr +40067 + LinTech JSC + Alexander Kochetkov + akochetkov&lintech.ru +40068 + The Venus Project + Roxanne Meadows + admin&thevenusproject.com +40069 + enprovia Software Engineering s. r. o. + Patrik Csokas + oid.admin&enprovia.com +40070 + RetailNext + Nathan Mueller + nate&retailnext.net +40071 + P. St. Onge + P. St. Onge + pete.stonge&utoronto.ca +40072 + techlab.jp (formerly 'scminfo.net') + Shotaro Chiba + schiba&techlab.jp +40073 + Lutron Electronics Company, Inc + Jeremy Neyhart + jneyhart&lutron.com +40074 + Energy Solutions Ltd. + Vladimir Todorov Fuchedzhiev + vendor&borsabg.com +40075 + SPL-Xdemat + Philippe RICARD + philippe.ricard&spl-xdemat.fr +40076 + SiFox + Alexey Larin + alexey.larin&sifox.ru +40077 + Austrian Red Cross + Markus Hnatek + markus.hnatek&roteskreuz.at +40078 + Open Grid Computing + Jes Nielsen + jes&opengridcomputing.com +40079 + Electronic Systems Protection, Inc. + Robert Dawley + robert.dawley&ametek.com +40080 + IIHE - Inter-university Institute for High Energies + Gilles De Lentdecker + gdelentd&ulb.ac.be +40081 + Shanghai Myway Technology, Inc. + Simon Tian + simonT&mywayinfo.com +40082 + NuGardt Software UG (haftungsbeschränkt) + Kevin Gardthausen + kevin&nugardt.com +40083 + Accelerated Concepts, Inc.
+ Tom Butts + tom.butts&accelecon.com +40084 + Ingalls Memorial Hospital + Susan Bjork + sbjork&ingalls.org +40085 + Stage Tec GmbH + R. Harder + org.adm&stagetec.com +40086 + Inworx Group + Javier Schamber + itadministrator&inworx.com +40087 + NIDO Informatica + Cassio Jose de Jesus + cassio&nido.com.br +40088 + Voixtel Telecom + Melcon Moraes + ianaoid&voixtel.com.br +40089 + NetAmbit Infosource & E-services Pvt. Ltd. + Vijay Sharma + vijay.sharma&netambit.in +40090 + Netline Communications Technologies Ltd + Harel Bahaloul + harel&netline.co.il +40091 + A.C.T.I.C asbl + Kenny Louveaux + info&actic.be +40092 + Wiwynn Corporation + Zong Bing, Wu + bing_wu&wiwynn.com +40093 + Zhuhai Seine Technology Co., Ltd + WangDan + wangdan&pantum.com +40094 + SecuEnv + Zhou Li + zhou.li&secuenv.com +40095 + G4S Engenharia e Sistemas S. A. + Mario Dias + mario.dias&br.g4s.com +40096 + ROICX + matt Hu + hul&roicx.com +40097 + WorldPay + Vladimir Jirasek + vladimir.jirasek&worldpay.com +40098 + update software + Christoph Macheiner + christoph.macheiner&update.com +40099 + North Carolina Healthcare Exchange + Himabindu Bolisetty + bindu&careevolution.com +40100 + TechNet + Richard Dols + richard&technetgroup.nl +40101 + EPB Fiber Optics + Mike Cathey + iana-poc&epbinternet.com +40102 + Pinger, Inc. + Jo Rhett + jorhett&pinger.com +40103 + Fashion Days + David Shulman + David.Shulman&fashiondays.ch +40104 + BamseNet GmbH + Mr. Rico Koerner + service&bamsenet.de +40105 + APN Promise S.A. + Konrad Sagala + konrad.sagala&promise.pl +40106 + Polara Engineering, Inc. + Brad Whitney + bwhitney&polara.com +40107 + Michael Reschly + Michael Reschly + michael&reschly.com +40108 + Amber Buddha + Steve Gillam + steveg&amberbuddha.com +40109 + SigAlgo Technology Ltd + Roger McCalman + snmp-pen&sigalgo.com +40110 + EGNATIA + Athanasios Bakalis + athanasios.bakalis&group-egnatia.gr +40111 + Deloitte Services & Investments nv + Christophe Castelein + ccastelein&deloitte.be +40112 + Ethernix + Amaury Darsch + amaury&ethernix.com +40113 + Blue Mountain Health System + Cheryl Heffelfinger + cheffelfinger&blmtn.org +40114 + Guavatech + John Cheeseman + johnc&guavatech.com +40115 + V & B Ingenieria S.A. de C.V. + Asterio Valenzuela Delfin + ventas.vybingenieria&gmail.com +40116 + DCO4 Sp. z o.o. + Jan Szumiec + jan&linkedlist.co.uk +40117 + Faculté des Sciences et Technologies - Université de Lorraine + Samson Bisaro + Samson.Bisaro&univ-lorraine.fr +40118 + CNPC LOGGING Co.Ltc + chen xiaolei + chenxiaolei2002&126.com +40119 + Terralink LLC + Grudtsin Andrey + support&terralink.ru +40120 + Pamlock AB + Johannes Hassmund + johannes&pamlock.com +40121 + Methodia Inc + Gerard Hovanessyan + gerard.hovanesyan&methodia.com +40122 + Beijing Cyber Stone Information Technology Corp. Ltd. + Yunfei xiang + xiangyf&cyber-stone.com +40123 + Hangzhou dunchong Technology Co.,Ltd. + lupeihong + lupeihong&dunchongnet.com +40124 + Kernkraftwerk Leibstadt AG + Wälti Stefan + ServiceDesk&kkl.ch +40125 + Travelfusion Ltd + Seth Tunstall + seth&travelfusion.com +40126 + real virtual GmbH + Ralph Borowski + rborowski&real-virtual.com +40127 + Federos + Mike Gent + mgent&federos.com +40128 + Beijing Photon-Byte Technology Co., Ltd. + David.Hua + softpro&vip.163.com +40129 + MarLukKi Centre + Marcin Kiejzik + marlukki&marlukki.eu +40130 + Tempered Networks + Mike Viles + m.viles&tempered.io +40131 + Nuvem Networks Inc + Ricki Evans + operations&nuvemnet.com +40132 + Telemedia Argentina S.A.
+ Tenorio, Leandro + ltenorio&telemediala.com +40133 + FIX TELECOM + Ismael Nunez + a.nunez&fix.com.do +40134 + Nexum, Inc + Michael Fread + mfread&nexuminc.com +40135 + Spawn Labs, Inc. + Paul Gautreaux + PaulGautreaux&gamestop.com +40136 + Comviva Technologies Limited + Shadaksharayya Aravind Hiremath + shadaksharayya.ha&comviva.com +40137 + University of Pardubice + Lukas Slansky + lukas.slansky&upce.cz +40138 + DataNET21 Solutions GmbH + Eckhard Schulz + admin&datanet21.de +40139 + GRN Serveis Telematics + Jordi Fabregas + iana&grn.es +40140 + AIRTEL ATN + Frank O'Connor + frank.oconnor&airtel-atn.com +40141 + InterConnect Brasil + Luis Marcelo de Almeida Nogueira + NIC&INTER.BZ +40142 + MODULISTIC.NET + Pablo Costa + in.Ko9exohk.Vee2UoQu.iana&modulistic.net +40143 + CookieConcepts + Jeroen S. van de Hoef + info&cookieconcepts.nl +40144 + Understanding SharePoint LLC + Bjørn Furuknap + furuknap&gmail.com +40145 + STRYME GmbH + Clemens Czepe + office&stryme.com +40146 + Ascent Communication Technology + Vincent Nguyen + support&ascentcomtec.com +40147 + IT-Technology GmbH + Johann Steinwendtner + steinwendtner&it-technology.at +40148 + Thorsten Pape + Thorsten Pape + tpape&pape-clan.de +40149 + PFA Pension + Dennis Wiberg-Jørgensen + dwi&pfa.dk +40150 + Kenium + Robert Breton + rbreton&kenium.com +40151 + G²mobility SA + Lionel CAUSSE + lionel.causse&g2mobility.com +40152 + Trimtab.ca + Alex Kwiatkowski + iana_oid&trimtab.ca +40153 + Abraxas Informatik AG + Olaf Sonderegger + olaf.sonderegger&abraxas.ch +40154 + North-Caucasus Federal University + Yuri Serdyukov + yserdyukov&ncfu.ru +40155 + Groundhog Technologies + Shaun Chang + shaun&ghtinc.com +40156 + WVNET GmbH + Stefan Berger + support&wvnet.at +40157 + Medicalgorithmics + Tomasz Mularczyk + t.mularczyk&medicalgorithmics.com +40158 + Gage Marketing Group, LLC + Kevin Borchers + netadmin&gage.com +40159 + Paul Reinhart AG + Heinz Hiltebrand + h.hiltebrand&reinhart.ch +40160 + NJVC + Peter Jeanmougin + Peter.Jeanmougin&njvc.com +40161 + Canal Street Securities + Luis Nogueira + canalstreet&lmn.com.br +40162 + McCarthy Tetrault + Hugo Ethier + hethier&mccarthy.ca +40163 + Kamidama Breedables + Grandma Bates + grandma.bates.sl&gmail.com +40164 + OOO Avtomatizirovannye sistemy (Automated Systems, LTD) + Andrew Glukhov + not_x&mail.ru +40165 + morrow.me.uk + Ben Morrow + ben&morrow.me.uk +40166 + Omega Tech Consulting Group LLC + Matt Chamberlain + mchamberlain&otcgcorp.com +40167 + NS Reizigers B.V. + Morten Minke + morten.minke&ns.nl +40168 + Etransmedia Technology + Ben Steadwell + Ben.Steadwell&etransmedia.com +40169 + 3xA Security AB + Stefan Santesson + stefan&aaa-sec.com +40170 + NP Networks, Inc. + Stephen Liang + kuochuan&npnetworks.com +40171 + HF. Radio Communication Technology Co., Ltd + Paky.Du + paky.pc.du&radiotech.cn +40172 + Closed joint stock company “StandarTelecom” + Stanislav Vasin + standartelecom&gmail.com +40173 + Hoermann Solutions + Hoermann Johann + support&hans-hoermann.de +40174 + BlueFinch + Richard Verkaik + info&bluefinch.com +40175 + RIPAS + Pavel Karasin + karasin&ripas.ru +40176 + Consulfem + Jorge Berti + jberti&consulfem.com +40177 + Right&Above + Vasyl Rublyov + enterprise&rightandabove.com +40178 + Noolitic + Sylvain Deceuninck + sylvain.deceuninck&noolitic.biz +40179 + aplicacionesYredes.com + Rafa Couto + rafa&aplicacionesyredes.com +40180 + HTL Kaindorf - Mechatronik + Manfred Steiner + sx&htl-kaindorf.ac.at +40181 + CyberCenter S.A. 
+ Max Weinstein + admin&cybercenter.cl +40182 + Sichuan Cable TV Network Co., Ltd. + Gangliu + liugang&catvgd.com +40183 + con terra GmbH + Uwe König + u.koenig&conterra.de +40184 + NovInTeh LLC + Maksim Balabanov + noc&novinteh.com +40185 + Esa Unggul University + didi supriyadi + didi&esaunggul.ac.id +40186 + DA/Page, LLC + Greg Ansley + support&dapage.net +40187 + Aclima, Inc. + Stan Hu + stanhu&aclimalabs.com +40188 + United States Infrastructure Corporation + Jason Bush + noc&usicinc.com +40189 + Charlton Labs + Chuck Charlton + ccharlton&gmail.com +40190 + JCL-eCommerce GmbH + Martin Köckinger + martin.koeckinger&jcl-ecommerce.com +40191 + Lylid Labs OÜ + Key Aavoja + key.aavoja&lylid.net +40192 + ITQuality + Chris Knudsen + cfk&itq.dk +40193 + Institute of Electronic Music and Acoustics + Johannes Zmölnig + noc&iem.at +40194 + WaveCloud Corporation + David Van Duzer + dvd&wavecloud.com +40195 + Zantek Pty. Ltd. + Rowan Taubitz + rowan&zantek.com.au +40196 + SANTOS + Adam Nye + unix.support&santos.com +40197 + JSC CROC POVOLZHYE + Radaev Konstantin + k.radaev&volga.croc.ru +40198 + Movirtu + Durgesh O Mishra + durgesh&movirtu.com +40199 + Szechenyi Istvan SZKI + Laszlo Herman + school&szimki.sulinet.hu +40200 + Anext + Ing. Peter Petrilak + peter.petrilak&anext.sk +40201 + Nexiway + Adel Mezibra + adel.mezibra&nexiway.com +40202 + Primetech Ltd. + Kiselevich Maxim + info&primetech.ru +40203 + Golder Associates + Tom Laitinen + tlaitinen&golder.com +40204 + Availity, L.L.C. + Susan Eveland + seveland&availity.com +40205 + Jackson Family Enterprises + Jason Womack + itadmin&kjmail.com +40206 + minivi + David Santoso + davidsantoso&minivi.com +40207 + Gerrit Beine GmbH + Gerrit Beine + mail&gerritbeine.com +40208 + S.J.M. Steffann + Sander Steffann + sander&steffann.nl +40209 + Hiroshima University + Kouji Nishimura + kouji&hiroshima-u.ac.jp +40210 + Matt-J.co.uk + Matt Johnson + mattkj09&gmail.com +40211 + SHENZHEN MEGMEET ELECTRICAL CO.,LTD + Yongru An + anyongru&megmeet.com +40212 + PHV - Confluances + DREUX Vincent + vincent.dreux&confluances.fr +40213 + University College Northen Denmark + Jacob-Steen Madsen + itdrift&ucn.dk +40214 + ICEANS, Inventive Computer Engineering and Network Service + Ahmed Waqas Abid + owner&iceans.com +40215 + ITF-EDV Froeschl GmbH + Armin Riedl + internet&itf-edv.de +40216 + Global Traffic Technologies, LLC + Chad Chryst + chad.chryst&gtt.com +40217 + Tesmec Automation + Alessandro Maggi, Simona Pantano + automation&tesmec.com +40218 + Witzig The Office Company AG + Stephan Brenner + stephan.brenner&witzig.ch +40219 + Hirtle, Callaghan & Co. + Josh Rose + jrose&hirtlecallaghan.com +40220 + Northwestern Michigan College + Neil Streeter + nstreeter&nmc.edu +40221 + Infrastructures Technologiques Gallium + Antoine Reid + areid&Gallium-IT.com +40222 + Family Home Care & Hospice, Inc. + Mark Manley + Mark.Manley&familyhomecare.org +40223 + molotov.ro + Szabolcs Borbely + bbszabi&yahoo.com +40224 + Willem4Ever BV + Willem Eradus + pen-request-owner&eradus.eu +40225 + Image Data Systems + Robin Kearney + robin&image-data.com +40226 + xNet Solutions Inc. (formerly 'xNetBox Solutions') + Tony Huang + support&xnetec.com +40227 + Digital One + Daniel Mahoney + dmahoney&d1tv.net +40228 + Valley ComputerWorks, Inc. DBA Paragus Strategic I.T. + Delcie Bean + dbean&paragusit.com +40229 + VizionR + Pierre Souchay + pierre.souchay&vizionr.fr +40230 + Medikon Polska Sp. z o.o. + Artur Wajgel + a.wajgel&medikon.pl +40231 + Baptist St.
Anthony's Health System + Micah Campbell + Micah.Campbell&bsahs.org +40232 + Michel Messerschmidt + Michel Messerschmidt + oid&michel-messerschmidt.de +40233 + Sanoma Media Netherlands B.V. + Marco Lebbink + marco.lebbink&sanomamedia.nl +40234 + Andreas Zieschang + Andreas Zieschang + a.zieschang&arcor.de +40235 + Stadt Dortmund + Sebastian Franz + sfranz&stadtdo.de +40236 + Diputació de Barcelona + Miguel Angel Murillo Viñuales + murillovm&diba.cat +40237 + The Office of the Government Chief Information Officer of the Government of the Hong Kong Special Administrative Region + Dennis Ng + dccng&ogcio.gov.hk +40238 + RoutoMessaging + Marko Dukanac + marko&routotelecom.com +40239 + Gruppo Editoriale L'Espresso spa + Stefano Malossini + s.malossini&gruppoespresso.it +40240 + OpenSky.com + Friedrich Seifts + systems&opensky.com +40241 + Ogden Technology Corporation + Andy Lan + andy.lan&ogdentec.com +40242 + GNS Science + Robert Pearce + r.pearce&gns.cri.nz +40243 + 3U HOLDING AG + Erik Schoenau + nmc&3u.net +40244 + SDAMO Group, LLC + Aleksey V. Shepelev + shepelev&sdamo.ru +40245 + Böning Automationstechnologie GmbH & Co. KG + Marten Müller + marten.mueller&boening.com +40246 + OOO "Laboratoriya Trehmernogo Zreniya" + Alexander Zinchuk + azinchuk&3detection.ru +40247 + GGS German Graduate School of Management and Law + Thomas Heiligenmann + thomas.heiligenmann&ggs.de +40248 + NPO Telecom JSC + Rodion Zobnin + okb&npotelecom.ru +40249 + Vargyas Networks, Inc D/B/A Maxxwave + Brian Vargyas + brianv&balticnetworks.com +40250 + Fiber Connections Inc. + Jordan Slater + jslater&fiberc.com +40251 + coretanium.net + Leon de Jager + ldejager&coretanium.net +40252 + Lonnie Mandigo + Lonnie Mandigo + lonnie.mandigo&gmail.com +40253 + RunStone Technology Co., Ltd. + Julian Ding + julian&runstone.com +40254 + Particle Software Industries + David McIlwraith + david.mcilwraith&particlesoft.org +40255 + Zhiway Technologies CO., Ltd. + yi Huang + huangyi&zhiway.com.cn +40256 + EFG Bank SA + Daniel Rieille + it.security&efgbank.com +40257 + Arcontia Technology AB + Karl Thorén + karl.thoren&arcontia.se +40258 + REWAG Regensburger Energie- und Wasserversorgung AG & Co KG + Josef Fischer + j.fischer&rewag.de +40259 + BruderhausDiakonie Stiftung + Paul Landenberger + paul.landenberger&bruderhausdiakonie.de +40260 + Mongoose Metrics, LLC. + Charles H. Liggett, Jr. + chuck.liggett&mongoosemetrics.com +40261 + KeenSystems B.V. + Rob janssen + r.janssen&keensystems.eu +40262 + Airbnb, Inc. + Chris Ballard + domain&airbnb.com +40263 + Bank of Latvia + Arnis Gigulis + arnis.gigulis&bank.lv +40264 + CIRTEC Medical Systems, LLC + Erik Morgan + IANA-Admin&CIRTECMed.Com +40265 + Derbyshire County Council + Rob Ellis + robert.ellis&derbyshire.gov.uk +40266 + zxc ltd + David Barrett + dave&zxc.co.nz +40267 + Anite Travel Ltd. + Geoff Gomez + geoff.gomez&anite.com +40268 + KRZN + Daniel van Soest + dvs&krzn.de +40269 + Kenshoo ltd + omer brandis + omer.brandis&kenshoo.com +40270 + Flexagon GmbH + Anh Ngoc Nguyen + nn&flexagon.de +40271 + Peconic Bay Medical Center + Rosemarie Lampitok + rlampitok&pbmedicalcenter.org +40272 + SLAC National Accelerator Laboratory + Ross Wilper + rwilper&slac.stanford.edu +40273 + XeroAtom Group Ltd + Edward Alekxandr + edward.alekxandr&xeroatom.com +40274 + Spire Payments Holdings S.a.r.l. 
+ Pedro Gruenholz + Pedro.Gruenholz&spirepayments.com +40275 + blackholearchives.org + Fletch Hogan + operations&blackholearchives.org +40276 + Navy Federal Credit Union + Baskaran Arumugam + baskaran_arumugam&navyfederal.org +40277 + Cloudant, Inc. + Joan Touzet + joant&cloudant.com +40278 + Ocean-County Monmouth Amateur Radio Club, Inc. (OMARC) + Martin A Flynn + domain&n2mo.org +40279 + HAMILTON Bonaduz AG + Daniel Bombis + pcsupport&hamilton.ch +40280 + Sovtest-Internet Limited Company + Roman Dmitriev + admin&sovtest.ru +40281 + Bottega Verde Srl + Cristian Testa + cristian.testa&bottegaverde.com +40282 + NSB AS + Stig B. Sivertsen + stig.sivertsen&nsb.no +40283 + Soporcel, Sociedade Portuguesa de Papel, SA + Luis Cavaleiro + luis.cavaleiro&portucelsoporcel.com +40284 + Wavex Technology Ltd + Russell Tester + infrastructure&wavex.co.uk +40285 + British Columbia International School, Bangkok + Peter Bell + pbell&bcisb.net +40286 + Siclic SARL + Nicolas Brisac + contact&siclic.fr +40287 + Perfectly Posh LLC + Christian Sieber + christian&perfectlyposh.com +40288 + Delta Power Solutions India Pvt Ltd + Sunil Chavan + sunil.chavan&delta.co.in +40289 + TransLattice, Inc. + Michael Lyle + protocols&translattice.com +40290 + Government of Dane County, Wisconsin + Kevin Hammond + hammond.kevin&countyofdane.com +40291 + Central National Australia Pty Ltd + Scott Hordern + scott&cennat.com.au +40292 + Mekelle University + Eric Lembregts + eric&lembregts.eu +40293 + TG Byte Software GmbH + Thilo-Alexander Ginkel + tg&tgbyte.de +40294 + OOO "Jewelry House "Kristall" + Shmurgalkin Oleg + postmaster&kristall-shop.ru +40295 + dogado Internet GmbH + Sascha Schiller + ssc&dogado.de +40296 + stocka + Geoffrey GUERET + geoffrey&stocka.net +40297 + Hytera Communications Co.,Ltd. + Yiyang Zhong + zhongyiyang&hytera.com +40298 + Thad Touchton + Thad Touchton + thad.touchton&gmail.com +40299 + Peter Janke + Peter Janke + peter&pjml.ca +40300 + Radialogica, LLC + Bryan Cool + bryan&radialogica.com +40301 + Esis Enerji ve Elektronik San. Tic. A.S. + Emrah AYDIN + emrah.aydin&esis.com.tr +40302 + Oozlum + Chris Smith + chris.smith&oozlum.co.uk +40303 + Anix Networks, Inc. + Anthony Lin + anthony.lin&anixnetworks.net +40304 + Wacker Neuson SE + Thomas Herzberger + thomas.herzberger&wackerneuson.com +40305 + Eletrosul Centrais Elétricas S/A + Luís Gustavo Coelho + internet.suporte&eletrosul.gov.br +40306 + Pointstore + Mark shepstone + mark&pointstore.co.za +40307 + Great River Technology + Paul Grunwald + pgrunwald&greatrivertech.com +40308 + Fundația Ceata + Tiberiu C. Turbureanu + tct&ceata.org +40309 + UNICART EOOD + Iliya Bazlyankov + iliya.bazlyankov&unicart.bg +40310 + Cumulus Networks, inc + JR Rivers + jrrivers&cumulusnetworks.com +40311 + ECHOES Technologies SAS + Florent Poinsaut + florent.poinsaut&echoes-tech.com +40312 + Denbridge Marine Ltd. + Jonathan Brady + software&denbridgemarine.com +40313 + goRill Solutions + Stefan Kaderabek + kaderabe&gorill.at +40314 + Universidad Europea de Madrid + Juan Carlos Corbacho + jcarlos.corbacho&uem.es +40315 + M&L Solution + haeyeon, hwang + hyhwang&mnlsolution.com +40316 + VastRiver Technology Co.,Ltd. 
+ Zheng Haiou + zhenghaiou&vastriver.com +40317 + Alltech Group + Nikita Filimonov + it&alltech.ru +40318 + Budikom + Mateusz Skała + mateusz.skala&budikom.net +40319 + Canto GmbH + Thomas Schleu + tschleu&canto.com +40320 + Convene Networks + Kevin Dauster + kdauster&convenenetworks.com +40321 + Vitroconnect GmbH + Florian Lohoff + flo&vitroconnect.de +40322 + Jan Axelsson - DICOM2USB + Jan Axelsson + axelsson.jan&gmail.com +40323 + Big Nerd Ranch + Eric Jeffers + ejeffers&bignerdranch.com +40324 + The Woman's Christian Association of Jamestown, N.Y. + Laura Josephson + laura.josephson&wcahospital.org +40325 + Leon Gaming Limited + Vladislav Artemyev + vl&leongaming.com +40326 + Expertize + Fabrice Bucher + fbr&lanexpert.ch +40327 + Metal Expert LLC + Dmitry Demchenko + d.demchenko&metalcourier.com +40328 + Yunohost + Adrien Beudin + yunohost&yunohost.org +40329 + Tanaza S.r.l. + Sebastiano Bertani + sebastiano.bertani&tanaza.com +40330 + KuVision Digital Technology Ltd + Steve Freeman + sfreeman&kuvision.com +40331 + Micronica srl + Massimo Tomasi + massimo.tomasi&micronicasrl.it +40332 + Syn-Apps LLC + Ian Pitts + ipitts&syn-apps.com +40333 + Enthought + Ognen Duzlevski + ognen&enthought.com +40334 + Mercury Security Corp. + Michael Serafin + michael.s&mercury-security.com +40335 + RMTech + Moon-il, Han + hmi&rmtechs.co.kr +40336 + Mericle Technologies, LLC. + Brian P. Mericle + brian&mericletechnologies.com +40337 + Centers for Medicare and Medicaid Services + Gregory Kreymer + gregory.kreymer&cms.hhs.gov +40338 + aizoOn Consulting s.r.l. + Antonio Rosina + support-it&aizoon.it +40339 + Anovio AG + Frank Flachenecker + frank.flachenecker&anovio.de +40340 + miersch-IT + Holger Miersch + hostmaster&miersch-it.de +40341 + Dalmartin Ltd + Steven Jupp + hostmaster&dalmartin.com +40342 + CronLab Ltd + Markus Nilsson + markus.nilsson&cronlab.com +40343 + E-Sales Soluções de Intergração + Cassio Cristiano + cassio.cristiano&esales.com.br +40344 + Falk Online + Sascha Falk + sfalk&falk-online.eu +40345 + JRE & Associates, Inc. + Jonathan Wilson + Support&jreitsolutions.com +40346 + Belarusian Universal Commodity Exchange + Valery Niachai + uc&butb.by +40347 + DUIT GmbH + Marc Stermann + marc.stermann&duit.de +40348 + Adaptic AS + Morten William Hansen + morten&adaptic.no +40349 + EICSYS GmbH + Wojciech Jalmuzna + contact&eicsys.eu +40350 + Health Sciences Libraries Consortium + Alan Simon + simon&hslc.org +40351 + XO Communications LLC + Maeve O'Connor + security&xo.com +40352 + American Electric Power + Jon C. Kidder + jckidder&aep.com +40353 + Cirrus Software Engineering LLC + James MacLennan + cirrus.sw&gmail.com +40354 + Suttle Apparatus + Nicholas Larsson + nick.larsson&suttlesolutions.com +40355 + Domantic + Sebastien Lobstein + sebastien.lobstein&domantic.com +40356 + IT-CE + Philippe GUEUGNON + philippe.gueugnon&it-ce.fr +40357 + Michael Buth - IT Consulting + Michael Buth + michael.buth&mbuth.de +40358 + Mercado Libre S.R.L. + Agustin Gomez Roca + agustin.gomezroca&mercadolibre.com +40359 + Zeppelin GmbH + Daniel Ghita + daniel.ghita&zeppelin.com +40360 + IP Total Software S.A + Andrés Ortiz + infraestructura&iptotal.com +40361 + Xenya d.o.o.
+ Peter Reinhardt + peter.reinhardt&xenya.si +40362 + Cambridge Communication Systems + Iftah Bratspiess + iftah&web-silicon.com +40363 + Galambos Dániel + Daniel Galambos + galambos.dani&gmail.com +40364 + Trinity Desktop Project + Timothy Pearson + kb9vqf&pearsoncomputing.net +40365 + Weichi Consulting LLC + Marvin Wolfthal + maw&weichi.com +40366 + SHadrinsky Telephonny Zavod (SHTZ) + Alexey Roznin + prog8&shtz.shadrinsk.net +40367 + Elcom International Pvt Ltd + Mahesh Mudholkar + mc.mudholkar&elcom-in.com +40368 + DATA-1 Ltd. + Andrew Ivanov + ivanov&data-1.net +40369 + gruppenrichtlinien.de + Mark Heitbrink + mark&gruppenrichtlinien.de +40370 + Socialbakers a. s. + Martin Homolka + martin&socialbakers.com +40371 + Forcare B.V. + Rick Riemer + info&forcare.nl +40372 + Celab Ltd + Keith Harrison + keith.harrison&celab.co.uk +40373 + HealthForce Partners + James von Seggern + webmaster&healthforcepartners.com +40374 + ODYA Bilgisayar ve Teknoloji Urunleri San. ve Tic. Ltd. Sti. + Ali YAZICI + ali.yazici&odya.com.tr +40375 + Bayport Financial Services + GIdeon Serfontein + Gideon.Serfontein&bayport.co.za +40376 + Astorex Corp + Artem Gavrilov + admin&astorex.com +40377 + Independence School District + Jace Ferguson + jace_ferguson&isdschools.org +40378 + Innovative Technology Solutions, Inc. + Bradley Smith + bradley.smith&its-florida.net +40379 + Lower Bucks Hospital + Steven Kane + kanes&lowerbuckshospital.org +40380 + Arkologic, Inc. + Hung Lu + hungl&arkologic.com +40381 + Golub Capital + Doug Prine + dprine&golubcapital.com +40382 + Klimat prof Co. + Vladimir Losev + pkiinfo&klimat-prof.ru +40383 + CAME CANCELLI AUTOMATICI SPA + GIOVANNI CEGLIA + gceglia&came.com +40384 + Association of friends of japanese culture (SPJK) + Jana Křečková + yarche&spjk.cz +40385 + Rose Datasystems Inc. + Xiaoli Gu + violee&rosedata.com +40386 + Instituto Nacional de Segurança Social + Alexandre Francisco Marengula + alex&inss.gov.mz +40387 + Mediatree SAS + Olivier MOLERO + olivier.molero&mediatree.fr +40388 + Eurosel + Arif Sultanov + arif&eurosel.az +40389 + Facultad Regional Cordoba Universidad Tecnológica Nacional + Daniel Forte + staff&computos.frc.utn.edu.ar +40390 + Shale-Inland Holdings, LLC + Navid Gardooni + navid.gardooni&shaleinland.com +40391 + Ecks Three Ltd + Eric Benoit + eric&ecksthree.com +40392 + MDV Soft + Dmitry Mushta + dmushta&gmail.com +40393 + Phantasy Concepts + Garry L. Hurley Jr. + garry.hurley.jr&gmail.com +40394 + Optogan Group + Dima Fedorov + dimajf&optogan.com +40395 + 25-Seven Systems + Geoff Steadman + gsteadman&25-seven.com +40396 + LUMINO Licht Elektronik GmbH + Ulrich Görden + ugoerden&lumino.de +40397 + Piratepartei Lëtzebuerg + Sven Clement + sven.clement&piratepartei.lu +40398 + Scout Trading LLC + Peter Bisroev + registrar&scout-trading.com +40399 + Huneed Technologies Co.,Ltd + Junghee Han + jhhan&huneed.com +40400 + CortijoDelRio.net + Daniel Diaz + ddiaz&cortijodelrio.net +40401 + VidScale, Inc. 
+ John Webb + jwebb&vidscale.com +40402 + Cryptonector, LLC + Nicolas Williams + nico&cryptonector.com +40403 + Detached + Jan-Dirk Bosman + penadmin&detached.co.za +40404 + Vestiaire Collective + Guillaume Moreau + guillaume&vestiairecollective.com +40405 + UAB Fortevento + Žilvinas Kundrotas + info&fortevento.lt +40406 + Cadis + Chris Swinnen + chris.swinnen&cadis.be +40407 + Smartbox Experience Limited + Alexander Schumann, Colin Cashin, Daniel Marin + Certification.Authority&smartbox.com +40408 + Philip Cullen Ltd + Philip Cullen + registrar&philipcullen.co.uk +40409 + YEEJOIN (BEIJING) TECHNOLOGY COMPANY LIMITED + Luo Hongli + luohongli&yeejoin.com +40410 + boxEleven + Eric Horne + iana&box11.org +40411 + PaansNet + Marcel Paans + info&paans.net +40412 + Option Computers Ltd + Nick de Smith + nick.desmith&dealhub.com +40413 + Concordus Applications Inc. + Jeremiah Sheckler + jeremiah&concordusapps.com +40414 + New England Baptist Hospital Inc. + William Presley + wpresley&nebh.org +40415 + symThoughts + Nic de Sousa + nic&bayport.co.za +40416 + Hectronic GmbH + Georg Ziaja + postmaster&hectronic.com +40417 + SUNSEA OPMEX Technologies Co., Ltd. + Daniel.Yushaoqing + daniel.yushaoqing&opmextech.com +40418 + NAG LLC + Dmitry Samodelko + dmitry&nag.ru +40419 + PAWI Verpackungen AG + Thomas Gehrke + licenses&pawi.ch +40420 + Tuxum Secure Systems, S. L. + Pedro Echanove + pedro.echanove&tuxum.com +40421 + adorsys GmbH & Co. KG + Thomas Krieger + admin&adorsys.de +40422 + Solutionary, Inc. + Steve Madel (Systems Department) + SteveMadel&solutionary.com +40423 + Osceola County Board of County Comissioners + David Lomen + sysadmin&osceola.org +40424 + TJ Samson Community Hospital + Jessica Hoy + jhoy&tjsamson.org +40425 + Laird Technologies + Tim Carney + timothy.carney&lairdtech.com +40426 + Unified Communications Inc. + Satoshi Kajiwara + pjtmg&unified.co.jp +40427 + Kodofon JSC + Sergey Ryazanov + s.ryazanov&kodofon.vrn.ru +40428 + SwiftServe Limited. + Bryan Amesbury + bma&swiftserve.com +40429 + Utoolity GmbH + Steffen Opel + biz.iana&utoolity.net +40430 + ZinuSoft S.A. de C.V. + Francisco Javier Zuluaga Ramírez + francisco.zuluaga&zinusoft.com +40431 + Dracal technologies inc. + Raphael Assenat + raph&dracal.com +40432 + SquareOne Networks, Inc + Logan Owen + logan&s1network.com +40433 + Census Digital Inc. + Doug Bascombe + dougbascombe&censusdigital.com +40434 + Huntington National Bank + Beth Mellen + beth.mellen&huntington.com +40435 + Cyara Solutions Pty Ltd + Luan Tran + luan&cyarasolutions.com +40436 + dba Glen Black + Glen Black + glen.h.black&gmail.com +40437 + Polcom Sp. z o.o. + Boguslaw Juza + hosting&polcom.com.pl +40438 + Otto-Friedrich-Universität Bamberg + Frank Schreiterer + frank.schreiterer&uni-bamberg.de +40439 + I M Skaugen SE + Alexander Andstø + alexander.andsto&skaugen.com +40440 + restfarbe.de + Jan Peschke + jan&restfarbe.de +40441 + DarkDNA + Alex Hanselka + alex&darkdna.net +40442 + OnApp + Carsten Sjoerup + carsten&onapp.com +40443 + Computer Rehab + Jeff Taylor + jeff&computerrehab.us +40444 + ETEK TECHNOLOGY SHENZHEN CO., LTD. + Wilson Law + wilson&etek-td.com +40445 + TRADERS S.A. + REY Jean-Luc + rey&traders.fr +40446 + Ordbogen A/S + Peter Christensen + pch&ordbogen.com +40447 + FAVITE Inc. + Rill Chang + rill_chang&favite.com +40448 + Ratocsystems, Inc. 
+ Masakazu Kondo + kondoh&ratocsystems.com +40449 + Max-Planck-Institut fuer extraterrestrische Physik + Rainer Sigl + sigl&mpe.mpg.de +40450 + University of Ottawa Heart Institute + Jared Strydhorst + jstrydhorst&ottawaheart.ca +40451 + Allens, Inc. + Michael Young + myoung&allens.com +40452 + Care Team Connect Inc. + Greg Kuhnen + gkuhnen&careteamconnect.com +40453 + School District #59 (Peace River South) + Kenneth McCleary + kmccleary&sd59.bc.ca +40454 + Free Pro + NOC Free Pro + engineering&jaguar-network.com +40455 + TMM.CX + Hein-Pieter van Braam + hp&tmm.cx +40456 + Radio Systems Ltd + Andy Wood + andy.wood&radio-systems.co.uk +40457 + Zen Entertainment, Inc. + John Earle + iana&zenentertainment.com +40458 + East Tennessee State University + Ryan Seale + sealerd&mail.etsu.edu +40459 + Paymark Limited + Michael Chester + iss&paymark.co.nz +40460 + SHENZHEN SUPER RICH TECHNOLOGY CO.,LTD + wu minhua + wuminhua&sairuq.com +40461 + Mirzo Ulugbek Professional College of Informatics + Gimadiev Gayrat + engineer&mupki.uz +40462 + CIDEON + Mariusz Zienkiewicz + mariusz.zienkiewicz&cideon.com +40463 + TROY Group, Inc. + Jay Pearson + jpearson&troygroup.com +40464 + a4ESSOR SAS + Christian SERRA + christian.serra&thalesgroup.com +40465 + ZETA6 Computer Systems LLC + Nazim Can Bedir + nazim.can.bedir&zeta.sx +40466 + West Monroe Partners, LLC + Jeff Anderson + it.accounts&westmonroepartners.com +40467 + Netronome Systems, Inc. + Roelof du Toit + roelof.dutoit&netronome.com +40468 + LSE Leading Security Experts GmbH + Stefan Pietsch + stefan.pietsch&lsexperts.de +40469 + Arkansas Heart Hospital + Shannon Jacobson + shannon.jacobson&arheart.com +40470 + Ecodota - Ecolo federal asbl. + BOLLINGH Sébastien + hostmaster&ecolo.be +40471 + Zimory GmbH + Peter Caron + peter.caron&zimory.com +40472 + Synapture + Lior COHEN + lior.cohen&synapture.fr +40473 + Chiang Mai International School, CMIS + Bradley Shank + admin&cmis.ac.th +40474 + ALAGAS NETWORK PTE LTD + Soragan Ong + soragan.ong&alagasnetwork.com +40475 + Earthworks inc. (formerly 'iWeave inc.') + Yoshinobu HOSAKA + yoshinobu.hosaka&iweave.jp +40476 + Winnertel + Jiongfeng Wu + jiongfeng.wu&winnertel.com +40477 + geomedia + lichunhua + lichunhua&geomedia.com.cn +40478 + LevelOne Communications GmbH + Joseph Naduthottam Babu + joseph.nb&levelone-tssa.com +40479 + 40mm Holdings, LLC + Billy Gaines + billy&bpong.com +40480 + OTS Logistics Group, Ltd. + Michael Gomez + michael.gomez&vanguardlogistics.com +40481 + MiMedia.com, Inc. + Bob Eckert + bob&MiMedia.com +40482 + Pure Storage + Scott Sewall + scott&purestorage.com +40483 + Volta electronics srl + Daniele Menozzi + solar&voltaelectronics.com +40484 + Auroville Foundation + Coriolan Weihrauch + coriolan&auroville.org.in +40485 + Online.net + Manfred Touron + mtouron&online.net +40486 + State of Nevada Dept of Employment Training and Rehabilitation + Lani Smith + lssmith&nvdetr.org +40487 + Timesafer Inc. + Xiaohai Lu + luxiaohai&timesafer.com +40488 + Staiger, Schwald & Partner AG + Stefan Mattenberger + stefan.mattenberger&ssplaw.ch +40489 + Mintlab B.V. + M.K. Ootjers + michiel&mintlab.nl +40490 + The Dot Net Factory, LLC + Patrick Parker + patrick&empowerID.com +40491 + Uceem Networks Inc. + Dan Axtman + dan.axtman&uceem.com +40492 + Balsec GmbH + Radim Svejda + rsvejda&balsec.ch +40493 + Toyota Adria d.o.o. + Jure Lakota + jure.lakota&toyota.si +40494 + Ditenity Inc. + Piyush Jain + piyush&identicate.com +40495 + ADVANCEDDIGITAL INC.
+ Phil Oxrud + phil&advanceddigital.ca +40496 + Shanghai Aerospace Automobile Electromechanical Co.,LTD + Yu Tan + yu.tan&ht-saae.com +40497 + CROSS Zlin a.s. + Marián Bullo + bullo&cross.cz +40498 + Third Sight Pte. Ltd. + Teo Kok Hoon + kokhoon&thirdsight.net +40499 + SOCEL VISIONOR + CONION Jacques + jacques.conion&visionor.fr +40500 + DAN electronic Ltd + Stefan Jordanov + stefan&infoserv.bg +40501 + MAF Consulting Ltd + Michael Fyles + michael.fyles&mafconsulting.com +40502 + AT Consulting + Alexey Efremov + aefremov&at-consulting.ru +40503 + University of Louisville Hospital + Michael Boston + mikebos&ulh.org +40504 + Kamil Bujniewicz + Kamil Bujniewicz + kamil.bujniewicz&gmail.com +40505 + OUTSCALE + Laurent SEROR + laurent.seror&outscale.com +40506 + NSC corpration + shigeharu seki + shigeharu.seki&nscjapan.co.jp +40507 + TravelSky Technology Limited + Feng Yu + fengyu&travelsky.com +40508 + Dynamic Telecom + Calin Serbanescu + calin&dynamictelecom.net +40509 + VoipSwitch + Łukasz Nowak + lukas&voipswitch.com +40510 + Fourth Sector Innovations + Piseth Kem + pisethkem&gmail.com +40511 + DoGi Enterprise + Ben Deligato + deligato&gmail.com +40512 + Hamid Saeed + Hamid Saeed + hamid.saeed&virgin.net +40513 + weifang mingji technology co., LTD + Jiayulin + jiayl&mingjitech.com +40514 + Shenzhen Howah Network Communication Co., Ltd + Jianhui Gu (samcool) + samcool&howah.com.cn +40515 + Boivie Inc + Victor Boivie + victor&boivie.com +40516 + Staffordshire County Council + Chris Williams + chris.williams&staffordshire.gov.uk +40517 + Shenzhen Herotel Tech. Co.,Ltd. + Li Min + lim&herotel.cn +40518 + Foresight + Yariv Hazony + yariv&foresight-air.com +40519 + sprd.net AG + Thomas Abraham + it-einkauf&spreadshirt.net +40520 + Novareto + Christian Klinger + ck&novareto.de +40521 + Metacloud, Inc + Morgan Fainberg + morgan.fainberg&metacloud.com +40522 + myoscience Inc + Corydon Hinton + chinton&myoscience.com +40523 + Eau De Web + Cristian Romanescu + cristian.romanescu&eaudeweb.ro +40524 + Realex Payments + Owen O Byrne + owen.obyrne&realexpayments.com +40525 + Business Intelligence Direct Limited + Will Jones + will&bidmarketing.co.uk +40526 + REGISTRO PUBLICO DE PANAMA + Ana Raquel Ulloa + firma&rp.gob.pa +40527 + Bank of Canada + Martin Ladouceur + ladm&bank-banque-canada.ca +40528 + Kod Integrations, LLC + Christopher Lemieux + clemieux&kodintegrations.com +40529 + Massxess + Patrick Timmers + ptimmers&massxess.nl +40530 + Agence universitaire de la Francophonie + Jean Christophe André + jean-christophe.andre&auf.org +40531 + Dr. Stolyarenko Medical Center + Leonid Krivoshein + it&stolyarenko.com +40532 + Zeptonics Pty. Ltd. + Lindsay Powles + lindsay.powles&zeptonics.com +40533 + Voice of Russia + Aleksey Sachenko + adminmail&ruvr.ru +40534 + Signamax, a.s. + Martin Jech + martin.jech&signamax.eu +40535 + SVYAZKOMPLEKTSERVICE, LLC + SIARHEI SOICH + skservis&list.ru +40536 + Wieland-Werke AG + Guido Drescher + guido.drescher&wieland.com +40537 + Null Ventures LLC + Jeremy L. Gaddis + jeremy&nullventuresllc.com +40538 + Seal Maker Produktions- und Vertriebs GmbH + Georg Herr + g.herr&seal-maker.com +40539 + Aquto + Scott Klein + sklein&aquto.com +40540 + LPAR2RRD + Pavel Hampl + support&lpar2rrd.com +40541 + MecSys + Isaac Mechi + isaac&mecsys.com.br +40542 + SDL Fredhopper + Nikolay Kalev + sysadmin&fredhopper.com +40543 + Udruga P.O.I.N.T. 
+ Hrvoje Belani + kontakt&udruga-point.hr +40544 + Impedance Mismatch LLC + Marc Petit-Huguenin + marc&petit-huguenin.org +40545 + Insight Enterprises, Inc. + David Eckles + DG-NA-DTLK_DeptTechnologyServices&insight.com +40546 + Tentixo NG AB + Lars Mårelius + morre&tentixo.com +40547 + Aadvanced Filtering Services + Roman Rybalko + iana-oid-pen&advancedfiltering.net +40548 + id3as + Adrian M Roe + adrian&id3as.co.uk +40549 + synchroad.com + David Dick + davidd&synchroad.com +40550 + OZtell + David Chung + admin-iana&oztell.com +40551 + Bootable Cluster CD + Skylar Thompson + skylar.thompson&gmail.com +40552 + Salcininku rajono savivaldybes administracija + Valerijus Sinkevicius + valerijus.sinkevicius&salcininkai.lt +40553 + "Bulgartabac-Holding" AD + Stefan Veselinov + it&bulgartabac.bg +40554 + Tecom + Alexander Kabanov + kabanov&tecomgroup.ru +40555 + Garz & Fricke GmbH + Bernd Mierzowski + bernd.mierzowski&garz-fricke.com +40556 + Moving Possibilities LLC + Michael Eisenman + meisen321&gmail.com +40557 + Triodos Bank NV + Michel van Kooi + michel.vankooi&triodos.nl +40558 + Elecmor + Dāvis Mosāns + elecmor&elecmor.lv +40559 + BPCE + Frédéric Aulard + frederic.aulard&bpce.fr +40560 + Norsat International Inc. + Michael Schefter + mschefter&norsat.com +40561 + Alloy Computer Products (Aust) Pty Ltd + Scott Young + scott.young&alloy.com.au +40562 + Open Broadcast Systems Ltd + Kieran Kunhya + kierank&ob-encoder.com +40563 + Christine Steup Unternehmensberatung + Christine Steup + matthias.steup&arcor.de +40564 + StrikeAd LCC + Michael Dewhirst + admin&strikead.com +40565 + Berufsbildende Schulen Technik + Andreas Loosen + andreas.loosen&bbst-clp.de +40566 + Forte Automation System, Inc. + Scott Alcock + alcocks&forteautomation.com +40567 + Melodon Software Inc. + Fred Saberian + Fred.Saberian&MelodonSoftware.com +40568 + Tredegar Corporation + Adrian H. Amos + adrian.amos&tredegar.com +40569 + HERNIS Scan Systems AS + Audun Skjelnes + pen&hernis.no +40570 + RFOptic + Oz Abramson + oz&rfoptic.com +40571 + SRT Communications, Inc. + Travis Post + travisgp&srttel.com +40572 + CapTemp, Lda + Carlos Domingues + geral&captemp.com +40573 + vklop.com + Dusan Debeljak + dd&vklop.com +40574 + Ukrainian Computer Laboratory ltd. + Denis Goncharoff + denis.goncharoff&ucl.com.ua +40575 + Touro Infirmary + Greg Barker or Octavio Estorino + estorinoo&touro.com +40576 + NovaTech, LLC + Matthew Mizer + novatechlenexa&gmail.com +40577 + KOCH + Thomas Koch + office&t-koch.com +40578 + Computing Point Limited + Dr. Paul C. Caesar PhD Comp Sci. + paul.caesar&computingpoint.ltd.uk +40579 + Vistula University + Bartosz Kozlowski + oid&vistula.edu.pl +40580 + GlobalSite UK + James Burton + james.burton&globalsiteuk.co.uk +40581 + Big Dutchman ltd. + Dmitry V. Solovev + itsupport&bigdutchman.ru +40582 + iBlocks Ltd. + Tim Brewer + tim.brewer&iblocks.co.uk +40583 + Federal Service State Registration, Cadastre and Cartography in Moscow + Salavat Gayfulin + sggayfulin&mosregistr.ru +40584 + Petersburg State University of Railways Transport + Eugene Oparin + OnapuH&mail.ru +40585 + Box UK, Limited + Matt Willsher + matt.willsher&boxuk.com +40586 + Rocky Mountain College + Andrew Niemantsverdriet + andrew&rocky.edu +40587 + Neptec OS, Inc. + David Cheng + davidc&neptecos.com +40588 + TrikeApps Pty Ltd + Matthew Fallshaw + operator&trikeapps.com +40589 + Indochina Telecommunication Technology JSC (ITT) + Hau Tran + hau.tran&itt.vn +40590 + ClickBridge Inc + Ryan King + Ryan.king&clickbridge.net +40591 + Avateq Corp. 
+ Alex Babakhanov + alex_b&avateq.com +40592 + IDK CORPORATION + Yasu Shimizu + shimizu&idk.co.jp +40593 + Iberdrola USA Management Corporation + Iberdrola USA Network Security / Neal Garber + network.security&iberdrolausa.com +40594 + ilogixx Limited + Christian Becker + info&ilogixx.de +40595 + NB Software + Klaus Breining + klaus_breining&nbsoftware.de +40596 + ComTec!Fütterer + Mathis Fütterer + info&comtecs.de +40597 + NetProbe, Llc + Sergey Eremenko + s.eremenko&net-probe.ru +40598 + Geopost UK Ltd + Simon Bonnck + simon.bonnick&geopostuk.com +40599 + Quick OID Registry (quick-oid.org) (Roman Rybalko) + Roman Rybalko + iana-pen&quick-oid.org +40600 + EWA Ltd + Greg Illsley + greg.illsley&ewa.ltd.uk +40601 + Emaris Limited + Paul Brissenden Hemstock + paul&emaris.com +40602 + Vytautas Magnus University + Egmantas Girnis + egmantas&puga.vdu.lt +40603 + Harvard Pilgrim Health Care + John Whippen + john_whippen&harvardpilgrim.org +40604 + Geosync Microwave, Inc. + Mohammad Zahidi + mzahidi&geosyncmicrowave.com +40605 + Unassigned + Returned 2017-11-28 + ---none--- +40606 + Scripps Health + Matt Ferguson + ferguson.matthew&scrippshealth.org +40607 + Menara Netowrks + Chris Hewitt + chewitt&menaranet.com +40608 + Bars NPK Ltd. + Borodkin Victor + support&bars-perm.ru +40609 + Nordstrom + Michael Schell + michael.schell&nordstrom.com +40610 + Docstoc, INC + Dmitry Kireev + dmitryk&docstoc.com +40611 + Key Software Develpment + Javier Silva Pérez + javier.sp1209&gmail.com +40612 + SoftPro S.r.l. + Cristiano Montanari + info&softpro.it +40613 + Instituto Tecnológico de Informática + Sergio Talens Oliag + iana&iti.es +40614 + GUANGDONG EAST POWER CO.,LTD. + Simo He + soft&eastups.com +40615 + Olympus Corporation of the Americas + Narayanan R Pillai + Narayanan.Pillai&olympus.com +40616 + Pragmatica, LLC + Scott Sobotka + iana&pragmatica.us +40617 + WiMacTel, Inc + Andrew Preece + apreece&wimactel.com +40618 + Intechne Tecnologia da Informação + Michel Araujo Neves + michel&intechne.com.br +40619 + First Gulf Bank PJSC + Sanjeev Mulay + sanjeev.mulay&fgb.ae +40620 + Welcome Italia spa + Giorgio Luchi + giorgio.luchi&welcomeitalia.it +40621 + Centrum Holdings, s.r.o. + Michal Kucera + michal.kucera&centrumholdings.com +40622 + Covea Insurance plc + Mike Croxford + penadmin&coveainsurance.co.uk +40623 + Morgowicz Inc + Nicholas Morgowicz + nick&morgowicz.com +40624 + Willops Management + Vasile Lacatus + vasile.lacatus&willops.com +40625 + SBO "Centre of Information Technology of the Orenburg region" + Karaseva Natalya + ngka&mail.orb.ru +40626 + Web de Confianza Andaluza S.Coop.And. + María de los Ángeles Ojeda Rojas + info&webconefe.com +40627 + Jefferson Regional Medical Center + Toni Fox + toni.fox&jeffersonregional.com +40628 + FormPipe Software A/S + Thomas Larsen + thomas.larsen&formpipe.com +40629 + Lexington, S.L. + Daniel Paniagua + soporte&lexington.es +40630 + Crexendo, Inc. + Larry Low + llow&crexendo.com +40631 + AVA Communications + Payam Shabanian + shabanip&gmail.com +40632 + Trüb AG + Gergely Karoly + help&trueb.ch +40633 + "Scan Engineering Telecom" CJSC + Eugene Litvinov + pro&setdsp.ru +40634 + Willway, S.A. + Rui Monteiro + rmonteiro&willway.pt +40635 + opvizor GmbH + Dennis Zimmer + info&opvizor.com +40636 + Use System Engineering B.V.
+ Marco ten Thije + marco.tenthije&usetechnology.nl +40637 + Deliservice Punnitse & Säästä Oy + Teemu Haapoja + teemu.haapoja&punnitse.fi +40638 + BSC Nutrition LTD + Kestutis Rasimavicius + info&bulksupplementscompany.co.uk +40639 + PROFI-UC Ltd. + Andrew Matyazh + support&profi-uc.ru +40640 + Polisnab JSC + Maxim Y. Evdolyuk + evdolyuk&polisnab.ru +40641 + Dierichsweiler Unternehmens- und Prozessberatung GmbH + Oliver Magnus + o.magnus&dup.de +40642 + Gardado s.r.o. + Petr Votava + developement&gardado.com +40643 + Broadpeak + Jacques Le Mancq + credential.manager&broadpeak.tv +40644 + Formanek + Robert Formanek + robert&formanek.org +40645 + UFAL - Universidade Federal de Alagoas + Diogo Cabral de Almeida + diogo&nti.ufal.br +40646 + iNet Telecoms Ltd (Voipfone) + Lee Rose + lee&voipfone.co.uk +40647 + HELVETAS Swiss Intercooperation + Manfred Berger + Manfred.Berger&helvetas.org +40648 + AREAL + Arnaud Prevot + Arnaud.Prevot&areal.fr +40649 + Acumentrics Corporation + Donald Charlantini + dcharlantini&acumentrics.com +40650 + InterMedia Enterprises + Paul Kosinski + noc&iment.com +40651 + Beijing Huasun Unicreate Technology LTD. + Bo Yang + yangbo&bhunetworks.com +40652 + Illinois Wesleyan University + Michael Zehr + root&iwu.edu +40653 + Bittoo + Henrik Dige Semark + hds&bittoo.net +40654 + Consórcio de Informática na Gestão Pública Municipal + Rodrigo Valceli Raimundo + rodrigo&ciga.sc.gov.br +40655 + Nanjing Wlanease Co., Ltd. + Forrest Zhang (https://wlanease.com) + forrest&wlanease.com +40656 + State independent establishment Tula region center of information technologies + Andrea Gorbut + andrea.gorbut&tularegion.ru +40657 + experience4you GmbH + Thomas Loch + thomas.loch&experience4you.de +40658 + Delta Meccanica S.r.l. + Gianluca Magistrato + g.magistrato&deltameccanica.com +40659 + Oregano Systems – Design & Consulting GesmbH + Günther Genser + genser&oregano.at +40660 + Southern Record Distributors Ltd. + Szabolcs Rumi + szabolcs&srd.co.uk +40661 + Airwave Solutions + Dipesh Mistry + dipesh.mistry&airwavesolutions.co.uk +40662 + NISZ Co. + Károly Juhász + juhasz.karoly&nisz.hu +40663 + Kupson spol. s r.o. + Martin Kupec + martin.kupec&kupson.cz +40664 + Noblis, Inc. + Kyle Villano + Kyle.Villano&noblis.org +40665 + Akron Group + Lukin Alexandr + it&akronplus.ru +40666 + Zito Media + Brian Empson + brian.empson&zitomedia.com +40667 + BRToken Ind. e Com. de Produtos Eletrônicos Ltda. + Alexandre Cagnoni + alex&brtoken.com.br +40668 + NEC New Zealand Ltd + Dave Clarke + cs&nec.co.nz +40669 + Microlab RF Ltd + Mike Bush + m2c2ab&btinternet.com +40670 + Lanka Government Information Infrastructure + Nimal Ratnayake + nimalr&noc.gov.lk +40671 + Radinet Communications Inc. + Ray Chen + ray.chen&radinetcomm.com +40672 + Fiber SenSys, Inc. + David E Hollingsworth + david.hollingsworth&fibersensys.com +40673 + Unigine Inc. + Sergey S. Kovalev + sysadmin&unigine.com +40674 + SAIT (Securitas) + Koen Verdijck + koen.verdijck&securitas.be +40675 + geiger BDT GmbH + Maximilian Barth + barth&geiger-bdt.de +40676 + Microchip Technology Inc. + G.
Richard Newell + richard.newell&microchip.com +40677 + RKF Engineering Solutions, LLC + Brian Sipos + BSipos&rkf-eng.com +40678 + Nexura Internacional S.A + Melissa Vela + melissa.vela.coral&gmail.com +40679 + Centre Hospitalier de Fougeres + Yannick HERVE + yannick.herve&ch-fougeres.fr +40680 + La Trobe University + Benji Wakely + unixsup&latrobe.edu.au +40681 + ELNO + Guillaume Baron + g.baron&elno.fr +40682 + Thanis.org + Bill Thanis + differential.karma&gmail.com +40683 + Open Grid Europe GmbH + Holger Kayser + pen-admin&open-grid-europe.com +40684 + Appdynamics + Boris Livshutz + it&appdynamics.com +40685 + Kafinated Kode, Inc + Rishi Pidva + rishi&kafinatedkode.com +40686 + Issac Systems Inc. + Harry Lee + harry&issacnet.com +40687 + ezamber + Pawel Tomulik + ptomulik&ezamber.pl +40688 + Kum and Go + Chris Sweeney + CJS&KumandGo.com +40689 + Digital Processing Systems + Muhammad Ramzan + ramzan.farooq&dpskw.com +40690 + MBTechnology Ltd + Emilian Mieilica + emilm&mbtelecom.ro +40691 + Crosstel Inc + John Maliakal + john.maliakal&crosstel.com +40692 + The ZAP Group + John Zaitseff + J.Zaitseff&zap.org.au +40693 + Inter Vehicle Communication + Simon Hsieh + simon&interconnect.com.tw +40694 + antrou + HongCai Wang + hongcai.wang&antrou.com +40695 + BitPlanet Inc. + Poornaprajna Udupi + poorna&bitplanet.io +40696 + IDENETWORK + Benoit MBOUBATEUVAWE + benoit&idenetwork.fr +40697 + Alma Manu Oy + Mikko Junnila + mikko.junnila&almamanu.fi +40698 + TeliSwitch Solutions + Shaike Zalitzky + shaikez&teliswitch.com +40699 + LexSoft, LTD + Alexandr A. Larin + iana&cpp.su +40700 + M-Cube S.p.A. + Paolo Ferracin + p.ferracin&mcube.it +40701 + Vitera Healthcare Solutions + Michael Rosile + mike.rosile&viterahealthcare.com +40702 + San Mateo Medical Center + Kathleen Boutte Foster + kbfoster&co.sanmateo.ca.us +40703 + Hokkaido Tracks + Simon Walter + simon.walter&hokkaidotracks.com +40704 + Amstar Creative Ltd + Charles Chin + service&amstar.tw +40705 + Signal Processing Devices + Patrik Thalin + patrik.thalin&spdevices.com +40706 + AGMF Prévoyance Union de mutuelles soumise au livre II du Code de la Mutualité - Filiale de GPM + Dominique RIBAUTE + dominique.ribaute&gpm.fr +40707 + Aleturo Group + Stefan Unterweger + 91.95744&chiffre.aleturo.com +40708 + Rhythm Engineering, LLC + Steve Penrod + snmp&rhythmtraffic.com +40709 + Jiaheng Medical Technology Co., Ltd. + Dai Songshi + ss5309&yahoo.com +40710 + Grasshopper + Kevin Cormier + kcormier&grasshopper.com +40711 + DSTA S.L. + Jon Noble + jnoble&dsta-sl.com +40712 + DHCPCD Project + Roy Marples + roy&marples.name +40713 + Migame.org + Alex Morgenegg + alex&migame.org +40714 + Accelera Mobile Broadband, Inc. + Aakash Sahai + aakash.sahai&acceleramb.com +40715 + Innerworkings + Justin Baker + jbaker&inwk.com +40716 + Kyle Brantley + Kyle Brantley + kyle&glidegaming.com +40717 + Conde Nast + Erick Cano + TSOITSecurity&condenast.com +40718 + RelySys Technologies India Private Limited + Praveen Kumar Bijadi + praveen&relysys.co.in +40719 + The Lubrizol Corporation + IT Hybrid Data Centers Team + ithybriddatacentersteam&lubrizol.com +40720 + Texas Department of Public Safety + William Berry + william.berry&dps.texas.gov +40721 + FormFactor, Inc.
+ Seng Ing + sing&formfactor.com +40722 + HUVITZ + Sun Kim + Sun&huvitz.com +40723 + New Zealand Defence Force + Christina Porter, NZDF Site Manager + webmaster&nzdf.mil.nz +40724 + Ubisoft Entertainment + Jason Joseph + jason.joseph&ubisoft.com +40725 + Intune Networks Limited + Lewis Hanly + Lewis.Hanly&intunenetworks.com +40726 + CIPHRON GmbH + Oliver Skibbe + os&ciphron.de +40727 + Cosium + Cyril Bailly + netadmin&cosium.com +40728 + Falck Danmark A/S + Claus Canzella + inetadmin&falck.dk +40729 + LAND-DATA GmbH + Dennis Röhrs + iut-hotline&landdata.de +40730 + Neocom Software Ltd. + Anton Galushkin + a.galushkin&trbonet.com +40731 + Elephant Talk Communications Corp. + Thomas Bhatia + thomas.bhatia&elephanttalk.com +40732 + IEEE 2030.5 Working Group + Robby Simpson + robby.simpson&ieee.org +40733 + Sabzfaam ICT + Ahmadreza Khaleghi + khaleghi&sabzfaam.com +40734 + Karina Mobile Solutions + Siavash Safi + siavash&karinaco.ir +40735 + DoctuSoft Ltd. + Tibor Czimer + tibor.czimer&doctusoft.com +40736 + Audeo, Inc. + Justin Baugh + justin.baugh&audeocloud.com +40737 + National Labor Relations Board + Mark Jeweler + mark.jeweler&nlrb.gov +40738 + The Children's Institute of Pittsburgh + Sharon L. Dorogy + sdo&the-institute.org +40739 + Affirmative Insurance Holdings, Inc. + Fabian Schramke + fabian.schramke&affirmativeinsurance.com +40740 + DirectOut GmbH + Claudio Becker-Foss + claudio.becker-foss&directout.eu +40741 + Flying Horse Productions, LLC + Mark A. Munza + commander&flying-horses.com +40742 + PDI Ninth House + Chris Mulcahy + chris.mulcahy&pdinh.com +40743 + Reissmann IT-Services + Sven Reissmann + info&reissmann.it +40744 + Center for Translational Molecular Medicine (CTMM TraIT) + Erik Roelofs + erik.roelofs&maastro.nl +40745 + WiValley, Inc + Brian Foucher + NOC&wivalley.com +40746 + GOZUBUYUKOGLU + Arda GOZUBUYUKOGLU + arda&gozubuyukoglu.com +40747 + Radiant Logic, Inc. + Claude Samuelson + csam&radiantlogic.com +40748 + IONODES Inc. + Eric Tasso + etasso&ionodes.com +40749 + Corfire (SK C&C USA) Inc. + Byungkwon Jeon + byungkwonjeon&corfire.com +40750 + Synrc Research Center + Maksym Sokhatskyi + maxim&synrc.com +40751 + Homemail - Ross Johnson + Ross Johnson + ross&homemail.org +40752 + Barricane Technology Ltd. + Chris Dew + iana-pen&barricane.com +40753 + Beijing Time Antaeus Media Technology Co.,Ltd + houhongji + u3email&yahoo.com.cn +40754 + Liikennevirasto TVT + Jyrki Jarvinen + jyrki.jarvinen&ely-keskus.fi +40755 + Acticom + Jaroslav Beneš + acticom&acticom.cz +40756 + K.W.Doggett Fine Paper + Ben Grech + bgrech&kwdoggett.com.au +40757 + MOSCOW COMMERCIAL BANK MOSCOMPRIVATBANK ZAO + Alexey Pavlov + aleksej.pavlov&privatbank.ru +40758 + Eltek Polska + Jaroslaw Gawin + jaroslaw.gawin&eltek.com.pl +40759 + DKV MOBILITY SERVICES BUSINESS CENTER GmbH + Co. KG (formerly 'EGRIMA BuisnessCenter') + Karsten Seifert + FM-DMB-PKI&dkv-mobility.com +40760 + TOMRA Systems ASA + Erik R. Hersløv + erik.reinhardt.herslov&tomra.no +40761 + Mideye AB + Ulf Schuberth + support&mideye.com +40762 + Hitec Electric b.v. + Gert-Jan Dorenbos + pen&hitecelectric.com +40763 + Kennedy University Hospitals, Inc. + Thomas J. Balcavage + t.balcavage&kennedyhealth.org +40764 + Resonate Insights + Adam Glenn + adam.glenn&resonateinsights.com +40765 + hSenid Software (Singapore) PVT Limited + Jason Jebanesan + jason&hsenid.com +40766 + Xenode Co. Ltd. + Nikolay V. Krasko + n.krasko&xenode.ru +40767 + Reporting Estándar S.L. 
+ Ignacio Hernandez-Ros + ignacio&reportingstandard.com +40768 + Hotwords Tecnologia + Bento Loewenstein + tech&hotwords.com.br +40769 + VendScreen + Cory Johannsen + cory.johannsen&vendscreen.com +40770 + Partheas + Jeroen Serpieters + jeroen.serpieters&partheas.com +40771 + VISUAPPS GmbH + Armin Kanitsar + office&visuapps.com +40772 + SuperMedia, LLC + Colin Cheline + colin.cheline&supermedia.com +40773 + The Prosecutor General`s Office of Ukraine + Dmytro Chub + chub_ds&gp.gov.ua +40774 + NagSNMP + Remi Broemeling + remi&broemeling.org +40775 + TripodWorks CO.,LTD. + SHOJI Takeshi + t.shoji&tripodw.jp +40776 + Kalyan Kadiyala + Kalyana Kadiyala + kckadiyala&gmail.com +40777 + Qin Technology SpA + Jaime Vasquez Acuña + jaime&qin.cl +40778 + Unassigned + Removed 2012-11-12 + ---none--- +40779 + 2reallife + IT Department (Mazitov Farid, Motylev Dmitriy, Ozerov Vailiy) + noc&2reallife.com +40780 + Smartpipe Solutions + Adrian Enache + adrian.enache&smartpipesolutions.com +40781 + Cynaptica + Roger Cocks + rogerc&cynaptica.net +40782 + Dyne System Co., Ltd + Do Woong Park + adams&dynesys.co.kr +40783 + icoMetrix + Dirk Loeckx + Dirk.Loeckx&icometrix.com +40784 + Kumahira Co., Ltd. + Daisuke Chanohara + da-chanohara&kumahira-safe.co.jp +40785 + Optro Co.,Ltd + Park InSoo + greatpis&optro.co.kr +40786 + Unassigned + Removed 2012-11-12 + ---none--- +40787 + Compumatica secure networks + Guido Frohn + guido.frohn&compumatica.com +40788 + Abegglen Management Consultants AG + Brunner Rita + itcontract&abegglen.com +40789 + Logix + Michal Ludvig + mludvig&logix.net.nz +40790 + Simple Solution Technologies LLC + David Goldberg + godberg&gmail.com +40791 + Beijing OMATE Digital Technology CO.,Ltd + Weixun Meng + mwx&omate.com.cn +40792 + Express-Interfracht Internationale Spedition GmbH + Martin Haschek + martin.haschek&railcargo.at +40793 + Andelskassen JAK + Martin Kjær Jørgensen + admin&ajak.dk +40794 + IG Metall Vorstandsverwaltung + Alexander Stahl + alexander.stahl&igmetall.de +40795 + otaku-realm.net + Olivier Lody + olivier.lody&otaku-realm.net +40796 + ICBPI S.p.A. + Enrico Galoni + enrico.galoni&icbpi.it +40797 + Sidion + Alexander Klimuk + alexander.klimuk&sidion.de +40798 + 3D-P + Stephan Marcotte, P.Eng. + stephanemarcotte&3d-p.com +40799 + Swiss International Air Lines Ltd. + Georges Mathis + U638067&dlh.de +40800 + Smart Refill i Helsingborg AB + Michael Jörgensen + michael.jorgensen&smartrefill.se +40801 + FiveCo + Antoine Gardiol + antoine.gardiol&fiveco.ch +40802 + Indra Navia as + Eldar Firing + eldar.firing&indra.no +40803 + netRapid GmbH & Co. KG + Eugen Staab + eugen.staab&netrapid.de +40804 + ULTEO + David PHAM-VAN + d.pham.van&ulteo.com +40805 + Nimbus Directory Services + Vladimir Dzhuvinov + vladimir&nimbusds.com +40806 + ICBTECH d.o.o. + Aleksandar Pejic + aleksandar.pejic&icbtech.rs +40807 + semyon.org + Semyon Chaichenets + IODadmin&semyon.org +40808 + Wi-Fi Alliance + Greg Ennis + technical&wi-fi.org +40809 + Tohoku Gakuin University + Nozomu Hino + regi&staff.tohoku-gakuin.ac.jp +40810 + BUPT-GUOAN Broadband Network Technology Co.Ltd + Junwei Ren + rjw&bupt.edu.cn +40811 + WebSistem + Ahmet Polat + ahmetpolat&web-sistem.com +40812 + Die Firma GmbH + Jann Heider + it&diefirma.de +40813 + Agenturadmin.de + Jann Heider + info&agenturadmin.de +40814 + Garden City Hospital + Linda Bell + LBell&gchosp.org +40815 + Dr. Thalmair + Dr. Tobias Thalmair + 884256247&s320403455.online.de +40816 + Ecodigi Tecnologia e Serviços Ltda. 
+ Fernando Gago Gomes + fgomes&ecodigi.com.br +40817 + Dr. Martin Froehlich + Dr. Martin Froehlich + xdrcft&gmx.net +40818 + CYG SUNRI CO.,LTD + LiuFeng + liufeng_gx&hotmail.com +40819 + Provident Solutions LLC + James Nelson + jmn&provident-solutions.com +40820 + OpenIPMI + Trenton Corey Minyard + minyard&acm.org +40821 + Public Protector South Africa + Ignatius Makgoka + ignatiusm&pprotect.org +40822 + Hagedorn Informationssysteme GmbH + Michal Mlynar + michal.mlynar&hagedorn-infosysteme.de +40823 + NVable Limited + Bob McChesney + bob.mcchesney&nvable.com +40824 + Qore Technologies, sro + David Nichols + oid-admin&qoretechnologies.com +40825 + VeriTeknik + Cem Karaca + ckaraca&veriteknik.com +40826 + Short Films 4 U Limited + Andy Spooner + andy.spooner&shortfilms4u.com +40827 + Lifelan Technology Co., LTD. + Chang Hsu Hua + hua&lifelan.com.tw +40828 + CHIeru Co., Ltd. + takayuki toge + toge&chieru.co.jp +40829 + Cambridge Industries Group (CIG) + Jet Liu + zliu&ci-g.com +40830 + Qiaoy.info + Yi Qiao + qiaoy&qiaoy.info +40831 + Domenikss SIA + Eduards Čaikovskis + Eduards.caikovskis&domenikss.lv +40832 + MicroNova AG + Hannes Oberländer + Hannes.Oberlaender&micronova.de +40833 + Hytec Electronics Ltd + Mike Bacon + mike.bacon&hytec-electronics.co.uk +40834 + Kübler IT + Benjamin Kübler + b.kuebler&kuebler-it.de +40835 + Department of Microelectronics and Computer Science, Lodz University of Technology + Piotr Perek + pperek&dmcs.pl +40836 + Blackbird Group, Inc. + Dave Ruginski + d.ruginski&blackbird-group.com +40837 + iTech Comércio Importação e Exportação de Componentes Eletrônicos LTDA + Douglas Malvar Ribas + douglas&grupoitech.com.br +40838 + Commune de Niort + BROUARD Yohann + yohann.brouard&mairie-niort.fr +40839 + SARDA South Wales + Chris Daw + chris.daw.pen&sardasouthwales.org.uk +40840 + Rainydayz + Andy Ruddock + admin&rainydayz.org +40841 + n-Systems GmbH & Co. KG + Bernhard Grün + bernhard.gruen&n-systems.de +40842 + A10 Networks + Madhusudhan Vadde + mvadde&a10networks.com +40843 + Chatmongers, LLC + Shaun Kruger + skruger&chatmongers.com +40844 + MAC&C LLC + Matuzko Andrey + macandc&macandc.ru +40845 + Veris Industries + Aaron Parker + support&veris.com +40846 + Endurance Services (formerly 'Montpelier Technical Resources Ltd') + Hugh Kelley + hugh.kelley&montpeliertr.com +40847 + Académie de Versailles + Cellule confiance numérique et sécurité + rssi&ac-versailles.fr +40848 + VeriFyle, Inc. + Stephen Pierce + spierce&verifyle.com +40849 + Ceiec Electric Technology Inc. + Dachuan Liu + liudachuan&ceiec-electric.com +40850 + Kongsberg Spacetec AS + Sverre Moe + sverre&spacetec.no +40851 + IPContact Software + Jorge Merlino + jorge.merlino&ipcontact.com.uy +40852 + Main Line Health Inc. + James A Robinson + robinsonj&mlhs.org +40853 + ATOSS CSD Software GmbH + Andreas Blochberger + ablochberger&atoss-csd.de +40854 + WH Ireland Limited + Richard Pugh + richard.pugh&wh-ireland.co.uk +40855 + University of Pittsburgh + Jeff White + jaw171&pitt.edu +40856 + Tofino Security + Oliver Kleineberg + oliver.kleineberg&belden.com +40857 + Mira Soft + Gokhan Toplar + gokhan&mira-soft.com +40858 + ModemTec, spol. s r. o. + Lešek Franek + lesek.franek&modemtec.cz +40859 + VOSGELIS + COURROY Régis + rcourroy&vosgelis.fr +40860 + Protocom Technology + Mohamad Charafeddine + support&protocomtech.com.au +40861 + Silent Softwares Pvt. Ltd.
+ Patel Jigneshkumar Prakashbai + admin&silentcomponents.com +40862 + Systema Technologies SA + Mike Chatzipetros + michalis&systema.gr +40863 + UP-nxt + Maarten Bynens + maarten.bynens&up-nxt.com +40864 + Larcan Inc + Mr William E Meechem + bmeechem&larcan.com +40865 + Majorpower Corporation + Samuel Norman + snorman&majorpower.com +40866 + Arcusys Oy + Jouni Hiltunen + jouni.hiltunen&arcusys.fi +40867 + Boardroom Pty Limited + David Watson + pen&boardroomlimited.com.au +40868 + Genew Technologies Co.,Ltd + mingtao yang + yangmt&genew.com.cn +40869 + TAIWAN-CA Inc. + Robin Lin + robin.lin&twca.com.tw +40870 + Closed Joint Stock Company Interfax + Vladimir Timofeev + noc&interfax.ru +40871 + poweroasis + Pete Bishop + Pete.Bishop&poweroasis.com +40872 + Pardazeshgaran Saman Banking Solutions + Bijan Hoomand + hoomand&samanpr.com +40873 + Semafone Limited + Sam Harry + techsupport&semafone.com +40874 + LL Control Solutions International (PTY) Ltd + Lukas Hendriks + lukas&llcsi.com +40875 + OfficeCore + Dan Avni + danavni&officecore.com +40876 + Pressens Fællesindkøb + Martin Kjeldsen + mk&bluepipe.dk +40877 + myprocurement + Quentin Ambard + qambard&myprocurement.fr +40878 + Venere Net Srl + Giuseppe Lavagetto + vsysadm&expedia.com +40879 + VERION TEKNOLOJI A.S. + BULENT KAPTAN + bulent.kaptan&verion.com.tr +40880 + DBI Software, Inc. + Ward Fry + support&dbisoftware.com +40881 + Mutualink Inc + Charles Wegrzyn + cwegrzyn&mutualink.net +40882 + Step One AS + Nils Magnus Englund + nme&stepone.no +40883 + Beijing CangLang TV Technologies Co., Ltd. + Tom Yu + yutao&canglangtv.com +40884 + Instituto Federal Fluminense - IFF + Andre Cunha + acunha&iff.edu.br +40885 + ELCUS + Lukowkin Andrey + mail&elcus.ru +40886 + Kommunales Rechenzentrum Minden-Ravensberg/Lippe + Andreas Ollenburg + a.ollenburg&krz.de +40887 + Edeka Minden-Hannover IT-/logistic service GmbH + Sven Linscheid + idm.admin&minden.edeka.de +40888 + Universitaetsklinikum Tuebingen + Oliver Warda + oliver.warda&med.uni-tuebingen.de +40889 + Global Invacom Ltd + Leigh Coombs + leigh.coombs&globalinvacom.com +40890 + Hardomo + Diego Mejia + ingenieria&hardomo.com +40891 + RDC, Inc. dba LynTec + Dan Nguyen + dan&lyntec.com +40892 + abimus + Patrik Karisch + office&abimus.com +40893 + University of the Arts Helsinki + Juha Nyholm + oid-admin&uniarts.fi +40894 + Cory-Net + Craig Cory + craig&cory-net.com +40895 + VRVis Zentrum für Virtual Reality und Visualisierung Forschungs-GmbH + Adi Kriegisch, Georg Muelleder + oid&vrvis.at +40896 + Pearson Technology + David Ferguson + david.ferguson&pearson.com +40897 + C.S. Veritas + Maarten Abbink + coda&veritas.nl +40898 + Cirries Technologies Inc. + Rick Aguirre + rick.aguirre&cirries.com +40899 + Evo Group Technologies, Inc. + Kenneth P. 
Hough + kenneth&egtech.us +40900 + Cybersmart Ltd + Shaun Courtney + noc&cybersmart.co.za +40901 + Courtney.org.za + Shaun Courtney + shaun&courtney.org.za +40902 + TransLink - South Coast British Columbia Transportation Authority + Benson Chin + Benson.Chin&translink.ca +40903 + Thongfforong.cyf + Geraint Jenkin + geraint.jenkin&gmail.com +40904 + Brisbane City Council + Russell McGregor + Russell.McGregor&brisbane.qld.gov.au +40905 + Altronix Corporation + Jonathan Sohnis + jsohnis&altronix.com +40906 + Divitel Development Lda + Diogo de Andrade + diogo.andrade&divitel.com +40907 + OOO Adicom + Dmitry Sokolov + dsokolov&adicom.ru +40908 + Autoritatea Naţională pentru Administrare şi Reglementare în Comunicaţii - ANCOM + Puiu Lucian Chitu + puiu.chitu&ancom.org.ro +40909 + BigBrother Security Systems + Kalle Jansen + kj&bigbrother.nl +40910 + Bradley University + Michael Whitlow + mwhitlow&fsmail.bradley.edu +40911 + Freedompay, Inc. + Stephen Oberholtzer + stephen.oberholtzer&freedompay.com +40912 + Kulcs-Soft Nyrt. + Zsolt Eperjesi + eperjesizs&kulcs-soft.hu +40913 + QEM Software Ltd. + Timo Koski + info&qem.fi +40914 + Symanitron + Natalia Neverova + neverova&symanitron.ru +40915 + WANdisco, Inc + Ian Mordey + ian.mordey&wandisco.com +40916 + Berico Technologies + Richard Clayton + rclayton&bericotechnologies.com +40917 + Lender Processing Service + Scott Manning + scott.manning&lpsvcs.com +40918 + ResponseTap Limited + Marius Gaubas + marius.gaubas&responsetap.com +40919 + Vnomics + Kenneth A. Barlow + kbarlow&vnomicscorp.com +40920 + Icahn School of Medicine at Mount Sinai + Jonathon Anderson + jonathon.anderson&mssm.edu +40921 + MikroM Mikroelektronik für Multimedia GmbH + Holger Krahn + it&mikrom.com +40922 + Telydata Cía. Ltda. + Alejandro Augusto Andrade Mafla + aandrade&telydata.net +40923 + Isle of Capri Casinos, Inc. + Montez Fitzpatrick + montez.fitzpatrick&islecorp.com +40924 + aYaline + Bouziane Fourka + bfourka&ayaline.com +40925 + Trenkwlader Solutions, s.r.o. + Miguel Lambrecht + m.lambrecht&trenkwalder.com +40926 + Majic + Branko Majic + branko&majic.rs +40927 + SID Solutions Inc. + Shinsuke Nishiyama + shinsuke.nishiyama&sid-sol.com +40928 + Esprit Digital Ltd + James Brenner + james&espritdigital.com +40929 + Barrett Consulting Group Pty Ltd + Jobst Schmalenbach + hostmaster&barrett.com.au +40930 + MAGREX Co. Ltd. + kazuo imoto + devinfo&magrex.co.jp +40931 + Victorian Electoral Commission + Shripad Joshi + shripad.joshi&vec.vic.gov.au +40932 + SuccWare (Beijing) Software System Co., Ltd + Wang Edward + wgch72&163.com +40933 + QiZhi Technologies + LiQiang Xu + support&shterm.com +40934 + LIG Nex1 + YounYeoul Lee + leeyy777&lignex1.com +40935 + LTD "MedScann" + Igor Proskurjakov + strong2005&mail.ru +40936 + St. Elisabeth Convent + Constantin Piskounov + kpiskounov&obitel-minsk.by +40937 + Axelprod GSM Transmission + Luis Morales + webmaster&axelprod.ch +40938 + MobileIron Inc. + Mansu Kim + mansukim&mobileiron.com +40939 + Alphonso + Ravi Sarma + admin&alphonso.tv +40940 + ROYAL NEW ZEALAND PLUNKET SOCIETY INCORPORATED + Mike Kavanagh + IT.Helpdesk&plunket.org.nz +40941 + Tamara Elektronik Ltd.Sti. 
+ Yiğit YÜCE + yigit&tamara.com.tr +40942 + logic-base GmbH + Andreas Beyer + andreas.beyer&4sellers.de +40943 + XHONIA + Samuel N'Diang + information&xhonia.com +40944 + Bitlomat LLC + David Coleman + alessandro&bitlomat.com +40945 + Oak Solucoes em Informatica LTDA EPP + Leonardo Pignataro + leonardop&oaks.com.br +40946 + Yurii Moskovets + Yurii Moskovets + kontact&moskovets.com +40947 + Metromatics Pty Ltd + Damien Cahill + dcahill&metromatics.com.au +40948 + BiTMICRO Networks, Inc. + Chris Mostrales + cmostrales&bitmicro.com +40949 + Smart Grid Billing, Inc + Morten Lundberg + ml&smartgridbilling.com +40950 + JSC Stock Company OZNA + Ildar Akhmedov + admin&ozna.ru +40951 + uAnywhere + Ryan Verner + pen.admin&uanywhere.com.au +40952 + S3 ID Ltd + Support + support.dg&S3-ID.com +40953 + Unassigned + Removed 2013-01-11 + ---none--- +40954 + Ivar Jacobson International AB + Magnus Hyllander + magnus.hyllander&ivarjacobson.com +40955 + Campbell County Hospital District + Paul Thomson + paul.thomson&ccmh.net +40956 + SEAKR Engineering Inc + James Davis + James.Davis&seakr.com +40957 + Dexa Systems, Inc + Glen Mullen + ghmullen&dexasystems.com +40958 + Geneity Ltd + Andre Esser + andre.esser&geneity.co.uk +40959 + Optical Zonu Corporation + Meir Bartur + meir&opticalzonu.com +40960 + Witelcom AS + Fredrik Eriksen + info&witelcom.com +40961 + Axxes + Jérôme CALLY + jerome.cally&axxes.fr +40962 + SDL - Language Weaver + Dan Waddell + dwaddell&sdl.com +40963 + Jacques Ledoux + Jacques Ledoux + jcq&ledx.com +40964 + Stone Fifteen Design Group + Martin Cribbins + mcribbins&stonefifteen.com +40965 + Louisiana State Board of Medical Examiners + Alan W. Phillips + aphillips&lsbme.la.gov +40966 + Ronald.ORG + Ronald Kuehn + rk&ronald.org +40967 + SVIAT Ltd. + Aliaksej Zharko + admin&sviat.by +40968 + Nirvanix, Inc. + Bob Bawn + bob.bawn&nirvanix.com +40969 + Tobila Systems, Inc. + Atsushi Akita + akita&tobila.jp +40970 + LRD23 Consulting LLC + Christopher A Lowde + lowdeca&lrd23consulting.com +40971 + Audioptic Trade Services + GAUBERT Alexandre + alexandre.gaubert&audioptic.fr +40972 + SFR + Pedro Gasalho + pedro.gasalho&sfr.com +40973 + Freeside Atlanta + Alan Fay + admin&freesideatlanta.org +40974 + Kim Johnsson + Kim Johnsson + kimjohnsson&gmail.com +40975 + Applied Communication Sciences + Aileen Cheng + acheng&appcomsci.com +40976 + Videotrec Industrial Co. Ltd. + zhuyouming + zhuyouming1988&gmail.com +40977 + Inter-M + Wonho, Lee + whlee&inter-m.com +40978 + AZElectronic + Antoine DUMONT + antoine.dumont.az&gmail.com +40979 + Newell Rubbermaid - DYMO Corp. + Rui Meng + rui.meng&dymo.com +40980 + creatale GmbH + Sebastian Gepperth + iana&creatale.de +40981 + Facebook, Inc. + Neal Poole + iana-assign&fb.com +40982 + ROARING FORK SCHOOL DISTRICT + David McGavock + trcheshire&rfsd.k12.co.us +40983 + Trinity Solutions, Inc. + Sachiyuki Fujimura + fujimura&3nity-sol.com +40984 + Fastback Networks + Dan Kiewlich + dan.kiewlich&fastbacknetworks.com +40985 + Clearview Systems, LLC + Jason Bean + admin&clearviewsystems.com +40986 + JVL Ventures / Isis Mobile Commerce + Houston Hopkins + houston.hopkins&paywithisis.com +40987 + Quality Software Works + Ivan Montoro + iana&qswks.com +40988 + Justin Obernier + Justin Obernier + justin.obernier&gmail.com +40989 + GLsun Science and Tech Co.Ltd + pujinhuang + Glsnmp&ymail.com +40990 + Domain Name Services + Ed Pascoe + ed&dnservices.co.za +40991 + tedox KG + Torge Gipp + torge.gipp&tedox.de +40992 + vijay. Pvt. 
Ltd + Vijay Rajah + vijayrajah&gmail.com +40993 + HD Vest Inc. + Ken Vetter + ken.vetter&hdvest.com +40994 + LLC "AIS Gorod" + Roman Vavilov + tenno&aisgorod.ru +40995 + DLG Automacao Industrial Ltda + Ronaldo Tomazeli Duarte + ronaldo&dlg.com.br +40996 + SilverRail Technologies, Inc. + Will Phillipson + will&silverrailtech.com +40997 + Armonti Digital Services + Armin Paffrath + paffrath&armonti.de +40998 + DomaCom Pty Ltd + John Bird + john.bird&domacom.com.au +40999 + Ribose Inc + Ronald Tse + ronald.tse&ribose.com +41000 + mykarte.com + Ryuji Ito + ryujii&mykarte.com +41001 + Cuattro, LLC. + Matthew Probasco + mprobasco&cuattro.com +41002 + Biotest AG + Joachim Lorenz + joachim_lorenz&biotest.de +41003 + Workers' Compensation Board - Alberta + Wallace Wang + wallace.wang&wcb.ab.ca +41004 + Clackamas County + Julio Cabrera + jcabrera&clackamas.us +41005 + IntelliBatt, Inc. + Doug Sheppard + doug&intellibatt.com +41006 + Ineluctable Designs + Shane Spinuzzi + shane&ineluctabledesigns.com +41007 + IMBC GmbH + Frank Dornheim + frank.dornheim&imbc.de +41008 + EMTS Telecom Services Ltd. + Erik Melki + melki&012.net.il +41009 + Info-M Ltd. + András Czihó + cziho&info-m.hu +41010 + FlyingVoice Technology Ltd. + liuyangxin + liuyangxin&flyingvoice.com +41011 + LanPro Inc. + Andreas Loffler + lofflera&lanpro.com +41012 + tmakinen.com + Tapio Makinen + sysadmin&tmakinen.com +41013 + Miyagi University of Education + UGAWA, Yoshihiro + center&ipc.miyakyo-u.ac.jp +41014 + Attam Ltd + Wayne Marrison + wayne.marrison&attam.co.uk +41015 + Intelligenza + Wayne Marrison + wayne.marrison&intelligenza.co.uk +41016 + Héonium SARL + Christophe CRIER + christophe.crier&heonium.com +41017 + EDV-Studio Stephan Konheiser + Stephan Konheiser + s.konheiser&edv-studio.de +41018 + WaldiNetwork Ltd. Home + Ing. Walter Hoehlhubmer + waldinetwork.home&liwest.at +41019 + Axians + Diego Nuevo + tech.admin&axians.es +41020 + GetOnline Ltd + Karl Pielorz + iana-pen&getonline.co.uk +41021 + Gensicke + Sebastian Gensicke + sg2004&gensicke.de +41022 + van Rens IT Consultancy + Erwin van Rens + info&vanrens.nl +41023 + BAITS Global + Georges Haddad + georges.haddad&baitsglobal.com +41024 + BG Unfallklinik Murnau + Erwin Kinateder + erwin.kinateder&bgu-murnau.de +41025 + Lorillard Tobacco Company + Stephen Norat + stephen.norat&capgemini.com +41026 + DealerTrack + Ronald Buglione + ronald.buglione&dealertrack.com +41027 + Direct Line Insurance Group PLC + Ashish Surti + ashish.a.surti&directlinegroup.co.uk +41028 + Delphix Corp + Peng Dai + peng.dai&delphix.com +41029 + The Orvis Company Inc. + Armand Boudreau + Boudreaua&orvis.com +41030 + JAI Inc. + Avinash V. Uppuluri + au&jai.com +41031 + Axeos Services B.V. + Max van Biezen + mvanbiez&axeos.nl +41032 + VeriCom AB + Fredrik Nilsson + fredrik&vericom.se +41033 + Amor Group + Matthew Wicks + matthew.wicks&amorgroup.com +41034 + Systems With Intelligence Inc. 
+ Fausto Della Rovere + fausto.dellarovere&systemswithintelligence.com +41035 + TM Systems, LLC + Anna Homyakova + tms&tm-sys.ru +41036 + Corero Network Security + Scott Barvick + scott.barvick&corero.com +41037 + Edgepoint IT Services + Thomas Schweizer-Bolzonello + eusupport&edgepoint.fr +41038 + Gridcore AB + Richard Säfström + richard&gridcore.se +41039 + adidas AG + Eric Gunter + eric.gunter&adidas-group.com +41040 + Scientific & Research Center Epsilon + Aleksei Lukin + alukin&ntc-epsilon.com +41041 + Fairpersonal GmbH + Bernhard Hess + bernhard.hess&fairpersonal.de +41042 + tinNet - Lukas Barth + Lukas Barth + mail&tinloaf.de +41043 + Radius, LLC + Michael Lopez + mlopez&radiustoday.com +41044 + SohnTech Solutions, LLC. + Stephen Sohn + ssohn&sohntechsolutions.com +41045 + Rover Apps, LLC + Jeff Garbers + jgarbers&roverapps.com +41046 + Driven Technical Solutions + Jonque Washington + jonque&driventechnical.com +41047 + Sestek Communications Inc. + Bill Lin + bill&sestek.com.tw +41048 + NETIFY + Olivier LE CAM + olc&netify.fr +41049 + Erudio Solutions + Luca Filipozzi + noc&erudio.ca +41050 + Raspmon + Luke Niland + beakersoft&gmail.com +41051 + MBIA Inc. + Jake Kim + jake.kim&mbia.com +41052 + IntraFind Software AG + Bernhard Messer + bernhard.messer&intrafind.de +41053 + musicute project - Rouven Raudzus + Rouven Raudzus + Rouven.Raudzus&gmx.de +41054 + Dinamo Networks + Enilton Antonio do Nascimento Junior + eniltonj&dinamonetworks.com +41055 + Esquisse Software Studio + Alexander Konnen + alex&pikkatech.eu +41056 + telco Management & Service GmbH + Bernhard Hess + bernhard.hess&telco-ms.de +41057 + FANCYRABBIT.ORG + Huaping Huang + sysadmin&fancyrabbit.org +41058 + Loggly Inc. + Philip O'Toole + philip&loggly.com +41059 + Petromoc + Boris Wiethoff + borisw&petromoc.co.mz +41060 + M-D Technology + Haixing Li + aner.li&md-technologies.com +41061 + SOLUTI Certificacao Digital + Reinaldo Borges + reinaldo.borges&acsoluti.com.br +41062 + NuScale Power LLC + Chris Bates + cbates&nuscalepower.com +41063 + Asynchrony, Inc. + Nate McKie + nate.mckie&asynchrony.com +41064 + CheckMySystems Ltd. + Darren Rewston + darren&checkmysystems.com +41065 + Alea Iacta Est + Mark Rogaski + admin&aie-guild.org +41066 + Shaanxi Tians Information & Technology Co.,Ltd. + Xu aiming + xuaiming&tianstech.com +41067 + Quantitative Risk Research, S.L. + Raul Wegmann + it&qrr.es +41068 + Shenzhen RHC technology Co.,Ltd + zhou li hua + zhoulihua&runhc.com +41069 + insystem + Oleg Morozov + byom&insystem.ru +41070 + WhereGroup Gmbh & Co. KG + Karim Malhas + karim.malhas&wheregroup.com +41071 + Servicios Electrónicos Universitarios, S.A.S. + Xavier Vila + xvila&firmaprofesional.com +41072 + Domogik + Maikel Punie + maikel.punie&gmail.com +41073 + Openreach + Mike Williamson + mike.williamson&openreach.co.uk +41074 + voestalpine group-IT GmbH + Thomas Schwarz + thomas.schwarz&voestalpine.com +41075 + Edge S.A. + Anibal Acosta + anibal.acosta&edge.com.py +41076 + The Probe Project Ltd + Jan-Erik Eriksson + info&probeproject.tv +41077 + iOWA AB + Anders Wittfeldt + anders.wittfeldt&iowa.se +41078 + Apteka Alpi Farm + Dragan Momcilovic + dragan_momcilovic&alpifarm.mk +41079 + autocrash.info + Alexander Kuznetsov + acca&cpan.org +41080 + IT-EASY Berlin + Sylvio Schilling + sylvio.schilling&it-easy.biz +41081 + LLC Okey + Prokhorov Sergey + sergey.prokhorov&okmarket.ru +41082 + Altarix-Samara + Vladimir Sidorov + vladimir.sidorov&altarix.ru +41083 + Shanghai Ruiyuan Information Technologies Co.,Ltd. 
+ qianpingkang + qianpingkang&ibs-tech.com.cn +41084 + ALTELL Ltd. + Alexander Burov + burov&altell.ru +41085 + Isida-Informatica, Ltd + Denis Kachurin + master&isida.by +41086 + Trafikselskabet Movia + Lars Panton + lbp&moviatrafik.dk +41087 + Automation NV/SA + Kris Clottemans + kris.clottemans&automation.be +41088 + TCUBE + Pascal Dietsche + tcube&laposte.net +41089 + Neptune Internet Service + Arturo Montes + iana&neptune.fr +41090 + Shanghai HEADING Information Engineering Co., Ltd. + Li Ximing + liximing&hd123.com +41091 + Libra Srl + Paolo Frizzi + paolo&libra.it +41092 + Dynamic Systems, Inc. + Travis Castor + travis.castor&dynamicsystemsinc.com +41093 + Bareos GmbH & Co. KG + Marco van Wieringen + marco.van.wieringen&bareos.com +41094 + iS5 Communications Inc. + Gary Boardman + garyboardman&is5com.com +41095 + Evrisko Systems + Paul Buenger + pbuenger&evriskosys.com +41096 + Metadosis GP + Antonios Kanouras + iana&metadosis.gr +41097 + Magnus LTD + Fyodor Ustinov + ufm&magnus.net.ua +41098 + ESPEC CORP. + Masahiko Watanabe + m-watanabe&espec.co.jp +41099 + COMPTA - EQUIPAMENTOS E SERVIÇOS DE INFORMÁTICA S.A. + JORGE ESTEVES + jorge.esteves&compta.pt +41100 + Fon Wireless LTD + Gonzalo Becares + gonzalo.becares&fon.com +41101 + Simply Business + James Condron + james.condron&simplybusiness.co.uk +41102 + NS Solutions Corporation + Katsuhisa Inagaki + inagaki.katsuhisa&ns-sol.co.jp +41103 + Global Radio Services Limited + Richard Carpenter + noc&as39202.net +41104 + Transway Ltd. + Koby Michelsohn + kobym&transway.co.il +41105 + Lognet Billing LTD + Rost Bitterlikh + rost.bitterlikh&lognet-systems.com +41106 + Institute of Photonics and Electronics AS CR, v.v.i. + Martin Havlicek + it&ufe.cz +41107 + Transact Technologies Inc + Donald E. Brooks + dbrooks&transact-tech.com +41108 + Mill Software + Nick Bonnett + nick.bonnett&millsoftware.com.au +41109 + TriVu Media + Michael Sullivan + mike&trivu.tv +41110 + Good Dog Design + James Brooks + james&gooddogdesign.com +41111 + 66 VISION TECH CO.,LTD + Zuoping Tan + tech&66vision.com +41112 + Ubiquiti Networks, Inc. + Kestutis Barkauskas + keba&ubnt.com +41113 + Barnfind Technologies AS + Stian Sebastian Skjelstad + stian&barnfind.no +41114 + S2 Factory, Inc. + Jun Kuriyama + iana-pen&s2factory.co.jp +41115 + Glarner Kantonalbank + Luchsinger Ralf + isc&glkb.ch +41116 + DEBES ENGINEERING + Klaus Debes + info&debes-engineering.de +41117 + Richard Wolf GmbH + Muhammed-Ali Findik + muhammed-ali.findik&richard-wolf.com +41118 + Stadt Duisburg + Marc Stermann + marc.stermann&duit.de +41119 + 3 Stack Technologies + James Waite + james&3stackcloud.com +41120 + CMosaix + Daniel Batkilin + danielzeev&gmail.com +41121 + Terminal plus, LLC + Rudolf Zolotov + zrr&tdrusintel.ru +41122 + Pindrop Security + Lamar Willis + lamar&pindropsecurity.com +41123 + Sumo Logic, Inc. + Yongxing Wang + yongxing&sumologic.com +41124 + KfW Bankengruppe + Erid Guga + pki&kfw.de +41125 + Mystic Video, Inc. + Som Ghosh + som&mysticvideo.net +41126 + SFERA + Alexey Klinov + alexey.klinov&sfera-co.ru +41127 + Linux Router Systems + Lucas Nishimura + lucas.nishimura&gmail.com +41128 + Knorst Consulting LTDA + Alexandre Eduardo Knorst + knorst&gmail.com +41129 + WideNet + Leonardo Rizzi + l.rizzi&wide-net.org +41130 + Mint Medical GmbH + Johannes Kast + support&mint-medical.de +41131 + LUGO TERMINAL SPA + Paolo Righini + ict&lugoterminal.com +41132 + Västra Götalandsregionen + Christer Nygren + christer.nygren&vgregion.se +41133 + Alinean, Inc. 
+ AZ Flynn Thomas + ops&alinean.com +41134 + Ronal AG + Dirk Wacker + itsupport&ronal.ch +41135 + Salsbury Engineering, Inc. + Jonathan Gauthier + jon&salsburyeng.com +41136 + Ryan Spinuzzi + Ryan Spinuzzi + ryan&ryanspinuzzi.com +41137 + Autotoll Limited + Frank Chan + help.it&autotoll.com.hk +41138 + 10gen, Inc. + Akshay Kumar + akshay&10gen.com +41139 + NextGenTel AS + Eivind Olsen + hostmaster&nextgentel.com +41140 + nplawes + Nick Lawes + iana&nplawes.com +41141 + Southeast Health + Tammy Goldsmith + tgoldsmith&sehealth.org +41142 + Instant Solutions LTDA + Pedro Howat Rodrigues + pedro&instant.com.br +41143 + Goodloe Consulting Group, LLC + Jason Goodloe + jason.goodloe&goodloegroup.com +41144 + Initra d.o.o. + Goran Cunjak + goran.cunjak&initra.com +41145 + Industrial Control Communications, Inc + Felickao Yang + fyang&iccdesigns.com +41146 + VertiCloud + Tucker Dewitt + ops&verticloud.com +41147 + The Southampton Hospital Association + William Bifulco + wbifulco&southamptonhospital.org +41148 + Binary Software Indy, llc + Kevin Glinski + kevin&binarysoftwareindy.com +41149 + Shandong New Beiyang Information Technology Co., Ltd. (SNBC) + Chen Qiuyi + chenqiuyi&newbeiyang.com +41150 + Acelis SARL + Aurélien Geromboux + aurelien&acelis.net +41151 + dev.ncds.eu + Dominik Sułkowski + dominik&ncds.eu +41152 + Cyfrowy Polsat SA + Przemyslaw Dlugosz + pdlugosz&cyfrowypolsat.pl +41153 + Status Nobilis SA + Fabio Scaccabarozzi + fs&statusnobilis.com +41154 + trojniak.net + Rafal Trojniak + pen.iana.org&trojniak.net +41155 + Oberlin College + Barron Hulver + Barron.Hulver&oberlin.edu +41156 + Solmate Group + Hans Donner + support&solmategroup.com +41157 + JEFFREY WALTER HEISEY + Jeffrey Heisey + jeff_heisey&hotmail.com +41158 + Special Services Division, NCISS + Colonel Ewen + NCISSDirectorate&gmail.com +41159 + Media Group LLC + Igor Blinov + supergarikk&gmail.com +41160 + Copper Systems Ltd + Paul Wearne + paul.wearne&copper-systems.co.uk +41161 + Bentsen Electronics + Morten Bentsen + spam&bentsen.org +41162 + Software Daten Service + Wolfgang Brandl + wolfgang.brandl&sds.at +41163 + INVAP S.E. + Pérez Ghiglia Santiago Alfonso + sperez&invap.com.ar +41164 + Tulsa Technology Center + Ryan Dahlgren + mib&tulsatech.edu +41165 + SKYERA Inc + Suresh Dussa + sdussa&skyera.com +41166 + streibelt.net + Florian Streibelt + iana-pen&streibelt.net +41167 + Pomona Valley Hospital Medical Center + Cris Tran + cris.tran&pvhmc.org +41168 + Excelitas + Thomas Osbrink + thomas.osbrink&excelitas.com +41169 + Scorpio IT + Christian Wittmer + info&scorpio-it.net +41170 + NOVXTEL + Emmanuel CHAUVEAU + e.chauveau&novxtel.com +41171 + MECC CO., LTD. + Masaya Suzuki + m-suzuki&mecc.co.jp +41172 + HEC Paris + Marcelo Moreira + sysrxadm&hec.fr +41173 + Dream Chip Technologies GmbH + Stefan Stürke + mibs&dreamchip.de +41174 + Mount Vernon Hospital Physics (UK) + Gerry Lowe + gerry.lowe&nhs.net +41175 + Aartesys AG + Andreas Grossenbacher + andreas.grossenbacher&aartesys.ch +41176 + Energy X Systems Ltd. + H. R. Coles + ianaoid.1&energy-x.com +41177 + Ocean Interactive (Beijing) Technology Co. Ltd + Shaolin Zheng + zhengshaolin&omusic.cc +41178 + Proftal Oy + Vesa Haimi + contact&proftal.com +41179 + DirectTrust.org Inc. + Scott Rea + Scott.Rea&DirectTrust.org +41180 + Process Query Systems, LLC + Vincent Berk + vberk&proquesys.com +41181 + conversis technologies GmbH + Lukas Loesche + l.loesche&conversis.de +41182 + Digital Imaging Lab. 
+ Shunsaku Otsuka + otsuka&diljapan.com +41183 + Contim Automação de Sistemas Ltda + Thiago Ragozo Contim + thiago&contim.eng.br +41184 + FreedomVoice Systems + Jeremiah Gowdy + jeremiah.gowdy&freedomvoice.com +41185 + Norwia AS + Baard Nossum + bnossum&norwia.no +41186 + QuantuMatriX Technologies + Matthew Schultz + matt&qmxtech.com +41187 + Video Tech Laboratories + HIroshi Fujimoto + fujimoto&videotech.co.jp +41188 + University of California Hastings College of the Law + Ronald Proschan + proschan&uchastings.edu +41189 + Sika Informationssysteme AG + Oliver von Wartburg + vonwartburg.oliver&ch.sika.com +41190 + Invensys Rail + Fernando Javier Navarro Quiles + fernando.navarro&invensys.com +41191 + Educastream + Nicolas Le Manchet + nicolas.lemanchet&educastream.com +41192 + VICTORY Standards Support Office + Trent Styrcula + trent.styrcula&us.army.mil +41193 + TELETOR, LLC + Alexandr Suranov + support&teletor.ru +41194 + Piratenpartei Deutschland Landesverband Saarland + Jan Niklas Fingerle + jan.niklas.fingerle&piratenpartei-saarland.de +41195 + Imecon Engineering srl + Luigi Guido + l.guido&imecon.it +41196 + SimpliVity Corporation + Owen Carr + owen.carr&simplivity.com +41197 + Epicard SA + Nicolas Deurin + deurin&epicard.ch +41198 + Ryan J. Geyer + Ryan J. Geyer + me&ryangeyer.com +41199 + OPTEYA SAS + Yann Droneaud + ydroneaud&opteya.com +41200 + Vision Microsystems Co.,ltd. + Junius Liu + Junius.liu&visionmc.com +41201 + ZeroSpace ICT Services B.V. + F.J. Bogman + info&zerospace.nl +41202 + Intigua Inc. + Oran Epelbaum + oran.epelbaum&intigua.com +41203 + Swiss Bankers Prepaid Services AG + Eigenmann Marc + it&swissbankers.ch +41204 + AVASAD + Colin Edwards + colin.edwards&avasad.ch +41205 + Colégio Técnico Industrial de Santa Maria (CTISM) + Douglas Santos Bregolin + dosbre&ctism.ufsm.br +41206 + Travelex Limited + Deepak Pasi + deepak.pasi&travelex.com +41207 + Yospace Technologies Ltd + Adrian Hungate + systems&yospace.com +41208 + Gorodissky & Partners, Ltd. + Alexander Bordachenkov + BordachenkovA&gorodissky.ru +41209 + Qpay Inc. + Rami Leshem + Rami.Leshem&qpaynet.com +41210 + Santa Fe College + John Caldwell + john.caldwell&sfcollege.edu +41211 + CherryRoad Technologies Inc. + Thomas Slattery + tslattery&cherryroad.com +41212 + Providigm + Erinn Looney-Triggs + erinn&providigm.com +41213 + bitagentur GmbH & Co. KG + Maximilian Eidenschink + max&bitagentur.de +41214 + Cloud Concepts + Jurgen Van de Perre + jurgen&cloudconcepts.be +41215 + INFOMARK CO., LTD. + HYUNCHUL JUNG + harry.jung&infomark.co.kr +41216 + Monitise Group Limited + Grant Mitchell + grant.mitchell&monitise.com +41217 + Xcision Medical Systems, LLC + Xiaofeng Zhao + xiaofeng.zhao&xcision.com +41218 + Vera Networks, LLC + Samuel D Ware + sam.ware&veranetworks.com +41219 + Lytzen IT A/S + Christian Ejlertsen + ce&lytzenit.dk +41220 + PS-IT-Consulting + Patrick Szameitat + info&ps-it-consulting.eu +41221 + IMDIT GmbH + Marc Bergner + info&imdit.de +41222 + Jnsys + Jay Kim + jay&jnsys.co.kr +41223 + DCT DELTA AG (formerly 'ASC-TEC AG') + Wojciech Eliasz + w.eliasz&dct-delta.de +41224 + MobileVaults, Inc. 
(formerly 'CloudVaults, LLC') + Gyle Iverson + Gyle&MobileVaults.com +41225 + JSC Volga + Alexander Kollegaev + kaj&volga-paper.ru +41226 + Ennetix + Jonathan Symons + jon&ennetix.com +41227 + Voice Conferencing Systems for Mission Control Centers (vocsmcc) + Markus Toepfer + openmail&vocsMCC.com +41228 + CoreMeda + Eray Tavukcu + etavukcu&gmail.com +41229 + Friulia SPA + Massimiliano Viola + massimiliano.viola&friulia.it +41230 + Usharesoft + german parente + iana&usharesoft.com +41231 + IntelliMagic B.V. + Gilbert Houtekamer + administratormail&intellimagic.net +41232 + ZFSoft + Francesco Zerra + francesco.zerra&gmail.com +41233 + HubSpot, Inc. + Jason Webber + domain-groups&hubspot.com +41234 + RNelson Consulting + Robert Nelson + iana&rnelson.ca +41235 + Schalter Eletrônica + Peter Gabriel + peter&schalter.com.br +41236 + Telepoint Global Hosting Services, LLC. + Russell Bierschbach + rbierschbach&telepointglobal.com +41237 + Great Lakes Data Systems Inc. + Saulius Vabalas + savas&glds.com +41238 + Shane Spinuzzi + Shane Spinuzzi + shane&shanespinuzzi.com +41239 + Charles County Government + Matthew Goddard + goddardm&charlescountymd.gov +41240 + Mijares Consultoría y Sistemas SL + Alberto Mijares + amijares&mcs.com.ve +41241 + ARC Document Solutions, Inc + Chaitanya Garlapati + chaitanya&e-arc.com +41242 + Shanghai PPLive Media Tech. Co.,Ltd + Wang kun + morhaimeren&pptv.com +41243 + Desoma GmbH + Christoph Knott + ck&desoma.de +41244 + ABAK Systèmes + Jean Luc Levionois + levionnois&abaksystemes.fr +41245 + Sadara Internet + Dharmacari Sadara + admin&sadarainternet.com +41246 + 2600hz + Karl Anderson + sales&2600hz.com +41247 + Rakennusliitto ry + Jukka Kylliäinen + jukka.kylliainen&canon.fi +41248 + IETFNG.org + Nathaniel Filardo + GEMSE3TED2LEIPE02M01LVAPATUNTPVN&cmx.ietfng.org +41249 + Silent Circle LLC + Louis Kowolowski + louisk&silentcircle.com +41250 + Globosat Programadora LTDA + Marcelo Rezende Módolo + modolo&globosat.com.br +41251 + Simple Software Solutions + John Battersby + johnbatty&ssspgh.com +41252 + Wicked Software + Tony Roy + tonyroy&wcksoft.com +41253 + Millry Telephone Co + Gene Brown + gene&millry.com +41254 + Wroclaw School of Applied Informatics "Horyzont" + Jarek Szablowski + admin&horyzont.eu +41255 + Maui Systems Ltd + Marty Lee + contact&maui-systems.co.uk +41256 + NormanStudios + Fabien Charbonnier + systeme&normanstudios.fr +41257 + SignOn + James Burton + james.burton&signon.org.uk +41258 + Jason R. Gonsalves + Jason R. Gonsalves + jason&gonsalves.ws +41259 + Lullabot, Inc. + Tim McDorman + admin&lullabot.com +41260 + Wheatstone Corporation + Dominic Giambo + dominic.giambo&wheatstone.com +41261 + Vectra Networks Inc. (formerly 'TraceVector') + Greg Rocha + support&vectra.ai +41262 + Institut National de Physique Nucléaire et de Physique des Particules + Jean-Pierre Cachemiche + cachemi&cppm.in2p3.fr +41263 + Nutanix Inc. + Binny Gill + binny&nutanix.com +41264 + Kent and Essex Police IT Directorate + Mark Williams + mark.williams&kent.pnn.police.uk +41265 + infOpen + Alexandre Chaussier + alexandre.chaussier&infopen.pro +41266 + Igor Posledov + Igor Posledov + i.posledov&gmail.com +41267 + Benbro Electronics Pty Ltd + John Bennett + pben&ozemail.com.au +41268 + Elsicom Ltd. + Roman Burdin + roman&elsicom.ru +41269 + Dr. V.M.Ponzoni ICT Outsourcing + Vittorio Massimo Ponzoni + ponzoni&linux.it +41270 + Vibicom Communications Inc. + John Martin + john&vibicom.com +41271 + Ondot Systems, Inc.
+ Kannan Konath + kannan&ondotsystems.com +41272 + Cascade Microtech, Inc. + Paul Curtis + paul.curtis&cmicro.com +41273 + Lakehead University + Timo Miettinen + tomietti&lakeheadu.ca +41274 + Bluwan S.A + Cedric Gaston-Bellegarde + cbellegarde&bluwan.com +41275 + Kapsch Telematic Services Sp. z o.o. + Arkadiusz Sobota + arkadiusz.sobota&kapsch.net +41276 + BCi Limited + Pierre Drezet + pierre.drezet&bci.eu.com +41277 + Computer System Corp. + KEIICHI TAMURA + tamura&computer-system.jp +41278 + Stateless Networks + Rob Meadows + rob&statelessnetworks.com +41279 + Danish Broadcasting Corporation + Stefan B. Christensen + sebc&dr.dk +41280 + Neul Limited + Andrew Dawson + Andrew.Dawson&neul.com +41281 + tols.org + Marco van Tol + snmp&tols.org +41282 + SpeechStorm Ltd. + Michael Orr + iana&speechstorm.com +41283 + IEX Group, LLC + PEN Administrator + pen_admin&iextrading.com +41284 + Celestech, Inc + Skip Penny + skip.penny&celestech.com +41285 + Clemex Technologies Inc + Pierre-Steve Girard + psgirard&clemex.com +41286 + The Libreswan Project + Tuomo Soini + team&libreswan.org +41287 + Red Bull GmbH + Michael Gauss + michael.gauss&at.redbull.com +41288 + J.D. Irving, Limited, IT Division + Kevin McDonough + McDonough.Kevin&jdirving.com +41289 + Lanaccess Telecom + Xavier Oliva + xavier.oliva&lanaccess.es +41290 + o3, d.o.o. + Gorazd Rogelj + gorazd&ooo.si +41291 + LEO Pharma A/S + Kurt Bernhardt + kurt.bernhardt&leo-pharma.com +41292 + LSU Health System + John Mire + john.mire&lsuhs.edu +41293 + Taylor Innovations, LLC + Charles Taylor + taylorc&tinnov.net +41294 + Datafrog + Chen Shuai + chenshuai&data-frog.com +41295 + Andalusia Group + Shaaban Refay + Shaaban.aly&andalusiagroup.net +41296 + E-Gate Communications Inc. + Gary Giesen + ggiesen&egate.net +41297 + Nextech Co.,Ltd + Tatsumi Ueda + t-ueda&nextech.co.jp +41298 + BRGFrauengasse + Stefan Hagmann + webmaster&bgbaden-frauen.ac.at +41299 + Infodom d.o.o. + Damir Kus + infodom-sw&infodom.hr +41300 + Deganius + Christof Thalhofer + info&deganius.de +41301 + CSF Scientific Computing Core + Andras Aszodi + andras.aszodi&csf.ac.at +41302 + Unified Technology Ltd + Phil Robinson + philrobinson&unified-technology.co.uk +41303 + Six Degrees Managed Data Ltd + Timothy Arnold + timothy.arnold&6dg.co.uk +41304 + QiNet SRL + Marco Poet + marco.poet&qinet.it +41305 + Gehirn-Mag.Net (formerly 'Schoch-IT UG') + Steffen Schoch + steffen.schoch&schochit.de +41306 + Columbus College of Art & Design + Jeremiah Shifflet + Licensing&ccad.edu +41307 + LDS Technology Group + DeYung Le + deyung&ldstg.com +41308 + ALCO Aircraft + John Hemphill + jhemphill&alco.aero +41309 + Inter Dimensional Space Port + HRH James Rook + royaladmin&inter-dimensional-space-port.net +41310 + dwApi, Ltd. + Dennis Warner + dennislv&gmail.com +41311 + Missouri Western State University + Fred Nesslage + nesslage&missouriwestern.edu +41312 + Netscreens LTD. + Itay Pollak + itay.p&netscreens.tv +41313 + OLTRI LLC + Dmitry Kuzmin + dmitry.kuzmin&oltri.ru +41314 + A.E.Q. Aplicaciones Electrónicas Quasar + Miguel Sancho + msancho&aeq.es +41315 + Recording Enterprise Solution (R.E.S.) + Mr. Gabriele Bovo + gbovo&resnet.it +41316 + Velocimetrics Ltd + Bill Nokes + bill.nokes&velocimetrics.com +41317 + DAC Beachcroft LLP + Tom Bowyer + tbowyer&dacbeachcroft.com +41318 + Adaptavist + Dan Hardiker + dhardiker&adaptavist.com +41319 + Netbuilder S.r.l.
+ Andrea Soracchi + info&netbuilder.it +41320 + Diagnosoft + Donel Tani + donel.tani&diagnosoft.com +41321 + Commsky Technologies(HangZhou)Co.,Ltd. + Kong XiaoHai + skong&commsky.com +41322 + Time-O-Matic, Inc. + Carl Roth + carl.roth&timeomatic.com +41323 + Reid Hospital & Health Care Services + Tim Love + Tim.Love&ReidHospital.org +41324 + Centre de services partagés du Québec + Guy Painchaud + guy.painchaud&cspq.gouv.qc.ca +41325 + Uber Technologies, Inc. + Prakash Gopinadham + gprakash&uber.com +41326 + Dangerous Linux + Douglas Danger Manley + doug.manley&gmail.com +41327 + Radiology.io, Inc + Cody Ebberson + cody&radiology.io +41328 + blackned GmbH + Fred Schulz + fschulz&blackned.de +41329 + Soncatec Oy + Tuomas Harju + tuomas.harju&soncatec.com +41330 + Eurotoll + Alexis de Warren + alexis.dewarren&eurotoll.fr +41331 + NxtGrid Ltd + Ioannis Vlachos + info&nxtgrid.com +41332 + Fourth Watch Business Continuity Services LC + Lawrence D Landis + ldl&linux.com +41333 + ShenZhen Great Electronic Technology Co., LTD + John Xie + xie&greatele.com +41334 + Feldhaus - Uhlenbrock Sicherheit &Technik GmbH + Franz Josef Feldhaus + feldhaus&feldhaus-uhlenbrock.de +41335 + AMPEG GmbH + Peter Graf + info&ampeg.de +41336 + UEL - Universidade Estadual de Londrina + Fernando Favero + favero&uel.br +41337 + WingSystem Rst. inc. + Takashi Kurashima + kurashima&wing21.co.jp +41338 + Imagination Team + Larry Stein + larry.stein&imaginationteam.net +41339 + Duons + Dominique Poirier + Dominique.Poirier&duons.com +41340 + Friedrich Miescher Institute for Biomedical Research + Dean Flanders + dean.flanders&fmi.ch +41341 + Knobbe, Martens, Olson & Bear, LLP. + Howard Goble + hpg&kmob.com +41342 + IMAIOS SAS + Denis HOA + contact&imaios.com +41343 + Qihong Computer Science & Technology Co. Ltd + Shusheng Heng + simon&qihong.com.cn +41344 + FirmWin software technology Co. Ltd + LINJIE LI + li_lj&firmwin.com +41345 + Nectar Services Corporation + Zachary D Rowitsch + zrowitsch&nectarcorp.com +41346 + Certiwise + Benoit Malchrowicz + admin&certiwise.com +41347 + Comsys Bärtsch AG + Christoph Sigrist + cs&cbmail.ch +41348 + Ronyo + Jiří Hrivňák + hrivnak&ronyo.cz +41349 + Enigmedia + Carlos Tomas + info&enigmedia.eu +41350 + ITManx Ltd + Christian Salway + ccsalway&itmanx.com +41351 + FJM Security Solutions, LLC + Don Knopf + DonK.SS&Frontier.com +41352 + Paychex, Inc. + David Muench + dmuench&paychex.com +41353 + Waukegan Public Schools + David Weate + dweate&wps60.org +41354 + Parker Poe Adams and Bernstein, LLP + Tony Brock + netops&parkerpoe.com +41355 + Optic Cloud Technology Co,.Ltd + Li Yue + gr.route&gmail.com +41356 + Tufts Clinical and Translational Science Institute + Corey Zelinski + czelinski&tuftsmedicalcenter.org +41357 + Groupe Clarins + Philippe Rousselin + oid.admin&clarins.net +41358 + JSC «AMB Bank» + Ruslan Zarvanskiy + rzarvansky&ambbank.ru +41359 + Triax AS + Assil Chehade + ach&triax.dk +41360 + Prodrive Technologies Group B.V. + Information Technology + iana-pen-inquiries&prodrive-technologies.com +41361 + KYOS SA + Maxime Feroul + it&kyos.ch +41362 + Transport Department, HKSARG + Lai Man WONG + SE-IT-TTSD&td.gov.hk +41363 + Naunet Corporation + Michael Trausch + mbt&naunetcorp.com +41364 + MIRACLE Information Systems GmbH + Harald Plank + plank&miracle.at +41365 + Avon and Wiltshire Mental Health Partnership NHS Trust + Paul Monteith + paulmonteith&nhs.net +41366 + Citelum + Maxime Morelon + dsi.infra&citelum.com +41367 + A.T.WORKS, Inc.
+ Michiaki Abe + develop&atworks.co.jp +41368 + Nilsoft Janko Debenjak s.p. + Janko Debenjak + janko.debenjak&gmail.com +41369 + Burri IT Systems + Chris Burri + chris.burri&burrit.ch +41370 + JHome Consulting, Ltd. + Jose A. Dominguez + jad&jhome.com +41371 + 2ndQuadrant + Craig Ringer + craig&2ndquadrant.com +41372 + ConVista Consulting AG + Manuel Bermedo + support&convista.com +41373 + Seawind Sud America + Patrick McColl + pmccoll&rms.co.cl +41374 + Fachschaftsrat Elektrotechnik an der TU Dresden + Sebastian Wienforth + sebaw&fsret.de +41375 + Switching Power Inc. + David Kravitz + davek&switchpwr.com +41376 + Hivecast Corporation + Joe Abley + jabley&hivecast.net +41377 + IONHT CO.,LTD + Mingrui Qu + qumingrui&ionht.com +41378 + Rodnik SPE, Inc. + Alexey Noskov + alexey&rodnik.ru +41379 + Miros AS + Hein Gustavsen + hein&miros.no +41380 + KORUS Consulting SNG Ltd + Alex Zaharow + Alex-Zaharow&yandex.ru +41381 + CJSC NEC Neva Communications Systems + Sergey Leshkin + s.leshkin&nec.ru +41382 + Fundacion Social Camara Chilena de la Construccion + Juan M. Contreras + jcontreras&fundacioncchc.cl +41383 + DURR Systems GmbH + PFS R&D, Marius Schommer + ES_Software&durr.com +41384 + Zakład Usług Informatycznych OTAGO Sp. z o.o. + Rafał Voss + rafal.voss&otago.pl +41385 + Deep Sea Electronics plc + Scott Preece + scott.preece&dsedevelopment.com +41386 + SYBORG Informationssysteme b.h. OHG + Robert Lander + snmp.iana&syborg.de +41387 + Nest Labs + John A. Vink + jav&nestlabs.com +41388 + IRIS Analytics GmbH + Nikolaus D. Bayer + admin&iris.de +41389 + Smarsh Inc + Brent Jones + bjones&smarsh.com +41390 + Suzhou Anke Medical System Co., LTD + Weihua Cai + weihuacai&tom.com +41391 + OOO TFPK + Andrey Balakin + avb&smarket.ua +41392 + Livesport s.r.o. + Robert Vojcik + robert.vojcik&livesport.eu +41393 + Oh Leck! + Jan Niklas Fingerle + jan.niklas&fingerle.org +41394 + quarxConnect + Bernd Holzmüller + operations&quarxconnect.de +41395 + Codiac GmbH + Jan Moritz Meyer + jmm&codiac.de +41396 + V-Nova Ltd + Sam Littlewood + systems&v-nova.com +41397 + ARAG SE + Joachim Meier + joachim.meier&arag.de +41398 + Micaela Gasper LMT + Micaela Gasper + micaelagasper&gmail.com +41399 + Groupe Dynamite Inc. + JF. Gauthier + jgauthier&dynamite.ca +41400 + ECCEL CORP + Asdrubal Espitia + asdrubal&eccel.co +41401 + Northwest Savings Bank + Nathan Lindberg + nlindberg&nwbcorp.com +41402 + NMS Guru, Inc + Daniel L. Needles + guru&nmsguru.com +41403 + Apcera, Inc. + Phil Pennock + noc&apcera.com +41404 + Rayan Roshd Electronic + Alireza Taghavi + taghavi&rayroshd.com +41405 + FrontRange Solutions + Frank Tadman + frank.tadman&frontrange.com +41406 + focom limited + Andy Miller + andy.miller&focom.com +41407 + Commune de Blonay + Jean-Marc Guex + vgt&blonay.ch +41408 + Strix d.o.o. + Darija Tadin-Đurović, dipl. ing + dtadin&strix.hr +41409 + CDJEM + Chris Brouwer + lt.brouwer&cdjem.nl +41410 + NTS Netzwerk Telekom Service AG + Evelyn Riha + evelyn.riha&nts.eu +41411 + ThinkSkink S.A. 
de C.V + Valentin Petre + vali&thinkskink.com +41412 + Innovative Technical Decisions LTD + Lev Vanyan + lev.vanyan&xcs.com.ua +41413 + Xetawave, LLC + Robert Campbell + robert&xetawave.com +41414 + Systeme-U (GIE Iris) + Kowalski Cédric + supervision&systeme-u.fr +41415 + Lucile Packard Children's Hospital at Stanford + Sreedhar Madullapalli + Smadullapalli&lpch.org +41416 + Futura Retail Solution AG + Klaus Pittig + klaus.pittig&futura4retail.com +41417 + rosemann software GmbH + Carsten Rosemann + info&rosemann-software.de +41418 + Toopher, Inc + Drew Shafer + drew&toopher.com +41419 + Schlenkermann + Philipp Schlenkermann + iana&schlenkermann.de +41420 + Comunicación y Tecnología Araos LTDA + Alvaro Araos + alvaro&araos.cl +41421 + Arizona Engineered Products LLC + Chuck Amy + camy&azep.us +41422 + Vello Systems Inc. + Sudhir Dhankhar + sudhir&vellosystems.com +41423 + Horasphere Inc. + Lou Simard + lou.simard&horasphere.com +41424 + National Computer Board + Vyankoj Mulloo + vmulloo&goc.gov.mu +41425 + iQsim + Thierry Sudre + sudre&iqsim.com +41426 + Flexlab Ltd. + Wagan Sarukhanov + wagan&flexlab.ru +41427 + Benbria + Peter Menhart + pmenhart&benbria.com +41428 + Otter Tail Power Company + Thomas Obowa + tobowa&otpco.com +41429 + InCoax Networks AB + Thomas Svensson + thomas.svensson&incoax.com +41430 + New Zealand Ministry of Business, Innovation and Employment + Tony Murray + tony.murray&mbie.govt.nz +41431 + Tahiti Nui Arena + Leonard TAVAE + abel&tna.pf +41432 + Bernhard Czech + Bernhard Czech + info&bernhard-czech.de +41433 + GROWMARK, Inc. + Scott Bross + sbross&growmark.com +41434 + Rolamasao.org + Noel Torres + envite&rolamasao.org +41435 + Smart Associates Limited + Huw Ringer + Huw&smart-associates.biz +41436 + Allwin Telecommunication Co., Ltd. + Zhenghua Du + allwintel&126.com +41437 + Geoffroy Gramaize + Geoffroy Gramaize + geoffroy.iana&gramaize.eu +41438 + HITOTEK Co.,Ltd + Evane Yuan + ht_vanyuan&163.com +41439 + Moscow Psychological and Social University + Alexandr Zapasna + zapasna&mpsu.ru +41440 + Stabilus GmbH + Mark Krämer + mkraemer&stabilus.com +41441 + Tohoku University + Futoshi Arima + arima&bureau.tohoku.ac.jp +41442 + Berger Gunkel IT Systeme GmbH + Andreas Berger + aberger&berger-gunkel.de +41443 + Evotope AS + Reidar Jortveit + post&bitfrost.no +41444 + Syslore Oy + Juha Sadeharju + info&syslore.com +41445 + Broadcast Over cellular + Michel Matsumoto + michel_is_here&yahoo.com +41446 + APEX Medicus UAB + Mindaugas Ziemelis + mindaugas&apex.lt +41447 + INGECOM + Daniel Inaebnit + inaebnit&ingecom.ch +41448 + Electronic Tolling Société Habilitée fournissant un service de Télépéage (toll service provider) Front End + Michael Kraftl + michael.kraftl&siemens.com +41449 + Teranga-Software + Germain PIGEON + germain.pigeon&teranga-software.com +41450 + SheepDip Project + Mike Farnworth + g8jci&users.sourceforge.net +41451 + Candid Color Systems, Inc. + Daryl Simic + daryl&candid.com +41452 + LUISLAIN.COM + Luis Lain + luislain&luislain.com +41453 + Decura IM LLP + Dan Atkinson + itservices&decuragroup.com +41454 + Globig Consulting + Mathias Globig + mathias&globig-consulting.de +41455 + International Radio and Electronics Corporation (formerly 'Chrisso Technologies, LLC') + Don Pettifor + dpettifor&irec1.com +41456 + Tintri Inc. 
+ Rajiv Dharmadhikari + rajiv&tintri.com +41457 + Bauman Moscow State Technical University (BMSTU) + Ostrikov Sergey + ostrikov&bmstu.ru +41458 + Ensoft Ltd + Patrick Smears + patrick.smears&ensoft.co.uk +41459 + Cofely Zuid Nederland BV + Marco Mans + marco.mans&cofely-gdfsuez.nl +41460 + Bundesnotarkammer + Stefan Semmelroggen + itp&bnotk.de +41461 + ELMITEL d.o.o. + Matej Serc + matej.serc&elmitel.com +41462 + Actiontec Electronics Inc. + Brian Henrichs + bhenrichs&actiontec.com +41463 + Air Internet Service Co.,Ltd. + Masahiro Tanaka + tanaka&air.ad.jp +41464 + MAXIOL Ltd. + Max Bagaev + root&maxiol.com +41465 + DiceLock Security, SL + Angel J. Ferré Herrero + aferre&dicelocksecurity.com +41466 + JAKE Pty Ltd + James Komninos + jamesk&jake.com.au +41467 + Shanghai iComhome Co., Ltd. + Wenjie Xu + woixwj&hotmail.com +41468 + Zakład Ubezpieczeń Społecznych + Grzegorz Chybowski + Grzegorz.Chybowski&zus.pl +41469 + Lactalis Danmark + Andreas Sobczyk + andreas.sobczyk&lactalis.dk +41470 + IMAGiNA Visión Artificial S. L. + Santiago Cabello + scabello&eimagina.net +41471 + Basware (formerly 'Basware Belgium') + Etienne Abe + etienne.abe&basware.com +41472 + PKP CARGO S.A. + Biuro Teleinformatyki + sekretariat.cci&pkp-cargo.eu +41473 + Overseas Private Investment Corporation + Jason Figley + ciso&opic.gov +41474 + Pascagoula School District + Brian Camp + bcamp&psd.ms +41475 + Shenzhen Tencent computer system Co., Ltd. + air yao + airyao&tencent.com +41476 + LightSquared + David Ishmael + david.ishmael&lightsquared.com +41477 + Computer Systems Institute + Vadim Golub + tss&csinow.com +41478 + ExtremFarm Ltd. + Simukov Pavel + pan&extrem-f.ru +41479 + Koenig & Bauer AG Werk Bielefeld + Joern Sacher + jsacher&kba-bielefeld.de +41480 + LA DETECTION ELECTRONIQUE FRANCAISE - DEF + Eddy GERVINET + eddy.gervinet&def-online.com +41481 + MedischeGegevens.nl + Dave Franken + d.franken&propredict.nl +41482 + Yubico + Domain Administrator + hostmaster&yubico.com +41483 + 4Safe Advanced Solutions + Florin BARBU + florin.barbu&4safe.ro +41484 + VidOvation - Moving Video Forward + Jim Jachetta + jimj&vidovation.com +41485 + FPX, LLC + Robert Streich + robert.streich&fpx.com +41486 + Sargent & Lundy, L.L.C. + Christine Berns + christine.m.berns&sargentlundy.com +41487 + Ostrovok.ru + Evgeny Kuryshev + mail-root&ostrovok.ru +41488 + City of Naperville + Brian Groth + grothb&naperville.il.us +41489 + Hakwerk IT B.V. + Arjan Hakkesteegt + arjan&hakwerk-it.nl +41490 + SUNGSAM CO., Ltd. + JUNGIN BYUN + jibyun&sungsam.co.kr +41491 + KDDI Web Communications Inc. + Yosuke Abe + tech&kddi-web.com +41492 + Mobtechonline + Alexey Stupnikov + astupnikov&m-t-o.ru +41493 + Bangkok Pacific Steel Co., Ltd. + Sutthisak Liwsakul + admin&bpssteel.com +41494 + Hydac Verwaltung GmbH + Eric Kleinpeter + pkiadmin&hydac.com +41495 + Northgas + Gasov Vladimir + gasov&northgas.ru +41496 + Dedicated Network Partners + Antti Viro + antti.viro&dnwpartners.com +41497 + Optimus S.A. + Ferran Girones + fgirones&optimus.cat +41498 + SCHOTT AG + Marcus Urban + marcus.urban&schott.com +41499 + Orgacomm international S.A. + Matthias Bacher + info&orgacomm.lu +41500 + faboleon bonaparte + faboleon bonaparte + faboleon.bonaparte&outlook.com +41501 + Afghanistan Root Certification Authority (ARCA) + Zmarialai Wafa + zmarialai.wafa&mcit.gov.af +41502 + Balogh SA + Frederic Tahon + webmaster&balogh-rfid.com +41503 + St. 
Tammany Parish Hospital + Cherie Faucheux + cfaucheux&stph.org +41504 + Les solutions Asentri inc + Denis Samson + denis.samson&asentri.com +41505 + ALLIANCE AVIA + Konstantin Nikolaev + techreports&alavia.ru +41506 + Maniilaq Association + Wayne Hogue + whogue&maniilaq.org +41507 + Cloud21 + Matthew Charles Wicks + matthew.wicks&cloud21.net +41508 + Rest Network + Shoko Ranni + it&RestCloud.net +41509 + SPING + Jan Gerard Snip + j.g.snip&sping.nl +41510 + NAS Recruitment Communications + Eric Brill + se&nasrecruitment.com +41511 + Municipality of Athens + Panagiotis Skarvelis + admin&cityofathens.gr +41512 + DENSAN.CO.,LTD. + YUICHIRO HIRATA + yhirata&densan.co.jp +41513 + Autonomy Systems Limited + Matt Diakun + cmbg-sysadmin&autonomy.com +41514 + LiHAS - Adrian Reyer + Adrian Reyer + iana-contact&lihas.de +41515 + QUADRAC Co. Ltd. + Tatsuo ITABASHI + tatsuo.itabashi&quadrac.co.jp +41516 + SaitecSrl + Lorenzo Salvadori + lorenzo.salvadori&saitecsrl.com +41517 + Eagle Electronics + michael barker + mab.eagle&virgin.net +41518 + Ingram Content Group + Daniel Owen + daniel.owen&ingramcontent.com +41519 + OpenADR Alliance + Massimiliano Pala + m.pala&cablelabs.com +41520 + Krypto-IT + Jakub Juszczakiewicz + jakub.juszczakiewicz&krypto-it.pl +41521 + Zebsys Ltd + Geoff Hall + ghall&zebsys.com +41522 + www.info-x.org + Payvin Nikita + nekit&info-x.org +41523 + Cloudseed + Ian Freislich + ianf&cloudseed.co.za +41524 + Neratec Solutions AG + Andreas Schwarz + andreas.schwarz&neratec.com +41525 + Hubject GmbH + Thomas Thamm + thomas.thamm&hubject.com +41526 + MJP Communications Ltd + Kevin Patterson + Kevin&mjpcommunications.com +41527 + Paperless + Eduardo Troncoso + etroncoso&paperlessla.com +41528 + Dunder Mifflin + William Steiner + bills&bravanet.org +41529 + Zenerji, LLC + Young H. Etheridge + yhe&yheville.net +41530 + USIX Co., Ltd. + Jung Soo Chul + steve&usix.co.kr +41531 + Basic., INC. + Akira Ohta + postmaster&basic.co.jp +41532 + Quick2Wire Limited + Romilly Cocking + romilly&quick2wire.com +41533 + CDS Call Dispatch Scholz GmbH + Michael Weinberger + mw-it&cdsnet.de +41534 + VoiceHost Ltd + Ross Beer + support&voicehost.co.uk +41535 + Cleo Communications US, LLC + Kevin Pearson + IT&cleo.com +41536 + Vix Technology + Andy Avery + andy.avery&vixtechnology.com +41537 + U.P. Joven Club de Computación y Electrónica + Carlos Raúl Laguna Mendoza + carlosr&jovenclub.cu +41538 + DARC e.V. OV Freiburg + Leopold Schabel + darc&leoschabel.de +41539 + RAGged Software + Roy A. Gilmore + rag&ragged-software.com +41540 + Intelligent Software Solutions, Inc. + Michael Auer + michael.auer&issinc.com +41541 + Ardral Co. + Jeremy Willans + admin&ardral.co +41542 + Monnit Corporation + Kelly Lewis + kellyl&monnit.com +41543 + Advanced Power Laboratories + John Detrick + johnd&advancedpowerlaboratories.com +41544 + Fish Eagle Limited + Tony Murray + tony&fish-eagle.net +41545 + Gravity Networks + Reid Nilson + reid.nilson&gmail.com +41546 + Innologica JSC + Yordan Yordanov + yordan.yordanov&innologica.com +41547 + MOVASIM + Leonardo Massazza + leonardo.massazza&movasim.com +41548 + Laimbock Consulting + Patrick Laimbock + oid-admin&laimbock.com +41549 + runtastic GmbH + Gerhard Sulzberger + gerhard.sulzberger&runtastic.com +41550 + Interlogica + Manuel Giorgini + manuel.giorgini&interlogica.it +41551 + Navayo Research Kft. 
+ Gabor Kaszas + snmp&navayo.net +41552 + Compagnie Nationale des Commissaires aux Comptes + Adminsys team / équipe des administrateurs système (Marc-Aurèle DARCHE) + admin&cncc.fr +41553 + Spellpoint Oy + Janne Sirén + info&spellpoint.fi +41554 + ITX Secrurity + Summer Kim + Summer&itxsecurity.com +41555 + ECOIT + Oh JunSeok + ignatius000&gmail.com +41556 + Fexco Merchant Services LTD + Micheal O Shea + mioshea&fexco.com +41557 + AutoUplink Tech + Collin Gilbert + cgilbert&autouplinktech.com +41558 + DigitalSignal + Petri Stenberg + petri&digitalsignal.se +41559 + GRUP SERHS, S.A. + Albert Orriols Cabané + albert.orriols&grupserhs.com +41560 + Landesforsten Rheinland-Pfalz + Tobias Helfenstein + tobias.helfenstein&wald-rlp.de +41561 + Inteligo Financial Services SA + Emil Wolski + emil.wolski&inteligo.pl +41562 + Nexiens + Mohamed Badri + mohamed&nexiens.ma +41563 + BioLink Solutions Ltd. + Ruslan Marin + alexanderi&biolink.ru +41564 + Selectel LLC. + Igor Shestakov + shine&selectel.ru +41565 + Ministry of Taxes of the Republic of Azerbaijan + Eyyub Ibrahimov + Eyyub.Ibrahimov&taxes.gov.az +41566 + amagical.net + Jan Baier + jan.baier&amagical.net +41567 + A B Gensets Inc. + Jonas Bauman + jonas&abgensets.com +41568 + Sokuda Technologies(Beijing), Inc. + cui chunfeng + xiaocui&sokuda.com +41569 + Mobile Interactiva S.L. + Jorge Calavia + jcalavia&mobileinteractiva.com +41570 + 9bit, Borut Mrak s.p. + Borut Mrak + hostmaster&9bit.biz +41571 + Global Linking Solutions + Brandon Phelps + bphelps&gls.com +41572 + BRS Labs + Anthony Akins + Asakins&brslabs.com +41573 + ITRun Consulting Sp. z o.o. + Jacek Nowicki + office&itrun.pl +41574 + UNIÃO BRASILEIRA DOS ESTUDANTES SECUNDARISTAS + Ronaldo Batista Da Silva + rbatista1&ig.com.br +41575 + Modern Woodmen of America + Brian Rathke + brian.rathke&modern-woodmen.org +41576 + Uniao Nacional dos Estudantes + Ronaldo Batista Da Silva + rbatista1&ig.com.br +41577 + Car Connectivity Consortium + Rebecca Goldberg + admin&carconnectivity.org +41578 + MAC IT Solutions + Marco Antonio Carcano + info&macitsolutions.ch +41579 + Teraoka Seiko Co., Ltd. + Mitsuyoshi Madokoro + m.madokoro&digi.jp +41580 + Homer Electric Association, Inc. + Marvin Super + msuper&homerelectric.com +41581 + RB Generalekonomik + Aleksandar Kostic + akostic&geneko.rs +41582 + Maksat Tech Pvt Ltd + Ankush Malhotra + ankush.malhotra&gmail.com +41583 + ZAO NHK + Sergey Afanas'ev + it&nhk.ru +41584 + GCC Ltd. + Roman Yakubov + R.Yakubov&IBPost.ru +41585 + EILEO + Matthieu Jacquier + services&eileo.com +41586 + ENSTEAM Sp. z o.o. (c/o E-FISH Sp. z o.o.) + Tomasz Krakowiak + tomasz.krakowiak&ensteam.com +41587 + HOLLY&Co.,Ltd. + Masafumi Horimoto + horimoto&holly-linux.com +41588 + Shumilov Nikita Sergeevich + Nikita Shumilov + nikita307&icloud.com +41589 + Jolokia + John Mitchell + mitch&jolokianetworks.com +41590 + Wirtschaftsagentur Wien. Ein Fonds der Stadt Wien. + Ruediger Swoboda + administrator&wirtschaftsagentur.at +41591 + Sipi srl + Lorenzo Balliana + lorenzo.balliana&sipi.it +41592 + Evanti + Alik Kalmykov + kalmykov.alik&gmail.com +41593 + Sard Verbinnen & Co + Randal Bridges + it&sardverb.com +41594 + LLC "Ekb-Info" + Andrey Volkov + volkov.am&ekb-info.ru +41595 + Genesis Technical Systems Corp + Stephen Cooke + snmp-admin&genesistechsys.com +41596 + HornersCorners, LLC + Jim Horner + jim&hornerscorners.us +41597 + RFEvolution s.r.l. 
+ Luigi Fazio + lfazio&rfebroadcast.com +41598 + Instituto Federal Catarinense - Campus Concórdia + jonas antunes da silva + jonas.antunes&ifc-concordia.edu.br +41599 + University of the Philippines - Diliman + Byron V. Maniquis + support&upd.edu.ph +41600 + ABDUL KHALEK STORE + RAKIBUL ISLAM KHOCON + KHOCONSUKURVANGGA&GMAIL.COM +41601 + Myriad Group AG + Stephen Kershaw + stephen.kershaw&myriadgroup.com +41602 + Links Global Services, C.A. + Loris Santamaria + loris&lgs.com.ve +41603 + SOMANSA Co., Ltd + Yong Pil Hur + yphuh&somansa.com +41604 + Monitor Electric Joint Stock Company + Alexey Oblog + Aleksey.Oblog&monitel.ru +41605 + Georgia's Own Credit Union + Thomas Stratton + tastratton&georgiasown.org +41606 + centrotherm photovoltaics AG + Martin Schmidt + ctpki&ngmail.eu +41607 + One IP + Ron Arts + ron.arts&oneip.nl +41608 + EFP Rotenberg LLP + Craig Julien + cjulien&efprotenberg.com +41609 + Kjempekjekt AS + Sondre Eikanger Kvalø + iana&kjempekjekt.no +41610 + Aspectra AG + Thomas Sauter + thomas.sauter&aspectra.ch +41611 + Bank Vontobel AG + Pierre-Olivier Mangin + seadmin&vontobel.ch +41612 + Swisscom Energy Solution AG + Bruno Hostalery + bruno.hostalery&be-smart.ch +41613 + Controles S.A. + Alvaro Delacoste + adelacoste&controles.com +41614 + Eagleville Hospital + Richard Mitchell + rmitchell&eagleville.org +41615 + Manhattan College + Robert Moran + software&manhattan.edu +41616 + Weeo Group + Jerome Herman + jerome.herman&weeo.fr +41617 + Sparta Consulting + Ben Tallman + btallman&spartaconsulting.com +41618 + OJSC Bank SGB + Eduard Shcherbakov + ed&severgazbank.ru +41619 + Hospitality Alliance AG + Katharina Störmer + katharina.stoermer&ramada.de +41620 + Eric Lakin + Eric Lakin + elakin&infohell.net +41621 + Ministerio de Gobierno y Reforma del Estado + Lucila Wilde + internet&santafe.gov.ar +41622 + XOR Media, Inc. + Ralph Beck + ralph.beck&xor-media.com +41623 + One Tech Inc. + OID Admin - Murat Kilic + oidadmin&onetechinc.com +41624 + Bell Aliant + Greg Desveaux + gregory.desveaux&bellaliant.ca +41625 + Mended Duck Computer Services + Adam Kittilson + accounts&mendedduck.com +41626 + Geller & Company LLC + Omar Pena + opena&gellerco.com +41627 + Cenwell LTD + Justin Shen + justin&cenwell.com.tw +41628 + Learning Media Ltd + Richard Parratt + richardparratt&learningmedia.co.nz +41629 + Chakra Network Solutions Private Limited + S.Sree hari nagarajan + sree&chakranetwork.com +41630 + SENSEMATIX + Jothi Kumar + jothi&sensematix.com +41631 + Tasneem electronics LLC + Mr. Ayman Narmug + ayman&usg-jo.com +41632 + PT.Bina Buana Raya + Irwan Sulistiyono + irwan&bbr.co.id +41633 + Certivox Ltd. + Stoyan Petkov + system&certivox.com +41634 + EDV Beratung Haag + Peter Haag + Peter&haagmail.de +41635 + NBC Elettronica Group Srl + Mr. Paolo Soldarelli + elettronica&nbc-el.it +41636 + Tolaris.com + Tyler Wagner + admin&tolaris.com +41637 + Talia Ltd + Tyler Wagner + helpdesk&talia.net +41638 + Socus networks + Blake Wu + bwu&socusnetworks.com +41639 + SVSI + Greg Wirth + gwirth&svsiav.com +41640 + Laconisoft LLC + Bret Ewin + R8pBsn1Jfva5TPFQ&laconisoft.com +41641 + Colglazier Clinic + Clifford Colglazier + acolglaz&gpcom.net +41642 + ThinKom Solutions, Inc. + Stuart B. Coppedge + Stuart.Coppedge&thinkom.com +41643 + RCG Creations Limited + Craig Goodenough + craig&rcgcreations.com +41644 + Cinegy GmbH + Jan Weigner + jan&cinegy.com +41645 + Sinogram Technology (Beijing) Co., Ltd. 
+ enhe wu + jealtw&hotmail.com +41646 + QUADROtech Solutions AG + Michel Zehnder + michel.zehnder&quadrotech-it.com +41647 + SpectrAp + Samsonov Konstantin N. + samsonov&spectrap.ru +41648 + Shenzhen SDGI Optical Network Technologies Co,. Ltd. + Zeng Dezhong + zengdz&sdgi.com.cn +41649 + Schnoor Industrieelektronik GmbH & Co. KG + Henry Koplien + entwicklung&schnoor-ins.com +41650 + Engisoft + David Acevedo + d.acevedo&engisoft.com +41651 + Dapesco S.A. + Lorenzo BERNARDI + lbi&dapesco.com +41652 + IdentSign + James Burton + james.burton&identsign.com +41653 + KCG Europe Ltd (formerly 'GETCO Europe Ltd') + Duncan Gibb + dgibb&kcg.com +41654 + E.A.L. Apeldoorn B.V. + Frank Tamminga + frank.tamminga&eal.nl +41655 + CardSmart Technologies + James Wiley + jimwiley&yourcardsolution.com +41656 + 2024Sight INC. + Anton Hofland + anton.hofland&2024sight.com +41657 + Bressner Technology + Dr. Olaf Holthausen + olaf.holthausen&bressner.de +41658 + United Business Media + Michael Buglass + michael.buglass&ubm.com +41659 + Datasat Technologies Ltd + Alex Brown + alex.brown&datasattechnologies.com +41660 + SciVisum Ltd + Deri Jones + snmp.mib&scivisum.co.uk +41661 + SEKAS GmbH + Michael Rosenberger + webmaster&sekas.de +41662 + Capio S:t Görans Sjukhus AB + Nicklas Johannesen + nicklas.johannesen&capiostgoran.se +41663 + Philippe Bonatti + Philippe Bonatti + philippe&bonatti.ch +41664 + Costaneira - Arno Johann S/A Comércio de Material de Construção + Flávio Johann + flavio.johann&costaneira.com.br +41665 + George Flemming LLC + George Flemming + George&gflemming.com +41666 + Government of Yukon + Richard Wieser + registrar&gov.yk.ca +41667 + Park Bench Software + Paul Messias + pmessias&parkbenchsoftware.com +41668 + The Evergreen State College + Dan Scherck + netservices&evergreen.edu +41669 + Spiros Iliadis + Spiros Iliadis + spiros&iliadis.me +41670 + AddOn Holding Gmbh + Bernd Burkhardt + burkhardt&addon.de +41671 + Atelier WW Architekten SIA AG + Klaus Leuders + klaus.leuders&atelier-ww.ch +41672 + TECMASUR CIA. LTDA. + SANTIAGO REINO + sreino&tecmasur.com.ec +41673 + Lapsum + Rafael de la Torre Consuegra + rdelatorre&lapsum.com +41674 + WAS.ch GmbH + Dominik Fässler + info&was.ch +41675 + ArmOwl LAB (Roman V. Kosinskiy) + Roman V. Kosinskiy + armowl&list.ru +41676 + Gordon Broom + Gordon Broom + gjbroom&thebrooms.ca +41677 + Nicholas Brown + Nicholas Brown + nick&anatoll.org +41678 + SIBOAVANCE + Pablo Baena + pbaena&siboavance.com.co +41679 + OVNETAP + Radomir Ochtyra + radomir.ochtyra&ovnetap.com +41680 + Media Netwerk AS + Stig Christian Aske + stigchristian&netwerk.no +41681 + Chudyk Cloud Services + Gerald Chudyk + gerald&chudyk.com +41682 + ASSEMBLY Organizing Oy + Jaakko Vallo + jaakko.vallo&assemblytv.net +41683 + Nebula Media Solutions Ltd. + Peter Jones + peter.jones&nebula-media.co.uk +41684 + Pyzuka + Jeroen Dekkers + iana&pyzuka.nl +41685 + nfotex Informationstechnologie Dienstleistungs GmbH + Raphael Wegmann + wegmann&psi.co.at +41686 + Uzin Utz AG + Andreas Koegel + andreas.koegel&uzin-utz.com +41687 + Cold Crossing + David Klann + dklann&coldcrossing.com +41688 + EOS Sistemi s.r.l. 
+ Tiziano Martelli + tiziano&eos.it +41689 + Thüga Aktiengesellschaft + Christoph Mayer + hostmaster&thuega.de +41690 + Dension Audio Systems + Tibor Fábi + tfabi&dension.com +41691 + Horizon Forest Products, LLP + Jimmy Dixon + jimmy.dixon&horizonforest.com +41692 + Renee Marie Jones + Renee Marie Jones + rmj&renee-jones.com +41693 + Nyingma Association of Mangalam Organizations - Communications And Network Infrastructure + Lee Brown + Administrator&RatnaLing.Org +41694 + Christians Against Poverty + Matt Hardy + it&capuk.org +41695 + Translational Centre for Regenerative Medicine (TRM) + Steffen Loeb + pki&trm.uni-leipzig.de +41696 + Actia Systems España SAU + Jonathan Redondo + jredondo&actiasystems.com +41697 + European Central Bank + Christoph Schaper + dgisopsec&ecb.int +41698 + Lucas-Consulting + Andreas Lucas + andreas&lucas-consulting.net +41699 + LGS Innovations + Bobby Dimmette + bjd&lgsinnovations.com +41700 + eyeReturn Marketing Inc. + Kenneth Voort + kvoort&eyereturn.com +41701 + Jarvis Ford + Craig Williams (IT Manager) + it&jarviscars.com.au +41702 + Levart Distribution Systems Pty. Ltd. + Andrew Simmonds + andrew&levart.com.au +41703 + ATM-12 + Vitaly Bodin + vitbo&atm12.ru +41704 + Neoflow. Co., Ltd. + Daisuke Ota + daisuke&neoflow.jp +41705 + Institutul de Dezvoltare a Societatii Informationale + Andrei Rusu + andrei.rusu&idsi.md +41706 + Trollhattan Stad IT + Magnus Danhage + magnus.danhage&trollhattan.se +41707 + Solbox Inc. + Hyunkyu Kim + arhes&solbox.com +41708 + Superior Access Solutions, Inc. + Dave Werdin + dwerdin&sa-solutions.com +41709 + EAP Technologies, Inc. + Roland Alden or Brenda Cundy + ralden&ralden.com +41710 + ATES Networks + Alexandre Ioachim + aioachim&atesnetworks.com +41711 + Dossia + Steve Allman + steve.allman&dossia.org +41712 + ISTHARI + JOSE FELIX HERNANDEZ BARRIO + jose.hernandez&isthari.com +41713 + Melet.com + John Melet + john&melet.com +41714 + Azuki Systems, Inc. + Keith Bilafer + keith.bilafer&azukisystems.com +41715 + Universal Cinema Services Co., Ltd. + Tang Liguo + lgtang&sina.com +41716 + Rational Retention, LLC + Konstantin Mertsalov + support&rationalretention.com +41717 + Blekinge Institute of Technology + Patrik Arlos + patrik.arlos&bth.se +41718 + Ouroboros + Claude Dehais + cdehais&ouroboros.fr +41719 + Qosmotec GmbH + Axel C. Voigt + acv&qosmotec.com +41720 + Saisei Networks Inc + John Harper + john&saiseinetworks.com +41721 + International Communications Corporation INC + Keith Alexis + keith.alexis&intcomcorp.com +41722 + Information Networks Ltd. + Dmitry G Shin + dmitry.s&hsdn.org +41723 + XGEN - Web Business + Fabiano Cainelli + xgen.snmp&xgen.com.br +41724 + Chengdu SKSpruce Technology Inc. + kai zhang + kaizhang&skspruce.net +41725 + Tere.com Networks OÜ + Toomas Vendelin + iana&vendelin.com +41726 + Dr. Sulaiman Al Habib Medical Group + Irshad Ismail + irshad.ismail&drsulaimanalhabib.com +41727 + Sabatier Geolocalisation + Damien CABROL + damien.cabrol&geolocalisation-sabatier.fr +41728 + athenahealth, Inc. + Ben Whittaker + vendorcontact&athenahealth.com +41729 + United Equitable Group, Ltd. + Matthew Morrow + mmorrow&ueg1.com +41730 + Arab States Research and Education Network, GmbH + Ola Adnan Samara + info&asrenorg.net +41731 + Prodware Deutschland AG + René Büdinger + r.buedinger&prodware.de +41732 + Mathias Haimerl 3S + Mathias Haimerl + webmaster&haimi.de +41733 + Parallel Wireless + Sumit Garg + sgarg&parallelwireless.com +41734 + V10 Networks + Jeff Leung + oid.admin&v10networks.ca +41735 + Teletech Pty. Ltd.
+ Sean Gallagher + sean&teletech.com.au +41736 + Beatrice Wireko IT + Ottmar Erbes + mail&wireko.com +41737 + Bryan Health + Brad Znamenacek + bradz&bryanhealth.org +41738 + Algo Communication Products Ltd. + Steve Ansell + steve&algosolutions.com +41739 + Five9, Inc. + Darin Fisher + darin.fisher&five9.com +41740 + TheGunns.net + William Gunn + postmaster&thegunns.net +41741 + Ittim Technology Co.Ltd + Zheng Dajun + zhengdj&ittim.cn +41742 + IMEC + Julien Beauviala + julien&imec-archives.com +41743 + eKing Technology Co., Ltd. + Liang Dai + liang.dai&hnair.com +41744 + Z3 Technology + Bruno Marchevsky + brunomarchevsky&z3technology.com +41745 + FrozenWarrior.com + Bret Olmsted + bret&frozenwarrior.com +41746 + Chengdu Kingtype Digital TV Equipment Co., Ltd. + Junyu Xue + ywlizhen&163.com +41747 + Oliver Solutions + David Ganor + david&oliver-solutions.com +41748 + CensorNet Ltd + Dan Searle + dan&censornet.com +41749 + Owens State Community College + Martin Stroud + martin_stroud&owens.edu +41750 + Head Department for Statistics and Information Technologies of State Customs Committee + Aleksandr Voloschuk + aleksandr.voloschuk&customs.gov.az +41751 + ROBIN'S + Radhakrishnan Nagarajan + robinblr.arts&gmail.com +41752 + ZAO "NPK Rotek" + Sergei Shirokov + sergo&rotek.ru +41753 + Greg Waterhouse + Greg Waterhouse + gregw&rgweb.org +41754 + Daniel Sage + Daniel Sage + iana&mail.dsage.org +41755 + Lastline, Inc. + Ludovico Cavedon + cavedon&lastline.com +41756 + REVCORD - Revolutionizing Voice Recording + Vasanth Neelamagam + vasanth&revcord.com +41757 + Prism Systems, Inc. + Alex Lynch + alynch&prismsystems.com +41758 + Axham Corporation + Vijay Varadan + admin&axham.com +41759 + Open Computing Facility + Dara Adib + gm&ocf.berkeley.edu +41760 + TibetSystem Co.,Ltd. + Kwon Tae Un + nadakwon&tibetsystem.com +41761 + Atea Sverige AB + Martin Arnsrud + martin.arnsrud&atea.se +41762 + Mayak LTD + Victor Shachnev + its&slata.com +41763 + JSC «BystroBank» + PEN admins + iana-pen-admins&bystrobank.ru +41764 + ETAT DE FRIBOURG + Simon Ecoffey + simon.ecoffey&fr.ch +41765 + Troyer AG + Michael Saxl + michael.saxl&troyer.it +41766 + Flow-Data, Inc + Keith Thomson + keitht&flow-data.com +41767 + IDSS AB + Claes Wikholm + info&idss.se +41768 + Beijing Telesound Electronics Co., Ltd. + Jiapeng Lu + lujiapeng&telesound.com.cn +41769 + IS4IT GmbH + Lothar Haeger + lothar.haeger&is4it.de +41770 + Ascendi + Bruno Cacheira + bcacheira&ascendi.pt +41771 + Fundamental Games + Kenneth Noland + ken.noland&fundamentalgames.com +41772 + Auth-servers + Warren Kumari + warren&kumari.net +41773 + TelVue Corporation + Jesse Lerman + jlerman&telvue.com +41774 + Health Research, Inc. + Mike Varney + mlv03&healthresearch.org +41775 + Global Solutions Tecnologia da Informação LTDA + Fernando de Oliveira Carvalho + global&glsolutions.com.br +41776 + IACIT Soluções Tecnológicas LTDA + gustavo hissi + gustavo.castro&iacit.com.br +41777 + GSE Environmental, LLC + Jeff Johnson + jjohnson&gseworld.com +41778 + Lake Avenue Congregational Church of Pasadena + Emil Tulcan + infosys&lakeave.org +41779 + QualityLogic Inc. + Kai Zuber + sysadmin&qualitylogic.com +41780 + Telesphere Networks Ltd + Larry Low + llow&telesphere.com +41781 + Digby Wells Environmental + Clint Naude + clint.naude&digbywells.com +41782 + Pacific Design Enginering + Bill Moats + moats&pde.com +41783 + Geolink Satellite Services + Julien Leroux + julien.leroux&geolink.com +41784 + ifbyphone, Inc. 
+ Marcus Lewin + mlewin&ifbyphone.com +41785 + Mary Washington Healthcare + Douglas Hanback + douglas.hanback&mwhc.com +41786 + Ryu project + Isaku Yamahata + yamahata&valinux.co.jp +41787 + HyperMatrix Solutions Ltd. + Hiren Shah + admin&hypermatrix.com +41788 + kuederli.net + Urs P. Kuederli + urs&kuederli.com +41789 + AlTaysir for Information Systems Security Consulting LLC + Aladdin Dandis + aladdin.d&secrivacy.com +41790 + I-CONCEPT + TASSOS FRAGOS + t.fragos&i-concept.gr +41791 + eSPe + Karsten Pohle + k.pohle&gmx.net +41792 + Alrayyan for media and marketing co + Mahmoud Albatarni + mahmoud.albatarni&alrayyan.qa +41793 + North East Independent School District + Brian Harrell + bharre&neisd.net +41794 + High Point PC Solutions + Daniel T. Harrison + highpointpc2&gmail.com +41795 + Net Consulting UK Ltd + Glenn Morgan + glenn.morgan&netconsulting.co.uk +41796 + EleSy + Alexandr Makarov + Alexandr.Makarov&elesy.ru +41797 + STC Systema + Mikhail Maslennikov + mikhailmaslennikov&yandex.ru +41798 + astozi + Tomasz Zieba + tomasz.zieba&astozi.pl +41799 + William Hill + Peter Morgan + peter.morgan&williamhill.com +41800 + GaHIN (Georgia Health Information Network) + Himabindu Bolisetty + bindu&careevolution.com +41801 + Prattville Water Works Board + Neil Harris + nharris&pwwb.com +41802 + Talkpath LLC + Steven Platt + splatt&talkpath.com +41803 + On Center Software, Inc. + Brandon Davis + brandon.davis&oncenter.com +41804 + Austrian Federal Ministry of Science and Research + Klemens Urban + klemens.urban&bmwf.gv.at +41805 + Guidance Solutions, Inc. + Jon Larsen + jlars&guidance.com +41806 + Viastorage + Guillaume VERNAT + gvernat&coffreo.com +41807 + GuiZhou BC&TV Information Network Co.,LTD + XiangLong Hou + houxianglong&coship.com +41808 + Administration of Municipal Formation of the City-Hero of Novorossiysk + Dmitry Kirienko + uprav.it&gmail.com +41809 + Dismuntel S.A.L. + Josep Enric Rosell Carbonell + jerosell&dismuntel.com +41810 + Credit Agricole Bank, PJSC + Sergii Plakhota + Sergii.PLAKHOTA&credit-agricole.com.ua +41811 + Spital Männedorf AG + Bruno Baldauf + b.baldauf&spitalmaennedorf.ch +41812 + Rouis Labs + Khalifa Rouis + labs&rouis.net +41813 + Anhui comhigher tech co.,ltd + Sun Xinya + xinyasun&tsinghua.edu.cn +41814 + Hubei University Of Automotive Technology + Lan JianPing + ljp_1984&126.com +41815 + Fraunhofer-Institut fuer Software- und Systemtechnik ISST + Ralf Nagel + ralf.nagel&isst.fraunhofer.de +41816 + Blissful Living Foundation + Bruno Lago + admin&blissful.im +41817 + Cooper Gitiesse + Maurizio Cannella + Maurizio.Cannella&cooperindustries.com +41818 + Quotient, Inc. + Rob Payne + rpayne&quotient-inc.com +41819 + IPNetwork + Ingo Peukes + admin&ipnetwork.de +41820 + Kanton Basel-Landschaft + Claude Tomek + claude.tomek&bl.ch +41821 + Frantovo.cz + Frantisek Kucera + iana-contact&frantovo.cz +41822 + Project FiFo + Heinz N. Gies + heinz&licenser.net +41823 + Miaozhen Systems + Chi Song Tao + cst&miaozhen.com +41824 + Université Gaston Berger + Moussa Diagne + moussa.diagne&ugb.edu.sn +41825 + Agilord Ltd. + Istvan Soos + istvan.soos&agilord.com +41826 + Prognus Soluções Livres em TI + William Fernando Merlotto + william&prognus.com.br +41827 + Novantum BV + Ron Arts + ron.arts&novantum.com +41828 + NySoft Argentina SRL + Leonardo Bracco + leonardo&nysoft.com.ar +41829 + MEDDATA + SAIT SARI + sait&meddata.com.tr +41830 + PicoCELA Inc.
+ Hiroshi Furukawa + info&picocela.com +41831 + Bitbase AS + Stig Christian Aske + stigchristian&bitbase.no +41832 + Private Entrepreneur Kutsevol Maxym + Maxym Kutsevol + max&kutsevol.com +41833 + Paree BV - Elektro Telecom + Erik de Jong + erik.de.jong&paree.nl +41834 + BRAM Technologies + Dmitry Repkin + rd&bram.tv +41835 + JDL Digital Systems Inc. + Yanda Ma + yanda&airshipvms.com +41836 + Commend International GmbH (formerly 'bct electronic GesmbH') + Martin Posch + lizenz&commend.com +41837 + Evan-Moor Corp + Eric Lindquist + administrator&evan-moor.com +41838 + Dynamic Network Services Inc. + Josh Bradley + jbradley&dyn.com +41839 + Prism-IPX Systems, LLC + Russell Friery + rfriery&prism-ipx.com +41840 + University of the Philippines Manila + Diosdado B. Calmada + dbcalmada&post.upm.edu.ph +41841 + Zhuhai Bluemax Broadband Electronic Co.,Ltd + Chen Guoxiang + cgx613&163.com +41842 + Novosibirsk State University + Adamansky Anton + adamansky&post.nsu.ru +41843 + Uni-Film-Club Dortmund + Fabian Schlenz + fabian&ufc.tu-dortmund.de +41844 + PERF-IT B.V. + René de Theije + info&perf-it.eu +41845 + Bitcarrier S.L. + Martin Mendez + info&bitcarrier.com +41846 + Chinasoft International Co., Ltd. + Meng Gang + knight.meng&chinasofti.com +41847 + EDALab s.r.l. + Stefano Cailotto + stefano.cailotto&edalab.it +41848 + TOVEK + Ludovit Kontsek + support&tovek.cz +41849 + CIDON + Julio Cidon + julio&cidon.es +41850 + DaySequerra Corporation + David V. Day + david&daysequerra.com +41851 + zeb/rolfes.schierenbeck.associates gmbh + Patrick Haubold + webmaster&zeb.de +41852 + Peregrine Capital Management + TJ Cornish + domain.admin&peregrine.com +41853 + RAMI + LOUCHART PASCAL + plouchart&ramiaudio.com +41854 + W.J. Bradley + Adam Steed + adam.steed&wjbradley.com +41855 + Logol + Sergey Beloff + admin&logol.ru +41856 + Grand Valley State University + Jason Kunnen + dnsadmin&gvsu.edu +41857 + AGENCE NATIONALE DES INFRASTRUCTURES NUMERIQUES ET DES FREQUENCES + MASSAMBA AIME MARTIAL + info&aninf.ga +41858 + The Trade Desk, Inc. + Ryan Reynolds + ryan.reynolds&thetradedesk.com +41859 + Hidden Design Kft. + Viktor LOOSZ + efpe&hidden.hu +41860 + Oryon TI + Joel Franco Guzman + joel.franco&gmail.com +41861 + Tucker Ellis LLP + Lee K. Harris + lee.harris&tuckerellis.com +41862 + Metawell + Ralf Krieg + postmaster&metawell.com +41863 + Mongoose GFX + Andrew Hughes + andrewhughes&mongoosegfx.co.nz +41864 + EyeKor LLC. + Yijun Huang + yhuang&eyekor.com +41865 + Monitor Business Machines Ltd + Andrew Hughes + andrew.hughes&monitorbm.com +41866 + Hitachi-LG Data Storage, Inc. + Yong Hyun, Ahn + anyong&hlds.co.kr +41867 + yinyuetai + taotan + tao.tan&yinyuetai.com +41868 + elecom co., ltd. + kosuke okada + Kosuke_Okada&elecom.co.jp +41869 + Selex Gematronik GmbH + Andre Weipert + a.weipert&gematronik.com +41870 + Notartel S.p.A + Carmine Cesarano + ccesarano&notariato.it +41871 + Sonifex Ltd + Adam Borowski + adam&sonifex.co.uk +41872 + Masdar PV GmbH + Peter Kruse + pkruse&masdarpv.com +41873 + Institut Superieur d'Electronique de Paris (ISEP) + Christophe Boyanique + admsys-iana-pen&isep.fr +41874 + LLC "ANTE-MEDIAM" + Valentyn Shapoval + valsh&microsoft.com +41875 + Esselte IPR AB + David Block + dblock&esselte.com +41876 + FOXCOMM NETWORKS + Grigory Korotov + g.korotov&foxcomm.ru +41877 + Orban + Greg Ogonowski + greg&indexcom.com +41878 + Tobias & Tobias + Ian Taylor + ian.taylor&tobias.tv +41879 + Mind Labs + Vyacheslav Pogoreltsev + sysadm&mind.com +41880 + MIcrotel Innovation S.r.l.
+ Adelio De Bernardi + adelio.debernardi&microtelinnovation.com +41881 + Korbitec (Pty) Ltd. + Ricky Burrell + korbitec&gmail.com +41882 + KLU Consulting / Kōkua Lolo Uila + Michael V. David + michael&mvdavid.com +41883 + Trojan Technologies + Rob Marles + is-admin&trojanuv.com +41884 + Katherine Shaw Bethea Hospital + David Ginn + dginn&ksbhospital.com +41885 + NDsoftware + kenji murakami + kenji&ndsoft.co.jp +41886 + Beijing Sapling Technology Co.,Ltd + Jikun + jikun&sapling.com.cn +41887 + ENSCO, Inc. + Devin Whelan + ist.ca&ensco.com +41888 + Friedrich-Schiller-Universität Jena + Thomas Otto + thomas.otto&uni-jena.de +41889 + Nine Internet Solutions AG + Nils Caspar + nils.caspar&nine.ch +41890 + Cuyahoga Community College District + David Mastny + information.security&tri-c.edu +41891 + Holland LP + Tim Rushing + hollandit&hollandco.com +41892 + BIFIT Service + Dmitrij Kovalevskij + kovalevsky&bifit.ua +41893 + DBM S.r.l. + Matteo Piva + matteo.piva&dbmsrl.com +41894 + Logistica Integral + Ernesto Pareja + epareja2&gmail.com +41895 + Alliance Data + Mark Mishler + mark.mishler&alliancedata.com +41896 + Alberta Electric System Operator + Leo Li + leo.li&aeso.ca +41897 + Inovar + Dan Parker + dan.parker&inovar.com +41898 + Australian Nuclear Science Technology Organization + Daniel Matusch + dm&ansto.gov.au +41899 + YU JYA TECHNOLOGY CO., LTD. + Steve Chen + steve.chen&azuretec.com +41900 + Ministry of Interior, Bulgaria + Valentin Velkov + v3&mvr.bg +41901 + CSG SCIENCE&TECHNOLOGY CO., LTD.HEFEI + BING LU + support&csgnet.cn +41902 + Gemeindeverwaltung Landquart + Matthias Hirschmann + mhirschmann&baggenstos.ch +41903 + ETL Solutions Ltd. + Martin Pegler + sysadmin&etlsolutions.com +41904 + Impala Editores, SA + Ricardo Matos + ricardo.matos&impala.pt +41905 + Great-West Life + Ronald Hewson + ron.hewson&investorsgroup.com +41906 + VeriSign, Inc. + Danny McPherson + dmcpherson&verisign.com +41907 + LoudCell Technologies Pvt Ltd. + Yogesh Sharma + yogesh&loudcell.com +41908 + HanKeInfo + Alfred Huang + alfredhuang&hankeinfo.com.cn +41909 + AventuraHQ, Inc + Adam Williams + adam.williams&aventurahq.com +41910 + Zentyal + Jose Antonio Calvo + jacalvo&zentyal.com +41911 + Great Plains Manufacturing, Inc. + John Woods + jwoods&greatplainsmfg.com +41912 + International Electrotechnical Commission + Herbert Falk + herb&sisconet.com +41913 + Tecsys do Brasil Industrial Ltda + Rodolfo Vidal + rodolfo&tecsysbrasil.com.br +41914 + Bradford Robotic Telescope + Dan Hedges + admin&telescope.org +41915 + Southwestern Energy Co. + Tom LeClare + tom_leclare&swn.com +41916 + vIPtela Inc. + Ramesh Prabagaran + ramesh&viptela.com +41917 + Leica Geosystems AG + Armin Märk + webteam&leica-geosystems.com +41918 + CIT Telecom-Service JSC + Egor Duda + iana&corpit.ru +41919 + Ugobame Uchibeke + Ugobame Uchibeke + uchi4jah&gmail.com +41920 + Shenzhen Integrated Electronic Systerms Lab Co.,Ltd. + DuMin + duolaheimeng123&163.com +41921 + Beijing Infomedia Electronic Technology Co., Ltd + Xuhu + evchnil&263.net +41922 + GuangDong Super Telecom Co,Ltd. + tom.liu + tom.liu&sts.cn +41923 + Patrick Sczepanski + Patrick Sczepanski + patrick&sczepanski.com +41924 + Digital Value SL + Juanjo Garcia + juanjo&digitalvalue.es +41925 + Nufarm Limited + Geoff Aldred + admin.it&au.nufarm.com +41926 + Finecom Telecommunications AG + Erich Hohermuth + erich.hohermuth&finecom.ch +41927 + FORO-tele + Maxim Struchok + max&foro-tele.com +41928 + Muir Matheson Ltd + Blair Moir + blair.moir&muir-matheson.com +41929 + Jung, DMS & Cie.
AG + Christian Bauer + christian.bauer&jungdms.de +41930 + Nautronix Limited + Iain MacMillan + Iain.MacMillan&nautronix.co.uk +41931 + CMC Electronics + Joel Savard + joel.savard&cmcelectronics.ca +41932 + HSC Brasil + Junior Cunha + junior.cunha&hscbrasil.com.br +41933 + Aligera + Wagner Müller Gegler + wagner&aligera.com.br +41934 + NKI AS + Christian Skarby + csk&nki.no +41935 + Indian River County Sheriff + Andy Collier + acollier&ircsheriff.org +41936 + Marway Power Solutions + Greg Willits + gwillits&marway.com +41937 + Escola Agricola de Jundiai - EAJ/UFRN + Thiago Dutra + thiago&eaj.ufrn.br +41938 + INCEPTRUM Technologies Inc. + Dmitry Kursov + dmitry.kursov&inceptrum.com +41939 + Tralix México S. de R.L. de C.V. + Dario Morales López + dario.morales&tralix.com +41940 + Bradbury School + Christopher Chan + christopher.chan&bradbury.edu.hk +41941 + StuStaNet e. V. + Markus Kaindl + vorstand&stustanet.de +41942 + Lettergen + Vincent Van der Kussen + it&btr-services.be +41943 + Bartec GmbH + Ralf Bauermeister + ralf.bauermeister&bartec.de +41944 + Virginia Department of Behavioral Health and Developmental Services + Marcie Stidham Stout + marcie.stidham&dbhds.virginia.gov +41945 + Institution Solutions + Jerome Haltom + jhaltom&isillc.com +41946 + The Information Architecture Institute + Dorian Taylor + iana-pen&iainstitute.org +41947 + Unidem Sales Inc + Nicholas Linn + nicklinn&michebag.ca +41948 + Job Snijders + Job Snijders + job&instituut.net +41949 + Danmagi + Julius Sloth + jsloth&danmagi.com +41950 + DTI Solutions + Marc Devault + itlicensing&dtisoft.com +41951 + Quarles & Brady, LLP + Rich Raether + itops&quarles.com +41952 + Asad Ahmed + Asad Ahmed + mdasadahmed&gmail.com +41953 + JSC "ESC of EVRAAS" + Andrew Shulga + shulga&evraasgr.ru +41954 + Polewall Norge AS + Michael Gallagher + Mjg&polewall.com +41955 + Jansen AG + Gerry Harzl + gerry.harzl&jansen.com +41956 + Fellig e.V. + Philipp Schafft + team&fellig.org +41957 + Olfeo + Julien Pinon + jpinon&olfeo.com +41958 + Prime Rate Ltd. + Szabolcs Gyuris + gyszabolcs&primerate.hu +41959 + Earlham College + Randy Schultz + schulra&earlham.edu +41960 + easygo + Kurt Stieger + pen&easygo.at +41961 + IntraCom Systems, LLC + John Jurrius + snmp&intracomsystems.com +41962 + 3ZTelecom Inc. + Enzo Dalmazzo + faisal.sikder&gmail.com +41963 + WebAmphibian.Com + Roland Stolfa + rstolfa&yahoo.com +41964 + Ozona Consulting, S.L. + Fernando Vázquez Vázquez + ssii&ozona.es +41965 + Mano Namai UK + Valerij Prusakov + valerij.prusakov&gmail.com +41966 + Applied Card Technologies Ltd. + Nick Wilson + nick.wilson&weareact.com +41967 + Pivot Point Security + Michael Gargiullo + michael.gargiullo&pivotpointsecurity.com +41968 + EPCOTS + Alaa AADIL + alaa.aadil&epcots.fr +41969 + Enero Solutions, inc. + François Trahan + ftrahan&enerosolutions.com +41970 + Orion Networks International, Inc. + David Smith + d.smith&orionni.com +41971 + Touch Tecnologia e Informática LTDA + Rodrigo Vieira Couto + rcouto&touchtec.com.br +41972 + Poznan University of Life Sciences + Robert Stanislawiak + robert.stanislawiak&up.poznan.pl +41973 + Generation Tech + Ken McDonald + ken&generationtech.org +41974 + Auf nach Mallorca GmbH + Benjamin Behringer + benjamin.behringer&auf-nach-mallorca.info +41975 + ALNET ELEKTROMEKANIK LTD + METIN YAZICI + metin&alnet.com.tr +41976 + Turvasana Tmi + Thor Kottelin + thor.kottelin&turvasana.com +41977 + Oakland County, Michigan + Internet Admin + internetadmin&oakgov.com +41978 + BEIJING DING QING TECHNOLOGY LTD. + Chen Kai(Mr.) 
+ kai.chen&startechsoft.com +41979 + Mitsubishi Electric TOKKI Systems Corporation + Takashi Iwaki + taka-iwaki&west.melos.co.jp +41980 + Neo-Renaissance Studios + Shane Spinuzzi + rspinuzzi&neorenaissancestudios.com +41981 + AerVox + David Berrocal + david.berrocal&aervox.com +41982 + zetVisions AG + Sascha Winter + it-support&zetvisions.com +41983 + Vorboss Limited + Timothy Creswick + noc&vorboss.net +41984 + meterriblecrew.net + Andreas Wiese + aw-pen&meterriblecrew.net +41985 + Desvaux Labs + Pierre Desvaux + contact&desvaux.eu +41986 + deepearth.co.uk + Aubrey Stark-Toller + aubrey&deepearth.co.uk +41987 + BMO Capital Markets + Nina Pyron + nina.pyron&bmo.com +41988 + Van Wert County Hospital + Deb Point, Clinical Applications Coordinator + dpoint&vanwerthospital.org +41989 + Alphatron Security Systems + B.D. van der Have + b.van.der.have&alphatron.nl +41990 + Pelagicore AG + Robert Griebl + robert.griebl&pelagicore.com +41991 + Super-Visions + Christophe Fonteyne + christophe.fonteyne&super-visions.com +41992 + HOPPE Holding AG + Gerhard Kapeller + gerhard.kapeller&hoppe.com +41993 + Redactia + Quentin Armitage + Quentin.Armitage&redactia.co.uk +41994 + Jonas Kopp Systems Trust Network + Jonas Kopp + jonaskoppsystems&gmail.com +41995 + SIA-CE + Attila Foris + oid&fredhome.net +41996 + COM-TECH Italia S.p.A. + Stefano De Paoli + snmp&com-tech.it +41997 + Silversky Inc. + William Lewis + bill.lewis&silversky.com +41998 + Bumicom Telecommunicatie B.V. + Arnoud Mulder + arnoud.mulder&bumicom.nl +41999 + e2x Ltd. + Darren Beck + darren.beck&e2x.co.uk +42000 + National Mineral Resource University + Popov Mikhail + popov&spmi.ru +42001 + TechDivision GmbH + Philipp Dittert + p.dittert&techdivision.com +42002 + Hurrikane Systems + Mark Kubacki + kubacki&hurrikane.de +42003 + Akron Public Schools + Derek Lewis - Technology Services + core_support&akron.k12.oh.us +42004 + Gertec + Eduardo Santos + eduardo.santos&gertec.com.br +42005 + NHST MEDIA GROUP AS + Ole Kristian Klevstrand + okk&nhst.no +42006 + netis korea + yun hyung taek + yun0512&gmail.com +42007 + nikolakoco + Nikolche Kochovski + nikolakoco89&gmail.com +42008 + BumeBox, Inc. + Per Thomsen + pthomsen&bumebox.com +42009 + Riava Networks Inc. + Aki Tsuzuku + aki&riava.net +42010 + Luxul Corporation + Dan Haab + dhaab&luxul.com +42011 + ISAAC Software Solutions B.V. + Martijn Grendelman + admin&isaac.nl +42012 + Mangold Technologies + Michael Pfeuffer + pfeuffer&mangoldtechnologies.com +42013 + Registro General de la Propiedad de Guatemala + Laura Maria Marroquin Cortez + lcortez&rgp.org.gt +42014 + Francois Trahan + François Trahan + francois.trahan&gmail.com +42015 + Weilhammer Networks + Robert Weilhammer + robert&weilhammer.com +42016 + Kingman Regional Medical Center + Chris Jones + cjones&azkrmc.com +42017 + MailMak ApS + Tommy Holm Jakobsen + thj&mailmak.com +42018 + numo labs Pty. Ltd. + Reza Monavari + admin&numolabs.com +42019 + Fort-Telecom + Alexandr Belyaev + ABelyaev&fort-telecom.ru +42020 + Piter Gaz JSC. + Maxim Y. Evdolyuk + itsupport_prof&petergaz.com +42021 + VoiceCom SA + Valentin Zahariev + vzahariev&voicecom.bg +42022 + ITUS JAPAN Co.,Ltd. + kijong lee + support&itus.co.jp +42023 + Thetus Corporation + Sam Alpher + salpher&thetus.com +42024 + Cibertec Internacional + Miguel Quesada + mquesadab&cibertec.com +42025 + Koji Komatsuzaki + Koji Komatsuzaki + kojikomatsuzaki&gmail.com +42026 + TSTREAM CO.,LTD. + Yongjae Lee + yjlee&tstream.co.kr +42027 + Sanctum Networks (P) Ltd. 
+ Girish Vinayak Gulawani + girish&sanctumnetworks.com +42028 + Arnel limited + Gary Bowyer + gbowyer&arnel.co.uk +42029 + Amigo Software + Junaid Abid Ali + junaid.ali&amigo-software.com +42030 + Ghost Software + Junaid Abid Ali + junaid.ali&ghost-software.com +42031 + 42 + Kevin 'Ether' Boulain + iana&staff.42.fr +42032 + swisspartners Investment Network AG + Othmar Büeler + it&swisspartners.com +42033 + IslaLink + Javier Valero + javier.valero&islalink.es +42034 + Intemo Technologies + Victor Palacio Tarrega + vpalacio&intemotechnologies.com +42035 + Zedge + Thomas Tinglum + thomas&zedge.net +42036 + LOYTEC electronics GmbH + Thomas Rauscher + trauscher&loytec.com +42037 + Stadt Augsburg + Manfred Gundel + netze.it&augsburg.de +42038 + Sanquin Bloedvoorziening + Rik Hulleman + r.hulleman&sanquin.nl +42039 + Laboratoire Jacques Louis Lions + Altaïr S. Pelissier + altair.pelissier&ljll.math.upmc.fr +42040 + Nasuni Corporation + Rob Mason + iana&nasuni.com +42041 + Vänerhamn AB + Kristian Holmqvist + kristian.holmqvist&vanerhamn.se +42042 + Inca Telecom S.A.C. + Antonio Franco + antonio.franco&incatelecom.com +42043 + DingLi Communications Corp., Ltd. + Ding Li + wancheng.li&dinglicom.com +42044 + GMQ Consulting AB + Ola Eriksson + service&gmq.se +42045 + Logical Tools s.r.l. + Mr. Fabio Pucitta + info&logicaltools.com +42046 + DigitalSign - Certificação Digital Ltda + Fernando Moreira + suporte&digitalsigncertificadora.com.br +42047 + twis.la (Clement Martin) + Clement Martin + twisla&twis.la +42048 + Circle of Life Hospice + Matthew Dickson + sysadmin&nwacircleoflife.org +42049 + Achkey Ltd + Hugo Jobling + hugo.jobling&achkey.com +42050 + China Security & Surveillance Technology,Inc. + chen yiliang + yiliang.chen&csst.com +42051 + Skyward Information System Co.,LTD. + Yasushi Sonegawa + sone&shinetsu.jp +42052 + vitapublic GmbH + Christian Bode + hostmaster&vitapublic.de +42053 + MEDvision360 + Ralph van Etten + ralph&medvision360.com +42054 + Stadler Bussnang AG + Andreas Buechi + andreas.buechi&stadlerrail.com +42055 + West Lothian Council + Jamie Girdwood + jamie.girdwood&westlothian.gov.uk +42056 + MultiToll Solutions SAS + Hugues-Olivier Yar + normes&multitoll.fr +42057 + Cegedim + Romain Vergniol + hostmaster&cegedim.com +42058 + DONAU INFORMATIK + Roman Szabados + Support+IANA-OID&DONAU-INFORMATIK.EU +42059 + MED2020 Health Care Software Inc. + Michael Chiviendacz + michael.chiviendacz&med2020.ca +42060 + Egon Braun + Egon Braun + egonbraun&gmail.com +42061 + SquareTwo Financial + John-Thomas Gaietto + jtgaietto&squaretwofinancial.com +42062 + CyberTransJapan Co., Ltd. + Takashi Takesue + info&cybertrans-jp.com +42063 + Guidoon SAS + Cyril Tata + cyril.tata&guidoon.com +42064 + Royse City ISD + Tony Liptrap + liptrapt&rcisd.org +42065 + Iberia + Javier Pardo + jpardo&iberia.es +42066 + dbSpectra + Hal Asbridge + halasbridge&aksolutions.com +42067 + Bayerische Staatsforsten AoeR + Markus Schneider + markus.schneider&baysf.de +42068 + UBM Drecker GmbH + Pascal Drecker + pascal.drecker&ubm.de +42069 + The Comptroller General’s Department + Ms. 
Rattanaporn Ussavanuphap + ratanaus&cgd.go.th +42070 + German Aerospace Center; Institute of Communications and Navigation + Hartmut Brandt + hartmut.brandt&dlr.de +42071 + Sprecher Automation GmbH + Helmut Feichtinger + pen-admin&sprecher-automation.com +42072 + Homes and Communities Agency + Jon Walker + jon.walker&hca.gsi.gov.uk +42073 + DatASE + Martin Pegler + martin&datase.co.uk +42074 + Deltenna Limited + Sean Godwin + sean.godwin&deltenna.com +42075 + BAFO Technologies Corp + Jerry Chang + jerry_chang&bafo.com.tw +42076 + saltation GmbH & Co. KG + Matthias Hörmann + mhoermann&saltation.de +42077 + sRatio + John P. Birck + it&sratio.com +42078 + Recursoft.org + Michael Dahlberg + dahlberg&recursoft.org +42079 + Army Emergency Relief + Julius Asante + julius.asante.ctr&aerhq.org +42080 + Dajar + Mateusz Kijowski + mateusz.kijowski&dajar.pl +42081 + TravailPrive + Mateusz Kijowski + m.kijowski&travailprive.eu +42082 + Travis Perkins PLC + Craig Butler + craig.butler&travisperkins.co.uk +42083 + JS Networking Lab + Jason Stultz + stultzjason&gmail.com +42084 + Mike Mackintosh + Mike Mackintosh + mike.mackintosh&zyp.io +42085 + Prolateral Consulting Ltd + Ian Chilvers + Ian.Chilvers&prolateral.com +42086 + Vossloh Cogifer + Guy Bourgin + Guy.Bourgin&vcsa.vossloh.com +42087 + Business Insurance Direct + Sears Young + sears.young&bizdirect.co +42088 + Legal & General Nederland + Wijnand Westra + beheer&landg.nl +42089 + Rambler Internet Holding LLC + Anes Mukhametov + noc&rambler-co.ru +42090 + Aleat shpk + Elton TORO + elton.toro&aleat.com +42091 + Tritech International Limited + Allan Leeming + postmaster&tritech.co.uk +42092 + Serttel LTDA + Bruno Figueirôa + bruno.figueiroa&serttel.com.br +42093 + Expand + Pablo Alsina + palsina&expand.com.uy +42094 + Bfabric.org + Marco Schmidt + ianapen&bfabric.org +42095 + NII SOKB Ltd + Technical Center, Michael Rubashenkov + saferoute&niisokb.ru +42096 + IQnet Ltd + Dragan Vlahovic + dragan&iqnet.co.rs +42097 + IT2u Czech s.r.o. + Dusan Purr + dusan.purr&it2u.cz +42098 + Fahrzeugsystemdaten GmbH + Erik Hennig + erik.hennig&fsd-web.de +42099 + Smart Green Labs S.L. + Carlos Losada + closada&itelsis.com +42100 + DV Industrial Computer Ltd. + Alexander Mruga + am&inpc.com.ua +42101 + iTEL + Alexander Terlanov + alex&tg.lv +42102 + Communications Audit UK Ltd + Paul Thomson + paul.thomson&commsaudit.com +42103 + hmcw gmbh + Richard Wachler + rw&hmcw.de +42104 + wi2be Tecnologia S/A + Albary Franco Pimpao Junior + albary&wi2be.com +42105 + WebControl + Pavel Nesterov + nesterov&web-control.ru +42106 + 'MIGHTY APPARATUS FOR RADIOBRODCASTING AND TV" Joint-Stock Company + Aleksandr Khizhnichenko + mart&mart2.spb.su +42107 + South Carolina Department of Revenue + Rick Gibson + rick.gibson&sctax.org +42108 + HRSoftworks + Brad Held + bheld&hrsoftworks.com +42109 + Linux Based Systems Design, Ltd + Nigel Kukard + nkukard&lbsd.net +42110 + API Digital Communications Group, LLC + Chris Adams + cadams&api-digital.com +42111 + Inovonics, Inc. + Josh McAtee + josh&inovonicsbroadcast.com +42112 + Kakapo Technologies Ltd. + Gevan Dutton + gevan&kakapo.co.nz +42113 + STAsoft.net + Stanislav V. 
Korsakov + sta&stasoft.net +42114 + Finnish Institute of Occupational Health + Teemu Suurnäkki + teemu.suurnakki&ttl.fi +42115 + CommerceWest Bank + Chuck Charlton + ccharlton&cwbk.com +42116 + AIG + Muneer Baddad + muneer.baddad&aig.com +42117 + Oman Telecommunications Company (S.A.O.G) + Dinesh.S.Devan + dinesh.devan&omantel.om +42118 + IDentAcc + Steffen Koehler + skoehler&identacc.de +42119 + TRACT cjsc + Alexander Golenshin + agolenshin&tract.ru +42120 + Multimatic Inc. + Michael Scott + mscott&multimatic.com +42121 + Michigan Public Health Institute + Joshua Coons + jcoons&mphi.org +42122 + Yurur + Burhan Yurur + bur.han&yurur.net +42123 + Shanghai Xinyou Information Technology Co., Ltd. + Vincent Chen + newpomelo&163.com +42124 + YN-IT + Yosef Nesirat + info&yn-it.de +42125 + CK Engineering Ltd + Dzina Ilya + idzina&dics.ua +42126 + NOVARCHIVE + Nathalie Pinto + pbezamat&novarchive.fr +42127 + SYMCOM INC + Kris Jensen + kris.jensen&symcom.com +42128 + KUL Elektronik Teknolojileri Ltd. + Basri KUL + kulelektronik&gmail.com +42129 + Continental Graphics Corp + Andrew Graham + hostmaster&cdgnow.com +42130 + Skylink Technology Inc. + Alan Meeh + ameeh&skylinktechnology.com +42131 + Westmont College + John Rodkey + rodkey&westmont.edu +42132 + Applied Security GmbH + Volker Roethel + pki&apsec.de +42133 + GoS Networks Ltd. + Graham Willmott + graham.willmott&gosnetworks.com +42134 + ScaleArc + Deepak Patel + deepak.patel&scalearc.com +42135 + TRUSTe + Robert Bruce Carleton + bcarleton&truste.com +42136 + Ultra Electronics Airport Systems + Stuart Jones + stuart.jones&ultra-as.com +42137 + Unipagos S. de R.L. de C.V. + Paul Coppinger + pc&unipagos.com.mx +42138 + Virtualmaster, s.r.o. + Josef Liska + josef.liska&virtualmaster.com +42139 + Internet Corporation for Assigned Names and Numbers + Punky Duero + punky.duero&icann.org +42140 + Bitfabrik GmbH & Co. KG + Peter Schillerwein + peter.schillerwein&bitfabrik.de +42141 + Laboratoire d'Informatique Paris Nord + Mr Mamadou Sow + mamadou.sow&lipn.univ-paris13.fr +42142 + The Faculty of Advocates + Mark Beecham + mark.beecham&advocates.org.uk +42143 + Moose Beast Software + Gary Bourke + gary.bourke&moose-beast.com +42144 + cycos AG + Volker Fenners + Volker.Fenners&cycos.com +42145 + PRIME Apparatus + Cliff Wheatley + cliff.wheatley&gmail.com +42146 + EntobilSoft, Inc. + Soonmi Lee + tensal.smlee&entobilsoft.com +42147 + Pepperl+Fuchs SE + Benjamin Reibold + breibold&de.pepperl-fuchs.com +42148 + The Geneva Foundation + Jason McKinney + jmckinney&genevausa.org +42149 + Cloud Life Software, LLC + Michael Milom + msmilom&cloudlifesoftware.com +42150 + AffiniInternational B.V. + Gerard Juyn + iana&affinii.nl +42151 + Net Business Pty Ltd + Neil Robinson + neil&netbusiness.com.au +42152 + Itransition + Vadzim Kuzmich + v3.kuzmich&itransition.com +42153 + Chuden CTI Co.,Ltd. + Masashi Kamei + Kamei.Masashi&cti.co.jp +42154 + oak3 GmbH + Thomas Jakobi + tj&oak3.com +42155 + Evolution Racingteam Saar + Eric Scheid + eric&rainerscheid.de +42156 + NeoLab-Systems S.A.R.L. + Didier PRALON + contact&neolab-systems.fr +42157 + CERTIFIRMA, SOCIEDAD ANONIMA + David Elizondo + david&certifirmas.com +42158 + NetAgent Co., Ltd. + Packet Black Hole Technical Support Services (Sugiura Takayuki) + pbh-support&netagent.co.jp +42159 + AT-Biotech Traceability Information Systems, S.L.U. 
+ Juan Gómez + jgomez&at-biotech.com +42160 + Iverdahl Systems + Michiel Vincent de Jong + mvdejong&mvdejong.nl +42161 + GIE AGIRC-ARRCO + Stephane Barban + sbarban&agirc-arrco.fr +42162 + NAKAYO ELECTRONICS CO.,LTD + Masafumi Sueyoshi + sueyoshi&es.nakayo.co.jp +42163 + EBS Service Company Limited + Dimitrije Miljevic + dimitrije.miljevic&ebs.com +42164 + Ministério Público do RS + Fabiano Martins + fmartins&mp.rs.gov.br +42165 + Pi-Coral Inc. + Robert Snyder + mib-contact&pi-coral.com +42166 + Saint Josephs Hospital, Yonkers + Deborah DiBernardo + deborah.dibernardo&saintjosephs.org +42167 + Data Tote + Jim Tambling + jim.tambling&datatote.co.uk +42168 + blackchair Ltd + Austen Jackson + austen.jackson&theblackchair.com +42169 + The Coca-Cola Company + Thorsten Hofrichter + thofrichter&coca-cola.com +42170 + Tiger Computing Ltd. + Chris Boot + info&tiger-computing.co.uk +42171 + Gravitate + Boris Putanec + hostmaster&gravitate.com +42172 + Strencom + James Waite + james.waite&strencom.net +42173 + Adiczion SARL + Christophe CRIER + ccr&adiczion.com +42174 + ThiemoNet + Thiemo Nordenholz + oidreg&service.thiemo.net +42175 + REFER Telecom, S.A. + Pedro Miguel Oliveira Simões + posimoes&refertelecom.pt +42176 + eBernd + Bernd Hofmann + iana&ebernd.com +42177 + Anuta Networks, Inc. + Praveen Vengalam + praveen&anutanetworks.com +42178 + Nym Networks + Jakob Borg + jakob&nym.se +42179 + Sonora Quest Laboratories + Bryan Seminara + webmaster&sonoraquest.com +42180 + Kantonsspital Winterthur + Matthias Spühler + matthias.spuehler&ksw.ch +42181 + Wavecom - Soluções Rádio S.A. + Bruno Antunes + bantunes&wavecom.pt +42182 + Speed and Function + Nick Gluzdov + gluzdov&gmail.com +42183 + Digitus Biometrics + Chris Marsden + cmarsden&digitus-biometrics.com +42184 + ECG Management Consultants, Inc. + John Murphy + support&ecgmc.com +42185 + DEMTECH + Jean DEMARTINI + jean.demartini&demtech.net +42186 + National Cement Company, Inc. + Valentin Stoilov + domreg&natcem.com +42187 + Podomatic, Inc + Justin Dossey + jbd&podomatic.com +42188 + MATRIXGROUP (CMS) PTY LTD + Julian McDonnell + julianm&matrixgroup.com.au +42189 + JRTwine Software, LLC + James R. Twine + jtwine&jrtwine.com +42190 + Genowise + Lixin Zhou + zhou.lixin&gmail.com +42191 + TotalServe Pty Ltd + Michael Pasqualone + support&totalserve.net.au +42192 + IDF Connect, Inc. + Richard Sand + rsand&idfconnect.com +42193 + Alpha Networks S.A. + Xavier Sacré + xavier.sacre&alphanetworks.be +42194 + Matthews Midrange Consulting, Inc. + Gene Matthews + gene&mmc-inc.com +42195 + Tektorque, Lda + Emanuel Fernandes + efernandes&tektorque.com +42196 + Cauldron Development LLC + Joseph Coffland + joseph&cauldrondevelopment.com +42197 + Coffee Bean Software Pty Ltd + Mike Ciavarella + snmp-pen-contact&cbsoft.com.au +42198 + IO-Power Technology Co., Ltd. + Jacky Cheng + jacky&io-power.com.tw +42199 + Foothills Rural Telephone Cooperative Corporation Inc. + Matthew Bailey + matthew&foothills.coop +42200 + Ecil Informática Indústria e Comercio LTDA + Alberto Ferreira da Silva + alberto&ecilenergia.com.br +42201 + Schenker, Inc. + Brian Klauss + brian.klauss&dbschenker.com +42202 + FreeTel, s.r.o. + Jaroslav Střeštík + info&freetel.cz +42203 + TRAÇOTOPÁZIO - INFORMÁTICA UNIPESSOAL LDA + Fernando Hackbart + tracotopazio.informatica&gmail.com +42204 + Kumoya Network + Hiroki Otsuka + otsuka&kumoyanet.com +42205 + Dooks Computer Services Ltd. 
+ Alexey Maslennikov + alexey&dooks-it.com +42206 + Proelse + Jesus Manuel Conejo Sarabia + jmconejo&proelse.es +42207 + Commission Scolaire de la Pointe-de-l'Ile + Robert Mc Cready + robert-mccready&cspi.qc.ca +42208 + itecPlus GmbH + Reiner Winter + reiner.winter&itecplus.de +42209 + CharlesRead Dot Com + Charles Read + charles&charlesread.com +42210 + Beenius d.o.o. + Miha Cerar + miha.cerar&beenius.tv +42211 + SHENZHEN HUAXUNARK TECHNOLOGY Co. Ltd. + Sun Rongjun + zhangming&huaxunchina.cn +42212 + CareCenter Software GmbH + Christoph Hatzenberger + hatzenberger&carecenter.at +42213 + troydenton.ca + Troy Denton + trdenton&gmail.com +42214 + Kaplan Bilisim Teknolojileri Yazilim ve Ticaret Ltd. + Yasin KAPLAN + yasin.kaplan&kaplansoft.com +42215 + Alerant Inc. + Peter Nagy + nagy.peter&alerant.hu +42216 + BlueCorner + Rueben Van Mol + rueben.vanmol&enovates.com +42217 + Khoo Software Solutions + Eddie Khoo + software&khoo.id.au +42218 + ValleyCare Health System + Jenny Yiu + jyiu&valleycare.com +42219 + UniCredit Luxembourg S.A. + Michael WALDINGER + PrivateEnterpriseNumber&unicreditgroup.lu +42220 + TELEDATA IT-Lösungen GmbH + Roman Gross + support&teledata-it.de +42221 + Syl Research Limited + Adrian John + adrian.john&sylsemantics.com +42222 + Integrated Systems Technology, Inc. + Nathan Schlutter + NSchlutter&IntSysTek.com +42223 + Archive Analytics Ltd + John Burns + John.Burns&ArchiveAnalytics.com +42224 + Fraunhofer Portugal Research Center for Assistive Information and Communication Solutions (Fraunhofer AICOS) + Carlos Alexandre Teixeira do Amaral Resende + carlos.resende&fraunhofer.pt +42225 + Amco Marketing + Kate Gray + kate&amcomarketing.com +42226 + Evolve Media LLC + Richard Kruszewski + sysadmins&evolvemediallc.com +42227 + Max-Planck-Institut fuer Mikrostrukturphysik + Frank Mueller + frank.mueller&mpi-halle.mpg.de +42228 + Center Group + Dinar Rakhimbaev + rakhimbaev&cg.ru +42229 + Coriant R&D GmbH + Sharfuddin Syed + ssyed&infinera.com +42230 + HiProCall GmbH + Lothar Schmitt + L.Schmitt&hiprocall.de +42231 + AlfaTrein Ltd + Vladimir Isaev + isaevv_r&alfatrein.ru +42232 + Fanzz + Shawn Waters + iana-pen&fanzz.com +42233 + Sigma Designs Inc. + Edmund Lee + edmund_lee&sigmadesigns.com +42234 + Knotice, Ltd. + Ron Cuirle + rcuirle&knotice.com +42235 + Tresorit Kft. + Istvan Lam + lam&tresorit.com +42236 + RDT Ltd + Holger Hasenstrauch + itsupport&rdt.co.uk +42237 + Texas A&M University-Central Texas + Information Technology Services + its&tamuct.edu +42238 + Grenville Mutual Insurance Company + Andrew Asquith + itservices&grenvillemutual.com +42239 + Aspira Networks, Inc + Bart Wyatt + iana-admin&aspiranetworks.com +42240 + Keyano College + Hasan Mehdi + hasan.mehdi&keyano.ca +42241 + Geoquip Worldwide + Paul Renham + paul.renham&geoquip.com +42242 + znets + DESCOMBES Thierry + thierry.descombes&gmail.com +42243 + DLA Piper LLP (US) + Don Krtanjek + donald.krtanjek&dlapiper.com +42244 + ITS Express, Inc. + Mark Schulting + mschulting&itsexpress.com +42245 + Tagged, Inc. + Corey Hickey + chickey&tagged.com +42246 + OnePIN, Inc. + Marcin Nowak + it&onepin.com +42247 + Aleph01 + David Rouillon + contact&aleph01.com +42248 + SAIV + Giovanni Lovato + giovanni.lovato&saiv.it +42249 + Claritech SRL + Cristian Croitor + cristian&claritech.ro +42250 + HFC Technics Ltd. + Gergely Peter, Tamas + g.tamas&hfctechnics.hu +42251 + FutureTek, Inc. 
+ In Hyun, Cho + xtra72&gmail.com +42252 + Pflegeheim Alexander Beer GmbH & Co KG + Martin Schneider + m.schneider&pflegeheim-beer.at +42253 + Zalando GmbH + Hanno Hecker + sysop&zalando.de +42254 + Inline Telecom Solutions + Dmitriy Pustovalov + d_pustovalov&inlinetelecom.ru +42255 + The All England Lawn Tennis Club (Championships) Limited + Paul Beyer + it-alerts&aeltc.com +42256 + McGuireWoods LLP + Stephen Hogan + shogan&mcguirewoods.com +42257 + Cambridge Major Laboratories, Inc. + Joe Nolan + j.nolan&c-mlabs.com +42258 + ooblick.com + Andrew Arensburger + iana&ooblick.com +42259 + Green Energy Options Ltd + Adrian van den Heever + geo.pen&greenenergyoptions.co.uk +42260 + Bent Vector + Rob Targosz + rtargosz&bentvector.com +42261 + Sayegh & John e-Trolley GbR + Nabil Sayegh + internet&e-trolley.de +42262 + TechRede, LLC + Shannon Fritz + pen&techrede.net +42263 + Servas + Matteo Pedani + matteo&servas.it +42264 + Fabien Debuire + Fabien Debuire + debuire.fabien&gmail.com +42265 + Thunder Software Technology Co., Ltd. + zhangqiang + zhangqiang&thundersoft.com +42266 + Otto (GmbH & Co KG) + Tino Brandl + OV-G-V-FI-RK-GR-DOMAINVERWALTUNG&otto.de +42267 + SARL Mixcom + Philippe Rolland + mibadmin&mixcom.fr +42268 + MEWA Textil-Service AG & Co. Management OHG + Hagen Block + Hagen.Block&mewa.de +42269 + Rigspolitiet (Danish National Police) + Peter Thiim + pth013&politi.dk +42270 + Azienda Unita' Sanitaria Locale di Parma + Domenico Ielo + dielo&ausl.pr.it +42271 + Fixious Global Indonesia + Feny Suharyono + fenk&fixious.com +42272 + Howard University Hospital + Bernie Galla + bgalla&huhosp.org +42273 + AMPER + David Marza + david.marza&eronline.com +42274 + Nexusguard Limited + Juniman Kasman + juniman&nexusguard.com +42275 + Advanced MR Analytics AB + Hannes Järrendahl + pen&amra.se +42276 + Infragear Inc + Livio Ceci + livio&infragear.com +42277 + CSW.IO + Robert Van Kleeck + rob&csw.io +42278 + Precitel SA + Jim Liener + jml&precitel.com +42279 + SensMaster S/B + Martin Harnevie + martin&sensmaster.net +42280 + SatExpander + Dan Peleg + dan.peleg&satexpander.com +42281 + InfoBridge Solutions + Naimish Dayah + naimish&infobridge.co.uk +42282 + Pragmatix Services Private Limited + Sandeep Singh Sandhu + sandeep.sandhu&pragmatixservices.com +42283 + Berwick Area School District + Dave Lovette + dlovette&berwicksd.org +42284 + Kai van Es + Kai van Es + kai.vanes&gmail.com +42285 + FCS Computer Systems Sdn Bhd + David Hutt + david.hutt&fcssolution.com +42286 + Connectem Inc. + Heeseon Lim + heeseon&connectem.net +42287 + UAP inc. + Pierre Ouellet + is-admin&uapinc.com +42288 + Sportech Inc. + Andrew Hall + andrew.hall&sportech.net +42289 + NetTech Associates LLC + Dean Thuline + deant&ntaglobal.com +42290 + LLC Sysvisor + Aleksey Tretyakov + sysvisor&mail.ru +42291 + DWANGO MOBILE Co., Ltd. + Masahiro Honma + infra1&dwango.co.jp +42292 + SL Audio A/S + Thomas Birkelund + tbi&lyngdorf.com +42293 + North-West Customs-Logistical Service Co. Ltd. + Maksim Andrukhovich + uc&sztls.ru +42294 + S. & A.S. Ltd. 
+ Ziad Boustany + ziad&sascontrollers.com +42295 + imzadi.de Network + Anna Christina Naß + christina&imzadi.de +42296 + Impero Solutions Ltd + Richard Moscone + rmoscone&imperosoftware.com +42297 + Fraunhofer-Institut fuer Kommunikation, Informationsverarbeitung und Ergonomie FKIE + Steffen Grathwohl + steffen.grathwohl&fkie.fraunhofer.de +42298 + Smoothwall Ltd + Daniel Barron + daniel.barron&smoothwall.net +42299 + Johnson Financial Group + Hank Ollanketo + hollanketo&johnsonbank.com +42300 + Ministère de la santé et des serveurs sociaux + Mathieu Alain + mathieu.alain&msss.gouv.qc.ca +42301 + turleyclan.com + Edward Lawrence Turley + ed-and-ale&sbcglobal.net +42302 + Beijing ForEase Times Technology Co., Ltd. + Jonsen Yang + yangmingmao&forease.net +42303 + TE-SYSTEMS GmbH + Oliver Koerber + info&te-systems.de +42304 + it & synergy GmbH + Martin Koeditz + martin.koeditz&it-syn.de +42305 + Lupus alpha Asset Management + Matthias Biedenkapp + group-adminmanager&lupusalpha.de +42306 + CANARIE Inc. + Chris Phillips + iana&canarie.ca +42307 + Wildcard UK Ltd + Lee Johnston + lee&wildcard.net.uk +42308 + IndexUniverse, LLC + Fernando Rivera + frivera&indexuniverse.com +42309 + Skale, Inc. + Jared Brank + jared.brank&skalenetworks.com +42310 + Matrix Switch Corporation + Element Green + element&matrix-switch.tv +42311 + X-Centric Solutions + Justin Knash + jknash&x-centric.com +42312 + Hangzhou Zailing Electronic Technology Co., LTD + Jing Kuo + jingkuo&zailingtech.com +42313 + Synergy Tecnologia + Mauricio Strasburg + mauricio&synergy.com.br +42314 + D2D Technologies, LLC + Steve Doll + steve.doll&d2dtechnologies.com +42315 + Renew Group Private Limited + David M. Zar + dzar&ultralinq.com +42316 + Pragmatech + Valentine Sinitsyn + v.sinitsyn&pragmatech.ru +42317 + inVentia + Marcin Dutka + marcind&inventia.pl +42318 + Recognition Technologies, Ltd + Alexander Sein + a.sein&recognize.ru +42319 + 777 Technology Solutions Limited + Andrew Heron + Andrew.Heron&777T-S.COM +42320 + Unassigned + Removed 2013-08-27 + ---none--- +42321 + Mother Lode Holding Company + Ryan Murphy + rmurphy&mlhc.com +42322 + TrakSystems + Remy Mudingay + mudingay&trak-systems.com +42323 + Demma + Mark Hodge + markh&demma.co.uk +42324 + Powercode LLC + Simon Westlake + simon&powercode.com +42325 + Dialight, Inc. + Robert Henson + rhenson&dialight.com +42326 + ICON Americas + Sergio Samayoa + sergiosamayoa&icon.com.gt +42327 + IEEE Student Branch Passau + IEEE Student Branch Passau Administrator of the Day + iana-contact&ieee.uni-passau.de +42328 + Nementis + Antonello Giuseppe Bianchi + antonello&nementis.eu +42329 + ipowertec + zhoumou + zhoumou&gmail.com +42330 + Deltapath Commerce And Technology Limited + Yip Yue Hong + hong&deltapath.com +42331 + Polarlink + Chenfeng Liang + chenfeng.liang&polarlink.com.tw +42332 + Vaillant Group + Dr. Thomas Demuth + thomas.demuth&vaillant-group.com +42333 + Aliter Technologies, a.s. 
+ Milan Gottstein + milan.gottstein&aliter.com +42334 + Relmek co., Ltd + plato lin + plato&relmek.com.tw +42335 + iQ Consult Pty Ltd + Benjamin Johns + bjohns&iqconsult.com.au +42336 + ADITO Software GmbH + Tobias Feldmann + administrator&adito.de +42337 + New York State Workers' Compensation Board + Salvatore Galazzo + salvatore.galazzo&its.ny.gov +42338 + Anywhere.24 Technologies GmbH + Marc Aigner + subscriptions&anywhere24.com +42339 + Sabtech + Bryan Ly + lyb&sabtech.com +42340 + Open Standard Digital-IF Interface + Herald Beljour + herald.beljour&us.army.mil +42341 + Tucson Airport Authority + Robert Maudsley + rmaudsley&flytucson.com +42342 + Guangdong Cable Corporation Limited + Wangga + wga&gdcatv.com.cn +42343 + James Hurley + James Hurley + hurleyjames6&gmail.com +42344 + Puli Space Technologies Ltd. + Attila Bárdi + attila.bardi&pulispace.com +42345 + Fluidic, Inc., d/b/a Fluidic Energy + Bruce Weber + snmp-admin&fluidicenergy.com +42346 + E-SIGN S.A. + FLAVIO TAPIA + ftapia&e-sign.cl +42347 + Toyou Feiji Electronics Co., Ltd. + Wang Daobang + wangdb&toyou.com.cn +42348 + RUSNANO + Ivan Kostukov + licensing&rusnano.com +42349 + Noack Group + Christian Dergovics + ianapen&noackgroup.com +42350 + Trebing & Himstedt Prozessautomation GmbH & Co. KG + Dr. Christopher Anhalt + canhalt&t-h.de +42351 + Champs Libres SCRLFS + Julien Fastré + julien&fastre.info +42352 + Hanweck Associates LLC + Gerald Hanweck + jhanweck&hanweckassoc.com +42353 + LottSpot + James Lott + james&lottspot.com +42354 + Trackprotect B.V. + Jair Römer + jair.romer&rhosound.com +42355 + ABCTec + João Souza + joao&abctec.com.br +42356 + Electronic Media Services Ltd + Andrew Lambert + sysadmin&ems-uk.com +42357 + CHILLI MINT LABS LIMITED + BEN DURKIN + support&chillimintlabs.com +42358 + IP2I + HERVE PAPIN + herve.papin&ip2i.fr +42359 + Versa Networks, Inc + Shiva Shenoy + shiva&versa-networks.com +42360 + Skybox Imaging, Inc. + Evan Tidd + evan&skybox.com +42361 + rahont + Ivan Raduntsev + pen-data&rahont.org +42362 + spacefrogg.net + Michael Raitza + s-pen&spacefrogg.net +42363 + Dematis + ludwig schubert + ludwig.schubert&dematis.com +42364 + Optoscape Optical Electronic Tech.Co.,Ltd + maiyuan + yuan.mai&optoscape.com +42365 + H & M Hennes & Mauritz AB + Emil Linder + hm_it_infra&hm.com +42366 + WxBR Wireless Broadband Soluctions + Arnaldo Verdibello + averdibello&wxbr.com.br +42367 + Gedomo GmbH + Hannes Hörting + h.hoe&gedomo.com +42368 + Azuru Networks + Liam McSherry + team&azuru.me +42369 + Softel ltd + Stanislav Georgiev + s.georgiev&softel.bg +42370 + ADRIATIC SLOVENICA d.d. + Mitja Trtnik + mitja.trtnik&adriatic-slovenica.si +42371 + PA CSS (Pennsylvania eHealth Partnership Authority’s Community Shared Services) + Himabindu Bolisetty + bindu&careevolution.com +42372 + Banterra + Michael Goff + mhgoff&banterra.com +42373 + Kliniken des MTK GmbH + Mathias Rockel + mrockel&kliniken-mtk.de +42374 + Inmar Enterprises + Matt Aloi + Matt.aloi&inmar.com +42375 + Unassigned + Removed 2013-09-06 + ---none--- +42376 + Riverbed Technology Labs GmbH (formerly 'Ocedo GmbH') + Ulrich Weber + ulrich.weber&riverbed.com +42377 + MBD Consulting, LLC + Timothy Sawyer + tim.sawyer&mbdsolus.com +42378 + FOSS-Group GmbH + Beat Stebler + beat.stebler&foss-group.de +42379 + Danske Fragtmænd A/S + Lars Pedersen + dflpn&fragt.dk +42380 + IDFocus + Mark I. van Reijn + mvreijn&idfocus.nl +42381 + Human Brain Project + Carlos Aguado + bbp.administrator&epfl.ch +42382 + iSystems e.U. 
+ Jenny Danzmayr + iana&isystems.at +42383 + Horizon Fuel Cell Technologies + Richard Clint Escamilla + richardclint&horizonfuelcell.com +42384 + Tecnologías Inteligentes y Modelación de Sistemas, S.A de C.V. + Roberto Hiribarne Guedes + hiribarne&gmail.com +42385 + AIC Inc. + Jack Hsu + jack.hsu&aicipc.com.tw +42386 + dylanharris.org + Dylan Harris + mail&dylanharris.org +42387 + Da Planet Security + Da Planet Security + support&dapla.net +42388 + TEL-STER sp. z o.o. + Tomasz Lis + Tomasz.Lis&tel-ster.pl +42389 + Ital-Mec s.r.l. + Alessandro Fiani + a.fiani&ital-mec.com +42390 + Oslo KFUMs Sjøkorps + Ragnar Storstrøm + sjefen&sjokorpset.no +42391 + Eastern Informatics, Inc. + Mike Henderson + mike&easterninformatics.com +42392 + FOXTER Cia Imobiliária + Frederico Pandolfo + frederico&foxter.net.br +42393 + Teledata Comunicaciones S.A. + Jorge Barattini + jorgito&teledata.com.uy +42394 + Synapse + Gre7g Luterman + greg.luterman&synapse-wireless.com +42395 + EPM + Travis Meier + TravisM&epm.co.za +42396 + xsinfosol pvt ltd + Amit Pasari + amit&xsinfosol.com +42397 + Grandstream Networks, Inc. + Xiaofeng Xu + xxu&grandstream.com +42398 + KnCMiner AB + Marcus Erlandsson + marcus&kncminer.com +42399 + Garuda Permata Saputra, CV. + Stephanus Bagus Saputra + shimaiqiwang&yahoo.com +42400 + TD Tech Ltd. + Zhang Hehua + tdtechianaoid&gmail.com +42401 + CanTech s.r.o. + Frantisek Hertl + hertl&cantech.cz +42402 + Spec S.A. + Toni Beleta + abeleta&grupospec.com +42403 + Ivane Javakhishvili Tbilisi State University + Mikheil Makhviladze + Mikheil.makhviladze&tsu.ge +42404 + Vadacom Limited + Nic Bellamy + snmp&vadacom.co.nz +42405 + Roambotics, Inc. + Scott Menor + scott&roambotics.com +42406 + SATO Corporation + Konosuke Haraguchi + hara2772&pn.sato.co.jp +42407 + iGrid S.L. + David Bru i Bru + david.bru&igrid-td.com +42408 + HealthSafe24 + Friedhelm Matten + friedhelm.matten&healthsafe24.de +42409 + TopMenu.com + Justin Kaufman + jk&topmenu.com +42410 + Blue Denim Consulting Group Inc. + Shawn Brant + bluedenim&bdenim.ca +42411 + Niklaus Ltd. + Dmitry Grunin + dgrunin&niklaus.ru +42412 + Osorno Enterprises Inc. + Dr. H. Peter Hombach + p.hombach&osorno.ca +42413 + PACSHealth, LLC + Steve Massey + steve.massey&pacshealth.com +42414 + Cloudacc Interactive, Inc. + Cheng Zhang + cheng.zhang&cloudacc-inc.com +42415 + Prince Rupert Grain Ltd. + Howard Hood + hhood&prgrain.bc.ca +42416 + Software Engineering Center (SEC) + Jamie C. Rizzo + Jamie.Rizzo&us.army.mil +42417 + A2Zlogix + Sami Fattah + sfattah&a2zlogix.com +42418 + GovSource Pty Ltd + Steve Capell + admin&govsource.com.au +42419 + NICEUC Communication Tech CO., LTD + young peng + supports&niceuc.com +42420 + Hebei SICON-EMI Power System Co., Ltd. + LiuYong + xltyt&qq.com +42421 + OpenVox Ltd. + Miao Lin + lin.miao&openvox.cn +42422 + ALLWIN Network Corp. + Michael Kuo + michael&allwin.com.tw +42423 + Republic of Azerbaijan Ministry of Foreign Affairs + Mr. 
Talib Talibli + t_talibli&mfa.gov.az +42424 + International Card Services BV (ICS) + Erik Uyttenbroek + euyttenbroek&icscards.nl +42425 + Thiesen Hardware- & Software-Design GmbH + Erik Habicht + e.habicht&thiesen.com +42426 + Institut Paul Bocuse + David Dessertine + david.dessertine&institutpaulbocuse.com +42427 + New York Community Bancorp + Nathan Arlington + nathan.arlington&mynycb.com +42428 + Bharti Airtel Limited + Ravinder Bhardwaj + ravinder.bhardwaj&airtel.com +42429 + Hidden Authentication and Trust service + Filigranowy Dragon + filigranowy.dragon&riseup.net +42430 + University of Massachusetts + James Barbieri + jbarbieri&umassp.edu +42431 + Chelyabinsk palace of pioneers and pupils named after Krupskaya N.K. + Dmitry Merzlov + admin&chel-dpsh.ru +42432 + lepassepresent.com + Nathalie Bezamat + nb&lepassepresent.com +42433 + Parliamentary Commissioner for Administrative Investigations + Devin Simpson + devin.simpson&ombudsman.wa.gov.au +42434 + BOMATEC AG + Markos Pliakas + markos.pliakas&bomatec.ch +42435 + LEONHARD KURZ Stiftung & Co. KG + Markus Bergner + postmaster&kurz.de +42436 + Ojsc NIAEP + Roman Kluev + sam&niaep.ru +42437 + Deltek, Inc. + Rich Ronston + security&deltek.com +42438 + Saratoga Speed Inc. + Julian Ratcliffe + julian.ratcliffe&saratoga-speed.com +42439 + Stephan Seitz + Stephan Seitz + s.seitz&secretresearchfacility.com +42440 + IQ Devices + Steve Rogers + steverogers&iqdevices.com +42441 + Uniscon universal identity control GmbH + Arnold Monitzer + contact&uniscon.de +42442 + Hochschule der Medien Stuttgart + Engster, Florian + hostmaster&hdm-stuttgart.de +42443 + Genesis Integration + Manuel Deschambault + themactech&me.com +42444 + Oriental Cambridge Education Group + Wang Bin + wangbin&dfjq.com.cn +42445 + InterASIA Solutions Inc. + Dharmen Panchal + dharmen_panchal&int-asia.com.tw +42446 + Beijing Hesun Technologies Co.Ltd. + liming zhang + zhangliming&hesuns.com +42447 + FreeSoft Nyrt. + Frigyes Bato + fbato&freesoft.hu +42448 + Taisis Integration And Consulting Services sl + Rafael Fernández + rafael.fernandez&taisis.com +42449 + InvestLab Technology, LLC + Robert Philipp + rphilipp&investlab.com +42450 + Mutual Aid Labs, LLC + Ryan Wessels + ryan.wessels&mutualaidlabs.com +42451 + Rizotec + Liran Kessel + liran&rizotec.com +42452 + IRIS-RFID + JEREMY LE GALL + admin-reseau&iris-rfid.com +42453 + Drawersteak Research + Andrew Zonenberg + azonenberg&drawersteak.com +42454 + Funkring.net + Martin Reisenhofer + martin.reisenhofer&funkring.net +42455 + FOBAS Consulting, Inc. + Satoru Matsushita + smatsush&fobas.jp +42456 + VM Farms + Justin Good + justin&vmfarms.com +42457 + IDIEIKON + Alejandro Echevarria + aecheverria&idieikon.com +42458 + TigerLead Solutions + Andrew Kerr + ops&tigerlead.com +42459 + Esdenera Networks GmbH + Reyk Floeter + reyk.floeter&esdenera.com +42460 + SYS'TRONICS Co., Ltd. + kwang young lee + kylee&systronics.kr +42461 + Innosonix GmbH + Markus Baetz + markus&innosonix.de +42462 + David Ouagne + David Ouagne + david.ouagne&gmail.com +42463 + TLS Corp. + Ioan Rus + ioan.rus&telosalliance.com +42464 + binarycube + Chris Thornhill + chris&binarycube.com +42465 + Xerx Software Ltd + William Wirth-Torres + williamwirth-torres&hotmail.com +42466 + Kratos Defense and Security Solutions, Inc. 
+ Gary Barrett + Gary.Barrett&kratosdefense.com +42467 + Lucipher + Pieter van Beek + pieter&lucipher.net +42468 + Jorge Cavallin + Jorge Cavallin + jecavallin&hotmail.com +42469 + Infobahn SA + Bertrand Dufresne + info&infobahn.ch +42470 + Voippartners s.r.l. + Simone Freddio + simone.freddio&voippartners.it +42471 + HOTCITY S.A. + Guillaume-Jean Herbiet + gherbiet&hotcity.lu +42472 + SC-IT GmbH + Tobias Peter + hostmaster&sc-it.de +42473 + Action Technology Ltd. + Xie Zhenye + xiezhenye&actionsky.com +42474 + Toro Development Ltd. + Wilson Liu + wilson.liu&toro-asia.com +42475 + Dewar Electronics Pty Ltd + Delia Jones + delia&dewar.com.au +42476 + Border 6 + Mateusz Viste + mateusz.viste&border6.com +42477 + OneNeck IT Services Corporation + Paul Zalewski + Paul.Zalewski&OneNeck.com +42478 + Proteus Sensor + Avarachan Cherian + contact&proteussensor.com +42479 + Metropolitan Wireless International Pte Ltd + Sun Yong + sunyong&mwi.com.sg +42480 + Grenland Data as + Stian Viddal + stian.viddal&gda.no +42481 + Teletronix + Samuel Dionísio de Oliveira + samuel&teletronix.com.br +42482 + DocuSign, Inc. + Erwann Abalea + dsfcompliance-risk-safety&docusign.com +42483 + Pioneer Bank + David Paul + dpaul&bankwithpioneer.com +42484 + Ultrix Sistemas de Informação + Henry Fernandez + henry&ultrix.srv.br +42485 + Christian Scholz + Christian Scholz + scholz.christian&berlin.de +42486 + Nuolezio + Bastien Le Querrec + bastien&nuolezio.org +42487 + Shenzhen Philorise co.ltd. + Ke huimin + kehuimin&philorise.cn +42488 + Element d.o.o + Ivo Elezovic + ivo&element.hr +42489 + Preferred Credit, Inc. + Steve Halvorson + comp&preferredcredit.com +42490 + Intesi Group SpA + Emanuele Cisbani + ecisbani&intesigroup.com +42491 + Racktop Systems + Eric Bednash + netops&racktopsystems.com +42492 + Far Bank Enterprises + Christopher Kent + it&farbank.com +42493 + City of Waukesha + Greg Vanness + gvanness&ci.waukesha.wi.us +42494 + Beckwith Electric Co. Inc + Thaichong Li + tli&beckwithelectric.com +42495 + O2 Systems Limited + Pranilesh Chand + support&o2systems.co.nz +42496 + GlobalNaz IT + Brad Firestone + iana&globalnaz.org +42497 + Brookhaven Retreat, LLC + Ken Barnt + bhadmin&brookhavenretreat.com +42498 + Guenter Zimmermann + Guenter Zimmermann + guenterzim&gmail.com +42499 + Orlov-Miller + Alexander Orlov + 673564&gmail.com +42500 + Deoca Ltd. + Joe Sloan + joe.sloan&deoca.com +42501 + Oto IT Limited + Lee Marsden + lee&otoit.co.uk +42502 + CellOS Software Limited + David Lynes + dlynes&cellossoftware.com +42503 + XJ Group Corporation + Wu ShuangHui + wushuanghui&xjgc.com +42504 + SOKRAT Ltd. + Eugene Burlakov + jean&sokrat.ru +42505 + Denkovi Assembly Electronics LTD + Borislav Denkov + support&denkovi.com +42506 + Gostcomp Oliwer Godlewski + Kamil Bugaj + bugaj.kamil&gostcomp.pl +42507 + HeuleSoft + Daniel Heule + daniel.heule&heulesoft.ch +42508 + Shepherd Group Built Environment Information Systems (SGBE IS) + Steve Burton + sburton&shepherdbe.com +42509 + net4home GmbH + Oliver Koerber + info&net4home.de +42510 + Cape Fear Valley Health System + Sam Atchley + satchley&capefearvalley.com +42511 + maxrosin.com + Max Rosin + iana-pen&hackrid.de +42512 + Suchocki IT + Marcin Suchocki + marcin&suchocki.it +42513 + Ymax Communications + Gregory Lynn Wood + greg.wood&ymaxcorp.com +42514 + SHENZHEN HXWK TECHNOLOGIES,CO LTD + Huayong Ding + hding&hxtcpip.com +42515 + Liberty Global Services B.V. 
+ Silvia Baumann / James Kennedy + ripe&libertyglobal.com +42516 + Orderman GmbH + Alexander Uzsoki + Alexander.Uzsoki&orderman.com +42517 + Fundación Centro de Estudios de Física del Cosmos de Aragón + Luis Guillén Civera + lguillen&cefca.es +42518 + ZPE Systems, Inc + Livio Ceci + livio.ceci&zpesystems.com +42519 + BrainLogical Software Development + Andreas Röne + roene&brainlogical.de +42520 + LifeSafety Power, Inc. + Guang Liu + gliu&lifesafetypower.com +42521 + Assystem France + François LEGAL + franlegal&assystem.com +42522 + Velin V. Pavlov + Velin V. Pavlov + velin.v.pavlov&gmail.com +42523 + Leibniz-Institut fuer Analytische Wissenschaften -ISAS- e.V. + Peter Lampen + it&isas.de +42524 + Townet srl + Franco Marchetti + franco&townet.it +42525 + Werkbank Multimedia GmbH + Sascha Gresk + gresk&werkbank.com +42526 + elgris UG + Elwin Veenendaal + info&elgrispower.com +42527 + WTG Technologies Ltd + Raymond Edah + iana-en&wtg.co.uk +42528 + Slovak Hydrometeorological Institute (SHMU) + Roman Zehnal + Roman.Zehnal&shmu.sk +42529 + New Jersey Office of Information Technology + Timothy Gansert + timothy.gansert&oit.state.nj.us +42530 + SSQ Financial Group + Remy Jobin + remy.jobin&ssq.ca +42531 + Security Industry Association + Edison Shen + eshen&securityindustry.org +42532 + Holysee Vatican Library + Mr.Gabriele Giulimondi + gabriele&giulimondi.it +42533 + BIOVELA, UAB + Sergej Pokusajev + Sergej&biovela.lt +42534 + CAFAT + GUYAUX Pierre + securiteinfo&cafat.nc +42535 + ictjob + Frederic Lepere + info&ictjob.be +42536 + Hanover Displays Ltd. + David Glendining + dglendining&hanoverdisplays.com +42537 + SnmpStack + Jason Patterson + jasonp&snmpstack.com +42538 + artnet world wide + Muhammad Elsaiedy + melsaiedy&artnet.com +42539 + Trumpet, Inc + Richard Mixon + RNMixon&CustCo.biz +42540 + Zimpel Events And Communications PTY Ltd. + Maik Zimpel + maik&zimpel.co.za +42541 + My Telecom Holdings Pty Ltd + Timothy Sheahan + sheahant&mytelecom.com.au +42542 + Voillo Solutions Ltd. + Taneem Ahmed + tahmed&voillo.com +42543 + Simpson University + Curt Dodds + cdodds&simpsonu.edu +42544 + ALLENGERS GLOBAL HEALTHCARE PRIVATE LIMITED + MADHUR GOYAL + madhur.goyal&allengers.net +42545 + CSC Telecom + Viktor Svecov + noc&csc.lt +42546 + tyntec GmbH + Thorsten Trapp + it&tyntec.com +42547 + OrecX LLC + Ralph Atallah + ratallah&orecx.com +42548 + gloops, Inc. + Kenichi Matsudo + system&gloops.com +42549 + F.S.P. Filofarm + Grzegorz Rybicki + grybicki&filofarm.pl +42550 + SFS services AG + Marco Grünenfelder + grue&sfsservices.biz +42551 + CRFS Limited + Steve Williamson + swilliamson&crfs.com +42552 + Hrvatski zavod za telemedicinu + Zoran Zorica + zoran.zorica&ztm.hr +42553 + TeknoUnit + Alexander Fedorov + service&teknounit.com +42554 + Dynamic Quest, INC + Jen Wong + jwong&dynamicquest.com +42555 + Faculty of Information Technology and Applied Mathematics, BelSU + Mikhalev Oleg + systemarchitect.sentinel&gmail.com +42556 + Visao Tecnologia em Transmissao de Dados Ltda + Arthur Girardi + noc&visao.psi.br +42557 + Unico Data AG + Stefan Beckmann + oid&unico.ch +42558 + Canis Lupus LLC + Benjamin Nelms + alpha&canislupusllc.com +42559 + AppEnsure + Sri Chaganty + sri&appensure.com +42560 + Kirovohrad Volodymyr Vynnychenko State Pedagogical University + Maksym Nazarenko + sysadmin&kspu.kr.ua +42561 + ZAO "MD Project 2000" + Alexander Bikov + it&mospmc.ru +42562 + Universidad Pública de Navarra (UPNA) + Javier Echeverría Martorell + secretaria.general&unavarra.es +42563 + Kaleidescape, Inc. 
+ John Sykes + internetregistrar&kaleidescape.com +42564 + KO4BB Electronics + Didier Juges + didier&ko4bb.com +42565 + Weirton Medical Center + Mike Contumelio + michael.contumelio&weirtonmedical.com +42566 + ClearBearing, Inc. + Zack Colgan + tech&clearbearing.com +42567 + David Spinella + David Spinella + spinella.david&verizon.net +42568 + Andreas Vavra + Andreas Vavra + andi&vaviland.at +42569 + Miesepies + Michel Vissers + iana&miesepies.nl +42570 + Independent Bank Corporation + Chris Michaels + cmichaels&ibcp.com +42571 + Cybera, Inc + Scott Wolfe + scott.wolfe&cybera.net +42572 + SAFELYLOCKED, LLC + Tuomas Siren + iana-pen-contact&safelylocked.com +42573 + Sun Nuclear Corporation + Mark Paradis + mparadis&sunnuclear.com +42574 + Sydestep Ltd + Dr. Richard Petheram + it-admin&sydestep.com +42575 + e-business-systems SARL + Boris Wiethoff + boris.wiethoff&ebs.co.mz +42576 + Linpow SRL + Jorge Sbaco + sbaco.jorge&linpow.com.ar +42577 + Crosspoint SRL Romania + Viorel Dehelean + viorel&cpt.ro +42578 + Xcel Energy + Jeremy Rider + jeremy.m.rider&xcelenergy.com +42579 + sysmocom - systems for mobile communications GmbH + Harald Welte + hwelte&sysmocom.de +42580 + Statseeker + James Wells + james&statseeker.com +42581 + Regional Digital Telecommunication Company + Andrey Lygin + sysand&rdtc.ru +42582 + Interlink Ltd. + John Pate + jp&interlink.com.ua +42583 + DPII TELECOM & SERVICES + DELANCRAY Claire Isabelle + cidelancray&dpii-telecom.com +42584 + ROWA Group Holding GmbH + Thomas Hooge + hooge&rowa-group.com +42585 + TRIKOM Ltd. + Kropotov Alexander + alk&trikom.ru +42586 + DAG System + Roux Jean-Luc + jl.roux&dag-system.com +42587 + Investigaciones Medicas S.A. + Exequiel Arona + earona&im.com.ar +42588 + AGST Controles e Automação Ltda + Tiago dAvila + engenharia&agst.com.br +42589 + Swifttrip LLC + Alan Bunch + oids&swifttrip.com +42590 + AVB GmbH Wind Engineering + Boy Reese + b.reese&avb-germany.com +42591 + Forlani Impianti s.r.l. + Alessandro Forlani + snmp&forlani.it +42592 + TV 2/Danmark A/S + Mik Kay + administrator&tv2.dk +42593 + Gas Natural Informática, S.A. + Marc Xicoy Cirici + mxicoy&gasnatural.com +42594 + Fox Technologies + Peter Tornberg + peter.tornberg&foxt.com +42595 + Right Systems Inc + William Gunn + wg&rightsys.com +42596 + ATNet Services + Alexey Asemov + alex&alex-at.ru +42597 + Norberto Bensa - ENEABE + Norberto Bensa + nbensa&gmail.com +42598 + Tibero + Taikyoung Kim + taikyoung_kim&tibero.com +42599 + NIPPON TELEGRAPH AND TELEPHONE WEST CORPORATION + Masaki Minami + pen-contact&rdc.west.ntt.co.jp +42600 + Rejlers Oy + Mikko Sirén + mikko.siren&rejlers.fi +42601 + Sycada + George Tourkas + tourkas&sycada.com +42602 + Performr B.V. + Michel Stol + support&performr.com +42603 + Vorston + Michael Fyles + registration&vorston.net +42604 + Springfield Technical Community College + Gill Lapointe + helpdesk&stcc.edu +42605 + PLC Power LLC + Carlos A Huerta + carlos.huerta2010&gmail.com +42606 + Urban-Software.de + Thomas Urban + ThomasUrban&urban-software.de +42607 + Brekford International + Michael Kniffen + mkniffen&brekford.com +42608 + Alan Seedhouse + Alan Seedhouse + alan.seedhouse&hotmail.co.uk +42609 + Boundary, Inc. 
+ Cody Crawford + cody&boundary.com +42610 + Superior Power solutions (HK) Co.,Ltd + salvador trullols rico + sales&superiorpowersolution.com +42611 + Valdrea, LLC + Theodore Reed + teddy&valdrea.com +42612 + Koninklijke BAM Groep nv + Chris van Zwolle + informationsecurity&bamgroep.nl +42613 + POLYONE CORPORATION + Fatou Tounkara + fatou.tounkara&polyone.com +42614 + Tribal Chicken Australia + Thomas White + thomas&tribalchicken.com.au +42615 + eTRUST co., ltd. + Hideyuki Usui + h-usui&etrust.ne.jp +42616 + Pannonia Technologies + Matthias Endler + support&pantech.at +42617 + Alphion India Private Limited + K. Anjaneya Sharma + asharma&alphion.in +42618 + Kaaos Unlimited Oy + Jaakko Vallo + jaakko.vallo&kaaosunlimited.fi +42619 + PRYSM SARL + JEAN-MICHEL BELIN + support&prysm-software.com +42620 + Katalix Systems Ltd + James Chapman + jchapman&katalix.com +42621 + Elecdan + Moctar DIAKHITE + mdiakhite&elecdan.com +42622 + Hernic Ferro Chrome Pty Ltd + Francois Kaljee + francois.kaljee&hernic.co.za +42623 + Open Compute Project + Bijan Nowroozi + admin&opencompute.org +42624 + Whatever s.a. + Sylvain Munaut + netadm&whatever-company.com +42625 + Technology Toolbox LLC + Jeremy Jameson + jjameson&technologytoolbox.com +42626 + Blue Fire Capital + Carl Strickler + systems&bluefirecap.com +42627 + Vermont Department of Labor + Aaron Lewis + Aaron.Lewis&state.vt.us +42628 + Shenzhen HD Digital Tech Co., Ltd. + Damon LIU + rd&digihdtech.com +42629 + WaveNT Co.,Ltd. + Sung-Ho Yoon + yoonsh&wavenettech.com +42630 + Dona Ana County, New Mexico + Louis VanLandingham + louv&donaanacounty.org +42631 + Xellia Pharmaceuticals ApS + Niels Sillemann + Niels.sillemann&Xellia.com +42632 + Kompanija Dunav Osiguranje a.d.o. Beograd + Slobodan Babic + slobodan.babic&dunav.com +42633 + VOXXL + Mathieu Malaterre + mathieu.malaterre&voxxl.com +42634 + PSW GROUP GmbH & Co. KG + Christian Heutger + info&psw.net +42635 + RudiNet Ltd. + Rudi Ingebrethsen + frudi999&hotmail.com +42636 + SecuriPax + Nick Higham + nmh&securipax.dk +42637 + Cloudhouse Technologies + Mat Clothier + mat.clothier&cloudhouse.com +42638 + ARD-Sternpunkt + Dominique Mähler + dominique.maehler&ard-stern.de +42639 + Cellcrypt Ltd + Jim Buller + support&cellcrypt.com +42640 + Derek Lambert (CrEOF) + Derek Lambert + dlambert&dereklambert.com +42641 + Tri-State Generation and Transmission + Steve Wesling + swesling&tristategt.org +42642 + Stage Entertainment + Kyung Eigenraam + ict&stage-entertainment.com +42643 + NetSavia SA + Alejandro Ackermann + alejandro.ackermann&netsavia.com +42644 + New Leaf Publishing Group, LLC + Christopher Hallski + web&nlpg.com +42645 + Ros Vicente (ITComp) + Ros Vicente + rosvicente&hotmail.com +42646 + Etherlive Ltd + John Storey + john.storey&etherlive.co.uk +42647 + Alliance Spacesystems, LLC + Eric Kuecherer + ekuecherer&alliancespacesystems.com +42648 + Blue Global Media + Keith Linneman + keith&blueglobalmedia.com +42649 + The LaSalle Technology Group + Matthew Augustine + mmaugust&lasalletech.com +42650 + Parque Tecnológico Itaipu - Paraguay + Luis Benítez + oidmaster&pti.org.py +42651 + willisss.com + Richard Willis + richard.willis&gmail.com +42652 + Sichuan Bihong Broadcast&Television New Technologies Co.,Ltd + Mo Zhenyang + mzy&scbhcatv.cn +42653 + Netcloud AG + Roberto Giana + no-mail-please-20131021&netcloud.ch +42654 + RCS & RDS SA + Iulian Liviu Ionescu + liviu.ionescu&rcs-rds.ro +42655 + On the Cusp Ltd. + Trevor Drawbridge + trevor&onthecusp.co.uk +42656 + Clear-Com + Erik M.
Devane + Erik.Devane&ClearCom.com +42657 + Mixpo + Edward Charles + echarles&mixpo.com +42658 + Lakra Sintez Ltd. + Roman Leonovich + iana&lakra-sintez.ru +42659 + Robert Maxwell Professional Consulting + Robert "Bobby" Maxwell + bobby&rm-pc.com +42660 + Wildman & Morris + Robert Miller + bmiller&wildman-morris.com +42661 + Asian Pacific Telecommunications + David Oramas + david.oramas&aptel.com.au +42662 + Wuerzburger Versorgungs- und Verkehrs-GmbH + Andreas Reumann + andreas.reumann&wvv.de +42663 + Zhengwei-tech Co.,Ltd. + Wan Feng + wanf256&qq.com +42664 + Iromedica AG + Ines Salzer + administrator&iromedica.ch +42665 + WIRD AG + Alexandre Molari + info&wirdgroup.com +42666 + Affiliated Managers Group, Inc. + Matt Miarratani + matt.giarratani&amg.com +42667 + Northwest Multiple Listing Service + Michael Kimbley II + mkimbley&nwmls.com +42668 + ZIMT University of Siegen + Matthias Trute + trute&zimt.uni-siegen.de +42669 + HangZhou Chenxiao Technologies Co.,ltd + wangjianping + wangjianping&chenxiaotech.com +42670 + SICE Tecnología y Sistemas, S.A. (formerly 'Sociedad Ibérica de Construcciones Eléctricas, S.A.') + David Marín + dmarin&sice.com +42671 + Pinetron Co.,Ltd + Jeong-Ah Kim + foxrain&pinetron.co.kr +42672 + G. X. Clarke & Co. + Vladimir Yakovlev + yakovlev&gxclarke.com +42673 + SAIC, Inc. + Network Information Center c/o Barbara Shurtleff + nic&net.saic.com +42674 + TVH Group NV + Jan Keirse + jan.keirse&tvh.com +42675 + Comité Permanent des Médecins Européens + Constance Colin + constance.colin&cpme.eu +42676 + Servergy, Inc + Jack Smith + jack&servergy.com +42677 + Altertech s.r.o. + Sergiy Symonenko + div&altertech.com +42678 + Lakra Sibir Ltd. + Sergey Rusak + iana&lakra-sibir.ru +42679 + cne.at + Heinz Berger + hbb&cne.at +42680 + Randstad España, S.L. + Diego Miranda García + diego.miranda&randstad.es +42681 + Freeport of Riga Authority + Daniels Atkacuns + reg&rop.lv +42682 + Cyviz AS + Eirik Simonsen + simonsen&cyviz.com +42683 + Telematix AG (formerly 'Trans Data Management (TDM)') + Reto Jaeger + reto.jaeger&telematix.ch +42684 + Bump Networks, Inc. + Oleksii Kukhar + alex&bumpnetworks.com +42685 + Benjamin Huepeden IT + Benjamin Huepeden + info&huepeden.net +42686 + Siempelkamp Maschinen- und Anlagenbau GmbH & Co. KG + Tim Heim + tim.heim&siempelkamp.com +42687 + Meridian Health System + Bob Radvanski + BRadvanski&meridianhealth.com +42688 + STABILO International GmbH + Sebastian Meyer + sebastian.meyer&schwan-stabilo.com +42689 + Southern Methodist University + Joe Gargiulo, CIO + gargiulo&smu.edu +42690 + nGeniux LLC + Jason Denning + iana&ngeniux.com +42691 + Australia and New Zealand Banking Group Limited + Svyatoslav Pidgorny + sp&anz.com +42692 + Philotech + Hartwig Dirscherl + iana&philotech.net +42693 + AcroRed Technologies, Inc. + Raymond Lin + sales&acrored.com +42694 + LoopPay, Inc. + Susan Doeit + susan.doeit&looppay.com +42695 + Beijing QTS Networks Technologies Co., Ltd + TerryChen + chengang&qtsnetworks.com +42696 + Indian Hotels Company Limited + Nivrutti Sutar/Vijay Mishra + datacentre&tajhotels.com +42697 + Activsolar + Vladimir Sadkov + sadkov&gmail.com +42698 + Qualitis Kft. + Nagy Attila + attila.nagy&qualitis.hu +42699 + Dorwin Enterprises + John Dorwin + jdorwin&gmail.com +42700 + ESP Credit Management + Aaron Caine + aaron&espcreditmanagement.com +42701 + thepickle.com, Inc. + Dan Rothstein + dan&thepickle.com +42702 + Altairis, s. r. o. + Michal A. 
Valasek + michal.valasek&altairis.cz +42703 + Locatrix Communications + Andrew Eross + eross&locatrix.com +42704 + Marco Displays + Cody Null + itdept&marcocompany.com +42705 + HVR Software + Steve Sheil + steve.sheil&hvr-software.com +42706 + Ministry of Internal Affairs - Kosova + Arben Osmani + arben.f.osmani&rks-gov.net +42707 + Corvinus University of Budapest + Csaba Toth + csaba.toth&uni-corvinus.hu +42708 + Teachers Assurance + Shane Lovett + slovett&teachersassurance.co.uk +42709 + CarWoo! Inc. + Tim Chen + tim&carwoo.com +42710 + Designs for Health, Inc. + Jonathan Walker + jwalker&ashley-martin.com +42711 + MEIRYO DENSHI CORPORATION + Kumiko Agata + Agata.Kumiko&meiryo.co.jp +42712 + Hanvit SI, Inc. + Jessie Kim + jessie&hanvitsi.com +42713 + HPS ICT&Mobile Solutions + Jan Baggen + ict&hps-solutions.nl +42714 + Beijing Joy and Success Technology Co., Ltd. + Ma hui + goldmh&139.com +42715 + tanum consult GmbH + Michael Schrader-Bölsche + noc&tanum-consult.de +42716 + LzLabs GmbH + Bryan Young + sysadmin&lzlabs.com +42717 + Prefeitura Municipal de Cachoeirinha - RS - Brasil + Ederson Souza + edersoncsouza&gmail.com +42718 + Vema, a.s. + Martin Ivanis + itadmins&vema.cz +42719 + CyberFX, Inc. + Adam Werner + adam.werner&cyberfxcorp.com +42720 + Bilgibim Bilgisayar Teknolojileri + Ahmet Oztuna + ahmet.oztuna&bilgibim.com.tr +42721 + Teck Resources Limited + Rob Labbe + rob.labbe&teck.com +42722 + Miraj Technology and Consulting Corporation + Shahn Karim + contactsk&mirajtc.com +42723 + Shouting Ground Technologies, Inc. + Bryan Holloway + admin&shout.net +42724 + Kernel Electronics Co Ltd + LI Man Lung + lml&kernelhk.com +42725 + Institut Teknologi Sepuluh Nopember + Kamas Muhammad + kamas&its.ac.id +42726 + ArcSoft Hangzhou CO.,LTD. + Liu Juan + lj1769&arcsoft.com +42727 + X-Company Pty Ltd + Stephen Reay + stephen&x-team.com +42728 + Tomaxx GmbH + Roger Wenham + roger.wenham&tomaxx.com +42729 + Private Iron and Steel Joint Stock Company "Donetsksteel" + Yehorchenkov Nikolay + postmaster&donetsksteel.com +42730 + Croda International Plc + A.W.J. van Strien + alfred.van.strien&croda.com +42731 + Statkraft SF + Harald Haugan + domains&statkraft.com +42732 + Southern Power Systems, Inc. + Matthew S. Fitzgerald + mfitzgerald&southernpwr.com +42733 + FSMLabs + Cort Dougan + cort&fsmlabs.com +42734 + Nextmove Technologies + Daniel Blackmer + daniel.blackmer&nextmovetech.com +42735 + dParadig + Daniel Alejandro Zuleta Zahr + dzuleta&dparadig.com +42736 + Haus der Barmherzigkeit + Thomas Schneider + thomas.schneider&hausderbarmherzigkeit.at +42737 + Zends + Alvaro Jose de Oliveira Junior + alvaro&zends.com.br +42738 + Vivint Wireless + Adam Augustine + b2b-iana-2013&vivw.net +42739 + Veolia Water Solutions & Technologies + Olivier PILLON + olivier.pillon&veoliawater.com +42740 + MaerdnGaming GbR + Dennis Katz + dennis.katz&maerdngaming.com +42741 + Nanjing Dong Xun Information Technology Co., Ltd. 
+ Steven Mao + maofeng&dongxuntech.com +42742 + Winix Solutions LLC + Timothy Poschel + ianaspam&winix.com +42743 + Niederösterreichische Gebietskrankenkasse + Manfred Gram + Manfred.Gram&noegkk.at +42744 + Falabella + Nelson Eduardo Prado Villalobos + ext_neprado&falabella.cl +42745 + Jamdeo + John Tucker + jt&jamdeo.com +42746 + InterNetX GmbH + Carsten Schoene + carsten.schoene&internetx.com +42747 + Kik Interactive + Dan Hendry + dan.hendry&kik.com +42748 + MyOrg.at (Johannes Hackl) + Johannes Hackl + hannes&hackl-net.at +42749 + Utilidata, Inc + Sridhar Ramachandran + sri&utilidata.com +42750 + Umbrella ID + Björn Abt + bjoern.abt&psi.ch +42751 + Neowatt Energy Solutions Co. Pvt. Ltd. + Tausif Kazi + tausif.kazi&neowatt.co.in +42752 + AMEDTEC Medizintechnik Aue GmbH + Steffen Schmidt + dicom&amedtec.de +42753 + University of The Philippines Los Baños + Ludwig B. Tirazona + ljbtirazona&uplb.edu.ph +42754 + Enel Energy Electronic + Mehmet İşlek + mislek&enel.com.tr +42755 + ATON GmbH + Marvin Schultz + marvin.schultz&atongmbh.de +42756 + Purdue Research Foundation + Kenneth L. Williams + klw&prf.org +42757 + Encana Corporation + Chris Wallace + chris.wallace&encana.com +42758 + C4B Com For Business AG + Bernhard Woköck + admin&c4b.de +42759 + Practicom B.V. + Dhr. Martin Spaans + registratie&practicom.net +42760 + PHOENIX PHARMA + Zoran Vojnovic + webmaster&phoenix.ba +42761 + Imavis srl + Aldo Brucale + aldo.brucale&imavis.com +42762 + ipex telecom LLC + Abdul Hakeem + alhakeem&ipextelecom.net +42763 + ShangHai Jeelan Information Technology Inc. + china wu + chinawu&jeelan.com +42764 + Smith College + Karla Borecky + kborecky&smith.edu +42765 + Muzeum Historii Zydow Polskich Polin + Szymon Kowalczyk + pkit&mhzp.pl +42766 + Typomedia Foundation + Philipp Speck + info&typomedia.org +42767 + Queensland Motorways Limited + Nathan Frankish + iana-pen&qldmotorways.com.au +42768 + Marcelo Leal + Marcelo Leal + msl&eall.com.br +42769 + Supermathie Networks + Michael Brown + michael&supermathie.net +42770 + Totalpost Services PLC + William Wright + hostmaster&totalpost.com +42771 + DGQoS + Diego Maioli + d.maioli&antgroup.it +42772 + Galkam PTY LTD + Glen L Kleidon + glenk&galkam.com.au +42773 + Transport for London + Gurmeet Sahotay + gurmeet.sahotay&tfl.gov.uk +42774 + Semtech Corporation + Ken Porter + kporter&semtech.com +42775 + Linx ICT Solutions + Muhammad Zeeshan Munir + zeeshan&linxsol.com +42776 + one2many BV + Peter ter Beek + peter.ter.beek&one2many.eu +42777 + Gosuncn Technology Group Co., Ltd + Yong Li + LiYong&gosuncn.com +42778 + Michael Offel (mflour.com) + Michael Offel + michael.offel&web.de +42779 + CJSC "NORSI-TRANS" + Maksim Zrazhevskii + zma&norsi-trans.ru +42780 + SEB DEVELOPPEMENT SAS + Olivier RADIX + iana&groupeseb.com +42781 + Bragg Communications Inc. + Curtis Wambolt + curtis.wambolt&corp.eastlink.ca +42782 + Ekrompt, JSC + Maxim Y. Evdolyuk + itsupport&ekromt.ru +42783 + workvslife.com + Brian Friday + ianaoid&workvslife.com +42784 + Pinnacle Foods Group, LLC + Neil Minichino + neil.minichino&pinnaclefoods.com +42785 + Syntaxjockey + Michael Frank + iana-pen&syntaxjockey.com +42786 + CNS Engineering + Bruno Couillard + bruno&bc5tech.com +42787 + Concentric Sky + Francisco Gray + it&concentricsky.com +42788 + artec technologies AG + Uwe Martens + uwe.martens&artec.de +42789 + SynQor, Inc. + Abram Dancy + dancy&synqor.com +42790 + Rebasoft + Kevin Wilkie + kevin.wilkie&rebasoft.net +42791 + Geo-Comm, Inc. 
+ Neil Erickson + nerickson&geo-comm.com +42792 + Near East University + Senol Korkmaz + senol.korkmaz&neu.edu.tr +42793 + IntCache Technology Co.,Ltd + Jeffrey Hu + jeffrey.hu&intcache.com +42794 + LEX Media Concepts SRL + Daniel Neculai + daniel.neculai&lexmedia.ro +42795 + mrByte Tech Solutions + Lino Prahov + desarrollo&mrhouston.net +42796 + IQ Solutions S.A. + George Varakis + g.varakis&iqsolutions.gr +42797 + Freie ArbeiterInnen Union + Rudolf Rocker + rudolf.rocker&fau.org +42798 + HUENGSBERG AG + Daniel Kopp + dak&huengsberg.com +42799 + Alfstore + Rahmi Turhan + r.turhan&alfstore.com +42800 + Co-Nexus Communications Systems, Inc. + Mark Mullings + mark.mullings&co-nexus.com +42801 + Essai Inc + Richard Rebol + TechCon&essai.com +42802 + Rigel Engineering + Craig DeHaan + craig&rigel-eng.com +42803 + Valialsoft LLC + Svetoslav Petrov + iana&valialsoft.com +42804 + New Singularity International Technical Development Co.,Ltd. + Shiyong Teng + tengshiyong&ultrapower.com.cn +42805 + Cloudy IT + Jonathan Cyr + jon&cloudyit.com +42806 + WINFO Corp. + GuoWei + guowei&winfo.cc +42807 + City West Country Ltd + William Ross + william.ross&mercedes-benzsouthwest.co.uk +42808 + Arachnid Labs Ltd + Nick Johnson + nick&arachnidlabs.com +42809 + netbreaker IT-Service + Rico Koerner + it-service&netbreaker.de +42810 + Telesis, S.A. de C.V. + Roberto Currlin + ingenieria&telesis.com.sv +42811 + Enyx + Laurent de Barry + laurent.debarry&enyx.fr +42812 + ISD Dunaferr Co. Ltd. + Zoltan Gyimesi + gyimesi.zoltan&isd-dunaferr.hu +42813 + Polskie Sieci Elektroenergetyczne S.A. + Sebastian Klimczak + sebastian.klimczak&pse.pl +42814 + Inveo + Slawomir Darmofal + sdarmofal&inveo.com.pl +42815 + Tussa IKT AS + Stein-Are Smoge + stein-are.smoge&tussa.no +42816 + HACENTER + Alexander Demertash + alexander&hacenter.com.ua +42817 + IBM Platform Firmware Division + Jayashankar Padath + jayashankar.padath&in.ibm.com +42818 + Relative Variable Software + Eugene Bolshakov + pub&relvarsoft.com +42819 + Carante Groep + Sven Brocking + sbrocking&carantegroep.nl +42820 + Techsun + Myron Jiang + ph.china&hotmail.com +42821 + ACXIO + Patrice Collardez + pcollardez&acxio.fr +42822 + Tesc + Anibal Ricardo Salusso + anibal.salusso&tesc.com.br +42823 + Spilsby Internet Solutions + Terry Froy + tez&spilsby.net +42824 + Web.Cloud.Apps. GmbH + Daniel Boldura + boldura&web-cloud-apps.com +42825 + vArmour Networks + Sandro Janita + sjanita&varmour.com +42826 + ChengDu Network Security Technology Corporation + Cole.Chen + chenjunnian&wangankeji.cn +42827 + rm-netproject UG (haftungsbeschränkt) + Reiner Gutowski + rg&rm-netproject.de +42828 + US Army Electronic Provings Ground (USAEPG) Instrumentation Management System (IMS) + James Fisher + james.martin.fisher&us.army.mil +42829 + QuartzDesk.com + Jan Moravec + admin&quartzdesk.com +42830 + Landis+Gyr + Lars E. 
Haven + lars.haven&landisgyr.com +42831 + Kickass Systems + Ian Weller + ian&ianweller.org +42832 + Tata Institute Fundamental Research + Sagar Mungse + msagar&tifr.res.in +42833 + Nimbus Technologieberatung GmbH + Arno Fiedler + arno.fiedler&nimbus-berlin.com +42834 + TechniData IT-Service GmbH + Ralf Wigand + ralf.wigand&its-technidata.de +42835 + ZELITRON SA + Abatzis Kostas + abatzis&zelitron.com +42836 + eXtreme IT Development + Mohamed Ayyad + m.ayyad&xitd.net +42837 + Westpac New Zealand Ltd + David Biggs + David_Biggs&westpac.co.nz +42838 + FAST DISTRIBUTED CLOUD COMPUTING INC., + Hierro Park + acumen&wisetodd.com +42839 + Tactical Communications Corporation + Doug Fuller + dfuller&tacticalcommunications.com +42840 + Reporo + Reporo Tech + tech&reporo.com +42841 + Kraftanlagen Muenchen GmbH + Kai Hoeltgen + hoeltgenk&ka-muenchen.de +42842 + iQSpot + Damien Martin-Guillerez + damien.martin-guillerez&iqspot.fr +42843 + TWIN Engineering S.r.l. + Michele Fazzalari (CEO) + info&twinengineering.com +42844 + SPI GmbH + Eckhard Schilling + iana&spi.de +42845 + TOKYO SYSTEM HOUSE Co., Ltd. + Kazuya SAKAMOTO + k-sakamoto&tsh-world.co.jp +42846 + Corhoma SRL + Jorge Blasi + corhoma&gmail.com +42847 + 2H Offshore Engineering Ltd + Robert Brock + robert.brock&2hoffshore.com +42848 + NETIXIA + Laurent FRANCOISE + iana&netixia.fr +42849 + Fabrecode + Robert Banfill + robert&fabrecode.com +42850 + LocalTV LLC of Alabama + David McFerrin + david.mcferrin&whnt.com +42851 + Fidelity Voice Services + Rob McKrill + robm&fidelityvoice.com +42852 + Bank Gospodarstwa Krajowego + Dorota Remiszewska-Gajek + pwasa&BGK.com.pl +42853 + DunnCox + Ryan Dunbar + ryan.dunbar&dunncox.com +42854 + EMPRESA DE TECNOLOGIA DA INFORMAÇÃO E COMUNICAÇÃO DO MUNICÍPIO DE SÃO PAULO + Wagner Kanagusuko + wagnerk&prodam.sp.gov.br +42855 + techsoup.net + Don Betts + don&techsoup.net +42856 + Kreis Warendorf + Olaf Doering + olaf.doering&kreis-warendorf.de +42857 + Family Madera of Switzerland + Jan Madera + jan&madera.ch +42858 + Linux Szerver Kft. + Ferenc Koczka + info&linux-szerver.hu +42859 + Shanghai SHR Automation Co.,Ltd + HU Daoxu + hdx.3047&shrcn.com +42860 + Asul IT Consulting LLP + Alexandr Khrabrov + info&asul.kz +42861 + Ophylink Communication Technology Co., Ltd. + Linlin Qiu + linlin.qiu&ophylink.com +42862 + AITA-Soft + Oleg Ostroverkh + oleg.o&aitasoft.ru +42863 + IOS Health Systems + Dario Va + dva&ioshs.com +42864 + Mobius Embedded Systems Ltd + Andrew Chilcott + iana&mobius-embedded-systems.co.uk +42865 + Hoza Logistic Solutions + John Melet + jmelet&hoza.nl +42866 + Frontend.biz GmbH + Antonio Pedrazzini + info&frontend.biz +42867 + BestSolution.at + Udo Rader + udo.rader&bestsolution.at +42868 + Association of American Railroads + Mr. Robert A. Kollmar + rkollmar&aar.org +42869 + UNE EPM Telecomunicaciones S.A. + Hector Fernando Vargas Montoya + hector.vargas&une.com.co +42870 + Unither Manufacturing LLC + Melissa Mehlenbacher + melissa.mehlenbacher&unither-pharma.com +42871 + IMRIS + Chris Mussack + cmussack&imris.com +42872 + Unomaly AB + Goran Sandahl + goran&unomaly.com +42873 + Comune di Ascoli Piceno + Mauro Angiolillo + admin&comuneap.gov.it +42874 + Hospedia Ltd + Keith Jacobs + keith&keithjacobs.net +42875 + ACISA + Silvio Fernández + silvio.fernandez&acisa.es +42876 + SGSITES Technologies + Stefan Genchev + stefan.genchev&sgsites.net +42877 + Sundvor + Aage Johan Strøm Sundvor + aage&sundvor.com +42878 + Credit-Moscow Bank + Nikolay Belyakov + cso&cmbank.ru +42879 + Sielaff GmbH & Co. 
KG + Oliver Muck + oliver.muck&isa.de.com +42880 + Uwe Disch + Uwe Disch + mail&uwe-disch.de +42881 + No Limit Network + Nicolas Belan + contact&no-limit-network.com +42882 + Jiangsu Fablesoft Co.,Ltd + Xiaoqiang Ye + yexq&fablesoft.cn +42883 + CARC (C-DOT Alcatel-Lucent Research Centre Pvt Ltd) + Ramkumar Jayachandran + ramkumar.jayachandran&carc.co.in +42884 + Anderson Morgan Kelowna Inc. + Fred Brown + fredb&andersonmorgan.ca +42885 + S P I R I T - informačné systémy, a.s. + Lubomir Bak + bak&spirit.sk +42886 + InMotion Hosting + Vanessa Vasile + steam&imhadmin.net +42887 + One Call Now + Edward Curren + ed.curren&onecallnow.com +42888 + Canadian Museum for Human Rights + Christopher Rivers + christopher.rivers&museumforhumanrights.ca +42889 + IO Data Centers, LLC + Kjell Holmgren + koh&io.com +42890 + Renkus Heinz Incorporated + Tim Shuttleworth + tim&renkus-heinz.com +42891 + Santechkomplekt Ltd. + Babin Vladimir + postmaster&santech.ru +42892 + TOYGA + Gil Shinar + gils&hexagontech.co.il +42893 + Funkfeuer Wien - Verein zur Förderung freier Netze + Matthias Šubik + admin&funkfeuer.at +42894 + CAROL JEANNE MACK CORPORATION + Carol Jeanne Mack + mac.carol5&gmail.com +42895 + Securiton GmbH + Peter Treutler + peter.treutler&securiton.de +42896 + The O'Gara Group + Mike Estridge + postmaster&ogaragroup.com +42897 + Evan Edstrom + Evan Edstrom + contact&evanedstrom.com +42898 + IM Service Lab Srl + Claudio Spallaccini + info&imservicelab.com +42899 + kiloWattsol + Sebastien Collin + scollin&kilowattsol.com +42900 + Elcom Innovations Private Limited + Rajeev Vats + rvats&elcominnovations.com +42901 + Chaos Computer Club Cologne + Tobias Wolter + towo&koeln.ccc.de +42902 + J.R. Torralba + Jesus R. Torralba + jhunetorralba&hotmail.com +42903 + IT Innovations Ukraine LLC + Vyacheslav Labzin + vyacheslav.labzin&ua.ibm.com +42904 + Nemik Consulting Inc + Nemanja Stefanovic + snmp&nemik.net +42905 + Trihedral Engineering Ltd. + John Cole + john.cole&trihedral.com +42906 + Shift Workforce Management + Diego Jorquera Tapia + djorquera&shiftlabor.com +42907 + PLUTEX GmbH + Fabian Müller + fmu&plutex.de +42908 + Megadevices + Oleksandr Kovalenko + kovalenko.md&gmail.com +42909 + Tver State University + Andrew Zhukov + zhukov&tversu.ru +42910 + latticeware.com + stanton fisque + sfisque&latticeware.com +42911 + Metiri Systems, LLC + Steven C. Ollmann + s.ollmann&metirisystems.com +42912 + locate solution GmbH + Daniel Petat + daniel.petat&locatesolution.de +42913 + Exatech bv + Henk Pragt + hpragt&exatech.nl +42914 + Wimsey + Stuart Lynne + stuart.lynne&gmail.com +42915 + MCATSYSTEMS LLC + Hendry Kunthara + info&mcatsystems.com +42916 + Arcerm + Alexey Grenev + graa&arcerm.spb.ru +42917 + Adcubum AG + Christian Pfister + intit&adcubum.com +42918 + Trafigura + Mark Parris + mark.parris&trafigura.com +42919 + Max-Planck-Institut fuer Astrophysik + Heinz-Ado Arnolds + arnolds&mpa-garching.mpg.de +42920 + Slaughter and May + John Lynch + John.Lynch&slaughterandmay.com +42921 + Dropbox, Inc. + Gary M. 
Josack + pen-contact&dropbox.com +42922 + Ministarstvo odbrane Republike Srbije + Milos Pejanovic + milos.pejanovic&vs.rs +42923 + Stefan Ebert + Stefan Ebert + stefan&huhn.li +42924 + The Molecule + alex maceda + alex&themolecule.com +42925 + SYLEB SARL + KLA KOUE SYLVANUS + sylvanuskla&syleb.com +42926 + NSC Communications Siberia Ltd + Alexei Tsigulin + alex&nsc-com.com +42927 + Fidelio Cruise Software + Gregor Stute + gstute&fcruise.com +42928 + Galleon Systems Ltd + Craig Richards + support&galleonmail.com +42929 + securedeviceaccess.com + Lars Reemts + postmaster&securedeviceaccess.com +42930 + GEKO.NET di Mena Giuseppe + Giuseppe Mena + info&gekoinformatica.net +42931 + Vladimir Mihajlović pr, računarsko programiranje, METACODE, Beograd + Vladimir Mihajlovic + vlada79&gmail.com +42932 + Infolink Global + Alec Hans + alec&infolinkglobal.net +42933 + iArtemis + Jie Xia + admin&iartemis.ca +42934 + APRS World, LLC + James J Jarvis + jj&aprsworld.com +42935 + Axcelera + Diogo Sato + diogo&axcelera.com.br +42936 + Anhui Telehome Digital Technology Co. Ltd + Simon Wu + simon100.wu&gmail.com +42937 + VP (Video Privacy) Forum + JinKyo Shin + jkshin&4dream.co.kr +42938 + Moviri S.p.a. + Davide Malagoli + davide.malagoli&moviri.com +42939 + Applied Informatics for Health Society + Doug Gorley + dgorley&aihs.ca +42940 + City of Smyrna + Chris Addicks + is&smyrnaga.gov +42941 + 2XWIRELESS, INC + Brock Eastman + brock&2xwireless.com +42942 + kayzan + Kay Zander + kayzan&kayzan.de +42943 + CopperPoint Mutual Insurance Company + Evan F. Erickson + eerickson&copperpoint.com +42944 + Seenov Inc. + Claude Arpin + claude.arpin&me.com +42945 + Seeser Datentechnik + Peter Seeser + guardian&seeser.net +42946 + Utel Systems + Frode Gorseth + frode.gorseth&utelsystems.com +42947 + Network Technologies International, Inc. + Daniel F. Domnick + registrar&nticloud.net +42948 + Methodist Home for Children + Daniel F. Domnick + registrar&nticloud.net +42949 + Bol.com b.v. + Mark Frasa + tim&bol.com +42950 + Hangzhou Joson Technology Co.,Ltd + songjing + songjing&hzjoson.com +42951 + Highlands Technologies Solutions + Patrick Zucchetta + it&h-t-solutions.com +42952 + SOREDI touch systems GmbH + Gertrud Brandl + brandl&soredi-touch-systems.com +42953 + "Runet Business Systems" + Sergey Levin + levin&paymentgate.ru +42954 + Bizerba GmbH & Co. KG + Michael Briegel + Michael.Briegel&bizerba.com +42955 + Louisville-Jefferson County Metro Government + Brian Clenney + Brian.Clenney&Louisvilleky.gov +42956 + HumanIT Inc. + Manuel Deschambault + mdeschambault&humanit.ca +42957 + Wave Advanced Technology Applications s.r.l. + Sergio Arena + info&wave-srl.com +42958 + Central Office for Administrative and Electronic Public Services + Krisztian Horvath + Krisztian.Horvath&kekkh.gov.hu +42959 + Snap Limited + James Burnett + noc&snap.net.nz +42960 + Garn Contracting & Consulting Pty Ltd + Jeremy Ford + jez&garn.cc +42961 + EH-electronics GmbH + Guenther Heumann + info&eh-electronics.com +42962 + Blitznote e.K. + Mark Kubacki + kubacki&blitznote.com +42963 + UltraMED Systems, Inc. + Robert Gill + support&ultramedonline.com +42964 + Abelsoft Inc + Kazutoshi Osakabe + k-osakabe&abelsoft.co.jp +42965 + ec3 Networks GmbH + Manfred Hackl + manfred.hackl&ec3networks.at +42966 + ECHO LLC + Sergey Shakirov + registry&lwd.ru +42967 + liedloff.org + David Liedloff + david&liedloff.org +42968 + Wertschütz GmbH i.Gr. 
(formerly 'ME Engineering M2M GmbH') + Matthias Wimmer + mw&wertschuetz.de +42969 + Iorga Group + Frédéric PREVOST + fprevost&iorga.com +42970 + RP SIA "Rigas satiksme" + Nils Kolecis + nils.kolecis&rigassatiksme.lv +42971 + ONELAN LIMITED + Amit Kachroo + amit.kachroo&onelan.com +42972 + IT Business Solutions and Services (ITBSS) + Juergen Graf + juergen.graf&itbss.de +42973 + COMDOK GmbH + Sven Ehret + support&comdok.de +42974 + Willis-Knighton Health System + Theresa Cochran + tcochran&wkhs.com +42975 + Iridium Thought + Peter Wood + development&iridiumthought.com +42976 + Key Source International + Paul Schwartz + paul&ksikeyboards.com +42977 + Matta Consulting Limited + Florent Daigniere + florent.daigniere-pen&trustmatta.com +42978 + CSN Groep BV + F.A.N. van den Berg + f.berg&csngroep.nl +42979 + Wellington Security Systems + Seth Stiebinger + admin&wellingtonsecurity.com +42980 + EZ e-TRAK + Christos Chochlidakis + christos&eze-trak.com.au +42981 + Heuristic Systems Pty Ltd + Dewayne Geraghty + admin.operations&heuristicsystems.com.au +42982 + Leafsprout + Cezary Klimczak + cezary.klimczak&leafsprout.com +42983 + SAS Nexylan + Gaetan Allart + gaetan&nexylan.com +42984 + Toyohashi University of Technology + TSUCHIYA Masatoshi + oid-liaison-officer&imc.tut.ac.jp +42985 + Bdog Technology (formerly 'Cnofe') + Ding Fei + paul712&163.com +42986 + Beijing MicroColor Corp., LTD. + WANG LONG + wanglong&microcolor.net +42987 + Gida Technology Services + Pravin Carvalho + pravincar&gmail.com +42988 + Nanjing Huamai Technology Co., Ltd + Xia Haibin + davidxia136&139.com +42989 + Telegärtner Karl Gärtner GmbH + Felix Klein + felix.klein&telegaertner.com +42990 + VTSL Ltd + Daniel Wilson + dwilson&vtsl.net +42991 + Marylhurst University + Keelan Cleary + ies-admins&marylhurst.edu +42992 + Vektron Energy (Pty) Ltd + Glen Medlin + glen&vektron.co.za +42993 + Wake Forest Baptist Health (formerly 'Cornerstone Health Care, P.A.') + Paul Weese + paul.weese&wakehealth.edu +42994 + Institut de Physique de Rennes + Jérémy Gardais + jeremy.gardais&univ-rennes1.fr +42995 + AvertIT + Alan Stewart + Alan.Stewart&AvertIT.com +42996 + NOMASYSTEMS, S.L. + Enrique Marcote Peña + admin&nomasystems.com +42997 + Red Giant Inc + Adam Murphy + adam.murphy&redgiantmobile.com +42998 + Progdence Co.,Ltd. + Yusuke Fujimura + fujimura&progdence.co.jp +42999 + Parse Software Development B.V. + Sjon Hortensius + sjon&parse.nl +43000 + Service Ontario + Mike Lefever + mike.lefever&ontario.ca +43001 + Nextbeacon + Steve Waldbusser + snmp&nextbeacon.com +43002 + RootService (Markus Kohlmeyer) + Markus Kohlmeyer + admin&rootservice.org +43003 + TribalGroup + Martyn Smith + martyn.smith&tribalgroup.com +43004 + Xinguard, Inc. + Kae Hsu + MIBs&xinguard.com +43005 + University of Technology and Life Sciences in Bydgoszcz + Łukasz Dobosiewicz + lukasz.dobosiewicz&utp.edu.pl +43006 + root-a + Kukharev Vitaly + vkx&vkx.ru +43007 + Gipen Systems + Gyula Nemetz + info&gipen.hu +43008 + FRTek + Doyeon Kim + dykim&frtek.co.kr +43009 + Vilnius Gallery + Constantin Piskounov + kpiskounov&outlook.com +43010 + Two Sigma Investments, LLC + Viktor Dukhovni + iana-master&twosigma.com +43011 + Sängerschaft Erato + Julian Metza + senior&erato.de +43012 + Aldo Group Inc. + Eric Beaumier + ebeaumier&aldogroup.com +43013 + ISC8, Inc. + Scott Millis + smillis&isc8.com +43014 + En Garde Systems, Inc.
+ Mike Neuman + mcn&EnGarde.com +43015 + Quinyx AB + Mikael Knutsson + mikael.knutsson&quinyx.com +43016 + Réseau en scène Languedoc-Roussillon + Yvan GODARD + y.godard&reseauenscene.fr +43017 + Albaad + Vladimir Kopolovich + admin&albaad.com +43018 + Pädagogisches Landesinstitut + Sandro Isoletta + it-support&pl.rlp.de +43019 + Digital Human Identity S.L + Ramón Martínez + rampa&encomix.org +43020 + Angel Lane + Paul Bennett + registry&angellane.com +43021 + i-storm + SeongJin Park + sjpark&i-storms.com +43022 + OneSearch Direct Ltd. + Jamie Craig + ithelpdesk&onesearchdirect.co.uk +43023 + Tuxpower.dk + Mikael Bertelsen + mickey&tuxpower.dk +43024 + InfoGoose.Inc + Seeyun Kim + seeyunk&infogoose.com +43025 + Nomadconnection, Inc. + Kyoungjune Lee + ym1020&nomadconnection.com +43026 + Mostlucky Ltd. + David Fan + david&mostlucky.com.tw +43027 + Fujian Great Power PLC Equipment Co.,Ltd + Wu Xueya + wuxueya&md-front.com.cn +43028 + Office of Immigration and Nationality + Ede SOÓS + iana-pen&bah.b-m.hu +43029 + Netco Ltda. Network Solutions Co. + Alejandro Naranjo + anaranjo&netco.la +43030 + ROCK YOUR LIFE! gGmbH + Daniel Menzel + daniel.menzel&rockyourlife.de +43031 + RaumZeitLabor e.V. + Simon Elsbrock + info&raumzeitlabor.de +43032 + Live It - Nerd + Peter Young + peter&liveitnerd.com +43033 + TVV lippu- ja maksujärjestelmä Oy + Ilkka Kankkunen + ilkka.kankkunen&lmj.fi +43034 + K and P Business Solutions + Mulenga Katebe + katebe&kpbs.co.zm +43035 + Feinschliff + Christian Schweizer + administrator&feinschliff.ch +43036 + Bay Of Plenty District Health Board + Alexey Lobanov + alexey.lobanov&bopdhb.govt.nz +43037 + Newcastle-under-Lyme College + Mr Chris Stevenson + administrator&nulc.ac.uk +43038 + Creative Electronic Systems SA + Cédric Gaudin + cedric.gaudin&ces.ch +43039 + OASYS Healthcare Corporation + Martin vanPutten + mvanputten&oasyshealthcare.com +43040 + Davra Networks + Colin Ryan + colin.ryan&davranetworks.com +43041 + ATREM S.A. + Michal Kowalski + kontaktit&atrem.pl +43042 + Definition Networks Inc. + Manojkumar Nair + mnair&definitionnetworks.com +43043 + Information System Assiociaties + Mario Remy Almeida + malmeida&isa.ae +43044 + Gran Investimentos + Thiago Mello + ti&granbio.com.br +43045 + Banco Exterior C.A. Banco Universal + Gabriel Salcedo + arquitecturay.soporte&bancoexterior.com +43046 + Andrey Fedorov + Andrey Fedorov + andrey.fedorov&gmail.com +43047 + H&W Computer Systems + Nick Yates + nyates&hwcs.com +43048 + CB7 Systems LLC + Lance Palatini + admin&cb7systems.com +43049 + Stewart Bryant + Stewart Bryant + stewart.bryant&gmail.com +43050 + Vobile Co., Ltd. + Honghui Ding + ding_honghui&vobile.cn +43051 + SHENZHEN SENSEGRID CO.,LTD. + liu.minpeng + liu.minpeng&sensegrid.cn +43052 + Shenzhen Tendzone Intelligent Technology Co; Ltd + Buck Yu + yuyupeng&tendzone.com +43053 + I.T. SYNERGY LIMITED + Tony Smith + oid&itsynergy.co.uk +43054 + Crown Prince Court + Meera Al Mansoori + meera.almansoori&cpc.gov.ae +43055 + American Kybernetik (Juan Daugherty) + Juan Daugherty + juan&acm.org +43056 + ENVISTACOM + Mark Hammel + mhammel&envistacom.com +43057 + PrAT CENTRAVIS PRODUCTION UKRAINE + Vladyslav Mochalov + administrator&centravis.com +43058 + Synergy Sky AS + Egil Hasting + eh&synergysky.com +43059 + iteracon GmbH + Michael van Laak + michael.vanlaak&iteracon.de +43060 + Leepfrog Technologies, Inc. + Wes Bachman + certmaster&leepfrog.com +43061 + Global System & Network Services BVBA + Mr. Ferrara Cataldo + tech&gsns.be +43062 + Ingenieure ohne Grenzen e.V.
+ Tom Dinkelaker + tom.dinkelaker&gmail.com +43063 + Digital Multimedia Technology Co.,Ltd. + Jae Kyun, Lee + jklee&dmt.kr +43064 + Embedded Wireless Labs + Phang Wee Meng + wmphang&embeddedwireless.com +43065 + Shanghai Onezero Electronic Commerce Co., Ltd. + yibing wang + appdev&mc2.cn +43066 + Stiebel Eltron GmbH & Co. KG + Oliver Bast + oliver.bast&stiebel-eltron.de +43067 + VFI SYSTEM + Fabrice VOMSCHEID + fabrice.vomscheid&vfi-system.com +43068 + AVG Technologies + Pavel Odehnal + pavel.odehnal&avg.com +43069 + Markus Ruecker (mr-consult.net) + Markus Ruecker + markus.ruecker&mr-consult.net +43070 + Hrvatska narodna banka + Ana Puntarić + ana.puntaric&hnb.hr +43071 + NABB Gaming Community + Mathieu Dugas + mathieu.dugas&gmail.com +43072 + Privlo, Inc. + Aaron Picht + IT-IANA&privlo.com +43073 + roskakori.fi + Jarmo Jaakkola + operator&roskakori.fi +43074 + WRH Inc + Jack Cheney + jack.cheney&wrhinc.net +43075 + SKY Interactive + James Yong + james.yong&skyinteractive.com.au +43076 + Greiner Holding AG + Servicedesk + servicedesk&greiner.at +43077 + TUI Marine + Shawn LoPresto + shawn.lopresto&tuimarine.com +43078 + Lindvik + Kristian Lindvik + kristian&lindvik.com +43079 + Zimmer GmbH + Matt Addy + matt.addy&zimmer.com +43080 + Atlantic Corporation of Wilmington, Inc. + Robbie Walker + robbiew&atlanticpkg.com +43081 + intellec + Christian Jucker + christian.jucker&intellec.ch +43082 + Informatik Service Gesellschaft mbH + EL Ghouat Nor-eddine + ne.elghouat&isg-cgn.de +43083 + IMEN RAYANE SHRGH Co. + Hassan Hamzeloue monfared + info&imenrayaneh.com +43084 + Webtown Informatika Kft. + Peter Felber + wtsysop&webtown.hu +43085 + Detroit R & D, Inc. + David Putt + info&detroitrandd.com +43086 + NHS Sistemas Eletronicos + Thiago Wiezbicki + thiago.wiezbicki&nhs.com.br +43087 + xCelor LLC + Vahan Sardaryan + vahan&x-celor.com +43088 + Measurlogic Inc. + John Stratford + john&measurlogic.com +43089 + Dualog AS + Vidar Berg + vidar&dualog.com +43090 + KVADOS, a.s. + Milan Dadok + dadok&kvados.cz +43091 + SAP integrator Ltd. + Anthony Pankov + info&sapintegrator.com +43092 + Habermehl + Martin Habermehl + Martin&Habermehls.de +43093 + Xiamen Faratronic Co., Ltd. + Xiang Bai + bx&faratronic.com.cn +43094 + HyperWallet Systems Inc + Mark E.S. Bernard + mbernard&hyperwallet.com +43095 + Service2Media B.V. + Robert Hassing + robert&service2media.com +43096 + James Guse (JAG Enterprises) + James Guse + guse3&beryk.com +43097 + ROTH + WEBER GmbH + Stephan Frisch + software-entwicklung&rowe.de +43098 + Martem AS + Roland Uuesoo + roland&martem.ee +43099 + Embrionix Design inc. + Sithideth Viengkhou + sviengkhou&embrionix.com +43100 + Strata Decision Technology, L.L.C. + Joshua Bautista + it&stratadecision.com +43101 + Alphavida + Stuart Cianos + scianos&alphavida.com +43102 + Regional Municipality of Peel + Arthur Michalec + arthur.michalec&peelregion.ca +43103 + Caradigm + Sampath Damodarasamy + Sampath.Damodarasamy&Caradigm.com +43104 + CyberSecure IPS + Steve Sohn + steve.sohn&cybersecureips.com +43105 + CyberFlow Analytics, Inc. 
+ Steve Hinkle + stevehinkle&cyberflowanalytics.com +43106 + PERK Innovation GmbH + Michael Pauen + michael.pauen&perk-innovation.com +43107 + Waclaw Schiller (Torinthiel) + Waclaw Schiller + iana-pen&torinthiel.pl +43108 + Attend Systems, LLC + Noah Groth + ngroth&attendsystems.com +43109 + dataFASCIA Corporation + Bill Lattin + blattin&datafascia.com +43110 + LTD «Bradbury Lab» + Evgeny Novikov + evn&bradburylab.com +43111 + VNT Software + Mirit Manor + mirit&vnt-software.com +43112 + Picsearch AB (publ) + Navid Abdi + sysadmin&picsearch.com +43113 + Screen9 AB + Navid Abdi + sysadmin&screen9.com +43114 + Kantonsapotheke Zuerich + Clau Deplazes + clau.deplazes&kaz.zh.ch +43115 + Forêts & Bois de l'Est + Alain JACQUET + contact&foretsetboisdelest.com +43116 + Berghof Automation GmbH + Dr. Arno Rabold + Arno.Rabold&berghof.com +43117 + National Stock Exchange Of India Limited + Prasad Addagatla + prasada&nse.co.in +43118 + NORDSYS GmbH + Stefan Sander + penadmin&nordsys.de +43119 + Altibox AS + Jan-Frode Myklebust + jf&altibox.no +43120 + Peshawar Business School + Muhammad Amjad + amjid49&yahoo.com +43121 + Ypsomed AG + Patrick Hostettler + dnsadmin&itsroot.net +43122 + Summit Networks + Daniel Korndorfer + daniel.korndorfer&summitnetworks.com.br +43123 + Instituto Nacional para la Evaluación de la Educación + Saul Castro Olvera + scastro&inee.edu.mx +43124 + PasswordBox Inc. + Francois Proulx + francois&passwordbox.com +43125 + System Studies Incorporated + Eric Malmberg + eric&airtalk.com +43126 + US Fire Insurance Company + Amos Desrosiers + amos.desrosiers&cfins.com +43127 + Oliver Manz (ips manz consulting) + Oliver Manz + ips&omanz.de +43128 + TangentOrg + Brian Aker + brian&tangent.org +43129 + Meijo University + Toshihiko KATO + ccoffice&meijo-u.ac.jp +43130 + Amplesky Communication Technologies Ltd. + LI WEIXING + liweixing&amplesky.com +43131 + Ternopil State Medical University + Kovalok Volodymyr + admin&tdmu.edu.ua +43132 + CJSC Svyaz engineering + Evgeniy Chudaykin + e.chudaykin&sipower.ru +43133 + mailgarant (Meint Post) + Meint Post + meint&meint.net +43134 + Advanced Fiber Products LLC + Wojciech Przeczkowski + wprzeczkowski&afpgco.com +43135 + adis.ca + Adi Linden + adi&adis.ca +43136 + Digital Footprints International, LLC; DBA Internet Identity + Rod Rasmussen + iana-pen&internetidentity.com +43137 + Raven Rocks Computer Services Group, Inc. + Charles Hopkins + charlesghopkins&gmail.com +43138 + Multapplied Networks, Inc. + Matthew Fox + support&multapplied.net +43139 + Glutonus SRL + Vladimir Tanase + vtanase&glutonus.md +43140 + Xora, Inc. + James Graham + jgraham&xora.com +43141 + RVTN.org + Vladimir Voskoboynikov + vladv&rvtn.org +43142 + St. Mary's International School + DJ Feldmeyer + administrator&smis.ac.jp +43143 + Nanjing Sinovatio Technology Co. ltd + Ning Zhang + zhang.ning&sinovatio.com +43144 + HMS Industrial Networks + Magnus Hansson + Ianacontact&hms.se +43145 + Vasily Nushtakin (Home Router) + Vasily Nushtakin + nushtakin.vasily&gmail.com +43146 + VanWesten.net + Erik Jan van Westen + erik&vanwesten.net +43147 + Best Theratronics Ltd. + Lloyd Smith + lloyd.smith&theratronics.ca +43148 + Det Norske Oljeselskap + Hans Arvid Olsen + hans.arvid.olsen&detnor.no +43149 + RUBYCAT-Labs + Anthony DAVID + iana&rubycat-labs.com +43150 + Stenbock Systems Inc.
+ Tore Stenbock + tore.stenbock&gmail.com +43151 + AltaSteel Ltd + Neil Tomlinson + ntomlinson&altasteel.com +43152 + Kristoffer Berdal (cognitive.io) + Kristoffer Berdal + iana&flexd.net +43153 + Gerriko S-E-T Technology Solutions Ltd. + Colin Gerrish + info&gerriko.ie +43154 + NexLabs Pte Ltd + Foo Ji-Haw + jhfoo&nexlabs.com +43155 + ION Trading Srl + Marco Bizzarri + m.bizzarri&iontrading.com +43156 + WEY Group AG + Daniel Walder + daniel.walder&weytec.com +43157 + Tatung Technology Inc. + Ming-Yih Tom Lu + tomlu&tti.tv +43158 + Skytech.lt + Viktoras Dobrovolskis + viktoras.d&skytech.lt +43159 + Hartley Consultants Limited + Alan Chandler + alan.chandler&hartley-consultants.com +43160 + EnStream LP + Yiping Ye + yiping.ye&enstream.com +43161 + Riverside County Regional Medical Center + Elsa Wu + elwu&co.riverside.ca.us +43162 + Flash Services + F.Ruitenburg + frank.ruitenburg&flash-services.com +43163 + Cloud Technologies Inc. + Isao Tanimoto + support_sapphire&cloud-tech.co.jp +43164 + Eruditor Group Ltd. + Alexander Tyumentsev + it-admin&eruditor-group.com +43165 + Strike Technologies, LLC + Ben Yu + byu-operations&strikens.com +43166 + Thai Citrus, Inc. + Krissada Jindanupajit + info&thaicitrus.com +43167 + NaSys Inc. + Youngjae,Ahn + youngjaeahn&nasys.kr +43168 + ALFRED E. TIEFENBACHER (Gmbh & Co. KG) + Marek Falten + m.falten&aet.eu +43169 + Tiroler Versicherung + Ernst Schweighofer + it.admin&tiroler.at +43170 + Banco Bilbao Vizcaya Argentaria S.A + MERCEDES GARCIA FLORES + mercedes.garciaf&bbva.com +43171 + Datwyler Holding AG + Martin Sager + global.ict&datwyler.com +43172 + Atrium - Region PACA + Frederic Lorrain + frederic.lorrain&atos.net +43173 + Elesta Ltd. + Bykov Dmitry + elesta&elesta.ru +43174 + Ubiquity Srl + Giacomo Olgeni + giacomo.olgeni&ubiquity.it +43175 + Dorsey Racing + Walt Dorsey + admin&dorseyracing.com +43176 + Swiss Mobility Solutions + Miguel A. Roman + sysadmin&swissms.ch +43177 + Positron srl + Pier Bruni Venturoli + info&positronaudio.com +43178 + Main Line HealthCare + Kim Martino + martinok&mlhs.org +43179 + Lendy LLC + Brian Brophy + brianmbrophy&gmail.com +43180 + Ole Virginia Hams Amateur Radio Club + Derek LaHousse + dlahouss&mtu.edu +43181 + VFA INC + Antoine Jackson + itgroup&vfa.com +43182 + Minor Inf.Tech. + Ahmet Bahar + ahmetb&minor.com.tr +43183 + Renfe Viajeros, S.A. + Salvador Pozo Ortega + sportega&renfe.es +43184 + The Floow + James Ridgway + james.ridgway&thefloow.com +43185 + FINN Sp. z o.o. + Przemyslaw Sztoch + psztoch&finn.pl +43186 + Zweite Heimat GmbH + enno dobberke + ed&zweite-heimat.com +43187 + LIM college. + Syed Zaidi + Syed.Zaidi&limcollege.edu +43188 + INDIWAN GmbH + Marco Revesz + info&indiwan.de +43189 + J.B. Hunt Transport Services, Inc. + Michael Mullins + Michael_Mullins&jbhunt.com +43190 + Arket Srl + Paolo Grotto + p.grotto&arket.it +43191 + Metamako + Scott Newham + info&metamako.com +43192 + Automation & Integration Global Security Group, LLC. 
+ Elisa Hernandez + elisa&aigsg.com +43193 + GRG International LTD + Rico Wang + rico.wang&grgatm.com +43194 + ABB Oy + Mika J Kärnä + mika.j.karna&fi.abb.com +43195 + Management of finance of the Administration of Tchaikovsky municipal district Perm region + Svetlana Koroteeva + purdiksergey&yandex.ru +43196 + Evintia SL + Jorge Albareda Florensa + timestamp&evintia.com +43197 + BCS Technologies + George Abitbol + mco-bcs&bcstechno.net +43198 + stepins.com + Markus Stepins + support&stepins.com +43199 + Rubisco Ltd (formerly 'Aitch-Pea Limited') + Jernej Zajc + oidmaster&rubisco.com +43200 + CLAAS KGaA mbH + Ralph Hoeltmann + iana&claas.com +43201 + bytemine GmbH + Holger Rasch + info&bytemine.net +43202 + Zeta Storage Systems Ltd (formerly 'Digital Networks UK Ltd') + Ed Taylor + edward.taylor&zeta.systems +43203 + TSS COMPANY + Frantisek Takacs + takacs&tsscompany.eu +43204 + Hard 'n Software Consulting GmbH + Scott Hardin + scott&hnsc.de +43205 + Aloxa.eu + Javier Fernández + javier&aloxa.eu +43206 + American Cancer Society, Inc. + Scott Barr + scott.barr&cancer.org +43207 + Real-Time by Design, LLC + Creed Huddleston + creedh&realtimebydesign.com +43208 + Cybersavants,LLC + Guy Fuller + sales&cybersavants.com +43209 + Bliik + Jonathan Vermeulen + info&bliik.nl +43210 + Prominic.NET, Inc. + Andrew Hettinger + ahettinger&prominic.net +43211 + Atostek Oy + Timo Mähönen + root&atostek.com +43212 + space150, Inc. + Shawn Roske + shawn.roske&space150.com +43213 + Aveillant Ltd + Jason Morgan + jason.morgan&aveillant.com +43214 + Programa Interlegis + Fabio Kaiser Rauber + fabior&interlegis.leg.br +43215 + PC Users Group (ACT) Incorporated + Stephen Rothwell + hostmaster&tip.net.au +43216 + Fukushima Prefectural Government. Japan + Katsumi Ujiie + ujiie_katsumi_01&pref.fukushima.lg.jp +43217 + Ultimate Europe Transportation Equipment GmbH + Christian Hintersteiner + christian.hintersteiner&ultimate-eur.com +43218 + tecalor + Michael Kruckenberg + michael.kruckenberg&tecalor.de +43219 + Sallie Mae + Mike Bandy + mike.bandy&salliemae.com +43220 + DB Netz AG + Jan Kaebisch + jan.kaebisch&deutschebahn.com +43221 + Bemobi Midia e Entretenimento LTDA. + Tamoyo Vitali + tamoyo.vitali&bemobi.com.br +43222 + Oxfam + Gabriele Sani + gabriele.sani&oxfaminternational.org +43223 + Wireless Telecom Group, Inc + Brian Forbes + bforbes&wtcom.com +43224 + Netyard Pawel Jenner + Pawel Jenner + netyard&jenner.pl +43225 + Unitymedia KabelBW + Sven Ossendorf + postmaster&unitymedia.de +43226 + Ashe Cosgrove Corporation + Chris Cosgrove + chris&sunmoonstar.net +43227 + ossmail.de + Till Smejkal + postmaster&ossmail.de +43228 + Shanghai MRDcom Co., Ltd. + Qingzhong Tan + qingzhong.tan&mrdcom.net +43229 + SOFTEH PLUS SRL + Adrian Trif + adrian&softeh.ro +43230 + Betrust N.V. + Stefan Geelen + stefan.geelen&betrust.be +43231 + GSI Helmholtzzentrum für Schwerionenforschung GmbH + Christopher Huhn + monitoring-service&gsi.de +43232 + Bank of Nova Scotia + Jason Gray + jason.gray&scotiabank.com +43233 + Scheinkönig & Co. + Dieter Scheinkönig + service&scheinkoenig.com +43234 + Enistek LLC + Andrey Tolstoy + andrey&enistek.com +43235 + Stadt Chur + Josua Ackermann + lizenzen_it&chur.ch +43236 + Pilz GmbH & Co. KG + Frank Eberle + f.eberle&pilz.de +43237 + AOC Oost + Pascal te Pas + ictservicedesk&aoc-oost.nl +43238 + Compass Plus Ltd. + Alexandra Kochergina + a.kochergina&compassplus.com +43239 + MindMade sp. z o.o. 
+ Tomasz Borkowski + office&mindmade.pl +43240 + Day & Zimmermann + Jason Schrader + NetworkInfrastructure&dayzim.com +43241 + Guidance Ltd. + Peter Varga + peter.varga&guidance.hu +43242 + Universal Switching Corporation + Hans Johansson + hj&uswi.com +43243 + ShinyByte Web Agency, LLC. + Jacob Vancil + Jacob&shinybyte.com +43244 + Open Information Security Foundation + Victor Julien + oisf-team&openinfosecfoundation.org +43245 + G421 Networks + Keith Dube + keith.dube&g421networks.com +43246 + Bennington-Rutland Supervisory Union + Daniel French + daniel.french&brsu.org +43247 + Rui Pereira (netcloud.pt) + Rui Pereira + admin&netcloud.pt +43248 + 3 Turn Productions LLC + Judy Tyrer + judy3t&3turnproductions.com +43249 + THM Consulting Ltd + Thorsten Mayr + thorstenpm&gmail.com +43250 + "Group of companies "POWER ENGINEERING" Ltd + Evgeniy Petrov + e.petrov&e-pwr.ru +43251 + Planet Erde + Roland Gsell + roland.gsell&gmx.net +43252 + CosDay e. V. + Alexander Pankow + it&cosday.de +43253 + ASGraphics + Andreas Strobl + astrobl&asgraphics.at +43254 + Geonov + Mathieu Ambrosy + mathieu.ambrosy&geonov.fr +43255 + FISBA Optik AG + Joder Philipp + Philipp.Joder&fisba.ch +43256 + FlouLab (Ioannis Angelakopoulos) + Ioannis Angelakopoulos + ioangel&floulab.net +43257 + webs.bz + Rafal Kruk + hello&webs.bz +43258 + D'Youville College + Mary Spence + helpdesk&dyc.edu +43259 + Cambridge Health Alliance + Jeffrey Johnson + itd39&challiance.org +43260 + WireGeo.com + Konstantin Artemev + manager&wiregeo.com +43261 + Pontis Research Inc. + Aditya Bhushan + aditya.bhushan&pontisresearch.com +43262 + ABVI-Goodwill + Randy Overmyer + rovermyer&abvi-goodwill.com +43263 + SolarCity + Todd Tidwell + ttidwell&solarcity.com +43264 + Opanga Networks, Inc. + John Burnette + johnb&opanga.com +43265 + HongKong Yunlink Technology Co., Ltd + WU SHUNQUAN (Jessy) + yuncore.jessy&gmail.com +43266 + S.Toraighyrov Pavlodar State University + Sergey Berendakov + admin&psu.kz +43267 + JAMESON MEMORIAL HOSPITAL + RHONDA GRICKS + RGRICKS&JAMESONHEALTH.ORG +43268 + TELECO Spa + Massimiliano Negretti + laboratorio&telecospa.it +43269 + DI Kurt Renauer + DI Kurt Renauer + office&renauer.at +43270 + Yoshitaka Yamane + Yoshitaka Yamane + yoshitaka.yamane&oasis.ocn.ne.jp +43271 + Justus-Liebig-Universität Gießen + Dr. Oliver Bäumer + ldap-admin&hrz.uni-giessen.de +43272 + Austria Card Plastikkarten und Ausweissysteme Ges.m.b.H + Thomas Aichinger + thomas.aichinger&austriacard.at +43273 + Control Center Apps GmbH + Christoph Knittel + christoph.knittel&cca.io +43274 + "Group of Companies "RosIntegracija" LLC. + Dmitriy Gvozdyuk + dmitriy.gvozdyuk&rosintegracija.ru +43275 + Alex Eggenberger + Alex Eggenberger + domaxinreg9&gmail.com +43276 + Coreline Soft Co.,Ltd + Hyungi Seo + hyungi.seo&corelinesoft.com +43277 + AE Telelink System Limited + Rajeev Kumar + rajeev&aetelelink.com +43278 + zalio + Hans Riethmann + hans.riethmann&zal.io +43279 + R. L. Drake Holdings, LLC + Joshua Blanton + jblanton&rldrake.com +43280 + Vereniging Ons Middelbaar Onderwijs + Thijs van Ham + tv.vanham&omo.nl +43281 + PAUL HARTMANN AG + Falk Nicolaus + certificate-admins&hartmann.info +43282 + PROVIDENTIA SYSTEMS + ROBERT BASSANO + robert.bassano&providentiasystems.com +43283 + Vado Security + Yossi Ben Hagai + yossi&vadosecurity.com +43284 + Dream Solutions Ltd + Martin Searancke + martin&dreamsolutions.biz +43285 + Quanzhou Karassn Security Protection Electronics Co., Ltd. + HongYaDe + www&dik.com.cn +43286 + Keysight Technologies, Inc. 
+ Ron Pleshek + ron.pleshek&keysight.com +43287 + Mocomsoft Inc. + Jian Shen + jshen&mocomsoft.com +43288 + Lugansk State Academy Culture and Arts + Filippov Valeriy Leonidovich + lugansk.academy.culture.arts&gmail.com +43289 + W3Security + Adrian Tanasie + pixelsbox&gmail.com +43290 + DLP Networks + David Jia Wei Li + david&david-li.com +43291 + Frank Loepthien (Loeppi) + Frank Loepthien + loeppi&gmx.de +43292 + Applifier Oy + Juho Mäkinen + juho&applifier.com +43293 + Diakonie Deutschland + Jörn Hoffmann + internet&diakonie.de +43294 + M2M Telemetria Ltda + Celso de Oliveira + celso&m2mtelemetria.com.br +43295 + Kreel LLC + Scott Kreel + scott&kreel.com +43296 + Exablaze + Matthew Chapman + matthew.chapman&exablaze.com +43297 + BASSANO & ASSOCIATES + ROBERT BASSANO + robert.bassano&bassanoassociates.co +43298 + fnordspace.net + Markus Barenhoff + mbarenh&alios.org +43299 + Das Kartell (German Star Citizen Organization) + Jörg Griepentroch + BFC.OGDA&gmx.net +43300 + PDC Corporation + Hershel Wolf + hershel.wolf&parameds.com +43301 + xiamen diksoft Co.,Ltd. + YaDe Hong + www&dik.com.cn +43302 + Bank Of Stockton + Tyler Applebaum + tapplebaum&bankofstockton.com +43303 + 1st Choice Tax Service, Inc. + Don Larsen + donlarsen&1stChoiceTax.biz +43304 + Davenport University + Steven Tharp + stharp&davenport.edu +43305 + Hey Communications + Chris Bajumpaa + chrisb&hey.net +43306 + Bitclear, LLC + Chris Lamont + chris.lamont&bitclear.us +43307 + Keyprocessor + Wilfred Janssen + wilfred.janssen&keyprocessor.com +43308 + Appcara Inc + Goh Eng Teong + engteong.goh&appcara.com +43309 + Sweratel AB + LIM KEE CHIAN + keechian&sweratel.se +43310 + Konak Solutions Pty Ltd + Nesh Nenad Mijailovic + info&konak.com.au +43311 + Belcity Software + Nigel Prosser + nigel.prosser&belcity.co.uk +43312 + ZRO Kvant + Nazarenko Victor + snakedj&inbox.ru +43313 + zerolatency ltd + Mauro Rappa + info&zerolatency.co +43314 + Tecteo Group SA + Adrien le Maire + adrien.le.maire&tecteo.be +43315 + PowerDNS.COM BV + bert hubert + bert.hubert&powerdns.com +43316 + Information Factory + Anton Borowka + admins&information-factory.com +43317 + Stadtwerke Bonn GmbH + Eduard Dueck + it-service&stadtwerke-bonn.de +43318 + PERSON to PERSON Analysis CA + Enrique Fermin + efermin&contactop2p.com +43319 + Secure Meters Limited + Hemant Jangid + hemant.jangid&securetogether.com +43320 + NetSTAR inc + seinosuke terashi + s.terashi&netstar-inc.com +43321 + Polaris Networks + Pradip Biswas + pradip_biswas&polarisnetworks.net +43322 + Kenya Education Network + Peter Muia + pmuia&kenet.or.ke +43323 + Trans4mation IT GmbH + Sebastian Hähnel + Sebastian.Haehnel&trans4mation.de +43324 + FARSENS S.L. + Daniel Pardo + info&farsens.com +43325 + Shoreland Inc. + Scott Kreel + sakreel&shoreland.com +43326 + Triple Canopy Inc. + Todd Wallace + it-services&triplecanopy.com +43327 + Toronto Public Library + Alex Schifitto + aschifitto&tpl.ca +43328 + Exscribe, Inc + Tony Kennedy + tony&exscribe.com +43329 + OneConfig Pty Ltd + Martyn Lomax + mlomax&oneconfig.net +43330 + Sonavation, Inc. + Tom Alexander + tom.alexander&sonavation.com +43331 + Madhat Technical Solutions LLC + Jamie Duemo + jamieduemo&madhatts.com +43332 + DialogSoft inc.
+ Ivan Petrov + peti&dialogsoft.biz +43333 + Gangwon EMbedded Software Cooperative Research Center + Chang Yeong CHOI + ccy333&gwnu.ac.kr +43334 + iDocTrust + Gerhardus Muller + gerhardus1&gmail.com +43335 + Union des Bois de Champagne + Alain JACQUET + contact&foretsetboisdelest.com +43336 + GOERLITZ AG + Norbert Bartsch + norbert.bartsch&goerlitz.com +43337 + ZENON MEDIA GmbH + Horst Roser + roser&zenon-media.net +43338 + Yottabyte + Greg Campbell + Engineering&Yottabyte.com +43339 + efComs UG + Joachim von Stieglitz + jvs&efcoms.com +43340 + Traffic Technologies, Inc. + Michael Kolb + michael.kolb&t-t-i.com +43341 + Dashlink + Rick Gibson + rick&dashlink.com +43342 + SnailProof Computer Services + Shawn Amyotte + shawn&snailproof.com +43343 + Contactless Devices LLC + Evgeny Boger + info&contactless.ru +43344 + Maven Inventing Solutions + Marison Gomes + marison&maven.com.br +43345 + Aktietrykkeriet AS + Eyvind Riise + eyvind.riise&aktietrykkeriet.no +43346 + Douglas County School District, Colorado + Jeremy Norman + jnorman&dcsdk12.org +43347 + ValueDatum Systems, Inc. Ltd + Kevin Huang + kevin&valuedatum.com +43348 + EmbeddedSoft Ltd + Aleksander Kałążny + aleksander.kalazny&embeddedsoft.eu +43349 + Nigerian Research and Education Network (NgREN) + Prof. Mike Faborode + ngren&eko-konnect.net.ng +43350 + Arikel.net Enterprises, LLC + Michael King + mking&arikel.net +43351 + Advania + Aki Barkarson + aki&advania.is +43352 + Hangzhou Huaxing Chuangye Communication Technology CO.,LTD. + Wang Ke + yujiangang&hxcy.com.cn +43353 + Beijing Certificate Authority + Lanfang Yang + yanglanfang&bjca.org.cn +43354 + OBI Smart Technologies GmbH + Dirk Proepper + dirk.proepper&obi.de +43355 + VTI Instruments Corporation + Dale P. Smith + dsmith&vtiinstruments.com +43356 + Mimosa Networks, Inc + Alexander Gostrer + alex&mimosa.co +43357 + Thomas Fagart (Brozs) + Thomas Fagart + tfagart&brozs.net +43358 + candiansolar.com + wei.yang + wei.yang&canadiansolar.com +43359 + China Guodian Corporation + ChengChuan + ChengChuan&cgdc.com.cn +43360 + Binhai Online Information Technology Company Limited + Sun Yajiang + info&binhai.com +43361 + IT Voimala Oy + Topias Airaksinen + topias.airaksinen&itvoimala.fi +43362 + Zakład Automatyki i Urządzeń Pomiarowych AREX Sp. z o.o. + Marcin Garski + marcin.garski&arex.pl +43363 + B2B.CZ, s.r.o. + Richard Kotal + richard.kotal&b2b.cz +43364 + Vision Box, S.A. + Roger Davies + rdavies&vision-box.com +43365 + Qi ict + Martijn Ebben + martijn.ebben&qi.nl +43366 + N-Tools.de + Dirk Nenninger + iana&n-tools.de +43367 + Suedzucker AG + Wolfgang Junker + wolfgang.junker&suedzucker.de +43368 + Oradian d.o.o. + Dražen Kačar + drazen.kacar&oradian.com +43369 + BrightSign LLC + Mike Crowe + mcrowe&brightsign.biz +43370 + Giordano Bruno Stiftung + Dr. Michael Schmidt-Salomon + it-ops&giordano-bruno-stiftung.de +43371 + Moegui S.p.A + Luciano Giacchetta + admin&moegui.com +43372 + Embedded Access Inc + Craig Honegger + craig.honegger&embedded-access.com +43373 + Associação dos Registradores Imobiliários de São Paulo + walter de oliveira + walter&arisp.com.br +43374 + Belton IT + Thomas Matysik + noc&belton.co.nz +43375 + Baer & Karrer AG + Forgiarini Gabriele + gabriele.forgiarini&baerkarrer.ch +43376 + Ladybird Cranes Ltd. + Karl Knibbs + it&ladybirdcranes.co.uk +43377 + GDV-IT + Christian Graichen + info&gdv-it.de +43378 + Holger Genth (HGD) + Holger Genth + kontakt&holger-genth.de +43379 + R.J. 
O'Brien and Associates + Matthew Wulk + MWulk&rjobrien.com +43380 + Counterpoint Networking, Inc. + John Hudson + ipadmin&cpoint.net +43381 + GEMATICA SRL + Enrico Ciotola + snmp&gematica.com +43382 + Dreamket Co., Ltd. + Jae Young Lee + duri&dreamket.co.kr +43383 + Commercial Bank “Severo-Vostochny Alliance” (Joint-Stock сompany) + Vasily Rusakov + sb&svabank.ru +43384 + Pelle Olsson (Raholmen) + Pelle Olsson + pelle_olsson&swipnet.se +43385 + Proyectos-IP + Jose Antonio Perez + japerez&proyectos-ip.com +43386 + Expkits + Göktuğ ÇAKMAK + info&expkits.com +43387 + city of Sioux City, IA + Daniel Middleton + dmiddleton&sioux-city.org +43388 + aeworld.co.uk + ian sutherland + isutherland1&hotmail.co.uk +43389 + Eko-konnect Research and Education Initiative + Cletus Okolie + okoliec&eko-konnect.net.ng +43390 + BEIJING FOREVER COMMUNICATION ELECTRONICS CO.,LTD + Jin Cancan + jcc&xinxunxintong.com +43391 + UTT Co., Ltd. + Cho Yoon Sik + yscho&uttsys.com +43392 + Agile Digital Engineering Pty Ltd + Justin Smith + infrastructure&agiledigital.com.au +43393 + S-Net GmbH + Henrik Schröter + registrierung&snet-it.de +43394 + Intrinium, Inc + Nolan Garrett + internalit&intrinium.com +43395 + The City of Amsterdam + Niels de Groot + N.de.Groot&amsterdam.nl +43396 + earthlab (daichi makino) + daichi makino + makino.daichi&earthlab.info +43397 + NET MEDIA SERVICES a.s. + Marek Lukács + marek.lukacs&netmedia.as +43398 + WILDIX + Stefano Osler + stefano.osler&wildix.com +43399 + Tiscali Italia SpA + Anselmo Canfora + acanfora&it.tiscali.com +43400 + IT storitve, Gregor Jerše, s.p. + Gregor Jerše + gregor&jerse.info +43401 + TRData Limited + Anton Pasiechnikov + anton&trdata.com +43402 + Layer-7 Technologies + Michael Rommel + rommel&layer-7.net +43403 + CareerHub Pty Ltd + Dee Hughes + dee&careerhub.com.au +43404 + SOCIEDAD ESTATAL CORREOS Y TELEGRAFOS SA + David Matesanz + asei&correos.com +43405 + SA Water Corporation + Andrew Commons + issecurity&sawater.com.au +43406 + Universiti Putra Malaysia + Norhayti Mustafa + ldap&upm.edu.my +43407 + David Martin (ReaperLegion) + David Martin + ReaperLegion&gmail.com +43408 + HDmessaging Inc + Juho-Pekka Virolainen + ops&hdmessaging.com +43409 + Reith IT-Lösungen GmbH + Nico Reith + info&reith.it +43410 + Joseph Williams + Joseph Williams + williams.joe&gmail.com +43411 + Graeser Development + Patrik Gräser + mib-mgr&graeser.se +43412 + Genius Digital Limited + Alex Dick + registry&geniusdigital.tv +43413 + Fairview Health Services + Coral Lindahl + codell1&fairview.org +43414 + Molson Coors Brewing Company + Jackie Halewood + jackie.halewood&molsoncoors.com +43415 + Deepblue Informatikai Kft + Attila Kuti + attila.kuti&deepblue.hu +43416 + Multigenregameworld LLC + Mikhail Kuznetsov + mikhail.kuznetsov&mggworld.com +43417 + South Side Hackerspace Chicago, NFP + Christopher Swingler + cto&sshchicago.org +43418 + Khakham Sayavong (Nticks) + Khakham Sayavong + contact&nticks.net +43419 + Microsoft Unified Communications and Collaboration User Group + Yves Frédéric N´Soussoula + info&msuccug.de +43420 + Proximic Inc + Paul Armstrong + iana&proximic.com +43421 + SysMa.cz + Tomas Krejcik + tomas.krejcikz&gmail.com +43422 + ADTEC Communications + Jon Bolton + engineering&adtec.com.au +43423 + KeyONet + Kim Cheol Kee + firstpw&keyonet.com +43424 + Securitas FINLAND + Juha Miettinen + juha.miettinen&securitas.fi +43425 + Aduno Gruppe + Boris Bellorini + boris.bellorini&aduno-gruppe.ch +43426 + Noction + Eugeniu Ceglov + eceglov&noction.com +43427 + Altus Sistemas de 
Automação S.A + Rafael Morello + rd&altus.com.br +43428 + The William and Flora Hewlett Foundation + Boris Decout + bdecout&hewlett.org +43429 + Strata Information Technology, Inc + Keith Stark + keith&stratait.com +43430 + Systems Definition, Inc. + Frank Briese + briesef&systemsdefinition.com +43431 + Tate, Inc. + Peter Strapp + pstrapp&tateinc.com +43432 + MICROMED BIOTECNOLOGIA LTDA + FRANCISCO DUARTE MOREIRA NETO + neto&micromed.ind.br +43433 + Sreevidya Aravind + Sreevidya Aravind + sv.vidyaaravind&gmail.com +43434 + Metacom Pty Ltd + Stefan Reinke + stefan&metacom.co.za +43435 + Nutsrange Technologies + James Gu + nutsrange&gmail.com +43436 + Accumuli PLC + Paul Maskelyne + paul.maskelyne&accumuli.com +43437 + City of Montclair + Robert Castillo + rcastillo&cityofmontclair.org +43438 + County Of Bruce + Jaron Kerr + jkerr&brucecounty.on.ca +43439 + J.J. Downs Industrial Plastics Inc. + Dani Cela + dcela&jjdowns.com +43440 + royal holloway, university of london + roger matthews + roger.matthews&rhul.ac.uk +43441 + royal holloway, university of london + roger matthews + roger.matthews&rhul.ac.uk +43442 + Niles Radio Communications + John Lindsey + john&nilesradio.com +43443 + Transacciones y Servicios Mobile + Fernando Bas + ferbas&transermobile.com +43444 + Inspire Living, Inc. + Michael Script + mscript&inspirelivinginc.com +43445 + 4th Layer Integrated Solutions S.r.l. + Enrico Milani + enrico.milani&4lis.it +43446 + Hartmann Electronic GmbH + Fedor Minich + fedor.minich&hartmann-electronic.com +43447 + LoadFront, S.L. + Víctor Román Archidona + vroman&loadfront.com +43448 + THEFEINERS.COM + Amnon Feiner + Amnon.Feiner&hotmail.com +43449 + mobilcom-debitel GmbH + Jan Singer + jan.singer&md.de +43450 + Agencja Restrukturyzacji i Modernizacji Rolnictwa + Marek Michnowicz + PKI&arimr.gov.pl +43451 + Australian Customs and Border Protection Service + Chris Sherlock + chris.sherlock&customs.gov.au +43452 + Pragtec inc. + Jean-Francois Savard + jf.savard&pragtec.com +43453 + Visiometrics S.L. + Raul Cuevas González + informatica&visiometrics.com +43454 + Wiccess + Jean-Paul Mallet + jp.mallet&wiccess.com +43455 + LFT Solutions + Angel Manchado + angel.manchado&fiplex.com +43456 + KHS GmbH + Frank Jaenecke + frank.jaenecke&khs.com +43457 + Warp Software Ltda. + Luiz Imbroisi Filho + luiz&imbroisi.com +43458 + Nvizible Ltd + Mr Andrew Kingston + licenses&nvizible.com +43459 + Virsae Group Ltd + Matthew Bagley + matthew.bagley&virsae.com +43460 + university college Leuven Limburg (UCLL) + Noe Gunther + gunther.noe&khleuven.be +43461 + DGIT INTERNATIONAL PTE. LTD + Allan Randall + engineering&dgit.biz +43462 + Universidade Federal Rural de Pernambuco + Carlos Frederico Medeiros de Souza + coord.suporte&nti.ufrpe.br +43463 + Kyoto-University Microcomputer Club + Kasumi Hanazuki + hanazuki&kmc.gr.jp +43464 + Ten Cities Media + Greg Foster + greg.foster&tencitiesmedia.com +43465 + Communication Federal Credit Union + Travis Llewellyn + travisll&comfedcu.org +43466 + CMS Central Media Solutions GmbH + Lablack, Stefan + stefan.lablack&centralmedia-solutions.de +43467 + Evolv, Inc. + Haam Tham + pencontact&evolv.net +43468 + Cacto Internet Ltda + Daniel Farias + daniel&cacto.me +43469 + Sysnove + Guillaume Subiron + guillaume&sysnove.fr +43470 + G + H Netzwerk-Design Gesellschaft für IT-Consulting mbH + Tobias Schröder + t.schroeder&netzwerk-design.de +43471 + Sun Trading Solutions + Jason Conley + jconley&suntradingllc.com +43472 + APC Integrated Services Group, Inc.
+ Matt Lehmann + mlehmann&apcisg.com +43473 + YHGfL Foundation + Andrew Yoward + support&yhgfl.net +43474 + Coca-Cola Hellenic Bottling Company + Martin Mirov + martin.mirov&cchellenic.com +43475 + XNX Sàrl + Cyril Christin + account-iana-pen&xnx.ch +43476 + LeanDev + Robert Pettersson + robert.pettersson&leandev.se +43477 + Bioss Consulting SRL + Giovanni Tirino + info&biossconsulting.com +43478 + Jonathon Anderson (civilfritz) + Jonathon Anderson + anderbubble&gmail.com +43479 + Ecole nationale Supérieure d'Informatique + MEDABIS BILAL + b_medabis&esi.dz +43480 + Canine Creche + Alanna Marquis + info&caninecreche.co.za +43481 + Innovaatik OÜ + Jargo Kõster + jargo&inno.ee +43482 + GlobalSensing Technologies + Olivier BROUSSE + r_et_d&globalsensing.eu +43483 + AIPHONE CO., LTD. + Hirokazu Harima + soft_snmp&aiphone.co.jp +43484 + Fundación Fulgor + Juan Vercellone + jvercellone&fundacionfulgor.org.ar +43485 + Trillium Teamologies, Inc. + Robert Morris + helpdesk&trilliumteam.com +43486 + Fuse Networks, LLC + Garan Williams + garanwilliams&fusenetworks.com +43487 + Guangzhou Woxiang Information Technology Co., Ltd + Difan Zhang + tifan&ifanr.com +43488 + Schletter GmbH + Markus Burgstaller + markus.burgstaller&schletter.de +43489 + GOOD FIRST GROUP . + HongDe + 59295152&qq.com +43490 + Rite-Tech Industrial Co., Ltd. + Sam Huang + sam&ritetech.com.tw +43491 + ITR Service GmbH + Stephan Schumann + stephan.schumann&itr-service.de +43492 + Bernhard Trinnes (microdots) + Bernhard Trinnes + bernhard.trinnes&microdots.de +43493 + Online Linjeforening + Kristian Ekle + kristian.ekle&gmail.com +43494 + Clarity Ltd + Robert Stiles + b06&gmx.com +43495 + Taide Enterprise Co.,Ltd. + Luo Xinheng + luo&tai-de.com +43496 + QSC AG + Andreas Schimansky + andreas.schimansky&qsc.de +43497 + Weavesys UK + Elliott Clark + elliott.clark&weavesys.com +43498 + SENSORBOX LTDA (formerly 'SENSORBOX DO BRASIL LTDA') + Carlos Eduardo Padua Sarcinelli + c.eduardo&sensorbox.com.br +43499 + Youncta + Luigi Russo + luigi.russo&youncta.com +43500 + 4D Security Solutions + Ken Robinson + kenneth_robinson&4-dsecurity.com +43501 + Boston Financial Data Services Inc + Rob Forand + rforand&bostonfinancial.com +43502 + HighRes Biosolutions + Ian Locke + ilocke&highresbio.com +43503 + Jay Ridgeway (beekling.com) + Jay Ridgeway + jayridge&gmail.com +43504 + Judd Storrs + Judd Storrs + jstorrs&gmail.com +43505 + Plugh Studios Ltd + Randy Beiter + iana&plughstudios.com +43506 + Beats by Dre + Brannon Means + brannon.means&beatsbydre.com +43507 + ti&m AG + Harald Böttcher + harald.boettcher&ti8m.ch +43508 + EDT SAS - Electronic Data Transfer + Stéphane CHANVILLARD + schanvillard&edt.fr +43509 + BITPRO AS + André Nordstokke + an&bitpro.no +43510 + Juan Jose Rodriguez Ponce + Juan Jose Rodriguez Ponce + mail&juan-jose.com +43511 + E7 Solutions + Brandon Opfermann + brandon.opfermann&e7solutions.com +43512 + BEIJING SIFANG AUTOMATION CO., LTD + youtao + youtao&sf-auto.com +43513 + SDIS68 + Nicolas Riss + nicolas.riss&sdis68.fr +43514 + kt telecop + Park, Sang Yeon + itsmeda.park&kt.com +43515 + NISCOM,Inc. + Kaoru Miyazaki + k-miyazaki&niscom.co.jp +43516 + Auvik Networks Inc. + Marc Morin + mmorin&auvik.com +43517 + CoolMinds Technologies (P) Ltd + Abraham Jacob + abraham&coolmindsinc.com +43518 + Smartconnect NV / SA + Mark Tetrode + mark.tetrode&smartconnect.eu +43519 + Tranwall + Aurelien Duarte + aurelien.duarte&tranwall.com +43520 + pteam gmbh + Markus Gruetter + office&pteam.ch +43521 + RSE Informationstechnologie GmbH + Dipl.Ing.
Alfred Darmann + darmann&rse.at +43522 + VoltServer, Inc. + Dan Lowe + dan.lowe&voltserver.com +43523 + Alberta Investment Management Corporation + Nathanael Law + nathanael.law&aimco.alberta.ca +43524 + Vanteon Corp. + Matt Healy + mhealy&vanteon.com +43525 + Wavecon GmbH + Cemil Degirmenci + info&wavecon.de +43526 + Signal Bredbånd AS + Lars Erik Utsi Gullerud + lerik&signal.no +43527 + Presidio Networked Solutions + Brendan Bowden + bbowden&presidio.com +43528 + SHODEN CORPORATION + Yoshito Yagi + yagi&sdn.co.jp +43529 + Istituto Radiologico e Fisioradioterapico Valdarno + Andrea Venanzi + andrea.venanzi&centromedicovaldarno.it +43530 + HYTEC INTER Co.,Ltd. + Takashi Nishihara + net_admin&hytec.co.jp +43531 + Atomic Object LLC + Justin Kulesza + admin&atomicobject.com +43532 + Yafeng Shan (kokonur) + Yafeng Shan + yafeng.shan&gmail.com +43533 + Beijing National Railway Research & Design Institude of Signal & Communication Co, Ltd. (CRSCD) + Yafeng Shan + yafeng.shan&gmail.com +43534 + Alstom Grid + Zhi Shi + zhi.shi&alstom.com +43535 + Luxottica North America Distribution LLC + David Reasoner + dreasone&luxotticaretail.com +43536 + AO K-Dorstroy + Alex Chernyavskiy + support&k-dorstroy.kz +43537 + Solumed + Saul Jankelow + Saulj&solumed.co.za +43538 + EXA-TMR + Tülin Yaman + tulin.yaman&exa-tmr.com +43539 + Dwarf Animation Studio + Yann VOTÉ + it&dwarf-labs.com +43540 + Federal Agricultural Marketing Authority + Sharifah Khairun Nisa Habib Elias + ldap&fama.gov.my +43541 + LSY Inc + Brian Grey + brian.grey&lsyinc.com +43542 + DENT Instruments + Jesse Laird + jlaird&dentinstruments.com +43543 + ID Business Solutions Ltd + Simon Diaz + snmp-admin&idbs.com +43544 + DigiComm GmbH + Theo Bongartz + tbongartz&digicomm.de +43545 + Omnitracs, LLC + Pete Patron + ppatron&omnitracs.com +43546 + Tariox Limited + Martyn Van Berkel + hostmaster&tariox.com +43547 + SCOOP Software GmbH + Jürgen Link + juergen.link&scoop-software.de +43548 + Trasis sa + Gauthier Philippart + info&trasis.com +43549 + IT Competence Group SE + Karl Gerber + support&neton.ag +43550 + CAMEA spol. s r. o. + Fučík Otto + camea&camea.cz +43551 + Mavari-IT bvba + Peter Maeseele + peter&mavari.be +43552 + Whitesky Communications + Spencer Thomason + spencer&whiteskycommunications.com +43553 + Rapid Focus Security, Inc. + Sam Stelfox + sam&pwnieexpress.com +43554 + IGC + Didier Houdas + didier.houdas&igc-erp.com +43555 + LayeredDefense + Deral Heiland + dh&layereddefense.com +43556 + Rigas domes Informacijas tehnologiju centrs + Juris Persins + Juris.Persins&riga.lv +43557 + Nick Fender + Nick Fender + net.admin&nickfender.com +43558 + GitHub, Inc + Hugh Bawt + ops&github.com +43559 + ENKOM AG + Patrick Bruegger + info&enkom.com +43560 + New Orleans East Hospital + Kimberly Jones-Williams + Kim.JWilliams&NOEHospital.org +43561 + Micro-Research Finland Oy + Jukka Pietarinen + jukka.pietarinen&mrf.fi +43562 + ZepMed, LLC + Nathan White + natewhite44&gmail.com +43563 + pedro paulo medeiros + Pedro Paulo Medeiros + ppmedeiros&ig.com.br +43564 + TSC AUTO ID Technology Co., Ltd.
+ Phil Liu + phil&tscprinters.com +43565 + Reliance Jio Infocomm Limited + Sudhanshu Purwar + sudhanshu.purwar&ril.com +43566 + Engineering Center REGIONAL SYSTEMS, Limited + Alexey Gordichuk + a.gordichuk&ec-rs.ru +43567 + Yongjia County Board of Education + Li Chengdao + lcd&yje.cn +43568 + EMICON + Rifat Abdulkaumov + rifat&emicon.ru +43569 + Hong Yun Technology + Hong Yun + hongyuntech&gmail.com +43570 + Coopérative Forestière Bourgogne Limousin (CFBL) + Chantal BRODIN PREVOSTO + chantal.prevosto&cfbl.fr +43571 + SOCIETE DES TRANSPORTS ROCHATTE + Rose-Marie ROCHATTE + str&transports-rochatte.com +43572 + Foundation for Research and Technology Hellas + Panagiotis Sikas + sikas&ics.forth.gr +43573 + VSAT-SERVICE + Shishkov Alexey + shan&vsat-s.ru +43574 + Lansing Makers Network + Michael Johnson + itstaff&lansingmakersnetwork.org +43575 + OpenPeak + Dustin Oprea + dustin&openpeak.com +43576 + IntSec.NET + Alessio Bravi + alessio&bravi.org +43577 + CNPC Beijing Richfit Information Technology Co.,LTD. + Songjiajia + songphd&hotmail.com +43578 + 42technology AG + Stephan Koch + sw-development&42technology.ch +43579 + EASYPROTO di Giacomo Cesari + giacomo cesari + info&easyproto.it +43580 + SutherlandGlobalServices + Arunbabu Rajmohan + Arunbabu.r&sutherlandglobal.com +43581 + Ooyala Inc. + Bao Nguyen + ngqbao&gmail.com +43582 + Document Centric Solutions Ltd + David Randall + sysadmin&doccentrics.com +43583 + ReMake Electric + Karl Palsson + developers&remake-electric.com +43584 + Trizetto Provider Solutions + Kyle Pott + kyle.pott&trizetto.com +43585 + SAMSON AG + Axel Goettmann + agoettmann&samson.de +43586 + SVS TELEKOMÜNİKASYON HİZ. TİC. SAN. A.Ş. + Muhammet Fatih OKUTAN + fatih.okutan&svstelekom.com.tr +43587 + Roave, LLC. + Evan Coury + info&roave.com +43588 + SAA Topnet GmbH + Suzanne Epp + epp&saatopnet.de +43589 + KEKLOLWTF AS + Jan Egil Vestbø + iana&keklolw.tf +43590 + HAL9k hackerspace + Georg Sluyterman + bestyrelse&lists.hal9k.dk +43591 + Erlang Solutions LTD + Michal Tomczuk + sysadmin&erlang-solutions.com +43592 + Campbell Scientific, Incorporated + Sam Utley + sutley&campbellsci.com +43593 + Crimson Innovative + Sudhakara Rao + sudhakarmurala03&gmail.com +43594 + Interhyp AG + Norman Bauer + _IT_Windows_Services&interhyp.de +43595 + MolinoSoft (Julian Bourne) + Julian Bourne + julian&britesc.com +43596 + Carnegie Fitness + Clark Chiu + clark&carnegie-fitness.com +43597 + Comprehensive Health Management, Inc. + Oscar Galdona + oscar.galdona&wellcare.com +43598 + STORM + ST STORM + chip&storm.vu +43599 + Paneidos Desu + Sernin van de Krol + sernin&paneidosdesu.com +43600 + Beijing Easesky Netcom Technology Co.,Ltd + David Xu + davidxu&easeskytech.com +43601 + Nuri Technology Co., Ltd. + Kwang-Wook, Lee + nuritec&nuritec.co.kr +43602 + Rossiya Segodnya + Andrey Belokurov + postmaster&rian.ru +43603 + The Twelve Tribes + Patrick McCourt + patrick.mccourt&twelvetribes.org +43604 + Arkansas Educational Television Network (AETN) + Greg Barber + gbarber&aetn.org +43605 + Metatrust + Jorge Rocha + jorge.rocha&metatrust.com.mx +43606 + nobaq.net + Nikolaus Hammler + noc&nobaq.net +43607 + Funda Real Estate B.V. + Remie Bolte + beheer&funda.nl +43608 + Raben Group + Marcin Piotrowski + it.admin&raben-group.com +43609 + NetCracker Technology Corp. + Nikita Sizykh + ldap.snmp.oids.group&netcracker.com +43610 + ColosseoEAS, a.s. 
+ Miroslav Petrak + miroslav.petrak&colosseoeas.com +43611 + Power Ethernet + James Gardiner + support&powerethernet.com +43612 + IOLAN + Ernst Lemm + e.lemm&iolan.com +43613 + Skylife Engineering + Antonio Rodriguez + itadmin&skylife-eng.com +43614 + phaenovum Schülerforschungszentrum Lörrach-Dreiländereck e.V. + Lars Möllendorf + it-robotik&phaenovum.eu +43615 + VonRoll + Bilal Karaagac + bilal.karaagac&vonroll.com +43616 + Aventura Technologies Inc. + Michael Wangner + info&aventuratechnologies.com +43617 + Vidant Health + Kirk Davis + kirk.davis&vidanthealth.com +43618 + Camtrace + Alain Dumouchel + alcodu1812&gmail.com +43619 + LianYunGang E-Port Information Development Co.,Ltd + Yaokun Zhang + postmaster&lygeport.gov.cn +43620 + Shenzhen Headsun Technology + Bo Liu + 18929378839&189.cn +43621 + Medical Effect + Dr. Roman Zolow + zolow&medical-effect.com +43622 + WebPerative LLC + Alan McCluney + alan&webperative.com +43623 + Umensis - Guillaume Pannatier + Guillaume Pannatier + guillaume.pannatier&gmail.com +43624 + Jaroslav Sendler (xsendl00) + Jaroslav Sendler + sendler.jaroslav&gmail.com +43625 + Core Technology + Krzysztof Kielak + contact&moosefs.com +43626 + NewB + Catherine Blancquaert + info&newb.coop +43627 + Devani Creative + Sernin van de Krol + sernin&devani.nl +43628 + The Rubicon Project + Sean Michael Byron + poc&rubiconproject.com +43629 + PSITEX, LLC + Jack D. Pond + jack.pond&psitex.com +43630 + Duxoft, Inc. + Gennadiy Bezkorovayniy + gb&duxoft.com +43631 + Heijmans N.V. + Edwin van de Burgt + eburgt2&heijmans.nl +43632 + Celtex Works + Davey Robertson + d.robertson&celtexworks.com +43633 + AIES Advanced Industrial Electronic Systems + Dariusz Makowski + dariusz.makowski&aies.pl +43634 + Metrellis, Inc. + Walter Sujansky + wsujansky&sujansky.com +43635 + Practice Velocity, LLC + Gregory J Wentz + gwentz&practicevelocity.com +43636 + LES.NET + Leslie Bester + sales&les.net +43637 + Zymer Inc. + Peter Yoo + pryoo&zymerinc.com +43638 + ITS Korea + Sanghyun Lee + shleego&itskorea.kr +43639 + Evoluta Interactive + Schalk Snyman + schalks&evoluta.biz +43640 + THROUGHOTHEREYES (Jonathan Platzer) + Jonathan Platzer + jonathan&throughothereyes.net +43641 + netplus.ch SA + Jean-Blaise Rey + jean-blaise.rey&netplus.pro +43642 + Linde Engineering + Walter Werther + walter.werther&linde-le.com +43643 + TEQ SA + Alessandro Prioglio + tech-admin&teq.ch +43644 + Infra Services + Arjen Barnhard + pen.iana&infra-services.net +43645 + FEIG ELECTRONIC GmbH + Sascha Brueck + sascha.brueck&feig.de +43646 + Datatal AB + Thomas Persson + thomas&datatal.se +43647 + Rimbach IT Systems UG + Rene M. Rimbach + info&rimbach-it-systems.de +43648 + Naudit High Performance Computing and Networking S.L. 
+ Eduardo Magaña + eduardo.magana&naudit.es +43649 + Raritan Bay Medical Center + Susan Landow + slandow&rbmc.org +43650 + United Health Services + Kevin Rymer + kevin_rymer&uhs.org +43651 + INTTRA Inc + Michael Lunny + michael.lunny&inttra.com +43652 + GoBICS + Rasmus Steinkamp + iana.pen.6kUL4EWH2w&gobics.de +43653 + UNIVERSITAT OBERTA DE CATALUNYA + Carles Cortada i Hortalà + ccortada&uoc.edu +43654 + marcsi.ch + Marc Siegenthaler + shin&marcsi.ch +43655 + University Ecclesiastical Academy of Thessaloniki + Telemachos Stamkopoulos + telem&aeath.gr +43656 + Consortium + Samsun Kang + sammy-kang&nate.com +43657 + Kanton St.Gallen + Robert Schneider + Robert.Schneider&sg.ch +43658 + Seacloud + Bong Dae Kim + bd.kim&sea-cloud.co.kr +43659 + Hochschule für Technik und Wirtschaft des Saarlandes (htw saar) + Niclas Wolniak + niclas.wolniak&htwsaar.de +43660 + Zadara Storage Ltd + Yair Hershko + yair&zadarastorage.com +43661 + Columbus Business Solutions + Johnny Mejias + jmejias17&gmail.com +43662 + Simpson Housing LLLP + Gene Pinson + gene.pinson&simpsonhousing.com +43663 + eTouch Federal Systems + Benjamin Stassart + iana&etouchfederal.com +43664 + Unzane + Gerald Turner + gturner&unzane.com +43665 + rioxo GmbH + Rudolf Rohr + rudolf.rohr&rioxo.net +43666 + Hackerspace Brussels + Frederic Pasteleurs + askarel&hackerspace.be +43667 + Sunwire Inc + Jason Legault + support&sunwire.ca +43668 + Andreas Kreisel + Andreas Kreisel + akreisel&duck-edv.de +43669 + The Institute of Mathematical Sciences + B. Raveendra Reddy + ravi&imsc.res.in +43670 + Epic Code Pty. Ltd. + Michael Slade + mslade&knobbits.org +43671 + EMSEAS Teknik AB + Jonathan Petersson + jonathan.petersson&cxense.com +43672 + INTELLECT MODULE + Ivan Fedotov + ivan&intellect-module.ru +43673 + Federal State Unitary Enterprise CentrInform + Aleksey Gvozdev + a.gvozdev&center-inform.ru +43674 + Robonect + Mikhail Sokolov + robot&robonect.com +43675 + Accenture CAS GmbH + Carsten Mueller + carsten.mueller&accenture.com +43676 + 57North Hacklab + Iain Learmonth + irl&fsfe.org +43677 + ICT Plus srl + Alessandro Barsotti + alessandro.barsotti&ictplus.it +43678 + Michael Kramer + Michael Kramer + verwaltung&turkeyfish.de +43679 + SETTE + Sotir Gramosli + sotir.gramosli&sette.com.mk +43680 + Chorus Limited + David Vink + chorusopsnotify&chorus.co.nz +43681 + ELESTER-PKP Sp. z o.o. + Robert Bojara + robert.bojara&elester-pkp.com.pl +43682 + ITSA Consulting, LLC + Joshua M Miller + joshua&itsecureadmin.com +43683 + ALDEIS + Damien guigue + damien&yooda.com +43684 + NOVASIB GmbH + René Carl + rene.carl&novasib.de +43685 + biz:Consult Unternehmensberatung GmbH + Gregor Kralik + gregor.kralik&bizconsult.at +43686 + Jefferson County + David Olsen + itstaff&co.jefferson.wa.us +43687 + Christoph Knott - IT-Sicherheitstechnik + Christoph Knott + info&it-sicher.com +43688 + GERMEN ELEKTRONIK LTD + MURAT GERMEN + mgermen&germen.com.tr +43689 + schugart + Dimitri Fagart + raspberry&schugart.net +43690 + The Active Network + Heather Tillman + heather.tillman&activenetwork.com +43691 + Michael Kettner + Michael Kettner + mikettner&gmx.de +43692 + haberberger.com + Andreas Haberberger + admin&haberberger.com +43693 + Fotolia LLC + Renaud Chaput + renaud&fotolia.com +43694 + Omega Intelligence Systems Ltd + Martin Adamczyk + admin&omegaintellisys.com +43695 + 5nine Software + Dr.
Konstantin Malkov + kmalkov&5nine.com +43696 + LLC Info-Kontent + Sergey Mudrak + sergey.mudrak&beeper.ru +43697 + JSC Tyumen Electricity sale company + Ivan Toporkov + Ivan&elektro-32.ru +43698 + Joint Electronic Teller Services Ltd. + Ricky Man + rickyman&jetco.com.hk +43699 + EasyData, LLC + Vladimir Baranov + info&easydata.ru +43700 + PathCore + Dan Hosseinzadeh + iana.org&pathcore.ca +43701 + HySecurity, Inc. + Chuck Davidson + cdavidson&hysecurity.com +43702 + Dom-Daniel + Artashes Kalantarian + artashes&dom-daniel.com +43703 + Wacom Company Ltd. + Daniel Dietz + daniel.dietz&wacom.eu +43704 + Accutron Instruments Inc + Mike Sharkey + mike&accutroninstruments.com +43705 + F!nTcH.org + Vincent BECHON + fintch&fintch.org +43706 + Statnett SF + Roar Håkonsen + roar.hakonsen&statnett.no +43707 + Australian Rail Tecchnology + Thomas George + t.george&ar-tech.com.au +43708 + SimonMed Imaging + Chris Diehn + cdiehn&simonmed.com +43709 + AirDSL (Pty) Ltd + Iliyan Dobrev + iliyan&airdsl.co.za +43710 + Siemens Gamesa Renewable Energy (formerly 'GAMESA') + Jose Antonio Contreras Caballero + jose.contreras&siemensgamesa.com +43711 + mySupply ApS + Peter Freiberg + peter.schmidt.freiberg&mySupply.dk +43712 + Universidad Central de Venezuela + Alberto Bellorin + alberto.bellorin&ucv.ve +43713 + Code 42 + Ethan J Sommer + ethan&code42.com +43714 + Mega Soft Computación, C.A. + Jose E. Delgado + infra&megasoft.com.ve +43715 + Data Infrastructure Technologies Ltd. + Chris Ianculovici + crs5tx&gmail.com +43716 + Veterinary Nurse Solutions Pty Ltd + Deane Klaer + deane&vetnurse.com.au +43717 + imensup + makan jooyani + info&imensup.com +43718 + Österreichisches Weltraum Forum + Sebastian Sams + sebastian.sams&oewf.org +43719 + Telvio LLC + Andrew Kolchoogin + gadm&telvio.ru +43720 + Creat0r.pro + Sergey A. Prokofyev + s-proff&creat0r.pro +43721 + Cyberoam Technologies Pvt. Ltd. + Sanket Handa + pen.iana&cyberoam.com +43722 + DAC Systems, Inc. + Mark Nickson + mnickson&dacsystems.com +43723 + Stancil Corporation + Simon Farrow + simon.farrow&stancilcorp.com +43724 + Japan Prime Computing + daisuke watanabe + watanabe.daisuke&j-pc.jp +43725 + Casair, Inc + Mike Sturdee + mike.sturdee&casair.net +43726 + GO! Express & Logistics Deutschland GmbH + Uwe Rehorst + uwe.rehorst&general-overnight.com +43727 + Valeo Comfort ans Driving assitance + Etter Stephan + stephan.etter&valeo.com +43728 + Sinergise Ltd. + Primoz Kolaric + primoz.kolaric&sinergise.com +43729 + Teckids e.V. + Dominik George + dominik.george&teckids.org +43730 + Ernst Klett AG + Horst Lehmacher + hostmaster&klett-gruppe.de +43731 + FooxTek Co., LTD. + David Chu + david&foox.cc +43732 + Netmag Technology Corporation + Joyce Chen + joycechen&netmag-tech.com +43733 + East Photonics, Inc. + Yong-Seung Jeong + tech&eastphotonics.com +43734 + IQUO Ltd + Peter Reynolds + peter&iquo.co.uk +43735 + STI-Tassimco + Patrick Lauziere + patrick.lauziere&sti-tassimco.com +43736 + Standard Imaging, Inc. + Mark Mackenzie + mmackenzie&standardimaging.com +43737 + MedAllies + Nicole Domkiw + ndomkiw&medallies.com +43738 + Doshisha University + TANAKA, Yasuhiro + donetmgr&mail.doshisha.ac.jp +43739 + Concept Smoke screen Ltd + Matt Gilmartin + Matt&smoke-screen.co.uk +43740 + ASB-Security + Info Asb + info&asb.nl +43741 + Cerqueira & Marcos, Lda + Luis Martins + luis.fcm&netcabo.pt +43742 + Microx Computer GmbH + Adam Jaskowiec + adam.jaskowiec&microx-computer.de +43743 + Open Systems Technology, Pty.
+ Louis Rodriguez + itt&ost.com.au +43744 + airberlin + Peter Jaremenko + peter.jaremenko&airberlin.com +43745 + CONSEJO DE LA JUDICATURA + CRISTIAN FREIRE + entidad.certificacion&funcionjudicial.gob.ec +43746 + Ulterius Technologies, LLC + Alex Cavgalar + alex.cavgalar&ulteriustech.com +43747 + MaterialApps + Andreas Werder + andreas.werder&yahoo.com +43748 + ARES Conocimiento de Negocio, SL + Andres Escribano de Mingo + AEscribano&ares-cn.es +43749 + Country Bright Company Ltd. + Terence Ho + it&countrybright.com +43750 + Jabil Inc + Thomas Cetta + domains-administrator&jabil.com +43751 + Shenzhen TG-NET Botone Technology CO. Ltd. + Shine Zhang + xian.zhang&tg-net.cn +43752 + China Film Equipment Co. Ltd. + linxiaofei + linxiaofei&cfec.com.cn +43753 + Kyriasis + Johannes Löthberg + johannes&kyriasis.com +43754 + Philipp Wagner Softwareentwicklung + Philipp Wagner + mail&philipp-wagner.com +43755 + BG-Phoenics GmbH + David Feindel + basisdienste&bg-phoenics.de +43756 + Schwarz Dienstleistung KG + Max Killinger + gtld&sbg.de +43757 + QOSIT AG + Uwe Schneider + uwe.schneider&qosit.com +43758 + Clique Intelligence Inc. + Art Leondires + art.leondires&cliqueintelligence.com +43759 + Big White Network Technologies,Ltd. + Tuhongchao + tuhongchao&bigwhitenet.com +43760 + Atlas Networks + Nathan Eisenberg + nathan&atlasnetworks.us +43761 + Our School Ltd + Brett Merrick + support&ourschool.co.nz +43762 + PlazaLotusGroup + Pavel Chikovani + admin&plazalotus.com +43763 + ChinaFilm Global (Beijing) Technology limited + wang heng + heng.wang&cfgdc.net +43764 + Tallac Networks + Matthew Davy + mpd&tallac.com +43765 + Color Print + Torben Vingborg + TV&colorprint.dk +43766 + MBSys LLC Azerbaijan + Mehdi Kamran Mammadov + mehdi&mbsys.tv +43767 + Eastern Long Island Hospital + Susan Apicello + sapicello&elih.org +43768 + GatesAir, Inc. + Bradley Thomson + brad.thomson&gatesair.com +43769 + Vezea + Sebastian Wetzel + iana.admin&vezea.com +43770 + Addat s.r.o. + Ing. Jiri Kubias + jiri.kubias&addat.cz +43771 + Behlman Electronics + David Gash + dgash&behlman.com +43772 + K+S IT-Services GmbH + Regina Böhm + regina.boehm&k-plus-s.com +43773 + The Vancouver Clinic Inc. + Tim Morris + netadmins&tvc.org +43774 + Hortonworks Inc + Jesse Lesperance + jlesperance&hortonworks.com +43775 + Quotepro Inc + Brian Marquis + brian&quotepro.com +43776 + Symbiotic System Design + Manuel Deschambault + themactech&me.com +43777 + Maikel de Boer + Maikel de Boer + maikel&loopodoopo.nl +43778 + Hampshire Controls Corporation + Diane D. Rush + drush&hampshirecontrols.com +43779 + Finnova AG + Alexander Berger + alexander.berger&finnova.ch +43780 + Paradox Networks, Inc + Jackie Bates + iana-pen&paradoxnetworks.net +43781 + Moshel Kamadu Records + Markus Szumovski + markus.szumovski&moshel-kamadu.at +43782 + UPS Solutions INC. + Mitsunari Tomosaka + sdb-develop&ups-sol.com +43783 + WaveNet Communications Private Limited + MUSUNURU SRIDHAR + ms&wavenet.co +43784 + IFOM + Ivan Lago + ivan.lago&ifom.eu +43785 + Magellium + Romain Cabassot + romain.cabassot&magellium.fr +43786 + Factual Inc + Philip Marcus + sysops&factual.com +43787 + Leviathan Security Technologies + Falcon Darkstar Momot + falcon.momot&leviathansecurity.com +43788 + Comita d.d. + Bojan Oresnik + bojan.oresnik&comita.net +43789 + Trading & Consulting 'H.P.C.'
GmbH + Robert Gierzinger + rg&hpc.at +43790 + becker-aero + Roger Becker + roger.becker&becker-aero.de +43791 + Cine Digital Service + Ronan SALMON + snmp&cinedigitalservice.fr +43792 + Now Wireless Limited + Brett Warthen + brett&nowsms.com +43793 + Tom Gundersen (systemd) + Tom Gundersen + teg&jklm.no +43794 + Chris Thomson + Chris Thomson + chris&chris-thomson.co.uk +43795 + Guangzhou Andea Electronics Technology Co.,Ltd + ZhenSong Li + lzs6401948&163.com +43796 + Rasing IT Solutions + Bastiaan Rasing + bastiaan&rasing.me +43797 + Croz d.o.o. + Neven Biruski + nbiruski&croz.net +43798 + NANJING SAND TECHNOLOGY CO., LTD + Wang Kai Jiang + tech&sandkj.com +43799 + TRUMPF GmbH + Co. KG + Andreas Reinmoeller + andreas.reinmoeller&de.trumpf.com +43800 + OraSentry + Raphaël Tiran + raphael&orasentry.com +43801 + Dolphin Systems AG + Peter Honegger + peter.honegger&dolphin.ch +43802 + Avaloq Evolution AG + Jukka Salmi + it-infrastructure&avaloq.com +43803 + University of South Wales + Jeremy Thomas + sysadmin&southwales.ac.uk +43804 + Bombardier Aerospace + Marc Rinfret + marc.rinfret&aero.bombardier.com +43805 + Metalogix International GmbH + Anna DeGiorgio + adegiorgio&metalogix.com +43806 + Shape Security, Inc + Judy Karp + judy&shapesecurity.com +43807 + CENTRI Technology, Inc. + Lance Lu + lance¢ritechnology.com +43808 + Spincat Studios + Bob Bregant II + bob&spincats.com +43809 + University of the Philippines + Rommel Feria + admin&up.edu.ph +43810 + Li Hongnan (LiHN) + Li Hongnan + mail&lihn.cn +43811 + University of the Philippines Mindanao + Krishna Hernandez + krishnahernandez&upmin.edu.ph +43812 + Corscience GmbH & Co. KG + Florian Kasper + edv&corscience.de +43813 + Centraal Justitieel Incassobureau + C.M. Eyzenga + informatiebeveiliging&cjib.minjus.nl +43814 + GNU Networks Ltd. + Kenneth Peiruza + kenneth&gnun.net +43815 + The University of Texas M.D. Anderson Cancer Center + Sean Walker + swalker&mdanderson.org +43816 + Tran Industries LLC + Duc Tran + info&tranindustries.com +43817 + Vertex Business Services + Pat Sagers + pat.sagers&vertexgroup.com +43818 + Featurespace Limited + Andy Whitell + sysadmin&featurespace.co.uk +43819 + pulse technologies + Ronnie Gilkey + ronnie&pulsetechofla.com +43820 + Mozy, Inc. + Thor Sigurjonsson + thor.sigurjonsson&mozy.com +43821 + Suwannee County School Board + Adam Boatright + adam.boatright&suwannee.k12.fl.us +43822 + Nok Nok Labs, Inc. + John Westien + jw-iana&noknok.com +43823 + VAS Experts + Dmitriy Moldavanov + dmoldavanov&vasexperts.ru +43824 + ConectaIP Tecnologia S.L. + Jordi Corpas + jordi.corpas&conectaip.es +43825 + Earnest Products, Inc. dba Southern Manufacturing + Anjanett Exum + aexum&southernmfg.com +43826 + TEXKA LABS + Carlos Visser + cvisser&texka.com +43827 + Ideosoft C.B. + Juan Gomez Melero + jgomez&ideosoft.es +43828 + Bosoft + Karol Klos + karol.r.klos&gmail.com +43829 + Ifield School + Andrew Mudamai + amudamai&ifield.kent.sch.uk +43830 + eWater Pty Ltd + Erick Tomaliwan + support&ewater.com.au +43831 + Ezvoicetek Co., Ltd. + Samuel Sung + samuel&ezvoicetek.com +43832 + Beijing D&S FieldBus Technology Co.,Ltd + Biao Wang + wangbiao&c-profibus.com.cn +43833 + LIVECREATION + Navas CH + navas&livecreation.in +43834 + Urban Systems Design Institute + Bugarev Ivan + support&ecocity.ru +43835 + HOCHTIEF CZ, a.s. + Vladimir Bouchal + vladimir.bouchal&hochtief.cz +43836 + C Tech Bilisim Teknolojileri San.ve Tic. 
A.S + Mahmut Bulut + mahmut.bulut&ctech.com.tr +43837 + DTI Group Ltd + Jean-Michel Florent + jean-michel.florent&dti.com.au +43838 + Genex Hellas L.T.D + Dimitrios Papadopoulos + ts&genex.gr +43839 + RailComm LLC + Tim Myers + tmyers&railcomm.com +43840 + Sumitomo Mitsui Banking Corporation + Sean Boyd + SBoyd&JRI-America.com +43841 + Eatel + Duane Wylie + duane.wylie&eatel.com +43842 + Zero Waste Scotland + Chris Meikle + chris.meikle&zerowastescotland.org.uk +43843 + Alter Trading Corporation, LLC + Jason Hitt + isapps&altertrading.com +43844 + GuangDong Big Banian Info Tech Co., Ltd. + Jack Peng + bigbanian&gd3n.com +43845 + Toshiba Tec Solution Services Corporation + Syo Koyanagi + syou_koyanagi&toshibatec.co.jp +43846 + University of the Philippines - Baguio + Dominic S. Dulay + sysnet&upb.edu.ph +43847 + A Hagedoorn Electronics + Arend Hagedoorn + ah&ah-electronics.com +43848 + INoTHINGS GmbH + Andreas Brodmann + andreas.brodmann&inothings.com +43849 + LiiON, LLC + Thomas Lynn + Lft&liionllc.com +43850 + Doxcelerate Corporation + Thierry Thelliez + tt&doxcelerate.com +43851 + danieLLegal.Net - Rechtsanwälte + Dr. Andreas Daniel + daniel&daniellegal.net +43852 + Truseco, s.r.o. + Jakub Horak + info&truseco.com +43853 + Academy of Economic Studies of Moldova + Constantin Sclifos + sclifcon&ase.md +43854 + TU Investment Club e.V. + Mathias Burger + mathias.burger&tuinvest.de +43855 + ezIX.org + Lyonel Vincent + lyonel&ezix.org +43856 + UNIT4 Business Software Spain S.L.U. + Ezequiel Parra Mestre + ezequiel.parra&unit4.com +43857 + mITra data + Supriya Mitra + sm&mitradata.com +43858 + Grigorivska International School + Vadim Baglikov + vadim&grigorivska.com +43859 + Harbor Freight Tools + Garrett Scott + InfoSec&harborfreight.com +43860 + Groenewoud + Frank Groenewoud + snmp&groenewoud.me +43861 + New York Genome Center + Fred Parks + fparks&priveon.com +43862 + Drei Banken EDV Ges.m.b.H. + Lothar Handl + lothar.handl&3beg.at +43863 + Valour Security Ltd + Dave Shaw + dave&val-sec.com +43864 + Breeze Innovations Private Limited + Seenivasan Balaji + balaji_blr&yahoo.com +43865 + Matthias Krause (spectre-net) + Matthias Krause + matthias.krause&hotmail.de +43866 + beijing huafei technology co.,ltd + wukuang + wukuang&huafeitech.com +43867 + Rename it + Rien Broekstra + rien&rename-it.nl +43868 + CyberArk Software Inc. + Yaniv Hagag + admins&cyberark.com +43869 + Yatharth Gupta + Yatharth Gupta + yatharth.gupta&gmail.com +43870 + Precyse Technologies + Yaniv Shneiderman + yanivs&precysetech.com +43871 + Metricell Ltd. + Nick Hassett + nick.hassett&metricell.com +43872 + Q'ligent.inc + Ted Korte + ted.korte&qligent.com +43873 + TA Associates + Aaron McCoppin + amccoppin&ta.com +43874 + pmacct + Paolo Lucente + paolo&pmacct.net +43875 + TeKnowledgy, Inc. + Bill Buhler + bill&tknow.com +43876 + Centre Hospitalier Universitaire Sainte-Justine + Pascal Cloutier + infrastructure.hsj&ssss.gouv.qc.ca +43877 + Dalian Rural Commercial Bank Co.,Ltd. + Li Hongnan + lihn79&163.com +43878 + Embross Group + Andrew Tzelepis + andrewt&embrossgroup.com +43879 + Chengdu Fuhuaxin Technology Co.,Ltd. + Wanbin.Gan + 563572071&qq.com +43880 + FRAKO Kondensatoren- und Anlagenbau GmbH + Andreas Kreker + kreker&frako.de +43881 + Informatics Services Corporation (ISC) + Mehrdad Sheikhan + m_sheikhan&isc.iranet.net +43882 + DHBW Villingen Schwenningen + Richard Franke + franke&dhbw-vs.de +43883 + NiVo Engineering BV + Edwin Vos + evos&nivo.nl +43884 + AnyCode Kft. 
+ Gabor Fekete + pen.oid&anycode.hu +43885 + WCC Group BV + Rene Schuil + mail.it&wcc-group.com +43886 + Anjary Pty Ltd + Andrew Fisher + andrew&anjary.com.au +43887 + ΒΥΤΕ COMPUTER S.A + Christos Anifantis + anifantis&byte.gr +43888 + Agiloo S.r.l. Semplificata + Andrea Duimich + andreaduimich&agiloo.it +43889 + datenkollektiv.net + Jan Kowalsky + support&datenkollektiv.net +43890 + Thotaka Technologies Private Limited + Lakshmi Mohan Saripalli + mohan&thotakaa.com +43891 + SMY.com + Stephen Youndt + ianapen.5.smy&neverbox.com +43892 + NerySEC + Kyra Zimmer + nonchip&nerysec.de +43893 + Hope Bay Technologies, Inc. + Pa Hsuan + pa.hsuan&hopebaytech.com +43894 + HedoN electronic developments B.V. + Remon Dijkstra + rdijkstra&hedon.nl +43895 + Institut für Holztechnologie Dresden gemeinnützige GmbH + Thomas Hupfer + hupfer&ihd-dresden.de +43896 + Emercit + Sergey Tsidilov + tsidilov&emercit.ru +43897 + Amped Wireless + Anthony Ng + anthony&ampedwireless.com +43898 + Hudson City Savings Bank + Thomas Slattery + tslattery&hcsbnj.com +43899 + atlantis software + alexandre tiertant + a.tiertant&atlantis-software.net +43900 + Teramatrix Technologies Private Limited + Amit Sharma + asharma&teramatrix.in +43901 + Talisman Sinopec Energy UK Limited + Andrew Bentley + hostmaster&talisman-sinopec.com +43902 + Enkel Tecnologia + Oscar Flores + oscar.flores&enkeltecnologia.com +43903 + Lions Clubs International - Multi District 111 (Germany) + Stefan Kaufmann + portal&lions.de +43904 + BitronikLab + Milovan Čolić + bitroniklab&gmail.com +43905 + MelRok LLC + GY Hanna + gyhanna&melrok.com +43906 + Tegile Systems, Inc. + Pradipmaya Maharana + pradip&tegile.com +43907 + CloudThing Ltd. + Francis Thomas + fran&cloudthing.com +43908 + Hawking Technology, Inc. + Frank Lin + Frank&hawkingtech.com +43909 + Association PauLLA + Président PauLLA + iana&paulla.asso.fr +43910 + Pravala Inc. + Jakub Schmidtke + admin&pravala.com +43911 + Thureon Limited + Ross Vincent + ross.vincent&thureon.com +43912 + FUJI FURUKAWA ENGINEERING & CONSTRUCTION Co.Ltd. + Satoshi Nagai + nagai-satoshi&ffec.co.jp +43913 + Zhejiang Science Electronic Technology Co., Ltd + Changyu Liu + cliu&zjsaisi.com +43914 + The Administrative Committee of Jiangning Development Zone + Buqiang Liu + lbq&jndz.net +43915 + Wallace Kroeker + Wallace Kroeker + wallacekroeker&rundletech.com +43916 + Van Lanschot + Niels de Kok + wintel&vanlanschot.com +43917 + SantéLink SAS + Romain Tartière + pen&santelink.fr +43918 + Marina Del Rey Hospital + Robbin Messier + robbin.messier&marinahospital.com +43919 + Neoteck Grup + Mihai Cretu + office&neoteck.ro +43920 + Shandong Chaoyue Digital Control Electronics Co., Ltd. + Qingshi Li + liqsh&inspur.com +43921 + CJSC STC SIMOS + Tarasov Nikolai + tarasov&simos.ru +43922 + Equra Health Trust + Piet Theron + ptheron&equrahealth.co.za +43923 + Ivan Leonardi + Ivan Leonardi + ivanleonardi&gmail.com +43924 + Standard Transfer Specification Association + Franco Pucci + franco.p&mweb.co.za +43925 + EastWind + Andrew Pavlenko + a.pavlenko&eastwind.ru +43926 + CSI.NET, Inc. + Daniel J. McGrail + dmcgrail&csiworld.com +43927 + ID.me, Inc. 
+ Tom Robison + tom&id.me +43928 + Vantage Communications + Kevin Miller + kmiller&vantage.com +43929 + WellerNET + Lucien Weller + info&wellernet.ch +43930 + Radlink Communications + Ben Rampling + ben.rampling&radlink.com.au +43931 + IFB Institut für Bahntechnik GmbH + Benjamin Tietz + bt&bahntechnik.de +43932 + Bluwireless Technology Limited + Doug Turner + doug.turner&bluwirelesstechnology.com +43933 + Exploros inc. + Nadav Hameiri + nadav.hameiri&exploros.com +43934 + Võrumaa Kutsehariduskeskus + Allan Liblik + ikt&vkhk.ee +43935 + Keen IO + Joshua Coats + joshua&keen.io +43936 + zhong yuan hua dian + yong fei wang + yfwang&zyhd.com.cn +43937 + Tieto Signaling Solutions + Sten-Ulrik Eriksson + sten-ulrik.eriksson&tieto.com +43938 + Memorable Images + Tony Flury + memorable.images&btinternet.com +43939 + invliD + Sebastian Brückner + mail&invlid.com +43940 + WISAG Dienstleistungsholding GmbH + Fabian Sauer + fabian.sauer&wisag.de +43941 + Event Store Ltd + James Geall + iana&eventstore.com +43942 + Luma Pictures + Brent Hensarling + sys&luma-pictures.com +43943 + VOLTRONIC POWER TECHNOLOGY CORP. + bluefish wei + bluefish_wei&voltronicpower.com.cn +43944 + VOLTRONIC POWER TECHNOLOGY CORP. + bluefish wei + bluefish_wei&voltronicpower.com.cn +43945 + Telenia Software srl + Roberto Grava + rgrava&teleniasoftware.it +43946 + Vanilla.no + Ole Øyvind Hove + oleo&vanilla.no +43947 + LichtBlick SE + Claus Sprave + administration&lichtblick.de +43948 + Taiwan Intelligent Home + Cing-Yao Chen + pichu&tih.tw +43949 + Aceyus, Inc. + Gerard Lilly + gerard.lilly&aceyus.com +43950 + NexxCom Wireless + Michael Overholser + michael.overholser&nexxcomwireless.com +43951 + Denovo Ventures LLC + Geoff Anderson + ganderson&denovo-us.com +43952 + LRAD Corporation + jason wright + jasonwright&lradx.com +43953 + closerlook, inc. + Ronney N Hunter + rhunter&closerlook.com +43954 + Seno Medical Instruments Inc. + Bryan Clingman + bclingman&senomedical.com +43955 + Radiology Associates LLP + Harry Mosser + jmosser&xraydocs.com +43956 + Total Restyling di Tola Rosa + Corrado Mulas + amministrazione&totalrestyling.it +43957 + Multitone Electronic plc + Rupert Johnson + rupert.johnson&multitone.com +43958 + University of Innsbruck + Dirk Draheim + dirk.draheim&uibk.ac.at +43959 + Torraspapel SA + DANIEL ESCORIZA + daniel.escoriza&lecta.com +43960 + SaF GmbH + Thomas Schulte + thomas.schulte&saf-kassel.de +43961 + OKTAL, SAS + Yves Gallot + yves.gallot&oktal.fr +43962 + Utena University of Applied Sciences + Mantas Mikulėnas + mantas&utenos-kolegija.lt +43963 + Thruput Ltd + Adam Cottrell + adam.cottrell&thruput.co.uk +43964 + Silpakorn University + Sethalat Rodhetbhai + sethalat&su.ac.th +43965 + akraas.de + Alexander Kraas + iana-pen&akraas.de +43966 + J4SYSTEMS SOLUCOES TECNOLOGICAS LTDA. + ANTONISIO PRUCOLI MARTINS + amartins&j4systems.com.br +43967 + Manthorpe Ltd + Daniel Sutton + it&manthorpe.co.uk +43968 + California University of Pennsylvania + Eric Sabo + Eric.Sabo&calu.edu +43969 + NIPPON FIELD ENGINEERING Co.,Ltd. 
+ Hiroki Takeda + h_takeda&nfe.co.jp +43970 + Bächtold & Moor AG + Stefan Thomet + stefan.thomet&baechtoldmoor.ch +43971 + Map59 + Steve Saunders + steve1&map59.com +43972 + DriveWorks Ltd + Philip Stears + philip&driveworks.co.uk +43973 + Universidad Nacional de Córdoba - UNC + Agustín Moyano + agustin&psi.unc.edu.ar +43974 + Elcoma + Rafael Moraes + rafael.moraes&elcoma.com.br +43975 + DaedaFusion, LLC + Paul Patrick + Paul.Patrick&DaedaFusion.com +43976 + University of the Philippines Cebu + Gigi Carcallas + gigiprg&gmail.com +43977 + HomeSend SCRL + Sébastien Lefevre + sebastien.lefevre&homesend.com +43978 + IKATA NETWORKS + ASIER IBARGUEN + aibarguen&ikatanetworks.com +43979 + IMED Engineering s.r.o. + Francesco Buonomo + fbuonomo61&gmail.com +43980 + Cerritos College + Bruce Tanner + tanner&cerritos.edu +43981 + Partnet, Inc. + Todd VanderVeen + tdv&part.net +43982 + Analytical Flavor Systems + Evan Farrell + evandev&gastrograph.com +43983 + Colibri Technologies Inc. + Amandeep Thind + aman&colibritech.com +43984 + MPL Technology Group + Harry Podciborski + harry.podciborski&mpltechnologygroup.com +43985 + WebINTENSIVE Software + David Bodnick + iana&webintensive.com +43986 + Privredna Banka Zagreb d. d. + Tatjana Vujovic + tatjana.vujovic&pbz.hr +43987 + ALVA IT-Solutions + Michael Grieswald + info&alvaits.com +43988 + Bichara Tecnologia Ltd + Daniel Bichara + daniel&bichara.com.br +43989 + EEMRA Inc. + Norman Katz + norm&eemra.com +43990 + Tropo, Inc. + Kevin Chatow + kchatow&tropo.com +43991 + Elektrum, S.A.R.L + Ghislain IRANYUMVA + ghislain.iranyumva&gmail.com +43992 + Power Assure, Inc + Andrew Stickler + andrew.stickler&powerassure.com +43993 + Security First Corp. + Rick Orsini + rorsini&securityfirstcorp.com +43994 + QuanTemplate Technologies Limited + Marek Nelken + marek.nelken&quantemplate.com +43995 + Axentia Technologies AB + Peter Rosin + peda&axentia.se +43996 + Bloomington Medical Services + Lisa Suttle + lsuttle&bloomingtonmedical.org +43997 + Cellwize wireless technologies PTE LTD + Yaacov Cohen + yaacov&cellwize.com +43998 + Computerwork GmbH + Norbert Rehfeldt + nrehfeldt&computerwork.de +43999 + AKD d.o.o. (formerly 'Agencija za komercijalnu djelatnost d.o.o.') + Leo Lokas + Leo.Lokas&akd.hr +44000 + PointSecure Technologies Inc + Warren Kahle + warren.kahle&pointsecure.com +44001 + Shenzhen Envicool Technology Co., Ltd. + Yudi Xie + xieyd&envicool.com +44002 + Gridwiz, Inc. + Teddy Hyunwoong Kim + teddy&gridwiz.com +44003 + AutoGrid Systems, Inc. + Rajeev Singh + it&auto-grid.com +44004 + BYKING INC. + Kengo Saki + saki&byking.jp +44005 + Wadi Chadli + Wadi Chadli + wadisupp&gmail.com +44006 + Xunlei Networking Technologies,Ltd. + Feng Huayang + postmaster&xunlei.com +44007 + Informatica Bancaria Trentina + Ivan Leonardi + evolution&ibttn.it +44008 + SYSTECH + Ahmed Murtabegovic + ahmed.muratbegovic&systech.ba +44009 + WebPKI.org + Anders Rundgren + anders.rundgren.net&gmail.com +44010 + OadisCorp + Stephen Rutledge + oadis&outlook.com +44011 + eMortgage Logic + Bill Miller + bmiller&emortgagelogic.com +44012 + Tata advanced systems limited (TASL) + Jay Singh + jpsingh&tataadvancedsystems.com +44013 + MPEC Technology Ltd + Janos Vaczi + janos.vaczi&mpec.co.uk +44014 + Tyco Fire & Integrated Solutions (UK) Ltd. 
+ David Biggins + dbiggins&tycoint.com +44015 + DataInfo + Michael Khomenko + mvhoma&gmail.com +44016 + Baikal Electronics + Andrey Malafeev + Andrey.Malafeev&baikalelectronics.ru +44017 + SmartGuard, LLC + Erik Amundson + erik.amundson&smartguardllc.net +44018 + exiztec + Effi Olivkovitch + effi&exiztec.com +44019 + FRANMAX, UAB + Audrius Sadlauskas + service.mgroup&maximagroup.eu +44020 + Router Networking Ltd. + Attila Domjan + access&router.hu +44021 + Alliance Resource Partners, L.P. (ARLP) + Greg Tate + ssladmin&arlp.com +44022 + Advanced Publishing Technology + Stephen Hansen + shansen&advpubtech.com +44023 + Karther Ingeniería + Carlos D. Medina + info&karther.es +44024 + ConnectSolutions + Matthew Greenberg + matt.greenberg&connectsolutions.com +44025 + Foobar Development LLC + John Moore + jmoore&foobardevelopment.com +44026 + Smart4Aviation Group + Kuba Zakrzewski + kuba.zakrzewski&smart4aviation.aero +44027 + Shop Airlines, Ltd. + HARATAKE Kazunari + kazunari.haratake&shopairlines.com +44028 + Extreme-Access.com + Kirk Reiten + whois&extreme-access.com +44029 + Nestlé Operational Services Worldwide SA + Lukasz Kozubal + lukasz.kozubal&nestle.com +44030 + WooshCom Corporation + B.W. Marler + BWMarler&WooshCom.com +44031 + TrustCor Systems S. de R.L. + Rachel McPherson + rachel&trustcor.ca +44032 + Cloud Utility District + Edward Buchwalter + ed.buchwalter&cloudutilitydistrict.com +44033 + Mobitech + Alexey Cherviakov + cto&mobitech.ru +44034 + S Broker AG & Co. KG + Christian Felsing + hostmaster&sbroker.de +44035 + Wojskowe Zaklady Lacznosci nr 1 S.A. + Krzysztof Marciniak + k.marciniak&wzl1.com.pl +44036 + OPENNETEUROPE + Rene Stoutjesdijk + r.stoutjesdijk&openneteurope.eu +44037 + Deželna banka Slovenije d. d. + Dejan Tomažič + dejan.tomazic&dbs.si +44038 + Filmakademie Baden-Württemberg GmbH + Götz Reinicke + hekdubyuj&filmakademie.de +44039 + Wind Mobile SA + Michał Cheba + Michal.Cheba&windmobile.pl +44040 + IEI Integration Corp. + Don Yu + donyu&ieiworld.com +44041 + Conntrack Technologies + Frederic d'Huart + info&conntrack.com +44042 + Instituto Nacional de Tecnologias de Informacao e Comunicacao (INTIC) + Sergio Mapsanganhe + sergio.arnaldo&intic.gov.mz +44043 + BSG-IT Kft. + Tamas Bagyal + info&bsg-it.hu +44044 + SETCCE + Jurij Zelic + jurij.zelic&setcce.si +44045 + Dos al Cubo + Mariano Gonzalez + sysadmin&dosalcubo.com +44046 + POK AG Schweiz + Martin Paierl + hostmaster&pok.net +44047 + Schreiber Foods Inc + Jon Simonsen + security&schreiberfoods.com +44048 + Instytut Hodowli i Aklimatyzacji Roslin - Panstwowy Instytut Badawczy + Roman Osinski + pki&ihar.edu.pl +44049 + Cloud2Ground Technologies Div. Digital Multimedia, LLC + John Hooks + johnh&cloud2groundtechnologies.com +44050 + Cryptografree (David Hoyle) + David Hoyle + david_j_hoyle&hotmail.com +44051 + Sapporo Gakuin University + Yasuhiro Hida + postmaster&sgu.ac.jp +44052 + Beijing Cloud Core Network Technology Co., Ltd. + Wang Xuesong + wangxuesong&cloudcore.cn +44053 + ELFTECH Co., Ltd. 
+ Kwak, Ja-Seop + jskwak&elftech.co.kr +44054 + Hörmann KG Verkaufsgesellschaft + Christof Sobek + c.sobek.vkg&hoermann.de +44055 + Identity And Access Management Technologies (IAM Tec) + Jorge de Almeida Pinto + JorgeDeAlmeidaPinto&live.com +44056 + Aktif Mühendislik + Emre Sami Süzer + haluk.turan&aktif.net +44057 + predata.org + Lukas Mika + webmaster&predata.org +44058 + Avalanche Cloud Corporation + Scott Kern + scott&hydrantid.com +44059 + School District #63 (Saanich) + Ryan Tandy + ldapadmin&sd63.bc.ca +44060 + Iridium Communications Inc + redouane laroussi + redouane.laroussi&iridium.com +44061 + Tippecanoe County Government + Bradley Alexander + bpalexander&tippecanoe.in.gov +44062 + FlightStats Inc. + Jason Denning + it&flightstats.com +44063 + ACCESS CO., LTD. + Toshikazu Sakita + toshikazu.sakita&access-company.com +44064 + Quobyte Inc. + Felix Hupfeld + support&quobyte.com +44065 + PROGSTAR + Gustaw Mazurek + techniczny&progstar.com.pl +44066 + Fachschaftinformatik, WHS + Robin Marchel + infra&fachschaftinformatik.de +44067 + McStas project + Peter Willendrup + pkwi&fysik.dtu.dk +44068 + Major League Baseball Advanced Media + Friedrich Seifts + friedrich.seifts&mlb.com +44069 + Groupe Citypassenger Inc + Falempin Sven + ovh&citypassenger.com +44070 + IFTER + Andrzej Nieweglowski + andrzej_n&ifter.com.pl +44071 + New Relic, Inc + Kevin Wright + kwright&newrelic.com +44072 + Lee County Schools + James Hunter + rhunter&lee.k12.nc.us +44073 + Data People Pty Ltd + Paul Whitman + paul&datapeople.com.au +44074 + Servyou + Jin Cheng + jinc&servyou.com.cn +44075 + Beijing Nan Bao Technology Co., Ltd. + yuqing bai + yuqing&baonanbao.com +44076 + Landtag von Baden-Württemberg + Ralph Geisser + Ralph.Geisser&landtag-bw.de +44077 + PST AG + Peter Piehler + info&pst.ag +44078 + Taiwan Mobile Payment Co. + Wei-Ching Su + tsmrd01&mail.fisc.com.tw +44079 + NPO SISTEMI S.p.A. + Massimo Bianchi + massimo.bianchi&nposistemi.it +44080 + TNTNET + Thorsten Seeger + iana&tntnet.eu +44081 + UNI LEADER INTERNATIONAL LTD + Patricia Gisselle Alba + p.g.alba&unilintl.com +44082 + Beijing DTLT Technology CO., LTD. + Jackie Deng + jackie.deng&lqtech.com +44083 + GuiLin HYGJ communication technology co., Ltd + David Qin + qinxingsheng&foxmail.com +44084 + B. Metzler seel. Sohn & Co. AG + Joerg Huber + cet&metzler.com +44085 + Display Quality Equipment, S.L + Francisco Javier Perez Gomez + fperez&dqebroadcast.com +44086 + E-T-A Elektrotechnische Apparate GmbH + Werner Ruempelein + werner.ruempelein&e-t-a.de +44087 + Hammond Consulting Services + Wesley Hammond + wes.hammond&hotmail.com +44088 + CODESA - Grupo Empresarial de la Construcción, Santiago de Cuba, Cuba + Sr. Hector Suarez Planas + hector.suarez&codesa.co.cu +44089 + ABit Consulting + Tristan Marlow + software&abit-wa.com.au +44090 + CSG Global LLC + Simon Rak + srak&csggc.com +44091 + Agilion GmbH + Heiko Klose + h.klose&agilion.de +44092 + Sigel GmbH + Jochen Mokry + jochen.mokry&sigel.de +44093 + Bell Ambulance, Inc. + Wayne A Jurecki + WJurecki&264Bell.com +44094 + Aser Secury + Donizete Junior + donizete&aser.com.br +44095 + Kagoshima University + Koichi SHIMOZONO + simozono&cc.kagoshima-u.ac.jp +44096 + Shenzhen Zhong Yuan Tong Power Supply Technology Co., Ltd. + Liwenbin + liwenbin&robot-zyt.com +44097 + db-central GmbH + Dirk Rosomm + it-support&db-central.com +44098 + Patchman B.V. 
+ Ralph Broenink + ralph&patchman.co +44099 + CyberSeal LTD + Tal Yechye + taly&cyber-seal.net +44100 + SLASH16 + Jean EYMERIT + clark&slash16.org +44101 + Radiss s.c. + Robert Karpiński + r.karpinski&olikon.com.pl +44102 + GRC Advisory Solutions + Robert Bordynuik + rbordynuik&grcas.com +44103 + Agora Con GmbH + Horst Kapfenberger + horst.kapfenberger&agoracon.at +44104 + Groupe Alsatis + Thomas Mignien + thomas&groupe-alsatis.com +44105 + BHS tabletop AG + Georg Putz + Putz.G&bhs-tabletop.de +44106 + IMSWorkX, Inc. + Bradley Wideman + support&imsworkx.com +44107 + Rivada Networks, LLC + Vincent D'Onofrio + registration&rivada.com +44108 + Telecom Argentina S.A. + Javier Mendez + javier.mendez&proveedor.telecom.com.ar +44109 + FAA National Airspace Systems + Jim Laymon + jim.laymon&faa.gov +44110 + Nowa Era Sp. z o.o. + Marcin Mazur + admin&nowaera.pl +44111 + Guangzhou Younghead Electronic Technology Co.,Ltd + huang chuen + huangchuen&younghead.com.cn +44112 + Coöperatieve Rabobank U.A. + B. van Zanten + OperationsAD&rabobank.com +44113 + Villeroy & Boch AG + Thomas Ochs + domainmaster&villeroy-boch.com +44114 + RealVNC Ltd + Bruce Bye + standards&realvnc.com +44115 + Height8 Technologies Pvt. Ltd + Vishal Solanki + vishal&height8tech.com +44116 + RosBusinessConsulting, CJSC + Anes Mukhametov + noc&rbc.ru +44117 + Cross Borders Trust Services OÜ + Ott Sarv + support&signwise.me +44118 + Noakes + Mark Fawcett + mark.fawcett&noakesltd.co.uk +44119 + Creighton + Oliver Creighton + iana&creighton.de +44120 + Avairis, Inc. + James McQuillan + jam&Avairis.com +44121 + HRK Soft Group Ltd. + HamidReza Kadkhodaei + hrkadkhodaei&gmail.com +44122 + T17R (Martin Treusch von Buttlar) + Martin Treusch von Buttlar + iana-pen&m.t17r.de +44123 + Business-intelligence of Oriental Nations Corporation, Ltd. + zhangxiao + zhangxiao&bonc.com.cn +44124 + bit media e-solutions gmbh. + Michael Holasek + michael.holasek&bitmedia.cc +44125 + Lanyo Networks Co., LTD. + Robin Cui + robincui25&gmail.com +44126 + Lookhere Design + Mike Davis + info&lookhere.co.za +44127 + Primlight AB + Sten Oscarsson + support&primlight.net +44128 + MapGear + W. Goedhart + w.goedhart&mapgear.nl +44129 + beijing yunzheng telecommunication technology Co, LTD + guohongliang + guohongliang&robustsync.com +44130 + Michael Ghazi + Michael Ghazi + hftekk&gmail.com +44131 + Nanjing DunLei Network Technology Co.,Ltd. + Jianping Shi + dlshijianping&gmail.com +44132 + OpsVision Solutions + David Ishmael + dishmael&opsvision.com +44133 + Commerce Technologies, Inc. + David Pigliavento + pen&commercehub.com +44134 + Bless Information & Communication Inc. + Bok Hyeon Lee + bhlee&bless.co.kr +44135 + Jonkoping Energi AB + Rickard Höglund + rickard.hoglund&jonkopingenergi.se +44136 + Data Controls Inc. + Keisuke Yamaguchi + info&dci.jp +44137 + KaiXiang + xinbo yan + xinbo.yan&kaixiangtech.com +44138 + NodeForge + Dave Franks + noc&nodeforge.com +44139 + Vizury Interactive Solutions Pvt. Ltd + Udai Singh Mehra + vizury.infracomm&vizury.com +44140 + Rapp Management AG + Michael Kretschmann + iana-pen&rapp.ch +44141 + Rosbank + Vitaliy Glushnev + vitaliy.glushnev&socgen.com +44142 + Edidin Group, Inc + Howard Edidin + hedidin&edidingroup.net +44143 + kazalo GmbH + Daniel Menzel + daniel.menzel&kazalo.de +44144 + Formulus Black (formerly 'Symbolic IO') + Nafees Abdul + snmp-admin&formulusblack.com +44145 + Pokazz sp. z o.o. + Piotr Czekala + info&pokazz.com +44146 + INSYS K.Bartkowski, P.Czekala sp.j. 
+ Piotr Czekala + info&insys.pl +44147 + Diagnostic Imaging Associates + Curtis Blankenship + curtis&diarads.net +44148 + Nanjing Tsinghua Novel Network Technology Co.,Ltd. + Han Chen + bsnch&163.com +44149 + KouXianglong + KouXianglong + 643166601&qq.com +44150 + Intermarketing Oy + Valtteri Konttinen + valtteri.konttinen&intermarketing.fi +44151 + Metrovision + Jacques Charlier + charlier&metrovision.fr +44152 + Kambio Company + Konstantin Yakovlev + kostya&kambio.com +44153 + DreamCode S.A.S. + Jorge Jonathan Ortiz Vélez + jonathan.ortiz&dreamcodesoft.com +44154 + InfoStreet, Inc. + Todd Paradise / Jeff Carlson + noc&infostreet.com +44155 + Sacred Heart Hospital of Allentown + Julie Grumbein + jgrumbei&SHH.ORG +44156 + Computer Applications & Technical Services + Ibrm Haymour + ibrm&cats.com.jo +44157 + Stadtverwaltung Duebendorf + Robert Steiner + robert.steiner&duebendorf.ch +44158 + Mediterranean Broadband Infrastructure s.r.l. + Roberto Ferrari + rferrari&mbigroup.it +44159 + DK-Technologies + Michael Kristensen + mk&dk-technologies.com +44160 + Contra Costa Oncology + Sara Cuff + sarac&contracostaoncology.com +44161 + Tri-County Hematology & Oncology Assoc. + Ruth Settle + rsettle&tricountyassoc.com +44162 + CHAMPLAIN VALLEY HEMATOLOGY ONCOLOGY, PC + MICHELLE PASCHALL + MICHELLE.PASCHALL&VTMEDNET.ORG +44163 + University of SS. Cyril and Methodius in Trnava + Marian Hercek + marian.hercek&ucm.sk +44164 + OutSmart Power Systems LLC + Uwe Meding + umeding&outsmartinc.com +44165 + Eduardo Miranda MD + Elizabeth Facundo + mbsbilling11&yahoo.com +44166 + Andrews & Patel Associates, P.C + Andrew Munchel + amunchel&andrewspatel.com +44167 + North Texas Gynecologic Oncology + Elizabeth Stevens + elizabeth.stevens&northtxgynonc.com +44168 + NetCraft Australia Pty Ltd + Geoffrey D. Bennett + oid&netcraft.com.au +44169 + FIDA INTERNATIONAL (S) PTE LTD + STEIN MA + stein_ma&prolink2u.com +44170 + Decision Group Inc. + Charles Chien + charles&decision.com.tw +44171 + RogSibAl LLC + Maxim Alexandrov + support&rogsibal.ru +44172 + Hangzhou Kuaiyue Mobile Technologies, Ltd. + ye daoliang + ye.dl&kuaiyuetech.com +44173 + InPhoSys Ltd + Jacques Peacock + jpeacock&callconnection.com +44174 + Baptist Health Cancer Care & Blood Disorders + Joshua Gold + joshua.gold&bhsi.com +44175 + Urology Cancer Center + Gary Glissman + gglissman&gucancer.com +44176 + SOCIETE REUNIONNAISE DU RADIOTELEPHONE + Christophe CAMON + christophe.camon&srr.fr +44177 + INSTITUTO DE HEMATOLOGIA Y ONCOLOGIA MEDICA, SRL + ALFREDO J MARTINEZ + amartinez&ihom-coi.com +44178 + Cancer Clinic + Leah Moore + Leah&cancerclinic.com +44179 + Daniel Stensnes + Daniel Stensnes + daniel&zift.no +44180 + Oncology & Hematology Associates of West Broward, P.A. + Jane Steinkamp + jane.steinkamp&ohawb.com +44181 + Chitra Venkatraman, M.D., P.A. + Veronica Avent + ronniesworld330&yahoo.com +44182 + Associates in Oncology/Hematology + Kimberly Roddy + kroddy&aohmd.com +44183 + Security Confidence Corporation + Matthew Corney + info&securityconfidence.com +44184 + Ma-Ya IT Consult, e.U. + Albin Mayer + office&mayaitc.com +44185 + Horizon Medical Group, Inc + Michelle Hamm + mhamm&horizonbioadvance.com +44186 + Keats, Connelly & Associates, LLC + Michael Connelly + Admin&Keatsconnelly.com +44187 + Zirtual Inc + Joe Tyson + joe.tyson&zirtual.com +44188 + Cancer Center Oncology Medical Group + Debbie Mason + dmason&grossmontoncology.com +44189 + Pacific Cancer Care + Karin N. 
Roman + kroman&pacificcancercare.com +44190 + PostAR + Bostjan Kezmah + bostjan.kezmah&cepris.si +44191 + CLG Enterprises + Louie Gomes + IANA&CLGEnterprises.net +44192 + Remego Ltd. + See Kok Sin + koksin.see&remego.com +44193 + Beijing DATAX Technology Co., Ltd. + Xinhong Deng + dengxh&champor.com.cn +44194 + EnGenius Networks, Inc. + Joe Liang + joe.liang&senao.com +44195 + Southern Oncology Specialists, PLLC + Sarah Cowart + scowart&southern-oncology.com +44196 + SecureRF Corporation + Joanne Kelleher + jkelleher&securerf.com +44197 + Piedmont Cancer Institute + Penny DeMarco + pdemarco&piedmontcancerinstitute.com +44198 + Cancer and Hematology Centers of Western Michigan + Brian Kyllonen + bkyllonen&chcwm.com +44199 + University of New South Wales + Igor Grozdanov + i.grozdanov&unsw.edu.au +44200 + Markit Ltd + John Kennedy + john.kennedy&markit.com +44201 + Micromation.Net + Norberto Núñez + micromation.net&gmail.com +44202 + American Energy Partners, LP + Jamie Nelson + jamie.nelson&aep-lp.com +44203 + Global 3R Ltd. + Andrius Jankevicius + pen&fostral.net +44204 + Medical Oncology Associates of Long Island, PC + Sandra Rosenberg/Ronnie Morales + srosenberg&medoncli.com +44205 + BandRich, Inc. + Purine Chu + purine_chu&bandrich.com +44206 + Fujitsu Isotec Limited + Masatoshi Sato + masatoshi.sato&jp.fujitsu.com +44207 + Proxee Solutions + Louis-Pierre Morin + lpmorin&proxee.ca +44208 + Telecom Personal Paraguay + Norberto Isaac Núñez + Norberto.Nunez&personal.com.py +44209 + fsis GmbH + Fermin Sanchez + info&fsis.ch +44210 + Robadey Network + Nicolas Robadey + nicolas&robadey.net +44211 + GGR Communications Ltd + Ed Collen + ed.collen&ggr.net +44212 + Oncology Hematology Care of Connecticut, LLC + Nora Dias + noraoncology&gmail.com +44213 + Happy Gears Inc + Vadim Kurland + info&happygears.net +44214 + Erie Indemnity Company + Jamison Budacki + jamison.budacki&erieinsurance.com +44215 + Arsslensoft + Arsslen Idadi + arsslens021&gmail.com +44216 + FUYOH VIDEO INDUSTRY CO.,LTD. + Shinya Mochizuki + mochizuki&fva.co.jp +44217 + FiberRoad + Lifeng Zhang + lifeng_zhang&fiberroad.com.cn +44218 + Buffalo Boots GmbH + Michael Conradi + hostmaster&buffalo.de +44219 + Beijing Raytight Technologies, Co + Chao Gao + gaochao&raytight.com +44220 + Netzin Technology Corporation,.Ltd. + Jiun-Jie.Chang + jackie.chang&netzintech.com +44221 + Primary Oncology Network, PLLC + Tonia Flohr + toniaflohr&ponwv.com +44222 + Fox Valley Hematology Oncology/Illinois Cancer Specialists + AKHILA YESHWANT + akhilay&yahoo.com +44223 + GE AVIC Civil Avionics Systems Company Limited + Andrew Hodgman + andrew.hodgman&aviagesystems.com +44224 + MATTHEW TAUB MD PA + INGRID SANTOS + optimumoncology&gmail.com +44225 + Mr.yassine hamraoui + yassine hamraoui + yassinehamraoui2012&gmail.com +44226 + sonoran hematology oncology + aimee dundas + aimeedundas192&yahoo.com +44227 + Pulse Secure + Dmitry Teleganov + dt-iana&ux4.net +44228 + port GmbH + Marcus Tangermann + mt&port.de +44229 + Wanco Inc + Joseph Chen + joe.chen&wanco.com +44230 + Bundesamt für Seeschifffahrt und Hydrographie + Jörg Gerdes + joerg.gerdes&bsh.de +44231 + Queens Medical Associates, PC + Sadiaka Joarder + sjoarder&queensmedical.com +44232 + Ing. Büro Mann + Dipl.-Ing. U. Mann + oid.mann&bklev.de +44233 + Balluff GmbH + Markus Rentschler + Markus.Rentschler&balluff.de +44234 + VIT S.A. + WALTER SUAREZ + walter.suarezm&gmail.com +44235 + Ospero Pty. Ltd. 
+ Jonathan Kerkin + jkerkin&ospero.com +44236 + Compassionate Oncology Medical Group + Joanne Ferri + joanne&compassionateoncology.org +44237 + Allegro Packets GmbH + Klaus Degner + iana&allegro-packets.com +44238 + Esense Embeded + Bojan Cupic + bojancupic&gmail.com +44239 + Centro de cancer de la mujer de Puerto Rico + Juan C Prieto + centrocancermujer&gmail.com +44240 + Rocket Internet AG + Alessandro Avagliano + alessandro.avagliano&rocket-internet.de +44241 + LotusCom Inc. + Houman Sadeghi Kaji + h.sadeghikaji&lotuscomsys.com +44242 + Radomir LLC + Roman Zolotov + zolotov&radomir.su +44243 + NSTEK Inc. + KyungTae Kim + mrlord7&nstek.com +44244 + Gardens Regional Hospital and Medical Center, Inc. + Anthony Carrasco + acarrasco&tcrmc.org +44245 + Fazion Sistemas Ltda + Moacyr Franco Neto + moacyr&fazion.com.br +44246 + St. Louis Cancer Care, LLC + Carol Riley + caroljriley&hotmail.com +44247 + CLEAR SRL + Marcos Vicentini + sistemas&clear.com.ar +44248 + Simula Research Laboratory + Nornet Ito + nornet-ito&simula.no +44249 + Florida Cancer Specialists & Research Institute + Donna L. Irving + dirving&flcancer.com +44250 + Coastal Integrative Cancer Care + Gail Kissinger + gkissinger&ohmacc.com +44251 + IMT Services Corp + Andrew Bent + andy&insuremytrip.com +44252 + Iceotope + Marc Kelly + Marc.Kelly&iceotope.com +44253 + HANKYUNG I-NET + Kim Tae Je + ktj&hkinet.co.kr +44254 + LuckyBulldozer + Ben Torkington + ben&luckybulldozer.com +44255 + Medical Oncology Associates of San Diego + Joyce Johnston + jjohnston&oncologysandiego.com +44256 + Kootenai Cancer Center + Johanna Bruning + jbruning&kh.org +44257 + Teraoka Weigh-System Pte Ltd + Desmond Lye + desmond&teraoka.com.sg +44258 + Hello World Ltd + Eric Yan + eric&helloworld.ltd.uk +44259 + Xstream Flow (Pty) Ltd + Andrew van Tonder + andrew.vantonder&xstreamflow.com +44260 + BALLY WULFF Games & Entertainment GmbH + Henry Gutknecht + it&bally-wulff.de +44261 + CARBOGEN AMCIS AG + Ryan Griffiths + ryan.griffiths&carbogen-amcis.com +44262 + BJ's Wholesale Club + Will Dalton + wdalton&bjs.com +44263 + Cancer Center of Sarasota Manatee + Shanthy Gadam + sgadam&cancersarasota.com +44264 + SurfCrew, Inc. + Todd Ignasiak + todd&surfcrew.com +44265 + Grotex OOO + Dmitriy Bubnov + iana-pen&luna-78.com +44266 + Luna-78 LLC + Dmitriy Bubnov + iana-pen&luna-78.com +44267 + Hematology & Oncology Associates of Alabama, LLC + Vaughn Skinner + vskinner&hoaallc.com +44268 + DynaScan Technology, Inc. + Asson Ko + asson&dynascan.com.tw +44269 + Beamly + Neil Saunders + neil&beamly.com +44270 + Barnardsoft Co., Ltd. + Masaaki Matsumi + matsumi.masaaki&barnardsoft.co.jp +44271 + Nuage Networks + Diego Garcia del Rio + diego&nuagenetworks.net +44272 + Peter Andree + Peter Andree + peter.andree&web.de +44273 + Gernert-Net + Björn Gernert + mail&gernert-net.de +44274 + Trit Networks LLC + Dima Dorfman + iana-pen&trit.net +44275 + Illumio, Inc. + Bryan Pelham + bryan.pelham&illumio.com +44276 + TANABIKI Inc. + Noriaki Tanabiki + n-tanabiki&tanabiki.com +44277 + Mike Tennant + Mike Tennant + mtennant&bigted.net +44278 + ShangHai RealCom Communication Technology Co.,Ltd. 
+ Li Yan + tech&realcom.com.cn +44279 + ZheJiang University PRESS + Tong HuaZhang + tonghz&zju.edu.cn +44280 + F E T Elettronica snc + Marco Tani + info&fet.it +44281 + Blood & Marrow Transplant Group of Georgia + Robin Cullen + rcullen&bmtga.com +44282 + Microware Computing & Consulting Pvt Ltd + Sanjay Sangal + sanjay&sanjaysangal.com +44283 + Appnovation Technologies Inc + Will Sheldon + will&appnovation.com +44284 + Time4 Systems Ltd. + Kenneth Hann + ken&time4systems.com +44285 + New England Cancer Specialists + James Reddy + reddyj&newecs.org +44286 + Working Distributors, Inc. + Ken Mautz + it&workingdistributors.com +44287 + JConcept Open Solutions + OLIVIER SMADJA + olivier&jconcept.com.br +44288 + greaty + xiaoyong.huang + xiaoyong.huang&greatytech.com +44289 + Beijing Cyber Greatwall Information Technology Co., Ltd. + Liu Junfeng + info&cybergreatwall.com +44290 + Sajeev Anand, M.D., LLC + Robin Williams + rrwilliams726&gmail.com +44291 + New London Cancer Center + Mithlesh Govil + mgovil&gmail.com +44292 + emplus Networks Inc. + Stan H.Y, Liu + stan.liu&senao.com +44293 + Unwired Networks GmbH + Alexander Szlezak + devops&unwired.at +44294 + Quanta-Computing + Matthieu ROSINSKI + sysadmin&quanta-computing.com +44295 + SSE - Stuellein Software Engineering + Christian Stuellein + christian.stuellein&sse-engineering.de +44296 + HelmetHub + James Griffith + jgriffith&helmet-hub.com +44297 + Alliance Cancer Specialists, PC + Susan Lindner + bhoma130&gmail.com +44298 + CogVis GmbH + Jürgen Konetschnig + konetschnig&cogvis.at +44299 + I.M. Dauntless + Ramona Trevino + ramonatrevino&sbcglobal.net +44300 + Cyber1st R&D Ltd. + Jonathan Spurgin + jonathan.spurgin&cyber1st.co.uk +44301 + Alexander Maier GmbH + Alexander Maier + alexander.maier&maier-gst.de +44302 + Clearview Cancer Institute + Michelle Brown, Chief Operations Officer + mbrown&ccihsv.com +44303 + Netz-AG Emil-Figgestraße 15-39 + Fabian Reiners + fabian.reiners&wh-nef.de +44304 + HEMATOLOGY ONCOLOGY LIFE CENTER LLC + JULIE BEENE + JBEENE&HOLCMED.COM +44305 + Oncology Hematology Associates of Springfield, MD, PC + Amber Pierce + amberpierce22&gmail.com +44306 + CAROLINA ONCOLOGY ASSOCIATES, P.A. + EMILY SHEPHERD + ESHEPHERD&CAROLINAONCOLOGY.COM +44307 + LP Technologies, Inc. + Micky Mukalay + info&lptech.com +44308 + Centripetal Networks, Inc. + Justin Rogers + admin&centripetalnetworks.com +44309 + Center for Cancer Care + Candace Hayes + chayes&suburban-hemonc.com +44310 + SiteSpect, Inc. + Simon Tetelbaum + stetelbaum&sitespect.com +44311 + Budapesti Fazekas Mihaly Gyakorlo Altalanos Iskola es Gimnazium + Herczeg Zsolt + herczeg.zsolt&fazekas.hu +44312 + Amundson Partners, Inc. + Mark Amundson PharmD + mpa&cableone.net +44313 + Johan Grip (ogun.org) + Johan Grip + ogun&ogun.org +44314 + Democritus University of Thrace + Ioannis Alexiadis + noc&duth.gr +44315 + Guido Di Fazio + Guido Di Fazio + g.difazio&mclink.it +44316 + Softneta UAB + Tomas Dumbliauskas + info&softneta.com +44317 + Kanton Zug (www.zg.ch) + Gisler Rudolf + rudolf.gisler&zg.ch +44318 + PT. Telekomunikasi Indonesia + Mohamad Fajar Aditya Masyhur + aditya_mh&telkom.co.id +44319 + Oklahoma Cancer Specialists and Research Institute + John Oborn + John.Oborn&ocsri.org +44320 + Lake Norman Hematology Oncology + Betty Adams + betty&lakenormanoncology.com +44321 + TrilioData, Inc + Muralidhar Balcha + murali.balcha&triliodata.com +44322 + Shenzhen Mailian Electronics Co.,Ltd + luo shuang shuang + 157641738&qq.com +44323 + SmartRG, Inc. 
+ David La Cagnina + david.lacagnina&smartrg.com +44324 + EstiNet Technologies Inc. + Fuh-Jang Lin + lfj&estinet.com +44325 + TangoME, Inc. + Zach Carlson + chefs&tango.me +44326 + Kwartzlab Makerspace + Ben Brown + admin&kwartzlab.ca +44327 + Ardexa Pty Ltd + David Mohr + david.mohr&ardexa.com +44328 + tmon + Ingo Flaschberger + iana&tmon.at +44329 + Elektronik Art + Pawel Sienczak + pawel&elektronikart.pl +44330 + Xeneta AS + Pål Eivind Jacobsen Nes + pal.nes&xeneta.com +44331 + ZHAW Zürcher Hochschule für Angewandte Wissenschaften + Stephan Neuhaus + stephan.neuhaus&zhaw.ch +44332 + Wallarm, Inc + Alexander Golovko + admin&wallarm.com +44333 + artdecode.de + Stephan Neuhaus + sten&artdecode.de +44334 + Saint Vincent Physician Services, Inc. + Jennifer Woodbury + Jennifer.woodbury&stvincenthospital.com +44335 + Waverules LLC + John Handley + jhandley&waverules.com +44336 + Oncology Hematology Associates of Saginaw Valley, P.C. + Jennifer Metevia + jmetevia&jbartnik.com +44337 + ONCOLOGY ASSOCIATES OF MONROE + KRISTI HOLTON + kholtonoa&yahoo.com +44338 + Low Country Cancer Care Associates, P.C. + Jim Tucker + jtucker&lcccsav.com +44339 + Low Country Cancer Care Associates, P.C. + Jim Tucker + jtucker&lcccsav.com +44340 + Mid-Illinois Hematology & Oncology Associates, Ltd. + Sarah Whelan + sarah.whelan&mihoaonline.org +44341 + GreatCall, Inc. + Laurent Gousset + gcitsyseng&greatcall.com +44342 + Regional Cancer Care Associates LLC + Amanda K. Boyer + aboyer&regionalcancercare.org +44343 + Jackson Oncology Associates, PLLC + Ann Huff + joncology&jacksononcology.com +44344 + CANCER CENTER ASSOCIATES + LISA MCGILLEM + LMCGILLEM&CANCERCENTERASSOCIATES.COM +44345 + LLC, Sintek + Artem Svechnikov + svechnikov.artem&sintek-nn.ru +44346 + JSC "Promstroikontrakt" + Dmitry Valiavsky + admin&psk-holding.ru +44347 + IK SATPROF LLC + Dmitrii Zubchenko + dmitry.zubchenko&iksatprof.ru +44348 + NIL KSA + Michel Beloshitsky + mbeloshitsky&nilksa.ru +44349 + Highland Clinic, APMC + Renee' DeMoss + rdemoss&highlandclinic.com +44350 + Cancer Care Centers of Brevard + Gail Erentreich + gerentreich&hoacb.com +44351 + greglearns (Greg Edwards) + Greg Edwards + greg&greglearns.com +44352 + North Shore Hematology & Oncology Associates P.C. + ToniAnn Genna + tgenna&nshoa.com +44353 + Cancer Center of Central Connecticut, LLC + Susan Hansen + shh6219316&sbcglobal.net +44354 + Grace Hematology and Oncology + Amanda Strange + gracehematology01&yahoo.com +44355 + Oncology Hematology Associates of Northern PA, PC + Carrie Frederick + cbfrederick&ohanp.com +44356 + Trust Medical and Oncology Center + Alvaro Talavera + talaveramd&yahoo.com +44357 + Birchbox Inc. + Jereme Corrado + hostmaster&birchbox.com +44358 + Nashat Y Gabrail, MD Inc + Shelly Rentsch + skr1105&aol.com +44359 + Frederick P. Smith, MD PC + Mary McNeal + mmcneal&cchealthcare.net +44360 + EAST SIDE ONCOLOGY ASSOCIATES, PLLC + VALERIE VALLEJO + VAL629&AOL.COM +44361 + Dabas Cancer Institute, P.A. + Robin Lenz + rlenz930&gmail.com +44362 + Universidade Federal de Uberlandia + Divisao de Redes + dr&cti.ufu.br +44363 + Cloudflare + Cloudflare NOC + noc&cloudflare.com +44364 + AVEA + Huseyin Firat Kose + huseyinfirat.kose&avea.com.tr +44365 + DR. CHRISTOPHER T. SOPRENUK, M.D. P.A. 
+ BETH SOPRENUK + zkstevens&hotmail.com +44366 + Essex Oncology of North Jersey PA + Denise Johnstone + dj&essexoncology.com +44367 + Highland Solutions + Stu Heiss + sheiss&highlandsolutions.com +44368 + Unmukti Technology Private Limited + Nishant Sharma + nishant&unmukti.in +44369 + NeoTerra Systems Inc. + Lam Nguyen + lam.nguyen&neoterra.ca +44370 + Utelisys Communications B.V. + Michiel Timmers + mtimmers&utelisys.com +44371 + Jay Steel + Jason Mallory + steelurban&yahoo.com +44372 + Valley Medical Oncology Consultants, A Medical Group, A Professional Corp + Arose Bey-Molina + abeymolina&vmoc.com +44373 + Regional Cancer Care Associates- Central Jersey Division + Eileen Peng + epeng&regionalcancercare.org +44374 + New England Hematology Oncology Associates + Amy Carlton + acarlton&neho.org +44375 + Eastern Long Island Hematology Oncology + Wayne Burgess + wburgess&optonline.net +44376 + Opus One Winery, LLC + Chris Dillon + noc&opusonewinery.com +44377 + Hudson Hematology Oncology + Barry Downer + barry24361838&yahoo.com +44378 + Stephan Gogler + Stephan Gogler + pen&gogler.at +44379 + Meanwave GmbH + Heiko Vachek + Heiko&Vachek.de +44380 + Desert Hematology Oncology Medical Group Inc. + Rita Flores + ritadho&aol.com +44381 + Medical Oncology Associates, PS + Sherry Cleveland + clevels&nwrm.com +44382 + Intec Pacific Pty. Ltd. + Spiro Asarloglou + spiro&intecpacific.com.au +44383 + Ascent Co.,Ltd + Tsutomu Asami + t.asami&ascent-jp.com +44384 + Harbour IT Pty Ltd + Brendan Thompson + ianaregistry&harbourit.com.au +44385 + BONSONINFO SYSTEM CO.,LTD + haobo.wang + wanghb&bonsoninfo.com +44386 + 3 Gen d.o.o. + Henrik Udovc + henrik.udovc&3gen.si +44387 + Conway Hematology Oncology + Priscilla Klosky + pklosky&conwaycorp.net +44388 + Medical and Surgical Clinic of Irving + Dorothy Ellis + dellis&mscitx.com +44389 + Max Planck Institute for Metabolism Research + Dr. Stefan Vollmar + vollmar&nf.mpg.de +44390 + Skootr.com + Scott Stephens + scott&skootr.com +44391 + Polytechnic West + Nat Hansen + icthelpdesk&polytechnic.wa.edu.au +44392 + Xperterra + Igor Sirkovich + info&xperterra.com +44393 + Pinellas Hematology &Oncology PA. + Iliana Bolton + iboltonhemonc&gmail.com +44394 + Province Nord - Nouvelle Calédonie + DSI Province Nord (Bernard Sautet) + pen_dsi_provincenord&province-nord.nc +44395 + Pierre Decazes + Pierre Decazes + pierre.decazes&gmail.com +44396 + Hilberling GmbH + Stefan Schmahl + sschmahl&hilberling.de +44397 + OTP Bank Plc. + David Attila + david.attila&otpbank.hu +44398 + SAITEL Telecomunicazioni s.r.l. + Carlo Banfi + support&saitel.it +44399 + Masgalor + Julian Röder + registration&masgalor.de +44400 + Eagle Investment Systems LLC + James Baker + jbaker&eagleinvsys.com +44401 + Meeting House Lane Medical Practice PC + Michelle Mullin + mmullin&southamptonhospital.org +44402 + Singh & Arora Oncology/Hematology, P.C. + Heather Decker + pheather76&hotmail.com +44403 + STUART ONCOLOGY ASSOCIATES + JAIMINI PATEL + pate7773&comcast.net +44404 + Climate-control NN, Ltd. + Marat Cheremhin + marcher&list.ru +44405 + Shenzhen Wintop Photoelectric Technology Co., Ltd + Jeff Ji + jeff&wintoptec.com +44406 + SMT S.A. + Tomasz Gąska + tomasz.gaska&smtsa.pl +44407 + Vidder, Inc. + Scott Shackleton + operations&vidder.com +44408 + Ventus Technologies S.L. + Joan Balagueró Valls + joan.balaguero&ventusproxy.com +44409 + OSNA Research + Dr. 
Michael Schukat + michael.schukat&osna-solutions.com +44410 + Versilis Inc + Jean-Francois Menier + jfmenier&versilis.com +44411 + Universidad Mariano Gálvez de Guatemala + Juan Manuel Lemus + jlemus&umg.edu.gt +44412 + Markus Froehlich + Markus Froehlich + pen.iana&marfr.de +44413 + Studievereniging A-Eskwadraat + Jitse Klomp + sysop&a-eskwadraat.nl +44414 + Aldazar LLC + David Marques + Bills&aldazar.com +44415 + Mikeji d.o.o. + Miha Pihler + miha.pihler&telnet.si +44416 + China Electronics Technology Group Corporation No.7 Research Institute + kinglee + kinglee_gci&139.com +44417 + Lazy Mountain Computers + Greg Jetter + greg&lazymountain.com +44418 + Shenzhen Haipengxin Electronics Co., Ltd. + Ms.Coco Zhou + cocozhou&hpxin.com +44419 + South County Hematology Oncology + Elena Lankford + elankford.scho&yahoo.com +44420 + Fairfax Media + Joe McNamara + jmcnamara&fairfaxmedia.com.au +44421 + Shen zhen huaptec co.,ltd + xieyanlin + 2580557001&qq.com +44422 + CloudGate Systems India Pvt Ltd + Abhik Biswas + abhik&cloudgate.in +44423 + Frederick G. Barr, MD + Mary Mcneal + mmcneal&cchealthcare.net +44424 + Nelson G.N. Kalil, MD PC + Mary McNeal + mmcneal&cchealthcare.net +44425 + goldenTech SA + Matteo Risoldi + mar&goldentech.ch +44426 + ek-soft GmbH + Frank Moegle + moegle&ek-soft.de +44427 + Hills Road Sixth Form College + Matthew Benstead + mbenstead&hillsroad.ac.uk +44428 + ZAO Severo-Zapad + Valeriy Bogatov + admins&sev-zap.ru +44429 + Great Lakes Cancer Management Specialists + Betty Wiknich + Betty.Wiknich&stjohn.org +44430 + AGF Management Limited + Russell DPaiva + network.support&agf.com +44431 + Panhandle Cancer Center + Christi Richardson/Regina Parker + chr&malamud1.comcastbiz.net +44432 + CanDeal + Igor Peonte + ipeonte&candeal.com +44433 + Tri-Tech Manufacturing, Inc + Alf Riisnaes + Alf&TTMi.us.com +44434 + Junta de Comunidades de Castilla-La Mancha + José Illescas Pérez + jose.illescas&castillalamancha.es +44435 + Staatliches Berufsschulzentrum Hermsdorf + Wolfram Pienkoss + wp&bszh.de +44436 + IdentKey Sicherheitsverbund (Ithamar Garbe) + Ithamar Garbe + info&identkey.de +44437 + CommBox Pty. Ltd. + Robert Gaunt + support&commbox.com.au +44438 + Thomas A. York + Thomas A. York + thomas.alan.york&gmail.com +44439 + FOP Kagarlickij D.V. + Kagarlickij Dmitriy + dmitriy&kagarlickij.com +44440 + INFUSION ASSOCIATES, PC + JAN CARRICK + janc&infusionassociates.com +44441 + Radford University + Bill Abplanalp + itsecurity&radford.edu +44442 + iSysBus + Ithamar Garbe + info&isysbus.org +44443 + Stelo - Companhia Brasileira de Pag Eletr + Vitor Carvalho + vitor_it7&stelo.com.br +44444 + iWelcome B.V. + Jordi Clement + jordi.clement&iwelcome.com +44445 + Corrado Mulas Enterprise Root CA + Corrado Mulas + corradomulas&virgilio.it +44446 + GovComm, Inc. + Svetoslav Veltchev + sveltchev&govcomm.us +44447 + Fairchild Medical Center + Michael Madden + mmadden&fairchildmed.org +44448 + Apica + Niclas Tollgard + operations&apicasystem.com +44449 + Xiamen University + Chen Zhiwei + chenzhiwei&xmu.edu.cn +44450 + Synapsys Solutions Ltd. + Andy Devine + andy.devine&synapsys-solutions.com +44451 + Schessner IT-Consulting + Wolf Schessner + registry&schessner-it.de +44452 + Evgeny Artemyev (XART) + Evgeny Artemyev + eart&live.ru +44453 + Arizona Oncology/Saguaro Cancer Center + Tammy Riley + tammy.riley&usoncology.com +44454 + Globalstar, Inc. 
+ Sandra Durbin + sandra.durbin&globalstar.com +44455 + CHINA HUALU GROUP CO., LTD + Che Yongjin + cheyj&hualu.com.cn +44456 + tuxwave.net + Sebastian Kricner + sebastian.kricner&tuxwave.net +44457 + Biamp Systems + Jeff Sondermeyer + jeff.sondermeyer&biamp.com +44458 + IT WATANA COMPANY + Jaruwat Boonmee + jaruwat&itwatana.com +44459 + Bluepunkt Networks, Inc. + Ronnie Hung + ronnie&bluepunkt.com +44460 + Alumina Elit 2003 Ltd. + Dobrin Dobrev + d.dobrev&aluminaelit.com +44461 + Rainus + Song-on Lee + sg&rainus.co.kr +44462 + K2E + Jin Kim + jin&tekpoint.com +44463 + BTS TECH S.r.l. + Fabio Cateno Burgarello + btstech&hotmail.it +44464 + Edgeguide AB + Mats Sjöström + mats.sjostrom&edgeguide.com +44465 + Cataleya Pte Ltd + Miguel Lopes + m.lopes&cataleya.com +44466 + FOP Tumakha Yuriy Volodymyrovych + Yuriy Tumakha + tumakha&gmail.com +44467 + Sigmacom Broadcast + Stelios Kirkalas + skirkalas&sigmacom.gr +44468 + Streamline, Lda. + Francisco Maia + famaia&streamline.pt +44469 + ARPA2 + Rick van Rein + rick&openfortress.nl +44470 + Intermediate Engineering GmbH + Elio Wahlen + e.wahlen&im-en.com +44471 + Kryptos Security + Michael Copeland + michael&kryptos-security.com +44472 + BAMBR Information, Inc. + Joe Bambr + joe&bambr.info +44473 + CSTx GmbH + Michael Rother + office&cstx.de +44474 + Murata Manufacturing Co.,Ltd + Ryuhei Nakai + r_nakai&murata.com +44475 + init.at informationstechnologie GmbH + Georg Bahlon + bahlon&init.at +44476 + Middlesex Oncology + Martha Conner + middlesexoncology&msn.com +44477 + Washington Metropolitan Area Transit Authority + John Michie + jfmichie&wmata.com +44478 + BIN-Control GmbH + Stephan Henseler + helpdesk&bin-control.com +44479 + Gikos Networks + Kranium Mendoza + schema&gikos.net +44480 + Beijing Liuhe Intelligence Technology Ltd. + Sui Pingli + suipingli&liuheit.com +44481 + CentrAlert + Jeff Whattam + support&centralert.com +44482 + Oncare-Hawaii Hematology Oncology + Rana Sallakian + ranas&lahomg.com +44483 + Los Angeles Cancer Network + Rana Sallakian + ranas&lahomg.com +44484 + YIXUN Technology Co., Ltd. + PengChong + pengchong&champor.com.cn +44485 + WIBU-SYSTEMS AG + Wolfgang Voelker + wolfgang.voelker&wibu.com +44486 + Diabolocom + Vincent Batoufflet + admin&dabcnet.net +44487 + Ineos Manufacturing Koeln GmbH + Andreas Woithon + andreas.woithon&ineos.com +44488 + Reichle & De-Massari AG + Reinhard Burkert + reinhard.burkert&rdm.com +44489 + olifluous + Sybren Apiecionek + ca&olifluous.com +44490 + Beward R&D Co., Ltd + Oleg Beketov + beketov&beward.ru +44491 + Cherry Creek Mortgage Co., Inc. + Todd Keller + tkeller&ccmclending.com +44492 + MEDIAHOUND, INC. + WESLEY BURGER + WESLEY&MEDIAHOUND.COM +44493 + STARMON s.r.o. + Jiri Holinger + holinger&starmon.cz +44494 + TrustAsia Technologies, Inc. + Edwin Zhai + edwin.zhai&trustasia.com +44495 + PAIO co.,ltd. + KyuWoo Choi + apple&paio.co.kr +44496 + Bender GmbH & Co. KG + Jan Hofmann + jan.hofmann&bender-de.com +44497 + CHINA AERONAUTICAL RADIO ELECTRONICS RESEARCH INSTITUTE + SONG QING + song_qing&careri.com +44498 + IT Solutions Roland Breitschaft + Roland Breitschaft + info&x-company.de +44499 + BLOOD AND CANCER CENTER OF EAST TEXAS + LISA GROSS + lgross&bccet.com +44500 + CHRONOTRACK SYSTEMS CORP. + Robert Lyons + blyons&chronotrack.com +44501 + NuLEDs, Inc. + Chris Isaacson + chris&nuleds.com +44502 + SmartCast GmbH + Matthias Meyer + meyer&smartcast.de +44503 + MDLIVE Inc. 
+ Matt Thompson + mthompson&mdlive.com +44504 + Evilcats Organization + Jonathan Pearsall + jonathan&evilcats.org +44505 + Ideum Group, Inc + Mohamad EL-Bawab + mohamad&ideumgroup.com +44506 + Consulta Network Security AB + Daniel Azzarri + daniel.azzarri&consulta.se +44507 + Northwest Alabama Cancer Center, PC + Ella Ward + eward_nwacc&comcast.net +44508 + Vigilant Applications Limited + Thomas Medhurst + software&vigilantapps.com +44509 + CompTek + Fedor Vompe + f.vompe&comptek.ru +44510 + FJ Consultant + Francois Joannette + francois&fjconsultant.ca +44511 + Rudolf Wild GmbH & Co. KG + Peter Schwendner + peter.schwendner&wild.de +44512 + iptronix srl + dario pennisi + dario&iptronix.com +44513 + Essener Versorgungs- und Verkehrsgesellschaft mbH (EVV) + Ronald Kaufmann + ronald.kaufmann&evv-online.de +44514 + bremersee.org + Christian Bremer + iana&bremersee.org +44515 + Griesson - de Beukelaer GmbH & Co. KG + Winter Michael + m.winter&griesson.de +44516 + ETES GmbH + Markus Espenhain + support&etes.de +44517 + Virgil Grigoras + Virgil Grigoras + grigoras.oid&arcor.de +44518 + EUROSELL SPA + VITTORIO DALERCI + vittorio.dalerci&eurosell.it +44519 + US Railroad Retirement Board + Johand W. Cruse + johand.cruse&rrb.gov +44520 + Consolidated Information Systems + Tim Wetzel + wetzelt&consolidatedinfosystems.com +44521 + WUNDERVOLL NETWORKS + SEAN HUANG + sean&wundervoll.com.tw +44522 + Information Technology Solutions Laboratory + Danila Zubkov + danila.zubkov&gmail.com +44523 + NetBeez + Stefano Gridelli + stefano&netbeez.net +44524 + Research & Development Center "Vulkan" LLC + Andrey Bryantsev + a.bryantsev&ntc-vulkan.ru +44525 + Breast Surgery of Tulsa + Dr. LaNette Smith + jobs&breastsurgeryoftulsa.com +44526 + Carta Solutions Processing Services Corp + Geoff Smith + gsmith&cartaworldwide.com +44527 + Marco Dominguez + Marco Dominguez + marco.dominguez&gmail.com +44528 + Uriel Technologies + Janis Germanis + janis.germanis&urieltechnologies.eu +44529 + One Call Care Management + Jim Sullivan + Security_Team_Notice&onecallcm.com +44530 + Alphapower Ltd. + Octavian Stanciu + admin.alphapower&gmail.com +44531 + Renfell Engineering Pty Ltd + David Clement + pen&renfell.com +44532 + Sage Electronic Engineering, LLC + Scott Hoot + scott.hoot&se-eng.com +44533 + Digital Barriers + Paul McAteer + paul.mcateer&digitalbarriers.com +44534 + Weybourne Partners LLP + Charles Airey + charles.airey&weybournepartners.com +44535 + TSAT AS + Stein Harstad + stein.harstad&tsat.net +44536 + True Partners Consulting LLC + Gerald Liebhardt + gerald.liebhardt&tpctax.com +44537 + Elauwit Networks, LLC + Taylor Jones + cto&elauwit.com +44538 + Providius Corp + Jackson Wiegman + jackson&providiusdesign.com +44539 + Safe Patient Systems Ltd + Ross Folland + tech&safepatientsystems.com +44540 + Bond Enterprises + Carlos Maldonado + carlos.maldonado&inving.com +44541 + FeiTian United (Beijing) System Technology Co.,Ltd. + Yeping Xiao + yeping.xiao&feitian-tech.com +44542 + VdS Schadenverhütung GmbH + Philipp Steingrebe + psteingrebe&vds.de +44543 + Viesti Networks Oy + Risto Järvinen + risto.jarvinen&vine.fi +44544 + SANEF-ITS + Stephane Coudray + stephane.coudray&sanef-its.com +44545 + K-LAGAN España S.L. + Jesús González + jgonzalez&k-lagan.com +44546 + Harp Visual Communications Ltd + David Dunning + david.dunning&harpvisual.co.uk +44547 + YMOR Group B.V. + Stefan Postma + spostma&ymor.nl +44548 + Centrale Ashton Inc. + Vincent D. 
Handfield + v.handfield&ashcent.com +44549 + ELECTRIS + Mateusz Knapik + mateuszknapik&electris.pl +44550 + Avature USA LLC + Marco Dominguez + marco.dominguez&avature.net +44551 + NetCredit Group + Janis Orlovs + it&netcreditgroup.com +44552 + Night Corporation + Sutton Shin + oid&infsys.net +44553 + Oncology/Hematology of Loudoun and Reston + Sarva Rajendra + onchemraj2&verizon.net +44554 + Primevation Ltd. + Lars Eilebrecht + iana.org&primevation.com +44555 + CICS AB + Magnus Zimmerman + magnus.zimmerman&cics.se +44556 + Universitas Al Azhar Indonesia + Rahman Pujianto + rahman&uai.ac.id +44557 + WorNet AG + Michael Wodniok + wodniok&wor.net +44558 + Derixx GmbH + Markus Mayer + markus.mayer&derixx.de +44559 + Tribunal Regional do Trabalho da 13a. Regiao + Daniel Nunes Lira Barbosa + dnbarbosa&trt13.jus.br +44560 + International Black Sea University + Salavat Sayfullin + hostmaster&ibsu.edu.ge +44561 + Alachua County Library District + Harold McMullen + hmcmullen&aclib.us +44562 + aixtools (Michael Felt) + Michael Felt + aixtools&felt.demon.nl +44563 + Irkutsk Diagnostic Centre + Vladislav Arkhipov + arhipov&dc.baikal.ru +44564 + CJSC "ENERGOMERA" + Roman Lipskiy + LipskiyRN&energomera.ru +44565 + Tobias Mandjik (formerly 'LECKERBEEF.de') + Tobias Mandjik + tobias.mandjik&gmail.com +44566 + OOO Komandor-holding + Tarelov Egor + khext&sm-komandor.ru +44567 + Agencja Rozwoju Przemyslu S.A. + Piotr Stokluska + piotr.stokluska&arp.com.pl +44568 + Om7Sense GmbH + john read + john.read&om7sense.com +44569 + Regional Networks, Ltd. + Konstantin A Savchenko + konstantin.s&regnets.ru +44570 + Lancaster Cancer Center + Daisy Plaza + dplaza&lancastercancercenter.com +44571 + Solinea, Inc + John Stanford + john&solinea.com +44572 + Abington Hematology Oncology Associates + Richard Perri + rperri&abingtonhemeonc.com +44573 + MGI1 + Edmund Jay + ejay&mgi1.com +44574 + Jilin Yuxin Technology Co. Ltd + 李昊天 (Haotian Li) + clerk_9919&163.com +44575 + 3S PocketNet + Kevin Ko + kevin.ko&3svision.com.tw +44576 + PERNOD-RICARD + Gilaud Christophe + christophe.gilaud&pernod-ricard.com +44577 + Proxion Solutions Oy + Liisa Tolvanen + liisa.tolvanen&proxion.fi +44578 + Spectra Engineering Pty Ltd + Albertus Sonny Harmanto + sonnyh&spectraeng.com.au +44579 + Sonnenburg Electronic AG + Roland Westenkirchner + westenkirchner&sonnenburg.de +44580 + Johann Sperber GMBH & Co. KG + Mr. Maikranz, Michael + maikranz&sperber-kg.de +44581 + Regional Hematology Oncology Associates,P.C + Carol Reifsteck + carol.rhoa&gmail.com +44582 + Inversion Software OÜ + Indrek Järve + indrek&inversion.ee +44583 + Obicis Ltd. + Hrvoje Maracic + hrvoje&obicis.com +44584 + JumpCloud, Inc + Travis Yoes + sa&jumpcloud.com +44585 + Servato Corp + Dorion Carr + dcarr&servatocorp.com +44586 + CommDev, LLC + Sergey Kozlov + commdevllc&gmail.com +44587 + InMechaSol + Matthew von Arx + matt.vonarx&inmechasol.com +44588 + Kryptus Information Security S/A + Igor Jardim + igor&kryptus.com +44589 + becker-ikt.de - Jochen Becker + Jochen Becker + jochen.becker&becker-ikt.de +44590 + Kevin Niehage + Kevin Niehage + kevin&niehage.name +44591 + Morgan Hunt Ltd + Mr Julian Fletcher + julian.fletcher&morganhunt.com +44592 + Fininvest Sp. z o.o.
+ Jacek Blaszczynski + oid&fininvest.pl +44593 + Jan Bětík (Údolí Sviní) + Jan Bětík + jan.betik&svine.us +44594 + DTS INSIGHT CORPORATION (formerly 'Yokogawa Digital Computer Corporation') + Koichi Ida + pqa_admin&dts-insight.co.jp +44595 + indigo Consulting GmbH + Sven Schmidtke + svschmid&indigo-nms.eu +44596 + DILO Armaturen und Anlagen GmbH + Peter Haag + Peter.Haag&dilo-gmbh.com +44597 + BigBoards + Wim Van Leuven + wim.vanleuven&bigboards.io +44598 + Gosford IT + Dave Horsfall + dave&gosford-it.com +44599 + Vuzix Corporation + Vuzix Corporation + iana&vuzix.com +44600 + e-Paper Ltd. + Eduard Furlender + efurl&epapersign.com +44601 + Sunware s.r.o. + Michael Brabec + michael.brabec&sunware.cz +44602 + Systems Mechanics Ltd. + Peter Godfrey + pete.godfrey&sysmech.co.uk +44603 + u-form Testsysteme GmbH & Co KG + Cornelius Scheffel + scheffel&uforme.de +44604 + Beijing Jinhong Xi-Dian Information Technology Corp. + Pingli Sui + suipingli&chinaxidian.com +44605 + Lighthouse IVM GmbH + Andreas Niemeyer + technik&kadis.de +44606 + Siraya Networks Co., Ltd. + Sam Wang + sam&sirayanetworks.com +44607 + GreenField Software Private Limited + Ankur Pramanick + ankur.pramanick&greenfieldsoft.com +44608 + Net at Work Netzwerksysteme GmbH + Stefan Cink + stefan.cink&netatwork.de +44609 + PRONIX s.r.o. + Petra Sergovicova + pronix&pronix.cz +44610 + Elvis-Telecom JSC + Dmitry Kozlov + noc&elnet.ru +44611 + Dumee.fr + Gilles Dumee + gilles&dumee.fr +44612 + Beijing Tonlier Energy Technology Co., Ltd. + XiaoZiYun + xzy_2008&sina.com +44613 + Sunkaisens(Beijing) Technology Co.,Ltd. + Zhang XiangLong + zhangxianglong&sunkaisens.com +44614 + Lingualeo LLC. + Anthony Regeda + regeda&lingualeo.com +44615 + Bluehouse Technology Ltd + Ms. Sharmila Pillai + info&bluehouse-technology.co.uk +44616 + University Transilvania of Brasov + Sorin COCORADA + cciu&unitbv.ro +44617 + GLOBTECH spol. s r.o. + Vit Zdara + vit.zdara&globtech.cz +44618 + Philter, LLC + Chad Johnson + chadrjohnson&gmail.com +44619 + Ziften Technologies + Luca Loiodice + luca.loiodice&ziften.com +44620 + Integrated Service Company LLC + Jeff L Dean + jdean&inservusa.com +44621 + OLeary Computers Inc + Doug O'Leary + dkoleary&olearycomputers.com +44622 + Instituto Colegio de Todos los Santos + Vera Santiago + informatica&tls.edu.ar +44623 + Iomnis Surveillance Solutions + Mr. Alan Davis + iomnis_it&iomnis.com +44624 + Blood & Cancer Center (Ohio) + Rita Lopez + ritlopez&hotmail.com +44625 + Electre + Foued ZIDANE + fzidane&electre.com +44626 + Captive.net + Anton Van Cauteren + anton&captive.net +44627 + National Instruments Corporation + William A Lanagan + lanagan&ni.com +44628 + Canva, Inc. + OID Admin + oid-admin&canva.com +44629 + Australian Department of Infrastructure and Regional Development + Jon Harrison + Jon.Harrison&infrastructure.gov.au +44630 + JSC Platron + Dmitry Karmishkin + cto&platron.ru +44631 + TechFu (Pty) Ltd. + Neil Fourie + iana&techfu.co.za +44632 + EBlink SA + Ludovic Le Corre + informatique-op&e-blink.com +44633 + Squire Technologies + Andrew Cooper + acooper&squire-technologies.com +44634 + markentier (Christoph Grabo) + Christoph Grabo + christoph&markentier.net +44635 + Degeetia OÜ + Ivo Mehide + ivo&degeetia.ee +44636 + NewCashel Inc. + Mike Rainey + mike&newcashel.com +44637 + CB&I Federal Services + Steven O'Neal + steven.oneal&cbifederalservices.com +44638 + ONEDC + Sam WIlson + sam.wilson&onedc.com +44639 + Hermes Europe GmbH + Christoph Eichner + Christoph.Eichner&hermes-europe.eu +44640 + UpdateLogic, Inc.
+ Brian Paul Johnson + it&updatelogic.com +44641 + VyOS + Daniil Baturin + maintainers&vyos.net +44642 + National Knowledge Network + Gaurav Kansal + gaurav.kansal&nkn.in +44643 + Fujikura Automotive Europe S.A. + Manuel Ruiz + manuel.ruiz&fujikura-automotive.com +44644 + Vopium A/S + Haroon ur Rashid Abbasi + haroon.rashid&vopium.com +44645 + Infrabel NV + Pieter Baele + pieter.baele&infrabel.be +44646 + Emko Elektronik Sanayi ve Ticaret A.Ş. + Duygu Ispalar Guneri + duygu.ispalar&emkoelektronik.com.tr +44647 + Argus der Presse AG + Yannik Nicod + software&argus.ch +44648 + Manet Ltd + Tomi Malkki + tomi&manmon.net +44649 + Alma Technologies + John K. Hinsdale + postmaster&alma.com +44650 + Hemotology Oncology Care of Northern VA + Layne Fry + lfry&hocnova.com +44651 + University of Detroit Jesuit High School and Academy + Information Technology + webadmin&uofdjesuit.org +44652 + LaMarche Mfg. Company + Pat DeLacluyse + pdelacluyse&lamarchemfg.com +44653 + Unassigned + Removed 2014-11-04 + ---none--- +44654 + Hochschule für Musik Freiburg + Jürgen Diez + j.diez&mh-freiburg.de +44655 + Heartland Hematology & Oncology + Shannon Bauer + sbauer&heartlandhem-onc.com +44656 + Sound Choice Communications LLC + Eric Osterberg + staff&sccvoip.com +44657 + Bow Valley College + Mark Prevey + mprevey&bowvalleycollege.ca +44658 + Northwest Medical Specialties PLLC + Jeff Hunnicutt + jhunnicutt&nwmsonline.com +44659 + Burlington County Hematology-Oncology Associates, P.A. + Joyce Matola + bchemonc&verizon.net +44660 + Landspitali + Elfar Ingvarsson + elfar&landspitali.is +44661 + Boyce Technologies, Inc. + Craig Hale + chale&boycetechnologies.com +44662 + CodeChase + Andrew Hilliar + andrewh&codechase.com.au +44663 + Wuhan GreeNet Information Service Co., Ltd. + zhangwei + wzhang&greenet.net.cn +44664 + IRCOS JSC + Sergey Korochin + KorochinSV&ircoc.vrn.ru +44665 + 1&1 Internet AG + Alfons Maas + oid-administration&1and1.org +44666 + Ville de Saint André + Fabrice PAYET + DSI&saint-andre.re +44667 + Pardic Systems , Ltd. + Farid Nabizad + pardicsys&gmail.com +44668 + Securosys SA + Robert Rogenmoser + roro&securosys.ch +44669 + First Capital Payments + Fredrick Robinson + frobinson&firstcapitalpayments.com +44670 + Prism Software Corporation + Mayank Aggarwal + mayank&prismsoftware.com +44671 + Sysctl AB + Andreas Jonsson + andreas&sysctl.se +44672 + Naucra Co., Ltd + Shimhyun Lee + shlee&naucra.com +44673 + SitePen, Inc. + Robert Chady + sitepen-pen&sitepen.com +44674 + FUSED NETWORK CORP + Paul Fleming + admins&fused.net +44675 + Spillman Technologies Inc. + Michael F. Wilkinson + mwilkinson&spillman.com +44676 + Glenn McGurrin + Glenn McGurrin + oid-pen-admin&glennmcgurrin.com +44677 + Dreamhack Network + Christian Svensson + bluecmd&tech.dreamhack.se +44678 + Loopia AB + Linus Sundqvist + drift&loopia.se +44679 + Plansee Group Service GmbH + Martin Broll + martin.broll&plansee.com +44680 + Iudex + Tuomas RIihimäki + pen.iana&iudex.fi +44681 + Ajuntament de Valencia + Pedro Belenguer + pbelenguer&valencia.es +44682 + CompliSpace Technology Pty Ltd + Michael McHugh + michael.mchugh&complispace.com.au +44683 + Intern-net + Bastian Brunner + contact&intern-net.de +44684 + Corsa Technology Inc. 
+ Richard Perrin + mibmaster&corsa.com +44685 + NTENT + Ashton Davis + itns&ntent.com +44686 + ROI Development Corp., DBA Newmar + Kurt Palermo + kurtp&newmarpower.com +44687 + Hebeo + Damien Lesgourgues + damien&hebeo.org +44688 + WorldWide Supply LLC (aka "WWS") + Barry Hardek + bhardek&worldwidesupply.net +44689 + Groupe Simplinet inc. + Martin Hébert + support&simpli.net +44690 + Sonavox Canada Inc. + Stephanie Tedesco + stedesco-rizzo&sonavox.com +44691 + GRUPO ONCOLOGICO COMUNITARIO DE SAN JUAN + JENNIFER OYOLA + oyolajennifer&gmail.com +44692 + Sedus Stoll AG + Eric Michael + ericmichael&sedus.com +44693 + Microwave Photonic Systems, Inc. + Keith Voss + kvoss&b2bphotonics.com +44694 + Adams County Cancer Center + Payal Patel + payalppatel87&gmail.com +44695 + Wolfgang Erlenkoetter IT + Wolfgang Erlenkoetter + wolfgang&erlenkoetter.net +44696 + apl.io + Luke Van Horn + luke&apl.io +44697 + Charles University in Prague, Faculty of Mathematics and Physics + Dan Lukes + netadm&mff.cuni.cz +44698 + Baxter Planning System + Ken Hughes + it&bybaxter.com +44699 + BiLL GmbH + Yvan Zaugg + zaugg&billgmbh.ch +44700 + Kuss IT-Solutions + Reinhard Kuss + office&edv-profi24.at +44701 + Hearsay Social + Adam DePue + opsmgmt&hearsaysocial.com +44702 + Voiceworks B.V. + Oliver Bril + oliver.bril&voiceworks.com +44703 + Danville Hematology and Oncology + Star Reed + starlette.reed&lpnt.net +44704 + BlackSip Development + Jens Sauer + j.sauer&mx.blacksip.de +44705 + Hack12 + Morgan Attias + morgan&hack12.com +44706 + Hostopia.com Inc. + Kelvin Hockin + khockin&hostopia.com +44707 + SAI Technology Inc. + Herman Lee + hlee&saitechnology.com +44708 + Crews Banking corporation + Matt Opalach + Mopalach&crewsbankcorp.com +44709 + Xolphin B.V. + Maarten Bremer + info&xolphin.com +44710 + Ensured B.V. + Maarten Bremer + info&ensured.com +44711 + T-Mobile Nederland BV + Brahim Mouhdi + brahim.mouhdi&t-mobile.nl +44712 + AGAT-System + Ujin Kosy + ujinkosy&gmail.com +44713 + COSMOS-SOFT.net + Masato KISHI + mkishi&cosmos-soft.net +44714 + Transdata + Pavol Podolak + podolak&transdata.sk +44715 + Ezitherm + James Yong + jamesy&ezitherm.com +44716 + University of the Sunshine Coast + Peter Embleton + pembleto&usc.edu.au +44717 + CO-Sol Inc. + Ryota Watabe + ryota.watabe&cosol.jp +44718 + Loews Hotels Inc + Manuel Olivero + HotelItTech&loewshotels.com +44719 + UniversalCard Sibiria LLC + Anton Maltsev + akm&ruspro.info +44720 + Shanghai BEIDIAN Industry Group + Chenliyang + chenly&shbeidian.com.cn +44721 + Okaki Health Intelligence Inc + Wei Tam + wei&okaki.com +44722 + WALLMEDIEN AG + Sven Koepfer + iana&wallmedien.de +44723 + Unassigned + Removed 2014-12-16 + ---none--- +44724 + LLC "ITTech" + Andrey Kravchenko + kravchenko&i-t-tech.ru +44725 + BADTRONIC sarl + JEROME MSIKA + badtronic&badtronic.fr +44726 + Radionika Sp. z o.o. + Aleksander Piwkowski + aleksander.piwkowski&radionika.com +44727 + Y2S Corporation + Yuji Kanematsu + iana&y2s.net +44728 + I3 CZ s.r.o. + Tomáš Brandýský + brandysky&i3.cz +44729 + Fixmon + Smirnov Alexey + chemist&haskell.su +44730 + Elkomtech S.A. + Tomasz Biczyński + tomasz.biczynski&elkomtech.com.pl +44731 + Palestine Monetary Authority + Hani Amer + smart&pma.ps +44732 + OOO bella-Don + Vladislav Spivak + spivak&bella-don.ru +44733 + Forbes Technosys Ltd + Mahesh Kulkarni + mahesh.kulkarni&forbestechnosys.com +44734 + Menlo Security + Todd Ignasiak + info&menlosecurity.com +44735 + Electronic Commerce Inc. + Don Magee + dmagee&ecipay.com +44736 + JERRA Soft GmbH + Mr. 
Sascha Poss + sascha.poss&jerra.de +44737 + Raindrop Laboratories (Alan Batie) + Alan Batie + hostmaster&rdrop.com +44738 + ASUSTOR Inc. + James Su + jamessu&asustor.com +44739 + Miele & Cie. KG + Rolf Berenbrinker + rolf.berenbrinker&miele.de +44740 + Innovapuglia S.p.A. + Nicola Saracino + n.saracino&innova.puglia.it +44741 + IAV GmbH + Karsten Prejawa + iana-oid&iav.de +44742 + Aunigma Network Security Corp. + Karl Elliott + kelliott&aunigma.net +44743 + POMA + James PARADON + it_poma&poma.net +44744 + Scanline VFX LA Inc. + Viet Nguyen + tech&scanlinevfx.com +44745 + Tremor Video + Presley Acuna + pacuna&tremorvideo.com +44746 + Sea Surveillance AS + Roman Efanov + re&seasurv.net +44747 + VoiceTrust + Jan Nitecki + jnitecki&voicetrust.com +44748 + Wild Kilt Pty Ltd + Tommy Braas + tommy&wildkilt.com +44749 + Malwarebytes Corporation + David Cahill + dcahill&malwarebytes.org +44750 + Edifecs Inc + Vik Sachdev + viks&edifecs.com +44751 + Tell International Inc. + Rabih Ismail + rismail&tell-inc.com +44752 + Quamatik doo + Gustavo Perez + gustavo.perez&quam.hr +44753 + UniQ-CA B.V. + Frans Bolk + frans.bolk&uniq-id.com +44754 + Patent Agency Tegas Llc + Roman E. Paliy + info&pategas.com +44755 + Online Development Inc. + Jim Webber + jwebber&oldi.com +44756 + AgNO3 GmbH & Co. KG + Moritz Bechler + bechler&agno3.eu +44757 + ANIMO LIMITED. + Yoshimichi Akiyama + yakiyama&animo.co.jp +44758 + ACTRONICS K.K. + Katsumi Sasaki + system&actronics.co.jp +44759 + Turbo Systems Co., Ltd. + Hiroyuki Kobayashi + koba&turbosystems.co.jp +44760 + Vsoft Colombia Ltda. + Jonathan Obando + jobando&pcsistel.com +44761 + VoxIdea + Jonathan Rozo + jonathan&voxidea.com +44762 + IBS PROJECT LLC + Talip Ilyassov + talip.ilyassov&ibsproject.kz +44763 + Steven Sloane + Steven Sloane + oid-admin&1e303.uk +44764 + bacq.ru + Andrey Borisenko + admin&bacq.ru +44765 + PTL Limited + Denis Vella + netadmin&ptl.com.mt +44766 + Katao Ops + Sven Bender + mail&svenbender.de +44767 + Tieto Sweden AB + Anders Strandberg + anders.strandberg&tieto.com +44768 + Grayson bvba + Wim Coulier + wim&coulier.org +44769 + Tsinghua University + Changqing An + acq&tsinghua.edu.cn +44770 + NYNEX satellite OHG + Frederik Kriewitz + f.kriewitz&nynex.de +44771 + Apsidis + Maniel Sotomayor + maniel.sotomayor&gmail.com +44772 + West Genesee Central School District + Art LaFlamme + helpdesk&westgenesee.org +44773 + Sistemas de Seguridade A1 + Mario Rial Amado + mario&seguridadea1.com +44774 + Alameda Health Services + LouAnn Raucher + lraucher&alamedahealthsystem.org +44775 + City Network International AB + Joel Svensson + joel.svensson&citynetwork.eu +44776 + Hedberg Produtions + Johan Hedberg + johan&hedbergproductions.com +44777 + MEAZON SA + Stelios Koutroubinas + s.koutroubinas&meazon.com +44778 + thinkindifferent.net + lily wilson + hotaru&thinkindifferent.net +44779 + Unity Health Insurance + Jeremy Hermsdorf + netadmin&unityhealth.com +44780 + BikePics + Denis Markwell + help&bikepics.com +44781 + Connetos Company + Luo Feng + tech&connetos.com +44782 + Xiamen Kehua Hengsheng Co.,Ltd + Kewen Fu + kecan&kehua.com +44783 + Push To Talk International Ltd + Kevin Broadey + hostmaster&ptti.co.uk +44784 + Hitzigrath + Mischa Hitzigrath + mischa&hitzigrath.de +44785 + IS2 Intelligent Solution Services AG + Florian Wöhrl + admin&is2.de +44786 + Stadtwerke Jena GmbH + Bjoern Wever + dv&stadtwerke-jena.de +44787 + Corporate Business Systems LLP. + Nickolay Krivtsov + nkrivtsov&cbs.kz +44788 + Goethe-Institut e. V. 
+ Benjamin Bauer + benjamin.bauer&goethe.de +44789 + Sarah Cannon Research Institute + Nicholas Breaux + nicholas.breaux&sarahcannon.com +44790 + Ingenium Automation + Engineering Pte Ltd + Kannan Balaji + balaji&ingeniumiae.com +44791 + Chengdu Global Capsheaf Solution Technology Co., Ltd. + Zhou Ji + jo121fox&gmail.com +44792 + AzuriteUK (Valerij Prusakov) + Valerij Prusakov + valerkafromuk&yahoo.co.uk +44793 + GAMING1 SPRL + DONEA Bertrand + bertrand.donea&gaming1.com +44794 + Chickasaw Holding Company + Billy Staggs + ctcadmins&chchq.net +44795 + Tridigy, LLC + Laimonas Slecpenka + iana&tridigy.com +44796 + SFO Technologies Pvt. Ltd. + Dileep Kumar S. + dileepkumar.s&nestgroup.net +44797 + Stoat Works + Martin P. Hellwig + martin&stoatworks.com +44798 + TechsMix LLC + Jared Ledvina + jared&techsmix.net +44799 + audioalgorithms + Jakob Ashtar + info&audioalgorithms.com +44800 + it7 s.r.o. + Lukas Stana + it7&it7.sk +44801 + Sole proprietorship Ilya Gruzinov + Ilya Gruzinov + ilya.gruzinov&gmail.com +44802 + IT-Labor + tom ate + ich-hab-was-gefunden&web.de +44803 + Whatson-Web + Jordi Grimaud + jordi.grimaud&gmail.com +44804 + FCM TECHNOLOGY S.r.l. + Fabio Cateno Burgarello + fcmtechnology&gmail.com +44805 + Sea Island Cancer Center + Terrea R. Parker + cancercenter&embarqmail.com +44806 + Universidade Federal da Integração Latino-Americana + Lucas Guilherme Diedrich + lucas.diedrich&unila.edu.br +44807 + Infor*Med + Roberto Alfonso + roberto.alfonso&praxisemr.com +44808 + tsblog.org + Tone Southerland + tone&s6mail.net +44809 + HealthStream, Inc. + Michael Lopez + michael.lopez&healthstream.com +44810 + arales.org + Yoshiaki UCHIKAWA + yoshiaki&arales.org +44811 + Funambolo Technologies Private Limited + K. Shahanawaz Khan + kskhan&funambolo.co.in +44812 + JSC "Institute of Automation of Energy Systems" + Alexey Petrov + alexey&iaes.ru +44813 + Beijing Easynetworks Technology Co.,Ltd. + Charles Wang + 13910122958&126.com +44814 + Genexyx Srl + Damiano Scrigni + admin&genexyx.com +44815 + Mauro Altamura + Mauro Altamura + mauro&mauroaltamura.com +44816 + South Carolina Oncology Associates + Wade Bonner + wbonner&sconcology.net +44817 + Idaho Division of Veterans Services + Clinton Dale + clinton.dale&veterans.idaho.gov +44818 + CADMO CONOCIMIENTO S.L. + Antonio Roncero + soporte&cadmoweb.com +44819 + Store Electronic Systems S.A. + Tarik FILALI Ansary + tarik.filali&ses-esl.com +44820 + Chlorine Hilarva + Manuel Lanctot + sensoz&gmail.com +44821 + Beijer Electronics AB + Niklas Sjöström + niklas.sjostrom&beijer.se +44822 + DC Matrix Internet S/A + Danilo Bento + danilo.bento&matrix.net.br +44823 + Airborne Interactive + Ian Walberg + ian.walberg&airborne.aero +44824 + Opravil + Jan Opravil + jan.opravil&opravilovi.com +44825 + Lautersoft + Rainer Lauterbach + rainer.lauterbach&lautersoft.de +44826 + Fire Protection Service Corporation + Andrew T. Fry + oid&kencosecurity.com +44827 + epcan GmbH + nils waning + nw&epcan.de +44828 + SilverNet Ltd. + Paul Ryan + pryan&silvernet.com +44829 + Ganymed Pharmaceuticals AG + Dr. 
Bastian Holthöfer + it&ganymed.ag +44830 + Trio Motion Technology Ltd + Anthony Matthews + tmatthews&triomotion.com +44831 + Nuvotex Solutions GmbH & Co KG + Daniel Nachtrub + dn&nuvotex.de +44832 + Product Ventures Ltd + Christopher Smith + administrator&productventures.com +44833 + peteheilig.com + Pete Heilig + pete&peteheilig.com +44834 + SYSTEMS AND TECHNOLOGIES, ZAO + Victor Sizov + veector&mail.ru +44835 + Oncology & Hematology Consultants of Houston + Nabeel Shalan + nabeel_shalan&yahoo.com +44836 + Breqwatr, Inc. + Jason Harley + jason.harley&breqwatr.com +44837 + Absec Malaysia Sdn Bhd + Mohd Khalemi + khalemi&absecmy.com +44838 + chinasoft-tokyo corporation + xu kebin + xukebin&chinasoft-tokyo.co.jp +44839 + Mega Designs Pvt. Ltd. + Soomit Banerjee + soomitb&gmail.com +44840 + Intrasonics Ltd + Paul Trinder + itadmin&intrasonics.com +44841 + Lyngmo.Net + Runar Lyngmo + ianapen&lyngmo.net +44842 + Fairbanks North Star Borough School District + Robert Hingst + robert.hingst&k12northstar.org +44843 + Hasin Technology + Mohammad Banisaeid + banisaeid&hasintech.com +44844 + Nobel Biocare Services AG + Jacopo Fumagalli + info.switzerland&nobelbiocare.com +44845 + Dillert + Joerg Dillert + jdi&gmx.net +44846 + SuperNetwork s.r.o. + Marek Becka + marek.becka&superhosting.cz +44847 + Yuri Bugelli + Yuri Bugelli + yuri.bugelli&gmail.com +44848 + COSYLVAL + RUFFIE Marc + marc.ruffie&cosylval.fr +44849 + Five Below, Inc. + Joshua Cloud + oid&fivebelow.com +44850 + United States Antarctic Program + Kristofer Boyd + kristofer.boyd.contractor&usap.gov +44851 + WINK Streaming + Michael McConnell + michael&winkstreaming.com +44852 + Energy Communications Management Exchange, sub-division of Future DOS Research & Development Inc. + Avygdor Moise + info&ecmx.org +44853 + KVL COMP Kft. + Mátyás Stubeczky + matyas.stubeczky&kvlcomp.hu +44854 + linmaonline.de + Markus Fluer + webmaster&linmaonline.de +44855 + h-dy's Home + Dingyuan Hu + dingyuan&h-dy.org +44856 + Art Developers Corporation + Koichi Taga + iana-regist&artdev.co.jp +44857 + Broadsound Corporation + Steve Yao + steveyao&broadsound.com.tw +44858 + Elkjøp Nordic AS + Maqsood Ali Bhatti + maqsood.ali.bhatti&elkjop.no +44859 + Mobik d.o.o. + Gašper Drolc + gasper.drolc&mobik-ics.com +44860 + Emanomedia GmbH + Alexander Trapp + alexander.trapp&emanomedia.com +44861 + Catoctin Systems + Jason Callaway + admin&catoctinsystems.com +44862 + IT SYSTEMS srl + Max Vegni + m.vegni&itsystems.it +44863 + Unitow Services (1978) Ltd. + Gerald Chudyk + gchudyk&ekotech.com +44864 + Bethel Blood and Cancer Center + Melissa Gutierrez + melissa.bbcc&outlook.com +44865 + Bay Area Regional Medical Center + Mark Albright + malbright&barmc.us +44866 + Benedikt Frenzel Systems + Benedikt FRENZEL + pen&bfr.name +44867 + Etic Telecom + Mickael Chazaux + mickael.chazaux&etictelecom.com +44868 + Earthling Interactive + Dan Stevens + stevens&earthlinginteractive.com +44869 + Zen Solutions Limited + Colin Foster + who.is&zen.net.nz +44870 + JSC "SKB Electronmash" + Mamut Naum Ilyich + skbelectronmash&ukr.net +44871 + Universidad de Cuenca + María José Torres Maldonado + mariajose.torres&ucuenca.edu.ec +44872 + FiberLabs Inc. 
+ Yoshinori Mimura + oidadmin&fiberlabs.co.jp +44873 + CJSC «INTERSET» + Andrey Tumanian + atfreebox&gmail.com +44874 + Instytut Matematyczny Polskiej Akademii Nauk + Tomasz Zajdel + tzajdel&impan.pl +44875 + Reykjavikurborg + Tomas Gudmundsson + hostmaster&reykjavik.is +44876 + Hrvatska poštanska banka, dioničko društvo + Marijan Žužić + marijan.zuzic&hpb.hr +44877 + IMMA Inc. + Svyatoslav Noskov + snoskov&imma.ru +44878 + Datcent Technology Co.,Ltd. + ShuQiang Li + lishuqiang&datcent.com +44879 + Cunz RaD Ltd. + Sascha Cunz + sascha&cunz-rad.com +44880 + Harte & Lyne Limited + James B. Byrne + support&harte-lyne.ca +44881 + ikara + Koji Araki + araki.koji&ikara.jp +44882 + Techno-com + Maxim Melnikov + melnikovmv&bk.ru +44883 + Iw1BI + Domenico Casarino + domenico.casarino&gmail.com +44884 + OneVision Software AG + Christian Zietlow + it&onevision.com +44885 + Baltech AG + Robert Hölzl + robert.hoelzl&baltech.de +44886 + Gainspeed, Inc. + Ram Prasad + ram&gainspeed.com +44887 + The Kansas City Southern Railway Company + Michael Chung + KCSInfoSec&KCSouthern.com +44888 + klolik.org + Bartłomiej Korupczyński + klolik79&gmail.com +44889 + Denver Public Library + Heath Young + hyoung&denverlibrary.org +44890 + BKtel Photonics SAS + Cyril Le Goas + legoas&bktel.com +44891 + Xero + David Garner + david.garner&xero.com +44892 + Primum Health IT S.L. + David de Mena García + info&primum.es +44893 + Software Diversions, Inc. + S. Colin Leister + colin&softwarediversions.com +44894 + Napatech + Alex Agerholm + aa&napatech.com +44895 + Bitcraze AB + Arnaud Taffanel + arnaud&bitcraze.se +44896 + Secureloin + Teguh P. Alko + chain&rop.io +44897 + Vista Oncology + Callie Hills + callieh&vista-oncology.com +44898 + Insignis + Roberto da T. Goncalves + roberto.goncalves&insignis.com.br +44899 + Code L7 + Alvin Garcia Chaves + alvin.garcia&code-l7.co +44900 + ReminderMedia + Information Technology + systems&remindermedia.com +44901 + Xiamen Sunwe Technology CO.,Ltd + Decai Zhang + zhangdc&china-sunwe.com +44902 + ShareTech Information Co., LTD. + Lois Hsieh + ting&sharetech.com.tw +44903 + Stratech Systems Limited + Nurra Trog + nurra_farhana_mahat&StratechSystems.com +44904 + Media Stream Co., LTD. + Kevin Lee + gklee&media-stream.co.kr +44905 + NightTrade Group, Inc. + Martin Estok IV + martin.estok&nighttrade.org +44906 + TBDD, LTD + Aleksandr Vavilin + vavilin&tcobdd.ru +44907 + nVisionIT Pty Ltd + Stephen Jordaan + stephenj&nvisionit.co.za +44908 + CARTIERE DEL GARDA + Tonidandel Flavio + flavio.tonidandel&lecta.com +44909 + sys-pro GmbH + Joachim Adler + joachim.adler&sys-pro.de +44910 + ST-Vitrinen Trautmann GmbH & Co. KG + Arne Schlueter + schlueter&st-vitrinen.de +44911 + Alpha ENGINEERING Tunisia + Taher AMMARI + taher.ammari&alpha-engineering.net +44912 + 2Be-FFICIENT + Haga Rakotoharivelo + haga&2befficient.fr +44913 + RedSocks B.V. + Rick Hofstede + rick.hofstede&redsocks.nl +44914 + Genting Casinos UK + Iain Smith + iain.smith&gentinguk.com +44915 + Silex Industrial Automation Ltd. + Márton Ughy + marton.ughy&silex.hu +44916 + MJog Limited + Mark Howells + mark.howells&mjog.com +44917 + LMpro GmbH + Bernd Ahlfeld + bahlfeld&lmpro.de +44918 + Nethix S.r.l + Andrea Girardi + a.girardi&nethix.com +44919 + Avisi B.V + Jeroen Veldhorst + j.h.veldhorst&avisi.nl +44920 + Argyle Security Inc + Neal Horman + nhorman&argylesecurity.com +44921 + Exele + Dane Overfield + dane&exele.com +44922 + Echo Technology Solutions + Sejo Jahic + support-echots&echots.com +44923 + Remco ICT + R. 
Peters + remco&remco-ict.nl +44924 + Open Connectivity Foundation (formerly 'AllSeen Alliance, Inc.') + Aja Murray + staff&openconnectivity.org +44925 + East Kent Hospitals University Foundation Trust + Mark Williams + mark.williams28&nhs.net +44926 + Amdocs Network Solutions + Christopher Coote + christopher.coote&amdocs.com +44927 + SyS LLC. + Sergey Randitskiy + rsa&systemco.ru +44928 + Caligare, s.r.o. + Jan Nejman + nejman&caligare.com +44929 + NetKnights GmbH + Cornelius Kölbel + cornelius.koelbel&netknights.it +44930 + Fisher Investments + Security Administrator + security.administrator&fi.com +44931 + SyncWise Systems & Technology + Aurel Chiriac + aurel.chiriac&syncwise.ro +44932 + Panasonic System Communications Company of North America + Ildar Nizami + ildar.nizami&us.panasonic.com +44933 + In Ardua Petit + Tim Hedtke + thedtke&inarduapetit.org +44934 + G-RAY + heleihua + leihua_he&ultrasilicom.com +44935 + Digital Telecommunication Systems WA PTY LTD + Justin Swain + justin_swain&iinet.net.au +44936 + Stratech iVision Pte. Ltd + LOW CHENG SENG + LOW_Cheng_Seng&stratechsystems.com +44937 + Inoventica technologies + Mike Kogan + regs&inoventica-tech.ru +44938 + Charité - Universitätsmedizin Berlin + Michael Schieke + michael.schieke&charite.de +44939 + AVA-PETER Ltd. + Pavel Finogeev + finogeev-pk&avaclinic.ru +44940 + Nemiroff + Aleksey Rud + aleksey.rud&nemiroff.pro +44941 + GCP German Cathodic Protection GmbH & Co. KG + Torsten Krebs + krebs&gcp.de +44942 + Stadt Duelmen + Peer Wulff + it&duelmen.de +44943 + Inlink OÜ + Ilvar Oja + iana&inlink.ee +44944 + Rock Flow Dynamics + George Zhilyakov + george.zhilyakov&rfdyn.com +44945 + TEKTELIC Communications Inc. + David Tholl + dtholl&tektelic.com +44946 + Danish Railway (DSB) + Finn Fleron + IT-Infrastruktur&dsb.dk +44947 + Internet Security Research Group + Josh Aas + josh&letsencrypt.org +44948 + Pacific Institute for the Mathematical Sciences + Ian Allison + iana&pims.math.ca +44949 + NJK Aviation AG + Nicholas John Koch + koch&njk.aero +44950 + automation-engineering + Dmitriy Murashov + dmitriy.murashov&automation-engineering.ru +44951 + SSR Engineering, Inc. + Kevin Miller + kmiller&ssreng.com +44952 + AC/ BC Electrical Company + Bob Crnogorac + bobacbc&rogers.com +44953 + Flat World Resource Technologies Limited. + Stephen Van + stephen1w&yeah.net +44954 + Keweon + Torsten Jahnke + keweon&outlook.com +44955 + Connexo Tecnologia e Comunicação EIRELI + Hely Firmino de Sousa + hely.sousa&connexo.com.br +44956 + Savvi Inc. + Jay Fenton + ops&savvi.io +44957 + Robotron Datenbank-Software GmbH + Marco Friebe + marco.friebe&robotron.de +44958 + mse GmbH + René Büdinger + rbuedinger&mse-gruppe.de +44959 + Carl Berberich GmbH + Colin Rust + colin.rust&berberich.de +44960 + Craltech Electrónica, S.L. + Cristobal Campoy + soft&craltech.com +44961 + Valley Medical Center, PLLC + Jake Hinkelman + internalit&valleymedicalcenter.com +44962 + Foreman Instrumentation And Controls + Kevin Kirmse + kdkirmse&syntemp.com +44963 + gueux.org + Félix Sipma + admin&gueux.org +44964 + Vivid Orange Limited + Matthew Keay + matthewkeay&vividorange.co.uk +44965 + pascher.IT + Peter Pascher + info&pascher.it +44966 + Jingtu Printing Systems Co., Ltd + Zongxiong Nishino + hzx&jtprint.cn +44967 + Beijing Heweinet Technology Co.,Ltd (北京和维网通科技有限公司) + Chen Xuedong (陈学栋) + chenxd&heweinet.com +44968 + May China + Scott Flowers + scott.flowers&gmail.com +44969 + CLINICA DE CANCER Y ENFERMEDADES DE LA SANGRE, CSP + MARIA DEL C. 
BLAS + clinicadecancerdrvazquez&hotmail.com +44970 + Thread Group, Inc. + Martin Turon + mturon&nestlabs.com +44971 + Compen Embedded Software + Pieter Compen + pieter&compen.net +44972 + Alameda Hospital + LouAnn Raucher + lraucher&alamedahealthsystem.org +44973 + Kantar Media France SAS + Tanguy MILLET + tanguy.millet&kantarmedia.com +44974 + Sonos, Inc. + Jonathan Wenocur + jonathan.wenocur&sonos.com +44975 + FlexCoders Ltd + Steven David West + iana&flexcoders.co.uk +44976 + Onegini B.V. + Stein Welberg + developers&onegini.com +44977 + Reduxio Systems + Jacob Cherian + standards&reduxio.com +44978 + Telefónica Soluciones de Criptografía, S. A. + Luis Recuerda Santiago + luis.recuerdasantiago&telefonica.com +44979 + Tsmart Inc. + Ahn, Byung-Keun + bkahn&tsmartall.com +44980 + MEDiA LiNK Co.,Ltd. + Yukihiro Taniguchi + yukihiro.taniguchi&medialink-ml.co.jp +44981 + LLC "PromIT" + Gluhov Alexey + personal&promit-ek.ru +44982 + INNEXIV, INC. + Muhammad Hassan + mhassan&innexiv.com +44983 + ARTIS GmbH + Dirk Lange + dirk.lange&artis.marposs.com +44984 + EPAM Systems + Alexander Aleksiuk + aliaksandr_aleksiuk&epam.com +44985 + Tecnologias Corporativas SC + Jonathan Salazar Santos + jonanx779&hotmail.com +44986 + Taglio LLC + GW Habraken + gw&tagliosc.com +44987 + Temple University + Jorj Bauer + jorj&temple.edu +44988 + Nxtera Ltd + David Adcock + dave.adcock&nxtera.com +44989 + Multi Parts Supply USA, Inc. + Nathan Brown + nathan.brown&multiparts.net +44990 + TONGFANG INDUSTRIAL CO.,LTD + li dong + zdyzlidong&126.com +44991 + Ameriprise Financial + Robert Buerck + Robert.L.Buerck&ampf.com +44992 + Amscreen Group Limited + James Denkaat + jdenkaat&amscreen.co.uk +44993 + BCP Co. + vahid gilassi + vahidgilassi&yahoo.com +44994 + Umbrellium Ltd. + Samuel Mulube + sam&umbrellium.co.uk +44995 + Regional Cancer Care Associates, Little Silver Division + Debbie Wolf + dmayo&hemoncnj.com +44996 + JMP Group Inc. + Charles Yao + cyao&jmpg.com +44997 + Emery Telcom + Josh Galvez + noc&emerytelcom.com +44998 + Sichuan Zero Gravity Technology CO.LTD + Yu HAN + hy&zg.hk +44999 + Ltd GoldLans + Andrii Bratanich + andrii.bratanich&gmail.com +45000 + Symbicon Ltd + Kalle Heikkinen + kalle.heikkinen&symbicon.fi +45001 + Northern Virginia Hematology Oncology Associates + Ronnie Owen + nurse&novahemonc.comcastbiz.net +45002 + CompuGroup Medical Polska sp. z o.o. + Dariusz Trochimiuk + netraad&cgmpolska.pl +45003 + Egag, LLC + Gage Hainer + ghainer&egagllc.com +45004 + SMARTRAC N.V. + Sidnei Moreira + iana-pen&smartrac-group.com +45005 + Advanced Alloy Processing + James Wazorick + jwazorick&maincompanies.com +45006 + Wuestenrot poistovna, a.s. + Ladislav Schramm + uit&wuestenrot.sk +45007 + Trial Software Laboratories, Inc. + Nobuhiro Haketa + iana_pen_registry_contact&trialsoftware.com +45008 + Things.Expert LLC + Daniel Tibor Fuchs + daniel.fuchs&things.expert +45009 + Thomas Hassall Anglican College + Werner Kasselman + wk&thac.nsw.edu.au +45010 + Youhua + Nision Wu + wujunjie&youhuatech.com +45011 + Ming Dynasty AvantLexa Firm + James Kendrick Sneckner + jamesthinkpad&gmail.com +45012 + Miraeson + SangHyun Baek + billy&miraeson.com +45013 + Beijing Gehua CATV Network Co.,Ltd. + Dongxi Wu + wudongxi&bgctv.com.cn +45014 + Andra sp. z o. o.
+ Piotr Szczerek + snmp&andra.com.pl +45015 + foo.sh + Timo Mäkinen + iana&foo.sh +45016 + Kouba & Partner + Martin Kouba + iana&kouba.at +45017 + I3 Comércio de Computadores Ltda Epp + Paschoal Mazziero Neto + paschoal&i3si.com.br +45018 + QualiStream + Romary Sonrier + rsonrier&qualistream.com +45019 + Infinite AI, Inc. + Derek Knutsen + dknutsen&infiniteai.com +45020 + Bitnasium Co., Ltd. + Utthawit Ratchatapoom + utthawit&gmail.com +45021 + Health Plan Partners LLC + Robert Butler + robert.butler&hpprx.com +45022 + digivod gmbh + Karsten Fourmont + k.fourmont&digivod.de +45023 + Chair Four Development Group LLC + Daniel Sutcliffe + dans&chairfour.com +45024 + BTS Software Solutions, LLC + Erich Izdepski + erich.izdepski&bts-s2.com +45025 + Fujitsu Hokuriku Systems Limited + Takiguchi Naruhito + takiguti&jp.fujitsu.com +45026 + Mynavi Corporation + Hiroaki Suzuki + suzuki.hiroaki&mynavi.jp +45027 + The Center For Mental Health + Jason Adragna + it&centermh.org +45028 + Debugo + Nicolas CHAUVENET + nicolas&debugo.fr +45029 + Fengyz Technology CO.,Ltd. + Yuanzhen Feng + fengyuanzhen.osb&vsc.com +45030 + Lagopus switch project + Yoshihiro Nakajima + ynaka&lagopus.org +45031 + Engineering Centre Energoservice LLC + Vladimir Bovykin + f.orlov&ens.ru +45032 + JustNow + Jordi Cabré + jc.justnow&gmail.com +45033 + synyx GmbH & Co. KG + Marc Sommer + sommer&synyx.de +45034 + Millimetrica s.r.l. + Aldo Cagno + a.cagno&millimetrica.it +45035 + TelSIP Communication + Benoit Raymond + braymond&telsip.ca +45036 + NIC Hosting + Juraj LUTTER + juraj&lutter.sk +45037 + Hauke Bartsch + Hauke Bartsch + hbartsch&ucsd.edu +45038 + Enman + Emmanuel Poitier + contact&enman.fr +45039 + Central Bank of the Republic of Azerbaijan + Vahid Gurbanli + vahid_gurbanli&cbar.az +45040 + BBH Media + Kris Reeves + kris.re&bbhmedia.com +45041 + Vieira Filho Tecnologia Eletronica + Paulo Steigleder + paulo.steigleder&vieirafilho.ind.br +45042 + Officeworks Ltd + Stephen Hart + shart&officeworks.com.au +45043 + Argus Hosting, LLC + Derick Pelser + tech&arguscloud.net +45044 + TangoRythm + Bilel Hawari + bilel&tangorythm.com +45045 + DELNET + Jacques DELMAS + officiel&delnet.fr +45046 + lpk + Lutz-Peter Kurdelski + lutz-peter&kurdelski.com +45047 + Crypto Software Chryselius + Toralf Chryselius + toralf&chryselius.de +45048 + MARIA - DEBORA + Łukasz Piotrowski + lpiotrowski&objectivcode.com +45049 + Gudok LLC + Michael Dokolin + mail&ipgudok.com +45050 + Bjørn Ludvig Langaas Johansen + Bjørn Ludvig Langaas Johansen + iana-pen&bjornj.no +45051 + Finalsite + Carl P. Corliss + carl.corliss&finalsite.com +45052 + SequoiaDB Ltd. + Tao Wang + taoewang&sequoiadb.com +45053 + Information and Telecom Systems India Pvt. Ltd.
+ Milind Pandharipande + pmilind&info-telecomsys.com +45054 + Tony Ditchfield + Tony Ditchfield + tony.ditchfield&gmail.com +45055 + ISAE + David MOUCHOIR + david.mouchoir&isae.fr +45056 + Kassenärztliche Vereinigung Schleswig-Holstein + Sven Grünewald + sven.gruenewald&kvsh.de +45057 + COPALP + Jerome FOLLUT + jf&copalp.com +45058 + CCP Games + Sturla Thorsson + sturla&ccpgames.com +45059 + Bauduin Raphaël + Bauduin Raphaël + rb&raphinou.com +45060 + Otis R Bowen Center for Human Services, Inc + Joseph Van Overberghe + joe.vanoverberghe&bowencenter.org +45061 + Gymnasium Geretsried + Wild Axel + Axel.Wild&gymger.de +45062 + Southern Cancer Center + Lauren Pettis + lauren.pettis&southerncancercenter.com +45063 + SEAtech Technology Inc + Pengfei Hu + hupengfei&seatech-global.com +45064 + China Mobile Hong Kong Company Limited + Komix Hui + komixhui&hk.chinamobile.com +45065 + Insyde + Y.C. Lin + yc.lin&insyde.com +45066 + SIP Cantabria S.L. + Luis Carlos Peña Bocos + luis&sipcantabria.net +45067 + Dude Solutions Inc. + Wes Carver + IANAAdmin&dudesolutions.com +45068 + PROTECH SYSTEMS PRIVATE LIMITED + ATUL BAL + protech&aprotech.com +45069 + TOMinf Tomasz Tomaszewski + Tomasz Tomaszewski + tominf&tominf.com +45070 + Reservoir Labs, Inc. + Nicole Leung + leung&reservoir.com +45071 + tetraguard systems GmbH + Stefan Kistner + skistner&tetraguard.de +45072 + Catalyst Paper + Karim Dhanani + karim.dhanani&catalystpaper.com +45073 + Pacific Seafood Group + Brent Glasgow + oid&pacseafood.com +45074 + TTSSB corp + Balaji Sunder Rajan + bsfrc_2000&yahoo.com +45075 + Electron Jungle, LLC + Kevin Wagner + iana&electronjungle.com +45076 + HamLogin.com + Tyler Sarna + hamlogin&hamlogin.com +45077 + Sestek Ses ve Iletisim Bilgisayar Teknolojileri San. ve Tic. A.S. + M. Levent Arslan + levent.arslan&sestek.com +45078 + QoS Solutions + Jeffrey Thomas + jthomas&qos-solutions.com +45079 + i4p informatikai kft. + Rózsahegyi Zsolt + info&i4p.com +45080 + Kerio Technologies, Inc. + Martin Meduna + hostmaster&kerio.com +45081 + Increase Qingdao Information Technology Co., Ltd. + Zhengxin Sun + sunzx&qdincrease.com +45082 + PT. Tekno Inovasi Asia + Rachmat Kusnadi + rachmat.kusnadi&teknoasia.com +45083 + Intility AS + Arne Klæboe + arne.klaeboe&intility.no +45084 + Arbeitsgemeinschaft Dresdner Studentennetz + Sebastian Schrader + software&wh2.tu-dresden.de +45085 + Bibliotheca Alexandrina + Mohamed Ellotf + Sysadmin&bibalex.org +45086 + Advanced Imaging Technologies (Pty) Ltd + Jacques Basson + basson&ait-sa.com +45087 + Institut Catala de Finances + Josep Maria Guiu Baiget + sistemes&icf.cat +45088 + Volkswagen Møller Bilfinans AS + Jan-Fredrik Kleven + jfk&vwfs.no +45089 + ICANN + Naela Sarras + naela.sarras&icann.org +45090 + Mushroom Networks, Inc. + Cahit Akin + hakin&mushroomnetworks.com +45091 + Cisco Sera + J. Huxley Barbee + barbee&ciscosera.com +45092 + Silectica + Eddie Luaces + eddie.luaces&silectica.com +45093 + SUMOMO Computer Association + Randy Li + tp&soulik.info +45094 + UAB "VAATC" + Darius Palšis + dariusp&vaatc.lt +45095 + INAF - Osservatorio Astrofisico di Arcetri + Francesco Tribioli + tribioli&arcetri.inaf.it +45096 + INTERNATIONAL MEDICAL SOLUTIONS INC. + VITTORIO ACCOMAZZI + vaccomazzi&imstsvc.com +45097 + Graphite Systems, Inc. + Mark Himelstein + mark&graphitesystems.com +45098 + Ex Cathedra Solutions Ltd. + Stephen Durbin + noc&excathedra.co +45099 + Pentaho Corporation + Ryan Schoeffler + rschoeffler&pentaho.com +45100 + Cloud Vision Networks Technology Co.,Ltd. 
(cvn) + William Yu + yweiquan&cvnchina.com +45101 + Yew Tree Services, Inc. + Ronaldo Chan + loloski&yahoo.com +45102 + Yawns.com Limited + Kevin Iddles + domain.admin&yawns.com +45103 + Satel Spain, S.L. + Javier de Elías + javier.elias&satelspain.com +45104 + infoworxx GmbH + Sebastian Lemke + s.lemke&infoworxx.de +45105 + Samsung Poland R&D Center + Sebastian Cis + s.cis&samsung.com +45106 + Subsentio + Shawn Hannon + Shawn.Hannon&subsentio.com +45107 + IntegraOptics Inc + Jean Sullivan + jsullivan&integraoptics.com +45108 + Jetmobile + Jean-Francois d'Estalenx + webmaster&jetmobile.com +45109 + Chaos Labs OOD + Trayan Azarov + trayan.azarov&chaoslabs.bg +45110 + IzumoBASE, Inc. + Tomohumi Mutuda + mutuda&izumobase.com +45111 + Elbrus-RW + Alex Kushick + alex.kushick&elbrus-r.com +45112 + Christian Wilhelmi + Christian Wilhelmi + christian.wilhelmi&chriwi.net +45113 + OPTIMANS + Christopher Niznik + support&optimans.com +45114 + PALS Elektronik + Cihat Aydemir + cihat.aydemir&pals.com.tr +45115 + Ajenti Solutions + Faadiel Hendricks + faadiel.hendricks&gmail.com +45116 + LoJack Corp. + Pedro Pinheiro + ppinheiro&lojack.com +45117 + University of North Carolina at Charlotte + David McIntosh + iam-group&uncc.edu +45118 + ehaaihee + Michal Golebiowski + michal.golebiowski&hotmail.co.uk +45119 + SBONGILE PROJECT + jose manuel ernesto chunguane + suparsquare&outlook.com +45120 + iba AG + Manuel Koenig + manuel.koenig&iba-ag.com +45121 + Hangzhou Guangwei Technology Co., Ltd + Lizhong Liu + lizhong.liu&hotmail.com +45122 + M & T Bank Corporation + Jack Stewart + jpstewart&mtb.com +45123 + Suzhou Industrial Park Kejia Automation Co., Ltd + Salina Yu + salina&kejia.com +45124 + De Haan + Martijn de Haan + mdehaa&hotmail.com +45125 + Netcom Europa S.L. + Mario Emmanuel + info&netcomeuropa.com +45126 + Sarada Gummadi MD PA + Sarada Gummadi + vsarada&hotmail.com +45127 + ProfitBricks GmbH + Jan Krause + jan.krause&profitbricks.com +45128 + Infotrage Limited + Petr Kutalek + operations&infotrage.com +45129 + BID Initiative Tanzania - PATH + Henry Mwanyika + hmwanyika&path.org +45130 + r3k + Markus Witt + pen&feuerrot.org +45131 + Onelogin, Inc. + Christian Pedersen + ops&onelogin.com +45132 + ADD-Engineering BV + Ed Gouweloos + pen.gouweloos&add.nl +45133 + Kristian Söderholm + Kristian Söderholm + kristian&codepusher.org +45134 + XCore Company Limited + Naveed Alam Khan + pathan.pk&hotmail.com +45135 + AQUASOFT spol. s r.o. + Radek Nemec + r.nemec&aquasoft.eu +45136 + e-Cop Pte Ltd. + Albert Lim + albert.lim&e-cop.net +45137 + Nanoha Project + Naoyuki MORITA + nanoha&saigyoji.sakura.ne.jp +45138 + Huwomobility Inc. + Qinan Mao + qinanmao&gmail.com +45139 + Maruganzer + Alexander Marugin + alexander.marugin&gmail.com +45140 + Green Vict Technology Co., Ltd + Jevy Ren + rjw&gvtt.com.cn +45141 + Onomichi ikakikai Co., Ltd. + Onomichi hirokazu + info&onomichi-ika.com +45142 + Corporate West Computer Systems, Inc. + Evan Hall + evan&corpwest.com +45143 + Datawise Systems, Inc. 
+ Wilson Yee + wilson&datawisesystems.com +45144 + American Airlines Federal Credit Union + Russell Keith + russell.keith&aacreditunion.org +45145 + Hivemind Engineering + William Pratt + postmaster&hivemindengineering.com +45146 + Primestone Network + Chuansen Liu + liuchuansen&primestone.com.cn +45147 + Datu Health + Sanjay Sharma + Sanjay.sharma&datuhealth.com +45148 + Universität Koblenz-Landau + Uwe Arndt + arndt&uni-koblenz.de +45149 + NoisyPeak + Sergey Tatarchenko + st&noisypeak.com +45150 + WAGNER AG + Managed Backend + tm-system&wagner.ch +45151 + MEISLAB + Simon Meister + administrator&meislab.ch +45152 + Parchment Inc. + Andrew Lillie + alillie&parchment.com +45153 + Drillinginfo + Robert Bastian + robert.bastian&drillinginfo.com +45154 + ELES, d.o.o., sistemski operater prenosnega elektroenergetskega omrezja + Tomaz Rus + tomaz.rus&eles.si +45155 + Democratic Alliance + Adrian Frith + adrianf&da.org.za +45156 + The IcedTea project (HeapStats) + Shinji Takao + oss-java-support&lab.ntt.co.jp +45157 + Medgate AG + Oliver Schmid + administrator&medgate.ch +45158 + Infracom S.p.A. + Marco Bessone + marco.bessone&infracom.it +45159 + Heinz Nixdorf MuseumsForum GmbH + Thorsten Fuchs + tfuchs&hnf.de +45160 + Jabatan Ketua Menteri Melaka + Md Ridzuan bin Mohammad Latiah + mdridzuan&melaka.gov.my +45161 + YouCo Srl + Filippo Ferrari + filippo.ferrari&youco.eu +45162 + GUANG DONG SMART ELECTRIC INFORMATION INDUSTRY CO.,LTD. + PAN HONGKAI + 13922302687&163.com +45163 + ROSTOCK PORT GmbH (formerly 'Hafen-Entwicklungsgesellschaft Rostock mbH') + Rayk Zimmermann + r.zimmermann&rostock-port.de +45164 + Brulli Energia + Laura Filippini + laura.filippini&brulli.it +45165 + IT Services, Loughborough University + John Dodson + si.supportcontracts&lboro.ac.uk +45166 + Itaipu Binacional + Jacson Querubin + querubin&itaipu.gov.br +45167 + Redgates.com + Karl Redgate + Karl.Redgate&gmail.com +45168 + krumedia GmbH + Michael Krutwig + office&krumedia.de +45169 + Tahoe Forest Health System + Michael Soule + NetworkTeam&TFHD.COM +45170 + The Baupost Group, L.L.C. + Greg Smith + baupostinfosec&baupost.com +45171 + Gareth Morgan Investments + Robert Pearce + robertp&gmi.co.nz +45172 + Stronghold, Ltd. + Matt Walker + matt.walker&strongholdlimited.com +45173 + The Furukawa Battery Co.,Ltd + Osamu Yuzawa + a-tominaga&furukawadenchi.co.jp +45174 + Eyecom Telecommunication Equipments Ltd + Master Guan + master&eyecom-telecom.com +45175 + NGI SpA + NGI Network Group + network&ngi.it +45176 + Mirantis Inc + Alexey Rogusskiy, Scale Team Lead + unix.linux.aix&gmail.com +45177 + Sapientier Development + John W Davidson + jwdavidson&gmail.com +45178 + Direktion der Justiz und des Innern des Kantons Zuerich + Simon Giger + simon.giger&ji.zh.ch +45179 + Jablotron Alarms a.s. + Karel Hladik + hladik&jablotron.cz +45180 + Kongsberg Maritime AS + Einar Rasmussen + einar.rasmussen&km.kongsberg.com +45181 + AVSystem + Danilo Mitera + d.mitera&avsystem.com +45182 + bkaiser GmbH + Bruno Kaiser + bruno&bkaiser.com +45183 + Access Control Services Ltd + Russell Martin + rmartin&xplan.com +45184 + Patrick Kalkman + Patrick Kalkman + snmp&pakalkman.nl +45185 + SCA Skog AB + Magnus Tjärnskog + magnus.tjarnskog&sca.com +45186 + frazz.se + Franz Levin + iana&frazz.se +45187 + Amber Technical Solutions Ltd + Paul Mailes + contact&ambertechnical.co.uk +45188 + Vzajemna, d.v.z. + Rok Goli + it_app&vzajemna.si +45189 + Genplus Pte Ltd + LIM CHOON BOON + limcb&genplus.sg +45190 + Peaxy, Inc. 
+ Giordano Beretta + gberetta&peaxy.net +45191 + Simple Finance Technology Corp + Miah Johnson + ops&simple.com +45192 + Lonmin + Simon Bezuidenhout + simon.bezuidenhout&lonmin.com +45193 + TeraStream + Kristian Larsson + kristian.larsson&t-systems.se +45194 + Oryx Embedded + Clément Zeller + info&oryx-embedded.com +45195 + Beijing 35one Technology Development Co., Ltd + MoYusheng + moyusheng&35one.com +45196 + Of Modems and Men + Nick Kamenyitzky + nick&kamenyitzky.com.au +45197 + C.S.C.D.G.R + Richard Mantha + manthar&cscdgr.on.ca +45198 + Weichert Lead Network, Inc + Arthur Quintalino + aquintalino&wlninc.com +45199 + Lite-On Power System Solutions + Jing Shao + jing.shao&liteon.com +45200 + Reserve Bank Of Australia + Ronnie Sidhu + SidhuR&rba.gov.au +45201 + BBright + Gael Martin + gael.martin&bbright.com +45202 + Fachhochschule Potsdam + Stephan Schier + it-service&fh-potsdam.de +45203 + Synergy Consulting Ltd. + Andrej Sandyrov + andrej.sandyrov&synergy.lt +45204 + Four Horsemen + Lionel Mehl + ldapadmin&sanemail.de +45205 + Richard Frostell + Richard Frostell + rich_f01&yahoo.com +45206 + Quintron Systems, Inc + Craig Armstrong + wcarmstrong&quintron.com +45207 + healcerion + lyle chung + lyle&healcerion.com +45208 + GSS International Group + Riaz Farhanian + riaz.farhanian&gssint.com +45209 + Altostratus Oy + Timo Kälviäinen + timo.kalviainen&instepsoftware.fi +45210 + DigiSeq Limited + Colin Tanner + colin.tanner&digiseq.co.uk +45211 + Data Control Ltd + Nihar Mohanta + nihar.mohanta&datacontrol-ltd.com +45212 + Vecture Inc. + Eric Roman + eroman&vectureinc.com +45213 + Municipal Parking Services, Inc. + Dave Collins + dave&mpspark.com +45214 + Community Campaign (Hart) + James Radley + iana&jradley.com +45215 + Matchpoint Systems, Inc. + Ross Dreyer + rdreyer&matchpointsystems.com +45216 + Cyprus Telecommunications Authority (CYTA) + Charalambos Charalambous + charalambos.a.charalambous&cyta.com.cy +45217 + Large Display Solutions International ltd. + Pere Pons + pere&largedisplaysolutions.com +45218 + Mesosphere + Lukas Loesche + lukas&mesosphere.io +45219 + Telecomax + John Badie + jbadie&telecomaxgroup.com +45220 + eGO Travel & Leisure AG + Stoyan Mitkov + smitkov&mygo7seas.de +45221 + AMP + Anand Gulla + anand_gulla&amp.com.au +45222 + XpoLog Ltd. + Ziv Eylon + ziv&xpolog.com +45223 + FASII Information Technology + Weifeng Yang + wfyang&fasii.com +45224 + Wireless Sensors LLC + Guilherme Goretkin + goretkin&goretkin.com +45225 + Guangzhou Netzone Inc. + Ansel Choi + cai&netzone.com +45226 + Allicient + Peter Maxwell + peter&allicient.co.uk +45227 + Okinawa Institute of Science and Technology Graduate University + Tim Dyce + tim.dyce&oist.jp +45228 + Lumagate AB + Tobias Wold + tobias.wold&lumagate.com +45229 + nyantec GmbH + Mikael Voss + mvs&nyantec.com +45230 + RCD Radiokomunikace spol. s r. o. + Vaclav Korecek + korecek&rcd.cz +45231 + Liverock Technologies + Toshihide Hosotani + thosotani&liverock-tech.com +45232 + Hellenic Ministry of Foreign Affairs + Gerasimos Melissaratos + gmelis&mfa.gr +45233 + Guangzhou Tongrui Electronic Electronic Technology Co.,Ltd + Dimon Zeng + zlpingcn&163.com +45234 + Pennsylvania State Education Association + Cliff Miller + cmiller&psea.org +45235 + ConfirmSign S.L. + Frankie Gómez + confirmsign&confirmsign.com +45236 + Ferz corp.
+ ALEXANDR SVYATOV + a.svyatov88&gmail.com +45237 + Context Relevant + Jason Lucas + jlucas&contextrelevant.com +45238 + City of Oregon City + David Knoll + dknoll&ci.oregon-city.or.us +45239 + RosReserv + Kostukov Ivan + licensing&rosreserv.ru +45240 + International Capital & Management Company + Robert Ziegler + zieglerr&icmcvi.com +45241 + fuzzcat.net + Michael L Martin + mmartin4242&gmail.com +45242 + INGEBI + Juan Vera + juan&dna.uba.ar +45243 + La Boulgour Compagnie + Bruno Beaufils + bruno&boulgour.com +45244 + IJM Systems + Ian Macdonald + ianmac51&gmail.com +45245 + SAS CALVI Célestin + CALVI OLIVIER + contact&calvibois.fr +45246 + EcoCooling Ltd + James Dawson + JamesDawson&ecocooling.org +45247 + Triple Domain Vision Co., Ltd. + philip chang + philipchang&tdv.com.tw +45248 + NetOcean + Leander Schäfer + info&NetOcean.de +45249 + Banco Macro S.A. + Leandro Curci + leandrocurci&macro.com.ar +45250 + Habr Europe OÜ + Vadim Rybalko + vadim&habr.ee +45251 + axelIT Üzletfejlesztési Kft. + Rózsahegyi Zsolt + info&axelit.hu +45252 + mDAKS GmbH + Leo Rave + rave&mDAKS.com +45253 + RMB Capital Management, LLC + Brian Hettinger + bhettinger&rmbcap.com +45254 + Shouei Denshi Kenkyuusho Co.,Ltd. + Satoshi Matsumoto + matsumoto&shouei.co.jp +45255 + Dallas Delta Corp + Fred Villella + fred.villella&dallasdelta.com +45256 + Foreks Bilgi Iletisim Hiz. A.S. + abidin sunar + abidin.sunar&foreks.com +45257 + InterConnectData + Mark Galvin + snmp-pen-admin&interconnectdata.net +45258 + Lazaros Economou + Lazaros Economou + Lazaros.Economou&gmail.com +45259 + Billo Systems Ltd. Co. + Bill Ota + billo.systems&gmail.com +45260 + FYLDE MICRO LTD + Thomas M Johnson + tmjohnson&fyldemicro.com +45261 + inAere Holdings + Grant Pullen + grant&inaere.com +45262 + Raven + Philipp Raabe + philippraabe&web.de +45263 + VTAS GmbH & Co. KG + Stephan Brunner + s.brunner&vtas.de +45264 + Schaeffler Technologies AG & Co. KG + Daniel Jarling + daniel.jarling&schaeffler.com +45265 + GMAURER + Maurer + guido.maurer&t-online.de +45266 + bootix Technology GmbH + Dirk Koeppen + dirk&bootix.com +45267 + ISSENDIS + Didier Urban + urban&issendis.com +45268 + RusHydro + Mihail Mayorov + MayorovMV&rushydro.ru +45269 + Rivasense Technologies Ltd + Vladimirov Vladimir + vvv&rivasense.com +45270 + Electronic Certification Services SL + Esteban Canalda + ecs&sga.es +45271 + Beco + Zehrudin Becic + zehrudin.becic&chello.at +45272 + Szallas.hu Ltd. + Norbert Attila Benkocs + norbert.benkocs&szallas.hu +45273 + independIT Integrative Technologies GmbH + Dieter Stubler + kontakt&independit.de +45274 + EyaSys + Lars J. Aas + lars.aas&eyasys.no +45275 + Energy Essentials Group B.V. + Rob Lemmens + it&energyessentials.nl +45276 + iTEAM (Instituto de Telecomunicaciones y Aplicaciones Multimedia, Grupo COMM) + Juan Carlos Guerri Cebollada + jcguerri&dcom.upv.es +45277 + Five Monkeys Code Factory + Alberto Bambala Arbea + alberto.bambala&vento.tv +45278 + OpenCloud SL + Xavier González del Águila + xavi&viapps.org +45279 + Strategy Object + Nick Spirov + netmngmt&strategyobject.com +45280 + Premise Health + Joseph Johnson + joey.johnson&premisehealth.com +45281 + Generac Power Systems Inc. + Mike Calbaum + mike.calbaum&generac.com +45282 + LinkUp Networks + Fred L.
Templin + fltemplin&acm.org +45283 + T-Systems International GmbH + Andreas Brasching + de-mail&t-systems.com +45284 + ax86.net + Tilman Blumenbach + tilman&ax86.net +45285 + Salience Systems Pty Ltd + Gary Simmons + gary.simmons&salience.net.au +45286 + Morning Project Samurai + Junya Kaneko + jyuneko&hotmail.com +45287 + GEOPRAEVENT AG + Daniel Regenass + daniel.regenass&geopraevent.ch +45288 + Compania Logistica de Hidrocarburos CLH, S.A. + Juan Marcos + jmarcosm&clh.es +45289 + unixadm.org + Philippe Kueck + dmiawticeenq6ryxcmbw2xqkdrews&protected32.unixadm.org +45290 + Agentia pentru Agenda Digitala a Romaniei + Gabriel DUMITRU + gabriel.dumitru&aadr.ro +45291 + DOCTORS CENTER HEMATOLOGY & ONCOLOGY GROUP, PSC + CESAR MALDONADO, MHMS + dcc2003&gmail.com +45292 + DOCTORS' CENTER HEMATOLOGY & ONCOLOGY GROUP BAYAMON, PSC + CESAR MALDONADO, MHMS + dcc2003&gmail.com +45293 + Transport Network llc + Minh Pham + pxm5489&att.net +45294 + ANEAC Co., Ltd. + Kevin Tang + support&aneac.net +45295 + BTT Bilgi Teknoloji Tasarim Ltd + Rim KHAZHIN + rim&btt.com.tr +45296 + Mazars + Jayson Dudley + jayson.dudley&mazars.co.uk +45297 + T-21 Technologies, LLC + Kevin Ancelin + kancelin&t-21.biz +45298 + Alea s.r.l. + Giuseppe Merlino + giuseppe.merlino&aleagames.com +45299 + AMRTEC TECNOLOGIA IMPORTAÇÂO E EXPORTAÇÂO LTDA + Daniel Alberto Percara + dpercara&amrtec.com.br +45300 + WatchDox LTD + Guy Shrabany + Admin&Watchdox.com +45301 + Double B - Business, s.r.o. + Damian Baran + damian.baran&doublebb.sk +45302 + SOIN Soluciones Integrales S.A, + Nelson Baltodano + nelsonb&soin.co.cr +45303 + Industrial Software Co + Ivan Petrov + ivan.d.petrov&gmail.com +45304 + ByteLogix + Cole Minnaar + cole.minnaar&bytelogix.co.za +45305 + Asyne inc. + Hugo Bernier + hbernier&gmail.com +45306 + DE-CIX Management GmbH + Philippe Kueck + syseng&de-cix.net +45307 + Tracsis plc + David Turner + d.turner&tracsis.com +45308 + Impactions BV (formerly 'Born Inventors BV') + Jeroen Verheul + j.verheul&impactions.nl +45309 + Needham Public Schools + Paul Messias + paul_messias&needham.k12.ma.us +45310 + Universidade Estadual de Santa Cruz + Raimundo Campos Simoes + rcsimoes&uesc.br +45311 + GHT Co., Ltd + yong li + li.yong&ghtchina.com +45312 + Instituto para os Assuntos Cívicos e Municipais Macau + Lei Seak Tou + leist&iacm.gov.mo +45313 + iiPAY Ltd + Guy Webb + techsupport&iipay.com +45314 + CCV Deutschland GmbH + Stefan Marxen + S.Marxen&de.ccv.eu +45315 + KRKA, tovarna zdravil, d. d., Novo mesto + Zvonko Rangus + zvonko.rangus&krka.biz +45316 + devlab + Corrado Colombo + rs232&email.it +45317 + SAU + Maikel Ortega Hernandez + maikeloh&gmail.com +45318 + Rosa Khutor LLC + Konstantin Komzolkin + noc-adm&rosaski.com +45319 + NGENIX + Dmitry Krikov + noc&ngenix.net +45320 + ActiStuff + Vladislav Zhuravlev + dev&actistuff.com +45321 + Triumph Bancorp, Inc. + Brent Buchanan + it&triumphllc.com +45322 + GigaEnergy Inc. + Felix Lin + felix&felix-lin.com +45323 + BIORETICS SRL + MATTEO ROFFILLI + roffilli&gmail.com +45324 + Business Solutions Adviser + Vitaly Lyanke + VLyanke&bs-adviser.ru +45325 + roo.ch + Enrique Róo Moares + enrique.roo&roo.ch +45326 + Yves Durce ent. + Yves DURCE + yves.durce&sfr.fr +45327 + NATAJA Maciej Kozuch + Maciej Kozuch + maciej.kozuch&nataja.pl +45328 + KOSHIN DENKI KOGYO CO., LTD. + Michihiro Kobayashi + mchkoba&koshindenki.com +45329 + Kristin School + Tony Bigby + tbigby&kristin.school.nz +45330 + Fineline Solutions Ltd. 
+ Patrick Keys + patrick&fineline.uk.com +45331 + San Fernando Valley Community Mental Health Center, Inc. + Stephen Dowd + pki&sfvcmhc.org +45332 + East Side Oncology Clinic PLLC + Nagender Mankan MD + eastsidehemoncology&gmail.com +45333 + NeatApps, Inc. + Dave Neathery + dave&neatapps.com +45334 + Nextys + Ramon Zambelli + ramon.zambelli&nextys.com +45335 + Matthias Blümel IT-Dienstleistungen + Matthias Blümel + info&bluemel-it.de +45336 + Microbase + Antonis Psaras + apsaras&microbase.gr +45337 + voria.net + Robert S. Quattlebaum + darconeous&gmail.com +45338 + Tableau Software, Inc. + Braxton Ehle + behle&tableau.com +45339 + JSC "Housing Construction Savings Bank of Kazakhstan" + Glebova Serafima + glebova.s&hcsbk.kz +45340 + Half-Baked Software + Michael Thomas Richter + ttmrichter&gmail.com +45341 + NEXT Srl + ing. Denis Ioan + d.ioan&next-italia.it +45342 + IWD Ltd. + Sandor Takacs + sandor.takacs&iwd.hu +45343 + Thum+Mahr GmbH + Benedikt Geltenpoth + bgeltenpoth&thummahr.de +45344 + ZhenJiang College + Teng Bin + zjtengbin&163.com +45345 + Smithee, Spelvin, Agnew & Plinge, Inc. + Meadhbh Hamrick + meadhbh&smithee.us +45346 + Velocloud Networks, Inc. + Christopher Szeles + chris&velocloud.net +45347 + The Centers for Families and Children + Dennis Anderson + it.contact&thecentersohio.org +45348 + Megvii Inc. + Kai Jia + jiakai&megvii.com +45349 + VGNet Networking + J.H. van Gils + support&vgnet.nl +45350 + jorde.it + Stefan Jorde + stefan&jorde.it +45351 + XPSoft sas + Luca Poretti + poretti&xpsoft.it +45352 + AMERGINT Technologies Inc. + Brian Willette + bwillette&amergint.com +45353 + BRS Sistemas Eletrônicos + Marcelo Richter + marcelo&brs.ind.br +45354 + E2G srl + Marco Nicoloso + marco.nicoloso&e2g.it +45355 + Staffordshire Police + John Dunning + John.Dunning&staffordshire.pnn.police.uk +45356 + Liveblock Auctions International + Roe Peterson + roe&liveblockauctions.com +45357 + CUBENet GmbH + Karsten Schramm + kschramm&cube.net +45358 + Integral Business Solutions + Peter Barry + pbarry&go-integral.com +45359 + ARC Informatique + Benoît LEPEUPLE + pm&arcinfo.com +45360 + Valley Christian School + James Paul + jppaul&valleybible.com +45361 + OC Software Engineering + Clay Cover + ccover&ocsofte.com +45362 + Coca-Cola Erfrischungsgetraenke AG + Sandra Seitz + sandra.seitz&cceag.de +45363 + Advanced Training LLC + Ruslan V. Karmanov + info&atraining.ru +45364 + TRIADE InformationSystems GmbH + Klaus Eulenbach + klaus.eulenbach&triade.de +45365 + Om Vindhyavasini College of IT & Management + Neelkamal Nimavat + nimavat&omvvim.edu.in +45366 + Cohere Technologies + Ofer Avitzur + Ofer.avitzur&cohere-technologies.com +45367 + SilverTip Marine Inc. + Ian McEachern + imceachern&silvertiptel.com +45368 + Amida Technology Solutions + Dmitry Kachaev + dmitry&amida-tech.com +45369 + MVZ Labor Dr. Quade und Kollegen + Dr. Carsten Bartling + bartling&lab-quade.de +45370 + ID TECH + David Dai + davidd&idtechproducts.com +45371 + AV + Mikhail Malygin + ldnetru&gmail.com +45372 + Professional Software Design Pty Ltd + Neil Anderson + neila&psdlogistics.com +45373 + Speech Technology Center Limited + Evgeniy Zelenko + registr&speechpro.com +45374 + i-SOLUTIONS HEALTH GmbH + Technischer Service + technik&i-solutions.de +45375 + Znuny GmbH + Roy Kaldung + rk&znuny.com +45376 + PT.
Mahardika Putra Mahkota + Head of IT Department + itdepthead&mpmoffice.com +45377 + Air Liquide IT + PEN Support Team + oid.support&airliquide.com +45378 + Beijing Uxsino software CO.,Ltd + Jianhui Wang + wangjh&uxsino.com +45379 + iCRCo, Inc + Scott Shannon + sshannon&icrcompany.com +45380 + Happy Crow Enterprises, LLC + Tim Taylor + tim.taylor&happycrow.com +45381 + iCode Global, LLC + Scott Shannon + scott.shannon&icodeglobal.com +45382 + Huf Secure Mobile + Daniel Deuter + daniel.deuter&huf-group.com +45383 + SHARP Corporation Communication Systems Division + Kei Nakayama + nakayama.kei&sharp.co.jp +45384 + SERgroup Holding Europe GmbH + Klaus Eulenbach + klaus.eulenbach&ser.de +45385 + 4GTSS Corporation W.L.L. + Osama Abuamara + Osama.taha&4gtss.com +45386 + PeopleLogic Corporation + Waqas Athar + waqas.athar&peoplelogic.com.pk +45387 + NCC Group PLC. + Mr Chris Middleton + chris.middleton&nccgroup.com +45388 + SSV Software Systems GmbH + Eduard Neufeld + ene&ssv-embedded.de +45389 + LV= + John Chasty + john.chasty&lv.com +45390 + Alef Mobitech Inc. + Steven Spencer + steven.spencer&alefmobitech.com +45391 + PLC Group + Nasir Mahmood + nasirm&plcgroup.com +45392 + Quadro Group LLC + Alexander Lyakhov + alyakhov&quadrogroup.ru +45393 + OSM GmbH + Thomas Schiller + t.schiller&osm-gmbh.de +45394 + Mangstor Inc + Ashwin Kamath + ashwin&mangstor.com +45395 + Secrétariat général de l'enseignement catholique + Olivier Perrichon + o-perrichon&enseignement-catholique.fr +45396 + Dejero Labs. Inc + IT Support + itsupport&dejero.com +45397 + ASFINAG Maut Service GmbH + Alexander Coreth + service&asfinag.at +45398 + Segmint, Inc. + Keith Veleba + keith.veleba&segmint.com +45399 + Omemee Engineering Group + Scott MacEachern + scottm&enggrp.com +45400 + Candid Security Solutions LLC + Csaba Kadar + csaba&candid-ss.com +45401 + Unitron NV + Stephen Deleu + stephen.deleu&unitrongroup.com +45402 + Arvet Bank Operations, INC. + Mike Calvi + mcalvi&arvest.com +45403 + ExchangeCore LLC + Christopher Hougard + chris&exchangecore.com +45404 + TightVideo Ltd. + Peter Volkov + peter.volkov&tightvideo.com +45405 + Lee County School System + Ryan Hairyes + rhairyes&lee.k12.nc.us +45406 + BetEasy Pty Ltd + Jaiwardhan Puri + jaiwardhanp&beteasy.com.au +45407 + Northwest Kits + Nigel Vander Houwen + nigel&nwkits.com +45408 + Octavian Technology Ltd + Eryk Bottomley + eryk&octaviantechnology.com +45409 + OneSoft Tecnologia S/A + Paulo Rogerio Panhoto + paulo.panhoto&flashtrader.com.br +45410 + ISAMBERT + ISAMBERT Patrick + patrick.isambert&isambert.info +45411 + Ultra Electronics - Varisys + Marcin Jankowski + marcin.jankowski&ultra-varisys.com +45412 + HGH SYSTEMES INFRAROUGES + THOMAS ANTHONY + info&hgh.fr +45413 + Global IQX + Michael de Waal + admin&globaliqx.com +45414 + SHUYSKIY ZAVOD AKVARIUS LLC + Alexandr Tridtsatov + at&aqs.ru +45415 + Repheka Haiti, Inc. + Mario Malivert + mmalivert&repheka.org +45416 + Chengdu Chengxun Network Technologies Co., Ltd. + Jason Deng + admin&cxria.com +45417 + HCO Computer Products /dba ZGO Tech Hosting + Sam Jazaerli + sjazaerli&zgohosting.com +45418 + ABAST systems + Oliver Schwarz + oschwarz&nechigroup.com +45419 + PhirePhly Design + Kenneth W. Finnegan + kennethfinnegan2007&gmail.com +45420 + Strike XII Development + Jairo Cantillo + jairo&strikexii.com +45421 + Nico Boehr + Nico Boehr + ianacontact&nicoboehr.de +45422 + powerbrand marketing GmbH + Patrick Schebek + p.schebek&powerbrand.de +45423 + Match.com L.L.C. 
+ James La Vacca + james.lavacca&match.com +45424 + The Ghosh Center for Oncology & Hematology + Mindy Martin + mmartin&theghoshcenter.org +45425 + Kadlec Health System + Mark Dickson + mark.dickson&kadlec.org +45426 + Soltec Technology Co., Ltd. + Ying-Bin Haung + rtarmy&gmail.com +45427 + Xiamen Winer Technology Co.,Ltd + ShiXu Dai + daisx&winertech.com +45428 + UrtheCast + Aaron Yeung + admin-uc&urthecast.com +45429 + Instituto Federal Catarinense Campus Sombrio + Marco Antonio Silveira de Souza + marco&ifc-sombrio.edu.br +45430 + Atlas Copco Rock Drills AB (Business Area MR) + Tony Petersson + tony.petersson&se.atlascopco.com +45431 + Nanjing Lopu Co., Ltd. + Yang Fei + yangf&lopu.com.cn +45432 + Cirrato Technologies AB + Rob Pike + robert.pike&cirrato.com +45433 + Finivation Software + Guido Belcic + gbelcic&finivation.com +45434 + Accel Frontline Ltd + Rajesh Sharma + rajesh.sharma&accelfrontline.in +45435 + Cyprus University of Technology + Andreas Mouskos + andreas.mouskos&cut.ac.cy +45436 + THALES Services MsLabs (formerly 'THALES Services Lab DT') + MARTEL Arnaud + gestion-mslabs&thalesgroup.com +45437 + TEOCO Corporation + Zion Alfia + ipadmin&teoco.com +45438 + AI2M srl + Marco Perego + info&ai2m.eu +45439 + ScioTeq + Pascal Godefroidt + Pascal.Godefroidt&scioteq.com +45440 + BISS d.o.o. + Aleksander Radovan + administrators&biss.hr +45441 + Safe Internet, LLC + Alexander Gusev + smtp&cstech.ru +45442 + Wrocław University of Science and Technology + Krzysztof Kołodziejczyk + krzysztof.m.kolodziejczyk&pwr.edu.pl +45443 + Arca Sistemi Scarl + Michele Sandonini + michele.sandonini&arcavita.it +45444 + ecg.local + Ralf Balzer + balzer&manthey.it +45445 + Kuantic SAS + Mike O'Dowd + modowd&kuantic.com +45446 + Ministerio Publico de la Acusación + Ing. Alejandro Azario + aazario&mpa.santafe.gov.ar +45447 + SOCIETE DU FIGARO + Bruno Coutureau + bcoutureau&lefigaro.fr +45448 + UF Health Oncology - Baymeadows + Kathy Politano + kpolitano&21co.com +45449 + Blue Krait Consulting Inc. + Blair Sandberg + blair.sandberg&bluekrait.com +45450 + Parsec (Pty) Ltd + Francois Joubert + francoisj&parsec.co.za +45451 + reBTSOFT + Björn Ternes + bjoern.ternes&gmail.com +45452 + Instituto Medico Alexander Fleming + Mario Cazeneuve + mcazeneuve&alexanderfleming.org +45453 + Banff Cyber Technologies Pte Ltd + Alex Ngo + alexngo&banffcyber.com +45454 + Shanghai Digigrid Technology Co.,Ltd + Ivy Jiang + ivy.jiang&digigrid.com.cn +45455 + John Wason + John Wason + wason&wasontech.com +45456 + Galaxia Electronics Co., Ltd. + Hong-rae, Kim + hrkim&galaxia.co.kr +45457 + Kanton Thurgau + Patrik Stacher + patrik.stacher&tg.ch +45458 + VKBit Betrieb GmbH + Juri Zirnsak + postmaster&vkb.de +45459 + Lonix Oy + Jarmo Mitjonen + jm&lonix.com +45460 + Comfact AB + Anders Tornqvist + anders&comfact.com +45461 + Pocos bv + JordI Smits + jordi&pocos.nl +45462 + Delvag Luftfahrtversicherungs-AG + Peter Berghaus + servicedesk&delvag.de +45463 + Federal Public Service Justice, Belgium + David Roskams + ict.infra.pen&just.fgov.be +45464 + Verband Christlicher Pfadfinderinnen und Pfadfinder in Württemberg + Frederik Held + frederik.held&wuerttemberg.vcp.de +45465 + Clear2Pay NV/SA + Dieter Boden-Schelfthout + dieter.boden&clear2pay.com +45466 + PIXEL Sp. z o.o. 
+ Seweryn Kamiński + s.kaminski&pixel.pl +45467 + Vidat + Jochen Ulbricht + j.ulbricht&vidat.eu +45468 + Avi Networks + Anand Parthasarathy + anpartha&avinetworks.com +45469 + IQ Messenger + Ramazan Aslan + r.aslan&iqmessenger.com +45470 + Azienda Ospedaliero-Universitaria di Bologna + Luca Boccafogli + luca.boccafogli&aosp.bo.it +45471 + Triangle Wholefoods Collective Ltd. + Pod Andrews + pod&suma.coop +45472 + Ebee Smart Technologies GmbH + Robert Weyrauch + iana&ebeesmarttechnologies.de +45473 + Excelfore Corporation + Anoop Balakrishnan + admin&excelfore.com +45474 + HfMT Hamburg + Axel Schnellbügel + axel.schnellbuegel&hfmt-hamburg.de +45475 + MEYTEC GmbH + Gerhard W. Meyer + info&meytec.com +45476 + StreamVue Ltd + Sonia Griffiths + hostmaster&streamvue.com +45477 + ZOO Digital Group plc. + Stuart Green + stuart.green&zoodigital.com +45478 + DIVA IT SIA + Dmitrijs Petrovs + dmitrijs.petrovs&divait.lv +45479 + PXP Solutions Ltd + James Russell + hostmaster&pxp-solutions.com +45480 + cognitix GmbH (formerly 'Packetwerk GmbH') + Sven Roethig + sven.roethig&cognitix.de +45481 + Galore Networks Private Limited + Balaji Kulothungan + balajik&ga-lore.com +45482 + St Jude Medical + Eric Crist + ecrist&sjm.com +45483 + FusionLayer, Inc. + Rauno Haapaniemi + snmp-admin&fusionlayer.com +45484 + Siama Systems Inc. + Marco Mascitto + marco.mascitto&siamasystems.com +45485 + SCUBI + Gopal Padinjaruveetil + gopal.padinjaruveetil&capgemini.com +45486 + ACSS Communications Pty Ltd + Alex Lodge + AlexL&acsscomms.com.au +45487 + Polydata Corporate + Jing Kang + jing.kang&polydata.com.cn +45488 + KONAR + Anton Myachin + noc&konar.ru +45489 + groupe-alpha + Alain Mouilleron + iana&groupe-alpha.com +45490 + Nexperteam + Nathan Van Overloop + operations&nexperteam.be +45491 + Gainde 2000 + Magatte MBAYE + mmbaye&gainde2000.sn +45492 + Connexin Software, Inc. + Greg Anderson + oid&connexinsoftware.com +45493 + Media System Technologies Srl + Giovanni Squassi + gsquassi&gmail.com +45494 + GISTEC + Ibrahim Afaneh + ibrahim.afaneh&gistec.com +45495 + Konneka Information Technologies (formerly 'Konneka Bilgi ve Iletisim Teknolojileri Imalat Tic. Ltd. Sti.') + Kursad Yusuf KONUS + kykonus&konneka.com.tr +45496 + Groupe ESB + Kevin COTARD + kevin.cotard&ecoledubois.fr +45497 + Qucell + Sung Hyun Park + sunghyun.park&qucell.com +45498 + Novella-R SIA + Dmitrijs Petrovs + dmitrijs.p&ivf.lv +45499 + Janos + Jan Van Nieuwenhove + jan.van.nieuwenhove&gmail.com +45500 + Virgin Technologies Inc + Justin Miller + justin&virgintechnologies.com +45501 + Micro Instruments + Francois van Jaarsveld + francois&microinstruments.co.za +45502 + nterra integration GmbH + Cornelius Flöter + cornelius.floeter&nterra.com +45503 + 42 Solutions B.V. + Stefan Roels + snmp&42solutions.nl +45504 + Synaptive Medical Inc. + Monroe Thomas + monroe&synaptivemedical.com +45505 + Tox Foundation + Sean Qureshi + sean&tox.im +45506 + Mida Solutions s.r.l. + Mauro Franchin + ict&midasolutions.com +45507 + iMapData.com + Jorge Dana + jdana&imapdata.com +45508 + Strich Labs + Sarah Nordstrom + oidadmin&strichlabs.com +45509 + YourMembership + Information Technology + itmail&yourmembership.com +45510 + BBMRI-ERIC + Petr Holub + petr.holub&bbmri-eric.eu +45511 + Cogent Technologies Limited + Ada Qi + ada.qi&cogent-technologies.net +45512 + Entergate AB + Roger Bergstrand + roger&entergate.se +45513 + Hekatron Technik GmbH + Klaus Fischer + informatik&hekatron.de +45514 + ShenZhen SNMP Link Electronics Co., Ltd.
+ daniel zhang + 542708124&qq.com +45515 + Econocom-osiatis + Eric Lapouyade + eric.lapouyade&econocom-osiatis.com +45516 + Valid Soluciones Tecnógicas S.A.U. + José Carlos Santos García + josecarlos.santos&valid.com.es +45517 + GarantPlus Ltd + Maxim Shabrov + maxim&otx.ru +45518 + Macnica Inc. + Jiaqi Chen + chen-j&macnica.co.jp +45519 + Diamedica Sp. z o.o. + Borys Stankiewicz + telediamedica&gmail.com +45520 + Actility + Gilles LEFEVRE + gilles.lefevre&actility.com +45521 + OPENBASE + PARK BYUNG WOOK + bwpark&openbase.co.kr +45522 + xipki.org + Lijun Liao + lijun.liao&gmail.com +45523 + RFE Broadcast srl + Luigi Fazio + lfazio&rfebroadcast.com +45524 + Advancapp + Luis Costa + info&advancapp.com +45525 + Ubiqam LTD + Dovalle Yankelovich + Dovalle&ubiqam.com +45526 + Snapper Services + David Stamp + david.stamp&snapper.co.nz +45527 + Diehl AKO Stiftung & Co. KG + Matthias Preissing + matthias.preissing&diehl.com +45528 + Joerg-Peter Hempel + Joerg-Peter Johann Hempel + mail&jphempel.eu +45529 + vishmail.net + Joerg-Peter Hempel + mail&jphempel.eu +45530 + Medien Service Untermain GmbH + Peter Bethke + bethke&msu.biz +45531 + M.A.D.A. + Dovalle Yankelovich + Dovalle&MadaTechnologies.com +45532 + Infinicore inc + David Chen + Dchen&infinicoreinc.com +45533 + Red Arrow (Southampton) Group Limited + Alex Martin + iana&redarrow.co.uk +45534 + WestNet + Paul Cunnane + paul.cunnane&westnet.ie +45535 + Information Systems Dynamics + Hakeem P. Fahm + bills&isdynamics.com +45536 + Rheinmetall Defence Electronics GmbH + Herbst, Andre + andre.herbst&rheinmetall.com +45537 + NUAZ + Dick Hofflamut + dick.hofflamut&gmail.com +45538 + PHARMAGEST INTERACTIVE + Sebastien DEON + sebastien.deon&pharmagest.com +45539 + Domain fritze.org + Stefan Fritze + info&fritze.org +45540 + Rueval S.A. + Ricardo Arias + rarias&rueval.com.uy +45541 + Schneider Elektronik GmbH + Jan Keßler + jan.kessler&schneider-elektronik.de +45542 + Orlando Utilities Commission + Andy Bailey + abailey&ouc.com +45543 + Mekansal Yazilim + Ömür YAVUZ + omur.yavuz&intergis.com.tr +45544 + IT Kimmig + Alexander Kimmig + info&it-kimmig.de +45545 + Ormazabal + SNMP Administrator + licenses.electronics&ormazabal.com +45546 + SECUWAVE CO,. LT + PARK BYUNG WOOK + bwpark&openbase.co.kr +45547 + Wirtgen (China) Machinery Co., Ltd. + Richard Shao + richard.shao&wirtgen-china.com.cn +45548 + Asian Hope Inc + Lyle Kozloff + lyle&asianhope.org +45549 + Shenzhen Zontele Technologies Co., Ltd + king zhu + king.zhu&zontele.com +45550 + Webstudio Information Technology Inc. + Ruyi Wang + webstudio&webstudio.com.cn +45551 + VIPP + CE NOYELLE + cenoyelle&vippinterstis.com +45552 + Unisys + Abhineet Pandey + abhineet.pandey&in.unisys.com +45553 + GE Healthcare - Engineering + METIVIER Maxime + maximemetivier&ge.com +45554 + Grupo JAMPIG S.A.S. + Fabián E. Barón + fbaron&jampig.co +45555 + RDP + Nechay Anton + anechay&rdp.ru +45556 + Triadic Industries, LLC + Alex McWhirter + alexmcwhirter&triadic.us +45557 + SHENZHEN CAN TECHNOLOGY CO.,LTD + Skye Zhao + zhaoxiao&canpdu.com +45558 + CJSC GROUP OF COMPANIES RENOVA + Sergey Kulikov + it&renova-group.ru +45559 + Prevoty, Inc. + Kunal Anand + kunal&prevoty.com +45560 + Inovapar Soluções LTDA. + Antonio Calegare + suporte&inovapar.com.br +45561 + Masterclock, Inc. + William Clark + sysadmin&masterclock.com +45562 + Capella Aerospace Systems Engineering Inc. + Sean Hamilton + seanh&capellaaero.com +45563 + SOMNOmedics GmbH + Jonas Zeiger + jz&somnomedics.de +45564 + Beijing SplendidTel Co.,Ltd. 
+ Marco Jun Peng + marco.peng&splendidtel.com +45565 + INTERBYSS S.R.L + Alvin Nunez + a.nunez&interbyss.com +45566 + INTELMA CONSULTING Inc. + Badis Omarouayache + badis.omarouayache&intelmas.com +45567 + Ticom Geomatics, Inc. + Craig Hamilton + chamilton&ticom-geo.com +45568 + GoCardless Ltd + James Cunningham + webops&gocardless.com +45569 + Core Network Dynamics + Dragos Vingarzan + dragos&corenetdynamics.com +45570 + IQ Tel d.o.o. + Ognjen Seslija + ognjen.seslija&iqtel.rs +45571 + Midfin Systems + Shuvabrata Ganguly + sganguly&midfinsystems.com +45572 + awelten + Roman Hector Bianco + roman.bianco&awelten.com +45573 + Western Michigan University Homer Stryker M.D. School of Medicine + David Mitchell + david.mitchell&med.wmich.edu +45574 + Hellea SPRL + Jean-Charles de Longueville + iana.pen&hellea.net +45575 + China Beijing Stegosaurus Technology Laboratory + Sean.Zhu + SeanZhu&163.com +45576 + Hulu LLC + Robert Minsk + iana&hulu.com +45577 + Shenzhen Sundray Technologies Company Limited + MaoXing Qin + qmx&sundray.com +45578 + BroadQuest + MI HYE, KIM + mhkim&broadquest.co.kr +45579 + Guangdong Nenkingtech Development Co,Ltd. + MINXUAN CAO + maxoncao&163.com +45580 + DENTCAT + Masood Behabadi + masood&dentcat.com +45581 + YICHUN YILIAN PRINT TECH CO.,LTD + Chloe Sun + chloe_lishan&sina.cn +45582 + paysafecard.com Wertkarten GmbH + Michael Postmann + m.postmann&paysafecard.com +45583 + Puminsoft Co.,Ltd. + Pumin Duangmanee + puminsoft&gmail.com +45584 + OJSC "RIRT" + Alexander Barskiy + Barskiy_AA&rirt.ru +45585 + Space Imaging Middle East (SIME) + Spandan Kar + spandan&dsi.co.ae +45586 + Mythic Network + Ilya Kogan + ikogan&mythicnet.org +45587 + Software & Service Center,Sichuan Changhong Electric Co.,Ltd + Chuan Gu + chuan.gu&changhong.com +45588 + OLSPS + Rikus Combrinck + rikus&olsps.com +45589 + Doc-Soft Ltd. + Daniel KEKESI + daniel.kekesi&docsoft.hu +45590 + Mutesah General Investments ltd + Golan Mutesah + mutesah&yahoo.com +45591 + Shenzhen Sungreat Communication Co.,LTD + huangchuen + 1320798537&qq.com +45592 + Julius Clinical + Sander Böhm + admin&juliusclinical.com +45593 + OpenREM + Ed McDonagh + pen&openrem.org +45594 + Università Iuav di Venezia + Boeretto Marco + boeretto&iuav.it +45595 + Reaktor 23 + Elias Bonauer + info&reaktor23.org +45596 + ICloud Innovation (Beijing) ltd. + Weihua Li + liweihua&icloudinno.com +45597 + TESA Assa Abloy + Julia Vila + j.vila&tesa.es +45598 + Mckinney Independent School District + Robert Foster + rfoster&mckinneyisd.net +45599 + Network RADIUS SARL + Alan DeKok + aland&networkradius.com +45600 + OX Service AG + Markus Ruefenacht + support&ors.ch +45601 + Wikimedia Deutschland e.V. + Silke Meyer + noc&wikimedia.de +45602 + Corvus Energy + Nick Erb + nerb&corvus-energy.com +45603 + Rieker, Inc. + Harry May + hmay&riekerinc.com +45604 + BeiJing SmartWell Info&Tech Co.,Ltd. + Fountain Hsiao + info&smartwell.cn +45605 + Wi-SUN Alliance, Inc + Phil Beecher + pbeecher&wi-sun.org +45606 + Carillon Federal Services Inc. + Patrick Turcotte + pturcotte&carillon.ca +45607 + Sysco Foods + Michael Germony + germony.michael&corp.sysco.com +45608 + Atlan dynamic group + Anton V. Sekretov + service&ahilles.name +45609 + Centralschweizerische Kraftwerke AG + Daniel Jud + oid-admin&ckw.ch +45610 + EAM GmbH & Co. KG + Uwe Scheller + it&eam.de +45611 + EOSPACE Inc. 
+ William Dougherty + dougherty&eospace.com +45612 + Under Armour Connected Fitness + Stew Blanford + sblanford&underarmour.com +45613 + Universiti Malaysia Terengganu + Ahmad Nazir bin Haron + admin&umt.edu.my +45614 + RegEd Inc. + Yury Nemchin + yury.nemchin&ontarget-group.com +45615 + CityNet + Gubanov Dmitry + koloskovm&mail.citynet.ru +45616 + Healthcare Information and Management System Society (HIMSS) + Richard Delort + rdelort&himss.org +45617 + The Institute of Computational Technologies of the Siberian Branch of the Russian Academy of Sciences + Oleg Zhizhimov + zhizhim&mail.ru +45618 + Ferdinand Malcher + Ferdinand Malcher + mail&ferdinand-malcher.de +45619 + Oei Family Office + Chris Oei + admin&oei.io +45620 + Steampunk Island Inc + James N Riley Jr + webmaster&steampunkisland.com +45621 + Tycon Systems Inc. + Scott Parsons + scottp&tyconsystems.com +45622 + Netrix LLC. + Andrew Meyer + ameyer&netrixllc.com +45623 + Hughey & Phillips, LLC + Michael A. Forhan + mforhan&hugheyandphillips.com +45624 + Brain Sense + Haim Gabsow + haim&brain-sense.co.il +45625 + hahnefeld.net + Andreas Hahnefeld + info&hahnefeld.net +45626 + Sapientia Systems + John Liebenau + johnliebenau&gmail.com +45627 + X NET + Igor Malyshkin + imalyshkin&xnet.kz +45628 + HANFORD (DOE) + Michael Epler + mepler&rl.gov +45629 + Ritron + Bruce Ahlemeyer + bahlemeyer&ritron.com +45630 + Regional Medical Oncology Center + Tracie Whitley + tracie.whitley&regionaloncology.com +45631 + Perimind Corporation + Patrick Pyette + ppyette&perimind.com +45632 + br0.fr + Johan Fleury + johan.fleury&br0.fr +45633 + The Hershey Company + Kyle Smith + kasmith&hersheys.com +45634 + XeeMetric Inc. + Konstantin Korf + konstantin.korf&xeemetric.com +45635 + Cornerstone Health Enablement Strategic Solutions, LLC + Ian Atchison + infosec&chessmso.com +45636 + Robit SAS + Maurizio Bovo + info&robit.com +45637 + Dicks Garage + Richard Beck + dfb83&dicksgarage.com +45638 + AVOLITES LTD + ADAM PROFFITT + adam&avolites.com +45639 + Chen-Hung Chen + Todd Chen + luvboa&gmail.com +45640 + Fujitsu Taiwan Ltd. + Todd Chen + todd.chen&tw.fujitsu.com +45641 + SOARNEX Technology Corporation + Ethan Wang + ethan.wang&soarnex.com +45642 + SITRINICS CAMS, JSC + Alexey Zharkov + azharkov&sitronics-cams.com +45643 + MARETON d.o.o. + Matija Markovic + mareton&mareton.hr +45644 + Tom Maher + Tom Maher + tom_maher&outlook.com +45645 + thinkCSC, Inc + Jacob Delgado + jacob&thinkcsc.com +45646 + UK Shared Business Services Ltd + Colin McDerment + colin.mcderment&uksbs.co.uk +45647 + Grant Thornton UK LLP + Adam Stevens + adam.stevens&uk.gt.com +45648 + Bridgeway Software, Inc. + Swarupa Kota + swarupa.kota&bridge-way.com +45649 + Sousou Industries + Cochise Ruhulessin + cochise.ruhulessin&sousouindustries.com +45650 + Fibersystem AB + Anders Fallheden + Anders.Fallheden&fibersystem.se +45651 + Layer3 TV, Inc. + Nobuo Matsushita + nobuo&layer3tv.com +45652 + New Jersey State Government (Office of Information Technology) + Ricardo J Oliveira + OIT-DNSadmin&oit.nj.gov +45653 + Compression Services + Harlan Lane + bt.alerts.domainnames&exterran.com +45654 + LogicVein, Inc. + Leo Bayer + leo&lvi.co.jp +45655 + CHUNG-HSIN ELECTRIC & MACHINERY MFG. CORP.
+ Alice Li + c0000&chem.com.tw +45656 + Vector-V, Law Firm + Akim Korolev + 0255838&gmail.com +45657 + Syncbak + Timothy Kriz + tim.kriz&syncbak.com +45658 + P3 GmbH & Co KG + Markus Kalmbach + helpdesk&p3-group.com +45659 + LPO OUI Dev Teams + Tristan Le Toullec + tristan.letoullec&univ-brest.fr +45660 + Pentandra Research Solutions, Inc. + Chris Chapman + netadmin&pentandra.com +45661 + Instec Inc. + LH Ji + lh.ji&instec.com +45662 + Intelligence Ambiante + Frédéric Menut + frederic.menut&laposte.net +45663 + Hochschule Merseburg + Dirk Hube + netadmin&hs-merseburg.de +45664 + Guangzhou Hugen Electronic Technology Co .Ltd + wujinming + 304252844&qq.com +45665 + Biroteh Ltd + Mr. Uldis Jaudzems + info&biroteh.lv +45666 + Ebiid Product and Solutions, S.L. + Santiago Batlle + tech&biid.com +45667 + Single Digits + Bob Sullivan + bsullivan&singledigits.com +45668 + Midwest Microwave Solutions Inc. + Michael Horn + mchorn&mms-rf.com +45669 + CBR Systems Limited + Anthony de Broise + anthony&cbrsystems.co.uk +45670 + tcc construction + tim clapp + timclapp700&gmail.com +45671 + Nick Sverdel + Nick Sverdel + NickSverdel&gmail.com +45672 + HARVL + Herry Darmawan + herry&harvl.co.id +45673 + Smith & Nephew + Todd Smith + todd.smith&smith-nephew.com +45674 + Redbird Advanced Learning, LLC + Cory Finnimore + coryfinnimore&redbirdlearning.com +45675 + Evolve IP, LLC + Peter Eisengrein + peisengrein&evolveip.net +45676 + OCHIN + Tyler Applebaum + applebaumt&ochin.org +45677 + Parashift + Peter Lesty + peter&parashift.com.au +45678 + Department of Primary Industries and Mines + Padoongsak Nojit + padoongsak&dpim.go.th +45679 + AGR Abfallentsorgungs-Gesellschaft Ruhrgebiet mbH + Uwe Hildebrandt + uwe.hildebrandt&agr.de +45680 + ORISOFT + Dong-Won Kim + 9112027&daum.net +45681 + Polska Spolka Gazownictwa sp. z o.o. + Andrzej Nosek + andrzej.nosek&psgaz.pl +45682 + Michigan Education Special Services Association + Darryl Castillo + dcastillo&messa.org +45683 + Flexpay AB (Benify) + Marcus Wallgren + ops&benify.com +45684 + Dübon Engineering GmbH + Matthias Dübon + matthias.duebon&duebon-engineering.de +45685 + AllCreator Co., Ltd. + Toshihide Hashimoto + toshihide.hashimoto&allcreator.net +45686 + Ratel Technologies + Abhinav Sahrawat + abhinav&rateltechnologies.com +45687 + oneM2M + Gerry McAuley + gerry.mcauley&etsi.org +45688 + Limone Fresco Limited + Michael Laccetti + michael&limone.me +45689 + Evolveum + Radovan Semancik + semancik&evolveum.com +45690 + bitbone AG + Sebastian Scheuring + technik&bitbone.de +45691 + Javacraft LLC + Michael Swiercz + iana&java-craft.org +45692 + Dantherm Power A/S + Morten Brun Madsen + mma&dantherm.com +45693 + arvato Systems S4M GmbH + Harald Hoyer + harald.hoyer&s4m.com +45694 + Comvergence Pty Ltd + Paul Arnold + paul.arnold&comvergence.com.au +45695 + CHAPS spol. s r.o. + Petr Hlavac + hlavac&chaps.cz +45696 + Mitchel Berberich Software GmbH + Mitchel Berberich + info&mbsw.com +45697 + Computer Sciences Corporation + Garvita Singh + gsingh41&csc.com +45698 + E-KENT Teknoloji ve Odeme Sistemleri San. ve Tic A.S + Huseyin Erdem Oguz + erdem.oguz&e-kent.com.tr +45699 + JazzHR (formerly 'Hireku, Inc.') + Sean McAfee + sean.mcafee&jazzhr.com +45700 + Transylvania University + Stephen Poynter + gadmin&transy.edu +45701 + IntegraCore, LLC + J.R. Maycock + jrmaycock&integracore.com +45702 + China Transinfo Technology + Wang Gang + wanggang02&chinatransinfo.com +45703 + Gemeindewerke Ga.-Pa.
+ Albert Grassl + it&gw-gap.de +45704 + Landstinget i Kalmar Län + Jens Vegeby + jens.vegeby&ltkalmar.se +45705 + FINT AB + Aengeln Englund + aengeln&fintab.se +45706 + Bluer Medical Technology + Tommy Bluer + bluermedical&163.com +45707 + ITCEG S.A.S + Alejandro Valdes Arcila + alejandro.valdes&itceg.com.co +45708 + Lively Arts Productions LLC + Director of Information Systems + infosys&livelyarts.net +45709 + Seibels + Nick Agostas + nick.agostas&seibels.com +45710 + Mobie Oy + Joni Eskelinen + oid-admin&mobie.fi +45711 + Matrix Electronica S.L. + Jose Gallego + jgallego&matrix.es +45712 + SERFIM T.I.C. (formerly 'SERELEC SAS') + WAUTHIER Valérian + vwauthier&serfimtic.com +45713 + CONSULDATA Advisors di Massimo Del Barba + Massimo Del Barba + maxdb&consuldataweb.net +45714 + Logitravel SL + Raimund Sacherer + rs&logitravel.com +45715 + Empresa Municipal de Informática S.A. - IPLANRIO + Bruno Furtado + bruno.furtado&rio.rj.gov.br +45716 + ARGE Rundfunk-Betriebstechnik + Christian Pohle + christian.pohle&rbt-nbg.de +45717 + Sensu Enterprise + Sean Porter + support&heavywater.io +45718 + TJH Systems Pty Limited + Trevor Harwood + trevor&tjhsystems.com.au +45719 + inQool a.s. + Matus Zamborsky + zamborsky&inqool.cz +45720 + Chaos Computer Club Mainz e.V. + Tobias Hachmer + admin&lists.cccmz.de +45721 + Shenzhen SONTU Medical Imaging Equipment Co.,LTD + qiuhongfa + qiuhongfa&sontu.com.cn +45722 + Hamburg Port Authority AoeR + Sean Crane + pen&hpa.hamburg.de +45723 + Cirrent + Robert Conant + Rob&cirrent.co +45724 + FIDO Alliance, Inc. + Debbie Mac + debbie&fidoalliance.org +45725 + ABB Xiamen Low Voltage Equipment Co.,Ltd + Mike-Wulin Weng + mike-wulin.weng&cn.abb.com +45726 + ecava sdn bhd + Ti Tiau Kiem + tkti&ecava.com +45727 + HDWIFI TECHNOLOGIES + Byron del Castillo + byron&hdwifitech.com +45728 + JunoTele Solutions Pvt. Ltd + Sudhir Nayak + sudhir&junotele.com +45729 + Roedl IT Operation GmbH + Adam Bielak + adam.bielak&roedl.com +45730 + Geneberg + Holger Geneberg + pki&geneberg.net +45731 + WOOBE s.a.s. + bernard HAUZEUR (Woobe CTO) + bhauzeur&woobe.fr +45732 + TIM AG + Nico Rathke + Nicor&tim.de +45733 + Frank Wagner Holding Hanseatische Management GmbH + Arne Quante + arne.quante&fw-holding.de +45734 + STARNET s.r.o. + Milan Cizek + milan.cizek&mail.starnet.cz +45735 + easyset + Jin Young Kim + skyblue&easyset.co.kr +45736 + Utilisoft Ltd + Philip Littler + philip.littler&utilisoft.com +45737 + brokentech.ca + Shahrad Rezaei + shahrad&rezaei.io +45738 + Aspirational Cognition LLC + Greg Gaskill + aboron&aspirationalcognition.com +45739 + winfonet.eu + Nils Guenther + n.guenther&winfonet.eu +45740 + Cirrus Identity, Inc + Ian Tegebo + ian.tegebo&cirrusidentity.com +45741 + Entensys + Alexander Kistanov + akistanov&entensys.com +45742 + ImageWare Components GmbH + Guido Albers + albers&imageware.de +45743 + FIBERLAB + WS Jung + hifiberlab&daum.net +45744 + GGExo + Gergely Pollak + shuzirra&gmail.com +45745 + Care Zone Inc.
+ Walter Smith + walter&carezone.com +45746 + University of Rochester Medical Center + Mario Sbrocco + oidadmins&urmc.rochester.edu +45747 + Wuhan Rui Ying Tong Network Technology Co., Ltd(China) + pitao + ptao&whyktx.com +45748 + Thüga SmartService GmbH (formerly 'Thüga MeteringService GmbH') + Jörg Stelzle + it&smartservice.de +45749 + Evolving Models + Konstantin Schaefer + ks&kschaefer.eu +45750 + gerthoux + Luca Gerthoux + luca&gerthoux.net +45751 + Hibernating Rhinos + Oren Eini + ayende&ayende.com +45752 + connecT EDV-Vertriebs GmbH + Jens Dimter + info&connect-edv.de +45753 + winterdrache.de + Matthias S. Benkmann + msb&winterdrache.de +45754 + VoipOtago Ltd + Kieren Black + kieren&voipotago.co.nz +45755 + 2memory LTD + Alexey Tsareov + a.tsareov&2memory.com +45756 + MicroLAB Systems + Pavel Semyonov + pavel.td.amc&mlabsys.com +45757 + FUCOM + Shahzad Haider + shahzad.haider&fucom.com +45758 + IPCOMM GmbH + Edgar Zaiser + Edgar.Zaiser&ipcomm.de +45759 + Signicat AS + Harald Stendal + harald.stendal&signicat.com +45760 + Kove IO Inc. + Andy Poling + logistics&kove.com +45761 + Celab Communications AB + Daniel Tagesson + daniel.tagesson&celab.se +45762 + Platan spółka z ograniczoną odpowiedzialnością sp.k. + Karol Marczyński + Karol.Marczynski&platan.pl +45763 + EMG Systems Sp. z o.o. + Piotr Swebodzinski + emgadmin&emg-systems.com +45764 + CH Des pays de Morlaix + CREACH Gilles + gcreach&ch-morlaix.fr +45765 + nohl.eu + Johannes Nohl + webmaster&nohl.eu +45766 + Serval CA + Vaclav Rous + admin&serval.cz +45767 + Succeed Management Solutions, LLC + Moses Stickney + moses.stickney&succeedms.com +45768 + telecoms forensics equipment ltd + brian parsons + brianparsons&subpico.com +45769 + Farm House Technologies, LLC. + Brent Fiegle + brent&farmhousetech.com +45770 + Digital Loggers, Inc. + Martin Bodo + mbodo&digital-loggers.com +45771 + weatherlights.com + Hauke Hasselberg + hauke&weatherlights.com +45772 + TauRes Gesellschaft für Investmentberatung mbH + Christoffer Rumohr + c_rumohr&taures.de +45773 + Liebherr-IT Services GmbH + Martin Rogg + martin.rogg&liebherr.com +45774 + SHS SERVICES GmbH + Christian Jung + iana&shsservices.org +45775 + Tornado Network Operating System + Reyk Floeter + reyk.floeter&esdenera.com +45776 + Belden Deutschland GmbH + Markus Jung + markus.jung&belden.com +45777 + Fingerprint Technologies SAS + Jean-Christophe CUENOD + jcc&fingerprint.fr +45778 + exp Services Inc. + Sean Chard + sean.chard&exp.com +45779 + Mosaic Life Care at St. Joseph Medical Oncology + Shana Tauai + shana.tauai&mymlc.com +45780 + Norsk Sykepleierforbund + Pål Kristen Rønnevik + Pal.Kristen.Ronnevik&sykepleierforbundet.no +45781 + Masaryk Memorial Cancer Institute + David Nejeral + spravci&mou.cz +45782 + Metacores Sp. z o.o. + Sebastian Smyczyński + s.smyczynski&metacores.com +45783 + TEKIN + Raphaël AUTALE + beii&tekin.fr +45784 + Novarad, Corp + Carlo Okolowitz + dns&novarad.net +45785 + bangj, LLC + Tom Pusateri + pusateri&bangj.com +45786 + SAT-AN CableNet SE + Milan Krcmar + krcmar&sat-an.net +45787 + WISTE + Wilhelm Stejskal + w.stejskal&wiste.at +45788 + Agustus & Ahab, Inc. 
+ Sam Aarons + sam&agustusandahab.com +45789 + Vicon + Kobi Shnaidman + kobi.shnaidman&vicon-security.com +45790 + Crystal GmbH + Michael Ratcliffe + michael&crystal.de +45791 + Neos Ventures GmbH + Michael Ratcliffe + mratcliffe&neosventures.net +45792 + Flohr IT + Nico Flohr + ---none--- +45793 + Joint Stock Company "PK Kupchino" + Antipov Evgeniy + antipov&pk-kupchino.spb.ru +45794 + Safe Creative + Jorge Guillo Pitarque + development&safecreative.org +45795 + Primo1D SA + Dominique Vicard + dominique.vicard&primo1d.com +45796 + Umea Municipality + David Krasowski + hostmaster&umea.se +45797 + Viettel Group + Le Quang Son + sonlq4&viettel.com.vn +45798 + Enovae BV + Sven Berkvens-Matthijsse + s.berkvens&enovae.nl +45799 + Csaw BV + Sven Berkvens-Matthijsse + sven&csaw.nl +45800 + Zorgdoc Nederland BV + Sven Berkvens-Matthijsse + s.berkvens&zorgdoc-nederland.nl +45801 + TARASOFT SRL + PAOLO TARANTOLA + info&tarasoft.eu +45802 + Ubee Interactive Corp. + Luke Liu + luke.liu&ubeeinteractive.com +45803 + netcruz inc. + Yong Soo Park + yspark&netcruz.co.kr +45804 + ERICA + Michel Marchandise + webmaster&erica.fr +45805 + OCMW Temse + Leander Quintelier + ict&ocmwtemse.be +45806 + genealogie.com + Xiao Hui LOO + loo.xiaohui&genealogie.com +45807 + Starbucks Coffee Company + Greg Onstot + gonstot&starbucks.com +45808 + Global Tecnologia LTDA. + Zundir Buzzi + zundir&globaltecseg.com.br +45809 + Beijing Mipay Technology Co., Ltd. + junqi lin + linjunqi&xiaomi.com +45810 + Binary Rock + Dan McCormack + site-iana&binrock.net +45811 + Istvan Puzsar e.v. + Istvan Puzsar + snmp&puzsar.hu +45812 + ILoca Services, Inc. + Gus Caparell + gus&semitrailers.net +45813 + PI '97 Bt. + Ferik Mihály + femisi&upcmail.hu +45814 + Waukesha-Pearce Industries, Inc. + C Cheung + admin&wpi.com +45815 + Sourcewise + Network Engineer + networkengineer&sourcewise.co +45816 + Bruce R. Smith Limited + Contact removed 2021-03-11 + ---none--- +45817 + Inacomp Consultores SA + Alexander Osorio + alexander.osorio&inacomp.com.ve +45818 + Passaic County Technical Institute + Roberto D. Rubino + rob&pcti.tec.nj.us +45819 + Sven Anders + Sven Anders + pen-contact17&sven.anders.hamburg +45820 + PROFIPRINT spol. s r.o. + David Makovský + makovsky&profiprint.cz +45821 + 21st century Oncology, LLC-Gabriel Domenech MD + JUAN M RODAS + Juan.Rodas&21co.com +45822 + Les Ambassadeurs Club + Carl van Eijk + carlvaneijk&lesaclub.com +45823 + Orient Technologies LTD + Colin Leister + c.leister&orientdb.com +45824 + System One Noc & Development Solutions + David Batanero + dbatanero&systemonenoc.com +45825 + One-Click Softworks + David Mason + mason&pr-team.org +45826 + Singlewire Software, LLC + Jerry Steinhauer + iana&singlewire.com +45827 + Beijing JN TASS Technology Co.,Ltd. + yanfeng wang + wangyanfeng&tass.com.cn +45828 + Kanhan Technologies Limited + Chu Chi Hang + chihang.chu&kanhan.com +45829 + preis24.de GmbH + Nicolas Neumann + nicolas.neumann&preis24.de +45830 + Dynniq UK Ltd (formerly 'Imtech Traffic & Infra UK Ltd') + Steve Gray + steve.gray&dynniq.co.uk +45831 + Argo Technologie SA + Denis Nuja + denis&argotech.io +45832 + I-ROSE d.o.o. + Janez A. Korun + office&i-rose.si +45833 + Hoffmann Foerdertechnik GmbH + Dmitry Sintschilin + dmitry.sintschilin&liftket.de +45834 + Tieto Finland Oy + Hannu Tirkkonen + hannu.tirkkonen&tieto.com +45835 + Tangara Tagentis Inc. + Steeve Stooke + steeve.stooke&tangaratagentis.com +45836 + Varonis System inc. 
+ Hanni Barry + hbarry&varonis.com +45837 + Orage.io + Nicolas BAUDEZ + okhin&okhin.fr +45838 + BELIMO Automation AG + Andreas Mühlebach + andreas.muehlebach&belimo.ch +45839 + NETEVEN + thomas SIMON + tsimon&neteven.com +45840 + Lester Electrical of Nebraska, Inc. + Spencer Stock + Marketing&LesterElectrical.com +45841 + System One Noc & Development Solutions S.A + Juan Villegas + jvillegas&systemonenoc.com +45842 + Andrew Stemen + Andrew Stemen + andrew&stemen.net +45843 + WebNet Broadband + Nicholas J Brockway + noc&login2us.net +45844 + Uwe Gradenegger + Uwe Gradenegger + uwe&gradenegger.eu +45845 + EchoPixel, Inc. + Sumeet Khandelwal + khandelwal&echopixeltech.com +45846 + StorPool Storage AD + Anton Todorov + oid&storpool.com +45847 + ComputerTel LTD + Johannes van den Berg + development&computertel.co.uk +45848 + VISHMAIL + Joerg-Peter Hempel + mail&jphempel.eu +45849 + MMCVEN Mail + Teudis Naranjo + teudis2008&gmail.com +45850 + Eternity's Touch, Inc. + Eric VanDamme + eric.vandamme&eternitystouch.com +45851 + Unfallkasse Baden-Wuerttemberg + Christoph Haas + christoph.haas&ukbw.de +45852 + Miami Hematology and Oncology Associates + Wendy Gibbs + wendygibbs&hemonc101.com +45853 + Jaguar Land Rover Limited + JLR IT Networks + ITNetworks&jaguarlandrover.com +45854 + Cell Technology Ltd. + Clarence KI + kki&cell-technology.net +45855 + Intracom Asia Co., Ltd. + Archie Liu + archieliu&icintracom.com +45856 + Mie Prefectural Government + Masaya Okajima + network&pref.mie.jp +45857 + Kailow Graphic A/S + Boris Dreyer + sysadmin&kailow.dk +45858 + Cifernet Inc. + Neco Fang + neco&cifernet.net +45859 + Pylo.Net + Pylo.Net Hostmaster + hostmaster&pylo.net +45860 + Exel Technology Group + Wayne Barr + wayne.barr&etgl.co.uk +45861 + Research Associates of Syracuse, Inc. + Michael Swiercz + iana&ras.com +45862 + Apost Ltd. + A. Khodorov + a.khodorov&corpbox.ru +45863 + La Folie Boutique + Angie Evers + angieevers&me.com +45864 + Direction Generale de l'Education et des Enseignements + Bureau De l'Informatique + informatique&education.pf +45865 + Shouei electric,co + Shinei Ueki + ueki-shinei&shoueidenki.co.jp +45866 + GlobalPME + Diego Gonzalez + diego.gonzalez&globalpme.com +45867 + LUKOIL Bulgaria Ltd. + Borislav Cheshmedzhiev + bvc&lukoil.bg +45868 + Skywire Technologies + Leroy Simonsen + technical&skywire.co.za +45869 + POET GmbH + Alexander Ludwig + Alexander.Ludwig&poet.de +45870 + Fraunhofer Institute for Secure Information Technology SIT + Levona Eckstein + Levona.Eckstein&sit.fraunhofer.de +45871 + FATECH Electronic + Mohammad Reza Akhoundi + m.reza&fatechelectronic.com +45872 + Institut National de l'Audiovisuel + Ludovic Bey + lbey&ina.fr +45873 + Instituto Federal Catarinense + Marcos Antônio Malfatti + cirt&ifc.edu.br +45874 + Coencorp + Serge Kopikov + serge&coencorp.com +45875 + Edward H. Kaplan,M.D. and Assoc. + Sandra Hatzopoulos + sandy&nsoncology.com +45876 + Jetson Systems, LLC + Elizabeth A. Dexter + edexter&jetsonsys.com +45877 + Intelligent Homes + Sam Roberts + iana.pen1&gmail.com +45878 + vesperal.eu + Stephane Martin + stephane.martin&vesperal.eu +45879 + Université de Lille + Sylvain DEROSIAUX + sylvain.derosiaux&univ-lille3.fr +45880 + West-Ward Pharmaceuticals Corp + Steven Bolan + sbolan&west-ward.com +45881 + 2Lconsult + Laurent Lesage + laurent&2lconsult.be +45882 + Global Voice Group, S.A. + James J. Claude + james&globalvoicegroup.com +45883 + EagleBurgmann Germany GmbH & Co. 
KG + Mazen Moussli + mazen.moussli&de.eagleburgmann.com +45884 + ScaleFT Inc (formerly 'AuthClub Inc') + Paul Querna + paul.querna&scaleft.com +45885 + Cesbo Ltd. + Andrei Dyldin + and&cesbo.com +45886 + Connect-Omsk, Ltd. + Samoseev Pavel + conver-pavel&yandex.ru +45887 + Arke Telekom Ltd. Sti. + Saygun Onay + saygun&arketel.com +45888 + Jawset Visual Computing + Jascha Wetzel + jascha&jawset.com +45889 + TechMIC electronics + Michael Vermeulen + michael&techmicelectronics.co.za +45890 + Sys-City Co., Ltd. + Tun Win Naing + twnaing&sys-city.com +45891 + Pandora A/S + Jan Lynegaard Kragh, Group IT Operations + admin&pandora.net +45892 + Combination AB + Mikael Johansson + it&combination.se +45893 + PSTech + Marko Potkonjak + valentina.savic&pstech.rs +45894 + SysAdmin-CR Sociedad Anonima + Danny Castillo Segura + danny&sysadmin-cr.com +45895 + SysAdmin-CR Sociedad Anonima + Danny Castillo Segura + danny&sysadmin-cr.com +45896 + Paperless Pipeline, LLC. + Rajesh Dhawan + rajesh&paperlesspipeline.com +45897 + M H Corbin Inc + Brandon Whitaker + brandon.whitaker&mhcorbin.com +45898 + KEYLOGIC AB + Martin Liesén + martin&keylogic.se +45899 + CASwell Inc + Pomah Yen + pomah.yen&cas-well.com +45900 + WEBER Schraubautomaten GmbH + Peter Much + IT&weber-online.com +45901 + Kellerman Networks + Ryan Kellerman + rkellerman&westmonroepartners.com +45902 + Kellerman Networks + Ryan Kellerman + rkellerman&westmonroepartners.com +45903 + dachsbau + Stefan Kauerauf + mail&stefankauerauf.de +45904 + National Foods Ltd. + Muhammad Abdullah Saqib + muhammad.saqib&nfoods.com +45905 + Obihai Technology, Inc. + Sherman Scholten + sherman&obihai.com +45906 + Selinko S.A. + Jonathan Giannuzzi + dev&selinko.com +45907 + PolyVirtual Corporation + Samuel Li + sam.li&polyvirtual.com +45908 + ITDO Inc. + Kazuhiko Oota + pen-regist&itdo.jp +45909 + SHENZHEN ANKKI TECHNOLOGIES CO., LTD + WANGCAI + wangc&ankki.com +45910 + haiyi + jingtao + jingtao&haiyisoft.com +45911 + SynerMed Labs + Hossein Aarabi + haerabi&synermed.com +45912 + Universitas Brawijaya + Abdurrohman Muhammad + abdurrm&ub.ac.id +45913 + M2 Group Pty Ltd + Leon Kyneur + leon.kyneur&m2.com.au +45914 + Cetera Financial Group + Matt Lehman + matthew.lehman&cetera.com +45915 + PromonLogicalis + Eduardo Henrique Tessarioli + eduardo.tessarioli&br.promonlogicalis.com +45916 + Quest Payment Systems + Rupert Enwright + ruperte&questps.com.au +45917 + NFRI + Woongryol Lee + wrlee&nfri.re.kr +45918 + Network Canary + Chris Watt + admin&networkcanary.com +45919 + Planevision Systems GmbH + Gunther Kruse + snmp.pen&planevision.de +45920 + Humble Carrier + Johnson Tellewoyan + jmt82g2&yahoo.com +45921 + Ministry of Agriculture Latvia + Armands Smits + armands.smits&zm.gov.lv +45922 + DunaNet-DeviceManagement LLC + Laszlo KUPOR + penreg&dunanet.hu +45923 + TVN S.A. + Witold Oleksiak + witold.oleksiak&tvn.pl +45924 + NOU IPAP + Alexander E. Zabiralov + admin&ipap.ru +45925 + MPS GmbH + Johannes Klug + johannes.klug&chemocompile.de +45926 + OpenZAB.com + Grzegorz Grabowski + grzegorz&mbs-it.pl +45927 + Ahsay Systems Corporation Limited + Scherring Chong + mispro&ahsay.com +45928 + MORU Industrial Systems Co.,LTD. + Jisu-Jang + jjs1127&moru.com +45929 + Plum Sp. z o.o. 
+ Andrzej Rusiecki + grupa_it&plum.pl +45930 + Landwirtschaftskammer Niedersachsen + Axel Blees + webmaster&lwk-niedersachsen.de +45931 + Patterson Companies + Kirk Haddad + Kirk.haddad&pattersoncompanies.com +45932 + Walcoe + Victor Walcoe + iana&walcoe.com +45933 + Kuratorium für Dialyse und Nierentransplantation e.V. + Thomas Schickedanz + thomas.schickedanz&k2it-services.de +45934 + Fiber Mountain Inc + Phuc Ly + phuc&fibermountain.com +45935 + Main Line Oncology Hematology Associates + Jaclyn Callahan + jaclyncal&gmail.com +45936 + Das Kommunalunternehmen des Landkreises Wuerzburg + Andreas Korbmann + edv&kommunalunternehmen.de +45937 + M & C Enterprises, Ltd. + Doc Miller + support&mc-enterprises.com +45938 + Intelligent Visibility, Inc. + Michael Cotrone + mikec&intelligentvisibility.com +45939 + TRIMET S.A. + ELISEO JUSTO + eliseo.justo&trimet.com.ar +45940 + Beckman Library + Keith Beckman + kbeckman&becknet.com +45941 + PM CONSULTING + Papa Malick MBOW + pm.mbow&gmail.com +45942 + ezVoice Telecom + Danilo Martins + danilo&ezvoice.com.br +45943 + Londelec UK Ltd + Alan Kay + tech&londelec.com +45944 + IT Telecom co.,Ltd + Hyunsoon Lee + aromacrony&it-telecom.co.kr +45945 + Friedrich-von-Bodelschwingh-Schule + Timo Widdau + Timo.Widdau&rsplus-puderbach.de +45946 + dynexo GmbH + Florian Reinholz + info&dynexo.de +45947 + Kartal Belediye Baskanligi + Murat Aksoy + sistemyonetimi&kartal.bel.tr +45948 + FitnessFirst Germany + Rene Henzinger + rene.henzinger&fitnessfirst.de +45949 + RF elements s.r.o. + Miroslav Vrabel + vrabel&rfelements.com +45950 + Cogo Labs + Ken Koldewyn + kkoldewyn&cogolabs.com +45951 + RDK Management LLC + Peter Sheedy + oid_admin&rdkcentral.com +45952 + CTRLTECH S.A. + GUSTAVO RAMIREZ + gustavo.ramirez&ctrltech.com.br +45953 + CyberData Corporation + Cameron Barfield + cbarfield&cyberdata.net +45954 + Truven Health Analytics Inc. + Match Grun + match.grun&truvenhealth.com +45955 + Big Daddy Games, LLC + Mark Trettin + MTrettin&bigdaddygames.net +45956 + 128 Technology + Michael Baj + mike&128technology.com +45957 + Cryptographic Information Services + Kevin Acres + cryptis&cryptis.com.au +45958 + aisubp + Mihail Kutsenko + m.kutsenko&bftcom.com +45959 + Zhongyunwangan Technology (Beijing) Co.,Ltd + Lei Wu + lei.wu&zhongyunwangan.com +45960 + IMS, TU Wien + Johannes Fromm + techn&ims.tuwien.ac.at +45961 + ATGEN SOFTWARE SOLUTIONS LLP + SAGAR MEHTA + SAGAR.MEHTA&ATGENSOFT.COM +45962 + SHOWA TSUSHIN KOGYO INC. 
+ Showa Tsushin + info&stkinc.co.jp +45963 + HHnet + Henner Kruse + henner.m.kruse&fh-stralsund.de +45964 + Dispersive Technologies + Brian Burns + bburns&dispersivegroup.com +45965 + Aleris International Inc + Christopher Eberle + Christopher.Eberle&aleris.com +45966 + GURI SOFTHOUSE LTDA + Ulisses Thadeu Vieira Guedes + uli1958&hotmail.com +45967 + SWAN Analytische Instrumente AG + Michael Rudolf + michael.rudolf&swan.ch +45968 + perinova IT-Management GmbH + Stefan Wagner + info&perinova.com +45969 + perinova IT-Management GmbH + Stefan Wagner + info&perinova.com +45970 + Böttcher & Collin GbR + Fabian Böttcher + fabianb&bcbangs.com +45971 + CJSC "Transinfoset" + Konkin Andrey + konkin&tsi.ru +45972 + Baptist School of Health Professions + Jason Everling + jeverling&bshp.edu +45973 + Playground Global + Dan Morrill + morrildl&playground.global +45974 + SPINNER GmbH + Michael Lege + m.lege&spinner-group.com +45975 + Securities Finance Trust Company + David Griswold + dgriswold&eseclending.com +45976 + Intema Comunicaciones + Agustín Ivorra + aivorra&intema.biz +45977 + Platform.sh + Damien Tournoud + damien&platform.sh +45978 + Vision 95 Ltd. + Zoltan Kakuk + zoltan.kakuk&vision95.hu +45979 + Intevac, Inc. + Brenda Thrasher + blthrasher&intevac.com +45980 + S.Network Communications Ltd. + Denis Ignatov + denis&snc.ru +45981 + DroidInActu + Andy Aspell-Clark + andyaspellclark&gmail.com +45982 + Neliva + Maksim Goleta + maksim&neliva.com +45983 + Airbus DS Limited - MIPN + Andy Aspell-Clark + Andy.Aspell-Clark&airbus.com +45984 + OOO Stels + Vladimir Gorpenko + vgo&stels.ru +45985 + CyberStream Ltd + Nikos Lykouropoulos + nikos.lykouropoulos&cyberstream.gr +45986 + Kramer Electronics Ltd. + Nahum Aharon + naharon&kramerel.com +45987 + Rete telematica del CNR di Pisa + Alessandro Mancini + alessandro.mancini&iit.cnr.it +45988 + SKYCOM + Hyeok-Hee Lee + hhlee&skycom.ne.kr +45989 + SKYCOM + Hyeok-Hee Lee + hhlee&skycom.ne.kr +45990 + Brueckner-Werke KG + Jan Beers + edv&bwhh.de +45991 + KLENK HOLZ AG + Winfried Hofmann + edv&klenk.de +45992 + Grabeuh Corp. + Javelini + javelini-xwyn9p&yopmail.com +45993 + ITSMOS + Nestor Charczuk + nestor.charczuk&itsmos.com +45994 + Prink s.r.l + Paolo De Vito + paolo&prink.it +45995 + Posteo e.K. + Patrik Löhr + iana&posteo.de +45996 + Chora A/S + Johnny Vestergaard + chora&chora.dk +45997 + Colorado Center for Gynecologic Oncology + Kelly Sullivan + kelly&ccgynonc.com +45998 + JENSEN-GROUP + Fabian Lutz + it-ch&jensen-group.com +45999 + The Dysongang + Jon Dyson + jon&thedysongang.co.uk +46000 + Third Light Ltd. + Dominic Benson + dominic.benson&thirdlight.com +46001 + Vektra Advanced Engineering + Evan Phoenix + evan&vektra.com +46002 + 6Harmonics Inc. + Alexander Miller + alexander.miller&6harmonics.ca +46003 + NAUTILUS INFOTECH Co., Ltd. + Cheng Chao + cheng.chao&nautilus.com.tw +46004 + 50Hertz Transmission GmbH + Thomas Naumann + thomas.naumann&50hertz.com +46005 + SAG Services AG (formerly 'SAG Informatik AG') + Anthony Uk + sysadmin&sag-ag.ch +46006 + Greetz BV + Jan van der Veen + jan.van.der.veen&greetz.com +46007 + Augustinum gGmbH + Thorsten Mueller + t.mueller&augustinum.de +46008 + Codan A/S + Alan Burton-Woods + abw&codan.dk +46009 + Advanced Hematology & Oncology Group of PR + Maria Rincon + advonco&gmail.com +46010 + Westfalen Weser Netz GmbH + Rudolf Hessel + rudolf.hessel&ww-energie.com +46011 + cloudVM Inc. + Matthew Gamble + mgamble&cloudvm.io +46012 + Redhot OOO + Andrey Petrovsky + admin&redhot.su +46013 + Yhat, Inc. + Ryan J. 
O'Neil + ryan&yhathq.com +46014 + KCA Deutag + Mark McRitchie + mark.mcritchie&kcadeutag.com +46015 + PrimoTechSolution S.R.L. + Denis Shapovlaov + shapovalovdenis&gmail.com +46016 + Leonton Technologies, Co. Ltd. + Kun-Lung Liao + clone.liao&leonton.com +46017 + Research Institute of Systems Planning, Inc. / ISP + Tomoya Saito + stt&isp.co.jp +46018 + Thoralf Tietboehl (ttcn) + Thoralf Tietboehl + thoralf.tietboehl&outlook.com +46019 + DSPG Ltd. + Martin Holmes + hostmaster&dspg.co.uk +46020 + Cardio Sistemas Coml. Indl. Ltda. + Ricardo Miglino + webmaster&cardios.com.br +46021 + Thales Belgium SA + Eric Viseur + eric.viseur&be.thalesgroup.com +46022 + Deep S.R.L + Fernando Ghioldi + deep&deep-ing.com +46023 + High Desert Oncology + Veronica Santos + vronika21&yahoo.com +46024 + W&W-AFCO STEEL LLC + Todd Park + tpark&wwsteel.com +46025 + Kahana Technology, LLC + Jason Woodrich + domain&kahanatech.com +46026 + Southern Oncology Hematology Associates + Trish Morgan + pam&southernoncology.net +46027 + OFICINA NACIONAL DE PROCESOS ELECTORALES + Fernando Antonio Zapata Miranda + fzapata&onpe.gob.pe +46028 + Play Consulting + Alastair McFarlane + alastair&play-consult.co.uk +46029 + Serenergy A/S + Christian Rostgaard Andersen + cra&serenergy.com +46030 + iRespond + Identity Administrator + info&irespond.org +46031 + Taconza LLC + Scott Deerwester + scott.deerwester&taconza.com +46032 + AccelStor Technologies Ltd + Identifier Management + identifier&accelstor.com +46033 + Cavium Inc. + Biju Abraham + babraham&caviumnetworks.com +46034 + Ramakrishna Mission Vivekananda University + Swami Sarvottamananda + vivekananda.university&gmail.com +46035 + Company Crypton Ltd. + Dmitry Gorokh + d.gorokh&crypton.com.ua +46036 + IT CARD Centrum Technologii Platniczych SA + Radoslaw Glowka + network&itcard.pl +46037 + City of Pearland + Justin Arnold + jarnold&pearlandtx.gov +46038 + Layer8 + Fernando Cardoso + fernando.cardoso&layer8.pt +46039 + Dynamist AB + Henrik Holmboe + registry&dynamist.se +46040 + Freestyle Technology Pty. Ltd. + Jamie Chard + jamie.chard&freestyletechnology.com.au +46041 + W.B. Hunt Co., Inc. + Kit LaMark + support&wbhunt.com +46042 + SpacePath Communications + Matt Milston + matt.milston&space-path.com +46043 + element j + Jason Woodrich + jason&elementj.net +46044 + Cloudbyte + Senthilkumar Eswaran + senthilkumar.e&cloudbyte.com +46045 + BYTEDANCE LTD. + Wang Jian + wangjian&bytedance.com +46046 + Branislav Manic Law Office + Branislav Manic + branislav.manic&iplaw.co.rs +46047 + paulista informatica ltd + antonio carlos oliveira + acoliveira&terra.com.br +46048 + Attivo Networks + Srikant Vissamsetti + srikant&attivonetworks.com +46049 + SMSEagle + Radoslaw Janowski + hello&smseagle.eu +46050 + Remod Oy + Juho Juopperi + juho.juopperi&remod.fi +46051 + STAGE2015 + STAGE Test + zabbixmaquette&gmail.com +46052 + Neotys + Benoit Derouet + benoit.derouet&neotys.com +46053 + AVEBE U.A. + Erik Dijck + erik.dijck&avebe.com +46054 + NBS Ltd. 
+ Evgeniy Palenov + palenov&scsbm.ru +46055 + Hytera Broadband Products + Zhigang Wang + zhigang.wang&hytera.com +46056 + OOO"NPF MULTIOBRABOTKA" + AROV MARK + amv&multio.ru +46057 + Hong Kong Community Athletic Development + Brad PFEFFER + brad.pfeffer&hkcad.org +46058 + Panda Sports + Brad PFEFFER + brad.pfeffer&pandasports.asia +46059 + Vidamin Ltd + Kari Arvonen + kari.arvonen&vidamin.com +46060 + central florida cancer and blood center + Ravi Koti + ravikoti&aol.com +46061 + Ognios GmbH + Hermann Haslinger + office&ognios.com +46062 + YAVEON AG + IT Management / Peter Reinhardt + itadmin&yaveon.de +46063 + Broala + Seth Hall + seth&broala.com +46064 + AddApptr GmbH + Stefan Scheld + support&addapptr.com +46065 + Cannabis Credit + shaun savage + savages&savages.com +46066 + The DiameterTech Project + Tomi Varhe + tsv&diametertech.com +46067 + Union College - Computer Science Department + Tom Yanuklis + yanuklit&union.edu +46068 + Take-Two Interactive Software, Inc. + Victor Carpetto + ateam&take2games.com +46069 + Deutsches Rotes Kreuz Ortsverein Suedlicher Wonnegau e. V. + Christoph Kling + info&drk-suewo.de +46070 + NDP, LLC (NVEA) + Justin Mercier + justin.mercier&ndpgroup.com +46071 + Chista Ltd. + Ali Nikneshan + Ali&nikneshan.com +46072 + B2B-SOFT + Maksym Chubarov + tycooncmn&gmail.com +46073 + Imbasoft + Philippe Granjal + pgranjal&imbasoft.fr +46074 + ROAMData, Inc. + William C Kincaid + BKincaid&ROAMData.com +46075 + Freshmind Sp. z o. o. + Paweł Krzaczkowski + pawel&freshmind.pl +46076 + Edward A Eichler, Jr., M.D., P.A. + Edward A Eichler + eahackett60&gmail.com +46077 + Inventive IT Services Ltd + Adam Miloszewski + adam&invntvitsrvs.com +46078 + Beijing ForceControl-Huacon Technology Co., Ltd. + Li Da + lid&huacon.com.cn +46079 + HM Electronics, Inc. + Ryan Foley + rfoley&hme.com +46080 + Yara SA/NV + Yves Duquenne + Yves.duquenne&yara.com +46081 + INFODAS Gesellschaft für Systementwicklung und Informationsverarbeitung mbH + Peter Gorski + p.gorski&infodas.de +46082 + AVI + Chris Lockwood + info&avi.com.au +46083 + F&F Computer Anwendungen und Unternehmensberatung GmbH + Stefan Krauthausen + s.krauthausen&ff-muenchen.de +46084 + FACTORYNET AUGUSTA, S.L. + JOSE MARIA MARCO LAZARO + info&factorymail.es +46085 + INFORMATICA Y PROCESOS DE GESTION, S.L. + JOSE MARIA MARCO LAZARO + info&ipgsoft.com +46086 + CONDIS SA + Michel Demierre + mdemierre&condis.ch +46087 + IT Scharschinger + Simon Scharschinger + simon&scharschinger.de +46088 + Sylvain Girod + Sylvain Girod + girod.sylvain&gmail.com +46089 + edisonlee55 + Ming-Chien Lee + edisonlee&edisonlee55.com +46090 + namotronic GmbH + Christian von Stebut + christian&von.stebut.org +46091 + ICM University of Warsaw + Dominik Bartkiewicz + bart&icm.edu.pl +46092 + Ingenieurgesellschaft für Gebäudeautomation mbH + Kristian Virkus + kristian.virkus&inga-hameln.de +46093 + Sistelbanda S.A. + Alvaro del Campo + adelcampo&sistelbanda.es +46094 + Brampton College + Network Manager + networkmanager&bramptoncollege.com +46095 + otrisPROJECTS GmbH + Michael Marsanu + marsanu&otrisprojects.de +46096 + bitbert.com + Robert Gierzinger + robert.gierzinger&bitbert.com +46097 + IPiFony Systems, Inc. + Matthew Hardeman + mhardeman&ipifony.com +46098 + Ericsson Canada Inc. 
+ Steve Baillargeon + steve.baillargeon&ericsson.com +46099 + Community Health Systems-CHS14 + Bob Kearns + bob.kearns&chs14.net +46100 + Maxwell Paper Canada Inc + Charles Herrington + cherrington&maxwellpaper.ca +46101 + Belar Electronics Laboratory Inc + Mark Grant + mark&belar.com +46102 + Art Van Furniture, Inc. + Tony Sayers + oid.admin&artvan.com +46103 + VoIP Integration Inc + Brad Vonarx + brad.vonarx&voipintegration.com +46104 + TVM Capital + Mark Williams + williams&tvmcapital.com +46105 + Alexion Pharmaceuticals, Inc. + Jacob Thampi + oidadmin&alxn.com +46106 + 1-800 Contacts Inc. + Suni Alles + mbx-systemops&1800contacts.com +46107 + Medivation, Inc. + Ody Lupescu + ody.lupescu&medivation.com +46108 + Medivation, Inc. + Ody Lupescu + ody.lupescu&medivation.com +46109 + Credomatic + Journey Tinoco + jtinocoal&credomatic.com +46110 + smartTrade Technologies + Remy Falco + rfalco&smart-trade.net +46111 + Document Knowledge Business Solutions (DKB Solutions SA) + K. Bertin DJAHA + bertin.djaha&dkbsolutions.com +46112 + Russian Institute for Scientific and Technical Information of the Russian Academy of Sciences (VINITI RAS) + Dmitry Sirik + sirik&viniti.ru +46113 + The Moscow Chamber of Advocates + Sergey Zakhryapin + it&advokatymoscow.ru +46114 + «SUMYOBLENERGO» PJSC + Sergii Taraban + stg&soe.com.ua +46115 + Progi-média inc. + James Watkins-Harvey + technical&progi-media.com +46116 + CRYPTONEO + ZEHI Felix + felix.zehi&cryptoneo.com +46117 + SMP Solutions GmbH + Mathieu Habegger + mh&smpsolutions.ch +46118 + SwissMediaPartners AG + Mathieu Habegger + mh&swissmediapartners.ch +46119 + Eden Rock Communications, LLC + Juan Olivo + juan.olivo&edenrockcomm.com +46120 + Alliance-Electronics, Ltd. + Andrey Kireev + info&alliance-electronics.ru +46121 + The Stahura-Brenner Group Inc. + Dale Brenner + dbrenner&stahurabrenner.com +46122 + WrightCCS + Steven Wright + steven&wrightccs.com +46123 + Wiener Boerse AG + Simone Braun + iana&wienerborse.at +46124 + MICI Network Co,.Ltd + George Wu + george&mici.com.tw +46125 + Canadian Pacific + Darcy McCully + darcy_mccully&cpr.ca +46126 + Blockstream + Jonathan Wilkins + root&blockstream.com +46127 + Rock Solid Solutions LLC + Daniel Kauffman + 7fpsf2wxeanbgwykrsrf3bnw29ajedhbbachneh2gdkptep&rocksolidsolutions.org +46128 + City of Biel + Patrick Maeschli + technik.iul&biel-bienne.ch +46129 + MechanicaPark LLC + Ilya Burov + ilya.burov&gmail.com +46130 + Informa Sistemi S.P.A. + Andrea Zaghi + a.zaghi&informasistemi.com +46131 + Hackerspace Kraków + Paweł Kozubal + pawel&hackerspace-krk.pl +46132 + Amdatu + Jan Willem Janssen + iana&lxtreme.nl +46133 + ostermeyer.de + Andreas Ostermeyer + andreas&ostermeyer.de +46134 + Uprava za indirektno oporezivanje + Dijana Jankovic + dijana.jankovic&uino.gov.ba +46135 + MSS Managed Security Services GmbH + Falk Gruenwald + falk.gruenwald&mssgmbh.com +46136 + Steinbeis Embedded Systems Technologies GmbH + Christian Hayer + christian.hayer&steinbeis-est.de +46137 + Association Clandestine + Muri Nicanor + muri&immerda.ch +46138 + HTU Härtetechnik Uhldingen-Mühlhofen GmbH + Helmut Dautermann + h.dautermann&htu-haertetechnik.de +46139 + Baseventure Investing + Craig Setera + craig&baseventure.com +46140 + Laboratoire d'Analyse et de Mathématiques Appliquées + Laurent Marciniszyn + laurent.marciniszyn&math.cnrs.fr +46141 + Ysance + REMI JANOT + remi.janot&ysance.com +46142 + ShenZhen Sinexcel Electric Co.Ltd. 
+ lin ting + ting_lin&sinexcel.cn +46143 + Lazada South East Asia (formerly 'Lazada Tech Hub') + Lim Qi Kang + qikang.lim&lazada.com +46144 + Schmidt & Co., (H.K.) Ltd. Taiwan Branch + James hsu + jameshsu&schmidtelectronics.com +46145 + A. Baggenstos & Co. AG + Enrique Róo Moares + eroo&baggenstos.ch +46146 + INFORM GmbH + Michael Carell + michael.carell&inform-software.com +46147 + American Prepaid VAS LLC + Osmar Coronel + osmar.coronel&americanprepaidvas.com +46148 + Beijing Yoxon Smart Technology Co.,Ltd. + HuangWei + harrid&qq.com +46149 + AO Citibank + Yuriy Stryuchkov + yuriy.stryuchkov&citi.com +46150 + spornkuller.de + Johannes Bauer + wpaptwcvqpfu&spornkuller.de +46151 + Brett A. Lewis + Brett A. Lewis + brett.lewis&gmail.com +46152 + MCHV Software + BOUZOUAD Abderrahmane + abouzouad&mchvsoftware.com +46153 + Budafony + Aurélien Vabre + gm&budafony.hu +46154 + xMob + Pete Foster + oid.admin&xmob.me +46155 + TAKAOKA TOKO CO.,LTD. + MASAKI HOSOYA + hosoya.masaki&tktk.co.jp +46156 + SB Systems Co.,Ltd + Park Min Woo + mwpark&sbsystems.co.kr +46157 + NARI Technology Co., Ltd. + Luo Linglu + luolinglu&sgepri.sgcc.com.cn +46158 + Ohanae + Greg Hauw + greg&ohanae.com +46159 + TselinTransStroy Ltd. + Alexey Chernyavskiy + cts.astana&gmail.com +46160 + GDV Dienstleistungs-GmbH + Ingo Braun + uhd&gdv-dl.de +46161 + Mobile Viewpoint + Gertjan Bult + support&mobileviewpoint.com +46162 + Tuna Technologies Limited + Alex Amsel + alex&tunatech.com +46163 + Neuberger Gebaeudeautomation GmbH + Steffen Hornung + steffen.hornung&neuberger.net +46164 + AS-PC Sarl + Arnaud Brand + abrand&aspc.fr +46165 + Politeknik Negeri Ujung Pandang + Andi Zulfadli Malik + andi.zulfadli&gmail.com +46166 + JETR Consulting + Jeff Truong + jeff_truong&yahoo.com +46167 + Toptranslation GmbH + Stefan Rohde + stefan.rohde&toptranslation.com +46168 + NewGen Interactive Software Corporation + Ryan Hairyes + rhairyes&ng-interactive.com +46169 + International Capital Investment Company + Mo Ismail + mo.ismail&icicusa.com +46170 + Runetwister Publishing + Matt Thomason + matt&warpedcore.net +46171 + HashPlex, Inc + Finn Herzfeld + admin&hashplex.com +46172 + Redcello Ltd. + Jonathan Kerkin + jonathan&redcello.co.uk +46173 + PayPal Inc + Greg King + grking&paypal.com +46174 + SecuLynx Corporation + Masami Hisada + hisada.masami&seculynx.co.jp +46175 + NEWIND Sp. z o.o. + Slawomir Glowacz + iana-pen&newind.pl +46176 + SensorLink + Henrique Santos + sensorlink&sensorlink.com.br +46177 + KYOSAN ELECTRIC MFG. CO., LTD. + KATSUTOSHI FUKUDA + ka-fukuda&kyosan.co.jp +46178 + Faculty of Informatics, TU Wien + Alexander Dorfmeister + trouble&zkk.tuwien.ac.at +46179 + Pikkatech Software Development & Consulting + Stanislav Koncebovski + stanislav&pikkatech.eu +46180 + Gables Engineering, Inc. 
+ Robert Zamora + zamora&gableseng.com +46181 + sanscomp + Sanoop Mallissery + sanoopmallissery&gmail.com +46182 + Golden Grid Systems + Xiaochun Liu + sliu&goldengridsys.com +46183 + Dansk Medicinsk DataCenter ApS + Thomas Larsen + tl&dmdc.dk +46184 + Lumentum.com + Channy Tremblay + channy.tremblay&lumentum.com +46185 + Intelity + Andrew Bailey + andrew.bailey&intelitycorp.com +46186 + cyontec solutions GmbH + Fred Tate + hostmaster&cyontec.de +46187 + AGRAVIS Raiffeisen AG + Nils Wernau + nils.wernau&agravis.de +46188 + MarketFactory Inc + George Cox + support&marketfactory.com +46189 + John Cianfarani + John Cianfarani + john&the-net.ca +46190 + Phoenyxcode + Kenneth Benson + phoenyx33&outlook.com +46191 + 21st Century Oncology - Crestview + Aurelle Delerme + aurelle.delerme&21co.com +46192 + ToastCoders + Michael Rice + michael&michaelrice.org +46193 + Distributed Hacker Maker Network Inc. + Mark Finn + treasurer&dhmn.net +46194 + arara inc. (formerly 'repica Inc.') + Shigeki Torii + mib&arara.com +46195 + sdc world group co. + Zhe Chen + zchen1007&gmail.com +46196 + Narodowy Fundusz Ochrony Srodowiska i Gospodarki Wodnej + Zbigniew Pawelec + informatyka&nfosigw.gov.pl +46197 + Telensa Limited + Trevor Byrne + tb&telensa.com +46198 + Equinox Holdings LLC + Stephen Roux + stephen.roux&equinox.com +46199 + DENTSPLY International + Andrew Campbell + andrew.campbell&dentsply.com +46200 + Matthew Martin + Matthew Martin + phy1729&gmail.com +46201 + Business Telecommunications Services Europe + Juan Villegas + jvillegas&bts.io +46202 + Avigilon Corporation + Chris Monkiewicz + chris.monkiewicz&avigilon.com +46203 + Strictly Monitoring Limited + Edward St Pierre + edward.stpierre&strictlymonitoring.com +46204 + Hutchison Drei Austria GmbH + Martin Diewald + martin.diewald&drei.com +46205 + MIT Dynamic Technologies Limited + Paul McGuinness + info&mit-tech.co.uk +46206 + Stamm + Markus Stamm + admin&stamm.nu +46207 + Kasisto, Inc. + Stephen Martin + system&kasisto.com +46208 + Knjaz Milos a.d. + Dejan Camilovic + Dejan.Camilovic&knjaz.co.rs +46209 + deepin + Hong Hao + oahong&gmail.com +46210 + Shanghai Koal Software + zhang xiaoyu + zhangxy&koal.com +46211 + Fernfachhochschule Schweiz (FFHS) + Informatikdienste FFHS (Stefan Meichtry) + id&ffhs.ch +46212 + e2E Satcom Limited + Alan Hughes + alanhughes&e2eservices.co.uk +46213 + Atmos Sistemas Ltda. + Rafael Villatore + rafael.mello&atmossistemas.com.br +46214 + COGIT MATIDA UK LTD + Taofik I Ajagbe + tajagbe&cogitmatida.com +46215 + Shandong Senter Electronic Co., Ltd. + Hanzhen + 277054510&qq.com +46216 + Rocken am Brocken UG + Eric Hertwig + eric&rocken-am-brocken.de +46217 + VINCI Energies Schweiz AG + Stefan Buergi + iana.veshol&vinci-energies.com +46218 + SilverTours GmbH + Christian Michel + technik&billiger-mietwagen.de +46219 + Migros Bank AG + Marcel Graf + loitse&migrosbank.ch +46220 + Ministério Público do Estado de Santa Catarina + Rodrigo Grumiche Silva + gered&mpsc.mp.br +46221 + iconectiv + Gabor Kiss + CTO&iconectiv.com +46222 + On IP 46 Limited + Dr. Paul C. Caesar PhD Comp. Sci. + onip46&outlook.com +46223 + Cyber Medical Imaging, Inc. + Joel Karafin + jkarafin&XDRradiology.com +46224 + Inner Range Pty. Ltd. + Alfred Katz + alf.katz&innerrange.com +46225 + tehasdf.com + Edward Ceaser + ed&tehasdf.com +46226 + Apigee Corporation + Vidya Pissaye + vpissaye&apigee.com +46227 + Guangxi radio and television information network corporation limited + Weihua Zhang + z2002wh&96335.COM +46228 + Nikko Graphic Arts Co.,Ltd. 
+ Makoto Sugimoto + sugimoto&nkbj.co.jp +46229 + igus GmbH + Christian Thönes + cthoenes&igus.de +46230 + Cleverlance Enterprise Solutions a.s. + Michal Cervinka + michal.cervinka&cleverlance.com +46231 + Eikeo + Stéphane Pinchaux + spinchaux&eikeo.com +46232 + Ergatel + Cedric CASTRO + ccastro&ergatel.com +46233 + Mundo Digital Informática Redes y Comunicación S.L. + Emilio González + egonzalez&mundodigital.es +46234 + SpaceTeamLab, Ltd. + Nikolay Pankov + pankov_n&st-hld.ru +46235 + RCNTEC + Sergey Minakov + ser&rcntec.com +46236 + Multisoft S.A. + Artur Kot + artur.kot&multisoftsa.com +46237 + Ayuntamiento de Alcobendas + Javier Peña + jpena&aytoalcobendas.org +46238 + Immobilien Hilgers e.K + Philipp Hilgers + philipp.hilgers&t-online.de +46239 + InviNets + Michał Marschall + m.marschall&invinets.com +46240 + Polish Border Guard (Border Guard Headquarters) + Piotr PIETRZAK + Piotr.Pietrzak&strazgraniczna.pl +46241 + Bionic Fuel Knowledge Partners Inc. + Ulrich Riemann + ubr&bionicmail.de +46242 + Netonix + Eric Stern + eric&netonix.com +46243 + Programize + Andreas Prodromidis + staff&programize.com +46244 + South African Air Force + Daniel du Plessis + daniedup&cybersmart.co.za +46245 + Reo3 + Greg Geering + greg.geering&reo3.com +46246 + 88.io + David Chung + admin-iana&88.io +46247 + Supertel-DALS + Sergei Lebedev + lebedev_sf&supertel-dals.ru +46248 + Pole/Zero Corporation + Kevin Ehlert + kehlert&polezero.com +46249 + SITES + Tony FRUCHART + tony.fruchart&sites.fr +46250 + Amsted Rail + Dustin Nikora + dnikora&amstedrail.com +46251 + POSBANK + Kyu Seok, Park + parksio2&posbank.co.kr +46252 + Z-ROUTER TECHNOLOGY + Tom Lin + TOM&Z-ROUTER.CN +46253 + Plusserver GmbH + Malte von dem Hagen + noc&heg.com +46254 + Vector Informatik GmbH + René Zauner + iana-pen&de.vector.com +46255 + Ovenden Papers Limited + James Ovenden + james.ovenden&ovendenpapers.co.uk +46256 + Hokkaido University + Norishige Nagai + sec-t&iic.hokudai.ac.jp +46257 + Wlbe Deutschland + R. Alexander Marschall + amarschall&wlbe.de +46258 + VENISO SOLUTIONS PRIVATE LIMITED + Navnit Chachan + navnit&veniso.com +46259 + Hauseigentümerverband Zürich + Tobias Horber + tobias.horber&hev-zuerich.ch +46260 + IQ-Media B.V. + Mark van Oudheusden + mark&iqmedia.nl +46261 + Universidad Nacional del Litoral + Maximiliano Boscovich + mboscovich&rectorado.unl.edu.ar +46262 + Zscaler Inc + Vivek Srivastava + vsrivastava&zscaler.com +46263 + LPL Financial + Dwayne White + dwayne.white&lpl.com +46264 + FinnHEMS Oy + Ossi Ahosalmi + ossi.ahosalmi&finnhems.fi +46265 + AlBikar.net + Daniel Drennan + drennan&panix.com +46266 + Wood County Telephone Company dba Solarus + Brian Krause + bkrause&solarus.net +46267 + Development Gateway + Stepan Semenukha + domains&developmentgateway.org +46268 + bleim.net GmbH + Wolfgang Bleim + Wolfgang.Bleim&bis.at +46269 + Dynasys Technology Co.,LTD + Lu Huang + hlu&dynasys.com.tw +46270 + NBOO-RU + Yury Ulyanov + common&nboo.ru +46271 + PaloVerde Cancer Specialists + Michael Bustard + mbustard&pvcancer.com +46272 + DataStax, Inc. 
+ Mike Mercado + sysadmin&datastax.com +46273 + informalogique + Ian Labbé + ian.labbe&gmail.com +46274 + Heartware + James Waite + jwaite&heartware.com +46275 + EBPI + John Renne + john.renne&ebpi.nl +46276 + Linux Lab + Vijay Nayani + vijay&linuxlab.org +46277 + Ministerio dos Petroleos + Rafael Aranha + raranha&progroup.co.ao +46278 + TÜV NORD AG + Matthias Helmhagen + mhelmhagen&tuev-nord.de +46279 + Thomas Hellström + Thomas Hellström + rel&xed.se +46280 + Enterprise Computing + Francisco Moreira + moreira&enterprisecomputing.com +46281 + WRITESYS Traffic Systems + Lucas Zampar Bernardi + lucas&writesys.com.br +46282 + uGo3D LLC + Peter Yoo + peter&ugo3d.com +46283 + Inmotion Data, LLC. + Joseph Lee + support&inmotiondata.com +46284 + ENERGEX + Graeme Farquharson + graemefarquharson&energex.com.au +46285 + Trimble Navigation Ltd. + Haroon Muhammad + haroon_muhammad&trimble.com +46286 + RUDIMENTS + Rudi Yanto + rudiments21&gmail.com +46287 + yaxunhongda + peiji zhao + whyhonest&126.com +46288 + Tedas B.V. + Thomas van Dongeren + t.vandongeren&tedas.nl +46289 + gluIT Software GmbH + Herr Kirchberg + sk&gluit.de +46290 + VS XXI JSC + Nikolay Bukin + nb&vsxxi.ru +46291 + IES srl + Alessandro Arras + alessandro.arras&iessrl.it +46292 + Koiossian Inc. + Robert Cowart + snmp&koiossian.com +46293 + Monroe County Hospital Oncology Clinic + Katrina Meshell + kmeshell&mchcare.com +46294 + inView GmbH + Caspar Fromelt + cf.iana.pen2&inview.de +46295 + City of Huntsville + Chris Davis + chris.davis&huntsvilleal.gov +46296 + SHI + David Marques + david_marques&shi.com +46297 + Agrarmarkt Austria + Gerhard Dyduch + office&ama.gv.at +46298 + ISPM Serviços em Informática LTDA + Alexandre Santos Campos + alexandre.campos&ispm.com.br +46299 + Birmingham Hematology and Oncology Associates, LLC d/b/a Alabama Oncology + Chris Barnes + chris.barnes&alabamaoncology.com +46300 + MicroKnight Business Systems Ltd. + Kevin Jeffery + kevin&microknight.com +46301 + CARRA, Inc. + Kelly Mieszkalski + info&carragroup.org +46302 + Sistemi di Telecomunicazione Srl + Giannini Alessandro + alessandro.giannini&sistemiditlc.it +46303 + ITSUDE + Ralf Schmitz + ralf.schmitz&it-su-de.net +46304 + GSMA + Wayne Cutler + wcutler&gsma.com +46305 + OSDE + Seguridad de la Información + seginf&osde.com.ar +46306 + Institute for Traffic Safety Management and Research + Joel Lord + jlord&itsmr.org +46307 + Briskhome + Egor Zaitsev + heuels&gmail.com +46308 + GDC Communications + Gerald Douglas + data&gdc.com.fj +46309 + Delft Solutions + Max Maton + max&maton.info +46310 + BCons Argentina S.A. + Leonardo Federico Rosso + lrosso&baufest.com +46311 + Baufest + Leonardo Federico Rosso + lrosso&baufest.com +46312 + Jack Kingsman + Jack Kingsman + jack.kingsman&gmail.com +46313 + Level 8 Aps + Jan Frank Nielsen + jfn&level8.dk +46314 + SongMovers + Antwan Carr + webmaster&songmovers.com +46315 + Silicon Valley Bank + Ron Parker + RParker&svb.com +46316 + AERTiCKET AG + Maximilian Mühe + mmuehe&aer.de +46317 + Abraxas Informatik AG + Olaf Sonderegger + olaf.sonderegger&abraxas.ch +46318 + Mils Electronic Gesmbh & CoKG + Martin Lubich + ml&mils.com +46319 + Traffic Tech Inc. + Yann Bourgault + ybourgault&traffictech.com +46320 + BNC National Bank + Shaun Skeldum + pen&bncbank.com +46321 + Sensignal Co.,Ltd. + Yuuki Uno + yuuki-uno&sensignal.co.jp +46322 + Gareth Williams + Gareth Williams + gareth&garethwilliams.me.uk +46323 + F.
KLUCZNIK & SON LIMITED + Dan McNicol + daniel.mcnicol&iae.co.uk +46324 + HAYSYS Ltd + Nicholas Lloyd + nlloyd&haysys.co.uk +46325 + Firstwan Delivery Network Services + Jean-François ESPERET + jfesperet&firstwan.fr +46326 + IUT de VALENCE + Eric PEYREMORTE + eric.peyremorte&iut-valence.fr +46327 + Niagara Regional Police Service + Matthew Kohut + oid-admin&niagarapolice.ca +46328 + DDS Wireless International Inc + David Nagy + David.Nagy&ddswireless.com +46329 + Security Indemnity Insurance Company In Liquidation + Michael Rydzewski + mrydzewski&njliq.org +46330 + Vubiq Networks + Aaron Baranoff + aaron&vubiqnetworks.com +46331 + NightSteam Solutions + Marcel Heers + info&nightsteam.de +46332 + Lacuna Software LTDA - EPP + Bazili Swioklo + bazili&lacunasoftware.com +46333 + American Express Global Business Travel + Kenny Moss + kenny.moss&aexp.com +46334 + HRM Systems AG + Felix Zimmermann + it&hrm-systems.ch +46335 + Bruce A. Hayton, M.D. + Bruce A. Hayton, M.D. + brucehaytonmd&yahoo.com +46336 + Live Niaga + Edie Mohamad + administrator&liveniaga.com +46337 + Meisei Electric Co., Ltd. + Ryuya Yamagishi + yamagishir&meisei.co.jp +46338 + CTO Balzuweit GmbH + Dennis Balzuweit + dennis.balzuweit&cto.de +46339 + National Archives, Czech Republic + Milan Vojacek + milan.vojacek&nacr.cz +46340 + The Opole University of Technology + Marcin Sroczak + m.sroczak&po.edu.pl +46341 + Dansk Metalarbejderforbund + Christian Jensen + hostmaster&danskmetal.dk +46342 + Anoop Sukumaran + Anoop Sukumaran + anoops1984&gmail.com +46343 + mazou kitchen + zoliswa ndzuzo + zoliswandzuzo9&gmail.com +46344 + PricewaterhouseCoopers AG + Marc Eggenberger + marc.eggenberger&ch.pwc.com +46345 + Clear Government Solutions, Inc. + Alexis Bor + alexis.bor&cleargovsolutions.com +46346 + Vail Software Solutions + BJ Vail + bjvail&sbcglobal.net +46347 + innato + Lee Choon Siong + domain&innato.io +46348 + Providence Health & Services + Bill Stites + bill.stites&providence.org +46349 + Deepfield + Max Mizikar + maxmzkr&deepfield.net +46350 + NorCERT - Norwegian Computer Emergency Response Team + Tor Inge Skaar + tor.inge.skaar&nsm.stat.no +46351 + Laubheimer Networks + Markus Laubheimer + markus&laubheimer.de +46352 + Premier Business Centers + Aaron Mendoza + Amendoza&Pbcenters.com +46353 + Schwackenberg Consulting + Andreas Schwackenberg + admin&schwackenberg.com +46354 + Kooperativa pojistovna, a.s. + David Klima + dklima&koop.cz +46355 + Expert-Team Pte Ltd + Edgardo Gonzales + edgardo.gonzales&expert-team.net +46356 + Grupo de Tecnologia Cibernetica, S.A. de C.V. + Claudio M. Horvilleur + claudio.horvilleur&tecno.com.mx +46357 + NBS System + Jean EYMERIT + jean.eymerit&nbs-system.com +46358 + ZwickRoell GmbH & Co KG + Markus Laubheimer + markus.laubheimer&zwickroell.com +46359 + Robert P. Fein and David A. Richards + James Badaracco + jbadarac&gmail.com +46360 + West and Central African Research and Education Network (WACREN) + Omo Oaiya + omo&wacren.net +46361 + Richard Scranton + Richard Scranton + scrantr&yahoo.com +46362 + Digital Data Technologies Inc. 
+ Justin Martinek + jmartinek&ddti.net +46363 + BGProtect + Alex Maltinsky + alex&bgprotect.com +46364 + Halmstad University + Magnus Moren + iana-pen&hh.se +46365 + arvato IT Support GmbH + Uwe Weiss + uwe.weiss&bertelsmann.de +46366 + Technicolor + Alex De Smedt + alex.desmedt&technicolor.com +46367 + UPSINVERTER.COM + RAKESH GARG + RAKESH&UTLUPS.COM +46368 + The Job Planet + Melissa Maxwell c/o Roy Barnett + maxwellmelissa75&yahoo.com +46369 + Miskolci Egyetem Informatikai Intezet + Gergo Szabo + szabo89&iit.uni-miskolc.hu +46370 + eshore + luoyongqi + 13316209005&189.cn +46371 + Japan Canada Oil Sands Limited + Joel Francisco + joel.francisco&jacos.com +46372 + Uber Operations, LLC + Franscis de Wet + system&uberops.com +46373 + Bypass Network Services + Patrick Jordan-Smith + patrick.jordan-smith&bypass.net.nz +46374 + Zizaike Inc. + genyiwang + genyiwang&zizaike.com +46375 + eCommistry Ltd + Matt Johnston + matt.johnston&ecommistry.com +46376 + Nullroute + Mantas Mikulėnas + mantas&nullroute.eu.org +46377 + Star Media (Beijing) CO.,LTD + Michael Fu + fubotao&star-media.cn +46378 + Centre Hospitalier Mémorial Saint-Lô + Nicolas GOEURY + nicolas.goeury&ch-stlo.fr +46379 + LunaticIsland.net + Justin M. Bushey + antix84&gmail.com +46380 + Enghouse Interactive AB + Mikael Norberg + mikael.norberg&enghouse.com +46381 + Arcapola Shipwrights (formerly 'Imterra') + Peter Polacik + polacik.p&gmail.com +46382 + Berolina-autolacke + Michael Degner + security&netelco.de +46383 + Ministry Division, Church of England + Dr Ken Farrimond + ken.farrimond&churchofengland.org +46384 + DUCA Financial Services Credit Union Ltd. + Matthew Burr + sysadmin&duca.com +46385 + Walnut Hill Physicians’ Hospital, LLC + Larry Sutherland + larry.sutherland&walnuthillmc.com +46386 + Shenzhen GL-COM Technology Co.,Ltd. + hongshun cui + cuihsh&gl-com.com +46387 + Drift + Sedelkova Daria + dasha.sede&gmail.com +46388 + Taygeta Scientific Inc. + Everett Carter + office&taygeta.com +46389 + VESTA + LUKE YOUNG + luke.young&motorolasolutions.com +46390 + Clarke Technical Services, Inc. + Carlos Clarke + carlos&ptdprolog.net +46391 + jddev + Jakub Dohnal + jak.doh&gmail.com +46392 + Donostian Belgie + Albert Narain + donostianbe&yahoo.com +46393 + Programlabbet AB + Johan Persson + johan&programlabbet.se +46394 + BLITSC + Bernhard Lang + Bernhard.Lang&blitsc.de +46395 + FirstRand Group + Steven Corneelsen + ISSCertificateM&fnb.co.za +46396 + Marotta Controls, Inc. + Carlos Clarke + cclarke&marotta.com +46397 + iSecway Inc. + Veda Wang + isecway&163.com +46398 + Regional Cancer Care Associates Hope Community Cancer Center Division + Pauline Flaville + pflaville&regionalcancercare.org +46399 + iSIGHT Partners + Matt Shelton + mshelton&isightpartners.com +46400 + NorQuest College + Kevin Nikiforuk + kevin.nikiforuk&norquest.ca +46401 + Marshall B. Ketchum University + Jeremy Bascom + dnsadmin&ketchum.edu +46402 + NSF Telecom Ab + Joonas Harjumäki + joonas.harjumaki&nsftele.com +46403 + QA2 + Daniel Dent + iana-qa2-oid&contactdaniel.net +46404 + DOT Systems Sp. z o.o. + Klaudiusz Kobylecki + info&dotsystems.pl +46405 + superChic + Michael Hacker + mh&superchic.at +46406 + Foundation Pärnu Hospital + Vegard Kruusla + vegard.kruusla&ph.ee +46407 + TECNOLOGIAS EOS SA DE CV + CARLOS ENRIQUEZ + carlos.enriquez&tec-eos.com +46408 + PKP Polskie Linie Kolejowe S.A. + Daniel Kozłowski + daniel.kozlowski&plk-sa.pl +46409 + BSP Consulting spol. s r.o. + Martin Rublík + martin.rublik&bspc.sk +46410 + SEGGER Microcontroller GmbH & Co.
KG + Oliver Olligs + iana_pen&segger.com +46411 + Art2Wave Canada Inc. + Sergey Plotnikov + sergey.plotnikov&art2wave.com +46412 + Andrew Sheret Ltd. + David Manning + david.manning&sheret.com +46413 + Table25, Inc. + Mike Adair + r.mike.adair&gmail.com +46414 + William Oliver + William Oliver + Woliver1&gmail.com +46415 + Open Banking Foundation + Vedran Čačić + veky&opensourcebank.net +46416 + The National Archives of Latvia + Haralds Krūmiņš + Haralds.Krumins&arhivi.gov.lv +46417 + Emerald Coast Cancer Center + Cynthia Vinson + cancercenter1&yahoo.com +46418 + Agile Ingeniería y Consultoría Ltda. + CLAUDIO DELGADO + claudio.delgado&agile.cl +46419 + Araknis Networks + Ayham Ereksousi + ayham.ereksousi&araknisnetworks.com +46420 + Knauf Information Services GmbH + Yvonne Hillenbrand + hillenbrand.yvonne&knauf.de +46421 + B/E Aerospace + Tim Keller + tim_keller&beaerospace.com +46422 + UTEK TECHNOLOGY(SHENZHEN)CO,LTD + YEDAWEN + yedw&szutek.com +46423 + Osirium Ltd + Kev Pearce + kev.pearce&osirium.com +46424 + Otterbein Homes + Kevin Hook + khook&otterbein.org +46425 + DTSV, Inc. + Jason Dickert + jason.dickert&cellavant.com +46426 + Salinas Valley Medical Clinic Cancer Care + Claudia Powell + cpowell&chp-dod.com +46427 + LISI AEROSPACE + GAGLIARDI Gérard + gerard.gagliardi&lisi-aerospace.com +46428 + Mobicare + Jorge Pereira + jorge.pereira&mobicare.com.br +46429 + Universidad Nacional de San Martin + Pablo Esteban Bullian + pbullian&unsam.edu.ar +46430 + RTEC + Ivanov Kirill + ivanov_k&pkcc.ru +46431 + Alaf Organization + Alexandre Lafargue + a26&lafargue.eu +46432 + Modula Shop Systems + Alexander Schmid + alex&modula-shop-systems.de +46433 + Fern Ridge School District 28J + M Bateman + 28jadmin&fernridge.k12.or.us +46434 + consulter net GmbH + M.Balter + balter&consulternet.net +46435 + LEW AG + Bernhard Lang + Bernhard.Lang&lew.de +46436 + Infoprocess + Viacheslav Alekseev + info&infoprocess.ru +46437 + TTI NORTE SL + Borja Santander + bsantander&ttinorte.es +46438 + Bartec-Vodec + William Dudeney + William.Dudeney&Bartec-Vodec.com +46439 + Bolsas y Mercados Españoles Innova, S.A.U. (BME Innova). + Ivan Zapata + izapata&bmeinnova.com +46440 + Cloud Electronics Ltd. + Jon Spyve + technical&cloud.co.uk +46441 + AKROS AG + Akros Informatik + iana-pen&akros.ch +46442 + PIHI Media + Isaac Hildebrandt + isaac&pihimedia.com +46443 + intec GmbH + Joerg Huwig + joerg.huwig&argus.info +46444 + Zhejiang Huanshun Network Science & Technology Co., Ltd. + Jianjie Chen + cjj&huanshun.com +46445 + Unitel Engineering + Vladimir Trushchenko + v.truschenko&uni-eng.ru +46446 + OV Corporation SAS + Benjamin ROZIERE + benjamin.roziere&rdalambic.com +46447 + Embedict + Yiğit YÜCE + ygtyce&gmail.com +46448 + Telewizja Polsat Spolka z o. o. + Marcin Barcikowski + mbarcikowski&polsat.com.pl +46449 + Collibra + Xavier Tesch + xavier.tesch&collibra.com +46450 + Opsmate, Inc. + Hostmaster + hostmaster&opsmate.com +46451 + InfoRelay Online Systems, Inc + Justin Bushey + jbushey&inforelay.com +46452 + Lanworld Finland Oy + Matti-Oskari Leppanen + mongrelx&gmail.com +46453 + McKesson IWS + Vanja Hrbacek + vanja.hrbacek&mckesson.com +46454 + Goldberg and Mathew Medical Assc P.L.L.C. + Renee Goldberg + renee.goldberg.phd&gmail.com +46455 + Coral Active + Kelvin Lynch + info&coralactive.com +46456 + AttoCore Ltd + David Neil + David.Neil&attocore.com +46457 + Gerrie Electric Wholesale Limited + Trevor Davis + tdavis&gerrie.com +46458 + ENow, inc. + Jay Gundotra + jayg&enowinc.com +46459 + Mairie de Canteleu + M. 
Kévin ROSSELOT + K-Rosselot&ville-canteleu.fr +46460 + Veterinary Specialists of North Texas + Michael Gibson + iana.org&vsnt.com +46461 + Outhwaite Technologies + Dan Outhwaite + admin&outhtech.com +46462 + Sicoob Confederação + BRUNO RITHELE CORREA BATISTA + bruno.rithele&sicoob.com.br +46463 + Radio Gigabit Inc. + Alexey Sevastyanov + alexey.sevastyanov&radiogigabit.com +46464 + Knowledgeloop, Inc. + Vitaliy Sapounov + vitaliy&knowledgeloop.com +46465 + EKE-Electronics Ltd. + Kimmo Lindholm + kimmo.lindholm&eke.fi +46466 + Rossbach Systems + Julian Rossbach + julian.rossbach&rossbach-systems.eu +46467 + CellSens + Guy op de Beeck + guyopdebeeck&cellsens.com +46468 + Coho Data, Inc. + Ed Woz + it&cohodata.com +46469 + 149W Managed Services LLC + Dennis Hollenbeck + hollenbeckd&gci.net +46470 + Man Technology Co, Ltd. + Harold Lee + hllee&mantech.co.kr +46471 + Illusive Networks Ltd. + Olga Vingurt + olga&illusivenetworks.com +46472 + infraView GmbH + Andreas Wilhelmi + andreas.wilhelmi&infraview.net +46473 + Otterbein Homes + Kevin Hook + khook&otterbein.org +46474 + Kiwox Soluciones Tecnológicas + Francisco Pérez + francisco&kiwox.cl +46475 + OSTEC Business Security + Denis Volpato Martins + denis&ostec.com.br +46476 + Martijn van Hoof + Martijn van Hoof + martijn&vhoof.com +46477 + HealthNow New York Inc + EIS + EIS&healthnow.org +46478 + DIAL GmbH + Administration + sysadmins&dial.de +46479 + Unidesk Corporation + Chad Heipler + cheipler&unidesk.com +46480 + Modular Mining Systems, Inc. + SNMP Management + iana-snmp&mmsi.com +46481 + Desjardins Groupe Technologies + Jean Voltaire Emile + jean.voltaire.emile&desjardins.com +46482 + Netki, Inc. + Justin Newton + oidadmin&netki.com +46483 + ERLPhase Power Technologies Ltd + Anatoliy Ksyunz + Aksyunz&erlphase.com +46484 + HERE North America, LLC + Jeff Phillips + pen-administrator&here.com +46485 + Guy's and St Thomas' NHS Foundation Trust + Jamie Cousinne + jamie.cousinne&gstt.nhs.uk +46486 + Dedrone GmbH + Frederic Beister + frederic.beister&dedrone.com +46487 + Stripe, Inc. + Carl Jackson + iana&stripe.com +46488 + WALSER-NET + Michael Walser + admin&walser-net.at +46489 + TGR - Tecnologia para Gestão de Redes, S.A. + Guilherme da Silva Senges + guilherme.senges&tgr.net.br +46490 + Captel + Pierre-Yves GIRAULT + pygirault&captel.fr +46491 + Valerio Morozzo + Valerio Morozzo + valerio.morozzo&yahoo.com +46492 + Conevity Solutions Inc + Nick Judson + nick.judson&conevity.com +46493 + UAB "GKF" + Vytautas Simanaitis + it&gkf.lt +46494 + Assimil8 Limited + Chris Jones + c.jones&assimil8.com +46495 + European Dynamics SA + Nassos Michas + nmix&eurodyn.com +46496 + DEPARTAMENTO ADMINISTRATIVO DE CIENCIA, TECNOLOGIA E INNOVACION - COLCIENCIAS + LUIS EDUARDO SIERRA + servicedesk2&colciencias.gov.co +46497 + ISS BRASIL LTDA + DANIEL PECHMANN + daniel&issbrasil.com +46498 + Jiangsu Sinolte Wireless Technology Co., Ltd. + rulinwan + rulinwan&sinolte.net +46499 + ICD CO.,LTD + Hosoo Lee + hosoolee50&eicd.co.kr +46500 + Trukai Industries Limited + Richard Omi + romi&trukai.com.pg +46501 + Didactum® Security GmbH (formerly Didactum® Ltd. Deutschland ) + Andrea Oeltjendiers + roe&didactum.com +46502 + TASS + Denis Ovchinnikov + ois&tass.ru +46503 + Quench Worldwide Limited + Jim Marsden + jim.marsden&quenchww.com +46504 + Stolt-Nielsen Limited + Vidar Kroslid + v.kroslid&stolt.com +46505 + PLAS-TEK Ltd. 
+ Alexander Pautov + alexander.pautov&plas-tek.ru +46506 + Systems Integrated + Larry Pomatto + kishore&systemsintegrated.com +46507 + Simpress Comercio Locacao e Servicos + Rangel Pacheco Sperandio + admredes&simpress.com.br +46508 + Glen Raven, Inc. + Taylor Edwards + apps&glenraven.com +46509 + Oncology and Hematology Specialists + David Wilmes + dwilmes&ohsnj.com +46510 + Wake County Public School System + Rob Breault + rbreault&wcpss.net +46511 + New Telecom Solutions LLC + Vladimir Gorin + gorin&mwnts.ru +46512 + Times Internet Limited + Mohammad Shoaib Zahir + shoaib.zahir&timesinternet.in +46513 + ZorgNed Automatisering BV + Albert Post + software&zorgned.nl +46514 + Mhashim Ltd + Babiker Mohammed + mhashim.gm&gmail.com +46515 + Wolf-Medizintechnik GmbH + Torsten Peukert + torsten.peukert&womed.net +46516 + Ante Grup Elektrik Elektronik Bilişim İnşaat Danışmanlık San. ve Tic. Ltd. Şti. + Alper SEZEN + alper.sezen&antegrup.com.tr +46517 + FitX Deutschland GmbH + Markus Warg + markus.warg&fitx.de +46518 + SPD Electrical Engineers (Pty) Ltd + Dr Andries Hercules Putter + aputter&vukanet.com +46519 + Albertslund Kommune + It-afdelingen + it.teknik&albertslund.dk +46520 + Hitachi Industry & Control Solutions, Ltd. + Toshio Ooga + enterprise-mib&ml.hitachi-ics.co.jp +46521 + PMC - PERIPHERIQUES ET MATERIELS DE CONTROLE + Franck LEDAY + f.leday&groupecarrus.com +46522 + Dino Occhialini + Dino Occhialini + dino.occhialini&gmail.com +46523 + Bridgetec Corp. + SuJeung Shin + sujeungshin&gmail.com +46524 + Linkbroad Technology Beijing Corporation Limited + WU JIANMING + digger.wu&linkbroad.com +46525 + Hedvig Inc. + Reece Joyner + rjoyner&hedviginc.com +46526 + Moldeo AB + Joakim Nilén + joakim.nilen&moldeo.se +46527 + Trans Link Systems B.V. + Gerrit Kraaij + registrar&translink.nl +46528 + Shanghai Chuxun Information Technology Co.,Ltd. + Yiqing Wang + yiqing&storswift.com +46529 + ldap4mail + Jan Krüger + jk&jk.gs +46530 + R-SYS s.r.o. + Attila Csontos + attila.csontos&r-sys.sk +46531 + PGE Polska Grupa Energetyczna S.A. + Jerzy Compa + jerzy.compa&gkpge.pl +46532 + Hinchingbrooke Hospital NHS Health Care Trust + Sean Diviney + sean.diviney&nhs.net +46533 + Open Source System + Brian Lockwood + Lockwoodbj&gmail.com +46534 + Arx One + Steven Rogers + servers&arx.one +46535 + CosmoKey GmbH & Co. KG + Christian Tacke + iana&cosmokey.com +46536 + Textel Marimon s.a. + Carlos Marimón + carlos&textel-marimon.com +46537 + Yodel Delivery Network Limited + Mark Needham + mark.needham&yodel.co.uk +46538 + Pioneers Memorial Healthcare + Alicia Ortega + aortega&pmhd.org +46539 + Virtual Network Element, Inc. + Kalidas Porika + kporika&vne.io +46540 + VSSUT + Janmejaya Barik + barikjanmejaya&gmail.com +46541 + Seneca Data Distributors, Inc. + Rob Dukette + robd&senecadata.com +46542 + Mountain Blue Cancer Care Center + Amy Holleran + amy.holleran&mountainbluecare.org +46543 + CNIAP - Guinea Ecuatorial + Djassi Fonseca + djassi.fonseca&nosi.cv +46544 + Axion Technologies Ltd. + Carl Cassista + ticontact&axiontech.ca +46545 + Rechtsanwaltskanzlei Schaalo + Wolfram Schaalo + kanzlei&schaalo.de +46546 + Impavid LLC + Benjamin Wilson + ben&impavid.io +46547 + BILD GmbH & Co.
KG + Harald Brückner + harald.brueckner&bild.de +46548 + Secvice Information Technologies Private Limited + Raju Alluri + raju&secvice.net +46549 + Messer Information Services GmbH + Matthias Thaler + matthias.thaler&messer-is.com +46550 + Dufoort + Dufoort Michaël + iana&dufoort.be +46551 + Amprion GmbH + Andreas Westhoff + andreas.westhoff&amprion.net +46552 + Eltham College + Piotr Zdarzil + pz&eltham-college.org.uk +46553 + Amsterdamtelecom Ltd. + Victor Litvin + v.litvin&amsterdamtelecom.ru +46554 + Anthon Romanov + Anthon Romanov + alxadm&yandex.ru +46555 + Kostya.Pro + Konstantin Ol'khovyy + olkhovyy&kostya.pro +46556 + L.D. Lutum + Leonardo D'Onofrio + leonardo&ldlutum.com +46557 + Scale Genius Inc. + Michael Vallaly + iana&scalegenius.com +46558 + FutoIn + Andrey Galkin + andrey&futoin.eu +46559 + ESC - Electronic Service Center GbR + Martin A. Wielebinski + marwie&esc.de +46560 + Cox Communications Inc - Engineering + Shannon Orem + shannon.orem&cox.com +46561 + BHS Sonthofen GmbH + Helmut Clausnitzer + helmut.clausnitzer&bhs-sonthofen.de +46562 + Revenue Management Solutions, LLC + Philip Morrow + hostmaster&revenuemanage.com +46563 + Virgin Islands Oncology & Hematology,PC + Eielt Semper Smith + semper&vioandh.com +46564 + Entera AS + Gunay Mazmanoglu + snmp-mib&entera.net +46565 + Wiollo Sp. z o.o. + Rafal Michalak + r.michalak&wiollo.pl +46566 + Holbæk Kommune + Bo Von Jensen + boj&holb.dk +46567 + DIROX + Truong Ta + truong.ta&dirox.net +46568 + Moqom Limited + David Morrissey + david.morrissey&moqom.com +46569 + HybridDigital + Rittichai Limarunothai + rittichai&hybriddigitals.com +46570 + Mithril Informatique + Jean-Noel Rouchon + mail&mithril.re +46571 + Fruit of the Loom + Cliff Coleman + internettechnician&fotlinc.com +46572 + Comact Equipment Inc. + Stefan Graf + sgraf&comact.com +46573 + Stefan van Someren + Stefan van Someren + iana&vansomeren.nl +46574 + Encore Systems, LLC + Joe Enzminger + joe.enzminger&encoresystems.com +46575 + WorkCover Queensland + Server Support + serversupport&workcoverqld.com.au +46576 + NFWare Inc. + Igor Ryzhov + iryzhov&nfware.com +46577 + Cloudstead, Inc. + Jonathan Cobb + jonathan&cloudstead.io +46578 + Gereltei Systems + Zoljargal Gantumur + contact&zoloogg.com +46579 + Pivotal Cloud Foundry + Toolsmiths + cf-toolsmiths&pivotal.io +46580 + Braincoolant Oy + Jan Tuomi + jan.tuomi&braincoolant.fi +46581 + MalariaGEN + Ian Wright + sysadmin&malariagen.net +46582 + Robert-Bosch-Krankenhaus GmbH + Walid Sbaih + its&rbk.de +46583 + Linoma Software + Bob Luebbe + bluebbe&linoma.com +46584 + French Lick Resort + Andrew K. Condra + acondra&frenchlick.com +46585 + StrathTech + Adam McGhie + contact&strathtech.co.uk +46586 + ProSiebenSat.1 Media SE + Biebl Christian + christian.biebl&p7s1produktion.de +46587 + Institute of Low Temperature and Structure Research, Polish Academy of Sciences + Krzysztof Cach + k.cach&int.pan.wroc.pl +46588 + Aptitude Technologies + Bernard Lefebvre + bernard.lefebvre&aptitudetech.net +46589 + Stifel Financial Corp + Chris Savala + chris.savala&stifel.com +46590 + suretap wallet lp + Abbas Zangeneh + abbas.zangeneh&suretap.com +46591 + Auditoria y Consultoria de Privacidad y Seguridad, S.L. + Candido Rodriguez Montes + candido.rodriguez&prise.es +46592 + Gebr.
Hoffmann Werkzeugmaschinen GmbH + Steffen Rademacher + st.rademacher&arcor.de +46593 + JuntoTelecom + Gean Martins + gean.martins&juntotelecom.com.br +46594 + Matthew Wynn + Matthew Wynn + matthew&matthewwynn.com +46595 + smartbits + Stephan Burger + stephan.burger&live.at +46596 + Hazelnut Software Ltd + Technical Support + iana&hazelnutsoftware.co.uk +46597 + OW2 Consortium + Stéphane Laurière + stephane.lauriere&ow2.org +46598 + Industrielle Alliance Valeurs mobilières inc. + Florian Niculae + fniculae&iagto.ca +46599 + UAB Erisata + Karolis Petrauskas + karolis.petrauskas&erisata.lt +46600 + Mersoft Corporation + Jeff Smith + itsupport&mersoft.com +46601 + Inform-Mobil + Alexander Sergin + sergin&immo.ru +46602 + kt Corporation + Sanghun Kim + dustin.kim&kt.com +46603 + Xeron Universal Technologies + Rasmus Riis + tech&xeron.dk +46604 + NetIndia Private Limited + Chandrasekhar Vallabhaneni + chandra&netindia.co.in +46605 + WALLYS COMMUNICATIONS SDN BHD + JOVI YEONG + jovi&wallyscommunications.com +46606 + Cisco Systems India Private Limited + Madhur Raj N + madhurn&cisco.com +46607 + VetRay N + Peter Pfeifer + pp&vetray.de +46608 + SoftLab–NSK Co., Ltd. + Igor Arsenin + administration&softlab-nsk.com +46609 + The Wireless Innovation Forum + Lee Pucker + Lee.Pucker&wirelessinnovation.org +46610 + Sensorpoint, LLC + Mark Bain + markb&sensorpoint.net +46611 + St Vincents Health Australia (NSW) + Glenn Elliott + glenn.elliott&svha.org.au +46612 + DV Impex, Ltd + Alexandra Glushkova + glushkova&doublev.ru +46613 + JSC "SPO Angstrem" + Sedykh Fedor + sedykh&npo-angstrem.ru +46614 + BBM spol. s r. o. + Jan Pomahac + jan.pomahac&bbm.cz +46615 + WirelessCar + Ola Dawidson + ola.dawidson&wirelesscar.com +46616 + Anghel F. Dan-Alexandru PFA + Dan-Alexandru Anghel + dan.anghel&outlook.com +46617 + Helse Nord IKT + John Ole Grønmo + john.ole.gronmo&hnikt.no +46618 + AXEL + Franck BOURSIN + franck.boursin&axel.fr +46619 + Centre Hospitalier Emile Mayrisch + Roland Kuffer + downloads&chem.lu +46620 + Ferguson PLC + Jediah Logiodice + it.iana-pen.admin&ferguson.com +46621 + Thales Services France Toulouse + Antoine Tran + antoine-b.tran&thalesgroup.com +46622 + FLAG Solutions S.L. + Jose A. Rodriguez + info&flagsolutions.net +46623 + Trunomi, Inc. 
+ Pawel Veselov + pawel&trunomi.com +46624 + Flexoptix GmbH + Volker Meiss + development&flexoptix.net +46625 + wingtop co., ltd + jerry wu + jerry&wingtop.com.tw +46626 + zibernetics + Chris Sanchez + chris&zibernetics.com +46627 + Init Corporation + Rytis Urnezius + it&init.lt +46628 + SC 4 WAVE BRO SRL + Andrei Nicolau + andrei.nicolau&4wave.ro +46629 + Joe Pitt + Joe Pitt + Webmaster&joepitt.co.uk +46630 + Dominique Dor + Dominique Dor + dominiquedor&free.fr +46631 + DeviousOps + Michael van Slingerland + michael&deviousops.nl +46632 + Encinitas Country Day School + Richard Bychowski + admin&ecdschools.org +46633 + Oncology San Antonio + Patrick Magallanes + waji.syed&oncologysa.com +46634 + NIvetti Systems Private Limited + Manjul Khandelwal + manjul&nivettisystems.com +46635 + INTERSCHALT maritime systems AG + Fred Ollermann + fred.ollermann&interschalt.de +46636 + Honorardozent Peter Kruse + Peter Kruse + kruse.peter&outlook.de +46637 + RaulWalter LLC + Raul Kaidro + info&raulwalter.com +46638 + City of Sunrise + Chris Rhamanohar + crhamanohar&sunrisefl.gov +46639 + Jan Skogqvist + Jan Skogqvist + jan.skogqvist&gmail.com +46640 + Caixa Economica Federal + Alessandro Lasmar Mourão + ceptibr05&caixa.gov.br +46641 + MeterBee + Felipe Maier Cruz + fmaier&gmail.com +46642 + National Documentation Centre + Alexandros Soumplis + noc&ekt.gr +46643 + kittenberger.org + Peter Kittenberger + pekittenberger&gmx.at +46644 + ZettaLogs + Saygun Onay + saygun&zettalogs.com +46645 + Aeronix + Dana Reed + dreed&aeronix.com +46646 + Wavelab Inc. + Xiang Gao + xiangg&wave-lab.com +46647 + Michael G. Sullivan (MGS) + Michael Sullivan + mike&mgs.sh +46648 + Brain4Net, Inc. + Sergey Romanov + s.romanov&brain4net.com +46649 + PTPd project + Wojciech Owczarek + wojciech&owczarek.co.uk +46650 + Humboldt Solutions Ltd + Adrian Cox + info&humboldt.co.uk +46651 + Dantherm Cooling A/S + Harikrishnan Chinnappan + hac&dantherm.com +46652 + Sertel Electronics UK Ltd + Srinath Gopalan + srinath.g&sertel.co.uk +46653 + Raymond Chung Industries Corporation + Raymond Chung + raymond_chung&rayind.biz +46654 + Future Infrastructures Online Ltd. + Miguel Angel Nubla + ianaoid&fiolimited.com +46655 + Innofidei Technologies Inc. + xiongjun + xiongjun&innofidei.com +46656 + Polskie Koleje Państwowe S.A. + Paweł Weżgowiec + pawel.wezgowiec&pkp.pl +46657 + EKCR + Ziker Koz + 5033282&gmail.com +46658 + AssetOwl + Fletch Hogan + admin&assetowl.com +46659 + BBR Verkehrstechnik GmbH + Eugen Maier + maier&bbr-vt.de +46660 + Volvo Group Telematics + Ola Dawidson + ola.dawidson&volvo.com +46661 + Bucherer AG + Maurizio Riva + IT&bucherer.com +46662 + Pointek + Yagiz Yildirim + yagiz.yildirim&pointek.com.tr +46663 + TORRING SYSTEMS CORP + Pavel Kiriakov + forhosting&lucky-labs.com +46664 + Novatronic + Alfredo Guzmán + aguzman&novatronic.com +46665 + Reshin + Viktor Reshin + reshin-va&yandex.ru +46666 + NPF Modem, Ltd. + Alex Gorokhov + asg&npfmodem.spb.ru +46667 + Limited Liability Company «ICBCOM» + Dmitriy Ivushkin + divushkin&icbcom.ru +46668 + ENX Association + Immo Wehrenberg + immo.wehrenberg&enx.com +46669 + Job Corps + Bill Stites + stites.bill&jobcorps.org +46670 + OCTAGON computer&communication + Alexander Steins + alexander.steins&octagon.de +46671 + Medical Specialists of Fairfield, LLC. 
+ Victoria Root + Victoria.msf12&gmail.com +46672 + PrimeDesign + Markus Primes + admin&primedesign.at +46673 + Minoris + Kent Gustavsson + kent&minoris.se +46674 + cantaa GmbH + Andreas Micklei + andreas.micklei&cantaa.de +46675 + Essential Energy + Matt Coman + matthew.coman&essentialenergy.com.au +46676 + Weir Shared Services Ltd + Cory Davis + cory.davis&weirgroup.com +46677 + XATU + HeiSir + 369946814&qq.com +46678 + naksitrallid.com + Vladislav Artemyev + iana&naksitrallid.com +46679 + Bistum Würzburg + Michael Seufert + Michael.Seufert&bistum-wuerzburg.de +46680 + Control Techniques + Auryn Hughes + auryn.hughes&emerson.com +46681 + SV Informatik GmbH + Andreas Legler + iana&sv-informatik.de +46682 + Applied Optoelectronics, Inc. + Mark Siejka + mark_siejka&ao-inc.com +46683 + OkAlfred (formerly 'Zdravec Bulgaria Ltd') + Alexander Atanasov + hw&okalfred.com +46684 + Nazessen + Peter Breur + peytor&gmx.com +46685 + Maag Informatik + Daniel Maag + daniel.maag&gmx.net +46686 + Document Storage Systems, Inc. + Carlos Serpa + cserpa&dssinc.com +46687 + Qualicorp SA + Carlos Eduardo Faucz + cefaucz&illimitati.com +46688 + Karl Camilleri + Karl Camilleri + karl&camilleri.za.net +46689 + Entria LLC + Wolfgang Baltes + wolfgang_baltes&hotmail.com +46690 + 12Sided Technology, LLC + Phil Vachon + pvachon&12sidedtech.com +46691 + IDOBJECT + Rémi CARLIER + rfid&idobject.com +46692 + Ripple + Craig DeWitt + craig&ripple.com +46693 + BestCAS Ltd + Krassimir Slavchev + krassi&bestcas.com +46694 + KoreLogic, Inc. + Joe Segreti + iana&korelogic.com +46695 + Cancer Care of West Central Ohio + Christopher L. Powell + cpowell&drdavidpowell.com +46696 + MastMinder Ltd + Cliff Whitehead + CliffWhitehead&MastMinder.com +46697 + Unimar, Inc. + Terry Zarnowski + terryz&unimar.com +46698 + Cybersales a.s. + Radek Hladík + r.hladik&cybersales.cz +46699 + HCit Consultant + Dr. Lavanian Dorairaj + ceo&hcitconsultant.com +46700 + ICTrex + ICTrex admin + ictrex&ictrex.nl +46701 + State Historical Museum + German Dubinin + dgv&shm.ru +46702 + Control Service do Brasil Eireli. + Eduardo Antão + eduardo&controlservice.com.br +46703 + aBjorne + Andrea Bosa + mib&abjorne.info +46704 + Salib Oncology Hematology + Roxanne Brown + billingmanagerhs&yahoo.com +46705 + Maestria en Seguridad Informatica Aplicada - ESPOL + Lilian Benavides + liliantbo&gmail.com +46706 + Wowza Media Systems, LLC + Elliot Miller + legalnotices&wowza.com +46707 + Neocoretech SAS + Christophe Rettien + christophe.rettien&neocoretech.com +46708 + Action for Children + Richard Hill + isias&actionforchildren.org.uk +46709 + BDO Canada LLP + Drew Farlinger + dfarlinger&bdo.ca +46710 + Network Time Foundation + OID Administrator + oid-admin&nwtime.org +46711 + DoubleNegative.eu + Márton Levente Szigeti + szigeti.marton&doublenegative.eu +46712 + Open Infrastructure + Daniel Baumann + system-administration&open-infrastructure.net +46713 + Datahealth + Simon Devine + Simon&datahealth.com.au +46714 + Cesa Inc + Mike Joe + 446251226&qq.com +46715 + Bioptická laboratoř s.r.o. + Radek Vopěnka + rvopenka&trask.cz +46716 + TARGOBANK AG & Co. KGaA + Hoffmann Oliver + oliver.hoffmann&targobank.de +46717 + Ibys Technologies S.A. + José Luis Carretero + jlcarret&ibys.net +46718 + Sea Island Systems, Inc. 
+ Lloyd Milligan + Lloyd&SeaIslandSystems.com +46719 + dtsi + Vladimir Datsevich + oid&dtsi.eu +46720 + LightFactor + Jeff Cesnik + jcesnik&lightfactor.co +46721 + Cancer Care Center, PC + Linda Moore + lindamoorecc&gmail.com +46722 + Merge Security + Ren Wei + renwei&mergesecurity.com +46723 + United Educators Credit Union + Dennis Griesgraber + djg&uecu.coop +46724 + Ferrograph Limited + Ed Agar + Ed.agar&ferrograph.com +46725 + Swift MT + Kevin Lentle + kevin.lentle&swiftmt.com +46726 + U.S. Anesthesia Partners, Inc. + Russell Booker + Russell.Booker&USAP.com +46727 + SOFTEK GLOBAL EDESIGN S DE RL DE CV + JUAN GONZALEZ + juan.gonzalez&softekglobal.com +46728 + Heksagon Group Cy Ltd + Andraz Oblak + andraz.oblak&hex-group.com +46729 + Growing Energy Labs, Inc. + Ivan Cooper + ic.pen&geli.net +46730 + Route Weaver LLC + Kumaresh Malli + kmalli&routeweaverllc.com +46731 + Institut fuer medizinische und pharmazeutische Pruefungsfragen + Dr. Marco Schaerfke + oidmaster&impp.de +46732 + Vedang Radio Technology Pvt. Limited + Dharmesh Gupta + dharmesh&vedangradio.com +46733 + DOM-P Zrt + Miklos Fazek + hostmaster&domp.hu +46734 + TCCC + Mary Lou Gladieux + mgladieux&toledoclinic.com +46735 + Silhouette International Schmied AG + Johann Mairinger + j.mairinger&silhouette.com +46736 + Powertech IT Services S.R.L. + Marian Patruca + mpatruca&gmail.com +46737 + Amphinicy Technologies + Marko Mrvelj + marko.mrvelj&amphinicy.com +46738 + Beeler IT Services + Adrian Beeler + info&beeler-it-services.ch +46739 + RONIT. N.THAKOR + RONIT. N.THAKOR + ronitnthakor&gmail.com +46740 + Zendesk, Inc + Aaron Peschel + opsteam&zendesk.com +46741 + Aspect Enterprise Solutions Inc. + Liviu Faciu + lfaciu&aspectenterprise.com +46742 + Global Biodiversity Information Facility (GBIF) + Tim Robertson + trobertson&gbif.org +46743 + Technology Architects LLC + John Flanigan + JFlanigan&TechnologyArchitects.com +46744 + Transaction Technologies Pte Ltd + Thorsten Neumann + thorsten.neumann&smartpesa.com +46745 + Joe Thielen + Joe Thielen + joe&joethielen.com +46746 + Weill Cornell Medicine + Lucas Rockwell + lur2015&med.cornell.edu +46747 + The Christ Hospital Hem Onc + Regina Lampe RN BSN OCN + regina.lampe&thechristhospital.com +46748 + Alliance Technology Group, LLC + Eric L. Kibisingo + eric.kibisingo&alliance-it.com +46749 + 121 Marketing Services Group, Inc. + David Carter + hostmaster&121msg.com +46750 + MobilSign Ltd. + Peter Dohanyos + info&mobilsign.hu +46751 + Carrizo Oil & Gas, Inc. + David Hepler + hostmaster&carrizo.com +46752 + THE MINNITI CENTER FOR MEDICAL ONCOLOGY & HEMATOLOGY + LINDA BURKE + minniticenterlinda&comcast.net +46753 + Hematology Oncology Clinic, LLP + Steven Winkler + stevenwinkler3&aol.com +46754 + NETFOX AG + Mirko Ludeke + mludeke&netfox.de +46755 + Nexgen Oncology + Sujan Paudel + spaudel&mycancercenter.com +46756 + Michael Oberdorf IT-Consulting + Michael Oberdorf + info&oberdorf-itc.de +46757 + DeMoulas Market Basket, Inc.
+ Neil Buckley + security&demoulasmarketbasket.com +46758 + CarMax + David Harold + david_harold&carmax.com +46759 + CoverMyMeds + CoverMyMeds + admins&covermymeds.com +46760 + ISub Softwares + Shubhra Prakash Nandi + email2shubhra&gmail.com +46761 + Groupe JVS + childeric busche + childeric.busche&jvs.fr +46762 + Atos IT Solutions and Services GmbH + Björn Pohl + bjoern.pohl&atos.net +46763 + alfer aluminium Gesellschaft mbH + Christine Breitmoser + christine.breitmoser&alfer.com +46764 + HealthInsight + Information Services + hisrlis&healthinsight.org +46765 + Sviaz-Bank + Nikolay Kurepkin + nkurepkin&sviaz-bank.ru +46766 + Creatcomm Technology + jie zhao + suijtwl&gmail.com +46767 + CarMax, Inc + Shiva Thatipelli + shiva_thatipelli&carmax.com +46768 + Symphony Communication Services LLC + Andrew Wnuk + andrew.wnuk&symphony.com +46769 + St. Leonard's Community Services + Grant Emsley + gemsley&st-leonards.com +46770 + Benz Dataconsulting + Christian Benz + info&benz-dc.de +46771 + Atos IT Services Sp. z o.o. + Paweł Antczak + pawel.antczak&atos.net +46772 + Brig Center for Cancer Care and Survivorship + Christy Campbell + ccampbell&brigcancercare.com +46773 + Voss Kommunikasjon AS + Øyvind Fykse + oyvind&vossk.no +46774 + K4Connect + Jonathan Gould + jonathan&k4connect.com +46775 + Bourgeois Bits LLC + Peter Sagerson + psagers&bourgeoisbits.com +46776 + Westchester Cancer Care + Ellen Hantman + wcancercare&aol.com +46777 + Air New Zealand + Mark Sim-Smith + mark.sim-smith&airnz.co.nz +46778 + NTx Back Office Consulting Group + Christian Schindler + christian.schindler&ntx.at +46779 + Belgian Supreme Administrative Court + Pieterjan Montens + pmo&raadvst-consetat.be +46780 + Domaincracy LLC + Leo Angelo + theleoa&gmail.com +46781 + Sasola Limited + Ian Tyson + ian.tyson&sasola.com +46782 + John Holland PTY LTD + Tim Rollason + tim.rollason&jhg.com.au +46783 + Nephos6 + Ciprian Popoviciu + chip&nephos6.com +46784 + COSMOVISION Co., Ltd. + Yutaka Yamatsu + yamatsu.yutaka&cosmovision.co.jp +46785 + Comark Communications LLC + Jason Hwang + jhwang&comarktv.com +46786 + blue-beam.de + Aaron Bulmahn + webmaster&blue-beam.de +46787 + Sony Computer Entertainment Shanghai Ltd. + Chen Wenli + wenli.chen&scesh.sony.com +46788 + Xabyss Inc. + Changwoo Ryu + changwoo&xabyss.com +46789 + Rains Corp. + Thomas Lewis + thomaslewis&rains-corp.com +46790 + TOYOTA MOTOR CORPORATION + Shinichiro Hagihara + hagihara&mail.toyota.co.jp +46791 + Stephan Fiebrandt + Stephan Fiebrandt + stephan&fiebrandt.org +46792 + ISON Technology Co., Ltd + Eric.hung + eric.hung&ison.com.tw +46793 + ASAT Co., Ltd. + Young Hwan Kim (YH Kim) + kyhwan&asat.kr +46794 + Decision Detective Corporation + D. Stewart + info&decisiondetective.com +46795 + Organization for Security and Co-operation in Europe + Andreas Hainz + andreas.hainz&osce.org +46796 + Bedrock Automation Platforms, Inc. + John Weismiller + john.weismiller&bedrockautomation.com +46797 + The self-financing Science Information Center for New Technologies under State Tax Committee of the Republic of Uzbekistan + Azamat Musakhanov + info&yt.uz +46798 + RBS Services (Switzerland) Ltd + Andrew Church + andrew.church&rbs-services.com +46799 + Interblock d.d. + Dobrovoljc Dejan + dejan.dobrovoljc&interblockgaming.com +46800 + Digitoll Information Technology and Servicing Ltd. 
+ Viktor Németh + digitoll&digitoll.co.hu +46801 + The Hut Group + Martyn Ashworth + martyn.ashworth&thehutgroup.com +46802 + The Business Clinic Ltd + Mark Rawlinson + mark&thebusinessclinic.ltd.uk +46803 + BROCELIA + Christophe GOUTEIX + gouteix&brocelia.fr +46804 + Himnark CJSC + Ruben Osipyan + ruben&himnark.am +46805 + Oncology Consultants + Jerel Sukarangsan + jsukarangsan&oncologyconsultants.com +46806 + Mid Florida Hematology Oncology + Harish Gowda + hgowda&mfhoc.com +46807 + Motiv Telecom Group + Eugene Bobin + gene&motivtelecom.ru +46808 + Fall River School District + Jim Maynard + jmaynard&fallriver.k12.wi.us +46809 + KB PROMSVYAZ + Viktor Khomenko + vx&dbic.pro +46810 + Stockton University + Brian Cole + Brian.Cole&stockton.edu +46811 + L'Imprimerie + Menard Patrick + pmenard&l-imprimerie.fr +46812 + Konsortium ICT Pantai Timur + Ahmad Fazli Ismail + office&k-ict.org +46813 + Sunbird Software, Inc. + Michael Davidson + michael.davidson&sunbirddcim.com +46814 + SC3 Technologies Oü + Andrew Billington + andy.billington&sc3tech.ee +46815 + Third Point LLC + Scott Zionic + it&thirdpoint.com +46816 + HELM AG + Gregor Glinski + g.glinski&helmag.com +46817 + Atam Holdings + Adam Green + adamgreen&atam.com +46818 + VIVASECUR GmbH (formerly 'Systemhaus Scheuschner GmbH') + Mario Andexel + mario.andexel&vivasecur.de +46819 + Attenda Ltd. + Julian Jordan + julian.jordan&attenda.com +46820 + Telekom Malaysia Berhad + Mazlaini Yahya + mazlaini&tmrnd.com.my +46821 + Commandus + Andrei Ivanov + support&commandus.com +46822 + Zhuhai Kingsoft Office Software Co Ltd + Chen Jie + chenjie4255&gmail.com +46823 + Siselectron Technology Ltd + Eva Tsai + eva&siselectron.com +46824 + Certly, Inc. + Ian Carroll + ian&certly.io +46825 + Loop Technologies Limited + Glenn McCarthy + glennm&looptechnologies.com +46826 + OptimalMethods Ltd. + Yuriy Demchenko + yuriy.demchenko&optimalmethods.ca +46827 + Affix Technology Co., Ltd. + Sompop Kumnoonsate + sompop&microx.co.th +46828 + Sea and Air Technology, S.L. + Francisco Gonzalez + fgonzalez&seairtech.com +46829 + GLOCK Ges.m.b.H. + Christian Mikula + christian.mikula&glock.at +46830 + LearnConsult - DI Meitz & Ing. Pock OG + Christoph Schick + christoph.schick&learnconsult.com +46831 + GRAUPNER medical solutions GmbH + Robert Kreher + robert.kreher&graupner-medizin.de +46832 + Heylige FRau Latte + Robert Pehl + robert.pehl&hf-latte-berlin.de +46833 + Synapses Technologies, LLC + Brian Long + Brian.Long&SynapsesTech.com +46834 + Martin GmbH für Umwelt- und Energietechnik + Guenter Flohe + guenter.flohe&martingmbh.de +46835 + rf152 + Richard Franks + pen&rf152.co.uk +46836 + Bilfinger GreyLogix GmbH + Lars Petersen + lars.petersen&bilfinger.com +46837 + AZD Praha s.r.o. + Richard Lammel + doz&azd.cz +46838 + Swaffer Consulting Ltd. + Stuart Swaffer + stuart&swaffer.net +46839 + RED ELECTRICA DE ESPAÑA SAU + José Manuel Moure + jmoure&ree.es +46840 + Christophorus Club e.V. + Henryk Plötz + henryk&christophorus.club +46841 + Hwacheon Machinery Europe GmbH + Steffen Rademacher + admin-hme&hwacheon.com +46842 + Hamburger Software GmbH & Co. KG + Dennis Kronbuegel + dennis.kronbuegel&hamburger-software.de +46843 + Newfield Automation Ltd. + David Jones + david.jones&newfieldautomation.com +46844 + Dreamtime.net (Thailand) Co., Ltd.
+ Stephen Karrington + support&diamondcard.us +46845 + Interlink INC + Eduardo Malisani + support&interlink.com.ar +46846 + FLOPNET + Krzysztof Zarzycki + admin&flopnet.pl +46847 + VALLEY MEDICAL & SURGICAL CLINIC + WANDA MCCOY + WANDASCOTT624&CHARTER.NET +46848 + Lavelle Networks India Private Limited + Trivikrama Rao V + operations&lavellenetworks.com +46849 + Irving Independent School District + Jon Melancon + jmelancon&irvingisd.net +46850 + Tula State Center of Information Technology + Aleksei Sukhov + noc&tularegion.ru +46851 + Terremark + Bill McLaren + billmcl&terremark.com +46852 + viagogo AG + David Suart + iana&viagogo.com +46853 + PAC Enterprises + Brad Alexander + balexand&comcast.net +46854 + Hagenberger Kreis zur Förderung der digitalen Sicherheit + System Administrator + admin&hagenbergerkreis.at +46855 + ShowCase PRO Tecnologia LTDA + Rafael Augusto Peressinoto + rap&showcasepro.com.br +46856 + ZPEG, Inc. + Scott Hastings + scott&zpeg.com +46857 + Park Controls & Communications (P) Ltd. + Satyanarayana Tummalapalli + tsatya&parkcontrols.com +46858 + Dgoujard + Damien Goujard + damien&macuser.fr +46859 + Guangdong Quantum Technology Co., Ltd + NancyChen + nancy_chern&163.com +46860 + Bittacle Consulting Limited + Craig Baltzer + Craig.Baltzer&bittacle.com +46861 + JVCKENWOOD Corporation + Manabu Sato + snmp.support&jvckenwood.com +46862 + Digital Life Promotion Association + Kazuhiro Tabuchi + info&dlpa.jp +46863 + Bosch Thermotechnik GmbH + Tobias Baumgartner + tobias.baumgartner2&de.bosch.com +46864 + Embedtronics Oy + Sami Kantoluoto + sami.kantoluoto&embedtronics.fi +46865 + IS4U, s.r.o. + Petr Dadák + tech&is4u.cz +46866 + Virdata NV + Jeremy De Clercq + jeremy.declercq&virdata.com +46867 + Synthetel Corporation + Shao Miller + iana-pen&synthetel.com +46868 + Secure Energy Services + Tim Nickerson + tnickerson&secure-energy.com +46869 + Torchmind + Johannes Donath + johannesd&torchmind.com +46870 + relaxt confusion labs e.U. + Thomas Gradisnik + tg&relaxt.at +46871 + Magic Mike, Ltd + Michael Bonhomme + michael.bonhomme&gmail.com +46872 + Bimex Energy AG + Lukas Kaufmann / Simon Kech + lukas.kaufmann&polytech.ch +46873 + y1s2 + smail chamrah + yossefsaso2015&gmail.com +46874 + Wavenet sprl + Jean-Noël Wallez + iana&wavenet.be +46875 + prodot GmbH + Mischa Hueschen + mischa.hueschen&prodot.de +46876 + GE Lighting + Tom Holland + holland&ge.com +46877 + ATB Financial + Dustin Heywood + dheywood&atb.com +46878 + PermaGreen Supreme, Inc. + Christopher McChristian + clm&permagreen.com +46879 + Lengvarsky.com + Igor Lengvarsky + igor&lengvarsky.com +46880 + Technology Kitchen Corporation + Brad Claflin + bradclaflin&technologykitchen.com +46881 + henz.co + John Henz + john.henz&jhenz.net +46882 + LSTI + Philippe Bouchet + pbouchet&exchange.lsti.fr +46883 + Capsicum Business Architects + Michael Wyatt + mwyatt&capsi.com.au +46884 + Freie ev. Gemeinde Giessen + Peter Schuster + admin&feg-giessen.de +46885 + TwoFive, Inc. + Masatatsu Akamine + akamine&twofive25.com +46886 + Beken Learning Systems, Inc. + Steve Benoit + steve.benoit&bekenlearning.com +46887 + Bulletin.net Ltd + Kerry Sainsbury + kerry.sainsbury&bulletin.net +46888 + KRDS + Guillaume Simon + hosting&krds.com +46889 + Gravity Academy Engineering + grant haywood + mrgranthaywood&gmail.com +46890 + SOLUM Co., Ltd. 
+ ChangWoo Lee + cw0220.lee&solu-m.com +46891 + fmad engineering llc + Aaron Foo + aaron_ppus&fmad.com +46892 + TwelveBaud Studios, LLC + Andrew Cook + andrew.cook&twelvebaud.net +46893 + Antelope Enterprises + Joelle Maslak + jmaslak&antelope.net +46894 + Hanwha S&C + See Yun Kim + seeyunk&hanwha.com +46895 + NexDefense, Inc. + Jeff Barber + jeff.barber&nexdefense.com +46896 + UNIACC Electronic Technology Ltd. of Suzhou + Kingtonlee + kingtonlee&sina.com +46897 + Virtual Hold Technology, LLC + Sean O'Rourke + sorourke&virtualhold.com +46898 + Hewlett-Packard (Schweiz) GmbH + John Henz + john.henz&hpe.com +46899 + TDK LAMBDA LTD + SHMUEL MASSIL + samuel.m&tdk-lambda.co.il +46900 + TiePie engineering + Anton Poelsma + sysops&tiepie.nl +46901 + Wilhelmsen Loom Consulting + Lars Wilhelmsen + lars&sral.org +46902 + Keegan Moodley Ltd + Keegan Moodley + keeganmoodley96&gmail.com +46903 + DE-CIX Management GmbH + Kay Schroeder + kay.schroeder&de-cix.net +46904 + Sepura plc + Kevin Sale + Kevin.Sale&sepura.com +46905 + KTI RÁDIO, spol. s r.o. + Miroslav Sajko + sajko&kti.sk +46906 + Enghouse Networks Nordics AB + Johan Norborg + johan.norborg&enghouse.com +46907 + Qognify LTD + Info at Qognify + info&qognify.com +46908 + Ascot Industrial + Marco Bambili + m.bambili&ascotinternational.it +46909 + Centric Housing & Healthcare + Erik van Alderwegen + erik.van.alderwegen&centric.eu +46910 + AIC S.A. + IT Department + it&myaic.com +46911 + KERLINK + Dubourg Ronan + ronan.dubourg&kerlink.fr +46912 + Nassauische Heimstaette Wohnungs- und Entwicklungsgesellschaft mbH + Michael Schreck + michael.schreck&naheimst.de +46913 + Hotspot Tiefenort + Thomas Pinkert + thomas.pinkert&hotspot-tiefenort.de +46914 + Administration des Ponts et Chaussées + Georges Goebel + georges.goebel&pch.etat.lu +46915 + raffel.biz + Joern Raffel + joern&raffel.biz +46916 + Aviva Voice Systems and Services SL + Javier Moreno + jmoreno&avivavoice.com +46917 + PC-TipTop + Robert Christian Strasser + pki&pc-tiptop.de +46918 + GRUPO REVENGA + FRANCISCO JOSÉ ELENO PARDO + elias.molinedo&revenga.com +46919 + Aadi Network Tools + Hemant Kelkar + hemantkelkar2001&yahoo.com +46920 + DEVISE FUTURES - IT SOLUTIONS, LDA + José Eduardo Pina Miranda + jose.miranda&devisefutures.com +46921 + Universidade Federal de Sao Joao del-Rei + Roosevelt Mairink dos Santos Junior + mairink&ufsj.edu.br +46922 + Wolf GmbH + David Mager + David-Arpad.Mager&Wolf-Heiztechnik.de +46923 + LG&E-KU LLC + Steven Ramirez + Steven.Ramirez&lge-ku.com +46924 + Oncology Specialists of Charlotte, PA + Erin LeMire + elemire&oncologycharlotte.com +46925 + EEBus Initiative e.V.
+ Andreas Schwackenberg + schwackenberg&eebus.org +46926 + Midas Green Technologies + Chris Boyd + iana&midasgt.com +46927 + INVITE Networks Incorporated + Josh Whiting + josh&invitenetworks.com +46928 + r0ck.net + Cary Kempston + iana&r0ck.net +46929 + Uzma Iqbal MD PA + Barbara Hand + manager&houstoncenterofhope.com +46930 + Sandelman Software Works + Michael Richardson + mcr&sandelman.ca +46931 + Welch Allyn Singapore Pte Ltd + Michael Lim + michael.lim&welchallyn.com +46932 + PAO NEFAZ + Gavrilenko Sergey + admin&nefaz.ru +46933 + Business&Decision + Blyednova Iryna + iryna.blyednova&businessdecision.com +46934 + AFSOC + David Bricker + afsoc.csf.scp.cmia&us.af.mil +46935 + Provinzial NordWest + Uwe Karhof + ca-admin&provinzial.de +46936 + Ingenieurbüro Eggimann + Peter Eggimann + office&i-buero.ch +46937 + Judo Atlas + Jean-Luc Duprat + jld&acm.org +46938 + Frederick Oncology Hematology Associates + Naycherie Alvira + manager&fredonc.com +46939 + Sentinel Technologies + Doug Schaapveld + dschaapveld&sentinel.com +46940 + Quantic Telecom + Josselin Lecocq + noc&quantic-telecom.net +46941 + Noom, Inc. + Zachary Elliott + infrastructure&noom.com +46942 + Löwenfelsen UG (haftungsbeschränkt) + Philipp Schafft + it&loewenfelsen.net +46943 + STC Systems + François Lacombe + iana&stcsystems.fr +46944 + Beijing Yanglian technology co., LTD + Tenen An + ancuizhao&ylnetworks.com +46945 + Rafako S.A. + Mateusz Żołyński + administratorzy&rafako.com.pl +46946 + Transports Publics Genevois + Mathieu Loffel + loeffel.mathieu&tpg.ch +46947 + Meplan GmbH + Martin Stefan Riesel + itadmin&meplan.de +46948 + United Airlines, Inc. + Brandon Mangold + brandon.mangold&united.com +46949 + Moore Family + David Moore Jr. + drmoorejr&gmail.com +46950 + FRZ Uni Leipzig + Bjoern Schwarzbach + schwarzbach&wifa.uni-leipzig.de +46951 + 中山大学附属第一医院 (First Affiliated Hospital of Sun Yat-sen) + 王武 (Wang Wu) + cfanwang_001&126.com +46952 + SecureAuth Corporation + Michael Franke + dev_mgmt&secureauth.com +46953 + Active8 + Erik de Jonge + erik&a8.nl +46954 + Institut Puig Castellar + Gerard Falcó + gerardfp&iespuigcastellar.xeill.net +46955 + Wolfnet + Jason Fritcher + jkf&wolfnet.org +46956 + Institute of Cosmophysical Research and Aeronomy of the Siberian Branch of the Russian Academy of Sciences + Andrey I. Ivanov + andrey.ivanov&ikfia.sbras.ru +46957 + setelsa + Jesus Manuel Ibañez Gomez + jibag&setelsa.net +46958 + New Cloud Technologies Ltd. + Vladislav Varnavsky + vladislav.varnavsky&ncloudtech.ru +46959 + Jaxageto + Robert Labudda + iana-contact+remove-if-not-spam&spacepanda.se +46960 + mrn sa (formerly 'mrn lda') + bruno romao + ssi&mrn.pt +46961 + emineo AG + Claudio Jossen + ictservices&emineo.ch +46962 + Novaris Ltd. + Evgeny Kurensky + support&novaris.ru +46963 + Shenzhen United Innovation Automatic Control System Co., Ltd. + likongcai + lghcg2015&hotmail.com +46964 + Yandik + Yandi Kuspriandi + rian191&gmail.com +46965 + Wordery + Lee Valentine + tech&wordery.com +46966 + KIRACON Ltd. 
+ Rainer Kirchner + piana&kiracon.com +46967 + Thompson Rivers University + Wesley Cole + hostmaster&tru.ca +46968 + Triad RF Systems + David Campbell + info&triadrf.com +46969 + R&D Software Solutions srl + Dragos Iosub + dragos&rdss.ro +46970 + Borealis + Ivo Karremans + ivo.karremans&borealisgroup.com +46971 + GKeS + Sunghae Kim + shkim&gkes.co.kr +46972 + ICS Industries Pty Ltd + David Barker + dbarker&icsindustries.com.au +46973 + Datalab Software GmbH + Bernhard Radke + info&datalab.de +46974 + EdgeTheory LLC + Maloy Wilkes + maloywilkes&leadify.net +46975 + MediGroup + Cedomir Suljagic + c.suljagic&medigroup.rs +46976 + Rhebo GmbH (formerly 'Rhebo AG') + Martin Menschner + martin.menschner&rhebo.com +46977 + ELTEC Elektronik AG + Ralf Spachmann + rspachmann&eltec.de +46978 + The Akron Holding + Alexander Lukin + it&akron-holding.ru +46979 + Q&D Solutions + Carl Sörqvist + oid-admin&qdsolutions.se +46980 + Ondics GmbH + Wolfgang Clauss + marketing&ondics.de +46981 + Netclient as + Svend Eriksen + supplier&netclient.no +46982 + Smart Concepts B.V. + Johan Vaatstra + info&smartconcepts.nl +46983 + inIT - Institute Industrial IT, OWL University of Applied Sciences and Arts + Arne Neumann + arne.neumann&th-owl.de +46984 + SABO Elektronik GmbH + Ulrich Gruenebaum + gruenebaum&sabo.de +46985 + Marcus Portmann + Marcus Portmann + marcus&mmp.guru +46986 + Meister Rechtsanwaelte + Bernhard Radke + b.radke&meisterlaw.de +46987 + Cancer & Blood Disorders Treatment Center + Lauren Breznak + lmbreznak&gmail.com +46988 + CGI Deutschland + Uwe Hering + uwe.hering&cgi.com +46989 + Systemservice Nordfriesland + Sascha Büttner + Sascha.Buettner&sysserv-nf.de +46990 + ACCOLADE ELECTRONICS PVT. LTD. + Mr. Deepak Jagdale + design&accoladeelectronics.com +46991 + Universitaetsmedizin Goettingen + Mathias Schuetze + mathias.schuetze&med.uni-goettingen.de +46992 + IP CINT + Zhitenev Viktor + vicomsys&mail.ru +46993 + TDP s.r.o. + Petr Kureš + pkures&tdp.cz +46994 + ETS Schick + Andreas Schick + andreas.schick&elektrotechnik-schick.de +46995 + Tantalus Systems + Kel Bahi + kbahi&tantalus.com +46996 + Claus Queck GmbH + Thomas Frings + it&stahlbau-queck.de +46997 + MyQuickCloud + Talal Choucair + iana&myquickcloud.com +46998 + CEHL + Cristian E. Honorato L. + cristian&honorato.cl +46999 + RAK + XiaoXia Cui + jackel.cui&dhdz-suzhou.com +47000 + Converling Group B.V. + Dennis van den Oever + Domains&converling.com +47001 + LCS Co.Ltd + Johnson Lu + lcsbbs&163.com +47002 + System Controls Technology Solutions Private Limited + Barry Anand Francis + webmaster&system-controls.com +47003 + JSC VELAN + Maxim Tsyplakov + tm&oganer.net +47004 + Secucloud Network GmbH + Steffen Bajerke + steffen.bajerke&secucloud.com +47005 + Fort Lewis College + Information Technology + information_technology&fortlewis.edu +47006 + TransneftEnergy, LLC + Artem Shtykov + ShtykovAA&tne.transneft.ru +47007 + Ascent Resources + Murphy Crosby + murphy.crosby&ascentresources.com +47008 + MBDA France + Jean-Philippe Brossat + jean-philippe.brossat&mbda-systems.com +47009 + ISIS Bilisim Teknolojileri San. Tic. Ltd. Sti. 
+ Mustafa Kerim Yılmaz + mustafa.yilmaz&isisbilisim.com.tr +47010 + UK Ministry of Defence + Mathew Newton + hostmaster&mod.uk +47011 + Ozarks Community Hospital + Joe Johnson + joejohnson&ochonline.com +47012 + SIMP + Kendall Moore + support&sicura.us +47013 + APTVISION LIMITED + Marek Wierzbicki + markw&aptvision.com +47014 + BBS1 Mainz + Frank Schurad + edv&bbs1-mainz.de +47015 + Zeye Solutions + Pavel Reznikov + info&zeye.ws +47016 + BlackSands Inc + Timothy Gallagher + tgallagher&blacksandsinc.com +47017 + Qumulo Inc. + Michael Murray + mmurray&qumulo.com +47018 + ZeuSWarE GmbH + Kai Scheddin + kaisch&zeusware.de +47019 + ApexSQL LLC + Adrijan Bandu + adrijan.bandu&apexsql.com +47020 + Flor Szoftver Ltd. + Flor Gabor + info&florszoftver.hu +47021 + Real Time Automation, Inc. + Scott Zukewich + szukewich&rtaautomation.com +47022 + TSI Automacao + Eduardo Goncalves + tsi&tsi-aut.com.br +47023 + Xolarsoft + Guynell Pittman + gpittman&xolarsoft.com +47024 + Catalyze + Mark Olschesky + mark&catalyze.io +47025 + Charles River Analytics, Inc. + James Lauwers + iana&cra.com +47026 + Paraqum Technologies (Private) Limited + Ajith Pasqual + pasqual¶qum.com +47027 + Quavant Networks, Inc. + Ronald Phillips + rphillips&quavant.com +47028 + Bund der Pfadfinderinnen und Pfadfinder e.V. + Philipp Steinmetzger + hostmaster&pfadfinden.de +47029 + Thomas Tague + Thomas Tague + pen&intivo.com +47030 + HAN Networks Co., Ltd + Fan Xiaowei + fanxw&han-networks.com +47031 + Shenzhen Eunicum Electric Co., Ltd. + Xianyao Yu + xianyao.yu&eunicum.com.cn +47032 + Shanghai Yamu Technology Limited + Xiaodong Zhang + xd.zhang&yamutech.com +47033 + LLC "66 Parallel" + Alexei Babich + expl&66p.su +47034 + CARS + Fred Yin + fredyin32&gmail.com +47035 + trash.net + Roman Fischer + romanf&trash.net +47036 + Ecosoft B.V. (trademark is Vidicode) + Alwin Adolf + alwin&vidicode.nl +47037 + Jubic Oy + Matti Rita-Kasari + matti.rita-kasari&jubic.fi +47038 + Université de Namur (UNamur) + Antoine Roly + antoine.roly&unamur.be +47039 + 312Telephony + Charlie Srutowski + csrutowski&312telephony.com +47040 + ChuckStarter + C Srutowski + srutowsk&chuckstarter.com +47041 + gurtzick.net + Thomas Gurtzick + iana-pen&gurtzick.de +47042 + Neragon Networks LTD + Raviv Zeev + raviv&neragon.com +47043 + Illusion Ltd. + Csaba Major + major&illusion.hu +47044 + Charter Court Financial Services + Stuart Pittaway + stuart.pittaway&chartercourtfs.co.uk +47045 + MartinLefebvre.com + Martin Lefebvre + dadexter&gmail.com +47046 + Bucks New University + Dr Nikos Baltas + Nikos.Baltas&bucks.ac.uk +47047 + Phoenix Home + YANG WEI + urbain.yang&qq.com +47048 + Webnii + David J. Warho + dwarho&webnii.com +47049 + EndFirst LLC + Rob Archibald + rob.archibald&endfirst.com +47050 + The AREDN Project + Andre Hansen + andre.k6ah&gmail.com +47051 + Virtu Financial LLC + Peter Venardos + ondesk&virtu.com +47052 + SK Planet + Jinoos Lee + jinoos&sk.com +47053 + SOUTHEAST ASIA JOINT STOCK COMMERCIAL BANK (SeABank) + Nguyen Tuan Cuong + cuong.nt&seabank.com.vn +47054 + CJSC Transmashholding + Kapitonov Mikhail + info&tmholding.ru +47055 + GouldFLP + Thomas Gould + tgould&gouldflp.com +47056 + SunEdison Inc + John Bassinger + jbassinger&sunedison.com +47057 + CityLink Data Co,.Ltd + Zhihong You + youzhihong&citylinkdata.com +47058 + AlfaSix Tecnologia + Fernando Gottlieb + fernando&alfasix.com.br +47059 + Earth2 Software Pty Ltd. 
+ Adam Ali + adam.ali&earth2.com.au +47060 + elementmedia GmbH + Christian Friedrich + friedrich&elementmedia.com +47061 + Shenzhen headsun technology co.,ltd. + Li Zhi + 13603076995&139.com +47062 + Isha Foundation + Arumugam Rajarethinam + arumugam.r&ishafoundation.org +47063 + ZWAY CO.,LTD. + Zhou Arron + arron&zwpdu.com +47064 + St. Joseph's Anglo-Chinese Primary School + Kwok Wan + admin&sjacps.edu.hk +47065 + TACACS.net, Inc. + Rob Lemaster + rob&tacacs.net +47066 + VISQUAL Technologies + Brave Wong + admin&visqual.com +47067 + xlea.se GmbH + L. Wolf + l.wolf&xlea.se +47068 + Optelix (Pty) Ltd + Wietz Joubert + wietz&optelix.com +47069 + Economia per i Cittadini + William B. Clay + hostmaster&epici.it +47070 + Orell Fuessli Holding AG + Beat Gruetter + support&ofh.ch +47071 + Flash Systems + Daniel Goss + developer&flashsystems.de +47072 + 567.dk + Henrik Bruun + registration&567.dk +47073 + Layka Soluciones + Arturo Diaz Rosemberg + arturodr&gmail.com +47074 + TRONTEQ Electronic + Simon Tapmeyer + Simon.Tapmeyer&tronteq.de +47075 + PROJECT-M + Kimura Masao + cybermasa&project-m.jp +47076 + Bridgeway Security Solutions + Jason Holloway + it&bridgeway.co.uk +47077 + Device Insight GmbH + Matthias Rothe + it&device-insight.com +47078 + JAPAN TELECOMMUNICATION ENGINEERING SERVICE CO., LTD. + Manabu Teshirogi + ma-teshirogi&j-tes.co.jp +47079 + Mott MacDonald + Joe Madden + helpdesk.tts&mottmac.com +47080 + EDV-Beratung Baumgarten GmbH + Daniel Gompf + daniel.gompf&edv-baumgarten.de +47081 + Aragon Institute of Engineering Research + Juan Ignacio Garcés Gregorio + hcluster&unizar.es +47082 + Mobiquithings + Jérémy RIZZOLI + jrizzoli&sierrawireless.com +47083 + Advanced Vision Technology LTD + Alan Pritchard + alan&avtechuk.com +47084 + Stade Toulousain Rugby + Julien BARES + jbares&stadetoulousain.fr +47085 + Controles Inteligentes S.A.S + Pedro Camargo + pcamargo&ci24.com +47086 + Olmsted County Minnesota + Jim Burke + burke.jim&co.olmsted.mn.us +47087 + Masonic Villages of the Grand Lodge of PA + Tamara Hake + thake&masonicvillages.org +47088 + Pro-hosting s.r.o. + Peter Húbek + info&pro-hosting.sk +47089 + Ramp Holdings, Inc. + Giovanni Marzot + gmarzot&ramp.com +47090 + Exdev + bennani afif + afif.bennani&exdevbusiness.com +47091 + Cancer and Blood Specialist of Northern Virginia, PC + Deborah Kandahari + dkandahari&cancerandbloodspecialist.com +47092 + Tangible Trade + Alex Kwiatkowski + iana&tangibletrade.ca +47093 + DeMaTech HB + M. Katerbarg + info&dema-tech.se +47094 + Sielte S.p.A. + Salvatore Davide Rapisarda + sa.rapisarda&sielte.it +47095 + Subtopia Ltd + Chris Smith + chris.smith&subtopia.org +47096 + Unisphone + Miguel Silvosa Rodríguez + sat&unisphone.es +47097 + ARDIAN + Olivier RUZE + olivier.ruze&ardian.com +47098 + Lloyd Industries Inc. + liton liao + itcn&lloydind.cn +47099 + G4S Security Systems (Hungary) Ltd. 
+ Tamas Morocz + tamas.morocz&hu.g4s.com +47100 + Linkscreens SRL + Todor Florin + florin.todor&linkscreens.com +47101 + DAS-Online + Dirk Alexander Schaefer + info&das-online.org +47102 + Global Technology Solutions + Mahmazayitov Kufliddin + iana.contact&glotechsol.com +47103 + PLC Technology, LLC + Kirill Esipov + yesipov&tpz.ru +47104 + Agentia pentru Agenda Digitala a Romaniei + Gabriel DUMITRU + gabriel.dumitru&aadr.ro +47105 + Together Bulgaria + Lachezar Kuzmanov + lachezar.kuzmanov&together.bg +47106 + Spearline Labs + Matthew Lawlor + support&spearline.com +47107 + Shaw Academy + Ajit Parthan + ajit.p&shawacademy.com +47108 + Beeper Communications Ltd. + Benny Michailovici + mislogs&beeper.co.il +47109 + Abaco Systems + Charles Wood + charles.wood&ametek.com +47110 + Agenzia per l'Italia Digitale + Umberto Rosini + rosini&agid.gov.it +47111 + Baker Hill Solutions, LLC + David Petercheff + david.petercheff&bakerhill.com +47112 + Louisiana Division of Administration + DeSha Fryoux + admin&la.gov +47113 + Aviacomm Inc. + Qinan Mao + qinan.mao&aviacomm.com +47114 + WOM Engineering + Peter Chen + yxchen0324&gmail.com +47115 + The Star Entertainment Group Limited + Steve Malone + Software.Licensing&star.com.au +47116 + NPL Management Ltd + Simon Ashford + simon.ashford&npl.co.uk +47117 + Spaulding Clinical + Troy Garrison + informationtechnology&spauldingclinical.com +47118 + Asian Arts Talents Foundation + Felix Wong + pen&aatf.us +47119 + SCIFI + Cosme Faria Corrêa + cosmefc&id.uff.br +47120 + Solustic - Solucoes em Tecnologia + Rodrigo Melo Meireles + rodrigo&solustic.com.br +47121 + TDCH A/S + Claus Albøge + ca&tdchosting.dk +47122 + BVL Group Ltd + Wallace Wadge + info&bvlgp.com +47123 + CODEC CO.,LTD. + Kunihiro Tanaka + codec_tanaka&yahoo.co.jp +47124 + Schmied Elektronik & Software + Benedikt Schmied + b.schmied&gmx.com +47125 + IDEAS S.R.L. + DOMENICO CAPELLA + d.capella&ideas-srl.com +47126 + earthTV network GmbH + Anja Krüger + accounting&earthtv.com +47127 + Yoti Ltd + Laurence Withers + laurence.withers&yoti.com +47128 + SQTec GmbH + Danny Baensch + danny.baensch&sqtec.ch +47129 + SECURA Insurance Companies + Todd Dorschner + todd_dorschner&secura.net +47130 + Al Yah Satellite Communication Company PJSC (Yahsat) + Josip Duricic + jduricic&yahsat.ae +47131 + ViktorBalogh + Viktor Balogh + me&viktorbalogh.net +47132 + Thinking Automation, Inc. + Derek Knutsen + dknutsen&researchcapitalgroup.com +47133 + Research Capital Group, Inc. + Derek Knutsen + dknutsen&researchcapitalgroup.com +47134 + FROG CELLSAT LIMITED + TARUN SHARMA + tarun&frogcellsat.com +47135 + MotionLED Technology Ltd. + Wilfred Wong + wilfred&motionledtechnology.com +47136 + 4S Information Technologies + Efe Erdogru + efe.erdogru&4s.com.tr +47137 + Andre Grosse Bley + Andre Grosse Bley + mib&grosse-bley.com +47138 + Noble 3D Printers, LLC + Edward R. Aylward II + edward.aylward&Noble3DPrinters.com +47139 + MaTelSo GmbH + Marc Fürst + marc.fuerst&matelso.de +47140 + DSG Systems AS + Espen Remman + espen.remman&dsg.no +47141 + PT. WIRAKY NUSA TELEKOMUNIKASI + UJANG HERANI + ujaheran&wnt.co.id +47142 + CoralTree Systems + Lee Ward + lee.ward&coraltreesystems.com +47143 + iSnapp + Willem de Jonge + willem&isnapp.nl +47144 + CommerceHub + Michael Vinzi + mvinzi&commercehub.com +47145 + Telonic Berkeley Inc + KANAK VAGHELA + kanak&telonicberkeley.com +47146 + Unassigned + Returned 2017-05-04 + ---none--- +47147 + Avionics Interface Technologies + Sharon Simon + sharons&aviftech.com +47148 + Winston Brands Inc. 
+ Wilson Caetano + wcaetano&winstonbrands.com +47149 + Adaptrum Inc. + Shufen Zhang + shufen&adaptrum.com +47150 + Virginia Cancer Institute + Shonda Clements + sclements&vacancer.com +47151 + CoinBau GmbH + Dr. Markus Winter + winter&coinbau.com +47152 + Dovecot Oy + Ville Savolainen + ville.savolainen&dovecot.fi +47153 + Brunke electronic + Marc Brunke + pen.iana&optocore.org +47154 + Stabilitas + Stephen Riley + stephen&stabilitas.io +47155 + VINTEGRIS SL + Marisa Herranz + marisa.herranz&vintegris.com +47156 + Haus Michael auf Draht e.V. + Malte Horstmann + mad&rub.de +47157 + Boston Scientific Corporation + Craig Johnson + Craig.Johnson&bsci.com +47158 + Clipcomm Inc. + Kim Eunsoo + eskim&clipcomm.co.kr +47159 + Jevons Global Pty Ltd + Kingsley Jones + Info&jevonsglobal.com +47160 + Centorrino Technologies + Nicholas Iacobucci + niacobucci&ct.com.au +47161 + Stuttgarter Haus- und Grundbesitzerverein e. V. + Daniel Niccoli + d.niccoli&hausundgrund-stuttgart.de +47162 + Ote SA (Hellenic Telecommunications Organisation) + George Patikis + gpatikis&ote.gr +47163 + Banka Koper d.d. + Damjan Janezic + damjan.janezic&banka-koper.si +47164 + Ciudad del Motor de Aragon, SA + Jose Miguel Arilla Navarro + jmarilla&motorlandaragon.com +47165 + Self Esteem Brands + Matthew Warrick + matt&sebrands.com +47166 + Digital Insyte, LLC + Lewey Taylor + lewey&digitalinsyte.com +47167 + AB LITGRID + Robertas Radžvilas + robertas.radzvilas&litgrid.eu +47168 + Guangzhou Yinxun Comm. Tech. Co., LTD + Hengxuan Liu + market&gzyinxun.com +47169 + JSS Technologies, LLC + Jared Szechy + jared&jsstechllc.com +47170 + Skylark Technology Inc. + Alexey Sadovskiy + sadovsky&skylark.tv +47171 + Maximus, Inc. + Daniel J Dohner + danieljdohner&maximus.com +47172 + OMNITEC SOLUTIONS, Inc. + Mark Holmes + mholmes&omnitecinc.com +47173 + Openwave Messaging + Paul van Gool + paul.vangool&owmessaging.com +47174 + Compute Canada + Gregory Newby + cto&computecanada.ca +47175 + Cosylab d.d. + Rok Kosir + rok.kosir&cosylab.com +47176 + gbltech.net + George Lancina + gbl&gbltech.net +47177 + Diplomat Pharmacy, Inc. + Charles Williamson + cwilliamson&diplomat.is +47178 + Seneca Mortgage Servicing LLC + James Torrence + jtorrence&senecaservicing.com +47179 + Infosec Global Inc + Dilip Chetram + dilip.chetram&infosecglobal.com +47180 + Unite Control Sp. z o.o. + Semyon Krivosheev + sk&unitecontrol.com +47181 + Capella Systems, LLC + David Dong + ddong&capellasystems.net +47182 + Applied Micro Design Inc. + Eric Becker + ebecker&appliedmicrodesign.com +47183 + Studer Innotec SA + Vincent Bovay + it_manager&studer-innotec.com +47184 + LifeTech + Roman Korshakevich + license&life.com.by +47185 + TOPCON CORPORATION + Tomoyuki Yabe + t.yabe&topcon.co.jp +47186 + CryoWerx + Darwin Gosal + contact&cryowerx.com +47187 + Global Blue Service Company Austria GmbH + Nils Mayerhofer + nmayerhofer&globalblue.com +47188 + Slovenske elektrarne, a.s. + Matej Sustr + matej.sustr&seas.sk +47189 + EPICT Italy + Koceva Frosina + frosina.koceva&edu.unige.it +47190 + HCMI datasharing consortium + N.F. Ramsey + admin&hcmi-consortium.nl +47191 + Bulat LLC + Shubin Sergey + shubin&bulat-opk.ru +47192 + Central Bank of Ireland + Dan Cleary + information.security¢ralbank.ie +47193 + ATICEL - Advanced Tech Cell + Miguel Proaño + miguel&advancedtechcell.com +47194 + Asesoria Telematica Canarias SL + Eric Dantie + edantie&atcan.es +47195 + Gradkell Systems, Inc. + Mike R. 
Prevost + mprevost&gradkell.com +47196 + Hewlett Packard Enterprise + Harry Lynch + harry.lynch&hpe.com +47197 + Rainbow Crow + Mike Dimitrov + mike.dimitrov&gmail.com +47198 + IVALUA SAS + Arnaud Khuat-Duy + akd&ivalua.com +47199 + ISEC7 + Michael Brandt + michael.brandt&isec7.com +47200 + realraum - Verein für Technik in Kultur und Gesellschaft + Nicolas Braud-Santoni + nicoo&realraum.at +47201 + Vivid-Hosting, LLC + Kiet Chau + kchau&vivid-hosting.net +47202 + Top Down Systems Corporation + Jeromy Arnold + jeromy.arnold&topdownsystems.com +47203 + Flipit Marketing Ltd (formerly 'Reactiv Media Ltd') + Clinton Edmondson + Clinton.Edmondson&flipitmarketing.co.uk +47204 + «КВАНТЭКС» Limited liability partnership («QUANTEX») + Yuriy S. Samarin + office&quantex.kz +47205 + Lake Superior State University + Scott Olson + solson&lssu.edu +47206 + Brnit + Sharagim Parvareshkhah + sh&brnit.com +47207 + DAVID Systems GmbH + Wolfgang Seidl + wseidl&davidsystems.com +47208 + Ensinger GmbH + Rupert Holzer + OID-Admin&de.ensinger-online.com +47209 + MonkZ + Malte Kuhn + admin&monkz.de +47210 + ug consult + Ulrich Greshake + ugreshake&ug-consult.de +47211 + Ecole Polytechnique Fédérale de Lausanne + Claude Lecommandeur + claude.lecommandeur&epfl.ch +47212 + MailInBlack + Antony Spera + aspera&mailinblack.com +47213 + Nechi Group + Yandi Naranjo Basalo + ynaranjo&nechigroup.com +47214 + 4all Tecnologia + Eduardo Hahn + infra&4all.com +47215 + Markus Losco + Markus Losco + markus.losco&diskstation24.de +47216 + Mundio Mobile Holding Limited + Mr Andoko Wicaksono + legal&mundio.com +47217 + Einstein Medical Center + Louis Esposito + espositol&einstein.edu +47218 + websix GmbH + Marco Eble + m.eble&websix.de +47219 + Primary Data + Roni Rosen + roni&primarydata.com +47220 + IUT de BEZIERS + Christophe BORELLY + Christophe.Borelly&iutbeziers.fr +47221 + µE Kunst + Stefan Dickel + stefan.dickel&t-online.de +47222 + Megapath + Hugo Aviles + syseng&megapath.com +47223 + IQlunch + Hironori Ikura + hikura&iqlunch.net +47224 + Maersk Oil + Henrik Hulstrøm + henrik.hulstrom&maerskoil.com +47225 + David Garner NZ + David Garner + david&davidgarner.nz +47226 + Walker Digital Table Systems + Brian Besterman + bbesterman&wdtablesystems.com +47227 + EPI-USE Systems + Roelof Naude + snmp&epiuse.com +47228 + Evolution Gaming, Ltd. + Janis Lasmanis + security&evolutiongaming.com +47229 + SIPO Global + David Martin + dmart408&gmail.com +47230 + Gafachi, Inc. + Adam Glynn + adam.glynn&gafachi.com +47231 + Foxconn Corporation + Alex Phan + houston.it&foxconn.com +47232 + RodNoc + Dominik Sliwa + iana&tuxtorials.de +47233 + TELENT TECHNOLOGY SERVICES LIMITED + Adrian Phillips + adrian.phillips&telent.com +47234 + Suffolk County Scouts + Rob Gloess + rob.gloess&suffolkscouts.org.uk +47235 + QXIP + Lorenzo Mangani + lorenzo.mangani&gmail.com +47236 + MarSoftware + Torsten Markwardt + tmarkwardt&marsoftware.de +47237 + SYSTEMES INFORMATION HARMONIE MUTUELLE SIHM GIE + Patrick Rioche + contact.certificat&sihm.fr +47238 + AIS Advanced Info Service + Rachot Tawornsiri + rachotta&ais.co.th +47239 + Braincarta + N.F. Ramsey + n.f.ramsey&braincarta.com +47240 + Syncsort Inc. + IT Support + dns-info&syncsort.com +47241 + DZIECIECY SWIAT Krzysztof Pytko + Krzysztof Pytko + krzysztof.pytko&edzieciecyswiat.pl +47242 + CoreOS, Inc + Brian Harrington + brian.harrington&coreos.com +47243 + Steelcase, Inc. + Christian Gillen + ianaadmin&steelcase.com +47244 + Roku Inc. 
+ Gary Ellison + gfe&roku.com +47245 + Shar3d Interactive + Arnaud BOUTIN + contact&shar3d-interactive.com +47246 + JF Zwobada + JF Zwobada + jf&zwobada.com +47247 + SILKAN SA + Farid Ben Tekfa + courrier&silkan.com +47248 + Ramarc Innovations + Marc Bilodeau + marc&ramarc.com +47249 + Pjano Consulting AB + Johan Pellkvist + jope&pjano.se +47250 + ProMDM d.o.o. + Ratko Štibrić + stibra&promdm.com +47251 + WEBSPH + Pepe Mariano Jr + jhuntech09&gmail.com +47252 + City of West Lafayette + Brad Alexander + balexander&westlafayette.in.gov +47253 + secureNET Switzerland Inc. + Bruno Kunz + bkunz&securenetswiss.ch +47254 + Western Australian Internet Association Inc. + Will Dowling + will&ix.asn.au +47255 + T&T sistemi srl + Gabriele Valentini + gabriele.valentini&tetsistemi.com +47256 + INSTER Tecnologia y Comunicaciones + Jose Antonio Cardenas Montero + jcardenas&inster.es +47257 + DBC A/S + Lars Skjærlund + las&dbc.dk +47258 + The Automobile Association (AA PLC) + Darragh O'Keeffe + darragh.okeeffe&theaa.com +47259 + LUNATICANET + Alexander Koch + hostmaster&lunatica.net +47260 + Electrontechnika + Tretyakov Sergey + root&et32.ru +47261 + OBI Smart Technologies mbH + Guido Kötter + guido.koetter&obi.de +47262 + LSware Inc. + Information Technology + infotech&lsware.co.kr +47263 + HEITZ Jean-Marie + HEITZ Jean-Marie + loc.heitz&wanadoo.fr +47264 + New Zealand Customs Service + Byron Collins + byron.collins&datacom.co.nz +47265 + HWADAR Technologies Co., Ltd. + Tom.Zhao + zcx3000&126.com +47266 + Prochista Irsa Tech Ltd. + AmirAli Pasdar + pasdar&prochista.ir +47267 + OpenSwitch + Frank Reichstein + frank.reichstein&hpe.com +47268 + FARMING BITS, LDA + Filipe Romão da Fonseca + filipe.fonseca&farmingbits.com +47269 + Pluribus Networks, Inc + Marco Pessi + marco.pessi&pluribusnetworks.com +47270 + Felixfound + William Knox + william.knox&iinet.net.au +47271 + Dreamlab Onet.pl SA + Marcin Kaptur + marcin.kaptur&dreamlab.pl +47272 + Evrotrust JSC + Stefan Hadjistoytchev + ca&evrotrust.bg +47273 + LLAMMA SK + Juraj Binka + juraj.binka&llamma.sk +47274 + CPK Interior Products + Arley Hinds + arley.hinds&cpkip.ca +47275 + Hirschmann Multimedia B.V. + Daan Bonenkamp + dbonenkamp&hirschmann.nl +47276 + Helsana Versicherungen AG + Hans-Peter Bärtsch + hans-peter.baertsch&helsana.ch +47277 + Hochschule Weihenstephan-Triesdorf + Rechenzentrum + rz&hswt.de +47278 + TeskaLabs Ltd + Ales Teska + ales.teska&teskalabs.com +47279 + ITCENTER + Carlos Almeida + carlos.almeida&itcenter.com.pt +47280 + Imagen Technologies + Ronald Kneusel + ron&imagentechnologies.com +47281 + ESPUBLICO SERVICIOS PARA LA ADMINISTRACION SA (ESFIRMA) + Fernando Palacio + fpalacio&espublico.com +47282 + The LinchPinGroup, LLC + IANA Contact + IANA&thelinchpingroup.com +47283 + PJSC SOLLERS + Glinnik Dmitry + msk.help&sollers-auto.com +47284 + Cyber IT Solutions + Con Zymaris + conz&cyber.com.au +47285 + LLC "Smartbrood" + Andrey Petrov + andreynpetrov&gmail.com +47286 + UANATACA S.A. + Marco Scognamiglio + info&uanataca.com +47287 + Brabbler Secure Message and Data Exchange Aktiengesellschaft + IT Operations Department + itops&brabbler.ag +47288 + COMMUNITHINGS S.A. 
+ JUAN HURTADO TOMERO + juan.hurtado&communithings.com +47289 + SpacePath Communications + Matt Milston + matt.milston&space-path.com +47290 + Johnson Computers + Johnson Computer Operations + operations&johnson.computer +47291 + Deep Groups + Pavel Kurkin + p.kurkin&deepgroups.net +47292 + Luxar Tech Inc + Sean Chen + info&luxartech.com +47293 + STILLITS + José Carlos França + info&stillits.com.br +47294 + NPort Networks, Inc. + Stephen Shiue + stephen.shiue&nport.com.tw +47295 + Marvin Gülker + Marvin Gülker + m-guelker&guelkerdev.de +47296 + Arctic Paper Kostrzyn S.A. + Marcin Szymanek + marcin.szymanek&arcticpaper.com +47297 + hotelleriesuisse + hotline + hotline&hotelleriesuisse.ch +47298 + Pastem Solutions, Ltd. + Pastem Admin + sysoid&necps.jp +47299 + P2 Wireless Technologies Limited + Kevin Chan + kevinchan&p2wt.com +47300 + NTT DATA Business Solutions AG + Amine El Ayadi + amine.el-ayadi&nttdata.com +47301 + "NR"WEB US + Joshua K Betts + servers&nrwebus.com +47302 + Alnatura Produktions- und Handels GmbH + Thomas Brettnich + thomas.brettnich&alnatura.de +47303 + Moratua Solutions + Mahadi Siregar + regar.016&gmail.com +47304 + Ivnosys Soluciones S.L.U. + Rubén Curiel + sistemas&ivnosys.com +47305 + FoxGuard Solutions + Mitch Helton + mhelton&foxguardsolutions.com +47306 + Jasper + Bobby Richardson + bobby.richardson&jasper.com +47307 + IgniteNet + Harold Bledsoe + hbledsoe&ignitenet.com +47308 + Lanbowan Technology Ltd. + guangdong yang + 18626469675&163.com +47309 + Sofia Digital, Ltd. + Juha Joki + juha.joki&sofiadigital.com +47310 + botanic + Nicolas Dubost + systeme&botanic.com +47311 + Seacomp s.r.o. + Ivo Hlavaty + admin&seacomp.cz +47312 + Searidge Technologies + Justin Beattie + justin&searidgetech.com +47313 + Adenon Bilisim ve Iletisim Tek. San. ve Tic. Ltd. Sti. + Ergun SERAY + ergun.seray&adenon.com +47314 + Torrential Data Solutions, Inc. + Joseph Pickard + jpickard&torrentialdata.com +47315 + Wahsega Labs + Kelley Conway + kconway&wahsega.com +47316 + True Manufacturing + Jeremy Zanitsch + networkoperations&truemfg.com +47317 + University of Warsaw + Andrzej Zadrozny + devel&adm.uw.edu.pl +47318 + Optigo Networks + Stephen Chan + stephen&optigo.net +47319 + Riella Systems + Mike Riella + Mike&Riella.com +47320 + Advanced Enterprise Solutions Limited + Dan Williams + oid_admin&adventsol.co.uk +47321 + memit.de + Merlin Mittelbach + merlin.mittelbach&memit.de +47322 + Office IT-Partner Borås + Martin Westberg + martin.westberg&officeitpartner.se +47323 + TrustKo + Seongwoo Hong + certmaster&trustko.com +47324 + Mokop, Bartosz Rebeś + Bartosz Rebeś + admin&mokop.co +47325 + Suzhou Robot Information Technology Co.,Ltd. + Yin Haibo + yhb&szrobot.net +47326 + yanyuxieyang + dong pan + 361435262&qq.com +47327 + Kassenärztliche Vereinigung Nordrhein + Cornelius Vaessen + Zertifikate&kvno.de +47328 + Simulity Labs + Colin Tebbutt + colin.tebbutt&simulity.com +47329 + Stancke Transporte GmbH & Co. KG + Christoph Becker + it&stancke.com +47330 + Murrelektronik GmbH + Jörg Hinze + licenses.rnd&murrelektronik.de +47331 + Pro:Atria Ltd. + Adi Roiban + adi.roiban&proatria.com +47332 + Current, powered by GE + Tom Holland + holland&ge.com +47333 + Asintel, S.L. + Victor Orero + info&asintel.net +47334 + trendytech + Xiaoyin Liu + liuxy&trendytech.com.cn +47335 + Superwave Group LLC + Dmitriy Novokhatniy + admin&swgroup.ru +47336 + Iguana Comunicacions SL + Aleix Solé Romeu + aleix.sole&iguana.cat +47337 + Security On-Demand, Inc. 
+ Marcus Rosenberg + sysadmin&securityondemand.com +47338 + Bob Ziuchkovski Free/Libre and Open Source Software + Bob Ziuchkovski + bob.ziuchkovski&gmail.com +47339 + Hessware GmbH + Sebastian Heß + shess&hessware.de +47340 + Handysoft,co.ltd. + Kim keehwan + kimkeehwan&handysoft.co.kr +47341 + Syllaba Press International Inc. + Ernesto Rodriguez + info&syllabapress.us +47342 + VITAL WILLIAM CONSEIL + William VITAL + william.vital&william-vital.fr +47343 + ElektronIT GmbH + Jörn Volkhausen + service&elektronit.de +47344 + COMSYS Communications Systems Service GmbH + Robert Sander + robert.sander&comsysgmbh.de +47345 + Variant + Oleg Avrutin + avrutin&variant.kiev.ua +47346 + Laurenz Wagner + Laurenz Wagner + mail&ldd.li +47347 + MIRACL + John McCane-Whitney + john.mccane-whitney&miracl.com +47348 + Centro de Tradições Nordestinas + Arthur Stanev + cti&radioatual.com.br +47349 + Rockwell Automation + Julie Farrell + jmfarrell&ra.rockwell.com +47350 + Securian Financial Group + Nicholas LaFleur + nick.lafleur&securian.com +47351 + Premium Security Broadband + Tony Ledbetter + tledbetter&utiglobal.com +47352 + AMPAREX GmbH + Thomas Marquardt + thomas.marquardt&arex.com +47353 + HornasjövägenIT + Tom Tjörnhed + tom.tjornhed&gmail.com +47354 + Avista Corporation + Security Operations + corpsecurityoperations&avistacorp.com +47355 + Metamarkets + Jeff Strunk + jeff.strunk&metamarkets.com +47356 + Cress Enterprises + Michael Cress + michael.cress&cress.us +47357 + Technology Concepts & Design, Inc. + Joey Adams + jea&tcdi.com +47358 + HenzHome Services + Tobias Henz + tobias.henz&henzhome.ch +47359 + stary tec + staryzhou + staryzhou&163.com +47360 + MAX-TECH + Kim Min Jae + mj.kim&max-tech.co.kr +47361 + KT&C CO., LTD. + HYUN MUG JI + software&ktnc.co.kr +47362 + Woningstichting Haag Wonen + Patrick van der Harst + p.vander.harst&haagwonen.nl +47363 + SYSTEM Development Inc. + Kenichi Shimizu + ken&sysky.co.jp +47364 + SIRIETA + DAVID PHILIPPE + phdavid75&gmail.com +47365 + Wabtec Railway Electronics + Michael Bratcher + mbratcher&wabtec.com +47366 + Arturs Plisko + Arturs Plisko + legal&blizko.lv +47367 + Transport for New South Wales + Duncan Chan + adteam&transport.nsw.gov.au +47368 + dainox GmbH + Jochen Brueckner + jochen.brueckner&dainox.net +47369 + Canal Digital AS + Helge Hasund + helge.hasund&canaldigital.com +47370 + Lorindus + Joost Daem + jdaem&esolutions.be +47371 + Shine Security Ltd + Oren Farage + oren&getshine.com +47372 + MariaDB Corporation Ab + Adam Donnison + adam.donnison&mariadb.com +47373 + Octopod Technology Company Limited + Chen Mingyuan + mike&octopodtech.com +47374 + Sitex Inc. + Nikita Inozemtsev + n.inozemtsev&sitex.ru +47375 + Turpeinen + Teemu Turpeinen + teemu-iana.pen&turpeinen.org +47376 + Taqniat Ltd + Yury Lapin + taqniat&outlook.com +47377 + International Software Systems Inc. (ISSI) + VC Sekhar Parepalli + sekhar&issi-software.com +47378 + Threema GmbH + Manuel Kasper + noc&threema.ch +47379 + IoT.nxt Pty Ltd + Gysbert Jacobs + bertus.jacobs&iotnxt.com +47380 + Zagrebacka banka d.d. 
+ Goran Sustek + Goran.Sustek&unicreditgroup.zaba.hr +47381 + Omnitelecom + Daniel Marchasin + danielm&omnitelecom.co.il +47382 + UMAITEK + DEMIL Rezki + r.demil&umaitek.dz +47383 + Kaessbohrer Geländefahrzeug AG + Timo Schetelich + timo.schetelich&pistenbully.com +47384 + Sartorius Mechatronics T+H GmbH + Christian Raffel + christian.raffel&sartorius-intec.com +47385 + Sveaskog Förvaltnings AB + Johanna Snell + johanna.snell&sveaskog.se +47386 + US Signal + Chuck Taylor + ctaylor&ussignal.com +47387 + 0xBEDA, LLC + Joe Beda + joe&0xbeda.com +47388 + SeGuru Corp. + Sean Arries + sean&seguru.io +47389 + State Universities Retirement System + Byron Campbell + bcampbell&surs.org +47390 + Affinity Credit Union 2013 + Dustin Halvorson + dustin.halvorson&affinitycu.ca +47391 + Green Cubes Technology Corporation + Anthony Cooper + acooper&greencubestech.com +47392 + FAWONG + Felix Wong + pen&fawong.com +47393 + Koalephant Co., Ltd. + Stephen Reay + stephen&koalephant.com +47394 + BKT Elektronik Sp. z o.o. + Jacek Siwilo + siwilo&bkte.pl +47395 + East Central Oncology + Diana Rossell + dianaatecoa&yahoo.com +47396 + CJSC TransProektInzhiniring + Dmitry Markov + pkiadmin&transpir.ru +47397 + IT-Wissen.org + Udo Jendges + jendges&jendges.org +47398 + AVP Technology + Kalachev Aleksandr + pkiadmin&avpt.ru +47399 + AMAG Technology + Joe Kelleher + joe.kelleher&amag.com +47400 + Cottonwood Creek Technologies, Inc. + Alan Schott + aschott&cwctech.com +47401 + Hotel Internet Services + Rafael van den Berg + admin&hotelwifi.com +47402 + VALID CERTIFICADORA DIGITAL LTDA + MARCILIO JORGENSEN CASSELLA + marcilio.cassella&valid.com.br +47403 + Connect Managed Services + Adam Leedell + adam&connectmanaged.com +47404 + Coligo AB + Tim Nilimaa-Svärd + tim.nilimaa-svard&coligo.se +47405 + Brukarkooperativet JAG + Tim Nilimaa-Svärd + tim.nilimaa-svard&coligo.se +47406 + Tactic Code AB + Tim Nilimaa-Svärd + tim&tacticcode.com +47407 + Forester IT + Mal Harwood + mal.harwood&foresterit.com.au +47408 + Pankraz Elektronik + Viktor Pankraz + info&pankraz.org +47409 + Javier Pastor + Javier Pastor + ldap-oid&cerebelum.net +47410 + InfraSource Inc. + Jae Kyung KIM + jkiset&infra-source.com +47411 + Esferize Comunicaciones SL + Fernando Enriquez + ingenieria&esferize.com +47412 + SAIFER ASSOCIATES LTD + Juan Luis Iglesias + jliglesias&saiferassociates.co.uk +47413 + Caprica Limited + Manfred Duchrow + manfred.duchrow&caprica.biz +47414 + Burgstaller + Albert Burgstaller + albert&burgstaller.priv.at +47415 + Voxaware Ltd. + Ray Euden + voxaware&btconnect.com +47416 + 3Shape Medical A/S + Thomas Allin + thomas.allin&3shape.com +47417 + Associazione La Nostra Famiglia + Marco Gaiarin + gaio&sv.lnf.it +47418 + Key Bridge Wireless + Jesse Caulfield + administrator&keybridgewireless.com +47419 + Halter AG + Stefan Koch + support&halter.ch +47420 + 2scale GmbH + Dirk Vleugels + pen&2scale.net +47421 + Cohesity Inc. + Roy Yang + roy&cohesity.com +47422 + imito AG + Manuel Studer + manuel.studer&imito.io +47423 + Viamericas Corporation + Joel Kosloski + Joel.Kosloski&Viamericas.com +47424 + RPCI Oncology, PC + Joseph Wedge + joe.wedge&shoawny.org +47425 + BASYS Bartsch EDV-Systeme GmbH + Florian Stamer + florian.stamer&basys-bremen.de +47426 + Triolan Limited Inc. + Max Sorokolat + gambit&gambit.net.ua +47427 + PULLNET TECHNOLOGY, S.L. 
+ Jaume Darne + jaume.darne&pullnet.com +47428 + Taras Shevchenko National University of Kyiv + Ievgen Sliusar + slu&knu.ua +47429 + NOWMEDIA TECHNOLOGIES + Bakhtiyorbek Khasanov + bkhasanov&lamuz.uz +47430 + Ifixmail.com + Phillip LaRue + Pj&ifixmail.com +47431 + CrowdStrike Inc. + Milos Petrbok + milos&crowdstrike.com +47432 + Diamond Technologies Inc. + Paul Tesini + ptesini&diamondt.com +47433 + DAC System SA + Paolo Delmastro + paolo.delmastro&dacsystem.ch +47434 + COUNT+CARE GmbH & Co. KG + Juergen Friedrich + juergen.friedrich&countandcare.de +47435 + Roksnet Solutions LTD + Kristjan Kullerkann + kristjan.kullerkann&roksnet.com +47436 + Lufthansa Systems GmbH & Co. KG + Wolfgang Wahle + licencemanagement&lhsystems.com +47437 + Vagaro Inc. + Eric Lee + webmaster&vagaro.com +47438 + tofutown gmbh + thomas knoche + it.service&tofutown.com +47439 + Alexander Zaigraev + Alexander Zaigraev + zaigraev&outlook.com +47440 + Stellus Technologies + Mark Lonsdale + mark.lonsdale&stellus.com +47441 + Lagardère Ressources + Marc BEHIDI / Paul HALAJ + dsi-lr&lagardere.fr +47442 + Highlands Oncology Group + Meagan Higginbotham + mhigginbotham&hogonc.com +47443 + Jockulator Studios North + Magnus Haack + magnus.haack&jockulator.eu +47444 + Milwaukee School of Engineering + Christopher Hougard + hougardc&msoe.edu +47445 + Telestream LLC + Scott Davis + scottd&telestream.net +47446 + 443 IT + Thomas Komen + info&443-it.nl +47447 + UniqCast + Dario Habulin + dhabulin&uniqcast.com +47448 + Intis d.o.o. + Miroslav Kelekovic + miroslav.kelekovic&intis.hr +47449 + Ryanair Ltd + Systems Support + systemssupport&ryanair.com +47450 + Cloud Foundry Foundation + Chip Childers + cchilders&cloudfoundry.org +47451 + OmegaStar + Marcin Jurczuk + snmp&omegastar.eu +47452 + QMS Capital Management LP + Casey Averill + casey.averill&qmscapital.com +47453 + Gobierno de Santa Fe + Pablo Bussi + pbussi&santafe.gov.ar +47454 + Area d'Inxenieria Telematica - University of Oviedo + Francisco G. Bulnes + bulnes&uniovi.es +47455 + Sydved AB + Ingemar Arvidsson + ingemar.arvidsson&sydved.se +47456 + One New Zealand Group Limited + Daniel Kerse + dan.kerse&one.nz +47457 + SAS Orbitracs + Daniel Tits + daantits&wanadoo.fr +47458 + GIGANET NETWORKING SOLUTIONS LTD + Erick Mbugua + gigalite&giga-net.co.uk +47459 + BKM-Micronic Richtfunkanlagen GmbH + Robert Schubert + info&bkm-micronic.de +47460 + LiquidCool Solutions + Daryl Lautenschlager + daryl.lautenschlager&liquidcoolsolutions.com +47461 + Lit Consulting + Staffan Olsson + staffan.olsson&litconsulting.se +47462 + ChangZhou Taiping Communication Technology Co.Ltd + Zhou Yang + zyang&taiping.cc +47463 + bestrun nanjing, co.ltd + YK Huang + ykhuang&bestrun.com.cn +47464 + IN WIN Development Inc. + Eric Lin + eric.lin&in-win.com.tw +47465 + CDB Monolit + Konovalov Alex + alexcon314&gmail.com +47466 + Lab3 e.V. + Sebastian Ratajczak + sebastian.ratajczak&lab3.org +47467 + Allianz Insurance Company Russia OJSC + Boukreev Vassili Vassilievich + ng_mail&allianz.ru +47468 + Trueverit + Simone Gasparini + simone.gasparini&trueverit.com +47469 + Presence Technology + Francisco Segovia Mora + fsegovia&presenceco.com +47470 + CGI Sverige + Stefan Carlsson + s.carlsson&cgi.com +47471 + Apogey Ltd + Voloshin Vladimir Alekseevich + 516965&mail.ru +47472 + Purkinje Inc. 
+ Alain Toutant + atoutant&purkinje.com +47473 + Resurs-Komplekt LLC + Nikolay Bukhalov + resgroup.iana&gmail.com +47474 + Sonn & Partner Patentanwälte + IT Department + admin&sonn.at +47475 + ShenZhen Smart Imaging Healthcare Co.,Ltd. + ShenWen Quan(male) + shenwen.quan&zying.com.cn +47476 + medi.com sp. z o.o. + Marcin Zabraniak + marcin.zabraniak&medi.com.pl +47477 + CGS Tower Networks Ltd. + Nitzan Gurfinkel + nitzan&cgstowernetworks.com +47478 + NetSection Security + Lars Karlslund + lars&netsection.com +47479 + Artec Design LLC + Anti Sullin + anti.sullin&artecdesign.ee +47480 + TECO Ltd + Trubochkin Alexey + tav&teco.com.ua +47481 + RADD Web Studio + Darlington Wleh + radd&raddwebstudio.com +47482 + ATVIRTUAL.NET KG + Martin Fischer + martin.fischer&atvirtual.net +47483 + Mcallen Oncology + Bricy Martinez + bmartinez&mcallenoncology.org +47484 + Quest Medical Imaging B.V. + Richard Meester + richard.meester&quest-innovations.com +47485 + ArmySr + Ron Armstrong + ron&armysr.com +47486 + Factorial Advance Systems Ltd + Adebola Omoboya + aomoboya&icloud.com +47487 + Leroy Merlin Polska Sp. z o.o. + Tomasz Pikon + tpikon&leroymerlin.pl +47488 + Nedam ENG. Co., ltd. + Min Joon Jeon + mjjeon&nedameng.com +47489 + Techonline Consulting Ltd + Mike Elliott + support&techonlineuk.com +47490 + Nordea AEP Luxembourg + IT Department + iana_pen&nordea.lu +47491 + Feenix Communications + Mark Abrahams + mark&feenix.co.nz +47492 + Carnelutti Studio Legale Associato + Nicola La Selva + issupport&carnelutti.com +47493 + SHENZHEN GYE TECH CO.,LTD + Guangming Han + hanguangm&sohu.com +47494 + AO-27 Control Operators Association + Michael Wyrick + wyrick&umbrasi.com +47495 + alfaleasing + Maltsev Andrey + helpdesk&alfaleasing.ru +47496 + Binary Kitchen e.V. + Markus Hauschild + markus.hauschild&binary-kitchen.de +47497 + KIZIL Elektronik + Ali KIZIL + ali&kizil.com +47498 + Dirk Rossmann GmbH + Info + postmaster&rossmann.de +47499 + Beijing NewDigit Technology Co., Ltd. + Xiao Hua Yang + yangxh&newdt.cn +47500 + PB Design & Developments Limited + Michael Provis + Mike.Provis&pbdesign.co.uk +47501 + Hargreaves Lansdown Plc + Mike Salway + oidadmin&hl.co.uk +47502 + Hiawatha Valley Education District + Andrew Hamilton + itsupport&hved.org +47503 + Ralph Lauren Center for Cancer Care + Lisa Pfail + lpfail&ralphlaurencenter.org +47504 + Zellkraftwerk GmbH + Christian Hennig + hennig&zellkraftwerk.com +47505 + Numonix + Evan Kahan + evan.kahan&numonixrecording.com +47506 + AOSense, Inc. + Isaac Wilson IV + iwilson&aosense.com +47507 + CT LAB + Jacobus van Zyl + jacobus&ctlab.com +47508 + Incosys SAS + Fabio Alexander Ferreira Angarita + fabiorov&gmail.com +47509 + Herz Group + Christian Dergovics + iana.pen&herz.eu +47510 + Signifai, Inc + Zach Carlson + zcarlson&signifai.io +47511 + GPC Asia Pacific + Christian Joy + cjoy&gpcasiapac.com +47512 + linuxmuster.net e.V. + Jesko Anschütz + jesko.anschuetz&linuxmuster.net +47513 + venetex corporation + keiji sawada + sawada&venetex.co.jp +47514 + RAK SYSTEM CO,;LTD + TianFeng + tianfeng&dhdz-suzhou.com +47515 + Spectrum Controls, Inc. + Mike Chen + oid.admin&spectrumcontrols.com +47516 + Certego s.r.l. + Bernardino Grignaffini + info&certego.net +47517 + Nekomit + Sorah Fukumori + her&sorah.jp +47518 + Openflexo + Christophe Guychard + christophe.guychard&openflexo.org +47519 + Fincons SpA + Vincenzo Summo + vincenzo.summo&finconsgroup.com +47520 + NES Technology Inc + Dae Won Kim + dwkim&nestek.co.kr +47521 + S+T Service & Technique S.A. 
+ Frédéric Hess + fhess&splust.ch +47522 + BTECO LIMITED + Gunter Liszewski + gunter.liszewski&gmail.com +47523 + VDE Prüf- und Zertifizierungsinstitut GmbH + Dr. Arthur Herzog + ianapen&vde.com +47524 + Rubicon Labs, Inc. + Alexander Usach + ausach&rubiconlabs.io +47525 + Wuliang + En Feng Wu + wuen320&gmail.com +47526 + A. Farber & Partners Inc. + IANA Administrator + iana&afarber.com +47527 + Silent Waters IT Consulting S.L. + Patrick Charbonnier + penmaster&mail.silent-waters.tech +47528 + Softhum + Armando Hernández Aguayo + armando.hdez&softhum.com +47529 + NATIONAL BANK OF GREECE + Michael Ontrias + ibank_Business_Analysis&nbg.gr +47530 + Softwaretechnik Ges.m.b.H. + Dipl. Ing. Hermann Reisinger + hermann.reisinger&softwaretechnik.at +47531 + Analytik Jena AG + Gert Finke + gert.finke&analytik-jena.de +47532 + Arhides d.o.o. + Dejan Kampus + iana&arhides.si +47533 + Balyasny Asset management L.P. + Ken D'Agostino + certificates&bamfunds.com +47534 + AutoAlert + Michael Dean + michael.dean&autoalert.com +47535 + Autoridad Certificante de la Oficina Nacional de Tecnologías de Información + Federico Bustamante + consultapki&jefatura.gob.ar +47536 + PJSC "Sumykhimprom" + Dmitry Malyshok + d.a.malyshok&sumykhimprom.org.ua +47537 + SATT PACA Corse + ENJOLRAS Marc + marc.enjolras&sattse.com +47538 + knoefel.xyz + Markus Knoefel + markus&knoefel.xyz +47539 + Greendoc Systems Kft. + Péter Bösztöri + bosztori.peter&greendoc.hu +47540 + QuadraNet, Inc + Kate Gerry + kate.gerry&quadranet.com +47541 + Admino LLC + Andrey Kurochkin + iana&admino.ru +47542 + Tianyi iMusic culture & technology Co. Ltd. + Zhao Xu + zhaoxu&imusic.cn +47543 + MWR InfoSecurity + Stuart Morgan + stuart.morgan&mwrinfosecurity.com +47544 + JTKJ.com + Ivor TsLeng + qbystory&163.com +47545 + Jingtong Technology Co. Ltd. + Buyun QU + buyun.qu&gmail.com +47546 + HANGZHOU CHANGER TECHNOLOGY.,LTD + Yang Lei + lei.yang&changertech.net +47547 + Takta Co. + Heshmati, Mohammad Ali + heshmati&takta.com +47548 + Tvip Ltd + Pavel Sokolov + pavel&tvip.ru +47549 + iniLINE Co., Ltd. 
+ Ki Tae John + ktjohn&iniline.co.kr +47550 + AVENTICS GmbH + Sebastian Minks + sebastian.minks&aventics.com +47551 + USGS National Wildlife Health Center + Tom Beighley + tbeighley&usgs.gov +47552 + Marcone Supply + Stephen Lotz + stephen.lotz&marcone.com +47553 + Hochschule Koblenz + Heiko Hellweg + hellweg+iana&hs-koblenz.de +47554 + Total Highspeed + Christopher Tyler + sysadmin&totalhighspeed.net +47555 + EA Games FireMonkeys + Fletch Hogan + fhogan&ea.com +47556 + Barrcode Ltd + Brian Barr + brianbarr&barrcode.com +47557 + Hangzhou Telin Technologies Company limited + Wang Qun + liuhaichao&telincn.com +47558 + Communications & Power Industries, Satcom Division + Jeremy Peters + jeremy.peters&gd-ms.com +47559 + Crypto-Pro + Kollegin Maksim + maxdm&cryptopro.ru +47560 + Westinghouse Electric Sweden AB + Mats Karlsson + karlssmg&westinghouse.com +47561 + System-on-Chip engineering (SoC-e) + Sergio Salas + sergio.salas&soc-e.com +47562 + Chengdu Emfuture Automation Engineering Co., Ltd + Dongming Tang + tangdm&emfuture.com +47563 + Falcon Critical Care Transport + Alex Ackerman + netad&falconcct.com +47564 + Permian Resources, LLC + Blake Reeves + Blake.Reeves&PermianResources.com +47565 + Forcepoint LLC + Security Operations + domains&forcepoint.com +47566 + Uniper SE + Steve Abbott + steve.abbott&uniper.energy +47567 + Seifert Logistics GmbH + IT Abteilung + urgent&seifert-logistics.com +47568 + InterTalk Critical Information Systems + Kevin Brown + kbrown&intertalksystems.com +47569 + FIAMM SoNick S.A. + Todeschini Marco + marco.todeschini&fiamm.com +47570 + Ennnot + Maxim Korolyov + info&ennnot.ru +47571 + Glowpoint + Ted Tzeng + ttzeng&glowpoint.com +47572 + University of Keele + Jonathan Knight + it.operations&keele.ac.uk +47573 + McFarland Clinic + Rich Voyek + rvoyek&mcfarlandclinic.com +47574 + City of Harker Heights + Gary Bates + gbates&ci.harker-heights.tx.us +47575 + AUTEC Gesellschaft fuer Automationstechnik mbH + Dr. Hartmut Staerke + dhs1&autec-gmbh.de +47576 + CapSenze Biosystems AB + Dag Erlandsson + de&capsenze.se +47577 + Newell Rubbermaid + Tom Glowacki + Tom.Glowacki&newellco.com +47578 + Vantiv Inc + William Cain + william.cain&vantiv.com +47579 + Open Software S.r.l. + Giuseppe Pitzus + giuseppe&osw.it +47580 + Liechtensteinische Landesverwaltung + Peter Kindle + wsc&llv.li +47581 + Styevko Attila + Styevko Attila + attila&sty.hu +47582 + Timothy Boronczyk + Timothy Boronczyk + tboronczyk&gmail.com +47583 + Gentrack + Vincent Commarieu + vincentc&gentrack.com +47584 + Hirealton network technology co., LTD + xiaojing.weng + xiaojing.weng&hirealton.com +47585 + 上海兆越通讯技术有限公司 (Shanghai Communication Technology Co., Ltd. trillion more) + 张健荣 (Zhang Jianrong) + 2472265650&qq.com +47586 + Alfing Kessler Sondermaschinen GmbH + Dietmar Wieber + dwieber&aks.alfing.de +47587 + Rack2Cloud Limited + Nicholas McDermott + nick.mcdermott&rack2cloud.com +47588 + StatPro Group Plc. + George Palmer + george.palmer&statpro.com +47589 + Route443 LLP + Security Officer + info&route443.eu +47590 + iik + Paul Engel + pen&iik.dk +47591 + aidounix + Aidouni, Frederic + snmp&aidounix.com +47592 + NJATECH + Huzhong + huzhong&njatech.net +47593 + Shenzhen Electrical Co. + NIE YONG JUN + nyj981&163.com +47594 + Allwaywin Co., Ltd + Liu Wei + admin&allwaywin.com +47595 + cluster + wangzhibao + wangzhibao163&163.com +47596 + INSS + Eulaidy Assuncao + eulaidyreis&gmail.com +47597 + christmann informationstechnik + medien GmbH & Co. 
KG + Wolfgang Christmann + mail&christmann.info +47598 + Weiss Klimatechnik GmbH + Christoph Hammer + c.hammer&wkt.com +47599 + Changzhou Wisdom Intelligent Technology CO.,LTD. + dapeng ye + ydp&czwit.com +47600 + LLC "Commercial Bank" Bank Talmenka " + Cherkashin Dmitriy + cherkashin&tb22.ru +47601 + Novo Nordisk A/S + Novo Nordisk PKI Team + pki-service&novonordisk.com +47602 + pandoranorge + John Thingstad + jpthing&online.no +47603 + eClinicalWorks + Paul R. Dittrich + paul.dittrich&eclinicalworks.com +47604 + OOO NPP ETRA-Plus (Ltd, Research and Production Enterprise) + Alexander Arserntiev + stolknovenie&gmail.com +47605 + OOO BALTSTAR + Pavel Galkin + administrator&baltstar.net +47606 + ocuro + Badrul Alam + balam&ocuro.net +47607 + Lopez Foods, Inc. + Chris Wooldridge + cwooldridge&lopezfoods.com +47608 + WIRTGEN GROUP Holding GmbH + Frank Kehlenbach + edv&wirtgen.de +47609 + Escola Nacional de Administração Pública (Enap) + Coordenação-Geral de TI + infrati&enap.gov.br +47610 + Bitwiseshift Ltd. + George Palmer + gp&bitwiseshift.net +47611 + CENAPAD + Carlos Henrique Rebollo + rebollo&cenapad.unicamp.br +47612 + Gurtsoft + Matvei A. Tokmakou + tama&gurtam.com +47613 + Esbjerg Kommune + Esben Foverskov + hostmaster&esbjergkommune.dk +47614 + Pranas.NET + Ruslan Sudentas + ruslan&pranas.net +47615 + WRELMS Networking + Wendy Ruokangas + wruokangas&gmail.com +47616 + HangZhou AOBO TELECOM Corp + zhangshen + 13656650200&163.com +47617 + NTSystems + tadasi nisiyama + watanabe&ntsystems.co.jp +47618 + SEFIRA spol. s r.o. + Tomas Vanek + it&sefira.cz +47619 + D&K Technologies GmbH + Stanislav Koncebovski + Stanislav.Koncebovski&dktech.de +47620 + DOMIS/SOMFY + Alain DEPLANTE + alain.deplante&somfy.com +47621 + RiverWeb IT Solutions + Fotini Tatsidou + hostmaster&riverweb.gr +47622 + nicos Research & Development GmbH + Jörg Langkau + jlangkau&nicos-rd.com +47623 + schaetz cro + William Young + w.young&schaetz-cro.ch +47624 + Sysdat Turismo s.r.l. + Fabrizio Monti + assistenza.hardware&sigesgroup.it +47625 + Gekås Ullared AB + Johan Armfelt + johan&gekas.se +47626 + Datelstream Limited + Darryl Betts + darryl.betts&datelstream.co.nz +47627 + kapptivate + Alexandre Grais + alexandre.grais&kapptivate.com +47628 + krauth technology GmbH + Olaf Knüppel + Olaf.Knueppel&krauth-online.de +47629 + AAEON Technology Inc. + Robin Lin + RobinLin&aaeon.com.tw +47630 + Ontario English Catholic Teachers Association + Joanne Chenier + sysadmin&oecta.on.ca +47631 + University of South Alabama + Ernest A. Lighrtbourne + alightbo&southalabama.edu +47632 + HC APMC ONCOLOGY + JAMIE BAKER + JBAKER&HIGHLANDCLINIC.COM +47633 + International Airlines Group + Matt Hudson + matt.hudson&ba.com +47634 + Blue Labs + David Ford + david&blue-labs.org +47635 + Skogsutveckling Syd AB + Ingemar Arvidsson + ingemar.arvidsson&sydved.se +47636 + Stora Enso Bioenergi AB + Ingemar Arvidsson + ingemar.arvidsson&sydved.se +47637 + gehrigmobi + Jeffrey Gehrig + registry&gehrig.mobi +47638 + Crescend Technologies + Marc Holdwick + mholdwick&crescendtech.com +47639 + KpXX + Serge Guzew + Serge&Guzew.Ru +47640 + Tangible Security Inc + David Partridge + dpartridge&tangiblesecurity.com +47641 + gruenewald-clan.de + Willi Gruenewald + hostmaster&gruenewald-clan.de +47642 + Istituto Clinico Città Studi + Dario Pezzi + dario.pezzi&ic-cittastudi.it +47643 + Asahi Net, Inc. + LABONNE Christophe + iana_contact&bizml.asahi-net.or.jp +47644 + Robin Systems + Tom Morano + tom&robinsystems.com +47645 + Harman Intl. 
+ Mark Smith + mark.smith&harman.com +47646 + Beijing Qianxin Technology Co., LTD. + Jiming Gao + gaojiming&qianxin.com +47647 + barox Kommunikation + Angelo Banfi + banfi.angelo&barox.ch +47648 + Aireon LLC + Andy Hoag + info&aireon.com +47649 + World Economic Forum + Carine Benetti + carine.benetti&weforum.org +47650 + Arkin Net Inc. + Maor Bril + mbril&arkin.net +47651 + Accessnord AB + Tom Tjörnhed + tom&accessnord.se +47652 + D.C.Orbital LLC + Sergey Sapiga + sys&dcorbital.net +47653 + Lanbowan Technology Ltd. + guangdong yang + 18626469675&163.com +47654 + News India Today Trust + Hiranmoy Debnath + advhiranmoy&gmail.com +47655 + Nufront co.ltd + Liu Zhi Gang + zhigang.liu&nufront.com +47656 + Conval, Inc. + Tim Pickering + tpickering&conval.com +47657 + Shanghai Golden Bridge InfoTech Co.,Ltd + zhumin + zhumin&shgbit.com +47658 + ZITH + Sherwood Wang + sherwoodwang&zith.org +47659 + Actions-Micro + Tingya Wang + tingyawang&actions-micro.com +47660 + Cookpad Inc. + Sorah Fukumori + sorah&cookpad.com +47661 + H&K International + Sri Karan + sri.karan&hki.com +47662 + weroSoft AG + Rolf Wenger + rolf.wenger&weroSoft.net +47663 + Baruwa Enterprise Edition + Andrew Colin Kissa + andrew&baruwa.com +47664 + IP Rozduhov M.E. + Sergey Gulyaev + gusa&maxi-net.ru +47665 + InformInvestGroup + Dmitry Komolov + devops&iigroup.ru +47666 + Qualica Technologies (Pty) Ltd + Gregory Babski + greg&qualica.com +47667 + Init AB + Calle Dybedahl + calle.dybedahl&init.se +47668 + Agência para a Modernização Administrativa, I.P. (AMA) + Rui Martinho + rui.martinho&ama.pt +47669 + IHI Charging Systems International GmbH + Torsten Lechler + it&ihi-csi.de +47670 + Hitachi, Ltd., Defense Systems Business Unit + Shohei Nishida + shohei.nishida.vy&hitachi.com +47671 + Intra + Tatiana Kondakova + t&yadro.com +47672 + findsolutions + Alexander Pohl + info&findsolutions.de +47673 + IHI Charging Systems International Germany GmbH + Uwe Jakobi + it&ihi-csi.de +47674 + University POLITEHNICA of Bucharest + Soriga Stefan Gabriel + stefan.soriga&upb.ro +47675 + IHI Charging Systems International Sp A + Gavino Fraghì + it&ihi-csi.de +47676 + Social Native + Steven Oxley + steveno&socialnative.com +47677 + Point of Presence Technologies + Michael Newton + mnewton&pofp.com +47678 + Intuity Consultants, Inc. + Dave Martens + davemartens&yahoo.com +47679 + DigitalX + Alex Wied + alex.wied&digitalx.com +47680 + Weimann - IT Consulting + Marc Weimann + marc.weimann&gmail.com +47681 + Endeavor Air, Inc. + Erik Radde + erik.radde&delta.com +47682 + Rocket Lab Ltd. + Joshua Lamorie + j.lamorie&rocketlabusa.com +47683 + Michael Schubert GbR + Michael Schubert + michael&michael-schubert.eu +47684 + Babcock Power + Keith Taylor + ktaylor&babcockpower.com +47685 + SimuOne ApS + Michael Skriver + msk&simuone.com +47686 + Switzerland Global Enterprise + IT-Team + it-team&s-ge.com +47687 + Filetress + Peter Donov + registration&filetress.com +47688 + Itential + Dale Sorsby + dale.sorsby&itential.com +47689 + Chop-Chop Sp. z o.o. 
+ Paweł Jasiński + payments&chop-chop.org +47690 + Linton Enterprise I (LEI) + Eric Russell Linton I + ericlintonsr&outlook.com +47691 + Krogh-Consult + Jesper Krogh + jek&krogh-consult.dk +47692 + Tribunal Supremo de Elecciones de Costa Rica + Eric Quiros Jimenez + equiros&tse.go.cr +47693 + Monroe Tool and Manufacturing + IT department + it&monroetoolmfg.com +47694 + CANATAL + Huzhong + huzhong&njatech.net +47695 + McKesson Corporation + Conrad Spielman + conrad.spielman&mckesson.com +47696 + MedicusTek Inc + William Ott + william.ott&medicustek.com +47697 + Nanjing Tian Hua Zhong An Communication Technology CO.LTD + E Wang + wange&chinathza.com +47698 + aroba Inc. + Masato Kudo + aroba_se&aroba.jp +47699 + Avon Products Inc + Walter Kerner + walter.kerner&avon.com +47700 + Systrome Networks + Steven George + steven&systrome.com +47701 + BDO Audit SRL + Sorin Popa + sorin.popa&bdo.ro +47702 + SECLAB + Thomas VIDAL + tvidal&seclab-solutions.com +47703 + cadilinea, slu + carlos briso + internet&cadilinea.com +47704 + BEIT Systemhaus GmbH + Michael Trenkle + domainadmin&beit.de +47705 + Norra Skogsägarna Ek För + Thomas Nilsson + thomas.nilsson&norra.se +47706 + Silion Technology Co., Ltd + He Xiaopeng + hexp&silion.com.cn +47707 + Zynx + Joey Leclerc + ta01&zynx.ca +47708 + Di-Nikko Engineering Co.,Ltd. + shinya sato + shinya.sato&dne.co.jp +47709 + GE Healthcare on behalf of ACRIM hospital + Emmanuel Gleye + emmanuel.gleye&ge.com +47710 + RIBS - Registration and Imaging of Brain Systems + VME Kersten + ribsadmin&umcutrecht.nl +47711 + KECK MEDICINE OF USC + Jill Chiascione + jill.chiascione&med.usc.edu +47712 + Treegital + Gilles Sauliere + gse&treegital.fr +47713 + Charlieuniformtango + Jeffery Harrell + sparky&charlietango.com +47714 + qsx + Thomas Schneider + oid-pen&qsuscs.de +47715 + EZTrader + gal koren + galk&eztrader.com +47716 + Devdot B.V. + Raymond Pley + ray&devdot.io +47717 + OrbiWise SA + Pierre-Jean PIETRI + pierre-jean.pietri&orbiwise.com +47718 + Zetes + Erwin Vermoesen + erwin.vermoesen&zetes.com +47719 + Videology + Jon Turwy + jturwy&videologygroup.com +47720 + LAWO Informationssysteme GmbH + Karsten Köth + k.koeth&lawo.info +47721 + Avaleris + Hugh Lindley + hugh.lindley&avaleris.com +47722 + Hansoft AB + Erik Olofsson + erik.olofsson&hansoft.com +47723 + Michigan Open Carry, Inc. + Jason Gillman Jr. + jgillman&miopencarry.org +47724 + Cuyahoga County + Tim Verry, Aaron Edens + tverry&cuyahogacounty.us +47725 + Konke Online + Jared Honey + jared&konkeonline.com +47726 + Slots Machines S.A. + Claudio Gustavo Gonzalez + claudio.gonzalez&slotsmachines.com.ar +47727 + Assistance Publique des Hôpitaux de Paris + François Pereira de Lima + francois.pereira&aphp.fr +47728 + Tobias Buchloh + Tobias Buchloh + postmaster&tb-devel.de +47729 + Niagara Networks Inc + Bill Kish + billk&niagaranetworks.com +47730 + République et Canton du Jura + Bruno KEROUANTON + bruno.kerouanton&jura.ch +47731 + Leidos, Inc. + Richard Braunagel + braunagelr&leidos.com +47732 + Kopano B.V. + Michael Kromer + mike&zarafa.com +47733 + Beijing Lehe Innovation Information Technology Co.,Ltd. + chongli wei + weichongli&higohappy.com +47734 + Nomura Holdings, Inc. 
+ Shigekazu Inohara + inohara-0hxz&jp.nomura.com +47735 + Toyam Cox + Toyam Cox + toyam&borealis.im +47736 + L2M Solutions doo + Miljan Djakonovic + office&l2m.rs +47737 + BLUETOWN + Jesper Sandberg + js&bluetown.com +47738 + MilDef AB + Christoffer Martinsson + christoffer.martinsson&mildef.com +47739 + Fam.Andersson Skog AB + Marie Andersson + marie&askog.se +47740 + Holmen Skog AB + Christer Hörnfeldt + christer.hornfeldt&holmenskog.com +47741 + Vista Group Limited (formerly 'Vista Entertainment Solutions') + Christoph Berthoud + christoph.berthoud&vista.co +47742 + V-Key Pte Ltd + Yoong Kwek Yuan + itsupport&v-key.com +47743 + Atelios Communication Systems GmbH + Hans-Juergen Schmidt + hans.schmidt&atelios.de +47744 + Tomra ASA + Vincent Ambo + vincent.ambo&tomra.com +47745 + Monmouth Hematology Oncology + Margaret Bell + mbell&barnabashealth.org +47746 + Celgene + Kushal Mukehrjee + kmukherjee&celgene.com +47747 + Oakwood Controls + John Harrell + admin&oakwoodcontrols.com +47748 + Rubricall, SL + Alejandro Pinedo + aph&rubricall.com +47749 + GemTalk Systems + Norm Green + norm.green&gemtalksystems.com +47750 + Bring Dialog Norge AS + Kim Olsen + support&bringdialog.no +47751 + Mattersight Corporation + Steven Anderson + steven.anderson&mattersight.com +47752 + Lycee Louis Armand + Ewen PRIGENT + ewen.prigent&ac-creteil.fr +47753 + Aperi Corporation + Brian Keane + bkeane&apericorp.com +47754 + Core Services Corporation + Michael Rulf + mrulf&coreservices.com +47755 + OpenRat CMS + Jan Dankert + iana&jan.weiherhei.de +47756 + Global Oncology, Inc + LiiShin + lslin&global-oncology.com +47757 + Bariq Electronics + Muhammad Yahya + meetyahya&outlook.com +47758 + Służba Więzienna + Marcin Woldan + webmaster&sw.gov.pl +47759 + Sociedade Educacional Braz Cubas Ltda + Andre Janna + andre61&brazcubas.br +47760 + County of El Dorado + Tim Liston + tim.liston&edcgov.us +47761 + Wishnmix Ltd + Justin Megawarne + justin.megawarne&wishnmix.com +47762 + Section9 + Mark Mc Nicholas + markmcn§ion9.ie +47763 + OfficeFlex LLC + Bimal Saraiya + bimal&officeflexplano.com +47764 + Elektrownia Rybnik + Jan Radwański + jan.radwanski&edf.pl +47765 + DOC.INFOSAFE (ISRAEL) LTD + Andrei Tseitlin + it&swissinfocloud.ch +47766 + MACNICA FUJI ELECTRONICS HOLDINGS, INC. + Jiaqi Chen + chen-j&macnica.co.jp +47767 + ASH Szoftverhaz Kft + Bederna, Zsolt + zsolt.bederna&ashszoftverhaz.hu +47768 + Aristech GmbH + Martin Mende + martin.mende&aristech.de +47769 + Armour Communications Limited + Andy Lilly + it&armourcomms.com +47770 + ESG BIRO DE SERVICOS S/S LIMITADA - ME + Eugênio Neves da Rocha + eugenio&esg.eng.br +47771 + Colorado School of Mines + Matthew B. Brookover + mbrookov&mines.edu +47772 + Clear Axess SAGL + Giovanni Tirino + giovanni.tirino&clearaxess.com +47773 + Radeus Labs + Andrew Correnti + it&radeuslabs.com +47774 + Microsoft Small Basic + Ed Price + smallbasicµsoft.com +47775 + Ernst Basler + Partner AG + Hjalmar Heinrich + support&ebp.ch +47776 + TS Gateway Ltd + Stuart Berry + slberry&tsgateway.com +47777 + Fachschaftsvertretung der Fakultät für Informatik und Mathematik, Universität Passau + FSinfo administrators + iana-contact&fsinfo.fim.uni-passau.de +47778 + Cox Communications Inc. - Atlanta Technology + Gemarl Perry + gemarl.perry&cox.com +47779 + TaiHao Medical + You-Wei Wang + wei.tomato1112&gmail.com +47780 + Prosoft Kroměříž s.r.o. 
+ Josef Zvoníček + prosoft&prosoft.cz +47781 + National Western Life Insurance + Trevor Westerdahl + twesterdahl&nationalwesternlife.com +47782 + Nautile Software + laurent CONIN + conin&nautile-software.com +47783 + codia Software GmbH + Thomas Menke + t.menke&codia.de +47784 + Suncoast Cancer Institute + Don Heinrich + admin&suncoastci.com +47785 + mBank S.A. + Roger Walczak + roger.walczak&mbank.pl +47786 + 5BARz India Private Limited + J RAJENDRA PRASAD DEEPAK + djrprasad&5barzindia.com +47787 + MuseumsIT + Thomas Johansen + post&museumsit.no +47788 + Prismaflex International + Alexis Greppo + itsupport&prismaflex.com +47789 + OAO MNIIPI OKOSZ "MOSPROEKT-4" + system administrator + admin&mosproekt-4.ru +47790 + ASYS Automatic Systems GmbH & Co. KG + Fredy Walth + f.walth&asys-micro.de +47791 + Rayed Alrashed + Rayed Alrashed + rayed&rayed.com +47792 + Pew Research Center + Brian Broderick + bbroderick&pewresearch.org +47793 + Scripps Networks Interactive + Chuck Edwards + Chuck.Edwards&scrippsnetworks.com +47794 + Arizona Center for Hematology and Oncology, LLC + Kimberly Fanciullo + kfanciullo&arizonaccc.com +47795 + Tegsoft + Eray Gürsoy + info&tegsoft.com +47796 + Architecting.nl + Jan Schoonderbeek + j.schoonderbeek&architecting.nl +47797 + Yellowbrick Data, Inc + Mark Brinicombe + iana&yellowbrick.io +47798 + CNI + nadia chebbi + nadia.chebbi&hotmail.fr +47799 + Verody, LLC + Kobi Eshun + info&verody.com +47800 + Waiariki Bay of Plenty Polytechnic + James Chamberlain + james.chamberlain&boppoly.ac.nz +47801 + CloudSeeds GmbH + Kevin Fibich + info&cloudseeds.de +47802 + CTSI Limited + David Reid + david.reid&ctsilimited.com +47803 + Urban Renaissance Agency + Amimoto Reiji + amimoto&ur-net.go.jp +47804 + Bravowhale Information Technology Ltd + Jason Deng + denghuimin&bravowhale.com +47805 + Shenzhen Grentech RF Communication Limited + qiyang + yangqi&powercn.com +47806 + Trussan Co., Ltd + Jarod Yu + jarodyu&trussan.com +47807 + DSRI "Volna", OJSC + Mirza Nabiev + volna.nabiev&yandex.ru +47808 + Delaware County Community College + Donald Sloat + dsloat&dccc.edu +47809 + Centerline Biomedical, Inc. + Vikash Goel + v&centerlinebiomedical.com +47810 + LINAGORA TUNISIE + Sami Bouhlel + sbouhlel&linagora.com +47811 + Pavilion Data Systems Inc + Sundar Kanthadai + sundar&paviliondata.com +47812 + AZ Elektro AG + Thomas Müller + Thomas.Mueller&az-elektro.ch +47813 + Luetze Transportation GmbH + Dimitrios Koutrouvis + dimitrios.koutrouvis&luetze.de +47814 + ChinaUnicom + zhanglunyong + zhanglunyong&chinaunicom.cn +47815 + Stora Enso Skog AB + Erik Buki + erik.buki&storaenso.com +47816 + Joseph Studio + Joseph Chris + joseph&josephcz.xyz +47817 + HAWE Hydraulik SE + Daniel Pradel + d.pradel&hawe.de +47818 + Salzburg AG + Bamberger Walter + walter.bamberger&salzburg-ag.at +47819 + NTT DATA Italia + Stefano Crespi + stefano.crespi&nttdata.com +47820 + Böco Böddecker & Co. GmbH & Co.
KG + Benedikt Diederichs + b.diederichs&boeco.de +47821 + Sjova-Almennar tryggingar hf + Jon Elias Thrainsson + skilriki&sjova.is +47822 + NdT Web Services + Nicholis du Toit + ndtwebservices&icloud.com +47823 + Couch Red + Niklas Johansson + raphexion&gmail.com +47824 + Exprivia Telco & Media srl + Andrea Andreucci + andrea.andreucci&exprivia.it +47825 + Ravel Electronics Pvt Ltd + Ramdas Ayyadurai + ramdas&ravelfire.com +47826 + Bristows LLP + Chris Jory + chris.jory&bristows.com +47827 + Moogsoft + Andrew Hepburn + spike&moogsoft.com +47828 + Lincoln Oncology, LLC + Joanne Cruz + cruz.joanne23&gmail.com +47829 + Fornetix + Gerald Stueve + GStueve&Fornetix.net +47830 + MovingStar Corporation + Sanghyun Park + shpark&movingstar.org +47831 + Robert Half Inc. + Mark Nguyen + mark.nguyen&roberthalf.com +47832 + NBCUniversal + Tarfa Hachem + tarfa.hachem&nbcuni.com +47833 + Teleworks Co., Ltd. + Ivy Kim + tw_lab&teleworks.co.kr +47834 + OOO NPP "POLUS" + Andrey Lebedev + polus_npp_lebedev&mail.ru +47835 + ARCHIVECO + SYLVAIN PLANCHON + pen&archiveco.fr +47836 + Bernd Matusche + Bernd Matusche + bernd.matusche&gmx.de +47837 + Produban Global Services + Alexander Gray + cloudsecurity&produban.com +47838 + onether.net + Hannes Eberhardt + info&onether.net +47839 + Landesarchiv Nordrhein-Westfalen + Mario Gruchalski + mario.gruchalski&lav.nrw.de +47840 + alzahra university + sara ahmadi + ahmadi_2616&yahoo.com +47841 + Comita Group of companies + Maxim Y. Evdolyuk + support&comitagroup.com +47842 + Identity Tech Solutions, LLC + Jeffery Frederick + jeff&identitytechsolutions.com +47843 + 广州易速计算机设备有限公司 (Guangzhou-speed computer equipment Ltd.) + 谢锐民 (Xie Ruimin) + 329920023&qq.com +47844 + ELEET Networks + Tomas Agartz + oid-admin&as31337.org +47845 + Netz39 e.V. + Stefan Haun + stefan.haun&netz39.de +47846 + BCausE Enterprise Private Limited + Apu Chandra Saha + apu.saha&bcause.in +47847 + Informatikdienste, Stadt Bern + Vladimir Fabian + vladimir.fabian&bern.ch +47848 + SYNERGY SYSTEMS AND SOLUTIONS + VIDHU AGGARWAL + info&s3india.com +47849 + RealSprint AB + Niclas Åström + niclas.astrom&realsprint.com +47850 + Cathexis Technologies (PTY) LTD + Morkel Potgieter + devpartner&cat.co.za +47851 + Arest Inc. + Orest Pazdriy + dr.arest&gmail.com +47852 + throwstone + Sylvester Chen + wencong.chen&outlook.com +47853 + Key Performance Consulting + Patrice Blanchardie + patrice.blanchardie&kpconsulting.fr +47854 + Abakusz Computer Services + Peter Oth + othp&abakusz.hu +47855 + Rumo Logística + Félix Alexandre da Silva + felix.silva&rumoall.com +47856 + Geo++ GmbH + Norbert Matzke + matzke&geopp.de +47857 + Juice Goose + Stephen Blow + sblow&juicegoose.com +47858 + ENDICOTT PRECISION, INC. + Joshua Kaminsky + JKaminsky&endicottprecision.com +47859 + KEO GmbH + Peter Kellendonk + kellendonk&kellendonk.de +47860 + Thalia Bücher GmbH + Christoph Drosten + itbetrieb&thalia.de +47861 + The Berkeley Carroll School + Tammi Williams + tdwilliams&berkeleycarroll.org +47862 + almanid group GmbH + Ronny Bremer + rbremer&almanid.com +47863 + Ing.-Büro Sigmund Gassner + Sigmund Gassner + g&ssner.de +47864 + Futureweb OG + Andreas Schnederle-Wagner + schnederle&futureweb.at +47865 + PT. 
Sinergi Teknologi Utama + Rachmat Kusnadi + rachmat.kusnadi&sinergiteknologi.com +47866 + Deniz Sezer + Deniz Sezer + deniz.sezer1&gmail.com +47867 + nadansys + Byun Jung In + jibyun&ndsys.co.kr +47868 + Luigi D'Ambrosio + Luigi D'Ambrosio + luigi.dambrosio&gmail.com +47869 + Nykvist Skogs AB + Lina Edin + lina&nykvist-skogs.se +47870 + thovel Thomas Velthoven + Thomas Velthoven + thomas&velthoven.no +47871 + RadioFrequencyExpert srl + Cesare Paganino + c.paganino&radiofrequencyexpert.com +47872 + BANK-now AG + Matt, Rainer + rainer.matt&bank-now.ch +47873 + SURIX SRL + Sergio Starkloff + starkloff&surix.net +47874 + County of Sacramento + Adam C. Huyck + huycka&saccounty.net +47875 + Solebit Labs + Boris Vaynberg + boris&solebitlabs.com +47876 + Shanghai Real Communication Technology Co., Ltd. + gongchengxin + gongcx&realcom.com.cn +47877 + LÖWEN Entertainment GmbH + IT&Organisation + admin&loewen-gruppe.de +47878 + pretty Easy privacy foundation + Volker Birk + council&pep.foundation +47879 + Skogsägarna Norrskog + Rikard Lundström + rikard.lundstrom&norrskog.se +47880 + Wildom Ltd. + Gabor Kovacs + kovacsg&wildom.com +47881 + B.P. Konstantinov Petersburg Nuclear Physics Institute + IT Department + software&pnpi.nrcki.ru +47882 + TechAssist + Brad Beckett + bradbeckett&gmail.com +47883 + sst-net.de + Sven Stitzelberger + iana.dienste.sst&sst-net.de +47884 + ISIMA + Guillaume Avez + guillaume.avez&isima.fr +47885 + SWDC RTSoft, OOO + Timur Frolov + admin&dev.rtsoft.ru +47886 + Intelligent Automation, Inc. + Geoffrey Bernstein + itdept&i-a-i.com +47887 + Keramikmanufaktur Bruckner eG + Juergen BRUCKNER + info&keramik-bruckner.eu +47888 + Freifunk - Verein zur Förderung des freien Internets + Juergen Bruckner + juergen&freifunk.club +47889 + Traffic and Parking Control Co., Inc. + Chris McLean + cmclean&tapconet.com +47890 + QSight Ltd + Daniel Price + qsightnz&gmail.com +47891 + Sea Tel + Nedko Vassilev + nedko.vassilev&cobham.com +47892 + Cosmic Engineering Inc. + Tetsuo Saito + saito_tetsuo&cosmic-eng.co.jp +47893 + NEW SUNRISE CO.,LTD. + zhu jian + zhujian778&163.com +47894 + phoenixcompany + jianbo,tian + 291205728&qq.com +47895 + 杭州云霁科技有限公司 (Hangzhou Yun Ji Technology Co., Ltd.) + James Gao + gaowenfei&idcos.com +47896 + System Infra Solutions Pvt. Ltd. + Dushyant Kumar + dushyant&sysinfra.in +47897 + SmartGen + zhangshuangyang + zsysmartgen&126.com +47898 + Centrul de Calcul S.A. + Adelin Cusman + office&certdigital.ro +47899 + École Supérieure des Arts Saint-Luc Liège + Jean-Michel CAFAGNA + cafagna.jean-michel&saint-luc.be +47900 + Södra Skogsägarna Ekonomisk Förening + Örjan Vorrei + orjan.vorrei&sodra.com +47901 + Hypixel, Inc + Bruce Blair Jr + agentk&hypixel.net +47902 + Ralphie T + Ralph Thompson + ralphiet78&gmail.com +47903 + RAC Motoring Services + Ralph Thompson + ralph.thompson&getronics.com +47904 + Ricola AG + Christian Jud + christian.jud&ricola.com +47905 + OBS/OCB Cloudwatt + FERAUDET Cyril + ops.cloudwatt&orange.com +47906 + ninjap + francois casse + fchr&free.fr +47907 + HAMBURG WASSER + Katrin Morhöfer + katrin.morhoefer&hamburgwasser.de +47908 + Consulting Company Informatica SC Ltda + Marcos Hideyo Sibuya + m.sibuya&consultingcompany.com.br +47909 + Bossa Nova Robotics + Joe Hosteny + joe&bnrobotics.com +47910 + Orbitel + Evgeney Kirik + orbitel45&gmail.com +47911 + TOC S.A. 
+ Ricardo Navarro + rn&toc.cl +47912 + Radio Rentals Ltd + Brad Howson + rrgitregistrations&radiorentals.com.au +47913 + uczen + Hyungsuk Choi + hschoi&uczen.co.kr +47914 + geoint.org + Steven Siebert + steve&geoint.org +47915 + CAAGIS + SVP Caagis + supervision&ca-caagis.fr +47916 + ioTRAN Corp. + Michael Banschbach + mwb&iotran.com +47917 + Urban Search & Rescue Austria (U.S.A.R. Austria) + Juergen Bruckner + info&usar.at +47918 + Lauren Pullen + Lauren Pullen + drurowin&gmail.com +47919 + dsi + Md. Habibur Rahman + habib.rahman&dsinnovators.com +47920 + PropertyGuru Pte. Ltd. + Kostiantyn Lysenko + engineering&propertyguru.com.sg +47921 + One Nine One Computer + Olivier Eigensatz + nic&191.ch +47922 + NetPilot Internet Security Ltd. + Robert Hawkins + rhawkins&netpilot.com +47923 + Sentia B.V. + Dennis van Zuijlekom + dennis.van.zuijlekom&sentia.com +47924 + mergedK GmbH + Conrado Seibel + conrado.seibel&mergedk.com +47925 + Cima S.p.A. + Fabio Abbottoni + fabio.abbottoni&cimaspa.it +47926 + Mitchell Farrar Holdings LTD + Daniel Bairstow + externalitsupport&mfgroup.co.uk +47927 + Tenet Healthcare + Mario Olbera + mario.olbera&tenethealth.com +47928 + Fortifydata + Sushil jain + sushil.jain&fortifydata.com +47929 + Edwards Lifesciences LLC + Ben Monrad + ben_monrad&edwards.com +47930 + Mansoft + H.F. Manson + hfmanson&mansoft.nl +47931 + ExteNet Systems, Inc. + Keyur Brahmbhatt + kbrahmbhatt&extenetsystems.com +47932 + Andrino + Martin Andrino + martin&andrino.eu +47933 + Terma A/S + Mads Ulrik Kristoffersen + muk&terma.com +47934 + World Privacy and Identity Association (WPIA) + Juergen Bruckner + info&wpia.club +47935 + tiri GmbH + Gerald Fehringer + gerald.fehringer&tiri.li +47936 + HansBeerman B.V. + Hans Beerman + HansBeerman-BV&xs4all.nl +47937 + Ophir Energy PLC + Damien Derby + damien.derby&ophir-energy.com +47938 + Alloy Software, Inc. + Ivan Samoylov + ivans&alloy-software.com +47939 + Audio Export GmbH + Jürgen Schenk + j.schenk&audioexport.de +47940 + clAIRvision Corporation + Jun Ogawa + enmng&clairvision.co.jp +47941 + Mendix B.V. + Hans van Kranenburg + iana&mendix.net +47942 + Tachyons .NET + Joel Studtmann + joel&tachyons.net +47943 + Oesterreichischer Mantrailing Verband - Mantrailing Austria + Doris Bruckner + info&mantrailing-austria.org +47944 + Center for International Private Enterprise + Christopher Bolcik + cbolcik&cipe.org +47945 + Fukoku Matual Life Insurance Company + Ryouichi Mukai + ryouichi.mukai&fi.fukoku-life.co.jp +47946 + OOO "Eko Paper" + Sergey V. Kurochkin + serge&volga-paper.ru +47947 + Hoermann GmbH + Matthias Müllner + info&hoermann-gmbh.de +47948 + Systemtechnik LEBER GmbH & Co. KG + Stefan Angele + info&leber-ingenieure.de +47949 + SYMACORP + Cedric LEFIEF + dtechnique&symamobile.com +47950 + Balkantel Ltd. + Georgi Georgiev + ggeorgiev&balkantel.net +47951 + Foundation for Trusted Identity + Sam Dibrell + sam&strac.org +47952 + NETIO products a.s. + Jan Rehak + info&netio.eu +47953 + D11 + Peter Ahlgren + peter.ahlgren&kalundborg.dk +47954 + Punkt Creative LLC + Ethan Brooks + punktcreative&gmail.com +47955 + Max Planck Institute for the Physics of Complex Systems + Thomas Mueller + mueller&pks.mpg.de +47956 + KAV Danubia + Sebastian Ecker + calimero&danubia.wien +47957 + Equiis Technologies Inc. + Valdemar Tadeu Mendonca + tmendonca&equiis.com +47958 + Cloud Cantábrico Siglo XXI, S.L.U + Óscar Flor Lozano + oscar.flor&cc3m.com +47959 + Snapchat, Inc. 
+ Eric Mak + eric.mak&snapchat.com +47960 + Hartmann GmbH + Lutz Polosek + lutz.polosek&hartmann-gmbh.eu +47961 + Gerhard D. Wempe KG - Division Chronometerwerke + Christian Blanck + chrono&wempe.de +47962 + Ezam Automotive Parts + Ali Karimi + Karimi&ezamco.com +47963 + Schwyzer Kantonalbank + Martin Annen + netadmin&szkb.ch +47964 + Landkreis Oberhavel + Uwe Volwarski + it&oberhavel.de +47965 + Dedicated Computing LLC + Greg Krimmer + administrator&dedicatedcomputing.com +47966 + NooBaa + Yuval Dimnik + yuval.dimnik&noobaa.com +47967 + IT-Consulting Mario Bergmann + Mario Bergmann + mb&mabecon.de +47968 + KOMSET-servis LLC + Ilya Orlenko + support&komset.ru +47969 + Mills College + Tobin Lee + its-admins&mills.edu +47970 + Neulinger Consulting + Nathan Neulinger + nneul&neulinger.org +47971 + Martin Sprocket and Gear, Inc. + Network Admin + netadmin&martinsprocket.com +47972 + Sam Yaple + Sam Yaple + sam&yaple.net +47973 + Servosity Inc + Sam Yaple + syaple&servosity.com +47974 + WANSecurity, Inc. + Robert Smith + iana.org&wansecurity.com +47975 + kezhi-controls + gen.xu + gen.xu&kezhi-controls.com +47976 + gematik Gesellschaft für Telematikanwendungen der Gesundheitskarte mbH + Ingo Bahn + ingo.bahn&gematik.de +47977 + Tivaci Corporation + Edward Kwa + ed.k&tivacicorp.com +47978 + ALPEIN Software SWISS AG + Sergey Gamov + devteam&alpeinsoft.ch +47979 + BG Klinikum Hamburg gGmbH + A.-R. Schmidt + edv&bgk-hamburg.de +47980 + Jernbaneverket + Herman Seip + enterprise_oid_snmp&jbv.no +47981 + Prowise B.V. + Paul de Laat + systeembeheer&prowise.com +47982 + 6PM PLC + David Vassallo + david.vassallo&6pmplc.com +47983 + Hanseatic Bank + Stephan Jacobsen + Stephan.jacobsen&hanseaticbank.de +47984 + Atos IT Solutions and Services AG + Sebastian Manthey + sebastian.manthey&atos.net +47985 + IDnomic + Ahmadou DEM + securite-audit&idnomic.com +47986 + Green Communications + Khaldoun Al Agha + admin&green-communications.fr +47987 + Eraldo Gandini + Eraldo Gandini + io&eraldogandini.it +47988 + PDS s.r.o. + Lukas Plachy + lukas.plachy&pds.eu +47989 + Software Workers srl + Riccardo Scartozzi + riccardo.scartozzi&softwareworkers.it +47990 + Juergen M. Bruckner + Juergen M. Bruckner + info&bruckner.xyz +47991 + shadowhunt + Alexander Dreweke + iana&shadowhunt.de +47992 + Gettysburg Cancer Center + Bansari Mandalia + bamandalia&gettysburgoncology.com +47993 + Brazen Technologies + Jason Southern + jason&brazen.com +47994 + bba solutions + Darrick Buralli + dburalli&bbasolutions.com +47995 + American University + Hosein Nahidian + nahidian&american.edu +47996 + DataBoost LLC + Bradley Giesbrecht + iana&databoost.com +47997 + Securilytics, LLC + Alex Senkevitch + tech-poc&securilytics.com +47998 + DASSAULT FALCON SERVICE + Nicolas SAINT-VAL + nicolas.saint-val&dassault-falcon.com +47999 + Port of Portland + IT Technical Services - IANA Admin + iana&portofportland.com +48000 + Steelwedge Software + Chris Callison + ccallison&steelwedge.com +48001 + OOOGLEEE, INC. + JAYANTA KACHARI + jayantasumitra2011&gmail.com +48002 + schoeller network control Datenverarbeitung GmbH + Wilhelm Pichler + wilhelm.pichler&schoeller.at +48003 + Fast S.p.A. + Lorenzo Rompianesi + l.rompianesi&fastautomation.it +48004 + Digifort - IP Surveillance System + Francisco Luiz Zanini + francisco&digifort.com.br +48005 + ZyCast Technology Inc. 
+ Shelly Chang + shelly_chang&zycast.com.tw +48006 + Nandex, Inc + Vladmir Komarov + vladimir.komarov&nandex.org +48007 + Netskope + Nauman Tahir + Nauman&netskope.com +48008 + BfG Eigentümer/-innen- und Verwaltungsgenossenschaft eG + Philipp Tod + it-support&mitgruenden.at +48009 + AÇÃO EDUCACIONAL CLARETIANA + Danilo da Silva + danilodasilva&claretiano.edu.br +48010 + Howard & Howard + Darren Ginter + dg&h2law.com +48011 + SparkStone Ltd + Michael Keeley + admin&sparkstone.co.nz +48012 + COWE Co., Ltd. + Kenick Kim + khkim&cowe.co.kr +48013 + Strauss and Strauss Consultants (Pty) Ltd. + Heinrich Strauss + hostmaster&strauss.company +48014 + SCAI Connect + Francesco Pinta + francesco.pinta&scaiconnect.it +48015 + IT Gården i Landskrona AB + Magnus Nilsson + magnus.nilsson&itgarden.se +48016 + MELASZ + Janos Almasi + elnokseg&melasz.hu +48017 + Syamsul Mobile + Syamsul Maarief + syyull&gmail.com +48018 + Today Technology Co., Ltd. + jinmingfeng + jinmf&todaysec.com +48019 + InMobi Pte Ltd + Masthanaiah Cheekavolu + infra-ops&inmobi.com +48020 + Shanghai Chenrui Communication Technology Company + Xuhua Zhou + anthony570&163.com +48021 + THE ROBERTO GIORI COMPANY LTD + Roberto Giori + info&gsmt.ch +48022 + Norges Geotekniske Institutt + Arne Digernes + arne.digernes&ngi.no +48023 + Latvian Institute of Organic Synthesis + Andris Diss + andris&osi.lv +48024 + Paradise Network Enterprises + David Robb + ender&paradise.gen.nz +48025 + Lyonel Serradura + Lyonel Serradura + lserradura&free.fr +48026 + Oostec BV + L. Meinders + leo.meinders&oostec.nl +48027 + PKB RIO Ltd + Konstantin Alekseenkov + AlekseenkovK&pkb-rio.com +48028 + VS Web Labs + VANGALA SURESH + 640067&gmail.com +48029 + High-Galaxy,HGANS + Zhuoyao Wang + wang.zy&high-Galaxy.com +48030 + Pesticide Software + Steven Stanton + sstanton&pesticidesoftware.com +48031 + GLOBAL 2000 + Stefan Hirschhofer + stefan.hirschhofer&global2000.at +48032 + Västra Värmland och Dals skogsägareförening + Anders Alm + anders.alm&vvds.se +48033 + Aktiebolaget Hilmer Andersson + Erland Erlandsson + erland.erlandsson&hilmer.se +48034 + Bukalapak + Rizal Muhammad Nur + rizal&bukalapak.com +48035 + Copperchase Limited + Tony Myers + sales&copperchase.co.uk +48036 + ALHUE-TEC LTDA + Sergio Uribe + sergioaquiles.uribe&gmail.com +48037 + SHENZHEN UTEPO TECH CO., LTD + timwang + wangxianghua&utepo.com +48038 + ONF ENERGIE + Aymeric ALBERT + aymeric.albert&onf.fr +48039 + PIRIOS S.A. + Konrad Pieniawski + konrad.pieniawski&pirios.com +48040 + Radium s.r.o + Milan Cecrdle + cecrdle&radium.cz +48041 + Technica Del Arte BV + Joost Bloemen + joost&technicadelarte.com +48042 + Geomant + Akos Vecsei + info&geomant.com +48043 + DENSO International America + Douglass Coombs + douglass_coombs&denso-diam.com +48044 + Tri-Valley Oncology + Yvette Hamilton + yhamilton&tvoha.com +48045 + Inpro Telecom S.A. de C.V.
+ Enrique RIvera Florentino + erivera&inprotelecom.com +48046 + Arcadis NV + Flint Barber + flint.barber&arcadis.com +48047 + DDESK LLC + Andre Meyer Pflug + andre&ddeskllc.com +48048 + Levene Såg AB + Björn Broberg + broberg&levene-sag.se +48049 + NEW Service GmbH + Clemens Blank + clemens.blank&new.de +48050 + International Personal Finance + Steven Carney + hcdarchitects&ipfin.co.uk +48051 + Sato America + Zayar Tun + zayar.tun&sato-global.com +48052 + Milpowerinc + Brooks Dorroh + bdorroh&milpowerinc.com +48053 + Mazatal Hotel and Casino + Scott Smith + scotts&777play.com +48054 + SunGard Data Systems + Mark Kaczorowski + mark.kaczorowski&fisglobal.com +48055 + sendhybrid GmbH + Peter Danner + peter.danner&sendhybrid.com +48056 + Jeremy Gibbons + Jeremy Gibbons + jeremy.gibbons&laposte.net +48057 + Kronos Technologies + Nicolas Vanheuverzwijn + nvanheuverzwijn&kronostechnologies.com +48058 + SouthEast Texas Regional Advisory Council + Jeremiah Williamson + jeremiah.williamson&setrac.org +48059 + Karlsruhe Institute of Technology (KIT) + Patrick von der Hagen + patrick.hagen&kit.edu +48060 + SecureMe2 + Hans van Beek + hans&byte.expert +48061 + Byte Expert B.V. + Hans van Beek + hans&byte.expert +48062 + Territorial Generation Company 14 + Anatoly Loskutnikov + anatlosk&gmail.com +48063 + Anna Jaques Cancer Center + Laura Rossi + lrossi&ajh.org +48064 + Grupo MSA S.A. + IT Services department + webmaster&msa.com.ar +48065 + Talentnet Corporation + IT Department + it&talentnet.vn +48066 + Linkforce Engineering + Todd Bayley + IT&linkforce.com.au +48067 + UnifyID, Inc. + John Whaley + john&unify.id +48068 + CSE Transtel + Diego Abas + diego.abas&cse-transtelsg.com +48069 + Demmich IT Service + Torsten Demmich + mail&torstendemmich.de +48070 + Patrick Lesky + Patrick Lesky + mail&patricklesky.at +48071 + Retarus GmbH + Florian Kretzschmar + enterprise.it&retarus.de +48072 + Optanix + Kelly Ronan + timothy.boronczyk&optanix.com +48073 + Pentest Limited + Lasantha Priyankara + lasantha.priyankara&pentest.co.uk +48074 + Cube-Tec International GmbH + Robert Meyer + technik&cube-tec.com +48075 + Tennessee Plateau Oncology + Linda Barnwell + lbarnwell&volfirst.net +48076 + NexGen Inc + Mohsen Mashayekhi + admin&arianrp.com +48077 + State University of Feira de Santana + Delmar Broglio Carvalho + aei&uefs.br +48078 + Ventura County Hematology Oncology Specialists + Marissa Rivera + mrivera&venturaoncology.com +48079 + NEC Enterprise Communication Technologies + Amit V Raut + amit.raut&necect.com +48080 + Mimetrix Design Group + Elliott Starin + elliott.starin&mimetrix.com +48081 + Max Planck Institute for Plant Breeding Research + Andreas Hoffmann + iana&mpipz.mpg.de +48082 + Stromnetz Hamburg GmbH + Matthias Brasch + Matthias.Brasch&stromnetz-hamburg.de +48083 + AIRESERVICES + Bruno LE TUAL + technique&aireservices.com +48084 + Stewart Investors + Peter Lyttle + peter.lyttle&firststate.co.uk +48085 + RCCA COMMUNITY HEMATOLOGY ONOCOLGY + BETH SHAW + BSHAW&CHOP-MD.COM +48086 + Helicon Opleidingen + Alex Peeters + aut.bb&helicon.nl +48087 + Troup County School System + Gary Stansbury + admin&troup.org +48088 + Instituto Superior de Economia e Gestão + Raul Bras + rbras&iseg.ulisboa.pt +48089 + Rådgivende Ingeniør Per Dypvik AS + Per Dypvik + per&energea.no +48090 + Nord/LB Luxembourg S.A. 
Covered Bond Bank + Juergen Koehnen + juergen.koehnen&nordlb.lu +48091 + Sicss Society + Haochen Xie + haochenx&acm.org +48092 + qi2 + Laurent Cambou + qi2.info&free.fr +48093 + Aptomar AS + Morten Espeland + morten.espeland&aptomar.com +48094 + White Star Petroleum + Joshua Shackeford + Joshua.Shackelford&wstr.com +48095 + Stadler Pankow GmbH + André Orpel + Andre.Orpel&stadlerrail.de +48096 + jedernet GmbH + Pete Inhofer + ldap&jedernet.de +48097 + RAYAPHONE + Mahdi Mohammad Hossaini + info&rayaphone.com +48098 + EBPI BV + Martijn van Buijtene + martijn.van.buijtene&ebpi.nl +48099 + Redgate Software + Ben Emmett + bsprocurement&red-gate.com +48100 + Orca Technologies + Gary Geil + sales&orcatechnologies.com +48101 + Systech International + Robert Phillips + bob.phillips&systechone.com +48102 + DARTY + Jonas BARROZO + jonas.barrozo&darty.fr +48103 + Executive Weather Corporation + Amy M. Fincher + amy&executiveweather.com +48104 + example.cz + Alice Vixie + example&email.cz +48105 + Sky Puzzle Ltd. + Zdenko Marincic + info&sky-puzzle.com +48106 + Skami Programming + Karn Kallio + kkallio&skami.org +48107 + Equidome + Turpin Olivier + turpin_olivier&orange.fr +48108 + 9DOT + Alessandro Campanella + a.campanella&9dot.it +48109 + WishCert + Hyunjae Yoo + y9769852&gmail.com +48110 + Universite de Rouen Normandie + Cedric HOUSSIER + cedric.houssier&univ-rouen.fr +48111 + AMD Distribution sp.z o.o. + Marcin Cichocki + m.cichocki&amdd.pl +48112 + AZCOM Technology s.r.l. + Stefano Gandini + stefano.gandini&azcom.it +48113 + Rabian Inc. + Adrien Rabian + admin&rabian.fr +48114 + Solutionbase Ltd + Kevin Golding + kgolding&solutionbase.co.uk +48115 + Gallagher Group Limited + Eric Light + ISInfrastructure&gallagher.co.nz +48116 + VšĮ Kauno Šilainių poliklinika + Darius Grigarevičius + d.grigarevicius&silainiupoliklinika.lt +48117 + Scheidt & Bachmann GmbH + Julian Dickreiter + dickreiter.julian&scheidt-bachmann.de +48118 + Pyro Telecom Solutions Pvt Ltd + Yugandhar Dussani + yugandhar.dussani&pyrogroup.com +48119 + Scangaule + Christophe Hellman + info&scangaule.com +48120 + Digipolis CVBA + Cor Michels + cor.michels&digipolis.be +48121 + njhd + liu bin + hd&njhd.com.cn +48122 + Millennium Physician Group + Ryan Williams + ryan.williams&mpgus.com +48123 + Paradise Valley Community College + itpvc + itpvc&paradisevalley.edu +48124 + MagicMonster Limited + Jurn Ho + jurn&magicmonster.com +48125 + Exodus Intelligence + Peter Vreugdenhil + pen&exodusintel.com +48126 + Energostat Co., Ltd + Andrei Polizharov + support&energostat.ru +48127 + salesforce.com, inc. + PEN + pen&salesforce.com +48128 + Distributed Data Systems Ltd. + Alex Kuzmuk + alex&kuzmuk.com +48129 + simplicityEngine Inc. + David Kristensen + david&simplicityengine.com +48130 + Batscan / PL Trading AB + Göran Krook + gk&batscan.se +48131 + "DEP Company" Ltd. + Grishin Alexey + grishin&dep.ru +48132 + Sparebanken Vest + Anders Lunde + anders.lunde&spv.no +48133 + McCullough-Solutions.NET + Andrew McCullough + andrew&mccullough-solutions.com +48134 + 8438757 Canada Inc D.B.A. Multifactor.net + Kate Gray + kgray&multifactor.net +48135 + MOPIENS, Inc.
+ LEE, Kwangweon + leekw&mopiens.com +48136 + GitLab + John Northrup + john&gitlab.com +48137 + Billi Pty Ltd + Adam Marriott + adam.marriott&billi.com.au +48138 + BillerudKorsnäs Skog & Industri AB + Lise-Lotte Swing + lise-lotte.swing&billerudkorsnas.com +48139 + Krogle + Jean-Francois Ageneau + jf.ageneau&gmail.com +48140 + mBank Hipoteczny SA + Dawid Smoliński + seb&mhipoteczny.pl +48141 + first frame networkers ag + IT Architecture Admin + hostmaster&firstframe.net +48142 + Digiton Systems LLC + Aleksandr Sirotkin + support&digiton.ru +48143 + Aurum Europe B.V. + Twan van der Schoot + twan.van.der.schoot&aurumeurope.com +48144 + Synermed Soluciones Medicas + Claudio Baeza + cl.baeza&gmail.com +48145 + SNS Bank NV + Mike Noonan + mike.noonan&sns.nl +48146 + FlyCAA + Didier MBONEKUBE + didier.mbonekube&caacongo.com +48147 + The Warehouse Group Limited + Anthony Gorecki + anthony.gorecki&thewarehouse.co.nz +48148 + Taos + Christopher Crabtree + ccrabtree&taos.com +48149 + CybLab Ltd + Stefano Sola + support&cyblab-ltd.com +48150 + simus systems GmbH + Harald Kunze + kunze&simus-systems.com +48151 + Laryio + Paul Arnold + sysadm&laryio.com +48152 + WishCert Co. + Hyunjae Yoo + y9769852&gmail.com +48153 + xgp.ch + G. P. + iana-pen&xgp.ch +48154 + XMW + Woo Ram Jeong + wrjeong&xmwinc.com +48155 + Wuhan FiberHome International Technologies Co., LTD + ZhangZheng + zhangzheng2012&fiberhome.com +48156 + Hypertech Solutions Pty Ltd + Hamish Graham + hamish&hypertechsolutions.com.au +48157 + CIBRED SUD s.r.l. + Carmelo Russo + engineering&cibred.com +48158 + cedric ltd + cedric luneau + cedricluneau&gmail.com +48159 + NHS Property Services Ltd + Gareth Tandridge + gareth.tandridge&property.nhs.uk +48160 + Stichting Regionale Radio Noord + John Melet + helpdesk&rtvnoord.nl +48161 + TUI AG + Ian Macro + Directory&tui.com +48162 + Torchmark Corporation + Bradley Marrs + brmarrs&torchmarkcorp.com +48163 + abahlali + Adrian Riaan + adrianriaan458&gmail.com +48164 + my-PV GmbH + Dr. Gerhard Rimpler + gerhard.rimpler&my-pv.com +48165 + 1stPoint Communications, LLC + Erik Levitt + elevitt&1pcom.com +48166 + Bayerische Staatsbibliothek + Gregor Horstkemper + horstkemper&bsb-muenchen.de +48167 + Dr. Mohtaseb Cancer Center and Blood Disorders + Brandie Carrington + brandie&azcancerandblood.com +48168 + SCPTime + Xavier Bestel + xavier.bestel&gorgy-timing.fr +48169 + Bumps to Babes + Jack Jiang + jiang1095&hotmail.com +48170 + Avaya Atlanta Lab + Shane Artman + artman&avaya.com +48171 + Health Catalyst + Nate Arnold + administrator&healthcatalyst.com +48172 + Speed Roam PTE LTD + Deepthi Duggirala + deepthi&speedroam.com +48173 + Careum AG Bildungszentrum für Gesundheitsberufe + Oliver von Wartburg + oliver.vonwartburg&careum.ch +48174 + Italian Pastafarian Church + Salvo Enrico + info&chiesapastafarianaitaliana.it +48175 + Astronix Data Security Solutions + Antonio N. Saygbe + astronixt&gmail.com +48176 + InnoEye LLC + Rajeev Gupta + rajeev&innoeye.com +48177 + ZIGGO + Jutta Galatenko + jutta.galatenko&office.ziggo.nl +48178 + Contargo GmbH & Co. KG + Henrik Hanke + hhanke&contargo.net +48179 + Andreas Heigl + Andreas Heigl + iana&heigl.org +48180 + Nial + Andreas Påhlsson + andreas&pahlsson.info +48181 + pSenso sp. z o.o. + Mariusz Żelażewski + mariusz.zelazewski&psenso.com +48182 + Terberg Group B.V. + Rene van Stipriaan + stipriaan&terberg.nl +48183 + Phegda Technology Co.,Ltd. 
+ Tao Jin + brody_tao&phegda.com +48184 + Nanning OMARA Tech .inc + gecheng wang + bitgothic&163.com +48185 + NeuLion, LLC + Frank Schoenberger + frank.schoenberger&neulion.com +48186 + Innere Mission München e.V + Gerwin Miller + gmiller&im-muenchen.de +48187 + Nanjing Balance Network technology Co., Ltd + Chen Shen + shenchen&balance-net.com +48188 + Delta Solutions LLC + Ilya Smirnov + i.smirnov&deltasolutions.ru +48189 + SPRUT Technology Ltd + Dmitriy Bukeev + dbuk&sprut.ru +48190 + mcccxxxvii.net + Marcel Meyer + iana-pen&mcccxxxvii.net +48191 + Praten + Andrew Okri + andrew.okri&praten.co.uk +48192 + SEPPmail AG + Stefan Klein + klein&seppmail.ch +48193 + University of Pittsburgh Medical Center + Ralph T. Moffat + moffatrt&upmc.edu +48194 + Alltec GmbH + Mathis Graw + mgraw&alltec-laser.com +48195 + SISDEF Ltda. + Alejandro Yachan + ayachan&sisdef.cl +48196 + Horvoje.net + Hrvoje Lončar + horvoje&gmail.com +48197 + TECNIARK S.A. + JUAN JOSE SANCHEZ + jsanchez&tecniark.com.ar +48198 + cryptofreek.org + Cole Barnes + colebarnes&gmail.com +48199 + mVISE AG + Thomas Lewandowski + iana-requests&mvise.de +48200 + LumenVox + Nigel Quinnin + NigelQuinnin&LumenVox.com +48201 + CyberHound + Bernd Jerzyna + bernd.jerzyna&cyberhound.com +48202 + Vekomy Technologies + Sathyanarayanan Srinivasan + sathya.narayanan.s&vekomy.com +48203 + Obstreperus + Sathyanarayanan Srinivasan + sathya.narayanan.s&obstreperus.com +48204 + Eisenmann SE + Jörg Eddiks + joerg.eddiks&eisenmann.com +48205 + COLO21 AG + Admin + admin&colo21.com +48206 + SRC Incorporated + Sal Yurman + syurman&srcinc.com +48207 + University of Hull + Shaun Miller + shaun.miller&hull.ac.uk +48208 + Axilspot Communication CO.,Ltd + Huangyao + daisy.qiu&axilspot.com +48209 + AB Karl Hedin Sågverk Råvara + Erik Sundström + erik.sundstrom&abkarlhedin.se +48210 + AB Karl Hedin Sågverk Biobränsle + Erik Sundström + erik.sundstrom&abkarlhedin.se +48211 + Derome Skog AB + Helena Andersson + helena.andersson&derome.se +48212 + Frödinge Skog AB + Joakim Rosén + joakim&frodingeskog.se +48213 + COC AG + Lars Schmidt + hostmaster&coc-ag.de +48214 + Master Class Corp + Nicolay Oladov + it&mk-profy.ru +48215 + American Heart Association + Corey Clements + corey.clements&heart.org +48216 + RADIANTECH, INC. + Pin-Lin, Huang + pinlinhuang&radiantech.com.tw +48217 + codemanufaktur GmbH + Vit Matousek + sale&codemanufaktur.com +48218 + Government of Manitoba + Faren Chadney + faren.chadney&gov.mb.ca +48219 + Jackson Hewitt Tax Service Inc. + Adam Murphy + adam.murphy&jtax.com +48220 + TPAX.EU UG (haftungsbeschraenkt) + Jens Trach + jens&tpax.eu +48221 + Flopsar Technology + Dariusz Sendkowski + dsendkowski&flopsar.com +48222 + Micro 100 Tool Corp + Brett Hill + technology&micro100.com +48223 + Rauland-Borg Corporation + Thomas Fuller + thomas.fuller&rauland.com +48224 + QIT Systeme GmbH & Co. KG + Stefan Baur + stefan.baur&qit-systeme.de +48225 + Summonte + Walter Summonte + walter&summonte.com +48226 + Sandåsa Timber AB + Tina Gyllengahm + tina.gyllengahm&sandasa.se +48227 + AB Tutor + Phil Hallows + phallows&globemicro.com +48228 + LLC "Tecon MT" + Artem Dolgikh + dolgih&tecon.ru +48229 + Platina Systems, Inc.
+ Jason Pang + jason&platinasystems.com +48230 + Montefiore Medicine + John Wang + jowang&montefiore.org +48231 + Alvernia University + Richard Reitenauer + richard.reitenauer&alvernia.edu +48232 + Platinum Home Mortgage Corporation + Network Administrator + netadmin&phmc.com +48233 + IceMobile Agency BV + Rutger van Bergen + rutger&icemobile.com +48234 + Netronics Technologies Inc. + Claire Winfield + clairew&netronics-networks.com +48235 + LinkXess GmbH + Christian Marhoff + c.marhoff&linkxess.de +48236 + Zschimmer & Schwarz GmbH & Co KG + Ingo Junker + i.junker&zschimmer-schwarz.com +48237 + SOFTEL Corp. + Igor Kovalev + ikov&softel.ru +48238 + Optima Tours GmbH + Silvio Šverko + silvio&optimatours.de +48239 + Non-public JSC "Krasnaya polyana" + Evgeniy Belokon + it&karousel.ru +48240 + senselan gmbh + Rafael Studer + office&senselan.ch +48241 + Neptune and Company, Inc. + Seth Sanchez + ssanchez&neptuneinc.org +48242 + Serban Nistor + Serban Nistor + serban.nistor&gmail.com +48243 + Mindit Services + Serban Nistor + serban.nistor&mindit.ro +48244 + Quantic Vision, S.A. + Luis Bethancourt + luis.bethancourt&quanticvision.com +48245 + Atlas Air + timothy cochrane + timothy.cochrane&atlasair.com +48246 + Mission Microwave Technologies, Inc + Chad Deckman + chad&missionmicrowave.com +48247 + CENTRAL PARK HEMATOLOGY & ONCOLOGY, P.C. + David Halvorsen + david&cpho.com +48248 + innoTel Pty Ltd + Andrew Sims + info&innotel.com.au +48249 + Rohe Automation + Georgi Pangev + office&rohe-auto.com +48250 + 中国广东省深圳市核达中远通电源技术有限公司 (Shenzhen City, Guangdong Province, China COSCO through Nuclear Power Technology Co., Ltd.) + 谢先生 (xiejiangbo) + xiejiangbo&vapel.com +48251 + Liskl Networks, Inc + Loren Lisk + loren.lisk&liskl.com +48252 + Ecole CentraleSupelec + Renaud MONNET + Renaud.Monnet&CentraleSupelec.fr +48253 + OpusV + Mark Taylor + mark&opusv.com.au +48254 + Shaanxi XinTong Intelligent Technology Co., Ltd. + zhaoyu jia + jiazy&xaxintong.com +48255 + DiaSys Diagnostic Systems GmbH + Alexander Schwarz + a.schwarz&diasys.de +48256 + NISHANT.BIZ + NISHANT KUMAR + NISHANT.ORG&GMAIL.COM +48257 + Comoretel + Eric Lackore + elackore&comoretel.com +48258 + SourceClear + Pete Kocks + operations&srcclr.com +48259 + R+V Allgemeine Versicherung AG + Axel Panten + axel.panten&ruv.de +48260 + Guangzhou iplook network technologies Co.,Ltd. + Yong Liang + liangyong&iplooknetworks.com +48261 + Callison Networking Company + Christopher Callison + chris&callison.org +48262 + Tricentis GmbH + Heike Artner + h.artner&tricentis.com +48263 + Noble Markets, LLC + Jason Frisvold + jason&noblemarkets.net +48264 + Mairie de Saint-Maur-des-Fossés + Julien Huon + admin&mairie-saint-maur.com +48265 + Assembly Data System S.p.a. + Matteo Patrignanelli + matteo.patrignanelli&assembly.it +48266 + Mainmetall GmbH & Co. KG + Nils Berberich + nbe&mainmetall.de +48267 + Phexonite + Arsslen Idadi + arsslens021&gmail.com +48268 + c13 LLC + Arturo Dumas + dumas&c13.us +48269 + Info-Tech Research Group + PKI Admin + iana-pen&infotech.com +48270 + MiroNet AG + Mathias Seiler + noc&mironet.ch +48271 + AITelecom S.A. de C.V. + Fernando Rodriguez + frod&aitelecom.net +48272 + Examination Management Services, Inc. 
+ Infrastructure Manager + infrastructure&emsinet.com +48273 + Computer Health + Michael Cameron + msp&chcorp.ca +48274 + Saxion University of Applied Sciences + Kick Molenveld + k.molenveld&saxion.nl +48275 + SingularIT Solutions + Jean-Marc Lavoie + pen&singularitsol.com +48276 + BNP Paribas Fortis SA (Belgian UEN: 0403.199.702) + Fréderic Huygens (Global Security) + frederic.huygens&bnpparibasfortis.com +48277 + Topway Network Engineering Co., Ltd. + Jianqing Tang + tjianqing&qq.com +48278 + Twowing Technologies + king zhu + king&twowing.com +48279 + Institut für Steuerungstechnik der Werkzeugmaschinen und Fertigungseinrichtungen, Universität Stuttgart + Felix Kretschmer + felix.kretschmer&isw.uni-stuttgart.de +48280 + R-Platforma LLC + Dmitriy Baturin + info&rosplatforma.ru +48281 + Khipu + Emilio Davis + emilio.davis&khipu.com +48282 + The Situs Companies + Ryan Young + Ryan.Young&situs.com +48283 + ZI-Argus + Nicholas Farrugia + Nicholas.Farrugia&zi-argus.com +48284 + in2ip BV + Dirk-Jan Wemmers + dirkjan&in2ip.nl +48285 + Mary Street Wellness + Oliver Frye + webmaster&marystreetwellness.com.au +48286 + NewsUK + Pankaj Pratap Singh + pankaj.singh&news.co.uk +48287 + 中电和瑞科技有限公司 (China Electronics Harvest Technology Co., Ltd.) + 林志伟 (Lin Zhiwei) + linzw&cechr.com.cn +48288 + Health-net 2020 GmbH + Arno Krzywon + krzywon&health-net.at +48289 + Connect2 Systems Limited + Steve Rutherford + steve&connect2.io +48290 + Óbuda University + Levente Németh + nemeth.levente&kvk.uni-obuda.hu +48291 + CTOUCH Europe B.V. + Rick van Dijk + pm&ctouch.eu +48292 + Michigan Education Association + Darryl Castillo + dcadmin&mea.org +48293 + Utica College + David Parker + network&utica.edu +48294 + Personal Genome Diagnostics, Inc. + Matthew Riedel + mriedel&personalgenome.com +48295 + UNess Smart Home and Living + Bas Sanders + bas.sanders&com1.nl +48296 + Montgomery Blair High School + Peter Hammond + Peter_A_Hammond&mcpsmd.org +48297 + Chillout + Maxim Tkachenko + tkacenko.maxim&gmail.com +48298 + Health Choice Management Company + Firas Matti + HCH_ISNetworkSecurity&iasishealthcare.com +48299 + Koninklijke Nederlandse Dambond + Martijn van der Klis + martijnvdklis&gmail.com +48300 + ComSource s.r.o. + Ales Lednej + ales.lednej&comsource.cz +48301 + NathanNet + Nat Lasseter + nathannet&4574.co.uk +48302 + Big Neptune + Matthew Walker + matthew&bigneptune.com +48303 + 0x0f + Simon Wachter + ops&0x0f.org +48304 + American Financial Group Inc + Charles Peters + afgit&amfin.com +48305 + APRA-OPTINET Sp. z o. o. + Jakub Kaczmarek + j.kaczmarek&apra-optinet.pl +48306 + Gentofte Kommune + Martin Arnoldi + arno&gentofte.dk +48307 + Continental Electronics + Joshua Moore + license&contelec.com +48308 + Argeo GmbH + Mathieu Baudier + mbaudier&argeo.org +48309 + Fuji IT Co.,Ltd. + SHIGERU SUDA + suda-shigeru&fujielectric.com +48310 + BLOBFISH E.I.R.L. + Jaime Hablutzel Egoavil + jaime&blobfish.pe +48311 + Northcloak Corporation + Juli Mallett + juli&northcloak.com +48312 + Avid Integration Technologies + Toby Napier + service&avidintegrationtechnologies.com +48313 + Not for Radio, LLC + Patrick Kelsey + pat.kelsey&notforadio.com +48314 + ESA Elektroschaltanlagen Grimma GmbH + Steffen Löb + pki&esa-grimma.de +48315 + Documenta S.A. + André L'Hereux + comunicaciones&documenta.com.py +48316 + Berliner Verkehrsbetriebe + Evelyn Thiel + evelyn.thiel&bvg.de +48317 + PedFast Technologies + John F.
Tamburo + johntam&pedfast.com +48318 + Beijing NationSky Network Technology Co., Ltd + Weining Ma + weining.ma&nationsky.com +48319 + 911 Datamaster, Inc. + Jim Shepard + jim.shepard&motorolasolutions.com +48320 + Xhesi + Xhesi Galanxhi + xhesi.galanxhi&outlook.com +48321 + Metroswitch Technologies, Inc. + Chris Fogel + chris.fogel&metroswitch.net +48322 + Surfilter Network Technology Co.,Ltd + David Lee + lichangqing&1218.com.cn +48323 + Altaire Ltd + Nathan Baum + nathan&altaire.com +48324 + 2test + Mikhail Sokolov + m.sokolov&2test.ru +48325 + Tesco Europe + Pavel Vagner + wintel&cz.tesco-europe.com +48326 + Vänerbränsle AB + Carl-Axel Östensson + carl-axel.ostensson&moelven.se +48327 + Johnson Controls Inc. + Justin Watts + Justin.Watts&jci.com +48328 + Veritas Technologies LLC + Xiaowei Deng + clare.deng&veritas.com +48329 + OJSC "MINSK TRACTOR WORKS" + Dmitry Pogorelov + mtz_adm&belarus-tractor.com +48330 + Nationalmuseet + Bjarke Schaar + bjarke.schaar&natmus.dk +48331 + SKTB "SKiT" + Alexander Meshkov + meshkov.av&skitlab.ru +48332 + Callans Trä AB + Gustaf Callans + gustaf&callanstra.se +48333 + Koolsign Co., LTD + MoonSeok Cho + mscho&koolsign.net +48334 + Bank of the South + James Brammer + IANA&bankofthesouth.com +48335 + RealPage, Inc. + Sloan Ozanne + infosechelpdesk&realpage.com +48336 + Abrumet + Filoretta Velica + filoretta.velica&abrumet.be +48337 + TD Avidis LLC + Pereverzev Sergey + support&avidis.kiev.ua +48338 + Orion Power Systems + Robert Bridenbaugh + rebriden&orionpowersystems.com +48339 + Seldiame Software + Olaf Hernandez Beristain + olaf&seldiame.net +48340 + Public Sector Pension Investment Board + IT Infrastructure + dlgDistList-Infra-Certificates&investpsp.ca +48341 + DevLounge + Nicola Delle Foglie + nicola.dellefoglie&devlounge.it +48342 + Ransnet Singapore Pte Ltd + Ben Hau + benhau&ransnet.com +48343 + Industrial Bank of Kuwait + Mr. Mustafa Gangardiwala + g_mustafa&ibkuwt.com +48344 + SHW Automotive GmbH + Georg Erhard + georg.erhard&shw.de +48345 + IP-Lease BV + Frank Kuipers + frank.kuipers&ip-lease.com +48346 + Elan Audio + Rob Parker + Rob&elan.com.au +48347 + OHB SE + Domenic Abb + domenic.abb&ohb.de +48348 + Medcomsoft Ltd. + Sergey V. Sergeev + sergeev&echo1.ru +48349 + The Scale Factory Ltd + Jack Thomas + jack&scalefactory.com +48350 + Serco, Inc. + Paul Lizer + paul.lizer&serco-na.com +48351 + Beijing Coconet Corporation + tao zhang + hr&coconet.cn +48352 + Levantis AG + Patrik Hurni + lizenzen&levantis.ch +48353 + Neosecure S.A + Ricardo Perez + rperez&neosecure.com +48354 + Qiy Foundation + Bram Neuteboom + webmaster&qiyfoundation.org +48355 + General Motors + Ken Peirce + Ken.Peirce&gm.com +48356 + Salerno Data LLC + Steven Salerno + noc&salernodata.com +48357 + Skogsägarna Mellanskog Ek För + Ian von Essen + ian.vonessen&mellanskog.se +48358 + Hobbylook + Ahmad Tubaishat + Ahmadtubishat&live.com +48359 + Infosec (T) LTD + Joel Wilson + joel.wilson&infosec.co.tz +48360 + Sakura City + Satoshi Atsuta + jyosys&city.sakura.lg.jp +48361 + Kpnetworks Ltd. + Naonobu Yamamoto + info&kpnetworks.jp +48362 + IndraStra Global + Rahul Guhathakurta + info&indrastra.com +48363 + ResoNetz Airfolc Inc. + Yutaka Nakai + nakai&airfolc.co.jp +48364 + SNAL + Romuald Squarzoni + oid&albus.fr +48365 + QWERTY Concepts Inc + Jimmy Kaytovich + jimk&qwertyc.com +48366 + DriveScale, Inc. + Tom Lyon + pugs&drivescale.com +48367 + GOTrust Technology Inc. + Lawrence Lee + lawrencelee&go-trust.com +48368 + Beijing Zrinc Technologies Co., Ltd. 
+ Xiuqin Yao + it&zrinc.cn +48369 + Falcon Technologies LTD + Boaz Dovev + boaz&edig.co.il +48370 + NMB PLC + Joel W. Kazoba + joel.kazoba&nmbtz.com +48371 + Long-Distance Communication Branch + Igor Zinchuk + izin68&gmail.com +48372 + MicroStep - HDO s.r.o. + Michal Liziciar + mliziciar&microstep-hdo.sk +48373 + Kamago + Grégory Oestreicher + greg.iana&kamago.net +48374 + Polarteknik Oy + Mr. Mika Korhonen + mika.korhonen&pmcpolarteknik.com +48375 + Igepa Paper Hungary Kft. + Stankóczi László + l.stankoczi&igepa.hu +48376 + MimerCon + Michael Buchardt + mhb&mimercon.dk +48377 + Linefactory + Peter Philips + support&linefactory.net +48378 + Zwise s.a. + Martin Lamote + martin.lamote&zwise.eu +48379 + Camelot Global + George Adamopoulos + devops-support&camelotglobal.com +48380 + Newbridge Technologies Int. Ltd. + Ryan Low + admin&newbridgewireless.net +48381 + BLS AG + Lorenz Heusser + lorenz.heusser&bls.ch +48382 + Case Tecnologia Ltda + Fernando Brunelli Costa + fernando&casetecnologia.com.br +48383 + McKinsey & Company, Inc. + Scott Stephenson + Scott_Stephenson&mckinsey.com +48384 + STAPRO SLOVENSKO s.r.o. + Adrian Petrik + petrik&stapro.sk +48385 + Audax Electronics Corporation + Marco Tramujas + audax.usa&gmail.com +48386 + MJ Technical Solutions, LLC + Jon McCombs + Jon.McCombs&mjtsolns.com +48387 + Japan Airlines Co.,Ltd. + Kazuhiko Fujiwara + kazuhiko.mm3p&jal.com +48388 + EIDISTA + Fermin Latas + fermin.latas&eidista.com +48389 + TakumiVision Co.,LTD. + Takashi Nakanishi + office&takumivision.co.jp +48390 + Bottomline Technologies + Derik Bibb + dbibb&bottomline.com +48391 + photicsensing + Bruce.Wang + wx89489&163.com +48392 + FilmLight Ltd + Andy Moseley + hostmaster&filmlight.ltd.uk +48393 + Sdx Lab Sdn. Bhd. (formerly 'SDxLab') + Ryan Low + ryan.low&sdxlab.com +48394 + br.Svensson Skog AB + Martin Svensson + martin&brsvensson.com +48395 + Qimtronics + Achmad Anugrah + achmad.anugrah&qimtronics.com +48396 + Sood sales + Akshay Sood + soodsales&outlook.com +48397 + Joma-Polytec GmbH + Peter Rist + peter.rist&joma-polytec.de +48398 + Paranoidlabs + Nadja Reitzenstein + dequbed&paranoidlabs.org +48399 + Kassenaerztliche Vereinigung Sachsen + Volker Hanisch + edvsupport&kvsachsen.de +48400 + Bjernareds Sågverk AB + Janet Fredriksson + janet&bjernared.se +48401 + AJ Vaccines + Jacob Riedel + jacob.riedel&devoteam.com +48402 + Konica Minolta Business Solutions Czech, spol. s r.o. + Sebastian Bušek + sebastian.busek&konicaminolta.cz +48403 + PEQ Services + Pekka Pietikäinen + hostmaster&peq.fi +48404 + Korbank S.A.
+ Konrad Kreciwilk + noc&k.pl +48405 + Orbiss Ltd + Will Colwyn + systems&orbiss.co.uk +48406 + swisspro Solutions AG + Daniel Kasper + daniel.kasper&swisspro.ch +48407 + Open Applications Ltd + Nigel Hanlon + nigel.hanlon&openapp.ie +48408 + Flint International Limited + Joseph Dunne + iana&lambda.tech +48409 + SHENZHEN PHOTON BROADBAND TECHNOLOGY CO., LTD + Xiangshan Yi + xiangshan_yi&photonbroadband.com.cn +48410 + COLEGIO ESTADUAL EURICO BATISTA ROSAS + WILLIAM GERRIT LOS JUNIOR + wjr&williamjunior.com.br +48411 + AB Maa Såg + Anneli Andelen + Anneli&maa.se +48412 + CCEE + Marcelo Molina + marcelo.molina&ccee.org.br +48413 + NSF International + Montana Arble + marble&nsf.org +48414 + Line34 Telefonia e Computação + Alessandro Jorge Correa + alessandro.correa&line34.com.br +48415 + IBM (group CloudMatrix) + David Jimenez + jimenez&us.ibm.com +48416 + Bell Canada + Frederic Bonin + frederic.bonin&bell.ca +48417 + GLCMI Internal Shared Services + Justin Sabourin + justin.sabourin&gwl.ca +48418 + ANF AC United Kingdom, LTD. + Florencio Díaz + fdiaz&anf.es +48419 + Marcia Slosson, MS, LAC, MAC + Marcia Slosson + marciaslossonmslacmac&gmail.com +48420 + Meraskog i Jämtland AB + Mikael Mattsson + mikael&meraskog.com +48421 + Hematology Oncology Associates of Central New York + Brian Barker + bbarker&hoacny.com +48422 + Tech Futures Interactive Inc. + Andrew Gee + agee&techfutures.co +48423 + Cedar Gate Technology + Stephen Zander + stephen.zander&cedargate.com +48424 + Insatech A/S + René Nielsen + rbn&insatech.com +48425 + isboom.com + Esa Ääpälä + aapala&gmail.com +48426 + Lambda Tech Ltd + Joseph Dunne + iana&lambda.tech +48427 + Budget Insight + Messager Jean-Pierre + jpmessenger&budget-insight.com +48428 + MFA Russia + Dmitry Klimentiev + kda&mid.ru +48429 + BELADACI CONSULTING, LLC + STEPHANE BELADACI + STEPHANE&BELADACI.ORG +48430 + Pro-Digital Projetos Eletronicos Ltda + Marco Antonio de Paula Tramujas + pro2&prodigital.com.br +48431 + NetStable, LLC + Julian Yates + julian&netstablellc.com +48432 + 广州程星通信科技有限公司(Guangzhou Starway Communication Technology Co., LTD) + 袁先生(yuan huangxing) + huangxing.yuan&starwaycomm.com +48433 + Wuxi Broadlan Telecommunication Technology Co.,Ltd + Alan Song + wxbrtx&163.com +48434 + praclear + Christian Brenner + chr.bre&outlook.com +48435 + Wuxi Neihua Network Technology Co.,(NHN) + Alan Song + songrr&leihua.com +48436 + Siemens Energy AG - Transmission + Jochen Schaefer + jochen.schaefer&siemens-energy.com +48437 + Ikano Bank + Henrik Klinkvort + hkl&ikano.dk +48438 + Trätransporter i Norrbotten AB + Mattias Markström + mattias.markstrom&bdx.se +48439 + Schönfelder Papierfabrik GmbH + Frank Liebig + frank.liebig&schoenfelder-papierfabrik.de +48440 + 2mt Software GmbH + Michael Wallner + mw00&2mt-software.de +48441 + Gridco Systems + Guruprasad Kulkarni + gkulkarni&gridcosystems.com +48442 + Frankonia Handels GmbH & Co. 
KG + Simon Gottschlich + sgottschlich&frankonia.de +48443 + Verticali LLC + Andrey Zhelezin + a.zhelezin&verticali.ru +48444 + Stefano Canepa + Stefano Canepa + stefano&canepa.ge.it +48445 + Digital Security + Stéphane Jourdois + stephane.jourdois&digitalsecurity.fr +48446 + Slottstornet AB + Victoria Eriksson + victoria.eriksson&slottstornet.se +48447 + Verizon Labs + Vlabs + jayaraj.wilson&intl.verizon.com +48448 + Bricata, LLC + Paul Casto + pcasto&bricata.com +48449 + Harvard Computer Society + Frederick Widjaja + fwidjaja&hcs.harvard.edu +48450 + Timmerkörarna i Norrland AB + Stefan Nordström + stefan.nordstrom&timmerkorarna.se +48451 + Fuzhou In&Pro Information Technology Co., Ltd + inpro sysop + inpro.sysop&gmail.com +48452 + ANF Certification Authority USA, Corp. + Florencio Diaz + fdiaz&anf.es +48453 + Riddarhusförvaltningen Fr Emilie Pipers Donationsfond + Victoria Eriksson + victoria.eriksson&skogstjanst.se +48454 + Riddarhuset O G Paulis Donationsfond + Victoria Eriksson + victoria.eriksson&skogstjanst.se +48455 + Linköpings Skogstjänst AB + Victoria Eriksson + victoria.eriksson&skogstjanst.se +48456 + Wasatornet AB + Victoria Eriksson + victoria.eriksson&skogstjanst.se +48457 + KaX Pvt. Ltd + Samohan Kumar + kaxpvtltd&gmail.com +48458 + Otsuka America Pharmaceutical, Inc. + David Sullivan + david.sullivan&otsuka-us.com +48459 + Condast GmbH + Frank Brieger + frank.brieger&condast.de +48460 + Rotronic AG + Bruno Niklaus + bruno.niklaus&rotronic.ch +48461 + StudentBridge LLC + Sergii Koval + skoval&studentbridge.com +48462 + Insight media Devlopment. + Khandoker Golam Morsheed + gmorsheed&gmail.com +48463 + Fitworks Co., Ltd. + Hisaaki Takeuchi + iana&fit-works.co.jp +48464 + ANF AC Panamá, S.R.L. + Florencio Díaz + fdiaz&anf.es +48465 + Gestión Tributaria Territorial, S. A. + José Joaquín Callado Leal + jcallado&gtt.es +48466 + Chambre des Députés Luxembourg + Nicolas Dubois + chdcontact&chd.lu +48467 + Imperial College Healthcare + Yusuf Mangera + yusuf.mangera&imperial.nhs.uk +48468 + Lianozovo Electromechanical Plant + Rinat Gadelshin + gadelshin.r.n&topaz-atcs.com +48469 + Cancer Care Specialists + Wesley Falconer + wesley.falconer&etransmedia.com +48470 + Örnfrakt + Jan-Eric Lindberg + jan-eric.lindberg&ornfrakt.se +48471 + British Amateur Television Club + Philip Crump + phil&philcrump.co.uk +48472 + swissEmbedded GmbH + Dr. Daniel Haensse + info&swissembedded.com +48473 + mh2net, z.s. + Radim Horak + radim.horak&mh2net.cz +48474 + Klynt Industries + Dave McCarthy + dave&klynt.com +48475 + SIA Rigas udens + Andris Smits + andris.smits&rigasudens.lv +48476 + Zen Swipe, Inc. + Eric Lee + webmaster&zenswipe.com +48477 + Nuix North America Inc + Justin Capella + justin.capella&nuix.com +48478 + Hainan Goodstart Network Technology Co. Ltd. + gong ze + gongze&goodstart.com.cn +48479 + Sunshine Sugar + William Wood + itregistrations&sunshinesugar.com.au +48480 + finally safe GmbH + Dominique Petersen + infra&finally-safe.com +48481 + Falu Energi & Vatten AB + Daniel Ahlberg + daniel.ahlberg&fev.se +48482 + Linaro Ltd + Luca Di Stefano + luca.distefano&linaro.org +48483 + RecordSure Ltd. + Jacint Toth + jacint.toth&recordsure.com +48484 + topsystem Systemhaus GmbH + Karl-Heinz Gross + khg&topsystem.de +48485 + ITEC Training Solutions Ltd + Simon Watts + s.watts&itecskills.co.uk +48486 + Salzbrenner Media + Thomas Baier + t.baier&salzbrenner.com +48487 + Veropharm + Alexandr Simonov + simonov&veropharm.ru +48488 + ACS International Schools Ltd.
+ Graham Mayers + gmayers&acs-schools.com +48489 + ANF AC Chile, Ltda. + Florencio Díaz + fdiaz&anf.es +48490 + Bundesministerium fuer Landwirtschaft, Regionen und Tourismus + Georg Estelmann + georg.estelmann&bmlrt.gv.at +48491 + Strategic Information Technology Ltd. + William MacDonald + billm&stratinfotech.com +48492 + Zequenze + Erasmo Zubillaga + erasmo&zequenze.com +48493 + Naltagrett + Jorgen Bjorck + naltagrett&gmail.com +48494 + Lockstep Technologies + Stephen Wilson + swilson&lockstep.com.au +48495 + Alm. Brand A/S + Rene B. Rosenbeck-Larsen + it.sikkerhed&almbrand.dk +48496 + LOTS Group AB + Fredrik Hjelm + Fredrik.hjelm&lotsgroup.com +48497 + VIDA Energi AB + Magnus Linnér + magnus.linner&vida.se +48498 + VIDA Skog AB + Magnus Linnér + magnus.linner&vida.se +48499 + inovex GmbH + Jan Gehring + jan.gehring&inovex.de +48500 + Ventilatorenfabrik Oelde GmbH + Detlef Wilmsen + detlef.wilmsen&venti-oelde.de +48501 + Wilmers Messtechnik GmbH + Hans Wilmers + hans&wilmers.no +48502 + Startups Venture + Shilles Steven Albert + registrar&shill.es +48503 + Magrathea Laboratories e.V. + Sven Reißmann + sven&0x80.io +48504 + Pulselight Inc + Stuart Jarriel + sjarriel&pulselight.com +48505 + AI2Co + Peter Lasker + P.Lasker&AI2Co.nl +48506 + Tunnel Radio of America + Carlo McKee + carlo.mckee&tunnelradio.com +48507 + o-byte.com GmbH & Co. KG + Martin Weiß + info&o-byte.com +48508 + Trapeze-Elgeba GmbH + Lars Hantschmann + lars.hantschmann&trapezegroup.com +48509 + add solution GmbH + Alberto Mancheno Sanchez + alberto.mancheno&add-solution.de +48510 + Grand Rapids Adventist Academy + Christian McDonald + admin&graa.com +48511 + ApeComm Sdn. Bhd. + Farhah Kamaruzzaman + farhah.zm&gmail.com +48512 + Inspur Group Co.,Ltd. + Chunpeng Mao + Maochp&inspur.com +48513 + Owari Precision Products (India) Pvt.Ltd. + Man Mohan Kumar + mmkumar&oppi.ttp.co.in +48514 + Warkdag AS + Mirko Klaus Ansgar Tasler + mirko&tasler.net +48515 + PACE POWER SYSTEMS PVT LTD + Basavaraja BR + basavaraja.br&pacerenewables.com +48516 + Duerr IT Service GmbH + Jan Skowron + Jan.Skowron&durr.com +48517 + Locatel Flotas, S.L. + Juan Carlos Mendez + jcmendez&locatel.biz +48518 + BYOM Electronics LLC + Oleg Morozov + byom&omino-board.com +48519 + St. Bernards Medical Center + Kevin Hawley + khawley&sbrmc.org +48520 + ADISTA SAS + MUNIER Brice + bmunier&adista.fr +48521 + nCentric Europe BVBA + Robin Leblon + robin.leblon&ncentric.com +48522 + Elisa Eesti AS + Elisa Eesti + aqnomail&icloud.com +48523 + ALTEC S.p.A. + Carmelo Manetta + carmelo.manetta&altecspace.it +48524 + Ping Communication (Switzerland) AG + Markus Goetsch + mg&pingcom.net +48525 + SnapServ Mathis + Pascal Mathis + pascal.mathis&snapserv.net +48526 + Twilio, inc. + Seth Hardiman + shardiman&twilio.com +48527 + LEGO System A/S + Simon Clausen + simon.clausen&lego.com +48528 + IT Niedersachsen + Frank Meyer + frank.meyer&it.niedersachsen.de +48529 + MediaSift Ltd + Gareth Llewellyn + gareth&datasift.com +48530 + OOO "TIUS" + Sergey Mikhaylin + info&tius-project.ru +48531 + FIS + Mark Kaczorowski + mark.kaczorowski&fisglobal.com +48532 + Inlandsfrakt AB + Carina Kristoffersson + carina.kristoffersson&inlandsfrakt.se +48533 + Sony Mobile Communications AB + Security & Enterprise + DL-SELD-securityenterprise&sonymobile.com +48534 + FIT Networks + Oscavo Prata + oscavo&fitnetworks.com.br +48535 + Blue Ridge Concepts, Inc. + Robert M. Gregory, Jr. 
+ gregory&BlueRidgeConcepts.com +48536 + Technische Hochschule Aschaffenburg + Benjamin Heeke + teamrz&th-ab.de +48537 + Defense Information Systems Agency + David Wagner + david.s.wagner.ctr&mail.mil +48538 + ServerCentral + Andrew Meyer + ameyer&servercentral.com +48539 + ED&F Man Capital Markets + Timothy Perry + tperry&edfmancapital.com +48540 + ED&F Man Capital Markets + Timothy Perry + tperry&edfmancapital.com +48541 + BWAY S.r.l. + Claudio Botta + claudio.botta&bway.it +48542 + Chromatic Inc. + Stephen Quebe + stephen&chromatic.ai +48543 + Procesar, S.A de C.V + Jorge Sánchez + jsanchez&procesar.com +48544 + TV-Teknik Jonas Hermansson + Jonas Hermansson + jonas&tvteknik.se +48545 + Triona AB + Fredrik Bratteberg + fredrik.bratteberg&triona.se +48546 + Hans Andersson Recycling AB + Claes Dahlberg + claes.dahlberg&hansandersson.se +48547 + Public Technical Identifiers + Punky Duero + punky.duero&icann.org +48548 + Harbin Yantuo Science and Technology Development Co.,Ltd + Minglin lee + superlml&163.com +48549 + 广州阜月软件有限公司 (Guangzhou fuyue Software Co. Ltd.) + Ansion + 1572309495&qq.com +48550 + Beijing Redlink Information Technology Co., Ltd. + Zhao Zhiquan + zhaozhiquan&redlink-tech.com +48551 + Profound Solutions + Olesov Peter + peter&profound.ru +48552 + ShangHai 30wish information security Limited + wuchong + wuc_sh&30wish.net +48553 + Duet Asset Management Ltd + Davide Ronchi + iana-pen&duetgroup.net +48554 + Church Pension Group Services Corporation + Clayton Crawley + PrivateEnterpriseNumber&cpg.org +48555 + STORMATRIX, Inc. + Mark Mays + mark.mays&stormatrix.com +48556 + bellaflora Gartencenter GmbH + Thomas Hartl + it&bellaflora.at +48557 + OOO LOYALTY PROGRAM Koshelek + Evgeny Ermakov + e.v.ermakov&yandex.ru +48558 + Trinity Grammar School + Mitchell Richters + mrichters&trinity.nsw.edu.au +48559 + Rosenberger Technologies Co., Ltd. + Guozhenjie + Jay.guo&prosetechnologies.com +48560 + Almnäs Bruk AB + Simon Bylund + sb&almnas.com +48561 + Ahlstrom Group + Ahlstrom Hostmaster + hostmaster&ahlstrom.com +48562 + KENDRIS AG + Sacha Verwaaijen + software&kendris.com +48563 + ANEXIA Internetdienstleistungs GmbH + Stephan Peijnik + speijnik&anexia-it.com +48564 + Autoland Deutschland Inh. Wilfried Wilhelm Anclam, e. K. + Mario Neubert + neubert&autoland.de +48565 + NEUBERT-IT Inh. Mario Neubert + Mario Neubert + mn&neubert-it.de +48566 + Guangzhou Shirui Electronics Co., Ltd. + ChenWeilin + chenweilin&cvte.com +48567 + LINET Group SE + Petr Kraus + petr.kraus&linet.cz +48568 + First Orion Corp. + Keith Rogers + krogers&firstorion.com +48569 + Midland States Bancorp, Inc + Tim Krampe + tkrampe&midlandsb.com +48570 + sn3rd llc + Sean Turner + iana-pen&sn3rd.com +48571 + Istanbul Teknik Universitesi (ITU) + Can Sirin + sistemdestek&itu.edu.tr +48572 + Vanti + Mike Brooman + hello&vanti.co.uk +48573 + C-Hack + Web Master + webmaster&c-hack.de +48574 + Quanergy Systems, Inc + IT Administrator + support&quanergy.com +48575 + Experts Enterprises. + Sandeep Kumar + sandeep180782&gmail.com +48576 + GTBilt + Carlos Alvarez + tech&gtbilt.com +48577 + Fail-Safe IT Solutions Oy + Mikko Kortelainen + mikko.kortelainen&fail-safe.net +48578 + Shenzhen KSTAR Science and Technology Co., Ltd + xiao tian + xiaott&kstar.com.cn +48579 + BOMAR, spol. s r.o. + Christian Dergovics + iana-pen&bomar.cz +48580 + Continuity Logic LLC + Casey Friese + casey.friese&continuitylogic.com +48581 + Changhong Network Tech. + Yongjun Rao + yongjun.rao&changhong.com +48582 + GSK #11 Gradskiy + Konstantin M.
Khankin + hc&gsk11.ru +48583 + HTWNET Information Technology Services + Weber, Helge Thorsten + info&htwnet.de +48584 + CineLab + Evgeny Ischenko + cert&cinelab.ru +48585 + jiangsu broadcasting cable information network corporation limited suzhou branch + zhaofeng chen + 280446991&qq.com +48586 + KUNDOXT GmbH + Andre Binnig + andrebinnig&aol.com +48587 + Ring2 Communications LLC + Oleksiy Pavlenko + oleksiy.pavlenko&loopup.com +48588 + Capgemini Next Generation Platform + A.J.A.M. (Rob) van Eerd + rob.van.eerd&capgemini.com +48589 + Tallink Grupp AS + Juhan Tamsalu + juhan.tamsalu&tallink.ee +48590 + Peñasco Valley Telephone Cooperative, Inc. + Zachary Arias + pen&pvt.com +48591 + Borda Technology + Burak Bardak + burak.bardak&bordatech.com +48592 + Capetrust + Arnaud Tardy + oid&capetrust.com +48593 + ECRA SRL + Troisi Pasquale + marketing&ecranet.com +48594 + University Cancer Institute + Bonnie Raines + braines&universitycancerinstitute.com +48595 + ENKON Information Systems Inc. + Richard Ihmels + rihmels&enkon.com +48596 + SKOGsam ab + Joel P Åberg + joel&skogsam.se +48597 + Administrative Computer Consultants Co. + Mike Boyer + boyer&administrative.com +48598 + NetGuardians + Support + support&netguardians.ch +48599 + Zodiac Telecommunications South Africa + Keegan Moodley + zodiac&highveldmail.co.za +48600 + Cyber Advanced Technology, Inc. + Paul Bowman + paul.bowman&ot-ocn.com +48601 + Syadem + Josselin Auguste + contact&mesvaccins.net +48602 + Pacific Hematology Oncology Associates + David Ruiz-Velez + david&phoamd.com +48603 + Systemk + Osamu Takahashi + o.takahashi&systemk.co.jp +48604 + ShenZhen SandStone Data Technology Co.,Ltd + LiXinrong + lixinrong&szsandstone.com +48605 + PSZ + Yuri Azanov + skb162&imf.ru +48606 + ADVENS + POTTIER Frédéric + frederic.pottier&advens.fr +48607 + Navkonzept GmbH + Krystian Gawol + k.gawol&navkonzept.de +48608 + PT. Waruna Nusa Sentana + Des Dulianto + desdulianto&waruna-group.co.id +48609 + Averbis GmbH + Jan Paetzold + jan.paetzold&averbis.com +48610 + Net2Edge Limited + Jon Burke + engineering&net2edge.com +48611 + Locher & Christ GmbH + Markus Kollmann + markus.kollmann&lc-top.de +48612 + R.WEISS GROUP + Helmut Kienle + kienle.h&r-weiss.de +48613 + DTV Innovations, LLC + Benitius Handjojo + bhandjojo&dtvinnovations.com +48614 + Infraxis AG + Jonathan Taylor + itsupport&infraxis.com +48615 + Cancer Care Associates of York + Shauna Herman + sherman&cancercareyork.com +48616 + AWGES + Ânderson Ignácio da Silva + anderson&awges.com +48617 + binsec GmbH + Florian Zavatzki + fz&binsec.com +48618 + revservrd + Bill Huddleston + spaderbaby&gmail.com +48619 + Stream Technologies Ltd + Melissa Jenkins + noc&stream-technologies.com +48620 + BE INVEST International S.A. + Sébastien PASSELERGUE + sebastien.passelergue&be-ys.com +48621 + Dueton Systems s.r.o. + Vasily Goldobin + vasily&dueton.com +48622 + Innovate DC S.A.C. 
+ Carina Estrada + carina.estrada&innovateahora.com +48623 + Ingarps Trävaror AB + Mattias Sälleteg + mattias.salleteg&ingarpstra.se +48624 + Johnson Earls + Johnson Earls + johnson.earls.pen&gmail.com +48625 + Setra Group AB + Mikael Nylén + Mikael.Nylen&setragroup.com +48626 + Duo Security + Nick Soulliere + nsoulliere&duosecurity.com +48627 + CounterCraft + David Barroso + dbarroso&countercraft.eu +48628 + Whanganui High School + Sean Anderson + techs&whs.ac.nz +48629 + ID-ware Deutschland GmbH + Sebastian Fiebig + Administrator&id-ware.com +48630 + DOCAPOST + Nicolas DUMINIL + nicolas.duminil&simplex-software.fr +48631 + AlkSys + AlkSys + info&alksys.com +48632 + Carousel Industries of North America + James Middleton + jmiddleton&carouselindustries.com +48633 + IT-Consulting Gschwendt + Ing. Markus Gschwendt + office&gschwendt.at +48634 + BK TELECOMUNICAÇÕES LTDA ME + José Welliton Sá da Silva + infraestrutura&bktele.com +48635 + Riverside County Information Technology + Jason Smith + jasonsmith&rivco.org +48636 + ewell + hu haifeng + huhaifeng&ewell.cc +48637 + COLEGIO DE NOTARIOS DE LIMA + Gerardo García Martínez + ggarcia&notarios.org.pe +48638 + DOCTORS CANCER CENTER + CESAR MALDONADO, MHMS + dcc2003&gmail.com +48639 + Platformatics + Jon Ford + jford&platformatics.com +48640 + Electronic Technology Inc. + Dave DeLeo + engineering&eti-nj.com +48641 + Det Norske Studentersamfund + Jonas Braathen + jonas&studentersamfundet.no +48642 + Wangarden + Edouard Bonnet + system&wangarden.com +48643 + Graytech Computers + Chris Gray + domain&graytech.com.au +48644 + UNION NATIONALE DES MAISONS FAMILIALES RURALES D'EDUCATION et D'ORIENTATION + HURLAIN Arnaud + union&mfr.asso.fr +48645 + Haldor Topsoe + Kim Bauer + kbj&topsoe.dk +48646 + Aralink Tecnologias de la Informacion SL + Departamento de Administracion + administracion&aralink.com +48647 + Jan Vana + Jan Vana + jan&janvana.cz +48648 + New Mexico Oncology Hematology Center, Ltd + Cookie Santamaria + cookies&nmohc.com +48649 + Practical Design Group, LLC + Tim Paulson + tpaulson&practicaldesign.com +48650 + MAILEVA + Mekki GHERNATI + mekki.ghernati&docapost.fr +48651 + Vint + Rémi Lapeyre + remi.lapeyre&vint.fr +48652 + Lokithor Sécurité + Jacques Beauregard + dtln&netcourrier.com +48653 + HUGO BOSS AG + Stefan Baldus + stefan_baldus&hugoboss.com +48654 + P3KI GmbH + Gregor Jehle + hadez&p3ki.com +48655 + Sky Brasil + Welber Neto + skyengnetwork&sky.com.br +48656 + HighJump Software Inc. + Timothy R. Simmons + Tim.Simmons&Highjump.com +48657 + Tobias Tilgner + Tobias Tilgner + tt&totis.net +48658 + Martin Balint + Martin Balint + martin&balint.cz +48659 + Tecnotel Servizi Tecnologici SRL + Sergio Tosti + noc&tecnoadsl.it +48660 + Netz-Weise + Holger Voges + holger.voges&netz-weise.de +48661 + Dialamerica + Bryan Dorry + bdorry&dialamerica.com +48662 + bDigital Indonesia + Adiwijaya Hadinoto + technology&bdigital.id +48663 + T38Fax Incorporated + Patrice Fournier + patrice.fournier&t38fax.com +48664 + eth2 networks + Martijn Reening + m.reening&eth2.link +48665 + Telcom d.o.o.
+ Sasa Galjak + sasa.galjak&telcom.co.rs +48666 + 24-7 Entertainment ApS + Andrzej Soinski + doms&247e.com +48667 + ibes AG + Marian Wendt + mwendt&ibes.ag +48668 + Czech University of Life Sciences Prague (CULS) + Jan Richter + apacheadmin&czu.cz +48669 + WIPO + Thierry Crouzier + thierry.crouzier&wipo.int +48670 + Nuco Technologies LTD (Host-IT Internet Solutions) + Maxim Wilson (Host-IT Technical Support) + support&host-it.co.uk +48671 + Giax GmbH + Andreas Blohmann + ab&giax.de +48672 + Symbotic LLC + John Cole + iana-admin&symbotic.com +48673 + iCONX solutions + Shane O'Keeffe + shane.okeeffe&iconxsolutions.com +48674 + Jano + Edward Antony Muchiri + ed&jano.co.ke +48675 + Heerscharen.Net + Thies Müller + webmaster&hosting.heerscharen.net +48676 + Zaba Industries + Vsevolod Kozlov + zaba&mm.st +48677 + SEPSA + Vincenzo Auteri + vincenzo.auteri&sepsa.es +48678 + RNAV CDG + CARRIERE Arnaud + dark_mickey95&yahoo.fr +48679 + dataWeapons + songjun choe + 10gic&dataweapons.org +48680 + abas-usa + Jeremy Sheridan + jeremysheridan&abas-usa.com +48681 + Guangzhou Caipin Communication Technology Co., Ltd. + Huan Yong, Qin + 287042313&qq.com +48682 + NETSTARS CO., LTD. + WangKun + wang.kun&netstars.co.jp +48683 + Elastifile + Modi Finkelstein + modi.finkelstein&elastifile.com +48684 + 32lou + arthur li + yunql4&sina.com +48685 + IXI Technology + Bryan Ly + bryanly&ixitech.com +48686 + Pittel+Brausewetter Holding GmbH + Marvin Kirowitz + it-office&pittel.at +48687 + John Kenyon + John Kenyon + etljwk&gmail.com +48688 + Ventacity Systems, Inc. + Jonah Peskin + jp&ventacity.com +48689 + Novar GmbH + Klaus-Dieter Wank + klaus-dieter.wank&honeywell.com +48690 + Teltonika + Darius Jokšas + darius.joksas&teltonika.lt +48691 + OPNT B.V. + Marco Gorter + m.gorter&opnt.nl +48692 + ASP AG + Christof Kraka + christof.kraka&asp-ag.eu +48693 + Sistemas Electrónicos de Potencia, S.A. + José Alberto García García + jagarcia&sepsa.es +48694 + Vision4ce Limited + Philip Park + snmp.admin&vision4ce.com +48695 + Occitaline + Sourinia Onevilayvanh + support&occitaline.com +48696 + infofab GmbH + Frank Flachenecker + frank.flachenecker&infofab.de +48697 + Threat Stack, Inc. + Patrick Cable + pat.cable&threatstack.com +48698 + McHenry Savings Bank + Ken Olsen + kolsen&mchenrysavings.com +48699 + Reversing Labs + Igor Lasic + ilasic&reversinglabs.com +48700 + SwipeSense + Daniel Gunderson + admin&swipesense.com +48701 + Steward Health Care System + Glenn Brown + glenn.brown&steward.org +48702 + ReversingLabs US Inc. + Maarten Boot + cloud&reversinglabs.com +48703 + A.T.I.B. Srl + Antonio Tinti + antonio.tinti&atib.com +48704 + Center of Radiological Medical Physics, USTC + Yifei Pi + pyfdrw&mail.ustc.edu.cn +48705 + Griffith City Council + Mike Gaze + mike.gaze&griffith.nsw.gov.au +48706 + Beijing Feifanshi Technology Co., Ltd. + DANG Fan + dangfan&fayfans.com +48707 + häwa GmbH + Markus Spring + edv&haewa.de +48708 + Rolls-Royce Control Systems + Matthew Bennion + matthew.bennion&rolls-royce.com +48709 + DevTeam SRL + Mauricio Ghiorzi + mauriciog&devteam.com.ar +48710 + Gluu, Inc. + Michael Schwartz + mike&gluu.org +48711 + UBIVELOX Inc. + Jung-Ho Kim + visualguide&ubivelox.com +48712 + van-Eerd.net + Rob van Eerd + rob&van-Eerd.net +48713 + DWA Internet Solutions + Slobodan Senic + office&dwais.rs +48714 + LEMZ R&P Corp. + Rinat Gadelshin + gadelshin.r.n&topaz-atcs.com +48715 + LEMZ R&P Corp. 
+ Rinat Gadelshin + gadelshin.r.n&topaz-atcs.com +48716 + Ceredigion County Council + Jason Taylor + jason.taylor&ceredigion.gov.uk +48717 + Q2 Solutions + Anthony Altemara + anthony.altemara&q2labsolutions.com +48718 + United Power Inc + System Administrator + itregistrations&unitedpower.com +48719 + Digitalor Tech Inc + ZOU BIN + binzou&digitalor.com +48720 + New Mexico Oncology Hematology Consultants, Ltd + Tyler Black + Tyler.Black&pathforward.us +48721 + Chastain-Skillman, Inc. + Matthew Gruenau + mgruenau&chastainskillman.com +48722 + PG Services Sarl + Murat Izli + murat.izli&pg-services.lu +48723 + Institute for Family Health + Muhammad Sulaman + it-com&institute.org +48724 + cCARE:California Cancer Associates for Research and Excellence + Mahshid Albrecht + malbrecht&ccare.com +48725 + PayNet, Inc. + Avi Nutkis + anutkis&paynet.com +48726 + TAP Portugal + Marcio Garcia Marcenari + e-mmarcenari&tap.pt +48727 + COTA Inc. + Doug Leder + dougleder&oncota.com +48728 + CircleX + Richard Dawson + dawsora&gmail.com +48729 + CSRA + Michael Cramer + michael.cramer&csra.com +48730 + HostDir + Antonino Riina + admin&hostdir.net +48731 + Home Branch + Richard Dawson + dawsora&gmail.com +48732 + Fundação Educacional Encosta Inferior do Nordeste + Marcia Regina Diehl + redes&faccat.br +48733 + Excellium Services S.A. + IT Services + it-services&excellium-services.com +48734 + IOxOS Technologies SA + Ralph Hoffmann + ralph.hoffmann&ioxos.ch +48735 + CAE Engineering Kft. + Dr. András Korn + iana-pen&cae-engineering.hu +48736 + INX International Ink Co. + Todd Pharis + todd.pharis&inxintl.com +48737 + EasyNMS + Sławomir Kłos + slawekk87&gmail.com +48738 + Littlepay Pty Ltd + David Johnston + iana.admin&littlepay.com +48739 + 杭州电子科技大学 (Hangzhou University of Electronic Science and Technology) + zhangmeng + 1247882983&qq.com +48740 + Clanjor Prods. + Giumo Clanjor + cjxgm2&gmail.com +48741 + Zitius Service Delivery AB + Mathias Ehrlin + mathias&zitius.com +48742 + RetailPact Group + Thomas Hayes + thomasbhayes&gmail.com +48743 + Cepheid + Alexey Goncharov + it.telecom&cepheid.com +48744 + MiraCosta Community College District + Karen Brown + kbrown&miracosta.edu +48745 + JSC Team-R + Razumov Dmitry + razumd&algspb.ru +48746 + Fondation Officielle de la Jeunesse + Diego Carrillo + diego.carrillo&foj.ch +48747 + Meep Consulting SPRL + Dimitri Lambert + info&meep.be +48748 + Ali + Alireza Mohammadi + alireza83&gmail.com +48749 + Cavisson Systems Inc. + Raj Sajankila + raj.s&cavisson.com +48750 + Traiana Inc. + Andy Kubat + andreask&traiana.com +48751 + Pace IT Systems + Chris Kirkham + chris.kirkham&paceit.co.uk +48752 + Bergs Timber Production AB + Anders Jansson + anders.jansson&bergstimber.se +48753 + ES-prom + Andrey Kostrov + kostrov.a&elsystems.ru +48754 + EnShape GmbH + Dr. Martin Schaffer + m.schaffer&enshape.de +48755 + Armagard Ltd + Craig Richards + craig.richards&armagard.com +48756 + Skogssällskapets Förvaltning AB + Stefan Hillerström + stefan.hillerstrom&skogssallskapet.se +48757 + kubos.org + Jean Brangé + jean&kubos.org +48758 + 3City Electronics Sp. z o.o. + Marek Kuciński + biuro&3CityElectronics.com +48759 + Le EcoSystems Technology Private Limited + Syam Sundar + syamsundar&le.com +48760 + Envall Tech + Henrik Envall + henrik.envall&gmail.com +48761 + SEYR + Administrator + iana-contact&seyr.me +48762 + AiEmTi LLC + Pavel Zakharov + support&clouddc.ru +48763 + Volt Active Data + Ruth Morgenstein + iana&voltactivedata.com +48764 + HeartVista, Inc. 
+ Juan Santos + jmsantos&heartvista.com +48765 + 1705 Purdue + Administrator + 310.9.300&gmail.com +48766 + Sawtelle & Palms + Administrator + dubs11233035&gmail.com +48767 + SDIS DE SAONE ET LOIRE + COUSANCA, Didier + dcousanca&sdis71.fr +48768 + DeviceRadio AB + Christian Klemetsson + christian&deviceradio.com +48769 + Process Automation Solutions GmbH + Marc Haehnlein + marc.haehnlein&pa-ats.com +48770 + Tosibox Oy + Antoine Barbaroux + tosibox.control&tosibox.com +48771 + Diebold Nixdorf + Roman Cinkais + roman.cinkais&dieboldnixdorf.com +48772 + SSP ZhilComResurs + Yuri Gunbin + yuri_gunbin&zhcr.ru +48773 + NETRONIK spolka z o.o. + Tomasz Slaski CEO + iana&netronik.pl +48774 + FlexiLogix + Junaid Mukhtar + jmukhtar&flexilogix.com +48775 + TELSY SpA + ORLANDINI Roberto + roberto.orlandini&telsy.it +48776 + Skellefteå Kraft AB + Jörgen Larsson + dataavd&skekraft.se +48777 + Globalways AG + Hostmaster of the Day + support&globalways.net +48778 + Dana-Farber Community Cancer Care + Michele Schuckel + michele_schuckel&dfci.harvard.edu +48779 + Michel Reynaud Joyeux + Michel Joyeux + michel.joyeux&zoho.com +48780 + Trustonic + Andy Mell + andy.mell&trustonic.com +48781 + blobule.com + Glenn Slayden + boots&blobule.com +48782 + Moelven Skog AB + Bo Nyberg + bo.nyberg&moelven.se +48783 + STG, Inc. + Daniel Bizon + noc&stg.com +48784 + European Securities and Markets Authority + Paul Hussein + systems.admin&esma.europa.eu +48785 + ARGETURK + Coskun Tasdemir + coskun&argeturk.com +48786 + heiland.io + Martin Heiland + martin&heiland.io +48787 + linudata GmbH + Claus Wickinghoff + info&linudata.de +48788 + Bohlins Träexport AB + Erland Forsstrom + Erland&archipelago.se +48789 + Fielmann AG + Pietro Cattide + p.cattide&fielmann.com +48790 + OOO KIT + Anton Abrosimov + admin&itcgrp.ru +48791 + H. Elderson + Herman Elderson + herman&elderson.com +48792 + SCC Technology GmbH + Rene Rimach + rene.rimbach&scc-technology.de +48793 + morgiij, inc. + Clay Hutcherson + clay&morgiij.com +48794 + binnj, inc. + Clay Hutcherson + clay&binnj.com +48795 + Comm5 Tecnologia Ltda + Thiago Augusto Correa + thiago&comm5.com.br +48796 + Tennessee Oncology, PLLC + Toni Perry + tperry&tnonc.com +48797 + Inspur-Cisco Networking Technology Co., Ltd. + Harrison Liu + liushaoy&inspur.com +48798 + AKIPS Pty Ltd + Paul Koch + paul.koch&akips.com +48799 + BiZone LLC + Vyacheslav Tsepennikov + vt&bi.zone +48800 + CSNS + Hong Jianshu + hongjs&ihep.ac.cn +48801 + CAF Signalling + Miguel Villeta + mvilleta&cafsignalling.com +48802 + Panavision International, L.P. + Jeff Wilson + certificates&panavision.com +48803 + VTID + Egide Chiapponi + egide.chiapponi&vtidiksmuide.be +48804 + Univef LLC + Igor Vershinin + i_vershinin&univef.ru +48805 + Shadrinsk State Pedagogical University + Dmitry Slinkin + vc&shgpi.edu.ru +48806 + uznm + Valentin Kulesh + valentin.kulesh&phystech.edu +48807 + Post Holdings + David Combs + david.combs&postholdings.com +48808 + Thales Services France ATHENA (formerly 'THALES SERVICES LAB CSK') + GINTRAND Olivier + olivier.gintrand&thalesgroup.com +48809 + medi-x-Thomas Kadlubowski + Dominik Murr + MurrComputer&gmx.de +48810 + 21st Century Onco EKWR + Mercy Hiller + mercy.hiller&21co.com +48811 + BADU Networks Inc. 
+ Timothy Nelson + tim.nelson&badunetworks.com +48812 + Nerotech Solutions + Matthew Probasco + mprobasco&nerotechsolutions.com +48813 + TechnoBox Tecnologia LTDA + Matheus Cadori + suporte&technobox.com.br +48814 + Max Planck Institute for Polymer Research + Robert Klein + kleinrob&mpip-mainz.mpg.de +48815 + ProsiebenSat.1 Digital GmbH + Lasse Borchard + oid-reg&7tv.com +48816 + Boxholms Skogar AB + Peter Wallin + peter.wallin&boxskog.se +48817 + Rödins Trä AB + Lars Svensson + lars.svensson&rodinstra.se +48818 + WELLTRON ELECTRONICS CO., LTD. + Peter Lee + plp456&welltron.com.tw +48819 + Techno Mathematical + Yasuo Suzuki + suzuki-ya&tmath.co.jp +48820 + VoiSmart S.r.l. + Matteo Brancaleoni + mbrancaleoni&voismart.it +48821 + Haroutioun Shahinian, MD, PA + Haroutioun Shahinian + hsscancer.onc1&yahoo.com +48822 + Interfax - Corporate Information Disclosure Center LLC + Serafima Gorbatova + dc2&e-disclosure.ru +48823 + MAIF + Alice MILANOVA + alice.milanova&maif.fr +48824 + Ponce Hematology Oncology + Vanessa Alvarez + mokin&flatiron.com +48825 + Smurfit Kappa Kraftliner Piteå Aktiebolag + Nicklas Norén + Nicklas.Noren&smurfitkappa.se +48826 + Gabriel Domenech 21st Century Oncology + George Moore + gmoore&flatiron.com +48827 + KGSS + Chris Pavona + chris.pavona&kgssinc.com +48828 + Bollore + LEFEBVRE Francois + francois.lefebvre&bollore.com +48829 + INSYS Therapeutics, Inc. + Adam Brumm + abrumm&insysrx.com +48830 + Green Charge Networks, LLC + Bryan Chow + bchow&greencharge.net +48831 + Clarive Software + Ricardo Martinez + ricardo&clarive.com +48832 + Assense Software Solutions + Tsen Yang Kwong + tsen.kwong&assense.com +48833 + Veas Inc. + Jorge Luis Gandulfo + jorge&veas.tv +48834 + WS Technology GmbH + Thomas Fröhlich + thomas.froehlich&waysense.eu +48835 + Pico Systems Co., Ltd. + Makoto Ito + makoto&picosystems.net +48836 + Evada Technology (Shenzhen) Co.,Ltd + Lin Zhou + zh_zhuanyong&hotmail.com +48837 + FUJIAN NEBULA BIG DATA APPLICATION SERVICE CO.,LTD + 钟军 (Zhang Jun) + zhangzhangwen&xrjiot.com +48838 + Nesodden Kommune + Tobias Wold + tobias.wold&lumagate.com +48839 + Sömlös AB + Stefhan Stjärnås + stefhan&stjarnas.com +48840 + stij.net + Stijn Adriaensens + stijn.adriaensens&gmail.com +48841 + LevelUP Solutions Pty Ltd + Brendon Allen + brendon.allen&levelup.solutions +48842 + Spiral Software Ltd + Dan Martinelli + dmartinelli&spiralsoft.com +48843 + LBS Bayerische Landesbausparkasse + Alexander Eller + alexander.eller&lbs-bayern.de +48844 + Unmanned Innovation D.B.A Airware + Ash Smolenski + it&airware.com +48845 + August Schell + Olavo da Rocha + olavo.rocha&augustschell.com +48846 + WE + Dan Kuzmicki + dan.kuzmicki&we.org +48847 + Reputronix + Jean Claude Lemoyne + jclaude.lz&gmail.com +48848 + FORTUNA GAME a.s. + Vojtech Kotous + kotous.vojtech&ifortuna.cz +48849 + Marcus Seidel Beteiligungs GmbH Businessangels.de + Janko Richter + j.richter&adcell.de +48850 + Logi-Concept Computers + Sylvain Girouard + sylvain38&bell.net +48851 + Flaxen Consulting + Michael P. Gerlek + mpg&flaxen.com +48852 + SL Global Service + Oleksii Shevchenko + support&sgs4business.com +48853 + Jura Elektroapparate AG + Dominik Schweizer + itsupport&jura.com +48854 + SherWeb Inc. 
+ Karl Gagnon + kgagnon&Sherweb.com +48855 + Domsjö Fiber AB + Anna Nylander + anna.nylander&domsjofiber.com +48856 + Lewis-Sigler Institute + Mark Schroeder + mds&princeton.edu +48857 + Hawaii Cancer Care + Phia Magill + phia.magill&hawaiicancercare.com +48858 + Hope Health Center + Lida Arutyunyan + hope_healthcenter&yahoo.com +48859 + Nevada National Security Site + Joshua Moulin + MoulinJS&nv.doe.gov +48860 + “Metinvest Eurasia” LLC + Dmitriy Petrykin + dmitriy.petrykin&metinvest-eurasia.com +48861 + Aysima Bilişim Teknolojileri + Özkan TOPAL + destek&aysima.com +48862 + Alexson Solutions + Adrien Alexson + adrien&alexson.ca +48863 + HM Wallace, Inc. + Michael White + m.white&hmwallace.com +48864 + Serit Skagerak AS + Lars Erik Wold + lew&serit-skagerak.no +48865 + Sava Turizem + Peter Hocevar + peter.hocevar&sava.si +48866 + Enav spa + daniel di rienzo + daniel.dirienzo&technosky.it +48867 + Questa Computing Ltd. + Andrew Johnston + andrew3&andrewj.com +48868 + Bureau de coopération interuniversitaire + Francois Fortier + informatique&bci-qc.ca +48869 + Synergy Embedded + Elias Jose Di Domenico + elias&synergyembedded.com.br +48870 + ECHONET Consortium + Toshiba Corporation : Keiichi Teramoto + keiichi.teramoto&toshiba.co.jp +48871 + XSky(beijing) Data Technology Co.,Ltd. + Liu Bo + liubo&xsky.com +48872 + PT. Cipta Piranti Sejahtera + Ariel Silooy + ariel&bisnis2030.com +48873 + Guangzhou Rojao Technology Co., Ltd + MiaoJun + brucemiao&hotmail.com +48874 + DongHui Group + Xushengyang + dhsc100&163.com +48875 + Fast National University + Rizwan Saeed + rizwan.saeed&nu.edu.pk +48876 + Fatbat + Fatih BATUM + fatih&batum.gen.tr +48877 + thomas beteiligungen GmbH + Stefan Boemer + pki&thomas-gruppe.de +48878 + TOP TECHNOLOGIES CONSULTING GmbH + IT-ServiceDesk + it-servicedesk&toptechnologies.de +48879 + Centre Patronal + LERESCHE François + licence&centrepatronal.ch +48880 + The Sigma Financial Group Limited + Karl Russell + karl.russell&sigfin.co.uk +48881 + MTRLC LLC + Hume Vance + humev&motorolacable.com +48882 + L-TECH Corp. Networks + Jong-Suk Jung + jsj&ltechnology.co.kr +48883 + Universidad de Murcia + ATICA - Sección telemática + t&um.es +48884 + Skyriver + John Neubauer + jneubauer&skyriver.net +48885 + Tsuzuki Denki Co. Ltd. + Hiroshi Fujita + fujita-h&tsuzuki.co.jp +48886 + NPF Sibpeleng Ltd. + Alexei Bazlaev + lexus&sibpeleng.ru +48887 + 东方网力科技股份有限公司南京研发中心 (Oriental Network Technology Co., Ltd. Nanjing R & D Center) + 丁伟 (Ding Wei) + dingwei&netposa.com +48888 + SSLINK TECHNOLOGY (BEIJING) CO., LTD. + Jackie Deng + jackie.deng&sslink.com.cn +48889 + SilicoWolf (Pty) Ltd.
+ Andrew Broekman + technical&silicowolf.com +48890 + Huels Unternehmensgruppe + Carsten Theißing + carsten.theissing&dihug.de +48891 + EVN AG + IT Benutzerservice + ius.lizenzen&evn.at +48892 + Administration des services de secours + Jerry Jacobini + itmgmt&secours.etat.lu +48893 + Bornemann AG + Johannes Rehm + jre&bornemann.net +48894 + Banco BPI, SA + Jorge Miguel Fernandes Carvalho Pinto + jorge.miguel.pinto&bancobpi.pt +48895 + Netcompany A/S + Aya Nielsen + itservices&netcompany.com +48896 + Ironwood Physicians, PC + Becky House + rhouse&ironwoodcrc.com +48897 + /ETC - Martin Misuth + Martin Misuth + etc&ethome.sk +48898 + Software Freedom Law Center + Daniel Gnoutcheff + root&softwarefreedom.org +48899 + VoiceOverNet + Alexis Fidalgo + afidalgo&voiceovernetinc.com +48900 + Guaranty Bank and Trust Company + Casey Boyd + ncc&guarantybankco.com +48901 + DOTSENSEI LTD + Iliya Bazlyankov + iliya&dotsensei.com +48902 + Shenzhen Puduan Technology Co., Ltd. + Yang Jin + yangjin&puduantech.com.cn +48903 + Qualvision + enjun.zhao + enjun.zhao&qualvision.cn +48904 + AB Gustaf Kähr + Dahlgren Pär + par.dahlgren&kahrs.se +48905 + Falan Energiflis AB + Michael Lindström + michael&falan.se +48906 + ConiferSoft AB + Björn Spets + bjorn.spets&conifersoft.com +48907 + Ter Hell & Co. GmbH + Martin Kroll + m.kroll&tergroup.com +48908 + girtech srl + Claudio Girlanda + cgirlanda&girtech.com +48909 + SDEL Contrôle Commande + Valentin Brehier + support.rd&sdelcc.com +48910 + Rheinische Fachhochschule Köln gGmbH + Philipp Thomas + philipp.thomas&rfh-koeln.de +48911 + DS Norden A/S + Peter Frost + vesselit&ds-norden.com +48912 + Thismonkey IT Pty. Ltd. + Scott Aitken + iana02&thismonkey.com +48913 + Haussli + John Heasley + heas&haussli.com +48914 + Conductor Inc. + Donald Stahl + dstahl&conductor.com +48915 + SPG International, LLC + Dave Bicanic + it.tech&spgusa.com +48916 + ITS-Network + Sven Schertel + sven.schertel&schertel.net +48917 + CPI Security + Steve Butkovich + itsoftware&cpisecurity.com +48918 + Dothan Hematology & Oncology + Tonya Odom + tonya&dothanhemonc.com +48919 + Bancroft + Andre Bodden + Andres.Bodden&Bancroft.Org +48920 + Identos Inc. + Brion Muldoon + brion&identos.com +48921 + Mercer Bucks Hematology Oncology + Stefanie Callahan + stefanie&mbhemonc.com +48922 + Thales Australia + Jeremy Kerwin + jeremy.kerwin&thalesgroup.com.au +48923 + Icomer, Inc + Koh Sung Hyuk + shkoh&icomer.com +48924 + HIST + Joung minsoub + mjoung&hist.co.kr +48925 + Invite Services + Frey Mansikkaniemi + iana&accounts.invite.hk +48926 + OOO Tsentr Sistem Svyazi + Egor Bolshedvorsky + info&da-tele.com +48927 + Circle B + Menno Kortekaas + menno&circleb.eu +48928 + 深圳市视游互动科技有限公司 (Shenzhen City videoseeing Interactive Technology Co., Ltd) + Jacky yi + yirui&videoseeing.com +48929 + DBLabs + Yiorgos Adamopoulos + adamo&dblabs.com +48930 + IFRASS + Jeremie Pilette + j.pilette&ifrass.fr +48931 + Assecor GmbH + IT-Support + It-support&assecor.de +48932 + Secure Systems & Technologies + Andrew Harrison + andrew.harrison&sst.ws +48933 + BIT + Teun Vink + teun&bit.nl +48934 + Vevida + Onno Molenkamp + onno&vevida.nl +48935 + PumpCo Ltd + Andrew Lawrenson + andy.lawrenson&pumpco.co.uk +48936 + OMEGA Ltd + Christian Ritter + webmaster&omega.ch +48937 + Polyconseil SAS + Raphaël Barrois + iana-oid&polyconseil.fr +48938 + Denys Berkovskyy + Denys Berkovskyy + den.kiev&gmail.com +48939 + Vevida + Jacco Koning + jacco&vevida.com +48940 + Controllis + Mark Anderson + mark.anderson&controllis.com +48941 + Schleupen AG + Dr.
Peter Wenderoth + peter.wenderoth&schleupen.de +48942 + Synaptics Inc + Preetham Nayak + preetham.nayak&synaptics.com +48943 + Ampetronic Ltd + Russell Simpson + russell.simpson&ampetronic.co +48944 + Lietuvos vyriausiojo archyvaro tarnyba + Vaidotas Ramonas + v.ramonas&archyvai.lt +48945 + Rundvirke Skog AB + Tomas Berg + tomas.berg&rundvirkeskog.se +48946 + Aktiebolaget Karlaträ + Erik Kjellberg + erik&karlatra.se +48947 + ATMC Automação e Comunicação Ltda + Marcos Pereira + marcos&atmc.com.br +48948 + TCPixel, LLC + Brian Cavanaugh + brian&tcpixel.com +48949 + At4wireless + Alexander Quesada López + aquesada&at4wireless.com +48950 + LogDNA + Mike Hu + ops&logdna.com +48951 + UNION TANK Eckstein GmbH & Co. KG + IT-Technik + pki&uta.com +48952 + ATA Timber AB + Jonas Hafmar + jonas&ata.nu +48953 + UbuntuNet Alliance + Chris Rohrer + chris.rohrer&ubuntunet.net +48954 + KosmoNeko OU + Hongchuan Sun + ieb&outlook.my +48955 + Schoeller Technocell GmbH & Co KG + Michael Moehlmann + MMoehlmann&Felix-Schoeller.com +48956 + T4MGroup.com + Thomas Nierychlo + thomas.nierychlo&trans4mation.de +48957 + awaii.net + Jesper Nøhr + jesper&awaii.net +48958 + Trinity Industries, Inc. + Anthony Noonan + Anthony.Noonan&trin.net +48959 + Cruise Engineering Management Consulting + Stanley Cruise + scruise56&gmail.com +48960 + SoftwareONE AG + David Tannheimer + david.tannheimer&softwareone.com +48961 + struktur AG + Joachim Bauch + bauch&struktur.de +48962 + Unilogic Networks B.V. + Cliff Albert + cliff&unilogicnetworks.net +48963 + Gällö Skog AB + Hans Lantz + hans.lantz&galloskog.se +48964 + VŠĮ "Šviesos kūgiai" + Anatolij Škodin + iana&sviesoskugiai.lt +48965 + eMudhra Limited + Vijay Kumar M + Vijay&emudhra.com +48966 + Nils Kaczenski + Nils Kaczenski + nils&kaczenski.de +48967 + ZincFive, Inc. + Michael P. Wells + mwells&zincfive.com +48968 + akquinet AG + Matthias Timm + matthias.timm&akquinet.de +48969 + TK-Schulsoftware GmbH & Co. KG + Tobias Knipping + knipping&tk-schulsoftware.de +48970 + Navipoint Genomics LLC + Evan Cofsky + evan&navipointgenomics.com +48971 + PSYSTEME GmbH + Reiner Pröls + proels&psysteme.de +48972 + Soulssoft + Fischer Romain + romain.fischer&soulssoft.com +48973 + Menzel IT Services + Daniel Menzel + service&menzel-it.net +48974 + Arte y Calidad en Papel,Artepapel,S.A. + Fernando Araujo + informatica&artepapel.com +48975 + Ulmart RSK Ltd. + Aleksey Bondarenko + ianaoid&ulmart.ru +48976 + MURATA SYSTEMS,LTD. + Kazuki Kariya + kazuki.kariya&jhb.muratec.co.jp +48977 + Corsham Technologies, LLC + Bob Applegate + bob&corshamtech.com +48978 + Sawwave + Yeoncheon Yi + yeoncheon&sawwave.kr +48979 + 帝信科技股份有限公司 (DIXIN Technology Co., Ltd) + Charlie Li + li.changli&bendis.cc +48980 + TCL-IMAX + Xiaofeng Wu + xfwu&tcl-imax.com +48981 + Eigenexus Incorporated + Dennis Hollenbeck + dennis.hollenbeck&eigenexus.net +48982 + Navitel s.r.o. + Alexey Morozov + morozov&navitel.ru +48983 + Open Consent Group + Mark Lizar + info&openconsentgroup.com +48984 + Athene Lebensversicherung AG + Christian Pöll + christian.poell&athene.de +48985 + Netresec AB + Erik Hjelmvik + info&netresec.com +48986 + LLC "INVEST-M" + Kirill Nastasin + nk&invm.net +48987 + SkyDNS LLC + Dmitriy Vostretsov + mail&skydns.ru +48988 + Streamonix Ltd + DNS Manager + dns_manager&streamonix.com +48989 + Ture Johanssons Trävaru AB + Renata Sorockova + renata&tujo.se +48990 + Intesi Group S.p.A. + Francesco Barcellini + fbarcellini&intesigroup.com +48991 + Aequitas Software GmbH & Co. KG + Dr.
Burkhard Mars + burkhard.mars&aequitas-software.de +48992 + Agricultural Industry Electronics Foundation e.V. + Marco Brück + marco.brueck&aef-online.org +48993 + MKB Webhoster Limited + Michel Dekker + michel&mkbwebhoster.com +48994 + AGELEC + Guillaume Guerin + guerin&agelec.fr +48995 + Lumeta Corporation + Lumeta Support + lumetait&lumeta.com +48996 + HOPE Cancer Center of East Texas + Fred L Ernest + flernest&hopecancertexas.com +48997 + Colorado Blood Cancer Institute + Peter Paturynski + Peter.Paturynski&HealthONEcares.com +48998 + Colorado Blood Cancer Institute + Peter Paturynski + Peter.Paturynski&HealthONEcares.com +48999 + MoraMedSoft + Enn Maripuu + enn.maripuu&telia.com +49000 + "Zheldorconsulting", Ltd. + Zakharov Marat + software&ocv.ru +49001 + KHADAMAT NOVIN DADEAVARZI SADAD + Mehdi Khademi + m.Khademi&sadadpsp.ir +49002 + ULTRA SA + Fabio Ferreira + sistemas&alarmasultra.com +49003 + Gen Re + Pratap Sahoo + Pratap.sahoo&genre.com +49004 + EGate Networks Inc. + Matthew Gamble + mgamble&egate.net +49005 + A&H Software House, Inc. + Sergey Gorbachev + sergey.gorbachev&luxriot.com +49006 + DHARA CONSULTING GROUP, INC + SASTRY DHARA + SASTRY&DHARA-IT.COM +49007 + YAMABUN ELECTRONICS CO., LTD. + Masaru Iguchi + yamabun&mxa.mesh.ne.jp +49008 + Derome Timber AB + Helena Andersson + helena.andersson&derome.se +49009 + THE PUBLIC AUTHORITY FOR CIVIL INFORMATION + Tareq Al-Rashed + tareqr&paci.gov.kw +49010 + Rayzon Technologies AG + Marc Danzeisen + marc.danzeisen&rayzon-technologies.com +49011 + Tanium Inc. + Justin Burke + jburke&tanium.com +49012 + Indasys + Dominik Appich + Dominik.appich&indasys.de +49013 + Choice Cancer Care + Joseph P. Gilio, PhD + j.gilio&choicecancercare.com +49014 + Stibo Complete AB + Palle Jacobsen + paja&stibo.com +49015 + HEXBITS TECNOLOGIA LTDA + Carlos Eduardo F. Magnani + carlos.magnani&hexbits.com.br +49016 + Build Informed GmbH + Peter Hirn + peter+iana&bim.ag +49017 + Yukoo Limited + Greg Newton-Ingham + greg&yukoo.co.uk +49018 + Assmann Télécom + Dubus Philippe + contact&groupe-assmann.fr +49019 + ExaScaler Inc. + Yuki Yamaura + yamaura&exascaler.co.jp +49020 + Exaphotons Co., ltd. + Liu Yan + liuyan1001&exaphotons.com +49021 + Saturn Imaging Inc. + Alma Su + almasu&saturnimage.com.tw +49022 + China Mobile(Suzhou) Software Technology Co,Ltd + Jiang Zhaohai + sdlyjzh&126.com +49023 + Universitätsbibliothek Johann Christian Senckenberg + Risse, Thomas + T.Risse&ub.uni-frankfurt.de +49024 + Deutsche Gesellschaft zum Bau und Betrieb von Endlagern fuer Abfallstoffe mbH + DBE Technical Support + info&dynexo.de +49025 + ISPAS AS + Anders Rosnes + ar&ispas.no +49026 + DEG - Deutsche Investitions- und Entwicklungsgesellschaft mbH + Pascal Hansmann + nwm&deginvest.de +49027 + Berner Trading Holding GmbH + Hans-Georg Schweikert + iana_pen&berner-group.com +49028 + Värmevärden AB + Gustaf Molin + gustaf.molin&varmevarden.se +49029 + Seven Solutions S.L + Rafael Rodriguez + sevensols&gmail.com +49030 + GST + Gregor Stever + pen&stever.de +49031 + Walhalla u. Praetoria Verlag GmbH & Co. KG + Hans Schwendner + schwendner.hans&walhalla.de +49032 + Telecom North America Inc. + Roch-Alexandre Nominé + roch&telna.com +49033 + Uptake Technologies, Inc.
+ Koushik Subramanian + koushik.subramanian&uptake.com +49034 + PROFEN ILETISIM + AHU ATAKAN + ahu.atakan&profen.com +49035 + Seattle Radiologists APC + Casey Allard + it&searad.com +49036 + LUSINI + Kilian Gumpp + security&lusini.com +49037 + BrByte + Luiz Fernando Souza Softov + softov&brbyte.com +49038 + Klinikum Dortmund + Jens Leuschner + jens.leuschner&klinikumdo.de +49039 + Teslonix Inc + Alaaedeen Aglan + aaglan&teslonix.com +49040 + Pharm-IT GmbH + Florian Schindler + cert&pharmit.de +49041 + Betc digital + florent Peyron + florent.peyron&ext.betc.com +49042 + Blue Pillar + Mike Rooks + mike.rooks&bluepillar.com +49043 + TeamViewer GmbH + Christoph Kohler + IT&TeamViewer.com +49044 + NewPace Communications Inc + Gavin Murphy + gavin.murphy&newpace.com +49045 + JoyMoe Interactive Entertainment Limited + Chino Chang + chino&joymoe.com +49046 + Faria Education Group Limited + Darrin Wortlehock + operations&fariaedu.com +49047 + Ville de Québec + Bruno Gagnon + bruno-b.gagnon&ville.quebec.qc.ca +49048 + AFONWeb + Alistair Fenning + pen.admin&afonweb.co.uk +49049 + Oster Consulting + CJ Oster + iana-pen&cjoster.com +49050 + PRINT SOLUTION + Naveen Sharma + print24solution&gmail.com +49051 + IKEM AD + Stefan Kirchev + stefan.kirchev&gmail.com +49052 + Trätåg AB + Olle Pettersson + mats.jannes&tieto.com +49053 + Cesare Ceneri + Cesare Ceneri + cesare.ceneri&gmail.com +49054 + Koenigin-Luise-Stiftung + Tobias Seifert + seifert&kls-berlin.de +49055 + AOIFES + Angel Suero + angel.suero&aoifes.com +49056 + 4SH FRANCE + Roux Nicolas + nicolas.roux&4sh.fr +49057 + invenio GmbH Engineering Services + Andreas Vogt + andreas.vogt&invenio.net +49058 + Glookast LLC + Felipe Santos + felipe.santos&glookast.com +49059 + Cronoburn + Eric Goerge + cronoburn&gmail.com +49060 + Martinsons Såg Aktiebolag + Anders Ringsell + anders.ringsell&martinsons.se +49061 + Agree Technology + Zhang Cong + zhangcong02&agree.com.cn +49062 + William Robinson + William Robinson + willrobi&shaw.ca +49063 + ALTYS TECHNOLOGIES + GELLY Julien + julien.gelly&altys-tech.net +49064 + Astyx GmbH Communication & Sensors + Roland Stigge + r.stigge&astyx.de +49065 + Gruppo Servizi Informatici s.r.l. + Nazzareno Sileno + nazzareno.sileno&grupposi.eu +49066 + Capesesp - Caixa de Assistencia e Previdencia + Marcos Menegazzo + marcos.menegazzo&capesesp.com.br +49067 + Beaufort - Jasper Water & Sewer Authority + Phillip McCright + phillipm&bjwsa.org +49068 + SGCA-THD + Richard Darrieutort + richard.darrieutort&treshautdebit.com +49069 + ICT Berufsbildungscenter AG + Kevin Wyer + kevin.wyer&bbcag.ch +49070 + TELEFONICA DE ESPAÑA SAU + Guillermo Gavilan Montenegro + guillermo.gavilanmontenegro&telefonica.com +49071 + D2slink Systems + Jose Vivero + j.vivero&d2slink.com +49072 + AIG Wilton + Jack McKinney + WBPServerAppSupport&aig.com +49073 + Cutler Group, LP + Dave Preston + dave.preston&cutlergrouplp.com +49074 + Blue Cross Blue Shield of North Dakota + Jeremy Boelter + jeremy.boelter&bcbsnd.com +49075 + Samlex America Inc + Michael Hamanishi + mike&samlexamerica.com +49076 + Itawamba Community College + Jason Guntharp + jwguntharp&iccms.edu +49077 + JGA Skog AB + Andreas Grugiel + ag&jga.se +49078 + Skånetimmer Bioenergi AB + Andreas Grugiel + ag&jga.se +49079 + Skånetimmer AB + Andreas Grugiel + ag&jga.se +49080 + Allianz pojistovna, a.s.
+ Stanislav Bednar + OID-admin&allianz.cz +49081 + Lógica Sistemas de Informação LTDA-ME + Daniel Passos Martins + mib&logicasistemas.com.br +49082 + Modulation Index, LLC + Greg Ogonowski + greg&indexcom.com +49083 + Avon Maitland District School Board + Dan Root + dan.root&ed.amdsb.ca +49084 + BrByte + Luiz Fernando Softov + softov&brbyte.com +49085 + DETEC + Nestor Martinez + nestor.martinez&detecmx.com +49086 + CORUSCANT INVESTMENT GROUP AS + Thomas Ryste + thomas&ryste.net +49087 + Waterside Consulting, Inc + Theodore S Kapela + IANA&WatersideConsulting.com +49088 + D.S. Group Ltd. + Yemets Andriy + cio&dsg.org.ua +49089 + ESKtrade UG (haftungsbeschraenkt) + Mathias Kowalkowski + mk&esktrade.com +49090 + My-MTC.NET + Thomas Lessel + t.lessel&gmx.de +49091 + Eastern Connecticut Hematology and Oncology + Anne Slam + aslam&echoct.com +49092 + One Call Away, Inc. + Christian Stuck + noc&onecallaway.biz +49093 + Xi'an HISU Multimedia Technology Co., Ltd. + fu yinghu + fuyh&chinahisu.com +49094 + FASTWEL Group Co. Ltd. + Alexander Amelkin + iana&fastwel.ru +49095 + Swedish National Agency for Education + Tomas Lindhoff + tomas.lindhoff&skolverket.se +49096 + KPCS CZ + Jakub Urban + urban&kpcs.cz +49097 + EVTEC AG + Markus Kramis + evtec&evtec.ch +49098 + NP ZAO "REKO-VEK" + Andrey Mazilov + amazilov&reko-vek.ru +49099 + GBLabs Ltd. + Alex Richman + a.richman&gblabs.co.uk +49100 + ce - corporate education GmbH + Juan Luis Lopez Lopez + jlopez&ce.de +49101 + EBS BrokerTec + Gil Kaspi + gil.kaspi&ebs.com +49102 + INEL TEHNIK + Nikola Sapundjiev + nikola&inel.net.mk +49103 + AMD + Dmitry Sabaev + dsabaev&datamodel.ru +49104 + Invenia AS + Trygve Sanne Hardersen + dns-admin&invenia.no +49105 + tetrardus.net + Paul McMath + paul.mcmath&gmail.com +49106 + Hindog.com + Aaron Hiniker + hindog&gmail.com +49107 + Electronic Applications, Inc. + Russ Churchill + russc&electronicapplications.com +49108 + Gustafsborgs Säteri AB + Mikael Jönsson + jnm&gustafsborg.se +49109 + Rörvik Skog + Linus Swanberg + linus.swanberg&rtimber.se +49110 + Millenium Internet Exchange + Keshwarsingh Nadan + kn&millenium.net.mu +49111 + Tigera, Inc. + Karthik Krishnan Ramasubramanian + tech-admin&tigera.io +49112 + Advasol + Markus Buchholz + markus.buchholz&advasol.de +49113 + ITooLabs + Alexey Naidyonov + alexey.naidyonov&itoolabs.com +49114 + simplicity wins + Raphael Rojas + pen&simplicity-wins.de +49115 + GreenAnt + Frank Giorlando + ldap&greenant.net +49116 + 天津蓝天科技股份有限公司 (Tianjin Blue Sky Technology Co., Ltd) + chengjun pan + panchj&zanpu.com +49117 + Micronet Communications Inc. + Albert Feng + albert&micronet.com.tw +49118 + Utilex + Alexey Khrestolyubov + lex&utilex.ru +49119 + Setra Trävaror + Mårten Stjernberg + Marten.stjernberg&setragroup.com +49120 + pureLiFi Limited + Ron Schaeffer + ron.schaeffer&purelifi.com +49121 + Legislative Assembly of British Columbia + Graeme Brown + Graeme.Brown&leg.bc.ca +49122 + Antheus Telecom Ltd + Nik Middleton + nik.middleton&antheus.co.uk +49123 + Turbonomic, Inc. + Yuri Rabover + yuri.rabover&turbonomic.com +49124 + Hematology Oncology Associates Of Brooklyn + Tammi Ramos + Tramos&brooklynoncology.com +49125 + Xoriah Solutions Inc. + Wilson Fung + wilson&xoriah.com +49126 + Sudo Security Group, Inc + William Strafach + will.strafach&sudosecuritygroup.com +49127 + Musée Guérin + JACQUES CORNILY + cornily&gmail.com +49128 + PromKomplektServis + Michael + itpks&pks.su +49129 + finke Das Erlebnis-Einrichten GmbH & Co.
KG + Michael Vogt + it-srv-net&finke.de +49130 + access fintech + steve fazio + steve&access-fintech.com +49131 + OWLCO + Alexey Sovenko + alexey.sovenko&owlco.ru +49132 + Globility Limited + Oliver Kemmis + okemmis&globility.co.uk +49133 + Stiegele Datensysteme GmbH + Felix Puchinger + felix.puchinger&stiegele.eu +49134 + BKS Systems, Inc. + Brian Strelioff + BKStrelioff&Hotmail.com +49135 + Carbon Soft Ltd + Osintsev Sergey + sergey&carbonsoft.ru +49136 + ALGcom + Lissandro Gerhardt + lissandro&algcom.com.br +49137 + Ljusnans Virkesfrakt AB + Lars Svedåker + lvfrakt&lvfrakt.se +49138 + bjut, Inc. + qian yong + qianyong&mail.taiji.com.cn +49139 + First State Investments (UK) Limited + Tom Lloyd + PKIAdministrators&firststate.co.uk +49140 + exands + wei zhang + zw&exands.com +49141 + VAG-Armaturen GmbH + Ibanez Jose-Manuel + it&vag-group.com +49142 + Gemalto AG Switzerland + Michael Lehmann + michael.lehmann&gemalto.com +49143 + cryptosource GmbH + Dr. Falko Strenzke + fstrenzke&cryptosource.de +49144 + AB Högland Såg & Hyvleri + Ulrika Rehn + ulrika&hoglandssagen.se +49145 + IHK Nürnberg für Mittelfranken + Mirko Hoffmann + admins&nuernberg.ihk.de +49146 + bpost banque SA - bpost bank NV + Sacha Heinen + ictsecurity&bpostbank.be +49147 + Bexen Cardio + Estefania Gutierrez + egutierrez&bexencardio.com +49148 + Civis Grand Casino Kft. + Szendrei Attila + online&grandcasino.hu +49149 + Deutsche Gesellschaft zum Bau und Betrieb von Endlagern fuer Abfallstoffe mbH (DBE) + IT-Service + it-service&dbe.de +49150 + Vertiv Co + John Bogdan + john.bogdan&vertivco.com +49151 + California Credit Union + Charles Lacuesta + charlesl&californiacu.org +49152 + BlueArchive + Jeff Flowers + jflowers&bluearchive.com +49153 + Stratos + Wassilios Stratos + wst&stratox.de +49154 + Nordic Automation Systems + Viljo Veesaar + viljo&nasys.no +49155 + jusst technologies GmbH + Julian Scheel + jscheel&jusst.de +49156 + Canham + Paul Canham + paul&canham.eu +49157 + Sylpheo + Mathias Bazire + mathias.bazire&sylpheo.com +49158 + Cancer Center of Southern California/Sarcoma Oncology Research Center + Victoria Chua + vchua&sarcomaoncology.com +49159 + Milai Digital + Keisuke Watanabe + k-wata&mail.milai-digital.com +49160 + Forssjö Pellets AB + Tina Gyllengahm + tina.g&sandasa.se +49161 + CEIA S.p.A. + Mr. Riccardo Bruni + rbruni&ceia-spa.com +49162 + Team GeoLook + Usanov Denis + team&geolook.me +49163 + NES Co.,Ltd. + YOSHIHIRO OHURA + ooura&neskk.co.jp +49164 + Japan Novel Corporation + Mari Miyata + mari.miyata&jnovel.co.jp +49165 + JUNI Software SAS + Julien Blitte + julien.blitte&juni-soft.fr +49166 + Kirale Technologies S.L. + Manuel Amutio + mamutio&kirale.com +49167 + Andreas Terpotiz + Andreas Terpotiz + andreas&terpotiz.net +49168 + Clear Guide Medical Inc. + Philipp Stolka + admin&clearguidemedical.com +49169 + Zhejiang Chuangyi Optoelectronics CO.,LTD. + Zhang Baoyin + byzhang&cykj-tech.com +49170 + Dhaawat Web Services + Suchitra Reddy + suchi.reddy1196&gmail.com +49171 + UCS Technology Services + Edward Hinchcliffe + InetAdmin&ucsts.com +49172 + Eurowings Aviation GmbH + Matthias Bolte + appwin&eurowings.com +49173 + PTV Group + Klaus Fronius + it-services&ptvgroup.com +49174 + Bogadi + Marc Bogadi + marc&bogadi.de +49175 + Galapagos Linux Foundation + Elizabeth Myers + elizabeth&glpgs.io +49176 + CCMI + IT Administrator + itadm&ccorpusa.com +49177 + Capital Vision Services, LLC + Steven R.
Hensley + shensley&myeyedr.com +49178 + End 2 End Technologies, LLC + Greg Crow + info&e2etechinc.com +49179 + xmachina GmbH + Klaus Mueller + orders&xmachina.de +49180 + hoge.se + Kenichi Harada + postmaster&hoge.se +49181 + MAGFest + Joe Cooter + joe.cooter&magfest.org +49182 + Flughafen Köln/Bonn GmbH + René Koch + Rene.Koch&koeln-bonn-airport.de +49183 + Codeacious Pty Ltd + Glenn Schmidt + glenn&codeacious.com +49184 + IT PRO Consulting and Training + Adam Jakubiec + adam.jakubiec&itproszkolenia.pl +49185 + Quasar EDV-Informationsges. mbH + Raymond Tuleweit + raymond.tuleweit&quasar-edv.de +49186 + Cinedom Kinobetriebe GmbH + Raymond Tuleweit + raymond.tuleweit&quasar-edv.de +49187 + Wausau Supply Co + System Admin + sysadmin&wausausupply.com +49188 + O3cv + Ricardo Vieira + contato&o3cv.com.br +49189 + LDA Technologies + Vahan Sardaryan + info&ldatech.com +49190 + Mothic Technologies LLC + Brandon Martin + administrators&mothictech.com +49191 + LCA Systems + Lucas Coelho de Almeida + atendimento.lca.systems&gmail.com +49192 + CM IT Certification Authority + Corrado Mulas + ca&casamulas.it +49193 + Archivio Digitale - Casa Mulas + Giovanni Mulas + archivio&casamulas.ml +49194 + multipurpose center of Irkutsk region + Sergey Starovoytov + s.starovoytov&mfc38.ru +49195 + Deutsche Messe + Thomas Howind + pki-iana&messe.de +49196 + First Solutions Sistemas de Informação S.A. + Tiago Oliveira + tiago.oliveira&first-global.com +49197 + Orange System Group, CJSC + Paul Vavilov + pv&orangesystem.ru +49198 + Domination + ALEKSEY BAYDAROV + 1&networkvideo.ru +49199 + WONDERS INFORMATION CO., LTD + zhimin Li + lizhimin&wondersgroup.com +49200 + Vasamed, Inc + James Parr + jparr&vasamed.com +49201 + ANF Autoridad de Certificación, S.L. + Florencio Díaz + fdiaz&anf.es +49202 + S.C. Mindsentry S.R.L. + Alexandru Culea + alexandru.culea&mindsentry.com +49203 + Dirk Gerbig, EDV & IT-Dienstleistungen + Dirk Gerbig + pen-iana&gerbig.info +49204 + Foehammer Games LLC + John Edmiston + john.edmiston&foehammergames.com +49205 + CERMOB TECNOLOGIA LTDA + Jose Carlos da Silva Neto + j.neto&cermob.com.br +49206 + City of Tempe + Scott Campbell + security_admin&tempe.gov +49207 + arizvi + Ali Rizvi + ali&arizvi.ca +49208 + Jackie's Wholesale Nurseries Pty Ltd + Dan Irwin + dan&jackies.com.au +49209 + easynetworks + Ge jin + gj&easynetworks.com.cn +49210 + CR0BAR Limited + Anthony de Broise + anthony&cr0bar.co.uk +49211 + DATAGROUP Operate IT GmbH (formerly 'HanseCom Gesellschaft für Informations- und Kommunikationsdienstleistungen mbH') + Clemens Christensen + clemens.christensen&operate-it.com +49212 + Oleg Dubovskoy + Oleg Dubovskoy + odubovskoy&outlook.com +49213 + Nextcloud + Arthur Schiwon + arthur&nextcloud.com +49214 + Monzo Bank Limited + Oliver Beattie + oliver&monzo.com +49215 + Emma, Inc. + Marc Powell + systems&myemma.com +49216 + Corigine, Inc. + Wing Lee Sing + wing.sing&corigine.com +49217 + Hypherion + Eitan Levy + ethanlevy97&protonmail.com +49218 + Nelkinda Software Craft Private Limited + Siddhesh Nikude + Siddhesh.Nikude&nelkinda.com +49219 + LANI GmbH & Co. KG + Sven Lanzke + administrator&herberge-harz.de +49220 + Tiger Management L.L.C. + Tommie Porter + tommie&tigerfund.com +49221 + IACPublishing + Dan Constatino + operations&iacpublishinglabs.com +49222 + KOMDAT + Ronald Kopecky + kopecky&komdat.at +49223 + ZOOM SERVER + wei hui + weih&zoomserver.cn +49224 + PikTime Systems Sp. z o.o. 
+ Robert Urbaniak + piktime&piktime.com +49225 + 5micron GmbH + Sebastian Werner + oid&5micron.de +49226 + Patriot's, LLC + Jacob Ives + jacob.ives81&gmail.com +49227 + GGR Bryansk + Teleliaev Evgeny + asu&gro32.ru +49228 + The unbelievable Machine Company GmbH + David Obando + david.obando&unbelievable-machine.com +49229 + Bergkvist Siljan Skog AB (formerly 'Siljan Skog AB') + Kent Matsuhashi + it&bergkvistsiljan.com +49230 + Enginuity Communications, Inc. + Mark Orton + morton&enginuitycom.com +49231 + Guest-tek Interactive Entertainment Ltd + Christian Carrier + christian.carrier&guest-tek.com +49232 + ChemAxon Kft + Sandor Juhasz + it&chemaxon.com +49233 + Liberty Bank + Dan LaLone + dlalone&liberty-bank.com +49234 + Wyatt Miler + Wyatt Miler + wmiler1&gmail.com +49235 + ninIRC Network + Michael Sauer + michael&ninirc.ga +49236 + appway + Ge Jin + gj&easynetworks.com.cn +49237 + GAU MFC IO + Sergey Starovoytov + info&mfc38.ru +49238 + VAV UNION Kft. + Tóth Tamás + totht&vavunion.hu +49239 + MIBESIS D.O.O. + Miroslav Beranič + info&mibesis.si +49240 + AK-Funktechnik GmbH + Werner Bader + werner.bader&ak-funktechnik.de +49241 + uTech Tecnologia + Jeferson A. Cassol + cassol&utech.com.br +49242 + Steuerbüro Pfeiffer + Mathias Pfeiffer + mathias.pfeiffer&pfhq.org +49243 + Dailymotion + Alan Martins + admins&dailymotion.com +49244 + Söderenergi AB + Sylwe Wedholm + sylwe.wedholm&soderenergi.se +49245 + The University of Kansas Health System + James Beeson + jbeeson&kumc.edu +49246 + Initium Novum LLC + Joshua Marvel + joshuam&initiumnovumllc.com +49247 + Genesis Cancer Center + Laura Sellers + lsellers&genesiscancercenter.com +49248 + Leipziger Versorgungs- und Verkehrsgesellschaft mbH + Anke Peter + anke.peter&l.de +49249 + GBSD Technologies, Inc. (GBSDTech) + Michael Powers + mike.powers&gbsdtech.com +49250 + Robiquity Limited + Anthony de Broise + adebroise&robiquity.com +49251 + Blue Grass Airport + David Roux + helpdesk&bluegrassairport.com +49252 + Cincinnati Bell + Dan Trees + dan.trees&cinbell.com +49253 + Timrå Kommun + Robin Nylander + robin.nylander&timra.se +49254 + Factory Systemes + Fabien Serre + fabien.serre&factorysystemes.fr +49255 + econ solutions GmbH + Christoph Rust + christoph.rust&econ-solutions.de +49256 + Weatherford International + Lawrence Baumle + larry.baumle&weatherford.com +49257 + ECSC Group plc + Adam Shore + adam.shore&ecsc.co.uk +49258 + DR-ARP + Mark Matties + mmatties&gmail.com +49259 + NINTEGRATE, INC. + Hakan Berg + hakan.berg&nintegrate.com +49260 + Bims Laboratories + Harry Bims + harrybims&me.com +49261 + Contact Wave Limited + Meena Anton + manton&contactwave.co.uk +49262 + Scientific RT GmbH + Markus Alber + markus&scientific-rt.com +49263 + Connect and Exchange + Martin Meuwese + m.meuwese&cande.nl +49264 + Benelec Pty Ltd + Grant Pinkney + service&benelec.com.au +49265 + Kivu GmbH + DI Jan van Oort + jan&kivu.tech +49266 + Ashenden Capital + Richard Bibb + richard.bibb&ashendencapital.com +49267 + Government of Western Australia, Department of Finance + Nikhil Mugi + ICT-ServerTeam&finance.wa.gov.au +49268 + Profamilia Bundesverband + Thomas Elsaesser + thomas.elsaesser&animate.de +49269 + Franck d.d. + Alen Santic + alen.santic&franck.hr +49270 + Locata Corporation Pty. Ltd. + Tony Griffiths + engineering&locata.com +49271 + Wireless Technology, Inc. + Chris Richardson + chris&gotowti.com +49272 + AREA S.p.A. 
+ SNMP customer support + snmp&area.it +49273 + BIZMEDTECH + CEDRIC DAIMO + it&bizmedtech.com +49274 + Belgian Mobile ID + Wim Coulier + wim.coulier&sixdots.be +49275 + Royal Society of Chemistry + Andrew Preston + technology&rsc.org +49276 + Integramed Fertility + Kelsey Smith + kelsey.smith&integramed.com +49277 + Stafford County Public Schools + Barton Hunter Matheson III + mathesonbh&staffordschools.net +49278 + Ayasdi, Inc. + Scott Mikusko + ops&ayasdi.com +49279 + Tesla + Edouard Lafargue + elafargue&tesla.com +49280 + Trapper + Alejandro Del Cid + delcid.alejandro.z&gmail.com +49281 + Plexonics Technologies Pvt. Ltd. + Sumit Agarwal + sumit.agarwal&plexonics.com +49282 + medimaps + pascal vergnaud + pvergnaud&medimapsgroup.com +49283 + Credit Union of Colorado + Kevin Gammon + iana&cuofco.org +49284 + LankaClear (Pvt) Ltd. + Duleep Liyanage + duleep.liyanage&lankaclear.com +49285 + Lightbend Inc + Christopher Hunt + admin&typesafe.com +49286 + IMOTIONS + Imotions Support + support&videobutler.nl +49287 + Millson Custom Solutions + Jay Gibson + it&millson.net +49288 + Bitoptimum + Dragos Ilie + dragos&bitoptimum.com +49289 + Praniskom Solutions Private Limited + Devang Tanna + sales&praniskom.com +49290 + consultec sas di Sartor P. & c. + Piero Sartor + pierosartor&tiscali.it +49291 + Access Denied bvba + Johan Loos + iana&accessdenied.be +49292 + Biblioteca - Casa Mulas + Giovanni Mulas + biblioteca&casamulas.it +49293 + Nanjing Juming Network technology Co.,Ltd + tangjun + tangjun&juminfo.com +49294 + Hargs Bruk AB + Lotta Wallin + lotta.wallin&hargsbruk.se +49295 + Direction du numerique UPPA + DAUBA Christophe + christophe.dauba&univ-pau.fr +49296 + Fraunhofer MEVIS + Ruben Stein + ruben.stein&mevis.fraunhofer.de +49297 + Avicenna Health Group Inc + Mohammadreza Hemmati + hemmati&avicennapp.com +49298 + The Port Authority of NY & NJ + Steven Choy + schoy&panynj.gov +49299 + Truecom Telesoft Private Limited + Chirayu Patel + sales&truecomtelesoft.com +49300 + The New Tricks + Taylor Trimble + taylor&thenewtricks.com +49301 + BCS Information Systems Pte Ltd + Choo Tian Fook + chootianfook&bcsis.com +49302 + Dart Container + Michael Watters + michael.watters&dart.biz +49303 + Flexmedia Ind. e Com. de Tecnologia Ltda. + Gustavo Yamasaki Martins Vieira + gvieira&flexmedia.com.br +49304 + Unassigned + Returned 2024-03-26 + ---none--- +49305 + Neural Technologies + Adrian C Harris + adrian.harris&neuralt.com +49306 + AUM Cardiovascular Inc + Marie Johnson + info&aumcardiovascular.com +49307 + Secturion Systems Inc. + Richard Jorgensen + rjorgensen&secturion.com +49308 + Airwave Brasil Tecnologia + Younes Maia + ymaia&abt.tv.br +49309 + VIDA AB + Magnus Linner + magnus.linner&vida.se +49310 + CELLULAR GmbH + Daniel Storozhev + dstorozhev&cellular.de +49311 + Asteas Technologies GmbH & Co KG + SNMP MiB Management + office&asteas.com +49312 + MichaelSchneiderKoeln + Michael Schneider + schneider.michael.cgn&web.de +49313 + Isaac Johnson Organization + Isaac Johnson + isaacmjohnson&gmail.com +49314 + ADFweb.com + Piero Sartor + p.sartor&adfweb.com +49315 + Sealite PTY LTD + Jens Ohle + j.ohle&sealite.com +49316 + SixGor + Zanz Abteilung + ZanzAbteilung&protonmail.ch +49317 + ICT Star Group Myanmar Co., Ltd. + San Lin Naing + sanlin&isgm2.com +49318 + Desert Water Agency + Mohammed S Abbazih + isguys&dwa.org +49319 + Credit Bank of Moscow PJSC + Andrey Lopatin + crt&mkb.ru +49320 + Wallnäs AB + Carl johan Örmander + carl-johan.ormander&wallnas.se +49321 + Vevy Europe S.p.A.
+ Raffaele Rialdi + atd&vevy.com +49322 + Stichting Haaglanden Medisch Centrum + Michel Vissers + tib&haaglandenmc.nl +49323 + Dynamic Consulting International Telecommunications Spain, S. L. + Seila Garcia + admdci&dci.es +49324 + mobex communication GmbH + Markus Urban + m.urban&mobex.de +49325 + EVER SMART TECHNOLOGIES SL + Jesus Martínez + jesus&eversmart.io +49326 + Clyde Bergemann Power Group International Ltd + Stephan Bovet + stephan.bovet&de.cbpg.com +49327 + Frivillighetshuset + Karl Fredrik Haugland + kf&frivillighetshuset.no +49328 + Praim srl + Andres Kaswalder + sysadmin&praim.com +49329 + SysEleven GmbH + SysEleven GmbH Security + security&syseleven.de +49330 + A.B.G Systems Ltd Israel + Yuval Ofer + yuval&abgsystems.com +49331 + Johannishus godsförvaltning AB + Magnus Roth + magnus.rooth&johannishus.com +49332 + AppWorks + Juliana Lin + appworksales&gmail.com +49333 + Brighthouse Financial Inc. + Stefan Kelber + skelber&metlife.com +49334 + Axon-Ar + Diego Bellver + diego.bellver&gmail.com +49335 + ROTOK + Robert Klein + rtwk72&yahoo.com +49336 + DHWS + Dennis Schreiber + iana&dhws.eu +49337 + KommunalBIT AöR + Team Systeme und Netze + systemeundnetze&kommunalbit.de +49338 + ASTEN + Philippe LEMAZURIER + philippe.lemazurier&groupe-asten.fr +49339 + GAYA + Julien Liebert + tech&gaya.fr +49340 + Beijing Fusion Co., Ltd + Gao, Bo + gaobo&senlime.com +49341 + Ayuntamiento de Cartagena + Alejandro Delgado-Gomez + alejandro.delgado&ayto-cartagena.es +49342 + Unser Heimatbäcker GmbH + Mathias Krämer + m.kraemer&unserheimatbaecker.de +49343 + HaloDoc.com + Pandu POLUAN + pandu.poluan&halodoc.com +49344 + IX Layers, Inc. + KIKUCHI Yutaka + admin&ix-layers.com +49345 + KVaibhav Personal CA + Vaibhav Kurde + sslca&kvaibhav.in +49346 + Samvardhana Motherson Innovative Autosystems B.V. & Co. KG + Juergen Baule + juergen.baule&smia-automotive.com +49347 + Healthjump + Clifford Cavanaugh + oid&healthjump.com +49348 + Yapku Limited + Darren Breeze + darren&yapku.com +49349 + BCDVideo + tom larson + tlarson&bcdvideo.com +49350 + VGCIT, Inc. + Vartan Chukhadarian + vartanc&vgcit.com +49351 + STIVCX + Ivan Athanazio + ivan.athanazio&gmail.com +49352 + Hines Network + Jason Hines + jason&hinesnetwork.com +49353 + Certificate Authority for Presidential Office of Information and Communication Technologies of the Dominican Republic + BENITO GALAN + BGALAN&VIAFIRMA.COM +49354 + ARGYROU MINAS + ARGYROU MINAS + minasargyrou&outlook.com +49355 + Fiskarhedens Trävaru AB + Ulf Gustavsson + ulf.gustavsson&fiskarheden.se +49356 + DQE Communications + Mike Kelly + mkelly&dqe.com +49357 + Hoptroff London Limited + Lasse Johnsen + admin&hoptroff.com +49358 + Tri-State Digital Services + Jim Wickenhiser + jim&tristatetheatre.com +49359 + EnterSträta Systems + Isaac Johnson + founder&enterstrata.com +49360 + Cord3 Innovations + Dennis Momy + dennis.momy&cord3inc.com +49361 + Pridok AS + Dag Christensen + dag&pridok.no +49362 + tabaracci.org + Al Tabaracci + oidadmin&tabaracci.org +49363 + Unior Hungary Ltd.
+ Balazs Tihanyi + balazs.tihanyi&unior.hu +49364 + ALFA SATCOM SYSTEMS LLC + Andrey Zinchenko + az&alfasatcom.ru +49365 + Phoenix Online Studios + Richard Flores + webmaster&postudios.com +49366 + visol digitale Dienstleistungen GmbH + Jonas Renggli + jonas.renggli&visol.ch +49367 + inducta++ + Jerko sladoljev + jerko.sladoljev&inducta.hr +49368 + Airbus Safran Launchers + Thomas MEUNIER + thomas.meunier&airbusafran-launchers.com +49369 + Neopath Integrated Systems ltda + Leonardo Alvim Muricy + leonardo&neopath.com.br +49370 + VTU Energy + Miguel Prokop + miguel.prokop&vtu.com +49371 + VIA Rail Canada Inc. + Francois Du Perron + IT_Security&viarail.ca +49372 + Apmetrix, Inc. + Mark Caldwell + armin&apmetrix.com +49373 + Meissner AG (formerly 'Daniel Schmitz') + IT Services + it&meissner.eu +49374 + BAITIC SOLUCIONES + Luis M. Roman Sanchez + contacto&baitic.com +49375 + New York City Housing Authority + Jian Hua Chen + jianhua.chen&nycha.nyc.gov +49376 + IPKids + James Lee + support&ipkids.com +49377 + 556075-2825 + Martina Lindman + martina&nydalatra.se +49378 + People Come First Informatikai Szakertok Egyesulete + Ramona Berki + admin&pcf.hu +49379 + RGB Spectrum + Lynton Auld + lauld&rgb.com +49380 + Fastest Dog Services, Inc. + Aury G. Friedman + afriedman&fastestdog.com +49381 + University of Wollongong + OID Administrator + oid-admin&uow.edu.au +49382 + SAIC + Administrator + j.s.goldberg&saic.com +49383 + Riverscape Software + Brian Mottershead + development&riverscape.info +49384 + InfoTest@KinLong + Palvin Zhu + palvin&163.com +49385 + effiPilot + DUBUS Cyril + cyril.dubus&effipilot.com +49386 + Agentil Software + Ronan BARIOU + contact&agentil-software.com +49387 + Empresas Publicas de Medellin E.S.P + Edwin Alberto Montoya Saldarriaga + edwin.montoya&epm.com.co +49388 + Nanjing Jiuyao Networks Technology Co.,Ltd + Rudy Sun + rudynj&sina.com +49389 + Weeden & Co. LP + Weeden Admins + oids&weedenco.com +49390 + Nebbiolo Technologies + Thiru Narayanan + thiru&nebbiolotech.com +49391 + PCs Plus + Todd Cowman + todd&pcsplus.biz +49392 + Think Clever + Raita Ionut Cristian + craita&think-clever.com +49393 + Neetra Srl + Domenico Ruggiero + d.ruggiero&neetra.com +49394 + VidaIdentity + Ivan Brightly + webmaster&vidaidentity.com +49395 + WestTel International + Robin Erkkila + robin.erkkila&westtel.com +49396 + McKean Defense + Paul Perusse + pperusse&mckean-defense.com +49397 + Wizards of Industry B.V. + Cochise Ruhulessin + cochise.ruhulessin&wizardsofindustry.net +49398 + NPCore Inc. + support email + support&npcore.com +49399 + Process-Informatik Entwicklungsgesellschaft mbH + Werner Sonnentag + sonnentag&process-informatik.de +49400 + Borås Energi och Miljö AB + Farid Dedeic + farid.dedeic&borasem.se +49401 + JSC Navigation-Information Systems + Evgeniy Bolshakov + bolshakovev&nis-glonass.ru +49402 + Skogsbrukarna Ek Förening + Kjell-Åke Åslund + kjell-ake.aslund&moelven.se +49403 + ZaPF e.V.
+ Technisches Organisationsgremium der Physikfachschaften + topf&zapf.in +49404 + shenzhen qibo network co.,ltd + quanwei liu + lqw&vpnsoft.net +49405 + HIGH CONNEXION + Omar DAROUICHE + o.darouiche&highconnexion.com +49406 + Concordia University + Tim Wier + cnet.noc&cuchicago.edu +49407 + Skylark Wireless LLC + Ryan Guerra + ryan&skylarkwireless.com +49408 + ZestFinance + Kuba Tyszko + devops&zestfinance.com +49409 + miniOrange Inc + Gaurav Sood + gaurav&miniorange.com +49410 + Objective:pi LLC + Network Administration + hostmaster&objectivepi.com +49411 + Mindleap + Dan Rades + dan.rades&mindleap.io +49412 + Guenzel IT + Andreas Günzel + a.guenzel&guenzel-it.de +49413 + Telmec Soc. Coop. a r.l. + Daniele Donnini + daniele.donnini&telmec.net +49414 + Windstack IVS + Giuseppe Aceto + giuseppe&windstack.io +49415 + Business Services Organisation + Michael Harnett + ictsecuritymanager&hscni.net +49416 + upSource GmbH + Daniel Tretter + dtr&upsource.de +49417 + Emsyscon Solutions + Luc Vansteenkiste + luc.vansteenkiste&emsyscon.com +49418 + Innovation Place + Eric Chalifoux + echalifoux&innovationplace.com +49419 + Barefoot Networks, Inc. + Prem Jonnalagadda + prem&barefootnetworks.com +49420 + Baylor Scott & White Health + Robson Strange + robson.strange&bswhealth.org +49421 + DMIB Inc. + Alexandre Gravel + agravel&dmib.com +49422 + SeQnet + Pawel Kunecki + seqnet&seqnet.pl +49423 + The Pingry School + Apu + apu&pingry.k12.nj.us +49424 + GDT Advanced Solutions + Ben McClure + ben.mcclure&gdtadvancedsolutions.com +49425 + Foolean.org + Bennett Samowich + support&foolean.org +49426 + Cnergee Technologies Pvt. Ltd. + Suvarna Kulkarni + suvarnak&cnergee.com +49427 + Shopping 24 Gesellschaft für multimediale Dienstleistungen mbH + Dennis Kallerhoff + domreg&s24.com +49428 + Mobeewave + Raphael Hudon-Voyer + rhv&mobeewave.com +49429 + MRSG Advisors + Manvir Randhawa + manvirr&yahoo.com +49430 + Treatment.com + Seonho Kim + seonho.kim&outlook.com +49431 + UNION PAPELERA MERCHANTING, S.L. + DAVID RIBALTA + dribalta&unionpapelera.es +49432 + RAMSAY Générale de Santé + Christophe MAIRA + C.MAIRA&ramsaygds.fr +49433 + Path + Dean Chandler + dchandler&path.org +49434 + cusy GmbH + Peter Hormanns + peter.hormanns&cusy.io +49435 + Fundamentia Business Consulting SL + Francisco Morales + francisco.morales&fundamentia.com +49436 + Joe Gedeon + Joe Gedeon + joe.gedeon&gmail.com +49437 + Prodemge + Elmar Lacerda + elmar&prodemge.gov.br +49438 + TalkTalk Technology + Paul Chattaway + pchattaway&talktalkplc.com +49439 + Altitude - Phenix/Projet3 + GUITARD Ancelin + ancelin.guitard&altitude.fr +49440 + Vasgard GmbH + Gregor Best + support&vasgard.com +49441 + OTLIS - Operadores de Transportes da Região de Lisboa, A.C.E. + Alexandre Domingues + geral&otlis.pt +49442 + IOOOTA Srl + Roberto Pierpaoli + roberto&iooota.com +49443 + European Court of Auditors + Rodrigo Vicente MANZANAL RUIZ + eca-dit-netsec&eca.europa.eu +49444 + Delypse + Stéphane Enjalbert + stephane.enjalbert&delypse.com +49445 + Emeritus Solutions Ltd + Martin Bishop + webhead&emeritus-solutions.com +49446 + Gordion AB + Måns Gotare + mans.gotare&gordion.se +49447 + ITAhM + eui-jin jeon + itahm2014&gmail.com +49448 + Sprylogic Technologies Ltd. 
+ Mukhtar Khan + mukhtar.khan&sprylogic.com +49449 + International Telecom Assistance, sa + AGUIE AGUIE GHISLAIN ARNAUD + aguie&ita-ci.com +49450 + DGAC SNA-RP + HARIB + brahim&harib.fr +49451 + Molina Healthcare Inc + Darrell Wright + darrell.wright&molinahealthcare.com +49452 + Dmitriy Merkushov + Dmitriy Merkushov + d.merkushov&gmail.com +49453 + Crab Cove Limited + Steven Griffiths + servernet1997&hotmail.com +49454 + Lobster Bay Limited + Steven Griffiths + servernet1997&hotmail.com +49455 + Oncology and Hematology Associates of South Texas, P.A. + Veronica Procasky + vprocasky&aol.com +49456 + Rottneros AB + Pasi Toivanen + val.support&rottneros.com +49457 + CM IT Services + Corrado Mulas + it-services&corradomulas.ml +49458 + Selective Insurance Company of America + Robert England + robert.england&selective.com +49459 + Baker, Donelson, Bearman, Caldwell & Berkowitz, PC + Jason Kopacko + jkopacko&bakerdonelson.com +49460 + Priora AG + Rüetschi Werner + werner.rueetschi&priora.ch +49461 + Xeen.UK LTD. + Alex Wright + alex&xeen.uk +49462 + Crinkleit + Scott Crilly + scottcrilly&crinkleit.com +49463 + Severn Trent Services + Scott Crilly + scott.crilly&stservices.co.uk +49464 + Darktide + Jenna Matthews + jrm&darktide.net +49465 + Yuki Enterprises + Jye Karl-Perry + snowleopard&amused.com.au +49466 + Helse Midt-Norge IT + Tore Tetliaune + tore.tetliaune&hemit.no +49467 + LOGISMAN ARAGÓN, S.L. + JAVIER LÓPEZ LORDA + jlopez&logisman.es +49468 + PrJSC "MTS UKRAINE" + Hablo Vladimir + vhablo&vodafone.ua +49469 + WIT + TangQianfeng + tqf&czwit.com +49470 + Community Life GmbH + Joerg Aulich + jaulich&communitylife.de +49471 + bayernets GmbH + Reinhard Melzer + reinhard.melzer&bayernets.de +49472 + Photo-Sonics, Inc + Cecil Rudenberg + crudenberg&photosonics.com +49473 + Weydstone LLC + Kai Johnson + info&suttonabinger.com +49474 + CKUA Radio Network + Ryan Breitkreitz + rbreitkreitz&ckua.com +49475 + suckleast.org Hosting Collective + Stefan Lücke + iana-pen&glaxx.net +49476 + Session Control Oregon LLC + Gerald Lester + glester&sessioncontrol.com +49477 + Vault Investing + Brent Jones + brent&getvault.com +49478 + funatic b.v. + William Janssen + william.janssen&funatic.nl +49479 + Damian Murphy Private LDAP Development + Damian Murphy + murff&warlock.org +49480 + Williams-Sonoma, Inc. (WSI) + Chris King + CKing3&wsgc.com +49481 + Vonamic GmbH + Kevin Konrad + snmp&vonamic.com +49482 + OEDIV Oetker Daten- und Informationsverarbeitung KG + Sebastian Kerssen + sebastian.kerssen&oediv.de +49483 + NYS Unified Court System + Brandon Koch + bkoch&nycourts.gov +49484 + Foundation National intellectual development + Alexander Scherbachev + als&cnir.ru +49485 + Behr Paint + Darrin Frazer + dfrazer&behr.com +49486 + Activx Biosciences + Paul Chen + paulc&activx.com +49487 + NTT DATA, Inc. + Vartan Chukhadarian + Vartan.Chukhadarian&nttdata.com +49488 + Vinton Steel LLC + Rene Salayandia + renefurcio&hotmail.com +49489 + Winmate INC. + Bill Lin + bill_lin&winmate.com.tw +49490 + Compello + Efe Erdogru + efe.erdogru&compello.com.tr +49491 + Epec Oy + Mr. Kari Ahvenlampi + kari.ahvenlampi&epec.fi +49492 + Danphone A/S + Jesper Henriksen + jbh&danphone.com +49493 + Türkiye Cumhuriyeti İçişleri Bakanlığı İller İdaresi Genel Müdürlüğü + Murat Tamer Ataol + m.tamer.ataol&icisleri.gov.tr +49494 + TSI Solutions + David Grace + dgrace&tsisolutions.us +49495 + Textron Systems - Electronic Systems + Chris Engler + engler&textronsystems.com +49496 + BA-POWER ELECTRONICS INC. 
+ Peter Lee + bapower&ms39.hinet.net +49497 + Immucor Inc + Joe DeNicola + jdenicola&immucor.com +49498 + engelbert strauss GmbH & Co. KG + Matthias Fischer + oid-admin&engelbert-strauss.de +49499 + VCAT Consulting GmbH + Patrick Schwalger + admin&vcat.de +49500 + CODIUM Company Limited + Htet Naing Aung + htet_naing.a&codium.co +49501 + UGO3D INC. + Peter Yoo + support&ugo3d.com +49502 + NRPL Aero Oy + Alexey Dudkov + alexey.dudkov&nrpl.aero +49503 + North Atlantic Industries, Inc. + Paul Reid + preid&naii.com +49504 + De Dietrich SAS + Pascal Muckensturm + Pascal.Muckensturm&DeDietrich.com +49505 + Payboost + Eric Machabert + eric.machabert&payboost.com +49506 + GE Transportation + Ryan Hoffman + ryanmarcus.hoffman&ge.com +49507 + Air Canada + Mohamad Mneimneh + mohamad.mneimneh&aircanada.ca +49508 + FidusCrypt GmbH + Andrey Ludwig + a.ludwig&fiduscrypt.com +49509 + Frank Engler + Frank Engler + oid&engleer.de +49510 + Pragmatik + Geoffrey JARASSIER + gjarassier&pragmatik.fr +49511 + Arcom Digital, LLC. + Gregory A. Tresness + tresness.greg&arcomlabs.com +49512 + ipHouse + Doug McIntyre + merlyn&iphouse.net +49513 + AgResearch Ltd + Alastair Sheppard + alastair.sheppard&agresearch.co.nz +49514 + EPK Solutions + Onnig Kouyoumdjian + onnig&epk-solutions.com +49515 + SQLTreeo + Danny Riebeek + d.riebeek&databaseonline.nl +49516 + Jesse Friedman + Jesse Friedman + jesse&jesse.ws +49517 + Implaneo Dental Clinic Regensburg + Christian Fohmann + edv&implaneo.com +49518 + turbosoft + John Chen + tbsoftinc&gmail.com +49519 + PremiumSoft CyberTech Limited + Lin Wa Wing + adp&navicat.com +49520 + Kauer GmbH + Christoph Molenda + christoph.molenda&kauer.de +49521 + George Hogan Sound Ltd + George Hogan + george&georgehogan.co.uk +49522 + SPIDYA Yazılım A.Ş. + Ali YAZICI + ali.yazici&spidya.com +49523 + CANCOM GmbH + Marc Bergner + marc.bergner&cancom.de +49524 + MiE GmbH + Dr. Hanno Schumacher + schumacher&miegermany.de +49525 + Avatier Corporation + Billy Barron + billyb&avatier.com +49526 + Massaraksh + Valentin Kulesh + valentin.kulesh&gmail.com +49527 + Sigterm AS + Mikal Villa + mikalv&sigterm.no +49528 + Menzell & Döhle GmbH & Co. KG + Martin Lormes + helpdesk&menzelldoehle.de +49529 + ALTEN SA + Christophe DECOMBE + christophe.decombe&alten.com +49530 + MSC Trustgate.com Sdn. Bhd. + Ikmal Salleh + operation&msctrustgate.com +49531 + True Partner Singapore Holding PTE. LTD. + Martin Meuwese + m.meuwese&truepartner.org +49532 + Plustek Inc. + Chris Lin + chrislin&plustek.com.tw +49533 + Banco de Credito Social Cooperativo S.A. + Francisco Navarro + francisco.navarro&bcc.es +49534 + jagdish chand + jagdish chand + jkumar852&gmail.com +49535 + MaxLinear, Inc. + Ken Chu + kchu&maxlinear.com +49536 + NetCom Satelital S.A. + Claudio J. Peña + claudio&netcomsatelital.com.ar +49537 + Rewards LLC + Adam Browning + adam&savvysoftwaremt.com +49538 + Mencom Corporation + John J. Bisson + john.bisson&mencom.com +49539 + Communications & Power Industries, ASC Signal Division + Douglas A Gribben + douglas.gribben&cpii.com +49540 + SSE + Wayne Stidolph + wayne&stidolph.com +49541 + Computerhaus EDV-Handels GmbH + Heiko Steindl + heiko&kom.at +49542 + DCM TECNOLOGIA + DANIEL PECHMANN + daniel&dcmtech.com.br +49543 + Unimatica S.p.A. 
+ Gestione Sistemi Unimatica + sistemi&unimaticaspa.it +49544 + Nathan Balch + Nathan Balch + PrivateEnterpriseNumber.Admin&NathanBalch.com +49545 + Universidad de Mendoza + Departamento de tecnología + infraestructura&um.edu.ar +49546 + Wilhelm Eimke oHG + Michael Eimke + edv&eimke.com +49547 + OpenIQ Pty Ltd + Mark van Kerkwyk + mark&vk.net +49548 + FUJIAN GELU POWERTRONICS CO.,LTD. + Vinter Wu + vinter.wu&cn.ssemco.com +49549 + GmSSL + Zhi Guan + guanzhi1980&gmail.com +49550 + LG Uplus + Jinseok, Kang + realstone&lguplus.co.kr +49551 + vorg.eu + Felix Serbanoiu + felix.serbanoiu&icloud.com +49552 + Snowflake Software Ltd. + Andy Newton + oid-master&snowflakesoftware.com +49553 + Radis Ltd + Mikhail Golovenko + miha-gol&mail.ru +49554 + PEO C3T - PM Tactical Network + Lyonel Maxime + lyonel.l.maxime.civ&army.mil +49555 + Stadt Jessen (Elster) + Sven Wewior + info&jessen.de +49556 + BRUNO BADER GmbH + Co. KG + Patrick Bäzner + pki&bader.de +49557 + CONDAT S.A.S. + DANIEL ESCORIZA SANTIAGO IT. DPT LECTA GROUP + daniel.escoriza&lecta.com +49558 + TechCERT + Harshana Porawagama + harshana&techcert.lk +49559 + Cancer Care Specialists + Brett Slizeski + brett.slizeski&ccsreno.com +49560 + Gemeente Purmerend + Sander Kornman + systeembeheer&purmerend.nl +49561 + EAD Systeme GmbH + Georg Eutermoser + support&ead-systeme.de +49562 + Frankfurter Verein für soziale Heimstätten + Dirk Westfal + it&frankfurter-verein.de +49563 + Shure Incorporated + Kehang Wu + wu_kehang&shure.com +49564 + YumaWorks, Inc. + Andy Bierman + andy&yumaworks.com +49565 + Universitate Alexandru Ioan Cuza din Iasi, Facultatea de Informatica + Alexandru Ocheana + arhanghel&info.uaic.ro +49566 + Salish Cancer Center + Callie Hills + callie.hills&salishcancercenter.com +49567 + Norrskog Wood Products AB + Jim Salvin + jim.salvin&norrskog.se +49568 + jRedes Ltda ME + Jardson Thome + redes&jardsont.com.br +49569 + Surgical Information Sciences + Danny Askarov + danny.askarov&surgicalis.com +49570 + Jonathan Wilbur + Jonathan Wilbur + jwilbur&jwilbur.info +49571 + Elektrotechnik und Elektronik Oltmann GmbH + Stefan Springer + s.springer&eeo-gmbh.de +49572 + Precision Castparts Corp + Network Operations + pccnoc&precastcorp.com +49573 + eVolution Networks + Tomer Greenwald + Tomer.greenwald&evolution-networks.com +49574 + Modern Language Association + Eric Knappe + eknappe&mla.org +49575 + El-Abbadi School + Jacob El-Abbadi + jacob.elabbadi&elabbadischool.com +49576 + K.T.E.C. + Kai Ryu + kai1103&gmail.com +49577 + GlobalCom + Praneet Rai + praneet&rai.email +49578 + OON GlobalCom Private Limited + Praneet Rai + praneet&rai.email +49579 + TCOM L.P. + Matt Fields + tclo&tcomlp.com +49580 + Samuel Bächler Informatik + Samuel Bächler + baechler&boeser.ch +49581 + Sensify Security + Jeffrey Venable, Sr. + jeff&sensify-security.com +49582 + Vapor Team + Thomas Lewis + thomaslewis&0xaa55.me +49583 + Txture GmbH + Thomas Trojer + thomas.trojer&txture.io +49584 + PacketX Technology Ltd. + Tony Wang + tony.wang&packetx.biz +49585 + InoftTech + Demakov Nikita + demakoffnik&gmail.com +49586 + Ulf Andersson Åkeri AB + Anders Andersson + anders&uaakeri.se +49587 + Prästlönetillgångarna i Uppsala stift + Stefan Granath + stefan.granath&svenskakyrkan.se +49588 + CONET Kft. 
+ Sandor Meczler + smeczler&conet.hu +49589 + Auto-Maskin AS + Leif Algermissen + la&auto-maskin.com +49590 + Bobst Mex SA + Bernard Schuepbach + bernard.schuepbach&bobst.com +49591 + TecAlliance GmbH + Oliver Mittas + Oliver.Mittas&tecalliance.net +49592 + Net Research + Andrzej Gregorczyk + andgre&netrsr.com +49593 + ACES + SCHINIOTAKIS K. + constantin.schiniotakis&gmail.com +49594 + The Royal Marsden NHS Foundation Trust + Simon Bryant + simon.bryant&rmh.nhs.uk +49595 + aineton + rui ma + 420874062&qq.com +49596 + Previder BV + Stefan Broeils + s.broeils&previder.nl +49597 + PremiumSoft CyberTech Limited + LIN WA WING + adp&navicat.com +49598 + LANKA NAP INC + Ruben Rajah + admin&lankanap.com +49599 + Bridgeworks Ltd + Steven Hayter + development&4bridgeworks.com +49600 + Vision Valley FZ LLC + Muhammed Basheer + products&visionvalley.net +49601 + Apollo Solar Inc. + John E. Pfeifer + johnp&apollosolar.com +49602 + CertificaEdu + Ronaldo de Sousa Araujo + contato&ronaldoaraujo.eti.br +49603 + LINKTEK Co., LTD. + Doo-Hwa, Jung + dhjung&linktek.biz +49604 + KysinTech + Xun HAN + hx&kysintech.com +49605 + MONISTOR + Taras & Iegor Nabok + egorn2007&yandex.ru +49606 + First American Bank + Jeff Noga + jnoga&firstambank.com +49607 + Learning Equality + Jamie Alexandre + jamie&learningequality.org +49608 + Kofola Ceskoslovensko a.s. + Roman Birtek + ms.admins&kofola.cz +49609 + Picomass Limited + Martin Hayes + Martin.Hayes&picomass.com +49610 + d.velop AG + Thomas Henrichsmann + thomas.henrichsmann&d-velop.de +49611 + molch + Thomas Felder + tf&molch.at +49612 + Unassigned + Returned 2017-07-12 + ---none--- +49613 + Fremont Bank + Chris Lane + christopher.lane&fremontbank.com +49614 + Caritas der Dioezese St. Poelten + Stefan Teklits + edv&stpoelten.caritas.at +49615 + Metro Vancouver Regional District + Alfred Ho + alfred.ho&mvrd.ca +49616 + Bienestar + Juan José Zamora Mejía + jjzamoram&gmail.com +49617 + Controlid Industria e Comercio de Hardware e Servicos de Tecnologia Ltda + Albert Nissimoff + iana&controlid.com.br +49618 + Cubic Controls + Simon Churcher + simon&cubiccontrols.com +49619 + SOJO University + Akira Tagami + tagami&cc.sojo-u.ac.jp +49620 + ePatientFinder + Rob Moore + rmoore&epatientfinder.com +49621 + Synetica Limited + David Norman + info&synetica.net +49622 + ASRock Rack Incorporation + Jeff Chan + jeff9_chan&asrockrack.com +49623 + Bragafvl + Wyatt Miler + wmiler1&gmail.com +49624 + Loews Corporation + Wing Chiu + IT-TechSvcs&loews.com +49625 + China Telecom + shi meng + shimeng&chinatelecom.cn +49626 + CITIC GUOAN BROADCOM NETWORK CO.,LTD + Liuzhenfeng + liuzhenfeng&citicguoanbn.com +49627 + Guangzhou Ziyuan Information Technology Co.,Ltd. + chenxianhong + chenxh&zyserv.com.cn +49628 + Penumbra + Lucas Gallagher + lgallagher&my.wctc.edu +49629 + AlphaTesters + Praneet Rai + administrator&alphatesters.com +49630 + Instituto Superior de Derecho y Economia, S.A. + Alfonso Pueyo + alfonso.pueyo&isdemasters.com +49631 + Arcad Software + Raphael Manchon + rmanchon&arcadsoftware.com +49632 + BeeZeeLinx + Renan Le Padellec + admin&beezeelinx.com +49633 + CESBIO + ROBERT + yann.robert&obs-mip.fr +49634 + TIMWE Group HQ – TOTAL TIM Servicos de Telecomunicacoes e Afins, Unipessoal Lda + Rui Alexandre Ferreira + rui.ferreira&timwe.com +49635 + Schwan-STABILO Cosmetics GmbH & Co. 
KG + Robert Hans + robert.hans&schwancosmetics.com +49636 + SUNY-ESF + Help Desk + helpdesk&esf.edu +49637 + Kostiantyn Osypenko + Kostiantyn Osypenko + k.osypenko&gmail.com +49638 + Unassigned + Returned 2020-06-25 + ---none--- +49639 + Stampa Sistem + Sasa Antic + sasa.antic&stampa.rs +49640 + AIRTAG + Cyril Porteret + iana-pen&airtag.com +49641 + VPS Holdings Ltd + Group IT + itadmin&vpsgroup.com +49642 + Innotech Controls + Simon Hoer + simon&innotech.com.au +49643 + Grand Rapids Public Schools + Ruth Michels + michelsr&grps.org +49644 + cenetec + Pierre Kraschinski + pierre.kraschinski&cenetec.de +49645 + Flanga + Moritz Wirth + mw&flanga.io +49646 + SCHÄFER Ausstattungs-Systeme GmbH + Christoph Lammers + clammers&schaefer-it-systems.de +49647 + Substrata Systems LLC + Tim Lukasiewicz + tim&substrata.io +49648 + zcsevcik + Radek Sevcik + zcsevcik&gmail.com +49649 + Skyport Systems, Inc + Ted Treiber + ttreiber&skyportsystems.com +49650 + Vanasse Hangen Brustlin Inc + Rocky Cheung + rcheung&vhb.com +49651 + Erste Group Card Processor d.o.o. + Tomislav Živković + tomislav.zivkovic&egcp.com +49652 + Infrontec GmbH + Juergen Mueller + juergen.mueller&infrontec.de +49653 + WIKON Kommunikationstechnik GmbH + Dennis Rech + it&wikon.de +49654 + JSC Varutis + Evaldas Dobravolskas + info&varutis.lt +49655 + RFmondial GmbH + Jens Schroeder + schroeder&rfmondial.de +49656 + Cohen & Grigsby, P.C. + Justin Dodd + jdodd&cohenlaw.com +49657 + NEC Solution Innovators, Ltd. + Yoshinori Nishino + yos-nishino&ys.jp.nec.com +49658 + BIIK SibSUTIS + Artem Matveev + ianapen&biik.onmicrosoft.com +49659 + Shanghai Shentong Metro Group Co.Ltd. + Ming Zhou + mingzgabriel&163.com +49660 + MOTEX Inc. + Akira Nishitani + akira.nishitani&motex.co.jp +49661 + Hive13 + Ian Blais + cto&hive13.org +49662 + Net4You Internet GmbH + Stefan Raspotnig + sraspotnig&net4you.net +49663 + Belbohemia, IOOO + Sergei Grebenchikov + sg&belbohemia.com +49664 + Fundació Centre de Seguretat de la Informació de Catalunya + David Forner Griñó + msalinas&bcn.sia.es +49665 + Tessares SA + Christophe Aubry + account&tessares.net +49666 + Gribskov Kommune + Benny Rasmussen + it-kontakt&gribskov.dk +49667 + Weda Skog AB + Anders Bergström + anders.bergstrom&wedaskog.se +49668 + maxdoom.com + Nico Hoffmann + info&maxdoom.com +49669 + DB Station&Service AG + David Jähnert + David.Jaehnert&deutschebahn.com +49670 + 有时间 (Have time) + 申华林 (Shen Huolin) + shl_256&163.com +49671 + Vnomic, Inc. + Derek Palma + dpalma&vnomic.com +49672 + POWER Engineers, Inc. + Russel Riley + noc&powereng.com +49673 + Capernwray Torchbearers Australia + Samuel Weirich + it&capernwrayaustralia.org +49674 + Marland.IT + Chris Marland + chris&marland.it +49675 + LLC Qualitteq + Andrey Karpov + info&qualitteq.ru +49676 + Agence Nationale de Sécurité des Systèmes d'Information (ANSSI/BF) + Georges P. LALLOGO + glallogo&cirt.bf +49677 + BlackBelt Technology Kft. + Robert Csakany + robert.csakany&blackbelt.hu +49678 + Cogit Studio + Yohann Prigent + yohann&cogitstudio.com +49679 + VLSS + Dean Bolton + dean.bolton&vlss-llc.com +49680 + Binovia Corp. 
+ John Pell + jpell&binovia.com +49681 + Praetorian Group, Inc + Richard Penshorn + it&praetorian.com +49682 + Dittman + Eric Dittman + dittman&dittman.net +49683 + iPhotonix + Andrew Fullford + net-coord&iphotonix.com +49684 + Infinit Group + Glenn Lim + glenn&infinit.com.sg +49685 + Polovnikov LLC + Alexander Polovnikov + polovnikov&triadatv.ru +49686 + Fundação Universidade Regional de Blumenau + Ana Lucia Anacleto Reis + anna&furb.br +49687 + Cormant Inc. + Paul Goodison + services&cormant.biz +49688 + Blue Danube Systems + Dan Kiewlich + dan.kiewlich&bluedanube.com +49689 + Zener Redes + Angel Granado + angel.granado&zenerlan.com +49690 + Fortanix + Jethro Beekman + jethro&fortanix.com +49691 + kd5ahl.org + Kelsey Smith + admin&kd5ahl.org +49692 + SHENZHEN Maisijie NETWORK CO.,LTD + 石玉泽 (Shi Yuze) + shiyuze&msjnet.cn +49693 + Warner Music Group + Stelancy D.Lapointe + Phibog59&yahoo.com +49694 + IoTium Inc + Dhruva Narasimhan + dhruva.narasimhan&iotium.io +49695 + D & K Co. + Kevin Smih + kevin&dk.co +49696 + WanLiYun Medical Information Technology Co. Ltd. + Xiaohui Wang + go2hero&gmail.com +49697 + Twitch Interactive + Kevin Smith + kevins&twitch.tv +49698 + Sector 7 + Christian Klein + Sector7&gmx.de +49699 + Wack.de + Franz Wack + pen&wack.de +49700 + SCHERDEL GmbH + Michael Schobert + pki-admins&scherdel.de +49701 + DofiLoop + Mikael Schultz + mikael&dofiloop.com +49702 + Coc Coc Company Limited + Duc Nguyen + ducnm&coccoc.com +49703 + Nutimaja Ltd. + Reijo Sirila + reijo&nutimaja.ee +49704 + Tieto Sweden Healthcare & Welfare AB + Joachim Wallberg + joachim.wallberg&tieto.com +49705 + Cloudworks AS + Ragnar Storstrøm + ragnar.storstrom&cloudworks.no +49706 + glomex GmbH + Andreas Sieferlinger + ops&services.glomex.com +49707 + Pragmaxus AG + Harald Grov + harald.grov&pragmaxus.ch +49708 + DentalRay + Andrea Venanzi + a.venanzi&yahoo.it +49709 + Universign + Julien STERN + julien.stern&universign.com +49710 + GOMA Elettronica SpA + Davide Calza + davide.calza&gomaelettronica.it +49711 + Polizei Sachsen + Service Desk + pki&polizei.sachsen.de +49712 + Ivins, Phillips & Barker Chartered + Douglas M. Andre + dandre&ipbtax.com +49713 + Instituto Nacional de Metrologia, Qualidade e Tecnologia - INMETRO + Marcos Trevisan Vasconcellos + mtvasconcellos&inmetro.gov.br +49714 + Stannum-Man + Sean Mullin + sean_mullin&charter.net +49715 + Epilog Vermögensverwaltungs AG + Guido Grygiel + it-koordination&mayfair-hamburg.de +49716 + Mayfair Vermögensverwaltungs SE + Guido Grygiel + it-koordination&mayfair-hamburg.de +49717 + Complyify LLC + Sean Bruton + sean&complyify.com +49718 + Catalia Health + Gary Arnold + garnold&cataliahealth.com +49719 + PERK Innovation + Nicolas Höft + nicolas.hoeft&perk-innovation.com +49720 + Australian Access Federation + Terry Smith + t.smith&aaf.edu.au +49721 + LinCom + Marcin Jabłecki + jableckim&lincom.waw.pl +49722 + Perspica Networks + Jonathan Creasy + jonathan.creasy&perspica.io +49723 + ELAS Ltd. 
+ Sviatoslav Kononenko + kononenko&elas.com.ua +49724 + PJSC Rostelecom Volga branch + Andrey Klimentev + a.klimentev&volga.rt.ru +49725 + SSIMWave + Abdul Rehman + info&ssimwave.com +49726 + Tata Communications.Limited (TCL) + Jacques Sabourin + jacques.sabourin&tatacommunications.com +49727 + BA + Support Team + support&ba.be +49728 + Broadquest 2nd system + may kim + mhkim&broadquest.co.kr +49729 + GN Hearing A/S + Peder Thode + pthode&gn.com +49730 + GN Audio A/S + Peder Thode + pthode&gn.com +49731 + Apollo Enterprise Imaging Corp + Jeff Wang + jwang&apolloei.com +49732 + Parahyangan Catholic University + FTIS Lab Adminstrator + it&unpar.ac.id +49733 + SMARTMOVE S.A. + LUIS OSORIO + luis.osorio&smartmove.pt +49734 + Ekeryds trävaruaffär AB + Leif Nilsson + Ekerydstra&gmail.com +49735 + Hwang Group + Hwang Hyun Chang + hhc00&hanmail.net +49736 + Crytus corporation + Yoshiaki Kurihara + ykuri&crytus.co.jp +49737 + Verizon Telematics + Kalyan Varma Bhupathiraju + kalyan.varma.bhupathiraju&verizon.com +49738 + SynTrust Tech International Ltd., + Jason Chuang + Jason_Chuang&SynTrustTech.Com +49739 + Drivenets + Gal Zolkover + gal&drivenets.com +49740 + Velostrata + Ady Degany + ady&velostrata.com +49741 + xanzex + Robert Harris + rjt.harris&gmail.com +49742 + Hot Chilli Box Ltd. + Martin Henery + martin&hotchillibox.com +49743 + Intouch Games Ltd + Florin Cazanaru + florin.cazanaru&mfortune.co.uk +49744 + Hughes do Brasil + Fernando Secali + fsecali&hughes.com.br +49745 + Ludia inc. + Olivier Gagnon + operations&ludia.com +49746 + ImageWare Systems, Inc. + Dale Peek + infotech&iwsinc.com +49747 + Ricegrowers Limited + Ben Saxvik + bsaxvik&sunrice.com.au +49748 + NearbySensor + Ricard Maso + rmaso&nearbysensor.com +49749 + K&T Host + Timothy Grondin + hostmaster&knthost.com +49750 + Mutualink, Inc + David Parry + dparry&mutualink.net +49751 + Gridscape Solutions + Ali Imran Najmi + ali&grid-scape.com +49752 + CLOUDWAN + Eugene M. Kim + eugene.kim&ntti3.com +49753 + Interactive Northwest Inc + Matthew Van Gordon + mvg&interactivenw.com +49754 + Hydro-Quebec - DPT (Direction Principale Telecom) + El Hassan Aouzah + Aouzah.ElHassan&hydro.qc.ca +49755 + Université Paris Sciences et Lettres + Mathieu Grelier + mathieu.grelier&univ-psl.fr +49756 + Futurecom Systems Group, ULC. + Samuel J. Grabski + sam.grabski&futurecom.com +49757 + USMD + Sandra Lazala + lisa.shulin&usmd.com +49758 + NextgenID + Dario J Berini + dberini&nextgenid.com +49759 + Jacques Technologies + Heena Kapadia + heena.kapadia&jacques.com.au +49760 + Semperficio Software LLC + Kristopher Mader + kristopher.mader&semperficio.com +49761 + Toki Winter + Toki Winter + kevin.t.winter&gmail.com +49762 + PCCW-HKT TSL + Marco HSU + marco.mt.hsu&pccw.com +49763 + Communication Company, NARI Group Corporation Information Technology + Li Muye + limuye&sgepri.sgcc.com.cn +49764 + Spokeo, Inc. + Tom Chapon + tchapon&spokeo.com +49765 + Guangzhou ChengJia Technology Co., Ltd. + Xie.Shaofeng + postmaster&o-home.com +49766 + Radium Networks + René Mau + rene.mau&radium-networks.com +49767 + J. RETTENMAIER & SÖHNE GMBH + CO KG + Swen Blatt + Swen.Blatt&jrs.de +49768 + Infomac Sp. z o. o. Sp. k. 
+ Andrzej Maciejczyk + biuro&infomac.pl +49769 + YADRO + Support + snmp&yadro.com +49770 + NAVIUS + IT Dept + support&navius.ru +49771 + Stiftung Krankenhaus Bethanien für die Grafschaft Moers + Michael Ziller + michael.ziller&bethanienmoers.de +49772 + Zolkover + gal zolkover + Gal&zolkover.com +49773 + SCHOELLERSHAMMER GmbH + Günter Wirtz + gwirtz&schoellershammer.de +49774 + XLIM + Laurent Hagerman + svp-pen&xlim.fr +49775 + Unassigned + Returned 2017-05-03 + ---none--- +49776 + INSTITUTO DEL CANCER Y ENFERMEDADES DE LA SANGRE, CSP + MELANIE CUEVAS + cuevasriveramelanie&hotmail.com +49777 + Herbrich Corporation + Jennifer Herbrich + j.herbrich&aol.de +49778 + Lin and Associates, Inc. + Eric LeBlanc + sysadmin&linandassociates.com +49779 + Shenzhen Yetelcom Communication Tech. Co.,Ltd. + Laurence Lu + LaurenceLu&139.com +49780 + Oklahoma State University Foundation + David Laws + dlaws&osugiving.com +49781 + MicroGrid Solutions, LLC + Andrew Stevens + admin&mgrids.com +49782 + Author Solutions, LLC + Michael Kreitzer + michael.kreitzer&authorsolutions.com +49783 + SnapRoute Inc. + Adam Casella + acasella&snaproute.com +49784 + FINAO Ltd + Greg Harbers + greg.harbers&finao.co.nz +49785 + Tubular Labs, Inc. + Christian Verkerk + operations&tubularlabs.com +49786 + Cancer Specialists of North Florida + Kelly Anderson + kelly.anderson&csnf.us +49787 + AparnaSystems Inc. + Alex Henderson + ahenderson&aparnasystems.com +49788 + Nova Computer Services LLC + Scott A Buettner + buettner.scott&live.com +49789 + DHW + Drake Weibel + drake.weibel&dhwtechnologies.net +49790 + Marmocet LLC + Jacob C Conley + jake&marmocet.com +49791 + Borel & Barbey + Roger Noetzlin + roger.noetzlin&borel-barbey.ch +49792 + HDS a.s. + Jozef Juriga + jozef.juriga&hds.sk +49793 + Persson Invest skog AB + Henrik Mårtensson + henrik.martensson&piskog.com +49794 + M2MSOFT + Bruno BOSQUED + sales&m2msoft.com +49795 + Navitel sp. z o.o. + Wojciech Mordec + serwis&navitel.pl +49796 + ARH Inc. + Csaba Nagy-Amigó + csaba.nagy&arh.hu +49797 + RideOnTrack + Herman Laenen + herman.laenen&rideontrack.com +49798 + BVZ Holding AG + Martin Ittig + martin.ittig&mgbahn.ch +49799 + Visteon Electronics Germany GmbH + Jo Wilkes + jwilkes&visteon.com +49800 + GE Digital + Don Quigley + donald.quigley&ge.com +49801 + PST Public Safety Technologies GmbH + Herwart Wermescher + herwart.wermescher&pst.at +49802 + beyerdynamic GmbH & Co. KG + Ulrich Roth + roth&beyerdynamic.de +49803 + Kopen Secondary School + Haroun Rashid + shane&emstret.com +49804 + Concept Soluções + Marcio Thomaz Lima + marcio&conceptsol.com.br +49805 + Skaro 73k + Adam Piontek + damekpiontek&gmail.com +49806 + St. Luke's Hospital + Terri Penn + tpenn&stlukes-stl.com +49807 + BERNMOBIL + Thomas Bodenmann + it.oid-admin&bernmobil.ch +49808 + Rubica + Ryan Denniston + ryan&rubica.com +49809 + Fitivision Technology Inc. + Lisa Chen + fitiadmin&fitivision.com +49810 + Panalpina World Transport (Holding) Ltd. + Regis Hambalek + TrustCenter.PAC&panalpina.com +49811 + Ecole Supérieure de la Francophonie pour l'Administration et le Management + Ecole Supérieure de la Francophonie pour l'Administration et le Management + info&bg.auf.org +49812 + S&T AG + Karl Klauda + karl.klauda&snt.at +49813 + Sytecs + Alexander Motsnyy + info&sytecs.com.ua +49814 + GWAdriga GmbH & Co. KG + Axel Pätzold + pki&gwadriga.de +49815 + FBR Group B.V. + Jorrit Pouw + jorrit&fiber.nl +49816 + SEIKO TIME CREATION INC. 
+ MAKOTO IIJIMA + makoto.iijima&seiko-stc.co.jp +49817 + Solid Park AB + Stefan Törnblom + stefan.tornblom&solidpark.se +49818 + shikuo technology corpartion + Cook Xiao + xcc_shikuo&163.com +49819 + KST technology co.,ltd + YouChan So + ycso1011&i-kst.com +49820 + TELEOFIS JSC + Pavel Gololobov + pavel&teleofis.com +49821 + New Jersey Hematology Oncology Associates, LLC + Kimberly Loevy + admin&njhoa.com +49822 + Matthias Robert Wiora + Matthias Robert Wiora + iana&wiora.co.uk +49823 + Multapplied Networks Inc. + Josh Hicks + josh.hicks&multapplied.net +49824 + Springs Charter Schools + Thomas Krause + tj.krause&springscs.org +49825 + Canovate Group + Adil Bek + adil.bek&canovate.com +49826 + Drees & Sommer + Denis Günther + denis.guenther&dreso.com +49827 + Digital imaging + Mario Testoni + mario.testoni&digitalimaging.it +49828 + City of York Council + Ian Towner + ian.towner&york.gov.uk +49829 + VTTEK - Viettel Group + Thanh Doan + thanhdc3&viettel.com.vn +49830 + eDoc Group - Tiers de Confiance + Gabriel GIL + gabriel.gil&edoc.fr +49831 + Expression Networks LLC + John Robinson + jrobinson&expression.net +49832 + Nero Blanco IT Ltd + Twan van Beers + team&neroblanco.co.uk +49833 + Sierra View Medical Center + Scott Cheney + scheney&sierra-view.com +49834 + Sioui Microsolutions + Pierre Sioui, Ing. + pierre&sioui.com +49835 + Barava, LLC + Johnny Betz + bahnbrenner&hotmail.com +49836 + MirageOS + Camelus Dromedarius + oid&mirage.io +49837 + JIANGSU HENGSION ELECTRONIC S&T CO.,LTD + Yanping-Zhang + zhangyp&hengsion.com +49838 + ZHEJIANG QUANTUM TECHNOLOGIES CO., LTD + ZhouQi + zhouqi&qtec.cn +49839 + Adjara Group Hospitality + Dimitri Karapetian + dimitri.karapetian&adjaragroup.com +49840 + Ponomarevs + Dmitry Ponomarev + dmitry&ponomarevs.ru +49841 + Tecnohold Development Tecnology Ind. e Com. Ltda. + Daniel de Souza Vicente + daniel.vicente&tecnohold.com.br +49842 + Mark Broecker IT-Consulting + Mark Broecker + broecker&qsecofr.de +49843 + Universidad Nacional de Colombia + Mauricio Tamayo + emtamayog&unal.edu.co +49844 + KAHUNA Ventures LLC + Steve Russell + srussell&kahunaventures.com +49845 + Älvdalens Besparingsskog + Håkan Lissman + hakan.lissman&besparingsskogen.se +49846 + Vadsbo Skog AB + Ulf Eriksson + Vadsbo.skog&vadsbo-skog.se +49847 + Logitech, Inc. + Ken Erbes + kerbes&logitech.com +49848 + Heilongjiang ETSkill Technology Co., Ltd. + JiangXuehao + 1062881635&QQ.com +49849 + UNIS-WDC Storage Co., LTD. 
+ Ray Hu + ray.hu&uniswdc.com +49850 + Idiosys + Wietse Zuyderwyk + wzuyderwyk&idiosys.net +49851 + Aria Networks + Archie Wade + pen-admin&aria-networks.com +49852 + JSC ITC Sistema-Sarov + Kirill Novichikhin + kirill.novichikhin&sarov-itc.ru +49853 + Atea Norge AS + Thommy Mikkelsen + thommy.mikkelsen&atea.no +49854 + BITMARCK TECHNIK GMBH + Frank Wagenbach + frank.wagenbach&bitmarck.de +49855 + IDGENERIS + RENATO G MACHADO + contato&idgeneris.com +49856 + Community Living British Columbia + Ian Scott + ian.scott&gov.bc.ca +49857 + Department of Intellectual Property + Chaiyaporn Phopan + chaiyaporn.p&ipthailand.go.th +49858 + Centracare Health + Dianna Esplan + centradomain&centracare.com +49859 + Lyggeskog AB + Erland Forsström + erland&archipelago.se +49860 + Rönås Skog AB + Erland Forsström + erland&archipelago.se +49861 + Logate + Predrag Biskupovic + predrag.biskupovic&logate.com +49862 + NodNetwork + Andrew Meyer + ameyer&nodnetwork.org +49863 + RESEARCH ON DEMAND LAB + Oleksii Serikov + ceo&rdl.global +49864 + BlueGoat + Tin Tin Hamans + t.hamans&bluegoat.me +49865 + Bartz.net + Jason Bartz + bartzjegr&gmail.com +49866 + Thurnau Industries + Chris Drosos + ianaoid&chateau76.net +49867 + Kforce, Inc. + Jay Astell + hostmaster&kforce.com +49868 + Pizza Max + Orhan Gensch + info&pizzamax-spandau.de +49869 + TrowLink + Evan Trowbridge + admin&trowlink.net +49870 + APIIDA AG + Armin Stephan + armin.stephan&apiida.com +49871 + OpenBMC Project + Patrick Williams + patrick&stwcx.xyz +49872 + McKay Brothers, LLC + Ralph Childers + Ralph.Childers.IANA&mckay-brothers.com +49873 + Thales Services France Agora-t + RUGET Frédéric + olivier.gintrand&thalesgroup.com +49874 + friedlmaier.net + Daniel Friedlmaier + daniel&friedlmaier.net +49875 + Advocate Health and Hospitals Corporation + Stephen Krug + steve.krug&advocatehealth.com +49876 + Vexata Inc. + Venkatesh Krishnamurthy + venkatesh&vexata.com +49877 + Skylads Ltd + Soufian Aboulfaouz + soufian&skylads.com +49878 + Wanless Systems Limited + Matt Wanless + matt&wanless-systems.com +49879 + Gorilla Technology + PaulLuo + mis&gorilla-technology.com +49880 + Unassigned + Returned 2018-03-09 + ---none--- +49881 + Prismview + Jared Yelton + jyelton&prismview.com +49882 + Synanetics Ltd + Robert Hickingbotham + robert&synanetics.com +49883 + MES Solutions by ASTOR + Kamil Prejs + kamil.prejs&astor.com.pl +49884 + Praetors AG + Costin Enache + costin&praetors.ch +49885 + Wisscot + David Scott + contact&wisscot.com +49886 + LensRentals.com + Will Glynn + will&lensrentals.com +49887 + MySCS + Timothy Nothdurft + support&myscs.com.au +49888 + SEEK LIMITED + Domain Administrator + skdns&seek.com.au +49889 + BobsterCorp + Jason Tung + jasontung&bobster.co +49890 + Choptank Electric Cooperative + Deni Dorsey + denid&choptankelectric.coop +49891 + Hubei Keenward Engineering Co.,Ltd. + Li Le + xiagjiu2gai&163.com +49892 + ProcessUnity + Eric Kemp + eric.kemp&processunity.com +49893 + Trimoz Technologies + Lou Simard + lsimard&trimoz.com +49894 + Tom W Wolf + Tom W Wolf + tomwwolf&gmail.com +49895 + ZXW Networks Co., Ltd.
+ Bin Zhao + zhaobin_0516&163.com +49896 + Fireglass + Danny Rehelis + danny&fire.glass +49897 + TrueVolve Technologies + Steyn Geldenhuys + steyn&truevolve.technology +49898 + PROMSVYAZKOMPLEKT LLC + Denis Peretrukhin + peretruhin&pskt.ru +49899 + stone IT SOLUTIONS + Roman Steininger + roman.steininger&stone-it.at +49900 + Rahandusministeeriumi Infotehnoloogiakeskus + Raimo Pinding + raimo.pinding&rmit.ee +49901 + LDA Audio Tech + Juande Sánchez + jdsanchez&lda-audiotech.com +49902 + PANSOMA GmbH + Administrator + office&pansoma.at +49903 + Petrolink International Ltd + Robert Bruce + robert.bruce&petrolink.com +49904 + telisca + Jean-Marc LACOSTE + jmlacoste&telisca.com +49905 + OOO KONTINENT + Perepelkin Aleksei + perepelkin.a&kontinent-spb.ru +49906 + Hanning + Rainer Beermann + vtm&hanning-gim.com +49907 + Phasor Solutions Ltd + Matthew Wells + matthew.wells&phasorsolutions.com +49908 + Nuance Communications, Inc. + Jason Qualkenbush + jason.qualkenbush&nuance.com +49909 + BNP Paribas Real Estate Hungary + Attila Kovari + attila.kovari&bnpparibas.com +49910 + Gonščákovci + Stanislav Gonščák + stanislav&gonscak.sk +49911 + Alliance Entertainment + Michael J. Prentice + michael.prentice&aent.com +49912 + Moose and Wombat + Rian Aldridge + rian&mooseandwombat.com +49913 + kioriy network + Yuta Okubo + kioriy95&kioriy.jp +49914 + SAKO TECHNOLOGIES LIMITED + Zidago Sako + zst&sakotechnologies.com +49915 + Los Angeles Department of Water & Power + David Chwa + david.chwa&ladwp.com +49916 + ESTECH International + Inseok Choi + ischoi&es-tech.com +49917 + MNP LLP + Justin Van Niejenhuis + justin.vanniejenhuis&mnp.ca +49918 + UNATEC ICT, S.L. + Raúl Gil García + info&unatec.es +49919 + SQUELCH INC. + Bahman Safinejad + Bahman.Safinejad&squelch.io +49920 + VITOGAZ Switzerland AG + Yann-Amaël Aubert + it.service&vitogaz.ch +49921 + OLZETEK, Inc. + Kyeong-Min Park + kyeongmin.park&olzetek.com +49922 + Aron AG + Peter Strauss + p.strauss&aron-ag.com +49923 + VELANKANI ELECTRONICS PVT. LTD. + GIRISH ANAND KALELE + GKALELE&VELANKANIGROUP.COM +49924 + whiteneng + Yong-Hwan Kim + whiteneng&gmail.com +49925 + Figeas SA + Kevin Taillens + ktaillens&figeas.ch +49926 + H&D International Sp. z o.o. Oddział w Polsce + Mikołaj Wieczorkiewicz + mww&hud.pl +49927 + Oregon Department of Transportation + Karina Stewart + ODOTSecurityProgram&odot.state.or.us +49928 + Nomad Global Communication Solutions + Brett Bowden + brett.bowden&nomadgcs.com +49929 + Rubrik, Inc + Peter J. Milanese + peterm&rubrik.com +49930 + Zollner Elektronik AG + Andreas Hausner + admin&zollner.de +49931 + Metacode + Wiktor Kwapisiewicz + contact&metacode.biz +49932 + Deerfield Academy + William Summers + wsummers&deerfield.edu +49933 + Alpha Med Physician Group LLC + Steve Bainaka + steve_bainaka&alphamedphysicians.com +49934 + Cellnetrix GmbH + Vladimir Nagin + vnagin&cellnetrix.com +49935 + Thought Through Software, Inc. + Corporate President + pen.iana.org&bairdbunch.com +49936 + The AME Group + Justin Hack + jhack&theamegroup.com +49937 + bg nerilex + bg nerilex + bg&nerilex.org +49938 + Azapp Software + Cristiano Fernandes + cristiano&azapp.net +49939 + nyx-network + Mickael Winschel + contact&nyx.cx +49940 + Sagio A/S + Svend Meyland Nicolaisen + smn&sagio.com +49941 + ReadyNet Solutions + David Salick + sslca&readynetsolutions.com +49942 + myspot.at + Martin Tomanec + martin+iana&myspot.at +49943 + Tamseng + Mingyu Kim + mingyu&tamseng.co.kr +49944 + City of Toronto + Andrew Chan + abchan&toronto.ca +49945 + Fastly, Inc. 
+ RIR Administrator / Thomas Daly + rir-admin&fastly.com +49946 + Nymi + Jonathan Sau + jsau&nymi.com +49947 + ACH Colombia SA + Diego Jaramillo + administradoresseguridad&achcolombia.com.co +49948 + eWitness Italia Srl + Eleonora Giardino + eleonora.giardino&ewitness.eu +49949 + Calvert County Government + John Silcox + silcoxjd&co.cal.md.us +49950 + MeetNow! GmbH + Patrick Schneider + patrick.schneider&meetnow.eu +49951 + Jaramillo + Diego Jaramillo + ddjaramillo&outlook.com +49952 + DATA TECH INTERNATIONAL DOO + Goran Todorov + office&dti.rs +49953 + Spg Controls Pty Ltd + Myles Rennick + mylesr&spgcontrols.com +49954 + Forimp AB + Eric Gäverth + info&forimp.se +49955 + Martin Handl + Martin Handl + info&szarafinski.de +49956 + ReiseBank AG + Patrick Baumann + patrick.baumann&reisebank.de +49957 + Diginext + Aurélie Trautmann + aurelie.trautmann&diginext.fr +49958 + Burgergemeinde Bern + Patric Bartl + zi&bgbern.ch +49959 + inDenova SL + Jordi Gisbert Rodas + jgisbert&indenova.com +49960 + Ericsson DE - MS/IT + Hermann Maurer + hermann.maurer&ericsson.com +49961 + MindCompute Inc. + Praveen Srinivasan + praveen.srinivasan&mindcompute.com +49962 + Magic Code Co., Ltd. + Thanate Dhirasakdanon + thanated&magic-code.com +49963 + Belarusian State University + Dimitry Dvorcovoy + dvorc&bsu.by +49964 + Bobby's Foods Ltd + Christopher Cowley + registrations&bobbysfoods.co.uk +49965 + Neova AB + Christer Danneskog + christer.danneskog&neova.se +49966 + Herbrich Corporation + Aleksandar Herbrich + aleksandar&herbrich.biz +49967 + Värendskog AB + Erland Forsstrom + Erland&archipelago.se +49968 + Tyfone Inc + Warren Bebout + warren.bebout&tyfone.com +49969 + Miramar Networks + Gene Swank + geneswank&miramarnetworks.com +49970 + s IT Solutions + Thomas Schiesterl + thomas.schiesterl&s-itsolutions.at +49971 + Hedmark IKT + Bjørn Are Hansen + Bjorn.Are.Hansen&hedmark-ikt.no +49972 + RunSDN + Alexey Elin + aelin&runsdn.com +49973 + FancyGuy Technologies + Steve Buzonas + sbuzonas&fancyguy.com +49974 + Vlinder Software + Ronald Landheer-Cieslak + rlc&vlinder.ca +49975 + Bon Secours Health System + Arpad Rohrsetzer + ADMGT&bshsi.org +49976 + E8 storage + Danny Melamed + danny&e8storage.com +49977 + West Virginia State Tax Department + Jonathan Lipscomb + jonathan.k.lipscomb&wv.gov +49978 + Universidade do Estado de Santa Catarina - UDESC + Fernando Augusto Seidler + fernando.seidler&udesc.br +49979 + BigRentz + Matthew Evans + ianamaster&bigrentz.com +49980 + ExampleCompany + Jan Hakl + janhakl1456&gmail.com +49981 + IFBLE SOLUCIONES + Juanra Posada + juanra.posada&ifble.com +49982 + Medtronic RTG + Tom W Wolf + tom.w.wolf&medtronic.com +49983 + Luminate Wireless + Albert Pang + albert&luminatewireless.com +49984 + Arecont Vision + Alex Krul + akrul&arecontvision.com +49985 + Franklin Community Schools + Dustin Southard + southardd&franklinschools.org +49986 + BAE Systems Inc. + Robert Schwendinger + robert.schwendinger&baesystems.com +49987 + GHIGHO + Raoul Signorile + raoul&signorile.it +49988 + Bundeseisenbahnvermoegen + Anton Babel + anton.babel&bev.bund.de +49989 + Peloton Technology + John Muth + jmuth&peloton-tech.com +49990 + UTISCP + Ruediger R. Asche + rac&ruediger-asche.de +49991 + Smart Integrated Solutions SRL + Ioan Sonea + ioan.sonea&s-i-s.ro +49992 + Littwin Systemtechnik GmbH & Co. KG + Kai Schmidt + kai.schmidt&littwin-systemtechnik.de +49993 + Koinonia Christian Fellowship + Jason Cook + iana&kcf.org +49994 + Banco Azteca S.A. 
Institución de Banca Múltiple + Rosangela Luna Muñoz + roluna&bazdigital.com +49995 + DS Broadcast, Inc. + Harry Chang + info&dsbroadcast.com +49996 + CoreEdge Networks Co., Ltd + Nick Choi + nickchoi&ce-net.com +49997 + Socionext Inc. + Masahiro Yamada + yamada.masahiro&socionext.com +49998 + OOO INFOCOM-LTD + Vladyslav Mochalov + vlad_mochalov&ia.ua +49999 + GFD GmbH + Daniel Jordan + d.jordan&gfd.de +50000 + King Tsushin Kogyo Co., Ltd. + Taiyu Ono + ono&king-tsushin.co.jp +50001 + Scildon + Wijnand Westra + beheer&scildon.nl +50002 + Marcepan Org. + Sergey Bartenev + ntsdec&mail.ru +50003 + NCSCCS Limited + James Burton + jb&0.me.uk +50004 + Einsatz Development Inc + Luqman Hakim + luqi&luqi.web.id +50005 + Université de Haute Alsace + Alexandre Heck + iana-oid&uha.fr +50006 + Chongqing Ambition Science&Technologies Co.,Ltd. + Waine + nornet&sina.cn +50007 + Lahti University of Applied Sciences + Sari Laaksonen + sari.laaksonen&lamk.fi +50008 + RPC LLP + Craig Hawthorne + orders&rpc.co.uk +50009 + Workinout.com + James Danforth + admin&workinout.com +50010 + Collinear Networks Inc. + Jon Horddal Jonasson + jon.horddal&collinear.com +50011 + Hose-McCann Communications + Andrea Jara + ajara&hosemccann.com +50012 + Enrique Avalle + Enrique Avalle + enrique&avalle.net +50013 + Aldridge Traffic Controllers Pty Ltd + Paul Milazzo + pmilazzo&atsc4.com.au +50014 + nmhq.net + Niklas Matthies + contact_pen.iana.org&nmhq.net +50015 + Scarsdale Schools + Michael Basso + mbasso&scarsdaleschools.org +50016 + Biocatch + Konstantin Savelev + providers&biocatch.com +50017 + CG Defense Technologies + Jeremy Sparks + admin&threemail.org +50018 + Pacific Media Technologies Pty Ltd + Glen English + glen&pacificmedia.com.au +50019 + Bucet AG + Thorsten Lauer + thorsten.lauer&bucet.de +50020 + Nightwire + Robin Beismann + robin&beismann.biz +50021 + MAJALOG + JACQUES LE GOUSSE + jlegousse&majalog.fr +50022 + air atéliers GmbH + Oliver Marugg + airateliers&gmail.com +50023 + HIMA Paul Hildebrandt GmbH + Alexandre Terentiev + a.terentiev&hima.com +50024 + AlmavivA S.p.A + Andrea Polidoro + a.polidoro&almaviva.it +50025 + dSPACE GmbH + Radoslaw Lapko + iana-admin&dspace.de +50026 + PHYSEC GmbH + Andreas Rex + it&physec.de +50027 + The Document Foundation + Florian Effenberger + info&documentfoundation.org +50028 + Siemens AG Mobility Division + Ulrich Barner + ulrich.barner&siemens.com +50029 + AGILE WEB + Benjamin MIXTE + contact&agileweb.fr +50030 + BENTELER Business Services GmbH + Roland Arendes + iana&benteler.com +50031 + Ring Central, Inc + Timothy McKee + tim.mckee&ringcentral.com +50032 + City of Edmond + Hans Schroeder + nst&edmondok.com +50033 + IT-Solutions Nuernberg + Alexander Kress + info&it-solutions-nuernberg.de +50034 + AR24 SAS + Clément Schneider + clement.schneider&ar24.fr +50035 + Identinetics IT-Services GmbH + Rainer Hoerbe + pen.iana.org&mail.identinetics.com +50036 + Cancer South Institute + Dr Hector Silva + drhsilva55&yahoo.com +50037 + Ultimate Software + Michael Taylor + mike_taylor&ultimatesoftware.com +50038 + Southeast Nebraska Hematology and Oncology Consultants + Becky Shedeed + beckys&leadingcancercare.com +50039 + ELUON + Kim Soyoun + sykim&eluon.com +50040 + MTI Systems + Wesley Eddy + wes&mti-systems.com +50041 + dust production + Anders Hannus + anders&hannus.eu +50042 + ZERO WAR (零度战争 工作室) + Kevin Yu + 2629171127&qq.com +50043 + Svanängskog AB + Arne Ytterström + svanang.skog&telia.com +50044 + STR-SpeechTech Ltd. 
+ Matt Middlebrook + mattm&speechtech.com +50045 + IT-LINE SIA + Jevgenijs Grossmans + genn&it-line.lv +50046 + The ICT Hub INC. + Zubaer Ahammed + zubaer.ceo&theicthub.com +50047 + S-Sharp Corporation + Ryan Kuo + ryan.kuo&s-sharp.com +50048 + Härnösands stift + Thomas Karnestrand + thomas.karnestrand&svenskakyrkan.se +50049 + Openfiler + Rafiu Fakunle + support&openfiler.com +50050 + JNC FZE + Albert Armand + jncrak&gmail.com +50051 + tRetail + Ben Antony + technical&tretail.net +50052 + Datang Gohighsec(zhejiang)Information Technology Co.,Ltd. + Haijie Liu + liuhaijie&gohighsec.com +50053 + AVer Information Inc. + Ming-Kang Chuang + mk.chuang&aver.com +50054 + Cherry Grass + Administrator + keith.sieman&gmail.com +50055 + Telegra + Hrvoje Prgeša + hrvoje.prgesa&telegra-europe.com +50056 + Public Joint-Stock Company "Detsky Mir" + Sergey Rusak + itsa&detmir.ru +50057 + PrimeCalc GmbH + Martin Gwerder + mgwerder&primecalc.com +50058 + RtBrick Inc. + Hannes Gredler + hannes&rtbrick.com +50059 + JSC Penza Research Electrotechnical Institute + Larisa Y. Anisimova + ontipr&pniei.penza.ru +50060 + HPC + Vincent AUDOUARD + vincent.audouard&appartcity.com +50061 + IP Directions + Gilles Bernard + gilles.bernard&ipdirections.net +50062 + Ultra Electronics – Communication and Integrated Systems + Stuart Gilbey + stuart.gilbey&c4systems.co.uk +50063 + QCR Holdings, Inc. + Michael Cave + MCave&qcrh.com +50064 + By Techdesign + Agustin Pizarro + agustin&by.com.es +50065 + PREFECTURE DE POLICE DE PARIS + didier enjaux + didier.enjaux&interieur.gouv.fr +50066 + YADOS GmbH + Daniel Schulz + daniel.schulz&yados.de +50067 + University Cancer & Blood Center, LLC + Loretta Goodson + lgoodson&universitycancer.com +50068 + JNETLABS + Jason Smith + jason&jnetlabs.com +50069 + ELIT electronics & IT + Stefan Kleck + stefan.kleck&elit.at +50070 + Network Kinetix, LLC + Jonas Haskins + Jonas&networkkinetix.com +50071 + Immae + Ismael Bouya + ismael&bouya.org +50072 + CAF Power & Automation + Mikel Korta + mkorta&cafpower.com +50073 + Integrisight, LLC + Peter Dutton + pdutton0&gmail.com +50074 + Changchun Sunny Information Technology Co., Ltd + Yu Pan + dev&sunnyit.cn +50075 + Shanghai YuanRui Industrial Co., Ltd. + Yu Pan + dev&yuanruish.cn +50076 + Real World Education Ltd + Wiremu Demchick + wdemchick&real.ac.nz +50077 + AZZURRA Engenharia de Sistemas Ltda + Mario Girasole Junior + contato&azzurraengenharia.com +50078 + Equate Technologies Pty Ltd + Fadly Tabrani + fadly.tabrani&equatetechnologies.com.au +50079 + ABT ASSOCIATES PTY LTD + IT Manager + michael.nuss&abtassoc.com.au +50080 + Do It Yourself Werkstatt Wilhelmshaven e.V. + Viktor Grosch + it&diyww.de +50081 + SMC Corporation (Product Development Division-6) + SMC-K6-THERMO-CON + k6_eg&smcjpn.co.jp +50082 + Beijing JoinusRIP Co. + Ke Meng Wang + kmwang&joinusrip.com +50083 + Norwegian Creations + OID Administrator (G) + oidmaster&norwegiancreations.com +50084 + Douanes Sénégalaises + Moussa Diouf + moussadiouf&douanes.sn +50085 + Condeco + Mark Flowers + mark.flowers&condecosoftware.com +50086 + Fircroft Engineering Services Limited + Nicholas McDermott + nick.mcdermott&fircroft.com +50087 + Comprimato Systems, s.r.o. + Martin Jirman + jirman&comprimato.com +50088 + Japanese Communist Party, Central Comittee + Kunio Kasai + admin&jcp.or.jp +50089 + Continent 8 Technologies PLC + Marcel Balan + marcel.balan&continent8.com +50090 + Floyd Arguello + Floyd Arguello + floyd.arguello&gmail.com +50091 + BrightSoftware Technologies LLC. 
+ Rustam Khamidov + ruster88&gmail.com +50092 + Ostec-SMT + Vladimir Badashkin + badashkin.v&ostec-group.ru +50093 + SCA Munksund AB + Joakim Sturesson + joakim.sturesson&sca.com +50094 + SEC 1.01 AG + Roman Gribi + tech&sec101.ch +50095 + Private-Network.com, Inc. + Maksim Erenburg + merenburg&private-network.com +50096 + Scoop Publishing Limited + Wiremu Demchick + iana-contact&scoop.co.nz +50097 + Unassigned + Returned 2017-11-01 + ---none--- +50098 + Unassigned + Returned 2017-11-01 + ---none--- +50099 + PRZEDSIĘBIORSTWO WIELOBRANŻOWE DMS BOGDAN DARZECKI MAREK MAŚLANKA S.C. + Piotr Maślanka + piotrm&dms-serwis.com.pl +50100 + energy & meteo systems GmbH + Christian Ambrass + ldap&energymeteo.de +50101 + NEOCHROM, ltd + Oleksii Serikov + ceo&neochrom.company +50102 + FATH Mechatronics GmbH + Frank Schütz + frank.schuetz&fath-mechatronics.de +50103 + TSYS + EU UNIX + euunix&tsys.com +50104 + ActivIntel Inc. + Nate Groendyk + nate&activintel.com +50105 + Candela Innovations, LC + Ryan Walker + ryan&candelainnovations.com +50106 + delgiacco medical, llc + elizabeth delgiacco + drelliesmith&gmail.com +50107 + Karlour LLC + Karl Wang + karl&karlour.com +50108 + Gebhardt Bauzentrum GmbH & Co. KG + Philip May + may&gebhardt-bauzentrum.de +50109 + Open Banking Limited + Tony Duarte + Tony.Duarte&openbanking.org.uk +50110 + Cybraics, Inc. + Robert Sanders + rsanders&cybraics.com +50111 + Samsung SDI America + Xajuan Smith + Xajuan.smith&samsung.com +50112 + Otomatica + Ahmet Kasım Şahin + ahmet.sahin&otomatica.com +50113 + AI Prime + Michael Sullivan + mike&ai-prime.com +50114 + CloudGenix + Aaron Edwards + arin-poc&cloudgenix.com +50115 + Genesis Medical Group + Vicky Titus + billing&genesisdoctors.com +50116 + Rivian Automotive, Inc. + Jedidiah Bowers + jbowers&rivian.com +50117 + Eversource Energy + PAULO SILVA + PAULO.SILVA&EVERSOURCE.COM +50118 + NZ FIBRE COMMUNICATIONS LIMITED trading as Stuff Fibre + Sam Chan + sam.chan&stuff-fibre.co.nz +50119 + Seoul National University Hospital + Youngho Kim + argonaise&snu.ac.kr +50120 + Sofco Pty Ltd + Andrew Hughes + ahughes&sofco.com.au +50121 + Root + Ervin Remus Radosavlevici + admin&root-cloud.com +50122 + ISiT OOO + Andrey Kolegov + alko&istperm.ru +50123 + Rutronik Elektronische Bauelemente GmbH + Alexander Schnitzer + alexander.schnitzer&rutronik.com +50124 + Howco + Scott McKinnie + scott.mckinnie&howcogroup.com +50125 + SERVICIOS DE RADIO WAVENET + MANUEL HERAS + mheras&wavenetradio.com +50126 + Elecard + Nikolay Milovanov + sales&elecard.com +50127 + Changchun Vocational Institute Of Technology + Yu Pan + dev&cvit.com.cn +50128 + Digiroam LLC + Grant Browne + grant.browne&digiroam.com +50129 + DEM Manufacturing + Simon Heyward + simon.heyward&alpha3manufacturing.com +50130 + Perfoware Corp. + George Maroulis + gmaroulis&gmail.com +50131 + Blacktree Technology Pty Ltd. + Joe Nevin + info&blacktree.com.au +50132 + Sonic Automotive + Scott Jernigan + bt.servers&sonicautomotive.com +50133 + GPI RAS + Alexander Samarin + sasha&lt.gpi.ru +50134 + Enika LLC + Alexander Samarin + sasha&enikasoft.ru +50135 + Markon Cooperative Inc + John Eldredge + johne&markon.com +50136 + High Sec Hosting HSDC AB + Rickard Johnsson + rille&highsechosting.eu +50137 + Greene County Circuit Court + Doug Shy + dshy&courts.mo.gov +50138 + Legrand North and Central America + John Canny + John.Canny&Legrand.Us +50139 + Trice Medical, Inc.
+ Al Intintoli + aintintoli&tricemedical.com +50140 + Thinking Objects GmbH + Raffaele Lupo + penmaster&to.com +50141 + Sundkontor + Igor Hein + igor.hein&sundkontor.de +50142 + Sunshine Coast Council + Web Master + webmaster&sunshinecoast.qld.gov.au +50143 + Universitätsmedizin Greifswald + UMG Hostmaster + umg-it-netz&uni-greifswald.de +50144 + Viz.AI + David Golan + david&viz.ai +50145 + FRRouting + David Lamparter + equinox&opensourcerouting.org +50146 + Wi-Net Telecom + Hugo De Zela + hugodz&winet.com.pe +50147 + Mateer Harbert + Bryan Fowler + bfowler&mateerharbert.com +50148 + PIXELLAB LLC + Jaiden Benz + pixellab&icloud.com +50149 + Beijing ZhongChuang Teraspek Co.,LTD + Marvin + Marvin&teraspek.com +50150 + Royole Corporation + Tuochen Lyu + Tuochen.Lyu&Royole.com +50151 + Flugatlas + PEN Administrator + pen.administrator&flugatlas.com +50152 + NEOCHROM-TRADE, ltd + Oleksii Serikov + ceo&neochrom.trade +50153 + Span d.o.o. + Jasmin Kotur + jasmin.kotur&span.eu +50154 + NU2 Systems LLC + Isac Tabib + isac&ifly51.com +50155 + Babioch + Karol Babioch + karol&babioch.de +50156 + Navigators Studentenverenging Enschede + Daniël van de Giessen + daniel&nsenschede.nl +50157 + fouss + Sébastien Fouss + sebastien&fouss.be +50158 + City of Burnsville + Ryan Huss + ryan.huss&burnsvillemn.gov +50159 + Monro Muffler Brake Inc. + Mark Kraska + mark.kraska&monro.com +50160 + YUASA-NET + Toru Yuasa + pen18&098753.xyz +50161 + it-pro-berlin.de + Evgenij Smirnov + es&it-pro-berlin.de +50162 + Sweet Hub + Yanwen Sun + sunyanwen&outlook.com +50163 + Hartmut Eilers + Hartmut Eilers + hartmut&eilers.net +50164 + STATUS INTERNET CO., LTD + Nick Chang + Nickchang&status.com.tw +50165 + IPN Solutions GmbH & Co. KG + Daniel Glaser + info&ipn-solutions.com +50166 + Aviall Services, Inc. + Lesli Hust + Netops&aviall.com +50167 + NGP VAN, Inc + Louis Levine + it&ngpvan.com +50168 + Aaltronav + ICT Manager + hostmaster&aaltronav.eu +50169 + Industrielle Alliance + Benoit Bégin + benoit.begin&ia.ca +50170 + Price International Ltd + Martin Kokalov + support&callflowlab.com +50171 + ICON Multimedia, S.L. + Rafael Hornos + rhornos&iconmm.com +50172 + ifm syntron gmbh + Andreas Gebhard + andreas.gebhard&ifm.com +50173 + DOMINAE Srl Unipersonale + Alessandro Bressan + info&dominae.com +50174 + Beekeeper Technology + Jonathan Steinert + iana&beekeeper.technology +50175 + The Department for Work and Pensions + Phil Raynor + phil.raynor&dwp.gsi.gov.uk +50176 + Mark H. Zangmeister Center + Dewana Frazier + mstromer&zangcenter.com +50177 + Nextiva + Ralph Goers + rgoers&nextiva.com +50178 + FLR Services Ltd + Lewis Richards + flr_ots&hotmail.com +50179 + Ocean Technical Systems + Lewis Richards + lrichards&oceantechsys.co.uk +50180 + MiaRec, Inc. + Gennadiy Bezkorovayniy + gb&miarec.com +50181 + pioto.org + Mike Kelly + pioto&pioto.org +50182 + Weibel Scientific A/S + Peter Adamsen + pa&weibel.dk +50183 + EIUS d.o.o. + Andrej Komelj + akomelj&gmail.com +50184 + Tech4Lyfe Co. 
Ltd + Nur Nadia Nabila Binti Mustakim + misenns&gmail.com +50185 + WH Gelsenkirchen FB Informatik + Volker Goerick + Volker.Goerick&w-hs.de +50186 + Vodafone UK + Lorraine Dryland + Lorraine.Dryland&vodafone.com +50187 + Pattern Recognition and Inteligent Systems Laboratory + Fransisco Siles + prislab.soporte&gmail.com +50188 + zkpig123.org + Kevin Zhang + zkpig123&yahoo.com +50189 + AnRobot + ZHANG XINNAN + 70331471&qq.com +50190 + CS Corporation + Tae Kun Ha + tgha&cs-holdings.com +50191 + Matra Mandiri Prima + Rommy Pardamean + rpardamean&mmp-group.co.id +50192 + SOGETEL srl + Stefano Aquino + fornitori&sogetel.it +50193 + ANA-U GmbH + Ewald Ulrich + register&ana-u.com +50194 + Deutscher Genossenschafts-Verlag eG + Harald Lukhaub + hlukhaub&dgverlag.de +50195 + Domo Tactical Communications (DTC) Ltd + Greg Hindley + greg.hindley&domotactical.com +50196 + Instituto de Física Gleb Wataghin - Unicamp + Ariel Luiz Malves + redes&ifi.unicamp.br +50197 + jedi solutions + Alex Jankuv + consultingjedi&gmail.com +50198 + Aplura, LLC + Dan Deighton + sysadmin-iana&aplura.com +50199 + Skogsbolaget Mats Broberg AB + Mats Broberg + mats&matsbroberg.se +50200 + yellowshelf + PEN Admin + pen.admin&yellowshelf.com +50201 + Cubic Telecom + Frank Monahan + FrankM&Cubictelecom.com +50202 + Southern Union Conference of Seventh day Adventists + Richard Stephenson + is&southernunion.com +50203 + Sankosha Corporation + Yuichi Takahashi + takahashi-y&sankosha.co.jp +50204 + congliu0913 Limited + Liu Cong + congliu0913&126.com +50205 + Telescent Inc. + Anthony Kewitsch + kewitsch&telescent.com +50206 + QuaeroSys UG + Johannes Braun + mail&quaerosys.com +50207 + Kerridge Commercial Systems + Neil Mulligan + neil.mulligan&kerridgecs.com +50208 + TRANSPORTS GPH + PHILIPPE GEYER + transports.gph&orange.fr +50209 + kbo-Isar-Amper-Klinikum + Nicolas Stein + postmaster&kbo.de +50210 + The Center for Cancer and Blood Disorders + Irene Rand + irand&txcc.com +50211 + Universitätsstadt Tübingen + Michael Politz + informationstechnik&tuebingen.de +50212 + HostId + Mikhail Mikhaylov + postmaster&hostid.ru +50213 + Positive Technologies + Mikhail Golovanov + migolovanov&ptsecurity.com +50214 + Alfred Kärcher GmbH & Co. KG + Ronny Buchmann + dns.admin&de.kaercher.com +50215 + FIBOIS Alsace + Cedric Luneau + cedric&fibois-alsace.com +50216 + Two Degrees Ltd + Anton Pereira + NOC&2degrees.nz +50217 + EVICERTIA (formerly 'Evidencias Certificadas SL') + Jacobo van Leeuwen + info&evicertia.com +50218 + Union Scribe + Kevin Burkhart + kburkhart&unionscribe.com +50219 + Alaska Railroad + Brian Scheid + scheidb&akrr.com +50220 + Lukas Müller + Lukas Müller + info&muellerlukas.de +50221 + Secure Edge Technologies + Jason Van der Meer + jason.vandermeer&securedge.com.au +50222 + jw4.us + John Weldon + johnweldon4&gmail.com +50223 + Changing Information Technology Inc. + Eric Wang + techsupport&changingtec.com +50224 + Shenzhen HS Fiber Communication Equipment CO.,LTD + LIU XIN + liuxin&hsgq.com +50225 + FONEX SAS + Jerome Brisset + xliu&fonex.com +50226 + APM Technica AG + Oliver Strasser + oliver.strasser&apm-technica.com +50227 + Trinecke zelezarny, a.s. + Tomas Bocek + tomas.bocek&trz.cz +50228 + NETSURFUSA INC + NETWORK OPERATIONS + noc&netsurfusa.net +50229 + Positive Resource Center + Jim Wegman + jim.wegman&prcsf.org +50230 + The Regional Municipality of York + Dima Stolarsky + dima.stolarsky&york.ca +50231 + Beijing SinoVoice Technology Co.,Ltd. 
+ lei lu + lulei&sinovoice.com.cn +50232 + WebWorks Sistemas e Redes + Marcio Calasans Rodrigues + suporte&webworks.net.br +50233 + One Identity LLC + Bob Baske + Bob.Baske&oneidentity.com +50234 + Ursaconn Technology Co., Ltd. + Aviva LI + aviva&yeastar.com +50235 + Energy Efficiency Ltd + Mikhail Saltykov + redpine&hited.ru +50236 + T 8 Publishing Technologies + Pavel Sheldyaev + ps&t8group.ru +50237 + WILO SE + Klaus Samusch + hostmaster&wilo.com +50238 + Manuel Fernandez Lopez + Manuel Fernandez Lopez + admin&fernandez-lopez.net +50239 + Crypteron + Sid Shetye + info&crypteron.com +50240 + Raycap + Ioannis Apostolou + iapostolou&raycap.com +50241 + Tom Storey + Tom Storey + tom+pen&snnap.net +50242 + SERVICE TOUS TRANSPORTS + JOEL BONETTO + bonettoj&wanadoo.fr +50243 + Panoptix CC + Stephan Buys + info&panoptix.co.za +50244 + International Aeronavigation Systems Concern, JSC (IANS) + Boris Resnick + resnick&ians.aero +50245 + van Ryck Communications LLC. + Mark de Groot + mark.degroot&vanryck.net +50246 + Oncology & Hematology Associates of West Broward PA + Jacwen Jones + suzie.jones&ohawb.com +50247 + Techbase IT Solutions + Mark Breitkreutz + domains&techbase.ca +50248 + mr-s.run + Marc Vogler + vogler.marc&t-online.de +50249 + five-nines + Yuji Yasuda + drhouse201009&yahoo.co.jp +50250 + ErvoCom Engineering AG + Fabian Vogt + Fabian.vogt&ervocom.ch +50251 + TMM Software + Serge MASSOT + s.massot&tmm-groupe.com +50252 + Web Benefits Design Corporation + Christian Clay Columba Campbell + christian.campbell&wbdcorp.com +50253 + Electronic Transaction Consultants Corporation + Alessandro Stagni + astagni&etcc.com +50254 + JPoD + Robert Wuttke + pen_iana_org&jpod.cc +50255 + PeerPair + Bruce Fitzsimons + bruce&fitzsimons.org +50256 + Infinite Solutions LLC + Brian Gordon + bgordon&infinitesolutionsllc.com +50257 + NSD Corporation + UNE RYOSUKE + une&nsdcorp.co.jp +50258 + Northwestern Local Schools + John Hermes + hermesj&nwlschools.org +50259 + Citra Health Solutions + Eric DeVaudreuil + eric&citrahealth.com +50260 + Byteflux.io + Robert Westman + robert&byteflux.io +50261 + Verafin + Alain O'Dea + alain.odea&verafin.com +50262 + Razzleberries AB + Thomas Guimbretiere + tguimbreti&gmail.com +50263 + Tuveri + Nicola Tuveri + nic.tuv&gmail.com +50264 + PINTU PASWAN + PINTU + pk2479073&gmail.com +50265 + SAS TRANSPORT NOEL + GILLES NOEL + gilles.noel4&wanadoo.fr +50266 + 5xS Security Consulting + Ernst Giessmann + giessmann&informatik.hu-berlin.de +50267 + Credit Agricole Technologies et Services + GARIBALDI Francois + POLITIQUES.SIGNATURE.ELECTRONIQUE&ca-ts.fr +50268 + BOUVERY COMBUSTIBLES + MANUEL BOUVERY + combustibles.bouvery&wanadoo.fr +50269 + AKIO SOFTWARE + Thibaut Gaspaillard + tgaspaillard&akio.com +50270 + Software Logistics / ABN AMRO + Fred Jonkhart + fred.jonkhart&nl.abnamro.com +50271 + Salam International + IT Manager + it.manager&salaminternational.com +50272 + QInnovate + CIO Office + cio&qinnovate.biz +50273 + Diginosis, Inc. + Ken Smith + ksmith&diginosis.com +50274 + TRANSPORTS KIEFFER SARL + CLARISSE KIEFFER + transports.bois.kieffer&orange.fr +50275 + Granicus Systems Limited + Robert Henderson + postmaster&granicus-systems.co.uk +50276 + Strikr + Saifi Khan + saifi&strikr.in +50277 + all-your-assets + Thomas Tannhäuser + thomas.tannhaeuser&all-your-assets.de +50278 + ColorTokens, Inc. 
+ Bharat Sastri + bharat.sastri&colortokens.com +50279 + Magrathea Technologies + Lamar Hansford + lamar.hansford&magrathea-tech.net +50280 + Naveria, LLC + Jacob Greenfield + xales&naveria.com +50281 + whateverany.com + Jeff Brown + news&whateverany.com +50282 + S.A.S. TRANSPORTS CLAUDEL + Anthony Claudel + transports.claudel88&gmail.com +50283 + HovNet-SERVIS s.r.o. + Radomir Luza + info&hovnetservis.cz +50284 + Autonomous Non-Commercial Organization of higher education “INNOPOLIS UNIVERSITY” + Igor Loginov + it&innopolis.ru +50285 + Logikbar, LLC + Randy Hall + randy&logikbar.com +50286 + GRUPO LA NOGALERA SA DE CV + Jesus Chavez + sistemas&lanogalera.biz +50287 + Webellian + Jacek Hewko + j.hewko&webellian.com +50288 + fibrisTerre Systems GmbH + Andrea Carradori + iana-contact&fibristerre.de +50289 + aapala.fi + Esa Ääpälä + aapala&gmail.com +50290 + Supplyframe, Inc. + Leon Torres + ltorres&supplyframe.com +50291 + Infomicro Comunicaciones + Carlos Heredia + carlos.heredia&infomicro.es +50292 + Jenoptik AG + Martin Räthe + martin.raethe&jenoptik.com +50293 + Selfnet e.V. + Selfnet Administrators Team + admin&selfnet.de +50294 + Canreer + FAN QINGJUN + service&anrobot.org +50295 + Melbourne Grammar School + IT Technical Services + certificates&mgs.vic.edu.au +50296 + jSCM Solucoes Empresariais + Diogo Soares + diogo.soares&jscm.eti.br +50297 + WERNER IT-SYSTEME + Andreas Werner + mail&werner-it.net +50298 + Paper 4 Print + Graham Kinchin + graham.kinchin&paper4print.com +50299 + effexx Telekommunikation GmbH + Daniel Hetzel + IT&effexx.com +50300 + SIGNAL IDUNA Gruppe + Fabian Schlieckau + fabian.schlieckau&signal-iduna.de +50301 + Personal + Rahul Chauhan + crahul10101991&gmail.com +50302 + ACIN iCloud Solutions Lda + Sandra Fernandez + sandra.fernandez&acin.pt +50303 + Meadow + Boris van Es + boris.van.es&vionfood.com +50304 + OnePacs LLC + Thomas Bryce + info&onepacs.com +50305 + inkClub Development AB + Erik Ellingsen + erik.ellingsen&inkclub.com +50306 + roselarsen.dk + Johnny Rose Larsen + johnny&roselarsen.dk +50307 + Laureate Education Services Australia + IT Manager + pen&laureate.net.au +50308 + YazamTech + Elina Abuliak + elina&yazamtech.com +50309 + ITALIAN INSTITUTE FOR GENOMIC MEDICINE (IIGM) + Sabrina Bertinetti + sabrina.bertinetti&iigm.it +50310 + BridgeHead Software + Mark Hodgson + mark.hodgson&bridgeheadsoftware.com +50311 + INSITE S.A.S. + DAVID MORA + dmorar&insite.com.co +50312 + Tennessee Cancer Specialists + Jeremy Hall + itdepartment&tncancer.com +50313 + eVestment + Joshua McCorkle + corporateit&evestment.com +50314 + Bracket Computing, Inc. + IANA contact + iana-contact&brkt.com +50315 + Harborside Condominium Owners Association + Mark Kendrick + sysadmin&hscondo.net +50316 + Modbus Organization, Inc. + Rudy Belliardi + rudy.belliardi&schneider-electric.com +50317 + RNK SERVICOS + FRANCISCO DE PAULA LIMA FILHO + francisco.lima&rnkgroup.com.br +50318 + TrustFactory(Pty)Ltd + Jayesh Nana + jayesh&isolvtech.com +50319 + POCABAR GmbH + Wolfgang Mair + w.mair&pocabar.de +50320 + Engineers Gate + Robert Cherry + ssl-admin&engineersgatelp.com +50321 + Trimark Limited + Joakim Gezelius + joakim&trimarkgaming.com +50322 + Mondragon Goi Eskola Politeknikoa Jº Mª Arizmendiarrieta S. Coop. 
+ Arantxa Manterola + sistemak.mgep&mondragon.edu +50323 + Santander Consumer USA Holdings Inc + Daniel Cervera + dacervera&santanderconsumerusa.com +50324 + Quatius Limied + Simon Wu + simon.wu&quatius.com +50325 + The Candystripe Company + Dominic Martin + dm&thecandystripecompany.com +50326 + Xton Technologies + Mark Klinchin + mklinchin&xtontech.com +50327 + Funding Circle Limited + Marcus Berglof + devops&fundingcircle.com +50328 + asap + Stephan Kalvelage + skalvelage&freenet.de +50329 + Druid Software + Olivier Deme + odeme&druidsoftware.com +50330 + Microart ltd. + Kirichenko Timur + sale&microart.ru +50331 + Rennschmiede Pforzheim e.V. + Nils Siegle + admin&rennschmiede-pforzheim.de +50332 + Motorola Solutions Australia Pty. Limited + Chris Blaker + chris.blaker&motorolasolutions.com +50333 + SamKnows Ltd + Russell Knighton + russell&samknows.com +50334 + E.on Värme Sverige AB + Annelie Mossberg + annelie.mossberg&eon.se +50335 + Security Bank of KC + William Girnius + itdept&securitybankkc.com +50336 + MFR Stenay + Vincent DEFERT + mfr.stenay&mfr.asso.fr +50337 + New Hampshire Oncology-Hematology, PA + Eliza Browne + e.browne&nhoh.com +50338 + North Pole Engineering + Jim Meyer + jimm&npe-inc.com +50339 + Satellite Signature + Kevin O'Neil + kevin&satsignature.com +50340 + Ares-Tech + Capparucci Francesco + francesco.capparucci&ares-tech.it +50341 + Habitech + Martin Krohn + ianaoid&krohn.com.ar +50342 + Telexir + Vladimir Romanov + vromanov&telexir.com +50343 + SpaceCircuits + Adtihya Tanjavur + adithya&spacecircuits.com +50344 + Häradskog i Örebro AB + Ida Prejer + ida.prejer&haradskog.se +50345 + Panopta + Jason Abate + jason&panopta.com +50346 + Centerpoint Networks + Don McGrath + notices&centerpointnetworks.net +50347 + Muquans + Mathieu Corradini + mathieu.corradini&muquans.com +50348 + VCP Land Schleswig-Holstein + Jan-Hendrik Garber + internet&vcp.sh +50349 + Cubic Global Defense + Configuration Management + scm_cda&cubic.com +50350 + Epikur Software & IT-Service GmbH & Co. KG + Bernhard Hertel + info&epikur.de +50351 + Love That Collar + Jeff Frederick + jeff&jfrederick.com +50352 + jfrederick.com + Jeff Frederick + jeff&jfrederick.com +50353 + Secure-U + Jeff Frederick + info&secure-u.org +50354 + Sonalysts, Inc. + IT Department + postmaster&sonalysts.com +50355 + Bit-hewn Technologies Ltd + Oliver Bunting + oliver&bithewn.co.uk +50356 + Fenrir Inc. + Nobuhiro Ito + reg.iana&fenrir.co.jp +50357 + AmeriTech Energy Corporation + David Wise + david.wise&AmeriTechEnergy.com +50358 + Sri Sai Communications Pvt.Ltd., + Dr.Pakala Madhusudan Rao + info&srisaipl.com +50359 + Synthesa Chemie Gesellschaft m.b.H. + Synthesa Informatik - Alfred Palkoska + edv&synthesa.at +50360 + Lumacron Technology Ltd + Kevin Robertson + krobertson&lumacron.com +50361 + Transport Limon + Laurent Limon + regis.dirian&siatbraun.fr +50362 + iNETSTABLE.cz + Jaroslav Urbanek + urbanek&inetstable.cz +50363 + 915ers + Him Yeung + Felix&915ers.com +50364 + TRANSPORTS GIGOUX + SABINE GIGOUX + transports.gigoux&wanadoo.fr +50365 + TRANSPORTS GIGOUX + SABINE GIGOUX + transports.gigoux&wanadoo.fr +50366 + Paratus AMC Limited + Luke Flack + luke.flack&paratusamc.co.uk +50367 + EJIE + Juan Ignacio uralde Moro + ji-uralde&ejie.eus +50368 + Open Cosmos LTD + Thomas Parks + thomas&open-cosmos.com +50369 + IQHQ + Steve Smith + steven&iqhq.co.uk +50370 + RadioKit Ltd + Marcin Lewandowski + admin&radiokit.org +50371 + Deliverik Software Co. Ltd.
+ Thomas Xu + xuzheng&deliverik.com +50372 + Data Vision S/A + Luis Ernesto + engsoft&datatraffic.com.br +50373 + Serpent77 Networks + Sean Sterling + housebillz&serpent77.com +50374 + Vlasnet Enterprises Beheer BV + Jan Vlastuin + security&vlasnet.nl +50375 + Landesmedienzentrum Baden-Wuerttemberg + Soo-Dong Kim + kim&lmz-bw.de +50376 + HUENGSBERG GmbH + Werner Huengsberg + wah&DAXware.onMicrosoft.de +50377 + Ministerio de Economía de la República de El Salvador + Oscar Humberto Cruz Guardado + firma.electronica&economia.gob.sv +50378 + Mendel Warshawsky MD + Mendel Warshawsky + drwarshawsky&optimum.net +50379 + Sebastian Rauch Glasfasertechnik + Sebastian Rauch + sebastian.rauch&rauch-systems.com +50380 + iDAvatars + Neil Lamoureux + io&idavatars.com +50381 + Trust Designer + Jérôme Dusautois + jdusautois&trustdesigner.com +50382 + Apeiron Data Systems, Inc. + Vijay Shrivastav + vijay&apeirondata.com +50383 + Radiofit + Sergey Kustov + info&radio-fit.com +50384 + Zotter Schokoladen Manufaktur GmbH + Michael Zotter + michael&zotter.at +50385 + Maestro Digital Mine + Ashton Gobbo + ashton.gobbo&maestrodigitalmine.com +50386 + SigScale Global Inc. + Vance Shipley + vances&sigscale.org +50387 + lenart.io + Janos Lenart + janos&lenart.io +50388 + TRANSPORTS GEYER & CIE + PIERRE GEYER + pierre.geyer&wanadoo.fr +50389 + VIZENTEC S/A + Luis Ernesto + engsoft&datatraffic.com.br +50390 + Redjack + Greg Virgin + admin&redjack.com +50391 + Dallas Makerspace + Infrastructure Committee + Infrastructure +50392 + Eles + Dmitry Dyogtev + eles&eles.ru +50393 + HongKong JoyTelecom Co., Limited + Hongwu LU + hongwu.lu&joytelecom.com +50394 + Conrad Electronic SE + Peteratzinger Thomas + dvhotline&conrad.de +50395 + VARTA Consumer Batteries GmbH & Co. KGaA + Stefan Kemmler + stefan.kemmler&eu.spectrumbrands.com +50396 + FSUE "State ATM Corporation", branch "Aerocontrol" + Mikhail Mikhaylov + it&aero.gkovd.ru +50397 + Shanghai YuHuan Information System.,Ltd + Hongwu LU + hongwu.lu&joytelecom.com +50398 + Asseance Software Inc. + Ron Christie + ron&asseance.com +50399 + Association of German Transport Companies (VDV) + Berthold Radermacher + radermacher&vdv.de +50400 + Frogen International Limited + Tuddy Ma + administrator&frogen.com +50401 + VBM / Veri Bilgi Merkezi Bilişim Hiz. Ltd. + Ahmet Aylin Öztürk + ahmet.ozturk&vbm.com.tr +50402 + Radio Belle Vue + MATHIEU Michael + michael&radiobellevue.fr +50403 + BACKFACTORY GmbH + Carl Kiendl + admin&backfactory.de +50404 + Tata Instatitute of Fundamental Research Hyderabad + Suman Saurav + suman&tifrh.res.in +50405 + Tasmanian Alkaloids Pty Ltd + Richard Loone + dl-it&tasalk.com.au +50406 + Vietnam Post and Telecommunication industry technology joint stock company (VNPT Technology) + Ly Quoc Chinh + chinhlq&vnpt-technology.vn +50407 + BERNHARD CHRISTIAN + CHRISTIAN BERNHARD + transportcgb.christian&gmail.com +50408 + DUTRIEUX ROGER + DUTRIEUX ROGER + vero.dutrieux&gmail.com +50409 + Australasian Audio Engineering (NZ) Ltd. + Rob Inskeep + snmp&aae.co.nz +50410 + Goliath Technologies + Raja Jadeja + corpit&goliathtechnologies.com +50411 + Soprani.ca + Denver Gingerich + denver&soprani.ca +50412 + Superna + Andrew MacKay + andrew.mackay&superna.net +50413 + UB330.net d.o.o. 
+ Uros Gaber + uros&ub330.net +50414 + Weka.IO + Zohar Zilberman + zoharz&weka.io +50415 + Challow + Kieron Mohindra + kmohindra&challow.net +50416 + Gabriel Guldner + Gabriel Guldner + gabriel&guldner.eu +50417 + Maricopa County + Enterprise Data Center + edcsteam&mail.maricopa.gov +50418 + MetsTech Pty Ltd + Chris Martin + chris&martin.cc +50419 + Bubtaina Group Limited + Mohamed Ali Bubtaina + mohamed&bubtaina.com +50420 + earthledger + East Xiao + xiaodong&earthledger.com +50421 + SEIKO ELECTRIC CO.,LTD + Hideo Harada + hideo-harada&seiko-denki.co.jp +50422 + philisense + Xinjun Cao + ceshi&philisense.com +50423 + amazingrex + Jason Broune + luhongwei&dragonest.com +50424 + Netberg + Alice Yew + alice.yew&netbergtw.com +50425 + MKSS Thailand Co., Ltd. + Bunphot Kliaphuangphit + bunphot&mkssthailand.com +50426 + National Library of Greece + Dimitris Zygoukis + zygoukis&nlg.gr +50427 + KIEHL FRERE + DENIS BOELHER + regis.dirian&siatbraun.fr +50428 + TRANSPORTS HOLZ + VERONIQUE HOLTZ + regis.dirian&siatbraun.fr +50429 + TWT + THIERRY WAYTTEYNE + regis.dirian&siatbraun.fr +50430 + RIEDLINGER SARL + VERONIQUE RIEDLINGER + regis.dirian&siatbraun.fr +50431 + DERSEN Sp. z o.o. + Marcin Woźniak + m.wozniak&dersen.com.pl +50432 + GE Power + zhi Shi + Zhi.shi&ge.com +50433 + IBE - Institute for Medical Information Processing, Biometry and Epidemiology + Nikolaus von Bomhard + bomhard&ibe.med.uni-muenchen.de +50434 + Tieto Great Britain + Stephen Franks + stephen.franks&tieto.com +50435 + jingjiamicro + Wanhui Zeng + jingjiasoft&jingjiamicro.com +50436 + NM2 S.r.l. + Giuseppe Aceto + aceto&nm-2.com +50437 + ZhongXin Cybersecurity Co.,Ltd. + bing jiang + jiangbing&cnzxsoft.com +50438 + TRANSPORTS CONRAD + ERIC CONRAD + conrad-eric&wanadoo.fr +50439 + Discovery Digital + Igor Brezac + dp-dev-sysarch&discovery.com +50440 + RPWB + Dan Jost + postmaster&rpwb.com +50441 + Inventure + Andrew Harness + andrew&jtdenterprisesinc.com +50442 + Saransh + Saransh Srivastava + saransh.311&gmail.com +50443 + BEL Ministerie van Landsverdediging + Moonen Jef + MRCI-CIS-C-DMZ-DL&mil.be +50444 + TRANSPORTS VINCENT + EMILIE WENTZEL + tvsimon&wanadoo.fr +50445 + Harmony Lab + Harry + lead.harmony.lab&gmail.com +50446 + RVA Lighting and Masts + Network Administrator + admin&rvalm.com +50447 + Schilliger Bois SAS + Vincent Misslin + vincent.misslin&schilliger.fr +50448 + ROPP PIERRE ET CIE TRANSPORTS + GILBERT BOSSERT + gil572&wanadoo.fr +50449 + Asseco-SEE DOOEL Macedonia + Zarko Kostadinovski + zarko.kostadinovski&asseco-see.mk +50450 + CarePool Hannover GmbH + Andreas Werner + a.werner&carepool.de +50451 + LMMR Technologies Ltd + Lee McLoughlin + lee&lmmrtech.com +50452 + SOVEN + Grégori ROGER + gregori.roger&engie.com +50453 + GSD Healthcare FZ LLC + Francesco Magro + it&gsdhealthcare.ae +50454 + 飞利信 (PHILISENSE) + 曹昕军 (Xinjun Cao) + ceshi&philisense.com +50455 + aptico GmbH + Tim Skopnik + oid-admin&aptico.de +50456 + Myers Industries Inc + Jackie Wanner + ialerts&myersind.com +50457 + SMJG e.V. + Alex Böhm [jwacalex] + jwacalex&gmx.net +50458 + VIAA + Herwig Bogaert + herwig.bogaert&viaa.be +50459 + Leonardo DRS, Inc. 
+ IT Architects + itarchitects&drs.com +50460 + Codeo Soluções em Tecnologia + Rafael Gomes + rafael.gomes&e-storageonline.com.br +50461 + Eastcompeace + Lu Wang + wanglu&eastcompeace.com +50462 + bredent GmbH & Co.KG + Tobias Moser + Tobias.Moser&bredent.com +50463 + TRANSPORTS PERRIN + ANGELIQUE PERRIN + regis.dirian&siatbraun.fr +50464 + VEM Group + Rene Platzk + it_lizenz&vem-group.com +50465 + intelliCard Solutions AG + Thomas Weber + thomas.weber&intellicard.ch +50466 + ProBoards, Inc + Martyn Dale + martyn&proboards.com +50467 + iD Mobile + John Vesey + MVNOOperations&iDMobile.co.uk +50468 + MEDIAEDGE Corporation + Takeshi Kamai + kamait&mediaedge.co.jp +50469 + Bytepimps + Michael Brown + ronin.crip&gmail.com +50470 + Piston Intelligence Co.,Ltd + Chris + develop&pistonint.com +50471 + CISPA - Center for IT Security, Privacy and Accountability + Christoph Hirtz + admin&cispa.saarland +50472 + Rugged Logic, Inc. + Philip Wolfe + pwolfe&rugged-logic.com +50473 + HealthCare Relations Co., Ltd. + Keisuke Karaushi + hcr-oid&hcr.co.jp +50474 + Canadian Tire Corporation, Limited + DNSadmin CTC + dn.administration&cantire.com +50475 + Wozavez Consulting Ltd. + Andras Karasz + andras.karasz&wozavez.com +50476 + Roga & Kopyta + Ostap Bender + nicembox&gmail.com +50477 + Deli Group + Jiang Dong + jiangdong&nbdeli.com +50478 + St. John's School + Richard Tang + tech.support&stjohns.bc.ca +50479 + Grepit AB + Sebastian Larsson + sebastian&grepit.se +50480 + Broker Consulting, a.s. + Vojta Berescak + vojtech.berescak&bcas.cz +50481 + Katharina Kasper Gruppe + Achim Pfeiffer + webmaster&dernbacher-gruppe.de +50482 + A.N. Belozersky Institute Of Physico-Chemical Biology + Alexander Romanov + alerom&belozersky.msu.ru +50483 + SK Versicherung AG + Henning Grote + grote&sk-versicherung.at +50484 + Geely universty + 习天一 (Xi Tianyi) + 993150929&qq.com +50485 + Vodafone GmbH + Holger Jacobs + holger.jacobs&vodafone.com +50486 + Vlaamse Maatschappij voor Sociaal Wonen + Peter Rousseau + peter.rousseau&vmsw.be +50487 + Municipio de Caxias do Sul + Fernando Brombatti + infra&caxias.rs.gov.br +50488 + AW2S + Mr David ARNAUD + contact&aw2s.com +50489 + Beveridge Williams & Co Pty Ltd + Ian Macnaughtan + macnaughtani&bevwill.com.au +50490 + Yunhe Enmo(Beijing)Technology Co.,LTD + Kai Deng + kai.deng&enmotech.com +50491 + Covond Digital Communications Tec.,Ltd + Wei Sun + sunxikui&covond.com +50492 + Reygers Systemhaus GmbH + Michael Reygers + info&reygers.de +50493 + The Wind in the Sail, LLc + william sheldon + william.sheldon&windinsail.com +50494 + Danske Spil A/S + Jesper Tjørnlund + jest&danskespil.dk +50495 + Ceph + Sage Weil + sage&newdream.net +50496 + Sidean SRL + Francisco Villegas + francisco.villegas&sidean.com +50497 + 4myhealth + Martin Exner + me&4myhealth.at +50498 + Inalasys Technologies + Christo Steyn + christo.steyn&inalasys.co.za +50499 + Delta Engineers, Architects, & Land Surveyors, DPC + Colleen Mulrooney + cmulrooney&delta-eas.com +50500 + NCSCCS Intelligence + James Burton + jb&0.me.uk +50501 + Pos Digicert Sdn Bhd + Muhammad Faris Bin Zainal Abidin + muhammadfaris&digicert.com.my +50502 + HYPR Biometric Security + Enterprise Infrastructure + entint&hypr.com +50503 + Federal Way Schools + Noah Keables + nkeables&fwps.org +50504 + XtraTrust, Inc. 
+ Sheky Cheung + sheky&xtratrust.com +50505 + Mystia.org + OID Administrator + oidmaster&mystia.org +50506 + CGI + Magnus Forsell + magnus.forsell&cgi.com +50507 + Royal National Lifeboat Institution + Nick Royley + nick_royley&rnli.org.uk +50508 + GrowingSpace + Justin Kaufman + jkaufman&growing.space +50509 + Fidelity Bank + Andrew Dodd + andrew.dodd&lionbank.com +50510 + Procubed Inc + Rakesh Kumar + rakesh.kumar&procubedinc.com +50511 + Stordata Sverige AB + Niklas Nord + niklas.nord&stordata.se +50512 + Reisenett AS + Carl Petter Sky + sky&reisenett.no +50513 + coseos.com + Thorsten Lorenz + thorsten.lorenz&coseos.com +50514 + Royal Automobile Club of Tasmania + Justin Delpero + j.delpero&ract.com.au +50515 + Smart Study Co., Ltd. + Jooncheol Park + joongom&smartstudy.co.kr +50516 + Cubresa + Bob Schellenberg + bschellenberg&cubresa.com +50517 + Reinform-Int + Arcady Gryzunov + agryzunov&reinform-int.ru +50518 + YOUNG Develpment Co. + Ian Lee + admin&youngdevelop.com +50519 + NetCologne GmbH + Ingo Jentzsch + ijentzsch&netcologne.de +50520 + Bundesdruckerei GmbH + Bjoern Schuette + bjoern.schuette&bdr.de +50521 + Zentraler IT-Dienstleister der Justiz des Landes Brandenburg + Matthias Ernst + IANA&Zenit.justiz.brandenburg.de +50522 + Kabel Premium Pulp & Paper GmbH + Volker Schmidt + volker.schmidt&kabelpaper.de +50523 + Zynga + Sam Dockery + it-ops&zynga.com +50524 + Provision Data Systems Inc + Ian McLaughlin + ian&provisiondata.com +50525 + DRTECH + Jongwoo Kim + jwkim&drtech.co.kr +50526 + Creative Thinking Innovation Technology + Putta Khunchalee + putta.k&ctit.co.th +50527 + Diaverum Sweden AB + Tim Nilsson + Tim.Nilsson&Diaverum.com +50528 + Toriv AB + Oscar Virot + oscar.virot&toriv.com +50529 + Lonza AG + Williner Willi + willi.williner&lonza.com +50530 + R3 + Mike Hearn + mike&r3.com +50531 + Freedom Holdings, LLC dba Freedom Graphic Systems + PEN Management + penmanagement&fgs.com +50532 + BuFaTa Elektrotechnik + Dominik Rimpf + dominik.rimpf&bufata-et.de +50533 + Samaritas + Bill Cauley + bcaul&samaritas.org +50534 + Assurance Technology Corporation + Peter Donovan + donovan&assurtech.com +50535 + Svpribor Ltd. + Vitaly Stolyarov + svitaliy77&mail.ru +50536 + iXsystems + Kris Moore + kris&ixsystems.com +50537 + TeleCommunications Systems, Inc Enterprise Technologies Division + Gene Breshears + gene.breshears&comtechtel.com +50538 + Data Systems International, Inc. + Omar A. Vega + omar.vega&dsiglobal.com +50539 + FNT + Oliver Lindner + Oliver.Lindner&fntsoftware.com +50540 + FieldComm Group, Inc. + Stephen Mitschke + smitschke&fieldcommgroup.org +50541 + San Jose FIRST Robotics Club + Matthew Cooper + popcornpenguins17&gmail.com +50542 + AC&E Pty Ltd + Hamish McKinlay + it&acande.com +50543 + Ambedded Technology Co., LTD. + Aaron Joue + aaron&ambedded.com.tw +50544 + Intrinsic ID B.V. + Roel Maes + roel.maes&intrinsic-id.com +50545 + Maxell, Ltd. + Hiroyuki Urata + hiroyuki-urata&maxell.co.jp +50546 + Burton Neil & Associates, P.C. + Hector Rivera + hector.rivera&burt-law.com +50547 + DXWash LLC + Daryl Washington + dxw&dxwash.com +50548 + metraTec GmbH + Klaas Dannen + info&metratec.com +50549 + Inapa + João Miguel Santos + joao.santos&inapa.com +50550 + Beijing Tengling Technology Co.,Ltd. + Zisong Wang + wangzisong&bjtengling.com +50551 + Elara Software GmbH + Stefan Scheffler + ssc&elara-it.com +50552 + SARA ELECTRONIC INSTRUMENTS SRL + MAURO MARIOTTI + info&sara.pg.it +50553 + PRIMARK Limited + Zlatko Krastev + zkrastev&primark.ie +50554 + DigiCAP Co.,Ltd. 
+ SungHeun Oh + shoh&digicaps.com +50555 + Laureate Education Services Australia + Jiri Kosar + pen&laureate.net.au +50556 + ukgov.cloud + peter wild + postmaster&salford.gov.uk +50557 + ictservices.co.uk + peter wild + postmaster&salford.gov.uk +50558 + Northrop Grumman Litef GmbH + Harald Fischer + fischer.harald&ng-litef.de +50559 + Alex Lambert + Alex Lambert + alex&alexlambert.com +50560 + infra fuerth gmbh + Dierk Ebert + sit&infra-fuerth.de +50561 + Agroprombank CJSC + Andrey Zvyagin + andrey&agroprombank.com +50562 + Powersmiths International Corp. + Piotr Grudzinski + piotr&powersmiths.com +50563 + Council Rock Enterprises LLC + Jairo Hernandez + jhernandez&council-rock.com +50564 + MaxiMedia + Maxi Hartig + pen&mmwebsites.de +50565 + CohuHD Costar LLC + Neil A Alan, Software Engineering Manager + nalan&cohuhd.com +50566 + Pitt County Schools + Jeffrey Smith + jsmith&pitt.k12.nc.us +50567 + RF Optic LTD + David Gabbay + davidG&RFOptic.com +50568 + The University of Texas Rio Grande Valley + Joseph Banda + joseph.banda&utrgv.edu +50569 + Cookiejar Technologies Pvt. Ltd. + Manoj Alandkar Ajit + manoja&cookiejar.co.in +50570 + WoTrus CA Limited + Richard Wang + admin&wotrus.com +50571 + Erste Group IT International + Martin Rublik + martin.rublik&erstegroup.com +50572 + CHU de Québec + Pierre-Emmanuel Turcotte + pierre-emmanuel.turcotte&mail.chudequebec.ca +50573 + Ram Tool Construction Supply Co. + David Lowery + david.lowery&ram-tool.com +50574 + R3 Continuum, LLC + Zachary Burkum + zachary.burkum&r3continuum.com +50575 + Synchrony Financial + Troy Rindy + Troy.Rindy&synchronyfinancial.com +50576 + TrustComm Inc + Jon Dubose + jon.dubose&trustcomm.com +50577 + West Cancer Center + Audrey Cordray + acordray&westclinic.com +50578 + Solid State Network Solutions + Matthew Petricone + matt&solidstate.solutions +50579 + Seckure, LLC + Joseph Arnold + joseph.arnold&seckure.com +50580 + International Integrated Systems, Inc. + Chris Kung + chris.kung&iisigroup.com +50581 + MHOLGUIN + Marino Holguin + marinoholguin&gmail.com +50582 + U-NMS + Kookmin University, SCRC, Hamdamboy Urunov, Soo-Hyun Park + hamdamboy.urunov&gmail.com +50583 + Omnipresent Friki Enviroment + Jorfran Alexander Belloso Medina + zonafrikica&gmail.com +50584 + Indiana University, Radiology and Imaging sciences + Sundar Paramasivam + mparamas&iupui.edu +50585 + Core-Mark International + Ken Merrigan + it&core-mark.com +50586 + Civiq Smartscapes LLC + Ilan Rozenblat + ilan.rozenblat&civiq.com +50587 + Cloudwise SRL + Bogdan Bocse + bogdan&cloudwise.ro +50588 + Datapath Limited + Richard Smith + richard.smith&datapath.co.uk +50589 + Xorble + David Hoyle + david_j_hoyle&hotmail.com +50590 + RunSmart + Matt Hamende + mhamende&runsmart.io +50591 + SCIERIE ET CAISSERIE DE STEINBOURG + SCHNEPF ALEXANDRE + schnepf.alexandre-scs&wanadoo.fr +50592 + OOO "OTZVUK"/ООО "ОТЗВУК" + Aleksey Volchkov + volchkov&otzvuk.spb.ru +50593 + FSI (FM Solutions) Ltd + IT Support + it&fsi.co.uk +50594 + Peregrine Labs LLC + Jesse Miller + jmiller&peregrine-labs.com +50595 + AtFCyber Inc + Dennis Glatting + oid&pki2.com +50596 + VisageCloud + Bogdan Bocse + bogdan&visagecloud.com +50597 + GnomeLabs + Frank Robertson + techcontact&gnomelabs.com +50598 + stulz technology integration limited + SIMON GARDNER + SIMON.GARDNER&STULZ-TI.CO.UK +50599 + Bank Millennium SA + Grzegorz Popowski + grzegorz.popowski&bankmillennium.pl +50600 + Radium + Anton Kasimov + root&radium.group +50601 + Richard M. Hicks Consulting, Inc. 
+ Richard Hicks + rich&richardhicks.com +50602 + Impact Networking + Douglas Gamache + dgamache&impactnetworking.com +50603 + Discovery Bank + Marcus Portmann + marcusp&discovery.co.za +50604 + ss7.io + Stanislav Poroshin + sp&ss7.io +50605 + ChinaNetCenter-Xiamen R&D center + Qiu Nianting + qiunt&wangsu.com +50606 + TSEP + Johannes Klein + administration&tsep.com +50607 + ELVEES NeoTek, JSC + Nikita Bobkov + iana&elveesneotek.com +50608 + Taylor Fresh Foods Inc. + Kevin Forni + support&taylorfarms.com +50609 + walker + 邓齐 (Deng Qi) + 13142285368&163.com +50610 + Uppsala Akademiförvaltning + Anders Söderström + anders.soderstrom&uaf.uu.se +50611 + Laban + Laban Wang + laban.l.wang&gmail.com +50612 + South Hadley Public Schools + Kyle Sodano + ksodano&shschools.com +50613 + RABBAH SOFT SARL + RABBAH Mahmoud Almostafa + contact&rabbahsoft.ma +50614 + St Augustine's College-Sydney + Svetlana Mazur + smazur&saintaug.nsw.edu.au +50615 + Heldküchen Möbelfabrik GmbH + Michael Reitmeier + michael.reitmeier&held-moebel.de +50616 + Huorong + Jianye Li + lijianye&huorong.cn +50617 + JIT Solutions Sp. z o.o. + Lukasz Ziolkwsoki + lukasz.ziolkowski&jitsolutions.pl +50618 + Enghouse AG + Patrick Kusseneers + patrick.kusseneers&enghouse.com +50619 + Bedford Borough Council + Craig Hulland + craig.hulland&bedford.gov.uk +50620 + UniTesS + Maxim Barodzka + unitess_OID_admin&rambler.ru +50621 + Golage Inc + Furkan CONTAR + furkancontar&golage.com +50622 + EQS Group AG + Osman Durrani + osman.durrani&eqs.com +50623 + South Jersey Industries + NOJAN EMAD + nemad&sjindustries.com +50624 + Baxter Healthcare Products + Stephan Romano + stephan_romano&baxter.com +50625 + Agari Data, Inc. + Neil Chazin + nchazin&agari.com +50626 + Concentric Media Sdn Bhd + Aaron Tan + aaron&nuavox.com +50627 + Rosenberger Hochfrequenztechnik GmbH & Co KG + Christian Janssen + christian.janssen&rosenberger.de +50628 + Imopetro, SA. + Boris Wiethoff + boris.wiethoff&ebs.co.mz +50629 + e-business systems, SARL + Boris Wiethoff + boris.wiethoff&ebs.co.mz +50630 + Compal Electronics Inc. + Benson Wang + Benson_Wang&compal.com +50631 + Latvijas Republikas Ārlietu ministrija + Kārlis Akmens + Karlis.Akmens&mfa.gov.lv +50632 + Anyware LTD + Giogi Gogishvili + ggogishvili&anyware.ge +50633 + LPixel Inc. + Antoine Choppin + choppin&lpixel.net +50634 + LLC NPO UralNash + Tolkachev Aleksander + Tolkachev.cnt73&yandex.ru +50635 + Gunnar Beutner + Gunnar Beutner + gunnar&beutner.name +50636 + FELA Management AG + Severin Birrer + severin.birrer&fela.ch +50637 + Squadra Group + Sergey Alekseev + sa&squadra-group.com +50638 + cyLEDGE Media GmbH + Günter Dressel + admin&cyledge.com +50639 + Enics + Risto Mäkelä + risto.makela&enics.com +50640 + R2 Dermatology + Dylan McReynolds + dmcreynolds&r2derm.com +50641 + Orthopädie-Schuhtechnik Frisch GmbH & Co. KG + Christoph Schmatzler + it&frisch-luebeck.de +50642 + Andreas Schufft SW Entwicklung & EDV Beratung + Andreas Schufft + iana&rmslash.com +50643 + "ИП" Kostakov Dmitry + Dmitry Kostakov + dmitrybarynov&gmail.com +50644 + Elumbus GmbH + Joern Eble + technik&elumbus-reisen.de +50645 + Salvage Management & Disposals (Pty) Ltd. + Jean-Francois du Toit + jean-francois&smd.co.za +50646 + Signaturit Solutions, S.L. 
+ Edwin Mata Navarro + edwin.mata&signaturit.com +50647 + Beispielsfall + Mark Broecker + pen.iana.org&beispielsfall.de +50648 + ENVOY Group, LLC + Aaron Picht + it-iana&weareenvoy.com +50649 + Trailing Bits + Erik Johansson + erik&trailingbits.com +50650 + Xerox Belgium - Luxembourg + Wesley Biart + wesley.biart&xerox.com +50651 + 5F Soluções em TI + Eduardo Hirochi Inoue + hiro&5f.com.br +50652 + Communal Hydro Energy, Inc. + KIKUCHI Yutaka + yu&communalhydro.com +50653 + Huaxin SM Optics (HSMO) + Fan Chunquan + chunquan.fan&hsmoptics.com +50654 + Pioneer Service Corporation + Luxinglin + luxinglin&pioneerservice.cn +50655 + SUNTOR ELECTRONICS CO.,LIMITED + zhouqi + 67761121&qq.com +50656 + Somerset Partnership NHS Foundation Trust + Darren Thomas + darren.thomas&sompar.nhs.uk +50657 + Direction interministérielle du numérique et du système d'information et de communication de l'Etat + Jérôme Ploquin + jerome.ploquin&modernisation.gouv.fr +50658 + Aviator + Dimitri Karapetian + dimitri.karapetian&adjaragroup.com +50659 + Department of the Prime Minister and Cabinet + Ben Coutts + it&help.pmc.gov.au +50660 + RNLI + Nick Royley + nick_royley&rnli.org.uk +50661 + Spencer First Church of the Nazarene + Mark Braker + systemsadmin&spencernazarene.org +50662 + Maryland Department of Human Services + Chip Crawford + chip.crawford2&maryland.gov +50663 + Birmingham Open Source Solutions Ltd + Richard Wallman + richard.wallman&bossolutions.co.uk +50664 + Internalog LLC + Vladimir Plotnikov + mail&internalog.ru +50665 + HILLS HEALTH SOLUTIONS + Emanuele Lagana + emanuele.lagana&hills.com.au +50666 + Trade FIDES, a.s. + Martin Tureček + mturecek&fides.cz +50667 + Betterservers + Jason Borden + jason&betterservers.com +50668 + igloonet s.r.o. + Ondřej Kudlík + kudlik&igloonet.cz +50669 + National Bank of Belgium + John Deckers + john.deckers&nbb.be +50670 + Innovametro + Felix Lechner + felix.lechner&innovametro.com +50671 + TVT + liao weijian + liaoweijian&tvt.net.cn +50672 + LayTec AG + Bjoern Alan Dresen + bjoern.dresen&laytec.de +50673 + Tessenderlo Chemie International + Dirk Baekelant + dirk.baekelant&tessenderlo.com +50674 + Gosteleradiofond + Sinyakov Roman + sysadmin&gtrf.ru +50675 + TRANSMASH-TOMSK + Prodan Aleksey + report&transmash-tomsk.ru +50676 + Essenjay Technology + Rodney McKay + pen-admin&essenjay-technology.uk +50677 + datacrumbs + Mario Hirt + oid&datacrumbs.de +50678 + Cihan Aydin + Cihan Aydin + cihan.aydin&live.de +50679 + Hanssens Telecom + Arne Claerebout + arne&hanssenstelecom.be +50680 + SomaLogic Inc. + David Ziebarth + dziebarth&somalogic.com +50681 + IPHolders Inc. + Walter Dreksler + dreksler&ipholders.com +50682 + SKB Prominform + Alexandr Karyakin + shura&prominform.ru +50683 + NETMA + Juergen Schmidt + wanmon&netma.nato.int +50684 + Xantaro + Felix Schüren + fschueren&xantaro.net +50685 + OSZ Informations- und Medizintechnik + Martin Schleyer + netservice&oszimt.de +50686 + Blue Cedar + Kevin Fox + kfox&bluecedar.com +50687 + Energyfive + Dennis Cheliukanov + den&energyfive.io +50688 + GRIDNET + Rafal Skowronski + rafalskk&gmail.com +50689 + Uila Inc. + Miles Wu + miles.wu&uila.com +50690 + Space Exploration Technologies + Adam Lathers + hostmaster&spacex.com +50691 + Janus Technology Ltda.
+ Gustavo Ramirez + gustavo&janustech.com.br +50692 + Embrapa Sede + Sandra Satyko Guimaraes Watanabe + sandra.satyko&embrapa.br +50693 + ATEA Baltic + Vytautas Kelmelis + Vytautas.Kelmelis&atea.lt +50694 + Symanitron-electronics + Andrey Panov + pad&symanitron.ru +50695 + ENLITEON Limited + Riccardo Mazzurco + riccardo.mazzurco&enliteon.com +50696 + Brightways Corporation + David Hanson + david.hanson&brightwaysco.com +50697 + Cafari Inc. + Paul Chen + paul&cafari.com +50698 + North Central Texas Council of Governments + Jonathan Matthews + jmatthews&nctcog.org +50699 + ASiO + Stefan Szczygielski + pen-iana&asio.pl +50700 + Ulrich Busch + Ulrich Busch + ulrich.busch&bluewin.ch +50701 + PI SYSTEM co.,ltd. + Kuwabara Hiroshi + hrkuwaba&pi-system.co.jp +50702 + DIEHL Informatik GmbH + Michael Schieber + michael.schieber&diehl.com +50703 + Acteon Group Limited + Robert Brock + hostmaster&acteon.com +50704 + star.Energiewerke GmbH & Co. KG + Volker Scherrer + v.scherrer&star-energiewerke.de +50705 + NPAW (Nice People At Work) + Marc Rodon + mrodon&nicepeopleatwork.com +50706 + Fiducia & GAD IT AG + Hermann Amann + hermann.amann&fiduciagad.de +50707 + Nextragen Solutions GmbH + Sascha Mahmood + sascha.mahmood&nextragen-solutions.de +50708 + Point Core SAS + Nicolas BOUQUET + nicolas&point-core.com +50709 + JJIM.de Network Exploration Task-Force + Joel Brunenberg + joel.brunenberg&org.jjim.de +50710 + zhaoqing medical college + Eason Wu + vistahome&qq.com +50711 + Goldberg & Mathew Medical associates + hope friedman + hopestacy6&gmail.com +50712 + Connexys + Camiel Kluver + kluver&connexys.com +50713 + KT IT Planning Group + Jungmo Koo + jungmo.koo&kt.com +50714 + Jones Lang LaSalle IP, Inc. + Mahmood Ali + CertManager&am.jll.com +50715 + Bluink Ltd + Laurence Hamid + lhamid&bluink.ca +50716 + Huaqin Telecom Technology Co.,Ltd. + Zane Chen + chenzhao5&huaqin.com +50717 + FTI "ROSTRANSMODERNIZATSIYA" + Alexander Korolev + lic&asutk.ru +50718 + BPO Advisors SpA + Marco Mosca + marco.mosca&bpo-advisors.net +50719 + RNLI + Nick Royley + nick_royley&rnli.org.uk +50720 + Hawaii Oncology, Inc. 
+ Selina Lewis + slewis&hioncology.com +50721 + St Marys Oncology Center, LLC + Shannan Robenalt + shannan&drtriponc.com +50722 + Practice Provider Corporation + John Carbone + john&practiceprovider.com +50723 + RF Solutions LLC + Chris Godwin + cgodwin&rf-solutions.com +50724 + Iowa Student Loan + Blake Norton + bnorton&studentloan.org +50725 + ROCAS SI + Arcade AGBECI + contact&rocas-si.fr +50726 + Tekon-Avtomatika + Alexander Ivanov + ivanov&tekon.ru +50727 + Civil Aviation University of China + 刘中 (Liu Zhong) + liuzhong_18&163.com +50728 + Nemon + HongJe Kim + support&nemon.co.kr +50729 + Stichting Groningen Groningen Declaration Network + Herman de Leeuw + gdn&groningendeclaration.org +50730 + Salzgitter Digital Solutions GmbH + Efstratios Daskalopoulos + security&salzgitter-digital.de +50731 + NAES Corporation + Mike Leinweber + mike.leinweber&naes.com +50732 + HIAG Data + CISO + CISO&hiagdata.com +50733 + bluebyteIT Sven Treiber + Sven Treiber + sven.treiber&bluebyte-it.de +50734 + HHGL Limited + Jason Hampton + Jason.Hampton&homebase.co.uk +50735 + Vricon Systems + Anders Rönnbrant + anders.ronnbrant&vricon.com +50736 + Axcella, LLC + Nick Avgerinos + nicka&axcella.com +50737 + Chaitin Tech + yang.li + yang.li&chaitin.com +50738 + HomeLABs + Ivan Kotiv + zlojkota&gmail.com +50739 + lmu + david planner + david.planner&iaag.geo.uni-muenchen.de +50740 + AIRSYS GmbH + Ronny Bunke + ronny.bunke&bertelsmann.de +50741 + Dust Devil Hosting, LLP + Emily Heginbotham + admin&cheetahdesigns.org +50742 + Optus Yes Lab + Guillaume Poulet-Mathis + yeslab&optus.com.au +50743 + nihon communication solutions + ANGURU SURESH + suresh.anguru&ncs-in.com +50744 + Max Bögl Bauservice GmbH & Co.KG + Klaus Steger + certs&max-boegl.de +50745 + GRDF + KARENZI Pascal + grdf-dsi-certificats&grdf.fr +50746 + Bank of Namibia + Mbauguraije Tjikuzu + Mbaunguraije.Tjikuzu&bon.com.na +50747 + Vastec + David Thompson + david.thompson&vastec.com +50748 + iRobot Corporation + John Bell + pen-oid-admin&irobot.com +50749 + Avantix + COULON Christine + christine.coulon&atos.net +50750 + AEGIDE + Cédric NEVEUX + support&aegide.fr +50751 + FIDUMTEC + Carlos Visser + carlos.visser&fidumtec.com +50752 + Delta Constructors + Aaron Schilling + aschilling&deltaconstructors.net +50753 + Bergkvist Siljan Mora AB + Kent Matsuhashi + it&bergkvistsiljan.com +50754 + Bergkvist Siljan Blyberg AB + Kent Matsuhashi + it&bergkvistsiljan.com +50755 + Siljan Energi AB + Kent Matsuhashi + it&siljan.com +50756 + D. A. Taylor Consulting LLC + David Taylor + david&dataylorconsulting.com +50757 + ST Engineering Ltd + Wong Peng Leong, Melvin + wong.pengleong&stengg.com +50758 + All My Papers + Toby Ball + toby.ball&allmypapers.com +50759 + MAVOCO GmbH + Remigiusz Weska + remek.weska&mavoco.com +50760 + CELPRAM + Mario MARTINS + mario.martins&celpram.fr +50761 + von Stockhausen + Hans Christian von Stockhausen + hc&vst.io +50762 + mühlbauer + partner Technische Dokumentation GmbH & Co. KG + Guido Kraus + cert&m-p.de +50763 + Loca Images + Stéphane Ténier + stephane&loca-images.com +50764 + Genuinous + Davide Fiorentino lo Regio + email&genuinous.com +50765 + Toyota Connected, Inc. + Preston Doster + preston.doster&toyotaconnected.com +50766 + Mennonite Central Committee Ontario + Karl Reimer + karlreimer&mcco.ca +50767 + PrecisionWave AG + Andreas Zutter + contact&precisionwave.com +50768 + TECHNOKEY SAS + Luz Elba Carrillo + luz.carrillo&technokey.co +50769 + Communication Devices Inc. 
+ Tadhg Kelly + info&commdevices.com +50770 + TQ Systems GmbH + Georg Unterlugauer + georg.unterlugauer&tq-group.com +50771 + B&A Technology Co., Ltd + Fu Xiang + fuxiang&baphoton.com +50772 + GeissNET + Christian Geiss + admin&geissnet.de +50773 + Stored Value Cards, Inc. + Jeff Cours + jcours&numifinancial.com +50774 + Lessbroken Internet Services + Tris Emmy Wilson + tris&tris.fyi +50775 + MeSign Technology Limited + Richard Wang + cps&mesign.com +50776 + System V,Inc. + Youichi Kato + kanri&sysv.co.jp +50777 + Stenvalls Trä AB + Thomas Wikstrom + thomas.wikstrom&stenvalls.se +50778 + Edwin Lankamp Consulting + Edwin Lankamp + edwin.lankamp&gmail.com +50779 + Zombie Orrpheus Entertainment LLC + Ben Dobyns + b.dobyns&zombieorpheus.com +50780 + MacEwan University + Network Operations Center + noc&macewan.ca +50781 + Gradient Technologies, LLC + Tom Mia + operations&gradientsoftware.com +50782 + Cumulus Systems, Inc. + Jed Krohnfeldt + jed&cumulus-systems.com +50783 + Traxens + Christophe Gierski + rssi&traxens.com +50784 + GIDURAL, Ltd. + Denis Matveev + denis&gidural.nexcom.ru +50785 + Knowcell + Luis Vasquez + luis.vasquez&knowcell.com +50786 + Gemological Institute of America + Noe Ortega + certificates_ssl&gia.edu +50787 + Thales UK Ltd + Matt Shaw + matt.shaw&uk.thalesgroup.com +50788 + German-Jordanian University + Anass Ksasbeh + anass.ksasbeh&gju.edu.jo +50789 + Sentaca + Boualem Guiti + boualem.guiti&sentaca.com +50790 + Synforma Ltd. + Alexander Tashkov + office&synforma.bg +50791 + Redtea Technology (Shanghai) Co., LTD + Vincent Wang + vincent.wang&redteamobile.com +50792 + Genetus Inc. + Takanobu OYAMA + oyama&genetus.co.jp +50793 + Compulsory inforcement bureau under the Prosecutor general's office of the repoblic of Uzbekistan state unitary enterprise "Centr on electronic online auctions organization" + Todjiev Alisher + e-auksion&mib.uz +50794 + Hellomouse + Lucas Fiegl + lfiegl&hellomouse.net +50795 + HENSOLDT Holding Germany GmbH + Slonek, Marc + it_support_web&hensoldt.net +50796 + Clienia Management AG + Stephan Greuter + info.it&clienia.ch +50797 + Salzgitter AG + Efstratios Daskalopoulos + security&salzgitter-digital.de +50798 + Aporeto Inc. + Antoine Mercadal + antoine&aporeto.com +50799 + SlashNext (formerly 'Uet') + +923344446789 + awais.khan&slashnext.com +50800 + Domanski Zakrzewski Palinka sp. k. + Maciej Maciejewski + it&dzp.pl +50801 + cqcatr 重庆信息通信研究院 (Chongqing Information and Communication Research Institute) + Yang XuGuang + yxguang1988&gmail.com +50802 + 556607-9975 + Peter Loman + peter.loman&dalafrakt.se +50803 + LOGETEL + DECAMPS Lionel + ldecamps&logetel.fr +50804 + SABEN + Arno Hart + arno&saben.ac.za +50805 + nexVortex + nexVortex NOC + noc&nexvortex.com +50806 + DataBang S.P.R.L. + Bernard Rodriguez Y Lopez + bernard.rodriguez&databang.io +50807 + Eaglepix Systems LLC + SiuWai Wu + swu&eaglepixsys.com +50808 + Veridian Credit Union + Blake Bell + blakejb&veridiancu.org +50809 + Newcontact + Matvey Solodovnikov + m.solodovnikov&newcontact.su +50810 + Stacc AS + Marek Zakrzewski + marekz&stacc.com +50811 + Adel System S.r.l. 
+ Matteo Bonaiuti + bonaiuti&adelsystem.com +50812 + Mediaset España + Ramón Ortiz + certificados&mediaset.es +50813 + Quesive GmbH + Bernhard Bock + bernhard.bock&quesive.de +50814 + Faculty of Medicine Comenius University + Jan Bartek + jan.bartek&fmed.uniba.sk +50815 + Agman Holdings Limited + Adrian Gonzalez + eca&edfman.com +50816 + Telsat + Stefano Coli + coli&telsat.it +50817 + Jazz Networks Limited + Engineering + iana-pen&jazznetworks.com +50818 + Hochschulbibliothekszentrum NRW + Alexander Drawe + unix&hbz-nrw.de +50819 + Medialis + Gourrier Alexandre + agourrier&medialis.com +50820 + Niche Finder + David Collins + info&nichefinder.net +50821 + Eurovibes + Benedikt Spranger + bene&eurovibes.org +50822 + Gospel Technology Ltd + Reuben Thompson + reuben.thompson&gospel.tech +50823 + Inner Expanse LLC + Shawn Garrington + shawn&innerexpanse.com +50824 + Central Valley Cancer Center + Dianna Aguirre + Aguirre79&yahoo.com +50825 + RTRJ Tecnologia (T. E. N. Reis Servicos De Informatica - ME) + Telmo Reis + telmo&rtrj.tech +50826 + Mediso Art s.r.o. + Petr Pospisil + mediso-art&mediso-art.cz +50827 + Needswell Inc. + Daiji Okamoto + daiji.okamoto&needswell.com +50828 + Fujian Taili Communication.,Ltd. + Wei,Liu + 516109734&qq.com +50829 + Mount Zion School + Praneet Rai + praneet&mtzion.edu.in +50830 + VECTIGALIS d.o.o. + Anze Jensterle + pen&vectigalis.si +50831 + PATA SIA + Edgars Melveris + it&pata.lv +50832 + Teratac (Pty) Ltd. + Andrew Broekman + andrew&teratac.com +50833 + Bank-Verlag GmbH + Jens Althoff + jens.althoff&bank-verlag.de +50834 + Zavod No. 423 LLC + Andrey Gorskiy + a.gorskiy&owen.ru +50835 + Erhardt-IT + Nicky Erhardt + Webmaster&erhardt-fbt.de +50836 + Pontchartrain Cancer Center + Kathy Oubre + kathy&pcclouisiana.com +50837 + Parsec Labs LLC + Robyn Brandner + robyn&parseclabs.com +50838 + Structura Technology & Innovation s.r.l. + Alessandro Chies + alessandro.chies&sti4u.it +50839 + SkyLab Innogram Pte. Ltd. + Sean Kim + sean.kim&skylabteam.com +50840 + Intercoax Co., Ltd + Sang-Gwan Lee + sglee&intercoax.com +50841 + Qcera, Inc. + Peter Pak + ppak&qcera.com +50842 + Infralinc, LLC + Andrew R. Wagner + andy&infralinc.com +50843 + Seychelles Tourism Board + Jude Adeline + support&seychelles.travel +50844 + Impervious Technologies LLC + Steven Foreman + steven.foreman&impervioustech.com +50845 + RAFFCOM TECHNOLOGIES SDN BHD + Affendi Razak + affendi&securemetric.com +50846 + EXASOL AG + Florian Reck + florian.reck&exasol.com +50847 + Original Telco Solutions limited + Gareth Evans + garethe&originalsolutions.co.uk +50848 + Innflow AG + Martin Streiff + services&innflow.com +50849 + VSV Frakt AB + Jan-Stefan Karlsson + jan-stefan.karlsson&vsv.se +50850 + SnowOncard + Daeman Kwon + daeman.kwon&snowoncard.com +50851 + Gnome Rock + Douglas Anger + danger&gnomerock.com +50852 + LECIP CORPORATION (formerly 'LECIP SLP CORPORATION') + Yusuke Hasegawa + yuusuke.hasegawa&mb.lecip.co.jp +50853 + Flandrin IT + Erwan LE DISEZ + it&chapsvision.com +50854 + Communics Systemhaus GmbH + Gerrit Kaemper + oid&communics.de +50855 + Block Center for Integrative Cancer Treatment + Shana Ocasio + gmoore&flatiron.com +50856 + Pacific Battleship Center + David Canfield + it&labattleship.com +50857 + DENSO SOLUTIONS CO.,LTD. + Mari Miyata + miyata&kk-denso.co.jp +50858 + Shasta County Office of Education + James Alspach + jalspach&shastacoe.org +50859 + dilling.dev + Hendrik Dilling + iana-pen&rabbadak.de +50860 + BIOT sp. z o.o. 
+ Kamil Burzyński + k.burzynski&biotcloud.com +50861 + Stadtverwaltung Herzogenaurach + Thomas Wilhelm + edv&herzogenaurach.de +50862 + Hotspring Ventures Ltd + Andrew Stubbs + pen&treatwell.host +50863 + Figment Design Laboratories (Pty) Ltd + Alan Erasmus + alan&figment.co.za +50864 + GLIWA GmbH + Igor Zlatkovic + igor.zlatkovic&gliwa.com +50865 + ilah + ilah Zagui + 2tl2r4&gmail.com +50866 + ELVA-1 MICROWAVE HANDELSBOLAG + Sergey Petrov + petrov&elva-1.com +50867 + KPMG Nunwood + Robert Steel + rob.steel&kpmg-nunwood.co.uk +50868 + Signamax + Guy Shavit + gshavit&signamax.com +50869 + BASoft + KHALDOUNE HAKIM + cb.neuro&gmail.com +50870 + Jieti Technology Co., Ltd + felix li + felix.li&newjieti.com +50871 + dyna bcs Informatik GmbH + Lydia Natter + lydia.natter&dynabcs.at +50872 + Progeny System Corp. (PAX River Office) + Frank J. Crow + fjcrow2008&gmail.com +50873 + Levi Strauss & Co. + Michael Martinez + mmartinez&levi.com +50874 + Cowan & Associates, Inc. + Jim Cecil + jim.cecil&cowanassociates.net +50875 + Slovenske železnice d.o.o. + Marko Arsović + marko.arsovic&slo-zeleznice.si +50876 + GESAN srl + Umberto Della monica + umbertodm&gesan.it +50877 + Balluff GmbH + Sysadmin + sysadmin&balluff.de +50878 + Creativity Software + Leon Hendry + leon&creativitysoftware.net +50879 + Walgreens Boots Alliance Services Limited + Kurt Kincaid + kurt.kincaid&walgreens.com +50880 + Quantus Information Technologies + Calin Morar + calin&quantusitech.com +50881 + PrimeCert Ltd + Federico Berti Arnoaldi + administration&primecert.com +50882 + Caleo Technologies AB + Jens Ahlin + jens.ahlin&caleotech.com +50883 + tolltickets GmbH + Robert Strasser + pki&tolltickets.com +50884 + Nochta, Inc. + Ozcan Bicak + ozcan&nochta.com +50885 + King Abdulaziz Center for World Culture + Nathanael Kenyon + nathanael.kenyon&kingabdulazizcenter.com +50886 + Chengdu Saturn Technology Co., Ltd. + LIU ZHI FENG (Bruce) + hny2000&163.com +50887 + RailNetEurope + Harald Reisinger + harald.reisinger&rne.eu +50888 + Telcoserv + Thomas Manolas + tman&telcoserv.gr +50889 + Buerotechnik Haustein + Rene Haustein + iana&meekorah.net +50890 + Olimpia Management S.A. + Maira Alejandra Mora + maira.mora&olimpiait.com +50891 + T.CON GmbH & Co. KG + Stefan Fiedler + ita&team-con.de +50892 + Ivy Tech Community College - Lifelong Learning - Bedford + Matthew Lewallen + bedfordclc&gmail.com +50893 + Ethica Data + Mohammad Hashemian + mohammad&ethicadata.com +50894 + RF Creations + Timothy Newton + tim&rfcreations.com +50895 + Kajeet Inc. + Hugh mcLenaghan + hmclenaghan&kajeet.com +50896 + LEAF + Juraj Tobias + onlineservices&leaf.sk +50897 + Islamic College of Brisbane + Orhan Camkara + ocamkara&icb.qld.edu.au +50898 + Comtrol Private Limited + Clive Heightman + clive&comtrol.com.sg +50899 + The Steamship Authority + James Ashley + jashley&steamshipauthority.com +50900 + ALTE OLDENBURGER Krankenversicherung AG + Thomas Bartke + bartke&alte-oldenburger.de +50901 + OBT AG + David Huber + david.huber&obt.ch +50902 + Stadt Nuertingen + Juergen Raab + it.monitoring&nuertingen.de +50903 + ASOCS + Vitaly Grinberg + vitaly&asocsnetworks.com +50904 + Kemberton Healthcare Services + Seonho Kim + skim&kemberton.net +50905 + OpenMuffin + Amy Nagle + amy-iana&zyrenth.com +50906 + mediainvent Service GmbH + Johannes Leopold + iana-pen&mediainvent.com +50907 + Lowe-Martin Company Inc. + Gurpreet Singh Bhamra + gurpreet.bhamra&lmgroup.com +50908 + Mystery Ranch Ltd.
+ Mack Pexton + it&mysteryranch.com +50909 + Quick Soft Tecnologia da Informação + Anderson Artur Nuss + datacenter&quicksoft.com.br +50910 + Trusted Shops GmbH + Leif Hoppe + leif.hoppe&trustedshops.de +50911 + NSS TECHNOLOGY LIMITED + Tank Bai + nssadm&163.com +50912 + Multipart Labs + Matthew Hershberger + matthewh&multipart.net +50913 + Ponte Technologies Ltd + Wei Zhou + zhouwei&ponte-tech.com +50914 + Cyberstack Limited + Domain Administrator + cyberlicense&cyberstack.uk +50915 + Lacroix Traffic + SAUVEBOIS Laurent + l.sauvebois&lacroix-city.com +50916 + IMDEA Software + Juan Cespedes + software&imdea.org +50917 + Berufsförderungswerk Leipzig gGmbH + Alois Fischer + it&bfw-leipzig.de +50918 + KBR GmbH + Bernd Wißler + bernd.wissler&kbr.de +50919 + KONICA MINOLTA, INC. + Shinya Kaku + shinya.kaku&konicaminolta.com +50920 + Harold Masenya + Harold Masenya + harold.masenya&outlook.co.za +50921 + DPS Electronics, Inc. + Susy Sands + susy.sands&dpsrr.com +50922 + bdcj + Ben Divers + ben&bdcj.co.uk +50923 + DHBW Heilbronn + Stefan Kress + its&heilbronn.dhbw.de +50924 + NthPermutation Security + Michael StJohns + msj&nthpermutation.com +50925 + Advance Financial + James Sims + james.sims&af247.com +50926 + Farmers Bank & Trust Co. + Ken Stapleton + ianapen&farmers247.com +50927 + Tapestry Solutions, Inc. + Aaron Reilly + areilly&tapestrysolutions.com +50928 + Ambu A/S + Peter Lund + pelu&ambu.com +50929 + AS Globitex Holding + Aivars Mazurs + itd&globitex.com +50930 + ZhenSen Optical Communication + Momo Li + 26201204&qq.com +50931 + Onlearn-tech kft + Attila Pető + peto&onlearn.hu +50932 + Sensio AS + Øyvind Hvidsten + oyvind&sensio.no +50933 + Szkoła Podstawowa im. Kornela Makuszyńskiego w Giewartowie + Romuald Szczotkiewicz + adam&itproszkolenia.com +50934 + Erre Elle Net s.r.l. + Stefano Annese + noc&erre-elle.net +50935 + Oneida Health System + Daniel Nash + dnash&oneidahealthcare.org +50936 + TV2 Consulting + Jason Peacock + jason.peacock&tv2consulting.com +50937 + Epic Networks S.r.l. + Davide Diana + davide.diana&epicnetworks.it +50938 + Apprenda Inc + Michael Michael + accounts&apprenda.com +50939 + Magic Leap, Inc + Brad Whaley + bwhaley&magicleap.com +50940 + SMARTNES S.r.l.s. + Enrico Gardenghi + egarden&myforensic.net +50941 + Nocsys + Chen Chang Cheng + macauley.cheng&nocsys.com.cn +50942 + Tubbesing Services + Jeff Noel + jeffn&tubbesing.com +50943 + BelWü-Koordination + Tobias Loehnert + sysadmin&belwue.de +50944 + Deutsche Telekom AG Innovation Laboratories + Torsten Haller + torsten.haller&telekom.de +50945 + Lamb Weston Belaya Dacha + Dmitriy Zhidkov + admin&lwbd.ru +50946 + Restaurant Brands International + Hector Munoz + hmunoz&rbi.com +50947 + Shanghai Gotell Communication Technology Holdings Co., Ltd. + Jun Wu + jun.wu&roam2free.com +50948 + BlockArray + sameer ibrahimbacha + contact&blockarray.com +50949 + Varner Retail AS + Inge E. 
Kjolsvik + license&varner.no +50950 + ARVAL + VANNIER Christophe + christophe.vannier&externe.arval.com +50951 + Manx Telecom IT + Laurence Hind + MIB&manx.net +50952 + Dimetor GmbH + Thomas Wana + thomas.wana&dimetor.com +50953 + BluVector INC + Steven Bade + steve.bade&bluvector.io +50954 + Sportcast GmbH + Pascal Günther + it&sportcast.de +50955 + comcrypto GmbH + Hendrik Nöll + iana&comcrypto.de +50956 + Health & Social Care Northern Ireland + Michael Harnett + ictsecuritymanager&hscni.net +50957 + Southwest electrolic research institute of China + leon wang + yife.wang&gmail.com +50958 + Idax Solutions Ltd + Mark Rodbert + mark.rodbert&idaxsoftware.com +50959 + becrafted + Michael Kraus + iana&becrafted.de +50960 + Advanced Datacentre Systems Ltd + John Hardy + john.hardy&advanceddatacentre.com +50961 + Gateway TechnoLabs + Dhwanil Khandwala + dhwanil.khandwala&gatewaytechnolabs.com +50962 + Computition + Stephen Berthelsen + nate&computition.net +50963 + HalloWelt! GmbH + Leonid Verhovskij + support&hallowelt.com +50964 + Funko + Funko Admin + funkoadmin&funko.com +50965 + DANUBE SOFT s. r. o. + Stanislav Gardon + s.gardon&danubesoft.com +50966 + Softbridge Technology + alvin CANGOU + cangoualvin&gmail.com +50967 + China Electronics Technology Instruments CO.,LTD The 40th &41st Institute of CETC + Dong Jigang + nc.dept6&ei41.com +50968 + Virtuozzo + Valentin Kulesh + valentin.kulesh&virtuozzo.com +50969 + ENTESC SRL + Scardigli Marco + marco.scardigli&entesc.com +50970 + AerojetRocketdyne + Mark Mahoney + mark.mahoney&rocket.com +50971 + Beijing Metstar Radar Com. LTD. + zhaoping sun + zhaoping.sun&metstar.net +50972 + Micronisus Technonologies Pvt. Ltd. + Hesham Ahmed + hsa&micronisus.com +50973 + Vengeful Syndicate + Ashley Townsend + admin&vengefulsyndicate.com +50974 + R2P GmbH + Soren Jessen + soeren.jessen&r2p.com +50975 + Tricolorvision + Jordan Fischer + Jordanfischer50&gmail.com +50976 + Medtronic CRHF + Matthew Kirkwood + matt.d.kirkwood&medtronic.com +50977 + eMudhra Technologies Limited + Vijay Kumar M + vijay&emudhra.com +50978 + DareNET + Jason Hill + jason&darenet.org +50979 + Radnor Township School District + Chris Augustine + chris.augustine&rtsd.org +50980 + PJSC "TGC-2" + Sergey Nabatov + NabatovSV&tgc-2.ru +50981 + Gartenmann Software AG + Markus Gartenmann + infra&gartenmann.ch +50982 + Viken Skog SA + IT-support + it-support&viken.skog.no +50983 + BAUR GesmbH + Franz Kempter + It&baur.at +50984 + Stuff In A Plug + Adrian Aitken + oid&stuffinaplug.com +50985 + Navigate IT Services GmbH + Christopher Friedrichs + friedrichs&navigate-it-services.de +50986 + New Lexington Clinic, P.S.C + Dan Knight + dakni&lexclin.com +50987 + GoodNet LLC + Oleg Yakovlev + techdir&goodnet.su +50988 + eInfochips India Pvt Ltd + Jay Ganatra + jay.ganatra&einfochips.com +50989 + Solactive AG + Sebastian Waitz + waitz&solactive.com +50990 + CRTC PGUPS + Anton Idukov + idukov&crtc.spb.ru +50991 + Electric Applications, Inc. + Stephen Ashworth + saa&ashconllc.com +50992 + Entertainment Partners + Mark Allen + mjallen&ep.com +50993 + BitSight Technologies + Philip Gladstone + legal&bitsighttech.com +50994 + RBR IT Consulting + Brett Thomas + admin&rbrettman.com +50995 + Quantum Networks (SG) Pte. Ltd. + Narasimhan Jayasri + admin&qntmnet.com +50996 + Wolford AG + Steven Billen + postmaster&wolford.com +50997 + Optellum Ltd + Nicholas Dowson + Nick.Dowson&optellum.com +50998 + BeOnLink LLC + Anton Dolgov + anton&beonlink.ru +50999 + Beijing Lian You Fu Kang Technology Ltd.
+ Zhou Xiao Wei + showellz&163.com +51000 + Ardent Creative Inc. + Jay Sauce + jay&ardentcreative.com +51001 + SurePassID Corporation + Mirko J. Ploch + mirko.ploch&surepassid.com +51002 + Beehive Systems Pvt Ltd + Ganesh Rajamani + techpartner&beesys.com +51003 + Indian River County Sheriff's Office + Matthew Guerra + mguerra&ircsheriff.org +51004 + Khipu-Networks Ltd + Arnab Roy + arnab.roy&khipu-networks.com +51005 + Opito Labs GmbH + Jan Kirchhoff + info&opito.de +51006 + Collector AB + Henrik Björkdahl + henrik.bjorkdahl&collectorbank.se +51007 + Menard, Inc. + Justin Yarrington + jyarring&menard-inc.com +51008 + Holiday Stationstores, Inc + Paul Achtelik + Paul.Achtelik&holidaystationstores.com +51009 + Skylone Technology Ltd. + Gokhan Poyraz + gokhan&skylone.com +51010 + Flight Systems Inc + Brian Attinger + hawk&flightsystems.com +51011 + Beijing Winicssec Technologies Co.,Ltd. + Wangfan + fan.wang&winicssec.com +51012 + Griffin INet, Inc + Darron Black + darron&griffin.net +51013 + TS3CORP + GABRIEL ABDALLA CAVALCANTE SILVA + gabriel.cavalcante88&gmail.com +51014 + Owen LLC + Vladimir Smelnitskiy + it&owen.ru +51015 + Vispiron GmbH + Steffen Calsow + steffen.calsow&vispiron.de +51016 + ZoneArt Networks Ltd + Andrew Chilcott + andrew.chilcott&zoneartnetworks.com +51017 + Thetatronics Ltd + Andrew Chilcott + andrew&thetatronics.com +51018 + neurocat + Frank Selensky + fs&neurocat.ai +51019 + TrueBlue Inc + Michael Bednarczyk + mbednarczyk&trueblue.com +51020 + BX Technical Services Inc + Becka Terry + admin&bxtechnical.com +51021 + Hochschule Luzern + IANA HSLU + iana&hslu.ch +51022 + DicomFlowAccess - DFA + Denys Alexandre Barboza da Silva + denirow&gmail.com +51023 + SurreyLabs Technology Inc. + Tom G. Huang + tomghuang&surreylabs.com +51024 + Kay McCormick + Kay McCormick + tech-admin&kaymccormick.com +51025 + Guangdong South New Media Inc + linyimao + gdsnm&qq.com +51026 + WAGNER AG Informatik Dienstleistungen + Managed Backend + tm-system&wagner.ch +51027 + Ultra Electronics - USSI + Brent Scare + IT&ultra-ussi.com +51028 + Nucleos Inc. + Piotr Zduniak + piotr&nucleos.com +51029 + Suno Kaj 3 + Daniel Díaz + info&sunokaj3.com +51030 + Littler Mendelson, P.C. + Austin B. Calvert + acalvert&littler.com +51031 + w4xzr + Thomas Brown + thomasb9511&gmail.com +51032 + Ridesoft.it + Andrea Cavalli + iana.org&ridesoft.it +51033 + Quark intelligence llc + Manuel Berro Madero + manuel&quark-i.com +51034 + hettronic.net + Ralf Thomas + ralf.thomas&hettronic.de +51035 + Bit4id SAC + Rodrigo Lopez + info.pe&bit4id.com +51036 + Irbis.Works + Petr Kralik + petr.kralik&live.com +51037 + FAMES GmbH + Rudolf Dietz + rudolf.dietz&fames.de +51038 + Quanterion Solutions Incorporated + Aaron Riesbeck + ariesbeck&quanterion.com +51039 + Unassigned + Returned 2017-11-28 + ---none--- +51040 + Magellan Medical Systems + Bernie Zhao + bernie.zhao&gmail.com +51041 + M ALAM Enterprise + Toufiqul Alam + toufiq.alam&malamenterprise.com +51042 + Max-Planck-Institut fuer Entwicklungsbiologie + Johannes Woerner + woerner&tuebingen.mpg.de +51043 + protel Hotelsoftware GmbH + Mark Andrae + devops&protel.net +51044 + in.power GmbH + Matthias Roth + matthias.roth&inpower.de +51045 + Arnavsoft India Pvt Ltd + Anurag Shrivastava + anurag.shrivastava.in&gmail.com +51046 + Kungälv Närenergi AB + Ulf Lysmark + ulf.lysmark&kungalvenergi.se +51047 + ATI Systems + Mohamed Bastawy + mbastawy&atisystem.com +51048 + BIOSENCY + Quentin BODINIER + contact&biosency.com +51049 + Ratel, Inc. 
+ Junpei Yoshino + junpei.yoshino&ratel.co.jp +51050 + Hermann Pfanner Getränke GmbH + Lars Moosbrugger + certificate&pfanner.com +51051 + Raisin GmbH + Jeff Younker + jeff.younker&raisin.com +51052 + HAINAN EKING TECHNOLOGY CO., LTD + Dongmao Zhang + dmao.zhang&haihangyun.com +51053 + bottomlesspit + Boris Chao + boris&bchao.ca +51054 + Mölndal Energi AB + Leif Viklund + leif.viklund&molndalenergi.se +51055 + ABB Switzerland Ltd - Low Voltage Products + Christian Hirsbrunner + christian.hirsbrunner&ch.abb.com +51056 + SameArch.RU + Pavel Afanasev + afanasev.p&gmail.com +51057 + Primwest SA + Hélène Le Marchand + admin&primwest.com +51058 + SII Group + Jean-Charles BLANC + jcblanc&sii.fr +51059 + Cryptopia Ltd + Morgan Nicholson + morgan.nicholson&cryptopia.co.nz +51060 + Quuxy Inc. + Quuxy Inc. Operations + ops&quuxy.com +51061 + Distribution Finance Capital Ltd + John Gauslin + jgauslin&dfcapital.co.uk +51062 + Deli Home Products + Max Elzinga (Felton IT partner) + m.elzinga&felton.nl +51063 + Aura Engineering + Erik Knight + aura.pen&aura-engineering.com +51064 + ATBIS Co., Ltd. + Anders DAN + anders&atbiss.com +51065 + Lewii + David Kuen + 1024&lewii.cn +51066 + San Diego State University Directory Services + Marcus Jeffers + mjeffers&sdsu.edu +51067 + Anyfi Networks AB + Johan Almbladh + johan.almbladh&anyfinetworks.com +51068 + 中国广东纽脉电器有限公司 (China Guangdong Pulse Electric Co., Ltd.) + 张乐天 (Zhang Lotte) + 454125063&qq.com +51069 + Inango Systems LTD. + Boris Shehter + boris&inango.com +51070 + EPSS European Printer Support Services GmbH + Willy Wierichs + anfrage&epss.de +51071 + Fio banka, a.s. + Marek Seemann + sit&fio.cz +51072 + Verbundrechenzentrum der Kunst- und Musikhochschulen NRW + Benedikt Meyer + benedikt.meyer&verbundrechenzentrum.de +51073 + AL Wireless a.s. + Petr Vodička + petr.vodicka&al-wireless.com +51074 + Chatham Financial + Andrew Lowther + alowther&chathamfinancial.com +51075 + HIGHROAD + Murielle Ablancourt + murielle.ablancourt&highroad.re +51076 + Inspire Technology Co.,Ltd ,Dalian China + Xu Qiang + all&eyegroup.cc +51077 + Cooltera Limited + Adrian Coles + adrian.coles&cooltera.com +51078 + Slobodna domena Zadruga za otvoreni kod i dizajn + Davorin Kremenjaš + davorin.kremenjas&slobodnadomena.hr +51079 + Zolmot Energia + Tomasz Zielinski + biuro&zolmotenergia.com.pl +51080 + Novus Power Products LLC + Bill Kurple + bkurple&novuspower.com +51081 + LogicLab s.r.l. + Pirovano Fabrizio + pirovano&logiclab.it +51082 + Blockchain Technology Research Innovations Corporation + Benjamin F. 
Beideman + hostmaster&btric.org +51083 + Stichting ODIN-ICT + Jaap Jolman + jaap&odin-ict.nl +51084 + Chronopost + Guillaume Cabuzel + guillaume.cabuzel&chronopost.fr +51085 + Applefountain + Robert Spoden + robert.spoden&gmail.com +51086 + YAWL Foundation + Michael Adams + mj.adams&qut.edu.au +51087 + Altoption - Sistemas de Informacao Lda + Jose Albuquerque + jose.albuquerque&altoption.pt +51088 + Tech Software + Walden Leverich + waldenl&techsoftware.com +51089 + Viamex + Thorsten Plathe + tp&viamex.de +51090 + Adritas information Technology co.,Ltd + jarod xia + jarodxia&adritas.com +51091 + Alliander AG + Richard van der Hoorn + Servicedesk.It.deutschland&alliander.com +51092 + Westan Logistik AB + Lars Edsvik + Lars.edsvik&westan.nu +51093 + Moving Targets Consulting GmbH + Christoph Lubnau + christoph.lubnau&mtc.berlin +51094 + JSC Special Electrosystems + Damir Mansurov + DMansurov&electrosystems.ru +51095 + Saint Louis University + Jeff Abernathy + Webteam&slu.edu +51096 + ACE Soluções Computacionais + Edward Boszczowski + edward&acesolucoescomputacionais.com.br +51097 + Foundation for Anime and Niche Subcultures + System Administrator + sysadmin&fanime.com +51098 + DerbyCity.org + Kenny Marcum + kenny&derbycity.org +51099 + École des mines de Saint-Étienne + Dominique BERTHET + dberthet&emse.fr +51100 + backslash systems + Jordan Songer + jordan.s&team.backslash.systems +51101 + STL Co.,Ltd. + Kouichi Matsumoto + k.matsumoto&stlabs.co.jp +51102 + MARUENG Co., Ltd. + DongHyun Kwon + arang&marueng.co.kr +51103 + Visionary Communications, Inc. + Sean Kennedy + skennedy&office.vcn.com +51104 + BoltN Hosting Limited + Matthias Merkel + matthias&boltn-hosting.com +51105 + SAYMON + Yaroslav Kharitonov + yk&saymon.info +51106 + Negotech Cons Kft. + Janos Toberling + negotechcons&gmail.com +51107 + Andrew Broekman + Andrew Broekman + andrewbroekman&gmail.com +51108 + Jan Ohlsén Åkeri AB + Christian Jakobsson + christian&ohlsen-akeri.se +51109 + Metis Cyberspace Technology SA + Serafeim Katsikas + serafeim.katsikas&metis.tech +51110 + GRADES ESEA NOUVELLE-AQUITAINE + Thierry Rinaldo + iana.exploitation&esea-na.fr +51111 + AVIONIX ENGINEERING sp.z o.o. + Ralf Heckhausen + ralf.heckhausen&avionix.pl +51112 + Saraxa GmbH + Andreas Brodmann + abrodmann&saraxa.net +51113 + Telit Communications PLC + Mihai Voicu + mihai.voicu&telit.com +51114 + CeBiTec - Center for Biotechnology + CeBiTec Computer Support + support&CeBiTec.Uni-Bielefeld.DE +51115 + FRAME, Inc. + Marko Zivanovic + marko&fra.me +51116 + The Ombudsman Service Ltd + OS Registrant + osregistrant&ombudsman-services.org +51117 + Vision Studio S.A. + Diego Perez Roca + diego&visionstudio.com.ar +51118 + Accelleran NV + Stephen Parker + admin&accelleran.com +51119 + gerdos.de + Gerd Schering + gerd.schering&mailbox.org +51120 + ArcScan, Inc. + Andrew Levien + andy&levien.co +51121 + Barne-, ungdoms- og familiedirektoratet + Jo Jachlin + jo.jachlin&bufetat.no +51122 + ALLIAS + Jean-Francois KOTLOWSKI + contact&allias.fr +51123 + II-VI Network Solutions + Tim Zahnley + timothy.zahnley&ii-vi.com +51124 + Trace3 + David Ishmael + dishmael&trace3.com +51125 + Beijing HXZR Technology Co.,Ltd. 
+ Sun Jun + sunjun0602&163.com +51126 + Stream LLC + Vladimir Kulakov + vkulakov&stream.ru +51127 + Communauté d'Agglomération du Puy-en-Velay + Julien Sabatier + julien.sabatier&lepuyenvelay.fr +51128 + Nova Integral Sistemas Ltda + Marcos Donizete Fogaca + mfogaca&integral.com.br +51129 + Fireside21 + Rich Frangiamore + rich&fireside21.com +51130 + Apalan Infor S. L. + Ernesto Vigo + apalan&apalan.com +51131 + NETSHIELD Corporation + Chris Gauthier + chris.gauthier&netshieldcorp.com +51132 + MMD-Monitors & Displays Nederland B.V. + Harold Niericker + harold.niericker&tpv-tech.com +51133 + Cash Flow Management Inc. + Tyler McLachlan + tmclachlan&cfms4.com +51134 + JWIPC TECHNOLOGY DEVELOPMENT LTD. + xu yi hai + xuyh&jwele.com.cn +51135 + 深圳中科德能科技有限公司 (Shenzhen Scodeno Technology Co., Ltd.) + 刘情 (Liu Love) + liuqing&scodeno.com +51136 + Thinkst Applied Research + Marco Slaviero + marco&thinkst.com +51137 + Cambio Healthcare Systems AB + Viktor Jernelöv + viktor.jernelov&cambiosys.com +51138 + AIUT sp. z o.o. + Robert Chmieliński + admin&aiut.com +51139 + Controlled Electronic Management Systems + Ian Schofield + ian.schofield&jci.com +51140 + AUDIO INTEGRATED UNITS MMB GmbH + Mustafa Gürsan + info&mmb-units.de +51141 + NPEX.IT + Bogusz Omycki + kontakt&npex.it +51142 + HMS Holdings Corporation + Joseph Spearin + joseph.spearin&hms.com +51143 + Pinguan Tech (Wuhan) Co., Ltd. + Woods Xia + woods_xiazw&163.com +51144 + Ratho B.V. + Thomas Schepers + helpdesk&ratho.nl +51145 + Globtel Holding d.o.o. + Mario Munda + mario.munda&air-tv.net +51146 + Csaba Kovács + Csaba Kovács + snmp&kcs.hu +51147 + Cobalt Iron + Robert Marett + rmmarett&cobaltiron.com +51148 + REVGO (GUANGZHOU) TELECOM TECHNOLOGY LIMITED + Juan Jiang + juan.jiang&revgotech.com +51149 + Alpenglow Australia Pty Ltd + IT + it&alpenglow.com.au +51150 + Okta, Inc. + OID Admin + oid_admin&okta.com +51151 + JSC Cybertech + Andrey Shumilin + ashumilin&nppct.ru +51152 + Lorenzo Rompianesi + Lorenzo Rompianesi + lorenzo.rompianesi&gmail.com +51153 + JSC Asteros + Sergey Kovpanets + sergey.kovpanets&asteros.ru +51154 + Ecertic Digital Solutions, S.L. + RAUL TAPIAS HERRANZ + info&ecertic.com +51155 + Utah Hematology Oncology PC + David Pittam + david.pittam&utahhemonc.com +51156 + DaLuNET + David Cedl + dcedl&dalunet.cz +51157 + WAVE Project + Michael Andersen + wave&steelcode.com +51158 + ANMD + Pieter Schuurmans + pieter.schuurmans&icloud.com +51159 + 珠海安联锐视科技股份有限公司 (Zhuhai Allianz Sharp Technology Co., Ltd.) + 李双林 (Li Shuanglin) + lsl&raysharp.cn +51160 + FoamPartner + Peter Staab + it.support&foampartner.com +51161 + Signet Bank AS + Deniss Boiko + deniss.boiko&signetbank.com +51162 + asTech + Chris James + cjames&astech.com +51163 + KC Corp + Fumiya Tsumura + f-tsumura&kcj.co.jp +51164 + Beijing Sunyainfo Technologics Co.,Ltd. + Bourne Zhang + bourne&sunyainfo.com +51165 + Taubman Engineering + Jason Taubman + jason&taubman.org +51166 + Game Creek Video + Jason Taubman + jtaubman&gamecreekvideo.com +51167 + NTW Netzwerk Telekommunikation Datentechnik + Thomas Weis + tweis&ntw-datentechnik.de +51168 + Cetwin System Solutions + Max Kielland + max.kielland&css-sweden.com +51169 + Inlandsfrakt AB + Heiko Schindelmeiser + heiko.schindelmeiser&inlandsfrakt.se +51170 + LGB + Jorge Vásquez + javasquez&eco-lgb.com +51171 + Sveden Trä AB + THOMAS HEENS + thomas.heens&svedentra.se +51172 + DEPO Electronics Ltd. 
+ Support Service + deposupport&depo.ru +51173 + Scientific Florida + Robert van Noetsele + websites&positronicusa.com +51174 + SOOSAN INT Co., Ltd. + Kim Do Young + kimdy&soosan.co.kr +51175 + Sony Business Solutions Corporation + Takuya Hotta + Takuya.Hotta&sony.com +51176 + Cloudching + Sas Stu + sastu&cloudching.com +51177 + Gemeinschaftswerk der Evangelischen Publizistik (GEP) gem. GmbH + Ralf Dzubiel + rdzubiel&gep.de +51178 + Universitaet Ulm Institut fuer Epidemiologie und Med. Biometrie + Michael Bohnet + michael.bohnet&uni-ulm.de +51179 + Mashroat + Adel Alshahrani + shahrania&npmo.gov.sa +51180 + Ministry of Finans of the Republic of Uzbekistan + Administrator of MF + administrator&mf.uz +51181 + Telespazio s.p.a. + Andrea Spagnuolo + andrea.spagnuolo&telespazio.com +51182 + BALIN ADVERTISING LTD. + RAFAEL ADATTO + rafael&bac.co.il +51183 + JR Butler Inc + Ryan Eaves + it&jrbutlerinc.com +51184 + MacAulay-Brown, Inc. + Thomas Brewster + domain-management&macb.com +51185 + Oekumenisches Hainich Klinikum gGmbH + Stephan Herz + s.herz&oehk.de +51186 + T-MOBILE POLSKA S.A. + Piotr Koryczan + itsec&t-mobile.pl +51187 + SoftIron + SoftIron Compliance + compliance&softiron.com +51188 + Lanner Electronics Inc. + Pat Huang + pat_huang&lannerinc.com +51189 + JiangSu Future Networks Innovation Institute + Ran Liu + liuran&fnii.cn +51190 + Go-B Enterprise + Gregory Beggs + greg&go-b.com +51191 + IntraPack Industries, Inc. + Tracy Zaffino + tracy&intrapack.com +51192 + Total Access Communication Public Company Limited + Thanakorn Sukhonthaphong + thanakorn.sukhonthaphong&dtac.co.th +51193 + 北京平治东方科技股份有限公司(peace-east Networks Ltd.) + Haixin Jiang + jianghaixin&pzdf.com +51194 + Harvard-Smithsonian Center for Astrophysics + Courtney Grimland + penmaster&head.cfa.harvard.edu +51195 + IT.NRW + Holger Jungmann + holger.jungmann&it.nrw.de +51196 + CAF S.A. - Construcciones y Auxiliar de Ferrocarriles S.A. + Ruben Alvarez + ralvarez&caf.net +51197 + Smart Card Security Inc. + Yannick LaRue + YannickL&SmartCardSecurity.ca +51198 + Logicalis Latin America + Fabio Viva Jardim + fabio.jardim&la.logicalis.com +51199 + LTD Meditsinsky center Doctor Bogolubov + Igor Fedotov + it&dbog.ru +51200 + Fuszenecker Software Development + Robert Fuszenecker + robert.fuszenecker&outlook.com +51201 + VIAZIJING + David Dai + shdai&viazijing.com +51202 + Vefsn kommune + IKT-avdelingen + IKT.Drift&vefsn.kommune.no +51203 + tu-m.de + Manuel Miska + manuel.miska&tu-m.de +51204 + SCAE S.p.A. + Andrea Giovanni Bianchessi + bianchessiag&scae.net +51205 + COMPILEO + Florent PELCAT + florent.pelcat&kpf.fr +51206 + MH Corporation + Matthew Hitching + matthew.hitching&sky.com +51207 + McLennan Ross LLP + Systems Group + systemsgroup&mross.com +51208 + Indeed + Kam Lane + cameronl&indeed.com +51209 + Thinking Design LLC + Radhakrishna Reddy + business&thinking-design.com +51210 + Themata Desenvolvimento e Consultoria em TI LTDA + Mateus Bezerra de Medeiros + mateus&themata.com.br +51211 + Iveco S.p.A + Stefano Firenze + ICT-INFODOMINI&cnhind.com +51212 + iboss, Inc. + Christopher Park + admin&iboss.com +51213 + SeTI SRL + Franco Marchesini + franco.marchesini&gmail.com +51214 + IKS Co.,Ltd. + Satoshi Osawa + osawa&applecenter.co.jp +51215 + Raffcomm Technologies Sdn. Bhd. 
+ Mohd Anais Bin Mohd Samuri + anais.samuri&raffcomm.my +51216 + Alicat Scientific, Inc + David Schollmeyer + dschollmeyer&alicat.com +51217 + Terabit Computer Systems Corporation + Kenn Arion Wong + kawong&tcs.com.ph +51218 + Ham Radio Group at RWTH Aachen + Ralf Wilke + rwth-afu&online.de +51219 + HANKEN SCHOOL OF ECONOMICS + Mats Ceder + mats.ceder&hanken.fi +51220 + Esprit Europe GmbH + Bjoern Schoenewald + it.windows.administrator.de&esprit.com +51221 + Wolseley UK Limited + Jon Pilmoor + ITIANA.PENAdmin&wolseley.co.uk +51222 + John Carroll University + John Sully + jsully&jcu.edu +51223 + RBJ Enterprises, LLC + Rohid Jiwani + rohid.jiwani&gmail.com +51224 + RJ Enterprises, LLC + Rohid Jiwani + rohid.jiwani&gmail.com +51225 + ABC + Rohid Jiwani + Rohid.Jiwani&gmail.com +51226 + Department of Mines, Industry Regulation and Safety + Network Support + network.support&dmirs.wa.gov.au +51227 + nantong advanced communication technology research institute co. LTD + LiMingYan + qhyanlm&outlook.com +51228 + LINXA UK LTD + ASIM ALP + asim&linxa.com +51229 + Tradeshift + Jesper Terkelsen + operations&tradeshift.com +51230 + Euron Sp. z o.o. + Pawel Bien + pawel.bien&euron.pl +51231 + SquareTrade + Asher Schaffer + unix&squaretrade.com +51232 + Tian-Power Ltd. + Iron Peng + penggang&126.com +51233 + GOMYCODE + Arsslen Idadi + arsslen&gomycode.tn +51234 + ABC arbitrage Asset Management + Tech ABC + tech&abc-arbitrage.com +51235 + Masterpress S.A. + Tomasz Baranowski + oidmaster&masterpress.com +51236 + NUX Technologies + Matias Lambert + matias.lambert&nux.tech +51237 + Enveil, Inc + Jacob Wilder + support&enveil.com +51238 + conduent + Soma Sekhar Kaligotla + soma.kaligotla&conduent.com +51239 + Questrade, Inc. + Kashif Ali + kali&questrade.com +51240 + The Servicepoint Group + ICT Department + ict&servicepoint.group +51241 + TE Subcom + Richard Kram + rkram&subcom.com +51242 + Ufi Space + Eason Huang + eason.ys.huang&ufispace.com +51243 + Medical Specialist Holdings (Pty) Ltd + Piet Theron + ptheron&mshsa.co.za +51244 + Tangent Animation + James Paskaruk + james.paskaruk&tangent-animation.com +51245 + Movenda S.p.A. + Gaetano Esposito + gaetano.esposito&movenda.com +51246 + Evidencias Certificadas, S.L. 
+ Juan Gallego Molero + jgallego&evicertia.com +51247 + Siemens Postal, Parcel & Airport Logistics GmbH + Nico Weber + itadmin.logistics&siemens.com +51248 + BDSDEV (formerly 'Family Herder') + Brian DiMaggio + brian.dimaggio&outlook.com +51249 + Jages TransportAB + Marina Thyberg + marina.thyberg&jages.se +51250 + Dalfors Åkeri AB + Per Andersson + dalforsakeri&telia.com +51251 + DeepCore Systems + Philippe Devaux + devap&deepcore.eu +51252 + Reifenhaus Nordheide + Philippe Devaux + info&reifenhaus-nordheide.de +51253 + VIGOR Digital Communication Technologies Co.,Ltd + LiuJun + liujun0608&163.com +51254 + Eskilstuna Strängnäs Energi och Miljö AB + Dan Eriksson + dan.eriksson&esem.se +51255 + Clinical Oncology Associates + Sue Hudel + suehudel&gmail.com +51256 + SW Solutions + Stanislaw Wawszczak + biuro&swsolutions.pl +51257 + ZOOMSERVER + guo xia + guoxia&zoomserver.cn +51258 + Département de Maine-et-Loire + Denis Pithon + d.pithon&maine-et-loire.fr +51259 + Skogsåkarna i melllansverige AB + Mats Thörlind + mats.thorlind&skogsakarna.se +51260 + Waverly Hematology Oncology + Maria Ullrich + mullrich&waverlyhemeonc.com +51261 + Sander Dijkhuis + Sander Dijkhuis + pen&sanderdijkhuis.nl +51262 + www.sccheung.com.hk + Sunny Cheung + playonlie&gmail.com +51263 + Julita Åkeri AB + Henrik Johansson + info&julitaakeri.se +51264 + Norrhälsinge Skogsägarförening + Thomas Andersson + thomas.andersson&norrhalsingeskog.se +51265 + Giffsworld + Kevin Gifford + kevin&giffsworld.com +51266 + IT-Solutio + Jacek Adamowicz + adamowicz.j&wp.pl +51267 + ARAD Networks + SooYoung Choi + syoungone&aradnetworks.com +51268 + Gerolsteiner Brunnen GmbH & Co. KG + Maik Schütz + maik.schuetz&gerolsteiner.com +51269 + Swedish Match Industries AB + Torbjörn Sundh + torbjorn.sundh&swedishmatch.com +51270 + Prima Systems + Miha Unk + info&primasystems.si +51271 + Azura Engineering Ltd + Richard Harvie + richard.harvie&azura-engineering.com +51272 + ControlTec sp. z o.o. + Paweł Stadnik + pawel.stadnik&controltec.com.pl +51273 + StrykersStryle + Stryker Cain + stryker.cain&carecentrix.com +51274 + Spotless BHP + Tristan Nelson + tristan.nelson&spotless.com.au +51275 + Broberg Skogs AB + Anders Larsson + anders.larsson&brobergskogs.se +51276 + ForceShield, Inc + Sand Tu + sand.tu&forceshield.com +51277 + Right To Play + Rob Harman + itadmin&righttoplay.com +51278 + Prästlönetillgångarna i Skara Stift + Joakim Källman + Joakim.Kallman&svenskakyrkan.se +51279 + «Sharx Datacenter» (LimitedLiabilityCompany) + Alexander Varlamov + ext-support&sharxdc.ru +51280 + Bleemeo + Lionel Porcheron + iana&bleemeo.com +51281 + Decentralized and Distributed Systems (DEDIS) Laboratory at EPFL + Jeff R. Allen + jeff.allen&epfl.ch +51282 + Atom + Timo-Pekka Yrjölä + py&iki.fi +51283 + AHS Aviation Handling Services GmbH + Dirk Mueller-Peters + dmueller&aviova.com +51284 + TNT Systems Ltd. 
+ Tamas Szanto + tamas.szanto&tnt-systems.eu +51285 + Tower Hill Insurance Group, LLC + Jon Llewellyn + network&thig.com +51286 + Bournemouth University + Seb Dennard + sdennard&bournemouth.ac.uk +51287 + EcoSyllaba Latinoamerica S.A.S + Ernesto Rodriguez + ernesto.rodriguez&ecosyllaba.info +51288 + Christopher Kleen + Christopher Kleen + iana&kleen.ch +51289 + BEAMER.org + Kevin Beamer + kevin&BEAMER.org +51290 + NII Masshtab + Natalia Shreter + n.shreter&mashtab.org +51291 + Pauls Stradins Clinical university hospital + Karlis betins + karlis.betins&stradini.lv +51292 + Siberian Integration Systems + Vladimir Sinkevich + admin&sis-it.pro +51293 + Government of the Northwest Territories + Nathan Whiteman + tech&gov.nt.ca +51294 + ecomes + ilteris Oney + info&ecomes.org +51295 + Tuomas Siren Consulting + Tuomas Siren + tuomas.siren&tuomassirenconsulting.com +51296 + International Community School of Abidjan + Guillermo Fernandez + gfernandez&icsabidjan.org +51297 + GROKE Tueren und Tore GmbH + Maximilian Muench + admin&groke.de +51298 + Santa Monica Networks + Kristaps Cudars + itteam&smngroup.net +51299 + ib datentechnik nause GmbH + Nicolaus Kunze + software&ib-datentechnik.de +51300 + Gübau Service GmbH + Hartmuth Wenzel + wenzel&guebau-service.de +51301 + contact.pl sp. z o.o. + Pawel Bublewicz + pawel.bublewicz&ccontact.pl +51302 + Nextek Solutions Pte Ltd + Karim Rahemtulla + info&nextek.sg +51303 + Sailpoint Technologies + Tim Goldenburg + hostmaster&sailpoint.com +51304 + Medical Clinic of Northville + Vesna Summers + vss003&gmail.com +51305 + National Marine Electronics Association + Steve Spitzer + sspitzer&nmea.org +51306 + Nipendo + Nir Mendel + systems&nipendo.com +51307 + OEM Solutions, Inc. + Colby Johnson + cjohnson&oemsolutions.net +51308 + SEHCON + Stefan Haine + stefan.haine&sehcon.be +51309 + 5x9 Networks + Branimir Rajtar + branimir.rajtar&5x9networks.com +51310 + segfault.io + Adam Przybyszewski + admin&segfault.io +51311 + SAGS IT + Sven Schlinkmann + service&sags-it.com +51312 + Vasilevsky Rudnik Gold Mine + Alexander Litvin + sysadmin&vrgm.net +51313 + Horizon Power + Mike Masiye + DERMS.Project&horizonpower.com.au +51314 + Solusindo Antar Network + Lukman Sahid + lukman.sahid&solarnet.co.id +51315 + Technotronics Ltd. + Alexandr Startsev + startsev&ttronics.ru +51316 + Lundgrens i Igelfors AB + Robert Lundgren + Robbanlundgren&gmail.com +51317 + ArchNemesis + Miles Whittaker + mjwhitta&gmail.com +51318 + Starview Systems + James Huang + jamsphon&gmail.com +51319 + Wallace Roberts & Todd, LLC + Wayne Yeager + wyeager&wrtdesign.com +51320 + Hitachi Hirel Power Electronics Pvt Ltd + Dhawal Patel + dhawal_patel&hitachi-hirel.com +51321 + If P&C Insurance AS + Mihails Galuska + mihails.galuska&if.lv +51322 + ONAIR MEDYA KOMUNIKASYON LTD. + LUTFI AYSAN + info&onair.com.tr +51323 + Stensborgs åkeri AB + Joakim Lindholm + Joakimkarllindholm&gmail.com +51324 + Skarox OÜ + Nazim Can Bedir + pen&its.skarox.ee +51325 + Jurupa Unified School District + Matthew Hahn + matt_hahn&jusd.k12.ca.us +51326 + Profitap HQ BV + Rosario Contarino + rosario.contarino&profitap.com +51327 + Agência Estadual de Tecnologia da Informação de Pernambuco + Celso Agra + celso.sa&ati.pe.gov.br +51328 + Elder Estates + Tom Elder + teldertelder&hotmail.com +51329 + Salviol Global Analytics Ltd. + Denis Leskošek + info&salviol.com +51330 + VOMATEC Innovations GmbH + Matthias Breyer + entwicklung&vomatec.de +51331 + TO21 + Bak Jong O + getbak&to21.co.kr +51332 + Shenzhen Taishan Technology Co., Ltd. 
+ king zhu + king.zhu&taishan-tech.com +51333 + EduBase LLC + Aron Hives + aron.hives&edubase.net +51334 + PolarPower, Inc. + Xuan Yao + xuanyao&polarpowerinc.com +51335 + Austrian Federal Ministry of Education, Science and Research + DI Klemens Urban + klemens.urban&bmbwf.gv.at +51336 + Big River Communications + Chris Simmons + it&bigrivercom.com +51337 + Freesat Limited + Kanwal Chauhan + kanwal.chauhan&freesat.co.uk +51338 + Frederick County Bank + Michael Crum + iana&fcbmd.com +51339 + packetized.org + IANA Admin + iana&packetized.org +51340 + Shenzhen Adamasnet Technology Co., Ltd. + Qin Xiaofeng + qxf.nvans&gmail.com +51341 + Trusted Key + Keith Kowal + keith&trustedkey.com +51342 + Deutsche Familienversicherung + Florian Racky + it-infrastruktur&deutsche-familienversicherung.de +51343 + Brotherhood and Sisterhood Graphic Design and Fashion + Ayanda Nkwanyana + asnkwanyana908&gmail.com +51344 + SINENSIA IT SOLUTIONS + Carlos Arranz + madrid&sinensia.com +51345 + IBT Interfaces + Nils Köhler + nils.koehler&ibt-interfaces.de +51346 + da young telecommunication + young-bok, sung + master&dayoungcom.co.kr +51347 + Accipiter Radar + Wilson Aycho + pen&accipiterradar.com +51348 + Shanghai Kyee Technology Co.,Ltd. + chebin + 2060lab&kyeegroup.com +51349 + ABN AMRO Clearing + M.U. van Doorne + mike.van.doorne&nl.abnamro.com +51350 + County Durham and Darlington Fire and Rescue Service + Lee Heightley + lheightley&ddfire.gov.uk +51351 + Norfolk County Council + Jamie Holmes + jamie.holmes&norfolk.gov.uk +51352 + eiipii + Paweł Cesar Sanjuan Szklarz + contact&eiipii.com +51353 + IOActive + Derek Held + iana.oid&ioactive.com +51354 + Klemetsson Holding AB + Christian Klemetsson + christian&klemetsson.com +51355 + SJ AB IT Utveckling + Göran Linné + goran.linne&sj.se +51356 + MDIIA + Matthias Merkel + matthias&boltn-hosting.com +51357 + CIRA Labs + Lucas Estienne + lucas.estienne&cira.ca +51358 + Clockworx + RICHARD DAWSON + dawsonra&clockworx.org +51359 + Lehigh Trust Services, LLC + OID Administrator + oidadmin&lehightrust.com +51360 + RCDDM + Michael Küppenbender + ddm-uhd&rcddm.com +51361 + Police and Border Guard Board of Estonia + Margit Ratnik + margit.ratnik&politsei.ee +51362 + THOMAS SIGNE SOLUCIONES TECNOLOGICAS GLOBALES S.A.S. + María José Martínez + mariajose.martinez&signe.es +51363 + Spark Networks Services GmbH + Ilona Fischer + ilona.fischer&affinitas.de +51364 + The Flirble Organization + Chris Luke + chrisy&flirble.org +51365 + Milliarum GmbH & Co. KG + Harald Schmitz + harald.schmitz&milliarum.de +51366 + Università degli Studi del Sannio - Settore Sistemi IT + Francesco Montella + francesco.montella&unisannio.it +51367 + Sörmlands Skogstransporter AB + Marie Johansson + lj.akeri&telia.com +51368 + Aterlo Networks Inc + Scot Loach + scot&aterlo.com +51369 + Expeditors International + Peter Lansdaal + peter.lansdaal&expeditors.com +51370 + StraCon Services Group, LLC. + Austin Craig + austin.craig&stracongroup.com +51371 + Auckland Council + ICT Network Infrastructure Team + Daniel.Banks&aucklandcouncil.govt.nz +51372 + SmartCrypto Pty Ltd + Niel van Greunen + niel&smartcryptosolutions.com +51373 + DIGIEVER + Derek Lin + derek.lin&digiever.com +51374 + TimmerLogistikVäst AB + Simon Simonsson + simon&tlv.nu +51375 + Swisscard AECS GmbH + Jean Paul Kölbl + jeanpaul.koelbl&swisscard.ch +51376 + inno-plan GmbH + Andre Maoro + andre.maoro&inno-plan.de +51377 + Reaxcer AB + Sven-Åke Jonasson + Sven-Ake.Jonasson&reaxcer.se +51378 + OWITHO Network Technology (Shanghai) Co., Ltd. 
+ Si Jianghua + thirdparty-external&owitho.com +51379 + LBS Landesbausparkasse Südwest + Winkler, Christian + techorg&lbs-sw.de +51380 + E.S.R.Labs AG + Jessica Stöcker + jessica.stoecker&esrlabs.com +51381 + Intek LLC + Sergey Orlov + ntrl2010&gmail.com +51382 + Sandfly Security, Ltd + Craig Rowland + info&sandflysecurity.com +51383 + Verallia + samir ramdane + samir.ramdane&verallia.com +51384 + Auto Gassner + Sebastian Oswald + it&autogassner.de +51385 + Zubro.NET + Antonin Zuber + tonda&zubro.net +51386 + Mount Holyoke College + Joel Broyles + network&mtholyoke.edu +51387 + S.C. Bitlosophy S.R.L. + Domain Admin + domainadmin&bitlosophy.ro +51388 + osjava.net + li dapeng + lidp&osjava.net +51389 + FOC - fibre optical components GmbH + Tisko Sonntag + tisko.sonntag&foc-fo.de +51390 + | Mobiltrust Bilisim Sanayi ve Ticaret A.S. + Levent Topaktas + levent&mobiltrust.com +51391 + Komtel Bilgi ve İletişim Teknolojileri A.Ş. + Nadiye Yildiz + nadiye.yildiz&komteltech.com +51392 + TETRA Informatique + Marc Delerue + mdelerue&tetra-info.com +51393 + Kjellbergs Logistik & Teknik AB + Annika Gustavsson + annika&kjellbergsab.se +51394 + b-Things + Heynderickx Wim + wim&b-things.com +51395 + Depenbrock Bau GmbH & Co. KG + Bernd Bauermeister + b.bauermeister&depenbrock.de +51396 + The Bilco Company + Mike DePaola + miked&bilco.com +51397 + MNS Group + Douglas Szoka + diszoka&mnsgroup.com +51398 + Johnson Brothers Liquor Company + Douglas Symalla + dsymalla&johnsonbrothers.com +51399 + secure.car + IANA Admin + iana&secure.car +51400 + Max Planck Society Administrative Headquarters + Harald Suckfuell + iana-pen&gv.mpg.de +51401 + Hunan Normal University + zhangzhiyong + zyzhang&hunnu.edu.cn +51402 + 1G5 Solutions (formerly 'NT-SRV LTD') + Christopher Simpson + christopher&1g5.co.uk +51403 + Ooe Landesfeuerwehrverband + Wieland Kleinlechner + wieland.kleinlechner&ooelfv.at +51404 + Università degli Studi dell'Insubria + Boris Gattuso + boris.gattuso&uninsubria.it +51405 + infoscore austria gmbh + Patrick Messner + it&infoscore.at +51406 + Electronic Certification Center + Montajab Saleh + montajab&nans.gov.sy +51407 + National Agency for Network Services - NANS + Montajab Saleh + montajab&nans.gov.sy +51408 + Institute For Advanced Studies in Basic Science (IASBS) + emad mirsadeghi + emir&iasbs.ac.ir +51409 + Railton Consultants Ltd + OID Administrator + oidadmin&railtonconsultants.co.uk +51410 + Dannes trp Rengsjö AB + Danne Persson + Dannestrp&yahoo.se +51411 + L1 Smart Solutions + JOSMAR AFONSO IGNACIO ANTUNES + josmar&redisul.com.br +51412 + Fotrousi Electronics Research + Farshad Fotrousi + farshad.fotrousi&fotrousi.com +51413 + Baraga Area Schools + Adam Paquet + admin&baragaschools.org +51414 + Open Connectivity Foundation + Aja Murray + staff&openconnectivity.org +51415 + Banicomm Co. + Ali R Esfahani + esfahania&banicomm.com +51416 + Neotech Solutions + Khaled Al-Hamwi + k.hamwi&neotech-s.com +51417 + Rede Zone + Cosme Faria Corrêa + cosmefc&gmail.com +51418 + North Carolina Department of Transportation (NCDOT) + R. 
Craig Strunk + rcstrunk&ncdot.gov +51419 + Responsiv + Malcolm Warwick + malcolm.warwick&responsiv.co.uk +51420 + Skene Ventures Limited + Martyn Ashworth + martyn.ashworth&skene.ventures +51421 + Civis Technologies, LLC + Seth Whitworth + seth.whitworth&civistech.com +51422 + Department of Energy - Strategic Petroleum Reserve + Jeremy Maggio + jeremy.maggio&spr.doe.gov +51423 + Little Company of Mary Hospital and Health Care Centers + Matthew Stachowski + mstachowski&lcmh.org +51424 + Vyvygen Corporation + Abuzer Rafey + arafey&vyvygen.com +51425 + New Directions Housing Corporation + IT Administrator + admin&ndhc.org +51426 + Dark3, Inc. + Bryan T. Richardson + btr&darkcubed.com +51427 + China Telecommunication Technology Labs + Ji Dong + dongji&caict.ac.cn +51428 + Virkeslogistik Mellansverige AB + Andreas Pipenburg + 84443&blixtmail.se +51429 + Crossrims Pty Ltd + John Gavrilita + info&crossrims.com +51430 + liuwei1573 + Liu Wei + liuwei1573&163.com +51431 + ELKO EP, s.r.o. + Zdenek Dokulil + dokulil&elkoep.cz +51432 + Vaultit AB + Martin Adamsson + martin.adamsson&vaultit.org +51433 + V-Partei³ + Karlheinz Späth + karlheinz.spaeth&gmail.com +51434 + S.C. DELTATEL SRL + ADAM GH. Sorin + sorin.adam&deltatel.ro +51435 + SCLE SFE + Menard Christophe + christophe.menard&scle.fr +51436 + Opendigitalradio + Matthias Brändli + matthias.braendli&mpb.li +51437 + Razberi Technologies, Inc. + Stephen Schwartz + stephen.schwartz&razberi.net +51438 + Unshare + Valentin Kulesh + leshy&unshare.net +51439 + Videotec S.P.A. + Ottavio Campana + o.campana&videotec.com +51440 + smart-edge.com + Nick Ross + nick&smart-edge.com +51441 + InfraNet AG + Christian Storch + storch&infra.net +51442 + Siegfrieds Mechanisches Musikkabinett GmbH & Co Museum KG + Lucas Wendel + lucas.wendel&smmk.de +51443 + Kaloom + Suresh Krishnan + suresh&kaloom.com +51444 + AllSouth Federal Credit Union + Benjamin Schoenecker + benjamin.schoenecker&allsouth.org +51445 + Chromatec Video Products + Michael Gardiner + michael&chromatec.com +51446 + Saitro + Luis Carlos de Toledo + iana.register&saitro.com +51447 + Mizuno USA, Inc. + William Dawson + netadmin&mizunousa.com +51448 + ID06 AB + Martin Adamsson + martin.adamsson&vaultit.org +51449 + Verified Systems International GmbH + Christof Efkemann + efkemann&verified.de +51450 + Nokia + Jeff Donnelly + jeff.donnelly&nokia.com +51451 + Beijing mission communication co.,Ltd + Yuanyuan Tian + tianyy&eqmail.net +51452 + Evolution Travel Network + arvit varfaj + avarfaj86&gmail.com +51453 + Lekkerland information systems GmbH + Frank Peters + frank.peters&lekkerland.com +51454 + Pionica Poland + Marcin Zawadzki + info&pionica.com +51455 + Ministry of Foreign Affairs of Estonia + Madis Peedimaa + madis.peedimaa&mfa.ee +51456 + Dedon Inc + Jason Mozingo + itdepartment&dedon.us +51457 + Linkedin + Yuval Bachar + yuval&linkedin.com +51458 + Empower Psychiatry & Sleep LLC + Ravi Singareddy + rsingare&gmail.com +51459 + DataEngrave S.L. + Ramón Martínez + ramon.martinez&dataengrave.com +51460 + ALEXIUS M BISHOP, MD PSC + Denah Clinebell + billingoffice&ambishoppeds.com +51461 + DIESEL-NETWORKS.COM + Dan Erlichman + dan&diesel-networks.com +51462 + Saudi Aramco + Gary Simmons + x500OIDAdmin&aramco.com +51463 + Ortana Electronics Software Inc. 
+ Hayal Senyurt + hayal.senyurt&ortana.com +51464 + Zehetner-Elektronik GmbH + Zehetner Klaus + office&zehetner-elektronik.at +51465 + Synvina + Riaan van Staden + itsupport&synvina.com +51466 + Avantium + Riaan van Staden + itsupport&avantium.com +51467 + Chrisse Corporation + Christoffer Andersson + christoffer.andersson&chrisse.se +51468 + Vertis Solutions + Jose Gregorio Gutiérrez + jose.gutierrez&vertis-solutions.com +51469 + CARDIOLOGY ASSOCIATES, INC. + NANCY GRUNE + cj&cardiohawaii.com +51470 + Hole kommune + Roger Halvorsen + admin&hole.kommune.no +51471 + DR. WILLIAM STRAZZELLA + DAWN DOSCHER + DAWNCEE366&YAHOO.COM +51472 + Arkessa + Martin Hostacny + martin.hostacny&arkessa.com +51473 + thyssenkrupp System Engineering GmbH + Lars Gloistein + lars.gloistein&thyssenkrupp.com +51474 + Crayon Group AS + Erik Larsen + erik.larsen&crayon.com +51475 + tlacuache.us + Jacob L Lemus Peschel + jacob&tlacuache.us +51476 + Rega + Rega + Stephan.Graber&rega.ch +51477 + XZS + Sam Schmidt + webmaster&xzs.be +51478 + ITW Food Equipment Group + Todd Davis + Todd.Davis&itwfeg.com +51479 + VeChain + GU Jianliang + jianliang.gu&vechain.com +51480 + Ace Supply Company, Inc. + Shawn Fonley + shawn.f&ace-aircontrolessentials.com +51481 + Curve Dental + Steven Hines + infrastructure&curvedental.com +51482 + R2H Flavor Technology, LLC + Michael Piety + mpiety&r2hflavortech.com +51483 + CTIA + Robert Cantu + ctiacybersecurity&ctia.org +51484 + Exonar Ltd + Russell Foster + russ.foster&exonar.com +51485 + mm-lab GmbH + Andreas Lemmer + alr&mmlab.de +51486 + Revi Comp. Network Technologies + Oliver Geist + Webmaster&ReviComp.de +51487 + UnicusID, Inc. + Scott Pierce + pierce&unicusid.com +51488 + Netnod Internet Exchange i Sverige AB + Henrik Melwin + ops&netnod.se +51489 + SliceOne + Xudong Zheng + admin&sliceone.com +51490 + Ashfords LLP + Sam Quick + sysops&ashfords.co.uk +51491 + SCRT SA + Sergio Alves Domingues + sergio&scrt.ch +51492 + SuitePad GmbH + Mario Olivio Flores + mario.flores&suitepad.de +51493 + Smart Flows + Hervieu Olivier + ohervieu&smart-flows.com +51494 + Alan N Yager MD + sabrina green + sabrina.green&alanyagermd.com +51495 + Real-World-Systems, Inc. + Dennis German + DGermania&Real-World-Systems.com +51496 + CC-Link Partner Association + Yoshiyuki Mamada + Mamada.Yoshiyuki&cc-link.org +51497 + ChatVoice Corporation + Takayuki Uehara + uehara&chatvoice.jp +51498 + KeeeX SAS + Laurent HENOCQUE + certmaster&keeex.net +51499 + FLEXXIBLE INFORMATION TECHNOLOGY SL + RICARD SANCHEZ + ricards&flexxible.com +51500 + Milwaukee Foot and Ankle Specialists + LORI ABRAMOWSKI + INFO&MILWAUKEEFOOT.COM +51501 + Nebraska Foot & Ankle, P.C. + Melissa Beeck + mbeeck&nfaortho.com +51502 + Cludo + Andrzej Pierscionek + noc&cludo.pl +51503 + ARS Traffic & Transport Technology + Jonathan Prins + snmp&ars-traffic.com +51504 + Oaks Hotels and Resorts Australia (Minor Hotels Australia) + Mark Norton + webadmin&theoaksgroup.com.au +51505 + Hitachi Metals, Ltd. + Yoshihiro Nakatani + yoshihiro.nakatani.zj&hitachi-metals.com +51506 + TnMTech Co.,Ltd. + Sang Woo Jung + swjung78&gmail.com +51507 + meanit bvba + Maarten De Ridder + maarten.deridder&meanit.be +51508 + HealthMyne, Inc. 
+ Hao Wang + hao.wang&healthmyne.com +51509 + Lycoming-Clinton Joinder Board + David Walter + dwalter&joinder.org +51510 + Crisis Innovation Lab + Robert Marko + robimarko&gmail.com +51511 + Adams Street Partners + michael giannangelo + mgiannangelo&adamsstreetpartners.com +51512 + Greenbriar Equity Group LLC + Scott Parkis + iana-pen&greenbriarequity.com +51513 + SYSETM INFORMATION PARTNER + Tomomi Gotou + t.goto&s-i-p.jp +51514 + PJSC Bank ALEKSANDROVSKY + Valeriy Kobak + v.kobak&alexbank.ru +51515 + StarLight Media LLC + Roman Kaschenco + security&slm.ua +51516 + ASAAF-UCM + Ismael Aderdor + ikabliz&asaaf.org +51517 + Snell Check + A.W.Snell + alex.snell232&gmail.com +51518 + IoTExperiences + Frédéric Menut + frederic.menut&actemium.com +51519 + Commsignia, Inc. + Andras Takacs + andras.takacs&commsignia.com +51520 + Karabro AB + Oskar Borgqvist + kontakta&karabro.se +51521 + ISOutsource + Paul Youngberg + pauly&isoutsource.com +51522 + The Omni Group + Ken Case + kc&omnigroup.com +51523 + aplicube + Michael Novak + michael.novak&aplicube.de +51524 + AASKI Technology Inc. + Wesley Peters + wpeters&aaski.com +51525 + AT&T Wireless Network Architecture and Design + Yajun Gu + yg8106&att.com +51526 + Schweizerische Akademische Turnerschaft + Fredrik Roubert + webmaster&utonia.ch +51527 + braincon GmbH + Thorsten Rood + postmaster&braincon.cloud +51528 + AIGCEV - Association Internationale de Gouvernance du Cachet Electronique Visible + Patrick Drolet + patrick.drolet&notarius.com +51529 + X Stream Designs + Joel DeGraff + joel&xstreamdesigns.com +51530 + Volkswagen Poznań Sp. z o.o. + Michał Kościński + michal.koscinski&vw-poznan.pl +51531 + multiOTP + Andre Liechti + andre.liechti&multiotp.net +51532 + SysCo systemes de communication sa + Andre Liechti + andre.liechti&sysco.swiss +51533 + SkyData Communications + James McCartney + jtmiii&skd.com +51534 + OH Precision Corp. + Jason H. + jason&ohprecis.com +51535 + iGeneTech + Yang Wu + yang.wu&igenetech.com +51536 + Energieversorgung Mittelrhein AG + Jens Monzert + jens.monzert&evm.de +51537 + Tessi Documents Services + MUNOZ Déborah + deborah.munoz&tessi.fr +51538 + Nanjing Stable Electronic Technology Co. , Ltd + chao yang + yangc&stbsys.com +51539 + LHI Leasing GmbH + Kurt Mc Leod + k.mcleod&lhi.de +51540 + LITEON Networking Solutions + Mark Yang + mark.yang&liteon.com +51541 + LITEON TECHNOLOGY CROP. + Mark Yang + mark.yang&liteon.com +51542 + UPM-Kymmene Oyj + Tuomas Vuojamo + tuomas.vuojamo&upm.com +51543 + Apex Systems + Goran Jozic + gjozic&apexsystems.com +51544 + MAI Trading Co. 
W.L.L + Osama Al-Shawaf + info&mai-trading.com +51545 + Swampfox Technologies Inc + John Finn + john.finn&swampfoxinc.com +51546 + Alex Moore + Alex Moore + alex&lspeed.org +51547 + Catalysts GmbH + Jakob Englisch + admin&catalysts.cc +51548 + Council Rock Enterprises, LLC + Michael Doser + mdoser&council-rock.com +51549 + Pediatrics at Oyster Point + Taylor Weakley + Taylorw.pop&gmail.com +51550 + American Amplifier Technologies, LLC + Brian Toon + briant&americanamptech.com +51551 + Caribbean Cancer Care Services + Nanette Fradera + nfradera&oncologiapr.com +51552 + Advanced Television Systems Committee + Jerry Whitaker + jwhitaker&atsc.org +51553 + Oregon Bureau of Labor and Industries + Kemper Peel + peelw&boli.state.or.us +51554 + System Planning + Taizo Sasamoto + iana-pen&sysplnd.co.jp +51555 + Jason Letanosky + Jason Letanosky + jletanosky&gmail.com +51556 + Max Planck Institute for Biology of Ageing + Sebastian Ystems + systems&age.mpg.de +51557 + SecurIT360 LLC + Jim Richardson + soc&securit360.com +51558 + Europower Consulting GmbH + Steffen Loetzsch + sloetzsch&europower-consulting.com +51559 + duagon AG + Josep Rubi + josep.rubi&duagon.com +51560 + Informatics Systemhaus + Ralf Pachali + it&informatics-systemhaus.de +51561 + Fry-IT Limited + Keean Schupke + keean&fry-it.com +51562 + Fasttrack Immediate Care LLC + Carminta Dykes + carminta.dykes&fasttrackic.com +51563 + TelcoEdge Pty Ltd + Richard Clarke + richard&telcoedge.com.au +51564 + National Institute of Information and Communications Technology (NICT) + Hitoshi Asaeda + asaeda&nict.go.jp +51565 + Shenzhen Ehilink Technology Ltd. + Wei Yuanqin + wei&ehilink.com +51566 + AMBA + Aleksander Mroczek + amba&mm.pl +51567 + Plusnet GmbH + Carsten Reuter + carsten.reuter&plusnet.de +51568 + Buck Institute + Kevin Casey + nadministrator&buckinstitute.org +51569 + Technology Bjumper S.L. + Rubén Agudo + ragudo&bjumper.com +51570 + Clinic Management Group + Yushkevichus Dmitry + d.yushkevichus&clinic-mg.ru +51571 + Observer-X LLC + Xiaoke K Wang + karlour&hotmail.com +51572 + DEVtec.io + Camilo Rodegheri Mendes dos Santos + camilo&devtec.io +51573 + Nachiket V. Patel, MD, PA + Nachiket Patel, MD + npatelmdpa1&gmail.com +51574 + CENTRAL JERSEY SPORTS MEDICINE & ORTHOPAEDIC CENTER, PC + SONDRA DIDONATO + CJSOC43&YAHOO.COM +51575 + TUC Co + Tony Tucker + tony&tucmoto.com +51576 + Timber Technologies, Inc. + Ben Johnson + ben&timber.io +51577 + CTI Certificate Authority Co., Ltd + Yuan Jing + yuanjing&hnca.com.cn +51578 + Codilis and Associates, P.C. + Justin Hay + justin.hay&il.cslegal.com +51579 + Thauris + H. Dorhout + hidde.dorhout&thauris.nl +51580 + MetaNetworks + Dmitry Heistonen + d.heistonen&meta-networks.ru +51581 + Localdomain.dk + Ben Vestergaard + mail+iana&localdomain.dk +51582 + Darling Ingredients Inc + Laurie Strackeljahn + lstrackeljahn&darlingii.com +51583 + Tarrell, Inc. + Michael R. Tarrell + Admin&tarrell.com +51584 + Vision Net + Jamie Miller + jamie.miller&vision.net +51585 + Kohler Co. + Pierringer Jayson + jayson.pierringer&kohler.com +51586 + TrellisWare Technologies, Inc. + Ryan McCourt + rmccourt&trellisware.com +51587 + RICIS, Inc. + Gregory D. 
Rosenberg + gregg&ricis.com +51588 + Allegiance Consulting (Pty) Ltd + Andrew Broekman + andrew.broekman&allegiance.co.za +51589 + White Hat Security + Johan Loos + iana&whitehatsecurity.be +51590 + The Austins + Neil Austin + Neil&theaustins.co.uk +51591 + L'Empire Troll + Benjamin Somers + benjamin.somers&empiretroll.fr +51592 + Cloud Way Computing + Steven Maluleke + business&cloudway.co.za +51593 + Theben AG + Wolfgang Gulde + Wolfgang.Gulde&theben.de +51594 + Royalunibrew + Jørgen Thinggaard + jorgen.thinggaard&royalunibrew.com +51595 + Ikast-Brande Kommune + Murat Özer + it-drift&ikast-brande.dk +51596 + DCT Gdansk S.A. + Jakub Ciechanowicz + itadmin&dctgdansk.com +51597 + Cullman Regional Urgent Care + Genia Sutton + urgentcare&cullmanregional.com +51598 + ESIREM + Nader MBAREK + Nader.Mbarek&u-bourgogne.fr +51599 + NeoPrime, LLC + John J. Hanley + support&neoprime.io +51600 + Stengele Holz- & Kunststofftechnik GmbH + Jürgen Halder + halder&stengele.com +51601 + PJSC First Ukrainian International Bank + PKI Admin + pki_admin&fuib.com +51602 + Uman GmbH + Robby Gurdan + rg&umannet.com +51603 + Martin Currie Investment Management Ltd + Gabriel McColl + gmccoll&martincurrie.com +51604 + Arrcus, Inc + Keyur Patel + keyur&arrcus.com +51605 + Cryptodira + Stuart Naifeh + stuart&cryptodira.org +51606 + Wafrum Consulting & Caeli Online Services + Dmitry Ponomarev + master&caeli.online +51607 + Fara Afrand Co. + Hamed Ahmadi + hamed&fara-afrand.com +51608 + Winnebago County + Jay Schaefer + jschaefer&co.winnebago.wi.us +51609 + K33BZ.COM + Administrator + admin&k33bz.com +51610 + Off Lease Only, Inc. + Network Operations + oid-admin&offleaseonly.com +51611 + Ultimate Access Primary Care + ZaNita Hill + zhill.uapc&gmail.com +51612 + IVM srl + Michele Basile + michele.basile&ivmtech.it +51613 + Controlled Power Company + John Manfreda + jmanfreda&controlledpwr.com +51614 + Lumileds LLC + Ronald Greene + ronald.greene&lumileds.com +51615 + ZhuHai JinFangDa Technology Co., Ltd + GaoXueLian + gaoxuelian&jinfangda.com +51616 + CatIT + Bartlomiej Malecki + bartek_malecki&o2.pl +51617 + Novo Holdings A/S + Niels Paerregaard + npr&novo.dk +51618 + ITS Bel Ltd + Mikhail Kuznetsov + tech.support&itsbel.by +51619 + Celonis SE + Andreas Bayer + servicedesk&celonis.com +51620 + Getzner Textil Aktiengesellschaft + Juergen Gehring + admin&getzner.at +51621 + TaraTech Co. + Kamal Dashti + info&taratech.ir +51622 + San@sro Inc. + Yannick LaRue + San&sro.ca +51623 + Lyft + Robert Tillman + rtillman&lyft.com +51624 + Garrett Container Systems + Nathaniel Stem + nstem&garrettcontainer.com +51625 + Open Stack, Inc. + Jay Nam + jay.nam&openstack.co.kr +51626 + Dortmunder Blankstahl GmbH + Andreas Kari + a.kari&dortmunder-blankstahl.de +51627 + NMBS-SNCB + Jeroen Desplenter + 95ypto.cybersec-accessmgmt&ypto.be +51628 + Solid Optics + Thijs van den Boogaard + thijs&solid-optics.eu +51629 + Mektoso + Amr Mekawy + amekawy&outlook.com +51630 + HealthTrust, Inc. + Technical Support + techsupport&healthtrustnh.org +51631 + Bocom BBM + Carlos Jourdan + carlosjourdan&bocombbm.com.br +51632 + AND Agency + Jeffrey Thompson + jeffrey.thompson&andishere.com +51633 + Eurotel Bilgi Iletisim Sistemleri A.S. + Erkan Durmus + erkan&eurotel.com.tr +51634 + RobotSzoft Kft + Jozsef Kovacs + robotszoft&robotszoft.hu +51635 + PagerDuty, Inc. 
+ Cees de Groot + cees&pagerduty.com +51636 + CEGI + Pascal ANDRE + p.andre&cegi.fr +51637 + Ovostar Union + Support + noreply&ovostar.ua +51638 + YADA + Dave Varon + varontron&gmail.com +51639 + DI Christian Linhart GmbH + Christian Linhart + christian.linhart&clinhart.com +51640 + JustinWiebe.ca + Justin Wiebe + justin&justinwiebe.ca +51641 + Allianz Technology SE - Global Document and Archive Management + Steffen Raabe + extern.raabe_steffen&allianz.com +51642 + IIC Workshop + PKI Admin + pkiadmin&iicworkshop.com +51643 + Janus Henderson Investors + Nick Rowe + nick.rowe&janushenderson.com +51644 + IP-Only Networks AB + Daniel Wennberg + daniel.wennberg&ip-only.se +51645 + The Trustee for KM Licence Trust + Timothy Kozak + netops&kordamentha.com +51646 + Becker Nachrichtentechnik GmbH + Alexander Domanski + alexander.domanski&becker-rf.com +51647 + Rennes Métropole + Nathalie Marin + dsi&rennesmetropole.fr +51648 + Ingenico e-Commerce Solutions + Silas Jenner + sysadmin&ecom.ingenico.com +51649 + BHS Corrugated Maschinen- und Anlagenbau GmbH + IT Security Team + webmaster&bhs-corrugated.de +51650 + St. Nikolaus-Hospital + Benoit Van Mele + informatique&hospital-eupen.be +51651 + Piekarnia Oskroba S.A. + Michal Milewski + michal.milewski&oskroba.pl +51652 + DEA Deutsche Erdoel AG + Carsten Eickhoff + domainverwaltung&dea-group.com +51653 + NorthWestern Energy + Matthew Dunbar + oid&northwestern.com +51654 + Convergence Systems Limited + Albert Lai + albertlai&convergence.com.hk +51655 + H W Communications Limited + Dr Steve Marple + it&hwcomms.com +51656 + Xtiva Financial Systems Inc + Robert Kenig + rkenig&xtiva.com +51657 + marclachapelle.com + Marc Lachapelle + iana.org&marclachapelle.com +51658 + Transports GUILMET + GUILMET Gaetan + gaetan.guilmet&gmail.com +51659 + 0x19e Networks + Robert Baumgartner + info&0x19e.net +51660 + Combat Networks Inc. + Paul Boone + pboone&combatnetworks.com +51661 + FRPC JSC RPA Mars + Petr Smikun + smikun&mail.ru +51662 + Telna Inc + Mathew Stein + mathew&knowroaming.com +51663 + Maspex Sp. z o.o. Sp.K. + Krzysztof Skalski + k.skalski&maspex.com +51664 + Kodegenix + Jakub Chłapiński + jakub.chlapinski&kodegenix.pl +51665 + South Carolina Law Enforcement Division + Nicholas Leaphart + nleaphart&sled.sc.gov +51666 + iC Consult GmbH + Beate Schmidt + icconsultpen&gmail.com +51667 + BroadSource Group Pty Ltd + Joshua McAdam + joshua.mcadam&broadsource.com.au +51668 + Dannes transport i Rengsjö + Dan-Erik Persson + dannestrp&yahoo.se +51669 + Qeshm paya pars + Mohamad Bagher Sajadi + mb.sajadi&qpayapars.com +51670 + Parspooyesh + Farshad Khoshkhui + farshadkh&parspooyesh.com +51671 + Silk Road Infrastructure Communication Development Co + Mr.abdolvahab Mohammadpourlima + lima&silkroadcommunication.com +51672 + payammail + payam ahmadi + support&payammail.ir +51673 + Sanjesh Afzar Asia + Meysam Mohammadi + techsvcs&saa-co.com +51674 + TEACHERS MUTUAL BANK LIMITED + Peter Smee + psmee&tmbank.com.au +51675 + FaSTTUBe + Falk Schimweg + f.schimweg&fasttube.de +51676 + Cloud Kom d.o.o + Ivan Brozovic + ivan.brozovic&cloudkom.hr +51677 + NIL Ltd. + Bostjan Sustar + bsustar&nil.com +51678 + Intrising Networks, Inc. + Ken Lee + kenlee&intrising.com.tw +51679 + GIAL + Laurent Hue + laurent.hue&gial.be +51680 + mSensis S.A. + Michalis Lilos + m.lilos&msensis.com +51681 + Dr. KADE Pharmazeutische Fabrik GmbH + Andreas Kloska + andreaskloska&kade.de +51682 + Rebex CR, s.r.o. 
+ Lukas Pokorny + lukas.pokorny&rebex.net +51683 + MegaDrive + Moiseenko Anatoly + moiseenkoaa&megadrive.ru +51684 + Umdasch Digital Retail GmbH + Christian Scalet + support.dr&umdasch-shopfitting.com +51685 + verdict id limited + David Wong + vid-info&verdictid.com +51686 + Flexential + Paul Davison + paul.davison&flexential.com +51687 + SRG Radiology + SRG IT Department + it&srgmri.co.nz +51688 + The Pictsweet Company + Seth Parrish + sparrish&pictsweet.com +51689 + EverTrust + Jean-Julien Alvado + jja&evertrust.fr +51690 + Idsu + FOURNIER Sébastien + contact&idsu.fr +51691 + ANIMALL + Brad PFEFFER + global-ops-it&ANIMALL.global +51692 + Bluetest AB + OID Admin + oid&bluetest.se +51693 + Smarting + Dani Fernandez + smarting&smarting.es +51694 + Feistritzwerke-STEWEAG-GmbH + Deutschl Gerald + gerald.deutschl&feistritzwerke.at +51695 + DARZ GmbH + Jürgen Henzler + j.henzler&da-rz.de +51696 + Massachusetts Financial Services Company + Don Abernathy + dabernathy&mfs.com +51697 + Anywave Communication Technologies INC + Yingying Fan + yingying.fan&anywavecom.com +51698 + Netbytes, Inc. + Christopher Flynn + Chris.Flynn&netbytes.com +51699 + Greenville Surgical Clinic + kelli serio + surgic_g&bellsouth.net +51700 + Eastern New Mexico University + Christopher Lindemann + christopher.lindemann&enmu.edu +51701 + Powermeter + Real, Nicolas Eduardo + info&powermeter.com.ar +51702 + Delta Computer Systems, Inc. + Quinton Tormanen + quinton&deltamotion.com +51703 + Timmersdala skogstransport ab + Mikael Johansson + timmersdalaskog&gmail.com +51704 + Sinai Grace Specialty Care + Evelyn Postell-Franklin + efranklin&med.wayne.edu +51705 + Ionx Solutions + Colin Anderson + colin.anderson&ionxsolutions.com +51706 + Anditi + Garth Kidd + gkidd&anditi.com +51707 + South Sound Oncology Services, PC + Deb Ellifritt + debe&siccwa.com +51708 + Medutech s.r.o. + Jan Tenora + jan.tenora&medutech.cz +51709 + K-CIX + Alexander Wormuth + pen&k-cix.de +51710 + Electronic Monitoring Solutionz Ltd + Ray Abram + ray&ems.gen.nz +51711 + Apis Networks + Matt Saladna + matt&apisnetworks.com +51712 + 0b1.se + Felix Gustavsson + felix&0b1.se +51713 + Girolami Controls Inc. + Philippe Lemieux + philippe.j.lemieux&gmail.com +51714 + Lumentum Operations LLC + Brian Kim + brian.kim&lumentum.com +51715 + SSI Cable + Pat Riley + priley&ssicable.com +51716 + EVE compliancy solutions + EVE team + info+iana-pen&lawfulinterception.com +51717 + Gamefanatics.com + Brandon Stephenson + gamefanatic&hotmail.com +51718 + Centro Oncologico del Oeste + Juan Prieto + centrooncologicooeste&gmail.com +51719 + Criteo SA + Jean-Francois Weber-Marx + jf.webermarx&criteo.com +51720 + Lartech + Viacheslav Shirikov + vs&lar.tech +51721 + UTran Technology Inc. + Matt Yen + matt.yen&utran.com.tw +51722 + GCR Tech + Gabriel Rodrigues Cravo Roxo + gabriel&gcrtech.com.br +51723 + BEIJING ZONGHENG ELECTRO-MECHANICAL TECHNOLOGY DEVELOPMENT CO. + Yuanxuan Li + liyuanxuan&zemt.cn +51724 + Intersect + Patrick Lauziere + patrick.lauziere&orangetraffic.com +51725 + Radio DataCast + Craig Kenny + craigkenny&thekennys.info +51726 + CTPHQ + Garry Baird + garry.baird&met.pnn.police.uk +51727 + Indium Limited + Mark Lyons + admin&indium.co.nz +51728 + Honiball IT Limited + Hydor Honiball + hydor&honiballit.net +51729 + Serverside Kft. 
+ Zoltan Kiss + zoltan.kiss&serverside.hu +51730 + Discount Tire + Thomas Gardner + tgardner&discounttire.com +51731 + GERO Meßsysteme GmbH + Ralf Rosenquist + info&gero-mess.de +51732 + digitalpath sàrl + Carlos Lavanchy + carlos.lavanchy&digitalpath.ch +51733 + Ministry of Science, ISRAEL + Shimon Liani + liani&most.gov.il +51734 + Gomes Consulting Services + Francis Gomes + fg&GomesConsultingServices.com +51735 + Shanghai Hite-Belden Network Technology Co., Ltd. + Nina.Ning + ningsj&hite.com.cn +51736 + Sumeru Microwave Communications Pvt. Ltd. + Siddharth Parmar + s.parmar&sumerugroup.com +51737 + x-tention Informationstechnologie GmbH + Stefan Berger + stefan.berger&x-tention.at +51738 + Glenn E Hurst MC P.C. + Trisha Boughton + trishaboughton&gmail.com +51739 + Jinmyung Communications Co., Ltd. + Hae Soo Kim + hskim&jmbroadcast.com +51740 + Goldspotlight + David Rosen + goldspotlight10&gmail.com +51741 + Zander Work + Zander Work + zander&zanderwork.com +51742 + Contact Energy Limited + Leaoa Tolo + cloud.services&concepts.co.nz +51743 + Corporación Font S. A. + Fernan Font + fernan.font&font-tecnologia.com +51744 + cyberways GmbH + Jan Baumann + jb&cyberways.net +51745 + Hochschule Ruhr West + Mike Pendzich + support&hs-ruhrwest.de +51746 + Lab Local Org + Sergey Korotkov + admins&lablocal.ru +51747 + OS33 + Alex Osipov + alex&os33.com +51748 + Kong Inc + Marco Palladino + marco&konghq.com +51749 + Quantify Technology Ltd + Architecture Team + tech.arch&quantifytechnology.com +51750 + UNIGONE + Bruno Konik + bruno.konik&unigone.com +51751 + McLaren Applied Technologies + Daniel Steer + MATCommsSystems&mclaren.com +51752 + Abanka d.d. + Tomaz Hiti + tomaz.hiti&abanka.si +51753 + Unite Private Networks, LLC + Kevin Augspurger + kevin.augspurger&upnfiber.com +51754 + Alcaldia de Medellin + Luis Marin + luisc.marin&medellin.gov.co +51755 + Breastlink Medical Group + Rena Munoz + renamunoz&veritymed.org +51756 + Sofim spol. s r. o. + Michal Veselka + michal.veselka&sofim.cz +51757 + Tieto Business Information Exchange (BIX) + Jyrki Poteri + jyrki.poteri&tieto.com +51758 + Glasswall Solutions Ltd + Paul Kennedy + pkennedy&glasswallsolutions.com +51759 + Intercede Group Plc + Dhiran Mandalia + dhiran.mandalia&intercede.com +51760 + Quilvest (Switzerland) Ltd. + Patrick Camichel + cert&quilvest.com +51761 + SS20 + Anatoly Osipov + osipov.a&ss20.pro +51762 + Euris Health Cloud + Nicolas Sirot + tech-cloud&euris.com +51763 + Equisign + LOTH DEMAY Guillaume + guillaume.lothdemay&equisign.fr +51764 + Institut de Biologie Structurale (IBS) + Jean-Luc Parouty + Jean-Luc.Parouty&ibs.fr +51765 + Bentron Power Systems + Vinit Bharadwaj + vinit.bentron&gmail.com +51766 + Jesus Fernandez + Jesus Fernandez + jfdezgt&gmail.com +51767 + Usabilla B.V. + Gijs Kunze + gijs&usabilla.com +51768 + yogendra thakur toure&travals + yogendra thakur + thakuryogendra677&gmail.com +51769 + One Stop Systems + Mark Ruijter + mruijter&onestopsystems.com +51770 + Elmira College + Neil Griswold + ngriswold&elmira.edu +51771 + Computils + Joerg Knura + joerg.knura&computils.de +51772 + RaiScience + Raices Goodwin + support&raiscience.com +51773 + Ransom Memorial Health + Joyce Ullmer + jullmer&ransom.org +51774 + Acom Networks + Peter Cheng + peter&acom-networks.com +51775 + AppNexus Inc + Ryan Williams + rdw&appnexus.com +51776 + rooteehealth + jaewon, chung + lyle&rooteehealth.com +51777 + SeeEyes + Ji-Su, Yun + yunjs&sscctv.com +51778 + MOBI Antenna Technologies(Shenzhen)Co.,Ltd. 
+ Su Qiuxia + mobitech&mobi-antenna.com +51779 + Shenzhen Borynet Co.,Ltd + Wang Yan + wangyan&borynet.com +51780 + Centre hospitalier Bienne SA + Juan Miguel Severino + oidadmin&szb-chb.ch +51781 + TNBA + Bernard Schoenzetter + admin&tnba.org +51782 + Warp United Scientific Co., Ltd. + Kilo LIANG + info&warpunited.com +51783 + Canadian Cancer Trials Group + Teddy Brown + tbrown&ctg.queensu.ca +51784 + Berrien Regional Education Service Agency + Matt Skalecki + matt.skalecki&berrienresa.org +51785 + Tecnocore + Rodolfo Luis Overbeck + rodolfo&tecnocore.com.br +51786 + Peachtree Dunwoody Dermatology + Joseph Erwin + jalmdpc&gmail.com +51787 + J&K Communications, Inc. + Shawn M Loe + sloe&jkcomm.com +51788 + Vinay Kumar Malviya, MD PC + Vinay Malviya, MD + vmalviya01&gmail.com +51789 + CinemaVision + Przemyslaw Sztoch + psztoch&finn.pl +51790 + Wysocki Family of Companies + Chris Kloiber + chris.kloiber&rpespud.com +51791 + Corsearch, Inc + Rick Rodriguez + cs-it-services&corsearch.org +51792 + Björnsholms Åkeri AB + Torbjörn Eriksson + Bjornsholms&hotmail.com +51793 + Derakhshan Sanat Isatis (DSI) + Mohsen Sharifi + sharifi&dsi.ir +51794 + Bosch.IO GmbH + Daniel Janev + daniel.janev&bosch.io +51795 + Ovoo Sp. z o. o. + Jan Waszkiewicz + jan.waszkiewicz&ovoo.pl +51796 + Citadel LLC + Vladimir Bunin + it&ctdl.ru +51797 + Bachmann electronic GmbH + Johannes Huber + edv&bachmann.info +51798 + Norgine Limited + Matt Froud + mfroud&norgine.com +51799 + Excera Technology Co., Ltd + Yu Sun + yu.sun&excera.com.cn +51800 + Virgin Global Media + Ervin Remus Radosavlevici + ervin210&sky.com +51801 + Quick Heal Technologies Limited + Sanjay Katkar + sanjay&quickheal.com +51802 + Semperis Ltd. + Guy Teverovsky + guyte&semperis.com +51803 + ProofShow Inc. + Yan-Cheng Chang + yc&proof.show +51804 + Energy Services Handels- und Dienstleistungs G.m.b.H. + Patrick Czelecz + it-infrastruktur&energy-services.at +51805 + Simpson Accountancy Ltd + David Simpson + dave&simpsonaccountancy.co.uk +51806 + Bittium + Network Support + networksupport&bittium.com +51807 + regio[.NET] GmbH&Co. KG + Bernhard Krönung + horke&regio.net +51808 + Andreas Kloska + Andreas Kloska + postmaster&kloska-berlin.de +51809 + Xunde Energie + Jakobus Schuerz + jakob&xundeenergie.at +51810 + Cloud Portfolio And Trading Analytics + Danny Byrnes + dannyb&cloudpta.com +51811 + ATSC 3.0 Security Authority (formerly 'Pearl TV') + Ron Wheeler + ron&a3sa.com +51812 + Shenzhen Kstar Science & Technology Co.,Ltd + Jay Jiang + jiangzl&kstar.com.cn +51813 + Thomas Gebhardt + Thomas Gebhardt + tmfgebhardt&gmail.com +51814 + Solid Optics EU N.V. + Bram Nieuwenhuize + broom&solid-optics.com +51815 + Dominanz s.r.o. + Ladislav Domin + laco.domin&dominanz.sk +51816 + euNetworks Managed Services GmbH + Dirk Duncker + dirk.duncker&eunetworks.com +51817 + KAP IT + Thomas Colin de Verdière + tdeverdiere&kapit.fr +51818 + Westland Gummiwerke GmbH & Co. 
KG + Reinhard Rehm + reinhard.rehm&westland.eu +51819 + Serg Oskin + Serg Oskin + serg.oskin&gmail.com +51820 + deepthink AG + Florian Heigl + fhe&deepthink.ag +51821 + SAFARI Montage + Greg Gesualdi + netadmin&safarimontage.com +51822 + iTechTool Incorporated + David Coelho + support&itechtool.com +51823 + Hacking & Coffee, LLC + Jim hartnett + hon1nbo+iana&hackingand.coffee +51824 + QuEST Global Engineering Services Private Limited + Prashant Nagaraj + prashant.nagaraj&quest-global.com +51825 + Astronaut, LLC + Ignacio Valdes + ivaldes&hal-pc.org +51826 + Atmosphere Architects + Jinson Jacob + architects&atmosphereinfra.in +51827 + Gillerfors Åkeri AB + Stefan Gillerfors + stefan.gillerfors&gmail.com +51828 + StructureIT + Thokozani Sibeko + thokozani.sibeko&structureit.net +51829 + High-End Media Kft. + Levente Huszkó + levente.huszko&high-end.hu +51830 + Athora Corporation + Gus Lynch + gus.lynch&athora.com +51831 + Gratika + Odan Prima + odan.pz&gmail.com +51832 + EnableIT Technologies Ltd + Jason Davey + jdavey&enable.services +51833 + Macadam + Jeroen Notebaert + helpdesk&prodata-systems.be +51834 + Atam Id + Adam Green + adamgreen&atam.id +51835 + French-Road + David CARELLA + david.carella&gmail.com +51836 + SUNY College of Optometry + Robert Pellot + rpellot&sunyopt.edu +51837 + INVEST - M, Ltd. + Andrei Salavarau + it&hermitagehotel.by +51838 + ProHealth Care + Keith Zimmer + keith.zimmer&phci.org +51839 + ddrs.de + Konrad Jacobi + admin&ddrs.de +51840 + Express Scripts Canada Inc. + Cliff Wang + cwang&express-scripts.com +51841 + FullCore + M. Majczak + pen.reg&fullcore.net +51842 + Adven Sweden Ab + Pauli Luoto + pauli.luoto&adven.com +51843 + Delta Systems Group + Byron DeLaMatre + sysadmin&deltasys.com +51844 + Carnegie Technologies + Jonathan DeLee + licensing&carnegietechnologies.com +51845 + SureFire LLC. + IT Support + itsupport&SureFire.com +51846 + SyndaTec LLC + Dane C Howard + info&syndatec.com +51847 + GUARANA + Wojciech Kozłowski + wkozlowski&radmor.com.pl +51848 + NetMeister, Inc. + Joe Nave + joe-iana&netmeister.com +51849 + MunichCamper + Michael Gams + michael&munichcamper.de +51850 + Inspur Power Commercial Systems Co.,Ltd. + HuaTang Ban + banht&inspur.com +51851 + Connetics Ltd + Drewe Hinkley + hinkleyd&connetics.co.nz +51852 + SPTek + Youngjin Choi + yjchoi&sptek.co.kr +51853 + Åsljunga Pallen AB + Edward Sandin + edward.sandin&asljungapallen.se +51854 + Energie Graz GmbH & CO KG + Heimo Bischofter + it.lizenzierung&energie-graz.at +51855 + BC² + Donna M. Scott + donna.scott&behaviorchangecenter.com +51856 + NeEhA Team + Takaoka Haruki + takaoka-haruki&neeha-jp.net +51857 + ngVision Sp. z o.o. + Radosław Bolforski + admin&ngvision.pl +51858 + Mole Valley District Council + Roger Finch + roger.finch&molevalley.gov.uk +51859 + IDS Ingegneria Dei Sistemi + Giuliani Fabrizio + support.cdc&idscorporation.com +51860 + Syscom + Jose Ramon Blazquz + jrblazquez&syscom.es +51861 + Safe Stamper + Jorge Guillo + jguillo&safecreative.org +51862 + East Coast Simulation + Christopher Collins + pen&ecsim.com.au +51863 + serv.it Gesellschaft für IT Services mbH + Sebastian Rhode, Thorsten Meyer + infra-server&servit.de +51864 + PayamPardaz + Esmaeil Vakili + vakili&payampardaz.com +51865 + Ian Bobbitt + Ian Bobbitt + ian&icb.im +51866 + Mynt (Globe Fintech Innovations Inc.) 
+ John Paul Alcala + jp.alcala&mynt.xyz +51867 + Open Networking Foundation + Timon Sloane + timon&opennetworking.org +51868 + Holvonix LLC + Holvonix LLC + internet-assignments-contact&holvonix.com +51869 + Sala-Heby Energi AB + Viktoria Söder + viktoria.soder&sheab.se +51870 + MSI + RoyKuo + xqet00321&gmail.com +51871 + Autoclear, LLC + Karl Voigtland + karlv&autoclear.com +51872 + Fegens Sågverk AB + Mikael Andersson + mikael&fegenssag.se +51873 + FOXTEL S.R.L. + Pietro Pandolfi + pandolfi&foxtel.it +51874 + Micro C, LLC + Evan Ruff + evan.ruff&microcimaging.com +51875 + New Context Services, Inc. + Justin Ferguson + jferg&newcontext.com +51876 + Unflap + Jack Rugby + junk&unflap.com +51877 + Konten Networks Inc. + Uwai Chen + uwai&konten-networks.com +51878 + Network Over the Edge + Michael Pinkston + michael.pinkston&joboa.net +51879 + Freespee + Daniel Donoghue + daniel.donoghue&freespee.com +51880 + Wahléns Åkeri AB + Janne Wahlén + jon.wikstrom&cgi.com +51881 + PINTSCH BUBENZER GmbH + Christian Witte + christian.witte&pintschbubenzer.de +51882 + Cycleon B.V. + bojan galonja + b.galonja&levi9.com +51883 + Rotork Controls Limited + IT Helpdesk + iana-reg&rotork.com +51884 + Jan-Olof Sundbergs Åkeri AB + Michael Helmby + micke&sundbergsakeriab.se +51885 + Victor G Carabello MD Inc + Belinda Ramirez + Belinda&carabellokidney.com +51886 + Pensando Systems, Inc. + Enrico Schiattarella + enrico&pensando.io +51887 + Nashville Oncology Associates, PC + Cheryll B. Doss + cdoss&nashvilleoncology.com +51888 + Infinite Software Solutions Inc D\B\A MD-Reports + Carl Thomas + cthomas&md-reports.com +51889 + Dimon Pro sp. z o.o. + Bartosz Sapijaszko + bartosz&dimonpro.pl +51890 + Noqoush Mobile Media Group FZ LLC + Noqoush Support + support&noqoush.com +51891 + Br Bertsons Åkeri AB + Andreas Johansson + brbertsons.andreas&telia.com +51892 + Rhaeticom AG + Andrej Leder + a.leder&rhaeticom.ch +51893 + Dayton Physicians Network + Susan Melton + smelton&daytonphysicians.com +51894 + Power Element + Sergey Mordashov + sm&powerelement.ru +51895 + Vet Rocket LLC + Aerik Sylvan + aerik&vetrocket.com +51896 + Instituto de hematologia y oncologia medica del norte PSC + Mayra Rivera + doctoramrivera&gmail.com +51897 + Engineering Bureau Phoenix LLC + Alexander Lutsenko + luae&fenix24.org +51898 + Vyex LLC + Dave Karr + iana-pen&vyex.com +51899 + JDQIRC + Jens Erik Becker + v2px&jdqirc.net +51900 + IronSys + Jan Heinze + jan.heinze&ironsys.org +51901 + SYNEFFI + LARUE Philippe + contact&syneffi.fr +51902 + ENRIQUE GRIEGO MD PA + OLGA GONZALEZ + ogonzalez&guajiraclinic.com +51903 + BAE SYSTEMS, Apex + James Elstone + james.elstone&baesystems.com +51904 + Tridum key + Esbold Unurkhaan + esbold&tridum.mn +51905 + Kent Denver School + Alex Clement + aclement&kentdenver.org +51906 + Butterfly Network Inc + Damien Dolimier + ddolimier&butterflynetinc.com +51907 + Rapiddot Hosting Services Ltd + Antonios Matsoukas + amatsoukas&rapiddot.com +51908 + FICOSA + Jesus Gonzalez + jesus.gonzalez&ficosa.com +51909 + Universidad Autonoma de Bucaramanga + Juan Carlos Martinez Quintero + jmartine&unab.edu.co +51910 + ANM Inc. + Dylan Marlow + dylan.marlow&anm.com +51911 + amaysim Australia Ltd + Cormac Donnellan + cormac.donnellan&amaysim.com.au +51912 + Ascension IT Pty Ltd + Janaka Wickramasinghe + janaka&ascensionit.com.au +51913 + Electroacustica General Iberica S.A. 
+ Miguel Lopez + mlopez&egiaudio.com +51914 + Ixxus Limited + Robin Bramley + isa&ixxus.com +51915 + HPDS + Reza Salkhordeh + salkhordeh&hpds.ir +51916 + Sommens Transport AB + Bert Johansson + jon.wikstrom&cgi.com +51917 + RealD Inc. + Tony Davis + tdavis&reald.com +51918 + Applied Research Center for Computer Networks + Aley Elin + aelin&arccn.ru +51919 + Universidad de Oriente + Eloy Olivero + albertohung&gmail.com +51920 + link22 AB + Conny Ljungqvist + conny.ljungqvist&link22.se +51921 + UO.EDU.CU + Alberto Hung Estevez + ahung&uo.edu.cu +51922 + Ruthlessly Practical + Theron Bair + tbair+pen&ruthlesslypractical.com +51923 + Tom Klein IT + Tom Klein + tom&kleinholding.com +51924 + Micrologix Embedded Controls Pvt Ltd + Manoj Prabhakaran + manoj.r&e-micrologix.com +51925 + Telesens IT + Vladimir Mukhachov + Telesens.IT&gmail.com +51926 + Amplified Engineering Pty Ltd + Stanley Chong + support&amplified.com.au +51927 + The Nordam Group, Inc. + Timothy Potter + tpotter&nordam.com +51928 + Colégio Notarial do Brasil + Marcos De Paola + tecnologia&notariado.org.br +51929 + FarmSoft Network Technology Shanghai Co. + ganhuan + 267079163&qq.com +51930 + HELMUT FISCHER GMBH INSTITUT FÜR ELEKTRONIK UND MESSTECHNIK + Michael Döbler + IT.DE.Sifi&helmut-fischer.de +51931 + Pietro Fiorentini spa + Sergio Andrein + service&pec.fiorentini.com +51932 + Andrews University + Jeff Easton + eastonjandrews&gmail.com +51933 + University of Chinese Academy of Sciences + Xin Wang + wangxin17a&mails.ucas.edu.cn +51934 + Linus Brogan + Linus Brogan + iana-pen&lb.ax +51935 + Sydney Boys High School + James Rudd + admin&sbhs.nsw.edu.au +51936 + Matthias Merkel + Matthias Merkel + matthias&matthias.zone +51937 + BARTCO TRAFFIC EQUIPMENT PTY. LTD. + David Chamberlain + david&bartco.com.au +51938 + Metsähallitus Metsätalous Oy + Hanna Soinne + hanna.soinne&metsa.fi +51939 + Sirius Extrusion, LLC + Valentyn Grygoriev + admin&sirius.pro +51940 + Sasa Software (C.A.S) Ltd + Hanoch Lev + hanochl&sasa-software.com +51941 + ISYRIUS + Alina Sztoch + alinasztoch&isyrius.com +51942 + Polskie Górnictwo Naftowe i Gazownictwo SA + Piotr Chmielewski + piotr.chmielewski&pgnig.pl +51943 + Herzogsaegmuehle (Innere Mission Muenchen - Diakonie in Muenchen und Oberbayern e.V.) + Hans Juergen Hutterer + edv&herzogsaegmuehle.de +51944 + DYWIDAG-Systems International GmbH + Albert Weichmann + Albert.Weichmann&dywidag-systems.com +51945 + Micronova SRL + Cristian Ruiz + cruiz&micronova.com.ar +51946 + Code Designs® Inc + Shawn Landry + contact&codedesigns.ca +51947 + LL Consulting + L. Charles Labelle + iana-oid&llabell.net +51948 + eCH Association + Lorenz Frey-Eigenmann + Lorenz.Frey-Eigenmann&ech.ch +51949 + ALC Industries Inc + Andy Mulay + penadmin&it3.net +51950 + SSR Network Solutions Inc + Andy Mulay + penadmin&ssrns.com +51951 + Pliancy (formerly 'TSGCA, inc') + Scott Carlow + scott&pliancy.com +51952 + Epsylon Sp. z o.o. Sp. K. + Stanislaw Rejowski + stanislaw.rejowski&eterio.eu +51953 + Clausohm-Software GmbH + Stefan Otto + stefan.otto&clausohm.de +51954 + SWILE DIGITAL ON LINE + Rodney John Swile + swile&swile.com.au +51955 + TISCOM + Jason Rahn + Jason.Rahn&uscg.mil +51956 + BNY Mellon + Pedro Peralta + pedro.peralta&bnymellon.com +51957 + Curo Teknika Inc. + Richard B. 
Bumanlag + it-infra&curoteknika.com +51958 + Santa Rosa County School District + John Garlock + garlockj&santarosa.k12.fl.us +51959 + Paul Egy + Paul Egy + paul.egy&netapp.com +51960 + actidata Storage Systems GmbH + Christoph Hestermann + service&actidata.com +51961 + Accidental Development + Brian Faga + admin&accidentaldevelopment.com +51962 + AD QUALITE + YVES DUVAL + yves.duval&ad-qualite.com +51963 + Transacciones y Transferencias, S.A. + Jesus Sosa + firmasdigitales5b&5b.com.gt +51964 + Linn-Benton Community College + Michael Quiner + quinerm&linnbenton.edu +51965 + Plan International Canada Inc. + Rob Harman + cnoitservices&plancanada.ca +51966 + Ide Bekr Mobin + Mohammad Shirin Sokhan + info&ibm.co.ir +51967 + Smartweb s.r.o. + Martin Misuth + martin&smartweb.sk +51968 + RobCo sp. z o.o. + RobCo + konta&robco.pl +51969 + Faraday&Future Inc. + David Dai + david.dai&ff.com +51970 + Wireless Logic Limited + Paul Furmedge + admin.oid&wirelesslogic.com +51971 + Confédération des Jeunes Chercheurs + Mathieu Durero + contact&cjc.jeunes-chercheurs.org +51972 + RELEX + Sami Mattila + sami.mattila&relexsolutions.com +51973 + Raptor Engineering, LLC + Timothy Pearson + tpearson&raptorengineering.com +51974 + Raptor Computing Systems, LLC + Support Department + support&raptorcs.com +51975 + TouHou.FM + Daniel Sonck + info&touhou.fm +51976 + Barry Electric Cooperative, Inc. + Nicholas Warren + nwarren&barryelectric.com +51977 + Shirakumo + Nicolas Hafner + shinmera&tymoon.eu +51978 + BitMEX + Tim Tickel + twt&bitmex.com +51979 + WestJet Airlines Ltd. + Devon Smibert + ITSecurity2&westjet.com +51980 + IT Forge + Leander Janssen + leander&itforge.nl +51981 + Unassigned + Returned 2018-05-17 + ---none--- +51982 + Red Dog Consulting LLC + Aaron Johnson + aaron&reddogconsulting.us +51983 + Groupe Solutions TI + Richard Simard + richard.simard&groupesti.com +51984 + XnetSolutions KG + Mr. Andreas Klaedtke + iana.pen&xnetsolutions.de +51985 + Linogate GmbH + Mateusz Roik + roik&linogate.de +51986 + CARVAJAL SOLUCIONES EN TECNOLOGIA S.A.S + Carlos Andres Guzman + carlos.guzmans&carvajal.com +51987 + Squar Milner + Andy Mulay + penadmin&squarmilner.com +51988 + Panj Works + Hrvoje Cvetkovic + admin&panj.ws +51989 + Irvine Company + John Barnett + jbarnett&irvinecompany.com +51990 + Rigosys Information Technology + huakai.han + huakai.han&rigosys.com +51991 + Landkreis St. Wendel + Stefan Wommer + s.wommer&lkwnd.de +51992 + Zuercher Hochschule der Kuenste ZHdK + Stefan Knodt + service.itz&zhdk.ch +51993 + GKD Paderborn / OWL-IT / Lernstatt Paderborn + Volker Hellmuth + vhellmu1&lspb.de +51994 + Indian Institute of Science, Bangalore + SANOOD ULLATTUTHODIYIL + sanood&iisc.ac.in +51995 + Bengkel Oprek Elektronika + Dwi Satria Wirawan + dwisatriawirawan&gmail.com +51996 + Grand Canyon Education + Staff Admin + staff.admin&gce.com +51997 + Ten Fifteen Solutions Limited + Matthew Burr + info&tenfifteen.ca +51998 + Ampd Energy Limited + Luca Valente + luca&ampd.energy +51999 + Bäckebo Sågverk AB + Roger Franzen + roger&backebosagverk.se +52000 + Amsio + Amsio Support + support&amsio.com +52001 + European External Action Service + Luis Felix + luis.felix&ext.eeas.europa.eu +52002 + Svod International, LLC + admin&svod-int.ru + admin&svod-int.ru +52003 + Starpower Home Entertainment Systems, Inc. 
+ Daniel Hoag + ianapen&star-power.com +52004 + Elcomplus LLC + Alex Maltsev + MaltsevAM&elcomplus.ru +52005 + ogt11.com, llc + Sam Nicholson + samcn2&ogt11.com +52006 + Designx Solutions + Shakaib Safvi + shakaibsafvi&designxsolutions.com +52007 + Garretson Resolution Group + Alex Whitman + Networking&garretsongroup.com +52008 + Alliance Development Fund + Jason Stroup + infrastructure&adf-inc.com +52009 + Oakland Unified School District + Daniel Augustine + dan&ousd.org +52010 + Westmoreland Casemanagement and Supports, Inc. + Joe Capozzi + jcapozzi&wcsi.org +52011 + Stolon + Stolon President + iana&stolon.fr +52012 + Rodmyre Technology + Brian Rodmyre + brian&rodmyretechnology.com +52013 + Hanalytics Pte Ltd. + Mok Ming Foong + ethan.mok&hanalytics.sg +52014 + Volkswagen Group Rus + Mazurenko Andrey + andrej.mazurenko&volkswagen-rus.ru +52015 + Sabinet Online + Wynand Meijer + dba&sabinet.co.za +52016 + Tidalis B.V. + Huub van Roosmalen + info&tidalis.com +52017 + OOO FPK "Kosmos-Neft-Gaz" + Grigorov Konstantin + grigorov&kng.vrn.ru +52018 + A.D.SION Info Santé + Guillaume Girard + tech&adsion.fr +52019 + littlemore.me.uk + Paul Littlemore + webmaster&littlemore.me.uk +52020 + Havana University + Victor Puentes + v.puentes&estudiantes.matcom.uh.cu +52021 + ITR Services EOOD + Oleg Ivanov + oleg&itrservices.eu +52022 + Spacynet + Daniel Bhalla + Dbhalla&spacydesign.de +52023 + Yeraz + Quintard Jérôme + jquintard&yeraz.fr +52024 + T-Mobile USA + Yanfeng Lu (Alex) + yanfeng.lu&t-mobile.com +52025 + AlticeUSA + John Solarino + john.solarino&alticeusa.com +52026 + Jishi Medical Tech. Co., Ltd. + Rex Chen + ichenrs&126.com +52027 + Abilities Limited + Connor Spencer Harries + connor.harries&abilities.org.uk +52028 + Hebei Mota Electronic Technology Co., Ltd. + Wang ShuHua + 18931159311&163.com +52029 + Kleberg Bank NA + Jimmie Locklear + jimmie.locklear&klebergbank.com +52030 + Cross Link Group + Joseph Clark + infrastructure&crosslinkgroup.com +52031 + iTreatMD Inc. + George Krucik + george&itreatmd.com +52032 + New Relic Inc + Prakash Reddy + preddy&newrelic.com +52033 + Hangzhou Youshi Industry Co., Ltd. + Huang Yikun + hyk&lte-m.cn +52034 + DEVELICT Solutions s.r.o. + Zdeněk Lokaj + info&develict.com +52035 + Profitt ltd. + Alexey Sizyukhin + alexs&profitt.ru +52036 + Bertil Ytterbom Skog AB + Fredrik Ytterbom + fredrik&ostanskar.se +52037 + Remota Tecnologia em Comunicação + Jean Nack + jean&remotatec.com.br +52038 + Community Colleges of Spokane + Thomas Ingle + thomas.ingle&ccs.spokane.edu +52039 + BHK Child Development Board + Dave Anderson + admin&bhkfirst.org +52040 + Torin J. Carey + Torin J. Carey + penmaster&tcarey.uk +52041 + Institute for Network Sciences and Cyberspace, Tsinghua University + Lin He + he-l14&mails.tsinghua.edu.cn +52042 + e-Contract.be BVBA + Frank Cornelis + info&e-contract.be +52043 + Thüga Energienetze GmbH + IT Operations + it-operations&thuega-netze.de +52044 + AMHP - ASSOCIACAO DOS MEDICOS DE HOSPITAIS PRIVADOS DO DF + Marcelo S Mota + ti&amhp.com.br +52045 + Solar Monitor s.r.o. + Dusan Ferbas + dferbas&solarmonitor.cz +52046 + PAE + MIS + mis&pae.com +52047 + Jennason, LLC + Scott Pugh + scott.pugh&jennason.com +52048 + Rob Andrews Consulting + Rob Andrews + roba&r-andrews.net +52049 + Olle i Od åkeri AB + Patrik Wernklev + Olleiod&telia.com +52050 + infinite io, Inc. 
+ Jay Rolette + rolette&infinite.io +52051 + SHAPE Australia + Zac Avramides + Zac.Avramides&shapegroup.com.au +52052 + SCA Energy + Magnus Tjärnskog + magnus.tjarnskog&sca.com +52053 + Dharma Drum Institute of Liberal Arts + Kan Huang + kanhuang&dila.edu.tw +52054 + Sture Gustafssons Åkeri AB + Viktor Gustafsson + viktor&sgakeri.se +52055 + ASN Norway AS + Svein Erik Aasen + svein.erik.aasen&asn.com +52056 + Immowelt AG + Andreas Mattern + administrator&immowelt.de +52057 + telpass + Thierry Gueret + contact&telpass.fr +52058 + WebitDesign + Henning Janßen + henning.janssen&webitdesign.de +52059 + Datasages + Peter Kofod + pete&datasages.com +52060 + Münchener und Magdeburger Agrar + Richard Jummer + r.jummer&mmagrar.de +52061 + TEB Tecnologia Eletrônica Brasileira Ltda. + Fred Schinke + engenharia&teb.com.br +52062 + Hunan Tensafe Tech Co.,Ltd + tim tim + tim&haoup.net +52063 + Evolven Software + Sasha Gilenson + sasha&evolven.com +52064 + UPGRADE SOLUTIONS INFORM ATICA + Marcelo S Mota + marcelosmota&upgradesolutions.com.br +52065 + St. Joseph Dermatology and Vein Clinic + Lider Koc + stjoederm16&gmail.com +52066 + University Dermatology and Vein Clinic + Lider Koc + universityderm14&gmail.com +52067 + Montrose Environmental Group, Inc. + Minhhoang Le + mle&montrose-env.com +52068 + Mid-Range Computer Group + David Fillmore + Dfillmore&midrange.ca +52069 + Webforeveryone GmbH + Chris Böhm + chrisjb&webforeveryone.de +52070 + Knipmeyer IT + Alan Knipmeyer + alan&knipmeyer-it.ltd +52071 + Piepenbrock Unternehmensgruppe + Kay Seemann + iana-contact&piepenbrock.de +52072 + Gosudarstvennaya sluzhba svyazi PMR + Maydanyuk Sergey, Pekelnyak Nikolay + mininfopmr&gmail.com +52073 + Experda + avi golan + avig&experda.com +52074 + TylerWeb + Tyler Melton + administrator&tylerweb.net +52075 + Covenant Care California, LLC + Covenant Care Network Operations + netops&covenantcare.com +52076 + System Source + Robert A Roswell + broswell&syssrc.com +52077 + Grieshofer + Philipp Grieshofer + oid&grieshofer.com +52078 + VES, LLC + Christopher Marks + cmarks&ves.solutions +52079 + Biogen + Christopher Kibble + christopher.kibble&biogen.com +52080 + ConfigMgrFTW + Jason Sandys + admin&configmgrftw.com +52081 + isoshi-oustache + kazuaki-ito + kazuaki-ito&isoshi-moustache.com +52082 + ZENTRIOT SOLUTIONS PVT. LTD. + Lintamol Thomas + sales&zentriot.in +52083 + NexCast GmbH + Christian Brenner + info&nexcast.net +52084 + Phoenix Recording Systems Limited + Simon Sparkes + simonsparkes&phoenixrecordingsystems.com +52085 + Polystar System AB + Polystar System IANA contact person + iana&polystarsystem.com +52086 + Nexstra, Inc + David Lee + dlee&nexstra.com +52087 + New Hope-Solebury School District + Brandon Penglase + ITAdmin&nhsd.org +52088 + Carter County Sheriff's Office + Isaiah Grindstaff + grindstaffi&sheriff.cc +52089 + Servicio de Administración de Rentas de Honduras + OSMAN RENE MORENO RAMOS + omoreno&sar.gob.hn +52090 + UPM Solutions Inc + Dmitry Kunilov + dmitry.kunilov&upm.solutions +52091 + CentriLogic + Gary T. Giesen + ggiesen&centrilogic.com +52092 + Infinidim + OID Admin + oidadmin&infinidim.be +52093 + Carrick-Skills + Grigori MORDIN + grigori&carrick-skills.com +52094 + Stipendium + Przemyslaw Sztoch + info&stipendium.pl +52095 + EVS Ltd + Mihail Palatik + mpalatik&evs.ru +52096 + Japan Synchronization Radiation Research Institute + Takahiro Matsumoto + matumot&spring8.or.jp +52097 + Adven Energilösningar AB + Christer Eriksson + christer.eriksson&adven.com +52098 + ERIC P FONTENOT M.D. 
INTERNAL MEDICINE + Eric P Fontenot + EFONTENOT41&YAHOO.COM +52099 + Acosm + Jieyu Bai + itadmin&acosm.com +52100 + NetQPro Sp. z o.o. + Zbigniew Lapkiewicz + zbigniew.lapkiewicz&netqpro.com +52101 + Data I/O Corporation + Brian Herdeg + herdegb&dataio.com +52102 + Twohe + Christoph Tohermes + pen.twohe&gmail.com +52103 + Cyberbit + Daniel Cohen-Sason + Danielcs&cyberbit.com +52104 + ShamAn + Andy + av.shamrikov&gmail.com +52105 + spoe + Simon Spöhel + admin&spoehel.ch +52106 + ISED Ingegneria dei sistemi S.p.A. + Servizio IT + servizioit&ised.it +52107 + Volkswagen Financial Services Digital Solutions GmbH + Carsten Kobierski + carsten.kobierski&vwfs.com +52108 + Exponential-e Limited + Eamonn McQuaid + Eamonn.McQuaid&exponential-e.com +52109 + Zeitounian Tecnologia + Wagner de Queiroz + wagner&zeitounian.com.br +52110 + Grupo Idial + Francisco Prior + fprior&eviciti.com.mx +52111 + Jecstar Innovation + Mark Holster + m.holster&jecstar.com +52112 + Instart Logic, Inc. + Operations + operations&instartlogic.com +52113 + Verein der Freunde der Burgruine Andeck e.V. + Oliver Roll + mail&oliver-roll.com +52114 + FAIRFAX FOOT AND ANKLE CENTER, PC. + MIRNA MENDOZA + OLKINDPM&RCN.COM +52115 + RasPTin + Marco Della Penna + Monitoring&stz-bt.de +52116 + Geutebrück GmbH + Michael Zeise + michael.zeise&geutebrueck.com +52117 + Lenovo PLM + Spring Liu + chunliu&lenovo.com +52118 + thyssenkrupp Elevator AG Asia Pacific Office + Zyle Lam + admin&tkeap.com +52119 + Tardigade Limited + Siu Bun Chan + sbchan&tardigade.com +52120 + Shaanxi Fenghuo Industrial Co.,LTD + Pengying Guo + guopy0608&126.com +52121 + Symbiotic Nexus + Bart Heyse + bartheyse&symbioticnexus.com +52122 + YHIMA OÜ + raoul signorile + yhima&protonmail.ch +52123 + Ewalds Åkeri AB + Mikael Svensson + ewa8003105&gmail.com +52124 + Trax Technologies, Inc. + Matt LeRay + sysadmin&traxtech.com +52125 + FAPMC + Konstantin Goncharenko + kagoncharenko&fapmc.ru +52126 + Rock Electronic + GuangPing.Shu + sgp&rockelec.com +52127 + Rosenblatt Enterprise Group, LLC. + L. Rosenblatt + compliance&rosenblattent.com +52128 + Webridge Soft, Ltd. + Spring + spring&webridge.com.cn +52129 + PALFINGER AG + Alexander Naglik + IANA-Admin&palfinger.com +52130 + Blomesystem GmbH + Kristin Schumann + kristin.schumann&blomesystem.com +52131 + Boxtrap Security + Dusan Mondek + dusan.mondek&boxtrap.net +52132 + MOMENTUM + Hafid Akhiat + hafid.akhiat&elekta.com +52133 + Universitat Ramon Llull + Víktu Pons i Colomer + viktu&rectorat.url.edu +52134 + Francisco E. Martinez + VALERIA ZAPATA + teremart&bellsouth.net +52135 + Piscator Solutions + Patrick Piscator + ppiscator&web.de +52136 + 509 Solutions Pty Ltd + Russell Tomkins + info&509.solutions +52137 + CARICOM Secretariat + David Chan + itstaff&caricom.org +52138 + VCNS TECH + Simon Jackson + simon&vcns.tech +52139 + Old Republic Title + Steve Payne + security&oldrepublictitle.com +52140 + CardPlus Sverige AB + Mikael Strang + mikael.strang&cardplus.fi +52141 + CardPlus Oy + Mikael Strang + mikael.strang&cardplus.fi +52142 + TRANS SPED LTD + Viky Manaila + viky.manaila&transsped.ro +52143 + RCCA MD LLC - Center for Cancer & Blood Disorders + Carreen F. 
Huffman + chuffman&ccbdmd.com +52144 + Applied Video Solutions + Igor Shimolin + igor&applyvideo.com +52145 + CRSG Deutschland GmbH + Kay Schmidt + crsg&muc.implaneo.com +52146 + LightYear Dealer Technologies, LLC + Josh Robinson + joshua.robinson&dealerbuilt.com +52147 + Truphone Limited + Frederico Dias + webmaster&truphone.com +52148 + Les Services Conseils I6T + Guy Talbot + guy.talbot&i6t.ca +52149 + NGX STORAGE + BEYHAN CALISKAN + iletisim&ngxstorage.com +52150 + Seaford UFSD + Fred Kaden + rcarbon&seaford.k12.ny.us +52151 + IEX Data Analytics LLC + IEX Data CORE Operations + support&iexdatacore.com +52152 + Illinois Student Assistance Commission + Mike Linn + Mike.Linn&illinois.gov +52153 + Agency for Digitisation - Ministry of Finance Denmark + Thoke Graae Magnussen + thokm&digst.dk +52154 + High Prairie School Division No. 48 + Sascha Klingsch + support&hpsd.ca +52155 + Seaford UFSD + Fred Kaden + rcarbon&seaford.k12.ny.us +52156 + Kramer Deboer & Keane + ronald vandeusen + rvandeusen&kdeklaw.com +52157 + Excelocity Inc. + Michel-Ange Hyppolite + mhyppolite&excelocity.com +52158 + Hager Group + Jerome BOULLOT + ianaContact&hagergroup.com +52159 + Shandong Anzhilian Intelligent Technology Co., Ltd. + ming yi + yiming1925&126.com +52160 + Raft Technology + Elad Segalis + elad&raft-tech.com +52161 + John Paul the Great Catholic University + Kevin Meziere + kmeziere&jpcatholic.edu +52162 + Mustasaare Tarkvarabüroo OÜ + Arno Pedusaar + arno&mtb.ee +52163 + PAPACHRISTOU + Stathis Papachristou + stathis&papachristou.eu +52164 + Perkbox Limited + Chieu Cao + admin&perkbox.com +52165 + ND PAPER LLC + Jennifer Hamilton + jen.hamilton&catalystpaper.com +52166 + Lilith Mimms + Lilith Mimms + lilith.mimms&gmail.com +52167 + Watson Realty Corp + Andrew Yurick + ayurick&watsonrealtycorp.com +52168 + tkni.co + Joost Albers + Info&tkni.co +52169 + iDtrust Tecnologia de Software + Cristiano Andrade + cristiano&idtrust.com.br +52170 + Ip Way Ltd. + Vasily Filatov + certadmin&ip-way.ru +52171 + SageRider, Inc. + Shelley Wall + shelley.wall&sageriderinc.com +52172 + Byosoft Co.,Ltd + Iasi.Hu + iasi&byosoft.com.cn +52173 + Landesvermessung und Geobasisinformation Brandenburg + Andreas Hätscher + andreas.haetscher&geobasis-bb.de +52174 + WM Systems LLC. + Mihaly Krizsan + mihaly.krizsan&wmsystems.hu +52175 + TPS B.V. + TPS Support + support&mijntps.nl +52176 + TSV-Groep B.V. + TPS Support + support&mijntps.nl +52177 + ThoughtSpot, Inc. + Matthew Kelly + matthew.kelly&thoughtspot.com +52178 + AvarTec, Inc. 
+ Darren Lucht + darren&avartec.com +52179 + CELIA ALGERIE SARL + Ahmed BRINIS + ahmed.brinis&celia.dz +52180 + Jindanupajit + Krissada Jindanupajit + jindanupajit&gmail.com +52181 + SimSpace Corporation + Michael Beynon + contact&simspace.com +52182 + Brokhults Åkeri AB + Kenneth Johansson + kennethbrokhult&gmail.com +52183 + Benchmark Invest SPRL + Stephan Tachelet + s.tachelet&benchmark-invest.com +52184 + Vali Cooper International + John Vollman + john&valiint.com +52185 + Duluth Trading Company + Casey Schoonover + cschoonover&duluthtrading.com +52186 + Wuhan University + Iasi.Hu + iasi&whu.edu.cn +52187 + StarLeaf Inc + Abraham Sharp + abe.sharp&starleaf.com +52188 + Jakintek + Sendoa Rojas + compras&jakintek.com +52189 + Parker Data Networks Ltd + Adam Parker + adam&parkerdatanetworks.com +52190 + Flying Tiger Copenhagen + Thomas Marhauer + itoperationlicenses&flyingtiger.com +52191 + Jutel Oy + Jyrki Stranius + jyrki.stranius&jutel.fi +52192 + ITESOFT + Didier Charpentier + legal&itesoft.com +52193 + Thales Global Services + Patrick Camelin + patrick.camelin&thalesgroup.com +52194 + Foot Specialists of Greater Cincinnati + Anthony DeMaria / Angela DeMaria + footdoctor2865&gmail.com +52195 + Rahavard Karen + Hossein Bagherzade + hossein.bagherzade&gmail.com +52196 + ASPIT AS + Aasmund Rinde + aasmund&aspit.no +52197 + Toyota Motor North America + Neal Shen + CryptoOperationsSupport&toyota.com +52198 + ARCATrust SA + Yacine Felk + yacine&arcatrust.io +52199 + Syhard + Leo Snowareski Filho + leosfilho&gmail.com +52200 + Chuo System Corporation + Ryota Kawashima + kawashima_ryota&chuosystem.co.jp +52201 + kukjae telesys + JUN MIN PARK + jmpark&kukjaetelesys.com +52202 + Verschwörhaus + Albert Paka + al.paka&verschwoerhaus.de +52203 + SICK AG + Sebastian Fandrich + iana-oid&sick.de +52204 + Garrison Technology + David Bailey + opsadmin&garrison.com +52205 + Conselho da Justiça Federal - CJF + Adriana Jesus de Morais + sesser&cjf.jus.br +52206 + Ferrocarrils de la Generalitat Valenciana + Sergio Cano + cano_ser&gva.es +52207 + thePulse Inc. + Vito DeCarlo Jr. + vito.decarlo&thepulse.com +52208 + Guangdong UNIPOE IoT Technology Co., Ltd. + Bu Jindong + bonly&unipoe.com +52209 + Arçelik AŞ + Ovunc Doruk Izgen + doruk.izgen&arcelik.com +52210 + NNAISENSE SA + Tomas Horyl + iana.contact&nnaisense.com +52211 + EUCHNER GmbH + Co. KG + Kay Hildmann + hostmaster&euchner.de +52212 + Telefónica Móviles México + Roberto Maceda Peñaloza + roberto.maceda.ext&telefonica.com +52213 + GuavaSpace + Christoffer Müller Madsen + christoffer&guava.space +52214 + Universität Regensburg + Henri Stöcklein + Henri.Stoecklein&stud.uni-regensburg.de +52215 + Llama.pe SA + Alfonso Ronald Macedo Lopez + ronald&llama.pe +52216 + PrimosTI + Rodrigo Speller + rodrigo&primosti.com.br +52217 + Ambient System sp. z o.o. + Wlodzimierz Plewa + w.plewa&ambientsystem.pl +52218 + Castle Global + Justin Hao + jhao&thehive.ai +52219 + NOVOTEA + Jan Vanhercke + jan.vanhercke&novotea.com +52220 + Nianet A/S + Kristian Klinting + krk&nianet.dk +52221 + Landkreis Mansfeld-Südharz + David Herning + support&lkmsh.de +52222 + Support Logistic Services srl + Simone Freddio + s.freddio&slsitalia.it +52223 + Optimum Design Technology LLC + Sunil Shrestha + optimumdesign07&gmail.com +52224 + Eberle Design + Joseph F Dudich + JDUDICH&EDITRAFFIC.COM +52225 + Block Array Corporation + Sam Bacha + sam&blockarray.com +52226 + PEPE Invest s.r.o. 
+ Peter Petrakovic + peter&petrakovic.com +52227 + Avanfort + Serg Kalashnikov + serg&niform.ru +52228 + music support group + Michael Hendel + technik&musicsupportgroup.com +52229 + Roboteurs Inc + Reiner Schmidt + reinerschmidt&roboteurs.com +52230 + WuHan SanJiang Space Network Communication Co,LTD + Gao Quan + 24420546&qq.com +52231 + sentieris + jim chu + jim&sentieris.com +52232 + Bitline Informatikberatung GmbH + Daniel Metzger + daniel.metzger&bitline.ch +52233 + Internetswitch + Maarten Wolzak + iana-pen&internetswitch.com +52234 + Hostsharing eG + Hostmaster of the Day + info&hostsharing.net +52235 + ZheJiang Taoshi Technology CO.,LTD. + Chuanfeng Zhang + zhangcf&topsvision.net +52236 + Lyon e-Sport + Etienne Glossi + administratif&lyon-esport.fr +52237 + Covenant Transportation Group + Howell Strain + hstrain&covenanttransport.com +52238 + AMP Robotics + Richard Stilborn + richard&amprobotics.com +52239 + Kernel Labs Inc. + Steven Toth + stoth&kernellabs.com +52240 + Piwiteam + Nicolas Rodrigues + nicolasrodrigues&outlook.fr +52241 + CENTRAL COAST MEDICAL ONCOLOGY CORP + Melody Plotner + melody&ccmo.us +52242 + Wataniya Mobile + Samer Hussein + samer.hussein&wataniya.ps +52243 + Luminor Bank AS + Mindaugas Apanavicius + pki&luminorgroup.com +52244 + NetWorkS! + Adam Milewski + certyfikaty&networks.pl +52245 + meituan + chuanjun Liu + liuchuanjun&meituan.com +52246 + HMSHost + Ryan Fondren + ryan.fondren&hmshost.com +52247 + Vivo Telefonica + Romulo Paulo + romulo.paulo&telefonica.com +52248 + Altergy Systems + Matthew Ellington + matthew.ellington&altergy.com +52249 + Jefferson Lab + Marty Wise + wise&jlab.org +52250 + First Weber, Inc. + Mike Steinhilber + steinhilberm&firstweber.com +52251 + Enconnex LLC + Thane Moore + thane&enconnex.com +52252 + MNF Group + James Woods + james.woods&mnfgroup.limited +52253 + Symbio Networks + James Woods + james.woods&symbionetworks.com +52254 + Dabee.ca + Ryan Workman + workerbe&gmail.com +52255 + Woodridge Baptist Church + Barry Merritt + bmerritt&woodridge.org +52256 + GloryTrust, LLC + OID Administrator + oidadmin&glorytrust.com +52257 + Ufi Space Co., Ltd. + Moore Lee + moore.cc.lee&ufispace.com +52258 + SDNIMO + Mark Chen + markchen&bjnimo.com +52259 + PSiDEO SA + Patrick Stuto + patrick.stuto&psideo.com +52260 + happn + Vincent Batoufflet + ops&happn.com +52261 + Suomen Tilaajavastuu Oy + Ville Niskala + admin&tilaajavastuu.fi +52262 + Service Benefit Plan Administrative Services Corp + Robert Wiseman + robert.wiseman&fepoc.com +52263 + NK Lundströms Trävaror AB + Anna Hansson + anna.hansson&nkltra.se +52264 + Verbraucherzentrale NRW e.V. + Jürgen Cleven + edv.beschaffung&verbraucherzentrale.nrw +52265 + NNIP-Marketing + Pieter Bakkenist + pieter.bakkenist&nnip.com +52266 + Global Legal Entity Identifier Foundation (GLEIF) + Christoph Schneider + it-admin&gleif.org +52267 + VAS "Valsts nekustamie ipasumi" + Raimonds Brazevics + raimonds.brazevics&vni.lv +52268 + SHAZAM, Inc. + Nick Janssen + oidadmin&shazam.net +52269 + Consumer Affairs Agency, Government of Japan + Yutaka Kukimoto + kukimoto.yutaka.h5&tex-sol.com +52270 + Particle Industries, Inc. 
+ Jonathan Beri + jberi&particle.io +52271 + Neal Suares, MD Family Practice + Paige Suares + suares4&yahoo.com +52272 + People's Council of the Donetsk People's Republic + Alexander Pushkin + it&dnrsovet.su +52273 + Shen-Zhong Link Administration Center + Chen Xiang + chenxiang&shenzhonglink.com +52274 + Rijksen GmbH + Marc Rijksen + marcrijksen&gmail.com +52275 + Zentrales Engineering Active Directory + Daniel Metzger + daniel.metzger&vtg.admin.ch +52276 + IntegraWare Informática + F. Alejandro Osso + alejandro.osso&integraware.com.br +52277 + Liquid Voice Ltd + Andrew Barrett + andrew.barrett&liquidvoice.co.nz +52278 + CAGIP + Davis JAYARAJ + silca_alerting_build&ca-silca.fr +52279 + Menara Holding + Tourhfar Ahmed + a.tourhfar&groupe-menara.com +52280 + Exploit-IT, Ltd. + Alexey Morilov + web&exploit-it.ru +52281 + MEDIAN Unternehmensgruppe B.V. & Co. KG + Carsten Gießen + IANA-PEN&median-kliniken.de +52282 + Shenzhen Forward Industry Co., Ltd. + Zhang Xiaojin + zhangxiaojin&szforward.com +52283 + Shanghai Teraoka Electronic Co.,Ltd + Ni yufeng + yf.ni&cn.digi-group.com +52284 + Magic Leap Horizons + Neill Thornton + neill.thornton&mlhorizons.com +52285 + CYBERTELBRIDGE Co., Ltd + Junghoon Kim + kimjunghoon&everytalk.co.kr +52286 + Thinkum Labs + Sean Champ + hostmaster&thinkum.space +52287 + Energienetze Steiermark GmbH + Mag. Fetsch Peter + peter.fetsch&e-netze.at +52288 + Energienetze Steiermark GmbH + Mag. Fetsch Peter + peter.fetsch&e-netze.at +52289 + DS Produkte GmbH + Lars Axmann + it.bus&dspro.de +52290 + StreamUnlimited Engineering GmbH + Radek Dostál + radek.dostal&streamunlimited.com +52291 + IT-GRAD + Roman Kuchukbaev + iana-pen&it-grad.ru +52292 + SmartGuard Software + Vladislav Kalyuzhnyy + support&smart-guard.eu +52293 + EVA ICS (https://www.eva-ics.com/) + Sergiy Symonenko + div&altertech.com +52294 + Beijing Juson Technology Co., Ltd. + Yongzhuo Liu + liuyongzhuo&jusontech.com +52295 + Comp Line Informática LTDA + Welson Santos + welson.santos&compline.com.br +52296 + dacoso GmbH + Felix Pohl + it&dacoso.com +52297 + Alivetec.io + Ryan A. Griffiths + ryan&alivetec.io +52298 + ATRiCS Advanced Traffic Solutions + Volker Poplawski + volker.poplawski&atrics.com +52299 + PT Surya Utama Putra + Koestejo + tejo&suryautamaputra.com +52300 + Institute For InfoComm Research + Ying Lay Chiu + ylchiu&i2r.a-star.edu.sg +52301 + GfK SE + PKI Team + pki&gfk.com +52302 + ROSATOM + Eugene Semerikov + easemerikov&greenatom.ru +52303 + Advanced Conversion Technology, Inc. + Kevin Finkbiner + kfinkbiner&actpower.com +52304 + ThousandEyes, Inc. + Paulo Cabido + paulo&thousandeyes.com +52305 + Shenzhen Tong Tai Yi Information Technology Co.,Ltd. + bao feng + fengbao&ttyinfo.com +52306 + novastar + sober gou + xue-ying&139.com +52307 + BotProbe Ltd + Mark Graham + mark.graham&botprobe.co.uk +52308 + YMKatz.net + Yehuda Katz + yehuda&ymkatz.net +52309 + Quaintco Services Limited + GEORGIOS NIKOLOUDIS + georgios.i.nikoloudis&gmail.com +52310 + Cal Poly IEEE Student Branch + Dominic Gaiero + dgaiero&calpoly.edu +52311 + Lebenshilfe Münster gGmbH + Tobias Pelz + kontakt&lebenshilfe-muenster.de +52312 + Cloudify Platform Ltd. + Elad Dotan + it&cloudify.co +52313 + itself s.r.o. 
+ Radim Hupšil + hupsil&itself.cz +52314 + Mairie d'Antibes Juan-les-Pins + Eric DEBOST + eric.debost&ville-antibes.fr +52315 + ituma GmbH + Johannes von Langen + langen&ituma.eu +52316 + Synertone Communication Corp + Chan Siu Bun + chenzb&synertone.net +52317 + NRB + Alfonso SCIASCIA + alfonso.sciascia&nrb.be +52318 + futratec, llc. + parker maroney + fallswithki&gmail.com +52319 + Urzad Komisji Nadzoru Finansowego + Tomasz Wisniewski + tomasz.wisniewski&knf.gov.pl +52320 + Aerolineas Argentinas S.A + Enriqueta Battaglia + penarsa&aerolineas.com.ar +52321 + ausecus GmbH + Andreas Dolp + andreas.dolp&ausecus.com +52322 + Hospital Pediátrico Universitario William Soler Ledea + Jorge Tamayo Ramírez + jorginno76&gmail.com +52323 + AMC + Yavorsky Mykola + amcmail&ukr.net +52324 + Unassigned + Returned 2018-07-24 + ---none--- +52325 + Edinburgh Hacklab Ltd + Tim Hawes + iana-pen&edinburghhacklab.com +52326 + Alpine Optoelectronics Inc + Ming Ding + linlin.qiu&alpineoptoelectronics.com +52327 + Code Writers + Brent Bengtson + brent.bengtson&codewritersinc.com +52328 + HiTRON SYSTEMS Inc. + Seongho Jeong + shjung&hitron.co.kr +52329 + DSI DGAC + Arave ABDOUS + arave.abdous&aviation-civile.gouv.fr +52330 + Chubb Systems Ltd + James Atherton + james.atherton&chubbfs.com +52331 + learntotechsolutions Limited + Abdulrahman Al-Dabbagh + abdul&learntotechsolutions.com +52332 + INTEGRITY Security Services LLC + Alan Meyer + iss.policyauthority&ghsiss.com +52333 + Banco de Credito del Peru + Christopher Sanchez + christophersanchezc&bcp.com.pe +52334 + Systematic Inc + Andrew Sauer + andy.sauer&systematicinc.com +52335 + Alignment Healthcare + Eric Hill + security&ahcusa.com +52336 + Merrill Corporation + Dale Gould + dale.gould&merrillcorp.com +52337 + Bitfusion Inc. + Alberto Alonso + info&bitfusion.io +52338 + Ultimum Technologies s.r.o. + Stanislav Ulrych + stanislav.ulrych&ultimum.io +52339 + denkbares GmbH + Alex Legler + alex.legler&denkbares.com +52340 + Barber Management, Ltd. + Alexey Morilov + develop&big-bro.pro +52341 + Search4.Work + Brent A. Bengtson + brent.bengtson&search4.work +52342 + Highland Centre + Neal Andrews + tech&highlandcentre.org +52343 + Rohtash kumar + Rohtash kumar + rohtashgandhi34&gmail.com +52344 + PriVerify Corp. + Adam Glynn + adam.glynn.iana&priverify.com +52345 + County of Nevada + Landon Beard + landon.beard&co.nevada.ca.us +52346 + AAP-NDT GmbH + Andreas Thielen + thielen&aap-ndt.com +52347 + iwerk + Rich Rowan + rrowan&iwerk.com +52348 + TruePath Technologies + Douglas Mauro + support&truepathtechnologies.com +52349 + DIMATE GmbH + Jens Martin + martin&dimate.de +52350 + Allison Transmission Inc. + Kyle Pierce + Kyle.Pierce&allisontransmission.com +52351 + Sitecore + Vitaliy Pidodnya + vpi&sitecore.net +52352 + Georgian Microelectronics + David Japaridze + info&geme.ge +52353 + Manutec srl + Matteo Sacco + matteo.sacco&mail.manutec.it +52354 + Alex Almero + Alex Almero + aalmero&gmail.com +52355 + American Oncology Partners, P.A. 
+ Patrick Cochrane + pcochrane&flatiron.com +52356 + Someserver + Sebastian Lohff + iana-pen&someserver.de +52357 + Trackhe.de + Michael Nehring + trust&trackhe.de +52358 + Paradigma + Gianpaolo Ferrarin + gianpaolo&eggonemail.com +52359 + wedoid + FengXi + westwin&gmail.com +52360 + Solutions By OQuinn + Daniel O'Quinn + danny&sbo.tech +52361 + THALES AVIONICS ELECTRICAL SYSTEMS + CUILLIER Florian + florian.cuillier&fr.thalesgroup.com +52362 + healthfinch + Eric Richards + ericr&healthfinch.com +52363 + Villamosipari Kooperációs Iroda Bt + Csaba Toth + csaba.toth&spiritflame.co.uk +52364 + Marryatville High School + Richard Todd + richard.todd&usgroup.com.au +52365 + Porsche Ukraine LLC + Oleksandr Zhukov + Oleksandr.Zhukov&porscheukraine.com.ua +52366 + Dynniq Sweden AB + Ming Xie + ming.xie&dynniq.com +52367 + Livelo + Vladimir Sabatino Júnior + si-ti&livelo.com.br +52368 + Pegasus GmbH Gesellschaft für soziale/gesundheitliche Innovation + Florian Didszun + it&pegasusgmbh.de +52369 + BJYDZY + Chen Yaping + chenyaping&bjydzy.com +52370 + St. Samenwerkende Publieke Omroepen Midden Nederland + J. van Voorst van Beest + ict&rtvutrecht.nl +52371 + Energy Science Network + Daniel White + dmwhite&es.net +52372 + Long Term Care Partners, LLC + John Wiegert + contact&ltcpartners.com +52373 + Floyd Healthcare Management Inc. + Lance D. Fisher + lfisher&floyd.org +52374 + Fintech JSC + Dmitry Ponomarev + ponomarev&fintech.ru +52375 + oneZero Financial Systems + Greg Moberg + gmoberg&onezero.com +52376 + LleidaNetworks Serveis Telemàtics S.A. + Eva Pane + oid-info&lleida.net +52377 + J Skoba Industries + Jordan Skoblenick + jordan&skoba.ca +52378 + Lenz Family, LLC + Tom Lenz + thomas.lenz96&gmail.com +52379 + ADVISOR SLA + Eric MOREAU + sylvain.remy&advisorsla.com +52380 + Delphin Technology AG + Ivo Schenk + ivo.schenk&delphin.de +52381 + XIRIUS Informatique + Patrick LEGAND + sf048&xirius-informatique.fr +52382 + DoD Ophthalmology + Boonkit Purt, MD + boonkitp&gmail.com +52383 + Vrinda Nano Technologies Pvt Ltd + Mr. Brij Almadi + brij&vnt.in +52384 + Mount Sinai Health System + Luis Lewis + messaging&mountsinai.org +52385 + Weldtech + David Hsu + david.xu&weldtech.cn +52386 + Numatic International Ltd + IS Support + is.support&numatic.co.uk +52387 + INODESIGN + Mickael CORONADO + mickael&inodesign.fr +52388 + Housing Authority New Haven + James Pekar + jpekar&newhavenhousing.org +52389 + Wolfined + Christiaan de Die le Clercq + christiaan&wolfined.com +52390 + Unc Inc B.V. + Christiaan de Die le Clercq + christiaan&uncinc.nl +52391 + CCX Technologies + Charles Eidsness + charles&ccxtechnologies.com +52392 + Myriad Mobile + Myriad Systems + systems&myriadmobile.com +52393 + T.D. Video Engineering + Tobias Dieterich + tobias.dieterich&tdvideo.de +52394 + Sussex Partnership NHS Foundation Trust + Greg Roberts + greg.roberts&sussexpartnership.nhs.uk +52395 + OMICRON electronics GmbH + Stephan Amann + oid.master&omicronenergy.com +52396 + Bootstrap di Giuseppe La Rocca + Salvatore La Rocca + mail&boot.it +52397 + Matthias Lösch + Matthias Lösch + matthias&loesch.hamburg +52398 + Raubex Pty Ltd + Adriaan Ferreira + it-notify.dc&raubex.com +52399 + Cellum Global Zrt. 
+ Attila Jankó + attila.janko&cellum.com +52400 + Wimark Systems + Pavel Gonchukov + info&wimark.com +52401 + The Source + Dan Barger + dan.barger&thesource2000.com +52402 + Stromnetz Graz GmbH & Co KG + Harald Gruber + ha.gruber&stromnetz-graz.at +52403 + Howlyte + Anthony Magnini + anthony.magnini&howlyte.fr +52404 + Kent ISD + Chris Lillis + itsec&kentisd.org +52405 + Tractor Supply Company + Stephen Snyder + ssnyder&tractorsupply.com +52406 + Centro de Cáncer de la Montaña, CSP + William Sierr + wsierra&ccmcsp.com +52407 + Xinhua News Agency + Yanbin Ma + mayanbin&xinhua.org +52408 + Shanghai Di'an Technology Incorporated + Robert Tian + robert&aolc.cn +52409 + Technologywise + Richard Hills + web&tw.co.nz +52410 + ONES.AI + Dong Sheng + dongsheng&ones.ai +52411 + Unassigned + Returned 2019-04-10 + ---none--- +52412 + NN Biztosító Zrt. + Bártfai Gábor + biztonsag&nn.hu +52413 + Macon County R-1 School District + Jamie Warren + jlwarren&macon.k12.mo.us +52414 + Hestra Åkeri AB + Patrik Lindgren + patrik&hestraakeri.se +52415 + Quantitative Risk Management, Inc. + Roy Gedeborg + Roy.Gedeborg&qrm.com +52416 + GÖRLITZ AG + Andreas Pfüller + andreas.pfueller&goerlitz.com +52417 + Fusion Power Systems Pty Ltd + Aidan Cyr + aidan.cyr&fusionps.com.au +52418 + Skyhawk Group + Mitchell S. Sharp + msharp&goskyhawk.com +52419 + Fortis Labs Pty Ltd + Aidan Cyr + aidan&fortislabs.com.au +52420 + ION UPS Pty Ltd + Aidan Cyr + aidan.cyr&ionups.com.au +52421 + INFODATA S.A. + Adam Piorko + kontakt&infodata.pro +52422 + CTI Products, Inc. + David R Groen + dgroen&ctiproducts.com +52423 + NetworkFX + Massimiliano Pala + m.pala&cablelabs.com +52424 + Kyrio, Inc. + Massimiliano Pala + m.pala&cablelabs.com +52425 + The North America Connect it Networks Company inc. + Jonathan Senecal + noc&connectitnet.com +52426 + Farm Credit Canada + Chris Spencer + chris.spencer&fcc-fac.ca +52427 + Upravleniya obrazovaniya administratsii g.Belgoroda + Karpenko Alexander + uo-belgorod&yandex.ru +52428 + Certinet S.A. + Roberto Riveros Duran + soporte&certinet.cl +52429 + SPConnect Pte Ltd + Chow Si Hao + chowsihao&spgroup.com.sg +52430 + AppVision + Kris Hefner + khefner&appvision.net +52431 + MensaMarine + Alexander Boomgard + mensamarine&yahoo.com +52432 + Micro Foundry + Terry Phillips + terry.phillips&microfoundry.com +52433 + Bernard Krone Holding SE & Co. KG + Stefan Wenker + stefan.wenker&krone.de +52434 + Williams Lea Limited + Chandramohan Swaminathan + chandramohan.swaminathan&wlt.com +52435 + Fiserv Corporation + Fiserv Global Cyber Security Services + PKI.Services&fiserv.com +52436 + Family Footcare Specialist, Inc. + Melissa Chew + familyfootcarespecialist&comcast.net +52437 + DroidSolutions GmbH + Anton Anders + anton.anders&droidsolutions.de +52438 + Zollihood Information Technology and Computer Sciences Research Laboratories + Rettich Washington + yvldwt&gmail.com +52439 + TeraGo Networks Inc. + James Bothe + james.bothe&terago.ca +52440 + appotronics + Qitao Song + qt.song&appotronics.cn +52441 + Blight-Clark + Mick Clark + mclark1980&gmail.com +52442 + NETGLUE SDN BHD + HALVIN HEE + halvin&netglue.com.my +52443 + Valley Cancer Associates, P.A. + Anthony Flores, Jr. + aflores&valleycancer.com +52444 + ATV Corporation + Masaki Shimizu + shimizu&atvcorporation.com +52445 + Gwydir Shire Council + Justin Hellmth + jhellmuth&gwydir.nsw.gov.au +52446 + Shanghai XunTai Information Technology CO., LTD + Hao Fang + fanghao&xtquantech.com +52447 + DAIKIN INDUSTRIES, LTD. 
+ KATSUJI FUJIKI + dki.adadministrator&daikin.co.jp +52448 + Nedcomp Hosting B.V. + Floris Termorshuizen + beheer&nedcomp.nl +52449 + SPIRIT/21 GmbH + Support + support&spirit21.com +52450 + CounterSign Ltda + Marcio Roberto Ramirez + mramirez&consultsoftware.com.br +52451 + Helsionium + Mag. David Hell + administrator&helsionium.eu +52452 + Eloxal Design Jung + Thomas Jung + thomas.jung&eloxal-juelich.de +52453 + VOEB-ZVD Processing GmbH + Torsten Harting + torsten.harting&voeb-zvd.de +52454 + SMC Ship Motion Control + Richard + richard.janas&shipmotion.se +52455 + l0nax UG (haftungsbeschränkt) + Emanuel Bennici + benniciemanuel78&gmail.com +52456 + Cloudwave + Matthew Donahue + mdonahue&gocloudwave.com +52457 + Automated Microprocessor Systems + Yavorsky Mykola + amcmail&ukr.net +52458 + Sercos International e.V. + Peter Lutz + p.lutz&sercos.de +52459 + NanoTemper Technologies GmbH + Matthias Fleschütz + it&nanotempertech.com +52460 + Urząd Dozoru Technicznego + Wojciech Napierała + wojciech.napierala&udt.gov.pl +52461 + Matrix Elektronik AG + Michele Peruzzi + it&matrix-elektronik.com +52462 + BloomReach + Bart van der Schans + bart.vanderschans&bloomreach.com +52463 + Contratanet Sistemas LTDA + Gabriel Calegari + gabriel.calegari&contratanet.com.br +52464 + Telefônica Brasil S.A. + Carlos Cesar Reis da Silva + carlos.reissilva&telefonica.com +52465 + LEGAL SIGN SPA + Enrique Santelices + contacto&legalsign.cl +52466 + Lamont-Doherty Earth Observatory + Jonathan P. Sattelberger + jsattelb&ldeo.columbia.edu +52467 + PEPXIM Ltd. + Eddy Yeung + eddyy&pepxim.com +52468 + NVision Group + Alexey Pishchulin + apishchulin&nvg.ru +52469 + RCL Manila + Enrique Bondoc + ebondoc&rccl.com +52470 + Soft Strategy S.p.A. + Natascia Asaro + nasaro&softstrategy.it +52471 + Systema Datentechnik GmbH + Norbert Fehlauer + pen&systema-online.de +52472 + Medical Oncology and Hematology Associates + Jane Osterson + josterson&cancercenterofiowa.com +52473 + CEMA AG + Frank Breier + servicedesk&cema.de +52474 + McAfee, LLC + Michael Melnyk + mykhaylo_melnyk&mcafee.com +52475 + PriSec Limited + Edwin SO + edwin.so&prisec.co +52476 + ginoclement.com + Gino Clement + ginoclement&gmail.com +52477 + Werner Enterpises + David Elfering + delfering&werner.com +52478 + Holger Rauch + Holger Rauch + holger.rauch&posteo.de +52479 + Fedor Radostev + Fedor Radostev + radostev.fedor&gmail.com +52480 + City of Canon City + Chase Weber + caweber&canoncity.org +52481 + 北京圣博润高新技术股份有限公司 (Beijing SBR High-tech Co., Ltd.) + 张海俊 (Zhang Haijun) + zhanghaijun&sbr-info.com +52482 + FUJIYAMA POWER SYSTEMS PRIVATE LIMITED + RAKESH GARG + RAKESH&UTLUPS.COM +52483 + EGDN + Eugene Semerikov + evgen&egd.net.ru +52484 + Nexway co.,ltd + yudai matsusaka + matsusaka_yudai&nexway.co.jp +52485 + Mensa in Deutschland e. V. + Martin H. Sluka + root&mensa.de +52486 + Kazcangi + Sébastien Ignaczak + sebastien&ignaczak.fr +52487 + ENERTECH COMNET + A V RAMANA RAJU + enertechcomnet&yahoo.com +52488 + BEC + Niels Joergen Hansen + njh&bec.dk +52489 + Landesamt für Zentrale Polizeiliche Dienste + Frank Dreher + frank.dreher&polizei.nrw.de +52490 + Gardners Lane & Oakwood Federation + IT Services + itservices&cheltcc.org.uk +52491 + LES-TV + Vladimir Kuzemko + ku&les.ru +52492 + TIBCO Software Inc. + Tibco Mashery Operations + rpielock&tibco.com +52493 + BACnet Interoperability Testing Services, Inc. 
+ Edward Hague + edward&bac-test.com +52494 + The NetWorker + Greg Johnson + gregj2010&yahoo.com +52495 + ISE Informatikgesellschaft für Software-Entwicklung mbH + Hendrik Heimers + hendrik.heimers&ise-online.com +52496 + Panoptiqon + Mert Cingöz + mert.cingoz&panoptiqon.com +52497 + Aloha Group LLC + Mark Joseph + sysadmin&alohagroupllc.com +52498 + Etat de Vaud + Ignacio Arsuaga + ignacio.arsuaga&vd.ch +52499 + APLEONA GmbH + Sebastian Gawlik + sebastian.gawlik&apleona.com +52500 + Santander Bank Polska SA + Paulina Smolińska + certificates&santander.pl +52501 + DIANET INFRASTRUCTURE MONITORING + Emmanuell Scolimoski + suprimentos&dianet.net.br +52502 + Nureva Inc. + DevOps Team + DevOpsAll&nureva.com +52503 + Eaton Lighting + Altan Stalker + altanjstalker&eaton.com +52504 + Renal Hypertension Clinic + Jennifer Kirk + renalhc&comcast.net +52505 + SoftNI Corporation + Jose M. Salgado + jmsalgado&softni.com +52506 + Åströms markentreprenad AB + Johan Söderholm + Johan.soderholm&cgi.com +52507 + ProPotsdam GmbH + Olaf Janke + sys.systema&propotsdam.de +52508 + Bank of Albania + Andri Iljazi + ailjazi&bankofalbania.org +52509 + EMQX + ChongYuan Yin + yinchy&emqx.io +52510 + Sentry View Systems, Inc. + Chuck Baenen + cbaenen&sentryviewsystems.com +52511 + hSenid Mobile Solutions + Brayan Perera + brayan&hsenidmobile.com +52512 + Quirem Medical B.V. + Gerrit van de Maat + info&quirem.com +52513 + Synergy Medical BRG Inc + Frederik Bernard + fbernard&secur01.com +52514 + PIO, Inc. + Nguyen Hong Nhan + hnn&processes.io +52515 + OXNET Lukasz Pulka + Luke Pulka + lukasz&oxnet.pl +52516 + The National Diabetes and Obesity Research Institute + Emily Foret + eforet&ndori.org +52517 + LAZIOcrea S.p.A. + Claudio Latini + claudio.latini&laziocrea.it +52518 + UW Credit Union + John Saley + infosec&uwcu.org +52519 + IT Mothership + Brian Roehm + broehm&itmothership.com +52520 + Telesea Technology Co.,Ltd. + Jiancun Li + info&telesea.cn +52521 + ddm Hopt+Schuler + Josep LLopart + jllopart&hopt-schuler.com +52522 + Sovereign Housing Association + Stuart Collier + stuart.collier&sovereign.org.uk +52523 + Secur01 Inc. + Frederik bernard + pki&secur01.com +52524 + Steed Enterprises LLC + Albert Steed + al&acsapp.com +52525 + Autotrol S.A. + marcelo rodriguez + mr&autotrol.com.ar +52526 + NextComputing + Dinkar Chivaluri + dchivaluri&nextcomputing.com +52527 + Sonobi, Inc. + Nathan Johnson + njohnson&sonobi.com +52528 + Novigo S.R.L + Exequiel Arona + earona&novigo.com.ar +52529 + Synchronet Telecomunicações Ltda + Pedro Paulo Medeiros + medeirospedropaulo&gmail.com +52530 + Eldis-Soft + Anatoly Galiulin + vega&eldis.ru +52531 + CJSC "GOLLARD" + Konstantin Kireu + kireu&pkcc.ru +52532 + LANGROOM LTD + Mikhail Panfilov + software&skyeng.ru +52533 + PayOS + Alex Konovalov + alexcon314&gmail.com +52534 + Engineered Floors + Jammie Greene + jammie.greene&engineeredfloors.com +52535 + hz.gl + Houzuo Guo + guohouzuo&gmail.com +52536 + U11G + Andriy Lishchytovych + AL&u11g.com +52537 + mixi, Inc. + Junpei Yoshino + junpei.yoshino&mixi.co.jp +52538 + Ampere Computing + AJ Shah + aj&amperecomputing.com +52539 + Sanntuu Corporation + Tatsushi Takata + takata&sanntuu.co.jp +52540 + University of Latvia + Ivars Zaicevs + ivars.zaicevs&lu.lv +52541 + Desoutter + Ronan Picaut + ronan.picaut&desouttertools.com +52542 + LGM Ingénierie + Benoît TORBIERO + Benoit.TORBIERO&lgm.fr +52543 + ZVD + Alex Laz + zvd&zvd.kz +52544 + NoviFlow Inc. 
+ Luc Mayrand + luc.mayrand&noviflow.com +52545 + Jackson College + Tim Upham + tim_upham&jccmi.edu +52546 + Bloombox LLC + James S Clark + sam&bloombox.io +52547 + Bridge Group Ltd + Dmitry Baicer + Baicer&bridge-group.ru +52548 + Akademia Wojsk Ladowych + Krystian Bobkiewicz + licencje&awl.edu.pl +52549 + Telefonica Germany + Karsten Pawlik + karsten.pawlik&telefonica.com +52550 + Alameda County Water District + Geoffry Brown + geoffry.brown&acwd.com +52551 + GO MY CODE + Arsslen Idadi + arsslen&gomycode.co +52552 + IBJ Leasing Company, Limited + Kensuke Nakada + it.receive&ibjl.co.jp +52553 + Chingo Software Co., LTD. + Shiying Yu + shiying.yu&gmail.com +52554 + PT Privy Identitas Digital + Sarah Rosalina Eveline + corsec&privy.id +52555 + NINGBO TURN-LINK NETWORK COMMUNICATION EQUIPMENT CO., LTD + Hongwu Chen + frank_hw&126.com +52556 + Unipark LTD + Alexey Chernyavskiy + chernyavskiy&unipark.kz +52557 + SGNR.org + Bill Brady + support&sgnr.org +52558 + TechCraft Co.,Ltd. + Apinyo Wisanapinyo + apinyo&gmail.com +52559 + P-Cure LTD + Roy Alkalay + roy.alkalay&p-cure.com +52560 + VIZZIA Technologies + Jefferson Hudson + penadmin&vizziatech.com +52561 + Jaeger Bau GmbH + Wolfgang Fitsch + w.fitsch&jaegerbau.com +52562 + Personal Soft + Fabiano Bonin + fabiano.bonin&personalsoft.com.br +52563 + xBar7 Communications, LLC + Oskar Atkinson + oskar&xbar7.com +52564 + FreeDSx SNMP + Chad Sikorra + Chad.Sikorra&gmail.com +52565 + UMC H ELECTRONICS CO., LTD. + Kazuhiro Komuro + kazuhiro.komuro.sy&hitachi.com +52566 + Mercer Financial Services + Mike Welsh + mike.welsh&mercer.com +52567 + factuno UG (haftungsbeschränkt) + Daniel Bader + daniel.bader&factuno.com +52568 + fair&smart + Nicolas Rueff + nicolas.rueff&fairandsmart.com +52569 + Secure ID LTD. + Itzhak Sharon + admin&secureid.co.il +52570 + OMU + Recai Oktaş + roktas&baum.omu.edu.tr +52571 + HIFSYS TECHNOLOGY INC. + Fuat SENGUL + fuat.sengul&hifsys.com +52572 + Oghmasys IT Services GmbH + Jean-Barthelemy Jilibert + jilibert&big-vienna.com +52573 + Keenetic Limited + Kevin Cheng + kevin.cheng&keenetic.com +52574 + StepOver GmbH + Andy Erd + service&stepover.de +52575 + Kinexon GmbH + Torben Frey + torben.frey&kinexon.com +52576 + AJ's Power Source Inc. + Travis C Priest + travisp&ajpowersupply.com +52577 + Cisco SolutionsLab + Robert W. Rogier + rorogier&cisco.com +52578 + Federal Reserve Bank of Kansas City + Jason Taylor + jason.r.taylor&kc.frb.org +52579 + ReLegoTec + Carsten Piel + info&relegotec.de +52580 + DigiCert, Inc. + Tomofumi Okubo + tomofumi.okubo&digicert.com +52581 + Kamran Khan SC + Dorothy Haley + dhaley&silvercross.org +52582 + TRUSTCLOUD SOLUTIONS, S.L. + Alberto Angón Robles + alberto.angon&trustcloud.tech +52583 + Stöde Skog AB + Örjan Åström + magnus.tjarnskog&sca.com +52584 + Shakopee Public School District + Chris Lee + clee&shakopee.k12.mn.us +52585 + Lanet Network Ltd + Anton Marcuk + it&lanet.ua +52586 + First Electronic Bank + Chris Joley + it&firstelectronic.com +52587 + Edgecore Networks Corporation + CK Ng + ck_ng&edge-core.com +52588 + Spacelabs Healthcare + Robert Buna + robert.buna&spacelabs.com +52589 + Bold Idea, INC + Ben Davis + ben&boldidea.org +52590 + Ad Astra Information Systems + Erik Fritzler + erik0010&hotmail.com +52591 + Old Naples Concierge Medicine + Jeffrey L Craig, MD + jeffrey.craigmd&yahoo.com +52592 + JOVYATLAS + Andreas Doyen + andreas.doyen&jovyatlas.de +52593 + Urzad Marszalkowski Wojewodztwa Podlaskiego + Lukasz Wejda + lukasz.wejda&wrotapodlasia.pl +52594 + RHEINZINK GmbH & Co. 
KG + Hafiz Adeyinka Adeniji + asb&rheinzink.de +52595 + Digibase Operations + Kradorex Xeron + kxeron&digibase.ca +52596 + Trustual + Victor Perl + registry&trustual.com +52597 + Oriflame Software, s.r.o. + Ivo Hanuska + ivo.hanuska&oriflame.com +52598 + ABSULT GmbH & Co. KG + Alexander Becker + info&absult.de +52599 + Gilbert Public Schools + Zane Crutchfield + zane.crutchfield&gilbertschools.net +52600 + Auburn Networks, LLC + Jay Franklin + jay&auburnnetworks.com +52601 + HORISEN AG + Nemanja Miljkovic + nemanja.miljkovic&horisen.com +52602 + PTZNetwork + Adam Simons + asimons&ptznetwork.org +52603 + Neutrona Networks LLC + David Flamini + david&neutrona.com +52604 + MediBloc, Inc. + Dr. Eunsol Lee + eunsol&medibloc.org +52605 + Security Intelligence, LLC + Rakhmetov Ruslan + rrakhmetov&securityvision.ru +52606 + Cranfield University + Luke Whitworth + network.team&cranfield.ac.uk +52607 + Nrby + Kurt Dobbins + kurt&nrby.com +52608 + Kingsen Creations Co., LTD. + Kitagawa Kenta + kenta&live.cn +52609 + GenesisCare + Philip Yarra + DST-IANA-PEN_Alerts&genesiscare.com.au +52610 + 上海宽域工业网络设备有限公司 (Shanghai Kemyond Industrial Network Equipment Co., Ltd) + 林海 (Lin Hai) + kemyond&apextech.cn +52611 + Thatcham Research + David Maskell + davidm&thatcham.org +52612 + Matoa Systems + Suryo Bintoro + suryo.bintoro&trg.co.id +52613 + Hetrogenous communications Pvt ltd + Anush Gopalan + anush&hetrogenous.com +52614 + Gregory Pest Control LLC + Bradley Pierce + support&gregorypestsolutions.com +52615 + hangzhou anlantech Co. + Wang hua + orc&anlantech.net +52616 + OOO "RusBITech-Astra" + Kirill Dobrynin + root&astralinux.ru +52617 + RomTeck Australia Pty Ltd + Russell Jones + russell.jones&romteck.com +52618 + Health Gorilla Inc. + Sultan Tezadov + stezadov&healthgorilla.com +52619 + KeyWest Networks + Kishore Gandham + kishore&keywestnetworks.com +52620 + unique projects GmbH & Co. KG + Marcel Rodewald + marcel.rodewald&unique-projects.com +52621 + Wireless Systems Solutions + Susan Gross + susan&wirelesss2.net +52622 + byon gmbh + Christian Fertig + edv&byon.de +52623 + Evonik Industries AG + Kai Kreilkamp + pki&evonik.com +52624 + Mail.Ru, LLC + Mikhail Popov + mi.popov&corp.mail.ru +52625 + Shearwater Geoservices Norway AS + Julien Le Héricy + julien.lehericy&shearwatergeo.com +52626 + Semicyber, LLC + Andy Hoag + info&semicyber.com +52627 + Unassigned + Returned 2020-09-02 + ---none--- +52628 + Express Retail LLC + Aleksandr Nemov + aleksandr.nemov&x5.ru +52629 + Nook Industries, Inc. + Rick Bergen + it&nookind.com +52630 + TechCertain Limited + TechCertain Admin + admin&techcertain.com +52631 + Shanghai Zhuoran Information Technology Co., Ltd. + Chengzhi Jia + czjia&139.com +52632 + LogSentinel + Bozhidar Bozhanov + bozhidar.bozhanov&logsentinel.com +52633 + Meierguss Sales & Logistics GmbH & Co. KG + Tobias Szabo + edv&meierguss.de +52634 + PKIMONSTER LTD. + Valerij Prusakov + vaprus&pkimonster.com +52635 + Gauff + Maik Martin + it&gauff.com +52636 + AIR-LYNX SAS + Christophe PECHUZAL + christophe.pechuzal&air-lynx.com +52637 + MOCX Engenharia LTDA ME + Edner + edner&mocx.com.br +52638 + Allentown Family Foot Care Prof Corp + Lois Clauss + lclauss&affc.com +52639 + CDNTV TECNOLOGIA LTDA + Matheus Cadori + suporte&technobox.com.br +52640 + 3Derm Systems, Inc. + Elliot Swart + elliot&3derm.com +52641 + COAC Jardin Azuayo Ltda. + Francisco Muñoz Campos + f.munoza&jardinazuayo.fin.ec +52642 + FS.COM INC + Tonny.shang + rd&fs.com +52643 + Datilmedia S.A. 
+ Eduardo Raad + eraad&datil.co +52644 + UNITEL LLC + Shijir Chimed-Ochir + shijir.ch&unitel.mn +52645 + San Francisco Foot and Ankle Center + Kelly Anderson + kelly&footprince.com +52646 + J-TEK Inc. + hawk kuo + hawk_kuo&j-tek.com.tw +52647 + tosee garan electronic rakhsh + mohsen sheikh hasani + mohsenmsh2003&yahoo.com +52648 + Nordmalings skogsmaskiner AB + Pär Rönnkvist + Johan.soderholm&cgi.com +52649 + myhELO + Earle Thomas + help&myhelo.com +52650 + J Group Italia + Michele Gentilini + info&jgroup.xyz +52651 + 柏科数据技术(深圳)股份有限公司 (Rorke Data Technology (Shenzhen) Co., Ltd.) + 许宇峰 (Xu Yufeng) + xuyufeng&rorke.com.cn +52652 + V3iT Consulting, Inc + Charles Lipscomb + chad.lipscomb&v3it.com +52653 + Teonite + Lech Karol Pawłaszek + admin&teonite.com +52654 + KRON d.o.o. + Stjepan Busic + stipe.busic&kron.hr +52655 + Newicon Oy + Tomi Hirvonen + it-support&newicon.fi +52656 + JP/Politikens Hus A/S + Thomas Kjaer + thomas.kjaer&jppol.dk +52657 + Mitsubishi Hitachi Power Systems Europe GmbH + Dirk Wedemeyer + d_wedemeyer&eu.mhps.com +52658 + Azienda Zero + Marco Scognamiglio + msc&bit4id.com +52659 + Guido de Bres Christian High School + Frederick DeWit + gdbchs&gmail.com +52660 + myToys GmbH + OPIS Team + license&mytoys.de +52661 + TD Williamson + Shane Robinson + shane.robinson&tdwilliamson.com +52662 + EDC-Business Holding GmbH + Reinhold Fahrnschon + postmaster&edc.de +52663 + Lanyon Bowdler LLP + Robin Thain + robin.thain&lblaw.co.uk +52664 + Rehm Thermal Systems GmbH + Siegmar Meier + s.meier&rehm-group.com +52665 + IEMN + Mickaël Masquelin + mickael.masquelin&iemn.fr +52666 + GreenUnit UG + Oliver Rath + oliver.rath&greenunit.de +52667 + Intercel Pty Ltd + Moe Chaudhry + moe.chaudhry&intercel.com.au +52668 + jörg giencke | internet design + Jörg Giencke + joerg.giencke&jgid.de +52669 + Cognida Foundation + Michael Hathaway + michael&wndmill.com +52670 + ZULU + Eric Voisard + evoisard&ieee.org +52671 + arvato + Manuel Montigny + Manuel.Montigny&arvato.fr +52672 + Teleservice Bredband Skane AB + Jens Andersson + noc&teleservice.net +52673 + VPSign Ltd. + Edik Furlender + efurl&vpsign.com +52674 + Schneider Electric / APC NetBotz + Kenneth Springhetti + Kenneth.Springhetti&schneider-electric.com +52675 + Peddie Institute Co., Ltd. + CAO Fang + admin&endicottschool.org +52676 + Hawaiki Cable Ltd. + Magesh Cannane + it&hawaikicable.co.nz +52677 + iFuture Service Co., Ltd. + XU Rui + oid&ifutureca.com +52678 + Jit Team Sp. z o.o. + LukaszZiolkowski + admin&jit.team +52679 + Advanced Technology Facility + Yorri Jufri Ahmad Atf + info&advantechno.com +52680 + Australian Paper + Thy Rith + thy.rith&australianpaper.com.au +52681 + dVentus Technologies + Mekdem Getahun + mekdemg&dventus.com +52682 + Sign & Crypt - Verein zur Foerderung der sicheren Kommunikation im Internet + Juergen Bruckner + info&signcrypt.at +52683 + Zombie Emergency Response Organization + Richard Dawson + dawsora&gmail.com +52684 + ATRIAN COMMUNICATION TECHNOLOGIES LLC + JAVIER OURET + javier&atriantech.com +52685 + R. Dubois + Rémi Dubois + oid-iana&rdubois.fr +52686 + Laretk + Carlos F. 
Reynoso + creynoso&lartek.com.ar +52687 + C-130 ATS + Michael Daily + michael.daily&lmco.com +52688 + Technisches Hilfswerk, SEElift + Jörg Lenz + joerg.lenz&thw-gg.de +52689 + KRUL.TECH + Pieter Krul + pieter&krul.tech +52690 + Getac Technology Corporation + Yushian Chen + yushian.chen&getac.com.tw +52691 + Aerztekammer des Saarlandes + Andreas Kuhn + andreas.kuhn&aeksaar.de +52692 + Tag-IP + 'Ndrianiaina Mandrantosoa + tosoa&tag-ip.com +52693 + Hoge Fenton + Hoge Fenton IT + it&hogefenton.com +52694 + Ribbon Communications + Richard Krizan + rkrizan&rbbn.com +52695 + OETIKER+PARTNER AG + Tobias Oetiker + tobi&oetiker.ch +52696 + Emtelligent Software Ltd + Tim OConnell + info&emtelligent.com +52697 + Sanchez Home Lab + Samuel Sanchez + samasanchez92&gmail.com +52698 + NNE A/S + Peter Kjeldgaard Nielsen + pkjn&nne.com +52699 + Ethoca Limited + Felix Almeida + felix.almeida&ethoca.com +52700 + Max Harmony + Max Harmony + maxh&maxh.name +52701 + Tech-Tips Fr + Aurelien Grimal + aurelien.grimal&tech-tips.fr +52702 + Appstractor Corporation (UK) Ltd + Rafi Ozick + techcontact&privatise.com +52703 + InstaSafe Technologies Private Limited + Biju George + biju&instasafe.com +52704 + Olympus Infotech, LLC + Srinivasan Vanamali + mali&olympus-infotech.com +52705 + State of Nebraska + Ken Huber + ken.huber&nebraska.gov +52706 + Sällströms Åkeri AB + Tommy Sällström + Johan.soderholm&cgi.com +52707 + IGEM Communications + Paul Wolfe + paul.wolfe&globalgig.com +52708 + Bildungsverbund Handwerk + Guido Michaelis + administrator&bvh-karriere.de +52709 + Orsa besparingsskog + Magnus Lewenhaupt + magnus.lewenhaupt&orsabesparingsskog.se +52710 + Crypto Capital Australia + Ted Hottes + ted&cca.trade +52711 + 广州合明软件科技有限公司 (Guangzhou Heming Software Technology Co., Ltd.) + 吴宇 (Wu Yu) + wordless&hemingsoft.com +52712 + Huber + Monsch AG + Traber Marc + marc.traber&hubermonsch.ch +52713 + VMF LATVIA SIA + Edijs Muiznieks + muiznieks&vmf.lv +52714 + COFORET + BERTRAND VERNAY + bvernay&coforet.com +52715 + UMT LLC + Ievgenii Kukhol + info&umt-tv.com +52716 + TRYDEA + Yves PIERSON + ypierson&trydea.fr +52717 + Neturius GmbH + Lars Streblow + lars.streblow&neturius.com +52718 + Nrn maskin ab + Peder lundin + lpl75a&gmail.com +52719 + Jacob Video & Sound + Paul R Jacob + jacobvideo&cox.net +52720 + RF IDeas Inc. + Graham Henderson + ghenderson&rfideas.com +52721 + Archbright + Jason Denney + jdenney&archbright.com +52722 + Deutsches Archäologisches Institut + Reinhard Förtsch + it&dainst.de +52723 + RackPower + RnD RP + rnd&austin-hughes.com +52724 + leqs + leqs support + iana&leqs.com +52725 + Paper Machinery Corporation + Robert W. Baumgartner + rwb&papermc.com +52726 + Trident RFID + Wayne Uroda + wayne.uroda&tridentrfid.com +52727 + lunica + Ludovic PERICK + ludovic&lunica.be +52728 + Xiamen Dianchu Technology Co.,Ltd. + tian gui + tg&dianchu.com +52729 + Mitteldeutsche Flughafen AG + Steffen Loeb + pki&mdf-ag.com +52730 + Vinetu Technologies Ltd + Eli Melamed + admin&vinetu.co.il +52731 + Society for Human Resource Management + Bryan Fennell + bryan.fennell&shrm.org +52732 + BODINE ELECTRIC COMPANY + Dave Sturgeon + dave.sturgeon&bodine-electric.com +52733 + A.K. & Sons + Alexander A. Kelner + a.a.kelner&gmail.com +52734 + Brain Corporation + Daniel Woodlins + woodlins&braincorp.com +52735 + DearBytes BV + J Wijnands + iana&dearbytes.nl +52736 + BlastAsia Inc. + System Admin + itg&blastasia.com +52737 + InQuest, LLC + InQuest Help Desk + support&inquest.net +52738 + Abris Ltd.
+ Viktor Weininger + viktor.weininger&abrisconsult.com +52739 + Trinity Anglican College + Cameron Bishop + itadmin&trinityac.nsw.edu.au +52740 + Hiscale GmbH + Tilo Skomudek + tilo.skomudek&hiscale.com +52741 + Doolins + Nicholas Doolin + oid&doolins.com +52742 + KeHE Distributors, LLC + Internet Administrator + internet.administrator&kehe.com +52743 + Darrera + Raúl Martínez Zancada + raul&darrera.com +52744 + Arkansas Department of Human Services - Office of Information Technology + Kevin Grace + kevin.grace&dhs.arkansas.gov +52745 + cupids guide by cynrocks + cindy l appley + cinpeaches01&yahoo.com +52746 + Amtelco + Paul Henning + paul&amtelco.com +52747 + X Free Space Optical Communication + Varun Puri + vpuri&google.com +52748 + Evolution Digital + Nicholas Bracciale + support&evolutiondigital.com +52749 + Qvalent + Frans Henskens + fhenskens&qvalent.com +52750 + Ahnnet + Hyunho Lee + hhlee&ahnnet.co.kr +52751 + RCI Financial Services Ltd + Neale Lonslow + neale.lonslow&rcibanque.com +52752 + XPHONE Czech s.r.o. + Michael Novotny + michael&xgw.cz +52753 + TE Technology, Inc. + Paul Lau + pglau&tetech.com +52754 + NIICHASPROM + Vladimir Trofimov + trofimov&niichasprom.ru +52755 + At Home + Brandy Johnston + bjohnston&athome.com +52756 + XVTEC + Avner Flesch + avner&xvtec.com +52757 + Beacon Cancer Care, PLL + Natalie Spiller + nspiller&beaconcancercare.com +52758 + Kistler Instrumente AG + Josua Hunziker + josua.hunziker&kistler.com +52759 + Rayhaan Networks + Rayhaan Jaufeerally + noc&rayhaan.ch +52760 + SBTAP-AS59715 + Mauro Vito Angiolillo + mauro.angiolillo&as59715.net +52761 + Baldwin Risk Partners + Keith Johnson + keith.johnson&bks-partners.com +52762 + Instituto Federal Catarinense - Araquari + Jefferson Douglas Viana + jefferson.viana&ifc.edu.br +52763 + HTTPCART Technologies Pvt Ltd + Praveen Gupta + tech&wijungle.com +52764 + SOUTHCERT, LLC + Luis Alfredo Armas + luis.armas&southcert.com +52765 + Einfaches-Netzwerk + Dietmar Haimann + blog&einfaches-netzwerk.at +52766 + Oita Denshi Kogyo Co., Ltd. + Technical Contact + tech&oitadensi.co.jp +52767 + E-Government + Roel Vonsee + roel.vonsee&president.gov.sr +52768 + IOV42 LTD + Robert Zapfel + robert&iov42.com +52769 + Kara Systems + Meysam Farsi + m.farsi&karasystems.ir +52770 + Caroline A. Rivera Olmo + Caroline A. 
Rivera Olmo + carolmo.hemaonco&gmail.com +52771 + dslz.biz + Sergey Pastukhov + paster&dslz.biz +52772 + Salfer GmbH + Christian Schulz + cschulz&salfer.de +52773 + Erzurum Teknik Üniversitesi + Erkan Özyılmaz + erkanozyilmaz&erzurum.edu.tr +52774 + Computerservice U.Zeysing + Uwe Zeysing + uwe.zeysing&uwe-zeysing.de +52775 + Workz Media FZ LLC + Edwin Haver + directors&workz.com +52776 + Stiftung Mathias-Spital Rheine + Dirk Hoffmann + d.hoffmann&mathias-stiftung.de +52777 + Versiant + Jeff Kupke + jeff.kupke&versiant.com +52778 + Allergy and Asthma Associates + Nancy Shocket + allergyasthmadocs&gmail.com +52779 + Parswa + Pooja Jain + pooja&parswa.com +52780 + Kasco Tecnologia + Diogo Gará Caetano + diogo.gara&kascosys.com.br +52781 + Primary Care Physicians, LLP + Maureen Jones + mjones&primarycares.com +52782 + Community Care HIE + Victor Vaysman + buy&medside.com +52783 + Xunlei + Ethan Lin + linyuzheng&xunlei.com +52784 + Atech Negócios em Tecnologias S/A + Felipe Iada Tomitar + ftomitar&atech.com.br +52785 + Radio Bremen + Mahlstedt, Markus + markus.mahlstedt&radiobremen.de +52786 + POWIDIAN + JEAN-MICHEL BERTHEAS + jean-michel.bertheas&powidian.com +52787 + Applied Laser Technologies + Thomas Weigel + weigel&lat.rub.de +52788 + Meridian Technologies Inc + Alexander Lebedev + alebedev&meridian-tech.com +52789 + Indyme Solutions + Bill Kepner + bkepner&indyme.com +52790 + Charles Taylor InsureTech + Postmaster IANA + postmaster&ctplc.com +52791 + Undocumented Features + Aaron Guilmette + aaronguilmette&gmail.com +52792 + Crazzy Systems, Inc. + Aoang Lona + aoangc&gmail.com +52793 + HOME + Igor Ozol + ystal&mail.ru +52794 + intoto systems + David Potter + dpotter&intotosystems.com +52795 + Batteries Plus, LLC + Chris Budish + cbudish&batteriesplus.com +52796 + New Hippo Health + Nicholas Waller + service.it&newhippo.com +52797 + ENH - Empresa Nacional de Hidrocarbonetos; E.P. + Boris Wiethoff + ebsadmin&enh.co.mz +52798 + ib company GmbH + Andreas Mürle + a.muerle&ib-company.de +52799 + LOGICSPECTRA + Ramaraju Sagi + sales&logicspectra.com +52800 + tegra electronic + mohsen sheikh hasani + mohsenmsh2003&yahoo.com +52801 + DCS Corporation + Scott Houck + shouck&dcscorp.com +52802 + Mackenzie Hauck + Mackenzie Hauck + mhauck&live.ca +52803 + Chrisite Systems Pty Ltd + Lee Allemand + lallemand&christiecorporate.com.au +52804 + Stringon (Beijing) Technology Inc.Stringon technology (Beijing) Co., Ltd. + Jeff.Zhang + jeff&blackpai.com +52805 + Locatee AG + Benedikt Köppel + dev-accounts&locatee.ch +52806 + Manfred Paul + Manfred Paul + iana-pen&manfredpaul.me +52807 + Supervisor 4U Bt. + Gabor Gallo + gabor.gallo&supervisor4u.hu +52808 + Paragon Development System + Michael Meyer + MMeyer&pdsit.net +52809 + Nayax Ltd. + Nitzan Carmeli + admin&nayax.com +52810 + MCC of Roshydromet + Dmitrii Teliuk + d.telyuk&meteorf.ru +52811 + National museum "Kyiv art gallery" + Kyrylo Kobtsev + kkl&knag.museum +52812 + TriNet + Wu Yuwu + wyw&hztrinet.com +52813 + Fidesmo AB + Miguel Cardo + oid.registry&fidesmo.com +52814 + Virginia Indigent Defense Commission + Jason Hodges + itsupport&adm.idc.virginia.gov +52815 + LANXESS AG + Juergen Huettel + juergen.huettel&lanxess.com +52816 + UNISYLVA + Christophe BERNARD + christophe.bernard&unisylva.com +52817 + Ramsay Corporation + Kenny Huang + itgroup&ramsaycorp.com +52818 + DITEC, a.s. + Michal Mračka + mracka&ditec.sk +52819 + Win10isComing + Mathieu Aït Azzouzene + mathieu.aitazzouzene&gmail.com +52820 + Fyfe Software Inc. 
+ Justin Fyfe + justin&fyfesoftware.ca +52821 + Totalplay + Jorge Andrés Robles + jarobles&totalplay.com.mx +52822 + Shenzhen AOTO Electronics Co., Ltd. + Yan Chunxiao + yancx&aoto.com +52823 + IT CONSULTING WOLFINGER + Klaus Dieter Wolfinger + info&itconsulting-wolfinger.de +52824 + Tomas Dobrovolny + Tomas Dobrovolny + sdobrtomas&gmail.com +52825 + PGNiG Obrót Detaliczny sp. z o. o. + Mirosław Borodeńko + miroslaw.borodenko&pgnig.pl +52826 + Latvijas Transportlidzeklu Apdrosinataju birojs + Agris Daukste + agris&ltab.lv +52827 + Unia + Managed Backend + tm-system&wagner.ch +52828 + elaf + Beat Brand + beat.brand&elaf.ch +52829 + Sumber Energi Khatulistiwa + slamet mualif + smualif&energikhatulistiwa.com +52830 + Tanner Ryan + Tanner Ryan + tanner&txryan.com +52831 + IneControl + Omar Poch + omar&inecoiec.com.ar +52832 + Westfield Cardiology + Alla Kanevsky + fwccardiology&gmail.com +52833 + Crossfield Technology LLC + Gary McMillian + Gary.McMillian&crossfieldtech.com +52834 + Joinnet Technology + Luo Longcai + luolongcai&zyytkj.com +52835 + Karsoft Technology + Aykut Ongel + aykut&karsoft.com +52836 + LEIPA Group GmbH + Sven Schaumann + Sven.Schaumann&leipa.de +52837 + Pelatro Plc + Max Afanasyev + maxim.afanasyev&pelatro.com +52838 + Illuminate Technologies Ltd + Garry Marshall + gmarshall&illuminate.solutions +52839 + digital sewa kendra fukaha tadiyawa haradoi + vipin kumar + vipin82035&gmail.com +52840 + Bearmach LTD + IT Support Dept. + itsupport&bearmach.com +52841 + Department for Environment, Food & Rural Affairs + Paul Tomlinson + paul.tomlinson&defra.gov.uk +52842 + Intevac Photonics, Inc. + Brenda Thrasher + blthrasher&intevac.com +52843 + CARD Services Utrecht + Chris Maaswinkel + maaswinkel&cardservices.nl +52844 + National Radio and Telecommunication Corporation + Muhammad Faheem Alvi + faheem.alvi&nrtc.com.pk +52845 + Mist Systems Inc. + Nicolas Dade + nic.dade&mistsys.com +52846 + Chunghwa System Integration Co., Ltd. + Richard Cheng + richard.cheng&ch-si.com.tw +52847 + Van Walt Limited + Vincent van Walt + vincent&vanwalt.com +52848 + JellWin Information Technology Co.,Ltd. + Qianfeng Tang + tangqianfeng&gmail.com +52849 + Technology Company Center, JSC + Sergey Zorin + sergey.zorin&stoloto.ru +52850 + Quiet To Deep + Han Xin + hanxv&live.cn +52851 + e-Netdata, Limited + Albert Chen + albert.chen&e-netdata.com +52852 + Ärztliche Abrechnungsstelle Ludwigsburg GmbH + Jochen Sommer + penrequest&mvz-labor-lb.de +52853 + mobisys Mobile Informationssysteme GmbH + Matthias Stadter + it&mobisys.de +52854 + HÄVG Hausärztliche Vertragsgemeinschaft AG + Uwe Mahlberg + technik&hausaerzteverband.de +52855 + Diamanti Inc + Amitava Guha + amitava&diamanti.com +52856 + Oakridge Networks Inc. + YONG KANG + yong.kang&oakridge.io +52857 + ITHB + Alex Gochi + alex.gochi.javan&gmail.com +52858 + SAE IT-systems GmbH & Co. KG + Juergen Venhaus + marketing&sae-it.de +52859 + TCPS, Inc. + IT Registration + itregistration&thompsoncreek.com +52860 + Global Commercial Technologies + Dmitriy Paunin + dmitriy&coins.ph +52861 + SoftIB, SIA + Janis Andersons + info&softib.lv +52862 + 大庆中基石油通信建设有限公司 (Daqing Zhongji Petroleum Communication Construction Co., Ltd.) + 王柳青 (Wang Liuqing) + wangliuqing006&cnpc.com.cn +52863 + Compass Systems Pvt Ltd + Amit Sharma + amit.sharma&cavisson.com +52864 + AMENIDY, Inc. + MAEDA Katsuyuki + maeda&amenidy.co.jp +52865 + IMD Ingenieurbuero fuer Microcomputertechnik + Thomas Doerfler + Thomas.Doerfler&imd-systems.de +52866 + JellWin Information Technology Co.,Ltd.
+ Qianfeng Tang + tangqianfeng&gmail.com +52867 + Dundee Foot and Ankle Center + Paul Potach + ppotach05&gmail.com +52868 + O-NET Communications (Shenzhen) Limited + Lizhi Zhao + lizhizhao&o-netcom.com +52869 + Abstruse Systems + Dylan McAuliffe + iana&abstruse.systems +52870 + TXS GmbH + IT-Support + hotline&txs.de +52871 + The Packet Hub + Warren Baker + warren&tph.io +52872 + Premier Community Credit Union + Anthony Iaccino + it&premierccu.com +52873 + HealthLink Group Limited + John Carter + john.carter&healthlink.net +52874 + Focal Tech Limited + Glynn Roche + glynn&focaltech.biz +52875 + Gebbeth, s.r.o. + Daniel Cizinsky + iana&gebbeth.cz +52876 + Art-bolitos + Abimelech Romero Lara + abimelech77&gmail.com +52877 + ASF + Stévy Matton + stevy.matton&arvato.fr +52878 + TRENNSO TECHNIK - Trenn- und Sortiertechnik GmbH + Florian Wirth + Florian.Wirth&trennso-technik.de +52879 + TECSUP + Nicolas D'URSO + durso&tecsup.fr +52880 + DOKltd + Sergey Petrov + petrov&dokltd.ru +52881 + PACE Media Development GmbH + Martin Schröder + martin.schroeder&pacemedia.de +52882 + Western University - Faculty of Science + Jeff Shantz + jeff.shantz&uwo.ca +52883 + Celfocus + Celfocus - MSEM + msem-am&celfocus.com +52884 + Trinity Guard + Tony Perera + Tony&Trinityguard.com +52885 + Three Ireland (Hutchison) Ltd + Tony Buckley + buckley.tony&gmail.com +52886 + S Park-Davis Healthcare + Samuel Park + integrated.hca&gmail.com +52887 + MALTA INFORMATIQUE + David DELFORGE-TROLLIET + david.delforge&malta-informatique.fr +52888 + HSB Affärsstöd AB + Jonas Wennberg + jonas.wennberg&hsb.se +52889 + Cropland BVBA + Geert Vromman + info&cropland.be +52890 + Sonoma County Junior College District + Dan Exelby + dexelby&santarosa.edu +52891 + Kerasotes Showplace Theatres + Craig Babarskas + cbabarskas&kerasotes.com +52892 + Critical Response Systems, Inc + James Dabbs + jdabbs&criticalresponse.com +52893 + Inspur Power Systems Co.,Ltd. + Bing Liu + liubing&inspur.com +52894 + chez14 + Qiang Christianto + chez14&publik.christianto.net +52895 + China Academy of Information and Communications Technology + Jianwei Wang + wangjianwei&caict.ac.cn +52896 + 360Globalnet Ltd + David Hogan + david.hogan&360globalnet.com +52897 + Computech bvba + Peter Verijke + peter&compu-tech.eu +52898 + Telenot Electronic GmbH + Frank Schierle + oidadmin&telenot.de +52899 + White Rabbit Security GmbH + Martin Bartosch + office&whiterabbitsecurity.com +52900 + Rouviere High School + Pascal JEAN + pjean2&ac-nice.fr +52901 + Linxdatacenter + ilya ilichov + systeembeheer&linxdatacenter.com +52902 + VI Company Services B.V. + Bas Roovers + bas&vicompany.nl +52903 + Turrillas Inc. + Francisco Turrillas + francisco&turrillas.net +52904 + Defiance Technology Inc. + Dan Molik + dan&d3fy.net +52905 + JT Global + Riaz Anjam + riaz.anjam&jtglobal.com +52906 + Acumen Consulting + Robert Wagnon + rwagnon&acumen-corp.com +52907 + APROPLAN s.a + Thomas Lionel SMETS + tls&aproplan.com +52908 + Bank J.Van Breda & C° + Michel Joossens + informatica&bankvanbreda.be +52909 + Weichert Companies + Arthur Quintalino + aquintalino&wlninc.com +52910 + Tom Technology Limited + Thomas Lewis + thomaslewis&0xaa55.me +52911 + Pika Energy + Jacob Miller-Mack + jacob.miller-mack&generac.com +52912 + AMTT (Beijing) Internet Technology Inc. + Heath Deng + snmp&amttgroup.com +52913 + RF-Tuote Oy + Ari-Pekka Lajunen + ari-pekka.lajunen&rf-tuote.fi +52914 + Acqueon Technologies Inc. + Mohideen Sheik + mohideens&acqueon.com +52915 + SCHENKER spol. s r.o. 
+ Milan Dostál + milan.dostal&schenker.cz +52916 + Tramwaje Warszawskie Sp. z o.o. + Mariusz Grochowski + ditasi&tw.waw.pl +52917 + Thomas Fuchs + Thomas Fuchs + thomas.fuchs&thomas-fuchs.net +52918 + A3 SYSTEM sprl-bvba + Thomas Lionel SMETS + Thomas.Smets&a3-system.eu +52919 + GuangZhou KaiXin Communication System Co.,Ltd. + lijiaxiang + 804580467&qq.com +52920 + MilleniumSign + Keshwarsingh Nadan + kn&millenium.net.mu +52921 + ayfie Group AS + Frank Gynnild + frank.gynnild&ayfie.com +52922 + Empa - Eawag + Cristofolini Silvano + pki-pen&empa.ch +52923 + Keyfactor + Keyfactor PKI Policy Authority + keyfactorppa&keyfactor.com +52924 + Brainbean Apps OU + Alexey Pelykh + alexey.pelykh&brainbeanapps.com +52925 + STRABAG SE + Network Administrator + networks.pki&strabag.com +52926 + alerta.io + Nicholas Satterly + nfsatterly&gmail.com +52927 + Nova Scotia Power + Haseeb Minhas + haseeb.minhas&onx.com +52928 + Volqanic Ltd + Peter Slavik + peter.slavik&vlqnc.com +52929 + Physician Reimbursement Systems, LLC + Owen Hathaway + owen&prsdata.com +52930 + Primma, LLC + Zach Truscello + Z.Truscello&medmal.com +52931 + Freedom Mobile + Muhammad Uppal + muppal&freedommobile.ca +52932 + Maven Wireless AB + Martin Wetterholm + martin.wetterholm&mavenwireless.com +52933 + Top Aces Inc. + Jeremy van Engen + jeremy.vanengen&topaces.com +52934 + Decision-Plus + David Hebert + dhebert&decisionplus.com +52935 + Envira Sostenible S.A. + Luis García + lg&envira.es +52936 + Hope and Healing Cancer Services + sophia rosado + srosado&hopeandhealingcare.com +52937 + Aerostart LLC + Aleksander Kononov + alex&pites.ru +52938 + Finslink Communication Technology Co., Ltd. + zhengchangwen + zhengchangwen&finslink.com +52939 + Dumfries & Galloway Mental Health Association + Robbie Glenister + rob.glen&dgmha.org +52940 + Domstolsstyrelsen + Morten Rønne + it-infrastruktur&domstolsstyrelsen.dk +52941 + Homelab City + Hembery Finkelduff + admin&homelab.city +52942 + WIZnet Co., Ltd. + Eric Hyungki Jung + eric&wiznet.io +52943 + Banca Comerciala Romana S.A. + Mihai Muntean + mihai.muntean&bcr.ro +52944 + SCIERIE MOULIN + LE JAOUEN Romaric + r.lejaouen&moulinvest.com +52945 + Partner MKF + Jasmin Smigalovic + jasmin.smigalovic&partner.ba +52946 + PDTec AG + Marc Schaefer + schaefer&pdtec.de +52947 + Alexandr Zolotarev + Alexandr Zolotarev + lexandr9&hotmail.com +52948 + LeviTech Co., Ltd. + Jia Tang + jia.tang&digicompass.com +52949 + DigiCompass Pty Ltd + Jia Tang + jia.tang&digicompass.com +52950 + Mindsec Technology Company + Ziliang Zhu + zhuziliang&mindsec.com.cn +52951 + Denex Technology, LLC + Nelly Alden + na&denextech.com +52952 + Stadt Winterthur + Datacenter + rz&win.ch +52953 + iVent Mobile B.V. + Sjoerd de Vries + sdevries&iventmobile.nl +52954 + Lef Srl + JACOPO CANTINI + ufficioacquisti&lef.it +52955 + Advanced Control Systems, Inc + Joseph Boike + joe.boike&acspower.com +52956 + Fronius International GmbH + Raffael Rehberger + rehberger.raffael&fronius.com +52957 + ACTIVEMALL SRL + Ciprian Buhosu + tehnic&activemall.ro +52958 + NumaTech Ltd. 
+ Pavel Smolenskiy + smolenskiy&numatech.ru +52959 + Drakontas LLC + OID Administrator + info&drakontas.com +52960 + AssistRx + Christopher Parker + Christopher.Parker&AssistRx.com +52961 + Cohen & Company + Michael Tylicki + mtylicki&cohencpa.com +52962 + AVA Abfallverwertung Augsburg KU + Michael Stenzel + ca-admin&ava-augsburg.de +52963 + BRNET + Francisco José Bernal Fernández + soporte&brnet.es +52964 + OOO "Synergy Team" + Mikhail Drachev + m.drachev&synergy.msk.ru +52965 + Berale of Teldan Group + eyal knaan + eyal&berale.co.il +52966 + Itzos + Kjeld Loozen + kjeldloozen&itzos.nl +52967 + Mascaro Construction Company + Tony Clark + tclark&mascaroconstruction.com +52968 + Reimer IT Solutions + Kjeld Loozen + k.loozen&reimeritsolutions.nl +52969 + TRUSTPRO QTSP LTD + Mario Rossi + administration&trustpro.eu +52970 + Eleven Software + Susan Liles + accounting&elevensoftware.com +52971 + Art-k-tec Systems LLC + Nika Jones + nikaj-pen&art-k-tec.com +52972 + Matthias Staudacher + Matthias Staudacher + secure.home.oid&e.mail.de +52973 + August Storck KG + Oliver Meise + windows-support&de.storck.com +52974 + Alamo Psychiatric Care, P.A. + David Vargas + Alamopsychcare&sbcglobal.net +52975 + Salish Integrative Medicine, Inc. + Callie Hills + callie.hills&salishcancercenter.com +52976 + Pittol + Gabriel Luis Duarte Pittol + gldpittol&inf.ufrgs.br +52977 + ZMNH EDV + Rattai + srl&zmnh.uni-hamburg.de +52978 + Träfrakt Götaland AB + Pehr Sundblad + pehr.sundblad&trafrakt.se +52979 + Fujian Straits Information Technology Co., Ltd. + TANG JUN + tangjun&heidun.net +52980 + Nissin Systems Co.,Ltd. + IANA Administrator + nss_its&co-nss.co.jp +52981 + CPON TECHNOLOGIES HONGKONG CO LIMITED + Richard Wang + richard.wang&cpontech.com +52982 + PNP SECURE INC. + JAE CHUN LEE + jclee&pnpsecure.com +52983 + West London NHS Trust + Jim Ying + dev-team&westlondon.nhs.uk +52984 + Albany Surgical, PC + Michael St. Angelo + mstangelo&albanysurgical.com +52985 + Omni Air International + Matthew Hunt + is&oai.aero +52986 + Mörks skogsmaskiner AB + Mats Mörk + morksskogsmaskiner&telia.com +52987 + TierPoint + Art Cancro + art.cancro&tierpoint.com +52988 + 中天宽带技术有限公司 (Zhongtian Broadband Technology Co., Ltd.) + 徐国庆 (Xu Guoqing) + xugq&chinaztt.com +52989 + Medical Training Institute Center INC + Micheal Daniels + mtdaniels&emsmedtraining.com +52990 + Heilongjiang Electric Power Dispatching Industry Co., Ltd. + Qi Li + coder_li&foxmail.com +52991 + DataOrbis + Devon Hunter + itadmin&dataorbis.com +52992 + Lahtis Entreprenad AB + Mattias Lahti + Lahtisentreprenadab&hotmail.com +52993 + ChocolateCoding + Johannes Müller + iana&chocolatecoding.com +52994 + drei01 Technology Business Services GmbH + Michael Linkenheil + linkenheil&drei01.de +52995 + Yarnlab + Zane England + zane&yarnlab.io +52996 + Andreas Nilsson Skog AB + Andreas Nilsson + anskogab&telia.com +52997 + ict-optim.cz + Support-ICT + ict&optim.cz +52998 + Electricity North West Limited + Information Security + infosecurity&enwl.co.uk +52999 + InStride Capital Foot and Ankle + Emily Peters + emilyp&capitalfootnc.com +53000 + Wasserstraßen- und Schifffahrtsverwaltung des Bundes + Mr Stefan Hemmers + Stefan.Hemmers&wsv.bund.de +53001 + Unipart + Richard Neill + sysadmins&unipart.io +53002 + Webmeisterei Informationstechnologie GmbH + Gerd Moser + office&webmeisterei.com +53003 + Cirrus Data Solutions, Inc. 
+ Stanley Qin + stan&cdsi.us.com +53004 + RIAB + Robert Idergard + idergard&gmail.com +53005 + Matthew Kehrer Companies + Matthew Kehrer + matt91.mek&gmail.com +53006 + Hybrix + Marc Baudoin + noc&hybrix.fr +53007 + Ulrich Boddenberg IT-Consultancy + Ulrich B Boddenberg + ulrich&boddenberg.de +53008 + Schröter-Group + Henrik Schröter + backoffice&schroeter-group.de +53009 + Ozaukee Medical Group + Terry S + tsomc&sbcglobal.net +53010 + KECCEO AB + Kenneth Skoog + ke1.skog&telia.com +53011 + JH skogsentreprenad AB + Johan hammarström + firmajh&gmail.com +53012 + Pine Rest + Network Administrators + networkadmins&PineRest.org +53013 + Administración de Aduanera de Honduras (formerly 'Dirección Adjunta de Rentas Aduaneras') + Edwin Joel Bulnes Vásquez + ebulnes&aduanas.gob.hn +53014 + SIGMA SYSTEM CO.,LTD. + Tatsuya Nakai + sigma&sigma-system.co.jp +53015 + Murakami Business Consulting, Inc. + Koichi Murakami + iana&mbc-net.com +53016 + AXYS Technologies Inc + David Borg + itdept&axystechnologies.com +53017 + Campo Imaging + Yuankuo Ma + mayk&campo-imaging.com +53018 + zeald + cherry ardillos + cherrymae.ardillos&gmail.com +53019 + NetDesign GmbH + Jarno Coenen + jarno&datenmodell.de +53020 + Anuview + Kevin Wilkie + kevin&anuview.net +53021 + Thimeo Audio Technology B.V. + Mathijs Vos + mathijs&thimeo.com +53022 + 8tree GmbH + Jonas Eberhard + info&8-tree.com +53023 + AGFEO GmbH & Co. KG + Ralf Weinbrecher + rweinbrecher&agfeo.de +53024 + wiesmueller.info + Fabian Wiesmüller + fabian&wiesmueller.info +53025 + Carleton Univeristy + Paul Ungoed + paulungoed&cunet.carleton.ca +53026 + RadOncWare + James Gordon + jjg&jjgordon.com +53027 + University of Havana + Lian Ulloa + l.ulloa&estudiantes.matcom.uh.cu +53028 + Jutos skog AB + Sven Ingvar Juto + jutos.skog&linnea.com +53029 + Overseas Education Investment Management (HK) Ltd + Matt Ryan + matt&oeim.co.uk +53030 + RIFT Inc + Matt Harper + matt.harper&riftio.com +53031 + AliceSystem + Takuzo Isami + aliceyou&alicesystem.net +53032 + Inter-University Institute for Data Intensive Astronomy + Jeremi-Ernst Avenant + jeremi&idia.ac.za +53033 + Zenuity AB + IT Operations + hostmaster&zenuity.com +53034 + Nio-Electronics + Malygin Igor + info&nio-electronics.ru +53035 + ArcelorMittal Global R&D Asturias + Daniel Argüelles + daniel.arguelles&arcelormittal.com +53036 + National HME Inc. + Eric Hummel + eric.hummel&nationalhme.com +53037 + InterGalactic Space Walrus + Anthony Ross + anthonyr&igsw.co.nz +53038 + Grega-JK d.o.o. + Matjaz Kolaric + matjaz.kolaric&grega-jk.si +53039 + Technetics Consulting Pty Ltd + Anthony Gazdovic + agazdovic&technetics.com.au +53040 + NOREST-SERVICES + Clément Mouline + clement.mouline&norest-telecom.fr +53041 + 北斗天汇(北京)科技有限公司 (Beidou Tianhui (Beijing) Technology Co., Ltd.) + 刘海强 (Liu Haiqiang) + liuhaiqiang20060101&126.com +53042 + Enbridge Inc. + Brian Boundy + brian.boundy&enbridge.com +53043 + InfoKeyVault Technology + Chih-Ping, Hsiao + cp.hsiao&ikv-tech.com +53044 + ABC Systems AG + Daniel Vogel + dv&abcsystems.ch +53045 + TUCHA Sp. z o.o. 
+ Volodymyr Melnyk + v.melnik&tucha.ua +53046 + Memsource + Michal Kebrt + michal.kebrt&memsource.com +53047 + oneFactor, LLC + Roman Postnikov + soft&onefactor.com +53048 + Keonn Technologies SL + Ausias Vives + avives&keonn.com +53049 + Cresdee Consulting Ltd + Simon Cresdee + simon&cresdee-consulting.com +53050 + Sberbank Telecom + Egor Aryukov + earukov&sberbank-tele.com +53051 + Lum Hematology & Oncology + Cecilia Lum + cecilia&flatiron.com +53052 + Department of Public Expenditure and Reform + Kevin Wyley + kevin.wyley&per.gov.ie +53053 + VAULT ID - SOLUCOES EM CRIPTOGRAFIA E IDENTIFICACAO + Reinaldo Freitas + admin&vaultid.com.br +53054 + T Lindmark skog AB + Robert Lindmark + robertlindmark1&gmail.com +53055 + BCNexxt B.V. + Matthijs de Vries + matthijsdevries&bcnexxt.com +53056 + Adara Technologies Inc. + Dmitriy Vassilyev + adara.support&adara-tech.com +53057 + Entropy Solution + Ryan Joel Patawaran + rjpatawaran&me.com +53058 + Baicells + wujunfeng + wujunfeng&baicells.com +53059 + TomLab + Thomas Lewis + thomaslewis&0xaa55.me +53060 + ReAssure + Kris Dunn + kris.dunn&reassure.co.uk +53061 + Kids Plus Pediatrics + Rob Bartram + sysadmin&kidspluspgh.com +53062 + NANTOKA.COM + MAEDA Katsuyuki + kei&nantoka.com +53063 + Lynx Technology + John Driver + driver&lynxtechnology.com +53064 + Stratodesk + Emanuel Pirker + epirker&stratodesk.com +53065 + Lee Industrial Contracting + Randy Weiland + randyweiland&leecontracting.com +53066 + Atlanta Ropheka Medical Center + Edward Olufelo + atlantaropheka&gmail.com +53067 + KAMALA Gupta + KAMALA Gupta + devendragupta235&gmail.com +53068 + Funkwerk Magyarország Kft. + Miklós Mets + miklos.mets&funkwerk-mo.hu +53069 + BranchScuba + Randall Branch + rbranch&harris.com +53070 + MIPS + Cédric Lemoy + cedric.lemoy&mips.be +53071 + NGN System Inc. + Vladilen Yakunin + globalbozz&gmail.com +53072 + Prime Syntax + Gunter Strauss + gstrauss&primesyntax.com +53073 + cocoon-it Projektbüro + Klaus C. Yan + k.yan&cocoon-it.de +53074 + SPG Co., Ltd + SPG Support Team + hyunju.kim&spg.co.kr +53075 + Vinetech Co., Ltd. + Jeongjin Kim + kimjeongjin&vinetech.co.kr +53076 + IT Craft YSA GmbH + Igor Mospan + mi&yourserveradmin.com +53077 + Copernicus Science Centre + Rafal Bartczak + it&kopernik.org.pl +53078 + Advanced Information Management + Joel Lord + infrastructure&advancedinfomanagement.com +53079 + Huuuge Games Sp. z o.o. + Marcin Januszewski + marcin.januszewski&huuugegames.com +53080 + Hanssons åkeri i Fjugesta AB + Niklas Hansson + Johan.soderholm&cgi.com +53081 + Philip Couling + Philip Couling + couling&gmail.com +53082 + Paedagogische Hochschule Bern + Systemtechnik - Peter Studer + server&phbern.ch +53083 + Fiber Optika Technologies Pvt Ltd + Hitesh Mehta + info&fiberoptika.com +53084 + Proxima Software di Vanini Mirco + Vanini Mirco + mirco.vanini&outlook.com +53085 + Urology Associates of Central MO + Debbie Barnes + debbie.barnes&usoncology.com +53086 + Dr. Basel Refai, MD + Tina Dean + tdean&lakemartinvascular.com +53087 + Authindicators Group (aka Brand Indicators for Message Identification Working Group) + Wei Chuang + weihaw&google.com +53088 + Hendrik Oenings Enterprise + Hendrik Oenings + hendrik&oenings.eu +53089 + The Steel Construction Institute + Barry Tattersall + b.tattersall&steel-sci.com +53090 + Burke, Inc. 
+ Mark Schuler + mark.schuler&burke.com +53091 + University of Arkansas System - Division of Agriculture + Cosmo Denger + cdenger&uaex.edu +53092 + Bank ZENIT, PJSC + Andrey Petrovsky + a.petrovskiy&zenit.ru +53093 + Contilla GmbH + Oliver Friedrich + friedrich&contilla.de +53094 + Shenzhen Boruide Technology Co., Ltd. + Qianlin Wu + wuql&broadtech.com.cn +53095 + Greenway Health, LLC + Jonathan M. Wilbur + jonathan.wilbur&greenwayhealth.com +53096 + AAR CORP. + Dharmesh Patel + dpatel&aarcorp.com +53097 + Data Alliance + Kwangbum Lee + foobar&data-alliance.com +53098 + PT. Aplikanusa Lintasarta + Ronnie I Kurniawan + ronnie.indra&lintasarta.co.id +53099 + DEKRA Testing and Certification, S.A.U. + Diego Bartolome Arquillo + diego.bartolome&dekra.com +53100 + Nexora AG + Bruno Hostaléry + bruno.hostalery&nexora.ch +53101 + JMT åkeri AB + Johan Söderholm + Johan.soderholm&cgi.com +53102 + SUMIT + Tomislav Papac + tomislav.papac&sum.ba +53103 + DASTAG + Leendert Van Wyk + leendert&yrless.co.za +53104 + Ferncast GmbH + Thomas Schlien + webmaster&ferncast.de +53105 + Athenitas Softworks, LLC + Benjamin Becker + benjamin.becker&athenitas.com +53106 + Victoria College + Andy Farrior + andy.farrior&victoriacollege.edu +53107 + Hong Leong Bank Berhad + Suresh Ramasamy + sureshramasamy&hlbb.hongleong.com.my +53108 + Individual Entrepreneur Savenkov V.O. + Savenkov Vladislav + v.savenkov&cbug.ru +53109 + Formula Student Germany GmbH + Philipp Bandow + philipp.bandow&formulastudent.de +53110 + Centre of Advanced Computing & Telecommunications + Suresh Ramasamy + suresh&cact.asia +53111 + ChocolateCoding + Johannes Mueller + iana&chocolatecoding.com +53112 + Berner Fachhochschule + Daniel Baumann + daniel.baumann&bfh.ch +53113 + STDERR.PL + Grzegorz Tworek + pen&stderr.pl +53114 + Foot and Ankle Associates of Maine, P.A. + LeAnn Albrecht + leanna&footandankles.com +53115 + Sinclair Technologies, a division of Norsat International Inc. + Ray Ling + rling&sinctech.com +53116 + Virgin Mobile UK SIT3 + Arjen Smit + asmit&libertyglobal.com +53117 + CUTTER Systems s.r.o. + David Indra + indra&cuttersystems.cz +53118 + Pierre Lind + morgan lind + moorganlind&gmail.com +53119 + Lindenberg Software + Joachim Lindenberg + pen-oid&lindenberg.one +53120 + [IS]2 + Jean-Yves Lojou + jean-yves.lojou&is2ws.com +53121 + Vestel Elektronik Sanayi ve Ticaret AS + Deniz Kurt + deniz.kurt&vestel.com.tr +53122 + LOMACO + Mathieu Migout + mathieu.migout&lomaco.fr +53123 + Gastroenterology Center, PA + Rhonda Parker + gastrocenter4500&gmail.com +53124 + Åkeri Per H Öberg AB + Stefan Öberg + stefanobergsakeri&telia.com +53125 + Gold Star Mortgage Financial Group + Ian Macomber + dnsadmin&gsfmail.com +53126 + Assistance Services + Stéphane Sanseverino + ssanseverino&onet.fr +53127 + FUJITSU GENERAL + Yoshikazu Tanaka + network&fujitsu-general.com +53128 + Quickstep Technologies Pty Ltd + Michael Ireland + itgroup&quickstep.com.au +53129 + Frank Reda MD PA + Wanda Reda + wreda&optonline.net +53130 + Centra Networks + Louis Ren + louis.ren&centra.com.au +53131 + Action Software Intl.
+ Aaron Surty + aarons&actionsoftware.com +53132 + sitt-engineering + Thomas Sitt + ThomasSitt&t-online.de +53133 + eidexen.net + Ortwin Pfeiffer + op&eidexen.net +53134 + GESIPA Blindniettechnik GmbH + Gregor Boell + gregor.boell&gesipa.com +53135 + ENARTIA + Panagiotis Vavilis + p.vavilis&enartia.com +53136 + denninger.jp Project + Eiji Matsumoto + admin&denninger.jp +53137 + Nebraska Cancer Specialists + Annie Rudloff + arudloff&nebraskacancer.com +53138 + PF Olsen Limited + Nick Horton + nick.horton&pfolsen.com +53139 + Chaos Computer Club Berlin e.V. + Ingo Albrecht + prom&berlin.ccc.de +53140 + NEOPERL + Kilian Niklaus + Kilian.niklaus&neoperl.com +53141 + EKA Skog AB + Mattias Eklund + mattias&ekaskog.se +53142 + Pima Omran Niroo + Mostafa Javadian + Javadian&Pima-co.com +53143 + Travelopia Holdings Ltd. + Jacint Toth + jacint.toth&travelopia.com +53144 + GTS Deutschland GmbH + Robert Richter + Robert.Richter&urbanandmainlines.com +53145 + Ethos Infotech + Ashwani Mathur + ashwani&ethosit.com +53146 + Frachtwerk GmbH + Friedrich Ellmer + admin&frachtwerk.de +53147 + fortop projects BV + Roelof van der Wal + rwl&4top-projects.nl +53148 + O-RAN Alliance e.V. + Zbynek Dalecky + zbynek.dalecky&o-ran.org +53149 + Wessanen N.V. + Wietse van Assema + wietse.van.assema&wessanen.com +53150 + FAR-EASTERN GRIDS COMPANY + Yury Talakan' + oid-admin&drsk.ru +53151 + Beijing Zhongchuangwei Nanjing Quantum Communication Technology Co., Ltd. + chuanyu.jiang + 1501614847&qq.com +53152 + IMEX Co.,Ltd. + Tomoaki Abe + abe&imex-net.co.jp +53153 + EM Clarity Pty Ltd + David White + david.white&emclarity.com.au +53154 + Selection + Jinsong Hu + sntree&gmail.com +53155 + DSR Corporation + Stanislav Izmalkov + stanislav.izmalkov&dsr-corporation.com +53156 + Telegra GmbH + Sebastian Röder + sysadmin&telegra.de +53157 + Asahi CE & Europe Services, s.r.o. + Radek Vopěnka + radek.vopenka&mainstream.cz +53158 + KORUS Consulting + Administrator + postmaster&korusconsulting.ru +53159 + Aksel Sp. z o.o. + IT Administrator + it&aksel.com.pl +53160 + McMinnville Foot and Ankle Specialists + Bonnie Sabatini + macfootankle503&gmail.com +53161 + Warrior Invictus Holding Company, Inc + Ron Maynard + helpdesk&priscorp.net +53162 + Whoolud Networks + Thomas Lehmann + thomas&whoolud.de +53163 + SentinelOne + Nir Givol + nirg&sentinelone.com +53164 + Unisys Österreich + Franz Antony + franz.antony&at.unisys.com +53165 + Morton Lights + Michael Junek + michael&juneks.com.au +53166 + COMaction + Michael Tholey + Support&comaction.de +53167 + Keck - IT-Consulting + Oliver Keck + support&okeck-computer.de +53168 + Concordia Station in Antarctica + Alessandro Mancini + alessandro.mancini&cnr.it +53169 + SCISYS Group PLC + Malcolm Doody + malcolm.doody&scisys.co.uk +53170 + Dox EMR + Bart Ripperger + bripperger&doxemr.com +53171 + Department of Defense Education Activity + Aaron Fudge + aaron.fudge&dodea.edu +53172 + Tschetschpi PC Services + Brian Duff + tschetschpipcservices.iana&gmail.com +53173 + University Medicine and Cardiology + KATERINA KOREN + umc_veins&yahoo.com +53174 + VITEC + Richard BERNARD + richard.bernard&vitec.com +53175 + 雅泰歌思(上海)通讯科技有限公司 (Yatai Gesi (Shanghai) Communication Technology Co., Ltd.) + Xin Huang + Jimmy_nuaa&163.com +53176 + A3K + Alexander Korinek + noles&a3k.net +53177 + autorion + Marian SVONAVA + info&autorion.sk +53178 + Iowa Cancer Specialists, P.C.
+ Amber Rankin + amarshall&iacancer.com +53179 + SystemsAdmin.pro + Matthew Hunt + mhunt&systemsadmin.pro +53180 + ControlAware LLC + Ron Beyer + inquiry&controlaware.com +53181 + Sun-Maid Growers of California + Alan Sickels + asickels&sunmaid.com +53182 + MARcom Technologies Inc. + Fred Brown + fred.brown&marcomtech.com +53183 + QalifSolutions + Anthony Magnini + adminteam&qalifsolutions.com +53184 + Reg.Ru + Leo Ten + l.ten&reg.ru +53185 + KA-RaceIng e.V. + Jens Pfeifle + jens.pfeifle&ka-raceing.de +53186 + FEDERAL STATE BUDGETARY INSTITUTION "FEDERAL CENTER OF Cerebrovascular Pathology and Stress" MINISTRY OF HEALTH OF THE RUSSIAN FEDERATION + ALEXEY NIKOLAEV + alexniko&ronc.ru +53187 + MAJMAU ENGLISH SCHOOL + MUHAMMED MARZOOKH PC + muhdmarzoo&gmail.com +53188 + Queen Margaret's School + James Beck + itsupport&queenmargarets.com +53189 + Share IT Limited + Mark Halls + mark&shareitltd.co.uk +53190 + LittleMouseCloud + 朱建军 (Zhu Jianjun) + 3193472696&qq.com +53191 + Telmex Colombia S.A. + Juan Miguel Abreo Camacho + juan.abreo&claro.com.co +53192 + Socially Determined, Inc. + David Conrad + david&sociallydetermined.com +53193 + UVEX WINTER HOLDING GmbH & Co. KG + Christian Martin + im-service&uvex.de +53194 + Goodbaby International + Jeremy Rea + Jeremy.Rea&goodbabyint.com +53195 + NM Rocha + Willian Rocha + willian.rocha&outlook.com +53196 + IDFloke + Ferdinand Neman + ferdinand.neman&solarch.work +53197 + National Institute of Technology Karnataka, Surathkal + LDAP Administrator + ldapadmin&nitk.edu.in +53198 + Foosoft srl + Luca piccarreta + luca.piccarreta&foosoft.it +53199 + TEMA TELECOMUNICAZIONI Srl + Felice Lamanna + flamanna&tematlc.it +53200 + CANAL+TELECOM + Olivier MICHEL + olivier.michel&canal-plus.com +53201 + Merit LILIN + chirun, huang + chirun&meritlilin.com.tw +53202 + Qisda Corporation + JY Hu + JY.Hu&Qisda.com +53203 + plentybytes + Boris Sander + boris.sander&plentybytes.com +53204 + Samsung R&D Institute Bangladesh Ltd. + Muhammad Mushfiqul Islam + i.mushfiq&samsung.com +53205 + Poarch Band of Creek Indians Employee Health Clinic + Linda Jones + employeehealthclinic&pci-nsn.gov +53206 + Smithee Solutions LLC + Rodney Thayer + operations&smithee.solutions +53207 + Star Electronic Concepts + Mahesh Rao + mahesh&starelco.com +53208 + Roadbit + Alireza Taghavi + alireza-taghavi&outlook.com +53209 + Stanzl + Wilhelm Stanzl + wstanzl&hotmail.com +53210 + Cavendish Communications + Scott Barrett + scott.barrett&cavcoms.com +53211 + The Chainheart Machine, LLC + Nathan P. McDonald + iana&chainheart.com +53212 + AlpenEDV KG + Andreas Erhard + office&alpenedv.at +53213 + PIH Health + Technology Resources Group + Technology.Resources&pihhealth.org +53214 + symmet.net + Lars Lehtonen + lars.lehtonen&gmail.com +53215 + J Dahlqvist Skog AB + Mattias Dahlqvist + info&dahlqvistskog.se +53216 + SaferMobility, LLC + Moshe Katz + mmkatz&safermobility.com +53217 + Bryk Technologies + RUBEN CESSA FLORES + ben&pixative.com +53218 + Energy + Azat R. Valeev + azat&valeev.su +53219 + m.it.s-co + Giray Poyraz + gary&mits-co.com +53220 + ThinkThinkDo + Hermann Mundprecht + hmun&thinkthinkdo.com +53221 + Parachor, LLC + Mark Fling + mark.fling&parachor.com +53222 + PLD Technology + Paul Drydyk + paul&pldtech.net +53223 + Among Beyond + Ba An Le + baanle&teksavvy.com +53224 + TooNet, s.r.o.
+ Gabriel Bagita + gabriel.bagita&toonet.sk +53225 + Agropur + Simon Loiselle + simon.loiselle&agropur.com +53226 + 01 Communique Laboratory Inc + Andrew Cheung + andrew.cheung&01com.com +53227 + AlliumTech Srl + Emanuele Rivoira + emanuele.rivoira&alliumtech.it +53228 + Enea AB + Daniel Rapp + daniel.rapp&enea.com +53229 + West Alabama Pediatrics + Shana Butts + sbutts&westalabamapeds.com +53230 + Posnet Polska S.A. + Adam Maciejewski + admini&posnet.com +53231 + Lubelskie Fabryki Wag FAWAG S.A. + Tomasz Król + admini&fawag.pl +53232 + vectemis.co.uk + Sean Gibson + sean&vectemis.co.uk +53233 + Defutek Inc + Tj Delmarco + tj.delmarco&defutek.net +53234 + ProcNULL + Sven Gebauer + iana-contact&procnull.de +53235 + WorldTree Information Services + Rex Loftin + icann&wtis.com +53236 + THITTANIX INSTRUMENTS + T J SAMPATH KUMAR + thittanix&gmail.com +53237 + Banfico Ltd + Kannan Rasappan + kannan&banfico.com +53238 + Wheaton World Wide Moving + Jerrod Carter + jerrod_carter&wvlcorp.com +53239 + HSSO + Ba An Le + baanle&hssontario.ca +53240 + AID:Tech + Ben Cessa + ben&aid.technology +53241 + DSV Sint Jansbrug + Ike Mulder + itb&sintjansbrug.nl +53242 + Oreus d.o.o. + Kristijan Simunic + kristijan&oreus.hr +53243 + EES SARL + Sebastien Jourdan + contact&ees06.com +53244 + RIKEN + Motoyoshi Kurokawa + sysadmin&ml.riken.jp +53245 + Cyxtera Technologies Inc + Jamie Bodley-Scott + jamie.bodley-scott&cyxtera.com +53246 + Mailteck, S.A. + Sonia Lasheras Ostiz + slasheras&mailteck.com +53247 + Customer Communications Tecknalia, S.L. + Sonia Lasheras Ostiz + slasheras&customercomms.com +53248 + Rosneft Deutschland GmbH + Jamie Hunt + jamie.hunt&rosneft.de +53249 + Inwood National Bank + Zach Miller + OID_Admin&inwoodbank.com +53250 + openBerry Foundation + RUBEN CESSA FLORES + ben&openberry.org +53251 + Sysynced Pty Ltd + Jared Joubert + jared&sysynced.com +53252 + sebitec Informatica + Sebastian Nauk + sebitec&gmail.com +53253 + NuRAN Wireless + Maxime Dumas + maxime.dumas&nuranwireless.com +53254 + Digital Receiver Technology, Inc + David Roberts + droberts&drti.com +53255 + Jilin Genolo Technology Co., Ltd. + Yu Pan + dev&genolo.cn +53256 + Expertus + Tommy Franzmann + tbrf&anythingit.dk +53257 + Nokia Distributed Access + Janet Chen + janet.chen&nokia-sbell.com +53258 + EZELink Telecom + Danny Magat + danny&ezelink.com +53259 + LPPM Global (Pty) Ltd + Lavesh Pillay + lavitista&gmail.com +53260 + ABS4S + Sylvain Gerard ARNOLD + sylvain.arnold.france&gmail.com +53261 + NIEDAX GmbH & Co. KG + Oliver Bauer + oliver.bauer&niedax.de +53262 + ESMA + Tiziano Borrelli + Tiziano.Borrelli&ESMA.Europa.eu +53263 + MODO Systems + John Birtley + john&jacketless.com +53264 + RFEL Ltd. + Mike Chambers + mike.chambers&rfel.com +53265 + UCOM LLC. + Artyom Kuzikyan + artyom.kuzikyan&ucom.am +53266 + SGI Co. + Abouzar Ostadi + ostadi&sgi.ir +53267 + MZ RNO-Alania + Georgy Levandovsky + georgy.levandovsky&mz15.ru +53268 + Binary Fusion Inc. + Michael McLean + mmclean&binaryfusion.ca +53269 + Cole Engineering Services Inc. + Shannon Boivin + shannon.boivin&cesicorp.com +53270 + Enterprise Data Solutions Inc. + Chuck Knapik + Chuck.Knapik&edsi.us.com +53271 + Cleveland Menu Printing Inc. 
+ Jeff Manuszak + Jeff.Manuszak&clevelandmenu.com +53272 + PUNCH Cyber Analytics Group + Meyer Bossert + aaron&punchcyber.com +53273 + AXING AG + Benedikt Breuer + b.breuer&axing.com +53274 + SouthwestRe, Inc + Shawn Jones + itnotifications&southwestre.com +53275 + Mid-Atlantic Oncology Hematology + Tiffany Horvath + tiffanynoelhorvath&gmail.com +53276 + Intellect Design Arena + Chandra Kumar + chandra.kumar&intellectdesign.com +53277 + Voleatech GmbH + Sven Auhagen + tech&voleatech.de +53278 + TITAN Umreifungstechnik GmbH & Co. KG + Christian Ney + cney&titan-schwelm.de +53279 + LocustWorld Ltd + Richard Lander + iana-contact&locustworld.com +53280 + Sundrax Limited + Maria Bragina + office&sundrax.com +53281 + Stelco Inc + Tim Schepens + stelco.licensing&stelco.com +53282 + Vulcan Wireless + Douglas Concors + dconcors&vulcanwireless.com +53283 + Accesspoint Technologies + Mike Hyslop + mike.hyslop&theaccesspoint.co.uk +53284 + Selecture Incorporated + Frank Blateri + frankselecture&aol.com +53285 + Shenzhen Baitong Putian Technology Co.,Ltd. + Guojun Chang + postmaster&baiton-pon.com +53286 + shuangxigong + shuangxi gong + gsx1986&126.com +53287 + Administration of town Salekhard + Mikhail Zhitnov + mzhitnov&yandex.ru +53288 + SEKOIA + Sekoia Administrator + admin&sekoia.fr +53289 + Arduent Centisi LLC + Waitman Gobble + waitman&arduent.com +53290 + AKC-Schwarz GmbH + Heiko Schwarz + info&akc-erftstadt.de +53291 + Decafgeek Enterprises + Patrick Boyne + patrick.j.boyne&gmail.com +53292 + Administration of Press, Publication, Radio and Television of Guangxi Zhuang Autonomous Region + Yuanyang Chen + gxpprft&aliyun.com +53293 + Instytut Tele-i Radiotechniczny + Karol Makowiecki + karol.makowiecki&itr.org.pl +53294 + openhosting + Niels Poppe + info+iana&openhosting.nl +53295 + Dantherm Cooling, Inc. + Rick Schmidt + rick.schmidt&danthermcooling.com +53296 + Motorola Solutions PKI + Edwin Gonzales + edwin.s.gonzales&motorolasolutions.com +53297 + Keronet Cunsulting Lrd. + Marco Quattrocchi + info&keronet.com +53298 + Editech Co., Ltd. + Sang Jun Choi + sjc&editech.co.kr +53299 + Shanxi wangyun technology co. LTD + JIAO XIAO WAN + 1329946052&qq.com +53300 + Rönningås Skog AB + Pelle Rönningås + pelle&ronningas.com +53301 + Iron Gate Technology + Rob Bartram + rbartram&trustigt.com +53302 + IDRO + Seon Kim + seon&idro.co.kr +53303 + Tianjin Optical Electrical Group Co., Ltd. + Fang Sun (孙芳) + fax&toec.com +53304 + HH Skog i Ytterhogdal AB + Lasse Hedqvist + lasse.hedkvist&hotmail.com +53305 + TrioSpring LLC + Thomas T. Gao + tom&triospring.com +53306 + SalvaPruebas + Rebollo Benítez, Salvador + salvabenitez5&gmail.com +53307 + Deutsche Telekom Technik GmbH + Jonas Kopp + Jonas.Kopp&telekom.de +53308 + MULTOS Limited + Scott Etherington + setherington&multosinternational.com +53309 + Kwanko + Benoit Portrat + adminsys&kwanko.com +53310 + Nickel Institute + Phil Moore + pmoore&nickelinstitute.org +53311 + Regional Urology, LLC + Andrea Jones + ajones&regionalurology.com +53312 + albaek.NET + Thomas Albaek + thomas&albaek.net +53313 + CyberX-Labs Inc.
+ Ariel Saghiv + info&cyberx-labs.com +53314 + Hindmarsh Pty Ltd + Derek Harley + oidpen&hindmarshgroup.com +53315 + ITC Global + Patrick Connelly + pconnelly&itcglobal.com +53316 + TenderCare Pediatrics of Miami, LLC + Jacqueline Valdes + jvaldes&femwell.com +53317 + Worktrax Pty Ltd + Dave Kadow + dave&worktrax.com.au +53318 + Enetek Power Asia Pte Ltd + KUMAR PALANICHAMY + kumarp&enetek-power.com +53319 + Furiosa-AI + June Paik + junepaik&furiosa.ai +53320 + Wilab + Javier Roberts + javier&wilab.io +53321 + Smokescreen Technologies + Amir Moin + amoin&smokescreen.io +53322 + H.R.Z. Software Services LTD + Roni Zaharia + info&roniza.com +53323 + Deutsche Lufthansa AG + Benita Böhme + benita.boehme&lhind.dlh.de +53324 + Entrust (Europe) Limited + Andrew Jackson + andy.jackson&entrustdatacard.com +53325 + Wavestone + Wavestone Admin + admin&wavestone.com +53326 + BWI GmbH + Vincent Spanjaart + vincent.spanjaart&bwi.de +53327 + Erudicon + Neil Johnson + neil.johnson&erudicon.com +53328 + Declaro Support Limited + David Wright + info&declaro.co.uk +53329 + Amphenol Aerospace + Carl Hippenstiel + chippenstiel&amphenol-aao.com +53330 + Netstratum Inc + Abdul Nazeer + anazeer&netstratum.com +53331 + JMA Wireless + Development Tools Account + dev-tools&jmawireless.com +53332 + TECNAIR S.p.A. + Alessandro Rettani + alessandro.rettani&eu.panasonic.com +53333 + Xchange + Christopher Garrison + info&xchange.life +53334 + Cloudstreet Oy + Joona Myllynen + joona.myllynen&cloudstreet.co +53335 + REKOBA GmbH + Christian Schnell + cs&rekoba.de +53336 + Resource Environmental Services, LLC + Michael Sheppard + msheppard&res.us +53337 + Rectangle Solutions Group + Michael Dzikowski + its.reg&rectanglesg.com +53338 + Future Systems (AUST.) Pty Ltd + Steve Kennedy + kennedys&futuresystems.com.au +53339 + Auditron + Pascal C. Kocher + pascal.kocher&auditron.ch +53340 + Challenge Networks + Ben Larcombe + ben.larcombe&challengenetworks.com.au +53341 + Carlsson & Persson Skogstjänst AB + Henric Carlsson + Johan.soderholm&cgi.com +53342 + Deutsche Telekom IT GmbH + Jörg Olschowka + joerg.olschowka&t-systems.com +53343 + VAHLE Automation GmbH + Hubert Feurstein + office&vahle.at +53344 + NRS Healthcare + Steve Johnson + techservices&nrs-uk.co.uk +53345 + LAKA CZ s.r.o. + Otakar Horak + otakar.horak&laka.cz +53346 + Pinnacle Data Services + David Jones + postmaster&pinndata.com +53347 + ISP TI INFORMATICA LTDA ME + Lamartine N. P.
Filho + lamartine&ispti.com.br +53348 + Cerberus Systems + Jan van Brügge + jan&vanbruegge.de +53349 + JCCS PC + Victor Lombardo + victor.lombardo&jccscpa.com +53350 + Wirral Community NHS Foundation Trust + Alex Kay + wcnt.infrastructure&nhs.net +53351 + Université de Technologie de Belfort-Montbéliard + Nicolas JOURDAIN + nicolas.jourdain&utbm.fr +53352 + R.Brorssons Skogsentreprenad AB + Robin Brorsson + r.brorsson&gmail.com +53353 + Phasornet Labs + H S MUKUNDA + mukunda&phasornet.com +53354 + TachTEK Internal Operations + Stephan Noetzel + 3601101&ds9.cc +53355 + OnePointe Solutions LLC + Don Carlson + dcarlson&onepointesolutions.com +53356 + West Park Healthcare Centre + Radu Iancu + radu.iancu&westpark.org +53357 + Orthopaedic Institute for Children + Bruce Keelan + bkeelan&mednet.ucla.edu +53358 + Trucking inc + Denis Carias + dacarias77&gmail.com +53359 + Axians ICT Austria GmbH + Roland Felnhofer + infra&axians.at +53360 + Team Fardigh AB + Karl-Gunnar Färdig + teamfardigh&gmail.com +53361 + Michigan Institute for Interventional Pain + Almedina Asbahi + miippain&gmail.com +53362 + TheoryEngineering + Devin P Davis + Devin&limitedsystems.net +53363 + Bilgipro + ILKIN TACAN + ilkin.tacan&gmail.com +53364 + pschatzmann.ch + Phil Schatzmann + phil.schatzmann&gmail.com +53365 + Wenzel + Thomas Wenzel + iana&leznew.com +53366 + Worteks + Clément OUDOT + clement.oudot&worteks.com +53367 + bitmain technology(beijing) company + David.Chan + wei.cheng&bitmain.com +53368 + Midea Group + Ting Chen + ting1.chen&midea.com +53369 + Ottawa County + Harold Harper + hharper&miottawa.org +53370 + SimpliSafe Inc + Paul Rodine + network&simplisafe.com +53371 + Gladbooks Ltd + Brett Sheffield + brett&gladbooks.com +53372 + Sandvik AB + IAM + iam&sandvik.com +53373 + Leuxner.net + Thomas Leuxner + tlx&leuxner.net +53374 + PT. Poly Jaya Medikal + Mohamad Ikhsan Sofyan + ikhsan.sofyan&outlook.com +53375 + Frank's International + Hoyt Self + hoyt.self&franksintl.com +53376 + Lightron Inc. + Youngjin Na + nayj&lightron.co.kr +53377 + Imagination and illusions + Tasha Davison + Sassynstrong100&gmail.com +53378 + Jolla Oy + David Greaves + david.greaves&jolla.com +53379 + nautilus capital + Philip Chan + it&nautiluscapital.net +53380 + Centrale de Compensation - Swiss Central Compensation Office + Philippe Kappel + philippe.kappel&zas.admin.ch +53381 + Screaming Eagle llc (formerly 'CadillacJoyride enterprises') + Cliff Everett McKnight + cliffmcknight1&gmail.com +53382 + ООО Программный Продукт (LLC Software Product) + Качула Евгений (Kachula Evgeny) + ekachula&ppr.ru +53383 + BT Lancashire Services + Mark Greenwood + mark.greenwood&btlancashire.co.uk +53384 + Inter-American Development Bank + Rubén Cessa Flores + rcessa&iadb.org +53385 + Trinomica GmbH + Oliver Sandmann + iana&trinomica.de +53386 + da-weber + Markus Weber + markus&da-weber.de +53387 + New Sun Road + Michael Goldbach + mgoldbach&newsunroad.com +53388 + S&S Sprinkler Company LLC + Derek DuBois + derek.dubois&sssprinkler.com +53389 + BirTech Technology + Eren Birekul + eren.birekul&birtech.com +53390 + Volterra, Inc. + Jon Nistor + nistor&ves.io +53391 + Produsentregisteret sa + Ole karsten kirste + Okk&produsentregisteret.no +53392 + CMS Computers Ltd. 
+ Vishnu Vishwanath + vishnu_v&cms.co.in +53393 + iotera + Deden AF + deden&iotera.io +53394 + Piimega Oy + Hannes Lievonen + hannes.lievonen&piimega.fi +53395 + NorthCountry Federal Credit Union + Ross Zipper + it&northcountry.org +53396 + Liwtuvos sveikatos mokslų universiteto ligoninė Kauno klinikos + Evaldas Bačiulis + licencijos&kaunoklinikos.lt +53397 + Filbico Sp. z o.o. + Andrzej CZAJKOWSKI + aczajkowski&filbico.com +53398 + Intrinsic Systems Limited + Darren Thoirs + dmthoirs&intrinsic-systems.co.uk +53399 + Sonstorps Åkeri AB + Rickard Karlsson + rickard.karlsson&sonstorpsakeri.se +53400 + Cinemeccanica s.p.a. + Lorenzo Branca + lorenzo.branca&cinemeccanica.it +53401 + Computastar Limited + Jason Hunter + jason.hunter&computastar.com +53402 + Bio-Logic SAS + Thierry Kauffmann + bladmin&bio-logic.net +53403 + Belcan, LLC + Mark Mahoney + mmahoney&belcan.com +53404 + The Orchard Enterprises NY, Inc. + Artur Onefater + artur&theorchard.com +53405 + QOS Networks + Jon Moss + jmoss&qosnet.com +53406 + AB Habitat + Davy Le Meur + dlemeur&ab-habitat.fr +53407 + n5201 + Alex Con + alexcon314&gmail.com +53408 + Thermomentum Ltd + Andy Simpkins + andy&thermomentum.com +53409 + VITRIA TECHNOLOGY INC + Hale Metzger + hmetzger&vitria.com +53410 + Technipad + Patrick Savary + patrick.savary&bluewin.ch +53411 + Kabam Games, Inc. + Peter Storkey + pstorkey&kabaminc.com +53412 + ConvergeOne Holdings Inc. + David Schlenk + dschlenk&convergeone.com +53413 + Musikverein Angelbachtal e.V. + Jan Otto + j.otto&mv-angelbachtal.de +53414 + Stockton Hematology Oncology Medical Group + Bob Anderson + randerson&shomg.net +53415 + A-Kyrey Systems, Inc + Alfredo Barrios Villanueva + alfbarrios2010&gmail.com +53416 + certree.com + Michael Li + oid&certree.com +53417 + Womens Choice Oncology,PLLC + Van Williams + van.williams&womenschoiceoncology.com +53418 + Urology Partners of North Texas, PLLC + Lisa Baker + lbaker&ushventures.com +53419 + Harmony + Eugene Kim + ek&harmony.one +53420 + Global Soluciones Inteligentes S.A. + Alfredo Barrios + alfredo.barrios&globalsi.com.py +53421 + Group-IB LTD. + Alexander Ershov + ershov&group-ib.com +53422 + Deutscher Fachverlag GmbH + Roger Riege + roger.riege&dfv.de +53423 + Selcuk Universitesi + Selcuk postmaster + postmaster&selcuk.edu.tr +53424 + Dijssel B.V. + Lukas van den Dijssel + lukas&dijssel.nl +53425 + Business France + ALEGRE Philippe + philippe.alegre&businessfrance.fr +53426 + Vacus Tech Pvt Ltd + Venugopal kapre + venugopal.k&vacustech.com +53427 + GB Logging AB + Gustaf Bergman + huggarn82&hotmail.com +53428 + Invigo Offshore SAL + Walid Badaoui + walid.badaoui&invigo.com +53429 + Digital 14 Oy + Teemu Savolainen + teemu.savolainen&katim.com +53430 + AUTAJON Group + Sylvain Mollier + it.etude.infra&autajon.com +53431 + Lookman Electroplast Industries Limited + Ammar Bharmal + itsupport&lookman.in +53432 + Fondation des Apprentis d'Auteuil + Christophe Laverdure + production.informatique&apprentis-auteuil.org +53433 + J Amréns Skogsmaskiner AB + Jonas Amrén + jonas&amrens.se +53434 + SKY WALKERS Czech Republic s.r.o. 
+ Peter Slavik + peter.slavik&o-id.info +53435 + InProTec Industrial Process Technologies S.r.l + Kostiantyn Ananievskyi + k.ananievskyi&inprotec.it +53436 + Offsite, LLC + NOC + nocsupport&off-site.com +53437 + Justus + Kristian Justus + kristian&justus.biz +53438 + Färsåns Skogsentreprenader AB + Stefan Ajax + farsanskog&telia.com +53439 + Beijing Finelinx technology co.,Ltd + Guo Baozhong + gbaozhong&sina.com +53440 + Coromatic Nord AB + Bo Danielsson + bo.danielsson&coromatic.se +53441 + TECHNOPROG + Pavel Lepikhov + lepikhov&tehnoprog.ru +53442 + Thomas Westén + Thomas Westen + thomas.westen&hotmail.com +53443 + ELEPHANTVERT FRANCE SAS + Eric Couasnet + contact-dsi&elephant-vert.com +53444 + ROTOFRANCE IMPRESSION + Xavier LAMBROS + xavier.lambros&rotofrance.fr +53445 + Cyborgi + Web Administrator + web-admin&cyborgi.com +53446 + Firebird Foundation Incorporated + Helen Borrie + helebor&iinet.net.au +53447 + Numis Securities Ltd + Mathew Abraham + infrasup&numis.com +53448 + Ernst Dello GmbH & Co. KG + Matthias Timm + Matthias.Timm&akquinet.de +53449 + Optieng + Gustavo Azenha + gustavo.azenha&optieng.com +53450 + Laurelin Open Source + Alex Shafer + ashafer&pm.me +53451 + Enable Networks Limited + Mark Lyons + mark.lyons&concepts.co.nz +53452 + Verizon Connect + Alex Miller + alex.miller&verizonconnect.com +53453 + Iglesia De Dios Puente De Salvacion, Inc. + Daniel Soto + daniel.soto&iddpds.org +53454 + DataPlus + Sergey Shapotkin + support&shs-systems.ru +53455 + Susfugo + NetOps + netops+iana&susfugo.com +53456 + Dentons Australia Pty Ltd + Michael Micallef + michael.micallef&dentons.com +53457 + CleverLogic Co.,Ltd. + Jae Kyung KIM + jkiset&cleverlogic.co.kr +53458 + Maldun Security + Banyan He + bhe&maldun.com +53459 + CHALLENGE NETWORKS PTY LTD + Ben Larcombe + support&challengenetworks.com.au +53460 + Oasis Smart SIM Europe SAS + Patrick Cao + patrick.cao&oasis-smartsim.com +53461 + SimiGon + Hagai Piechowicz + picho&simigon.com +53462 + Staerk Industries + Ulrich Staerk + uli&ustaerk.de +53463 + Internetworking Solutions Ltd + James Mugauri + james&inwsolutions.net +53464 + Puzzle ITC Deutschland GmbH + Mark Pröhl + proehl&puzzle-itc.de +53465 + inPoste.it S.p.A. + Massimo Colella + massimo.colella&tnotice.com +53466 + Buddha Global Conferencing & Registry Services. + ABHAY AANAND (ABHAY KUMAR SINGH) + biharabhay&gmail.com +53467 + JIP InfoBridge Co.,Ltd. + Naoki Henna + nao&info-brdg.co.jp +53468 + Bendigo Senior Secondary College + Trevor Lock + sysadmin&bssc.edu.au +53469 + Soundwave Networking + William Ono + swadmin&soundwave.net +53470 + Shenzhen hongfusheng Technology Co., Ltd.(HFS) + Jingkan Lu + lujingkan&163.com +53471 + Communication Components Inc + Allen Cohen + acohen&cciproducts.com +53472 + ALPLA Werke Alwin Lehner GmbH & Co KG + Thomas Lambertz + thomas.lambertz&alpla.com +53473 + Erwin Halder KG + Stefan Hirschbeck + pki&halder.de +53474 + AKTOR - Information system + Sergey Vykhodtsev + svykhodtsev&aktor-is.ru +53475 + North East Derbyshire District Council + L Thompson + technology&ne-derbyshire.gov.uk +53476 + Criminal Cases Review Commission + Helpdesk + helpdesk&ccrc.gov.uk +53477 + LST Forest AB + Martin Ålander + martin&lstforest.se +53478 + Everest Networks + Tilak Ravi + travi&everestnetworks.com +53479 + Tjugoett och Trettio AB + Johan Persson + johan&2130.se +53480 + Shanghai Horizon Information Tech. Co., Ltd. 
+ Mayson Huang + mayson.huang&shanghai-horizon.com +53481 + Valmet + Julius Elfving + julius.elfving&valmetpartners.com +53482 + Economia a.s. + Petr Králík + petr.kralik&economia.cz +53483 + FOR-A ELETEX Co., Ltd. + Takashi Ikeda + service_snmp&for-a.co.jp +53484 + KeySign NV + Jan Smets + jan.smets&keysign.eu +53485 + chinadaas.inc + Xu hui + xuhui&chinadaas.com +53486 + LLC INTECH + Andrey Kravchenko + kravchenko&intech-llc.ru +53487 + Linkin Technology Co., Ltd. + ZHANG, LIGANG + zhangligang&linkintec.cn +53488 + Maeeko Cat Housing Ltd. + Naoki Kobayashi + amphineko&futa.moe +53489 + Institute Of Physics and Power Engineering + Vadim Beletskiy + vbeletskiy&ippe.ru +53490 + INVENTURE Automotive Electronics Research & Development, Inc + Katalin Szűcs + katalin.szucs&inventure.hu +53491 + NPC SYSTEM + Guy FERRARO + contact&npcsystem.com +53492 + CapitalData + Didier Parisot + contact&capitaldata.fr +53493 + Sapcorda Services USA, Inc + Danilo Scardua + d.scardua&sapcorda.com +53494 + Apex Clean Energy, Inc. + Daniel Neal + daniel.neal&apexcleanenergy.com +53495 + SIMS + David Sims + domains&sims1.com +53496 + OPTIMUS TELECOM + Mikis de Bonneval + commerce&optimustelecom.fr +53497 + KosmosKosmos + Andreas Kosmowicz + techlog&kosmoskosmos.de +53498 + QIC Global Services Limited + Enrico Gassmann + egassmann&qicglobal.com +53499 + Austrian Airlines AG + Andreas Husch + andreas.husch&austrian.com +53500 + IVES-SYSTEM Sp. z o. o. + Jacek Kryszyn + jacek.kryszyn&ives-system.com +53501 + DC IT-Consulting + Dominik Chilla + info&dc-it-con.de +53502 + RealexPayments + Jason Fitzpatrick + jason.fitzpatrick&globalpay.com +53503 + Unassigned + Returned 2020-05-28 + ---none--- +53504 + Stephens College + Michael Reustle + infrastructure&stephens.edu +53505 + TELERYS COMMUNICATION + Olivier Gaudet + noc&telerys.com +53506 + Kaman Inc. + Reza Roohi + reza&kaman.ir +53507 + Fiserv Education Loans + Fiserv Global Cyber Security Services Education Loans + pki.services&fiserv.com +53508 + Rhein-Neckar-Zeitung GmbH + Stefan Hauck + edv-abteilung&rnz.de +53509 + Renaissance Lab + Todd Povilaitis + todd.povilaitis&outlook.com +53510 + Zharfpouyan Toos + Mohsen Vahabzadeh + info&zharfpouyan.net +53511 + ubsafa + Aaron Lewandowski + doppelar0n&ubsafa.de +53512 + Bright Access + Janarthanan Sundaram + j.sundaram&brightaccess.nl +53513 + andosto GmbH & Co. KG + Mark Völkl + info&andosto.com +53514 + DPNIC + Vladimir Kiyan + hostmaster&dp.ua +53515 + Medical Systems a.s. + Ing. David Pech + david.pech&msy.agel.cz +53516 + DVZ Schwerin GMBH + Jochen Laser + j.laser&dvz-mv.de +53517 + edwinit + Edwin van de Burgt + admin&edwinit.com +53518 + COASTAL CAROLINA FOOT & ANKLE ASSOCIATES, + KATHY N. WALKER + kathy.walker&instridefoot.com +53519 + w4v3 + pascal vergnaud + pascal&w4v3.com +53520 + APW Business Services + Adam Werner + adam.werner&cyberfxcorp.com +53521 + Metis Aerospace Ltd + Lee Carter + lee.carter&metisaerospace.com +53522 + EnableBit + Leonardo Torrella + leonardo&enablebit.com +53523 + Hangzhou Infogo Technology Co.,Ltd + Jiang Feng + jiangfeng&infogo.com.cn +53524 + EmbedWay Technologies (Shanghai) Corporation + HuangQi + marketing&embedway.com +53525 + Centrum Onkologii im. prof. F. 
Łukaszczyka w Bydgoszczy + Piotr Korowiecki + co&co.bydgoszcz.pl +53526 + Dell ATC + Martijn Groothuis + nl.ams.atc&dell.com +53527 + Brideweir Systems + Adrian Waters + ajpw&iinet.net.au +53528 + Viridian Group + Craig Monahan + Craig.Monahan&viridiangroup.co.uk +53529 + Town of Los Gatos + Chris Gjerde + cgjerde&losgatosca.gov +53530 + Tracel + Eder Luiz de Mello + eder&tracel.com.br +53531 + Glue Software Engineering AG + Sven Leupold + leupold&glue.ch +53532 + Kiratech Spa + Alessandro Menti + sysadmin&kiratech.it +53533 + Tenzor Tech Kft. + Daniel Vaszari + daniel.vaszari&tenzortech.hu +53534 + mediacomm + 王绍贤 (Wang Shaoxian) + wangsx&MediaComm.com.cn +53535 + nanjing zoogho electronic technology co. LTD + chaoyang + yangchao&zoogho.com +53536 + Information Assurance Specialists, Inc. + Derek Yerger + derek.yerger&iaspecialists.com +53537 + PQ PLUS GmbH + Her Jens Schübel + j.schuebel&pq-plus.de +53538 + Kreon Technology + Nicol van der Merwe + nicol&kreon.co.za +53539 + Mycronic AB + Per Treutiger + ianaoid&mycronic.com +53540 + Swimlane LLC + Dan Story + dan.story&swimlane.com +53541 + IPFX + John Benson + internet_domains&ipfx.com +53542 + Myntex Inc. + Geoffrey Green + ggreen&myntex.ca +53543 + Knorr Bremse Services GmbH + Stefan Hafner + stefan.hafner&knorr-bremse.com +53544 + XTRONIC GmbH + IT Support + it-support&xtronic.de +53545 + Rheinmetall AG + Manfred Schmitten + manfred.schmitten&rheinmetall.com +53546 + Nexapp Technologies Pvt Ltd + Pradeep Singh Tomar + pradeep&nexapp.co.in +53547 + City of Saratoga + Leo Salindong + infotech&saratoga.ca.us +53548 + Chilldyne, Inc. + Trevor Irwin + trevor.irwin&chilldyne.com +53549 + 北京网藤科技有限公司 (Beijing Net Teng Technology Co., Ltd.) + 靳涛 (Tao Tao) + jintao&wangtengtech.com +53550 + Fire Financial Services Limited + Owen O Byrne + owen.obyrne&fire.com +53551 + Pensar Development + Shellee Riverman + shellee.riverman&pensardevelopment.com +53552 + CAPREIT Limited Partnership + Mark Livingstone + m.livingstone&capreit.net +53553 + Arch Health Medical Group + Gerald Roca + gerald.roca&archhealth.org +53554 + Andynformatics + Philipp Schneider + p.schneider&andynformatics.de +53555 + Asthma and Lung Clinic + Krystle Bonnett + khanppoc&gmail.com +53556 + RIWELA LTD + William Martin + admin&riwela.com +53557 + Networking Services + Dudley Zelaya + dudley.zelaya&teletechnonet.com +53558 + ordinator + Alexandre SAINT-CRICQ + orionteam.web&gmail.com +53559 + GHIFARI160 + Ghifari Aditya + ghifari&ghifari160.com +53560 + Trustgrid + Marquis Calmes + marquis&trustgrid.io +53561 + Toyota Motor Corporation Australia (CVS) + Freddie Lewis + freddie.lewis&toyota.com.au +53562 + Nanjing WeiXiang Technology Limited Company + Geminal Tian + geminal&163.com +53563 + mit_waap + Wilma Pavitra Puthran + wilma.puthran&gmail.com +53564 + EHG Service GmbH + Fabian Bäumer + fabian.baeumer&ernstings-family.com +53565 + Knorr-Bremse Services GmbH + Uwe Podlech + uwe.podlech&knorr-bremse.com +53566 + paul cumberworth + paul cumberworth + lidgood&aol.com +53567 + SYSOCO + Gabriel VIVAS, R&D Sysoco + dev&sysoco.fr +53568 + MIT Manipal + Aashish Pai + aashishlovsachin99&gmail.com +53569 + COFEL + kevin jezak + jezak.kevin&cofel.biz +53570 + Beijing 6cloud Linsec Network Technology Co.,Ltd + mingjian Shi + shimingjian&6cloudtech.com +53571 + ZYL + Yuriy Zhelnov + info&zy1.ru +53572 + RF Designs + Bob Frady + bob&wallcloud.us +53573 + Alois Vitasek + Alois Vitasek + avitasek&seznam.cz +53574 + Complex Service, LLC + Sergey Yakimenko + info&cs-engr.ru +53575 + 
Transmit.Live + Mike Placentra + mike&transmit.live +53576 + Getslash GmbH + Tobias Hanisch + tobias.hanisch&getslash.de +53577 + ARCTARUS LIMITED + Joseph Marsden + josephmarsden&arctarus.co.uk +53578 + Genesee Hematology Oncology + Mary Spinney + MSpinney&ghopc.com +53579 + WaveFlex, Inc. + Shawn Flannery + sflannery&wave-flex.com +53580 + Within Reach Holding B.V. + Arjan van der Oest + arjan.vanderoest&enreach.com +53581 + BEYOND ORBIT LTD + Chin Yun Wang + contact&beyond-orbit.com +53582 + DEAN + Nick Fletcher + fletchern675&learn.mod.uk +53583 + Cigna + Ryan Zuroski + ryan.zuroski&cigna.com +53584 + DC Energy Management LP + Ware Adams + dceadmin&dc-energy.com +53585 + BB&T Securities - Pershing + Kevin Akselrod + kakselrod&bbtsecurities.com +53586 + Specto Paineis Eletronicos Ltda + Moacyr Franco Neto + moacyr.franco&specto.com.br +53587 + FIRESI s.r.o. + František Klika + firesi&firesi.cz +53588 + MIT_ACNT_MIB + Cleevan Alban Cardoza + cleevancardoza&gmail.com +53589 + LLEIDA S A S + Eva Pane + oid-info&lleida.net +53590 + Eastone Century Technology Co,.Ltd. + weiguo.lai + laiweiguo&etonetech.com +53591 + TERAPRO + Sergii Pylypchuk + s.pylypchuk&terapro.com.ua +53592 + K M B systems, s.r.o. + Jan Kraus + j.kraus&kmb.cz +53593 + National Highways + Paul Canning + Paul.Canning&nationalhighways.co.uk +53594 + Protocol Labs + Raul Kripalani + raul&protocol.ai +53595 + Lookback Inc + Forrest Alvarez + forrest&lookback.io +53596 + Cumulus Cloud Software and Consulting + Rainer Bieniek + rainer.bieniek&googlemail.com +53597 + StepStone Group + Christopher Bernadino + cbernadino&stepstoneglobal.com +53598 + Avicenna.AI + Cyril Di Grandi + Cyril.di-grandi&avicenna.ai +53599 + Canfield Scientific, Inc. + Robert A. Villar + robert.villar&canfieldsci.com +53600 + KLIMEX Medical Ltd. + Miklós Lászlóffy + laszloffy.miklos&klimexmedical.hu +53601 + One Fylde + Paula Nash + ceo&fyldecommunitylink.org.uk +53602 + The Blue School + Ian Hotston + blueictsupport&educ.somerset.gov.uk +53603 + Immersion4 SA + Patrick Sinz + patrick&immersion4.com +53604 + Fieldfisher + IT Technical Services + ittechnicalservices&fieldfisher.com +53605 + Onur Mühendislik + Oğuzhan Kayhan + lisans&onur.net +53606 + Intralinks, inc + BINU THOMAS + bthomas&intralinks.com +53607 + Enertech Global + Travis Hill + travis.hill&enertechgeo.com +53608 + OR.NET e.V. + Stefan Schlichting + stefan.schlichting&ornet.org +53609 + KoCo Connector GmbH + Joerg Dommel + joerg.dommel&kococonnector.com +53610 + B4ComTechnologies LLC + Leonid Kolpachyov + lkolpachev&b4comtech.com +53611 + geOrchestra + François Van Der Biest + psc&georchestra.org +53612 + Infinicomm Solutions + Biju Nair + biju.nair&infinicomm.io +53613 + HAX Enterprises Ltd. 
+ Andrew Green + andrew&haxent.ca +53614 + Frenzl IT-Services + Wolfgang Wicke + wwicke&gmail.com +53615 + TiTiT + Martin Bolbroe + post&titit.dk +53616 + pegase-cloud + Bernard BARRERE + bernard.barrere&pegase-cloud.com +53617 + Gravipet + Guillaume Sivade + guillaume&gravipet.com +53618 + Dalholm.net + Anders Jarl Dalholm + anders&dalholm.net +53619 + Altomax LLC + Evgueni Tzvetanov + support&altomaxtech.com +53620 + Sensys Gatso Australia + Barney Flint + b.flint&sensysgatso.com +53621 + Manipal Institute of Technology + Radhika Thagadur Srinivasamurthy + radhikats1996&gmail.com +53622 + DebOps + Maciej Delmanowski + iana&debops.org +53623 + MIRAIT Corporation + Masao Ishibe + ishibe.masao&mirait.co.jp +53624 + SMS Informacionnye tekhnologii Ltd + Sergey Parfenov + sergey.parfenov&sms-a.ru +53625 + Barrett Steel + michael ratcliffe + michael.ratcliffe&barrettsteel.com +53626 + Penzeys Spices + Daniel Gapinski + dan.gapinski&penzeys.com +53627 + Rinicom Limited + Garik Markarian + office&rinicom.com +53628 + California Dental Association + Gary Pilkington + prodreg&cda.org +53629 + Beijing Zhicheng Co-Creation Information Technology Co., Ltd. + Wang Zhi Quan + wzq&xjgeek.com +53630 + SunSpec Alliance + Thomas Tansy + tom&sunspec.org +53631 + Vertrics microSistemas S.R.L. + Nicolás Giussani + ventas&vertrics.com +53632 + De Data + Michael Burgess + mburgess&dedata.com.au +53633 + voipGATE S.A. + Marc Storck + mstorck&voipgate.com +53634 + Adyen N.V. + Bas van Ritbergen + bas.vanritbergen&adyen.com +53635 + Matricis Informatique + Jamie Le Tual + jamie.letual&matricis.com +53636 + Jonas Wahlberg + Jonas Wahlberg + jonaswahlberg1&hotmail.com +53637 + CL Networks Co., LTD. + Jake Chang + jake&clnetworks.co.kr +53638 + Stern-Koblikowski Foundation + Ze'ev Ben-Aron + israeliwolf1&gmail.com +53639 + Fonestar Sistemas S.A. + Daniel Cobo + idi&fonestar.es +53640 + Big Data Technology + Aleksey Kluchnikov + akluchnikov&carriers-capital.com +53641 + AJ Bell Ltd + Kevin Moore + kevin.moore&ajbell.co.uk +53642 + Sensys Gatso Group AB + Timo Gatsonides + t.gatsonides&sensysgatso.com +53643 + Sayenko Kharenko + Oleksandr Kartamyshev + okartamyshev&sk.ua +53644 + Tieto Czech s.r.o. + Martin Carbol + martin.carbol&tieto.com +53645 + galis.org + George Georgalis + george&galis.org +53646 + AZG Tech GmbH + Sascha Schmitz + sascha.schmitz&azg-tech.com +53647 + RAIDIX + Rufat Ibragimov + support&raidix.com +53648 + Userman + Aleksey Krylov + akrylov33&gmail.com +53649 + Fuzhou Teraway Information Technology Co.,Ltd + admin@teraway + admin&teraway.tech +53650 + Sekurbit Sverige AB + Marcus Westin + marcus&sekurbit.se +53651 + Backhaus Consulting GmbH + Stephan Backhaus + s.backhaus&backhaus-consulting.com +53652 + Staatsbosbeheer + Michel van Spijk + m.vanspijk&staatsbosbeheer.nl +53653 + DLM ELECTRONIQUE + COQUILLE Benoit + snmp&dlm-electronique.fr +53654 + D9 Technologies LLC + Sarvjit Pabla + spabla&d9now.com +53655 + Propulsor Technology, Inc. + Demian Nave + register&propulsor.com +53656 + Pacific Radiology + IT Operations + it.operations&pacificradiology.com +53657 + CETC34 + Huang Haoxiang + huanghaoxiang&outlook.com +53658 + St. Raphael Caritas Alten und Behindertenhilfe GmbH + Thorsten Bär + edv&srcab.de +53659 + Safe Sky Industries, Inc. 
+ Jakob Borg + jbo&safe-sky.net +53660 + EduCoach Consultants Inc + Baljeet Singh Bhatti + baljeet.singh&educoachconsultants.com +53661 + Here's work + Andrew The + andrew&burntash.com +53662 + KUZBASSHIMBANK + Gennadiy Gerasimov + grenadir2007&yandex.ru +53663 + Serous + D Gifford + davegifford&protonmail.com +53664 + BLP Mobile Paint + Kyle Stewart + kss&mobilepaint.com +53665 + BD Diesel Performance + Dan Reimer + admin&bddiesel.com +53666 + Certograph Ltd + Jacek Artymiak + jacek.artymiak&certograph.com +53667 + Biamino & Figli SPA + Dario Biamino + dario.biamino&biamino.com +53668 + Synergetic Management Systems + Ian MacRae + imacrae&synergetic.net.au +53669 + Agena Bioscience + Maggie Lowe + maggie.lowe&agenabio.com +53670 + Silumin-Vostok, LLC + Kirill Dmitrievich Lukin + it&silumin.kz +53671 + Forensic Risk Alliance Limited + Alex Pheysey + APheysey&forensicrisk.com +53672 + Alturing + Cabuzel Guillaume + guillaume.cabuzel&chronopost.fr +53673 + Coreit + Marjan Erceg + marjan.erceg&coreit.me +53674 + FGUE STC Atlas + Sergey Matveev + stargrave&stargrave.org +53675 + VESvault Corp + Jim Zubov + jz&vesvault.com +53676 + Eckelmann AG + Philipp Eckelmann + postmaster&eckelmann.de +53677 + PICA Group Pty Ltd + Peter Dodemont + iana.pen&picagroup.com.au +53678 + Brush New Zealand Limited + Stephen Irons + stephen&brush.co.nz +53679 + Bank of China Sydney Branch + Michael Yeung + itdept_sydney&mail.notes.bank-of-china.com +53680 + fivetek + Alfred Kim + alfred.kim&fivetek.com +53681 + European Institute for Gender Equality + Ramunas Lunskus + adminmail&eige.europa.eu +53682 + Ofcom + Jonathan Riches + registrar&ofcom.org.uk +53683 + Cisco Systems, Inc. + Joshua Dotson + josdotso&cisco.com +53684 + Circle Cardiovascular Imaging Inc. + Ben Park + ben.park&circlecvi.com +53685 + neurodine.com + Blazej Kaczmarek + fajny.masz.tornister&gmail.com +53686 + Saankhya Labs Pvt Ltd + Makarand G Kulkarni + makarand.kulkarni&saankhyalabs.com +53687 + 6YS Pty Ltd + Alun Carp + alun.carp&6ys.com.au +53688 + Toyota Tsusho Corporation + Naoki Kimura + 1100-ttc_oid&dist.toyota-tsusho.com +53689 + TAKEBISHI CORPORATION + Masatoshi Ike + Masatoshi.Ike&takebishi.co.jp +53690 + LanRoad LLC + Andrew Makarov + a.makarov&lanroad.ru +53691 + Citizens & Farmers Bank + Jonathan Olson + jolson&cffc.com +53692 + Mount Pleasant Waterworks + Chase Hassold + chassold&mpwonline.com +53693 + Miami Pulmonary Specialists + Linda Maggi + miamipulmonaryspecialist&gmail.com +53694 + CapSpecialty + Tim Moermond + tmoermond&capspecialty.com +53695 + AMC + Nathan Rees + n.rees&amcllc.net +53696 + BITECH SPA + GENNARO D'ANGELO + gennaro.dangelo&bitech.it +53697 + Liceo Classico Statale "G.B. Morgagni" + Giano Donato + donato.giano&morgagni.cloud +53698 + Gray Day Cafe + Paul Wojcicki-Jarocki + paul&graydaycafe.com +53699 + mecom Medien-Communikations-Gesellschaft mbH + André Sowinski + sowinski&mecom.de +53700 + PrimeWest Health + Troy Ronning + troy.ronning&primewest.org +53701 + Microdyne Systems + Marco Zoggia + mzoggia&microdynesys.com +53702 + madeIT.hu Ltd. + János Lajos + janos.lajos&madeit.hu +53703 + Gotthardt Healthgroup AG + Sven Peter + sven.peter&gotthardt.com +53704 + Avarn Security AS + IT Department Nokas + adsecurity&nokas.com +53705 + Jatom Systems Inc. + Kolin Baetz + kbaetz&jsitelecom.com +53706 + North Plains Electric Cooperative, Inc. 
+ Wayne Brockwell + wayne&brockwelltech.com +53707 + Arbalo AG + Bruno Kaiser + admin&arbalo.ch +53708 + Kateza Realty + Massimiliano Pala + cto&katezarealty.com +53709 + Conectate Soluciones y Aplicaciones SL + Fernando Latorre + gestion&conectate-soluciones.com +53710 + Tjärnberg Service AB + Hans Tjärnberg + hans.tjernberg&hotmail.com +53711 + huikongchuanghengData System Co., Ltd + chujia.zhao + hkchsjxtyxgs&163.com +53712 + Clarios + Jesse Bociek + jwatts&concurrency.com +53713 + MCUplay Technology + huang quan + 121014889&qq.com +53714 + ZMS Insurance Company + Shilnikov Andrey Borisovich + Andrey.Shilnikov&zms.chita.ru +53715 + Podravka d.d. + Krunoslav Vecenaj + krunoslav.vecenaj&podravka.hr +53716 + Dexy Co d.o.o. + Ljuban Milosevic + ljuban.milosevic&dexy.co.rs +53717 + NCS Pte. Ltd. + Koh Vui Chueh + vuichueh&ncs.com.sg +53718 + GETEC net GmbH + Ulf Weber + aw&getec-net.de +53719 + Sopra Steria AG Schweiz + Manuel Duffner + manuel.duffner&soprasteria.com +53720 + Q2 Holdings, Inc + Robert Shelton + rob.shelton&q2ebanking.com +53721 + Wyoming Judicial Branch + Keven McGill + kmcgill&courts.state.wy.us +53722 + SpecPromDizajn LLC + Frolov Dmitry + info&spd.net.ru +53723 + Crooked + Spencer Butler + spencerunderground&gmail.com +53724 + Council of Dubinia + Joel Dubiner + joel&dubiner.com +53725 + Montajes Y Proyectos Electrotécnicos, S.L. + Luis Alvarez + lalvarez&mype.com +53726 + CGI INFORMATION SYSTEMS AND MANAGEMENT CONSULTANTS ESPAÑA, S.A. + David Graciani Izquierdo + david.graciani&cgi.com +53727 + OXYLIOM + Lamine Diouf + lamine.diouf&oxyliom.com +53728 + NPC, Inc. + Danielle Gerko + danielle.gerko&npcweb.com +53729 + celos Computer GmbH + Stephan Hardtmann + stephan.hardtmann&celos.de +53730 + GRUPOVIVA S.A. DE C.V. + Yury Dubon + ydubon&grupovesta.net +53731 + evanto media AG + Sven von Kaenel + svk&evanto.de +53732 + Stelkom d.o.o. + Bostjan Stros + bostjan.stros&stelkom.si +53733 + Exterion Media + Arthur Gibson + arthur.gibson&exterionmedia.co.uk +53734 + Forerunner (Electronic) Ltd. + Alun Hawkins + alun&forelec.co.uk +53735 + Stella Technology + Sean Smith + ssmith&stellatechnology.com +53736 + PRIVATE ENTERPRICE NETWORK + Keth + kennethsorianomalonga&outlook.com +53737 + KellemannConsult ApS + Jacob Kellemann + jacob&kellemann.dk +53738 + Beijing Tasson Technology Ltd. + Xian Changming + xianchangming&tasson.cn +53739 + IDRAK TECHNOLOGY TRANSFER + Rufat Aliyev + idrak.licenses&idrak.az +53740 + Magnit Pharma + Marat F Sunagatullin + admins&siamed.ru +53741 + Banque de Commerce et de Placements SA + Vojin COLAK-ANTIC + netsec&bcp-bank.com +53742 + Deutsche Gesellschaft für Internationale Zusammenarbeit (GIZ) GmbH + Sven Koch + oid&giz.de +53743 + Domino's Pizza, Inc + Chad La Joie + chad.lajoie&dominos.com +53744 + Wejds + David Scott + contact&wejds.com +53745 + Ideal Concept Holdings + Ryan Schreiber + ryan.schreiber&idealconcept.ae +53746 + DBMUD + Marc Michels + mm&david-bock.de +53747 + Korbix + Eric Dey + iana-pen-registry&korbix.net +53748 + TOC PERU SAC + Alexander De Feudis Kockova + ad&toc.pe +53749 + Dynamics Edge + Arden John Barreras + arden&dynamicsedge.com +53750 + SiRcom + Jamie Warner + jamie.warner&sircomusa.com +53751 + esciris GmbH + Fabian Steiger + fabian.steiger&esciris.de +53752 + Roadstarter Networks + Rouell Adriano + roadstarter&gmail.com +53753 + ITS-chita + Hiroyuki Oomiya + oomiya&its-chita.com +53754 + Groundhog Technologies Inc. 
+ Alan Hwang + alan.hwang&ghtinc.com +53755 + NOVUS UKRAINE LLC + Ighor Zamickin + infobezpeka&novus.ua +53756 + National Széchényi Library + Mikael Bak + bak.mikael&oszk.hu +53757 + 768BIT Ltd + Craig Smith + craig.smith&768bit.com +53758 + Tribeca Heavy Industries LLC + Michael Smith + nic&tribeca.com +53759 + Inscyth, Inc. + John Martin + jmartin&inscyth.com +53760 + Erich Utsch AG + Christian Nölle + christian.noelle&utsch.com +53761 + Arvato Rus LLC + Dmitry Turkin + dmitry.turkin&arvato.ru +53762 + Rema Tip Top Holdings UK Ltd + Alan Smith + it&tip-top.co.uk +53763 + Vorwerk International Strecker & Co. + Michael Arends + michael.arends&vorwerk.de +53764 + Semple Consulting Services Ltd + Jonathan Semple + iana&sempleserve.com +53765 + Digicomp Engenharia e Tecnologia Ltda + Thiago Torres Alves + thiago.torres&digicomp.com.br +53766 + Robert Boucneau, Individual + Robert Boucneau + bobboucneau&gmail.com +53767 + NEC Energy Solutions + Matthew Bush + mbush&neces.com +53768 + MA Lighting Technology GmbH + Christian Tarne + christian.tarne&malighting.de +53769 + Cancer Care Associates PC + Annalynn Hall + lhall&ccapc.com +53770 + Belderbos ICT Consultancy & Beheer + Jeroen Belderbos + jeroen&belderbosict.nl +53771 + Centro De Hematologia Y Oncologia Medica Integral + Ruth Calcano + rnco76&gmail.com +53772 + Agence Technique de la Charente + Yann Bogdanovic + ybogdanovic&atd16.fr +53773 + Sonion A/S + Said Jawad + sja&sonion.com +53774 + WV Communications Inc + Uri Yulzari + wv-purchasing&wv-comm.com +53775 + 1oT OÜ + Märt Kroodo + mart.kroodo&1ot.com +53776 + JINR + Ilya Slepnev + islepnev&jinr.ru +53777 + sodeac.org + Sebastian Palarus + s.palarus&googlemail.com +53778 + Veea Inc. + Frank Zarzuela + it&veea.com +53779 + Linsys Ltd + Dmitry Tatarinov + tatarinov&lin-sys.ru +53780 + RA BU ZU + P ADMIN + padmin&rabuzu.cloud +53781 + Rosling King LLP + Cert Admin + cert.admins&rkllp.com +53782 + CANCOM Managed Services GmbH + Viktor Schmidt + core&cancom-pironet.de +53783 + Amplex Corporation + Daniel Bullard + danielb&amplex.com +53784 + Shenzhen Rongan Networks Technology Co. Ltd + xiangzheng + zhengxiang&rongannetworks.com +53785 + Derycks.net + Ronald Deryck + rsderyck&gmail.com +53786 + SRSP group of Industries + Sudhina Kumar GK + sudhina.kumar.gk&gmail.com +53787 + Robinhood Markets, Inc. + Brandon Stout + brandon.stout&robinhood.com +53788 + Cumulus Media Stockton + John Phelan + john.phelan&cumulus.com +53789 + ACA IT-Solutions + Michael Waterman + m.waterman&aca-it.nl +53790 + AnyGaming Ltd. + Matthias Merkel + matthias&anygaming.co +53791 + Department of Defense + Keith Tucker + keith.r.tucker6.civ&mail.mil +53792 + Technically Alter'd, LLC + Christopher S. Webster + cwebster&technicallyalterd.com +53793 + Rimot.io Inc. + James Craig + james.craig&rimot.io +53794 + LERCTR Consulting + Larry Rosenman + ler&lerctr.org +53795 + Liceo scientifico statale G. Salvemini + Francesco Brunetti + administrator&liceosalvemini.it +53796 + InnovAge + Alvin Smith + Asmith2&myinnovage.com +53797 + Edmonton Transit Service + Aly Dharshi + aly.dharshi&edmonton.ca +53798 + OpenEGrid Inc + Vijay Israni + jay&openegrid.com +53799 + Zhejiang Hengrui Technology Co., Ltd. + Roy Zhang + roy&hresys.com +53800 + Aknet ISP + Temir Umurzakov + tima&aknet.kg +53801 + Utility Connect B.V. 
+ René Baars + Rene.baars&utilityconnect.nl +53802 + Rexel UK Ltd + S Gill + sandeep.gill&rexel.co.uk +53803 + London Data Engineering Ltd + Simon Duffield + simon.duffield&ldeng.co.uk +53804 + NOVO DR + Reuven SHTEIN + reuven&novo-dr.com +53805 + Atamate Ltd + Matthew Gakuo + matthew.gakuo&atamate.com +53806 + Valenta Pharm JSC + Alexey Arkhipov + admins&valentapharm.com +53807 + PHYSICIAN ASSOCIATES OF JACKSONVILLE, PA + NAG RAVICHANDRAN, MD + JAXDRNR&YAHOO.COM +53808 + VIAVI Solutions Inc. + Ward Cobleigh + ward.cobleigh&viavisolutions.com +53809 + RUN Polito + Corrado Mulas + areait&runpolito.it +53810 + City of Portland, Oregon + David Carter + iana&portlandoregon.gov +53811 + vestwoods + ZhanQun Ye + zq.ye&vestwoods.com +53812 + Wave-In Communication Inc. + Scott Shan + scott.shan&wavein.com.tw +53813 + cinoware - NFN GmbH + Walter Reif + walter.reif&cinoware.com +53814 + ALP ENERJİ SİSTEMLERİ BİLGİ ve TEKNOLOJİSİ HİZMETLERİ SAN ve TİC. LTD. ŞTİ. + Fuat SAYGI + fuat.saygi&tovura.com +53815 + Engramo Project s.r.o. + Jaroslav Macků + jarek.macku&team.engramo.cz +53816 + motv.eu + Jan Slavik + motv&motv.eu +53817 + Concentra Consulting Ltd + David Batt + infrastructure.seniors&concentra.co.uk +53818 + Oxipit + Darius Barušauskas + darius&oxipit.ai +53819 + Virtuologic + Malek Kemmou + kemmou&virtuologic.com +53820 + THOMAS SIGNE CHILE SPA + MARIA JOSE MARTINEZ VIVAS + mariajose.martinez&signe.es +53821 + Exabeam, Inc. + Chandra Siva + chandra&exabeam.com +53822 + National Bank of the Kyrgyz Republic + Karabaev Azamat + akarabaev&nbkr.kg +53823 + Bedroq Ltd + Edward Armitage + technical&bedroq.co.uk +53824 + GypsyBud + Ashutosh Sharma + Ashutosh2423&gmail.com +53825 + Dottikon Exclusive Synthesis AG + ICT System Administrator + it-reg&dottikon.com +53826 + Revera + Slava Tarasov + Slava.tarasov&revera.co.nz +53827 + Japan Registry Services Co., Ltd. + Akemi Kato + oid-admin&jprs.co.jp +53828 + Institut Nicolau Copernic + Juan Martin + xarxa&copernic.cat +53829 + LG Electronics + Jongwan Park + jongwan.park&lge.com +53830 + Tadeu Organics + Tadeu Pinheiro + prp&tadeu.org +53831 + Partei der Humanisten + Dmitrij Paramonov + dmitrij.paramonov&it.diehumanisten.de +53832 + skynetflow.com + Dmitrii Ogarkov + df&skynetflow.com +53833 + PT. Kreasi Rekayasa Indonesia + Alfian Azizi + azizi&kirei.co.id +53834 + Rockrose Energy plc + Edward Armitage + technical&bedroq.co.uk +53835 + KV2 Audio International spol. s r.o. + Vlastimil Labsky + vlasta.labsky&gmail.com +53836 + Genesee Cancer & Blood Disease Treatment Center + Jayme Carpenter + geneseecancer&comcast.net +53837 + Obsługa Kancelarii + Jacek Piasecki + root&enotariat.eu +53838 + Treality SVS, LLC. + Jeremy Yoke + jeremy.yoke&trealitysvs.com +53839 + In Touch Ministries + Matthew Johnson + matthew.johnson&intouch.org +53840 + Aehoo Networks + Tadeu R. P. + falhas&aehoo.net +53841 + ENA Energi AB + Lena Wreder Quick + lena.wreder&enae.se +53842 + Holland AI B.V. + Ayoub Charehbili + aci&holland-ai.com +53843 + WKO Inhouse GmbH der Wirtschaftskammern Österreichs + Andreas Fennes + pen.iana&inhouse.wko.at +53844 + The Electoral Commission + Lukasz Mazek + lmazek&electoralcommission.org.uk +53845 + DF Software Lab + Dmitrii Ogarkov + df&dfsl.ru +53846 + BPCE Infogérance et Technologies + Laurent TALAND + Laurent.TALAND&bpce-it.fr +53847 + Gizelle Manoah P C , Inc + Anne-Marie Gracia + drannemariegracia&gmail.com +53848 + Marker Therapeutics Inc. + Donnie Lash + dlash&markertherapeutics.com +53849 + PT. 
Callysta Multi Engineering + Henky Prayoga + henky.prayoga&callysta-engineering.com +53850 + Gaeltex Group + Guy Robertson + iana.pen&gaeltex.group +53851 + Bs-Logistic AB + Jaakko Aho + johan.soderholm&cgi.com +53852 + INNOVATUS TECNOLOGIA + ROGERIO AVELINO COSTA + rogeriocosta&innovatus.com.br +53853 + Saudi Authority for Intellectual Property + IT Operations + it_license&saip.gov.sa +53854 + Healtis LLC + Eduard Grebenyukov + info&healtis.ru +53855 + PatientComp LLC + Joseph D Termine + joseph.termine&patientcomp.com +53856 + Automation and Control Limited + Iain Twentyman + PEN&aacl.co.nz +53857 + Av-Comm Pty Ltd + Malcolm Faed + malcolm&avcomm.com.au +53858 + Ariadne Security Foundations + Russell Gregg + rusty.gregg6+IANA&gmail.com +53859 + NTHU CS + Pin-Yuan Chen + pinyuan615&gmail.com +53860 + geomer GmbH + Martin Schroeder + emes&geomer.de +53861 + Raytheon CASL + David Gulbransen + david.gulbransen&raytheon.com +53862 + Enphase Energy, Inc. + Ravindra + infosec&enphaseenergy.com +53863 + Aisle Systems Sweden AB + Elias Norberg + info&aisle.se +53864 + Afiniti + Sohaib Athar + sohaib.athar&afiniti.com +53865 + Next Today + Minjin Chen + twnexttoday&gmail.com +53866 + theflatnet.de + Kadir Mueller + kadir.mueller&theflatnet.de +53867 + PS Logistics + kevin phang + support&pstrans.com +53868 + ONCF + mohamed ZEGMOUT + zegmout.mohamed&oncf.ma +53869 + OPNsense + Ad Schellevis + project&opnsense.org +53870 + Framsteg GmbH + Olivier Debenath + olivier.debenath&framsteg.ch +53871 + POLYTEC HOLDING AG + Andreas Moser + andreas.moser&baseit.at +53872 + Cibersur Unip Lda + Vitor Manuel de Lemos Dinis + vitor.dinis&cibersur.pt +53873 + Alexander Heights Family Practice + Dr Penny Wood + admin&ahfp.net.au +53874 + LNK Systems Muntenia + LNK Team + info&lnk.ro +53875 + Infonics Solutions + Dr. Surendra Pal + dr.surendrapal&gmail.com +53876 + Alpha Design Technologies Pvt Ltd + Hanumanatha Rao V + hanumantha&adtl.co.in +53877 + DP World London Gateway + Matthew Tweedie + LGITInfrastructureEngineersTeam&londongateway.com +53878 + OpenEmail IO + Chinthaka Deshapriya + chinthaka&cybergate.lk +53879 + East London NHS Foundation Trust + Usman Malik + usman.malik2&nhs.net +53880 + EDITH DIGITAL + VALENTIN POUILLART + valentin&cvdesignr.com +53881 + Siam University + Kritphong Mongkhonvanit + kritphong&siam.edu +53882 + ACM Metal Forming Ltd. + Jason Stuart + j.stuart&acmmetalforming.com +53883 + Elkhorn Public Schools + EPS Technology + epstechnology&epsne.org +53884 + Thorium Technologies Inc. + Frederick Care + fred.carle&thorium90.io +53885 + GEOIDE (Crypto&Com) + Aurélien Darragon + ssi&geoide.fr +53886 + Monol International Education Institute + Norieta Langpawen + norietalangpawen.8&gmail.com +53887 + Web Sensing LLC + Stephen Taylor + stnh.email&icloud.com +53888 + Beijing Changyang Technology Co.,Ltd. 
+ Guodong Zhang + guodong.zhang&cy-tech.net +53889 + Richpower New Energy + Ge Houyi + gehy&richpower-china.com +53890 + Trilogysystem + Chiodaroli Giuseppe + assistenza&trilogysystem.it +53891 + Identify3D + Chris Adkins + chris&identify3d.com +53892 + NTT DATA SMS Corporation + Akira Kobuki + kobukia&nttdata-sms.co.jp +53893 + Teijin Aramid + Server Management + servermanagement&teijinaramid.com +53894 + WVP Health Authority EHR Team + Joshua Kubli + jkubli&mvipa.org +53895 + Qulsar, Inc + Bala Balasubramaniam + tbalasubramaniam&qulsar.com +53896 + Jane Street Group, LLC + OID Administrator + oid-admin&janestreet.com +53897 + BG Klinikum Unfallkrankenhaus Berlin gGmbH + Ute Wackernagel + ute.wackernagel&ukb.de +53898 + FAIRSHELL + Vivien MALERBA + vmalerba&gmail.com +53899 + National bank of Slovakia + Michal Vyboch + postmaster&nbs.sk +53900 + bcsner.com + Xintong Zhou + zxt&bcsner.com +53901 + Wivity Inc. + Jorg Brakensiek + jorg&wivity.com +53902 + Arbala Systems + Michael Henry + mhenry&arbalasystems.com +53903 + Q-Net Security, LLC + Scott Chaney + schaney&qnetsecurity.com +53904 + Observatoire Astronomique de Strasbourg + Christophe SAILLARD + informatique&astro.unistra.fr +53905 + Agile Workspace Ltd + Daniel Hope + daniel.hope&smartalock.com +53906 + Alaska Permanent Fund Corporation + Shawn Calhoon + scalhoon&apfc.org +53907 + Command Alkon + Chad Eldridge + celdridge&commandalkon.com +53908 + North America Fuel Systems Remanufacturing LLC + Andrew Dendel + andrew.dendel&nafsreman.com +53909 + Thrall Software LLC + Eric Thrall + thrallsoftware&gmail.com +53910 + Eaton Energy Automation Solutions (EAS) Division + Roger K Alexander + rogeralexander&eaton.com +53911 + Shenzhen Channlink Technology Co.,Ltd + Dai wenjun + admin&channlink.com +53912 + Hi-Tech LLC LTD + Nikolay Vasilievich Sokolov + sokolov&iva-tech.ru +53913 + GETSmart Analytics Inc. + Henry Schriemer + dr.henry.schriemer&gmail.com +53914 + CofNet Co., Ltd + Cof Lee + sysyear&163.com +53915 + JSC Antiplagiat + Biryukova Ekaterina + support&antiplagiat.ru +53916 + NHS Lothian + Juergen Caris + juergen.caris&luht.scot.nhs.uk +53917 + ALTECH Solutions And Consulting SL + Lluís Gener + llgener&altech.es +53918 + DeWitt County + Joey Trungale + jtrungale&co.dewitt.tx.us +53919 + Sichuan Odot Automation System Co., Ltd + Raphael Xiong + Raphael&odot.cn +53920 + Taikang Insurance Group + Lin Zhang + zhanglin78&taikanglife.com +53921 + ConnectiX Technologies Pty Ltd + David Gardner + david.gardner&connectix.com.au +53922 + ULAK HABERLEŞME A.Ş. + Tufan Oruk + tufan.oruk&ulakhaberlesme.com.tr +53923 + ControlPay BV + Andrew Chekhlatyy + iana-pen&controlpay.com +53924 + va-Q-tec AG + Thomas Grosser + thomas.grosser&va-q-tec.com +53925 + Diakonie Stetten e.V. + Ruben Wagner + edv&diakonie-stetten.de +53926 + ABC PEDIATRICS, P.A. + EDUARDO RIVAS + ABCPED&MSN.COM +53927 + KIRS LLc + Pavel Reutov + support&kirs.ru +53928 + PT. Datacomm Diangraha + Budi Setiaji + hostmaster&datacomm.co.id +53929 + zServe Networks + Lewis Marsden-Lambert + lewis.lambert&zserve.co.uk +53930 + Enter SystemSolutions OY + Juha Lehtinen + juha.lehtinen&enter.fi +53931 + QLABS, Ltd. + Yaroslav Kharitonov + yaroslav.kharitonov&qlabs.cc +53932 + Secure Industries Inc. + Murat Demirten + murat&secure.industries +53933 + RND.center + Yuri Korzhenevsky + yura.nevsky&gmail.com +53934 + Marius Pedersen a.s. 
+ Radek Vopěnka + radek.vopenka&mainstream.cz +53935 + ADNS Airborne + Brent Ditri + brent.ditri1&navy.mil +53936 + PXiSE Energy Solutions, LLC + Sergiy Bondar + sergiy.bondar&pxise.com +53937 + Precision Practice Management + Wayne Schiermeyer + wschiermeyer&precisionpractice.com +53938 + Chilicon Power, LLC + Alexandre Kral + lx&chiliconpower.com +53939 + Hertfordshire Bedfordshire and Luton ICT + John Edirimanasinghe + J.Edirimanasinghe&nhs.net +53940 + Planbox + Brendan Wheble + brendan.wheble&planbox.com +53941 + PrivateDNS Pty Ltd + Peter Bos + noc&privatedns.net.au +53942 + GMVT GmbH + Norbert Nather + it&gmvt.de +53943 + ABL GmbH + Dr. Andreas Mull + andreas.mull&abl.de +53944 + Pharazon AB + Antti Hätinen + support&phz.fi +53945 + TEK TRIBE + Levi Scharf + levi&tektribe.net.au +53946 + JACJ IT SOLUTIONS, LLC + James Johnson + jjohnson&jacjit.com +53947 + Sweroam + Mikael Brostrom + iana.pen&sweroam.se +53948 + Norealp + FERNANDES Jorge + admin-nord&norealp.com +53949 + Capio CFR A/S + Jens-Peter Vraa Jensen + it-afdelingen&capiocfr.dk +53950 + Klebl GmbH + Florian Heßlinger + florian.hesslinger&klebl.de +53951 + Arizona Blood and Cancer Specialists, PLLC + Evelyn Brantley + Evelyn.Brantley&oneoncology.com +53952 + LEGAL SERVICES SOCIETY + Lloyd Dean + helpdesk.computer&lss.bc.ca +53953 + SUN Behavioral Health Inc. + Jeremy Smith + jsmith&sunbehavioral.com +53954 + DECIMAL PLATFORM - LDA + Manuel Oliveira + webmaster&appx.pt +53955 + Treetown Tech LLC + Kris Schilling + kris.schilling&treetowntech.com +53956 + Alexei Voyageman Global Cyber Verifying Service + Alexei Voyageman + alexeivoyageman&protonmail.com +53957 + Easterseals Bay Area + Aza Rainey + IT_Noc&esba.org +53958 + Adaptive Energy LLC + Tom Westrich + tom.westrich&adaptiveenergyllc.com +53959 + SunPower Corporation + Christopher Fox + chris.fox&sunpower.com +53960 + Gateview Technologies + Matthew Bush + mbush&gateview.com +53961 + WaferPath Inc. + James McCartney + jmccartney&waferpath.com +53962 + Cannonia + Joshua Cannon + joshua.cannon+iana&me.com +53963 + Shanghai Jundo International Logistics Co., Ltd. (formerly 'Jundo Online, LLC') + Li, Bo + li.bo&jundo.net.cn +53964 + T&T sistemi R&D + Simone Chiesi + technologies&tetsistemi.com +53965 + Julian Kotysch + Julian Kotysch + julian&kotysch.de +53966 + jamaillia.net + Martin Rimmele + martin&jamaillia.net +53967 + Sollatek UK + Carlos Sainz Martinez + carlos.martinez&sollatek.com +53968 + Infotek + Thomas Ritou + watom&ritou.me +53969 + Schneider Electric Software Netherlands + Cees Kruijf + cees.kruijf&aveva.com +53970 + ICT Concept B.V. + Richard Huveneers + RHuveneers&ict-concept.nl +53971 + Office of Public Works + Martin Malone + martin.malone&opw.ie +53972 + Celitech Inc. + Ahmad Al Fares + afares&celitech.com +53973 + ABB Power Protection SA + Paolo Pezzino + paolo.pezzino&ch.abb.com +53974 + SOGECAP + Paul Narbonneau + paul.narbonneau&socgen.com +53975 + UGuard Technology Co. LTD + IANA-PEN-ManagementTeam + pen-reg&uguard.com.tw +53976 + Trilobit + Vitor Gomes + vitor.gomes&trilobitglobal.com +53977 + LaserNet + Robert Calay + appdevelopr&lasernet.com +53978 + West Air Gas & Equipment + Graham Howe + ghowe&westairgases.com +53979 + Seti Seguranca E Tecnologia Na Internet Ltda. Me + Fábio Depin + fabio&setinet.com.br +53980 + Motech Solutions Ltd + Michael Moane + michael.moane&motechsolutions.co.uk +53981 + Socket Ltd. 
+ Damir Franusic + damir.franusic&socket.hr +53982 + OZON + Thierry Fournier + thierry.fournier&ozon.io +53983 + Centro de Hematología y Oncología del Sur, CSP + Lorraine Feliciano Martinez + lorrainefeliciano&yahoo.com +53984 + Havs- och vattenmyndigheten + Christer Mägi + christer.magi&havochvatten.se +53985 + CirrusPoint Solutions Inc. + Kevin Martin + admin&cirruspoint.com +53986 + Mast Bazaar + Madhu Ramanujam + madhu&mastbazaar.com +53987 + mpDev + Per Schratz + mpdevbinero&gmail.com +53988 + P-X Systems + Derk Bell + derk&p-x.systems +53989 + Vyera Pharmaceuticals + Michael Kolias + systems&vyera.com +53990 + Strategic Digital Defense, LLC + david t. klein + david&strategicdigitaldefense.com +53991 + tecnint hte + Mario Sangalli + msangalli&tecnint.it +53992 + Rausch Sturm + IT Admin + itadmin&rsieh.com +53993 + Ifinet srl + Federico Vincenzi + networking&ifinet.it +53994 + Schaeffer-AG + Tobias Florek + tf&schaeffer-ag.de +53995 + ZeeVee, Inc. + Jeremy Greene + jeremy&zeevee.com +53996 + King's College + James O'Meara + help&kings.edu +53997 + ZampleWorks + Anders Runesson + anders&runesson.info +53998 + Documo Inc + Matthew Herrera + matt&documo.com +53999 + Cathworks Ltd. + Ben Bloom + ben&cath.works +54000 + Springfield College + Nadim El-Khoury + nel-khoury&springfield.edu +54001 + Hangzhou Vcard Technology Co.,LTD + Wulei Li + cooli&163.com +54002 + Westfalen AG + Dimitri Schlee + admin&westfalen.com +54003 + Vodafone Idea Limited + Tarun Kumar Sikri + tarunkumar.sikri&vodafoneidea.com +54004 + Seminole Electric Cooperative, Inc + William Simmons + wsimmons&seminole-electric.com +54005 + Newland Medical Associates + Danielle Coleman + Dcoleman&newlandmedical.com +54006 + Cetrtapot + Ziga Zvan + domene&cetrtapot.si +54007 + Skalio GmbH + System Department + iana&skalio.com +54008 + Datora Mobile Telecomunicacoes SA + Samy Uziel + samy.uziel&datora.net +54009 + Rtek Software + John Reynolds + support&rteksoft.uk +54010 + FansWiFi + Sam Yeung + admin&fanswifi.com +54011 + Dr. Bhavesh Patel + Kellie Lulas + klulas&drgarg.net +54012 + Network Lubbock, Inc. + Dennis Wisdom + iana&networklubbock.com +54013 + Fink Telecom Services GmbH + Andreas Fink + afink&fink-telecom.com +54014 + Zoox + Benjamin Pappas + bpappas&zoox.com +54015 + SUPERSYSTEMS + Pushpender Sangwan + ps&supersystems.co.in +54016 + TodoAsap + Marek Zakrzewski + marek&todoasap.com +54017 + Implenia Ltd. + Alexander Bösch + alexander.boesch&implenia.com +54018 + LLC MasterPlast + Aleksandr S. Chirva + admin&wplast.ru +54019 + AMSORT Sp. z o.o. 
+ Jakub Hołdys + jakub.holdys&amsort.com +54020 + Joint Stock Company "Scientific research institute "Rubin" + Shivrin Aleksandr + inforubin&rubin-spb.ru +54021 + Ubirch GmbH + Waldemar Gruenwald + waldemar.gruenwald&ubirch.com +54022 + Eram Data Center Infrastructure + Amir Esmaeel Hassanpour + hassanpour&eramco.ir +54023 + Drivio LTD + Dmitry Sigitov + ops&driv.io +54024 + Planzer IT + Kevin Stumpp + kstumpp&planzer.ch +54025 + SpeedFI Inc + Dalton Gilmore + support&speedfi.ca +54026 + QSJ + Michael Hafner + a725.170131&gmail.com +54027 + Refinitiv + Joe OHalloran + joe.ohalloran&refinitiv.com +54028 + RPM ECO + Cédrik Dumoulin + cdumoulin&rpm.eco +54029 + Les Entreprises Dominic Payette + Cédrik Dumoulin + cdumoulin&edpco.ca +54030 + NekomimiSwitch.com + Huanjie Zhu + iana-pen&public.swineson.me +54031 + Bloom Energy + James Collins + DLIanaNotices&bloomenergy.com +54032 + Hatco Corporation + IS Department + admin&hatcocorp.com +54033 + Retriever Services, LLC + Josh Wilson + josh&retrieversvc.com +54034 + PKIIFY, LLC + Josh Wilson + josh&pkiify.com +54035 + The Knot Worldwide Inc + Michael Snyder + msnyder&theknotww.com +54036 + ANA Technology Partner + Alan Baugher + alan.baugher&anapartner.com +54037 + American Litho Inc + Noah Liveris + noahl&alitho.com +54038 + TrueSpeed Communications Ltd + Richard Smith + richards&truespeed.com +54039 + MIMAKI ENGINEERING CO., LTD. + Osamu Igarashi + osamu.igarashi&mimaki.com +54040 + Hamburger Hochbahn AG + Clemens Christensen + network.support&hochbahn.de +54041 + Isagenix International Inc. + Craig Owings + craig.owings&isagenixcorp.com +54042 + Amen Clinics + Alireza Madi + amadi&amenclinics.com +54043 + RiverLand Federal Credit Union + George Lunsford + george&riverlandcu.org +54044 + Smetak Holdings, LLC + Matthew Smetak + matthew&smetak.org +54045 + Madonna Rehabilitation Hospital + Terri Carstenson + tcarstenson&madonna.org +54046 + Data Patterns India Pvt Ltd + Navin X Raja + itsadmin&datapatterns.co.in +54047 + Stadtwerke Sindelfingen GmbH + Farhad Wiese + f.wiese&stadtwerke-sindelfingen.de +54048 + steep GmbH + Patrick Matuschczyk + it&steep.de +54049 + All for One Group AG + Andreas Bergen + andreas.bergen&all-for-one.com +54050 + Stichting Abrona + Jeffrey van Haeften + jeffrey.van.haeften&abrona.nl +54051 + Village of Menomonee Falls + Ken Bringa + nadministrator&menomonee-falls.org +54052 + Envieta Systems LLC + Jeff Hahn + jeff.hahn&envieta.com +54053 + SoloKeys, Inc + Nicolas Stalder + nicolas&solokeys.com +54054 + Outerplane Solutions B.V. + Harjo Otten + info&outerplane.nl +54055 + Elrad International d.o.o. + Matjaz Znidaric + Matjaz.Znidaric&elrad-int.si +54056 + New Mexico Hematology and Oncology Specialists + April Christensen + chop&choptx.org +54057 + Sport Maska Inc. + Daniel Taillon + ccm.sysadmin&ccmhockey.com +54058 + Sentryo SAS + Sentryo Engineering + contact&sentryo.net +54059 + Melillo Consulting + Sabato Melillo + sabato.melillo&melilloconsulting.it +54060 + TÜV Rheinland (Shanghai) Co., Ltd. + Amanda Yang + amanda.yang&tuv.com +54061 + Cinkciarz.pl Sp. z. 
o.o + Marcin Klinski + m.klinski&cinkciarz.pl +54062 + allpay Ltd + Mr Douglas Munford + douglas.munford&allpay.net +54063 + PROFESSIONAL HOSPITAL GUAYNABO + LEONARDO VALENTIN + radiologia&professionalhospital.com +54064 + GILAT PERU + CARLOS GUERRA + CGUERRA&GILATLA.COM +54065 + Cybershark Systems + Martin Levin + martin.levin&sharkie.se +54066 + Gwirio (Pty) Ltd + David Roux + david.roux&gwirio.com +54067 + SitioDistinto + Santiago Ramos + sramos&sitiodistinto.net +54068 + AdvaHealth Solutions + Ben Ganley + ben.ganley&advahealthsolutions.com +54069 + SPECINFOSYSTEMS + Vitalii Verveiko + v.verveiko&specinfosystems.com +54070 + Lars Hegenberg + Lars Hegenberg + info&lhcom.de +54071 + Beijing College of Politics and Law + Leon Wang + abcool&126.com +54072 + Guam Seventh-day Adventist Clinic + Jim Dunn + jdunn&adventistclinic.com +54073 + Hefei six-line speed cloud + Yuanchun Wang + 597042613&qq.com +54074 + Univox + Weslley Almeida + noc&univox.com.br +54075 + Data-Linc Group + Dilip Karna + dkarna&data-linc.net +54076 + ENEDIS + Esteban Pereira + esteban-externe.pereira&enedis.fr +54077 + BDO UK LLP + Nicholas Cleary + Nick.Cleary&bdo.co.uk +54078 + Greschitz Management GmbH + Thomas Greschitz + office&greschitz.net +54079 + GlassTerra Pty Ltd + Cain O'Sullivan + cain&glassterra.com +54080 + Auvera Technology Group Pty Ltd + Justin Galogly + justin&auvera.com.au +54081 + Piratenpartei Baden-Württemberg + Adrian Nöthlich + adrian.noethlich&piratenpartei-bw.de +54082 + Ani's creation + Aniket Meena + Aniketmeena300&gmail.com +54083 + ENHEXA s.r.o. + Lubos Kaestner + lubos.kaestner&enhexa.com +54084 + fl0wer.me + Gilberto Persico + gilberto&fl0wer.me +54085 + eWitness Malta LTD + Eleonora Giardino + eleonora.giardino&ewitness.eu +54086 + Liverpool Networks + James Allan + jim&liverpoolnet.com +54087 + BLANKE automation GmbH + Markus Weiss + markus.weiss&blanke.ch +54088 + ITCS BDO Unibank, Inc. + ITCS Engineering + itcs-cloud-engineering&bdo.com.ph +54089 + Sunny Daze Landscaping + Sonny Jarock + meritsjay&gmail.com +54090 + Kishwar Shareef MD PC + Syed Shareef + syedshareef&yahoo.com +54091 + Basswood Systems + James Linden + jflinden&basswood-systems.com +54092 + Rebotiga de Serveis Solars, S.L. + Marius Tresanchez + marius&b2csms.com +54093 + RDX Networks Oy + Petrus Repo + petrus.repo&rdx.net +54094 + Whizpace Pte Ltd + Pankaj Sharma + pankaj&whizpace.com +54095 + StellarLink CORPORATION + Hiroki Toyota + admin&stellarlink.co.jp +54096 + MAMPU + Nik Zarina binti Nik Mat + nikzarina&mampu.gov.my +54097 + Secusmart GmbH + Dorian Borovina + it&secusmart.com +54098 + Nord-West Oelleitung GmbH + Dirk Traenapp + dirk.traenapp&nwowhv.de +54099 + Estateably + Nicolas Huray + nhuray&estateably.com +54100 + New Mexico Mutual + Alex Sifuentes + oid&newmexicomutual.com +54101 + LogoTek GmbH + Christopher Koempel + Christopher.Koempel&logotek-gmbh.de +54102 + Fair Isaac Corp + Vivek Gupta + ML_FICO_IANA_ADMIN&fico.com +54103 + PT Satyamitra Surya Perkasa + Prabowo Priyo Ardhiatno + prabowo&ssp.co.id +54104 + Byucksan Power Co.Ltd + Jae Kyu Lee + jakelee&bspower.co.kr +54105 + Acalvio Technologies Inc + Srinivasan Narasimhan + srini&acalvio.com +54106 + The Wirehead Mechanist + Jay Jones + wirehead&wireheadmechanist.com +54107 + Advanced Cardio Diagnostic PLLC + ANHELINA SHTEERMAN + lina&acdmed.net +54108 + Elinnov Technologies, Inc. 
+ Christopher Misola + cgmisola&elinnovtech.com +54109 + 556001-6064 + Daniel IVarsson + daniel.ivarsson&jamtkraft.se +54110 + Azienda Regionale per l'Innovazione e per gli Acquisti S.P.A. (ARIA S.p.A.) + Luigi Bongiorni + luigi.bongiorni&lispa.it +54111 + SONIIR + Anton Minaev + minaev.aa&soniir.ru +54112 + TE Connectivity + Egbert Stellinga + estellinga&te.com +54113 + verlinked GmbH + Fabian Christ + info&verlinked.de +54114 + Procore Technologies + Site Reliability + sre+oid-registration&procore.com +54115 + Nebulon Inc + Stuart Davies + stoo&nebuloninc.com +54116 + BHP INNOVATION PTY LTD + Charles Garzarella + charles.garzarella&bhp.com +54117 + Packetworx + Arnold Bagabaldo + admin&packetworx.com +54118 + Victorian Funds Management Corporation + Kristian Beckett + kbeckett&vfmc.vic.gov.au +54119 + AZL N.V. + Wessel Landzaat + ict&azl.eu +54120 + visionvera information technology Co., Ltd. + meng zhang + miles&tecowin.cn +54121 + Deadlogic Desenvolvimento e Tecnologia LTDA + Denner Araujo Costa + denner.ac&outlook.com +54122 + fwgx + Paul Phillips + paul&fwgx.uk +54123 + Unassigned + Returned 2019-07-16 + ---none--- +54124 + MSG Mechatronic Systems GmbH + Christoph Moser + christoph.moser&msg.at +54125 + Kreativität trifft Technik e.V. + Sebastian Reichel + sre&mainframe.io +54126 + Hitachi Advanced Systems Corporation. + Takashi Wada + takashi.wada.pz&hitachi.com +54127 + Poynting Antennas (Pty) Ltd + Nicol Spies + nicol.spies&poynting.co.za +54128 + LuaDex Solutions (Pty) Ltd + Nicol Spies + nicol.spies&gmail.com +54129 + Université Gustave Eiffel + Cyril Bouvier + cyril.bouvier&ifsttar.fr +54130 + DCC Energi Center A/S + Morten Clare + mcl&dccenergi.dk +54131 + YPSI SAS + YANN PILPRE + yann.pilpre&ypsi.fr +54132 + Wendav IT Solutions + Mirco Wendav + iana_pen&wendav.ch +54133 + SCA IT Solutions Ltd. + Stephen Allen + scaits&gmail.com +54134 + David L Crowder MD + David Crowder MD + drdavidcrowder&gmail.com +54135 + Bohdan Khmelnytsky National University of Cherkasy + Roman Romadin + hostmaster&cdu.edu.ua +54136 + NOTARchiv Kft. + dr. Burgstaller Attila + notarchiv&mokk.hu +54137 + eLiam + Liam Parker + liam&eliam.co.uk +54138 + Opaq + John Kamenik + jkamenik&opaq.com +54139 + Corix Infrastructure Inc. + Carol Vorster + carol.vorster&corix.com +54140 + Pivotal Commware + Brian Nay + bnay&pivotalcommware.com +54141 + Energovat d.o.o. + Blaž Burgar + blaz.burgar&energovat.com +54142 + ZZ Vermoegensverwaltung Gesellschaft m.b.H. + Christian Dergovics + admin&zzgmbh.at +54143 + Ixsson Systems Kft. + Peter Orosz + orosz&ixsson.com +54144 + Evangelisches Christophoruswerk e.V. + Ulrich Bastian + ulrich.bastian&cwdu.de +54145 + SevenTest R&D Centre Co. Ltd + Edward Bazhenov + netmaster&seventest.ru +54146 + Simac BMS + Joel Thys + snbbms&simac.be +54147 + MicroArx Corporation + Mac Dougherty + info&microarx.com +54148 + STI Servicios SpA + Jonathan Nelson + jnelson&stiservicios.cl +54149 + Computer Pros + Robert Stokes + robert.stokes&computerpros.com +54150 + IDTrust + Policy Authority + pa&idtrust.com +54151 + Microsystem S.A. + Jaroslaw Marcin Iwanski + jiwanski&microsystem.cl +54152 + Unassigned + Returned 2019-07-29 + ---none--- +54153 + SecureMFA + Juventas Vijeikis + admin&securemfa.com +54154 + Mediaworks Hungary zrt. 
+ Róbert Nagy + pen&mediaworks.hu +54155 + Top Dog PC Services, LLC + Jim Staricha + jim&topdogpc.com +54156 + fortuna network + mohsen atiq + mohsenatigh2000&gmail.com +54157 + le Fay Network Services + Felicity Tarnell + ft&le-fay.org +54158 + Neural Technologies (HK) + Lewis LEE + hksis.support&neuralt.com +54159 + Dr. James Fierro D.O., PA + James Fierro + office1805&aol.com +54160 + Conperience GmbH + Andreas Habel + reg.net&conperience.net +54161 + AdSign Inc. + Joao Pedro Lisboa + joao&adsign.com.br +54162 + TCAM Technology Pte Ltd + Tan Shuang Maan + tansm&tcam.com.sg +54163 + Infomedia Technologies Corp.,Ltd + Hongjiang Chen + chenhongjiang&infomedia.com.cn +54164 + Thirdwayv Inc + Michael Ayoub + michael.atef&thirdwayv.com +54165 + PROTAB S.A. + Roger Rivera + rrivera&protab.com +54166 + Collège de Bois-de-Boulogne + Guillaume Beaudoin + dns&bdeb.qc.ca +54167 + CAHI Corporation + Hiroki Nakano + admin&cahi.net +54168 + “Display“ Design office”, JSC + Sergey A. Dubchak + sdubchak&kbdisplay.com +54169 + G.E.G. S.R.L. + Walter Gotti + it&geg.it +54170 + Vanderlande + Stefan Broekman + Stefan.Broekman&vanderlande.com +54171 + Macq + Marc Luycx + marc.luycx&macq.eu +54172 + Panobit, Inc + James Staricha + oid&panobit.com +54173 + zajkovski.dev + Mile Zajkovski + zajkovski&gmail.com +54174 + MWPDEMO + Rene Anton + ranton&synergyadvisors.biz +54175 + Allen A Flood MD + Juanita Robinson + info&aafloodmd.com +54176 + Fareportal Inc + Jalal Ahmad + jahmad&fareportal.com +54177 + ITcom Pro AG + Michael Damm + mcdamm&itcompro.com +54178 + Cossack Labs Limited + Dmytro Shapovalov + shad&cossacklabs.com +54179 + Radionor Communications AS + Anita Bokvist + anita&radionor.no +54180 + Softwise, Inc. + Richard Shaw + rshaw&softwise.com +54181 + Treon + Ismo Manninen + ismo.manninen&treon.fi +54182 + JSC "Informtehtrans" + Dmitry Tikhonov + tihonov&informtehtrans.ru +54183 + Evaluate Ltd + Solomon Mettle + Solomon.Mettle&evaluate.com +54184 + Meson + Christopher Johnstone + meson800&gmail.com +54185 + PHOTRON LIMITED + ISHII Takayoshi + ishii&photron.co.jp +54186 + Beijing xencore network technology CO.,LTD + Jeff.Chen + chenweiliang&xencore.cn +54187 + DDSat Technologies Private Limited + Dhiraj Kumar + dhiraj.kumar&ddsat.in +54188 + Akksan Ingenieurbüro + Kürsat Afacan + info&akksan.de +54189 + Lagardere Media News + MARC MELAINE + mmelaine&lagarderenews.com +54190 + Abstrakt Marketing Group + Lee Engelhardt + admin&abstraktmg.com +54191 + Enstar Group Limited + Luke Flack + luke.flack&enstargroup.com +54192 + R.K. Deep Sea Technologies limited + Symeon Hatzigeorgiou + s.chatzigeorgiou&deepsea.ai +54193 + LightWave Networks, Inc + David Capone + programs+snmpprivate&lightwavenetworks.com +54194 + The Resolvers s.r.l.s. + Alessandro Arcieri + alex&the-resolvers.net +54195 + Foodstuffs South Island Limited + Phil Dewar + phil.dewar&foodstuffs-si.co.nz +54196 + Hotplate Labs + Michael Fincham + michael&hotplate.co.nz +54197 + The Barnes Foundation + Steven Brady + support&barnesfoundation.org +54198 + Insomnia Security Group Limited + Michael Fincham + michael.fincham&insomniasec.com +54199 + kakaopay + Albert Kim + albert.j&kakaopaycorp.com +54200 + Hunan Kuangan Network Technology Co., Ltd. 
+ Tracy Lv + lvting&hnics.com +54201 + Suewag Energie AG + Oliver Ruebner + oliver.ruebner&syna.de +54202 + Hangar Hosting, srl + Stefaniu Criste + stefaniu.criste&hangar.hosting +54203 + Diffblue Ltd + Rob Johnson + rob.johnson&diffblue.com +54204 + Aero-Comm + Arief Nugroho + aerocomm.rnd&gmail.com +54205 + PioneerTimeSystem + Tahseen Uddin + tahseen_uddin&hotmail.com +54206 + dvdxdue s.n.c. + Denis Caffarel + posta&dvdxdue.it +54207 + Stadt Wuerzburg + Guenter Kretzer + guenter.kretzer&stadt.wuerzburg.de +54208 + Mmapro IT Solutions (Pty) Ltd + Tumelo Dlodlo + tumelod&mmapro.co.za +54209 + S.I.C.E.S. + Andrea Rizzon + R&D&sices.eu +54210 + Ooredoo + Moza Mattar + mhmattar&ooredoo.qa +54211 + BCAST Sp. z o.o. + Maciej Lipiński + info&bcast.pl +54212 + Stiftung Sankt Johannes + Felix Meyer + team.it&sanktjohannes.com +54213 + Prime Interway + Genilson Borges + genilson.borges&primeinterway.com.br +54214 + Derbyshire Fire & Rescue Service + Brett Clements + bclements&derbys-fire.gov.uk +54215 + Datateam Consulting S.A. de C.V. + Angel Daniel Alonso Tovar + angel.alonso&datateam.mx +54216 + Zahnmedizinisches Kompetenzzentrum Wolfsburg MVZ GmbH + Dr. Winfried Reiche + edv&wolfsburg.crsg.online +54217 + vlaine.fr + vincent laine + vincent.laine5&gmail.com +54218 + Clinique Saint-Jean + Steven Creve + infra.it&clstjean.be +54219 + WeSee Tecnologia + Rainaldo Augusto Silva + engenharia&wesee.online +54220 + BH Solutions SPRL + Julien Baldini + julien&bhsolutions.be +54221 + Adreon Technologies + Baljeet Singh Bhatti + baljeet.singh&adreon.tech +54222 + Velankani Communications Technologies, Inc. + Raymond Krauchunas + rkrauchunas&velankani.com +54223 + Kraftway Corporation PLC. + Vladimir Tairov + vtairov&kraftway.ru +54224 + YoGoKo SAS + Emmanuel THIERRY + contact&yogoko.fr +54225 + DIGITEL ON TRUSTED SERVICES S.L.U. + José Antonio Díaz + joseantonio.diaz&digitelts.com +54226 + Professional Link + Andre Ferlin + supporto&plink.it +54227 + Engineering Services and Testing + Donald J. Draper + EST_IT&estinc.com +54228 + GBM + Tyrone Cordoba + tcordoba&gbm.net +54229 + Viero, Inc. + Zoltan Gobolos + zoltan.gobolos&viero.tv +54230 + WestCoast Children's Clinic + Michael Schrecker + help&westcoastcc.org +54231 + Malta Information Technology Agency + Jonathan Cassar + jonathan.cassar&gov.mt +54232 + CrossFirst Bank + Mark Bye + mark.bye&crossfirstbank.com +54233 + TX RX Systems + Ken Pokigo + kpokigo&txrx.com +54234 + StarHub Ltd + Jessie Hu + jing.hu&starhub.com +54235 + HENGBAO + GaoLan + gaolan&hengbao.com +54236 + General Programming, LLC + Erin Liman, General Programming NOC + support&generalprogramming.org +54237 + Zahnzentrum Alstertal MVZ GmbH + Dr. Peter Borsay + edv&alstertal.crsg.online +54238 + AWR Dental Labor GmbH + Dr. Reinhold Ast + edv&awr.crsg.online +54239 + Dres. R.Ast & Kollegen MVZ GmbH + Dr. Reinhold Ast + edv&rast.crsg.online +54240 + Zahnaerzte am Kurhaus Wiesbaden MVZ GmbH + Dr. Dirk Mueller + edv&wizak.crsg.online +54241 + Dr. Schneider und Kollegen MVZ GmbH + Dr. Juergen Schneider + edv&pfaffenhofen.crsg.online +54242 + Zahnaerzte Much MVZ GmbH + Dr. Kerstin Wolf + edv&much.crsg.online +54243 + MKG Bogenhausen Dr. Hauck & Kollegen MVZ GmbH + Dr. Wolfgang Hauck + edv&mkgbogenhausen.crsg.online +54244 + Zahnmedizinisches Zentrum im Rosenhof Dr. Aigster, Dr. Sonntag & Kollgen MVZ GmbH + Dr. Wilhelm Aigster + edv&kissing.crsg.online +54245 + Dwornik Classen Zahnzentrum Juelich MVZ GmbH + Dr. Frank Dwornik + edv&juli.crsg.online +54246 + Dres. Ast & Kollegen MVZ GmbH + Dr. 
Wanda Ast + edv&dresast.crsg.online +54247 + Zentrum für Zahnheilkunde HafenCity MVZ GmbH + Dr. Thomas Nicolai + edv&hafencity.crsg.online +54248 + Zahnaerztliches Kompetenzzentrum bei der Jakobskirche MVZ GmbH + Dr. Volker Widmann + edv&wz-team.crsg.online +54249 + Dr. Beck & Kollegen MVZ GmbH + Dr. Frank Beck + edv&drbeck.crsg.online +54250 + bytesofgigabytes + Sagar khamkar + bytesofgigabytes&gmail.com +54251 + Digicon S.A. + Jose Luis Korman + jkorman&perto.com.br +54252 + Collab9 LLC + Mustafa Baig + mabaig&collab9.com +54253 + NAS Australia Pty Ltd + Mark Roemermann + it&nasaustralia.com.au +54254 + TSP SpA + Leonardo Mazzella + lmazzella&trustprovider.cl +54255 + Bacchus-Group + Asif Bacchus + admin&bacchus.cloud +54256 + CMS Cameron McKenna Nabarro and Olswang LLP + Adrian Sovaila + adrian.sovaila&cms-cmno.com +54257 + Chiltern and South Bucks District Council + Sim Dixon + servicedesk&chilternandsouthbucks.gov.uk +54258 + Wander + Mark Logan + mark&wander.me +54259 + SC Digital Solutions Limited + Hassan Reda + hassan.reda&sc.com +54260 + Shenzhen Mindray Bio-Medical Electronics Co.,Ltd. + Qiao Sen + qiaosen&mindray.com +54261 + Korea Airports Corporation + Sungbin Lim + limsb90&airport.co.kr +54262 + SWSAM Solution + Aadil Imran + aadil&swsam.co.uk +54263 + LSR Group Ltd + Cyril Savchenko + pen&lsr.ru +54264 + Monitoring Systems + Aliasghar Bagheri Soulla + ali.soulla&gmail.com +54265 + Byldis B V + Roel Laming + ict&byldis.com +54266 + Trelar + Eric Engberg + tdevops&trelar.com +54267 + MACS Group SRL + Mariano Ortega + mariano&macsgroup.com.ar +54268 + netElastic Systems Inc + David Williams + dwilliams&netelastic.com +54269 + Wolf & Danniel s.r.o. + Jan Wolf + wolf&wolf-danniel.com +54270 + Nanjing e-Quantum Information Technology Co., Ltd. + Yan Zhiwen + yan.zhiwen&e-quantum.com.cn +54271 + Lulin Systems Co., Ltd. + Allan Hsu + allan&lulinsys.com +54272 + Encurest + Juan Ponce + jcponce&encurest.com +54273 + LLC "Imlight-Showtechnic" + Podyniglazov Denis + iana&imlight.ru +54274 + ITEres GmbH + Raphael Waldburger + raw&iteres.ch +54275 + Surface Generation Ltd + Joe Mann + joe.mann&surface-generation.com +54276 + DWF LLP + Paul Price + paul.price&dwf.law +54277 + Jan de Rijk + Ronald Hunold + ict-servicedesk&janderijk.com +54278 + Silverengine GmbH + Kai-Uwe Schmidt + Kai-Uwe.Schmidt&silverengine.de +54279 + Nelson Tran + Nelson Tran + nelson&nelsontran.com +54280 + PC Informatica + Paolo Cetroni + paolo.cetroni&pc-info.it +54281 + VARTOS + Silviu Sirbu + silviu&vartos.io +54282 + Fuuzio Group + AiDAN MOUNTFORD + AIDAN.MOUNTFORD&FUUZIO.COM.AU +54283 + Max Kessler + Max Kessler + private&enterpri$e*number&maxkessler.org +54284 + iD Corporation. 
+ yosuke yano + yano&intelligent-design.co.jp +54285 + ECNS.EPC.HCI.CN + GuanYun + sioy2000&163.com +54286 + Wagner Group + Alex Kohler + alex.kohler&wagner-group.com +54287 + Blue Box Group Srl + Alex Bernardini + alex.bernardini&swegon.it +54288 + Aristotle Space and Aeronautics Team + Konstantinos Kanavouras + electrovesta&gmail.com +54289 + Microdigital Argentina + Cristian Caracci + ccaracci&microdigital.com.ar +54290 + RMACD.COM + Ronald MacDonald + ronald&rmacd.com +54291 + Rebecca Pruim + Rebecca Pruim + me&rebeccapruim.com +54292 + CareFlite + Mark Davis + mdavis&careflite.org +54293 + 8ZERO2 Consultants + Anirudh Malhotra + anirudh&8zero2.in +54294 + Magellan Robotech + Brett Gilbert + Brett.Gilbert&magellanrobotech.com +54295 + Network Treinamento e Capacitação + Joria Batista + admin&networkescola.com.br +54296 + Waagner-Biro Bridge + Huemer-Datacenter Support + michael.hamernik&huemer-dc.com +54297 + SHARD CAPITAL PARTNERS LLP + Aaron Kolton + shard&guidancetec.com +54298 + Private citizen + Joshua Denson + topslot91&gmail.com +54299 + City of Lawrenceburg Indiana + Bryson Walke + bwalke&lawrenceburg.in.gov +54300 + The Zanzibar Company Ltd. + Ali Rashid + ali.r&thezanzibarcompany.com +54301 + Andromeda Research, Inc + Robert William Keyes + rwk&andromedaresearch.com +54302 + EverFocus Electronics Corp. + EverFocus Electronics Corp. + ts&everfocus.com.tw +54303 + ADB Safegate Austria GmbH. + AT PKI team + at-pki&adbsafegate.com +54304 + Permian Women's Center, P.A. + Roxann Castillo + pwc405&gmail.com +54305 + iMed Software Inc. + David Runion + david.runion&imedemr.com +54306 + Jackson River + Nick Carroll + hosted&jacksonriver.com +54307 + ABN AMRO Bank N.V. + Sander Dorigo + sander.dorigo&nl.abnamro.com +54308 + Binarno s.p. + Paolo Brandoli + info&binarno.com +54309 + Camrize + Lukas van den Dijssel + lukas&camrize.com +54310 + AG Rechnersicherheit (TU-Berlin) + Eno Flag + iana-pen&enoflag.de +54311 + Vektor LLC + Sergei Rezviakov + admin&btcirk.com +54312 + Naval Group + Guillaume ASTIER + guillaume.astier&naval-group.com +54313 + Personal + Fer MenLop + fmenendezlop&gmail.com +54314 + ARM Automation + Chris Hammel + chris.hammel&armautomation.com +54315 + Pevans East Africa Limited + Antony Mbugua + itnetworks&sportpesa.co.ke +54316 + Tibx + Alex Kurganov + ak&tibxtech.com +54317 + SPECIALTY MICROWAVE + JAMES HONEYCUTT + james&ehtek.com +54318 + Center of Hope + Gina Cox + gcox&cohreno.com +54319 + Disy Informationssysteme GmbH + Markus Knye + it&disy.net +54320 + Häger+Busch GbR + Oliver Häger + info&inghb.de +54321 + Research School of Astronomy and Astrophysics, Australian National University + Chris Delfs + oid.manager&mso.anu.edu.au +54322 + LogPoint A/S + Bikash Bhandari + bbh&logpoint.com +54323 + Datera Inc. + Anil Ravi + g-pm&datera.io +54324 + Rhenus SE & Co. KG + Ingo Boehm + sysop.stuttgart&de.rhenus.com +54325 + Datamecanic + Jorge Hernandez + jorge.hernandez&datamecanic.com +54326 + Saudia, Corp. + Ala Bakhamis + Alabakhamis&saudiacorp.com +54327 + Tule River Tribe Gaming Commission + Adam Christman + licensing&trtgc.com +54328 + Kimbrell + Ryan Kimbrell + ryan&kimbrell.io +54329 + Modern Treasury Corp.
+ Sam Aarons + sam&moderntreasury.com +54330 + "Digital Solutions" JSC + Grigorii Solovev + mail&dsol.ru +54331 + Eurofins Digital Testing + Bob Green + bob.green&eurofins-digitaltesting.com +54332 + Nefeli Networks + Domain Admin + domain.admin&nefeli.io +54333 + Clicks Online Business e.K + Emrico Bohn + bohn&clicks.de +54334 + Central National Gottesman + Michael Schilling + mschilling&cng-inc.com +54335 + Infrascale, Inc + Alexey Turchanikov + alexey.turchanikov&infrascale.com +54336 + rowspace + Victor Bilyk + v&rowspace.net +54337 + OpenPOWER Foundation + Jeff Scheel + tsc-chair&openpowerfoundation.org +54338 + Certsys Tecnologia da Informação LTDA + Felipe Cespedes + felipe.cespedes&certsys.com.br +54339 + Yunnan Tobacco Company + Jonny Yang + 63300503&qq.com +54340 + Next Stride AG + Chris Streubel + chris.streubel&nextstride.com +54341 + Enabled Energy + Joe Staib + Info&enabledenergy.net +54342 + Copperhead Limited + Network Operations + team&copperhead.co +54343 + Malco Theatres, Inc. + Kiran Hanumaiah + kiran.hanumaiah&malco.com +54344 + PoisonCloud + Uways Khan + ukhan&poisoncloud.uk +54345 + Allo Communications LLC + Andrew Hadenfeldt + snmp-admin&ops.allophone.net +54346 + Penombre.eu + Grégoire BOHIC + gregoire.bohic&gmail.com +54347 + Kamunet, Prime Minister's Office of the Turkish Republic of Northern Cyprus + Mehmet Giritli + mehmet.giritli&kamu.ct.tr +54348 + Traffic ITS GmbH + Andre Kracht + akracht&traffic-its.com +54349 + Adolf Nissen Elektrobau GmbH + Co. KG + Jörn Jens + jjens&nissen-germany.com +54350 + NPO Baum JSC + Gleb Semenov + g.semenov&npobaum.ru +54351 + SLT-Technologies GmbH & Co. KG + Raphael Schlameuss + iana&slt-technologies.com +54352 + Contiki-NG + Yago Fontoura do Rosário + yago.rosario&hotmail.com.br +54353 + Joseph Orraca-Tetteh, MD + Maria Davis + mariadavis_1&msn.com +54354 + The Home Depot Mexico + Alberto Cardenas + Alberto_Cardenas&homedepot.com.mx +54355 + Badge Inc. + Albert Kwon + kwonal&badgebiometric.com +54356 + Shandong Huachentel Information Technology Co., Ltd. + zhhc + admin&huachentel.com +54357 + Suma Technology Co., Ltd. + Guo Jianhua + guojh&cancon.com.cn +54358 + TESCOM corp. + BULENT SAGEL + bsagel&tescom-ups.com +54359 + Hillsboro School District + Brian Ware + wareb&hsd.k12.or.us +54360 + PPMnet AG + Frank Gerhardt + it&ppmnet.de +54361 + UB Merchants + Andras Ormenyi + andras.ormenyi&ubm.hu +54362 + Barry J. Burns + Barry J. Burns + barry&barryjburns.com +54363 + BijKluit + Robin Kluit + info&bijkluit.nl +54364 + Graviton Org + David Nash + dnash&graviton.org.uk +54365 + LinchpinLabs Australia + Dan Brooks + linchpinlabs_oid&proinbox.com +54366 + Maincare Solutions + Eric Machabert + eric.machabert&maincare.fr +54367 + HUNAN SCROWN ELECTRONIC INFORMATION TECH.CO.,LTD + Dean.Ruan + ruanming&fullriver.com.cn +54368 + THE 52ND RESEARCH INSTITUDE OF CETC + YIN·JUN + yinjun&hikdata.com +54369 + Dragon Systems Software Limited (DssW) + Graham Miln + support&dssw.co.uk +54370 + Stichting Patyna + ICT + ict&patyna.nl +54371 + Compactive, s.r.o. + Marcel Uhlir + marcel.uhlir&compactive.cz +54372 + Codegic Pvt Ltd + Muhammad Wahaj Khan + muhammad.wahaj.khan&codegic.com +54373 + Brehm Praezisionstechnik GmbH & Co. KG + Celosmitarbeiter - Johann Grubek + c.mitarbeiter&brehm-praezision.de +54374 + VR-Yhtymä Oy + Merja Alastalo + merja.alastalo&vr.fi +54375 + POSTEK Electronics Co., Ltd. + Jarvis Lu + jarvis.lu&postek.com.cn +54376 + Baloise Group + Simon Gautschi + iana&baloise.com +54377 + Picture Code Co., Ltd. 
+ Hideto Ozaki + hostmaster&picturecode.co.jp +54378 + U.I. Lapp GmbH + Juergen Greger + juergen.greger&lappgroup.com +54379 + IT42 + Laurent Pertois + iana&laurent.it42.fr +54380 + XSYS.CH + Umar Raad + admin&xsys.ch +54381 + Ministerium der Finanzen des Landes Sachsen-Anhalt + Christian Ortholf + christian.ortholf&sachsen-anhalt.de +54382 + Virgin Mobile UK SAIP + Arjen Smit + asmit&libertyglobal.com +54383 + LAWtrust Third Party Services + Marcile De Waal + marcile.dewaal&altron.com +54384 + Yeni Hayat Bilisim Teknolojileri A.S. + Murat Demirten + mdemirten&yh.com.tr +54385 + Teleindustria S.r.l. + Fulvio Bestetti + fulvio.bestetti&teleindustria.it +54386 + Delem B.V. + Eddie Draaisma + eddie.draaisma&delem.com +54387 + VANCL - IT s.r.o. + Martin Vancl + info&vancl-it.cz +54388 + Emerson College + Corey Davis + corey_davis&emerson.edu +54389 + Spectrum Health Lakeland + George Korr + gkorr&lakelandhealth.org +54390 + Cedars Health + Ziad Skaf + zskaf&cedars-health.com +54391 + Casper Cardiology + Ziad Skaf + zskaf&caspercardiology.com +54392 + Waterjuice + Justin Waters + pen&waterjuice.org +54393 + Network Solutions Group + Brendan Thompson + brendan&netsg.co +54394 + LoadEx AB + Christofer Mattsson + christofer&loadex.se +54395 + Contentpepper GmbH + Dirk Schmedding + dirk.schmedding&contentpepper.de +54396 + Proviron + Peter Coppens + Peter.Coppens&proviron.com +54397 + ENQUETES FORENSIK INC. + Frédéric NADEAU + support&forensik.ca +54398 + metALCOM Zrt. + Dr. Miklos Tamás KONCZ + miklos.koncz&metalcomzrt.eu +54399 + Unassigned + Returned 2021-06-07 + ---none--- +54400 + Beijing Microvision Technology CO.,Ltd. + Meng Meng Xiao + xiaomm&mvtech.com.cn +54401 + Canterbury District Health Board (CDHB) + Mark Lyons + mark.lyons&concepts.co.nz +54402 + Opensource ICT Solutions B.V. + Brian van Baekel + info&oicts.nl +54403 + Virgin Mobile UK SIT2 + Arjen Smit + asmit&libertyglobal.com +54404 + Virgin Mobile UK SIT1 + Arjen Smit + asmit&libertyglobal.com +54405 + Virgin Mobile UK PROD + Arjen Smit + asmit&libertyglobal.com +54406 + Sonove GmbH + Thomas Hartmann + admin&sonove.de +54407 + SIC DEI-FCTUC + Ronaldo Junior + rjunior&dei.uc.pt +54408 + BEKAS + David Savidnes + david&bekas.no +54409 + Bit Mapper Integration Technologies Pvt Ltd + Kapil Satav + bitmapper531&gmail.com +54410 + Z1 Zentrum für Qualitaetszahnmedizin MVZ GmbH + Dr. Thomas Vogt + edv&z1.crsg.online +54411 + Implaneo Dental Labor GmbH + Dr. Wolfgang Bolz + edv&implaneo.com +54412 + ACECR branch of Zanjan + Nasser Shami + it.nasser&yahoo.com +54413 + Core Consulting + Ricardo Gonçalves + ricardo.goncalves&coreconsulting.com.br +54414 + TimeMachines Inc. + Douglas Ehlers + dehlers&timemachinescorp.com +54415 + AKLARO, Blaž Bregar s.p. + Blaž Bregar + blaz&aklaro.si +54416 + JUNO Software SRL + Mihnea Teodorescu + m&juno-software.com +54417 + Security and Protection Solutions + Rene Anton Castillo + ranton&ms365protection.biz +54418 + Beijing Tuoming Technology Co., Ltd. + Xiaoguang Liu + liuxiaoguang&tuoming.com +54419 + m-privacy GmbH + Holger Maczkowsky + info&m-privacy.de +54420 + Cyber Defense Institute, Inc. + Toru Tomita + admin&cyberdefense.jp +54421 + MonetaGo + Jesse Chenard + jesse&monetago.com +54422 + FLYFISH TECHNOLOGIES d.o.o. + Ivan Žilič + iana.NOSPAM&flyfish-tech.com +54423 + Schweizerische Parlamentsdienste Bern + Teamhead IT-OPS + oidadmin&parl.admin.ch +54424 + Procubed Inc. 
+ Bhupender Virk + bvirk&procubedinc.com +54425 + Coda Octopus Colmek + Jason Martin + jasonm&colmek.com +54426 + Struxture IT, Inc. + Technology Administrator + techadmin&struxtureit.com +54427 + Westelcom Networks + Eric Kreckel + ekreckel&westelcom.net +54428 + ČD - Informační systémy, a.s + Patrik Chadima + Patrik.Chadima&cdis.cz +54429 + MEMC Korea Company + J.E. Oh + jeoh&gw-semi.com +54430 + Elexis Open Source Project, www.elexis.info + Marco Descher + oid&elexis.info +54431 + Denevy + Jan Cervenak + jcervenak&denevy.eu +54432 + pemsy + Piotr Pabian + ppabian&pemsy.com +54433 + guardREC AS + Oddmund Johansen + ojo&guardrec.no +54434 + ONE NATIONAL EARTH + JESSI R REYNOLDS + jreynolds36&my.gcu.edu +54435 + MaXentric Technologies LLC + Tuan Do + tdo&maxentric.com +54436 + wimxtelecom + humberto santiago + gerencial&wimx.com.mx +54437 + Haud Systems Limited + Etienne Sammut + etienne.sammut&haud.com +54438 + Bradford Teaching Hospitals NHS Foundation Trust + James Townend + james.townend&nhs.net +54439 + Kanoon Iran Novin + makan jooyani + m.jooyani&irannovin.net +54440 + Alperia Ltd. + Julian Moroder + julianpaul.moroder&alperia.eu +54441 + Interact Consulting AG + Tae-Hoon Kwon + t.kwon&interact-consulting.com +54442 + Beahashy Network + Kentaro Abe + kentaro&whim.jp +54443 + Hospital Alemão Oswaldo Cruz + Atualpa Aguiar + atualpa&haoc.com.br +54444 + WARP + Georg Swoboda + cn&warp.at +54445 + Nolte IT Services Limited + Andreas Nolte + anolte&nolteits.co.uk +54446 + beratergruppe:Leistungen PartGmbB + Markus Frey + it&leistungen.de +54447 + Hausner Cloud + Rouven Hausner + rouven&hausner.cloud +54448 + KeyPKI, Inc. + Michael Yatsko + michael.yatsko&keypki.com +54449 + Vancouver Dispensary Society + Tim Sproule + tim&cannabisdispensary.ca +54450 + Geedge Networks + Chao Zheng + zhengchao&geedgenetworks.com +54451 + Icon Industrial Engineering + Dmitry Komolov + dkomolov&icon-group.ru +54452 + Novo Gaming + Rene Cools + helpdesk&novogaming.nl +54453 + Ministry of Interior of Republika Srpska + Divna Lovrić-Bojanić + ikt&mup.vladars.net +54454 + Zurtax + Yury Ulyanov + yury.ulyanov&gmail.com +54455 + CelerSMS + Victor Celer + admin&celersms.com +54456 + Olean Medical Group + Ricky Bee + rbee&oleanmedical.com +54457 + Novetta + Bryan Schneiders + bschneiders&novetta.com +54458 + Fernandes Banerjee Shenoy Kidney Center,LLC + Karen Pino + karen&banerjeekidneycenter.com +54459 + XCome Technology CO., Ltd. + Li, Ching-Chung + jason.li&xcome.com +54460 + Cadoles + Cadoles Engineering + contact&cadoles.com +54461 + GALIOS + Sergey Kalmykov + ksa&galios.ru +54462 + lorengraff.net + Lorenny Pujols + admin&lorengraff.net +54463 + ITGLOBAL + Roman Kuchukbaev + iana-pen&itglobal.com +54464 + YDK Co.,Ltd. + Ryuuta Ezaki + ezaki-ru&ydkinc.co.jp +54465 + Extreme Solutions + John van der Aa + john&extreme-solutions.nl +54466 + Supalta + Support Informatique + inforeseau&supalta.com +54467 + btdev + Christopher Herzog + cjh30&pct.edu +54468 + Research and Producion Enterprise EKRA Ltd. 
+ Konstantin Doni + ekra&ekra.ru +54469 + Health Insurance Fund of Republika Srpska + Dejan Stojaković + dejan.stojakovic&zdravstvo-srpske.org +54470 + KUBITZA Network Solutions + Kurt Seiler + seiler&kubitza.de +54471 + Belfast Health and Social Care Trust + Michael Henderson + michael.henderson&belfasttrust.hscni.net +54472 + British Standards Institution (BSI) + Paul Akinsete + paul.akinsete&bsigroup.com +54473 + VR PLUS Altmark-Wendland eG + André Kroll + it&vb-ware.de +54474 + Olean Medical Group + Ricky Bee + rbee&oleanmedical.com +54475 + 72 Fashion Corp. + Albert Nigri + hr&pgnyc.net +54476 + Whitlock Infrastructure Solutions + Alex Ulbrich + alex.ulbrich&whitlockis.com +54477 + Ulbrich Technologies + Alex Ulbrich + pen&ulbrichtech.com +54478 + Shenzhen Kiwi Smartek Co., Ltd. + Pendy Cai + Pendy.Cai&Kiwi-Smartek.com +54479 + NOCSI, Ltd. Liability Co. + Loc Nguyen + l&nocsi.com +54480 + Orphans Care Center - Dreama + Ayman Nady + a.nady&dreama.org.qa +54481 + CloudVirga + Chris Fortunato + cfortunato&cloudvirga.com +54482 + Virtium LLC + Greg Picard + greg.picard&virtium.com +54483 + Infrastruktura TK, OOO (limited liability company) + Dmitriy Shvetsov + dmitriy.shvecov&infra.ru +54484 + weclapp SE + Jan Vollendorf + vollendorf&weclapp.com +54485 + Airnace SA + Etienne Bagnoud + info&airnace.ch +54486 + Zecurion + Roman Vasiliev + support&zecurion.com +54487 + Zavarovalnica Sava d.d. + Tomaz Koncan + tomaz.koncan&zav-sava.si +54488 + Comprod Inc. + Waldimar Huamani Alfaro + walfaro&comprodcom.com +54489 + KVL + Pedro Vítor + pvitor&kvl.pt +54490 + 中广优视新媒体文化(成都)有限公司 (Zhongguang Youshi New Media Culture (Chengdu) Co., Ltd.) + seth deng + seth.deng&foxmail.com +54491 + TranSendX, LLC + Jonathan Holt + jonathan&transendx.com +54492 + BBPC + Jonathan + J.Nuttall&bbpc.org.uk +54493 + Bistum Eichstätt + Robert Bittl + rbittl&bistum-eichstaett.de +54494 + Google Inc + Jon Giffin + jgiffin&google.com +54495 + Provino Inc. + Erin Loy + erin&provinowines.com +54496 + Shire of Dardanup + Stephen Eaton + Stephen.Eaton&dardanup.wa.gov.au +54497 + Claritas Solutions Ltd + Derek Baker + support&claritas-solutions.com +54498 + Direção Regional das Obras Públicas e Comunicações + João Pedro Gouveia Botelho + noc&azores.gov.pt +54499 + Viakoo, Inc. + Alex Sternberg + alex.sternberg&viakoo.com +54500 + Movement Mortgage + Jason Bull + Jason.Bull&movement.com +54501 + Primary Health Medical Group + Administrator + swlicensing&primaryhealth.com +54502 + Rovenma Corp. + Şadi Çağatay Öztürk + info&rovenma.com +54503 + Area9 Pty Ltd + Service Desk + servicedesk&area9.com.au +54504 + Warsaw University of Technology + Igor Rutkowski + igor_rutkowski&poczta.onet.pl +54505 + Transcelestial Technologies PTE LTD + Roland Groza + dev&transcelestial.com +54506 + Intereuropa d.d. + Roberto Rusnjak + hostmaster&intereuropa.si +54507 + CySight + Rafi Sabel + rafi.sabel&cysight.ai +54508 + Deutsche Telekom - Access 4.0 + Fabian Schneider + fabian-schneider&telekom.de +54509 + iCE - Intelligent Controlled Environments + Cody Thomson + cody.thomson&icecontrol.co.za +54510 + Noscendo GmbH + Sven Böckelmann + sven.boeckelmann&noscendo.com +54511 + 1st Financial Bank USA + Bob Hudelson + ops&1FBUSA.COM +54512 + Dynalog India Ltd + Anand Sindhav + anands&dynalogindia.com +54513 + EVBox + Arjan van Rooijen + arjan.vanrooijen&ev-box.com +54514 + Teramed Limited + Jacob Hung + jbhinfo&gmail.com +54515 + EN TOUTE CONFIANCE INC. 
+ Frédéric NADEAU + support&infidem.biz +54516 + INTERTRONIC IT GmbH Wörrstadt + Patrick Hochstein + p.hochstein&intertronic.de +54517 + Vibrant Health, P.C. + Linda Jager + tljager&msn.com +54518 + LLC "Medicina AlfaStrahovaniya" + Ilya Lubimov + it&alfazdrav.ru +54519 + JD + liyongchao + liyongchao&jd.com +54520 + Baryon, LLC + Dmitriy Dobromislov + d&baryonit.com +54521 + AP Sensing GmbH + Andreas Baier + info&apsensing.com +54522 + Vanderkooij Telecommunicatie BV + Harm van der Horst + h.vanderhorst&vdk-tele.com +54523 + MSS International Group + Stanislav Mensh + mss-88&yandex.ru +54524 + Center for Cancer and Blood Disorders + Carreen Huffman + chuffman&regionalcancercare.org +54525 + Montessori College Nijmegen + Alex Brand + a.brand&montessoricollege.nl +54526 + NFT Umweltdatensysteme GmbH + Thomas Hahnel-Mueller + noc&nft.de +54527 + Power Micro Controls America + Sergio Rabin + sergio.rabin&gmail.com +54528 + Datatrust Solutions + Andy Thomson + a.thomson&datatrustsolutions.com +54529 + jbash aka John Bashinski + jbash aka John Bashinski + jbash&jbash.com +54530 + Solvequest GmbH + Patrick Hochstein + ph&solvequest.com +54531 + Rhe-Ma Steuerberatungsgesellschaft mbH + Patrick Hochstein + patrick.hochstein&rhe-ma.de +54532 + Sinteck + Fábio Moura + fabio&sinteck.com.br +54533 + 上海鼎频通信技术有限公司 (Shanghai Dingpin Communication Technology Co., Ltd.) + 孙李婷 (Sun Liting) + slt&shdptx.com +54534 + D-Wave Systems, Inc. + Mike Sollanych + msollanych&dwavesys.com +54535 + Alcon Vision, LLC + Matthew Castoe + matt.castoe&alcon.com +54536 + OSAKIDETZA + Susana Santa Cruz Aberasturi + susana.santacruzaberasturi&osakidetza.eus +54537 + Forestlink AB + Johan Svensson + johan&forestlink.se +54538 + FedEx Services + Timothy Cole + timothy.cole&fedex.com +54539 + Wizkers.io + Ed Lafargue + ed&wizkers.io +54540 + Rescale Inc. + Adam McKenzie + adam&rescale.com +54541 + Fuse Integration + Frank Dell Kronewitter + dell.kronewitter&fuseintegration.com +54542 + Qoppao LLC + Jonathan Farnham + jonathan&qoppao.net +54543 + cumulusone LLC + Terry Fernandez + tfernandez&cumulusone.tech +54544 + Metrodata GmbH + Rüdiger Kessel + r.kessel&metrodata.de +54545 + PEN01 - Centro de Tecnologia da Informação e Comunicação do Estado do Rio Grande do Sul S.A. + Daniel Soares de Oliveira + daniel-oliveira&procergs.rs.gov.br +54546 + California Kidney Specialists + Zahra Unwalla + zunwalla&ckskidney.com +54547 + L'Xtreme + Jan Willem Janssen + iana&lxtreme.nl +54548 + Vereign AG + Damyan Mitev + damyan.mitev&vereign.com +54549 + TSINGHUA TONGFANG CO., LTD. + Chengshan Geng + gengchengshan&thtfpc.com +54550 + MD Clinics + Patricia Porter + pporter&drmdclinics.com +54551 + 北京用友政务软件股份有限公司 (Beijing UF Government Software Co., Ltd.) + 宋朝晖 (Song Zhaohui) + Songzh&yonyou.com +54552 + BPSC + DC Ops + dcops&bpsc.com.pl +54553 + Organización de Estados Iberoamericanos - OEI + Santiago Ramos + sramos&oei.es +54554 + Universität des Saarlandes - Hochschul-IT-Zentrum + Alexander Cullmann + ident&hiz-saarland.de +54555 + YARUS Networks + Nikolay Kulagin + info&yarus-networks.ru +54556 + Finko Group + Janis Udensvirs + it.helpdesk&finkoinvest.com +54557 + Hangzhou Tuners Electronics Co.,Ltd. + Xie Zuguo + 17808239&qq.com +54558 + AmpThink, LLC + Lee Herlinger + lee.herlinger&ampthink.com +54559 + Alvand Solutions LLC dba Accutive Security + Paul Horn + paul.horn&accutive.com +54560 + L.W. Hancock Corporation + Lyle W.
Hancock + lyleh&lwhancock.com +54561 + Clairvoyant Technology + Scott McMillan + smcmillan&clair-tech.com +54562 + Mycroft Mind, a.s. + Filip Prochazka + info&mycroftmind.com +54563 + Pratum, Inc. + Dave Nelson + support&pratum.com +54564 + ultra-gigasat ltd + Riaz Saddiq + riaz.saddiq&ultra-gigasat.com +54565 + Almidones Mexicanos S.A. de C.V. + Jorge Sanchez + jorge.sanchez&almidones.com.mx +54566 + Florida Precision Oncology + Kathryn G. Bailey + kathryn.bailey&21co.com +54567 + ORES SCRL + stephane Baldino + stephane.baldino&ores.be +54568 + Rail-Mil sp. z o.o. sp. komandytowa + Sławomir Jasiński + biuro&rail-mil.eu +54569 + Connor Horman + Connor Horman + chorman64&gmail.com +54570 + JSC “Scientific Industrial Enterprise "Rubin" + Dmitriy Lukin + 6400&npp-rubin.ru +54571 + AMI Praha a.s. + Roman Pudil + roman.pudil&ami.cz +54572 + Viggo Service Enablers + Michiel Bolder + michiel.bolder&viggo.eu +54573 + OpenIT + Telyukh Sergey + noc&itisopen.ru +54574 + Adaptiv Networks + Arun Pereira + apereira&adaptiv-networks.com +54575 + PQShield + Markku-Juhani O. Saarinen + mjos&pqshield.com +54576 + backinthirty.net + Paul Bender + pebender&gmail.com +54577 + Red Cedar Oncology + Lisa Sweet-Brown + lisa&mycompasshealth.org +54578 + ART19, Inc. + Johannes Vetter + johannes&art19.com +54579 + Rekono d.o.o. + Miha Poberaj + miha.poberaj&rekono.si +54580 + Leypalhub, S.L. + Edwin Mata + edwin&leypal.com +54581 + Wilmore Electronics + Craig Galloway + info&wilmoreelectronics.com +54582 + AH-KOMP + Adam Hrazdil + adamhkomp&gmail.com +54583 + TV Tools Oy + Heikki Lindroos + devteam&tvtools.fi +54584 + Fusion Software (UK) Ltd. + Lee Ottaway + ca&software4dentists.co.uk +54585 + 株式会社天真堂 (Tenshindo Co., Ltd.) + reyren rémy + r-reyren&tenshindo.ne.jp +54586 + VE2DEE + David Bergeron + davidbergeron&ve2dee.name +54587 + NUSYN DIGITAL SOLUTIONS PVT LTD + Jahan Kadhar + jahan.kadhar&nusyndigital.com +54588 + Bombardier Transportation (ZWUS) Polska Sp. z o.o. + Wojciech Świderski + wojciech.swiderski&rail.bombardier.com +54589 + Weller IT Solutions + Max Weller + iana-pen&weller-it.com +54590 + 2JZ.SE + Pontus Engblom + pontus&2jz.se +54591 + RECSY + Eduard + ed_pozdnyakov&mail.ru +54592 + dmTECH GmbH + Stefan Armbruster + DMX-Windows-Applications&dm.de +54593 + Verlag Parzeller GmbH & Co. KG + Christian Wehner + iana-ldap&parzeller.org +54594 + Richter IT-Consulting + Sven Richter + sven.richter&richter-it-consulting.de +54595 + Phinergy + Atal Shargorodsky + atal&phinergy.com +54596 + Duracomm Corporation + Christian Cochran + christian&duracomm.com +54597 + L&T Technology Services + Nishant Kohli + nishant.kohli&ltts.com +54598 + Mercedes-Benz Group AG + Thomas Krumm + thomas.t.krumm&mercedes-benz.com +54599 + DPD Direct Parcel Distribution Austria GmbH + Fabian Wehsner + fabian.wehsner&dpd.at +54600 + PGE Systemy S.A. + Rafał Durka + rafal.durka&gkpge.pl +54601 + LLC «FOTEL» + Nedashkovskiy Alexey + fotel&fotel.pro +54602 + Wobben Windpower Industria e Comercio Ltda + Elcio Lima + elcio.lima&wobben.com.br +54603 + Hosting.cl + Alvaro Flaño + alvaro&hosting.cl +54604 + Telecom Argentina SA + Sebastian Fernandez Moran + sfmoran&teco.com.ar +54605 + Targa Telematics S.p.A.
+ Marco Andrea Speronello + marcoandrea.speronello&targatelematics.com +54606 + Scalstrm AB + Ola Bengtsson + ola&scalstrm.com +54607 + Epsom & St Helier University Hospitals NHS Trust + Russell Ealing + russell.ealing&nhs.net +54608 + Definium Technologies + Mike Cruse + mcruse&definium.net +54609 + Centurion Intelligence Consulting Agency + Mr. M. S. Timotheus Weidner + webmaster&centurion.ovh +54610 + OOO "NTC Rotek" + Pavel Mustaev + admindir&rotek.ru +54611 + Stichting Casade + Pascal van den Dungen + admin&casadenl.onmicrosoft.com +54612 + Chicago Digital Power + Helmer Jesus Narvaez + jnarvaez&cdpups.com +54613 + Adaptive Security SpA + Ricardo Pérez + rperez&adaptivesecurity.cl +54614 + Southbank Centre + Quentin Bradley + headist&southbankcentre.co.uk +54615 + Surveillus Networks LLC + Mark Lewis + mark&surveillus.com +54616 + Australian Institute of Family Studies + Stephen Norman + aifsit&aifs.gov.au +54617 + Anomaly Software + Dev Mukherjee + hello&anomaly.net.au +54618 + SIG11 + Core Team (elratt0r) + core&sig11.de +54619 + 7 Sigma Systems + Phil Sampson + psampson&7sigma.com +54620 + Inxmail GmbH + Michael Ruf + michael.ruf&inxmail.de +54621 + Pantacor Ltd + Alexander Sack + alexander.sack&pantacor.com +54622 + James Cahill + James Cahill + oid&prouser123.me +54623 + Raven51 AG + Hartmut Schindler + it&raven51.de +54624 + RUP Limited + Rupert Williams + rup&rup.co.uk +54625 + lepidum Co. Ltd. + Ryo Kajiwara + kajiwara&lepidum.co.jp +54626 + Marxup GmbH + Lukas Mueller + mueller&marxup.de +54627 + Graham Automation Systems, LLC + Rodd Graham + rgraham&grahamautomation.com +54628 + Bonneville Joint School District no. 93 + Rick Davis + davisr&d93.k12.id.us +54629 + Nick Robison + Nick Robision + nick&nickrobison.com +54630 + PATHION + Vineet Dharmadhikari + vineetd&pathion.com +54631 + NETZWERK Software GmbH + Felix Krüger + edv_lizenz&netzwerk.de +54632 + Univerzitetni klinični center Ljubljana + Matej Grom + matej.grom&kclj.si +54633 + Innovative Interfaces Incorporated + Jeffrey Skelton + iana&iii.com +54634 + Informatyka Bogusławski spółka z ograniczoną odpowiedzialnością sp. k. + Administrator OID + administrator-oid&ib.pl +54635 + ShowCase Holding BV + Bart Bergman + b.bergman&showcase.nl +54636 + Axomem Pte Ltd + Sean Whiteley + axm.iana&axomem.io +54637 + Ningbo Rongxin Ansheng Machinery Co., Ltd. + Azayaka Hanataba + support&rxmech.com +54638 + Alvarez & Marsal + James Chalmers + jchalmers&alvarezandmarsal.com +54639 + RESPUBLIKA + Dmitry + dmitry.malyshok&tio2.com.ua +54640 + 深圳华远云联数据科技有限公司 (Shenzhen Huayuan Yunlian Data Technology Co., Ltd.) + 武晓花 (Wu Xiaohua) + xiaohua.wu&huayuan-iot.com +54641 + AERODISK LLC + Mustafaev Timur + t.mustafaev&aerodisk.ru +54642 + DiniTech GmbH + Ing. Dietmar Niederl + office&dinitech.at +54643 + Kriminalomsorgsdirektoratet + Vidar Hanto + vidar.hanto&kriminalomsorg.no +54644 + SolarEdge Technologies Ltd + Aviad Yeshaya + Compliance&solaredge.com +54645 + Parzeller Service und Support GmbH & Co. KG + Christian Becker + christian.becker&parzeller.de +54646 + hfp Informationssysteme GmbH + Andreas Kinzler + iana-pen&hfp.de +54647 + EXT'IN + Mathieu VACHER + staff&extin.fr +54648 + Bareweb Inc + James Kuo + jkuo&barenecessities.com +54649 + Kuo LLC + James Kuo + jim&kuo-llc.com +54650 + Kerry's Nice And Sweet Treats + Nigel Des Vignes + nigel&niceandsweettgo.com +54651 + Ningbo Ginlong Technologies Co.,Ltd. + min.xu + min.xu&ginlong.com +54652 + PERI GmbH + Tobias Rieder + tobias.rieder&peri.de +54653 + Beijing hereit Technology Co.,Ltd.
+ shengjing li + lisj&hereit.com.cn +54654 + Gemmb Pty Ltd + Garry Thomas + ianaregistration&gemmb.com +54655 + Cytognos S.L. + Elena Hervalejo + ehervalejo&cytognos.com +54656 + Silicongate Lda. + Gabriel Santos + itmgr&silicongate.com +54657 + Northrop Grumman - Space Systems + Jeff Hobbs + jeffrey.hobbs&ngc.com +54658 + Oncam + Duane Mohney + dmohney&oncamgrandeye.com +54659 + GigSky, Inc. + Tony Wyant + twyant&gigsky.com +54660 + RUTOLL, LLC + Nikolay Maximov + maximov&rutoll.ru +54661 + LLC NPO RPS + Krivko Artem + service&npo-rps.ru +54662 + SPEICHER + Sebastian Speicher + nospam.speicher&gmail.com +54663 + Tribunal Regional do Trabalho da 1ª Região + Armindo Gomes + armindo.gomes&trt1.jus.br +54664 + Genious Communications + Driss Najam + d.najam&genious.net +54665 + Tuopu Baorui Shenzhen Electronics Co.,Ltd. + Yan Zhang + zy&toprie.com +54666 + SwitchDin + Tim Schier + tim.schier&switchdin.com +54667 + Blue38 Consultants LLC + Matthew Groff + matt&blue38llc.com +54668 + ION Media Networks + Matthew McDonald + matthewmcdonald&ionmedia.com +54669 + Tibit Communications + Kevin A. Noll + kevin.noll&tibitcom.com +54670 + Finance Now Limited + Mark Lyons + mark.lyons&concepts.co.nz +54671 + Arcible Limited + Richard Green + richard.green&arcible.com +54672 + Bank Jateng + Thomas Irwan + thomas&bankjateng.co.id +54673 + Krone Business Center GmbH & Co. KG + Stefan Wenker + stefan.wenker&krone.de +54674 + The We Project Inc. + Nicolas Seritti + nick&twp-inc.com +54675 + Two Six Labs, LLC + Nicholas Laird + nicholas.laird&twosixlabs.com +54676 + Partridge Crossing + Richard Dawson + dawsora&gmail.com +54677 + Corporation of Norfolk County + OID Admin + webmaster&norfolkcounty.ca +54678 + SBS Information Systems Co.,Ltd. + Takehisa Iwasaki + t_iwasaki&sbs-infosys.co.jp +54679 + Kibotos + Alexander R. Krause + krause&kibotos.de +54680 + Airbus Operations GmbH + Philipp Griessler + igxi-indus&airbus.com +54681 + Mobica Limited + rafal.wisniewski&mobica.com + rafal.wisniewski&mobica.com +54682 + Sparkassenakademie Bayern + ASCHENBRENNER Florian + f.aschenbrenner&s-akaby.de +54683 + P.U.S. mvb + Szymon Skoczyński + szymon.skoczynski&mvb.pl +54684 + Valen Power + Stephen Daries + stephen&valen.com.au +54685 + Alolise + Jérôme Avond + jerome.avond&alolise.org +54686 + Innovative Collaboration Inc. + Pete Holcomb + pholcomb&icavn.com +54687 + Hestnet + Luke Hester + luke&hestnet.com +54688 + DIGIPAX + Devyatnikov Alexey + stingx&digipax.dev +54689 + PKI Services + Roberto Rodriguez + info&pkiservices.co +54690 + RXD NOVA Pharmaceuticals Inc. + Jeremy Gray + Jeremy.Gray&rxdnovapharma.com +54691 + Ministerium für Wirtschaft, Innovation, Digitalisierung und Energie des Landes Nordrhein-Westfalen + Benedikt Knust + benedikt.knust&mwide.nrw.de +54692 + phinneyridge.com + Tom Rutchik + tom&phinneyridge.com +54693 + CFAO + Simon ROBELIN + exploitation&cfao.com +54694 + South Correctional Entity + Adam Munson + amunson&scorejail.org +54695 + Satelles, Inc. + Mark Hargrove + mhargrove&satellesinc.com +54696 + AlphaTech + Álvaro Díaz + ads847ads&gmail.com +54697 + Oxbotica + Terry Dooher + sysadmin&oxbotica.com +54698 + Suburban Pulmonary Medicine, PC + Shannon Sharp + shantan34&gmail.com +54699 + JoVa + Joseph Riopelle + joseph.riopelle&gmail.com +54700 + Wachter, Inc. + Mark Hufford + itmgmt&wachter.com +54701 + ENABLER LTD. 
+ Tsutomu Sano + t.sano&enabler.co.jp +54702 + Anaconda + Bradley Kreider + bkreider&anaconda.com +54703 + Djmemjy Enterprise + Matthew Kennedy + mattkennedy_12&yahoo.com +54704 + CASTCORE + SeougYun,Park + sypark&castcore.net +54705 + Dr. Gary A. Lieberman,PA + Afsi Halati + Afsih&aol.com +54706 + Vita Soft + Fang Weiming + china_zjfwm&126.com +54707 + Integrity Net Solutions and Services + Abdullah Alrasheed + abdullah&integritynet.biz +54708 + Wilson Bank & Trust + Stephen Jaquish + itadmins&wilsonbank.com +54709 + Walton K. Joyner Jr., MD + Stacy Upchurch + joynerophthalmology&gmail.com +54710 + Civilex Victoria + Kelvin Godde + kelvin&itsolutions.com.au +54711 + Make Nashville + John Northrup + john&8bitwizard.net +54712 + RADS Inc. + Nick Patel + nick&rads.io +54713 + Interactive Network (https://www.internet.de) + John Fitzgerald + iana-pen&internet.de +54714 + Thom Rounds Broadcast and Information Technologies + Thomas A Rounds + trounds&tdrounds.net +54715 + Beijing CloudFly Technology and Development Ltd. + Wang, Xijun + wangxj&cloud-fly.com +54716 + Senety Investment, Inc. + OID Administrator + oidadmin&senety.com +54717 + Corbium Company + Benjamin Beideman + infra&corbiumco.com +54718 + CVisionLab LLC + Dmitri Lapin + lapin&cvisionlab.com +54719 + 8 Bit Wizard + John Northrup + john&8bitwizard.net +54720 + Dokobit, UAB + Gintas Balciunas + gintas&dokobit.com +54721 + EasyVirt + Bernard Thierry + thierry.bernard&easyvirt.com +54722 + Fluepke + Carl Fabian Luepke + iana&luepke.email +54723 + CREATURUM + Creaturum Team + creaturum&protonmail.ch +54724 + Strij + Aleksandr Solomakha + asolomakha&gelarm.ru +54725 + Slthing + Michal Kirejczyk + michalkirejczyk&outlook.com +54726 + Thales Digital Factory + Hamza ATTAR + Hamza.ATTAR&thalesdigital.io +54727 + Korum Automotive Group + Kevin Loney + kloney&korum.com +54728 + Osceola County Sheriff's Office + Ryan Potts + ryan.potts&osceola.org +54729 + Electrify America LLC + Jonel Timbergen + jonel.timbergen&hubject.com +54730 + Cinkciarz.pl Sp. z o.o. + Security + security&cinkciarz.pl +54731 + Southern Health and Social Care Trust + Jason Donnan + jason.donnan&southerntrust.hscni.net +54732 + Schueco International KG + Admin + admin&schueco.com +54733 + Shenzhen Absen Optoelectronic Co., Ltd + parker.xu + parker.xu&absen.com +54734 + AdamsCon IT & Security Consulting Services Limited + Lynn Grant + lynn.grant&adamscon.bz +54735 + Zeit, Inc. + Joe Cohens + info&zeit.co +54736 + Urenco + Chris Thomas + chris.thomas&urenco.com +54737 + Cardinality Ltd + Kevin Boateng + kevin.boateng&cardinality.co.uk +54738 + Corvid Technologies, LLC + Greg Lincoln + greg.lincoln&corvidtec.com +54739 + Sektion Allgäu-Kempten des Deutschen Alpenvereins e.V. + Wolfgang Doll + wodo&dav-kempten.de +54740 + RESPUBLIKA MALL + Dmitry Malyshok + imalyshok&gmail.com +54741 + ClubCorp USA, Inc + Robert Butler + network.team&clubcorp.com +54742 + Hangzhou JiuLue Technology Co., Ltd + zhengguodong + zhengguodong&jiuluetec.com +54743 + NVT Phybridge Inc. + Ian MacPherson + ian.macpherson&nvtphybridge.com +54744 + ShangHaiDieNian Information Technology Co., Ltd. + jason wu + weifeng.wu&7x-networks.com +54745 + Careerforce + Mark Lyons + mark.lyons&concepts.co.nz +54746 + Canoo Inc. 
+ David Dai + david.dai&canoo.com +54747 + ABUS Security Center + Stuart Parris + s.parris&abus-sc.com +54748 + Ministarstvo unutrasnjih poslova Crne Gore + Tatjana Drobnjak + tatjana.drobnjak&mup.gov.me +54749 + Gulf payment company + Abdulwahab Almimony + aalmimony&gccsg.org +54750 + Vincit + Janne Rönkkö + janne.ronkko&vincit.fi +54751 + GrayRobinson, PA + Patrick Heisinger + patrick.heisinger&gray-robinson.com +54752 + Nicotech Int + Andrey Murav'yov + ant&nicotech.ru +54753 + Oxolutions BV + Olivier Olmer + olivier.olmer&oxolutions.nl +54754 + Optics #1, LLC + Dmitry Tsaregradskiy + projectmanager&rrh.world +54755 + Stad Sint-Niklaas + Leander Quintelier + ict&sint-niklaas.be +54756 + Signaltec + Vasily Sidyakov + vsidyakov&signaltec.ru +54757 + Ahgora Sistemas + Ricardo Antonio Pralon Santos + ricardo.pralon&ahgora.com.br +54758 + Schattenportal + Sven Koch + oid&schattenportal.de +54759 + shift GmbH + Tobias Kirchhofer + admin&shift.agency +54760 + Siemens Schweiz AG SSP + Alexander Friesen + alexander.friesen&siemens.com +54761 + Just Another Shop* BMI Productions*& BHP Corporation + Harmony Adams or Schindler + kty593130&gmail.com +54762 + Openfactory GmbH + Silvan Gebhardt + factory&openfactory.ch +54763 + Nous Technologies + Luis Cordova + luis.cordova&noustrack.com +54764 + Arilia + Stephane Bourque + stephane.bourque&arilia.com +54765 + Power supply production (PSP) + Amir Golbaz + crm&psp.ir +54766 + WondaLink Inc. + Tom Chen + tom.chen&wondalink.com +54767 + Software Service, Inc. + Masato Mori + masato_mori&softs.co.jp +54768 + 360 Vision Technology Ltd + Richard Eccleshall + richard.eccleshall&360visiontechnology.com +54769 + Think Technology + Murillo Brito Chaves + ti&tkth.com.br +54770 + pargareh system parseh + Siamak Danesh + info&pargarehsystem.com +54771 + ys4fun + dong yang + aiaoyang&outlook.com +54772 + Nuratech Networks LLC + Mukunda Haveri S + mukunda.haveri&gmail.com +54773 + Papierfabrik Niederauer Mühle GmbH + Markus Körner + m.koerner&niederauer-muehle.de +54774 + Renuka's Castle + Martin Edgar Furter Rathod + pen&renukascastle.co.in +54775 + Center for Medical Interoperability + Sumanth Channabasappa + sumanth&center4mi.org +54776 + Dealex + PEN Admin + penadmin&dealex.fi +54777 + HP-Hrvatska pošta d.d. + Ivan Poldrugač + ivan.poldrugac&posta.hr +54778 + Hyerpu Tech Co. Ltd + Wang Xiaodong + xdwangchn&163.com +54779 + Schmithuysen TestLab-AG + Kai Schmithuysen + kai.schmithuysen&outlook.com +54780 + Virtual Power Systems + Roland Larocque + roland&virtualpowersystems.com +54781 + Griwes Homelab + Michał Dominiak + griwes&griwes.info +54782 + BackBox Software LTD + Rafi Zvi + rafi&backbox.com +54783 + Delta Networks(Xiamen) Inc. + linki.chen + linki.chen&deltaww.com +54784 + ELEKON, s.r.o. + Milan Lukáč + ml&mobatime.cz +54785 + Schweizerische Mobiliar Versicherungsgesellschaft AG + Reto Marugg + reto.marugg&mobi.ch +54786 + X Connections Ltd. + Mathijs Johannes de Hoog + mdehoog&x-connections.com +54787 + 24CARE + Matthijs Johannes de Hoog + Mdhoog&24care.nu +54788 + Collé Sittard Machinehandel B.V. + Ronald Bruyenberg + ict&colle.eu +54789 + MaineHealth + Internet Domain Registrar at MaineHealth + InetDomAdmin&mmc.org +54790 + Taler Systems SA + Florian Dold + dold&taler.net +54791 + dbck + Dennis Böckmann + iana-pen&dbck.de +54792 + Osprey Video, Inc + Jeremy Gerdes + support&ospreyvideo.com +54793 + Innovation Care Partners + Hazen Kor + hkor&icphealth.com +54794 + Stamhuis Bouwbedrijf B.V.
+ Joep van der Ben + j.vanderben&felton.nl +54795 + HiveMQ / dc-square GmbH + Christian Götz + info&hivemq.com +54796 + Department of Public Instruction - State of Wisconsin + Martin Dunn + martin.dunn&dpi.wi.gov +54797 + New England Hernia Center LLC + Laurie Duchesne + laurie.duchesne&lowellsurgical.com +54798 + Dalian Neusoft Education Technology Group Co., Ltd. + Fulong Sun + sunfulong&neusoft.edu.cn +54799 + SVD Gmbh + Richard Anzenberger + lizenzmanagement&svdgmbh.at +54800 + Apparent Inc. + Ani Matan + ani.matan&apparent.com +54801 + County of Kern + Hugh McDaid + mcdaidh&kerncounty.com +54802 + Software Colombia S.A.S. + Alex Chacon + admin&software-colombia.com +54803 + LLC Sphera Telecom + Michael Zingerman + kad&spheratele.com +54804 + Robot Industries + Asiri Dissanayaka + asiri&onsel.lk +54805 + Moshk co Ltd + Mohammad Davood Asgari + md.asgari&gmail.com +54806 + spy.de + Peer Diestelhorst + iana&spy.de +54807 + Truepic, Inc. + Jason Lyons + jason&truepic.com +54808 + NICE + Danny Eventov + ssl&nice.com +54809 + Linkomnia Limited + WONG WAN LEUNG + wanleung&linkomnia.com +54810 + Mike Bressem + Mike Bressem + mike&bressem.com +54811 + Virgil Security, Inc. + Dmitry Dain + ddain&virgilsecurity.com +54812 + Keramist Ltd + Yevhen Hlazunov + admin&keramist.com.ua +54813 + Thales Defense & Security, Inc. + Ian Anderson + EID_Contact&thalesdsi.com +54814 + GuaranteeWise Technology Corp. + Jack Tsai + jacktjc&gtewise.com +54815 + Model Obaly a.s. + Zdenka Luzarova + zdenka.luzarova&modelgroup.com +54816 + TPC Group, LLC + Danny Corgiat + infraops&tpcgrp.com +54817 + netUP Pty Ltd + Armin Marxer + armin&netup.co.za +54818 + NanoTrix + Ondrej Smola + ondrej.smola&nanotrix.cloud +54819 + Confluera, Inc. + Abhijit Ghosh + abhijit&confluera.com +54820 + Precision Test Systems + Martyn Smith + martyn&ptsyst.com +54821 + EX4 Tech + Eka Puji Widiyanto + ekapujiw2002&gmail.com +54822 + NewVac, LLC + Steven Marchegiano + helpdesk&newvac-llc.com +54823 + NeiMengGu Shipeng Technology + Leo Gan + 10107289&qq.com +54824 + Khwahish Technologies private limited + Srinivasan Murugappan + msvraji&yahoo.com +54825 + Frimley Health NHS Foundation Trust + Andy Landsberg + fhft.cybersecurity&nhs.net +54826 + Cayonyx, LLC + Keith Sarbaugh + keith&cayonyx.com +54827 + Noblesse + Vitalii Rykunov + vitalii.rykunov&gmail.com +54828 + KCON IT Consulting + Philip Könighofer + office&kcon.at +54829 + Realia Technologies, S.L. + Rubén Blanco + ruben.blanco&realsec.com +54830 + Podiatry Associates of Lake County Inc. + Donald Farley + lakepodiatry&sbcglobal.net +54831 + SIFULAN Malaysian Access Federation + Muhammad Farhan Sjaugi + farhan&sifulan.my +54832 + TAISYS Technologies Co., Ltd. + Banton Chen + banton.chen&taisys.com +54833 + Kennedy Baptist College + Kennedy ICT Department + kennedyit&kennedy.wa.edu.au +54834 + Zentera Systems, Inc. + Michael Ichiriu + michiriu&zentera.net +54835 + PocoDicom + Robin Boerdijk + rhmboerdijk&gmail.com +54836 + Sterling Endeavours Ltd + Trevor North + twjnorth&sterlingend.co.uk +54837 + Green Peak Innovations + Steven Wojciechowski + swojciechowski&greenpeakinnovations.com +54838 + Treasury Intelligence Solutions GmbH + Markus Stepins + cloudit&tis.biz +54839 + Iridium Satellite LLC + Web Admin + webadmin&iridium.com +54840 + GLOBAL POWER DESIGN INC. + KADIR YILMAZ + INFO&GLOBALPOWERDESIGN.COM +54841 + GoGet AB + K.T.
Goguev + kiril&gogetcorp.com +54842 + mcraindrop + Cornelius Moucha + peniana&raindrop.ws +54843 + Betriebszentrum IT-SysBw + Burkhard Drueke + burkharddrueke&bundeswehr.org +54844 + JABETTO FOR ESDM + Jack Waskito + jack&jabetto.com +54845 + Universitätsklinikum Augsburg + Max Netzheimer + sysadmin&uk-augsburg.de +54846 + Gateshead Health NHS Foundation Trust + Ian Slater + i.slater&nhs.net +54847 + POWER CONVERTER TECH + Kevin Chen + kevintsen&hotmail.com +54848 + Amzetta Technologies LLC + Arran Hsieh + arranh&amzetta.com +54849 + Loadbalancer.org + Malcolm Turnbull + info&loadbalancer.org +54850 + Teradyne, Inc + Tuyen Nguyen + tuyen.nguyen&teradyne.com +54851 + Nuts foundation + Wout Slakhorst + info&nuts.nl +54852 + KRYPTON Polska Sp. z o.o. + Artur Zięba-Kozarzewski + it&krypton-polska.com +54853 + Cypher Strategic + Val Shaikh + val.shaikh&cypherstrategic.com +54854 + Simplifia + Jean-Baptiste VERCRUYSSE + jb.vercruysse&simplifia.fr +54855 + Brigantia Learning Trust + Adam Kubica + akubica&brigantiatrust.net +54856 + Electro Design Pty Ltd + Ingo Losch + ingo&electrodesign.com.au +54857 + UAB "Affidea Lietuva" + Giedrius Frankas + giedrius.frankas&affidea.com +54858 + T-CZ a.s. + Michal Pecka + it&tcz.cz +54859 + GTI Technologies Inc + DANIEL BICHARA + DANIELB&FASTSENSOR.US +54860 + US Home Automation LLC + Neil Cherry + ncherry&ushomeautomation.com +54861 + Starr Companies + John Harte + john.harte&starrcompanies.com +54862 + Milano Engineering GmbH + Alexander Gräf + graef&milano.engineering +54863 + Geo Blue + Chris Baiocchetti + cbaiocchetti&geo-blue.com +54864 + PoEWit Technologies Inc + Dusan Jankov + dusan&poewit.com +54865 + Migräne-Klinik Königstein Verwaltungsgesellschaft mbH + Erik Schmoock + edv&koenig.crsg.online +54866 + Image Ingegration Inc + Cullen Jennings + fluffy&iii.ca +54867 + Morgan Properties + John Shelton + oid&morepropertymgmt.com +54868 + Onomondo + Henrik Aagaard + hello&onomondo.com +54869 + Blade Group + Erik Arfvidson + erik.arfvidson&blade-group.com +54870 + COMET-Group + Wolfgang Reimann / Robin Beismann + CGP-DG-IT-IANA&comet-group.com +54871 + Fundamental Research + Clinton James Rocksted + clint-fundamental&outlook.com +54872 + Laumer Vertriebs- und Verwaltungs GmbH + Benjamin Zettl + benjamin.zettl&laumer.de +54873 + Aggregate Solutions LLC + Ben Kwayisi + ben&aggsol.com +54874 + Exyte AG + Michael Titgemeyer + michael.titgemeyer&exyte.net +54875 + Civica + Tony Ditchfield + tony.ditchfield&civica.co.uk +54876 + IPI GmbH + Sabine Eberlein + edv&ipi.crsg.online +54877 + Implaneo Dental Labor Regensburg GmbH + Fatih Birinci + edv&labor-rgbg.crsg.online +54878 + Schonfeld + Anthony Cheng + acheng&schonfeld.com +54879 + MANTELECTRIC S.A. + Roberto Fernández + rf&mantelectric.com +54880 + Neil Stone + Neil Stone + neil.stone&gmail.com +54881 + Shine Corporate Ltd + IT Infrastructure + ITInfrastructure&shine.com.au +54882 + Fiserv GIOLabs + Fiserv Global Cyber Security Services GIOLabs + pki.services&fiserv.com +54883 + Keewin display Co. Ltd + Chaoyang Li + cy.Li&keewin.com +54884 + Tano Systems LLC + Anton Kikin + a.kikin&tano-systems.com +54885 + Sdrsystems.Net + Marvin Wolfthal + maw&weichi.com +54886 + Capacicom + Dan Peleg + danp&capacicom.com +54887 + Jamestown US-Immobilien GmbH + Jörg Schoppmann + joerg.schoppmann&jamestown.de +54888 + Premier Medical Concierge + Joseph Bandeira MD + bandeiramd&gmail.com +54889 + Walker IT Group LLC + Richard Reuling + iana-pen&walkeritg.com +54890 + Roechling Medical Lancaster, LLC. 
+ Mike Harsh + oid-admin&roechling-medical.us +54891 + NTHU Library + Yen-Chih Chang + apricot&lib.nthu.edu.tw +54892 + HENZ ICT + Rene Hennequin + pen&henz.nl +54893 + Anord Mardix Inc. + Robert Lantzy + robert.lantzy&anordmardix.com +54894 + Pason Power + Michael Gavenonis + michael.gavenonis&pason.com +54895 + NorthEast Treatment Centers + Andrew Sather + it-ops&net-centers.org +54896 + 柳州达迪通信技术股份有限公司 (Liuzhou Dadi Communication Technology Co., Ltd.) + 谭婵媛 (Tan Yuanyuan) + dadi&lzdd.com +54897 + Antarctic Group + Andrew Willett + contact&antarcticgroup.com +54898 + bekeris.net + Algis Bekeris + algis&bekeris.net +54899 + B3E + Christoph Boehme + christoph&b3e.net +54900 + GuangZhou HungTa Intelligent Technology + Zhu Sir + daniao14&qq.com +54901 + Safe-Com Wireless LLC + Henry Wojtunik + henry&Safe-Comwireless.com +54902 + WIS International + Matthew Burr + netadmin&wisintl.com +54903 + Minim Inc. + Sam Stelfox + devops&minim.co +54904 + HCS Electronics + Hans Christian Stokka + hcstokka&hcs.no +54905 + EXISCOM SAC + VICTOR BOSLEMAN + vbosleman&exiscom.com +54906 + MEDSOFT + Armin Weiner + armin_weiner&hotmail.com +54907 + MET Norway + Christian Skarby + christian.skarby&met.no +54908 + Vivid Inc. + Keiei Tanto (IANA coordinator) + iana-org&vivid-inc.net +54909 + Altenergy Power System Inc. + Kevin Lu + kevin.lu&apsystems.cn +54910 + Beijing INNOVIS Technology Co., Ltd. + Wenli Si + wlsi&innovisgroup.com +54911 + Univa Corporation + William Bryce + bbryce&univa.com +54912 + XeL Technology Inc. + Wen Zhiquan + wen.zhiquan&xel-tech.com +54913 + Codarra Advanced Systems + Chris Hope + information&codarra.com.au +54914 + AP Pension + Steen Nyborg + sny&appension.dk +54915 + Glodia Corporation + Hiroyasu Watanabe + watanabe&glodia.jp +54916 + IDEMIA France + Benoit MOUROUX + benoit.mouroux&idemia.com +54917 + Northern Health & Social Care Trust + Kevin Duffin + ICT.DataCentreTeam&northerntrust.hscni.net +54918 + Solnet S.A + Liliana Correa + liliana.correa&solnet.com.py +54919 + Chengdu Dongfangshengxing Electronics Co.,Ltd + Lianghuai Feng + flhcd&163.com +54920 + Cancer Center Middle Georgia + Tina Parham + cparham&ccmgeorgia.com +54921 + Cohesion Data Corp. + Ella Lien + ella.lien&cohesiondata.com +54922 + Zing sp. z o.o. + Leszek Masłowski + leszek.maslowski&zing.com.pl +54923 + U&S Services, Inc. + Dan Robbins + usfsysadmin&usservicesinc.com +54924 + Oradat Systems + Chris Oradat + coradat&gmail.com +54925 + LaserAnimation Sollinger GmbH + Michael Sollinger + development&laseranimation.com +54926 + Southwest Reinsurance, Inc. + Shawn Jones + itprojects&southwestre.com +54927 + Burckhardt Compression AG + Van Khiem Quach + vankhiem.quach&burckhardtcompression.com +54928 + Aquarius Production Company + Technical department + lic-reg&aq.ru +54929 + Socialdemokraterna + Mattias Gökinan + mattias.gokinan&socialdemokraterna.se +54930 + The Bravo's + Matt Bravo + matt&thebravos.org +54931 + Duvel Moortgat NV + Ronny Van Den Broeck + ronny.vandenbroeck&duvel.be +54932 + Validated ID, SL + Fernando Pino Sola + tsp&validatedid.com +54933 + Joint Stock Company «Academician A.L.Mints Radiotechnical Institute» + Dmitriy V. Drozdovich + ddrozdovich&rti-mints.ru +54934 + Ministerio del Interior + Julian Giménez Moreira + gimenezmoreira&yahoo.es +54935 + Center for Cancer and Blood Disorders + Kurt Campbell + kurt.campbell&cancerandblood.care +54936 + Karma Computing + Christopher Simpson + enquiries&karmacomputing.co.uk +54937 + WiTech és Társa Kreatív Mérnöki Iroda Kft. 
+ Witold Kuciński + kucinski.witold&witechkft.hu +54938 + ARDIS Solutions + Guillermo Daniel Euillades + guillermo.euillades&ardis.com.ar +54939 + Preh GmbH + Andreas Mendel + andreas.mendel&preh.de +54940 + GEA Group AG + Michael Knudsen + michael.knudsen&gea.com +54941 + Startup Stack LLC + Peter Linss + peter&startupstack.tech +54942 + Telecom Liechtenstein AG + Jürgen Hoellger + juergen.hoellger&telecom.li +54943 + CSCELECTRONIK + Rafael Robles + rafael.robles&cscelectronik.es +54944 + evolutionQ + Geovandro Pereira + geovandro.pereira&evolutionq.com +54945 + Yaskawa Solectria Solar + Aegir Jonsson + aegir.jonsson&solectria.com +54946 + Horizon Energy Group + Kevin Clark + Kevin.Clark&hegroup.nz +54947 + QuantiCor Security GmbH + Rachid El Bansarkhani + info&quanticor-security.de +54948 + Tielen Consultancy + Jeroen Tielen + jeroen&tielenconsultancy.nl +54949 + MPS Service + Felipe Amaral + felipeamaral&mpsservice.com.br +54950 + Lifetrack Radiology Systems + Brendan Rees + brendan.rees&lifetrackmed.com +54951 + The Ferguson Group + Joe Ferguson + ferguson&hm-cs.com +54952 + High Mountain Computer Services + Joe Ferguson + dnsadmin&hm-cs.com +54953 + Opgal Optronic Industries Ltd. + Oded Tubias + oded.t&opgal.com +54954 + IT LEARNING SLOVAKIA, s. r. o. + Michal Dobsovic + dobsovic&itlearning.sk +54955 + Coast Appliances + Support + support&coastappliances.com +54956 + Marais Consulting Limited + Francois Marais + francois&marais.co.nz +54957 + Sesto Sp z o.o. + Tadeusz Baranczyk + snmp&sesto.pl +54958 + Simpleway Holding a.s. + Matej Zachar + mzachar&simpleway.cz +54959 + ALKU + Patrick van den Akker + ict&alkugroep.nl +54960 + Wrigleys Solicitors LLP + Gavin Brining + itmanager&wrigleys.co.uk +54961 + Spectrum Enterprise + Mark Flynn + mark.flynn&charter.com +54962 + ManTech + John DeVight + John.DeVight&ManTech.com +54963 + The Pinnacle Group + Lynn Osburn + losburn&thepinnaclegroup.com +54964 + Sina Communication Systems + Hasan Hasani + pen-contact&sinacomsys.com +54965 + Hamilton Medical AG + Daniel Bombis + dbombis&hamilton.ch +54966 + I-Consulting Kft. + Attila László + support&iconsulting.hu +54967 + Schule fuer Informationstechnik der Bundeswehr + Max Friedrich + itsbws6projektausbn&bundeswehr.org +54968 + OCTE + ZAMANT Armand + azamant&octe.eu +54969 + ETON tech co.,Ltd + Austin wong + timeismoney2011&163.com +54970 + Stichting Nijmeegs Interconfessioneel ziekenhuis Canisius-Wilhelmina + Edwin Strik + e.strik&cwz.nl +54971 + ANZ Bank New Zealand Limited + Tony Arnold + nzsecurity&anz.com +54972 + SCUDOS Systems GmbH + Christoph Hoopmann + christoph.hoopmann&scudos.eu +54973 + Bren-Tronics, Inc. + Steven Chew + schew&bren-tronics.com +54974 + HOSPITAL GENERAL ENSENADA + Eugenio Ariel Chiusaroli Palacios + fatchucho&hotmail.com +54975 + Staclar, Inc. + Matthias Merkel + matthias.merkel&staclar.com +54976 + Hällefors Tierp Skogar AB + Erik Reis + erik.reis&htskogar.se +54977 + Mag-Audio + Sergei Germaniuk + sergei.germanuk&mag-audio.com +54978 + Norbrook Laboratories Ltd + David Winter + david.winter&norbrook.co.uk +54979 + iFreetion Technologies Inc. 
+ Xinyu Miao + xinyu&ifreetion.com +54980 + Ethertronics an AVX Group, EDC division + Chinda Keodouangsy + Chinda.keodouangsy&avx.com +54981 + BY-SYSTEMS SPRL + BOUJRAF YOUSSEF + yboujraf&by-systems.be +54982 + Manitoba Hydro International + Lawrence Arendt + larendt&mhi.ca +54983 + Henan Raytonne Trading Company + QI Yanjie + support&raytonne.com +54984 + Boliden AB + Mathias Ignberg + mathias.ignberg&boliden.com +54985 + Timmins and District Hospital + Brandon Cook + bcook&tadh.com +54986 + TRIBUNAL SUPERIOR ELEITORAL + SESAP - Ivanildo Gomes + sesap&tse.jus.br +54987 + BV USA, LLC. + Robert Lu + yufen&bvsecurity.com +54988 + VT Group + Steve Leonhardt + steve.leonhardt&vt-group.com +54989 + Davitec + Ricardo David + rdavid&davitec.com.br +54990 + Kirkland & Ellis LLP + Kirkland IT + dnsadmin&kirkland.com +54991 + DIRAK GmbH + Zachary Klares + z.klares&dirak.com +54992 + Swisscom Health AG + Federico Marmori + Federico.Marmori&swisscom.com +54993 + Du thu + Nguyen Khanh Tiem + tiemnk&gmail.com +54994 + Kenstel Networks Limited + Dhruv Kansal + dhruv&kenstel.com +54995 + Institut Régional du Travail Social PACA et Corse + FAGE Joëlle + joelle-fage&irts-pacacorse.com +54996 + PRXN + Andreas Herr + noc&proxion.de +54997 + Yui Networks + Yui-Man Mak + noc&yui.network +54998 + Keramist + Evhen Hlazunov + glazunov&ukr.net +54999 + Uniview Europe B.V. + Liu Ding + liuding&univiewled.com +55000 + Local + Ali Zafar + ali.zafar1296&gmail.com +55001 + eSite Power Systems AB + Magnus Persson + magnus.persson&esitepowersystems.com +55002 + Shenzhen Putianan Network Technology Co., Ltd. + Bo Zhou + bobzhou&putianan.com +55003 + mapway + zhang jianshe + 15910868680&163.com +55004 + Bridge Alliance + Antoine CLOUE + antoine.cloue&bridgealliance.com +55005 + Wesley Clover Solutions - North America, Inc + Todd Rossi + engineering&wesleycloversolutions.com +55006 + AltexSoft, Inc. + Artem Shamraiev + artem.shamraiev&altexsoft.com +55007 + OST + Mark Mahoney + mark.mahoney&nnsa.doe.gov +55008 + BEXT Inc. + Luca Borgnetto + lucabo&bext.com +55009 + rueen system + Jamali Zahra + superior.intelligence.zj&gmail.com +55010 + Nacionalna sluzba za zaposljavanje Republike Srbije + Branka Krzman + Branka.Krzman&nsz.gov.rs +55011 + Theritory fund of obligatory medical insurance of Kemerovo region + Victor A. Tarasov + victor&kemoms.ru +55012 + Fourthline + Charlie Tyler + iana&fourthline.com +55013 + Bolicom Innovation Technology (Beijing) Co., Ltd. + Victor Zhao + zhaozhiwei&bolicom.cn +55014 + Private ptm-Akademie + Alexander Bark + alexander.bark&ptm.de +55015 + Healthdirect Australia + Tim Atkinson + security&healthdirect.org.au +55016 + The Office for Information technologies and eGovernment + Jovana Bujošević + jovana.bujosevic&ite.gov.rs +55017 + Lifecycle Software Ltd + xavier charruau + xavier.charruau&lifecycle-software.com +55018 + Cermaq Group AS + Yarash Agarwal + monitoring&cermaq.com +55019 + TCU Financial Group Credit Union + Dave Greenough + dgreenough&tcu.sk.ca +55020 + Netheos + Olivier Detour + contact&netheos.net +55021 + S.S. White Technologies Inc. + Dev Vakharia + dev&sswhite.net +55022 + Brick Token, S.L. + Edwin Mata + edwin&brickken.com +55023 + Beijing Orient View Technology Co., Ltd. 
+ Andy Li + andy.li&ovtec.com +55024 + NIBE AirSite AB + Johannes Axelsson + Johannes.Axelsson&nibeairsite.se +55025 + RayPlus + Zhang Fang + 1251886521&qq.com +55026 + MORCTEST + Morten Christensen + dkmorc&gmail.com +55027 + Capsule Technologie SAS + Franck Gatto + fgatto&capsuletech.com +55028 + Occam Networks Limited + Chris Sibley + cs&occam.global +55029 + GARDENA GmbH + Pascal Brogle + pascal.brogle&husqvarnagroup.com +55030 + Michael W Lucas + Michael W Lucas + mwlucas&michaelwlucas.com +55031 + netadvance Kft. + Lóránth Csaba + loranth.csaba&netadvance.hu +55032 + South West London and St George's Mental Health NHS Trust + Lincoln Watson + lincoln.watson&swlstg.nhs.uk +55033 + Radix IoT, LLC + Kenneth Endfinger + Kenneth.Endfinger&radixiot.com +55034 + Apstra + Mansour Karam + snmp-questions&apstra.com +55035 + OuterShadows Group + Adam Wolfe + Adam.Wolfe&OuterShadows.com +55036 + SentrId + Edward Curren + ecurren&sentrid.com +55037 + Walker Information + Brian Kovacs + networkops&walkerinfo.com +55038 + PlusClouds + Harun Barış Bulut + baris.bulut&plusclouds.com +55039 + Farktronix + Jacob Farkas + snmp&rkas.net +55040 + Heissler Informatik + Jörn Heissler + iana-pen&heissler-informatik.de +55041 + Dept of Chemistry, University of Oxford + Pete Biggs + pete.biggs&chem.ox.ac.uk +55042 + Hutchison 3G UK Limited + Christopher Lee + christopher.lee&three.co.uk +55043 + Gruber Schanksysteme + Clemens Gruber + iana&pqgruber.com +55044 + Japan Communications Inc. + William Hamlin + whamlin&j-com.co.jp +55045 + One Network + Andy Ketskes + numbers&onenetwork.io +55046 + Neutralcom Information Technology Ltd. + William Luo + contact&neutralcom.com +55047 + Mirabito Holdings Inc + Brent Strignano + brent&mirabito.com +55048 + OOO LOTES TM + Aleksey Semenov + semenov&lotes-tm.ru +55049 + CBU Organization + Christophe Burlon + christophe.burlon&gmail.com +55050 + TuGo Insurance + Jon Coppin + jcop&tugo.com +55051 + Sure Universal Ltd + Chaim Green + chaim&sureuniversal.com +55052 + Darkness Reigns (Holding) B.V. + Terrence Koeman + terrence&darkness-reigns.net +55053 + Hubject Inc. + Jonel Timbergen + jonel.timbergen&hubject.com +55054 + C-Labs Corporation + Markus Horstmann + markus.horstmann&c-labs.com +55055 + Larmia Control AB + Jerker Svenningstorp + Jerker.svenningstorp&larmia.se +55056 + Istanbul link Haberlesma + Mostafa Ibrahim + mostafa.ibrahim.kamel&gmail.com +55057 + riverdawn + JULIAO DUARTENN + juliao&riverdawn.pt +55058 + VFocuz Limited + Wilson Choi + wilson&vfocuz.com +55059 + DICSIT INFORMATIQUE + Eric BESSON + eric.besson&dicsit.com +55060 + BPC processing + Sergei Levin + sbrf&bpcprocessing.com +55061 + Activehack Technology + Danny Hong + danny&activehack.com +55062 + QNAP Systems, Inc. + Y.T. Lee + ytlee&qnap.com +55063 + Greeneris Sp. z o.o. 
+ Rafal Stas + biuro&greeneris.com +55064 + axelity ag + Thomas Moretti + t.moretti&axelity.com +55065 + Mavimax, ltd + Madars Vitolins + madars.vitolins&gmail.com +55066 + DataWorld.NET + Peter Wojciechowski + peter&dataworld.net +55067 + IDH Group Limited + Ronald Davidson + rdavidson&mydentist.co.uk +55068 + Corner Technical LLC + Justin Corner + justin&cornertechnical.com +55069 + Cerner Population Health Domain - CERN_PH + Britton Jennings + brittjennings&yahoo.com +55070 + Concepts Beyond + Ashley Kopman + akopman&conceptsbeyond.com +55071 + Integral LLC + Alexander Andreev + andreev_a&integralllc.ru +55072 + Tandem Consulting + Paul Biesbrouck + paul.biesbrouck&tandemconsulting.be +55073 + Rézo Rennes + Corentin Canebier + rezo&rez-rennes.fr +55074 + uGrid Network Inc + Eric Zhang + eric.zhang&ugridnet.com +55075 + Bluwale Technologies Inc. + qinglong zhang + sales&bluwaletech.com +55076 + UCLA Health ISS IGAM + Lucas Rockwell + lrockwell&mednet.ucla.edu +55077 + Sportfechten Laim e.V. + Matthias Brückner + m.brueckner&sportfechten-laim.de +55078 + MEDICODE + Philippe COLAZET + philippe.colazet&medicode.fr +55079 + Interrin + Andrey Shurygin + it&interrin.kz +55080 + Hochschule der Polizei des Landes Brandenburg + Daniel Sommer + daniel.sommer3&hpolbb.de +55081 + Expyram LLC + Mathew Arnold + consulting&expyram.com +55082 + Urlittle.biz S.A.S. + Sébastien GRENIER + sebastien.grenier&urlittle.biz +55083 + Findlay, Inc. + Connor Findlay + connor.findlay&gmail.com +55084 + PBC + Bashkim Selmani + info&pbc-ks.com +55085 + MAPER Tecnología S.R.L. + Diego Ismirlian + dismirlian&maper.com.ar +55086 + Alamo Analytics + Gerardo Colo + gerardo&alamo-analytics.com +55087 + PROPERTY MANAGEMENT SOFTWARE, SL + Chema Larrea + admin&rentger.com +55088 + Upsher-Smith Laboratories, LLC + Dustin Seagren + dustin.seagren&upsher-smith.com +55089 + Further System Co.,Ltd. + Ryuichi Saeki + secure-contact&furthersystem.com +55090 + BAMBOO INGENIERIA + CARLOS ENRIQUE + CGUERRA&BAMBOOINGENIERIA.COM +55091 + S.P.P.P. + Eric DEBRY + contact&grammeo.com +55092 + The Achievement Network + Reid Mulkey + rmulkey&achievementnetwork.org +55093 + JStyleTech + Josh Sutton + accounts&jstyletech.com +55094 + Sub Tech System + Kofi A. Ofori + support&subtechsys.net +55095 + Advanced Data Machines, LLC + Samuel Beskur + sam&advanceddatamachines.com +55096 + Beijing Qingqi Network Technology Co., Ltd + zhongping zhu + zhuzhp&126.com +55097 + SIGTUNA ÅKERI AB + Roger Järn + roger&sigtunaakeri.se +55098 + Wang Tai + ZhiLiang Han + hanzhiliang&wangtai-tech.com +55099 + Hunter Supply Chain Management (Shanghai) Co., Ltd. + Li, Bo + public&scmhunter.com +55100 + Chicony Power Technology Inc. + Leo Yang + Leo_Yang&chiconypower.com +55101 + RedWax Foundation + Dirk-Willem van Gulik + dirkx&redwax.eu +55102 + Pavo Tasarim Uretim Elektronik Tic. A.S. + Bilgi Islem + bilgiislem&pavotek.com.tr +55103 + Korkem Telecom + Sanat Baiguanysh + s.baiguanysh&otgroup.kz +55104 + Xorcom Ltd + Leonid Fainshtein + Leonid.Fainshtein&xorcom.com +55105 + Modern University for Business and Science + Dr. Bassem Kaissi + bkaissi&mubs.edu.lb +55106 + The Fedeli Group, Inc. 
+ Ryan Milligan + RMilligan&thefedeligroup.com +55107 + Kalevi Kolttonen homepage + Kalevi Kolttonen + kalevi&kolttonen.fi +55108 + Riedo Networks Ltd + Adrian Riedo + info&riedo.com +55109 + Nusura, Inc + Kenneth Colwell + adsteam&nusura.com +55110 + Union College + Phillip Horn + techadmins&unionky.edu +55111 + Steinbeis-Zentren Unternehmensentwicklung an der Hochschule Pforzheim + Sascha Xander + edv&szue.net +55112 + Certes Networks Inc. + Sean Everson + iana-contact&certesnetworks.com +55113 + Covéa + Support + support.java&covea.fr +55114 + KORE Wireless + Dirk Baijens + dbaijens&korewireless.com +55115 + South Eastern H&SC Trust + Liam.Hudson + ictdatacentremanager&setrust.hscni.net +55116 + Chunghua Intelligent Network Equipment Inc. + Miranda Chen + miranda&cinet.com.tw +55117 + Beijing bravo technology Co.,Ltd + tao.li + taoi.li&bwsctv.com +55118 + Alkit Communications AB + Mathias Johanson + mathias&alkit.se +55119 + Docler Solutions Kft. + Gabriella Feind + feind.gabriella&doclersolutions.com +55120 + Encom wireless Data solutions + Mike Kwan + MikeK&EncomWireless.com +55121 + Nanjing Huastart Network Technology + He Yuxin + he.yuxin&huastart.com +55122 + abiliware GmbH + Artur Krueger + a.krueger&abiligroup.de +55123 + Statewide FCU + Statewide IT + itsupport&statewidefcu.org +55124 + JOSEPH F. ROBERTS, MD PA + ANGELIA EDWARDS + angelianurse&bellsouth.net +55125 + SALES PERITIAS Group + Miroslav Crnkovic + payment&salesperitias.com +55126 + Prolixium Enterprises, LLC + Mark Kamichoff + prox&prolixium.com +55127 + IsUnreal + Marcel Siemienowski + postmaster&isunreal.de +55128 + Mate Consulting srl + Gregorio Barberio + g.barberio&mateconsulting.it +55129 + Voxx International Corporation + Jeffrey R Heinssen + JHeinssen&voxxintl.com +55130 + Deutsche Telekom AG (GHS) + Jonas Kopp + jonas.kopp&telekom.de +55131 + Advanced Cancer Care of New Jersey PC + Hemangini Shah + hemishah&gmail.com +55132 + Thales Netherlands BV + Dennis Semmekrot + dennis.semmekrot&nl.thalesgroup.com +55133 + x007007007 + xingci.xu + x007007007&hotmail.com +55134 + GERAP + Alexandre Renard + arenard&gerap.fr +55135 + Cyberdyne Systems, Inc. + John Connor + mrexcess&gmail.com +55136 + Mako Networks + Chris Massam + chrism&makonetworks.com +55137 + Qubit Solutions Limited + Michael Kolias + systems&qubit.com.cy +55138 + Transtrands Besparingsskog + Fredrik Eriksson + fredrik.eriksson&transkog.se +55139 + AlsoEnergy, Inc. + Seymen Ertaş + sertas&alsoenergy.com +55140 + Internal Medicine LLC + Katherine Bird + Internalmedicinellc&gmail.com +55141 + CS SOFT a.s. + Jan Zetek + admin&cs-soft.cz +55142 + Kath. Marienkrankenhaus gGmbH + Norbert Groth + hotline.it&marienkrankenhaus.org +55143 + iXblue + Stéphane Meyer + stephane.meyer&ixblue.com +55144 + RKU + Lars Luennemann + pen&rku.de +55145 + is-jo.org Online Services + Markus Feiler + markus.feiler&is-jo.org +55146 + Uniti Fiber + Graham Wooden + graham.wooden&uniti.com +55147 + SSL-CERTS Limited + Abdulrahman Al-Dabbagh + aldabbagha&learntotechsolutions.com +55148 + Die Mompfdies - IT Consult + Raimund Pließnig + pki&die-mompfdies.at +55149 + IoT Advanced Control + Juan Olivieri + juanpabloolivieri&gmail.com +55150 + SecureG, Inc + Carlos C Solari + carlos&secureg.co +55151 + Kronnika + Baris Ala + info&kronnika.com +55152 + CODE100 SA + FABIAN ACOSTA + FACOSTA&CODE100.COM.PY +55153 + Thomas Wild + Thomas Wild + kontakt&thomas-wild.de +55154 + Octabase + Mehmet Gürevin + hello&octabase.com +55155 + Novel-SuperTV Tech. Co. 
+ Famin Ke + kfm&novel-supertv.com +55156 + Christian County Library + IT Department + tech&christiancountylibrary.org +55157 + MetroMail Ltd + Lee Barwick + lee.barwick&metromail.co.uk +55158 + ift Rosenheim GmbH + Lillyan Burhan + pki&ift-rosenheim.de +55159 + Caritasverband für die Diözese Osnabrück e.V. + Ulrich Sander + verwaltung-edv&caritas-os.de +55160 + dentaltrade GmbH + Rainer Zoppke + edv&dentaltrade.crsg.online +55161 + Expobank CZ a.s. + ITinfrastructure team + itinfra&expobank.cz +55162 + Cello Communications Ltd + Jonathan Holt + Jonathan.holt&cello.co.nz +55163 + Disk Archive Corporation + Andrea Pedretti + andrea.pedretti&diskarchive.com +55164 + Saltant Solutions LLC + John W. O'Brien + jobrien_pen&saltant.net +55165 + Nova Measuring Instruments + Yaniv Luria + admin&novami.com +55166 + 1NAR BILISIM DANISMANLIK EGT. SAN. VE TIC.LTD.STI. + SERKAN BEKTAŞ + serkan.bektas&1nar.com.tr +55167 + Aonta Technologies Ltd + David Seavers + david.seavers&aonta.com +55168 + AS/point GmbH + Wilhelm Krahe + wkrahe&aspoint.de +55169 + RxRevu Inc + Ritchie Latimore + ritchie.latimore&rxrevu.com +55170 + P&W Netzwerk GmbH & Co. KG + Ulrich Puschmann + technik&puw-netzwerk.de +55171 + IP Kulikov V.I. + Vladislav Kulikov + v&snkr.ru +55172 + PRISTALICA + Ramon Perez Silva + ramon&pristalica.com +55173 + Timbatec Holzbauingenieure Schweiz AG + Urs Fluekiger + urs.fluekiger&timbatec.ch +55174 + LPS France + Kevin Le Perf + kevin.leperf&nonopn.com +55175 + Hans Sasserath GmbH & Co. KG + Markus Krautner + it-Registrierung&syr.de +55176 + Hitachi Industrial Equipment Systems Co.,Ltd. + Tasuku Suzuki + suzuki-tasuku&hitachi-ies.co.jp +55177 + MOS Corporate Services GmbH + Felix Hafner + domain&oechsler.com +55178 + OECHSLER AG + Felix Hafner + domain&oechsler.com +55179 + OECHSLER Motion GmbH + Felix Hafner + domain&oechsler.com +55180 + Säters Kommun + Johan Lonnberg Drake + it&sater.se +55181 + GCHQ + Sam Smith + software&gchq.gov.uk +55182 + Sunberry Systems Ltd + Dita Gabalina + dita.gabalina&gmail.com +55183 + New Telecommunication Technologies LLC + Victor Lee + teraser&mail.ru +55184 + NetSpider GmbH + Daniel Fehse + daniel.fehse&netspider.ch +55185 + Prophecy International + Leigh Purdie + iana&prophecyinternational.com +55186 + Flightkeys GmbH + Martin Sponner + martin.sponner&flightkeys.com +55187 + Regional Cancer Care Associates LLC + iuliana Shapira + ishapira&regionalcancercare.org +55188 + Bourne Leisure Ltd + Dave Newton + itinfrastructure&bourne-leisure.co.uk +55189 + neuw + Karanbir Singh + krnbr&live.in +55190 + IoT Open One AB + Marcus Rejås + marcus.rejas&iotopen.se +55191 + PT. E-T-A Indonesia + Darmaji Darmaji + darmaji&e-t-a.co.id +55192 + COMMERCIAL BANK VRB (LLC) + Sergey Konnov + sergey.konnov&vrbmoscow.ru +55193 + ELECTRONIC IDENTIFICATION, S.L. + CRISTINA ROMERA SOTO + ex_cristina.romera&electronicid.eu +55194 + AmjadPlastics Ibrahim Amjad MD + Ibrahim Amjad + manager&driamjad.com +55195 + Base Telco + Rodrigo Rocha + rodrigo.rocha&basetelco.com +55196 + Direção Regional da Saúde do Governo Regional dos Açores + Igor Lima de Azevedo + Igor.L.Azevedo&azores.gov.pt +55197 + Wilhelm Geiger GmbH & Co.KG + Patrick Wagner + webmaster&geigergruppe.de +55198 + Folino Enterprises + Nick Folino + nick&folino.us +55199 + Modern Teaching Aids + Sachin Nand + snand&modernstar.com +55200 + Pioneer Long Distance, Inc. + Donnie Miller + sysadmin&pldi.net +55201 + PROTAHUB Uluslararasi Bil. ve Ilet. Tek. Arge Dan. Des. Paz A.S.
+ Irfan Kurtulmuslar + iana&protahub.com +55202 + Hainan Zhongming Technology Co., Ltd + Chang Feng + changfeng&xiaoyetech.com +55203 + NetX Networks a.s. + Tomas Podermanski + info&netx.as +55204 + Blackpool Council + Richard Percival + richard.percival&blackpool.gov.uk +55205 + Stura University Leipzig + Eric Mieth + eric.mieth&stura.uni-leipzig.de +55206 + PROMETEON TYRE GROUP S.r.l. + Remzi Ejderoglu + postmaster&prometeon.com +55207 + Uralenergotel, LLC + Vadim Voronov + voronov-va&uetel.ru +55208 + Lviv municipal academic theatre arts research and educational centre Word and Voice + Natalia Polovynka + slovo.i.golos&gmail.com +55209 + Town of Groton + Brian Hancock + it_helpdesk&groton-ct.gov +55210 + Alohanet + Steven Lei + 122014493&qq.com +55211 + dpco + Ali Rahmanian + rahmanian&dpco.net +55212 + IT Gesellschaft für Informationstechnik mbH + Klaus Gütter + klaus.guetter&it-gmbh.de +55213 + KAMPO + Jin Rong Fei + jinrf&kampo.cn +55214 + Smile America Partners + Charles Pippin + cpippin&mobiledentists.com +55215 + Stadt Kempten + Peter Augustus + iana&kempten.de +55216 + Rotom Nederland + Rens van de Paal + Support&rotom.eu +55217 + Zyston LLC + Alexis Conlin + aconlin&zyston.com +55218 + Drive and Shine + John Byers + jbyers&driveandshine.com +55219 + Sirona Medical + David Paik + dicom&sironamedical.com +55220 + Franklin Fastener Co + Ted Hayes + certificate&franklinfastener.com +55221 + Debarry Correa Ltda + Christiano Debarry + debarry&debarry.com.br +55222 + Actindo AG + Patrick Prasse + hostmaster&actindo.com +55223 + TAHOE + Piotr Kaczmarzyk + piotr&tahoe.pl +55224 + Capgemini CSD + DENIS Philippe + philippe.denis&capgemini.com +55225 + T24 Competence Centre sp. z o.o. sp. k. + Pawel Piaskowy + office&t24cc.com +55226 + SAP Ariba + Thomas Donnelly + thomas.donnelly&sap.com +55227 + PLINTRON Mobility Solutions Private Limited + Mr. Prakash Kumar Senapati + networks&plintron.com +55228 + China Telecom Global Limited + Gary Kam + garykam&chinatelecomglobal.com +55229 + Information Center, Shanghai Municipal Education Commission + Qi Feng + its&cloud.sh.edu.cn +55230 + RIXCAT SIA + MARIS GABALINS + maris.gabalins&rixcat.lv +55231 + Zwilling J.A.Henckels AG + Björn Kaleck + bjoern.kaleck&zwilling.com +55232 + BIOCEV + Filip Horák + itdesk&biocev.org +55233 + Stichting De Opbouw + Marc van der veer + systeembeheer&opbouw.nl +55234 + Payukotayno James and Hudson Bay Family Services + George Sutherland + it.servicedesk&payukotayno.ca +55235 + Jeff Wyler Automotive Family, Inc. + Brian Hoffman + bhoffman&jeffwyler.com +55236 + NETFLOWBROADBAND PVT LTD + Tausif Shaikh + tausif.tss&gmail.com +55237 + conpal GmbH + Ralf Engers + rmengers&conpal.de +55238 + Connecta Sp. z o.o. + Piotr Kaczmarzyk + piotr&connecta.pl +55239 + Cryptable BVBA + David Tillemans + david.tillemans&cryptable.org +55240 + Encode + Andreas Skylitsis + postmaster&encodegroup.com +55241 + Alta Rail Technology + Luiz Henrique Duma + luiz.duma&alta-rt.com +55242 + Rick Stroobosscher + Rick Stroobosscher + rick.stroobosscher&gmail.com +55243 + COMPESO GmbH + Harald Paulus + HPaulus&compeso.com +55244 + Fast Object LLC + Henry Zhou + henryzhou&fastobject.net +55245 + Capital Region Orthopaedics Associates + Tina Hakala + thakala&caportho.com +55246 + DIAXE + Adamantios Kyriakakis + dk&diaxe.com +55247 + Working Group Two + Erlend Prestgard + abuse&wgtwo.com +55248 + Treetop Innovation AB + Håkan Andersson + hakan.andersson&treetop.se +55249 + S.E.D. 
- A GPL Smart Embedded Devices + Chinda KEODOUANGSY + chindakeodouangsy&gmail.com +55250 + MOBIWEB + Diamantis Kyriakakis + diamantisk&solutions4mobiles.com +55251 + Guangzhou Navigateworx Technologies Co., Ltd + Frank Chen + support&navigateworx.com +55252 + Notesco Financial Services Limited + Socrates Socratous + sisocratous&notesco.com +55253 + brunner & gasser ceTec + Erich Gasser + erich.gasser&elektro-gasser.ch +55254 + Broadvine + Yury Nemchin + ynemchin&broadvine.com +55255 + RELOPS LTD + Ben Hood + ben&relops.com +55256 + Veon Georgia LLC + Iuri Tigiev + ytigiev&beeline.ge +55257 + Lightcyphers + Anatolie GOLOVCO + anatolie.golovco&lightcyphers.com +55258 + Harper Creek Community Schools + Jim Maynard + maynardj&harpercreek.net +55259 + Opus Holding, L.L.C. + Ken Stieers + ken.stieers&opus-group.com +55260 + TROPHY ELECTRONICS LLC + Andrii Tulparov + andrej.tulparov&gmail.com +55261 + Rosenberger Technologies Co., Ltd. + Rita Wang + wangdongxue&Rosenbergerap.com +55262 + Rosenberger Asia Pacific Electronic Co., Ltd. + Rita Wang + Rita&rosenbergerap.com +55263 + Haier Uplus Intelligent Technology (Beijing) Co., Ltd. + Betty Zhao + zhaomu.uh&haier.com +55264 + The Gnou Community + Sebastien Boulay + sebastien.boulay&the-gnou-community.net +55265 + RKU + Lars Luennemann + information&rku.de +55266 + SydIT + Stefan Midjich + stefan.midjich&gmail.com +55267 + Zeel Infotech Pvt. Ltd. + Rupesh Sangoi + rupesh&zeel.co.in +55268 + BEDIA Motorentechnik GmbH & Co.KG + Della Volpe Giuliano + dellavolpe&bedia.com +55269 + Datareon + Olga Kakutina + o.kakutina&datareon.ru +55270 + Fuel Business Intelligence SA + Diego Tondo + diego.tondo&cintelink.com.ar +55271 + GoodWe Technologies Co., Ltd. + James Chen + james1&goodwe.com +55272 + Glory Technology Service Inc. + Jackson Chen + jackson.chen&glory-tek.com +55273 + AROY System Pvt. Ltd. + Arnabendu Roy + aroy.systems&gmail.com +55274 + FlexSCADA + Jon Mundall + Jon&comcomservices.com +55275 + AVAG Holding SE + Mario Borowka + technik&avag.eu +55276 + EUROSIGN + Emmanuel MATHIEU + emmanuel&eurosign.com +55277 + Liberty HR Recruitment Ltd + Jane Barry + jane&libertyhr.co.uk +55278 + MartTrend + Renato Sapienza + renato.sapienza&marttrend.com.br +55279 + Cygna Labs Corp. + David Ruginski + dave&cygnalabs.com +55280 + BFG Supply Co + Doug Wolfe + dwolfe&bfgsupply.com +55281 + EyeConsultantsPC.com eyeoptics.net ECEO.us + Robert Townley + robert&eyeconsultantspc.com +55282 + Dr. Pfau Fernwirktechnik GmbH + Andreas Pfau + a.pfau&alarmmanagementsystem.com +55283 + OPER8 Global lImited + SIMON GARDNER + Simon.gardner&oper8global.com +55284 + kganyaoscar + oscar kganyago + oscarpapie1&gmail.com +55285 + Hessel Ventures, LLC + Michael Hessel + HESSELVENTURES&gmail.com +55286 + dsb it services GmbH + Dirk Merten + support&dsb-its.net +55287 + Banking Circle + Michael Vedel + IToperations&bankingcircle.com +55288 + allRadio + yoo seongsook + yss6390&allradio.co.kr +55289 + Beijing Yutian Technology Co., Ltd. + Shi Zhenyong + shizy&yutiantech.com +55290 + Smart Skog AB + Alexander G Haaland + alexander.g.haaland&gmail.com +55291 + Arrival Limited + Andrey Kostin + admin&arrival.com +55292 + SC ADI COM SOFT SRL + Cosmin Prund + cosmin.prund&adicomsoft.ro +55293 + Candisa Systems + Luis Sayago + luis&candisa.com +55294 + Blair Foot and Ankle LLC + Bridget Corey + bridgetcorey85&gmail.com +55295 + IDS Airnav s.r.l. + Daniele Lenzo + daniele.lenzo&idsairnav.com +55296 + Hillsboro Orthopedic Group, Inc.
+ Karen Rothstrom + krothstrom&hillsorthogroup.com +55297 + XueHao Science And Technology Company Limited + JiangXueHao + RDCenter&xuehao.email +55298 + Redslareds Skogstransporter HB + Lars Larsson + lars&skogstransporter.se +55299 + NTM Oy Ab + Johannes Heikkinen + johannes.heikkinen&ntm.fi +55300 + Abies Technology Inc. + YS Chen + sales&abiestech.com +55301 + PROMIS JSC + Pavel Konnov + konnov_p&promis.ru +55302 + Neovox + Matvey Solodovnikov + m.solodovnikov&neovox.ru +55303 + Varnamo kommun + Peter Sjolin + peter.sjolin&varnamo.se +55304 + The Wey Valley Academy + Mr Alexander Tibbey + tibbeya&weyvalley-academy.co.uk +55305 + NGMEDIA + FREDERIC CLEMENT + fclement&n-g-media.com +55306 + SafeKiddo Sp. z o.o. Sp.k. + Marcin Marzec + marcin.marzec&safekiddo.com +55307 + Czech Pirate Party + Technical Department + domeny&pirati.cz +55308 + ixecloud + Richard Su + lei.su&ixecloud.com +55309 + Sony Interactive Entertainment, LLC + Sean Ryan + sean.ryan&sony.com +55310 + Infocare Healthcare Services (Irl.) Ltd. + Sean McLaughlin + sean.mclaughlin+OID&infocarehealth.com +55311 + Facultad de Ciencias Médicas - UBA + Fernando Broqua + fbroqua&fmed.uba.ar +55312 + Kerr, Russell and Weber, PLC + Rebecca Lohr + KRWaccounts&kerr-russell.com +55313 + Compelling Technologies, LLC + OID Administrator + info&compellingtech.com +55314 + LLC NPO "Svyazkomplektservis" + Anton Dundukov + a.dundukov&skss.ru +55315 + Auerswald GmbH & Co. KG + Mark Rodehorst + oidmaster&auerswald.de +55316 + ROBERT DANIEL BUONOCORE + ROBERT DANIEL BUONOCORE + Tynas5210&gmail.com +55317 + Lely Holding BV + Jeroen Kulk + security&lely.com +55318 + X2 + Andrej Ramašeuski + andrej&x2.cz +55319 + Niagara Falls Memorial Medical Center + Peggy Grandinetti + peggy.grandinetti&nfmmc.org +55320 + pingdash AB + Pontus Engblom + noc&pingdash.se +55321 + Loralab + Serik Tuleshev + loraiotlab&gmail.com +55322 + Núcleo de Comunicaciones y Control, S.L. + Miguel Angel Pastor Benavent + miguel.pastor&nucleocc.com +55323 + Y20 Works + Cheyin Liao + y20w&yinyin.info +55324 + SCION Association + Matthias Frei + matzf&scion.org +55325 + Komprise + Kumar Goswami + kumar.goswami&komprise.com +55326 + State University of New York College of Technology at Delhi + Scott May + maysa&delhi.edu +55327 + SOS International LLC + Daniel Bizon + domainPOC&sosi.com +55328 + Tatweer + Houssam Melhem + houssam.melhem&tatweer.sy +55329 + MIC Global Services + Zak Rodriguez + zak.rodriguez&micglobalservices.com +55330 + adKor GmbH + Hartmut Kordus + hartmut.kordus&adkor.de +55331 + Noroutine + Oleksii Khilkevych + oleksiy&noroutine.me +55332 + PKI Solutions Inc + Mark B Cooper + mark&pkisolutions.com +55333 + Nanjing Buruike Electronics Technology Co., Ltd. + Weihua Tian + support&brickelectric.com +55334 + Digivalet + Priyank Shah + priyank&digivalet.com +55335 + yantaijiandianziCo.,Ltd. + YongLiang Li + happyboy036&163.com +55336 + Real Time Data Pty Ltd + Caleb Froese + calebf&teamrtd.com +55337 + RadioLab Srl + Stefano Nardo + stefano.nardo&radiolabonline.com +55338 + SMOK spółka z ograniczoną odpowiedzialnością + Piotr Maślanka + pmaslanka&smok.co +55339 + OptimERA Inc + Jester Purtteman + jester&optimerainc.com +55340 + Xware innovations + Ryan Mullenax + Ryan&xwinn.com +55341 + LA SAI + Olivier Darlot + o&lasai.fr +55342 + My Kidney Care L.L.C. + Meagan Strider + mstrider&mykidneycarellc.com +55343 + EZY-IO INC + Hui Huang + hui&ezyio.com +55344 + Basein Networks, Inc. 
+ Donghoon shin + shin&basein.net +55345 + CMSS + R Keith Beal + r&cmss.com +55346 + Corporation Limited + Yichen Lu + pen&corpltd.net +55347 + HENRIETTA Piotr Maślanka + Piotr Maślanka + piotr.maslanka&henrietta.com.pl +55348 + Back Alley Labs + Craig Glaser + gr-admin&glaserrocks.com +55349 + Sipartech + Barriquand Paul + paul.barriquand&sipartech.com +55350 + Siemens Healthcare GmbH (SHS CS SLM SRS RSO) + Klaus Reinelt + asd.team&siemens-healthineers.com +55351 + Core Security, A HelpSystems Company + Pablo A. Zurro + pablo.zurro&helpsystems.com +55352 + Monty UK Global Ltd. + Farouk Tabbal + farouk.tabbal&montymobile.com +55353 + Ackerman Oil Co., Inc + Jeff Strother + support&ackoil.com +55354 + Allmon Technologies LLC + Anthony Allmon + allmonaj&gmail.com +55355 + VPM Media Corp + Austin Wright + awright&vpm.org +55356 + Huskey Truss & Building Supply + Poindexter Truss + PTruss&HuskeyTruss.com +55357 + Lakeside Robotics Corporation + Michael Sweet + msweet&lakesiderobotics.ca +55358 + Macali + Manuel Castineira + macalix&gmail.com +55359 + Huan's Software + Huan Wang + fredwanghuan&gmail.com +55360 + Brain Updaters + Pau Roura + pau&brainupdaters.net +55361 + Alphageek Systems + M. A. Walter + mike.walter&alphageeksystems.co.uk +55362 + Softronics Ltd. + Eric Ratliff + EricR&softronicsltd.com +55363 + TSGT.coop + Jerry Xiong + jxiong&tristategt.org +55364 + Groupement Belge des Graphothérapeutes asbl - GBGT asbl + Tramasure Sylvie + courriergbgt&gmail.com +55365 + Shanghai Mission Information Technologies (Group) Co., Ltd + Hui Li + lihui2&missiongroup.com.cn +55366 + TopBuild + IT Department + serversupport&topbuild.com +55367 + Six Degrees Technology Group Limited + Guven Mucuk + guven.mucuk&6dg.co.uk +55368 + Applied Concepts, Inc. + Russell Kautz, CTO + russell&a-concepts.com +55369 + L.I.S. + Sylvain MADERA + smadera&lis47.fr +55370 + Rockport Networks Inc. + Azim Jaffer + ajaffer&rockportnetworks.com +55371 + Mercadolibre + Fernando Russ + fernando.russ&mercadolibre.com +55372 + Companhia de Tecnologia e Sistemas de Minas Ltda. + Thiago Moura + admin&hills-tech.com +55373 + SDS System Design Service + John Jiang + jchiang&sds-tw.com +55374 + ARTIDIS AG + Philipp Oertle + philipp.oertle&artidis.com +55375 + blue:solution software GmbH + Rudolf Melching + ddenk&bluesolution.de +55376 + wocu-monitoring + Wocu Ops + wocu-ops&a3sec.com +55377 + Kaufhaus Martin Stolz GmbH + IT-Administration + iana&kaufhaus-stolz.network +55378 + Haws Corporation + IT Directory + hawsoid&hawsco.com +55379 + Bellarmine University + Shawn Snapp + ssnapp&bellarmine.edu +55380 + Nettrix Information Industry Co., Ltd. + "Yong" "Chen" + chenyonga&nettrix.com.cn +55381 + Omniscient Neurotechnology + Hugh Taylor + hugh.taylor&omni-neuro.com +55382 + Cheshire and Wirral Partnership NHS Foundation Trust + Aidan Chesworth + cwp.ictservicedesk&nhs.net +55383 + Intv Prime + Contact Executive + contact&intvprime.com +55384 + iCrypto Inc. + Adarbad Master + adarbad&icrypto.com +55385 + Tecnitia Servicios TIC S.L. + Diego Almagro + diego.almagro&tecnitia.com +55386 + NI Blood Transfusion Service + David Moore + itadmin&nibts.hscni.net +55387 + Tenon GmbH + Sebastian Böhm + office&tenon-backup.com +55388 + Frank Senkel + Frank Senkel + info&frank-senkel.de +55389 + Prodia Widyahusada + Lydwina + lydwina&prodia.co.id +55390 + wallbe GmbH + Oliver Duerr + oliver.duerr&wallbe.de +55391 + Ray Pte. Ltd. 
+ Hemal Patel + registrations&ray.life +55392 + Institut für Arbeitsforschung - IfADo + Markus Konhoff + edv&ifado.de +55393 + Sentrica + Juan Jose Sierralta + juanjo.sierralta&sentrica.cl +55394 + Personalized Imaging Consultants + Wesley Falconer + wesleyf&ccsreno.com +55395 + Elektronika Krejan + Mihael Krejan + info&krejan.si +55396 + Painkiller AS + Thommy Mikkelsen + tm&painkiller.no +55397 + Jiangsu ZhongAnZhiXin Communication Technology Co., Ltd. + zhangsenfei + 15850200276&china-ssc.com +55398 + Jiangsu ZhongAnZhiXin Communication Technology Co., Ltd. + zhangsenfei + 15850200276&china-ssc.com +55399 + Technology Solutions Midwest + Scott Wells + scott.wells&tsmidwest.com +55400 + Secheron S.A. + Ferran Arumi + ferran.arumi&secheron.com +55401 + ENALISS GmbH + Ray Singel + help&enaliss.com +55402 + New Impulse 50 + IT department + it&utkonos.ru +55403 + News7haridwar + Purushotam Kumar + News7haridwar&Gmail.com +55404 + celerway + Lukasz + l.baj&celerway.com +55405 + Western Health and Social Care Trust + Paul McNulty + paul.mcnulty&westerntrust.hscni.net +55406 + Alstria office REIT-AG + Jan Burk + jburk&alstria.de +55407 + INFOLOGIC + Stephane REVOL + dsi&infologic.fr +55408 + American Health Network + Jay Williams + jay_williams&ahni.com +55409 + G-Way Solutions, LLC + Tsvika Blekher + t_blekher&gwaymicrowave.com +55410 + Salvatores Mundi + Paul Ciarlo + paul&salvatoresmundi.com +55411 + Marcus Corporation + Craig Glaser + serveradmin&marcuscorp.com +55412 + EDGEMATRIX, Inc. + Takenori Sato + tsato&edgematrix.com +55413 + SCC Data Systems + Bill Mitchell + sccdatasystems&gmail.com +55414 + Schüchtermann Schiller'sche Kliniken Bad Rothenfelde + IT-Infrastruktur Team + Infra&schuechtermann-klinik.de +55415 + WEG S.A. + Richard Heller Baeumle + richardhb&weg.net +55416 + Selcraft, Ltd. + Alexander Kefely + avkefely&selcraft.ru +55417 + solutio IT- und mediendienste + Carsten Weisgerber + c.weisgerber&solutio-it.com +55418 + EnergyHub, Inc. + Max Whitney + techadmin&energyhub.net +55419 + Archway School + James Moth + jamesmoth&archwayschool.net +55420 + WellSky + Josh Clark + josh.clark&wellsky.com +55421 + BartzHouse.com + Jason Bartz + jason.bartz.gr&gmail.com +55422 + Bartz.io + Jason Bartz + jason.bartz.gr&gmail.com +55423 + CGL Consolidated Ventures, LLC + Directory Registrar + registrar&directory.cargoio.com +55424 + «Laboratoriya Infocommunikatsionnykh Setey» LLC + Grigoriy Drubetskiy + drubetskiy&labics.ru +55425 + NPK STG LLC + Anton Plastinin + avp&npkstg.ru +55426 + Moelven SvT + Kjell Persson Granö + kjell.persson-grano&moelven.se +55427 + Stiftelsen för Internetinfrastruktur + Jan Säll + sfg&internetstiftelsen.se +55428 + HanPu Co. Ltd + Qiang Li + 18986492380&189.cn +55429 + HuBei Polytechnic Institute + Qiang Li + 18986492380&189.cn +55430 + SecureCloud+ Limited + James Booth + james.booth&securecloudplus.co.uk +55431 + A.J. Advanced Equipment + Anthony Luciano + anthony.luciano710&gmail.com +55432 + Tar Techcert Soluções em Meio Digital Ltda + Cesar Murilo Batista Vieira + cesar.murilo&techcert.com.br +55433 + Bank Ochrony Środowiska + Tomasz Miazga + bezpieczenstwo&bosbank.pl +55434 + Liberty Diversified International + Joshua Payeur + joshuapayeur&libertydiversified.com +55435 + Aetheros + Will Bell + will&aetheros.com +55436 + Solax Power Network Technology(Zhejiang) + GuoHuawei + guohuawei&solaxpower.com +55437 + Erste&Steiermärkische Bank d.d. 
+ Marina Baric + mbaric3&erstebank.hr +55438 + Krizik Malaysia Sdn Bhd + Mohd Ilham Zulkifli + ilham&kmsb.com.my +55439 + GPlusMedia Inc. + Sébastien Gallet + websecurity&gplusmedia.com +55440 + SQUALIO + Aleksandrs Frolovs + Aleksandrs.Frolovs&squalio.com +55441 + Radius Elnet + Søren Steenslev + sstee&radiuselnet.dk +55442 + Cubic Mission Solutions + Nicholas Podolak + dtech.support&cubic.com +55443 + sonnen, Inc. + Gaurav Shah + g.shah&sonnen-batterie.com +55444 + Sandpolis + Tyler Cook + tcc&sandpolis.com +55445 + Matthieu TIRELLI + Matthieu TIRELLI + matthieu.tirelli&gmail.com +55446 + Tandarts M.S. van der Linden + Mark S. van der Linden + mark&tandartsvanderlinden.nl +55447 + dimensions11.net + z. dim + root&dimensions11.net +55448 + NETROX SYSTEMS s.r.o. + Milan Kundrát + info&netrox.sk +55449 + MangoTelecom + Nikita Mashko + sysadm&mangotele.com +55450 + IVECloud (Pty) Ltd + Donovan Van Der Merwe + donovan&ghsweb.co.za +55451 + Enable-U B.V. + Tomas Liem + it&enable-u.com +55452 + JSC "AGROBANK" + Meleshin Nikolay Evgenyevich + nikolay&agrobank.uz +55453 + Sprecher Brewing Co. Inc. + Tim Wright + sprecherbr&sprecherbrewery.com +55454 + Elgama-Elektronika + Robertas Matusa + info&elgama.eu +55455 + dualstack AG + Stefan Marti + contact&dualstack.ch +55456 + Ochsner Clinic Foundation + Glen Picadash + glpicadash&ochsner.org +55457 + InfiniGold Operating Pty Ltd + Jelte van der Hoek + jelte.vanderhoek&infinigold.com +55458 + Ajay Software + Ajay Bommena + ajaybommena2010&gmail.com +55459 + 成都七维频控科技有限公司 (Chengdu Qiwei Frequency Control Technology Co., Ltd.) + CUI BAOJIAN + cuiyige2005&126.com +55460 + AKDB-OSRZ + Benedikt Lutz + service.outsourcing&akdb.de +55461 + UK Grid Solutions Limited + Zhi Shi + zhi.shi&ge.com +55462 + White Box Networks + Paul Fleming + pfleming&wyltk.com +55463 + Studio XP + Keven Chausse + keven.chausse&studioxp.ca +55464 + SYNVERSO d.o.o + Dalibor Franjić + dalibor&synverso.com +55465 + Viktora Medizintechnik GmbH + Christian Gundermann + gundermann&viktora-medizintechnik.de +55466 + GLVI Gesellschaft für Luftverkehrsinformatik mbH + Kai Lothar John + l.john&glvi.de +55467 + Nerd4ever Desenvolvimento Tecnologico e Inovacao Ltda + Sileno de Oliveira Brito + sobrito&nerd4ever.com.br +55468 + michaelwaterman + Michael Waterman + michael.waterman&outlook.com +55469 + Bayu Dwiyan Satria + Bayu Dwiyan Satria + bayudwiyansatria&gmail.com +55470 + Premier Trailer Leasing + IT + it_tickets&premier-us.net +55471 + Etung Technology Co.,Ltd + min zhou + min.zhou&etungtech.com +55472 + CTFF + Jasmine Sa + sabzh&ffcs.cn +55473 + China Film Giant Screen Co. Ltd + John Troy + cuixiaoyu_john&cgstheater.cn +55474 + INFORION, OOO (limited liability company) + Evgeny Bukotko + info&inforion.ru +55475 + oxdeca + Wolfgang Schaefer + wolfgang.schaefer&oxdeca.com +55476 + Mir Upakovki + Vladimir Khailov + itpartners&mirupak.ru +55477 + Meter, Inc. + Anil Varanasi + anil&meter.com +55478 + T-Systems Austria GesmbH + Peter Weghofer + OIDReg&neonet.at +55479 + Greig Mitchell + Greig Mitchell + greig.mitchell&greigmitchell.co.uk +55480 + Landesbetrieb LBB + Reinhard Sabel + ianapen&lbbnet.de +55481 + PKP Intercity S.A. 
+ Michał Nowak + sekcja.microsoft&intercity.pl +55482 + ZJinJa + Ruoyan Zhang + admin&z-touhou.org +55483 + Kellenberger & Co AG + Alex Buschor + informatik&kellenberger.net +55484 + DMHIS + Jeremy Clech + jclech&dmhis.com +55485 + Sierra Automated Systems & Engineering Corp + Edward Fritz + ed&sasaudio.com +55486 + JSC "GIS" + Dmitrii Iazykov + iazykov.dmitrii&globinform.ru +55487 + CookieDen Online + Chirag Shah + Chirag.Shah&CookieDen.online +55488 + Kirisun Communication Co.,Ltd. + eric.qin + eric.qin&szkirisun.com +55489 + Winnet Information + Xingjian Qin + q_xingjian&163.com +55490 + Vox Technologies + George Scott + security&voxtechnologies.com +55491 + Gradient Technologies + Richard Kelsey + richard&gradient.tech +55492 + Pawprint Prototyping + Zachary Sturgeon + me&ke4fox.net +55493 + Authlogics + Steven Hope + stevenh&authlogics.com +55494 + Streamworx + Timo Jung + hostmaster&streamworx.net +55495 + Elma Electronic AG + Johann Gysin + johann.gysin&elma.ch +55496 + Kantara Initiative, Inc. + Certification Department + certification-department&kantarainitiative.org +55497 + Centris AG + IT Collaboration + it-collaboration&centrisag.ch +55498 + Dr. Hahn GmbH & Co. KG + David Weitz + david.weitz&dr-hahn.de +55499 + Pilot Corporation Of Europe + David Stephen + it_infra&piloteurope.com +55500 + Guangdong OPPO Mobile Telecommunications Corp.,Ltd. + ChunLiang Zeng + zengchunliang&oppo.com +55501 + Digital Sense Hosting Pty Ltd + Noel Kelly + noel.kelly&digitalsense.com.au +55502 + Xaptum, Inc + Xaptum Noc + noc&xaptum.com +55503 + GREYCORTEX s.r.o. + Marnix Janse + marnix.janse&greycortex.com +55504 + UT Southwestern Medical Center + Nicole Rios + Nicole.Rios&UTSouthwestern.edu +55505 + domotof + christophe DAUBA + christophe.dauba&free.fr +55506 + Radial, Inc. + Victor Fisher + technology&radial.com +55507 + Graylog, Inc + Kay Roepke + kay&graylog.com +55508 + Shenzhen Qunfang Technology Co., Ltd + ZhuoLi + admin&szkingfox.com +55509 + Wetron Logistics B.V. + Alin Palade + AlinP&wetron.nl +55510 + Brighton & Hove City Council + Jack English + jack.english&brighton-hove.gov.uk +55511 + Hochschule für Wirtschaft und Gesellschaft Ludwigshafen + Michel Dombach + Michel.Dombach&hwg-lu.de +55512 + Rock West Composites + Gary Santana + it&1rockwest.com +55513 + Varner It Solutions + Douglas Varner + doug&varneritsolutions.com +55514 + Sheehans + Daniel Sheehan + sheehand&sheehans.org +55515 + TL Industries + Matt Edinger + matte&tlindustries.com +55516 + Interexport d.o.o. + Miha Pečnik + it&interexport.si +55517 + Struppkärrs Skog AB + Jan Ericsson + jan&struppkarr.se +55518 + DwarfHack + Marco Amann + admin&dwarfhack.com +55519 + Dirección General de Registro Civil Identificación y Cedulación + Santiago Puga + santiago.puga&registrocivil.gob.ec +55520 + Alpha Software Development Zrt. + Pethő Jonatán + jonatan&alphadev.hu +55521 + Brun Telecomunicazioni + Luca Vicentini + luca&brunsat.it +55522 + Hana Inc.
+ Zeen He + 1440519152&qq.com +55523 + Wobben Research and Development GmbH + Thorsten Jakobs + thorsten.jakobs&enercon.de +55524 + Nane OON GlobalCom Corporation + Praneet Rai + company&oonglobal.com +55525 + Nane + Praneet Rai + company&oonglobal.com +55526 + OON + Praneet Rai + company&oonglobal.com +55527 + ecert.ca + Praneet Rai + company&oonglobal.com +55528 + FileMeta Initiative + Brandt Redd + brandt&redd.org +55529 + IEEE Learning Technology Standards Committee + Brandt Redd + secretary&ieee-ltsc.org +55530 + VAS GmbH + Frank Przybylski + fp&vas-gmbh.de +55531 + Cambridge Pixel Ltd + Steven Priest + steve&cambridgepixel.com +55532 + Katze Laboratories + Tom Katze + flauschekater&gmail.com +55533 + FORT Robotics, Inc. + Robert Sherbert + bob&fortrobotics.com +55534 + Allgemeine Deutsche Burschenschaft + Tilo Ullrich + adb&tilo-ullrich.de +55535 + Hoymiles Converter Technology Co., Ltd. + Steven Zhang + zhangxingyao&hoymiles.com +55536 + Nexion Data Systems Pty Ltd + Nick Stefanou + nicks&nexiondata.com +55537 + Shenzhen ledc Technology Development Co., Ltd. + Xiaolin Zhu + rd01&drawerkvm.cn +55538 + ZyWaK + Support Team + contact&zywak.net +55539 + 911 Secure, LLC + Dan Gosselin + dgosselin&911secure.com +55540 + Chaotic Logic + Sean Stoves + sean&seanstoves.com +55541 + Tvv Sound Project BV + Edo Dijkstra + info&tvvsound.be +55542 + Skan Holding AG + Juan Miguel Severino Rubino + oidadmin&skan.ch +55543 + Din Bil Sverige AB + Vitalijs Belisko + vitalijs.belisko&dinbil.se +55544 + EZ5 Systems Ltd. + Sergey Turlachev + contact&ezfive.com +55545 + suawek + Sławomir Kowalski + iana&suawek.pl +55546 + IDCUBE Identification Systems Pvt. Ltd. + Ujjal Sarmah + ujjal&idcube.co.in +55547 + brainit.sk, s. r. o. + Eduard Baraniak + info&nfqes.sk +55548 + Hydrasun Limited + Ian Logie + it&hydrasun.com +55549 + South Central Ambulance Service NHS Foundation Trust + Ed Morgan + ITSupplier.Information&scas.nhs.uk +55550 + Kitsap Mental Health Services + Robert Lockhart + isaccounts&kmhs.org +55551 + SI-nerGIE + Mr LE TORTOREC Jeannick + jeannick.le_tortorec&framatome.com +55552 + Barrett Communications Pty. Ltd. + Eric Koch + eric_koch&barrettcommunications.com.au +55553 + OLAF + Dimo Chervenkov + Dimo.CHERVENKOV&ext.ec.europa.eu +55554 + East Lindsey District Council + Phil Davies + phil.davies&pspsl.co.uk +55555 + Rendeer Systems LLC + Charles Dasher + cdasher&rendeersys.com +55556 + Danderyds Kommun + Josef Ereq + josef.ereq&danderyd.se +55557 + GBGT asbl - Groupement Belge des GraphoThérapeutes asbl + Sylvie TRAMASURE + info&gbgt.be +55558 + CORESLAB STRUCTURES MIAMI, INC + ROMMEL J. JIMENEZ + itsupportmiami&coreslab.com +55559 + NJFVision + Nicolas Debernardi + nicolas.debernardi&gmail.com +55560 + layline.io GmbH + Stefan Deigmueller + stefan&layline.io +55561 + ATMD Ltd. + Vladimir Ilichev + atmd&atmd.ru +55562 + Confinity Solutions GmbH + Stefan Ott + stefan.ott&confinity-solutions.com +55563 + KTNF + Chiyong Jang + cyjang&ktnf.co.kr +55564 + Shanghai Bean Tech Co.,Ltd + Elvis Yang + elvis.yang&beantechs.com +55565 + Elektrizitätswerke Reutte AG + Dipl.-Ing. (FH) Patrick Schmidt + it&ewr.at +55566 + Beijing tongtech co., LTD + yong hu + tongoid&tongtech.com +55567 + Cadwork + Jean-Sebastien Paquet + it&cadwork.ca +55568 + Groupe Morneau + Yvan Leclerc + Yvan.Leclerc&groupemorneau.com +55569 + Poney Express + Wadii Zaim + wadii&poney-express.com +55570 + McFarlin Tech Consulting + Michael J McFarlin + mmcfarlin&mcfarlintech.net +55571 + Felton B.V. 
+ Jeffrey van Haeften + j.vanhaeften&felton.nl +55572 + Chris Whitfield IT + Chris Whitfield + chris_whitfield_it&yahoo.com +55573 + 北京元支点信息安全技术有限公司 (Beijing Yuanfu Information Security Technology Co., Ltd.) + 史晨伟 (Shi Chenwei) + shichenwei&yuanzhidian.com +55574 + frey-raum e.k. + Markus Frey + markus&frey-raum.net +55575 + Austin Regional Clinic, P.A. + Greg Madere + GMadere&AustinRegionalClinic.com +55576 + AuriStor, Inc. + Jeffrey Altman + jaltman&auristor.com +55577 + 4bergamot.net + Andrew Eastland + andrew.eastland&4bergamot.net +55578 + Invitech ICT Services Kft. + Balazs Safrany + ripe&invitech.hu +55579 + Ashley Leach & Associates Pty Ltd + Ashley Leach + a.leach&alassociates.com.au +55580 + staiger.it + Philipp Staiger + philipp&staiger.it +55581 + Bronics Infocom Inc. + Kyoung min. Lim. + troll98&naver.com +55582 + Balzano Informatik AG + Rene Balzano + rene&balzano.net +55583 + Pridnestrovskiy respublikanskiy bank + IT - department + ca&cbpmr.net +55584 + Felix Koop + Felix Koop + fkoop&fkoop.de +55585 + Thales LAS France + Patrick Redon + patrick.redon&thalesgroup.com +55586 + Simservice A/S + Steen Lenzing + sl&simservice.dk +55587 + Milan Reznicek + Milan Reznicek + pen&reznicek.biz +55588 + EA Technology Ltd + Lee OHalloran + lee.ohalloran&eatechnology.com +55589 + Roche Diagnostics International Ltd + Rouslan Partyka + rouslan.partyka&contractors.roche.com +55590 + Fourie Boerdery + Deon Fourie + fourieboerdery&xpress.co.za +55591 + LLC IT-KRYM + Viktor Smirnov + iana-pen&itpark.ru +55592 + XFrost Labs OÜ + Xavier Perarnau + iana-pen&xfrostlabs.org +55593 + Bouwonderneming Goevaers & Znn. B.V. + Bas Kratsborn + BasKratsborn&goevaers.nl +55594 + Cynthia Maja Revström + Cynthia Revström + iana&cynthia.re +55595 + Measures for Justice + Ryan Belair + ryan.belair&measuresforjustice.org +55596 + 5G Property Solutions + Brian Kinsey + bk&whoopwireless.com +55597 + Florida Atlantic University + Matt Ramsey + mramsey&fau.edu +55598 + Confluence Technologies, Inc + George Palmer + pki&confluence.com +55599 + Originpath + David Simal Gonzalez + david.simal&originpath.com +55600 + Thales - Secure Communications & Information Systems + Christopher Stinson + Christopher.Stinson&uk.thalesgroup.com +55601 + INDEPENDENCE BANK + IT Department + ibtech&ibyourbank.com +55602 + Eatontown Public Schools + Michael Brown + mcbrown&eatontown.org +55603 + Rummelsberger Diakonie + Thomas Mack + mack.thomas&rummelsberger.net +55604 + SYSTEM ENGINEERING & INFORMATION TECHNOLOGY SRL + Marco Vezzosi + supporto&seit.it +55605 + Bayerisches Staatsministerium für Digitales + Martin Kirschenbauer + ReferatB3&stmd.bayern.de +55606 + Ositech Communications Inc + Daniel Bartozzi + daniel&ositech.com +55607 + Cantada Inc + Thomas Pichieri + thomas.pichieri&cantada.com +55608 + Net Neural Ltd. 
+ Simon Sparks + simonsparks&netneural.ltd.uk +55609 + NADEKS LLC + Ruslan Annenkov + r.annenkov&nadeks.ru +55610 + Xaru AB + Gunnar Thoernqvist + gunnar&igl.se +55611 + Cosner-Neipp Corporation + Adam Clayson + _arin&cosner-neipp.com +55612 + American Century Investments + Lisa Benson + lisa_benson&americancentury.com +55613 + PRETTL Electronics GmbH + Alexander Neubert + alexander.neubert&prettl-electronics.com +55614 + MobileMS + Michal Stefanski + serwis&mobilems.pl +55615 + EtherMatic + Przemyslaw Sztoch + info&EtherMatic.com +55616 + RosettaHealth + Buff Colchagoff + buff.colchagoff&rosettahealth.com +55617 + Pacific Petroleum Pty Ltd + Chris Stegner + cstegner&pacificpetroleum.com.au +55618 + VertisPro Pte Ltd + Harshad P + harshad&vertispro.com +55619 + 宜春宜联打印设备有限公司 (Yichun Yilian Printing Equipment Co., Ltd.) + 曾小宜 (Zeng Xiaoyi) + zengxiaoyi&elineprint.com +55620 + Alpla Staging + Heyko Kannenberg + heyko.kannenberg&alpla.com +55621 + Cnuth Network UTH + Makoto Minami + objectmanager&nest.cnuth.net +55622 + 2CRSI + Alain WILMOUTH + ibo&2crsi.com +55623 + University of Birmingham School of Computer Science + Ian Batten + i.g.batten&bham.ac.uk +55624 + Eutelsat - MENA + Ahmad Saidawi + ahmad.saidawi&platform.jo +55625 + Absolunet inc. + Jeremie Rioux + jrioux&absolunet.com +55626 + ANYROAM LLC + Support + support&anyroam.net +55627 + ANYROAM LLC + Support + support&anyroam.net +55628 + PacsLogic LLC + Matthew Granger + Matt&PacsLogic.com +55629 + Department of Health Tasmania + Ben Short + ben.short&health.tas.gov.au +55630 + Advanced Medical Footcare, LLC + Song Yu, DPM + drsongyudpm&yahoo.com +55631 + Dymond Consulting + James Dymond + iana-pen&dymond.consulting +55632 + Bolton Clarke + Johny Agotnes + jagotnes&boltonclarke.com.au +55633 + Xtended Specialists Inc, + Angel Sosa + soslorr&aol.com +55634 + cbrain A/S + Thomas Qvist + admin&cbrain.dk +55635 + EyePro System S.r.l. + Andrea Bertuolo + bertuolo&eyeprosystem.com +55636 + Watchtower Consulting (Pty) Ltd + Belinda Valentyn + watchtowerconsulting&outlook.com +55637 + gesagt.getan. GmbH + Reinhard Binder + office&gesagt-getan.at +55638 + Fountainhead Technologies, Inc. + Bill Gilpatric + bgilpatr&fountainhead.io +55639 + Star Group LLC + Yuskiv Andrey + itdir&star-grupp.ru +55640 + NU-CIVILISATION – Gemeinschaft zur Förderung einer organischen gemeinwohl- und werteorientierten Zivilisation + Mag. Martin Heiduk + verein&nu-civilisation.org +55641 + Trust The Press + Andrew Dodge + dodgea&TrustThePress.org +55642 + Ruuvi Innovations Ltd + Otso Jousimaa + otso&ruuvi.com +55643 + https://www.mrfixitit.org + Albert Joseph DeMaio + littleal78&gmail.com +55644 + Graphcore Ltd + Peter Simm + gc-iana&graphcore.ai +55645 + Stadt Herne + Michael Korsten + michael.korsten&herne.de +55646 + Beacon Federal Credit Union + Mark Maslonka + IANA-PEN.Admin&external.beaconfed.org +55647 + ExodusPoint Capital Management, LP + Robert van Veelen + robert.vanveelen&exoduspoint.com +55648 + Segmed + Wojciech Adam Koszek + wkoszek&segmed.ai +55649 + Mike van Zyl Incorporated + Mike van Zyl + mvzinc&mweb.co.za +55650 + Netha + Guillaume Marsay + gmarsay&gmail.com +55651 + Betanium, Inc. 
+ Benjamin Miller + ben&betanium.com +55652 + Bass Pro Shops + Michael Bates + msbates&basspro.com +55653 + Leonardo MW Ltd + Neil Hillard + neil.hillard&leonardocompany.com +55654 + net.Media SR + Stefan Reichhard + office&netmedia.pro +55655 + Mobilinkd LLC + Rob Riggs + rob&mobilinkd.com +55656 + LAS Enterprises + Lorraine Sosa + soslorr&aol.com +55657 + TN2 Solutions EESV + Rene Jager + renej&tn2.solutions +55658 + ZD Technology(Beijing) Co., Ltd + Jin Zhang + research&zd-tech.com.cn +55659 + Zaji Consultant + Michael Schmidt + Sailsman78&protonmail.com +55660 + GCT Global Container Terminals Inc. + John Ferreira + netadmin&globalterminals.com +55661 + Revocent Inc + Mike Cooper + mike&revocent.com +55662 + KopterMax + Matteo Galet + info&koptermax.com +55663 + GK Rubezh, LLC + Ivan Pryakhin + admin&rubezh.ru +55664 + Markie Enterprises + David Markie + snmp&serialtech.com +55665 + London Square Developments Ltd + Kevin Murphy + itsuppliers&londonsquare.co.uk +55666 + Best bath Systems Inc + Russell McIntire + IT&bestbath.com +55667 + SKYELP + Anh Pham + certadmin&skyelp.net +55668 + City of Bristol, Tennessee + Shane Varney + itsupport&bristoltn.org +55669 + South Dakota Board of Regents + Ryan McGarry + ris-sys&sdbor.edu +55670 + NG9-1-1 Interoperability Oversight Commission + Administrator + admin&ng911ioc.org +55671 + Simple Solutions Learning, INC. + Reed Johnston + reed&simplesolutions.org +55672 + RateEngine + Dimitar Kokov + dkokov75&gmail.com +55673 + Telefonbau Behnke GmbH + Thomas Kraemer + thomas_kraemer&behnke-online.de +55674 + eskhome.net + Craig Eskdale + craigacs&eskhome.net +55675 + mitgedanken + Sara Tasche + sara.dolores.tasche&gmail.com +55676 + Hampton Roads Orthopaedics Spine & Sports Medicine + Wendy Jackson + wjackson&hrosm.com +55677 + AnyElectron LLC + Network Research + ioagent&anyelectron.com +55678 + Custom Control Concepts + Halley Kell + Halley.Kell&Astronics.com +55679 + Sunmark Credit Union + Information Technology + IT-Infrastructure&sunmark.org +55680 + American Library Association + Sherri Vanyek + svanyek&ala.org +55681 + Zeplay LLC + Brandon McKenzie + brandon&zeplay.tv +55682 + JayJay + Julian Jacobi + iana&julianjacobi.net +55683 + Swissomation Inc. + Stephan Casas + istauthority&swissomation.com +55684 + RBZ am Schuetzenpark + Rolke, Andreas + a.rolke&rbz-schuetzenpark.de +55685 + Albert Neef Consulting + Albert Neef + albert.neef&anconsulting.nl +55686 + Smawave + haoxu + hao.xu&smawave.com +55687 + chinamobile soc platform + Meng Xian + will_nx&163.com +55688 + LLC HUBTRUST + Dmitriy + efimovdi&gmail.com +55689 + Testin Ltd. + Bo Peng + bopeng&testin.cn +55690 + Musgrave maintenance + Glynn Delport + delport. g.a&Gmail. com +55691 + Nolmë Informatique + Vincent Duvernet + vincent.duvernet&nolme.com +55692 + Veyseloglu LLC + Araz Mirzayev + araz.mirzayev&veyseloglu.az +55693 + Bewor Tech S.L. + Elena Vera Muñoz + evera&vocces.com +55694 + ITPassion Ltd + Guus Leeuw + guus.leeuw&itpassion.com +55695 + MICROSET + LAURA GATTEL + info&microset.net +55696 + RTV Technologies s.r.o. + Luděk Ranc + info&rtvtechnologies.com +55697 + Groupama Biztosító Zrt.
+ Laszlo Szomor + oid&groupama.hu +55698 + Touro University Medical Group + Susan Anderson + sdaorlando&hotmail.com +55699 + VTNS + Nicolas Vuillermet + nicolas&vuillermet.bzh +55700 + AppLine + Jesse Liang + jesseliang&appline.xyz +55701 + Signers Technology LLC + Adrian Hector Mazza + amazza&signerstech.com +55702 + Adam Investments (formerly 'Adam Maxwell Investment Company AMI') + Charles Adam Maxwell + AdamInvestments&hotmail.com +55703 + OAG INDUSTRIES srl + Andrea Chelli + andrea&kelli.it +55704 + OpenetME + Openet InfoSec + security&openet.com +55705 + Smith Consulting LLC + Michael B. Smith + MICHAEL&SMITHCONS.COM +55706 + CUBOSOFT + Juan Pablo Chavez + jp3&cubosoft.net +55707 + Arilou Technologies Ltd. + Nir Sharony + nir.sharony&nng.com +55708 + BonaSoft sp. z o.o. sp. k. + Michal Danielak + sysadmin&bonasoft.pl +55709 + TECNED B.V. + Chris van Kalken + chris.vankalken&tecned.com +55710 + Talent4Transition + Professor Philip Garner + philipgarnerassociates&outlook.com +55711 + Proius + Vincent Van Houtte + vvh&aplusv.be +55712 + Joint-stock Company "Accent-Bank" + Evgenij Rudenko + evgenij.rudenko&a-bank.com.ua +55713 + KNS Inc + Wang Hyunho + hhwang&kns-kr.com +55714 + VRBANK + Danila Koshel + me&catinthe.space +55715 + Ikle + Alexei A Smekalkine + ikle&ikle.ru +55716 + 北京蓝拓扑科技股份有限公司 (Beijing Blue Topology Technology Co., Ltd.) + 刘艺龙 (Liu Yilong) + liuyilong&bluetop.com.cn +55717 + Fiat Uno Solutions + Don Clarke + don&donfiat.co.za +55718 + Zentry Security, Inc. + Roland Hsu + r.hsu&zentrysecurity.com +55719 + Doing It 4 U + Kevin Harpur + kevindoingit4u&gmail.com +55720 + Horns & Hoofs + Dmitry Makarov + dmitry.makarov&hornshoofs.com +55721 + Peter Mertes KG + Stefan Kiesgen + s.kiesgen&mertes.de +55722 + Aviti + Olivier Petit + opetit&aviti.fr +55723 + Knorr-Bremse AG + Nicolas Goupil + nicolas.goupil&selectron.ch +55724 + NATIONAL INFORMATION TECHNOLOGY DEVELOPMENT AGENCY, + DR. MOHAMMED ONIMISI YAHAYA + ymohammed&nitda.gov.ng +55725 + Ovio Smart World + Sina Ouji + support&ovio.io +55726 + MaxiTech + Serhii Zablotskiy + support&maxitech.com.ua +55727 + AO "RACIM" + Alexandr Pyshnyak + alex.tums&gmail.com +55728 + CER Groupe + Olivier Bruylandt + olivier&bfish.eu +55729 + Identification International, Inc. + Richard Fenrich + fenrich&idintl.com +55730 + BISDN GmbH + Andreas Koepsel + net&bisdn.de +55731 + Celera, S.A. + Roberto Chiroy + contact&celera.com.gt +55732 + Warpcom Services, SA + Antonio Martins + antonio.martins&warpcom.com +55733 + Advanced Design Technology Pty Ltd + Adrian Thearle + adrian_PEN&adt.com.au +55734 + iTarge Technology + Wei Lin + linwei&itarge.com +55735 + 北京椰子树信息技术有限公司 (Beijing Coconut Tree Information Technology Co., Ltd.) + 朱正路 (Zhu Zhenglu) + service&coconet.cn +55736 + OctoInsight Inc. + Jeremy Barker + jb&octoinsight.com +55737 + Data Handler LLC + Jesse Pechart + jpechart&datahandlerllc.com +55738 + William Woodruff + William Woodruff + william&yossarian.net +55739 + Yethfr certification authority + Huang Houfeng + 13426954351&163.com +55740 + Huang Houfeng + Huang Houfeng + HHF&ipv64.cn +55741 + S.M. 
Acton t/a Stelin Motors + Steven Acton + stelinadmin&rsaweb.co.za +55742 + Landratsamt Calw + Marco Gubitzer + software&kreis-calw.de +55743 + Reliance Energy Inc + Ronnie Covert + sysadmin&reimid.com +55744 + Tampa Microwave + Robert Nawrocki + b.nawrocki&tampamicrowave.com +55745 + Geo TV Network + Muhammad Furqan Uddin + furqan.uddin&geo.tv +55746 + Jang Group of Companies + Muhammad Furqan Uddin + furqan.uddin&janggroup.com.pk +55747 + Facebook Connectivity - Terragraph + Paul McCutcheon + iana-assign&fb.com +55748 + TriHealth + Bryan Greenberg + bryan_greenberg&trihealth.com +55749 + TXOne Networks Inc. + Justin Jan + justin_jan&txone.com +55750 + Kadfire Limited + Kadfire Networks Team + networks&kadfire.com +55751 + Azienda Universitaria Policlinico P. Giaccone + Nicola Alessi + alessi&policlinico.pa.it +55752 + HYTEC INTER Co., Ltd. + Hajime Hatanaka + hatanaka&hytec.co.jp +55753 + Comtron + David Lalehzar + dlalehzar&comtronusa.com +55754 + Datil Technologies, Inc + Eduardo Raad + ca&datil.co +55755 + South Holland District Council + Dave Morfee + ict&pspsl.co.uk +55756 + AceAxis + Nigel Chapman + info&aceaxis.co.uk +55757 + RiskAnalytics + Nathan Green + ngreen&riskanalytics.com +55758 + GT Solutions + Gregory Trobridge + gtsolutionsserv&gmail.com +55759 + Calltech SA + Mauricio Noguera + m.noguera&calltechsa.com +55760 + OPWILL Technologies (Beijing) Co., Ltd + zhutianquan + zhutq&opwill.com +55761 + Új Világ Nkft. + Novák Tamás + uzemeltetes&ujvilag.gov.hu +55762 + Truffco Ltd + Steve Threlfall-Rogers + SteveTR&truffco.co.uk +55763 + Allogene Therapeutics, Inc. + Tim McCarty + tim.mccarty&allogene.com +55764 + INSIS SPA + Luca Massini + luca.massini&insis.it +55765 + OGLplus + Matus Chochlik + chochlik&gmail.com +55766 + In Loco Tecnologia da Informação S.A. + Pedro Tôrres + pedro.torres&inloco.com.br +55767 + New Era Technology + Rezar Zefaj + rzefaj&neweratech.co.uk +55768 + CIRMMT + Julien Boissinot + julien.boissinot&cirmmt.org +55769 + Zynas co., ltd. + Shoichi Saito + saito.shoichi&zynas.co.jp +55770 + 北京英泰智科技股份有限公司(Beijing iTarge Technology Co., Ltd.) + Chong Li + lichong&itarge.com +55771 + Appmodule AG + Tobias Rothen + sysadmin&appmodule.net +55772 + Fiberworks Int. AG + Frank Witig + fw&fiberworks.com +55773 + Beijing Qinmu Data Technology Co.,LTD + Chi Yang + chy&qinmu.net.cn +55774 + US Wellness + Alyssa Williamson + awilliamson&uswellness.com +55775 + Bayerische Staatsforsten AöR + Matthias Frost + matthias.frost&baysf.de +55776 + NOGC Certificate Authority + Praneet Rai + corporate&nogc.ovh +55777 + NOGC Go Cert + Praneet Rai + corporate&nogc.ovh +55778 + NOGC Universal Root Certificate Authority + Praneet Rai + corporate&nogc.ovh +55779 + David Dai W + David Dai + davidjet&gmail.com +55780 + Definet Oy + Tea Murtomaki + tea.murtomaki&definet.fi +55781 + Safaricom PLC + Protus Etende + petende&safaricom.co.ke +55782 + Mirco Reimer + Mirco Reimer + pen.iana.org&mircoreimer.de +55783 + Net Assets, Inc. + Grant Gossett + sysops&netassets.com +55784 + CERTIFICADORA DEL SUR SPA + JOSE CRISTIAN ECHEVERRIA BRIONES + cecheverria&byeconta.cl +55785 + Nubo + Kenny Louveaux + hello&nubo.coop +55786 + Converged Compliance Solutions, Inc. + Terence McDonough + tjmcdonough&convergedcompliance.com +55787 + Meraxo AG + Jan Baumeler + itservices&meraxo.com +55788 + ecoprotec GmbH + Lukas Rogala + administrator&ecoprotec.de +55789 + PaTES s.r.o. 
+ Michal Papai + papai&pates.sk +55790 + EkkoSense Ltd + David Corder + david.corder&ekkosense.co.uk +55791 + Clay Platte Family Medicine Clinic P.C. + Vince Shisler + vinces&clayplattefamily.com +55792 + DAERZK + Chris Martyn + chris.martyn&daerzk.com +55793 + Communauté PKILabs + ISAMBERT Patrick + patrick.isambert&pkilabs.eu +55794 + NINGBO DEYE INVERTER TECHNOLOGY Co., LTD + Xudong Chen + chenxd&deye.com.cn +55795 + BIZ-CHO + Alexis BIZON + alexis.bizon+iana&biz-cho.ovh +55796 + ORANO SA + Philippe BAGONNEAU + philippe.bagonneau&orano.group +55797 + Stephen Douglas Scotti + Stephen Douglas Scotti + sscotti&medinformatics.eu +55798 + OCN + Artem Fedoseev + afedoseev&ocn.group +55799 + E Squared + Andre Mostert + andrem&enterprise360.me +55800 + smartics + Anton Kronseder + anton.kronseder&smartics.de +55801 + Buypass AS + Kai Bjørnstad + kai.bjornstad&buypass.no +55802 + GLOTEC ENGINEERING (SA) (PTY) LTD. + Jeong, Haekwon + hkjeong52&gmail.com +55803 + GBS Backup B.V. + Roel Otten + roel.otten&gbsbackup.com +55804 + Neural Inference Solutions Inc. + Michael Durrant + mdurrant&neuralinference.com +55805 + eBay Inc + Ashwin Ambekar + DL-eBay-site-idm&ebay.com +55806 + Alertus Technologies LLC + Gary El-Gamil + gelgamil&alertus.com +55807 + Baronics LLC + William Meltzer + baronics&gmail.com +55808 + Drifting Mind Studio + Johannes Helgi Laxdal + johannes&laxdal.org +55809 + Crystal Crest Cleaners + Efren M. Gabriel + efigabriel&gmail.com +55810 + Vitesco Technologies + Helmut Seidl + helmut.seidl&vitesco.com +55811 + JSC "MIR UPAKOVKI" + Denisov Igor + igor&netx.ru +55812 + Laboratories of Molecular Anthropology and Microbiome Research + John K Boyd + lmamr.account&ou.edu +55813 + HashiCorp, Inc. + William Bengtson + wbengtson&hashicorp.com +55814 + RnD Center "IMPULSE" + Sirotkin Alexey + sa&sstmk.ru +55815 + Factor-y S.r.l. + Daniele Vistalli + daniele.vistalli&factor-y.com +55816 + LifeLens Technologies + Andy Hoffman + ahoffman&lifelenstech.com +55817 + Teichmann Integration Services + Heinz Teichmann + heinz&tis.cx +55818 + Cunningham Research Corporation Limited + James Cunningham + crc&jamescun.com +55819 + Eko-Okna S.A. + Dariusz Skonieczny + ds&ekookna.pl +55820 + MedPics LLC + Roland Talanow + DrTalanow&MedPics.me +55821 + ELTACOM + Elias MOUKAL + elias&eltacom.ma +55822 + Clover Cyber + Dongxu Hao + 1123182168&qq.com +55823 + Perry Familia + Steven Perry + steven&perryfamilia.com +55824 + SKS + Evgeny Seredkin + it&sksural.com +55825 + Tiny Tiny Computing + Jay Koby + jkoby&tinytiny.net +55826 + Nerd-Residenz + Ralph J.Mayer + oid&nerd-residenz.de +55827 + Uplevel Systems, Inc + Thomas Alexander + tom&uplevelsystems.com +55828 + Marshall Community Credit Union + Information Technology + ITAdmin&marcomcu.org +55829 + Arthur18 + Heiko Grill + heiko.grill&diegrills.org +55830 + Gazprom Remont + Ivan Sekretarev + i.sekretarev.grm&gmail.com +55831 + Speedway LLC + Raj Bhambri + rbhambri&speedway.com +55832 + Fidelity National Information Services + Andrew Harris + andrew.harris&fisglobal.com +55833 + Broadtech technology co. LTD + zhongshan Luo + luozs&broadtech.com.cn +55834 + NorthHinkle + John N Hinkle + northhinkle&comcast.net +55835 + Sutra Tech Labs Inc + Gaurav Garg + gaurav&sutratechlabs.com +55836 + MFA Informatik AG + Andreas Schneider + andreas.schneider&mfa-informatik.ch +55837 + Schäckermann + Frank Schäckermann + azj8wcfd5k&liamekaens.com +55838 + DLinuxguy INC. + Chintan Murty + dlinuxguy&gmail.com +55839 + Lynxgistics Inter Freight Co., Ltd. 
+ Komson Lertboonyapun + it&lynxinterfreight.com +55840 + JieYue technical service studio + Hua xin + david_hwa&126.com +55841 + Tibasamaneh + Edwin Abnoosian + ed&tibasamaneh.com +55842 + Lagoni Engineering + Kaelan Thijs Fouwels + kaelan.fouwels&lagoni.co.uk +55843 + AXXICON Moulds Eindhoven BV + Bas Hendrix + B.Hendriks&axxicon.com +55844 + Nebig Verpakkingen B.V. + Michel Coolen + m.coolen&nebig.com +55845 + Mevi Beheer + Hendry Tjonadi + htjonadi&mevi.com +55846 + Smits Groep + Ton van der Aa + support&smits-groep.nl +55847 + Libéma Exploitatie B.V. + Willem-Jan Glaudemans + w.glaudemans&libema.nl +55848 + Swifthosting ApS + Mikkel Christensen + mikkel&swifthosting.dk +55849 + Mizar Magic and Research Network + Nanyuan Cheng + pen&mmrnet.org +55850 + Veea Systems Ltd + Adam Cottrell + adam.cottrell&veea.com +55851 + Jordan Klein + Jordan Klein + haplo&haplo.net +55852 + Saṃvásana Holdings + Ryan Morrison + ryan&samvasana.com +55853 + BaaSid International Lab Co.,LTD. (Taiwan) + Rocky Huang + rockyhuang&baas-holdings.com +55854 + Pagebe.fr + Thomas Paricaud + thomas&paricaud.fr +55855 + UkrTrade ltd + Gennady Lapin + gennady_lapin&klass.com.ua +55856 + EMCO Chemical Distributors, Inc. + Rory Baker + rbaker&emcochem.com +55857 + DD Cloud + Dmitry Stasenko + myrkul.000&gmail.com +55858 + ALL D.S. SECURITY SOLUTIONS LTD + Sharon Weiss + info&alldss.com +55859 + Mini-Circuits + Chi Man Shum + testsolutions&minicircuits.com +55860 + Advantech Wireless Technologies Inc. + Evan Provost + evan.provost&advantechwireless.com +55861 + Comet Networks + Shuji Ito + shuji.i0322&mail.sccsn.jp +55862 + Gold Corporation + Nathan Manzi + nathan.manzi&perthmint.com +55863 + COMNET INC. + Tetsuya Oshima + oshima&dbcom.co.jp +55864 + AMMG + Martin Aye + iana&aye-gruppe.de +55865 + The University of Alabama + Alex Hainen + ahainen&ua.edu +55866 + Mathima Technologies + Dennis Smalbrugge + dsmalbrugge&mathimatech.com +55867 + eSKamation + Stefan Kanthak + stefan.kanthak&nexgo.de +55868 + Useful Bytes LLC + Josh Dague + oid&useful-bytes.com +55869 + Leertouwer + Stefan Meurs + stefanmeurs&leertouwer.nl +55870 + Zaman 01 LTD + Vakilzade Tabriz + support&alver.kz +55871 + Crédit Mutuel ARKEA + Éric LE CORRE + offre.ged&arkea.com +55872 + ProLabs + Sergiu Rotenstein + sergiu.rotenstein&prolabs.com +55873 + Xona Systems, Inc. + Raed Albuliwi + raed&xonasystems.com +55874 + Østfold Interkommunale Arkivselskap IKS + Ole Aldric + ole.aldric&ikao.no +55875 + Agentilo GmbH + Felix Schuck + felix.schuck&agentilo.com +55876 + VitalThings AS + Stig Christian Aske + sca&vitalthings.com +55877 + AppEx Networks Corporation + Hao Zhuang + hao&appexnetworks.com +55878 + ABSU Limited + Denys Rtveliashvili + contact&absu.info +55879 + AddSecure Acquisitions Holdings AB + Phillip Cavell + phillip.cavell&addsecure.com +55880 + StealthPath Inc. + Mark Orton + morton&stealthpath.com +55881 + toolman.org + OID Administrator + oid-admin&toolman.org +55882 + GREEN CITY STI.,JSC + Admin Greencity + adminxc&xanhcity.com.vn +55883 + Micro-Design Inc + Patrick Patel + ppatel&micro-design.com +55884 + fremetih solutions + Freddy Mendoza + frmendozat&uni.pe +55885 + Successful Endeavours Pty Ltd + Rob Struthers + rob&successful.com.au +55886 + Imperani Alarm Services T/A JA Security + Jordaan Marais + ias.jasec&gmail.com +55887 + Nanjing Huastart Network Technology Co.Ltd + Wei Chen + chen.wei&huastart.com +55888 + TradeWith + Micheal Moono + michealmoono23&gmail.com +55889 + TycheTools, SL + Jose M.
Moya + jm.moya&tychetools.com +55890 + Technocon + Sara Ravi + sarabrk&yahoo.com +55891 + Artemis Tosini + Artemis Tosini + oid&artem.ist +55892 + Guang Zhou Aoshi Internet Information & Technology Co., Ltd. + Lu Wenhuan + 402526441&qq.com +55893 + Hyve Solutions + Janny Au + jannya&hyvedesignsolutions.com +55894 + e-Smart Systems Europe SL. + Rashabh Kumar + info&e-smartsystems.com +55895 + Katariya Infra Projects Private Limited + Chintan Katariya + info&katariyainfraproject.com +55896 + DAAD - Deutscher Akademischer Austauschdienst + Arnd Müller + mueller&daad.de +55897 + ARKRAY USA, Inc. + T.R. Piller + pillert&arkrayusa.com +55898 + MEDICON + IT Department + oid&mediconas.cz +55899 + IPG Information Process Group GmbH Deutschland + Holger Strickling + holger.strickling&ipg-group.com +55900 + National Academy of Forensic Engineers + Mitch Maifeld, PE + iana&maifeld.name +55901 + Sven Anders + Sven Anders + sven&anderss.de +55902 + 3FONT + Ferran Fontova + 3font&3font.net +55903 + hagebau IT GmbH + Christian Weise + hcs-os&hagebau.com +55904 + National Petroleum Construction Company + Basil El Khoury + itlicensing&npcc.ae +55905 + J.H.C. de Rooy Holding B.V. + Steven Goossens / Geert van Bergen + IT&derooy.com +55906 + SecretNest.info + Wei Cui + public&secretnest.info +55907 + Shenzhen New Trend International Logistics Technology Co.,LTD + Jianfeng Shao + info&nti56.com +55908 + Dinnissen B.V. + Erik Janssen + IT-Support&dinnissen.nl +55909 + Coolsure Limited + Warwick Dawes + warwick.dawes&coolsure.com +55910 + Heka Werkzeuge GmbH + Philip Kazmeier + it&heka-werkzeuge.de +55911 + Proper Code + Philip McGuire + phil&propercode.org +55912 + Operador Nacional do Registro Civil de Pessoas Naturais + Luis Carlos Vendramin Junior + ac&registrocivil.org.br +55913 + Pinnacle Oncology + Thomas Landers + tlanders&flatiron.com +55914 + Tomasz Szkutkowski IT Consulting + Tomasz Szkutkowski + tomasz&szkutkowski.pl +55915 + NEMZETI MOBILFIZETESI ZRT. + ISTVAN MIKLOS ERDELYI + erdelyi.istvan.miklos&nmzrt.hu +55916 + Junge Sprachwissenschaft e. V. + Markus Jochim + admin&junge-sprachwissenschaft.de +55917 + Tel + Gerhard Telschow + GERHARD.TELSCHOW&OUTLOOK.DE +55918 + SecureLogic LTD + Itzhak Sharon + itzik&securelogic.co.il +55919 + Martin Loehnertz IT Beratung + Martin Loehnertz + info&loehnertz.de +55920 + PCItek + Herbert Falk + herb.falk&pcitek.com +55921 + iSAtech water GmbH + Emanuel Mey + info&isatech.de +55922 + Kismet Wireless + Michael Kershaw + mike&kismetwireless.net +55923 + Lauterbach GmbH + Franz Sirl + franz.sirl-pen&lauterbach.com +55924 + yfn Beteiligungsgesellschaft UG + Yves Frédéric N´Soussoula + support&yfn-group.de +55925 + Buy Whole Foods Online Ltd + Simon Duffield + simon.duffield&ldeng.co.uk +55926 + Cumtenn Intelligent information science and technology(Zhejiang) Co., Ltd. + Sealand Ly + sealand-ly&sealand100.com +55927 + seot + xiaodong jia + jiaxiaodong&seot.cn +55928 + JSC Profotech + Maxim Yanin + yanin&profotech.ru +55929 + University of Lausanne + Alexandre Roy + alexandre.roy&unil.ch +55930 + Kwikbit + Jeff Stern + jstern&kwikbit.com +55931 + Safe Host S.A. + Jean-Christophe Valiere + securityit&safehost.com +55932 + GXM Consulting + Dylan Neves + neves&gxmconsulting.com +55933 + Powersoft S.p.A. + Filippo Digiugno + filippo.digiugno&powersoft.com +55934 + Cryptnox SA + Sebastien Armleder + sebastien.armleder&cryptnox.ch +55935 + Kutai Electronics Industry Co., LDTD.
+ Po-Ting Kang + pt.kang&mail.kutai.com.tw +55936 + Grentech + jingruhong + jingruhong&powercn.com +55937 + Delta Electronics, Inc. + Patrick + PATRICK.PK.LIN&deltaww.com +55938 + Kero Kero Information Technology Center + Soichiro Inoue + info&k-itc.com +55939 + Garantir Technologies Private Limited + Samir Gupta + sales&garantir.in +55940 + Temas LLC + Victor Fedosenkov + fvs&temas.ru +55941 + Wisdom Technologies Pvt Ltd + Ravi Kumar + ravi&wisdomtech.co.in +55942 + LightEdge Solutions + Ben Vargas + bvargas&lightedge.com +55943 + Opennetworks Kft. + Andras Beliczay + abeliczay&opennet.hu +55944 + Netlock Gmbh + Feind Gabriella + feind.gabriella&netlock.hu +55945 + MC-Technologies GmbH + Matthias Bannach + bannach&mc-technologies.net +55946 + Arra Networks + Paul Egermeier + operations&arranetworks.com +55947 + SkizNet + Ryan Schmidt + skizzerz&skizzerz.net +55948 + Rayn0r Ltd. + Raynor Hsu + jerry81591066&gmail.com +55949 + ALVO Spółka z ograniczoną odpowiedzialnością, Sp. k. + Agata Olszewska + agata.olszewska&alvo.pl +55950 + Keydok LLC + Xavier Lamicq Zorrilla + xavier.lamicq&keydok.com +55951 + AmLight/AMPATH + OPS AmLight/AMPATH + ops&ampath.net +55952 + Laboratorio de Modelos y Datos de la UNAM + Luciano Diaz + luciano.diaz&correo.nucleares.unam.mx +55953 + FRAMATOME + Orion RAGOZIN + orion.ragozin&framatome.com +55954 + Episource LLC + Jeff Elrod + jeff.elrod&episource.com +55955 + Suzhou China Asterfusion Co.,LTD + Bochen JI + jibochen&asterfusion.com +55956 + iret Gesellschaft zur Entwicklung elektronischer Steuerungen mbH + Thomas Goß + edvservice&iret.de +55957 + ICS Technologies + Barulli Vittorio + vittorio.barulli&xtechnologies.it +55958 + reply + sharif uddin + m.uddin&reply.com +55959 + NeoVolta Inc. + BRENT WILLSON + bwillson&neovolta.com +55960 + Charles Russell Speechlys + Haroon Hanif + InfrastructureMonitor&crsblaw.com +55961 + Intenta GmbH + Basel Fardi + iana-pen&intenta.de +55962 + PD Inc + Jason Pyeron + iana-pen&pdinc.us +55963 + Bendigo and Adelaide Bank Limited + Paul Winters + paul.winters&bendigoadelaide.com.au +55964 + Carman Systems + Walter Carman + wcarman1&gmail.com +55965 + Ministry of Foreign Affairs of the Republic of Uzbekistan + Khikmatulla Zaynutdinov + it&mfa.uz +55966 + KeySwing + Austin Chang + austin880625&gmail.com +55967 + County of Lackawanna + Colin Doherty + dohertyc&lackawannacounty.org +55968 + Ammega Group B.V. + Dick de Groot + helpdesk&ammeraalbeltech.com +55969 + Gulf International Development LLC + Louis Christodoulides + louis&gulfid.com +55970 + BHDev Desenvolvimento e Consultoria em Informatica LTDA + Sebastian Kummer + info&bhdev.net +55971 + DAUPHIN TELECOM + Colin Haeberle + sysadmin&dauphintelecom.com +55972 + NARMI + Chris Narmi + Chris&NARMI.org +55973 + Lathrop GPM LLP + David Alberico + david.alberico&lathropgpm.com +55974 + Secure Computing Laboratory, NMT + Ratna Halder + ratna.halder&student.nmt.edu +55975 + Process Fusion Inc. + Arron Fu + arron.fu&processfusion.com +55976 + viralstrings technologies pvt ltd + deepesh vodala + deepeshvodala666&gmail.com +55977 + DNSmonitor Sverige AB + Henrik Dahlberg + info&dnsmonitor.com +55978 + Ekebu srl + Simon E. La Rosa + simon&ekebu.com +55979 + Shenzhen Kstar New Energy Company Limited + Demi Zhang + zhangdy&kstar.com.cn +55980 + Hoyos Integrity Corp. + Anner J. Bonilla + abonilla&hoyosintegrity.com +55981 + IIB Education Private Limited + Prof. Shyam Soni + admin&iibedu.com +55982 + Wenzhou Yeeka Lock Technology Co.,Ltd. 
+ ChunFei Wu + 18367818320&163.com +55983 + MARCUUS + Vorobyev Daniil + danvor&rambler.ru +55984 + NETLOCK GmbH + Varga-Szabó Éva + szabo.eva&netlock.hu +55985 + ACCA + Ralph Thompson + ralph.thompson&getronics.com +55986 + novalink GmbH + Novalink Development + develop&novalink.ch +55987 + Tiffany Care Centers, Inc. + Robert Gibson + rgibson&tcc4care.com +55988 + eBay Inc + Ashwin Ambekar + DL-eBay-TrustFabric-OS&ebay.com +55989 + Lithnet + Ryan Newington + support&lithnet.io +55990 + Morrison & Foerster + Ross Sigworth + Messaging&mofo.com +55991 + EMERGY Führungs- und Servicegesellschaft mbH + Hermann Bookjans + it&emergy.de +55992 + Conradis Teknik AB + Peter Rennerstav + peter&conradis.se +55993 + Carmot Pursuits + Jason Medlin + Jason.Medlin&CarmotPursuits.com +55994 + GiftCard, Inc + Rick Schumann + rick.schumann&gmail.com +55995 + Scolino Nachhilfe & Schülerbetreuung + Dominic Alexander Windolph + dominic.windolph&scolino.com +55996 + calac GmbH + Stephan Wagner + stephan.wagner&calac.net +55997 + CompuTecNetworks + Daniel Banks + webmaster&computecnetworks.com +55998 + HAOHAN Data Technology Co.,LTD. + Dijia Fan + fdj&haohandata.com.cn +55999 + accel-ppp + Dmitry Kozlov + dev&accel-ppp.org +56000 + Bambú Tecnologías Aplicadas SRL + Gustavo G. Isabella + gustavoisabella&bambutec.com.ar +56001 + Seashell Trust + Richard Crompton + richard.crompton&seashelltrust.org.uk +56002 + PT Aksara Digital Indonesia + Satria Priambada + satriapriambada&gmail.com +56003 + Atende Software Sp. z o.o. + Przemysław Frasunek + przemyslaw.frasunek&atendesoftware.pl +56004 + Peter Raabye + Peter Raabye + praabye&dadlnet.dk +56005 + Stichting Ziekenhuis Gelderse Vallei + Richard de Mooij + MooijR&zgv.nl +56006 + Wrble + Casey Haakenson + casey&wrble.com +56007 + Festival D'été de Québec + Alain Martel + amartel&feq.ca +56008 + AKRA Kotschenreuther GmbH + Patrick Berthold + patrick.berthold&akra-world.com +56009 + ELSA Advanced Systems Pte Ltd + James Chua + james&elsaadv.com +56010 + Alpha-Safety LLP + Andrey Fursov + a.fursov&alpha-safety.kz +56011 + RTK + Komov Sergey + svkomov&rt-dc.ru +56012 + JItendra Singh + Jitendra Singh + jitendrasarsa&gmail.com +56013 + Aqua Broadcast Ltd. + Pavel Valousek + info&aquabroadcast.co.uk +56014 + Los Angeles Jewish Home for the Aging + Hadi Sabet + hadi.sabet&jha.org +56015 + Empire Die Casting Company + Andrew McCullough + info.tech&empirecastingco.com +56016 + Cayman Medical Ltd + Roland Talanow, MD, PhD + admin&caymanmedicalcenter.com +56017 + T1ED + Tom + T1ED&protonmail.com +56018 + SKAPF + Stefan Kapfhammer + skapf&skapf.de +56019 + Otter video + Maksim Lapshin + max&otter-video.com +56020 + BSB DATA AND INFORMATION PROCESSING SERVICES PRIVATE LIMITED + BIRENDER SINGH BUDHWAR + BSB&BSBDIPS.COM +56021 + Geo-Com + Damian Cikowski + iana&geo-com.pl +56022 + Elukerio + Philippe Martin + adminsys+iana&elukerio.org +56023 + Nanjing Aotong Intelligent Technology Co., Ltd. 
+ Wenyong Ma + mawenyong&otint.com.cn +56024 + KPT Holding AG + Service Management + servicemanagementit&kpt.ch +56025 + nicos AG + Andre Stolze + office-it&nicos-ag.com +56026 + AIFusion.net + Sharad Singhal + sharad.singhal1&gmail.com +56027 + Active Shadow LLC + Bryan Richardson + bryan&activeshadow.com +56028 + Beijing Changkun Technology Co., Ltd + zeng.xiaosa + 274454204&qq.com +56029 + ALS Automated Lab Solutions GmbH + Jens Eberhardt + je&als-jena.de +56030 + Skylite AB + Anders Hannus + anders.hannus&skylite.se +56031 + CL SKOG AB + Robert Clason + robban&clskog.se +56032 + ZAE Bayern + Christian Hilgers + iana-admin&zae-bayern.de +56033 + Die Autobahn GmbH des Bundes + Manfred Felsch + manfred.felsch&autobahn.de +56034 + ALS Laboratory Group + Brian Hampson + brian.hampson&alsglobal.com +56035 + GALDERMA S.A. + Jean Kouame + Jeansaturnin.Kouame&galderma.com +56036 + Gentlent UG (haftungsbeschränkt) + Tom Klein + tom&gentlent.com +56037 + Argo AI + Mike Pochan + mpochan&argo.ai +56038 + Nroad + wangt + wangt&nroad.com.cn +56039 + Bold City Tech + Michael Potts + mpotts&boldcity.tech +56040 + Sosaley technologies private limited + Ujjwal Varadarajan + ujjwal.v&sosaley.in +56041 + CALINDA SOFTWARE + ALEXANDRE MERMOD + amermod&calindasoftware.com +56042 + PAJURO + Paul Dury + paul.dury&pajuro.fr +56043 + Argo AI + Jason Galanter + jmg&argo.ai +56044 + Sansay Corporation + James Hiott + jhiott&sansay.com +56045 + Postee + Gauthier Testu + gauthier&postee.io +56046 + Zbensoft Software + xie qiang + tony.xie&zbensoft.com +56047 + Lucid Motors + Keshava Ravulapati + keshavaravulapati&lucidmotors.com +56048 + ElastiFlow Inc. + Robert Cowart + rob&elastiflow.com +56049 + HED2 + Hamdi Hamdi + office&hed2.com +56050 + KASLAB + Jonas Lidgren + iana&kaslab.se +56051 + Skyservice Business Aviation Inc. + Mike Yabar + Mike_Yabar&skyservice.com +56052 + Growatt New Energy Technology Co.,LTD + Hao Cheng + hao.cheng&growatt.com +56053 + slowpoke.cloud + Josh Mills + jmills&posteo.net +56054 + East Alabama Womens Clinic + Dr. GWEN COOPER + Gwencoopersimon&bellsouth.net +56055 + PLEXSYS Interface Products + Andrew Willis + awillis&plexsys.com +56056 + Grid Protection Alliance + James R Carroll + rcarroll&gridprotectionalliance.org +56057 + Grid Protection Alliance + James R Carroll + rcarroll&gridprotectionalliance.org +56058 + The Farmers State Bank + Russ Redmyer + russ&myfsb.bank +56059 + Remmers Gruppe AG + Stefan Focke + sfocke&remmers.de +56060 + Chengdu Yinou Technology Co. LTD + ShunTing.Yu + ystdarkblue&qq.com +56061 + Rune Landbergs Åkeri AB + Björn Landberg + landbergsakeri&telia.com +56062 + Chakat Space + Glowpelt Chakat + glowpelt&chakat.space +56063 + Tri-State Communications, Inc. + Brian Cain + brian&tri-statecomm.com +56064 + Coca-Cola Amatil (N.Z.) Limited + Paresh Chovan + paresh.chovan&ccamatil.com +56065 + Architector Inc. + Sotaro SUZUKI + sotaro.suzuki&architector.jp +56066 + Centre Orcet-Mangini + Cloitre Thibaud + T.CLOITRE&orsac-ssr-01.org +56067 + Knox Horticulture LLC + Kevin Kintner + pki&knoxhort.com +56068 + Aggressive Tooling Inc. + Arden Bremmer + abremmer&aggtool.com +56069 + Net NV LLC + Will Arndt + info&netnv.net +56070 + Eni S.p.A. + Marco Bianchi + marco.bianchi&eni.com +56071 + MediasiteK.K. 
+ Masaya Okada + m.okada&mediasite.co.jp +56072 + MOIN MIYAN + Moin Miyan + moink8326&gmail.com +56073 + Beijing VRV Software Corporation Limited + BXY VRV + gaokui&vrvmail.com.cn +56074 + Knightfall Systems LLC + Duncan Forsythe + duncan.forsythe&knightfall.io +56075 + Fuchshuber Architekten GmbH + Clemens Ebert + c.ebert&fuchshuberarchitekten.de +56076 + EDC HOFFMEISTER GmbH + Thomas Hopf + thopf&elink.de +56077 + codecentric AG + Hosting Team + hosting&codecentric.de +56078 + SorensenLab + Shane Sorensen + shane&alchemytechgroup.com +56079 + ServiceNow, Inc. + Gordon Tetlow + gordon.tetlow&servicenow.com +56080 + flaviofrancardi + Flavio Francardi + flaviofrancardi&libero.it +56081 + Smartsky Networks + Abhinay Sinai + abhinay.sinai&smartskynetworks.com +56082 + Data Connect Enterprise + BARRY PATTON + bpatton&data-connect.com +56083 + Facebook Connectivity - Joule + Neal Poole + iana-assign&fb.com +56084 + Liphatech INC + Liphatech Administrator + admin&liphatech.com +56085 + Highmark Health + Brian Wink + brian.wink&highmarkhealth.org +56086 + BlackSphere + Paul Smith + psmith.iana.pen&iostindex.com +56087 + Medis Holding + Prof. Dr. Behnam Rahnama + behnam.rahnama&gmail.com +56088 + Diggers + Armin Pipp + a.pipp&diggers.tech +56089 + IMSONIC MEDICAL, INC. + LIN ZHANG + lin.zhang&imsonicmedical.com +56090 + CJCHT GROUPS LIMITED + Clare Chen + admin&cjcht.com +56091 + lern.link GmbH + Lukas Müller (MuLu) + mulu&lernlink.de +56092 + Bitza IT Consulting + Alex Ignatenko + alex.ignatenko&gmail.com +56093 + PPC Africa + Steven Scott + eugene.vanrooyen&firsttech.digital +56094 + INNORS + Gilbert Park + gilbert.park&innors.com +56095 + PhilCo Consulting Inc. + Philippe Cote + philcoconsulting&gmail.com +56096 + fiskaly GmbH + Patrick Gaubatz + office&fiskaly.com +56097 + Norden Communication UK Ltd + Eldho Jacob + eldho&nordencommunication.com +56098 + Docuten Tech S.L. + Angel Aparicio + angel.aparicio&docuten.com +56099 + Example Limited + Burton + burton&typewritten.net +56100 + VMLabblog.com + Aad Lutgert + admin&vmlabblog.com +56101 + odehnal.org + Pavel Odehnal + pavel&odehnal.eu +56102 + Venjakob Maschinenbau GmbH & Co. KG + Michael Lakenbrink + it&venjakob.de +56103 + LKVS CONSTRUCTION PVT LTD + Vijyandra Narayan + lkvspvtltd&gmail.com +56104 + M-P-S + Peter SEDLÁK + mps&mpsgroup.sk +56105 + Alpha Technologies CIA. LDTA. + steven chiriboga + steven.chiriboga&alphaside.com +56106 + Cambridge University Hospitals NHS Foundation Trust + Ben Hilsden + ben.hilsden&addenbrookes.nhs.uk +56107 + Convention Center Authority of the Metropolitan Government of Nashville & Davidson County + Eric Blouin + Eric.Blouin&nashvillemcc.com +56108 + Intelligent Waves LLC + Garrett T. Wood + garrett.wood&intelligentwaves.com +56109 + ДП "Одеський порт" (State Enterprise "Odesa Sea Trade Port") + Негальчук Олександр Леонідович (Nehalchuk Oleksandr Leonidovych) + it&omtp.com.ua +56110 + FSV Analytics + John Phillips + jp&fsvanalytics.com +56111 + EKTA-Prom + Shatkovskyy Valeriy + valera&ekta.com.ua +56112 + Unisyue Technologies Co., Ltd. + Xiaosheng Deng + info&unisyue.com +56113 + Universidad Nacional de San Agustín de Arequipa + Boris Verastegui Bustamante + boris&unsa.edu.pe +56114 + B/D OPS, LLC + Neil Duda + support&bdops.com +56115 + JZ Lab + Jan Zak + pen&jan-zak.cz +56116 + nigelmann + Simon Ackermann + nigelmann-dev&protonmail.com +56117 + MAXIO Co.,Ltd. 
+ KunPyo, Jeong + kpjeong&maxio.co.kr +56118 + Dissonance + Lilac Kapul + lilac&dissonance.tech +56119 + City of Hollywood, FL + CHRISTIAN LABOY + claboy&hollywoodfl.org +56120 + PLASP Child Care Services + Brad Hodgins + bradhodgins&plasp.com +56121 + Busch's INc. + Jeff Dannenberg + admin&buschs.com +56122 + LORIOT AG + Gianni Zizzi + ops&loriot.io +56123 + Vodafone India Services + Syed Rizwan Mohiuddin + syedrizwan.mohiuddin&vodafone.com +56124 + ANEO "Ust-Labinsky Lyceum" + Maksim Ivanov + Ivanov.M&ul-lyceum.ru +56125 + Mars Bioimaging Limited + Ross Younger + ross.younger&marsbioimaging.com +56126 + uni software plus GmbH + Roland Hopferwieser + hopferwieser&unisoftwareplus.com +56127 + WAREHOUSE SPECIALISTS LLC + JASON DROUT + DROJAS&WSINC.COM +56128 + SI Media + Andreas Panozzo + andreas.panozzo&si-media.tv +56129 + L3Harris (Chesapeake Sciences) + Dave Thomas + dave.thomas&l3harris.com +56130 + Landmarken AG + Rudolf Firnich + rfirnich&landmarken-ag.de +56131 + AG Klima, FB8, Uni Bremen + Timo Rothenpieler + timo.rothenpieler&uni-bremen.de +56132 + Chaos Computer Club Veranstaltungsgesellschaft mbH + Christian Carstensen + cc+iana&cccv.de +56133 + Excision Mail + Aisha Tammy + atammy&bsd.ac +56134 + Convexis GmbH + Florian Schenk + florian.schenk&convexis.de +56135 + MAIDAMESS LIMITED + Georgios Sikovaris + gsikovaris&maidamess.co.uk +56136 + Berci Engenharia + César Daltoé Berci + cesar&berci.com.br +56137 + TekSea + César Daltoé Berci + cesar&berci.com.br +56138 + AIS spol. s r.o. + Daniel Juřík + ais&ais-brno.cz +56139 + Three Star Group AS + Geir Allsted + oid&threestar.no +56140 + Metartec + Johnny Moore + johnny.moore&metartec.com +56141 + Fleetwood High School + Michael Kennedy + support&fleetwoodhs.org.uk +56142 + Telecom26 AG + Michael Ashdown + mashdown&telecom26.ch +56143 + NETCOM GROUP + M. MATRI + h.matri&netcom-group.fr +56144 + J. Knipper and Company + Michael Weber + domain.admin&knipper.com +56145 + hezhong + wanjun ma + mawanjun&hezhonghuineng.com +56146 + FireStone + Manchong Lu + lumc&hsmap.com +56147 + Foxtons Ltd + Infrastructure Team + ianaresponse&foxtons.co.uk +56148 + Chengdu Spaceon Electronics Co.,Ltd. + GarfieldLee + 447168133&qq.com +56149 + Arcturus Technologies + Deepak Batra + deepak&arcturustech.com +56150 + Bekomold GmbH + Andras Suveges + Andras.Suveges&bekomold.hu +56151 + TES s.r.o. + Michal Dobes + ianareg&tes.eu +56152 + UTIMCO + Stephen Mack + Smack&utimco.org +56153 + Powertec Solutions International + Paul Siglinger + psiglinger&powertecsolutions.net +56154 + KEYXENTIC INC. + Stone Chen + stone.chen&keyxentic.com +56155 + Quantum Labs + Simon Stone + simon.stone&quantumit.com.au +56156 + Banma Network Technology Co,Ltd + Yao Lu + ly186864&alibaba-inc.com +56157 + Axion Biosystems, Inc + Brad Major + bmajor&axion-biosystems.com +56158 + ComPsych Corporation + Adam Gotskind + agotskind&compsych.com +56159 + Mibes IT + Sebastian Mohr + sebi&iana.mibes.it +56160 + Lorier Internet + Mark Anthony Lorier + lorier&lycos.com +56161 + UST Technologies + Shanthi Nair + ihubdemo&gmail.com +56162 + Sinelec + Antonino Catalano + catalano&sinelec.it +56163 + Star Kay White, Inc. + Mikko Peltoniemi + it&starkaywhite.com +56164 + Arctic Paper Grycksbo AB + Johan Olérs + johan.olers&arcticpaper.com +56165 + Access Health Services, LLC + Kyle Hawkins + khawkins&accesshealth.services +56166 + 深圳凌特华盛科技有限公司(Shenzhen Smartbyte Technology Co., Ltd.) + Shigan Chen + chenshigan&smartbyte.com.cn +56167 + Calibre UK Ltd. 
+ Paul Wilson + paul.wilson&calibreuk.com +56168 + dotup IT solutions + Peter Ullrich + peda76&gmail.com +56169 + Open Products, Networks & Software (OPNS) + Marc Boitel + marc.boitel&opns.net +56170 + Secure Nordic Payments + Tadas Kvedaras + tadas&mistertango.com +56171 + Samm Technology Communications Industry and Trade Inc. + Tayfun Erkorkmaz + tayfun.erkorkmaz&samm.com +56172 + Unassigned + Returned 2023-07-03 + no-reply&iana.org +56173 + A.J. Steenkist + Stephan van Der Sterren + it&steenkist.nl +56174 + Geminare Inc. + Matthew Goddard + mgoddard&geminare.com +56175 + AirPro Technology India Pvt Ltd + Deepak Jain + deepak&airpro.in +56176 + Safeweb Seguranca da Informacao Ltda + Gisele Strey + compliance&safeweb.com.br +56177 + ThinkAnalytics + IANA Admin + iana-admin&thinkanalytics.com +56178 + It-Management + Fred Matthiesen + it&matthiesen-whv.de +56179 + COMKOM + Enno Evers + admin&comkom.de +56180 + Quantum Prime + Jose Cancel + public&josecancel.com +56181 + Wavesight Limited + Raj Partheepan + raj.partheepan&wavesight.com +56182 + The Coetzer Group + IT Admin + admin&lyttleton-spar.co.za +56183 + Digital Advantage Corp. + Hiromichi SHIMADA + admin&d-advantage.jp +56184 + Deutsches Rotes Kreuz in Hessen Volunta gGmbH + Thomas May + iana&volunta.de +56185 + Otis Elevator Company + Kalpesh Mistry + kalpesh.mistry&otis.com +56186 + Sinus Nachrichtentechnik GmbH + Tobias Swiderski + noc&sinus-nt.de +56187 + Riker Wohnbau + Immobilien GmbH + IT Administrator + it&riker-immo.de +56188 + VE3BUX + James Buck + iana_pen&ve3bux.com +56189 + Instituto Costarricense de Electricidad + Fernando Chaves Obando + fchaveso&ice.go.cr +56190 + Telindus-ISIT B.V. + Wieger Bontekoe + wieger.bontekoe&telindus.nl +56191 + Dawgshouse Lab + Jim Louk + jimclik&yahoo.com +56192 + Wiseif + Mert Cingöz + mertcingoz&gmail.com +56193 + Storopack Deutschland GmbH & Co.KG + Wolfram Wadepohl + wolfram.wadepohl&storopack.com +56194 + High-Availability + Paul Griffiths-Todd + iana&high-availability.com +56195 + Telmo a.s. + Leos Mach + leos.mach&telmo.cz +56196 + SOL Industries Pty Ltd + Trevor Zeegers + trevor&solind.com.au +56197 + Xiamen Evada Electronics Co., Ltd + yongjun wang + 81984079&qq.com +56198 + darkfate.net + Leos Mach + leos&darkfate.net +56199 + NetCom Sicherheitstechnik GmbH + Max Truxa + iana-contact&netcom.eu +56200 + Speech-Soft Solutions + Myron Hayden + mhayden&speech-soft.com +56201 + Lekha Wireless Solutions Pvt Ltd + Amarnadha Reddy Mahasamudram + amar&lekhawireless.com +56202 + Alexander Reinert + Alexander Reinert + alex&areinert.de +56203 + Aalborg Lufthavn a.m.b.a. + Robin Rismoen - IT Manager + it&aal.dk +56204 + IVSEC + Craig Wood + craigw&alloys.com.au +56205 + South-West jiaotong unversity richsun coorperation .inc + Gang Cai + 1274644197&qq.com +56206 + Transdev + Philippe Tailleur + philippe.tailleur&transdev.com +56207 + YAAL COOP + Eloi Rivard + contact&yaal.coop +56208 + Vitality Corporate Services Limited + David Elliott + dave.elliott&vitality.co.uk +56209 + Povodi Vltavy, statni podnik (Vltava river basin, state enterprise) + Martin Paul + martin.paul&pvl.cz +56210 + Urzad Marszalkowski Wojewodztwa Slaskiego + Jaroslaw Stachurka + jaroslaw.stachurka&slaskie.pl +56211 + Crown Castle + RyanDon + ryan.don&crowncastle.com +56212 + Eugster Informatik + Christian Eugster + christian.eugster&gmx.net +56213 + Xuzhou Minghuan Energy Co. LTD + Cheng Lujun + minghuan_snmp&qq.com +56214 + Rainside s.r.o. + Tibor Bartos + bartos&rainside.sk +56215 + TapIt Inc. 
+ Daejin Kim + tapit&tapit.kr +56216 + ALECOM AB + Vasilios Toutouzoglou + valle&alecom.se +56217 + Perspecta + Ronald E Jones + ronald.e.jones&perspecta.com +56218 + NSW Health + Junaid Mahomed + junaid.mahomed&health.nsw.gov.au +56219 + Neuralink Corporation + Logan Garbarini + logan&neuralink.com +56220 + BAOXING CO.,LTD. + Ruchun.Xia + xiaruchun&baoxingtech.com +56221 + Roche Diabetes Care + Luis Filipe Fernandes Costa + luis.costa.lc3&businesspartner.roche.com +56222 + Instytut Informatyki i Zarzadzania + Daria Konieczna + admin&instytutiz.pl +56223 + Canadian Secure Token Governance Authority + Marian Hearn + marian.hearn&cstga.ca +56224 + Stein Telecom Ltda + Luiz Leal Luiz + luiz.leal&steintelecom.com.br +56225 + DHK Storage, LLC + DHK Admin + info&chillirack.com +56226 + Wise Industria de Telecomumicacoes + Roberto Lucatelli + lucatelli&wi.com.br +56227 + Matthias Wübbeling + Matthias Wübbeling + matthias&wueb.de +56228 + Kiela + Marcel Kiela-Eliazar + mkiela&kiela.net +56229 + embeddedmcu + Mahdi Faghani + me.faghani&yahoo.com +56230 + Arctic Paper Munkedals AB + Per-Olof Aronsson + per-olof.aronsson&arcticpaper.com +56231 + Encedo Limited + Krzysztof Rutecki + krzysztof&encedo.com +56232 + Société Électrique des Forces de l'Aubonne + Stéphane Brot + s.brot&sefa.ch +56233 + inray Industriesoftware GmbH + Bo Biene + biene&inray.de +56234 + EdRixon + Rixon + edrixon&manx.net +56235 + Atos Systems Business Services GmbH + Bjoern Szyszka + bjoern.szyszka&atos.net +56236 + KPN Internet of Things + Marc Titulaer + marc.titulaer&kpn.com +56237 + SCADACID + Rémi PECE-BOURGEROLLE + remi.p.bourgerolle&gmail.com +56238 + wtm-solutions + Nicolas Duval + nicolas.duval&wtm-solutions.com +56239 + Behr Technologies Inc + Joe Zampino + it&behrtech.com +56240 + JSC ‘NPP ‘Radiosviaz’ + Trishkina Ludmila Fedorovna + it&krtz.su +56241 + Data Design System AS + Tore Ørpetveit + hostmaster&dds.no +56242 + AMV Sistemas de Alimentacion Electronica SL + Pedro Martin + pmartin&amvelectronica.com +56243 + Brunswick Group + ICT Admin + ictadmin&brunswickgroup.com +56244 + SPOe ITZ + Sascha Fruehwirth + itz-noc&spoe.at +56245 + Johannes Fortmann + Johannes Fortmann + pen&johannesfortmann.de +56246 + Central GA Cancer Care + Cile Lind + CLind&CentralGACancerCare.com +56247 + Rublon + Michal Wendrowski + m.wendrowski&rublon.com +56248 + Westek Technology Ltd. + Luke Lawrence + luke.lawrence&tpgroup.uk.com +56249 + Alec GmbH + Katrin Siewert + Katrin&alec.de +56250 + CM.com + Anil Bhaggan + tech&cm.com +56251 + ETC Solutions + Mark Angell + angellm&etcsol.com +56252 + Vimexx B.V. + W.v. Berendsen + wesley+iana&vimexx.nl +56253 + Corellium LLC + Amanda Gorton + amanda&corellium.com +56254 + Forte-CT + Dmitriy Khizhinskiy + dima&forte-it.ru +56255 + Changsha LANLI Technology Co., Ltd. + Shuai Zhan + huainian102&163.com +56256 + Max Planck Computing and Data Facility + John Kennedy + idm-alerts&mpcdf.mpg.de +56257 + LOGICALIS SPAIN + Ignacio Bermejo + ignacio.bermejo&es.logicalis.com +56258 + ASRC Federal Holding Company, LLC + Brian Sietz + netops&asrcfederal.com +56259 + E-Gear, LLC + Chris DeBone + chris&e-gear.us +56260 + MUFG Bank (Europe) N.V. + Ray Flinkerbusch + ray.flinkerbusch&nl.mufg.jp +56261 + UCLA Center for Computer Vision and Imaging Biomarkers + Dr. Koon-Pong Wong + kpwong&ucla.edu +56262 + Santa Lucía S.A. + Plácido Borja Estévez García + placidoborja.estevezg&santalucia.es +56263 + Sonic Healthcare Germany GmbH & Co. 
KG + Sascha Lüdemann + it-starnet&sonichealthcare.de +56264 + Shenzhen Acorid Communication Technology Co., Ltd(www.acorid.com) + 汪东辉 (Wang Donghui) + wangdh&acorid.cn +56265 + Woven Solutions Ltd + Gary Fosdike + gary.fosdike&wearewoven.com +56266 + Ogjos + Ray Ketcham + rayketcham&ogjos.com +56267 + AMC World Technologies GmbH + Daniel Ellermann + d.ellermann&amc-world.de +56268 + publica.re + David Baumgartner + ch.davidbaumgartner&gmail.com +56269 + SRK Electronics Pty Ltd + Simon Russell + general&srkelectronics.com.au +56270 + NSGUARD TECHNOLOGY INC. + Paul Luo + mis&gorilla-technology.com +56271 + Sanoma Manu Oy + Timo Pelkonen + timo.pelkonen&sanoma.fi +56272 + ROYAL MOSLEM IMPERIAL + ANTWON MADDOX + LordAntonMaddoxEl&Gmail.com +56273 + Machines talk + Jithin chandrasekharan unnithan + jithin.unnithan&machinestalk.com +56274 + PR Oncology, LLC + Luis Baez Vallecillo, MD + lbaez&proncology.com +56275 + RA-MICRO Hamburg GmbH + Joerg Wendlandt + iana&lab-x.de +56276 + Thales SIX Nederland + Marcel Wust + marcel.wust&nl.thalesgroup.com +56277 + Denbighshire County Council + Andrew Wallbank + nom-renewals&denbighshire.gov.uk +56278 + China Mobile IoT Company Limited + Li zuofa + lizuofa&cmiot.chinamobile.com +56279 + SereneIT, Inc. + Thomas R Breslin + thomas.breslin&sereneits.com +56280 + Cloud2 Software Inc. + Jay Legue + jay.legue&cloud2software.com +56281 + Arvato Supply Chain Solutions + Henry Möhsel + henry.moehsel&arvato-scs.com +56282 + Kapsch BusinessCom AG + Mario Cenc + mario.cenc&kapsch.net +56283 + EA Systems Dresden GmbH + Alina Scholz + alina.scholz&ea-energie.de +56284 + Atlas Cloud Services + Oussama El Meknassi + oussama.elmeknassi&ocpgroup.ma +56285 + S&S Activewear + Parker Warf + pwarf&ssactivewear.com +56286 + Steico Industries Inc + Eric Crescini + ecrescini&steicoindustries.com +56287 + Hacksoft + Stéphane Berthiaume + hacksoft2&hotmail.com +56288 + Cloudready.dk + Peter Wohlers + peter&wohlers.nu +56289 + LEAPBYTE TECHNOLOGY + MOHAMMAD SHAIKH + administrator&leapbyte.com +56290 + Community Brands + Chris Kitzmiller + chris.kitzmiller&communitybrands.com +56291 + Solmax Inc. + Dominic Cote + dcote&solmax.com +56292 + AS207960 Cyfyngedig + Q Misell + q&as207960.net +56293 + UI-ICON + Craig Hoffman + noc&uisupport.org +56294 + Magnetic Inspection Laboratory, Inc. + Shane Rundle + srundle&milinc.com +56295 + MORSON JAPAN Co., Ltd. + Teruo Haraki + haraki&morson.jp +56296 + East West Bank + Kelvin Mok + Kelvin.Mok&EastWestBank.com +56297 + 17bit.org + Ivan Gudkov + i1&17bit.online +56298 + Infotech Solutions (UK) Ltd + Steve Parnell + steve.parnell&infotech.co.uk +56299 + SUBnet192 Inc. + Marc Bouchard + marc&subnet192.com +56300 + STAUFEN.AG + Ingo Boehm + ingo.boehm&staufen.ag +56301 + IOIT + Daniel Fourie + devlab2000&gmail.com +56302 + Narodni agentura pro komunikacni a informacni technologie, s. p. + Jiri Ulman + sysadmins&nakit.cz +56303 + MFOConsulting AB + Marcus Folkesson + marcus.folkesson&gmail.com +56304 + Infrastructures Technologiques Quebec + Greg Aucoin + greg.aucoin&itq.gouv.qc.ca +56305 + ParaFlare Pty Ltd + Head of Engineering + engineering&paraflare.com +56306 + Centro Oncológico Figueroa-Cabrera + Amy Cabrera + amycabreramd&gmail.com +56307 + Cancer Research And Biostatistics + Brent Watkins + brentw&crab.org +56308 + SunForce Technology Co.,LTD. 
+ Lu Huang + hlu&sunforce.com.tw +56309 + Codapayments + Sebastian Aresca + sebastian&codapayments.com +56310 + Aardsoft Oy + Bernd Wachter + bwachter-iana&aardsoft.fi +56311 + Hatay Mustafa Kemal Universitesi + Mehmet GULER + mg&mku.edu.tr +56312 + İlteriş Yağıztegin Eroğlu (linuxgemini) + İlteriş Yağıztegin Eroğlu + iana&linuxgemini.space +56313 + HoCoSto B.V. + Karel Dupain + k.dupain&hocosto.com +56314 + Signify Holding BV + Oscar Deurloo + oscar.deurloo&signify.com +56315 + Stadt Georgsmarienhütte + Timm Rogatsch + it&georgsmarienhuette.de +56316 + Objectivity Sp. z o.o. + IT Support Team + itsupportteam&objectivity.co.uk +56317 + Boys and Girls Clubs of America + Daren Daigle + ddaigle&bgca.org +56318 + Rec10 Organization + Hidetoshi Matsuo + gn64&rec10.org +56319 + Rauf Born Divine Bey TR. + Rauf Bey + PYNWEST&PROTONMAIL.COM +56320 + Silvus Technologies + Ajit Warrier + ajit&silvustechnologies.com +56321 + Telemetrics, Inc. + Michael Cuomo + mac&telemetricsinc.com +56322 + Optus Digital Networks Automation + Jonathan Zhao + jonathan.zhao&optus.com.au +56323 + New Age Consulting Service Inc. + Jim Lohiser + support&n2net.com +56324 + Horten Folkeverksted + Daniel Løvbrøtte Olsen + daniel.olsen&folkeverkstedet.com +56325 + 1NCE GmbH + Legal & Regulatory Affairs + legalservices&1nce.com +56326 + EFKO Management Company + Evgeniy Leksenberg + e.leksenberg&efko.ru +56327 + CL International + Sung Joon, Park + support&clintl.kr +56328 + OBSTA + AT Contact + info&obsta.com +56329 + Digital Communications Company + Carlton Askia + Carlton.Askia&smartdcc.co.uk +56330 + Inovetech + MICHAEL FEIJO + michael&inovetech.ind.br +56331 + Balasys Ltd. + Bálint Kovács + balint.kovacs&balasys.hu +56332 + Transferant + Mark Janssens + ianaoid&transferant.nl +56333 + Optus Digital Networks CaaS + Ashutosh Swamy + ashutosh.swamy&optus.com.au +56334 + Middleman Software, Inc. + James Heliker + james.heliker&middleman.tv +56335 + Analytic Designs, Incorporated + Harry Shamansky + harry&adinc.com +56336 + Skeeter Health + Giorgio Azzinnaro + tech+iana-pen&skeeterhealth.com +56337 + Hack@UCF + Peyton Duncan + pduncan&hackucf.org +56338 + Carolina Glaucoma And Vision Center + Michael Chapin + michael.chapin&prismahealth.org +56339 + Garantiqa Hitelgarancia Zrt. + Ferenc Szlamka + szlamka.ferenc&garantiqa.hu +56340 + Cytocheck Laboratory + Chuck Meyer + chuck&cytocheck.com +56341 + Fraunhofer Institute for Production Technology IPT + Marcel Korte + marcel.korte&ipt.fraunhofer.de +56342 + Bits 'n' Bytes IT-Consulting + Juergen Bruckner + info&bitsnbytes.eu +56343 + RiskForge + Sean Bruton + sean&riskforge.io +56344 + Sherpa 6, Inc. + Jeremy Myrtle + jeremy&sherpa6.com +56345 + National Tsing Hua University + Chun-Tien Chang + chuntien&mx.nthu.edu.tw +56346 + nanjing zhiyutiancheng Technology Co.Ltd + chengxueyang + 4415753&qq.com +56347 + EdgerOS + Tongtang Fu + futongtang&edgeros.com +56348 + Unisinsight + Zhao Hua + zhao.hua&unisinsight.com +56349 + Gävle Kraftvärme AB + Mikael Sandanger + mikael.sandanger&gavleenergi.se +56350 + VEBO + Patrick Bruegger + tm-system&wagner.ch +56351 + vaf co. + Mohammad Moghim + moghim&vaf-co.com +56352 + SCCT Medical Group + Daren Burns + burnsds&henrymayo.com +56353 + OXD Consulting Ltd. + Lawrence Chan + system&oxd.com +56354 + ThoughtFarmer Inc. + Lawrence Chan + system&thoughtfarmer.com +56355 + Baelim + Priyanshi Chaudhari + priyac821&gmail.com +56356 + Inkwire Tech (Hangzhou) Co.,Ltd. 
+ Zhang Youwang + dev&inkwire.cn +56357 + ITK » CONSULTING & SOLUTIONS + Daniel Funke + info&itkcs.com +56358 + WEFA Inotec GmbH + Christian Siggert + it&wefa.com +56359 + Karlborgs Elkontroll AB + Alexander Schmidt + alex&karlborgselkontroll.se +56360 + City of Lafayette + Hunter McCallum + hunter.mccallum&cityoflafayette.com +56361 + Beyond LED Inc + Steven Xie + steven.xie&beyondled.com +56362 + e-scopics + Eric guiffard + eric.guiffard&e-scopics.com +56363 + Nexeria AB + Martijn Katerbarg + info&nexeria.se +56364 + Leuchter IT Infrastructure Solutions AG + Jonas Stalder + jonas.stalder&gmx.ch +56365 + K-Soft + Mikhail Feklisov + mihail&feklisov.ru +56366 + Alstom Sverige AB + Rickard Mauritzson + cmpenmanagementrcs&alstomgroup.com +56367 + dSoft-Bulgaria Ltd. + Doychin Bondzhev + doychin&dsoft-bg.com +56368 + Singularity LLC + MARIO R HERNANDEZ JR + mario&sglrit.com +56369 + Mission Embedded + Michael Kreilmeier + krm&mission-embedded.com +56370 + Kantonspolizei St. Gallen + Thomas Breu + ict&kapo.sg.ch +56371 + DOTSCREEN + Stephane Boisson + sboisson&dotscreen.com +56372 + CONEXTOP Technologies Co., Ltd. + peter.zhou + whois&conextop.com +56373 + CSSF + Yannick Sauren + it.infra-network&cssf.lu +56374 + Media-Tel + Vassili Tchersky + vassili.tchersky&media-tel.ru +56375 + Elektro Celje d.d. + Diana Kosaber + licence&elektro-celje.si +56376 + New Technologies + Oleg Kovrigin + okovrigin&ntpccorp.ru +56377 + Penzagrazhdanproekt + Pavel Sinev + flanger&penza-gp.ru +56378 + SMSTraffic + Sergey Barkov + s.barkov&smstraffic.ru +56379 + Singularis development, s.r.o. + Boris Lajda + lajda&bowa.sk +56380 + Frank Czepat + Frank Czepat + fczepat&microsoft.com +56381 + Ingenieria de Sistemas Avanzados del Centro S.A. de C.V. + Rodrigo Villalobos Rodriguez + rvillalobos&isavanzados.com.mx +56382 + BHP Hardware and Software + Bruno Milhoci + milhoci&bhphardsoft.com.br +56383 + Morsoe kommune + Steve Nielsen + IT&morsoe.dk +56384 + Landkreis Teltow-Fläming + Uwe Müller + uwe.mueller&teltow-flaeming.de +56385 + LLC Lotsiya + Sergey Zhmykhov + zhmykhovsv&loodsen.ru +56386 + Praxispunkt GmbH + IT Support + hilfe&praxispunkt.de +56387 + DFINITY USA, LLC + IANA Administrator + iana&dfinity.org +56388 + GTS Ground Transport Systems Austria GmbH + HAMMER Johannes + johannes.hammer&urbanandmainlines.com +56389 + The Munshi Group + Sumit Munshi + smunshi&munshigroup.com +56390 + Wuhan Yangtze Computing Technologies Co.,LTD + Min Jie + jmin&yctco.com.cn +56391 + Digital Devices GmbH + Bodo Rueskamp + numbers&digitaldevices.de +56392 + OOO "Gagar.IN" (LLC) + Sergey Zorin + s.zorin&gagar-in.com +56393 + Crnogorski Telekom A.D. + Ivan Stankovic + ivan.stankovic&telekom.me +56394 + American Lebanese Syrian Associated Charities + Jordan O'Conner + alsacpki&stjude.org +56395 + ENT Specialists of Metairie + Clare Collier + ccollier.ent&gmail.com +56396 + LETEC + Olivier Detiege + olivier.detiege&letec.be +56397 + Stichting Bravis Ziekenhuis + Dhr J.P.J. van Tillo + j.vantillo&bravis.nl +56398 + solutio GmbH & Co. KG + Julian Stitz + julian.stitz&solutio.de +56399 + johanssons åkeri i bygdeträsk ab + lars johansson + johanssons.akeri&bygdsiljum.se +56400 + YOURWiFi, s.r.o. + Martin Tesar + martin.tesar&yourwifi.cz +56401 + Cegep de St-Felicien + Danic Gagne + dgagne&cegepstfe.ca +56402 + Ministry of Justice + Matthew White + matthew.white1&justice.gov.uk +56403 + Indigo Technology Partners, Inc. 
+ Wayne Osse + wosse&indigotp.com +56404 + Southend-on-Sea Borough Council + Mark Waldron + ICTNetworkTechnicians&southend.gov.uk +56405 + Connexiums + Angel Baldera + contact&connexiums.com +56406 + SoftwareNY.com + Mikhail Akhmeteli + makhmete&softwareNY.com +56407 + ECOS GmbH + Roland Zauner + r.zauner.iana&ecos.at +56408 + Tom Tech Limited + Thomas Lewis + thomaslewis&0xaa55.me +56409 + aramido GmbH + Andreas Sperber + andreas.sperber_pen-iana&aramido.de +56410 + KARMA GmbH + Sebastian Ludwig + sl&karma.de +56411 + Courtman + Nicholas Courtman + programming&courtman.me.uk +56412 + Courtman + Nicholas Courtman + programming&courtman.me.uk +56413 + X2nSat + Shane Watts + shane.watts&x2n.com +56414 + Wattsie + Shane Watts + wattsie&gmail.com +56415 + Bank Negara Malaysia + Sritharan Sivaguru + cyberninja&bnm.gov.my +56416 + Quadrangle Architects + Shabbir Salem + ssalem&quadrangle.ca +56417 + IONOSYS + Stephane Blondeau + stephane.blondeau&ionosys.com +56418 + blattenpau + Sabine Schmidt + bine&metrodiv.de +56419 + Wallinger Ricker Schlotter Tostmann, Patent- und Rechtsanwälte Partnerschaft mbB + Ferit Sari Tomé + tome&wallinger.de +56420 + DGTL Workshop + Matthew Conto + matthew&dgtlworkshop.com +56421 + CSIRO - Astronomy and Space Science + Daniel Craig + daniel.craig&csiro.au +56422 + GlobalConnect Outsourcing Services + Kasper Kristensen + kaskri&globalconnect.dk +56423 + Energy Toolbase + Nathan Gutzmann + nathan.gutzmann&energytoolbase.com +56424 + Integrus Architecture PS + Steve Lee + slee&integrusarch.com +56425 + KAITEC GmbH + Pascal Degener + pascal.degener&kaitec-gmbh.de +56426 + Tehama County office of Education + John Young + jyoung&tehamaschools.org +56427 + Girteka Logistics, UAB + Linas Mockevičius + nadmin&girteka.eu +56428 + M.T. s.r.l. + Cesare Riva + cesare.riva&mt-srl.it +56429 + Neuron Gmbh + Manu Sharma + info&neuron-comm.com +56430 + Chugach Government Solutions + Jeffrey Tucker + jeffrey.tucker&chugachgov.com +56431 + Nardini Klinikum GmbH + Vitalij Lubeschanin + v.lubeschanin&nardiniklinikum.de +56432 + Kulturleben in der Studentenstadt e.V. + Lennart Rehkaemper + lennart.rehkaemper&stusta.de +56433 + Paylead + Raphaël Barrois + iana-oid&paylead.fr +56434 + A.G.T. ENTERPRISE SRL + OLIVIERI BENEDETTO + olivieri&agtenterprise.it +56435 + Dermatology Associates of Northwest Florida PA + Maribel Noda + maribel&pensacoladerm.com +56436 + Elearning Program - Thu Dau Mot University + Thanh Pham Tien + thanhpt&tdmu.edu.vn +56437 + Ocean Controls + Brandon Speedie + brandon&oceancontrols.com.au +56438 + HORNBACH Baumarkt AG + Paul Sester + infosec&hornbach.com +56439 + Rheinmetall Cyber Solutions GmbH + Moritz Trepte + Moritz.Trepte&Rheinmetall.com +56440 + BMTECH PERÚ S.A.C. + Axell Alvarado Arévalo + axell.alvarado&bmtech.pe +56441 + Fizyr B.V. + Maarten de Vries + m.devries&fizyr.com +56442 + eForce FEE Prague Formula + Martin Cejp + admin&eforce.cvut.cz +56443 + NOVA Web Development, LLC + Stefan-Ionut Tarabuta + stefan.tarabuta&novawebdevelopment.org +56444 + Prenuvo + Ramin Sahebjavaher + technology&prenuvo.com +56445 + SiteVision AB + Kristian Carlsson + kristian.carlsson&sitevision.se +56446 + VIVAVIS AG + Andreas Pfüller + andreas.pfueller&vivavis.com +56447 + Shenzhen Yunzhang Technology Co. LTD + zhangyi + danfengzi&gmail.com +56448 + Projects By Samveen + Samveen Gulati + samveen&samveen.in +56449 + C24 Bank GmbH + Johannes Vetter + it-operations&c24.de +56450 + vleo.net + Andrea Leofreddi + a.leofreddi&vleo.net +56451 + Markoja d.o.o. 
+ Ivan Pavelic + ivan.pavelic&markoja.hr +56452 + Dätwyler IT Infra GmbH + Alexander Kölbel + alexander.koelbel&datwyler.com +56453 + LA Care Health Plan + Britt Jennings + bj5989&yahoo.com +56454 + ELECTRONIC SERVICE + Alfieri Maurizio + info&telantel.it +56455 + TELANTEL S.R.L + LIVIO CORESI + INFO&TELANTEL.IT +56456 + Beijing CyberInx Technologies Company Limited + Weiran Wu + wuweiran&cyberinx.com +56457 + CHOO YI JIE + CHOO YI JIE + admin&chooyijie.com +56458 + Milford Film & Animation AB + Todd Daugherty + todd&milford.se +56459 + Personal Alliance Company + Lester R. Cox + Lesterrcox&gmail.com +56460 + IPARTNER + Jean-frederic VINCIGUERRA + jean-frederic.vinciguerra&ipartner-it.com +56461 + aetherAI Co., Ltd. + Chia-Pin, Kang + kjb&aetherai.com +56462 + Exhibition of Achievements of National Economy + Information Technology Department + ditteh&vdnh.ru +56463 + Alvondo AB + Olle E Johansson + registry&alvondo.se +56464 + Precision Practice Management (Terran) + Wayne Schiermeyer + admin&precisionpractice.com +56465 + Segnus + Can MOGOL + canmogol&gmail.com +56466 + Aegon Pensii SAFPP SA + It Dpt + oid-admin&aegon.ro +56467 + Finding Sed + Ben Kinney + ben.kinney&findingsed.com +56468 + Erntec Pty Ltd + Paul Tonga + paul.tonga&erntec.com.au +56469 + leafpuddle + Vi Roesler + vi&leafpuddle.com +56470 + Glen Jarvis Training & Consulting, LLC + Glen Jarvis + glen&glenjarvis.com +56471 + Phicus Tecnologia S.L. + Juan Gomez Melero + jgomez&phicus.es +56472 + Central Bank of Jordan + Ala' F. Wrikat + alaa.wreikat&cbj.gov.jo +56473 + Police & Crime Commissioner for West Mercia + Andrew Withers + andrew.withers&westmercia.police.uk +56474 + RebalanceMD + Chris Kasztner + sysadmin&rebalancemd.com +56475 + 上海申石软件有限公司深圳分公司 (Shanghai Shenshi Software Co., Ltd. Shenzhen Branch) + linghuangsong + ylzxlhs&163.com +56476 + 深圳市环迅网络技术服务有限公司 (Shenzhen Huanxun Network Technology Service Co., Ltd.) + linghuangsong + ylzxlhs&163.com +56477 + CritAcuity Medical Group + Joseph Spanier + jspanier&critacuity.com +56478 + swigg.net + Dustin Sweigart + ast3r3x&gmail.com +56479 + Kentucky Blood Center + IT Systems Administrators + SysAdmin&kybloodcenter.org +56480 + Ortiz Development + Jean Ortiz + jeanortiz14&gmail.com +56481 + Pacheco's Tech + Carlos Pacheco + acprd_carlinhos&hotmail.com +56482 + inCrypto Software + Daniel Marquardt + dcmarquardt&incryptosoft.com +56483 + Novosit SRL + Francis Antonio Reyes Pineda + freyes&novosit.com +56484 + Aniruddha Telemetry systems + Amit Ashok + amit.ashok&aniruddhagps.com +56485 + IANT GmbH + Claas Beyersdorf + info&iant.de +56486 + BattleCrate Ltd + Alan Doherty + alan&serverflex.io +56487 + IERUS Technologies, Inc. + Chris Davis + chris.davis&ierustech.com +56488 + Xolaris AB + Andreas Sikstrom + andreas.sikstrom&xolaris.se +56489 + UANATACA EL SALVADOR, S.A. DE C.V. + Mario Hernandez + info.sv&uanataca.com +56490 + Wildboar Software + Jonathan M. Wilbur + jonathan.wilbur&wildboarsoftware.com +56491 + AvD Wirtschaftsdienst GmbH + Daniel Cronauer + vlserveroperators&avd.de +56492 + Nexteer Automotive Poland + Pawel Misiak + pawel.misiak&nexteer.com +56493 + Awake Software + William Kudrle + bkudrle&awakesoft.com +56494 + Almariya Trust Services SLU + Tomas Hidalgo Salvador + thidalgosalvador&gmail.com +56495 + Quanzhou Kingtone Optic & Electronic Technology Co.,Ltd. 
+ Mars Lin + mars&kingtone.net.cn +56496 + Lacerda Tecnologia + Paulo Ricardo Lacerda + paulodassie.lacerda&gmail.com +56497 + Webb Electronics + Scott Knutsen + scott&cissecurity.ca +56498 + Mei Sheng Textiles Vietnam Co., Ltd + Nicolas Koehl + nic&mstexvn.com +56499 + Psomagen inc + junseo kim + junseo&psomagen.com +56500 + Maryland Judiciary - Administrative Office of the Courts + Jason Thomas + jason.thomas&mdcourts.gov +56501 + AB Handshake Corporation + Network Admin + hostmaster&abhandshake.com +56502 + AVUTEC + Reinier van der Togt + r.vandertogt&avutec.com +56503 + Bachleitner Technology GmbH + Oliver Penzler + iana&bachleitner-technology.com +56504 + grammm GmbH + Norbert Lambing + norbert.lambing&grammm.com +56505 + Premier Suburban Medical Group, PLLC + Wendy Filipowski + psmg&premiersuburban.com +56506 + Project Antrea + Project Antrea Maintainers + projectantrea-maintainers&googlegroups.com +56507 + Kennedy Engineering + Sawyer M Kennedy + sawyerkennedy&gmail.com +56508 + AMETEK Solidstate Controls + Robert George + Rob.George&ametek.com +56509 + Mensing Jochim + Markus Jochim + admin&bildungs.cafe +56510 + ShenZhen SureCall COMM Tech Co., Ltd + LI RUIZHEN + liruizhen&cellphone-mate.net +56511 + JCC PAYMENT SYSTEMS LTD + Andreas Savva + a.savva&jcc.com.cy +56512 + Vizolution Ltd + Tim Boothby + infrastructure&vizolution.co.uk +56513 + Hottinger Brüel & Kjær + Jan Vinterberg + HBKITInfrastructureBank&hbkworld.com +56514 + Institute of Physics Bhubaneswar + System Administrator + sysadmin&iopb.res.in +56515 + Facultad de Ingeniería - UBA + Leonardo Petrora + aplicaciones&fi.uba.ar +56516 + Hermann Stemberger + Hermann Stemberger + hermann&stemberger.at +56517 + n0n3's + Adrian Kubok + n0n3&n0n3.org +56518 + FyeLabs Inc. + Tanner Ryan + tanner&fyelabs.com +56519 + Home Lab + Tobias Abellan + gfacek&gmx.de +56520 + SISPI SISTEMA PALERMO INNOVAZIONE S.P.A. + Ferdinando Gambino + f.gambino&sispi.it +56521 + Jesse Coretta + Jesse Coretta + jesse.coretta&icloud.com +56522 + Jokey Holding GmbH & Co.KG + Walter Wieserner + pki&jokey.com +56523 + AKSA-SDS, Islamabad + Nafees Qureshi + muhammad.nafees&aksa-sds.com +56524 + Magellan Midstream Partners + PKI Administrator + magellanpki&magellanlp.com +56525 + EBE Elektro-Bau-Elemente GmbH + Jens Matheis + Jens.Matheis&ebe.de +56526 + Telcan Inc. + Mamoon Rashid + mrashid&telcan.net +56527 + Stadt Rorschach + Brauen Patric + patric.brauen&rorschach.ch +56528 + ADMIT Sp. z o.o. Sp. k. + Adrian Kubok + informatyk&admit.pl +56529 + Ethra Tech S.r.L. + Daniele Claudio De Piano + ddepiano&ethratech.it +56530 + Zentive Group + Dan Shilcock + dan.shilcock&zentive.com +56531 + Daryl P Kelly, LLC + Daryl Kelly + daryl&darylpkelly.com +56532 + NAL Research Corporation + Peter Kormendi + hostmaster&nalresearch.com +56533 + Dowslake Technologies + LahaJo + contact&dowslakemicro.net +56534 + Net Ice 9 LTD + Dragan Milic + dragan&netice9.com +56535 + Great Wall Motor Company Limited. + Chaofeng.Ding + dingchaofeng&gwm.cn +56536 + HUARUI EXPON(henan) Technology Co.,Ltd. + Kai Yu + yukai&expontech.com +56537 + AVAP + Hajime MATSUMOTO + hajime.matsumoto&avap.co.jp +56538 + Portsmouth City Council + Ian Cox + is.infrastructureengineers&portsmouthcc.gov.uk +56539 + Horry County Airports + Ricky Helmer + admin&flymyrtlebeach.com +56540 + N-Squared Software (NZ) Limited + Jamie Love + admin&nsquared.nz +56541 + Mperativ, Inc. + Paul C. 
Bryan + pbryan&mperativ.io +56542 + Guardicore + Yair Shemla + yair.shemla&guardicore.com +56543 + Mälby bioenergi och skog + Ronny Sjöqvist + ronny&malbyskog.se +56544 + LIQUIDPLUS + Black Shui + support&liquid-plus.com +56545 + Primax Technologies Inc. + Elie Nasrat + eknasrat&primaxpower.com +56546 + Regionalne Centrum Bezpieczeństwa w Olsztynie + Paweł Drankowicz + pawel&kryzys.olsztyn.pl +56547 + Northeastern University + ITS Identity and Access Management + ois&northeastern.edu +56548 + Qsaúde Operadora de Saúde + Anderson Coelho + anderson.coelho&qsaude.com.br +56549 + abancert.cl + Claudio Inostroza + contacto&abancert.cl +56550 + Indiana Wesleyan University + Justin Hughes + justin.hughes&indwes.edu +56551 + S&B Engineers and Constructors + Neil Gaede + nsgaede&sbec.com +56552 + CELOG PARTICIPATIONS + ZURLENI + az&celog.fr +56553 + OPT-NC + Frederic OSVALT + frederic.osvalt&opt.nc +56554 + Cabtronix AG + Christian Bürki + info&cabtronix.ch +56555 + Shenzhen Gullpower Technology Co.Ltd. + Shadow bao + baoyajun&gullpower.com +56556 + Evangelische Stiftung Alsterdorf + Matthias Timm + matthias.timm&akquinet.de +56557 + Oleg's Lab + Oleg A. Arkhangelsky + sysoleg&yandex.ru +56558 + CARTI Cancer Center + Carmen Jacobson + carmen.jacobson&carti.com +56559 + DICOM SDL + Kim, Tae-Sung + taesung.angel&gmail.com +56560 + Schnapke GbR + Daniel Schnapke + ca&schnapke.name +56561 + Timor Sommer, Software- und Systemarchitektur + Timor Sommer + timor&timor-home.de +56562 + converge-it + Thomas Weber + webster1501&gmail.com +56563 + Succ Inc. + Ben Lavi + benbenlavi&hotmail.com +56564 + Tobias L. + Tobias Lieben + iana&tolinet.de +56565 + nexgenwave + Yangjin Kim + yjkim&nexgenwave.com +56566 + Billingo Technologies Zrt. + Daniel Fekete + dan&billingo.com +56567 + EDNA (OSK LLC) + Yuriy Andropov + admin&edna.ru +56568 + Nefos IT bv + Kees Meijs + kees&nefos.nl +56569 + Notre Dame High School + Donald Bauch + ndtechadmins&ndnj.org +56570 + Melissa Maxwell Independent Contractor Administrative Assistant + Melissa Maxwell + maxwellmelissa75&yahoo.com +56571 + ZHEJIAGN SHIP ELECTRONICS & TECHNOLOGY CO., LED. + TOM YU + 634015395&qq.com +56572 + SHENZHEN INCREASE TECHNOLOGY CO., LTD. + Weihua Zhang + admin&szincrease.com +56573 + Battenhausen GmbH + Patrick Battenhausen + pbattenhausen&battenhausen.net +56574 + endoo GmbH + Christian Hanster + iana&endoo.eu +56575 + Galvanotek Embalagens LTDA + Gerson Dalcin + gerson&galvanotek.com.br +56576 + Kulturkosmos Müritz e.V. + Magnus Keppeler + magnus&kulturkosmos.de +56577 + WATCHMYDC ANALYTICS OY + Mohammed Mijanur Rahman + info&watchmydc.com +56578 + Comda LTD + Yair Eisenstein + yaire&comda.co.il +56579 + Computer Engineering Service Co., Ltd. + Yoichi MURAKAMI + y-murakami.2&ces-cp.com +56580 + Address Institut für Höhere Studien – Institute for Advanced Studies (IHS) + Jwan Ali + ali&ihs.ac.at +56581 + Lucy Electric + Simon Andrews + simon.andrews&lucyelectric.com +56582 + LGI Ltd + Mitch Freeman + support&lgi.com.au +56583 + NTT Global + Shane Solomon + shane.solomon&global.ntt +56584 + Blue Cross Blue Shield of Vermont + Myra Bergeron + bergeronm&bcbsvt.com +56585 + Dr. D. 
Kolokythopoulos - Medical Practice + Dimosthenis Kolokythopoulos + dimos&tenios.gr +56586 + ALLIANCE FORETS BOIS + DOIRAT Gaylord + gaylord.doirat&alliancefb.fr +56587 + Tiller Technologies Limited + Mark Cadby + mark.cadby&tillertech.com +56588 + VelocIT Powered by DSi + Jon Swarner + jswarner&v-msp.com +56589 + ScoutLink + Tony McAndrew + iana-pen&scoutlink.net +56590 + Aryagami Cloud Services + Jeelani Syed + jsyed&aryagami.com +56591 + NPO RTS + Frolov Vadim + frolov&nports.ru +56592 + Moog Defense and Space + Kent Carver + kcarver&moog.com +56593 + Niedersaechsisches Justizministerium (Ministry of Justice, State of Lower Saxony) + Technisches Betriebszentrum (Technical Operations Center) + postmaster&justiz.niedersachsen.de +56594 + noortec.com + Masoud Jafari + admin&noortec.com +56595 + Siera + K123 + iana&siera.bayern +56596 + MTK LLC + Oksana Komarova + info&mtkooo.ru +56597 + RiverHills Bank + Todd Williams + twilliams&rhb24.com +56598 + Nettie Trust Services + Ole Aldric + cert&srv.nu +56599 + John Knox Village of Central Florida + Nicole Vega + nvega&johnknox.com +56600 + Gisual, Inc. + Robin Klingsberg + robin&gisual.com +56601 + WATTS Battery LTD + Mikhail Dyachenko + mdyachenko&wattsbattery.com +56602 + van Happen Containers + Paul van Oort + ictsupport&vanhappencontainers.nl +56603 + Peeeks BV + Wouter Smit + wouter.smit&peeekspower.com +56604 + AutoCanada Software Inc. + Robert Sellars + rsellars&autocan.ca +56605 + Wojewódzki Szpital Zespolony im. Stanisława Rybickiego w Skierniewicach (Provincial Complex Hospital Stanisław Rybicki in Skierniewice) + Marek Jarota + m.jarota&wsz-skier.pl +56606 + APC Technology + Damian Demasi + damian.demasi.1&gmail.com +56607 + Beijing Fineone Technology Co.,Ltd. + zilanzhao + zhaozilan_ly&163.com +56608 + Miltera Elektronik Ltd. Sti. + Mehmet Kurnaz + info&miltera.com.tr +56609 + Airbus CyberSecurity GmbH + Franz Wack + franz.wack&airbus.com +56610 + Innova Bilişim Çözümleri A.Ş. + Cevdet Erçağlar + cercaglar&innova.com.tr +56611 + Kurt Jähnig GmbH & Co. KG + Daniel Schomerus + schomerus&jaehnig.de +56612 + Futurum Fastigheter i Örebro AB + Mikael Karlsson + devop&futurumfastigheter.se +56613 + Jhol Momo Udhyog + Deepak Bomjan + dpaktamang&gmail.com +56614 + 杭州中恒电气股份有限公司 (Hangzhou Zhongheng Electric Co., Ltd.) + 窦文辉 (Dou Wenhui) + douwh&hzzh.com +56615 + itk communications GmbH + Administrator + admin&itk-com.de +56616 + Dr. Anibal Avila + Esther Williams + dravila123&gmail.com +56617 + Disastrophy + Kevin Day + kevin&disastrophy.com +56618 + Bank of Taiwan Los Angeles Branch + Pao Lo Yang + it&botla.us +56619 + Mneme + Wouter Aupers + oid-admin&mneme.nl +56620 + Xacria SRL + Gaspare Maria + gaspare.maria&xacria.com +56621 + Caban Systems + Will Whitford + will&cabansystems.com +56622 + Maxlink Industria e Eletrônica LTDA + Gabriel Augusto Ferreira Paulino + desenvolvimento.maxlink&gmail.com +56623 + Raidiam Services Ltd + Ralph Bragg + ralph.bragg&raidiam.com +56624 + ANDBANK MONACO + Pascal MAQUAIRE + pki&andbank-monaco.mc +56625 + GlaxoSmithKline Consumer Healthcare + Marcin Wachowiak + marcin.2.wachowiak&gsk.com +56626 + Kozhukhar IP + Alexander Kozhukhar + ak&uloc.ru +56627 + Sigur + Ilya Kondrashkin + dev&sigur.com +56628 + Baker College + Scott Wood + itvendor&baker.edu +56629 + Ostwestfalen-Lippe-IT + Hans-Peter Holzhausen + ad&owl-it.de +56630 + Azure Radio Solutions + Dennis I. 
Aspinwall + di&cflsound.com +56631 + JSC Basisbank + Andria Vasadze + admin&basisbank.ge +56632 + Brompton Technology Limited + Chris Deighton + chris_deighton&bromptontech.com +56633 + Associação Brasileira das Empresas de Cartões de Crédito e Serviços - ABECS + Marcelo Takeyama + marcelot&abecs.org.br +56634 + ysaf + Clemens du Bellier + clemens&ysaf.de +56635 + Betop Technologies + Shawn Lee + shawnlee&betop-tech.com.tw +56636 + Lars Lehmann + Lars Lehmann + kontakt&lars-lehmann.net +56637 + Austrian Power Grid AG + Michael Gansterer + office.uai&apg.at +56638 + Luke Granger-Brown + Luke Granger-Brown + iana.pen&lukegb.com +56639 + Ravensdown Ltd + Drewe Hinkley + drewe.hinkley&ravensdown.co.nz +56640 + ITLab + Artur Pawlowski + lab&itlabservice.co.uk +56641 + Ministerul Lucrarilor Publice, Dezvoltarii si Administratiei + Valeriu Nica + mlpda&mlpda.ro +56642 + SafeTwice + Jesús González + gonzalez.jesus&safetwice.com +56643 + Paysera + Gvidas Astromskas + g.astromskas&paysera.lt +56644 + Brock Enterprises + August Jacob + august.jacob&brockgroup.com +56645 + Strategic Explorations LTD + Charles Yang + Charles&strexp.net +56646 + Hermes Telecom + Niels Mortelmans + niels&hermes-telecom.net +56647 + Infotech Junction Inc + Ankit Agarwal + ankit&infotechjunction.com +56648 + Amphenol Socapex + Francis GUDIN + informatique&amphenol-socapex.fr +56649 + FOSSTECH SOLUTIONS PTE LTD + David Tio + davidtio&fosstech.biz +56650 + City administration Bern test + Nizethan Nithiyananthan + ks.systemtechnik&bern.ch +56651 + Soha Jin + Soha Jin + soha&lohu.info +56652 + 北斗时源(北京)科技有限公司 (Beidou Shiyuan (Beijing) Technology Co., Ltd.) + 周国峰 (Zhou Guofeng) + info&bdsytime.com +56653 + Varde kommune + Infrastuktur + infrastruktur&varde.dk +56654 + Herlev Kommune + Klaus Jørgensen + it&herlev.dk +56655 + Lona + Hill Hanxv + i&hanxv.com +56656 + National Research University Higher School of Economics + Zaskin Daniil Dmitrievich + ddzaskin&miem.hse.ru +56657 + Monocle Security + Christopher Ward + chris&monoclesecurity.com +56658 + EMBDIT IT-Solutions + Alexander Domanski + alexander.domanski&embdit.net +56659 + 中科腾龙信息技术有限公司 (PTEROSAUR) + 胡晓鑫 (Hu Xiaoxin) + sdhuxiaoxin&163.com +56660 + CatIO Network + Grover Chou + grover&catio.network +56661 + 北京中超伟业信息安全技术股份有限公司 (Beijing Zhongchao Weiye Information Security Technology Co., Ltd.) + 徐盼云 (Xu Panyun) + xu_py&zgzcwy.com +56662 + Sitemaster, Lda + Miguel Rio + miguel&sitemaster.pt +56663 + Herzog Enterprises, Inc. + Rod Wolf + rwolf&herzog.com +56664 + PinkRoccade Local Government BV + Pieter Krul + pieter.krul&pinkroccade.nl +56665 + HUSS B.V. + Jasper Huijser + jasper&huss.nl +56666 + Flaxseed Labs + Greg Geering + greg.geering&flaxseed.co.nz +56667 + shenzhen FranklinWH Technology .,LTD + Elsa zhong + elsa&franklin-wh.com +56668 + YUYAN NETWORKS LIMITED + YuYan Yang + noc&alleysakura.net +56669 + Nemocnice ve Frydku-Mistku, p.o. 
+ Petr Kocvara + kocvara&nemfm.cz +56670 + Great River Cancer Center + Brittany McMunn + bjmcmunn&sbrmc.org +56671 + Architecture Technology Corporation + Ben Burnett + bburnett&atcorp.com +56672 + Max Planck Institute for Multidisciplinary Sciences + Petra Kuester + admin&mpinat.mpg.de +56673 + Johannes Endres + Johannes Endres + je&johannes-endres.de +56674 + KB1SPH + Jeffrey Lavoie + kb1sphianapen&outlook.com +56675 + Prometheus Communications Pte Ltd + Jason Foong + jason&prometheus-communications.com +56676 + City of Forest Grove + Jason Towell + it&forestgrove-or.gov +56677 + Fletchers Solicitors + Brian Mills + brianmills&fs.co.uk +56678 + Siekken + Christopher Joie + christopher.joie69&gmail.com +56679 + omniQ + Eran Rosenberg + erosenberg&omniq.com +56680 + Doolta + Arnaud ASSAD + iana&doolta.com +56681 + Belgian-Luxembourg Conference of Seventh-day Adventists + Jeroen Tuinstra + communication&adventist.be +56682 + Flow Traders + Arjan de Laat + adelaat&flowtraders.com +56683 + EDJX, Inc. + James Thomason + james&edjx.io +56684 + Logical Infrastructure (Changzhou) Technology Co., Ltd. + Yaron Lee + yaron.lee&loinf.com +56685 + ČEPRO, a.s. + Zalabák Jan + ceproas&ceproas.cz +56686 + iSYS RTS GmbH + Quirin Grottenthaler + iana&isys-rts.de +56687 + Primary Industries and Regions SA + Andrew Mazzeo + pirsa.level3support&sa.gov.au +56688 + Tianhai InfoTech + Soha Jin + soha&tianhai.info +56689 + Shanghai Personalis Biotechnology Co.,Ltd. + Ashley Rubin + it&personalis.com +56690 + Clue24 GmbH + Sebastian Abt + sabt&clue24.de +56691 + Among Bytes + Krzysztof Kwiatkowski + contact&amongbytes.com +56692 + Pac Neuro, Inc + Edith Smith + esmith&sentaclinic.com +56693 + Valve Corporation + Brandon Gilmore + bgilmore&valvesoftware.com +56694 + Wuhan SanLian Automation CO.,Ltd + Ms Liu + sl_automation&126.com +56695 + Visible Capital Limited + Diego Diquattro + diego.diquattro&visiblecapital.io +56696 + Triad Metals International + Sean Hammond + sean.hammond&triadmetals.com +56697 + insightsoftware + Viresh Gohil + viresh&insightsoftware.com +56698 + Intelfon SA de CV + Carlos Zelaya + czelaya&red.com.sv +56699 + BNV e. V. + karl-heinz Gruner + karl-heinz.gruner&buerger.net +56700 + Unassigned + Returned 2020-12-17 + ---none--- +56701 + OpenAirInterface Software Alliance + OSA + osa&openairinterface.org +56702 + Unassigned + Returned 2020-12-17 + ---none--- +56703 + Unassigned + Returned 2020-12-17 + ---none--- +56704 + AvalonBay Communities + Preston Curry + preston_curry&avalonbay.com +56705 + Unassigned + Returned 2020-12-17 + ---none--- +56706 + Unassigned + Returned 2020-12-17 + ---none--- +56707 + Unassigned + Returned 2020-12-17 + ---none--- +56708 + Whitewall Energy + José González + info&whitewallenergy.com +56709 + Unassigned + Returned 2020-12-17 + ---none--- +56710 + Kuhne electronic GmbH + Andreas Kuhne + andreas.kuhne&kuhne-electronic.de +56711 + Tunstall GmbH + Peter Suer + peter.suer&tunstall.com +56712 + qudoor + xshengshe + xsshe&qudoor.cn +56713 + Planet soft d.o.o. + Aleksandar Milinkovic + aleksandar.milinkovic&planetsoft.ba +56714 + Schoeller Werk GmbH & Co. KG + Guido Hahn + ghahn&schoellerwerk.de +56715 + SPIE COMNET GmbH + Igor Werner + igor.werner&spie.com +56716 + LS Elektronik AB + Lars Stahre + lst&lse.se +56717 + Ipswich School + Alex Rose + atr&ipswich.school +56718 + AccelWELL Inc. 
+ Allison Soderberg + asoderberg&accelwell.com +56719 + Banished.dev Ltd + Tomasz Pietrzyk + dev&banished.games +56720 + Christian Borchert + Christian Borchert + cborcom68&hotmail.de +56721 + INCA TECNOLOGIAS + Leonardo Soliz Encinas + leosoliz&incatecnologias.com.br +56722 + Aurora Server + James Buck + iana&auroraserver.net +56723 + Kigen Ltd + Thomas Rhodes + thomas.rhodes&kigen.com +56724 + Nordic eSIM + Kenneth Cseh + kpc&nordicesim.com +56725 + Warren Podiatry + Nina Syme + cmsdpm&comcast.net +56726 + ASSA ABLOY Opening Solutions Sweden AB + Fran Fernandez + fran.fernandez&assaabloy.com +56727 + Solare Datensysteme GmbH + Brigitte Beck + b.beck&solar-log.com +56728 + Dawatek, LLC + Tony Vargas + tvargas&dawatek.com +56729 + Kylan Robinson + Kylan Robinson + me&kylanrobinson.com +56730 + ZedNot + Logan VanCuren + logan&zednot.com +56731 + Beijing SG technology Co.,Ltd + Adam Tian + tianlu&sg-tech.cn +56732 + ООО "Технологии Безопасности" (Security Technologies) + Habibullooev Juraboy Habibulloeich + habibulloev&stech.tj +56733 + Iberexáminis Consulting S.L. + José Alfonso Sanz Claramonte + jasanz&examinis.es +56734 + Eversec Technology Co.,Ltd. + zhanglikun + hardware_rd&eversec.cn +56735 + Bundesamt für Verfassungsschutz + Max Müller + pen&bfv.bund.de +56736 + TSB Bank plc. + Dan Cvrcek + ciso_pki&tsb.co.uk +56737 + Center for Skin Wellness + Evan Fannin + evan&thenetworkgurus.com +56738 + Independent School District No. 834 + Paul Holtz + hostmaster&stillwaterschools.org +56739 + T7 International Group + Jaime Moreno + jaime.moreno&t7internationalgroup.com +56740 + HoneycombData + Jingchao Luan + jingchao&hcdatainc.com +56741 + Haguenet + Orhan Hassan + orhan&haguenet.com +56742 + Scientific Protein Laboratories + Matt Ginter + ginterm&splpharma.com +56743 + NEW DEANTRONICS + Vic Cheng + Vic.Cheng&newdean.com.tw +56744 + Satair A/S + Per Arnbo + itsupport&satair.com +56745 + Jiangsu RCT Power Energy Technology Co., Ltd + Ryan Yan + ryan.yan&rct-power.com.cn +56746 + SPDT Solucoes em Energia + Alexandre de Andrade Lorencato + alexandre.lorencato&spdt.com.br +56747 + Luna Innovations Germany GmbH + Thibault North + thibault.north&lunainc.com +56748 + SETCCE d.o.o. + Davorka Sel + davorka.sel&setcce.com +56749 + EJADA + Karim Hamdy + kizofcb345&gmail.com +56750 + AZIMUT + Alexander Romanov + alexandr.romanov&azimut.ru +56751 + Dynamic IT Pty Ltd + Angus Warren + support&dynamicit.net.au +56752 + XEMO-NET + Erik Behenna + ebehenna&xemo-net.de +56753 + AUTEM Services + Marc Akoto + contact&autem-services.fr +56754 + Strike LLC + Christopher Bir + christopher.bir&strikeusa.com +56755 + Estimations, Inc. + Jeremy Sydney + oidadmin&estimations.com +56756 + Huaxing Communication Technology Co., Ltd + ren yongchang + 1059496125&qq.com +56757 + Antonios A. Chariton + Antonios A. Chariton + iana&daknob.net +56758 + ZK Technology + Gaojin Liu + liugaojin&zkchina.com.cn +56759 + HASCOM International Pty Ltd + Chris King + chris&hascom.com.au +56760 + West Chester University of Pennsylvania + Rashed Kabir + rkabir&wcupa.edu +56761 + TOLIFE TECNOLOGIA PARA A SAUDE S.A. + Victor Gomes + victor.gomes&tolife.com.br +56762 + WALK FORWARD SAS + EDWIN MATEO LEWITZKI DUJMUSIC + WEBMASTER&WALKFORWARD.COM.AR +56763 + Packet Forensics + Charles Caterall + info&packetforensics.com +56764 + wie-se.net + Stefan Wiesinger + stefan.wiesinger&gmail.com +56765 + Ragile Networks Inc. 
+ Ethan Gao + Ethan.gao&ragilenetworks.com +56766 + Tilman Kranz + Tilman Kranz + t.kranz&tk-sls.de +56767 + STET + Pierre DUVINAGE + pierre.duvinage&stet.eu +56768 + Delaware Foot & Ankle Group + Virginia Hardin + v.hardin-defoot&outlook.com +56769 + Dino Polska S.A. + Dariusz Włodarczyk + dariusz.wlodarczyk&marketdino.pl +56770 + CIC Innovation Services + Michael Herman + hostmaster&cic.com +56771 + Swixx Biopharma AG + Darko Popovic + darko.popovic&swixxbiopharma.com +56772 + John Smith and Sons Group + Aiden Arnkels-Webb + Sys_Admin&aztecretail.co.uk +56773 + TUXGUARD GmbH + Steffen Schoch + s.schoch&tuxguard.com +56774 + Decatur City Schools + Gary Cloer + gary.cloer&dcs.edu +56775 + FredOS + Fred Bourque + f.bourque&global.industries +56776 + HickelSOFT Huth GmbH + Daniel Marschall + info&hickelsoft.de +56777 + D-Amp + Fred Bourque + support&d-amp.com +56778 + Global EvQ + Frederic Bourque + f.bourque&globalevq.com +56779 + Total Body Pain Institute, LLC + Jessica Singer + jessica&totalpaininstitute.com +56780 + Simeon Networks + Todd Crane + todd.crane&simeonnetworks.com +56781 + Contingit AB + Tobias Folin + tf&contingit.com +56782 + Thomas Scientific LLC + Kevin Martin + kevin.martin&thomassci.com +56783 + AVPro Global Holdings + Kevin Wang + kwang&avproglobal.com +56784 + Associated British Ports + Steven Rayment + srayment&abports.co.uk +56785 + First Choice Computer Clinic, LLC + Cody Ardoin + cody&ardoin.me +56786 + RODRIGO ARGENAL, MD PA + Elias Argenal + it&argenalpediatrics.com +56787 + Double Vision + Walter Arrighetti + walter.arrighetti.consulting&gmail.com +56788 + ShenZhen NetPower Technologies Inc. + Louis Lee + lcl060&hotmail.com +56789 + Hengyang Rich Power Co.,Ltd + Wei Yuanqin + wei&ritarpower.com +56790 + Bradley-Bourbonnais Community High School + Ryan Timm + rtimm&bbchs.org +56791 + RES PUBLICA Consulting Group + Daniel Saumur + dsaumur&avenir.global +56792 + Xyberdata + Keshwarsingh Nadan + kn&xyberdata.com +56793 + Ferdinand Kittinger GmbH + Benedikt Kittinger + benedikt&kittinger.at +56794 + Research Data and Communication Technologies + Christopher J. Whalen + info&researchdata.us +56795 + cre.ative IT + Andrew Powers-Holmes + andrewh&creativeit.net.au +56796 + Bird Construction + Kevin McCurdy + Kevin.McCurdy&bird.ca +56797 + Valimail + Ash Wilson + ash.wilson&valimail.com +56798 + Farinex + Cedric Ohayon + informatique&farinex.ca +56799 + Luxshare Precision Industry Co.,Ltd. + Lin.Yu + Yulin.YL&luxshare-ict.com +56800 + ASKOMA AG + Andreas Freund + Andreas.freund&askoma.com +56801 + Arbory Digital Group, Inc. + Peter Stolmar + peter&arborydigital.com +56802 + Impôt 2000 + Richard Simard + richard.simard&groupesti.com +56803 + S.D.S. Inc. + Richard Simard + richard.simard&groupesti.com +56804 + Norfolk and Norwich University Hospitals NHS Foundation Trust + IT Security + ITSEC&nnuh.nhs.uk +56805 + 网思科技股份有限公司 (NetThink Technology Co., Ltd.) + 宋周灏 (Song Zhouhao) + songzhouhao&sinontt.com +56806 + Fusion Technology Nepal Pvt. Ltd. + Prabin Dahal + prabin.dahal&ft-nepal.com +56807 + Brauns Control GmbH + Ingo Brauns + webmaster&brauns.de +56808 + TurnKey Services AG + David Schafer + helpdesk&turnkey.ch +56809 + Proton Techhnologies AG + Sebastien CEUTERICKX + sebastien.ceuterickx&protonmail.com +56810 + HCI ENERGY + JAY A. 
GARCIA + jay.garcia&hcienergy.com +56811 + Reno-Tahoe Airport Authority + Martin Mueller + it.admin&renoairport.com +56812 + China Science Pioneer Intelligent Technology Development Co.,Ltd + maoyan + maoyan&cspid.cn +56813 + DONGGUAN HUARONG COMMUNICATION TECHNOLOGIES CO.,LTD. + Gong Rong + hrqs&sz-huarong.com +56814 + Ashdown Consultant Engineers Ltd + Mark Faulks + markf&ashdownav.com +56815 + USMD PPM LLC + Mark Gollner + mark.gollner&usmd.com +56816 + MRS Holding GmbH + Florian Bertsche + Florian.Bertsche&mrs-electronic.com +56817 + CAE Australia Pty Ltd + Stelio Josephakis + stelio.josephakis&cae.com.au +56818 + The BENEFIT Company + Badran Bukamal + badran&benefit.bh +56819 + Lysi Energy AB + Hans Wilhelmson + hans.wilhelmson&lysienergy.com +56820 + Vancouver Island University + VIU SysAdmins + DL_NAN_ES&viu.ca +56821 + NSA IT Consulting e.U. + Michael Mayer-Gishyan + office&nsa.ag +56822 + MULTA MEDIO Informationssysteme AG + Ahmed Landolsi + Alandolsi&multamedio.de +56823 + City of Maryland Heights + Eric Weber + eweber&marylandheights.com +56824 + zhangjun.sh.cn + Jun Zhang + master&zhangjun.sh.cn +56825 + TWS Netz GmbH + Helmut Hertle + it&tws-netz.de +56826 + DIAWAY OÜ + Aleksandr Ragel + ar&diaway.com +56827 + Dragonsight Technology + Derrell Lipman + derrell.lipman&dragonsight.technology +56828 + Avinor AS - OSL FNT + Ole Øyvind Hove + ole.oyvind.hove&avinor.no +56829 + Digiearth + Keisuke Watanabe + k-wata&mail.digiearth.net +56830 + Coloet Srl + Giordano Godizzi + giordano.godizzi&coloet.com +56831 + Burton Primary School + Samuel Wittwer + samuel.wittwer&compnow.com.au +56832 + Rockwood Holdings Corporation + Justin Rockwood + justin.rockwood&rockheadelectronics.com +56833 + EVMG30 + Björn Ivarsson + bjorn.ivarsson1&hotmail.com +56834 + MGroup + Martin Kraus Larsen + martin&mgroup.dk +56835 + hyppoCom S.t.l. + Filippo Grassilli + info&hyppo.com +56836 + Main information and telecommunication comunication center + Kyrylo Efimenko + voa_ct&post.mil.gov.ua +56837 + Shanghai Jucheng Networks Technology Co. Ltd. + xuming ma + maxuming&wifiabc.cn +56838 + NEC Corporation, Public Infrastructure Business Unit + Hiroki Takahashi + takahashi&da.jp.nec.com +56839 + Petr Hadraba + Petr Hadraba + hadrabap&gmail.com +56840 + IJIN Co.,Ltd. + Yang Jin-ho + jinho&think25.net +56841 + Corgex Pte Ltd + Norman Zhou + normanzhou&corgex.com +56842 + Tele2 Russia PSCore + Artem Babushkin + artem.babushkin&tele2.ru +56843 + KASSEX s.r.o. + Petr Jordan + info&kassex.cz +56844 + CM Technology Gruop LLC + Jeyhun Mammadzada + j.mammadzada&cmgroup.az +56845 + Datafox GmbH + Sven Meyer + s.meyer&datafox.de +56846 + Aktiebolaget Sven Heribert + John Hiller + john.hiller&shbygg.se +56847 + Avanza Bank AB + Patrik Söderlindh + domain&avanza.se +56848 + Klub liječenih alkoholičara Novi korak + Tereza Oreb + tereza.oreb&gmail.com +56849 + Cinionic + Dries Vandelannoote + Dries.vandelannoote&cinionic.com +56850 + Two4Tek S.A. + Olivier Latignies + ol&two4tek.eu +56851 + Vectone Mobile Limited + Sysadmin + vectone&omnitouch.com.au +56852 + The Molson Brothers / Frères Molson + Patricia Dell'Elce + Patricia.Dell'Elce&MOLSONCOORS.COM +56853 + Lodige Systems GmbH + Christoph Striewe + info&lodige.com +56854 + Public Sector Partnership Services + Dave Morfee + ict&pspsl.co.uk +56855 + nehtechnine + Don Lohdon + nehtechnine&gmail.com +56856 + SIVSA SOLUCIONES INFORMATICAS, S.A.U. + Antonio Iglesias + antonio.iglesias&sivsa.com +56857 + KTI Institute for Transport Sciences Non-profit Ltd. 
+ Tamas Brezvai + support&kti.hu +56858 + hisNET GmbH + Christopher Hoth + pen-iana-org--bf5sjf&cho.hisnet.de +56859 + FANOS + Areour Mohamed Cherif + areour.mohamed&gmail.com +56860 + Stefan Kania + Stefan Kania + stefan&kania-online.de +56861 + Community First Health Plans + Roger Gaza + rgarza&cfhp.com +56862 + Automated Delivery Solutions Ltd + Iain Adams + iain.adams&automateddeliverysolutions.co.uk +56863 + LLC Sitronics Telecom Solutions Ukraine + Denys Kuzura + routed&stsua.com +56864 + Mudgee Host + Fraser Stuart + oid&mudgee.host +56865 + Thirdline AB + Bahram Ghandchi + bahram.ghandchi&thirdline.org +56866 + Trelabra + Toni Siljander + toni.siljander&atea.fi +56867 + Group of industrial technologies, Ltd. + Sergey Panin + sergey.panin&git-holding.ru +56868 + WESTPRESS GmbH & Co. KG + Michael Peters + michael.peters&westpress.de +56869 + ITC - Israel Internet + Avshalom Aharoni + avshaloma&it-c.co.il +56870 + GAU RD "CIT" + Farid Asvarov + noc&e-dag.ru +56871 + First Quantum Minerals Limited + Frank Booysen + FQMICT.Security&fqml.com +56872 + Sidekicks House + Christopher Dodd + cdodd&sidekickshouse.com +56873 + ANO School21 + Anton Ivanov + admin&21-school.ru +56874 + Technity Solutions Inc. + Jerry Kao + jerry.kao&technitysolutions.com +56875 + Beijing Yaxunhongda Technology Co.,Ltd. + xiedacheng + xiedacheng&yaxunhongda.com +56876 + OTAK Technology, Inc. + Yusuf SAHIN + ysahin&otaknetworks.com +56877 + Ovzon AB + Christer Olofsson + col&ovzon.com +56878 + ENTEGA MediaNet GmbH + Frank Kraemer + webmaster&entega-medianet.de +56879 + DARES TECHNOLOGY, SL + Gerard Ruiz Carregal + gruiz&dares.tech +56880 + Ascon JSC + Alexander Chekalin + to&ascon.ru +56881 + LLC "Emzior" + Vladimir Kalmykov + v.kalmikov&emzior.ru +56882 + AWXG Corporation + AWXG Technical Coordinator + tech-coordinator&awxg.com +56883 + Rewara Inc + Krishna Tripathi + krishna.tripathi&rewara.com +56884 + Centro Oncologico de Cayey + Guillermo Pastrana + guillopastrana&yahoo.com +56885 + Aegees DMCC + Rustam Yusupov + info&aegees.com +56886 + Slovenská sporiteľňa, a. s. + Ľubomír Mojš + mojs.lubomir&slsp.sk +56887 + EssentialNET + Kevin Penn + kevin.penn&essential-net.com +56888 + Seratech BV + Tom van Stiphout + root&seratech.nl +56889 + Autentia S.A. + Gustavo Mendoza + gmendoza&autentia.cl +56890 + TubNet + Toby Smith + admin&tubbo.net +56891 + Electro Design Engineering, Inc. + Donald Steele + donald.steele&edeusa.com +56892 + 深圳市旭东数字医学影像技术有限公司 (Shenzhen Yorktal Digital Medical Imaging Tech. Cglted) + 杨珺 (Yang Jun) + yangjun&yorktal.com +56893 + DEMETRA srl + R. Gusmeroli + riccardo.gusmeroli&demetrafood.it +56894 + The Palace Group LLC + Sam Montenegro + itadmin&thepalaceus.com +56895 + Izaz Solutions LLC + Zubaidha Rehman + zubaidha&izazsolutions.com +56896 + Citybook Services Ltd. + Noah Sumner + nsumner&citybook.co.il +56897 + CSFCloud + Bence Skorka + pen&csfcloud.com +56898 + ehubcap.com + Jose Plunkett + jose&ehubcap.com +56899 + Erste Banka A.D. Podgorica + Davor Vuković + davukovic&erstebank.me +56900 + Elektra-Elektronik GmbH & Co. 
Störcontroller KG + Winfried Janz + winfried.janz&ees-online.de +56901 + SALTO Systems + Mikel Larreategui + security&saltosystems.com +56902 + European AIR Spares AB + Gustavo Moros + moros.gustavo&gmail.com +56903 + Fuzzy Systems + Sebastian Johansson + sebastian.johansson&fuzzy.systems +56904 + Techbrain Consultancy Ltd + Russell Sutton + russell&techbrain.org.uk +56905 + BhaiFi Networks Private Limited + Ankit Verma + ankit&bhaifi.com +56906 + Southern Virginia University + David Jones + David.jones&svu.edu +56907 + IDnow GmbH + Armin Bauer + armin.bauer&idnow.de +56908 + FIRSTSERVICE RESIDENTIAL, INC. + Leo Becerra + admin&fsresidential.com +56909 + Dymensione + Darwin Sitchon + darwin&dymensione.com +56910 + Avec + Mohamed Lounes + mohamed.lounes&yesdoc.com +56911 + ehubcaps.com + Jose Plunkett + jose&ehubcap.com +56912 + ehubcap.net + Jose Plunkett + jose&ehubcap.com +56913 + custom-hubcaps.com + Jose Plunkett + jose&custom-hubcaps.com +56914 + custom-hubcaps.com + Jose Plunkett + jose&custom-hubcaps.com +56915 + Bell Canada NG9-1-1 + Jim Watkins + jim.watkins&bellaliant.ca +56916 + Goonhilly Earth Station LTD + Alex Tweed + alex.tweed&goonhilly.org +56917 + ADEX GmbH + Duda Jan + duda&adex.at +56918 + BlueGrace Logistics + Michael Eby + meby&bluegracegroup.com +56919 + CHSI Georgy Tarlyovski + Mihail Georgiev Tarlyovski + mihail&gtarlyovski.com +56920 + TrustZero + Grayson Martin + grayson&trustzero.net +56921 + COMET Flight Test Data + Mats Svensson + mats.svensson&saabgroup.com +56922 + Capella Space Corp + Jeff Karp + itservices&capellaspace.com +56923 + FA2 Advisors, LLC + Sagy Langer + sagy&networxit.net +56924 + WuXi ANKTech Co.,Ltd + Willis Chen + willis.chen&ankedi.com +56925 + POMCube Inc + Justin Wang + justin.wang&pomcube.com +56926 + Causal Sarl + Xavier Perseguers + xavier&causal.ch +56927 + Dr. Welsch - IT Services + Dr. Dominic Welsch + pen&welsch-security.de +56928 + Changsha Asterfusion Data Technologies Co.,Ltd + Larry Lan + lanruyi&asterfusion.com +56929 + Securent Solutions, LLC + Michael Doser + mike&securentsolutions.com +56930 + Syntonics Corp LLC + Gary Bruce + gary.bruce&syntonicscorp.com +56931 + Nabla Prototypes + Troy Dowling + troy&nablaprototypes.com +56932 + USP + Enrique Mingyar Torrez Hinojosa + enrique.hinojosa&usp.br +56933 + NW Mobile Testing + Ken Ostrowski + kostrowski&nwmobiletesting.com +56934 + x2LAB + Dirk Beckert + oid&x2lab.de +56935 + DRG Technologies + Derek Kruger + derek.kruger&drgtech.com +56936 + TWS Netz GmbH + Frank Thurner + it&tws-netz.de +56937 + Open mobile platform LLC + Anosov Sergey + info&omprussia.ru +56938 + Brad Rubenstein + Brad Rubenstein + postmaster&bradrubenstein.com +56939 + Higgstar Information Technology + Jack Liu + jack.liu&higgstar.com +56940 + WohnMichel + Sabine Schmidt + bine&metrodiv.de +56941 + Prehensile Tales B.V. + Hein-Pieter van Braam-Stewart + hp&prehensile-tales.com +56942 + subtubes.io + Edgar Martinez + edgarhmartinez03&gmail.com +56943 + Syrex + David Herselman + dhe&syrex.com +56944 + VitaNetworks + Gustavo Domínguez + webmaster&vitanetworks.link +56945 + Marelcom AG + Michael Zimmermann + m.zimmermann&marelcom.ch +56946 + Court Grammar School + Jacob Curulli + itmanager&cgs.wa.edu.au +56947 + Koch IT + Michael Häberle + michael.haeberle&koch-it.ch +56948 + JUSAN + David Fernández + dfernandez&jusan.com.es +56949 + Bay Path University + Christopher Knerr + cknerr&baypath.edu +56950 + October Swimmer LLC + Christian Warden + cwarden&octoberswimmer.com +56951 + Hacom Tech.
+ Freddy Mendoza + freddy.mendoza&hacom-tech.com +56952 + Franklin Electric Co., Inc. + Joseph Pickering + jpickering&fele.com +56953 + ROBINSON + Stephen Surbey + stephen.surbey&barobinson.com +56954 + Redexis + Carlos Asenjo + carlos.asenjo&redexis.es +56955 + Antelope Valley Transit Authority + Steven Willibrand + caadmin&avta.com +56956 + Groupe Canam + Maxime Couture + maxime.couture&groupecanam.com +56957 + LLC BigDataRu + Evgenii Smirnov + mib&bigdata.ru +56958 + CORE|Vision BV + Rob Schalken + Rob.schalken&core-vision.nl +56959 + CPAC Systems AB + David Boman + pen&cpacsystems.se +56960 + Oberstufen-Kolleg + OSK Support + osksupport&uni-bielefeld.de +56961 + VMnic + Christopher Thorjussen + domain&vmnic.com +56962 + Digital Gaming Corporation USA + Enlin Neveling + enlin.neveling&digitalgamingcorp.com +56963 + WhiteBox Networks + Paul Fleming + pfleming&petabitscale.com +56964 + SteveMilligan.co.uk + Stephen Milligan + stevemtango&gmail.com +56965 + SI Concept LLC + Alexey Bogatyriov + aab&si-concept.com.ua +56966 + Bit-Wizards Information Technology Solutions, Inc. + Vincent Mayfield + it&bit-wizards.com +56967 + JaapWeissink + jaap Weissink + jaap&weissink.nl +56968 + ClearBlade, Inc. + Ryan McClure + rmcclure&clearblade.com +56969 + HOWLab + Alberto Mur López + amur&unizar.es +56970 + Shanghai ANKTech Co.,Ltd + Willis Chen + willis.chen&ankedi.com +56971 + AZITA. Co.,Ltd. + Lee Ju-hyung + ljh&azita.co.kr +56972 + Millabs Corporation + Robert Burckner + admin&millabs.net +56973 + e-intouch company + Taisiia Ilvitskaya + createdtv&gmail.com +56974 + Izaac Brånn + Izaac Brånn + izaac.brann&outlook.com +56975 + LLC "Helicoid" + Sergei Shcherbakov + s.shcherbakov&hc-company.ru +56976 + WISE SECURITY GLOBAL S.L. + Óscar Flor Lozano + oscar.flor&wsg127.com +56977 + ISECO.CZ + Petr Spurny + petr.spurny&iseco.cz +56978 + DATALIT S.R.L. + Michele Bagarin + michele.bagarin&datalit.it +56979 + Luminex Network Intelligence + Cedric Jehasse + cedric.jehasse&luminex.be +56980 + kerzinger.at + Oliver Kerzinger + iana&kerzinger.at +56981 + BETAMONT s.r.o. + Juraj Maciak + maciak.juraj&betamont.sk +56982 + Namita Mohideen MD Inc + Namta Mohideen + namitamohideen&gmail.com +56983 + HORUS-IT + Ralph Seichter + iana.org&horus-it.de +56984 + Astrogator AB + Gunnar Bråding + gunnar&astrogator.se +56985 + closip + Yoshihisa Ito + info&closip.co.jp +56986 + my FinTech Inc. + William Hamlin + whamlin&j-com.co.jp +56987 + my FinTech Inc. + William Hamlin + whamlin&j-com.co.jp +56988 + Arrow Powertech Pvt.Ltd + Babul Desai + babul&aptl.in +56989 + DDOR Novi Sad a.d.o. + Milan Banjac + milan.banjac&ddor.co.rs +56990 + ZOLA Electric + Jean Parpaillon + jean.parpaillon&zolaelectric.com +56991 + Huisman Equipment B.V. + Jeroen Zaal + jzaal&huisman-nl.com +56992 + Dox PSC SPA + Nelson Vásquez Gutierrez + nelson&hazlofacil.cl +56993 + Colectica + Dan Smith + dan&colectica.com +56994 + EXIS + Achraf Mabrouk + achraf.mabrouk&exis-it.com +56995 + Resnick Digital, LLC + Michael Resnick + mresnick&resnickdigital.com +56996 + Opplane Inc. + Ashok Padodara + ashok&opplane.com +56997 + National Centre for Nuclear Research (NCBJ) + Jarosław Szewiński + J.Szewinski&ncbj.gov.pl +56998 + Smart Building Solutions Ltd. + Dobrin Dobrev + d.dobrev&sbs-bg.net +56999 + Savills Plc + Richard Griggs + RGriggs&Savills.com +57000 + Venius Systems AB + Fredrik Ljungberg + flag&venius.io +57001 + KUBA + Cyril FERUGLIO + cyril.feruglio&kubapay.com +57002 + Alan N. 
Swartz MD + Amber Salablanca + asalablanca&paragonmedical.org +57003 + 3Key Company s.r.o. + Roman Cinkais + roman.cinkais&3key.company +57004 + ZMS + Yutaka Nemoto + nemoto&zms.co.jp +57005 + developithecus SIA + Dimitrijs Fedotovs + dima&fedoto.ws +57006 + Penneo A/S + Fredrik Lernevall + trustservice&penneo.com +57007 + Skala Software LLC + Anton Karasev + akarasev&skala-r.ru +57008 + Elisity Inc + Burjiz Pithawala + burjiz&elisity.com +57009 + AGMLAB + Gokhan Kesici + gkesici&agmlab.com +57010 + Rapaň + David Rapaň + david&rapan.cz +57011 + TR7 Siber Savunma AS + Kursad Yusuf KONUS + info&tr7.com +57012 + jmlecloud + jean-marc pigeon + jmpigeon&jmlecloud.com +57013 + jslarraz + Jorge Sancho + jslarraz&gmail.com +57014 + A1 Sprinkler & Systems Integration + Michael C. King + mking&a1ssi.com +57015 + iGEM Foundation + Fabio Maschi + fabio&igem.org +57016 + DTA FS GmbH Goslar + Ralf Hansmann + ralfhansmann&web.de +57017 + Isle of Man Post Office + Dylan Smith + dylan.smith&iompost.com +57018 + cerniglia.net, LLC + Stephen Cerniglia + stephen&cerniglia.net +57019 + Visiana ApS + Hans Henrik Thodberg + info&visiana.com +57020 + University of Pannonia + Ratting Gergely + ratting.gergo&uni-pannon.hu +57021 + Bossard + Aldo Furrer + ICTWINSystemAdmins&bossard.com +57022 + KarmaTek + Marty Sloan + marty&karmatek.io +57023 + De Buren Limited Liability Partnership + Toby Bryans + toby.bryans&deburen.uk +57024 + Sange Electronic Technology Co., Ltd + yongfang guo + guoyongfang68&163.com +57025 + Eng. Antonio Cannavacciuolo + Antonio Cannavacciuolo + ing.acannavacciuolo&gmail.com +57026 + TWB Company, LLC + Brad Atherton + brad.atherton&twbcompany.com +57027 + MatreComm Technologies Private Limited + Srinivas Gudipudi + srinivas&matrecomm.com +57028 + DigiKite + Saumya Kanta Swain + mail&skswain.in +57029 + Universitas Negeri Yogyakarta + harun noviar + harunnoviar&uny.ac.id +57030 + Link Innovation GmbH + Steffen Wickham + server&link-innovation.de +57031 + AutomationDirect.com + Mary Brehl + mbrehl&automationdirect.com +57032 + Auger Family + Joshua Auger + joshuaauger&gmail.com +57033 + Nightmare Computers Inc + Joshua Auger + joshuaauger&gmail.com +57034 + Inagora + Junwu Wang + wangjunwu&inagora.cn +57035 + MONT International Co., Ltd. + CHEN YU, LIU + gavin_liu&mont.com.tw +57036 + OOO "Nizhnekamskaya TEC" + Kurochkin Andrey + kurochkinaa&nktec2.ru +57037 + Thales SEC Transportation System Limited Company + Yueming HUANG + yueming.huang&thalessec.com.cn +57038 + BayCare Health Systems + Kathy Skenandore + kskenandore&baycare.net +57039 + Cash Time Title Loans + Bryant Schaper + support&cashtime.com +57040 + Podravska banka d.d. + Mario Škudar + card&poba.hr +57041 + Peter Slavik + Peter Slavik + info&peterslavik.com +57042 + Boat Rocker + Aaron Pearce + aaron.pearce&boatrocker.com +57043 + Mitsubishi Electric Automotive America, Inc. + Bill Lattin + meaa&iss.engineering +57044 + Summit Health + Dimitry Fooksman + dfooksman&shm.net +57045 + Softech di Germinara Francesco + Francesco Germinara + info&germinara.it +57046 + NetNordic Group + René Holmboe Ditlevsen + rene.ditlevsen&netnordic.com +57047 + Werktslim + Sjoerd van den Nieuwenhof + s.vandennieuwenhof&aca-it.nl +57048 + Volta Networks + Eric Peterson + eric&voltanet.io +57049 + Ownerview Tecnologia + Rodrigo Caldas + rodrigo.caldas&ownerview.org +57050 + CommScope Inc of North Carolina + Nicol So + nicol.so&commscope.com +57051 + Comsignal LTD + Lepikhin Aleksey Pavlovich + comsi&msn.com +57052 + PT.
ALITA PRAYA MITRA + Iman Gema Kowara + iman.kowara&alita.id +57053 + Martin Gallitzendoerfer + Martin Gallitzendoerfer + mgallitzendoerfer&hotmail.com +57054 + Maxtropy + Maxtropy R&D + devops&maxtropy.com +57055 + Public Key Server + Rob Linton + rob.linton&senetas.com +57056 + JSC "Sintels" + Denis Tkachev + d.tkachev&sintels.ru +57057 + AmperageApps AS + Jeff Skinner + subscriptions&amperageapps.com +57058 + TELECAMERA + Aleksei Prishchepo + avp&telecamera.me +57059 + Business Continuity Solutions, LLC + Sergey Kamyanov + sk&bcsolutions.com.ua +57060 + ŠKODA DIGITAL s.r.o. + Petr Cvachouček + petr.cvachoucek&skoda.cz +57061 + Global Home Force + Nicholas Utiger + nutiger&globalhomeforce.com +57062 + 贸联电子 (Bizlink Electronics) + yonggang lu + yonggang_lu&bizlinktech.com +57063 + CyOne Security AG + Markus Kaufmann + markus.kaufmann&cyone.ch +57064 + Lanaco d.o.o. + Boris Krečar + oid-management-team&lanaco.com +57065 + Eads innovative technologies LLC + Dylan Eads + Eadsd91&gmail.com +57066 + Northland Pioneer College + Isaac Hutton + isaac.hutton&npc.edu +57067 + National Theatre + Nicholas Triantafyllou + itinfrastructure&nationaltheatre.org.uk +57068 + DVHT Health Center + Roger Crown + roger.crown&cerner.com +57069 + GHT Leman Mont Blanc + LESPAGNOL Clement + c-lespagnol&ch-hopitauxduleman.fr +57070 + Asociación Prestadores Cualificados Servicios de Confianza de España + Álvaro Díaz Baño + alvaro.diaz&apcsce.com +57071 + Erste Card Club d.o.o. + Robert Vrenko + sysadmin&erstecardclub.hr +57072 + Volt Tecnologia + Alexandre Souza + gerenciaped&volt.ind.br +57073 + Big Dutchman AG + Eduard Weisgerber + eweisgerber&bigdutchman.de +57074 + Redisig, L.L.C. + Susana Milhem + info&redisig.com +57075 + Prometeo Srl + Giampaolo Lionello + ced&prometeo.com +57076 + CITSA Technologies PVT LTD + Sreekumar PM + sreekumar&citsat.com +57077 + Redast, L.L.C. + Ernesto Rodriguez + info&redast.com +57078 + Son Information Systems + matthew son + matthew.son&son-is.com +57079 + IEEE P21451-1-5 + Jun Wu + junwuhn&sjtu.edu.cn +57080 + LHM Services GmbH + Bürkev, Muhammed-Baris + baris.buerkev&lhm-services.de +57081 + Clearspan LLC + Randy Craig + randy.craig&clearspancloud.com +57082 + Prosoft-Systems LLC + Alexandrov Oleg + o.aleksandrov&prosoftsystems.ru +57083 + Cégep de Sept-Îles + Service TI + serviceTI&cegepsi.ca +57084 + LESANDO GmbH + Markus Neese + iana&lesando.de +57085 + Fan Tadbir Sabz Vira (FTSV) + Abolfazl Golchinfar + ceo&ftsv.ir +57086 + IT-world ITW GmbH + Markus Lorber + markus.lorber&it-world.eu +57087 + Arnold Immobilien Holding GmbH + Markus Lorber + lorber&arnold.immobilien +57088 + Rehoboth McKinley Christian Health Care Services + David Hodges + dhodges&rmchcs.org +57089 + EKTACOM + OLIVIER CAILLET + iana&ektacom.com +57090 + Ultinous Zrt + Szabó László + sysadmins&ultinous.com +57091 + Optimus Health Care + Ken Bjelka + kbjelka&opthc.org +57092 + TVM Verzekeringen + Martin de Jonge + m.dejonge&tvm.nl +57093 + Dr. Eilebrecht SSE GmbH & Co. KG + Ali Sipit + ali.sipit&eilebrecht.de +57094 + KOAT-TV + Brad Carter + bradcarter&hearst.com +57095 + Robin Radar Systems + Rob van der Meer + rob.vandermeer&robinradar.com +57096 + HAFINI Group + Mike Kelly + mike&hafini.group +57097 + China Telecom Fufu Information Technology CO.,LTD. + Ke Weiling + kewl&ffcs.cn +57098 + Dawning Information Industry Co., LTD. + Yanwei Jia + jiayw&sugon.com +57099 + Shanghai Chint Power Systems Co., Ltd.
+ Edward Tang + tzlin&chint.com +57100 + Joint Stock Company "Research Institute" Masshtab" + Nikolay Zaugolnyy + n.zaugolnyi&mashtab.org +57101 + Kleware + Johan Klewais + johan&kleware.be +57102 + Azelis Corporate Services NV + Ian Akkermans + InfrastructureTeam&azelis.com +57103 + Verband der Studierenden an der ETH Zürich (VSETH) + Lukas Reichart + isl&vseth.ethz.ch +57104 + Hekateros Pte Ltd + Damien Derby + damien.derby&hekateros.com +57105 + Sollievo IT, LLC + Leon Jaimes + leon&sollievoit.com +57106 + Bmbix + Robin Abbi + robin.abbi&bmbix.com +57107 + ReQuTech + Omid Sotoudeh + omid.sotoudeh&requtech.se +57108 + South Ayrshire Council + Keith Riddle + Keith.Riddle&south-ayrshire.gov.uk +57109 + CEG ELETTRONICA INDUSTRIALE SPA + FABIO CENNI + FABIO.CENNI&CEGELETTRONICA.COM +57110 + AXEL + Franck BOURSIN + franck.boursin&axel.com +57111 + Rolls-Royce Submarines + Carl Hamill + carl.t.hamill&rolls-royce.com +57112 + Guvenpark Bilisim Tek. Ar. Ge. Tic. Ltd. Sti. (Procenne) + Murat YILDIRIM + it&procenne.com +57113 + The People's Dispensary for Sick Animals + Stefan Minehan + minehan.stefan&pdsa.org.uk +57114 + Svetets Ltd + Yuriy Buzaev + svadmin&svetets.ru +57115 + Hellwig-IT + Christoph Hellwig + ch&hellwig.eu +57116 + Centre Hospitalier de Luxembourg + Morgan Henreaux + Henreaux.Morgan&chl.lu +57117 + Let's eSign PBC, Ltd. + Yan-Cheng Chang + yc&letsesign.org +57118 + WBR Tech Services + William Robins + wb&wbrtechservices.com +57119 + MRJTNE + Jordan Murray + mrjtne&gmail.com +57120 + OrenIT Services (002636109-T) + Mohd Zulkhairi Md Amin + tech&orenit.my +57121 + Terrebonne Parish School District + Travis Bourg + tbourg&tpsd.org +57122 + Primary Care Specialists, P.A. + Jennifer Gustaves + sunstonemedsol&gmail.com +57123 + Centro de Hematología y Oncología Médica + Elias Sobrino + Elias.Sobrino&gmail.com +57124 + Cervi Robotics sp. z o. o. + Piotr Maślanka + piotr.maslanka&dronehub.ai +57125 + SWTCH Energy Inc. + Carter Li + carter.li&swtchenergy.com +57126 + QEI, LLC + Michael Paul + mpaul&qeiinc.com +57127 + GABBE + Gabriel Gunnarsson + hej&gabbe.me +57128 + Plow Technologies LLC + Scott Murphy + info&plowtech.net +57129 + Excel Technical Projects + Mohamed Kamel ElSayed Salama + mohamed.kamel&excelsystems-eg.com +57130 + EDEKA Rechenzentrum Süd Betriebs GmbH + Stefan Ziegler + stefan.ziegler&edeka-suedwest.de +57131 + QEI, LLC + Michael Paul + mpaul&qeiinc.com +57132 + NEOX NETWORKS GmbH + Timur Özcan + timur.ozcan&neox-networks.com +57133 + Tech Pad LLC + Sean VanderMolen + sean&techpad.biz +57134 + Magnolia Manor Networks + Mitch Mitchell + mitchmitchell1616&gmail.com +57135 + ADK TRADING Co. + ANTONIS KONNARIDES + antonykonnarides&gmail.com +57136 + VIC + ANTONIS KONNARIDES + antonykonnarides&gmail.com +57137 + INTA + ANTONIS KONNARIDES + antonykonnarides&gmail.com +57138 + ANTON + ANTONIS KONNARIDES + antonykonnarides&gmail.com +57139 + Ivie Technologies, Inc. + Don Merrell + don&ivie.com +57140 + Beijing Unisguard Technology Co.,Ltd. 
+ litiegen + litiegen&unisguard.com +57141 + Secsmart + Wu Jona + wuzhenyu&secsmart.net +57142 + QingCloud Cloud Computing + Tom Wang + tomwang&yunify.com +57143 + diondo GmbH + Martin Muenker + martin.muenker&diondo.com +57144 + ICEpower A/S + Johann Bjornsson + jhi&icepower.dk +57145 + Crowe Foederer + Arjen van Zon + applicatiebeheer&crowefoederer.nl +57146 + ExCoDa Service und Consulting GmbH + Volker Mischo + volker.mischo&excoda.de +57147 + Detect-It LLC + Zachary Davis + zachdavis&detect-it.ai +57148 + EntServ Schweiz GmbH + John Henz + john.henz&hpbsc.ch +57149 + NeoS + Andrei Nonin + sales&neo-s.com +57150 + CGI Finland GTO + Mäkisarka, Oskari + oskari.makisarka&cgi.com +57151 + Nichijou, Inc. + Yeqi Dai + daiyeqi&nichijou.com +57152 + FARECO + David HUE + d.hue&fareco.fayat.com +57153 + Eclipsoft S.A. + Juan Carlos Gordillo + jgordill&eclipsoft.com +57154 + JHW Holdings, LLC + Joseph Werhan + joe&systemsrealignment.com +57155 + Sparr Electronics Ltd + Mohandas Ukkandath + mohandas&sparrl.com +57156 + MARTIN CC + Chris Martin + chris&martin.cc +57157 + LIVIT + Jurijs Timofejevs + info&liv.lv +57158 + Festival Balélec + IT admin + sysadmin&balelec.ch +57159 + Tocca Systems + Danshiro Yamaguchi + danshiro.yamaguchi&tocca-net.jp +57160 + CORAF + PALA JEAN JACQUES + jj_pala&coraf.cg +57161 + TFS Labs + Matthew Burr + info&tfslabs.com +57162 + E-Magine Kft. + Viktor Varga + viktor.varga&e-magine.hu +57163 + Raden Solutions SIA + Victor Kirhenshtein + victor&radensolutions.com +57164 + Southern Orthopedic Sports Medicine Associates + John Gardner + john.gardner&bhsala.com +57165 + Lothian Pension Fund + Mark McNeilly + mark.mcneilly&caseddimensions.com +57166 + MECAFOR + Eric PAILLOT + mecafor&mecafor.eu +57167 + Matrix AI + Roger Qiu + enquiry&matrix.ai +57168 + Telecard + Andrii Nazarchuk + it&telecard.com.ua +57169 + Kotoi-Xie Consultancy, Inc. + Haochen Kotoi-Xie + hx&kxc.inc +57170 + OKB Alpha, LTD + Oleg Shestakov + ol&okbalfa.ru +57171 + DataNet Services UG + Klaus Charlier + info&datanetservices.eu +57172 + Alaska USA Federal Credit Union + Infrastructure Development + DIST-ITID&alaskausa.org +57173 + Sri Sai PC LLC + Ashok Kumar + akumarjg&gmail.com +57174 + RIO STC, LLC + Andrey Chekomasov + ac&riostc.ru +57175 + France SIEM + M SAR + Pdorian-pro&outlook.com +57176 + Banking Association of Georgia + Mikheil Kapanadze + mikheil&association.ge +57177 + J.M. Rizzardi + Jean Michel Rizzardi + contact&jmrizzardi.com +57178 + MCSS Military CyberSecurity Systems + Pijuant Dorian + postmaster&mcss.tech +57179 + IoTerop + David Navarro + david.navarro&ioterop.com +57180 + Maela SAS + Hubert Viot + hubert.viot&maela.fr +57181 + Thung Hua Sinn Group + Kolatat Thangkasemvathana + kolatat&thunghuasinn.com +57182 + CardLogix Inc + Sebastien Goulet + sebastien.goulet&cardlogix.com +57183 + Huanghe S&T Group Information Industry Development Co., Ltd + Yin yanli + yinyanli&chinahuanghe.com +57184 + Nuclei System Technology Co., Ltd. + Huanjie Zhu + james&nucleisys.com +57185 + CNTECH + Jiwoong Chae + jwchae&icntech.co.kr +57186 + Stuttgarter Lebensversicherung a.G. + Matthias Root + matthias.root&stuttgarter.de +57187 + BBT.live + Almog Duek + almog.duek&BBT.live +57188 + Webvoto Tecnologia em Eleicoes LTDA + Leonardo Pignataro + leonardo.pignataro&webvoto.com.br +57189 + The Ginger Developer + James Hall + james.hall&thegingerdeveloper.com +57190 + Confurious + Mitchell Grenier + mitchell&confurious.io +57191 + SureCo Inc. + SureCo IT + it&sureco.com +57192 + iWorks Corporation + Raj K. 
Muthu + rk&iworkscorp.com +57193 + Ajeeth, Inc. + Mr. Garun Gupta + garun&ajeeth.com +57194 + VRULL GmbH + Dr. Philipp Tomsich + philipp.tomsich&vrull.eu +57195 + CNTech Co., LTD + Ji Woong Chae + jwchae&icntech.co.kr +57196 + hailingguangdian + yong fei wang + wyf0330&aliyun.com +57197 + Magellan Power + LINDSAY MEEK + lmeek&magellanpower.com.au +57198 + OJSC "Eximbank" + Vitaliy Dascalitsa + dep&bankexim.com +57199 + Natural Resources Wales + Ian Johns + ian.johns&cyfoethnaturiolcymru.gov.uk +57200 + PowerOn Platforms + Leo D'Arcy + support&poweronplatforms.com +57201 + Aero Simulation, Inc. + IT Department + it&aerosimulation.com +57202 + Destinatech + Francis McBratney + fmcbra&gmail.com +57203 + Forsyth County Public Health Department + Blake Pate + pateb2&forsyth.cc +57204 + Family Connections + Andrew Bush + abush&familyconnectionsnj.org +57205 + Clovis Oncology, Inc + Michael Hipp + mhipp&clovisoncology.com +57206 + NAGTECH LLC + Petr Milenin + p.milenin&nagtech.ru +57207 + Healthcare Establishment «Mogilev Regional Treatment and Diagnostic Center» + Evgeniy Shestakov + evgeniy.shestakov&modc.by +57208 + ITR Ltd. + Maxim Dednev + m.dednev&itr.group +57209 + Ounce + Dima Fedorov + d.fedorov&chay.info +57210 + TriangleLTD + Sergey Petrov + atei____&mail.ru +57211 + Freiwillige Feuerwehr Sprockhövel + Max Frenzel + max.frenzel&feuerwehr-sprockhoevel.de +57212 + Pfizer Andover Engineering + Jason Ridenour + jason.ridenour&pfizer.com +57213 + Solutionsource + Björn van den Heuvel + bjorn&solutionsource.nl +57214 + LB Annatel LTD + Lirone Chimoni + lirone&annatel.net +57215 + Kalegra AB + Kristian Valind + kristian.valind&kalegra.se +57216 + SARL POM'ZED + Janusz Baranek + postmaster&pom-zed.fr +57217 + Crestview Aerospace LLC + Matthew White + IT-Department&cvasp.com +57218 + Danbury Mission Technologies, LLC + Anthony Della Rossa + sw-licenses&dmtllc.org +57219 + SOFOGGYC2C LLC + Adrian Aguilar + sofoggyc2cllc&gmail.com +57220 + RMT LTD + Maxim Evdolyuk + maxim.evdolyuk&rmt-tec.com +57221 + 1upHealth, Inc. + Eric Marriott + eric&1up.health +57222 + Robert Nelson + Robert Nelson + robert-ianapen&nelson.house +57223 + KSP GmbH + Fredy Walth + it&ksp-pflege.de +57224 + IB Guth + Wolf-Dietrich Guth + wolf-dietrich&ib-guth.de +57225 + Verbidio, Inc + Kristofer Pettijohn + kris&verbidio.com +57226 + Bentley University + David Brountas + dbrountas&bentley.edu +57227 + Peoples Bancorp + Patrick Ball + patrick.ball&pebo.com +57228 + Wojskowa Akademia Techniczna im. 
Jarosława Dąbrowskiego + Dział Informatyki + postmaster&wat.edu.pl +57229 + GreenPole Power Solutions India Private Limited + Madhu Babu + mbabu&greenpole-ps.com +57230 + Baran Elektronik Sistemleri San.Tic.Ltd.Şti + Ömer KAÇMAZ + snmp&barantech.com.tr +57231 + amazingcat LLC + Alexey Kusnetsov + alexey&amazingcat.net +57232 + GDIT + Jose Bernabe + jose.bernabe&gdit.com +57233 + Strong-IT GmbH + Philipp Jongthep-Pargger + office&strong-it.at +57234 + Mobilcom + Emmanuel Roberto Richiardone + emmanuel.richiardone&mobilcom.it +57235 + Forêt d'Aquitaine + Ernesto Freitas + freitasernesto&live.fr +57236 + slow.network + Tomasz Kramkowski + tk&the-tk.com +57237 + PEAK FINANCIAL GROUP + Daniel Frances + dfrances&peakgroup.com +57238 + Wind Energy Transmission Texas, LLC + Geoffrey Moon + gmoon&wettllc.com +57239 + DraffeLabs LLC + Nick Draffen + pen&draffelabs.net +57240 + Latvijas Daudzbērnu ģimeņu apvienība + Elina Treija + laiks.gimenei&gmail.com +57241 + ATC Labs + Deepen Sinha + sinha&atc-labs.com +57242 + Sturgeon Services International + IT Department + it&sturgeonservices.com +57243 + Central Rural Electric Cooperative + Charles Curry + ccurry&mycentral.coop +57244 + FoxTrot AeroWorks + Adam Del Vecchio + adamdelvecchio&foxtrot.aero +57245 + Dorel Home Furnishings Inc. + Mark Newton + mark.newton&dorel.com +57246 + Moonlite Electric & Construction, Inc. + Jay Treadway + jay&moonliteconstruction.co +57247 + Inner Mongolia Network Trust Electronic Authentication Ltd. + Jonathan Sun + jonathansun&tencent.com +57248 + Enapter S.r.l. + Nikolay V. Krasko + nikolay&enapter.com +57249 + Arxsine + Phillip Vuchetich + pvuchetich&arxsine.com +57250 + CFS Solutions, Inc + Aaron Bishop + aaron&cfssolutions.com +57251 + SG + Alex + Song.Gan.Alex&vertiv.com +57252 + Specialne Systemy a Software a.s. + Milan Svingal + milan.svingal&special.sk +57253 + Raspberry Pi (Trading) Ltd + Peter Harper + peter.harper&raspberrypi.com +57254 + Exelonix GmbH + Dr. Matthias Stege + it&exelonix.com +57255 + Jackson Siegelbaum + Heather Nairn + hnairn&gicare.com +57256 + Canopius Management Services + David Tucker + david.tucker&canopius.com +57257 + Silicon Hills LLC + Clay Risser + clayrisser&gmail.com +57258 + LabTech + Jiří Oravec + info&labtech.red +57259 + Japan Aviation Electronics Industry, Limited + UJIHARA Masato + ujiharam&jae.co.jp +57260 + Alleima AB + Martin Leringer + IAMatAlleima&alleima.com +57261 + ESPi + Brent Craig + brent&espicorp.com +57262 + TechArgos + Andrey Kuzmin + info&t-argos.ru +57263 + Attono Limited + Ian Gibbs + support&attono.net +57264 + Sigstore a series of LF Projects LLC + Bob Callaway + tac&sigstore.dev +57265 + Intelligent Wave Inc. + Masaki Tsuji + enadmin&iwi.co.jp +57266 + Ross School + Sean Carmichael + domainadmin&ross.org +57267 + Bravas Sistemas Ltda + Ricardo Vasconcellos + ricardo&bravas.ind.br +57268 + Unitac Technology Ltd. + Ansen Lian + UnitacSW&unitac.cn +57269 + Energy Queensland + Timothy Lewsey + ote&energyq.com.au +57270 + JRC Mobility Inc. + Wataru Soyama + jrcm-pen-contact&jrc-m.co.jp +57271 + Open SDN & NFV Lab (OSNL) + Karsten Schmidt + karsten.schmidt&highstreet-technologies.com +57272 + highstreet technologies USA Corp. 
+ Karsten Schmidt + karsten.schmidt&highstreet-technologies.com +57273 + highstreet technologies GmbH + Karsten Schmidt + karsten.schmidt&highstreet-technologies.com +57274 + Gipercom LLC + Nikolay Prokopov + programm&gipercomvlz.ru +57275 + Nahanet + Mahdi Dashtbozorgi + m.bozorgi&nahanet.ir +57276 + SeeThru Networks + Nick Randall + nrandall&seethrunetworks.com +57277 + FALCON V SYSTEMS S.A. + Andriy Korud + a.korud&falconvsystems.com +57278 + Statkraft Energi AS + Bjørn Vollelv + p_domains&statkraft.com +57279 + Beijing JRunion Technology Co., Ltd. + frank mao + frank.mao&jrunion.com.cn +57280 + Aspire Defence Services Ltd + David Martin + david.martin&aspiredefence.co.uk +57281 + HelpBox + Patrick Fogarty + patrick&gethelpbox.com +57282 + Cardioline Spa + David Lombardi + d.lombardi&cardioline.it +57283 + Conterra Networks + Robert McDaniel + rmcdaniel&conterra.com +57284 + FiRa Consortium + Mitch Kettrick + cpm&firaconsortium.com +57285 + Digital Grid Research Institute,CSG. + Kangping Yang + yangkp&csg.cn +57286 + Security forces + Tran Cong Chien + tiensuchabo&gmail.com +57287 + JEL Corporation + Kazutoshi Hayashi + k-hayashi&jel-robot.co.jp +57288 + DataTech911 + Michael Salonish + info&datatech911.com +57289 + Balefyre (Pty) Ltd + Justin Brown + contact&balefyre.co.za +57290 + NextGen RF Design Inc. + Jeremy Sutherland + Jeremy.sutherland&nextgenrf.com +57291 + DeepBloom Studio + Huisu Yun + cppig1995&gmail.com +57292 + Withus-Inovação e Tecnologia Lda. + Emanuel Miranda + emanuel.miranda&withus.pt +57293 + Uaio Tecnologia + Alexandre Souza + alexandre&uaio.com.br +57294 + Guangzhou Ether Technology Limited + Hu, Runyu + billing&ether.school +57295 + Sayers Technology Holdings, Inc + Dean Flatland + dflatland&sayers.com +57296 + Healthworks + David Smith + david.smith&pchp.net +57297 + ReleasePoint + Henry Cherry + hcherry&releasepoint.com +57298 + Basalte bv + Pieter De Gendt + pieter.degendt&basalte.be +57299 + Cour38b + Olivier Reymond + pen&cour38b.ch +57300 + BRIDGES MEDICAL SERVICES + Heather Culler + bridgesbilling&gmail.com +57301 + SVA System Vertrieb Alexander GmbH + Joerg Mueller + joerg.mueller&sva.de +57302 + AerQ GmbH + Henning Stahlke + oidadmin&aerq.com +57303 + Resolution Life US + Thomas Swafford + thomas.swafford&voya.com +57304 + SRAMAG SAS + François de Mareschal + fdemareschal&sramag.com +57305 + Six Floor Solutions + Pedro Aço + pedro.aco&sixfloorsolutions.com +57306 + www.davidbandinelli.it + David Bandinelli + david.bandinelli&gmail.com +57307 + NPF Crystall + Lakin Konstantin + lakin&npfcrystall.ru +57308 + Burning Timber + Joe Cooter + joe&burningtimber.com +57309 + pantheon.com + Mika Dede + me&mde.de.com +57310 + NetVisory Srl + Antonio Guerrisi + antonio.guerrisi&netvisory.com +57311 + InfraKnit Technologies Pvt. Ltd. + Jacob A Thomas + jacobthomas&infraknit.com +57312 + Centro Acadêmico de Ciência e Tecnologia - CENAC/CienTec + José Lucio Zancan Junior + contato&cenaccientec.org +57313 + East Japan Institute of Technology Co,Ltd + Naoki Hayakawa + hayakawa&tounichi-g.co.jp +57314 + SERVICIOS LEGALE SPA + MAURICIO MUÑOZ + MAURICIO.MUNOZ&DESPAPELIZA.CL +57315 + Sistemas y Computadores S.A. + Martin Vargas Linares + martin.vargas&syc.com.co +57316 + Alliance Automotive Group Benelux B.V. + Sjaak Roest + sroest&allianceautomotive.nl +57317 + TMGcore, Inc. + Jake Mertel + jake.mertel&tmgcore.com +57318 + Efore Telecom Finland Oy + Jani Soderstrom + jani.soderstrom&efore.com +57319 + BioCollections Worldwide, Inc.
+ Sixto Pacheco + sixto.pacheco&biocollections.com +57320 + ENGECOM-TECH ENGENHARIA DE SISTEMAS + Jorge S. Sales + octal&ig.com.br +57321 + COOP CENTRO ITALIA SOCIETA' COOPERATIVA + Carlo Cerquiglini + carlo.cerquiglini&centroitalia.coop.it +57322 + ARCHICREA DP + JEAN DO NASCIMENTO + jdo&archicreadp.fr +57323 + Integrated Micro-Chromatography Systems, Inc + Ben Grimes + it_info&imcstips.com +57324 + QT Medical, Inc + Brett Chien + software&qtmedical.com +57325 + Neukirchener Erziehungsverein + Mathias Türpitz + registrierung&neukirchener.de +57326 + Bamboo Systems + Stewart Gallacher + admin&bamboosystems.io +57327 + AVO Networks + Sean O'Hara + stohara&yahoo.com +57328 + BH Electronics, Inc. + Brad Gass + bgass&bhelectronics.com +57329 + Ozlem Goker-Alpan MD, LLC + Uyensa Beese + ubeese&Ldrtc.org +57330 + Nikola Corporation + Matthew Christian + matthew.christian&nikolamotor.com +57331 + Valid Card Manufacturing (Suzhou) Co., Ltd + Wendy Wang + wendy&beautifulcard.com +57332 + Inova Logic, s.r.o. + Miroslav Jombik + miroslav.jombik&inovalogic.sk +57333 + ePlus, Technology, Inc. + Chris Cordray + chris.cordray&eplus.com +57334 + LLC «Smart batteries» + Babkin Andrei + a.babkin&energon.ru +57335 + Wi-Ing Aktiv - Die Hamburger Wirtschaftsingenieure e.V. + Fin Maaß + f.maass&wiing-aktiv.de +57336 + Faculty of Organization and Informatics + Mario Harjač + maharjac&foi.unizg.hr +57337 + Bundesanstalt für den Digitalfunk der Behörden und Organisationen mit Sicherheitsaufgaben + Dr. Gerd Jungnickel + ISB&bdbos.bund.de +57338 + Leibniz-Institute for Psychology + Peter Weiland + pw&leibniz-psychology.org +57339 + PREMIUM SA + MIKEL ABEZIA + it&premiumpsu.com +57340 + TELERY NETWORK S.R.L + Alvin Nunez + a.nunez&telery.com +57341 + Capte B.V. + Sergey Vladimirov + sergey&capte.co +57342 + Fin Maaß + Fin Maaß + info&finmaass.de +57343 + Transitive Properties, LLC + Aria Bentle-Wasmundt + aria&aria.computer +57344 + Tachyon Networks + Lindsey Barlow + info&tachyon-networks.com +57345 + Shenzhen SDMC Technology Co., Ltd. + Jeff Chen + jeff_chen&sdmctech.com +57346 + Northern Medical Physics and Clinical Engineering + Dr. Rodney Padgett + rodney.padgett&nhs.net +57347 + Advancery Limited + Martin Emerson + martin&advancery.com +57348 + Northern Electric Power Technology, Inc + Fan Wang + fanw&northernep.com +57349 + Suncontract OÜ + IT Department + it&suncontract.org +57350 + Central CUSD 301 + Jacob Aguinaga + jacob.aguinaga&central301.net +57351 + Ondoan Servicios + Ander Azabal + aazabal&ondoan.com +57352 + Whitestack + Alejandro Martín García + support&whitestack.com +57353 + Mestrol Co., Ltd. + Shilin Wang + walkissw&mestrol.com +57354 + Epsilon Telecommunications Limited + Edward Ngui + ITsupport&epsilontel.com +57355 + Muhr und Bender KG + Team Applications and Server Services / Security + it-security&mubea.com +57356 + SRSENA + Linus Johansson + l.johansson&samsung.com +57357 + 2SN + Clément Fontaine + C.fontaine&2sn.fr +57358 + Safestyle UK PLC + Daniel McGann + Daniel.McGann&safestyle.co.uk +57359 + Sonce Energija d.o.o. + IT Department + it&sonce.com +57360 + Universidade Federal da Bahia + Fernando Lacerda + fernando.lacerda&ufba.br +57361 + Prometheus Security Group Global + PSG Administrator + IT&PSGGLOBAL.NET +57362 + Polylegio AB + Fredrik Nilsson + fredrik.nilsson&polylegio.se +57363 + FIRMA LEGAL S.A.
+ PEDRO CORTES + pedro.cortes.desolminihac&gmail.com +57364 + OrionGroup + Vivek Chandran + vivek.chandran&oriongroup.co.nz +57365 + Unassigned + Returned 2021-05-04 + ---none--- +57366 + Miami-Dade County, Office of the Property Appraiser + Emir Shabashvili + esh&mdcpa.net +57367 + Gering Public Schools + Lionel Newberry + lnewberry&geringschools.net +57368 + New Mexico Oncology and Hematology Consultants + Indira Gutierrez + indirag&nmohc.com +57369 + Querylog + Thierry GENGOUL + thierry.gengoul&querylog.fr +57370 + xBlau + Daniel Mosquera + daniel+oid&mosquera.dev +57371 + UC Berkeley AUTOLab + Jackson Chui + jacksonchui&berkeley.edu +57372 + PCCW Solutions Limited + Antony Snow + Antony.Snow&pccw.com +57373 + ERSTREAM VIDEO DELIVERY CORP + Ugur Kalaba + ugur.kalaba&erstream.com +57374 + rongyi.io + Yi Rong + i+ianapen&rongyi.io +57375 + JSC "NTC FB" + Maksim Scheglov + m.scheglov&npofb.ru +57376 + Hangchun Broadcast Equipment Co, Ltd + Liwen Wu + dashixiyuan&hotmail.com +57377 + ABZ Informatik Dr. Krottmaier e.U. + Harald Krottmaier + software&abz-informatik.at +57378 + bwIDM + Dr. Martin Nussbaumer + info&bwidm.de +57379 + Brose Fahrzeugteile SE & Co. Kommanditgesellschaft, Bamberg + Pavel Novotny + pavel.novotny&brose.com +57380 + Thiel2S + Lukas Thiel + iana&thiel2s.de +57381 + Waterkotte GmbH + Marvin Voß + voss&waterkotte.de +57382 + Genwyse SAS + Michel Reverbel + contact&genwyse.com +57383 + Synergy North + Joe Vermeulen + mis&synergynorth.ca +57384 + Grandeur Housing + Charles Sloane + it&grandeurhousing.com +57385 + HEMIC + Eric Daley + it.operations&hemic.com +57386 + Danial Moj + amirhossein abbasiaghdam + amir.h.a.aghdam&gmail.com +57387 + Whizzkids Training Ltd + Garry Lowe + garry&whizzkids.ie +57388 + Doosan Fuel Cell America + Jang Won Suh + jangwon.suh&doosan.com +57389 + Mediashop GesmbH + Michael Kriebernik + m.kriebernik&mediashop-group.com +57390 + 4G Management Inc + Jacob Grandlienard + Jake&jakepki.com +57391 + Verhoeven Grondverzetmachines B.V. + Edward van Nijmweegen + helpdesk&verhoevenbv.com +57392 + Mercedes-Benz Bank AG + Florian Thiele + dw_032_mbbank-pen&mercedes-benz.com +57393 + Naelan + Pascal Bonneton + admin&naelan.com +57394 + Seitz-IT + Roy Seitz + admin&seitz-it.ch +57395 + Kath. Kinderkrankenhaus Wilhelmstift gGmbH + Mark Brede + m.brede&kkh-wilhelmstift.de +57396 + Hazell Bros Group Pty Ltd + Patrick Moore + Patrick.Moore&hazellbros.com.au +57397 + Severalnines AB + Johan Andersson + johan&severalnines.com +57398 + Fingerprint Cards AB + Hsinyi Lu + hsinyi.lu&fingerprints.com +57399 + IT-Zentrum der Thüringer Hochschulen + Thomas Otto + thomas.otto&uni-jena.de +57400 + Justiça Federal de Primeiro Grau no Rio Grande do Sul + Alexandre da Silveira Ilha + cinfra&jfrs.jus.br +57401 + Temenos SA + James Holland + jwholland&temenos.com +57402 + Cohesive Computing + Valentin Pollart + valentin&csquare.ai +57403 + Trident Research LLC + IT + it&tridentresearch.com +57404 + Charter Spectrum Communications + Alberto Valez + alberto.valez&charter.com +57405 + Consilio LLC + Willem van den Berge + willem.vandenberge&consilio.com +57406 + Onward + Paul Oswald + infrastructure.admin&onward.co.uk +57407 + Wooninc. 
+ Marly Claassen + helpdesk&wooninc.nl +57408 + Pingmaster + Anton Kadnikov + axis.tblog&yahoo.com +57409 + ELDON WITH MUSIC + Eldon Zanga + eldonzanga&gmail.com +57410 + mySolutions (Pty) Ltd + Mouritz Opperman + mouritz&mysolutions.co.za +57411 + Innovations ON GmbH + Tiara Rodney + t.rodney&inno-on.de +57412 + GEEVEN-NET + Michael Geeven + michael&geeven.de +57413 + Debgrasam Architectural association + Folarin Adewale Simeon + efolarin134&gmail.com +57414 + The Rust Belt Rebellion + Brad Chesney + bradchesney79&gmail.com +57415 + PT Bukit Asam Tbk + Asep Maryana, S.Kom + amaryana&bukitasam.co.id +57416 + WDI Wise Device Inc. + Shane Archibald + itcommunications&wdidevice.com +57417 + Roxanne Hall, nėe Otto + Barry J. Burns + barry&barryjburns.com +57418 + Crunchfish Digital Cash + Paul Cronholm + paul.cronholm&crunchfish.com +57419 + SMART-ING + Martin Gallegos + mgallegos&smart-ing.com +57420 + dcert.pl + Dawid Banaszewski + support&dcert.pl +57421 + Dynasys - Engenharia e Telecomunicações, S.A. + João Loureiro + jloureiro&dynasys.pt +57422 + PretoriaFM + Christopher Francis + tegnies&pretoriafm.co.za +57423 + ARKEMA + Simon BANGUE-HEMO + pki.nos&arkema.com +57424 + Nickatwork.com + Nick Palleon + me&nickatwork.com +57425 + NanoRay biotech + kluber CHEN + kluber&nanoray.com +57426 + HUAYEN WORLD MONASTERY + ERIC YEH + junfuyeh&gmail.com +57427 + Claroty + Shaul Kremer + shaul&claroty.com +57428 + NATIONAL INFORMATION TECHNOLOGY DEVELOPMENT AGENCY + DR. YAHAYA ONIMISI MOHAMMED + chuksobiora&yahoo.com +57429 + Lilium Gmbh + Darshan Dodia + darshan.dodia&lilium.com +57430 + Fuzhou Dockeer Technology Co., Ltd. + lang liu + lqy&dockeer.com.cn +57431 + Curtiss-Wright 901D + Anthony Luciano + anthony.luciano710&gmail.com +57432 + Christopher Ahrens + Christopher Ahrens + christopher&leviacomm.net +57433 + CHHOLAK TrustBit Private Limited + Buddhi Prakash + info&chholak.com +57434 + Winchester Foot & Ankle Associates PLLC + James E. Dodd + jed2wfaa&gmail.com +57435 + Platbox + Aleksey Kupreev + it2&platbox.com +57436 + UDAP.org + Luis Maas + oid-registry&udap.org +57437 + NATIONAL INFORMATION TECHNOLOGY DEVELOPMENT AGENCY, + DR. MOHAMMED ONIMISI YAHAYA + ymohammed&nitda.gov.ng +57438 + Quarkslab + Fred Raynal + fraynal&quarkslab.com +57439 + voice INTER connect GmbH + Diane Hirschfeld + diane.hirschfeld&voiceinterconnect.de +57440 + AC Transit + Tas Jalali + tjalali&actransit.org +57441 + achelos Gmbh + Alexander Smotrov + alexander.smotrov&achelos.de +57442 + Diskbit + Thomas M. van Vugt + t.m.vanvugt&diskbit.net +57443 + Wuhan Da Ta Technologies Co., Ltd. + Huaiwu Wang + wanghuaiwu&bigtmt.cn +57444 + BwFuhrparkService GmbH + Norman Doll + admins&bwfps.de +57445 + Valir Rehabilitation Hospital + Kami Scruggs + kami.scruggs&valir.com +57446 + Ally + Vito Bruno + allycerts&ally.com +57447 + Northwest Health + Katie Sarver + k.sarver2&nwhealthin.com +57448 + PPS + Park Taeyoon + pps&ppsystem.co.kr +57449 + Ermetris + Claudio Borrello + claudio.borrello&ermetris.it +57450 + Kao Collins Corp + Mike Johnson + mjohnson&kaocollins.com +57451 + HealthSmart Care Management Solutions, LP + LaToya Johnson + latoya.johnson&healthsmart.com +57452 + Ludowici Roof Tile + IT + it&ludowici.com +57453 + Mobius Wireless Solutions + Avis Ng + avis&mobiusws.com +57454 + AlSego Luxembourg S.A. 
+ Marc Van Oost + tech-mgmt&alsego.com +57455 + KOIOS DatalytiX + David Gooding + david.gooding&koiosdatalytix.com +57456 + Ravn Alaska + Steven Peterson + steven.peterson&ravnalaska.com +57457 + Suchy MIPS GmbH + Waldemar Suchy + w.suchy&suchymips.de +57458 + Peloton Interactive, Inc. + Lucas Rockwell + lucas.rockwell&onepeloton.com +57459 + Verhoeven B.V. + Edward van Nijmweegen + administrator&verhoevenbv.com +57460 + Netis Technologies.,LTD + Thomas Min + Thomas.min&netis.com +57461 + Wago + Grzegorz Włosek + grzegorz.wlosek&wago.com +57462 + Nueve Solutions LLC + Clay Risser + clayrisser&gmail.com +57463 + Svenska Handelsbanken AB + Mona Faleij + minerva.backup&handelsbanken.se +57464 + Otto Martin Maschinenbau GmbH & Co. KG + Ludwig Kreuzer + l.kreuzer&martin.info +57465 + CloudQuant, LLC + Ryan Bermel + devops&cloudquant.com +57466 + ILOVEPDF + Juan Eduardo Eguiguren + juan&ilovepdf.com +57467 + Kitchell Corporation + Sean Julian + sjulian&kitchell.com +57468 + Sonotechnik Austria AngioExperience GmbH + Glantschnig Marc + marc.glantschnig&sot-medical.com +57469 + BigCat Wireless Pvt Ltd + Kannan Gaddam + kannan&bigcatwireless.com +57470 + JTG Web Solutions + Joseph Gullo + surfrock66&surfrock66.com +57471 + North Newton School Corporation + Gregory Laffoon + itadmin&nn.k12.in.us +57472 + PVG Holding B.V. + Peter Rijpkema + p.rijpkema&pvg.eu +57473 + Level IT + Olivier Hault + olivier.hault&level-it.be +57474 + Biofire + Kai Kloepfer + network-admin&biofire.io +57475 + Fourd tech LTD + jim hsieh + man671010&gmail.com +57476 + Shulins' Solutions + PAUL STANLEY SHULINS + paulshulins&gmail.com +57477 + ANSnullANS.org + Paco Lechner + contact&ansnullans.org +57478 + FIONE Sp. z o.o. Sp.k. + Maciej Musiol + admin&ostrog.net +57479 + MCO System + Michaël Costa + michael.costa&mcos.nc +57480 + Tom Communication Industrial Co.,Ltd. + Yoshiei Sone + sone.yoshiei&tomcom.co.jp +57481 + TEAL Technology Consulting GmbH + Manuel Hoffmann + manuel.hoffmann&teal-consulting.de +57482 + EVVA Sicherheitstechnologie GmbH + Christian Leier + c.leier&evva.com +57483 + Zetta System Kft + Balazs Toth + zsalab&zettasystem.com +57484 + Ruhloff & Dauner GmbH + Ingo Ruhloff + ingo.ruhloff&ruhloff-dauner.de +57485 + Ultratest Ltd + Stevan Lackovic + slckvc&aol.com +57486 + Brille24 + Christian Arndt + admin&brille24.de +57487 + Southern Technologies Corporation + Frederick Craft + fcraft&southern-tech.com +57488 + Cubic Networks + David Suárez + david.suarez.dominguez&gmail.com +57489 + Links Field Networks Ltd. + shengbin + shengbin&linksfield.net +57490 + Mental Asylum of Education + Arjun Dahal + arjundahalard.thereason&gmail.com +57491 + OVERTOP ELECTRIC GROUP.LLC BEIJING OFFICE + terry.dong + terry.dong&overtop.com.cn +57492 + Hangzhou Lanxum Security Technology Co., Ltd + Zhang Junfeng + zjfcn&vip.sina.com +57493 + Mutualités Libres / Onafhankelijke Ziekenfondsen + David Colot + david.colot&m-team.be +57494 + TEICEE + Philippe CHAUVAT + iana&teicee.com +57495 + JBT, Inc. + Changsoo Han + grayhan&ejbt.co.kr +57496 + FDL Group + Panayotis Katermayer + pkatermayer&fdlgroup.gr +57497 + Unassigned + Returned 2021-06-03 + ---none--- +57498 + Miles-Bramwell Executive Services Limited + Daniel Sutton + devsecops&slimmingworld.co.uk +57499 + bonumsoftware.com + Bonumsoftware Admin + info&bonumsoftware.com +57500 + Qonnect AS + Jon Agust Reynisson + jar&qonnect.no +57501 + Panasonic i-PRO Sensing Solutions Co., Ltd. 
+ Shinichi Arai + arai.shinichi&jp.panasonic.com +57502 + Danish National Genome Center + NGC-Operations + sysops&ngc.dk +57503 + Town of Narragansett, RI + Caleb Seekell + cseekell&narragansettri.gov +57504 + MessaNet Kft. + Zsolt Garamszegi + iana&messanet.hu +57505 + SYSTEMTECHNIK-NEUHAUS + Frank Neuhaus + info&systemtechnik-neuhaus.de +57506 + Syswright Limited + Justin Terry + justin.terry&syswright.co.uk +57507 + Homatelecom + Sia Nariman + IT&Homatelecom.com +57508 + PASS Certification Alliance + Neo Lee + neominu&atoncorp.com +57509 + Intetra + Onur Kocak + onur.kocak&intetra.com.tr +57510 + Linkon Tech + Dragan Blagojevic + dragan.blagojevic&linkon.tech +57511 + National General Insurance + Paul Weese + paul.weese&ngic.com +57512 + Rossonet s.c.a r.l. + Andrea Ambrosini + andrea.ambrosini&rossonet.com +57513 + Greensboro Pediatricians + Heather Underwood + hunderwood&gpeds.com +57514 + First National Bank of America + Bill Nickerson + bnickerson&fnba.com +57515 + PT Freyabadi Indotama + Iman Rahmatulloh + iana.penreg&freyabadi.com +57516 + Kropplabs + David Kropp + dkropp79&gmail.com +57517 + Portax + Miroslav Lauš + mirek&laus.cz +57518 + Mubadala Investment Company + Adnan Siddiqi + certadmin&mubadala.ae +57519 + Dipl. Ing. Fust AG + Robin Hälg + r.haelg&fust.ch +57520 + Aulss8.veneto.it + Roberto Walczer + roberto.walczer&aulss8.veneto.it +57521 + Ditusz Kft + János Szinetár + janos.szinetar&ditusz.hu +57522 + Universidade Estadual de Roraima + Carlos Liberal + dti&uerr.edu.br +57523 + Taylors Crossing Public Charter School + Rick Davis + rickd&tceagles.com +57524 + Landratsamt Rhein-Neckar-Kreis + Martin Roskosch + EBVIT-IT-Backend&Rhein-Neckar-Kreis.de +57525 + Landratsamt Rhein-Neckar-Kreis Schulen + Patrick Angermund + EBVIT-IT-Schulen&Rhein-Neckar-Kreis.de +57526 + Dwight-Englewood School + Chris Fleischl + fleisc&d-e.org +57527 + SPAIRAL COMMERCE SL + Pedro Luis Alves de Souza San-Juan + info&spairal.com +57528 + Vinteo LLC + Dmitry Sery + info&vinteo.ru +57529 + Cayman Islands Health Services Authority + Keith Higgins + webmaster2&hsa.ky +57530 + Transmex Systems International Private Limited + Kok Whye Low + kokwhye.low&transmexsys.com +57531 + ELI-Beamlines + Dan Hart + daniel.hart&eli-beams.eu +57532 + TELROAMING ADVANCED COMMUNICATION SOLUTION LTD + Noam Lando + noam&iamwebbing.com +57533 + Due Date Logistics + Denny Weinstein + infotech&ddlogistics.ca +57534 + BlackBerry Government Solutions + Emmanuel Amissah + eamissah&blackberry.com +57535 + NEXOG + Nir Ezer + nir&nexog.com +57536 + Chongqing 321 Thermal Insulation Products Co., Ltd. + Yu Sea + sea&321jr.com +57537 + EverQuote, Inc. + IANA PEN Admin + iana-pen-admin&everquote.com +57538 + BH Desgins + Ben Hare + benjamin.hare&sca.com.au +57539 + William Sanders + William Sanders + william&wzsanders.com +57540 + World Technology + CHIHWEI CHANG + Jonathan.chihwei&gmail.com +57541 + Kettle Foods Ltd + Oliver Broughton + oliver.broughton&kettlefoods.co.uk +57542 + DocumentID.net + Ken Kubota + ianaoid&documentid.net +57543 + Valenz: Assurance + Assurance IT + AssuranceSupport&valenzhealth.com +57544 + ONFIDO SAS + PETKO PETKOV + petko.petkov&onfido.com +57545 + Objective Interface Systems, Inc. 
+ IANA Administrator + iana.admin&ois.com +57546 + Advance IOT Connectivity System Co.,LTD + Chris Zhou + zhoufangjie&aiotcs.com +57547 + Pantherun Technologies Pvt Ltd + Srinivas Shekar + srinivas&pantherun.com +57548 + Nickb + Ramtin beheshti + Brjb2020&gmail.com +57549 + Ministerium für Bildung und Kultur Saarland + Guy Philipp Bollbach + gp.bollbach&bildung.saarland.de +57550 + Wyss Center for Bio and Neuro Engineering + George Kouvas + info&wysscenter.ch +57551 + Oxide Computer + Nils Nieuwejaar + iana&oxide.computer +57552 + danuba.eu GmbH + Werner Hahn + gmbh&danuba.eu +57553 + Radformation, Inc. + Alan Nelson + anelson&radformation.com +57554 + evidence, Inc. + Yusuke Tamura + y.tamura&evidence.jp +57555 + KeyChest Ltd + Dan Cvrcek + dan&keychest.net +57556 + Landmælingar Íslands + Haflidi Sigtryggur Magnusson + haflidi&lmi.is +57557 + SCRRA + Michael Rodriguez + RodriguezM&scrra.net +57558 + Kentik Technologies, Inc. + Aaron Kagawa + akagawa&kentik.com +57559 + Herospeed Digital Technology Limited + YongshengCao + cys&longse.net +57560 + Ardlin Systems Ltd + Simon Watson + support&ardlin.net +57561 + Carl-Thiem-Klinikum Cottbus gGmbH + Felix Zachlod + f.zachlod&ctk.de +57562 + MISSION PEDIATRIC CENTER + GABRIELA CUELLAR + MISSIONPEDIATRICCENTER&OUTLOOK.COM +57563 + Tungsten Web Ltd + Nick Johnston + pen&tungstenweb.com +57564 + Mater Misericordiae Limited + Craig Kenny + craig.kenny&mater.org.au +57565 + Evaluación IOFE + Luis Raymi + eraymi&gmail.com +57566 + CO6 Inc + Andy Savage + ssl&co6.com +57567 + International Civil Aviation Organization (ICAO) + SiNguyen Vo + svo&icao.int +57568 + CITIZENGATE + PETIT ERIC + epetit&citizengate.com +57569 + Digital Advice GmbH & Co. KG + Henrik Halbritter + henrik&halbritters.com +57570 + Zipper Services SRL + Judit Fekete + judit.fekete&ezipper.ro +57571 + Kier Ltd + Mark Bentley + mark.bentley&kier.co.uk +57572 + RuiXingHengFang Network (Shenzhen) Co.,Ltd + Qiuhua Kang + qiuhuakang&risinghf.com +57573 + Alligator Plastics + Stephen Coebergh + ict&alligator-plastics.nl +57574 + Luftamt Hersbruck + Lukas Grams + iana0&rfc788.net +57575 + Steward Advanced Materials + Bob Carneim + bcarneim&stewardmaterials.com +57576 + ams Computer Group + Tyler McEnaney + tmcenaney&amscomp.com +57577 + FUNDACION UNID + Fernando Latorre + fernandolatorre&unid.es +57578 + Yamaha Sound Systems Inc. 
+ Takashi Takaku + takashi.takaku&music.yamaha.com +57579 + Sekai Europe GmbH + Henrik Wichert + henrik.wichert&sekai-europe.com +57580 + Samgongustofa + Thor Sigurdsson + thors&samgongustofa.is +57581 + ronygomes.me + Manuel Rony Gomes + rony.gomes89&gmail.com +57582 + Monteris Medical Inc + Steven Robbins + srobbins&monteris.com +57583 + UAB Nevda + Žygimantas Kazlauskas + zygimantas.kazlauskas&nevda.lt +57584 + Nexity Solutions Digitales + Arnaud Vetillart + avetillart&nexity.fr +57585 + Southgate Packaging + John Prendergast + john.prendergast&southgateglobal.com +57586 + 1 (GE/NL) Corps + Lizenzmanager + PEN-IGNC&web.de +57587 + SIMAJE + Houssam Badi + Houssam.Badi&simaje.com +57588 + Lee Deon ellis + Lee Ellis + leedeonellis&gmail.com +57589 + RMBT + Rob Broughall + rob&rmbt.co.uk +57590 + Prime Alliance Consulting Group Sarl + Philip Christ + it.admins&batrust.lu +57591 + Kenosha Public Library + Brent Williams + netadmin&mykpl.info +57592 + Taris Electronics Ltd + Everson Rodrigues + taris&taris.com.br +57593 + Data Respons Solutions AS + Jarl Guntveit + jg&datarespons.no +57594 + ARCLAN + Philippe Pinazo + philippe.pinazo&arclansys.com +57595 + Indis.io + Sam Paioletti + sam&t4i.io +57596 + TTK + Thor Sigurdsson + thor&ttk.is +57597 + LinmanSOFT + Ivanov Stanislav + stas&linman.ru +57598 + Guangdong Xintong Communication CO.,Ltd + junhai wang + 13415359199&139.com +57599 + Lightbeam Health Solutions, Inc. + Jacob Gomez + jgomez&lightbeamhealth.com +57600 + TSP + Camelia Ivan + camelia.ivan&transsped.ro +57601 + Department of CSIE, National University of Kaohsiung + Yung-Hsiang Hu + a1065524&mail.nuk.edu.tw +57602 + Yunnan Fullstack Technology Co., Ltd + Kunrong Hu + hoocoral&hotmail.com +57603 + 乌鲁木齐市今日创新软件开发有限公司 (Urumqi Today Innovation Software Development Co., Ltd.) + zhaoyi + 670702766&qq.com +57604 + Juice Technology AG + Andreas Köpke + andreas.koepke&juice-technology.com +57605 + NEC Magnus Communications, Ltd. + Nobuyuki Yamazawa + macrenraku&dmy.magnus.nec.co.jp +57606 + Zehnder Group International AG + Dan Gysel + informatik.tasks&zehndergroup.com +57607 + Syneos Health + Michael Rivett + michael.rivett&syneoshealth.com +57608 + Industria Sigrama SA de CV + Jorge Eduardo Sanchez Soto + jorge.sanchez&sigrama.com.mx +57609 + Nologin Consulting + Luis Ripollés Hernández + luis.ripolles&nologin.es +57610 + Securiton AG + Joerg Furrer + e.contact&securiton.ch +57611 + Ben E. Keith Company + IT Admin + itadmin&benekeith.com +57612 + 天津凤凰食品有限公司 (Tianjin Phoenix Food Co., Ltd.) + 张充招 (Zhang Chongzhao) + 1403037987&qq.com +57613 + CME Home + Chad Erato + cerato201&gmail.com +57614 + Nucleus Command Systems + Matthew Strange + mstrange&nucleuscommand.com +57615 + 3G Soluciones Movilidad SL + Jose Antonio Santiso Martinez + jasantiso&3gmg.com +57616 + Umpi S.r.l. + Giovanni Agarri + giovanniagarri&umpi.it +57617 + Semler Dermatology, Inc. + Rhonda Powers-Hunt + manager&semlerderm.com +57618 + České Radiokomunikace a.s. + IT Infra + lan&cra.cz +57619 + Carl Valentin GmbH + Benjamin Tisler + btisler&carl-valentin.de +57620 + Endeavour Foundation + Tony Saba + tony.saba&endeavour.com.au +57621 + iTrinegy Ltd + Frank Puranik + frank.puranik&itrinegy.com +57622 + 北京瑞和云图科技有限公司 (Beijing Ruihe Yuntu Technology Co., Ltd.) 
+ 杨阳 (Yang Yang) + 18731260161&163.com +57623 + TTLH Inc + Wayne Hoang + whoang&ttlh.us +57624 + BAE Systems Hägglunds AB + Mathias Björklund + mathias.bjorklund&baesystems.se +57625 + Lunar Energy + Kevin Fine + kevin&lunarenergy.com +57626 + PBS El Salvador + Carlos Molina + info.sv&grouppbs.com +57627 + SBS Co.,Ltd. + Jehun Lee + jehun.lee&sbs.co.kr +57628 + Öka Skog AB + Göran Olsson + goran&okaskog.se +57629 + iTech + MohammadRez Ghaffarpour + it&itechco.net +57630 + EPTech bv + Pascal Eraerts + info&eptech.be +57631 + Ellis Medicine Cancer Center + Sue Hendricks + hendrickss&ellismedicine.org +57632 + Panthera Dental Inc. + Michel Labonte + michel&pantheradental.com +57633 + Aeronautical Radio of Thailand Ltd. + Maethee Riwsawai + maethee.ri&aerothai.co.th +57634 + WaltonTech + Sam Walton + samuel&waltontech.org +57635 + Evolium Technologies SLU + jordi hidalgo + jordi.hidalgo&redtrust.com +57636 + Edify Labs, Inc. + Neil Hobbs + legal&edify.cx +57637 + hyohenOs Software Labs Private Limited + Abhijit Gadgil + gabhijit&hyphenos.io +57638 + Virtual Infosec Africa + Daniel Affum + daffum&virtualinfosecafrica.com +57639 + Cyberzen + Jean-Philippe Gaulier + jean-philippe.gaulier&cyberzen.com +57640 + Davis Medical Solutions Ltd. + Todd Davis + tmdavis&todddavisconsulting.com +57641 + McKinneyTEK.com + Christopher Richardson + christr&gmail.com +57642 + WebX + Pankaj Kumar + pankajbrahmabhatta&gmail.com +57643 + MemberzPlus + Alan Moor + alan.moor&memberzplus.com +57644 + Belimed Life Science AG + Alex Kohler + alexander.kohler&belimed.com +57645 + Famedly GmbH + Niklas Zender + info&famedly.com +57646 + Tolka Telecommunications Corporation + Alex Day + alex.day&tolka.tv +57647 + Harbour Energy + Matt Thompson + Matthew.Thompson&chrysaor.com +57648 + Interlinx systems + Graeme McKay + graeme&interlinx.io +57649 + Computer Storm Ltd + Darren Wood + darren.wood&computerstorm.me +57650 + Gustavo Network + Gustavo Pedro + pen.iana&gustavo.network +57651 + Secretaria Regional das Finanças, Planeamento e Administração Pública + Pedro Freitas + emrap&azores.gov.pt +57652 + COMTEL TECHNOLOGIES + Gildas da MATHA SANT'ANNA + technique&comtel-group.com +57653 + Jagiellonian University + Janusz Bielec + janusz.bielec&uj.edu.pl +57654 + Info Circus, Inc. + Development Dept. + info&infocircus.jp +57655 + Swiftness LTD. + Ben Sebagh + bens&swiftness.co.il +57656 + M31 Italia srl + Michele Salvalaggio + it&m31.com +57657 + Muxi Technology (Nanjing) Co.,Ltd. + Cliff Zhang + zheng.zhang&muxi-connect.com +57658 + TRP Solutions + Thorbjørn Lorentzen + tel&trp-solutions.dk +57659 + Ascentac Inc. + Neville Hsiao + neville.hsiao&ascentac.com +57660 + RELIX CO., LIMITED + Alan Lee + alan&relixnetworks.com +57661 + Silverwolf + ralph stone + ralph.stone&gmail.com +57662 + e-Jogsegéd Szolgáltató Kft. + IFJ. LENGYEL TIBOR + ifj.lengyel&ejogseged.hu +57663 + e-Postoffice Szolgáltató Kft. + Jacsó Tamás + jacso.tamas&e-postoffice.hu +57664 + AlgoSec, Inc. + Anthony Cosgrove + anthony.cosgrove&algosec.com +57665 + nuron LIMITED + Tom Garwood + tom.garwood&nuron.tech +57666 + DMS + Mohammed Iqbal + mohammediqbal&dmsystem.co.uk +57667 + TPL Systèmes + SCHUMMER Bastien + bastien.schummer&tplsystemes.com +57668 + InfoMaaS d.o.o. 
+ Zoran Peričić + info&infomaas.com +57669 + Nilanila + Radhan Habamungu + uhrama&gmail.com +57670 + Cleveron AS + Jaagup Saare + jaagup.saare&cleveron.com +57671 + Silvaris Corporation + Dave Sheffels + it&silvaris.com +57672 + CerSign Technology Limited + Richard Wang + postmaster&cersign.com +57673 + X.O Concepts + Dilan Walgampaya + dilan&xdoto.io +57674 + JUSTFLY NETWORKS + Joseph Flynn + justfly1111&icloud.com +57675 + Dr. Markus Regensburger + Dr. Markus Regensburger + edv&dr-regensburger.de +57676 + La marm'hotte + Commission GSI + contact&la-marmhotte.org +57677 + Shenandoah Medical Center + Joshua Barlow + jbarlow&smchospital.com +57678 + KGAU "RCSS" + Mikhail Burichenko + it&rcsskrsk.ru +57679 + Travel Technologies, LLC + Roman Prokhorenko + roman.prokhorenko&travelata.ru +57680 + Roman Prokhorenko + Roman Prokhorenko + klug&klug.pro +57681 + ICOWORKS + Soo Yuk + syuk&icoworks.co.kr +57682 + Sanritz Automation Co., Ltd. + Takeshi Hashizume + hashizume&sanritz.co.jp +57683 + Cloud Native Computing Foundation + Davanum Srinivas + sc1&kubernetes.io +57684 + Landratsamt Biberach + Martin Schweizer + IuK-Administration&biberach.de +57685 + F. Hoffmann-La Roche + Christian Hofmann + christian.hofmann.ch1&roche.com +57686 + Getriebebau Nord GmbH & Co.KG + Stefan Petrik + Security.DE&nord.com +57687 + All4Labels Management GmbH + Dennis Berger + dennis.berger&all4labels.com +57688 + ShangHai DongZhou Lawton Telecom Technologies Co.,Ltd Fuzhou Branch + QinHui Wei + 851396590&qq.com +57689 + Eleos Security + Paul Lizer + paul&eleossecurity.com +57690 + Beulah Wesleyan Church + Steve Felton + admin&beulahwesleyan.org +57691 + Side by Side Charter School + Angelo Roberti + aroberti&sbscharter.org +57692 + Datamart + Mauricio Thibaut + mthibaut&datamart.cl +57693 + REMI + Greg Semenov + greg&remi.ru +57694 + i3i Ingeniería Avanzada, S.L. + Alejandro Diaz + alex.diaz&i3i.es +57695 + NewEra + jackey.liu + wenget&163.com +57696 + Instabims + Philipp Koch + iana&instabims.de +57697 + Boston Borough Council + Jason Bagley + jason.bagley&boston.gov.uk +57698 + BTP ONETec + Rubén Da Silva + ruben.dasilva&btp.es +57699 + Cascoda Limited + Bruno Johnson + b.johnson&cascoda.com +57700 + Infinet-EKB LLC + Komarov Igor + i.komarov&infinet-ekb.ru +57701 + TrustFour Technologies, Inc. + Robert Levine + robert.levine&trustfour.com +57702 + Network Thermostat + Jerry Drew + jdrew&networkthermostat.com +57703 + City of Melbourne + Rob Messersmith + it.pki&mlbfl.org +57704 + Kerwin Shen + Kerwin Shen + sqh9595&qq.com +57705 + Irish Residential Properties REIT plc + Matthew Gillick + Matthew.gillick&caseddimensions.com +57706 + Busit + Emmanuel Lécharny + emmanuel.lecharny&busit.com +57707 + Ultra Electronics - Forensic Technology + IT Manager + It_subs&ultra-ft.com +57708 + Generic Network Systems LLC + Rob Trangmar + support&gnetsys.net +57709 + Secret Double Octopus, Inc. + Bobby Kuzma + bobby.kuzma&doubleoctopus.com +57710 + Arctec Solutions + adrian coles + adrian&arctecsolutions.co.uk +57711 + Oelmann Elektronik GmbH + Evert ter Veen + Evert.terVeen&Oelmann-Elektronik.de +57712 + JetBrains + Eugene Petrenko + eugene.petrenko&jetbrains.com +57713 + Intellian Technologies, Inc. + Seokbae Jeong + seokbae.jeong&intelliantech.com +57714 + Chengdu Volans Technology Co., Ltd. 
+ DaiWenHao + daiwh&adslr.com +57715 + Bundesamt für Migration und Flüchtlinge + Steffen Wölfel + pki&bamf.bund.de +57716 + Raphael Peters + Raphael Peters + iana&raphaelpeters.de +57717 + Moritz Marquardt (peopled) + Moritz Marquardt + peopled&momar.de +57718 + EDL Systems + Edward Macnaghten + eddy&edlsystems.com +57719 + Beijing HZFD Technology Co., Ltd + Xiaokai Qi + qi.xiaokai&QQ.com +57720 + AFORP-AFTI Numerique + ANDRIGHETTI Dominique + d.andrighetti&aforp.fr +57721 + Telecom Armenia CJSC + Artyom Kuzikyan + artyom.kuzikyan&telecomarmenia.am +57722 + Mode Choc Alma ltee + Jean-Francois Caron + informatique&modechoc.ca +57723 + Christian Financial Resources + Jason Stroup + jason.stroup&cfrministry.org +57724 + SEPI Engineering & Construction + Lei Cao + lcao&sepiinc.com +57725 + Xage Security, Inc. + Alex Valderrama + support&xage.com +57726 + VivoKey Technologies + Riley Gall + riley&vivokey.com +57727 + Calspan + Thomas Losness + thomas.losness&calspan.com +57728 + InsidePacket Inc + Eli Karpilovski + contact&insidepacket.com +57729 + Foundry Health - an IQVIA business + Lennert Jansen + lennert.jansen&iqvia.com +57730 + Brayden Automation Corporation + Bill Brayden + bill&brayden.com +57731 + Zoom Video Communications, Inc. + Julio Montano + julio.montano&zoom.us +57732 + Frontier KZ LLP + Hleb Stsibla + gs&frontier.kz +57733 + Neom Company + Mohmad Yaseen + mohmad.yaseen&neom.com +57734 + Trasna Solutions Telecom Limited + Gregory Bussard + gregory.bussard&trasna.io +57735 + Computer Development Systems cc + Leon Lessing + leon&cds.co.za +57736 + AK Lab + Anton Korotenko + anton.korotenko&gmail.com +57737 + Informationstechnikzentrum Bund (ITZBund) + Carsten Rackel + zertifikatsmanagement&itzbund.de +57738 + Inception doo Beograd + Jovan Vistac + jovan.vistac&inception.rs +57739 + Pedro J. Carvajal MDPA + Nieves Rodriguez + nrodriguez&pedrocarvajalmd.com +57740 + Flipkart Internet Pvt. Ltd. + Debalin Kundu + debalin.kundu&flipkart.com +57741 + NYP Gracie Square Hospital (GRAC_NY (OPT-025867)) + Labbo Sulley + las9205&nyp.org +57742 + Wall Media Ltd + John Funk + john.funk&valleyfiber.ca +57743 + CounterFlow AI + Randy Caldejon + rc&counterflowai.com +57744 + MainConcept GmbH + Frank Schoenberger + frank.schoenberger&mainconcept.com +57745 + Jan Ackerstaff + Jan Ackerstaff + jan.ackerstaff&ackerstaff.eu +57746 + Acorel + marc lambert + admin.si&acorel.com +57747 + Unidata S.p.A. + Andrea Marinelli + a.marinelli&unidata.it +57748 + Fuze Inc. + Daniel Jolly + djolly&fuze.com +57749 + Nuphoton Technologies Inc + Emilmon Alias + emilmon&nuphoton.com +57750 + The Simpson Organization + Paul Kurr + ITServices&SimpsonOrg.com +57751 + Sven Hankel + Sven Hankel + ldapadm&findst.net +57752 + Whitmore High School + Nalin Uduwawala + ITnetmanager&whitmore.harrow.sch.uk +57753 + Therapixel + John Stark + sysadmin&therapixel.com +57754 + DATRAK Digitális Adattranzakciós Központ Kft. 
+ Balaton Alexandra + ugyfelszolgalat.SZKR&datrak.hu +57755 + Guangzhou Junda Technology Co., Ltd + Nolan Li + lihejin&junda-tech.com +57756 + CFMM + Martyn Klassen + cfmmadmin&robarts.ca +57757 + MOST + Tamir Berejik + tamirb&most-sys.com +57758 + Stadtverwaltung Burgdorf + Lukas Haldemann + informatik&burgdorf.ch +57759 + Sam Brittingham + Sam Brittingham + slbrittin2&gmail.com +57760 + Awesomentertainment + Kenneth Cole + tretraxxx&yahoo.com +57761 + VSENS + Didier To + contact&vsens.fr +57762 + Kowloonbia International Limited + Raymond Lau + raykwlau&gmail.com +57763 + Vollmergruppe + Joeran Hesse + joeran.hesse&vollmergruppe.de +57764 + Cylus Cyber Security Ltd + Miki Shifman + miki&cylus.com +57765 + BKS Products Pvt Ltd. + Dron Rathore + drona188&gmail.com +57766 + Some Engineering Inc. + Lukas Loesche + lukas&some.engineering +57767 + Munif Salek Md Inc + Grace Macias + grace&rvegamd.onmicrosoft.com +57768 + ALTENLOH, BRINCK & CO + Helpdesk IT + info_pen&altenloh.com +57769 + Wyatt Accelerator + Robb Penoyer + robb&wyattaccelerator.com +57770 + NHS Arden & GEM Commissioning Support Unit + M. A. Walter + michael.walter&nhs.net +57771 + Rhisa + Amine Bouamama + amine.bouamama&rhisa.eu +57772 + Delta Fire Ltda + samara bertin + administrativo&deltafire.com.br +57773 + LYRASIS + Michael Blaine + michael.blaine&lyrasis.org +57774 + Tianjin Joint Optic Tech.Co,Ltd. + jackie + dengyaoyong&jointoptic.com +57775 + MirWiFi + Michael Koren + mc&incarnet.ru +57776 + iSimpleLab + Roman Chigvintsev + r.chigvincev&isimplelab.com +57777 + Teceves + Thomas Conway Poulsen + tecp&teceves.com +57778 + Hochschule fuer bildenede Kuenste + Bernd Flickenschild + oidadmin&hfbk-hamburg.de +57779 + PingTech GmbH + Christof Schulze + christof.schulze&pingtech.de +57780 + Stadtwerke Haltern am See GmbH + Markus Frerichmann + edv&stadtwerke-haltern.de +57781 + Idox PLC + Daniel Kirk + daniel.kirk&idoxgroup.com +57782 + ACOD + Aleksei Chukaev + allch1&yandex.ru +57783 + Delta Controls Germany GmbH + Christoph Diehm + cdiehm&deltacontrols.de +57784 + Trinidat + Karsten Dreimann + karsten.dreimann&trinidat.de +57785 + Eliagroup + Bert Van De Merckt + mailadmins&elia.be +57786 + Centre For Development of Telematics + Munish Kumar Gupta + munish&cdot.in +57787 + Profelis + Caglar Ulkuderner + caglar&profelis.com.tr +57788 + Western Alliance Bank + Majd Basharat + mbasharat&westernalliancebank.com +57789 + Biocrates Life Sciences AG + Robert Bartnicki + it&biocrates.com +57790 + Aubrey Smith + Aubrey Smith + aubrey.smith&orcon.net.nz +57791 + On Alert Systems, Inc. + Efrain Logreira + ehlogreira&onalert.com +57792 + FELIX A STANZIOLA + ESTHER GORDECHE + FSTANZI123&GMAIL.COM +57793 + City Water, Light, & Power + Michael David + michael.david&cwlp.com +57794 + Open Access Technology International, Inc. + Contracts Administration + Contracts&oati.net +57795 + Nucleus Group Ltd. + Michael Wilson + michael.wilson&n2.net.nz +57796 + Kingsley E. Agbeyegbe, MD, PC + Kingsley Agbeyegbe + kingsley.agbeyegbe&gmail.com +57797 + WebCode Kft. + Dr. David Vincze + iana&webcode.hu +57798 + Stiebel-Getriebebau GmbH & Co. 
KG + Holger Manz + h.manz&stiebel.de +57799 + Universitaetsspital Basel + Security Operations Center + pki&usb.ch +57800 + FromDual GmbH + Oli Sennhauser + oli.sennhauser&fromdual.com +57801 + Park State Bank and Trust + Jason Stroup + jason.stroup&psbtrust.com +57802 + Starwalt Consulting Services + Leighton Starwalt + leighton&starwaltconsulting.com +57803 + Colossus Data Company + Jeff Markham + jeff&colossus.io +57804 + CodeUP + Upik Saleh + upxsal&gmail.com +57805 + Link-Mink + Damir Franusic + damir.franusic&gmail.com +57806 + RGF Staffing Belgium + Xander Boedt + security&usgpeoplehosting.com +57807 + ABO-GROUP ENVIRONMENT + Xander Boedt + support&abo-group.eu +57808 + Bolaang Mongondow Selatan + Upik Saleh + upxsal&gmail.com +57809 + Kukusa + Upik Saleh + upxsal&gmail.com +57810 + Safe-T Group Ltd. + Denys Kutsak + denys.kutsak&safe-t.com +57811 + NEPHTHYS SYSTEMS + David Figgins + PTAH.NEPHTHYS&GMAIL.COM +57812 + Elm City Communities + James Pekar + jpekar&elmcitycommunities.org +57813 + OIM Consulting (Pty) Ltd. + Andrew Broekman + broekman&oimconsulting.com +57814 + New Balance Poland Sp. z o.o. + Jacek Kałucki + admin&newbalance.pl +57815 + Shenzhen C&M I.T. Co., Ltd. + Wang Libin + wang.libin&szkemai.com +57816 + Shanghai Xin Tonglian Packing Co.,Ltd + Jun Zhang + zhangjun&xtl.sh.cn +57817 + Guangzhou JN UNION Technology Co., Ltd ("JN Union") + Hongzhu Quan + supply&keyou.cn +57818 + PNTECH JSC + Nam Nguyen + nam&pntech.vn +57819 + eprimo GmbH + Sigrun Oehlke + postmaster&eprimo.de +57820 + Europapier Austria GmbH + Alfred Grunner + administrator&europapier.com +57821 + pubchat + Barry J Burns + barry&barryjburns.com +57822 + Michigan Medicine + John Kaminski + johntk&med.umich.edu +57823 + Entaksi Solutions SpA + Stefano Travelli + stefano.travelli&entaksi.eu +57824 + sim-hank + sim hank tan + sim-hank&sim-hank.com +57825 + Shanghai Saizhi Information Technology Ltd. + limin liao + robert.liao&saizhitech.com +57826 + Viktor Madarasz + Viktor Madarasz + viktor.madarasz&viktormadarasz.com +57827 + XiangTan BenChu Network Technology Co,ltd. + Yi Xie + xieyi&bcsite.cn +57828 + Ferrari SpA + Marco Severino + marco.severino&ferrari.com +57829 + Suzhou Tremenet Communication Technology Co., Ltd. + Bo Yang + admin&treme.net +57830 + DKE + Alexander Nollau + dke-ianapen&vde.com +57831 + I4 Consulting LLC + Justin Keller + Justin&i4consulting.com +57832 + UD Trucks Corporation + Naoto Honma + naoto.honma&udtrucks.com +57833 + Netmon + Sungsik, Kim + iamsch&netmon.co.kr +57834 + Metis IT B.V. + Theo Hardendood + theo.hardendood&metisit.com +57835 + Trafic Technologie Système + Pierre Olivier Billy + po.billy&ttsys.fr +57836 + LLC Cyberprotect + Alexander Feoktistov + Alexander.Feoktistov&acronis-infoprotect.ru +57837 + Tenet Networks Private Limited + Anil Joseph + anil&tenetnetworks.com +57838 + ProstoDNS Ltd. + Konstantin Mikhailov + k.mikhailov&navn.ru +57839 + Southeastern Archaeological Research, Inc. + Justin Richards + itvendors&searchinc.com +57840 + Arkham Technology Ltd. 
+ Thomas Sato + arkinfo&arkhamtechnology.com +57841 + Elite AI + Adil Dahlan + adil.dahlan&ucd.ie +57842 + AVCOMM Technologies Inc + Kang Yu + kang.yu&avcomm.us +57843 + Neutralino Space Ventures, Inc + sam waldman + waldman&neutralinospace.com +57844 + First Business Bank + Jordan Altmann + jaltmann&firstbusiness.bank +57845 + Fälth Öst Skog AB + Anders Fälth + info&fo-skog.se +57846 + Guardtime-Federal + Greg Weber + info&guardtime-federal.com +57847 + ITSD + Jerry L Lewis + jllewis&itsdmail.com +57848 + International Aero Navigation Systems Concern, Joint Stock Company + Anton Zaytsev + zaytsev&ians.aero +57849 + RECTALE Inc. + Edison Lee + ed&rectale.com +57850 + vanheusden.com + Folkert van Heusden + mail&vanheusden.com +57851 + LLC Panasenko IV + Panasenko Vadim + 79034358866&yandex.ru +57852 + Dasa + Eduardo Santos + eduardo.santos&dasa.com.br +57853 + DEH Notificación Electrónica Habilitada, S.L. + Enrique Sánchez + esanchez&dehonline.com +57854 + Motive Energy + Daniel Park + dpark&motiveis.com +57855 + RadiusXS + Stefan Ali + stefanali&gmail.com +57856 + Wilson Logistics + Dan Brewer + dbrewer&wilsonlogistics.com +57857 + Simartis Telecom SRL + Iulian Topliceanu + iulian.topliceanu&simartis.com +57858 + Suffolk Coastal Port Health Authority + Shaun Andrews + shaun.andrews&scpha.gov.uk +57859 + CHROMANET + Alex Chirea + alexc&chromanet.org +57860 + Brighton Corporation + Russell McIntire + IT&brightoncorp.com +57861 + Vita Medical Associates + Kellie Mozingo + kelliem&vitamedicalassociates.com +57862 + MetaGeek, LLC + Ryan Woodings + ryan&metageek.net +57863 + ASETEC Ingenieria de Sistemas S.L. + Elías Barroso + elias&asetecgroup.com +57864 + City of Decatur, AL + Brad Phillips + bephillips&decatur-al.gov +57865 + Eltako GmbH + Ulrich Ziegler + operations&eltako.de +57866 + Umbhaba Estates (PTY) LTD + Anthony Lekhuwana + IT-Administrators&Umbhaba.biz +57867 + TELUS Communication Inc. + Frederic Plante + frederic.plante&telus.com +57868 + Dapyx IT s.r.l. + Ionita Ionut - Eugen + office&dapyxit.ro +57869 + BatteryDAQ + Alan Long + alan.long&batterydaq.com +57870 + Finke + Dr.-Ing. Thomas R. Finke + thomas.finke&gmx.de +57871 + Elementblend + Sam Lam + adminllc&elementblend.com +57872 + Stellantis N.V. + Philippe ROLLAND + philippe.rolland1&stellantis.com +57873 + e-Digital PKI SpA + Rafael Gonzalo Pérez López + pki&e-digital.cl +57874 + Alibaba Cloud Computing Ltd. + Hatter Jiang + haitao.jianght&alibaba-inc.com +57875 + Haitao Jiang + Haitao Jiang + hatter&hatter.me +57876 + Lux Projektmanagement GmbH & Co. KG + Michael Ramspeck + michael.ramspeck&luxhaus.de +57877 + Mayanweb Consulting Ltd + Antonio Boateng + iana&mayanweb.com +57878 + Tees, Esk and Wear Valley's NHS Foundation Trust + Richard Yaldren + serversupport&nhs.net +57879 + Livtech Tecnologia + Luis Felipe Scherer + felipe.scherer&livtech.com.br +57880 + Pend Oreille County + Shane Flowers + it&pendoreille.org +57881 + Pend Oreille County + Shane Flowers + it&pendoreille.org +57882 + UCLA Center for Health Policy Research + Mohamed Mostafa + mhmdmaher&ucla.edu +57883 + my FinTech Inc. + Atsuki Takazawa + atakazawa&j-com.co.jp +57884 + Personal-Touch Holding Corp + Dan Erlichman + derlichman&pthomecare.com +57885 + Alitheon, Inc. + Michael Schmidt + Michael&alitheon.com +57886 + RapidSoft + Egor Shokurov + egors&rapidsoft.ru +57887 + Chimera Systems Ltd + Iestyn Elfick + admin&chimera.dev +57888 + PTRSLVK + Petr Slavík + info&ptrslvk.com +57889 + GeoX Kft. 
+ Tamás Prajczer + info&geox.hu +57890 + MIT-SOFT, UAB + Antanas Mitasiunas + antanas.mitasiunas&mitsoft.lt +57891 + COCONET GmbH + Martin Zottl + office&coconet.at +57892 + HERMES Systeme GmbH + Andreas Schmidt + a.schmidt&hermes-systeme.de +57893 + Indigital + Kent Claussen + kclaussen&indigital.net +57894 + Carrot Laboratories Pty Ltd + Jarrod Linahan + jl&carrotlabs.net +57895 + Nephrology Consultants + Monique Williams + nephrologymanager&gmail.com +57896 + Ballard Power Systems Europe + Jesper Knudsen + jkn&ballardeurope.com +57897 + Beijing EBPioneer Technology Co. Ltd + Zhixiong ZHOU + zzhoux&163.com +57898 + Kodas + Markus Hinkel + admin&kodas.io +57899 + Hilabs + Markus Hinkel + admin&hilabs.eu +57900 + Totally Kids Rehabilitation Hospital + Robert Bilicke + bob&totallykids.com +57901 + ProConnections + Michael ODea + mikeo&proconnections.com +57902 + Iceland Foods LTD + Paul Delamere + paul.delamere&iceland.co.uk +57903 + Airvine scientific inc. + Allen Lee + alee&airvine.com +57904 + Stations-e + Christopher Brown + christopher.brown&stations-e.com +57905 + Stadt Garching + Alexander Kletzl + it&garching.de +57906 + Mercurial Minds + Shahzad Zafar + shahzad.zafar&mercurialminds.com +57907 + Regopath, Inc. + Brad Hughes + brad.hughes&regopath.com +57908 + IMAGO Technologies GmbH + Ralf Goebel + ralf.goebel&imago-technologies.com +57909 + Attendance on Demand, Inc + Vicki Craft + vickic&infotronics.com +57910 + ALTANA Management Service GmbH + .AMS IT Infrastructure Services - Markus Jaekel + ITInfrastructureServices.AMS&altana.com +57911 + Hochu + Titan Lo + gmajdj&gmail.com +57912 + Groupe Souris Mini inc. + Simon Létourneau + sletourneau&sourismini.com +57913 + Village Energy + Ben Hamilton + BEN&VILLAGE.ENERGY +57914 + NetMass Incorporated + Stephen Perkins + perkins&netmass.com +57915 + Landkreis Celle + Manuel Isgen + manuel.isgen&lkcelle.de +57916 + MAILSTONE + Jean-Remi QUIRICONI + contact&mailstone.fr +57917 + Kirchhoff Automotive GmbH + Jan Hartmann + jan.hartmann&kirchhoff-automotive.com +57918 + Halmek Business Solutions + Kazuhide Asaoka + nwkanri&halmek.co.jp +57919 + Lake Washington Internal Medicine PLLC + Arshad Shawky + oid-admin&lwimclinic.com +57920 + Berletex Aero Design + Andrew Barkley + it&berletex.ca +57921 + Condition-ALPHA + Alexander Adolf + pen-adm&condition-alpha.com +57922 + Executive Health of Coral Gables + Manuel Diaz + mannyjdiazmd&gmail.com +57923 + Open Physical Working Group + Lars Carlsen + pen&openphysical.org +57924 + Cobham SATCOM (Thrane & Thrane) + Morten Lyckegaard + morten.lyckegaard&cobham.com +57925 + Aston Martin Lagonda Ltd + Steve Armitstead + sarmitst&astonmartin.com +57926 + TownSuite Municipal Software Inc. + Marc A.
+ Mapplebeck + marc.mapplebeck&townsuite.com +57927 + Open Web Standard + Hatter Jiang + openwebstandard&hatter.me +57928 + SPEDION GmbH + Tobias Heim + iana-req&spedion.de +57929 + IDIL PRODUKSIYON + ADNAN YAVAŞIR + ayavasir&idilpr.com.tr +57930 + Hannon Armstrong + Nauman Memon + nmemon&hannonarmstrong.com +57931 + Kuaishou + Zhao Yongliang + zhaoyongliang&kuaishou.com +57932 + Hefei Zijun guangheng Technology Co., Ltd + Hu Liangneng + 1976083590&qq.com +57933 + ZoTrus Technology Limited + Richard Wang + postmaster&zotrus.com +57934 + Rainy Lake Medical + Roma Korzinski + rkorzinski&rainylakemedical.com +57935 + Ethopass + Keys + keys&ethopass.com +57936 + Mogeneti Systems + Bart van der Bilt + bart&mogeneti.com +57937 + Vietnam National University, Ho Chi Minh City + Bui Quoc Anh + anh.bui&vnu-itp.edu.vn +57938 + OID Base + Quoc-Anh Bui + anh.bui.mail&gmail.com +57939 + Docrates Oy + Aleksi Vartiainen + aleksi.vartiainen&docrates.com +57940 + Witte, Weller & Partner + Christian Giambalvo + support&wwp.de +57941 + AV Soft + Anton Chukhnov + snmp&avsw.ru +57942 + Peter Janke Solutions Inc. + Peter Janke + peter&pjanke.com +57943 + Tenetics + David Albert + dalbert&tenetics.com +57944 + BPDL inc. + Steeve Bouchard + steeve.bouchard&bpdl.com +57945 + VPI Engineering + Mike Brewster + mbrewster&vpitech.com +57946 + Polyglot Labs, Inc. + Trey Jones + trey&cortexdigitalinc.com +57947 + Home Personal Private - Moustafa ElSerougy + Moustafa ElSerougy + mmoustafa001&outlook.com +57948 + Cerner Thrive + Britt Jennings + bj5989&yahoo.com +57949 + 融智通科技(北京)股份有限公司 (Rongzhitong Technology (Beijing) Co., Ltd.) + 陈亚峰 (Chen Yafeng) + chenyafeng&rongzhitong.com +57950 + UberEther, Inc. + George M Topper + matt&uberether.com +57951 + DisplayHub GmbH + Nicola Cecilian + ncecilian&display-hub.com +57952 + Bzu Tech + Andre Medeiros + andre.medeiros&me.com +57953 + Navigator Terminals UK Ltd + Richard Betts + helpdesk&navigatorterminals.com +57954 + BV Tauris + Jeroen Van Herwegen + jeroen.van.herwegen&tauris.be +57955 + Hangzhou Zhongchuan Digital Equipment Co., Ltd. + Will Zhang + 13656679548&163.com +57956 + NHS North of England Commissioning Support + John Gilbert + NECSU.Infrastructure-Systems&nhs.net +57957 + Hyroule.ch + Lionel Carron + lionel.carron&netplus.ch +57958 + Bordier & Cie SCmA + IT Support + itsupport&bordier.com +57959 + eCom Service IT GmbH + Samuel Krueger + s.krueger&m2trust.de +57960 + MOSS + Yann Meury + yann.meury&moss.fr +57961 + Telycan S.L. + Fabián Santana + fsantana&telycan.net +57962 + Sedam IT d.o.o. + Denis Tenšek + denis.tensek&sedamit.hr +57963 + Afzar Pardaz Tose'eh + Vahid Mahboubimorad + aptinfotechco&gmail.com +57964 + ZAP Surgical Systems, Inc + Stephen Girouard + stepheng&zapsurgical.com +57965 + LLC "Company DVK-electro" + Vladimir Chentsov + support&dvk-electro.ru +57966 + Aethera Technologies Limited + Brian Walker + snmp&aethera.com +57967 + INSTITUTO PARA LA DINAMIZACIÓN ECONÓMICA S.A.
+ BIBIANA MORENO LEYVA + BIBIANA.MORENO&INNOFORMA.COM +57968 + TUI Cruises GmbH + IT Support + it-support&tuicruises.com +57969 + WAEIT + Matt Wodzisz + matt.wodzisz&gmail.com +57970 + HKL Baumaschinen + Rolf Tittmann + webmaster&hkl24.com +57971 + Prodilys + Loïc Doceul + loic.doceul&prodilys.fr +57972 + OOO METAX + Kostin Alexander + alexander&metax.ru +57973 + Martini Security + Ryan Hurst + ryan&martinisecurity.com +57974 + Jetstone Asset Management + Gareth MacQuillin + gareth.macquillin&jetstoneam.com +57975 + Brighter Horizons Academy + Andrew McCullough + network&bhaprep.org +57976 + RDB IT Services Inc. + Andrew McCullough + andrew&rdbit.net +57977 + Lyquidity Solutions Limited + Bill Seddon + bill.seddon&lyquidity.com +57978 + Infomedics + Rob Denekamp + rdenekamp&infomedics.nl +57979 + Broadacres Housing Association + Steve Cook + ict.team&broadacres.org.uk +57980 + "RUPOST" LLC + Alexey Fomenko + afomenko&astralinux.ru +57981 + (Hefei) Zijun guangheng Technology Co., Ltd + Li Songhua + pier_2019&163.com +57982 + ALFIERI CARDIOLOGY + CYNTHIA HEPBRON + C.HEPBRON&ALFIERICARDIOLOGY.COM +57983 + Linka Cloud + Philippe-Adrien Nousse + philippe.adrien.nousse&gmail.com +57984 + simplequantum.com + Corban Petersoncordova + corban&simplequantum.com +57985 + Hybrid DSP Systems B.V. + Ingmar van Klink + ingmar&hybriddsp.nl +57986 + Poppe + Potthoff GmbH + Jacek Bonicki + iana&poppe-potthoff.com +57987 + Green Revolution Cooling + Jason Bockmon + jbockmon&grcooling.com +57988 + Bach-Simpson + Eric Enright + eenright&wabtec.com +57989 + Telecom Nancy + Cyril LAMY + cyril.lamy&telecomnancy.eu +57990 + ZuTES + OIT + oit&zutes.ru +57991 + W W Wood Products + Cody Wilhelm + it.department&wwinc.com +57992 + Rigger SA + João Doroana + joao.doroana&trigger.systems +57993 + Thor Technologies PTY LTD + George Forster-Jones + george&thortechnologies.com.au +57994 + Consent Proxy + Eric Lee + admin&consentproxy.com +57995 + YUAN High-Tech Development Co., Ltd. + TSAI YI CHIEH + Amber&yuan.com.tw +57996 + Parazzini + Andrea Parazzini + andrea&parazzini.it +57997 + SURVICE Engineering Company + Aaron Wawrzyniak + webmaster&survice.com +57998 + SURVICE Engineering Company + Aaron Wawrzyniak + webmaster&survice.com +57999 + Coastal Pulmonary, P.A. + INA JOYCE + inajoyce2014&gmail.com +58000 + 银君皮鞋服装超市 (Yinjun Leather Shoes and Clothing Supermarket) + 杨延 (Yang Yan) + 2891164865&qq.com +58001 + SCUT Machine Intelligence Lab + Weiwen Hu + huww98&outlook.com +58002 + Innovile Teknoloji ve Yazilim Hizmetleri Bilisim San. ve Tic. Ltd. Sti. + Erdal Koklu + erdal.koklu&innovile.com +58003 + ARD ZDF Deutschlandradio Beitragsservice + Alexander Jaschke + alexander.jaschke&beitragsservice.de +58004 + Lydia Solutions + Alison Alonso + alison.alonso&lydia-app.com +58005 + Mary Bird Perkins Cancer Center + Colton Morrish + cmorrish&marybird.com +58006 + Lepton Systems + Yossi Maish + yossi&leptonsys.com +58007 + ECI + Jeremy Sydney + support&eworx.com +58008 + Küchen Aktuell GmbH + David Schroff + contacting&kuechenaktuell.de +58009 + DataCache Online + Ben Kolling + admin&datacache.online +58010 + Terry Burton Consulting Ltd + Terry Burton + tez&terryburton.co.uk +58011 + Universitas Udayana + Nyoman Putra Sastra + administrator&unud.ac.id +58012 + REDTEA MOBILE PTE.LTD.
+ Helen Chen + helen.chen&redteamobile.com +58013 + Klassik Stiftung Weimar + Kai Gorschewski (Department Informationstechnik und Organisation) + informationstechnik&klassik-stiftung.de +58014 + Cleveland Diagnostics, Inc + Sean Hennigan + sean.hennigan&clevelanddx.com +58015 + Société de l’Aéroport de Luxembourg S.A. + Vincent Booz + IT-Engineer&lux-airport.lu +58016 + A&R TECH + IT Support + office&artech.at +58017 + Fast Enterprises, LLC + Jesse Dyer + jdyer&fastenterprises.com +58018 + CRDE + Mathieu Anquetin + mathieu.anquetin&groupe-cahors.com +58019 + OSS DESIGN + Nathan Griffiths + Nathan.griffiths&bt.com +58020 + Cozzoli Machine Company + Dragutin Stoicovici + dstoicovici&cozzoli.com +58021 + Scutech + Jimmy Yu + yujian&scutech.com +58022 + TPNet Informatica + Marco Alves + marco.alves&ispm.com +58023 + Monogon SE + Leopold Schabel + iana&monogon.tech +58024 + InfoCision Management Corp + Rick Search + rs44&infocision.com +58025 + cerbris + Jacob Cooper + admin&cerbris.com +58026 + 成都卓讯云网科技有限公司 (Chengdu Juson Cloud Network Technology Co., Ltd.) + 关创创 (Guan Chuangchuang) + gc1617903163&163.com +58027 + Marex + Danny Gayler + dgayler&marex.com +58028 + Badan Pemeriksa Keuangan Republik Indonesia + netadmin + netadmin&bpk.go.id +58029 + Angora Networks + Taylan Esen + tesen&angoranetworks.com +58030 + beltskyy.com + Pavel Poyasonebov + beltskyy&gmail.com +58031 + Gobierno de la Ciudad de Buenos Aires + Gustavo Linares + linaresg&buenosaires.gob.ar +58032 + Colibri SAS - ManoMano + Philippe de Chanville + iana-pen&manomano.com +58033 + Cayman Islands Government + Ian Tibbetts + ian.tibbetts&gov.ky +58034 + VirtualMetric + Yusuf Ozturk + yusuf.ozturk&virtualmetric.com +58035 + iColor Printing & Mailing Inc. + Joe Peterson + joe&icolorprinting.net +58036 + TELE 9752 Project G11 + Xi Wang + z5327657&ad.unsw.edu.au +58037 + ERY BILISIM LTD. STI. + Hasan Altin + hasan.altin&erysystem.com +58038 + TUNA Bilisim ve Yayincilik + Kivanc Oskay + kivanc.oskay&tuna.technology +58039 + TrilineSystems Ltd. + Nick Kosenko + n.kosenko&3lsystems.ru +58040 + Nettec AS + Jan Erik Bjerkholt + post&nettec.no +58041 + Call2Home Networking BV + Wouter de Beer + wouter.de.beer&call2home.nl +58042 + Brian J Lipman MD P.C. + Christy Sandoval + BJL.CHRISTYS&GMAIL.COM +58043 + Polar Communications + Edgar Resendiz + eresendiz&symas.com +58044 + Brian Dube IT + Brian Dube + support&bdubeit.com +58045 + daho.at + Daniel Hofer + webmaster&daho.at +58046 + Kreibich Open Source IT + Kilian Kreibich + info&kreibich.xyz +58047 + Brose-Sitech + Pavel Novotný + Pavel.Novotny&brose.com +58048 + SUNRAY GROUP + Maroš Varchola + marosvarchola&sunray.sk +58049 + smoca ag + David Gunzinger + david.gunzinger&smoca.ch +58050 + Action Car and Truck Accessories + Michael Roach + itsupport&actiontrucks.com +58051 + Ginelli Cancer Care + Joyce Ginelli + jginelli&flatiron.com +58052 + NEW CO 1 + Mathieu HAMON + mathieu.hamon&idemia.com +58053 + ADIXUM GmbH + Dr. Martin Rother + martin.rother&adixum.de +58054 + Colletti Tech LLC + David Colletti + david&colletti-tech.com +58055 + JASON G DEFRANCIS, MD PA + LUCINDA ALVARADO + DEFRANCIS380&YAHOO.COM +58056 + Health and Welfare Information Systems Centre + Marko Valing + hostmaster&tehik.ee +58057 + Kyndryl Japan Inc. + AOEMF Contact + aoemf-contact&kyndryl.com +58058 + LEERANG + Songi, Lee + iequalto&naver.com +58059 + Jinan Inspur Data Technology Co., Ltd. 
+ Wilson Liu + liuyibj&inspur.com +58060 + LMAX Trust Services + George Palmer + palmerg&lmax.com +58061 + Booker Ltd + Sandra Sills + sandra.sills&booker.co.uk +58062 + Infozech Software Pvt Ltd + Manoj yadav + manojyadav&infozech.com +58063 + Beyond Identity, Inc. + Ismail Mustafa + ismail.mustafa&beyondidentity.com +58064 + Zezima + Maxence Caron + maxence.caron&protonmail.com +58065 + cyan Security Group GmbH + Markus Cserna + markus.cserna&cyansecurity.com +58066 + UAB "ICECO" + Ramūnas Vosylius + admin&iceco.lt +58067 + Exsolvi Holding AB + David Boman + pen&exsolvi.se +58068 + TCN Inc + Florin Stan + florin.stan&tcn.com +58069 + BxR + Gregor Kroesen + gregor&kroesen.de +58070 + CRO55 + Nicholas Cross + nicholas&cro55.eu +58071 + AMT Group + Polovinko Vyacheslav + vpolovinko&amt.ru +58072 + yamnord GmbH + Nicolas Stalder + nicolas&yamnord.com +58073 + Hellmann Poultry GmbH & Co. KG + Robert Pschorn + rpschorn&hellmannpoultry.de +58074 + Weidner Apartment Homes + Weidner Information Technology + postmaster&weidner.com +58075 + Wombkeepers AZ Obstetrics and Gynecology, PC + Cathie Galle + cathie&wombkeepers.com +58076 + Anduril Industries + Ameen Manghi + amanghi&anduril.com +58077 + Electrotecnica S.A. + Erwin Botas + ebotas&grupoelectrotecnica.com +58078 + International vending network service supply + Felix Lee + felix&ivnss.com +58079 + Linux Laboratory Network + Alexander Bergmann + alexander.bergmann&linlab.net +58080 + ITM Software sp. z o. o. sp. k. + Krzysztof Stebner + oid&itm-soft.pl +58081 + Ballerup Kommune + Michel Fruergaard Masana + mmas&balk.dk +58082 + ARS Products + Scott Croteau + scroteau&arsproducts.com +58083 + IoT4Farms Ltd + Richard Carpenter + richardc&iot4farms.com +58084 + Město Horšovský Týn + Jiří Kliment + j.kliment&muht.cz +58085 + dcbel Inc. + Peter Ibrahim + peteri&dcbel.energy +58086 + GEDS Global Inc. + Ed Neipris + ed&securedbyvault.com +58087 + R+GA GmbH + Rafael Schniedermann + r.schniedermann&ruga-verl.de +58088 + Cybersign + Roberto Chiroy + contact&celera.com.gt +58089 + Service Objects + Travis Quine + IT&serviceobjects.com +58090 + Lierda Technology Group Co., Ltd. + chenyongkang + devops&lierda.com +58091 + Air Hydro Power + Kevin Hester + khester&airhydropower.com +58092 + HK Danmark + Finn Jacobsen + Finn.jacobsen&hk.dk +58093 + Beijing Trusfort Technology Co., Ltd. + wang bo + wangbo&trusfort.com +58094 + Abu Dhabi Health Services Company + Kapil Dayal + kapilsd&seha.ae +58095 + Dadeh Gostar Asr Novin P.J.S. Co. (HiWEB) + Auraud Moulana + auraud&hiweb.ir +58096 + National Authority for Digital Certification + Fatima Saad Saeed Farah + fatima.saeed&nadc.gov.sd +58097 + Urzad Ochrony Konkurencji i Konsumentow (UOKiK) + Piotr Bukowski + it&uokik.gov.pl +58098 + Atos Systems Business Services GmbH - ASCN + Sabine Ludewig + sabine.ludewig&atos.net +58099 + Unassigned + Returned 2021-11-30 + ---none--- +58100 + INSOFT s.r.o. + Marek Seemann + marek.seemann&insoft.cz +58101 + Mayor and City Council of Cumberland Md + Chip Watkins + chip.watkins&cumberlandmd.gov +58102 + Tom Wellmann + Tom Wellmann + info&tomwellmann.de +58103 + Agsensio Pty Ltd (Zetifi) + Arunashis Ghose + arun&zetifi.com +58104 + 496tb + Karim Rahmani + krahmani&daemex.io +58105 + Guangzhou Bingo Software Co.,Ltd. 
+ Li Jingwei + lijingwei9060&qq.com +58106 + RAMAXEL + Jiangchao WU + wjc532&163.com +58107 + Syskey Softlabs Private Limited + maheswaran T + maheswaran.t&syskeysoftlabs.com +58108 + ProCAncer-I Project + Stelios Sfakianakis + ssfak&ics.forth.gr +58109 + Allgeier IT Solutions GmbH + Alexander Wolfshohl + alexander.wolfshohl&allgeier-it.de +58110 + Lendio + Joshua Foster + it&lendio.com +58111 + F&G Annuities & Life + Chris Sweeney + chris.sweeney&fglife.com +58112 + Banco Solidario S.A. + Armando Medrano + amedrano&bancosol.com.bo +58113 + OAM company + Mike Yim + hpyim&yahoo.com +58114 + CloudAtWork + Tim Van haudt + tim&cloudatwork.be +58115 + Axess Networks Solutions + Mauricio Tamayo + mauricio.tamayo&axessnet.com +58116 + Cats Protection + Tristan Monk + CatsProtection.IANA&Cats.org.uk +58117 + Daemex LLC + Karim Rahmani + krahmani&daemex.io +58118 + Tau Hypercomputing Facility + Sam Tau + sysop&tau.zone +58119 + Sungrow Power Supply Co., Ltd. + Li Guangqiang + john.lee&cn.sungrowpower.com +58120 + Kombiverkehr Deutsche Gesellschaft für kombinierten Güterverkehr mbH & Co. KG + Gregor Lotz + glotz&kombiverkehr.de +58121 + Marcel Krüger + Marcel Krüger + iana&2krueger.de +58122 + FHEM e.V. + Rudolf Koenig + vorstand&fhem.de +58123 + Nations Holding Company + Julio Matienzo + jmatienzo&nationsts.com +58124 + DPC INDIA PRIVATE LIMITED + KRISHAN KANT + krishan.kant&dpcindia.net +58125 + Pulsar K Bogusz Spółka Jawna + Robert Świątko + pomoctechniczna&pulsar.pl +58126 + Frodexim Trade LLC + Ivan Karadenizov + frodexim&frodexim.com +58127 + ООО "Корда Групп" ("Korda Group" Ltd.) + Yaroslav Zolotov + y.zolotov&korda-group.ru +58128 + Salzgitter Maschinenbau + Kevin Kötz + hotline&smag.de +58129 + Beyond Expectations - BEXP + Moustafa ElSerougy + mmoustafa001&outlook.com +58130 + Sapphire Community Health Inc. + Coy Altenbaumer + caltenbaumer&sapphirechc.org +58131 + Digital Harmonic, LLC + Digital Harmnonic Webmaster + info&digitalharmonic.com +58132 + xFusion Digital Technologies Co., Ltd. + maoali + maoali&xfusion.com +58133 + Bait Al-Gomla Corporation + Information Technology Department + it&bgomla.com +58134 + IEF-Werner GmbH + Daniel Kaltenbach + it&ief-werner.de +58135 + Stephan Brunner + Stephan Brunner + iana-pen&stephan-brunner.net +58136 + Lexicon Pharmaceuticals, Inc + Manuel Arroyo + PenRegistry&lexpharma.com +58137 + TBS Factoring Service, LLC + Tyler Kerr + tyler.kerr&tbsokc.com +58138 + TBS Factoring Service, LLC + Tyler Kerr + tyler.kerr&tbsokc.com +58139 + Daniel Cardoza + Daniel Cardoza + danielpcardoza&gmail.com +58140 + Royal National Orthopaedic Hospital + Digital Services + rnoh.itsupport&nhs.net +58141 + Inetum Switzerland Ltd. + Patrick Joss + patrick.joss&inetum.com +58142 + Avenda Health + Joshua Shubert + josh&avendahealth.com +58143 + Sheldon College + ICT Services + softwarelicencing&sheldoncollege.com +58144 + Skaylink GmbH + Heiko Kuhlmann + heiko.kuhlmann&skaylink.com +58145 + PREMIER MEDICAL GROUP OF CA + Kerri Vanderwalker + KVANDERWALKER&PHGCAL.COM +58146 + ShenYang JinJinXin Science and Technology Ltd. + Wang Qing + wangqing&wdfts.com +58147 + Shenyang Wonderful Techenology Co., Ltd + Wang Qing + wangqing&wdfts.com +58148 + Busana Apparel Group + Jaimy Azle + sysadmin&usg.co.id +58149 + S&T Hrvatska d.o.o. + Rene Debeuc + rene.debeuc&snt.hr +58150 + West Texas Obgyn + Deepika Devalla + thecrownmeridianllc&gmail.com +58151 + MaadiXZone S.L + Maddalena Falzoni + contact&maadix.net +58152 + Krauss-Maffei Wegmann GmbH & Co. 
KG + Werner Stock + certadmin&kmweg.de +58153 + jdtw + John Wood + j&jdtw.us +58154 + Boardigo SA + Julien Jenoudet + contact&boardigo.com +58155 + DeBoer Tech Services, LLC + Thomas DeBoer + Thomas&deboertechservices.com +58156 + Bluefront + Rafael Stader + bluefrontstudios&gmail.com +58157 + Daehan Power Electronic + Kim Ik ky + tse2018&naver.com +58158 + Alpha Bridge Technologies Private Limited + Rajiv Mittal + rajiv&alphabridge.tech +58159 + Addison Lee + Ethan Garrett + infrastructure&addisonlee.com +58160 + Adportas Media GROUP S. A. + Igor Bohme + ibohme&adportas.cl +58161 + Emergent Vision Technologies Inc + John Ilett + jilett&emergentvisiontec.com +58162 + Shenzhen cloud Dier Technology Co.,Ltd + YUNDIER + golden&yunideal.com +58163 + Kemp ECS Connection Manager + David O'Connor + doconnor&kemp.ax +58164 + i2iCore Technologies + Baiju + baiju&i2icore.com +58165 + Swedavia AB + Swedavia PKI SDM + cert&swedavia.se +58166 + SVMS + Yann COHEN + yann.cohen&svms.eu +58167 + Town of Irondequoit + Kevin LaBarr + it&irondequoit.gov +58168 + Gmina Olsztyn + Centrum Informatycznych Uslug Wspolnych Olsztyna + sekretariat&ciuwo.olsztyn.eu +58169 + Cyburity, Inc. + Tris Emmy Wilson + oid-admin&cyburity.com +58170 + Canvas Medical + Alex Gentry + alex.gentry&canvasmedical.com +58171 + Groundspace + Radim Badsi + radim&groundspace.io +58172 + AudioScience, Inc. + Delio Brignoli + dbrignoli&audioscience.com +58173 + Taheem Johnson, Inc. + Taheem Johnson + johnson&taheemjohnson.com +58174 + WellJoint (Shanghai) Technology Co., Ltd. + Bin Yan + yanbin&welljoint.com +58175 + Ateliers François + Jérémie Mallié + jeremie.mallie&afcompressors.com +58176 + SYNCHRONIC + Nicolas BIGNARD + service-info&synchronic.fr +58177 + LS Project + William Fan + contact&lsa.moe +58178 + Vodafone Group Home Devices + Fabian Kraft + fabian.kraft1&vodafone.com +58179 + SJ – Die Falken LV Niedersachsen + Lorenz Sieben + webmaster&falken-niedersachsen.de +58180 + DUOMED S.A. + Mauricio Galamba + mgalamba&visualmedica.com +58181 + Jasper T Trading Inc. + Elton Hsu + it&cindychao.com +58182 + THREATINT (CYPRUS) LTD + Stefan Beyer + netops&threatint.com +58183 + The Mx Group + Lenard Fudala + lfudala&themxgroup.com +58184 + First Western Bank and Trust + Douglas Benzer + doug.benzer&firstwestern.bank +58185 + GOVCERT.LU + Guy Foetz + software+iana&govcert.etat.lu +58186 + trustable solutions UG + Andreas Kuehne + kuehne&trustable.de +58187 + Elemental Software + Peter Linss + peter&elemental.software +58188 + Pölkky Oy + Tuomas Virranniemi + tuomas.virranniemi&polkky.fi +58189 + BREAKFAST + Will Rigby + will&breakfastny.com +58190 + Prequal Digital Mining + Sunit Govind + sunitgovind571&gmail.com +58191 + En+ + Slesarev Aleksandr + iana&enplus.ru +58192 + Regio Energie Solothurn + Sascha Doerr + it_servicedesk&regioenergie.ch +58193 + Perto S.A. - Periféricos para Automação + Jefferson Augusto Krug Pereira + jkrug&perto.com.br +58194 + Bulgarian Deposit Insurance Fund + Mihail Mouhlov + iana&dif.bg +58195 + C Spire + Bryan Fallin + bfallin&cspire.com +58196 + BCi Digital + John Bartlett + john.bartlett&bcidigital.com +58197 + SIGN8 GmbH + Richard Ranftl + info&sign8.eu +58198 + FiberHome Supermicro lnformation Technology Co.,LTD + ziming cheng + zmcheng&fsit.com.cn +58199 + JetIO Technology Ltd. + Zi Xuan He + hezi&jet-io.com +58200 + NoBiG mbH + Jakob Heinemann + jakob.heinemann&nobig.de +58201 + Optimum Mobile + Axel Rivas + Axel.Rivas&AlticeUSA.com +58202 + SPbEC-Mining, Ltd. Co.
+ Sergei Borisenko + borisenko&spbec-mining.ru +58203 + Cataldo Ambulance Service, Inc. + Alexander Chute + achute&cataldoambulance.com +58204 + Com4 AS + Henning Solberg + hso&com4.no +58205 + diconium GmbH + Daniel Niccoli + daniel.niccoli&diconium.com +58206 + Newbury Corporation + Tom Johnson + tjohnson&ndgroup.com +58207 + Collège Sainte-Anne + Serge Bujold + bujolds&sainteanne.ca +58208 + Verbandsgemeinde Leiningerland + Thilo Buhl + it&vg-l.de +58209 + LLC "LODM-Technology" + Glazkov Valery + vg&LODM-technology.ru +58210 + MDS Global + George Sheridan-Smith + George.Sheridan-Smith&mdsglobal.com +58211 + Friedrichs + Lars Friedrichs + friedrichs.lars&gmail.com +58212 + Dytech IT-Solutions GmbH + Aleksandar Trkulja + aleksandar.trkulja&dytech.de +58213 + Xeos Medical + Phebe De Coene + phebe.decoene&xeos.care +58214 + IGEN Tech Co., Ltd. + Jenny FAN + tingting.fan&igen-tech.com +58215 + Azure-Hub + Tarek Hassan + tarek&azure-hub.com +58216 + Signoretta Maurizio & co + Emanuele Signoretta + signorettae&gmail.com +58217 + Rally Tactical Systems, Inc. + Shaun Botha + shaun.botha&rallytac.com +58218 + Tozed Kangwei Intelligent Technology + yuxin Hou + houyuxin&gztozed.com +58219 + GRID NV + Harm Scheltens + harm.scheltens&gridnv.nl +58220 + Université Jean Moulin Lyon 3 + Frederic Jacquot + frederic.jacquot&univ-lyon3.fr +58221 + European Spallation Source + Stephane Armanet + stephane.armanet&ess.eu +58222 + Näslund TransportService AB + Näslund Petter + apnaslund&hotmail.com +58223 + akYtec Gmbh + Alex Holm + info&akytec.de +58224 + Telnyx + Sarah Halko + sarah&telnyx.com +58225 + Miami County, KS + Bryce Carter + bcarter&sheriffmiamicountyks.gov +58226 + Trystar, LLC + Tracy Allen + tracy.allen&trystar.com +58227 + Chongqing Lavid Industrial Co.,Ltd. + Luo Hai Peng + pi20&pix-link.com +58228 + Nature Coast Psychiatry PLLC + Eilis Clark + office&naturecoastpsych.com +58229 + Terial Corp + Nick Kariger + terialcorp&gmail.com +58230 + Sectorinstituut Transport en Logistiek + Felton B.V. + servicedesk&felton.nl +58231 + Minervis GmbH + Timo Scheuer + timo.scheuer&minervis.com +58232 + IoTconsultancy.nl + Esko Dijk + esko.dijk&iotconsultancy.nl +58233 + ELPRO Technologies + John White + john.white&elpro.com.au +58234 + cherryEPG + Bojan Ramsak + info&cherryhill.eu +58235 + MUJIN ENG + yonghun + yonghunn&naver.com +58236 + ZeronsoftN + Joseph Lee + joseph&zeronsoftn.com +58237 + Jointsc + yang cao + caoyang&jointsc.com +58238 + Gators Homelab + William Hughes + william.gator&gmail.com +58239 + Oesterreichischer Bundesverlag + Goesta Smekal + it&oebv.at +58240 + Stickfish Software GmbH + Thomas Trenker + thomas.trenker&stickfish.at +58241 + Passport Automotive Group + Jeffrey Parker + jeffrey.parker&passportauto.com +58242 + A2 Corp. + Tsubasa Kawano + iana&a-2.co.jp +58243 + WBS Technology + Leo Huang + leo.h&wbstech.com.au +58244 + Andasis Elektronics Inc + Fuat Kayapınar + fuat.kayapinar&andasis.com +58245 + Stuttgart Inc. 
+ Gerhard Lamprecht + gerhard&stuttgart-usa.com +58246 + NRA Group + Nick Zepp + nzepp&nragroup.com +58247 + National Steel Car + Garrett Bayross + garrett.bayross&reisinformatica.com +58248 + eFAB P.S.A + Marcin Kotulski + mjk&efab.pl +58249 + Bermuda Hospitals Board + Jenna Constable + Jenna.Constable&bhb.bm +58250 + Pyramid Heating and Cooling + Larry Trickel + sysadmin&pyramidheating.com +58251 + Småland Timber Aktiebolag + Daniel Martinsson + daniel.martinsson&smalandtimber.se +58252 + Marine Radio LLC + Anatoly Kochubey + anatoly&marineradio.ru +58253 + NetSeT Global Solutions + Matija Savić + matija.savic&netsetglobal.rs +58254 + 北京吉视汇通科技有限责任公司 (Beijing Jishihuitong Technology Co., Ltd.) + 姜芬 (Jiang Fen) + jiangfen12&126.com +58255 + gaelicWizard.LLC + John Pell + accounts&mayflowerinfotech.com +58256 + Rebio AB + Kristoffer Normark + kristoffer.normark&rebio.se +58257 + Siemens Shenzhen Magnetic Resonance Ltd. + Hang Zhang + hang.zhang&siemens-healthineers.com +58258 + Defensya ingenieria Internacional S.L. + victor fuentes + victor.fuentes&defensya.com +58259 + Mississippi County Electric Cooperative, Inc. + IT Department + administrator&mceci.com +58260 + MergeBase + Oscar van der Meer + oscar&mergebase.com +58261 + Nuvve Holding Corp + Hamza Lemsaddek + it&nuvve.com +58262 + Zamiri Page + Zamir Hoxha + zamirhoxha&gmail.com +58263 + Tealcommunications inc. + Björn Knudsen + bjorn&tealcommunications.com +58264 + Simon's Personal Information Infrastructure + IT Department + simons-pii-contact&protonmail.com +58265 + Cornerstone Christian College + Bruce Mapstone + Bruce.Mapstone&cornerstone.wa.edu.au +58266 + Hebei Jiaxu Fumei data information Co., Ltd + Panpan Liu + liupanpan&hbjxfm.com +58267 + BT EdenNET + Nathan Griffiths + nathan.griffiths&bt.com +58268 + Vastuu Group Oy + Jouni Hämäläinen + admin&vastuugroup.fi +58269 + MicroSpunk LLC + Timothy Ste. Marie + tim&stemariefirm.com +58270 + Profian Inc. + Niccholas Rodriguez Vidal + nick&profian.com +58271 + StreamSource.org + William Bunce + bill&ishiganto.com +58272 + Waterford Consultants, LLC + Matthew Schipani + mschipani&waterfordconsultants.com +58273 + Cellhire plc + Alex Burtt + ishelpdesk&cellhire.com +58274 + HILO.CZ + David Krotil + david.krotil&hilo.cz +58275 + Eternalplanet Energy Ltd + Jepson.zhu + jepson.zhu&eternal-planet.com +58276 + Bundeskriminalamt + Holger Münch + pen&bka.bund.de +58277 + Insite360 + Connor Hunt + connor.hunt&insite360suite.com +58278 + Adopt Nettech Pvt Ltd + Jignesh Kapadia + jignesh.kapadia&adoptnet.tech +58279 + Peer Software, Inc. + Matt Marsala + mattm&peersoftware.com +58280 + Grassie + Jethro Grassie + jtg&xtrabass.com +58281 + Allianz Technology SE + Christof Chen + christof.chen&allianz.com +58282 + Zethe + Zethembiso Gwala + zethembisogwala&yahoo.com +58283 + BIPBIP365 + Steven Cabu + cabusteven&gmail.com +58284 + Juuso Marttinen + Juuso Marttinen + juuso-iana&juusomarttinen.fi +58285 + Connor Goodwolf + Connor Goodwolf + connor&goodwolf.io +58286 + harada syouten + Akio Harada + harada&it-harada.com +58287 + bpm consult ag + Gabriel Winkler + monitor&bpm.ch +58288 + Digital Workplace 360 GmbH + Nico Ebeling + subscriptions&dwp360.de +58289 + FABMation GmbH + Emanuel Bennici + 20028&fabmation.de +58290 + Guangzhou GRG Wuzhou Technology Co., Ltd. 
+ Fan Zhang + zhangfan&wuzhoucloud.com +58291 + Direction Générale des Finances publiques + Manuela Mandache + esi.lyon.pamela&dgfip.finances.gouv.fr +58292 + Satish A Shah MD PLLC + Domenic Borro + domenic&gettysburgoncology.com +58293 + Uptime-ICT + Cabu Steven + steven.cabu&uptime-ict.be +58294 + Copper River Seafoods + Eric Mitchell + emitchell&crsalaska.com +58295 + X-Pert Multimedia Solutions Ltd. + Alexander Nikiforov + support&x-pert.tv +58296 + W. Wahli AG + Michael Voser + support&wahli.com +58297 + TRILUX Group Management GmbH + Stefan Scheffler + oid-pen&trilux.com +58298 + KBBI + Jeff Lockwood + jeff&kbbi.org +58299 + hoek.io + Keeley Hoek + keeley&hoek.io +58300 + Deutsche Glasfaser Wholesale GmbH + Thomas Wilhelm + t.wilhelm&deutsche-glasfaser.de +58301 + state grid Zhejiang Electric Power Corporation + zhongpingli + 13777847034&139.com +58302 + Lanao Communication Technology Limited + Michael Tseng + info&lanaotek.com +58303 + North Canyon Medical Center + Jeff Quinton + jeff.quinton&northcanyon.org +58304 + Habilee S.A. de C.V. + Antonio Morillo + antonio.morillo&habilee.com.mx +58305 + Mission Dialysis Access Center, PA + Dalia Cobos + dcobos&missionvascularinstitute.com +58306 + BOUYER INDUSTRIE + David LOUSTAUNAU + david.loustaunau&bouyer.com +58307 + THengineering e.K. + Thomas Hohenberger + th&officethengineering.onmicrosoft.com +58308 + Küberpunk OÜ + Martin Paljak + martin&martinpaljak.net +58309 + 振生形象设计有限公司 (Zhensheng Image Design Co., Ltd.) + 孙广振 (Sun Guangzhen) + 2048735294&qq.com +58310 + Global Data Systems Inc. + John A Hollier + johnh&getgds.com +58311 + FSUE Moscow Endocrine Plant + Eugene Grokhotov + e_grohotov&endopharm.ru +58312 + iTeamChina NETWORK + VEP iTeam + x517138883&gmail.com +58313 + NILE GLOBAL, INC. + Suresh Katukam + suresh&nile-global.com +58314 + Bizer Ridge Enterprises + Registrations Administrator + registrations+pen&bizerridge.com +58315 + Trapeze Switzerland GmbH + Ahmed Elmahgoub + iana.contact&trapezegroup.com +58316 + Bastian Software Solutions + Justin Kaffenberger + jkaffenberger&bastiansolutions.com +58317 + Constitutional Law Center for Muslims in America + Andrew McCullough + amccullough&clcma.org +58318 + Leone Home Healthcare Agency + Andrew McCullough + amccullough&leonehomehealthcare.com +58319 + CMA Flooring and Design + Bader Obeid + bader&cmaflooring.com +58320 + Jayketek Studio Inc. + Jacob Boily + boij.pro&gmail.com +58321 + ZX SOLUTION CORPORATION + Xiao Jianhua + xiao.jianhua1&ztrs.com.cn +58322 + NetLab + Harry P Hilton + admin&cjy666.xyz +58323 + Fixed Misr + Mahmoud Ahmed + info&fedis.com.eg +58324 + ITSumma + Andrey Kislyak + akislyak&itsumma.ru +58325 + Industrial Monitoring & Control Pty Ltd + Patrick Ivory + info&imcontrol.com.au +58326 + ITeam SA + Bogusław Pilek + b.pilek&iteam.pl +58327 + Esino Incubazioni srl + Marco Bruzzechesse + s.neri&digimarksrl.com +58328 + Digimark srl + Simone Neri + s.neri&digimarksrl.com +58329 + DYSTEN SP. Z O.O. 
+ PAWEL KRYSZAK + PKR&DYSTEN.PL +58330 + Gobierno de la Ciudad Autónoma de Buenos Aires + Gustavo Linares + linaresg&buenosaires.gob.ar +58331 + Three Z Printing + Alan Meinhart + alan&threez.com +58332 + Eagle's Flight + Stefan Holvik + stefan.holvik&eaglesflight.com +58333 + T Pieters + Rink Pieters + iana&pietershgv.nl +58334 + Gemeenschap van Wiskunde en Informatica Studenten (GEWIS) + Rink Pieters + iana&gewis.nl +58335 + Capitol Broadcasting Company + Lee Buie + lbuie&wral.com +58336 + Tekkgear + Johnson, Temo + TJohnson&tekkgear.com +58337 + runZero, Inc + HD Moore + support&runzero.com +58338 + Alauda + Wenbo + wbxu&alauda.io +58339 + wellcloud + Bin Yan + yanbin&welljoint.com +58340 + me0x + Teodor MAMOLEA + Teodor.MAMOLEA&me0x.com, me0x.com&gmail.com +58341 + Relib Technolgy + Ramesh Kumar Gupta + rgupta&relib.in +58342 + Blindsight Studios + Henry Aberg + henry.aberg&gmail.com +58343 + AC-Schaerbeek + Sihame Meskine + smeskine&1030.be +58344 + Fr. Lürssen Werft + Jonas Fathi Kopp + jonasfathi.kopp&luerssen.de +58345 + NVL B.V. & Co. KG + Jonas Fathi Kopp + jonasfathi.kopp&nvl.de +58346 + TechTeamer Ltd. + Gábor Tóth + gabor&techteamer.com +58347 + Kyubi System + Joan Salinas + jsalinas&kyubisystem.com +58348 + Navice Consulting + W. Boot + noc&navice.eu +58349 + Navice Consulting + W. Boot + noc&navice.eu +58350 + BinaryBabel LLC + Registry Administration + snmp-ra&mail.binarybabel.net +58351 + KentBank d.d. + Franjo Prgić + karticno&kentbank.hr +58352 + FiberTrade Ltd + Igor Gluzdov + gluzdov&fibertrade.ru +58353 + EURO-Leasing GmbH + Rainer Graf + it-infrastruktur&euroleasing.eu +58354 + The Ince Group + Michael London + itoperations&incegd.com +58355 + SkyFive AG + Andrey Petrosyan + andrey.petrosyan&skyfive.world +58356 + Bankovni identitra,a.s + Filip Hladký + admin&bankid.cz +58357 + Pedro J Penalo MD PA + Pedro J Penalo, M.D.,P.A. + wesmedclinic&hushmail.com +58358 + Goup Co.,Ltd + Sangwoo Jung + swjung78&gmail.com +58359 + Stadtwerke Ahaus GmbH + Michael Paslick + m.paslick&stadtwerke-ahaus.de +58360 + AETEK INC. + Jone Chen + jone.chen&aetektec.com +58361 + Novaya + Mathieu Denuit + md&novaya.io +58362 + NetLAB GmbH + Goekhan Sellur + gsellur&gmail.com +58363 + RTO GmbH + Marc Hesse + m.hesse&rto.de +58364 + Northern Arizona University + ITS Identity and Access Management + its-iam&nau.edu +58365 + Rowantek + Jawwad Hafeez + jawwadh&rowantek.com +58366 + NEPBROADCASTING.COM + Jason Jaskey + jjaskey&nepgroup.com +58367 + Five Mountains Hawaii dba Kipuka O Ke Ola + Kenneth Riff + riff&kipukaokeola.org +58368 + Hangzhou Clounix Technology Limited + Shen Xu + xushen&clounix.com +58369 + Graham Packaging LP + Patrick McNabb + patrick.mcnabb&grahampackaging.com +58370 + ACMOS INC. + Kazunori Hokari + hokari&acmos.jp +58371 + Torpshammars transporter AB + Jimmy Sandström + torpshammarstransporter&gmail.com +58372 + Servolift GmbH + IT Department + edv&servolift.de +58373 + Services TI TECHNIKA IT Services + Colin Stanners + CStanners&gmail.com +58374 + SMTech, Ltd. Co. + Sergei Borisenko + borisenko&spbec-mining.ru +58375 + Jirotech + James Sewell + james.sewell&jirotech.com +58376 + ImageMover MD + Koua Yang + koua&imagemovermd.com +58377 + UVAX Concepts, S.L. + Antonio Royo + a.royo&uvax.es +58378 + Delta Electronics Inc. + Obi Wang + obi.wang&deltaww.com +58379 + International Data Spaces Association e.V. + Sebastian Steinbuss + sebastian.steinbuss&internationaldataspaces.org +58380 + Tailor, inc. 
+ Misato Takahashi + mistat&tailor.tech +58381 + ТОО "Юникс-Beverage" (Unix-Beverage LLP) + Alexander Lazutin + alexl&zvd.kz +58382 + CONSEIL DEPARTEMENTAL DE LA HAUTE-MARNE + VIAL Jérôme + jvial&haute-marne.fr +58383 + CONSEIL DEPARTEMENTAL DE LA HAUTE-MARNE + VIAL Jérôme + jvial&haute-marne.fr +58384 + Hanhaa GenX + Felipe Luccas + felipe&hanhaa.com +58385 + TELECALL + Engenharia + tic&telecall.com +58386 + N3 + Nathan Nelson + nrnelson&gmail.com +58387 + Canyon Aeroconnect + Bill Morgan + bill.morgan&canyonaero.com +58388 + Shenzhen RAKwireless Technology Co.,Ltd. + Yutao Xu + yutao.xu&rakwireless.com +58389 + Dawson + Dawson IT + it&dawson.com +58390 + TABControl Tecnologia LTDA + Antonio Taranto Goulart Filho + antonio.taranto&tabcontrol.com.br +58391 + MOVILMAX GUATEMALA, SOCIEDAD ANONIMA + David Ortiz + dortiz&movil-max.com +58392 + IdomSoft + Gabor Krisztian Szasz + gabor.szasz&idomsoft.hu +58393 + MDC-Service Wittenburg GmbH + Steffen Wittenburg + steffen.wittenburg&mdc-service.de +58394 + Mer-Bach.com + Chris Whitfield + merddyin&yahoo.com +58395 + Sachin Kumar + Sachin Kumar + sachinkumar93168&gmail.com +58396 + Eriksmåla Förvaltning AB + Niklas Sjöfors + niklas.sjofors&eriksmalaforvaltning.se +58397 + Letko Brosseau & Associates + Daniel Brosseau + daniel&lba.ca +58398 + Fidelix Oy + Esa Laakso + esa.laakso&fidelix.com +58399 + Genmon + Jason Yates + jason.yates&gmail.com +58400 + Viet Dragon Securities Corporation + NGO QUANG THONG + quangthong2405&gmail.com +58401 + Accelecom LTD. + Wanli Lv + tech_support&accelecom.com +58402 + ZRALNET + Lars Kriesten + hostmaster&zral.net +58403 + Sichuan Taifu Ground Beidou Technology Co., Ltd. + Yang Fan + yangfan&groundbeidou.com +58404 + CONFIRMA + Alejandro Sánchez Grangé + asanchezg&datamex.com.py +58405 + Innsbrucker Kommunalbetriebe Aktiengesellschaft + Philipp Meyer + snot&ikb.at +58406 + mdtspace.dmz + Mathias Dumont + mathias.dumont&mdtspace.net +58407 + Aulss55 + Nicolas Cartier + fisic87&yahoo.it +58408 + Ciraulo Wireless + Samuel Ciraulo + sam.ciraulo&outlook.com +58409 + IRIS CRM + Serhii Kaidalov + sergey&iriscrm.com +58410 + conveyxhome + Joe Galloway + conveyor&conveyxhome.com +58411 + Lund + Evan Selleck + EVANS&LUNDVT.ORG +58412 + Ringtail Security + Paul Duncan + duncan.f.paul&gmail.com +58413 + ip-studio + Menetrier Gregory + gregory.menetrier&ip-studio.com +58414 + ioTec Solutions + Stephen Irons + stephen.irons&iotec.space +58415 + JSC YIT Saint-Petersburg + Airat Mamin + it&yitrus.ru +58416 + Paragon Application Systems, Inc. + Stephen Reece + it&paragonedge.com +58417 + NAWC-WD + Justin Olson + justin.p.olson4.civ&us.navy.mil +58418 + Addon Networks + Ray Hagen + ray.hagen&addonnetworks.com +58419 + BlockOps + Corey Osman + opselite&blockops.party +58420 + Ciklet Iletisim Ltd. Sti. + Cemil Alpay Sunnetci + info&ciklet.com.tr +58421 + netops global + Alptekin Sunnetci + info&netops.com.tr +58422 + TOKAI Communications Corporation, IoT Dept + Daisuke Sato + SFM_INFO&tokai-grp.co.jp +58423 + Baza Life Co Ltd + Nurliyev Dashgyn + dashgynvn&gmail.com +58424 + Technica Engineering GmbH + Dr. Lars Voelker + iana-pen-contact&technica-engineering.de +58425 + AEV Broadcast srl + Francesco Barone + francesco.barone&aev.eu +58426 + Thaihong Plastic Industry Co., Ltd. + Komson Lertboonyapun + komson&thaihong.co.th +58427 + Cruise, LLC + Kurt VonEhr + kurt.vonehr&getcruise.com +58428 + Adwia Pharmaceuticals + ITD + it&adwia.com +58429 + IPCOM Ltd + Dmitriy Schekin + dshekin&rit.ua +58430 + Stream Circle, a. s.
+ Josef Vasica + vasica&streamcircle.com +58431 + CJY Internet + Theresa H Chavez + cjy666vip&163.com +58432 + Aptic GmbH + Thomas Diethelm + thomas.diethelm&aptic.ch +58433 + mob.id + Frans Bolk + frans.bolk&mob.id +58434 + EuleO + Dipl.-Ing. Rainer Sonnabend + uhu&euleo.de +58435 + Meteorcomm, LLC + Benjamin Fleming + bfleming&meteorcomm.com +58436 + Kodmyran AB + Erik Lundgren + erik&kodmyran.se +58437 + City Colleges of Chicago + Julio Caban + jcaban6&ccc.edu +58438 + Lynden Incorporated + Dmitry Koldunov + sysadmin&lynden.com +58439 + Quality Living Center Co., Ltd. + Komson Lertboonyapun + komson&qlc.co.th +58440 + SmartVox + PEN coordinator + iana&smartvox.com +58441 + Bitnetix + PEN Coordinator + iana&bitnetix.com +58442 + KANDENKO CO.,LTD + Shoichi Saruwatari + kdk-catvhe-tech&kandenko.co.jp +58443 + Addiko Bank d.d. + Stanko Košir + stane.kosir&addiko.com +58444 + University School of Physical Education in Krakow + Kajetan Mis + kajetan.mis&awf.krakow.pl +58445 + ondeso GmbH + Peter Lukesch + peter.lukesch&ondeso.com +58446 + Reinhold Keller GmbH + Michael Croessmann + michael.croessmann&reinhold-keller.de +58447 + Uno Informatica S.R.L. + Gianni Valeri + gianni.valeri&unoinformatica.it +58448 + Print Tracker + Clark McCauley + clark.mccauley&printtrackerpro.com +58449 + ESSECCA GmbH. + Michael Reiner + m.reiner&essecca.at +58450 + dmtp llc + Daniel TePaske + admin&dmtp.llc +58451 + Tianyi Network + Christopher Zhang + lty&luotianyi.dev +58452 + Stadtwerke Mühlhausen GmbH + Silvio Ulbrich + it-service&stadtwerke-muehlhausen.de +58453 + City of New Orleans + LaShonda Greenup + systems_team&nola.gov +58454 + Highway9 Networks + Debashis Basak + debashis&highway9networks.com +58455 + The School of Banking and Management in Cracow + Kajetan Mis + mis&wszib.edu.pl +58456 + tuxcademy Project + Anselm Lingnau + anselm&tuxcademy.org +58457 + Ikeja Wireless + Dieter Lubbe + dieter&ikeja.co.za +58458 + Ethernity + Oded Bergman + odedb&ethernitynet.com +58459 + Philotic.cloud + Mark Gilbert + mark.gilbert&m4kr.net +58460 + EMnify GmbH + Viktoria Krolikowski + viktoria.krolikowski&emnify.com +58461 + CWJ Power Electronics + Gilberto Pascoal + mib&cwj.pt +58462 + Kralupol a.s. + Martin Hofman + it&kralupol.cz +58463 + Amadon LLC + Stanislav Chetverikov + 1141&amadon.ru +58464 + E-Comm Emergency Communications for British Columbia Incorporated + Stephen Cluff + Stephen.Cluff&ecomm911.ca +58465 + Leibniz-Institut für Deutsche Sprache + Oliver Schonefeld + schonefeld&ids-mannheim.de +58466 + Nestor Nestor Diculescu Kingston Petersen + Cristian Nastase + cristian.nastase&nndkp.ro +58467 + Aitken Industries + Adrian Aitken + snmp&aitken.industries +58468 + Helmholtz-Zentrum fuer Infektionsforschung GmbH + Dietrich Volkov + dietrich.volkov&helmholtz-hzi.de +58469 + Atom Computing, Inc. + Antonia Jones + toni&atom-computing.com +58470 + Elekto Produtos Financeiros + JP Negri + negri&elekto.com.br +58471 + The Poor Man's Datacenter + William Hua + domainadministrator&thepoormansdatacenter.com +58472 + ZPrAE Sp. z o.o. + Krzysztof Jakubczyk + krzysztof.jakubczyk&zprae.pl +58473 + Dominion Packaging + Jim Joyner + joynerj&dompkg.com +58474 + Leibniz Institute for Regional Geography (IfL) + Dirk Beckert + oid&leibniz-ifl.de +58475 + Cerner Corporation + Josh Diaz + jdiaz&cerner.com +58476 + Alpha Assurances Inc.
+ Denis Carignan + admin&alphaassurances.com +58477 + Zone de Police de Huy + Martinez Gabriel + steven.cabu&uptime-ict.be +58478 + Toshiba Quantum Technology + Andrew Shields + enquiries&crl.toshiba.co.uk +58479 + MEWEM + Stephan Haughton + sokrates&haughton.de +58480 + genZ Energy Pty Ltd + Kevin Fairman + kevin.fairman&genz.com.au +58481 + IIS DEL COSSATESE E VALLE STRONA + Reparto Tecnico + assistenzatecnica&liceocossato.eu +58482 + tonekids.com + Paul N Zaremba + zaremba&tonekids.com +58483 + Shenzhen Link-All Technology Co., Ltd + Miss Apple Wang + apple.wang&link-all.com +58484 + National Institute of Technology,Tiruchirappalli + G.Girish Kumar + girishkumar&nitt.edu +58485 + PT. Rajakon Teknik + Septa Muhammad Rivaldy + rnd&rajakon.com +58486 + PT. Teknologi IoT Indonesia + Aida El Majnaoui + aida.elmajnaoui&iot-indonesia.com +58487 + ProHacktive + Ben Mz + admins&prohacktive.io +58488 + EvoLogics GmbH + Francisco Bustamante + bustamante&evologics.de +58489 + Spear Innovations Oy Ltd. + Antti-Ville Nauha + av.nauha&spear.fi +58490 + Care Oncology Center + Maria Marin + mmarin&careonc.com +58491 + MAPLE HOME GARDEN COMPANY LIMITED + Huy Ly + huy.ly&maplehomegarden.com +58492 + Stemmer Imaging AG + Moritz Maier + de.it&stemmer-imaging.com +58493 + AFFILIATED COMPANY "NAFTOGAZBEZPEKA" OF NATIONAL JOINT STOCK COMPANY "NAFTOGAZ OF UKRAINE" + Roman Sobol + soc&naftogazbezpeka.com +58494 + sector7 GmbH + Peter Homeister + info&sector7.eu +58495 + Omnisens SA + Cyril Naimi + cyril.naimi&omnisens.com +58496 + Raincubic + ChenhaoShen + shenchenhao1234&hotmail.com +58497 + Stadt Bern Testlab Lernende + Christoph Siegrist + ks_server&bern.ch +58498 + Victorian Building Authority + Sean Matherson + sean.matherson&vba.vic.gov.au +58499 + Alphatron Medical Systems B.V. + Addy Klos + a.klos&alphatronmedical.com +58500 + ToCenTek + yuhaichuang + yuhaichuang&hztct.net +58501 + ExpertSolutions + Dmitriy Khizhinskiy + dkhizhinskiy&expertsolutions.ru +58502 + Fernandes Contabilidade + Fernandes Neto + contato&fernandescontabilidade.com +58503 + Disrupt Tech Labs + Chakshu Tandon + chakshu&disrupttechlabs.com +58504 + Beijing Yuanshan Networks + Wenfeng Liu + liuwf&ysxt.com.cn +58505 + PERCEIG AI SYSTEMS (OPC) PVT LTD + DEEPAK MADHAVAMANDIRAM RAJAN + deepak&perceig.com +58506 + The Sapling Company, Inc. + Dmytro Fedorchenko + dmytrof&sapling-inc.com +58507 + JSC "IB-Reform" + Sergey Shcherbakov + info&ib-reform.ru +58508 + ISEN Yncrea OUEST + Florent KARSENTY + florent.karsenty&yncrea.fr +58509 + INRAE + Jean-Damien POGOLOTTI + jean-damien.pogolotti&inrae.fr +58510 + ICHEC + De Foor Julie + steven.cabu&uptime-ict.be +58511 + De Foor Julie + Cabu Steven + steven.cabu&uptime-ict.be +58512 + Ably S.r..l. + Gaetano Grasso + tecnico&ably.it +58513 + Computational Imaging Systems, ITI, University of Stuttgart + Steffen Kieß + steffen.kiess&cis.iti.uni-stuttgart.de +58514 + Global Packaging, Inc. + Myron Kolodiy + admin&glopkg.com +58515 + HP Inc. + Jeff Jeansonne + jeff.jeansonne&hp.com +58516 + Vertigo Security + Mike Liddekee + mike&vertigosecurity.net +58517 + Docaposte + GHERNATI Mekki + mekki.ghernati&docaposte.fr +58518 + Thales URS + Roshan Jesuratnam + roshan.jesuratnam&thalesgroup.com +58519 + ZILLNK + Dellen Huang + nan.huang&zillnk.com +58520 + van Gelder Groep + Jaap Reijer Ottens + jrottens&vangelder.com +58521 + keyONE GmbH + Christian Schorr + c.schorr&keyONE.one +58522 + OSA Systeme GmbH + Ulf Pambor + up&osasysteme.de +58523 + COSMED S.r.l.
+ Paolo Brugnoli + paolo.brugnoli&cosmed.com +58524 + myshoenetwork + Carl Lindner + c.lindner&mailbox.org +58525 + PSC Vodec + Rob Lackey + rob.lackey&psc-vodec.com +58526 + pdhouse.store + Pablo Dominguez + cobroscompulsivos&gmail.com +58527 + Ginger Consulting + Martin Buckley + martin&gingerconsulting.com +58528 + StreamVX Sp. z o.o. + Szymon Karbowski + contact&streamvx.com +58529 + Antimatter + Michael Andersen + michael&antimatter.io +58530 + SolarPower + Nino Albrecht + mail01&schnickersbend.net +58531 + Biznes Systema Telecom, LLC + Roman Mironenko + accounts&bisyst.ru +58532 + Rofim + Stéphane GINER + stephane&rofim.doctor +58533 + Rasheda overseas limited + Roufique hossain + roufique2&gmail.com +58534 + The West African Examinations Council, Banjul + Muhammed Lamin Mbowe + palamin.mbowe&waecgambia.org +58535 + NASES + Juraj Rehak + juraj.rehak&nases.gov.sk +58536 + Služba Vlade Republike Slovenije za digitalno preobrazbo + Mark Boris Andrijanič + gp.sdp&gov.si +58537 + SEP2 + Mike Goodwin + mike.goodwin&sep2.co.uk +58538 + SecurityGen Czech, s.r.o + Artem Shpynov + artem.shpynov&secgen.com +58539 + Tenerity Limited + Amrik Gill + Amrik.Gill&Tenerity.com +58540 + Cobira ApS + Allan Dickow + ad&cobira.co +58541 + ring-0.net + Sebastian Büttner + hostmaster&ring-0.net +58542 + Universitätsklinikum Tübingen, Abteilung Sportzmedizin + Clemens Plank + clemens.plank&med.uni-tuebingen.de +58543 + HOSTVM + Ivan Klimov + i.klimov&pvhostvm.ru +58544 + Nordomatic + Michael Nyman Schmidt + mns&nordomatic.dk +58545 + FOXESS CO., LTD. + Michael Zhu + foxrd&fox-ess.com +58546 + muellerpublic.de + Erwin Müller + erwin&muellerpublic.de +58547 + Nowina Solutions + Christophe Labbé + info&nowina.lu +58548 + CHINESE METHODIST SCHOOL (NORTH POINT) + WONG HAU KIT MATTHEW [AITH] + whk&cmsnp.edu.hk +58549 + CHINESE METHODIST SCHOOL (NORTH POINT) INFORMATION TECHNOLOGY DEPARTMENT + WONG HAU KIT MATTHEW [AITH] + whk&cmsnp.edu.hk +58550 + Justin Schlatter + Justin Schlatter + justin&justinschlatter.com +58551 + Weld County School District RE-3J + Justin Schlatter + justinschlatter&re3j.com +58552 + Stellar Industries + John L Steenblock + jsteenblock&stellarindustries.com +58553 + Damanesign + Hamdaoui Zouhair + zouhair.hamdaoui&damanesign.ma +58554 + Gamache, Inc. + Mark R. Gamache + mark&markgamache.com +58555 + Union Dynamic, Inc. + Union Dynamic, Inc. ATTN Michael McMinn + admin&uniondynamic.com +58556 + Franklin City Schools + Craig Irgens + techdept&franklincityschools.com +58557 + netplay GmbH + Frederic Kromer + freddy&netplay-it.de +58558 + Leonardo Bracco + Leonardo Bracco + leonardo&leonardobracco.com +58559 + ShenZhen Lux Power technology Co., LTD + Long Yang + long.yang&luxpowertek.com +58560 + Odessa Missionary Fund + Jared Cheesman + humantraffickingspecialist&gmail.com +58561 + De Greef's Wagen-, Carrosserie- en Machinebouw B.V + Paul Sprang + psprang&greefa.nl +58562 + Sumitomo Bakelite Co., Ltd. + Tomoki Matsubara + matsubara-tomoki&sumibe.co.jp +58563 + XANTRONIX Industrial Heavy Manufacturing Concern + Jóna Jónsdóttir + iana-pen&xantronix.com +58564 + AVIZ NETWORKS + Vishal Shukla + vishal.shukla&aviznetworks.com +58565 + Stadtwerke Annaberg-Buchholz Energie AG + Uwe Kreher + edv&swa-b.de +58566 + OCSIN – République et Canton de Genève + Jean-Marc Mottet + jean-marc.mottet&etat.ge.ch +58567 + Questgates LTD + Steve Holland + steve.holland&questgates.co.uk +58568 + PT. 
Indoreka Jaya Wutama + Iskandar Iman Soeriyanto + iskandar.tong&gmail.com +58569 + NCHAIN UK LIMITED + Ajay Patel + software&nchain.com +58570 + Kalil Bottling Co. + Aaron Clay + aclay&kalilbottling.com +58571 + Dualz Solutions + Stephane van Hardeveld + stephane&dualz-solutions.nl +58572 + Christoph Meyer + Christoph Meyer + christoph.meyer.2006&gmail.com +58573 + Peak Data Networks, Inc. + Terri Beideman + compliance&peakdatanet.org +58574 + BSD HomeLab Project + Aisha Tammy + bsd-homelab&bsd.ac +58575 + Pnarp.com + Phillip Norbert Årp + enterprise-number&pnarp.com +58576 + Phelps County Regional Medical Center + Daniel Dunstedter + ddunstedte&phelpshealth.org +58577 + Red Lily + Keaton Alexander Guger Lair + keatonaglair&redlilyinter.net +58578 + Keaton Alexander Guger Lair + Keaton Alexander Guger Lair + me&kagl.me +58579 + Mercury Limited + Peter Bach + peter.bach&mercury.co.nz +58580 + Gamania Digital Entertainment Co., Ltd. + Hortin Wang + lic_info&gamania.com +58581 + iCloudShield Security Technology Co.,Ltd. + Guanwen Chen + it&icloudshield.com +58582 + Momenta + Cui Guangxu + cuiguangxu&momenta.ai +58583 + Association Consortium Standardization in Lab Automation (SiLA) + Daniel Juchli + daniel.juchli&sila-standard.org +58584 + Elpitech LLC + Konstantin Kirik + konstantin.kirik&elpitech.ru +58585 + Contact Software GmbH + Frank Patz-Brockmann + fp&contact.de +58586 + JMARK Business Solutions, Inc. + Thomas Douglas + TechOpsReg&jmark.com +58587 + Deutsche Edelstahlwerke Specialty Steel GmbH & Co. KG + Thomas Rakowski + thomas.rakowski&dew-stahl.com +58588 + Arteixo Telecom S.A. + Daniel Alonso López Grandal + dlopez&arteixotelecom.com +58589 + TrueConf + Aleksandr Kuznetsov + kuznetsov&trueconf.com +58590 + Antenna Entertainment + Szabolcs Vertesi + cee-itsupport&antenna-entertainment.com +58591 + US House of Representatives + David Waller + david.waller&mail.house.gov +58592 + Keller Rohrback + Casey Allard + itnotify&kellerrohrback.com +58593 + Keller Rohrback + Casey Allard + itnotify&kellerrohrback.com +58594 + ClusterPower SRL + Vladimir Ester + vladimir.ester&clusterpower.ro +58595 + Aplimedia + Nour Hatoum + n.hatoum&aplimedia.net +58596 + VIA Motors, Inc. + James Perriton + oid&viamotors.com +58597 + Hytera Austria GmbH + Mario Fenz + mario.fenz&hytera-austria.eu +58598 + Merlin Cinematic, Inc. + Curtis Lee Fulton + curtis&merlin.video +58599 + NIPPON TECHNO LAB INC. 
+ Madan Pandey + madan&ntl.co.jp +58600 + ZPP Ingenieure AG + Hristo Hristov + hh&zpp.de +58601 + F+ Networks + Gorokhov Ilya + igorokhov&fplusnetworks.ru +58602 + Artaker Computersysteme GmbH + Martin Boeck + m.boeck&artaker.at +58603 + Acredita Assessment SpA + Alexander Osorio + chile.acredita&gmail.com +58604 + Sundwiger Messingwerk GmbH + Alexander Severing + alexander.severing&sundwiger-mw.com +58605 + Bundesanstalt für Arbeitsschutz und Arbeitsmedizin + Torsten Agerholm Laursen + Z1.3&baua.bund.de +58606 + Nictiz + Alexander Henket + henket&nictiz.nl +58607 + EZ Solutions Inc + Nate Folker + nate&ezsolutions.us +58608 + zitrotec + Falk Rismansanj + info&zitrotec.de +58609 + FoneSys + Mukunda Haveri S + mukunda&fonesys.in +58610 + NodeWeaver Corporation + Carlo Daffara + carlo.daffara&nodeweaver.eu +58611 + Rhein-Kreis Neuss + Timo Willeke + tuiv&rhein-kreis-neuss.de +58612 + Cynte Technologies + Ali Tunc + info&cynte.com +58613 + CNRS DR13 + Julien Garnier + julien.garnier&dr13.cnrs.fr +58614 + ARION CONTROLS + Ariel Garcia + ariel.garcia&arion-controls.com +58615 + Meduit + Bryan Stamm + bryan.stamm&meduitrcm.com +58616 + Nokia Shanghai Bell + Weihong Chen + weihong.chen&nokia-sbell.com +58617 + mediaBEAM GmbH + Ralph Seichter + ralph.seichter&mediabeam.com +58618 + Ideal Image Development Corporation + Vinh Dang + admin&idealimage.com +58619 + Setl Group + Aleksandr Li + admins&setlgroup.ru +58620 + Shengye Wang + Shengye Wang + iana&shengye.wang +58621 + The University of Tokyo + SATO, Hiroyuki + schuko&satolab.itc.u-tokyo.ac.jp +58622 + Siqens GmbH + Matthias Wössner + woessner&siqens.de +58623 + Beijing EasySky Technology Ltd. + Zhijian HAN + zhijian.han&easysky.com.cn +58624 + Kortel + Tomasz Sienicki + info&kortel.pl +58625 + Aptic Aktiebolag + CIO Aptic + cio&aptic.net +58626 + ASSE + Sebastian Nuñez + sebastian.nunez&asse.com.uy +58627 + JaggedXY Inc. + Greg Burdett + greg.burdett&jaggedxy.com +58628 + Glasgow Clyde College + Graeme MacCormick + gmaccormick&glasgowclyde.ac.uk +58629 + ITAutom8 + Samuel Riley + riley&itautom8.com +58630 + C9-OBSCURITY.CO.UK LAB + T Joynson + tim.joynson&yahoo.com +58631 + Scientific Software and Systems Limited + Paul Platen + Paul.Platen&sss.co.nz +58632 + Vacanceselect Group + SSI + ssi&vacanceselect.com +58633 + Spectralbeam Ltd + Alan Tipper + alan&spectralbeam.com +58634 + aity AG + John Henz + john.henz&hpbsc.ch +58635 + Almi GmbH + Almi Support + support&almi.at +58636 + Aegis Power Systems, Inc. + Aj Uhlik + auhlik&aegispower.com +58637 + CJSC "Interdnestrcom" + Mansur Dolgoarshinnykh + mansur&admin.idknet.com +58638 + HATSec LLC + Nicholas Hunt + nick&hatsec.com +58639 + sematicon AG + Michael Walser + admin&sematicon.com +58640 + SMC Gateway Ltd + Kevin Golding + kevin.golding&smc-gateway.com +58641 + Borussia Dortmund GmbH & Co. KGaA + Marcel Betz + marcel.betz&bvb.de +58642 + ExtWeb + Murty Rompalli + govadmin2&extweb.org +58643 + Nagano Japan Radio co Ltd + Kenji Shimada + shimada.kenji2&jrc.co.jp +58644 + DoveTech Consulting + Joshua Taylor + josh.taylor&lonnyfwd.e4ward.com +58645 + BearingPoint GmbH + Iana Responsible + ianaresponsible&bearingpoint.com +58646 + milch & zucker - Talent Acquisition & Talent Management Company AG + Arne Groskurth + arne.groskurth&milchundzucker.de +58647 + Pathec + Patricio Cobelo + patricio.cobelo&outlook.com +58648 + Motional AD Inc + Deepak Kumar + deepak.kumar&motional.com +58649 + ZIONCOM (Shenzhen) Technology Ltd. 
+ Wang Xing + wangxing&carystudio.com +58650 + Proactive MD + David Van Laecke + dvanlaecke&proactive.md +58651 + NETxAutomation Software GmbH + Paul Furtak + office&netxautomation.com +58652 + University Medical Center Groningen + Frank Beckers + f.a.beckers&umcg.nl +58653 + robinkielgass + Robin Kielgaß + robin_kielgass&live.de +58654 + BelowZero + Raul Muñoz Pla + raul.munoz&belowzero.es +58655 + IRAM - Institut de Radioastronomie Millimétrique + Jean-Yves MICHAUD + michaud&iram.fr +58656 + octetwave GmbH + Luca Friedrich + admin&ops.octetwave.de +58657 + Shenzhen Megarevo Technology Co.,Ltd + teddy.xiong + xiongzhixue&megarevo.com.cn +58658 + CjyDev + Abbie R Gantt + cjydev&net.tnn.cc +58659 + Devin Nakamura + Devin Nakamura + devin&devinnakamura.com +58660 + Odessa Medical Enterprises + Janette Garcia + jgarcia&odessamd.com +58661 + Oakham School + Mike Dunderdale + software&oakham.rutland.sch.uk +58662 + Sol Plaatje Municipality + Wikus Gouws + wikusg&solplaatje.org.za +58663 + New Brunswick Community College (NBCC) + Joey Leclerc + joey.leclerc&nbcc.ca +58664 + PNETWORKS ELEKTRONIK BILISIM VE VERI HIZMETLERI A.S. + EMINE ILHAN + emine.ilhan&pnetworks.com.tr +58665 + RTM Innovation + Anna Bzymek + mkotasinski&rtminnovation.pl +58666 + Petco + Miguel Candelas + DataCenterServices-Wintel&PETCO.com +58667 + Remote Spark Corp. + Walter Wang + yongtao&toremote.com +58668 + Transaction Junction + Johan v.d Merwe + techadmin&switch.tj +58669 + Federal State Budgetary Institution “Centre for Strategic Planning and Management of Biomedical Health Risks” of the Federal Medical Biological Agency + Roman Ivanov + ib&cspfmba.ru +58670 + Kiss Kornel + Kiss Kornel + kornel&c8tv.co.hu +58671 + Common Fate + Chris Norman + chris&commonfate.io +58672 + Leibniz-Institut fuer Sonnenphysik (KIS) + Peter Caligari + edv&leibniz-kis.de +58673 + Sertrix GmbH & Co. KG + Alexander Becker + alexander.becker&sertrix.de +58674 + Coto CICSA + Fernando Ayail + seguridadinfo&coto.com.ar +58675 + zhiyang + luguoliang + 78585713&qq.com +58676 + Wolcott Public Schools + Jeremy DeRoy + jderoy&wolcottps.org +58677 + CYRO + Roger Kirchhofer + admin&cyro.ch +58678 + Bocabrowardtechexperts LLC + James Gibbons + bocabrowardtechexperts&outlook.com +58679 + Woodridge School District 68 + Mike Feeley + feeleym&woodridge68.org +58680 + Muzeum Jozefa Pilsudskiego w Sulejowku + Szymon Kowalczyk + pkit&mjpws.pl +58681 + Integrale IT + Szymon Janczak + ldap&integraleit.pl +58682 + Fisher Titus Medical Center + Matthew Wells + mwells&ftmc.com +58683 + LACO Technologies Inc. + Anson Luck + ansonl&lacotech.com +58684 + Neurozen Inc. + Horsng Junn + kai&neurozen.ai +58685 + Refulgent + Marius Kohsiek + javanaut&refulgent.de +58686 + PPL Electric Utilities + Matthew Wallace + mwwallace&pplweb.com +58687 + PPL Electric Utilities + Matthew Wallace + mwwallace&pplweb.com +58688 + Österreichisches Rotes Kreuz Landesverband Salzburg + Werner Maisl + support&s.roteskreuz.at +58689 + IT-Reset + Sergei Kalinin + pen&it-reset.com +58690 + MONDO POWER PTY LTD + Kirstie Wu + kirstie.wu&mondo.com.au +58691 + b-plus GmbH + Christian Marchl + christian.marchl&b-plus.com +58692 + ABYSS s.r.o. + Robo Murin + robo&abyss.sk +58693 + Stiegelmeyer GmbH & Co. 
KG + Dennis Vorschuetz + dennis.vorschuetz&stiegelmeyer.com +58694 + Synthetic Sistemas Ltda + Thiago Nascimento + thiago&synthetic.works +58695 + Ultimate Visual Solutions Limited + Phill Whitehead + phill&ultimatevs.co.uk +58696 + David Timber's Private Infrastructures + David Timber + david&snart.me +58697 + Kyodo KY-TEC Corporation + Masaki Ichino + ichino&ky-tec.co.jp +58698 + Bogon Flux Metrology + Kenneth Hartman + domain001&bogometer.com +58699 + Security101 + David Yack + dyack&security101.com +58700 + Institute for Social Policy & Research Development Pvt. Ltd + Manoj Kumar Bhatta + infoisprd&gmail.com +58701 + floLIVE + Gamly Romano + gamly.romano&flolive.net +58702 + NOUVEAL + Anne Thibaudon + athibaudon&nouveal.com +58703 + Peter Palfi + Peter Palfi + peterpalfi5&gmail.com +58704 + Hill + Mikko Korpelainen + mikko&kukkula.net +58705 + PQD + Pol-Quentin DUPONT + IANA&pqd.fr +58706 + DiCon Fiberoptics, Inc. + Kyle Kovacs + dfi-dept-desktopsupport&diconfiberoptics.com +58707 + Hangzhou Eagle Cloud Security Technology Inc + Xiaojun Li + lixiaojun&eagleyun.com +58708 + Relaycorp, Inc. + Gus Narea + gus&relaycorp.tech +58709 + ESSILORLUXOTTICA + DEFOSSEZ Etienne + etienne.defossez&essilor.com +58710 + IgniSign + Julien Jenoudet + julien&ignisign.io +58711 + Candy Horses LLC + Caramel Drop + caramel&candyhorses.social +58712 + Martin Winter + Martin Winter + iana&winter-martin.de +58713 + SANEF + SANEF + postmaster&sanef.com +58714 + NC Systems GmbH + Aniket Yeragi + aniket.yeragi&nc-systems.de +58715 + Nebraska Legislature + Jim Sheets + jsheets&leg.ne.gov +58716 + Kooima Company + Brent Van Oort + brentvo&kooimacompany.com +58717 + ActiveDirectoryKC.NET + ActiveDirectoryKC.NET Administrator + ActiveDirectoryKC&gmail.com +58718 + Boariu Technology SRL + Florin Boariu + florin.iana&rootshell.ro +58719 + Stadtwerke Haldensleben GmbH + Uwe Ahrend + uwe.ahrend&swhdl.de +58720 + Orthopedic and Sports Medicine Specialist of Green Bay, SC + Joan Johnson + jjohnson&osmsgb.com +58721 + it-economics GmbH + Martin Winter + sysadmin&it-economics.de +58722 + Maximilian Kapra + Maximilian Kapra + maximilian&kapra.de +58723 + TrusAuth Inc + Liu Yuanchen + levimarvin&icloud.com +58724 + Monban Project + Magnus Kaiser + monban&4xoc.com +58725 + Unassigned + Removed 2022-04-12 + ---none--- +58726 + CIS + Nikolaos Angelidis + nick.angelidis&hotmail.com +58727 + Traverse Technologies Australia Pty Ltd + Mathew McBride + matt&traverse.com.au +58728 + Mellon + Mikko Korpelainen + mikko&mellon.fi +58729 + Pangealand SL + Pau Mestre + pau.mestre&exoticca.com +58730 + XH Smart Tech + Jensen Peng + jensen.peng&xhsmartech.com +58731 + RABHU PROJECTS + SIMPHIWE ABRAM RABHU + simphiweabram&gmail.com +58732 + BASS GmbH + Christian Engel + christian.engel&bass-tools.com +58733 + Hunan GCE Technology Co.,Ltd + Hui Tang + 9810639&qq.com +58734 + Wireless-CAT LLC + Evgeniy Manachkin + support&wi-cat.ru +58735 + DIEFFENBACHER GMBH Maschinen- und Anlagenbau + Dirk Reimold + Dirk.Reimold&dieffenbacher.de +58736 + Seplat Energy + Chijioke Amaefuna + camaefuna&seplatenergy.com +58737 + Greenfire Resources Operating Corporation + Joel Francisco + jfrancisco&greenfireres.com +58738 + haoyufumaoyi + congyufei + 185013119&qq.com +58739 + BHL Building Health Lab UG + Alvaro Valera Sosa + a.valera.sosa&buildinghealth.eu +58740 + Softwerke Magdeburg e.V. 
+ Malik Mann + admin&softwerke.md +58741 + The General Authority for State Registration of Mongolia + Tungalag Ochirsukh + esign_mongolia&burtgel.gov.mn +58742 + ETHIAS + Laurent GREVESSE + laurent.grevesse&nrb.be +58743 + SIA "TELESTRIDER" + Jekaterina Stehnovska + services&telestrider.com +58744 + zerotrustlab + Graye Holder + graye_holder&yahoo.com +58745 + VASSILI LEONOV LML + VASSILI LEONOV + vleolml&mail.ru +58746 + AMC Engineers + Keith Confer + kconfer&amc-engineers.com +58747 + Stricker Dienstleistungs GmbH + Robin Steimar + hotline&stricker-gruppe.de +58748 + SwarmFarm Robotics Pty Ltd + Andrew Lipscomb + andrew.lipscomb&swarmfarm.com +58749 + 贵州还破云科技有限公司 (Guizhou Huanpoyun Technology Co., Ltd.) + 吴川 (Wu Chuan) + 1143753121&qq.com +58750 + Haproxy Technologies, LLC + Michael Paulete + Network.management&haproxy.com +58751 + TallPine Technologies, Inc. + Jerold Sampson + sampson&tallpine.com +58752 + KCF Technologies, Inc. + IANA Registration Team + iana&kcftech.com +58753 + CERTISURE CERTIFICATION + Florian de Vaulx + florian&certisure.com +58754 + Communications & Power Industries, LLC + Michael Setto + michael.setto&cpii.com +58755 + Hut 8 + James Bothe + james.bothe&hut8mining.com +58756 + Civil Aviation Authority of New Zealand + Charlie Shi + Charlie.Shi&caa.govt.nz +58757 + Capx Nederland B.V. + Florian van Oudgaarden + support&capxnederland.nl +58758 + Tecalis Software S.L. + Alberto Jose Campos Viqueira + alberto.campos&tecalis.com +58759 + nodusTech + JAMES SCHOLL + info&nodus-tech.com +58760 + nodusGroup, Inc. + JAMES SCHOLL + info&nodus-group.com +58761 + TAIWAN MOBILE CO., LTD. + Jonathan Dan + jonathandan&taiwanmobile.com +58762 + Tech for the People Inc + Brian Dube + support&bdubeit.com +58763 + Beijing Tianchuang KaiRui science and Technology Co., LTD + wangzhu + wangzhu0112&163.com +58764 + Jim Goodall + Jim Goodall + jim.goodall&blueyonder.co.uk +58765 + Nanometrics + David Easton + davideaston&nanometrics.ca +58766 + Advisicon, Inc. + Kevin Schmitt + kevin.schmitt&advisicon.com +58767 + Aguas da Regiao de Maputo, SA + Boris Wiethoff + boris&adem.co.mz +58768 + Affiliated Podiatrists of South Jersey, Ltd. + Susan Ippolito + precerts&footcarecenters.net +58769 + Votez Cthulhu + Jérémie ANDRÉI + 5mpwki&lu6.fr +58770 + kplola + kevin KANGNI + mail&kplola.fr +58771 + Widenorth AS + Helge Fanebust + hfanebust&widenorth.com +58772 + OpenFIPS201 + Kim O'Sullivan + kim&openfips201.org +58773 + IONOTRONICS Corporation + Sasan Ardalan + sasan&ionotronics.com +58774 + GMTO Corp + Sam Chan + oid.admin&gmto.org +58775 + Shengzhen Enatel Electronics Co.,Ltd + TangXing + tangxing&enatel.com.cn +58776 + Lomax A/S + Lasse Meggele + lasse.meggele&lomax.dk +58777 + PzBrig 12 + Alexander Alt + fmkppzbrig12mobsysbtrbgrp&bundeswehr.org +58778 + One D. Piece + Derra Abdoul Rachid + rachidderra12&gmail.com +58779 + BofA TI + Michelle Jones + mjones104&bofa.com +58780 + Magenta Living + IT Helpdesk + ithelpdesk&magentaliving.org.uk +58781 + DMESG Solucoes em TI + Diego Oliveira + seko&dmesg.com.br +58782 + Minglink Technology(Shenzhen) Co.,Ltd + Tony Tan + tony&minglink.com +58783 + JSC Nefteavtomatika + Salavat Iskhakov + iskhakov-sr&nefteavtomatika.ru +58784 + Arjun + Arjun Gopalakrishna + 247arjun&gmail.com +58785 + Billon Digital Services sp. z o.o. 
+ Jacek Pikul + jacek.pikul&billongroup.com +58786 + Marine Biological Laboratory + Richard Fox + rfox&mbl.edu +58787 + Amulet Hotkey + William Bagshaw + william.bagshaw&amulethotkey.com +58788 + LUMEO RHIS + Doug Parfett + doug.parfett&lumeorhis.ca +58789 + MMRMSolutions + Michael Setto + mike&mmrmsolutions.com +58790 + LifeScape + Dan Luke + dan.luke&lifescapesd.org +58791 + Drehertec Soluções Eletronicas LTDA ME + Johnny Dreher + johnnydreher&gmail.com +58792 + MedCrypt, Inc. + Eric Pancoast + iana&medcrypt.co +58793 + IDEC Technical Services Ltd. + Thomas Davidson + thomasdavidson&idectech.com +58794 + CNIT + Jihua Li + lijihua&cloudnineinfo.com +58795 + Beijing SmartAll Technology Co., Ltd. + DengYanbing + admin&smartall.com.cn +58796 + Beijing WoLink Information Technology Co., LTD + michael + michael&vsbc.com +58797 + MetaTransform AB + Stefan Gustafsson + stefan&metatransform.se +58798 + Anton Häring KG + Tomasz Adamczyk + tomasz.adamczyk&pl.anton-haering.com +58799 + Diakonie Michaelshoven e.V. + Stefan Joussen + s.joussen&diakonie-michaelshoven.de +58800 + Solutions 4 Media d.o.o. + Alen Obad + Alen.obad&solutions4media.com +58801 + KAO USA Inc. + Sunny Patel + sunny.patel&kao.com +58802 + IT Consulting Walonka + Christian Walonka + christian&walonka.de +58803 + Foot and Ankle Center Of SJ + Matthew Wohlgemuth + podiatrycare750&yahoo.com +58804 + Cimetrix + David Warren + david.warren&cimetrix.com +58805 + Saipher ATC + FRANCIS PEREIRA + iana&saipher.com.br +58806 + Servus Credit Union Ltd. + Sudarshan Shrinivasan + Sudarshan.Shrinivasan&servus.ca +58807 + Allied Associates International, Inc + Regina Hansen + regina.hansen&a2ius.com +58808 + Jungmann Systemtechnik GmbH & Co. KG + Daniel Richardt + dev&jst.ag +58809 + Quectel Wireless Solutions Co., Ltd. + Mao, Xuchuan + richard.mao&quectel.com +58810 + EIR + Marco Fais + marco.fais&eir.ie +58811 + Glacier Pediatrics, LLC + Thea Paddleford + tpaddleford&glacierpediatricsllc.com +58812 + Nexustorage + Glen Olsen + glen.olsen&nexustorage.com +58813 + Crisaleo Limited + Cristian Salvo Leonardi + company&crisaleo.com +58814 + Free SENEGAL + Daouda DIOP + ddiop&free.sn +58815 + Tehnika Pty Ltd + Tehnika Infrastructure + infrastructure&tehnika.com.au +58816 + EZ-NET Ubiquitous Co.,Ltd + Kyung il, Cho + ackia&ez-net.co.kr +58817 + CertCloud Pte. Ltd. + Edwin Zhai + edwin.zhai&certcloud.com +58818 + ITPRO + MIKE BRADSHAW + mike&mikebradshaw.dev +58819 + Panabit Software Co.,LTD. + Lynch Wang + wang.lin&panabit.com +58820 + Stadtwerke SH GmbH & Co. KG + Thorsten Spielmanns + t.spielmanns&stadtwerke-sh.de +58821 + Crea Inc + Makoto Murai + contact&creacorp.jp +58822 + Suzhou Huaqi Intelligent Technology Co. 
LTD + Monica Qiu + qiuli&huaqi.info +58823 + 深圳联合测试 (Shenzhen Joint Test) + 罗泽辉 (Luo Zehui) + cardinal_luo&combine-testing.com +58824 + Versity Tech + Arjun Dahal + arjundahalard.thereason&gmail.com +58825 + analiti LLC + Eran Naveh + contact&analiti.com +58826 + Diamondback Energy + Chris Young + CYoung&DiamondbackEnergy.com +58827 + Securenetology + Chris Young + CYoung&Securenetology.com +58828 + Rotex 1820 + Gideon Mohr + webmaster&rotex1820.de +58829 + D1GITAL.ORG PROSTA SPÓŁKA AKCYJNA + Dawid Banaszewski + dawid&d1gital.org +58830 + Computer Research Association, Southern University of Science and Technology + Qingyuan Fan + cra&sustech.edu.cn +58831 + Nozomi Networks + Alessandro Lo Forte + alessandro.loforte&nozominetworks.com +58832 + Assmann Electronic GmbH + Philip Glittenberg + AGROUP-IT-SERVICES&assmann.com +58833 + Elemy LLC + Vadim Voronov + info&elemy.ru +58834 + Kiel.social + Jan Kaessens + info&kiel.social +58835 + Mariner + Mariner IT + iteng&marinerwealth.com +58836 + TG Systems + Petr Tarbeev + dev&tgsystems.ru +58837 + MRM Health NV + Marcel de Leeuw + marcel.deleeuw&mrmhealth.com +58838 + China Greatwall Technology Group Co., Ltd. + Yanqin Wu + wuyanqin&greatwall.com.cn +58839 + MicroNOC Inc. + Isaac Yen + isaacny&MicronocInc.com +58840 + California Department of Transportation + Michael Robinson + michael.c.robinson&dot.ca.gov +58841 + Financial Statement Services Inc + Network Operations + netops&fssi-ca.com +58842 + NORTH CANTON PODIATRY INC + JODI STEPHENSON + NORTHCANTONPODIATRY&OUTLOOK.COM +58843 + Digital Research Alliance of Canada + Nathan Wielenga + nathan.wielenga&alliancecan.ca +58844 + Alpha ESS Co., Ltd. + Jester Li + jester.li&alpha-ess.com +58845 + Heldele GmbH + Daniel Baron + salach&heldele.de +58846 + Advertising Specialty Institute + Seth Kusiak + skusiak&asicentral.com +58847 + Energys + Hamish Lucas + hamish&lesolutions.com.au +58848 + Bensalem Township School District + Anthony Wilkus + awilkus&bensalemsd.org +58849 + Beijing Cgprintech Technology Co.,Ltd + Yan Zhixin + yanzhixin&cgprintech.com +58850 + CNH Industrial + Ying Hu + ying.hu&cnhind.com +58851 + Maria Parham Health + Kimberly Smith + kimberly.smith2&lpnt.net +58852 + China Digital Video + Lin Dai + dai_lin&cdv.com +58853 + Suono Telecom Srl + Ing. Pierangelo Longhi + lab&suono.com +58854 + unibob + Kozlov Oleg + okozlov&sales.pack.ru +58855 + novaroll + Kozlov Oleg + okozlov&sales.pack.ru +58856 + The Hope Center for Cancer Care + Raza Syed + Raza.Syed&neocancer.com +58857 + Borough of Quakertown + Paul Yaskowski + pyaskowski&quakertown.org +58858 + Beijing ThreatBook Technology Co., Ltd. + Zihui Qu + quzihui&threatbook.cn +58859 + PLNDRM + Grant Browne + grant.browne&plndrm.net +58860 + Vector Technologies, LLC + Sergey Gusakov + sgusakov&vectortechnologies.ru +58861 + Shaanxi Gangsion Electronic Technology Co., Ltd + chaikeran + chaikeran&126.com +58862 + Australian Digital Health Agency + Nathan Collins + nathan.collins&digitalhealth.gov.au +58863 + IIIoT Technologies Co.,Ltd + Frank Tsai + caixiansen&iiiot.io +58864 + Airgain, Inc. + IT Admin + it&airgain.com +58865 + CTMA + Sylvio Benard + support&ctma.ca +58866 + Parity Security + Jann So + jannso&protonmail.com +58867 + Gristlepit Labs + Robert Ridley + reridley&gp-lab.net +58868 + secore.ly GmbH + Michael Jung + michael.jung&secore.ly +58869 + Teal Communications + Paul Bagyenda + paulb&tealcommunications.com +58870 + Ministerium für Wissenschaft, Energie, Klimaschutz und Umwelt des Landes Sachsen-Anhalt + Dr.-Ing. 
Mario Hildebrandt + mario.hildebrandt&mwu.sachsen-anhalt.de +58871 + Istok-Audio Intl. + Aleksey Evtyushkin + ea&ia-group.ru +58872 + Ravenswood Technology Group, LLC + Brian Desmond + bdesmond&ravenswoodtechnology.com +58873 + Helsinki Systems GmbH + Janne Heß + hostmaster+iana&helsinki-systems.de +58874 + WLS Stamping + Aaron Shaffer + ashaffer&wlsstamping.com +58875 + Helix Linear Technologies + Aaron Shaffer + ashaffer&helixlinear.com +58876 + Shaffer Consulting + Aaron Shaffer + aaron&shafferconsulting.com +58877 + Elbe Flugzeugwerke GmbH + Bernd Zimmermann + bernd.zimmermann&efw.aero +58878 + FutureE GmbH + Thies Stange + edv&future-e.com +58879 + KKTC Polis Genel Müdürlüğü + Derviş Gürtekin + dervis.gurtekin&polis.gov.ct.tr +58880 + The Francis Crick Institute Limited + ITO Infrastructure Team + its-admin&crick.ac.uk +58881 + Liverpool Central School District + Chris Marion + cmarion&liverpool.k12.ny.us +58882 + Knödelwerkstatt GmbH & Co. KG + Sebastian Pape + sebastian&die-knoedelwerkstatt.de +58883 + City of Asheville + Shannon Barrett + sbarrett&ashevillenc.gov +58884 + Onyx Spectrum Technology Inc + Harper Anderton + Handerton&onyxspectrum.com +58885 + Darden School of Business + Matt Marine + marinem&darden.virginia.edu +58886 + Darden School of Business + Matt Marine + marinem&darden.virginia.edu +58887 + Intuitive Surgical + Harvey Tran + harvey.tran&intusurg.com +58888 + Telecom Infra Project + Christopher Busch + chris.busch&mail.telecominfraproject.com +58889 + Telecom Infra Project + Christopher Busch + chris.busch&mail.telecominfraproject.com +58890 + Telecom Infra Project + Christopher Busch + chris.busch&mail.telecominfraproject.com +58891 + FuriosaAI + Hyuck Han + hyuck.han&furiosa.ai +58892 + livefree + Joshua James Maisonneuve + livefree.joshua.j&gmail.com +58893 + Beijing Nodes Network Tech. Ltd. Co + Junwei.Ren + renjunwei&nodes.com.cn +58894 + Factum Identity Solutions + Rocio Casanova + rocio.casanova&factum.es +58895 + ask! – Beratungsdienste für Ausbildung und Beruf + WAGNER AG + tm-system&wagner.ch +58896 + Kheiron Medical Technologies Ltd + Dan Sullivan + systems&kheironmed.com +58897 + Profil Institut fuer Stoffwechselforschung GmbH + Marcus Daniel + marcus.daniel&profil.com +58898 + Bruckner Artists- & Eventmanagement OG + Juergen M. BRUCKNER + info&bruckner.events +58899 + Pliant.io, Inc. + Shane Hadden + shane&pliant.io +58900 + Tradesegur SA + Mario Lopez + mario&tradesegur.com +58901 + County of Volusia + John Bailey + SystemsandDB&volusia.org +58902 + Authenta + Stephen Pi + stephenpi&micron.com +58903 + Erie Shores HealthCare + Scott Baker + integration&transformsso.ca +58904 + Hôtel-Dieu Grace Healthcare + Scott Baker + integration&transformsso.ca +58905 + Windsor Regional Hospital + Scott Baker + integration&transformsso.ca +58906 + Chatham-Kent Health Alliance + Scott Baker + integration&transformsso.ca +58907 + KGAU "RCSP "AZVS" + Mikhail Burichenko + it&azvs-krsk.ru +58908 + Gmantra + Adarsh + root&gmantra.org +58909 + Austin DeFrancesco + Austin DeFrancesco + austin&defcesco.io +58910 + Innoflight + Anthony VerBurg + averburg&innoflight.com +58911 + Royal Digital + Hakam Al Taher + hakam313&gmail.com +58912 + CLO + Yang Hui + 41063068&qq.com +58913 + Loserth Schranner & Partner mbB + Johannes Hinterberger + hinterberger&berater-kanzlei.bayern +58914 + HITRUST Services Corp.
+ Pat Lee + pat.lee&hitrustalliance.net +58915 + Compass Lab Services + Ned Sabbatini + nsabbatini&compasslabservices.com +58916 + Boomer Benefits + Miles Phillips + mphillips&boomerbenefits.com +58917 + pki-lexa.adds + lannion axel + loukoli&hotmail.fr +58918 + Aguas da Regiao Metropolitania de Maputo + Nugernio Nunes + nnunes&adrmm.co.mz +58919 + Spirit Super + David Watson + david.watson&spiritsuper.com.au +58920 + BRINDISI TRUCKING LLC + Jason Brindisi + brindisi.trucking&gmail.com +58921 + Collini Dienstleistungs GmbH + Christof Rusch + crusch&collini.eu +58922 + Nexer + Radosław Głowicki + mib_contact&nexer.pl +58923 + Netcode Nexus + James Urban + urban_james&outlook.com +58924 + Barco NV + David Martens + david.martens&barco.com +58925 + Simplified Information Management Services + Colin M Cheline + ccheline&gmail.com +58926 + Stadtwerke Lübbecke GmbH + Marvin Sontberg + marvin.sontberg&swlk.nrw +58927 + Protelion GmbH + Evgeny Fedchenko + admin&protelion.com +58928 + DatoPortal GmbH + Tobias Gurtner + support&dato-portal.ch +58929 + True Frontier + Nicholas Sharples + admin&true-frontier.com +58930 + Hangzhou Normaedelta Networks & Services Ltd. + Tao Hui + taohui&n6delta.com +58931 + TP Skogstjänst AB + David Nyren + david.nyren&tpskogstjanst.se +58932 + Verve Industrial Protection + Josh Heling + it-vendor&verveindustrial.com +58933 + DMZ6 + Marko Rizvic + marko.rizvic&gmail.com +58934 + n-lab + Christopher Van Horn + chris.vanhorn85&gmail.com +58935 + Nakhon Phanom University + Songrit Kitisriworapan + songrit&npu.ac.th +58936 + Hoffmeier Industrieanlagen GmbH & Co. KG + Stephan Henke + stephan.henke&hoffmeier.de +58937 + Agramer UG + Nino Kurtalj + ninok&agramer.de +58938 + Nucor Steel + Stephen Sinclair + stephen.sinclair&nucor.com +58939 + Hunt Productions, Inc. + James Hunt + james&huntprod.com +58940 + Town of Claremont + Tristan Dawson + itadmin&claremont.wa.gov.au +58941 + Gearlinx + David McCullough + david.mccullough&gearlinx.com +58942 + China Mobile International Limited + Owen Chen + owenchen&cmi.chinamobile.com +58943 + 4hiway + Majid Yavari + yavari&4hiway.com +58944 + Arla Foods Deutschland GmbH NL Pronsfeld + Günther Mohr + guenther.mohr&arlafoods.com +58945 + Staatliche Muenze Berlin + Orhan Genc + administrator&muenze-berlin.de +58946 + Sitelogged + Abhinav A V + av.abhinav&gmail.com +58947 + Spreenauten GmbH + Daniel Knappe + it&spreenauten.com +58948 + “PIXIETECH” LLC + Andrey Khegay + support&pixietech.uz +58949 + Samiedu + Mikko Korpelainen + mikko.korpelainen&frendy.fi +58950 + EMPRESA NACIONAL DE CERTIFICACIÓN ELECTRÓNICA SpA + Néstor Reyes Avaria + nreyes&ecertla.com +58951 + Unassigned + Returned 2023-03-20 + ---none--- +58952 + Central States Manufacturing, Inc. + Andrew Healey + team-itinfrastructureandsecurity&centralstatesmfg.com +58953 + BGR Inc + BGR IT Department + bgrit&packbgr.com +58954 + HPM Building Supply + IS Department + techsupport&hpmhawaii.com +58955 + Shanghai Hi-tech Control System Co., Ltd. + Pengcheng Lu + lupc&hite.com.cn +58956 + Foticos + Carlos Gil + devsys&foticos.com +58957 + Portsmouth Hospital University Trust + Simon Scott + simon.scott&porthosp.nhs.uk +58958 + SPARQ systems + Na Lei + nlei&sparqsys.com +58959 + Always Networks Ltd + Nick Shaw + nick&alwaysnetworks.co.uk +58960 + AMBACAR LTDA. + Daniel Analuisa + danielanaluisa25&gmail.com +58961 + Sleeping Bear Systems + Charles Farris + charlesfarris71&gmail.com +58962 + J's Communication Co., Ltd.
+ Takumi Kadode + revo-dev&jscom.co.jp +58963 + Wingtech Technology Co.,Ltd + Oscar Liao + liaohualong&wingtech.com +58964 + P4 Sp. z o.o. + Zaneta Gliwa + zaneta.gliwa&play.pl +58965 + Dalys Automotive Group, LTD + Oleksandr Kostiuk + security&busmarket.ua +58966 + Old Gold + Cheryl Dennett + info&oldgoldjewelry.com +58967 + DASABO OÜ + Davide Salvatore Boccone + internal&dasabo.com +58968 + Bayhealth Medical Center + Seth Braunstein + seth_braunstein&bayhealth.org +58969 + Santo Domingo Motors + Juan Cabral + dominios&sdm.com.do +58970 + W&W Nordic Forest AB + Veronica Wijkström + wwnordicforest&gmail.com +58971 + CavanaSystems + Luca Cavana + luca.cavana&cavanasystems.com +58972 + Azurelab + Deepak John + deepakjohn1213&gmail.com +58973 + dwap + Bertrand DUBOIS + bert_dub&dwap.fr +58974 + Attensys.io GmbH + Noah Laux + laux&attensys.io +58975 + LinuxExam + Jinqiang Zhao + jonathan.jq.zhao&gmail.com +58976 + Federal Public Service (FPS) IBZ + EL HABIB EL MESKIOUI + elhabib.elmeskioui&rrn.fgov.be +58977 + Winsonic Electronics Co., Ltd. + David Lee + david.lee&ewinsonic.com +58978 + Oculus Imaging LLC + Keith Woodward + keithwoodward&oculusimaging.com +58979 + Power Service Engineering + Sergey Gorodilow + pse&monbs.ru +58980 + Blue Solutions Canada Inc. + Claude Carignan + it&blue-solutions.ca +58981 + SINENG ELECTRIC CO., LTD. + Li Zhongqiong + lizhongqiong&si-neng.com +58982 + Digital Products International, Inc. + Michael Sarlone + registrar&dpiinc.com +58983 + Sufina GmbH + Severin Hotz + severin.hotz&sufina.com +58984 + Blahaj Limited + Anze Jensterle + pen&blahaj.dev +58985 + ADiCo Corporation + Kazuo SAITO + adico&adico.co.jp +58986 + 3Tech Power (Dongguan) Co., Ltd. + Hong Shun Hua + shhong&3tech.net +58987 + Manyu Networks + Mukunda Haveri S + mukunda&manyunetworks.com +58988 + PESCO Energy & Resources LLC + Artem Mamatov + ma&pescopmc.com +58989 + Massmart Holdings Ltd. + Massmat InfoSec Technology + InfoSecTech&Massmart.co.za +58990 + Midlands Technical College + Carl Carraway + carrawayc&midlandstech.edu +58991 + Sk3w Technologies Inc. + Mark Hasse + mark.hasse&sk3w.net +58992 + 4Cyte Pathology Pty Ltd + George He + george.he&4cyte.com.au +58993 + Ezen Solution Inc. + Hocheol, Lee + hclee&ezensolution.co.kr +58994 + Qbik AB + Anders Ringsell + anders.ringsell&qbikab.se +58995 + 逸超科技(北京)有限公司 (eSonic image (Beijing) Co. Ltd.) + cong + cong.zhao&esonicimage.com +58996 + Holmes Group Limited + Sean McGrath + sean.mcgrath&holmesgroup.com +58997 + Mulki & Vogt innovative GmbH + Oliver Mulki + mulki&mvi.de +58998 + Peter Macfarlane + Peter Macfarlane + macfarlane.peter.s&gmail.com +58999 + Telenext Systems + Samrat Majumder + samrat&asia.com +59000 + ATOMA Verwaltungs GmbH & Co. KG für Industrieverwaltungen + Michael Seidl + m.seidl&atoma-multipond.de +59001 + Cloud Solutions LLC + Milekhin Dmitriy + MilekhinDN&enplus.ru +59002 + WireStar Networks + Kyle Leissner + kyle&wirestar.net +59003 + Pixar Animation Studios + AJ Zmudosky + iana-oid&pixar.com +59004 + GROS VENTRE OB-GYN, LLP + SHANNON C, MD ROBERTS + officemanager&gvog.net +59005 + TEWS Elektronik GmbH & Co. KG + André Tews + PEN-IANA&tewsworks.com +59006 + Prodec Networks Ltd. 
+ Michael Dance + michael.dance&prodec.co.uk +59007 + Resolve Technology Limited + Leo Wong + leo&reslv.io +59008 + Oesterreichischer Rundfunk + Gilbert Unfried + gilbert.unfried&orf.at +59009 + Transports Touyre + Thierry Touyre + contact&transportstouyre.com +59010 + Nabasny Family + Jake Nabasny + jake&nabasny.com +59011 + Cubane Software + Sam Mikes + smikes+iana&cubane.com +59012 + Advitech Pty Ltd + IT Department + webmaster&advitech.com.au +59013 + Eviation Aircraft Inc + Cory Thompson + cory.t&eviation.co +59014 + LeapTech + Francois van Jaarsveld + francois&leaptech.co.za +59015 + Transports Baché + Frédéric Baché + frederic_bache&hotmail.com +59016 + JusonTech + Ma + mzhhaoo&foxmail.com +59017 + Beyondbell + Rishabh Ahlawat + rishabh&ahlawat.com +59018 + STAXCON + IANA Admin + iana&staxcon.de +59019 + ISSM Consulting SRL + Trandafirescu Alexandru + alex&issm.ro +59020 + Yashiro Laboratory + Takeshi Yashiro + takeshi.yashiro&iniad.org +59021 + Miln + Graham Miln + support&miln.eu +59022 + Echostreams Innovative Solutions LLC + Steven Deng + steven.deng&premioinc.com +59023 + Jörn Schumann - PKI Consultant + Jörn Schumann + pki.consultant&joernschumann.de +59024 + Missoline + LDAP Directory Architecture Department + directory&missoline.org +59025 + XyZ + Erik Jan Uitenbroek + xyztm.business&gmail.com +59026 + Surveily Sp. z o.o. + Wojciech Turowicz + info&surveily.com +59027 + Workflow Health + Winston Ray + winston&workflowhealth.com +59028 + eze System, Inc. + Anders Rehnvall + ar_iana&ezesys.com +59029 + Dr. Ecklebe GmbH + Dr. Ecklebe GmbH + it-betrieb&dr-ecklebe.de +59030 + Liberty Latin America + Rodrigo Cardenas + rodrigo.cardenas&lla.com +59031 + Netography, Inc + Barrett Lyon + b&ne.to +59032 + ACPM IT Sdn Bhd + Lim Huck Hai + info&acpmit.asia +59033 + ASEAN Trustmark Alliance + Lim Huck Hai + info&acpmit.asia +59034 + Charging Interface Initiative (CharIN) e. V. + Faranak Bakhtiari + baran&tinqplus.com +59035 + FaceKom Services Zrt. + Gábor Tóth + gabor&facekom.com +59036 + Network Communications International Corp + Tommy Peel + Tommy.Peel&ncic.com +59037 + Sunwoda Energy Solution Co., Ltd + Qizhen Ning + 820454821&qq.com +59038 + Cornerstone Church + Aaron Parks + aaronparks95&protonmail.com +59039 + boquette + Guillaume CERF + guillaume.cerf2625&orange.fr +59040 + MCAS Cherry Point FRCS + Rebecca Avery + chpt.facssoomb&usmc.mil +59041 + Swarco Sverige AB + Krister Sjögren + iana.sverige&swarco.com +59042 + Eneida + arthur autuori + aautuori&eneida.io +59043 + Tigo CA + Omar Ibrahim Alabi Mendez + imendez&sv.tigo.com +59044 + DigitalRayosX + Claudio Ciarla + claudiociarla&gmail.com +59045 + True Frontier + Nicholas Sharples + admin&true-frontier.com +59046 + Murena + Arnau Vazquez Palma + dev&murena.io +59047 + Stadtwerke Achim GmbH + Andreas Liedtke + andreas.liedtke&stadtwerke-achim.de +59048 + CBI:Electric Low Voltage + Future Letsoalo + design.engineering&cbi-electric.com +59049 + Ubicquia + Max Matton + mmatton&ubicquia.com +59050 + K & K Steuerberatungsgesellschaft mbH + Bernhard Kokott + info&kk-dresden.de +59051 + Södertälje Kommun + Mikael Edsbäcker + mikael.edsbacker&tietoevry.com +59052 + Arley CR + Jesus Arley Quiros + info&arleycr.com +59053 + W2SL Desenvolvimento e Consultoria Ltda. + Wagner Lima Lopes + wagner&ciadosistema.com.br +59054 + The Gpsd Project + Gary E. 
Miller + gem&rellim.com +59055 + ThePlace + Gareth Jones + g&theplace.co.uk +59056 + Klinikum Würzburg Mitte gGmbH + IT-Abteilung + it.kwm&kwm-klinikum.de +59057 + TETA-CO + Seyed Jafar Seyedi + sjsayedi&yahoo.com +59058 + CHSLD90 + Service Informatique + informatique&chsld-lechenois.fr +59059 + BeFoxIT + Mahmudul Hasan Sarker + engr.mahmudulhasansarker&gmail.com +59060 + Capgemini Outsourcing Services GmbH + Dörthe Melanchthon + appstwo-delivery-support.de&capgemini.com +59061 + PRIVATE JOINT-STOCK COMPANY "UKRAINSKY GRAFIT" + Max Belov + max&ukrgrafit.com.ua +59062 + e-Governance infrastructure implementation agency (EKENG) + Gevorg Bakhshyan + gevorg.bakhshyan&ekeng.am +59063 + NETWORK RAIL LIMITED + Darren Hepburn + DL-NRTSystemsArchitecture&networkrail.co.uk +59064 + Valdosta State University + Jeremy Scott + mjscott&valdosta.edu +59065 + Arzeda + Yih-En Andrew Ban + andrew.ban&arzeda.com +59066 + Lovisenberg Diakonale Sykehus + Thor Alf Gilbrant + tagi&lds.no +59067 + GIRD Systems, Inc. + James Caffery + JCaffery&girdsystems.com +59068 + Gxmicro + Martin Zhang + huan.zhang&gxmicro.cn +59069 + MobiFone Corporation + Tran Viet Hang + hang.tranviet&mobifone.vn +59070 + Minsk Central Customs + Kuksenok Dmitry + kuksenokdl&customs.gov.by +59071 + Erste Investment Hungary + Gabor Gondos + itadmin&ersteinvestment.hu +59072 + Technische Hochschule Bingen + Sascha Listner + helpdesk&th-bingen.de +59073 + NRW Audio GmbH & Co. KG + Jochen Mai + it&nrwaudio.de +59074 + Integrated Security Solutions, INC + Nicolas Feisthamel + nfeisthamel&mtiss.com +59075 + Antenne Bayern GmbH & Co.KG + Michael Kerscher + it&antenne.de +59076 + JCIUS + Jason Prouty + jprouty&jcius.com +59077 + Robson da Silva Andrade - Arduinfo + Robson da Silva Andrade + robsonandrade&hotmail.com +59078 + Jacob Boily Network + Jacob Boily + webmaster&jacobboily.network +59079 + Vard Electro AS + Frode Roenneberg + frode.ronneberg&vard.com +59080 + eBZ Tecnologia + Guilherme Monteiro + guilherme.monteiro&ebz.tec.br +59081 + Telonium + Frank Park + iana&telonium.com +59082 + Slovak Academy of Sciences + Viet Tran + viet.tran&savba.sk +59083 + Surf Telecom S/A + Sebastião Boanerges Ribeiro Junior + sebastiao&surf.com.br +59084 + coreMem Limited + Alexander Clouter + info&coremem.com +59085 + Meta Serviços de Informatica LTDA + Sergio Alexandre Gutheil + infra&meta.com.br +59086 + Purvis Industries + Rick Camp + rick.camp&purvisindustries.com +59087 + Delta Media Corporation + Nick Clardy + nclardy&deltamediacorp.com +59088 + SUNGROW AUSTRALIA GROUP PTY LTD + Liu Su + suliu&sungrowpower.com.au +59089 + NTA Systemhaus GmbH & Co. KG + Gagneur, René + service-it&nta.de +59090 + GreenSync + Chris Smekel + chris.smekel&cresconet.com +59091 + RGryncewicz + Robin Gryncewicz + robin&spengler-gryncewicz.be +59092 + SOLVIT - Innovation on Telecommunications + Nuno Cota + nunocota&solvit.pt +59093 + Wavespear LLC + Albert J. de Vera + albertus&wavespear.com +59094 + Combined Energy Technologies Ptd Ltd + Malcolm McEwan + mal&combined.energy +59095 + Zeido Technologies, LLC + Scott Hincy + support&zeido-tech.com +59096 + Discover Energy Pty Ltd + Jeff Yu + jeff&discoverenergy.com.au +59097 + Mohammad Kazem Azizollahi + Mohammad Kazem Azizollahi + mk.aziz.author&gmail.com +59098 + YOU-tele + bassam Alsharaby + balsharaby&you.com.ye +59099 + Yunex GmbH + Rafał Przybył + rafal.przybyl&yunextraffic.com +59100 + LiaredsTrävaror AB + Annica Engdahl + annica&liaredstra.com +59101 + Akrapovic d.d. 
+ Borut Korosec + borut.korosec&akrapovic.com +59102 + Gantner Electronic GmbH + Alexander Doblinger + alexander.doblinger&gantner.com +59103 + Zürich Versicherungs-Aktiengesellschaft + Michael Brandstätter + michael.brandstaetter&at.zurich.com +59104 + Telvista Inc, + John Woodfin + john_woodfin&telvista.com +59105 + Peninsula Community Health Services + Dale Farrales + drfarrales&pchsweb.org +59106 + LiaredsTrävaror AB + Annica Leander + annica&liaredstra.com +59107 + Digital Magics S.p.A. + Alberto Fioravanti + service.oid&digitalmagics.com +59108 + Algo-Logic, Inc. + John Lockwood + jwlockwd&algo-logic.com +59109 + Enee Solutions + Tom Thorogood + team-ops&enee.com.au +59110 + Atende S.A. + Andrzej Jaskiewicz + andrzej.jaskiewicz&atende.pl +59111 + Confia Tecnologia da Informação Ltda + Eduardo Resende + eduardo.resende&confia.net.br +59112 + Affarii Technologies + Dave Mason + it&affarii.com +59113 + Aspiria Informationstechnologie GmbH + Michael Kail + michael.kail&aspiria.com +59114 + LeChatBzh + Gilles CHABROL + svc.gc&free.fr +59115 + AppSignal BV + Thijs Cadier + thijs&appsignal.com +59116 + Stadt Lünen + Martin Jarzombek + martin.jarzombek.83&luenen.de +59117 + Computerforensic & more GmbH + Michael Meixner + pen-request&computerforensic.at +59118 + EdgeHawk-Security + Gregory Patkin + gregory&edgehawk-security.com +59119 + CubedH.LLC + Clint Hardee + hardeec&me.com +59120 + The Skibowski Company LC + F. Edward Barrett + feb&skibowski.net +59121 + Alyansa SA + Maxence Censier + registar&alyansa.com +59122 + Blackstone Inc + Michael Bates + michael.bates&blackstone.com +59123 + Powow Power + Ryan Chen + ryan.c&shinehub.com.au +59124 + 深圳市万网博通科技有限公司 (Shenzhen Wanwang Botong Technology Co., Ltd.) + 喻良 (Yu Liang) + liang.yu&tg-net.cn +59125 + Arm Technology (China) Co., Ltd + Liangshou Chen + Liangshou.Chen&armchina.com +59126 + CrescoNet + Jack Dangar + jack.dangar&cresconet.com +59127 + POSTRON + Administrator + info&postron.org +59128 + i7sec Consulting + Hartmut Richthammer + iana-pen&i7sec.dev +59129 + ICCOA + Wayne Wang + wayne.wang&vivo.com +59130 + Zaram Technology. Inc. + won jung Kim + wjkim&zaram.com +59131 + Audemars Piguet SA + Régis Rochet + regis.rochet&audemarspiguet.com +59132 + Human Streaming + Yvan LE BOUFFANT + yvan.lebouffant&human-streaming.tv +59133 + North Central Texas Emergency Communications District + 9-1-1 Systems Team + 911Systems&nct911.org +59134 + Azercell Telecom LLC + Shahin Mammadov + Shain&azercell.com +59135 + X-Net Technologies GmbH + Wolfgang Eibner + office&x-net.at +59136 + X-Net Services GmbH + Wolfgang Eibner + office&x-net.at +59137 + ETRI (Electronics and Telecommunications Research Institute) + Joo-Chul Kevin Lee + rune&etri.re.kr +59138 + Moritz J. Weig GmbH & Co. KG + Kevin Naujokat + security&weig-karton.de +59139 + Mark ID UAB + Marius Stonkus + marius.stonkus&markid.lt +59140 + Sycope + Tomasz Winiarski + tomasz.winiarski&sycope.com +59141 + Unitel Technologies + Robin Kleinermanns + robin.kleinermanns&unitelgroup.net +59142 + ByteTerrace LLC + David F Smith + administrator&byteterrace.com +59143 + Billion Byte Technology (Chengdu) Co., Ltd. 
+ Chang Mengnan + changmengnan&billion-byte.com +59144 + Stadtwerke Teterow GmbH + Marcel Hasart + m.hasart&sw-teterow.de +59145 + QT Global Software Ltd + Yves Mujyambere + ymujyambere&qtsoftwareltd.com +59146 + Ampron OY + Ollar Roovik + Ollar.roovik&ron.eu +59147 + Mylinex International (Private) limited + Buddika Witharana + buddika&mylinex.com +59148 + Good Hope School + Kenny Lee + gads&ghs.edu.hk +59149 + Open Ethics Initiative + Nikita Lukianets + n.lukianets&openethics.ai +59150 + Komlós Boldizsár + Komlós Boldizsár + komlos.boldizsar&gmail.com +59151 + Verhoef + Tonny Verhoef + iana&verhoef.se +59152 + VerneMQ + André Fatton + info&vernemq.com +59153 + KUKA Deutschland GmbH + Dominik Grether + IANA&kuka.com +59154 + berlinCreators e.V. + Bodo Eichstaedt + bodo&berlincreators.de +59155 + Changsha KILOVIEW Electronics Co., Ltd. + Jacob Zuo + zuo&kiloview.com +59156 + A6 Labs + Jonathan Maddocks + info&a6labs.co.uk +59157 + plachy.eu IT consultancy + Lukas Plachy + lukas&plachy.eu +59158 + Bade- und Kurverwaltung Bad Bellingen GmbH + Alexander Vos + alexander.vos&bad-bellingen.de +59159 + IBT + Andy Kim + andykim17&rocketibt.co.kr +59160 + AM Uggerud Holdings + Michael J Uggerud + amuggholdings&gmail.com +59161 + Paul Tagliamonte + Paul Tagliamonte + paultag&gmail.com +59162 + Haavard Ose Nordstrand + Haavard Ose Nordstrand + hon&hon.one +59163 + Japangaijin + Theodore Boismier + japangaijin&hotmail.com +59164 + Oregon Health & Science University + Michael Crino + netcomm&ohsu.edu +59165 + Energy Research Aps + Henning Svane + hsv&energy.dk +59166 + Olivance PTY LTD + Ahmed Tayeh + Atayeh&olivance.com +59167 + innogy Česká republika + Rostislav Fukala + rostislav.fukala&innogy.cz +59168 + Basellandschaftliche Gebäudeversicherung + Mehdi Ayed + licensing&bgv.ch +59169 + GRID NOORD + Bert Starke + bert.starke&gridnv.nl +59170 + GRID BEHEER + Bert Starke + bert.starke&gridnv.nl +59171 + Endpoint Clinical + Tom Bombardier + tbombardier&endpointclinical.com +59172 + Enclave Networks Limited + Alistair Evans + alistair.evans&enclave.io +59173 + CSL Dualcom Ltd + Kevin Batson + kevin.batson&csl-group.com +59174 + Primoris Services Corporation + Chad Haxton + HostingNotify&prim.com +59175 + Azdio + Youri Kalinov + youri.kalinov&azdio.com +59176 + PowerLeader Computer Systems Co., LTD + Weike Chi + 2490799795&qq.com +59177 + Asio + Ma Wanhong + mawh&createchasia.com +59178 + Indiana Foot & Ankle Specialists LLC + Matthew DeWitt + mtdewitt&indianafoot.com +59179 + USSOCOM + Kevin Stamm + Kevin.stamm.civ&socom.mil +59180 + Interface Corporation + Yoshinori Kurose + development&interface.co.jp +59181 + Creative Frequency Entertainment + Travis Crawford + travis.crawford&cfelab.com +59182 + Fohhn Audio AG + Jochen Schwarz + info&fohhn.com +59183 + Powow VPP Pty Ltd + Lucas Guo + lucas&powow.com.au +59184 + Global Sailfish Network + Leon Yu + yyybupt&163.com +59185 + Discar Metering + Franco Ceballos + fceballos&discar.com +59186 + Intuitiv Ltd + Tom Gould + hostmaster&intuitiv.net.uk +59187 + Cambridge Valley Machining, Inc. 
+ Cecil Lasich + clasich&cvmusa.com +59188 + Skogslogistik AB + Christian Jönsson + christian.skogslogistik&gmail.com +59189 + Emil Frey IT Solutions AG + Tobias Schiessl + tobias.schiessl&emilfrey.ch +59190 + AzTrust Certificate Authority + Habib Abbasov + hebib.atilla&gmail.com +59191 + LI AO Integrated Systems + LI AO + liaomd&work2.liao.media +59192 + Datasoft Systems Bangladesh Limited + Muhammad Sajjad Hossain + sajjad.hossain&datasoft-bd.com +59193 + UMH Systems GmbH + Ferdinand Linnenberg + fl&umh.app +59194 + RaycoWylie Systems + Vincent Chartier + engineering&raycowylie.com +59195 + Asociacion de Bancos Multiples de la República Dominicana + Rosanna Ruiz + tecnologia&aba.org.do +59196 + Lead Control Inc. + Daniel Yaron + daniely&leadcontrol.com +59197 + Castagnet Duméou + Contact + contact&castagnet-dumeou.fr +59198 + ARGOSDATA CERTIFICACION DE INFORMACION Y SERVICIOS RELACIONADOS S.A.S. + ANDRES BANDA + andres.banda&argosdata.com.ec +59199 + NIO Inc. + Lijun Liao + lijun.liao&nio.io +59200 + atriga GmbH + Bernd Riedel + b.riedel&atriga.com +59201 + zetasys.net + Daniele Zelante + zeldan&zetasys.net +59202 + Expressway Authority of Thailand + Nattarat Rujiwararat + postmaster&exat.co.th +59203 + ASP Microcomputers + Goran Kovacevic + goran&asp.com.au +59204 + EV Converged Networks + Ethan Voorhees + evoorhees&evconverged.com +59205 + The University of Electro-Communications + YAZAKI Syunji + techstaff&cc.uec.ac.jp +59206 + The University of Electro-Communications + YAZAKI Syunji + techstaff&cc.uec.ac.jp +59207 + Humanising Autonomy Ltd + Richard Vodden + richard&humanisingautonomy.com +59208 + Quant Decisions, S.L. + Raul Wegmann + it&quant.global +59209 + TimberLand + Serge Chirik + admin&timbel.info +59210 + DURAG GmbH + Tom Schönfelder + tom.schoenfelder&durag.com +59211 + scorpiones.ch + Christian Hunn + chregu&ch-hunn.ch +59212 + Almawave SpA + Riccardo Cascioli + r.cascioli&almawave.it +59213 + Barrington Consulting GmbH + PKI Authority Heiko Mehrens + ianaoid&b8p.de +59214 + everti GmbH + PKI Authority Heiko Mehrens + ianaoid&everti.de +59215 + Danger-Management.com + Joseph Snyder + JosephS&Danger-Management.com +59216 + Radiomar Eletronica Naval Ltda. + Saulo Vieitas + saulo&radiomar.com.br +59217 + MSTronic Co., LTD. + Peter Hsiao + peter&mse.com.tw +59218 + Guangzhou Sanjing Electric Co., Ltd + Qiang Leng + qiang.leng&saj-electric.com +59219 + RADKit + Alex Honore + ahonore&cisco.com +59220 + IFD NETWORK + Piotr Koper + piotr.koper&ifd.network +59221 + CtiPath LLC + Justin Lecus + ianaoid&ctipath.com +59222 + Tide_Korea + Youngtae,Cho + ytcho&tidekorea.com +59223 + The Open Group SOSA™ Consortium + Judy Cerenzia + j.cerenzia&opengroup.org +59224 + Aydahwa Enterprise + Eldar Aydayev + office&aydahwa.com +59225 + Cetitec GmbH + Tobias Modschiedler + tobias.modschiedler&cetitec.com +59226 + Nightingale Nurses, LLC + Eddie Sraer + dev&nightingalenurses.net +59227 + terranets bw GmbH + Kai Schmithuysen + k.schmithuysen&terranets-bw.de +59228 + REHAU Industries SE & Co. KG + Baris Oezciftci + baris.oezciftci&rehau.com +59229 + Fiscalia General de la Republica de El salvador + Nelson Arnoldo Amaya Tadeo + nelson.amaya&fgr.gob.sv +59230 + Quick Iterate Co., Ltd. 
+ Yoshiya Kato + y-kato&quickiterate.com +59231 + Total Foot Care, PA + Alexander Terris + alext0923&gmail.com +59232 + KS Technologies + Kyle Sparrow + kyle&kstnwi.com +59233 + Frey+Cie Techinvest22 Holding AG + Christian Hunn + christian.hunn&zettaplan.ch +59234 + Domestic & General + Aubrey Platel + aubrey.platel&domesticandgeneral.com +59235 + DHL Parcel + Danny Retel + danny.retel&dhl.com +59236 + Peterie + Christian Lösel + admin&peterie.de +59237 + INYOURSKY Organization + MC IYS + mooncity.iys&inyoursky.com +59238 + Stacuity Limited + Michael Bromwich + mike.bromwich&stacuity.com +59239 + Port of Rotterdam + Martijn Ebben + m.ebben&portofrotterdam.com +59240 + Axians BE + Dieter Van Geet + support.be&axians.com +59241 + yeekatee AG + Giorgio Azzinnaro + iana-pen&yeekatee.com +59242 + 3AE1.net + Steven Rowe + administrator&3ae1.net +59243 + EUC365 + David Brook + david&euc365.com +59244 + GLtecnologia + Gabriel Lima + gabriellimma&gmail.com +59245 + RedT Telecom + Atendimento RedT + atendimento&redt.com.br +59246 + RedT Telecom + Atendimento RedT + atendimento&redt.com.br +59247 + SYNCOMP Data Systems Handels GmbH + Philipp Zöchbauer + office&syncomp.at +59248 + Apex Microtechnology, Inc. + Joel Hansen + joel.hansen&apexanalog.com +59249 + iTik + Sébastien NAMÈCHE + sebastien&itik.fr +59250 + Ankh-Morp.ORG + Hanno Hecker + vetinari+iana&ankh-morp.org +59251 + LeiSec GmbH + Daniel Leiser + daniel.leiser&leisec.ch +59252 + Rapidmax Technology Corporation + Roger Yang + roger_yang&rapidmax.com.tw +59253 + Shenzhen FinDreams Battery Co., Ltd. + Even Xing + xing.yihong&fdbatt.com +59254 + bibliotheca AG + Yi-Chau Ng + yi-chau.ng&bibliotheca.com +59255 + SPECIAL AGENT SERVICES DBA JET AFFILIATIONS + Jesse Wall + dc.jet.affiliations&gmail.com +59256 + Alphaledger + Alex Olaru + alexander.olaru&alphaledger.com +59257 + RackN Inc. + Greg Althaus + greg&rackn.com +59258 + RHS Research LLC + David Reynolds + dave&rhsresearch.com +59259 + Progeek Consulting, LLC + Matt Salo + matt&progeek.com +59260 + Nanbu works Inc. + Yoshimasa Kawano + nanbuwks+iana&nanbu.com +59261 + pme Familienservice GmbH + Michael Kühnen + pme-admin&familienservice.de +59262 + Retail Navigator LLC + Danilov Vladimir + it&retail-navigator.ru +59263 + Siemens DigitSphere + Zhang Peter + zihan.zhang&siemens.com +59264 + Bustec s.r.o. + Martin Fortik + mfortik&bustec.cz +59265 + Bayerische Staatsbrauerei Weihenstephan + Daniel Brauer + daniel.brauer&weihenstephaner.de +59266 + TriNetX LLC + Jason Hubbard + jason.hubbard&trinetx.com +59267 + Window Nation LLC + Oluseye Fadiran + ofadiran&windownation.com +59268 + Sol-Ark + Kennedy Echeverry + kennedye&sol-ark.com +59269 + iPoxo IT GmbH + Thomas Donhauser + t.donhauser&ipoxo.com +59270 + Novella Satcoms ltd + Luis pereira + Luis.pereira&novella.co.uk +59271 + Clearpoint Business Group, LLC. + Alexander Mellott + Alexander.Mellott&clearpointbusiness.com +59272 + Golden Rings Technologies Co., LTD + Mr. Jinhua Chen + jinhua1966&yahoo.com +59273 + Shenzhen 3SNIC information technology company Limited + Yanling Song + songyl&3snic.com +59274 + ReadyOp Communications, Inc. 
+ John Ohl + John&readyop.com +59275 + MadfooatCom For Electronic Payments + Mahmoud Jamaah + mjamaah&madfooat.com +59276 + EXATERRA LTD + Andrey Glushchenko + aglushchenko&exaterra.com +59277 + SWN Stadtwerke Northeim GmbH + Nils Peters + administrator&stadtwerke-northeim.de +59278 + D1GITAL GROUP Dawid Banaszewski + Dawid Banaszewski + kontakt&d1gitalgroup.pl +59279 + SinoVision Technology (Beijing) Co., Ltd + XiuYan.cao + xiuyan.cao1&sinovision-tech.com +59280 + Nexus (Tyne & Wear Passenger Transport Executive) + Craig Gilbert + ictservicedesk&nexus.org.uk +59281 + N3com + Petr Mitrofanov + pmitrofanov&n3com.ru +59282 + Systao + Gilles Dodinet + contact&systao.fr +59283 + DSV Air & Sea + Mark Tovey + mark.tovey&dsv.com +59284 + Seaboard Corporation + Dennis Ervin + dennis.ervin&seaboardfoods.com +59285 + DTM Worx + Nick Kariger + nkariger&gmail.com +59286 + Uni-Cast LLC + Jeremy Fine + jfine&uni-cast.com +59287 + Institute of Electrical Power Engineering of Warsaw University of Technology + Karol Kurek + karol.kurek&pw.edu.pl +59288 + Entek + Greg Straw + Greg.Straw&Entek.com +59289 + Burke County Public Schools + Melanie Honeycutt + mhoneycutt&burke.k12.nc.us +59290 + Cunningham Swan + Adam Saccon + asaccon&cswan.com +59291 + CloudForce + Geng Chen + chengeng&cloudforce.cn +59292 + RPV Transport AB + Roland Ekenberg + roland&rpv.nu +59293 + Maverick Technical Service + Anjan Reddy D L + info&mtsblr.co.in +59294 + Mindhash B.V. + Daniël van de Giessen + daniel&mindhash.nl +59295 + SRC UK Ltd + Simon Davidson + it&srcuk.com +59296 + POSTALIA BELGIUM + Bart Despontin + bart.despontin&easypost.eu +59297 + Philipp Butterhof + Philipp Butterhof + philipp.butterhof&outlook.de +59298 + Galbusera S.p.A. + Andrea Gaviraghi + andrea.gaviraghi&galbusera.it +59299 + HTP Microsystems GmbH + Peter Prechtl + peter.prechtl&htp-microsystems.com +59300 + Cross Design Group, Ltd. + Lynn Grant + lgrant&nevacross.com +59301 + LEGEND + wang zan + wangzan1&legendcf.com +59302 + GPEC SYSTEM SP. Z O.O. + IT System Operations + itso&gpec.pl +59303 + INBRES, LLC + Leonid Orlov + info&inbres.ru +59304 + Lion Elastomers Geismar + Eric Sawa + eric.sawa&lionelastomers.com +59305 + Versowood Oy + Timo Järvinen + timo.jarvinen&versowood.fi +59306 + METRO METALIKS LIMITED + Gautam Patel + postmaster&metrometaliks.com +59307 + CareConnections + Chris van de Steeg + chris&careconnections.nl +59308 + Sparkasse Bank dd BiH + Ajdin Alibegovic + Ajdin.Alibegovic&sparkasse.ba +59309 + Shina System Co., Ltd + Manseung Yoo + msyoo&shinasys.com +59310 + CHK Power Quality Pty Ltd + Chris Dubravs + c.dubravs&chkpowerquality.com.au +59311 + TU Energy Storage Technology (Shanghai) Co., Ltd + Kelly Tian + kelly.tian&tuestech.com +59312 + Gemeente Stadskanaal + Roelf Hulshof + systeembeheer&stadskanaal.nl +59313 + Westas Group Oy + Ville Järvinen + ville.jarvinen&westas.fi +59314 + National Telecommunication Corporation + Muhammad Kashif Fayyaz + kashif.fayyaz&ntc.net.pk +59315 + R&R Software Zrt. + Csaba Kalman + ifj.csaba.kalman&rrsoftware.hu +59316 + MB "Applicus" + Aidas Semezys + aidas&applicus.eu +59317 + itesys AG + Sven Dummel + sven.dummel&itesys.expert +59318 + BigCat TEK IT Services & Solutions, LLC + Francisco Urena + frank&bigcattek.com +59319 + dgos + Muhammad Al Mizan Ridho + info&dgos.id +59320 + Oakstead Inc. 
+ Michael Steele + hostmaster&oakstead.net +59321 + Stadtwerke Herborn GmbH + Alexander Ferber + it-service&stadtwerke-herborn.de +59322 + Kontron Transportation France S.A.S + Sylia Baraka + sylia.baraka&kontron.com +59323 + grommunio GmbH + Jan Engelhardt + info&grommunio.com +59324 + OptumServe Health Services, Inc + PKI Administrator + pkiadmin-public&optumserve.com +59325 + Solvacom Inc + Brice DEBOUT + brice.debout&solvacom.us +59326 + Shenzhen Xbrother Technologies Co.,Ltd. + zhenjie ou + ouzhenjie&xbrother.com +59327 + shenzhen HAIOT technology co.,Ltd + Jeff.li + 25939353&qq.com +59328 + UNIFIED HEALTH ASSOCIATES, INC + Pejman Mansourian + pmansourian&uhaclinic.com +59329 + Dioss Smart Solutions + Saar De Zutter + saar.de.zutter&dioss.com +59330 + WeAre Web Designs + Anthony Weare + anthony.weare&yahoo.com +59331 + Vizatas Technology Solutions, LLC + Mark Smith + mark.smith&vizatas.com +59332 + INNOVIA SOLUCOES INTELIGENTES + Diego Carlos + diego.carlos&innoviasolucoes.com.br +59333 + Cancer Care Specialists-Carson + Wesley Falconer + wesleyf&ccsreno.com +59334 + QGE Pty Ltd + Paul Wicks + it&qge.com.au +59335 + Higgins Coatings + Martin Yang + e.myang&higgins.com.au +59336 + Wuhan Maiwe Communication Co., Ltd + Hu Hao + huhao&maiwe.com +59337 + Electronic Certification Accreditation Council + Muhammad Bin Qasim + muhammad.binqasim&ecac.gov.pk +59338 + kvm-tec + Markus Novak + markus.novak&kvm-tec.com +59339 + A. Eberle GmbH & Co. KG + Stefan Lämmermann + stefan.laemmermann&a-eberle.de +59340 + Qynapse + Olivier Courrèges + administrators&qynapse.com +59341 + Net Alliance + Luke Wu + 295087822&qq.com +59342 + Stadtwerke Einbeck GmbH + Justin Staeffler + it&stadtwerke-einbeck.de +59343 + Vworker Technologies + Umair Khalid + ukb.644&gmail.com +59344 + SeaCast, Inc. + Anthony Guerrero + oidadmin&seacast.com +59345 + beckmann.ch + Stefan Beckmann + stefan&beckmann.ch +59346 + HAKOM Time Series GmbH + Raphael Siebenhofer + raphael&hakom.at +59347 + Hangzhou Sanhui Software Co.,Ltd + 郭康君 (Guo Kangjun) + guokangjun&synway.com +59348 + Hillside Nordic AB + Patrik Jarlten + support&hillsidenordic.se +59349 + W4M Solutions + Fernando Marcos + sysadmin&w4msolutions.com +59350 + ITMIM CA Authority + Habib Abbasov + hebib.atilla&gmail.com +59351 + AS G4S Eesti + Dmitri Sabunov + dmitri.sabunov&ee.g4s.com +59352 + Evangelische Schulgemeinschaft Erzgebirge e.V. + Wolfram Günther + it&ege-annaberg.de +59353 + Quantum Meta Holding Corporation + Adam Glynn + support.iana.pen&quantummeta.us +59354 + Telecom Italia Sparkle S.p.A. + Marco Bruti + marco.bruti&tisparkle.com +59355 + Inevitable IT Solutions + Dennis Brands + dbrands79&gmail.com +59356 + Redback Technologies + Redback IT Operational Team + ITops&redbacktech.com +59357 + GlobalTill, Inc. + Michael MacKinnon + mike&globaltill.com +59358 + Beijing Zhongzhi Huida Technology Co., Ltd + ying.tang + 309074162&163.com +59359 + Adscanner d.o.o. + Ivan Pek Tasner + itasner&adscanner.tv +59360 + Axians RSC + Pepijn Stulemeijer + pepijn.stulemeijer&axians.com +59361 + China UnionPay Merchant Services Co., Ltd (ChinaUMS) + Xin Zheng + xinzheng1&chinaums.com +59362 + Think2Future S.r.l. 
+ Massimiliano Calandrelli + massimiliano.calandrelli&think2future.it +59363 + Shandong Broadcasting and Television Info-Com Co.,Ltd + wei xiao + xw&sdtv.cn +59364 + The Dennis Group + Calen Coggeshall-Burr + ITAdmin&dennisgroup.com +59365 + Arihant Info Systems + Rajesh Kumar Sethi + arihantinfoghy&gmail.com +59366 + Loongson Technology Corporation Limited + Chao Li + lichao&loongson.cn +59367 + Energieversorgung Gera GmbH + Michael Barz + it.account&egg-gera.de +59368 + CIG Grande Couronne + Daniel LEVEL + pole.projet&cigversailles.fr +59369 + APA-IT Informationstechnologie GmbH + Jose Tojo + iamop&apa.at +59370 + RAGTECH + Ricardo Tavares da Silva + rsilva&ragtech.com.br +59371 + fidar electronics co. + saman nikzad + s.nikzad&fidarelectronics.com +59372 + Tigo Technology Center + TTC Cloud Delivery Team + ttc-cloud-delivery&millicom.com +59373 + Redbyte + Sean Reid + penquiries&redbyte.net +59374 + rostech.at + Administrator + admin&rostech.at +59375 + Centre Eugène Marquis + David BARATTE + d.baratte&rennes.unicancer.fr +59376 + Datapod (Australia) Pty Ltd + Anthony Cheung + anthony.cheung&datapod.com.au +59377 + Ahoku Electronic Company + Christine Lee + christine.lee&ahoku.com +59378 + euroNAS GmbH + Tvrtko Fritz + support&euronas.com +59379 + GOLDBECK GmbH + Mike Eichner + mike.eichner&goldbeck.de +59380 + Authing + Mathew Chan + mathew.chan&authing.cn +59381 + Projekt Pomorskie e-Zdrowie + Alicja Pęśko + ezdrowie&pomorskie.eu +59382 + Lazzate Cia. Ltda. + Medardo Silva + gerald.moreno&lazzatecorp.com +59383 + Silicon Curtain + Charles McCauley + Chuck.McCauley&SiliconCurtain.net +59384 + Fiinu Bank Ltd + Timo J. Rinne + timo.rinne&fiinu.com +59385 + Marqeta + John Kim + network&marqeta.com +59386 + Bulat Co., Limited + Crystal Huang + sales&bulat.com.cn +59387 + Tondo Solutions + Ferdinand Silva + info&tondosolutions.com +59388 + Eren DOGAN + Eren Dogan + eren_dogan&yahoo.com +59389 + Docbyte NV + Tom De Cubber + qtsp&docbyte.com +59390 + Holu Hou Energy, LLC + Romano Pauli + romano.pauli&holuhou.com +59391 + Solutions Heptaèdre Inc. + Eric Martel + eric.martel&SolutionsHeptaedre.com +59392 + Da Who + Randall Vogsland + acer&dawho.com +59393 + VNPT Net3 + Tiến Hưng Nguyễn + nguyentienhung&vnpt.vn +59394 + Trustnhope + Trustnhope + shjhwoo&trustnhope.com +59395 + sonnen Australia Pty Ltd + Leonid Kukarin + l.kukarin&sonnen.com.au +59396 + NADDOD + Qin Long + long.qin&naddod.com +59397 + Johnson County Kansas Government + Christopher Foltz + inetadmin&jocogov.org +59398 + GSW Gemeinschaftsstadtwerke GmbH + Sebastian Moecklinghoff + it&gsw-kamen.de +59399 + TeaTime VertriebsgmbH + Michael Haider + it-admin&teatime-austria.com +59400 + VertiSoft Corp + Donald French + dhfrench&vertisoft.com +59401 + dMux Sistemas + Maurício Mauad Menegaz Filho + mmauad&gmail.com +59402 + Wi-Tek + Alun Wang + luke&wireless-tek.com +59403 + CALM Oy + Tommi Grönlund + tommi.gronlund&calm.fi +59404 + Berolina Metallspritztechnik Wesnigk GmbH + Benjamin Neuse + administrator&metallspritztechnik.de +59405 + INDEX EDUCATION + Romain Bonomini + rbonomini&index-education.fr +59406 + Citel S.R.L. + Carelli Giuseppe + g.carelli&citel.it +59407 + Polynoia + Peter Rindfuss + peter.rindfuss&gmail.com +59408 + Wink Stanzwerkzeuge GmbH & Co. KG + Thomas Neuwinger + thomas.neuwinger&wink.de +59409 + Ministerio de Salud + Jhon Poveda + jpoveda&minsalud.gov.co +59410 + ZECO Energy + Jag Makam + admin&zecoenergy.com.au +59411 + PLATH Signal Products GmbH & Co. 
KG + Steffan Klimkiewicz + Steffan.Klimkiewicz&plath-signalproducts.com +59412 + 广州中维安特电子科技有限公司 (Guangzhou Zhongweiant Electronic Technology Co., Ltd.) + 陈英华 (yinghua chen) + 821908722&qq.com +59413 + SIS GmbH + Michael Salewski + timo.kuehn&sis.ag +59414 + Energieversorgung Sylt GmbH + Oliver Otto + o.otto&stadtwerke-husum.de +59415 + Stadtwerke Husum GmbH + Oliver Otto + o.otto&stadtwerke-husum.de +59416 + Transport BORIE + Hervé BRUN + transport.borie&orange.fr +59417 + Natwest Group Plc + Michele Proctor + michele.proctor&natwest.com +59418 + Nash twins + Josh Nash + jjnash77&gmail.com +59419 + Tarkett + Aaron Mack + aaron.mack&tarkett.com +59420 + Altice Dominicana + Nicolas Vidal + nvidal&altice.com.do +59421 + Altice Dominicana + Nicolas Vidal + nvidal&altice.com.do +59422 + Mountain Brook Schools + Jason C Falconer + falconerj&mtnbrook.k12.al.us +59423 + Xiamen Kehua Digital Energy Tech Co., Ltd. + Jianfu Li + lijianfu&kehua.com +59424 + SALZ Automation GmbH + Thomas Holm + thomas.holm&salz-automation.com +59425 + eSIX + Kinson Chan + marketing&esix.co +59426 + Galgus + Jesús Fernández Manzano + jesus.manzano&galgus.net +59427 + Cyber Insight + Maksim Zaikin + Max.V.Zaikin&gmail.com +59428 + Spoon Technologies + Network Operations + support&spoontech.biz +59429 + Specialised Assistance School for Youth + David Beaumont + drb&sasy.sa.edu.au +59430 + Fisher & Paykel Healthcare + Philip Rowe + philip.rowe&fphcare.co.nz +59431 + ALFA + WAEL Mustafa + alfacosmatics&gmail.com +59432 + Heinrich Peters + Heinrich Peters + iana&heinrich-peters.de +59433 + Gemeente Veendam + Bert Starke + bert.starke&gridnv.nl +59434 + Gemeente Pekela + Bert Starke + bert.starke&gridnv.nl +59435 + Afeer + Bert Starke + bert.starke&gridnv.nl +59436 + Gemeente Oldambt + Bert Starke + bert.starke&gridnv.nl +59437 + BCOM Networks Limited + Alex Woo + ae&bcom-net.com +59438 + CyberInx Limited + Cardiff Office + CardiffOffice&cyberinx.co.uk +59439 + Hellenic Mediterranean University (HMU) + Charampos Tsatsarakis + babis&hmu.gr +59440 + Fanvil + Tony Zhu + haibo.zhu&fanvil.com +59441 + Nextera Video, Inc + J. Deame + drdme&comcast.net +59442 + Kino Polska TV S.A. + Nikolay Stoyanov + it&kinopolska.pl +59443 + RELIEF VALIDATION LIMITED + ASM ASHRAFUZZAMAN + asm.ashraf&reliefvalidation.com.bd +59444 + Washington Office of the Secretary of State + Samuel Anderson + ciso&sos.wa.gov +59445 + S COP GmbH + Andreas Habedank + andreas.habedank&s-cop.bayern +59446 + Shenyang FCH Software Co.,Ltd + Jiazhengfeng + jiazhengfeng&fchsoft.com +59447 + Meadowfield Associates Ltd + Dean Scott + deanscott_48&yahoo.co.uk +59448 + ENet Network Security Research Group (ENSR) + Christopher M Blake + enet&net.tnn.cc +59449 + VINFAST TRADING AND PRODUCTION JOINT STOCK COMPANY + Trần Việt Đức + v.ductv12&vinfast.vn +59450 + Dumont Automation + Tiago Rosolen + contact&dumontautomation.com +59451 + QuantumIOT + Michael Walti + mwalti&quantumiot.com +59452 + Dama11 + Juergen Key + jkey&arcor.de +59453 + Perth Airport Pty Ltd + Damien Derby + damien.derby&perthairport.com.au +59454 + iWay AG + Andy Horvath + snmp&as8758.net +59455 + Gemeente Westerwolde + Bert Starke + bert.starke&gridnv.nl +59456 + RWLP + Bert Starke + bert.starke&gridnv.nl +59457 + Tasteful Selections + Dane Witham + dane.witham&tastefulselections.com +59458 + ENSA Strasbourg + Severine GUTIERREZ + informatique&strasbourg.archi.fr +59459 + Ceson Skog AB + Anders Carlsson + anders&cesonskog.se +59460 + Triton Digital a.s. 
+ Miloš Sedláček + milos.sedlacek&triton.eu +59461 + London Borough of Enfield + Mark Aylwin + LBEServerInfrastructure&enfield.gov.uk +59462 + iDenfy + Domantas Ciulde + info&idenfy.com +59463 + Polmlek Sp. z o.o. + Mateusz Szczodruch + serwis.it&grupapolmlek.com +59464 + ICTKholdings + DoKyung Yeo + dkyeo&ictk.com +59465 + dtriger INC. + youjun yao + 413019541&qq.com +59466 + Mohsin Ansari MD FAAP PC + Saba Khan + sabamohsin&hotmail.com +59467 + Evolynx + François Robeyn + francois.robeyn&evolynx.eu +59468 + VANTIS II SOFTWARE,LDA + Ricardo Lacerda + ricardo.lacerda&vantis.pt +59469 + Sigilence Technologies + Remi Saurel + it&sigilence.com +59470 + magniX + John Troup + support&magnix.aero +59471 + Eberl Internationale Spedition GmbH & Co. KG + Bernhard Genghammer + it&spedition-eberl.de +59472 + Algar Telecom S/A + Cesar Augusto Carvalho Vieira + carvalho&algartelecom.com.br +59473 + China Society of Automotive Engineers (CSAE) + Jie Zhang + zjdazhuang&gmail.com +59474 + JSC KUCHUKSULPHATE + Pavel Nedoshivkin + npg&kuchuk.ru +59475 + АО «НТЦ ЕЭС Информационные комплексы» (JSC "NTC UES Information complexes") + Савилова Зоя (Savilova Zoya) + ic&ic.ntcees.ru +59476 + La Compagnie du Logement + Service Informatique + contact.pki&cie-logement.fr +59477 + Racklive + Joel Wineland + joel&racklive.com +59478 + Liljestig + Dan Liljestig + dan&liljestig.com +59479 + JORA Holding GmbH & Co. KG + Sven Uttinger + Sven.Uttinger&jora-holding.com +59480 + Analytical Instrumentation Services + Daniel Kasdan + Dankasdan&aisanalytical.com +59481 + Trustoic PKI Consulting + Gareth Hind + gareth&trustoic.com +59482 + Rady Children’s Hospital - San Diego + Parham Gholami + 3DPrintLab&rchsd.org +59483 + IUT de Bordeaux + Jordane Pluchon + jordane.pluchon&u-bordeaux.fr +59484 + Nafiux + Ignacio Ocampo Millan + ignacio.ocampo&nafiux.com +59485 + Invenco Group Ltd + James Pullen + james.pullen&invenco.com +59486 + TANLA PLATFORMS LIMITED + Pavan Kumar Kuchibhatla + pavankumar.k&tanla.com +59487 + JOHANN BUNTE Bauunternehmung SE & Co. KG + Henning Hanekamp + HanekampH&johann-bunte.de +59488 + arkona technologies GmbH + Rainer Sturm + r.sturm&arkona-technologies.de +59489 + aiMotive + Gergely Juhasz + gergely.juhasz&aimotive.com +59490 + OpenDrives + STEVEN MCLANE + s.mclane&opendrives.com +59491 + Joint Genome Institute + Georg Rath + gbrath&lbl.gov +59492 + imvaria inc. + Don Bigler + dbigler&imvaria.com +59493 + CRAG Consultants + Jane Waight + jw&chg.me.uk +59494 + Preston Meyer Group + Georg Meyer + dev&pmgroup.ch +59495 + Lion Elastomers Geismar + Eric Sawa + eric.sawa&lionelastomers.com +59496 + Shenzhen Phoenix Telecom Technology Co.,Ltd + xiang cheng + chengxiang&phoenixcompany.cn +59497 + MPEON Co, Ltd. + Shin, Seongho. + ssh&mpeon.com +59498 + PT Juke Solusi Teknologi + Iwan Setiawan + ncss.juke&gmail.com +59499 + PT Punggawa Siber Solusi + Zharfan Nugroho + zharfan.nugroho&punggawa.id +59500 + Innofriends GmbH + Markus Fischbacher + info&innofriends.world +59501 + Billund Aquaculture + Morten Bojer + license&billundaquaculture.com +59502 + Get-Random + Johan Åkerström + jakerstrom&gmail.com +59503 + Docentric d.o.o. 
+ Matija Vrtačnik + admin&docentric.com +59504 + Mobtaker Ofoq + mohammad habibi + hsmo&mobtakerofoq.ir +59505 + Devuan + Andreas Messer + andi&bastelmap.de +59506 + Medical West Hospital + Scott Rogers + scott.rogers&uabmw.org +59507 + Nippy Networks + Ed Wildgoose + support&nippynetworks.com +59508 + Banco Central de Reserva de El Salvador + José Elías Alvarado + elias.alvarado&bcr.gob.sv +59509 + Beijing Guanglida Technology Co.,Ltd + Sam Wang + 402341951&qq.com +59510 + MasterCom Technology Services India Pvt Ltd + Sreenivasa Nagappa + sreenivasa.n&mastercom.co.in +59511 + concept610 + jean claude sageaux + jeanclaude.sageaux&free.fr +59512 + True Blue Sphere + Anderson Manly + anderson&truebluesphere.com +59513 + Summit Rock Advisors + Anthony Tsang + anthony.tsang&summit-rock.com +59514 + Coder Enterprises + Mark Ottaway + mark.ottaway&coderenterprises.co.uk +59515 + DANAM Systems + TAEKMYUNG, WOO + wootm&danam.co.kr +59516 + Autel New Energy Co., Ltd + Yuxing Zou + zouyuxing&autel.com +59517 + Stor Services Limited + Yiorgos Adamopoulos + iana-pen&storfund.com +59518 + Salisbury NHS Foundation Trust + Jon Arnold + apprnz.infra&nhs.net +59519 + SBK Telecom + Manuel Deschambault + manuel&sbktelecom.com +59520 + REDX TECHNOLOGY AUSTRALIA PTY LTD + Adam Wang + redxtechau&gmail.com +59521 + Shawarren.net + Jamie Shaw + jamie&shawarren.net +59522 + Shabodi Corp + Harpreet Geekee + hgeekee&shabodi.com +59523 + Bigtera + HM.Yang + info&bigtera.com +59524 + Kuko Home LAB + Bostjan Kukovicic + bostjan&e-kuko.eu +59525 + A4 Elite Engineering + Alberto Valez Maya + alberto.valez&a4ee.tech +59526 + Hangzhou LUXAR Technologies Co.Ltd. + Xiaohui Zhou + zhouxiaohui&luxartech.cn +59527 + Miltenyi Biotec B.V. & Co. KG + Dr. Uwe Engeland + U.Engeland&miltenyi.com +59528 + Logansport Memorial Hospital Cancer Care Center + Paul Soderlund + psoderlund&logansportmemorial.org +59529 + Unitas Global + Mike Simkins + mike.simkins&unitasglobal.com +59530 + SSOSEC + Suri Madhira + ms&ssosec.com +59531 + Hunan RongTeng Networks Technology Co., Ltd. + Tong Long + support&rtnetworks.com.cn +59532 + MAURER SE + Denis Härtinger, Kai Schittko + helpdesk&maurer.eu +59533 + FamNet.at + Stefan Reisinger + mail&famnet.at +59534 + American Cardiology, LLC + Tonya Saulsbury + tsaulsbury&amcareclinic.com +59535 + 3ntropy Technologies + Mariano Chingotto + mchingotto&3ntropy.net +59536 + ZIMA TARASHE TOOS IT AND TELECOM COMPANY + MOHSEN ATTARI + M.ATTARY&MSGROUP.IR +59537 + TsingYao Technology Limited + JunQing Gu + op&tsingyao.net +59538 + KliqSpot + Arulselvan Radhakrishnan + arulselvan&espinait.com +59539 + cryptMail.io + Christian Ermel + ce&cryptmail.io +59540 + Integración Digital Ingeniería + Software development team + integracion&integraciondigital.es +59541 + FCWSistemas - TI + Frederico C Wilhelms + fredcw&gmail.com +59542 + Liea Tecnologia Ltda + Alberto Ferrira da Silva + alberto&liea.com.br +59543 + Alberto Silva + Alberto Ferreira da Silva + alberto.silva&lieax.com.br +59544 + Xronex Corporation + Mohamad Hazizi + admin&mohamadhazizi.com +59545 + Sierra County + Jeremy Miller + crussell&sierracounty.ca.gov +59546 + Lightmatter, Inc. 
+ IT Administrator + pen&lightmatter.co +59547 + 2WAY + Richard van Eijs + richard.van.eijs&2-way.nl +59548 + Survalent + Rick Hayden + rickhayden&survalent.com +59549 + Elettronica GmbH + Dejan Stankovic + it_elt&elettronica.de +59550 + PM Group GmbH + Manuel Waldner + it&pmgroup.de +59551 + TASSTA GMBH + Jana Gebauer + mcx&tassta.com +59552 + Jayketek Solutions + Jacob Boily + j.boily&jayketeksolutions.com +59553 + JMCME LLC + Jacob Cooke + jacobmcooke&jmcme.org +59554 + Cleaning901.com + Ebony Leake + Karmellebony&gmail.com +59555 + 山东英特力光通信开发有限公司 (Shandong Intelli Optical Communication Development Co., Ltd.) + 王飞 (Wang Fei) + wangfeicat126&126.com +59556 + TRANSPORT SOPYBOIS + Marie SENTENAC + sopybois31&gmail.com +59557 + Transports Ligneul + Jérôme Ligneul + transportsligneul&wanadoo.fr +59558 + karev.ch + Ivan Karev + ivan&karev.ch +59559 + MeshPlusPlus, Inc. + Danny Gardner + danny&meshplusplus.com +59560 + CA VESOUL + Quentin AIRAULT + quentin.airault&vesoul.fr +59561 + LABITI + jean-marc pigeon + Virtualisation&iti.ca +59562 + Beatrice Community Hospital and Health Center + Ken huber + kahuber&bchhc.org +59563 + Diputación Provincial de Albacete + Alejandro J Delgado-Gomez + a.delgado&dipualba.es +59564 + Landratsamt Boeblingen + Patrick Strohm + p.strohm&lrabb.de +59565 + VDL Groep B.V. + Ron Peeters + r.peeters&vdl.nl +59566 + T-COM LLC + Aleksandr Burnatsev + asburnatsev&rosatom.ru +59567 + PACCAR Inc. + Koen Savenije + Koen.savenije&paccar.com +59568 + Ekip Telekom + Özgür Özdurhan + o.ozdurhan&ekiptelekom.com +59569 + Beijing Shengshi Gongying Communication Technology Co., Ltd + Han Lu + 30857203&qq.com +59570 + LMN Software Corp + John Day + John&lmnsoftwarecorp.com +59571 + Sibopt, LLC + Paul V. Shvets + it&sibopt.ru +59572 + Exonetik + Jerome Marchand + jerome.marchand&exonetik.com +59573 + Jan F. Orth + Jan F. Orth + janforth&me.com +59574 + Solution 2000 + Richard Simard + richard.simard&groupesti.com +59575 + CMX Group + Nor Azizan Morban + cmxgroup&cmxcorp.group +59576 + CYMERTEK + Jason Freedman + iana&cymertek.com +59577 + Telecommunicatiebedrijf Suriname ( TELESUR ) + Wendell Weigle + wendell.weigle&telesur.sr +59578 + CYBERSECURITY.KR + donggeun lee + loon&cybersecurity.kr +59579 + Valence Group, Inc. + Terence Caravan + smp.valence&gmail.com +59580 + Skylane Optics + Patrick Beard + Patrick.beard&skylaneoptics.com +59581 + BotGuard OÜ + Denis Prochko + d.prochko&botguard.net +59582 + Shengli Technologies Co., Ltd + guhuanfeng + gu.huanfeng&shenglisoft.com +59583 + Ptavvs.Net + John Hall + ptavvsnet&gmail.com +59584 + MoeYork + Luo Yu + luoyu&york.moe +59585 + Luminar Technologies, Inc. + Adolfo L. Floer + adolfo.floer&luminartech.com +59586 + MAHESH CHANDRA + MAHESH CHANDRA + VIRUGEET74&GMAIL.COM +59587 + Magnolia Bank + Clint Phelps + network&magnoliabank.com +59588 + Laboratoire Méthodes Formelles + Dietmar Berwanger + dietmar.berwanger&lmf.cnrs.fr +59589 + Canopy Growth + Chris Rodgers + chris.rodgers&canopygrowth.com +59590 + NPO RTT LLC + Eugeny Zavizion + npo_rtt&hotmail.com +59591 + Logeo Seine + Denoix Kevin + kevin.denoix&logeo.fr +59592 + Take 2 Identity, Inc. + Bertold Kolics + bkolics&443id.com +59593 + Alereon, Inc + Andrew Kephart + it&alereon.com +59594 + Yunnan Nantian Electronics Information Corp.,Ltd. 
+ Wu Hao + wuh&nantian.com.cn +59595 + Busse Design+Engineering GmbH + Roland Zmija + it-support&busse-design.com +59596 + Palantir Technologies + Eric Anderson + eanderson&palantir.com +59597 + Municipality of Central Huron + Cecil Coxen + it&centralhuron.com +59598 + Elektrotechnik Nagel + Stefan Nagel + oid&as211635.net +59599 + James Law + James law + iana-pen&jameslaw.co.uk +59600 + Polestar Performance AB + Anders Runesson + cryptography.support&polestar.com +59601 + Hope Community Resources + Anthony Patarini + apatarini&hopealaska.org +59602 + Comprehensive Blood & Cancer Center + Karla Kim + kkim&cbccusa.com +59603 + Century Yuasa Batteries + Alexey Lobanov + alexey.lobanov&cyb.com.au +59604 + Statens geotekniska institut + Hans Jonsson + sgi&sgi.se +59605 + Andrea Perfetti + Andrea Perfetti + andrea&andreaperfetti.it +59606 + FAREVA + Julien Ricard + jricard.corporate&fareva.com +59607 + Tygerclan + Ben Tyger + ben.tyger&tygerclan.net +59608 + «HYTex» Limited Liability company («HYTex» LLC) + Sergey Mikhaylov + sm&hytex.ru +59609 + Gelarm LLC + Aleksandr Solomakha + info&gelarm.ru +59610 + Best Secret + Michael Szekely + cts-infrastructure&bestsecret.com +59611 + ITENERUM + Damian Cikowski + iana&itenerum.com +59612 + Stoll Berne + Matthew Clover + mclover&stollberne.com +59613 + Katzkin Leather + Paul Marcelonis + pmarcelonis&allsafeit.com +59614 + Miramar Consulting + Oscar Javier Perez + oscar.perez&miramar.consulting +59615 + General Electric (GE) HealthCare + Hasan Palaz + Hasan.Palaz&ge.com +59616 + Jonathan Foreman + Jonathan Foreman + svt540&gmail.com +59617 + Mulliken LLC + Katie Mulliken + katie&mulliken.net +59618 + FEV Europe GmbH + Ulrich Zimmer + zimmer_u&fev.com +59619 + Kühn Controls AG + Raúl R. Steimbach + raul.steimbach&kuehn-controls.de +59620 + Zhengzhou GEP Ecotech Co,. Ltd + Harris Yang + vip&hitoy.org +59621 + Aaron N Martin LLC + Aaron N Martin + aaron.n.martin&martindom.com +59622 + World Mobile Group Limited + Ed Guy + edguy&eguy.org +59623 + LeoNet Dariusz Leonarski + Dariusz Leonarski + dariusz&leonarski.pl +59624 + SRS Distribution Inc. + Rick Brewer + rick.brewer&srsdistribution.com +59625 + Country-Wide Insurance Company + Jayson Harrington + networksupport&cwico.com +59626 + ShenZhen Ubix Technology Co., Ltd + Yanli Zhang + zhangyanli&ubix.cn +59627 + WRP srl + Roberto Pagano + wrp&wrp.it +59628 + Confidence Management Ltd + Braydn Genik + bgenik&fwsgroup.com +59629 + Grand River Hospital + Rob Parnell + rob.parnell&grhosp.on.ca +59630 + TCNEWCO + Marcus Oh + marcus.oh&ey.com +59631 + Aoife Róisín Finch + Aoife Róisín Finch + aoife.iana&finch.ink +59632 + KLS CONSTRUCCIONES + Daryl Trujillo + sistemas&klsconstrucciones.com +59633 + Joanand Kuharajasekaram + Joanand Kuharajasekaram + joanandk&joanand.ch +59634 + CenturionSign.eu + Mr. Simon Weidner + info&centurion.ovh +59635 + Kinemetrics + Christina Nunez + cn&kmi.com +59636 + Antenna Hungaria ZRt. + Zsolt Cser + cserz&ahrt.hu +59637 + Stichting NTR + Michel van der Zijden + netwerkbeheer&ntr.nl +59638 + Viking Global Investors + Nate Carta + ncarta&vikingglobal.com +59639 + GLORY Technology Limited Inc + Hong Xuheng + 9509238&163.com +59640 + zyxyhb info + bill lee + leebilly007&163.com +59641 + Nexpoint + Boyd Gosserand + netadmin&nexpoint.com +59642 + IMBEL - Industria de Material Belico do Brasil + Luiz Renault Leite Rodrigues + renault.fmce&imbel.gov.br +59643 + Symbiat Ltd + Derek Russell + info&symbiat.co.uk +59644 + URMAZI Networks INC. 
+ David Lin + david.lin&urmazi.com +59645 + Global IT Consultancy + Kathryn Barney + office&synapse-it.com +59646 + SHENZHEN TENDA TECHNOLOGY CO;LTD + chenhongxiang + chenhongxiang&tenda.cn +59647 + PCI-SIG + Amanda Cruz + administration&pcisig.com +59648 + HRL Laboratories + Joseph Prokop + neteng&hrl.com +59649 + Coop Pank AS + Martin Paljak + martin.paljak&cooppank.ee +59650 + Neatframe LTD + Ivar Johnsrud + ivar&neat.no +59651 + Prometic Bioproduction Inc. + Yann Bourgault + y.bourgault&prometic.com +59652 + Division of Planning and Finance - National Electronic Authentication Centre + Khang Nguyen + kenshine0212&gmail.com +59653 + Enerkem + Enerkem IT + enerkem.it&enerkem.com +59654 + North Carolina State Highway Patrol - VIPER + D. Tucker Sizemore + tucker.sizemore&ncdps.gov +59655 + ISIhr, inc. + Adam Schildmeyer + aschildmeyer&stratus.hr +59656 + CATCH Power + Jason de Jong + jason&catchpower.com.au +59657 + CRIFST (China Research Institute of Film Science & Technology) + Zhang Xin + zhangxin&crifst.ac.cn +59658 + Nucleo S.A - Personal Paraguay + Nestor Cardozo + Nestor.Cardozo&personal.com.py +59659 + Storkship - Navegação, Trânsitos e Logística, SA + Iria Soares + iria.soares&horizon-view.pt +59660 + uit + huang wenhua + disandai&126.com +59661 + Germinet Sdn Bhd + Marn Chia + marn&germinet.com +59662 + Informacinės visuomenės plėtros komitetas + Sertifikatu Administratorius + cert&ivpk.lt +59663 + TRT-VERİM + Kaan Bayazıt + omerkaan.bayazit&trt.net.tr +59664 + Alpine Quantum Technologies GmbH + AQT Admin + oidadmin&aqt.eu +59665 + Sandhiguna + Gilang Mentari Hamidy + gilang&sandhiguna.com +59666 + Conseil National des Barreaux + Samuel Renard + s.renard-ext&cnb.avocat.fr +59667 + Fondation Campus Biotech Geneva + Benoit Dubuis + it&fcbg.ch +59668 + Mosaic Smart Data Limited + Nick Chadwick + nick&mosaicsmartdata.com +59669 + COMMON NET S.r.l. + Emanuele Altomare + emanuele&common-net.org +59670 + Sandoz AG + Rohit Babar + rohit.babar&novartis.com +59671 + STRATUM LABS S.A. + Agustín Matías Rodríguez + agustin.rodriguez&stratum-labs.com +59672 + DefProc Engineering + Patrick Fenner + contact&defproc.co.uk +59673 + Verisys Corporation + Ben Greene + it_operations+ianaoid&verisys.com +59674 + COROS Wearables, Inc. + Wangxiaohu + wangxiaohu&coros.com +59675 + Aquila Technology, LLC + Phil Tull + webmaster&aquilatech.com +59676 + Granaasen + Ole Peder Granaasen + granaasen&gmail.com +59677 + Skyview Group + Boyd Gosserand + netadmin&skyviewgroup.com +59678 + CDO LTD + Dan Oberg + iana.admin&cdo.xyz +59679 + ThreatKey, Inc. + Carlos Beltran + engineering&threatkey.com +59680 + Amoeso + Brandon Pan + brandon.pan&amoeso.com.tw +59681 + junchenghe + junchenghe + 598497820&qq.com +59682 + linki + Nikita Ivanov + nikita&linki.systems +59683 + Idelco + Dries Coppens + support.be&axians.com +59684 + Matthias Fulz + Matthias Fulz + mfulz&olznet.de +59685 + PK-PACS + G.W. Habraken + gw&taglio.com +59686 + UNIVERSITY HOSPITAL + Dorothy Seiler + Dorothy.Seiler&lhsc.on.ca +59687 + WOODSTOCK HOSPITAL + Dorothy Seiler + Dorothy.Seiler&lhsc.on.ca +59688 + ALEXANDRA HOSPITAL + Dorothy Seiler + Dorothy.Seiler&lhsc.on.ca +59689 + Four Counties Health Services + Dorothy Seiler + Dorothy.Seiler&lhsc.on.ca +59690 + LISTOWEL MEMORIAL HOSPITAL + Dorothy Seiler + Dorothy.Seiler&lhsc.on.ca +59691 + St. 
Joseph’s Health Care London + Dorothy Seiler + Dorothy.Seiler&lhsc.on.ca +59692 + South Huron Hospital + Dorothy Seiler + Dorothy.Seiler&lhsc.on.ca +59693 + St Thomas Elgin General Hospital + Dorothy Seiler + Dorothy.Seiler&lhsc.on.ca +59694 + Strathroy Middlesex General Hospital + Dorothy Seiler + Dorothy.Seiler&lhsc.on.ca +59695 + Tillsonburg District Memorial Hospital + Dorothy Seiler + Dorothy.Seiler&lhsc.on.ca +59696 + Wingham and district hospital + Dorothy Seiler + Dorothy.Seiler&lhsc.on.ca +59697 + Victoria Hospital + Dorothy Seiler + Dorothy.Seiler&lhsc.on.ca +59698 + Fisher College + Fisher IS + fisheris&fisher.edu +59699 + TeamSystem S.p.A. + Marco Scognamiglio + msc&bit4id.com +59700 + Calandrilli L Dotson + Calandrilli L Dotson + calandrillid&gmail.com +59701 + Mustola Timber Oy + Raine Koskinen + raine.koskinen&mustolatimber.fi +59702 + Anhui Shengjiaruiduo Electronic Technology Co., Ltd. + Pengcheng Zhu + 373747464&qq.com +59703 + IPVideo Corporation + Ken Blume + ipv_admin&ipvideocorp.com +59704 + Thomas Mack + Thomas Mack + toking&toking.de +59705 + Coeur dAlene Tribe + Network Admin + networkadmin&cdatribe-nsn.gov +59706 + Elk Valley Mining Corporation + Kevin Gould + kevin.gould&teck.com +59707 + Studio Wromo Marketplace + Iulian Ghepes + info.wromo&gmail.com +59708 + Thomas Bella + Thomas Bella + thomas&bella.network +59709 + Behnam Saki + Behnam Saki + behnam.saki2004&gmail.com +59710 + Sysdex AG + Gunnar Greiter + gunnar.greiter&sysdex.ch +59711 + Maricopa Community College District + Julie Morgenthal + julie.morgenthal&domail.maricopa.edu +59712 + Peter Zgraggen + Peter Zgraggen + peter&zgraggen.net +59713 + Iconoclasta Digital Endeavours + PKI Development Authority + developer&iconoclasta.co +59714 + Autodoc SE + Autodoc + it_de&autodoc.eu +59715 + Devialet + Alexandre ESSE + alexandre.esse&devialet.com +59716 + Hangzhou Asia Infrastructure Tech. Co., Ltd + Sunny Liu + sunny&aitc.net +59717 + Anhui Wantong Posts and Telecommunications Co.,Ltd. + Wang Jianguo + wangjianguo&ahwt.com.cn +59718 + Interconnect Services B.V. + Lars Hacken + l.hacken&interconnect.nl +59719 + Chengde TsingYao Network Technology Service Co., Ltd + JunQing Gu + tsnet&tsingyao.com +59720 + DotEvolve + Mohd Sadiq + mohd.sadiq&DotEvolve.co.in +59721 + SpecCityStroy + Maxim + msofronov&bk.ru +59722 + Jamui Hunt + Akshay Kumar + jamuihunt&gmail.com +59723 + nWise AB + Fredrik Andreasson + fredrik.andreasson&nwise.se +59724 + Schwester Euthymia Stiftung + Carsten Giehoff + hotline&ses-stiftung.de +59725 + Servicio de Administración de Rentas + Osman Moreno + omoreno&sar.gob.hn +59726 + Eric Wegert + Eric Wegert + eric.wegert&mailbyshuttle.net +59727 + iFD GmbH + Paul Scholz + paul.scholz&ifd-gmbh.com +59728 + City of Kamloops + Marco Lussetti + mlussetti&kamloops.ca +59729 + Max banka a.s. 
+ IT infrastructure team + itinfrastructure&maxbanka.eu +59730 + DATATOM + Jonas Sun + sun.zhuofeng&datatom.com +59731 + opendoor + Thomas Constans + thomas&opendoor.fr +59732 + Skyhigh Security LLC + Skyhigh Security Customer Service Department + support&skyhighsecurity.com +59733 + Les Marches d'Aliments Naturels Tau + Andy Munungu + amunungu&marchestau.com +59734 + Ekopak + Vincent Devolder + vincent.devolder&profunctional.be +59735 + Antti Backman + Antti Backman + anttidantti&gmail.com +59736 + Lake Travis ISD + Dustin Zahradnik + zahradnikd&ltisdschools.org +59737 + Sunit Govind + Sunit Govind + govindsunit008&gmail.com +59738 + Jamtkraft AB + Torbjorn Widmark + torbjorn.widmark&jamtkraft.se +59739 + MindFuture ApS + Thomas Steen Rasmussen + oid&mindfuture.com +59740 + Ibrahim Issa Ali + Ibrahim Issa Ali + www.ibrahimmaiga2&gmail.com +59741 + BlueCoral Technologies + Matthew Yao + matthew.yao&bluecoraltech.com +59742 + STD + Marcelo Lopes Corrêa + marcelo&std.com.br +59743 + Catinfor + Catiano Carvalho + geral&catinfor.ao +59744 + krakn + Mickael Delannay + mickael&mykraken.fr +59745 + EYYES GmbH + Julian Eder + julian.eder&eyyes.com +59746 + Prokyber + Aleksei Karavaev + alexok997&gmail.com +59747 + Robert Cato + Robert Cato + saiwolf&swmnu.net +59748 + Teknotel Elektronik Müh. San. ve Tic. Ltd. Şti + Can Özçiftci + can.ozciftci&teknotel.com.tr +59749 + Deutsche Akkreditierungsstelle GmbH (DAkkS) + Susanne Kuch + afi&dakks.de +59750 + Milwaukee Tool + Jason Kindler + jason.kindler&milwaukeetool.com +59751 + KREN(Korean Education Network) + ahn jin bum + kren&snu.ac.kr +59752 + Vanguard Packaging, LLC + Eric Steen + esteen&vanguardpkg.com +59753 + ZeroBase Energy, LLC + Sean Greenslade + sgreenslade&zerobaseenergy.com +59754 + SecTrail + Baris Solak + baris.solak&bntpro.com +59755 + Shahar nissinboim + Shahar Nis + Shaharnis&gmail.com +59756 + Department of Health and Social Care + Patrick Clover + patrick.clover&dhsc.gov.uk +59757 + Cybereason Inc. + Omri Manor + procurement&cybereason.com +59758 + DRK RDMH + Iwan Bingula + edv&rdmh.de +59759 + UNITEL, S.A. + Walter Ernesto + walter.ernesto&unitel.co.ao +59760 + Scientific and Technical Center of Unified Power System (https://www.ntcees.ru) + Eliseev Dmitrii + eliseev_d&ntcees.ru +59761 + NNIT Group A/S + Brian Lund Jensen (BLJE) + corp-it&nnit.com +59762 + CTM + scott wahlstrom + swahlstrom&kpmg.com +59763 + Harry-Brot GmbH + Roman Lang + roman.lang&harry.de +59764 + CLOUDETEER GmbH + Florian Stamer + fst&cloudeteer.de +59765 + Pridnestrovskiy Sberbank CJSC + Vitaliy Velikodniy + vitaliy&prisbank.com +59766 + Duxbury Networking + Tobie van Schalkwyk + tobie&duxnet.co.za +59767 + BITFIN Software Ltda. + Israel Aece + israel.aece&bitfin.com.br +59768 + Mertech LLC + Nikolov Dmitriy + nda9109&yandex.ru +59769 + Maryland State Retirement Agency + Brad Taylor + btaylor&sra.state.md.us +59770 + RENAL MEDICAL ASSOCIATES, LLC + andrew baddoo, md + renalmedical&gmail.com +59771 + Gergely Risko + Gergely Risko + gergely.risko&gmail.com +59772 + Nesecure Telecom PVT LTD + Shripad Rayewar + shripad&nesecure.net +59773 + RADTONICS AB + David Bridge + david.bridge&radtonics.com +59774 + Teyf Pardazan Spadana (TPS) Co. + Molood Mousavizadeh + m.mousavizade&teyfpardazan.ir +59775 + AB Gyllsjö Träindustri + Christofer Ohlsson + it&gyllsjo.se +59776 + Webloom S.r.l. 
+ Paolo Conti + paolo.conti&webloom.it +59777 + Martin Johansson + Martin Johansson + it-avdelningen&tidaholm.se +59778 + Tiến Trần + Tiến Trần + tien.trantan&hcmut.edu.vn +59779 + INBOH DOOEL Skopje + Aleksandar Zorbovski + a.zorbovski&inbox.com.mk +59780 + PFALZKOM GmbH + Patrick Wageck + patrick.wageck&pfalzkom.de +59781 + The Dixon Group, INC. + Michael Bishop + mbishop&dixonvalve.com +59782 + Shenzhen Keliheng Electronics Co., Ltd + Handsome + rdhs&szklh.com +59783 + Lyons National Bank + Todd Juffs + tjuffs&bankwithlnb.com +59784 + Pluto Health + Seung Park + seung&pluto.health +59785 + Keisuke Tasaka + Keisuke Tasaka + hoge22hoge&gmail.com +59786 + Nema + Haukur Thor Bragason + htb&nema.fo +59787 + Papers AG + Alessandro De Carli + a.decarli&papers.ch +59788 + LOGOTEC Jerzy Dryndos + Slawomir Kulikow + kulikow&logotec.pl +59789 + ShenZhen PowerOak Newener Co.,Ltd + Shaojie Tang + tangsj&poweroak.net +59790 + Serrala Group GmbH + Sven Schmidt + it-global&serrala.com +59791 + Beijing Yahong Century Technology Co., Ltd + Xiaodan He + hexiaodan&act-telecom.com +59792 + Stirlitz Media + Grzegorz Podsiadlo + greg&stirlitzmedia.com +59793 + IPKO Telecommunications LLC + Faton Kurteshi + faton.kurteshi&ipko.com +59794 + ISS AG + Ulrich Bießenberger + ulrich.biessenberger&iss-ag.com +59795 + Carsten Goetzinger + Carsten Goetzinger + carsten.goetzinger&gmail.com +59796 + CardConnect + Jason Hane + jhane&cardconnect.com +59797 + NextBillion.AI + Liu Kai + kai&nextbillion.ai +59798 + RJM Energia + Felipe dos Santos + fss007&gmail.com +59799 + Slican sp. z o.o. + Czesław Noga + c.noga&slican.pl +59800 + Ville d'Esch-sur-Alzette + Gregory Sottiaux + Gregory.Sottiaux&villeesch.lu +59801 + IFSULDEMINAS + Marcio Prado + marcio.prado&ifsuldeminas.edu.br +59802 + Total Quality Logistics + Michael Domer + mdomer&tql.com +59803 + Beijing Yahong Century Technology Co., Ltd + Xiaodan He + hexiaodan&act-telecom.com +59804 + LOV Telecomunicaciones SAS + José Galán + josegalan&lov.com.co +59805 + Paul Mairinger + Paul Mairinger + mairingerpaul&gmail.com +59806 + Firmafy Soluciones Tecnológicas SL + Wenceslao Criado Báez + wence.criado&firmafy.com +59807 + Technix d.o.o. + Ziga Terdic + technix&siol.net +59808 + MiWire Aps + Jesper Rhode Andersen + jesper&miwire.net +59809 + Starchild Systems + Ambre Bertucci + ambre&starchild.systems +59810 + MailRoutingService + Jeremy Liteky + jeremy&mailroutingservice.com +59811 + Wuxi Gongbi Quanshu Network Co., Ltd. + Department of Technology and Site Operation + siteoperator&qiuwen.org +59812 + Nexus Pharmaceuticals + Bart Sudol + bsudol&nexuspharma.net +59813 + Levy Kaufmann-Kohler + Pascale Bergstrom + administrateur&lk-k.com +59814 + Alpsnet + Pierre Martin + it&alpsnet.io +59815 + Innomentarium + Angelos Fylakis + angelos.fylakis&innomentarium.fi +59816 + Konekto IT Solution + Predrag Rajicic + admin&konekto.rs +59817 + VotingWorks + Ben Adida + ben&voting.works +59818 + SnatchDreams + Paul P Joby + paulpjoby&snatchdreams.net +59819 + HR Duo + Sean McLaughlin + sean.mclaughlin&hrduo.com +59820 + CEMA Ltd + Ravi Manian + rmanian&cemaltd.co.uk +59821 + Neways Technologies BV + Dennis Engbers + dennis.engbers&newayselectronics.com +59822 + Versorgungsbetriebe Hann. 
Münden GmbH + Jan Däumichen + daeumichen&versorgungsbetriebe.de +59823 + Michael Hyde + Michael Hyde + me&michaelhyde.net +59824 + crifst + zhangxin + zhangxin&crifst.ac.cn +59825 + DAS Environmental Expert GmbH + Henrik Grossmann + iana-pen&das-ee.com +59826 + AIA New Zealand + Lukaz Sobko + lukaz-aia&comadot.net +59827 + Efficture AB + Thomas Svensson + thomas.svensson&efficture.com +59828 + Payment Alliance International + Michael Jung + mike.jung&gopai.com +59829 + AO "Nacional'naja nerudnaja kompanija" + Alexey Imenkov + nnk.it&nnk.ru +59830 + Mapo + James Tsang + it&mapo.io +59831 + CSSSPNQL + Vincent Tapin + vtapin&cssspnql.com +59832 + Ideamatrix + John Thompson + iana-pen&ideamatrix.co.uk +59833 + Asha Enterprises + MITCHELL BUSSARD + mitchell.bussard&gmail.com +59834 + BTS LABS + Anıl Ülgen + anil.ulgen&btslabs.ai +59835 + LAYER8 DEFENSE TECHNOLOGIES + Aaron Reilly + AARON&L8DT.COM +59836 + Cellcomm Solutions Limited + Prakash Mlavil Rajan + prakash&cellcommsolutions.com +59837 + David Emmett + David Emmett + licensing&emmett.id.au +59838 + Medical Intertrade d.o.o. + Antonio Matijašec + informatika&medical-intertrade.hr +59839 + Fabio Bettiol + Fabio Bettiol + informaticapro&coceder.org +59840 + Asociación de Autoridades Ancestrales Territoriales Nasa Çxhãçxha + Juan Diego Yasno + sistemas.asocabildos&tierradentro.co +59841 + Wojskowy Instytut Medyczny - PIB + Piotr Murawski + informatyka&wim.mil.pl +59842 + QuestNet + Jan Quest + admin&questnet.eu +59843 + Tortuga AgTech + Michael Witkowski + mike.witkowski&tortugaagtech.com +59844 + Thibaud + Thibaud Couchet + contact&thibaudcouchet.fr +59845 + Orlando L. Antonio + Orlando L. Antonio + lando.antonio2&gmail.com +59846 + Bechtle Nürnberg Cloud + Philipp Butterhof + philipp.butterhof&bechtle.com +59847 + Creativagenten + Hubertus Buchinger + hb&creativagenten.de +59848 + Michael Hörauf Maschinenfabrik GmbH & Co. KG + IT-Abteilung HÖRAUF + it&hoerauf.com +59849 + Profideo + Alban Duval + aduval+iana&profideo.com +59850 + Golem Factory GmbH + Przemyslaw Rekucki + admin&golem.network +59851 + Healthy IT, Inc. + Scott Sanford + scott&myhealthyit.com +59852 + TBK Bank, SSB DBA TriumphX + Jeremy Korell + jkorell&tbcap.com +59853 + Grey Bruce Health Services + Jamie Page + jpage&gbhs.on.ca +59854 + Karkinos Healthcare Private Limited + Raja Sekhar Kommu + rajs&karkinos.in +59855 + CCSMEC + LanGao + gaolan&ccsmec.com +59856 + SILAE + Gabriel GIL + ext.gabriel.gil&silae.fr +59857 + TSImaging Healthcare + Rex Chen + rdservice&tsimaging.net +59858 + Swenetters + Philip Sarmiento + gupez&live.se +59859 + Ziosoft, Inc. + Shusuke Chino + itsupport&zio.co.jp +59860 + David Andrew Brende + David Brende + brendedavid&gmail.com +59861 + Zweken + Amin Bahreini + aminn&engineer.com +59862 + The Downstairs Attic + Peter Van Hemert + hunterkiller2011&gmail.com +59863 + Hotell Edström AB + Andreas Dahlberg + andreas.dahlberg&bjurtrask.se +59864 + HDS, Inc + HDS IT Department + it_dept&hdsdrivers.com +59865 + NSW Rural Fire Service + Stephen Mason + servicedesk&rfs.nsw.gov.au +59866 + Centralne Operacije + Dzenan Muftic + dzenan.muftic&ceop.ba +59867 + JP BH Pošta d.o.o. 
+ Haris Kozica + haris.kozica&posta.ba +59868 + Buska + Ken Buska + kbuska&gmail.com +59869 + HELIOS TECH + ARTURO SALAS FLORES + asalas.rwc&gmail.com +59870 + Smitty van Bodegom + Smitty van Bodegom + oid-contact&iter.ca +59871 + mikepiety.com + Michael R Piety + mrp&mikepiety.com +59872 + Frontier Developments Plc + Josh Purcell + JPurcell&Frontier.co.uk +59873 + Republic of Turkey General Directorate of Highways + Yalçın Özer + yozer2&kgm.gov.tr +59874 + Thueringer Rechnungshof + S. Baerwolf + stephan.baerwolf&trh.thueringen.de +59875 + ClinFlows + Jean-François Burr + cto&clinflows.com +59876 + AlmaLinux OS Foundation + Jonathan Wright + cloud-infra&almalinux.org +59877 + Cornelis Networks, Inc. + Gunnar K. Gunnarsson + gunnar.k.gunnarsson&cornelisnetworks.com +59878 + Powertech + Winston Lee + winston&power-tech.com.tw +59879 + Almonte General Hospital + Jamie Page + jpage&brightshores.ca +59880 + Orillia Soldiers Memorial Hospital + Jamie Page + jpage&gbhs.on.ca +59881 + Hanover and District Hospital + Jamie Page + jpage&gbhs.on.ca +59882 + Muskoka Algonquin Healthcare + Jamie Page + jpage&gbhs.on.ca +59883 + South Bruce Grey Health Centre + Jamie Page + jpage&gbhs.on.ca +59884 + William Smith + William Smith + macdaddy2smooth&gmail.com +59885 + KNS IT + Peter Kurfer + peter.kurfer&kns-it.de +59886 + Mediabox EIRL + Michael Epstein Jiménez + mepstein&mediabox.cl +59887 + Microsoft Mixed Reality + Mike Poulson + mpoulson&microsoft.com +59888 + DIAMOND ENERGY CONTROL SYSTEMS PTY LTD + Zohair Hassan + zhassan&diamond-energy.com +59889 + Mark Keating + Mark Keating + mark&the-keatings.com +59890 + Argotech Co., Ltd. + Shinichi Tokita + iana&argotech.jp +59891 + Baltic Institute of Advanced Technology + Robertas Urbutis + security&bpti.eu +59892 + Denomas Denetim Otomasyon + Tolga Karatas + tolga.karatas&denomas.com +59893 + ZhangZexin + ZhangZexin + zhangzexin&usr.cn +59894 + LBS Landesbausparkasse NordWest + Tim Lissel + tim.lissel&lbs-nw.de +59895 + DOXIO + Mohamed LAIRECHE + mohamed.laireche&doxio.com +59896 + NTS LLC + Vladimir Zinchenko + info&mwnts.ru +59897 + Anothermouse + Peter Edmond + iana_oid&anothermouse.com +59898 + xnybre.net + Daniel Kalør + iana&xnybre.net +59899 + Sebastian Sams + Sebastian Sams + iana-contact&a-sams.at +59900 + Starter Teck + Dee Deans + Info&starterteck.com +59901 + Bizland + Juan Manuel Lens + juan.lens&bizland.tech +59902 + Haroldo Banzoli Filho + Haroldo Banzoli Filho + engepaengenharia&gmail.com +59903 + Jinan USR IOT Technology Limited + Junwu Cao + xiaojun9922&163.com +59904 + S.C. Global Top Systems s.r.l. 
+ VASILIU Bogdan + meteo.consultancy&gmail.com +59905 + Certutil + Tiago Zacchello + pki&certutil.net +59906 + Universiti Sultan Zainal Abidin (UniSZA) + Nik Mohd Imran + nik&unisza.edu.my +59907 + Alex Wilson + Alex Wilson + alex&cooperi.net +59908 + Truesec, Inc + Tim Davis + tim.davis&truesec.com +59909 + Hub One SA + GAMOT Cédric + iana&hubone.fr +59910 + RedMatter Solutions + Kiek Choeung + kiek.choeung&redmattersolutions.com +59911 + 3Ti + Gilles Tremblay + info&3ti.ca +59912 + OCELLOTT ENGENHARIA LTDA + Luiz Claudio Sampaio Ramos + sampaio&ocellott.com +59913 + Magic Trucking LLC + Dovirs S Williams + dovirs&gmail.com +59914 + Rexel Developpement SAS + Pablo Anon + pablo.anon&rexel.com +59915 + Carlos Roque + Carlos Roque + carlos.roque&tutanota.com +59916 + Schwarzer Milan + Philipp Lieb + philipp.lieb&schwarzer-milan.eu +59917 + Philipp Lieb + Philipp Lieb + philipplieb8&gmail.com +59918 + West Linn-Wilsonville School District + Khanh Duong + duongk&wlwv.k12.or.us +59919 + Thomas Katholnig + Thomas Katholnig + iana&katholnig.net +59920 + IndigoCare + Dennis Busselen + dennis.busselen&indigocare.com +59921 + California Coast Physicians, Inc. + Elvia Rhoades + erhoades&californiacoastphysicians.org +59922 + KIDZ Medical Services + Emilie Martinez + ContactUs&kidzmedical.com +59923 + Shenzhen Megarevo Technology Co., Ltd. + Guo Sheng Long + guoshenglong&megarevo.com.cn +59924 + Keith Lee Hong Sun + Keith Lee Hong Sun + keith.lee&global.ntt +59925 + NOC Network & IT-Services e.U. + Jan-Erik Helge Redl + jan.redl&noc.eu +59926 + SchoolsFirst Federal Credit Union + Brennen Wheelock + bwheelock&schoolsfirstfcu.org +59927 + Alkaloid AD + Dame Malov + dmalov&alkaloid.com.mk +59928 + EVERON ENERGY SYSTEMS PVT. LTD. + AJAY SABHARWAL + MKTG&EVERON.IN +59929 + Deloitte China + Felix Li + felixli&deloitte.com.cn +59930 + b2e.cloud + Michael Newiger + michael.newiger&b2e.cloud +59931 + Fachhochschule Nordwestschweiz + Patrick Zumstein + it-infrastructure.services&fhnw.ch +59932 + Xuan Khai Tran + Xuan Khai Tran + xktran&gmail.com +59933 + Firscom + eungoo seo + dev&firscom.co.kr +59934 + Jiansong Yang + Jiansong Yang + snmp&yangjiansong.com +59935 + Matthew Grant + Matthew Grant + matt&mattgrant.net.nz +59936 + BaseBit + Laisky Cai + iana-bbt&laisky.com +59937 + Nij Smellinghe + Robin Kluit + systeembeheer&nijsmellinghe.nl +59938 + IDeTRUST GmbH + Albertus Pretorius + a.pretorius&toennjes.com +59939 + BMK Group GmbH & Co. KG + Stefan Paintner + stefan.paintner&bmk-electronics.de +59940 + Sussex Community NHS Foundation Trust + Paul Shipperley + paul.shipperley&nhs.net +59941 + Alexey Saidov + Alexey Saidov + Alex8712a&mail.ru +59942 + MyAVR.ru + Kotitsyn Vladimir + workpage&mail.ru +59943 + Legacy Medical Management LLC + Barrett Pollard + barrett.pollard&legacyllc.org +59944 + Third Helix Technology Inc + Barrett Pollard + barrettp&thirdhelix.com +59945 + P&K Equipment Inc + Barrett Pollard + bpollard&pkequipment.com +59946 + Identiv + Barnaby Flint + bflint&identiv.com +59947 + Viettel Telecom + Vu Xuan Duong + duongvx&viettel.com.vn +59948 + Institut für Mathematik der Universität Würzburg + Florian Möller + fmoeller&mathematik.uni-wuerzburg.de +59949 + Kentox Consulting + Kenneth Swärd + kenneth.sward&kentox.se +59950 + ziwig + Omar Mrani + omrani&ziwig.com +59951 + Thibaut Mouly + Mouly Thibaut + thibaut&d-mouly.fr +59952 + Ubiq Systems Pty Ltd + Kenneth Acason + admin&ubiqsystems.com +59953 + Graphiant Inc. 
+ Mosaddaq Turabi + mosaddaq&graphiant.com +59954 + Laisky + Laisky Cai + iana&laisky.com +59955 + Sofinet LLC + Vitaliy Siver + vsiver&sofinet.ru +59956 + Overon + Mario Manzano + mario.manzano&overon.es +59957 + United Airlines Inc + Patrick Sexton + patrick.sexton&united.com +59958 + Bäckebrons Sågverk Aktiebolag + Stefan Gillberg + it&backebronssagverk.se +59959 + Onslow County Public Schools + David Sanbeg + david.sanbeg&onslow.k12.nc.us +59960 + Balungstrands Sågverk AB + Stefen Gillberg + admin&balung.se +59961 + Kemmler Baustoffe GmbH + Cihan Gueler + cihan.gueler&kemmler.de +59962 + Mjölby-Svartådalen energi + Cecilia Hogla + cecilia.hogla&tekniskaverken.se +59963 + Georg Luthardt-Gumbrecht + Georg Luthardt-Gumbrecht + admin+pen&familie-gumbrecht.de +59964 + École professionnelle artisanale et industrielle de Fribourg + Jérôme Frossard + jerome.frossard&edufr.ch +59965 + My Creative Space + Wojciech Weglinski + administrator&mycreativespace.pl +59966 + Matthews Swartz + Matthews Swartz + swartzm.za&gmail.com +59967 + BW-Spielbanken GmbH und Co KG + Joerg Schittenhelm + j.schittenhelm&bw-spielbanken.de +59968 + ProEsys srl + Massimo Marini + m.marini&proesystech.com +59969 + Sycai Medical + Javier García + j.garcia&sycaitechnologies.com +59970 + Quantum Xchange, Inc. + Quantum Xchange, Inc. + support&quantumxc.com +59971 + Kcrypt Lab + Kirill A. Korinsky + k&kcry.pt +59972 + Mana Monitoring + Zoltan Milaskey + zoltan&manamonitoring.com +59973 + CASS France + Roland Despinoy + roland.despinoy&cass.fr +59974 + CCC S.A. + Rafal Wisniewski + rafal.wisniewski&ccc.eu +59975 + Architectural Bureau А1 + Ildar Abubakirov + ildar&arbura.one +59976 + Sofia Markusfeld + Sofia Markusfeld + s.markusfeld9544&student.leedsbeckett.ac.uk +59977 + Airbus Helicopters + Gay Jonathan + jonathan.gay&airbus.com +59978 + Lindenmeyr International Ltd + Pamela Woodall + pwoodall&lindenmeyrinternational.com +59979 + Benegon Enterprises LLC + David Bender + dbender&benegon.com +59980 + Cellium Technologies + Michael Brutman + michaelb&cellium.net +59981 + MedSoft + Racila Florin + florin.racila&med-soft.ro +59982 + Hydes Corporation + Harrison Hydes + Harrison.Hydes&hydes.co.nz +59983 + VDM Metals GmbH + Philipp Verbnik (Marketing department) + philipp.verbnik&acerinox.com +59984 + TEHTRIS + Lucas Jiang + team_isp_nexus&tehtris.com +59985 + Max-Planck-Institute for Infection Biology + Ralf Traeger + traeger&mpiib-berlin.mpg.de +59986 + Central Texas Cancer Centers + Elizabeth Cruz + lcruz&centraltexascancercenters.com +59987 + Tsuda University Keisan Center + Akio Katagiri + infoadmin&tsuda.ac.jp +59988 + Henan Kunlun Technologies Co., Ltd. + Ali Mao + maoaliwy&126.com +59989 + Kembeddo + Krzysztof Olejarczyk + owner&kembeddo.pl +59990 + Bastionic B.V. + Ernst Lawende + ernstl&bastion365.nl +59991 + Dalton Johnson III + Dalton Johnson iii + dm343702nv&gmail.com +59992 + Christopher Kay + Christopher Kay + Christopher.Kay&uk.thalesgroup.com +59993 + Reinhard Vielhaber + Reinhard Vielhaber + snmp_enterprise&rvis.de +59994 + SAVINO SOLUTION S.P.A. - SOCIETA BENEFIT + Nicola Savino + nicola.savino&savinosolution.com +59995 + Billy Yudani + Billy Yudani + billy.yudani&gmail.com +59996 + Niket Labs + Niket Sharma + niket67&gmail.com +59997 + Technische Universität Bergakademie Freiberg + Dr. Andreas Kluge + ServiceDesk&tu-freiberg.de +59998 + ACIS Technology + Alex Moskalenko + pen&acis.ru +59999 + Gonzo Org. 
+ Marc Pearson + gonzo&g0nz0.me.uk +60000 + Feuerwehr Gundelfingen + Daniel Andris + daniel.andris&feuerwehr-gundelfingen.org +60001 + GPSPATRON + Maksim Barodzka + mb&gpspatron.com +60002 + vNET Hosting + Donald Cowan + info&vnethosting.us +60003 + Zyell Solutions Corporation + Pedro Ting + pedro.ting&zyxel.com.tw +60004 + TecTake GmbH + Steffen Dürr + helpdesk&tectake.de +60005 + Yourizon + Richard Timmermans + richard.timmermans&yourizon.nl +60006 + AP Informatica + Salvatore Canciello + info&apinformaticasrl.it +60007 + SWEEN SAS + Tristan Pateloup + tristan&sween.fr +60008 + Federal Defenders of New York, Inc. + James Ward + NYS_Admin&fd.org +60009 + Zorus, Inc + Francarlos Fernandez + FFernandez&ZorusTech.com +60010 + joerk GmbH + Enrico Gusek + info&joerk-plattform.de +60011 + Fiserv Payment System + Fiserv Global Cyber Security Services Payment System + PKI.Services&fiserv.com +60012 + International Human Rights Commission + Wareesali Taibani Sayyed + wareesali7500&janparichay.gov.in +60013 + ACN OCI-GSUITE + Rhett Lee Martinez + rhett.lee.martinez&accenture.com +60014 + Chongqing Telecom + Luo Kun + cqtsac&chinatelecom.cn +60015 + AMPHITECH + JULIEN WEHRBACH + webmaster&hitech.fr +60016 + City Storage Systems + Onha Choe + onha.choe&cloudkitchens.com +60017 + Les Neiges d'Antan + Andrea Cavalli + iana.org&ridesoft.it +60018 + MaitriseTechnologique + Frédéric MONGE + fmonge&maitrise-technologique.com +60019 + FANUC America Corporation + FAC Admin + fac.admin&fanucamerica.com +60020 + Astoria IT Consulting + Oleg Schatoff + oleg&astoria-it.com +60021 + Nordic Wood Supply + Magnus Olovsson + magnus.olovsson&nordicwood.se +60022 + Fotios Loukos + Fotios Loukos + fotisl&gmail.com +60023 + Transalpine Deutsche Oelleitungs GmbH + Gottschlich, Frank + frank.gottschlich&tal-oil.com +60024 + Nasqueron + Sébastien Santoro + dereckson&espace-win.org +60025 + VAIO Corporation + OID Admin + oid-admin&vaio.com +60026 + Alabaster City Schools + Anthony Kingston + acsit&acsboe.org +60027 + LEEGOOD AUTOMATIC SYSTEMS. INC. + KAIHUNG HSIAO + kai_hsiao&leegood.com.tw +60028 + Neil Farr + Neil Farr + nfarr&acquiredigital.com +60029 + Digital Check Corp + Kevin Jennings + KJennings&DigitalCheck.com +60030 + NonpareilTech + Timothy Conner + info&nonpareiltech.com +60031 + CLEARING HOUSE LIVE + WM JEFFREY + admin&clearing.live +60032 + Sikorsky Financial Credit Union + Scott Posey + sposey&sikorskyfcu.org +60033 + Whiterabbit AI Inc + Daniel Cardoza + daniel&whiterabbit.ai +60034 + ZTD Training + Elton Hsu + info&ztdtraining.com +60035 + Wolfspyre Labs + Wolf Noble + iana&wolfspaw.com +60036 + FoamyCastle + Aaron Anthony Sollman + dev&foamycastle.com +60037 + bonprix Handelsgesellschaft mbH + IT-Operation + it-operation&bonprix.net +60038 + Hitachi Energy Automation & Communication + Janis Kruse + janis.kruse&hitachienergy.com +60039 + Andrews Property Group + Tom McCabe + tmccabe&andrewsonline.co.uk +60040 + Nine Consulting + Marjan Pataca + marjan&nine-consulting.hr +60041 + Procontrol Electronics Ltd. 
+ Attila Kovacs + KAttila&procontrol.hu +60042 + Tems Security Services GmbH + Michael MEixner + michael.meixner&tems-security.at +60043 + Admiral Sportwetten GmbH + Emil Huber + emil.huber&admiral.at +60044 + Chris Dauksza + Chris Dauksza + chrisdauksza&gmail.com +60045 + GSB – Sonderabfall-Entsorgung Bayern GmbH + Heribert See + Heribert.See&gsb-mbh.de +60046 + Anger Clan + Bjørn Anger + bjoern&anger-clan.de +60047 + GENERAL BROADCAST + Guilain ACHARD + gachard&general-broadcast.com +60048 + NSLComm + Vadim Kotchergin + vadim&beetlesat.com +60049 + European Union Agency for the Space Programme + Aitor Alvarez Rodriguez + pki&euspa.europa.eu +60050 + WyreStorm Technologies Corp. + Nicholas Meers + nick.meers&wyrestorm.com +60051 + Cirrus Link Solutions + Chad Kienle + chad.kienle&cirrus-link.com +60052 + Matt Melling + Matt Melling + mattmelling&fastmail.com +60053 + APP SOLUTIONS + Olivier CLEF + oc&appsolutions.fr +60054 + Tecnosul + Daniel Brito + daniel.brito&grupotecnosul.com +60055 + Pert Telecom Solutions Pvt Ltd + Mitesh Vageriya + mv&pertsol.com +60056 + Amensolar Ess Co., Ltd. + Eric Fu + amensolaress&gmail.com +60057 + Thales UK Ltd + Robin Barker + coref.admin&uk.thalesgroup.com +60058 + Assumption College Kilmore + ICT Department + ict.dept&assumption.vic.edu.au +60059 + Lapplands + Marcus Fagervall + marcus.fagervall&lapplands.se +60060 + Consometers + Cyril Lugan + iana.org&cyril.lu +60061 + Bayerische Versorgungskammer + Sioukri oglou, Sioukri + sSioukri&versorgungskammer.de +60062 + Francis Augusto Medeiros-Logeay + Francis Augusto Medeiros-Logeay + r_f&med-lo.eu +60063 + Mosaic Primary Care Network + Jarrett Blachly + Jarrett.Blachly&mosaicpcn.ca +60064 + Goldilocks Tech Solution Pvt. Ltd. + Sarju Garg + sarju&goldilocks-tech.com +60065 + Hone Group, Inc + Neilson Smith + neilson.smith&honeus.com +60066 + Capella Controls + Ariel Garcia + ariel.garcia&capellacontrols.com +60067 + Sudipto Kumar barmon + Sudipto Kumar barmon + Sudiptokajol02&gmail.com +60068 + Karlborgs Elkontroll AB + Alexander Schmidt + alex&karlborgselkontroll.se +60069 + LeXtudio Inc. 
+ Yang Li + support&lextudio.com +60070 + Mälby Gård AB, 556499-7525 + Rickard Lennartsson + Rickard&malbygard.se +60071 + Wiregrass Electric Cooperative + Clayton Howell + chowell&wiregrass.coop +60072 + Hologic + Nick Moscone + Nick.Moscone&hologic.com +60073 + Accu-Precision Tool and Gauge + Andrew Mueller + andrew.mueller&accu-precision.com +60074 + Caddell Construction + Heather Marchand + heather.marchand&caddell.com +60075 + Morpheus Space GmbH + Thomas Hartmann + neo&morpheus-space.com +60076 + Teleproject Srl + Diego Bianchi + dbianchi&teleproject.it +60077 + Stadt Kassel - Amt für Schule und Bildung + Markus Schulz + server-ksan&kassel.de +60078 + BEYOND THE SCOPE + Honorio Marín García + honorio&permisso.io +60079 + Pathagility + Mark McCuin + info&pathagility.com +60080 + Nice Threads, LLC + Tim Nash + tim&nicethreadsllc.com +60081 + Jonno Sanderson-Smith + Jonno Sanderson-Smith + jonnoss&gmail.com +60082 + Square Enix + Yasmin Kauser + yasmin.kauser&comms-care.com +60083 + Spt Arrow Net + Aleksandr Buryak + aburyak&zntr.ru +60084 + Alteryx Extensibility + Alteryx Extensibility Team + devex&alteryx.com +60085 + PGUM Srl + Jochen Platzgummer + jochen.platzgummer&pgum.eu +60086 + Northwest Hills Surgical Hospital + Gordon Pedley + gordon.pedley&scasurgery.com +60087 + Grifon + Grifon Admin + iana&grifon.fr +60088 + Opelousas General Health System + Jesse Folds + ITLeadership&opelousasgeneral.com +60089 + Stiftung Auffangeinrichtung BVG + Christian Nobbe + iana&aeis.ch +60090 + Lopi elektronika sp. z o.o. + Bartosz Ostrowski + bo&lopi-elektronika.pl +60091 + EntryPoint Networks, Inc + Devyn Herrick + dherrick&entpnt.com +60092 + Treasure Valley Hospital + Rachel Schaffner + Rachel.Schaffner&scasurgery.com +60093 + PAYAM KHOSHBAKHT + PAYAM KHOSHBAKHT + evqhcm&gmail.com +60094 + La France insoumise + Salomé Cheysson + site&lafranceinsoumise.fr +60095 + CEC Huada Electronic Design Co., Ltd. + Xinyu Wei + weixy&hed.com.cn +60096 + Wealden District Council + IT Helpdesk + ithelpdesk&wealden.gov.uk +60097 + OESSE + Niccolò Carnelos + ict&oesse.com +60098 + strudelline.net + James Andariese + iana-pen-admin&strudelline.net +60099 + Amazon Web Services Inc + Michael Nearing + nearinm&amazon.com +60100 + UNpkl + Yogesh Nagarkar + yogesh&yeshog.com +60101 + Synapse Product Development + Ian Johnson + ian.johnson&synapse.com +60102 + Luogu + Soha Jin + soha&luogu.org +60103 + AustroControl + Thomas Brunner + thomas.brunner&austrocontrol.at +60104 + Western Sydney Airport + Scott Dowd + cybersecurity&wsaco.com.au +60105 + Xinjiang Yingsheng Information Technology Co., Ltd + GaoHu + 12396218&qq.com +60106 + Beijing Dayu Technology Co.,Ltd + Wang Xinpu + wangxinpu&dayudpu.com +60107 + Select Medical + Brandon Burger + ldap.admins&selectmedical.com +60108 + Rocky Enterprise Software Foundation + Neil Hanlon + infrastructure&rockylinux.org +60109 + Arising Technology Systems Pty Limited + Ralph Holland + ralph&arising.com.au +60110 + Junnikkala Oy + Kalle Junnikkala + kalle.junnikkala&junnikkala.com +60111 + Brickdoc HK Holding LTD + Ding Li + ding&pleisto.com +60112 + ASTELCO Systems GmbH + Michael Ruder + reg+pen&astelco.gmbh +60113 + Aires LLC + Mrinal Prasad + sysadmin&aires.com +60114 + Beijing YiShengChuangQi Technology Co., Ltd + LingLing Liu + 1613903985&qq.com +60115 + eCert + Praneet Rai + praneet&rai.email +60116 + eCert Corp. 
+ Praneet Rai + corporate&rca.nogc.ca +60117 + Nane OON GlobalCom Corporation + Praneet Rai + corporate&rca.nogc.ca +60118 + INSERCOMHAI INTERNET SERVICE & COMMUNICATION LTDA + Neptune Duval + ADM&insercomhai.org +60119 + Tiuxo + Brian Clemens + brian&tiuxo.com +60120 + Konvekta AG + Samuel Hübl + des&konvekta.com +60121 + Surgical Hospital of Oklahoma, LLC + Tanisha Peterson + tpeterson&surgicalhospitalok.com +60122 + INFOGRID d.o.o. + Goran Petrović + admin&infogrid.hr +60123 + hefraTEC GmbH + Ha-Er Frank + info&hefratec.de +60124 + Larraby Electrónica y Comunicación, S.L. + Óscar Rived + oscar&larraby.com +60125 + RL Online Media + Rasmus Lauridsen + mail&rl-online-media.dk +60126 + AMAG Group AG + Jonas Stalder + jonas.stalder&amag.ch +60127 + AMAG Corporate Services AG + Jonas Stalder + jonas.stalder&amag.ch +60128 + Astelcom + Jérôme BEAREL + iana&astelcom.fr +60129 + NCube Hosting Consultants + Carl Staub + carl.staub&ncubehost.com +60130 + Plutometry Corporation + Guilherme Santos + hostmaster&plutometry.com +60131 + AGL Energy Limited + Isaac Rosado + irosado&agl.com.au +60132 + Surahammars Bruks AB + Jörgen Andersson + teknik&smdab.se +60133 + Septimiu Fabian Mare + Septimiu Fabian Mare + septimiu&me.com +60134 + Nexim Security Solutions + Septimiu Fabian Mare + septimiu&neximsecurity.com +60135 + Briggs & Stratton + Mark Ciszek + ciszek.mark&basco.com +60136 + Agima + Egor Braguntsov + e.braguntsov&agima.ru +60137 + Proba + Administrator + info+iana&proba.com +60138 + Granby Consulting LLC + Mike Granby + mikeg&mikeg.net +60139 + IETHCOM Information Technology Co., Ltd. + Pei Hao + simonhao&iethcom.com +60140 + NIFT (Pvt) ltd. + Imran Ashraf + imran.ashraf&nift.pk +60141 + Scott Corio + Scott Corio + scott.corio&gmail.com +60142 + 3A Systems, LLC + Valery Kharseko + vharseko&3a-systems.ru +60143 + Forney ISD + Joshua Herron + jeherron&forneyisd.net +60144 + Swift Navigation Incorporated + Jason Mobarak + jason&swift-nav.com +60145 + P1 Security + BONAMY Rémi + rby&p1sec.com +60146 + Vladimir Kleusov + Vladimir Kleusov + kleusov&gmail.com +60147 + logturm + Tobias Dittmann + hello&ditsche.dev +60148 + 北京兰云科技有限公司 (Beijing Lanyun Technology Co., Ltd.) + 北京兰云科技有限公司 (Beijing Lanyun Technology Co., Ltd.) + info&lanysec.com +60149 + Gireve + Rives Jean-Marc + jean-marc.rives&gireve.com +60150 + Elite Tile & Services + Richard Heatherington + masterjourneyman&elitetile.net +60151 + Kesseböhmer Holding KG + Christian Riedel + c.riedel&kesseboehmer.de +60152 + VentureOne + Septimiu Fabian Mare + septimiu.mare&tii.ae +60153 + IVM Zrt + Róbert Almás + robert.almas&ivm-micrologistics.com +60154 + Amantya Technologies Private Limited + Ankur Bharadwaj + ankur.bharadwaj&amantyatech.com +60155 + IHI Terrasun Solutions + Lyonell Keplar + lkeplar&ihiterrasun.com +60156 + ASSOCIATION VIGIK + Pascal MARIN + pascal.marin&laposte.fr +60157 + G2 Ops Inc + Cody Gifford + cody.gifford&g2-ops.com +60158 + GiffTech Inc. + Cody Gifford + codygifford&gifftechinc.com +60159 + Dimitris Maroulidis + Dimitris Maroulidis + security&dimitrismaroulidis.com +60160 + 深圳市菲菱科思通信技术股份有限公司 (Shenzhen Phoenix Telecom technology Co.,Ltd) + 汪精武 (Wang Jingwu) + jingwu.wang&phoenixcompany.cn +60161 + Guangzhou Sanjing Electric Co., Ltd. + Ziqing Li + ziqing.li&saj-electric.com +60162 + Mantis Software Company + Mantis System Admin + sistem&mantis.com.tr +60163 + Beijing Aodun Software Co., Ltd. 
+ Zhao Feilong + zhaofeilong&aodun.com.cn +60164 + RBS LLC + Tatiana Prokofeva + rbs_tech&satel.org +60165 + Shenzhen Hi-Net technology Company Limited + Graeme Song + graeme&szhi-net.com +60166 + GBITIS Limited + Gary Burns + gary.burns&gbitis.com +60167 + Bona Electronic Solutions + Laurence Fourie + laurence&basixgroup.co.za +60168 + RRX Industries, Inc. + Randolph Sommerfeld + iana&rrx.ca +60169 + Curvalux Ltd. + Niv Magen + niv.magen&curvalux.com +60170 + Tritech Group limited + David Rogers + David.Rogers&tritechgroup.co.uk +60171 + Medx Connect Pty Ltd + Dalibor Frtunik + dalibor.frtunik&sorsix.com +60172 + John M. Barnwell, M.D., PLLC + Heidi Simonato + jbarnwell01&gmail.com +60173 + POLYWELL COMPUTERS INC. + JENNY LIN + jennylin&polywell.com +60174 + Insight Operations + Dan Tembe + dtembe&insightops.com +60175 + Shenzhen Growatt New Energy Co., Ltd. + Rongsen Li + rongsen.li&growatt.com +60176 + Technological Ray GmbH + Daniel Eisenschmidt + daniel.eisenschmidt&hotmail.de +60177 + Stanisław Skowron + Stanisław Skowron + ss181292&wp.pl +60178 + Vade + Florian Coulmier + florian.coulmier&vadesecure.com +60179 + EIZO Rugged Solutions Inc. + Christopher Fadeley + chris.fadeley&eizo.com +60180 + Owen Sound Medical Imaging + Jamie Page + jpage&gbhs.on.ca +60181 + cloudtech GmbH + Michael Newiger + office&cloudtech.global +60182 + THEMIS + Alain COCCONI + a.cocconi&themis.nc +60183 + Michael Hill International Limited + Chris Sword + chris.sword&michaelhill.com.au +60184 + PACIFIC PEERING + Alain COCCONI + cocconi&pacific-peering.com +60185 + Staffwerke GmbH + Lucie Nabet + iana&staffwerke.de +60186 + lugges + Lukas Adrian Kron + l.kron&lugges.eu +60187 + Guangzhou Yuhong Technology Co.,Ltd. + He Jinliang + hejl&gzyhtech.com +60188 + Foqus Technologies + Parisa Saat + parisa.saat&foqus.technology +60189 + Kreisverwaltung Recklinghausen + Ludger Berse + it-support&kreis-re.de +60190 + SER Interact Digital AG + Tae-Hoon Kwon + sch-icsave&sergroup.com +60191 + Shanghai BICFANTAS Technology Co., Ltd. + Jacky Hua + jacky.hua&bicfantas.com +60192 + William Bissett + William Bissett + williebissett4&gmail.com +60193 + wuthost + Walder Fyote + walderf&mailbox.org +60194 + Education and Research Library of Pilsen Region + Stanislav Teplik + teplik.stanislav&svkpk.cz +60195 + Bischof + Klein SE & Co KG + Gowshan Santhirapalan + gowshan.santhirapalan&bk-international.com +60196 + TimeTell B.V. + Daniel Knippers + daniel.knippers&timetell.nl +60197 + ECAN + YIN ZHANG + admin&ecan-tech.com +60198 + redhax + Freddy Grande + Freddy.Grande&redhax.com +60199 + Winckworth Sherwood LLP + Dan Wright + dwright&wslaw.co.uk +60200 + A.R.T.I.S. + Constantinos Papadamou + constantinos&justartis.com +60201 + vexo d.o.o. + Blaž Bregar + blaz&vexo.si +60202 + Global Infrastructure + Jean Lubatti + jean.lubatti&worldline.com +60203 + FLEXO + Grzegorz Kosicki + grzegorz.kosicki&flex-net.com.pl +60204 + ekotip ID s.r.o + Petr Ledvina + ledvina&ekotip.cz +60205 + Spenego Software, LLC + MUHAMMAD MUQUIT + directors&spenego.com +60206 + PortByte Technologies + Salvatore Cahyo + salva&dsgroupmedia.com +60207 + FirstFire Productions + Doug Leece + dleece&firstfiretech.ca +60208 + IP Techniek B.V. 
+ Joost van de Weijer + joost&iptechniek.nl +60209 + Ucampus + Jose Miguel Garrido + sys&ucampus.cl +60210 + Expeto Wireless + Terje Strand + support&expeto.io +60211 + Codify Pty Ltd + David Connors + david&codify.com +60212 + SOUTHWEST WYOMING WIRELESS CONNECT LLC + NEAL RAYMOND VREELAND + admin&swwwc.org +60213 + MsgQ Technologies Services Private Limited + SIVA KUMAR KALAMRAJU + sivakumar&msgq.in +60214 + QuiltMC + Ambre Bertucci + ambre&quiltmc.org +60215 + ML Network DV-Systeme, Netzwerk & Kommunikation GmbH + Patrick Terlisten + p.terlisten&mlnetwork.de +60216 + Joe's + José Alisson Valério + joe.alisson&gmail.com +60217 + Cytracom, LLC + Jonathan Buchanan + jbuchanan&cytracom.com +60218 + LI AO + LI AO + ri-oid-contact&work2.liao.media +60219 + Adam Turner + Adam Turner + standingrockllc&gmail.com +60220 + US DEPARTMENT OF SPECIAL PROJECTS AND UNIFIED RESPONSE SERVICES (US-SPURS) + US SPURS OFFICE OF INFORMATION TECHNOLOGY + admin&spurs.gov +60221 + Efo AB + Tom Honig + tom.honig&bricco.se +60222 + Shenzhen Lancho Technology Co., Ltd. + Chang Feng + changf&lanchotech.com +60223 + cunova GmbH + Alexander Wolfshohl + a.wolfshohl.extern&sysback.de +60224 + National Information Technology Development Agency + Ahmed Mustapha Yahuza + amustapha&nitda.gov.ng +60225 + Oscar Alonso Toledo + Oscar Alonso Toledo + oscar&alonsotoledo.nom.es +60226 + AlphaESS + Bryant Zhu + bryant.zhu&alpha-ess.com +60227 + Data Action Pty Ltd + Ian Bedson + ibedson&da.com.au +60228 + MicroMade + Grzegorz Jaruszewski + jgreg&micromade.pl +60229 + Supercloud (Beijing) Technology Co., LTD + Tangwentao + petertang77&outlook.com +60230 + DFW Oil & Energy, LLC + Andrew McCullough + it.support&dfwoilenergy.com +60231 + cyonex GmbH + Fred Tate + f.tate&cy1x.net +60232 + Steven Garcia + Steven Garcia + garcia.steven.t.inet+iana&gmail.com +60233 + State Library of Queensland + Mark E. Thompson + Mark.Thompson&slq.qld.gov.au +60234 + AZ Technology Pte Ltd + Jackie Cheng + jackie.chengbing&azt.com.sg +60235 + Dubois & Dépraz S.A. + Matthias Linder + it&dubois-depraz.ch +60236 + TNC Consultant Group + Paul Co + paul.co&tncgrp.com +60237 + GhangorCloud + Shakuntala Addepally + shakuntala&ghangorcloud.com +60238 + Bürkert Werke GmbH & Co. KG + Dr. Udo Gais + licences.eu&burkert.com +60239 + Yardmasters Lawn Care Services LLC + Christopher Spivey + Christopher&cscoolingllc.com +60240 + Yurii Kolosov + Yurii Kolosov + yuriy&kyf.com.ua +60241 + cesium.cz + Matyáš Vohralík + it&cesium.cz +60242 + Mesto Dubnica nad Váhom + Dusan Misovec + spravait&dubnica.eu +60243 + Libre IT + Hugo Thunnissen + iana&libre-it.nl +60244 + USGS Hawaiian Volcano Observatory + Thomas-Jon Hoomanawanui + thoomanawanui&usgs.gov +60245 + AITIA International Zrt + Laszlo Kovacs + lkovacs&aitia.ai +60246 + Andy Lo-A-Foe + Andy Lo-A-Foe + andy.loafoe&gmail.com +60247 + Luxor Technology Corp + Eddie Wang + eddie&luxor.tech +60248 + AmNetwork + AmNetwork ICT CTO + administrator&amnetwork.it +60249 + Agatel Ltd + Rayhan Madeni + ray&agatel.co.uk +60250 + Swissgrid + Monica Zimmermann + monica.zimmermann&swissgrid.ch +60251 + VuNet Systems Private Limited + Bharat Joshi + info&vunetsystems.com +60252 + Quantum Optics Jena GmbH + Lorenz Josten + l.josten&qo-jena.com +60253 + Rene Baumgarten + Rene Baumgarten + zan1&gmx.de +60254 + ALL Tecnologias + Allexander Caneschi + allexander&alltecnologias.com.br +60255 + Pfeifer Holding GmbH + IT department + pki&pfeifergroup.com +60256 + Think Freely Consulting + Neil Horman + nhorman&think-freely.org +60257 + LITA S.r.l. 
+ Andrea Lisa + andrea.lisa&lita.eu +60258 + STATE ENTERPRISE “AIR TRAFFIC SERVICES AUTHORITY” (BULATSA) + Emil Kostadinov + administrator&bulatsa.com +60259 + AUTMES s.r.o. + IT Department + it&autmes.cz +60260 + Maker Systems + Sjef van Gool + info&maker.systems +60261 + Methinks software SL + Carlos Crespo + systems&methinks.es +60262 + Stella Freyju + Stella Freyju + admin&freyju.me +60263 + Raiys GmbH + Prof. Dr. med. Stephan Schmitz + info&raiys.de +60264 + Fuzzylabs private limited + Sumayya Muradalisha Arab + ameerali&fuzzylabs.in +60265 + Lucifer Rigby + Shaun Rigby + shaunjeffreyrigby&gmail.com +60266 + Siemens Energy AG - Control + Dr. Junghans, Helge + helge.junghans&siemens-energy.com +60267 + CEPTRO.br - Centro de Estudos e Pesquisas em Tecnologia de Redes e Operações + Henrique de Moraes Holschuh + henrique&nic.br +60268 + DIVVAD Dawid Banaszewski + Dawid Banaszewski + kontakt&divvad.pl +60269 + HIMIKO sp. z o.o. + Michał Półrolniczak + iana&himiko.pl +60270 + Instnt Inc. + Justin Kamerman + justin&instnt.org +60271 + LifeWorks NW + Lora Kerr + Lora.Kerr&lifeworksnw.org +60272 + Turtleweb IT Training + Chris Quintero Dominguez + chris&quinterodominguez.co.uk +60273 + Joshua Hershey + Joshua Hershey + JMHershey125&gmail.com +60274 + Rugby Australia Ltd + IT Support + itsupport&rugby.com.au +60275 + ADOP SHENZHEN TECHNOLOGY CO.,LIMITED + Shaohui Wang + wsh&adop.cn +60276 + ACS International Schools + Simon MacDonald + smacdonald2&acs-schools.com +60277 + Yengeek + Andrew Yen + andrew.yen&yengeek.net +60278 + Universidad Pontificia Comillas + Pedro Pérez Celis + cio&comillas.edu +60279 + S.A.F.E. e.V. + Matthias Grote + matthias.grote&safe-ev.de +60280 + Seminole State College of Florida + Steven Donaway + donaways&seminolestate.edu +60281 + Imaqliq Service LLC + Andrey Barokho + baa&imaqliq.com +60282 + AHMC Healthcare + Alan Leghart + iana-pen&ahmchealth.com +60283 + BEIJING FIBRLINK COMMUNICATIONS CO.,LTD. + Liwei Fu + fuliwei&sgitg.sgcc.com.cn +60284 + CloudSky + Roy Wang + roy.wang&icloudsky.com +60285 + OMS Ireneusz Smoczkiewicz + Ireneusz Smoczkiewicz + ismoczkiewicz&gmail.com +60286 + Tusker Direct + Andy Sabapathee + andy.sabapathee&tuskerdirect.com +60287 + Damarcus Jones + Damarcus Jones + damarcusjones847&gmail.com +60288 + Linuxito + Santiago Ruipérez + sruiperez&linuxito.es +60289 + Nihon Kohden Center of Expertise + Victor Boyd + victor_boyd&nihonkohden.com +60290 + TheThingsTalking Technology Co.,Ltd + Van He + 330406357&qq.com +60291 + ISCTE – Instituto Universitário de Lisboa + João Oliveira + diretor.siic&iscte-iul.pt +60292 + Star Comunicaciones S.L. 
+ Manuel Ángel García + manuel&starcomunicaciones.es +60293 + Flywire Corporation + Barbara Cousins + iana-pen&flywire.com +60294 + Fujian Radio and Television Network Group(福建广电网络集团) + Cai Huixia + 466020522&qq.com +60295 + WuXi High Information Security Technology Co.,Ltd + Gao Qi + marketing&hinfose.com +60296 + DANAM Aerospace R&D Center S/W + Minseong, KIM + mcastle&danam.co.kr +60297 + Provincie Groningen + Daniel Kleve + bti&provinciegroningen.nl +60298 + Greater Manchester Mental Health NHS Foundation Trust + Infrastructure Alerts + InfrastructureAlerts&gmmh.nhs.uk +60299 + ev-lution-technology + Tom Tiberio + tommyteeabc&gmail.com +60300 + ev-lution-technologie + Tom Tiberio + tommyteeabc&gmail.com +60301 + ev-lution-charge + Tom Tiberio + tommyteeabc&gmail.com +60302 + ev-lution-charger + Tom Tiberio + tommyteeabc&gmail.com +60303 + ev-lution-chargeur + Tom Tiberio + tommyteeabc&gmail.com +60304 + Metaverse Multiverse + Alexander Matthew Estrada + admin&metaversemultiverse.net +60305 + Tendaworld + Chris Daniels + mail&tenda.world +60306 + Christopher Spivey + Christopher M Spivey + spivey575&gmail.com +60307 + Entrack AS + Tore Taugl + tore.taugl&entrack.no +60308 + Alder Holdings LLC + Collin Campbell + collin.campbell&alder.com +60309 + RayCorp + Ray Whitfield + raylwhitfield&gmail.com +60310 + $coindogg + Ronny Armstrong + ronny.armstrong86&gmail.com +60311 + XPOINT NETWORK COMPANY LIMITED + Ivan Mak + ivan.mak&xpointnetwork.com +60312 + winhong + Qi Zhang + zhangqi&winhong.com +60313 + STUPINSKIY ELECTROTECHNICHESKIY ZAVOD Limited Liability Company (STEZ LLC) + Toivonen Denis + dtoivonen&stezelectro.ru +60314 + LBDS + Technical Contact + lbds-io&protonmail.com +60315 + JET Services Marketing + Zeljko Baralic + zeljko.baralic&jet-services.com +60316 + ATEXO + MALHOMME Laurent + crypto&atexo.com +60317 + Cascade Raider Holdings + Darryl Hon + itsupport&raiderhansen.com +60318 + Will Buik + Will Buik + pen_admin&miats.com +60319 + Yarra City Council + Ross Witherby + netadmin&yarracity.vic.gov.au +60320 + Leids Universitair Medisch Centrum (LUMC) + Tim van Someren + t.van_someren&lumc.nl +60321 + meando + Anders Dovblad + adovblad&hotmail.com +60322 + Chutima Connect Co., Ltd. + Nonvapon Rojanavasu + r.nonvapon&chutima.com +60323 + EIM doo Belgrade + Nenad Vranesevic + nenad.vranesevic&eim.rs +60324 + Kotivity + Tito Alai + mail&kotivity.com +60325 + RTBF-STG + Jérôme DE SMEDT + hostmaster&rtbf-stg.be +60326 + Presidenza del Consiglio dei Ministri - DIS-UCSe + Gen. C.A. Piero Burla + certificazione&alfa.gov.it +60327 + Herbert AhSam-Kreiter + Herbert AhSam-Kreiter + info&ahsam-kreiter.net +60328 + Servizi e Tecnologie Informatiche Srl + Franco Marchesini + franco.marchesini&gmail.com +60329 + Marcus Alessandro Pavan Francisco + Marcus Alessandro Pavan Francisco + pavan.marcus&outlook.com +60330 + hssystem + Shenggen Chen + shenggen.chen&hssystem.cn +60331 + ACMOSS + Mariama DIEDHIOU + mariama.diedhiou&interieur.gouv.fr +60332 + RLS Consulting Services + Robert Lee Saganich + robert.saganich&rlsconsultingservices.com +60333 + Borders Online Ltd + Marty Lee + marty&bordersonline.net +60334 + Eugenio Tampieri + Eugenio Tampieri + eugenio+iana&tampieri.me +60335 + Jefferson County Board of Education + Bradley Arnold + barnold&jefcoed.com +60336 + PicassoMD, Inc. + Sean Carpenter + netops&picassomd.com +60337 + rapixus + Wang Ying-Tsung + chris&rapixus.com +60338 + Farpointe Data, Inc. + Kirk Bierach + kirk.bierach&farpointedata.com +60339 + IngeniArs S.r.l. 
+ IngeniArs Administration + administration&ingeniars.com +60340 + parsasharif + rezaei parsasharif + rezaie.h568&gmail.com +60341 + JWIPC TECHNOLOGY CO., LTD. + David Wang + wanghl&jwele.com.cn +60342 + Ultimate Fitness Group LLC + John Staab + jstaab&orangetheory.com +60343 + The P.A. People Pty Ltd + Sam Dodds + info&papeople.com.au +60344 + Bravas Tecnologia + Ricardo Vasconcellos + ricardo&bravas.ind.br +60345 + Open industrial PKI + Andreas Philipp + andreas.philipp&trust-point.com +60346 + Voelkl Sports GmbH + Christian Wieland + it-einkauf&voelkl.de +60347 + Lutech SpA + Danilo Lozza + d.lozza&lutech.it +60348 + Aretiico Group PLC + Tim Shields + tim.s&aretiico.com +60349 + Barbaros Catkan + Barbaros Catkan + bcatkan&gmail.com +60350 + Settels Savenije Group of Companies + Alex Goris + alex&ag-its.nl +60351 + ITSEC RND MICHAŁ LESZCZYŃSKI + Michał Leszczyński + michal.leszczynski&itsecrnd.com +60352 + SPB Exchange + Kaltashkin Eugene + e.kaltashkin&spbexchange.ru +60353 + Kato Electric Industry CO.,LTD + Takehisa Nakagawa + nakagawa-t&kdk-katoudenki.co.jp +60354 + CHSLD Domaine Saint-Dominique + Support TI + supportti&domaine-saint-dominique.com +60355 + Shanghai UDH Technologies Co.. Ltd. + Qian Chen + cqian&udh.com.cn +60356 + Tom van Brienen + Tom van Brienen + pen-tom&tvbhosting.nl +60357 + BMI + Yin LIU + yin.liu&bmisolution.com +60358 + Machine Specialties + Owen Jacobs + oid-admin&machspec.com +60359 + YTL Communications Sdn Bhd + Joseph Lee + joseph.lee&ytlcomms.my +60360 + Europractice + John Chandler + john.chandler&stfc.ac.uk +60361 + Innopay Zrt. + Márk Kolovics + mark.kolovics&innopay.hu +60362 + Shenzhen Dazoo Technology Co., Ltd. + Mark Wang + mark.wang&idazoo.com +60363 + Nuuday A/S + Jens Galsgaard + jega&nuuday.dk +60364 + Arx Research, Inc. + Charles Cameron Robertson + cameron&arx.org +60365 + Infobezpeka LLC + Olexiy Podchashynsky + on&infobezpeka.com +60366 + Stakhanovets + Sergii Shamshyn + sergey&stakhanovets.ru +60367 + CRI Computer Rework & Installations AB + Lars Åkesson + Lars.Akesson&cri.se +60368 + Lago Networks Oy + Jussi Jarvi + registry&lago.fi +60369 + DNSFau + Evan Afentoulis + evn67&hotmail.com +60370 + Rheinmetall BAE Systems Land + Dave Crawford + rbslimt&rbsl.com +60371 + Magics Security Agency + Nick Bosters + contact&nederland-mail.nl +60372 + Logic sh.p.k. + Armando Vrushi + avrushi&logicshpk.al +60373 + Foxleys Consultancy Limited + Iain Segall + Iain.segall&foxleysconsultancy.co.uk +60374 + Medidata Solutions + Samir Jain + samir.jain&3ds.com +60375 + Kibc technology + Dennis Sim + Denis-sim&yandex.ru +60376 + A.M. Residential & Commercial Contractors + Adam Maxwell + AMContractor&yahoo.com +60377 + METER Solutions Kft. + Csaba Pesti + jerry&met3r.com +60378 + Turing Medical + Max Bertolero + max.bertolero&turingmedical.com +60379 + RTK-TECHNOLOGIES, LLC + Alexander Novozhenin + snmp&rtk-t.ru +60380 + best Systeme GmbH + Scharel Clemens + oid-admin&best.de +60381 + Zitel + Meysam Heydarlou + cto&zi-tel.com +60382 + Svante Technologies Inc + Svante Technologies Inc + web_admin&svanteinc.com +60383 + Frederickson Graphics Inc. + Syby Abraham + syby&mmpbc.ca +60384 + Ellevio AB + Joakim Bengtzon + joakim.bengtzon&ellevio.se +60385 + Svenska Kraftnät + Hannes Johnsson + Hannes.johnsson&svk.se +60386 + Yabarana Inc. 
+ Alejandro Imass + aimass&yabarana.com +60387 + zhaochuninhefei + Zhao Chun + zhaochuninhefei&gmail.com +60388 + Protect Power Systems Limited + Valentin Kornev + v.kornev&upsprotect.kz +60389 + Parry Labs + Jon Wahlmann + jon.wahlmann&parrylabs.com +60390 + OUTREMER TELECOM + PERSEGOL DELPHINE + d.persegol&outremer-telecom.fr +60391 + ANALOG WAY SAS + Daniel BOUQUET + daniel.bouquet&analogway.com +60392 + Andrei-Alexandru Bleorțu + Andrei-Alexandru Bleorțu + me&andrei-z.com +60393 + Servicios Tecnológicos Integrales S.A. + Federico Nartallo + fnartallo&stisa.com.ar +60394 + 电科网安 (CETC) + 余婷婷 (Yu Tingting) + yu.tingting07312&westone.com.cn +60395 + ProjectPKI + Charles Hamby + Charles.hamby&projectpki.com +60396 + SOCIETE REUNIONNAISE DU RADIOTELEPHONE + PERSEGOL DELPHINE + delphine.persegol&srr.fr +60397 + SAIC General Motors Corporation Limited + Xiaodong Pang + xiaodong_pang&saic-gm.com +60398 + NÄTRAÄLVENS virkesförsäljningsförening u.p.a. + Håkan Thelin + hakan.thelin&natraalven.se +60399 + 深圳市道通合创数字能源有限公司 (Shenzhen Daotong Hechuang Digital Energy Co., Ltd.) + 瞿松松 (Qu Songsong) + qusongsong&autel.com +60400 + HTS Group Limited + Blair Marshall + bmarshall&htsgroup.co.nz +60401 + Micas Networks Inc. + Peng Tian + patrick.tian&micasnetworks.com +60402 + VOGL electronic GmbH + Matthias Breithaupt + m.breithaupt&vogl-electronic.com +60403 + Subspace Dynamics, LLC + David Holl, Jr. + dholl&subspacedynamics.com +60404 + Shanghai Onstar Telematics Corporation Limited + Weiwei Ji + weiwei.ji&shanghaionstar.com +60405 + Ricardo Vega + Ricardo Vega + rickyvega7766&icloud.com +60406 + Guangdong Huaiji Weifa Hydro-electric Power Company Limited + lihuiping + lihuiping&clp.com.cn +60407 + Houthoff + Arjan Bekkema + systeembeheer1&houthoff.com +60408 + AadiRi + Abhirup Sarkar + miles2gooo&gmail.com +60409 + Kraftwerke Oberhasli AG + Daniel Abegglen + it-support&kwo.ch +60410 + CJSC Bank of Asia + Seitek Kurmanov + bankasia&bankasia.kg +60411 + EGK Entsorgungsgesellschaft Krefeld GmbH & Co. KG + Andreas Ruegenberg + edv&egk.de +60412 + Centro regional de Servicios Avanzados + Ángel Martínez + angel.martinez&csa.es +60413 + Stephan Emich + Stephan Emich + noreply&emich.systems +60414 + crynet.one + Kilian Kreibich + info&crynet.one +60415 + Saint Gregory Heart and Vascular Center + Sarkis Kiramijyan, MD + aharutyunyan&stgregoryheart.com +60416 + Fidelis MGU + Anuj Bajaria + IT_infrastructure&fidelismgu.com +60417 + Cromix, LLC + Alexey Kolpakov + mvno&cromix.ru +60418 + LURE IT, LLC + Vladimir Korablev + info&lureit.ru +60419 + Strawnet + Steven Strawbridge + sdstraw&outlook.com +60420 + 山源科技 (Shanyuan Technologies) + 李晓鹏 (Leo Li) + 1540076809&qq.com +60421 + Peng Lihao + Peng Lihao + penleehow&gmail.com +60422 + hisource Technology Development Co., Ltd. 
+ Peng Lihao + penleehow&gmail.com +60423 + KABEG Management + Gerald Regenfelder + gerald.regenfelder&kabeg.at +60424 + Nordic RCC A/S + Thomas Bille Joensen + tbj&nordic-rcc.net +60425 + MIU Labs + Isaac Amezaga i Saumell + iamezaga&miulabs.com +60426 + Namaste Informatics, LLC + Gay Dolin + gdolin&NamasteInformatics.com +60427 + Projecthive + Branislav Zeljak + it-support&z-iq.de +60428 + SAFY.io + Imane BOUIJ + contact&safy.io +60429 + Kymata Srl + Miki Ferrari + tecnico&kymata.it +60430 + Stockholm Exergi AB + Jimmy Renström + jimmy.renstrom&stockholmexergi.se +60431 + Medsquare + Damien JEANDEL + d.jeandel&medsquare.com +60432 + Paul Morrison + Paul Morrison + pauliewog&gmail.com +60433 + Universitätsklinikum des Saarlandes + ZIK - Server und Storage + san-server&uks.eu +60434 + WITHNETWORKS + Tony Kang + sykang&withnetworks.com +60435 + Huang Liuyong + Huang Liuyong + huangliuy&gmail.com +60436 + EUROIMMUN Medizinische Labordiagnostika AG + Andreas Lingk + a.lingk&euroimmun.de +60437 + RapidCloud + Tom van Brienen + pen-reg&rapidcloud.eu +60438 + Genesis Systems + Brandon Mennear + brandon&buygenesis.com +60439 + RioVIDEO Ltd + Matthew Compton + matthew.compton&riovideo.co.uk +60440 + Rob Harman + Rob Harman + rob&robharman.me +60441 + Helios Power Solutions + Gemmar Jhan Contayoso + jcontayoso&heliosps.com.au +60442 + Jonas Marklén + Jonas Marklén + txc&txc.se +60443 + Feeney Technologies, LLC + Kiern Feeney + kiern&feen.tech +60444 + Nordic Semiconductor ASA + Stig Bjørlykke + stig.bjorlykke&nordicsemi.no +60445 + SI.pl + Jacek Kowalski + iana&si.pl +60446 + Trout Software + Romain Doumenc + support&trout.software +60447 + Delta Surge Inc. + Jaswant Dhaliwal + icacorp&aol.com +60448 + Universitäts-Segel-Club Dortmund e. V. + Timo Schrappe + t.schrappe&usc-dortmund.de +60449 + Codium + Timo Schrappe + hello&codium.de +60450 + Clarity Security Corporation + Alexis Moyse + alexis&claritysecurity.io +60451 + Extract Systems + William Parr + william_parr&extractsystems.com +60452 + Fortyfive South Pty Ltd + Michael Shirtcliff + mshirtcliff&45south.com.au +60453 + dieEntwickler Elektronik GmbH + Robert Schedlberger + robert.schedlberger&dieentwickler.at +60454 + Complex LTD. + Sergey Smirnov + newitservices&yandex.ru +60455 + Oracle Health Applications and Infrastructure + Vartan Chukhadarian + vartan.chukhadarian&cerner.com +60456 + WE Europe B.V. + Roy Pellegrom + itsd&wefashion.com +60457 + Vail School District + Mark Breen + breenm&vailschooldistrict.org +60458 + Anitha. A + Anitha. A + anithaannadurai410&gmail.com +60459 + Fortis Security + Brendan + b.mcfarland&fortissecurity.com.au +60460 + Dynamic Shell + Mirjana Mikec + mirjana.mikec&dynshell.com +60461 + Tim Nowaczyk + Tim Nowaczyk + zimage&gmail.com +60462 + Renante Blanco Baniaga + Renante Blanco Baniaga + admin&renzbaniaga1-7xsd.wbx.ai +60463 + SoCal United Food & Commercial Workers Unions and Food Employers Joint Benefit Funds Administration, LLC + Brian Miller + BMiller&scufcwfunds.com +60464 + QLS Consulting + W. E. Summerville + w.summerville&qlsconsulting.com +60465 + Encompass HSCNI + Encompass Support + encompass&telefonicatech.uk +60466 + Australis M2M Pty Ltd + Patrick Paffard + patrickpaffard&australism2m.com.au +60467 + Lukas Hrabal + Lukas Hrabal + lukas.hrabal&lukinno.ml +60468 + AB Borlänge Energi + Mats Rusback + mats.rusback&borlange-energi.se +60469 + Landatel Comunicaciones S.L. + Manuel Arroyo + manuel&landatel.com +60470 + Data General + Manuel Arroyo + manuel&rroyo.es +60471 + Gigant LTD. 
+ Sergei Smirnov + dsfinks&mail.ru +60472 + Fengrunda Technology Co.,Ltd. + Changqing Gao + product&poetech.net +60473 + Zhengdao Quantum + Lance Zhang + lingsongzhang&zdlz.top +60474 + EXO + Diego Luis Szychowski + diegos&exo.com.ar +60475 + Special Communication and Information Security State Service of the Republic of Azerbaijan + Aykhan Aghazada + ayxan&gov.az +60476 + Antara Teknik LLC + Mehmet Adalier + madalier&antarateknik.com +60477 + PicnicHealth + Akash Krishnan + akash&picnichealth.com +60478 + Airtime Network, Inc. + Charles Robertson + cameron&airtime.network +60479 + Elona Health GmbH + Leon Hillebrandt + leon&elona.health +60480 + National Tsing Hua University Tech Society + National Tsing Hua University Tech Society + mis&nthu.dev +60481 + Alvaka + Chris Cartwright + ccartwright&alvaka.net +60482 + Rebellions Inc. + Jinwook Oh + j.oh&rebellions.ai +60483 + GraphDefined GmbH + Achim Friedland + achim.friedland&graphdefined.com +60484 + AEPONYX + Louis Forest + lforest&aeponyx.com +60485 + E-LINTER + Ken + ken.cui&e-linter.com +60486 + DTE S.A. + Guillermo Crocetti + guillermocrocetti&dtesa.com.py +60487 + Nikolay TODOROW + Nikolay TODOROW + n.todorow&ibsg-online.de +60488 + Empower Energy + Michael Jensen + admin&empowerenergy.com.au +60489 + Akkutron Handels GmbH + Christian Tran + christian.tran&akkutron.at +60490 + Armtel + Irina Kazymova + i.kazymova&armtel.com +60491 + Calnex Solutions + Alan Potter + iana-admin&calnexsol.com +60492 + Ticketmaster + Greg Bowyer + greg.bowyer&ticketmaster.com +60493 + Ing. Alfredo Revenaz + Alfredo Revenaz + pr&revenaz.it +60494 + DMYTRO LOBASHEVSKYY + DMYTRO LOBASHEVSKYY + 0deadc0deh&gmail.com +60495 + Screen INFOmatch B.V. + Arno de waal + ict&screeninfomatch.com +60496 + SIMUR Europe SL + Victor Escudero Rubio + vescudero&simur.es +60497 + Centro de Gerenciamento Técnico do SISCEAB (CGTEC) + Tiago Porto Barbosa Cap Eng + portotpb&fab.mil.br +60498 + Engity GmbH + Gregor Noczinski + contact&engity.com +60499 + decarbon1ze GmbH + Marie-Carole Girbal + marie-carole.girbal&decarbon1ze.com +60500 + Mucse + Jun Yao + yaojun&mucse.com +60501 + Zheng Cui + Zheng Cui + woolenhy&163.com +60502 + Knosys + Nic Passmore + npassmore&knosys.it +60503 + Life Science Compute Cluster, University of Vienna + Thomas Rattei + contact.lisc&univie.ac.at +60504 + TRIZ Networks corp. + sungwoo baek + sw.baek&triznetworks.com +60505 + South East Coast Ambulance Service NHS Foundation Trust + Jason Tree + itmarketing&secamb.nhs.uk +60506 + Blake.DK + Alexander Blake + alex&blake.dk +60507 + TX Australia Pty Ltd + David Kilpatrick + dkilpatrick&txaustralia.com.au +60508 + Telenor Maritime AS + Trond Retterholt + trond.retterholt&telenormaritime.com +60509 + brainelectronics + Jonas Scharpf + jonas&brainelectronics.de +60510 + Spedition Hamann + Julian Hamann + hamann.julian&spedition-hamann.de +60511 + Erwin Renz Metallwarenfabrik GmbH & Co. KG + Bastian Haas + administrator&renzgroup.de +60512 + INFOSKILLS TECHNOLOGY PVT LTD + SONU KUMAR + admin&infoskillstechnology.com +60513 + 4M SOFTWARE SRL + MIRKO CALVIGIONI + info&4msoftware.it +60514 + Glendale Community College + Susan Bedker + Susan.Bedker&gccaz.edu +60515 + Fortress Power, LLC. + William Gathright + software_admin&fortresspower.com +60516 + Asheville Podiatry Associates + Debra Wright + henry1wright&charter.net +60517 + David Johnstone + David Johnstone + diana&dewlands.com +60518 + Tongxin Microelectronics Co.,LTD. + Yong Qin + qinyong&tsinghuaic.com +60519 + Cloudwyze, Inc. 
+ Mark Doner + mark.doner&cloudwyze.com +60520 + My Serial, DEV. + ALAA BIN S B + Admiral&MySerial.DEV +60521 + Hyperian Energy Inc. + Eric Gilmore + eric&hyperian.com +60522 + TSODUM + Konstantin Strulev + info&smartdcim.ru +60523 + Moonlightmusicians + Philip W Johnson + Phil&Moonlightmusicians.com +60524 + Actual Broadband + Joshua Pool + admin&actualbroadband.com +60525 + Zsolt Turza + Zsolt Turza + turzazsolt&gmail.com +60526 + Wewins Wireless Co., Ltd + Yuanchen Chen + chenyc&we-wins.com +60527 + Radio-Télévision Belge de la Communauté Française + Jérôme DE SMEDT + hostmaster&rtbf.be +60528 + Entidad Nacional de Acreditación (ENAC) + Jose luis Miguel + jlmiguel&enac.es +60529 + Alois Müller GmbH + Matthias Botlik + Matthias.Botlik&alois-mueller.com +60530 + Feliks Westhoff + Feliks Westhoff + quickness_backside621&simplelogin.com +60531 + Teciatric Informatics Private Limited + Mathew Kochekkan Jacob + mathewkj&teciatric.net +60532 + Test Setup - Thomas Nielsen + Thomas Nielsen + thomas&tnit.dk +60533 + Henrik Norrman + Henrik Norrman + henrik&b6.se +60534 + Meidensha Corporation + Ryo Koshiya + koshiya-r&mb.meidensha.co.jp +60535 + thyssenkrupp Components Technology Hungary Kft. + Peter Dome + peter.dome&thyssenkrupp-automotive.com +60536 + UAB "iSense Technologies" + Algirdas Adamauskas + a.adamauskas&isense.lt +60537 + Nebius + Igor Znamenskii + iznam&nebius.com +60538 + EDR Credit Services + Robert van der Heijden + Ict&edrcreditservices.nl +60539 + Paper Excellence + Cory MacCharles + siteinfra&paperexcellence.com +60540 + Poulson Forensics LLC + Mike Poulson + mike&poulsonforensics.com +60541 + WJEC CBAC Ltd + IT Security Manager + security&wjec.co.uk +60542 + Virgin Media + OSS Team + OSSEntMonitorandAutomate&virginmedia.co.uk +60543 + Salman M. Khan- Belia Esparza- Shary Gardens Pediatrics- Donna Childrens Clinic + Belia Esparza + sharygardenspfnc&gmail.com +60544 + Belia Esparza- Shary Gardens Pediatrics + SHARON GARCIA + donnachildrensclinic&gmail.com +60545 + Clevon AS + Mairi Dubik + infrastructure&clevon.com +60546 + Verbio Technologies, S.L. + IT Department + infrastructure&verbio.com +60547 + Blue Ridge Bankshares, Inc. + Ryan A. Blake + oid&mybrb.bank +60548 + Wombkeepers Obstetrics and Gynecology + Ann Marie Pusterla + annmarie.pusterla&oneoakmedical.com +60549 + Piglet Engineering + Pete Ryland + pdr&piglet.ch +60550 + Heaven Palace + Rimvydas Zilinskas + rimvydas.zilinskas&gmail.com +60551 + Cardolite Corporation + Timothy Stonis + tim&cardolite.com +60552 + TSN Systems GmbH + Abraham van der Linde + abraham&tsn.systems +60553 + Courtiers + Courtiers + itsupport&courtiers.co.uk +60554 + Unite Logistics AB + Jan-Stefan.Karlsson + jan-stefan.karlsson&vsv.se +60555 + Joachim Raber + Joachim Raber + joachim&tereas.eu +60556 + Beaver Medical LLC + Julie Christensen + jchristensen&beaverclinic.net +60557 + kalytta.net + Philipp Kalytta + kontakt&kalytta.net +60558 + City of Yellowknife + IT Department + it&yellowknife.ca +60559 + Tigo Energy + Steve Brozic + steve.brozic&tigoenergy.com +60560 + GMO BRAND SECURITY Inc. 
+ Mitsuaki Nakagawa + oid-contact&brightsconsulting.com +60561 + labcmd + Cory James Thompson + cornelius&labcmd.com +60562 + IPCOM + Yurii Remez + service&ipcom.ua +60563 + Binghamton University + Timothy Cortesi - Director of Innovation / Research Computing Support + tcortesi&binghamton.edu +60564 + Crutchfield Corporation + Paul Fitzsimmons + esg&crutchfield.com +60565 + Maricopa County Recorder's Office + George Tohannie + gtohannie&risc.maricopa.gov +60566 + McKenzie A Pepper + McKenzie A Pepper + mckenzie.a.pepper&gmail.com +60567 + PointEng + choi jinyoung + cjy.ups&gmail.com +60568 + Premier Surgical Associates + Kevin Burris + kburris&premiersurgical.com +60569 + Mevspace Sp. z o.o. + Adam Ochmański + adam.ochmanski&mevspace.com +60570 + Pairpoint + Mohammad Tahir + Mohammad&pairpoint.io +60571 + STL Systems AG + Martin Beck + m.beck&stl.systems +60572 + Alex Nagy + Alex Nagy + anagy&precisengineering.com +60573 + Flickswitch pty ltd + Jeremy de Agrela + jeremy&flickswitch.co.za +60574 + SÜSS MicroTec SE + SUSS MicroTec NOC + noc&suss.com +60575 + SD 27 Cariboo Chilcotin + IT Department + itdepartment&sd27.bc.ca +60576 + Stichting Open Electronics Lab + Eric Herman + eric.herman&gmail.com +60577 + pritac consulting gmbh + Markus Oesch + markus.oesch&pritac.de +60578 + Groupe Mooland + Justin DUVAL + informatique&mooland.fr +60579 + SourceFactory.at e.U. + Mario Klug + office&sourcefactory.at +60580 + Sandy Mossgrave + Sandy Mossgrave + sandym&ucsc.edu +60581 + Techartworks Pty Ltd + Mitul Bhatnagar + mitul.bhatnagar&gmail.com +60582 + Shanghai ICWiser Communication Technology Co., Ltd. + Eric Qiu + jmqiu&icwiser.com +60583 + Microdata Software + Pedro D. Sanchez + tec&microdata.es +60584 + Origin Energy Limited + Will Yeates + will.yeates&originenergy.com.au +60585 + Alrahman LLC + Belia Esparza + sharygardenspfnc&gmail.com +60586 + Inpixon + David Westgate + admin&inpixon.com +60587 + Southeast Mississippi Rural Health Initiative, Inc + Brian Howard + oid&semrhi.com +60588 + Impulse Ltd + Mike Chaldin + chaldin&mail.ru +60589 + Stadt Wesseling + Oliver Schmitt + it-service&wesseling.de +60590 + ALTEN Technology USA Inc. + Andy Bolom + andy.bolom&alten.com +60591 + Shenzhen Yunlian Gongchuang Cloud Services Co., LTD + Hao Hu + huhao&szylgc.net +60592 + Shenzhen Toputel Technology Co.,Ltd + deqiao wei + wadetech&sina.cn +60593 + Europlacer + Nathaniel Cleland + nathaniel.cleland&europlacer.co.uk +60594 + Wismut GmbH + Leiter RZ + lizenz&wismut.de +60595 + Sinyalizasyon Elektronik İnş. San. Tic. Ltd. Şti + İlhami KIVRAK + ilhami.kivrak&sinyalizasyon.com.tr +60596 + INNOGENCE TECHNOLOGY + Jiuling Zhang + czlh&inno-gence.com +60597 + Astra Wireless Technology + Dmitry Gustelev + d.gustelev&astrawireless.net +60598 + Universitätsklinikum Jena + Martin Hirschelmann + iana&med.uni-jena.de +60599 + TUALCOM + Bora MUTLUER + bora.mutluer&tualcom.com.tr +60600 + Malmö Stad + Jesper Henning + systemdrift&malmo.se +60601 + Central Bank of Nigeria + Abubakar Sadiq Babah + asbabah&cbn.gov.ng +60602 + PEP + PEP Support + support&pep.cs.ru.nl +60603 + FTS + Fernando MT + ftsolucoes&outlook.com +60604 + Growatt New Energy Australia Pty Ltd + rex wang + rex.wang&growatt.com +60605 + Phitech + Hugo Hatzig + h.hatzig&phitech.fr +60606 + EXACT Technology + Andi Gerl + andi.gerl&exacttechnology.com +60607 + The University of Connecticut Foundation, Inc. + Daniel Nevelos + dnevelos&foundation.uconn.edu +60608 + InduXray Co., Ltd. + ChingHao, Hsieh + induxray.tw&gmail.com +60609 + Hyperconn Pte. 
Ltd + Paul Hu + paul.h&hyconext.com +60610 + hyxipower + Chao Hao + hao_chao&hyxipower.com +60611 + NOX Nachtexpress Austria GmbH + Daniel Schatz + daniel.schatz&nox-nachtexpress.at +60612 + Topazdom Technologies Limited + David Nzagha + nzaghad&topazdom.com +60613 + Meggitt Ltd + Hugh Smithson + hugh.smithson&meggitt.com +60614 + Ascension St. Vincent's Women's Health + Kendall Triplett + kendall.chambliss&ascension.org +60615 + Aerospace Data Security GmbH + Niels Born + admin&aerospace-datasecurity.de +60616 + China Telecom Cloud + Hu Peng + hup4&chinatelecom.cn +60617 + Tina Bilisim Teknolojileri Yazilim Sanayi ve Ticaret Ltd. Sti. + Emine Sule Celik + esulecelik&tina.com.tr +60618 + IPLAN + Maxi Tofani + mtofani&iplan.com.ar +60619 + Callie Jones + Callie Jones + Jonescallie&hotmail.com +60620 + Motech + Septa Muhammad Rivaldy + septamr&motech.id +60621 + Queensland Department of Environment and Science + Matt Jolly + Matthew.Jolly&des.qld.gov.au +60622 + CERN_COMK LD1 + Britton Jennings + bj5989&yahoo.com +60623 + TekLnk + Edward Moss + admin&teklnk.com +60624 + Enetrics LLC + Robby Simpson + robby.simpson&enetrics.com +60625 + Botimageai + Hans Johnson + hjohnson&botimageai.com +60626 + LEX COMPUTECH CO.,LTD. + Gary Lin + gary_lin&lex.com.tw +60627 + Internet Domain Name System Beijing Engineering Research Center Ltd. + SUN Guonian + sunguonian&zdns.cn +60628 + ASAP Holding GmbH + Athanasios Dolapsoglou + athanasios.dolapsoglou&asap.de +60629 + Peter Schumann + Peter Schumann + shoubpabeuhbeurtnw&tcwlm.com +60630 + Zsolt Sárkány + Zsolt Sárkány + zsolt.sarkany&gmail.com +60631 + Dycipher + Joshua Pelino + joshpelino&dycipher.com +60632 + MASCOM WIRELESS + Onkagetse Bonnie Itiseng + onkagetsebonnie.itiseng&mascom.bw +60633 + Trenkwalder Group + Nermin Music + domains&trenkwalder.com +60634 + Ringit Connect + Aliji Emmanuel + emmanuel&ringitconnect.com +60635 + Orchard and Vineyard Supply + Aaron Mendoza + Helpdesk&ovs.com +60636 + Web Sharp Studios, Inc. + Scott Weeden + iana-pen&fronts.cloud +60637 + Wavesys Global + Ruchi Jha + r.jha&wavesysglobal.com +60638 + Dorsch Holding GmbH + Maik Martin + it-hotline&dorsch.de +60639 + MediReva B.V. + Karsten Meijers + ict&medireva.nl +60640 + Cubetex Technologies Inc + Mykhaylo Melnyk + office&cubetex.com +60641 + Microdrive + Yura Kilin + engineer&micro-drive.ru +60642 + Refactor Software Inc. + Michael Brown + mikeb&360replays.com +60643 + Geneverse Energy Inc. + Anson Liang + anson&geneverse.com +60644 + RFC IT + Terry Hoebe + terry.hoebe&rfcit.nl +60645 + House of Travel + HOT CIO + soa&hot.co.nz +60646 + Mark Olsson + Mark Olsson + mark&markolsson.se +60647 + Hoymiles Power Electronics Inc. + Silver Fu + silver.fu&hoymiles.com +60648 + Passengera s.r.o. + Jan Kolar + iana&passengera.com +60649 + BORDEAUX SCIENCES AGRO + Foury Jacques + dsi&agro-bordeaux.fr +60650 + Afni, Inc. 
+ David Brooks + davidbrooks&afni.com +60651 + XPerience Technologies + Alexey Borodin + sales&xperiencetech.com +60652 + LibreNMS + Tony Murray + murraytony&gmail.com +60653 + TDJ Australia Pty Ltd + Aaron Were + awere&tdj.com.au +60654 + Trenton Systems + Svet Stoyanov + sstoyanov&trentonsystems.com +60655 + The Center For Manual Medicine + Seth Harrison + sharrison&ctrmm.com +60656 + Partners Telecom Colombia + Hector Giovanny Guerrero Zabala + hector.guerrero&wom.co +60657 + Andrew Williams + Andrew Williams + andy&you.havebroken.it +60658 + HFR Networks + Bryan Cho + bryan.cho&hfrnetworks.com +60659 + Stephen Spicknall + Stephen Spicknall + steve&totalconnectionmobile.com +60660 + IT unlimited AG + Jakob Döring + jakob.doering&itunlimited.de +60661 + Nectus + Andrey Melnikov + support&nectus5.com +60662 + Shenzhen Dingyan Technology Co. LTD + Yuwei Song + weisongyu1987&163.com +60663 + Era-platform + Peter Bukashin + tbotc&yandex.ru +60664 + F+ tech + Ilya Khomyakov + i.khomyakov&fplustech.ru +60665 + Aveanna Healthcare, LLC. + Remy Bell + IANA-Contact&aveanna.com +60666 + UCLA Information Security Office + Roozbeh Kavian + rkavian&it.ucla.edu +60667 + Tongyuan + James Zhuang + james.zhuang&tongyuantec.com +60668 + Australian Government Department of Finance + Scott Havelberg + scott.havelberg&finance.gov.au +60669 + InfiniPHY Ltd + Alexandru Panoviciu + apanovic&infiniphy.com +60670 + Dyness Digital Energy Technology Co., LTD + Xue Zhenyu + shawn.xue&dyness-tech.com +60671 + Center For Disability Services New York + Amos Potter + cfdsnetworksupport&cfdsny.org +60672 + FLEUBA SRL + Fernando Fariña + fernando.o.farina&gmail.com +60673 + ChintPower + Yang Liven + askazero0624&sina.com +60674 + Thomas Brown + Thomas Brown + thomas&w4xzr.xyz +60675 + Shenzhen Senergy Technology Co., Ltd. + Kevin Chang + KevinChang&apd.com.tw +60676 + Kingdom of Talossa + Danihel Txechescu + technology&talossa.com +60677 + Moolex + Rahul Chaturvedi + rahul&moolex.com +60678 + Zen Exim Pvt. Ltd. + Deepak Chandrayan + deepak&zengroup.co.in +60679 + KT Corporation + Seulki Jeon + seulki.jeon&kt.com +60680 + "TC Profenergy" LLC + Aleksandr Vysotskii + atomic&profenergy.ru +60681 + Vivek Bhoj + Vivek Bhoj + vivek.bhoj&gmail.com +60682 + Kingfisher Clinic, PLLC + Kristen McCann + kingfisher.f.clinic&gmail.com +60683 + EnuxMail + Daniel Adinew + daniel.adinew&gmail.com +60684 + Justketchup + David Graham + oid&justketchup.com +60685 + Shelly Group + Deomid Ryabkov + d.ryabkov&allterco.com +60686 + Think Force + Ji Jingzhou + ji.jingzhou&think-force.com +60687 + Shanghai Yunzhou Information and Technology Ltd. + Chen Yu + software&zstack.io +60688 + Gowone Industry (Ganzhou) Co., Ltd + Liping Chen + lepin&gowone.com +60689 + Faraday Development Center + Alexander Smirnov + alexander.smirnov&faradaydc.com +60690 + Odek Technologies + Morne van Rensburg + mornevr&odek.co.za +60691 + Cabalier Intelligent Software + Sean McCarthy + smccarthy&cabalier.es +60692 + Health Plan Services, Inc + Dan Cutler + dan.cutler&wipro.com +60693 + Levi Keehn + Levi Keehn + lkeehn&fender.com +60694 + MEBAY + dai kun + yxy&mebay.cn +60695 + iPresso S.A. + DevOps Team + admin&encja.com +60696 + SlashID + Giovanni Gola + giovanni&slashid.dev +60697 + Mike Chancey + Mike Chancey + mchancey&jhmhospital.com +60698 + Verisure Sàrl + Jorge Lopez + jorge.lopezh&verisure.com +60699 + Zhuzhou CRRC Times Electric Co., Ltd. 
+ Ren Yi + renyi&csrzic.com +60700 + Transport for Greater Manchester + Jason Higgins + dcsops&tfgm.com +60701 + ED Elektronische Dienste GmbH + Juergen Heidel + heidel&it-favour.de +60702 + Chance Meador + Chance Meador + chancemeador&gmail.com +60703 + CTech Bilişim Teknolojileri San. ve Tic. AS + Emre Albayrak + licence_all&ctech.com.tr +60704 + FIRMATECH, INC + Paula Castillo + pcastillo&firmatech.io +60705 + Jessica Canas + Jessica Canas + Jcanasvalladares2&icloud.com +60706 + Keys Federal Credit Union + Ondrej Patrovic + ondrej.patrovic&keysfcu.org +60707 + Granite School District + Ryan B. Cooley + rcooley&graniteschools.org +60708 + Ivan Stepanov + Ivan Stepanov + ivan&stepanov.one +60709 + Anhui Xike Electronic Technology Co., Ltd. + Zhang Kuan + zhangkuan&cnonas.com +60710 + StarCharge + Billy Huang + qianquan.huang&wbstar.com +60711 + ZEKO.ME + Djordje Zekovic + dj&zeko.me +60712 + Highland Health Systems + Brandon Thrift + bthrift&hhsal.org +60713 + Mestobo + Robert Lichtenberger + r.lichtenberger&gmail.com +60714 + Panda Automatic + Maria Velichko + support&ipanda.pro +60715 + Cboe Global Markets + OIDAdmin + OID.Admin&cboe.com +60716 + Christian Wurm + Christian Wurm + info&wurm.biz +60717 + Stadler Rail AG + Christoph Kling + christoph.kling&stadlerrail.com +60718 + Interfase S.A. + Infraestructura + infraestructura&interfase.uy +60719 + Aapeli Vuorinen + Aapeli Vuorinen + iana-pen&aapelivuorinen.com +60720 + Schule Moehlin + Stefan Ries + informatik&moehlin.ch +60721 + Dohsnow Enterprises + Dylan Eden + dylan&dohsnow.com +60722 + Kantonsspital Baselland + Philipp Gerschwiler + philipp.gerschwiler&ksbl.ch +60723 + Zuidberg B.V. + Jan Wouter ten Napel + alex&ag-its.nl +60724 + Torus Inc + Trenton Bond + trent&torus.co +60725 + VAD Industrial Communication Technology Co., Ltd + Xie Gang + xiegang&vadsys.net +60726 + LeLu Berlin GmbH + Thomas Meier-Bading + info&bea-to-email.de +60727 + Dipl.-Ing. Martin Danjes GmbH + Frank Waermer + waermer&danjes.de +60728 + Galldris + Mahdi Chowdhury + it&galldris.co.uk +60729 + Krei.se + Richard Wachler + iana&krei.se +60730 + Georg Roth Stiftung & Co. Lebensmittelfilialbetrieb KG + Erik Schmeling + e.schmeling&norma-online.de +60731 + RationalCore LLC + Guillermo Simanavicius + simanavicius&rationalcore.com +60732 + Sigenergy + Coy + liuhongliang&sigenpower.com +60733 + Hanwha Solutions Corporation, SW development center + Youngwoong Lee + youngwoong.lee&qcells.com +60734 + Callisto Inc. + OSAKI Keishiro + keishiro&callisto-ai.com +60735 + Procono, S.A. + Juan Francisco Fernández Terán + fernandezteran&ptvtelecom.com +60736 + Curtis Vaughn Thompson © The Edinburgh Originale -O.E. ™ + Curtis Thompson + original-edinburgh&outlook.com +60737 + Mennonite Benevolent Society + Adaptive Technical Inc. 
+ certmgr&beadaptive.ca +60738 + Meredith-Webb Printing Co + Rick James + MWIT&meredithwebb.com +60739 + Smart Gauge + Luiz Mariano Bertissolo Júnior + iana&smartgauge.com.br +60740 + Eugen Wintersberger + Eugen Wintersberger + eugen.wintersberger&posteo.de +60741 + Optictimes + Alan Zhang + alanzhang&optictimes.com +60742 + Savino Digital Trust Sl + Nicola Savino + conservazione&savinosolution.com +60743 + rcp + Department Bodegraven + agroenewegen&unica.nl +60744 + Federale Overheidsdienst Beleid en Ondersteuning - Service Public Fédéral Stratégie et Appui + David Mampaey + david.mampaey&bosa.fgov.be +60745 + Kontron Europe + Christian Hofmann + christian.hofmann&kontron.com +60746 + ESY SUNHOME CO.,LTD + CHUQIAO ZHAO + MARKETING&ESYSUNHOME.COM +60747 + 63 Network + Douelas Isera + oid&63.network +60748 + Senior:InnenEinrichtungen der Hansestadt Lübeck + Daniel von Walsleben + walsleben&aph-luebeck.de +60749 + Inverso GmbH + Juri Zirnsak + admins&inverso.de +60750 + HEMATOLOGY ONCOLOGY ASSOCIATES OF THE PALM BEACHES + Sue Santiago + ssantiago&hoapb.com +60751 + 3forge + Mir Ahmed + mir.ahmed&3forge.com +60752 + Michael Duggan + Michael Duggan + md5i&md5i.com +60753 + Parliament of Western Australia + Healey Merritt + hmerritt&parliament.wa.gov.au +60754 + Müller Frauenfeld AG + WAGNER AG + TM-System&wagner.ch +60755 + E&B Information communications. Co., Ltd. + Hansuk Baek + hansuk100&enbtele.co.kr +60756 + Hemag Nova AG + Hemag Nova IT Services + iana.it&hemagnova.ch +60757 + Authentic Vision GmbH + Infrastructure Administrator + admin&authenticvision.com +60758 + Airborn, Inc + Nathan Groover + groovern&airborn.com +60759 + Manuel Zelt + Manuel Zelt + manuel.zelt&naiers.de +60760 + Group Administrators, Ltd. + Anthony Willard + awillard&groupadministrators.com +60761 + TEMS GmbH + Philip Berger + office&tems.at +60762 + 780 Software, Inc. + Michael MacKinnon + 780software&pm.me +60763 + SnapsInAZfs + Brandon Thetford + numbers&snapsinazfs.com +60764 + Alioth Systems Limited + Erin Shepherd + alioth-pen&erinshepherd.net +60765 + Metropolitan Police + Rob Paddock + robert.paddock&met.police.uk +60766 + SICHUAN HUACUN ZHIGU TECHNOLOGY CO.,LTD + Dai Ran + dairan&tgstor.cn +60767 + PEN02 - Centro de Tecnologia da Informação e Comunicação do Estado do Rio Grande do Sul S.A. + Daniel Soares de Oliveira + daniel-oliveira&procergs.rs.gov.br +60768 + P&G + Matt Rinesmith + rinesmith.m&pg.com +60769 + AASeq + AASeq + aaseq&aaseq.com +60770 + HANDL Technology LLC + Eric Hoyt + admin&handltechnology.com +60771 + Triorail Bahnfunk GmbH + Maximilian Stanzl + iana&triorail.com +60772 + School District of Milton + Neil Lubke + lubken&milton.k12.wi.us +60773 + All4Conn Tecnologia + Luiz Mariano Bertissolo Júnior + mariano&all4conn.com.br +60774 + Woven by Toyota, Inc. + PKI Admin + pki-admin&woven.toyota +60775 + Inspur Computer Technology Co.,Ltd + Guolei Zhang + zhangguolei&inspur.com +60776 + NEOROS LLC + Leonid Kolpachev + lk&neoros.ru +60777 + OneCloud + Sumner Robinson + srobinson&telware.com +60778 + XCoreSec + Matej Srebre + srebre&xcoresec.com +60779 + FxPro + Sergey Rusak + system-admins&fxpro.com +60780 + Inditex + Luis Amadeo Fernandes de Sousa + luisafso&inditex.com +60781 + Pilbara Minerals Ltd + IT Admin + itadmin&pilbaraminerals.com.au +60782 + LucciTech + chenqiujie + chenqiujie&luccitech.com +60783 + Sirius, Ltd + Farhulin Sergej Muhametovich + FSM&sirius.tel +60784 + Rakworx Inc. 
+ Jim Chu + jchu&rakworx.com +60785 + Marius Christ + Marius Christ + iana-pen&cyberpirate.eu +60786 + TrueNorth Medical Physics + Robert Hayward + rhayward&truenorthmedphys.com +60787 + Benny Zhou + Benny Zhou + benny_zhou2005&hotmail.com +60788 + Palomar Health + Lynne Deberry + lynne.deberry&palomarhealth.org +60789 + Ejtv + UGWU EJIKE JOSEPHAT + ugwuejikejosephat&gmail.com +60790 + VTI Corp. + Ba, Nguyen Van + banv&vticorp.co +60791 + Istarska kreditna banka Umag d.d. + Danko Brajković + dns&ikb.hr +60792 + Majava Consulting oy + Taavi Väänänen + taavi&majava.org +60793 + Gummi-Welz GmbH & Co. KG + Ingo Naumann + itservice&gummi-welz.de +60794 + Panzer Security Consulting Inc + Jacob Ideji + jideji&e-panzer.com +60795 + Sigdata + Julien Folly + julien.folly&sigdata.com +60796 + LOTTO Hessen GmbH + Dr. Uwe Kreibaum + uwe.kreibaum&lotto-hessen.de +60797 + Entrust Corporation + Ken Fischer + Ken.Fischer&entrust.com +60798 + Matej Srebre + Matej Srebre + matej&srebre.io +60799 + VK Tech LLC + Sergei Setin + s.setin&vk.team +60800 + Gemeinnützige und Hilfs-Gesellschaft der Stadt St.Gallen (GHG) + Managed Backend + tm-system&wagner.ch +60801 + Exro Technologies Inc. + Aida Afshar Nia + aafsharnia&exro.com +60802 + NebulaMatrix Technology Ltd. (Zhuhai) + Xuefeng Han + xuefeng.han&nebula-matrix.com +60803 + Exaion + Jacques DEMARE + jacques.demare&exaion.com +60804 + Kevin Thompson + Kevin Thompson + phlux&ewnix.net +60805 + BSST G + Mateusz Maćczak + macczakmateusz2&gmail.com +60806 + Arizona Regional Multiple Listing Services, Inc + April Parsons + aparsons&themarkt.com +60807 + PocketSign Inc. + Kazuki Sawada + hostmaster&pocketsign.co.jp +60808 + Gabriele Giulimondi + Gabriele Giulimondi + giulimondi&gmail.com +60809 + ACE-MULTIPASS + Jeremie Acemyan + contact.acemultipass&gmail.com +60810 + PT. Tata Sarana Mandiri + Jimmy Lim + jimmy.lin&tsmid.com +60811 + Kevin Ratliff + Kevin Ratliff + techkratliff&gmail.com +60812 + PR CANCER SPECIALISTS + Maria E Lugo + mlugo&prcancerspecialists.com +60813 + Shenzhen Kaifa Technology (Chengdu) Co., Ltd. + Mingxin Xie + mingxinxie&kaifa.cn +60814 + G. Siempelkamp GmbH & Co. KG + Tim Heim + tim.heim&siempelkamp.com +60815 + JEDEC Solid State Technology Association + Emily Desjardins + emilyd&jedec.org +60816 + Envent Engineering Ltd. + Randy Clarke + randy.clarke&enventengineering.com +60817 + API-OI + MAISON Flavien + flavien.maison&api-oi.re +60818 + Riccardo Bella + Riccardo Bella + IANA&riccardobella.com +60819 + Shenzhen NebulaMatrix Technology Ltd. + Xuefeng Han + xuefeng.han&nebula-matrix.com +60820 + Rasmus Tunfalk + Rasmus Tunfalk + rasmus.tunfalk&bahnhof.se +60821 + Savant Technologies, Inc. 
+ Paul Smith + paul.smith&savant.com +60822 + tnmember + teerawat poochangthong + tnadmin&member.in.th +60823 + Greenland Television + Jonas Tetzschner + jte&gtv.gl +60824 + APS Networks + Alexander Jeffries + alexander.jeffries&aps-networks.com +60825 + Eye Care of San Diego + Robert Smith + roberts&eyecareofsandiego.com +60826 + Handbuch Experten GmbH + IT Admin + iana&handbuch-experten.de +60827 + AxiomDirekt + Oliver Conrad + AxiomDirekt&web.de +60828 + FIZ Karlsruhe – Leibniz-Institut für Informationsinfrastruktur GmbH + Stefan Balduf + desktopservices&fiz-karlsruhe.de +60829 + CARD CENTRIC LIMITED + TOLGA KISLAL + tkislal&cardcentric.com +60830 + NEOSCLOUD, LLC + Elvis Espinal + ldap-oid-contact&neosmail.com +60831 + Hou Zhen Vision + yiting zhang + 142857uk&gmail.com +60832 + GELSEN-NET Kommunikationsgesellschaft mbH + Meik Kaminski + nic&gelsen-net.de +60833 + Landesamt für Vermessung und Geobasisinformation + Carsten Bastian + carsten.bastian&vermkv.rlp.de +60834 + NetFoundry, Inc. + Steven Broderick Elias + domain-admin&netfoundry.io +60835 + Vaticle Ltd + Benjamin Small + ben&vaticle.com +60836 + OpenZiti.io + Steven Broderick Elias + developers&openziti.org +60837 + A5G Networks, Inc. + Ravi Nathwani + rnathwani&a5gnet.com +60838 + DEERFIELD MANAGEMENT COMPANY, L.P. + George Lytle + glytle&deerfield.com +60839 + Tidalis (parked) + Arno Roefs + arno.roefs&tidalis.com +60840 + Leber + Anton Gubin + gubin&leber.ru +60841 + Mosoblstroy + Alexsandr Dzhezhulya + dzhuzzepe&gmail.com +60842 + Shenzhen WOOSH Innovations Co., Ltd. + Zhengjun Zhang + webmaster&wooshrobot.com +60843 + Aniekan Ankoh + Aniekan Ankoh + ani4xstjejud&gmail.com +60844 + Robin's Media Team + Jeffrey Blinksma + jblinksma&home.robinsmediateam.dev +60845 + Stadtwerke Hildesheim AG + Leitung Informationsmanagement + iana-registration&swhi.de +60846 + Grand Royal Chyld + Mduduzi Maleka + KAKAZUYA27&GMAIL.COM +60847 + RaptureWerks + Jason Foley + jtf195&rapturewerks.net +60848 + IDFACTORS, Inc. + Thomas Corder + tcorder&idfactors.com +60849 + TL Certification Centre Co., Ltd + Sun Shengnan + 382331327&qq.com +60850 + Midwest Regional Health Services + Shelley Henderson + shenderson&mrhsomaha.com +60851 + Airone s.r.l.
+ Luca Menegazzo + luca.menegazzo&airone-sistemi.com +60852 + Ubitron Foundation + Zeev Kotzer + zeev&ubitron.com +60853 + ISCL GmbH + Nicola Wilhelmi + support&iscl.de +60854 + Steven Cutright + Steven Cutright + steve.cutright&icloud.com +60855 + pfish zone + Paul Fisher + paul&pfish.zone +60856 + Show Config + Burak Batuhan Bensoy + batuhan&bensoy.com +60857 + Crisham + Charles Crisham + charles&crisham.us +60858 + Ambit Sentry + Mark Jeffery + mjeffery&ambitsentry.co.za +60859 + Octo Halsema + Octo Halsema + support&omg-mediaproducties.nl +60860 + GE Aerospace + Greg Friedman + gregory.friedman&ge.com +60861 + Petr Novák + Petr Novák + petrn&me.com +60862 + TiGHT AV + Jens Nilsson + jens.nilsson&tightav.com +60863 + Shenyang Zhitong Intelligent Device Technology LLC + Wang Qing + wangqing&wdfts.com +60864 + TU Wien Informatics Infrastructure + Infra Team + team&inf2.tuwien.ac.at +60865 + CryptCard + Melvin Wong + melvin&cryptcard.cc +60866 + Yunke China Information Technology Limited + FENGMINGYUE + Kevin0801&126.com +60867 + Cyshield + Khaled Taher + khaled.taher&cyshield.com +60868 + Berufsgenossenschaft Holz und Metall (BGHM) + Etienne Fornoff + etienne.fornoff&bghm.de +60869 + UnityHPC + Hakan Saplakoglu + hakansaplakog&gmail.com +60870 + PHMG Oncology Escondido + Lynne Deberry + lynne.deberry&palomarhealth.org +60871 + Baka Network + Qiran Wang + qiran.wang&baka.network +60872 + Veterinærinstituttet + Nina Ystehede + Nina.Ystehede&vetinst.no +60873 + illumin Inc + Thomas Sampson + thomas.sampson&illumin.com +60874 + MI Technical Solutions + Charles Fackler + charles.fackler&mitechnicalsolutions.com +60875 + Larner College of Medicine at UVM + David Towle-Hilt + david.towle-hilt&med.uvm.edu +60876 + Redgtech Automação + Douglas Silva + douglas.silva&redgtech.com.br +60877 + Ztamp Pte. Ltd. + Melvin Wong + melvin&ztamp.io +60878 + Tmonet Corp + Seungyeon Park + sypark&t-monet.com +60879 + UNICLOUD TECH CO., LTD. + ShunwanCai + caishunwan&unicloud.com +60880 + LUGOS + Laurent DURU + laurent.duru&lugos.fr +60881 + Sergey Dashanov + Sergey Dashanov + dashanovsd&gmail.com +60882 + Emeres Inc. + Byungkook Kim + byungkook.kim&emeres.com +60883 + RiPSIM Technologies Inc + Ricardo Cavero + ricardo.cavero&ripsim.com +60884 + Mobile Frontiers LLC + Sri Vedurumudi + sri&mobilefrontiers.com +60885 + Impression Signatures + Verusha Appalsamy + verushaa&impression.cloud +60886 + Frogi Secure + Philippe BOSMANS + p.bosmans&frogi-secure.com +60887 + Galsie + Liwaa Al Kontar + liwaa&galsie.com +60888 + DIMITRIB NETWORKS + Dimitri Bakoapnos + dimitri&dimitrib.ca +60889 + Zenithtel Technology + Frank Zhang + frank.zhang&zenithtel.com +60890 + Ryll + Axel Ryll + aryll&web.de +60891 + Frankyd's World + Francois Deschenes + frankyd&nbnet.nb.ca +60892 + Glean Corporation + Hisashi Ohtsuji + hisashi.ohtsuji&glean-japan.com +60893 + Nant Networks LTD + Nathan Griffiths + nathan.griffiths&nantnetworks.com +60894 + Integrated Foot & Ankle Specialists of NJ - Paul S. Demarco DPM + Brittney Majnerick + bmajnerick&integratedfootandankle.com +60895 + 四川虹信软件股份有限公司 (Sichuan Hongxin Software Co., Ltd.) + 方桂友 (Fang Guiyou) + fangguiyou&schkzy.cn +60896 + 041专属 (041 Zhuanshu) + 古馨月 (Gu Xinyue) + mynameis041&vip.qq.com +60897 + SCMS Manager + Brian Romansky + brian.romansky&scmsmanager.org +60898 + John Malengrio + John Malengrio + mail&malengrio.me +60899 + Carl Zeiss AG + Roman Anastasini + identity&zeiss.com +60900 + Anchor Security, Inc. 
+ Ben Burkert + benburkert&anchor.dev +60901 + MIND TECH INTERNATIONAL LIMITED + Issac Deng + issac.deng&mindtech.com.hk +60902 + Raum4 GmbH + Andreas Erhard + office&qedv.at +60903 + Rail Services International Austria GmbH + Clemens Schuster + cschuster&railsi.at +60904 + G-Wave B.V. + Fouad Badawi + fouad&g-wave.nl +60905 + Kristian Covic + Kristian Covic + kristian&krisnet.de +60906 + 中电云计算技术有限公司 (China Electronics Cloud Computing Technology Co., Ltd.) + 饶小松 (Rao Xiaosong) + raoxiaosong&cestc.cn +60907 + Kopparfors Skogar AB + Anders Norlin + anders.norlin&kopparfors.se +60908 + Szpital Uniwersytecki w Krakowie + Jarosław Malina + jarmal&su.krakow.pl +60909 + Kaspars Rocans + Kaspars Rocans + kaspars&warlock.lv +60910 + Jaycee James + Jaycee James + drops_timber.0g&icloud.com +60911 + ESFA + Alireza Mohammadi + alireza17010&gmail.com +60912 + Tower Extrusions + Robert Marzett + register&towerextrusion.com +60913 + Tiandy Technologies CO.,LTD + Liu Yan + liuyan&tiandy.com +60914 + Sparky Wits + Alexis Fidalgo + alzrck&gmail.com +60915 + Innovate solutions + Alexander Kalinkin + info&innovate-s.ru +60916 + Universal Robots + Jason Petursson Johannesen + itinfrastructure&universal-robots.com +60917 + Precision Optical Technologies + William Smith + william.smith&precisionot.com +60918 + FineMEDIA + Wojciech Wrona + wojciech.wrona&finemedia.pl +60919 + Pathomation + Luis Costa + luis&pathomation.com +60920 + Symbiosis ICT Solutions + Rafsanul Hasan + rafsanulhasan&outlook.com +60921 + Shenzhen Cudy Technology Co., Ltd. + Dylan Liao + dylan&cudy.com +60922 + Computer Telephone Integration Pty Ltd + Warren Simon + warren&ctipl.com.au +60923 + Yandlink Intelligent (Suzhou) Technology + Jay Zhou + jay.zhou&yandlink.com +60924 + Gemeente Súdwest-Fryslân + Alex Goris + a.goris&sudwestfryslan.nl +60925 + Rafsanul Hasan + Rafsanul Hasan + RAFSANULHASAN&OUTLOOK.COM +60926 + Streams Tech Limited + Rafsanul Hasan + rafsanul.hasan&streamstech.com +60927 + IQVIA + Rafsanul Hasan + rafsanul.hasan&iqvia.com +60928 + JDS Jerzy Drożdż + Jerzy Drożdż + tofik&jdsieci.pl +60929 + Geographic Data Dynamics LLC. + Christopher Hanni + channi&geographicdatadynamics.com +60930 + Hukseflux Thermal Sensors + Anne Sybesma + quality&hukseflux.com +60931 + Bialystok University of Technology + Marcin Kielbowicz + uci&pb.edu.pl +60932 + Seamcom GmbH & Co. KG + Marco Alexander Reinke + m.reinke&seamcom.de +60933 + Juehee Dawson + Juehee Dawson + jdaws087&uottawa.ca +60934 + OSS Health + Tricia Wolf + twolf&osshealth.com +60935 + Utech + Michael Utech + michael&utech.de +60936 + Vipaks + Ltd + ALEKSEY BAYDAROV + baydarov&domination.one +60937 + SPINELCo.,Ltd + Randy Kim + spinelcorp&naver.com +60938 + Unassigned + --Returned 2023-10-04-- + --None-- +60939 + Ceridwen Limited + Matthew Dovey + director&ceridwen.com +60940 + Mr. Hamel + Ryan Hamel + ryan&rkhtech.org +60941 + Motorcomm Electronic Technology Co., Ltd. + Chen Han + it_support&motor-comm.com +60942 + Fives Cinetic Corp. 
+ Andrew Boyea + andy.boyea&fivesgroup.com +60943 + GL Computing + Mike Lazarus + iana&GLComputing.com.au +60944 + Ken Pollock Auto Group + Joshua Cummings + jcummings&kpautogroup.com +60945 + gang sun + gang sun + 13699247050&163.com +60946 + KLG Smartec + Leo Chen + leo.chen&klgsmartec.com +60947 + Conrad Kite + Conrad Kite + conrad.kite&gmail.com +60948 + Zarrin Mesbah Smart Technology + Ata Khajeh Amiri + atakhajehamirihagh&gmail.com +60949 + Xolile Mokoena + Xolile Mokoena + xolilentulin&gmail.com +60950 + Wagenborg Shipping BV + Alex Goris + alex&ag-its.nl +60951 + feltenpersonal GmbH + Danny Vogelhofer + danny.vogelhofer&felten.de +60952 + Versta + Alexander Miller + amiller&versta.tech +60953 + XenServer, a Business Unit of the Cloud Software Group + Matthew Allen + snmp&xenserver.com +60954 + SecuMailer + Meint Post + meint&secumailer.com +60955 + What-IT + Przemysław Klimkowski + biuro&what-it.pl +60956 + colpari + Frank Fricke + it&colpari.cx +60957 + ITIC Paris + Adnan RIHAN + adnan&rihan.fr +60958 + Diputación Provincial de Cádiz + Marta Álvarez-Requejo Pérez + secretaria.general&dipucadiz.es +60959 + Horner Technology Ltd + William Horner + william&horner.technology +60960 + NetCam Systems Corporation + Kazuyuki Sugihara + ncam-contact&netcam.co.jp +60961 + SentryWire + Steven Richards + steven.richards&sentrywire.com +60962 + zshield + zshield + gsun&zshield.net +60963 + Song GUO + Song GUO + kaku&matsu.dev +60964 + Birmingham Metropolitan College + Ian Reynolds + ireynolds2&bmet.ac.uk +60965 + MHR Soluções + Renan Daniel Belice + renan&mhrsolucoes.com.br +60966 + Joseph Bunce + Joseph Bunce + oshawk&protonmail.com +60967 + Shenzhen ACwatt Power Co., Ltd. + Martin mei + 952403875&qq.com +60968 + JingTsing Technology Ltd + Shi Mengtao + mengtao.shi&jingtsing.com +60969 + VestelKom A.S. + KORAY ATALAY + koray.atalay&vestel.com.tr +60970 + Fondation Butini + Bechtle Suisse SA + servicedesk.suisseromande&bechtle.com +60971 + Access Information Management + Andrew Stanton + iana-pen&accesscorp.com +60972 + Powered By Citizen + Keyth Mark Citizen + poweredbycitizen&gmail.com +60973 + Andreas Niedermair + Administrator + me&a76a6a65.nexus +60974 + FirstDMT + Adriaan Liebenberg + adriaan&firstdmt.com +60975 + Boyang Han + Boyang Han + yqszxx&gmail.com +60976 + MADEIT Inc + Christoph Schmolmueller + csm&madeit.com +60977 + Shalabh Soni + Shalabh Soni + shalabh.soni&ericsson.com +60978 + LambdaSystems Inc. + Tadahito Fukushima + fukusima&lambda.co.jp +60979 + SolarisBus&Coach Sp. z o.o. + Jaroslaw Gorzynski + technical.team&solarisbus.com +60980 + C-Labs Srl + Paolo Zebelloni + p.zebelloni&c-labs-wt.com +60981 + Anker Innovations Limited + Justin Zhou + Justin.zhou&anker-in.com +60982 + IGRA, S.A. + IGRA, S.A. + Adolfog&icazalaw.com +60983 + Mike Bondzio + Mike Bondzio + mike.bondzio&ggg-print.de +60984 + Cedar County Memorial Hospital + Jake Norman + IT&cedarcomem.com +60985 + Komcept Solutions + Komcept Solutions + support&komcept.com +60986 + RadiTools + Ma Yuankuo + mayk&raditools.com +60987 + TOWER BERSAMA INFRASTRUCTURE GROUP + TARMUDI TARMUDI + tbg.license&tower-bersama.com +60988 + CDW Canada + MS Infrastructure Services + ms.infrastructureservices&cdw.ca +60989 + VanDeVosse.com ICT Services + Erik van de Vosse + info&vandevosse.com +60990 + Lebenshilfe Bonn e.V. 
+ Marco Schmidt + it-support&lebenshilfe-bonn.de +60991 + Stonewater + Peter Novakovic + peter.novakovic&stonewater.org +60992 + Goldistile + Farshid Golkarihagh + Fg7281&gmail.com +60993 + GSP Cloud Philippines + Kevin San Jose + kmsj13&gmail.com +60994 + Lamprecht Transport AG + Mike Haussener + informatik&lamprecht.ch +60995 + Pride Unbound Limited + IT Administrator + it&prideunbound.com +60996 + NHS South West London ICB + Nav Patel + nav.patel&swlondon.nhs.uk +60997 + IntelGenx Corp + Paul Freitas + paul.freitas&intelgenx.com +60998 + weird-web-workers.org + Georg Hopp + georg&steffers.org +60999 + CosmoPolitical Cooperative SCE + Laurent ZIBELL + laurent.zibell&kuneagi.org +61000 + Logikascium + Michael LAUNAY + michaellaunay&logikascium.com +61001 + BigO + Seongyun Ham + syham&bigoh.co.kr +61002 + ELGO Batscale AG + Urs Wettstein + urs.wettstein&elgo.li +61003 + OpenThreat + Razvan-Ioan Petrescu + razvan&openthreat.ro +61004 + mCloud Networx, Inc. + Richard Sfeir + richard&mcloudnetworx.com +61005 + Landratsamt Rosenheim + Jakob Stögmüller + iuk&lra-rosenheim.de +61006 + ŠKODA ELECTRIC a.s. + Vladimír Toncar + vladimir.toncar&skodagroup.com +61007 + Apice Sistemas de Energia Ltda + Luiz Gustavo Fernandes + lgustavo&apice.com.br +61008 + Fplus + Ilya Khomyakov + i.khomyakov&fplustech.ru +61009 + Stadtwerke Schweinfurt GmbH + Markus Dotzel + it-service&stadtwerke-sw.de +61010 + RuitingTech + casey chao + casey&ruitingtech.com.tw +61011 + ITG System + MINGPEI LU + robertlu&itgsystem.com.tw +61012 + REIGN Technology Corporation + Carol Kao + Carol_Kao&reignnet.com +61013 + platynum + Heiko Ronsdorf + heiko.ronsdorf+iana&platynum.ch +61014 + Chippewa Valley Technical College + Zack Shore + noc&cvtc.edu +61015 + InnoTrust Corporation + Laker LIN + laker.lin&innotrust.com.tw +61016 + Fibergate Inc. + Yasunori Kitamura + product_jp&fibergate.co.jp +61017 + FONDATION DES IMMEUBLES POUR LES ORGANISATIONS INTERNATIONALES + Philippe LEROY + p.leroy&fipoi.ch +61018 + Open Administration UG (haftungsbeschränkt) + Lukas Staab + lukas&open-administration.de +61019 + LUXGEN Motor Co., Ltd. + Isaac Li + it&luxgen.com.tw +61020 + Toronto East Health Network + Zaidalkilani, Mohammad + Mohammad.Zaidalkilani&tehn.ca +61021 + P.B. Elettronica srl + Mara Bottale + mara.bottale&pbelettronica.it +61022 + Alf Ringkowski + Alf Ringkowski + alf&ringkowski.de +61023 + Fulton-Montgomery Community College + John D. Koch + hostmaster&fmcc.edu +61024 + GlobalBases.com GmbH + Tibor Barna + info&globalbases.com +61025 + Avid Systems + David Verba + david.verba&avidsys.com +61026 + BRAINSAIT LTD + Mohamed Elfadil Abuagla + dr.mf.122986&icloud.com +61027 + Pannasastra University of Cambodia, Siem Reap Campus + Jeffrey Stark + stark.jeffrey&pucsr.edu.kh +61028 + Omega Power + Mussard Patrice + direction&omegapower.nc +61029 + msg mySaveID GmbH + Karsten Treiber + karsten.treiber&mySaveID.de +61030 + Antik Technology + Jozef Matyas + j.matyas&antik.sk +61031 + PyCERR Beta + Eve LoCastro + locastre&mskcc.org +61032 + Oltiva Hong Kong Limited + Maker He + maker.xiping.he&avanade.com +61033 + Energie Service Biel/Bienne + Patrick Messerli + informatik&esb.ch +61034 + EDEKA Südbayern Handels Stiftung & Co. KG + Daniel Schulze-Schwering + oid.admin.sb&edeka.de +61035 + Hotel President SA + John Williams + admin&hotelpresident.ch +61036 + SEE Critical Comms + Yann Morel-Chevillet + y.morel&see.fr +61037 + Alzheimer's Research UK + Iain Andrews + OID&alzheimersresearchuk.org +61038 + Edgeway Vision B.V. 
+ Mark Roberts + support&edgeway.io +61039 + vlabs8.com + Vitlii Shu + shumius&gmail.com +61040 + Ocala Family Medical Center, Inc. + Tom Mentzer + tmentzer&ocalafmc.com +61041 + Modular, Inc. + Michael Edwards + operations&modular.com +61042 + Secure Bits + Lachlan Spencer + LSpencer&securebits.com.au +61043 + Clinical Research Services Management GmbH CRS + Michael Gölz + michael.goelz&crs-group.de +61044 + Persist Technologies + Geoffrey Hill + geoff&geoffhill.org +61045 + TdE-CTIC + IGNACIO GARCIA SANZ + ignacio.garciasanz&telefonica.com +61046 + Kernelkit + Tobias Waldekranz + tobias&waldekranz.com +61047 + Zscaler Information Security + Loren Weith + lweith&zscaler.com +61048 + QFence IT security Zrt. + Mark Kolovics + kolovics.mark&qfence.hu +61049 + JOKER TELEVISION SL + IGNACIO LAZARO FERNANDEZ + ignacio.lazaro&jokertv.eu +61050 + Lindner IT + Robin Lindner + robin.lindner&lindnerit.io +61051 + Pierrot + Pierrot + peter.aram&cloudcafe.net.au +61052 + Maclinker Intelligence Information + 张铭宇(Zhang Ming Yu) + zmy&maclinker.com +61053 + TePS'EG Inc. + Kang, Tae Hyeok + help&tepseg.com +61054 + KREBS+KIEFER + Nicolas Klaffke + support&kuk.de +61055 + ATAYA + William Huang + william&ataya.io +61056 + Tianjin Optoelectronic Group Xin An Advanced Technology (Jiangsu) Co., LTD + Chengyu Zhu + zcy&js754.cn +61057 + Infraknit Technologies Private Limited + Shrikant Khanduri + shrikantkhanduri&infraknit.com +61058 + J. Wagner GmbH + Wolfgang Thanner + wolfgang.thanner&wagner-group.com +61059 + esko-systems GmbH & Co. KG + Hüseyin Esnemez + hueseyin.esnemez&esko-systems.de +61060 + Bayvrio, Inc + Chief Product Officer + alex&bayvrio.com +61061 + Echo Global Logistics + Tom Semeniuk + Tom.Semeniuk&echo.com +61062 + Object First + Eric Schott + oid.admin&objectfirst.com +61063 + Axalon GmbH + Alexander Plate + Alexander.plate&axalon.ch +61064 + Atielo Networks + Marcelo Spohn + atielo&atielo.com +61065 + Suzhou Bytewatt Technology Co., Ltd. + albert.liang + albert.liang&byte-watt.com +61066 + DataRemote Inc. + Antonio Serrano + aserrano&dataremote.com +61067 + Andreas Neuf + Andreas Neuf + andreas.neuf&web.de +61068 + Elektrokem Ltd. 
+ Siniša Grabovac + sinisa.grabovac&elektrokem.hr +61069 + ZENTRONICS SOLUTIONS PRIVATE LIMITED + Lintamol Thomas + linta&zentronics.co.in +61070 + Netica Srl + Filippo Tonellotto + filippo.tonellotto&netica.it +61071 + Kindred People AB + Oleksii Kondratiev + Oleksii.Kondratiev&kindredgroup.com +61072 + Systeme Electric + Valentin Kozlov + valentin.kozlov&systeme.ru +61073 + National Environmental Emergencies Centre + Vincent Boulianne + vincent.boulianne&ec.gc.ca +61074 + Insight Media Internet Limited + Ian Cudlip + ian&insight-media.co.uk +61075 + Chandler Police Department + Aaron Jones + aaron.jones&chandleraz.gov +61076 + Zeroed.tech + Adrian + Adrian&zeroed.tech +61077 + xswg + Valentin Kulesh + leshy&xswg.ru +61078 + Traceable AI + Sudeep Padiyar + sudeep&traceable.ai +61079 + RUAG AG + Yves Tanner + twi&ruag.ch +61080 + Lemon Network Technology Co.,Ltd + Bingle.dai + bingle.dai&magicwifi.com.cn +61081 + DsNetwork + Lucas Dousse + noc&dsnetwork.ch +61082 + Klavis Kripta + Product Klavis + product&klaviskripta.com +61083 + Marcel Metzen + Marcel Metzen + marcel&metzen.us +61084 + Cleverbase + Sander Dijkhuis + sander.dijkhuis&cleverbase.com +61085 + Terra Sound + Jeremy Karst + j.karst&terrasound.us +61086 + Chengdu BeiZhongWangXin Technology Co.Ltd + Cheng.Li + cheng.li&bzwx-kj.com +61087 + shenzhen real linkShenzhen Real Link Technology Co.,Ltd + Huang Qingmin + wanzi49&163.com +61088 + Anuvu + Brian Govanlu + brian.govanlu&anuvu.com +61089 + Lighthouse Credit Union + Sean LaBrie + slabrie&necu.org +61090 + PKITNEXT LABS + Klaus Dieter Wolfinger + _office_&pkitnext.de +61091 + DMIM + Jaycee James + bouquet-screen0v&icloud.com +61092 + Hochschule fuer Gesundheit + Markus Fischer + markus.fischer&hs-gesundheit.de +61093 + Airbus CyberSecurity SAS + David DE OLIVEIRA + david.deoliveira&airbus.com +61094 + Seuster KG + Uemit Akdag + u.akdag&seuster.de +61095 + MorningStar Senior Living + Aldrin Colorado + acolorado&effortlessoffice.com +61096 + Muhammad Amjad + Muhammad Amjad + contactamjid&gmail.com +61097 + Fu Xiao + Fu Xiao + fxhello&163.com +61098 + Liljewall Architects + Robin Rosengren + hostmaster&liljewall.se +61099 + ENIT (ECOLE NATIONALE D'INGENIEURS DE TARBES) + Walter Nicot + csn-isr&enit.fr +61100 + Softend + Horst Ender + horst.ender&softend.de +61101 + TEC AND TEC LATAM AMERICA LTDA + Research and Development + rd_e&tecandtec.com.br +61102 + MetriTrack, Inc. + Mirela Wohlford + mirela&metritrack.net +61103 + DataKnights Ltd + Object Identifier Administrator + oid-mgmt&dataknights.eu +61104 + Cosmian Tech SAS + Bruno Grieder + bruno.grieder&cosmian.com +61105 + GenCell Ltd + Gal Netzer + galn&gencellenergy.com +61106 + Christian Hett + Christian Hett + oidnumber2023&c-hett.de +61107 + International Vitamin Corporation, Inc. + Ryan Leamey + ryan.leamey&ivcinc.com +61108 + Telaversum Limited + Ranko Zivojnovic + registry&telaversum.com +61109 + Lance Hart + Lance Hart + me&lancehart.ca +61110 + Isojoen Saha Oy + Heli Juhala + heli.juhala&isojoensaha.fi +61111 + BCNINNOVA + ALBERT QUILIS + albert.quilis&bcninnova.com +61112 + ram electronic GmbH + David Jarkowski + hosting&ram24.net +61113 + Zertificon Solutions GmbH + Adnan Oeztuerk + a.oeztuerk&zertificon.com +61114 + Kevin Roberts NA7KR + Kevin Roberts NA7KR + kevin&na7kr.us +61115 + Defel Technologies Private Limited + Kowshik Ramesh + Kowshik&defel.in +61116 + Amanda's Collectables + Amanda Marie Julich + julichamanda8&gmail.com +61117 + R74n + R74n Role Account + contact&r74n.com +61118 + Simply NUC, Inc. 
+ John Runyon + john&simplynuc.com +61119 + TapahTech + Wei Wei + weiwei&tapahtech.cn +61120 + Kempower + Juha Varkki + juha.varkki&kempower.com +61121 + ChargEye + Juha Varkki + juha.varkki&kempower.com +61122 + Sakarya University of Applied Sciences + Yavuz Selim Bozan + ysbozan&subu.edu.tr +61123 + HEXIN Technologies Co., Ltd + Xu Chu Jiang + xuchu.jiang&shingroup.cn +61124 + Paperless doo Beograd + Jovan Gruber + office&paperless.rs +61125 + "Group of Industrial Technologies", Ltd + Yury Tsivinsky + uriy.cyvinskiy&git-holding.ru +61126 + MRD Rail Technologies + Robert Gersbach + rob&mrd.com.au +61127 + Scenic Rim Regional Council + David Adamson + David.A&scenicrim.qld.gov.au +61128 + AppViewX Inc + Muralidharan Palanisamy + muralidharan.palanisamy&appviewx.com +61129 + MDCC Magdeburg-City-Com GmbH + Niklas Polte + niklas.polte&mdcc.de +61130 + NNet + Marc Collins + marc&nillth.com +61131 + Shenzhen Zhuoxun Optoelectronic Technology Co., LTD + Candy Cui + sales006&exoe.cn +61132 + MTN Group + Ayabonga Ngoma + ayabonga.ngoma&mtn.com +61133 + Reposit Power + Dean Spaccavento + dean&repositpower.com +61134 + Wallbox N.V. + Santiago Maestre + santiago.maestre&wallbox.com +61135 + GTD International + Laure Pauly + laure.pauly&gtd.eu +61136 + Aeven A/S + Sune Andersen + AevenPKI&aevengroup.com +61137 + Smart'r Solutions + Riazuddin Nazimuddin + Riazuddin&smartr-solutions.com +61138 + Attochron, LLC + Tom Chaffee + tc&attochron.com +61139 + F&F Filipowski + Przemyslaw Sztoch + p.sztoch&fif.com.pl +61140 + RWE Renewables Europe & Australia GmbH + Sanjay Chauhan + sanjay.chauhan&rwe.com +61141 + Efros Defence Operations + Viktor Kolesnikov + kolesnikov-v&datagile.ru +61142 + Kosovo Specialist Chambers + Roeland J.A. Stouthart + IT&scp-ks.org +61143 + STN BANK + Saint Tehdi + chibsleo05&gmail.com +61144 + Shenzhen Xijia Medical Technology Co., Ltd. + Michael Yan + michael.yan&o8t.com +61145 + Steadfast Financial LP + Steadfast IT Department + tech&steadfast.com +61146 + Puzzle ITC GmbH + Reto Kupferschmid + systems&puzzle.ch +61147 + Telefónica IoT & Big Data Tech, S.A.U. + Javier Montero de Blas + iot_swap&telefonica.com +61148 + Indorama Ventures Mobility Obernburg GmbH + Thomas Czegley + thomas.czegley&de.indorama.net +61149 + SevenTrust Zrt. + Peter Dohanyos + peter.dohanyos&seventrust.hu +61150 + MSAmlin Corporate Services Limted + John Swanscott + internetdomains&msamlin.com +61151 + Louisiana State Employees' Retirement System + Kenny Scelfo, Jr. + kscelfo&lasersonline.org +61152 + Lacus Inc. + Lacus Hand + kc&now01.cn +61153 + E-lige Informatica + Gervasio Garcia + gervasiogm&gmail.com +61154 + Universitätsklinikum Carl Gustav Carus an der Technischen Universität Dresden + Christian Kainz + christian.kainz&ukdd.de +61155 + TPCWDRW Team + Li Zemu + 1242410592&qq.com +61156 + vision-batt + David Huang + huangxianwen&vision-batt.com +61157 + Wolfgang Friesenecker + Wolfgang Friesenecker + wolfgang&friesenecker-it.at +61158 + Texas Native Health + Omer Tamir + ceo&texasnativehealth.org +61159 + OCHSNER Wärmepumpen GmbH + Armin Mahmutovic + support-it&ochsner.com +61160 + TUYA SMART + Kevin Liu + shilang.liu&tuya.com +61161 + Stadtentwaesserung Dresden + Olaf Boehm + olaf.boehm&se-dresden.de +61162 + Verana Networks + Shanthakumar Ramakrishnan + shanthakumar.ramakrishnan&verananetworks.com +61163 + heldtweit + Marco Tarsia + marco.tarsia&heldtweit.de +61164 + LuxQuanta Technologies S.L.
+ Marco Cofano + marco.cofano&luxquanta.com +61165 + Platinum Equity Advisors, LLC + PJ Hasleton + itservices&platinumequity.com +61166 + Tritium Ltd Pty + Mike Olagoroye + molagoroye&tritium.com.au +61167 + Wuhan Tengxin Communication Technology Co., Ltd + Hunt Hu + hunt&tensint.com +61168 + Beijing Broadwit Technology Co.,Ltd + Dai Yongxu + daiyongxu&broadwit.com.cn +61169 + Infrawaves + Wei Chen + chenwei&infrawaves.com +61170 + HAV Group ASA + HAV ICT Department + license&havgroup.no +61171 + South Texas Oncology and Hematology PLLC + Travis Ryals + travis.ryals&thestartcenter.com +61172 + 上海恩阶电子科技有限公司 (Shanghai Enjie Electronic Technology Co., Ltd.) + 丁传超 (Ding Chuanchao) + dingchuanchao&energyborn.com +61173 + UlMeCo AG + Jakob Ulrich Meier + ulrich.meier&ulmeco.ch +61174 + FMH Generalsekretariat + Kreuter Dominik + dominik.kreuter&fmh.ch +61175 + Itti Digital + Cristhian Benitez + cristhian.benitez&itti.digital +61176 + Secure Identity + John Martin + john.martin&clearme.com +61177 + 上海涛影医疗科技有限公司 (Shanghai Taoimage Medical Technology Co., Ltd.) + Zell Zhang + zhangaiping&taoimage.com +61178 + Promotech + Thomas Kastinger + it-support&promotech.at +61179 + Solotech + Ryan Kimpton + ryan.kimpton&solotech.com +61180 + myDid + Anthony Di Stefano + certmaster&mydid.com +61181 + Sibros Technologies Inc + Mahesh Venugopala + mvenugopala&sibros.tech +61182 + Raditek + Thomas King + thomas&technalogix.ca +61183 + Sean Sears + Sean Sears + spamless86&gmail.com +61184 + Corky Reed Lucas + Corky Reed Lucas + corkylucas1&gmail.com +61185 + Qingdao Yuze Intelligent Technology Co., Ltd + LYU, WEIGUO + lvwg&zrjscm.cn +61186 + Mitchell Neal + Mitchell Neal + blueghostprotocolinc&duck.com +61187 + Integrated Concepts + Tim Tavanello + ttavanello&yahoo.com +61188 + FassaBortolo + Massimiliano Beconi + massimiliano.beconi&fassabortolo.it +61189 + Goldplate + Alexandro Setiawan + alexandro&setiawan.id +61190 + Theo Koulayan + Theo Koulayan + theokt62&gmail.com +61191 + James Hackett + James Hackett + james&distrobyte.io +61192 + ncubed B.V. + Jasper Schoenmaker + jasper.schoenmaker&ncubed.nl +61193 + Intrust JSC + Le Van Chuong + chuonglv&intrust.com.vn +61194 + Forbes Computer Systems Ltd + David George Forbes + support&forbes.co.uk +61195 + Bluetti + Wang Xiang + wangx&poweroak.net +61196 + Genius Systems Pvt. Ltd. + Yagya Bahadur Chaudhary + yagya.chaudhary&geniussystems.com.np +61197 + Jiri Kroutil + Jiří Kroutil + kroutil85&centrum.cz +61198 + Sistemas Energéticos SA + Rene Marconi + rmarconi&sistemasenergeticos.com.ar +61199 + Tribunal Regional Eleitoral do Pará + Luiz Eduardo Alves de Alcântara + eduardo.alcantara&tre-pa.jus.br +61200 + Adashi Systems LLC + Jody L Durkacs + jdurkacs&adashisystems.com +61201 + Comsign Europe + Yair Eisenstein + yaire&comda.co.il +61202 + Comsign Europe + Yair Eisenstein + yaire&comda.co.il +61203 + METRO Markets GmbH + Christian Bönning + christian.boenning&metro-markets.de +61204 + ECCOS inzenjering d.o.o. + Zeljko Medic + zeljko.medic&eccos.com.hr +61205 + Nova Labs + Marc Nijdam + marc&nova-labs.com +61206 + MINISTRY OF FOREIGN AND EUROPEAN AFFAIRS + Dalibor Putak + Sistemska.Podrska&mvep.hr +61207 + A-ROSSO Sašo Cuder s.p. + Sašo Cuder + info&a-rosso.si +61208 + FREEDOMPOP MEXICO, S.A. DE C.V. + Jorge Abraham Alvarez Guerrero + j.alvarez&freedompop.mx +61209 + Canonical Identity + Lucas Rockwell + lr&canonicalidentity.com +61210 + ZOWEE TECHNOLOGY(HEYUAN) CO., LTD. + Tao Xiangqian + taoxq&zowee.com.cn +61211 + Erste Bank a.d.
Novi Sad + Mario Radović + retail.cards&erstebank.rs +61212 + SOS Tecnologia y Gestion de Informacion Ltda + Christian D'Oliveira + christian&sosdocs.com.py +61213 + SiteB + Mike Salway + oid-admin&siteb.co.uk +61214 + Our Host + Our Host + ourhost&duck.com +61215 + Heavy-Duty Computer Systems ELC + Tomas Tudja + tomas.tudja&h-dcs.com +61216 + ProData + Primož Lukunič + primoz.lukunic&gmail.com +61217 + Atikur Rahman + Atikur Rahman + atikur&linowis.com +61218 + CICT Connected and Intelligent Technologies Co., Ltd + LV NAN + lvnan&cictci.com +61219 + 深圳市桑达无线通讯技术有限公司 (Shenzhen Sangda Wireless Communication Technology Co., Ltd.) + 习志鹏 (Xi Zhipeng) + zpxi&sedwt.com.cn +61220 + HBBLAB + Dick Hofflamut + qaqc&hbblab.fr +61221 + EUKALIN Spezial-Klebstoff Fabrik GmbH + Maik Röhrs + it&eukalin.de +61222 + Business Data Solutions GmbH + Thomas Götzinger + office.pen&bds.info +61223 + Consolis Oy + Riku Luhtisalmi + riku.luhtisalmi&consolis.com +61224 + Entcor-e LTD + Alexey Nikitin + alnikitich&gmail.com +61225 + CGN SA + Ducommun Steve + servicedesk&cgn.ch +61226 + Steve Houle + Steve Houle + steve&famille-houle.quebec +61227 + eDocs Bulgaria Ltd + Alexander Simeonov + simeonov&edocs.bg +61228 + ByteMedia + Andreas Kummer + kummer&bytemedia.ch +61229 + Step Forward Partners Limited + Zsolt Szalacsi + zsolt.szalacsi&stepforward.io +61230 + Никита (Nikita) + Никита Михайлович Чеканов (Nikita Mikhailovich Chekanov) + v&cnikita.ru +61231 + Cortus SAS + Riccardo Brama + riccardo.brama&cortus.com +61232 + liquidstack Limited + Thomas Lau + thomas.lau&liquidstack.com +61233 + PESCOER L.L.C-FZ + Christophe Miegeville + admin&pesco.global +61234 + Cisco Systems Inc + Amol Mukund Khire + akhire&cisco.com +61235 + MATTIOLI MATTEO + MATTIOLI MATTEO + matteo.mattioli&workplacetech.eu +61236 + Altimedia Corp. + Jinsuk Kim + moonfinger&altimedia.com +61237 + Infracontrol AB + Johan Lindqvist + johan.lindqvist&infracontrol.com +61238 + Waterstons Limited + Andrew Quinn + iana&waterstons.com +61239 + Varex Imaging Corporation + Greg Lewin + greg.lewin&vareximaging.com +61240 + AQA Education + Ian Walker + iwalker&aqa.org.uk +61241 + One9 Solutions + James Edington Administrator + james.edington&uah.edu +61242 + RMA Network + Răzvan Temelcea + razvan&rma-net.eu +61243 + Audisi B.V. + Bart Lugtmeier + internet&audisi.nl +61244 + Duratech Industrial Components(Beijing)Co.,Ltd + Luan Chang + qc-001&duratech.com +61245 + States of Jersey + Nathan Lawrence + n.Lawrence&gov.je +61246 + ZERTIBAN S.L.U. + Javier Fernández Gibellini + jfgibellini&zertiban.com +61247 + Medray Imaging Medical Corporation + Johannes Williams + iana-jw&medrayimaging.com +61248 + ESA Elektronische Steuerungs- und Automatisierungs Ges.m.b.H. + Markus Dreiling + it&esa.at +61249 + 1E Limited + Mark Blackburn + markb&1e.com +61250 + E. Breuninger GmbH & Co. + Jan Brückner + admin&breuninger.de +61251 + Ian Spence + Ian Spence + ian&ecnepsnai.com +61252 + Dubious, Inc. + Reef Shafer + boogie&dubious.com +61253 + 广州泓盈信息科技有限公司 (Guangzhou Hongying Information Technology Co., Ltd.) + 张富翔 (Zhang Fuxiang) + 443468404&qq.com +61254 + DCVC Management Co, LLC + DCVC NOC + noc&dcvc.com +61255 + Jonas Hofmann + Jonas Hofmann + jonas&der-nerd.systems +61256 + Carolyn Conroy, infiniteGratitude + Carolyn Conroy + Nbhomes808&gmail.com +61257 + Gendarmerie nationale + Maudoux Christophe + christophe.maudoux&gendarmerie.interieur.gouv.fr +61258 + NIWA ELECTRIC CO.,LTD. 
+ Satoru Yamaguchi + yamaguchi&niwadenki.co.jp +61259 + 浙江恒捷通信科技有限公司 (Zhejiang Hengjie Communication Technology Co., Ltd.) + 李统孝 (Li Tongxiao) + tech&zjhjtx.com +61260 + Mahindra & Mahindra Ltd. + VARMA KAKARLAPUDI ARAVIND + VARMA.KAKARLAPUDIARAVIND&mahindra.com +61261 + Wuhan SmartWin Technology CO., Ltd. + ZhiXing Chen + chenzhixing&whsmartwin.com +61262 + SHANGHAI HUIJUE NETWORK COMMUNICATION EQUIPMENT CO., LTD. + Ke Chao Huang + hkc&hj-net.com +61263 + Stonefield Systems (Europe) Ltd + Russell Burton + russell.burton&stonefield.co.uk +61264 + Police nationale + Maudoux Christophe + christophe.maudoux&gendarmerie.interieur.gouv.fr +61265 + Prime Alliance Aisbl + Pilar Julian + pjulian&ianusgroup.com +61266 + Byron Center Public Schools + Chad Gross + support&bcpsk12.net +61267 + Layer18.com, Inc + Johannes Williams + iana-jw&layer18.com +61268 + NajamTech + Mohammed Rayees + rayees&najamtech.com +61269 + RELIANOID + Laura Garcia Liebana + laura&relianoid.com +61270 + Hoehn Motors + Juan Cruz + juancruz&hoehnmotors.com +61271 + Irdeto USA, Inc. + Dina Azzam + dina.azzam&irdeto.com +61272 + Ringer Mobile + David Aldworth + david.aldworth&ringer.tel +61273 + tablezip + Maxence Caron + maxence.caron&protonmail.com +61274 + CominTech, LLC + German Dudetskii + german&comintech.pro +61275 + CoreTech + CoreTech + info&coretech.se +61276 + Cordes & Graefe KG + Eike Christoph Greth + eikechristoph.greth&gc-gruppe.de +61277 + RazorSecure + RazorSecure Support + support&razorsecure.com +61278 + Fresenius Medical Care AG + Matthias Sturm + matthias.sturm&ext.fresenius.com +61279 + Cinring Networks + Yin Xiaowei + yinxiaowei&cinring.com +61280 + BEIJING SIFANG AUTOMATION CO.,LTD + lijinchao + 1411344328&qq.com +61281 + Shenzhen Cyclone Technology Co., Ltd. + wang qinghua + szuwqh201&126.com +61282 + ILYA BELOSTOTSKIY + ILYA BELOSTOTSKIY + ib&alfa-level.ru +61283 + ZINWELL CORPORATION + William Wu + william.wu&zinwell.com.tw +61284 + Whitewater Express + Brad Ploeger + brad&whitewater.express +61285 + ShenzhenTuSan technology Co., LTD + Andy Yu + tskj2000&tech-freshere.com.cn +61286 + New Look Retailers Ltd + Simon Rowland + uk-it-tsg&newlook.com +61287 + Satya aditya Varma + Satya aditya Varma + adhiivarma99&gmail.com +61288 + Shuangyili (Ningbo) Battery Co., Ltd + Junliang Chen + chenjl5&risenenergy.com +61289 + Beijing Cybercore Technology Co.,Ltd. + Xiaoqiang Li + ben&cybercore.cn +61290 + DYNADVANCE S.R.L. + Catalin Stavaru + support&dynadvance.com +61291 + Ministery Of Defense Saudi Arabia + Ali Aldossari + aaldosari&mod.gov.sa +61292 + CES Corporation + Josh Sparrow + j.sparrow&cescorp.ca +61293 + Mario Vernon + Mario Vernon + kaydenisfree&yahoo.com +61294 + Azbil Corporation + Daisuke Aikawa + d.aikawa.ds&azbil.com +61295 + Submer Technologies SL. + Jonathan Huet + software&submer.com +61296 + 厦门星创易联科技有限公司 (Xiamen Xingchuang Yilian Technology Co., Ltd.) + 罗云峰 (Luo Yunfeng) + fdangle&star-elink.com +61297 + DIGILEO + Mouhssine Sobaii + m.sobaii&digileo.ma +61298 + Uwe Schneider + Uwe Schneider + contact&uschneider-is.de +61299 + LimePoint Pty Ltd + Goran Stankovski + gstankovski&limepoint.com +61300 + Beijinig Shannoncyber Technology Co., Ltd. + Qing Chen + xueren&shannoncyber.com +61301 + Landkreis Vorpommern-Rügen + Frank Friebe + frank.friebe&sva.de +61302 + Damon Nicholas Chong + Damon Nicholas Chong + chongdamon&gmail.com +61303 + Decatur Utilities + John Kuhlman + admin&decaturutilities.com +61304 + Oros Ltd + Levkin Dmitry + d.a.levkin&fiume.ru +61305 + FIRMASEGURA S.A.S. 
+ Serguéi Proaño + sergueip&alquimiasoft.com.ec +61306 + ELA BATHROOM SUPPLY LTD + ELENA MENELAOU + it&m-world.com +61307 + Deutsches Rotes Kreuz Landesverband Rheinland-Pfalz e.V. + Admin ITC + admin&itc.drk.de +61308 + BugProve, Inc. + Attila Szász + security&bugprove.com +61309 + M Travis Schnell + M Travis Schnell + mtschn2&gmail.com +61310 + Clive, Inc. + HyunChul Kim(김현철) + hc.kim&cliveinc.co.kr +61311 + DigiBird + Runqin Xu + xurunqin&digibird.com.cn +61312 + Ingrain SA Pty. Ltd. + IT Registrations + registrations&ingrainsa.com +61313 + Native Wind Pediatric & Family Care + Native Wind Staff + nativewindclinic&pci-nsn.gov +61314 + Colorlight Cloud Tech Ltd + Li LZ + frankli&lednets.com +61315 + VNO Roaming and Carrier Operations + VNO Roaming and Carrier Operations + 3rdline-ivc-operations&vodafone.com +61316 + Abwasserbetrieb TEO AoeR + Bernd Schäfer + bernd.schaefer&abwasserbetrieb-teo.de +61317 + BST POWER (Shenzhen) limited + Crispin Guo + crispin&bstbattery.com +61318 + Jupiter Networks LLC + Joe Smith + admin&jupiterservers.icu +61319 + ZeroSubnet + Kristoffer Olafsen + kristoffer.olafsen&zerosubnet.com +61320 + Maxauer Papierfabrik GmbH + Hans Anacker + h.anacker&schwarz-produktion.com +61321 + Mukogawa US Campus + Dru Morgan + drum&mukogawa.edu +61322 + ARIX NETWORK + Giuseppe Congedo + drgiuseppe.congedo&icloud.com +61323 + Matthias Nagel + Matthias Nagel + matthias.h.nagel&posteo.de +61324 + feibra GmbH + Markus Busta + contact.iana&feibra.at +61325 + Vid vitenskapelige høgskole + Tor Andreas Sand + andreas.sand&vid.no +61326 + Mikroszerviz SP Ltd + Dr. András Bükki-Deme + bukkideme&gmail.com +61327 + CSConsult + Jan Cardon + jan&csconsult.be +61328 + Robinson Lighting Ltd + Stephen Surbey + stephen.surbey&robinsonlighting.com +61329 + People's Technology Co., Ltd. + Alan Lin + corporate&people4s.com +61330 + Janelle Lynette Guidry + Janelle Guidry + Janellelynette&proton.me +61331 + Forgital Italy S.p.A. + Giorgio Caldana + giorgio.caldana&forgital.com +61332 + Research Institute for Road Safety of MPS + Boyue Zhang + boyuezhang&163.com +61333 + Shenzhen Goodtime Technology Co. ,Ltd + 邹市尧(Zou Shiyao) + zousy&itisgoodtime.com +61334 + Cryptob3auty + Aisha Redl-Sherwood + Aisharose2161&gmail.com +61335 + Michael C Pergolese + Michael C Pergolese + mc.pergolese&gmail.com +61336 + RASEKO + Mikko Mäkinen + mikko.makinen&raseko.fi +61337 + BrainGu + John Spencer-Taylor + iana-oid&braingu.com +61338 + EFit partners + Eric Deblon + administration&efit-partners.eu +61339 + VTech Telecommunications Limited + Owen Chan + owen_chan&vtech.com +61340 + VB-Tech + Eric Guiffard + e.guiffard&vb-tech.fr +61341 + Ideal Credit Union + Joel Turan + iana&idealcu.com +61342 + Mote Enterprises Inc. + James Mote + moteenterprises&gmail.com +61343 + iTran + Haishan Zhang + haishan.zhang&itran.fr +61344 + Juel Group + Alex Diomin + alexd&gandlgroup.com +61345 + Marshall Health Network + Chris Haddox + itadmin&mhnetwork.org +61346 + LLC «P C Most» + Igor Mikhaklchuk + mikhalchuk&most.site +61347 + Kirill Sluzhaev + Kirill Sluzhaev + ovol&d3s.ru +61348 + Limited Liability Company "Thunder Soft" + Mikhail Kladkevich + kladmv&thunder-soft.ru +61349 + 北京中科恒伦科技有限公司 (Beijing Zhongke Henglun Technology Co., Ltd.) + 潘永恒 (Pan Yongheng) + panyh&chelen.com.cn +61350 + smartgen + smartgen + 1052873338&qq.com +61351 + Telekomi Kosoves + Dren Fetiu + dren.fetiu&kosovotelecom.com +61352 + Parta Networks + Destan Yilanci + dyilanci&parta.com.tr +61353 + Polar Wind Medical Center, Inc. 
+ Diane Williams + diane&polarwind.org +61354 + Blackbaud Global + Brian Roma + brian.roma&blackbaud.com +61355 + SHANDONG LURUAN DIGITAL TECHNOLOGY CO., LTD. SMART ENERGY BRANCH + Houzhao Dai + 18766109815&163.com +61356 + NOSSAMAN LLP + Jonathan Barker + jbarker&nossaman.com +61357 + Cobram Anglican Grammar School + Leigh Elgar + ict&cags.vic.edu.au +61358 + Hochschule für Grafik und Buchkunst Leipzig + Uwe Klaus + rz&hgb-leipzig.de +61359 + Private Tech, Inc + Stephen Dowhy + sdowhy&cape.co +61360 + Ediciones Kimün SpA + Sebastián Andrés Millán Coronado + sebastian05millan&gmail.com +61361 + Mental Health Innovation SpA + Sebastián Andrés Millán Coronado + sebastian05millan&gmail.com +61362 + Partner-ed Colaborativa SpA + Sebastián Andrés Millán Coronado + sebastian05millan&gmail.com +61363 + Health-ed SpA + Sebastián Andrés Millán Coronado + sebastian05millan&gmail.com +61364 + Corporación de Derecho Privado I`+DEA + Sebastián Andrés Millán Coronado + sebastian05millan&gmail.com +61365 + HONGTAO + Tao Hong + hongtao&deye.com.cn +61366 + Tung You Yu + Tung You Yu + youyu.0906989008&gmail.com +61367 + Big Ticket Solutions LLC + Colin Coyne + hello&thebigticketsolutions.com +61368 + Subledger + Johannes Degn + joi&degn.de +61369 + CV Bilişim Teknoloji Ticaret Ltd. + CV Support + postmaster&cloudvision.com.tr +61370 + Big Geek Computer Sales and Service + Matthew C. Dutcher + matthewdutcher&bgserv.com +61371 + BAW SAS + Luc-Victor Duchateau + lv.duchateau&b-a-w.com +61372 + Fritz Kübler GmbH + Martin Heine + support.it&kuebler.com +61373 + JetStream Software Inc. + Anthony Lai + anthonylai&jetstreamsoft.com +61374 + spaixx AG + Martin Paierl + hostmaster&spaixx.ch +61375 + learnitlessons.com + Vitalii Shumylo + vitalii.shumylo&outlook.com +61376 + IT-Consulting Kinner + Kevin Kinner + kevin.kinner&it-c-k.com +61377 + Ningbo Deye Inverter Technology Co., Ltd. + Tao Hong + hongtao&deye.com.cn +61378 + Chinese Army Factory No. 6909 + Zhang Ruihao + 1418483078&qq.com +61379 + Appleby Westward Group Limited + Paul Moules + ITReports&applebywestward.co.uk +61380 + Vitamin IT Limited + Mark Royds + iana-pen&vitaminit.co.uk +61381 + Clipsal Cortex + Mitch Eadie + mitch.eadie&clipsalsolar.com +61382 + Squirrel Energy Pty Ltd + Jostin Meekels + jostin.meekels&squirrel-energy.com.au +61383 + Jeremy D. Pavleck + Jeremy D. Pavleck + jpavleck&gmail.com +61384 + SWARCO FUTURIT Verkehrssignalsysteme Ges.m.b.H + Thomas Ofner-Svatik + thomas.ofner&swarco.com +61385 + Dongdong Wen + Dongdong Wen + ddwen_work&163.com +61386 + ELIM Co., Ltd + Hyun.Gwan Baek + hyun.gwan&elimopt.co.kr +61387 + Verbandsgemeindeverwaltung Vallendar + Georg Beuler + edv&vg-vallendar.de +61388 + Sinai Health System + John Chen + John.Chen&sinaihealth.ca +61389 + Wesley College + ILT Support + ianaoid&wesley.wa.edu.au +61390 + ANSART B.V. + Viktar Arlouski + viktar.arlouski&ansart.nl +61391 + NEXT TELEKOM + Alberto Gonzalez + alberto.gonzalez&ubix.mx +61392 + Estado Maior das Forças Armadas + Nuno Caetano + nmcaetano&emfa.gov.pt +61393 + Goosnet + Ken Goossens + ken.goossens&gmail.com +61394 + iDCmini + Marcus Chui + marcus.chui&dcl.com.hk +61395 + UNIVERSAL INFORMATION SYSTEM TECHNOLOGIES LAB + KENNETH KIENLE + kenneth.kienle&gmail.com +61396 + Avernis Communications GmbH + Geschäftsführung + spamschutz+&avernis.de +61397 + Brett Eisenberg + Brett Eisenberg + brett&librum.org +61398 + Wohler Technologies Inc.
+ Amol Natekar + anatekar&wohler.com +61399 + SIGENERGY AUSTRALIA PTY LTD + Peter Ju + peter.ju&sigenergy.com +61400 + CPI International, Inc. + Michael Setto + Michael.Setto&cpii.com +61401 + Family Walk In Clinic of Mountain Grove Inc + Cindy Howard + cindy_4947&yahoo.com +61402 + Raw TV Ltd + Sam Stevens + IT&raw.co.uk +61403 + Nursyafiqah Mohdfaudzi + Nursyafiqah Mohdfaudzi + nursyafiqahmohdfaudzi3&gmail.com +61404 + ABB Robotics + Per Carlsson + per.v.carlsson&se.abb.com +61405 + Universitätsklinikum Erlangen Gebäudeleittechnik + Andreas Strobel + kv-gltadmin&uk-erlangen.de +61406 + TOOSIGN + Moussa TRAORE + contact&toosign.com +61407 + MULTITECH ENGINEERING LLC + PKI + pki&mte-cyber.by +61408 + Kompas + Sergey Gridnewskiy + itr&kompas.aero +61409 + Bavarian Centre for Families and Social Affairs (ZBFS) + Christian Degenkolb + christian.degenkolb&zbfs.bayern.de +61410 + Name Start + Eftiar Rahman + er.imam2009&gmail.com +61411 + GVTel Co. Ltd. + Byron Lin + affairs&gvtel.tw +61412 + John Dowling + John Dowling + juanton&wahcha.com +61413 + EPRI + Nasif Imtiaz + nimtiaz&epri.com +61414 + Innspark Solutions Private Limited + Govindamrit Gopinath + govindamrit&innspark.com +61415 + ZASP + Jakub Machnicki + admin&zasp.pl +61416 + SINET + Benedito Matos + bene&sinettelecom.net +61417 + Emerald + Oanh Nguyen + Oanh.nguyen&ncr.com +61418 + Emerald.local + Oanh Nguyen + Oanh.nguyen&ncrvoyix.com +61419 + The Based Department + George Witt + noc&baseddept.net +61420 + Capricorn Identity Services Private Limited. + Rajesh Mittal + support&CapricornID.com +61421 + Plantynet + Jimo Jung + xtmono&plantynet.com +61422 + OPSWAT INC. + Cosmin Banciu + cosmin.banciu&opswat.com +61423 + BioComputing UP + Ivan Mičetić + ivan.micetic&unipd.it +61424 + D&D Nilsson AB + Didrik Nilsson + didrik&ddnilsson.se +61425 + Xtend Technologies Pvt. Ltd. + Jayakrishnan Kurup + license&xtendtech.com +61426 + Salzburger Sand- und Kieswerk GmbH + Robert Scheuffele + r.scheuffele&ssk.cc +61427 + Synthesis Health Intelligence Inc. + Vidita Datar + vidita.datar&synthesishealthinc.com +61428 + AGILTECH + Laurent CHANAUD + it&agiltech.fr +61429 + Xi'an THRN Technology Co., Ltd + Weng Qiang + wengqiang&hua-fang.cn +61430 + Powerex Corp. + Ian Burca + ian.burca&telusinternational.com +61431 + Woolworths Group + David O'Connor + doconnor1&woolworths.com.au +61432 + Selectronic Australia Pty Ltd + David Shirley + dshirley&selectronic.com.au +61433 + HydroExceed GmbH + Frank Friebe + frank.friebe&sva.de +61434 + Product Marketing Service LLC + Barry Gossett + bjgossett&yahoo.com +61435 + BY Kalim + BY Kalim + bykalim&gmail.com +61436 + Tessenderlo Kerley Inc. + Tim McConnell + tim.mcconnell&tkinet.com +61437 + State of Nevada + David Colborne + sig&it.nv.gov +61438 + Sierra Vista Hospital + Heather Johnson + heather.johnson&svhnm.org +61439 + Meade's PC Repair Shop, Inc. + Josh David Meade + NOC&MeadesPCShop.com +61440 + Basalt AB + Magnus Lööf + magnus.loof&basalt.se +61441 + TELETECNICA s.r.l. + Gabriele Santinelli + gabriele.santinelli&teletecnicaweb.it +61442 + Netgroot Infotech Private Limited + Ankur Aggarwal + ankur.agg&netgroot.com +61443 + CommuniGate Software Development & Licensing SA + Jon Doyle + devops&communigate.com +61444 + IMESO-IT GmbH + Nico Rieck + imeso&imeso.de +61445 + Siemens SI GSW IT GC RAIL + Andrea Cavagna + andrea.cavagna&siemens.com +61446 + PT Bank Jago Tbk + Hannoto Yang + hannoto.yang&tech.jago.com +61447 + Shenzhen Beilai Technology Co.,Ltd. 
+ Shaojun Peng + getorlost&163.com +61448 + Ooredoo Oman + Ashish Kumar + ashish.kumar&ooredoo.om +61449 + Zesp + Josh Purcell + IT-External-Service-IANA-PEN&Zesp.co.uk +61450 + AGENCE FRANCAISE DE DEVELOPPEMENT (AFD) + Xavier Wurmser + wurmserx&afd.fr +61451 + Schloss Tempelhof eG + Simon Harhues + it&schloss-tempelhof.de +61452 + Paul Hagedorn + Paul Hagedorn + paul_hagedorn&outlook.de +61453 + Cybernop, LLC + Taylor Crowe + tcrowe&cybernop.com +61454 + Shawna Odom-Burgower + Shawna Odom-Burgower + MonsterWorks.site&gmail.com +61455 + Leonardo (Digital Platform) + Michael Black + lsc&leonardocompany.com +61456 + Inferno Communications + Connor McFarlane + support&infernocomms.com +61457 + Marcelo Augusto Garbuli + Marcelo Augusto Garbuli + mgarbuli&gmail.com +61458 + Northern Schools Trust + Webmaster + webmaster&northernschoolstrust.co.uk +61459 + 天磊卫士(深圳)科技有限公司 (Tianlei Guardian (Shenzhen) Technology Co., Ltd.) + 郑子琪 (Zheng Ziqi) + zhengziqi&uguardsec.com +61460 + PT Sentra Vidya Utama + Benediktus Anindito + benediktus.anindito&sevima.id +61461 + Excellent Entertainment AG + Gregor Ganglberger + gganglberger&e-e.ag +61462 + 4Links + Simon Crowe + simon&4links.space +61463 + Zühlke Engineering AG + Piet De Vaere + piet.devaere&zuhlke.com +61464 + De Vaere Solutions + Piet De Vaere + piet&devae.re +61465 + Armstrong Equipment, Inc + Robert Narvaez + administrator&armstrongequipment.com +61466 + Connecticut Criminal Justice Information System Governing Board + Anatolie Criucov + anatolie.criucov&ct.gov +61467 + Roland Rechtsschutz Versicherungs AG + Frank Mueller + frank.mueller&roland-rechtsschutz.de +61468 + Mobius Networks Limited + Daniel Clayton + infra2&mobiusnetworks.co.uk +61469 + Lagardere Travel Retail sp. z o.o. + Tomasz Sobolewski + t.sobolewski&lagardere-tr.pl +61470 + LXDEV Technologies + Alexandru Chirea + alex&lxdev.ro +61471 + Conexys S.R.L. + Marcelo Ovando + ovando&conexys.com.ar +61472 + Swistec GmbH + Gerd Hoepfner + gerd.hoepfner&swistec.de +61473 + Alliance Transport Bois + Laurent Lataste + laurent.lataste&alliancetb.fr +61474 + Mthokozisi + Mthokozisi + mthokozisi.luthuli&irdeto.com +61475 + Shenzhen Eybond Co., Ltd + 孟祥梯(Mencius) + market&eybond.com +61476 + Götalands Logistik & Transport AB + Johan Sundberg + johan.sundberg&sundbergs.se +61477 + Marcelo Estriga + Marcelo Geraldo Estriga de Barros + mestriga&mestriga.com +61478 + Blep.cz, z. s. + Amélie Krejčí + amelie.krejci&blep.cz +61479 + Euro-Alkohol GmbH + Joachim Kottmann + j.kottmann&euro-alkohol.com +61480 + BRUNATA Wärmemesser Hagen GmbH & Co. KG + Sandra Möckelmann + lizenzen&brunata-hamburg.de +61481 + idesis GmbH + Andreas Mersch + kontakt&idesis.de +61482 + Amica Senior Lifestyles + Infrastructure Support + infrastructuresupport&amica.ca +61483 + Pirelli Tyre Russia + Yuriy Kazaryan + it&pirelli.ru +61484 + Daniel Fisher + Daniel Fisher + iana-pen&danielfisher.com +61485 + Enet Dev + Mark C Cheung + oid&enet.win +61486 + Evgeny Leontyev + Evgeny Leontyev + ev.leontyev&gmail.com +61487 + 宁波市商沃通信科技有限公司 (Ningbo Shangwo Communication Technology Co., Ltd.) + 宁波市商沃通信科技有限公司 (Ningbo Shangwo Communication Technology Co., Ltd.) + hongfei.tian&sunvot.com +61488 + NotalVision, Inc. + David Rachutin + david&notalvision.com +61489 + SGNT - Sibneftetransproekt + SGNT + ua&sntp.ru +61490 + Integrated Technical Vision Ltd.
+ Oleksandr Prybysh + pay&itvsystems.com.ua +61491 + Akleza + James Medlock + jmedlock&akleza.com +61492 + Energy Development Corporation Limited + Ntare Ronald + dms.license&reg.rw +61493 + Aviler SARL + Antoine Davous + antoine.davous&aviler.net +61494 + MWZCONNECT LLC + Arnold Mwanza + arnold_mwanza&mwzconnect.com +61495 + Festival de Cannes + Damien Brusset + infra&festival-cannes.fr +61496 + Aleksandr Loktev + Aleksandr Loktev + loktew&rambler.ru +61497 + Conor DeCamp + Conor DeCamp + cdecamp4601&gmail.com +61498 + RWB PrivateCapital Emissionshaus AG + Kai Biechele + edvmail&rwb-ag.de +61499 + steute Technologies GmbH & Co. KG + Marcus Koeny + m.koeny&steute.com +61500 + APL Red Team + ART + art&jhuapl.edu +61501 + Malayan Banking Berhad + Sam Hong + MBBNETENG&maybank.com +61502 + Stadt Nuernberg + OIDAdmin + oidadmin&stadt.nuernberg.de +61503 + Delta, LLC + Oleg Varygin + vaog&deltard.ru +61504 + PT. Satata Neka Tama + Saiful Anwar + saiful&sat.net.id +61505 + Glier's Meats, Inc. + M Travis Schnell + travis&goetta.com +61506 + Popli Design Group + Mohamed Elmasry + melmasry&popligroup.com +61507 + Armstrong Group International, Inc + Robert Narvaez + administrator&agi-corp.us +61508 + IdentiTek sh.a + Endri Vangjel + endri.vangjel&identitek.al +61509 + no42.org + Ronny Trommer + ronny&no42.org +61510 + Oficiul National al Registrului Comertului + Dan Tanase + dan.tanase&onrc.ro +61511 + J.H. Bennett & Company, Inc. + Alex Rourke + arourke&jhbennett.com +61512 + Crump Homelab + Joshua Crump + joshua&crump.net +61513 + SondeHub + Michaela Wheeler + oid&michaela.lgbt +61514 + Valley Metro + Phil Ozlin + pozlin&valleymetro.org +61515 + Xian Stannard + Xian Stannard + xian&xianic.net +61516 + CooperVision Specialty Eyecare + Jerry Trujillo + jtrujillo&coopervisionsec.com +61517 + Digit'Eaux + Philippe Picalausa + si-technique&digiteaux.be +61518 + Tout Pareil Corp. + Clément DELORT + clement.delort&tprc.ovh +61519 + TDK Sensors AG & Co. KG + Hartmut Jordan + hartmut.jordan&tdk.com +61520 + Abbott LLC + Kenyon Abbott + admin&abbott.llc +61521 + llang.at - IT-Dienstleistungen + Lukas Lang + office&llang.at +61522 + Perryton Equity Exchange + Network Administration + admin&myequityexchange.com +61523 + EQUIRON + Mikhail Seliverstov + info&equiron.com +61524 + Research and Production Association named after A.S.
Popov + Ivanov Ivan + info&popovradio.ru +61525 + Darletto + Igor Sukhankin + igor&sukhankin.ru +61526 + Dmitry Vorobiev + Dmitry Vorobiev + v0rd&yandex.ru +61527 + Alexander R Craven + Alexander R Craven + alex.craven&uib.no +61528 + MOORNET SOLUTIONS LLC + Brandon v Moor Heredia + moornetsolutions&gmail.com +61529 + REC SYSTEM co.,ltd + Shinji Tanoue + s-tanoue&recsystem.co.jp +61530 + G & X Services + Mark Shaw + mark2018&hotmail.com +61531 + Layer9.space + Jens Becker + v2px&layer9.space +61532 + ServerStep Technology + Mayank Mittal + mayank&serverstep.in +61533 + ComroeStudios LLC + Dr Richard A Comroe + rcomroe01&gmail.com +61534 + Envirovision Solutions Systems + Jacques van der Westhuizen + jacques&evsolutions.biz +61535 + Groupe Hospitalier Nord-Essonne + Gremion Sylvain + s.gremion&ghne.fr +61536 + Polyphony + Florian Weber + flori&polyphony.chat +61537 + BAE Systems Air Sector (TEST) + Tom Lockhart + tom.lockhart2&baesystems.com +61538 + Brookhaven Surgical Services, PC D/B/A MY Health Long Island Frank Sconzo MD + Matthew B Peddie + Matthew.Peddie&nyulangone.org +61539 + Bidwells LLP + Lewis Cowell + lewis.cowell&bidwells.co.uk +61540 + Flowbix + Geovane Fernandes + glima&flowbix.com +61541 + Anton Luka Šijanec + Anton Luka Šijanec + anton&sijanec.eu +61542 + Danilo Klug + Danilo Klug + daniloklug&hotmail.com +61543 + Isfahan University + Abbas Zamani + zamani&ui.ac.ir +61544 + Flick Gocke Schaumburg Partnerschaft mbB + Michael Reinerth + michael.reinerth&fgs.de +61545 + Shanghai Yunsilicon Technology Co.,Ltd. + Zhang Xiao + zhangx&yunsilicon.com +61546 + MATTR LIMITED + Oliver Terbu + oliver.terbu&mattr.global +61547 + INNIO Jenbacher GmbH & Co OG + Gerhard Koell + productsecurity&innio.com +61548 + Schwarz Produktion Stiftung & Co. KG - SPW Papier + Hans Anacker + h.anacker&schwarz-produktion.com +61549 + Sierra Management Services + Alexis Masina + amasina&nvspecialtycare.com +61550 + Kyle Ellis + Kyle Ellis + ky.ellis83&gmail.com +61551 + Equinox Power Innovations Inc + Andrew Dickson + andrew.dickson&equinoxpowerinc.com +61552 + ABSULT-Group + Alexander Becker + absult-group&absult.de +61553 + Lindinger IT-Services KG + Lucas Lindinger + iana-ke8m&1uca5.li +61554 + Kosmos Infrastructure Foundation + Sebastian Kippe + foundation&kosmos.org +61555 + HYCU Support Lab (support.hycu.work) + Neven Randić + neven.randic&hycu.com +61556 + Rail Safety Systems Pty Ltd + Peter Feder + pfeder&railsafetysystems.com.au +61557 + Futurex Inc. + Jonel Mawirat + jmawirat&futurex.ph +61558 + Viana & Dias + Pedro Viana + pv&ved.pt +61559 + Sweetondale LLC + Vasiliy Tovkach + tovkach&sweetondale.cz +61560 + Muon Space, Inc. + Gregory Smirin + greg&muonspace.com +61561 + Thomas Ford Roofing Inc + Thomas Ford + thomasfordroofing&yahoo.com +61562 + Cisco Flex Platform + Joseph C Llamas + jollamas&cisco.com +61563 + EnerSys Sp. z o.o. + Adam Miśkiewicz + adam.miskiewicz&pl.enersys.com +61564 + nexo-standards + SOUSSANA Jacques + nexo-iana-pen&nexo-standards.org +61565 + flucon fluid control GmbH + Malte Kießling + kiessling&flucon.de +61566 + VAV Versicherungs AG + Sascha Budinsky + support&vav.at +61567 + Sydostskog AB + Mikael Nilsson + mikael&sydostskog.se +61568 + Shanghai Pingbo Info & Tech Ltd. + Liang Liang + liangliang&trustkernel.com +61569 + Matthew Gardner + Matthew Gardner + ihavenothingelsetoput&gmail.com +61570 + Bence Skorka + Bence Skorka + pen&benceskorka.com +61571 + ASEE Solutions d.o.o Beograd + Branko Radjen + branko.radjen&asseco-see.rs +61572 + BNGSOFT ltd. 
+ Martin Zaharinov + admin&bngsoft.net +61573 + Code Fox + Code Fox + iana.pen.assignment&c0de.email +61574 + Stadt Troisdorf + Marius Joest + joestm&troisdorf.de +61575 + Hylte Paper AB + Istvan Jakob + istvan.jakob&hyltepaper.com +61576 + Exportkreditnämnden + Roger Norlin + admin&ekn.se +61577 + Coastal Enterprises, Inc. + Jeremy Johnson + jeremy.johnson&ceimaine.org +61578 + Linknat.com + build + build&linknat.com +61579 + TBC Kredit + Orkhan Jafarov + ojafarov&tbckredit.az +61580 + GIRASOL PE S.R.L + GUILLERMO FOSHLEN PAGANO FAUSTINO + ventas&firmeasy.legal +61581 + Hangzhou Huite Technology CO.,Ltd + Zhibao Cao + czb&anyfbus.com +61582 + RedEarth Energy Storage Pty Ltd + Jason Tubman + jasont&redearth.energy +61583 + Telenor Pakistan + Rashid Ayub Khan + rashid.khan1&telenor.com.pk +61584 + MAPAL Dr. Kress KG + IT Service + it-service&mapal.com +61585 + Apollo Infoways PVT LTD + Ravi Prakash + ravi&apolloinfoways.in +61586 + Kumi Systems e.U. + Klaus-Uwe Mitterer + kumitterer&kumi.systems +61587 + David Emett + David Emett + dave&sp4m.net +61588 + EKSELANS by ITS · ITS PARTNER O.B.S. S.L. + José Luis Avellano + jlavellano&ek.plus +61589 + Tii Technologies Inc. + David Foley + dfoley&tiitech.com +61590 + TrusAuth Inc + Liu Yuanchen + levimarvin&icloud.com +61591 + JKiene + Jens Kiene + j.kiene&gmx.de +61592 + GSW Frankfurt GmbH + IT/Orga + edv&gsw-ffm.de +61593 + Apollo Care + Tim Napthali + licensing&apollocare.com.au +61594 + Hartono Consulting + Santo Hartono + contact&hartonos.com +61595 + AISWEI PTY LTD + Cheng Wang + andy.wang&solplanet.net +61596 + Exail Aerospace + Christophe Villeneuve + christophe.villeneuve&exail.com +61597 + Overseas Family School + CTO + iana&lists.ofs.edu.sg +61598 + Wavetel Technology Limited + Kevin Zheng + kevin&wavetelco.com +61599 + Benjamin Parzella + Benjamin Parzella + bparzella&gmail.com +61600 + Lion Energy + Jey Subramaniaraja + jey.subramaniaraja&lionenergy.com +61601 + Aurora Innovation, Inc. + Janis Danisevskis + jdanisevskis&aurora.tech +61602 + EHOOME IOT PRIVATE LIMITED + Dipankar Biswas + dipankar.biswas&ehoome.in +61603 + Bundesanzeiger Verlag GmbH + Lars Kambeck + lars.kambeck&bundesanzeiger.de +61604 + Raytheon UK + David Roast + david.roast&raytheon.co.uk +61605 + Amdocs IOT + Elena Astrin + elenaf&amdocs.com +61606 + Tosee Enteghal Dade Aryan (TED Aryan) + Saeed Sami + sami&tedtelecom.com +61607 + Rolls-Royce Solutions GmbH + Torsten Karch + torsten.karch&ps.rolls-royce.com +61608 + Stichting Esdégé-Reigersdaal + Rudi Beeres + rudi.beeres&esdege-reigersdaal.nl +61609 + System Admins ApS + Alex Ørving Toftegaard Hansen + ath&systemadmins.com +61610 + Firda + Gaatse Zoodsma + jagoris&ziggo.nl +61611 + KYLink + Duke Huang + duke&kylink.com.tw +61612 + Beijing 3CAVI Tech Co.,Ltd + 张国梁 (Zhang Guoliang) + zgl&3cavi.com +61613 + Trophy Fish Outdoors + William Bursik + scott&trophy.fish +61614 + La MicroMega Srl + Paolo Barsacchi + paolobarsacchi&lamicromega.com +61615 + Thomas Blome + Thomas Blome + thomas.blome&outlook.com +61616 + EUROCOLOR Sp. z o.o. + Adam Andziol + license&eurocolor.com.pl +61617 + Zipit Wireless, Inc. 
+ Ralph Heredia + rheredia&zipitwireless.com +61618 + PPDAC LTD + Louis Waweru + louis&ppdac.ltd +61619 + AGGRECOST OF NORTH AMERICA BENEFIT CORP + ANTHONY PATRICK URBANO + anthony&aggrecost.com +61620 + BYLT OF NORTH AMERICA BENEFIT CORP + ANTHONY PATRICK URBANO + anthony&aggrecost.com +61621 + xD OF NORTH AMERICA BENEFIT CORP + ANTHONY PATRICK URBANO + anthony&aggrecost.com +61622 + Beep Telecommunications & Computing Ltd. + Zoltan Urmossy + hostmaster&beep.hu +61623 + Grundio cPlc. + Zoltan Urmossy + hostmaster&grundio.net +61624 + Two Degrees Mobile + Simon Green + simon.green&2degrees.nz +61625 + Phrygian Solutions + Tony Mitchell + tony&tdrix.tech +61626 + Asmo Advanced Logistics Services Co. + Marwan Aldulaijan + marwan.dulaijan&asmo.com +61627 + Studierendenwerk Bremen AöR + Rolf Meyer + dv-admins&stw-bremen.de +61628 + Staatliche Hochschule für Musik und Darstellende Kunst Mannheim + Markus Linsner + iana&muho-mannheim.de +61629 + 上海鑫融网络科技股份有限公司 (Shanghai Xinrong Network Technology Co., Ltd.) + 李家荣 (Li Jiarong) + lijr&xr2004.cn +61630 + Everfox Holdings LLC + Joseph Bell + joe.bell&everfox.com +61631 + Ibermutua, Mutua Colaboradora con la Seguridad Social nº 274 + Jose Luis Gonzalez Martinez + joseluisgonzalez&ibermutua.es +61632 + Westnetz GmbH + Peter Stapf, Christian Trenk + Team-IAM&westnetz.de +61633 + Alysse Gapsis + Alysse Gapsis + alyssegapsis15&gmail.com +61634 + Cooley LLP + Ray Leidle + itsec&cooley.com +61635 + Tårnby Kommune + Team IT + it-afd&taarnby.dk +61636 + U.T.E. Electronic GmbH & Co. KG + Volker Pompetzki + info&ute.de +61637 + eSystems MTG GmbH + Alexander Bourgett + alexander.bourgett&esystems-mtg.de +61638 + Mecc Alte SPA + Miles Revell + miles.revell&meccalte.it +61639 + Metal Service Center Sp. z o. o. + Zdzislaw Labik + admin&mscinox.com +61640 + SIPEARL SAS + Olivier Deprez + olivier.deprez&sipearl.com +61641 + Resource Management Associates, Inc. + Christopher Bradley + chris&rmanet.com +61642 + Honeybadger Industries + Benjamin Curtis + tech&honeybadger.io +61643 + Provide Datenverarbeitung GmbH + Martin Korczynski + mkorczynski&e-e.ag +61644 + Lunner.Kommune.no + Lars Erik Blisten + leb&lunner.kommune.no +61645 + Dyagsys Karya Informatika + Suhadi Haqbar Sudaryat + dyagsys&gmail.com +61646 + Région Réunion + Fabrice Payet + dsi-pole-infra&cr-reunion.fr +61647 + Fractal Energy Pty Ltd + Jostin Meekels + jostin.meekels&squirrel-energy.com.au +61648 + Norcal Ambulance + Daniel Newman + oidadmin&norcalambulance.com +61649 + Citykinect Inc + Jeff Skillen + info&citykinect.com +61650 + Semco Maritime A/S + Martin Rossmann + mas&semcomaritime.com +61651 + PA Technology Solutions Limited + Tim Lunn + tim.lunn&paconsulting.com +61652 + America's First Responders Network + Adam Wolfe + Adam.Wolfe&afrn.org +61653 + DDL-Engineering + Dmitri Levkin + d.a.levkin&yandex.ru +61654 + Digitaltomass + Dmitri Levkin + d.a.levkin&yandex.ru +61655 + CV TRENINDO CIPTA MANDIRI + Tarkaya Rahadian + akay&trenindo.id +61656 + Information Softworks., Inc + Paul Wehr + sf_iana&informationsoftworks.com +61657 + ArtiVisi Intermedia + Endy Muhardin + endy&artivisi.com +61658 + Equalx Technologies Private Limited + Himalaya Barad + himalaya&equalx.in +61659 + xk-image + Yi Liang + matt&xk-image.com +61660 + PrecisionX Technology LLC + Jake Mertel + info&precisionx.tech +61661 + AISWEI Technology Co., Ltd. + Cheng Wang + andy.wang&solplanet.net +61662 + Community Health & Emergency Services Inc. + Ryan Hartmann + rhartmann&chesi.org +61663 + Monolit IT Sp. z o.o. 
+ Artur Pyc + admin&monolit-it.pl +61664 + Etihad Etisalat Company (Mobily) + Jassem Binabd + VAS-Design&mobily.com.sa +61665 + Aleko Embedded + Oleksandr Kotenkov + oleksandr.kotenkov&aleko-embedded.com +61666 + Tunstall Systems + Seth Tunstall + seth&tunstall.in +61667 + Anastasios Mavrommatis - IT Lösungen + Anastasios Mavrommatis + info&mavrommatis.net +61668 + PT Sinar Baru Rajawali + Robby Yusuf Pratama + robby.yusuf&gmail.com +61669 + Noark Australia Pty Ltd + Zhengye Zhang + STEPHEN&NOARK.AU +61670 + OneTv S.r.l. + Christian Melchiorri + dev&one-tv.it +61671 + Gemeinnützige Industrie- Wohnungsaktiengesellschaft + Höllinger Marc-Andre + it&giwog.at +61672 + Creative Liquid Coatings + IT Support + ITSupport&creativeliquidcoatings.com +61673 + Concrete Genius Manufacturing Inc. + Nathanael Law + nathanael.law&concretegenius.com +61674 + Coridyan + Tynan Coles + info&soltaro.com +61675 + Hangzhou Bingte Technology Co., Ltd + Ying Chen + ying.chen&bingte.com +61676 + Wha Yu Industrial Co., Ltd. + Doris Lin + bu2rd&whayu.com +61677 + BxC GmbH & Co. KG + Carsten Schwant + iana-contact&bxc-consulting.com +61678 + FIMER S.p.A + Filippo Vernia + filippo.vernia&fimer.com +61679 + Hellenic National Defense General Staff (HNDGS) + Theofilos Nikolaidis + th.nikolaidis&hndgs.mil.gr +61680 + Troglodyte.io + Justin J Ryberg + administrator&troglodyte.io +61681 + Joe Sniderman & Associates, LLC + Domain Administrator + hostmaster+oid-ldap&joesniderman.com +61682 + Kontrolnext Technology + Tina Chen + tina.chen&kontrolnext.com +61683 + Hoffmann Group Engineering GmbH + Alexander Kolb, PhD + alexander.kolb&hoffmann-group.com +61684 + Beutlhauser Holding GmbH + Thomas Heindl + thomas.heindl&beutlhauser.de +61685 + Orxcel Co.,Limited + Eric He + eric&orxcel.com.cn +61686 + Athens-Limestone Hospital + Kevin Houston + kevin.houston&alhnet.org +61687 + Mani Raju (LcDigital) + Mani + lcdigital2815&gmail.com +61688 + oruga.de + Dennis Ehmer + 52334856&oruga.de +61689 + Ricon İletişim AŞ + Umut Sonkurt + umut.sonkurt&riconmobile.com +61690 + ThinkCloud + Michał Cichocki + michal.cichocki&thinkcloud.pl +61691 + Public Joint Stock Company Mobile TeleSystems (PJSC MTS) + Semen Zimin + smgalper&mts.ru +61692 + MA Labs + Mark Allen + malabs.iana&gmail.com +61693 + Karam Jameel Moore + Karam Jameel Moore + KaramJameelmoore&gmail.com +61694 + Local Connectivity Lab + Esther Jang + infrared&seattlecommunitynetwork.org +61695 + Eltron s.r.o. + Petr Pilík + info&eltronpraha.cz +61696 + MEMMERT GMBH + CO. KG + Thomas Ehnes + TEhnes&memmert.com +61697 + Bolle + Jan Bolle + iana&jan-bolle.de +61698 + r01.li hébergement communautaire + Ploba Dodiste + contact&r01.li +61699 + Esc Muhendislik A.S. + Bilgi Ekibi + lisans&escmuhendislik.com.tr +61700 + Southgate Community Schools + Nicola Quoziente + sysadmin&sgate.k12.mi.us +61701 + Piing.ai + Tim Beveridge + tim&piing.ai +61702 + Kuhmo Oy + Tommi Ruha + tommi.ruha&kuhmo.eu +61703 + Thomasville City Schools + Jamie L. Hunt + huntj&tcs.k12.nc.us +61704 + Luminex Software Inc. + Serge Rioux + srioux&luminex.com +61705 + Ravit Technologies Pvt. Ltd. + Purav Chokshi + purav.chokshi&thegoldenray.in +61706 + NTT DATA, Inc - Netherlands + Mark Oudsen + mark.oudsen&global.ntt +61707 + Shanghai Fanzhen Information Technology Co., Ltd. 
+ Wang Hong Bao + whb&vangoo.com.cn +61708 + hotwirestudios + Alexander Lippling + alexander&hotwirestudios.com +61709 + PatchAmp + Jim Tronolone + jimt&patchamp.com +61710 + BJS Family + William Tandy + william.tandy&bjshomedelivery.com +61711 + Jess Schallenberg + Jess Schallenberg + jess.schallenberg&gmail.com +61712 + Masoud Shokohi + Masoud Shokohi + mshokohi195&gmail.com +61713 + NetworkECO + Nathanael L. Walters + nwalters&networkeco.net +61714 + The Principia Corporation + Chris Davis + nettel&principia.edu +61715 + Omnissa + Avinash Shrimali + avinash.shrimali&broadcom.com +61716 + A.E. Perkins + Son Tran + stran&aeperkins.co +61717 + iStore (QLD) Pty Ltd + Univers + crystal.zhou&univers.com +61718 + C&D Technologies, Inc + Lance Wulfers + lance.wulfers&cdtrojan.com +61719 + THINGS HORIZON PVT LTD + Ansh Yadav + Ansh&thingshorizon.com +61720 + Petroleum Development Oman + Debojyoti Sinha + ems.pdo&ge.com +61721 + Decatur Morgan Hospital + Brent Smith + dmhpki&dmhnet.org +61722 + Jennifer Ai + Jennifer Ai + jenniferai123&gmail.com +61723 + Hitachi Vantara Ltd. + Hiroaki Kasahara + hiroaki.kasahara&hitachivantara.com +61724 + LEMA + Patrick Barrett + patrick.barrett&lema.io +61725 + Carleton Place & District Memorial Hospital + Jamie Page + jpage&brightshores.ca +61726 + Technical Momentum + Brent Taylor + brent.taylor&technicalmomentum.com +61727 + Torrion River + Oleg Abrosimov + olegabros&torrionriver.com +61728 + 杭州众兴慧智能科技有限公司 (Hangzhou Zhongxinghui Intelligent Technology Co., Ltd.) + 荀浩 (Xun Hao) + xunhao&ih3c.net +61729 + Stadtverwaltung Schleswig + Dennis Borngraeber + itsm&schleswig.de +61730 + Franco Brothers Enterprises llc. + Cristian Franco + Francoland30&gmail.com +61731 + Grupo Armas Trasmediterranea + Juan Francisco Fernandez + adm_dominios&artra.es +61732 + TM Technology Services Sdn Bhd + Amalia Mohd Mahdzir + elia&tm.com.my +61733 + Defigo Ltd. + Zoltan Urmossy + hostmaster&defigo.hu +61734 + Syntony GNSS + Aurelien Korsakissok + aurelien.korsakissok&syntony.fr +61735 + IT-Schmid GmbH & Co.KG + Manfred Schmid + info&it-schmid.com +61736 + AssetBook IoT + William Jonasson + william&assetbook.eu +61737 + Global Message Services AG + Nikos Babalis + info&gms-worldwide.com +61738 + DWDM.RU LLC + Aleksandr Tolstokulakov + at&dwdm.ru +61739 + Mentech Power + Vic Feng + fengtao701&139.com +61740 + Karol Szczepanowski + Karol Szczepanowski + Karol.Szczepanowski&gmail.com +61741 + Ortocomputer + Toni Magni + amagni&ortocomputer.com +61742 + Cv Ridho + Virgo Sugondo Saragih + virgo.saragih&gmail.com +61743 + WorldwideOSC + Aungel Seiyaj + Awcreedus&proton.me +61744 + Chewing Project + Kan-Ru Chen + chewing.im&kanru.info +61745 + KanruInfo + Kan-Ru Chen + pen&kanru.info +61746 + Bostocks.Club + Andrew Bostock + andrewbostock&yahoo.com +61747 + Hanplast Sp. z o.o. + Jaroslaw Slawinski + j.slawinski&hanplast.com +61748 + Rikom Technologies SDH. BHD + Phillip Zee + philip.zee&rikomtech.com +61749 + Technické sitě Brno, akciová společnost + Kroner Vojtěch + kroner&tsb.cz +61750 + XDTRUST 02-28-2024 CHARLEY MINNGEAO + Charley Minngeao + Awcreed&proton.me +61751 + Enhub + Pavel Popov + ak&enfall.com +61752 + Shahed, Inc. + Md Shahed Hossain + info&shahed.biz +61753 + Lexi Winter + Lexi Winter + iana&le-fay.org +61754 + FADU + Sumin Yi + smyi&fadutec.com +61755 + Kraftringen Energi AB + Martin Järvestam + servicedesk&kraftringen.se +61756 + ANDA TELECOM PVT. 
LTD + SUBHASH PANT + subhash&andatelecom.com +61757 + Vierkant Software GmbH + Rene Henzinger + admin&vierkant-software.eu +61758 + Wartsila SAM Electronics GmbH + Jens Klose + helpdesk&sam-wartsila.de +61759 + Lehigh University + Dan Schwartz + hostmaster&lehigh.edu +61760 + Crist Engineers + Scott Musgrave + oid&cristengineers.com +61761 + Simau srl + Sebastiano Pastorino + sebastiano.pastorino&simau.com +61762 + RAPIN + RAPIN François + f.rapin&orange.fr +61763 + Växjö Energi AB + Malin Johannesson + malin.johannesson&veab.se +61764 + ISG Nordic AB + Björn Lager + bjorn.lager&isgnordic.se +61765 + DLR e.V. RB + Thomas Singer + gsoc-it&dlr.de +61766 + SigmaTron International, Inc. + Charles Donaldson + Charles.Donaldson&sigmatronintl.com +61767 + HTT + Alax Wong + 1198516784&qq.com +61768 + CommScope Technologies LLC + Aaron Wilkinson + aaron.wilkinson&commscope.com +61769 + Jeff Allen + Jeff Allen + im&jeffreyallen.tech +61770 + hacKRNspace + Robert Labudda + sir.vice+hackrnspace-remove-if-solicited&spacepanda.se +61771 + Shenzhen Vahours New Energy Corporation + BI YU YANG + yangbiyu&vahours.com +61772 + Shenzhen Number Energy Saving Corporation + Rebecca YANG + yangbiyu&vahours.com +61773 + SUMMIT IT CONSULT GmbH + Andreas Schmitz + it.hotline&summit-it-consult.de +61774 + Gröner Group AG + Sven Rackwitz + sven.rackwitz&cg-elementum.de +61775 + CG Elementum AG + Sven Rackwitz + sven.rackwitz&cg-elementum.de +61776 + ecobuilding AG + Sven Rackwitz + sven.rackwitz&cg-elementum.de +61777 + Metaminds + Adrian Tudor + admin&metaminds.com +61778 + GLN Schakt & Transport + Leif Nilsson + ekerydstra&gmail.com +61779 + Adrian Gonzaga-Ge Aguilar + Adrian Gonzaga-Ge Aguilar + adrianaguilar970&aol.com +61780 + Beijing Maker 6 Technology Co. Ltd. + LIU DING + 6366565&qq.com +61781 + Noosphere Technologies, Inc. + Andrew Brown + andrew&noosphere.tech +61782 + Century Autogroep BV + Tjalling Rijpstra + alex&ag-its.nl +61783 + RLKM UG + Nadja Reitzenstein + dequbed¶noidlabs.org +61784 + CIUSSS du Centre-Sud-de-l'Île-de-Montréal + Benoit de Mulder + benoit.demulder.ccsmtl&ssss.gouv.qc.ca +61785 + 上海源中信息科技有限公司 (Shanghai Yuanzhong Information Technology Co., Ltd.) + 李园 (Li Yuan) + yuan.li&yzitech.com +61786 + Medicalholodeck + Dominique Sandoz + dominique&medicalholodeck.com +61787 + Kostur IT SERVICES + Marek Kostur + pen&kostur.uk +61788 + IGOSEC.PL + Igor Szachniuk + sha&igosec.pl +61789 + Woningstichting Heteren + Theo Maassen + itpg&woningstichtingheteren.nl +61790 + Omnibit + Gemmar Jhan Contayoso + gemmarjhan&gmail.com +61791 + New Hope Information systems + Robert Alan Meltz + techmeltz&gmail.com +61792 + Call Box + William Cody Ardoin + cardoin&callbox.com +61793 + Tradewinds Networks Incorporated + Keith M. Alexis + keith.alexis&tradewindsnetworks.com +61794 + Masoud + Masoud hosseini + dartanian844&gmail.com +61795 + Moreforlesstech + Stacy Wilbon + swilbon&moreforlesstech.com +61796 + Ob/Gyn Professionals of East Tennessee + Tina Baker + tbaker&obgprofessionals.com +61797 + Sobel Tech + Abdourahmane Fall + fallphenix1987&gmail.com +61798 + Beijing Urban Construction Intelligent Control Co.,Ltd + wangfushuang + wfs_haitian&qq.com +61799 + Optima SC Inc. + Carl Eric Codere + cecodere&yahoo.ca +61800 + Vikor Scientific + Marshall Chew + mchew&vikorscientific.com +61801 + Nova Software SRL + Naum Ravinovich + naum.r&des.lat +61802 + Alta Labs + Jeff Hansen + jeff&alta.inc +61803 + Fox Crypto B.V. 
+ Mark Revier + iana&foxcrypto.com +61804 + XobeBook + Abdourahmane Fall + abdourahmane.fall&outlook.com +61805 + NEXION Corporation + Takafumi Kawai + kawai-t&nexion.co.jp +61806 + VOLARIS Marcin Ziemiański + Marcin Ziemiański + biuro&volaris.pl +61807 + Marcin Ziemiański + Marcin Ziemiański + marcin&ziemianski.info +61808 + kISA@kubus IT GbR + Rico Rieger + rico.rieger&kubus-it.de +61809 + The Perfume Shop + Martyn Fox + mfox&theperfumeshop.com +61810 + Bitzer Compressores Ltda. + Leandro Casaqui Contieri + leandro.contieri&bitzer.com.br +61811 + Noah Mesel + Scott Kawaguchi + scott&qusecure.com +61812 + Tutoringbytiff + Tiffany Woods-Long + tiffany&tutoringbytiff.com +61813 + Irista Technologies, Inc + Ji Fan + jim&irista.ai +61814 + Gitservice.dk + Jens Galsgaard + jens&gitservice.dk +61815 + Blair A Piper + Blair A Piper + Blair.Piper&Students.Post.Edu +61816 + Jungheinrich AG + Dr. Gerhard Schwär + gerhard.schwaer&jungheinrich.de +61817 + New Yard B.V. + Sjoerd van den Nieuwenhof + support&newyard.nl +61818 + Surry County Schools + Tim Hippert + thippert&surry.k12.nc.us +61819 + Information Security Services S.A. + Osvaldo Casagrande + osvaldo&iss.com.py +61820 + Chorke, Inc. + Md Shahed Hossain + info&chorke.org +61821 + Dynamo Software + Jens Erich Lange + info&dynamo-software.de +61822 + Per ALBIN HANSSON + ALBIN HANSSON + albin.hansson&live.se +61823 + Middlesurf Inc. + Arash Sadeghi + ar.sadeghi&middlesurf.com +61824 + Canadian Centre for Cyber Security + CCCS OID Registrar + oid&cyber.gc.ca +61825 + H ENERGY LTD + Dmitry Frantov + dmitry.frantov&energy-h.ru +61826 + Cloud Solutions LLC + Milekhin Dmitry + MilekhinDN&enplus.ru +61827 + MC Burny + Bogushevich Dmitry + BogushevichDV&cloudx.group +61828 + MTN Converged Solutions + Ayabonga Ngoma + ayabonga.ngoma&mtn.com +61829 + SAJ DIGITAL ENERGY AUSTRALIA PTY LTD + Alan Li + alan.li&saj-electric.com +61830 + Tinh Van Technologies JSC. + Nguyen Tung + tungns&tinhvan.com +61831 + MFK Burny + Bogushevvich Dmitry + BogushevichDV&cloudx.group +61832 + Alfavit + Aleksandr Kerov + PEN&alfavit.tech +61833 + Xiaojun Ben + Xiaojun Ben + 804939573&qq.com +61834 + Venturus Centro de Inovacao Tecnologica + Rafael Gava de Oliveira + rafael.oliveira&venturus.org.br +61835 + Otheda Limited + Adam Barnes + adam&otheda.com +61836 + AtFocus + Andrea Perfetti + info&atfocus.org +61837 + Associated Medical Specialists, d/b/a Coastal Cancer Center + Teresa Moore + regcomp&coastalcancercenter.com +61838 + iatele + andreia maria slaghenaufi + sm&iatele.com +61839 + Secretarium + Florian Guitton + contact&secretarium.org +61840 + Pohjois-Suomen Metsämarkkinat Oy + Mika Säynäjäkangas + mika.saynajakangas&psmm.fi +61841 + M+C Schiffer GmbH + Dirk Schneider + dirk.schneider&mc-schiffer.com +61842 + TRANS ELECTRIC CO.,LTD. + BAN LIN + W0981919330&GMAIL.COM +61843 + Impala Terminals + Gareth Burch + gareth.burch&impalaterminals.com +61844 + meet patel + meet patel + meet3082&gmail.com +61845 + Paul Rodriguez + Paul Rodriguez + pjicloudbase&gmail.com +61846 + North Platte Public Schools + Spencer Knight + sknight&nppsd.org +61847 + IEIT SYSTEMS Co.,Ltd. + Chuanbo Yuan + yuancb&ieisystem.com +61848 + Nexfi + Eric Zhu + eric&nexfi.cn +61849 + ZJUSCT + haoxingxing + mr.haoxx&gmail.com +61850 + mrhaoxx + haoxingxing + mr.haoxx&gmail.com +61851 + Cummins, Inc. + John Maag + john.maag&cummins.com +61852 + Northside Hospital, Inc. 
for the benefit of Atlanta Cancer Care + Jason Liang + jliang&atlantacancercare.com +61853 + nexgentec AG + Boas Hochstrasser + bhochstrasser&nexgentec.eu +61854 + COFACE + CHEVAUCHE Emmanuel + emmanuel.chevauche&coface.com +61855 + Nationwide Telephone Assistance Ltd + Paul White + paul&nta.co.uk +61856 + NextGenGames + Deepak Goyal + unicorntechteam&gmail.com +61857 + Converge Enterprise Cloud + DJ Rose + dj.rose&convergetp.com +61858 + Silines + Anatoly Teterkin + ipteterkin&silines.ru +61859 + Miami-Dade Police Department, Fl. + Mark Reyes + mark.reyes&miamidade.gov +61860 + SipaZon AB + Richard Dahlstrom + richard&jnt.se +61861 + Home Lab LLC + Skyler Dickey + skylerdickey&swbell.net +61862 + NAMIRIAL LIMITED + Marco Scognamiglio + m.scognamiglio&namirial.com +61863 + The Danish Society of Engineers, IDA + Matthew Stevenson + mat&ida.dk +61864 + GIGALIGHT + Zhou xinjun + aaron&gigalight.com +61865 + SKS Welding Systems GmbH + Uli Lang + it&de.sks-welding.com +61866 + IronWiFi, LLC + Seb Adamski + seb&ironwifi.com +61867 + IDnow Trust Services AB + Sebastian Elfors + sebastian.elfors&idnow.io +61868 + 西安恒多智能科技有限公司 (Xi'an Hengduo Intelligent Technology Co., Ltd.) + 欧阳诚苏 ( Chengsu Ouyang) + 27559842&qq.com +61869 + Visional Labs + Jay Jones + jjones&visionallabs.com +61870 + Marieborgs Lantbruk AB + Dag-Henrik Larsson + dag-henrik&marieborgsgard.se +61871 + Davide Crivelli + Davide Crivelli + crivelli.davide&gmail.com +61872 + KAYTUS SYSTEMS PTE. LTD. + Xavier Cheng + xaviercheng&kaytus.com +61873 + Landratsamt Tübingen + Jonas Berweiler + j.berweiler&kreis-tuebingen.de +61874 + WFF IT-Service GmbH + Michael Luttmer + iana-oids&wff-it.de +61875 + Beijing Jinqun Technology Co., Ltd + Henry Han + henry&jinquntech.com +61876 + Shanghai ReeLink Global Communication Company LTD + Lin Niu + 479325746&qq.com +61877 + htp GmbH + Dirk Duncker + d.duncker&htp.net +61878 + New H3C Technologies Co., Ltd + HaiYang Zhang + zhang.haiyanga&h3c.com +61879 + Villageofdali + Trung Nguyen + liavalon&gmail.com +61880 + Xsight Labs + Neil O'Rourke + neilo&xsightlabs.com +61881 + New Zealand Institute for Plant and Food Research + Stephen Bell + Stephen.Bell&plantandfood.co.nz +61882 + Esri Chile + Rodrigo Molina + rmolina&esri.cl +61883 + BEQ Technology + Tom Tiberio + ttiberio&beqtechnology.com +61884 + 百信信息技术有限公司 (Baixin Information Technology Co., Ltd.) + 李龙 (Li Long) + lilong&100trust.cn +61885 + Bright Star Pty Ltd + Dylan Carruthers + dylan.carruthers&stella.net.au +61886 + Paweł Worwąg + Paweł Worwąg + pawel.worwag&gmail.com +61887 + EcoFlow + Arrow.Lu + arrow.lu&ecoflow.com +61888 + Ballpoint + Corentin Margraff + contact&ballpoint.fr +61889 + Hazel + Hazel Reimer + hazel+iana-ent-hazel&farfrom.earth +61890 + The Funkin' Crew Inc. + Hazel Reimer + hazel+iana-ent-funkin&farfrom.earth +61891 + MELCOMP S.A. + Pablo Damian Mazzú + pmazzu&gmail.com +61892 + WATSONGALAXY.COM + Redacted for Privacy + f76c8ebdb62341e1ba84a5c96ffb527d.protect&withheldforprivacy.com +61893 + Tom Schollmeyer + Tom Schollmeyer + tom.schollmeyer&acddirect.com +61894 + TERRAWARP CORPORATION + Sharach Berman + sberman&terrawarp.com +61895 + Fabian Riechsteiner + Fabian Riechsteiner + fabian&riechsteiner.tech +61896 + IOFE SAC + Ernesto Aranda V + earanda&kprtech.com +61897 + VNETS INFORMATION TECHNOLOGY LTD. 
+ Henry Wu + henrywu19&163.com +61898 + Spruce Systems Inc + Wayne Chang + standards&spruceid.com +61899 + Grant Lewis + Grant Lewis + grantlewis&iheartmedia.com +61900 + eSIM Go Limited + Chris Maynard + chris&esim-go.com +61901 + Magnus Wissler + Magnus Wissler + gmw&wittyname.net +61902 + cyt.s365.us + Eric Thornton + eric.thornton&global.ntt +61903 + LLC Accent Capital + Gennadiy Volodarskiy + gvolodarskiy&accent.ru +61904 + Switchfly + Eric Prescott + eprescott&switchfly.com +61905 + Stichting WoonFriesland + Stichting WoonFriesland + alex&detechnischejongens.nl +61906 + DUG Technology Pty Ltd + Doug McCloud + procurement&dug.com +61907 + Kliskatek SL + Mikel Choperena + info&kliskatek.com +61908 + KOSTAL Solar Electric GmbH + Andreas Schmalenberg + a.schmalenberg&kostal.com +61909 + IT Security Services SA de CV + Vicente Alberto Ramirez Altamirano + vicente_ramirez&it-securityservices.com +61910 + Essence quantum technology + Jason Wang + wangls1062679&gmail.com +61911 + Inseego Wireless + Vikram Kumar + Engineering-Device-Managers&inseego.com +61912 + GoCert.Ca + Richard Simard + richard.simard&groupesti.com +61913 + AUSNET PTY LTD + Pankush Mehta + ict.commercial&ausnetservices.com.au +61914 + C-COR Broadband Australia Pty. Ltd. + Ashley Cummings + acummings&c-cor.com.au +61915 + Beeline Telecom + Abiy T Belachew + abiy.belachew&zedmobile.co.zm +61916 + Huettenes GmbH Architekten + Holger Rose + rose&huettenes.de +61917 + Thomas Jones + Thomas Jones + wortcook&gmail.com +61918 + IT LAB SG + Kai Loon + admin&itlabsg.net +61919 + Groupe La Québécoise + Richard Simard + richard.simard&groupealq.com +61920 + Linxpeed Limited + Henry Brankin + henry.brankin&linxpeed.com +61921 + GavinMP + Gavin Pawlovich + gsmallovich&gmail.com +61922 + Black Country Healthcare NHS Foundation Trust + Mr Joel Spink + bchft.IANA&nhs.net +61923 + ENELT GROUP Co.Ltd. + Mihailov Alexander + mihailov.as&enelt.com +61924 + recretix systems AG + Fabian Riechsteiner + fabian.riechsteiner&recretix.ch +61925 + 10T Tech Sdn Bhd + James Yeung + iana&10ttech.com +61926 + Grant Taylor + Grant Taylor + iana>aylor.tnetconsulting.net +61927 + Retailsystem.com + Gunnar Ewerlof + gunnar&retailsystem.com +61928 + NetDevWare + Phil Hsieh + contact.iana&netdevware.org +61929 + Schaffhauser Kantonalbank + Rico Steinemann + it-im&shkb.ch +61930 + PEBKAC.CA Technology Solutions + Ryan McIntosh + ryan&pebkac.ca +61931 + Kindred Tech + Lee Burgess + lee.burgess&kindredgroup.com +61932 + OtoTrak d.o.o. + Marin Rukavina + marin.rukavina&ototrak.com +61933 + IASO GmbH + Roland Zeidler + sysadmin&iaso.ch +61934 + QMW Technologies Inc. + Mohammed Kaddaoui + sales&qmw-tech.com +61935 + Swerve Limited + Dalibor Andzakovic + dalibor.andzakovic&swerve.nz +61936 + Prolan Power Co., Ltd. + Attila Horvath + horvath.attila&prolan-power.hu +61937 + Acertia + José Alberto Vidrio Noriega + jose.vidrio&diverza.com +61938 + Kurt Lagergrens Trävaru AB + Rikard Lagergren + rikard&kltab.se +61939 + Genew Technologies Co., Ltd. 
+ Li Zhi + lizhi&genew.com +61940 + Excard Research GmbH + Dominik Haverkamp + oid&excard.de +61941 + Notre Dame Academy + NDA IT + nda-it&ndahingham.com +61942 + Thüringer Staatslotterie AöR + Axel Böttger + axel.boettger&lotto-thueringen.de +61943 + Beijing LXTV Technology Co., Ltd + wang xiaodan + 18610838007&163.com +61944 + PM Factory BV + Stefan Tobé + finance&pmfactory.nl +61945 + Bliss.AI Pty Ltd + Ian Connor + ian&blissai.com +61946 + PT Integra Teknologi Solusi + Hary Miftah Fauzan + hary.miftah&sevima.id +61947 + Lohith BK + Lohith BK + lohit.bk&outlook.com +61948 + Erik Jan Uitenbroek + Erik Jan Uitenbroek + ejuitenbroek&outlook.com +61949 + SANRO HEALTH PTY LTD + Karthigeyan Gunaseelan + karthi&sanrohealth.com +61950 + neu-itec GmbH + René Waterstradt + rene.waterstradt&neu-itec.de +61951 + Beijing Yingfei Networks Technology Co.Ltd. + Miles Zhang + zhangmiao&yf-networks.com +61952 + Luis Grünke + Luis Grünke + l.gruenke&grnk.de +61953 + Andreas Mueller + Andreas Mueller + andreas.mueller.dd&proton.me +61954 + Hochschule Karlsruhe + Guenther Schreiner + hans-guenther.schreiner&h-ka.de +61955 + Praxis Asmus-Timm + Christoffer T. Timm + praxis&asmus-timm.de +61956 + SaiFlow + Dor Shmaryahu + dor&saiflow.com +61957 + RCE Systems s.r.o. / DataFromSky + David Herman + david.herman&rcesystems.cz +61958 + BerryByte Limited + Syed Mohiuddin Qadri + noc&berrybyte.net +61959 + Lusory Limited + Syed Mohiuddin Qadri + network&lusory.net +61960 + Miralium Research + Rodrigo Ferreira Menezes dos Santos + rlaneth&miralium.re +61961 + Kodsport Sverige + Martin Oliver Wennberg + martin.wennberg&kodsport.se +61962 + Chengdu Rongshitong Technology Co., Ltd + jiabao shi + shijiabao&cd-rst.com +61963 + SCA Obbola AB + Magnus Tjärnskog + magnus.tjarnskog&sca.com +61964 + Craft Digital Inc. + Li Xi + 4053648&qq.com +61965 + Knoxville Utilities Board + Stefan Brundige + stefan.brundige&kub.org +61966 + MythicalKitten + Syed Mohiuddin Qadri + iana-private-numbers&mythicalkitten.com +61967 + SCM Insurance Services + Mike Watchman + mike.watchman&scm.ca +61968 + Wojciech Teichert (6961405014) + Wojciech Teichert + wojciech.teichert&wojciechteichert.pl +61969 + TCIIT + Izabela Teichert + izabela.teichert&tciit.pl +61970 + Kinder.Bildung.Zukunft e.V. + Stefan Schmidt + iana-contact&kibiz.org +61971 + Teleradiologia Avanzada SL + Jose Luis Infante Arco + jlinfante&telera.es +61972 + IPTAM GmbH + Karsten Wemheuer + k.wemheuer&iptam.com +61973 + Beijing Puruixin Technology Co., Ltd. + HAOMIAO YU + haomiao&triathlontech.io +61974 + Teske Virtual System + Lucas Teske + lucas&teske.com.br +61975 + Comprehensive Hematology Oncology + Iliana Bolotn + iebolton&comphemonc.com +61976 + NeuReality + Ramon Fried + ramon&neureality.ai +61977 + 7Generation + Yerden Zhumabekov + ye.zhumabekov&7g.team +61978 + Bindworks + Martin Vantara + serviceadmin&bindworks.eu +61979 + Redstor Limited + Tony Abbott + tony.abbott&redstor.com +61980 + Madison Women's Clinic + David Mecham + david&madisonwomensclinic.com +61981 + Volvo Autonomous Solutions + Eugene Portnoy + eugene.portnoy&volvo.com +61982 + ITS Consulting s.r.o. + Tomas Vlcek + tomas.vlcek&itsconsulting.cz +61983 + Tianjin Optical Electrical Juneng Communication Co. , Ltd. 
+ Luyu Chen + cly&js754.cn +61984 + Zhengdao Quantum + Lance Zhang + lingsongzhang&zdlz.top +61985 + MapleCloud Technologies + Yogendra Rajput + yogendra.rajput&maplecloudtechnologies.com +61986 + TEAC Corporation + Hironobu Narita + hironobu.narita&teac.jp +61987 + E+H Rechtsanwälte GmbH + Jürgen Schnell + j.schnell&eh.at +61988 + FTP Today, Inc. + Martin Horan + mhoran&sharetru.com +61989 + Rudolf Presl GmbH & Co. Klinik Bavaria Rehabilitations KG + Lars Streubel + it&klinik-bavaria.de +61990 + Institut d'études politiques de Lyon + Service informatique et audiovisuel + oid-ldap&sciencespo-lyon.fr +61991 + Kian Alraqmiah + AHMED ELKHAYYAT + elkhayyat&kian.com.sa +61992 + ACSoftware + Tainã Rodrigues + taina.rodrigues&acsoftware.com.br +61993 + CelSian Glass & Solar B.V. + Harmen Kielstra + admin&celsian.nl +61994 + Guangzhou Huisheng Electronic Technology Co., Ltd + huangyong + 13636603&qq.com +61995 + Rogers Behavioral Health + olti babi + olti.babi&rogersbh.org +61996 + Raymond Sin + Raymond Sin + ray.sin.ca&gmail.com +61997 + Technologies Ssmidge + Adrian Trifonov + adriant&ssmidge.xyz +61998 + Umeå Energi AB + Herman Johnson + herman.johnson&umeaenergi.se +61999 + de.borderline + Hazel Reimer + hazel+iana-ent-borderline&farfrom.earth +62000 + GVS Ko + Srdjan Veljkovic + sveljko&gvs.rs +62001 + Sun Valley Holdings + Zayd Mohammed + zayd.md&gmail.com +62002 + ONAIR.VISION + ESPINET Philippe + philippe.espinet&onairvision.fr +62003 + YATELECOM.RU + Yaroslav Bovbel + yaroslavbovbel&mail.ru +62004 + R-Group International + Chris Kemp + neteng&r-group.com.au +62005 + Erabyte Inc + Matthew Tim + matthew.tim&erabyte.us +62006 + Simetel S.p.A. + Vittorio Barison + vittorio.barison&simetel.it +62007 + Say Webhost Inc + Stephen Spicknall + steve&sayweb.org +62008 + CyberSico + Tomasz Habrajski + thsico&hotmail.com +62009 + MARISSA ANN OBRYAN + MARISSA ANN OBRYAN + marissa7718&icloud.com +62010 + Hoffmann Automobile AG + Wagner AG + tm-system&wagner.ch +62011 + ECACTUS PTY LTD + Gary Dong + gary.dong&weiheng-tech.com +62012 + OneLayer Network + Rick Wong + noc&onelayer.net +62013 + Dielectric, LLC + Connor Pittman + connorjohnson41&gmail.com +62014 + Richard Tetlow + Richard Tetlow + pright&gmail.com +62015 + Shenzhen SH-fiber Technology Co., Ltd. + Yufang Dai + daiyf&sh-link.net +62016 + J. Schmalz GmbH + Certificate Administrator + cert-admin&schmalz.com +62017 + Genus Innovation + Rohit Kumar + rohit.kumar&genusinnovation.com +62018 + Northbrook School District 27 + Network Admin + networkadmin&nb27.org +62019 + Wanhua Chemical Group Co.,Ltd. + YingYu Cui + yycui&whchem.com +62020 + Shaun Courtney + Shaun Courtney + shaun&courtney.org.za +62021 + Decent Lab + Haofan Zheng + hzheng6&ucsc.edu +62022 + Brotel s.r.o. + Vojtech Brothanek + vbrothanek&brotel.cz +62023 + Not Not Consulting AB + Henrik Berggren + henrik¬not.se +62024 + PIXA + Patrick Mercier + patrick&pixa.ca +62025 + Tenex Software Solutions, Inc. + Ravi Kallem + ravi.kallem&tenexsolutions.com +62026 + While 1 SRL + Massimo Dardano + info&while1.com +62027 + Trenton Public Schools + Garrett Palowitch + gpalowitch&trenton.k12.nj.us +62028 + Ring2U + Gustavo Junior Alves + gustavo.alves&ring2utele.com +62029 + ThysTips + Antoine Thys + contact&thystips.net +62030 + CyberPKI + Anthony JULOU + anthony.julou&cyberpki.fr +62031 + Tekmo Co., Ltd. + Jung Hongmyun + jhm&tekmo.co.kr +62032 + ORLEN S.A. 
+ Administratorzy + administratorzy&orlen.pl +62033 + Synerion Systems Lts + Mickey Markovits + mickey.markovits&rosslaresecurity.com +62034 + Vivek Chandran + Vivek Chandran + vivek&chandran.co.nz +62035 + Donni Maulana Sipa + Donni Maulana Sipa + iana&donnimsipa.my.id +62036 + Init7 + Roland Ulrich + sysadmin&init7.net +62037 + Forcap AB + Nils Lundmark + nils.lundmark&forcap.se +62038 + NexoPrima Sdn Bhd + Adzmely Bin Mansor + adzmely&nexoprima.com +62039 + snafu Gesellschaft für interaktive Netzwerke mbH (Society for interactive networks mbH) + Niels Kobschätzki + kobschaetzki&team.snafu.de +62040 + Blaguss + Helpdesk + edv&blaguss.com +62041 + Eguana Technologies Inc + Daljit Ghotra + daljit.ghotra&eguanatech.com +62042 + Al Hami Information Technology + Izzat Ali Izzat Alsalti + izzat.ali&alhamitech.com +62043 + Sidarion AG + Patrick Weber + info&sidarion.ch +62044 + DEYE NEW ENERGY AUSTRALIA PTY LTD + George Liu + georgeliu&deye.com.cn +62045 + Landesamt für Sicherheit in der Informationstechnik (LSI) + Marius Frinken + it-basisdienste&lsi.bayern.de +62046 + MED-EL Medical Electronics + Clemens Voller + it-security&medel.com +62047 + DESK SOFT COMERCIO E SERVICOS LTDA + ROBERTA J MESQUITA DA COSTA + master&desksoft.net +62048 + MODENA TERMINAL SRL + Emanuele Righi + e.righi&tlco.it +62049 + Eleasar Blum + Eleasar Blum + admin&eleasar.ch +62050 + H1 Radio Co.,Ltd. + junyoung kim + junyoung.kim&h1-radio.com +62051 + SAC/TC82(Techinical Committee 82 of Standardization Adiminstration of China ) + Qin LI + liqin&sgepri.sgcc.com.cn +62052 + Iredell Memorial Hosptial + Garrett Fox + garrett.fox&iredellhealth.org +62053 + Mobile Communications Company of Iran (MCI) + Parmida Nezhadjomeh + parmida.nejad&gmail.com +62054 + INFRAHEX + Dawid Banaszewski + kontakt&infrahex.pl +62055 + ActivCloud + Julien SIMBOLA + julien.simbola&activcloud.eu +62056 + Nigul.coop + Jaume Obrador + jobrador&lledoner.com +62057 + Entarian Limited + Adrian Owen + noreply&eesm.com +62058 + Flabrice + Fabrice Bucher + iana-pen&flabrice.ch +62059 + b-inside + Robby Swartenbroekx + robbys&b-inside.be +62060 + Tucson Embedded Systems + Nicholas West + nicholasw&tes-i.com +62061 + WalkHorizon + Hongzhuo Fan + fanhongzhuo&walkhorizon.com +62062 + 4MITECH + Leo Hong + leo.hong&4mitech.co.kr +62063 + EasyNet Industry (Shenzhen) Co., Ltd + Janet Wu + jwu&easynettek.com +62064 + ООО "Новые облачные технологии" (LLC "New cloud technologies") + Vladimir Zavoyskikh + vladimir.zavoyskikh&myoffice.team +62065 + CommScope Italy SRL + Federico Bergamelli + federico.bergamelli&commscope.com +62066 + CloudXP Ltd + David Brook + info&cloudxp.uk +62067 + ORLEN Oddział Centralny PGNiG w Warszawie + Jaroslaw Jakubowicz + jaroslaw.jakubowicz&pgnig.pl +62068 + Qasky + Jinping.Zhu + zhujinping&qasky.com +62069 + Triangle + Cabezas Andres + info&triangle.pm +62070 + Hobart Corporation + Randy Blankley + randy.blankley&itwfeg.com +62071 + E.T.I. Srl + Tiziano Bacocco + t.bacocco&etitech.net +62072 + UBIQCOM INDIA PVT LTD + Rajiv Goyal + rajiv&ubiqcom.in +62073 + DCM Services, LLC + Nick Wieseler + nick.wieseler&dcmservices.com +62074 + MDOC S.R.L. + Toni Magni + ads&medoco.health +62075 + Axonics, Inc + Faizal Abdeen + fabdeen&axonics.com +62076 + Fort Peck Assiniboine & Sioux Tribes + Amber Terry + amber.terry&fortpecktribes.net +62077 + Ares Green Technology Corporation + Jason_Lia + jason_liao&aresgreen.com.tw +62078 + Technology Innovation Lab, LLC. 
+ Orlando Guitian + orlando&tiltx.com +62079 + ESERA GmbH + Andreas Geisler + andreas.geisler&esera.de +62080 + Netflash Internet Solutions + Daniel Pawliw + technical&netflash.net +62081 + City and Borough of Juneau + James Zuelow + james.zuelow&juneau.gov +62082 + Rosatom Automated Control Systems JSC + Borisov Aleksandr + info&rasu.ru +62083 + 丰天鼎业科技有限公司 (Fengtian Dingye Technology Co., Ltd.) + 付子轩 (Fu Zixuan) + 302552175&qq.com +62084 + ARNOWA PTY LTD + Om Dubey + a.singh&arnowa.com +62085 + Neat Path Networks + Dragos Vingarzan + dragos&neatpath.net +62086 + BluForce, Inc + Piotr Koszko + oid.admin&bluforce.net +62087 + SAL Navigation AB + Mikael Cogne + admin&salnavigation.com +62088 + Motorola Solutions AD + Steven Bramson + steven.bramson&motorolasolutions.com +62089 + Centerboard AG + Andreas Immeli + andreas.immeli¢erboard.ch +62090 + Bitchief Technolgy Services Private Limited + Alok Kumar Singh + alok&bitchief.in +62091 + Taichitel Technology Shanghai Co., Ltd. + Frank Zhang + frank.zhang&taichitel.com +62092 + Terra Kognita Limited Liability Company + Erlan Kazakov + techsupport&terkog.com +62093 + EVRAZ STEEL BUILDING + Aleksey Saltykov + it.info&evrazsteel.ru +62094 + Max Planck Institute for Meteorology + CIS Admin + iana-pen&mpimet.mpg.de +62095 + Stay Informed GmbH + Peter Horner + postmaster&stayinformed.de +62096 + QT Imaging Inc. + Jeff Wang + jeff.wang&qtimaging.com +62097 + Ackestens åkeri ab + Mattias Ackesten + Johansakeri&telia.com +62098 + SATEL ltd + Evgenii Mosin + mosin&satel.org +62099 + Dayou Plus Co Ltd + Song Chi Hoon + chsong&dayou.co.kr +62100 + ITLook + Andrey Lyubimtsev + info&itlook.ru +62101 + Siscom Communication Limited Company + 王健 (Wang Jian) + mymsnid&live.cn +62102 + Tkl Logistics LLC + Tkl + Muhamedh193&gmail.com +62103 + Cordova Courier + Vincent Cordova + V.cordova&cordovacourier.com +62104 + James W Pyle III + James W Pyle III + james&pyle.us +62105 + LIGHT IT SOLUTIONS PTY LTD + Austin Ofomah + austin&ofomah.com.au +62106 + JSC RVi GROUP + Averchenko Igor + AverchenkoIV&rvigroup.ru +62107 + LRNZ Network Services + Oliver Lorenz + lrnz.network.services&gmail.com +62108 + Card4B Systems SA + Jorge da Costa Mendes + jorge.mendes&card4b.pt +62109 + Armstrong Fluid Technology + Dylan Kauling + dkauling&armstrongfluidtechnology.com +62110 + Go!Foton + Ken Takeuchi + ken.takeuchi&gofoton.com +62111 + NOVO Energy R&D + Martin Hardselius + martin.hardselius&novoenergy.se +62112 + techssol + Edgar Recancoj + admin&techssol.com +62113 + Société wallonne des eaux (SWDE) + Philippe Picalausa + support&digiteaux.be +62114 + TF-Industries GmbH + Fabian Gieshoff + gieshoff&tf-industries.com +62115 + VELARTIS GmbH + Philipp Dürhammer + info&velartis.at +62116 + TrustForge + Peter Szabo + peter&trustforge.hu +62117 + essendi it GmbH + Holger Sellenschütter + iana-contact&essendi.de +62118 + IMPULSE CCTV NETWORKS INDIA PRIVATE LIMITED + Amol Sharma + ak&impulsecctv.com +62119 + County Durham & Darlington NHS Foundation Trust + Graeme Moody + oid&cddft.nhs.uk +62120 + Konstantin L. Metlov + Konstantin L. 
Metlov + metlov&donfti.ru +62121 + UMBOSS + Domagoj Mikac + sysadmin&umboss.com +62122 + AlphaCentri Corporation + William Gill + william.gill&alphacentri.com +62123 + Mindset Integrated Co + Chris Airriess + airriess&mindsetintegrated.com +62124 + Jigsolve + Damien Sugden + damien&jigsolve.com.au +62125 + Sameway + Daniel Hsu + daniel&sameway.com.tw +62126 + Xovis Germany GmbH + Jan Karsch + jan.karsch&xovis.com +62127 + Shanghai Horizon Networks Co., Ltd + Chunhua Gao + chgao&horizon-adn.com +62128 + Grand Rapids Community College + Scott Minton + iana&grcc.edu +62129 + DanGo Designs Inc. + Daniel Golick + dgolick&gmail.com +62130 + DevRay IT Solutions Pvt. Ltd. + Yogendra Rajput + yogendra.rajput&maplecloudtechnologies.com +62131 + RED Digital Cinema + Branin Lippincott + iana&red.com +62132 + Lifeline Data Centers + Jerry Purvis + jpurvis&lifelinedatacenters.com +62133 + Kabelbruch + Thomas Neuwinger + thomas&neuwinger.de +62134 + RxTrail + Timothy Kerr + admin&rxtrail.org +62135 + California Pickleball Club + Jaycee James + californiapickleballclub&outlook.com +62136 + Onbitel, Ltd. + Choi kwang-kyu + ggchoi&onbitel.com +62137 + AO Yug-Systema plus + Leguta Vladimir + support&yugsys.ru +62138 + VIDI GmbH + Max Lehn + mlehn&vidi.eu +62139 + LOVATO Electric s.p.a. + Roberto Labaa + roberto.labaa&lovatoelectric.com +62140 + AQUASYNC INNOVATION (ZHEJIANG) CO., LTD + Zibin Zhang + bob.zhang&aquasync.tech +62141 + UNEAL - Universidade Estadual de Alagoas + Antonio Raphael Felix de Souza Cavalcante + raphael.felix&uneal.edu.br +62142 + WingArc1st Inc. + Hiroyuki Satou + cit_usg&wingarc.com +62143 + TRUSTCLOUD TECH, S.L. + Alberto Angón Robles + alberto.angon&trustcloud.tech +62144 + Persistent Telecom + Chris Awad + chris&persistent-telecom.net +62145 + Phoddo + Shoyeb Waliullah + wshoyeb&gmail.com +62146 + Datafire Group + Nathan St Clair + admin&datafiregroup.com +62147 + Wealth Enhancement + Nathan St. Clair + tenantadmin&wealthenhancement.com +62148 + Ignition Networks Limited + Jono Sands + iana&ignition.net.nz +62149 + Guangdong Hongjun Microelectronics Co., Ltd + Roy Zhang + roy.zhang&hj-micro.com +62150 + Mirae Signal Co., Ltd + Roy Yeo + sjyeo&miraesignal.com +62151 + Jovian Software Consulting LLC + John Bunn + info&joviansc.com +62152 + Beijing Cool Shark Technology Co., Ltd. + Liu Cangsong + robinliu&coolshark.com +62153 + RIX Riga Airport + Edgars Zēbergs + Certificate&riga-airport.com +62154 + Netsend + Tim Kuijsten + info+iana&netsend.nl +62155 + NAVIELEKTRO KY + Mats Koivisto + mats.koivisto&navielektro.fi +62156 + Bobapps LLC + Khalil Azizi + K.Azizi&bobapps.co +62157 + yunzhisec + Wenkai Zhu + rubinwhited&gmail.com +62158 + ORIS Automation + Mathieu Crochard + mathieu.crochard&orisautomation.fr +62159 + SGC Technology LLC + Alaa Abbani + alaa.abbani&sgc-technology.com +62160 + Colegio Dominicano de Notarios + Francisco Antonio Furcal Alcantara + ffurcal&gmail.com +62161 + Ambra Solutions + William Deshaies + william.deshaies&ambra.co +62162 + ABDUL KADIR, M.D. ADULT AND PEDIATRIC NEUROLOGY + LIVY GARZA + ANEURO8&gmail.com +62163 + Guangzhou Guang ha Communications shares Co.,Ltd. 
+ Cai Yanping + cai.yanping&ghtchina.com +62164 + City of Vancouver + SysAdmins + CityITSoftwareOrders&cityofvancouver.us +62165 + MNet Network ./ + Bert Tulescu + network&mnet.cc +62166 + NEXXUS NETWORKS INDIA PRIVATE LIMITED + Biswajyoti Das + biswajyoti&nexxusnetworks.com +62167 + IQ Tools LLC + Kupriyanova Nadezhda + info&iq-tools.ru +62168 + Octopus IoT srl + Paolo Marotta + p.marotta&octopusiot.it +62169 + ThinkQuantum s.r.l. + Luca Calderaro + luca.calderaro&thinkquantum.com +62170 + Telid Electronics + Yu Chunlin + yclfoxconn&163.com +62171 + AURIONPRO TRANSIT PTE. LTD. + Benny Tan + benny.tan&aurotransit.com +62172 + Fog Hashing Pte. Ltd. + Niande Yuan + niandeyuan&gmail.com +62173 + Softdent GmbH + Peter Derföldi + office&softdent.at +62174 + Universitaetsklinikum Bonn + Oliver Rebach + security-help&ukbonn.de +62175 + SL Alabama + Scott Lee + wlee&slworld.com +62176 + Codemaster + Alexander Milyar + a.milyar&codemaster.pro +62177 + Meteoric Ltd + Clive Gardner + clive&meteoric.net +62178 + Andrew Baxter, LLC + Andrew Baxter + zufnqrikdsg2z6&s.rendaw.me +62179 + Craft Computing + Jeffrey Soleim + business&craftcomputing.net +62180 + GridSpark + Justus van Biljon + justus&gridspark.au +62181 + Afzar Pardaz To (APT) + Alireza Paridar + alireza_paridar&outlook.com +62182 + Tiger Technologies Limited + Matthew Huxtable + hello&tigertechnologies.co.uk +62183 + Specific-Group Holding GmbH + Roman Korshakevich + adminby&specific-group.com +62184 + HWSE3 + Ronald van Dorp + iana&hwse3.com +62185 + Iru, LLC + Nicholas McDonald + nick&iru.io +62186 + ELOOM SYSTEM + KIM MIN SUNG + ksbjj&naver.com +62187 + Brooke Chalmers + Brooke Chalmers + breq&breq.dev +62188 + Integrated Procurement Technologies + Erik Reynoso + tech+iana&iptsb.com +62189 + RostLab + Burkhard Rost + iana&rostlab.org +62190 + Avem Health Partners + Scott Everhart + severhart&avemhealth.com +62191 + SingleID, K.K. + Tech Support + support&singleid.jp +62192 + Julian Keck + Julian Keck + iana&juliankeck.de +62193 + Ryan Goodman + Ryan Goodman + goodman.ryan.david&gmail.com +62194 + Testholm IT + Carsten Testholm + carsten&testholm.it +62195 + SMAP POWER TECHNOLOGY COMPANY + Gloria Lui + support&smappower.com +62196 + Sportmaster Lab + Alexey D. Filimonov + adfilimonov&sportmasterlab.net +62197 + Infinite Tech Labs + Heather Choi + heather-choi&infinitetechlabs.com +62198 + Borderless Technologies GmbH + Paul Becker + subscription&borderless-technologies.com +62199 + FlexCom AG + Christoph Zuber + info&flexcomag.ch +62200 + AD ASTRA ENTERPRISES + Mr. David McCabe + d.u.mccabe&gmail.com +62201 + Airbus Robotics + Pat Reynolds + patrick.reynolds&airbusrobotics.com +62202 + Satellite Industries, Inc. 
+ Ava Triviski + AvaT&satelliteindustries.com +62203 + pdhouse.club + Pablo Dominguez + cobroscompulsivos&gmail.com +62204 + Fort Peck Assiniboine & Sioux Tribes + Kristofer Fourstar + kris.fourstar&fortpecktribes.net +62205 + CompAct GmbH + Roman Stahl + roman.stahl&compact.de +62206 + Fiberakuten + Nicke Johansson + nicke&fiberakuten.com +62207 + Shenzhen Huarui Intelligent Equipment Co., Ltd + Songyuan Peng + psy&sz-huarui.cn +62208 + APA Family Support Services + Wilson + it-apa&apafss.org +62209 + Knowledge Computing Lab, USTC + Chen Yongrong + yrchen&mail.ustc.edu.cn +62210 + Satcom World + Senol Eker + senoleker&satcomworld.com +62211 + DB SERIES + Crístian Reginatto + cristian&dbseries.com.br +62212 + Lowcountry Oncology Associates, LLC + Barry Logue + barry.logue&lowcountryoncology.com +62213 + rabbit it AG + Roger Kirchhofer + support&rabbit-it.ch +62214 + tCubeSystem + Ravi kumar Thakur + ravi80rintu&gmail.com +62215 + 微网优联科技(成都)有限公司 (Microgrid Youlian Technology (Chengdu) Co., Ltd.) + 文学帅 (Wénxué shuài) + wenxueshuaiµnet.ltd +62216 + Asociacion Bonao de Ahorros y Prestamos + Seguridad Cibernetica y de la Informacion + sci&abonap.com.do +62217 + Quanta Storage Inc. + Challo Chen + challo.chen&qsitw.com +62218 + Mobius Software + Yulian Oifa + yulian.oifa&mobius-software.com +62219 + WorkSimple GmbH + Thomas Drewermann + thomas&worksimple.de +62220 + Sorenson + Greg Darling + scs&sorenson.com +62221 + City of Athens + Seth Siniard + pki&athensal.us +62222 + Aptus Solutions Ltd + Abdul Murtaza + noc&aptus.co.tz +62223 + Long Zheng + Long Zheng + long.zheng&gmail.com +62224 + Edatalia Data Solutions + Pedro Bastarrica Escala + pedro.bastarrica&edatalia.com +62225 + ScurrNET + Gavin Scurr + admin+iana&scurr.net +62226 + Gteex + Daniel Brito + daniel.brito&grupotecnosul.com +62227 + THE SECOND RESEARCH INSTITUTE OF CAAC + Yan Nie + nieyan&caacsri.com +62228 + Pardis Energy Conversion Company (Pec Co.) + Mostafa Asadi + mostafa.asadi1&gmail.com +62229 + GLBB Japan + Gary Blankenship + gary.blankenship&corp.glbb.ne.jp +62230 + City of Philadelphia + Dennis Doyne + networkgroup&phila.gov +62231 + 黄佳鑫 (Huang Jiaxin) + 黄佳鑫 (Huang Jiaxin) + hoshino1989&qq.com +62232 + Protempis + Pervez Mohta + pervez.mohta&protempis.com +62233 + Ausgrid + Nigel Hornidge + nigel.hornidge&ausgrid.com.au +62234 + Mubvumela Corporation + Ray Erasmus + ray&mbvit.co.za +62235 + Stadtwerke Kapfenberg GmbH + Martin Weber + edv&stadtwerke-kapfenberg.at +62236 + KLM IT AG + Kilian Meister + kilian.meister&klmit.swiss +62237 + James Anderson + James Anderson + Rygar574&gmail.com +62238 + KTC Medical + KTC Medical + gylj_wfm&163.com +62239 + SMARTMETER SMARTMETER INOVACOES TECNOLOGICAS LTDA + ANTONIO MANOEL RIBEIRO DE ALMEIDA + contato&smartmeter.tec.br +62240 + World Media Group, LLC + Adrian Aguilar + adriangonzagaaguilar&icloud.com +62241 + Russell Mangel + Russell Mangel + russell&tymer.net +62242 + Banco Municipal de Rosario + Pablo Marinozzi + pablo.marinozzi&bmros.com.ar +62243 + SW7 Holdings Limited + Kiran Lakhotia + kiran.lakhotia&sw7group.com +62244 + EOX IT Services GmbH + Karl Grube + karl.grube&eox.at +62245 + Politecnico di Milano Cryptography Group + Alessandro Barenghi + alessandro.barenghi&polimi.it +62246 + Energa Informatyka i Technologie + Michał Żeberkiewicz + michal.zeberkiewicz&energa.pl +62247 + VOSTOK Trading LLC + Mohamed Shafi + shafi&vostok.ae +62248 + 言创智信(北京)科技有限公司 (Yanchuang Zhixin (Beijing) Technology Co., Ltd.) 
+ 陈明 (Chen Ming) + chenming&yantronic.com +62249 + 廖雷 (Liao Lei) + 廖雷 (Liao Lei) + luckliaolei&gmail.com +62250 + Australian Centre for International Agricultural Research + Kyle Smith + Kyle.Smith&aciar.gov.au +62251 + OOO "Softime" + Dmitry Yazykov + d.yazykov&softi.me +62252 + Softime LLC + Evgeniy Zhilkov + e.zhilkov&softi.me +62253 + Bas Westerbaan + Bas Westerbaan + bas&westerbaan.name +62254 + KWD Automotive AG & Co. KG + Stefan Förster + edv&kwdag.com +62255 + Mats Klepsland + Mats Klepsland + mats.klepsland&gmail.com +62256 + Trusthub, LLC + Roberto Minoletti + roberto.minoletti&trusthub.cloud +62257 + candifloss.cc + Shaheem Thandupara + shaheem_tp&protonmail.com +62258 + Blokkus S.A.S. + Vincent Regnard + vincent&blokkus.fr +62259 + MD West One + April Campbell + acampbell&mdwestone.com +62260 + Hangzhou Yagena Technology Co., Ltd + Sun Shuiwang + sunshuiwang&yagena.net +62261 + Intravis GmbH + Björn Schlak + it-logs&intravis.de +62262 + IBI Verde Sp. z o.o. + Grzegorz Zieliński + grzegorz.zielinski&ibiverde.pl +62263 + Mariner Innovations + Jeff Parker + jeff.parker&marinerinnovations.com +62264 + embeX GmbH + Thomas Geerlings + IT&embex.de +62265 + Sixpolys + Matthias Schlemm + matthias&sixpolys.com +62266 + Stadtwerke Zweibruecken + Joerg Haas + it&stadtwerke-zw.de +62267 + Website Pipeline, Inc. + Greg Mitchell + greg.mitchell&cimcloud.com +62268 + Greater Regional Health + Ashley Chrisco + Ashleyc&greaterregional.org +62269 + CFE Laguna Verde + Jaime Carballo Díaz Infante + jaime.carballo&cfe.mx +62270 + 华鲲 (Huakun) + 周淋深 (Zhou Linshen) + zhoulinshen&schkzy.cn +62271 + MyDress Holdings Limited + IT Admin + itadmin&mydress.com +62272 + Boardwalk Pipelines, LP. + James Sumpter + OID.Admin&bwpipelines.com +62273 + Skynet sp. z o.o. + Mateusz Gołębiewski + mateusz.golebiewski&skynet.net.pl +62274 + Brits & Dale B.V. + Ruben Gommers + iana-poc&britsdale.com +62275 + AMEGA Corporation + Alexei Karpov + akarpov&amegadesign.com +62276 + Fundación Raúl Roberto Steimbach + Raul R. Steimbach + raul&steimbach.org +62277 + LIT LLC + Dmitri Levkin + d.a.levkin&yandex.ru +62278 + PT. Jalur Nugraha Ekakurir (JNE) + Sihar Panggabean + sihar.panggabean&jne.co.id +62279 + EByte + Eleasar Blum + iana-pen&ebyte.ch +62280 + Profundities LLC + Kenneth Brent Tingey + kenneth.tingey&profundities.info +62281 + Saman Tadbir Fartak + Morteza Kalantar + kalantar&samayesh.com +62282 + Shanghai Belong Technology Co., Ltd. + Kanson Ren + howtosay&yeah.net +62283 + LLC "Remer automation" + Leonid Karpenko + ra.info&remergroup.ru +62284 + Skogsfraktarna i Sundsvall AB + Petter Näslund + Skogsfraktarna&gmail.com +62285 + Aodun (Beijing) Technology Co., Ltd. + Herbst Zhao + 157775252&qq.com +62286 + Vidya Sagar + Vidya Sagar + vidya.sagar&live.in +62287 + DYNESS AUS PTY LTD. + Haifeng Zhang + zhanghaifeng1&dyness-tech.com +62288 + Resideo Technologies, Inc. + OID Administrators + oid.administrators&resideo.com +62289 + ROWI TECH LLC + Domain Administrator + pki&rowi.com +62290 + Honducert S.A. + Jimmy Amador + jimmy.amador&honducert.com +62291 + Viasat, Inc. + Ivan Nettles + ivan.nettles&viasat.com +62292 + Penguin Securities Pte. Ltd. + Tomofumi Okubo + tomofumi.okubo&penguinsecurities.sg +62293 + BIGGE ENERGIE GmbH & Co. KG + Heiko Pawelczyk + Heiko.Pawelczyk&bigge-energie.de +62294 + Migration Department under the Ministry of the Interior of the Republic of Lithuania + Miglė Juknevičienė + migle.jukneviciene&migracija.gov.lt +62295 + LoBoCOM + Bert Bos + inkoop&lobocom.nl +62296 + Roamability LLC. 
+ Anika Oertel + anika&nexuce.com +62297 + TeleTeam Call-Center und Service GmbH + Thomas Hempe + CA-PEN&call-teleteam.de +62298 + EKF Group + Sergey Khutornoy + s.khutornoi&ekfgroup.com +62299 + PDM Precast Inc. + Dean Putnam + pdmadmin&pdmprecast.com +62300 + PEAKAIO Limited + Mark Klarzynski + mark.klarzynski&peakaio.com +62301 + Akses Femto Indonesia + Jun Saptaji + jun.saptaji&aksesfemto.co.id +62302 + Mini workgroups ltd + Mark Halliday + dev&miniwg.com +62303 + Wasp Enterprise + Jacob Riggan + waspadmin&waspenterprise.org +62304 + Millitronic Co., LTD + Huang, CHONG-LI + bayer&millitronic.com.tw +62305 + Lima Besparingsskog + Fredrik Eriksson + fredrik.eriksson&limaskog.com +62306 + SK ID Solution AS + Mihkel Tammsalu + mihkel.tammsalu&skidsolutions.eu +62307 + Len Bourgeois + Len Bourgeois + iamlenb&gmail.com +62308 + Just Great Software + Jan Goyvaerts + jg&jgsoft.com +62309 + SCF Ti + Salatiel Carvalho Farchi + suporte&scfti.com.br +62310 + Indefinity Inc. IT Services + Adam L Waring + certificates&indefinity.co.uk +62311 + Herzog Technologies, Inc. + HTIX IANA + iana&htix.org +62312 + Cloonar e.U. + Dominik Polakovics + dominik.polakovics&cloonar.com +62313 + delta enigma + Daniel Brooker + daniel.brooker&delta-enigma.com +62314 + ITI Operations Limited + Ben Draper + ben.draper&itigroup.com +62315 + Sensetechno Solutions + Vinod Kumar + vinod.k&sensetechno.com +62316 + Ken Garff Automotive Group + Ryan Taniguchi + rtaniguchi&kengarff.com +62317 + Zentec LLC + Зендер Михаил Викторович (Zender Mikhail Viktorovich) + info&zentec.ru +62318 + Иван Ивантей (Ivan Ivantey) + Иван Ивантей (Ivan Ivantey) + ivantey228&gmail.com +62319 + Fall River Deaconess Home + Andrew Estrella + iana-pen&deaconesshome.org +62320 + National TsingHua University Blockchain Club + National TsingHua University Blockchain Club + noc&nthu.dev +62321 + EmbedWay Technologies (Shanghai) Corporation + HuangQi + marketing&embedway.com +62322 + Jutos Timber AB + Björn Winsa + bjorn.winsa&jutos.se +62323 + Quarto Centro Integrado de Defesa Aérea e Controle de Tráfego Aéreo (CINDACTA IV) + Reginaldo Martins + rmartinsrom&fab.mil.br +62324 + MPLR Consultants + Martin Le Roux + admin&mplrconsultants.com +62325 + Syslog Works + Michal Sojka + syslog.works&proton.me +62326 + DEHOCO (Deutschland) GmbH + Alexander Hillmann + alexander.hillmann&deugro-group.com +62327 + EveryWare Ltd + Stefan Hofmann + alpha_admin&everyware.ch +62328 + Waters Limited + Matt Wherry + matthew_wherry&waters.com +62329 + Icebreaker One Limited + Frank Wales + devops&icebreakerone.org +62330 + Sandhya Sharma + Sandhya Sharma + sandhyasharma198&gmail.com +62331 + Watermark Auto Group + Andrew Hancock + andrew&watermarkautogroup.com +62332 + EcoFlow Inc. + Jason Guo + compliance&ecoflow.com +62333 + CertMe GmbH + Senn Eduard + eduard.senn&certme.at +62334 + Lakewood Communications + Joseph Worrall + worrall&lakecomm.com +62335 + Cyber Privilege Private Limited + G Vimal Kumar + cto&cyberprivilege.com +62336 + British Business Bank plc + Andy Littlewood + iana-pen&british-business-bank.co.uk +62337 + Datask B.V. + Jules Verhaeren + info&datask.nl +62338 + NTEX Inrikes AB + NTEX Inrikes AB + jan.lundberg&ntex.com +62339 + AmerisourceBergen - Cencora + Vartan Chukhadarian + vartan.chukhadarian&cencora.com +62340 + Motorola Solutions Poland + Tomasz Klys + tomasz.klys&motorolasolutions.com +62341 + Impregilo B. Ewing + Impregilo B. Ewing + factanord&protonmail.com +62342 + Li Auto + Li Xiangdong + lixiangdong&lixiang.com +62343 + A. 
Frauenrath Bauunternehmen GmbH + Chi-Binh Banh + edv&frauenrath.de +62344 + Smartmatica LLC + Panochkin Denis + bigtesty&yandex.ru +62345 + ToastStudios + Robert Pfaff + r.pfaff&toaststudios.de +62346 + Marché du Film + Davous Antoine + antoine.davous&gmail.com +62347 + The Thompson Consultancy + John W Thompson + John&the-thompson-consultancy.com +62348 + 云上北斗股份有限公司 (Yunshang Beidou Co., Ltd.) + 黎杨 (Li Yang) + 2515513681&qq.com +62349 + Qian Qian + Qian Qian + 7713217&qq.com +62350 + Ecole des Roches + Dominique Paris + support&ecoledesroches.com +62351 + SOFTBRIDGE Corporation + JunCheng He + lvyhe&softbridge.com +62352 + TELCON LLC + Andrey Raise + dev&telcon.ru +62353 + Peculiar Tech + Zach Johnson + szj9345&gmail.com +62354 + Szkola Glowna Gospodarstwa Wiejskiego w Warszawie (Warsaw University of Life Sciences) + Piotr Gawin + sysop&sggw.edu.pl +62355 + Zyle Moore + Zyle Moore + moore.zyle&gmail.com +62356 + Intelligence Research Systems + Richard Rosenheim + iana&intelligenceresearchsystems.com +62357 + Alicorn + Artjoms Inkins + servers&alicorn.lv +62358 + RELICK + Steven C. Relick + relicksc&netscape.net +62359 + Quilt + Brian Stack + brian&quilt.com +62360 + Hinen Australia + Zee Zhuang + zee.zhuang&hinen.com.au +62361 + Misr for Central Clearing, Depository and Registry + Mohamed Nour Refat + m.nour&mcsd.com.eg +62362 + Invisplus + Hamidreza Parvizian + Hamid&invisplus.com +62363 + Owl of Minerva Press + Ken Kubota + ianaoid&owlofminerva.net +62364 + pafin Inc. + Shunsuke Masuda + iana-registration&pafin.com +62365 + The Charlton Family + Chuck Charlton + chuck.charlton&icloud.com +62366 + Surgical Reality + Mike de Boer + dev&surgicalreality.com +62367 + CRmep + Shaun Savage + shaun.sav&crmep.cr +62368 + Henrik Carlqvist + Henrik Carlqvist + hc1600&poolhem.se +62369 + 中科启迪光电子科技(广州)有限公司 (Zhongkeqidi Optoelectronics Technology (Guangzhou) Co., Ltd.) + 李世科(Shike Li) + lishike&qdgdz.net +62370 + Atrinet LTD. + Meir Gilboa + meir.ben-moshe&atrinet.com +62371 + Sonio + Adarsh Pradeep Menon + adarsh.menon&sonio.ai +62372 + Zyax AB + Johan Gustafsson + johan&zyax.se +62373 + Astera Cancer Care + Fatiima Braxton + fatiima.braxton&asterahealthcare.org +62374 + America Movil S.A.B + Roberto Hernandez + roberto.hernandez&americamovil.com +62375 + Solmani SA + Galland + gregoire.galland&solmani.ch +62376 + Frankfurter Bankgesellschaft (Schweiz) AG + Steve Erzberger + steve.erzberger&frankfurter-bankgesellschaft.com +62377 + Airbus Defence and Space GmbH + Andreas Woerner + andy.woerner&airbus.com +62378 + Li Xiaowei + Li Xiaowei + 2633567133&qq.com +62379 + Adam Walz + Adam Walz + adam&adamwalz.net +62380 + SoundNodes + Marc Boitel + marc.boitel&soundnodes.com +62381 + CineLab Digital Lounge + Evgeny Ischenko + evgeny.ischenko&cinedigital.ru +62382 + Allen Independent School District + Jeff Black + jeffrey.black&allenisd.org +62383 + IIEP-UNESCO + Eric Lannaud + e.lannaud&unesco.org +62384 + Tagel Vasiliy + Tagel Vasiliy + vatagel&mail.ru +62385 + netwidedata.com + Jonathan David Brown + teklords&gmail.com +62386 + Chuanqi Zhang + Chuanqi Zhang + cqzhange&gmail.com +62387 + SEI Robotics + Nigg Zhang + zhangzhihu&seirobotics.net +62388 + Te Wananga o Aotearoa + Mike Buck + it.networks&twoa.ac.nz +62389 + Vlada Brčko distrikta BiH + Anel Ibrahimovic + anel.ibrahimovic&bdcentral.net +62390 + RSP GmbH & Co. 
KG + Frank Friebe + frank.friebe&sva.de +62391 + JOWENN.NET + Joseph Wenninger + reg&jowenn.net +62392 + Great Scott Gadgets + Michael Ossmann + info&greatscottgadgets.com +62393 + Cyphercor Inc. + Diego Matute + dmatute&cyphercor.com + + +End of Document \ No newline at end of file diff --git a/src/go/plugin/go.d/modules/snmp/entnum/lookup.go b/src/go/plugin/go.d/modules/snmp/entnum/lookup.go new file mode 100644 index 000000000..bcbfa9d7f --- /dev/null +++ b/src/go/plugin/go.d/modules/snmp/entnum/lookup.go @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package entnum + +import ( + "bufio" + "bytes" + _ "embed" + "strconv" + "strings" +) + +// https://www.iana.org/assignments/enterprise-numbers.txt +// +//go:embed "enterprise-numbers.txt" +var enterpriseNumberTxt []byte + +func LookupBySysObject(sysObject string) string { + return numbers[extractEntNumber(sysObject)] +} + +var numbers = func() map[string]string { + if len(enterpriseNumberTxt) == 0 { + panic("snmp: enterprise-numbers.txt is empty") + } + + mapping := make(map[string]string, 65000) + + vr := strings.NewReplacer("\"", "", "`", "", "\\", "") + var id string + + sc := bufio.NewScanner(bytes.NewReader(enterpriseNumberTxt)) + + for sc.Scan() { + line := strings.TrimSpace(sc.Text()) + if line == "" { + continue + } + + if _, err := strconv.Atoi(line); err == nil { + if id == "" { + id = line + if _, ok := mapping[id]; ok { + panic("snmp: duplicate entry number: " + line) + } + } + continue + } + if id != "" { + line = vr.Replace(line) + if line == "---none---" || line == "Reserved" { + id = "" + continue + } + mapping[id] = line + id = "" + } + } + + if len(mapping) == 0 { + panic("snmp: enterprise-numbers mapping is empty after reading enterprise-numbers.txt") + } + + return mapping +}() + +func extractEntNumber(sysObject string) string { + const rootOidIanaPEN = "1.3.6.1.4.1" + + // .1.3.6.1.4.1.14988.1 => 14988 + + sysObject = strings.TrimPrefix(sysObject, ".") + + s := strings.TrimPrefix(sysObject, rootOidIanaPEN+".") + + num, _, ok := strings.Cut(s, ".") + if !ok { + return "" + } + + return num +} diff --git a/src/go/plugin/go.d/modules/snmp/init.go b/src/go/plugin/go.d/modules/snmp/init.go index acde4b9b8..32b0ce6b1 100644 --- a/src/go/plugin/go.d/modules/snmp/init.go +++ b/src/go/plugin/go.d/modules/snmp/init.go @@ -8,15 +8,21 @@ import ( "strings" "time" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher" - + "github.com/google/uuid" "github.com/gosnmp/gosnmp" + + "github.com/netdata/netdata/go/plugins/pkg/matcher" ) func (s *SNMP) validateConfig() error { if s.Hostname == "" { return errors.New("SNMP hostname is required") } + if s.Vnode.GUID != "" { + if err := uuid.Validate(s.Vnode.GUID); err != nil { + return fmt.Errorf("invalid Vnode GUID: %v", err) + } + } return nil } diff --git a/src/go/plugin/go.d/modules/snmp/integrations/snmp_devices.md b/src/go/plugin/go.d/modules/snmp/integrations/snmp_devices.md index a2431b006..7a90325b8 100644 --- a/src/go/plugin/go.d/modules/snmp/integrations/snmp_devices.md +++ b/src/go/plugin/go.d/modules/snmp/integrations/snmp_devices.md @@ -134,8 +134,8 @@ No action required. The configuration file name for this integration is `go.d/snmp.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -153,6 +153,10 @@ The following options can be defined globally: update_every, autodetection_retry | update_every | Data collection frequency. | 10 | no | | autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | | hostname | Target IPv4 address. | | yes | +| create_vnode | If set, the collector will create a Netdata Virtual Node for this SNMP device, which will appear as a separate Node in Netdata. | false | no | +| vnode.guid | A unique identifier for the Virtual Node. If not set, a GUID will be automatically generated from the device's IP address. | | no | +| vnode.hostname | The hostname that will be used for the Virtual Node. If not set, the device's hostname will be used. | | no | +| vnode.labels | Additional key-value pairs to associate with the Virtual Node. | | no | | community | SNMPv1/2 community string. | public | no | | options.version | SNMP version. Available versions: 1, 2, 3. | 2 | no | | options.port | Target port. | 161 | no | @@ -160,8 +164,8 @@ The following options can be defined globally: update_every, autodetection_retry | options.timeout | SNMP request/response timeout. | 5 | no | | options.max_repetitions | Controls how many SNMP variables to retrieve in a single GETBULK request. | 25 | no | | options.max_request_size | Maximum number of OIDs allowed in a single GET request. | 60 | no | -| network_interface_filter.by_name | Filter interfaces by their names using [simple patterns](/src/libnetdata/simple_pattern/README.md#simple-patterns). | | no | -| network_interface_filter.by_type | Filter interfaces by their types using [simple patterns](/src/libnetdata/simple_pattern/README.md#simple-patterns). | | no | +| network_interface_filter.by_name | Filter interfaces by their names using [simple patterns](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md#simple-patterns). | | no | +| network_interface_filter.by_type | Filter interfaces by their types using [simple patterns](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md#simple-patterns). | | no | | user.name | SNMPv3 user name. | | no | | user.level | Security level of SNMPv3 messages. | | no | | user.auth_proto | Security protocol for authenticating SNMPv3 messages. | | no | diff --git a/src/go/plugin/go.d/modules/snmp/metadata.yaml b/src/go/plugin/go.d/modules/snmp/metadata.yaml index 0475a2f21..77f602a84 100644 --- a/src/go/plugin/go.d/modules/snmp/metadata.yaml +++ b/src/go/plugin/go.d/modules/snmp/metadata.yaml @@ -79,6 +79,22 @@ modules: description: Target IPv4 address. default_value: "" required: true + - name: create_vnode + description: If set, the collector will create a Netdata Virtual Node for this SNMP device, which will appear as a separate Node in Netdata. + default_value: "false" + required: false + - name: vnode.guid + description: A unique identifier for the Virtual Node. If not set, a GUID will be automatically generated from the device's IP address.
+ default_value: "" + required: false + - name: vnode.hostname + description: The hostname that will be used for the Virtual Node. If not set, the device's hostname will be used. + default_value: "" + required: false + - name: vnode.labels + description: Additional key-value pairs to associate with the Virtual Node. + default_value: "" + required: false - name: community description: SNMPv1/2 community string. default_value: public diff --git a/src/go/plugin/go.d/modules/snmp/snmp.go b/src/go/plugin/go.d/modules/snmp/snmp.go index 253d9f50d..0c81c849e 100644 --- a/src/go/plugin/go.d/modules/snmp/snmp.go +++ b/src/go/plugin/go.d/modules/snmp/snmp.go @@ -5,9 +5,11 @@ package snmp import ( _ "embed" "errors" + "fmt" + "github.com/netdata/netdata/go/plugins/pkg/matcher" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher" + "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/vnodes" "github.com/gosnmp/gosnmp" ) @@ -57,6 +59,8 @@ type SNMP struct { module.Base Config `yaml:",inline" json:""` + vnode *vnodes.VirtualNode + charts *module.Charts newSnmpClient func() gosnmp.Handler @@ -65,12 +69,14 @@ type SNMP struct { netIfaceFilterByName matcher.Matcher netIfaceFilterByType matcher.Matcher - checkMaxReps bool - collectIfMib bool + checkMaxReps bool + collectIfMib bool + netInterfaces map[string]*netInterface - sysName string - oids []string + sysInfo *sysInfo + + customOids []string } func (s *SNMP) Configuration() any { @@ -80,39 +86,34 @@ func (s *SNMP) Configuration() any { func (s *SNMP) Init() error { err := s.validateConfig() if err != nil { - s.Errorf("config validation failed: %v", err) - return err + return fmt.Errorf("config validation failed: %v", err) } snmpClient, err := s.initSNMPClient() if err != nil { - s.Errorf("failed to initialize SNMP client: %v", err) - return err + return fmt.Errorf("failed to initialize SNMP client: %v", err) } err = snmpClient.Connect() if err != nil { - s.Errorf("SNMP client connection failed: %v", err) - return err + return fmt.Errorf("SNMP client connection failed: %v", err) } s.snmpClient = snmpClient byName, byType, err := s.initNetIfaceFilters() if err != nil { - s.Errorf("failed to initialize network interface filters: %v", err) - return err + return fmt.Errorf("failed to initialize network interface filters: %v", err) } s.netIfaceFilterByName = byName s.netIfaceFilterByType = byType charts, err := newUserInputCharts(s.ChartsInput) if err != nil { - s.Errorf("failed to create user charts: %v", err) - return err + return fmt.Errorf("failed to create user charts: %v", err) } s.charts = charts - s.oids = s.initOIDs() + s.customOids = s.initOIDs() return nil } @@ -120,7 +121,6 @@ func (s *SNMP) Init() error { func (s *SNMP) Check() error { mx, err := s.collect() if err != nil { - s.Error(err) return err } @@ -153,3 +153,7 @@ func (s *SNMP) Cleanup() { _ = s.snmpClient.Close() } } + +func (s *SNMP) VirtualNode() *vnodes.VirtualNode { + return s.vnode +} diff --git a/src/go/plugin/go.d/modules/snmp/snmp_test.go b/src/go/plugin/go.d/modules/snmp/snmp_test.go index 1841235f1..38262fce6 100644 --- a/src/go/plugin/go.d/modules/snmp/snmp_test.go +++ b/src/go/plugin/go.d/modules/snmp/snmp_test.go @@ -580,10 +580,12 @@ func setMockClientInitExpect(m *snmpmock.MockHandler) { } func setMockClientSysExpect(m *snmpmock.MockHandler) { - m.EXPECT().Get([]string{oidSysName}).Return(&gosnmp.SnmpPacket{ - Variables: []gosnmp.SnmpPDU{ - {Value: []uint8("mock-host"), Type: 
gosnmp.OctetString}, - }, + m.EXPECT().WalkAll(rootOidMibSystem).Return([]gosnmp.SnmpPDU{ + {Name: oidSysDescr, Value: []uint8("mock sysDescr"), Type: gosnmp.OctetString}, + {Name: oidSysObject, Value: ".1.3.6.1.4.1.14988.1", Type: gosnmp.ObjectIdentifier}, + {Name: oidSysContact, Value: []uint8("mock sysContact"), Type: gosnmp.OctetString}, + {Name: oidSysName, Value: []uint8("mock sysName"), Type: gosnmp.OctetString}, + {Name: oidSysLocation, Value: []uint8("mock sysLocation"), Type: gosnmp.OctetString}, }, nil).MinTimes(1) m.EXPECT().Get([]string{oidSysUptime}).Return(&gosnmp.SnmpPacket{ diff --git a/src/go/plugin/go.d/modules/snmp/testdata/config.json b/src/go/plugin/go.d/modules/snmp/testdata/config.json index b88ac1c25..a2a2fc22b 100644 --- a/src/go/plugin/go.d/modules/snmp/testdata/config.json +++ b/src/go/plugin/go.d/modules/snmp/testdata/config.json @@ -1,6 +1,14 @@ { "update_every": 123, "hostname": "ok", + "create_vnode": true, + "vnode": { + "guid": "ok", + "hostname": "ok", + "labels": { + "ok": "ok" + } + }, "community": "ok", "network_interface_filter": { "by_name": "ok", diff --git a/src/go/plugin/go.d/modules/snmp/testdata/config.yaml b/src/go/plugin/go.d/modules/snmp/testdata/config.yaml index f4ddbf91c..bafee7313 100644 --- a/src/go/plugin/go.d/modules/snmp/testdata/config.yaml +++ b/src/go/plugin/go.d/modules/snmp/testdata/config.yaml @@ -1,5 +1,11 @@ update_every: 123 hostname: "ok" +create_vnode: yes +vnode: + guid: "ok" + hostname: "ok" + labels: + ok: "ok" community: "ok" network_interface_filter: by_name: "ok" diff --git a/src/go/plugin/go.d/modules/spigotmc/README.md b/src/go/plugin/go.d/modules/spigotmc/README.md new file mode 120000 index 000000000..66e5c9c47 --- /dev/null +++ b/src/go/plugin/go.d/modules/spigotmc/README.md @@ -0,0 +1 @@ +integrations/spigotmc.md \ No newline at end of file diff --git a/src/go/plugin/go.d/modules/spigotmc/charts.go b/src/go/plugin/go.d/modules/spigotmc/charts.go new file mode 100644 index 000000000..f0000df1e --- /dev/null +++ b/src/go/plugin/go.d/modules/spigotmc/charts.go @@ -0,0 +1,59 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package spigotmc + +import ( + "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" +) + +const ( + prioPlayers = module.Priority + iota + prioTps + prioMemory +) + +var charts = module.Charts{ + playersChart.Copy(), + tpsChart.Copy(), + memoryChart.Copy(), +} + +var playersChart = module.Chart{ + ID: "players", + Title: "Active Players", + Units: "players", + Fam: "players", + Ctx: "spigotmc.players", + Priority: prioPlayers, + Dims: module.Dims{ + {ID: "players", Name: "players"}, + }, +} + +var tpsChart = module.Chart{ + ID: "avg_tps", + Title: "Average Ticks Per Second", + Units: "ticks", + Fam: "ticks", + Ctx: "spigotmc.avg_tps", + Priority: prioTps, + Dims: module.Dims{ + {ID: "tps_1min", Name: "1min", Div: precision}, + {ID: "tps_5min", Name: "5min", Div: precision}, + {ID: "tps_15min", Name: "15min", Div: precision}, + }, +} + +var memoryChart = module.Chart{ + ID: "memory", + Title: "Memory Usage", + Units: "bytes", + Fam: "mem", + Ctx: "spigotmc.memory", + Priority: prioMemory, + Type: module.Area, + Dims: module.Dims{ + {ID: "mem_used", Name: "used"}, + {ID: "mem_alloc", Name: "alloc"}, + }, +} diff --git a/src/go/plugin/go.d/modules/spigotmc/client.go b/src/go/plugin/go.d/modules/spigotmc/client.go new file mode 100644 index 000000000..e887e7068 --- /dev/null +++ b/src/go/plugin/go.d/modules/spigotmc/client.go @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: 
GPL-3.0-or-later + +package spigotmc + +import ( + "time" + + "github.com/gorcon/rcon" +) + +type rconConn interface { + connect() error + disconnect() error + queryTps() (string, error) + queryList() (string, error) +} + +const ( + cmdTPS = "tps" + cmdList = "list" +) + +func newRconConn(cfg Config) rconConn { + return &rconClient{ + addr: cfg.Address, + password: cfg.Password, + timeout: cfg.Timeout.Duration(), + } +} + +type rconClient struct { + conn *rcon.Conn + addr string + password string + timeout time.Duration +} + +func (c *rconClient) queryTps() (string, error) { + return c.query(cmdTPS) +} + +func (c *rconClient) queryList() (string, error) { + return c.query(cmdList) +} + +func (c *rconClient) query(cmd string) (string, error) { + resp, err := c.conn.Execute(cmd) + if err != nil { + return "", err + } + return resp, nil +} + +func (c *rconClient) connect() error { + _ = c.disconnect() + + conn, err := rcon.Dial(c.addr, c.password, rcon.SetDialTimeout(c.timeout), rcon.SetDeadline(c.timeout)) + if err != nil { + return err + } + + c.conn = conn + + return nil +} + +func (c *rconClient) disconnect() error { + if c.conn != nil { + err := c.conn.Close() + c.conn = nil + return err + } + + return nil +} diff --git a/src/go/plugin/go.d/modules/spigotmc/collect.go b/src/go/plugin/go.d/modules/spigotmc/collect.go new file mode 100644 index 000000000..6a8498744 --- /dev/null +++ b/src/go/plugin/go.d/modules/spigotmc/collect.go @@ -0,0 +1,126 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package spigotmc + +import ( + "errors" + "fmt" + "regexp" + "strconv" + "strings" +) + +const precision = 100 + +var ( + reTPS = regexp.MustCompile(`(?ms)(?P<tps_1min>\d+.\d+),.*?(?P<tps_5min>\d+.\d+),.*?(?P<tps_15min>\d+.\d+).*?$.*?(?P<mem_used>\d+)/(?P<mem_alloc>\d+)[^:]+:\s*(?P<mem_max>\d+)`) + reList = regexp.MustCompile(`(?P<players>\d+)/?(?P<hidden_players>\d+)?.*?(?P<max_players>\d+)`) + reCleanResp = regexp.MustCompile(`§.`) +) + +func (s *SpigotMC) collect() (map[string]int64, error) { + if s.conn == nil { + conn, err := s.establishConn() + if err != nil { + return nil, err + } + s.conn = conn + } + + mx := make(map[string]int64) + + if err := s.collectTPS(mx); err != nil { + s.Cleanup() + return nil, fmt.Errorf("failed to collect '%s': %v", cmdTPS, err) + } + if err := s.collectList(mx); err != nil { + s.Cleanup() + return nil, fmt.Errorf("failed to collect '%s': %v", cmdList, err) + } + + return mx, nil +} + +func (s *SpigotMC) collectTPS(mx map[string]int64) error { + resp, err := s.conn.queryTps() + if err != nil { + return err + } + + s.Debugf("cmd '%s' response: %s", cmdTPS, resp) + + if err := parseResponse(resp, reTPS, func(s string, f float64) { + switch { + case strings.HasPrefix(s, "tps"): + f *= precision + case strings.HasPrefix(s, "mem"): + f *= 1024 * 1024 // mb to bytes + } + mx[s] = int64(f) + }); err != nil { + return err + } + + return nil +} + +func (s *SpigotMC) collectList(mx map[string]int64) error { + resp, err := s.conn.queryList() + if err != nil { + return err + } + s.Debugf("cmd '%s' response: %s", cmdList, resp) + + var players int64 + if err := parseResponse(resp, reList, func(s string, f float64) { + switch s { + case "players", "hidden_players": + players += int64(f) + } + }); err != nil { + return err + } + + mx["players"] = players + + return nil +} + +func parseResponse(resp string, re *regexp.Regexp, fn func(string, float64)) error { + if resp == "" { + return errors.New("empty response") + } + + resp = reCleanResp.ReplaceAllString(resp, "") + + matches := re.FindStringSubmatch(resp) + if len(matches) == 0 { + return errors.New("regexp does not
match") + } + + for i, name := range re.SubexpNames() { + if name == "" || len(matches) <= i || matches[i] == "" { + continue + } + val := matches[i] + + v, err := strconv.ParseFloat(val, 64) + if err != nil { + return fmt.Errorf("failed to parse key '%s' value '%s': %v", name, val, err) + } + + fn(name, v) + } + + return nil +} + +func (s *SpigotMC) establishConn() (rconConn, error) { + conn := s.newConn(s.Config) + + if err := conn.connect(); err != nil { + return nil, err + } + + return conn, nil +} diff --git a/src/go/plugin/go.d/modules/spigotmc/config_schema.json b/src/go/plugin/go.d/modules/spigotmc/config_schema.json new file mode 100644 index 000000000..82d17ef35 --- /dev/null +++ b/src/go/plugin/go.d/modules/spigotmc/config_schema.json @@ -0,0 +1,52 @@ +{ + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "SpigotMC collector configuration.", + "type": "object", + "properties": { + "update_every": { + "title": "Update every", + "description": "Data collection interval, measured in seconds.", + "type": "integer", + "minimum": 1, + "default": 1 + }, + "address": { + "title": "Address", + "description": "The IP address and port where the SpigotMC server listens for RCON connections.", + "type": "string", + "default": "127.0.0.1:25575" + }, + "password": { + "title": "Password", + "description": "The password for authentication.", + "type": "string", + "sensitive": true + }, + "timeout": { + "title": "Timeout", + "description": "Timeout for establishing a connection and communication (reading and writing) in seconds.", + "type": "number", + "minimum": 0.5, + "default": 1 + } + }, + "required": [ + "address" + ], + "patternProperties": { + "^name$": {} + } + }, + "uiSchema": { + "uiOptions": { + "fullPage": true + }, + "password": { + "ui:widget": "password" + }, + "timeout": { + "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)." + } + } +} diff --git a/src/go/plugin/go.d/modules/spigotmc/integrations/spigotmc.md b/src/go/plugin/go.d/modules/spigotmc/integrations/spigotmc.md new file mode 100644 index 000000000..ca613c11b --- /dev/null +++ b/src/go/plugin/go.d/modules/spigotmc/integrations/spigotmc.md @@ -0,0 +1,221 @@ + + +# SpigotMC + + + + + +Plugin: go.d.plugin +Module: spigotmc + + + +## Overview + +This collector monitors SpigotMC server server performance, in the form of ticks per second average, memory utilization, and active users. + + +It sends the `tps` and `list` commands to the Server, and gathers the metrics from the responses. + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects SpigotMC instances running on localhost that are listening on port 25575. + +> **Note that the SpigotMC RCON API requires a password**. +> While Netdata can automatically detect SpigotMC instances and create data collection jobs, these jobs will fail unless you provide the necessary credentials. + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. 
+ + + +### Per SpigotMC instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| spigotmc.players | players | players | +| spigotmc.avg_tps | 1min, 5min, 15min | ticks | +| spigotmc.memory | used, alloc | bytes | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/spigotmc.conf`. + + +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/spigotmc.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details open><summary>Config options</summary>
+ +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 1 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| address | The IP address and port where the SpigotMC server listens for RCON connections. | 127.0.0.1:25575 | yes | +| password | The password for authentication. | | yes | +| timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. | 1 | no | +</details>
+ +#### Examples + +##### Basic + +A basic example configuration. + +<details open><summary>Config</summary>
+ +```yaml +jobs: + - name: local + address: 127.0.0.1:25575 + password: somePassword + +``` +</details>
+ +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Collecting metrics from local and remote instances. + + +<details open><summary>Config</summary>
+ +```yaml +jobs: + - name: local + address: 127.0.0.1:25575 + password: somePassword + + - name: remote + address: 203.0.113.0:25575 + password: somePassword + +``` +</details>
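A note on the `timeout` option used in these examples: the collector's RCON client (see `client.go` earlier in this patch) applies it to both connection establishment and every read/write. A condensed sketch of that wiring, using the same `gorcon/rcon` options the collector uses; the address and password below are placeholders:

```go
package main

import (
	"log"
	"time"

	"github.com/gorcon/rcon"
)

func main() {
	// Placeholders: substitute your server's RCON address and password.
	const addr, password = "127.0.0.1:25575", "somePassword"

	timeout := time.Second // mirrors the collector's default "timeout: 1"

	conn, err := rcon.Dial(addr, password,
		rcon.SetDialTimeout(timeout), // bounds connection establishment
		rcon.SetDeadline(timeout),    // bounds each read and write
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```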
+ + + +## Troubleshooting + +### Debug Mode + +**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature. + +To troubleshoot issues with the `spigotmc` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m spigotmc + ``` + +### Getting Logs + +If you're encountering problems with the `spigotmc` collector, follow these steps to retrieve logs and identify potential issues: + +- **Run the command** specific to your system (systemd, non-systemd, or Docker container). +- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem. + +#### System with systemd + +Use the following command to view logs generated since the last Netdata service restart: + +```bash +journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep spigotmc +``` + +#### System without systemd + +Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name: + +```bash +grep spigotmc /var/log/netdata/collector.log +``` + +**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues. + +#### Docker Container + +If your Netdata runs in a Docker container named "netdata" (replace if different), use this command: + +```bash +docker logs netdata 2>&1 | grep spigotmc +``` + + diff --git a/src/go/plugin/go.d/modules/spigotmc/metadata.yaml b/src/go/plugin/go.d/modules/spigotmc/metadata.yaml new file mode 100644 index 000000000..2f2a33c5d --- /dev/null +++ b/src/go/plugin/go.d/modules/spigotmc/metadata.yaml @@ -0,0 +1,136 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-spigotmc + plugin_name: go.d.plugin + module_name: spigotmc + monitored_instance: + name: SpigotMC + link: https://www.spigotmc.org/ + categories: + - data-collection.gaming + icon_filename: "spigot.jfif" + related_resources: + integrations: + list: [] + info_provided_to_referring_integrations: + description: "" + keywords: + - minecraft + - spigotmc + - spigot + most_popular: false + overview: + data_collection: + metrics_description: | + This collector monitors SpigotMC server performance, in the form of average ticks per second, memory utilization, and active users. + method_description: | + It sends the `tps` and `list` commands to the server, and gathers the metrics from the responses. + supported_platforms: + include: [] + exclude: [] + multi_instance: true + additional_permissions: + description: "" + default_behavior: + auto_detection: + description: | + By default, it detects SpigotMC instances running on localhost that are listening on port 25575. + + > **Note that the SpigotMC RCON API requires a password**. + > While Netdata can automatically detect SpigotMC instances and create data collection jobs, these jobs will fail unless you provide the necessary credentials.
+ limits: + description: "" + performance_impact: + description: "" + setup: + prerequisites: + list: [] + configuration: + file: + name: go.d/spigotmc.conf + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. + folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 1 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + - name: address + description: The IP address and port where the SpigotMC server listens for RCON connections. + default_value: 127.0.0.1:25575 + required: true + - name: password + description: The password for authentication. + default_value: "" + required: true + - name: timeout + description: Connection, read, and write timeout duration in seconds. The timeout includes name resolution. + default_value: 1 + required: false + examples: + folding: + enabled: true + title: Config + list: + - name: Basic + description: A basic example configuration. + config: | + jobs: + - name: local + address: 127.0.0.1:25575 + password: somePassword + - name: Multi-instance + description: | + > **Note**: When you define multiple jobs, their names must be unique. + + Collecting metrics from local and remote instances. + config: | + jobs: + - name: local + address: 127.0.0.1:25575 + password: somePassword + + - name: remote + address: 203.0.113.0:25575 + password: somePassword + troubleshooting: + problems: + list: [] + alerts: [] + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: global + description: "These metrics refer to the entire monitored application." + labels: [] + metrics: + - name: spigotmc.players + description: Active Players + unit: "players" + chart_type: line + dimensions: + - name: players + - name: spigotmc.avg_tps + description: Average Ticks Per Second + unit: "ticks" + chart_type: line + dimensions: + - name: 1min + - name: 5min + - name: 15min + - name: spigotmc.memory + description: Memory Usage + unit: "bytes" + chart_type: area + dimensions: + - name: used + - name: alloc diff --git a/src/go/plugin/go.d/modules/spigotmc/spigotmc.go b/src/go/plugin/go.d/modules/spigotmc/spigotmc.go new file mode 100644 index 000000000..3029f558d --- /dev/null +++ b/src/go/plugin/go.d/modules/spigotmc/spigotmc.go @@ -0,0 +1,104 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package spigotmc + +import ( + _ "embed" + "errors" + "time" + + "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("spigotmc", module.Creator{ + JobConfigSchema: configSchema, + Create: func() module.Module { return New() }, + Config: func() any { return &Config{} }, + }) +} + +func New() *SpigotMC { + return &SpigotMC{ + Config: Config{ + Address: "127.0.0.1:25575", + Timeout: confopt.Duration(time.Second * 1), + }, + charts: charts.Copy(), + newConn: newRconConn, + } +} + +type Config struct { + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + Address string `yaml:"address" json:"address"` + Password string `yaml:"password" json:"password"` + Timeout confopt.Duration `yaml:"timeout,omitempty" json:"timeout"` +} + +type SpigotMC struct { + module.Base + Config `yaml:",inline" json:""` + + charts *module.Charts + + newConn func(Config) rconConn + conn rconConn +} + +func (s *SpigotMC)
Configuration() any { + return s.Config +} + +func (s *SpigotMC) Init() error { + if s.Address == "" { + return errors.New("config: 'address' required but not set") + } + if s.Password == "" { + return errors.New("config: 'password' required but not set") + } + return nil +} + +func (s *SpigotMC) Check() error { + mx, err := s.collect() + if err != nil { + return err + } + + if len(mx) == 0 { + return errors.New("no metrics collected") + } + + return nil +} + +func (s *SpigotMC) Charts() *module.Charts { + return s.charts +} + +func (s *SpigotMC) Collect() map[string]int64 { + mx, err := s.collect() + if err != nil { + s.Error(err) + } + + if len(mx) == 0 { + return nil + } + + return mx +} + +func (s *SpigotMC) Cleanup() { + if s.conn != nil { + if err := s.conn.disconnect(); err != nil { + s.Warningf("error on disconnect: %s", err) + } + s.conn = nil + } +} diff --git a/src/go/plugin/go.d/modules/spigotmc/spigotmc_test.go b/src/go/plugin/go.d/modules/spigotmc/spigotmc_test.go new file mode 100644 index 000000000..3c0d6a434 --- /dev/null +++ b/src/go/plugin/go.d/modules/spigotmc/spigotmc_test.go @@ -0,0 +1,351 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package spigotmc + +import ( + "errors" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" +) + +var ( + dataConfigJSON, _ = os.ReadFile("testdata/config.json") + dataConfigYAML, _ = os.ReadFile("testdata/config.yaml") + + dataRespTsp = ` +§6TPS from last 1m, 5m, 15m: §a*20.0, §a*20.0, §a*20.0 +§6Current Memory Usage: §a332/392 mb (Max: 6008 mb) +` + dataRespListClean = `There are 4 of a max 50 players online: player1, player2, player3, player4` + dataRespListDoubleS = `There are 4 of a max 50 players online: player1, player2, player3, player4` + dataRespListDoubleSWithHidden = `§6There are §c3§6/§c1§6 out of maximum §c50§6 players online.` + dataRespListDoubleSNonEng = `§6当前有 §c4§6 个玩家在线,最大在线人数为 §c50§6 个玩家.` +) + +func Test_testDataIsValid(t *testing.T) { + for name, data := range map[string][]byte{ + "dataConfigJSON": dataConfigJSON, + "dataConfigYAML": dataConfigYAML, + } { + require.NotNil(t, data, name) + } +} + +func TestSpigotMC_ConfigurationSerialize(t *testing.T) { + module.TestConfigurationSerialize(t, &SpigotMC{}, dataConfigJSON, dataConfigYAML) +} + +func TestSpigotMC_Init(t *testing.T) { + tests := map[string]struct { + config Config + wantFail bool + }{ + "fails with default config": { + wantFail: true, + config: New().Config, + }, + "fails if address not set": { + wantFail: true, + config: func() Config { + conf := New().Config + conf.Address = "" + conf.Password = "pass" + return conf + }(), + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + smc := New() + smc.Config = test.config + + if test.wantFail { + assert.Error(t, smc.Init()) + } else { + assert.NoError(t, smc.Init()) + } + }) + } +} + +func TestSpigotMC_Cleanup(t *testing.T) { + tests := map[string]struct { + prepare func() *SpigotMC + }{ + "not initialized": { + prepare: func() *SpigotMC { + return New() + }, + }, + "after check": { + prepare: func() *SpigotMC { + smc := New() + smc.newConn = func(config Config) rconConn { return prepareMockOk() } + _ = smc.Check() + return smc + }, + }, + "after collect": { + prepare: func() *SpigotMC { + smc := New() + smc.newConn = func(config Config) rconConn { return prepareMockOk() } + _ = smc.Collect() + return smc + }, + }, + } + + for name, test := range tests { + 
t.Run(name, func(t *testing.T) { + smc := test.prepare() + + assert.NotPanics(t, smc.Cleanup) + }) + } +} + +func TestSpigotMC_Charts(t *testing.T) { + assert.NotNil(t, New().Charts()) +} + +func TestSpigotMC_Check(t *testing.T) { + tests := map[string]struct { + prepareMock func() *mockRcon + wantFail bool + }{ + "success case": { + wantFail: false, + prepareMock: prepareMockOk, + }, + "err on connect": { + wantFail: true, + prepareMock: prepareMockErrOnConnect, + }, + "unexpected response": { + wantFail: true, + prepareMock: prepareMockUnexpectedResponse, + }, + "empty response": { + wantFail: true, + prepareMock: prepareMockEmptyResponse, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + smc := New() + mock := test.prepareMock() + smc.newConn = func(config Config) rconConn { return mock } + + if test.wantFail { + assert.Error(t, smc.Check()) + } else { + assert.NoError(t, smc.Check()) + } + }) + } +} + +func TestSpigotMC_Collect(t *testing.T) { + tests := map[string]struct { + prepareMock func() *mockRcon + wantMetrics map[string]int64 + disconnectBeforeCleanup bool + disconnectAfterCleanup bool + }{ + "success case: clean": { + prepareMock: prepareMockOkClean, + disconnectBeforeCleanup: false, + disconnectAfterCleanup: true, + wantMetrics: map[string]int64{ + "mem_alloc": 411041792, + "mem_max": 6299844608, + "mem_used": 348127232, + "players": 4, + "tps_15min": 2000, + "tps_1min": 2000, + "tps_5min": 2000, + }, + }, + "success case: double s": { + prepareMock: prepareMockOk, + disconnectBeforeCleanup: false, + disconnectAfterCleanup: true, + wantMetrics: map[string]int64{ + "mem_alloc": 411041792, + "mem_max": 6299844608, + "mem_used": 348127232, + "players": 4, + "tps_15min": 2000, + "tps_1min": 2000, + "tps_5min": 2000, + }, + }, + "success case: double s with hidden": { + prepareMock: prepareMockOkSWithHidden, + disconnectBeforeCleanup: false, + disconnectAfterCleanup: true, + wantMetrics: map[string]int64{ + "mem_alloc": 411041792, + "mem_max": 6299844608, + "mem_used": 348127232, + "players": 4, + "tps_15min": 2000, + "tps_1min": 2000, + "tps_5min": 2000, + }, + }, + "success case: non english": { + prepareMock: prepareMockOkSNonEng, + disconnectBeforeCleanup: false, + disconnectAfterCleanup: true, + wantMetrics: map[string]int64{ + "mem_alloc": 411041792, + "mem_max": 6299844608, + "mem_used": 348127232, + "players": 4, + "tps_15min": 2000, + "tps_1min": 2000, + "tps_5min": 2000, + }, + }, + "unexpected response": { + prepareMock: prepareMockUnexpectedResponse, + disconnectBeforeCleanup: true, + disconnectAfterCleanup: true, + }, + "empty response": { + prepareMock: prepareMockEmptyResponse, + disconnectBeforeCleanup: true, + disconnectAfterCleanup: true, + wantMetrics: nil, + }, + "err on connect": { + prepareMock: prepareMockErrOnConnect, + disconnectBeforeCleanup: false, + disconnectAfterCleanup: false, + }, + "err on query status": { + prepareMock: prepareMockErrOnQuery, + disconnectBeforeCleanup: true, + disconnectAfterCleanup: true, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + smc := New() + mock := test.prepareMock() + smc.newConn = func(config Config) rconConn { return mock } + + mx := smc.Collect() + + require.Equal(t, test.wantMetrics, mx, "want metrics") + + if len(test.wantMetrics) > 0 { + module.TestMetricsHasAllChartsDims(t, smc.Charts(), mx) + assert.Equal(t, len(charts), len(*smc.Charts()), "want charts") + } + + assert.Equal(t, test.disconnectBeforeCleanup, mock.disconnectCalled, "disconnect 
before cleanup") + smc.Cleanup() + assert.Equal(t, test.disconnectAfterCleanup, mock.disconnectCalled, "disconnect after cleanup") + }) + } +} + +func prepareMockOkClean() *mockRcon { + return &mockRcon{ + responseTps: dataRespTsp, + responseList: dataRespListClean, + } +} + +func prepareMockOk() *mockRcon { + return &mockRcon{ + responseTps: dataRespTsp, + responseList: dataRespListDoubleS, + } +} + +func prepareMockOkSWithHidden() *mockRcon { + return &mockRcon{ + responseTps: dataRespTsp, + responseList: dataRespListDoubleSWithHidden, + } +} + +func prepareMockOkSNonEng() *mockRcon { + return &mockRcon{ + responseTps: dataRespTsp, + responseList: dataRespListDoubleSNonEng, + } +} + +func prepareMockErrOnConnect() *mockRcon { + return &mockRcon{ + errOnConnect: true, + } +} + +func prepareMockErrOnQuery() *mockRcon { + return &mockRcon{ + errOnQuery: true, + } +} + +func prepareMockUnexpectedResponse() *mockRcon { + resp := "Lorem ipsum dolor sit amet, consectetur adipiscing elit." + + return &mockRcon{ + responseTps: resp, + responseList: resp, + } +} + +func prepareMockEmptyResponse() *mockRcon { + return &mockRcon{ + responseTps: "", + responseList: "", + } +} + +type mockRcon struct { + errOnConnect bool + responseTps string + responseList string + errOnQuery bool + disconnectCalled bool +} + +func (m *mockRcon) connect() error { + if m.errOnConnect { + return errors.New("mock.connect() error") + } + return nil +} + +func (m *mockRcon) disconnect() error { + m.disconnectCalled = true + return nil +} + +func (m *mockRcon) queryTps() (string, error) { + if m.errOnQuery { + return "", errors.New("mock.queryTps() error") + } + return m.responseTps, nil +} + +func (m *mockRcon) queryList() (string, error) { + if m.errOnQuery { + return "", errors.New("mock.queryList() error") + } + return m.responseList, nil +} diff --git a/src/go/plugin/go.d/modules/spigotmc/testdata/config.json b/src/go/plugin/go.d/modules/spigotmc/testdata/config.json new file mode 100644 index 000000000..29784f301 --- /dev/null +++ b/src/go/plugin/go.d/modules/spigotmc/testdata/config.json @@ -0,0 +1,6 @@ +{ + "update_every": 123, + "address": "ok", + "password": "ok", + "timeout": 123.123 +} diff --git a/src/go/plugin/go.d/modules/spigotmc/testdata/config.yaml b/src/go/plugin/go.d/modules/spigotmc/testdata/config.yaml new file mode 100644 index 000000000..38c116597 --- /dev/null +++ b/src/go/plugin/go.d/modules/spigotmc/testdata/config.yaml @@ -0,0 +1,4 @@ +update_every: 123 +address: "ok" +password: "ok" +timeout: 123.123 diff --git a/src/go/plugin/go.d/modules/squid/collect.go b/src/go/plugin/go.d/modules/squid/collect.go index bb0cf1ab4..889223a97 100644 --- a/src/go/plugin/go.d/modules/squid/collect.go +++ b/src/go/plugin/go.d/modules/squid/collect.go @@ -6,7 +6,6 @@ import ( "bufio" "fmt" "io" - "net/http" "strconv" "strings" @@ -42,12 +41,12 @@ func (s *Squid) collect() (map[string]int64, error) { } func (s *Squid) collectCounters(mx map[string]int64) error { - req, err := web.NewHTTPRequestWithPath(s.Request, urlPathServerStats) + req, err := web.NewHTTPRequestWithPath(s.RequestConfig, urlPathServerStats) if err != nil { - return err + return fmt.Errorf("failed to create '%s' request: %w", urlPathServerStats, err) } - if err := s.doOK(req, func(body io.Reader) error { + return web.DoHTTP(s.httpClient).Request(req, func(body io.Reader) error { sc := bufio.NewScanner(body) for sc.Scan() { @@ -70,36 +69,10 @@ func (s *Squid) collectCounters(mx map[string]int64) error { mx[key] = v } - return nil - }); err != nil 
{ - return err - } - - if len(mx) == 0 { - return fmt.Errorf("unexpected response from '%s': no metrics found", req.URL) - } - - return nil -} - -func (s *Squid) doOK(req *http.Request, parse func(body io.Reader) error) error { - resp, err := s.httpClient.Do(req) - if err != nil { - return fmt.Errorf("error on HTTP request '%s': %v", req.URL, err) - } - defer closeBody(resp) - - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("'%s' returned HTTP status code: %d", req.URL, resp.StatusCode) - } - - return parse(resp.Body) -} - -func closeBody(resp *http.Response) { - if resp != nil && resp.Body != nil { - _, _ = io.Copy(io.Discard, resp.Body) - _ = resp.Body.Close() - } + if len(mx) == 0 { + return fmt.Errorf("unexpected response from '%s': no metrics found", req.URL) + } + return nil + }) } diff --git a/src/go/plugin/go.d/modules/squid/config_schema.json b/src/go/plugin/go.d/modules/squid/config_schema.json index b1264b2b1..3a29522d4 100644 --- a/src/go/plugin/go.d/modules/squid/config_schema.json +++ b/src/go/plugin/go.d/modules/squid/config_schema.json @@ -105,7 +105,6 @@ "required": [ "url" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/squid/integrations/squid.md b/src/go/plugin/go.d/modules/squid/integrations/squid.md index 1a448de35..b2894664f 100644 --- a/src/go/plugin/go.d/modules/squid/integrations/squid.md +++ b/src/go/plugin/go.d/modules/squid/integrations/squid.md @@ -93,8 +93,8 @@ No action required. The configuration file name for this integration is `go.d/squid.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/squid/squid.go b/src/go/plugin/go.d/modules/squid/squid.go index fe9c15ecb..a2fb178ec 100644 --- a/src/go/plugin/go.d/modules/squid/squid.go +++ b/src/go/plugin/go.d/modules/squid/squid.go @@ -5,10 +5,12 @@ package squid import ( _ "embed" "errors" + "fmt" "net/http" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -26,12 +28,12 @@ func init() { func New() *Squid { return &Squid{ Config: Config{ - HTTP: web.HTTP{ - Request: web.Request{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{ URL: "http://127.0.0.1:3128", }, - Client: web.Client{ - Timeout: web.Duration(time.Second * 1), + ClientConfig: web.ClientConfig{ + Timeout: confopt.Duration(time.Second * 1), }, }, }, @@ -40,8 +42,8 @@ func New() *Squid { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - web.HTTP `yaml:",inline" json:""` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + web.HTTPConfig `yaml:",inline" json:""` } type Squid struct { @@ -59,14 +61,12 @@ func (s *Squid) Configuration() any { func (s *Squid) Init() error { if s.URL == "" { - s.Error("URL not set") - return errors.New("url not set") + return errors.New("config: url not set") } - client, err := web.NewHTTPClient(s.Client) + client, err := web.NewHTTPClient(s.ClientConfig) if err != nil { - s.Error(err) - return err + return fmt.Errorf("init http client: %w", err) } s.httpClient = client @@ -79,7 +79,6 @@ func (s *Squid) Init() error { func (s *Squid) Check() error { mx, err := s.collect() if err != nil { - s.Error(err) return err } diff --git a/src/go/plugin/go.d/modules/squid/squid_test.go b/src/go/plugin/go.d/modules/squid/squid_test.go index c0856f89d..304ef3c27 100644 --- a/src/go/plugin/go.d/modules/squid/squid_test.go +++ b/src/go/plugin/go.d/modules/squid/squid_test.go @@ -47,8 +47,8 @@ func TestSquid_Init(t *testing.T) { "fail when URL not set": { wantFail: true, config: Config{ - HTTP: web.HTTP{ - Request: web.Request{URL: ""}, + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{URL: ""}, }, }, }, @@ -187,7 +187,7 @@ Fusce et felis pulvinar, posuere sem non, porttitor eros.`) srv := httptest.NewServer(http.HandlerFunc( func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write([]byte(resp)) + _, _ = w.Write(resp) })) squid := New() @@ -203,7 +203,7 @@ func prepareCaseEmptyResponse(t *testing.T) (*Squid, func()) { srv := httptest.NewServer(http.HandlerFunc( func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write([]byte(resp)) + _, _ = w.Write(resp) })) squid := New() diff --git a/src/go/plugin/go.d/modules/squidlog/config_schema.json b/src/go/plugin/go.d/modules/squidlog/config_schema.json index 47e55b09b..ae3e78892 100644 --- a/src/go/plugin/go.d/modules/squidlog/config_schema.json +++ b/src/go/plugin/go.d/modules/squidlog/config_schema.json @@ -40,7 +40,6 @@ "path", "log_type" ], - "additionalProperties": false, "patternProperties": { "^name$": {} }, diff --git a/src/go/plugin/go.d/modules/squidlog/integrations/squid_log_files.md b/src/go/plugin/go.d/modules/squidlog/integrations/squid_log_files.md index 7d1e4799e..16bffc51e 100644 --- a/src/go/plugin/go.d/modules/squidlog/integrations/squid_log_files.md +++ b/src/go/plugin/go.d/modules/squidlog/integrations/squid_log_files.md @@ -104,8 
+104,8 @@ No action required. The configuration file name for this integration is `go.d/squidlog.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -113,7 +113,7 @@ sudo ./edit-config go.d/squidlog.conf ``` #### Options -Squid [log format codes](http://www.squid-cache.org/Doc/config/logformat/). +Squid [log format codes](https://www.squid-cache.org/Doc/config/logformat/). Squidlog is aware how to parse and interpret the following codes: diff --git a/src/go/plugin/go.d/modules/squidlog/metadata.yaml b/src/go/plugin/go.d/modules/squidlog/metadata.yaml index 82712f9e5..19736fc77 100644 --- a/src/go/plugin/go.d/modules/squidlog/metadata.yaml +++ b/src/go/plugin/go.d/modules/squidlog/metadata.yaml @@ -46,7 +46,7 @@ modules: name: go.d/squidlog.conf options: description: | - Squid [log format codes](http://www.squid-cache.org/Doc/config/logformat/). + Squid [log format codes](https://www.squid-cache.org/Doc/config/logformat/). Squidlog is aware how to parse and interpret the following codes: diff --git a/src/go/plugin/go.d/modules/squidlog/squidlog.go b/src/go/plugin/go.d/modules/squidlog/squidlog.go index e2e743c69..5663c4faa 100644 --- a/src/go/plugin/go.d/modules/squidlog/squidlog.go +++ b/src/go/plugin/go.d/modules/squidlog/squidlog.go @@ -4,6 +4,7 @@ package squidlog import ( _ "embed" + "fmt" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/logs" @@ -72,18 +73,15 @@ func (s *SquidLog) Init() error { func (s *SquidLog) Check() error { // Note: these inits are here to make auto-detection retry working if err := s.createLogReader(); err != nil { - s.Warning("check failed: ", err) - return err + return fmt.Errorf("failed to create log reader: %v", err) } if err := s.createParser(); err != nil { - s.Warning("check failed: ", err) - return err + return fmt.Errorf("failed to create log parser: %v", err) } if err := s.createCharts(s.line); err != nil { - s.Warning("check failed: ", err) - return err + return fmt.Errorf("failed to create log charts: %v", err) } return nil diff --git a/src/go/plugin/go.d/modules/squidlog/squidlog_test.go b/src/go/plugin/go.d/modules/squidlog/squidlog_test.go index eb5ce635f..37c4e3afd 100644 --- a/src/go/plugin/go.d/modules/squidlog/squidlog_test.go +++ b/src/go/plugin/go.d/modules/squidlog/squidlog_test.go @@ -7,10 +7,10 @@ import ( "os" "testing" + "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/logs" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/metrics" - "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -238,16 +238,18 @@ func TestSquidLog_Collect_ReturnOldDataIfNothingRead(t *testing.T) { } _ = squid.Collect() - collected := squid.Collect() - assert.Equal(t, expected, collected) - testCharts(t, squid, collected) + mx := squid.Collect() + + assert.Equal(t, 
expected, mx) + + testCharts(t, squid, mx) } -func testCharts(t *testing.T, squidlog *SquidLog, collected map[string]int64) { +func testCharts(t *testing.T, squidlog *SquidLog, mx map[string]int64) { t.Helper() ensureChartsDynamicDimsCreated(t, squidlog) - ensureCollectedHasAllChartsDimsVarsIDs(t, squidlog, collected) + module.TestMetricsHasAllChartsDims(t, squidlog.Charts(), mx) } func ensureChartsDynamicDimsCreated(t *testing.T, squid *SquidLog) { @@ -276,19 +278,6 @@ func ensureDynamicDimsCreated(t *testing.T, squid *SquidLog, chartID, dimPrefix } } -func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, s *SquidLog, collected map[string]int64) { - for _, chart := range *s.Charts() { - for _, dim := range chart.Dims { - _, ok := collected[dim.ID] - assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) - } - for _, v := range chart.Vars { - _, ok := collected[v.ID] - assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) - } - } -} - func prepareSquidCollect(t *testing.T) *SquidLog { t.Helper() squid := New() diff --git a/src/go/plugin/go.d/modules/storcli/charts.go b/src/go/plugin/go.d/modules/storcli/charts.go index 3e0c07c1d..6f4e51e96 100644 --- a/src/go/plugin/go.d/modules/storcli/charts.go +++ b/src/go/plugin/go.d/modules/storcli/charts.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly + package storcli import ( @@ -14,6 +16,7 @@ const ( prioControllerHealthStatus = module.Priority + iota prioControllerStatus prioControllerBBUStatus + prioControllerROCTemperature prioPhysDriveErrors prioPhysDrivePredictiveFailures @@ -31,6 +34,7 @@ var controllerMegaraidChartsTmpl = module.Charts{ var controllerMpt3sasChartsTmpl = module.Charts{ controllerHealthStatusChartTmpl.Copy(), + controllerROCTemperatureChartTmpl.Copy(), } var ( @@ -76,6 +80,18 @@ var ( {ID: "cntrl_%s_bbu_status_na", Name: "na"}, }, } + controllerROCTemperatureChartTmpl = module.Chart{ + ID: "controller_%s_roc_temperature", + Title: "Controller ROC temperature", + Units: "Celsius", + Fam: "cntrl roc temperature", + Ctx: "storcli.controller_roc_temperature", + Type: module.Line, + Priority: prioControllerROCTemperature, + Dims: module.Dims{ + {ID: "cntrl_%s_roc_temperature_celsius", Name: "temperature"}, + }, + } ) var physDriveChartsTmpl = module.Charts{ @@ -165,6 +181,9 @@ func (s *StorCli) addControllerCharts(cntrl controllerInfo) { charts = controllerMegaraidChartsTmpl.Copy() case driverNameSas: charts = controllerMpt3sasChartsTmpl.Copy() + if !strings.EqualFold(cntrl.HwCfg.TemperatureSensorForROC, "present") { + _ = charts.Remove(controllerROCTemperatureChartTmpl.ID) + } default: return } diff --git a/src/go/plugin/go.d/modules/storcli/collect.go b/src/go/plugin/go.d/modules/storcli/collect.go index df2b09d87..824bb7ebb 100644 --- a/src/go/plugin/go.d/modules/storcli/collect.go +++ b/src/go/plugin/go.d/modules/storcli/collect.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly + package storcli import "fmt" diff --git a/src/go/plugin/go.d/modules/storcli/collect_controllers.go b/src/go/plugin/go.d/modules/storcli/collect_controllers.go index 64d615946..430d2a7c0 100644 --- a/src/go/plugin/go.d/modules/storcli/collect_controllers.go +++ b/src/go/plugin/go.d/modules/storcli/collect_controllers.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || 
openbsd || netbsd || dragonfly + package storcli import ( @@ -33,6 +35,10 @@ type ( ControllerStatus string `json:"Controller Status"` BBUStatus *storNumber `json:"BBU Status"` } `json:"Status"` + HwCfg struct { + TemperatureSensorForROC string `json:"Temperature Sensor for ROC"` + ROCTemperatureC int `json:"ROC temperature(Degree Celsius)"` + } `json:"HwCfg"` BBUInfo []struct { Model string `json:"Model"` State string `json:"State"` @@ -119,11 +125,16 @@ func (s *StorCli) collectMpt3sasControllersInfo(mx map[string]int64, resp *contr for _, st := range []string{"healthy", "unhealthy"} { mx[px+"health_status_"+st] = 0 } - if strings.ToLower(cntrl.Status.ControllerStatus) == "ok" { + + if strings.EqualFold(cntrl.Status.ControllerStatus, "ok") { mx[px+"health_status_healthy"] = 1 } else { mx[px+"health_status_unhealthy"] = 1 } + + if strings.EqualFold(cntrl.HwCfg.TemperatureSensorForROC, "present") { + mx[px+"roc_temperature_celsius"] = int64(cntrl.HwCfg.ROCTemperatureC) + } } return nil diff --git a/src/go/plugin/go.d/modules/storcli/collect_drives.go b/src/go/plugin/go.d/modules/storcli/collect_drives.go index 95965d572..c18dda631 100644 --- a/src/go/plugin/go.d/modules/storcli/collect_drives.go +++ b/src/go/plugin/go.d/modules/storcli/collect_drives.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly + package storcli import ( diff --git a/src/go/plugin/go.d/modules/storcli/config_schema.json b/src/go/plugin/go.d/modules/storcli/config_schema.json index 226a370f4..af92540c7 100644 --- a/src/go/plugin/go.d/modules/storcli/config_schema.json +++ b/src/go/plugin/go.d/modules/storcli/config_schema.json @@ -19,7 +19,6 @@ "default": 2 } }, - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/storcli/doc.go b/src/go/plugin/go.d/modules/storcli/doc.go new file mode 100644 index 000000000..fdf4eafbd --- /dev/null +++ b/src/go/plugin/go.d/modules/storcli/doc.go @@ -0,0 +1,3 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package storcli diff --git a/src/go/plugin/go.d/modules/storcli/exec.go b/src/go/plugin/go.d/modules/storcli/exec.go index 5be88a899..367159d8b 100644 --- a/src/go/plugin/go.d/modules/storcli/exec.go +++ b/src/go/plugin/go.d/modules/storcli/exec.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly + package storcli import ( diff --git a/src/go/plugin/go.d/modules/storcli/init.go b/src/go/plugin/go.d/modules/storcli/init.go index d35ad07db..2f6299c86 100644 --- a/src/go/plugin/go.d/modules/storcli/init.go +++ b/src/go/plugin/go.d/modules/storcli/init.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly + package storcli import ( diff --git a/src/go/plugin/go.d/modules/storcli/integrations/storecli_raid.md b/src/go/plugin/go.d/modules/storcli/integrations/storecli_raid.md index 9b8b28480..88b1d16ef 100644 --- a/src/go/plugin/go.d/modules/storcli/integrations/storecli_raid.md +++ b/src/go/plugin/go.d/modules/storcli/integrations/storecli_raid.md @@ -33,7 +33,10 @@ Executed commands: -This collector is supported on all platforms. +This collector is only supported on the following platforms: + +- Linux +- BSD This collector only supports collecting metrics from a single instance of this integration. 
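As context for the ROC temperature changes in this patch: the collector decodes the controller's `HwCfg` object from `storcli` JSON output and exports the temperature only when a sensor is reported as present. A minimal sketch of that decode-and-gate step; the JSON sample is hypothetical and trimmed to the two fields the patch consumes (real `storcli` output carries many more):

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"strings"
)

// Mirrors the two HwCfg fields added in collect_controllers.go above.
type hwCfg struct {
	TemperatureSensorForROC string `json:"Temperature Sensor for ROC"`
	ROCTemperatureC         int    `json:"ROC temperature(Degree Celsius)"`
}

func main() {
	// Hypothetical, trimmed controller JSON.
	raw := []byte(`{"Temperature Sensor for ROC": "Present", "ROC temperature(Degree Celsius)": 44}`)

	var cfg hwCfg
	if err := json.Unmarshal(raw, &cfg); err != nil {
		log.Fatal(err)
	}

	// Same gate as the collector: only export the reading when a sensor exists.
	if strings.EqualFold(cfg.TemperatureSensorForROC, "present") {
		fmt.Println("cntrl_0_roc_temperature_celsius =", cfg.ROCTemperatureC)
	}
}
```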
@@ -80,6 +83,7 @@ Metrics: | storcli.controller_health_status | healthy, unhealthy | status | | storcli.controller_status | optimal, degraded, partially_degraded, failed | status | | storcli.controller_bbu_status | healthy, unhealthy, na | status | +| storcli.controller_roc_temperature | temperature | Celsius | ### Per physical drive @@ -149,8 +153,8 @@ No action required. The configuration file name for this integration is `go.d/storcli.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/storcli/metadata.yaml b/src/go/plugin/go.d/modules/storcli/metadata.yaml index 7e807f056..77a850ff5 100644 --- a/src/go/plugin/go.d/modules/storcli/metadata.yaml +++ b/src/go/plugin/go.d/modules/storcli/metadata.yaml @@ -33,7 +33,7 @@ modules: - `storcli /cALL/eALL/sALL show all J nolog` method_description: "" supported_platforms: - include: [] + include: [Linux, BSD] exclude: [] multi_instance: false additional_permissions: @@ -138,6 +138,12 @@ modules: - name: healthy - name: unhealthy - name: na + - name: storcli.controller_roc_temperature + description: Controller ROC temperature + unit: Celsius + chart_type: line + dimensions: + - name: temperature - name: physical drive description: These metrics refer to the Physical Drive. 
labels: diff --git a/src/go/plugin/go.d/modules/storcli/storcli.go b/src/go/plugin/go.d/modules/storcli/storcli.go index 0133c4700..52565210f 100644 --- a/src/go/plugin/go.d/modules/storcli/storcli.go +++ b/src/go/plugin/go.d/modules/storcli/storcli.go @@ -1,14 +1,17 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly + package storcli import ( _ "embed" "errors" + "fmt" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" ) //go:embed "config_schema.json" @@ -28,7 +31,7 @@ func init() { func New() *StorCli { return &StorCli{ Config: Config{ - Timeout: web.Duration(time.Second * 2), + Timeout: confopt.Duration(time.Second * 2), }, charts: &module.Charts{}, controllers: make(map[string]bool), @@ -38,8 +41,8 @@ func New() *StorCli { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + Timeout confopt.Duration `yaml:"timeout,omitempty" json:"timeout"` } type ( @@ -68,8 +71,7 @@ func (s *StorCli) Configuration() any { func (s *StorCli) Init() error { storExec, err := s.initStorCliExec() if err != nil { - s.Errorf("storcli exec initialization: %v", err) - return err + return fmt.Errorf("storcli exec initialization: %v", err) } s.exec = storExec @@ -79,7 +81,6 @@ func (s *StorCli) Init() error { func (s *StorCli) Check() error { mx, err := s.collect() if err != nil { - s.Error(err) return err } diff --git a/src/go/plugin/go.d/modules/storcli/storcli_test.go b/src/go/plugin/go.d/modules/storcli/storcli_test.go index 63ee54b56..e065d41e2 100644 --- a/src/go/plugin/go.d/modules/storcli/storcli_test.go +++ b/src/go/plugin/go.d/modules/storcli/storcli_test.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly + package storcli import ( @@ -205,6 +207,7 @@ func TestStorCli_Collect(t *testing.T) { wantMetrics: map[string]int64{ "cntrl_0_health_status_healthy": 1, "cntrl_0_health_status_unhealthy": 0, + "cntrl_0_roc_temperature_celsius": 44, }, }, "err on exec": { @@ -230,8 +233,10 @@ func TestStorCli_Collect(t *testing.T) { mx := stor.Collect() assert.Equal(t, test.wantMetrics, mx) - assert.Len(t, *stor.Charts(), test.wantCharts) - testMetricsHasAllChartsDims(t, stor, mx) + + assert.Len(t, *stor.Charts(), test.wantCharts, "wantCharts") + + module.TestMetricsHasAllChartsDims(t, stor.Charts(), mx) }) } } @@ -291,19 +296,3 @@ func (m *mockStorCliExec) drivesInfo() ([]byte, error) { } return m.drivesInfoData, nil } - -func testMetricsHasAllChartsDims(t *testing.T, stor *StorCli, mx map[string]int64) { - for _, chart := range *stor.Charts() { - if chart.Obsolete { - continue - } - for _, dim := range chart.Dims { - _, ok := mx[dim.ID] - assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) - } - for _, v := range chart.Vars { - _, ok := mx[v.ID] - assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) - } - } -} diff --git a/src/go/plugin/go.d/modules/supervisord/client.go b/src/go/plugin/go.d/modules/supervisord/client.go index da62ca21c..48bad7910 100644 --- a/src/go/plugin/go.d/modules/supervisord/client.go +++ b/src/go/plugin/go.d/modules/supervisord/client.go @@ -28,7 +28,7 @@ 
func newSupervisorRPCClient(serverURL *url.URL, httpClient *http.Client) (superv
	c := xmlrpc.NewClient("http://unix/RPC2")
	t, ok := httpClient.Transport.(*http.Transport)
	if !ok {
		return nil, errors.New("unexpected HTTP client transport")
	}
	t.DialContext = func(ctx context.Context, _, _ string) (net.Conn, error) {
		d := net.Dialer{Timeout: httpClient.Timeout}
@@ -66,7 +66,7 @@ func (c *supervisorRPCClient) closeIdleConnections() {
	c.client.HttpClient.CloseIdleConnections()
}
-func parseGetAllProcessInfo(resp interface{}) ([]processStatus, error) {
+func parseGetAllProcessInfo(resp any) ([]processStatus, error) {
	arr, ok := resp.(xmlrpc.Array)
	if !ok {
		return nil, fmt.Errorf("unexpected response type, want=xmlrpc.Array, got=%T", resp)
diff --git a/src/go/plugin/go.d/modules/supervisord/config_schema.json b/src/go/plugin/go.d/modules/supervisord/config_schema.json
index 8d3c4e943..ac4c617b7 100644
--- a/src/go/plugin/go.d/modules/supervisord/config_schema.json
+++ b/src/go/plugin/go.d/modules/supervisord/config_schema.json
@@ -49,7 +49,6 @@
   "required": [
     "url"
   ],
-  "additionalProperties": false,
   "patternProperties": {
     "^name$": {}
   }
diff --git a/src/go/plugin/go.d/modules/supervisord/init.go b/src/go/plugin/go.d/modules/supervisord/init.go
index c7ccc06b5..1c11618c1 100644
--- a/src/go/plugin/go.d/modules/supervisord/init.go
+++ b/src/go/plugin/go.d/modules/supervisord/init.go
@@ -22,7 +22,7 @@ func (s *Supervisord) initSupervisorClient() (supervisorClient, error) {
	if err != nil {
		return nil, fmt.Errorf("parse 'url': %v (%s)", err, s.URL)
	}
-	httpClient, err := web.NewHTTPClient(s.Client)
+	httpClient, err := web.NewHTTPClient(s.ClientConfig)
	if err != nil {
		return nil, fmt.Errorf("create HTTP client: %v", err)
	}
diff --git a/src/go/plugin/go.d/modules/supervisord/integrations/supervisor.md b/src/go/plugin/go.d/modules/supervisord/integrations/supervisor.md
index ba302e4a0..b3cd67751 100644
--- a/src/go/plugin/go.d/modules/supervisord/integrations/supervisor.md
+++ b/src/go/plugin/go.d/modules/supervisord/integrations/supervisor.md
@@ -111,8 +111,8 @@ No action required.
The configuration file name for this integration is `go.d/supervisord.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/supervisord/supervisord.go b/src/go/plugin/go.d/modules/supervisord/supervisord.go index 0988cfc88..0b5ffa89e 100644 --- a/src/go/plugin/go.d/modules/supervisord/supervisord.go +++ b/src/go/plugin/go.d/modules/supervisord/supervisord.go @@ -5,9 +5,11 @@ package supervisord import ( _ "embed" "errors" + "fmt" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -26,8 +28,8 @@ func New() *Supervisord { return &Supervisord{ Config: Config{ URL: "http://127.0.0.1:9001/RPC2", - Client: web.Client{ - Timeout: web.Duration(time.Second), + ClientConfig: web.ClientConfig{ + Timeout: confopt.Duration(time.Second), }, }, @@ -37,9 +39,9 @@ func New() *Supervisord { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - URL string `yaml:"url" json:"url"` - web.Client `yaml:",inline" json:""` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + URL string `yaml:"url" json:"url"` + web.ClientConfig `yaml:",inline" json:""` } type ( @@ -66,14 +68,12 @@ func (s *Supervisord) Configuration() any { func (s *Supervisord) Init() error { err := s.verifyConfig() if err != nil { - s.Errorf("verify config: %v", err) - return err + return fmt.Errorf("verify config: %v", err) } client, err := s.initSupervisorClient() if err != nil { - s.Errorf("init supervisord client: %v", err) - return err + return fmt.Errorf("init supervisord client: %v", err) } s.client = client @@ -83,7 +83,6 @@ func (s *Supervisord) Init() error { func (s *Supervisord) Check() error { mx, err := s.collect() if err != nil { - s.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/supervisord/supervisord_test.go b/src/go/plugin/go.d/modules/supervisord/supervisord_test.go index 7eb5df53a..eb4ed65c7 100644 --- a/src/go/plugin/go.d/modules/supervisord/supervisord_test.go +++ b/src/go/plugin/go.d/modules/supervisord/supervisord_test.go @@ -169,40 +169,15 @@ func TestSupervisord_Collect(t *testing.T) { supvr := test.prepare(t) defer supvr.Cleanup() - ms := supvr.Collect() - assert.Equal(t, test.wantCollected, ms) + mx := supvr.Collect() + assert.Equal(t, test.wantCollected, mx) if len(test.wantCollected) > 0 { - ensureCollectedHasAllChartsDimsVarsIDs(t, supvr, ms) - ensureCollectedProcessesAddedToCharts(t, supvr) + module.TestMetricsHasAllChartsDims(t, supvr.Charts(), mx) } }) } } -func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, supvr *Supervisord, ms map[string]int64) { - for _, chart := range *supvr.Charts() { - if chart.Obsolete { - continue - } - for _, dim := range chart.Dims { - _, ok := ms[dim.ID] - assert.Truef(t, ok, "chart '%s' dim '%s': no dim in collected", dim.ID, chart.ID) - } - for _, v := range chart.Vars { - _, ok := ms[v.ID] - assert.Truef(t, ok, "chart '%s' dim '%s': no dim in collected", v.ID, chart.ID) - } - } -} - -func ensureCollectedProcessesAddedToCharts(t *testing.T, supvr *Supervisord) { - for group := range supvr.cache { - for _, c := range *newProcGroupCharts(group) { - assert.NotNilf(t, supvr.Charts().Get(c.ID), "'%s' chart is not in charts", c.ID) - } - } -} - func prepareSupervisordSuccessOnGetAllProcessInfo(t *testing.T) *Supervisord { supvr := New() require.NoError(t, supvr.Init()) diff --git a/src/go/plugin/go.d/modules/systemdunits/charts.go 
b/src/go/plugin/go.d/modules/systemdunits/charts.go index 9f1f56b70..c493564bd 100644 --- a/src/go/plugin/go.d/modules/systemdunits/charts.go +++ b/src/go/plugin/go.d/modules/systemdunits/charts.go @@ -1,7 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later //go:build linux -// +build linux package systemdunits diff --git a/src/go/plugin/go.d/modules/systemdunits/client.go b/src/go/plugin/go.d/modules/systemdunits/client.go index e6363d132..969da35f6 100644 --- a/src/go/plugin/go.d/modules/systemdunits/client.go +++ b/src/go/plugin/go.d/modules/systemdunits/client.go @@ -1,7 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later //go:build linux -// +build linux package systemdunits diff --git a/src/go/plugin/go.d/modules/systemdunits/collect.go b/src/go/plugin/go.d/modules/systemdunits/collect.go index 0d61c9998..7402c32d9 100644 --- a/src/go/plugin/go.d/modules/systemdunits/collect.go +++ b/src/go/plugin/go.d/modules/systemdunits/collect.go @@ -1,7 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later //go:build linux -// +build linux package systemdunits diff --git a/src/go/plugin/go.d/modules/systemdunits/collect_unit_files.go b/src/go/plugin/go.d/modules/systemdunits/collect_unit_files.go index eff2d6ecb..a91d2544a 100644 --- a/src/go/plugin/go.d/modules/systemdunits/collect_unit_files.go +++ b/src/go/plugin/go.d/modules/systemdunits/collect_unit_files.go @@ -1,7 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later //go:build linux -// +build linux package systemdunits diff --git a/src/go/plugin/go.d/modules/systemdunits/collect_units.go b/src/go/plugin/go.d/modules/systemdunits/collect_units.go index 0cf97af03..44b5a669c 100644 --- a/src/go/plugin/go.d/modules/systemdunits/collect_units.go +++ b/src/go/plugin/go.d/modules/systemdunits/collect_units.go @@ -1,7 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later //go:build linux -// +build linux package systemdunits diff --git a/src/go/plugin/go.d/modules/systemdunits/config_schema.json b/src/go/plugin/go.d/modules/systemdunits/config_schema.json index 016e984ce..4ebf013a7 100644 --- a/src/go/plugin/go.d/modules/systemdunits/config_schema.json +++ b/src/go/plugin/go.d/modules/systemdunits/config_schema.json @@ -75,7 +75,6 @@ "required": [ "include" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/systemdunits/init.go b/src/go/plugin/go.d/modules/systemdunits/init.go index 8a1b579c1..636c3a029 100644 --- a/src/go/plugin/go.d/modules/systemdunits/init.go +++ b/src/go/plugin/go.d/modules/systemdunits/init.go @@ -1,7 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later //go:build linux -// +build linux package systemdunits @@ -9,7 +8,7 @@ import ( "errors" "strings" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher" + "github.com/netdata/netdata/go/plugins/pkg/matcher" ) func (s *SystemdUnits) validateConfig() error { diff --git a/src/go/plugin/go.d/modules/systemdunits/integrations/systemd_units.md b/src/go/plugin/go.d/modules/systemdunits/integrations/systemd_units.md index a2ff90b0d..2243e28a2 100644 --- a/src/go/plugin/go.d/modules/systemdunits/integrations/systemd_units.md +++ b/src/go/plugin/go.d/modules/systemdunits/integrations/systemd_units.md @@ -132,8 +132,8 @@ No action required. The configuration file name for this integration is `go.d/systemdunits.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/systemdunits/systemdunits.go b/src/go/plugin/go.d/modules/systemdunits/systemdunits.go index 9a3478768..34e9c3e5b 100644 --- a/src/go/plugin/go.d/modules/systemdunits/systemdunits.go +++ b/src/go/plugin/go.d/modules/systemdunits/systemdunits.go @@ -1,18 +1,18 @@ // SPDX-License-Identifier: GPL-3.0-or-later //go:build linux -// +build linux package systemdunits import ( _ "embed" "errors" + "fmt" "time" + "github.com/netdata/netdata/go/plugins/pkg/matcher" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/coreos/go-systemd/v22/dbus" ) @@ -34,12 +34,12 @@ func init() { func New() *SystemdUnits { return &SystemdUnits{ Config: Config{ - Timeout: web.Duration(time.Second * 2), + Timeout: confopt.Duration(time.Second * 2), Include: []string{"*.service"}, SkipTransient: false, CollectUnitFiles: false, IncludeUnitFiles: []string{"*.service"}, - CollectUnitFilesEvery: web.Duration(time.Minute * 5), + CollectUnitFilesEvery: confopt.Duration(time.Minute * 5), }, charts: &module.Charts{}, client: newSystemdDBusClient(), @@ -50,13 +50,13 @@ func New() *SystemdUnits { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"` - Include []string `yaml:"include,omitempty" json:"include"` - SkipTransient bool `yaml:"skip_transient" json:"skip_transient"` - CollectUnitFiles bool `yaml:"collect_unit_files" json:"collect_unit_files"` - IncludeUnitFiles []string `yaml:"include_unit_files,omitempty" json:"include_unit_files"` - CollectUnitFilesEvery web.Duration `yaml:"collect_unit_files_every,omitempty" json:"collect_unit_files_every"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + Timeout confopt.Duration `yaml:"timeout,omitempty" json:"timeout"` + Include []string `yaml:"include,omitempty" json:"include"` + SkipTransient bool `yaml:"skip_transient" json:"skip_transient"` + CollectUnitFiles bool `yaml:"collect_unit_files" json:"collect_unit_files"` + IncludeUnitFiles []string `yaml:"include_unit_files,omitempty" json:"include_unit_files"` + CollectUnitFilesEvery confopt.Duration `yaml:"collect_unit_files_every,omitempty" json:"collect_unit_files_every"` } type SystemdUnits struct { @@ -85,14 +85,12 @@ func (s *SystemdUnits) Configuration() any { func (s *SystemdUnits) Init() error { if err := s.validateConfig(); err != nil { - s.Errorf("config validation: %v", err) - return err + return fmt.Errorf("config validation: %v", err) } sr, err := s.initUnitSelector() if err != nil { - s.Errorf("init unit selector: %v", err) - return err + return fmt.Errorf("init unit selector: %v", err) } s.unitSr = sr @@ -107,7 +105,6 @@ func (s *SystemdUnits) Init() error { func (s *SystemdUnits) Check() error { mx, err := s.collect() if err != nil { - s.Error(err) return err } diff --git 
a/src/go/plugin/go.d/modules/systemdunits/systemdunits_test.go b/src/go/plugin/go.d/modules/systemdunits/systemdunits_test.go index 7074e186e..188de9ec1 100644 --- a/src/go/plugin/go.d/modules/systemdunits/systemdunits_test.go +++ b/src/go/plugin/go.d/modules/systemdunits/systemdunits_test.go @@ -1,7 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later //go:build linux -// +build linux package systemdunits @@ -862,7 +861,7 @@ func TestSystemdUnits_Collect(t *testing.T) { assert.Equal(t, test.wantCollected, mx) if len(test.wantCollected) > 0 { - ensureCollectedHasAllChartsDimsVarsIDs(t, systemd, mx) + module.TestMetricsHasAllChartsDims(t, systemd.Charts(), mx) } }) } @@ -884,22 +883,6 @@ func TestSystemdUnits_connectionReuse(t *testing.T) { assert.Equal(t, 1, client.connectCalls) } -func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, sd *SystemdUnits, collected map[string]int64) { - for _, chart := range *sd.Charts() { - if chart.Obsolete { - continue - } - for _, dim := range chart.Dims { - _, ok := collected[dim.ID] - assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) - } - for _, v := range chart.Vars { - _, ok := collected[v.ID] - assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) - } - } -} - func prepareOKClient(ver int) *mockClient { return &mockClient{ conn: &mockConn{ diff --git a/src/go/plugin/go.d/modules/tengine/apiclient.go b/src/go/plugin/go.d/modules/tengine/apiclient.go deleted file mode 100644 index e91b99769..000000000 --- a/src/go/plugin/go.d/modules/tengine/apiclient.go +++ /dev/null @@ -1,247 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package tengine - -import ( - "bufio" - "fmt" - "io" - "net/http" - "strconv" - "strings" - - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" -) - -const ( - bytesIn = "bytes_in" - bytesOut = "bytes_out" - connTotal = "conn_total" - reqTotal = "req_total" - http2xx = "http_2xx" - http3xx = "http_3xx" - http4xx = "http_4xx" - http5xx = "http_5xx" - httpOtherStatus = "http_other_status" - rt = "rt" - upsReq = "ups_req" - upsRT = "ups_rt" - upsTries = "ups_tries" - http200 = "http_200" - http206 = "http_206" - http302 = "http_302" - http304 = "http_304" - http403 = "http_403" - http404 = "http_404" - http416 = "http_416" - http499 = "http_499" - http500 = "http_500" - http502 = "http_502" - http503 = "http_503" - http504 = "http_504" - http508 = "http_508" - httpOtherDetailStatus = "http_other_detail_status" - httpUps4xx = "http_ups_4xx" - httpUps5xx = "http_ups_5xx" -) - -var defaultLineFormat = []string{ - bytesIn, - bytesOut, - connTotal, - reqTotal, - http2xx, - http3xx, - http4xx, - http5xx, - httpOtherStatus, - rt, - upsReq, - upsRT, - upsTries, - http200, - http206, - http302, - http304, - http403, - http404, - http416, - http499, - http500, - http502, - http503, - http504, - http508, - httpOtherDetailStatus, - httpUps4xx, - httpUps5xx, -} - -func newAPIClient(client *http.Client, request web.Request) *apiClient { - return &apiClient{httpClient: client, request: request} -} - -type apiClient struct { - httpClient *http.Client - request web.Request -} - -func (a apiClient) getStatus() (*tengineStatus, error) { - req, err := web.NewHTTPRequest(a.request) - if err != nil { - return nil, fmt.Errorf("error on creating request : %v", err) - } - - resp, err := a.doRequestOK(req) - defer closeBody(resp) - if err != nil { - return nil, err - } - - status, err := parseStatus(resp.Body) - if err != nil { - return nil, 
fmt.Errorf("error on parsing response : %v", err) - } - - return status, nil -} - -func (a apiClient) doRequestOK(req *http.Request) (*http.Response, error) { - resp, err := a.httpClient.Do(req) - if err != nil { - return nil, fmt.Errorf("error on request : %v", err) - } - if resp.StatusCode != http.StatusOK { - return resp, fmt.Errorf("%s returned HTTP code %d", req.URL, resp.StatusCode) - } - return resp, nil -} - -func closeBody(resp *http.Response) { - if resp != nil && resp.Body != nil { - _, _ = io.Copy(io.Discard, resp.Body) - _ = resp.Body.Close() - } -} - -func parseStatus(r io.Reader) (*tengineStatus, error) { - var status tengineStatus - - s := bufio.NewScanner(r) - for s.Scan() { - m, err := parseStatusLine(s.Text(), defaultLineFormat) - if err != nil { - return nil, err - } - status = append(status, *m) - } - - return &status, nil -} - -func parseStatusLine(line string, lineFormat []string) (*metric, error) { - parts := strings.Split(line, ",") - - // NOTE: only default line format is supported - // TODO: custom line format? - // www.example.com,127.0.0.1:80,162,6242,1,1,1,0,0,0,0,10,1,10,1.... - i := findFirstInt(parts) - if i == -1 { - return nil, fmt.Errorf("invalid line : %s", line) - } - if len(parts[i:]) != len(lineFormat) { - return nil, fmt.Errorf("invalid line length, got %d, expected %d, line : %s", - len(parts[i:]), len(lineFormat), line) - } - - // skip "$host,$server_addr:$server_port" - parts = parts[i:] - - var m metric - for i, key := range lineFormat { - value := mustParseInt(parts[i]) - switch key { - default: - return nil, fmt.Errorf("unknown line format key: %s", key) - case bytesIn: - m.BytesIn = value - case bytesOut: - m.BytesOut = value - case connTotal: - m.ConnTotal = value - case reqTotal: - m.ReqTotal = value - case http2xx: - m.HTTP2xx = value - case http3xx: - m.HTTP3xx = value - case http4xx: - m.HTTP4xx = value - case http5xx: - m.HTTP5xx = value - case httpOtherStatus: - m.HTTPOtherStatus = value - case rt: - m.RT = value - case upsReq: - m.UpsReq = value - case upsRT: - m.UpsRT = value - case upsTries: - m.UpsTries = value - case http200: - m.HTTP200 = value - case http206: - m.HTTP206 = value - case http302: - m.HTTP302 = value - case http304: - m.HTTP304 = value - case http403: - m.HTTP403 = value - case http404: - m.HTTP404 = value - case http416: - m.HTTP416 = value - case http499: - m.HTTP499 = value - case http500: - m.HTTP500 = value - case http502: - m.HTTP502 = value - case http503: - m.HTTP503 = value - case http504: - m.HTTP504 = value - case http508: - m.HTTP508 = value - case httpOtherDetailStatus: - m.HTTPOtherDetailStatus = value - case httpUps4xx: - m.HTTPUps4xx = value - case httpUps5xx: - m.HTTPUps5xx = value - } - } - return &m, nil -} - -func findFirstInt(s []string) int { - for i, v := range s { - _, err := strconv.ParseInt(v, 10, 64) - if err != nil { - continue - } - return i - } - return -1 -} - -func mustParseInt(value string) *int64 { - v, err := strconv.ParseInt(value, 10, 64) - if err != nil { - panic(err) - } - - return &v -} diff --git a/src/go/plugin/go.d/modules/tengine/collect.go b/src/go/plugin/go.d/modules/tengine/collect.go index ffa39019e..cbe8d7e28 100644 --- a/src/go/plugin/go.d/modules/tengine/collect.go +++ b/src/go/plugin/go.d/modules/tengine/collect.go @@ -3,20 +3,38 @@ package tengine import ( + "fmt" + "io" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) func (t *Tengine) collect() (map[string]int64, error) { - status, err 
:= t.apiClient.getStatus() + req, err := web.NewHTTPRequest(t.RequestConfig) if err != nil { + return nil, fmt.Errorf("failed to create HTTP request: %w", err) + } + + var status *tengineStatus + var perr error + + if err := web.DoHTTP(t.httpClient).Request(req, func(body io.Reader) error { + if status, perr = parseStatus(body); perr != nil { + return perr + } + return nil + }); err != nil { return nil, err } mx := make(map[string]int64) + for _, m := range *status { for k, v := range stm.ToMap(m) { mx[k] += v } } + return mx, nil } diff --git a/src/go/plugin/go.d/modules/tengine/config_schema.json b/src/go/plugin/go.d/modules/tengine/config_schema.json index 44f6968e1..1de6f6cdb 100644 --- a/src/go/plugin/go.d/modules/tengine/config_schema.json +++ b/src/go/plugin/go.d/modules/tengine/config_schema.json @@ -105,7 +105,6 @@ "required": [ "url" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/tengine/integrations/tengine.md b/src/go/plugin/go.d/modules/tengine/integrations/tengine.md index 44bec575b..592b553eb 100644 --- a/src/go/plugin/go.d/modules/tengine/integrations/tengine.md +++ b/src/go/plugin/go.d/modules/tengine/integrations/tengine.md @@ -98,8 +98,8 @@ The default line format is the only supported format. The configuration file name for this integration is `go.d/tengine.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/tengine/metrics.go b/src/go/plugin/go.d/modules/tengine/metrics.go deleted file mode 100644 index 425559479..000000000 --- a/src/go/plugin/go.d/modules/tengine/metrics.go +++ /dev/null @@ -1,75 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package tengine - -/* -http://tengine.taobao.org/document/http_reqstat.html - -bytes_in total number of bytes received from client -bytes_out total number of bytes sent to client -conn_total total number of accepted connections -req_total total number of processed requests -http_2xx total number of 2xx requests -http_3xx total number of 3xx requests -http_4xx total number of 4xx requests -http_5xx total number of 5xx requests -http_other_status total number of other requests -rt accumulation or rt -ups_req total number of requests calling for upstream -ups_rt accumulation or upstream rt -ups_tries total number of times calling for upstream -http_200 total number of 200 requests -http_206 total number of 206 requests -http_302 total number of 302 requests -http_304 total number of 304 requests -http_403 total number of 403 requests -http_404 total number of 404 requests -http_416 total number of 416 requests -http_499 total number of 499 requests -http_500 total number of 500 requests -http_502 total number of 502 requests -http_503 total number of 503 requests -http_504 total number of 504 requests -http_508 total number of 508 requests -http_other_detail_status total number of requests of other status codes -http_ups_4xx total number of requests of upstream 4xx -http_ups_5xx total number of requests of upstream 5xx -*/ - -type ( - tengineStatus []metric - - metric struct { - Host string - ServerAddress string - BytesIn *int64 `stm:"bytes_in"` - BytesOut *int64 `stm:"bytes_out"` - ConnTotal *int64 `stm:"conn_total"` - ReqTotal *int64 `stm:"req_total"` - HTTP2xx *int64 `stm:"http_2xx"` - HTTP3xx *int64 `stm:"http_3xx"` - HTTP4xx *int64 `stm:"http_4xx"` - HTTP5xx *int64 `stm:"http_5xx"` - HTTPOtherStatus *int64 `stm:"http_other_status"` - RT *int64 `stm:"rt"` - UpsReq *int64 `stm:"ups_req"` - UpsRT *int64 `stm:"ups_rt"` - UpsTries *int64 `stm:"ups_tries"` - HTTP200 *int64 `stm:"http_200"` - HTTP206 *int64 `stm:"http_206"` - HTTP302 *int64 `stm:"http_302"` - HTTP304 *int64 `stm:"http_304"` - HTTP403 *int64 `stm:"http_403"` - HTTP404 *int64 `stm:"http_404"` - HTTP416 *int64 `stm:"http_416"` - HTTP499 *int64 `stm:"http_499"` - HTTP500 *int64 `stm:"http_500"` - HTTP502 *int64 `stm:"http_502"` - HTTP503 *int64 `stm:"http_503"` - HTTP504 *int64 `stm:"http_504"` - HTTP508 *int64 `stm:"http_508"` - HTTPOtherDetailStatus *int64 `stm:"http_other_detail_status"` - HTTPUps4xx *int64 `stm:"http_ups_4xx"` - HTTPUps5xx *int64 `stm:"http_ups_5xx"` - } -) diff --git a/src/go/plugin/go.d/modules/tengine/status.go b/src/go/plugin/go.d/modules/tengine/status.go new file mode 100644 index 000000000..c3d4404c7 --- /dev/null +++ b/src/go/plugin/go.d/modules/tengine/status.go @@ -0,0 +1,269 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package tengine + +import ( + "bufio" + "fmt" + "io" + "strconv" + "strings" +) + +/* +http://tengine.taobao.org/document/http_reqstat.html + +bytes_in total number of bytes received from client +bytes_out total number of bytes sent to client +conn_total total number of accepted connections +req_total total number of processed requests +http_2xx total number of 2xx requests +http_3xx total number of 3xx requests +http_4xx 
total number of 4xx requests
+http_5xx total number of 5xx requests
+http_other_status total number of other requests
+rt accumulation of rt
+ups_req total number of requests calling for upstream
+ups_rt accumulation of upstream rt
+ups_tries total number of times calling for upstream
+http_200 total number of 200 requests
+http_206 total number of 206 requests
+http_302 total number of 302 requests
+http_304 total number of 304 requests
+http_403 total number of 403 requests
+http_404 total number of 404 requests
+http_416 total number of 416 requests
+http_499 total number of 499 requests
+http_500 total number of 500 requests
+http_502 total number of 502 requests
+http_503 total number of 503 requests
+http_504 total number of 504 requests
+http_508 total number of 508 requests
+http_other_detail_status total number of requests of other status codes
+http_ups_4xx total number of requests of upstream 4xx
+http_ups_5xx total number of requests of upstream 5xx
+*/
+
+type (
+	tengineStatus []metric
+
+	metric struct {
+		Host                  string
+		ServerAddress         string
+		BytesIn               *int64 `stm:"bytes_in"`
+		BytesOut              *int64 `stm:"bytes_out"`
+		ConnTotal             *int64 `stm:"conn_total"`
+		ReqTotal              *int64 `stm:"req_total"`
+		HTTP2xx               *int64 `stm:"http_2xx"`
+		HTTP3xx               *int64 `stm:"http_3xx"`
+		HTTP4xx               *int64 `stm:"http_4xx"`
+		HTTP5xx               *int64 `stm:"http_5xx"`
+		HTTPOtherStatus       *int64 `stm:"http_other_status"`
+		RT                    *int64 `stm:"rt"`
+		UpsReq                *int64 `stm:"ups_req"`
+		UpsRT                 *int64 `stm:"ups_rt"`
+		UpsTries              *int64 `stm:"ups_tries"`
+		HTTP200               *int64 `stm:"http_200"`
+		HTTP206               *int64 `stm:"http_206"`
+		HTTP302               *int64 `stm:"http_302"`
+		HTTP304               *int64 `stm:"http_304"`
+		HTTP403               *int64 `stm:"http_403"`
+		HTTP404               *int64 `stm:"http_404"`
+		HTTP416               *int64 `stm:"http_416"`
+		HTTP499               *int64 `stm:"http_499"`
+		HTTP500               *int64 `stm:"http_500"`
+		HTTP502               *int64 `stm:"http_502"`
+		HTTP503               *int64 `stm:"http_503"`
+		HTTP504               *int64 `stm:"http_504"`
+		HTTP508               *int64 `stm:"http_508"`
+		HTTPOtherDetailStatus *int64 `stm:"http_other_detail_status"`
+		HTTPUps4xx            *int64 `stm:"http_ups_4xx"`
+		HTTPUps5xx            *int64 `stm:"http_ups_5xx"`
+	}
+)
+
+const (
+	bytesIn               = "bytes_in"
+	bytesOut              = "bytes_out"
+	connTotal             = "conn_total"
+	reqTotal              = "req_total"
+	http2xx               = "http_2xx"
+	http3xx               = "http_3xx"
+	http4xx               = "http_4xx"
+	http5xx               = "http_5xx"
+	httpOtherStatus       = "http_other_status"
+	rt                    = "rt"
+	upsReq                = "ups_req"
+	upsRT                 = "ups_rt"
+	upsTries              = "ups_tries"
+	http200               = "http_200"
+	http206               = "http_206"
+	http302               = "http_302"
+	http304               = "http_304"
+	http403               = "http_403"
+	http404               = "http_404"
+	http416               = "http_416"
+	http499               = "http_499"
+	http500               = "http_500"
+	http502               = "http_502"
+	http503               = "http_503"
+	http504               = "http_504"
+	http508               = "http_508"
+	httpOtherDetailStatus = "http_other_detail_status"
+	httpUps4xx            = "http_ups_4xx"
+	httpUps5xx            = "http_ups_5xx"
+)
+
+var defaultLineFormat = []string{
+	bytesIn,
+	bytesOut,
+	connTotal,
+	reqTotal,
+	http2xx,
+	http3xx,
+	http4xx,
+	http5xx,
+	httpOtherStatus,
+	rt,
+	upsReq,
+	upsRT,
+	upsTries,
+	http200,
+	http206,
+	http302,
+	http304,
+	http403,
+	http404,
+	http416,
+	http499,
+	http500,
+	http502,
+	http503,
+	http504,
+	http508,
+	httpOtherDetailStatus,
+	httpUps4xx,
+	httpUps5xx,
+}
+
+func parseStatus(r io.Reader) (*tengineStatus, error) {
+	var status tengineStatus
+
+	s := bufio.NewScanner(r)
+	for s.Scan() {
+		m, err := parseStatusLine(s.Text(), defaultLineFormat)
+		if err != nil {
+			return nil, err
+		}
+		status = append(status, *m)
+	}
+
+	return &status, nil
+}
+
+func
parseStatusLine(line string, lineFormat []string) (*metric, error) { + parts := strings.Split(line, ",") + + // NOTE: only default line format is supported + // TODO: custom line format? + // www.example.com,127.0.0.1:80,162,6242,1,1,1,0,0,0,0,10,1,10,1.... + i := findFirstInt(parts) + if i == -1 { + return nil, fmt.Errorf("invalid line : %s", line) + } + if len(parts[i:]) != len(lineFormat) { + return nil, fmt.Errorf("invalid line length, got %d, expected %d, line : %s", + len(parts[i:]), len(lineFormat), line) + } + + // skip "$host,$server_addr:$server_port" + parts = parts[i:] + + var m metric + for i, key := range lineFormat { + value := mustParseInt(parts[i]) + switch key { + default: + return nil, fmt.Errorf("unknown line format key: %s", key) + case bytesIn: + m.BytesIn = value + case bytesOut: + m.BytesOut = value + case connTotal: + m.ConnTotal = value + case reqTotal: + m.ReqTotal = value + case http2xx: + m.HTTP2xx = value + case http3xx: + m.HTTP3xx = value + case http4xx: + m.HTTP4xx = value + case http5xx: + m.HTTP5xx = value + case httpOtherStatus: + m.HTTPOtherStatus = value + case rt: + m.RT = value + case upsReq: + m.UpsReq = value + case upsRT: + m.UpsRT = value + case upsTries: + m.UpsTries = value + case http200: + m.HTTP200 = value + case http206: + m.HTTP206 = value + case http302: + m.HTTP302 = value + case http304: + m.HTTP304 = value + case http403: + m.HTTP403 = value + case http404: + m.HTTP404 = value + case http416: + m.HTTP416 = value + case http499: + m.HTTP499 = value + case http500: + m.HTTP500 = value + case http502: + m.HTTP502 = value + case http503: + m.HTTP503 = value + case http504: + m.HTTP504 = value + case http508: + m.HTTP508 = value + case httpOtherDetailStatus: + m.HTTPOtherDetailStatus = value + case httpUps4xx: + m.HTTPUps4xx = value + case httpUps5xx: + m.HTTPUps5xx = value + } + } + return &m, nil +} + +func findFirstInt(s []string) int { + for i, v := range s { + _, err := strconv.ParseInt(v, 10, 64) + if err != nil { + continue + } + return i + } + return -1 +} + +func mustParseInt(value string) *int64 { + v, err := strconv.ParseInt(value, 10, 64) + if err != nil { + panic(err) + } + + return &v +} diff --git a/src/go/plugin/go.d/modules/tengine/tengine.go b/src/go/plugin/go.d/modules/tengine/tengine.go index 8f67fae46..a530439ca 100644 --- a/src/go/plugin/go.d/modules/tengine/tengine.go +++ b/src/go/plugin/go.d/modules/tengine/tengine.go @@ -5,9 +5,12 @@ package tengine import ( _ "embed" "errors" + "fmt" + "net/http" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -25,12 +28,12 @@ func init() { func New() *Tengine { return &Tengine{ Config: Config{ - HTTP: web.HTTP{ - Request: web.Request{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{ URL: "http://127.0.0.1/us", }, - Client: web.Client{ - Timeout: web.Duration(time.Second * 2), + ClientConfig: web.ClientConfig{ + Timeout: confopt.Duration(time.Second * 2), }, }, }, @@ -39,8 +42,8 @@ func New() *Tengine { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - web.HTTP `yaml:",inline" json:""` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + web.HTTPConfig `yaml:",inline" json:""` } type Tengine struct { @@ -49,7 +52,7 @@ type Tengine struct { charts *module.Charts - apiClient *apiClient + httpClient *http.Client } func (t *Tengine) Configuration() any { 
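`parseStatusLine` above skips the variable-width `$host,$server_addr:$server_port` prefix by locating the first numeric field, then maps the remaining fields onto `defaultLineFormat`. A self-contained sketch of that prefix-skipping step; the counter values are invented to fill out all 29 default-format fields, since the sample line in the comment above is truncated:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// findFirstInt returns the index of the first field that parses as an
// integer; everything before it is the "$host,$server_addr:$server_port"
// prefix, which the fixed default line format does not describe.
func findFirstInt(fields []string) int {
	for i, v := range fields {
		if _, err := strconv.ParseInt(v, 10, 64); err == nil {
			return i
		}
	}
	return -1
}

func main() {
	// counter values are made up to fill out all 29 default-format fields
	line := "www.example.com,127.0.0.1:80,162,6242,1,1,1,0,0,0,0,10,1,10,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0"
	fields := strings.Split(line, ",")

	i := findFirstInt(fields)
	fmt.Println(fields[:i])      // [www.example.com 127.0.0.1:80]
	fmt.Println(len(fields[i:])) // 29, must match len(defaultLineFormat)
}
```

Checking `len(fields[i:])` against the 29-entry format is what lets the collector reject non-default `req_status` layouts early instead of silently mis-assigning counters.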
@@ -58,17 +61,14 @@ func (t *Tengine) Configuration() any { func (t *Tengine) Init() error { if t.URL == "" { - t.Error("url not set") - return errors.New("url not set") + return errors.New("config: url not set") } - client, err := web.NewHTTPClient(t.Client) + httpClient, err := web.NewHTTPClient(t.ClientConfig) if err != nil { - t.Errorf("error on creating http client : %v", err) - return err + return fmt.Errorf("error on creating http client : %v", err) } - - t.apiClient = newAPIClient(client, t.Request) + t.httpClient = httpClient t.Debugf("using URL: %s", t.URL) t.Debugf("using timeout: %s", t.Timeout) @@ -79,12 +79,13 @@ func (t *Tengine) Init() error { func (t *Tengine) Check() error { mx, err := t.collect() if err != nil { - t.Error(err) return err } + if len(mx) == 0 { return errors.New("no metrics collected") } + return nil } @@ -94,17 +95,20 @@ func (t *Tengine) Charts() *module.Charts { func (t *Tengine) Collect() map[string]int64 { mx, err := t.collect() - if err != nil { t.Error(err) return nil } + if len(mx) == 0 { + return nil + } + return mx } func (t *Tengine) Cleanup() { - if t.apiClient != nil && t.apiClient.httpClient != nil { - t.apiClient.httpClient.CloseIdleConnections() + if t.httpClient != nil { + t.httpClient.CloseIdleConnections() } } diff --git a/src/go/plugin/go.d/modules/tengine/tengine_test.go b/src/go/plugin/go.d/modules/tengine/tengine_test.go index e87e62b0c..4f603474b 100644 --- a/src/go/plugin/go.d/modules/tengine/tengine_test.go +++ b/src/go/plugin/go.d/modules/tengine/tengine_test.go @@ -43,7 +43,6 @@ func TestTengine_Init(t *testing.T) { job := New() require.NoError(t, job.Init()) - assert.NotNil(t, job.apiClient) } func TestTengine_Check(t *testing.T) { diff --git a/src/go/plugin/go.d/modules/testrandom/charts.go b/src/go/plugin/go.d/modules/testrandom/charts.go new file mode 100644 index 000000000..10dfb05fa --- /dev/null +++ b/src/go/plugin/go.d/modules/testrandom/charts.go @@ -0,0 +1,60 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package testrandom + +import ( + "fmt" + + "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" +) + +var chartTemplate = module.Chart{ + ID: "random_%d", + Title: "A Random Number", + Units: "random", + Fam: "random", + Ctx: "testrandom.random", +} + +var hiddenChartTemplate = module.Chart{ + ID: "hidden_random_%d", + Title: "A Random Number", + Units: "random", + Fam: "random", + Ctx: "testrandom.random", + Opts: module.Opts{ + Hidden: true, + }, +} + +func newChart(num, ctx, labels int, typ module.ChartType) *module.Chart { + chart := chartTemplate.Copy() + chart.ID = fmt.Sprintf(chart.ID, num) + chart.Type = typ + if ctx > 0 { + chart.Ctx += fmt.Sprintf("_%d", ctx) + } + for i := 0; i < labels; i++ { + chart.Labels = append(chart.Labels, module.Label{ + Key: fmt.Sprintf("random_name_%d", i), + Value: fmt.Sprintf("random_value_%d_%d", num, i), + }) + } + return chart +} + +func newHiddenChart(num, ctx, labels int, typ module.ChartType) *module.Chart { + chart := hiddenChartTemplate.Copy() + chart.ID = fmt.Sprintf(chart.ID, num) + chart.Type = typ + if ctx > 0 { + chart.Ctx += fmt.Sprintf("_%d", ctx) + } + for i := 0; i < labels; i++ { + chart.Labels = append(chart.Labels, module.Label{ + Key: fmt.Sprintf("random_name_%d", i), + Value: fmt.Sprintf("random_value_%d_%d", num, i), + }) + } + return chart +} diff --git a/src/go/plugin/go.d/modules/testrandom/collect.go b/src/go/plugin/go.d/modules/testrandom/collect.go new file mode 100644 index 000000000..1a0508d28 --- /dev/null +++ 
b/src/go/plugin/go.d/modules/testrandom/collect.go @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package testrandom + +import ( + "fmt" + + "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" +) + +func (tr *TestRandom) collect() (map[string]int64, error) { + collected := make(map[string]int64) + + for _, chart := range *tr.Charts() { + tr.collectChart(collected, chart) + } + return collected, nil +} + +func (tr *TestRandom) collectChart(collected map[string]int64, chart *module.Chart) { + var num int + if chart.Opts.Hidden { + num = tr.Config.HiddenCharts.Dims + } else { + num = tr.Config.Charts.Dims + } + + for i := 0; i < num; i++ { + name := fmt.Sprintf("random%d", i) + id := fmt.Sprintf("%s_%s", chart.ID, name) + + if !tr.collectedDims[id] { + tr.collectedDims[id] = true + + dim := &module.Dim{ID: id, Name: name} + if err := chart.AddDim(dim); err != nil { + tr.Warning(err) + } + chart.MarkNotCreated() + } + if i%2 == 0 { + collected[id] = tr.randInt() + } else { + collected[id] = -tr.randInt() + } + } +} diff --git a/src/go/plugin/go.d/modules/testrandom/config_schema.json b/src/go/plugin/go.d/modules/testrandom/config_schema.json new file mode 100644 index 000000000..3b2915db2 --- /dev/null +++ b/src/go/plugin/go.d/modules/testrandom/config_schema.json @@ -0,0 +1,176 @@ +{ + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Example collector configuration.", + "type": "object", + "properties": { + "update_every": { + "title": "Update every", + "description": "Data collection interval, measured in seconds.", + "type": "integer", + "minimum": 1, + "default": 1 + }, + "charts": { + "title": "Charts configuration", + "type": [ + "object", + "null" + ], + "properties": { + "type": { + "title": "Chart type", + "description": "The type of all charts.", + "type": "string", + "enum": [ + "line", + "area", + "stacked" + ], + "default": "line" + }, + "num": { + "title": "Number of charts", + "description": "The total number of charts to create.", + "type": "integer", + "minimum": 0, + "default": 1 + }, + "contexts": { + "title": "Number of contexts", + "description": "The total number of unique contexts.", + "type": "integer", + "minimum": 0, + "default": 0 + }, + "dimensions": { + "title": "Number of dimensions", + "description": "The number of dimensions each chart will have.", + "type": "integer", + "minimum": 1, + "default": 4 + }, + "labels": { + "title": "Number of labels", + "description": "The number of labels each chart will have.", + "type": "integer", + "minimum": 0, + "default": 0 + } + }, + "required": [ + "type", + "num", + "contexts", + "dimensions", + "labels" + ] + }, + "hidden_charts": { + "title": "Hidden charts configuration", + "type": [ + "object", + "null" + ], + "properties": { + "type": { + "title": "Chart type", + "description": "The type of all charts.", + "type": "string", + "enum": [ + "line", + "area", + "stacked" + ], + "default": "line" + }, + "num": { + "title": "Number of charts", + "description": "The total number of charts to create.", + "type": "integer", + "minimum": 0, + "default": 0 + }, + "contexts": { + "title": "Number of contexts", + "description": "The total number of unique contexts.", + "type": "integer", + "minimum": 0, + "default": 0 + }, + "dimensions": { + "title": "Number of dimensions", + "description": "The number of dimensions each chart will have.", + "type": "integer", + "minimum": 1, + "default": 4 + }, + "labels": { + "title": "Number of labels", + "description": "The 
number of labels each chart will have.", + "type": "integer", + "minimum": 0, + "default": 0 + } + }, + "required": [ + "type", + "num", + "contexts", + "dimensions", + "labels" + ] + } + }, + "required": [ + "charts" + ], + "patternProperties": { + "^name$": {} + } + }, + "uiSchema": { + "uiOptions": { + "fullPage": true + }, + "charts": { + "type": { + "ui:widget": "radio", + "ui:options": { + "inline": true + } + } + }, + "hidden_charts": { + "type": { + "ui:widget": "radio", + "ui:options": { + "inline": true + } + } + }, + "ui:flavour": "tabs", + "ui:options": { + "tabs": [ + { + "title": "Base", + "fields": [ + "update_every" + ] + }, + { + "title": "Charts", + "fields": [ + "charts" + ] + }, + { + "title": "Hidden charts", + "fields": [ + "hidden_charts" + ] + } + ] + } + } +} diff --git a/src/go/plugin/go.d/modules/testrandom/init.go b/src/go/plugin/go.d/modules/testrandom/init.go new file mode 100644 index 000000000..f5085ae0d --- /dev/null +++ b/src/go/plugin/go.d/modules/testrandom/init.go @@ -0,0 +1,64 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package testrandom + +import ( + "errors" + + "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" +) + +func (tr *TestRandom) validateConfig() error { + if tr.Config.Charts.Num <= 0 && tr.Config.HiddenCharts.Num <= 0 { + return errors.New("'charts->num' or `hidden_charts->num` must be > 0") + } + if tr.Config.Charts.Num > 0 && tr.Config.Charts.Dims <= 0 { + return errors.New("'charts->dimensions' must be > 0") + } + if tr.Config.HiddenCharts.Num > 0 && tr.Config.HiddenCharts.Dims <= 0 { + return errors.New("'hidden_charts->dimensions' must be > 0") + } + return nil +} + +func (tr *TestRandom) initCharts() (*module.Charts, error) { + charts := &module.Charts{} + + var ctx int + v := calcContextEvery(tr.Config.Charts.Num, tr.Config.Charts.Contexts) + for i := 0; i < tr.Config.Charts.Num; i++ { + if i != 0 && v != 0 && ctx < (tr.Config.Charts.Contexts-1) && i%v == 0 { + ctx++ + } + chart := newChart(i, ctx, tr.Config.Charts.Labels, module.ChartType(tr.Config.Charts.Type)) + + if err := charts.Add(chart); err != nil { + return nil, err + } + } + + ctx = 0 + v = calcContextEvery(tr.Config.HiddenCharts.Num, tr.Config.HiddenCharts.Contexts) + for i := 0; i < tr.Config.HiddenCharts.Num; i++ { + if i != 0 && v != 0 && ctx < (tr.Config.HiddenCharts.Contexts-1) && i%v == 0 { + ctx++ + } + chart := newHiddenChart(i, ctx, tr.Config.HiddenCharts.Labels, module.ChartType(tr.Config.HiddenCharts.Type)) + + if err := charts.Add(chart); err != nil { + return nil, err + } + } + + return charts, nil +} + +func calcContextEvery(charts, contexts int) int { + if contexts <= 1 { + return 0 + } + if contexts > charts { + return 1 + } + return charts / contexts +} diff --git a/src/go/plugin/go.d/modules/testrandom/testdata/config.json b/src/go/plugin/go.d/modules/testrandom/testdata/config.json new file mode 100644 index 000000000..af06e85ac --- /dev/null +++ b/src/go/plugin/go.d/modules/testrandom/testdata/config.json @@ -0,0 +1,17 @@ +{ + "update_every": 123, + "charts": { + "type": "ok", + "num": 123, + "contexts": 123, + "dimensions": 123, + "labels": 123 + }, + "hidden_charts": { + "type": "ok", + "num": 123, + "contexts": 123, + "dimensions": 123, + "labels": 123 + } +} diff --git a/src/go/plugin/go.d/modules/testrandom/testdata/config.yaml b/src/go/plugin/go.d/modules/testrandom/testdata/config.yaml new file mode 100644 index 000000000..a5f6556fd --- /dev/null +++ b/src/go/plugin/go.d/modules/testrandom/testdata/config.yaml @@ -0,0 
+1,13 @@ +update_every: 123 +charts: + type: "ok" + num: 123 + contexts: 123 + dimensions: 123 + labels: 123 +hidden_charts: + type: "ok" + num: 123 + contexts: 123 + dimensions: 123 + labels: 123 diff --git a/src/go/plugin/go.d/modules/testrandom/testrandom.go b/src/go/plugin/go.d/modules/testrandom/testrandom.go new file mode 100644 index 000000000..16959d42f --- /dev/null +++ b/src/go/plugin/go.d/modules/testrandom/testrandom.go @@ -0,0 +1,109 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package testrandom + +import ( + _ "embed" + "fmt" + "math/rand" + + "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("testrandom", module.Creator{ + JobConfigSchema: configSchema, + Defaults: module.Defaults{ + UpdateEvery: module.UpdateEvery, + Priority: module.Priority, + Disabled: true, + }, + Create: func() module.Module { return New() }, + Config: func() any { return &Config{} }, + }) +} + +func New() *TestRandom { + return &TestRandom{ + Config: Config{ + Charts: ConfigCharts{ + Num: 1, + Dims: 4, + }, + HiddenCharts: ConfigCharts{ + Num: 0, + Dims: 4, + }, + }, + + randInt: func() int64 { return rand.Int63n(100) }, + collectedDims: make(map[string]bool), + } +} + +type ( + Config struct { + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + Charts ConfigCharts `yaml:"charts" json:"charts"` + HiddenCharts ConfigCharts `yaml:"hidden_charts" json:"hidden_charts"` + } + ConfigCharts struct { + Type string `yaml:"type,omitempty" json:"type"` + Num int `yaml:"num" json:"num"` + Contexts int `yaml:"contexts" json:"contexts"` + Dims int `yaml:"dimensions" json:"dimensions"` + Labels int `yaml:"labels" json:"labels"` + } +) + +type TestRandom struct { + module.Base // should be embedded by every module + Config `yaml:",inline"` + + randInt func() int64 + charts *module.Charts + collectedDims map[string]bool +} + +func (tr *TestRandom) Configuration() any { + return tr.Config +} + +func (tr *TestRandom) Init() error { + err := tr.validateConfig() + if err != nil { + return fmt.Errorf("config validation: %v", err) + } + + charts, err := tr.initCharts() + if err != nil { + return fmt.Errorf("charts init: %v", err) + } + tr.charts = charts + return nil +} + +func (tr *TestRandom) Check() error { + return nil +} + +func (tr *TestRandom) Charts() *module.Charts { + return tr.charts +} + +func (tr *TestRandom) Collect() map[string]int64 { + mx, err := tr.collect() + if err != nil { + tr.Error(err) + } + + if len(mx) == 0 { + return nil + } + return mx +} + +func (tr *TestRandom) Cleanup() {} diff --git a/src/go/plugin/go.d/modules/testrandom/testrandom_test.go b/src/go/plugin/go.d/modules/testrandom/testrandom_test.go new file mode 100644 index 000000000..052d4249e --- /dev/null +++ b/src/go/plugin/go.d/modules/testrandom/testrandom_test.go @@ -0,0 +1,306 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package testrandom + +import ( + "os" + "testing" + + "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + dataConfigJSON, _ = os.ReadFile("testdata/config.json") + dataConfigYAML, _ = os.ReadFile("testdata/config.yaml") +) + +func Test_testDataIsValid(t *testing.T) { + for name, data := range map[string][]byte{ + "dataConfigJSON": dataConfigJSON, + "dataConfigYAML": dataConfigYAML, + } { + require.NotNil(t, data, name) + } +} + +func 
TestTestRandom_ConfigurationSerialize(t *testing.T) { + module.TestConfigurationSerialize(t, &TestRandom{}, dataConfigJSON, dataConfigYAML) +} + +func TestNew(t *testing.T) { + assert.IsType(t, (*TestRandom)(nil), New()) +} + +func TestTestRandom_Init(t *testing.T) { + tests := map[string]struct { + config Config + wantFail bool + }{ + "success on default config": { + config: New().Config, + }, + "success when only 'charts' set": { + config: Config{ + Charts: ConfigCharts{ + Num: 1, + Dims: 2, + }, + }, + }, + "success when only 'hidden_charts' set": { + config: Config{ + HiddenCharts: ConfigCharts{ + Num: 1, + Dims: 2, + }, + }, + }, + "success when 'charts' and 'hidden_charts' set": { + config: Config{ + Charts: ConfigCharts{ + Num: 1, + Dims: 2, + }, + HiddenCharts: ConfigCharts{ + Num: 1, + Dims: 2, + }, + }, + }, + "fails when 'charts' and 'hidden_charts' set, but 'num' == 0": { + wantFail: true, + config: Config{ + Charts: ConfigCharts{ + Num: 0, + Dims: 2, + }, + HiddenCharts: ConfigCharts{ + Num: 0, + Dims: 2, + }, + }, + }, + "fails when only 'charts' set, 'num' > 0, but 'dimensions' == 0": { + wantFail: true, + config: Config{ + Charts: ConfigCharts{ + Num: 1, + Dims: 0, + }, + }, + }, + "fails when only 'hidden_charts' set, 'num' > 0, but 'dimensions' == 0": { + wantFail: true, + config: Config{ + HiddenCharts: ConfigCharts{ + Num: 1, + Dims: 0, + }, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + tr := New() + tr.Config = test.config + + if test.wantFail { + assert.Error(t, tr.Init()) + } else { + assert.NoError(t, tr.Init()) + } + }) + } +} + +func TestTestRandom_Check(t *testing.T) { + tests := map[string]struct { + prepare func() *TestRandom + wantFail bool + }{ + "success on default": {prepare: prepareTRDefault}, + "success when only 'charts' set": {prepare: prepareTROnlyCharts}, + "success when only 'hidden_charts' set": {prepare: prepareTROnlyHiddenCharts}, + "success when 'charts' and 'hidden_charts' set": {prepare: prepareTRChartsAndHiddenCharts}, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + tr := test.prepare() + require.NoError(t, tr.Init()) + + if test.wantFail { + assert.Error(t, tr.Check()) + } else { + assert.NoError(t, tr.Check()) + } + }) + } +} + +func TestTestRandom_Charts(t *testing.T) { + tests := map[string]struct { + prepare func(t *testing.T) *TestRandom + wantNil bool + }{ + "not initialized collector": { + wantNil: true, + prepare: func(t *testing.T) *TestRandom { + return New() + }, + }, + "initialized collector": { + prepare: func(t *testing.T) *TestRandom { + tr := New() + require.NoError(t, tr.Init()) + return tr + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + tr := test.prepare(t) + + if test.wantNil { + assert.Nil(t, tr.Charts()) + } else { + assert.NotNil(t, tr.Charts()) + } + }) + } +} + +func TestTestRandom_Cleanup(t *testing.T) { + assert.NotPanics(t, New().Cleanup) +} + +func TestTestRandom_Collect(t *testing.T) { + tests := map[string]struct { + prepare func() *TestRandom + wantCollected map[string]int64 + }{ + "default config": { + prepare: prepareTRDefault, + wantCollected: map[string]int64{ + "random_0_random0": 1, + "random_0_random1": -1, + "random_0_random2": 1, + "random_0_random3": -1, + }, + }, + "only 'charts' set": { + prepare: prepareTROnlyCharts, + wantCollected: map[string]int64{ + "random_0_random0": 1, + "random_0_random1": -1, + "random_0_random2": 1, + "random_0_random3": -1, + "random_0_random4": 1, + 
"random_1_random0": 1, + "random_1_random1": -1, + "random_1_random2": 1, + "random_1_random3": -1, + "random_1_random4": 1, + }, + }, + "only 'hidden_charts' set": { + prepare: prepareTROnlyHiddenCharts, + wantCollected: map[string]int64{ + "hidden_random_0_random0": 1, + "hidden_random_0_random1": -1, + "hidden_random_0_random2": 1, + "hidden_random_0_random3": -1, + "hidden_random_0_random4": 1, + "hidden_random_1_random0": 1, + "hidden_random_1_random1": -1, + "hidden_random_1_random2": 1, + "hidden_random_1_random3": -1, + "hidden_random_1_random4": 1, + }, + }, + "'charts' and 'hidden_charts' set": { + prepare: prepareTRChartsAndHiddenCharts, + wantCollected: map[string]int64{ + "hidden_random_0_random0": 1, + "hidden_random_0_random1": -1, + "hidden_random_0_random2": 1, + "hidden_random_0_random3": -1, + "hidden_random_0_random4": 1, + "hidden_random_1_random0": 1, + "hidden_random_1_random1": -1, + "hidden_random_1_random2": 1, + "hidden_random_1_random3": -1, + "hidden_random_1_random4": 1, + "random_0_random0": 1, + "random_0_random1": -1, + "random_0_random2": 1, + "random_0_random3": -1, + "random_0_random4": 1, + "random_1_random0": 1, + "random_1_random1": -1, + "random_1_random2": 1, + "random_1_random3": -1, + "random_1_random4": 1, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + tr := test.prepare() + require.NoError(t, tr.Init()) + + mx := tr.Collect() + + assert.Equal(t, test.wantCollected, mx) + module.TestMetricsHasAllChartsDims(t, tr.Charts(), mx) + }) + } +} + +func prepareTRDefault() *TestRandom { + return prepareTR(New().Config) +} + +func prepareTROnlyCharts() *TestRandom { + return prepareTR(Config{ + Charts: ConfigCharts{ + Num: 2, + Dims: 5, + }, + }) +} + +func prepareTROnlyHiddenCharts() *TestRandom { + return prepareTR(Config{ + HiddenCharts: ConfigCharts{ + Num: 2, + Dims: 5, + }, + }) +} + +func prepareTRChartsAndHiddenCharts() *TestRandom { + return prepareTR(Config{ + Charts: ConfigCharts{ + Num: 2, + Dims: 5, + }, + HiddenCharts: ConfigCharts{ + Num: 2, + Dims: 5, + }, + }) +} + +func prepareTR(cfg Config) *TestRandom { + tr := New() + tr.Config = cfg + tr.randInt = func() int64 { return 1 } + return tr +} diff --git a/src/go/plugin/go.d/modules/tomcat/charts.go b/src/go/plugin/go.d/modules/tomcat/charts.go index 137f700b2..812c52cb5 100644 --- a/src/go/plugin/go.d/modules/tomcat/charts.go +++ b/src/go/plugin/go.d/modules/tomcat/charts.go @@ -103,7 +103,7 @@ var ( connectorRequestThreadsChartTmpl = module.Chart{ ID: "connector_%s_request_threads", - Title: "Connector Request Threads", + Title: "Connector RequestConfig Threads", Units: "threads", Fam: "threads", Ctx: "tomcat.connector_request_threads", diff --git a/src/go/plugin/go.d/modules/tomcat/collect.go b/src/go/plugin/go.d/modules/tomcat/collect.go index c6e2a74bd..70d33b0ed 100644 --- a/src/go/plugin/go.d/modules/tomcat/collect.go +++ b/src/go/plugin/go.d/modules/tomcat/collect.go @@ -3,11 +3,7 @@ package tomcat import ( - "encoding/xml" "errors" - "fmt" - "io" - "net/http" "net/url" "strings" @@ -88,7 +84,7 @@ func cleanName(name string) string { } func (t *Tomcat) queryServerStatus() (*serverStatusResponse, error) { - req, err := web.NewHTTPRequestWithPath(t.Request, urlPathServerStatus) + req, err := web.NewHTTPRequestWithPath(t.RequestConfig, urlPathServerStatus) if err != nil { return nil, err } @@ -96,35 +92,9 @@ func (t *Tomcat) queryServerStatus() (*serverStatusResponse, error) { req.URL.RawQuery = urlQueryServerStatus var status 
serverStatusResponse - - if err := t.doOKDecode(req, &status); err != nil { + if err := web.DoHTTP(t.httpClient).RequestXML(req, &status); err != nil { return nil, err } return &status, nil } - -func (t *Tomcat) doOKDecode(req *http.Request, in interface{}) error { - resp, err := t.httpClient.Do(req) - if err != nil { - return fmt.Errorf("error on HTTP request '%s': %v", req.URL, err) - } - defer closeBody(resp) - - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("'%s' returned HTTP status code: %d", req.URL, resp.StatusCode) - } - - if err := xml.NewDecoder(resp.Body).Decode(in); err != nil { - return fmt.Errorf("error decoding XML response from '%s': %v", req.URL, err) - } - - return nil -} - -func closeBody(resp *http.Response) { - if resp != nil && resp.Body != nil { - _, _ = io.Copy(io.Discard, resp.Body) - _ = resp.Body.Close() - } -} diff --git a/src/go/plugin/go.d/modules/tomcat/config_schema.json b/src/go/plugin/go.d/modules/tomcat/config_schema.json index 91d7096ee..219a6ed39 100644 --- a/src/go/plugin/go.d/modules/tomcat/config_schema.json +++ b/src/go/plugin/go.d/modules/tomcat/config_schema.json @@ -105,7 +105,6 @@ "required": [ "url" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/tomcat/init.go b/src/go/plugin/go.d/modules/tomcat/init.go index 2c2ee29e4..4dd353976 100644 --- a/src/go/plugin/go.d/modules/tomcat/init.go +++ b/src/go/plugin/go.d/modules/tomcat/init.go @@ -17,5 +17,5 @@ func (t *Tomcat) validateConfig() error { } func (t *Tomcat) initHTTPClient() (*http.Client, error) { - return web.NewHTTPClient(t.Client) + return web.NewHTTPClient(t.ClientConfig) } diff --git a/src/go/plugin/go.d/modules/tomcat/integrations/tomcat.md b/src/go/plugin/go.d/modules/tomcat/integrations/tomcat.md index b404e66e2..709772956 100644 --- a/src/go/plugin/go.d/modules/tomcat/integrations/tomcat.md +++ b/src/go/plugin/go.d/modules/tomcat/integrations/tomcat.md @@ -135,8 +135,8 @@ Once you've created the `netdata` user, you'll need to configure the username an The configuration file name for this integration is `go.d/tomcat.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/tomcat/tomcat.go b/src/go/plugin/go.d/modules/tomcat/tomcat.go index 540247063..05eee6285 100644 --- a/src/go/plugin/go.d/modules/tomcat/tomcat.go +++ b/src/go/plugin/go.d/modules/tomcat/tomcat.go @@ -5,10 +5,12 @@ package tomcat import ( _ "embed" "errors" + "fmt" "net/http" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -26,12 +28,12 @@ func init() { func New() *Tomcat { return &Tomcat{ Config: Config{ - HTTP: web.HTTP{ - Request: web.Request{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{ URL: "http://127.0.0.1:8080", }, - Client: web.Client{ - Timeout: web.Duration(time.Second * 1), + ClientConfig: web.ClientConfig{ + Timeout: confopt.Duration(time.Second * 1), }, }, }, @@ -42,8 +44,8 @@ func New() *Tomcat { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - web.HTTP `yaml:",inline" json:""` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + web.HTTPConfig `yaml:",inline" json:""` } type Tomcat struct { @@ -64,14 +66,12 @@ func (t *Tomcat) Configuration() any { func (t *Tomcat) Init() error { if err := t.validateConfig(); err != nil { - t.Errorf("config validation: %v", err) - return err + return fmt.Errorf("config validation: %v", err) } httpClient, err := t.initHTTPClient() if err != nil { - t.Errorf("init HTTP client: %v", err) - return err + return fmt.Errorf("init HTTP client: %v", err) } t.httpClient = httpClient @@ -85,7 +85,6 @@ func (t *Tomcat) Init() error { func (t *Tomcat) Check() error { mx, err := t.collect() if err != nil { - t.Error(err) return err } diff --git a/src/go/plugin/go.d/modules/tomcat/tomcat_test.go b/src/go/plugin/go.d/modules/tomcat/tomcat_test.go index 7dfb6ff1a..441882188 100644 --- a/src/go/plugin/go.d/modules/tomcat/tomcat_test.go +++ b/src/go/plugin/go.d/modules/tomcat/tomcat_test.go @@ -48,8 +48,8 @@ func TestTomcat_Init(t *testing.T) { "fail when URL not set": { wantFail: true, config: Config{ - HTTP: web.HTTP{ - Request: web.Request{URL: ""}, + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{URL: ""}, }, }, }, diff --git a/src/go/plugin/go.d/modules/tor/client.go b/src/go/plugin/go.d/modules/tor/client.go index e4a8045a9..66e784c3f 100644 --- a/src/go/plugin/go.d/modules/tor/client.go +++ b/src/go/plugin/go.d/modules/tor/client.go @@ -31,10 +31,8 @@ func newControlConn(conf Config) controlConn { return &torControlClient{ password: conf.Password, conn: socket.New(socket.Config{ - Address: conf.Address, - ConnectTimeout: conf.Timeout.Duration(), - ReadTimeout: conf.Timeout.Duration(), - WriteTimeout: conf.Timeout.Duration(), + Address: conf.Address, + Timeout: conf.Timeout.Duration(), })} } diff --git a/src/go/plugin/go.d/modules/tor/config_schema.json b/src/go/plugin/go.d/modules/tor/config_schema.json index abfc40d95..c60f652db 100644 --- a/src/go/plugin/go.d/modules/tor/config_schema.json +++ b/src/go/plugin/go.d/modules/tor/config_schema.json @@ -34,7 +34,6 @@ "required": [ "address" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/tor/integrations/tor.md b/src/go/plugin/go.d/modules/tor/integrations/tor.md index 54b5a428c..c3fa2e3ea 100644 --- a/src/go/plugin/go.d/modules/tor/integrations/tor.md +++ 
b/src/go/plugin/go.d/modules/tor/integrations/tor.md @@ -96,8 +96,8 @@ Enable `ControlPort` in `/etc/tor/torrc`. The configuration file name for this integration is `go.d/tor.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/tor/tor.go b/src/go/plugin/go.d/modules/tor/tor.go index bb6cacab1..02c07ad11 100644 --- a/src/go/plugin/go.d/modules/tor/tor.go +++ b/src/go/plugin/go.d/modules/tor/tor.go @@ -8,7 +8,7 @@ import ( "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" ) //go:embed "config_schema.json" @@ -26,7 +26,7 @@ func New() *Tor { return &Tor{ Config: Config{ Address: "127.0.0.1:9051", - Timeout: web.Duration(time.Second * 1), + Timeout: confopt.Duration(time.Second * 1), }, newConn: newControlConn, charts: charts.Copy(), @@ -34,10 +34,10 @@ func New() *Tor { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - Address string `yaml:"address" json:"address"` - Timeout web.Duration `yaml:"timeout" json:"timeout"` - Password string `yaml:"password" json:"password"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + Address string `yaml:"address" json:"address"` + Timeout confopt.Duration `yaml:"timeout" json:"timeout"` + Password string `yaml:"password" json:"password"` } type Tor struct { @@ -56,8 +56,7 @@ func (t *Tor) Configuration() any { func (t *Tor) Init() error { if t.Address == "" { - t.Error("config: 'address' not set") - return errors.New("address not set") + return errors.New("config: address not set") } return nil @@ -66,7 +65,6 @@ func (t *Tor) Init() error { func (t *Tor) Check() error { mx, err := t.collect() if err != nil { - t.Error(err) return err } diff --git a/src/go/plugin/go.d/modules/traefik/config_schema.json b/src/go/plugin/go.d/modules/traefik/config_schema.json index f027f20a0..bb0ce6e6c 100644 --- a/src/go/plugin/go.d/modules/traefik/config_schema.json +++ b/src/go/plugin/go.d/modules/traefik/config_schema.json @@ -105,7 +105,6 @@ "required": [ "url" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/traefik/init.go b/src/go/plugin/go.d/modules/traefik/init.go index 02c1dde0d..2e80728fb 100644 --- a/src/go/plugin/go.d/modules/traefik/init.go +++ b/src/go/plugin/go.d/modules/traefik/init.go @@ -18,12 +18,12 @@ func (t *Traefik) validateConfig() error { } func (t *Traefik) initPrometheusClient() (prometheus.Prometheus, error) { - httpClient, err := web.NewHTTPClient(t.Client) + httpClient, err := web.NewHTTPClient(t.ClientConfig) if err != nil { return nil, err } - prom := prometheus.NewWithSelector(httpClient, t.Request, sr) + prom := prometheus.NewWithSelector(httpClient, t.RequestConfig, sr) return prom, nil } diff --git a/src/go/plugin/go.d/modules/traefik/integrations/traefik.md 
b/src/go/plugin/go.d/modules/traefik/integrations/traefik.md index f5dc10eb9..0a5d09300 100644 --- a/src/go/plugin/go.d/modules/traefik/integrations/traefik.md +++ b/src/go/plugin/go.d/modules/traefik/integrations/traefik.md @@ -92,8 +92,8 @@ To enable see [Prometheus exporter](https://doc.traefik.io/traefik/observability The configuration file name for this integration is `go.d/traefik.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/traefik/traefik.go b/src/go/plugin/go.d/modules/traefik/traefik.go index e38ff9699..bbed6d0d5 100644 --- a/src/go/plugin/go.d/modules/traefik/traefik.go +++ b/src/go/plugin/go.d/modules/traefik/traefik.go @@ -5,9 +5,11 @@ package traefik import ( _ "embed" "errors" + "fmt" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -26,12 +28,12 @@ func init() { func New() *Traefik { return &Traefik{ Config: Config{ - HTTP: web.HTTP{ - Request: web.Request{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{ URL: "http://127.0.0.1:8082/metrics", }, - Client: web.Client{ - Timeout: web.Duration(time.Second), + ClientConfig: web.ClientConfig{ + Timeout: confopt.Duration(time.Second), }, }, }, @@ -45,8 +47,8 @@ func New() *Traefik { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - web.HTTP `yaml:",inline" json:""` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + web.HTTPConfig `yaml:",inline" json:""` } type ( @@ -84,14 +86,12 @@ func (t *Traefik) Configuration() any { func (t *Traefik) Init() error { if err := t.validateConfig(); err != nil { - t.Errorf("config validation: %v", err) - return err + return fmt.Errorf("config validation: %v", err) } prom, err := t.initPrometheusClient() if err != nil { - t.Errorf("prometheus client initialization: %v", err) - return err + return fmt.Errorf("prometheus client initialization: %v", err) } t.prom = prom @@ -101,7 +101,6 @@ func (t *Traefik) Init() error { func (t *Traefik) Check() error { mx, err := t.collect() if err != nil { - t.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/traefik/traefik_test.go b/src/go/plugin/go.d/modules/traefik/traefik_test.go index f3ef024b8..b606c1b35 100644 --- a/src/go/plugin/go.d/modules/traefik/traefik_test.go +++ b/src/go/plugin/go.d/modules/traefik/traefik_test.go @@ -47,15 +47,15 @@ func TestTraefik_Init(t *testing.T) { }, "fails on unset 'url'": { wantFail: true, - config: Config{HTTP: web.HTTP{ - Request: web.Request{}, + config: Config{HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{}, }}, }, "fails on invalid TLSCA": { wantFail: true, config: Config{ - HTTP: web.HTTP{ - Client: web.Client{ + HTTPConfig: web.HTTPConfig{ + ClientConfig: 
web.ClientConfig{ TLSConfig: tlscfg.TLSConfig{TLSCA: "testdata/tls"}, }, }}, @@ -241,13 +241,13 @@ func TestTraefik_Collect(t *testing.T) { tk, cleanup := test.prepare(t) defer cleanup() - var ms map[string]int64 + var mx map[string]int64 for _, want := range test.wantCollected { - ms = tk.Collect() - assert.Equal(t, want, ms) + mx = tk.Collect() + assert.Equal(t, want, mx) } if len(test.wantCollected) > 0 { - ensureCollectedHasAllChartsDimsVarsIDs(t, tk, ms) + module.TestMetricsHasAllChartsDims(t, tk.Charts(), mx) } }) } @@ -352,19 +352,3 @@ func prepareCaseConnectionRefused(t *testing.T) (*Traefik, func()) { return h, func() {} } - -func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, tk *Traefik, ms map[string]int64) { - for _, chart := range *tk.Charts() { - if chart.Obsolete { - continue - } - for _, dim := range chart.Dims { - _, ok := ms[dim.ID] - assert.Truef(t, ok, "chart '%s' dim '%s': no dim in collected", dim.ID, chart.ID) - } - for _, v := range chart.Vars { - _, ok := ms[v.ID] - assert.Truef(t, ok, "chart '%s' dim '%s': no dim in collected", v.ID, chart.ID) - } - } -} diff --git a/src/go/plugin/go.d/modules/typesense/README.md b/src/go/plugin/go.d/modules/typesense/README.md new file mode 120000 index 000000000..9c36cd71e --- /dev/null +++ b/src/go/plugin/go.d/modules/typesense/README.md @@ -0,0 +1 @@ +integrations/typesense.md \ No newline at end of file diff --git a/src/go/plugin/go.d/modules/typesense/charts.go b/src/go/plugin/go.d/modules/typesense/charts.go new file mode 100644 index 000000000..e2c3f958e --- /dev/null +++ b/src/go/plugin/go.d/modules/typesense/charts.go @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package typesense + +import "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + +var baseCharts = module.Charts{ + healthStatusChart.Copy(), +} + +const precision = 1000 + +const ( + prioHealthStatus = module.Priority + iota + + prioTotalRequests + prioRequestsByOperation + prioLatencyByOperation + prioOverloadedRequests +) + +var healthStatusChart = module.Chart{ + ID: "health_status", + Title: "Health Status", + Units: "status", + Fam: "health", + Ctx: "typesense.health_status", + Type: module.Line, + Priority: prioHealthStatus, + Dims: module.Dims{ + {ID: "health_status_ok", Name: "ok"}, + {ID: "health_status_out_of_disk", Name: "out_of_disk"}, + {ID: "health_status_out_of_memory", Name: "out_of_memory"}, + }, +} + +var statsCharts = module.Charts{ + totalRequestsChart.Copy(), + requestsByOperationChart.Copy(), + overloadedRequestsChart.Copy(), + latencyByOperationChart.Copy(), +} + +var ( + totalRequestsChart = module.Chart{ + ID: "total_requests", + Title: "Total Requests", + Units: "requests/s", + Fam: "requests", + Ctx: "typesense.total_requests", + Type: module.Line, + Priority: prioTotalRequests, + Dims: module.Dims{ + {ID: "total_requests_per_second", Name: "requests", Div: precision}, + }, + } + requestsByOperationChart = module.Chart{ + ID: "requests_by_type", + Title: "Requests by Operation", + Units: "requests/s", + Fam: "requests", + Ctx: "typesense.requests_by_operation", + Type: module.Line, + Priority: prioRequestsByOperation, + Dims: module.Dims{ + {ID: "search_requests_per_second", Name: "search", Div: precision}, + {ID: "write_requests_per_second", Name: "write", Div: precision}, + {ID: "import_requests_per_second", Name: "import", Div: precision}, + {ID: "delete_requests_per_second", Name: "delete", Div: precision}, + }, + } + latencyByOperationChart = module.Chart{ + ID: "latency_by_operation", + 
Title: "Latency by Operation", + Units: "milliseconds", + Fam: "requests", + Ctx: "typesense.latency_by_operation", + Type: module.Line, + Priority: prioLatencyByOperation, + Dims: module.Dims{ + {ID: "search_latency_ms", Name: "search"}, + {ID: "write_latency_ms", Name: "write"}, + {ID: "import_latency_ms", Name: "import"}, + {ID: "delete_latency_ms", Name: "delete"}, + }, + } + overloadedRequestsChart = module.Chart{ + ID: "overloaded_requests", + Title: "Overloaded Requests", + Units: "requests/s", + Fam: "requests", + Ctx: "typesense.overloaded_requests", + Type: module.Line, + Priority: prioOverloadedRequests, + Dims: module.Dims{ + {ID: "overloaded_requests_per_second", Name: "overloaded", Div: precision}, + }, + } +) + +func (ts *Typesense) addStatsCharts() { + if err := ts.charts.Add(*statsCharts.Copy()...); err != nil { + ts.Warningf("error adding stats charts: %v", err) + } +} diff --git a/src/go/plugin/go.d/modules/typesense/collect.go b/src/go/plugin/go.d/modules/typesense/collect.go new file mode 100644 index 000000000..8c19fe10a --- /dev/null +++ b/src/go/plugin/go.d/modules/typesense/collect.go @@ -0,0 +1,129 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package typesense + +import ( + "encoding/json" + "fmt" + "net/http" + "strings" + + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" +) + +const ( + urlPathHealth = "/health" + urlPathStats = "/stats.json" +) + +// https://typesense.org/docs/27.0/api/cluster-operations.html#health +type healthResponse struct { + Ok *bool `json:"ok"` + Err string `json:"resource_error"` +} + +// https://typesense.org/docs/27.0/api/cluster-operations.html#api-stats +type statsResponse struct { + DeleteLatencyMs float64 `json:"delete_latency_ms" stm:"delete_latency_ms"` + DeleteRequestsPerSecond float64 `json:"delete_requests_per_second" stm:"delete_requests_per_second,1000,1"` + ImportLatencyMs float64 `json:"import_latency_ms" stm:"import_latency_ms"` + ImportRequestsPerSecond float64 `json:"import_requests_per_second" stm:"import_requests_per_second,1000,1"` + OverloadedRequestsPerSecond float64 `json:"overloaded_requests_per_second" stm:"overloaded_requests_per_second,1000,1"` + PendingWriteBatches float64 `json:"pending_write_batches" stm:"pending_write_batches"` + SearchLatencyMs float64 `json:"search_latency_ms" stm:"search_latency_ms"` + SearchRequestsPerSecond float64 `json:"search_requests_per_second" stm:"search_requests_per_second,1000,1"` + TotalRequestsPerSecond float64 `json:"total_requests_per_second" stm:"total_requests_per_second,1000,1"` + WriteLatencyMs float64 `json:"write_latency_ms" stm:"write_latency_ms"` + WriteRequestsPerSecond float64 `json:"write_requests_per_second" stm:"write_requests_per_second,1000,1"` +} + +func (ts *Typesense) collect() (map[string]int64, error) { + mx := make(map[string]int64) + + if err := ts.collectHealth(mx); err != nil { + return nil, err + } + + if err := ts.collectStats(mx); err != nil { + return nil, err + } + + return mx, nil +} + +func (ts *Typesense) collectHealth(mx map[string]int64) error { + req, err := web.NewHTTPRequestWithPath(ts.RequestConfig, urlPathHealth) + if err != nil { + return fmt.Errorf("creating health request: %w", err) + } + + var resp healthResponse + if err := ts.client().RequestJSON(req, &resp); err != nil { + return err + } + + px := "health_status_" + + for _, v := range []string{"ok", "out_of_disk", "out_of_memory"} { + mx[px+v] = 0 + } + + if resp.Ok == nil { + return 
fmt.Errorf("unexpected response: no health status found") + } + + if resp.Err != "" { + mx[px+strings.ToLower(resp.Err)] = 1 + } else if *resp.Ok { + mx[px+"ok"] = 1 + } + + return nil +} + +func (ts *Typesense) collectStats(mx map[string]int64) error { + if !ts.doStats || ts.APIKey == "" { + return nil + } + + req, err := web.NewHTTPRequestWithPath(ts.RequestConfig, urlPathStats) + if err != nil { + return fmt.Errorf("creating stats request: %w", err) + } + + req.Header.Set("X-TYPESENSE-API-KEY", ts.APIKey) + + var resp statsResponse + if err := ts.client().RequestJSON(req, &resp); err != nil { + if !strings.Contains(err.Error(), "code: 401") { + return err + } + + ts.doStats = false + ts.Warning(err) + + return nil + } + + ts.once.Do(ts.addStatsCharts) + + for k, v := range stm.ToMap(resp) { + mx[k] = v + } + + return nil +} + +func (ts *Typesense) client() *web.Client { + return web.DoHTTP(ts.httpClient).OnNokCode(func(resp *http.Response) (bool, error) { + // {"message": "Forbidden - a valid `x-typesense-api-key` header must be sent."} + var msg struct { + Msg string `json:"message"` + } + if err := json.NewDecoder(resp.Body).Decode(&msg); err == nil && msg.Msg != "" { + return false, fmt.Errorf("msg: '%s'", msg.Msg) + } + return false, nil + }) +} diff --git a/src/go/plugin/go.d/modules/typesense/config_schema.json b/src/go/plugin/go.d/modules/typesense/config_schema.json new file mode 100644 index 000000000..1c1abee91 --- /dev/null +++ b/src/go/plugin/go.d/modules/typesense/config_schema.json @@ -0,0 +1,192 @@ +{ + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Typesense collector configuration.", + "type": "object", + "properties": { + "update_every": { + "title": "Update every", + "description": "Data collection interval, measured in seconds.", + "type": "integer", + "minimum": 1, + "default": 1 + }, + "url": { + "title": "URL", + "description": "The base URL of the Typesense server.", + "type": "string", + "default": "http://127.0.0.1:8108", + "format": "uri" + }, + "timeout": { + "title": "Timeout", + "description": "The timeout in seconds for the HTTP request.", + "type": "number", + "minimum": 0.5, + "default": 1 + }, + "not_follow_redirects": { + "title": "Not follow redirects", + "description": "If set, the client will not follow HTTP redirects automatically.", + "type": "boolean" + }, + "api_key": { + "title": "API Key", + "description": "The Typesense [API Key](https://typesense.org/docs/0.20.0/api/api-keys.html#api-keys) (`X-TYPESENSE-API-KEY`).", + "type": "string", + "sensitive": true + }, + "username": { + "title": "Username", + "description": "The username for basic authentication.", + "type": "string", + "sensitive": true + }, + "password": { + "title": "Password", + "description": "The password for basic authentication.", + "type": "string", + "sensitive": true + }, + "proxy_url": { + "title": "Proxy URL", + "description": "The URL of the proxy server.", + "type": "string" + }, + "proxy_username": { + "title": "Proxy username", + "description": "The username for proxy authentication.", + "type": "string", + "sensitive": true + }, + "proxy_password": { + "title": "Proxy password", + "description": "The password for proxy authentication.", + "type": "string", + "sensitive": true + }, + "headers": { + "title": "Headers", + "description": "Additional HTTP headers to include in the request.", + "type": [ + "object", + "null" + ], + "additionalProperties": { + "type": "string" + } + }, + "tls_skip_verify": { + "title": "Skip TLS 
verification", + "description": "If set, TLS certificate verification will be skipped.", + "type": "boolean" + }, + "tls_ca": { + "title": "TLS CA", + "description": "The path to the CA certificate file for TLS verification.", + "type": "string", + "pattern": "^$|^/" + }, + "tls_cert": { + "title": "TLS certificate", + "description": "The path to the client certificate file for TLS authentication.", + "type": "string", + "pattern": "^$|^/" + }, + "tls_key": { + "title": "TLS key", + "description": "The path to the client key file for TLS authentication.", + "type": "string", + "pattern": "^$|^/" + }, + "body": { + "title": "Body", + "type": "string" + }, + "method": { + "title": "Method", + "type": "string" + } + }, + "required": [ + "url" + ], + "patternProperties": { + "^name$": {} + } + }, + "uiSchema": { + "uiOptions": { + "fullPage": true + }, + "body": { + "ui:widget": "hidden" + }, + "method": { + "ui:widget": "hidden" + }, + "timeout": { + "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)." + }, + "api_key": { + "ui:widget": "password" + }, + "username": { + "ui:widget": "password" + }, + "proxy_username": { + "ui:widget": "password" + }, + "password": { + "ui:widget": "password" + }, + "proxy_password": { + "ui:widget": "password" + }, + "ui:flavour": "tabs", + "ui:options": { + "tabs": [ + { + "title": "Base", + "fields": [ + "update_every", + "url", + "timeout", + "not_follow_redirects" + ] + }, + { + "title": "Auth", + "fields": [ + "api_key", + "username", + "password" + ] + }, + { + "title": "TLS", + "fields": [ + "tls_skip_verify", + "tls_ca", + "tls_cert", + "tls_key" + ] + }, + { + "title": "Proxy", + "fields": [ + "proxy_url", + "proxy_username", + "proxy_password" + ] + }, + { + "title": "Headers", + "fields": [ + "headers" + ] + } + ] + } + } +} diff --git a/src/go/plugin/go.d/modules/typesense/init.go b/src/go/plugin/go.d/modules/typesense/init.go new file mode 100644 index 000000000..89558e753 --- /dev/null +++ b/src/go/plugin/go.d/modules/typesense/init.go @@ -0,0 +1,3 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package typesense diff --git a/src/go/plugin/go.d/modules/typesense/integrations/typesense.md b/src/go/plugin/go.d/modules/typesense/integrations/typesense.md new file mode 100644 index 000000000..14cf3f1a4 --- /dev/null +++ b/src/go/plugin/go.d/modules/typesense/integrations/typesense.md @@ -0,0 +1,245 @@ + + +# Typesense + + + + + +Plugin: go.d.plugin +Module: typesense + + + +## Overview + +This collector monitors the overall health status and performance of your Typesense servers. +It gathers detailed metrics, including the total number of requests processed, the breakdown of different request types, and the average latency experienced by each request. + + +It gathers metrics by periodically issuing HTTP GET requests to the Typesense server: + +- [/health](https://typesense.org/docs/27.0/api/cluster-operations.html#health) endpoint to check server health. +- [/stats.json](https://typesense.org/docs/27.0/api/cluster-operations.html#api-stats) endpoint to collect data on requests and latency. + + +This collector is supported on all platforms. + +This collector only supports collecting metrics from a single instance of this integration. 
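+To make the collection method concrete, here is a minimal, hypothetical standalone sketch (not part of the collector) that issues the same two GET requests by hand. It assumes a Typesense instance listening on `127.0.0.1:8108` and uses the placeholder API key `XYZ`; only the `/stats.json` request needs the `X-TYPESENSE-API-KEY` header.
+
+```go
+package main
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+)
+
+func main() {
+	// /health requires no authentication and reports overall server status.
+	resp, err := http.Get("http://127.0.0.1:8108/health")
+	if err != nil {
+		panic(err)
+	}
+	body, _ := io.ReadAll(resp.Body)
+	_ = resp.Body.Close()
+	fmt.Println("health:", string(body)) // e.g. {"ok":true}
+
+	// /stats.json requires the X-TYPESENSE-API-KEY header.
+	req, _ := http.NewRequest(http.MethodGet, "http://127.0.0.1:8108/stats.json", nil)
+	req.Header.Set("X-TYPESENSE-API-KEY", "XYZ") // placeholder key
+	resp, err = http.DefaultClient.Do(req)
+	if err != nil {
+		panic(err)
+	}
+	body, _ = io.ReadAll(resp.Body)
+	_ = resp.Body.Close()
+	fmt.Println("stats:", string(body)) // per-second request rates and latencies
+}
+```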
+ + +### Default Behavior + +#### Auto-Detection + +The collector can automatically detect Typesense instances running on: + +- localhost that are listening on port 8108 +- within Docker containers + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Typesense instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| typesense.health_status | ok, out_of_disk, out_of_memory | status | +| typesense.total_requests | requests | requests/s | +| typesense.requests_by_operation | search, write, import, delete | requests/s | +| typesense.latency_by_operation | search, write, import, delete | milliseconds | +| typesense.overloaded_requests | overloaded | requests/s | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### API Key Configuration + +While optional, configuring an [API key](https://typesense.org/docs/0.20.0/api/api-keys.html#api-keys) is highly recommended to enable the collector to gather [stats metrics](https://typesense.org/docs/27.0/api/cluster-operations.html#api-stats), including request counts and latency. +Without an API key, the collector will only collect health status information. + +> If you're running Typesense with the API key provided as a command-line parameter (e.g., `--api-key=XYZ`), Netdata can automatically detect and use this key for queries. +> In this case, no additional configuration is required. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/typesense.conf`. + + +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/typesense.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +
    Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 1 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | http://127.0.0.1:8108 | yes | +| timeout | HTTP request timeout. | 1 | no | +| api_key | The Typesense [API Key](https://typesense.org/docs/0.20.0/api/api-keys.html#api-keys) (`X-TYPESENSE-API-KEY`). | | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +
    + +#### Examples + +##### Basic + +A basic example configuration. + +```yaml +jobs: + - name: local + url: http://127.0.0.1:8108 + api_key: XYZ + +``` +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Collecting metrics from local and remote instances. + + +
    Config + +```yaml +jobs: + - name: local + url: http://127.0.0.1:8108 + api_key: XYZ + + - name: remote + url: http://192.0.2.1:8108 + api_key: XYZ + +``` +
    + + + +## Troubleshooting + +### Debug Mode + +**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature. + +To troubleshoot issues with the `typesense` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m typesense + ``` + +### Getting Logs + +If you're encountering problems with the `typesense` collector, follow these steps to retrieve logs and identify potential issues: + +- **Run the command** specific to your system (systemd, non-systemd, or Docker container). +- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem. + +#### System with systemd + +Use the following command to view logs generated since the last Netdata service restart: + +```bash +journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep typesense +``` + +#### System without systemd + +Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name: + +```bash +grep typesense /var/log/netdata/collector.log +``` + +**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues. + +#### Docker Container + +If your Netdata runs in a Docker container named "netdata" (replace if different), use this command: + +```bash +docker logs netdata 2>&1 | grep typesense +``` + + diff --git a/src/go/plugin/go.d/modules/typesense/metadata.yaml b/src/go/plugin/go.d/modules/typesense/metadata.yaml new file mode 100644 index 000000000..1abd6b121 --- /dev/null +++ b/src/go/plugin/go.d/modules/typesense/metadata.yaml @@ -0,0 +1,222 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-typesense + plugin_name: go.d.plugin + module_name: typesense + monitored_instance: + name: Typesense + link: https://typesense.org/ + categories: + - data-collection.search-engines + icon_filename: typesense.svg + related_resources: + integrations: + list: [] + alternative_monitored_instances: [] + info_provided_to_referring_integrations: + description: "" + keywords: + - typesense + - search engine + most_popular: false + overview: + data_collection: + metrics_description: | + This collector monitors the overall health status and performance of your Typesense servers. + It gathers detailed metrics, including the total number of requests processed, the breakdown of different request types, and the average latency experienced by each request. + method_description: | + It gathers metrics by periodically issuing HTTP GET requests to the Typesense server: + + - [/health](https://typesense.org/docs/27.0/api/cluster-operations.html#health) endpoint to check server health. + - [/stats.json](https://typesense.org/docs/27.0/api/cluster-operations.html#api-stats) endpoint to collect data on requests and latency. 
+ default_behavior: + auto_detection: + description: | + The collector can automatically detect Typesense instances running on: + + - localhost that are listening on port 8108 + - within Docker containers + limits: + description: "" + performance_impact: + description: "" + additional_permissions: + description: "" + multi_instance: false + supported_platforms: + include: [] + exclude: [] + setup: + prerequisites: + list: + - title: API Key Configuration + description: | + While optional, configuring an [API key](https://typesense.org/docs/0.20.0/api/api-keys.html#api-keys) is highly recommended to enable the collector to gather [stats metrics](https://typesense.org/docs/27.0/api/cluster-operations.html#api-stats), including request counts and latency. + Without an API key, the collector will only collect health status information. + + > If you're running Typesense with the API key provided as a command-line parameter (e.g., `--api-key=XYZ`), Netdata can automatically detect and use this key for queries. + > In this case, no additional configuration is required. + configuration: + file: + name: go.d/typesense.conf + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. + folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 1 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + - name: url + description: Server URL. + default_value: http://127.0.0.1:8108 + required: true + - name: timeout + description: HTTP request timeout. + default_value: 1 + required: false + - name: api_key + description: "The Typesense [API Key](https://typesense.org/docs/0.20.0/api/api-keys.html#api-keys) (`X-TYPESENSE-API-KEY`)." + default_value: "" + required: false + - name: username + description: Username for basic HTTP authentication. + default_value: "" + required: false + - name: password + description: Password for basic HTTP authentication. + default_value: "" + required: false + - name: proxy_url + description: Proxy URL. + default_value: "" + required: false + - name: proxy_username + description: Username for proxy basic HTTP authentication. + default_value: "" + required: false + - name: proxy_password + description: Password for proxy basic HTTP authentication. + default_value: "" + required: false + - name: method + description: HTTP request method. + default_value: GET + required: false + - name: body + description: HTTP request body. + default_value: "" + required: false + - name: headers + description: HTTP request headers. + default_value: "" + required: false + - name: not_follow_redirects + description: Redirect handling policy. Controls whether the client follows redirects. + default_value: false + required: false + - name: tls_skip_verify + description: Server certificate chain and hostname validation policy. Controls whether the client performs this check. + default_value: false + required: false + - name: tls_ca + description: Certification authority that the client uses when verifying the server's certificates. + default_value: "" + required: false + - name: tls_cert + description: Client TLS certificate. + default_value: "" + required: false + - name: tls_key + description: Client TLS key. 
+ default_value: "" + required: false + examples: + folding: + title: Config + enabled: true + list: + - name: Basic + description: A basic example configuration. + folding: + enabled: false + config: | + jobs: + - name: local + url: http://127.0.0.1:8108 + api_key: XYZ + - name: Multi-instance + description: | + > **Note**: When you define multiple jobs, their names must be unique. + + Collecting metrics from local and remote instances. + config: | + jobs: + - name: local + url: http://127.0.0.1:8108 + api_key: XYZ + + - name: remote + url: http://192.0.2.1:8108 + api_key: XYZ + troubleshooting: + problems: + list: [] + alerts: [] + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: global + description: These metrics refer to the entire monitored application. + labels: [] + metrics: + - name: typesense.health_status + description: Health Status + unit: status + chart_type: line + dimensions: + - name: ok + - name: out_of_disk + - name: out_of_memory + - name: typesense.total_requests + description: Total Requests + unit: requests/s + chart_type: line + dimensions: + - name: requests + - name: typesense.requests_by_operation + description: Requests by Operation + unit: requests/s + chart_type: line + dimensions: + - name: search + - name: write + - name: import + - name: delete + - name: typesense.latency_by_operation + description: Latency by Operation + unit: milliseconds + chart_type: line + dimensions: + - name: search + - name: write + - name: import + - name: delete + - name: typesense.overloaded_requests + description: Overloaded Requests + unit: requests/s + chart_type: line + dimensions: + - name: overloaded diff --git a/src/go/plugin/go.d/modules/typesense/testdata/config.json b/src/go/plugin/go.d/modules/typesense/testdata/config.json new file mode 100644 index 000000000..628fa6317 --- /dev/null +++ b/src/go/plugin/go.d/modules/typesense/testdata/config.json @@ -0,0 +1,21 @@ +{ + "update_every": 123, + "url": "ok", + "api_key": "ok", + "body": "ok", + "method": "ok", + "headers": { + "ok": "ok" + }, + "username": "ok", + "password": "ok", + "proxy_url": "ok", + "proxy_username": "ok", + "proxy_password": "ok", + "timeout": 123.123, + "not_follow_redirects": true, + "tls_ca": "ok", + "tls_cert": "ok", + "tls_key": "ok", + "tls_skip_verify": true +} diff --git a/src/go/plugin/go.d/modules/typesense/testdata/config.yaml b/src/go/plugin/go.d/modules/typesense/testdata/config.yaml new file mode 100644 index 000000000..7274c3ab0 --- /dev/null +++ b/src/go/plugin/go.d/modules/typesense/testdata/config.yaml @@ -0,0 +1,18 @@ +update_every: 123 +url: "ok" +api_key: "ok" +body: "ok" +method: "ok" +headers: + ok: "ok" +username: "ok" +password: "ok" +proxy_url: "ok" +proxy_username: "ok" +proxy_password: "ok" +timeout: 123.123 +not_follow_redirects: yes +tls_ca: "ok" +tls_cert: "ok" +tls_key: "ok" +tls_skip_verify: yes diff --git a/src/go/plugin/go.d/modules/typesense/testdata/v27.0/health_nok.json b/src/go/plugin/go.d/modules/typesense/testdata/v27.0/health_nok.json new file mode 100644 index 000000000..ace467fd3 --- /dev/null +++ b/src/go/plugin/go.d/modules/typesense/testdata/v27.0/health_nok.json @@ -0,0 +1,4 @@ +{ + "ok": false, + "resource_error": "OUT_OF_DISK" +} diff --git a/src/go/plugin/go.d/modules/typesense/testdata/v27.0/health_ok.json b/src/go/plugin/go.d/modules/typesense/testdata/v27.0/health_ok.json new file mode 100644 index 000000000..0287aedde --- /dev/null +++ 
b/src/go/plugin/go.d/modules/typesense/testdata/v27.0/health_ok.json @@ -0,0 +1,3 @@ +{ + "ok": true +} diff --git a/src/go/plugin/go.d/modules/typesense/testdata/v27.0/stats.json b/src/go/plugin/go.d/modules/typesense/testdata/v27.0/stats.json new file mode 100644 index 000000000..9be39cece --- /dev/null +++ b/src/go/plugin/go.d/modules/typesense/testdata/v27.0/stats.json @@ -0,0 +1,15 @@ +{ + "delete_latency_ms": 1, + "delete_requests_per_second": 1.1, + "import_latency_ms": 1, + "import_requests_per_second": 1.1, + "latency_ms": {}, + "overloaded_requests_per_second": 1.1, + "pending_write_batches": 1, + "requests_per_second": {}, + "search_latency_ms": 1, + "search_requests_per_second": 1.1, + "total_requests_per_second": 1.1, + "write_latency_ms": 1, + "write_requests_per_second": 1.1 +} diff --git a/src/go/plugin/go.d/modules/typesense/typesense.go b/src/go/plugin/go.d/modules/typesense/typesense.go new file mode 100644 index 000000000..77760c0b6 --- /dev/null +++ b/src/go/plugin/go.d/modules/typesense/typesense.go @@ -0,0 +1,121 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package typesense + +import ( + _ "embed" + "errors" + "fmt" + "net/http" + "sync" + "time" + + "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("typesense", module.Creator{ + Create: func() module.Module { return New() }, + JobConfigSchema: configSchema, + Config: func() any { return &Config{} }, + }) +} + +func New() *Typesense { + return &Typesense{ + Config: Config{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{ + URL: "http://127.0.0.1:8108", + }, + ClientConfig: web.ClientConfig{ + Timeout: confopt.Duration(time.Second), + }, + }, + }, + charts: baseCharts.Copy(), + doStats: true, + } +} + +type Config struct { + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + web.HTTPConfig `yaml:",inline" json:""` + APIKey string `yaml:"api_key,omitempty" json:"api_key"` +} + +type Typesense struct { + module.Base + Config `yaml:",inline" json:""` + + charts *module.Charts + once sync.Once + + httpClient *http.Client + + doStats bool +} + +func (ts *Typesense) Configuration() any { + return ts.Config +} + +func (ts *Typesense) Init() error { + if ts.URL == "" { + return errors.New("config: url not configured") + } + + httpClient, err := web.NewHTTPClient(ts.ClientConfig) + if err != nil { + return fmt.Errorf("init http client: %w", err) + } + + ts.httpClient = httpClient + + if ts.APIKey == "" { + ts.Warning("API key not set in configuration. 
Only health status will be collected.") + } + ts.Debugf("using URL %s", ts.URL) + ts.Debugf("using timeout: %s", ts.Timeout) + + return nil +} + +func (ts *Typesense) Check() error { + mx, err := ts.collect() + if err != nil { + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + + } + return nil +} + +func (ts *Typesense) Charts() *module.Charts { + return ts.charts +} + +func (ts *Typesense) Collect() map[string]int64 { + mx, err := ts.collect() + if err != nil { + ts.Error(err) + } + + if len(mx) == 0 { + return nil + } + return mx +} + +func (ts *Typesense) Cleanup() { + if ts.httpClient != nil { + ts.httpClient.CloseIdleConnections() + } +} diff --git a/src/go/plugin/go.d/modules/typesense/typesense_test.go b/src/go/plugin/go.d/modules/typesense/typesense_test.go new file mode 100644 index 000000000..a40ecfdef --- /dev/null +++ b/src/go/plugin/go.d/modules/typesense/typesense_test.go @@ -0,0 +1,318 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package typesense + +import ( + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const testApiKey = "XYZ" + +var ( + dataConfigJSON, _ = os.ReadFile("testdata/config.json") + dataConfigYAML, _ = os.ReadFile("testdata/config.yaml") + + dataVer27HealthOk, _ = os.ReadFile("testdata/v27.0/health_ok.json") + dataVer27HealthNok, _ = os.ReadFile("testdata/v27.0/health_nok.json") + dataVer27Stats, _ = os.ReadFile("testdata/v27.0/stats.json") +) + +func Test_testDataIsValid(t *testing.T) { + for name, data := range map[string][]byte{ + "dataConfigJSON": dataConfigJSON, + "dataConfigYAML": dataConfigYAML, + "dataVer27HealthOk": dataVer27HealthOk, + "dataVer27HealthNok": dataVer27HealthNok, + "dataVer27Stats": dataVer27Stats, + } { + require.NotNil(t, data, name) + + } +} + +func TestTypesense_ConfigurationSerialize(t *testing.T) { + module.TestConfigurationSerialize(t, &Typesense{}, dataConfigJSON, dataConfigYAML) +} + +func TestTypesense_Init(t *testing.T) { + tests := map[string]struct { + wantFail bool + config Config + }{ + "success with default": { + wantFail: false, + config: New().Config, + }, + "fail when URL not set": { + wantFail: true, + config: Config{ + HTTPConfig: web.HTTPConfig{ + RequestConfig: web.RequestConfig{URL: ""}, + }, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + ts := New() + ts.Config = test.config + + if test.wantFail { + assert.Error(t, ts.Init()) + } else { + assert.NoError(t, ts.Init()) + } + }) + } +} + +func TestTypesense_Check(t *testing.T) { + tests := map[string]struct { + wantFail bool + prepare func(t *testing.T) (ts *Typesense, cleanup func()) + }{ + "success with valid API key": { + wantFail: false, + prepare: caseOk, + }, + "success without API key": { + wantFail: false, + prepare: caseOkNoApiKey, + }, + "fail on unexpected JSON response": { + wantFail: true, + prepare: caseUnexpectedJsonResponse, + }, + "fail on invalid data response": { + wantFail: true, + prepare: caseInvalidDataResponse, + }, + "fail on connection refused": { + wantFail: true, + prepare: caseConnectionRefused, + }, + "fail on 404 response": { + wantFail: true, + prepare: case404, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + ts, cleanup := test.prepare(t) + defer cleanup() + + if test.wantFail { + assert.Error(t, 
ts.Check()) + } else { + assert.NoError(t, ts.Check()) + } + }) + } +} + +func TestTypesense_Charts(t *testing.T) { + assert.NotNil(t, New().Charts()) +} + +func TestTypesense_Collect(t *testing.T) { + tests := map[string]struct { + prepare func(t *testing.T) (ts *Typesense, cleanup func()) + wantNumOfCharts int + wantMetrics map[string]int64 + }{ + "success with valid API key": { + prepare: caseOk, + wantNumOfCharts: len(baseCharts) + len(statsCharts), + wantMetrics: map[string]int64{ + "delete_latency_ms": 1, + "delete_requests_per_second": 1100, + "health_status_ok": 1, + "health_status_out_of_disk": 0, + "health_status_out_of_memory": 0, + "import_latency_ms": 1, + "import_requests_per_second": 1100, + "overloaded_requests_per_second": 1100, + "pending_write_batches": 1, + "search_latency_ms": 1, + "search_requests_per_second": 1100, + "total_requests_per_second": 1100, + "write_latency_ms": 1, + "write_requests_per_second": 1100, + }, + }, + "success without API key": { + prepare: caseOkNoApiKey, + wantNumOfCharts: len(baseCharts), + wantMetrics: map[string]int64{ + "health_status_ok": 0, + "health_status_out_of_disk": 1, + "health_status_out_of_memory": 0, + }, + }, + "fail on unexpected JSON response": { + prepare: caseUnexpectedJsonResponse, + wantMetrics: nil, + }, + "fail on invalid data response": { + prepare: caseInvalidDataResponse, + wantMetrics: nil, + }, + "fail on connection refused": { + prepare: caseConnectionRefused, + wantMetrics: nil, + }, + "fail on 404 response": { + prepare: case404, + wantMetrics: nil, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + ts, cleanup := test.prepare(t) + defer cleanup() + + _ = ts.Check() + + mx := ts.Collect() + + require.Equal(t, test.wantMetrics, mx) + + if len(test.wantMetrics) > 0 { + assert.Equal(t, test.wantNumOfCharts, len(*ts.Charts()), "want charts") + + module.TestMetricsHasAllChartsDims(t, ts.Charts(), mx) + } + }) + } +} + +func caseOk(t *testing.T) (*Typesense, func()) { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case urlPathHealth: + _, _ = w.Write(dataVer27HealthOk) + case urlPathStats: + if r.Header.Get("X-TYPESENSE-API-KEY") != testApiKey { + msg := "{\"message\": \"Forbidden - a valid `x-typesense-api-key` header must be sent.\"}" + w.WriteHeader(http.StatusUnauthorized) + _, _ = w.Write([]byte(msg)) + } else { + _, _ = w.Write(dataVer27Stats) + } + default: + w.WriteHeader(http.StatusNotFound) + } + })) + ts := New() + ts.URL = srv.URL + ts.APIKey = testApiKey + require.NoError(t, ts.Init()) + + return ts, srv.Close +} + +func caseOkNoApiKey(t *testing.T) (*Typesense, func()) { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case urlPathHealth: + _, _ = w.Write(dataVer27HealthNok) + case urlPathStats: + if r.Header.Get("X-TYPESENSE-API-KEY") != testApiKey { + msg := "{\"message\": \"Forbidden - a valid `x-typesense-api-key` header must be sent.\"}" + w.WriteHeader(http.StatusUnauthorized) + _, _ = w.Write([]byte(msg)) + } else { + _, _ = w.Write(dataVer27Stats) + } + default: + w.WriteHeader(http.StatusNotFound) + } + })) + ts := New() + ts.URL = srv.URL + ts.APIKey = "" + require.NoError(t, ts.Init()) + + return ts, srv.Close +} + +func caseUnexpectedJsonResponse(t *testing.T) (*Typesense, func()) { + t.Helper() + resp := ` +{ + "elephant": { + "burn": false, + "mountain": true, + "fog": false, + "skin": 
-1561907625, + "burst": "anyway", + "shadow": 1558616893 + }, + "start": "ever", + "base": 2093056027, + "mission": -2007590351, + "victory": 999053756, + "die": false +} +` + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write([]byte(resp)) + })) + ts := New() + ts.URL = srv.URL + require.NoError(t, ts.Init()) + + return ts, srv.Close +} + +func caseInvalidDataResponse(t *testing.T) (*Typesense, func()) { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write([]byte("hello and\n goodbye")) + })) + ts := New() + ts.URL = srv.URL + require.NoError(t, ts.Init()) + + return ts, srv.Close +} + +func caseConnectionRefused(t *testing.T) (*Typesense, func()) { + t.Helper() + ts := New() + ts.URL = "http://127.0.0.1:65001" + require.NoError(t, ts.Init()) + + return ts, func() {} +} + +func case404(t *testing.T) (*Typesense, func()) { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + })) + ts := New() + ts.URL = srv.URL + require.NoError(t, ts.Init()) + + return ts, srv.Close +} diff --git a/src/go/plugin/go.d/modules/unbound/config_schema.json b/src/go/plugin/go.d/modules/unbound/config_schema.json index 500b60169..7e471491c 100644 --- a/src/go/plugin/go.d/modules/unbound/config_schema.json +++ b/src/go/plugin/go.d/modules/unbound/config_schema.json @@ -72,7 +72,6 @@ "required": [ "address" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/unbound/init.go b/src/go/plugin/go.d/modules/unbound/init.go index 88e5e5ab0..6895ce427 100644 --- a/src/go/plugin/go.d/modules/unbound/init.go +++ b/src/go/plugin/go.d/modules/unbound/init.go @@ -86,11 +86,9 @@ func (u *Unbound) initClient() (err error) { } u.client = socket.New(socket.Config{ - Address: u.Address, - ConnectTimeout: u.Timeout.Duration(), - ReadTimeout: u.Timeout.Duration(), - WriteTimeout: u.Timeout.Duration(), - TLSConf: tlsCfg, + Address: u.Address, + Timeout: u.Timeout.Duration(), + TLSConf: tlsCfg, }) return nil } diff --git a/src/go/plugin/go.d/modules/unbound/integrations/unbound.md b/src/go/plugin/go.d/modules/unbound/integrations/unbound.md index df6412270..18a09c21a 100644 --- a/src/go/plugin/go.d/modules/unbound/integrations/unbound.md +++ b/src/go/plugin/go.d/modules/unbound/integrations/unbound.md @@ -159,8 +159,8 @@ For auto-detection parameters from `unbound.conf`: The configuration file name for this integration is `go.d/unbound.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/unbound/unbound.go b/src/go/plugin/go.d/modules/unbound/unbound.go index fa071bb0f..d15e6fce5 100644 --- a/src/go/plugin/go.d/modules/unbound/unbound.go +++ b/src/go/plugin/go.d/modules/unbound/unbound.go @@ -5,12 +5,13 @@ package unbound import ( _ "embed" "errors" + "fmt" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/socket" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/tlscfg" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) //go:embed "config_schema.json" @@ -29,7 +30,7 @@ func New() *Unbound { Config: Config{ Address: "127.0.0.1:8953", ConfPath: "/etc/unbound/unbound.conf", - Timeout: web.Duration(time.Second), + Timeout: confopt.Duration(time.Second), Cumulative: false, UseTLS: true, TLSConfig: tlscfg.TLSConfig{ @@ -44,12 +45,12 @@ func New() *Unbound { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - Address string `yaml:"address" json:"address"` - ConfPath string `yaml:"conf_path,omitempty" json:"conf_path"` - Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"` - Cumulative bool `yaml:"cumulative_stats" json:"cumulative_stats"` - UseTLS bool `yaml:"use_tls,omitempty" json:"use_tls"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + Address string `yaml:"address" json:"address"` + ConfPath string `yaml:"conf_path,omitempty" json:"conf_path"` + Timeout confopt.Duration `yaml:"timeout,omitempty" json:"timeout"` + Cumulative bool `yaml:"cumulative_stats" json:"cumulative_stats"` + UseTLS bool `yaml:"use_tls,omitempty" json:"use_tls"` tlscfg.TLSConfig `yaml:",inline" json:""` } @@ -77,8 +78,7 @@ func (u *Unbound) Init() error { } if err := u.initClient(); err != nil { - u.Errorf("creating client: %v", err) - return err + return fmt.Errorf("creating client: %v", err) } u.charts = charts(u.Cumulative) @@ -94,7 +94,6 @@ func (u *Unbound) Init() error { func (u *Unbound) Check() error { mx, err := u.collect() if err != nil { - u.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/unbound/unbound_test.go b/src/go/plugin/go.d/modules/unbound/unbound_test.go index f9ed73afe..e23147cd3 100644 --- a/src/go/plugin/go.d/modules/unbound/unbound_test.go +++ b/src/go/plugin/go.d/modules/unbound/unbound_test.go @@ -269,11 +269,13 @@ func (m mockUnboundClient) Command(_ string, process socket.Processor) error { return nil } -func testCharts(t *testing.T, unbound *Unbound, collected map[string]int64) { +func testCharts(t *testing.T, unbound *Unbound, mx map[string]int64) { t.Helper() ensureChartsCreatedForEveryThread(t, unbound) ensureExtendedChartsCreated(t, unbound) - ensureCollectedHasAllChartsDimsVarsIDs(t, unbound, collected) + module.TestMetricsHasAllChartsDimsSkip(t, unbound.Charts(), mx, func(_ *module.Chart, dim *module.Dim) bool { + return dim.ID == "mem.mod.ipsecmod" + }) } func ensureChartsCreatedForEveryThread(t *testing.T, u *Unbound) { @@ -318,22 +320,6 @@ func ensureExtendedChartsCreated(t *testing.T, u *Unbound) { } } -func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, u *Unbound, collected map[string]int64) { - for _, chart := range *u.Charts() { - for _, dim := range chart.Dims { - if dim.ID == "mem.mod.ipsecmod" { - continue - } - _, ok := collected[dim.ID] - assert.Truef(t, ok, "collected metrics 
has no data for dim '%s' chart '%s'", dim.ID, chart.ID) - } - for _, v := range chart.Vars { - _, ok := collected[v.ID] - assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) - } - } -} - var ( expectedCommon = map[string]int64{ "thread0.num.cachehits": 21, diff --git a/src/go/plugin/go.d/modules/upsd/client.go b/src/go/plugin/go.d/modules/upsd/client.go index a708bdcaf..b45566923 100644 --- a/src/go/plugin/go.d/modules/upsd/client.go +++ b/src/go/plugin/go.d/modules/upsd/client.go @@ -29,10 +29,8 @@ type upsUnit struct { func newUpsdConn(conf Config) upsdConn { return &upsdClient{conn: socket.New(socket.Config{ - ConnectTimeout: conf.Timeout.Duration(), - ReadTimeout: conf.Timeout.Duration(), - WriteTimeout: conf.Timeout.Duration(), - Address: conf.Address, + Timeout: conf.Timeout.Duration(), + Address: conf.Address, })} } diff --git a/src/go/plugin/go.d/modules/upsd/config_schema.json b/src/go/plugin/go.d/modules/upsd/config_schema.json index 564c0179c..7d78d4eea 100644 --- a/src/go/plugin/go.d/modules/upsd/config_schema.json +++ b/src/go/plugin/go.d/modules/upsd/config_schema.json @@ -38,7 +38,6 @@ "required": [ "address" ], - "additionalProperties": false, "patternProperties": { "^name$": {} }, diff --git a/src/go/plugin/go.d/modules/upsd/integrations/ups_nut.md b/src/go/plugin/go.d/modules/upsd/integrations/ups_nut.md index 002617bdf..1215504dc 100644 --- a/src/go/plugin/go.d/modules/upsd/integrations/ups_nut.md +++ b/src/go/plugin/go.d/modules/upsd/integrations/ups_nut.md @@ -121,8 +121,8 @@ No action required. The configuration file name for this integration is `go.d/upsd.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
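The `timeout` option these collectors accept maps onto the new `confopt.Duration` type introduced in the `unbound` hunk above (and the `upsd`/`uwsgi` hunks below), replacing `web.Duration`. A minimal sketch of what such a duration type does — a hypothetical stand-in for illustration, not the actual `confopt` implementation:

```go
package main

import (
	"fmt"
	"strconv"
	"time"

	"gopkg.in/yaml.v3"
)

// durationOpt is a hypothetical stand-in for a config duration type: it
// accepts a bare number of seconds (2, 1.5) or a Go duration string ("500ms").
type durationOpt time.Duration

func (d *durationOpt) UnmarshalYAML(node *yaml.Node) error {
	raw := node.Value
	// Try plain seconds first (integers and decimals).
	if secs, err := strconv.ParseFloat(raw, 64); err == nil {
		*d = durationOpt(time.Duration(secs * float64(time.Second)))
		return nil
	}
	// Fall back to Go duration syntax ("500ms", "2s", ...).
	v, err := time.ParseDuration(raw)
	if err != nil {
		return fmt.Errorf("invalid duration %q: %v", raw, err)
	}
	*d = durationOpt(v)
	return nil
}

func main() {
	for _, doc := range []string{"timeout: 2", "timeout: 1.5", "timeout: 500ms"} {
		var cfg struct {
			Timeout durationOpt `yaml:"timeout"`
		}
		if err := yaml.Unmarshal([]byte(doc), &cfg); err != nil {
			fmt.Println(doc, "->", err)
			continue
		}
		fmt.Println(doc, "->", time.Duration(cfg.Timeout))
	}
}
```

The actual `Config` structs in these hunks also carry JSON tags, so the real type covers JSON as well; the sketch shows only the YAML side.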
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/upsd/upsd.go b/src/go/plugin/go.d/modules/upsd/upsd.go index 752697faa..ce589da13 100644 --- a/src/go/plugin/go.d/modules/upsd/upsd.go +++ b/src/go/plugin/go.d/modules/upsd/upsd.go @@ -8,7 +8,7 @@ import ( "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" ) //go:embed "config_schema.json" @@ -26,7 +26,7 @@ func New() *Upsd { return &Upsd{ Config: Config{ Address: "127.0.0.1:3493", - Timeout: web.Duration(time.Second * 2), + Timeout: confopt.Duration(time.Second * 2), }, newUpsdConn: newUpsdConn, charts: &module.Charts{}, @@ -35,11 +35,11 @@ func New() *Upsd { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - Address string `yaml:"address" json:"address"` - Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"` - Username string `yaml:"username,omitempty" json:"username"` - Password string `yaml:"password,omitempty" json:"password"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + Address string `yaml:"address" json:"address"` + Timeout confopt.Duration `yaml:"timeout,omitempty" json:"timeout"` + Username string `yaml:"username,omitempty" json:"username"` + Password string `yaml:"password,omitempty" json:"password"` } type ( @@ -69,8 +69,7 @@ func (u *Upsd) Configuration() any { func (u *Upsd) Init() error { if u.Address == "" { - u.Error("config: 'address' not set") - return errors.New("address not set") + return errors.New("config: 'address' not set") } return nil @@ -79,7 +78,6 @@ func (u *Upsd) Init() error { func (u *Upsd) Check() error { mx, err := u.collect() if err != nil { - u.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/uwsgi/client.go b/src/go/plugin/go.d/modules/uwsgi/client.go index 487aeb930..12138672f 100644 --- a/src/go/plugin/go.d/modules/uwsgi/client.go +++ b/src/go/plugin/go.d/modules/uwsgi/client.go @@ -33,10 +33,8 @@ func (c *uwsgiClient) queryStats() ([]byte, error) { const readLineLimit = 1000 * 10 cfg := socket.Config{ - Address: c.address, - ConnectTimeout: c.timeout, - ReadTimeout: c.timeout, - WriteTimeout: c.timeout, + Address: c.address, + Timeout: c.timeout, } clientErr := socket.ConnectAndRead(cfg, func(bs []byte) bool { diff --git a/src/go/plugin/go.d/modules/uwsgi/config_schema.json b/src/go/plugin/go.d/modules/uwsgi/config_schema.json index 14c750432..ff771527f 100644 --- a/src/go/plugin/go.d/modules/uwsgi/config_schema.json +++ b/src/go/plugin/go.d/modules/uwsgi/config_schema.json @@ -28,7 +28,6 @@ "required": [ "address" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/uwsgi/integrations/uwsgi.md b/src/go/plugin/go.d/modules/uwsgi/integrations/uwsgi.md index 6fe19263e..1b1cf9684 100644 --- a/src/go/plugin/go.d/modules/uwsgi/integrations/uwsgi.md +++ b/src/go/plugin/go.d/modules/uwsgi/integrations/uwsgi.md @@ -123,8 +123,8 @@ See [Stats Server](https://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html) The configuration file name for this integration is `go.d/uwsgi.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/uwsgi/uwsgi.go b/src/go/plugin/go.d/modules/uwsgi/uwsgi.go index c70dd656c..fc081f83d 100644 --- a/src/go/plugin/go.d/modules/uwsgi/uwsgi.go +++ b/src/go/plugin/go.d/modules/uwsgi/uwsgi.go @@ -8,7 +8,7 @@ import ( "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" ) //go:embed "config_schema.json" @@ -26,7 +26,7 @@ func New() *Uwsgi { return &Uwsgi{ Config: Config{ Address: "127.0.0.1:1717", - Timeout: web.Duration(time.Second * 1), + Timeout: confopt.Duration(time.Second * 1), }, charts: charts.Copy(), seenWorkers: make(map[int]bool), @@ -34,9 +34,9 @@ func New() *Uwsgi { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - Address string `yaml:"address" json:"address"` - Timeout web.Duration `yaml:"timeout" json:"timeout"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + Address string `yaml:"address" json:"address"` + Timeout confopt.Duration `yaml:"timeout" json:"timeout"` } type Uwsgi struct { @@ -56,8 +56,7 @@ func (u *Uwsgi) Configuration() any { func (u *Uwsgi) Init() error { if u.Address == "" { - u.Error("config: 'address' not set") - return errors.New("address not set") + return errors.New("config: 'address' not set") } u.conn = newUwsgiConn(u.Config) @@ -68,7 +67,6 @@ func (u *Uwsgi) Init() error { func (u *Uwsgi) Check() error { mx, err := u.collect() if err != nil { - u.Error(err) return err } diff --git a/src/go/plugin/go.d/modules/varnish/README.md b/src/go/plugin/go.d/modules/varnish/README.md new file mode 120000 index 000000000..194be2335 --- /dev/null +++ b/src/go/plugin/go.d/modules/varnish/README.md @@ -0,0 +1 @@ +integrations/varnish.md \ No newline at end of file diff --git a/src/go/plugin/go.d/modules/varnish/charts.go b/src/go/plugin/go.d/modules/varnish/charts.go new file mode 100644 index 000000000..49c2f70b9 --- /dev/null +++ b/src/go/plugin/go.d/modules/varnish/charts.go @@ -0,0 +1,387 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package varnish + +import ( + "fmt" + "strings" + + "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" +) + +const ( + prioClientSessionConnections = module.Priority + iota + prioClientRequests + + prioBackendsConnections + prioBackendsRequests + prioBackendDataTransfer + + prioCacheHitRatioTotal + prioCacheHitRatioDelta + + prioCacheExpiredObjects + prioCacheLRUActivity + + prioThreadsTotal + prioThreadManagementActivity + prioThreadQueueLen + + prioEsiStatistics + + prioStorageSpaceUsage + prioStorageAllocatedObjects + + prioMgmtProcessUptime + prioChildProcessUptime +) + +var varnishCharts = module.Charts{ + clientSessionConnectionsChart.Copy(), + clientRequestsChart.Copy(), + + backendConnectionsChart.Copy(), + backendRequestsChart.Copy(), + + cacheHitRatioTotalChart.Copy(), + cacheHitRatioDeltaChart.Copy(), + cachedObjectsExpiredChart.Copy(), + cacheLRUActivityChart.Copy(), + + threadsTotalChart.Copy(), + 
threadManagementActivityChart.Copy(), + threadQueueLenChart.Copy(), + + esiParsingIssuesChart.Copy(), + + mgmtProcessUptimeChart.Copy(), + childProcessUptimeChart.Copy(), +} + +var backendChartsTmpl = module.Charts{ + backendDataTransferChartTmpl.Copy(), +} + +var storageChartsTmpl = module.Charts{ + storageSpaceUsageChartTmpl.Copy(), + storageAllocatedObjectsChartTmpl.Copy(), +} + +// Client metrics +var ( + clientSessionConnectionsChart = module.Chart{ + ID: "client_session_connections", + Title: "Client Session Connections", + Fam: "client connections", + Units: "connections/s", + Ctx: "varnish.client_session_connections", + Type: module.Line, + Priority: prioClientSessionConnections, + Dims: module.Dims{ + {ID: "MAIN.sess_conn", Name: "accepted", Algo: module.Incremental}, + {ID: "MAIN.sess_dropped", Name: "dropped", Algo: module.Incremental}, + }, + } + + clientRequestsChart = module.Chart{ + ID: "client_requests", + Title: "Client Requests", + Fam: "client requests", + Units: "requests/s", + Ctx: "varnish.client_requests", + Type: module.Line, + Priority: prioClientRequests, + Dims: module.Dims{ + {ID: "MAIN.client_req", Name: "received", Algo: module.Incremental}, + }, + } +) + +// Cache activity +var ( + cacheHitRatioTotalChart = module.Chart{ + ID: "cache_hit_ratio_total", + Title: "Cache Hit Ratio Total", + Fam: "cache activity", + Units: "percent", + Ctx: "varnish.cache_hit_ratio_total", + Type: module.Stacked, + Priority: prioCacheHitRatioTotal, + Dims: module.Dims{ + {ID: "MAIN.cache_hit", Name: "hit", Algo: module.PercentOfAbsolute}, + {ID: "MAIN.cache_miss", Name: "miss", Algo: module.PercentOfAbsolute}, + {ID: "MAIN.cache_hitpass", Name: "hitpass", Algo: module.PercentOfAbsolute}, + {ID: "MAIN.cache_hitmiss", Name: "hitmiss", Algo: module.PercentOfAbsolute}, + }, + } + cacheHitRatioDeltaChart = module.Chart{ + ID: "cache_hit_ratio_delta", + Title: "Cache Hit Ratio Current Poll", + Fam: "cache activity", + Units: "percent", + Ctx: "varnish.cache_hit_ratio_delta", + Type: module.Stacked, + Priority: prioCacheHitRatioDelta, + Dims: module.Dims{ + {ID: "MAIN.cache_hit", Name: "hit", Algo: module.PercentOfIncremental}, + {ID: "MAIN.cache_miss", Name: "miss", Algo: module.PercentOfIncremental}, + {ID: "MAIN.cache_hitpass", Name: "hitpass", Algo: module.PercentOfIncremental}, + {ID: "MAIN.cache_hitmiss", Name: "hitmiss", Algo: module.PercentOfIncremental}, + }, + } + cachedObjectsExpiredChart = module.Chart{ + ID: "cache_expired_objects", + Title: "Cache Expired Objects", + Fam: "cache activity", + Units: "objects/s", + Ctx: "varnish.cache_expired_objects", + Type: module.Line, + Priority: prioCacheExpiredObjects, + Dims: module.Dims{ + {ID: "MAIN.n_expired", Name: "expired", Algo: module.Incremental}, + }, + } + cacheLRUActivityChart = module.Chart{ + ID: "cache_lru_activity", + Title: "Cache LRU Activity", + Fam: "cache activity", + Units: "objects/s", + Ctx: "varnish.cache_lru_activity", + Type: module.Line, + Priority: prioCacheLRUActivity, + Dims: module.Dims{ + {ID: "MAIN.n_lru_nuked", Name: "nuked", Algo: module.Incremental}, + {ID: "MAIN.n_lru_moved", Name: "moved", Algo: module.Incremental}, + }, + } +) + +// Threads +var ( + threadsTotalChart = module.Chart{ + ID: "threads", + Title: "Threads In All Pools", + Fam: "threads", + Units: "threads", + Ctx: "varnish.threads", + Type: module.Line, + Priority: prioThreadsTotal, + Dims: module.Dims{ + {ID: "MAIN.threads", Name: "threads"}, + }, + } + threadManagementActivityChart = module.Chart{ + ID: 
"thread_management_activity", + Title: "Thread Management Activity", + Fam: "threads", + Units: "threads/s", + Ctx: "varnish.thread_management_activity", + Type: module.Line, + Priority: prioThreadManagementActivity, + Dims: module.Dims{ + {ID: "MAIN.threads_created", Name: "created", Algo: module.Incremental}, + {ID: "MAIN.threads_failed", Name: "failed", Algo: module.Incremental}, + {ID: "MAIN.threads_destroyed", Name: "destroyed", Algo: module.Incremental}, + {ID: "MAIN.threads_limited", Name: "limited", Algo: module.Incremental}, + }, + } + threadQueueLenChart = module.Chart{ + ID: "thread_queue_len", + Title: "Session Queue Length", + Fam: "threads", + Units: "requests", + Ctx: "varnish.thread_queue_len", + Type: module.Line, + Priority: prioThreadQueueLen, + Dims: module.Dims{ + {ID: "MAIN.thread_queue_len", Name: "queue_len"}, + }, + } +) + +var ( + backendConnectionsChart = module.Chart{ + ID: "backends_connections", + Title: "Backend Connections", + Fam: "backend connections", + Units: "connections/s", + Ctx: "varnish.backends_connections", + Type: module.Line, + Priority: prioBackendsConnections, + Dims: module.Dims{ + {ID: "MAIN.backend_conn", Name: "successful", Algo: module.Incremental}, + {ID: "MAIN.backend_unhealthy", Name: "unhealthy", Algo: module.Incremental}, + {ID: "MAIN.backend_busy", Name: "busy", Algo: module.Incremental}, + {ID: "MAIN.backend_fail", Name: "failed", Algo: module.Incremental}, + {ID: "MAIN.backend_reuse", Name: "reused", Algo: module.Incremental}, + {ID: "MAIN.backend_recycle", Name: "recycled", Algo: module.Incremental}, + {ID: "MAIN.backend_retry", Name: "retry", Algo: module.Incremental}, + }, + } + backendRequestsChart = module.Chart{ + ID: "backends_requests", + Title: "Backend Requests", + Fam: "backend requests", + Units: "requests/s", + Ctx: "varnish.backends_requests", + Type: module.Line, + Priority: prioBackendsRequests, + Dims: module.Dims{ + {ID: "MAIN.backend_req", Name: "sent", Algo: module.Incremental}, + }, + } +) + +// ESI +var ( + esiParsingIssuesChart = module.Chart{ + ID: "esi_parsing_issues", + Title: "ESI Parsing Issues", + Fam: "esi", + Units: "issues/s", + Ctx: "varnish.esi_parsing_issues", + Type: module.Line, + Priority: prioEsiStatistics, + Dims: module.Dims{ + {ID: "MAIN.esi_errors", Name: "errors", Algo: module.Incremental}, + {ID: "MAIN.esi_warnings", Name: "warnings", Algo: module.Incremental}, + }, + } +) + +// Uptime +var ( + mgmtProcessUptimeChart = module.Chart{ + ID: "mgmt_process_uptime", + Title: "Management Process Uptime", + Fam: "uptime", + Units: "seconds", + Ctx: "varnish.mgmt_process_uptime", + Type: module.Line, + Priority: prioMgmtProcessUptime, + Dims: module.Dims{ + {ID: "MGT.uptime", Name: "uptime"}, + }, + } + childProcessUptimeChart = module.Chart{ + ID: "child_process_uptime", + Title: "Child Process Uptime", + Fam: "uptime", + Units: "seconds", + Ctx: "varnish.child_process_uptime", + Type: module.Line, + Priority: prioChildProcessUptime, + Dims: module.Dims{ + {ID: "MAIN.uptime", Name: "uptime"}, + }, + } +) + +var ( + backendDataTransferChartTmpl = module.Chart{ + ID: "backend_%s_data_transfer", + Title: "Backend Data Transfer", + Fam: "backend traffic", + Units: "bytes/s", + Ctx: "varnish.backend_data_transfer", + Type: module.Area, + Priority: prioBackendDataTransfer, + Dims: module.Dims{ + {ID: "VBE.%s.bereq_hdrbytes", Name: "req_header", Algo: module.Incremental}, + {ID: "VBE.%s.bereq_bodybytes", Name: "req_body", Algo: module.Incremental}, + {ID: "VBE.%s.beresp_hdrbytes", Name: 
"resp_header", Algo: module.Incremental, Mul: -1}, + {ID: "VBE.%s.beresp_bodybytes", Name: "resp_body", Algo: module.Incremental, Mul: -1}, + }, + } +) + +var ( + storageSpaceUsageChartTmpl = module.Chart{ + ID: "storage_%s_usage", + Title: "Storage Space Usage", + Fam: "storage usage", + Units: "bytes", + Ctx: "varnish.storage_space_usage", + Type: module.Stacked, + Priority: prioStorageSpaceUsage, + Dims: module.Dims{ + {ID: "%s.g_space", Name: "free"}, + {ID: "%s.g_bytes", Name: "used"}, + }, + } + + storageAllocatedObjectsChartTmpl = module.Chart{ + ID: "storage_%s_allocated_objects", + Title: "Storage Allocated Objects", + Fam: "storage usage", + Units: "objects", + Ctx: "varnish.storage_allocated_objects", + Type: module.Line, + Priority: prioStorageAllocatedObjects, + Dims: module.Dims{ + {ID: "%s.g_alloc", Name: "allocated"}, + }, + } +) + +func (v *Varnish) addBackendCharts(fullName string) { + charts := backendChartsTmpl.Copy() + + for _, chart := range *charts { + chart.ID = cleanChartID(fmt.Sprintf(chart.ID, fullName)) + chart.Labels = []module.Label{ + {Key: "backend", Value: fullName}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, fullName) + } + } + + if err := v.Charts().Add(*charts...); err != nil { + v.Warning(err) + } + +} + +func (v *Varnish) addStorageCharts(name string) { + charts := storageChartsTmpl.Copy() + + for _, chart := range *charts { + chart.ID = cleanChartID(fmt.Sprintf(chart.ID, name)) + chart.Labels = []module.Label{ + {Key: "storage", Value: name}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, name) + } + } + + if err := v.Charts().Add(*charts...); err != nil { + v.Warning(err) + } + +} + +func (v *Varnish) removeBackendCharts(name string) { + px := fmt.Sprintf("backend_%s_", name) + v.removeCharts(cleanChartID(px)) +} + +func (v *Varnish) removeStorageCharts(name string) { + px := fmt.Sprintf("storage_%s_", name) + v.removeCharts(cleanChartID(px)) +} + +func (v *Varnish) removeCharts(prefix string) { + for _, chart := range *v.Charts() { + if strings.HasPrefix(chart.ID, prefix) { + chart.MarkRemove() + chart.MarkNotCreated() + } + } +} + +func cleanChartID(id string) string { + id = strings.ReplaceAll(id, ".", "_") + return strings.ToLower(id) +} diff --git a/src/go/plugin/go.d/modules/varnish/collect.go b/src/go/plugin/go.d/modules/varnish/collect.go new file mode 100644 index 000000000..a50f1647e --- /dev/null +++ b/src/go/plugin/go.d/modules/varnish/collect.go @@ -0,0 +1,182 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package varnish + +import ( + "bufio" + "bytes" + "strconv" + "strings" +) + +func (v *Varnish) collect() (map[string]int64, error) { + bs, err := v.exec.statistics() + if err != nil { + return nil, err + } + + mx := make(map[string]int64) + + if err := v.collectStatistics(mx, bs); err != nil { + return nil, err + } + + return mx, nil +} + +func (v *Varnish) collectStatistics(mx map[string]int64, bs []byte) error { + seenBackends, seenStorages := make(map[string]bool), make(map[string]bool) + + sc := bufio.NewScanner(bytes.NewReader(bs)) + + for sc.Scan() { + line := strings.TrimSpace(sc.Text()) + if line == "" { + continue + } + + parts := strings.Fields(line) + if len(parts) < 4 { + v.Debugf("invalid line format: '%s'. Expected at least 4 fields, skipping line.", line) + continue + } + + fullMetric := parts[0] + valueStr := parts[1] + + category, metric, ok := strings.Cut(fullMetric, ".") + if !ok { + v.Debugf("invalid metric format: '%s'. 
Expected 'category.metric', skipping metric.", fullMetric)
+			continue
+		}
+		value, err := strconv.ParseInt(valueStr, 10, 64)
+		if err != nil {
+			v.Debugf("failed to parse metric '%s' value '%s': %v, skipping metric", fullMetric, valueStr, err)
+			continue
+		}
+
+		switch category {
+		case "MGT":
+			if mgtMetrics[metric] {
+				mx[fullMetric] = value
+			}
+		case "MAIN":
+			if mainMetrics[metric] {
+				mx[fullMetric] = value
+			}
+		case "SMA", "SMF", "MSE":
+			storage, sMetric, ok := strings.Cut(metric, ".")
+			if !ok {
+				v.Debugf("invalid metric format: '%s'. Expected 'type.storage.metric', skipping metric.", fullMetric)
+				continue
+			}
+
+			fullStorage := category + "." + storage
+
+			if storageMetrics[sMetric] {
+				seenStorages[fullStorage] = true
+				mx[fullMetric] = value
+			}
+		case "VBE":
+			// Varnish 4.0.x is not supported (values are 'VBE.default(127.0.0.1,,81).happy')
+			parts := strings.Split(metric, ".")
+			if len(parts) != 3 {
+				v.Debugf("invalid metric format: '%s'. Expected 'VBE.vcl.backend.metric', skipping metric.", fullMetric)
+				continue
+			}
+
+			vcl, backend, bMetric := parts[0], parts[1], parts[2]
+
+			if backendMetrics[bMetric] {
+				seenBackends[vcl+"."+backend] = true
+				mx[fullMetric] = value
+			}
+		}
+	}
+
+	if len(mx) == 0 {
+		return nil
+	}
+
+	for name := range seenStorages {
+		if !v.seenStorages[name] {
+			v.seenStorages[name] = true
+			v.addStorageCharts(name)
+		}
+	}
+	for name := range v.seenStorages {
+		if !seenStorages[name] {
+			delete(v.seenStorages, name)
+			v.removeStorageCharts(name)
+		}
+	}
+
+	for fullName := range seenBackends {
+		if !v.seenBackends[fullName] {
+			v.seenBackends[fullName] = true
+			v.addBackendCharts(fullName)
+		}
+	}
+	for fullName := range v.seenBackends {
+		if !seenBackends[fullName] {
+			delete(v.seenBackends, fullName)
+			v.removeBackendCharts(fullName)
+		}
+	}
+
+	return nil
+}
+
+var mgtMetrics = map[string]bool{
+	"uptime":      true,
+	"child_start": true,
+	"child_exit":  true,
+	"child_stop":  true,
+	"child_died":  true,
+	"child_dump":  true,
+	"child_panic": true,
+}
+
+var mainMetrics = map[string]bool{
+	"sess_conn":         true,
+	"sess_dropped":      true,
+	"client_req":        true,
+	"cache_hit":         true,
+	"cache_hitpass":     true,
+	"cache_miss":        true,
+	"cache_hitmiss":     true,
+	"n_expired":         true,
+	"n_lru_nuked":       true,
+	"n_lru_moved":       true,
+	"n_lru_limited":     true,
+	"threads":           true,
+	"threads_limited":   true,
+	"threads_created":   true,
+	"threads_destroyed": true,
+	"threads_failed":    true,
+	"thread_queue_len":  true,
+	"backend_conn":      true,
+	"backend_unhealthy": true,
+	"backend_busy":      true,
+	"backend_fail":      true,
+	"backend_reuse":     true,
+	"backend_recycle":   true,
+	"backend_retry":     true,
+	"backend_req":       true,
+	"esi_errors":        true,
+	"esi_warnings":      true,
+	"uptime":            true,
+}
+
+var storageMetrics = map[string]bool{
+	"g_space": true,
+	"g_bytes": true,
+	"g_alloc": true,
+}
+
+var backendMetrics = map[string]bool{
+	"bereq_hdrbytes":   true,
+	"bereq_bodybytes":  true,
+	"beresp_hdrbytes":  true,
+	"beresp_bodybytes": true,
+}
diff --git a/src/go/plugin/go.d/modules/varnish/config_schema.json b/src/go/plugin/go.d/modules/varnish/config_schema.json
new file mode 100644
index 000000000..e549c0199
--- /dev/null
+++ b/src/go/plugin/go.d/modules/varnish/config_schema.json
@@ -0,0 +1,51 @@
+{
+  "jsonSchema": {
+    "$schema": "http://json-schema.org/draft-07/schema#",
+    "title": "Varnish collector configuration.",
+    "type": "object",
+    "properties": {
+      "update_every": {
+        "title": "Update every",
+        "description": "Data collection interval, measured in seconds.",
+        "type": "integer",
+ "minimum": 1, + "default": 10 + }, + "timeout": { + "title": "Timeout", + "description": "Timeout for executing the binary, specified in seconds.", + "type": "number", + "minimum": 0.5, + "default": 2 + }, + "instance_name": { + "title": "Instance name", + "description": "Specifies the name of the Varnish instance to collect metrics from.", + "type": "string" + }, + "docker_container": { + "title": "Docker container name", + "description": "Specifies the name of the Docker container where the Varnish instance is running. If set, the `varnishstat` command will be executed within this container.", + "type": "string" + } + }, + "required": [], + "patternProperties": { + "^name$": {} + } + }, + "uiSchema": { + "uiOptions": { + "fullPage": true + }, + "binary_path": { + "ui:help": "If an absolute path is provided, the collector will use it directly; otherwise, it will search for the binary in directories specified in the PATH environment variable." + }, + "timeout": { + "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)." + }, + "instance_name": { + "ui:help": "This corresponds to the `-n` argument used with the [varnishstat](https://varnish-cache.org/docs/trunk/reference/varnishstat.html) command. If not provided, the hostname will be used." + } + } +} diff --git a/src/go/plugin/go.d/modules/varnish/exec.go b/src/go/plugin/go.d/modules/varnish/exec.go new file mode 100644 index 000000000..683d26156 --- /dev/null +++ b/src/go/plugin/go.d/modules/varnish/exec.go @@ -0,0 +1,81 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package varnish + +import ( + "context" + "fmt" + "os/exec" + "strconv" + "time" + + "github.com/netdata/netdata/go/plugins/logger" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/dockerhost" +) + +type varnishstatBinary interface { + statistics() ([]byte, error) +} + +func newVarnishstatExecBinary(binPath string, cfg Config, log *logger.Logger) varnishstatBinary { + return &varnishstatExec{ + Logger: log, + binPath: binPath, + timeout: cfg.Timeout.Duration(), + instanceName: cfg.InstanceName, + } +} + +type varnishstatExec struct { + *logger.Logger + + binPath string + timeout time.Duration + instanceName string +} + +func (e *varnishstatExec) statistics() ([]byte, error) { + ctx, cancel := context.WithTimeout(context.Background(), e.timeout) + defer cancel() + + cmd := exec.CommandContext(ctx, e.binPath, "varnishstat-stats", "--instanceName", e.instanceName) + e.Debugf("executing '%s'", cmd) + + bs, err := cmd.Output() + if err != nil { + return nil, fmt.Errorf("error on '%s': %v", cmd, err) + } + + return bs, nil +} + +func newVarnishstatDockerExecBinary(cfg Config, log *logger.Logger) varnishstatBinary { + return &varnishstatDockerExec{ + Logger: log, + timeout: cfg.Timeout.Duration(), + instanceName: cfg.InstanceName, + container: cfg.DockerContainer, + } +} + +type varnishstatDockerExec struct { + *logger.Logger + + timeout time.Duration + instanceName string + container string +} + +func (e *varnishstatDockerExec) statistics() ([]byte, error) { + ctx, cancel := context.WithTimeout(context.Background(), e.timeout) + defer cancel() + + timeS := strconv.Itoa(max(int(e.timeout.Seconds()), 1)) + + args := []string{"-1", "-t", timeS} + if e.instanceName != "" { + args = append(args, "-n", e.instanceName) + } + + return dockerhost.Exec(ctx, e.container, "varnishstat", args...) 
+} diff --git a/src/go/plugin/go.d/modules/varnish/init.go b/src/go/plugin/go.d/modules/varnish/init.go new file mode 100644 index 000000000..e3f0dd5bf --- /dev/null +++ b/src/go/plugin/go.d/modules/varnish/init.go @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package varnish + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/netdata/netdata/go/plugins/pkg/executable" +) + +func (v *Varnish) initVarnishstatBinary() (varnishstatBinary, error) { + if v.Config.DockerContainer != "" { + return newVarnishstatDockerExecBinary(v.Config, v.Logger), nil + } + + ndsudoPath := filepath.Join(executable.Directory, "ndsudo") + + if _, err := os.Stat(ndsudoPath); err != nil { + return nil, fmt.Errorf("ndsudo executable not found: %v", err) + + } + + varnishstat := newVarnishstatExecBinary(ndsudoPath, v.Config, v.Logger) + + return varnishstat, nil +} diff --git a/src/go/plugin/go.d/modules/varnish/integrations/varnish.md b/src/go/plugin/go.d/modules/varnish/integrations/varnish.md new file mode 100644 index 000000000..ba8288f99 --- /dev/null +++ b/src/go/plugin/go.d/modules/varnish/integrations/varnish.md @@ -0,0 +1,231 @@ + + +# Varnish + + + + + +Plugin: go.d.plugin +Module: varnish + + + +## Overview + +This collector monitors Varnish instances, supporting both the open-source Varnish-Cache and the commercial Varnish-Plus. + +It tracks key performance metrics, along with detailed statistics for Backends (VBE) and Storages (SMF, SMA, MSE). + +It relies on the [`varnishstat`](https://varnish-cache.org/docs/trunk/reference/varnishstat.html) CLI tool but avoids directly executing the binary. +Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment. +This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management. + + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +Automatically detects and monitors Varnish instances running on the host or inside Docker containers. + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Varnish instance + +These metrics refer to the entire monitored application. + +This scope has no labels. 
+ +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| varnish.client_session_connections | accepted, dropped | connections/s | +| varnish.client_requests | received | requests/s | +| varnish.cache_hit_ratio_total | hit, miss, hitpass, hitmiss | percent | +| varnish.cache_hit_ratio_delta | hit, miss, hitpass, hitmiss | percent | +| varnish.cache_expired_objects | expired | objects/s | +| varnish.cache_lru_activity | nuked, moved | objects/s | +| varnish.threads | threads | threads | +| varnish.thread_management_activity | created, failed, destroyed, limited | threads/s | +| varnish.thread_queue_len | queue_length | threads | +| varnish.backends_requests | sent | requests/s | +| varnish.esi_parsing_issues | errors, warnings | issues/s | +| varnish.mgmt_process_uptime | uptime | seconds | +| varnish.child_process_uptime | uptime | seconds | + +### Per Backend + +These metrics refer to the Backend (VBE). + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| varnish.backend_data_transfer | req_header, req_body, resp_header, resp_body | bytes/s | + +### Per Storage + +These metrics refer to the Storage (SMA, SMF, MSE). + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| varnish.storage_space_usage | free, used | bytes | +| varnish.storage_allocated_objects | allocated | objects | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/varnish.conf`. + + +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/varnish.conf +``` +#### Options + +The following options can be defined globally: update_every. + + +
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| timeout | Timeout for executing the binary, specified in seconds. | 2 | no |
+| instance_name | Specifies the name of the Varnish instance to collect metrics from. This corresponds to the `-n` argument used with the [varnishstat](https://varnish-cache.org/docs/trunk/reference/varnishstat.html) command. | | no |
+| docker_container | Specifies the name of the Docker container where the Varnish instance is running. If set, the `varnishstat` command will be executed within this container. | | no |
+
+</details>
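+For example, to collect from a Varnish instance running inside a Docker container (assuming the container is named `my-varnish`):
+
+```yaml
+jobs:
+  - name: varnish
+    docker_container: my-varnish # hypothetical container name
+```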
    + +#### Examples + +##### Custom update_every + +Allows you to override the default data collection interval. + +```yaml +jobs: + - name: varnish + update_every: 5 + +``` + + +## Troubleshooting + +### Debug Mode + +**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature. + +To troubleshoot issues with the `varnish` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m varnish + ``` + +### Getting Logs + +If you're encountering problems with the `varnish` collector, follow these steps to retrieve logs and identify potential issues: + +- **Run the command** specific to your system (systemd, non-systemd, or Docker container). +- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem. + +#### System with systemd + +Use the following command to view logs generated since the last Netdata service restart: + +```bash +journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep varnish +``` + +#### System without systemd + +Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name: + +```bash +grep varnish /var/log/netdata/collector.log +``` + +**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues. + +#### Docker Container + +If your Netdata runs in a Docker container named "netdata" (replace if different), use this command: + +```bash +docker logs netdata 2>&1 | grep varnish +``` + + diff --git a/src/go/plugin/go.d/modules/varnish/metadata.yaml b/src/go/plugin/go.d/modules/varnish/metadata.yaml new file mode 100644 index 000000000..874bf360f --- /dev/null +++ b/src/go/plugin/go.d/modules/varnish/metadata.yaml @@ -0,0 +1,224 @@ +plugin_name: go.d.plugin +modules: + - meta: + plugin_name: go.d.plugin + module_name: varnish + monitored_instance: + name: Varnish + link: https://varnish-cache.org/ + categories: + - data-collection.web-servers-and-web-proxies + icon_filename: "varnish.svg" + related_resources: + integrations: + list: [] + info_provided_to_referring_integrations: + description: "" + keywords: + - varnish + - varnishstat + - varnishd + - cache + - web server + - web cache + most_popular: false + overview: + data_collection: + metrics_description: | + This collector monitors Varnish instances, supporting both the open-source Varnish-Cache and the commercial Varnish-Plus. + + It tracks key performance metrics, along with detailed statistics for Backends (VBE) and Storages (SMF, SMA, MSE). + + It relies on the [`varnishstat`](https://varnish-cache.org/docs/trunk/reference/varnishstat.html) CLI tool but avoids directly executing the binary. + Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment. 
+ This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management. + method_description: "" + supported_platforms: + include: [] + exclude: [] + multi_instance: true + additional_permissions: + description: "" + default_behavior: + auto_detection: + description: | + Automatically detects and monitors Varnish instances running on the host or inside Docker containers. + limits: + description: "" + performance_impact: + description: "" + setup: + prerequisites: + list: [] + configuration: + file: + name: go.d/varnish.conf + options: + description: | + The following options can be defined globally: update_every. + folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 10 + required: false + - name: timeout + description: Timeout for executing the binary, specified in seconds. + default_value: 2 + required: false + - name: instance_name + description: "Specifies the name of the Varnish instance to collect metrics from. This corresponds to the `-n` argument used with the [varnishstat](https://varnish-cache.org/docs/trunk/reference/varnishstat.html) command." + default_value: "" + required: false + - name: docker_container + description: "Specifies the name of the Docker container where the Varnish instance is running. If set, the `varnishstat` command will be executed within this container." + default_value: "" + required: false + examples: + folding: + title: "" + enabled: false + list: + - name: Custom update_every + description: Allows you to override the default data collection interval. + config: | + jobs: + - name: varnish + update_every: 5 + troubleshooting: + problems: + list: [] + alerts: [] + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: global + description: "These metrics refer to the entire monitored application." 
+ labels: [] + metrics: + - name: varnish.client_session_connections + description: Connections Statistics + unit: "connections/s" + chart_type: line + dimensions: + - name: accepted + - name: dropped + - name: varnish.client_requests + description: Client Requests + unit: "requests/s" + chart_type: line + dimensions: + - name: received + - name: varnish.cache_hit_ratio_total + description: Cache Hit Ratio Total + unit: "percent" + chart_type: stacked + dimensions: + - name: hit + - name: miss + - name: hitpass + - name: hitmiss + - name: varnish.cache_hit_ratio_delta + description: Cache Hit Ratio Current Poll + unit: "percent" + chart_type: stacked + dimensions: + - name: hit + - name: miss + - name: hitpass + - name: hitmiss + - name: varnish.cache_expired_objects + description: Cache Expired Objects + unit: "objects/s" + chart_type: line + dimensions: + - name: expired + - name: varnish.cache_lru_activity + description: Cache LRU Activity + unit: "objects/s" + chart_type: line + dimensions: + - name: nuked + - name: moved + - name: varnish.threads + description: Threads In All Pools + unit: "threads" + chart_type: line + dimensions: + - name: threads + - name: varnish.thread_management_activity + description: Thread Management Activity + unit: "threads/s" + chart_type: line + dimensions: + - name: created + - name: failed + - name: destroyed + - name: limited + - name: varnish.thread_queue_len + description: Session Queue Length + unit: "threads" + chart_type: line + dimensions: + - name: queue_length + - name: varnish.backends_requests + description: Backend Requests + unit: "requests/s" + chart_type: line + dimensions: + - name: sent + - name: varnish.esi_parsing_issues + description: ESI Parsing Issues + unit: "issues/s" + chart_type: line + dimensions: + - name: errors + - name: warnings + - name: varnish.mgmt_process_uptime + description: Management Process Uptime + unit: "seconds" + chart_type: line + dimensions: + - name: uptime + - name: varnish.child_process_uptime + description: Child Process Uptime + unit: "seconds" + chart_type: line + dimensions: + - name: uptime + - name: Backend + description: "These metrics refer to the Backend (VBE)." + labels: [] + metrics: + - name: varnish.backend_data_transfer + description: Backend Data Transfer + unit: "bytes/s" + chart_type: area + dimensions: + - name: req_header + - name: req_body + - name: resp_header + - name: resp_body + - name: Storage + description: "These metrics refer to the Storage (SMA, SMF, MSE)." 
+ labels: [] + metrics: + - name: varnish.storage_space_usage + description: Storage Space Usage + unit: "bytes" + chart_type: stacked + dimensions: + - name: free + - name: used + - name: varnish.storage_allocated_objects + description: Storage Allocated Objects + unit: "objects" + chart_type: line + dimensions: + - name: allocated diff --git a/src/go/plugin/go.d/modules/varnish/testdata/config.json b/src/go/plugin/go.d/modules/varnish/testdata/config.json new file mode 100644 index 000000000..93bb73700 --- /dev/null +++ b/src/go/plugin/go.d/modules/varnish/testdata/config.json @@ -0,0 +1,6 @@ +{ + "update_every": 123, + "timeout": 123.123, + "instance_name": "ok", + "docker_container": "ok" +} diff --git a/src/go/plugin/go.d/modules/varnish/testdata/config.yaml b/src/go/plugin/go.d/modules/varnish/testdata/config.yaml new file mode 100644 index 000000000..d7f571707 --- /dev/null +++ b/src/go/plugin/go.d/modules/varnish/testdata/config.yaml @@ -0,0 +1,4 @@ +update_every: 123 +timeout: 123.123 +instance_name: "ok" +docker_container: "ok" diff --git a/src/go/plugin/go.d/modules/varnish/testdata/v7.1/varnishstat.txt b/src/go/plugin/go.d/modules/varnish/testdata/v7.1/varnishstat.txt new file mode 100644 index 000000000..d94a6ff17 --- /dev/null +++ b/src/go/plugin/go.d/modules/varnish/testdata/v7.1/varnishstat.txt @@ -0,0 +1,370 @@ +MGT.uptime 33833 1.00 Management process uptime +MGT.child_start 1 0.00 Child process started +MGT.child_exit 0 0.00 Child process normal exit +MGT.child_stop 0 0.00 Child process unexpected exit +MGT.child_died 0 0.00 Child process died (signal) +MGT.child_dump 0 0.00 Child process core dumped +MGT.child_panic 0 0.00 Child process panic +MAIN.summs 20 0.00 stat summ operations +MAIN.uptime 33834 1.00 Child process uptime +MAIN.sess_conn 4 0.00 Sessions accepted +MAIN.sess_fail 0 0.00 Session accept failures +MAIN.sess_fail_econnaborted 0 0.00 Session accept failures: connection aborted +MAIN.sess_fail_eintr 0 0.00 Session accept failures: interrupted system call +MAIN.sess_fail_emfile 0 0.00 Session accept failures: too many open files +MAIN.sess_fail_ebadf 0 0.00 Session accept failures: bad file descriptor +MAIN.sess_fail_enomem 0 0.00 Session accept failures: not enough memory +MAIN.sess_fail_other 0 0.00 Session accept failures: other +MAIN.client_req_400 0 0.00 Client requests received, subject to 400 errors +MAIN.client_req_417 0 0.00 Client requests received, subject to 417 errors +MAIN.client_req 4 0.00 Good client requests received +MAIN.esi_req 0 0.00 ESI subrequests +MAIN.cache_hit 0 0.00 Cache hits +MAIN.cache_hit_grace 0 0.00 Cache grace hits +MAIN.cache_hitpass 0 0.00 Cache hits for pass. +MAIN.cache_hitmiss 0 0.00 Cache hits for miss. +MAIN.cache_miss 0 0.00 Cache misses +MAIN.beresp_uncacheable 4 0.00 Uncacheable backend responses +MAIN.beresp_shortlived 0 0.00 Shortlived objects +MAIN.backend_conn 2 0.00 Backend conn. success +MAIN.backend_unhealthy 0 0.00 Backend conn. not attempted +MAIN.backend_busy 0 0.00 Backend conn. too many +MAIN.backend_fail 0 0.00 Backend conn. failures +MAIN.backend_reuse 2 0.00 Backend conn. reuses +MAIN.backend_recycle 4 0.00 Backend conn. recycles +MAIN.backend_retry 0 0.00 Backend conn. 
retry +MAIN.fetch_head 0 0.00 Fetch no body (HEAD) +MAIN.fetch_length 2 0.00 Fetch with Length +MAIN.fetch_chunked 0 0.00 Fetch chunked +MAIN.fetch_eof 0 0.00 Fetch EOF +MAIN.fetch_bad 0 0.00 Fetch bad T-E +MAIN.fetch_none 0 0.00 Fetch no body +MAIN.fetch_1xx 0 0.00 Fetch no body (1xx) +MAIN.fetch_204 0 0.00 Fetch no body (204) +MAIN.fetch_304 2 0.00 Fetch no body (304) +MAIN.fetch_failed 0 0.00 Fetch failed (all causes) +MAIN.fetch_no_thread 0 0.00 Fetch failed (no thread) +MAIN.pools 2 . Number of thread pools +MAIN.threads 200 . Total number of threads +MAIN.threads_limited 0 0.00 Threads hit max +MAIN.threads_created 200 0.01 Threads created +MAIN.threads_destroyed 0 0.00 Threads destroyed +MAIN.threads_failed 0 0.00 Thread creation failed +MAIN.thread_queue_len 0 . Length of session queue +MAIN.busy_sleep 0 0.00 Number of requests sent to sleep on busy objhdr +MAIN.busy_wakeup 0 0.00 Number of requests woken after sleep on busy objhdr +MAIN.busy_killed 0 0.00 Number of requests killed after sleep on busy objhdr +MAIN.sess_queued 0 0.00 Sessions queued for thread +MAIN.sess_dropped 0 0.00 Sessions dropped for thread +MAIN.req_dropped 0 0.00 Requests dropped +MAIN.n_object 0 . object structs made +MAIN.n_vampireobject 0 . unresurrected objects +MAIN.n_objectcore 0 . objectcore structs made +MAIN.n_objecthead 0 . objecthead structs made +MAIN.n_backend 2 . Number of backends +MAIN.n_expired 0 0.00 Number of expired objects +MAIN.n_lru_nuked 0 0.00 Number of LRU nuked objects +MAIN.n_lru_moved 0 0.00 Number of LRU moved objects +MAIN.n_lru_limited 0 0.00 Reached nuke_limit +MAIN.losthdr 0 0.00 HTTP header overflows +MAIN.s_sess 4 0.00 Total sessions seen +MAIN.n_pipe 0 . Number of ongoing pipe sessions +MAIN.pipe_limited 0 0.00 Pipes hit pipe_sess_max +MAIN.s_pipe 0 0.00 Total pipe sessions seen +MAIN.s_pass 4 0.00 Total pass-ed requests seen +MAIN.s_fetch 4 0.00 Total backend fetches initiated +MAIN.s_bgfetch 0 0.00 Total backend background fetches initiated +MAIN.s_synth 0 0.00 Total synthetic responses made +MAIN.s_req_hdrbytes 5137 0.15 Request header bytes +MAIN.s_req_bodybytes 0 0.00 Request body bytes +MAIN.s_resp_hdrbytes 969 0.03 Response header bytes +MAIN.s_resp_bodybytes 1170 0.03 Response body bytes +MAIN.s_pipe_hdrbytes 0 0.00 Pipe request header bytes +MAIN.s_pipe_in 0 0.00 Piped bytes from client +MAIN.s_pipe_out 0 0.00 Piped bytes to client +MAIN.sess_closed 0 0.00 Session Closed +MAIN.sess_closed_err 0 0.00 Session Closed with error +MAIN.sess_readahead 0 0.00 Session Read Ahead +MAIN.sess_herd 6 0.00 Session herd +MAIN.sc_rem_close 0 0.00 Session OK REM_CLOSE +MAIN.sc_req_close 0 0.00 Session OK REQ_CLOSE +MAIN.sc_req_http10 0 0.00 Session Err REQ_HTTP10 +MAIN.sc_rx_bad 0 0.00 Session Err RX_BAD +MAIN.sc_rx_body 0 0.00 Session Err RX_BODY +MAIN.sc_rx_junk 0 0.00 Session Err RX_JUNK +MAIN.sc_rx_overflow 0 0.00 Session Err RX_OVERFLOW +MAIN.sc_rx_timeout 0 0.00 Session Err RX_TIMEOUT +MAIN.sc_rx_close_idle 4 0.00 Session Err RX_CLOSE_IDLE +MAIN.sc_tx_pipe 0 0.00 Session OK TX_PIPE +MAIN.sc_tx_error 0 0.00 Session Err TX_ERROR +MAIN.sc_tx_eof 0 0.00 Session OK TX_EOF +MAIN.sc_resp_close 0 0.00 Session OK RESP_CLOSE +MAIN.sc_overload 0 0.00 Session Err OVERLOAD +MAIN.sc_pipe_overflow 0 0.00 Session Err PIPE_OVERFLOW +MAIN.sc_range_short 0 0.00 Session Err RANGE_SHORT +MAIN.sc_req_http20 0 0.00 Session Err REQ_HTTP20 +MAIN.sc_vcl_failure 0 0.00 Session Err VCL_FAILURE +MAIN.client_resp_500 0 0.00 Delivery failed due to insufficient workspace. 
+MAIN.ws_backend_overflow 0 0.00 workspace_backend overflows +MAIN.ws_client_overflow 0 0.00 workspace_client overflows +MAIN.ws_thread_overflow 0 0.00 workspace_thread overflows +MAIN.ws_session_overflow 0 0.00 workspace_session overflows +MAIN.shm_records 22960 0.68 SHM records +MAIN.shm_writes 22612 0.67 SHM writes +MAIN.shm_flushes 0 0.00 SHM flushes due to overflow +MAIN.shm_cont 0 0.00 SHM MTX contention +MAIN.shm_cycles 0 0.00 SHM cycles through buffer +MAIN.backend_req 4 0.00 Backend requests made +MAIN.n_vcl 1 . Number of loaded VCLs in total +MAIN.n_vcl_avail 1 . Number of VCLs available +MAIN.n_vcl_discard 0 . Number of discarded VCLs +MAIN.vcl_fail 0 0.00 VCL failures +MAIN.bans 1 . Count of bans +MAIN.bans_completed 1 . Number of bans marked 'completed' +MAIN.bans_obj 0 . Number of bans using obj.* +MAIN.bans_req 0 . Number of bans using req.* +MAIN.bans_added 1 0.00 Bans added +MAIN.bans_deleted 0 0.00 Bans deleted +MAIN.bans_tested 0 0.00 Bans tested against objects (lookup) +MAIN.bans_obj_killed 0 0.00 Objects killed by bans (lookup) +MAIN.bans_lurker_tested 0 0.00 Bans tested against objects (lurker) +MAIN.bans_tests_tested 0 0.00 Ban tests tested against objects (lookup) +MAIN.bans_lurker_tests_tested 0 0.00 Ban tests tested against objects (lurker) +MAIN.bans_lurker_obj_killed 0 0.00 Objects killed by bans (lurker) +MAIN.bans_lurker_obj_killed_cutoff 0 0.00 Objects killed by bans for cutoff (lurker) +MAIN.bans_dups 0 0.00 Bans superseded by other bans +MAIN.bans_lurker_contention 0 0.00 Lurker gave way for lookup +MAIN.bans_persisted_bytes 16 . Bytes used by the persisted ban lists +MAIN.bans_persisted_fragmentation 0 . Extra bytes in persisted ban lists due to fragmentation +MAIN.n_purges 0 0.00 Number of purge operations executed +MAIN.n_obj_purged 0 0.00 Number of purged objects +MAIN.exp_mailed 0 0.00 Number of objects mailed to expiry thread +MAIN.exp_received 0 0.00 Number of objects received by expiry thread +MAIN.hcb_nolock 0 0.00 HCB Lookups without lock +MAIN.hcb_lock 0 0.00 HCB Lookups with lock +MAIN.hcb_insert 0 0.00 HCB Inserts +MAIN.esi_errors 0 0.00 ESI parse errors (unlock) +MAIN.esi_warnings 0 0.00 ESI parse warnings (unlock) +MAIN.vmods 0 . 
Loaded VMODs +MAIN.n_gzip 0 0.00 Gzip operations +MAIN.n_gunzip 0 0.00 Gunzip operations +MAIN.n_test_gunzip 0 0.00 Test gunzip operations +LCK.ban.creat 1 0.00 Created locks +LCK.ban.destroy 0 0.00 Destroyed locks +LCK.ban.locks 1374 0.04 Lock Operations +LCK.ban.dbg_busy 0 0.00 Contended lock operations +LCK.ban.dbg_try_fail 0 0.00 Contended trylock operations +LCK.busyobj.creat 4 0.00 Created locks +LCK.busyobj.destroy 4 0.00 Destroyed locks +LCK.busyobj.locks 26 0.00 Lock Operations +LCK.busyobj.dbg_busy 0 0.00 Contended lock operations +LCK.busyobj.dbg_try_fail 0 0.00 Contended trylock operations +LCK.cli.creat 1 0.00 Created locks +LCK.cli.destroy 0 0.00 Destroyed locks +LCK.cli.locks 11302 0.33 Lock Operations +LCK.cli.dbg_busy 0 0.00 Contended lock operations +LCK.cli.dbg_try_fail 0 0.00 Contended trylock operations +LCK.director.creat 2 0.00 Created locks +LCK.director.destroy 0 0.00 Destroyed locks +LCK.director.locks 8 0.00 Lock Operations +LCK.director.dbg_busy 0 0.00 Contended lock operations +LCK.director.dbg_try_fail 0 0.00 Contended trylock operations +LCK.exp.creat 1 0.00 Created locks +LCK.exp.destroy 0 0.00 Destroyed locks +LCK.exp.locks 10771 0.32 Lock Operations +LCK.exp.dbg_busy 0 0.00 Contended lock operations +LCK.exp.dbg_try_fail 0 0.00 Contended trylock operations +LCK.hcb.creat 1 0.00 Created locks +LCK.hcb.destroy 0 0.00 Destroyed locks +LCK.hcb.locks 188 0.01 Lock Operations +LCK.hcb.dbg_busy 0 0.00 Contended lock operations +LCK.hcb.dbg_try_fail 0 0.00 Contended trylock operations +LCK.lru.creat 2 0.00 Created locks +LCK.lru.destroy 0 0.00 Destroyed locks +LCK.lru.locks 0 0.00 Lock Operations +LCK.lru.dbg_busy 0 0.00 Contended lock operations +LCK.lru.dbg_try_fail 0 0.00 Contended trylock operations +LCK.mempool.creat 5 0.00 Created locks +LCK.mempool.destroy 0 0.00 Destroyed locks +LCK.mempool.locks 149990 4.43 Lock Operations +LCK.mempool.dbg_busy 0 0.00 Contended lock operations +LCK.mempool.dbg_try_fail 0 0.00 Contended trylock operations +LCK.objhdr.creat 1 0.00 Created locks +LCK.objhdr.destroy 0 0.00 Destroyed locks +LCK.objhdr.locks 41 0.00 Lock Operations +LCK.objhdr.dbg_busy 0 0.00 Contended lock operations +LCK.objhdr.dbg_try_fail 0 0.00 Contended trylock operations +LCK.perpool.creat 2 0.00 Created locks +LCK.perpool.destroy 0 0.00 Destroyed locks +LCK.perpool.locks 460 0.01 Lock Operations +LCK.perpool.dbg_busy 0 0.00 Contended lock operations +LCK.perpool.dbg_try_fail 0 0.00 Contended trylock operations +LCK.pipestat.creat 1 0.00 Created locks +LCK.pipestat.destroy 0 0.00 Destroyed locks +LCK.pipestat.locks 0 0.00 Lock Operations +LCK.pipestat.dbg_busy 0 0.00 Contended lock operations +LCK.pipestat.dbg_try_fail 0 0.00 Contended trylock operations +LCK.probe.creat 1 0.00 Created locks +LCK.probe.destroy 0 0.00 Destroyed locks +LCK.probe.locks 1 0.00 Lock Operations +LCK.probe.dbg_busy 0 0.00 Contended lock operations +LCK.probe.dbg_try_fail 0 0.00 Contended trylock operations +LCK.sess.creat 4 0.00 Created locks +LCK.sess.destroy 4 0.00 Destroyed locks +LCK.sess.locks 12 0.00 Lock Operations +LCK.sess.dbg_busy 0 0.00 Contended lock operations +LCK.sess.dbg_try_fail 0 0.00 Contended trylock operations +LCK.conn_pool.creat 3 0.00 Created locks +LCK.conn_pool.destroy 0 0.00 Destroyed locks +LCK.conn_pool.locks 16 0.00 Lock Operations +LCK.conn_pool.dbg_busy 0 0.00 Contended lock operations +LCK.conn_pool.dbg_try_fail 0 0.00 Contended trylock operations +LCK.vbe.creat 1 0.00 Created locks +LCK.vbe.destroy 0 0.00 Destroyed locks +LCK.vbe.locks 2 0.00 
Lock Operations +LCK.vbe.dbg_busy 0 0.00 Contended lock operations +LCK.vbe.dbg_try_fail 0 0.00 Contended trylock operations +LCK.vcapace.creat 1 0.00 Created locks +LCK.vcapace.destroy 0 0.00 Destroyed locks +LCK.vcapace.locks 0 0.00 Lock Operations +LCK.vcapace.dbg_busy 0 0.00 Contended lock operations +LCK.vcapace.dbg_try_fail 0 0.00 Contended trylock operations +LCK.vcl.creat 1 0.00 Created locks +LCK.vcl.destroy 0 0.00 Destroyed locks +LCK.vcl.locks 25 0.00 Lock Operations +LCK.vcl.dbg_busy 0 0.00 Contended lock operations +LCK.vcl.dbg_try_fail 0 0.00 Contended trylock operations +LCK.vxid.creat 1 0.00 Created locks +LCK.vxid.destroy 0 0.00 Destroyed locks +LCK.vxid.locks 2 0.00 Lock Operations +LCK.vxid.dbg_busy 0 0.00 Contended lock operations +LCK.vxid.dbg_try_fail 0 0.00 Contended trylock operations +LCK.waiter.creat 2 0.00 Created locks +LCK.waiter.destroy 0 0.00 Destroyed locks +LCK.waiter.locks 714 0.02 Lock Operations +LCK.waiter.dbg_busy 0 0.00 Contended lock operations +LCK.waiter.dbg_try_fail 0 0.00 Contended trylock operations +LCK.wq.creat 1 0.00 Created locks +LCK.wq.destroy 0 0.00 Destroyed locks +LCK.wq.locks 34033 1.01 Lock Operations +LCK.wq.dbg_busy 0 0.00 Contended lock operations +LCK.wq.dbg_try_fail 0 0.00 Contended trylock operations +LCK.wstat.creat 1 0.00 Created locks +LCK.wstat.destroy 0 0.00 Destroyed locks +LCK.wstat.locks 11651 0.34 Lock Operations +LCK.wstat.dbg_busy 0 0.00 Contended lock operations +LCK.wstat.dbg_try_fail 0 0.00 Contended trylock operations +MEMPOOL.busyobj.live 0 . In use +MEMPOOL.busyobj.pool 10 . In Pool +MEMPOOL.busyobj.sz_wanted 98304 . Size requested +MEMPOOL.busyobj.sz_actual 98272 . Size allocated +MEMPOOL.busyobj.allocs 4 0.00 Allocations +MEMPOOL.busyobj.frees 4 0.00 Frees +MEMPOOL.busyobj.recycle 4 0.00 Recycled from pool +MEMPOOL.busyobj.timeout 0 0.00 Timed out from pool +MEMPOOL.busyobj.toosmall 0 0.00 Too small to recycle +MEMPOOL.busyobj.surplus 0 0.00 Too many for pool +MEMPOOL.busyobj.randry 0 0.00 Pool ran dry +MEMPOOL.req0.live 0 . In use +MEMPOOL.req0.pool 10 . In Pool +MEMPOOL.req0.sz_wanted 98304 . Size requested +MEMPOOL.req0.sz_actual 98272 . Size allocated +MEMPOOL.req0.allocs 4 0.00 Allocations +MEMPOOL.req0.frees 4 0.00 Frees +MEMPOOL.req0.recycle 4 0.00 Recycled from pool +MEMPOOL.req0.timeout 0 0.00 Timed out from pool +MEMPOOL.req0.toosmall 0 0.00 Too small to recycle +MEMPOOL.req0.surplus 0 0.00 Too many for pool +MEMPOOL.req0.randry 0 0.00 Pool ran dry +MEMPOOL.sess0.live 0 . In use +MEMPOOL.sess0.pool 10 . In Pool +MEMPOOL.sess0.sz_wanted 768 . Size requested +MEMPOOL.sess0.sz_actual 736 . Size allocated +MEMPOOL.sess0.allocs 2 0.00 Allocations +MEMPOOL.sess0.frees 2 0.00 Frees +MEMPOOL.sess0.recycle 2 0.00 Recycled from pool +MEMPOOL.sess0.timeout 2 0.00 Timed out from pool +MEMPOOL.sess0.toosmall 0 0.00 Too small to recycle +MEMPOOL.sess0.surplus 0 0.00 Too many for pool +MEMPOOL.sess0.randry 0 0.00 Pool ran dry +LCK.sma.creat 2 0.00 Created locks +LCK.sma.destroy 0 0.00 Destroyed locks +LCK.sma.locks 12 0.00 Lock Operations +LCK.sma.dbg_busy 0 0.00 Contended lock operations +LCK.sma.dbg_try_fail 0 0.00 Contended trylock operations +SMA.s0.c_req 0 0.00 Allocator requests +SMA.s0.c_fail 0 0.00 Allocator failures +SMA.s0.c_bytes 0 0.00 Bytes allocated +SMA.s0.c_freed 0 0.00 Bytes freed +SMA.s0.g_alloc 0 . Allocations outstanding +SMA.s0.g_bytes 0 . Bytes outstanding +SMA.s0.g_space 268435456 . 
Bytes available +SMA.Transient.c_req 6 0.00 Allocator requests +SMA.Transient.c_fail 0 0.00 Allocator failures +SMA.Transient.c_bytes 2322 0.07 Bytes allocated +SMA.Transient.c_freed 2322 0.07 Bytes freed +SMA.Transient.g_alloc 0 . Allocations outstanding +SMA.Transient.g_bytes 0 . Bytes outstanding +SMA.Transient.g_space 0 . Bytes available +MEMPOOL.req1.live 0 . In use +MEMPOOL.req1.pool 10 . In Pool +MEMPOOL.req1.sz_wanted 98304 . Size requested +MEMPOOL.req1.sz_actual 98272 . Size allocated +MEMPOOL.req1.allocs 2 0.00 Allocations +MEMPOOL.req1.frees 2 0.00 Frees +MEMPOOL.req1.recycle 2 0.00 Recycled from pool +MEMPOOL.req1.timeout 0 0.00 Timed out from pool +MEMPOOL.req1.toosmall 0 0.00 Too small to recycle +MEMPOOL.req1.surplus 0 0.00 Too many for pool +MEMPOOL.req1.randry 0 0.00 Pool ran dry +VBE.boot.default.happy 0 . Happy health probes +VBE.boot.default.bereq_hdrbytes 5214 0.15 Request header bytes +VBE.boot.default.bereq_bodybytes 0 0.00 Request body bytes +VBE.boot.default.beresp_hdrbytes 753 0.02 Response header bytes +VBE.boot.default.beresp_bodybytes 1170 0.03 Response body bytes +VBE.boot.default.pipe_hdrbytes 0 0.00 Pipe request header bytes +VBE.boot.default.pipe_out 0 0.00 Piped bytes to backend +VBE.boot.default.pipe_in 0 0.00 Piped bytes from backend +VBE.boot.default.conn 0 . Concurrent connections used +VBE.boot.default.req 4 0.00 Backend requests sent +VBE.boot.default.unhealthy 0 0.00 Fetches not attempted due to backend being unhealthy +VBE.boot.default.busy 0 0.00 Fetches not attempted due to backend being busy +VBE.boot.default.fail 0 0.00 Connections failed +VBE.boot.default.fail_eacces 0 0.00 Connections failed with EACCES or EPERM +VBE.boot.default.fail_eaddrnotavail 0 0.00 Connections failed with EADDRNOTAVAIL +VBE.boot.default.fail_econnrefused 0 0.00 Connections failed with ECONNREFUSED +VBE.boot.default.fail_enetunreach 0 0.00 Connections failed with ENETUNREACH +VBE.boot.default.fail_etimedout 0 0.00 Connections failed ETIMEDOUT +VBE.boot.default.fail_other 0 0.00 Connections failed for other reason +VBE.boot.default.helddown 0 0.00 Connection opens not attempted +VBE.boot.nginx2.happy 0 . Happy health probes +VBE.boot.nginx2.bereq_hdrbytes 0 0.00 Request header bytes +VBE.boot.nginx2.bereq_bodybytes 0 0.00 Request body bytes +VBE.boot.nginx2.beresp_hdrbytes 0 0.00 Response header bytes +VBE.boot.nginx2.beresp_bodybytes 0 0.00 Response body bytes +VBE.boot.nginx2.pipe_hdrbytes 0 0.00 Pipe request header bytes +VBE.boot.nginx2.pipe_out 0 0.00 Piped bytes to backend +VBE.boot.nginx2.pipe_in 0 0.00 Piped bytes from backend +VBE.boot.nginx2.conn 0 . Concurrent connections used +VBE.boot.nginx2.req 0 0.00 Backend requests sent +VBE.boot.nginx2.unhealthy 0 0.00 Fetches not attempted due to backend being unhealthy +VBE.boot.nginx2.busy 0 0.00 Fetches not attempted due to backend being busy +VBE.boot.nginx2.fail 0 0.00 Connections failed +VBE.boot.nginx2.fail_eacces 0 0.00 Connections failed with EACCES or EPERM +VBE.boot.nginx2.fail_eaddrnotavail 0 0.00 Connections failed with EADDRNOTAVAIL +VBE.boot.nginx2.fail_econnrefused 0 0.00 Connections failed with ECONNREFUSED +VBE.boot.nginx2.fail_enetunreach 0 0.00 Connections failed with ENETUNREACH +VBE.boot.nginx2.fail_etimedout 0 0.00 Connections failed ETIMEDOUT +VBE.boot.nginx2.fail_other 0 0.00 Connections failed for other reason +VBE.boot.nginx2.helddown 0 0.00 Connection opens not attempted +MEMPOOL.sess1.live 0 . In use +MEMPOOL.sess1.pool 10 . In Pool +MEMPOOL.sess1.sz_wanted 768 . 
Size requested +MEMPOOL.sess1.sz_actual 736 . Size allocated +MEMPOOL.sess1.allocs 2 0.00 Allocations +MEMPOOL.sess1.frees 2 0.00 Frees +MEMPOOL.sess1.recycle 2 0.00 Recycled from pool +MEMPOOL.sess1.timeout 2 0.00 Timed out from pool +MEMPOOL.sess1.toosmall 0 0.00 Too small to recycle +MEMPOOL.sess1.surplus 0 0.00 Too many for pool +MEMPOOL.sess1.randry 0 0.00 Pool ran dry diff --git a/src/go/plugin/go.d/modules/varnish/varnish.go b/src/go/plugin/go.d/modules/varnish/varnish.go new file mode 100644 index 000000000..d92662d1b --- /dev/null +++ b/src/go/plugin/go.d/modules/varnish/varnish.go @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package varnish + +import ( + _ "embed" + "errors" + "fmt" + "time" + + "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("varnish", module.Creator{ + JobConfigSchema: configSchema, + Defaults: module.Defaults{ + UpdateEvery: 10, + }, + Create: func() module.Module { return New() }, + Config: func() any { return &Config{} }, + }) +} + +func New() *Varnish { + return &Varnish{ + Config: Config{ + Timeout: confopt.Duration(time.Second * 2), + }, + + seenBackends: make(map[string]bool), + seenStorages: make(map[string]bool), + charts: varnishCharts.Copy(), + } + +} + +type Config struct { + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + Timeout confopt.Duration `yaml:"timeout,omitempty" json:"timeout"` + InstanceName string `yaml:"instance_name,omitempty" json:"instance_name,omitempty"` + DockerContainer string `yaml:"docker_container,omitempty" json:"docker_container,omitempty"` +} + +type Varnish struct { + module.Base + Config `yaml:",inline" json:""` + + charts *module.Charts + + exec varnishstatBinary + + seenBackends map[string]bool + seenStorages map[string]bool +} + +func (v *Varnish) Configuration() any { + return v.Config +} + +func (v *Varnish) Init() error { + vs, err := v.initVarnishstatBinary() + if err != nil { + return fmt.Errorf("init varnishstat exec: %v", err) + } + v.exec = vs + + return nil +} + +func (v *Varnish) Check() error { + mx, err := v.collect() + if err != nil { + return err + } + + if len(mx) == 0 { + return errors.New("no metrics collected") + } + + return nil +} + +func (v *Varnish) Charts() *module.Charts { + return v.charts +} + +func (v *Varnish) Collect() map[string]int64 { + mx, err := v.collect() + if err != nil { + v.Error(err) + } + + if len(mx) == 0 { + return nil + } + + return mx +} + +func (v *Varnish) Cleanup() {} diff --git a/src/go/plugin/go.d/modules/varnish/varnish_test.go b/src/go/plugin/go.d/modules/varnish/varnish_test.go new file mode 100644 index 000000000..ba1dc6c0d --- /dev/null +++ b/src/go/plugin/go.d/modules/varnish/varnish_test.go @@ -0,0 +1,255 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package varnish + +import ( + "errors" + "os" + "testing" + + "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + dataConfigJSON, _ = os.ReadFile("testdata/config.json") + dataConfigYAML, _ = os.ReadFile("testdata/config.yaml") + + dataVer71Varnishstat, _ = os.ReadFile("testdata/v7.1/varnishstat.txt") +) + +func Test_testDataIsValid(t *testing.T) { + for name, data := range map[string][]byte{ + "dataConfigJSON": dataConfigJSON, + "dataConfigYAML": dataConfigYAML, + 
"dataVer71Varnishstat": dataVer71Varnishstat, + } { + require.NotNil(t, data, name) + } +} + +func TestVarnish_Configuration(t *testing.T) { + module.TestConfigurationSerialize(t, &Varnish{}, dataConfigJSON, dataConfigYAML) +} + +func TestVarnish_Init(t *testing.T) { + tests := map[string]struct { + config Config + wantFail bool + }{ + "fails if failed to locate ndsudo": { + wantFail: true, + config: New().Config, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + varnish := New() + varnish.Config = test.config + + if test.wantFail { + assert.Error(t, varnish.Init()) + } else { + assert.NoError(t, varnish.Init()) + } + }) + } +} + +func TestVarnish_Cleanup(t *testing.T) { + tests := map[string]struct { + prepare func() *Varnish + }{ + "not initialized exec": { + prepare: func() *Varnish { + return New() + }, + }, + "after check": { + prepare: func() *Varnish { + varnish := New() + varnish.exec = prepareMockOkVer71() + _ = varnish.Check() + return varnish + }, + }, + "after collect": { + prepare: func() *Varnish { + varnish := New() + varnish.exec = prepareMockOkVer71() + _ = varnish.Collect() + return varnish + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + varnish := test.prepare() + + assert.NotPanics(t, varnish.Cleanup) + }) + } +} + +func TestVarnish_Charts(t *testing.T) { + assert.NotNil(t, New().Charts()) +} + +func TestVarnish_Check(t *testing.T) { + tests := map[string]struct { + prepareMock func() *mockVarnishstatExec + wantFail bool + }{ + "success case": { + wantFail: false, + prepareMock: prepareMockOkVer71, + }, + "error on varnishstat call": { + wantFail: true, + prepareMock: prepareMockErrOnVarnishstatCall, + }, + "unexpected response": { + wantFail: true, + prepareMock: prepareMockUnexpectedResponse, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + varnish := New() + varnish.exec = test.prepareMock() + + if test.wantFail { + assert.Error(t, varnish.Check()) + } else { + assert.NoError(t, varnish.Check()) + } + }) + } +} + +func TestVarnish_Collect(t *testing.T) { + tests := map[string]struct { + prepareMock func() *mockVarnishstatExec + wantMetrics map[string]int64 + wantCharts int + }{ + "success case varnish v7.1": { + prepareMock: prepareMockOkVer71, + wantCharts: len(varnishCharts) + len(backendChartsTmpl)*2 + len(storageChartsTmpl)*2, + wantMetrics: map[string]int64{ + "MAIN.backend_busy": 0, + "MAIN.backend_conn": 2, + "MAIN.backend_fail": 0, + "MAIN.backend_recycle": 4, + "MAIN.backend_req": 4, + "MAIN.backend_retry": 0, + "MAIN.backend_reuse": 2, + "MAIN.backend_unhealthy": 0, + "MAIN.cache_hit": 0, + "MAIN.cache_hitmiss": 0, + "MAIN.cache_hitpass": 0, + "MAIN.cache_miss": 0, + "MAIN.client_req": 4, + "MAIN.esi_errors": 0, + "MAIN.esi_warnings": 0, + "MAIN.n_expired": 0, + "MAIN.n_lru_limited": 0, + "MAIN.n_lru_moved": 0, + "MAIN.n_lru_nuked": 0, + "MAIN.sess_conn": 4, + "MAIN.sess_dropped": 0, + "MAIN.thread_queue_len": 0, + "MAIN.threads": 200, + "MAIN.threads_created": 200, + "MAIN.threads_destroyed": 0, + "MAIN.threads_failed": 0, + "MAIN.threads_limited": 0, + "MAIN.uptime": 33834, + "MGT.child_died": 0, + "MGT.child_dump": 0, + "MGT.child_exit": 0, + "MGT.child_panic": 0, + "MGT.child_start": 1, + "MGT.child_stop": 0, + "MGT.uptime": 33833, + "SMA.Transient.g_alloc": 0, + "SMA.Transient.g_bytes": 0, + "SMA.Transient.g_space": 0, + "SMA.s0.g_alloc": 0, + "SMA.s0.g_bytes": 0, + "SMA.s0.g_space": 268435456, + "VBE.boot.default.bereq_bodybytes": 0, + 
"VBE.boot.default.bereq_hdrbytes": 5214, + "VBE.boot.default.beresp_bodybytes": 1170, + "VBE.boot.default.beresp_hdrbytes": 753, + "VBE.boot.nginx2.bereq_bodybytes": 0, + "VBE.boot.nginx2.bereq_hdrbytes": 0, + "VBE.boot.nginx2.beresp_bodybytes": 0, + "VBE.boot.nginx2.beresp_hdrbytes": 0, + }, + }, + "error on varnishstat call": { + prepareMock: prepareMockErrOnVarnishstatCall, + }, + "unexpected response": { + prepareMock: prepareMockUnexpectedResponse, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + varnish := New() + varnish.exec = test.prepareMock() + + mx := varnish.Collect() + + assert.Equal(t, test.wantMetrics, mx) + + if len(test.wantMetrics) > 0 { + assert.Equal(t, test.wantCharts, len(*varnish.Charts())) + module.TestMetricsHasAllChartsDims(t, varnish.Charts(), mx) + } + }) + } +} + +func prepareMockOkVer71() *mockVarnishstatExec { + return &mockVarnishstatExec{ + dataVarnishstat: dataVer71Varnishstat, + } +} + +func prepareMockErrOnVarnishstatCall() *mockVarnishstatExec { + return &mockVarnishstatExec{ + dataVarnishstat: nil, + errOnVarnishstatCall: true, + } +} + +func prepareMockUnexpectedResponse() *mockVarnishstatExec { + return &mockVarnishstatExec{ + dataVarnishstat: []byte(` +Lorem ipsum dolor sit amet, consectetur adipiscing elit. +Nulla malesuada erat id magna mattis, eu viverra tellus rhoncus. +Fusce et felis pulvinar, posuere sem non, porttitor eros. +`), + } +} + +type mockVarnishstatExec struct { + errOnVarnishstatCall bool + dataVarnishstat []byte +} + +func (m *mockVarnishstatExec) statistics() ([]byte, error) { + if m.errOnVarnishstatCall { + return nil, errors.New("mock statistics() error") + } + + return m.dataVarnishstat, nil +} diff --git a/src/go/plugin/go.d/modules/vcsa/client/client.go b/src/go/plugin/go.d/modules/vcsa/client/client.go index ea0dd1618..97d4102da 100644 --- a/src/go/plugin/go.d/modules/vcsa/client/client.go +++ b/src/go/plugin/go.d/modules/vcsa/client/client.go @@ -5,7 +5,6 @@ package client import ( "encoding/json" "fmt" - "io" "net/http" "sync" @@ -72,7 +71,7 @@ type Client struct { // Login creates a session with the API. This operation exchanges user credentials supplied in the security context // for a session identifier that is to be used for authenticating subsequent calls. func (c *Client) Login() error { - req := web.Request{ + req := web.RequestConfig{ URL: fmt.Sprintf("%s%s", c.url, pathCISSession), Username: c.username, Password: c.password, @@ -89,14 +88,14 @@ func (c *Client) Login() error { // Logout terminates the validity of a session token. func (c *Client) Logout() error { - req := web.Request{ + req := web.RequestConfig{ URL: fmt.Sprintf("%s%s", c.url, pathCISSession), Method: http.MethodDelete, Headers: map[string]string{apiSessIDKey: c.token.get()}, } resp, err := c.doOK(req) - closeBody(resp) + web.CloseBody(resp) c.token.set("") return err } @@ -104,13 +103,13 @@ func (c *Client) Logout() error { // Ping sent a request to VCSA server to ensure the link is operating. // In case of 401 error Ping tries to re authenticate. 
func (c *Client) Ping() error { - req := web.Request{ + req := web.RequestConfig{ URL: fmt.Sprintf("%s%s?~action=get", c.url, pathCISSession), Method: http.MethodPost, Headers: map[string]string{apiSessIDKey: c.token.get()}, } resp, err := c.doOK(req) - defer closeBody(resp) + defer web.CloseBody(resp) if resp != nil && resp.StatusCode == http.StatusUnauthorized { return c.Login() } @@ -118,7 +117,7 @@ func (c *Client) Ping() error { } func (c *Client) health(urlPath string) (string, error) { - req := web.Request{ + req := web.RequestConfig{ URL: fmt.Sprintf("%s%s", c.url, urlPath), Headers: map[string]string{apiSessIDKey: c.token.get()}, } @@ -171,7 +170,7 @@ func (c *Client) System() (string, error) { return c.health(pathHealthSystem) } -func (c *Client) do(req web.Request) (*http.Response, error) { +func (c *Client) do(req web.RequestConfig) (*http.Response, error) { httpReq, err := web.NewHTTPRequest(req) if err != nil { return nil, fmt.Errorf("error on creating http request to %s : %v", req.URL, err) @@ -179,7 +178,7 @@ func (c *Client) do(req web.Request) (*http.Response, error) { return c.httpClient.Do(httpReq) } -func (c *Client) doOK(req web.Request) (*http.Response, error) { +func (c *Client) doOK(req web.RequestConfig) (*http.Response, error) { resp, err := c.do(req) if err != nil { return nil, err @@ -191,9 +190,9 @@ func (c *Client) doOK(req web.Request) (*http.Response, error) { return resp, nil } -func (c *Client) doOKWithDecode(req web.Request, dst interface{}) error { +func (c *Client) doOKWithDecode(req web.RequestConfig, dst any) error { resp, err := c.doOK(req) - defer closeBody(resp) + defer web.CloseBody(resp) if err != nil { return err } @@ -204,10 +203,3 @@ func (c *Client) doOKWithDecode(req web.Request, dst interface{}) error { } return nil } - -func closeBody(resp *http.Response) { - if resp != nil && resp.Body != nil { - _, _ = io.Copy(io.Discard, resp.Body) - _ = resp.Body.Close() - } -} diff --git a/src/go/plugin/go.d/modules/vcsa/config_schema.json b/src/go/plugin/go.d/modules/vcsa/config_schema.json index 3302794c6..ced5667b8 100644 --- a/src/go/plugin/go.d/modules/vcsa/config_schema.json +++ b/src/go/plugin/go.d/modules/vcsa/config_schema.json @@ -104,7 +104,6 @@ "required": [ "url" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/vcsa/init.go b/src/go/plugin/go.d/modules/vcsa/init.go index 20631ab48..6af0178a1 100644 --- a/src/go/plugin/go.d/modules/vcsa/init.go +++ b/src/go/plugin/go.d/modules/vcsa/init.go @@ -20,7 +20,7 @@ func (vc *VCSA) validateConfig() error { } func (vc *VCSA) initHealthClient() (*client.Client, error) { - httpClient, err := web.NewHTTPClient(vc.Client) + httpClient, err := web.NewHTTPClient(vc.ClientConfig) if err != nil { return nil, err } diff --git a/src/go/plugin/go.d/modules/vcsa/integrations/vcenter_server_appliance.md b/src/go/plugin/go.d/modules/vcsa/integrations/vcenter_server_appliance.md index 99517af3e..303cb0f0f 100644 --- a/src/go/plugin/go.d/modules/vcsa/integrations/vcenter_server_appliance.md +++ b/src/go/plugin/go.d/modules/vcsa/integrations/vcenter_server_appliance.md @@ -148,8 +148,8 @@ No action required. The configuration file name for this integration is `go.d/vcsa.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/vcsa/vcsa.go b/src/go/plugin/go.d/modules/vcsa/vcsa.go index aa12d7c60..d93e707a2 100644 --- a/src/go/plugin/go.d/modules/vcsa/vcsa.go +++ b/src/go/plugin/go.d/modules/vcsa/vcsa.go @@ -5,9 +5,11 @@ package vcsa import ( _ "embed" "errors" + "fmt" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -28,9 +30,9 @@ func init() { func New() *VCSA { return &VCSA{ Config: Config{ - HTTP: web.HTTP{ - Client: web.Client{ - Timeout: web.Duration(time.Second * 5), + HTTPConfig: web.HTTPConfig{ + ClientConfig: web.ClientConfig{ + Timeout: confopt.Duration(time.Second * 5), }, }, }, @@ -39,8 +41,8 @@ func New() *VCSA { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - web.HTTP `yaml:",inline" json:""` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + web.HTTPConfig `yaml:",inline" json:""` } type ( @@ -74,14 +76,12 @@ func (vc *VCSA) Configuration() any { func (vc *VCSA) Init() error { if err := vc.validateConfig(); err != nil { - vc.Error(err) - return err + return fmt.Errorf("invalid config: %v", err) } c, err := vc.initHealthClient() if err != nil { - vc.Errorf("error on creating health client : %vc", err) - return err + return fmt.Errorf("error on creating health client : %v", err) } vc.client = c @@ -94,13 +94,11 @@ func (vc *VCSA) Check() error { err := vc.client.Login() if err != nil { - vc.Error(err) return err } mx, err := vc.collect() if err != nil { - vc.Error(err) return err } diff --git a/src/go/plugin/go.d/modules/vcsa/vcsa_test.go b/src/go/plugin/go.d/modules/vcsa/vcsa_test.go index 2c51723d4..f41a007a0 100644 --- a/src/go/plugin/go.d/modules/vcsa/vcsa_test.go +++ b/src/go/plugin/go.d/modules/vcsa/vcsa_test.go @@ -46,7 +46,7 @@ func TestVCenter_InitErrorOnValidatingInitParameters(t *testing.T) { func TestVCenter_InitErrorOnCreatingClient(t *testing.T) { job := prepareVCSA() - job.Client.TLSConfig.TLSCA = "testdata/tls" + job.ClientConfig.TLSConfig.TLSCA = "testdata/tls" assert.Error(t, job.Init()) } diff --git a/src/go/plugin/go.d/modules/vernemq/charts.go b/src/go/plugin/go.d/modules/vernemq/charts.go index 5d81a26bc..0612dfdc2 100644 --- a/src/go/plugin/go.d/modules/vernemq/charts.go +++ b/src/go/plugin/go.d/modules/vernemq/charts.go @@ -2,910 +2,1189 @@ package vernemq -import "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" +import ( + "fmt" + "strings" -type ( - Charts = module.Charts - Chart = module.Chart - Dims = module.Dims - Dim = module.Dim + "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" ) -var charts = Charts{ - chartOpenSockets.Copy(), - chartSocketEvents.Copy(), - chartClientKeepaliveExpired.Copy(), - chartSocketErrors.Copy(), - chartSocketCloseTimeout.Copy(), - - chartQueueProcesses.Copy(), - chartQueueProcessesEvents.Copy(), - chartQueueProcessesOfflineStorage.Copy(), - chartQueueMessages.Copy(), -
chartQueueUndeliveredMessages.Copy(), - - chartRouterSubscriptions.Copy(), - chartRouterMatchedSubscriptions.Copy(), - chartRouterMemory.Copy(), - - chartAverageSchedulerUtilization.Copy(), - chartSchedulerUtilization.Copy(), - chartSystemProcesses.Copy(), - chartSystemReductions.Copy(), - chartSystemContextSwitches.Copy(), - chartSystemIO.Copy(), - chartSystemRunQueue.Copy(), - chartSystemGCCount.Copy(), - chartSystemGCWordsReclaimed.Copy(), - chartSystemMemoryAllocated.Copy(), - - chartBandwidth.Copy(), - - chartRetainMessages.Copy(), - chartRetainMemoryUsage.Copy(), - - chartClusterCommunicationBandwidth.Copy(), - chartClusterCommunicationDropped.Copy(), - chartNetSplitUnresolved.Copy(), - chartNetSplits.Copy(), - - chartMQTTv5AUTH.Copy(), - chartMQTTv5AUTHReceivedReason.Copy(), - chartMQTTv5AUTHSentReason.Copy(), - - chartMQTTv3v5CONNECT.Copy(), - chartMQTTv3v5CONNACKSentReason.Copy(), - - chartMQTTv3v5DISCONNECT.Copy(), - chartMQTTv5DISCONNECTReceivedReason.Copy(), - chartMQTTv5DISCONNECTSentReason.Copy(), - - chartMQTTv3v5SUBSCRIBE.Copy(), - chartMQTTv3v5SUBSCRIBEError.Copy(), - chartMQTTv3v5SUBSCRIBEAuthError.Copy(), - - chartMQTTv3v5UNSUBSCRIBE.Copy(), - chartMQTTv3v5UNSUBSCRIBEError.Copy(), - - chartMQTTv3v5PUBLISH.Copy(), - chartMQTTv3v5PUBLISHErrors.Copy(), - chartMQTTv3v5PUBLISHAuthErrors.Copy(), - chartMQTTv3v5PUBACK.Copy(), - chartMQTTv5PUBACKReceivedReason.Copy(), - chartMQTTv5PUBACKSentReason.Copy(), - chartMQTTv3v5PUBACKUnexpected.Copy(), - chartMQTTv3v5PUBREC.Copy(), - chartMQTTv5PUBRECReceivedReason.Copy(), - chartMQTTv5PUBRECSentReason.Copy(), - chartMQTTv3PUBRECUnexpected.Copy(), - chartMQTTv3v5PUBREL.Copy(), - chartMQTTv5PUBRELReceivedReason.Copy(), - chartMQTTv5PUBRELSentReason.Copy(), - chartMQTTv3v5PUBCOMP.Copy(), - chartMQTTv5PUBCOMPReceivedReason.Copy(), - chartMQTTv5PUBCOMPSentReason.Copy(), - chartMQTTv3v5PUBCOMPUnexpected.Copy(), - - chartMQTTv3v5PING.Copy(), - - chartUptime.Copy(), +const ( + prioNodeSockets = module.Priority + iota + prioNodeSocketEvents + prioNodeClientKeepaliveExpired + prioNodeSocketCloseTimeout + prioNodeSocketErrors + + prioNodeQueueProcesses + prioNodeQueueProcessesEvents + prioNodeQueueProcessesOfflineStorage + prioNodeQueueMessages + prioNodeQueuedMessages + prioNodeQueueUndeliveredMessages + + prioNodeRouterSubscriptions + prioNodeRouterMatchedSubscriptions + prioNodeRouterMemory + + prioNodeAverageSchedulerUtilization + prioNodeSystemProcesses + prioNodeSystemReductions + prioNodeSystemContext + prioNodeSystemIO + prioNodeSystemRunQueue + prioNodeSystemGCCount + prioNodeSystemGCWordsReclaimed + prioNodeSystemMemoryAllocated + + prioNodeTraffic + + prioNodeRetainMessages + prioNodeRetainMemoryUsage + + prioNodeClusterCommunicationTraffic + prioNodeClusterCommunicationDropped + prioNodeNetSplitUnresolved + prioNodeNetSplits + + prioMqttPublishPackets + prioMqttPublishErrors + prioMqttPublishAuthPackets + + prioMqttPubAckPackets + prioMqttPubAckReceivedReason + prioMqttPubAckSentReason + prioMqttPubAckUnexpectedMessages + + prioMqttPubRecPackets + prioMqttPubRecReceivedReason + prioMqttPubRecSentReason + prioMqttPubRecUnexpectedMessages + + prioMqttPubRelPackets + prioMqttPubRelReceivedReason + prioMqttPubRelSentReason + + prioMqttPubCompPackets + prioMqttPubCompReceivedReason + prioMqttPubCompSentReason + prioMqttPubCompUnexpectedMessages + + prioMqttConnectPackets + prioMqttConnectSentReason + + prioMqttDisconnectPackets + prioMqttDisconnectReceivedReason + prioMqttDisconnectSentReason + + prioMqttSubscribePackets + 
prioMqttSubscribeErrors + prioMqttSubscribeAuthPackets + + prioMqttUnsubscribePackets + prioMqttUnsubscribeErrors + + prioMqttAuthPackets + prioMqttAuthReceivedReason + prioMqttAuthSentReason + + prioMqttPingPackets + + prioNodeUptime +) + +var nodeChartsTmpl = module.Charts{ + nodeOpenSocketsChartTmpl.Copy(), + nodeSocketEventsChartTmpl.Copy(), + nodeSocketCloseTimeoutChartTmpl.Copy(), + nodeSocketErrorsChartTmpl.Copy(), + + nodeQueueProcessesChartTmpl.Copy(), + nodeQueueProcessesEventsChartTmpl.Copy(), + nodeQueueProcessesOfflineStorageChartTmpl.Copy(), + nodeQueueMessagesChartTmpl.Copy(), + nodeQueuedMessagesChartTmpl.Copy(), + nodeQueueUndeliveredMessagesChartTmpl.Copy(), + + nodeRouterSubscriptionsChartTmpl.Copy(), + nodeRouterMatchedSubscriptionsChartTmpl.Copy(), + nodeRouterMemoryChartTmpl.Copy(), + + nodeAverageSchedulerUtilizationChartTmpl.Copy(), + nodeSystemProcessesChartTmpl.Copy(), + nodeSystemReductionsChartTmpl.Copy(), + nodeSystemContextSwitches.Copy(), + nodeSystemIOChartTmpl.Copy(), + nodeSystemRunQueueChartTmpl.Copy(), + nodeSystemGCCountChartTmpl.Copy(), + nodeSystemGCWordsReclaimedChartTmpl.Copy(), + nodeSystemMemoryAllocatedChartTmpl.Copy(), + + nodeTrafficChartTmpl.Copy(), + + nodeRetainMessagesChartsTmpl.Copy(), + nodeRetainMemoryUsageChartTmpl.Copy(), + + nodeClusterCommunicationTrafficChartTmpl.Copy(), + nodeClusterCommunicationDroppedChartTmpl.Copy(), + nodeNetSplitUnresolvedChartTmpl.Copy(), + nodeNetSplitsChartTmpl.Copy(), + + nodeUptimeChartTmpl.Copy(), +} + +var nodeMqtt5ChartsTmpl = module.Charts{ + nodeClientKeepaliveExpiredChartTmpl.Copy(), + + nodeMqttPUBLISHPacketsChartTmpl.Copy(), + nodeMqttPUBLISHErrorsChartTmpl.Copy(), + nodeMqttPUBLISHAuthErrorsChartTmpl.Copy(), + + nodeMqttPUBACKPacketsChartTmpl.Copy(), + nodeMqttPUBACKReceivedByReasonChartTmpl.Copy(), + nodeMqttPUBACKSentByReasonChartTmpl.Copy(), + nodeMqttPUBACKUnexpectedMessagesChartTmpl.Copy(), + + nodeMqttPUBRECPacketsChartTmpl.Copy(), + nodeMqttPUBRECReceivedByReasonChartTmpl.Copy(), + nodeMqttPUBRECSentByReasonChartTmpl.Copy(), + + nodeMqttPUBRELPacketsChartTmpl.Copy(), + nodeMqttPUBRELReceivedByReasonChartTmpl.Copy(), + nodeMqttPUBRELSentByReasonChartTmpl.Copy(), + + nodeMqttPUBCOMPPacketsChartTmpl.Copy(), + nodeMqttPUBCOMPReceivedByReasonChartTmpl.Copy(), + nodeMqttPUBCOMPSentByReasonChartTmpl.Copy(), + nodeMqttPUBCOMPUnexpectedMessagesChartTmpl.Copy(), + + nodeMqttCONNECTPacketsChartTmpl.Copy(), + nodeMqttCONNACKSentByReasonCodeChartTmpl.Copy(), + + nodeMqtt5DISCONNECTPacketsChartTmpl.Copy(), + nodeMqttDISCONNECTReceivedByReasonChartTmpl.Copy(), + nodeMqttDISCONNECTSentByReasonChartTmpl.Copy(), + + nodeMqttSUBSCRIBEPacketsChartTmpl.Copy(), + modeMqttSUBSCRIBEErrorsChartTmpl.Copy(), + nodeMqttSUBSCRIBEAuthErrorsChartTmpl.Copy(), + + nodeMqttUNSUBSCRIBEPacketsChartTmpl.Copy(), + nodeMqttUNSUBSCRIBEErrorsChartTmpl.Copy(), + + nodeMqttAUTHPacketsChartTmpl.Copy(), + nodeMqttAUTHReceivedByReasonChartTmpl.Copy(), + nodeMqttAUTHSentByReasonChartTmpl.Copy(), + + nodeMqttPINGPacketsChartTmpl.Copy(), +} + +var nodeMqtt4ChartsTmpl = module.Charts{ + nodeClientKeepaliveExpiredChartTmpl.Copy(), + + nodeMqttPUBLISHPacketsChartTmpl.Copy(), + nodeMqttPUBLISHErrorsChartTmpl.Copy(), + nodeMqttPUBLISHAuthErrorsChartTmpl.Copy(), + + nodeMqttPUBACKPacketsChartTmpl.Copy(), + nodeMqttPUBACKUnexpectedMessagesChartTmpl.Copy(), + + nodeMqttPUBRECPacketsChartTmpl.Copy(), + nodeMqttPUBRECUnexpectedMessagesChartTmpl.Copy(), + + nodeMqttPUBRELPacketsChartTmpl.Copy(), + + nodeMqttPUBCOMPPacketsChartTmpl.Copy(), + 
nodeMqttPUBCOMPUnexpectedMessagesChartTmpl.Copy(), + + nodeMqttCONNECTPacketsChartTmpl.Copy(), + nodeMqttCONNACKSentByReturnCodeChartTmpl.Copy(), + + nodeMqtt4DISCONNECTPacketsChartTmpl.Copy(), + + nodeMqttSUBSCRIBEPacketsChartTmpl.Copy(), + modeMqttSUBSCRIBEErrorsChartTmpl.Copy(), + + nodeMqttSUBSCRIBEAuthErrorsChartTmpl.Copy(), + nodeMqttUNSUBSCRIBEPacketsChartTmpl.Copy(), + nodeMqttUNSUBSCRIBEErrorsChartTmpl.Copy(), + + nodeMqttPINGPacketsChartTmpl.Copy(), } // Sockets var ( - chartOpenSockets = Chart{ - ID: "sockets", - Title: "Open Sockets", - Units: "sockets", - Fam: "sockets", - Ctx: "vernemq.sockets", - Dims: Dims{ - {ID: "open_sockets", Name: "open"}, - }, - } - chartSocketEvents = Chart{ - ID: "socket_events", - Title: "Socket Open and Close Events", - Units: "events/s", - Fam: "sockets", - Ctx: "vernemq.socket_operations", - Dims: Dims{ - {ID: metricSocketOpen, Name: "open", Algo: module.Incremental}, - {ID: metricSocketClose, Name: "close", Algo: module.Incremental, Mul: -1}, - }, - } - chartClientKeepaliveExpired = Chart{ - ID: "client_keepalive_expired", - Title: "Closed Sockets due to Keepalive Time Expired", - Units: "sockets/s", - Fam: "sockets", - Ctx: "vernemq.client_keepalive_expired", - Dims: Dims{ - {ID: metricClientKeepaliveExpired, Name: "closed", Algo: module.Incremental}, - }, - } - chartSocketCloseTimeout = Chart{ - ID: "socket_close_timeout", - Title: "Closed Sockets due to no CONNECT Frame On Time", - Units: "sockets/s", - Fam: "sockets", - Ctx: "vernemq.socket_close_timeout", - Dims: Dims{ - {ID: metricSocketCloseTimeout, Name: "closed", Algo: module.Incremental}, - }, - } - chartSocketErrors = Chart{ - ID: "socket_errors", - Title: "Socket Errors", - Units: "errors/s", - Fam: "sockets", - Ctx: "vernemq.socket_errors", - Dims: Dims{ - {ID: metricSocketError, Name: "errors", Algo: module.Incremental}, + nodeOpenSocketsChartTmpl = module.Chart{ + ID: "node_%s_sockets", + Title: "Open Sockets", + Units: "sockets", + Fam: "sockets", + Ctx: "vernemq.node_sockets", + Priority: prioNodeSockets, + Dims: module.Dims{ + {ID: dimNode("open_sockets"), Name: "open"}, + }, + } + nodeSocketEventsChartTmpl = module.Chart{ + ID: "node_%s_socket_events", + Title: "Open and Close Socket Events", + Units: "events/s", + Fam: "sockets", + Ctx: "vernemq.node_socket_operations", + Priority: prioNodeSocketEvents, + Dims: module.Dims{ + {ID: dimNode(metricSocketOpen), Name: "open", Algo: module.Incremental}, + {ID: dimNode(metricSocketClose), Name: "close", Algo: module.Incremental, Mul: -1}, + }, + } + nodeClientKeepaliveExpiredChartTmpl = module.Chart{ + ID: "node_%s_mqtt%s_client_keepalive_expired", + Title: "Closed Sockets due to Keepalive Time Expired", + Units: "sockets/s", + Fam: "sockets", + Ctx: "vernemq.node_client_keepalive_expired", + Priority: prioNodeClientKeepaliveExpired, + Dims: module.Dims{ + {ID: dimMqttVer(metricClientKeepaliveExpired), Name: "closed", Algo: module.Incremental}, + }, + } + nodeSocketCloseTimeoutChartTmpl = module.Chart{ + ID: "node_%s_socket_close_timeout", + Title: "Closed Sockets due to no CONNECT Frame On Time", + Units: "sockets/s", + Fam: "sockets", + Ctx: "vernemq.node_socket_close_timeout", + Priority: prioNodeSocketCloseTimeout, + Dims: module.Dims{ + {ID: dimNode(metricSocketCloseTimeout), Name: "closed", Algo: module.Incremental}, + }, + } + nodeSocketErrorsChartTmpl = module.Chart{ + ID: "node_%s_socket_errors", + Title: "Socket Errors", + Units: "errors/s", + Fam: "sockets", + Ctx: "vernemq.node_socket_errors", + Priority: 
prioNodeSocketErrors, + Dims: module.Dims{ + {ID: dimNode(metricSocketError), Name: "errors", Algo: module.Incremental}, }, } ) // Queues var ( - chartQueueProcesses = Chart{ - ID: "queue_processes", - Title: "Living Queues in an Online or an Offline State", - Units: "queue processes", - Fam: "queues", - Ctx: "vernemq.queue_processes", - Dims: Dims{ - {ID: metricQueueProcesses, Name: "queue_processes"}, - }, - } - chartQueueProcessesEvents = Chart{ - ID: "queue_processes_events", - Title: "Queue Processes Setup and Teardown Events", - Units: "events/s", - Fam: "queues", - Ctx: "vernemq.queue_processes_operations", - Dims: Dims{ - {ID: metricQueueSetup, Name: "setup", Algo: module.Incremental}, - {ID: metricQueueTeardown, Name: "teardown", Algo: module.Incremental, Mul: -1}, - }, - } - chartQueueProcessesOfflineStorage = Chart{ - ID: "queue_process_init_from_storage", - Title: "Queue Processes Initialized from Offline Storage", - Units: "queue processes/s", - Fam: "queues", - Ctx: "vernemq.queue_process_init_from_storage", - Dims: Dims{ - {ID: metricQueueInitializedFromStorage, Name: "queue processes", Algo: module.Incremental}, - }, - } - chartQueueMessages = Chart{ - ID: "queue_messages", - Title: "Received and Sent PUBLISH Messages", - Units: "messages/s", - Fam: "queues", - Ctx: "vernemq.queue_messages", - Type: module.Area, - Dims: Dims{ - {ID: metricQueueMessageIn, Name: "received", Algo: module.Incremental}, - {ID: metricQueueMessageOut, Name: "sent", Algo: module.Incremental, Mul: -1}, - }, - } - chartQueueUndeliveredMessages = Chart{ - ID: "queue_undelivered_messages", - Title: "Undelivered PUBLISH Messages", - Units: "messages/s", - Fam: "queues", - Ctx: "vernemq.queue_undelivered_messages", - Type: module.Stacked, - Dims: Dims{ - {ID: metricQueueMessageDrop, Name: "dropped", Algo: module.Incremental}, - {ID: metricQueueMessageExpired, Name: "expired", Algo: module.Incremental}, - {ID: metricQueueMessageUnhandled, Name: "unhandled", Algo: module.Incremental}, + nodeQueueProcessesChartTmpl = module.Chart{ + ID: "node_%s_queue_processes", + Title: "Living Queues in an Online or an Offline State", + Units: "queue processes", + Fam: "queues", + Ctx: "vernemq.node_queue_processes", + Priority: prioNodeQueueProcesses, + Dims: module.Dims{ + {ID: dimNode(metricQueueProcesses), Name: "queue_processes"}, + }, + } + nodeQueueProcessesEventsChartTmpl = module.Chart{ + ID: "node_%s_queue_processes_events", + Title: "Queue Processes Setup and Teardown Events", + Units: "events/s", + Fam: "queues", + Ctx: "vernemq.node_queue_processes_operations", + Priority: prioNodeQueueProcessesEvents, + Dims: module.Dims{ + {ID: dimNode(metricQueueSetup), Name: "setup", Algo: module.Incremental}, + {ID: dimNode(metricQueueTeardown), Name: "teardown", Algo: module.Incremental, Mul: -1}, + }, + } + nodeQueueProcessesOfflineStorageChartTmpl = module.Chart{ + ID: "node_%s_queue_process_init_from_storage", + Title: "Queue Processes Initialized from Offline Storage", + Units: "queue processes/s", + Fam: "queues", + Ctx: "vernemq.node_queue_process_init_from_storage", + Priority: prioNodeQueueProcessesOfflineStorage, + Dims: module.Dims{ + {ID: dimNode(metricQueueInitializedFromStorage), Name: "queue processes", Algo: module.Incremental}, + }, + } + nodeQueueMessagesChartTmpl = module.Chart{ + ID: "node_%s_queue_messages", + Title: "Received and Sent PUBLISH Messages", + Units: "messages/s", + Fam: "queues", + Ctx: "vernemq.node_queue_messages", + Type: module.Area, + Priority: prioNodeQueueMessages, + Dims: 
module.Dims{ + {ID: dimNode(metricQueueMessageIn), Name: "received", Algo: module.Incremental}, + {ID: dimNode(metricQueueMessageOut), Name: "sent", Algo: module.Incremental, Mul: -1}, + }, + } + nodeQueuedMessagesChartTmpl = module.Chart{ + ID: "node_%s_queued_messages", + Title: "Queued PUBLISH Messages", + Units: "messages", + Fam: "queues", + Ctx: "vernemq.node_queued_messages", + Type: module.Line, + Priority: prioNodeQueuedMessages, + Dims: module.Dims{ + {ID: dimNode("queued_messages"), Name: "queued"}, + }, + } + nodeQueueUndeliveredMessagesChartTmpl = module.Chart{ + ID: "node_%s_queue_undelivered_messages", + Title: "Undelivered PUBLISH Messages", + Units: "messages/s", + Fam: "queues", + Ctx: "vernemq.node_queue_undelivered_messages", + Type: module.Stacked, + Priority: prioNodeQueueUndeliveredMessages, + Dims: module.Dims{ + {ID: dimNode(metricQueueMessageDrop), Name: "dropped", Algo: module.Incremental}, + {ID: dimNode(metricQueueMessageExpired), Name: "expired", Algo: module.Incremental}, + {ID: dimNode(metricQueueMessageUnhandled), Name: "unhandled", Algo: module.Incremental}, }, } ) // Subscriptions var ( - chartRouterSubscriptions = Chart{ - ID: "router_subscriptions", - Title: "Subscriptions in the Routing Table", - Units: "subscriptions", - Fam: "subscriptions", - Ctx: "vernemq.router_subscriptions", - Dims: Dims{ - {ID: metricRouterSubscriptions, Name: "subscriptions"}, - }, - } - chartRouterMatchedSubscriptions = Chart{ - ID: "router_matched_subscriptions", - Title: "Matched Subscriptions", - Units: "subscriptions/s", - Fam: "subscriptions", - Ctx: "vernemq.router_matched_subscriptions", - Dims: Dims{ - {ID: metricRouterMatchesLocal, Name: "local", Algo: module.Incremental}, - {ID: metricRouterMatchesRemote, Name: "remote", Algo: module.Incremental}, - }, - } - chartRouterMemory = Chart{ - ID: "router_memory", - Title: "Routing Table Memory Usage", - Units: "KiB", - Fam: "subscriptions", - Ctx: "vernemq.router_memory", - Type: module.Area, - Dims: Dims{ - {ID: metricRouterMemory, Name: "used", Div: 1024}, + nodeRouterSubscriptionsChartTmpl = module.Chart{ + ID: "node_%s_router_subscriptions", + Title: "Subscriptions in the Routing Table", + Units: "subscriptions", + Fam: "subscriptions", + Ctx: "vernemq.node_router_subscriptions", + Priority: prioNodeRouterSubscriptions, + Dims: module.Dims{ + {ID: dimNode(metricRouterSubscriptions), Name: "subscriptions"}, + }, + } + nodeRouterMatchedSubscriptionsChartTmpl = module.Chart{ + ID: "node_%s_router_matched_subscriptions", + Title: "Matched Subscriptions", + Units: "subscriptions/s", + Fam: "subscriptions", + Ctx: "vernemq.node_router_matched_subscriptions", + Priority: prioNodeRouterMatchedSubscriptions, + Dims: module.Dims{ + {ID: dimNode(metricRouterMatchesLocal), Name: "local", Algo: module.Incremental}, + {ID: dimNode(metricRouterMatchesRemote), Name: "remote", Algo: module.Incremental}, + }, + } + nodeRouterMemoryChartTmpl = module.Chart{ + ID: "node_%s_router_memory", + Title: "Routing Table Memory Usage", + Units: "bytes", + Fam: "subscriptions", + Ctx: "vernemq.node_router_memory", + Type: module.Area, + Priority: prioNodeRouterMemory, + Dims: module.Dims{ + {ID: dimNode(metricRouterMemory), Name: "used"}, }, } ) // Erlang VM var ( - chartAverageSchedulerUtilization = Chart{ - ID: "average_scheduler_utilization", - Title: "Average Scheduler Utilization", - Units: "percentage", - Fam: "erlang vm", - Ctx: "vernemq.average_scheduler_utilization", - Type: module.Area, - Dims: Dims{ - {ID: metricSystemUtilization, Name: 
"utilization"}, - }, - } - chartSchedulerUtilization = Chart{ - ID: "scheduler_utilization", - Title: "Scheduler Utilization", - Units: "percentage", - Fam: "erlang vm", - Type: module.Stacked, - Ctx: "vernemq.system_utilization_scheduler", - } - chartSystemProcesses = Chart{ - ID: "system_processes", - Title: "Erlang Processes", - Units: "processes", - Fam: "erlang vm", - Ctx: "vernemq.system_processes", - Dims: Dims{ - {ID: metricSystemProcessCount, Name: "processes"}, - }, - } - chartSystemReductions = Chart{ - ID: "system_reductions", - Title: "Reductions", - Units: "ops/s", - Fam: "erlang vm", - Ctx: "vernemq.system_reductions", - Dims: Dims{ - {ID: metricSystemReductions, Name: "reductions", Algo: module.Incremental}, - }, - } - chartSystemContextSwitches = Chart{ - ID: "system_context_switches", - Title: "Context Switches", - Units: "ops/s", - Fam: "erlang vm", - Ctx: "vernemq.system_context_switches", - Dims: Dims{ - {ID: metricSystemContextSwitches, Name: "context switches", Algo: module.Incremental}, - }, - } - chartSystemIO = Chart{ - ID: "system_io", - Title: "Received and Sent Traffic through Ports", - Units: "kilobits/s", - Fam: "erlang vm", - Ctx: "vernemq.system_io", - Type: module.Area, - Dims: Dims{ - {ID: metricSystemIOIn, Name: "received", Algo: module.Incremental, Mul: 8, Div: 1024}, - {ID: metricSystemIOOut, Name: "sent", Algo: module.Incremental, Mul: 8, Div: -1024}, - }, - } - chartSystemRunQueue = Chart{ - ID: "system_run_queue", - Title: "Processes that are Ready to Run on All Run-Queues", - Units: "processes", - Fam: "erlang vm", - Ctx: "vernemq.system_run_queue", - Dims: Dims{ - {ID: metricSystemRunQueue, Name: "ready"}, - }, - } - chartSystemGCCount = Chart{ - ID: "system_gc_count", - Title: "GC Count", - Units: "ops/s", - Fam: "erlang vm", - Ctx: "vernemq.system_gc_count", - Dims: Dims{ - {ID: metricSystemGCCount, Name: "gc", Algo: module.Incremental}, - }, - } - chartSystemGCWordsReclaimed = Chart{ - ID: "system_gc_words_reclaimed", - Title: "GC Words Reclaimed", - Units: "ops/s", - Fam: "erlang vm", - Ctx: "vernemq.system_gc_words_reclaimed", - Dims: Dims{ - {ID: metricSystemWordsReclaimedByGC, Name: "words reclaimed", Algo: module.Incremental}, - }, - } - chartSystemMemoryAllocated = Chart{ - ID: "system_allocated_memory", - Title: "Memory Allocated by the Erlang Processes and by the Emulator", - Units: "KiB", - Fam: "erlang vm", - Ctx: "vernemq.system_allocated_memory", - Type: module.Stacked, - Dims: Dims{ - {ID: metricVMMemoryProcesses, Name: "processes", Div: 1024}, - {ID: metricVMMemorySystem, Name: "system", Div: 1024}, + nodeAverageSchedulerUtilizationChartTmpl = module.Chart{ + ID: "node_%s_average_scheduler_utilization", + Title: "Average Scheduler Utilization", + Units: "percentage", + Fam: "erlang vm", + Ctx: "vernemq.node_average_scheduler_utilization", + Type: module.Area, + Priority: prioNodeAverageSchedulerUtilization, + Dims: module.Dims{ + {ID: dimNode(metricSystemUtilization), Name: "utilization"}, + }, + } + nodeSystemProcessesChartTmpl = module.Chart{ + ID: "node_%s_system_processes", + Title: "Erlang Processes", + Units: "processes", + Fam: "erlang vm", + Ctx: "vernemq.node_system_processes", + Priority: prioNodeSystemProcesses, + Dims: module.Dims{ + {ID: dimNode(metricSystemProcessCount), Name: "processes"}, + }, + } + nodeSystemReductionsChartTmpl = module.Chart{ + ID: "node_%s_system_reductions", + Title: "Reductions", + Units: "ops/s", + Fam: "erlang vm", + Ctx: "vernemq.node_system_reductions", + Priority: 
prioNodeSystemReductions, + Dims: module.Dims{ + {ID: dimNode(metricSystemReductions), Name: "reductions", Algo: module.Incremental}, + }, + } + nodeSystemContextSwitches = module.Chart{ + ID: "node_%s_system_context_switches", + Title: "Context Switches", + Units: "ops/s", + Fam: "erlang vm", + Ctx: "vernemq.node_system_context_switches", + Priority: prioNodeSystemContext, + Dims: module.Dims{ + {ID: dimNode(metricSystemContextSwitches), Name: "context switches", Algo: module.Incremental}, + }, + } + nodeSystemIOChartTmpl = module.Chart{ + ID: "node_%s_system_io", + Title: "Received and Sent Traffic through Ports", + Units: "bytes/s", + Fam: "erlang vm", + Ctx: "vernemq.node_system_io", + Type: module.Area, + Priority: prioNodeSystemIO, + Dims: module.Dims{ + {ID: dimNode(metricSystemIOIn), Name: "received", Algo: module.Incremental}, + {ID: dimNode(metricSystemIOOut), Name: "sent", Algo: module.Incremental, Mul: -1}, + }, + } + nodeSystemRunQueueChartTmpl = module.Chart{ + ID: "node_%s_system_run_queue", + Title: "Processes that are Ready to Run on All Run-Queues", + Units: "processes", + Fam: "erlang vm", + Ctx: "vernemq.node_system_run_queue", + Priority: prioNodeSystemRunQueue, + Dims: module.Dims{ + {ID: dimNode(metricSystemRunQueue), Name: "ready"}, + }, + } + nodeSystemGCCountChartTmpl = module.Chart{ + ID: "node_%s_system_gc_count", + Title: "GC Count", + Units: "ops/s", + Fam: "erlang vm", + Ctx: "vernemq.node_system_gc_count", + Priority: prioNodeSystemGCCount, + Dims: module.Dims{ + {ID: dimNode(metricSystemGCCount), Name: "gc", Algo: module.Incremental}, + }, + } + nodeSystemGCWordsReclaimedChartTmpl = module.Chart{ + ID: "node_%s_system_gc_words_reclaimed", + Title: "GC Words Reclaimed", + Units: "ops/s", + Fam: "erlang vm", + Ctx: "vernemq.node_system_gc_words_reclaimed", + Priority: prioNodeSystemGCWordsReclaimed, + Dims: module.Dims{ + {ID: dimNode(metricSystemWordsReclaimedByGC), Name: "words reclaimed", Algo: module.Incremental}, + }, + } + nodeSystemMemoryAllocatedChartTmpl = module.Chart{ + ID: "node_%s_system_allocated_memory", + Title: "Memory Allocated by the Erlang Processes and by the Emulator", + Units: "bytes", + Fam: "erlang vm", + Ctx: "vernemq.node_system_allocated_memory", + Type: module.Stacked, + Priority: prioNodeSystemMemoryAllocated, + Dims: module.Dims{ + {ID: dimNode(metricVMMemoryProcesses), Name: "processes"}, + {ID: dimNode(metricVMMemorySystem), Name: "system"}, }, } ) -// Bandwidth +// Traffic var ( - chartBandwidth = Chart{ - ID: "bandwidth", - Title: "Bandwidth", - Units: "kilobits/s", - Fam: "bandwidth", - Ctx: "vernemq.bandwidth", - Type: module.Area, - Dims: Dims{ - {ID: metricBytesReceived, Name: "received", Algo: module.Incremental, Mul: 8, Div: 1024}, - {ID: metricBytesSent, Name: "sent", Algo: module.Incremental, Mul: 8, Div: -1024}, + nodeTrafficChartTmpl = module.Chart{ + ID: "node_%s_traffic", + Title: "Node Traffic", + Units: "bytes/s", + Fam: "traffic", + Ctx: "vernemq.node_traffic", + Type: module.Area, + Priority: prioNodeTraffic, + Dims: module.Dims{ + {ID: dimNode(metricBytesReceived), Name: "received", Algo: module.Incremental}, + {ID: dimNode(metricBytesSent), Name: "sent", Algo: module.Incremental, Mul: -1}, }, } ) // Retain var ( - chartRetainMessages = Chart{ - ID: "retain_messages", - Title: "Stored Retained Messages", - Units: "messages", - Fam: "retain", - Ctx: "vernemq.retain_messages", - Dims: Dims{ - {ID: metricRetainMessages, Name: "messages"}, - }, - } - chartRetainMemoryUsage = Chart{ - ID: "retain_memory", - 
Title: "Stored Retained Messages Memory Usage", - Units: "KiB", - Fam: "retain", - Ctx: "vernemq.retain_memory", - Type: module.Area, - Dims: Dims{ - {ID: metricRetainMemory, Name: "used", Div: 1024}, + nodeRetainMessagesChartsTmpl = module.Chart{ + ID: "node_%s_retain_messages", + Title: "Stored Retained Messages", + Units: "messages", + Fam: "retain", + Ctx: "vernemq.node_retain_messages", + Priority: prioNodeRetainMessages, + Dims: module.Dims{ + {ID: dimNode(metricRetainMessages), Name: "messages"}, + }, + } + nodeRetainMemoryUsageChartTmpl = module.Chart{ + ID: "node_%s_retain_memory", + Title: "Stored Retained Messages Memory Usage", + Units: "bytes", + Fam: "retain", + Ctx: "vernemq.node_retain_memory", + Type: module.Area, + Priority: prioNodeRetainMemoryUsage, + Dims: module.Dims{ + {ID: dimNode(metricRetainMemory), Name: "used"}, }, } ) // Cluster var ( - chartClusterCommunicationBandwidth = Chart{ - ID: "cluster_bandwidth", - Title: "Communication with Other Cluster Nodes", - Units: "kilobits/s", - Fam: "cluster", - Ctx: "vernemq.cluster_bandwidth", - Type: module.Area, - Dims: Dims{ - {ID: metricClusterBytesReceived, Name: "received", Algo: module.Incremental, Mul: 8, Div: 1024}, - {ID: metricClusterBytesSent, Name: "sent", Algo: module.Incremental, Mul: 8, Div: -1024}, - }, - } - chartClusterCommunicationDropped = Chart{ - ID: "cluster_dropped", - Title: "Traffic Dropped During Communication with Other Cluster Nodes", - Units: "kilobits/s", - Fam: "cluster", - Type: module.Area, - Ctx: "vernemq.cluster_dropped", - Dims: Dims{ - {ID: metricClusterBytesDropped, Name: "dropped", Algo: module.Incremental, Mul: 8, Div: 1024}, - }, - } - chartNetSplitUnresolved = Chart{ - ID: "netsplit_unresolved", - Title: "Unresolved Netsplits", - Units: "netsplits", - Fam: "cluster", - Ctx: "vernemq.netsplit_unresolved", - Dims: Dims{ - {ID: "netsplit_unresolved", Name: "unresolved"}, - }, - } - chartNetSplits = Chart{ - ID: "netsplit", - Title: "Netsplits", - Units: "netsplits/s", - Fam: "cluster", - Ctx: "vernemq.netsplits", - Type: module.Stacked, - Dims: Dims{ - {ID: metricNetSplitResolved, Name: "resolved", Algo: module.Incremental}, - {ID: metricNetSplitDetected, Name: "detected", Algo: module.Incremental}, + nodeClusterCommunicationTrafficChartTmpl = module.Chart{ + ID: "node_%s_cluster_traffic", + Title: "Communication with Other Cluster Nodes", + Units: "bytes/s", + Fam: "cluster", + Ctx: "vernemq.node_cluster_traffic", + Type: module.Area, + Priority: prioNodeClusterCommunicationTraffic, + Dims: module.Dims{ + {ID: dimNode(metricClusterBytesReceived), Name: "received", Algo: module.Incremental}, + {ID: dimNode(metricClusterBytesSent), Name: "sent", Algo: module.Incremental, Mul: -1}, + }, + } + nodeClusterCommunicationDroppedChartTmpl = module.Chart{ + ID: "node_%s_cluster_dropped", + Title: "Traffic Dropped During Communication with Other Cluster Nodes", + Units: "bytes/s", + Fam: "cluster", + Type: module.Area, + Ctx: "vernemq.node_cluster_dropped", + Priority: prioNodeClusterCommunicationDropped, + Dims: module.Dims{ + {ID: dimNode(metricClusterBytesDropped), Name: "dropped", Algo: module.Incremental}, + }, + } + nodeNetSplitUnresolvedChartTmpl = module.Chart{ + ID: "node_%s_netsplit_unresolved", + Title: "Unresolved Netsplits", + Units: "netsplits", + Fam: "cluster", + Ctx: "vernemq.node_netsplit_unresolved", + Priority: prioNodeNetSplitUnresolved, + Dims: module.Dims{ + {ID: dimNode("netsplit_unresolved"), Name: "unresolved"}, + }, + } + nodeNetSplitsChartTmpl = module.Chart{ + ID: 
"node_%s_netsplit", + Title: "Netsplits", + Units: "netsplits/s", + Fam: "cluster", + Ctx: "vernemq.node_netsplits", + Type: module.Stacked, + Priority: prioNodeNetSplits, + Dims: module.Dims{ + {ID: dimNode(metricNetSplitResolved), Name: "resolved", Algo: module.Incremental}, + {ID: dimNode(metricNetSplitDetected), Name: "detected", Algo: module.Incremental}, }, } ) -// AUTH var ( - chartMQTTv5AUTH = Chart{ - ID: "mqtt_auth", - Title: "v5 AUTH", - Units: "packets/s", - Fam: "mqtt auth", - Ctx: "vernemq.mqtt_auth", - Dims: Dims{ - {ID: metricAUTHReceived, Name: "received", Algo: module.Incremental}, - {ID: metricAUTHSent, Name: "sent", Algo: module.Incremental, Mul: -1}, - }, - } - chartMQTTv5AUTHReceivedReason = Chart{ - ID: "mqtt_auth_received_reason", - Title: "v5 AUTH Received by Reason", - Units: "packets/s", - Fam: "mqtt auth", - Ctx: "vernemq.mqtt_auth_received_reason", - Type: module.Stacked, - Dims: Dims{ - {ID: join(metricAUTHReceived, "success"), Name: "success", Algo: module.Incremental}, - }, - } - chartMQTTv5AUTHSentReason = Chart{ - ID: "mqtt_auth_sent_reason", - Title: "v5 AUTH Sent by Reason", - Units: "packets/s", - Fam: "mqtt auth", - Ctx: "vernemq.mqtt_auth_sent_reason", - Type: module.Stacked, - Dims: Dims{ - {ID: join(metricAUTHSent, "success"), Name: "success", Algo: module.Incremental}, + nodeUptimeChartTmpl = module.Chart{ + ID: "node_%s_uptime", + Title: "Node Uptime", + Units: "seconds", + Fam: "uptime", + Ctx: "vernemq.node_uptime", + Priority: prioNodeUptime, + Dims: module.Dims{ + {ID: dimNode(metricSystemWallClock), Name: "time", Div: 1000}, + }, + } +) + +// PUBLISH +var ( + nodeMqttPUBLISHPacketsChartTmpl = module.Chart{ + ID: "node_%s_mqtt%s_publish", + Title: "MQTT QoS 0,1,2 PUBLISH", + Units: "packets/s", + Fam: "mqtt publish", + Ctx: "vernemq.node_mqtt_publish", + Priority: prioMqttPublishPackets, + Dims: module.Dims{ + {ID: dimMqttVer(metricPUBSLISHReceived), Name: "received", Algo: module.Incremental}, + {ID: dimMqttVer(metricPUBSLIHSent), Name: "sent", Algo: module.Incremental, Mul: -1}, + }, + } + nodeMqttPUBLISHErrorsChartTmpl = module.Chart{ + ID: "node_%s_mqtt%s_publish_errors", + Title: "MQTT Failed PUBLISH Operations due to a Netsplit", + Units: "errors/s", + Fam: "mqtt publish", + Ctx: "vernemq.node_mqtt_publish_errors", + Priority: prioMqttPublishErrors, + Dims: module.Dims{ + {ID: dimMqttVer(metricPUBLISHError), Name: "publish", Algo: module.Incremental}, + }, + } + nodeMqttPUBLISHAuthErrorsChartTmpl = module.Chart{ + ID: "node_%s_mqtt%s_publish_auth_errors", + Title: "MQTT Unauthorized PUBLISH Attempts", + Units: "errors/s", + Fam: "mqtt publish", + Ctx: "vernemq.node_mqtt_publish_auth_errors", + Type: module.Area, + Priority: prioMqttPublishAuthPackets, + Dims: module.Dims{ + {ID: dimMqttVer(metricPUBLISHAuthError), Name: "publish_auth", Algo: module.Incremental}, + }, + } + nodeMqttPUBACKPacketsChartTmpl = module.Chart{ + ID: "node_%s_mqtt%s_puback", + Title: "MQTT QoS 1 PUBACK Packets", + Units: "packets/s", + Fam: "mqtt publish", + Ctx: "vernemq.node_mqtt_puback", + Priority: prioMqttPubAckPackets, + Dims: module.Dims{ + {ID: dimMqttVer(metricPUBACKReceived), Name: "received", Algo: module.Incremental}, + {ID: dimMqttVer(metricPUBACKSent), Name: "sent", Algo: module.Incremental, Mul: -1}, + }, + } + nodeMqttPUBACKReceivedByReasonChartTmpl = func() module.Chart { + chart := module.Chart{ + ID: "node_%s_mqtt%s_puback_received_by_reason_code", + Title: "MQTT PUBACK QoS 1 Received by Reason", + Units: "packets/s", + Fam: "mqtt publish", + 
Ctx: "vernemq.node_mqtt_puback_received_by_reason_code", + Type: module.Stacked, + Priority: prioMqttPubAckReceivedReason, + } + for _, v := range mqtt5PUBACKReceivedReasonCodes { + chart.Dims = append(chart.Dims, &module.Dim{ + ID: dimMqttReason(metricPUBACKReceived, v), Name: v, Algo: module.Incremental, + }) + } + return chart + }() + nodeMqttPUBACKSentByReasonChartTmpl = func() module.Chart { + chart := module.Chart{ + ID: "node_%s_mqtt%s_puback_sent_by_reason_code", + Title: "MQTT PUBACK QoS 1 Sent by Reason", + Units: "packets/s", + Fam: "mqtt publish", + Ctx: "vernemq.node_mqtt_puback_sent_by_reason_code", + Type: module.Stacked, + Priority: prioMqttPubAckSentReason, + } + for _, v := range mqtt5PUBACKSentReasonCodes { + chart.Dims = append(chart.Dims, &module.Dim{ + ID: dimMqttReason(metricPUBACKSent, v), Name: v, Algo: module.Incremental, + }) + } + return chart + }() + nodeMqttPUBACKUnexpectedMessagesChartTmpl = module.Chart{ + ID: "node_%s_mqtt%s_puback_unexpected", + Title: "MQTT PUBACK QoS 1 Received Unexpected Messages", + Units: "messages/s", + Fam: "mqtt publish", + Ctx: "vernemq.node_mqtt_puback_invalid_error", + Priority: prioMqttPubAckUnexpectedMessages, + Dims: module.Dims{ + {ID: dimMqttVer(metricPUBACKInvalid), Name: "unexpected", Algo: module.Incremental}, + }, + } + nodeMqttPUBRECPacketsChartTmpl = module.Chart{ + ID: "node_%s_mqtt%s_pubrec", + Title: "MQTT PUBREC QoS 2 Packets", + Units: "packets/s", + Fam: "mqtt publish", + Ctx: "vernemq.node_mqtt_pubrec", + Priority: prioMqttPubRecPackets, + Dims: module.Dims{ + {ID: dimMqttVer(metricPUBRECReceived), Name: "received", Algo: module.Incremental}, + {ID: dimMqttVer(metricPUBRECSent), Name: "sent", Algo: module.Incremental, Mul: -1}, + }, + } + nodeMqttPUBRECReceivedByReasonChartTmpl = func() module.Chart { + chart := module.Chart{ + ID: "node_%s_mqtt%s_pubrec_received_by_reason_code", + Title: "MQTT PUBREC QoS 2 Received by Reason", + Units: "packets/s", + Fam: "mqtt publish", + Ctx: "vernemq.node_mqtt_pubrec_received_by_reason_code", + Type: module.Stacked, + Priority: prioMqttPubRecReceivedReason, + } + for _, v := range mqtt5PUBRECReceivedReasonCodes { + chart.Dims = append(chart.Dims, &module.Dim{ + ID: dimMqttReason(metricPUBRECReceived, v), Name: v, Algo: module.Incremental, + }) + } + return chart + }() + nodeMqttPUBRECSentByReasonChartTmpl = func() module.Chart { + chart := module.Chart{ + ID: "node_%s_mqtt%s_pubrec_sent_by_reason_code", + Title: "MQTT PUBREC QoS 2 Sent by Reason", + Units: "packets/s", + Fam: "mqtt publish", + Ctx: "vernemq.node_mqtt_pubrec_sent_by_reason_code", + Type: module.Stacked, + Priority: prioMqttPubRecSentReason, + } + for _, v := range mqtt5PUBRECSentReasonCodes { + chart.Dims = append(chart.Dims, &module.Dim{ + ID: dimMqttReason(metricPUBRECSent, v), Name: v, Algo: module.Incremental, + }) + } + return chart + }() + nodeMqttPUBRECUnexpectedMessagesChartTmpl = module.Chart{ + ID: "node_%s_mqtt%s_pubrec_unexpected", + Title: "MQTT PUBREC QoS 2 Received Unexpected Messages", + Units: "messages/s", + Fam: "mqtt publish", + Ctx: "vernemq.node_mqtt_pubrec_invalid_error", + Priority: prioMqttPubRecUnexpectedMessages, + Dims: module.Dims{ + {ID: dimMqttVer(metricPUBRECInvalid), Name: "unexpected", Algo: module.Incremental}, + }, + } + nodeMqttPUBRELPacketsChartTmpl = module.Chart{ + ID: "node_%s_mqtt%s_pubrel", + Title: "MQTT PUBREL QoS 2 Packets¬", + Units: "packets/s", + Fam: "mqtt publish", + Ctx: "vernemq.node_mqtt_pubrel", + Priority: prioMqttPubRelPackets, + Dims: module.Dims{ + 
{ID: dimMqttVer(metricPUBRELReceived), Name: "received", Algo: module.Incremental}, + {ID: dimMqttVer(metricPUBRELSent), Name: "sent", Algo: module.Incremental, Mul: -1}, + }, + } + nodeMqttPUBRELReceivedByReasonChartTmpl = func() module.Chart { + chart := module.Chart{ + ID: "node_%s_mqtt%s_pubrel_received_by_reason_code", + Title: "MQTT PUBREL QoS 2 Received by Reason", + Units: "packets/s", + Fam: "mqtt publish", + Ctx: "vernemq.node_mqtt_pubrel_received_by_reason_code", + Type: module.Stacked, + Priority: prioMqttPubRelReceivedReason, + } + for _, v := range mqtt5PUBRELReceivedReasonCodes { + chart.Dims = append(chart.Dims, &module.Dim{ + ID: dimMqttReason(metricPUBRELReceived, v), Name: v, Algo: module.Incremental, + }) + } + return chart + }() + nodeMqttPUBRELSentByReasonChartTmpl = func() module.Chart { + chart := module.Chart{ + ID: "node_%s_mqtt%s_pubrel_sent_by_reason_code", + Title: "MQTT PUBREL QoS 2 Sent by Reason", + Units: "packets/s", + Fam: "mqtt publish", + Ctx: "vernemq.node_mqtt_pubrel_sent_by_reason_code", + Type: module.Stacked, + Priority: prioMqttPubRelSentReason, + } + for _, v := range mqtt5PUBRELSentReasonCodes { + chart.Dims = append(chart.Dims, &module.Dim{ + ID: dimMqttReason(metricPUBRELSent, v), Name: v, Algo: module.Incremental, + }) + } + return chart + }() + nodeMqttPUBCOMPPacketsChartTmpl = module.Chart{ + ID: "node_%s_mqtt%s_pubcomp", + Title: "MQTT PUBCOMP QoS 2 Packets", + Units: "packets/s", + Fam: "mqtt publish", + Ctx: "vernemq.node_mqtt_pubcomp", + Priority: prioMqttPubCompPackets, + Dims: module.Dims{ + {ID: dimMqttVer(metricPUBCOMPReceived), Name: "received", Algo: module.Incremental}, + {ID: dimMqttVer(metricPUBCOMPSent), Name: "sent", Algo: module.Incremental, Mul: -1}, + }, + } + nodeMqttPUBCOMPReceivedByReasonChartTmpl = func() module.Chart { + chart := module.Chart{ + ID: "node_%s_mqtt%s_pubcomp_received_by_reason_code", + Title: "MQTT PUBCOMP QoS 2 Received by Reason", + Units: "packets/s", + Fam: "mqtt publish", + Ctx: "vernemq.node_mqtt_pubcomp_received_by_reason_code", + Type: module.Stacked, + Priority: prioMqttPubCompReceivedReason, + } + for _, v := range mqtt5PUBCOMPReceivedReasonCodes { + chart.Dims = append(chart.Dims, &module.Dim{ + ID: dimMqttReason(metricPUBCOMPReceived, v), Name: v, Algo: module.Incremental, + }) + } + return chart + }() + nodeMqttPUBCOMPSentByReasonChartTmpl = func() module.Chart { + chart := module.Chart{ + ID: "node_%s_mqtt%s_pubcomp_sent_by_reason_code", + Title: "MQTT PUBCOMP QoS 2 Sent by Reason", + Units: "packets/s", + Fam: "mqtt publish", + Ctx: "vernemq.node_mqtt_pubcomp_sent_by_reason_code", + Type: module.Stacked, + Priority: prioMqttPubCompSentReason, + } + for _, v := range mqtt5PUBCOMPSentReasonCodes { + chart.Dims = append(chart.Dims, &module.Dim{ + ID: dimMqttReason(metricPUBCOMPSent, v), Name: v, Algo: module.Incremental, + }) + } + return chart + }() + nodeMqttPUBCOMPUnexpectedMessagesChartTmpl = module.Chart{ + ID: "node_%s_mqtt%s_pubcomp_unexpected", + Title: "MQTT PUBCOMP QoS 2 Received Unexpected Messages", + Units: "messages/s", + Fam: "mqtt publish", + Ctx: "vernemq.node_mqtt_pubcomp_invalid_error", + Priority: prioMqttPubCompUnexpectedMessages, + Dims: module.Dims{ + {ID: dimMqttVer(metricPUNCOMPInvalid), Name: "unexpected", Algo: module.Incremental}, }, } ) // CONNECT var ( - chartMQTTv3v5CONNECT = Chart{ - ID: "mqtt_connect", - Title: "v3/v5 CONNECT and CONNACK", - Units: "packets/s", - Fam: "mqtt connect", - Ctx: "vernemq.mqtt_connect", - Dims: Dims{ - {ID: metricCONNECTReceived, 
Name: "CONNECT", Algo: module.Incremental}, - {ID: metricCONNACKSent, Name: "CONNACK", Algo: module.Incremental, Mul: -1}, - }, - } - chartMQTTv3v5CONNACKSentReason = Chart{ - ID: "mqtt_connack_sent_reason", - Title: "v3/v5 CONNACK Sent by Reason", - Units: "packets/s", - Fam: "mqtt connect", - Ctx: "vernemq.mqtt_connack_sent_reason", - Type: module.Stacked, - Dims: Dims{ - {ID: join(metricCONNACKSent, "success"), Name: "success", Algo: module.Incremental}, + nodeMqttCONNECTPacketsChartTmpl = module.Chart{ + ID: "node_%s_mqtt%s_connect", + Title: "MQTT CONNECT and CONNACK", + Units: "packets/s", + Fam: "mqtt connect", + Ctx: "vernemq.node_mqtt_connect", + Priority: prioMqttConnectPackets, + Dims: module.Dims{ + {ID: dimMqttVer(metricCONNECTReceived), Name: "connect", Algo: module.Incremental}, + {ID: dimMqttVer(metricCONNACKSent), Name: "connack", Algo: module.Incremental, Mul: -1}, }, } + nodeMqttCONNACKSentByReturnCodeChartTmpl = func() module.Chart { + chart := module.Chart{ + ID: "node_%s_mqtt%s_connack_sent_by_return_code", + Title: "MQTT CONNACK Sent by Return Code", + Units: "packets/s", + Fam: "mqtt connect", + Ctx: "vernemq.node_mqtt_connack_sent_by_return_code", + Type: module.Stacked, + Priority: prioMqttConnectSentReason, + } + for _, v := range mqtt4CONNACKSentReturnCodes { + chart.Dims = append(chart.Dims, &module.Dim{ + ID: dimMqttRCode(metricCONNACKSent, v), Name: v, Algo: module.Incremental, + }) + } + return chart + }() + nodeMqttCONNACKSentByReasonCodeChartTmpl = func() module.Chart { + chart := module.Chart{ + ID: "node_%s_mqtt%s_connack_sent_by_reason_code", + Title: "MQTT CONNACK Sent by Reason", + Units: "packets/s", + Fam: "mqtt connect", + Ctx: "vernemq.node_mqtt_connack_sent_by_reason_code", + Type: module.Stacked, + Priority: prioMqttConnectSentReason, + } + for _, v := range mqtt5CONNACKSentReasonCodes { + chart.Dims = append(chart.Dims, &module.Dim{ + ID: dimMqttReason(metricCONNACKSent, v), Name: v, Algo: module.Incremental, + }) + } + return chart + }() ) // DISCONNECT var ( - chartMQTTv3v5DISCONNECT = Chart{ - ID: "mqtt_disconnect", - Title: "v3/v5 DISCONNECT", - Units: "packets/s", - Fam: "mqtt disconnect", - Ctx: "vernemq.mqtt_disconnect", - Dims: Dims{ - {ID: metricDISCONNECTReceived, Name: "received", Algo: module.Incremental}, - {ID: metricDISCONNECTSent, Name: "sent", Algo: module.Incremental, Mul: -1}, - }, - } - chartMQTTv5DISCONNECTReceivedReason = Chart{ - ID: "mqtt_disconnect_received_reason", - Title: "v5 DISCONNECT Received by Reason", - Units: "packets/s", - Fam: "mqtt disconnect", - Ctx: "vernemq.mqtt_disconnect_received_reason", - Type: module.Stacked, - Dims: Dims{ - {ID: join(metricDISCONNECTReceived, "normal_disconnect"), Name: "normal_disconnect", Algo: module.Incremental}, - }, - } - chartMQTTv5DISCONNECTSentReason = Chart{ - ID: "mqtt_disconnect_sent_reason", - Title: "v5 DISCONNECT Sent by Reason", - Units: "packets/s", - Fam: "mqtt disconnect", - Ctx: "vernemq.mqtt_disconnect_sent_reason", - Type: module.Stacked, - Dims: Dims{ - {ID: join(metricDISCONNECTSent, "normal_disconnect"), Name: "normal_disconnect", Algo: module.Incremental}, + nodeMqtt5DISCONNECTPacketsChartTmpl = module.Chart{ + ID: "node_%s_mqtt%s_disconnect", + Title: "MQTT DISCONNECT Packets", + Units: "packets/s", + Fam: "mqtt disconnect", + Ctx: "vernemq.node_mqtt_disconnect", + Priority: prioMqttDisconnectPackets, + Dims: module.Dims{ + {ID: dimMqttVer(metricDISCONNECTReceived), Name: "received", Algo: module.Incremental}, + {ID: dimMqttVer(metricDISCONNECTSent), 
Name: "sent", Algo: module.Incremental, Mul: -1}, }, } + nodeMqtt4DISCONNECTPacketsChartTmpl = func() module.Chart { + chart := nodeMqtt5DISCONNECTPacketsChartTmpl.Copy() + _ = chart.RemoveDim(dimMqttVer(metricDISCONNECTSent)) + return *chart + }() + nodeMqttDISCONNECTReceivedByReasonChartTmpl = func() module.Chart { + chart := module.Chart{ + ID: "node_%s_mqtt%s_disconnect_received_by_reason_code", + Title: "MQTT DISCONNECT Received by Reason", + Units: "packets/s", + Fam: "mqtt disconnect", + Ctx: "vernemq.node_mqtt_disconnect_received_by_reason_code", + Type: module.Stacked, + Priority: prioMqttDisconnectReceivedReason, + } + for _, v := range mqtt5DISCONNECTReceivedReasonCodes { + chart.Dims = append(chart.Dims, &module.Dim{ + ID: dimMqttReason(metricDISCONNECTReceived, v), Name: v, Algo: module.Incremental, + }) + } + return chart + }() + nodeMqttDISCONNECTSentByReasonChartTmpl = func() module.Chart { + chart := module.Chart{ + ID: "node_%s_mqtt%s_disconnect_sent_by_reason_code", + Title: "MQTT DISCONNECT Sent by Reason", + Units: "packets/s", + Fam: "mqtt disconnect", + Ctx: "vernemq.node_mqtt_disconnect_sent_by_reason_code", + Type: module.Stacked, + Priority: prioMqttDisconnectSentReason, + } + for _, v := range mqtt5DISCONNECTSentReasonCodes { + chart.Dims = append(chart.Dims, &module.Dim{ + ID: dimMqttReason(metricDISCONNECTSent, v), Name: v, Algo: module.Incremental, + }) + } + return chart + }() ) // SUBSCRIBE var ( - chartMQTTv3v5SUBSCRIBE = Chart{ - ID: "mqtt_subscribe", - Title: "v3/v5 SUBSCRIBE and SUBACK", - Units: "packets/s", - Fam: "mqtt subscribe", - Ctx: "vernemq.mqtt_subscribe", - Dims: Dims{ - {ID: metricSUBSCRIBEReceived, Name: "SUBSCRIBE", Algo: module.Incremental}, - {ID: metricSUBACKSent, Name: "SUBACK", Algo: module.Incremental, Mul: -1}, - }, - } - chartMQTTv3v5SUBSCRIBEError = Chart{ - ID: "mqtt_subscribe_error", - Title: "v3/v5 Failed SUBSCRIBE Operations due to a Netsplit", - Units: "ops/s", - Fam: "mqtt subscribe", - Ctx: "vernemq.mqtt_subscribe_error", - Dims: Dims{ - {ID: metricSUBSCRIBEError, Name: "failed", Algo: module.Incremental}, - }, - } - chartMQTTv3v5SUBSCRIBEAuthError = Chart{ - ID: "mqtt_subscribe_auth_error", - Title: "v3/v5 Unauthorized SUBSCRIBE Attempts", - Units: "attempts/s", - Fam: "mqtt subscribe", - Ctx: "vernemq.mqtt_subscribe_auth_error", - Dims: Dims{ - {ID: metricSUBSCRIBEAuthError, Name: "unauth", Algo: module.Incremental}, + nodeMqttSUBSCRIBEPacketsChartTmpl = module.Chart{ + ID: "node_%s_mqtt%s_subscribe", + Title: "MQTT SUBSCRIBE and SUBACK Packets", + Units: "packets/s", + Fam: "mqtt subscribe", + Ctx: "vernemq.node_mqtt_subscribe", + Priority: prioMqttSubscribePackets, + Dims: module.Dims{ + {ID: dimMqttVer(metricSUBSCRIBEReceived), Name: "subscribe", Algo: module.Incremental}, + {ID: dimMqttVer(metricSUBACKSent), Name: "suback", Algo: module.Incremental, Mul: -1}, + }, + } + modeMqttSUBSCRIBEErrorsChartTmpl = module.Chart{ + ID: "node_%s_mqtt%s_subscribe_error", + Title: "MQTT Failed SUBSCRIBE Operations due to Netsplit", + Units: "errors/s", + Fam: "mqtt subscribe", + Ctx: "vernemq.node_mqtt_subscribe_error", + Priority: prioMqttSubscribeErrors, + Dims: module.Dims{ + {ID: dimMqttVer(metricSUBSCRIBEError), Name: "subscribe", Algo: module.Incremental}, + }, + } + nodeMqttSUBSCRIBEAuthErrorsChartTmpl = module.Chart{ + ID: "node_%s_mqtt%s_subscribe_auth_error", + Title: "MQTT Unauthorized SUBSCRIBE Attempts", + Units: "errors/s", + Fam: "mqtt subscribe", + Ctx: "vernemq.node_mqtt_subscribe_auth_error", + Priority: 
prioMqttSubscribeAuthPackets, + Dims: module.Dims{ + {ID: dimMqttVer(metricSUBSCRIBEAuthError), Name: "subscribe_auth", Algo: module.Incremental}, }, } ) // UNSUBSCRIBE var ( - chartMQTTv3v5UNSUBSCRIBE = Chart{ - ID: "mqtt_unsubscribe", - Title: "v3/v5 UNSUBSCRIBE and UNSUBACK", - Units: "packets/s", - Fam: "mqtt unsubscribe", - Ctx: "vernemq.mqtt_unsubscribe", - Dims: Dims{ - {ID: metricUNSUBSCRIBEReceived, Name: "UNSUBSCRIBE", Algo: module.Incremental}, - {ID: metricUNSUBACKSent, Name: "UNSUBACK", Algo: module.Incremental, Mul: -1}, - }, - } - chartMQTTv3v5UNSUBSCRIBEError = Chart{ - ID: "mqtt_unsubscribe_error", - Title: "v3/v5 Failed UNSUBSCRIBE Operations due to a Netsplit", - Units: "ops/s", - Fam: "mqtt unsubscribe", - Ctx: "vernemq.mqtt_unsubscribe_error", - Dims: Dims{ - {ID: metricUNSUBSCRIBEError, Name: "failed", Algo: module.Incremental}, + nodeMqttUNSUBSCRIBEPacketsChartTmpl = module.Chart{ + ID: "node_%s_mqtt%s_unsubscribe", + Title: "MQTT UNSUBSCRIBE and UNSUBACK Packets", + Units: "packets/s", + Fam: "mqtt unsubscribe", + Ctx: "vernemq.node_mqtt_unsubscribe", + Priority: prioMqttUnsubscribePackets, + Dims: module.Dims{ + {ID: dimMqttVer(metricUNSUBSCRIBEReceived), Name: "unsubscribe", Algo: module.Incremental}, + {ID: dimMqttVer(metricUNSUBACKSent), Name: "unsuback", Algo: module.Incremental, Mul: -1}, + }, + } + nodeMqttUNSUBSCRIBEErrorsChartTmpl = module.Chart{ + ID: "node_%s_mqtt%s_unsubscribe_error", + Title: "MQTT Failed UNSUBSCRIBE Operations due to Netsplit", + Units: "errors/s", + Fam: "mqtt unsubscribe", + Ctx: "vernemq.node_mqtt_unsubscribe_error", + Priority: prioMqttUnsubscribeErrors, + Dims: module.Dims{ + {ID: dimMqttVer(metricUNSUBSCRIBEError), Name: "unsubscribe", Algo: module.Incremental}, }, } ) -// PUBLISH +// AUTH var ( - chartMQTTv3v5PUBLISH = Chart{ - ID: "mqtt_publish", - Title: "v3/v5 QoS 0,1,2 PUBLISH", - Units: "packets/s", - Fam: "mqtt publish", - Ctx: "vernemq.mqtt_publish", - Dims: Dims{ - {ID: metricPUBSLISHReceived, Name: "received", Algo: module.Incremental}, - {ID: metricPUBSLIHSent, Name: "sent", Algo: module.Incremental, Mul: -1}, - }, - } - chartMQTTv3v5PUBLISHErrors = Chart{ - ID: "mqtt_publish_errors", - Title: "v3/v5 Failed PUBLISH Operations due to a Netsplit", - Units: "ops/s", - Fam: "mqtt publish", - Ctx: "vernemq.mqtt_publish_errors", - Dims: Dims{ - {ID: metricPUBLISHError, Name: "failed", Algo: module.Incremental}, - }, - } - chartMQTTv3v5PUBLISHAuthErrors = Chart{ - ID: "mqtt_publish_auth_errors", - Title: "v3/v5 Unauthorized PUBLISH Attempts", - Units: "attempts/s", - Fam: "mqtt publish", - Ctx: "vernemq.mqtt_publish_auth_errors", - Type: module.Area, - Dims: Dims{ - {ID: metricPUBLISHAuthError, Name: "unauth", Algo: module.Incremental}, - }, - } - chartMQTTv3v5PUBACK = Chart{ - ID: "mqtt_puback", - Title: "v3/v5 QoS 1 PUBACK", - Units: "packets/s", - Fam: "mqtt publish", - Ctx: "vernemq.mqtt_puback", - Dims: Dims{ - {ID: metricPUBACKReceived, Name: "received", Algo: module.Incremental}, - {ID: metricPUBACKSent, Name: "sent", Algo: module.Incremental, Mul: -1}, - }, - } - chartMQTTv5PUBACKReceivedReason = Chart{ - ID: "mqtt_puback_received_reason", - Title: "v5 PUBACK QoS 1 Received by Reason", - Units: "packets/s", - Fam: "mqtt publish", - Ctx: "vernemq.mqtt_puback_received_reason", - Type: module.Stacked, - Dims: Dims{ - {ID: join(metricPUBACKReceived, "success"), Name: "success", Algo: module.Incremental}, - }, - } - chartMQTTv5PUBACKSentReason = Chart{ - ID: "mqtt_puback_sent_reason", - Title: "v5 PUBACK QoS 1 Sent by 
Reason", - Units: "packets/s", - Fam: "mqtt publish", - Ctx: "vernemq.mqtt_puback_sent_reason", - Type: module.Stacked, - Dims: Dims{ - {ID: join(metricPUBACKSent, "success"), Name: "success", Algo: module.Incremental}, - }, - } - chartMQTTv3v5PUBACKUnexpected = Chart{ - ID: "mqtt_puback_unexpected", - Title: "v3/v5 PUBACK QoS 1 Received Unexpected Messages", - Units: "messages/s", - Fam: "mqtt publish", - Ctx: "vernemq.mqtt_puback_invalid_error", - Dims: Dims{ - {ID: metricPUBACKInvalid, Name: "unexpected", Algo: module.Incremental}, - }, - } - chartMQTTv3v5PUBREC = Chart{ - ID: "mqtt_pubrec", - Title: "v3/v5 PUBREC QoS 2", - Units: "packets/s", - Fam: "mqtt publish", - Ctx: "vernemq.mqtt_pubrec", - Dims: Dims{ - {ID: metricPUBRECReceived, Name: "received", Algo: module.Incremental}, - {ID: metricPUBRECSent, Name: "sent", Algo: module.Incremental, Mul: -1}, - }, - } - chartMQTTv5PUBRECReceivedReason = Chart{ - ID: "mqtt_pubrec_received_reason", - Title: "v5 PUBREC QoS 2 Received by Reason", - Units: "packets/s", - Fam: "mqtt publish", - Ctx: "vernemq.mqtt_pubrec_received_reason", - Type: module.Stacked, - Dims: Dims{ - {ID: join(metricPUBRECReceived, "success"), Name: "success", Algo: module.Incremental}, - }, - } - chartMQTTv5PUBRECSentReason = Chart{ - ID: "mqtt_pubrec_sent_reason", - Title: "v5 PUBREC QoS 2 Sent by Reason", - Units: "packets/s", - Fam: "mqtt publish", - Ctx: "vernemq.mqtt_pubrec_sent_reason", - Type: module.Stacked, - Dims: Dims{ - {ID: join(metricPUBRECSent, "success"), Name: "success", Algo: module.Incremental}, - }, - } - chartMQTTv3PUBRECUnexpected = Chart{ - ID: "mqtt_pubrec_unexpected", - Title: "v3 PUBREC QoS 2 Received Unexpected Messages", - Units: "messages/s", - Fam: "mqtt publish", - Ctx: "vernemq.mqtt_pubrec_invalid_error", - Dims: Dims{ - {ID: metricPUBRECInvalid, Name: "unexpected", Algo: module.Incremental}, - }, - } - chartMQTTv3v5PUBREL = Chart{ - ID: "mqtt_pubrel", - Title: "v3/v5 PUBREL QoS 2", - Units: "packets/s", - Fam: "mqtt publish", - Ctx: "vernemq.mqtt_pubrel", - Dims: Dims{ - {ID: metricPUBRELReceived, Name: "received", Algo: module.Incremental}, - {ID: metricPUBRELSent, Name: "sent", Algo: module.Incremental, Mul: -1}, - }, - } - chartMQTTv5PUBRELReceivedReason = Chart{ - ID: "mqtt_pubrel_received_reason", - Title: "v5 PUBREL QoS 2 Received by Reason", - Units: "packets/s", - Fam: "mqtt publish", - Ctx: "vernemq.mqtt_pubrel_received_reason", - Type: module.Stacked, - Dims: Dims{ - {ID: join(metricPUBRELReceived, "success"), Name: "success", Algo: module.Incremental}, - }, - } - chartMQTTv5PUBRELSentReason = Chart{ - ID: "mqtt_pubrel_sent_reason", - Title: "v5 PUBREL QoS 2 Sent by Reason", - Units: "packets/s", - Fam: "mqtt publish", - Ctx: "vernemq.mqtt_pubrel_sent_reason", - Type: module.Stacked, - Dims: Dims{ - {ID: join(metricPUBRELSent, "success"), Name: "success", Algo: module.Incremental}, - }, - } - chartMQTTv3v5PUBCOMP = Chart{ - ID: "mqtt_pubcomp", - Title: "v3/v5 PUBCOMP QoS 2", - Units: "packets/s", - Fam: "mqtt publish", - Ctx: "vernemq.mqtt_pubcom", - Dims: Dims{ - {ID: metricPUBCOMPReceived, Name: "received", Algo: module.Incremental}, - {ID: metricPUBCOMPSent, Name: "sent", Algo: module.Incremental, Mul: -1}, - }, - } - chartMQTTv5PUBCOMPReceivedReason = Chart{ - ID: "mqtt_pubcomp_received_reason", - Title: "v5 PUBCOMP QoS 2 Received by Reason", - Units: "packets/s", - Fam: "mqtt publish", - Ctx: "vernemq.mqtt_pubcomp_received_reason", - Type: module.Stacked, - Dims: Dims{ - {ID: join(metricPUBCOMPReceived, "success"), Name: 
"success", Algo: module.Incremental}, - }, - } - chartMQTTv5PUBCOMPSentReason = Chart{ - ID: "mqtt_pubcomp_sent_reason", - Title: "v5 PUBCOMP QoS 2 Sent by Reason", - Units: "packets/s", - Fam: "mqtt publish", - Ctx: "vernemq.mqtt_pubcomp_sent_reason", - Type: module.Stacked, - Dims: Dims{ - {ID: join(metricPUBCOMPSent, "success"), Name: "success", Algo: module.Incremental}, - }, - } - chartMQTTv3v5PUBCOMPUnexpected = Chart{ - ID: "mqtt_pubcomp_unexpected", - Title: "v3/v5 PUBCOMP QoS 2 Received Unexpected Messages", - Units: "messages/s", - Fam: "mqtt publish", - Ctx: "vernemq.mqtt_pubcomp_invalid_error", - Dims: Dims{ - {ID: metricPUNCOMPInvalid, Name: "unexpected", Algo: module.Incremental}, + nodeMqttAUTHPacketsChartTmpl = module.Chart{ + ID: "node_%s_mqtt%s_auth", + Title: "MQTT AUTH Packets", + Units: "packets/s", + Fam: "mqtt auth", + Ctx: "vernemq.node_mqtt_auth", + Priority: prioMqttAuthPackets, + Dims: module.Dims{ + {ID: dimMqttVer(metricAUTHReceived), Name: "received", Algo: module.Incremental}, + {ID: dimMqttVer(metricAUTHSent), Name: "sent", Algo: module.Incremental, Mul: -1}, }, } + nodeMqttAUTHReceivedByReasonChartTmpl = func() module.Chart { + chart := module.Chart{ + ID: "node_%s_mqtt%s_auth_received_by_reason_code", + Title: "MQTT AUTH Received by Reason", + Units: "packets/s", + Fam: "mqtt auth", + Ctx: "vernemq.node_mqtt_auth_received_by_reason_code", + Type: module.Stacked, + Priority: prioMqttAuthReceivedReason, + } + for _, v := range mqtt5AUTHReceivedReasonCodes { + chart.Dims = append(chart.Dims, &module.Dim{ + ID: dimMqttReason(metricAUTHReceived, v), Name: v, Algo: module.Incremental, + }) + } + return chart + }() + nodeMqttAUTHSentByReasonChartTmpl = func() module.Chart { + chart := module.Chart{ + ID: "node_%s_mqtt%s_auth_sent_by_reason_code", + Title: "MQTT AUTH Sent by Reason", + Units: "packets/s", + Fam: "mqtt auth", + Ctx: "vernemq.node_mqtt_auth_sent_by_reason_code", + Type: module.Stacked, + Priority: prioMqttAuthSentReason, + } + for _, v := range mqtt5AUTHSentReasonCodes { + chart.Dims = append(chart.Dims, &module.Dim{ + ID: dimMqttReason(metricAUTHSent, v), Name: v, Algo: module.Incremental, + }) + } + return chart + }() ) // PING var ( - chartMQTTv3v5PING = Chart{ - ID: "mqtt_ping", - Title: "v3/v5 PING", - Units: "packets/s", - Fam: "mqtt ping", - Ctx: "vernemq.mqtt_ping", - Dims: Dims{ - {ID: metricPINGREQReceived, Name: "PINGREQ", Algo: module.Incremental}, - {ID: metricPINGRESPSent, Name: "PINGRESP", Algo: module.Incremental, Mul: -1}, + nodeMqttPINGPacketsChartTmpl = module.Chart{ + ID: "node_%s_mqtt_ver_%s_ping", + Title: "MQTT PING Packets", + Units: "packets/s", + Fam: "mqtt ping", + Ctx: "vernemq.node_mqtt_ping", + Priority: prioMqttPingPackets, + Dims: module.Dims{ + {ID: dimMqttVer(metricPINGREQReceived), Name: "pingreq", Algo: module.Incremental}, + {ID: dimMqttVer(metricPINGRESPSent), Name: "pingresp", Algo: module.Incremental, Mul: -1}, }, } ) -var ( - chartUptime = Chart{ - ID: "node_uptime", - Title: "Node Uptime", - Units: "seconds", - Fam: "uptime", - Ctx: "vernemq.node_uptime", - Dims: Dims{ - {ID: metricSystemWallClock, Name: "time", Div: 1000}, - }, +func (v *VerneMQ) addNodeCharts(node string, nst *nodeStats) { + if err := v.Charts().Add(*newNodeCharts(node)...); err != nil { + v.Warningf("error on adding node '%s' charts: %v", node, err) } -) + if len(nst.mqtt4) > 0 { + if err := v.Charts().Add(*newNodeMqttCharts(node, "4")...); err != nil { + v.Warningf("error on adding node '%s' mqtt v4 charts: %v", node, err) + } + } + if 
len(nst.mqtt5) > 0 { + if err := v.Charts().Add(*newNodeMqttCharts(node, "5")...); err != nil { + v.Warningf("error on adding node '%s' mqtt 5 charts: %v", node, err) + } + } +} + +func (v *VerneMQ) removeNodeCharts(node string) { + px := cleanChartID(fmt.Sprintf("node_%s_", node)) -func (v *VerneMQ) notifyNewScheduler(name string) { - if v.cache[name] { - return + for _, chart := range *v.Charts() { + if strings.HasPrefix(chart.ID, px) { + chart.MarkRemove() + chart.MarkNotCreated() + } } - v.cache[name] = true +} + +func newNodeCharts(node string) *module.Charts { + charts := nodeChartsTmpl.Copy() - id := chartSchedulerUtilization.ID - num := name[len("system_utilization_scheduler_"):] + for _, chart := range *charts { + chart.ID = cleanChartID(fmt.Sprintf(chart.ID, node)) + chart.Labels = []module.Label{ + {Key: "node", Value: node}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, node) + } + } - v.addAbsDimToChart(id, name, num) + return charts } -func (v *VerneMQ) notifyNewReason(name, reason string) { - if reason == "success" || reason == "normal_disconnect" { - return - } - key := join(name, reason) - if v.cache[key] { - return - } - v.cache[key] = true - - var chart Chart - switch name { - case metricAUTHReceived: - chart = chartMQTTv5AUTHReceivedReason - case metricAUTHSent: - chart = chartMQTTv5AUTHSentReason - case metricCONNACKSent: - chart = chartMQTTv3v5CONNACKSentReason - case metricDISCONNECTReceived: - chart = chartMQTTv5DISCONNECTReceivedReason - case metricDISCONNECTSent: - chart = chartMQTTv5DISCONNECTSentReason - case metricPUBACKReceived: - chart = chartMQTTv5PUBACKReceivedReason - case metricPUBACKSent: - chart = chartMQTTv5PUBACKSentReason - case metricPUBRECReceived: - chart = chartMQTTv5PUBRECReceivedReason - case metricPUBRECSent: - chart = chartMQTTv5PUBRECSentReason - case metricPUBRELReceived: - chart = chartMQTTv5PUBRELReceivedReason - case metricPUBRELSent: - chart = chartMQTTv5PUBRELSentReason - case metricPUBCOMPReceived: - chart = chartMQTTv5PUBCOMPReceivedReason - case metricPUBCOMPSent: - chart = chartMQTTv5PUBCOMPSentReason +func newNodeMqttCharts(node, mqttVer string) *module.Charts { + var charts *module.Charts + + switch mqttVer { + case "4": + charts = nodeMqtt4ChartsTmpl.Copy() + case "5": + charts = nodeMqtt5ChartsTmpl.Copy() default: - v.Warningf("unknown metric name, wont be added to the charts: '%s'", name) - return + return nil } - v.addIncDimToChart(chart.ID, key, reason) + for _, chart := range *charts { + chart.ID = cleanChartID(fmt.Sprintf(chart.ID, node, mqttVer)) + chart.Labels = []module.Label{ + {Key: "node", Value: node}, + {Key: "mqtt_version", Value: mqttVer}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, node, mqttVer) + } + } + + return charts } -func (v *VerneMQ) addAbsDimToChart(chartID, dimID, dimName string) { - v.addDimToChart(chartID, dimID, dimName, false) +func dimNode(name string) string { + return join("node_%s", name) } -func (v *VerneMQ) addIncDimToChart(chartID, dimID, dimName string) { - v.addDimToChart(chartID, dimID, dimName, true) +func dimMqttVer(name string) string { + return join("node_%s_mqtt%s", name) } -func (v *VerneMQ) addDimToChart(chartID, dimID, dimName string, inc bool) { - chart := v.Charts().Get(chartID) - if chart == nil { - v.Warningf("add '%s' dim: couldn't find '%s' chart", dimID, chartID) - return - } +func dimMqttReason(name, reason string) string { + return join("node_%s_mqtt%s", name, "reason_code", reason) +} - dim := &Dim{ID: dimID, Name: 
dimName} - if inc { - dim.Algo = module.Incremental - } +func dimMqttRCode(name, rcode string) string { + return join("node_%s_mqtt%s", name, "return_code", rcode) +} - if err := chart.AddDim(dim); err != nil { - v.Warningf("add '%s' dim: %v", dimID, err) - return - } - chart.MarkNotCreated() +func cleanChartID(id string) string { + r := strings.NewReplacer(".", "_", "'", "_", " ", "_") + return r.Replace(id) } diff --git a/src/go/plugin/go.d/modules/vernemq/collect.go b/src/go/plugin/go.d/modules/vernemq/collect.go index c6fb3ecb9..239b30310 100644 --- a/src/go/plugin/go.d/modules/vernemq/collect.go +++ b/src/go/plugin/go.d/modules/vernemq/collect.go @@ -6,283 +6,155 @@ import ( "errors" "strings" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/labels" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm" ) -func isValidVerneMQMetrics(pms prometheus.Series) bool { - return pms.FindByName(metricPUBLISHError).Len() > 0 && pms.FindByName(metricRouterSubscriptions).Len() > 0 -} - func (v *VerneMQ) collect() (map[string]int64, error) { - pms, err := v.prom.ScrapeSeries() + mfs, err := v.prom.Scrape() if err != nil { return nil, err } - if !isValidVerneMQMetrics(pms) { - return nil, errors.New("returned metrics aren't VerneMQ metrics") + if !v.namespace.found { + name, err := v.getMetricNamespace(mfs) + if err != nil { + return nil, err + } + v.namespace.found = true + v.namespace.name = name } - mx := v.collectVerneMQ(pms) - - return stm.ToMap(mx), nil -} - -func (v *VerneMQ) collectVerneMQ(pms prometheus.Series) map[string]float64 { - mx := make(map[string]float64) - collectSockets(mx, pms) - collectQueues(mx, pms) - collectSubscriptions(mx, pms) - v.collectErlangVM(mx, pms) - collectBandwidth(mx, pms) - collectRetain(mx, pms) - collectCluster(mx, pms) - collectUptime(mx, pms) - - v.collectAUTH(mx, pms) - v.collectCONNECT(mx, pms) - v.collectDISCONNECT(mx, pms) - v.collectSUBSCRIBE(mx, pms) - v.collectUNSUBSCRIBE(mx, pms) - v.collectPUBLISH(mx, pms) - v.collectPING(mx, pms) - v.collectMQTTInvalidMsgSize(mx, pms) - return mx -} - -func (v *VerneMQ) collectCONNECT(mx map[string]float64, pms prometheus.Series) { - pms = pms.FindByNames( - metricCONNECTReceived, - metricCONNACKSent, - ) - v.collectMQTT(mx, pms) -} - -func (v *VerneMQ) collectDISCONNECT(mx map[string]float64, pms prometheus.Series) { - pms = pms.FindByNames( - metricDISCONNECTReceived, - metricDISCONNECTSent, - ) - v.collectMQTT(mx, pms) -} - -func (v *VerneMQ) collectPUBLISH(mx map[string]float64, pms prometheus.Series) { - pms = pms.FindByNames( - metricPUBACKReceived, - metricPUBACKSent, - metricPUBACKInvalid, - - metricPUBCOMPReceived, - metricPUBCOMPSent, - metricPUNCOMPInvalid, - - metricPUBSLISHReceived, - metricPUBSLIHSent, - metricPUBLISHError, - metricPUBLISHAuthError, - - metricPUBRECReceived, - metricPUBRECSent, - metricPUBRECInvalid, + mx := make(map[string]int64) - metricPUBRELReceived, - metricPUBRELSent, - ) - v.collectMQTT(mx, pms) -} - -func (v *VerneMQ) collectSUBSCRIBE(mx map[string]float64, pms prometheus.Series) { - pms = pms.FindByNames( - metricSUBSCRIBEReceived, - metricSUBACKSent, - metricSUBSCRIBEError, - metricSUBSCRIBEAuthError, - ) - v.collectMQTT(mx, pms) -} - -func (v *VerneMQ) collectUNSUBSCRIBE(mx map[string]float64, pms prometheus.Series) { - pms = pms.FindByNames( - metricUNSUBSCRIBEReceived, - metricUNSUBACKSent, - metricUNSUBSCRIBEError, - ) - v.collectMQTT(mx, pms) -} - -func (v 
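Together, the helpers above instantiate one template per node and MQTT version: `newNodeMqttCharts` fills the two `%s` verbs in each chart and dimension ID, and `cleanChartID` replaces characters that would be awkward in a chart ID (Erlang node names such as `VerneMQ@127.0.0.1` contain dots). A self-contained sketch with an invented node name:

```go
package main

import (
	"fmt"
	"strings"
)

// Copied from the patch: dots, single quotes and spaces in a node name
// are replaced with underscores before the ID is registered.
func cleanChartID(id string) string {
	r := strings.NewReplacer(".", "_", "'", "_", " ", "_")
	return r.Replace(id)
}

func main() {
	// Template ID as declared above, with %s slots for node and MQTT version.
	const tmpl = "node_%s_mqtt%s_connect"

	node, ver := "VerneMQ@127.0.0.1", "5" // hypothetical node label value
	fmt.Println(cleanChartID(fmt.Sprintf(tmpl, node, ver)))
	// Output: node_VerneMQ@127_0_0_1_mqtt5_connect
}
```

The resulting cleaned `node_<node>_` prefix is also what `removeNodeCharts` matches on when a node disappears from the scrape.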
*VerneMQ) collectPING(mx map[string]float64, pms prometheus.Series) { - pms = pms.FindByNames( - metricPINGREQReceived, - metricPINGRESPSent, - ) - v.collectMQTT(mx, pms) -} - -func (v *VerneMQ) collectAUTH(mx map[string]float64, pms prometheus.Series) { - pms = pms.FindByNames( - metricAUTHReceived, - metricAUTHSent, - ) - v.collectMQTT(mx, pms) -} + v.collectMetrics(mx, mfs) -func (v *VerneMQ) collectMQTTInvalidMsgSize(mx map[string]float64, pms prometheus.Series) { - pms = pms.FindByName(metricMQTTInvalidMsgSizeError) - v.collectMQTT(mx, pms) + return mx, nil } -func collectSockets(mx map[string]float64, pms prometheus.Series) { - pms = pms.FindByNames( - metricSocketClose, - metricSocketCloseTimeout, - metricSocketError, - metricSocketOpen, - metricClientKeepaliveExpired, - ) - collectNonMQTT(mx, pms) - mx["open_sockets"] = mx[metricSocketOpen] - mx[metricSocketClose] -} +func (v *VerneMQ) collectMetrics(mx map[string]int64, mfs prometheus.MetricFamilies) { + nodes := v.getNodesStats(mfs) -func collectQueues(mx map[string]float64, pms prometheus.Series) { - pms = pms.FindByNames( - metricQueueInitializedFromStorage, - metricQueueMessageDrop, - metricQueueMessageExpired, - metricQueueMessageIn, - metricQueueMessageOut, - metricQueueMessageUnhandled, - metricQueueProcesses, - metricQueueSetup, - metricQueueTeardown, - ) - collectNonMQTT(mx, pms) -} + for node, st := range nodes { + if !v.seenNodes[node] { + v.seenNodes[node] = true + v.addNodeCharts(node, st) + } -func collectSubscriptions(mx map[string]float64, pms prometheus.Series) { - pms = pms.FindByNames( - metricRouterMatchesLocal, - metricRouterMatchesRemote, - metricRouterMemory, - metricRouterSubscriptions, - ) - collectNonMQTT(mx, pms) -} + st.stats["open_sockets"] = st.stats[metricSocketOpen] - st.stats[metricSocketClose] + st.stats["netsplit_unresolved"] = st.stats[metricNetSplitDetected] - st.stats[metricNetSplitResolved] + // https://github.com/vernemq/vernemq/blob/a55ada8dfb6051362fcc468d888194bdcd6eb346/apps/vmq_server/priv/static/js/status.js#L167 + queued := st.stats[metricQueueMessageIn] - (st.stats[metricQueueMessageOut] + st.stats[metricQueueMessageDrop] + st.stats[metricQueueMessageUnhandled]) + st.stats["queued_messages"] = max(0, queued) -func (v *VerneMQ) collectErlangVM(mx map[string]float64, pms prometheus.Series) { - v.collectSchedulersUtilization(mx, pms) - pms = pms.FindByNames( - metricSystemContextSwitches, - metricSystemGCCount, - metricSystemIOIn, - metricSystemIOOut, - metricSystemProcessCount, - metricSystemReductions, - metricSystemRunQueue, - metricSystemUtilization, - metricSystemWordsReclaimedByGC, - metricVMMemoryProcesses, - metricVMMemorySystem, - ) - collectNonMQTT(mx, pms) -} + px := join("node", node) -func (v *VerneMQ) collectSchedulersUtilization(mx map[string]float64, pms prometheus.Series) { - for _, pm := range pms { - if isSchedulerUtilizationMetric(pm) { - mx[pm.Name()] += pm.Value - v.notifyNewScheduler(pm.Name()) + for k, val := range st.stats { + mx[join(px, k)] = val + } + for k, val := range st.mqtt4 { + mx[join(px, "mqtt4", k)] = val + } + for k, val := range st.mqtt5 { + mx[join(px, "mqtt5", k)] = val } } -} - -func collectBandwidth(mx map[string]float64, pms prometheus.Series) { - pms = pms.FindByNames( - metricBytesReceived, - metricBytesSent, - ) - collectNonMQTT(mx, pms) -} -func collectRetain(mx map[string]float64, pms prometheus.Series) { - pms = pms.FindByNames( - metricRetainMemory, - metricRetainMessages, - ) - collectNonMQTT(mx, pms) -} - -func collectCluster(mx 
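The `queued_messages` value computed in `collectMetrics` above is derived rather than scraped, following the linked status.js formula from VerneMQ itself: everything that entered the queues minus everything that left them (delivered, dropped, or unhandled), clamped at zero so counter resets cannot yield a negative gauge. A worked example with invented counter values (requires Go 1.21+ for the built-in `max`):

```go
package main

import "fmt"

func main() {
	// Invented sample counter values for one node.
	var (
		in        int64 = 1500 // queue_message_in
		out       int64 = 1400 // queue_message_out
		dropped   int64 = 40   // queue_message_drop
		unhandled int64 = 10   // queue_message_unhandled
	)

	// Same derivation as collectMetrics above: whatever entered the
	// queues but has not left them yet is still queued.
	queued := in - (out + dropped + unhandled)
	fmt.Println(max(0, queued)) // 50; max() clamps negatives after a counter reset
}
```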
map[string]float64, pms prometheus.Series) { - pms = pms.FindByNames( - metricClusterBytesDropped, - metricClusterBytesReceived, - metricClusterBytesSent, - metricNetSplitDetected, - metricNetSplitResolved, - ) - collectNonMQTT(mx, pms) - mx["netsplit_unresolved"] = mx[metricNetSplitDetected] - mx[metricNetSplitResolved] -} + for node := range v.seenNodes { + if _, ok := nodes[node]; !ok { + delete(v.seenNodes, node) + v.removeNodeCharts(node) + } + } -func collectUptime(mx map[string]float64, pms prometheus.Series) { - pms = pms.FindByName(metricSystemWallClock) - collectNonMQTT(mx, pms) } -func collectNonMQTT(mx map[string]float64, pms prometheus.Series) { - for _, pm := range pms { - mx[pm.Name()] += pm.Value - } -} +func (v *VerneMQ) getNodesStats(mfs prometheus.MetricFamilies) map[string]*nodeStats { + nodes := make(map[string]*nodeStats) -func (v *VerneMQ) collectMQTT(mx map[string]float64, pms prometheus.Series) { - for _, pm := range pms { - if !isMQTTMetric(pm) { + for _, mf := range mfs { + name, _ := strings.CutPrefix(mf.Name(), v.namespace.name+"_") + if isSchedulerUtilizationMetric(name) { continue } - version := versionLabelValue(pm) - if version == "" { - continue - } - - mx[pm.Name()] += pm.Value - mx[join(pm.Name(), "v", version)] += pm.Value - if reason := reasonCodeLabelValue(pm); reason != "" { - mx[join(pm.Name(), reason)] += pm.Value - mx[join(pm.Name(), "v", version, reason)] += pm.Value - - v.notifyNewReason(pm.Name(), reason) + for _, m := range mf.Metrics() { + var value float64 + + switch mf.Type() { + case model.MetricTypeGauge: + value = m.Gauge().Value() + case model.MetricTypeCounter: + value = m.Counter().Value() + default: + continue + } + + node := m.Labels().Get("node") + if node == "" { + continue + } + + if _, ok := nodes[node]; !ok { + nodes[node] = newNodeStats() + } + + nst := nodes[node] + + if len(m.Labels()) == 1 { + nst.stats[name] += int64(value) + continue + } + + if !strings.HasPrefix(name, "mqtt_") && name != metricClientKeepaliveExpired { + continue + } + + switch m.Labels().Get("mqtt_version") { + case "4": + nst.mqtt4[name] += int64(value) + m.Labels().Range(func(l labels.Label) { + if l.Name == "return_code" { + nst.mqtt4[join(name, l.Name, l.Value)] += int64(value) + } + }) + case "5": + nst.mqtt5[name] += int64(value) + m.Labels().Range(func(l labels.Label) { + if l.Name == "reason_code" { + nst.mqtt5[join(name, l.Name, l.Value)] += int64(value) + } + }) + } } } -} - -func isMQTTMetric(pm prometheus.SeriesSample) bool { - return strings.HasPrefix(pm.Name(), "mqtt_") -} -func isSchedulerUtilizationMetric(pm prometheus.SeriesSample) bool { - return strings.HasPrefix(pm.Name(), "system_utilization_scheduler_") + return nodes } -func reasonCodeLabelValue(pm prometheus.SeriesSample) string { - if v := pm.Labels.Get("reason_code"); v != "" { - return v +func (v *VerneMQ) getMetricNamespace(mfs prometheus.MetricFamilies) (string, error) { + want := metricPUBLISHError + for _, mf := range mfs { + if strings.HasSuffix(mf.Name(), want) { + s := strings.TrimSuffix(mf.Name(), want) + s = strings.TrimSuffix(s, "_") + return s, nil + } } - // "mqtt_connack_sent" v4 has return_code - return pm.Labels.Get("return_code") + + return "", errors.New("unexpected response: not VerneMQ metrics") } -func versionLabelValue(pm prometheus.SeriesSample) string { - return pm.Labels.Get("mqtt_version") +func isSchedulerUtilizationMetric(name string) bool { + return strings.HasPrefix(name, "system_utilization_scheduler_") } func join(a, b string, rest ...string) 
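The key construction in `getNodesStats` and `collectMetrics` above is what lets the chart templates find their values: a reason-coded sample is keyed `<metric>_reason_code_<reason>` in the per-version bucket, then prefixed with `node_<node>_mqtt5_` in the final metrics map, which is exactly the string `dimMqttReason` produces once its `%s` verbs are filled. A sketch with a hypothetical node and sample; `join` is copied from this patch:

```go
package main

import "fmt"

// join underscore-concatenates its arguments, as in the patch.
func join(a, b string, rest ...string) string {
	s := a + "_" + b
	for _, v := range rest {
		s += "_" + v
	}
	return s
}

func main() {
	// Hypothetical scraped sample (namespace already trimmed):
	// mqtt_disconnect_sent{node="VerneMQ@node1", mqtt_version="5", reason_code="normal_disconnect"} 7
	node, name, reason := "VerneMQ@node1", "mqtt_disconnect_sent", "normal_disconnect"

	// getNodesStats: key inside the per-version bucket.
	k := join(name, "reason_code", reason)

	// collectMetrics: final metrics-map key.
	mxKey := join(join("node", node), "mqtt5", k)
	fmt.Println(mxKey)

	// dimMqttReason yields "node_%s_mqtt%s_<name>_reason_code_<reason>";
	// newNodeMqttCharts fills the verbs with (node, "5"): the same string.
	dimID := fmt.Sprintf(join("node_%s_mqtt%s", name, "reason_code", reason), node, "5")
	fmt.Println(mxKey == dimID) // true
}
```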
string { - v := a + "_" + b - switch len(rest) { - case 0: - return v - default: - return join(v, rest[0], rest[1:]...) + s := a + "_" + b + for _, v := range rest { + s += "_" + v } + return s } diff --git a/src/go/plugin/go.d/modules/vernemq/config_schema.json b/src/go/plugin/go.d/modules/vernemq/config_schema.json index 092d7f417..9f3454a37 100644 --- a/src/go/plugin/go.d/modules/vernemq/config_schema.json +++ b/src/go/plugin/go.d/modules/vernemq/config_schema.json @@ -105,7 +105,6 @@ "required": [ "url" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/vernemq/init.go b/src/go/plugin/go.d/modules/vernemq/init.go index 64ed3418c..072372b39 100644 --- a/src/go/plugin/go.d/modules/vernemq/init.go +++ b/src/go/plugin/go.d/modules/vernemq/init.go @@ -11,16 +11,16 @@ import ( func (v *VerneMQ) validateConfig() error { if v.URL == "" { - return errors.New("url is not set") + return errors.New("url is required but not set") } return nil } func (v *VerneMQ) initPrometheusClient() (prometheus.Prometheus, error) { - client, err := web.NewHTTPClient(v.Client) + client, err := web.NewHTTPClient(v.ClientConfig) if err != nil { return nil, err } - return prometheus.New(client, v.Request), nil + return prometheus.New(client, v.RequestConfig), nil } diff --git a/src/go/plugin/go.d/modules/vernemq/integrations/vernemq.md b/src/go/plugin/go.d/modules/vernemq/integrations/vernemq.md index f3b4c2877..13db0114d 100644 --- a/src/go/plugin/go.d/modules/vernemq/integrations/vernemq.md +++ b/src/go/plugin/go.d/modules/vernemq/integrations/vernemq.md @@ -54,80 +54,101 @@ The scope defines the instance that the metric belongs to. An instance is unique -### Per VerneMQ instance +### Per node -These metrics refer to the entire monitored application. +These metrics refer to the VerneMQ node. -This scope has no labels. +Labels: + +| Label | Description | +|:-----------|:----------------| +| node | The value of this label is identical to the value of the "node" label exposed by VerneMQ. 
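`getMetricNamespace` above avoids hard-coding the exporter prefix: it looks for any metric family ending in the PUBLISH-error metric name and takes whatever precedes it as the namespace, which is then trimmed from every family name during collection. A sketch of that logic; the literal value of `metricPUBLISHError` is defined outside this hunk, so the `"mqtt_publish_error"` string here is an assumption:

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// Assumed value of metricPUBLISHError; the constant lives outside this hunk.
const wantSuffix = "mqtt_publish_error"

// namespaceOf mirrors getMetricNamespace: find a family ending in the
// PUBLISH-error metric and treat the leading part as the namespace.
func namespaceOf(familyNames []string) (string, error) {
	for _, fn := range familyNames {
		if strings.HasSuffix(fn, wantSuffix) {
			s := strings.TrimSuffix(fn, wantSuffix)
			return strings.TrimSuffix(s, "_"), nil
		}
	}
	return "", errors.New("unexpected response: not VerneMQ metrics")
}

func main() {
	ns, err := namespaceOf([]string{"vernemq_mqtt_publish_error"})
	fmt.Println(ns, err) // "vernemq" <nil>; later trimmed off every family name
}
```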
| Metrics: | Metric | Dimensions | Unit | |:------|:----------|:----| -| vernemq.sockets | open | sockets | -| vernemq.socket_operations | open, close | sockets/s | -| vernemq.client_keepalive_expired | closed | sockets/s | -| vernemq.socket_close_timeout | closed | sockets/s | -| vernemq.socket_errors | errors | errors/s | -| vernemq.queue_processes | queue_processes | queue processes | -| vernemq.queue_processes_operations | setup, teardown | events/s | -| vernemq.queue_process_init_from_storage | queue_processes | queue processes/s | -| vernemq.queue_messages | received, sent | messages/s | -| vernemq.queue_undelivered_messages | dropped, expired, unhandled | messages/s | -| vernemq.router_subscriptions | subscriptions | subscriptions | -| vernemq.router_matched_subscriptions | local, remote | subscriptions/s | -| vernemq.router_memory | used | KiB | -| vernemq.average_scheduler_utilization | utilization | percentage | -| vernemq.system_utilization_scheduler | a dimension per scheduler | percentage | -| vernemq.system_processes | processes | processes | -| vernemq.system_reductions | reductions | ops/s | -| vernemq.system_context_switches | context_switches | ops/s | -| vernemq.system_io | received, sent | kilobits/s | -| vernemq.system_run_queue | ready | processes | -| vernemq.system_gc_count | gc | ops/s | -| vernemq.system_gc_words_reclaimed | words_reclaimed | ops/s | -| vernemq.system_allocated_memory | processes, system | KiB | -| vernemq.bandwidth | received, sent | kilobits/s | -| vernemq.retain_messages | messages | messages | -| vernemq.retain_memory | used | KiB | -| vernemq.cluster_bandwidth | received, sent | kilobits/s | -| vernemq.cluster_dropped | dropped | kilobits/s | -| vernemq.netsplit_unresolved | unresolved | netsplits | -| vernemq.netsplits | resolved, detected | netsplits/s | -| vernemq.mqtt_auth | received, sent | packets/s | -| vernemq.mqtt_auth_received_reason | a dimensions per reason | packets/s | -| vernemq.mqtt_auth_sent_reason | a dimensions per reason | packets/s | -| vernemq.mqtt_connect | connect, connack | packets/s | -| vernemq.mqtt_connack_sent_reason | a dimensions per reason | packets/s | -| vernemq.mqtt_disconnect | received, sent | packets/s | -| vernemq.mqtt_disconnect_received_reason | a dimensions per reason | packets/s | -| vernemq.mqtt_disconnect_sent_reason | a dimensions per reason | packets/s | -| vernemq.mqtt_subscribe | subscribe, suback | packets/s | -| vernemq.mqtt_subscribe_error | failed | ops/s | -| vernemq.mqtt_subscribe_auth_error | unauth | attempts/s | -| vernemq.mqtt_unsubscribe | unsubscribe, unsuback | packets/s | -| vernemq.mqtt_unsubscribe_error | mqtt_unsubscribe_error | ops/s | -| vernemq.mqtt_publish | received, sent | packets/s | -| vernemq.mqtt_publish_errors | failed | ops/s | -| vernemq.mqtt_publish_auth_errors | unauth | attempts/s | -| vernemq.mqtt_puback | received, sent | packets/s | -| vernemq.mqtt_puback_received_reason | a dimensions per reason | packets/s | -| vernemq.mqtt_puback_sent_reason | a dimensions per reason | packets/s | -| vernemq.mqtt_puback_invalid_error | unexpected | messages/s | -| vernemq.mqtt_pubrec | received, sent | packets/s | -| vernemq.mqtt_pubrec_received_reason | a dimensions per reason | packets/s | -| vernemq.mqtt_pubrec_sent_reason | a dimensions per reason | packets/s | -| vernemq.mqtt_pubrec_invalid_error | unexpected | messages/s | -| vernemq.mqtt_pubrel | received, sent | packets/s | -| vernemq.mqtt_pubrel_received_reason | a dimensions per reason | packets/s | -| 
vernemq.mqtt_pubrel_sent_reason | a dimensions per reason | packets/s | -| vernemq.mqtt_pubcom | received, sent | packets/s | -| vernemq.mqtt_pubcomp_received_reason | a dimensions per reason | packets/s | -| vernemq.mqtt_pubcomp_sent_reason | a dimensions per reason | packets/s | -| vernemq.mqtt_pubcomp_invalid_error | unexpected | messages/s | -| vernemq.mqtt_ping | pingreq, pingresp | packets/s | +| vernemq.node_socket | open | sockets | +| vernemq.node_socket_operations | open, close | sockets/s | +| vernemq.node_client_keepalive_expired | closed | sockets/s | +| vernemq.node_socket_close_timeout | closed | sockets/s | +| vernemq.node_socket_errors | errors | errors/s | +| vernemq.node_queue_processes | queue_processes | queue processes | +| vernemq.node_queue_processes_operations | setup, teardown | events/s | +| vernemq.node_queue_process_init_from_storage | queue_processes | queue processes/s | +| vernemq.node_queue_messages | received, sent | messages/s | +| vernemq.node_queued_messages | queued | messages | +| vernemq.node_queue_undelivered_messages | dropped, expired, unhandled | messages/s | +| vernemq.node_router_subscriptions | subscriptions | subscriptions | +| vernemq.node_router_matched_subscriptions | local, remote | subscriptions/s | +| vernemq.node_router_memory | used | bytes | +| vernemq.node_average_scheduler_utilization | utilization | percentage | +| vernemq.node_system_processes | processes | processes | +| vernemq.node_system_reductions | reductions | ops/s | +| vernemq.node_system_context_switches | context_switches | ops/s | +| vernemq.node_system_io | received, sent | bytes/s | +| vernemq.node_system_run_queue | ready | processes | +| vernemq.node_system_gc_count | gc | ops/s | +| vernemq.node_system_gc_words_reclaimed | words_reclaimed | ops/s | +| vernemq.node_system_allocated_memory | processes, system | bytes | +| vernemq.node_traffic | received, sent | bytes/s | +| vernemq.node_retain_messages | messages | messages | +| vernemq.node_retain_memory | used | bytes | +| vernemq.node_cluster_traffic | received, sent | bytes/s | +| vernemq.node_cluster_dropped | dropped | bytes/s | +| vernemq.node_netsplit_unresolved | unresolved | netsplits | +| vernemq.node_netsplits | resolved, detected | netsplits/s | | vernemq.node_uptime | time | seconds | +### Per mqtt + +These metrics are specific to the used MQTT protocol version. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| node | The value of this label is identical to the value of the "node" label exposed by VerneMQ. | +| mqtt_version | MQTT version. 
| + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| vernemq.node_mqtt_auth | received, sent | packets/s | +| vernemq.node_mqtt_auth_received_by_reason_code | success, continue_authentication, reauthenticate | packets/s | +| vernemq.node_mqtt_auth_sent_by_reason_code | success, continue_authentication, reauthenticate | packets/s | +| vernemq.node_mqtt_connect | connect, connack | packets/s | +| vernemq.node_mqtt_connack_sent_by_return_code | success, unsupported_protocol_version, client_identifier_not_valid, server_unavailable, bad_username_or_password, not_authorized | packets/s | +| vernemq.node_mqtt_connack_sent_by_reason_code | success, unspecified_error, malformed_packet, protocol_error, impl_specific_error, unsupported_protocol_version, client_identifier_not_valid, bad_username_or_password, not_authorized, server_unavailable, server_busy, banned, bad_authentication_method, topic_name_invalid, packet_too_large, quota_exceeded, payload_format_invalid, retain_not_supported, qos_not_supported, use_another_server, server_moved, connection_rate_exceeded | packets/s | +| vernemq.node_mqtt_disconnect | received, sent | packets/s | +| vernemq.node_mqtt_disconnect_received_by_reason_code | normal_disconnect, disconnect_with_will_msg, unspecified_error, malformed_packet, protocol_error, impl_specific_error, topic_name_invalid, receive_max_exceeded, topic_alias_invalid, packet_too_large, message_rate_too_high, quota_exceeded, administrative_action, payload_format_invalid | packets/s | +| vernemq.node_mqtt_disconnect_sent_by_reason_code | normal_disconnect, unspecified_error, malformed_packet, protocol_error, impl_specific_error, not_authorized, server_busy, server_shutting_down, keep_alive_timeout, session_taken_over, topic_filter_invalid, topic_name_invalid, receive_max_exceeded, topic_alias_invalid, packet_too_large, message_rate_too_high, quota_exceeded, administrative_action, payload_format_invalid, retain_not_supported, qos_not_supported, use_another_server, server_moved, shared_subs_not_supported, connection_rate_exceeded, max_connect_time, subscription_ids_not_supported, wildcard_subs_not_supported | packets/s | +| vernemq.node_mqtt_subscribe | subscribe, suback | packets/s | +| vernemq.node_mqtt_subscribe_error | subscribe | errors/s | +| vernemq.node_mqtt_subscribe_auth_error | subscribe_auth | errors/s | +| vernemq.node_mqtt_unsubscribe | unsubscribe, unsuback | packets/s | +| vernemq.node_mqtt_unsubscribe_error | unsubscribe | errors/s | +| vernemq.node_mqtt_publish | received, sent | packets/s | +| vernemq.node_mqtt_publish_errors | publish | errors/s | +| vernemq.node_mqtt_publish_auth_errors | publish_auth | errors/s | +| vernemq.node_mqtt_puback | received, sent | packets/s | +| vernemq.node_mqtt_puback_received_by_reason_code | success, no_matching_subscribers, unspecified_error, impl_specific_error, not_authorized, topic_name_invalid, packet_id_in_use, quota_exceeded, payload_format_invalid | packets/s | +| vernemq.node_mqtt_puback_sent_by_reason_code | success, no_matching_subscribers, unspecified_error, impl_specific_error, not_authorized, topic_name_invalid, packet_id_in_use, quota_exceeded, payload_format_invalid | packets/s | +| vernemq.node_mqtt_puback_invalid_error | unexpected | messages/s | +| vernemq.node_mqtt_pubrec | received, sent | packets/s | +| vernemq.node_mqtt_pubrec_received_by_reason_code | success, no_matching_subscribers, unspecified_error, impl_specific_error, not_authorized, topic_name_invalid, packet_id_in_use, quota_exceeded, 
payload_format_invalid | packets/s | +| vernemq.node_mqtt_pubrec_sent_by_reason_code | success, no_matching_subscribers, unspecified_error, impl_specific_error, not_authorized, topic_name_invalid, packet_id_in_use, quota_exceeded, payload_format_invalid | packets/s | +| vernemq.node_mqtt_pubrec_invalid_error | unexpected | messages/s | +| vernemq.node_mqtt_pubrel | received, sent | packets/s | +| vernemq.node_mqtt_pubrel_received_by_reason_code | success, packet_id_not_found | packets/s | +| vernemq.node_mqtt_pubrel_sent_by_reason_code | success, packet_id_not_found | packets/s | +| vernemq.node_mqtt_pubcomp | received, sent | packets/s | +| vernemq.node_mqtt_pubcomp_received_by_reason_code | success, packet_id_not_found | packets/s | +| vernemq.node_mqtt_pubcomp_sent_by_reason_code | success, packet_id_not_found | packets/s | +| vernemq.node_mqtt_pubcomp_invalid_error | unexpected | messages/s | +| vernemq.node_mqtt_ping | pingreq, pingresp | packets/s | + ## Alerts @@ -137,32 +158,32 @@ The following alerts are available: | Alert name | On metric | Description | |:------------|:----------|:------------| -| [ vernemq_socket_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.socket_errors | number of socket errors in the last minute | -| [ vernemq_queue_message_drop ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.queue_undelivered_messages | number of dropped messaged due to full queues in the last minute | -| [ vernemq_queue_message_expired ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.queue_undelivered_messages | number of messages which expired before delivery in the last minute | -| [ vernemq_queue_message_unhandled ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.queue_undelivered_messages | number of unhandled messages (connections with clean session=true) in the last minute | -| [ vernemq_average_scheduler_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.average_scheduler_utilization | average scheduler utilization over the last 10 minutes | -| [ vernemq_cluster_dropped ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.cluster_dropped | amount of traffic dropped during communication with the cluster nodes in the last minute | -| [ vernemq_netsplits ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vvernemq.netsplits | number of detected netsplits (split brain situation) in the last minute | -| [ vernemq_mqtt_connack_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_connack_sent_reason | number of sent unsuccessful v3/v5 CONNACK packets in the last minute | -| [ vernemq_mqtt_disconnect_received_reason_not_normal ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_disconnect_received_reason | number of received not normal v5 DISCONNECT packets in the last minute | -| [ vernemq_mqtt_disconnect_sent_reason_not_normal ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_disconnect_sent_reason | number of sent not normal v5 DISCONNECT packets in the last minute | -| [ vernemq_mqtt_subscribe_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_subscribe_error | 
number of failed v3/v5 SUBSCRIBE operations in the last minute | -| [ vernemq_mqtt_subscribe_auth_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_subscribe_auth_error | number of unauthorized v3/v5 SUBSCRIBE attempts in the last minute | -| [ vernemq_mqtt_unsubscribe_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_unsubscribe_error | number of failed v3/v5 UNSUBSCRIBE operations in the last minute | -| [ vernemq_mqtt_publish_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_publish_errors | number of failed v3/v5 PUBLISH operations in the last minute | -| [ vernemq_mqtt_publish_auth_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_publish_auth_errors | number of unauthorized v3/v5 PUBLISH attempts in the last minute | -| [ vernemq_mqtt_puback_received_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_puback_received_reason | number of received unsuccessful v5 PUBACK packets in the last minute | -| [ vernemq_mqtt_puback_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_puback_sent_reason | number of sent unsuccessful v5 PUBACK packets in the last minute | -| [ vernemq_mqtt_puback_unexpected ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_puback_invalid_error | number of received unexpected v3/v5 PUBACK packets in the last minute | -| [ vernemq_mqtt_pubrec_received_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubrec_received_reason | number of received unsuccessful v5 PUBREC packets in the last minute | -| [ vernemq_mqtt_pubrec_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubrec_sent_reason | number of sent unsuccessful v5 PUBREC packets in the last minute | -| [ vernemq_mqtt_pubrec_invalid_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubrec_invalid_error | number of received unexpected v3 PUBREC packets in the last minute | -| [ vernemq_mqtt_pubrel_received_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubrel_received_reason | number of received unsuccessful v5 PUBREL packets in the last minute | -| [ vernemq_mqtt_pubrel_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubrel_sent_reason | number of sent unsuccessful v5 PUBREL packets in the last minute | -| [ vernemq_mqtt_pubcomp_received_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubcomp_received_reason | number of received unsuccessful v5 PUBCOMP packets in the last minute | -| [ vernemq_mqtt_pubcomp_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubcomp_sent_reason | number of sent unsuccessful v5 PUBCOMP packets in the last minute | -| [ vernemq_mqtt_pubcomp_unexpected ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubcomp_invalid_error | number of received unexpected v3/v5 PUBCOMP 
packets in the last minute | +| [ vernemq_socket_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_socket_errors | Node ${label:node} socket errors in the last minute | +| [ vernemq_queue_message_drop ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_queue_undelivered_messages | Node ${label:node} dropped messages due to full queues in the last minute | +| [ vernemq_queue_message_expired ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_queue_undelivered_messages | Node ${label:node} messages expired before delivery in the last minute | +| [ vernemq_queue_message_unhandled ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_queue_undelivered_messages | Node ${label:node} unhandled messages in the last minute | +| [ vernemq_average_scheduler_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_average_scheduler_utilization | Node ${label:node} scheduler utilization over the last 10 minutes | +| [ vernemq_cluster_dropped ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_cluster_dropped | Node ${label:node} traffic dropped during communication with the cluster nodes in the last minute | +| [ vernemq_netsplits ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_netsplits | Node ${label:node} detected netsplits (split brain) in the last minute | +| [ vernemq_mqtt_connack_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_connack_sent_by_reason_code | Node ${label:node} sent unsuccessful v5 CONNACK packets in the last minute | +| [ vernemq_mqtt_disconnect_received_reason_not_normal ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_disconnect_received_by_reason_code | Node ${label:node} received not normal v5 DISCONNECT packets in the last minute | +| [ vernemq_mqtt_disconnect_sent_reason_not_normal ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_disconnect_sent_by_reason_code | Node ${label:node} sent not normal v5 DISCONNECT packets in the last minute | +| [ vernemq_mqtt_subscribe_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_subscribe_error | Node ${label:node} mqtt v${label:mqtt_version} failed SUBSCRIBE operations in the last minute | +| [ vernemq_mqtt_subscribe_auth_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_subscribe_auth_error | Node ${label:node} mqtt v${label:mqtt_version} unauthorized SUBSCRIBE attempts in the last minute | +| [ vernemq_mqtt_unsubscribe_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_unsubscribe_error | Node ${label:node} mqtt v${label:mqtt_version} failed UNSUBSCRIBE operations in the last minute | +| [ vernemq_mqtt_publish_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_publish_errors | Node ${label:node} mqtt v${label:mqtt_version} failed PUBLISH operations in the last minute | +| [ vernemq_mqtt_publish_auth_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) 
| vernemq.node_mqtt_publish_auth_errors | Node ${label:node} mqtt v${label:mqtt_version} unauthorized PUBLISH attempts in the last minute | +| [ vernemq_mqtt_puback_received_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_puback_received_by_reason_code | Node ${label:node} mqtt v5 received unsuccessful PUBACK packets in the last minute | +| [ vernemq_mqtt_puback_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_puback_sent_by_reason_code | Node ${label:node} mqtt v5 sent unsuccessful PUBACK packets in the last minute | +| [ vernemq_mqtt_puback_unexpected ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_puback_invalid_error | Node ${label:node} mqtt v${label:mqtt_version} received unexpected PUBACK messages in the last minute | +| [ vernemq_mqtt_pubrec_received_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_pubrec_received_by_reason_code | Node ${label:node} mqtt v5 received unsuccessful PUBREC packets in the last minute | +| [ vernemq_mqtt_pubrec_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_pubrec_sent_by_reason_code | Node ${label:node} mqtt v5 sent unsuccessful PUBREC packets in the last minute | +| [ vernemq_mqtt_pubrec_invalid_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_pubrec_invalid_error | Node ${label:node} mqtt v${label:mqtt_version} received invalid PUBREC packets in the last minute | +| [ vernemq_mqtt_pubrel_received_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_pubrel_received_by_reason_code | Node ${label:node} mqtt v5 received unsuccessful PUBREL packets in the last minute | +| [ vernemq_mqtt_pubrel_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_pubrel_sent_by_reason_code | Node ${label:node} mqtt v5 sent unsuccessful PUBREL packets in the last minute | +| [ vernemq_mqtt_pubcomp_received_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_pubcomp_received_by_reason_code | Node ${label:node} mqtt v5 received unsuccessful PUBCOMP packets in the last minute | +| [ vernemq_mqtt_pubcomp_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_pubcomp_sent_by_reason_code | Node ${label:node} mqtt v5 sent unsuccessful PUBCOMP packets in the last minute | +| [ vernemq_mqtt_pubcomp_unexpected ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_pubcomp_invalid_error | Node ${label:node} mqtt v${label:mqtt_version} received unexpected PUBCOMP packets in the last minute | ## Setup @@ -178,8 +199,8 @@ No action required. The configuration file name for this integration is `go.d/vernemq.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/vernemq/metadata.yaml b/src/go/plugin/go.d/modules/vernemq/metadata.yaml index 2ec25fb77..0176bfc20 100644 --- a/src/go/plugin/go.d/modules/vernemq/metadata.yaml +++ b/src/go/plugin/go.d/modules/vernemq/metadata.yaml @@ -154,108 +154,108 @@ modules: list: [] alerts: - name: vernemq_socket_errors - metric: vernemq.socket_errors - info: number of socket errors in the last minute + metric: vernemq.node_socket_errors + info: Node ${label:node} socket errors in the last minute link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf - name: vernemq_queue_message_drop - metric: vernemq.queue_undelivered_messages - info: number of dropped messaged due to full queues in the last minute + metric: vernemq.node_queue_undelivered_messages + info: Node ${label:node} dropped messages due to full queues in the last minute link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf - name: vernemq_queue_message_expired - metric: vernemq.queue_undelivered_messages - info: number of messages which expired before delivery in the last minute + metric: vernemq.node_queue_undelivered_messages + info: Node ${label:node} expired before delivery messages in the last minute link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf - name: vernemq_queue_message_unhandled - metric: vernemq.queue_undelivered_messages - info: "number of unhandled messages (connections with clean session=true) in the last minute" + metric: vernemq.node_queue_undelivered_messages + info: Node ${label:node} unhandled messages in the last minute link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf - name: vernemq_average_scheduler_utilization - metric: vernemq.average_scheduler_utilization - info: average scheduler utilization over the last 10 minutes + metric: vernemq.node_average_scheduler_utilization + info: Node ${label:node} scheduler utilization over the last 10 minutes link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf - name: vernemq_cluster_dropped - metric: vernemq.cluster_dropped - info: amount of traffic dropped during communication with the cluster nodes in the last minute + metric: vernemq.node_cluster_dropped + info: Node ${label:node} traffic dropped during communication with the cluster nodes in the last minute link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf - name: vernemq_netsplits - metric: vvernemq.netsplits - info: "number of detected netsplits (split brain situation) in the last minute" + metric: vernemq.node_netsplits + info: "Node ${label:node} detected netsplits (split brain) in the last minute" link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf - name: vernemq_mqtt_connack_sent_reason_unsuccessful - metric: vernemq.mqtt_connack_sent_reason - info: number of sent unsuccessful v3/v5 CONNACK packets in the last minute + metric: vernemq.node_mqtt_connack_sent_by_reason_code + info: Node ${label:node} unsuccessful sent v5 CONNACK packets 
in the last minute link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf - name: vernemq_mqtt_disconnect_received_reason_not_normal - metric: vernemq.mqtt_disconnect_received_reason - info: number of received not normal v5 DISCONNECT packets in the last minute + metric: vernemq.node_mqtt_disconnect_received_by_reason_code + info: Node ${label:node} received not normal v5 DISCONNECT packets in the last minute link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf - name: vernemq_mqtt_disconnect_sent_reason_not_normal - metric: vernemq.mqtt_disconnect_sent_reason - info: number of sent not normal v5 DISCONNECT packets in the last minute + metric: vernemq.node_mqtt_disconnect_sent_by_reason_code + info: Node ${label:node} sent not normal v5 DISCONNECT packets in the last minute link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf - name: vernemq_mqtt_subscribe_error - metric: vernemq.mqtt_subscribe_error - info: number of failed v3/v5 SUBSCRIBE operations in the last minute + metric: vernemq.node_mqtt_subscribe_error + info: Node ${label:node} mqtt v${label:mqtt_version} failed SUBSCRIBE operations in the last minute link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf - name: vernemq_mqtt_subscribe_auth_error - metric: vernemq.mqtt_subscribe_auth_error - info: number of unauthorized v3/v5 SUBSCRIBE attempts in the last minute + metric: vernemq.node_mqtt_subscribe_auth_error + info: Node ${label:node} mqtt v${label:mqtt_version} unauthorized SUBSCRIBE attempts in the last minute link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf - name: vernemq_mqtt_unsubscribe_error - metric: vernemq.mqtt_unsubscribe_error - info: number of failed v3/v5 UNSUBSCRIBE operations in the last minute + metric: vernemq.node_mqtt_unsubscribe_error + info: Node ${label:node} mqtt v${label:mqtt_version} failed UNSUBSCRIBE operations in the last minute link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf - name: vernemq_mqtt_publish_errors - metric: vernemq.mqtt_publish_errors - info: number of failed v3/v5 PUBLISH operations in the last minute + metric: vernemq.node_mqtt_publish_errors + info: Node ${label:node} mqtt v${label:mqtt_version} failed PUBLISH operations in the last minute link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf - name: vernemq_mqtt_publish_auth_errors - metric: vernemq.mqtt_publish_auth_errors - info: number of unauthorized v3/v5 PUBLISH attempts in the last minute + metric: vernemq.node_mqtt_publish_auth_errors + info: Node ${label:node} mqtt v${label:mqtt_version} unauthorized PUBLISH attempts in the last minute link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf - name: vernemq_mqtt_puback_received_reason_unsuccessful - metric: vernemq.mqtt_puback_received_reason - info: number of received unsuccessful v5 PUBACK packets in the last minute + metric: vernemq.node_mqtt_puback_received_by_reason_code + info: Node ${label:node} mqtt v5 received unsuccessful PUBACK packets in the last minute link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf - name: vernemq_mqtt_puback_sent_reason_unsuccessful - metric: vernemq.mqtt_puback_sent_reason - info: number of sent unsuccessful v5 PUBACK packets in the last minute + metric: vernemq.node_mqtt_puback_sent_by_reason_code + info: Node ${label:node} mqtt v5 
sent unsuccessful PUBACK packets in the last minute link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf - name: vernemq_mqtt_puback_unexpected - metric: vernemq.mqtt_puback_invalid_error - info: number of received unexpected v3/v5 PUBACK packets in the last minute + metric: vernemq.node_mqtt_puback_invalid_error + info: Node ${label:node} mqtt v${label:mqtt_version} received unexpected PUBACK messages in the last minute link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf - name: vernemq_mqtt_pubrec_received_reason_unsuccessful - metric: vernemq.mqtt_pubrec_received_reason - info: number of received unsuccessful v5 PUBREC packets in the last minute + metric: vernemq.node_mqtt_pubrec_received_by_reason_code + info: Node ${label:node} mqtt v5 received unsuccessful PUBREC packets in the last minute link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf - name: vernemq_mqtt_pubrec_sent_reason_unsuccessful - metric: vernemq.mqtt_pubrec_sent_reason - info: number of sent unsuccessful v5 PUBREC packets in the last minute + metric: vernemq.node_mqtt_pubrec_sent_by_reason_code + info: Node ${label:node} mqtt v5 sent unsuccessful PUBREC packets in the last minute link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf - name: vernemq_mqtt_pubrec_invalid_error - metric: vernemq.mqtt_pubrec_invalid_error - info: number of received unexpected v3 PUBREC packets in the last minute + metric: vernemq.node_mqtt_pubrec_invalid_error + info: Node ${label:node} mqtt v${label:mqtt_version} received invalid PUBREC packets in the last minute link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf - name: vernemq_mqtt_pubrel_received_reason_unsuccessful - metric: vernemq.mqtt_pubrel_received_reason - info: number of received unsuccessful v5 PUBREL packets in the last minute + metric: vernemq.node_mqtt_pubrel_received_by_reason_code + info: Node ${label:node} mqtt v5 received unsuccessful PUBREL packets in the last minute link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf - name: vernemq_mqtt_pubrel_sent_reason_unsuccessful - metric: vernemq.mqtt_pubrel_sent_reason - info: number of sent unsuccessful v5 PUBREL packets in the last minute + metric: vernemq.node_mqtt_pubrel_sent_by_reason_code + info: Node ${label:node} mqtt v5 sent unsuccessful PUBREL packets in the last minute link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf - name: vernemq_mqtt_pubcomp_received_reason_unsuccessful - metric: vernemq.mqtt_pubcomp_received_reason - info: number of received unsuccessful v5 PUBCOMP packets in the last minute + metric: vernemq.node_mqtt_pubcomp_received_by_reason_code + info: Node ${label:node} mqtt v5 received unsuccessful PUBCOMP packets in the last minute link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf - name: vernemq_mqtt_pubcomp_sent_reason_unsuccessful - metric: vernemq.mqtt_pubcomp_sent_reason - info: number of sent unsuccessful v5 PUBCOMP packets in the last minute + metric: vernemq.node_mqtt_pubcomp_sent_by_reason_code + info: Node ${label:node} mqtt v5 sent unsuccessful PUBCOMP packets in the last minute link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf - name: vernemq_mqtt_pubcomp_unexpected - metric: vernemq.mqtt_pubcomp_invalid_error - info: number of received unexpected v3/v5 PUBCOMP packets in the last
minute + metric: vernemq.node_mqtt_pubcomp_invalid_error + info: Node ${label:node} mqtt v${label:mqtt_version} received unexpected PUBCOMP packets in the last minute link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf metrics: folding: @@ -264,68 +264,76 @@ modules: description: "" availability: [] scopes: - - name: global - description: These metrics refer to the entire monitored application. - labels: [] + - name: node + description: These metrics refer to the VerneMQ node. + labels: + - name: node + description: The value of this label is identical to the value of the "node" label exposed by VerneMQ. metrics: - - name: vernemq.sockets + - name: vernemq.node_socket description: Open Sockets unit: sockets chart_type: line dimensions: - name: open - - name: vernemq.socket_operations + - name: vernemq.node_socket_operations description: Socket Open and Close Events unit: sockets/s chart_type: line dimensions: - name: open - name: close - - name: vernemq.client_keepalive_expired + - name: vernemq.node_client_keepalive_expired description: Closed Sockets due to Keepalive Time Expired unit: sockets/s chart_type: line dimensions: - name: closed - - name: vernemq.socket_close_timeout + - name: vernemq.node_socket_close_timeout description: Closed Sockets due to no CONNECT Frame On Time unit: sockets/s chart_type: line dimensions: - name: closed - - name: vernemq.socket_errors + - name: vernemq.node_socket_errors description: Socket Errors unit: errors/s chart_type: line dimensions: - name: errors - - name: vernemq.queue_processes + - name: vernemq.node_queue_processes description: Living Queues in an Online or an Offline State unit: queue processes chart_type: line dimensions: - name: queue_processes - - name: vernemq.queue_processes_operations + - name: vernemq.node_queue_processes_operations description: Queue Processes Setup and Teardown Events unit: events/s chart_type: line dimensions: - name: setup - name: teardown - - name: vernemq.queue_process_init_from_storage + - name: vernemq.node_queue_process_init_from_storage description: Queue Processes Initialized from Offline Storage unit: queue processes/s chart_type: line dimensions: - name: queue_processes - - name: vernemq.queue_messages + - name: vernemq.node_queue_messages description: Received and Sent PUBLISH Messages unit: messages/s chart_type: area dimensions: - name: received - name: sent - - name: vernemq.queue_undelivered_messages + - name: vernemq.node_queued_messages + description: Queued PUBLISH Messages + unit: messages + chart_type: line + dimensions: + - name: queued + - name: vernemq.node_queue_undelivered_messages description: Undelivered PUBLISH Messages unit: messages/s chart_type: stacked @@ -333,338 +341,452 @@ modules: - name: dropped - name: expired - name: unhandled - - name: vernemq.router_subscriptions + - name: vernemq.node_router_subscriptions description: Subscriptions in the Routing Table unit: subscriptions chart_type: line dimensions: - name: subscriptions - - name: vernemq.router_matched_subscriptions + - name: vernemq.node_router_matched_subscriptions description: Matched Subscriptions unit: subscriptions/s chart_type: line dimensions: - name: local - name: remote - - name: vernemq.router_memory + - name: vernemq.node_router_memory description: Routing Table Memory Usage - unit: KiB + unit: bytes chart_type: area dimensions: - name: used - - name: vernemq.average_scheduler_utilization + - name: vernemq.node_average_scheduler_utilization description: Average Scheduler 
Utilization unit: percentage chart_type: area dimensions: - name: utilization - - name: vernemq.system_utilization_scheduler - description: Scheduler Utilization - unit: percentage - chart_type: stacked - dimensions: - - name: a dimension per scheduler - - name: vernemq.system_processes + - name: vernemq.node_system_processes description: Erlang Processes unit: processes chart_type: line dimensions: - name: processes - - name: vernemq.system_reductions + - name: vernemq.node_system_reductions description: Reductions unit: ops/s chart_type: line dimensions: - name: reductions - - name: vernemq.system_context_switches + - name: vernemq.node_system_context_switches description: Context Switches unit: ops/s chart_type: line dimensions: - name: context_switches - - name: vernemq.system_io + - name: vernemq.node_system_io description: Received and Sent Traffic through Ports - unit: kilobits/s + unit: bytes/s chart_type: area dimensions: - name: received - name: sent - - name: vernemq.system_run_queue + - name: vernemq.node_system_run_queue description: Processes that are Ready to Run on All Run-Queues unit: processes chart_type: line dimensions: - name: ready - - name: vernemq.system_gc_count + - name: vernemq.node_system_gc_count description: GC Count unit: ops/s chart_type: line dimensions: - name: gc - - name: vernemq.system_gc_words_reclaimed + - name: vernemq.node_system_gc_words_reclaimed description: GC Words Reclaimed unit: ops/s chart_type: line dimensions: - name: words_reclaimed - - name: vernemq.system_allocated_memory + - name: vernemq.node_system_allocated_memory description: Memory Allocated by the Erlang Processes and by the Emulator - unit: KiB + unit: bytes chart_type: stacked dimensions: - name: processes - name: system - - name: vernemq.bandwidth - description: Bandwidth - unit: kilobits/s + - name: vernemq.node_traffic + description: Traffic + unit: bytes/s chart_type: area dimensions: - name: received - name: sent - - name: vernemq.retain_messages + - name: vernemq.node_retain_messages description: Stored Retained Messages unit: messages chart_type: line dimensions: - name: messages - - name: vernemq.retain_memory + - name: vernemq.node_retain_memory description: Stored Retained Messages Memory Usage - unit: KiB + unit: bytes chart_type: area dimensions: - name: used - - name: vernemq.cluster_bandwidth + - name: vernemq.node_cluster_traffic description: Communication with Other Cluster Nodes - unit: kilobits/s + unit: bytes/s chart_type: area dimensions: - name: received - name: sent - - name: vernemq.cluster_dropped + - name: vernemq.node_cluster_dropped description: Traffic Dropped During Communication with Other Cluster Nodes - unit: kilobits/s + unit: bytes/s chart_type: area dimensions: - name: dropped - - name: vernemq.netsplit_unresolved + - name: vernemq.node_netsplit_unresolved description: Unresolved Netsplits unit: netsplits chart_type: line dimensions: - name: unresolved - - name: vernemq.netsplits + - name: vernemq.node_netsplits description: Netsplits unit: netsplits/s chart_type: stacked dimensions: - name: resolved - name: detected - - name: vernemq.mqtt_auth - description: v5 AUTH + - name: vernemq.node_uptime + description: Node Uptime + unit: seconds + chart_type: line + dimensions: + - name: time + - name: mqtt + description: These metrics are specific to the MQTT protocol version in use. + labels: + - name: node + description: The value of this label is identical to the value of the "node" label exposed by VerneMQ.
+ - name: mqtt_version + description: MQTT version. + metrics: + - name: vernemq.node_mqtt_auth + description: MQTT AUTH Packets unit: packets/s chart_type: line dimensions: - name: received - name: sent - - name: vernemq.mqtt_auth_received_reason - description: v5 AUTH Received by Reason + - name: vernemq.node_mqtt_auth_received_by_reason_code + description: MQTT AUTH Received by Reason unit: packets/s chart_type: stacked dimensions: - - name: a dimensions per reason - - name: vernemq.mqtt_auth_sent_reason - description: v5 AUTH Sent by Reason + - name: success + - name: continue_authentication + - name: reauthenticate + - name: vernemq.node_mqtt_auth_sent_by_reason_code + description: MQTT AUTH Sent by Reason unit: packets/s chart_type: stacked dimensions: - - name: a dimensions per reason - - name: vernemq.mqtt_connect - description: v3/v5 CONNECT and CONNACK + - name: success + - name: continue_authentication + - name: reauthenticate + - name: vernemq.node_mqtt_connect + description: MQTT CONNECT and CONNACK unit: packets/s chart_type: line dimensions: - name: connect - name: connack - - name: vernemq.mqtt_connack_sent_reason - description: v3/v5 CONNACK Sent by Reason + - name: vernemq.node_mqtt_connack_sent_by_return_code + description: MQTT CONNACK Sent by Return Code + unit: packets/s + chart_type: stacked + dimensions: + - name: success + - name: unsupported_protocol_version + - name: client_identifier_not_valid + - name: server_unavailable + - name: bad_username_or_password + - name: not_authorized + - name: vernemq.node_mqtt_connack_sent_by_reason_code + description: MQTT CONNACK Sent by Reason unit: packets/s chart_type: stacked dimensions: - - name: a dimensions per reason + - name: success + - name: unspecified_error + - name: malformed_packet + - name: protocol_error + - name: impl_specific_error + - name: unsupported_protocol_version + - name: client_identifier_not_valid + - name: bad_username_or_password + - name: not_authorized + - name: server_unavailable + - name: server_busy + - name: banned + - name: bad_authentication_method + - name: topic_name_invalid + - name: packet_too_large + - name: quota_exceeded + - name: payload_format_invalid + - name: retain_not_supported + - name: qos_not_supported + - name: use_another_server + - name: server_moved + - name: connection_rate_exceeded + - name: vernemq.node_mqtt_disconnect + description: MQTT DISCONNECT Packets unit: packets/s chart_type: line dimensions: - name: received - name: sent - - name: vernemq.mqtt_disconnect_received_reason - description: v5 DISCONNECT Received by Reason + - name: vernemq.node_mqtt_disconnect_received_by_reason_code + description: MQTT DISCONNECT Received by Reason unit: packets/s chart_type: stacked dimensions: - - name: a dimensions per reason + - name: normal_disconnect + - name: disconnect_with_will_msg + - name: unspecified_error + - name: malformed_packet + - name: protocol_error + - name: impl_specific_error + - name: topic_name_invalid + - name: receive_max_exceeded + - name: topic_alias_invalid + - name: packet_too_large + - name: message_rate_too_high + - name: quota_exceeded + - name: administrative_action + - name: payload_format_invalid + - name: vernemq.node_mqtt_disconnect_sent_by_reason_code + description: MQTT DISCONNECT Sent by Reason unit: packets/s chart_type: stacked dimensions: - - name: a dimensions per reason - - name:
vernemq.mqtt_subscribe - description: v3/v5 SUBSCRIBE and SUBACK + - name: normal_disconnect + - name: unspecified_error + - name: malformed_packet + - name: protocol_error + - name: impl_specific_error + - name: not_authorized + - name: server_busy + - name: server_shutting_down + - name: keep_alive_timeout + - name: session_taken_over + - name: topic_filter_invalid + - name: topic_name_invalid + - name: receive_max_exceeded + - name: topic_alias_invalid + - name: packet_too_large + - name: message_rate_too_high + - name: quota_exceeded + - name: administrative_action + - name: payload_format_invalid + - name: retain_not_supported + - name: qos_not_supported + - name: use_another_server + - name: server_moved + - name: shared_subs_not_supported + - name: connection_rate_exceeded + - name: max_connect_time + - name: subscription_ids_not_supported + - name: wildcard_subs_not_supported + - name: vernemq.node_mqtt_subscribe + description: MQTT SUBSCRIBE and SUBACK Packets unit: packets/s chart_type: line dimensions: - name: subscribe - name: suback - - name: vernemq.mqtt_subscribe_error - description: v3/v5 Failed SUBSCRIBE Operations due to a Netsplit - unit: ops/s + - name: vernemq.node_mqtt_subscribe_error + description: MQTT Failed SUBSCRIBE Operations due to a Netsplit + unit: errors/s chart_type: line dimensions: - - name: failed - - name: vernemq.mqtt_subscribe_auth_error - description: v3/v5 Unauthorized SUBSCRIBE Attempts - unit: attempts/s + - name: subscribe + - name: vernemq.node_mqtt_subscribe_auth_error + description: MQTT Unauthorized SUBSCRIBE Attempts + unit: errors/s chart_type: line dimensions: - - name: unauth - - name: vernemq.mqtt_unsubscribe - description: v3/v5 UNSUBSCRIBE and UNSUBACK + - name: subscribe_auth + - name: vernemq.node_mqtt_unsubscribe + description: MQTT UNSUBSCRIBE and UNSUBACK Packets unit: packets/s chart_type: line dimensions: - name: unsubscribe - name: unsuback - - name: vernemq.mqtt_unsubscribe_error - description: v3/v5 Failed UNSUBSCRIBE Operations due to a Netsplit - unit: ops/s + - name: vernemq.node_mqtt_unsubscribe_error + description: MQTT Failed UNSUBSCRIBE Operations due to a Netsplit + unit: errors/s chart_type: line dimensions: - - name: mqtt_unsubscribe_error + - name: unsubscribe + - name: vernemq.node_mqtt_publish + description: MQTT QoS 0,1,2 PUBLISH unit: packets/s chart_type: line dimensions: - name: received - name: sent - - name: vernemq.mqtt_publish_errors - description: v3/v5 Failed PUBLISH Operations due to a Netsplit - unit: ops/s + - name: vernemq.node_mqtt_publish_errors + description: MQTT Failed PUBLISH Operations due to a Netsplit + unit: errors/s chart_type: line dimensions: - - name: failed + - name: publish + - name: vernemq.node_mqtt_publish_auth_errors + description: MQTT Unauthorized PUBLISH Attempts + unit: errors/s chart_type: area dimensions: - - name: unauth + - name: publish_auth + - name: vernemq.node_mqtt_puback + description: MQTT QoS 1 PUBACK Packets unit: packets/s chart_type: line dimensions: - name: received - name: sent - - name: vernemq.mqtt_puback_received_reason - description: v5 PUBACK QoS 1 Received by Reason + - name: vernemq.node_mqtt_puback_received_by_reason_code + description: MQTT PUBACK QoS 1 Received by Reason unit: packets/s chart_type: stacked
dimensions: - - name: a dimensions per reason - - name: vernemq.mqtt_puback_sent_reason - description: v5 PUBACK QoS 1 Sent by Reason + - name: success + - name: no_matching_subscribers + - name: unspecified_error + - name: impl_specific_error + - name: not_authorized + - name: topic_name_invalid + - name: packet_id_in_use + - name: quota_exceeded + - name: payload_format_invalid + - name: vernemq.node_mqtt_puback_sent_by_reason_code + description: MQTT PUBACK QoS 1 Sent by Reason unit: packets/s chart_type: stacked dimensions: - - name: a dimensions per reason - - name: vernemq.mqtt_puback_invalid_error - description: v3/v5 PUBACK QoS 1 Received Unexpected Messages + - name: success + - name: no_matching_subscribers + - name: unspecified_error + - name: impl_specific_error + - name: not_authorized + - name: topic_name_invalid + - name: packet_id_in_use + - name: quota_exceeded + - name: payload_format_invalid + - name: vernemq.node_mqtt_puback_invalid_error + description: MQTT PUBACK QoS 1 Received Unexpected Messages unit: messages/s chart_type: line dimensions: - name: unexpected - - name: vernemq.mqtt_pubrec - description: v3/v5 PUBREC QoS 2 + - name: vernemq.node_mqtt_pubrec + description: MQTT PUBREC QoS 2 Packets unit: packets/s chart_type: line dimensions: - name: received - name: sent - - name: vernemq.mqtt_pubrec_received_reason - description: v5 PUBREC QoS 2 Received by Reason + - name: vernemq.node_mqtt_pubrec_received_by_reason_code + description: MQTT PUBREC QoS 2 Received by Reason unit: packets/s chart_type: stacked dimensions: - - name: a dimensions per reason - - name: vernemq.mqtt_pubrec_sent_reason - description: v5 PUBREC QoS 2 Sent by Reason + - name: success + - name: no_matching_subscribers + - name: unspecified_error + - name: impl_specific_error + - name: not_authorized + - name: topic_name_invalid + - name: packet_id_in_use + - name: quota_exceeded + - name: payload_format_invalid + - name: vernemq.node_mqtt_pubrec_sent_by_reason_code + description: MQTT PUBREC QoS 2 Sent by Reason unit: packets/s chart_type: stacked dimensions: - - name: a dimensions per reason - - name: vernemq.mqtt_pubrec_invalid_error - description: v3 PUBREC QoS 2 Received Unexpected Messages + - name: success + - name: no_matching_subscribers + - name: unspecified_error + - name: impl_specific_error + - name: not_authorized + - name: topic_name_invalid + - name: packet_id_in_use + - name: quota_exceeded + - name: payload_format_invalid + - name: vernemq.node_mqtt_pubrec_invalid_error + description: MQTT PUBREC QoS 2 Received Unexpected Messages unit: messages/s chart_type: line dimensions: - name: unexpected - - name: vernemq.mqtt_pubrel - description: v3/v5 PUBREL QoS 2 + - name: vernemq.node_mqtt_pubrel + description: MQTT PUBREL QoS 2 Packets unit: packets/s chart_type: line dimensions: - name: received - name: sent - - name: vernemq.mqtt_pubrel_received_reason - description: v5 PUBREL QoS 2 Received by Reason + - name: vernemq.node_mqtt_pubrel_received_by_reason_code + description: MQTT PUBREL QoS 2 Received by Reason unit: packets/s chart_type: stacked dimensions: - - name: a dimensions per reason - - name: vernemq.mqtt_pubrel_sent_reason - description: v5 PUBREL QoS 2 Sent by Reason + - name: success + - name: packet_id_not_found + - name: vernemq.node_mqtt_pubrel_sent_by_reason_code + description: MQTT PUBREL QoS 2 Sent by Reason unit: packets/s chart_type: stacked dimensions: - - name: a dimensions per reason - - name: vernemq.mqtt_pubcom - description: v3/v5 PUBCOMP QoS 2 + - name: 
success + - name: packet_id_not_found + - name: vernemq.node_mqtt_pubcomp + description: MQTT PUBCOMP QoS 2 Packets unit: packets/s chart_type: line dimensions: - name: received - name: sent - - name: vernemq.mqtt_pubcomp_received_reason - description: v5 PUBCOMP QoS 2 Received by Reason + - name: vernemq.node_mqtt_pubcomp_received_by_reason_code + description: MQTT PUBCOMP QoS 2 Received by Reason unit: packets/s chart_type: stacked dimensions: - - name: a dimensions per reason + - name: success + - name: packet_id_not_found + - name: vernemq.node_mqtt_pubcomp_sent_by_reason_code + description: MQTT PUBCOMP QoS 2 Sent by Reason unit: packets/s chart_type: stacked dimensions: - - name: a dimensions per reason - - name: vernemq.mqtt_pubcomp_invalid_error - description: v3/v5 PUBCOMP QoS 2 Received Unexpected Messages + - name: success + - name: packet_id_not_found + - name: vernemq.node_mqtt_pubcomp_invalid_error + description: MQTT PUBCOMP QoS 2 Received Unexpected Messages unit: messages/s chart_type: line dimensions: - name: unexpected - - name: vernemq.mqtt_ping - description: v3/v5 PING + - name: vernemq.node_mqtt_ping + description: MQTT PING Packets unit: packets/s chart_type: line dimensions: - name: pingreq - name: pingresp - - name: vernemq.node_uptime - description: Node Uptime - unit: seconds - chart_type: line - dimensions: - - name: time diff --git a/src/go/plugin/go.d/modules/vernemq/metrics.go b/src/go/plugin/go.d/modules/vernemq/metrics.go index 863cc6355..e6f18ed3c 100644 --- a/src/go/plugin/go.d/modules/vernemq/metrics.go +++ b/src/go/plugin/go.d/modules/vernemq/metrics.go @@ -2,37 +2,79 @@ package vernemq -// Source Code Metrics: -// - https://github.com/vernemq/vernemq/blob/master/apps/vmq_server/src/vmq_metrics.erl -// - https://github.com/vernemq/vernemq/blob/master/apps/vmq_server/src/vmq_metrics.hrl - -// Source Code FSM: -// - https://github.com/vernemq/vernemq/blob/master/apps/vmq_server/src/vmq_mqtt_fsm.erl -// - https://github.com/vernemq/vernemq/blob/master/apps/vmq_server/src/vmq_mqtt5_fsm.erl - -// MQTT Packet Types: -// - v4: http://docs.oasis-open.org/mqtt/mqtt/v3.1.1/errata01/os/mqtt-v3.1.1-errata01-os-complete.html#_Toc442180834 -// - v5: https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901019 - -// Erlang VM: -// - http://erlang.org/documentation/doc-5.7.1/erts-5.7.1/doc/html/erlang.html - -// Not used metrics (https://docs.vernemq.com/monitoring/introduction): -// - "mqtt_connack_accepted_sent" // v4, not populated, "mqtt_connack_sent" used instead -// - "mqtt_connack_unacceptable_protocol_sent" // v4, not populated, "mqtt_connack_sent" used instead -// - "mqtt_connack_identifier_rejected_sent" // v4, not populated, "mqtt_connack_sent" used instead -// - "mqtt_connack_server_unavailable_sent" // v4, not populated, "mqtt_connack_sent" used instead -// - "mqtt_connack_bad_credentials_sent" // v4, not populated, "mqtt_connack_sent" used instead -// - "mqtt_connack_not_authorized_sent" // v4, not populated, "mqtt_connack_sent" used instead -// - "system_exact_reductions" -// - "system_runtime" -// - "vm_memory_atom" -// - "vm_memory_atom_used" -// - "vm_memory_binary" -// - "vm_memory_code" -// - "vm_memory_ets" -// - "vm_memory_processes_used" -// - "vm_memory_total" +func newNodeStats() *nodeStats { + return &nodeStats{ + stats: make(map[string]int64), + mqtt4: make(map[string]int64), + mqtt5: make(map[string]int64), + } +} + +type nodeStats struct
{ + stats map[string]int64 + mqtt4 map[string]int64 + mqtt5 map[string]int64 +} + +// Source code metrics: https://github.com/vernemq/vernemq/blob/master/apps/vmq_server/src/vmq_metrics.erl +// Not used metrics: https://docs.vernemq.com/monitoring/introduction + +const ( + // Sockets + metricSocketOpen = "socket_open" + metricSocketClose = "socket_close" + metricSocketError = "socket_error" + metricSocketCloseTimeout = "socket_close_timeout" + metricClientKeepaliveExpired = "client_keepalive_expired" // v4, v5 + + // Queues + metricQueueProcesses = "queue_processes" + metricQueueSetup = "queue_setup" + metricQueueTeardown = "queue_teardown" + metricQueueMessageIn = "queue_message_in" + metricQueueMessageOut = "queue_message_out" + metricQueueMessageDrop = "queue_message_drop" + metricQueueMessageExpired = "queue_message_expired" + metricQueueMessageUnhandled = "queue_message_unhandled" + metricQueueInitializedFromStorage = "queue_initialized_from_storage" + + // Subscriptions + metricRouterMatchesLocal = "router_matches_local" + metricRouterMatchesRemote = "router_matches_remote" + metricRouterMemory = "router_memory" + metricRouterSubscriptions = "router_subscriptions" + + // Erlang VM + metricSystemUtilization = "system_utilization" + metricSystemProcessCount = "system_process_count" + metricSystemReductions = "system_reductions" + metricSystemContextSwitches = "system_context_switches" + metricSystemIOIn = "system_io_in" + metricSystemIOOut = "system_io_out" + metricSystemRunQueue = "system_run_queue" + metricSystemGCCount = "system_gc_count" + metricSystemWordsReclaimedByGC = "system_words_reclaimed_by_gc" + metricVMMemoryProcesses = "vm_memory_processes" + metricVMMemorySystem = "vm_memory_system" + + // Bandwidth + metricBytesReceived = "bytes_received" + metricBytesSent = "bytes_sent" + + // Retain + metricRetainMemory = "retain_memory" + metricRetainMessages = "retain_messages" + + // Cluster + metricClusterBytesDropped = "cluster_bytes_dropped" + metricClusterBytesReceived = "cluster_bytes_received" + metricClusterBytesSent = "cluster_bytes_sent" + metricNetSplitDetected = "netsplit_detected" + metricNetSplitResolved = "netsplit_resolved" + + // Uptime + metricSystemWallClock = "system_wallclock" +) // -----------------------------------------------MQTT------------------------------------------------------------------ const ( @@ -92,59 +134,116 @@ const ( metricMQTTInvalidMsgSizeError = "mqtt_invalid_msg_size_error" // v4, v5 ) -const ( - // Sockets - metricSocketOpen = "socket_open" - metricSocketClose = "socket_close" - metricSocketError = "socket_error" - metricSocketCloseTimeout = "socket_close_timeout" - metricClientKeepaliveExpired = "client_keepalive_expired" // v4, v5 - - // Queues - metricQueueProcesses = "queue_processes" - metricQueueSetup = "queue_setup" - metricQueueTeardown = "queue_teardown" - metricQueueMessageIn = "queue_message_in" - metricQueueMessageOut = "queue_message_out" - metricQueueMessageDrop = "queue_message_drop" - metricQueueMessageExpired = "queue_message_expired" - metricQueueMessageUnhandled = "queue_message_unhandled" - metricQueueInitializedFromStorage = "queue_initialized_from_storage" - - // Subscriptions - metricRouterMatchesLocal = "router_matches_local" - metricRouterMatchesRemote = "router_matches_remote" - metricRouterMemory = "router_memory" - metricRouterSubscriptions = "router_subscriptions" - - // Erlang VM - metricSystemUtilization = "system_utilization" - metricSystemProcessCount = "system_process_count" - metricSystemReductions = 
"system_reductions" - metricSystemContextSwitches = "system_context_switches" - metricSystemIOIn = "system_io_in" - metricSystemIOOut = "system_io_out" - metricSystemRunQueue = "system_run_queue" - metricSystemGCCount = "system_gc_count" - metricSystemWordsReclaimedByGC = "system_words_reclaimed_by_gc" - metricVMMemoryProcesses = "vm_memory_processes" - metricVMMemorySystem = "vm_memory_system" - - // Bandwidth - metricBytesReceived = "bytes_received" - metricBytesSent = "bytes_sent" - - // Retain - metricRetainMemory = "retain_memory" - metricRetainMessages = "retain_messages" - - // Cluster - metricClusterBytesDropped = "cluster_bytes_dropped" - metricClusterBytesReceived = "cluster_bytes_received" - metricClusterBytesSent = "cluster_bytes_sent" - metricNetSplitDetected = "netsplit_detected" - metricNetSplitResolved = "netsplit_resolved" - - // Uptime - metricSystemWallClock = "system_wallclock" +var ( + mqtt5AUTHReceivedReasonCodes = []string{ + "success", + "continue_authentication", + "reauthenticate", + } + mqtt5AUTHSentReasonCodes = mqtt5AUTHReceivedReasonCodes + + mqtt4CONNACKSentReturnCodes = []string{ + "success", + "unsupported_protocol_version", + "client_identifier_not_valid", + "server_unavailable", + "bad_username_or_password", + "not_authorized", + } + mqtt5CONNACKSentReasonCodes = []string{ + "success", + "unspecified_error", + "malformed_packet", + "protocol_error", + "impl_specific_error", + "unsupported_protocol_version", + "client_identifier_not_valid", + "bad_username_or_password", + "not_authorized", + "server_unavailable", + "server_busy", + "banned", + "bad_authentication_method", + "topic_name_invalid", + "packet_too_large", + "quota_exceeded", + "payload_format_invalid", + "retain_not_supported", + "qos_not_supported", + "use_another_server", + "server_moved", + "connection_rate_exceeded", + } + + mqtt5DISCONNECTReceivedReasonCodes = []string{ + "normal_disconnect", + "disconnect_with_will_msg", + "unspecified_error", + "malformed_packet", + "protocol_error", + "impl_specific_error", + "topic_name_invalid", + "receive_max_exceeded", + "topic_alias_invalid", + "packet_too_large", + "message_rate_too_high", + "quota_exceeded", + "administrative_action", + "payload_format_invalid", + } + mqtt5DISCONNECTSentReasonCodes = []string{ + "normal_disconnect", + "unspecified_error", + "malformed_packet", + "protocol_error", + "impl_specific_error", + "not_authorized", + "server_busy", + "server_shutting_down", + "keep_alive_timeout", + "session_taken_over", + "topic_filter_invalid", + "topic_name_invalid", + "receive_max_exceeded", + "topic_alias_invalid", + "packet_too_large", + "message_rate_too_high", + "quota_exceeded", + "administrative_action", + "payload_format_invalid", + "retain_not_supported", + "qos_not_supported", + "use_another_server", + "server_moved", + "shared_subs_not_supported", + "connection_rate_exceeded", + "max_connect_time", + "subscription_ids_not_supported", + "wildcard_subs_not_supported", + } + + mqtt5PUBACKReceivedReasonCodes = []string{ + "success", + "no_matching_subscribers", + "unspecified_error", + "impl_specific_error", + "not_authorized", + "topic_name_invalid", + "packet_id_in_use", + "quota_exceeded", + "payload_format_invalid", + } + mqtt5PUBACKSentReasonCodes = mqtt5PUBACKReceivedReasonCodes + + mqtt5PUBRECReceivedReasonCodes = mqtt5PUBACKReceivedReasonCodes + mqtt5PUBRECSentReasonCodes = mqtt5PUBACKReceivedReasonCodes + + mqtt5PUBRELReceivedReasonCodes = []string{ + "success", + "packet_id_not_found", + } + mqtt5PUBRELSentReasonCodes 
= mqtt5PUBRELReceivedReasonCodes + + mqtt5PUBCOMPReceivedReasonCodes = mqtt5PUBRELReceivedReasonCodes + mqtt5PUBCOMPSentReasonCodes = mqtt5PUBRELReceivedReasonCodes ) diff --git a/src/go/plugin/go.d/modules/vernemq/testdata/metrics-v1.10.1-mqtt5.txt b/src/go/plugin/go.d/modules/vernemq/testdata/metrics-v1.10.1-mqtt5.txt deleted file mode 100644 index 2e98a3e94..000000000 --- a/src/go/plugin/go.d/modules/vernemq/testdata/metrics-v1.10.1-mqtt5.txt +++ /dev/null @@ -1,416 +0,0 @@ -# HELP socket_open The number of times an MQTT socket has been opened. -# TYPE socket_open counter -socket_open{node="VerneMQ@172.17.0.2"} 338956 -# HELP socket_close The number of times an MQTT socket has been closed. -# TYPE socket_close counter -socket_close{node="VerneMQ@172.17.0.2"} 338956 -# HELP socket_close_timeout The number of times VerneMQ closed an MQTT socket due to no CONNECT frame has been received on time. -# TYPE socket_close_timeout counter -socket_close_timeout{node="VerneMQ@172.17.0.2"} 0 -# HELP socket_error The total number of socket errors that have occurred. -# TYPE socket_error counter -socket_error{node="VerneMQ@172.17.0.2"} 0 -# HELP bytes_received The total number of bytes received. -# TYPE bytes_received counter -bytes_received{node="VerneMQ@172.17.0.2"} 36796908 -# HELP bytes_sent The total number of bytes sent. -# TYPE bytes_sent counter -bytes_sent{node="VerneMQ@172.17.0.2"} 23361693 -# HELP mqtt_connect_received The number of CONNECT packets received. -# TYPE mqtt_connect_received counter -mqtt_connect_received{node="VerneMQ@172.17.0.2",mqtt_version="4"} 338956 -# HELP mqtt_publish_received The number of PUBLISH packets received. -# TYPE mqtt_publish_received counter -mqtt_publish_received{node="VerneMQ@172.17.0.2",mqtt_version="4"} 537088 -# HELP mqtt_puback_received The number of PUBACK packets received. -# TYPE mqtt_puback_received counter -mqtt_puback_received{node="VerneMQ@172.17.0.2",mqtt_version="4"} 525694 -# HELP mqtt_pubrec_received The number of PUBREC packets received. -# TYPE mqtt_pubrec_received counter -mqtt_pubrec_received{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0 -# HELP mqtt_pubrel_received The number of PUBREL packets received. -# TYPE mqtt_pubrel_received counter -mqtt_pubrel_received{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0 -# HELP mqtt_pubcomp_received The number of PUBCOMP packets received. -# TYPE mqtt_pubcomp_received counter -mqtt_pubcomp_received{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0 -# HELP mqtt_subscribe_received The number of SUBSCRIBE packets received. -# TYPE mqtt_subscribe_received counter -mqtt_subscribe_received{node="VerneMQ@172.17.0.2",mqtt_version="4"} 122 -# HELP mqtt_unsubscribe_received The number of UNSUBSCRIBE packets received. -# TYPE mqtt_unsubscribe_received counter -mqtt_unsubscribe_received{node="VerneMQ@172.17.0.2",mqtt_version="4"} 108 -# HELP mqtt_pingreq_received The number of PINGREQ packets received. -# TYPE mqtt_pingreq_received counter -mqtt_pingreq_received{node="VerneMQ@172.17.0.2",mqtt_version="4"} 205 -# HELP mqtt_disconnect_received The number of DISCONNECT packets received. -# TYPE mqtt_disconnect_received counter -mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="4"} 107 -# HELP mqtt_connack_accepted_sent The number of times a connection has been accepted. 
-# TYPE mqtt_connack_accepted_sent counter -mqtt_connack_accepted_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0 -# HELP mqtt_connack_unacceptable_protocol_sent The number of times the broker is not able to support the requested protocol. -# TYPE mqtt_connack_unacceptable_protocol_sent counter -mqtt_connack_unacceptable_protocol_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0 -# HELP mqtt_connack_identifier_rejected_sent The number of times a client was rejected due to a unacceptable identifier. -# TYPE mqtt_connack_identifier_rejected_sent counter -mqtt_connack_identifier_rejected_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0 -# HELP mqtt_connack_server_unavailable_sent The number of times a client was rejected due the the broker being unavailable. -# TYPE mqtt_connack_server_unavailable_sent counter -mqtt_connack_server_unavailable_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0 -# HELP mqtt_connack_bad_credentials_sent The number of times a client sent bad credentials. -# TYPE mqtt_connack_bad_credentials_sent counter -mqtt_connack_bad_credentials_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0 -# HELP mqtt_connack_not_authorized_sent The number of times a client was rejected due to insufficient authorization. -# TYPE mqtt_connack_not_authorized_sent counter -mqtt_connack_not_authorized_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0 -# HELP mqtt_publish_sent The number of PUBLISH packets sent. -# TYPE mqtt_publish_sent counter -mqtt_publish_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 525721 -# HELP mqtt_puback_sent The number of PUBACK packets sent. -# TYPE mqtt_puback_sent counter -mqtt_puback_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 537068 -# HELP mqtt_pubrec_sent The number of PUBREC packets sent. -# TYPE mqtt_pubrec_sent counter -mqtt_pubrec_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0 -# HELP mqtt_pubrel_sent The number of PUBREL packets sent. -# TYPE mqtt_pubrel_sent counter -mqtt_pubrel_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0 -# HELP mqtt_pubcomp_sent The number of PUBCOMP packets sent. -# TYPE mqtt_pubcomp_sent counter -mqtt_pubcomp_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0 -# HELP mqtt_suback_sent The number of SUBACK packets sent. -# TYPE mqtt_suback_sent counter -mqtt_suback_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 122 -# HELP mqtt_unsuback_sent The number of UNSUBACK packets sent. -# TYPE mqtt_unsuback_sent counter -mqtt_unsuback_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 108 -# HELP mqtt_pingresp_sent The number of PINGRESP packets sent. -# TYPE mqtt_pingresp_sent counter -mqtt_pingresp_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 205 -# HELP mqtt_publish_auth_error The number of unauthorized publish attempts. -# TYPE mqtt_publish_auth_error counter -mqtt_publish_auth_error{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0 -# HELP mqtt_subscribe_auth_error The number of unauthorized subscription attempts. -# TYPE mqtt_subscribe_auth_error counter -mqtt_subscribe_auth_error{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0 -# HELP mqtt_invalid_msg_size_error The number of packages exceeding the maximum allowed size. -# TYPE mqtt_invalid_msg_size_error counter -mqtt_invalid_msg_size_error{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0 -# HELP mqtt_puback_invalid_error The number of unexpected PUBACK messages received. 
-# TYPE mqtt_puback_invalid_error counter -mqtt_puback_invalid_error{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0 -# HELP mqtt_pubrec_invalid_error The number of unexpected PUBREC messages received. -# TYPE mqtt_pubrec_invalid_error counter -mqtt_pubrec_invalid_error{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0 -# HELP mqtt_pubcomp_invalid_error The number of unexpected PUBCOMP messages received. -# TYPE mqtt_pubcomp_invalid_error counter -mqtt_pubcomp_invalid_error{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0 -# HELP mqtt_publish_error The number of times a PUBLISH operation failed due to a netsplit. -# TYPE mqtt_publish_error counter -mqtt_publish_error{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0 -# HELP mqtt_subscribe_error The number of times a SUBSCRIBE operation failed due to a netsplit. -# TYPE mqtt_subscribe_error counter -mqtt_subscribe_error{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0 -# HELP mqtt_unsubscribe_error The number of times an UNSUBSCRIBE operation failed due to a netsplit. -# TYPE mqtt_unsubscribe_error counter -mqtt_unsubscribe_error{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0 -# HELP client_keepalive_expired The number of clients which failed to communicate within the keepalive time period. -# TYPE client_keepalive_expired counter -client_keepalive_expired{node="VerneMQ@172.17.0.2",mqtt_version="4"} 1 -mqtt_connect_received{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0 -mqtt_invalid_msg_size_error{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0 -mqtt_pingreq_received{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0 -mqtt_pingresp_sent{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0 -mqtt_puback_invalid_error{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0 -mqtt_pubcomp_invalid_error{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0 -mqtt_publish_auth_error{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0 -mqtt_publish_error{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0 -mqtt_publish_received{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0 -mqtt_publish_sent{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0 -mqtt_suback_sent{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0 -mqtt_subscribe_auth_error{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0 -mqtt_subscribe_error{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0 -mqtt_subscribe_received{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0 -mqtt_unsuback_sent{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0 -mqtt_unsubscribe_error{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0 -mqtt_unsubscribe_received{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0 -client_keepalive_expired{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0 -# HELP queue_setup The number of times a MQTT queue process has been started. -# TYPE queue_setup counter -queue_setup{node="VerneMQ@172.17.0.2"} 338948 -# HELP queue_initialized_from_storage The number of times a MQTT queue process has been initialized from offline storage. -# TYPE queue_initialized_from_storage counter -queue_initialized_from_storage{node="VerneMQ@172.17.0.2"} 0 -# HELP queue_teardown The number of times a MQTT queue process has been terminated. -# TYPE queue_teardown counter -queue_teardown{node="VerneMQ@172.17.0.2"} 338948 -# HELP queue_message_drop The number of messages dropped due to full queues. -# TYPE queue_message_drop counter -queue_message_drop{node="VerneMQ@172.17.0.2"} 0 -# HELP queue_message_expired The number of messages which expired before delivery. 
-# TYPE queue_message_expired counter -queue_message_expired{node="VerneMQ@172.17.0.2"} 0 -# HELP queue_message_unhandled The number of unhandled messages when connecting with clean session=true. -# TYPE queue_message_unhandled counter -queue_message_unhandled{node="VerneMQ@172.17.0.2"} 1 -# HELP queue_message_in The number of PUBLISH packets received by MQTT queue processes. -# TYPE queue_message_in counter -queue_message_in{node="VerneMQ@172.17.0.2"} 525722 -# HELP queue_message_out The number of PUBLISH packets sent from MQTT queue processes. -# TYPE queue_message_out counter -queue_message_out{node="VerneMQ@172.17.0.2"} 525721 -# HELP client_expired Not in use (deprecated) -# TYPE client_expired counter -client_expired{node="VerneMQ@172.17.0.2"} 0 -# HELP cluster_bytes_received The number of bytes received from other cluster nodes. -# TYPE cluster_bytes_received counter -cluster_bytes_received{node="VerneMQ@172.17.0.2"} 0 -# HELP cluster_bytes_sent The number of bytes send to other cluster nodes. -# TYPE cluster_bytes_sent counter -cluster_bytes_sent{node="VerneMQ@172.17.0.2"} 0 -# HELP cluster_bytes_dropped The number of bytes dropped while sending data to other cluster nodes. -# TYPE cluster_bytes_dropped counter -cluster_bytes_dropped{node="VerneMQ@172.17.0.2"} 0 -# HELP router_matches_local The number of matched local subscriptions. -# TYPE router_matches_local counter -router_matches_local{node="VerneMQ@172.17.0.2"} 525722 -# HELP router_matches_remote The number of matched remote subscriptions. -# TYPE router_matches_remote counter -router_matches_remote{node="VerneMQ@172.17.0.2"} 0 -# HELP mqtt_connack_sent The number of CONNACK packets sent. -# TYPE mqtt_connack_sent counter -mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="4",return_code="success"} 338948 -mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="4",return_code="unsupported_protocol_version"} 0 -mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="4",return_code="client_identifier_not_valid"} 0 -mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="4",return_code="server_unavailable"} 0 -mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="4",return_code="bad_username_or_password"} 4 -mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="4",return_code="not_authorized"} 4 -mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="normal_disconnect"} 0 -mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="disconnect_with_will_msg"} 0 -mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="unspecified_error"} 0 -mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="malformed_packet"} 0 -mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="protocol_error"} 0 -mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="impl_specific_error"} 0 -mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="topic_name_invalid"} 0 -mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="receive_max_exceeded"} 0 -mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="topic_alias_invalid"} 0 -mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="packet_too_large"} 0 -mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="message_rate_too_high"} 0 
-mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="quota_exceeded"} 0 -mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="administrative_action"} 0 -mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="payload_format_invalid"} 0 -# HELP mqtt_disconnect_sent The number of DISCONNECT packets sent. -# TYPE mqtt_disconnect_sent counter -mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="normal_disconnect"} 0 -mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="unspecified_error"} 0 -mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="malformed_packet"} 0 -mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="protocol_error"} 0 -mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="impl_specific_error"} 0 -mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="not_authorized"} 0 -mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="server_busy"} 0 -mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="server_shutting_down"} 0 -mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="keep_alive_timeout"} 0 -mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="session_taken_over"} 0 -mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="topic_filter_invalid"} 0 -mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="topic_name_invalid"} 0 -mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="receive_max_exceeded"} 0 -mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="topic_alias_invalid"} 0 -mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="packet_too_large"} 0 -mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="message_rate_too_high"} 0 -mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="quota_exceeded"} 0 -mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="administrative_action"} 0 -mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="payload_format_invalid"} 0 -mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="retain_not_supported"} 0 -mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="qos_not_supported"} 0 -mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="use_another_server"} 0 -mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="server_moved"} 0 -mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="shared_subs_not_supported"} 0 -mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="connection_rate_exceeded"} 0 -mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="max_connect_time"} 0 -mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="subscription_ids_not_supported"} 0 -mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="wildcard_subs_not_supported"} 0 -mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="success"} 0 -mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="unspecified_error"} 0 -mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="malformed_packet"} 0 
-mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="protocol_error"} 0
-mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="impl_specific_error"} 0
-mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="unsupported_protocol_version"} 0
-mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="client_identifier_not_valid"} 0
-mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="bad_username_or_password"} 0
-mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="not_authorized"} 0
-mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="server_unavailable"} 0
-mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="server_busy"} 0
-mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="banned"} 0
-mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="bad_authentication_method"} 0
-mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="topic_name_invalid"} 0
-mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="packet_too_large"} 0
-mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="quota_exceeded"} 0
-mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="payload_format_invalid"} 0
-mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="retain_not_supported"} 0
-mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="qos_not_supported"} 0
-mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="use_another_server"} 0
-mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="server_moved"} 0
-mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="connection_rate_exceeded"} 0
-mqtt_puback_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="success"} 0
-mqtt_puback_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="no_matching_subscribers"} 0
-mqtt_puback_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="unspecified_error"} 0
-mqtt_puback_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="impl_specific_error"} 0
-mqtt_puback_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="not_authorized"} 0
-mqtt_puback_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="topic_name_invalid"} 0
-mqtt_puback_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="packet_id_in_use"} 0
-mqtt_puback_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="quota_exceeded"} 0
-mqtt_puback_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="payload_format_invalid"} 0
-mqtt_puback_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="success"} 0
-mqtt_puback_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="no_matching_subscribers"} 0
-mqtt_puback_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="unspecified_error"} 0
-mqtt_puback_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="impl_specific_error"} 0
-mqtt_puback_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="not_authorized"} 0
-mqtt_puback_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="topic_name_invalid"} 0
-mqtt_puback_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="packet_id_in_use"} 0
-mqtt_puback_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="quota_exceeded"} 0
-mqtt_puback_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="payload_format_invalid"} 0
-mqtt_pubrec_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="success"} 0
-mqtt_pubrec_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="no_matching_subscribers"} 0
-mqtt_pubrec_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="unspecified_error"} 0
-mqtt_pubrec_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="impl_specific_error"} 0
-mqtt_pubrec_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="not_authorized"} 0
-mqtt_pubrec_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="topic_name_invalid"} 0
-mqtt_pubrec_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="packet_id_in_use"} 0
-mqtt_pubrec_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="quota_exceeded"} 0
-mqtt_pubrec_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="payload_format_invalid"} 0
-mqtt_pubrec_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="success"} 0
-mqtt_pubrec_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="no_matching_subscribers"} 0
-mqtt_pubrec_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="unspecified_error"} 0
-mqtt_pubrec_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="impl_specific_error"} 0
-mqtt_pubrec_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="not_authorized"} 0
-mqtt_pubrec_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="topic_name_invalid"} 0
-mqtt_pubrec_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="packet_id_in_use"} 0
-mqtt_pubrec_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="quota_exceeded"} 0
-mqtt_pubrec_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="payload_format_invalid"} 0
-mqtt_pubrel_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="success"} 0
-mqtt_pubrel_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="packet_id_not_found"} 0
-mqtt_pubrel_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="success"} 0
-mqtt_pubrel_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="packet_id_not_found"} 0
-mqtt_pubcomp_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="success"} 0
-mqtt_pubcomp_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="packet_id_not_found"} 0
-mqtt_pubcomp_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="success"} 0
-mqtt_pubcomp_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="packet_id_not_found"} 0
-# HELP mqtt_auth_sent The number of AUTH packets sent.
-# TYPE mqtt_auth_sent counter
-mqtt_auth_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="success"} 0
-mqtt_auth_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="continue_authentication"} 0
-mqtt_auth_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="reauthenticate"} 0
-# HELP mqtt_auth_received The number of AUTH packets received.
-# TYPE mqtt_auth_received counter
-mqtt_auth_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="success"} 0
-mqtt_auth_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="continue_authentication"} 0
-mqtt_auth_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="reauthenticate"} 0
-# HELP queue_processes The number of MQTT queue processes.
-# TYPE queue_processes gauge
-queue_processes{node="VerneMQ@172.17.0.2"} 0
-# HELP retain_memory The number of bytes used for storing retained messages.
-# TYPE retain_memory gauge
-retain_memory{node="VerneMQ@172.17.0.2"} 11344
-# HELP retain_messages The number of currently stored retained messages.
-# TYPE retain_messages gauge
-retain_messages{node="VerneMQ@172.17.0.2"} 0
-# HELP router_memory The number of bytes used by the routing table.
-# TYPE router_memory gauge
-router_memory{node="VerneMQ@172.17.0.2"} 12752
-# HELP router_subscriptions The number of subscriptions in the routing table.
-# TYPE router_subscriptions gauge
-router_subscriptions{node="VerneMQ@172.17.0.2"} 0
-# HELP netsplit_resolved The number of resolved netsplits.
-# TYPE netsplit_resolved counter
-netsplit_resolved{node="VerneMQ@172.17.0.2"} 0
-# HELP netsplit_detected The number of detected netsplits.
-# TYPE netsplit_detected counter
-netsplit_detected{node="VerneMQ@172.17.0.2"} 0
-# HELP system_utilization_scheduler_8 Scheduler 8 utilization (percentage)
-# TYPE system_utilization_scheduler_8 gauge
-system_utilization_scheduler_8{node="VerneMQ@172.17.0.2"} 0
-# HELP system_utilization_scheduler_7 Scheduler 7 utilization (percentage)
-# TYPE system_utilization_scheduler_7 gauge
-system_utilization_scheduler_7{node="VerneMQ@172.17.0.2"} 0
-# HELP system_utilization_scheduler_6 Scheduler 6 utilization (percentage)
-# TYPE system_utilization_scheduler_6 gauge
-system_utilization_scheduler_6{node="VerneMQ@172.17.0.2"} 0
-# HELP system_utilization_scheduler_5 Scheduler 5 utilization (percentage)
-# TYPE system_utilization_scheduler_5 gauge
-system_utilization_scheduler_5{node="VerneMQ@172.17.0.2"} 0
-# HELP system_utilization_scheduler_4 Scheduler 4 utilization (percentage)
-# TYPE system_utilization_scheduler_4 gauge
-system_utilization_scheduler_4{node="VerneMQ@172.17.0.2"} 19
-# HELP system_utilization_scheduler_3 Scheduler 3 utilization (percentage)
-# TYPE system_utilization_scheduler_3 gauge
-system_utilization_scheduler_3{node="VerneMQ@172.17.0.2"} 14
-# HELP system_utilization_scheduler_2 Scheduler 2 utilization (percentage)
-# TYPE system_utilization_scheduler_2 gauge
-system_utilization_scheduler_2{node="VerneMQ@172.17.0.2"} 8
-# HELP system_utilization_scheduler_1 Scheduler 1 utilization (percentage)
-# TYPE system_utilization_scheduler_1 gauge
-system_utilization_scheduler_1{node="VerneMQ@172.17.0.2"} 34
-# HELP system_utilization The average system (scheduler) utilization (percentage).
-# TYPE system_utilization gauge
-system_utilization{node="VerneMQ@172.17.0.2"} 9
-# HELP vm_memory_ets The amount of memory allocated for ETS tables.
-# TYPE vm_memory_ets gauge
-vm_memory_ets{node="VerneMQ@172.17.0.2"} 6065944
-# HELP vm_memory_code The amount of memory allocated for code.
-# TYPE vm_memory_code gauge
-vm_memory_code{node="VerneMQ@172.17.0.2"} 11372082
-# HELP vm_memory_binary The amount of memory allocated for binaries.
-# TYPE vm_memory_binary gauge
-vm_memory_binary{node="VerneMQ@172.17.0.2"} 1293672
-# HELP vm_memory_atom_used The amount of memory used by atoms.
-# TYPE vm_memory_atom_used gauge
-vm_memory_atom_used{node="VerneMQ@172.17.0.2"} 755998
-# HELP vm_memory_atom The amount of memory allocated for atoms.
-# TYPE vm_memory_atom gauge
-vm_memory_atom{node="VerneMQ@172.17.0.2"} 768953
-# HELP vm_memory_system The amount of memory allocated for the emulator.
-# TYPE vm_memory_system gauge
-vm_memory_system{node="VerneMQ@172.17.0.2"} 27051848
-# HELP vm_memory_processes_used The amount of memory used by processes.
-# TYPE vm_memory_processes_used gauge
-vm_memory_processes_used{node="VerneMQ@172.17.0.2"} 8671232
-# HELP vm_memory_processes The amount of memory allocated for processes.
-# TYPE vm_memory_processes gauge
-vm_memory_processes{node="VerneMQ@172.17.0.2"} 8673288
-# HELP vm_memory_total The total amount of memory allocated.
-# TYPE vm_memory_total gauge
-vm_memory_total{node="VerneMQ@172.17.0.2"} 35725136
-# HELP system_process_count The number of Erlang processes.
-# TYPE system_process_count gauge
-system_process_count{node="VerneMQ@172.17.0.2"} 329
-# HELP system_wallclock The number of milli-seconds passed since the node was started.
-# TYPE system_wallclock counter
-system_wallclock{node="VerneMQ@172.17.0.2"} 163457858
-# HELP system_runtime The sum of the runtime for all threads in the Erlang runtime system.
-# TYPE system_runtime counter
-system_runtime{node="VerneMQ@172.17.0.2"} 1775355
-# HELP system_run_queue The total number of processes and ports ready to run on all run-queues.
-# TYPE system_run_queue gauge
-system_run_queue{node="VerneMQ@172.17.0.2"} 0
-# HELP system_reductions The number of reductions performed in the VM since the node was started.
-# TYPE system_reductions counter
-system_reductions{node="VerneMQ@172.17.0.2"} 3857458067
-# HELP system_io_out The total number of bytes sent through ports.
-# TYPE system_io_out counter
-system_io_out{node="VerneMQ@172.17.0.2"} 961001488
-# HELP system_io_in The total number of bytes received through ports.
-# TYPE system_io_in counter
-system_io_in{node="VerneMQ@172.17.0.2"} 68998296
-# HELP system_words_reclaimed_by_gc The number of words reclaimed by the garbage collector.
-# TYPE system_words_reclaimed_by_gc counter
-system_words_reclaimed_by_gc{node="VerneMQ@172.17.0.2"} 7158470019
-# HELP system_gc_count The number of garbage collections performed.
-# TYPE system_gc_count counter
-system_gc_count{node="VerneMQ@172.17.0.2"} 12189976
-# HELP system_exact_reductions The exact number of reductions performed.
-# TYPE system_exact_reductions counter
-system_exact_reductions{node="VerneMQ@172.17.0.2"} 3854024620
-# HELP system_context_switches The total number of context switches.
-# TYPE system_context_switches counter
-system_context_switches{node="VerneMQ@172.17.0.2"} 39088198
\ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/vernemq/testdata/non_vernemq.txt b/src/go/plugin/go.d/modules/vernemq/testdata/non_vernemq.txt
deleted file mode 100644
index f5f0ae082..000000000
--- a/src/go/plugin/go.d/modules/vernemq/testdata/non_vernemq.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-# HELP wmi_os_process_memory_limix_bytes OperatingSystem.MaxProcessMemorySize
-# TYPE wmi_os_process_memory_limix_bytes gauge
-wmi_os_process_memory_limix_bytes 1.40737488224256e+14
-# HELP wmi_os_processes OperatingSystem.NumberOfProcesses
-# TYPE wmi_os_processes gauge
-wmi_os_processes 124
-# HELP wmi_os_processes_limit OperatingSystem.MaxNumberOfProcesses
-# TYPE wmi_os_processes_limit gauge
-wmi_os_processes_limit 4.294967295e+09
-# HELP wmi_os_time OperatingSystem.LocalDateTime
-# TYPE wmi_os_time gauge
-wmi_os_time 1.57804974e+09
-# HELP wmi_os_timezone OperatingSystem.LocalDateTime
-# TYPE wmi_os_timezone gauge
-wmi_os_timezone{timezone="MSK"} 1
-# HELP wmi_os_users OperatingSystem.NumberOfUsers
-# TYPE wmi_os_users gauge
-wmi_os_users 2
-# HELP wmi_os_virtual_memory_bytes OperatingSystem.TotalVirtualMemorySize
-# TYPE wmi_os_virtual_memory_bytes gauge
-wmi_os_virtual_memory_bytes 5.770891264e+09
-# HELP wmi_os_virtual_memory_free_bytes OperatingSystem.FreeVirtualMemory
-# TYPE wmi_os_virtual_memory_free_bytes gauge
-wmi_os_virtual_memory_free_bytes 3.76489984e+09
-# HELP wmi_os_visible_memory_bytes OperatingSystem.TotalVisibleMemorySize
-# TYPE wmi_os_visible_memory_bytes gauge
-wmi_os_visible_memory_bytes 4.294496256e+09
\ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/vernemq/testdata/v1.10.1/metrics.txt b/src/go/plugin/go.d/modules/vernemq/testdata/v1.10.1/metrics.txt
new file mode 100644
index 000000000..2e98a3e94
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vernemq/testdata/v1.10.1/metrics.txt
@@ -0,0 +1,416 @@
+# HELP socket_open The number of times an MQTT socket has been opened.
+# TYPE socket_open counter
+socket_open{node="VerneMQ@172.17.0.2"} 338956
+# HELP socket_close The number of times an MQTT socket has been closed.
+# TYPE socket_close counter
+socket_close{node="VerneMQ@172.17.0.2"} 338956
+# HELP socket_close_timeout The number of times VerneMQ closed an MQTT socket due to no CONNECT frame has been received on time.
+# TYPE socket_close_timeout counter
+socket_close_timeout{node="VerneMQ@172.17.0.2"} 0
+# HELP socket_error The total number of socket errors that have occurred.
+# TYPE socket_error counter
+socket_error{node="VerneMQ@172.17.0.2"} 0
+# HELP bytes_received The total number of bytes received.
+# TYPE bytes_received counter
+bytes_received{node="VerneMQ@172.17.0.2"} 36796908
+# HELP bytes_sent The total number of bytes sent.
+# TYPE bytes_sent counter
+bytes_sent{node="VerneMQ@172.17.0.2"} 23361693
+# HELP mqtt_connect_received The number of CONNECT packets received.
+# TYPE mqtt_connect_received counter
+mqtt_connect_received{node="VerneMQ@172.17.0.2",mqtt_version="4"} 338956
+# HELP mqtt_publish_received The number of PUBLISH packets received.
+# TYPE mqtt_publish_received counter
+mqtt_publish_received{node="VerneMQ@172.17.0.2",mqtt_version="4"} 537088
+# HELP mqtt_puback_received The number of PUBACK packets received.
+# TYPE mqtt_puback_received counter
+mqtt_puback_received{node="VerneMQ@172.17.0.2",mqtt_version="4"} 525694
+# HELP mqtt_pubrec_received The number of PUBREC packets received.
+# TYPE mqtt_pubrec_received counter
+mqtt_pubrec_received{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0
+# HELP mqtt_pubrel_received The number of PUBREL packets received.
+# TYPE mqtt_pubrel_received counter
+mqtt_pubrel_received{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0
+# HELP mqtt_pubcomp_received The number of PUBCOMP packets received.
+# TYPE mqtt_pubcomp_received counter
+mqtt_pubcomp_received{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0
+# HELP mqtt_subscribe_received The number of SUBSCRIBE packets received.
+# TYPE mqtt_subscribe_received counter
+mqtt_subscribe_received{node="VerneMQ@172.17.0.2",mqtt_version="4"} 122
+# HELP mqtt_unsubscribe_received The number of UNSUBSCRIBE packets received.
+# TYPE mqtt_unsubscribe_received counter
+mqtt_unsubscribe_received{node="VerneMQ@172.17.0.2",mqtt_version="4"} 108
+# HELP mqtt_pingreq_received The number of PINGREQ packets received.
+# TYPE mqtt_pingreq_received counter
+mqtt_pingreq_received{node="VerneMQ@172.17.0.2",mqtt_version="4"} 205
+# HELP mqtt_disconnect_received The number of DISCONNECT packets received.
+# TYPE mqtt_disconnect_received counter
+mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="4"} 107
+# HELP mqtt_connack_accepted_sent The number of times a connection has been accepted.
+# TYPE mqtt_connack_accepted_sent counter
+mqtt_connack_accepted_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0
+# HELP mqtt_connack_unacceptable_protocol_sent The number of times the broker is not able to support the requested protocol.
+# TYPE mqtt_connack_unacceptable_protocol_sent counter
+mqtt_connack_unacceptable_protocol_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0
+# HELP mqtt_connack_identifier_rejected_sent The number of times a client was rejected due to a unacceptable identifier.
+# TYPE mqtt_connack_identifier_rejected_sent counter
+mqtt_connack_identifier_rejected_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0
+# HELP mqtt_connack_server_unavailable_sent The number of times a client was rejected due the the broker being unavailable.
+# TYPE mqtt_connack_server_unavailable_sent counter
+mqtt_connack_server_unavailable_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0
+# HELP mqtt_connack_bad_credentials_sent The number of times a client sent bad credentials.
+# TYPE mqtt_connack_bad_credentials_sent counter
+mqtt_connack_bad_credentials_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0
+# HELP mqtt_connack_not_authorized_sent The number of times a client was rejected due to insufficient authorization.
+# TYPE mqtt_connack_not_authorized_sent counter
+mqtt_connack_not_authorized_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0
+# HELP mqtt_publish_sent The number of PUBLISH packets sent.
+# TYPE mqtt_publish_sent counter
+mqtt_publish_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 525721
+# HELP mqtt_puback_sent The number of PUBACK packets sent.
+# TYPE mqtt_puback_sent counter
+mqtt_puback_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 537068
+# HELP mqtt_pubrec_sent The number of PUBREC packets sent.
+# TYPE mqtt_pubrec_sent counter
+mqtt_pubrec_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0
+# HELP mqtt_pubrel_sent The number of PUBREL packets sent.
+# TYPE mqtt_pubrel_sent counter
+mqtt_pubrel_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0
+# HELP mqtt_pubcomp_sent The number of PUBCOMP packets sent.
+# TYPE mqtt_pubcomp_sent counter
+mqtt_pubcomp_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0
+# HELP mqtt_suback_sent The number of SUBACK packets sent.
+# TYPE mqtt_suback_sent counter
+mqtt_suback_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 122
+# HELP mqtt_unsuback_sent The number of UNSUBACK packets sent.
+# TYPE mqtt_unsuback_sent counter
+mqtt_unsuback_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 108
+# HELP mqtt_pingresp_sent The number of PINGRESP packets sent.
+# TYPE mqtt_pingresp_sent counter
+mqtt_pingresp_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 205
+# HELP mqtt_publish_auth_error The number of unauthorized publish attempts.
+# TYPE mqtt_publish_auth_error counter
+mqtt_publish_auth_error{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0
+# HELP mqtt_subscribe_auth_error The number of unauthorized subscription attempts.
+# TYPE mqtt_subscribe_auth_error counter
+mqtt_subscribe_auth_error{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0
+# HELP mqtt_invalid_msg_size_error The number of packages exceeding the maximum allowed size.
+# TYPE mqtt_invalid_msg_size_error counter
+mqtt_invalid_msg_size_error{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0
+# HELP mqtt_puback_invalid_error The number of unexpected PUBACK messages received.
+# TYPE mqtt_puback_invalid_error counter
+mqtt_puback_invalid_error{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0
+# HELP mqtt_pubrec_invalid_error The number of unexpected PUBREC messages received.
+# TYPE mqtt_pubrec_invalid_error counter
+mqtt_pubrec_invalid_error{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0
+# HELP mqtt_pubcomp_invalid_error The number of unexpected PUBCOMP messages received.
+# TYPE mqtt_pubcomp_invalid_error counter
+mqtt_pubcomp_invalid_error{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0
+# HELP mqtt_publish_error The number of times a PUBLISH operation failed due to a netsplit.
+# TYPE mqtt_publish_error counter
+mqtt_publish_error{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0
+# HELP mqtt_subscribe_error The number of times a SUBSCRIBE operation failed due to a netsplit.
+# TYPE mqtt_subscribe_error counter
+mqtt_subscribe_error{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0
+# HELP mqtt_unsubscribe_error The number of times an UNSUBSCRIBE operation failed due to a netsplit.
+# TYPE mqtt_unsubscribe_error counter
+mqtt_unsubscribe_error{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0
+# HELP client_keepalive_expired The number of clients which failed to communicate within the keepalive time period.
+# TYPE client_keepalive_expired counter
+client_keepalive_expired{node="VerneMQ@172.17.0.2",mqtt_version="4"} 1
+mqtt_connect_received{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0
+mqtt_invalid_msg_size_error{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0
+mqtt_pingreq_received{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0
+mqtt_pingresp_sent{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0
+mqtt_puback_invalid_error{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0
+mqtt_pubcomp_invalid_error{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0
+mqtt_publish_auth_error{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0
+mqtt_publish_error{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0
+mqtt_publish_received{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0
+mqtt_publish_sent{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0
+mqtt_suback_sent{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0
+mqtt_subscribe_auth_error{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0
+mqtt_subscribe_error{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0
+mqtt_subscribe_received{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0
+mqtt_unsuback_sent{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0
+mqtt_unsubscribe_error{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0
+mqtt_unsubscribe_received{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0
+client_keepalive_expired{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0
+# HELP queue_setup The number of times a MQTT queue process has been started.
+# TYPE queue_setup counter
+queue_setup{node="VerneMQ@172.17.0.2"} 338948
+# HELP queue_initialized_from_storage The number of times a MQTT queue process has been initialized from offline storage.
+# TYPE queue_initialized_from_storage counter
+queue_initialized_from_storage{node="VerneMQ@172.17.0.2"} 0
+# HELP queue_teardown The number of times a MQTT queue process has been terminated.
+# TYPE queue_teardown counter
+queue_teardown{node="VerneMQ@172.17.0.2"} 338948
+# HELP queue_message_drop The number of messages dropped due to full queues.
+# TYPE queue_message_drop counter
+queue_message_drop{node="VerneMQ@172.17.0.2"} 0
+# HELP queue_message_expired The number of messages which expired before delivery.
+# TYPE queue_message_expired counter
+queue_message_expired{node="VerneMQ@172.17.0.2"} 0
+# HELP queue_message_unhandled The number of unhandled messages when connecting with clean session=true.
+# TYPE queue_message_unhandled counter
+queue_message_unhandled{node="VerneMQ@172.17.0.2"} 1
+# HELP queue_message_in The number of PUBLISH packets received by MQTT queue processes.
+# TYPE queue_message_in counter
+queue_message_in{node="VerneMQ@172.17.0.2"} 525722
+# HELP queue_message_out The number of PUBLISH packets sent from MQTT queue processes.
+# TYPE queue_message_out counter
+queue_message_out{node="VerneMQ@172.17.0.2"} 525721
+# HELP client_expired Not in use (deprecated)
+# TYPE client_expired counter
+client_expired{node="VerneMQ@172.17.0.2"} 0
+# HELP cluster_bytes_received The number of bytes received from other cluster nodes.
+# TYPE cluster_bytes_received counter
+cluster_bytes_received{node="VerneMQ@172.17.0.2"} 0
+# HELP cluster_bytes_sent The number of bytes send to other cluster nodes.
+# TYPE cluster_bytes_sent counter
+cluster_bytes_sent{node="VerneMQ@172.17.0.2"} 0
+# HELP cluster_bytes_dropped The number of bytes dropped while sending data to other cluster nodes.
+# TYPE cluster_bytes_dropped counter
+cluster_bytes_dropped{node="VerneMQ@172.17.0.2"} 0
+# HELP router_matches_local The number of matched local subscriptions.
+# TYPE router_matches_local counter
+router_matches_local{node="VerneMQ@172.17.0.2"} 525722
+# HELP router_matches_remote The number of matched remote subscriptions.
+# TYPE router_matches_remote counter
+router_matches_remote{node="VerneMQ@172.17.0.2"} 0
+# HELP mqtt_connack_sent The number of CONNACK packets sent.
+# TYPE mqtt_connack_sent counter
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="4",return_code="success"} 338948
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="4",return_code="unsupported_protocol_version"} 0
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="4",return_code="client_identifier_not_valid"} 0
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="4",return_code="server_unavailable"} 0
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="4",return_code="bad_username_or_password"} 4
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="4",return_code="not_authorized"} 4
+mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="normal_disconnect"} 0
+mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="disconnect_with_will_msg"} 0
+mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="unspecified_error"} 0
+mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="malformed_packet"} 0
+mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="protocol_error"} 0
+mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="impl_specific_error"} 0
+mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="topic_name_invalid"} 0
+mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="receive_max_exceeded"} 0
+mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="topic_alias_invalid"} 0
+mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="packet_too_large"} 0
+mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="message_rate_too_high"} 0
+mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="quota_exceeded"} 0
+mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="administrative_action"} 0
+mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="payload_format_invalid"} 0
+# HELP mqtt_disconnect_sent The number of DISCONNECT packets sent.
+# TYPE mqtt_disconnect_sent counter
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="normal_disconnect"} 0
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="unspecified_error"} 0
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="malformed_packet"} 0
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="protocol_error"} 0
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="impl_specific_error"} 0
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="not_authorized"} 0
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="server_busy"} 0
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="server_shutting_down"} 0
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="keep_alive_timeout"} 0
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="session_taken_over"} 0
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="topic_filter_invalid"} 0
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="topic_name_invalid"} 0
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="receive_max_exceeded"} 0
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="topic_alias_invalid"} 0
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="packet_too_large"} 0
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="message_rate_too_high"} 0
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="quota_exceeded"} 0
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="administrative_action"} 0
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="payload_format_invalid"} 0
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="retain_not_supported"} 0
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="qos_not_supported"} 0
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="use_another_server"} 0
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="server_moved"} 0
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="shared_subs_not_supported"} 0
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="connection_rate_exceeded"} 0
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="max_connect_time"} 0
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="subscription_ids_not_supported"} 0
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="wildcard_subs_not_supported"} 0
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="success"} 0
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="unspecified_error"} 0
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="malformed_packet"} 0
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="protocol_error"} 0
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="impl_specific_error"} 0
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="unsupported_protocol_version"} 0
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="client_identifier_not_valid"} 0
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="bad_username_or_password"} 0
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="not_authorized"} 0
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="server_unavailable"} 0
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="server_busy"} 0
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="banned"} 0
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="bad_authentication_method"} 0
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="topic_name_invalid"} 0
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="packet_too_large"} 0
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="quota_exceeded"} 0
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="payload_format_invalid"} 0
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="retain_not_supported"} 0
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="qos_not_supported"} 0
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="use_another_server"} 0
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="server_moved"} 0
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="connection_rate_exceeded"} 0
+mqtt_puback_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="success"} 0
+mqtt_puback_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="no_matching_subscribers"} 0
+mqtt_puback_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="unspecified_error"} 0
+mqtt_puback_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="impl_specific_error"} 0
+mqtt_puback_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="not_authorized"} 0
+mqtt_puback_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="topic_name_invalid"} 0
+mqtt_puback_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="packet_id_in_use"} 0
+mqtt_puback_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="quota_exceeded"} 0
+mqtt_puback_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="payload_format_invalid"} 0
+mqtt_puback_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="success"} 0
+mqtt_puback_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="no_matching_subscribers"} 0
+mqtt_puback_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="unspecified_error"} 0
+mqtt_puback_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="impl_specific_error"} 0
+mqtt_puback_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="not_authorized"} 0
+mqtt_puback_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="topic_name_invalid"} 0
+mqtt_puback_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="packet_id_in_use"} 0
+mqtt_puback_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="quota_exceeded"} 0
+mqtt_puback_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="payload_format_invalid"} 0
+mqtt_pubrec_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="success"} 0
+mqtt_pubrec_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="no_matching_subscribers"} 0
+mqtt_pubrec_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="unspecified_error"} 0
+mqtt_pubrec_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="impl_specific_error"} 0
+mqtt_pubrec_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="not_authorized"} 0
+mqtt_pubrec_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="topic_name_invalid"} 0
+mqtt_pubrec_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="packet_id_in_use"} 0
+mqtt_pubrec_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="quota_exceeded"} 0
+mqtt_pubrec_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="payload_format_invalid"} 0
+mqtt_pubrec_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="success"} 0
+mqtt_pubrec_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="no_matching_subscribers"} 0
+mqtt_pubrec_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="unspecified_error"} 0
+mqtt_pubrec_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="impl_specific_error"} 0
+mqtt_pubrec_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="not_authorized"} 0
+mqtt_pubrec_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="topic_name_invalid"} 0
+mqtt_pubrec_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="packet_id_in_use"} 0
+mqtt_pubrec_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="quota_exceeded"} 0
+mqtt_pubrec_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="payload_format_invalid"} 0
+mqtt_pubrel_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="success"} 0
+mqtt_pubrel_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="packet_id_not_found"} 0
+mqtt_pubrel_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="success"} 0
+mqtt_pubrel_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="packet_id_not_found"} 0
+mqtt_pubcomp_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="success"} 0
+mqtt_pubcomp_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="packet_id_not_found"} 0
+mqtt_pubcomp_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="success"} 0
+mqtt_pubcomp_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="packet_id_not_found"} 0
+# HELP mqtt_auth_sent The number of AUTH packets sent.
+# TYPE mqtt_auth_sent counter
+mqtt_auth_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="success"} 0
+mqtt_auth_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="continue_authentication"} 0
+mqtt_auth_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="reauthenticate"} 0
+# HELP mqtt_auth_received The number of AUTH packets received.
+# TYPE mqtt_auth_received counter
+mqtt_auth_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="success"} 0
+mqtt_auth_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="continue_authentication"} 0
+mqtt_auth_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="reauthenticate"} 0
+# HELP queue_processes The number of MQTT queue processes.
+# TYPE queue_processes gauge
+queue_processes{node="VerneMQ@172.17.0.2"} 0
+# HELP retain_memory The number of bytes used for storing retained messages.
+# TYPE retain_memory gauge
+retain_memory{node="VerneMQ@172.17.0.2"} 11344
+# HELP retain_messages The number of currently stored retained messages.
+# TYPE retain_messages gauge
+retain_messages{node="VerneMQ@172.17.0.2"} 0
+# HELP router_memory The number of bytes used by the routing table.
+# TYPE router_memory gauge
+router_memory{node="VerneMQ@172.17.0.2"} 12752
+# HELP router_subscriptions The number of subscriptions in the routing table.
+# TYPE router_subscriptions gauge
+router_subscriptions{node="VerneMQ@172.17.0.2"} 0
+# HELP netsplit_resolved The number of resolved netsplits.
+# TYPE netsplit_resolved counter
+netsplit_resolved{node="VerneMQ@172.17.0.2"} 0
+# HELP netsplit_detected The number of detected netsplits.
+# TYPE netsplit_detected counter
+netsplit_detected{node="VerneMQ@172.17.0.2"} 0
+# HELP system_utilization_scheduler_8 Scheduler 8 utilization (percentage)
+# TYPE system_utilization_scheduler_8 gauge
+system_utilization_scheduler_8{node="VerneMQ@172.17.0.2"} 0
+# HELP system_utilization_scheduler_7 Scheduler 7 utilization (percentage)
+# TYPE system_utilization_scheduler_7 gauge
+system_utilization_scheduler_7{node="VerneMQ@172.17.0.2"} 0
+# HELP system_utilization_scheduler_6 Scheduler 6 utilization (percentage)
+# TYPE system_utilization_scheduler_6 gauge
+system_utilization_scheduler_6{node="VerneMQ@172.17.0.2"} 0
+# HELP system_utilization_scheduler_5 Scheduler 5 utilization (percentage)
+# TYPE system_utilization_scheduler_5 gauge
+system_utilization_scheduler_5{node="VerneMQ@172.17.0.2"} 0
+# HELP system_utilization_scheduler_4 Scheduler 4 utilization (percentage)
+# TYPE system_utilization_scheduler_4 gauge
+system_utilization_scheduler_4{node="VerneMQ@172.17.0.2"} 19
+# HELP system_utilization_scheduler_3 Scheduler 3 utilization (percentage)
+# TYPE system_utilization_scheduler_3 gauge
+system_utilization_scheduler_3{node="VerneMQ@172.17.0.2"} 14
+# HELP system_utilization_scheduler_2 Scheduler 2 utilization (percentage)
+# TYPE system_utilization_scheduler_2 gauge
+system_utilization_scheduler_2{node="VerneMQ@172.17.0.2"} 8
+# HELP system_utilization_scheduler_1 Scheduler 1 utilization (percentage)
+# TYPE system_utilization_scheduler_1 gauge
+system_utilization_scheduler_1{node="VerneMQ@172.17.0.2"} 34
+# HELP system_utilization The average system (scheduler) utilization (percentage).
+# TYPE system_utilization gauge
+system_utilization{node="VerneMQ@172.17.0.2"} 9
+# HELP vm_memory_ets The amount of memory allocated for ETS tables.
+# TYPE vm_memory_ets gauge
+vm_memory_ets{node="VerneMQ@172.17.0.2"} 6065944
+# HELP vm_memory_code The amount of memory allocated for code.
+# TYPE vm_memory_code gauge
+vm_memory_code{node="VerneMQ@172.17.0.2"} 11372082
+# HELP vm_memory_binary The amount of memory allocated for binaries.
+# TYPE vm_memory_binary gauge
+vm_memory_binary{node="VerneMQ@172.17.0.2"} 1293672
+# HELP vm_memory_atom_used The amount of memory used by atoms.
+# TYPE vm_memory_atom_used gauge
+vm_memory_atom_used{node="VerneMQ@172.17.0.2"} 755998
+# HELP vm_memory_atom The amount of memory allocated for atoms.
+# TYPE vm_memory_atom gauge
+vm_memory_atom{node="VerneMQ@172.17.0.2"} 768953
+# HELP vm_memory_system The amount of memory allocated for the emulator.
+# TYPE vm_memory_system gauge
+vm_memory_system{node="VerneMQ@172.17.0.2"} 27051848
+# HELP vm_memory_processes_used The amount of memory used by processes.
+# TYPE vm_memory_processes_used gauge
+vm_memory_processes_used{node="VerneMQ@172.17.0.2"} 8671232
+# HELP vm_memory_processes The amount of memory allocated for processes.
+# TYPE vm_memory_processes gauge
+vm_memory_processes{node="VerneMQ@172.17.0.2"} 8673288
+# HELP vm_memory_total The total amount of memory allocated.
+# TYPE vm_memory_total gauge
+vm_memory_total{node="VerneMQ@172.17.0.2"} 35725136
+# HELP system_process_count The number of Erlang processes.
+# TYPE system_process_count gauge
+system_process_count{node="VerneMQ@172.17.0.2"} 329
+# HELP system_wallclock The number of milli-seconds passed since the node was started.
+# TYPE system_wallclock counter
+system_wallclock{node="VerneMQ@172.17.0.2"} 163457858
+# HELP system_runtime The sum of the runtime for all threads in the Erlang runtime system.
+# TYPE system_runtime counter
+system_runtime{node="VerneMQ@172.17.0.2"} 1775355
+# HELP system_run_queue The total number of processes and ports ready to run on all run-queues.
+# TYPE system_run_queue gauge
+system_run_queue{node="VerneMQ@172.17.0.2"} 0
+# HELP system_reductions The number of reductions performed in the VM since the node was started.
+# TYPE system_reductions counter
+system_reductions{node="VerneMQ@172.17.0.2"} 3857458067
+# HELP system_io_out The total number of bytes sent through ports.
+# TYPE system_io_out counter
+system_io_out{node="VerneMQ@172.17.0.2"} 961001488
+# HELP system_io_in The total number of bytes received through ports.
+# TYPE system_io_in counter
+system_io_in{node="VerneMQ@172.17.0.2"} 68998296
+# HELP system_words_reclaimed_by_gc The number of words reclaimed by the garbage collector.
+# TYPE system_words_reclaimed_by_gc counter
+system_words_reclaimed_by_gc{node="VerneMQ@172.17.0.2"} 7158470019
+# HELP system_gc_count The number of garbage collections performed.
+# TYPE system_gc_count counter
+system_gc_count{node="VerneMQ@172.17.0.2"} 12189976
+# HELP system_exact_reductions The exact number of reductions performed.
+# TYPE system_exact_reductions counter
+system_exact_reductions{node="VerneMQ@172.17.0.2"} 3854024620
+# HELP system_context_switches The total number of context switches.
+# TYPE system_context_switches counter
+system_context_switches{node="VerneMQ@172.17.0.2"} 39088198
\ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/vernemq/testdata/v2.0.1/metrics.txt b/src/go/plugin/go.d/modules/vernemq/testdata/v2.0.1/metrics.txt
new file mode 100644
index 000000000..f0079011b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vernemq/testdata/v2.0.1/metrics.txt
@@ -0,0 +1,588 @@
+# HELP vernemq_socket_open The number of times an MQTT socket has been opened.
+# TYPE vernemq_socket_open counter
+vernemq_socket_open{node="VerneMQ@10.10.10.20"} 0
+# HELP vernemq_socket_close The number of times an MQTT socket has been closed.
+# TYPE vernemq_socket_close counter
+vernemq_socket_close{node="VerneMQ@10.10.10.20"} 0
+# HELP vernemq_socket_close_timeout The number of times VerneMQ closed an MQTT socket due to no CONNECT frame has been received on time.
+# TYPE vernemq_socket_close_timeout counter
+vernemq_socket_close_timeout{node="VerneMQ@10.10.10.20"} 0
+# HELP vernemq_socket_error The total number of socket errors that have occurred.
+# TYPE vernemq_socket_error counter
+vernemq_socket_error{node="VerneMQ@10.10.10.20"} 0
+# HELP vernemq_bytes_received The total number of bytes received.
+# TYPE vernemq_bytes_received counter
+vernemq_bytes_received{node="VerneMQ@10.10.10.20"} 0
+# HELP vernemq_bytes_sent The total number of bytes sent.
+# TYPE vernemq_bytes_sent counter
+vernemq_bytes_sent{node="VerneMQ@10.10.10.20"} 0
+# HELP vernemq_mqtt_connect_received The number of CONNECT packets received.
+# TYPE vernemq_mqtt_connect_received counter
+vernemq_mqtt_connect_received{node="VerneMQ@10.10.10.20",mqtt_version="4"} 0
+vernemq_mqtt_connect_received{node="VerneMQ@10.10.10.20",mqtt_version="5"} 0
+# HELP vernemq_mqtt_publish_received The number of PUBLISH packets received.
+# TYPE vernemq_mqtt_publish_received counter
+vernemq_mqtt_publish_received{node="VerneMQ@10.10.10.20",mqtt_version="4"} 0
+vernemq_mqtt_publish_received{node="VerneMQ@10.10.10.20",mqtt_version="5"} 0
+# HELP vernemq_mqtt_puback_received The number of PUBACK packets received.
+# TYPE vernemq_mqtt_puback_received counter
+vernemq_mqtt_puback_received{node="VerneMQ@10.10.10.20",mqtt_version="4"} 0
+# HELP vernemq_mqtt_pubrec_received The number of PUBREC packets received.
+# TYPE vernemq_mqtt_pubrec_received counter
+vernemq_mqtt_pubrec_received{node="VerneMQ@10.10.10.20",mqtt_version="4"} 0
+# HELP vernemq_mqtt_pubrel_received The number of PUBREL packets received.
+# TYPE vernemq_mqtt_pubrel_received counter
+vernemq_mqtt_pubrel_received{node="VerneMQ@10.10.10.20",mqtt_version="4"} 0
+# HELP vernemq_mqtt_pubcomp_received The number of PUBCOMP packets received.
+# TYPE vernemq_mqtt_pubcomp_received counter
+vernemq_mqtt_pubcomp_received{node="VerneMQ@10.10.10.20",mqtt_version="4"} 0
+# HELP vernemq_mqtt_subscribe_received The number of SUBSCRIBE packets received.
+# TYPE vernemq_mqtt_subscribe_received counter
+vernemq_mqtt_subscribe_received{node="VerneMQ@10.10.10.20",mqtt_version="4"} 0
+vernemq_mqtt_subscribe_received{node="VerneMQ@10.10.10.20",mqtt_version="5"} 0
+# HELP vernemq_mqtt_unsubscribe_received The number of UNSUBSCRIBE packets received.
+# TYPE vernemq_mqtt_unsubscribe_received counter
+vernemq_mqtt_unsubscribe_received{node="VerneMQ@10.10.10.20",mqtt_version="4"} 0
+vernemq_mqtt_unsubscribe_received{node="VerneMQ@10.10.10.20",mqtt_version="5"} 0
+# HELP vernemq_mqtt_pingreq_received The number of PINGREQ packets received.
+# TYPE vernemq_mqtt_pingreq_received counter
+vernemq_mqtt_pingreq_received{node="VerneMQ@10.10.10.20",mqtt_version="4"} 0
+vernemq_mqtt_pingreq_received{node="VerneMQ@10.10.10.20",mqtt_version="5"} 0
+# HELP vernemq_mqtt_disconnect_received The number of DISCONNECT packets received.
+# TYPE vernemq_mqtt_disconnect_received counter
+vernemq_mqtt_disconnect_received{node="VerneMQ@10.10.10.20",mqtt_version="4"} 0
+# HELP vernemq_mqtt_connack_accepted_sent The number of times a connection has been accepted.
+# TYPE vernemq_mqtt_connack_accepted_sent counter
+vernemq_mqtt_connack_accepted_sent{node="VerneMQ@10.10.10.20",mqtt_version="4"} 0
+# HELP vernemq_mqtt_connack_unacceptable_protocol_sent The number of times the broker is not able to support the requested protocol.
+# TYPE vernemq_mqtt_connack_unacceptable_protocol_sent counter
+vernemq_mqtt_connack_unacceptable_protocol_sent{node="VerneMQ@10.10.10.20",mqtt_version="4"} 0
+# HELP vernemq_mqtt_connack_identifier_rejected_sent The number of times a client was rejected due to a unacceptable identifier.
+# TYPE vernemq_mqtt_connack_identifier_rejected_sent counter
+vernemq_mqtt_connack_identifier_rejected_sent{node="VerneMQ@10.10.10.20",mqtt_version="4"} 0
+# HELP vernemq_mqtt_connack_server_unavailable_sent The number of times a client was rejected due the the broker being unavailable.
+# TYPE vernemq_mqtt_connack_server_unavailable_sent counter
+vernemq_mqtt_connack_server_unavailable_sent{node="VerneMQ@10.10.10.20",mqtt_version="4"} 0
+# HELP vernemq_mqtt_connack_bad_credentials_sent The number of times a client sent bad credentials.
+# TYPE vernemq_mqtt_connack_bad_credentials_sent counter
+vernemq_mqtt_connack_bad_credentials_sent{node="VerneMQ@10.10.10.20",mqtt_version="4"} 0
+# HELP vernemq_mqtt_connack_not_authorized_sent The number of times a client was rejected due to insufficient authorization.
+# TYPE vernemq_mqtt_connack_not_authorized_sent counter
+vernemq_mqtt_connack_not_authorized_sent{node="VerneMQ@10.10.10.20",mqtt_version="4"} 0
+# HELP vernemq_mqtt_publish_sent The number of PUBLISH packets sent.
+# TYPE vernemq_mqtt_publish_sent counter
+vernemq_mqtt_publish_sent{node="VerneMQ@10.10.10.20",mqtt_version="4"} 0
+vernemq_mqtt_publish_sent{node="VerneMQ@10.10.10.20",mqtt_version="5"} 0
+# HELP vernemq_mqtt_puback_sent The number of PUBACK packets sent.
+# TYPE vernemq_mqtt_puback_sent counter
+vernemq_mqtt_puback_sent{node="VerneMQ@10.10.10.20",mqtt_version="4"} 0
+# HELP vernemq_mqtt_pubrec_sent The number of PUBREC packets sent.
+# TYPE vernemq_mqtt_pubrec_sent counter
+vernemq_mqtt_pubrec_sent{node="VerneMQ@10.10.10.20",mqtt_version="4"} 0
+# HELP vernemq_mqtt_pubrel_sent The number of PUBREL packets sent.
+# TYPE vernemq_mqtt_pubrel_sent counter
+vernemq_mqtt_pubrel_sent{node="VerneMQ@10.10.10.20",mqtt_version="4"} 0
+# HELP vernemq_mqtt_pubcomp_sent The number of PUBCOMP packets sent.
+# TYPE vernemq_mqtt_pubcomp_sent counter
+vernemq_mqtt_pubcomp_sent{node="VerneMQ@10.10.10.20",mqtt_version="4"} 0
+# HELP vernemq_mqtt_suback_sent The number of SUBACK packets sent.
+# TYPE vernemq_mqtt_suback_sent counter
+vernemq_mqtt_suback_sent{node="VerneMQ@10.10.10.20",mqtt_version="4"} 0
+vernemq_mqtt_suback_sent{node="VerneMQ@10.10.10.20",mqtt_version="5"} 0
+# HELP vernemq_mqtt_unsuback_sent The number of UNSUBACK packets sent.
+# TYPE vernemq_mqtt_unsuback_sent counter
+vernemq_mqtt_unsuback_sent{node="VerneMQ@10.10.10.20",mqtt_version="4"} 0
+vernemq_mqtt_unsuback_sent{node="VerneMQ@10.10.10.20",mqtt_version="5"} 0
+# HELP vernemq_mqtt_pingresp_sent The number of PINGRESP packets sent.
+# TYPE vernemq_mqtt_pingresp_sent counter
+vernemq_mqtt_pingresp_sent{node="VerneMQ@10.10.10.20",mqtt_version="4"} 0
+vernemq_mqtt_pingresp_sent{node="VerneMQ@10.10.10.20",mqtt_version="5"} 0
+# HELP vernemq_mqtt_publish_auth_error The number of unauthorized publish attempts.
+# TYPE vernemq_mqtt_publish_auth_error counter
+vernemq_mqtt_publish_auth_error{node="VerneMQ@10.10.10.20",mqtt_version="4"} 0
+vernemq_mqtt_publish_auth_error{node="VerneMQ@10.10.10.20",mqtt_version="5"} 0
+# HELP vernemq_mqtt_subscribe_auth_error The number of unauthorized subscription attempts.
+# TYPE vernemq_mqtt_subscribe_auth_error counter
+vernemq_mqtt_subscribe_auth_error{node="VerneMQ@10.10.10.20",mqtt_version="4"} 0
+vernemq_mqtt_subscribe_auth_error{node="VerneMQ@10.10.10.20",mqtt_version="5"} 0
+# HELP vernemq_mqtt_invalid_msg_size_error The number of packages exceeding the maximum allowed size.
+# TYPE vernemq_mqtt_invalid_msg_size_error counter
+vernemq_mqtt_invalid_msg_size_error{node="VerneMQ@10.10.10.20",mqtt_version="4"} 0
+vernemq_mqtt_invalid_msg_size_error{node="VerneMQ@10.10.10.20",mqtt_version="5"} 0
+# HELP vernemq_mqtt_puback_invalid_error The number of unexpected PUBACK messages received.
+# TYPE vernemq_mqtt_puback_invalid_error counter
+vernemq_mqtt_puback_invalid_error{node="VerneMQ@10.10.10.20",mqtt_version="4"} 0
+vernemq_mqtt_puback_invalid_error{node="VerneMQ@10.10.10.20",mqtt_version="5"} 0
+# HELP vernemq_mqtt_pubrec_invalid_error The number of unexpected PUBREC messages received.
+# TYPE vernemq_mqtt_pubrec_invalid_error counter
+vernemq_mqtt_pubrec_invalid_error{node="VerneMQ@10.10.10.20",mqtt_version="4"} 0
+# HELP vernemq_mqtt_pubcomp_invalid_error The number of unexpected PUBCOMP messages received.
+# TYPE vernemq_mqtt_pubcomp_invalid_error counter
+vernemq_mqtt_pubcomp_invalid_error{node="VerneMQ@10.10.10.20",mqtt_version="4"} 0
+vernemq_mqtt_pubcomp_invalid_error{node="VerneMQ@10.10.10.20",mqtt_version="5"} 0
+# HELP vernemq_mqtt_publish_error The number of times a PUBLISH operation failed due to a netsplit.
+# TYPE vernemq_mqtt_publish_error counter
+vernemq_mqtt_publish_error{node="VerneMQ@10.10.10.20",mqtt_version="4"} 0
+vernemq_mqtt_publish_error{node="VerneMQ@10.10.10.20",mqtt_version="5"} 0
+# HELP vernemq_mqtt_subscribe_error The number of times a SUBSCRIBE operation failed due to a netsplit.
+# TYPE vernemq_mqtt_subscribe_error counter
+vernemq_mqtt_subscribe_error{node="VerneMQ@10.10.10.20",mqtt_version="4"} 0
+vernemq_mqtt_subscribe_error{node="VerneMQ@10.10.10.20",mqtt_version="5"} 0
+# HELP vernemq_mqtt_unsubscribe_error The number of times an UNSUBSCRIBE operation failed due to a netsplit.
+# TYPE vernemq_mqtt_unsubscribe_error counter
+vernemq_mqtt_unsubscribe_error{node="VerneMQ@10.10.10.20",mqtt_version="4"} 0
+vernemq_mqtt_unsubscribe_error{node="VerneMQ@10.10.10.20",mqtt_version="5"} 0
+# HELP vernemq_client_keepalive_expired The number of clients which failed to communicate within the keepalive time period.
+# TYPE vernemq_client_keepalive_expired counter
+vernemq_client_keepalive_expired{node="VerneMQ@10.10.10.20",mqtt_version="4"} 0
+vernemq_client_keepalive_expired{node="VerneMQ@10.10.10.20",mqtt_version="5"} 0
+# HELP vernemq_queue_setup The number of times a MQTT queue process has been started.
+# TYPE vernemq_queue_setup counter
+vernemq_queue_setup{node="VerneMQ@10.10.10.20"} 0
+# HELP vernemq_queue_initialized_from_storage The number of times a MQTT queue process has been initialized from offline storage.
+# TYPE vernemq_queue_initialized_from_storage counter
+vernemq_queue_initialized_from_storage{node="VerneMQ@10.10.10.20"} 0
+# HELP vernemq_queue_teardown The number of times a MQTT queue process has been terminated.
+# TYPE vernemq_queue_teardown counter
+vernemq_queue_teardown{node="VerneMQ@10.10.10.20"} 0
+# HELP vernemq_queue_message_drop The number of messages dropped due to full queues.
+# TYPE vernemq_queue_message_drop counter
+vernemq_queue_message_drop{node="VerneMQ@10.10.10.20"} 0
+# HELP vernemq_queue_message_expired The number of messages which expired before delivery.
+# TYPE vernemq_queue_message_expired counter
+vernemq_queue_message_expired{node="VerneMQ@10.10.10.20"} 0
+# HELP vernemq_queue_message_unhandled The number of unhandled messages when connecting with clean session=true or QoS0 for offline sessions.
+# TYPE vernemq_queue_message_unhandled counter
+vernemq_queue_message_unhandled{node="VerneMQ@10.10.10.20"} 0
+# HELP vernemq_queue_message_in The number of PUBLISH packets received by MQTT queue processes.
+# TYPE vernemq_queue_message_in counter
+vernemq_queue_message_in{node="VerneMQ@10.10.10.20"} 0
+# HELP vernemq_queue_message_out The number of PUBLISH packets sent from MQTT queue processes.
+# TYPE vernemq_queue_message_out counter
+vernemq_queue_message_out{node="VerneMQ@10.10.10.20"} 0
+# HELP vernemq_client_expired Not in use (deprecated)
+# TYPE vernemq_client_expired counter
+vernemq_client_expired{node="VerneMQ@10.10.10.20"} 0
+# HELP vernemq_cluster_bytes_received The number of bytes received from other cluster nodes.
+# TYPE vernemq_cluster_bytes_received counter
+vernemq_cluster_bytes_received{node="VerneMQ@10.10.10.20"} 0
+# HELP vernemq_cluster_bytes_sent The number of bytes send to other cluster nodes.
+# TYPE vernemq_cluster_bytes_sent counter
+vernemq_cluster_bytes_sent{node="VerneMQ@10.10.10.20"} 0
+# HELP vernemq_cluster_bytes_dropped The number of bytes dropped while sending data to other cluster nodes.
+# TYPE vernemq_cluster_bytes_dropped counter
+vernemq_cluster_bytes_dropped{node="VerneMQ@10.10.10.20"} 0
+# HELP vernemq_router_matches_local The number of matched local subscriptions.
+# TYPE vernemq_router_matches_local counter
+vernemq_router_matches_local{node="VerneMQ@10.10.10.20"} 0
+# HELP vernemq_router_matches_remote The number of matched remote subscriptions.
+# TYPE vernemq_router_matches_remote counter
+vernemq_router_matches_remote{node="VerneMQ@10.10.10.20"} 0
+# HELP vernemq_mqtt_connack_sent The number of CONNACK packets sent.
+# TYPE vernemq_mqtt_connack_sent counter
+vernemq_mqtt_connack_sent{node="VerneMQ@10.10.10.20",mqtt_version="4",return_code="success"} 0
+vernemq_mqtt_connack_sent{node="VerneMQ@10.10.10.20",mqtt_version="4",return_code="unsupported_protocol_version"} 0
+vernemq_mqtt_connack_sent{node="VerneMQ@10.10.10.20",mqtt_version="4",return_code="client_identifier_not_valid"} 0
+vernemq_mqtt_connack_sent{node="VerneMQ@10.10.10.20",mqtt_version="4",return_code="server_unavailable"} 0
+vernemq_mqtt_connack_sent{node="VerneMQ@10.10.10.20",mqtt_version="4",return_code="bad_username_or_password"} 0
+vernemq_mqtt_connack_sent{node="VerneMQ@10.10.10.20",mqtt_version="4",return_code="not_authorized"} 0
+vernemq_mqtt_disconnect_received{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="normal_disconnect"} 0
+vernemq_mqtt_disconnect_received{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="disconnect_with_will_msg"} 0
+vernemq_mqtt_disconnect_received{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="unspecified_error"} 0
+vernemq_mqtt_disconnect_received{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="malformed_packet"} 0
+vernemq_mqtt_disconnect_received{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="protocol_error"} 0
+vernemq_mqtt_disconnect_received{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="impl_specific_error"} 0
+vernemq_mqtt_disconnect_received{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="topic_name_invalid"} 0
+vernemq_mqtt_disconnect_received{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="receive_max_exceeded"} 0
+vernemq_mqtt_disconnect_received{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="topic_alias_invalid"} 0
+vernemq_mqtt_disconnect_received{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="packet_too_large"} 0
+vernemq_mqtt_disconnect_received{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="message_rate_too_high"} 0
+vernemq_mqtt_disconnect_received{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="quota_exceeded"} 0
+vernemq_mqtt_disconnect_received{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="administrative_action"} 0
+vernemq_mqtt_disconnect_received{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="payload_format_invalid"} 0
+# HELP vernemq_mqtt_disconnect_sent The number of DISCONNECT packets sent.
+# TYPE vernemq_mqtt_disconnect_sent counter
+vernemq_mqtt_disconnect_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="normal_disconnect"} 0
+vernemq_mqtt_disconnect_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="unspecified_error"} 0
+vernemq_mqtt_disconnect_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="malformed_packet"} 0
+vernemq_mqtt_disconnect_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="protocol_error"} 0
+vernemq_mqtt_disconnect_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="impl_specific_error"} 0
+vernemq_mqtt_disconnect_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="not_authorized"} 0
+vernemq_mqtt_disconnect_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="server_busy"} 0
+vernemq_mqtt_disconnect_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="server_shutting_down"} 0
+vernemq_mqtt_disconnect_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="keep_alive_timeout"} 0
+vernemq_mqtt_disconnect_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="session_taken_over"} 0
+vernemq_mqtt_disconnect_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="topic_filter_invalid"} 0
+vernemq_mqtt_disconnect_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="topic_name_invalid"} 0
+vernemq_mqtt_disconnect_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="receive_max_exceeded"} 0
+vernemq_mqtt_disconnect_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="topic_alias_invalid"} 0
+vernemq_mqtt_disconnect_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="packet_too_large"} 0
+vernemq_mqtt_disconnect_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="message_rate_too_high"} 0
+vernemq_mqtt_disconnect_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="quota_exceeded"} 0
+vernemq_mqtt_disconnect_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="administrative_action"} 0
+vernemq_mqtt_disconnect_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="payload_format_invalid"} 0
+vernemq_mqtt_disconnect_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="retain_not_supported"} 0
+vernemq_mqtt_disconnect_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="qos_not_supported"} 0
+vernemq_mqtt_disconnect_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="use_another_server"} 0
+vernemq_mqtt_disconnect_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="server_moved"} 0
+vernemq_mqtt_disconnect_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="shared_subs_not_supported"} 0
+vernemq_mqtt_disconnect_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="connection_rate_exceeded"} 0
+vernemq_mqtt_disconnect_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="max_connect_time"} 0
+vernemq_mqtt_disconnect_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="subscription_ids_not_supported"} 0
+vernemq_mqtt_disconnect_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="wildcard_subs_not_supported"} 0
+vernemq_mqtt_connack_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="success"} 0
+vernemq_mqtt_connack_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="unspecified_error"} 0
+vernemq_mqtt_connack_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="malformed_packet"} 0
+vernemq_mqtt_connack_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="protocol_error"} 0
+vernemq_mqtt_connack_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="impl_specific_error"} 0
+vernemq_mqtt_connack_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="unsupported_protocol_version"} 0
+vernemq_mqtt_connack_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="client_identifier_not_valid"} 0
+vernemq_mqtt_connack_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="bad_username_or_password"} 0
+vernemq_mqtt_connack_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="not_authorized"} 0
+vernemq_mqtt_connack_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="server_unavailable"} 0
+vernemq_mqtt_connack_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="server_busy"} 0
+vernemq_mqtt_connack_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="banned"} 0
+vernemq_mqtt_connack_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="bad_authentication_method"} 0
+vernemq_mqtt_connack_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="topic_name_invalid"} 0
+vernemq_mqtt_connack_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="packet_too_large"} 0
+vernemq_mqtt_connack_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="quota_exceeded"} 0
+vernemq_mqtt_connack_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="payload_format_invalid"} 0
+vernemq_mqtt_connack_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="retain_not_supported"} 0
+vernemq_mqtt_connack_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="qos_not_supported"} 0
+vernemq_mqtt_connack_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="use_another_server"} 0
+vernemq_mqtt_connack_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="server_moved"} 0
+vernemq_mqtt_connack_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="connection_rate_exceeded"} 0
+vernemq_mqtt_puback_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="success"} 0
+vernemq_mqtt_puback_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="no_matching_subscribers"} 0
+vernemq_mqtt_puback_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="unspecified_error"} 0
+vernemq_mqtt_puback_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="impl_specific_error"} 0
+vernemq_mqtt_puback_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="not_authorized"} 0
+vernemq_mqtt_puback_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="topic_name_invalid"} 0
+vernemq_mqtt_puback_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="packet_id_in_use"} 0
+vernemq_mqtt_puback_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="quota_exceeded"} 0
+vernemq_mqtt_puback_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="payload_format_invalid"} 0
+vernemq_mqtt_puback_received{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="success"} 0
+vernemq_mqtt_puback_received{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="no_matching_subscribers"} 0
+vernemq_mqtt_puback_received{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="unspecified_error"} 0
+vernemq_mqtt_puback_received{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="impl_specific_error"} 0
+vernemq_mqtt_puback_received{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="not_authorized"} 0
+vernemq_mqtt_puback_received{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="topic_name_invalid"} 0
+vernemq_mqtt_puback_received{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="packet_id_in_use"} 0
+vernemq_mqtt_puback_received{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="quota_exceeded"} 0
+vernemq_mqtt_puback_received{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="payload_format_invalid"} 0
+vernemq_mqtt_pubrec_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="success"} 0
+vernemq_mqtt_pubrec_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="no_matching_subscribers"} 0
+vernemq_mqtt_pubrec_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="unspecified_error"} 0
+vernemq_mqtt_pubrec_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="impl_specific_error"} 0
+vernemq_mqtt_pubrec_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="not_authorized"} 0
+vernemq_mqtt_pubrec_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="topic_name_invalid"} 0
+vernemq_mqtt_pubrec_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="packet_id_in_use"} 0
+vernemq_mqtt_pubrec_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="quota_exceeded"} 0
+vernemq_mqtt_pubrec_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="payload_format_invalid"} 0
+vernemq_mqtt_pubrec_received{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="success"} 0
+vernemq_mqtt_pubrec_received{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="no_matching_subscribers"} 0
+vernemq_mqtt_pubrec_received{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="unspecified_error"} 0
+vernemq_mqtt_pubrec_received{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="impl_specific_error"} 0
+vernemq_mqtt_pubrec_received{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="not_authorized"} 0
+vernemq_mqtt_pubrec_received{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="topic_name_invalid"} 0
+vernemq_mqtt_pubrec_received{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="packet_id_in_use"} 0
+vernemq_mqtt_pubrec_received{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="quota_exceeded"} 0
+vernemq_mqtt_pubrec_received{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="payload_format_invalid"} 0
+vernemq_mqtt_pubrel_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="success"} 0
+vernemq_mqtt_pubrel_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="packet_id_not_found"} 0
+vernemq_mqtt_pubrel_received{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="success"} 0
+vernemq_mqtt_pubrel_received{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="packet_id_not_found"} 0
+vernemq_mqtt_pubcomp_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="success"} 0
+vernemq_mqtt_pubcomp_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="packet_id_not_found"} 0
+vernemq_mqtt_pubcomp_received{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="success"} 0
+vernemq_mqtt_pubcomp_received{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="packet_id_not_found"} 0
+# HELP vernemq_mqtt_auth_sent The number of AUTH packets sent.
+# TYPE vernemq_mqtt_auth_sent counter
+vernemq_mqtt_auth_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="success"} 0
+vernemq_mqtt_auth_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="continue_authentication"} 0
+vernemq_mqtt_auth_sent{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="reauthenticate"} 0
+# HELP vernemq_mqtt_auth_received The number of AUTH packets received.
+# TYPE vernemq_mqtt_auth_received counter
+vernemq_mqtt_auth_received{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="success"} 0
+vernemq_mqtt_auth_received{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="continue_authentication"} 0
+vernemq_mqtt_auth_received{node="VerneMQ@10.10.10.20",mqtt_version="5",reason_code="reauthenticate"} 0
+# HELP vernemq_total_active_connections The total number of active MQTT and MQTTWS connections.
+# TYPE vernemq_total_active_connections gauge
+vernemq_total_active_connections{node="VerneMQ@10.10.10.20"} 0
+# HELP vernemq_active_mqtt_connections The number of active MQTT(S) connections.
+# TYPE vernemq_active_mqtt_connections gauge
+vernemq_active_mqtt_connections{node="VerneMQ@10.10.10.20"} 0
+# HELP vernemq_active_mqttws_connections The number of active MQTT WS(S) connections.
+# TYPE vernemq_active_mqttws_connections gauge
+vernemq_active_mqttws_connections{node="VerneMQ@10.10.10.20"} 0
+# HELP vernemq_queue_processes The number of MQTT queue processes.
+# TYPE vernemq_queue_processes gauge
+vernemq_queue_processes{node="VerneMQ@10.10.10.20"} 0
+# HELP vernemq_retain_memory The number of bytes used for storing retained messages.
+# TYPE vernemq_retain_memory gauge
+vernemq_retain_memory{node="VerneMQ@10.10.10.20"} 15792
+# HELP vernemq_retain_messages The number of currently stored retained messages.
+# TYPE vernemq_retain_messages gauge
+vernemq_retain_messages{node="VerneMQ@10.10.10.20"} 0
+# HELP vernemq_router_memory The number of bytes used by the routing table.
+# TYPE vernemq_router_memory gauge
+vernemq_router_memory{node="VerneMQ@10.10.10.20"} 20224
+# HELP vernemq_router_subscriptions The number of subscriptions in the routing table.
+# TYPE vernemq_router_subscriptions gauge
+vernemq_router_subscriptions{node="VerneMQ@10.10.10.20"} 0
+# HELP vernemq_netsplit_resolved The number of resolved netsplits.
+# TYPE vernemq_netsplit_resolved counter
+vernemq_netsplit_resolved{node="VerneMQ@10.10.10.20"} 0
+# HELP vernemq_netsplit_detected The number of detected netsplits.
+# TYPE vernemq_netsplit_detected counter +vernemq_netsplit_detected{node="VerneMQ@10.10.10.20"} 0 +# HELP vernemq_system_utilization_scheduler_32 Scheduler 32 utilization (percentage) +# TYPE vernemq_system_utilization_scheduler_32 gauge +vernemq_system_utilization_scheduler_32{node="VerneMQ@10.10.10.20"} 0 +# HELP vernemq_system_utilization_scheduler_31 Scheduler 31 utilization (percentage) +# TYPE vernemq_system_utilization_scheduler_31 gauge +vernemq_system_utilization_scheduler_31{node="VerneMQ@10.10.10.20"} 0 +# HELP vernemq_system_utilization_scheduler_30 Scheduler 30 utilization (percentage) +# TYPE vernemq_system_utilization_scheduler_30 gauge +vernemq_system_utilization_scheduler_30{node="VerneMQ@10.10.10.20"} 0 +# HELP vernemq_system_utilization_scheduler_29 Scheduler 29 utilization (percentage) +# TYPE vernemq_system_utilization_scheduler_29 gauge +vernemq_system_utilization_scheduler_29{node="VerneMQ@10.10.10.20"} 0 +# HELP vernemq_system_utilization_scheduler_28 Scheduler 28 utilization (percentage) +# TYPE vernemq_system_utilization_scheduler_28 gauge +vernemq_system_utilization_scheduler_28{node="VerneMQ@10.10.10.20"} 0 +# HELP vernemq_system_utilization_scheduler_27 Scheduler 27 utilization (percentage) +# TYPE vernemq_system_utilization_scheduler_27 gauge +vernemq_system_utilization_scheduler_27{node="VerneMQ@10.10.10.20"} 0 +# HELP vernemq_system_utilization_scheduler_26 Scheduler 26 utilization (percentage) +# TYPE vernemq_system_utilization_scheduler_26 gauge +vernemq_system_utilization_scheduler_26{node="VerneMQ@10.10.10.20"} 0 +# HELP vernemq_system_utilization_scheduler_25 Scheduler 25 utilization (percentage) +# TYPE vernemq_system_utilization_scheduler_25 gauge +vernemq_system_utilization_scheduler_25{node="VerneMQ@10.10.10.20"} 0 +# HELP vernemq_system_utilization_scheduler_24 Scheduler 24 utilization (percentage) +# TYPE vernemq_system_utilization_scheduler_24 gauge +vernemq_system_utilization_scheduler_24{node="VerneMQ@10.10.10.20"} 0 +# HELP vernemq_system_utilization_scheduler_23 Scheduler 23 utilization (percentage) +# TYPE vernemq_system_utilization_scheduler_23 gauge +vernemq_system_utilization_scheduler_23{node="VerneMQ@10.10.10.20"} 0 +# HELP vernemq_system_utilization_scheduler_22 Scheduler 22 utilization (percentage) +# TYPE vernemq_system_utilization_scheduler_22 gauge +vernemq_system_utilization_scheduler_22{node="VerneMQ@10.10.10.20"} 0 +# HELP vernemq_system_utilization_scheduler_21 Scheduler 21 utilization (percentage) +# TYPE vernemq_system_utilization_scheduler_21 gauge +vernemq_system_utilization_scheduler_21{node="VerneMQ@10.10.10.20"} 0 +# HELP vernemq_system_utilization_scheduler_20 Scheduler 20 utilization (percentage) +# TYPE vernemq_system_utilization_scheduler_20 gauge +vernemq_system_utilization_scheduler_20{node="VerneMQ@10.10.10.20"} 0 +# HELP vernemq_system_utilization_scheduler_19 Scheduler 19 utilization (percentage) +# TYPE vernemq_system_utilization_scheduler_19 gauge +vernemq_system_utilization_scheduler_19{node="VerneMQ@10.10.10.20"} 0 +# HELP vernemq_system_utilization_scheduler_18 Scheduler 18 utilization (percentage) +# TYPE vernemq_system_utilization_scheduler_18 gauge +vernemq_system_utilization_scheduler_18{node="VerneMQ@10.10.10.20"} 0 +# HELP vernemq_system_utilization_scheduler_17 Scheduler 17 utilization (percentage) +# TYPE vernemq_system_utilization_scheduler_17 gauge +vernemq_system_utilization_scheduler_17{node="VerneMQ@10.10.10.20"} 0 +# HELP vernemq_system_utilization_scheduler_16 Scheduler 16 utilization 
(percentage) +# TYPE vernemq_system_utilization_scheduler_16 gauge +vernemq_system_utilization_scheduler_16{node="VerneMQ@10.10.10.20"} 2 +# HELP vernemq_system_utilization_scheduler_15 Scheduler 15 utilization (percentage) +# TYPE vernemq_system_utilization_scheduler_15 gauge +vernemq_system_utilization_scheduler_15{node="VerneMQ@10.10.10.20"} 1 +# HELP vernemq_system_utilization_scheduler_14 Scheduler 14 utilization (percentage) +# TYPE vernemq_system_utilization_scheduler_14 gauge +vernemq_system_utilization_scheduler_14{node="VerneMQ@10.10.10.20"} 3 +# HELP vernemq_system_utilization_scheduler_13 Scheduler 13 utilization (percentage) +# TYPE vernemq_system_utilization_scheduler_13 gauge +vernemq_system_utilization_scheduler_13{node="VerneMQ@10.10.10.20"} 4 +# HELP vernemq_system_utilization_scheduler_12 Scheduler 12 utilization (percentage) +# TYPE vernemq_system_utilization_scheduler_12 gauge +vernemq_system_utilization_scheduler_12{node="VerneMQ@10.10.10.20"} 2 +# HELP vernemq_system_utilization_scheduler_11 Scheduler 11 utilization (percentage) +# TYPE vernemq_system_utilization_scheduler_11 gauge +vernemq_system_utilization_scheduler_11{node="VerneMQ@10.10.10.20"} 3 +# HELP vernemq_system_utilization_scheduler_10 Scheduler 10 utilization (percentage) +# TYPE vernemq_system_utilization_scheduler_10 gauge +vernemq_system_utilization_scheduler_10{node="VerneMQ@10.10.10.20"} 3 +# HELP vernemq_system_utilization_scheduler_9 Scheduler 9 utilization (percentage) +# TYPE vernemq_system_utilization_scheduler_9 gauge +vernemq_system_utilization_scheduler_9{node="VerneMQ@10.10.10.20"} 3 +# HELP vernemq_system_utilization_scheduler_8 Scheduler 8 utilization (percentage) +# TYPE vernemq_system_utilization_scheduler_8 gauge +vernemq_system_utilization_scheduler_8{node="VerneMQ@10.10.10.20"} 14 +# HELP vernemq_system_utilization_scheduler_7 Scheduler 7 utilization (percentage) +# TYPE vernemq_system_utilization_scheduler_7 gauge +vernemq_system_utilization_scheduler_7{node="VerneMQ@10.10.10.20"} 5 +# HELP vernemq_system_utilization_scheduler_6 Scheduler 6 utilization (percentage) +# TYPE vernemq_system_utilization_scheduler_6 gauge +vernemq_system_utilization_scheduler_6{node="VerneMQ@10.10.10.20"} 4 +# HELP vernemq_system_utilization_scheduler_5 Scheduler 5 utilization (percentage) +# TYPE vernemq_system_utilization_scheduler_5 gauge +vernemq_system_utilization_scheduler_5{node="VerneMQ@10.10.10.20"} 4 +# HELP vernemq_system_utilization_scheduler_4 Scheduler 4 utilization (percentage) +# TYPE vernemq_system_utilization_scheduler_4 gauge +vernemq_system_utilization_scheduler_4{node="VerneMQ@10.10.10.20"} 5 +# HELP vernemq_system_utilization_scheduler_3 Scheduler 3 utilization (percentage) +# TYPE vernemq_system_utilization_scheduler_3 gauge +vernemq_system_utilization_scheduler_3{node="VerneMQ@10.10.10.20"} 3 +# HELP vernemq_system_utilization_scheduler_2 Scheduler 2 utilization (percentage) +# TYPE vernemq_system_utilization_scheduler_2 gauge +vernemq_system_utilization_scheduler_2{node="VerneMQ@10.10.10.20"} 6 +# HELP vernemq_system_utilization_scheduler_1 Scheduler 1 utilization (percentage) +# TYPE vernemq_system_utilization_scheduler_1 gauge +vernemq_system_utilization_scheduler_1{node="VerneMQ@10.10.10.20"} 100 +# HELP vernemq_system_utilization The average system (scheduler) utilization (percentage). +# TYPE vernemq_system_utilization gauge +vernemq_system_utilization{node="VerneMQ@10.10.10.20"} 5 +# HELP vernemq_vm_memory_ets The amount of memory allocated for ETS tables. 
+# TYPE vernemq_vm_memory_ets gauge +vernemq_vm_memory_ets{node="VerneMQ@10.10.10.20"} 6665000 +# HELP vernemq_vm_memory_code The amount of memory allocated for code. +# TYPE vernemq_vm_memory_code gauge +vernemq_vm_memory_code{node="VerneMQ@10.10.10.20"} 13546686 +# HELP vernemq_vm_memory_binary The amount of memory allocated for binaries. +# TYPE vernemq_vm_memory_binary gauge +vernemq_vm_memory_binary{node="VerneMQ@10.10.10.20"} 729792 +# HELP vernemq_vm_memory_atom_used The amount of memory used by atoms. +# TYPE vernemq_vm_memory_atom_used gauge +vernemq_vm_memory_atom_used{node="VerneMQ@10.10.10.20"} 634383 +# HELP vernemq_vm_memory_atom The amount of memory allocated for atoms. +# TYPE vernemq_vm_memory_atom gauge +vernemq_vm_memory_atom{node="VerneMQ@10.10.10.20"} 639193 +# HELP vernemq_vm_memory_system The amount of memory allocated for the emulator. +# TYPE vernemq_vm_memory_system gauge +vernemq_vm_memory_system{node="VerneMQ@10.10.10.20"} 44839192 +# HELP vernemq_vm_memory_processes_used The amount of memory used by processes. +# TYPE vernemq_vm_memory_processes_used gauge +vernemq_vm_memory_processes_used{node="VerneMQ@10.10.10.20"} 13751224 +# HELP vernemq_vm_memory_processes The amount of memory allocated for processes. +# TYPE vernemq_vm_memory_processes gauge +vernemq_vm_memory_processes{node="VerneMQ@10.10.10.20"} 13754320 +# HELP vernemq_vm_memory_total The total amount of memory allocated. +# TYPE vernemq_vm_memory_total gauge +vernemq_vm_memory_total{node="VerneMQ@10.10.10.20"} 58593512 +# HELP vernemq_system_process_count The number of Erlang processes. +# TYPE vernemq_system_process_count gauge +vernemq_system_process_count{node="VerneMQ@10.10.10.20"} 465 +# HELP vernemq_system_wallclock The number of milli-seconds passed since the node was started. +# TYPE vernemq_system_wallclock counter +vernemq_system_wallclock{node="VerneMQ@10.10.10.20"} 51082602 +# HELP vernemq_system_runtime The sum of the runtime for all threads in the Erlang runtime system. +# TYPE vernemq_system_runtime counter +vernemq_system_runtime{node="VerneMQ@10.10.10.20"} 54249 +# HELP vernemq_system_run_queue The total number of processes and ports ready to run on all run-queues. +# TYPE vernemq_system_run_queue gauge +vernemq_system_run_queue{node="VerneMQ@10.10.10.20"} 0 +# HELP vernemq_system_reductions The number of reductions performed in the VM since the node was started. +# TYPE vernemq_system_reductions counter +vernemq_system_reductions{node="VerneMQ@10.10.10.20"} 340640218 +# HELP vernemq_system_io_out The total number of bytes sent through ports. +# TYPE vernemq_system_io_out counter +vernemq_system_io_out{node="VerneMQ@10.10.10.20"} 1702500 +# HELP vernemq_system_io_in The total number of bytes received through ports. +# TYPE vernemq_system_io_in counter +vernemq_system_io_in{node="VerneMQ@10.10.10.20"} 30616669 +# HELP vernemq_system_words_reclaimed_by_gc The number of words reclaimed by the garbage collector. +# TYPE vernemq_system_words_reclaimed_by_gc counter +vernemq_system_words_reclaimed_by_gc{node="VerneMQ@10.10.10.20"} 661531058 +# HELP vernemq_system_gc_count The number of garbage collections performed. +# TYPE vernemq_system_gc_count counter +vernemq_system_gc_count{node="VerneMQ@10.10.10.20"} 355236 +# HELP vernemq_system_exact_reductions The exact number of reductions performed. +# TYPE vernemq_system_exact_reductions counter +vernemq_system_exact_reductions{node="VerneMQ@10.10.10.20"} 340307987 +# HELP vernemq_system_context_switches The total number of context switches. 
+# TYPE vernemq_system_context_switches counter +vernemq_system_context_switches{node="VerneMQ@10.10.10.20"} 3902972 +# HELP vernemq_storage_scan_microseconds A histogram of the storage scan latency. +# TYPE vernemq_storage_scan_microseconds histogram +vernemq_storage_scan_microseconds_bucket{node="VerneMQ@10.10.10.20",le="+Inf"} 50 +vernemq_storage_scan_microseconds_bucket{node="VerneMQ@10.10.10.20",le="1000000"} 50 +vernemq_storage_scan_microseconds_bucket{node="VerneMQ@10.10.10.20",le="100000"} 50 +vernemq_storage_scan_microseconds_bucket{node="VerneMQ@10.10.10.20",le="10000"} 50 +vernemq_storage_scan_microseconds_bucket{node="VerneMQ@10.10.10.20",le="1000"} 50 +vernemq_storage_scan_microseconds_bucket{node="VerneMQ@10.10.10.20",le="100"} 8 +vernemq_storage_scan_microseconds_bucket{node="VerneMQ@10.10.10.20",le="10"} 0 +vernemq_storage_scan_microseconds_count{node="VerneMQ@10.10.10.20"} 50 +vernemq_storage_scan_microseconds_sum{node="VerneMQ@10.10.10.20"} 6482 +# HELP vernemq_storage_write_microseconds A histogram of the storage write latency. +# TYPE vernemq_storage_write_microseconds histogram +vernemq_storage_write_microseconds_bucket{node="VerneMQ@10.10.10.20",le="+Inf"} 68140 +vernemq_storage_write_microseconds_bucket{node="VerneMQ@10.10.10.20",le="1000000"} 68140 +vernemq_storage_write_microseconds_bucket{node="VerneMQ@10.10.10.20",le="100000"} 68140 +vernemq_storage_write_microseconds_bucket{node="VerneMQ@10.10.10.20",le="10000"} 68140 +vernemq_storage_write_microseconds_bucket{node="VerneMQ@10.10.10.20",le="1000"} 68112 +vernemq_storage_write_microseconds_bucket{node="VerneMQ@10.10.10.20",le="100"} 35957 +vernemq_storage_write_microseconds_bucket{node="VerneMQ@10.10.10.20",le="10"} 0 +vernemq_storage_write_microseconds_count{node="VerneMQ@10.10.10.20"} 68140 +vernemq_storage_write_microseconds_sum{node="VerneMQ@10.10.10.20"} 7490218 +# HELP vernemq_storage_read_microseconds A histogram of the storage read latency. +# TYPE vernemq_storage_read_microseconds histogram +vernemq_storage_read_microseconds_bucket{node="VerneMQ@10.10.10.20",le="+Inf"} 362 +vernemq_storage_read_microseconds_bucket{node="VerneMQ@10.10.10.20",le="1000000"} 362 +vernemq_storage_read_microseconds_bucket{node="VerneMQ@10.10.10.20",le="100000"} 362 +vernemq_storage_read_microseconds_bucket{node="VerneMQ@10.10.10.20",le="10000"} 362 +vernemq_storage_read_microseconds_bucket{node="VerneMQ@10.10.10.20",le="1000"} 362 +vernemq_storage_read_microseconds_bucket{node="VerneMQ@10.10.10.20",le="100"} 350 +vernemq_storage_read_microseconds_bucket{node="VerneMQ@10.10.10.20",le="10"} 0 +vernemq_storage_read_microseconds_count{node="VerneMQ@10.10.10.20"} 362 +vernemq_storage_read_microseconds_sum{node="VerneMQ@10.10.10.20"} 19097 +# HELP vernemq_metadata_fold_microseconds A histogram of the metadata fold latency. 
+# TYPE vernemq_metadata_fold_microseconds histogram +vernemq_metadata_fold_microseconds_bucket{node="VerneMQ@10.10.10.20",le="+Inf"} 3 +vernemq_metadata_fold_microseconds_bucket{node="VerneMQ@10.10.10.20",le="1000000"} 3 +vernemq_metadata_fold_microseconds_bucket{node="VerneMQ@10.10.10.20",le="100000"} 3 +vernemq_metadata_fold_microseconds_bucket{node="VerneMQ@10.10.10.20",le="10000"} 3 +vernemq_metadata_fold_microseconds_bucket{node="VerneMQ@10.10.10.20",le="1000"} 0 +vernemq_metadata_fold_microseconds_bucket{node="VerneMQ@10.10.10.20",le="100"} 0 +vernemq_metadata_fold_microseconds_bucket{node="VerneMQ@10.10.10.20",le="10"} 0 +vernemq_metadata_fold_microseconds_count{node="VerneMQ@10.10.10.20"} 3 +vernemq_metadata_fold_microseconds_sum{node="VerneMQ@10.10.10.20"} 4332 +# HELP vernemq_metadata_get_microseconds A histogram of the metadata get latency. +# TYPE vernemq_metadata_get_microseconds histogram +vernemq_metadata_get_microseconds_bucket{node="VerneMQ@10.10.10.20",le="+Inf"} 342 +vernemq_metadata_get_microseconds_bucket{node="VerneMQ@10.10.10.20",le="1000000"} 342 +vernemq_metadata_get_microseconds_bucket{node="VerneMQ@10.10.10.20",le="100000"} 342 +vernemq_metadata_get_microseconds_bucket{node="VerneMQ@10.10.10.20",le="10000"} 342 +vernemq_metadata_get_microseconds_bucket{node="VerneMQ@10.10.10.20",le="1000"} 341 +vernemq_metadata_get_microseconds_bucket{node="VerneMQ@10.10.10.20",le="100"} 327 +vernemq_metadata_get_microseconds_bucket{node="VerneMQ@10.10.10.20",le="10"} 0 +vernemq_metadata_get_microseconds_count{node="VerneMQ@10.10.10.20"} 342 +vernemq_metadata_get_microseconds_sum{node="VerneMQ@10.10.10.20"} 23294 +# HELP vernemq_swc_dotkeymap_memory The number of words allocated to the SWC group dotkeymap. +# TYPE vernemq_swc_dotkeymap_memory gauge +vernemq_swc_dotkeymap_memory{node="VerneMQ@10.10.10.20",group="meta1"} 915 +vernemq_swc_dotkeymap_memory{node="VerneMQ@10.10.10.20",group="meta10"} 915 +vernemq_swc_dotkeymap_memory{node="VerneMQ@10.10.10.20",group="meta2"} 915 +vernemq_swc_dotkeymap_memory{node="VerneMQ@10.10.10.20",group="meta3"} 915 +vernemq_swc_dotkeymap_memory{node="VerneMQ@10.10.10.20",group="meta4"} 915 +vernemq_swc_dotkeymap_memory{node="VerneMQ@10.10.10.20",group="meta5"} 915 +vernemq_swc_dotkeymap_memory{node="VerneMQ@10.10.10.20",group="meta6"} 915 +vernemq_swc_dotkeymap_memory{node="VerneMQ@10.10.10.20",group="meta7"} 915 +vernemq_swc_dotkeymap_memory{node="VerneMQ@10.10.10.20",group="meta8"} 915 +vernemq_swc_dotkeymap_memory{node="VerneMQ@10.10.10.20",group="meta9"} 915 +# HELP vernemq_swc_object_count The number of replicated objects by this SWC group. +# TYPE vernemq_swc_object_count gauge +vernemq_swc_object_count{node="VerneMQ@10.10.10.20",group="meta1"} 0 +vernemq_swc_object_count{node="VerneMQ@10.10.10.20",group="meta10"} 0 +vernemq_swc_object_count{node="VerneMQ@10.10.10.20",group="meta2"} 0 +vernemq_swc_object_count{node="VerneMQ@10.10.10.20",group="meta3"} 0 +vernemq_swc_object_count{node="VerneMQ@10.10.10.20",group="meta4"} 0 +vernemq_swc_object_count{node="VerneMQ@10.10.10.20",group="meta5"} 0 +vernemq_swc_object_count{node="VerneMQ@10.10.10.20",group="meta6"} 0 +vernemq_swc_object_count{node="VerneMQ@10.10.10.20",group="meta7"} 0 +vernemq_swc_object_count{node="VerneMQ@10.10.10.20",group="meta8"} 0 +vernemq_swc_object_count{node="VerneMQ@10.10.10.20",group="meta9"} 0 +# HELP vernemq_swc_tombstone_count The number of replicated tombstones by this SWC group. 
+# TYPE vernemq_swc_tombstone_count gauge
+vernemq_swc_tombstone_count{node="VerneMQ@10.10.10.20",group="meta1"} 0
+vernemq_swc_tombstone_count{node="VerneMQ@10.10.10.20",group="meta10"} 0
+vernemq_swc_tombstone_count{node="VerneMQ@10.10.10.20",group="meta2"} 0
+vernemq_swc_tombstone_count{node="VerneMQ@10.10.10.20",group="meta3"} 0
+vernemq_swc_tombstone_count{node="VerneMQ@10.10.10.20",group="meta4"} 0
+vernemq_swc_tombstone_count{node="VerneMQ@10.10.10.20",group="meta5"} 0
+vernemq_swc_tombstone_count{node="VerneMQ@10.10.10.20",group="meta6"} 0
+vernemq_swc_tombstone_count{node="VerneMQ@10.10.10.20",group="meta7"} 0
+vernemq_swc_tombstone_count{node="VerneMQ@10.10.10.20",group="meta8"} 0
+vernemq_swc_tombstone_count{node="VerneMQ@10.10.10.20",group="meta9"} 0
diff --git a/src/go/plugin/go.d/modules/vernemq/vernemq.go b/src/go/plugin/go.d/modules/vernemq/vernemq.go
index 2f1de38ff..df007d763 100644
--- a/src/go/plugin/go.d/modules/vernemq/vernemq.go
+++ b/src/go/plugin/go.d/modules/vernemq/vernemq.go
@@ -5,9 +5,11 @@ package vernemq
 import (
 	_ "embed"
 	"errors"
+	"fmt"
 	"time"
 
 	"github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+	"github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt"
 	"github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
 	"github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
 )
@@ -26,37 +28,40 @@ func init() {
 func New() *VerneMQ {
 	return &VerneMQ{
 		Config: Config{
-			HTTP: web.HTTP{
-				Request: web.Request{
+			HTTPConfig: web.HTTPConfig{
+				RequestConfig: web.RequestConfig{
 					URL: "http://127.0.0.1:8888/metrics",
 				},
-				Client: web.Client{
-					Timeout: web.Duration(time.Second),
+				ClientConfig: web.ClientConfig{
+					Timeout: confopt.Duration(time.Second),
 				},
 			},
 		},
-		charts: charts.Copy(),
-		cache: make(map[string]bool),
+		charts: &module.Charts{},
+		seenNodes: make(map[string]bool),
 	}
 }
 
 type Config struct {
-	UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
-	web.HTTP `yaml:",inline" json:""`
+	UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+	web.HTTPConfig `yaml:",inline" json:""`
 }
 
-type (
-	VerneMQ struct {
-		module.Base
-		Config `yaml:",inline" json:""`
+type VerneMQ struct {
+	module.Base
+	Config `yaml:",inline" json:""`
 
-		charts *Charts
+	charts *module.Charts
 
-		prom prometheus.Prometheus
+	prom prometheus.Prometheus
 
-		cache map[string]bool
-	}
-)
+	namespace struct {
+		found bool
+		name string
+	}
+
+	seenNodes map[string]bool
+}
 
 func (v *VerneMQ) Configuration() any {
 	return v.Config
@@ -64,14 +69,12 @@ func (v *VerneMQ) Init() error {
 	if err := v.validateConfig(); err != nil {
-		v.Errorf("error on validating config: %v", err)
-		return err
+		return fmt.Errorf("config validation: %v", err)
 	}
 
 	prom, err := v.initPrometheusClient()
 	if err != nil {
-		v.Error(err)
-		return err
+		return fmt.Errorf("init prometheus client: %v", err)
 	}
 	v.prom = prom
@@ -81,16 +84,17 @@ func (v *VerneMQ) Check() error {
 	mx, err := v.collect()
 	if err != nil {
-		v.Error(err)
 		return err
 	}
+
 	if len(mx) == 0 {
 		return errors.New("no metrics collected")
 	}
+
 	return nil
 }
 
-func (v *VerneMQ) Charts() *Charts {
+func (v *VerneMQ) Charts() *module.Charts {
 	return v.charts
 }
 
@@ -103,6 +107,7 @@ func (v *VerneMQ) Collect() map[string]int64 {
 	if len(mx) == 0 {
 		return nil
 	}
+
 	return mx
 }
diff --git a/src/go/plugin/go.d/modules/vernemq/vernemq_test.go b/src/go/plugin/go.d/modules/vernemq/vernemq_test.go
index 13eb3dceb..2c7684456 100644
--- a/src/go/plugin/go.d/modules/vernemq/vernemq_test.go
+++ b/src/go/plugin/go.d/modules/vernemq/vernemq_test.go
@@ -9,6 +9,7 @@ import (
 	"testing"
 
 	"github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+	"github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
 
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -18,16 +19,16 @@ var (
 	dataConfigJSON, _ = os.ReadFile("testdata/config.json")
 	dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
-	dataVer1101MQTTv5Metrics, _ = os.ReadFile("testdata/metrics-v1.10.1-mqtt5.txt")
-	dataUnexpectedMetrics, _ = os.ReadFile("testdata/non_vernemq.txt")
+	dataVer1101Metrics, _ = os.ReadFile("testdata/v1.10.1/metrics.txt")
+	dataVer201Metrics, _ = os.ReadFile("testdata/v2.0.1/metrics.txt")
 )
 
 func Test_testDataIsValid(t *testing.T) {
 	for name, data := range map[string][]byte{
-		"dataConfigJSON": dataConfigJSON,
-		"dataConfigYAML": dataConfigYAML,
-		"dataVer1101MQTTv5Metrics": dataVer1101MQTTv5Metrics,
-		"dataUnexpectedMetrics": dataUnexpectedMetrics,
+		"dataConfigJSON": dataConfigJSON,
+		"dataConfigYAML": dataConfigYAML,
+		"dataVer1101Metrics": dataVer1101Metrics,
+		"dataVer201Metrics": dataVer201Metrics,
 	} {
 		require.NotNil(t, data, name)
 	}
@@ -38,45 +39,36 @@ func TestVerneMQ_ConfigurationSerialize(t *testing.T) {
 }
 
 func TestVerneMQ_Init(t *testing.T) {
-	verneMQ := prepareVerneMQ()
-
-	assert.NoError(t, verneMQ.Init())
-}
-
-func TestVerneMQ_Init_ReturnsFalseIfURLIsNotSet(t *testing.T) {
-	verneMQ := prepareVerneMQ()
-	verneMQ.URL = ""
-
-	assert.Error(t, verneMQ.Init())
-}
-
-func TestVerneMQ_Init_ReturnsFalseIfClientWrongTLSCA(t *testing.T) {
-	verneMQ := prepareVerneMQ()
-	verneMQ.Client.TLSConfig.TLSCA = "testdata/tls"
-
-	assert.Error(t, verneMQ.Init())
-}
-
-func TestVerneMQ_Check(t *testing.T) {
-	verneMQ, srv := prepareClientServerV1101(t)
-	defer srv.Close()
-
-	assert.NoError(t, verneMQ.Check())
-}
-
-func TestVerneMQ_Check_ReturnsFalseIfConnectionRefused(t *testing.T) {
-	verneMQ := prepareVerneMQ()
-	require.NoError(t, verneMQ.Init())
-
-	assert.Error(t, verneMQ.Check())
-}
-
-func TestVerneMQ_Check_ReturnsFalseIfMetricsAreNotVerneMQ(t *testing.T) {
-	verneMQ, srv := prepareClientServerNotVerneMQ(t)
-	defer srv.Close()
-	require.NoError(t, verneMQ.Init())
+	tests := map[string]struct {
+		wantFail bool
+		config Config
+	}{
+		"success with default": {
+			wantFail: false,
+			config: New().Config,
+		},
+		"fail when URL not set": {
+			wantFail: true,
+			config: Config{
+				HTTPConfig: web.HTTPConfig{
+					RequestConfig: web.RequestConfig{URL: ""},
+				},
+			},
+		},
+	}
 
-	assert.Error(t, verneMQ.Check())
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) {
+			vmq := New()
+			vmq.Config = test.config
+
+			if test.wantFail {
+				assert.Error(t, vmq.Init())
+			} else {
+				assert.NoError(t, vmq.Init())
+			}
+		})
+	}
 }
 
 func TestVerneMQ_Charts(t *testing.T) {
@@ -87,492 +79,667 @@ func TestVerneMQ_Cleanup(t *testing.T) {
 	assert.NotPanics(t, New().Cleanup)
 }
 
-func TestVerneMQ_Collect(t *testing.T) {
-	verneMQ, srv := prepareClientServerV1101(t)
-	defer srv.Close()
-
-	collected := verneMQ.Collect()
-	assert.Equal(t, v1101ExpectedMetrics, collected)
-	testCharts(t, verneMQ, collected)
-}
-
-func TestVerneMQ_Collect_ReturnsNilIfConnectionRefused(t *testing.T) {
-	verneMQ := prepareVerneMQ()
-	require.NoError(t, verneMQ.Init())
+func TestVerneMQ_Check(t *testing.T) {
+	tests := map[string]struct {
+		wantFail bool
+		prepare func(t *testing.T) (vmq *VerneMQ, cleanup func())
+	}{
+		"success on valid response v1.10.1": {
v1.10.1": { + wantFail: false, + prepare: caseOkVer1101, + }, + "success on valid response v2.0.1": { + wantFail: false, + prepare: caseOkVer201, + }, + "fail on unexpected Prometheus": { + wantFail: true, + prepare: caseUnexpectedPrometheusMetrics, + }, + "fail on invalid data response": { + wantFail: true, + prepare: caseInvalidDataResponse, + }, + "fail on connection refused": { + wantFail: true, + prepare: caseConnectionRefused, + }, + "fail on 404 response": { + wantFail: true, + prepare: case404, + }, + } - assert.Nil(t, verneMQ.Collect()) + for name, test := range tests { + t.Run(name, func(t *testing.T) { + vmq, cleanup := test.prepare(t) + defer cleanup() + + if test.wantFail { + assert.Error(t, vmq.Check()) + } else { + assert.NoError(t, vmq.Check()) + } + }) + } } -func TestVerneMQ_Collect_ReturnsNilIfMetricsAreNotVerneMQ(t *testing.T) { - verneMQ, srv := prepareClientServerNotVerneMQ(t) - defer srv.Close() +func TestVerneMQ_Collect(t *testing.T) { + tests := map[string]struct { + prepare func(t *testing.T) (vmq *VerneMQ, cleanup func()) + wantNumOfCharts int + wantMetrics map[string]int64 + }{ + "success on valid response ver 1.10.1": { + prepare: caseOkVer1101, + wantNumOfCharts: len(nodeChartsTmpl) + len(nodeMqtt4ChartsTmpl) + len(nodeMqtt5ChartsTmpl), + wantMetrics: map[string]int64{ + "node_VerneMQ@172.17.0.2_bytes_received": 36796908, + "node_VerneMQ@172.17.0.2_bytes_sent": 23361693, + "node_VerneMQ@172.17.0.2_client_expired": 0, + "node_VerneMQ@172.17.0.2_cluster_bytes_dropped": 0, + "node_VerneMQ@172.17.0.2_cluster_bytes_received": 0, + "node_VerneMQ@172.17.0.2_cluster_bytes_sent": 0, + "node_VerneMQ@172.17.0.2_mqtt4_client_keepalive_expired": 1, + "node_VerneMQ@172.17.0.2_mqtt4_mqtt_connack_accepted_sent": 0, + "node_VerneMQ@172.17.0.2_mqtt4_mqtt_connack_bad_credentials_sent": 0, + "node_VerneMQ@172.17.0.2_mqtt4_mqtt_connack_identifier_rejected_sent": 0, + "node_VerneMQ@172.17.0.2_mqtt4_mqtt_connack_not_authorized_sent": 0, + "node_VerneMQ@172.17.0.2_mqtt4_mqtt_connack_sent": 338956, + "node_VerneMQ@172.17.0.2_mqtt4_mqtt_connack_sent_return_code_bad_username_or_password": 4, + "node_VerneMQ@172.17.0.2_mqtt4_mqtt_connack_sent_return_code_client_identifier_not_valid": 0, + "node_VerneMQ@172.17.0.2_mqtt4_mqtt_connack_sent_return_code_not_authorized": 4, + "node_VerneMQ@172.17.0.2_mqtt4_mqtt_connack_sent_return_code_server_unavailable": 0, + "node_VerneMQ@172.17.0.2_mqtt4_mqtt_connack_sent_return_code_success": 338948, + "node_VerneMQ@172.17.0.2_mqtt4_mqtt_connack_sent_return_code_unsupported_protocol_version": 0, + "node_VerneMQ@172.17.0.2_mqtt4_mqtt_connack_server_unavailable_sent": 0, + "node_VerneMQ@172.17.0.2_mqtt4_mqtt_connack_unacceptable_protocol_sent": 0, + "node_VerneMQ@172.17.0.2_mqtt4_mqtt_connect_received": 338956, + "node_VerneMQ@172.17.0.2_mqtt4_mqtt_disconnect_received": 107, + "node_VerneMQ@172.17.0.2_mqtt4_mqtt_invalid_msg_size_error": 0, + "node_VerneMQ@172.17.0.2_mqtt4_mqtt_pingreq_received": 205, + "node_VerneMQ@172.17.0.2_mqtt4_mqtt_pingresp_sent": 205, + "node_VerneMQ@172.17.0.2_mqtt4_mqtt_puback_invalid_error": 0, + "node_VerneMQ@172.17.0.2_mqtt4_mqtt_puback_received": 525694, + "node_VerneMQ@172.17.0.2_mqtt4_mqtt_puback_sent": 537068, + "node_VerneMQ@172.17.0.2_mqtt4_mqtt_pubcomp_invalid_error": 0, + "node_VerneMQ@172.17.0.2_mqtt4_mqtt_pubcomp_received": 0, + "node_VerneMQ@172.17.0.2_mqtt4_mqtt_pubcomp_sent": 0, + "node_VerneMQ@172.17.0.2_mqtt4_mqtt_publish_auth_error": 0, + "node_VerneMQ@172.17.0.2_mqtt4_mqtt_publish_error": 0, + 
"node_VerneMQ@172.17.0.2_mqtt4_mqtt_publish_received": 537088, + "node_VerneMQ@172.17.0.2_mqtt4_mqtt_publish_sent": 525721, + "node_VerneMQ@172.17.0.2_mqtt4_mqtt_pubrec_invalid_error": 0, + "node_VerneMQ@172.17.0.2_mqtt4_mqtt_pubrec_received": 0, + "node_VerneMQ@172.17.0.2_mqtt4_mqtt_pubrec_sent": 0, + "node_VerneMQ@172.17.0.2_mqtt4_mqtt_pubrel_received": 0, + "node_VerneMQ@172.17.0.2_mqtt4_mqtt_pubrel_sent": 0, + "node_VerneMQ@172.17.0.2_mqtt4_mqtt_suback_sent": 122, + "node_VerneMQ@172.17.0.2_mqtt4_mqtt_subscribe_auth_error": 0, + "node_VerneMQ@172.17.0.2_mqtt4_mqtt_subscribe_error": 0, + "node_VerneMQ@172.17.0.2_mqtt4_mqtt_subscribe_received": 122, + "node_VerneMQ@172.17.0.2_mqtt4_mqtt_unsuback_sent": 108, + "node_VerneMQ@172.17.0.2_mqtt4_mqtt_unsubscribe_error": 0, + "node_VerneMQ@172.17.0.2_mqtt4_mqtt_unsubscribe_received": 108, + "node_VerneMQ@172.17.0.2_mqtt5_client_keepalive_expired": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_auth_received": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_auth_received_reason_code_continue_authentication": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_auth_received_reason_code_reauthenticate": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_auth_received_reason_code_success": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_auth_sent": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_auth_sent_reason_code_continue_authentication": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_auth_sent_reason_code_reauthenticate": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_auth_sent_reason_code_success": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_connack_sent": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_connack_sent_reason_code_bad_authentication_method": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_connack_sent_reason_code_bad_username_or_password": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_connack_sent_reason_code_banned": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_connack_sent_reason_code_client_identifier_not_valid": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_connack_sent_reason_code_connection_rate_exceeded": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_connack_sent_reason_code_impl_specific_error": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_connack_sent_reason_code_malformed_packet": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_connack_sent_reason_code_not_authorized": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_connack_sent_reason_code_packet_too_large": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_connack_sent_reason_code_payload_format_invalid": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_connack_sent_reason_code_protocol_error": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_connack_sent_reason_code_qos_not_supported": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_connack_sent_reason_code_quota_exceeded": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_connack_sent_reason_code_retain_not_supported": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_connack_sent_reason_code_server_busy": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_connack_sent_reason_code_server_moved": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_connack_sent_reason_code_server_unavailable": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_connack_sent_reason_code_success": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_connack_sent_reason_code_topic_name_invalid": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_connack_sent_reason_code_unspecified_error": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_connack_sent_reason_code_unsupported_protocol_version": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_connack_sent_reason_code_use_another_server": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_connect_received": 0, + 
"node_VerneMQ@172.17.0.2_mqtt5_mqtt_disconnect_received": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_disconnect_received_reason_code_administrative_action": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_disconnect_received_reason_code_disconnect_with_will_msg": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_disconnect_received_reason_code_impl_specific_error": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_disconnect_received_reason_code_malformed_packet": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_disconnect_received_reason_code_message_rate_too_high": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_disconnect_received_reason_code_normal_disconnect": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_disconnect_received_reason_code_packet_too_large": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_disconnect_received_reason_code_payload_format_invalid": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_disconnect_received_reason_code_protocol_error": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_disconnect_received_reason_code_quota_exceeded": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_disconnect_received_reason_code_receive_max_exceeded": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_disconnect_received_reason_code_topic_alias_invalid": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_disconnect_received_reason_code_topic_name_invalid": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_disconnect_received_reason_code_unspecified_error": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_disconnect_sent": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_disconnect_sent_reason_code_administrative_action": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_disconnect_sent_reason_code_connection_rate_exceeded": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_disconnect_sent_reason_code_impl_specific_error": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_disconnect_sent_reason_code_keep_alive_timeout": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_disconnect_sent_reason_code_malformed_packet": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_disconnect_sent_reason_code_max_connect_time": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_disconnect_sent_reason_code_message_rate_too_high": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_disconnect_sent_reason_code_normal_disconnect": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_disconnect_sent_reason_code_not_authorized": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_disconnect_sent_reason_code_packet_too_large": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_disconnect_sent_reason_code_payload_format_invalid": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_disconnect_sent_reason_code_protocol_error": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_disconnect_sent_reason_code_qos_not_supported": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_disconnect_sent_reason_code_quota_exceeded": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_disconnect_sent_reason_code_receive_max_exceeded": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_disconnect_sent_reason_code_retain_not_supported": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_disconnect_sent_reason_code_server_busy": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_disconnect_sent_reason_code_server_moved": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_disconnect_sent_reason_code_server_shutting_down": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_disconnect_sent_reason_code_session_taken_over": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_disconnect_sent_reason_code_shared_subs_not_supported": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_disconnect_sent_reason_code_subscription_ids_not_supported": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_disconnect_sent_reason_code_topic_alias_invalid": 0, + 
"node_VerneMQ@172.17.0.2_mqtt5_mqtt_disconnect_sent_reason_code_topic_filter_invalid": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_disconnect_sent_reason_code_topic_name_invalid": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_disconnect_sent_reason_code_unspecified_error": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_disconnect_sent_reason_code_use_another_server": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_disconnect_sent_reason_code_wildcard_subs_not_supported": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_invalid_msg_size_error": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_pingreq_received": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_pingresp_sent": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_puback_invalid_error": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_puback_received": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_puback_received_reason_code_impl_specific_error": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_puback_received_reason_code_no_matching_subscribers": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_puback_received_reason_code_not_authorized": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_puback_received_reason_code_packet_id_in_use": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_puback_received_reason_code_payload_format_invalid": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_puback_received_reason_code_quota_exceeded": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_puback_received_reason_code_success": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_puback_received_reason_code_topic_name_invalid": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_puback_received_reason_code_unspecified_error": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_puback_sent": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_puback_sent_reason_code_impl_specific_error": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_puback_sent_reason_code_no_matching_subscribers": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_puback_sent_reason_code_not_authorized": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_puback_sent_reason_code_packet_id_in_use": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_puback_sent_reason_code_payload_format_invalid": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_puback_sent_reason_code_quota_exceeded": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_puback_sent_reason_code_success": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_puback_sent_reason_code_topic_name_invalid": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_puback_sent_reason_code_unspecified_error": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_pubcomp_invalid_error": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_pubcomp_received": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_pubcomp_received_reason_code_packet_id_not_found": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_pubcomp_received_reason_code_success": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_pubcomp_sent": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_pubcomp_sent_reason_code_packet_id_not_found": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_pubcomp_sent_reason_code_success": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_publish_auth_error": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_publish_error": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_publish_received": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_publish_sent": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_pubrec_received": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_pubrec_received_reason_code_impl_specific_error": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_pubrec_received_reason_code_no_matching_subscribers": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_pubrec_received_reason_code_not_authorized": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_pubrec_received_reason_code_packet_id_in_use": 0, + 
"node_VerneMQ@172.17.0.2_mqtt5_mqtt_pubrec_received_reason_code_payload_format_invalid": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_pubrec_received_reason_code_quota_exceeded": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_pubrec_received_reason_code_success": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_pubrec_received_reason_code_topic_name_invalid": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_pubrec_received_reason_code_unspecified_error": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_pubrec_sent": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_pubrec_sent_reason_code_impl_specific_error": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_pubrec_sent_reason_code_no_matching_subscribers": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_pubrec_sent_reason_code_not_authorized": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_pubrec_sent_reason_code_packet_id_in_use": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_pubrec_sent_reason_code_payload_format_invalid": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_pubrec_sent_reason_code_quota_exceeded": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_pubrec_sent_reason_code_success": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_pubrec_sent_reason_code_topic_name_invalid": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_pubrec_sent_reason_code_unspecified_error": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_pubrel_received": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_pubrel_received_reason_code_packet_id_not_found": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_pubrel_received_reason_code_success": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_pubrel_sent": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_pubrel_sent_reason_code_packet_id_not_found": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_pubrel_sent_reason_code_success": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_suback_sent": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_subscribe_auth_error": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_subscribe_error": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_subscribe_received": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_unsuback_sent": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_unsubscribe_error": 0, + "node_VerneMQ@172.17.0.2_mqtt5_mqtt_unsubscribe_received": 0, + "node_VerneMQ@172.17.0.2_netsplit_detected": 0, + "node_VerneMQ@172.17.0.2_netsplit_resolved": 0, + "node_VerneMQ@172.17.0.2_netsplit_unresolved": 0, + "node_VerneMQ@172.17.0.2_open_sockets": 0, + "node_VerneMQ@172.17.0.2_queue_initialized_from_storage": 0, + "node_VerneMQ@172.17.0.2_queue_message_drop": 0, + "node_VerneMQ@172.17.0.2_queue_message_expired": 0, + "node_VerneMQ@172.17.0.2_queue_message_in": 525722, + "node_VerneMQ@172.17.0.2_queue_message_out": 525721, + "node_VerneMQ@172.17.0.2_queue_message_unhandled": 1, + "node_VerneMQ@172.17.0.2_queue_processes": 0, + "node_VerneMQ@172.17.0.2_queue_setup": 338948, + "node_VerneMQ@172.17.0.2_queue_teardown": 338948, + "node_VerneMQ@172.17.0.2_queued_messages": 0, + "node_VerneMQ@172.17.0.2_retain_memory": 11344, + "node_VerneMQ@172.17.0.2_retain_messages": 0, + "node_VerneMQ@172.17.0.2_router_matches_local": 525722, + "node_VerneMQ@172.17.0.2_router_matches_remote": 0, + "node_VerneMQ@172.17.0.2_router_memory": 12752, + "node_VerneMQ@172.17.0.2_router_subscriptions": 0, + "node_VerneMQ@172.17.0.2_socket_close": 338956, + "node_VerneMQ@172.17.0.2_socket_close_timeout": 0, + "node_VerneMQ@172.17.0.2_socket_error": 0, + "node_VerneMQ@172.17.0.2_socket_open": 338956, + "node_VerneMQ@172.17.0.2_system_context_switches": 39088198, + "node_VerneMQ@172.17.0.2_system_exact_reductions": 3854024620, + "node_VerneMQ@172.17.0.2_system_gc_count": 12189976, + 
"node_VerneMQ@172.17.0.2_system_io_in": 68998296, + "node_VerneMQ@172.17.0.2_system_io_out": 961001488, + "node_VerneMQ@172.17.0.2_system_process_count": 329, + "node_VerneMQ@172.17.0.2_system_reductions": 3857458067, + "node_VerneMQ@172.17.0.2_system_run_queue": 0, + "node_VerneMQ@172.17.0.2_system_runtime": 1775355, + "node_VerneMQ@172.17.0.2_system_utilization": 9, + "node_VerneMQ@172.17.0.2_system_wallclock": 163457858, + "node_VerneMQ@172.17.0.2_system_words_reclaimed_by_gc": 7158470019, + "node_VerneMQ@172.17.0.2_vm_memory_atom": 768953, + "node_VerneMQ@172.17.0.2_vm_memory_atom_used": 755998, + "node_VerneMQ@172.17.0.2_vm_memory_binary": 1293672, + "node_VerneMQ@172.17.0.2_vm_memory_code": 11372082, + "node_VerneMQ@172.17.0.2_vm_memory_ets": 6065944, + "node_VerneMQ@172.17.0.2_vm_memory_processes": 8673288, + "node_VerneMQ@172.17.0.2_vm_memory_processes_used": 8671232, + "node_VerneMQ@172.17.0.2_vm_memory_system": 27051848, + "node_VerneMQ@172.17.0.2_vm_memory_total": 35725136, + }, + }, + "success on valid response ver 2.0.1": { + prepare: caseOkVer201, + wantNumOfCharts: len(nodeChartsTmpl) + len(nodeMqtt4ChartsTmpl) + len(nodeMqtt5ChartsTmpl), + wantMetrics: map[string]int64{ + "node_VerneMQ@10.10.10.20_active_mqtt_connections": 0, + "node_VerneMQ@10.10.10.20_active_mqttws_connections": 0, + "node_VerneMQ@10.10.10.20_bytes_received": 0, + "node_VerneMQ@10.10.10.20_bytes_sent": 0, + "node_VerneMQ@10.10.10.20_client_expired": 0, + "node_VerneMQ@10.10.10.20_cluster_bytes_dropped": 0, + "node_VerneMQ@10.10.10.20_cluster_bytes_received": 0, + "node_VerneMQ@10.10.10.20_cluster_bytes_sent": 0, + "node_VerneMQ@10.10.10.20_mqtt4_client_keepalive_expired": 0, + "node_VerneMQ@10.10.10.20_mqtt4_mqtt_connack_accepted_sent": 0, + "node_VerneMQ@10.10.10.20_mqtt4_mqtt_connack_bad_credentials_sent": 0, + "node_VerneMQ@10.10.10.20_mqtt4_mqtt_connack_identifier_rejected_sent": 0, + "node_VerneMQ@10.10.10.20_mqtt4_mqtt_connack_not_authorized_sent": 0, + "node_VerneMQ@10.10.10.20_mqtt4_mqtt_connack_sent": 0, + "node_VerneMQ@10.10.10.20_mqtt4_mqtt_connack_sent_return_code_bad_username_or_password": 0, + "node_VerneMQ@10.10.10.20_mqtt4_mqtt_connack_sent_return_code_client_identifier_not_valid": 0, + "node_VerneMQ@10.10.10.20_mqtt4_mqtt_connack_sent_return_code_not_authorized": 0, + "node_VerneMQ@10.10.10.20_mqtt4_mqtt_connack_sent_return_code_server_unavailable": 0, + "node_VerneMQ@10.10.10.20_mqtt4_mqtt_connack_sent_return_code_success": 0, + "node_VerneMQ@10.10.10.20_mqtt4_mqtt_connack_sent_return_code_unsupported_protocol_version": 0, + "node_VerneMQ@10.10.10.20_mqtt4_mqtt_connack_server_unavailable_sent": 0, + "node_VerneMQ@10.10.10.20_mqtt4_mqtt_connack_unacceptable_protocol_sent": 0, + "node_VerneMQ@10.10.10.20_mqtt4_mqtt_connect_received": 0, + "node_VerneMQ@10.10.10.20_mqtt4_mqtt_disconnect_received": 0, + "node_VerneMQ@10.10.10.20_mqtt4_mqtt_invalid_msg_size_error": 0, + "node_VerneMQ@10.10.10.20_mqtt4_mqtt_pingreq_received": 0, + "node_VerneMQ@10.10.10.20_mqtt4_mqtt_pingresp_sent": 0, + "node_VerneMQ@10.10.10.20_mqtt4_mqtt_puback_invalid_error": 0, + "node_VerneMQ@10.10.10.20_mqtt4_mqtt_puback_received": 0, + "node_VerneMQ@10.10.10.20_mqtt4_mqtt_puback_sent": 0, + "node_VerneMQ@10.10.10.20_mqtt4_mqtt_pubcomp_invalid_error": 0, + "node_VerneMQ@10.10.10.20_mqtt4_mqtt_pubcomp_received": 0, + "node_VerneMQ@10.10.10.20_mqtt4_mqtt_pubcomp_sent": 0, + "node_VerneMQ@10.10.10.20_mqtt4_mqtt_publish_auth_error": 0, + "node_VerneMQ@10.10.10.20_mqtt4_mqtt_publish_error": 0, + 
"node_VerneMQ@10.10.10.20_mqtt4_mqtt_publish_received": 0, + "node_VerneMQ@10.10.10.20_mqtt4_mqtt_publish_sent": 0, + "node_VerneMQ@10.10.10.20_mqtt4_mqtt_pubrec_invalid_error": 0, + "node_VerneMQ@10.10.10.20_mqtt4_mqtt_pubrec_received": 0, + "node_VerneMQ@10.10.10.20_mqtt4_mqtt_pubrec_sent": 0, + "node_VerneMQ@10.10.10.20_mqtt4_mqtt_pubrel_received": 0, + "node_VerneMQ@10.10.10.20_mqtt4_mqtt_pubrel_sent": 0, + "node_VerneMQ@10.10.10.20_mqtt4_mqtt_suback_sent": 0, + "node_VerneMQ@10.10.10.20_mqtt4_mqtt_subscribe_auth_error": 0, + "node_VerneMQ@10.10.10.20_mqtt4_mqtt_subscribe_error": 0, + "node_VerneMQ@10.10.10.20_mqtt4_mqtt_subscribe_received": 0, + "node_VerneMQ@10.10.10.20_mqtt4_mqtt_unsuback_sent": 0, + "node_VerneMQ@10.10.10.20_mqtt4_mqtt_unsubscribe_error": 0, + "node_VerneMQ@10.10.10.20_mqtt4_mqtt_unsubscribe_received": 0, + "node_VerneMQ@10.10.10.20_mqtt5_client_keepalive_expired": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_auth_received": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_auth_received_reason_code_continue_authentication": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_auth_received_reason_code_reauthenticate": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_auth_received_reason_code_success": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_auth_sent": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_auth_sent_reason_code_continue_authentication": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_auth_sent_reason_code_reauthenticate": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_auth_sent_reason_code_success": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_connack_sent": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_connack_sent_reason_code_bad_authentication_method": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_connack_sent_reason_code_bad_username_or_password": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_connack_sent_reason_code_banned": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_connack_sent_reason_code_client_identifier_not_valid": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_connack_sent_reason_code_connection_rate_exceeded": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_connack_sent_reason_code_impl_specific_error": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_connack_sent_reason_code_malformed_packet": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_connack_sent_reason_code_not_authorized": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_connack_sent_reason_code_packet_too_large": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_connack_sent_reason_code_payload_format_invalid": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_connack_sent_reason_code_protocol_error": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_connack_sent_reason_code_qos_not_supported": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_connack_sent_reason_code_quota_exceeded": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_connack_sent_reason_code_retain_not_supported": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_connack_sent_reason_code_server_busy": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_connack_sent_reason_code_server_moved": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_connack_sent_reason_code_server_unavailable": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_connack_sent_reason_code_success": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_connack_sent_reason_code_topic_name_invalid": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_connack_sent_reason_code_unspecified_error": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_connack_sent_reason_code_unsupported_protocol_version": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_connack_sent_reason_code_use_another_server": 0, + 
"node_VerneMQ@10.10.10.20_mqtt5_mqtt_connect_received": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_disconnect_received": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_disconnect_received_reason_code_administrative_action": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_disconnect_received_reason_code_disconnect_with_will_msg": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_disconnect_received_reason_code_impl_specific_error": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_disconnect_received_reason_code_malformed_packet": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_disconnect_received_reason_code_message_rate_too_high": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_disconnect_received_reason_code_normal_disconnect": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_disconnect_received_reason_code_packet_too_large": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_disconnect_received_reason_code_payload_format_invalid": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_disconnect_received_reason_code_protocol_error": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_disconnect_received_reason_code_quota_exceeded": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_disconnect_received_reason_code_receive_max_exceeded": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_disconnect_received_reason_code_topic_alias_invalid": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_disconnect_received_reason_code_topic_name_invalid": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_disconnect_received_reason_code_unspecified_error": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_disconnect_sent": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_disconnect_sent_reason_code_administrative_action": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_disconnect_sent_reason_code_connection_rate_exceeded": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_disconnect_sent_reason_code_impl_specific_error": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_disconnect_sent_reason_code_keep_alive_timeout": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_disconnect_sent_reason_code_malformed_packet": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_disconnect_sent_reason_code_max_connect_time": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_disconnect_sent_reason_code_message_rate_too_high": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_disconnect_sent_reason_code_normal_disconnect": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_disconnect_sent_reason_code_not_authorized": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_disconnect_sent_reason_code_packet_too_large": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_disconnect_sent_reason_code_payload_format_invalid": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_disconnect_sent_reason_code_protocol_error": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_disconnect_sent_reason_code_qos_not_supported": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_disconnect_sent_reason_code_quota_exceeded": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_disconnect_sent_reason_code_receive_max_exceeded": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_disconnect_sent_reason_code_retain_not_supported": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_disconnect_sent_reason_code_server_busy": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_disconnect_sent_reason_code_server_moved": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_disconnect_sent_reason_code_server_shutting_down": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_disconnect_sent_reason_code_session_taken_over": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_disconnect_sent_reason_code_shared_subs_not_supported": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_disconnect_sent_reason_code_subscription_ids_not_supported": 0, + 
"node_VerneMQ@10.10.10.20_mqtt5_mqtt_disconnect_sent_reason_code_topic_alias_invalid": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_disconnect_sent_reason_code_topic_filter_invalid": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_disconnect_sent_reason_code_topic_name_invalid": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_disconnect_sent_reason_code_unspecified_error": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_disconnect_sent_reason_code_use_another_server": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_disconnect_sent_reason_code_wildcard_subs_not_supported": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_invalid_msg_size_error": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_pingreq_received": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_pingresp_sent": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_puback_invalid_error": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_puback_received": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_puback_received_reason_code_impl_specific_error": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_puback_received_reason_code_no_matching_subscribers": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_puback_received_reason_code_not_authorized": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_puback_received_reason_code_packet_id_in_use": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_puback_received_reason_code_payload_format_invalid": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_puback_received_reason_code_quota_exceeded": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_puback_received_reason_code_success": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_puback_received_reason_code_topic_name_invalid": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_puback_received_reason_code_unspecified_error": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_puback_sent": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_puback_sent_reason_code_impl_specific_error": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_puback_sent_reason_code_no_matching_subscribers": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_puback_sent_reason_code_not_authorized": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_puback_sent_reason_code_packet_id_in_use": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_puback_sent_reason_code_payload_format_invalid": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_puback_sent_reason_code_quota_exceeded": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_puback_sent_reason_code_success": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_puback_sent_reason_code_topic_name_invalid": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_puback_sent_reason_code_unspecified_error": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_pubcomp_invalid_error": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_pubcomp_received": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_pubcomp_received_reason_code_packet_id_not_found": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_pubcomp_received_reason_code_success": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_pubcomp_sent": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_pubcomp_sent_reason_code_packet_id_not_found": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_pubcomp_sent_reason_code_success": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_publish_auth_error": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_publish_error": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_publish_received": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_publish_sent": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_pubrec_received": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_pubrec_received_reason_code_impl_specific_error": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_pubrec_received_reason_code_no_matching_subscribers": 0, + 
"node_VerneMQ@10.10.10.20_mqtt5_mqtt_pubrec_received_reason_code_not_authorized": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_pubrec_received_reason_code_packet_id_in_use": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_pubrec_received_reason_code_payload_format_invalid": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_pubrec_received_reason_code_quota_exceeded": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_pubrec_received_reason_code_success": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_pubrec_received_reason_code_topic_name_invalid": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_pubrec_received_reason_code_unspecified_error": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_pubrec_sent": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_pubrec_sent_reason_code_impl_specific_error": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_pubrec_sent_reason_code_no_matching_subscribers": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_pubrec_sent_reason_code_not_authorized": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_pubrec_sent_reason_code_packet_id_in_use": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_pubrec_sent_reason_code_payload_format_invalid": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_pubrec_sent_reason_code_quota_exceeded": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_pubrec_sent_reason_code_success": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_pubrec_sent_reason_code_topic_name_invalid": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_pubrec_sent_reason_code_unspecified_error": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_pubrel_received": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_pubrel_received_reason_code_packet_id_not_found": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_pubrel_received_reason_code_success": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_pubrel_sent": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_pubrel_sent_reason_code_packet_id_not_found": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_pubrel_sent_reason_code_success": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_suback_sent": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_subscribe_auth_error": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_subscribe_error": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_subscribe_received": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_unsuback_sent": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_unsubscribe_error": 0, + "node_VerneMQ@10.10.10.20_mqtt5_mqtt_unsubscribe_received": 0, + "node_VerneMQ@10.10.10.20_netsplit_detected": 0, + "node_VerneMQ@10.10.10.20_netsplit_resolved": 0, + "node_VerneMQ@10.10.10.20_netsplit_unresolved": 0, + "node_VerneMQ@10.10.10.20_open_sockets": 0, + "node_VerneMQ@10.10.10.20_queue_initialized_from_storage": 0, + "node_VerneMQ@10.10.10.20_queue_message_drop": 0, + "node_VerneMQ@10.10.10.20_queue_message_expired": 0, + "node_VerneMQ@10.10.10.20_queue_message_in": 0, + "node_VerneMQ@10.10.10.20_queue_message_out": 0, + "node_VerneMQ@10.10.10.20_queue_message_unhandled": 0, + "node_VerneMQ@10.10.10.20_queue_processes": 0, + "node_VerneMQ@10.10.10.20_queue_setup": 0, + "node_VerneMQ@10.10.10.20_queue_teardown": 0, + "node_VerneMQ@10.10.10.20_queued_messages": 0, + "node_VerneMQ@10.10.10.20_retain_memory": 15792, + "node_VerneMQ@10.10.10.20_retain_messages": 0, + "node_VerneMQ@10.10.10.20_router_matches_local": 0, + "node_VerneMQ@10.10.10.20_router_matches_remote": 0, + "node_VerneMQ@10.10.10.20_router_memory": 20224, + "node_VerneMQ@10.10.10.20_router_subscriptions": 0, + "node_VerneMQ@10.10.10.20_socket_close": 0, + "node_VerneMQ@10.10.10.20_socket_close_timeout": 0, + "node_VerneMQ@10.10.10.20_socket_error": 0, + "node_VerneMQ@10.10.10.20_socket_open": 0, + 
"node_VerneMQ@10.10.10.20_system_context_switches": 3902972, + "node_VerneMQ@10.10.10.20_system_exact_reductions": 340307987, + "node_VerneMQ@10.10.10.20_system_gc_count": 355236, + "node_VerneMQ@10.10.10.20_system_io_in": 30616669, + "node_VerneMQ@10.10.10.20_system_io_out": 1702500, + "node_VerneMQ@10.10.10.20_system_process_count": 465, + "node_VerneMQ@10.10.10.20_system_reductions": 340640218, + "node_VerneMQ@10.10.10.20_system_run_queue": 0, + "node_VerneMQ@10.10.10.20_system_runtime": 54249, + "node_VerneMQ@10.10.10.20_system_utilization": 5, + "node_VerneMQ@10.10.10.20_system_wallclock": 51082602, + "node_VerneMQ@10.10.10.20_system_words_reclaimed_by_gc": 661531058, + "node_VerneMQ@10.10.10.20_total_active_connections": 0, + "node_VerneMQ@10.10.10.20_vm_memory_atom": 639193, + "node_VerneMQ@10.10.10.20_vm_memory_atom_used": 634383, + "node_VerneMQ@10.10.10.20_vm_memory_binary": 729792, + "node_VerneMQ@10.10.10.20_vm_memory_code": 13546686, + "node_VerneMQ@10.10.10.20_vm_memory_ets": 6665000, + "node_VerneMQ@10.10.10.20_vm_memory_processes": 13754320, + "node_VerneMQ@10.10.10.20_vm_memory_processes_used": 13751224, + "node_VerneMQ@10.10.10.20_vm_memory_system": 44839192, + "node_VerneMQ@10.10.10.20_vm_memory_total": 58593512, + }, + }, + "fails on unexpected Prometheus response": { + prepare: caseUnexpectedPrometheusMetrics, + wantMetrics: nil, + }, + "fails on invalid data response": { + prepare: caseInvalidDataResponse, + wantMetrics: nil, + }, + "fails on connection refused": { + prepare: caseConnectionRefused, + wantMetrics: nil, + }, + "fails on 404 response": { + prepare: case404, + wantMetrics: nil, + }, + } - assert.Nil(t, verneMQ.Collect()) -} + for name, test := range tests { + t.Run(name, func(t *testing.T) { + vmq, cleanup := test.prepare(t) + defer cleanup() -func TestVerneMQ_Collect_ReturnsNilIfReceiveInvalidResponse(t *testing.T) { - verneMQ, ts := prepareClientServerInvalid(t) - defer ts.Close() + mx := vmq.Collect() - assert.Nil(t, verneMQ.Collect()) -} + require.Equal(t, test.wantMetrics, mx) -func TestVerneMQ_Collect_ReturnsNilIfReceiveResponse404(t *testing.T) { - verneMQ, ts := prepareClientServerResponse404(t) - defer ts.Close() + if len(test.wantMetrics) > 0 { + assert.Equal(t, test.wantNumOfCharts, len(*vmq.Charts()), "want charts") - assert.Nil(t, verneMQ.Collect()) + module.TestMetricsHasAllChartsDims(t, vmq.Charts(), mx) + } + }) + } } -func testCharts(t *testing.T, verneMQ *VerneMQ, collected map[string]int64) { - ensureCollectedHasAllChartsDimsVarsIDs(t, verneMQ, collected) -} +func caseOkVer201(t *testing.T) (*VerneMQ, func()) { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write(dataVer201Metrics) + })) -func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, verneMQ *VerneMQ, collected map[string]int64) { - for _, chart := range *verneMQ.Charts() { - for _, dim := range chart.Dims { - _, ok := collected[dim.ID] - assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) - } - for _, v := range chart.Vars { - _, ok := collected[v.ID] - assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) - } - } -} + vmq := New() + vmq.URL = srv.URL + require.NoError(t, vmq.Init()) -func prepareVerneMQ() *VerneMQ { - verneMQ := New() - verneMQ.URL = "http://127.0.0.1:38001/metrics" - return verneMQ + return vmq, srv.Close } -func prepareClientServerV1101(t *testing.T) (*VerneMQ, *httptest.Server) { +func caseOkVer1101(t 
*testing.T) (*VerneMQ, func()) { t.Helper() - ts := httptest.NewServer(http.HandlerFunc( + srv := httptest.NewServer(http.HandlerFunc( func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(dataVer1101MQTTv5Metrics) + _, _ = w.Write(dataVer1101Metrics) })) - verneMQ := New() - verneMQ.URL = ts.URL - require.NoError(t, verneMQ.Init()) + vmq := New() + vmq.URL = srv.URL + require.NoError(t, vmq.Init()) - return verneMQ, ts + return vmq, srv.Close } -func prepareClientServerNotVerneMQ(t *testing.T) (*VerneMQ, *httptest.Server) { +func caseUnexpectedPrometheusMetrics(t *testing.T) (*VerneMQ, func()) { + data := ` +# HELP wmi_os_process_memory_limix_bytes OperatingSystem.MaxProcessMemorySize +# TYPE wmi_os_process_memory_limix_bytes gauge +wmi_os_process_memory_limix_bytes 1.40737488224256e+14 +# HELP wmi_os_processes OperatingSystem.NumberOfProcesses +# TYPE wmi_os_processes gauge +wmi_os_processes 124 +# HELP wmi_os_processes_limit OperatingSystem.MaxNumberOfProcesses +# TYPE wmi_os_processes_limit gauge +wmi_os_processes_limit 4.294967295e+09 +` t.Helper() - ts := httptest.NewServer(http.HandlerFunc( + srv := httptest.NewServer(http.HandlerFunc( func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(dataUnexpectedMetrics) + _, _ = w.Write([]byte(data)) })) - verneMQ := New() - verneMQ.URL = ts.URL - require.NoError(t, verneMQ.Init()) + vmq := New() + vmq.URL = srv.URL + require.NoError(t, vmq.Init()) - return verneMQ, ts + return vmq, srv.Close } -func prepareClientServerInvalid(t *testing.T) (*VerneMQ, *httptest.Server) { +func caseInvalidDataResponse(t *testing.T) (*VerneMQ, func()) { t.Helper() - ts := httptest.NewServer(http.HandlerFunc( + srv := httptest.NewServer(http.HandlerFunc( func(w http.ResponseWriter, r *http.Request) { _, _ = w.Write([]byte("hello and\n goodbye")) })) + vmq := New() + vmq.URL = srv.URL + require.NoError(t, vmq.Init()) - verneMQ := New() - verneMQ.URL = ts.URL - require.NoError(t, verneMQ.Init()) + return vmq, srv.Close +} - return verneMQ, ts +func caseConnectionRefused(t *testing.T) (*VerneMQ, func()) { + t.Helper() + vmq := New() + vmq.URL = "http://127.0.0.1:65001" + require.NoError(t, vmq.Init()) + + return vmq, func() {} } -func prepareClientServerResponse404(t *testing.T) (*VerneMQ, *httptest.Server) { +func case404(t *testing.T) (*VerneMQ, func()) { t.Helper() - ts := httptest.NewServer(http.HandlerFunc( + srv := httptest.NewServer(http.HandlerFunc( func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusNotFound) })) - verneMQ := New() - verneMQ.URL = ts.URL - require.NoError(t, verneMQ.Init()) - return verneMQ, ts -} + vmq := New() + vmq.URL = srv.URL + require.NoError(t, vmq.Init()) -var v1101ExpectedMetrics = map[string]int64{ - "bytes_received": 36796908, - "bytes_sent": 23361693, - "client_keepalive_expired": 1, - "cluster_bytes_dropped": 0, - "cluster_bytes_received": 0, - "cluster_bytes_sent": 0, - "mqtt_auth_received": 0, - "mqtt_auth_received_continue_authentication": 0, - "mqtt_auth_received_reauthenticate": 0, - "mqtt_auth_received_success": 0, - "mqtt_auth_received_v_5": 0, - "mqtt_auth_received_v_5_continue_authentication": 0, - "mqtt_auth_received_v_5_reauthenticate": 0, - "mqtt_auth_received_v_5_success": 0, - "mqtt_auth_sent": 0, - "mqtt_auth_sent_continue_authentication": 0, - "mqtt_auth_sent_reauthenticate": 0, - "mqtt_auth_sent_success": 0, - "mqtt_auth_sent_v_5": 0, - "mqtt_auth_sent_v_5_continue_authentication": 0, - "mqtt_auth_sent_v_5_reauthenticate": 0, - "mqtt_auth_sent_v_5_success": 0, - 
"mqtt_connack_sent": 338956, - "mqtt_connack_sent_bad_authentication_method": 0, - "mqtt_connack_sent_bad_username_or_password": 4, - "mqtt_connack_sent_banned": 0, - "mqtt_connack_sent_client_identifier_not_valid": 0, - "mqtt_connack_sent_connection_rate_exceeded": 0, - "mqtt_connack_sent_impl_specific_error": 0, - "mqtt_connack_sent_malformed_packet": 0, - "mqtt_connack_sent_not_authorized": 4, - "mqtt_connack_sent_packet_too_large": 0, - "mqtt_connack_sent_payload_format_invalid": 0, - "mqtt_connack_sent_protocol_error": 0, - "mqtt_connack_sent_qos_not_supported": 0, - "mqtt_connack_sent_quota_exceeded": 0, - "mqtt_connack_sent_retain_not_supported": 0, - "mqtt_connack_sent_server_busy": 0, - "mqtt_connack_sent_server_moved": 0, - "mqtt_connack_sent_server_unavailable": 0, - "mqtt_connack_sent_success": 338948, - "mqtt_connack_sent_topic_name_invalid": 0, - "mqtt_connack_sent_unspecified_error": 0, - "mqtt_connack_sent_unsupported_protocol_version": 0, - "mqtt_connack_sent_use_another_server": 0, - "mqtt_connack_sent_v_4": 338956, - "mqtt_connack_sent_v_4_bad_username_or_password": 4, - "mqtt_connack_sent_v_4_client_identifier_not_valid": 0, - "mqtt_connack_sent_v_4_not_authorized": 4, - "mqtt_connack_sent_v_4_server_unavailable": 0, - "mqtt_connack_sent_v_4_success": 338948, - "mqtt_connack_sent_v_4_unsupported_protocol_version": 0, - "mqtt_connack_sent_v_5": 0, - "mqtt_connack_sent_v_5_bad_authentication_method": 0, - "mqtt_connack_sent_v_5_bad_username_or_password": 0, - "mqtt_connack_sent_v_5_banned": 0, - "mqtt_connack_sent_v_5_client_identifier_not_valid": 0, - "mqtt_connack_sent_v_5_connection_rate_exceeded": 0, - "mqtt_connack_sent_v_5_impl_specific_error": 0, - "mqtt_connack_sent_v_5_malformed_packet": 0, - "mqtt_connack_sent_v_5_not_authorized": 0, - "mqtt_connack_sent_v_5_packet_too_large": 0, - "mqtt_connack_sent_v_5_payload_format_invalid": 0, - "mqtt_connack_sent_v_5_protocol_error": 0, - "mqtt_connack_sent_v_5_qos_not_supported": 0, - "mqtt_connack_sent_v_5_quota_exceeded": 0, - "mqtt_connack_sent_v_5_retain_not_supported": 0, - "mqtt_connack_sent_v_5_server_busy": 0, - "mqtt_connack_sent_v_5_server_moved": 0, - "mqtt_connack_sent_v_5_server_unavailable": 0, - "mqtt_connack_sent_v_5_success": 0, - "mqtt_connack_sent_v_5_topic_name_invalid": 0, - "mqtt_connack_sent_v_5_unspecified_error": 0, - "mqtt_connack_sent_v_5_unsupported_protocol_version": 0, - "mqtt_connack_sent_v_5_use_another_server": 0, - "mqtt_connect_received": 338956, - "mqtt_connect_received_v_4": 338956, - "mqtt_connect_received_v_5": 0, - "mqtt_disconnect_received": 107, - "mqtt_disconnect_received_administrative_action": 0, - "mqtt_disconnect_received_disconnect_with_will_msg": 0, - "mqtt_disconnect_received_impl_specific_error": 0, - "mqtt_disconnect_received_malformed_packet": 0, - "mqtt_disconnect_received_message_rate_too_high": 0, - "mqtt_disconnect_received_normal_disconnect": 0, - "mqtt_disconnect_received_packet_too_large": 0, - "mqtt_disconnect_received_payload_format_invalid": 0, - "mqtt_disconnect_received_protocol_error": 0, - "mqtt_disconnect_received_quota_exceeded": 0, - "mqtt_disconnect_received_receive_max_exceeded": 0, - "mqtt_disconnect_received_topic_alias_invalid": 0, - "mqtt_disconnect_received_topic_name_invalid": 0, - "mqtt_disconnect_received_unspecified_error": 0, - "mqtt_disconnect_received_v_4": 107, - "mqtt_disconnect_received_v_5": 0, - "mqtt_disconnect_received_v_5_administrative_action": 0, - "mqtt_disconnect_received_v_5_disconnect_with_will_msg": 0, - 
"mqtt_disconnect_received_v_5_impl_specific_error": 0, - "mqtt_disconnect_received_v_5_malformed_packet": 0, - "mqtt_disconnect_received_v_5_message_rate_too_high": 0, - "mqtt_disconnect_received_v_5_normal_disconnect": 0, - "mqtt_disconnect_received_v_5_packet_too_large": 0, - "mqtt_disconnect_received_v_5_payload_format_invalid": 0, - "mqtt_disconnect_received_v_5_protocol_error": 0, - "mqtt_disconnect_received_v_5_quota_exceeded": 0, - "mqtt_disconnect_received_v_5_receive_max_exceeded": 0, - "mqtt_disconnect_received_v_5_topic_alias_invalid": 0, - "mqtt_disconnect_received_v_5_topic_name_invalid": 0, - "mqtt_disconnect_received_v_5_unspecified_error": 0, - "mqtt_disconnect_sent": 0, - "mqtt_disconnect_sent_administrative_action": 0, - "mqtt_disconnect_sent_connection_rate_exceeded": 0, - "mqtt_disconnect_sent_impl_specific_error": 0, - "mqtt_disconnect_sent_keep_alive_timeout": 0, - "mqtt_disconnect_sent_malformed_packet": 0, - "mqtt_disconnect_sent_max_connect_time": 0, - "mqtt_disconnect_sent_message_rate_too_high": 0, - "mqtt_disconnect_sent_normal_disconnect": 0, - "mqtt_disconnect_sent_not_authorized": 0, - "mqtt_disconnect_sent_packet_too_large": 0, - "mqtt_disconnect_sent_payload_format_invalid": 0, - "mqtt_disconnect_sent_protocol_error": 0, - "mqtt_disconnect_sent_qos_not_supported": 0, - "mqtt_disconnect_sent_quota_exceeded": 0, - "mqtt_disconnect_sent_receive_max_exceeded": 0, - "mqtt_disconnect_sent_retain_not_supported": 0, - "mqtt_disconnect_sent_server_busy": 0, - "mqtt_disconnect_sent_server_moved": 0, - "mqtt_disconnect_sent_server_shutting_down": 0, - "mqtt_disconnect_sent_session_taken_over": 0, - "mqtt_disconnect_sent_shared_subs_not_supported": 0, - "mqtt_disconnect_sent_subscription_ids_not_supported": 0, - "mqtt_disconnect_sent_topic_alias_invalid": 0, - "mqtt_disconnect_sent_topic_filter_invalid": 0, - "mqtt_disconnect_sent_topic_name_invalid": 0, - "mqtt_disconnect_sent_unspecified_error": 0, - "mqtt_disconnect_sent_use_another_server": 0, - "mqtt_disconnect_sent_v_5": 0, - "mqtt_disconnect_sent_v_5_administrative_action": 0, - "mqtt_disconnect_sent_v_5_connection_rate_exceeded": 0, - "mqtt_disconnect_sent_v_5_impl_specific_error": 0, - "mqtt_disconnect_sent_v_5_keep_alive_timeout": 0, - "mqtt_disconnect_sent_v_5_malformed_packet": 0, - "mqtt_disconnect_sent_v_5_max_connect_time": 0, - "mqtt_disconnect_sent_v_5_message_rate_too_high": 0, - "mqtt_disconnect_sent_v_5_normal_disconnect": 0, - "mqtt_disconnect_sent_v_5_not_authorized": 0, - "mqtt_disconnect_sent_v_5_packet_too_large": 0, - "mqtt_disconnect_sent_v_5_payload_format_invalid": 0, - "mqtt_disconnect_sent_v_5_protocol_error": 0, - "mqtt_disconnect_sent_v_5_qos_not_supported": 0, - "mqtt_disconnect_sent_v_5_quota_exceeded": 0, - "mqtt_disconnect_sent_v_5_receive_max_exceeded": 0, - "mqtt_disconnect_sent_v_5_retain_not_supported": 0, - "mqtt_disconnect_sent_v_5_server_busy": 0, - "mqtt_disconnect_sent_v_5_server_moved": 0, - "mqtt_disconnect_sent_v_5_server_shutting_down": 0, - "mqtt_disconnect_sent_v_5_session_taken_over": 0, - "mqtt_disconnect_sent_v_5_shared_subs_not_supported": 0, - "mqtt_disconnect_sent_v_5_subscription_ids_not_supported": 0, - "mqtt_disconnect_sent_v_5_topic_alias_invalid": 0, - "mqtt_disconnect_sent_v_5_topic_filter_invalid": 0, - "mqtt_disconnect_sent_v_5_topic_name_invalid": 0, - "mqtt_disconnect_sent_v_5_unspecified_error": 0, - "mqtt_disconnect_sent_v_5_use_another_server": 0, - "mqtt_disconnect_sent_v_5_wildcard_subs_not_supported": 0, - 
"mqtt_disconnect_sent_wildcard_subs_not_supported": 0, - "mqtt_invalid_msg_size_error": 0, - "mqtt_invalid_msg_size_error_v_4": 0, - "mqtt_invalid_msg_size_error_v_5": 0, - "mqtt_pingreq_received": 205, - "mqtt_pingreq_received_v_4": 205, - "mqtt_pingreq_received_v_5": 0, - "mqtt_pingresp_sent": 205, - "mqtt_pingresp_sent_v_4": 205, - "mqtt_pingresp_sent_v_5": 0, - "mqtt_puback_invalid_error": 0, - "mqtt_puback_invalid_error_v_4": 0, - "mqtt_puback_invalid_error_v_5": 0, - "mqtt_puback_received": 525694, - "mqtt_puback_received_impl_specific_error": 0, - "mqtt_puback_received_no_matching_subscribers": 0, - "mqtt_puback_received_not_authorized": 0, - "mqtt_puback_received_packet_id_in_use": 0, - "mqtt_puback_received_payload_format_invalid": 0, - "mqtt_puback_received_quota_exceeded": 0, - "mqtt_puback_received_success": 0, - "mqtt_puback_received_topic_name_invalid": 0, - "mqtt_puback_received_unspecified_error": 0, - "mqtt_puback_received_v_4": 525694, - "mqtt_puback_received_v_5": 0, - "mqtt_puback_received_v_5_impl_specific_error": 0, - "mqtt_puback_received_v_5_no_matching_subscribers": 0, - "mqtt_puback_received_v_5_not_authorized": 0, - "mqtt_puback_received_v_5_packet_id_in_use": 0, - "mqtt_puback_received_v_5_payload_format_invalid": 0, - "mqtt_puback_received_v_5_quota_exceeded": 0, - "mqtt_puback_received_v_5_success": 0, - "mqtt_puback_received_v_5_topic_name_invalid": 0, - "mqtt_puback_received_v_5_unspecified_error": 0, - "mqtt_puback_sent": 537068, - "mqtt_puback_sent_impl_specific_error": 0, - "mqtt_puback_sent_no_matching_subscribers": 0, - "mqtt_puback_sent_not_authorized": 0, - "mqtt_puback_sent_packet_id_in_use": 0, - "mqtt_puback_sent_payload_format_invalid": 0, - "mqtt_puback_sent_quota_exceeded": 0, - "mqtt_puback_sent_success": 0, - "mqtt_puback_sent_topic_name_invalid": 0, - "mqtt_puback_sent_unspecified_error": 0, - "mqtt_puback_sent_v_4": 537068, - "mqtt_puback_sent_v_5": 0, - "mqtt_puback_sent_v_5_impl_specific_error": 0, - "mqtt_puback_sent_v_5_no_matching_subscribers": 0, - "mqtt_puback_sent_v_5_not_authorized": 0, - "mqtt_puback_sent_v_5_packet_id_in_use": 0, - "mqtt_puback_sent_v_5_payload_format_invalid": 0, - "mqtt_puback_sent_v_5_quota_exceeded": 0, - "mqtt_puback_sent_v_5_success": 0, - "mqtt_puback_sent_v_5_topic_name_invalid": 0, - "mqtt_puback_sent_v_5_unspecified_error": 0, - "mqtt_pubcomp_invalid_error": 0, - "mqtt_pubcomp_invalid_error_v_4": 0, - "mqtt_pubcomp_invalid_error_v_5": 0, - "mqtt_pubcomp_received": 0, - "mqtt_pubcomp_received_packet_id_not_found": 0, - "mqtt_pubcomp_received_success": 0, - "mqtt_pubcomp_received_v_4": 0, - "mqtt_pubcomp_received_v_5": 0, - "mqtt_pubcomp_received_v_5_packet_id_not_found": 0, - "mqtt_pubcomp_received_v_5_success": 0, - "mqtt_pubcomp_sent": 0, - "mqtt_pubcomp_sent_packet_id_not_found": 0, - "mqtt_pubcomp_sent_success": 0, - "mqtt_pubcomp_sent_v_4": 0, - "mqtt_pubcomp_sent_v_5": 0, - "mqtt_pubcomp_sent_v_5_packet_id_not_found": 0, - "mqtt_pubcomp_sent_v_5_success": 0, - "mqtt_publish_auth_error": 0, - "mqtt_publish_auth_error_v_4": 0, - "mqtt_publish_auth_error_v_5": 0, - "mqtt_publish_error": 0, - "mqtt_publish_error_v_4": 0, - "mqtt_publish_error_v_5": 0, - "mqtt_publish_received": 537088, - "mqtt_publish_received_v_4": 537088, - "mqtt_publish_received_v_5": 0, - "mqtt_publish_sent": 525721, - "mqtt_publish_sent_v_4": 525721, - "mqtt_publish_sent_v_5": 0, - "mqtt_pubrec_invalid_error": 0, - "mqtt_pubrec_invalid_error_v_4": 0, - "mqtt_pubrec_received": 0, - "mqtt_pubrec_received_impl_specific_error": 0, - 
"mqtt_pubrec_received_no_matching_subscribers": 0, - "mqtt_pubrec_received_not_authorized": 0, - "mqtt_pubrec_received_packet_id_in_use": 0, - "mqtt_pubrec_received_payload_format_invalid": 0, - "mqtt_pubrec_received_quota_exceeded": 0, - "mqtt_pubrec_received_success": 0, - "mqtt_pubrec_received_topic_name_invalid": 0, - "mqtt_pubrec_received_unspecified_error": 0, - "mqtt_pubrec_received_v_4": 0, - "mqtt_pubrec_received_v_5": 0, - "mqtt_pubrec_received_v_5_impl_specific_error": 0, - "mqtt_pubrec_received_v_5_no_matching_subscribers": 0, - "mqtt_pubrec_received_v_5_not_authorized": 0, - "mqtt_pubrec_received_v_5_packet_id_in_use": 0, - "mqtt_pubrec_received_v_5_payload_format_invalid": 0, - "mqtt_pubrec_received_v_5_quota_exceeded": 0, - "mqtt_pubrec_received_v_5_success": 0, - "mqtt_pubrec_received_v_5_topic_name_invalid": 0, - "mqtt_pubrec_received_v_5_unspecified_error": 0, - "mqtt_pubrec_sent": 0, - "mqtt_pubrec_sent_impl_specific_error": 0, - "mqtt_pubrec_sent_no_matching_subscribers": 0, - "mqtt_pubrec_sent_not_authorized": 0, - "mqtt_pubrec_sent_packet_id_in_use": 0, - "mqtt_pubrec_sent_payload_format_invalid": 0, - "mqtt_pubrec_sent_quota_exceeded": 0, - "mqtt_pubrec_sent_success": 0, - "mqtt_pubrec_sent_topic_name_invalid": 0, - "mqtt_pubrec_sent_unspecified_error": 0, - "mqtt_pubrec_sent_v_4": 0, - "mqtt_pubrec_sent_v_5": 0, - "mqtt_pubrec_sent_v_5_impl_specific_error": 0, - "mqtt_pubrec_sent_v_5_no_matching_subscribers": 0, - "mqtt_pubrec_sent_v_5_not_authorized": 0, - "mqtt_pubrec_sent_v_5_packet_id_in_use": 0, - "mqtt_pubrec_sent_v_5_payload_format_invalid": 0, - "mqtt_pubrec_sent_v_5_quota_exceeded": 0, - "mqtt_pubrec_sent_v_5_success": 0, - "mqtt_pubrec_sent_v_5_topic_name_invalid": 0, - "mqtt_pubrec_sent_v_5_unspecified_error": 0, - "mqtt_pubrel_received": 0, - "mqtt_pubrel_received_packet_id_not_found": 0, - "mqtt_pubrel_received_success": 0, - "mqtt_pubrel_received_v_4": 0, - "mqtt_pubrel_received_v_5": 0, - "mqtt_pubrel_received_v_5_packet_id_not_found": 0, - "mqtt_pubrel_received_v_5_success": 0, - "mqtt_pubrel_sent": 0, - "mqtt_pubrel_sent_packet_id_not_found": 0, - "mqtt_pubrel_sent_success": 0, - "mqtt_pubrel_sent_v_4": 0, - "mqtt_pubrel_sent_v_5": 0, - "mqtt_pubrel_sent_v_5_packet_id_not_found": 0, - "mqtt_pubrel_sent_v_5_success": 0, - "mqtt_suback_sent": 122, - "mqtt_suback_sent_v_4": 122, - "mqtt_suback_sent_v_5": 0, - "mqtt_subscribe_auth_error": 0, - "mqtt_subscribe_auth_error_v_4": 0, - "mqtt_subscribe_auth_error_v_5": 0, - "mqtt_subscribe_error": 0, - "mqtt_subscribe_error_v_4": 0, - "mqtt_subscribe_error_v_5": 0, - "mqtt_subscribe_received": 122, - "mqtt_subscribe_received_v_4": 122, - "mqtt_subscribe_received_v_5": 0, - "mqtt_unsuback_sent": 108, - "mqtt_unsuback_sent_v_4": 108, - "mqtt_unsuback_sent_v_5": 0, - "mqtt_unsubscribe_error": 0, - "mqtt_unsubscribe_error_v_4": 0, - "mqtt_unsubscribe_error_v_5": 0, - "mqtt_unsubscribe_received": 108, - "mqtt_unsubscribe_received_v_4": 108, - "mqtt_unsubscribe_received_v_5": 0, - "netsplit_detected": 0, - "netsplit_resolved": 0, - "netsplit_unresolved": 0, - "open_sockets": 0, - "queue_initialized_from_storage": 0, - "queue_message_drop": 0, - "queue_message_expired": 0, - "queue_message_in": 525722, - "queue_message_out": 525721, - "queue_message_unhandled": 1, - "queue_processes": 0, - "queue_setup": 338948, - "queue_teardown": 338948, - "retain_memory": 11344, - "retain_messages": 0, - "router_matches_local": 525722, - "router_matches_remote": 0, - "router_memory": 12752, - "router_subscriptions": 0, - 
"socket_close": 338956, - "socket_close_timeout": 0, - "socket_error": 0, - "socket_open": 338956, - "system_context_switches": 39088198, - "system_gc_count": 12189976, - "system_io_in": 68998296, - "system_io_out": 961001488, - "system_process_count": 329, - "system_reductions": 3857458067, - "system_run_queue": 0, - "system_utilization": 9, - "system_utilization_scheduler_1": 34, - "system_utilization_scheduler_2": 8, - "system_utilization_scheduler_3": 14, - "system_utilization_scheduler_4": 19, - "system_utilization_scheduler_5": 0, - "system_utilization_scheduler_6": 0, - "system_utilization_scheduler_7": 0, - "system_utilization_scheduler_8": 0, - "system_wallclock": 163457858, - "system_words_reclaimed_by_gc": 7158470019, - "vm_memory_processes": 8673288, - "vm_memory_system": 27051848, + return vmq, srv.Close } diff --git a/src/go/plugin/go.d/modules/vsphere/config_schema.json b/src/go/plugin/go.d/modules/vsphere/config_schema.json index 8902e73ed..2f38cace9 100644 --- a/src/go/plugin/go.d/modules/vsphere/config_schema.json +++ b/src/go/plugin/go.d/modules/vsphere/config_schema.json @@ -153,7 +153,6 @@ "host_include", "vm_include" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/vsphere/discover/discover.go b/src/go/plugin/go.d/modules/vsphere/discover/discover.go index f73c58c66..fc7903573 100644 --- a/src/go/plugin/go.d/modules/vsphere/discover/discover.go +++ b/src/go/plugin/go.d/modules/vsphere/discover/discover.go @@ -7,10 +7,10 @@ import ( "strings" "time" + "github.com/netdata/netdata/go/plugins/logger" "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/vsphere/match" rs "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/vsphere/resources" - "github.com/netdata/netdata/go/plugins/logger" "github.com/vmware/govmomi/vim25/mo" "github.com/vmware/govmomi/vim25/types" ) diff --git a/src/go/plugin/go.d/modules/vsphere/init.go b/src/go/plugin/go.d/modules/vsphere/init.go index e9bfc4e5a..0869806ea 100644 --- a/src/go/plugin/go.d/modules/vsphere/init.go +++ b/src/go/plugin/go.d/modules/vsphere/init.go @@ -31,7 +31,7 @@ func (vs *VSphere) initClient() (*client.Client, error) { User: vs.Username, Password: vs.Password, Timeout: vs.Timeout.Duration(), - TLSConfig: vs.Client.TLSConfig, + TLSConfig: vs.ClientConfig.TLSConfig, } return client.New(config) } diff --git a/src/go/plugin/go.d/modules/vsphere/integrations/vmware_vcenter_server.md b/src/go/plugin/go.d/modules/vsphere/integrations/vmware_vcenter_server.md index 3f05eadfd..401d78f50 100644 --- a/src/go/plugin/go.d/modules/vsphere/integrations/vmware_vcenter_server.md +++ b/src/go/plugin/go.d/modules/vsphere/integrations/vmware_vcenter_server.md @@ -185,8 +185,8 @@ No action required. The configuration file name for this integration is `go.d/vsphere.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -224,7 +224,7 @@ The following options can be defined globally: update_every, autodetection_retry Metrics of hosts matching the selector will be collected. - Include pattern syntax: "/Datacenter pattern/Cluster pattern/Host pattern". -- Match pattern syntax: [simple patterns](/src/libnetdata/simple_pattern/README.md#simple-patterns). +- Match pattern syntax: [simple patterns](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md#simple-patterns). - Syntax: ```yaml @@ -240,7 +240,7 @@ Metrics of hosts matching the selector will be collected. Metrics of VMs matching the selector will be collected. - Include pattern syntax: "/Datacenter pattern/Cluster pattern/Host pattern/VM pattern". -- Match pattern syntax: [simple patterns](/src/libnetdata/simple_pattern/README.md#simple-patterns). +- Match pattern syntax: [simple patterns](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md#simple-patterns). - Syntax: ```yaml diff --git a/src/go/plugin/go.d/modules/vsphere/match/match.go b/src/go/plugin/go.d/modules/vsphere/match/match.go index 969b5d7c5..8e63a7104 100644 --- a/src/go/plugin/go.d/modules/vsphere/match/match.go +++ b/src/go/plugin/go.d/modules/vsphere/match/match.go @@ -6,8 +6,8 @@ import ( "fmt" "strings" + "github.com/netdata/netdata/go/plugins/pkg/matcher" rs "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/vsphere/resources" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher" ) type HostMatcher interface { diff --git a/src/go/plugin/go.d/modules/vsphere/match/match_test.go b/src/go/plugin/go.d/modules/vsphere/match/match_test.go index c11697783..a85f9f8f3 100644 --- a/src/go/plugin/go.d/modules/vsphere/match/match_test.go +++ b/src/go/plugin/go.d/modules/vsphere/match/match_test.go @@ -6,8 +6,8 @@ import ( "strings" "testing" + "github.com/netdata/netdata/go/plugins/pkg/matcher" "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/vsphere/resources" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher" "github.com/stretchr/testify/assert" ) diff --git a/src/go/plugin/go.d/modules/vsphere/scrape/scrape.go b/src/go/plugin/go.d/modules/vsphere/scrape/scrape.go index ef882d73e..354585361 100644 --- a/src/go/plugin/go.d/modules/vsphere/scrape/scrape.go +++ b/src/go/plugin/go.d/modules/vsphere/scrape/scrape.go @@ -9,9 +9,9 @@ import ( "sync" "time" + "github.com/netdata/netdata/go/plugins/logger" rs "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/vsphere/resources" - "github.com/netdata/netdata/go/plugins/logger" "github.com/vmware/govmomi/performance" "github.com/vmware/govmomi/vim25/types" ) diff --git a/src/go/plugin/go.d/modules/vsphere/scrape/throttled_caller_test.go b/src/go/plugin/go.d/modules/vsphere/scrape/throttled_caller_test.go index 545ed1603..56ad75518 100644 --- a/src/go/plugin/go.d/modules/vsphere/scrape/throttled_caller_test.go +++ b/src/go/plugin/go.d/modules/vsphere/scrape/throttled_caller_test.go @@ -13,7 +13,7 @@ import ( func Test_throttledCaller(t *testing.T) { var current int64 - var max int64 + var maxv int64 var total int64 var mux sync.Mutex limit := 5 @@ -28,8 +28,8 @@ func Test_throttledCaller(t *testing.T) { mux.Lock() defer mux.Unlock() - if atomic.LoadInt64(¤t) > max { - max = atomic.LoadInt64(¤t) + if atomic.LoadInt64(¤t) > maxv { + maxv = atomic.LoadInt64(¤t) } atomic.AddInt64(¤t, -1) } @@ -38,5 +38,5 @@ func Test_throttledCaller(t *testing.T) { tc.wait() 
assert.Equal(t, int64(n), total) - assert.Equal(t, max, int64(limit)) + assert.Equal(t, maxv, int64(limit)) } diff --git a/src/go/plugin/go.d/modules/vsphere/vsphere.go b/src/go/plugin/go.d/modules/vsphere/vsphere.go index 8df3ce6f0..0797886e5 100644 --- a/src/go/plugin/go.d/modules/vsphere/vsphere.go +++ b/src/go/plugin/go.d/modules/vsphere/vsphere.go @@ -4,12 +4,14 @@ package vsphere import ( _ "embed" + "fmt" "sync" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/vsphere/match" rs "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/vsphere/resources" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" "github.com/vmware/govmomi/performance" @@ -32,12 +34,12 @@ func init() { func New() *VSphere { return &VSphere{ Config: Config{ - HTTP: web.HTTP{ - Client: web.Client{ - Timeout: web.Duration(time.Second * 20), + HTTPConfig: web.HTTPConfig{ + ClientConfig: web.ClientConfig{ + Timeout: confopt.Duration(time.Second * 20), }, }, - DiscoveryInterval: web.Duration(time.Minute * 5), + DiscoveryInterval: confopt.Duration(time.Minute * 5), HostsInclude: []string{"/*"}, VMsInclude: []string{"/*"}, }, @@ -51,8 +53,8 @@ func New() *VSphere { type Config struct { UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - web.HTTP `yaml:",inline" json:""` - DiscoveryInterval web.Duration `yaml:"discovery_interval,omitempty" json:"discovery_interval"` + web.HTTPConfig `yaml:",inline" json:""` + DiscoveryInterval confopt.Duration `yaml:"discovery_interval,omitempty" json:"discovery_interval"` HostsInclude match.HostIncludes `yaml:"host_include,omitempty" json:"host_include"` VMsInclude match.VMIncludes `yaml:"vm_include,omitempty" json:"vm_include"` } @@ -89,26 +91,22 @@ func (vs *VSphere) Configuration() any { func (vs *VSphere) Init() error { if err := vs.validateConfig(); err != nil { - vs.Errorf("error on validating config: %v", err) - return err + return fmt.Errorf("error on validating config: %v", err) } vsClient, err := vs.initClient() if err != nil { - vs.Errorf("error on creating vsphere client: %v", err) - return err + return fmt.Errorf("error on creating vsphere client: %v", err) } if err := vs.initDiscoverer(vsClient); err != nil { - vs.Errorf("error on creating vsphere discoverer: %v", err) - return err + return fmt.Errorf("error on creating vsphere discoverer: %v", err) } vs.initScraper(vsClient) if err := vs.discoverOnce(); err != nil { - vs.Errorf("error on discovering: %v", err) - return err + return fmt.Errorf("error on discovering: %v", err) } vs.goDiscovery() diff --git a/src/go/plugin/go.d/modules/vsphere/vsphere_test.go b/src/go/plugin/go.d/modules/vsphere/vsphere_test.go index c7a91e253..b0c22a1f7 100644 --- a/src/go/plugin/go.d/modules/vsphere/vsphere_test.go +++ b/src/go/plugin/go.d/modules/vsphere/vsphere_test.go @@ -12,7 +12,7 @@ import ( "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/vsphere/discover" "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/vsphere/match" rs "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/vsphere/resources" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -77,7 +77,7 @@ func TestVSphere_Init_ReturnsFalseIfPasswordNotSet(t *testing.T) { func TestVSphere_Init_ReturnsFalseIfClientWrongTLSCA(t 
*testing.T) { vSphere, _, teardown := prepareVSphereSim(t) defer teardown() - vSphere.Client.TLSConfig.TLSCA = "testdata/tls" + vSphere.ClientConfig.TLSConfig.TLSCA = "testdata/tls" assert.Error(t, vSphere.Init()) } @@ -330,9 +330,9 @@ func TestVSphere_Collect(t *testing.T) { "vm-72_sys.uptime.latest": 200, } - collected := vSphere.Collect() + mx := vSphere.Collect() - require.Equal(t, expected, collected) + require.Equal(t, expected, mx) count := model.Count() assert.Len(t, vSphere.discoveredHosts, count.Host) @@ -340,7 +340,7 @@ func TestVSphere_Collect(t *testing.T) { assert.Len(t, vSphere.charted, count.Host+count.Machine) assert.Len(t, *vSphere.Charts(), count.Host*len(hostChartsTmpl)+count.Machine*len(vmChartsTmpl)) - ensureCollectedHasAllChartsDimsVarsIDs(t, vSphere, collected) + module.TestMetricsHasAllChartsDims(t, vSphere.Charts(), mx) } func TestVSphere_Collect_RemoveHostsVMsInRuntime(t *testing.T) { @@ -402,7 +402,7 @@ func TestVSphere_Collect_Run(t *testing.T) { vSphere, model, teardown := prepareVSphereSim(t) defer teardown() - vSphere.DiscoveryInterval = web.Duration(time.Second * 2) + vSphere.DiscoveryInterval = confopt.Duration(time.Second * 2) require.NoError(t, vSphere.Init()) require.NoError(t, vSphere.Check()) @@ -421,19 +421,6 @@ func TestVSphere_Collect_Run(t *testing.T) { assert.Len(t, *vSphere.charts, count.Host*len(hostChartsTmpl)+count.Machine*len(vmChartsTmpl)) } -func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, vSphere *VSphere, collected map[string]int64) { - for _, chart := range *vSphere.Charts() { - for _, dim := range chart.Dims { - _, ok := collected[dim.ID] - assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) - } - for _, v := range chart.Vars { - _, ok := collected[v.ID] - assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) - } - } -} - func prepareVSphereSim(t *testing.T) (vSphere *VSphere, model *simulator.Model, teardown func()) { model, srv := createSim(t) vSphere = New() diff --git a/src/go/plugin/go.d/modules/w1sensor/README.md b/src/go/plugin/go.d/modules/w1sensor/README.md new file mode 120000 index 000000000..c0fa9cd1b --- /dev/null +++ b/src/go/plugin/go.d/modules/w1sensor/README.md @@ -0,0 +1 @@ +integrations/1-wire_sensors.md \ No newline at end of file diff --git a/src/go/plugin/go.d/modules/w1sensor/charts.go b/src/go/plugin/go.d/modules/w1sensor/charts.go new file mode 100644 index 000000000..b579b455a --- /dev/null +++ b/src/go/plugin/go.d/modules/w1sensor/charts.go @@ -0,0 +1,59 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +//go:build linux + +package w1sensor + +import ( + "fmt" + "strings" + + "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" +) + +const ( + prioTemperature = module.Priority + iota +) + +var ( + sensorChartTmpl = module.Chart{ + ID: "w1sensor_%s_temperature", + Title: "1-Wire Temperature Sensor", + Units: "Celsius", + Fam: "Temperature", + Ctx: "w1sensor.temperature", + Type: module.Line, + Priority: prioTemperature, + Dims: module.Dims{ + {ID: "w1sensor_%s_temperature", Div: precision}, + }, + } +) + +func (w *W1sensor) addSensorChart(id string) { + chart := sensorChartTmpl.Copy() + + chart.ID = fmt.Sprintf(chart.ID, id) + chart.Labels = []module.Label{ + {Key: "sensor_id", Value: id}, + } + + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, id) + } + + if err := w.Charts().Add(chart); err != nil { + w.Warning(err) + } + +} + +func (w *W1sensor) removeSensorChart(id string) { + px := 
fmt.Sprintf("w1sensor_%s", id) + for _, chart := range *w.Charts() { + if strings.HasPrefix(chart.ID, px) { + chart.MarkRemove() + chart.MarkNotCreated() + } + } +} diff --git a/src/go/plugin/go.d/modules/w1sensor/collect.go b/src/go/plugin/go.d/modules/w1sensor/collect.go new file mode 100644 index 000000000..768ba3497 --- /dev/null +++ b/src/go/plugin/go.d/modules/w1sensor/collect.go @@ -0,0 +1,112 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +//go:build linux + +package w1sensor + +import ( + "bufio" + "errors" + "fmt" + "io/fs" + "os" + "path/filepath" + "strconv" + "strings" +) + +const precision = 10 + +func (w *W1sensor) collect() (map[string]int64, error) { + des, err := os.ReadDir(w.SensorsPath) + if err != nil { + return nil, err + } + + mx := make(map[string]int64) + seen := make(map[string]bool) + + for _, de := range des { + if !de.IsDir() { + continue + } + if !isW1sensorDir(de.Name()) { + w.Debugf("'%s' is not a w1sensor directory, skipping it", filepath.Join(w.SensorsPath, de.Name())) + continue + } + + filename := filepath.Join(w.SensorsPath, de.Name(), "w1_slave") + + temp, err := readW1sensorTemperature(filename) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + w.Debugf("'%s' doesn't have 'w1_slave', skipping it", filepath.Join(w.SensorsPath, de.Name())) + continue + } + return nil, fmt.Errorf("failed to read temperature from '%s': %w", filename, err) + } + + seen[de.Name()] = true + if !w.seenSensors[de.Name()] { + w.addSensorChart(de.Name()) + + } + + mx[fmt.Sprintf("w1sensor_%s_temperature", de.Name())] = temp + } + + for id := range w.seenSensors { + if !seen[id] { + delete(w.seenSensors, id) + w.removeSensorChart(id) + } + } + + if len(mx) == 0 { + return nil, errors.New("no w1 sensors found") + } + + return mx, nil +} + +func readW1sensorTemperature(filename string) (int64, error) { + file, err := os.Open(filename) + if err != nil { + return 0, err + } + defer func() { _ = file.Close() }() + + sc := bufio.NewScanner(file) + sc.Scan() + // The second line displays the retained values along with a temperature in milli degrees Centigrade after t=. 
+ sc.Scan() + + _, tempStr, ok := strings.Cut(strings.TrimSpace(sc.Text()), "t=") + if !ok { + return 0, errors.New("no temperature found") + } + + v, err := strconv.ParseInt(tempStr, 10, 64) + if err != nil { + return 0, err + } + + // t= is in millidegrees Celsius; store tenths of a degree (the chart dimension divides by precision=10), e.g. t=12435 -> 124 -> 12.4 on the chart. + return int64(float64(v) / 1000 * precision), nil +} + +func isW1sensorDir(dirName string) bool { + // Supported family members + // Based on linux/drivers/w1/w1_family.h and w1/slaves/w1_therm.c + for _, px := range []string{ + "10-", // W1_THERM_DS18S20 + "22-", // W1_THERM_DS1822 + "28-", // W1_THERM_DS18B20 + "3b-", // W1_THERM_DS1825 + "42-", // W1_THERM_DS28EA00 + } { + if strings.HasPrefix(dirName, px) { + return true + } + } + return false +} diff --git a/src/go/plugin/go.d/modules/w1sensor/config_schema.json b/src/go/plugin/go.d/modules/w1sensor/config_schema.json new file mode 100644 index 000000000..f14f372b2 --- /dev/null +++ b/src/go/plugin/go.d/modules/w1sensor/config_schema.json @@ -0,0 +1,31 @@ +{ + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "W1sensor collector configuration.", + "type": "object", + "properties": { + "update_every": { + "title": "Update every", + "description": "Data collection interval, measured in seconds.", + "type": "integer", + "minimum": 1, + "default": 1 + }, + "sensors_path": { + "title": "Sensors path", + "description": "Directory path containing sensor folders with w1_slave files.", + "type": "string", + "default": "/sys/bus/w1/devices" + } + }, + "required": [], + "patternProperties": { + "^name$": {} + } + }, + "uiSchema": { + "uiOptions": { + "fullPage": true + } + } +} diff --git a/src/go/plugin/go.d/modules/w1sensor/doc.go b/src/go/plugin/go.d/modules/w1sensor/doc.go new file mode 100644 index 000000000..8e20e8548 --- /dev/null +++ b/src/go/plugin/go.d/modules/w1sensor/doc.go @@ -0,0 +1,3 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package w1sensor diff --git a/src/go/plugin/go.d/modules/w1sensor/integrations/1-wire_sensors.md b/src/go/plugin/go.d/modules/w1sensor/integrations/1-wire_sensors.md new file mode 100644 index 000000000..ed329406e --- /dev/null +++ b/src/go/plugin/go.d/modules/w1sensor/integrations/1-wire_sensors.md @@ -0,0 +1,190 @@ + + +# 1-Wire Sensors + + + + + +Plugin: go.d.plugin +Module: w1sensor + + + +## Overview + +Monitor 1-Wire Sensors metrics with Netdata for optimal environmental conditions monitoring. Enhance your environmental monitoring with real-time insights and alerts. + + +The collector uses the wire, w1_gpio, and w1_therm kernel modules. Currently temperature sensors are supported and automatically detected. + +This collector is only supported on the following platforms: + +- Linux + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +The collector will try to auto detect available 1-Wire devices. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per sensor + +These metrics refer to the 1-Wire Sensor. + +This scope has no labels.
+ +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| w1sensor.temperature | temperature | Celsius | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Required Linux kernel modules + +Make sure `wire`, `w1_gpio`, and `w1_therm` kernel modules are loaded. + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/w1sensor.conf`. + + +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/w1sensor.conf +``` +#### Options + +The following options can be defined globally: update_every. + + +
    Config options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 1 | no | +| sensors_path | Directory path containing sensor folders with w1_slave files. | /sys/bus/w1/devices | no | + +
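+ +#### Verify kernel module prerequisites + +A minimal sanity check before configuring jobs (a sketch assuming the standard `modprobe` and `lsmod` tools are available and that you have sudo rights; the module names are the ones listed under Prerequisites above): + +```bash +sudo modprobe wire +sudo modprobe w1_gpio +sudo modprobe w1_therm +lsmod | grep -e wire -e w1_gpio -e w1_therm +``` + +Detected sensors then appear as directories under `/sys/bus/w1/devices`, the collector's default `sensors_path`.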
    + +#### Examples + +##### Custom sensor device path + +Monitors a virtual sensor when the w1_slave file is located in a custom directory instead of the default location. + +```yaml +jobs: + - name: custom_sensors_path + sensors_path: /custom/path/devices + +``` + + +## Troubleshooting + +### Debug Mode + +**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature. + +To troubleshoot issues with the `w1sensor` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m w1sensor + ``` + +### Getting Logs + +If you're encountering problems with the `w1sensor` collector, follow these steps to retrieve logs and identify potential issues: + +- **Run the command** specific to your system (systemd, non-systemd, or Docker container). +- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem. + +#### System with systemd + +Use the following command to view logs generated since the last Netdata service restart: + +```bash +journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep w1sensor +``` + +#### System without systemd + +Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name: + +```bash +grep w1sensor /var/log/netdata/collector.log +``` + +**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues. + +#### Docker Container + +If your Netdata runs in a Docker container named "netdata" (replace if different), use this command: + +```bash +docker logs netdata 2>&1 | grep w1sensor +``` + + diff --git a/src/go/plugin/go.d/modules/w1sensor/metadata.yaml b/src/go/plugin/go.d/modules/w1sensor/metadata.yaml new file mode 100644 index 000000000..920fce499 --- /dev/null +++ b/src/go/plugin/go.d/modules/w1sensor/metadata.yaml @@ -0,0 +1,95 @@ +plugin_name: go.d.plugin +modules: + - meta: + plugin_name: go.d.plugin + module_name: w1sensor + monitored_instance: + name: 1-Wire Sensors + link: "https://www.analog.com/en/product-category/1wire-temperature-sensors.html" + categories: + - data-collection.hardware-devices-and-sensors + icon_filename: "1-wire.png" + related_resources: + integrations: + list: [] + info_provided_to_referring_integrations: + description: "" + keywords: + - temperature + - sensor + - 1-wire + most_popular: false + overview: + data_collection: + metrics_description: | + Monitor 1-Wire Sensors metrics with Netdata for optimal environmental conditions monitoring. Enhance your environmental monitoring with real-time insights and alerts. + method_description: The collector uses the wire, w1_gpio, and w1_therm kernel modules. Currently temperature sensors are supported and automatically detected. 
+ supported_platforms: + include: + - Linux + exclude: [] + multi_instance: true + additional_permissions: + description: "" + default_behavior: + auto_detection: + description: "The collector will try to auto detect available 1-Wire devices." + limits: + description: "" + performance_impact: + description: "" + setup: + prerequisites: + list: + - title: "Required Linux kernel modules" + description: "Make sure `wire`, `w1_gpio`, and `w1_therm` kernel modules are loaded." + configuration: + file: + name: go.d/w1sensor.conf + options: + description: | + The following options can be defined globally: update_every. + folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 1 + required: false + - name: sensors_path + description: Directory path containing sensor folders with w1_slave files. + default_value: /sys/bus/w1/devices + required: false + examples: + folding: + title: "" + enabled: false + list: + - name: Custom sensor device path + description: Monitors a virtual sensor when the w1_slave file is located in a custom directory instead of the default location. + config: | + jobs: + - name: custom_sensors_path + sensors_path: /custom/path/devices + troubleshooting: + problems: + list: [] + alerts: [] + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: sensor + description: These metrics refer to the 1-Wire Sensor. + labels: [] + metrics: + - name: w1sensor.temperature + description: 1-Wire Temperature Sensor + unit: "Celsius" + chart_type: line + dimensions: + - name: temperature diff --git a/src/go/plugin/go.d/modules/w1sensor/testdata/config.json b/src/go/plugin/go.d/modules/w1sensor/testdata/config.json new file mode 100644 index 000000000..7409104c1 --- /dev/null +++ b/src/go/plugin/go.d/modules/w1sensor/testdata/config.json @@ -0,0 +1,4 @@ +{ + "update_every": 123, + "sensors_path": "ok" +} diff --git a/src/go/plugin/go.d/modules/w1sensor/testdata/config.yaml b/src/go/plugin/go.d/modules/w1sensor/testdata/config.yaml new file mode 100644 index 000000000..c897086f2 --- /dev/null +++ b/src/go/plugin/go.d/modules/w1sensor/testdata/config.yaml @@ -0,0 +1,2 @@ +update_every: 123 +sensors_path: "ok" diff --git a/src/go/plugin/go.d/modules/w1sensor/testdata/devices/28-01204e9d2fa0/w1_slave b/src/go/plugin/go.d/modules/w1sensor/testdata/devices/28-01204e9d2fa0/w1_slave new file mode 100644 index 000000000..b37c46650 --- /dev/null +++ b/src/go/plugin/go.d/modules/w1sensor/testdata/devices/28-01204e9d2fa0/w1_slave @@ -0,0 +1,2 @@ +17 01 4b 46 7f ff 0c 10 71 : crc=71 YES +17 01 4b 46 7f ff 0c 10 71 t=12435 \ No newline at end of file diff --git a/src/go/plugin/go.d/modules/w1sensor/testdata/devices/28-01204e9d2fa1/w1_slave b/src/go/plugin/go.d/modules/w1sensor/testdata/devices/28-01204e9d2fa1/w1_slave new file mode 100644 index 000000000..d4dee090e --- /dev/null +++ b/src/go/plugin/go.d/modules/w1sensor/testdata/devices/28-01204e9d2fa1/w1_slave @@ -0,0 +1,2 @@ +17 01 4b 46 7f ff 0c 10 71 : crc=71 YES +17 01 4b 46 7f ff 0c 10 71 t=29960 \ No newline at end of file diff --git a/src/go/plugin/go.d/modules/w1sensor/testdata/devices/28-01204e9d2fa2/w1_slave b/src/go/plugin/go.d/modules/w1sensor/testdata/devices/28-01204e9d2fa2/w1_slave new file mode 100644 index 000000000..342fa5164 --- /dev/null +++ b/src/go/plugin/go.d/modules/w1sensor/testdata/devices/28-01204e9d2fa2/w1_slave @@ -0,0 +1,2 @@ +17 01 4b 46 7f ff 0c 10 71 : crc=71 YES +17 01 4b 46 7f ff 0c 
10 71 t=10762 \ No newline at end of file diff --git a/src/go/plugin/go.d/modules/w1sensor/testdata/devices/28-01204e9d2fa3/w1_slave b/src/go/plugin/go.d/modules/w1sensor/testdata/devices/28-01204e9d2fa3/w1_slave new file mode 100644 index 000000000..f1ec47dfe --- /dev/null +++ b/src/go/plugin/go.d/modules/w1sensor/testdata/devices/28-01204e9d2fa3/w1_slave @@ -0,0 +1,2 @@ +17 01 4b 46 7f ff 0c 10 71 : crc=71 YES +17 01 4b 46 7f ff 0c 10 71 t=22926 \ No newline at end of file diff --git a/src/go/plugin/go.d/modules/w1sensor/w1sensor.go b/src/go/plugin/go.d/modules/w1sensor/w1sensor.go new file mode 100644 index 000000000..467dee1a4 --- /dev/null +++ b/src/go/plugin/go.d/modules/w1sensor/w1sensor.go @@ -0,0 +1,96 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +//go:build linux + +package w1sensor + +import ( + _ "embed" + "errors" + + "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("w1sensor", module.Creator{ + JobConfigSchema: configSchema, + Defaults: module.Defaults{ + UpdateEvery: 1, + }, + Create: func() module.Module { return New() }, + Config: func() any { return &Config{} }, + }) +} + +func New() *W1sensor { + return &W1sensor{ + Config: Config{ + SensorsPath: "/sys/bus/w1/devices", + }, + charts: &module.Charts{}, + seenSensors: make(map[string]bool), + } +} + +type Config struct { + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + SensorsPath string `yaml:"sensors_path,omitempty" json:"sensors_path"` +} + +type ( + W1sensor struct { + module.Base + Config `yaml:",inline" json:""` + + charts *module.Charts + + seenSensors map[string]bool + } +) + +func (w *W1sensor) Configuration() any { + return w.Config +} + +func (w *W1sensor) Init() error { + if w.SensorsPath == "" { + return errors.New("config: no sensors path specified") + } + + return nil +} + +func (w *W1sensor) Check() error { + mx, err := w.collect() + if err != nil { + return err + } + + if len(mx) == 0 { + return errors.New("no metrics collected") + } + + return nil +} + +func (w *W1sensor) Charts() *module.Charts { + return w.charts +} + +func (w *W1sensor) Collect() map[string]int64 { + mx, err := w.collect() + if err != nil { + w.Error(err) + } + + if len(mx) == 0 { + return nil + } + + return mx +} + +func (w *W1sensor) Cleanup() {} diff --git a/src/go/plugin/go.d/modules/w1sensor/w1sensor_test.go b/src/go/plugin/go.d/modules/w1sensor/w1sensor_test.go new file mode 100644 index 000000000..21af99a55 --- /dev/null +++ b/src/go/plugin/go.d/modules/w1sensor/w1sensor_test.go @@ -0,0 +1,175 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +//go:build linux + +package w1sensor + +import ( + "os" + "testing" + + "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + dataConfigJSON, _ = os.ReadFile("testdata/config.json") + dataConfigYAML, _ = os.ReadFile("testdata/config.yaml") +) + +func Test_testDataIsValid(t *testing.T) { + for name, data := range map[string][]byte{ + "dataConfigJSON": dataConfigJSON, + "dataConfigYAML": dataConfigYAML, + } { + require.NotNil(t, data, name) + } +} + +func TestW1sensor_Configuration(t *testing.T) { + module.TestConfigurationSerialize(t, &W1sensor{}, dataConfigJSON, dataConfigYAML) +} + +func TestW1sensor_Init(t *testing.T) { + tests := map[string]struct { + config Config + wantFail bool + }{ + "fails if 'sensors_path' is not set": { + wantFail: 
true, + config: Config{ + SensorsPath: "", + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + w1 := New() + w1.Config = test.config + + if test.wantFail { + assert.Error(t, w1.Init()) + } else { + assert.NoError(t, w1.Init()) + } + }) + } +} + +func TestW1sensor_Cleanup(t *testing.T) { + tests := map[string]struct { + prepare func() *W1sensor + }{ + "not initialized": { + prepare: func() *W1sensor { + return New() + }, + }, + "after check": { + prepare: func() *W1sensor { + w1 := prepareCaseOk() + _ = w1.Check() + return w1 + }, + }, + "after collect": { + prepare: func() *W1sensor { + w1 := prepareCaseOk() + _ = w1.Collect() + return w1 + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + w1 := test.prepare() + + assert.NotPanics(t, w1.Cleanup) + }) + } +} + +func TestW1sensor_Charts(t *testing.T) { + assert.NotNil(t, New().Charts()) +} + +func TestW1sensor_Check(t *testing.T) { + tests := map[string]struct { + prepareMock func() *W1sensor + wantFail bool + }{ + "success case": { + wantFail: false, + prepareMock: prepareCaseOk, + }, + "no sensors dir": { + wantFail: true, + prepareMock: prepareCaseNoSensorsDir, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + w1 := test.prepareMock() + + if test.wantFail { + assert.Error(t, w1.Check()) + } else { + assert.NoError(t, w1.Check()) + } + }) + } +} + +func TestW1sensor_Collect(t *testing.T) { + tests := map[string]struct { + prepareMock func() *W1sensor + wantMetrics map[string]int64 + wantCharts int + }{ + "success case": { + prepareMock: prepareCaseOk, + wantCharts: 4, + wantMetrics: map[string]int64{ + "w1sensor_28-01204e9d2fa0_temperature": 124, + "w1sensor_28-01204e9d2fa1_temperature": 299, + "w1sensor_28-01204e9d2fa2_temperature": 107, + "w1sensor_28-01204e9d2fa3_temperature": 229, + }, + }, + "no sensors dir": { + prepareMock: prepareCaseNoSensorsDir, + wantMetrics: nil, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + w1 := test.prepareMock() + + mx := w1.Collect() + + assert.Equal(t, test.wantMetrics, mx) + + assert.Equal(t, test.wantCharts, len(*w1.Charts()), "wantCharts") + + module.TestMetricsHasAllChartsDims(t, w1.Charts(), mx) + }) + } +} + +func prepareCaseOk() *W1sensor { + w1 := New() + w1.SensorsPath = "testdata/devices" + return w1 +} + +func prepareCaseNoSensorsDir() *W1sensor { + w1 := New() + w1.SensorsPath = "testdata/devices!" + return w1 +} diff --git a/src/go/plugin/go.d/modules/weblog/config_schema.json b/src/go/plugin/go.d/modules/weblog/config_schema.json index 61da661a4..8417adadc 100644 --- a/src/go/plugin/go.d/modules/weblog/config_schema.json +++ b/src/go/plugin/go.d/modules/weblog/config_schema.json @@ -23,6 +23,12 @@ "type": "string", "default": "*.gz" }, + "group_response_codes": { + "title": "Group response codes", + "description": "Controls how HTTP response codes are grouped in charts.", + "type": "boolean", + "default": true + }, "histogram": { "title": "Request processing time histogram", "description": "Buckets for the histogram in milliseconds.", @@ -248,7 +254,6 @@ "path", "log_type" ], - "additionalProperties": false, "patternProperties": { "^name$": {} }, @@ -285,6 +290,18 @@ "description": "Delimiter used to separate fields in the log file. 
Default: space (' ').", "type": "string", "default": " " + }, + "trim_leading_space": { + "title": "Trim leading space", + "description": "If true, leading white space in a field is ignored.", + "type": "boolean", + "default": false + }, + "fields_per_record": { + "title": "Fields per record", + "description": "The number of expected fields per record.", + "type": "integer", + "default": -1 } }, "required": [ @@ -399,6 +416,12 @@ "uiOptions": { "fullPage": true }, + "group_response_codes": { + "ui:help": "When true, creates separate charts for each response code family (1xx, 2xx, 3xx, etc.), with individual response codes (200, 201, etc.) as dimensions within each family. When false, creates a single chart with all response codes as separate dimensions." + }, + "fields_per_record": { + "ui:help": "If negative, no check is made and records may have a variable number of fields." + }, "log_type": { "ui:widget": "radio", "ui:options": { @@ -420,6 +443,7 @@ "update_every", "path", "exclude_path", + "group_response_codes", "histogram" ] }, diff --git a/src/go/plugin/go.d/modules/weblog/init.go b/src/go/plugin/go.d/modules/weblog/init.go index c76e43f30..f927e7726 100644 --- a/src/go/plugin/go.d/modules/weblog/init.go +++ b/src/go/plugin/go.d/modules/weblog/init.go @@ -7,8 +7,8 @@ import ( "fmt" "strings" + "github.com/netdata/netdata/go/plugins/pkg/matcher" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/logs" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher" ) type pattern struct { diff --git a/src/go/plugin/go.d/modules/weblog/integrations/web_server_log_files.md b/src/go/plugin/go.d/modules/weblog/integrations/web_server_log_files.md index 740af5f1d..9ecd6689c 100644 --- a/src/go/plugin/go.d/modules/weblog/integrations/web_server_log_files.md +++ b/src/go/plugin/go.d/modules/weblog/integrations/web_server_log_files.md @@ -162,8 +162,8 @@ No action required. The configuration file name for this integration is `go.d/web_log.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -216,7 +216,7 @@ Notes: | exclude_path | Path to exclude. | *.gz | no | | url_patterns | List of URL patterns. | [] | no | | url_patterns.name | Used as a dimension name. | | yes | -| url_patterns.pattern | Used to match against full original request URI. Pattern syntax in [matcher](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/matcher#supported-format). | | yes | +| url_patterns.pattern | Used to match against full original request URI. Pattern syntax in [matcher](https://github.com/netdata/netdata/tree/master/src/go/pkg/matcher#supported-format). | | yes | | log_type | Log parser type. | auto | no | | csv_config | CSV log parser config. | | no | | csv_config.delimiter | CSV field delimiter. 
| , | no | diff --git a/src/go/plugin/go.d/modules/weblog/metadata.yaml b/src/go/plugin/go.d/modules/weblog/metadata.yaml index 7608b936c..0f0a66efd 100644 --- a/src/go/plugin/go.d/modules/weblog/metadata.yaml +++ b/src/go/plugin/go.d/modules/weblog/metadata.yaml @@ -124,7 +124,7 @@ modules: default_value: "" required: true - name: url_patterns.pattern - description: Used to match against full original request URI. Pattern syntax in [matcher](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/matcher#supported-format). + description: Used to match against full original request URI. Pattern syntax in [matcher](https://github.com/netdata/netdata/tree/master/src/go/pkg/matcher#supported-format). default_value: "" required: true - name: log_type diff --git a/src/go/plugin/go.d/modules/weblog/weblog.go b/src/go/plugin/go.d/modules/weblog/weblog.go index 242999e68..94fd0430a 100644 --- a/src/go/plugin/go.d/modules/weblog/weblog.go +++ b/src/go/plugin/go.d/modules/weblog/weblog.go @@ -4,6 +4,7 @@ package weblog import ( _ "embed" + "fmt" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/logs" @@ -101,18 +102,15 @@ func (w *WebLog) Configuration() any { func (w *WebLog) Init() error { if err := w.createURLPatterns(); err != nil { - w.Errorf("init failed: %v", err) - return err + return fmt.Errorf("init failed: %v", err) } if err := w.createCustomFields(); err != nil { - w.Errorf("init failed: %v", err) - return err + return fmt.Errorf("init failed: %v", err) } if err := w.createCustomTimeFields(); err != nil { - w.Errorf("init failed: %v", err) - return err + return fmt.Errorf("init failed: %v", err) } if err := w.createCustomNumericFields(); err != nil { @@ -128,18 +126,15 @@ func (w *WebLog) Init() error { func (w *WebLog) Check() error { // Note: these inits are here to make auto-detection retry working if err := w.createLogReader(); err != nil { - w.Warning("check failed: ", err) - return err + return fmt.Errorf("failed to create log reader: %v", err) } if err := w.createParser(); err != nil { - w.Warning("check failed: ", err) - return err + return fmt.Errorf("failed to create parser: %v", err) } if err := w.createCharts(w.line); err != nil { - w.Warning("check failed: ", err) - return err + return fmt.Errorf("failed to create charts: %v", err) } return nil diff --git a/src/go/plugin/go.d/modules/weblog/weblog_test.go b/src/go/plugin/go.d/modules/weblog/weblog_test.go index 1e36bbf68..028eca39f 100644 --- a/src/go/plugin/go.d/modules/weblog/weblog_test.go +++ b/src/go/plugin/go.d/modules/weblog/weblog_test.go @@ -737,16 +737,7 @@ func testCharts(t *testing.T, w *WebLog, mx map[string]int64) { testCustomTimeFieldCharts(t, w) testCustomNumericFieldCharts(t, w) - testChartsDimIDs(t, w, mx) -} - -func testChartsDimIDs(t *testing.T, w *WebLog, mx map[string]int64) { - for _, chart := range *w.Charts() { - for _, dim := range chart.Dims { - _, ok := mx[dim.ID] - assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) - } - } + module.TestMetricsHasAllChartsDims(t, w.Charts(), mx) } func testVhostChart(t *testing.T, w *WebLog) { diff --git a/src/go/plugin/go.d/modules/whoisquery/config_schema.json b/src/go/plugin/go.d/modules/whoisquery/config_schema.json index fd3ef4955..3e81e7b9e 100644 --- a/src/go/plugin/go.d/modules/whoisquery/config_schema.json +++ b/src/go/plugin/go.d/modules/whoisquery/config_schema.json @@ -41,7 +41,6 @@ "required": [ "source" ], - 
"additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/whoisquery/integrations/domain_expiration_date.md b/src/go/plugin/go.d/modules/whoisquery/integrations/domain_expiration_date.md index 78508e960..cac39c708 100644 --- a/src/go/plugin/go.d/modules/whoisquery/integrations/domain_expiration_date.md +++ b/src/go/plugin/go.d/modules/whoisquery/integrations/domain_expiration_date.md @@ -95,8 +95,8 @@ No action required. The configuration file name for this integration is `go.d/whoisquery.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/whoisquery/whoisquery.go b/src/go/plugin/go.d/modules/whoisquery/whoisquery.go index 1f59779b3..51f2c18a6 100644 --- a/src/go/plugin/go.d/modules/whoisquery/whoisquery.go +++ b/src/go/plugin/go.d/modules/whoisquery/whoisquery.go @@ -5,10 +5,11 @@ package whoisquery import ( _ "embed" "errors" + "fmt" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" ) //go:embed "config_schema.json" @@ -28,7 +29,7 @@ func init() { func New() *WhoisQuery { return &WhoisQuery{ Config: Config{ - Timeout: web.Duration(time.Second * 5), + Timeout: confopt.Duration(time.Second * 5), DaysUntilWarn: 30, DaysUntilCrit: 15, }, @@ -36,11 +37,11 @@ func New() *WhoisQuery { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - Source string `yaml:"source" json:"source"` - Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"` - DaysUntilWarn int64 `yaml:"days_until_expiration_warning,omitempty" json:"days_until_expiration_warning"` - DaysUntilCrit int64 `yaml:"days_until_expiration_critical,omitempty" json:"days_until_expiration_critical"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + Source string `yaml:"source" json:"source"` + Timeout confopt.Duration `yaml:"timeout,omitempty" json:"timeout"` + DaysUntilWarn int64 `yaml:"days_until_expiration_warning,omitempty" json:"days_until_expiration_warning"` + DaysUntilCrit int64 `yaml:"days_until_expiration_critical,omitempty" json:"days_until_expiration_critical"` } type WhoisQuery struct { @@ -58,14 +59,12 @@ func (w *WhoisQuery) Configuration() any { func (w *WhoisQuery) Init() error { if err := w.validateConfig(); err != nil { - w.Errorf("config validation: %v", err) - return err + return fmt.Errorf("config validation: %v", err) } prov, err := w.initProvider() if err != nil { - w.Errorf("init whois provider: %v", err) - return err + return fmt.Errorf("init whois provider: %v", err) } w.prov = prov @@ -77,7 +76,6 @@ func (w *WhoisQuery) Init() error { func (w *WhoisQuery) Check() error { mx, err := w.collect() if err != nil { - w.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/whoisquery/whoisquery_test.go 
b/src/go/plugin/go.d/modules/whoisquery/whoisquery_test.go index 4979c7f57..57ba6b051 100644 --- a/src/go/plugin/go.d/modules/whoisquery/whoisquery_test.go +++ b/src/go/plugin/go.d/modules/whoisquery/whoisquery_test.go @@ -101,7 +101,7 @@ func TestWhoisQuery_Collect(t *testing.T) { require.NoError(t, whoisquery.Init()) whoisquery.prov = &mockProvider{remTime: 12345} - collected := whoisquery.Collect() + mx := whoisquery.Collect() expected := map[string]int64{ "expiry": 12345, @@ -109,9 +109,9 @@ func TestWhoisQuery_Collect(t *testing.T) { "days_until_expiration_critical": 15, } - assert.NotZero(t, collected) - assert.Equal(t, expected, collected) - ensureCollectedHasAllChartsDimsVarsIDs(t, whoisquery, collected) + assert.NotZero(t, mx) + assert.Equal(t, expected, mx) + module.TestMetricsHasAllChartsDims(t, whoisquery.Charts(), mx) } func TestWhoisQuery_Collect_ReturnsNilOnProviderError(t *testing.T) { @@ -123,19 +123,6 @@ func TestWhoisQuery_Collect_ReturnsNilOnProviderError(t *testing.T) { assert.Nil(t, whoisquery.Collect()) } -func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, whoisquery *WhoisQuery, collected map[string]int64) { - for _, chart := range *whoisquery.Charts() { - for _, dim := range chart.Dims { - _, ok := collected[dim.ID] - assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) - } - for _, v := range chart.Vars { - _, ok := collected[v.ID] - assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) - } - } -} - type mockProvider struct { remTime float64 err bool diff --git a/src/go/plugin/go.d/modules/windows/config_schema.json b/src/go/plugin/go.d/modules/windows/config_schema.json index e1011e876..25b2bca70 100644 --- a/src/go/plugin/go.d/modules/windows/config_schema.json +++ b/src/go/plugin/go.d/modules/windows/config_schema.json @@ -108,7 +108,6 @@ "required": [ "url" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/windows/init.go b/src/go/plugin/go.d/modules/windows/init.go index 87faf40bd..4c7bbd6ad 100644 --- a/src/go/plugin/go.d/modules/windows/init.go +++ b/src/go/plugin/go.d/modules/windows/init.go @@ -17,9 +17,9 @@ func (w *Windows) validateConfig() error { } func (w *Windows) initPrometheusClient() (prometheus.Prometheus, error) { - client, err := web.NewHTTPClient(w.Client) + client, err := web.NewHTTPClient(w.ClientConfig) if err != nil { return nil, err } - return prometheus.New(client, w.Request), nil + return prometheus.New(client, w.RequestConfig), nil } diff --git a/src/go/plugin/go.d/modules/windows/integrations/active_directory.md b/src/go/plugin/go.d/modules/windows/integrations/active_directory.md index 6d255aba8..a538d5ca7 100644 --- a/src/go/plugin/go.d/modules/windows/integrations/active_directory.md +++ b/src/go/plugin/go.d/modules/windows/integrations/active_directory.md @@ -77,13 +77,13 @@ Supported collectors: - [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md) - [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md) - [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md) -- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md) -- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md) -- 
[netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md) -- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md) -- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md) -- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md) -- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md) +- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-exceptions) +- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-interop) +- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-jit) +- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-loading) +- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-locks-and-threads) +- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-memory) +- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-remoting) - [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md) - [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md) @@ -651,8 +651,8 @@ To install the Windows exporter, follow the [official installation guide](https: The configuration file name for this integration is `go.d/windows.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/windows/integrations/hyperv.md b/src/go/plugin/go.d/modules/windows/integrations/hyperv.md index 42e4f308d..151da2810 100644 --- a/src/go/plugin/go.d/modules/windows/integrations/hyperv.md +++ b/src/go/plugin/go.d/modules/windows/integrations/hyperv.md @@ -77,13 +77,13 @@ Supported collectors: - [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md) - [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md) - [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md) -- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md) -- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md) -- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md) -- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md) -- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md) -- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md) -- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md) +- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-exceptions) +- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-interop) +- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-jit) +- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-loading) +- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-locks-and-threads) +- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-memory) +- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-remoting) - [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md) - [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md) @@ -651,8 +651,8 @@ To install the Windows exporter, follow the [official installation guide](https: The configuration file name for this integration is `go.d/windows.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/windows/integrations/ms_exchange.md b/src/go/plugin/go.d/modules/windows/integrations/ms_exchange.md index 24d416021..787a62850 100644 --- a/src/go/plugin/go.d/modules/windows/integrations/ms_exchange.md +++ b/src/go/plugin/go.d/modules/windows/integrations/ms_exchange.md @@ -77,13 +77,13 @@ Supported collectors: - [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md) - [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md) - [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md) -- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md) -- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md) -- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md) -- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md) -- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md) -- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md) -- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md) +- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-exceptions) +- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-interop) +- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-jit) +- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-loading) +- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-locks-and-threads) +- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-memory) +- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-remoting) - [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md) - [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md) @@ -651,8 +651,8 @@ To install the Windows exporter, follow the [official installation guide](https: The configuration file name for this integration is `go.d/windows.conf`. 
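Since the same `go.d/windows.conf` file drives all of these Windows-exporter-based integrations, a minimal job is just a name plus the exporter's metrics URL. The sketch below is illustrative only: the job name is hypothetical, while the URL matches the default windows_exporter endpoint used in this patch's tests:

```yaml
jobs:
  - name: win_server
    url: http://127.0.0.1:9182/metrics
```
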
-You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/windows/integrations/ms_sql_server.md b/src/go/plugin/go.d/modules/windows/integrations/ms_sql_server.md index 1dd59c30e..eee652b99 100644 --- a/src/go/plugin/go.d/modules/windows/integrations/ms_sql_server.md +++ b/src/go/plugin/go.d/modules/windows/integrations/ms_sql_server.md @@ -77,13 +77,13 @@ Supported collectors: - [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md) - [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md) - [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md) -- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md) -- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md) -- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md) -- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md) -- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md) -- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md) -- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md) +- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-exceptions) +- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-interop) +- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-jit) +- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-loading) +- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-locks-and-threads) +- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-memory) +- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-remoting) - [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md) - [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md) @@ -651,8 +651,8 @@ To install the Windows exporter, 
follow the [official installation guide](https: The configuration file name for this integration is `go.d/windows.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/windows/integrations/net_framework.md b/src/go/plugin/go.d/modules/windows/integrations/net_framework.md index 01879ddea..e69742a44 100644 --- a/src/go/plugin/go.d/modules/windows/integrations/net_framework.md +++ b/src/go/plugin/go.d/modules/windows/integrations/net_framework.md @@ -77,13 +77,13 @@ Supported collectors: - [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md) - [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md) - [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md) -- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md) -- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md) -- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md) -- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md) -- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md) -- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md) -- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md) +- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-exceptions) +- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-interop) +- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-jit) +- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-loading) +- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-locks-and-threads) +- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-memory) +- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-remoting) - [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md) - 
[hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md) @@ -651,8 +651,8 @@ To install the Windows exporter, follow the [official installation guide](https: The configuration file name for this integration is `go.d/windows.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/windows/integrations/windows.md b/src/go/plugin/go.d/modules/windows/integrations/windows.md index 60a3b7f30..b17d4bd97 100644 --- a/src/go/plugin/go.d/modules/windows/integrations/windows.md +++ b/src/go/plugin/go.d/modules/windows/integrations/windows.md @@ -77,13 +77,13 @@ Supported collectors: - [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md) - [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md) - [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md) -- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md) -- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md) -- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md) -- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md) -- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md) -- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md) -- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md) +- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-exceptions) +- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-interop) +- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-jit) +- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-loading) +- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-locks-and-threads) +- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-memory) +- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-remoting) - 
[exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md) - [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md) @@ -651,8 +651,8 @@ To install the Windows exporter, follow the [official installation guide](https: The configuration file name for this integration is `go.d/windows.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/windows/metadata.yaml b/src/go/plugin/go.d/modules/windows/metadata.yaml index 87ac4cf63..e852e492f 100644 --- a/src/go/plugin/go.d/modules/windows/metadata.yaml +++ b/src/go/plugin/go.d/modules/windows/metadata.yaml @@ -243,13 +243,13 @@ modules: - [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md) - [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md) - [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md) - - [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md) - - [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md) - - [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md) - - [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md) - - [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md) - - [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md) - - [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md) + - [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-exceptions) + - [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-interop) + - [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-jit) + - [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-loading) + - [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-locks-and-threads) + - [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-memory) + - 
[netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework.md#clr-remoting) - [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md) - [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md) availability: [] diff --git a/src/go/plugin/go.d/modules/windows/windows.go b/src/go/plugin/go.d/modules/windows/windows.go index 555990784..818f2ef78 100644 --- a/src/go/plugin/go.d/modules/windows/windows.go +++ b/src/go/plugin/go.d/modules/windows/windows.go @@ -5,9 +5,11 @@ package windows import ( _ "embed" "errors" + "fmt" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) @@ -29,9 +31,9 @@ func init() { func New() *Windows { return &Windows{ Config: Config{ - HTTP: web.HTTP{ - Client: web.Client{ - Timeout: web.Duration(time.Second * 5), + HTTPConfig: web.HTTPConfig{ + ClientConfig: web.ClientConfig{ + Timeout: confopt.Duration(time.Second * 5), }, }, }, @@ -69,9 +71,9 @@ func New() *Windows { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - web.HTTP `yaml:",inline" json:""` - Vnode string `yaml:"vnode,omitempty" json:"vnode"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + web.HTTPConfig `yaml:",inline" json:""` + Vnode string `yaml:"vnode,omitempty" json:"vnode"` } type ( @@ -122,14 +124,12 @@ func (w *Windows) Configuration() any { func (w *Windows) Init() error { if err := w.validateConfig(); err != nil { - w.Errorf("config validation: %v", err) - return err + return fmt.Errorf("config validation: %v", err) } prom, err := w.initPrometheusClient() if err != nil { - w.Errorf("init prometheus clients: %v", err) - return err + return fmt.Errorf("init prometheus clients: %v", err) } w.prom = prom @@ -139,7 +139,6 @@ func (w *Windows) Init() error { func (w *Windows) Check() error { mx, err := w.collect() if err != nil { - w.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/windows/windows_test.go b/src/go/plugin/go.d/modules/windows/windows_test.go index 052950248..fa55e6841 100644 --- a/src/go/plugin/go.d/modules/windows/windows_test.go +++ b/src/go/plugin/go.d/modules/windows/windows_test.go @@ -49,7 +49,7 @@ func TestWindows_Init(t *testing.T) { }{ "success if 'url' is set": { config: Config{ - HTTP: web.HTTP{Request: web.Request{URL: "http://127.0.0.1:9182/metrics"}}}, + HTTPConfig: web.HTTPConfig{RequestConfig: web.RequestConfig{URL: "http://127.0.0.1:9182/metrics"}}}, }, "fails on default config": { wantFail: true, @@ -57,7 +57,7 @@ func TestWindows_Init(t *testing.T) { }, "fails if 'url' is unset": { wantFail: true, - config: Config{HTTP: web.HTTP{Request: web.Request{URL: ""}}}, + config: Config{HTTPConfig: web.HTTPConfig{RequestConfig: web.RequestConfig{URL: ""}}}, }, } @@ -817,7 +817,7 @@ func TestWindows_Collect(t *testing.T) { func testCharts(t *testing.T, win *Windows, mx map[string]int64) { ensureChartsDimsCreated(t, win) - ensureCollectedHasAllChartsDimsVarsIDs(t, win, mx) + module.TestMetricsHasAllChartsDims(t, win.Charts(), mx) } func ensureChartsDimsCreated(t *testing.T, w *Windows) { @@ -1047,19 +1047,6 @@ func ensureChartsDimsCreated(t *testing.T, w *Windows) { } } -func 
ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, w *Windows, mx map[string]int64) { - for _, chart := range *w.Charts() { - for _, dim := range chart.Dims { - _, ok := mx[dim.ID] - assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) - } - for _, v := range chart.Vars { - _, ok := mx[v.ID] - assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) - } - } -} - func prepareWindowsV0200() (win *Windows, cleanup func()) { ts := httptest.NewServer(http.HandlerFunc( func(w http.ResponseWriter, r *http.Request) { diff --git a/src/go/plugin/go.d/modules/wireguard/config_schema.json b/src/go/plugin/go.d/modules/wireguard/config_schema.json index 5ff8ff717..bba8f6fc5 100644 --- a/src/go/plugin/go.d/modules/wireguard/config_schema.json +++ b/src/go/plugin/go.d/modules/wireguard/config_schema.json @@ -12,7 +12,6 @@ "default": 1 } }, - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/wireguard/integrations/wireguard.md b/src/go/plugin/go.d/modules/wireguard/integrations/wireguard.md index 2460cc839..74928c5f6 100644 --- a/src/go/plugin/go.d/modules/wireguard/integrations/wireguard.md +++ b/src/go/plugin/go.d/modules/wireguard/integrations/wireguard.md @@ -114,8 +114,8 @@ No action required. The configuration file name for this integration is `go.d/wireguard.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/wireguard/wireguard.go b/src/go/plugin/go.d/modules/wireguard/wireguard.go index fdd42e193..8e25f0bbd 100644 --- a/src/go/plugin/go.d/modules/wireguard/wireguard.go +++ b/src/go/plugin/go.d/modules/wireguard/wireguard.go @@ -70,7 +70,6 @@ func (w *WireGuard) Init() error { func (w *WireGuard) Check() error { mx, err := w.collect() if err != nil { - w.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/x509check/config_schema.json b/src/go/plugin/go.d/modules/x509check/config_schema.json index 7246cfa7a..c0e5ca3aa 100644 --- a/src/go/plugin/go.d/modules/x509check/config_schema.json +++ b/src/go/plugin/go.d/modules/x509check/config_schema.json @@ -69,7 +69,6 @@ "required": [ "source" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/x509check/integrations/x.509_certificate.md b/src/go/plugin/go.d/modules/x509check/integrations/x.509_certificate.md index ccbe12948..41e8ca734 100644 --- a/src/go/plugin/go.d/modules/x509check/integrations/x.509_certificate.md +++ b/src/go/plugin/go.d/modules/x509check/integrations/x.509_certificate.md @@ -97,8 +97,8 @@ No action required. The configuration file name for this integration is `go.d/x509check.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/x509check/x509check.go b/src/go/plugin/go.d/modules/x509check/x509check.go index c4fa70eac..543941462 100644 --- a/src/go/plugin/go.d/modules/x509check/x509check.go +++ b/src/go/plugin/go.d/modules/x509check/x509check.go @@ -5,13 +5,14 @@ package x509check import ( _ "embed" "errors" + "fmt" "time" + "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/tlscfg" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" cfssllog "github.com/cloudflare/cfssl/log" - "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" ) //go:embed "config_schema.json" @@ -32,7 +33,7 @@ func init() { func New() *X509Check { return &X509Check{ Config: Config{ - Timeout: web.Duration(time.Second * 2), + Timeout: confopt.Duration(time.Second * 2), DaysUntilWarn: 14, DaysUntilCritical: 7, }, @@ -40,12 +41,12 @@ func New() *X509Check { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - Source string `yaml:"source" json:"source"` - Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"` - DaysUntilWarn int64 `yaml:"days_until_expiration_warning,omitempty" json:"days_until_expiration_warning"` - DaysUntilCritical int64 `yaml:"days_until_expiration_critical,omitempty" json:"days_until_expiration_critical"` - CheckRevocation bool `yaml:"check_revocation_status" json:"check_revocation_status"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + Source string `yaml:"source" json:"source"` + Timeout confopt.Duration `yaml:"timeout,omitempty" json:"timeout"` + DaysUntilWarn int64 `yaml:"days_until_expiration_warning,omitempty" json:"days_until_expiration_warning"` + DaysUntilCritical int64 `yaml:"days_until_expiration_critical,omitempty" json:"days_until_expiration_critical"` + CheckRevocation bool `yaml:"check_revocation_status" json:"check_revocation_status"` tlscfg.TLSConfig `yaml:",inline" json:""` } @@ -64,14 +65,12 @@ func (x *X509Check) Configuration() any { func (x *X509Check) Init() error { if err := x.validateConfig(); err != nil { - x.Errorf("config validation: %v", err) - return err + return fmt.Errorf("config validation: %v", err) } prov, err := x.initProvider() if err != nil { - x.Errorf("certificate provider init: %v", err) - return err + return fmt.Errorf("certificate provider init: %v", err) } x.prov = prov @@ -83,7 +82,6 @@ func (x *X509Check) Init() error { func (x *X509Check) Check() error { mx, err := x.collect() if err != nil { - x.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/x509check/x509check_test.go b/src/go/plugin/go.d/modules/x509check/x509check_test.go index e0b287251..3bdd84ced 100644 --- a/src/go/plugin/go.d/modules/x509check/x509check_test.go +++ b/src/go/plugin/go.d/modules/x509check/x509check_test.go @@ -130,10 +130,10 @@ func TestX509Check_Collect(t *testing.T) { require.NoError(t, x509Check.Init()) x509Check.prov = &mockProvider{certs: []*x509.Certificate{{}}} - collected := 
x509Check.Collect() + mx := x509Check.Collect() - assert.NotZero(t, collected) - ensureCollectedHasAllChartsDimsVarsIDs(t, x509Check, collected) + assert.NotZero(t, mx) + module.TestMetricsHasAllChartsDims(t, x509Check.Charts(), mx) } func TestX509Check_Collect_ReturnsNilOnProviderError(t *testing.T) { @@ -151,19 +151,6 @@ func TestX509Check_Collect_ReturnsNilOnZeroCertificates(t *testing.T) { assert.Nil(t, mx) } -func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, x509Check *X509Check, collected map[string]int64) { - for _, chart := range *x509Check.Charts() { - for _, dim := range chart.Dims { - _, ok := collected[dim.ID] - assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) - } - for _, v := range chart.Vars { - _, ok := collected[v.ID] - assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) - } - } -} - type mockProvider struct { certs []*x509.Certificate err bool diff --git a/src/go/plugin/go.d/modules/zfspool/charts.go b/src/go/plugin/go.d/modules/zfspool/charts.go index 92a7d53bd..d3298bbf9 100644 --- a/src/go/plugin/go.d/modules/zfspool/charts.go +++ b/src/go/plugin/go.d/modules/zfspool/charts.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly + package zfspool import ( diff --git a/src/go/plugin/go.d/modules/zfspool/collect.go b/src/go/plugin/go.d/modules/zfspool/collect.go index b9b29058b..96c5ee3af 100644 --- a/src/go/plugin/go.d/modules/zfspool/collect.go +++ b/src/go/plugin/go.d/modules/zfspool/collect.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly + package zfspool var zpoolHealthStates = []string{ diff --git a/src/go/plugin/go.d/modules/zfspool/collect_zpool_list.go b/src/go/plugin/go.d/modules/zfspool/collect_zpool_list.go index f5e1c0812..1d0b974b2 100644 --- a/src/go/plugin/go.d/modules/zfspool/collect_zpool_list.go +++ b/src/go/plugin/go.d/modules/zfspool/collect_zpool_list.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly + package zfspool import ( diff --git a/src/go/plugin/go.d/modules/zfspool/collect_zpool_list_vdev.go b/src/go/plugin/go.d/modules/zfspool/collect_zpool_list_vdev.go index 30e1fe4e1..36790298b 100644 --- a/src/go/plugin/go.d/modules/zfspool/collect_zpool_list_vdev.go +++ b/src/go/plugin/go.d/modules/zfspool/collect_zpool_list_vdev.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly + package zfspool import ( diff --git a/src/go/plugin/go.d/modules/zfspool/config_schema.json b/src/go/plugin/go.d/modules/zfspool/config_schema.json index fcfcff1d4..1a3c3ab95 100644 --- a/src/go/plugin/go.d/modules/zfspool/config_schema.json +++ b/src/go/plugin/go.d/modules/zfspool/config_schema.json @@ -28,7 +28,6 @@ "required": [ "binary_path" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/zfspool/doc.go b/src/go/plugin/go.d/modules/zfspool/doc.go new file mode 100644 index 000000000..002fb4eb0 --- /dev/null +++ b/src/go/plugin/go.d/modules/zfspool/doc.go @@ -0,0 +1,3 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package zfspool diff --git a/src/go/plugin/go.d/modules/zfspool/exec.go b/src/go/plugin/go.d/modules/zfspool/exec.go index 1a2bcf203..28b1d3267 100644 --- a/src/go/plugin/go.d/modules/zfspool/exec.go 
+++ b/src/go/plugin/go.d/modules/zfspool/exec.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly + package zfspool import ( diff --git a/src/go/plugin/go.d/modules/zfspool/init.go b/src/go/plugin/go.d/modules/zfspool/init.go index f640801dd..f5e98b8bd 100644 --- a/src/go/plugin/go.d/modules/zfspool/init.go +++ b/src/go/plugin/go.d/modules/zfspool/init.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly + package zfspool import ( diff --git a/src/go/plugin/go.d/modules/zfspool/integrations/zfs_pools.md b/src/go/plugin/go.d/modules/zfspool/integrations/zfs_pools.md index 060e4fb71..a7fc921fb 100644 --- a/src/go/plugin/go.d/modules/zfspool/integrations/zfs_pools.md +++ b/src/go/plugin/go.d/modules/zfspool/integrations/zfs_pools.md @@ -26,7 +26,10 @@ This collector monitors the health and space usage of ZFS pools using the comman -This collector is supported on all platforms. +This collector is only supported on the following platforms: + +- Linux +- BSD This collector only supports collecting metrics from a single instance of this integration. @@ -118,8 +121,8 @@ No action required. The configuration file name for this integration is `go.d/zfspool.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/zfspool/metadata.yaml b/src/go/plugin/go.d/modules/zfspool/metadata.yaml index 21cc307ca..de7f1d186 100644 --- a/src/go/plugin/go.d/modules/zfspool/metadata.yaml +++ b/src/go/plugin/go.d/modules/zfspool/metadata.yaml @@ -28,7 +28,7 @@ modules: tool [zpool](https://openzfs.github.io/openzfs-docs/man/master/8/zpool-list.8.html). 
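As a quick illustration of the configuration this metadata describes, a minimal `go.d/zfspool.conf` job only needs the path to the `zpool` binary. The value below is the default set in `New()` earlier in this patch; the job name is hypothetical:

```yaml
jobs:
  - name: zfspool
    binary_path: /usr/bin/zpool
```
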
method_description: "" supported_platforms: - include: [] + include: [Linux, BSD] exclude: [] multi_instance: false additional_permissions: diff --git a/src/go/plugin/go.d/modules/zfspool/zfspool.go b/src/go/plugin/go.d/modules/zfspool/zfspool.go index 02f1f7ce9..8b63bac04 100644 --- a/src/go/plugin/go.d/modules/zfspool/zfspool.go +++ b/src/go/plugin/go.d/modules/zfspool/zfspool.go @@ -1,14 +1,17 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly + package zfspool import ( _ "embed" "errors" + "fmt" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" ) //go:embed "config_schema.json" @@ -29,7 +32,7 @@ func New() *ZFSPool { return &ZFSPool{ Config: Config{ BinaryPath: "/usr/bin/zpool", - Timeout: web.Duration(time.Second * 2), + Timeout: confopt.Duration(time.Second * 2), }, charts: &module.Charts{}, seenZpools: make(map[string]bool), @@ -38,9 +41,9 @@ func New() *ZFSPool { } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"` - BinaryPath string `yaml:"binary_path,omitempty" json:"binary_path"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + Timeout confopt.Duration `yaml:"timeout,omitempty" json:"timeout"` + BinaryPath string `yaml:"binary_path,omitempty" json:"binary_path"` } type ( @@ -67,14 +70,12 @@ func (z *ZFSPool) Configuration() any { func (z *ZFSPool) Init() error { if err := z.validateConfig(); err != nil { - z.Errorf("config validation: %s", err) - return err + return fmt.Errorf("config validation: %s", err) } zpoolExec, err := z.initZPoolCLIExec() if err != nil { - z.Errorf("zpool exec initialization: %v", err) - return err + return fmt.Errorf("zpool exec initialization: %v", err) } z.exec = zpoolExec @@ -84,7 +85,6 @@ func (z *ZFSPool) Init() error { func (z *ZFSPool) Check() error { mx, err := z.collect() if err != nil { - z.Error(err) return err } diff --git a/src/go/plugin/go.d/modules/zfspool/zfspool_test.go b/src/go/plugin/go.d/modules/zfspool/zfspool_test.go index bf64d1713..d0be92da3 100644 --- a/src/go/plugin/go.d/modules/zfspool/zfspool_test.go +++ b/src/go/plugin/go.d/modules/zfspool/zfspool_test.go @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later +//go:build linux || freebsd || openbsd || netbsd || dragonfly + package zfspool import ( @@ -388,7 +390,7 @@ func TestZFSPool_Collect(t *testing.T) { assert.Len(t, *zp.Charts(), want, "want charts") - module.TestMetricsHasAllChartsDimsSkip(t, zp.Charts(), mx, func(chart *module.Chart) bool { + module.TestMetricsHasAllChartsDimsSkip(t, zp.Charts(), mx, func(chart *module.Chart, _ *module.Dim) bool { return strings.HasPrefix(chart.ID, "zfspool_zion") && !strings.HasSuffix(chart.ID, "health_state") }) } diff --git a/src/go/plugin/go.d/modules/zookeeper/config_schema.json b/src/go/plugin/go.d/modules/zookeeper/config_schema.json index e07a27c29..38ce2c9de 100644 --- a/src/go/plugin/go.d/modules/zookeeper/config_schema.json +++ b/src/go/plugin/go.d/modules/zookeeper/config_schema.json @@ -56,7 +56,6 @@ "required": [ "address" ], - "additionalProperties": false, "patternProperties": { "^name$": {} } diff --git a/src/go/plugin/go.d/modules/zookeeper/fetcher_test.go b/src/go/plugin/go.d/modules/zookeeper/fetcher_test.go index d0931abb9..582d91c6f 100644 --- 
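A pattern change recurs in `zfspool.go` above and in `zookeeper.go` further down: `Init()` no longer logs and then returns the same error (`z.Errorf(...)` followed by `return err`); it wraps the cause with `fmt.Errorf` and leaves logging to the caller, avoiding double logging. One general Go note, not a claim about this codebase: wrapping with `%s`/`%v` as these hunks do flattens the cause to text, whereas `%w` would keep the chain inspectable:

```go
package main

import (
	"errors"
	"fmt"
)

var errNoBinary = errors.New("binary_path not set")

func main() {
	// %v embeds only the message; the link to the sentinel is lost.
	e1 := fmt.Errorf("config validation: %v", errNoBinary)
	fmt.Println(errors.Is(e1, errNoBinary)) // false

	// %w records the wrapped error, so errors.Is/errors.As still see it.
	e2 := fmt.Errorf("config validation: %w", errNoBinary)
	fmt.Println(errors.Is(e2, errNoBinary)) // true
}
```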
a/src/go/plugin/go.d/modules/zookeeper/fetcher_test.go +++ b/src/go/plugin/go.d/modules/zookeeper/fetcher_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/socket" + "github.com/stretchr/testify/assert" ) diff --git a/src/go/plugin/go.d/modules/zookeeper/init.go b/src/go/plugin/go.d/modules/zookeeper/init.go index 380f4bb33..462064c72 100644 --- a/src/go/plugin/go.d/modules/zookeeper/init.go +++ b/src/go/plugin/go.d/modules/zookeeper/init.go @@ -30,11 +30,9 @@ func (z *Zookeeper) initZookeeperFetcher() (fetcher, error) { } sock := socket.New(socket.Config{ - Address: z.Address, - ConnectTimeout: z.Timeout.Duration(), - ReadTimeout: z.Timeout.Duration(), - WriteTimeout: z.Timeout.Duration(), - TLSConf: tlsConf, + Address: z.Address, + Timeout: z.Timeout.Duration(), + TLSConf: tlsConf, }) return &zookeeperFetcher{Client: sock}, nil diff --git a/src/go/plugin/go.d/modules/zookeeper/integrations/zookeeper.md b/src/go/plugin/go.d/modules/zookeeper/integrations/zookeeper.md index 8481ff8c8..88ef359fb 100644 --- a/src/go/plugin/go.d/modules/zookeeper/integrations/zookeeper.md +++ b/src/go/plugin/go.d/modules/zookeeper/integrations/zookeeper.md @@ -104,8 +104,8 @@ Add `mntr` to Zookeeper's [4lw.commands.whitelist](https://zookeeper.apache.org/ The configuration file name for this integration is `go.d/zookeeper.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
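The `init.go` hunk above collapses the socket package's `ConnectTimeout`/`ReadTimeout`/`WriteTimeout` trio into a single `Timeout` field. A sketch of the new call shape, assuming only the fields visible in the hunk:

```go
package main

import (
	"time"

	"github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/socket"
)

func main() {
	// One deadline now covers connect, read and write, replacing the
	// three per-operation timeouts removed above.
	_ = socket.New(socket.Config{
		Address: "127.0.0.1:2181",
		Timeout: time.Second,
	})
}
```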
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/go/plugin/go.d/modules/zookeeper/zookeeper.go b/src/go/plugin/go.d/modules/zookeeper/zookeeper.go index 6d004a405..d61451825 100644 --- a/src/go/plugin/go.d/modules/zookeeper/zookeeper.go +++ b/src/go/plugin/go.d/modules/zookeeper/zookeeper.go @@ -5,11 +5,12 @@ package zookeeper import ( _ "embed" "errors" + "fmt" "time" "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/tlscfg" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web" ) //go:embed "config_schema.json" @@ -27,15 +28,15 @@ func New() *Zookeeper { return &Zookeeper{ Config: Config{ Address: "127.0.0.1:2181", - Timeout: web.Duration(time.Second), + Timeout: confopt.Duration(time.Second), UseTLS: false, }} } type Config struct { - UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` - Address string `yaml:"address" json:"address"` - Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"` + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + Address string `yaml:"address" json:"address"` + Timeout confopt.Duration `yaml:"timeout,omitempty" json:"timeout"` tlscfg.TLSConfig `yaml:",inline" json:""` UseTLS bool `yaml:"use_tls,omitempty" json:"use_tls"` } @@ -58,14 +59,12 @@ func (z *Zookeeper) Configuration() any { func (z *Zookeeper) Init() error { if err := z.verifyConfig(); err != nil { - z.Error(err) - return err + return fmt.Errorf("invalid config: %v", err) } f, err := z.initZookeeperFetcher() if err != nil { - z.Error(err) - return err + return fmt.Errorf("init zookeeper fetcher: %v", err) } z.fetcher = f @@ -75,7 +74,6 @@ func (z *Zookeeper) Init() error { func (z *Zookeeper) Check() error { mx, err := z.collect() if err != nil { - z.Error(err) return err } if len(mx) == 0 { diff --git a/src/go/plugin/go.d/modules/zookeeper/zookeeper_test.go b/src/go/plugin/go.d/modules/zookeeper/zookeeper_test.go index 3fc8ad5b4..9b45e2ad5 100644 --- a/src/go/plugin/go.d/modules/zookeeper/zookeeper_test.go +++ b/src/go/plugin/go.d/modules/zookeeper/zookeeper_test.go @@ -99,10 +99,10 @@ func TestZookeeper_Collect(t *testing.T) { "znode_count": 5, } - collected := job.Collect() + mx := job.Collect() - assert.Equal(t, expected, collected) - ensureCollectedHasAllChartsDimsVarsIDs(t, job, collected) + assert.Equal(t, expected, mx) + module.TestMetricsHasAllChartsDims(t, job.Charts(), mx) } func TestZookeeper_CollectMntrNotInWhiteList(t *testing.T) { @@ -137,22 +137,6 @@ func TestZookeeper_CollectMntrReceiveError(t *testing.T) { assert.Nil(t, job.Collect()) } -func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, zk *Zookeeper, collected map[string]int64) { - for _, chart := range *zk.Charts() { - if chart.Obsolete { - continue - } - for _, dim := range chart.Dims { - _, ok := collected[dim.ID] - assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) - } - for _, v := range chart.Vars { - _, ok := collected[v.ID] - assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) - } - } -} - type mockZookeeperFetcher struct { data []byte err bool diff --git a/src/go/plugin/go.d/pkg/README.md b/src/go/plugin/go.d/pkg/README.md index 34561395f..e562fab87 100644 --- a/src/go/plugin/go.d/pkg/README.md +++ b/src/go/plugin/go.d/pkg/README.md @@ -1,11 +1,3 @@ - - # Helper Packages - if you need IP ranges consider to @@ 
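Both the x509check and zookeeper test hunks in this patch drop their copy-pasted `ensureCollectedHasAllChartsDimsVarsIDs` helpers in favor of the shared `module.TestMetricsHasAllChartsDims`, which checks that every chart dim and var has a datapoint in the collected map. A sketch of the resulting test shape; the module and its constructor are hypothetical:

```go
package somemodule

import (
	"testing"

	"github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestSomeModule_Collect(t *testing.T) {
	job := New() // hypothetical constructor of the module under test
	require.NoError(t, job.Init())

	mx := job.Collect()

	assert.NotZero(t, mx)
	// One shared assertion replaces the per-module helpers removed above.
	module.TestMetricsHasAllChartsDims(t, job.Charts(), mx)
}
```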
-13,7 +5,7 @@ learn_rel_path: "Developers/External plugins/go.d.plugin/Helper Packages" - if you parse an application log files, then [`log`](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/logs) is handy. - if you need filtering - check [`matcher`](/src/go/plugin/go.d/pkg/matcher). + check [`matcher`](/src/go/pkg/matcher). - if you collect metrics from an HTTP endpoint use [`web`](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/web). - if you collect metrics from a prometheus endpoint, then [`prometheus`](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/prometheus) diff --git a/src/go/plugin/go.d/pkg/confopt/duration.go b/src/go/plugin/go.d/pkg/confopt/duration.go new file mode 100644 index 000000000..7aebe062a --- /dev/null +++ b/src/go/plugin/go.d/pkg/confopt/duration.go @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package confopt + +import ( + "encoding/json" + "fmt" + "strconv" + "time" +) + +type Duration time.Duration + +func (d Duration) Duration() time.Duration { + return time.Duration(d) +} + +func (d Duration) String() string { + return d.Duration().String() +} + +func (d *Duration) UnmarshalYAML(unmarshal func(any) error) error { + var s string + + if err := unmarshal(&s); err != nil { + return err + } + + if v, err := time.ParseDuration(s); err == nil { + *d = Duration(v) + return nil + } + if v, err := strconv.ParseInt(s, 10, 64); err == nil { + *d = Duration(time.Duration(v) * time.Second) + return nil + } + if v, err := strconv.ParseFloat(s, 64); err == nil { + *d = Duration(v * float64(time.Second)) + return nil + } + + return fmt.Errorf("unparsable duration format '%s'", s) +} + +func (d Duration) MarshalYAML() (any, error) { + seconds := float64(d) / float64(time.Second) + return seconds, nil +} + +func (d *Duration) UnmarshalJSON(b []byte) error { + s := string(b) + + if v, err := time.ParseDuration(s); err == nil { + *d = Duration(v) + return nil + } + if v, err := strconv.ParseInt(s, 10, 64); err == nil { + *d = Duration(time.Duration(v) * time.Second) + return nil + } + if v, err := strconv.ParseFloat(s, 64); err == nil { + *d = Duration(v * float64(time.Second)) + return nil + } + + return fmt.Errorf("unparsable duration format '%s'", s) +} + +func (d Duration) MarshalJSON() ([]byte, error) { + seconds := float64(d) / float64(time.Second) + return json.Marshal(seconds) +} diff --git a/src/go/plugin/go.d/pkg/confopt/duration_test.go b/src/go/plugin/go.d/pkg/confopt/duration_test.go new file mode 100644 index 000000000..fe907bf53 --- /dev/null +++ b/src/go/plugin/go.d/pkg/confopt/duration_test.go @@ -0,0 +1,114 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package confopt + +import ( + "encoding/json" + "fmt" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "gopkg.in/yaml.v2" +) + +func TestDuration_MarshalYAML(t *testing.T) { + tests := map[string]struct { + d Duration + want string + }{ + "1 second": {d: Duration(time.Second), want: "1"}, + "1.5 seconds": {d: Duration(time.Second + time.Millisecond*500), want: "1.5"}, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + bs, err := yaml.Marshal(&test.d) + require.NoError(t, err) + + assert.Equal(t, test.want, strings.TrimSpace(string(bs))) + }) + } +} + +func TestDuration_MarshalJSON(t *testing.T) { + tests := map[string]struct { + d Duration + want string + }{ + "1 second": {d: Duration(time.Second), want: "1"}, + "1.5 seconds": {d: 
Duration(time.Second + time.Millisecond*500), want: "1.5"}, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + bs, err := json.Marshal(&test.d) + require.NoError(t, err) + + assert.Equal(t, test.want, strings.TrimSpace(string(bs))) + }) + } +} + +func TestDuration_UnmarshalYAML(t *testing.T) { + tests := map[string]struct { + input any + }{ + "duration": {input: "300ms"}, + "string int": {input: "1"}, + "string float": {input: "1.1"}, + "int": {input: 2}, + "float": {input: 2.2}, + } + + var zero Duration + + for name, test := range tests { + name = fmt.Sprintf("%s (%v)", name, test.input) + t.Run(name, func(t *testing.T) { + data, err := yaml.Marshal(test.input) + require.NoError(t, err) + + var d Duration + require.NoError(t, yaml.Unmarshal(data, &d)) + assert.NotEqual(t, zero.String(), d.String()) + }) + } +} + +func TestDuration_UnmarshalJSON(t *testing.T) { + tests := map[string]struct { + input any + }{ + "duration": {input: "300ms"}, + "string int": {input: "1"}, + "string float": {input: "1.1"}, + "int": {input: 2}, + "float": {input: 2.2}, + } + + var zero Duration + + type duration struct { + D Duration `json:"d"` + } + type input struct { + D any `json:"d"` + } + + for name, test := range tests { + name = fmt.Sprintf("%s (%v)", name, test.input) + t.Run(name, func(t *testing.T) { + input := input{D: test.input} + data, err := yaml.Marshal(input) + require.NoError(t, err) + + var d duration + require.NoError(t, yaml.Unmarshal(data, &d)) + assert.NotEqual(t, zero.String(), d.D.String()) + }) + } +} diff --git a/src/go/plugin/go.d/pkg/dockerhost/dockerhost.go b/src/go/plugin/go.d/pkg/dockerhost/dockerhost.go index eb26b18fa..e53467b67 100644 --- a/src/go/plugin/go.d/pkg/dockerhost/dockerhost.go +++ b/src/go/plugin/go.d/pkg/dockerhost/dockerhost.go @@ -3,9 +3,15 @@ package dockerhost import ( + "bytes" + "context" "fmt" "os" "strings" + + typesContainer "github.com/docker/docker/api/types/container" + docker "github.com/docker/docker/client" + "github.com/docker/docker/pkg/stdcopy" ) func FromEnv() string { @@ -21,3 +27,72 @@ func FromEnv() string { } return fmt.Sprintf("tcp://%s", addr) } + +func Exec(ctx context.Context, container string, cmd string, args ...string) ([]byte, error) { + // based on https://github.com/moby/moby/blob/8e610b2b55bfd1bfa9436ab110d311f5e8a74dcb/integration/internal/container/exec.go#L38 + + addr := docker.DefaultDockerHost + if v := FromEnv(); v != "" { + addr = v + } + + cli, err := docker.NewClientWithOpts(docker.WithHost(addr)) + if err != nil { + return nil, fmt.Errorf("failed to create docker client: %v", err) + } + + defer func() { _ = cli.Close() }() + + cli.NegotiateAPIVersion(ctx) + + execCreateConfig := typesContainer.ExecOptions{ + AttachStderr: true, + AttachStdout: true, + Cmd: append([]string{cmd}, args...), + } + + createResp, err := cli.ContainerExecCreate(ctx, container, execCreateConfig) + if err != nil { + return nil, fmt.Errorf("failed to container exec create ('%s'): %v", container, err) + } + + attachResp, err := cli.ContainerExecAttach(ctx, createResp.ID, typesContainer.ExecAttachOptions{}) + if err != nil { + return nil, fmt.Errorf("failed to container exec attach ('%s'): %v", container, err) + } + defer attachResp.Close() + + var outBuf, errBuf bytes.Buffer + done := make(chan error) + + defer close(done) + + go func() { + _, err := stdcopy.StdCopy(&outBuf, &errBuf, attachResp.Reader) + select { + case done <- err: + case <-ctx.Done(): + } + }() + + select { + case err := <-done: + if err != nil { + 
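`confopt.Duration`, added above and swapped in for `web.Duration` throughout this patch, accepts three input spellings (a Go duration string, integer seconds, float seconds) and marshals back out as float seconds. Note in passing that `TestDuration_UnmarshalJSON` above builds and decodes its input with `yaml.Marshal`/`yaml.Unmarshal`, so the JSON code path is not actually exercised by it. A quick YAML demonstration:

```go
package main

import (
	"fmt"

	"github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt"
	"gopkg.in/yaml.v2"
)

func main() {
	// Per UnmarshalYAML above: try time.ParseDuration first, then
	// integer seconds, then float seconds.
	for _, in := range []string{`"300ms"`, `2`, `1.5`} {
		var d confopt.Duration
		if err := yaml.Unmarshal([]byte(in), &d); err != nil {
			panic(err)
		}
		fmt.Printf("%s -> %s\n", in, d.Duration()) // 300ms, 2s, 1.5s
	}
}
```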
return nil, fmt.Errorf("failed to read response from container ('%s'): %v", container, err) + } + case <-ctx.Done(): + return nil, fmt.Errorf("timed out reading response") + } + + inspResp, err := cli.ContainerExecInspect(ctx, createResp.ID) + if err != nil { + return nil, fmt.Errorf("failed to container exec inspect ('%s'): %v", container, err) + } + + if inspResp.ExitCode != 0 { + msg := strings.ReplaceAll(errBuf.String(), "\n", " ") + return nil, fmt.Errorf("command returned non-zero exit code (%d), error: '%s'", inspResp.ExitCode, msg) + } + + return outBuf.Bytes(), nil +} diff --git a/src/go/plugin/go.d/pkg/iprange/README.md b/src/go/plugin/go.d/pkg/iprange/README.md index ee777989d..8ad33ebf4 100644 --- a/src/go/plugin/go.d/pkg/iprange/README.md +++ b/src/go/plugin/go.d/pkg/iprange/README.md @@ -1,11 +1,3 @@ - - # iprange This package helps you to work with IP ranges. diff --git a/src/go/plugin/go.d/pkg/k8sclient/k8sclient.go b/src/go/plugin/go.d/pkg/k8sclient/k8sclient.go index 079239c1c..32419dd09 100644 --- a/src/go/plugin/go.d/pkg/k8sclient/k8sclient.go +++ b/src/go/plugin/go.d/pkg/k8sclient/k8sclient.go @@ -27,7 +27,7 @@ func New(userAgent string) (kubernetes.Interface, error) { switch { case os.Getenv(EnvFakeClient) != "": - return fake.NewSimpleClientset(), nil + return fake.NewClientset(), nil case os.Getenv("KUBERNETES_SERVICE_HOST") != "" && os.Getenv("KUBERNETES_SERVICE_PORT") != "": return newInCluster(userAgent) default: diff --git a/src/go/plugin/go.d/pkg/logs/csv.go b/src/go/plugin/go.d/pkg/logs/csv.go index 4057b8c2f..6c2956787 100644 --- a/src/go/plugin/go.d/pkg/logs/csv.go +++ b/src/go/plugin/go.d/pkg/logs/csv.go @@ -16,7 +16,7 @@ type ( CSVConfig struct { FieldsPerRecord int `yaml:"fields_per_record,omitempty" json:"fields_per_record"` Delimiter string `yaml:"delimiter,omitempty" json:"delimiter"` - TrimLeadingSpace bool `yaml:"trim_leading_space" json:"trim_leading_space"` + TrimLeadingSpace bool `yaml:"trim_leading_space,omitempty" json:"trim_leading_space"` Format string `yaml:"format,omitempty" json:"format"` CheckField func(string) (string, int, bool) `yaml:"-" json:"-"` } diff --git a/src/go/plugin/go.d/pkg/matcher/README.md b/src/go/plugin/go.d/pkg/matcher/README.md deleted file mode 100644 index 971774ec2..000000000 --- a/src/go/plugin/go.d/pkg/matcher/README.md +++ /dev/null @@ -1,142 +0,0 @@ - - -# matcher -## Supported Format - -* string -* glob -* regexp -* simple patterns - -Depending on the symbol at the start of the string, the `matcher` will use one of the supported formats. - -| matcher | short format | long format | -|-----------------|--------------|-------------------| -| string | ` =` | `string` | -| glob | `*` | `glob` | -| regexp | `~` | `regexp` | -| simple patterns | | `simple_patterns` | - -Example: - -- `* pattern`: It will use the `glob` matcher to find the `pattern` in the string. - -### Syntax - -**Tip**: Read `::=` as `is defined as`. - -``` -Short Syntax - [ ] - - ::= '!' - negative expression - ::= [ '=', '~', '*' ] - '=' means string match - '~' means regexp match - '*' means glob match - ::= { ' ' | '\t' | '\n' | '\n' | '\r' } - ::= any string - - Long Syntax - [ ] - - ::= [ 'string' | 'glob' | 'regexp' | 'simple_patterns' ] - ::= '!' - negative expression - ::= ':' - ::= any string -``` - -When using the short syntax, you can enable the glob format by starting the string with a `*`, while in the long syntax -you need to define it more explicitly. The following examples are identical. 
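The new `dockerhost.Exec` above runs a command inside a container over the Docker API, demuxes stdout/stderr with `stdcopy`, and surfaces a non-zero exit code as an error carrying stderr. A usage sketch; the container name is hypothetical:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/dockerhost"
)

func main() {
	// The context bounds the whole round trip; on expiry Exec returns
	// "timed out reading response" per the select above.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	out, err := dockerhost.Exec(ctx, "my-zfs-container", "zpool", "list", "-p")
	if err != nil {
		fmt.Println("exec failed:", err)
		return
	}
	fmt.Printf("%s", out)
}
```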
`simple_patterns` can be used **only** with -the long syntax. - -Examples: - -- Short Syntax: `'* * '` -- Long Syntax: `'glob:*'` - -### String matcher - -The string matcher reports whether the given value equals to the string. - -Examples: - -- `'= foo'` matches only if the string is `foo`. -- `'!= bar'` matches any string that is not `bar`. - -String matcher means **exact match** of the `string`. There are other string match related cases: - -- string has prefix `something` -- string has suffix `something` -- string contains `something` - -This is achievable using the `glob` matcher: - -- `* PREFIX*`, means that it matches with any string that *starts* with `PREFIX`, e.g `PREFIXnetdata` -- `* *SUFFIX`, means that it matches with any string that *ends* with `SUFFIX`, e.g `netdataSUFFIX` -- `* *SUBSTRING*`, means that it matches with any string that *contains* `SUBSTRING`, e.g `netdataSUBSTRINGnetdata` - -### Glob matcher - -The glob matcher reports whether the given value matches the wildcard pattern. It uses the standard `golang` -library `path`. You can read more about the library in the [golang documentation](https://golang.org/pkg/path/#Match), -where you can also practice with the library in order to learn the syntax and use it in your Netdata configuration. - -The pattern syntax is: - -``` - pattern: - { term } - term: - '*' matches any sequence of characters - '?' matches any single character - '[' [ '^' ] { character-range } ']' - character class (must be non-empty) - c matches character c (c != '*', '?', '\\', '[') - '\\' c matches character c - - character-range: - c matches character c (c != '\\', '-', ']') - '\\' c matches character c - lo '-' hi matches character c for lo <= c <= hi -``` - -Examples: - -- `* ?` matches any string that is a single character. -- `'?a'` matches any 2 character string that starts with any character and the second character is `a`, like `ba` but - not `bb` or `bba`. -- `'[^abc]'` matches any character that is NOT a,b,c. `'[abc]'` matches only a, b, c. -- `'*[a-d]'` matches any string (`*`) that ends with a character that is between `a` and `d` (i.e `a,b,c,d`). - -### Regexp matcher - -The regexp matcher reports whether the given value matches the RegExp pattern ( use regexp.Match ). - -The RegExp syntax is described at https://golang.org/pkg/regexp/syntax/. - -Learn more about regular expressions at [RegexOne](https://regexone.com/). - -### Simple patterns matcher - -The simple patterns matcher reports whether the given value matches the simple patterns. - -Simple patterns are a space separated list of words. Each word may use any number of wildcards `*`. Simple patterns -allow negative matches by prefixing a word with `!`. - -Examples: - -- `!*bad* *` matches anything, except all those that contain the word bad. -- `*foobar* !foo* !*bar *` matches everything containing foobar, except strings that start with foo or end with bar. - - - - diff --git a/src/go/plugin/go.d/pkg/matcher/cache.go b/src/go/plugin/go.d/pkg/matcher/cache.go deleted file mode 100644 index 4594fa06f..000000000 --- a/src/go/plugin/go.d/pkg/matcher/cache.go +++ /dev/null @@ -1,56 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package matcher - -import "sync" - -type ( - cachedMatcher struct { - matcher Matcher - - mux sync.RWMutex - cache map[string]bool - } -) - -// WithCache adds cache to the matcher. 
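The matcher package removed in the hunks that follow (its README above documents the short `=`/`~`/`*` and long `format:expression` syntaxes) is not gone from the tree: the `pkg/README.md` hunk earlier in this patch now points at `/src/go/pkg/matcher`. A usage sketch of the documented syntax; the import path is an assumption based on that link:

```go
package main

import (
	"fmt"

	"github.com/netdata/netdata/go/pkg/matcher" // path assumed from the README link above
)

func main() {
	// Short syntax: the leading symbol picks the format
	// (= string, ~ regexp, * glob).
	m := matcher.Must(matcher.Parse("* PREFIX*"))
	fmt.Println(m.MatchString("PREFIXnetdata")) // true

	// Long syntax spells the format out; required for simple_patterns.
	m = matcher.Must(matcher.Parse("simple_patterns: !*bad* *"))
	fmt.Println(m.MatchString("all good")) // true
	fmt.Println(m.MatchString("so bad"))   // false
}
```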
-func WithCache(m Matcher) Matcher { - switch m { - case TRUE(), FALSE(): - return m - default: - return &cachedMatcher{matcher: m, cache: make(map[string]bool)} - } -} - -func (m *cachedMatcher) Match(b []byte) bool { - s := string(b) - if result, ok := m.fetch(s); ok { - return result - } - result := m.matcher.Match(b) - m.put(s, result) - return result -} - -func (m *cachedMatcher) MatchString(s string) bool { - if result, ok := m.fetch(s); ok { - return result - } - result := m.matcher.MatchString(s) - m.put(s, result) - return result -} - -func (m *cachedMatcher) fetch(key string) (result bool, ok bool) { - m.mux.RLock() - result, ok = m.cache[key] - m.mux.RUnlock() - return -} - -func (m *cachedMatcher) put(key string, result bool) { - m.mux.Lock() - m.cache[key] = result - m.mux.Unlock() -} diff --git a/src/go/plugin/go.d/pkg/matcher/cache_test.go b/src/go/plugin/go.d/pkg/matcher/cache_test.go deleted file mode 100644 index a545777b3..000000000 --- a/src/go/plugin/go.d/pkg/matcher/cache_test.go +++ /dev/null @@ -1,53 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package matcher - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestWithCache(t *testing.T) { - regMatcher, _ := NewRegExpMatcher("[0-9]+") - cached := WithCache(regMatcher) - - assert.True(t, cached.MatchString("1")) - assert.True(t, cached.MatchString("1")) - assert.True(t, cached.Match([]byte("2"))) - assert.True(t, cached.Match([]byte("2"))) -} - -func TestWithCache_specialCase(t *testing.T) { - assert.Equal(t, TRUE(), WithCache(TRUE())) - assert.Equal(t, FALSE(), WithCache(FALSE())) -} -func BenchmarkCachedMatcher_MatchString_cache_hit(b *testing.B) { - benchmarks := []struct { - name string - expr string - target string - }{ - {"stringFullMatcher", "= abc123", "abc123"}, - {"stringPrefixMatcher", "~ ^abc123", "abc123456"}, - {"stringSuffixMatcher", "~ abc123$", "hello abc123"}, - {"stringSuffixMatcher", "~ abc123", "hello abc123 world"}, - {"globMatcher", "* abc*def", "abc12345678def"}, - {"regexp", "~ [0-9]+", "1234567890"}, - } - for _, bm := range benchmarks { - m := Must(Parse(bm.expr)) - b.Run(bm.name+"_raw", func(b *testing.B) { - for i := 0; i < b.N; i++ { - m.MatchString(bm.target) - } - }) - b.Run(bm.name+"_cache", func(b *testing.B) { - cached := WithCache(m) - b.ResetTimer() - for i := 0; i < b.N; i++ { - cached.MatchString(bm.target) - } - }) - } -} diff --git a/src/go/plugin/go.d/pkg/matcher/doc.go b/src/go/plugin/go.d/pkg/matcher/doc.go deleted file mode 100644 index 33b06988d..000000000 --- a/src/go/plugin/go.d/pkg/matcher/doc.go +++ /dev/null @@ -1,40 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -/* -Package matcher implements vary formats of string matcher. - -Supported Format - - string - glob - regexp - simple patterns - -The string matcher reports whether the given value equals to the string ( use == ). - -The glob matcher reports whether the given value matches the wildcard pattern. -The pattern syntax is: - - pattern: - { term } - term: - '*' matches any sequence of characters - '?' matches any single character - '[' [ '^' ] { character-range } ']' - character class (must be non-empty) - c matches character c (c != '*', '?', '\\', '[') - '\\' c matches character c - - character-range: - c matches character c (c != '\\', '-', ']') - '\\' c matches character c - lo '-' hi matches character c for lo <= c <= hi - -The regexp matcher reports whether the given value matches the RegExp pattern ( use regexp.Match ). 
-The RegExp syntax is described at https://golang.org/pkg/regexp/syntax/. - -The simple patterns matcher reports whether the given value matches the simple patterns. -The simple patterns is a custom format used in netdata, -it's syntax is described at https://docs.netdata.cloud/libnetdata/simple_pattern/. -*/ -package matcher diff --git a/src/go/plugin/go.d/pkg/matcher/doc_test.go b/src/go/plugin/go.d/pkg/matcher/doc_test.go deleted file mode 100644 index 4cc3944df..000000000 --- a/src/go/plugin/go.d/pkg/matcher/doc_test.go +++ /dev/null @@ -1,49 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package matcher_test - -import "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher" - -func ExampleNew_string_format() { - // create a string matcher, which perform full text match - m, err := matcher.New(matcher.FmtString, "hello") - if err != nil { - panic(err) - } - m.MatchString("hello") // => true - m.MatchString("hello world") // => false -} - -func ExampleNew_glob_format() { - // create a glob matcher, which perform wildcard match - m, err := matcher.New(matcher.FmtString, "hello*") - if err != nil { - panic(err) - } - m.MatchString("hello") // => true - m.MatchString("hello world") // => true - m.MatchString("Hello world") // => false -} - -func ExampleNew_simple_patterns_format() { - // create a simple patterns matcher, which perform wildcard match - m, err := matcher.New(matcher.FmtSimplePattern, "hello* !*world *") - if err != nil { - panic(err) - } - m.MatchString("hello") // => true - m.MatchString("hello world") // => true - m.MatchString("Hello world") // => false - m.MatchString("Hello world!") // => false -} - -func ExampleNew_regexp_format() { - // create a regexp matcher, which perform wildcard match - m, err := matcher.New(matcher.FmtRegExp, "[0-9]+") - if err != nil { - panic(err) - } - m.MatchString("1") // => true - m.MatchString("1a") // => true - m.MatchString("a") // => false -} diff --git a/src/go/plugin/go.d/pkg/matcher/expr.go b/src/go/plugin/go.d/pkg/matcher/expr.go deleted file mode 100644 index e5ea0cb2e..000000000 --- a/src/go/plugin/go.d/pkg/matcher/expr.go +++ /dev/null @@ -1,62 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package matcher - -import ( - "errors" - "fmt" -) - -type ( - Expr interface { - Parse() (Matcher, error) - } - - // SimpleExpr is a simple expression to describe the condition: - // (includes[0].Match(v) || includes[1].Match(v) || ...) && !(excludes[0].Match(v) || excludes[1].Match(v) || ...) - SimpleExpr struct { - Includes []string `yaml:"includes,omitempty" json:"includes"` - Excludes []string `yaml:"excludes,omitempty" json:"excludes"` - } -) - -var ( - ErrEmptyExpr = errors.New("empty expression") -) - -// Empty returns true if both Includes and Excludes are empty. 
You can't -func (s *SimpleExpr) Empty() bool { - return len(s.Includes) == 0 && len(s.Excludes) == 0 -} - -// Parse parses the given matchers in Includes and Excludes -func (s *SimpleExpr) Parse() (Matcher, error) { - if len(s.Includes) == 0 && len(s.Excludes) == 0 { - return nil, ErrEmptyExpr - } - var ( - includes = FALSE() - excludes = FALSE() - ) - if len(s.Includes) > 0 { - for _, item := range s.Includes { - m, err := Parse(item) - if err != nil { - return nil, fmt.Errorf("parse matcher %q error: %v", item, err) - } - includes = Or(includes, m) - } - } else { - includes = TRUE() - } - - for _, item := range s.Excludes { - m, err := Parse(item) - if err != nil { - return nil, fmt.Errorf("parse matcher %q error: %v", item, err) - } - excludes = Or(excludes, m) - } - - return And(includes, Not(excludes)), nil -} diff --git a/src/go/plugin/go.d/pkg/matcher/expr_test.go b/src/go/plugin/go.d/pkg/matcher/expr_test.go deleted file mode 100644 index 93a183226..000000000 --- a/src/go/plugin/go.d/pkg/matcher/expr_test.go +++ /dev/null @@ -1,100 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package matcher - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestSimpleExpr_none(t *testing.T) { - expr := &SimpleExpr{} - - m, err := expr.Parse() - assert.EqualError(t, err, ErrEmptyExpr.Error()) - assert.Nil(t, m) -} - -func TestSimpleExpr_include(t *testing.T) { - expr := &SimpleExpr{ - Includes: []string{ - "~ /api/", - "~ .php$", - }, - } - - m, err := expr.Parse() - assert.NoError(t, err) - - assert.True(t, m.MatchString("/api/a.php")) - assert.True(t, m.MatchString("/api/a.php2")) - assert.True(t, m.MatchString("/api2/a.php")) - assert.True(t, m.MatchString("/api/img.php")) - assert.False(t, m.MatchString("/api2/img.php2")) -} - -func TestSimpleExpr_exclude(t *testing.T) { - expr := &SimpleExpr{ - Excludes: []string{ - "~ /api/img", - }, - } - - m, err := expr.Parse() - assert.NoError(t, err) - - assert.True(t, m.MatchString("/api/a.php")) - assert.True(t, m.MatchString("/api/a.php2")) - assert.True(t, m.MatchString("/api2/a.php")) - assert.False(t, m.MatchString("/api/img.php")) - assert.True(t, m.MatchString("/api2/img.php2")) -} - -func TestSimpleExpr_both(t *testing.T) { - expr := &SimpleExpr{ - Includes: []string{ - "~ /api/", - "~ .php$", - }, - Excludes: []string{ - "~ /api/img", - }, - } - - m, err := expr.Parse() - assert.NoError(t, err) - - assert.True(t, m.MatchString("/api/a.php")) - assert.True(t, m.MatchString("/api/a.php2")) - assert.True(t, m.MatchString("/api2/a.php")) - assert.False(t, m.MatchString("/api/img.php")) - assert.False(t, m.MatchString("/api2/img.php2")) -} - -func TestSimpleExpr_Parse_NG(t *testing.T) { - { - expr := &SimpleExpr{ - Includes: []string{ - "~ (ab", - "~ .php$", - }, - } - - m, err := expr.Parse() - assert.Error(t, err) - assert.Nil(t, m) - } - { - expr := &SimpleExpr{ - Excludes: []string{ - "~ (ab", - "~ .php$", - }, - } - - m, err := expr.Parse() - assert.Error(t, err) - assert.Nil(t, m) - } -} diff --git a/src/go/plugin/go.d/pkg/matcher/glob.go b/src/go/plugin/go.d/pkg/matcher/glob.go deleted file mode 100644 index 726c94c45..000000000 --- a/src/go/plugin/go.d/pkg/matcher/glob.go +++ /dev/null @@ -1,265 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package matcher - -import ( - "errors" - "path/filepath" - "regexp" - "unicode/utf8" -) - -// globMatcher implements Matcher, it uses filepath.MatchString to match. 
-type globMatcher string - -var ( - errBadGlobPattern = errors.New("bad glob pattern") - erGlobPattern = regexp.MustCompile(`(?s)^(?:[*?]|\[\^?([^\\-\]]|\\.|.-.)+\]|\\.|[^\*\?\\\[])*$`) -) - -// NewGlobMatcher create a new matcher with glob format -func NewGlobMatcher(expr string) (Matcher, error) { - switch expr { - case "": - return stringFullMatcher(""), nil - case "*": - return TRUE(), nil - } - - // any strings pass this regexp check are valid pattern - if !erGlobPattern.MatchString(expr) { - return nil, errBadGlobPattern - } - - size := len(expr) - chars := []rune(expr) - startWith := true - endWith := true - startIdx := 0 - endIdx := size - 1 - if chars[startIdx] == '*' { - startWith = false - startIdx = 1 - } - if chars[endIdx] == '*' { - endWith = false - endIdx-- - } - - unescapedExpr := make([]rune, 0, endIdx-startIdx+1) - for i := startIdx; i <= endIdx; i++ { - ch := chars[i] - if ch == '\\' { - nextCh := chars[i+1] - unescapedExpr = append(unescapedExpr, nextCh) - i++ - } else if isGlobMeta(ch) { - return globMatcher(expr), nil - } else { - unescapedExpr = append(unescapedExpr, ch) - } - } - - return NewStringMatcher(string(unescapedExpr), startWith, endWith) -} - -func isGlobMeta(ch rune) bool { - switch ch { - case '*', '?', '[': - return true - default: - return false - } -} - -// Match matches. -func (m globMatcher) Match(b []byte) bool { - return m.MatchString(string(b)) -} - -// MatchString matches. -func (m globMatcher) MatchString(line string) bool { - rs, _ := m.globMatch(line) - return rs -} - -func (m globMatcher) globMatch(name string) (matched bool, err error) { - pattern := string(m) -Pattern: - for len(pattern) > 0 { - var star bool - var chunk string - star, chunk, pattern = scanChunk(pattern) - if star && chunk == "" { - // Trailing * matches rest of string unless it has a /. - // return !strings.Contains(name, string(Separator)), nil - - return true, nil - } - // Look for match at current position. - t, ok, err := matchChunk(chunk, name) - // if we're the last chunk, make sure we've exhausted the name - // otherwise we'll give a false result even if we could still match - // using the star - if ok && (len(t) == 0 || len(pattern) > 0) { - name = t - continue - } - if err != nil { - return false, err - } - if star { - // Look for match skipping i+1 bytes. - // Cannot skip /. - for i := 0; i < len(name); i++ { - //for i := 0; i < len(name) && name[i] != Separator; i++ { - t, ok, err := matchChunk(chunk, name[i+1:]) - if ok { - // if we're the last chunk, make sure we exhausted the name - if len(pattern) == 0 && len(t) > 0 { - continue - } - name = t - continue Pattern - } - if err != nil { - return false, err - } - } - } - return false, nil - } - return len(name) == 0, nil -} - -// scanChunk gets the next segment of pattern, which is a non-star string -// possibly preceded by a star. -func scanChunk(pattern string) (star bool, chunk, rest string) { - for len(pattern) > 0 && pattern[0] == '*' { - pattern = pattern[1:] - star = true - } - inrange := false - var i int -Scan: - for i = 0; i < len(pattern); i++ { - switch pattern[i] { - case '\\': - if i+1 < len(pattern) { - i++ - } - case '[': - inrange = true - case ']': - inrange = false - case '*': - if !inrange { - break Scan - } - } - } - return star, pattern[0:i], pattern[i:] -} - -// matchChunk checks whether chunk matches the beginning of s. -// If so, it returns the remainder of s (after the match). -// Chunk is all single-character operators: literals, char classes, and ?. 
-func matchChunk(chunk, s string) (rest string, ok bool, err error) { - for len(chunk) > 0 { - if len(s) == 0 { - return - } - switch chunk[0] { - case '[': - // character class - r, n := utf8.DecodeRuneInString(s) - s = s[n:] - chunk = chunk[1:] - // We can't end right after '[', we're expecting at least - // a closing bracket and possibly a caret. - if len(chunk) == 0 { - err = filepath.ErrBadPattern - return - } - // possibly negated - negated := chunk[0] == '^' - if negated { - chunk = chunk[1:] - } - // parse all ranges - match := false - nrange := 0 - for { - if len(chunk) > 0 && chunk[0] == ']' && nrange > 0 { - chunk = chunk[1:] - break - } - var lo, hi rune - if lo, chunk, err = getEsc(chunk); err != nil { - return - } - hi = lo - if chunk[0] == '-' { - if hi, chunk, err = getEsc(chunk[1:]); err != nil { - return - } - } - if lo <= r && r <= hi { - match = true - } - nrange++ - } - if match == negated { - return - } - - case '?': - //if s[0] == Separator { - // return - //} - _, n := utf8.DecodeRuneInString(s) - s = s[n:] - chunk = chunk[1:] - - case '\\': - chunk = chunk[1:] - if len(chunk) == 0 { - err = filepath.ErrBadPattern - return - } - fallthrough - - default: - if chunk[0] != s[0] { - return - } - s = s[1:] - chunk = chunk[1:] - } - } - return s, true, nil -} - -// getEsc gets a possibly-escaped character from chunk, for a character class. -func getEsc(chunk string) (r rune, nchunk string, err error) { - if len(chunk) == 0 || chunk[0] == '-' || chunk[0] == ']' { - err = filepath.ErrBadPattern - return - } - if chunk[0] == '\\' { - chunk = chunk[1:] - if len(chunk) == 0 { - err = filepath.ErrBadPattern - return - } - } - r, n := utf8.DecodeRuneInString(chunk) - if r == utf8.RuneError && n == 1 { - err = filepath.ErrBadPattern - } - nchunk = chunk[n:] - if len(nchunk) == 0 { - err = filepath.ErrBadPattern - } - return -} diff --git a/src/go/plugin/go.d/pkg/matcher/glob_test.go b/src/go/plugin/go.d/pkg/matcher/glob_test.go deleted file mode 100644 index 09d456105..000000000 --- a/src/go/plugin/go.d/pkg/matcher/glob_test.go +++ /dev/null @@ -1,97 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package matcher - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestNewGlobMatcher(t *testing.T) { - cases := []struct { - expr string - matcher Matcher - }{ - {"", stringFullMatcher("")}, - {"a", stringFullMatcher("a")}, - {"a*b", globMatcher("a*b")}, - {`a*\b`, globMatcher(`a*\b`)}, - {`a\[`, stringFullMatcher(`a[`)}, - {`ab\`, nil}, - {`ab[`, nil}, - {`ab]`, stringFullMatcher("ab]")}, - } - for _, c := range cases { - t.Run(c.expr, func(t *testing.T) { - m, err := NewGlobMatcher(c.expr) - if c.matcher != nil { - assert.NoError(t, err) - assert.Equal(t, c.matcher, m) - } else { - assert.Error(t, err) - } - }) - } -} - -func TestGlobMatcher_MatchString(t *testing.T) { - - cases := []struct { - expected bool - expr string - line string - }{ - {true, "/a/*/d", "/a/b/c/d"}, - {true, "foo*", "foo123"}, - {true, "*foo*", "123foo123"}, - {true, "*foo", "123foo"}, - {true, "foo*bar", "foobar"}, - {true, "foo*bar", "foo baz bar"}, - {true, "a[bc]d", "abd"}, - {true, "a[^bc]d", "add"}, - {true, "a??d", "abcd"}, - {true, `a\??d`, "a?cd"}, - {true, "a[b-z]d", "abd"}, - {false, "/a/*/d", "a/b/c/d"}, - {false, "/a/*/d", "This will fail!"}, - } - - for _, c := range cases { - t.Run(c.line, func(t *testing.T) { - m := globMatcher(c.expr) - assert.Equal(t, c.expected, m.Match([]byte(c.line))) - assert.Equal(t, c.expected, m.MatchString(c.line)) - }) - } -} - -func 
BenchmarkGlob_MatchString(b *testing.B) { - benchmarks := []struct { - expr string - test string - }{ - {"", ""}, - {"abc", "abcd"}, - {"*abc", "abcd"}, - {"abc*", "abcd"}, - {"*abc*", "abcd"}, - {"[a-z]", "abcd"}, - } - for _, bm := range benchmarks { - b.Run(bm.expr+"_raw", func(b *testing.B) { - m := globMatcher(bm.expr) - b.ResetTimer() - for i := 0; i < b.N; i++ { - m.MatchString(bm.test) - } - }) - b.Run(bm.expr+"_optimized", func(b *testing.B) { - m, _ := NewGlobMatcher(bm.expr) - b.ResetTimer() - for i := 0; i < b.N; i++ { - m.MatchString(bm.test) - } - }) - } -} diff --git a/src/go/plugin/go.d/pkg/matcher/logical.go b/src/go/plugin/go.d/pkg/matcher/logical.go deleted file mode 100644 index af07be8f4..000000000 --- a/src/go/plugin/go.d/pkg/matcher/logical.go +++ /dev/null @@ -1,101 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package matcher - -type ( - trueMatcher struct{} - falseMatcher struct{} - andMatcher struct{ lhs, rhs Matcher } - orMatcher struct{ lhs, rhs Matcher } - negMatcher struct{ Matcher } -) - -var ( - matcherT trueMatcher - matcherF falseMatcher -) - -// TRUE returns a matcher which always returns true -func TRUE() Matcher { - return matcherT -} - -// FALSE returns a matcher which always returns false -func FALSE() Matcher { - return matcherF -} - -// Not returns a matcher which positive the sub-matcher's result -func Not(m Matcher) Matcher { - switch m { - case TRUE(): - return FALSE() - case FALSE(): - return TRUE() - default: - return negMatcher{m} - } -} - -// And returns a matcher which returns true only if all of it's sub-matcher return true -func And(lhs, rhs Matcher, others ...Matcher) Matcher { - var matcher Matcher - switch lhs { - case TRUE(): - matcher = rhs - case FALSE(): - matcher = FALSE() - default: - switch rhs { - case TRUE(): - matcher = lhs - case FALSE(): - matcher = FALSE() - default: - matcher = andMatcher{lhs, rhs} - } - } - if len(others) > 0 { - return And(matcher, others[0], others[1:]...) - } - return matcher -} - -// Or returns a matcher which returns true if any of it's sub-matcher return true -func Or(lhs, rhs Matcher, others ...Matcher) Matcher { - var matcher Matcher - switch lhs { - case TRUE(): - matcher = TRUE() - case FALSE(): - matcher = rhs - default: - switch rhs { - case TRUE(): - matcher = TRUE() - case FALSE(): - matcher = lhs - default: - matcher = orMatcher{lhs, rhs} - } - } - if len(others) > 0 { - return Or(matcher, others[0], others[1:]...) 
- } - return matcher -} - -func (trueMatcher) Match(_ []byte) bool { return true } -func (trueMatcher) MatchString(_ string) bool { return true } - -func (falseMatcher) Match(_ []byte) bool { return false } -func (falseMatcher) MatchString(_ string) bool { return false } - -func (m andMatcher) Match(b []byte) bool { return m.lhs.Match(b) && m.rhs.Match(b) } -func (m andMatcher) MatchString(s string) bool { return m.lhs.MatchString(s) && m.rhs.MatchString(s) } - -func (m orMatcher) Match(b []byte) bool { return m.lhs.Match(b) || m.rhs.Match(b) } -func (m orMatcher) MatchString(s string) bool { return m.lhs.MatchString(s) || m.rhs.MatchString(s) } - -func (m negMatcher) Match(b []byte) bool { return !m.Matcher.Match(b) } -func (m negMatcher) MatchString(s string) bool { return !m.Matcher.MatchString(s) } diff --git a/src/go/plugin/go.d/pkg/matcher/logical_test.go b/src/go/plugin/go.d/pkg/matcher/logical_test.go deleted file mode 100644 index 64491f1ad..000000000 --- a/src/go/plugin/go.d/pkg/matcher/logical_test.go +++ /dev/null @@ -1,97 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package matcher - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestTRUE(t *testing.T) { - assert.True(t, TRUE().Match(nil)) - assert.True(t, TRUE().MatchString("")) -} - -func TestFALSE(t *testing.T) { - assert.False(t, FALSE().Match(nil)) - assert.False(t, FALSE().MatchString("")) -} - -func TestAnd(t *testing.T) { - assert.Equal(t, - matcherF, - And(FALSE(), stringFullMatcher(""))) - assert.Equal(t, - matcherF, - And(stringFullMatcher(""), FALSE())) - - assert.Equal(t, - stringFullMatcher(""), - And(TRUE(), stringFullMatcher(""))) - assert.Equal(t, - stringFullMatcher(""), - And(stringFullMatcher(""), TRUE())) - - assert.Equal(t, - andMatcher{stringPartialMatcher("a"), stringPartialMatcher("b")}, - And(stringPartialMatcher("a"), stringPartialMatcher("b"))) - - assert.Equal(t, - andMatcher{ - andMatcher{stringPartialMatcher("a"), stringPartialMatcher("b")}, - stringPartialMatcher("c"), - }, - And(stringPartialMatcher("a"), stringPartialMatcher("b"), stringPartialMatcher("c"))) -} - -func TestOr(t *testing.T) { - assert.Equal(t, - stringFullMatcher(""), - Or(FALSE(), stringFullMatcher(""))) - assert.Equal(t, - stringFullMatcher(""), - Or(stringFullMatcher(""), FALSE())) - - assert.Equal(t, - TRUE(), - Or(TRUE(), stringFullMatcher(""))) - assert.Equal(t, - TRUE(), - Or(stringFullMatcher(""), TRUE())) - - assert.Equal(t, - orMatcher{stringPartialMatcher("a"), stringPartialMatcher("b")}, - Or(stringPartialMatcher("a"), stringPartialMatcher("b"))) - - assert.Equal(t, - orMatcher{ - orMatcher{stringPartialMatcher("a"), stringPartialMatcher("b")}, - stringPartialMatcher("c"), - }, - Or(stringPartialMatcher("a"), stringPartialMatcher("b"), stringPartialMatcher("c"))) -} - -func TestAndMatcher_Match(t *testing.T) { - and := andMatcher{ - stringPrefixMatcher("a"), - stringSuffixMatcher("c"), - } - assert.True(t, and.Match([]byte("abc"))) - assert.True(t, and.MatchString("abc")) -} - -func TestOrMatcher_Match(t *testing.T) { - or := orMatcher{ - stringPrefixMatcher("a"), - stringPrefixMatcher("c"), - } - assert.True(t, or.Match([]byte("aaa"))) - assert.True(t, or.MatchString("ccc")) -} - -func TestNegMatcher_Match(t *testing.T) { - neg := negMatcher{stringPrefixMatcher("a")} - assert.False(t, neg.Match([]byte("aaa"))) - assert.True(t, neg.MatchString("ccc")) -} diff --git a/src/go/plugin/go.d/pkg/matcher/matcher.go b/src/go/plugin/go.d/pkg/matcher/matcher.go deleted file mode 100644 
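`logical.go` above folds the `TRUE()`/`FALSE()` singletons away at construction time, so constant branches cost nothing at match time: `TRUE()` is the identity for `And`, `FALSE()` for `Or`, and `Not` flips the constants without allocating a `negMatcher`. A sketch, with the same assumed import path as above:

```go
package main

import (
	"fmt"

	"github.com/netdata/netdata/go/pkg/matcher" // path assumed, as above
)

func main() {
	foo := matcher.Must(matcher.New(matcher.FmtGlob, "foo*"))

	// Constant folding: no andMatcher/orMatcher wrapper is built.
	fmt.Println(matcher.And(matcher.TRUE(), foo) == foo)  // true
	fmt.Println(matcher.Or(matcher.FALSE(), foo) == foo)  // true
	fmt.Println(matcher.Not(matcher.FALSE()) == matcher.TRUE()) // true
}
```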
index 76d903325..000000000 --- a/src/go/plugin/go.d/pkg/matcher/matcher.go +++ /dev/null @@ -1,149 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package matcher - -import ( - "errors" - "fmt" - "regexp" -) - -type ( - // Matcher is an interface that wraps MatchString method. - Matcher interface { - // Match performs match against given []byte - Match(b []byte) bool - // MatchString performs match against given string - MatchString(string) bool - } - - // Format matcher format - Format string -) - -const ( - // FmtString is a string match format. - FmtString Format = "string" - // FmtGlob is a glob match format. - FmtGlob Format = "glob" - // FmtRegExp is a regex match format. - FmtRegExp Format = "regexp" - // FmtSimplePattern is a simple pattern match format - // https://docs.netdata.cloud/libnetdata/simple_pattern/ - FmtSimplePattern Format = "simple_patterns" - - // Separator is a separator between match format and expression. - Separator = ":" -) - -const ( - symString = "=" - symGlob = "*" - symRegExp = "~" -) - -var ( - reShortSyntax = regexp.MustCompile(`(?s)^(!)?(.)\s*(.*)$`) - reLongSyntax = regexp.MustCompile(`(?s)^(!)?([^:]+):(.*)$`) - - errNotShortSyntax = errors.New("not short syntax") -) - -// Must is a helper that wraps a call to a function returning (Matcher, error) and panics if the error is non-nil. -// It is intended for use in variable initializations such as -// -// var m = matcher.Must(matcher.New(matcher.FmtString, "hello world")) -func Must(m Matcher, err error) Matcher { - if err != nil { - panic(err) - } - return m -} - -// New create a matcher -func New(format Format, expr string) (Matcher, error) { - switch format { - case FmtString: - return NewStringMatcher(expr, true, true) - case FmtGlob: - return NewGlobMatcher(expr) - case FmtRegExp: - return NewRegExpMatcher(expr) - case FmtSimplePattern: - return NewSimplePatternsMatcher(expr) - default: - return nil, fmt.Errorf("unsupported matcher format: '%s'", format) - } -} - -// Parse parses line and returns appropriate matcher based on matched format. -// -// Short Syntax -// -// ::= [ ] -// ::= '!' -// negative expression -// ::= [ '=', '~', '*' ] -// '=' means string match -// '~' means regexp match -// '*' means glob match -// ::= { ' ' | '\t' | '\n' | '\n' | '\r' } -// ::= any string -// -// Long Syntax -// -// ::= [ ] -// ::= [ 'string' | 'glob' | 'regexp' | 'simple_patterns' ] -// ::= '!' 
-// negative expression -// ::= ':' -// ::= any string -func Parse(line string) (Matcher, error) { - matcher, err := parseShortFormat(line) - if err == nil { - return matcher, nil - } - return parseLongSyntax(line) -} - -func parseShortFormat(line string) (Matcher, error) { - m := reShortSyntax.FindStringSubmatch(line) - if m == nil { - return nil, errNotShortSyntax - } - var format Format - switch m[2] { - case symString: - format = FmtString - case symGlob: - format = FmtGlob - case symRegExp: - format = FmtRegExp - default: - return nil, fmt.Errorf("invalid short syntax: unknown symbol '%s'", m[2]) - } - expr := m[3] - matcher, err := New(format, expr) - if err != nil { - return nil, err - } - if m[1] != "" { - matcher = Not(matcher) - } - return matcher, nil -} - -func parseLongSyntax(line string) (Matcher, error) { - m := reLongSyntax.FindStringSubmatch(line) - if m == nil { - return nil, fmt.Errorf("invalid syntax") - } - matcher, err := New(Format(m[2]), m[3]) - if err != nil { - return nil, err - } - if m[1] != "" { - matcher = Not(matcher) - } - return matcher, nil -} diff --git a/src/go/plugin/go.d/pkg/matcher/matcher_test.go b/src/go/plugin/go.d/pkg/matcher/matcher_test.go deleted file mode 100644 index f304d983d..000000000 --- a/src/go/plugin/go.d/pkg/matcher/matcher_test.go +++ /dev/null @@ -1,122 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package matcher - -import ( - "log" - "reflect" - "regexp" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/stretchr/testify/assert" -) - -func TestParse(t *testing.T) { - tests := []struct { - valid bool - line string - matcher Matcher - }{ - {false, "", nil}, - {false, "abc", nil}, - {false, `~ abc\`, nil}, - {false, `invalid_fmt:abc`, nil}, - - {true, "=", stringFullMatcher("")}, - {true, "= ", stringFullMatcher("")}, - {true, "=full", stringFullMatcher("full")}, - {true, "= full", stringFullMatcher("full")}, - {true, "= \t\ffull", stringFullMatcher("full")}, - - {true, "string:", stringFullMatcher("")}, - {true, "string:full", stringFullMatcher("full")}, - - {true, "!=", Not(stringFullMatcher(""))}, - {true, "!=full", Not(stringFullMatcher("full"))}, - {true, "!= full", Not(stringFullMatcher("full"))}, - {true, "!= \t\ffull", Not(stringFullMatcher("full"))}, - - {true, "!string:", Not(stringFullMatcher(""))}, - {true, "!string:full", Not(stringFullMatcher("full"))}, - - {true, "~", TRUE()}, - {true, "~ ", TRUE()}, - {true, `~ ^$`, stringFullMatcher("")}, - {true, "~ partial", stringPartialMatcher("partial")}, - {true, `~ part\.ial`, stringPartialMatcher("part.ial")}, - {true, "~ ^prefix", stringPrefixMatcher("prefix")}, - {true, "~ suffix$", stringSuffixMatcher("suffix")}, - {true, "~ ^full$", stringFullMatcher("full")}, - {true, "~ [0-9]+", regexp.MustCompile(`[0-9]+`)}, - {true, `~ part\s1`, regexp.MustCompile(`part\s1`)}, - - {true, "!~", FALSE()}, - {true, "!~ ", FALSE()}, - {true, "!~ partial", Not(stringPartialMatcher("partial"))}, - {true, `!~ part\.ial`, Not(stringPartialMatcher("part.ial"))}, - {true, "!~ ^prefix", Not(stringPrefixMatcher("prefix"))}, - {true, "!~ suffix$", Not(stringSuffixMatcher("suffix"))}, - {true, "!~ ^full$", Not(stringFullMatcher("full"))}, - {true, "!~ [0-9]+", Not(regexp.MustCompile(`[0-9]+`))}, - - {true, `regexp:partial`, stringPartialMatcher("partial")}, - {true, `!regexp:partial`, Not(stringPartialMatcher("partial"))}, - - {true, `*`, stringFullMatcher("")}, - {true, `* foo`, stringFullMatcher("foo")}, - {true, `* foo*`, stringPrefixMatcher("foo")}, - {true, `* 
*foo`, stringSuffixMatcher("foo")}, - {true, `* *foo*`, stringPartialMatcher("foo")}, - {true, `* foo*bar`, globMatcher("foo*bar")}, - {true, `* *foo*bar`, globMatcher("*foo*bar")}, - {true, `* foo?bar`, globMatcher("foo?bar")}, - - {true, `!*`, Not(stringFullMatcher(""))}, - {true, `!* foo`, Not(stringFullMatcher("foo"))}, - {true, `!* foo*`, Not(stringPrefixMatcher("foo"))}, - {true, `!* *foo`, Not(stringSuffixMatcher("foo"))}, - {true, `!* *foo*`, Not(stringPartialMatcher("foo"))}, - {true, `!* foo*bar`, Not(globMatcher("foo*bar"))}, - {true, `!* *foo*bar`, Not(globMatcher("*foo*bar"))}, - {true, `!* foo?bar`, Not(globMatcher("foo?bar"))}, - - {true, "glob:foo*bar", globMatcher("foo*bar")}, - {true, "!glob:foo*bar", Not(globMatcher("foo*bar"))}, - - {true, `simple_patterns:`, FALSE()}, - {true, `simple_patterns: `, FALSE()}, - {true, `simple_patterns: foo`, simplePatternsMatcher{ - {stringFullMatcher("foo"), true}, - }}, - {true, `simple_patterns: !foo`, simplePatternsMatcher{ - {stringFullMatcher("foo"), false}, - }}, - } - for _, test := range tests { - t.Run(test.line, func(t *testing.T) { - m, err := Parse(test.line) - if test.valid { - require.NoError(t, err) - if test.matcher != nil { - log.Printf("%s %#v", reflect.TypeOf(m).Name(), m) - assert.Equal(t, test.matcher, m) - } - } else { - assert.Error(t, err) - } - }) - } -} - -func TestMust(t *testing.T) { - assert.NotPanics(t, func() { - m := Must(New(FmtRegExp, `[0-9]+`)) - assert.NotNil(t, m) - }) - - assert.Panics(t, func() { - Must(New(FmtRegExp, `[0-9]+\`)) - }) -} diff --git a/src/go/plugin/go.d/pkg/matcher/regexp.go b/src/go/plugin/go.d/pkg/matcher/regexp.go deleted file mode 100644 index 3a297f3b3..000000000 --- a/src/go/plugin/go.d/pkg/matcher/regexp.go +++ /dev/null @@ -1,60 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package matcher - -import "regexp" - -// NewRegExpMatcher create new matcher with RegExp format -func NewRegExpMatcher(expr string) (Matcher, error) { - switch expr { - case "", "^", "$": - return TRUE(), nil - case "^$", "$^": - return NewStringMatcher("", true, true) - } - size := len(expr) - chars := []rune(expr) - var startWith, endWith bool - startIdx := 0 - endIdx := size - 1 - if chars[startIdx] == '^' { - startWith = true - startIdx = 1 - } - if chars[endIdx] == '$' { - endWith = true - endIdx-- - } - - unescapedExpr := make([]rune, 0, endIdx-startIdx+1) - for i := startIdx; i <= endIdx; i++ { - ch := chars[i] - if ch == '\\' { - if i == endIdx { // end with '\' => invalid format - return regexp.Compile(expr) - } - nextCh := chars[i+1] - if !isRegExpMeta(nextCh) { // '\' + mon-meta char => special meaning - return regexp.Compile(expr) - } - unescapedExpr = append(unescapedExpr, nextCh) - i++ - } else if isRegExpMeta(ch) { - return regexp.Compile(expr) - } else { - unescapedExpr = append(unescapedExpr, ch) - } - } - - return NewStringMatcher(string(unescapedExpr), startWith, endWith) -} - -// isRegExpMeta reports whether byte b needs to be escaped by QuoteMeta. 
-func isRegExpMeta(b rune) bool { - switch b { - case '\\', '.', '+', '*', '?', '(', ')', '|', '[', ']', '{', '}', '^', '$': - return true - default: - return false - } -} diff --git a/src/go/plugin/go.d/pkg/matcher/regexp_test.go b/src/go/plugin/go.d/pkg/matcher/regexp_test.go deleted file mode 100644 index fe644747b..000000000 --- a/src/go/plugin/go.d/pkg/matcher/regexp_test.go +++ /dev/null @@ -1,66 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package matcher - -import ( - "regexp" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestRegExpMatch_Match(t *testing.T) { - m := regexp.MustCompile("[0-9]+") - - cases := []struct { - expected bool - line string - }{ - { - expected: true, - line: "2019", - }, - { - expected: true, - line: "It's over 9000!", - }, - { - expected: false, - line: "This will never fail!", - }, - } - - for _, c := range cases { - assert.Equal(t, c.expected, m.MatchString(c.line)) - } -} - -func BenchmarkRegExp_MatchString(b *testing.B) { - benchmarks := []struct { - expr string - test string - }{ - {"", ""}, - {"abc", "abcd"}, - {"^abc", "abcd"}, - {"abc$", "abcd"}, - {"^abc$", "abcd"}, - {"[a-z]+", "abcd"}, - } - for _, bm := range benchmarks { - b.Run(bm.expr+"_raw", func(b *testing.B) { - m := regexp.MustCompile(bm.expr) - b.ResetTimer() - for i := 0; i < b.N; i++ { - m.MatchString(bm.test) - } - }) - b.Run(bm.expr+"_optimized", func(b *testing.B) { - m, _ := NewRegExpMatcher(bm.expr) - b.ResetTimer() - for i := 0; i < b.N; i++ { - m.MatchString(bm.test) - } - }) - } -} diff --git a/src/go/plugin/go.d/pkg/matcher/simple_patterns.go b/src/go/plugin/go.d/pkg/matcher/simple_patterns.go deleted file mode 100644 index 91a0a3bbd..000000000 --- a/src/go/plugin/go.d/pkg/matcher/simple_patterns.go +++ /dev/null @@ -1,65 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package matcher - -import ( - "strings" -) - -type ( - simplePatternTerm struct { - matcher Matcher - positive bool - } - - // simplePatternsMatcher patterns. - simplePatternsMatcher []simplePatternTerm -) - -// NewSimplePatternsMatcher creates new simple patterns. It returns error in case one of patterns has bad syntax. -func NewSimplePatternsMatcher(expr string) (Matcher, error) { - ps := simplePatternsMatcher{} - - for _, pattern := range strings.Fields(expr) { - if err := ps.add(pattern); err != nil { - return nil, err - } - } - if len(ps) == 0 { - return FALSE(), nil - } - return ps, nil -} - -func (m *simplePatternsMatcher) add(term string) error { - p := simplePatternTerm{} - if term[0] == '!' { - p.positive = false - term = term[1:] - } else { - p.positive = true - } - matcher, err := NewGlobMatcher(term) - if err != nil { - return err - } - - p.matcher = matcher - *m = append(*m, p) - - return nil -} - -func (m simplePatternsMatcher) Match(b []byte) bool { - return m.MatchString(string(b)) -} - -// MatchString matches. 
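The deleted `regexp.go` never hands purely literal patterns to `regexp.Compile`: it strips the `^`/`$` anchors, unescapes the remainder, and builds one of the four plain string matchers instead (this is exactly what the `_raw` vs `_optimized` benchmark above measures). A small demonstration, import path assumed as above:

```go
package main

import (
	"fmt"

	"github.com/netdata/netdata/go/pkg/matcher" // path assumed, as above
)

func main() {
	// Anchors decide which string matcher a literal pattern becomes:
	// ^abc$ -> full, ^abc -> prefix, abc$ -> suffix, abc -> partial.
	for _, expr := range []string{"^abc$", "^abc", "abc$", "abc"} {
		m, err := matcher.NewRegExpMatcher(expr)
		if err != nil {
			panic(err)
		}
		// Only the unanchored form matches a substring occurrence.
		fmt.Printf("%q matches \"xabcx\": %v\n", expr, m.MatchString("xabcx"))
	}
}
```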
-func (m simplePatternsMatcher) MatchString(line string) bool { - for _, p := range m { - if p.matcher.MatchString(line) { - return p.positive - } - } - return false -} diff --git a/src/go/plugin/go.d/pkg/matcher/simple_patterns_test.go b/src/go/plugin/go.d/pkg/matcher/simple_patterns_test.go deleted file mode 100644 index 016096d57..000000000 --- a/src/go/plugin/go.d/pkg/matcher/simple_patterns_test.go +++ /dev/null @@ -1,88 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package matcher - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestNewSimplePatternsMatcher(t *testing.T) { - tests := []struct { - expr string - expected Matcher - }{ - {"", FALSE()}, - {" ", FALSE()}, - {"foo", simplePatternsMatcher{ - {stringFullMatcher("foo"), true}, - }}, - {"!foo", simplePatternsMatcher{ - {stringFullMatcher("foo"), false}, - }}, - {"foo bar", simplePatternsMatcher{ - {stringFullMatcher("foo"), true}, - {stringFullMatcher("bar"), true}, - }}, - {"*foobar* !foo* !*bar *", simplePatternsMatcher{ - {stringPartialMatcher("foobar"), true}, - {stringPrefixMatcher("foo"), false}, - {stringSuffixMatcher("bar"), false}, - {TRUE(), true}, - }}, - {`ab\`, nil}, - } - for _, test := range tests { - t.Run(test.expr, func(t *testing.T) { - matcher, err := NewSimplePatternsMatcher(test.expr) - if test.expected == nil { - assert.Error(t, err) - } else { - assert.Equal(t, test.expected, matcher) - } - }) - } -} - -func TestSimplePatterns_Match(t *testing.T) { - m, err := NewSimplePatternsMatcher("*foobar* !foo* !*bar *") - - require.NoError(t, err) - - cases := []struct { - expected bool - line string - }{ - { - expected: true, - line: "hello world", - }, - { - expected: false, - line: "hello world bar", - }, - { - expected: true, - line: "hello world foobar", - }, - } - - for _, c := range cases { - t.Run(c.line, func(t *testing.T) { - assert.Equal(t, c.expected, m.MatchString(c.line)) - assert.Equal(t, c.expected, m.Match([]byte(c.line))) - }) - } -} - -func TestSimplePatterns_Match2(t *testing.T) { - m, err := NewSimplePatternsMatcher("*foobar") - - require.NoError(t, err) - - assert.True(t, m.MatchString("foobar")) - assert.True(t, m.MatchString("foo foobar")) - assert.False(t, m.MatchString("foobar baz")) -} diff --git a/src/go/plugin/go.d/pkg/matcher/string.go b/src/go/plugin/go.d/pkg/matcher/string.go deleted file mode 100644 index 43ba43eb3..000000000 --- a/src/go/plugin/go.d/pkg/matcher/string.go +++ /dev/null @@ -1,48 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package matcher - -import ( - "bytes" - "strings" -) - -type ( - // stringFullMatcher implements Matcher, it uses "==" to match. - stringFullMatcher string - - // stringPartialMatcher implements Matcher, it uses strings.Contains to match. - stringPartialMatcher string - - // stringPrefixMatcher implements Matcher, it uses strings.HasPrefix to match. - stringPrefixMatcher string - - // stringSuffixMatcher implements Matcher, it uses strings.HasSuffix to match. 
- stringSuffixMatcher string -) - -// NewStringMatcher create a new matcher with string format -func NewStringMatcher(s string, startWith, endWith bool) (Matcher, error) { - if startWith { - if endWith { - return stringFullMatcher(s), nil - } - return stringPrefixMatcher(s), nil - } - if endWith { - return stringSuffixMatcher(s), nil - } - return stringPartialMatcher(s), nil -} - -func (m stringFullMatcher) Match(b []byte) bool { return string(m) == string(b) } -func (m stringFullMatcher) MatchString(line string) bool { return string(m) == line } - -func (m stringPartialMatcher) Match(b []byte) bool { return bytes.Contains(b, []byte(m)) } -func (m stringPartialMatcher) MatchString(line string) bool { return strings.Contains(line, string(m)) } - -func (m stringPrefixMatcher) Match(b []byte) bool { return bytes.HasPrefix(b, []byte(m)) } -func (m stringPrefixMatcher) MatchString(line string) bool { return strings.HasPrefix(line, string(m)) } - -func (m stringSuffixMatcher) Match(b []byte) bool { return bytes.HasSuffix(b, []byte(m)) } -func (m stringSuffixMatcher) MatchString(line string) bool { return strings.HasSuffix(line, string(m)) } diff --git a/src/go/plugin/go.d/pkg/matcher/string_test.go b/src/go/plugin/go.d/pkg/matcher/string_test.go deleted file mode 100644 index 1694efbd0..000000000 --- a/src/go/plugin/go.d/pkg/matcher/string_test.go +++ /dev/null @@ -1,62 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package matcher - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -var stringMatcherTestCases = []struct { - line string - expr string - full, prefix, suffix, partial bool -}{ - {"", "", true, true, true, true}, - {"abc", "", false, true, true, true}, - {"power", "pow", false, true, false, true}, - {"netdata", "data", false, false, true, true}, - {"abc", "def", false, false, false, false}, - {"soon", "o", false, false, false, true}, -} - -func TestStringFullMatcher_MatchString(t *testing.T) { - for _, c := range stringMatcherTestCases { - t.Run(c.line, func(t *testing.T) { - m := stringFullMatcher(c.expr) - assert.Equal(t, c.full, m.Match([]byte(c.line))) - assert.Equal(t, c.full, m.MatchString(c.line)) - }) - } -} - -func TestStringPrefixMatcher_MatchString(t *testing.T) { - for _, c := range stringMatcherTestCases { - t.Run(c.line, func(t *testing.T) { - m := stringPrefixMatcher(c.expr) - assert.Equal(t, c.prefix, m.Match([]byte(c.line))) - assert.Equal(t, c.prefix, m.MatchString(c.line)) - }) - } -} - -func TestStringSuffixMatcher_MatchString(t *testing.T) { - for _, c := range stringMatcherTestCases { - t.Run(c.line, func(t *testing.T) { - m := stringSuffixMatcher(c.expr) - assert.Equal(t, c.suffix, m.Match([]byte(c.line))) - assert.Equal(t, c.suffix, m.MatchString(c.line)) - }) - } -} - -func TestStringPartialMatcher_MatchString(t *testing.T) { - for _, c := range stringMatcherTestCases { - t.Run(c.line, func(t *testing.T) { - m := stringPartialMatcher(c.expr) - assert.Equal(t, c.partial, m.Match([]byte(c.line))) - assert.Equal(t, c.partial, m.MatchString(c.line)) - }) - } -} diff --git a/src/go/plugin/go.d/pkg/metrics/unique_counter.go b/src/go/plugin/go.d/pkg/metrics/unique_counter.go index da80fd3d0..046bb3e1e 100644 --- a/src/go/plugin/go.d/pkg/metrics/unique_counter.go +++ b/src/go/plugin/go.d/pkg/metrics/unique_counter.go @@ -3,8 +3,9 @@ package metrics import ( - "github.com/axiomhq/hyperloglog" "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm" + + "github.com/axiomhq/hyperloglog" ) type ( diff --git 
a/src/go/plugin/go.d/pkg/multipath/multipath.go b/src/go/plugin/go.d/pkg/multipath/multipath.go deleted file mode 100644 index 6172def06..000000000 --- a/src/go/plugin/go.d/pkg/multipath/multipath.go +++ /dev/null @@ -1,90 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package multipath - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "slices" - "strings" - - "github.com/mitchellh/go-homedir" -) - -type ErrNotFound struct{ msg string } - -func (e ErrNotFound) Error() string { return e.msg } - -// IsNotFound returns a boolean indicating whether the error is ErrNotFound or not. -func IsNotFound(err error) bool { - var errNotFound ErrNotFound - return errors.As(err, &errNotFound) -} - -// MultiPath multi-paths -type MultiPath []string - -// New multi-paths -func New(paths ...string) MultiPath { - set := map[string]bool{} - mPath := make(MultiPath, 0) - - for _, dir := range paths { - if dir == "" { - continue - } - if d, err := homedir.Expand(dir); err != nil { - dir = d - } - if !set[dir] { - mPath = append(mPath, dir) - set[dir] = true - } - } - - return mPath -} - -// Find finds a file in given paths -func (p MultiPath) Find(filename string) (string, error) { - for _, dir := range p { - file := filepath.Join(dir, filename) - if _, err := os.Stat(file); !os.IsNotExist(err) { - return file, nil - } - } - return "", ErrNotFound{msg: fmt.Sprintf("can't find '%s' in %v", filename, p)} -} - -func (p MultiPath) FindFiles(suffixes ...string) ([]string, error) { - set := make(map[string]bool) - var files []string - - for _, dir := range p { - entries, err := os.ReadDir(dir) - if err != nil { - continue - } - - for _, e := range entries { - if !e.Type().IsRegular() { - continue - } - - ext := filepath.Ext(e.Name()) - name := strings.TrimSuffix(e.Name(), ext) - - if (len(suffixes) != 0 && !slices.Contains(suffixes, ext)) || set[name] { - continue - } - - set[name] = true - file := filepath.Join(dir, e.Name()) - files = append(files, file) - } - } - - return files, nil -} diff --git a/src/go/plugin/go.d/pkg/multipath/multipath_test.go b/src/go/plugin/go.d/pkg/multipath/multipath_test.go deleted file mode 100644 index cd6c90d95..000000000 --- a/src/go/plugin/go.d/pkg/multipath/multipath_test.go +++ /dev/null @@ -1,60 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package multipath - -import ( - "errors" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestNew(t *testing.T) { - assert.Len( - t, - New("path1", "path2", "path2", "", "path3"), - 3, - ) -} - -func TestMultiPath_Find(t *testing.T) { - m := New("path1", "testdata/data1") - - v, err := m.Find("not exist") - assert.Zero(t, v) - assert.Error(t, err) - - v, err = m.Find("test-empty.conf") - assert.Equal(t, "testdata/data1/test-empty.conf", v) - assert.Nil(t, err) - - v, err = m.Find("test.conf") - assert.Equal(t, "testdata/data1/test.conf", v) - assert.Nil(t, err) -} - -func TestIsNotFound(t *testing.T) { - assert.True(t, IsNotFound(ErrNotFound{})) - assert.False(t, IsNotFound(errors.New(""))) -} - -func TestMultiPath_FindFiles(t *testing.T) { - m := New("path1", "testdata/data2", "testdata/data1") - - files, err := m.FindFiles(".conf") - assert.NoError(t, err) - assert.Equal(t, []string{"testdata/data2/test-empty.conf", "testdata/data2/test.conf"}, files) - - files, err = m.FindFiles() - assert.NoError(t, err) - assert.Equal(t, []string{"testdata/data2/test-empty.conf", "testdata/data2/test.conf"}, files) - - files, err = m.FindFiles(".not_exist") - assert.NoError(t, err) - assert.Equal(t, 
[]string(nil), files) - - m = New("path1", "testdata/data1", "testdata/data2") - files, err = m.FindFiles(".conf") - assert.NoError(t, err) - assert.Equal(t, []string{"testdata/data1/test-empty.conf", "testdata/data1/test.conf"}, files) -} diff --git a/src/go/plugin/go.d/pkg/multipath/testdata/data1/test-empty.conf b/src/go/plugin/go.d/pkg/multipath/testdata/data1/test-empty.conf deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/go/plugin/go.d/pkg/multipath/testdata/data1/test.conf b/src/go/plugin/go.d/pkg/multipath/testdata/data1/test.conf deleted file mode 100644 index aebe64730..000000000 --- a/src/go/plugin/go.d/pkg/multipath/testdata/data1/test.conf +++ /dev/null @@ -1 +0,0 @@ -not empty! \ No newline at end of file diff --git a/src/go/plugin/go.d/pkg/multipath/testdata/data2/test-empty.conf b/src/go/plugin/go.d/pkg/multipath/testdata/data2/test-empty.conf deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/go/plugin/go.d/pkg/multipath/testdata/data2/test.conf b/src/go/plugin/go.d/pkg/multipath/testdata/data2/test.conf deleted file mode 100644 index aebe64730..000000000 --- a/src/go/plugin/go.d/pkg/multipath/testdata/data2/test.conf +++ /dev/null @@ -1 +0,0 @@ -not empty! \ No newline at end of file diff --git a/src/go/plugin/go.d/pkg/prometheus/client.go b/src/go/plugin/go.d/pkg/prometheus/client.go index 19d6bcfbc..d43b2240d 100644 --- a/src/go/plugin/go.d/pkg/prometheus/client.go +++ b/src/go/plugin/go.d/pkg/prometheus/client.go @@ -28,7 +28,7 @@ type ( prometheus struct { client *http.Client - request web.Request + request web.RequestConfig filepath string sr selector.Selector @@ -46,16 +46,12 @@ const ( ) // New creates a Prometheus instance. -func New(client *http.Client, request web.Request) Prometheus { - return &prometheus{ - client: client, - request: request, - buf: bytes.NewBuffer(make([]byte, 0, 16000)), - } +func New(client *http.Client, request web.RequestConfig) Prometheus { + return NewWithSelector(client, request, nil) } // NewWithSelector creates a Prometheus instance with the selector. 
-func NewWithSelector(client *http.Client, request web.Request, sr selector.Selector) Prometheus { +func NewWithSelector(client *http.Client, request web.RequestConfig, sr selector.Selector) Prometheus { p := &prometheus{ client: client, request: request, @@ -103,7 +99,7 @@ func (p *prometheus) fetch(w io.Writer) error { if err != nil { return err } - defer f.Close() + defer func() { _ = f.Close() }() _, err = io.Copy(w, f) @@ -123,10 +119,7 @@ func (p *prometheus) fetch(w io.Writer) error { return err } - defer func() { - _, _ = io.Copy(io.Discard, resp.Body) - _ = resp.Body.Close() - }() + defer web.CloseBody(resp) if resp.StatusCode != http.StatusOK { return fmt.Errorf("server '%s' returned HTTP status code %d (%s)", req.URL, resp.StatusCode, resp.Status) diff --git a/src/go/plugin/go.d/pkg/prometheus/client_test.go b/src/go/plugin/go.d/pkg/prometheus/client_test.go index e6f61b9af..af380b3cd 100644 --- a/src/go/plugin/go.d/pkg/prometheus/client_test.go +++ b/src/go/plugin/go.d/pkg/prometheus/client_test.go @@ -39,7 +39,7 @@ func TestPrometheus404(t *testing.T) { ts := httptest.NewServer(tsMux) defer ts.Close() - req := web.Request{URL: ts.URL + "/metrics"} + req := web.RequestConfig{URL: ts.URL + "/metrics"} prom := New(http.DefaultClient, req) res, err := prom.ScrapeSeries() @@ -55,7 +55,7 @@ func TestPrometheusPlain(t *testing.T) { ts := httptest.NewServer(tsMux) defer ts.Close() - req := web.Request{URL: ts.URL + "/metrics"} + req := web.RequestConfig{URL: ts.URL + "/metrics"} prom := New(http.DefaultClient, req) res, err := prom.ScrapeSeries() @@ -71,7 +71,7 @@ func TestPrometheusPlainWithSelector(t *testing.T) { ts := httptest.NewServer(tsMux) defer ts.Close() - req := web.Request{URL: ts.URL + "/metrics"} + req := web.RequestConfig{URL: ts.URL + "/metrics"} sr, err := selector.Parse("go_gc*") require.NoError(t, err) prom := NewWithSelector(http.DefaultClient, req, sr) @@ -101,7 +101,7 @@ func TestPrometheusGzip(t *testing.T) { ts := httptest.NewServer(tsMux) defer ts.Close() - req := web.Request{URL: ts.URL + "/metrics"} + req := web.RequestConfig{URL: ts.URL + "/metrics"} prom := New(http.DefaultClient, req) for i := 0; i < 2; i++ { @@ -112,7 +112,8 @@ func TestPrometheusGzip(t *testing.T) { } func TestPrometheusReadFromFile(t *testing.T) { - req := web.Request{URL: "file://testdata/testdata.txt"} + req := web.RequestConfig{URL: "file://testdata/testdata.txt"} + prom := NewWithSelector(http.DefaultClient, req, nil) for i := 0; i < 2; i++ { @@ -120,6 +121,14 @@ func TestPrometheusReadFromFile(t *testing.T) { assert.NoError(t, err) verifyTestData(t, res) } + + prom = New(http.DefaultClient, req) + + for i := 0; i < 2; i++ { + res, err := prom.ScrapeSeries() + assert.NoError(t, err) + verifyTestData(t, res) + } } func verifyTestData(t *testing.T, ms Series) { diff --git a/src/go/plugin/go.d/pkg/prometheus/metric_series.go b/src/go/plugin/go.d/pkg/prometheus/metric_series.go index 31914f4b2..d94cbd62a 100644 --- a/src/go/plugin/go.d/pkg/prometheus/metric_series.go +++ b/src/go/plugin/go.d/pkg/prometheus/metric_series.go @@ -100,11 +100,11 @@ func (s Series) Max() float64 { case 1: return s[0].Value } - max := s[0].Value + maxv := s[0].Value for _, kv := range s[1:] { - if max < kv.Value { - max = kv.Value + if maxv < kv.Value { + maxv = kv.Value } } - return max + return maxv } diff --git a/src/go/plugin/go.d/pkg/prometheus/selector/README.md b/src/go/plugin/go.d/pkg/prometheus/selector/README.md index 601eb0891..19e1658d0 100644 --- 
a/src/go/plugin/go.d/pkg/prometheus/selector/README.md +++ b/src/go/plugin/go.d/pkg/prometheus/selector/README.md @@ -1,11 +1,3 @@ - - # Time series selector Selectors allow selecting and filtering of a set of time series. diff --git a/src/go/plugin/go.d/pkg/prometheus/selector/parse.go b/src/go/plugin/go.d/pkg/prometheus/selector/parse.go index 81e970c48..4f0aaf4dc 100644 --- a/src/go/plugin/go.d/pkg/prometheus/selector/parse.go +++ b/src/go/plugin/go.d/pkg/prometheus/selector/parse.go @@ -7,7 +7,7 @@ import ( "regexp" "strings" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher" + "github.com/netdata/netdata/go/plugins/pkg/matcher" ) var ( diff --git a/src/go/plugin/go.d/pkg/prometheus/selector/parse_test.go b/src/go/plugin/go.d/pkg/prometheus/selector/parse_test.go index 1a1f8ab79..a1876481d 100644 --- a/src/go/plugin/go.d/pkg/prometheus/selector/parse_test.go +++ b/src/go/plugin/go.d/pkg/prometheus/selector/parse_test.go @@ -6,7 +6,7 @@ import ( "fmt" "testing" - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher" + "github.com/netdata/netdata/go/plugins/pkg/matcher" "github.com/prometheus/prometheus/model/labels" "github.com/stretchr/testify/assert" diff --git a/src/go/plugin/go.d/pkg/prometheus/selector/selector.go b/src/go/plugin/go.d/pkg/prometheus/selector/selector.go index a42b846f2..01d0f0731 100644 --- a/src/go/plugin/go.d/pkg/prometheus/selector/selector.go +++ b/src/go/plugin/go.d/pkg/prometheus/selector/selector.go @@ -3,7 +3,7 @@ package selector import ( - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher" + "github.com/netdata/netdata/go/plugins/pkg/matcher" "github.com/prometheus/prometheus/model/labels" ) diff --git a/src/go/plugin/go.d/pkg/socket/client.go b/src/go/plugin/go.d/pkg/socket/client.go index c2bcbd9e9..f7f5a4bd5 100644 --- a/src/go/plugin/go.d/pkg/socket/client.go +++ b/src/go/plugin/go.d/pkg/socket/client.go @@ -10,24 +10,37 @@ import ( "time" ) -// New returns a new pointer to a socket client given the socket -// type (IP, TCP, UDP, UNIX), a network address (IP/domain:port), -// a timeout and a TLS config. It supports both IPv4 and IPv6 address -// and reuses connection where possible. -func New(config Config) *Socket { - return &Socket{ - Config: config, - conn: nil, - } +// Processor function passed to the Socket.Command function. +// It is passed by the caller to process a command's response line by line. +type Processor func([]byte) bool + +// Client is the interface that wraps the basic socket client operations +// and hides the implementation details from the users. +// Implementations should return TCP, UDP or Unix ready sockets. +type Client interface { + Connect() error + Disconnect() error + Command(command string, process Processor) error } -func ConnectAndRead(config Config, process Processor) error { - s := New(config) - if err := s.Connect(); err != nil { +func ConnectAndRead(cfg Config, process Processor) error { + sock := New(cfg) + + if err := sock.Connect(); err != nil { return err } - defer func() { _ = s.Disconnect() }() - return read(s.conn, process, s.ReadTimeout) + + defer func() { _ = sock.Disconnect() }() + + return sock.read(process) +} + +// New returns a new pointer to a socket client given the socket +// type (IP, TCP, UDP, UNIX), a network address (IP/domain:port), +// a timeout and a TLS config. It supports both IPv4 and IPv6 address +// and reuses connection where possible. 
+func New(cfg Config) *Socket { + return &Socket{Config: cfg} } // Socket is the implementation of a socket client. @@ -36,6 +49,14 @@ type Socket struct { conn net.Conn } +// Config holds the network ip v4 or v6 address, port, +// Socket type(ip, tcp, udp, unix), timeout and TLS configuration for a Socket +type Config struct { + Address string + Timeout time.Duration + TLSConf *tls.Config +} + // Connect connects to the Socket address on the named network. // If the address is a domain name it will also perform the DNS resolution. // Address like :80 will attempt to connect to the localhost. @@ -46,10 +67,10 @@ func (s *Socket) Connect() error { var err error if s.TLSConf == nil { - conn, err = net.DialTimeout(network, address, s.ConnectTimeout) + conn, err = net.DialTimeout(network, address, s.timeout()) } else { var d net.Dialer - d.Timeout = s.ConnectTimeout + d.Timeout = s.timeout() conn, err = tls.DialWithDialer(&d, network, address, s.TLSConf) } if err != nil { @@ -81,35 +102,52 @@ func (s *Socket) Command(command string, process Processor) error { if s.conn == nil { return errors.New("cannot send command on nil connection") } - if err := write(command, s.conn, s.WriteTimeout); err != nil { + + if err := s.write(command); err != nil { return err } - return read(s.conn, process, s.ReadTimeout) + + return s.read(process) } -func write(command string, writer net.Conn, timeout time.Duration) error { - if writer == nil { +func (s *Socket) write(command string) error { + if s.conn == nil { return errors.New("attempt to write on nil connection") } - if err := writer.SetWriteDeadline(time.Now().Add(timeout)); err != nil { + + if err := s.conn.SetWriteDeadline(time.Now().Add(s.timeout())); err != nil { return err } - _, err := writer.Write([]byte(command)) + + _, err := s.conn.Write([]byte(command)) + return err } -func read(reader net.Conn, process Processor, timeout time.Duration) error { +func (s *Socket) read(process Processor) error { if process == nil { return errors.New("process func is nil") } - if reader == nil { + + if s.conn == nil { return errors.New("attempt to read on nil connection") } - if err := reader.SetReadDeadline(time.Now().Add(timeout)); err != nil { + + if err := s.conn.SetReadDeadline(time.Now().Add(s.timeout())); err != nil { return err } - scanner := bufio.NewScanner(reader) - for scanner.Scan() && process(scanner.Bytes()) { + + sc := bufio.NewScanner(s.conn) + + for sc.Scan() && process(sc.Bytes()) { + } + + return sc.Err() +} + +func (s *Socket) timeout() time.Duration { + if s.Timeout == 0 { + return time.Second } - return scanner.Err() + return s.Timeout } diff --git a/src/go/plugin/go.d/pkg/socket/client_test.go b/src/go/plugin/go.d/pkg/socket/client_test.go index fa64f4558..53de50951 100644 --- a/src/go/plugin/go.d/pkg/socket/client_test.go +++ b/src/go/plugin/go.d/pkg/socket/client_test.go @@ -19,35 +19,27 @@ const ( ) var tcpConfig = Config{ - Address: testServerAddress, - ConnectTimeout: defaultTimeout, - ReadTimeout: defaultTimeout, - WriteTimeout: defaultTimeout, - TLSConf: nil, + Address: testServerAddress, + Timeout: defaultTimeout, + TLSConf: nil, } var udpConfig = Config{ - Address: testUdpServerAddress, - ConnectTimeout: defaultTimeout, - ReadTimeout: defaultTimeout, - WriteTimeout: defaultTimeout, - TLSConf: nil, + Address: testUdpServerAddress, + Timeout: defaultTimeout, + TLSConf: nil, } var unixConfig = Config{ - Address: testUnixServerAddress, - ConnectTimeout: defaultTimeout, - ReadTimeout: defaultTimeout, - WriteTimeout: defaultTimeout, - 
TLSConf: nil, + Address: testUnixServerAddress, + Timeout: defaultTimeout, + TLSConf: nil, } var tcpTlsConfig = Config{ - Address: testServerAddress, - ConnectTimeout: defaultTimeout, - ReadTimeout: defaultTimeout, - WriteTimeout: defaultTimeout, - TLSConf: &tls.Config{}, + Address: testServerAddress, + Timeout: defaultTimeout, + TLSConf: &tls.Config{}, } func Test_clientCommand(t *testing.T) { @@ -72,13 +64,12 @@ func Test_clientTimeout(t *testing.T) { time.Sleep(time.Millisecond * 100) sock := New(tcpConfig) require.NoError(t, sock.Connect()) - sock.ReadTimeout = 0 - sock.ReadTimeout = 0 + sock.Timeout = 0 err := sock.Command("ping\n", func(bytes []byte) bool { assert.Equal(t, "pong", string(bytes)) return true }) - require.Error(t, err) + require.NoError(t, err) } func Test_clientIncompleteSSL(t *testing.T) { diff --git a/src/go/plugin/go.d/pkg/socket/types.go b/src/go/plugin/go.d/pkg/socket/types.go deleted file mode 100644 index 693faf5be..000000000 --- a/src/go/plugin/go.d/pkg/socket/types.go +++ /dev/null @@ -1,41 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package socket - -import ( - "crypto/tls" - "time" -) - -// Processor function passed to the Socket.Command function. -// It is passed by the caller to process a command's response -// line by line. -type Processor func([]byte) bool - -// Client is the interface that wraps the basic socket client operations -// and hides the implementation details from the users. -// -// Connect should prepare the connection. -// -// Disconnect should stop any in-flight connections. -// -// Command should send the actual data to the wire and pass -// any results to the processor function. -// -// Implementations should return TCP, UDP or Unix ready sockets. -type Client interface { - Connect() error - Disconnect() error - Command(command string, process Processor) error -} - -// Config holds the network ip v4 or v6 address, port, -// Socket type(ip, tcp, udp, unix), timeout and TLS configuration -// for a Socket -type Config struct { - Address string - ConnectTimeout time.Duration - ReadTimeout time.Duration - WriteTimeout time.Duration - TLSConf *tls.Config -} diff --git a/src/go/plugin/go.d/pkg/stm/stm.go b/src/go/plugin/go.d/pkg/stm/stm.go index 7d07ba9a4..18dadc360 100644 --- a/src/go/plugin/go.d/pkg/stm/stm.go +++ b/src/go/plugin/go.d/pkg/stm/stm.go @@ -22,7 +22,7 @@ type ( ) // ToMap converts struct to a map[string]int64 based on 'stm' tags -func ToMap(s ...interface{}) map[string]int64 { +func ToMap(s ...any) map[string]int64 { rv := map[string]int64{} for _, v := range s { value := reflect.Indirect(reflect.ValueOf(v)) diff --git a/src/go/plugin/go.d/pkg/stm/stm_test.go b/src/go/plugin/go.d/pkg/stm/stm_test.go index 74ac6f3f1..9f76912ec 100644 --- a/src/go/plugin/go.d/pkg/stm/stm_test.go +++ b/src/go/plugin/go.d/pkg/stm/stm_test.go @@ -171,14 +171,14 @@ func TestToMap_map(t *testing.T) { func TestToMap_nestMap(t *testing.T) { s := struct { - I int `stm:"int"` - M map[string]interface{} `stm:""` + I int `stm:"int"` + M map[string]any `stm:""` }{ I: 1, - M: map[string]interface{}{ + M: map[string]any{ "a": 2, "b": 3, - "m": map[string]interface{}{ + "m": map[string]any{ "c": 4, }, }, @@ -352,7 +352,7 @@ func TestToMap_bool(t *testing.T) { } func TestToMap_ArraySlice(t *testing.T) { - s := [4]interface{}{ + s := [4]any{ map[string]int{ "B": 1, "C": 2, diff --git a/src/go/plugin/go.d/pkg/web/client.go b/src/go/plugin/go.d/pkg/web/client.go index 02dc17de1..61e6ff86b 100644 --- a/src/go/plugin/go.d/pkg/web/client.go +++ 
b/src/go/plugin/go.d/pkg/web/client.go @@ -3,78 +3,84 @@ package web import ( - "errors" + "encoding/json" + "encoding/xml" "fmt" - "net" + "io" "net/http" - "net/url" - - "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/tlscfg" ) -// ErrRedirectAttempted indicates that a redirect occurred. -var ErrRedirectAttempted = errors.New("redirect") - -// Client is the configuration of the HTTP client. -// This structure is not intended to be used directly as part of a module's configuration. -// Supported configuration file formats: YAML. type Client struct { - // Timeout specifies a time limit for requests made by this Client. - // Default (zero value) is no timeout. Must be set before http.Client creation. - Timeout Duration `yaml:"timeout,omitempty" json:"timeout"` + httpClient *http.Client + onNokCode func(resp *http.Response) (bool, error) +} - // NotFollowRedirect specifies the policy for handling redirects. - // Default (zero value) is std http package default policy (stop after 10 consecutive requests). - NotFollowRedirect bool `yaml:"not_follow_redirects,omitempty" json:"not_follow_redirects"` +func DoHTTP(cl *http.Client) *Client { + return &Client{ + httpClient: cl, + } +} + +func (c *Client) OnNokCode(fn func(resp *http.Response) (bool, error)) *Client { + c.onNokCode = fn + return c +} - // ProxyURL specifies the URL of the proxy to use. An empty string means use the environment variables - // HTTP_PROXY, HTTPS_PROXY and NO_PROXY (or the lowercase versions thereof) to get the URL. - ProxyURL string `yaml:"proxy_url,omitempty" json:"proxy_url"` +func (c *Client) RequestJSON(req *http.Request, in any) error { + return c.Request(req, func(body io.Reader) error { + return json.NewDecoder(body).Decode(in) + }) +} - // TLSConfig specifies the TLS configuration. - tlscfg.TLSConfig `yaml:",inline" json:""` +func (c *Client) RequestXML(req *http.Request, in any, opts ...func(dec *xml.Decoder)) error { + return c.Request(req, func(body io.Reader) error { + dec := xml.NewDecoder(body) + for _, opt := range opts { + opt(dec) + } + return dec.Decode(in) + }) } -// NewHTTPClient returns a new *http.Client given a Client configuration and an error if any. 
-func NewHTTPClient(cfg Client) (*http.Client, error) { - tlsConfig, err := tlscfg.NewTLSConfig(cfg.TLSConfig) +func (c *Client) Request(req *http.Request, parse func(body io.Reader) error) error { + resp, err := c.httpClient.Do(req) if err != nil { - return nil, fmt.Errorf("error on creating TLS config: %v", err) + return fmt.Errorf("error on HTTP request to '%s': %w", req.URL, err) } - if cfg.ProxyURL != "" { - if _, err := url.Parse(cfg.ProxyURL); err != nil { - return nil, fmt.Errorf("error on parsing proxy URL '%s': %v", cfg.ProxyURL, err) + defer CloseBody(resp) + + if resp.StatusCode != http.StatusOK { + if err := c.handleNokCode(req, resp); err != nil { + return err } } - d := &net.Dialer{Timeout: cfg.Timeout.Duration()} - - transport := &http.Transport{ - Proxy: proxyFunc(cfg.ProxyURL), - TLSClientConfig: tlsConfig, - DialContext: d.DialContext, - TLSHandshakeTimeout: cfg.Timeout.Duration(), + if parse != nil { + if err := parse(resp.Body); err != nil { + return fmt.Errorf("error on parsing response from '%s': %w", req.URL, err) + } } - return &http.Client{ - Timeout: cfg.Timeout.Duration(), - Transport: transport, - CheckRedirect: redirectFunc(cfg.NotFollowRedirect), - }, nil + return nil } -func redirectFunc(notFollowRedirect bool) func(req *http.Request, via []*http.Request) error { - if follow := !notFollowRedirect; follow { - return nil +func (c *Client) handleNokCode(req *http.Request, resp *http.Response) error { + if c.onNokCode != nil { + handled, err := c.onNokCode(resp) + if err != nil { + return fmt.Errorf("'%s' returned HTTP status code: %d (%w)", req.URL, resp.StatusCode, err) + } + if handled { + return nil + } } - return func(_ *http.Request, _ []*http.Request) error { return ErrRedirectAttempted } + return fmt.Errorf("'%s' returned HTTP status code: %d", req.URL, resp.StatusCode) } -func proxyFunc(rawProxyURL string) func(r *http.Request) (*url.URL, error) { - if rawProxyURL == "" { - return http.ProxyFromEnvironment +func CloseBody(resp *http.Response) { + if resp != nil && resp.Body != nil { + _, _ = io.Copy(io.Discard, resp.Body) + _ = resp.Body.Close() } - proxyURL, _ := url.Parse(rawProxyURL) - return http.ProxyURL(proxyURL) } diff --git a/src/go/plugin/go.d/pkg/web/client_config.go b/src/go/plugin/go.d/pkg/web/client_config.go new file mode 100644 index 000000000..0ab3a045a --- /dev/null +++ b/src/go/plugin/go.d/pkg/web/client_config.go @@ -0,0 +1,81 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package web + +import ( + "errors" + "fmt" + "net" + "net/http" + "net/url" + + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/tlscfg" +) + +// ErrRedirectAttempted indicates that a redirect occurred. +var ErrRedirectAttempted = errors.New("redirect") + +// ClientConfig is the configuration of the HTTPConfig client. +// This structure is not intended to be used directly as part of a module's configuration. +// Supported configuration file formats: YAML. +type ClientConfig struct { + // Timeout specifies a time limit for requests made by this ClientConfig. + // Default (zero value) is no timeout. Must be set before http.Client creation. + Timeout confopt.Duration `yaml:"timeout,omitempty" json:"timeout"` + + // NotFollowRedirect specifies the policy for handling redirects. + // Default (zero value) is std http package default policy (stop after 10 consecutive requests). 
+ NotFollowRedirect bool `yaml:"not_follow_redirects,omitempty" json:"not_follow_redirects"` + + // ProxyURL specifies the URL of the proxy to use. An empty string means use the environment variables + // HTTP_PROXY, HTTPS_PROXY and NO_PROXY (or the lowercase versions thereof) to get the URL. + ProxyURL string `yaml:"proxy_url,omitempty" json:"proxy_url"` + + // TLSConfig specifies the TLS configuration. + tlscfg.TLSConfig `yaml:",inline" json:""` +} + +// NewHTTPClient returns a new *http.Client given a ClientConfig configuration and an error if any. +func NewHTTPClient(cfg ClientConfig) (*http.Client, error) { + tlsConfig, err := tlscfg.NewTLSConfig(cfg.TLSConfig) + if err != nil { + return nil, fmt.Errorf("error on creating TLS config: %v", err) + } + + if cfg.ProxyURL != "" { + if _, err := url.Parse(cfg.ProxyURL); err != nil { + return nil, fmt.Errorf("error on parsing proxy URL '%s': %v", cfg.ProxyURL, err) + } + } + + d := &net.Dialer{Timeout: cfg.Timeout.Duration()} + + transport := &http.Transport{ + Proxy: proxyFunc(cfg.ProxyURL), + TLSClientConfig: tlsConfig, + DialContext: d.DialContext, + TLSHandshakeTimeout: cfg.Timeout.Duration(), + } + + return &http.Client{ + Timeout: cfg.Timeout.Duration(), + Transport: transport, + CheckRedirect: redirectFunc(cfg.NotFollowRedirect), + }, nil +} + +func redirectFunc(notFollowRedirect bool) func(req *http.Request, via []*http.Request) error { + if follow := !notFollowRedirect; follow { + return nil + } + return func(_ *http.Request, _ []*http.Request) error { return ErrRedirectAttempted } +} + +func proxyFunc(rawProxyURL string) func(r *http.Request) (*url.URL, error) { + if rawProxyURL == "" { + return http.ProxyFromEnvironment + } + proxyURL, _ := url.Parse(rawProxyURL) + return http.ProxyURL(proxyURL) +} diff --git a/src/go/plugin/go.d/pkg/web/client_config_test.go b/src/go/plugin/go.d/pkg/web/client_config_test.go new file mode 100644 index 000000000..b7fc8a858 --- /dev/null +++ b/src/go/plugin/go.d/pkg/web/client_config_test.go @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package web + +import ( + "net/http" + "testing" + "time" + + "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/confopt" + + "github.com/stretchr/testify/assert" +) + +func TestNewHTTPClient(t *testing.T) { + client, _ := NewHTTPClient(ClientConfig{ + Timeout: confopt.Duration(time.Second * 5), + NotFollowRedirect: true, + ProxyURL: "http://127.0.0.1:3128", + }) + + assert.IsType(t, (*http.Client)(nil), client) + assert.Equal(t, time.Second*5, client.Timeout) + assert.NotNil(t, client.CheckRedirect) +} diff --git a/src/go/plugin/go.d/pkg/web/client_test.go b/src/go/plugin/go.d/pkg/web/client_test.go deleted file mode 100644 index ead1486c3..000000000 --- a/src/go/plugin/go.d/pkg/web/client_test.go +++ /dev/null @@ -1,23 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package web - -import ( - "net/http" - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -func TestNewHTTPClient(t *testing.T) { - client, _ := NewHTTPClient(Client{ - Timeout: Duration(time.Second * 5), - NotFollowRedirect: true, - ProxyURL: "http://127.0.0.1:3128", - }) - - assert.IsType(t, (*http.Client)(nil), client) - assert.Equal(t, time.Second*5, client.Timeout) - assert.NotNil(t, client.CheckRedirect) -} diff --git a/src/go/plugin/go.d/pkg/web/config.go b/src/go/plugin/go.d/pkg/web/config.go new file mode 100644 index 000000000..f120fa047 --- /dev/null +++ b/src/go/plugin/go.d/pkg/web/config.go @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: 
GPL-3.0-or-later + +package web + +// HTTPConfig is a struct with embedded RequestConfig and ClientConfig. +// This structure intended to be part of the module configuration. +// Supported configuration file formats: YAML. +type HTTPConfig struct { + RequestConfig `yaml:",inline" json:""` + ClientConfig `yaml:",inline" json:""` +} diff --git a/src/go/plugin/go.d/pkg/web/doc.go b/src/go/plugin/go.d/pkg/web/doc.go index 4c6d31461..73a9d7547 100644 --- a/src/go/plugin/go.d/pkg/web/doc.go +++ b/src/go/plugin/go.d/pkg/web/doc.go @@ -2,7 +2,7 @@ /* Package web contains HTTP request and client configurations. -HTTP structure embeds both of them, and it's the only structure that intended to be used as part of a module's configuration. +HTTPConfig structure embeds both of them, and it's the only structure that intended to be used as part of a module's configuration. Every module that uses HTTP requests to collect metrics should use it. It allows to have same set of user configurable options across all modules. */ diff --git a/src/go/plugin/go.d/pkg/web/doc_test.go b/src/go/plugin/go.d/pkg/web/doc_test.go index 137eed207..10464f0a4 100644 --- a/src/go/plugin/go.d/pkg/web/doc_test.go +++ b/src/go/plugin/go.d/pkg/web/doc_test.go @@ -2,14 +2,14 @@ package web -func ExampleHTTP_usage() { - // Just embed HTTP into your module structure. - // It allows you to have both Request and Client fields in the module configuration file. +func ExampleHTTPConfig_usage() { + // Just embed HTTPConfig into your module structure. + // It allows you to have both RequestConfig and ClientConfig fields in the module configuration file. type myModule struct { - HTTP `yaml:",inline"` + HTTPConfig `yaml:",inline"` } var m myModule - _, _ = NewHTTPRequest(m.Request) - _, _ = NewHTTPClient(m.Client) + _, _ = NewHTTPRequest(m.RequestConfig) + _, _ = NewHTTPClient(m.ClientConfig) } diff --git a/src/go/plugin/go.d/pkg/web/duration.go b/src/go/plugin/go.d/pkg/web/duration.go deleted file mode 100644 index 85d5ef650..000000000 --- a/src/go/plugin/go.d/pkg/web/duration.go +++ /dev/null @@ -1,72 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package web - -import ( - "encoding/json" - "fmt" - "strconv" - "time" -) - -type Duration time.Duration - -func (d Duration) Duration() time.Duration { - return time.Duration(d) -} - -func (d Duration) String() string { - return d.Duration().String() -} - -func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error { - var s string - - if err := unmarshal(&s); err != nil { - return err - } - - if v, err := time.ParseDuration(s); err == nil { - *d = Duration(v) - return nil - } - if v, err := strconv.ParseInt(s, 10, 64); err == nil { - *d = Duration(time.Duration(v) * time.Second) - return nil - } - if v, err := strconv.ParseFloat(s, 64); err == nil { - *d = Duration(v * float64(time.Second)) - return nil - } - - return fmt.Errorf("unparsable duration format '%s'", s) -} - -func (d Duration) MarshalYAML() (any, error) { - seconds := float64(d) / float64(time.Second) - return seconds, nil -} - -func (d *Duration) UnmarshalJSON(b []byte) error { - s := string(b) - - if v, err := time.ParseDuration(s); err == nil { - *d = Duration(v) - return nil - } - if v, err := strconv.ParseInt(s, 10, 64); err == nil { - *d = Duration(time.Duration(v) * time.Second) - return nil - } - if v, err := strconv.ParseFloat(s, 64); err == nil { - *d = Duration(v * float64(time.Second)) - return nil - } - - return fmt.Errorf("unparsable duration format '%s'", s) -} - -func (d Duration) MarshalJSON() 
([]byte, error) { - seconds := float64(d) / float64(time.Second) - return json.Marshal(seconds) -} diff --git a/src/go/plugin/go.d/pkg/web/duration_test.go b/src/go/plugin/go.d/pkg/web/duration_test.go deleted file mode 100644 index b45063f13..000000000 --- a/src/go/plugin/go.d/pkg/web/duration_test.go +++ /dev/null @@ -1,114 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package web - -import ( - "encoding/json" - "fmt" - "strings" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "gopkg.in/yaml.v2" -) - -func TestDuration_MarshalYAML(t *testing.T) { - tests := map[string]struct { - d Duration - want string - }{ - "1 second": {d: Duration(time.Second), want: "1"}, - "1.5 seconds": {d: Duration(time.Second + time.Millisecond*500), want: "1.5"}, - } - - for name, test := range tests { - t.Run(name, func(t *testing.T) { - bs, err := yaml.Marshal(&test.d) - require.NoError(t, err) - - assert.Equal(t, test.want, strings.TrimSpace(string(bs))) - }) - } -} - -func TestDuration_MarshalJSON(t *testing.T) { - tests := map[string]struct { - d Duration - want string - }{ - "1 second": {d: Duration(time.Second), want: "1"}, - "1.5 seconds": {d: Duration(time.Second + time.Millisecond*500), want: "1.5"}, - } - - for name, test := range tests { - t.Run(name, func(t *testing.T) { - bs, err := json.Marshal(&test.d) - require.NoError(t, err) - - assert.Equal(t, test.want, strings.TrimSpace(string(bs))) - }) - } -} - -func TestDuration_UnmarshalYAML(t *testing.T) { - tests := map[string]struct { - input any - }{ - "duration": {input: "300ms"}, - "string int": {input: "1"}, - "string float": {input: "1.1"}, - "int": {input: 2}, - "float": {input: 2.2}, - } - - var zero Duration - - for name, test := range tests { - name = fmt.Sprintf("%s (%v)", name, test.input) - t.Run(name, func(t *testing.T) { - data, err := yaml.Marshal(test.input) - require.NoError(t, err) - - var d Duration - require.NoError(t, yaml.Unmarshal(data, &d)) - assert.NotEqual(t, zero.String(), d.String()) - }) - } -} - -func TestDuration_UnmarshalJSON(t *testing.T) { - tests := map[string]struct { - input any - }{ - "duration": {input: "300ms"}, - "string int": {input: "1"}, - "string float": {input: "1.1"}, - "int": {input: 2}, - "float": {input: 2.2}, - } - - var zero Duration - - type duration struct { - D Duration `json:"d"` - } - type input struct { - D any `json:"d"` - } - - for name, test := range tests { - name = fmt.Sprintf("%s (%v)", name, test.input) - t.Run(name, func(t *testing.T) { - input := input{D: test.input} - data, err := yaml.Marshal(input) - require.NoError(t, err) - - var d duration - require.NoError(t, yaml.Unmarshal(data, &d)) - assert.NotEqual(t, zero.String(), d.D.String()) - }) - } -} diff --git a/src/go/plugin/go.d/pkg/web/request.go b/src/go/plugin/go.d/pkg/web/request.go deleted file mode 100644 index 20a6ec093..000000000 --- a/src/go/plugin/go.d/pkg/web/request.go +++ /dev/null @@ -1,105 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package web - -import ( - "encoding/base64" - "fmt" - "io" - "net/http" - "net/url" - "strings" - - "github.com/netdata/netdata/go/plugins/pkg/buildinfo" - "github.com/netdata/netdata/go/plugins/pkg/executable" -) - -// Request is the configuration of the HTTP request. -// This structure is not intended to be used directly as part of a module's configuration. -// Supported configuration file formats: YAML. -type Request struct { - // URL specifies the URL to access. 
- URL string `yaml:"url" json:"url"` - - // Username specifies the username for basic HTTP authentication. - Username string `yaml:"username,omitempty" json:"username"` - - // Password specifies the password for basic HTTP authentication. - Password string `yaml:"password,omitempty" json:"password"` - - // ProxyUsername specifies the username for basic HTTP authentication. - // It is used to authenticate a user agent to a proxy server. - ProxyUsername string `yaml:"proxy_username,omitempty" json:"proxy_username"` - - // ProxyPassword specifies the password for basic HTTP authentication. - // It is used to authenticate a user agent to a proxy server. - ProxyPassword string `yaml:"proxy_password,omitempty" json:"proxy_password"` - - // Method specifies the HTTP method (GET, POST, PUT, etc.). An empty string means GET. - Method string `yaml:"method,omitempty" json:"method"` - - // Headers specifies the HTTP request header fields to be sent by the client. - Headers map[string]string `yaml:"headers,omitempty" json:"headers"` - - // Body specifies the HTTP request body to be sent by the client. - Body string `yaml:"body,omitempty" json:"body"` -} - -// Copy makes a full copy of the Request. -func (r Request) Copy() Request { - headers := make(map[string]string, len(r.Headers)) - for k, v := range r.Headers { - headers[k] = v - } - r.Headers = headers - return r -} - -var userAgent = fmt.Sprintf("Netdata %s.plugin/%s", executable.Name, buildinfo.Version) - -// NewHTTPRequest returns a new *http.Requests given a Request configuration and an error if any. -func NewHTTPRequest(cfg Request) (*http.Request, error) { - var body io.Reader - if cfg.Body != "" { - body = strings.NewReader(cfg.Body) - } - - req, err := http.NewRequest(cfg.Method, cfg.URL, body) - if err != nil { - return nil, err - } - - req.Header.Set("User-Agent", userAgent) - - if cfg.Username != "" || cfg.Password != "" { - req.SetBasicAuth(cfg.Username, cfg.Password) - } - - if cfg.ProxyUsername != "" && cfg.ProxyPassword != "" { - basicAuth := base64.StdEncoding.EncodeToString([]byte(cfg.ProxyUsername + ":" + cfg.ProxyPassword)) - req.Header.Set("Proxy-Authorization", "Basic "+basicAuth) - } - - for k, v := range cfg.Headers { - switch k { - case "host", "Host": - req.Host = v - default: - req.Header.Set(k, v) - } - } - - return req, nil -} - -func NewHTTPRequestWithPath(cfg Request, urlPath string) (*http.Request, error) { - cfg = cfg.Copy() - - v, err := url.JoinPath(cfg.URL, urlPath) - if err != nil { - return nil, fmt.Errorf("failed to join URL path: %v", err) - } - cfg.URL = v - - return NewHTTPRequest(cfg) -} diff --git a/src/go/plugin/go.d/pkg/web/request_config.go b/src/go/plugin/go.d/pkg/web/request_config.go new file mode 100644 index 000000000..ca96eca08 --- /dev/null +++ b/src/go/plugin/go.d/pkg/web/request_config.go @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package web + +import ( + "encoding/base64" + "fmt" + "io" + "net/http" + "net/url" + "strings" + + "github.com/netdata/netdata/go/plugins/pkg/buildinfo" + "github.com/netdata/netdata/go/plugins/pkg/executable" +) + +// RequestConfig is the configuration of the HTTP request. +// This structure is not intended to be used directly as part of a module's configuration. +// Supported configuration file formats: YAML. +type RequestConfig struct { + // URL specifies the URL to access. + URL string `yaml:"url" json:"url"` + + // Username specifies the username for basic HTTPConfig authentication. 
+ Username string `yaml:"username,omitempty" json:"username"` + + // Password specifies the password for basic HTTPConfig authentication. + Password string `yaml:"password,omitempty" json:"password"` + + // ProxyUsername specifies the username for basic HTTPConfig authentication. + // It is used to authenticate a user agent to a proxy server. + ProxyUsername string `yaml:"proxy_username,omitempty" json:"proxy_username"` + + // ProxyPassword specifies the password for basic HTTPConfig authentication. + // It is used to authenticate a user agent to a proxy server. + ProxyPassword string `yaml:"proxy_password,omitempty" json:"proxy_password"` + + // Method specifies the HTTPConfig method (GET, POST, PUT, etc.). An empty string means GET. + Method string `yaml:"method,omitempty" json:"method"` + + // Headers specifies the HTTP request header fields to be sent by the client. + Headers map[string]string `yaml:"headers,omitempty" json:"headers"` + + // Body specifies the HTTP request body to be sent by the client. + Body string `yaml:"body,omitempty" json:"body"` +} + +// Copy makes a full copy of the RequestConfig. +func (r RequestConfig) Copy() RequestConfig { + headers := make(map[string]string, len(r.Headers)) + for k, v := range r.Headers { + headers[k] = v + } + r.Headers = headers + return r +} + +var userAgent = fmt.Sprintf("Netdata %s.plugin/%s", executable.Name, buildinfo.Version) + +// NewHTTPRequest returns a new *http.Requests given a RequestConfig configuration and an error if any. +func NewHTTPRequest(cfg RequestConfig) (*http.Request, error) { + var body io.Reader + if cfg.Body != "" { + body = strings.NewReader(cfg.Body) + } + + req, err := http.NewRequest(cfg.Method, cfg.URL, body) + if err != nil { + return nil, err + } + + req.Header.Set("User-Agent", userAgent) + + if cfg.Username != "" || cfg.Password != "" { + req.SetBasicAuth(cfg.Username, cfg.Password) + } + + if cfg.ProxyUsername != "" && cfg.ProxyPassword != "" { + basicAuth := base64.StdEncoding.EncodeToString([]byte(cfg.ProxyUsername + ":" + cfg.ProxyPassword)) + req.Header.Set("Proxy-Authorization", "Basic "+basicAuth) + } + + for k, v := range cfg.Headers { + switch k { + case "host", "Host": + req.Host = v + default: + req.Header.Set(k, v) + } + } + + return req, nil +} + +func NewHTTPRequestWithPath(cfg RequestConfig, urlPath string) (*http.Request, error) { + cfg = cfg.Copy() + + v, err := url.JoinPath(cfg.URL, urlPath) + if err != nil { + return nil, fmt.Errorf("failed to join URL path: %v", err) + } + cfg.URL = v + + return NewHTTPRequest(cfg) +} diff --git a/src/go/plugin/go.d/pkg/web/request_config_test.go b/src/go/plugin/go.d/pkg/web/request_config_test.go new file mode 100644 index 000000000..c3ef01827 --- /dev/null +++ b/src/go/plugin/go.d/pkg/web/request_config_test.go @@ -0,0 +1,208 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package web + +import ( + "encoding/base64" + "net/http" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestRequest_Copy(t *testing.T) { + tests := map[string]struct { + orig RequestConfig + change func(req *RequestConfig) + }{ + "change headers": { + orig: RequestConfig{ + URL: "http://127.0.0.1:19999/api/v1/info", + Method: "POST", + Headers: map[string]string{ + "X-Api-Key": "secret", + }, + Username: "username", + Password: "password", + ProxyUsername: "proxy_username", + ProxyPassword: "proxy_password", + }, + change: func(req *RequestConfig) { + req.Headers["header_key"] = "header_value" + }, + }, + } 
+ + for name, test := range tests { + t.Run(name, func(t *testing.T) { + reqCopy := test.orig.Copy() + + assert.Equal(t, test.orig, reqCopy) + test.change(&reqCopy) + assert.NotEqual(t, test.orig, reqCopy) + }) + } +} + +func TestNewHTTPRequest(t *testing.T) { + tests := map[string]struct { + req RequestConfig + wantErr bool + }{ + "test url": { + req: RequestConfig{ + URL: "http://127.0.0.1:19999/api/v1/info", + }, + wantErr: false, + }, + "test body": { + req: RequestConfig{ + Body: "content", + }, + wantErr: false, + }, + "test method": { + req: RequestConfig{ + Method: "POST", + }, + wantErr: false, + }, + "test headers": { + req: RequestConfig{ + Headers: map[string]string{ + "X-Api-Key": "secret", + }, + }, + wantErr: false, + }, + "test special headers (host)": { + req: RequestConfig{ + Headers: map[string]string{ + "host": "Host", + }, + }, + wantErr: false, + }, + "test special headers (Host)": { + req: RequestConfig{ + Headers: map[string]string{ + "Host": "Host", + }, + }, + wantErr: false, + }, + "test username and password": { + req: RequestConfig{ + Username: "username", + Password: "password", + }, + wantErr: false, + }, + "test proxy username and proxy password": { + req: RequestConfig{ + ProxyUsername: "proxy_username", + ProxyPassword: "proxy_password", + }, + wantErr: false, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + httpReq, err := NewHTTPRequest(test.req) + + if test.wantErr { + assert.Error(t, err) + assert.Nil(t, httpReq) + return + } + + require.NoError(t, err) + require.NotNil(t, httpReq) + require.IsType(t, (*http.Request)(nil), httpReq) + + assert.Equal(t, test.req.URL, httpReq.URL.String()) + + if test.req.Body != "" { + assert.NotNil(t, httpReq.Body) + } + + if test.req.Username != "" || test.req.Password != "" { + user, pass, ok := httpReq.BasicAuth() + assert.True(t, ok) + assert.Equal(t, test.req.Username, user) + assert.Equal(t, test.req.Password, pass) + } + + if test.req.Method != "" { + assert.Equal(t, test.req.Method, httpReq.Method) + } + + if test.req.ProxyUsername != "" || test.req.ProxyPassword != "" { + user, pass, ok := parseBasicAuth(httpReq.Header.Get("Proxy-Authorization")) + assert.True(t, ok) + assert.Equal(t, test.req.ProxyUsername, user) + assert.Equal(t, test.req.ProxyPassword, pass) + } + + for k, v := range test.req.Headers { + switch k { + case "host", "Host": + assert.Equal(t, httpReq.Host, v) + default: + assert.Equal(t, v, httpReq.Header.Get(k)) + } + } + }) + } +} + +func TestNewRequest(t *testing.T) { + tests := map[string]struct { + url string + path string + wantURL string + }{ + "base url": { + url: "http://127.0.0.1:65535", + path: "/bar", + wantURL: "http://127.0.0.1:65535/bar", + }, + "with path": { + url: "http://127.0.0.1:65535/foo/", + path: "/bar", + wantURL: "http://127.0.0.1:65535/foo/bar", + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + req, err := NewHTTPRequestWithPath(RequestConfig{URL: test.url}.Copy(), test.path) + require.NoError(t, err) + + assert.Equal(t, test.wantURL, req.URL.String()) + }) + } +} + +func parseBasicAuth(auth string) (username, password string, ok bool) { + const prefix = "Basic " + if len(auth) < len(prefix) || !strings.EqualFold(auth[:len(prefix)], prefix) { + return "", "", false + } + + decoded, err := base64.StdEncoding.DecodeString(auth[len(prefix):]) + if err != nil { + return "", "", false + } + + decodedStr := string(decoded) + idx := strings.IndexByte(decodedStr, ':') + if idx < 0 { + return "", "", false + } + + 
return decodedStr[:idx], decodedStr[idx+1:], true +} diff --git a/src/go/plugin/go.d/pkg/web/request_test.go b/src/go/plugin/go.d/pkg/web/request_test.go deleted file mode 100644 index d39f9a36a..000000000 --- a/src/go/plugin/go.d/pkg/web/request_test.go +++ /dev/null @@ -1,208 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package web - -import ( - "encoding/base64" - "net/http" - "strings" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestRequest_Copy(t *testing.T) { - tests := map[string]struct { - orig Request - change func(req *Request) - }{ - "change headers": { - orig: Request{ - URL: "http://127.0.0.1:19999/api/v1/info", - Method: "POST", - Headers: map[string]string{ - "X-Api-Key": "secret", - }, - Username: "username", - Password: "password", - ProxyUsername: "proxy_username", - ProxyPassword: "proxy_password", - }, - change: func(req *Request) { - req.Headers["header_key"] = "header_value" - }, - }, - } - - for name, test := range tests { - t.Run(name, func(t *testing.T) { - reqCopy := test.orig.Copy() - - assert.Equal(t, test.orig, reqCopy) - test.change(&reqCopy) - assert.NotEqual(t, test.orig, reqCopy) - }) - } -} - -func TestNewHTTPRequest(t *testing.T) { - tests := map[string]struct { - req Request - wantErr bool - }{ - "test url": { - req: Request{ - URL: "http://127.0.0.1:19999/api/v1/info", - }, - wantErr: false, - }, - "test body": { - req: Request{ - Body: "content", - }, - wantErr: false, - }, - "test method": { - req: Request{ - Method: "POST", - }, - wantErr: false, - }, - "test headers": { - req: Request{ - Headers: map[string]string{ - "X-Api-Key": "secret", - }, - }, - wantErr: false, - }, - "test special headers (host)": { - req: Request{ - Headers: map[string]string{ - "host": "Host", - }, - }, - wantErr: false, - }, - "test special headers (Host)": { - req: Request{ - Headers: map[string]string{ - "Host": "Host", - }, - }, - wantErr: false, - }, - "test username and password": { - req: Request{ - Username: "username", - Password: "password", - }, - wantErr: false, - }, - "test proxy username and proxy password": { - req: Request{ - ProxyUsername: "proxy_username", - ProxyPassword: "proxy_password", - }, - wantErr: false, - }, - } - - for name, test := range tests { - t.Run(name, func(t *testing.T) { - httpReq, err := NewHTTPRequest(test.req) - - if test.wantErr { - assert.Error(t, err) - assert.Nil(t, httpReq) - return - } - - require.NoError(t, err) - require.NotNil(t, httpReq) - require.IsType(t, (*http.Request)(nil), httpReq) - - assert.Equal(t, test.req.URL, httpReq.URL.String()) - - if test.req.Body != "" { - assert.NotNil(t, httpReq.Body) - } - - if test.req.Username != "" || test.req.Password != "" { - user, pass, ok := httpReq.BasicAuth() - assert.True(t, ok) - assert.Equal(t, test.req.Username, user) - assert.Equal(t, test.req.Password, pass) - } - - if test.req.Method != "" { - assert.Equal(t, test.req.Method, httpReq.Method) - } - - if test.req.ProxyUsername != "" || test.req.ProxyPassword != "" { - user, pass, ok := parseBasicAuth(httpReq.Header.Get("Proxy-Authorization")) - assert.True(t, ok) - assert.Equal(t, test.req.ProxyUsername, user) - assert.Equal(t, test.req.ProxyPassword, pass) - } - - for k, v := range test.req.Headers { - switch k { - case "host", "Host": - assert.Equal(t, httpReq.Host, v) - default: - assert.Equal(t, v, httpReq.Header.Get(k)) - } - } - }) - } -} - -func TestNewRequest(t *testing.T) { - tests := map[string]struct { - url string - path string - wantURL 
string - }{ - "base url": { - url: "http://127.0.0.1:65535", - path: "/bar", - wantURL: "http://127.0.0.1:65535/bar", - }, - "with path": { - url: "http://127.0.0.1:65535/foo/", - path: "/bar", - wantURL: "http://127.0.0.1:65535/foo/bar", - }, - } - - for name, test := range tests { - t.Run(name, func(t *testing.T) { - req, err := NewHTTPRequestWithPath(Request{URL: test.url}.Copy(), test.path) - require.NoError(t, err) - - assert.Equal(t, test.wantURL, req.URL.String()) - }) - } -} - -func parseBasicAuth(auth string) (username, password string, ok bool) { - const prefix = "Basic " - if len(auth) < len(prefix) || !strings.EqualFold(auth[:len(prefix)], prefix) { - return "", "", false - } - - decoded, err := base64.StdEncoding.DecodeString(auth[len(prefix):]) - if err != nil { - return "", "", false - } - - decodedStr := string(decoded) - idx := strings.IndexByte(decodedStr, ':') - if idx < 0 { - return "", "", false - } - - return decodedStr[:idx], decodedStr[idx+1:], true -} diff --git a/src/go/plugin/go.d/pkg/web/web.go b/src/go/plugin/go.d/pkg/web/web.go deleted file mode 100644 index cbda396d4..000000000 --- a/src/go/plugin/go.d/pkg/web/web.go +++ /dev/null @@ -1,11 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package web - -// HTTP is a struct with embedded Request and Client. -// This structure intended to be part of the module configuration. -// Supported configuration file formats: YAML. -type HTTP struct { - Request `yaml:",inline" json:""` - Client `yaml:",inline" json:""` -} diff --git a/src/health/README.md b/src/health/README.md index 5c479af5f..081a8b8f8 100644 --- a/src/health/README.md +++ b/src/health/README.md @@ -2,9 +2,11 @@ Netdata offers two ways to receive alert notifications on external integrations. These methods work independently, which means you can enable both at the same time to send alert notifications to any number of endpoints. -Both methods use a node's health alerts to generate the content of a notification. +Both methods use a node's health alerts to generate the content of a notification. -Read our documentation on [configuring alerts](/src/health/REFERENCE.md) to change the preconfigured thresholds or to create tailored alerts for your infrastructure. +Read our documentation on [configuring alerts](/src/health/REFERENCE.md) to change the pre-configured thresholds or to create tailored alerts for your infrastructure. + + - Netdata Cloud provides centralized alert notifications, utilizing the health status data already sent to Netdata Cloud from connected nodes to send alerts to configured integrations. [Supported integrations](/docs/alerts-&-notifications/notifications/centralized-cloud-notifications) include Amazon SNS, Discord, Slack, Splunk, and others. diff --git a/src/health/REFERENCE.md b/src/health/REFERENCE.md index 8b0a9177e..b46012d04 100644 --- a/src/health/REFERENCE.md +++ b/src/health/REFERENCE.md @@ -640,7 +640,7 @@ See our [simple patterns docs](/src/libnetdata/simple_pattern/README.md) for mor Similar to host labels, the `chart labels` key can be used to filter if an alert will load or not for a specific chart, based on whether these chart labels match or not. -The list of chart labels present on each chart can be obtained from http://localhost:19999/api/v1/charts?all +The list of chart labels present on each chart can be obtained from For example, each `disk_space` chart defines a chart label called `mount_point` with each instance of this chart having a value there of which mount point it monitors. 
@@ -808,14 +808,14 @@ You can find all the variables that can be used for a given chart, using Agent dashboard. For example, [variables for the `system.cpu` chart of the registry](https://registry.my-netdata.io/api/v1/alarm_variables?chart=system.cpu). -> If you don't know how to find the CHART_NAME, you can read about it [here](/src/web/README.md#charts). + Netdata supports 3 internal indexes for variables that will be used in health monitoring.
    The variables below can be used in both chart alerts and context templates. Although the `alarm_variables` link shows you variables for a particular chart, the same variables can also be used in -templates for charts belonging to a given [context](/src/web/README.md#contexts). The reason is that all charts of a given +templates for charts belonging to a given context. The reason is that all charts of a given context are essentially identical, with the only difference being the family that identifies a particular hardware or software instance.
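To make the chart/context distinction concrete, here is a hedged sketch (names and thresholds are illustrative): an `alarm` attaches to one specific chart, while a `template` is applied to every chart of a context:

```text
# attaches only to the system.cpu chart
 alarm: cpu_usage_example
    on: system.cpu
lookup: average -10m unaligned of user,system
 units: %
 every: 1m
  warn: $this > 85

# attaches to every chart of the disk.space context
template: disk_usage_example
      on: disk.space
    calc: $used * 100 / ($avail + $used)
   units: %
   every: 1m
    warn: $this > 85
```

Both forms resolve the same variables; the template simply creates one alert instance per matching chart.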
    @@ -1064,7 +1064,7 @@ template: ml_5min_cpu_chart info: rolling 5min anomaly rate for system.cpu chart ``` -The `lookup` line will calculate the average anomaly rate across all `system.cpu` dimensions over the last 5 minues. In this case +The `lookup` line will calculate the average anomaly rate across all `system.cpu` dimensions over the last 5 minutes. In this case Netdata will create one alert for the chart. ### Example 7 - [Anomaly rate](/src/ml/README.md#anomaly-rate) based node level alert @@ -1083,7 +1083,7 @@ template: ml_5min_node info: rolling 5min anomaly rate for all ML enabled dims ``` -The `lookup` line will use the `anomaly_rate` dimension of the `anomaly_detection.anomaly_rate` ML chart to calculate the average [node level anomaly rate](/src/ml/README.md#node-anomaly-rate) over the last 5 minutes. +The `lookup` line will use the `anomaly_rate` dimension of the `anomaly_detection.anomaly_rate` ML chart to calculate the average [node level anomaly rate](/src/ml/README.md#anomaly-rate) over the last 5 minutes. ## Troubleshooting diff --git a/src/health/health.c b/src/health/health.c index 7039a193c..78559d7f4 100644 --- a/src/health/health.c +++ b/src/health/health.c @@ -14,7 +14,7 @@ struct health_plugin_globals health_globals = { .use_summary_for_notifications = true, .health_log_entries_max = HEALTH_LOG_ENTRIES_DEFAULT, - .health_log_history = HEALTH_LOG_HISTORY_DEFAULT, + .health_log_retention_s = HEALTH_LOG_RETENTION_DEFAULT, .default_warn_repeat_every = 0, .default_crit_repeat_every = 0, @@ -55,17 +55,17 @@ static void health_load_config_defaults(void) { health_globals.config.use_summary_for_notifications); health_globals.config.default_warn_repeat_every = - config_get_duration(CONFIG_SECTION_HEALTH, "default repeat warning", "never"); + config_get_duration_seconds(CONFIG_SECTION_HEALTH, "default repeat warning", 0); health_globals.config.default_crit_repeat_every = - config_get_duration(CONFIG_SECTION_HEALTH, "default repeat critical", "never"); + config_get_duration_seconds(CONFIG_SECTION_HEALTH, "default repeat critical", 0); health_globals.config.health_log_entries_max = config_get_number(CONFIG_SECTION_HEALTH, "in memory max health log entries", health_globals.config.health_log_entries_max); - health_globals.config.health_log_history = - config_get_number(CONFIG_SECTION_HEALTH, "health log history", HEALTH_LOG_DEFAULT_HISTORY); + health_globals.config.health_log_retention_s = + config_get_duration_seconds(CONFIG_SECTION_HEALTH, "health log retention", HEALTH_LOG_RETENTION_DEFAULT); snprintfz(filename, FILENAME_MAX, "%s/alarm-notify.sh", netdata_configured_primary_plugins_dir); health_globals.config.default_exec = @@ -76,14 +76,13 @@ static void health_load_config_defaults(void) { NULL, SIMPLE_PATTERN_EXACT, true); health_globals.config.run_at_least_every_seconds = - (int)config_get_number(CONFIG_SECTION_HEALTH, - "run at least every seconds", - health_globals.config.run_at_least_every_seconds); + (int)config_get_duration_seconds(CONFIG_SECTION_HEALTH, "run at least every", + health_globals.config.run_at_least_every_seconds); health_globals.config.postpone_alarms_during_hibernation_for_seconds = - config_get_number(CONFIG_SECTION_HEALTH, - "postpone alarms during hibernation for seconds", - health_globals.config.postpone_alarms_during_hibernation_for_seconds); + config_get_duration_seconds(CONFIG_SECTION_HEALTH, + "postpone alarms during hibernation for", + health_globals.config.postpone_alarms_during_hibernation_for_seconds); 
health_globals.config.default_recipient = string_strdupz("root"); @@ -115,27 +114,27 @@ static void health_load_config_defaults(void) { (long)health_globals.config.health_log_entries_max); } - if (health_globals.config.health_log_history < HEALTH_LOG_MINIMUM_HISTORY) { + if (health_globals.config.health_log_retention_s < HEALTH_LOG_MINIMUM_HISTORY) { nd_log(NDLS_DAEMON, NDLP_WARNING, - "Health configuration has invalid health log history %u. Using minimum %d", - health_globals.config.health_log_history, HEALTH_LOG_MINIMUM_HISTORY); + "Health configuration has invalid health log retention %u. Using minimum %d", + health_globals.config.health_log_retention_s, HEALTH_LOG_MINIMUM_HISTORY); - health_globals.config.health_log_history = HEALTH_LOG_MINIMUM_HISTORY; - config_set_number(CONFIG_SECTION_HEALTH, "health log history", health_globals.config.health_log_history); + health_globals.config.health_log_retention_s = HEALTH_LOG_MINIMUM_HISTORY; + config_set_duration_seconds(CONFIG_SECTION_HEALTH, "health log retention", health_globals.config.health_log_retention_s); } nd_log(NDLS_DAEMON, NDLP_DEBUG, "Health log history is set to %u seconds (%u days)", - health_globals.config.health_log_history, health_globals.config.health_log_history / 86400); + health_globals.config.health_log_retention_s, health_globals.config.health_log_retention_s / 86400); } -inline char *health_user_config_dir(void) { +inline const char *health_user_config_dir(void) { char buffer[FILENAME_MAX + 1]; snprintfz(buffer, FILENAME_MAX, "%s/health.d", netdata_configured_user_config_dir); return config_get(CONFIG_SECTION_DIRECTORIES, "health config", buffer); } -inline char *health_stock_config_dir(void) { +inline const char *health_stock_config_dir(void) { char buffer[FILENAME_MAX + 1]; snprintfz(buffer, FILENAME_MAX, "%s/health.d", netdata_configured_stock_config_dir); return config_get(CONFIG_SECTION_DIRECTORIES, "stock health config", buffer); diff --git a/src/health/health.d/anomalies.conf b/src/health/health.d/anomalies.conf deleted file mode 100644 index 80d63bb8d..000000000 --- a/src/health/health.d/anomalies.conf +++ /dev/null @@ -1,25 +0,0 @@ -## raise a warning alarm if an anomaly probability is consistently above 50% - -## "foreach" was removed, these alarms don't work anymore - -# template: anomalies_anomaly_probabilities -# on: anomalies.probability -# class: Errors -# type: Netdata -#component: ML -# lookup: average -2m foreach * -# every: 1m -# warn: $this > 50 -# info: average anomaly probability over the last 2 minutes - -# raise a warning alarm if an anomaly flag is consistently firing - -# template: anomalies_anomaly_flags -# on: anomalies.anomaly -# class: Errors -# type: Netdata -#component: ML -# lookup: sum -2m foreach * -# every: 1m -# warn: $this > 10 -# info: number of anomalies in the last 2 minutes diff --git a/src/health/health.d/apcupsd.conf b/src/health/health.d/apcupsd.conf index 5fd7aa112..58d3b214b 100644 --- a/src/health/health.d/apcupsd.conf +++ b/src/health/health.d/apcupsd.conf @@ -1,11 +1,11 @@ # you can disable an alarm notification by setting the 'to' line to: silent - template: apcupsd_10min_ups_load - on: apcupsd.load + template: apcupsd_ups_load_capacity + on: apcupsd.ups_load_capacity_utilization class: Utilization type: Power Supply -component: UPS - lookup: average -10m unaligned of percentage +component: UPS device + lookup: average -10m unaligned of load units: % every: 1m warn: $this > (($status >= $WARNING) ? 
(70) : (80)) @@ -14,13 +14,11 @@ component: UPS info: APC UPS average load over the last 10 minutes to: sitemgr -# Discussion in https://github.com/netdata/netdata/pull/3928: -# Fire the alarm as soon as it's going on battery (99% charge) and clear only when full. - template: apcupsd_ups_charge - on: apcupsd.charge + template: apcupsd_ups_battery_charge + on: apcupsd.ups_battery_charge class: Errors type: Power Supply -component: UPS +component: UPS device lookup: average -60s unaligned of charge units: % every: 60s @@ -32,7 +30,7 @@ component: UPS to: sitemgr template: apcupsd_last_collected_secs - on: apcupsd.load + on: apcupsd.ups_status class: Latency type: Power Supply component: UPS device @@ -47,21 +45,21 @@ component: UPS device #Send out a warning when SELFTEST code is BT or NG. Code descriptions can be found at: #http://www.apcupsd.org/manual/#:~:text=or%20N/A.-,SELFTEST,-The%20results%20of - template: apcupsd_selftest_warning - on: apcupsd.selftest + template: apcupsd_ups_selftest_warning + on: apcupsd.ups_selftest lookup: max -1s unaligned match-names of BT,NG units: status every: 10s warn: $this == 1 delay: up 0 down 15m multiplier 1.5 max 1h - info: APC UPS self-test failed due to insufficient battery capacity or due to overload. + info: APC UPS self-test failed due to insufficient battery capacity or due to overload to: sitemgr #Send out a warning when STATUS code is ONBATT,OVERLOAD,LOWBATT,REPLACEBATT,NOBATT,COMMLOST #https://man.archlinux.org/man/apcaccess.8.en#:~:text=apcupsd%20was%20started-,STATUS,-%3A%20UPS%20status.%20One - template: apcupsd_status_onbatt - on: apcupsd.status + template: apcupsd_ups_status_onbatt + on: apcupsd.ups_status lookup: max -1s unaligned match-names of ONBATT units: status every: 10s @@ -70,8 +68,8 @@ component: UPS device info: APC UPS has switched to battery power because the input power has failed to: sitemgr - template: apcupsd_status_overload - on: apcupsd.status + template: apcupsd_ups_status_overload + on: apcupsd.ups_status lookup: max -1s unaligned match-names of OVERLOAD units: status every: 10s @@ -80,8 +78,8 @@ component: UPS device info: APC UPS is overloaded and cannot supply enough power to the load to: sitemgr - template: apcupsd_status_lowbatt - on: apcupsd.status + template: apcupsd_ups_status_lowbatt + on: apcupsd.ups_status lookup: max -1s unaligned match-names of LOWBATT units: status every: 10s @@ -90,8 +88,8 @@ component: UPS device info: APC UPS battery is low and needs to be recharged to: sitemgr - template: apcupsd_status_replacebatt - on: apcupsd.status + template: apcupsd_ups_status_replacebatt + on: apcupsd.ups_status lookup: max -1s unaligned match-names of REPLACEBATT units: status every: 10s @@ -100,8 +98,8 @@ component: UPS device info: APC UPS battery has reached the end of its lifespan and needs to be replaced to: sitemgr - template: apcupsd_status_nobatt - on: apcupsd.status + template: apcupsd_ups_status_nobatt + on: apcupsd.ups_status lookup: max -1s unaligned match-names of NOBATT units: status every: 10s @@ -110,8 +108,8 @@ component: UPS device info: APC UPS has no battery to: sitemgr - template: apcupsd_status_commlost - on: apcupsd.status + template: apcupsd_ups_status_commlost + on: apcupsd.ups_status lookup: max -1s unaligned match-names of COMMLOST units: status every: 10s diff --git a/src/health/health.d/boinc.conf b/src/health/health.d/boinc.conf index 6fd987de1..987d20212 100644 --- a/src/health/health.d/boinc.conf +++ b/src/health/health.d/boinc.conf @@ -2,11 +2,11 @@ # Warn on any compute 
errors encountered. template: boinc_compute_errors - on: boinc.states + on: boinc.tasks_per_state class: Errors type: Computing component: BOINC - lookup: average -10m unaligned of comperror + lookup: average -10m unaligned of compute_error units: tasks every: 1m warn: $this > 0 @@ -17,7 +17,7 @@ component: BOINC # Warn on lots of upload errors template: boinc_upload_errors - on: boinc.states + on: boinc.tasks_per_state class: Errors type: Computing component: BOINC diff --git a/src/health/health.d/ceph.conf b/src/health/health.d/ceph.conf index 44d351338..0048e2a7c 100644 --- a/src/health/health.d/ceph.conf +++ b/src/health/health.d/ceph.conf @@ -1,16 +1,16 @@ # low ceph disk available - template: ceph_cluster_space_usage - on: ceph.general_usage + template: ceph_cluster_physical_capacity_utilization + on: ceph.cluster_physical_capacity_utilization class: Utilization type: Storage component: Ceph - calc: $used * 100 / ($used + $avail) + calc: $utilization units: % every: 1m - warn: $this > (($status >= $WARNING ) ? (85) : (90)) - crit: $this > (($status == $CRITICAL) ? (90) : (98)) - delay: down 5m multiplier 1.2 max 1h - summary: Ceph cluster disk space utilization - info: Ceph cluster disk space utilization - to: sysadmin + warn: $this > (($status >= $WARNING ) ? (85) : (90)) + crit: $this > (($status == $CRITICAL) ? (90) : (98)) + delay: down 5m multiplier 1.2 max 1h + summary: Ceph cluster ${label:fsid} disk space utilization + info: Ceph cluster ${label:fsid} disk space utilization + to: sysadmin diff --git a/src/health/health.d/disks.conf b/src/health/health.d/disks.conf index fe96837fb..d8176a6be 100644 --- a/src/health/health.d/disks.conf +++ b/src/health/health.d/disks.conf @@ -12,24 +12,22 @@ class: Utilization type: System component: Disk - host labels: _os=linux freebsd -chart labels: mount_point=!/dev !/dev/* !/run !/run/* * - calc: $used * 100 / ($avail + $used) - units: % - every: 1m - warn: $this > (($status >= $WARNING ) ? (80) : (90)) - crit: ($this > (($status == $CRITICAL) ? (90) : (98))) && $avail < 5 - delay: up 1m down 15m multiplier 1.5 max 1h - summary: Disk ${label:mount_point} space usage - info: Total space utilization of disk ${label:mount_point} - to: sysadmin +chart labels: mount_point=!/dev !/dev/* !/run !/run/* !HarddiskVolume* * + calc: $used * 100 / ($avail + $used) + units: % + every: 1m + warn: $this > (($status >= $WARNING ) ? (80) : (90)) + crit: ($this > (($status == $CRITICAL) ? (90) : (98))) && $avail < 5 + delay: up 1m down 15m multiplier 1.5 max 1h + summary: Disk ${label:mount_point} space usage + info: Total space utilization of disk ${label:mount_point} + to: sysadmin template: disk_inode_usage on: disk.inodes class: Utilization type: System component: Disk - host labels: _os=linux freebsd chart labels: mount_point=!/dev !/dev/* !/run !/run/* * calc: $used * 100 / ($avail + $used) units: % @@ -55,7 +53,6 @@ chart labels: mount_point=!/dev !/dev/* !/run !/run/* * template: disk_fill_rate on: disk.space -host labels: _os=linux freebsd lookup: min -10m at -50m unaligned of avail calc: ($this - $avail) / (($now - $after) / 3600) every: 1m @@ -67,7 +64,6 @@ host labels: _os=linux freebsd template: out_of_disk_space_time on: disk.space -host labels: _os=linux freebsd calc: ($disk_fill_rate > 0) ? 
($avail / $disk_fill_rate) : (inf) units: hours every: 10s @@ -92,7 +88,6 @@ host labels: _os=linux freebsd template: disk_inode_rate on: disk.inodes -host labels: _os=linux freebsd lookup: min -10m at -50m unaligned of avail calc: ($this - $avail) / (($now - $after) / 3600) every: 1m @@ -105,7 +100,6 @@ host labels: _os=linux freebsd template: out_of_disk_inodes_time on: disk.inodes -host labels: _os=linux freebsd calc: ($disk_inode_rate > 0) ? ($avail / $disk_inode_rate) : (inf) units: hours every: 10s @@ -129,7 +123,6 @@ host labels: _os=linux freebsd class: Utilization type: System component: Disk -host labels: _os=linux freebsd lookup: average -10m unaligned units: % every: 1m @@ -150,7 +143,6 @@ host labels: _os=linux freebsd class: Latency type: System component: Disk -host labels: _os=linux freebsd lookup: average -10m unaligned units: ms every: 1m diff --git a/src/health/health.d/net.conf b/src/health/health.d/net.conf index 448a3733d..609741aca 100644 --- a/src/health/health.d/net.conf +++ b/src/health/health.d/net.conf @@ -19,7 +19,7 @@ component: Network class: Workload type: System component: Network -host labels: _os=linux +host labels: _os=linux windows lookup: average -1m unaligned absolute of received calc: ($interface_speed > 0) ? ($this * 100 / ($interface_speed * 1000)) : ( nan ) units: % @@ -35,7 +35,7 @@ host labels: _os=linux class: Workload type: System component: Network -host labels: _os=linux +host labels: _os=linux windows lookup: average -1m unaligned absolute of sent calc: ($interface_speed > 0) ? ($this * 100 / ($interface_speed * 1000)) : ( nan ) units: % @@ -214,7 +214,6 @@ host labels: _os=linux class: Workload type: System component: Network -host labels: _os=linux freebsd lookup: average -1m unaligned of received units: packets every: 10s @@ -225,7 +224,6 @@ host labels: _os=linux freebsd class: Workload type: System component: Network -host labels: _os=linux freebsd lookup: average -10s unaligned of received calc: $this * 100 / (($1m_received_packets_rate < 1000)?(1000):($1m_received_packets_rate)) every: 10s @@ -237,3 +235,21 @@ host labels: _os=linux freebsd info: Ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, \ compared to the rate over the last minute to: silent + +# ----------------------------------------------------------------------------- +# output queue length + + template: network_interface_output_queue_length + on: net.queue_length + class: Errors + type: System + component: Network +host labels: _os=windows + units: packets + every: 10s + warn: $length > 2 + delay: up 1m down 1m multiplier 1.5 max 1h + summary: System network interface ${label:device} output queue length + info: The Output Queue Length on interface ${label:device} should be zero, otherwise there are delays and bottlenecks. + to: silent + diff --git a/src/health/health.d/vernemq.conf b/src/health/health.d/vernemq.conf index 6ea9f99dc..df7f68fc4 100644 --- a/src/health/health.d/vernemq.conf +++ b/src/health/health.d/vernemq.conf @@ -2,67 +2,67 @@ # Socket errors template: vernemq_socket_errors - on: vernemq.socket_errors + on: vernemq.node_socket_errors class: Errors type: Messaging component: VerneMQ - lookup: sum -1m unaligned absolute of socket_error + lookup: sum -1m unaligned units: errors every: 1m warn: $this > (($status >= $WARNING) ? 
(0) : (5)) delay: up 2m down 5m multiplier 1.5 max 2h - summary: VerneMQ socket errors - info: Number of socket errors in the last minute + summary: Node ${label:node} socket errors + info: Node ${label:node} socket errors in the last minute to: sysadmin # Queues dropped/expired/unhandled PUBLISH messages template: vernemq_queue_message_drop - on: vernemq.queue_undelivered_messages + on: vernemq.node_queue_undelivered_messages class: Errors type: Messaging component: VerneMQ - lookup: average -1m unaligned absolute of queue_message_drop + lookup: average -1m unaligned absolute of dropped units: dropped messages every: 1m warn: $this > (($status >= $WARNING) ? (0) : (5)) delay: up 2m down 5m multiplier 1.5 max 2h - summary: VerneMQ dropped messages - info: Number of dropped messages due to full queues in the last minute + summary: Node ${label:node} dropped messages + info: Node ${label:node} dropped messages due to full queues in the last minute to: sysadmin template: vernemq_queue_message_expired - on: vernemq.queue_undelivered_messages + on: vernemq.node_queue_undelivered_messages class: Latency type: Messaging component: VerneMQ - lookup: average -1m unaligned absolute of queue_message_expired + lookup: average -1m unaligned absolute of expired units: expired messages every: 1m warn: $this > (($status >= $WARNING) ? (0) : (5)) delay: up 2m down 5m multiplier 1.5 max 2h - summary: VerneMQ expired messages - info: number of messages which expired before delivery in the last minute + summary: Node ${label:node} expired messages + info: Node ${label:node} expired before delivery messages in the last minute to: sysadmin template: vernemq_queue_message_unhandled - on: vernemq.queue_undelivered_messages + on: vernemq.node_queue_undelivered_messages class: Latency type: Messaging component: VerneMQ - lookup: average -1m unaligned absolute of queue_message_unhandled + lookup: average -1m unaligned absolute of unhandled units: unhandled messages every: 1m warn: $this > (($status >= $WARNING) ? (0) : (5)) delay: up 2m down 5m multiplier 1.5 max 2h - summary: VerneMQ unhandled messages - info: Number of unhandled messages (connections with clean session=true) in the last minute + summary: Node ${label:node} unhandled messages + info: Node ${label:node} unhandled messages in the last minute to: sysadmin # Erlang VM template: vernemq_average_scheduler_utilization - on: vernemq.average_scheduler_utilization + on: vernemq.node_average_scheduler_utilization class: Utilization type: Messaging component: VerneMQ @@ -72,14 +72,14 @@ component: VerneMQ warn: $this > (($status >= $WARNING) ? (75) : (85)) crit: $this > (($status == $CRITICAL) ? 
(85) : (95)) delay: down 15m multiplier 1.5 max 1h - summary: VerneMQ scheduler utilization - info: Average scheduler utilization over the last 10 minutes + summary: Node ${label:node} scheduler utilization + info: Node ${label:node} scheduler utilization over the last 10 minutes to: sysadmin # Cluster communication and netsplits template: vernemq_cluster_dropped - on: vernemq.cluster_dropped + on: vernemq.node_cluster_dropped class: Errors type: Messaging component: VerneMQ @@ -88,74 +88,74 @@ component: VerneMQ every: 1m warn: $this > 0 delay: up 5m down 5m multiplier 1.5 max 1h - summary: VerneMQ dropped traffic - info: Amount of traffic dropped during communication with the cluster nodes in the last minute + summary: Node ${label:node} dropped cluster traffic + info: Node ${label:node} traffic dropped during communication with the cluster nodes in the last minute to: sysadmin template: vernemq_netsplits - on: vernemq.netsplits + on: vernemq.node_netsplits class: Workload type: Messaging component: VerneMQ - lookup: sum -1m unaligned absolute of netsplit_detected + lookup: sum -1m unaligned absolute of detected units: netsplits every: 10s warn: $this > 0 delay: down 5m multiplier 1.5 max 2h - summary: VerneMQ netsplits - info: Number of detected netsplits (split brain situation) in the last minute + summary: Node ${label:node} detected netsplits + info: Node ${label:node} detected netsplits (split brain) in the last minute to: sysadmin # Unsuccessful CONNACK template: vernemq_mqtt_connack_sent_reason_unsuccessful - on: vernemq.mqtt_connack_sent_reason + on: vernemq.node_mqtt_connack_sent_by_reason_code class: Errors type: Messaging component: VerneMQ - lookup: average -1m unaligned absolute match-names of !success,* + lookup: average -1m unaligned absolute of !success,* units: packets every: 1m warn: $this > (($status >= $WARNING) ? (0) : (5)) delay: up 2m down 5m multiplier 1.5 max 2h - summary: VerneMQ unsuccessful CONNACK - info: Number of sent unsuccessful v3/v5 CONNACK packets in the last minute + summary: Node ${label:node} unsuccessful sent CONNACK + info: Node ${label:node} unsuccessful sent v5 CONNACK packets in the last minute to: sysadmin # Not normal DISCONNECT template: vernemq_mqtt_disconnect_received_reason_not_normal - on: vernemq.mqtt_disconnect_received_reason + on: vernemq.node_mqtt_disconnect_received_by_reason_code class: Workload type: Messaging component: VerneMQ - lookup: average -1m unaligned absolute match-names of !normal_disconnect,* + lookup: average -1m unaligned absolute of !normal_disconnect,* units: packets every: 1m warn: $this > (($status >= $WARNING) ? (0) : (5)) delay: up 2m down 5m multiplier 1.5 max 2h - summary: VerneMQ received not normal DISCONNECT - info: Number of received not normal v5 DISCONNECT packets in the last minute + summary: Node ${label:node} received not normal DISCONNECT + info: Node ${label:node} received not normal v5 DISCONNECT packets in the last minute to: sysadmin template: vernemq_mqtt_disconnect_sent_reason_not_normal - on: vernemq.mqtt_disconnect_sent_reason + on: vernemq.node_mqtt_disconnect_sent_by_reason_code class: Errors type: Messaging component: VerneMQ - lookup: average -1m unaligned absolute match-names of !normal_disconnect,* + lookup: average -1m unaligned absolute of !normal_disconnect,* units: packets every: 1m warn: $this > (($status >= $WARNING) ? 
(0) : (5)) delay: up 2m down 5m multiplier 1.5 max 2h - summary: VerneMQ sent not normal DISCONNECT - info: Number of sent not normal v5 DISCONNECT packets in the last minute + summary: Node ${label:node} sent not normal DISCONNECT + info: Node ${label:node} sent not normal v5 DISCONNECT packets in the last minute to: sysadmin # SUBSCRIBE errors and unauthorized attempts template: vernemq_mqtt_subscribe_error - on: vernemq.mqtt_subscribe_error + on: vernemq.node_mqtt_subscribe_error class: Errors type: Messaging component: VerneMQ @@ -164,12 +164,12 @@ component: VerneMQ every: 1m warn: $this > (($status >= $WARNING) ? (0) : (5)) delay: up 2m down 5m multiplier 1.5 max 2h - summary: VerneMQ failed SUBSCRIBE - info: Number of failed v3/v5 SUBSCRIBE operations in the last minute + summary: Node ${label:node} mqtt v${label:mqtt_version} failed SUBSCRIBE + info: Node ${label:node} mqtt v${label:mqtt_version} failed SUBSCRIBE operations in the last minute to: sysadmin template: vernemq_mqtt_subscribe_auth_error - on: vernemq.mqtt_subscribe_auth_error + on: vernemq.node_mqtt_subscribe_auth_error class: Workload type: Messaging component: VerneMQ @@ -178,14 +178,14 @@ component: VerneMQ every: 1m warn: $this > (($status >= $WARNING) ? (0) : (5)) delay: up 2m down 5m multiplier 1.5 max 2h - summary: VerneMQ unauthorized SUBSCRIBE - info: number of unauthorized v3/v5 SUBSCRIBE attempts in the last minute + summary: Node ${label:node} mqtt v${label:mqtt_version} unauthorized SUBSCRIBE + info: Node ${label:node} mqtt v${label:mqtt_version} unauthorized SUBSCRIBE attempts in the last minute to: sysadmin # UNSUBSCRIBE errors template: vernemq_mqtt_unsubscribe_error - on: vernemq.mqtt_unsubscribe_error + on: vernemq.node_mqtt_unsubscribe_error class: Errors type: Messaging component: VerneMQ @@ -194,14 +194,14 @@ component: VerneMQ every: 1m warn: $this > (($status >= $WARNING) ? (0) : (5)) delay: up 2m down 5m multiplier 1.5 max 2h - summary: VerneMQ failed UNSUBSCRIBE - info: Number of failed v3/v5 UNSUBSCRIBE operations in the last minute + summary: Node ${label:node} mqtt v${label:mqtt_version} failed UNSUBSCRIBE + info: Node ${label:node} mqtt v${label:mqtt_version} failed UNSUBSCRIBE operations in the last minute to: sysadmin # PUBLISH errors and unauthorized attempts template: vernemq_mqtt_publish_errors - on: vernemq.mqtt_publish_errors + on: vernemq.node_mqtt_publish_errors class: Errors type: Messaging component: VerneMQ @@ -210,12 +210,12 @@ component: VerneMQ every: 1m warn: $this > (($status >= $WARNING) ? (0) : (5)) delay: up 2m down 5m multiplier 1.5 max 2h - summary: VerneMQ failed PUBLISH - info: Number of failed v3/v5 PUBLISH operations in the last minute + summary: Node ${label:node} mqtt v${label:mqtt_version} failed PUBLISH + info: Node ${label:node} mqtt v${label:mqtt_version} failed PUBLISH operations in the last minute to: sysadmin template: vernemq_mqtt_publish_auth_errors - on: vernemq.mqtt_publish_auth_errors + on: vernemq.node_mqtt_publish_auth_errors class: Workload type: Messaging component: VerneMQ @@ -224,42 +224,42 @@ component: VerneMQ every: 1m warn: $this > (($status >= $WARNING) ? 
(0) : (5)) delay: up 2m down 5m multiplier 1.5 max 2h - summary: VerneMQ unauthorized PUBLISH - info: Number of unauthorized v3/v5 PUBLISH attempts in the last minute + summary: Node ${label:node} mqtt v${label:mqtt_version} unauthorized PUBLISH + info: Node ${label:node} mqtt v${label:mqtt_version} unauthorized PUBLISH attempts in the last minute to: sysadmin # Unsuccessful and unexpected PUBACK template: vernemq_mqtt_puback_received_reason_unsuccessful - on: vernemq.mqtt_puback_received_reason + on: vernemq.node_mqtt_puback_received_by_reason_code class: Errors type: Messaging component: VerneMQ - lookup: average -1m unaligned absolute match-names of !success,* + lookup: average -1m unaligned absolute of !success,* units: packets every: 1m warn: $this > (($status >= $WARNING) ? (0) : (5)) delay: up 2m down 5m multiplier 1.5 max 2h - summary: VerneMQ unsuccessful received PUBACK - info: Number of received unsuccessful v5 PUBACK packets in the last minute + summary: Node ${label:node} mqtt v5 received unsuccessful PUBACK + info: Node ${label:node} mqtt v5 received unsuccessful PUBACK packets in the last minute to: sysadmin template: vernemq_mqtt_puback_sent_reason_unsuccessful - on: vernemq.mqtt_puback_sent_reason + on: vernemq.node_mqtt_puback_sent_by_reason_code class: Errors type: Messaging component: VerneMQ - lookup: average -1m unaligned absolute match-names of !success,* + lookup: average -1m unaligned absolute of !success,* units: packets every: 1m warn: $this > (($status >= $WARNING) ? (0) : (5)) delay: up 2m down 5m multiplier 1.5 max 2h - summary: VerneMQ unsuccessful sent PUBACK - info: Number of sent unsuccessful v5 PUBACK packets in the last minute + summary: Node ${label:node} mqtt v5 unsuccessful sent PUBACK + info: Node ${label:node} mqtt v5 unsuccessful sent PUBACK packets in the last minute to: sysadmin template: vernemq_mqtt_puback_unexpected - on: vernemq.mqtt_puback_invalid_error + on: vernemq.node_mqtt_puback_invalid_error class: Workload type: Messaging component: VerneMQ @@ -268,42 +268,42 @@ component: VerneMQ every: 1m warn: $this > (($status >= $WARNING) ? (0) : (5)) delay: up 2m down 5m multiplier 1.5 max 2h - summary: VerneMQ unnexpected recieved PUBACK - info: Number of received unexpected v3/v5 PUBACK packets in the last minute + summary: Node ${label:node} mqtt v${label:mqtt_version} received unexpected PUBACK + info: Node ${label:node} mqtt v${label:mqtt_version} received unexpected PUBACK messages in the last minute to: sysadmin # Unsuccessful and unexpected PUBREC template: vernemq_mqtt_pubrec_received_reason_unsuccessful - on: vernemq.mqtt_pubrec_received_reason + on: vernemq.node_mqtt_pubrec_received_by_reason_code class: Errors type: Messaging component: VerneMQ - lookup: average -1m unaligned absolute match-names of !success,* + lookup: average -1m unaligned absolute of !success,* units: packets every: 1m warn: $this > (($status >= $WARNING) ? 
(0) : (5)) delay: up 2m down 5m multiplier 1.5 max 2h - summary: VerneMQ unsuccessful received PUBREC - info: Number of received unsuccessful v5 PUBREC packets in the last minute + summary: Node ${label:node} mqtt v5 received unsuccessful PUBREC + info: Node ${label:node} mqtt v5 received unsuccessful PUBREC packets in the last minute to: sysadmin template: vernemq_mqtt_pubrec_sent_reason_unsuccessful - on: vernemq.mqtt_pubrec_sent_reason + on: vernemq.node_mqtt_pubrec_sent_by_reason_code class: Errors type: Messaging component: VerneMQ - lookup: average -1m unaligned absolute match-names of !success,* + lookup: average -1m unaligned absolute of !success,* units: packets every: 1m warn: $this > (($status >= $WARNING) ? (0) : (5)) delay: up 2m down 5m multiplier 1.5 max 2h - summary: VerneMQ unsuccessful sent PUBREC - info: Number of sent unsuccessful v5 PUBREC packets in the last minute + summary: Node ${label:node} mqtt v5 unsuccessful sent PUBREC + info: Node ${label:node} mqtt v5 unsuccessful sent PUBREC packets in the last minute to: sysadmin template: vernemq_mqtt_pubrec_invalid_error - on: vernemq.mqtt_pubrec_invalid_error + on: vernemq.node_mqtt_pubrec_invalid_error class: Workload type: Messaging component: VerneMQ @@ -312,72 +312,72 @@ component: VerneMQ every: 1m warn: $this > (($status >= $WARNING) ? (0) : (5)) delay: up 2m down 5m multiplier 1.5 max 2h - summary: VerneMQ invalid received PUBREC - info: Number of received invalid v3 PUBREC packets in the last minute + summary: Node ${label:node} mqtt v${label:mqtt_version} received invalid PUBREC + info: Node ${label:node} mqtt v${label:mqtt_version} received invalid PUBREC packets in the last minute to: sysadmin # Unsuccessful PUBREL template: vernemq_mqtt_pubrel_received_reason_unsuccessful - on: vernemq.mqtt_pubrel_received_reason + on: vernemq.node_mqtt_pubrel_received_by_reason_code class: Errors type: Messaging component: VerneMQ - lookup: average -1m unaligned absolute match-names of !success,* + lookup: average -1m unaligned absolute of !success,* units: packets every: 1m warn: $this > (($status >= $WARNING) ? (0) : (5)) delay: up 2m down 5m multiplier 1.5 max 2h - summary: VerneMQ unsuccessful received PUBREL - info: Number of received unsuccessful v5 PUBREL packets in the last minute + summary: Node ${label:node} mqtt v5 received unsuccessful PUBREL + info: Node ${label:node} mqtt v5 received unsuccessful PUBREL packets in the last minute to: sysadmin template: vernemq_mqtt_pubrel_sent_reason_unsuccessful - on: vernemq.mqtt_pubrel_sent_reason + on: vernemq.node_mqtt_pubrel_sent_by_reason_code class: Errors type: Messaging component: VerneMQ - lookup: average -1m unaligned absolute match-names of !success,* + lookup: average -1m unaligned absolute of !success,* units: packets every: 1m warn: $this > (($status >= $WARNING) ? 
(0) : (5)) delay: up 2m down 5m multiplier 1.5 max 2h - summary: VerneMQ unsuccessful sent PUBREL - info: number of sent unsuccessful v5 PUBREL packets in the last minute + summary: Node ${label:node} mqtt v5 unsuccessful sent PUBREL + info: Node ${label:node} mqtt v5 unsuccessful sent PUBREL packets in the last minute to: sysadmin # Unsuccessful and unexpected PUBCOMP template: vernemq_mqtt_pubcomp_received_reason_unsuccessful - on: vernemq.mqtt_pubcomp_received_reason + on: vernemq.node_mqtt_pubcomp_received_by_reason_code class: Errors type: Messaging component: VerneMQ - lookup: average -1m unaligned absolute match-names of !success,* + lookup: average -1m unaligned absolute of !success,* units: packets every: 1m warn: $this > (($status >= $WARNING) ? (0) : (5)) delay: up 2m down 5m multiplier 1.5 max 2h - summary: VerneMQ unsuccessful received PUBCOMP - info: Number of received unsuccessful v5 PUBCOMP packets in the last minute + summary: Node ${label:node} mqtt v5 received unsuccessful PUBCOMP + info: Node ${label:node} mqtt v5 received unsuccessful PUBCOMP packets in the last minute to: sysadmin template: vernemq_mqtt_pubcomp_sent_reason_unsuccessful - on: vernemq.mqtt_pubcomp_sent_reason + on: vernemq.node_mqtt_pubcomp_sent_by_reason_code class: Errors type: Messaging component: VerneMQ - lookup: average -1m unaligned absolute match-names of !success,* + lookup: average -1m unaligned absolute of !success,* units: packets every: 1m warn: $this > (($status >= $WARNING) ? (0) : (5)) delay: up 2m down 5m multiplier 1.5 max 2h - summary: VerneMQ unsuccessful sent PUBCOMP - info: number of sent unsuccessful v5 PUBCOMP packets in the last minute + summary: Node ${label:node} mqtt v5 unsuccessful sent PUBCOMP + info: Node ${label:node} mqtt v5 unsuccessful sent PUBCOMP packets in the last minute to: sysadmin template: vernemq_mqtt_pubcomp_unexpected - on: vernemq.mqtt_pubcomp_invalid_error + on: vernemq.node_mqtt_pubcomp_invalid_error class: Workload type: Messaging component: VerneMQ @@ -386,6 +386,6 @@ component: VerneMQ every: 1m warn: $this > (($status >= $WARNING) ? 
(0) : (5)) delay: up 2m down 5m multiplier 1.5 max 2h - summary: VerneMQ unexpected received PUBCOMP - info: number of received unexpected v3/v5 PUBCOMP packets in the last minute + summary: Node ${label:node} mqtt v${label:mqtt_version} received unexpected PUBCOMP + info: Node ${label:node} mqtt v${label:mqtt_version} received unexpected PUBCOMP packets in the last minute to: sysadmin diff --git a/src/health/health.h b/src/health/health.h index b1ac5a9e1..cdd089623 100644 --- a/src/health/health.h +++ b/src/health/health.h @@ -34,8 +34,8 @@ void health_entry_flags_to_json_array(BUFFER *wb, const char *key, HEALTH_ENTRY_ #define HEALTH_LISTEN_BACKLOG 4096 #endif -#ifndef HEALTH_LOG_DEFAULT_HISTORY -#define HEALTH_LOG_DEFAULT_HISTORY 432000 +#ifndef HEALTH_LOG_RETENTION_DEFAULT +#define HEALTH_LOG_RETENTION_DEFAULT (5 * 86400) #endif #ifndef HEALTH_LOG_MINIMUM_HISTORY @@ -51,7 +51,7 @@ void health_plugin_reload(void); void health_aggregate_alarms(RRDHOST *host, BUFFER *wb, BUFFER* context, RRDCALC_STATUS status); void health_alarms2json(RRDHOST *host, BUFFER *wb, int all); -void health_alert2json_conf(RRDHOST *host, BUFFER *wb, CONTEXTS_V2_OPTIONS all); +void health_alert2json_conf(RRDHOST *host, BUFFER *wb, CONTEXTS_OPTIONS all); void health_alarms_values2json(RRDHOST *host, BUFFER *wb, int all); void health_api_v1_chart_variables2json(RRDSET *st, BUFFER *wb); @@ -75,8 +75,8 @@ ALARM_ENTRY* health_create_alarm_entry( void health_alarm_log_add_entry(RRDHOST *host, ALARM_ENTRY *ae); -char *health_user_config_dir(void); -char *health_stock_config_dir(void); +const char *health_user_config_dir(void); +const char *health_stock_config_dir(void); void health_alarm_log_free(RRDHOST *host); void health_alarm_log_free_one_nochecks_nounlink(ALARM_ENTRY *ae); diff --git a/src/health/health_config.c b/src/health/health_config.c index c17f7e21d..d261f9022 100644 --- a/src/health/health_config.c +++ b/src/health/health_config.c @@ -29,14 +29,14 @@ static inline int health_parse_delay( while(*s && isspace((uint8_t)*s)) *s++ = '\0'; if(!strcasecmp(key, "up")) { - if (!config_parse_duration(value, delay_up_duration)) { + if (!duration_parse_seconds(value, delay_up_duration)) { netdata_log_error("Health configuration at line %zu of file '%s': invalid value '%s' for '%s' keyword", line, filename, value, key); } else given_up = 1; } else if(!strcasecmp(key, "down")) { - if (!config_parse_duration(value, delay_down_duration)) { + if (!duration_parse_seconds(value, delay_down_duration)) { netdata_log_error("Health configuration at line %zu of file '%s': invalid value '%s' for '%s' keyword", line, filename, value, key); } @@ -51,7 +51,7 @@ static inline int health_parse_delay( else given_multiplier = 1; } else if(!strcasecmp(key, "max")) { - if (!config_parse_duration(value, delay_max_duration)) { + if (!duration_parse_seconds(value, delay_max_duration)) { netdata_log_error("Health configuration at line %zu of file '%s': invalid value '%s' for '%s' keyword", line, filename, value, key); } @@ -139,13 +139,13 @@ static inline int health_parse_repeat( return 1; } if(!strcasecmp(key, "warning")) { - if (!config_parse_duration(value, (int*)warn_repeat_every)) { + if (!duration_parse_seconds(value, (int *)warn_repeat_every)) { netdata_log_error("Health configuration at line %zu of file '%s': invalid value '%s' for '%s' keyword", line, file, value, key); } } else if(!strcasecmp(key, "critical")) { - if (!config_parse_duration(value, (int*)crit_repeat_every)) { + if (!duration_parse_seconds(value, (int 
*)crit_repeat_every)) { netdata_log_error("Health configuration at line %zu of file '%s': invalid value '%s' for '%s' keyword", line, file, value, key); } @@ -155,13 +155,6 @@ static inline int health_parse_repeat( return 1; } -static inline int isvariableterm(const char s) { - if(isalnum(s) || s == '.' || s == '_') - return 0; - - return 1; -} - static inline int health_parse_db_lookup(size_t line, const char *filename, char *string, struct rrd_alert_config *ac) { if(ac->dimensions) string_freez(ac->dimensions); ac->dimensions = NULL; @@ -273,7 +266,7 @@ static inline int health_parse_db_lookup(size_t line, const char *filename, char while(*s && !isspace((uint8_t)*s)) s++; while(*s && isspace((uint8_t)*s)) *s++ = '\0'; - if(!config_parse_duration(key, &ac->after)) { + if(!duration_parse_seconds(key, &ac->after)) { netdata_log_error("Health configuration at line %zu of file '%s': invalid duration '%s' after group method", line, filename, key); return 0; @@ -294,7 +287,7 @@ static inline int health_parse_db_lookup(size_t line, const char *filename, char while(*s && !isspace((uint8_t)*s)) s++; while(*s && isspace((uint8_t)*s)) *s++ = '\0'; - if (!config_parse_duration(value, &ac->before)) { + if (!duration_parse_seconds(value, &ac->before)) { netdata_log_error("Health configuration at line %zu of file '%s': invalid duration '%s' for '%s' keyword", line, filename, value, key); } @@ -304,7 +297,7 @@ static inline int health_parse_db_lookup(size_t line, const char *filename, char while(*s && !isspace((uint8_t)*s)) s++; while(*s && isspace((uint8_t)*s)) *s++ = '\0'; - if (!config_parse_duration(value, &ac->update_every)) { + if (!duration_parse_seconds(value, &ac->update_every)) { netdata_log_error("Health configuration at line %zu of file '%s': invalid duration '%s' for '%s' keyword", line, filename, value, key); } @@ -725,7 +718,7 @@ int health_readfile(const char *filename, void *data __maybe_unused, bool stock_ health_parse_db_lookup(line, filename, value, ac); } else if(hash == hash_every && !strcasecmp(key, HEALTH_EVERY_KEY)) { - if(!config_parse_duration(value, &ac->update_every)) + if(!duration_parse_seconds(value, &ac->update_every)) netdata_log_error( "Health configuration at line %zu of file '%s' for alarm '%s' at key '%s' " "cannot parse duration: '%s'.", diff --git a/src/health/health_dyncfg.c b/src/health/health_dyncfg.c index f2b9bc607..48346f662 100644 --- a/src/health/health_dyncfg.c +++ b/src/health/health_dyncfg.c @@ -68,8 +68,8 @@ static bool parse_match(json_object *jobj, const char *path, struct rrd_alert_ma } static bool parse_config_value_database_lookup(json_object *jobj, const char *path, struct rrd_alert_config *config, BUFFER *error, bool strict) { - JSONC_PARSE_INT_OR_ERROR_AND_RETURN(jobj, path, "after", config->after, error, strict); - JSONC_PARSE_INT_OR_ERROR_AND_RETURN(jobj, path, "before", config->before, error, strict); + JSONC_PARSE_INT64_OR_ERROR_AND_RETURN(jobj, path, "after", config->after, error, strict); + JSONC_PARSE_INT64_OR_ERROR_AND_RETURN(jobj, path, "before", config->before, error, strict); JSONC_PARSE_TXT2ENUM_OR_ERROR_AND_RETURN(jobj, path, "time_group", time_grouping_txt2id, config->time_group, error, strict); JSONC_PARSE_TXT2ENUM_OR_ERROR_AND_RETURN(jobj, path, "dims_group", alerts_dims_grouping2id, config->dims_group, error, strict); JSONC_PARSE_TXT2ENUM_OR_ERROR_AND_RETURN(jobj, path, "data_source", alerts_data_sources2id, config->data_source, error, strict); @@ -98,7 +98,7 @@ static bool parse_config_value(json_object *jobj, const char *path, 
struct rrd_a JSONC_PARSE_SUBOBJECT(jobj, path, "database_lookup", config, parse_config_value_database_lookup, error, strict); JSONC_PARSE_TXT2EXPRESSION_OR_ERROR_AND_RETURN(jobj, path, "calculation", config->calculation, error, false); JSONC_PARSE_TXT2STRING_OR_ERROR_AND_RETURN(jobj, path, "units", config->units, error, false); - JSONC_PARSE_INT_OR_ERROR_AND_RETURN(jobj, path, "update_every", config->update_every, error, strict); + JSONC_PARSE_INT64_OR_ERROR_AND_RETURN(jobj, path, "update_every", config->update_every, error, strict); return true; } @@ -109,17 +109,17 @@ static bool parse_config_conditions(json_object *jobj, const char *path, struct } static bool parse_config_action_delay(json_object *jobj, const char *path, struct rrd_alert_config *config, BUFFER *error, bool strict) { - JSONC_PARSE_INT_OR_ERROR_AND_RETURN(jobj, path, "up", config->delay_up_duration, error, strict); - JSONC_PARSE_INT_OR_ERROR_AND_RETURN(jobj, path, "down", config->delay_down_duration, error, strict); - JSONC_PARSE_INT_OR_ERROR_AND_RETURN(jobj, path, "max", config->delay_max_duration, error, strict); + JSONC_PARSE_INT64_OR_ERROR_AND_RETURN(jobj, path, "up", config->delay_up_duration, error, strict); + JSONC_PARSE_INT64_OR_ERROR_AND_RETURN(jobj, path, "down", config->delay_down_duration, error, strict); + JSONC_PARSE_INT64_OR_ERROR_AND_RETURN(jobj, path, "max", config->delay_max_duration, error, strict); JSONC_PARSE_DOUBLE_OR_ERROR_AND_RETURN(jobj, path, "multiplier", config->delay_multiplier, error, strict); return true; } static bool parse_config_action_repeat(json_object *jobj, const char *path, struct rrd_alert_config *config, BUFFER *error, bool strict) { JSONC_PARSE_BOOL_OR_ERROR_AND_RETURN(jobj, path, "enabled", config->has_custom_repeat_config, error, strict); - JSONC_PARSE_INT_OR_ERROR_AND_RETURN(jobj, path, "warning", config->warn_repeat_every, error, strict); - JSONC_PARSE_INT_OR_ERROR_AND_RETURN(jobj, path, "critical", config->crit_repeat_every, error, strict); + JSONC_PARSE_INT64_OR_ERROR_AND_RETURN(jobj, path, "warning", config->warn_repeat_every, error, strict); + JSONC_PARSE_INT64_OR_ERROR_AND_RETURN(jobj, path, "critical", config->crit_repeat_every, error, strict); return true; } @@ -153,7 +153,7 @@ static bool parse_config(json_object *jobj, const char *path, RRD_ALERT_PROTOTYP static bool parse_prototype(json_object *jobj, const char *path, RRD_ALERT_PROTOTYPE *base, BUFFER *error, const char *name, bool strict) { int64_t version = 0; - JSONC_PARSE_INT_OR_ERROR_AND_RETURN(jobj, path, "format_version", version, error, strict); + JSONC_PARSE_UINT64_OR_ERROR_AND_RETURN(jobj, path, "format_version", version, error, strict); if(version != 1) { buffer_sprintf(error, "unsupported document version"); @@ -164,6 +164,11 @@ static bool parse_prototype(json_object *jobj, const char *path, RRD_ALERT_PROTO json_object *rules; if (json_object_object_get_ex(jobj, "rules", &rules)) { + if (json_object_get_type(rules) != json_type_array) { + buffer_sprintf(error, "member 'rules' is not an array"); + return false; + } + size_t rules_len = json_object_array_length(rules); RRD_ALERT_PROTOTYPE *ap = base; // fill the first entry @@ -270,7 +275,7 @@ static inline void health_prototype_rule_to_json_array_member(BUFFER *wb, RRD_AL buffer_json_member_add_object(wb, "config"); { if(!for_hashing) { - buffer_json_member_add_uuid(wb, "hash", &ap->config.hash_id); + buffer_json_member_add_uuid(wb, "hash", ap->config.hash_id); buffer_json_member_add_string(wb, "source_type", 
dyncfg_id2source_type(ap->config.source_type)); buffer_json_member_add_string(wb, "source", string2str(ap->config.source)); } diff --git a/src/health/health_event_loop.c b/src/health/health_event_loop.c index 04d70e11f..0bf6892dd 100644 --- a/src/health/health_event_loop.c +++ b/src/health/health_event_loop.c @@ -133,12 +133,12 @@ static void health_initialize_rrdhost(RRDHOST *host) { rrdhost_flag_set(host, RRDHOST_FLAG_INITIALIZED_HEALTH); host->health_log.max = health_globals.config.health_log_entries_max; - host->health_log.health_log_history = health_globals.config.health_log_history; + host->health_log.health_log_retention_s = health_globals.config.health_log_retention_s; host->health.health_default_exec = string_dup(health_globals.config.default_exec); host->health.health_default_recipient = string_dup(health_globals.config.default_recipient); host->health.use_summary_for_notifications = health_globals.config.use_summary_for_notifications; - host->health_log.next_log_id = (uint32_t)now_realtime_sec(); + host->health_log.next_log_id = get_uint32_id(); host->health_log.next_alarm_id = 0; rw_spinlock_init(&host->health_log.spinlock); @@ -229,7 +229,7 @@ static void health_event_loop(void) { "Postponing alarm checks for %"PRId32" seconds, " "because it seems that the system was just resumed from suspension.", (int32_t)health_globals.config.postpone_alarms_during_hibernation_for_seconds); - schedule_node_info_update(localhost); + schedule_node_state_update(localhost, 0); } if (unlikely(silencers->all_alarms && silencers->stype == STYPE_DISABLE_ALARMS)) { @@ -298,13 +298,11 @@ static void health_event_loop(void) { } worker_is_busy(WORKER_HEALTH_JOB_HOST_LOCK); -#ifdef ENABLE_ACLK - if (netdata_cloud_enabled) { + { struct aclk_sync_cfg_t *wc = host->aclk_config; if (wc && wc->send_snapshot == 2) continue; } -#endif // the first loop is to lookup values from the db foreach_rrdcalc_in_rrdhost_read(host, rc) { @@ -651,7 +649,6 @@ static void health_event_loop(void) { break; } } -#ifdef ENABLE_ACLK struct aclk_sync_cfg_t *wc = host->aclk_config; if (wc && wc->send_snapshot == 1) { wc->send_snapshot = 2; @@ -660,7 +657,6 @@ static void health_event_loop(void) { else if (process_alert_pending_queue(host)) rrdhost_flag_set(host, RRDHOST_FLAG_ACLK_STREAM_ALERTS); -#endif dfe_done(host); diff --git a/src/health/health_internals.h b/src/health/health_internals.h index 638a96195..a86e62956 100644 --- a/src/health/health_internals.h +++ b/src/health/health_internals.h @@ -9,7 +9,7 @@ #define HEALTH_LOG_ENTRIES_MAX 100000U #define HEALTH_LOG_ENTRIES_MIN 10U -#define HEALTH_LOG_HISTORY_DEFAULT (5 * 86400) +#define HEALTH_LOG_RETENTION_DEFAULT (5 * 86400) #define HEALTH_CONF_MAX_LINE 4096 @@ -76,7 +76,7 @@ struct health_plugin_globals { bool use_summary_for_notifications; unsigned int health_log_entries_max; - uint32_t health_log_history; // the health log history in seconds to be kept in db + uint32_t health_log_retention_s; // the health log retention in seconds to be kept in db STRING *silencers_filename; STRING *default_exec; diff --git a/src/health/health_notifications.c b/src/health/health_notifications.c index 85dd2d0d8..443c0246f 100644 --- a/src/health/health_notifications.c +++ b/src/health/health_notifications.c @@ -20,17 +20,27 @@ struct health_raised_summary { }; void health_alarm_wait_for_execution(ALARM_ENTRY *ae) { - if (!(ae->flags & HEALTH_ENTRY_FLAG_EXEC_IN_PROGRESS)) - return; + // this has to ALWAYS remove the given alarm entry from the queue - if(!ae->popen_instance) { - // 
nd_log(NDLS_DAEMON, NDLP_ERR, "attempted to wait for the execution of alert that has not spawn a notification"); - return; + int code = 0; + + if (!(ae->flags & HEALTH_ENTRY_FLAG_EXEC_IN_PROGRESS)) { + nd_log(NDLS_DAEMON, NDLP_ERR, "attempted to wait for the execution of an alert that has no execution in progress"); + code = 128; + goto cleanup; + } - ae->exec_code = spawn_popen_wait(ae->popen_instance); + if(!ae->popen_instance) { + nd_log(NDLS_DAEMON, NDLP_ERR, "attempted to wait for the execution of an alert that has not spawned a notification"); + code = 128; + goto cleanup; + } + code = spawn_popen_wait(ae->popen_instance); netdata_log_debug(D_HEALTH, "done executing command - returned with code %d", ae->exec_code); + +cleanup: + ae->exec_code = code; ae->flags &= ~HEALTH_ENTRY_FLAG_EXEC_IN_PROGRESS; if(ae->exec_code != 0) @@ -466,13 +476,18 @@ void health_send_notification(RRDHOST *host, ALARM_ENTRY *ae, struct health_rais ae->exec_run_timestamp = now_realtime_sec(); /* will be updated by real time after spawning */ netdata_log_debug(D_HEALTH, "executing command '%s'", command_to_run); - ae->flags |= HEALTH_ENTRY_FLAG_EXEC_IN_PROGRESS; ae->popen_instance = spawn_popen_run(command_to_run); - enqueue_alarm_notify_in_progress(ae); + if(ae->popen_instance) { + ae->flags |= HEALTH_ENTRY_FLAG_EXEC_IN_PROGRESS; + enqueue_alarm_notify_in_progress(ae); + } + else + netdata_log_error("Failed to execute alarm notification"); + health_alarm_log_save(host, ae); - } else { - netdata_log_error("Failed to format command arguments"); } + else + netdata_log_error("Failed to format command arguments"); buffer_free(warn_alarms); buffer_free(crit_alarms); diff --git a/src/health/notifications/README.md b/src/health/notifications/README.md index 5a2b032a3..e930e261f 100644 --- a/src/health/notifications/README.md +++ b/src/health/notifications/README.md @@ -10,10 +10,10 @@ The default script is `alarm-notify.sh`. > > This file mentions editing configuration files. > -> - To edit configuration files in a safe way, we provide the [`edit config` script](/docs/netdata-agent/configuration/README.md#edit-netdataconf)located in your [Netdata config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory) (typically is `/etc/netdata`) that creates the proper file and opens it in an editor automatically. +> - To edit configuration files in a safe way, we provide the [`edit config` script](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) located in your [Netdata config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory) (typically `/etc/netdata`) that creates the proper file and opens it in an editor automatically. > Note that to run the script you need to be inside your Netdata config directory. > -> - Please also note that after most configuration changes you will need to [restart the Agent](/packaging/installer/README.md#maintaining-a-netdata-agent-installation) for the changes to take effect. +> - Please also note that after most configuration changes you will need to [restart the Agent](/docs/netdata-agent/start-stop-restart.md) for the changes to take effect. > > It is recommended to use this way for configuring Netdata. @@ -29,7 +29,7 @@ It uses **roles**. For example `sysadmin`, `webmaster`, `dba`, etc. Each alert is assigned to one or more roles, using the `to` line of the alert configuration. 
For example, here is the alert configuration for `ram.conf` that defaults to the role `sysadmin`: -```conf +```text alarm: ram_in_use on: system.ram class: Utilization @@ -52,7 +52,7 @@ Then `alarm-notify.sh` uses its own configuration file `health_alarm_notify.conf Here is an example, of the `sysadmin`'s role recipients for the email notification. You can send the notification to multiple recipients by separating the emails with a space. -```conf +```text ############################################################################### # RECIPIENTS PER ROLE @@ -84,7 +84,7 @@ You can edit `health_alarm_notify.conf` using the `edit-config` script to config - **Recipients** per role per notification method - ```conf + ```text role_recipients_email[sysadmin]="${DEFAULT_RECIPIENT_EMAIL}" role_recipients_pushover[sysadmin]="${DEFAULT_RECIPIENT_PUSHOVER}" role_recipients_pushbullet[sysadmin]="${DEFAULT_RECIPIENT_PUSHBULLET}" @@ -132,7 +132,7 @@ When you define recipients per role for notification methods, you can append `|c In the following examples, the first recipient receives all the alerts, while the second one receives only notifications for alerts that have at some point become critical. The second user may still receive warning and clear notifications, but only for the event that previously caused a critical alert. -```conf +```text email : "user1@example.com user2@example.com|critical" pushover : "2987343...9437837 8756278...2362736|critical" telegram : "111827421 112746832|critical" @@ -158,7 +158,7 @@ This works for all notification methods (including the default recipients). If you need to send curl based notifications (pushover, pushbullet, slack, alerta, flock, discord, telegram) via a proxy, you should set these variables to your proxy address: -```conf +```text export http_proxy="http://10.0.0.1:3128/" export https_proxy="http://10.0.0.1:3128/" ``` @@ -173,7 +173,7 @@ If you have an Internet facing netdata (or you have copied the images/ folder of netdata to your web server), set its URL here, to fetch the notification images from it. 
-```conf +```text images_base_url="http://my.public.netdata.server:19999" ``` diff --git a/src/health/notifications/alarm-notify.sh.in b/src/health/notifications/alarm-notify.sh.in index c7c44cb11..d7baa7345 100755 --- a/src/health/notifications/alarm-notify.sh.in +++ b/src/health/notifications/alarm-notify.sh.in @@ -769,6 +769,9 @@ fi # check custom [ -z "${DEFAULT_RECIPIENT_CUSTOM}" ] && SEND_CUSTOM="NO" +# check ilert +[ -z "${ILERT_ALERT_SOURCE_URL}" ] && SEND_ILERT="NO" + # ----------------------------------------------------------------------------- # check the availability of targets @@ -798,7 +801,8 @@ check_supported_targets() { [ "${SEND_DYNATRACE}" = "YES" ] || [ "${SEND_OPSGENIE}" = "YES" ] || [ "${SEND_GOTIFY}" = "YES" ] || - [ "${SEND_NTFY}" = "YES" ]; then + [ "${SEND_NTFY}" = "YES" ] || + [ "${SEND_ILERT}" = "YES" ]; then # if we need curl, check for the curl command if [ -z "${curl}" ]; then curl="$(command -v curl 2>/dev/null)" @@ -828,6 +832,7 @@ check_supported_targets() { SEND_OPSGENIE="NO" SEND_GOTIFY="NO" SEND_NTFY="NO" + SEND_ILERT="NO" fi fi @@ -983,7 +988,8 @@ for method in "${SEND_EMAIL}" \ "${SEND_DYNATRACE}" \ "${SEND_OPSGENIE}" \ "${SEND_GOTIFY}" \ - "${SEND_NTFY}" ; do + "${SEND_NTFY}" \ + "${SEND_ILERT}" ; do if [ "${method}" == "YES" ]; then proceed=1 @@ -2431,6 +2437,50 @@ send_ntfy() { return 1 } +# ----------------------------------------------------------------------------- +# ilert sender + +send_ilert() { + local payload httpcode + [ "${SEND_ILERT}" != "YES" ] && return 1 + + if [ -z "${ILERT_ALERT_SOURCE_URL}" ] ; then + info "Can't send ilert notification, because ILERT_ALERT_SOURCE_URL is not defined" + return 1 + fi + + payload=$(cat </dev/null || cd /opt/netdata/etc/netdata @@ -71,7 +71,7 @@ You will need an API key to send messages from any source, if Alerta is configur The `DEFAULT_RECIPIENT_CUSTOM` can be edited in the following entries at the bottom of the same file: -```conf +```text role_recipients_alerta[sysadmin]="Systems" role_recipients_alerta[domainadmin]="Domains" role_recipients_alerta[dba]="Databases Systems" diff --git a/src/health/notifications/alerta/metadata.yaml b/src/health/notifications/alerta/metadata.yaml index f815032b9..363dd6e2b 100644 --- a/src/health/notifications/alerta/metadata.yaml +++ b/src/health/notifications/alerta/metadata.yaml @@ -58,7 +58,7 @@ detailed_description: | The `DEFAULT_RECIPIENT_CUSTOM` can be edited in the following entries at the bottom of the same file: - ```conf + ```text role_recipients_alerta[sysadmin]="Systems" role_recipients_alerta[domainadmin]="Domains" role_recipients_alerta[dba]="Databases Systems" diff --git a/src/health/notifications/awssns/README.md b/src/health/notifications/awssns/README.md index b5a4cc5f4..8bcaf045c 100644 --- a/src/health/notifications/awssns/README.md +++ b/src/health/notifications/awssns/README.md @@ -56,8 +56,8 @@ You can send notifications through Amazon SNS using Netdata's Agent alert notifi The configuration file name for this integration is `health_alarm_notify.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -124,7 +124,7 @@ All roles will default to this variable if left unconfigured. You can have different recipient Topics per **role**, by editing `DEFAULT_RECIPIENT_AWSSNS` with the Topic ARN you want, in the following entries at the bottom of the same file: -```conf +```text role_recipients_awssns[sysadmin]="arn:aws:sns:us-east-2:123456789012:Systems" role_recipients_awssns[domainadmin]="arn:aws:sns:us-east-2:123456789012:Domains" role_recipients_awssns[dba]="arn:aws:sns:us-east-2:123456789012:Databases" @@ -143,7 +143,7 @@ role_recipients_awssns[sitemgr]="arn:aws:sns:us-east-2:123456789012:Sites" An example working configuration would be: ```yaml -```conf +```text #------------------------------------------------------------------------------ # Amazon SNS notifications diff --git a/src/health/notifications/awssns/metadata.yaml b/src/health/notifications/awssns/metadata.yaml index 93389bad0..0eb704d4a 100644 --- a/src/health/notifications/awssns/metadata.yaml +++ b/src/health/notifications/awssns/metadata.yaml @@ -104,7 +104,7 @@ You can have different recipient Topics per **role**, by editing `DEFAULT_RECIPIENT_AWSSNS` with the Topic ARN you want, in the following entries at the bottom of the same file: - ```conf + ```text role_recipients_awssns[sysadmin]="arn:aws:sns:us-east-2:123456789012:Systems" role_recipients_awssns[domainadmin]="arn:aws:sns:us-east-2:123456789012:Domains" role_recipients_awssns[dba]="arn:aws:sns:us-east-2:123456789012:Databases" @@ -122,7 +122,7 @@ enabled: false description: 'An example working configuration would be:' config: | - ```conf + ```text #------------------------------------------------------------------------------ # Amazon SNS notifications diff --git a/src/health/notifications/custom/README.md b/src/health/notifications/custom/README.md index 785aec59d..ba20f1c9c 100644 --- a/src/health/notifications/custom/README.md +++ b/src/health/notifications/custom/README.md @@ -36,8 +36,8 @@ Netdata Agent's alert notification feature allows you to send custom notificatio The configuration file name for this integration is `health_alarm_notify.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/health/notifications/discord/README.md b/src/health/notifications/discord/README.md index 128e04a44..5d6cda8e4 100644 --- a/src/health/notifications/discord/README.md +++ b/src/health/notifications/discord/README.md @@ -38,8 +38,8 @@ Send notifications to Discord using Netdata's Agent alert notification feature, The configuration file name for this integration is `health_alarm_notify.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -61,7 +61,7 @@ The following options can be defined for this notification All roles will default to this variable if left unconfigured. You can then have different channels per role, by editing `DEFAULT_RECIPIENT_DISCORD` with the channel you want, in the following entries at the bottom of the same file: -```conf +```text role_recipients_discord[sysadmin]="systems" role_recipients_discord[domainadmin]="domains" role_recipients_discord[dba]="databases systems" diff --git a/src/health/notifications/discord/metadata.yaml b/src/health/notifications/discord/metadata.yaml index a46a8ec98..f62a1fc08 100644 --- a/src/health/notifications/discord/metadata.yaml +++ b/src/health/notifications/discord/metadata.yaml @@ -45,7 +45,7 @@ detailed_description: | All roles will default to this variable if left unconfigured. You can then have different channels per role, by editing `DEFAULT_RECIPIENT_DISCORD` with the channel you want, in the following entries at the bottom of the same file: - ```conf + ```text role_recipients_discord[sysadmin]="systems" role_recipients_discord[domainadmin]="domains" role_recipients_discord[dba]="databases systems" diff --git a/src/health/notifications/dynatrace/README.md b/src/health/notifications/dynatrace/README.md index 6785cdb82..7764f893c 100644 --- a/src/health/notifications/dynatrace/README.md +++ b/src/health/notifications/dynatrace/README.md @@ -41,8 +41,8 @@ You can send notifications to Dynatrace using Netdata's Agent alert notification The configuration file name for this integration is `health_alarm_notify.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/health/notifications/email/README.md b/src/health/notifications/email/README.md index 1e831d58e..781ab7d53 100644 --- a/src/health/notifications/email/README.md +++ b/src/health/notifications/email/README.md @@ -37,8 +37,8 @@ Send notifications via Email using Netdata's Agent alert notification feature, w The configuration file name for this integration is `health_alarm_notify.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -60,7 +60,7 @@ The following options can be defined for this notification All roles will default to this variable if left unconfigured. The `DEFAULT_RECIPIENT_CUSTOM` can be edited in the following entries at the bottom of the same file: -```conf +```text role_recipients_email[sysadmin]="systems@example.com" role_recipients_email[domainadmin]="domains@example.com" role_recipients_email[dba]="databases@example.com systems@example.com" diff --git a/src/health/notifications/email/metadata.yaml b/src/health/notifications/email/metadata.yaml index f0d4a62a9..cbef3ab27 100644 --- a/src/health/notifications/email/metadata.yaml +++ b/src/health/notifications/email/metadata.yaml @@ -44,7 +44,7 @@ detailed_description: | All roles will default to this variable if left unconfigured. The `DEFAULT_RECIPIENT_CUSTOM` can be edited in the following entries at the bottom of the same file: - ```conf + ```text role_recipients_email[sysadmin]="systems@example.com" role_recipients_email[domainadmin]="domains@example.com" role_recipients_email[dba]="databases@example.com systems@example.com" diff --git a/src/health/notifications/flock/README.md b/src/health/notifications/flock/README.md index 332ede832..5db467cd3 100644 --- a/src/health/notifications/flock/README.md +++ b/src/health/notifications/flock/README.md @@ -37,8 +37,8 @@ Send notifications to Flock using Netdata's Agent alert notification feature, wh The configuration file name for this integration is `health_alarm_notify.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -59,7 +59,7 @@ The following options can be defined for this notification ##### DEFAULT_RECIPIENT_FLOCK You can have different channels per role, by editing DEFAULT_RECIPIENT_FLOCK with the channel you want, in the following entries at the bottom of the same file: -```conf +```text role_recipients_flock[sysadmin]="systems" role_recipients_flock[domainadmin]="domains" role_recipients_flock[dba]="databases systems" diff --git a/src/health/notifications/flock/metadata.yaml b/src/health/notifications/flock/metadata.yaml index 62e7f4995..619c0a0a6 100644 --- a/src/health/notifications/flock/metadata.yaml +++ b/src/health/notifications/flock/metadata.yaml @@ -43,7 +43,7 @@ required: true detailed_description: | You can have different channels per role, by editing DEFAULT_RECIPIENT_FLOCK with the channel you want, in the following entries at the bottom of the same file: - ```conf + ```text role_recipients_flock[sysadmin]="systems" role_recipients_flock[domainadmin]="domains" role_recipients_flock[dba]="databases systems" diff --git a/src/health/notifications/gotify/README.md b/src/health/notifications/gotify/README.md index f0f8a7edb..7ddeda55b 100644 --- a/src/health/notifications/gotify/README.md +++ b/src/health/notifications/gotify/README.md @@ -38,8 +38,8 @@ You can send alerts to your Gotify instance using Netdata's Agent alert notifica The configuration file name for this integration is `health_alarm_notify.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/health/notifications/health_alarm_notify.conf b/src/health/notifications/health_alarm_notify.conf index 9dcec27ae..84d6d6225 100755 --- a/src/health/notifications/health_alarm_notify.conf +++ b/src/health/notifications/health_alarm_notify.conf @@ -844,6 +844,15 @@ NTFY_ACCESS_TOKEN="" # Multiple recipients can be given like this: "https://SERVER1/TOPIC1 https://SERVER2/TOPIC2 ..." 
DEFAULT_RECIPIENT_NTFY=""

+#------------------------------------------------------------------------------
+# ilert global notification options
+SEND_ILERT="YES"
+
+# API key
+ILERT_ALERT_SOURCE_URL=""
+
+DEFAULT_RECIPIENT_ILERT=""
+
 #------------------------------------------------------------------------------
 # custom notifications
 #
@@ -984,6 +993,8 @@ custom_sender() {
 # role_recipients_ntfy[sysadmin]="${DEFAULT_RECIPIENT_NTFY}"

+# role_recipients_ilert[sysadmin]="${DEFAULT_RECIPIENT_ILERT}"
+
 # -----------------------------------------------------------------------------
 # DNS related alarms
@@ -1041,6 +1052,8 @@ custom_sender() {
 # role_recipients_ntfy[domainadmin]="${DEFAULT_RECIPIENT_NTFY}"

+# role_recipients_ilert[domainadmin]="${DEFAULT_RECIPIENT_ILERT}"
+
 # -----------------------------------------------------------------------------
 # database servers alarms
 # mysql, redis, memcached, postgres, etc
@@ -1099,6 +1112,8 @@ custom_sender() {
 # role_recipients_ntfy[dba]="${DEFAULT_RECIPIENT_NTFY}"

+# role_recipients_ilert[dba]="databases ${DEFAULT_RECIPIENT_ILERT}"
+
 # -----------------------------------------------------------------------------
 # web servers alarms
 # apache, nginx, lighttpd, etc
@@ -1157,6 +1172,8 @@ custom_sender() {
 # role_recipients_ntfy[webmaster]="${DEFAULT_RECIPIENT_NTFY}"

+# role_recipients_ilert[webmaster]="${DEFAULT_RECIPIENT_ILERT}"
+
 # -----------------------------------------------------------------------------
 # proxy servers alarms
 # squid, etc
@@ -1215,6 +1232,8 @@ custom_sender() {
 # role_recipients_ntfy[proxyadmin]="${DEFAULT_RECIPIENT_NTFY}"

+# role_recipients_ilert[proxyadmin]="${DEFAULT_RECIPIENT_ILERT}"
+
 # -----------------------------------------------------------------------------
 # peripheral devices
 # UPS, photovoltaics, etc
@@ -1270,3 +1289,5 @@ custom_sender() {
 # role_recipients_gotify[sitemgr]="${DEFAULT_RECIPIENT_GOTIFY}"

 # role_recipients_ntfy[sitemgr]="${DEFAULT_RECIPIENT_NTFY}"
+
+# role_recipients_ilert[sitemgr]="${DEFAULT_RECIPIENT_ILERT}"
diff --git a/src/health/notifications/ilert/README.md b/src/health/notifications/ilert/README.md
new file mode 100644
index 000000000..6d6541ddd
--- /dev/null
+++ b/src/health/notifications/ilert/README.md
@@ -0,0 +1,96 @@
+
+
+# ilert
+
+
+
+
+
+ilert is an alerting and incident management tool. It helps teams reduce response times by enhancing monitoring and ticketing tools with reliable alerts, automatic escalations, on-call schedules, and features for incident response, communication, and status updates.
+Notifications sent to ilert via Netdata's Agent alert notification feature include links, images, and resolution of the corresponding alerts.
+
+
+
+
+
+## Setup
+
+### Prerequisites
+
+####
+
+- A Netdata alert source in ilert. You can create a [Netdata alert source](https://docs.ilert.com/inbound-integrations/netdata) in [ilert](https://www.ilert.com/).
+- Access to the terminal where Netdata Agent is running
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `health_alarm_notify.conf`.
+
+
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+ +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config health_alarm_notify.conf +``` +#### Options + +The following options can be defined for this notification + +
    Config Options

| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
| SEND_ILERT | Set `SEND_ILERT` to `YES`. | YES | yes |
| ILERT_ALERT_SOURCE_URL | Set `ILERT_ALERT_SOURCE_URL` to your Netdata alert source URL in ilert. | | yes |

#### Examples

##### Basic Configuration



```yaml
SEND_ILERT="YES"
ILERT_ALERT_SOURCE_URL="https://api.ilert.com/api/v1/events/netdata/{API-KEY}"

```


## Troubleshooting

### Test Notification

You can run the following command by hand to test the alert configuration:

```bash
# become user netdata
sudo su -s /bin/bash netdata

# enable debugging info on the console
export NETDATA_ALARM_NOTIFY_DEBUG=1

# send test alarms to sysadmin
/usr/libexec/netdata/plugins.d/alarm-notify.sh test

# send test alarms to any role
/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"
```

Note that this will test _all_ alert mechanisms for the selected role.


diff --git a/src/health/notifications/ilert/metadata.yaml b/src/health/notifications/ilert/metadata.yaml
new file mode 100644
index 000000000..7e2454834
--- /dev/null
+++ b/src/health/notifications/ilert/metadata.yaml
@@ -0,0 +1,55 @@
+# yamllint disable rule:line-length
+---
+- id: "notify-ilert"
+  meta:
+    name: "ilert"
+    link: "https://www.ilert.com/"
+    categories:
+      - notify.agent
+    icon_filename: "ilert.svg"
+  keywords:
+    - ilert
+  overview:
+    notification_description: |
+      ilert is an alerting and incident management tool. It helps teams reduce response times by enhancing monitoring and ticketing tools with reliable alerts, automatic escalations, on-call schedules, and features for incident response, communication, and status updates.
+      Notifications sent to ilert via Netdata's Agent alert notification feature include links, images, and resolution of the corresponding alerts.
+    notification_limitations: ""
+  setup:
+    prerequisites:
+      list:
+        - title: ""
+          description: |
+            - A Netdata alert source in ilert. You can create a [Netdata alert source](https://docs.ilert.com/inbound-integrations/netdata) in [ilert](https://www.ilert.com/).
+            - Access to the terminal where Netdata Agent is running
+    configuration:
+      file:
+        name: "health_alarm_notify.conf"
+      options:
+        description: "The following options can be defined for this notification"
+        folding:
+          title: "Config Options"
+          enabled: true
+        list:
+          - name: "SEND_ILERT"
+            default_value: "YES"
+            description: "Set `SEND_ILERT` to `YES`."
+            required: true
+          - name: "ILERT_ALERT_SOURCE_URL"
+            default_value: ""
+            description: "Set `ILERT_ALERT_SOURCE_URL` to your Netdata alert source URL in ilert."
+            required: true
+      examples:
+        folding:
+          enabled: true
+          title: ""
+        list:
+          - name: "Basic Configuration"
+            folding:
+              enabled: false
+            description: ""
+            config: |
+              SEND_ILERT="YES"
+              ILERT_ALERT_SOURCE_URL="https://api.ilert.com/api/v1/events/netdata/{API-KEY}"
+  troubleshooting:
+    problems:
+      list: []
diff --git a/src/health/notifications/irc/README.md b/src/health/notifications/irc/README.md
index 76d3f5bc2..5674fb39d 100644
--- a/src/health/notifications/irc/README.md
+++ b/src/health/notifications/irc/README.md
@@ -37,8 +37,8 @@ Send notifications to IRC using Netdata's Agent alert notification feature, whic

The configuration file name for this integration is `health_alarm_notify.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -76,7 +76,7 @@ nc="/usr/bin/nc"

##### DEFAULT_RECIPIENT_IRC

The `DEFAULT_RECIPIENT_IRC` can be edited in the following entries at the bottom of the same file:

-```conf
+```text
role_recipients_irc[sysadmin]="#systems"
role_recipients_irc[domainadmin]="#domains"
role_recipients_irc[dba]="#databases #systems"
diff --git a/src/health/notifications/irc/metadata.yaml b/src/health/notifications/irc/metadata.yaml
index aa2593f91..4a7585eef 100644
--- a/src/health/notifications/irc/metadata.yaml
+++ b/src/health/notifications/irc/metadata.yaml
@@ -69,7 +69,7 @@ required: true
          detailed_description: |
            The `DEFAULT_RECIPIENT_IRC` can be edited in the following entries at the bottom of the same file:

-            ```conf
+            ```text
            role_recipients_irc[sysadmin]="#systems"
            role_recipients_irc[domainadmin]="#domains"
            role_recipients_irc[dba]="#databases #systems"
diff --git a/src/health/notifications/kavenegar/README.md b/src/health/notifications/kavenegar/README.md
index eedd43a23..ff4479d6d 100644
--- a/src/health/notifications/kavenegar/README.md
+++ b/src/health/notifications/kavenegar/README.md
@@ -38,8 +38,8 @@ You can send notifications to Kavenegar using Netdata's Agent alert notification

The configuration file name for this integration is `health_alarm_notify.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -63,7 +63,7 @@ The following options can be defined for this notification

All roles will default to this variable if left unconfigured.

You can then have different SMS recipients per role, by editing `DEFAULT_RECIPIENT_KAVENEGAR` with the SMS recipients you want, in the following entries at the bottom of the same file:

-```conf
+```text
role_recipients_kavenegar[sysadmin]="09100000000"
role_recipients_kavenegar[domainadmin]="09111111111"
role_recipients_kavenegar[dba]="0922222222"
diff --git a/src/health/notifications/kavenegar/metadata.yaml b/src/health/notifications/kavenegar/metadata.yaml
index 559dbac09..70c87b637 100644
--- a/src/health/notifications/kavenegar/metadata.yaml
+++ b/src/health/notifications/kavenegar/metadata.yaml
@@ -50,7 +50,7 @@ All roles will default to this variable if left unconfigured.
You can then have different SMS recipients per role, by editing `DEFAULT_RECIPIENT_KAVENEGAR` with the SMS recipients you want, in the following entries at the bottom of the same file: - ```conf + ```text role_recipients_kavenegar[sysadmin]="09100000000" role_recipients_kavenegar[domainadmin]="09111111111" role_recipients_kavenegar[dba]="0922222222" diff --git a/src/health/notifications/matrix/README.md b/src/health/notifications/matrix/README.md index 3c01a9ef2..c0387d211 100644 --- a/src/health/notifications/matrix/README.md +++ b/src/health/notifications/matrix/README.md @@ -39,8 +39,8 @@ Send notifications to Matrix network rooms using Netdata's Agent alert notificat The configuration file name for this integration is `health_alarm_notify.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -77,7 +77,7 @@ All roles will default to this variable if left unconfigured. You can have different Rooms per role, by editing `DEFAULT_RECIPIENT_MATRIX` with the `!roomid:homeservername` you want, in the following entries at the bottom of the same file: -```conf +```text role_recipients_matrix[sysadmin]="!roomid1:homeservername" role_recipients_matrix[domainadmin]="!roomid2:homeservername" role_recipients_matrix[dba]="!roomid3:homeservername" diff --git a/src/health/notifications/matrix/metadata.yaml b/src/health/notifications/matrix/metadata.yaml index db7f92eb1..770e0905a 100644 --- a/src/health/notifications/matrix/metadata.yaml +++ b/src/health/notifications/matrix/metadata.yaml @@ -61,7 +61,7 @@ You can have different Rooms per role, by editing `DEFAULT_RECIPIENT_MATRIX` with the `!roomid:homeservername` you want, in the following entries at the bottom of the same file: - ```conf + ```text role_recipients_matrix[sysadmin]="!roomid1:homeservername" role_recipients_matrix[domainadmin]="!roomid2:homeservername" role_recipients_matrix[dba]="!roomid3:homeservername" diff --git a/src/health/notifications/messagebird/README.md b/src/health/notifications/messagebird/README.md index 4b668fce3..d961a3b4d 100644 --- a/src/health/notifications/messagebird/README.md +++ b/src/health/notifications/messagebird/README.md @@ -37,8 +37,8 @@ Send notifications to MessageBird using Netdata's Agent alert notification featu The configuration file name for this integration is `health_alarm_notify.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -62,7 +62,7 @@ The following options can be defined for this notification All roles will default to this variable if left unconfigured. You can then have different recipients per role, by editing `DEFAULT_RECIPIENT_MESSAGEBIRD` with the number you want, in the following entries at the bottom of the same file: -```conf +```text role_recipients_messagebird[sysadmin]="+15555555555" role_recipients_messagebird[domainadmin]="+15555555556" role_recipients_messagebird[dba]="+15555555557" diff --git a/src/health/notifications/messagebird/metadata.yaml b/src/health/notifications/messagebird/metadata.yaml index a97cdc712..3ba65ecf3 100644 --- a/src/health/notifications/messagebird/metadata.yaml +++ b/src/health/notifications/messagebird/metadata.yaml @@ -49,7 +49,7 @@ All roles will default to this variable if left unconfigured. You can then have different recipients per role, by editing `DEFAULT_RECIPIENT_MESSAGEBIRD` with the number you want, in the following entries at the bottom of the same file: - ```conf + ```text role_recipients_messagebird[sysadmin]="+15555555555" role_recipients_messagebird[domainadmin]="+15555555556" role_recipients_messagebird[dba]="+15555555557" diff --git a/src/health/notifications/msteams/README.md b/src/health/notifications/msteams/README.md index e24730777..91fe7a081 100644 --- a/src/health/notifications/msteams/README.md +++ b/src/health/notifications/msteams/README.md @@ -38,8 +38,8 @@ You can send Netdata alerts to Microsoft Teams using Netdata's Agent alert notif The configuration file name for this integration is `health_alarm_notify.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -64,7 +64,7 @@ In Microsoft Teams the channel name is encoded in the URI after `/IncomingWebhoo All roles will default to this variable if left unconfigured. You can have different channels per role, by editing `DEFAULT_RECIPIENT_MSTEAMS` with the channel you want, in the following entries at the bottom of the same file: -```conf +```text role_recipients_msteams[sysadmin]="CHANNEL1" role_recipients_msteams[domainadmin]="CHANNEL2" role_recipients_msteams[dba]="databases CHANNEL3" diff --git a/src/health/notifications/msteams/metadata.yaml b/src/health/notifications/msteams/metadata.yaml index 72de507a4..d37c08c0a 100644 --- a/src/health/notifications/msteams/metadata.yaml +++ b/src/health/notifications/msteams/metadata.yaml @@ -50,7 +50,7 @@ All roles will default to this variable if left unconfigured. 
You can have different channels per role, by editing `DEFAULT_RECIPIENT_MSTEAMS` with the channel you want, in the following entries at the bottom of the same file: - ```conf + ```text role_recipients_msteams[sysadmin]="CHANNEL1" role_recipients_msteams[domainadmin]="CHANNEL2" role_recipients_msteams[dba]="databases CHANNEL3" diff --git a/src/health/notifications/ntfy/README.md b/src/health/notifications/ntfy/README.md index a03e30304..39613338b 100644 --- a/src/health/notifications/ntfy/README.md +++ b/src/health/notifications/ntfy/README.md @@ -39,8 +39,8 @@ You can send alerts to an ntfy server using Netdata's Agent alert notification f The configuration file name for this integration is `health_alarm_notify.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -67,7 +67,7 @@ You can define multiple recipient URLs like this: `https://SERVER1/TOPIC1` `http All roles will default to this variable if left unconfigured. You can then have different servers and/or topics per role, by editing DEFAULT_RECIPIENT_NTFY with the server-topic combination you want, in the following entries at the bottom of the same file: -```conf +```text role_recipients_ntfy[sysadmin]="https://SERVER1/TOPIC1" role_recipients_ntfy[domainadmin]="https://SERVER2/TOPIC2" role_recipients_ntfy[dba]="https://SERVER3/TOPIC3" diff --git a/src/health/notifications/ntfy/metadata.yaml b/src/health/notifications/ntfy/metadata.yaml index 0d6c0beac..90ce3f6cd 100644 --- a/src/health/notifications/ntfy/metadata.yaml +++ b/src/health/notifications/ntfy/metadata.yaml @@ -45,7 +45,7 @@ All roles will default to this variable if left unconfigured. You can then have different servers and/or topics per role, by editing DEFAULT_RECIPIENT_NTFY with the server-topic combination you want, in the following entries at the bottom of the same file: - ```conf + ```text role_recipients_ntfy[sysadmin]="https://SERVER1/TOPIC1" role_recipients_ntfy[domainadmin]="https://SERVER2/TOPIC2" role_recipients_ntfy[dba]="https://SERVER3/TOPIC3" diff --git a/src/health/notifications/opsgenie/README.md b/src/health/notifications/opsgenie/README.md index fa5859d7d..2a4dc1fd8 100644 --- a/src/health/notifications/opsgenie/README.md +++ b/src/health/notifications/opsgenie/README.md @@ -38,8 +38,8 @@ You can send notifications to Opsgenie using Netdata's Agent alert notification The configuration file name for this integration is `health_alarm_notify.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/health/notifications/pagerduty/README.md b/src/health/notifications/pagerduty/README.md index ae45e5385..d85dd46c9 100644 --- a/src/health/notifications/pagerduty/README.md +++ b/src/health/notifications/pagerduty/README.md @@ -40,8 +40,8 @@ You can send notifications to PagerDuty using Netdata's Agent alert notification The configuration file name for this integration is `health_alarm_notify.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -63,7 +63,7 @@ The following options can be defined for this notification All roles will default to this variable if left unconfigured. The `DEFAULT_RECIPIENT_PD` can be edited in the following entries at the bottom of the same file: -```conf +```text role_recipients_pd[sysadmin]="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxa" role_recipients_pd[domainadmin]="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxb" role_recipients_pd[dba]="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxc" diff --git a/src/health/notifications/pagerduty/metadata.yaml b/src/health/notifications/pagerduty/metadata.yaml index 6fc1d640e..3973825fc 100644 --- a/src/health/notifications/pagerduty/metadata.yaml +++ b/src/health/notifications/pagerduty/metadata.yaml @@ -44,7 +44,7 @@ All roles will default to this variable if left unconfigured. The `DEFAULT_RECIPIENT_PD` can be edited in the following entries at the bottom of the same file: - ```conf + ```text role_recipients_pd[sysadmin]="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxa" role_recipients_pd[domainadmin]="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxb" role_recipients_pd[dba]="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxc" diff --git a/src/health/notifications/prowl/README.md b/src/health/notifications/prowl/README.md index 0d206cee0..ba00b3212 100644 --- a/src/health/notifications/prowl/README.md +++ b/src/health/notifications/prowl/README.md @@ -43,8 +43,8 @@ Send notifications to Prowl using Netdata's Agent alert notification feature, wh The configuration file name for this integration is `health_alarm_notify.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -66,7 +66,7 @@ The following options can be defined for this notification All roles will default to this variable if left unconfigured. 
The `DEFAULT_RECIPIENT_PROWL` can be edited in the following entries at the bottom of the same file: -```conf +```text role_recipients_prowl[sysadmin]="AAAAAAAA" role_recipients_prowl[domainadmin]="BBBBBBBBB" role_recipients_prowl[dba]="CCCCCCCCC" diff --git a/src/health/notifications/prowl/metadata.yaml b/src/health/notifications/prowl/metadata.yaml index b3f0e0a1e..3142d155c 100644 --- a/src/health/notifications/prowl/metadata.yaml +++ b/src/health/notifications/prowl/metadata.yaml @@ -43,7 +43,7 @@ All roles will default to this variable if left unconfigured. The `DEFAULT_RECIPIENT_PROWL` can be edited in the following entries at the bottom of the same file: - ```conf + ```text role_recipients_prowl[sysadmin]="AAAAAAAA" role_recipients_prowl[domainadmin]="BBBBBBBBB" role_recipients_prowl[dba]="CCCCCCCCC" diff --git a/src/health/notifications/pushbullet/README.md b/src/health/notifications/pushbullet/README.md index 1b30f4c97..0f22b5d54 100644 --- a/src/health/notifications/pushbullet/README.md +++ b/src/health/notifications/pushbullet/README.md @@ -37,8 +37,8 @@ Send notifications to Pushbullet using Netdata's Agent alert notification featur The configuration file name for this integration is `health_alarm_notify.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -63,7 +63,7 @@ You can define multiple entries like this: user1@email.com user2@email.com. All roles will default to this variable if left unconfigured. The `DEFAULT_RECIPIENT_PUSHBULLET` can be edited in the following entries at the bottom of the same file: -```conf +```text role_recipients_pushbullet[sysadmin]="user1@email.com" role_recipients_pushbullet[domainadmin]="user2@mail.com" role_recipients_pushbullet[dba]="#channel1" diff --git a/src/health/notifications/pushbullet/metadata.yaml b/src/health/notifications/pushbullet/metadata.yaml index 430033cca..03ee41233 100644 --- a/src/health/notifications/pushbullet/metadata.yaml +++ b/src/health/notifications/pushbullet/metadata.yaml @@ -47,7 +47,7 @@ All roles will default to this variable if left unconfigured. The `DEFAULT_RECIPIENT_PUSHBULLET` can be edited in the following entries at the bottom of the same file: - ```conf + ```text role_recipients_pushbullet[sysadmin]="user1@email.com" role_recipients_pushbullet[domainadmin]="user2@mail.com" role_recipients_pushbullet[dba]="#channel1" diff --git a/src/health/notifications/pushover/README.md b/src/health/notifications/pushover/README.md index 9d30dfa97..8a296691b 100644 --- a/src/health/notifications/pushover/README.md +++ b/src/health/notifications/pushover/README.md @@ -41,8 +41,8 @@ Send notification to Pushover using Netdata's Agent alert notification feature, The configuration file name for this integration is `health_alarm_notify.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -65,7 +65,7 @@ The following options can be defined for this notification All roles will default to this variable if left unconfigured. The `DEFAULT_RECIPIENT_PUSHOVER` can be edited in the following entries at the bottom of the same file: -```conf +```text role_recipients_pushover[sysadmin]="USERTOKEN1" role_recipients_pushover[domainadmin]="USERTOKEN2" role_recipients_pushover[dba]="USERTOKEN3 USERTOKEN4" diff --git a/src/health/notifications/pushover/metadata.yaml b/src/health/notifications/pushover/metadata.yaml index 9af729ea8..e45f909b3 100644 --- a/src/health/notifications/pushover/metadata.yaml +++ b/src/health/notifications/pushover/metadata.yaml @@ -49,7 +49,7 @@ All roles will default to this variable if left unconfigured. The `DEFAULT_RECIPIENT_PUSHOVER` can be edited in the following entries at the bottom of the same file: - ```conf + ```text role_recipients_pushover[sysadmin]="USERTOKEN1" role_recipients_pushover[domainadmin]="USERTOKEN2" role_recipients_pushover[dba]="USERTOKEN3 USERTOKEN4" diff --git a/src/health/notifications/rocketchat/README.md b/src/health/notifications/rocketchat/README.md index b9b0d5687..f23032bd0 100644 --- a/src/health/notifications/rocketchat/README.md +++ b/src/health/notifications/rocketchat/README.md @@ -38,8 +38,8 @@ Send notifications to Rocket.Chat using Netdata's Agent alert notification featu The configuration file name for this integration is `health_alarm_notify.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -62,7 +62,7 @@ The following options can be defined for this notification All roles will default to this variable if left unconfigured. The `DEFAULT_RECIPIENT_ROCKETCHAT` can be edited in the following entries at the bottom of the same file: -```conf +```text role_recipients_rocketchat[sysadmin]="systems" role_recipients_rocketchat[domainadmin]="domains" role_recipients_rocketchat[dba]="databases systems" diff --git a/src/health/notifications/rocketchat/metadata.yaml b/src/health/notifications/rocketchat/metadata.yaml index f644b93e1..17ee37acb 100644 --- a/src/health/notifications/rocketchat/metadata.yaml +++ b/src/health/notifications/rocketchat/metadata.yaml @@ -46,7 +46,7 @@ All roles will default to this variable if left unconfigured. 
The `DEFAULT_RECIPIENT_ROCKETCHAT` can be edited in the following entries at the bottom of the same file: - ```conf + ```text role_recipients_rocketchat[sysadmin]="systems" role_recipients_rocketchat[domainadmin]="domains" role_recipients_rocketchat[dba]="databases systems" diff --git a/src/health/notifications/slack/README.md b/src/health/notifications/slack/README.md index 35cb75a18..10fc707c9 100644 --- a/src/health/notifications/slack/README.md +++ b/src/health/notifications/slack/README.md @@ -38,8 +38,8 @@ Send notifications to a Slack workspace using Netdata's Agent alert notification The configuration file name for this integration is `health_alarm_notify.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata diff --git a/src/health/notifications/smstools3/README.md b/src/health/notifications/smstools3/README.md index dafc0b7f4..ee8eb6c8d 100644 --- a/src/health/notifications/smstools3/README.md +++ b/src/health/notifications/smstools3/README.md @@ -42,8 +42,8 @@ The SMS Server Tools 3 is a SMS Gateway software which can send and receive shor The configuration file name for this integration is `health_alarm_notify.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -74,7 +74,7 @@ sendsms="/usr/bin/sendsms" All roles will default to this variable if left unconfigured. You can then have different phone numbers per role, by editing `DEFAULT_RECIPIENT_SMS` with the phone number you want, in the following entries at the bottom of the same file: -```conf +```text role_recipients_sms[sysadmin]="PHONE1" role_recipients_sms[domainadmin]="PHONE2" role_recipients_sms[dba]="PHONE3" diff --git a/src/health/notifications/smstools3/metadata.yaml b/src/health/notifications/smstools3/metadata.yaml index 3a29183a5..e23e41c41 100644 --- a/src/health/notifications/smstools3/metadata.yaml +++ b/src/health/notifications/smstools3/metadata.yaml @@ -57,7 +57,7 @@ All roles will default to this variable if left unconfigured. 
You can then have different phone numbers per role, by editing `DEFAULT_RECIPIENT_SMS` with the phone number you want, in the following entries at the bottom of the same file: - ```conf + ```text role_recipients_sms[sysadmin]="PHONE1" role_recipients_sms[domainadmin]="PHONE2" role_recipients_sms[dba]="PHONE3" diff --git a/src/health/notifications/syslog/README.md b/src/health/notifications/syslog/README.md index 72534b1c8..0428533f5 100644 --- a/src/health/notifications/syslog/README.md +++ b/src/health/notifications/syslog/README.md @@ -37,8 +37,8 @@ Send notifications to Syslog using Netdata's Agent alert notification feature, w The configuration file name for this integration is `health_alarm_notify.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata @@ -78,7 +78,7 @@ All roles will default to this variable if left unconfigured. You can then have different recipients per role, by editing DEFAULT_RECIPIENT_SYSLOG with the recipient you want, in the following entries at the bottom of the same file: -```conf +```text role_recipients_syslog[sysadmin]="daemon.notice@loghost1:514/netdata" role_recipients_syslog[domainadmin]="daemon.notice@loghost2:514/netdata" role_recipients_syslog[dba]="daemon.notice@loghost3:514/netdata" diff --git a/src/health/notifications/syslog/metadata.yaml b/src/health/notifications/syslog/metadata.yaml index c5f241e76..2793b3ae2 100644 --- a/src/health/notifications/syslog/metadata.yaml +++ b/src/health/notifications/syslog/metadata.yaml @@ -59,7 +59,7 @@ detailed_description: | You can then have different recipients per role, by editing DEFAULT_RECIPIENT_SYSLOG with the recipient you want, in the following entries at the bottom of the same file: - ```conf + ```text role_recipients_syslog[sysadmin]="daemon.notice@loghost1:514/netdata" role_recipients_syslog[domainadmin]="daemon.notice@loghost2:514/netdata" role_recipients_syslog[dba]="daemon.notice@loghost3:514/netdata" diff --git a/src/health/notifications/telegram/README.md b/src/health/notifications/telegram/README.md index 90cca4214..f44cfcb9f 100644 --- a/src/health/notifications/telegram/README.md +++ b/src/health/notifications/telegram/README.md @@ -38,8 +38,8 @@ Send notifications to Telegram using Netdata's Agent alert notification feature, The configuration file name for this integration is `health_alarm_notify.conf`. -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). +You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). 
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -63,7 +63,7 @@ All roles will default to this variable if left unconfigured.

The `DEFAULT_RECIPIENT_CUSTOM` can be edited in the following entries at the bottom of the same file:

-```conf
+```text
role_recipients_telegram[sysadmin]="-49999333324"
role_recipients_telegram[domainadmin]="-49999333389"
role_recipients_telegram[dba]="-10099992222"
diff --git a/src/health/notifications/telegram/metadata.yaml b/src/health/notifications/telegram/metadata.yaml
index daa45da72..7fd2f05b5 100644
--- a/src/health/notifications/telegram/metadata.yaml
+++ b/src/health/notifications/telegram/metadata.yaml
@@ -47,7 +47,7 @@
          The `DEFAULT_RECIPIENT_CUSTOM` can be edited in the following entries at the bottom of the same file:

-          ```conf
+          ```text
          role_recipients_telegram[sysadmin]="-49999333324"
          role_recipients_telegram[domainadmin]="-49999333389"
          role_recipients_telegram[dba]="-10099992222"
diff --git a/src/health/notifications/twilio/README.md b/src/health/notifications/twilio/README.md
index cd9b17e7f..18b9ffa2b 100644
--- a/src/health/notifications/twilio/README.md
+++ b/src/health/notifications/twilio/README.md
@@ -37,8 +37,8 @@ Send notifications to Twilio using Netdata's Agent alert notification feature, w

The configuration file name for this integration is `health_alarm_notify.conf`.

-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).

```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -62,7 +62,7 @@ The following options can be defined for this notification

You can then have different recipients per role, by editing DEFAULT_RECIPIENT_TWILIO with the recipient's number you want, in the following entries at the bottom of the same file:

-```conf
+```text
role_recipients_twilio[sysadmin]="+15555555555"
role_recipients_twilio[domainadmin]="+15555555556"
role_recipients_twilio[dba]="+15555555557"
diff --git a/src/health/notifications/twilio/metadata.yaml b/src/health/notifications/twilio/metadata.yaml
index 35fc3f042..594936a1d 100644
--- a/src/health/notifications/twilio/metadata.yaml
+++ b/src/health/notifications/twilio/metadata.yaml
@@ -52,7 +52,7 @@ detailed_description: |
          You can then have different recipients per role, by editing DEFAULT_RECIPIENT_TWILIO with the recipient's number you want, in the following entries at the bottom of the same file:

-          ```conf
+          ```text
          role_recipients_twilio[sysadmin]="+15555555555"
          role_recipients_twilio[domainadmin]="+15555555556"
          role_recipients_twilio[dba]="+15555555557"
diff --git a/src/health/notifications/web/README.md b/src/health/notifications/web/README.md
index d7115be3d..baa0bfaaa 100644
--- a/src/health/notifications/web/README.md
+++ b/src/health/notifications/web/README.md
@@ -1,13 +1,3 @@
-
-
 # Browser pop-up agent alert notifications

 The Netdata dashboard shows HTML notifications when it is open.
diff --git a/src/health/rrdcalc.c b/src/health/rrdcalc.c index bce709bf4..e5a26db07 100644 --- a/src/health/rrdcalc.c +++ b/src/health/rrdcalc.c @@ -80,7 +80,7 @@ uint32_t rrdcalc_get_unique_id(RRDHOST *host, STRING *chart, STRING *name, uint3 alarm_id = sql_get_alarm_id(host, chart, name, next_event_id); if (!alarm_id) { if (unlikely(!host->health_log.next_alarm_id)) - host->health_log.next_alarm_id = (uint32_t)now_realtime_sec(); + host->health_log.next_alarm_id = get_uint32_id(); alarm_id = host->health_log.next_alarm_id++; } } diff --git a/src/health/rrdvar.c b/src/health/rrdvar.c index 75cb9739b..5d6e3cf84 100644 --- a/src/health/rrdvar.c +++ b/src/health/rrdvar.c @@ -9,20 +9,6 @@ typedef struct rrdvar { // ---------------------------------------------------------------------------- // RRDVAR management -inline int rrdvar_fix_name(char *variable) { - int fixed = 0; - while(*variable) { - if (!isalnum((uint8_t)*variable) && *variable != '.' && *variable != '_') { - *variable++ = '_'; - fixed++; - } - else - variable++; - } - - return fixed; -} - inline STRING *rrdvar_name_to_string(const char *name) { char *variable = strdupz(name); rrdvar_fix_name(variable); diff --git a/src/health/rrdvar.h b/src/health/rrdvar.h index 3297984cb..f61b04b4a 100644 --- a/src/health/rrdvar.h +++ b/src/health/rrdvar.h @@ -7,8 +7,6 @@ #define RRDVAR_MAX_LENGTH 1024 -int rrdvar_fix_name(char *variable); - #include "database/rrd.h" STRING *rrdvar_name_to_string(const char *name); diff --git a/src/health/schema.d/health%3Aalert%3Aprototype.json b/src/health/schema.d/health%3Aalert%3Aprototype.json index 309d052de..9d0f1eafd 100644 --- a/src/health/schema.d/health%3Aalert%3Aprototype.json +++ b/src/health/schema.d/health%3Aalert%3Aprototype.json @@ -380,7 +380,7 @@ "classification": { "$ref": "#/definitions/configClassification" }, "value": { "$ref": "#/definitions/configValue" }, "conditions": { "$ref": "#/definitions/configConditions" }, - "actions": { "$ref": "#/definitions/configAction" } + "action": { "$ref": "#/definitions/configAction" } }, "required": [] }, diff --git a/src/libnetdata/README.md b/src/libnetdata/README.md index fd2c79730..c425c07a6 100644 --- a/src/libnetdata/README.md +++ b/src/libnetdata/README.md @@ -1,14 +1,3 @@ - - # libnetdata `libnetdata` is a collection of library code that is used by all Netdata `C` programs. - - diff --git a/src/libnetdata/adaptive_resortable_list/README.md b/src/libnetdata/adaptive_resortable_list/README.md index 9aa864c9e..e5c22c519 100644 --- a/src/libnetdata/adaptive_resortable_list/README.md +++ b/src/libnetdata/adaptive_resortable_list/README.md @@ -1,12 +1,3 @@ - - # Adaptive Re-sortable List (ARL) This library allows Netdata to read a series of `name - value` pairs diff --git a/src/libnetdata/aral/README.md b/src/libnetdata/aral/README.md index d999e820a..564f00f6d 100644 --- a/src/libnetdata/aral/README.md +++ b/src/libnetdata/aral/README.md @@ -1,12 +1,3 @@ - - # Array Allocator Come on! Array allocators are embedded in libc! Why do we need such a thing in Netdata? 
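The `rrdvar.c` hunk above deletes `rrdvar_fix_name()` (and `rrdvar.h` drops its prototype) while `rrdvar_name_to_string()` still calls it, so the helper is evidently relocated rather than removed. As a reference for reviewers, here is a minimal standalone sketch of its behavior, reusing the body from the deleted hunk; the wrapper name `fix_name`, the `main()` driver, and the sample string are illustrative only:

```c
#include <ctype.h>
#include <stdint.h>
#include <stdio.h>

/* Same logic as the rrdvar_fix_name() shown in the deleted hunk above:
 * every character that is not alphanumeric, '.' or '_' becomes '_';
 * the return value counts how many characters were rewritten. */
static int fix_name(char *variable) {
    int fixed = 0;
    while (*variable) {
        if (!isalnum((uint8_t)*variable) && *variable != '.' && *variable != '_') {
            *variable++ = '_';
            fixed++;
        }
        else
            variable++;
    }
    return fixed;
}

int main(void) {
    char name[] = "disk space used (%)";   /* hypothetical variable name */
    int fixed = fix_name(name);
    printf("'%s' (%d characters replaced)\n", name, fixed); /* 'disk_space_used____' (6) */
    return 0;
}
```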
diff --git a/src/libnetdata/aral/aral.c b/src/libnetdata/aral/aral.c index 64b63d8e0..2ec27b3df 100644 --- a/src/libnetdata/aral/aral.c +++ b/src/libnetdata/aral/aral.c @@ -72,7 +72,7 @@ struct aral { struct { bool enabled; const char *filename; - char **cache_dir; + const char **cache_dir; } mmap; } config; @@ -117,7 +117,7 @@ size_t aral_structures(ARAL *ar) { return aral_structures_from_stats(ar->stats); } -struct aral_statistics *aral_statistics(ARAL *ar) { +struct aral_statistics *aral_get_statistics(ARAL *ar) { return ar->stats; } @@ -709,7 +709,7 @@ size_t aral_element_size(ARAL *ar) { } ARAL *aral_create(const char *name, size_t element_size, size_t initial_page_elements, size_t max_page_size, - struct aral_statistics *stats, const char *filename, char **cache_dir, bool mmap, bool lockless) { + struct aral_statistics *stats, const char *filename, const char **cache_dir, bool mmap, bool lockless) { ARAL *ar = callocz(1, sizeof(ARAL)); ar->config.options = (lockless) ? ARAL_LOCKLESS : 0; ar->config.requested_element_size = element_size; @@ -799,6 +799,7 @@ ARAL *aral_create(const char *name, size_t element_size, size_t initial_page_ele aral_delete_leftover_files(ar->config.name, directory_name, file); } + errno_clear(); internal_error(true, "ARAL: '%s' " "element size %zu (requested %zu bytes), " @@ -1078,7 +1079,7 @@ int aral_stress_test(size_t threads, size_t elements, size_t seconds) { } int aral_unittest(size_t elements) { - char *cache_dir = "/tmp/"; + const char *cache_dir = "/tmp/"; struct aral_unittest_config auc = { .single_threaded = true, diff --git a/src/libnetdata/aral/aral.h b/src/libnetdata/aral/aral.h index 2e749bc4c..4cd21d17a 100644 --- a/src/libnetdata/aral/aral.h +++ b/src/libnetdata/aral/aral.h @@ -28,11 +28,11 @@ struct aral_statistics { }; ARAL *aral_create(const char *name, size_t element_size, size_t initial_page_elements, size_t max_page_size, - struct aral_statistics *stats, const char *filename, char **cache_dir, bool mmap, bool lockless); + struct aral_statistics *stats, const char *filename, const char **cache_dir, bool mmap, bool lockless); size_t aral_element_size(ARAL *ar); size_t aral_overhead(ARAL *ar); size_t aral_structures(ARAL *ar); -struct aral_statistics *aral_statistics(ARAL *ar); +struct aral_statistics *aral_get_statistics(ARAL *ar); size_t aral_structures_from_stats(struct aral_statistics *stats); size_t aral_overhead_from_stats(struct aral_statistics *stats); diff --git a/src/libnetdata/avl/README.md b/src/libnetdata/avl/README.md index eb85f884e..0b4a39b43 100644 --- a/src/libnetdata/avl/README.md +++ b/src/libnetdata/avl/README.md @@ -1,12 +1,3 @@ - - # AVL AVL is a library indexing objects in B-Trees. 
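The `aral.c`/`aral.h` hunks above rename `aral_statistics()` to `aral_get_statistics()` and change the `cache_dir` parameter of `aral_create()` from `char **` to `const char **`, so out-of-tree callers must be updated in kind. Below is a minimal sketch of an adapted caller, based only on the signatures shown above; the include path, element size, and overhead query are illustrative assumptions:

```c
#include <stdint.h>
#include "libnetdata/aral/aral.h"   /* include path is an assumption */

/* Hypothetical caller updated for this patch:
 *  - cache_dir is now passed as `const char **` (was `char **`)
 *  - aral_statistics() is now aral_get_statistics()
 */
static size_t example_overhead(void) {
    const char *cache_dir = "/tmp/";   /* was: char *cache_dir = "/tmp/"; */

    ARAL *ar = aral_create("example", sizeof(uint64_t), 0, 0,
                           NULL, NULL, &cache_dir, false, false);

    struct aral_statistics *stats = aral_get_statistics(ar); /* was: aral_statistics(ar) */
    return aral_overhead_from_stats(stats);
}
```

The `/tmp/` cache directory mirrors the `aral_unittest()` change in the hunk above, which constifies the same local variable.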
diff --git a/src/libnetdata/bitmap/bitmap64.h b/src/libnetdata/bitmap/bitmap64.h new file mode 100644 index 000000000..425f3fd20 --- /dev/null +++ b/src/libnetdata/bitmap/bitmap64.h @@ -0,0 +1,35 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_BITMAP64_H +#define NETDATA_BITMAP64_H + +#include +#include +#include + +typedef uint64_t bitmap64_t; + +#define BITMAP64_INITIALIZER 0 + +static inline void bitmap64_set(bitmap64_t *bitmap, int position) +{ + assert(position >= 0 && position < 64); + + *bitmap |= (1ULL << position); +} + +static inline void bitmap64_clear(bitmap64_t *bitmap, int position) +{ + assert(position >= 0 && position < 64); + + *bitmap &= ~(1ULL << position); +} + +static inline bool bitmap64_get(const bitmap64_t *bitmap, int position) +{ + assert(position >= 0 && position < 64); + + return (*bitmap & (1ULL << position)); +} + +#endif // NETDATA_BITMAP64_H diff --git a/src/libnetdata/bitmap64.h b/src/libnetdata/bitmap64.h deleted file mode 100644 index 425f3fd20..000000000 --- a/src/libnetdata/bitmap64.h +++ /dev/null @@ -1,35 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#ifndef NETDATA_BITMAP64_H -#define NETDATA_BITMAP64_H - -#include -#include -#include - -typedef uint64_t bitmap64_t; - -#define BITMAP64_INITIALIZER 0 - -static inline void bitmap64_set(bitmap64_t *bitmap, int position) -{ - assert(position >= 0 && position < 64); - - *bitmap |= (1ULL << position); -} - -static inline void bitmap64_clear(bitmap64_t *bitmap, int position) -{ - assert(position >= 0 && position < 64); - - *bitmap &= ~(1ULL << position); -} - -static inline bool bitmap64_get(const bitmap64_t *bitmap, int position) -{ - assert(position >= 0 && position < 64); - - return (*bitmap & (1ULL << position)); -} - -#endif // NETDATA_BITMAP64_H diff --git a/src/libnetdata/buffer/README.md b/src/libnetdata/buffer/README.md index a7850df72..460c07753 100644 --- a/src/libnetdata/buffer/README.md +++ b/src/libnetdata/buffer/README.md @@ -1,12 +1,3 @@ - - # BUFFER `BUFFER` is a convenience library for working with strings in `C`. diff --git a/src/libnetdata/buffer/buffer.c b/src/libnetdata/buffer/buffer.c index 119216dd9..7194134b4 100644 --- a/src/libnetdata/buffer/buffer.c +++ b/src/libnetdata/buffer/buffer.c @@ -247,7 +247,7 @@ void buffer_free(BUFFER *b) { buffer_overflow_check(b); - netdata_log_debug(D_WEB_BUFFER, "Freeing web buffer of size %zu.", b->size); + netdata_log_debug(D_WEB_BUFFER, "Freeing web buffer of size %zu.", (size_t)b->size); if(b->statistics) __atomic_sub_fetch(b->statistics, b->size + sizeof(BUFFER) + sizeof(BUFFER_OVERFLOW_EOF) + 2, __ATOMIC_RELAXED); @@ -269,7 +269,7 @@ void buffer_increase(BUFFER *b, size_t free_size_required) { size_t optimal = (b->size > 5*1024*1024) ? 
b->size / 2 : b->size; if(optimal > increase) increase = optimal; - netdata_log_debug(D_WEB_BUFFER, "Increasing data buffer from size %zu to %zu.", b->size, b->size + increase); + netdata_log_debug(D_WEB_BUFFER, "Increasing data buffer from size %zu to %zu.", (size_t)b->size, (size_t)(b->size + increase)); b->buffer = reallocz(b->buffer, b->size + increase + sizeof(BUFFER_OVERFLOW_EOF) + 2); b->size += increase; diff --git a/src/libnetdata/buffer/buffer.h b/src/libnetdata/buffer/buffer.h index 92e14afb1..78ee49d54 100644 --- a/src/libnetdata/buffer/buffer.h +++ b/src/libnetdata/buffer/buffer.h @@ -3,6 +3,8 @@ #ifndef NETDATA_WEB_BUFFER_H #define NETDATA_WEB_BUFFER_H 1 +#include "../uuid/uuid.h" +#include "../http/content_type.h" #include "../string/utf8.h" #include "../libnetdata.h" @@ -39,14 +41,15 @@ typedef enum __attribute__ ((__packed__)) { } BUFFER_JSON_OPTIONS; typedef struct web_buffer { - size_t size; // allocation size of buffer, in bytes - size_t len; // current data length in buffer, in bytes - char *buffer; // the buffer itself + uint32_t size; // allocation size of buffer, in bytes + uint32_t len; // current data length in buffer, in bytes HTTP_CONTENT_TYPE content_type; // the content type of the data in the buffer BUFFER_OPTIONS options; // options related to the content + uint16_t response_code; time_t date; // the timestamp this content has been generated time_t expires; // the timestamp this content expires size_t *statistics; + char *buffer; // the buffer itself struct { char key_quote[BUFFER_QUOTE_MAX_SIZE + 1]; @@ -62,7 +65,7 @@ typedef struct web_buffer { #define buffer_cacheable(wb) do { (wb)->options |= WB_CONTENT_CACHEABLE; if((wb)->options & WB_CONTENT_NO_CACHEABLE) (wb)->options &= ~WB_CONTENT_NO_CACHEABLE; } while(0) #define buffer_no_cacheable(wb) do { (wb)->options |= WB_CONTENT_NO_CACHEABLE; if((wb)->options & WB_CONTENT_CACHEABLE) (wb)->options &= ~WB_CONTENT_CACHEABLE; (wb)->expires = 0; } while(0) -#define buffer_strlen(wb) ((wb)->len) +#define buffer_strlen(wb) (size_t)((wb)->len) #define BUFFER_OVERFLOW_EOF "EOF" @@ -152,13 +155,10 @@ static inline void _buffer_json_depth_pop(BUFFER *wb) { wb->json.depth--; } -static inline void buffer_fast_charcat(BUFFER *wb, const char c) { - +static inline void buffer_putc(BUFFER *wb, char c) { buffer_need_bytes(wb, 2); - *(&wb->buffer[wb->len]) = c; - wb->len += 1; + wb->buffer[wb->len++] = c; wb->buffer[wb->len] = '\0'; - buffer_overflow_check(wb); } @@ -181,13 +181,6 @@ static inline void buffer_fast_rawcat(BUFFER *wb, const char *txt, size_t len) { buffer_overflow_check(wb); } -static inline void buffer_putc(BUFFER *wb, char c) { - buffer_need_bytes(wb, 2); - wb->buffer[wb->len++] = c; - wb->buffer[wb->len] = '\0'; - buffer_overflow_check(wb); -} - static inline void buffer_fast_strcat(BUFFER *wb, const char *txt, size_t len) { if(unlikely(!txt || !*txt || !len)) return; @@ -423,6 +416,16 @@ static inline char *print_uint64_hex_reversed(char *dst, uint64_t value) { #endif } +static inline char *print_uint64_hex_reversed_full(char *dst, uint64_t value) { + char *d = dst; + for(size_t c = 0; c < sizeof(uint64_t) * 2; c++) { + *d++ = hex_digits[value & 0xf]; + value >>= 4; + } + + return d; +} + static inline char *print_uint64_base64_reversed(char *dst, uint64_t value) { char *d = dst; do *d++ = base64_digits[value & 63]; while ((value >>= 6)); @@ -498,47 +501,79 @@ static inline int print_netdata_double(char *dst, NETDATA_DOUBLE value) { return (int)(d - dst); } -static inline void buffer_print_uint64(BUFFER *wb, 
uint64_t value) { - buffer_need_bytes(wb, 50); - - char *s = &wb->buffer[wb->len]; +static inline size_t print_uint64(char *dst, uint64_t value) { + char *s = dst; char *d = print_uint64_reversed(s, value); char_array_reverse(s, d - 1); *d = '\0'; - wb->len += d - s; - - buffer_overflow_check(wb); + return d - s; } -static inline void buffer_print_int64(BUFFER *wb, int64_t value) { - buffer_need_bytes(wb, 50); +static inline size_t print_int64(char *dst, int64_t value) { + size_t len = 0; if(value < 0) { - buffer_fast_strcat(wb, "-", 1); + *dst++ = '-'; value = -value; + len++; } - buffer_print_uint64(wb, (uint64_t)value); + return print_uint64(dst, value) + len; +} +#define UINT64_MAX_LENGTH (24) // 21 should be enough +static inline void buffer_print_uint64(BUFFER *wb, uint64_t value) { + buffer_need_bytes(wb, UINT64_MAX_LENGTH); + wb->len += print_uint64(&wb->buffer[wb->len], value); buffer_overflow_check(wb); } -static inline void buffer_print_uint64_hex(BUFFER *wb, uint64_t value) { - buffer_need_bytes(wb, sizeof(uint64_t) * 2 + 2 + 1); +static inline void buffer_print_int64(BUFFER *wb, int64_t value) { + buffer_need_bytes(wb, UINT64_MAX_LENGTH); + wb->len += print_int64(&wb->buffer[wb->len], value); + buffer_overflow_check(wb); +} - buffer_fast_strcat(wb, HEX_PREFIX, sizeof(HEX_PREFIX) - 1); +#define UINT64_HEX_MAX_LENGTH ((sizeof(HEX_PREFIX) - 1) + (sizeof(uint64_t) * 2) + 1) +static inline size_t print_uint64_hex(char *dst, uint64_t value) { + char *d = dst; - char *s = &wb->buffer[wb->len]; - char *d = print_uint64_hex_reversed(s, value); - char_array_reverse(s, d - 1); - *d = '\0'; - wb->len += d - s; + const char *s = HEX_PREFIX; + while(*s) *d++ = *s++; + + char *e = print_uint64_hex_reversed(d, value); + char_array_reverse(d, e - 1); + *e = '\0'; + return e - dst; +} + +static inline size_t print_uint64_hex_full(char *dst, uint64_t value) { + char *d = dst; + + const char *s = HEX_PREFIX; + while(*s) *d++ = *s++; + + char *e = print_uint64_hex_reversed_full(d, value); + char_array_reverse(d, e - 1); + *e = '\0'; + return e - dst; +} + +static inline void buffer_print_uint64_hex(BUFFER *wb, uint64_t value) { + buffer_need_bytes(wb, UINT64_HEX_MAX_LENGTH); + wb->len += print_uint64_hex(&wb->buffer[wb->len], value); + buffer_overflow_check(wb); +} +static inline void buffer_print_uint64_hex_full(BUFFER *wb, uint64_t value) { + buffer_need_bytes(wb, UINT64_HEX_MAX_LENGTH); + wb->len += print_uint64_hex_full(&wb->buffer[wb->len], value); buffer_overflow_check(wb); } +#define UINT64_B64_MAX_LENGTH ((sizeof(IEEE754_UINT64_B64_PREFIX) - 1) + (sizeof(uint64_t) * 2) + 1) static inline void buffer_print_uint64_base64(BUFFER *wb, uint64_t value) { - buffer_need_bytes(wb, sizeof(uint64_t) * 2 + 2 + 1); + buffer_need_bytes(wb, UINT64_B64_MAX_LENGTH); buffer_fast_strcat(wb, IEEE754_UINT64_B64_PREFIX, sizeof(IEEE754_UINT64_B64_PREFIX) - 1); @@ -577,8 +612,9 @@ static inline void buffer_print_int64_base64(BUFFER *wb, int64_t value) { buffer_overflow_check(wb); } +#define DOUBLE_MAX_LENGTH (512) // 318 should be enough, including null static inline void buffer_print_netdata_double(BUFFER *wb, NETDATA_DOUBLE value) { - buffer_need_bytes(wb, 512 + 2); + buffer_need_bytes(wb, DOUBLE_MAX_LENGTH); if(isnan(value) || isinf(value)) { buffer_fast_strcat(wb, "null", 4); @@ -594,8 +630,9 @@ static inline void buffer_print_netdata_double(BUFFER *wb, NETDATA_DOUBLE value) buffer_overflow_check(wb); } +#define DOUBLE_HEX_MAX_LENGTH ((sizeof(IEEE754_DOUBLE_HEX_PREFIX) - 1) + (sizeof(uint64_t) * 2) + 1) 
static inline void buffer_print_netdata_double_hex(BUFFER *wb, NETDATA_DOUBLE value) { - buffer_need_bytes(wb, sizeof(uint64_t) * 2 + 2 + 1 + 1); + buffer_need_bytes(wb, DOUBLE_HEX_MAX_LENGTH); uint64_t *ptr = (uint64_t *) (&value); buffer_fast_strcat(wb, IEEE754_DOUBLE_HEX_PREFIX, sizeof(IEEE754_DOUBLE_HEX_PREFIX) - 1); @@ -609,8 +646,9 @@ static inline void buffer_print_netdata_double_hex(BUFFER *wb, NETDATA_DOUBLE va buffer_overflow_check(wb); } +#define DOUBLE_B64_MAX_LENGTH ((sizeof(IEEE754_DOUBLE_B64_PREFIX) - 1) + (sizeof(uint64_t) * 2) + 1) static inline void buffer_print_netdata_double_base64(BUFFER *wb, NETDATA_DOUBLE value) { - buffer_need_bytes(wb, sizeof(uint64_t) * 2 + 2 + 1 + 1); + buffer_need_bytes(wb, DOUBLE_B64_MAX_LENGTH); uint64_t *ptr = (uint64_t *) (&value); buffer_fast_strcat(wb, IEEE754_DOUBLE_B64_PREFIX, sizeof(IEEE754_DOUBLE_B64_PREFIX) - 1); @@ -775,7 +813,7 @@ static inline void buffer_json_member_add_quoted_string(BUFFER *wb, const char * wb->json.stack[wb->json.depth].count++; } -static inline void buffer_json_member_add_uuid(BUFFER *wb, const char *key, nd_uuid_t *value) { +static inline void buffer_json_member_add_uuid_ptr(BUFFER *wb, const char *key, nd_uuid_t *value) { buffer_print_json_comma_newline_spacing(wb); buffer_print_json_key(wb, key); buffer_fast_strcat(wb, ":", 1); @@ -791,6 +829,22 @@ static inline void buffer_json_member_add_uuid(BUFFER *wb, const char *key, nd_u wb->json.stack[wb->json.depth].count++; } +static inline void buffer_json_member_add_uuid(BUFFER *wb, const char *key, nd_uuid_t value) { + buffer_print_json_comma_newline_spacing(wb); + buffer_print_json_key(wb, key); + buffer_fast_strcat(wb, ":", 1); + + if(!uuid_is_null(value)) { + char uuid[GUID_LEN + 1]; + uuid_unparse_lower(value, uuid); + buffer_json_add_string_value(wb, uuid); + } + else + buffer_json_add_string_value(wb, NULL); + + wb->json.stack[wb->json.depth].count++; +} + static inline void buffer_json_member_add_boolean(BUFFER *wb, const char *key, bool value) { buffer_print_json_comma_newline_spacing(wb); buffer_print_json_key(wb, key); @@ -1066,6 +1120,7 @@ typedef enum __attribute__((packed)) { RRDF_FIELD_TRANSFORM_DURATION_S, // transform as duration in second to a human-readable duration RRDF_FIELD_TRANSFORM_DATETIME_MS, // UNIX epoch timestamp in ms RRDF_FIELD_TRANSFORM_DATETIME_USEC, // UNIX epoch timestamp in usec + RRDF_FIELD_TRANSFORM_XML, // format the field with an XML prettifier } RRDF_FIELD_TRANSFORM; static inline const char *rrdf_field_transform_to_string(RRDF_FIELD_TRANSFORM transform) { @@ -1085,6 +1140,9 @@ static inline const char *rrdf_field_transform_to_string(RRDF_FIELD_TRANSFORM tr case RRDF_FIELD_TRANSFORM_DATETIME_USEC: return "datetime_usec"; + + case RRDF_FIELD_TRANSFORM_XML: + return "xml"; } } diff --git a/src/libnetdata/buffered_reader/buffered_reader.h b/src/libnetdata/buffered_reader/buffered_reader.h index 1ec1d762b..505070b1c 100644 --- a/src/libnetdata/buffered_reader/buffered_reader.h +++ b/src/libnetdata/buffered_reader/buffered_reader.h @@ -55,9 +55,7 @@ static inline buffered_reader_ret_t buffered_reader_read(struct buffered_reader static inline buffered_reader_ret_t buffered_reader_read_timeout(struct buffered_reader *reader, int fd, int timeout_ms, bool log_error) { short int revents = 0; switch(wait_on_socket_or_cancel_with_timeout( -#ifdef ENABLE_HTTPS NULL, -#endif fd, timeout_ms, POLLIN, &revents)) { case 0: // data are waiting diff --git a/src/libnetdata/c_rhash/c_rhash.c b/src/libnetdata/c_rhash/c_rhash.c new file mode 
100644 index 000000000..ec2c061a2 --- /dev/null +++ b/src/libnetdata/c_rhash/c_rhash.c @@ -0,0 +1,219 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "../libnetdata.h" +#include "c_rhash_internal.h" + +c_rhash c_rhash_new(size_t bin_count) { + if (!bin_count) + bin_count = 1000; + + c_rhash hash = callocz(1, sizeof(struct c_rhash_s) + (bin_count * sizeof(struct bin_ll*)) ); + hash->bin_count = bin_count; + hash->bins = (c_rhash_bin *)((char*)hash + sizeof(struct c_rhash_s)); + + return hash; +} + +static size_t get_itemtype_len(uint8_t item_type, const void* item_data) { + switch (item_type) { + case ITEMTYPE_STRING: + return strlen(item_data) + 1; + case ITEMTYPE_UINT64: + return sizeof(uint64_t); + case ITEMTYPE_UINT8: + return 1; + case ITEMTYPE_OPAQUE_PTR: + return sizeof(void*); + default: + return 0; + } +} + +static int compare_bin_item(struct bin_item *item, uint8_t key_type, const void *key) { + if (item->key_type != key_type) + return 1; + + size_t key_value_len = get_itemtype_len(key_type, key); + + if(key_type == ITEMTYPE_STRING) { + size_t new_key_value_len = get_itemtype_len(item->key_type, item->key); + if (new_key_value_len != key_value_len) + return 1; + } + + if(memcmp(item->key, key, key_value_len) == 0) { + return 0; + } + + return 1; +} + +static int insert_into_bin(c_rhash_bin *bin, uint8_t key_type, const void *key, uint8_t value_type, const void *value) { + struct bin_item *prev = NULL; + while (*bin != NULL) { + if (!compare_bin_item(*bin, key_type, key)) { + freez((*bin)->value); + (*bin)->value_type = value_type; + (*bin)->value = mallocz(get_itemtype_len(value_type, value)); + memcpy((*bin)->value, value, get_itemtype_len(value_type, value)); + return 0; + } + prev = *bin; + bin = &(*bin)->next; + } + + if (*bin == NULL) + *bin = callocz(1, sizeof(struct bin_item)); + if (prev != NULL) + prev->next = *bin; + + (*bin)->key_type = key_type; + size_t len = get_itemtype_len(key_type, key); + (*bin)->key = mallocz(len); + memcpy((*bin)->key, key, len); + + (*bin)->value_type = value_type; + len = get_itemtype_len(value_type, value); + (*bin)->value = mallocz(len); + memcpy((*bin)->value, value, len); + return 0; +} + +static inline uint32_t get_bin_idx_str(c_rhash hash, const char *key) { + uint32_t nhash = simple_hash(key); + return nhash % hash->bin_count; +} + +static inline c_rhash_bin *get_binptr_by_str(c_rhash hash, const char *key) { + return &hash->bins[get_bin_idx_str(hash, key)]; +} + +int c_rhash_insert_str_ptr(c_rhash hash, const char *key, void *value) { + c_rhash_bin *bin = get_binptr_by_str(hash, key); + + return insert_into_bin(bin, ITEMTYPE_STRING, key, ITEMTYPE_OPAQUE_PTR, &value); +} + +int c_rhash_insert_str_uint8(c_rhash hash, const char *key, uint8_t value) { + c_rhash_bin *bin = get_binptr_by_str(hash, key); + + return insert_into_bin(bin, ITEMTYPE_STRING, key, ITEMTYPE_UINT8, &value); +} + +int c_rhash_insert_uint64_ptr(c_rhash hash, uint64_t key, void *value) { + c_rhash_bin *bin = &hash->bins[key % hash->bin_count]; + + return insert_into_bin(bin, ITEMTYPE_UINT64, &key, ITEMTYPE_OPAQUE_PTR, &value); +} + +int c_rhash_get_uint8_by_str(c_rhash hash, const char *key, uint8_t *ret_val) { + uint32_t nhash = get_bin_idx_str(hash, key); + + struct bin_item *bin = hash->bins[nhash]; + + while (bin) { + if (bin->key_type == ITEMTYPE_STRING) { + if (!strcmp(bin->key, key)) { + *ret_val = *(uint8_t*)bin->value; + return 0; + } + } + bin = bin->next; + } + return 1; +} + +int c_rhash_get_ptr_by_str(c_rhash hash, const char *key, void 
**ret_val) { + uint32_t nhash = get_bin_idx_str(hash, key); + + struct bin_item *bin = hash->bins[nhash]; + + while (bin) { + if (bin->key_type == ITEMTYPE_STRING) { + if (!strcmp(bin->key, key)) { + *ret_val = *((void**)bin->value); + return 0; + } + } + bin = bin->next; + } + *ret_val = NULL; + return 1; +} + +int c_rhash_get_ptr_by_uint64(c_rhash hash, uint64_t key, void **ret_val) { + uint32_t nhash = key % hash->bin_count; + + struct bin_item *bin = hash->bins[nhash]; + + while (bin) { + if (bin->key_type == ITEMTYPE_UINT64) { + if (*((uint64_t *)bin->key) == key) { + *ret_val = *((void**)bin->value); + return 0; + } + } + bin = bin->next; + } + *ret_val = NULL; + return 1; +} + +static void c_rhash_destroy_bin(c_rhash_bin bin) { + struct bin_item *next; + do { + next = bin->next; + freez(bin->key); + freez(bin->value); + freez(bin); + bin = next; + } while (bin != NULL); +} + +int c_rhash_iter_uint64_keys(c_rhash hash, c_rhash_iter_t *iter, uint64_t *key) { + while (iter->bin < hash->bin_count) { + if (iter->item != NULL) + iter->item = iter->item->next; + if (iter->item == NULL) { + if (iter->initialized) + iter->bin++; + else + iter->initialized = 1; + if (iter->bin < hash->bin_count) + iter->item = hash->bins[iter->bin]; + } + if (iter->item != NULL && iter->item->key_type == ITEMTYPE_UINT64) { + *key = *(uint64_t*)iter->item->key; + return 0; + } + } + return 1; +} + +int c_rhash_iter_str_keys(c_rhash hash, c_rhash_iter_t *iter, const char **key) { + while (iter->bin < hash->bin_count) { + if (iter->item != NULL) + iter->item = iter->item->next; + if (iter->item == NULL) { + if (iter->initialized) + iter->bin++; + else + iter->initialized = 1; + if (iter->bin < hash->bin_count) + iter->item = hash->bins[iter->bin]; + } + if (iter->item != NULL && iter->item->key_type == ITEMTYPE_STRING) { + *key = (const char*)iter->item->key; + return 0; + } + } + return 1; +} + +void c_rhash_destroy(c_rhash hash) { + for (size_t i = 0; i < hash->bin_count; i++) { + if (hash->bins[i] != NULL) + c_rhash_destroy_bin(hash->bins[i]); + } + freez(hash); +} diff --git a/src/libnetdata/c_rhash/c_rhash.h b/src/libnetdata/c_rhash/c_rhash.h new file mode 100644 index 000000000..990ef5432 --- /dev/null +++ b/src/libnetdata/c_rhash/c_rhash.h @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef C_RHASH_H +#define C_RHASH_H +#include "../libnetdata.h" + +#ifndef DEFAULT_BIN_COUNT + #define DEFAULT_BIN_COUNT 1000 +#endif + +#define ITEMTYPE_UNSET (0x0) +#define ITEMTYPE_STRING (0x1) +#define ITEMTYPE_UINT8 (0x2) +#define ITEMTYPE_UINT64 (0x3) +#define ITEMTYPE_OPAQUE_PTR (0x4) + +typedef struct c_rhash_s *c_rhash; + +c_rhash c_rhash_new(size_t bin_count); + +void c_rhash_destroy(c_rhash hash); + +// # Insert +// ## Insert where key is string +int c_rhash_insert_str_ptr(c_rhash hash, const char *key, void *value); +int c_rhash_insert_str_uint8(c_rhash hash, const char *key, uint8_t value); +// ## Insert where key is uint64 +int c_rhash_insert_uint64_ptr(c_rhash hash, uint64_t key, void *value); + +// # Get +// ## Get where key is string +int c_rhash_get_ptr_by_str(c_rhash hash, const char *key, void **ret_val); +int c_rhash_get_uint8_by_str(c_rhash hash, const char *key, uint8_t *ret_val); +// ## Get where key is uint64 +int c_rhash_get_ptr_by_uint64(c_rhash hash, uint64_t key, void **ret_val); + +typedef struct { + size_t bin; + struct bin_item *item; + int initialized; +} c_rhash_iter_t; + +#define C_RHASH_ITER_T_INITIALIZER { .bin = 0, .item = NULL, .initialized = 0 } + +#define 
c_rhash_iter_t_initialize(p_iter) memset(p_iter, 0, sizeof(c_rhash_iter_t)) + +/* + * goes through the whole hash map and returns every + * type uint64 key present/stored + * + * it is not necessary to finish iterating and the iterator can be reinitialized + * there are no guarantees on the order in which the keys will come + * behavior here is implementation dependent and can change any time + * + * returns: + * 0 for every key and stores the key in *key + * 1 on error or when all keys of this type have already been iterated over + */ +int c_rhash_iter_uint64_keys(c_rhash hash, c_rhash_iter_t *iter, uint64_t *key); + +int c_rhash_iter_str_keys(c_rhash hash, c_rhash_iter_t *iter, const char **key); + +#endif diff --git a/src/libnetdata/c_rhash/c_rhash_internal.h b/src/libnetdata/c_rhash/c_rhash_internal.h new file mode 100644 index 000000000..c5800310c --- /dev/null +++ b/src/libnetdata/c_rhash/c_rhash_internal.h @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "c_rhash.h" + +struct bin_item { + uint8_t key_type:4; + void *key; + uint8_t value_type:4; + void *value; + + struct bin_item *next; +}; + +typedef struct bin_item *c_rhash_bin; + +struct c_rhash_s { + size_t bin_count; + c_rhash_bin *bins; +}; diff --git a/src/libnetdata/c_rhash/tests.c b/src/libnetdata/c_rhash/tests.c new file mode 100644 index 000000000..3caa7d003 --- /dev/null +++ b/src/libnetdata/c_rhash/tests.c @@ -0,0 +1,273 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include +#include + +#include "c_rhash.h" + +// terminal color codes +#define KNRM "\x1B[0m" +#define KRED "\x1B[31m" +#define KGRN "\x1B[32m" +#define KYEL "\x1B[33m" +#define KBLU "\x1B[34m" +#define KMAG "\x1B[35m" +#define KCYN "\x1B[36m" +#define KWHT "\x1B[37m" + +#define KEY_1 "key1" +#define KEY_2 "keya" + +#define PRINT_ERR(str, ...) fprintf(stderr, "└─╼ ❌ " KRED str KNRM "\n" __VA_OPT__(,) __VA_ARGS__) + +#define ASSERT_RETVAL(fnc, comparator, expected_retval, ...) \ +{ int rval; \ +if(!((rval = fnc(__VA_ARGS__)) comparator expected_retval)) { \ + PRINT_ERR("Failed test. Value returned by \"%s\" in fnc:\"%s\",line:%d is not equal to expected value. Expected:%d, Got:%d", #fnc, __FUNCTION__, __LINE__, expected_retval, rval); \ + rc = 1; \ + goto test_cleanup; \ +} passed_subtest_count++;}; + +#define ASSERT_VAL_UINT8(returned, expected) \ +if(returned != expected) { \ + PRINT_ERR("Failed test. Value returned (%d) doesn't match expected (%d)! fnc:\"%s\",line:%d", returned, expected, __FUNCTION__, __LINE__); \ + rc = 1; \ + goto test_cleanup; \ +} passed_subtest_count++; + +#define ASSERT_VAL_PTR(returned, expected) \ +if((void*)returned != (void*)expected) { \ + PRINT_ERR("Failed test. Value returned(%p) doesn't match expected(%p)! fnc:\"%s\",line:%d", (void*)returned, (void*)expected, __FUNCTION__, __LINE__); \ + rc = 1; \ + goto test_cleanup; \ +} passed_subtest_count++; + +#define ALL_SUBTESTS_PASS() printf("└─╼ ✅" KGRN " Test \"%s\" DONE. All of %zu subtests PASS.
(line:%d)\n" KNRM, __FUNCTION__, passed_subtest_count, __LINE__); + +#define TEST_START() size_t passed_subtest_count = 0; int rc = 0; printf("╒═ Starting test \"%s\"\n", __FUNCTION__); + +int test_str_uint8() { + c_rhash hash = c_rhash_new(100); + uint8_t val; + + TEST_START(); + // function should fail on empty hash + ASSERT_RETVAL(c_rhash_get_uint8_by_str, !=, 0, hash, KEY_1, &val); + + ASSERT_RETVAL(c_rhash_insert_str_uint8, ==, 0, hash, KEY_1, 5); + ASSERT_RETVAL(c_rhash_get_uint8_by_str, ==, 0, hash, KEY_1, &val); + ASSERT_VAL_UINT8(5, val); + + ASSERT_RETVAL(c_rhash_insert_str_uint8, ==, 0, hash, KEY_2, 8); + ASSERT_RETVAL(c_rhash_get_uint8_by_str, ==, 0, hash, KEY_1, &val); + ASSERT_VAL_UINT8(5, val); + ASSERT_RETVAL(c_rhash_get_uint8_by_str, ==, 0, hash, KEY_2, &val); + ASSERT_VAL_UINT8(8, val); + ASSERT_RETVAL(c_rhash_get_uint8_by_str, !=, 0, hash, "sndnskjdf", &val); + + // test update of key + ASSERT_RETVAL(c_rhash_insert_str_uint8, ==, 0, hash, KEY_1, 100); + ASSERT_RETVAL(c_rhash_get_uint8_by_str, ==, 0, hash, KEY_1, &val); + ASSERT_VAL_UINT8(100, val); + + ALL_SUBTESTS_PASS(); +test_cleanup: + c_rhash_destroy(hash); + return rc; +} + +int test_uint64_ptr() { + c_rhash hash = c_rhash_new(100); + void *val; + + TEST_START(); + + // function should fail on empty hash + ASSERT_RETVAL(c_rhash_get_ptr_by_uint64, !=, 0, hash, 0, &val); + + ASSERT_RETVAL(c_rhash_insert_uint64_ptr, ==, 0, hash, 0, &hash); + ASSERT_RETVAL(c_rhash_get_ptr_by_uint64, ==, 0, hash, 0, &val); + ASSERT_VAL_PTR(&hash, val); + + ASSERT_RETVAL(c_rhash_insert_uint64_ptr, ==, 0, hash, 1, &val); + ASSERT_RETVAL(c_rhash_get_ptr_by_uint64, ==, 0, hash, 0, &val); + ASSERT_VAL_PTR(&hash, val); + ASSERT_RETVAL(c_rhash_get_ptr_by_uint64, ==, 0, hash, 1, &val); + ASSERT_VAL_PTR(&val, val); + ASSERT_RETVAL(c_rhash_get_ptr_by_uint64, !=, 0, hash, 2, &val); + + ALL_SUBTESTS_PASS(); +test_cleanup: + c_rhash_destroy(hash); + return rc; +} + +#define UINT64_PTR_INC_ITERATION_COUNT 5000 +int test_uint64_ptr_incremental() { + c_rhash hash = c_rhash_new(100); + void *val; + + TEST_START(); + + char a = 0x20; + char *ptr = &a; + while(ptr < &a + UINT64_PTR_INC_ITERATION_COUNT) { + ASSERT_RETVAL(c_rhash_insert_uint64_ptr, ==, 0, hash, (ptr-&a), ptr); + ptr++; + } + + ptr = &a; + char *retptr; + for(int i = 0; i < UINT64_PTR_INC_ITERATION_COUNT; i++) { + ASSERT_RETVAL(c_rhash_get_ptr_by_uint64, ==, 0, hash, i, (void**)&retptr); + ASSERT_VAL_PTR(retptr, (&a+i)); + } + + ALL_SUBTESTS_PASS(); +test_cleanup: + c_rhash_destroy(hash); + return rc; +} + +struct test_string { + const char *str; + int counter; +}; + +struct test_string test_strings[] = { + { .str = "Cillum reprehenderit eiusmod elit nisi aliquip esse exercitation commodo Lorem voluptate esse.", .counter = 0 }, + { .str = "Ullamco eiusmod tempor occaecat ad.", .counter = 0 }, + { .str = "Esse aliquip tempor sint tempor ullamco duis aute incididunt ad.", .counter = 0 }, + { .str = "Cillum Lorem labore cupidatat commodo proident adipisicing.", .counter = 0 }, + { .str = "Quis ad cillum officia exercitation.", .counter = 0 }, + { .str = "Ipsum enim dolor ullamco amet sint nisi ut occaecat sint non.", .counter = 0 }, + { .str = "Id duis officia ipsum cupidatat velit fugiat.", .counter = 0 }, + { .str = "Aliqua non occaecat voluptate reprehenderit reprehenderit veniam minim exercitation ea aliquip enim aliqua deserunt qui.", .counter = 0 }, + { .str = "Ullamco elit tempor laboris reprehenderit quis deserunt duis quis tempor reprehenderit magna dolore reprehenderit exercitation.", 
.counter = 0 }, + { .str = "Culpa do dolor quis incididunt et labore in ex.", .counter = 0 }, + { .str = "Aliquip velit cupidatat qui incididunt ipsum nostrud eiusmod ut proident nisi magna fugiat excepteur.", .counter = 0 }, + { .str = "Aliqua qui dolore tempor id proident ullamco sunt magna.", .counter = 0 }, + { .str = "Labore eiusmod ut fugiat dolore reprehenderit mollit magna.", .counter = 0 }, + { .str = "Veniam aliquip dolor excepteur minim nulla esse cupidatat esse.", .counter = 0 }, + { .str = "Do quis dolor irure nostrud occaecat aute proident anim.", .counter = 0 }, + { .str = "Enim veniam non nulla ad quis sit amet.", .counter = 0 }, + { .str = "Cillum reprehenderit do enim esse do ullamco consectetur ea.", .counter = 0 }, + { .str = "Sit et duis sint anim qui ad anim labore exercitation sunt cupidatat.", .counter = 0 }, + { .str = "Dolor officia adipisicing sint pariatur in dolor occaecat officia reprehenderit magna.", .counter = 0 }, + { .str = "Aliquip dolore qui occaecat eiusmod sunt incididunt reprehenderit minim et.", .counter = 0 }, + { .str = "Aute fugiat laboris cillum tempor consequat tempor do non laboris culpa officia nisi.", .counter = 0 }, + { .str = "Et excepteur do aliquip fugiat nisi velit tempor officia enim quis elit incididunt.", .counter = 0 }, + { .str = "Eu officia adipisicing incididunt occaecat officia cupidatat enim sit sit officia.", .counter = 0 }, + { .str = "Do amet cillum duis pariatur commodo nulla cillum magna nulla Lorem veniam cupidatat.", .counter = 0 }, + { .str = "Dolor adipisicing voluptate laboris occaecat culpa aliquip ipsum ut consequat aliqua aliquip commodo sunt velit.", .counter = 0 }, + { .str = "Nulla proident ipsum quis nulla.", .counter = 0 }, + { .str = "Laborum adipisicing nulla do aute aliqua est quis sint culpa pariatur laborum voluptate qui.", .counter = 0 }, + { .str = "Proident eiusmod sunt et nulla elit pariatur dolore irure ex voluptate excepteur adipisicing consectetur.", .counter = 0 }, + { .str = "Consequat ex voluptate officia excepteur aute deserunt proident commodo et.", .counter = 0 }, + { .str = "Velit sit cupidatat dolor dolore.", .counter = 0 }, + { .str = "Sunt enim do non anim nostrud exercitation ullamco ex proident commodo.", .counter = 0 }, + { .str = "Id ex officia cillum ad.", .counter = 0 }, + { .str = "Laboris in sunt eiusmod veniam laboris nostrud.", .counter = 0 }, + { .str = "Ex magna occaecat ea ea incididunt aliquip.", .counter = 0 }, + { .str = "Sunt eiusmod ex nostrud eu pariatur sit cupidatat ea adipisicing cillum culpa esse consequat aliquip.", .counter = 0 }, + { .str = "Excepteur commodo qui incididunt enim culpa sunt non excepteur Lorem adipisicing.", .counter = 0 }, + { .str = "Quis officia est ullamco reprehenderit incididunt occaecat pariatur ex reprehenderit nisi.", .counter = 0 }, + { .str = "Culpa irure proident proident et eiusmod irure aliqua ipsum cupidatat minim sit.", .counter = 0 }, + { .str = "Qui cupidatat aliquip est velit magna veniam.", .counter = 0 }, + { .str = "Pariatur ad ad mollit nostrud non irure minim veniam anim aliquip quis eu.", .counter = 0 }, + { .str = "Nisi ex minim eu adipisicing tempor Lorem nisi do ad exercitation est non eu.", .counter = 0 }, + { .str = "Cupidatat do mollit ad commodo cupidatat ut.", .counter = 0 }, + { .str = "Est non excepteur eiusmod nostrud et eu.", .counter = 0 }, + { .str = "Cupidatat mollit nisi magna officia ut elit eiusmod.", .counter = 0 }, + { .str = "Est aliqua consectetur laboris ex consequat est ut dolor.", .counter = 0 }, + 
{ .str = "Duis eu laboris laborum ut id Lorem nostrud qui ad velit proident fugiat minim ullamco.", .counter = 0 }, + { .str = "Pariatur esse excepteur anim amet excepteur irure sint quis esse ex cupidatat ut.", .counter = 0 }, + { .str = "Esse reprehenderit amet qui excepteur aliquip amet.", .counter = 0 }, + { .str = "Ullamco laboris elit labore adipisicing aute nulla qui laborum tempor officia ut dolor aute.", .counter = 0 }, + { .str = "Commodo sunt cillum velit minim laborum Lorem aliqua tempor ad id eu.", .counter = 0 }, + { .str = NULL, .counter = 0 } +}; + +uint32_t test_strings_contain_element(const char *str) { + struct test_string *str_desc = test_strings; + while(str_desc->str) { + if (!strcmp(str, str_desc->str)) + return str_desc - test_strings; + str_desc++; + } + return -1; +} + +#define TEST_INCREMENT_STR_KEYS_HASH_SIZE 20 +int test_increment_str_keys() { + c_rhash hash; + const char *key; + + TEST_START(); + + hash = c_rhash_new(TEST_INCREMENT_STR_KEYS_HASH_SIZE); // less than element count of test_strings + + c_rhash_iter_t iter = C_RHASH_ITER_T_INITIALIZER; + + // check iter on empty hash + ASSERT_RETVAL(c_rhash_iter_str_keys, !=, 0, hash, &iter, &key); + + int32_t element_count = 0; + while (test_strings[element_count].str) { + ASSERT_RETVAL(c_rhash_insert_str_ptr, ==, 0, hash, test_strings[element_count].str, NULL); + test_strings[element_count].counter++; // we want to test we got each key exactly once + element_count++; + } + + if (element_count <= TEST_INCREMENT_STR_KEYS_HASH_SIZE * 2) { + // verify we are actually test also iteration trough single bin (when 2 keys have same hash pointing them to same bin) + PRINT_ERR("For this test to properly test all the hash size needs to be much smaller than all test key count."); + rc = 1; + goto test_cleanup; + } + + // we insert another type of key as iterator should skip it + // in case is another type + ASSERT_RETVAL(c_rhash_insert_uint64_ptr, ==, 0, hash, 5, NULL); + + c_rhash_iter_t_initialize(&iter); + while(!c_rhash_iter_str_keys(hash, &iter, &key)) { + element_count--; + int i; + if ( (i = test_strings_contain_element(key)) < 0) { + PRINT_ERR("Key \"%s\" is not present in test_strings array! (Fnc: %s, Line: %d)", key, __FUNCTION__, __LINE__); + rc = 1; + goto test_cleanup; + } + passed_subtest_count++; + + test_strings[i].counter--; + } + ASSERT_VAL_UINT8(element_count, 0); // we added also same non string keys + + // check each key was present exactly once + struct test_string *str_desc = test_strings; + while (str_desc->str) { + ASSERT_VAL_UINT8(str_desc->counter, 0); + str_desc++; + } + + ALL_SUBTESTS_PASS(); +test_cleanup: + c_rhash_destroy(hash); + return rc; +} + +#define RUN_TEST(fnc) \ +if(fnc()) \ + return 1; + +int main(int argc, char *argv[]) { + RUN_TEST(test_str_uint8); + RUN_TEST(test_uint64_ptr); + RUN_TEST(test_uint64_ptr_incremental); + RUN_TEST(test_increment_str_keys); + // TODO hash with mixed key tests + // TODO iterator test + return 0; +} diff --git a/src/libnetdata/circular_buffer/README.md b/src/libnetdata/circular_buffer/README.md index b2d580cb9..db42e5b3b 100644 --- a/src/libnetdata/circular_buffer/README.md +++ b/src/libnetdata/circular_buffer/README.md @@ -1,12 +1,3 @@ - - # Circular Buffer `struct circular_buffer` is an adaptive circular buffer. 
It will start at an initial size diff --git a/src/libnetdata/clocks/clocks.c b/src/libnetdata/clocks/clocks.c index 5da450a2d..c65886198 100644 --- a/src/libnetdata/clocks/clocks.c +++ b/src/libnetdata/clocks/clocks.c @@ -78,7 +78,9 @@ static usec_t get_clock_resolution(clockid_t clock) { // perform any initializations required for clocks -void clocks_init(void) { +static __attribute__((constructor)) void clocks_init(void) { + os_get_system_HZ(); + // monotonic raw has to be tested before boottime test_clock_monotonic_raw(); @@ -87,6 +89,18 @@ void clocks_init(void) { clock_monotonic_resolution = get_clock_resolution(clock_monotonic_to_use); clock_realtime_resolution = get_clock_resolution(CLOCK_REALTIME); + +#if defined(OS_WINDOWS) + timeBeginPeriod(1); + clock_monotonic_resolution = 1 * USEC_PER_MS; + clock_realtime_resolution = 1 * USEC_PER_MS; +#endif +} + +static __attribute__((destructor)) void clocks_fin(void) { +#if defined(OS_WINDOWS) + timeEndPeriod(1); +#endif } inline time_t now_sec(clockid_t clk_id) { @@ -246,13 +260,18 @@ void sleep_to_absolute_time(usec_t usec) { } #endif -#define HEARTBEAT_ALIGNMENT_STATISTICS_SIZE 10 -netdata_mutex_t heartbeat_alignment_mutex = NETDATA_MUTEX_INITIALIZER; +#define HEARTBEAT_MIN_OFFSET_UT (150 * USEC_PER_MS) +#define HEARTBEAT_RANDOM_OFFSET_UT (350 * USEC_PER_MS) + +#define HEARTBEAT_ALIGNMENT_STATISTICS_SIZE 20 +static SPINLOCK heartbeat_alignment_spinlock = NETDATA_SPINLOCK_INITIALIZER; static size_t heartbeat_alignment_id = 0; struct heartbeat_thread_statistics { + pid_t tid; size_t sequence; usec_t dt; + usec_t randomness; }; static struct heartbeat_thread_statistics heartbeat_alignment_values[HEARTBEAT_ALIGNMENT_STATISTICS_SIZE] = { 0 }; @@ -290,19 +309,58 @@ void heartbeat_statistics(usec_t *min_ptr, usec_t *max_ptr, usec_t *average_ptr, memcpy(old, current, sizeof(struct heartbeat_thread_statistics) * HEARTBEAT_ALIGNMENT_STATISTICS_SIZE); } -inline void heartbeat_init(heartbeat_t *hb) { - hb->realtime = 0ULL; - hb->randomness = (usec_t)250 * USEC_PER_MS + ((usec_t)(now_realtime_usec() * clock_realtime_resolution) % (250 * USEC_PER_MS)); - hb->randomness -= (hb->randomness % clock_realtime_resolution); +static XXH64_hash_t heartbeat_hash(usec_t step, size_t statistics_id) { + struct { + usec_t step; + pid_t pid; + pid_t tid; + usec_t now_ut; + size_t statistics_id; + char tag[ND_THREAD_TAG_MAX + 1]; + } key = { + .step = step, + .pid = getpid(), + .tid = os_gettid(), + .now_ut = now_realtime_usec(), + .statistics_id = statistics_id, + }; + strncpyz(key.tag, nd_thread_tag(), sizeof(key.tag) - 1); + return XXH3_64bits(&key, sizeof(key)); +} + +static usec_t heartbeat_randomness(XXH64_hash_t hash) { + usec_t offset_ut = HEARTBEAT_MIN_OFFSET_UT + (hash % HEARTBEAT_RANDOM_OFFSET_UT); + + // Calculate the scheduler tick interval in microseconds + usec_t scheduler_step_ut = USEC_PER_SEC / (usec_t)system_hz; + if(scheduler_step_ut > 10 * USEC_PER_MS) + scheduler_step_ut = 10 * USEC_PER_MS; + + // if the offset is close to the scheduler tick, move it away from it + if(offset_ut % scheduler_step_ut < scheduler_step_ut / 4) + offset_ut += scheduler_step_ut / 4; + + return offset_ut; +} - netdata_mutex_lock(&heartbeat_alignment_mutex); +inline void heartbeat_init(heartbeat_t *hb, usec_t step) { + if(!step) step = USEC_PER_SEC; + + spinlock_lock(&heartbeat_alignment_spinlock); hb->statistics_id = heartbeat_alignment_id; heartbeat_alignment_id++; - netdata_mutex_unlock(&heartbeat_alignment_mutex); + spinlock_unlock(&heartbeat_alignment_spinlock); + 
+ hb->step = step; + hb->realtime = 0ULL; + hb->hash = heartbeat_hash(hb->step, hb->statistics_id); + hb->randomness = heartbeat_randomness(hb->hash); if(hb->statistics_id < HEARTBEAT_ALIGNMENT_STATISTICS_SIZE) { heartbeat_alignment_values[hb->statistics_id].dt = 0; heartbeat_alignment_values[hb->statistics_id].sequence = 0; + heartbeat_alignment_values[hb->statistics_id].randomness = hb->randomness; + heartbeat_alignment_values[hb->statistics_id].tid = os_gettid(); } } @@ -310,17 +368,8 @@ inline void heartbeat_init(heartbeat_t *hb) { // it waits using the monotonic clock // it returns the dt using the realtime clock -usec_t heartbeat_next(heartbeat_t *hb, usec_t tick) { - if(unlikely(hb->randomness > tick / 2)) { - // TODO: The heartbeat tick should be specified at the heartbeat_init() function - usec_t tmp = (now_realtime_usec() * clock_realtime_resolution) % (tick / 2); - - nd_log_limit_static_global_var(erl, 10, 0); - nd_log_limit(&erl, NDLS_DAEMON, NDLP_NOTICE, - "heartbeat randomness of %"PRIu64" is too big for a tick of %"PRIu64" - setting it to %"PRIu64"", - hb->randomness, tick, tmp); - hb->randomness = tmp; - } +usec_t heartbeat_next(heartbeat_t *hb) { + usec_t tick = hb->step; usec_t dt; usec_t now = now_realtime_usec(); @@ -331,10 +380,13 @@ usec_t heartbeat_next(heartbeat_t *hb, usec_t tick) { next = next - (next % clock_realtime_resolution) + clock_realtime_resolution; // sleep_usec() has a loop to guarantee we will sleep for at least the requested time. - // According the specs, when we sleep for a relative time, clock adjustments should not affect the duration - // we sleep. + // According to the specs, when we sleep for a relative time, clock adjustments should + // not affect the duration we sleep. sleep_usec_with_now(next - now, now); + spinlock_lock(&heartbeat_alignment_spinlock); now = now_realtime_usec(); + spinlock_unlock(&heartbeat_alignment_spinlock); + dt = now - hb->realtime; if(hb->statistics_id < HEARTBEAT_ALIGNMENT_STATISTICS_SIZE) { @@ -368,22 +420,15 @@ usec_t heartbeat_next(heartbeat_t *hb, usec_t tick) { return dt; } -#ifdef OS_WINDOWS - -#include "windows.h" - -void sleep_usec_with_now(usec_t usec, usec_t started_ut) -{ +#if defined(OS_WINDOWS) +void sleep_usec_with_now(usec_t usec, usec_t started_ut) { if (!started_ut) started_ut = now_realtime_usec(); usec_t end_ut = started_ut + usec; usec_t remaining_ut = usec; - timeBeginPeriod(1); - - while (remaining_ut >= 1000) - { + while (remaining_ut >= clock_realtime_resolution) { DWORD sleep_ms = (DWORD) (remaining_ut / USEC_PER_MS); Sleep(sleep_ms); @@ -393,8 +438,6 @@ void sleep_usec_with_now(usec_t usec, usec_t started_ut) remaining_ut = end_ut - now_ut; } - - timeEndPeriod(1); } #else void sleep_usec_with_now(usec_t usec, usec_t started_ut) { @@ -406,7 +449,7 @@ void sleep_usec_with_now(usec_t usec, usec_t started_ut) { }; // make sure errno is not EINTR - errno = 0; + errno_clear(); if(!started_ut) started_ut = now_realtime_usec(); @@ -419,7 +462,7 @@ void sleep_usec_with_now(usec_t usec, usec_t started_ut) { rem = (struct timespec){ 0, 0 }; // break an infinite loop - errno = 0; + errno_clear(); usec_t now_ut = now_realtime_usec(); if(now_ut >= end_ut) @@ -429,8 +472,8 @@ void sleep_usec_with_now(usec_t usec, usec_t started_ut) { usec_t check_ut = now_ut - started_ut; if(remaining_ut > check_ut) { req = (struct timespec){ - .tv_sec = (time_t) ( check_ut / USEC_PER_SEC), - .tv_nsec = (suseconds_t) ((check_ut % USEC_PER_SEC) * NSEC_PER_USEC) + .tv_sec = (time_t) ( check_ut / USEC_PER_SEC), + .tv_nsec = 
(suseconds_t) ((check_ut % USEC_PER_SEC) * NSEC_PER_USEC) }; } } @@ -452,7 +495,7 @@ static inline collected_number uptime_from_boottime(void) { } static procfile *read_proc_uptime_ff = NULL; -static inline collected_number read_proc_uptime(char *filename) { +static inline collected_number read_proc_uptime(const char *filename) { if(unlikely(!read_proc_uptime_ff)) { read_proc_uptime_ff = procfile_open(filename, " \t", PROCFILE_FLAG_DEFAULT); if(unlikely(!read_proc_uptime_ff)) return 0; @@ -473,7 +516,7 @@ static inline collected_number read_proc_uptime(char *filename) { return (collected_number)(strtondd(procfile_lineword(read_proc_uptime_ff, 0, 0), NULL) * 1000.0); } -inline collected_number uptime_msec(char *filename){ +inline collected_number uptime_msec(const char *filename){ static int use_boottime = -1; if(unlikely(use_boottime == -1)) { diff --git a/src/libnetdata/clocks/clocks.h b/src/libnetdata/clocks/clocks.h index f989fd6b8..03860d66d 100644 --- a/src/libnetdata/clocks/clocks.h +++ b/src/libnetdata/clocks/clocks.h @@ -4,6 +4,7 @@ #define NETDATA_CLOCKS_H 1 #include "../libnetdata.h" +#include "libnetdata/os/random.h" #ifndef HAVE_CLOCK_GETTIME struct timespec { @@ -18,12 +19,19 @@ struct timespec { typedef uint64_t nsec_t; typedef uint64_t msec_t; typedef uint64_t usec_t; + +typedef int64_t snsec_t; typedef int64_t susec_t; +typedef int64_t smsec_t; + +typedef int64_t stime_t; typedef struct heartbeat { + usec_t step; usec_t realtime; usec_t randomness; size_t statistics_id; + XXH64_hash_t hash; } heartbeat_t; /* Linux value is as good as any other */ @@ -72,6 +80,8 @@ typedef struct heartbeat { #define MSEC_PER_SEC 1000ULL #endif +#define NS100_PER_MS 10000ULL + #define USEC_PER_MS 1000ULL #ifndef HAVE_CLOCK_GETTIME @@ -132,26 +142,24 @@ msec_t timeval_msec(struct timeval *tv); usec_t dt_usec(struct timeval *now, struct timeval *old); susec_t dt_usec_signed(struct timeval *now, struct timeval *old); -void heartbeat_init(heartbeat_t *hb); +void heartbeat_init(heartbeat_t *hb, usec_t step); /* Sleeps until next multiple of tick using monotonic clock. * Returns elapsed time in microseconds since previous heartbeat */ -usec_t heartbeat_next(heartbeat_t *hb, usec_t tick); +usec_t heartbeat_next(heartbeat_t *hb); void heartbeat_statistics(usec_t *min_ptr, usec_t *max_ptr, usec_t *average_ptr, size_t *count_ptr); void sleep_usec_with_now(usec_t usec, usec_t started_ut); #define sleep_usec(usec) sleep_usec_with_now(usec, 0) -void clocks_init(void); - // lower level functions - avoid using directly time_t now_sec(clockid_t clk_id); usec_t now_usec(clockid_t clk_id); int now_timeval(clockid_t clk_id, struct timeval *tv); -collected_number uptime_msec(char *filename); +collected_number uptime_msec(const char *filename); extern usec_t clock_monotonic_resolution; extern usec_t clock_realtime_resolution; diff --git a/src/libnetdata/common.h b/src/libnetdata/common.h new file mode 100644 index 000000000..aafe51176 --- /dev/null +++ b/src/libnetdata/common.h @@ -0,0 +1,419 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +/* + * This file should include everything from the operating system needed to compile Netdata, + * without any Netdata specific includes. + * + * It should be the baseline of includes (operating system and common libraries related). 
+ */ + +#ifndef LIBNETDATA_COMMON_H +#define LIBNETDATA_COMMON_H + +# ifdef __cplusplus +extern "C" { +# endif + +#include "config.h" + +#if defined(NETDATA_DEV_MODE) && !defined(NETDATA_INTERNAL_CHECKS) +#define NETDATA_INTERNAL_CHECKS 1 +#endif + +#ifndef SIZEOF_VOID_P +#error SIZEOF_VOID_P is not defined +#endif + +#if SIZEOF_VOID_P == 4 +#define ENV32BIT 1 +#else +#define ENV64BIT 1 +#endif + +#ifdef HAVE_LIBDATACHANNEL +#define ENABLE_WEBRTC 1 +#endif + +#define STRINGIFY(x) #x +#define TOSTRING(x) STRINGIFY(x) + +// -------------------------------------------------------------------------------------------------------------------- +// NETDATA_OS_TYPE + +#if defined(__FreeBSD__) +#include +#define NETDATA_OS_TYPE "freebsd" +#elif defined(__APPLE__) +#define NETDATA_OS_TYPE "macos" +#elif defined(OS_WINDOWS) +#define NETDATA_OS_TYPE "windows" +#else +#define NETDATA_OS_TYPE "linux" +#endif /* __FreeBSD__, __APPLE__*/ + +// -------------------------------------------------------------------------------------------------------------------- +// memory allocators + +/* select the memory allocator, based on autoconf findings */ +#if defined(ENABLE_JEMALLOC) + +#if defined(HAVE_JEMALLOC_JEMALLOC_H) +#include +#else // !defined(HAVE_JEMALLOC_JEMALLOC_H) +#include +#endif // !defined(HAVE_JEMALLOC_JEMALLOC_H) + +#elif defined(ENABLE_TCMALLOC) + +#include + +#else /* !defined(ENABLE_JEMALLOC) && !defined(ENABLE_TCMALLOC) */ + +#if !(defined(__FreeBSD__) || defined(__APPLE__)) +#include +#endif /* __FreeBSD__ || __APPLE__ */ + +#endif /* !defined(ENABLE_JEMALLOC) && !defined(ENABLE_TCMALLOC) */ + +// -------------------------------------------------------------------------------------------------------------------- + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef HAVE_ARPA_INET_H +#include +#endif + +#ifdef HAVE_NETINET_TCP_H +#include +#endif + +#ifdef HAVE_SYS_IOCTL_H +#include +#endif + +#ifdef HAVE_GRP_H +#include +#else +typedef uint32_t gid_t; +#endif + +#ifdef HAVE_PWD_H +#include +#else +typedef uint32_t uid_t; +#endif + +#ifdef HAVE_NET_IF_H +#include +#endif + +#ifdef HAVE_POLL_H +#include +#endif + +#ifdef HAVE_SYSLOG_H +#include +#else +/* priorities */ +#define LOG_EMERG 0 /* system is unusable */ +#define LOG_ALERT 1 /* action must be taken immediately */ +#define LOG_CRIT 2 /* critical conditions */ +#define LOG_ERR 3 /* error conditions */ +#define LOG_WARNING 4 /* warning conditions */ +#define LOG_NOTICE 5 /* normal but significant condition */ +#define LOG_INFO 6 /* informational */ +#define LOG_DEBUG 7 /* debug-level messages */ + +/* facility codes */ +#define LOG_KERN (0<<3) /* kernel messages */ +#define LOG_USER (1<<3) /* random user-level messages */ +#define LOG_MAIL (2<<3) /* mail system */ +#define LOG_DAEMON (3<<3) /* system daemons */ +#define LOG_AUTH (4<<3) /* security/authorization messages */ +#define LOG_SYSLOG (5<<3) /* messages generated internally by syslogd */ +#define LOG_LPR (6<<3) /* line printer subsystem */ +#define LOG_NEWS (7<<3) /* network news subsystem */ +#define LOG_UUCP (8<<3) /* UUCP subsystem */ +#define LOG_CRON (9<<3) /* clock daemon */ +#define LOG_AUTHPRIV (10<<3) /* security/authorization messages (private) */ +#define LOG_FTP (11<<3) /* ftp daemon */ + +/* other codes through 15 reserved for system use */ +#define LOG_LOCAL0 (16<<3) /* reserved 
for local use */ +#define LOG_LOCAL1 (17<<3) /* reserved for local use */ +#define LOG_LOCAL2 (18<<3) /* reserved for local use */ +#define LOG_LOCAL3 (19<<3) /* reserved for local use */ +#define LOG_LOCAL4 (20<<3) /* reserved for local use */ +#define LOG_LOCAL5 (21<<3) /* reserved for local use */ +#define LOG_LOCAL6 (22<<3) /* reserved for local use */ +#define LOG_LOCAL7 (23<<3) /* reserved for local use */ +#endif + +#ifdef HAVE_SYS_MMAN_H +#include +#endif + +#ifdef HAVE_SYS_RESOURCE_H +#include +#endif + +#ifdef HAVE_SYS_SOCKET_H +#include +#endif + +#ifdef HAVE_SYS_WAIT_H +#include +#endif + +#ifdef HAVE_SYS_UN_H +#include +#endif + +#ifdef HAVE_SPAWN_H +#include +#endif + +#ifdef HAVE_NETINET_IN_H +#include +#endif + +#ifdef HAVE_RESOLV_H +#include +#endif + +#ifdef HAVE_NETDB_H +#include +#endif + +#ifdef HAVE_SYS_PRCTL_H +#include +#endif + +#ifdef HAVE_SYS_STAT_H +#include +#endif + +#ifdef HAVE_SYS_VFS_H +#include +#endif + +#ifdef HAVE_SYS_STATFS_H +#include +#endif + +#ifdef HAVE_LINUX_MAGIC_H +#include +#endif + +#ifdef HAVE_SYS_MOUNT_H +#include +#endif + +#ifdef HAVE_SYS_STATVFS_H +#include +#endif + +// #1408 +#ifdef MAJOR_IN_MKDEV +#include +#endif +#ifdef MAJOR_IN_SYSMACROS +#include +#endif + +#include +#include + +#if defined(HAVE_INTTYPES_H) +#include +#elif defined(HAVE_STDINT_H) +#include +#endif + +#include + +#ifdef HAVE_SYS_CAPABILITY_H +#include +#endif + +#define XXH_INLINE_ALL +#include "xxHash/xxhash.h" + +// -------------------------------------------------------------------------------------------------------------------- +// OpenSSL + +#define OPENSSL_VERSION_095 0x00905100L +#define OPENSSL_VERSION_097 0x0907000L +#define OPENSSL_VERSION_110 0x10100000L +#define OPENSSL_VERSION_111 0x10101000L +#define OPENSSL_VERSION_300 0x30000000L + +#include +#include +#include +#include +#include +#if (SSLEAY_VERSION_NUMBER >= OPENSSL_VERSION_097) && (OPENSSL_VERSION_NUMBER < OPENSSL_VERSION_110) +#include +#endif + +#if OPENSSL_VERSION_NUMBER >= OPENSSL_VERSION_300 +#include +#include +#endif + +// -------------------------------------------------------------------------------------------------------------------- + +#ifndef O_CLOEXEC +#define O_CLOEXEC (0) +#endif + +// -------------------------------------------------------------------------------------------------------------------- +// FUNCTION ATTRIBUTES + +#define _cleanup_(x) __attribute__((__cleanup__(x))) + +#ifdef HAVE_FUNC_ATTRIBUTE_RETURNS_NONNULL +#define NEVERNULL __attribute__((returns_nonnull)) +#else +#define NEVERNULL +#endif + +#ifdef HAVE_FUNC_ATTRIBUTE_NOINLINE +#define NOINLINE __attribute__((noinline)) +#else +#define NOINLINE +#endif + +#ifdef HAVE_FUNC_ATTRIBUTE_MALLOC +#define MALLOCLIKE __attribute__((malloc)) +#else +#define MALLOCLIKE +#endif + +#if defined(HAVE_FUNC_ATTRIBUTE_FORMAT_GNU_PRINTF) +#define PRINTFLIKE(f, a) __attribute__ ((format(gnu_printf, f, a))) +#elif defined(HAVE_FUNC_ATTRIBUTE_FORMAT_PRINTF) +#define PRINTFLIKE(f, a) __attribute__ ((format(printf, f, a))) +#else +#define PRINTFLIKE(f, a) +#endif + +#ifdef HAVE_FUNC_ATTRIBUTE_NORETURN +#define NORETURN __attribute__ ((noreturn)) +#else +#define NORETURN +#endif + +#ifdef HAVE_FUNC_ATTRIBUTE_WARN_UNUSED_RESULT +#define WARNUNUSED __attribute__ ((warn_unused_result)) +#else +#define WARNUNUSED +#endif + +#define UNUSED(x) (void)(x) + +#ifdef __GNUC__ +#define UNUSED_FUNCTION(x) __attribute__((unused)) UNUSED_##x +#else +#define UNUSED_FUNCTION(x) UNUSED_##x +#endif + +// 
-------------------------------------------------------------------------------------------------------------------- +// fix for alpine linux + +#if !defined(RUSAGE_THREAD) && defined(RUSAGE_CHILDREN) +#define RUSAGE_THREAD RUSAGE_CHILDREN +#endif + +// -------------------------------------------------------------------------------------------------------------------- +// HELPFUL MACROS + +#define ABS(x) (((x) < 0)? (-(x)) : (x)) +#define MIN(a,b) (((a)<(b))?(a):(b)) +#define MAX(a,b) (((a)>(b))?(a):(b)) +#define SWAP(a, b) do { \ + typeof(a) _tmp = b; \ + b = a; \ + a = _tmp; \ +} while(0) + +// -------------------------------------------------------------------------------------------------------------------- +// NETDATA CLOUD + +// BEWARE: this exists in alarm-notify.sh +#define DEFAULT_CLOUD_BASE_URL "https://app.netdata.cloud" + +// -------------------------------------------------------------------------------------------------------------------- +// DBENGINE + +#define RRD_STORAGE_TIERS 5 + +// -------------------------------------------------------------------------------------------------------------------- +// PIPES + +#define PIPE_READ 0 +#define PIPE_WRITE 1 + +// -------------------------------------------------------------------------------------------------------------------- +// UUIDs + +#define GUID_LEN 36 + +// -------------------------------------------------------------------------------------------------------------------- +// Macro-only includes + +#include "linked_lists/linked_lists.h" + +// -------------------------------------------------------------------------------------------------------------------- + +// Taken from linux kernel +#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) + +// -------------------------------------------------------------------------------------------------------------------- + +#if defined(OS_WINDOWS) +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +// #include // conflicts on STRING, +#endif + +# ifdef __cplusplus +} +# endif + +#endif //LIBNETDATA_COMMON_H diff --git a/src/libnetdata/config/README.md b/src/libnetdata/config/README.md index 665a7196c..fb1473c8c 100644 --- a/src/libnetdata/config/README.md +++ b/src/libnetdata/config/README.md @@ -1,12 +1,3 @@ - - # Netdata ini config files Configuration files `netdata.conf` and `stream.conf` are Netdata ini files. 
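The HELPFUL MACROS block in the `common.h` hunk above defines `SWAP` via the GCC/Clang `typeof` extension and a kernel-style `BUILD_BUG_ON` compile-time assertion. A small self-contained check of how they behave; the macros are copied from the hunk, while the `main()` harness is illustrative only:

```c
#include <stdint.h>
#include <stdio.h>

#define SWAP(a, b) do { \
    typeof(a) _tmp = b; \
    b = a;              \
    a = _tmp;           \
} while(0)

// Expands to a negative array size -- a compile error -- when the
// condition is true; compiles to nothing when it is false.
#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))

int main(void) {
    BUILD_BUG_ON(sizeof(uint64_t) != 8); // fine: uint64_t is always 8 bytes

    int a = 1, b = 2;
    SWAP(a, b);
    printf("a=%d b=%d\n", a, b);         // prints: a=2 b=1
    return 0;
}
```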
diff --git a/src/libnetdata/config/appconfig.c b/src/libnetdata/config/appconfig.c index 81946b594..f26417ac3 100644 --- a/src/libnetdata/config/appconfig.c +++ b/src/libnetdata/config/appconfig.c @@ -1,961 +1,82 @@ // SPDX-License-Identifier: GPL-3.0-or-later -#include "../libnetdata.h" +#include "appconfig_internals.h" -/* - * @Input: - * Connector / instance to add to an internal structure - * @Return - * The current head of the linked list of connector_instance - * - */ - -_CONNECTOR_INSTANCE *add_connector_instance(struct section *connector, struct section *instance) -{ - static struct _connector_instance *global_connector_instance = NULL; - struct _connector_instance *local_ci, *local_ci_tmp; - - if (unlikely(!connector)) { - if (unlikely(!instance)) - return global_connector_instance; - - local_ci = global_connector_instance; - while (local_ci) { - local_ci_tmp = local_ci->next; - freez(local_ci); - local_ci = local_ci_tmp; - } - global_connector_instance = NULL; - return NULL; - } - - local_ci = callocz(1, sizeof(struct _connector_instance)); - local_ci->instance = instance; - local_ci->connector = connector; - strncpyz(local_ci->instance_name, instance->name, CONFIG_MAX_NAME); - strncpyz(local_ci->connector_name, connector->name, CONFIG_MAX_NAME); - local_ci->next = global_connector_instance; - global_connector_instance = local_ci; - - return global_connector_instance; -} - -int is_valid_connector(char *type, int check_reserved) -{ - int rc = 1; - - if (unlikely(!type)) - return 0; - - if (!check_reserved) { - if (unlikely(is_valid_connector(type,1))) { - return 0; - } - //if (unlikely(*type == ':') - // return 0; - char *separator = strrchr(type, ':'); - if (likely(separator)) { - *separator = '\0'; - rc = separator - type; - } else - return 0; - } -// else { -// if (unlikely(is_valid_connector(type,1))) { -// netdata_log_error("Section %s invalid -- reserved name", type); -// return 0; -// } -// } - - if (!strcmp(type, "graphite") || !strcmp(type, "graphite:plaintext")) { - return rc; - } else if (!strcmp(type, "graphite:http") || !strcmp(type, "graphite:https")) { - return rc; - } else if (!strcmp(type, "json") || !strcmp(type, "json:plaintext")) { - return rc; - } else if (!strcmp(type, "json:http") || !strcmp(type, "json:https")) { - return rc; - } else if (!strcmp(type, "opentsdb") || !strcmp(type, "opentsdb:telnet")) { - return rc; - } else if (!strcmp(type, "opentsdb:http") || !strcmp(type, "opentsdb:https")) { - return rc; - } else if (!strcmp(type, "prometheus_remote_write")) { - return rc; - } else if (!strcmp(type, "prometheus_remote_write:http") || !strcmp(type, "prometheus_remote_write:https")) { - return rc; - } else if (!strcmp(type, "kinesis") || !strcmp(type, "kinesis:plaintext")) { - return rc; - } else if (!strcmp(type, "pubsub") || !strcmp(type, "pubsub:plaintext")) { - return rc; - } else if (!strcmp(type, "mongodb") || !strcmp(type, "mongodb:plaintext")) { - return rc; - } - - return 0; -} - -// ---------------------------------------------------------------------------- -// locking - -inline void appconfig_wrlock(struct config *root) { - netdata_mutex_lock(&root->mutex); -} - -inline void appconfig_unlock(struct config *root) { - netdata_mutex_unlock(&root->mutex); -} - -inline void config_section_wrlock(struct section *co) { - netdata_mutex_lock(&co->mutex); -} - -inline void config_section_unlock(struct section *co) { - netdata_mutex_unlock(&co->mutex); -} - - -// ---------------------------------------------------------------------------- -// config 
-
-static int appconfig_option_compare(void *a, void *b) {
-    if(((struct config_option *)a)->hash < ((struct config_option *)b)->hash) return -1;
-    else if(((struct config_option *)a)->hash > ((struct config_option *)b)->hash) return 1;
-    else return strcmp(((struct config_option *)a)->name, ((struct config_option *)b)->name);
-}
-
-#define appconfig_option_index_add(co, cv) (struct config_option *)avl_insert_lock(&((co)->values_index), (avl_t *)(cv))
-#define appconfig_option_index_del(co, cv) (struct config_option *)avl_remove_lock(&((co)->values_index), (avl_t *)(cv))
-
-static struct config_option *appconfig_option_index_find(struct section *co, const char *name, uint32_t hash) {
-    struct config_option tmp;
-    tmp.hash = (hash)?hash:simple_hash(name);
-    tmp.name = (char *)name;
-
-    return (struct config_option *)avl_search_lock(&(co->values_index), (avl_t *) &tmp);
-}
-
-
-// ----------------------------------------------------------------------------
-// config sections index
-
-int appconfig_section_compare(void *a, void *b) {
-    if(((struct section *)a)->hash < ((struct section *)b)->hash) return -1;
-    else if(((struct section *)a)->hash > ((struct section *)b)->hash) return 1;
-    else return strcmp(((struct section *)a)->name, ((struct section *)b)->name);
-}
-
-#define appconfig_index_add(root, cfg) (struct section *)avl_insert_lock(&(root)->index, (avl_t *)(cfg))
-#define appconfig_index_del(root, cfg) (struct section *)avl_remove_lock(&(root)->index, (avl_t *)(cfg))
-
-static struct section *appconfig_index_find(struct config *root, const char *name, uint32_t hash) {
-    struct section tmp;
-    tmp.hash = (hash)?hash:simple_hash(name);
-    tmp.name = (char *)name;
-
-    return (struct section *)avl_search_lock(&root->index, (avl_t *) &tmp);
-}
-
-
-// ----------------------------------------------------------------------------
-// config section methods
-
-static inline struct section *appconfig_section_find(struct config *root, const char *section) {
-    return appconfig_index_find(root, section, 0);
-}
-
-static inline struct section *appconfig_section_create(struct config *root, const char *section) {
-    netdata_log_debug(D_CONFIG, "Creating section '%s'.", section);
-
-    struct section *co = callocz(1, sizeof(struct section));
-    co->name = strdupz(section);
-    co->hash = simple_hash(co->name);
-    netdata_mutex_init(&co->mutex);
-
-    avl_init_lock(&co->values_index, appconfig_option_compare);
-
-    if(unlikely(appconfig_index_add(root, co) != co))
-        netdata_log_error("INTERNAL ERROR: indexing of section '%s', already exists.", co->name);
+int appconfig_exists(struct config *root, const char *section, const char *name) {
+    struct config_section *sect = appconfig_section_find(root, section);
+    if(!sect) return 0;
 
-    appconfig_wrlock(root);
-    struct section *co2 = root->last_section;
-    if(co2) {
-        co2->next = co;
-    } else {
-        root->first_section = co;
-    }
-    root->last_section = co;
-    appconfig_unlock(root);
+    struct config_option *opt = appconfig_option_find(sect, name);
+    if(!opt) return 0;
 
-    return co;
+    return 1;
 }
 
-void appconfig_section_destroy_non_loaded(struct config *root, const char *section)
-{
-    struct section *co;
-    struct config_option *cv, *cv_next;
-
-    netdata_log_debug(D_CONFIG, "Destroying section '%s'.", section);
-
-    co = appconfig_section_find(root, section);
-    if(!co) {
-        netdata_log_error("Could not destroy section '%s'. Not found.", section);
Not found.", section); +void appconfig_set_default_raw_value(struct config *root, const char *section, const char *name, const char *value) { + struct config_section *sect = appconfig_section_find(root, section); + if(!sect) { + appconfig_set_raw_value(root, section, name, value, CONFIG_VALUE_TYPE_UNKNOWN); return; } - config_section_wrlock(co); - for(cv = co->values; cv ; cv = cv->next) { - if (cv->flags & CONFIG_VALUE_LOADED) { - /* Do not destroy values that were loaded from the configuration files. */ - config_section_unlock(co); - return; - } - } - for(cv = co->values ; cv ; cv = cv_next) { - cv_next = cv->next; - if(unlikely(!appconfig_option_index_del(co, cv))) - netdata_log_error("Cannot remove config option '%s' from section '%s'.", cv->name, co->name); - freez(cv->value); - freez(cv->name); - freez(cv); - } - co->values = NULL; - config_section_unlock(co); - - if (unlikely(!appconfig_index_del(root, co))) { - netdata_log_error("Cannot remove section '%s' from config.", section); + struct config_option *opt = appconfig_option_find(sect, name); + if(!opt) { + appconfig_set_raw_value(root, section, name, value, CONFIG_VALUE_TYPE_UNKNOWN); return; } - - appconfig_wrlock(root); - - if (root->first_section == co) { - root->first_section = co->next; - - if (root->last_section == co) - root->last_section = root->first_section; - } else { - struct section *co_cur = root->first_section, *co_prev = NULL; - while(co_cur && co_cur != co) { - co_prev = co_cur; - co_cur = co_cur->next; - } - - if (co_cur) { - co_prev->next = co_cur->next; + opt->flags |= CONFIG_VALUE_USED; - if (root->last_section == co_cur) - root->last_section = co_prev; - } - } - - appconfig_unlock(root); - - avl_destroy_lock(&co->values_index); - freez(co->name); - pthread_mutex_destroy(&co->mutex); - freez(co); -} - -void appconfig_section_option_destroy_non_loaded(struct config *root, const char *section, const char *name) -{ - netdata_log_debug(D_CONFIG, "Destroying section option '%s -> %s'.", section, name); - - struct section *co; - co = appconfig_section_find(root, section); - if (!co) { - netdata_log_error("Could not destroy section option '%s -> %s'. The section not found.", section, name); - return; - } - - config_section_wrlock(co); - - struct config_option *cv; - - cv = appconfig_option_index_find(co, name, simple_hash(name)); - - if (cv && cv->flags & CONFIG_VALUE_LOADED) { - config_section_unlock(co); - return; - } - - if (unlikely(!(cv && appconfig_option_index_del(co, cv)))) { - config_section_unlock(co); - netdata_log_error("Could not destroy section option '%s -> %s'. 
The option not found.", section, name); + if(opt->flags & CONFIG_VALUE_LOADED) return; - } - - if (co->values == cv) { - co->values = co->values->next; - } else { - struct config_option *cv_cur = co->values, *cv_prev = NULL; - while (cv_cur && cv_cur != cv) { - cv_prev = cv_cur; - cv_cur = cv_cur->next; - } - if (cv_cur) { - cv_prev->next = cv_cur->next; - } - } - - freez(cv->value); - freez(cv->name); - freez(cv); - - config_section_unlock(co); - return; -} - -// ---------------------------------------------------------------------------- -// config name-value methods - -static inline struct config_option *appconfig_value_create(struct section *co, const char *name, const char *value) { - netdata_log_debug(D_CONFIG, "Creating config entry for name '%s', value '%s', in section '%s'.", name, value, co->name); - - struct config_option *cv = callocz(1, sizeof(struct config_option)); - cv->name = strdupz(name); - cv->hash = simple_hash(cv->name); - cv->value = strdupz(value); - - struct config_option *found = appconfig_option_index_add(co, cv); - if(found != cv) { - netdata_log_error("indexing of config '%s' in section '%s': already exists - using the existing one.", cv->name, co->name); - freez(cv->value); - freez(cv->name); - freez(cv); - return found; - } - - config_section_wrlock(co); - struct config_option *cv2 = co->values; - if(cv2) { - while (cv2->next) cv2 = cv2->next; - cv2->next = cv; - } - else co->values = cv; - config_section_unlock(co); - - return cv; -} - -int appconfig_exists(struct config *root, const char *section, const char *name) { - struct config_option *cv; - - netdata_log_debug(D_CONFIG, "request to get config in section '%s', name '%s'", section, name); - - struct section *co = appconfig_section_find(root, section); - if(!co) return 0; - - cv = appconfig_option_index_find(co, name, 0); - if(!cv) return 0; - - return 1; -} - -int appconfig_move(struct config *root, const char *section_old, const char *name_old, const char *section_new, const char *name_new) { - struct config_option *cv_old, *cv_new; - int ret = -1; - - netdata_log_debug(D_CONFIG, "request to rename config in section '%s', old name '%s', to section '%s', new name '%s'", section_old, name_old, section_new, name_new); - - struct section *co_old = appconfig_section_find(root, section_old); - if(!co_old) return ret; - - struct section *co_new = appconfig_section_find(root, section_new); - if(!co_new) co_new = appconfig_section_create(root, section_new); - - config_section_wrlock(co_old); - if(co_old != co_new) - config_section_wrlock(co_new); - - cv_old = appconfig_option_index_find(co_old, name_old, 0); - if(!cv_old) goto cleanup; - - cv_new = appconfig_option_index_find(co_new, name_new, 0); - if(cv_new) goto cleanup; - - if(unlikely(appconfig_option_index_del(co_old, cv_old) != cv_old)) - netdata_log_error("INTERNAL ERROR: deletion of config '%s' from section '%s', deleted the wrong config entry.", cv_old->name, co_old->name); - - if(co_old->values == cv_old) { - co_old->values = cv_old->next; - } - else { - struct config_option *t; - for(t = co_old->values; t && t->next != cv_old ;t = t->next) ; - if(!t || t->next != cv_old) - netdata_log_error("INTERNAL ERROR: cannot find variable '%s' in section '%s' of the config - but it should be there.", cv_old->name, co_old->name); - else - t->next = cv_old->next; - } - - freez(cv_old->name); - cv_old->name = strdupz(name_new); - cv_old->hash = simple_hash(cv_old->name); - - cv_new = cv_old; - cv_new->next = co_new->values; - co_new->values = cv_new; - - 
-    if(unlikely(appconfig_option_index_add(co_new, cv_old) != cv_old))
-        netdata_log_error("INTERNAL ERROR: re-indexing of config '%s' in section '%s', already exists.", cv_old->name, co_new->name);
-
-    ret = 0;
-
-cleanup:
-    if(co_old != co_new)
-        config_section_unlock(co_new);
-    config_section_unlock(co_old);
-    return ret;
-}
-
-char *appconfig_get_by_section(struct section *co, const char *name, const char *default_value)
-{
-    struct config_option *cv;
-
-    // Only calls internal to this file check for a NULL result and they do not supply a NULL arg.
-    // External caller should treat NULL as an error case.
-    cv = appconfig_option_index_find(co, name, 0);
-    if (!cv) {
-        if (!default_value) return NULL;
-        cv = appconfig_value_create(co, name, default_value);
-        if (!cv) return NULL;
-    }
-    cv->flags |= CONFIG_VALUE_USED;
+    if(string_strcmp(opt->value, value) != 0) {
+        opt->flags |= CONFIG_VALUE_CHANGED;
 
-    if((cv->flags & CONFIG_VALUE_LOADED) || (cv->flags & CONFIG_VALUE_CHANGED)) {
-        // this is a loaded value from the config file
-        // if it is different than the default, mark it
-        if(!(cv->flags & CONFIG_VALUE_CHECKED)) {
-            if(default_value && strcmp(cv->value, default_value) != 0) cv->flags |= CONFIG_VALUE_CHANGED;
-            cv->flags |= CONFIG_VALUE_CHECKED;
-        }
+        string_freez(opt->value);
+        opt->value = string_strdupz(value);
     }
-
-    return(cv->value);
 }
 
+bool stream_conf_needs_dbengine(struct config *root) {
+    struct config_section *sect;
+    bool ret = false;
 
-char *appconfig_get(struct config *root, const char *section, const char *name, const char *default_value)
-{
-    if (default_value == NULL)
-        netdata_log_debug(D_CONFIG, "request to get config in section '%s', name '%s' or fail", section, name);
-    else
-        netdata_log_debug(D_CONFIG, "request to get config in section '%s', name '%s', default_value '%s'", section, name, default_value);
-
-    struct section *co = appconfig_section_find(root, section);
-    if (!co && !default_value)
-        return NULL;
-    if(!co) co = appconfig_section_create(root, section);
-
-    return appconfig_get_by_section(co, name, default_value);
-}
+    APPCONFIG_LOCK(root);
+    for(sect = root->sections; sect; sect = sect->next) {
+        if(string_strcmp(sect->name, "stream") == 0)
+            continue; // the first section is not relevant
 
-long long appconfig_get_number(struct config *root, const char *section, const char *name, long long value)
-{
-    char buffer[100], *s;
-    sprintf(buffer, "%lld", value);
-
-    s = appconfig_get(root, section, name, buffer);
-    if(!s) return value;
-
-    return strtoll(s, NULL, 0);
-}
-
-NETDATA_DOUBLE appconfig_get_float(struct config *root, const char *section, const char *name, NETDATA_DOUBLE value)
-{
-    char buffer[100], *s;
-    sprintf(buffer, "%0.5" NETDATA_DOUBLE_MODIFIER, value);
-
-    s = appconfig_get(root, section, name, buffer);
-    if(!s) return value;
-
-    return str2ndd(s, NULL);
-}
-
-inline int appconfig_test_boolean_value(char *s) {
-    if(!strcasecmp(s, "yes") || !strcasecmp(s, "true") || !strcasecmp(s, "on")
-       || !strcasecmp(s, "auto") || !strcasecmp(s, "on demand"))
-        return 1;
-
-    return 0;
-}
-
-int appconfig_get_boolean_by_section(struct section *co, const char *name, int value) {
-    char *s;
-
-    s = appconfig_get_by_section(co, name, (!value)?"no":"yes");
-    if(!s) return value;
-
-    return appconfig_test_boolean_value(s);
-}
-
-int appconfig_get_boolean(struct config *root, const char *section, const char *name, int value)
-{
-    char *s;
-    if(value) s = "yes";
-    else s = "no";
-
-    s = appconfig_get(root, section, name, s);
-    if(!s) return value;
-
-    return appconfig_test_boolean_value(s);
-}
-
-int appconfig_get_boolean_ondemand(struct config *root, const char *section, const char *name, int value)
-{
-    char *s;
-
-    if(value == CONFIG_BOOLEAN_AUTO)
-        s = "auto";
-
-    else if(value == CONFIG_BOOLEAN_NO)
-        s = "no";
-
-    else
-        s = "yes";
-
-    s = appconfig_get(root, section, name, s);
-    if(!s) return value;
-
-    if(!strcmp(s, "yes") || !strcmp(s, "true") || !strcmp(s, "on"))
-        return CONFIG_BOOLEAN_YES;
-    else if(!strcmp(s, "no") || !strcmp(s, "false") || !strcmp(s, "off"))
-        return CONFIG_BOOLEAN_NO;
-    else if(!strcmp(s, "auto") || !strcmp(s, "on demand"))
-        return CONFIG_BOOLEAN_AUTO;
-
-    return value;
-}
-
-const char *appconfig_set_default(struct config *root, const char *section, const char *name, const char *value)
-{
-    struct config_option *cv;
-
-    netdata_log_debug(D_CONFIG, "request to set default config in section '%s', name '%s', value '%s'", section, name, value);
-
-    struct section *co = appconfig_section_find(root, section);
-    if(!co) return appconfig_set(root, section, name, value);
-
-    cv = appconfig_option_index_find(co, name, 0);
-    if(!cv) return appconfig_set(root, section, name, value);
-
-    cv->flags |= CONFIG_VALUE_USED;
-
-    if(cv->flags & CONFIG_VALUE_LOADED)
-        return cv->value;
-
-    if(strcmp(cv->value, value) != 0) {
-        cv->flags |= CONFIG_VALUE_CHANGED;
-
-        freez(cv->value);
-        cv->value = strdupz(value);
-    }
-
-    return cv->value;
-}
-
-const char *appconfig_set(struct config *root, const char *section, const char *name, const char *value)
-{
-    struct config_option *cv;
-
-    netdata_log_debug(D_CONFIG, "request to set config in section '%s', name '%s', value '%s'", section, name, value);
-
-    struct section *co = appconfig_section_find(root, section);
-    if(!co) co = appconfig_section_create(root, section);
-
-    cv = appconfig_option_index_find(co, name, 0);
-    if(!cv) cv = appconfig_value_create(co, name, value);
-    cv->flags |= CONFIG_VALUE_USED;
-
-    if(strcmp(cv->value, value) != 0) {
-        cv->flags |= CONFIG_VALUE_CHANGED;
-
-        freez(cv->value);
-        cv->value = strdupz(value);
-    }
-
-    return value;
-}
-
-long long appconfig_set_number(struct config *root, const char *section, const char *name, long long value)
-{
-    char buffer[100];
-    sprintf(buffer, "%lld", value);
-
-    appconfig_set(root, section, name, buffer);
-
-    return value;
-}
-
-NETDATA_DOUBLE appconfig_set_float(struct config *root, const char *section, const char *name, NETDATA_DOUBLE value)
-{
-    char buffer[100];
-    sprintf(buffer, "%0.5" NETDATA_DOUBLE_MODIFIER, value);
-
-    appconfig_set(root, section, name, buffer);
-
-    return value;
-}
-
-int appconfig_set_boolean(struct config *root, const char *section, const char *name, int value)
-{
-    char *s;
-    if(value) s = "yes";
-    else s = "no";
-
-    appconfig_set(root, section, name, s);
-
-    return value;
-}
-
-int appconfig_get_duration(struct config *root, const char *section, const char *name, const char *value)
-{
-    int result = 0;
-    const char *s;
-
-    s = appconfig_get(root, section, name, value);
-    if(!s) goto fallback;
-
-    if(!config_parse_duration(s, &result)) {
-        netdata_log_error("config option '[%s].%s = %s' is configured with an valid duration", section, name, s);
-        goto fallback;
-    }
-
-    return result;
-
-fallback:
-    if(!config_parse_duration(value, &result))
-        netdata_log_error("INTERNAL ERROR: default duration supplied for option '[%s].%s = %s' is not a valid duration", section, name, value);
-
-    return result;
-}
-
-// ----------------------------------------------------------------------------
-// config load/save
-
-int appconfig_load(struct config *root, char *filename, int overwrite_used, const char *section_name)
-{
-    int line = 0;
-    struct section *co = NULL;
-    int is_exporter_config = 0;
-    int _connectors = 0;              // number of exporting connector sections we have
-    char working_instance[CONFIG_MAX_NAME + 1];
-    char working_connector[CONFIG_MAX_NAME + 1];
-    struct section *working_connector_section = NULL;
-    int global_exporting_section = 0;
-
-    char buffer[CONFIG_FILE_LINE_MAX + 1], *s;
-
-    if(!filename) filename = CONFIG_DIR "/" CONFIG_FILENAME;
-
-    netdata_log_debug(D_CONFIG, "CONFIG: opening config file '%s'", filename);
-
-    FILE *fp = fopen(filename, "r");
-    if(!fp) {
-        // netdata_log_info("CONFIG: cannot open file '%s'. Using internal defaults.", filename);
-        return 0;
-    }
-
-    uint32_t section_hash = 0;
-    if(section_name) {
-        section_hash = simple_hash(section_name);
-    }
-    is_exporter_config = (strstr(filename, EXPORTING_CONF) != NULL);
-
-    while(fgets(buffer, CONFIG_FILE_LINE_MAX, fp) != NULL) {
-        buffer[CONFIG_FILE_LINE_MAX] = '\0';
-        line++;
-
-        s = trim(buffer);
-        if(!s || *s == '#') {
-            netdata_log_debug(D_CONFIG, "CONFIG: ignoring line %d of file '%s', it is empty.", line, filename);
-            continue;
-        }
-
-        int len = (int) strlen(s);
-        if(*s == '[' && s[len - 1] == ']') {
-            // new section
-            s[len - 1] = '\0';
-            s++;
-
-            if (is_exporter_config) {
-                global_exporting_section =
-                    !(strcmp(s, CONFIG_SECTION_EXPORTING)) || !(strcmp(s, CONFIG_SECTION_PROMETHEUS));
-                if (unlikely(!global_exporting_section)) {
-                    int rc;
-                    rc = is_valid_connector(s, 0);
-                    if (likely(rc)) {
-                        strncpyz(working_connector, s, CONFIG_MAX_NAME);
-                        s = s + rc + 1;
-                        if (unlikely(!(*s))) {
-                            _connectors++;
-                            sprintf(buffer, "instance_%d", _connectors);
-                            s = buffer;
-                        }
-                        strncpyz(working_instance, s, CONFIG_MAX_NAME);
-                        working_connector_section = NULL;
-                        if (unlikely(appconfig_section_find(root, working_instance))) {
-                            netdata_log_error("Instance (%s) already exists", working_instance);
-                            co = NULL;
-                            continue;
-                        }
-                    } else {
-                        co = NULL;
-                        netdata_log_error("Section (%s) does not specify a valid connector", s);
-                        continue;
-                    }
-                }
-            }
-
-            co = appconfig_section_find(root, s);
-            if(!co) co = appconfig_section_create(root, s);
-
-            if(co && section_name && overwrite_used && section_hash == co->hash && !strcmp(section_name, co->name)) {
-                config_section_wrlock(co);
-                struct config_option *cv2 = co->values;
-                while (cv2) {
-                    struct config_option *save = cv2->next;
-                    struct config_option *found = appconfig_option_index_del(co, cv2);
-                    if(found != cv2)
-                        netdata_log_error("INTERNAL ERROR: Cannot remove '%s' from section '%s', it was not inserted before.",
-                                          cv2->name, co->name);
-
-                    freez(cv2->name);
-                    freez(cv2->value);
-                    freez(cv2);
-                    cv2 = save;
-                }
-                co->values = NULL;
-                config_section_unlock(co);
-            }
-
-            continue;
-        }
-
-        if(!co) {
-            // line outside a section
-            netdata_log_error("CONFIG: ignoring line %d ('%s') of file '%s', it is outside all sections.", line, s, filename);
-            continue;
-        }
-
-        if(section_name && overwrite_used && section_hash != co->hash && strcmp(section_name, co->name)) {
-            continue;
-        }
-
-        char *name = s;
-        char *value = strchr(s, '=');
-        if(!value) {
-            netdata_log_error("CONFIG: ignoring line %d ('%s') of file '%s', there is no = in it.", line, s, filename);
-            continue;
-        }
-        *value = '\0';
-        value++;
-
-        name = trim(name);
-        value = trim(value);
-
-        if(!name || *name == '#') {
-            netdata_log_error("CONFIG: ignoring line %d of file '%s', name is empty.", line, filename);
+        struct config_option *opt = appconfig_get_raw_value_of_option_in_section(sect, "enabled", NULL, CONFIG_VALUE_TYPE_UNKNOWN, NULL);
+        if(!opt || !appconfig_test_boolean_value(string2str(opt->value)))
            continue;
-        }
-
-        if(!value) value = "";
-
-        struct config_option *cv = appconfig_option_index_find(co, name, 0);
-
-        if (!cv) {
-            cv = appconfig_value_create(co, name, value);
-            if (likely(is_exporter_config) && unlikely(!global_exporting_section)) {
-                if (unlikely(!working_connector_section)) {
-                    working_connector_section = appconfig_section_find(root, working_connector);
-                    if (!working_connector_section)
-                        working_connector_section = appconfig_section_create(root, working_connector);
-                    if (likely(working_connector_section)) {
-                        add_connector_instance(working_connector_section, co);
-                    }
-                }
-            }
-        } else {
-            if (((cv->flags & CONFIG_VALUE_USED) && overwrite_used) || !(cv->flags & CONFIG_VALUE_USED)) {
-                netdata_log_debug(
-                    D_CONFIG, "CONFIG: line %d of file '%s', overwriting '%s/%s'.", line, filename, co->name, cv->name);
-                freez(cv->value);
-                cv->value = strdupz(value);
-            } else
-                netdata_log_debug(
-                    D_CONFIG,
-                    "CONFIG: ignoring line %d of file '%s', '%s/%s' is already present and used.",
-                    line,
-                    filename,
-                    co->name,
-                    cv->name);
-        }
-        cv->flags |= CONFIG_VALUE_LOADED;
-    }
-    fclose(fp);
-
-    return 1;
-}
-
-void appconfig_generate(struct config *root, BUFFER *wb, int only_changed)
-{
-    int i, pri;
-    struct section *co;
-    struct config_option *cv;
-
-    {
-        int found_host_labels = 0;
-        for (co = root->first_section; co; co = co->next)
-            if(!strcmp(co->name, CONFIG_SECTION_HOST_LABEL))
-                found_host_labels = 1;
-
-        if(!found_host_labels) {
-            appconfig_section_create(root, CONFIG_SECTION_HOST_LABEL);
-            appconfig_get(root, CONFIG_SECTION_HOST_LABEL, "name", "value");
+        opt = appconfig_get_raw_value_of_option_in_section(sect, "db", NULL, CONFIG_VALUE_TYPE_UNKNOWN, NULL);
+        if(opt && string_strcmp(opt->value, "dbengine") == 0) {
+            ret = true;
+            break;
         }
     }
+    APPCONFIG_UNLOCK(root);
 
-    buffer_strcat(wb,
-                  "# netdata configuration\n"
-                  "#\n"
-                  "# You can download the latest version of this file, using:\n"
-                  "#\n"
-                  "#  wget -O /etc/netdata/netdata.conf http://localhost:19999/netdata.conf\n"
-                  "# or\n"
-                  "#  curl -o /etc/netdata/netdata.conf http://localhost:19999/netdata.conf\n"
-                  "#\n"
-                  "# You can uncomment and change any of the options below.\n"
-                  "# The value shown in the commented settings, is the default value.\n"
-                  "#\n"
-                  "\n# global netdata configuration\n");
-
-    for(i = 0; i <= 17 ;i++) {
-        appconfig_wrlock(root);
-        for(co = root->first_section; co ; co = co->next) {
-            if(!strcmp(co->name, CONFIG_SECTION_GLOBAL)) pri = 0;
-            else if(!strcmp(co->name, CONFIG_SECTION_DB)) pri = 1;
-            else if(!strcmp(co->name, CONFIG_SECTION_DIRECTORIES)) pri = 2;
-            else if(!strcmp(co->name, CONFIG_SECTION_LOGS)) pri = 3;
-            else if(!strcmp(co->name, CONFIG_SECTION_ENV_VARS)) pri = 4;
-            else if(!strcmp(co->name, CONFIG_SECTION_HOST_LABEL)) pri = 5;
-            else if(!strcmp(co->name, CONFIG_SECTION_SQLITE)) pri = 6;
-            else if(!strcmp(co->name, CONFIG_SECTION_CLOUD)) pri = 7;
-            else if(!strcmp(co->name, CONFIG_SECTION_ML)) pri = 8;
-            else if(!strcmp(co->name, CONFIG_SECTION_HEALTH)) pri = 9;
-            else if(!strcmp(co->name, CONFIG_SECTION_WEB)) pri = 10;
-            else if(!strcmp(co->name, CONFIG_SECTION_WEBRTC)) pri = 11;
-            // by default, new sections will get pri = 12 (set at the end, below)
-            else if(!strcmp(co->name, CONFIG_SECTION_REGISTRY)) pri = 13;
-            else if(!strcmp(co->name, CONFIG_SECTION_GLOBAL_STATISTICS)) pri = 14;
-            else if(!strcmp(co->name, CONFIG_SECTION_PLUGINS)) pri = 15;
-            else if(!strcmp(co->name, CONFIG_SECTION_STATSD)) pri = 16;
-            else if(!strncmp(co->name, "plugin:", 7)) pri = 17; // << change the loop too if you change this
-            else pri = 12; // this is used for any new (currently unknown) sections
-
-            if(i == pri) {
-                int loaded = 0;
-                int used = 0;
-                int changed = 0;
-                int count = 0;
-
-                config_section_wrlock(co);
-                for(cv = co->values; cv ; cv = cv->next) {
-                    used += (cv->flags & CONFIG_VALUE_USED)?1:0;
-                    loaded += (cv->flags & CONFIG_VALUE_LOADED)?1:0;
-                    changed += (cv->flags & CONFIG_VALUE_CHANGED)?1:0;
-                    count++;
-                }
-                config_section_unlock(co);
-
-                if(!count) continue;
-                if(only_changed && !changed && !loaded) continue;
-
-                if(!used) {
-                    buffer_sprintf(wb, "\n# section '%s' is not used.", co->name);
-                }
-
-                buffer_sprintf(wb, "\n[%s]\n", co->name);
-
-                config_section_wrlock(co);
-                for(cv = co->values; cv ; cv = cv->next) {
-
-                    if(used && !(cv->flags & CONFIG_VALUE_USED)) {
-                        buffer_sprintf(wb, "\n\t# option '%s' is not used.\n", cv->name);
-                    }
-                    buffer_sprintf(wb, "\t%s%s = %s\n", ((!(cv->flags & CONFIG_VALUE_LOADED)) && (!(cv->flags & CONFIG_VALUE_CHANGED)) && (cv->flags & CONFIG_VALUE_USED))?"# ":"", cv->name, cv->value);
-                }
-                config_section_unlock(co);
-            }
-        }
-        appconfig_unlock(root);
-    }
+    return ret;
 }
 
-/**
- * Parse Duration
- *
- * Parse the string setting the result
- *
- * @param string the timestamp string
- * @param result the output variable
- *
- * @return It returns 1 on success and 0 otherwise
- */
-int config_parse_duration(const char* string, int* result) {
-    while(*string && isspace((uint8_t)*string)) string++;
-
-    if(unlikely(!*string)) goto fallback;
-
-    if(*string == 'n' && !strcmp(string, "never")) {
-        // this is a valid option
-        *result = 0;
-        return 1;
-    }
+bool stream_conf_has_uuid_section(struct config *root) {
+    struct config_section *sect = NULL;
+    bool is_parent = false;
 
-    // make sure it is a number
-    if(!(isdigit((uint8_t)*string) || *string == '+' || *string == '-')) goto fallback;
+    APPCONFIG_LOCK(root);
+    for (sect = root->sections; sect; sect = sect->next) {
+        nd_uuid_t uuid;
 
-    char *e = NULL;
-    NETDATA_DOUBLE n = str2ndd(string, &e);
-    if(e && *e) {
-        switch (*e) {
-            case 'Y':
-                *result = (int) (n * 31536000);
-                break;
-            case 'M':
-                *result = (int) (n * 2592000);
-                break;
-            case 'w':
-                *result = (int) (n * 604800);
-                break;
-            case 'd':
-                *result = (int) (n * 86400);
-                break;
-            case 'h':
-                *result = (int) (n * 3600);
-                break;
-            case 'm':
-                *result = (int) (n * 60);
-                break;
-            case 's':
-            default:
-                *result = (int) (n);
-                break;
+        if (uuid_parse(string2str(sect->name), uuid) != -1 &&
+            appconfig_get_boolean_by_section(sect, "enabled", 0)) {
+            is_parent = true;
+            break;
         }
     }
-    else
-        *result = (int)(n);
-
-    return 1;
-
-fallback:
-    *result = 0;
-    return 0;
-}
+    APPCONFIG_UNLOCK(root);
 
-struct section *appconfig_get_section(struct config *root, const char *name)
-{
-    return appconfig_section_find(root, name);
+    return is_parent;
 }
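Between the two files, a hedged sketch of how the two new stream.conf helpers just added might be called; `demo_streaming_role()` and the surrounding logic are assumptions for illustration, not code from this patch.

```c
// Sketch only: assumes stream.conf was parsed into a struct config with
// appconfig_load(); only the two helper signatures come from this patch.
void demo_streaming_role(struct config *stream_config) {
    // true when an enabled machine-GUID section exists,
    // i.e. this node is configured to accept streamed metrics (a parent)
    if(stream_conf_has_uuid_section(stream_config)) {
        // true when any receiving section asks for "db = dbengine"
        if(stream_conf_needs_dbengine(stream_config)) {
            // a real caller would verify dbengine support here
        }
    }
}
```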
diff --git a/src/libnetdata/config/appconfig.h b/src/libnetdata/config/appconfig.h
index 214a15edd..f1551b387 100644
--- a/src/libnetdata/config/appconfig.h
+++ b/src/libnetdata/config/appconfig.h
@@ -103,7 +103,6 @@ #define CONFIG_SECTION_GLOBAL_STATISTICS "global statistics"
 #define CONFIG_SECTION_DB "db"
 
-
 // these are used to limit the configuration names and values lengths
 // they are not enforced by config.c functions (they will strdup() all strings, no matter of their length)
 #define CONFIG_MAX_NAME 1024
@@ -113,94 +112,43 @@ // Config definitions
 #define CONFIG_FILE_LINE_MAX ((CONFIG_MAX_NAME + CONFIG_MAX_VALUE + 1024) * 2)
 
-#define CONFIG_VALUE_LOADED  0x01 // has been loaded from the config
-#define CONFIG_VALUE_USED    0x02 // has been accessed from the program
-#define CONFIG_VALUE_CHANGED 0x04 // has been changed from the loaded value or the internal default value
-#define CONFIG_VALUE_CHECKED 0x08 // has been checked if the value is different from the default
-
-struct config_option {
-    avl_t avl_node;         // the index entry of this entry - this has to be first!
-
-    uint8_t flags;
-    uint32_t hash;          // a simple hash to speed up searching
-                            // we first compare hashes, and only if the hashes are equal we do string comparisons
-
-    char *name;
-    char *value;
-
-    struct config_option *next; // config->mutex protects just this
-};
-
-struct section {
-    avl_t avl_node;         // the index entry of this section - this has to be first!
-
-    uint32_t hash;          // a simple hash to speed up searching
-                            // we first compare hashes, and only if the hashes are equal we do string comparisons
-
-    char *name;
-
-    struct section *next;   // global config_mutex protects just this
-
-    struct config_option *values;
-    avl_tree_lock values_index;
-
-    netdata_mutex_t mutex;  // this locks only the writers, to ensure atomic updates
-                            // readers are protected using the rwlock in avl_tree_lock
-};
+struct config_section;
 
 struct config {
-    struct section *first_section;
-    struct section *last_section;   // optimize inserting at the end
-    netdata_mutex_t mutex;
+    struct config_section *sections;
+    SPINLOCK spinlock;
    avl_tree_lock index;
 };
 
-#define CONFIG_BOOLEAN_INVALID 100 // an invalid value to check for validity (used as default initialization when needed)
+#define APPCONFIG_INITIALIZER (struct config) {         \
+        .sections = NULL,                               \
+        .spinlock = NETDATA_SPINLOCK_INITIALIZER,       \
+        .index = {                                      \
+            .avl_tree = {                               \
+                .root = NULL,                           \
+                .compar = appconfig_section_compare,    \
+            },                                          \
+            .rwlock = AVL_LOCK_INITIALIZER,             \
+        },                                              \
+    }
 
-#define CONFIG_BOOLEAN_NO   0  // disabled
-#define CONFIG_BOOLEAN_YES  1  // enabled
+int appconfig_load(struct config *root, char *filename, int overwrite_used, const char *section_name);
 
-#ifndef CONFIG_BOOLEAN_AUTO
-#define CONFIG_BOOLEAN_AUTO 2  // enabled if it has useful info when enabled
-#endif
+typedef bool (*appconfig_foreach_value_cb_t)(void *data, const char *name, const char *value);
+size_t appconfig_foreach_value_in_section(struct config *root, const char *section, appconfig_foreach_value_cb_t cb, void *data);
 
-int appconfig_load(struct config *root, char *filename, int overwrite_used, const char *section_name);
-void config_section_wrlock(struct section *co);
-void config_section_unlock(struct section *co);
-
-char *appconfig_get_by_section(struct section *co, const char *name, const char *default_value);
-char *appconfig_get(struct config *root, const char *section, const char *name, const char *default_value);
-long long appconfig_get_number(struct config *root, const char *section, const char *name, long long value);
-NETDATA_DOUBLE appconfig_get_float(struct config *root, const char *section, const char *name, NETDATA_DOUBLE value);
-int appconfig_get_boolean_by_section(struct section *co, const char *name, int value);
-int appconfig_get_boolean(struct config *root, const char *section, const char *name, int value);
-int appconfig_get_boolean_ondemand(struct config *root, const char *section, const char *name, int value);
-int appconfig_get_duration(struct config *root, const char *section, const char *name, const char *value);
-
-const char *appconfig_set(struct config *root, const char *section, const char *name, const char *value);
-const char *appconfig_set_default(struct config *root, const char *section, const char *name, const char *value);
-long long appconfig_set_number(struct config *root, const char *section, const char *name, long long value);
-NETDATA_DOUBLE appconfig_set_float(struct config *root, const char *section, const char *name, NETDATA_DOUBLE value);
-int appconfig_set_boolean(struct config *root, const char *section, const char *name, int value);
+// sets a raw value, only if it is not loaded from the config
+void appconfig_set_default_raw_value(struct config *root, const char *section, const char *name, const char *value);
 
 int appconfig_exists(struct config *root, const char *section, const char *name);
 int appconfig_move(struct config *root, const char *section_old, const char *name_old, const char *section_new, const char *name_new);
+int appconfig_move_everywhere(struct config *root, const char *name_old, const char *name_new);
 
-void appconfig_generate(struct config *root, BUFFER *wb, int only_changed);
+void appconfig_generate(struct config *root, BUFFER *wb, int only_changed, bool netdata_conf);
 
 int appconfig_section_compare(void *a, void *b);
 
-void appconfig_section_destroy_non_loaded(struct config *root, const char *section);
-void appconfig_section_option_destroy_non_loaded(struct config *root, const char *section, const char *name);
-
-int config_parse_duration(const char* string, int* result);
-
-struct section *appconfig_get_section(struct config *root, const char *name);
-
-void appconfig_wrlock(struct config *root);
-void appconfig_unlock(struct config *root);
-
-int appconfig_test_boolean_value(char *s);
+bool appconfig_test_boolean_value(const char *s);
 
 struct connector_instance {
     char instance_name[CONFIG_MAX_NAME + 1];
@@ -208,13 +156,37 @@ struct connector_instance {
 };
 
 typedef struct _connector_instance {
-    struct section *connector;        // actual connector
-    struct section *instance;         // This instance
+    struct config_section *connector;        // actual connector
+    struct config_section *instance;         // This instance
     char instance_name[CONFIG_MAX_NAME + 1];
     char connector_name[CONFIG_MAX_NAME + 1];
    struct _connector_instance *next; // Next instance
 } _CONNECTOR_INSTANCE;
 
-_CONNECTOR_INSTANCE *add_connector_instance(struct section *connector, struct section *instance);
+_CONNECTOR_INSTANCE *add_connector_instance(struct config_section *connector, struct config_section *instance);
+
+// ----------------------------------------------------------------------------
+// shortcuts for the default netdata configuration
+
+#define config_load(filename, overwrite_used, section) appconfig_load(&netdata_config, filename, overwrite_used, section)
+
+#define config_set_default_raw_value(section, name, value) appconfig_set_default_raw_value(&netdata_config, section, name, value)
+
+#define config_exists(section, name) appconfig_exists(&netdata_config, section, name)
+#define config_move(section_old, name_old, section_new, name_new) appconfig_move(&netdata_config, section_old, name_old, section_new, name_new)
+
+#define netdata_conf_generate(buffer, only_changed) appconfig_generate(&netdata_config, buffer, only_changed, true)
+
+#define config_section_destroy(section) appconfig_section_destroy_non_loaded(&netdata_config, section)
+#define config_section_option_destroy(section, name) appconfig_section_option_destroy_non_loaded(&netdata_config, section, name)
+
+bool stream_conf_needs_dbengine(struct config *root);
+bool stream_conf_has_uuid_section(struct config *root);
+
+#include "appconfig_api_text.h"
+#include "appconfig_api_numbers.h"
+#include "appconfig_api_boolean.h"
+#include "appconfig_api_sizes.h"
+#include "appconfig_api_durations.h"
 
-#endif /* NETDATA_CONFIG_H */
\ No newline at end of file
+#endif // NETDATA_CONFIG_H
diff --git a/src/libnetdata/config/appconfig_api_boolean.c b/src/libnetdata/config/appconfig_api_boolean.c
new file mode 100644
index 000000000..abe515736
--- /dev/null
+++ b/src/libnetdata/config/appconfig_api_boolean.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "appconfig_internals.h"
+#include "appconfig_api_boolean.h"
+
+bool appconfig_test_boolean_value(const char *s) {
+    if(!strcasecmp(s, "yes") || !strcasecmp(s, "true") || !strcasecmp(s, "on")
+        || !strcasecmp(s, "auto") || !strcasecmp(s, "on demand"))
+        return true;
+
+    return false;
+}
+
+int appconfig_get_boolean_by_section(struct config_section *sect, const char *name, int value) {
+    struct config_option *opt = appconfig_get_raw_value_of_option_in_section(
+        sect, name, (!value) ? "no" : "yes", CONFIG_VALUE_TYPE_BOOLEAN, NULL);
+    if(!opt) return value;
+
+    return appconfig_test_boolean_value(string2str(opt->value));
+}
+
+int appconfig_get_boolean(struct config *root, const char *section, const char *name, int value) {
+    const char *s;
+    if(value) s = "yes";
+    else s = "no";
+
+    struct config_option *opt = appconfig_get_raw_value(root, section, name, s, CONFIG_VALUE_TYPE_BOOLEAN, NULL);
+    if(!opt) return value;
+    s = string2str(opt->value);
+
+    return appconfig_test_boolean_value(s);
+}
+
+int appconfig_get_boolean_ondemand(struct config *root, const char *section, const char *name, int value) {
+    const char *s;
+
+    if(value == CONFIG_BOOLEAN_AUTO)
+        s = "auto";
+
+    else if(value == CONFIG_BOOLEAN_NO)
+        s = "no";
+
+    else
+        s = "yes";
+
+    struct config_option *opt = appconfig_get_raw_value(root, section, name, s, CONFIG_VALUE_TYPE_BOOLEAN_ONDEMAND, NULL);
+    if(!opt) return value;
+
+    s = string2str(opt->value);
+    if(!strcmp(s, "yes") || !strcmp(s, "true") || !strcmp(s, "on"))
+        return CONFIG_BOOLEAN_YES;
+    else if(!strcmp(s, "no") || !strcmp(s, "false") || !strcmp(s, "off"))
+        return CONFIG_BOOLEAN_NO;
+    else if(!strcmp(s, "auto") || !strcmp(s, "on demand"))
+        return CONFIG_BOOLEAN_AUTO;
+
+    return value;
+}
+
+int appconfig_set_boolean(struct config *root, const char *section, const char *name, int value) {
+    const char *s;
+    if(value) s = "yes";
+    else s = "no";
+
+    appconfig_set_raw_value(root, section, name, s, CONFIG_VALUE_TYPE_BOOLEAN);
+
+    return value;
+}
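A hedged usage sketch for the tri-state getter above; `netdata_config` is the global that the `config_*` macros reference, and the `[ml].enabled` option name is invented for the example.

```c
// Sketch only: shows the three possible results of the on-demand boolean.
void demo_boolean_option(void) {
    int v = appconfig_get_boolean_ondemand(&netdata_config, "ml", "enabled", CONFIG_BOOLEAN_AUTO);

    switch(v) {
        case CONFIG_BOOLEAN_YES:  /* "yes", "true", "on" */          break;
        case CONFIG_BOOLEAN_NO:   /* "no", "false", "off" */         break;
        case CONFIG_BOOLEAN_AUTO: /* "auto" or "on demand" */        break;
        default:                  /* unparsable: default returned */ break;
    }
}
```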
diff --git a/src/libnetdata/config/appconfig_api_boolean.h b/src/libnetdata/config/appconfig_api_boolean.h
new file mode 100644
index 000000000..2b05fce60
--- /dev/null
+++ b/src/libnetdata/config/appconfig_api_boolean.h
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_APPCONFIG_API_BOOLEAN_H
+#define NETDATA_APPCONFIG_API_BOOLEAN_H
+
+#define CONFIG_BOOLEAN_INVALID 100 // an invalid value to check for validity (used as default initialization when needed)
+
+#define CONFIG_BOOLEAN_NO 0   // disabled
+#define CONFIG_BOOLEAN_YES 1  // enabled
+
+#ifndef CONFIG_BOOLEAN_AUTO
+#define CONFIG_BOOLEAN_AUTO 2 // enabled if it has useful info when enabled
+#endif
+
+int appconfig_get_boolean(struct config *root, const char *section, const char *name, int value);
+#define config_get_boolean(section, name, value) appconfig_get_boolean(&netdata_config, section, name, value)
+
+int appconfig_get_boolean_ondemand(struct config *root, const char *section, const char *name, int value);
+#define config_get_boolean_ondemand(section, name, value) appconfig_get_boolean_ondemand(&netdata_config, section, name, value)
+
+int appconfig_set_boolean(struct config *root, const char *section, const char *name, int value);
+#define config_set_boolean(section, name, value) appconfig_set_boolean(&netdata_config, section, name, value)
+
+#endif //NETDATA_APPCONFIG_API_BOOLEAN_H
diff --git a/src/libnetdata/config/appconfig_api_durations.c b/src/libnetdata/config/appconfig_api_durations.c
new file mode 100644
index 000000000..88c462ac6
--- /dev/null
+++ b/src/libnetdata/config/appconfig_api_durations.c
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "appconfig_internals.h"
+#include "appconfig_api_durations.h"
+
+
+static STRING *reformat_duration_seconds(STRING *value) {
+    int result = 0;
+    if(!duration_parse_seconds(string2str(value), &result))
+        return value;
+
+    char buf[128];
+    if(duration_snprintf_time_t(buf, sizeof(buf), result) > 0 && string_strcmp(value, buf) != 0) {
+        string_freez(value);
+        return string_strdupz(buf);
+    }
+
+    return value;
+}
+
+time_t appconfig_get_duration_seconds(struct config *root, const char *section, const char *name, time_t default_value) {
+    char default_str[128];
+    duration_snprintf_time_t(default_str, sizeof(default_str), default_value);
+
+    struct config_option *opt = appconfig_get_raw_value(
+        root, section, name, default_str, CONFIG_VALUE_TYPE_DURATION_IN_SECS, reformat_duration_seconds);
+    if(!opt)
+        return default_value;
+
+    const char *s = string2str(opt->value);
+
+    int result = 0;
+    if(!duration_parse_seconds(s, &result)) {
+        appconfig_set_raw_value(root, section, name, default_str, CONFIG_VALUE_TYPE_DURATION_IN_SECS);
+        netdata_log_error("config option '[%s].%s = %s' is configured with an invalid duration", section, name, s);
+        return default_value;
+    }
+
+    return ABS(result);
+}
+
+time_t appconfig_set_duration_seconds(struct config *root, const char *section, const char *name, time_t value) {
+    char str[128];
+    duration_snprintf_time_t(str, sizeof(str), value);
+
+    appconfig_set_raw_value(root, section, name, str, CONFIG_VALUE_TYPE_DURATION_IN_SECS);
+    return value;
+}
+
+static STRING *reformat_duration_ms(STRING *value) {
+    int64_t result = 0;
+    if(!duration_parse_msec_t(string2str(value), &result))
+        return value;
+
+    char buf[128];
+    if(duration_snprintf_msec_t(buf, sizeof(buf), result) > 0 && string_strcmp(value, buf) != 0) {
+        string_freez(value);
+        return string_strdupz(buf);
+    }
+
+    return value;
+}
+
+msec_t appconfig_get_duration_ms(struct config *root, const char *section, const char *name, msec_t default_value) {
+    char default_str[128];
+    duration_snprintf_msec_t(default_str, sizeof(default_str), default_value);
+
+    struct config_option *opt = appconfig_get_raw_value(
+        root, section, name, default_str, CONFIG_VALUE_TYPE_DURATION_IN_MS, reformat_duration_ms);
+    if(!opt)
+        return default_value;
+
+    const char *s = string2str(opt->value);
+
+    smsec_t result = 0;
+    if(!duration_parse_msec_t(s, &result)) {
+        appconfig_set_raw_value(root, section, name, default_str, CONFIG_VALUE_TYPE_DURATION_IN_MS);
+        netdata_log_error("config option '[%s].%s = %s' is configured with an invalid duration", section, name, s);
+        return default_value;
+    }
+
+    return ABS(result);
+}
+
+msec_t appconfig_set_duration_ms(struct config *root, const char *section, const char *name, msec_t value) {
+    char str[128];
+    duration_snprintf_msec_t(str, sizeof(str), (smsec_t)value);
+
+    appconfig_set_raw_value(root, section, name, str, CONFIG_VALUE_TYPE_DURATION_IN_MS);
+    return value;
+}
+
+static STRING *reformat_duration_days(STRING *value) {
+    int64_t result = 0;
+    if(!duration_parse_days(string2str(value), &result))
+        return value;
+
+    char buf[128];
+    if(duration_snprintf_days(buf, sizeof(buf), result) > 0 && string_strcmp(value, buf) != 0) {
+        string_freez(value);
+        return string_strdupz(buf);
+    }
+
+    return value;
+}
+
+unsigned appconfig_get_duration_days(struct config *root, const char *section, const char *name, unsigned default_value) {
+    char default_str[128];
+    duration_snprintf_days(default_str, sizeof(default_str), (int)default_value);
+
+    struct config_option *opt = appconfig_get_raw_value(
+        root, section, name, default_str, CONFIG_VALUE_TYPE_DURATION_IN_DAYS, reformat_duration_days);
+    if(!opt)
+        return default_value;
+
+    const char *s = string2str(opt->value);
+
+    int64_t result = 0;
+    if(!duration_parse_days(s, &result)) {
+        appconfig_set_raw_value(root, section, name, default_str, CONFIG_VALUE_TYPE_DURATION_IN_DAYS);
+        netdata_log_error("config option '[%s].%s = %s' is configured with an invalid duration", section, name, s);
+        return default_value;
+    }
+
+    return (unsigned)ABS(result);
+}
+
+unsigned appconfig_set_duration_days(struct config *root, const char *section, const char *name, unsigned value) {
+    char str[128];
+    duration_snprintf_days(str, sizeof(str), value);
+    appconfig_set_raw_value(root, section, name, str, CONFIG_VALUE_TYPE_DURATION_IN_DAYS);
+    return value;
+}
+
diff --git a/src/libnetdata/config/appconfig_api_durations.h b/src/libnetdata/config/appconfig_api_durations.h
new file mode 100644
index 000000000..26d6c6ba3
--- /dev/null
+++ b/src/libnetdata/config/appconfig_api_durations.h
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_APPCONFIG_API_DURATIONS_H
+#define NETDATA_APPCONFIG_API_DURATIONS_H
+
+msec_t appconfig_get_duration_ms(struct config *root, const char *section, const char *name, msec_t default_value);
+msec_t appconfig_set_duration_ms(struct config *root, const char *section, const char *name, msec_t value);
+#define config_get_duration_ms(section, name, value) appconfig_get_duration_ms(&netdata_config, section, name, value)
+#define config_set_duration_ms(section, name, value) appconfig_set_duration_ms(&netdata_config, section, name, value)
+
+time_t appconfig_get_duration_seconds(struct config *root, const char *section, const char *name, time_t default_value);
+time_t appconfig_set_duration_seconds(struct config *root, const char *section, const char *name, time_t value);
+#define config_get_duration_seconds(section, name, value) appconfig_get_duration_seconds(&netdata_config, section, name, value)
+#define config_set_duration_seconds(section, name, value) appconfig_set_duration_seconds(&netdata_config, section, name, value)
+
+unsigned appconfig_get_duration_days(struct config *root, const char *section, const char *name, unsigned default_value);
+unsigned appconfig_set_duration_days(struct config *root, const char *section, const char *name, unsigned value);
+#define config_get_duration_days(section, name, value) appconfig_get_duration_days(&netdata_config, section, name, value)
+#define config_set_duration_days(section, name, value) appconfig_set_duration_days(&netdata_config, section, name, value)
+
+#endif //NETDATA_APPCONFIG_API_DURATIONS_H
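A hedged sketch of the seconds-based getter in use; the `[db].update every` name is an example, and the accepted spellings are whatever `duration_parse_seconds()` supports (plain numbers and unit-suffixed strings, per the reformat helpers above).

```c
// Sketch only: an invalid value in the file logs an error, the default is
// written back, and the default (1 second here) is returned.
void demo_duration_option(void) {
    time_t every = appconfig_get_duration_seconds(&netdata_config, "db", "update every", 1);
    (void)every; // e.g. "2s" (or plain "2") in the file yields 2
}
```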
diff --git a/src/libnetdata/config/appconfig_api_numbers.c b/src/libnetdata/config/appconfig_api_numbers.c
new file mode 100644
index 000000000..cc3776c18
--- /dev/null
+++ b/src/libnetdata/config/appconfig_api_numbers.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "appconfig_internals.h"
+#include "appconfig_api_numbers.h"
+
+long long appconfig_get_number(struct config *root, const char *section, const char *name, long long value) {
+    char buffer[100];
+    sprintf(buffer, "%lld", value);
+
+    struct config_option *opt = appconfig_get_raw_value(root, section, name, buffer, CONFIG_VALUE_TYPE_INTEGER, NULL);
+    if(!opt) return value;
+
+    const char *s = string2str(opt->value);
+    return strtoll(s, NULL, 0);
+}
+
+NETDATA_DOUBLE appconfig_get_double(struct config *root, const char *section, const char *name, NETDATA_DOUBLE value) {
+    char buffer[100];
+    sprintf(buffer, "%0.5" NETDATA_DOUBLE_MODIFIER, value);
+
+    struct config_option *opt = appconfig_get_raw_value(root, section, name, buffer, CONFIG_VALUE_TYPE_DOUBLE, NULL);
+    if(!opt) return value;
+
+    const char *s = string2str(opt->value);
+    return str2ndd(s, NULL);
+}
+
+long long appconfig_set_number(struct config *root, const char *section, const char *name, long long value) {
+    char buffer[100];
+    sprintf(buffer, "%lld", value);
+
+    appconfig_set_raw_value(root, section, name, buffer, CONFIG_VALUE_TYPE_INTEGER);
+    return value;
+}
+
+NETDATA_DOUBLE appconfig_set_double(struct config *root, const char *section, const char *name, NETDATA_DOUBLE value) {
+    char buffer[100];
+    sprintf(buffer, "%0.5" NETDATA_DOUBLE_MODIFIER, value);
+
+    appconfig_set_raw_value(root, section, name, buffer, CONFIG_VALUE_TYPE_DOUBLE);
+    return value;
+}
+
diff --git a/src/libnetdata/config/appconfig_api_numbers.h b/src/libnetdata/config/appconfig_api_numbers.h
new file mode 100644
index 000000000..58d382e3d
--- /dev/null
+++ b/src/libnetdata/config/appconfig_api_numbers.h
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_APPCONFIG_API_NUMBERS_H
+#define NETDATA_APPCONFIG_API_NUMBERS_H
+
+long long appconfig_get_number(struct config *root, const char *section, const char *name, long long value);
+long long appconfig_set_number(struct config *root, const char *section, const char *name, long long value);
+#define config_get_number(section, name, value) appconfig_get_number(&netdata_config, section, name, value)
+#define config_set_number(section, name, value) appconfig_set_number(&netdata_config, section, name, value)
+
+NETDATA_DOUBLE appconfig_get_double(struct config *root, const char *section, const char *name, NETDATA_DOUBLE value);
+NETDATA_DOUBLE appconfig_set_double(struct config *root, const char *section, const char *name, NETDATA_DOUBLE value);
+#define config_get_double(section, name, value) appconfig_get_double(&netdata_config, section, name, value)
+#define config_set_double(section, name, value) appconfig_set_double(&netdata_config, section, name, value)
+
+#endif //NETDATA_APPCONFIG_API_NUMBERS_H
diff --git a/src/libnetdata/config/appconfig_api_sizes.c b/src/libnetdata/config/appconfig_api_sizes.c
new file mode 100644
index 000000000..67b1dce9e
--- /dev/null
+++ b/src/libnetdata/config/appconfig_api_sizes.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "appconfig_internals.h"
+#include "appconfig_api_sizes.h"
+
+static STRING *reformat_size_bytes(STRING *value) {
+    uint64_t result = 0;
+    if(!size_parse_bytes(string2str(value), &result))
+        return value;
+
+    char buf[128];
+    if(size_snprintf_bytes(buf, sizeof(buf), result) > 0 && string_strcmp(value, buf) != 0) {
+        string_freez(value);
+        return string_strdupz(buf);
+    }
+
+    return value;
+}
+
+uint64_t appconfig_get_size_bytes(struct config *root, const char *section, const char *name, uint64_t default_value) {
+    char default_str[128];
+    size_snprintf_bytes(default_str, sizeof(default_str), (int)default_value);
+
+    struct config_option *opt =
+        appconfig_get_raw_value(root, section, name, default_str, CONFIG_VALUE_TYPE_SIZE_IN_BYTES, reformat_size_bytes);
+    if(!opt)
+        return default_value;
+
+    const char *s = string2str(opt->value);
+    uint64_t result = 0;
+    if(!size_parse_bytes(s, &result)) {
+        appconfig_set_raw_value(root, section, name, default_str, CONFIG_VALUE_TYPE_SIZE_IN_BYTES);
+        netdata_log_error("config option '[%s].%s = %s' is configured with an invalid size", section, name, s);
+        return default_value;
+    }
+
+    return result;
+}
+
+uint64_t appconfig_set_size_bytes(struct config *root, const char *section, const char *name, uint64_t value) {
+    char str[128];
+    size_snprintf_bytes(str, sizeof(str), value);
+    appconfig_set_raw_value(root, section, name, str, CONFIG_VALUE_TYPE_SIZE_IN_BYTES);
+    return value;
+}
+
+static STRING *reformat_size_mb(STRING *value) {
+    uint64_t result = 0;
+    if(!size_parse_mb(string2str(value), &result))
+        return value;
+
+    char buf[128];
+    if(size_snprintf_mb(buf, sizeof(buf), result) > 0 && string_strcmp(value, buf) != 0) {
+        string_freez(value);
+        return string_strdupz(buf);
+    }
+
+    return value;
+}
+
+uint64_t appconfig_get_size_mb(struct config *root, const char *section, const char *name, uint64_t default_value) {
+    char default_str[128];
+    size_snprintf_mb(default_str, sizeof(default_str), (int)default_value);
+
+    struct config_option *opt =
+        appconfig_get_raw_value(root, section, name, default_str, CONFIG_VALUE_TYPE_SIZE_IN_MB, reformat_size_mb);
+    if(!opt)
+        return default_value;
+
+    const char *s = string2str(opt->value);
+    uint64_t result = 0;
+    if(!size_parse_mb(s, &result)) {
+        appconfig_set_raw_value(root, section, name, default_str, CONFIG_VALUE_TYPE_SIZE_IN_MB);
+        netdata_log_error("config option '[%s].%s = %s' is configured with an invalid size", section, name, s);
+        return default_value;
+    }
+
+    return (unsigned)result;
+}
+
+uint64_t appconfig_set_size_mb(struct config *root, const char *section, const char *name, uint64_t value) {
+    char str[128];
+    size_snprintf_mb(str, sizeof(str), value);
+    appconfig_set_raw_value(root, section, name, str, CONFIG_VALUE_TYPE_SIZE_IN_MB);
+    return value;
+}
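Before the matching header, a hedged sketch of the bytes getter; the option name and the "1GiB" spelling are illustrative, assuming `size_parse_bytes()` accepts the suffixes that `size_snprintf_bytes()` emits.

```c
// Sketch only: a value such as "1GiB" comes back as 1073741824 bytes; an
// unparsable value logs an error and the default (256 MiB here) is restored.
void demo_size_option(void) {
    uint64_t bytes = appconfig_get_size_bytes(&netdata_config, "db",
                                              "dbengine tier 0 disk space",
                                              256ULL * 1024 * 1024);
    (void)bytes;
}
```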
diff --git a/src/libnetdata/config/appconfig_api_sizes.h b/src/libnetdata/config/appconfig_api_sizes.h
new file mode 100644
index 000000000..98ef209fe
--- /dev/null
+++ b/src/libnetdata/config/appconfig_api_sizes.h
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_APPCONFIG_API_SIZES_H
+#define NETDATA_APPCONFIG_API_SIZES_H
+
+uint64_t appconfig_get_size_bytes(struct config *root, const char *section, const char *name, uint64_t default_value);
+uint64_t appconfig_set_size_bytes(struct config *root, const char *section, const char *name, uint64_t value);
+#define config_get_size_bytes(section, name, value) appconfig_get_size_bytes(&netdata_config, section, name, value)
+#define config_set_size_bytes(section, name, value) appconfig_set_size_bytes(&netdata_config, section, name, value)
+
+uint64_t appconfig_get_size_mb(struct config *root, const char *section, const char *name, uint64_t default_value);
+uint64_t appconfig_set_size_mb(struct config *root, const char *section, const char *name, uint64_t value);
+#define config_get_size_mb(section, name, value) appconfig_get_size_mb(&netdata_config, section, name, value)
+#define config_set_size_mb(section, name, value) appconfig_set_size_mb(&netdata_config, section, name, value)
+
+#endif //NETDATA_APPCONFIG_API_SIZES_H
diff --git a/src/libnetdata/config/appconfig_api_text.c b/src/libnetdata/config/appconfig_api_text.c
new file mode 100644
index 000000000..b314972f0
--- /dev/null
+++ b/src/libnetdata/config/appconfig_api_text.c
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "appconfig_internals.h"
+#include "appconfig_api_text.h"
+
+const char *appconfig_get(struct config *root, const char *section, const char *name, const char *default_value) {
+    struct config_option *opt = appconfig_get_raw_value(root, section, name, default_value, CONFIG_VALUE_TYPE_TEXT, NULL);
+    if(!opt)
+        return default_value;
+
+    return string2str(opt->value);
+}
+
+const char *appconfig_set(struct config *root, const char *section, const char *name, const char *value) {
+    struct config_option *opt = appconfig_set_raw_value(root, section, name, value, CONFIG_VALUE_TYPE_TEXT);
+    return string2str(opt->value);
+}
diff --git a/src/libnetdata/config/appconfig_api_text.h b/src/libnetdata/config/appconfig_api_text.h
new file mode 100644
index 000000000..7e1e85f7e
--- /dev/null
+++ b/src/libnetdata/config/appconfig_api_text.h
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_APPCONFIG_API_TEXT_H
+#define NETDATA_APPCONFIG_API_TEXT_H
+
+const char *appconfig_get(struct config *root, const char *section, const char *name, const char *default_value);
+const char *appconfig_set(struct config *root, const char *section, const char *name, const char *value);
+#define config_get(section, name, default_value) appconfig_get(&netdata_config, section, name, default_value)
+#define config_set(section, name, default_value) appconfig_set(&netdata_config, section, name, default_value)
+
+
+#endif //NETDATA_APPCONFIG_API_TEXT_H
diff --git a/src/libnetdata/config/appconfig_cleanup.c b/src/libnetdata/config/appconfig_cleanup.c
new file mode 100644
index 000000000..22f4ac3e9
--- /dev/null
+++ b/src/libnetdata/config/appconfig_cleanup.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "appconfig_internals.h"
+
+void appconfig_section_destroy_non_loaded(struct config *root, const char *section)
+{
+    struct config_section *sect;
+    struct config_option *opt;
+
+    netdata_log_debug(D_CONFIG, "Destroying section '%s'.", section);
+
+    sect = appconfig_section_find(root, section);
+    if(!sect) {
+        netdata_log_error("Could not destroy section '%s'. Not found.", section);
+        return;
+    }
+
+    SECTION_LOCK(sect);
+
+    // find if there is any loaded option
+    for(opt = sect->values; opt; opt = opt->next) {
+        if (opt->flags & CONFIG_VALUE_LOADED) {
+            // do not destroy values that were loaded from the configuration files.
+            SECTION_UNLOCK(sect);
+            return;
+        }
+    }
+
+    // no option is loaded, free them all
+    appconfig_section_remove_and_delete(root, sect, false, true);
+}
+
+void appconfig_section_option_destroy_non_loaded(struct config *root, const char *section, const char *name) {
+    struct config_section *sect;
+    sect = appconfig_section_find(root, section);
+    if (!sect) {
+        netdata_log_error("Could not destroy section option '%s -> %s'. The section not found.", section, name);
+        return;
+    }
+
+    SECTION_LOCK(sect);
+
+    struct config_option *opt = appconfig_option_find(sect, name);
+    if (opt && opt->flags & CONFIG_VALUE_LOADED) {
+        SECTION_UNLOCK(sect);
+        return;
+    }
+
+    if (unlikely(!(opt && appconfig_option_del(sect, opt)))) {
+        SECTION_UNLOCK(sect);
+        netdata_log_error("Could not destroy section option '%s -> %s'. The option not found.", section, name);
+        return;
+    }
+
+    DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(sect->values, opt, prev, next);
+
+    appconfig_option_free(opt);
+    SECTION_UNLOCK(sect);
+}
+
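A hedged sketch of when these cleanup helpers apply: removing a section that was only ever populated with runtime defaults. The `plugin:example` name is hypothetical; by design both calls are no-ops whenever an option carries CONFIG_VALUE_LOADED, i.e. was read from a file.

```c
// Sketch only: drops runtime-created state while never touching anything
// the user actually wrote in netdata.conf.
void demo_cleanup(void) {
    appconfig_section_option_destroy_non_loaded(&netdata_config, "plugin:example", "command options");
    appconfig_section_destroy_non_loaded(&netdata_config, "plugin:example");
}
```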
Using internal defaults.", filename); + + return 0; + } + + CLEAN_STRING *section_string = string_strdupz(section_name); + is_exporter_config = (strstr(filename, EXPORTING_CONF) != NULL); + + while(fgets(buffer, CONFIG_FILE_LINE_MAX, fp) != NULL) { + buffer[CONFIG_FILE_LINE_MAX] = '\0'; + line++; + + s = trim(buffer); + if(!s || *s == '#') { + netdata_log_debug(D_CONFIG, "CONFIG: ignoring line %d of file '%s', it is empty.", line, filename); + continue; + } + + int len = (int) strlen(s); + if(*s == '[' && s[len - 1] == ']') { + // new section + s[len - 1] = '\0'; + s++; + + if (is_exporter_config) { + global_exporting_section = !(strcmp(s, CONFIG_SECTION_EXPORTING)) || !(strcmp(s, CONFIG_SECTION_PROMETHEUS)); + + if (unlikely(!global_exporting_section)) { + int rc; + rc = is_valid_connector(s, 0); + if (likely(rc)) { + strncpyz(working_connector, s, CONFIG_MAX_NAME); + s = s + rc + 1; + if (unlikely(!(*s))) { + _connectors++; + sprintf(buffer, "instance_%d", _connectors); + s = buffer; + } + strncpyz(working_instance, s, CONFIG_MAX_NAME); + working_connector_section = NULL; + if (unlikely(appconfig_section_find(root, working_instance))) { + netdata_log_error("Instance (%s) already exists", working_instance); + sect = NULL; + continue; + } + } + else { + sect = NULL; + netdata_log_error("Section (%s) does not specify a valid connector", s); + continue; + } + } + } + + sect = appconfig_section_find(root, s); + if(!sect) + sect = appconfig_section_create(root, s); + + if(sect && section_string && overwrite_used && section_string == sect->name) { + SECTION_LOCK(sect); + + while(sect->values) + appconfig_option_remove_and_delete(sect, sect->values, true); + + SECTION_UNLOCK(sect); + } + + continue; + } + + if(!sect) { + // line outside a section + netdata_log_error("CONFIG: ignoring line %d ('%s') of file '%s', it is outside all sections.", line, s, filename); + continue; + } + + if(section_string && overwrite_used && section_string != sect->name) + continue; + + char *name = s; + char *value = strchr(s, '='); + if(!value) { + netdata_log_error("CONFIG: ignoring line %d ('%s') of file '%s', there is no = in it.", line, s, filename); + continue; + } + *value = '\0'; + value++; + + name = trim(name); + value = trim(value); + + if(!name || *name == '#') { + netdata_log_error("CONFIG: ignoring line %d of file '%s', name is empty.", line, filename); + continue; + } + + if(!value) value = ""; + + struct config_option *opt = appconfig_option_find(sect, name); + + if (!opt) { + opt = appconfig_option_create(sect, name, value); + if (likely(is_exporter_config) && unlikely(!global_exporting_section)) { + if (unlikely(!working_connector_section)) { + working_connector_section = appconfig_section_find(root, working_connector); + if (!working_connector_section) + working_connector_section = appconfig_section_create(root, working_connector); + if (likely(working_connector_section)) { + add_connector_instance(working_connector_section, sect); + } + } + } + } + else { + if (((opt->flags & CONFIG_VALUE_USED) && overwrite_used) || !(opt->flags & CONFIG_VALUE_USED)) { + string_freez(opt->value); + opt->value = string_strdupz(value); + } + } + opt->flags |= CONFIG_VALUE_LOADED; + } + + fclose(fp); + + return 1; +} + +void appconfig_generate(struct config *root, BUFFER *wb, int only_changed, bool netdata_conf) +{ + int i, pri; + struct config_section *sect; + struct config_option *opt; + + { + int found_host_labels = 0; + for (sect = root->sections; sect; sect = sect->next) + if(!string_strcmp(sect->name, 
CONFIG_SECTION_HOST_LABEL)) + found_host_labels = 1; + + if(netdata_conf && !found_host_labels) { + appconfig_section_create(root, CONFIG_SECTION_HOST_LABEL); + appconfig_get_raw_value(root, CONFIG_SECTION_HOST_LABEL, "name", "value", CONFIG_VALUE_TYPE_TEXT, NULL); + } + } + + if(netdata_conf) { + buffer_strcat(wb, + "# netdata configuration\n" + "#\n" + "# You can download the latest version of this file, using:\n" + "#\n" + "# wget -O /etc/netdata/netdata.conf http://localhost:19999/netdata.conf\n" + "# or\n" + "# curl -o /etc/netdata/netdata.conf http://localhost:19999/netdata.conf\n" + "#\n" + "# You can uncomment and change any of the options below.\n" + "# The value shown in the commented settings, is the default value.\n" + "#\n" + "\n# global netdata configuration\n"); + } + + for(i = 0; i <= 17 ;i++) { + APPCONFIG_LOCK(root); + for(sect = root->sections; sect; sect = sect->next) { + if(!string_strcmp(sect->name, CONFIG_SECTION_GLOBAL)) pri = 0; + else if(!string_strcmp(sect->name, CONFIG_SECTION_DB)) pri = 1; + else if(!string_strcmp(sect->name, CONFIG_SECTION_DIRECTORIES)) pri = 2; + else if(!string_strcmp(sect->name, CONFIG_SECTION_LOGS)) pri = 3; + else if(!string_strcmp(sect->name, CONFIG_SECTION_ENV_VARS)) pri = 4; + else if(!string_strcmp(sect->name, CONFIG_SECTION_HOST_LABEL)) pri = 5; + else if(!string_strcmp(sect->name, CONFIG_SECTION_SQLITE)) pri = 6; + else if(!string_strcmp(sect->name, CONFIG_SECTION_CLOUD)) pri = 7; + else if(!string_strcmp(sect->name, CONFIG_SECTION_ML)) pri = 8; + else if(!string_strcmp(sect->name, CONFIG_SECTION_HEALTH)) pri = 9; + else if(!string_strcmp(sect->name, CONFIG_SECTION_WEB)) pri = 10; + else if(!string_strcmp(sect->name, CONFIG_SECTION_WEBRTC)) pri = 11; + // by default, new sections will get pri = 12 (set at the end, below) + else if(!string_strcmp(sect->name, CONFIG_SECTION_REGISTRY)) pri = 13; + else if(!string_strcmp(sect->name, CONFIG_SECTION_GLOBAL_STATISTICS)) pri = 14; + else if(!string_strcmp(sect->name, CONFIG_SECTION_PLUGINS)) pri = 15; + else if(!string_strcmp(sect->name, CONFIG_SECTION_STATSD)) pri = 16; + else if(!string_strncmp(sect->name, "plugin:", 7)) pri = 17; // << change the loop too if you change this + else pri = 12; // this is used for any new (currently unknown) sections + + if(i == pri) { + int loaded = 0; + int used = 0; + int changed = 0; + int count = 0; + + SECTION_LOCK(sect); + for(opt = sect->values; opt; opt = opt->next) { + used += (opt->flags & CONFIG_VALUE_USED)?1:0; + loaded += (opt->flags & CONFIG_VALUE_LOADED)?1:0; + changed += (opt->flags & CONFIG_VALUE_CHANGED)?1:0; + count++; + } + SECTION_UNLOCK(sect); + + if(!count) continue; + if(only_changed && !changed && !loaded) continue; + + if(!used) + buffer_sprintf(wb, "\n# section '%s' is not used.", string2str(sect->name)); + + buffer_sprintf(wb, "\n[%s]\n", string2str(sect->name)); + + size_t options_added = 0; + bool last_had_comments = false; + SECTION_LOCK(sect); + for(opt = sect->values; opt; opt = opt->next) { + bool unused = used && !(opt->flags & CONFIG_VALUE_USED); + bool migrated = used && (opt->flags & CONFIG_VALUE_MIGRATED); + bool reformatted = used && (opt->flags & CONFIG_VALUE_REFORMATTED); + bool show_default = used && (opt->flags & (CONFIG_VALUE_LOADED|CONFIG_VALUE_CHANGED) && opt->value_default); + + if((unused || migrated || reformatted || show_default)) { + if(options_added) + buffer_strcat(wb, "\n"); + + buffer_sprintf(wb, "\t#| >>> [%s].%s <<<\n", + string2str(sect->name), string2str(opt->name)); + + last_had_comments = true; + 
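+                    /*
+                     * For reference, a loaded option that differs from its internal
+                     * default comes out annotated like this in the generated file
+                     * (illustrative section, option and values):
+                     *
+                     *   [db]
+                     *       #| >>> [db].update every <<<
+                     *       #| datatype: duration (seconds), default value: 1
+                     *       update every = 5
+                     *
+                     * An option that is used by the agent but was neither loaded
+                     * from the file nor changed at runtime is printed with a "# "
+                     * prefix instead, documenting the default without overriding it.
+                     */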
} + else if(last_had_comments) { + buffer_strcat(wb, "\n"); + last_had_comments = false; + } + + if(unused) + buffer_sprintf(wb, "\t#| found in the config file, but is not used\n"); + + if(migrated && reformatted) + buffer_sprintf(wb, "\t#| migrated from: [%s].%s = %s\n", + string2str(opt->migrated.section), string2str(opt->migrated.name), + string2str(opt->value_original)); + else { + if (migrated) + buffer_sprintf(wb, "\t#| migrated from: [%s].%s\n", + string2str(opt->migrated.section), string2str(opt->migrated.name)); + + if (reformatted) + buffer_sprintf(wb, "\t#| reformatted from: %s\n", + string2str(opt->value_original)); + } + + if(show_default) + buffer_sprintf(wb, "\t#| datatype: %s, default value: %s\n", + CONFIG_VALUE_TYPES_2str(opt->type), + string2str(opt->value_default)); + + buffer_sprintf(wb, "\t%s%s = %s\n", + ( + !(opt->flags & CONFIG_VALUE_LOADED) && + !(opt->flags & CONFIG_VALUE_CHANGED) && + (opt->flags & CONFIG_VALUE_USED) + ) ? "# " : "", + string2str(opt->name), + string2str(opt->value)); + + options_added++; + } + SECTION_UNLOCK(sect); + } + } + APPCONFIG_UNLOCK(root); + } +} diff --git a/src/libnetdata/config/appconfig_exporters.c b/src/libnetdata/config/appconfig_exporters.c new file mode 100644 index 000000000..1fafb298c --- /dev/null +++ b/src/libnetdata/config/appconfig_exporters.c @@ -0,0 +1,95 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "appconfig_internals.h" + +/* + * Add a connector / instance pair to the internal list of known exporting + * connector instances. + * + * @param connector the connector section; when NULL, the call either returns + * the current head (instance == NULL) or frees the whole + * list and returns NULL (instance != NULL) + * @param instance the instance section to link to the connector + * @return the current head of the linked list of _connector_instance + */ + +_CONNECTOR_INSTANCE *add_connector_instance(struct config_section *connector, struct config_section *instance) +{ + static struct _connector_instance *global_connector_instance = NULL; + struct _connector_instance *local_ci, *local_ci_tmp; + + if (unlikely(!connector)) { + if (unlikely(!instance)) + return global_connector_instance; + + local_ci = global_connector_instance; + while (local_ci) { + local_ci_tmp = local_ci->next; + freez(local_ci); + local_ci = local_ci_tmp; + } + global_connector_instance = NULL; + return NULL; + } + + local_ci = callocz(1, sizeof(struct _connector_instance)); + local_ci->instance = instance; + local_ci->connector = connector; + strncpyz(local_ci->instance_name, string2str(instance->name), CONFIG_MAX_NAME); + strncpyz(local_ci->connector_name, string2str(connector->name), CONFIG_MAX_NAME); + local_ci->next = global_connector_instance; + global_connector_instance = local_ci; + + return global_connector_instance; +} + +int is_valid_connector(char *type, int check_reserved) { + int rc = 1; + + if (unlikely(!type)) + return 0; + + if (!check_reserved) { + if (unlikely(is_valid_connector(type,1))) { + return 0; + } + //if (unlikely(*type == ':') + // return 0; + char *separator = strrchr(type, ':'); + if (likely(separator)) { + *separator = '\0'; + rc = (int)(separator - type); + } else + return 0; + } + // else { + // if (unlikely(is_valid_connector(type,1))) { + // netdata_log_error("Section %s invalid -- reserved name", type); + // return 0; + // } + // } + + if (!strcmp(type, "graphite") || !strcmp(type, "graphite:plaintext")) { + return rc; + } else if (!strcmp(type, "graphite:http") || !strcmp(type, "graphite:https")) { + return rc; + } else if (!strcmp(type, "json") || !strcmp(type, "json:plaintext")) { + return rc; + } else if (!strcmp(type, "json:http") || !strcmp(type, "json:https")) { + return rc; + } else if (!strcmp(type, "opentsdb") || !strcmp(type, "opentsdb:telnet")) { + return rc; + } 
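+    /*
+     * Worked example of the contract with appconfig_load() (illustrative
+     * section names): for "[graphite:my_instance]" in exporting.conf,
+     * is_valid_connector("graphite:my_instance", 0) cuts the string at the
+     * last ':' and returns 8, the length of "graphite"; the caller then
+     * advances rc + 1 bytes, so "my_instance" becomes the working instance
+     * name. With a trailing colon, as in "[opentsdb:]", the remainder is
+     * empty and appconfig_load() synthesizes a name such as "instance_1".
+     * A section name without a colon yields 0 and is rejected as a connector.
+     */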
else if (!strcmp(type, "opentsdb:http") || !strcmp(type, "opentsdb:https")) { + return rc; + } else if (!strcmp(type, "prometheus_remote_write")) { + return rc; + } else if (!strcmp(type, "prometheus_remote_write:http") || !strcmp(type, "prometheus_remote_write:https")) { + return rc; + } else if (!strcmp(type, "kinesis") || !strcmp(type, "kinesis:plaintext")) { + return rc; + } else if (!strcmp(type, "pubsub") || !strcmp(type, "pubsub:plaintext")) { + return rc; + } else if (!strcmp(type, "mongodb") || !strcmp(type, "mongodb:plaintext")) { + return rc; + } + + return 0; +} + diff --git a/src/libnetdata/config/appconfig_internals.h b/src/libnetdata/config/appconfig_internals.h new file mode 100644 index 000000000..492e8ce5c --- /dev/null +++ b/src/libnetdata/config/appconfig_internals.h @@ -0,0 +1,119 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_APPCONFIG_INTERNALS_H +#define NETDATA_APPCONFIG_INTERNALS_H + +#include "appconfig.h" + +typedef enum __attribute__((packed)) { + CONFIG_VALUE_TYPE_UNKNOWN = 0, + CONFIG_VALUE_TYPE_TEXT, + CONFIG_VALUE_TYPE_HOSTNAME, + CONFIG_VALUE_TYPE_USERNAME, + CONFIG_VALUE_TYPE_FILENAME, + CONFIG_VALUE_TYPE_PATH, + CONFIG_VALUE_TYPE_SIMPLE_PATTERN, + CONFIG_VALUE_TYPE_URL, + CONFIG_VALUE_TYPE_ENUM, + CONFIG_VALUE_TYPE_BITMAP, + CONFIG_VALUE_TYPE_INTEGER, + CONFIG_VALUE_TYPE_DOUBLE, + CONFIG_VALUE_TYPE_BOOLEAN, + CONFIG_VALUE_TYPE_BOOLEAN_ONDEMAND, + CONFIG_VALUE_TYPE_DURATION_IN_SECS, + CONFIG_VALUE_TYPE_DURATION_IN_MS, + CONFIG_VALUE_TYPE_DURATION_IN_DAYS, + CONFIG_VALUE_TYPE_SIZE_IN_BYTES, + CONFIG_VALUE_TYPE_SIZE_IN_MB, +} CONFIG_VALUE_TYPES; + +typedef enum __attribute__((packed)) { + CONFIG_VALUE_LOADED = (1 << 0), // has been loaded from the config + CONFIG_VALUE_USED = (1 << 1), // has been accessed from the program + CONFIG_VALUE_CHANGED = (1 << 2), // has been changed from the loaded value or the internal default value + CONFIG_VALUE_CHECKED = (1 << 3), // has been checked if the value is different from the default + CONFIG_VALUE_MIGRATED = (1 << 4), // has been migrated from an old config + CONFIG_VALUE_REFORMATTED = (1 << 5), // has been reformatted with the official formatting +} CONFIG_VALUE_FLAGS; + +struct config_option { + avl_t avl_node; // the index entry of this entry - this has to be first! + + CONFIG_VALUE_TYPES type; + CONFIG_VALUE_FLAGS flags; + + STRING *name; + STRING *value; + + STRING *value_original; // the original value of this option (the first value it got, independently on how it got it) + STRING *value_default; // the internal default value of this option (the first value it got, from appconfig_get_XXX()) + + // when we move options around, this is where we keep the original + // section and name (of the first migration) + struct { + STRING *section; + STRING *name; + } migrated; + + struct config_option *prev, *next; // config->mutex protects just this +}; + +struct config_section { + avl_t avl_node; // the index entry of this section - this has to be first! 
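+    // (the avl_* helpers cast this struct to avl_t *, so this member must
+    //  stay first for appconfig_section_add()/appconfig_section_del() to work)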
+ + STRING *name; + + struct config_option *values; + avl_tree_lock values_index; + + SPINLOCK spinlock; + struct config_section *prev, *next; // global config_mutex protects just this +}; + +// ---------------------------------------------------------------------------- +// locking + +#define APPCONFIG_LOCK(root) spinlock_lock(&((root)->spinlock)) +#define APPCONFIG_UNLOCK(root) spinlock_unlock(&((root)->spinlock)) +#define SECTION_LOCK(sect) spinlock_lock(&((sect)->spinlock)) +#define SECTION_UNLOCK(sect) spinlock_unlock(&((sect)->spinlock)); + +// config sections +void appconfig_section_free(struct config_section *sect); +void appconfig_section_remove_and_delete(struct config *root, struct config_section *sect, bool have_root_lock, bool have_sect_lock); +#define appconfig_section_add(root, cfg) (struct config_section *)avl_insert_lock(&(root)->index, (avl_t *)(cfg)) +#define appconfig_section_del(root, cfg) (struct config_section *)avl_remove_lock(&(root)->index, (avl_t *)(cfg)) +struct config_section *appconfig_section_find(struct config *root, const char *name); +struct config_section *appconfig_section_create(struct config *root, const char *section); + +// config options +void appconfig_option_cleanup(struct config_option *opt); +void appconfig_option_free(struct config_option *opt); +void appconfig_option_remove_and_delete(struct config_section *sect, struct config_option *opt, bool have_sect_lock); +void appconfig_option_remove_and_delete_all(struct config_section *sect, bool have_sect_lock); +int appconfig_option_compare(void *a, void *b); +#define appconfig_option_add(co, cv) (struct config_option *)avl_insert_lock(&((co)->values_index), (avl_t *)(cv)) +#define appconfig_option_del(co, cv) (struct config_option *)avl_remove_lock(&((co)->values_index), (avl_t *)(cv)) +struct config_option *appconfig_option_find(struct config_section *sect, const char *name); +struct config_option *appconfig_option_create(struct config_section *sect, const char *name, const char *value); + +// lookup +int appconfig_get_boolean_by_section(struct config_section *sect, const char *name, int value); + +typedef STRING *(*reformat_t)(STRING *value); +struct config_option *appconfig_get_raw_value_of_option_in_section(struct config_section *sect, const char *option, const char *default_value, CONFIG_VALUE_TYPES type, reformat_t cb); +struct config_option *appconfig_get_raw_value(struct config *root, const char *section, const char *option, const char *default_value, CONFIG_VALUE_TYPES type, reformat_t cb); + +void appconfig_set_raw_value_of_option(struct config_option *opt, const char *value, CONFIG_VALUE_TYPES type); +struct config_option *appconfig_set_raw_value_of_option_in_section(struct config_section *sect, const char *option, const char *value, CONFIG_VALUE_TYPES type); +struct config_option *appconfig_set_raw_value(struct config *root, const char *section, const char *option, const char *value, CONFIG_VALUE_TYPES type); + +// cleanup +void appconfig_section_destroy_non_loaded(struct config *root, const char *section); +void appconfig_section_option_destroy_non_loaded(struct config *root, const char *section, const char *name); + +// exporters +_CONNECTOR_INSTANCE *add_connector_instance(struct config_section *connector, struct config_section *instance); +int is_valid_connector(char *type, int check_reserved); + +#endif //NETDATA_APPCONFIG_INTERNALS_H diff --git a/src/libnetdata/config/appconfig_migrate.c b/src/libnetdata/config/appconfig_migrate.c new file mode 100644 index 000000000..0c21ec06c 
--- /dev/null +++ b/src/libnetdata/config/appconfig_migrate.c @@ -0,0 +1,95 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "appconfig_internals.h" + +int appconfig_move(struct config *root, const char *section_old, const char *name_old, const char *section_new, const char *name_new) { + struct config_option *opt_old, *opt_new; + int ret = -1; + + netdata_log_debug(D_CONFIG, "request to rename config in section '%s', old name '%s', to section '%s', new name '%s'", section_old, name_old, section_new, name_new); + + struct config_section *sect_old = appconfig_section_find(root, section_old); + if(!sect_old) return ret; + + struct config_section *sect_new = appconfig_section_find(root, section_new); + if(!sect_new) sect_new = appconfig_section_create(root, section_new); + + SECTION_LOCK(sect_old); + if(sect_old != sect_new) + SECTION_LOCK(sect_new); + + opt_old = appconfig_option_find(sect_old, name_old); + if(!opt_old) goto cleanup; + + opt_new = appconfig_option_find(sect_new, name_new); + if(opt_new) goto cleanup; + + if(unlikely(appconfig_option_del(sect_old, opt_old) != opt_old)) + netdata_log_error("INTERNAL ERROR: deletion of config '%s' from section '%s', deleted the wrong config entry.", + string2str(opt_old->name), string2str(sect_old->name)); + + // remember the old position of the item + struct config_option *opt_old_next = (sect_old == sect_new) ? opt_old->next : NULL; + + DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(sect_old->values, opt_old, prev, next); + + nd_log(NDLS_DAEMON, NDLP_WARNING, + "CONFIG: option '[%s].%s' has been migrated to '[%s].%s'.", + section_old, name_old, + section_new, name_new); + + if(!opt_old->migrated.name) { + string_freez(opt_old->migrated.section); + opt_old->migrated.section = string_dup(sect_old->name); + opt_old->migrated.name = opt_old->name; + } + else + string_freez(opt_old->name); + + opt_old->name = string_strdupz(name_new); + opt_old->flags |= CONFIG_VALUE_MIGRATED; + + opt_new = opt_old; + + // put in the list, but try to keep the order + if(opt_old_next && sect_old == sect_new) + DOUBLE_LINKED_LIST_INSERT_ITEM_BEFORE_UNSAFE(sect_new->values, opt_old_next, opt_new, prev, next); + else { + // we don't have the old next item (probably a different section?) + // find the last MIGRATED one + struct config_option *t = sect_new->values ? 
sect_new->values->prev : NULL; + for (; t && t != sect_new->values ; t = t->prev) { + if (t->flags & CONFIG_VALUE_MIGRATED) + break; + } + if (t == sect_new->values) + DOUBLE_LINKED_LIST_PREPEND_ITEM_UNSAFE(sect_new->values, opt_new, prev, next); + else + DOUBLE_LINKED_LIST_INSERT_ITEM_AFTER_UNSAFE(sect_new->values, t, opt_new, prev, next); + } + + if(unlikely(appconfig_option_add(sect_new, opt_old) != opt_old)) + netdata_log_error("INTERNAL ERROR: re-indexing of config '%s' in section '%s', already exists.", + string2str(opt_old->name), string2str(sect_new->name)); + + ret = 0; + +cleanup: + if(sect_old != sect_new) + SECTION_UNLOCK(sect_new); + SECTION_UNLOCK(sect_old); + return ret; +} + +int appconfig_move_everywhere(struct config *root, const char *name_old, const char *name_new) { + int ret = -1; + APPCONFIG_LOCK(root); + struct config_section *sect; + for(sect = root->sections; sect; sect = sect->next) { + if(appconfig_move(root, string2str(sect->name), name_old, string2str(sect->name), name_new) == 0) + ret = 0; + } + APPCONFIG_UNLOCK(root); + return ret; +} + diff --git a/src/libnetdata/config/appconfig_options.c b/src/libnetdata/config/appconfig_options.c new file mode 100644 index 000000000..f619d08a6 --- /dev/null +++ b/src/libnetdata/config/appconfig_options.c @@ -0,0 +1,183 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "appconfig_internals.h" + +// ---------------------------------------------------------------------------- +// config options index + +int appconfig_option_compare(void *a, void *b) { + if(((struct config_option *)a)->name < ((struct config_option *)b)->name) return -1; + else if(((struct config_option *)a)->name > ((struct config_option *)b)->name) return 1; + else return string_cmp(((struct config_option *)a)->name, ((struct config_option *)b)->name); +} + +struct config_option *appconfig_option_find(struct config_section *sect, const char *name) { + struct config_option opt_tmp = { + .name = string_strdupz(name), + }; + + struct config_option *rc = (struct config_option *)avl_search_lock(&(sect->values_index), (avl_t *) &opt_tmp); + + appconfig_option_cleanup(&opt_tmp); + return rc; +} + +// ---------------------------------------------------------------------------- +// config options methods + +void appconfig_option_cleanup(struct config_option *opt) { + string_freez(opt->value); + string_freez(opt->name); + string_freez(opt->migrated.section); + string_freez(opt->migrated.name); + string_freez(opt->value_original); + string_freez(opt->value_default); + + opt->value = NULL; + opt->name = NULL; + opt->migrated.section = NULL; + opt->migrated.name = NULL; + opt->value_original = NULL; + opt->value_default = NULL; +} + +void appconfig_option_free(struct config_option *opt) { + appconfig_option_cleanup(opt); + freez(opt); +} + +struct config_option *appconfig_option_create(struct config_section *sect, const char *name, const char *value) { + struct config_option *opt = callocz(1, sizeof(struct config_option)); + opt->name = string_strdupz(name); + opt->value = string_strdupz(value); + opt->value_original = string_dup(opt->value); + + struct config_option *opt_found = appconfig_option_add(sect, opt); + if(opt_found != opt) { + nd_log(NDLS_DAEMON, NDLP_INFO, + "CONFIG: config '%s' in section '%s': already exists - using the existing one.", + string2str(opt->name), string2str(sect->name)); + appconfig_option_free(opt); + return opt_found; + } + + SECTION_LOCK(sect); + DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(sect->values, opt, prev, next); + 
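+    /*
+     * Every option lives in two structures that are kept in sync here: the
+     * AVL index (values_index) provides fast lookups by name, while the
+     * doubly linked list (values) preserves creation order, which is the
+     * order appconfig_generate() writes the options back out. A minimal
+     * ordered walk under the section lock (sketch, assuming a valid sect):
+     *
+     *   SECTION_LOCK(sect);
+     *   for(struct config_option *t = sect->values; t; t = t->next)
+     *       ; // options appear in the order they were created
+     *   SECTION_UNLOCK(sect);
+     */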
SECTION_UNLOCK(sect); + + return opt; +} + +void appconfig_option_remove_and_delete(struct config_section *sect, struct config_option *opt, bool have_sect_lock) { + struct config_option *opt_found = appconfig_option_del(sect, opt); + if(opt_found != opt) { + nd_log(NDLS_DAEMON, NDLP_ERR, + "INTERNAL ERROR: Cannot remove '%s' from section '%s', it was not inserted before.", + string2str(opt->name), string2str(sect->name)); + return; + } + + if(!have_sect_lock) + SECTION_LOCK(sect); + + DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(sect->values, opt, prev, next); + + if(!have_sect_lock) + SECTION_UNLOCK(sect); + + appconfig_option_free(opt); +} + +void appconfig_option_remove_and_delete_all(struct config_section *sect, bool have_sect_lock) { + if(!have_sect_lock) + SECTION_LOCK(sect); + + while(sect->values) + appconfig_option_remove_and_delete(sect, sect->values, true); + + if(!have_sect_lock) + SECTION_UNLOCK(sect); +} + +void appconfig_get_raw_value_of_option(struct config_option *opt, const char *default_value, CONFIG_VALUE_TYPES type, reformat_t cb) { + opt->flags |= CONFIG_VALUE_USED; + + if(type != CONFIG_VALUE_TYPE_UNKNOWN) + opt->type = type; + + if((opt->flags & CONFIG_VALUE_LOADED) || (opt->flags & CONFIG_VALUE_CHANGED)) { + // this is a loaded value from the config file + // if it is different from the default, mark it + if(!(opt->flags & CONFIG_VALUE_CHECKED)) { + if(!(opt->flags & CONFIG_VALUE_REFORMATTED) && cb) { + STRING *value_old = opt->value; + opt->value = cb(opt->value); + if(opt->value != value_old) + opt->flags |= CONFIG_VALUE_REFORMATTED; + } + + if(default_value && string_strcmp(opt->value, default_value) != 0) + opt->flags |= CONFIG_VALUE_CHANGED; + + opt->flags |= CONFIG_VALUE_CHECKED; + } + } + + if(!opt->value_default) + opt->value_default = string_strdupz(default_value); +} + +struct config_option *appconfig_get_raw_value_of_option_in_section(struct config_section *sect, const char *option, const char *default_value, CONFIG_VALUE_TYPES type, reformat_t cb) { + // Only calls internal to this file check for a NULL result, and they do not supply a NULL arg. + // External caller should treat NULL as an error case. 
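+    //
+    // Worked example of the flag lifecycle (illustrative values): if
+    // netdata.conf contains "update every = 5" while the internal default is
+    // "1", appconfig_load() creates the option with CONFIG_VALUE_LOADED; the
+    // first lookup through here adds CONFIG_VALUE_USED, and the CHECKED pass
+    // in appconfig_get_raw_value_of_option() notices that "5" differs from
+    // the default, so CONFIG_VALUE_CHANGED is set as well.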
+ struct config_option *opt = appconfig_option_find(sect, option); + if (!opt) { + if (!default_value) return NULL; + opt = appconfig_option_create(sect, option, default_value); + if (!opt) return NULL; + } + + appconfig_get_raw_value_of_option(opt, default_value, type, cb); + return opt; +} + +struct config_option *appconfig_get_raw_value(struct config *root, const char *section, const char *option, const char *default_value, CONFIG_VALUE_TYPES type, reformat_t cb) { + struct config_section *sect = appconfig_section_find(root, section); + if(!sect) { + if(!default_value) return NULL; + sect = appconfig_section_create(root, section); + } + + return appconfig_get_raw_value_of_option_in_section(sect, option, default_value, type, cb); +} + +void appconfig_set_raw_value_of_option(struct config_option *opt, const char *value, CONFIG_VALUE_TYPES type) { + opt->flags |= CONFIG_VALUE_USED; + + if(opt->type == CONFIG_VALUE_TYPE_UNKNOWN) + opt->type = type; + + if(string_strcmp(opt->value, value) != 0) { + opt->flags |= CONFIG_VALUE_CHANGED; + + string_freez(opt->value); + opt->value = string_strdupz(value); + } +} + +struct config_option *appconfig_set_raw_value_of_option_in_section(struct config_section *sect, const char *option, const char *value, CONFIG_VALUE_TYPES type) { + struct config_option *opt = appconfig_option_find(sect, option); + if(!opt) + opt = appconfig_option_create(sect, option, value); + + appconfig_set_raw_value_of_option(opt, value, type); + return opt; +} + +struct config_option *appconfig_set_raw_value(struct config *root, const char *section, const char *option, const char *value, CONFIG_VALUE_TYPES type) { + struct config_section *sect = appconfig_section_find(root, section); + if(!sect) + sect = appconfig_section_create(root, section); + + return appconfig_set_raw_value_of_option_in_section(sect, option, value, type); +} diff --git a/src/libnetdata/config/appconfig_sections.c b/src/libnetdata/config/appconfig_sections.c new file mode 100644 index 000000000..2180803a9 --- /dev/null +++ b/src/libnetdata/config/appconfig_sections.c @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "appconfig_internals.h" + +// ---------------------------------------------------------------------------- +// config sections index + +int appconfig_section_compare(void *a, void *b) { + if(((struct config_section *)a)->name < ((struct config_section *)b)->name) return -1; + else if(((struct config_section *)a)->name > ((struct config_section *)b)->name) return 1; + else return string_cmp(((struct config_section *)a)->name, ((struct config_section *)b)->name); +} + +struct config_section *appconfig_section_find(struct config *root, const char *name) { + struct config_section sect_tmp = { + .name = string_strdupz(name), + }; + + struct config_section *rc = (struct config_section *)avl_search_lock(&root->index, (avl_t *) &sect_tmp); + string_freez(sect_tmp.name); + return rc; +} + +// ---------------------------------------------------------------------------- +// config section methods + +void appconfig_section_free(struct config_section *sect) { + avl_destroy_lock(&sect->values_index); + string_freez(sect->name); + freez(sect); +} + +void appconfig_section_remove_and_delete(struct config *root, struct config_section *sect, bool have_root_lock, bool have_sect_lock) { + struct config_section *sect_found = appconfig_section_del(root, sect); + if(sect_found != sect) { + nd_log(NDLS_DAEMON, NDLP_ERR, + "INTERNAL ERROR: Cannot remove section '%s', it was not inserted before.", + 
string2str(sect->name)); + return; + } + + appconfig_option_remove_and_delete_all(sect, have_sect_lock); + + if(!have_root_lock) + APPCONFIG_LOCK(root); + + DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(root->sections, sect, prev, next); + + if(!have_root_lock) + APPCONFIG_UNLOCK(root); + + // if the caller holds the section lock, release it, so the section can be freed + if(have_sect_lock) + SECTION_UNLOCK(sect); + + appconfig_section_free(sect); +} + +struct config_section *appconfig_section_create(struct config *root, const char *section) { + struct config_section *sect = callocz(1, sizeof(struct config_section)); + sect->name = string_strdupz(section); + spinlock_init(&sect->spinlock); + + avl_init_lock(&sect->values_index, appconfig_option_compare); + + struct config_section *sect_found = appconfig_section_add(root, sect); + if(sect_found != sect) { + nd_log(NDLS_DAEMON, NDLP_ERR, + "CONFIG: section '%s' already exists - using the existing one.", + string2str(sect->name)); + appconfig_section_free(sect); + return sect_found; + } + + APPCONFIG_LOCK(root); + DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(root->sections, sect, prev, next); + APPCONFIG_UNLOCK(root); + + return sect; +} + + diff --git a/src/libnetdata/config/appconfig_traversal.c b/src/libnetdata/config/appconfig_traversal.c new file mode 100644 index 000000000..f26def2c2 --- /dev/null +++ b/src/libnetdata/config/appconfig_traversal.c @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "appconfig_internals.h" + +size_t appconfig_foreach_value_in_section(struct config *root, const char *section, appconfig_foreach_value_cb_t cb, void *data) { + size_t used = 0; + struct config_section *co = appconfig_section_find(root, section); + if(co) { + SECTION_LOCK(co); + struct config_option *cv; + for(cv = co->values; cv ; cv = cv->next) { + if(cb(data, string2str(cv->name), string2str(cv->value))) { + cv->flags |= CONFIG_VALUE_USED; + used++; + } + } + SECTION_UNLOCK(co); + } + + return used; +} diff --git a/src/libnetdata/config/dyncfg.c b/src/libnetdata/config/dyncfg.c index 244864c65..81b050f89 100644 --- a/src/libnetdata/config/dyncfg.c +++ b/src/libnetdata/config/dyncfg.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later -#include "../../libnetdata/libnetdata.h" +#include "../libnetdata.h" // ---------------------------------------------------------------------------- @@ -211,7 +211,7 @@ bool dyncfg_is_valid_id(const char *id) { return true; } -static inline bool is_forbidden_char(char c) { +static inline bool is_forbidden_filename_char(char c) { if(isspace((uint8_t)c) || !isprint((uint8_t)c)) return true; @@ -239,7 +239,7 @@ char *dyncfg_escape_id_for_filename(const char *id) { char *dest = escaped; while (*src) { - if (is_forbidden_char(*src)) { + if (is_forbidden_filename_char(*src)) { sprintf(dest, "%%%02X", (unsigned char)*src); dest += 3; } else { @@ -277,7 +277,7 @@ int dyncfg_node_find_and_call(DICTIONARY *dyncfg_nodes, const char *transaction, memcpy(buf, function, sizeof(buf)); char *words[MAX_FUNCTION_PARAMETERS]; // an array of pointers for the words in this line - size_t num_words = quoted_strings_splitter_pluginsd(buf, words, MAX_FUNCTION_PARAMETERS); + size_t num_words = quoted_strings_splitter_whitespace(buf, words, MAX_FUNCTION_PARAMETERS); const char *id = get_word(words, num_words, 1); const char *action = get_word(words, num_words, 2); diff --git a/src/libnetdata/datetime/README.md b/src/libnetdata/datetime/README.md index 303ba8bf2..e10848f20 100644 --- a/src/libnetdata/datetime/README.md +++ 
b/src/libnetdata/datetime/README.md @@ -1,11 +1,3 @@ - - # Datetime Formatting dates and timestamps. diff --git a/src/libnetdata/dictionary/dictionary-hashtable.h b/src/libnetdata/dictionary/dictionary-hashtable.h index 865f0b360..14c81cfcc 100644 --- a/src/libnetdata/dictionary/dictionary-hashtable.h +++ b/src/libnetdata/dictionary/dictionary-hashtable.h @@ -8,96 +8,96 @@ // ---------------------------------------------------------------------------- // hashtable operations with simple hashtable -static inline bool compare_keys(void *key1, void *key2) { - const char *k1 = key1; - const char *k2 = key2; - return strcmp(k1, k2) == 0; -} - -static inline void *item_to_key(DICTIONARY_ITEM *item) { - return (void *)item_get_name(item); -} - -#define SIMPLE_HASHTABLE_VALUE_TYPE DICTIONARY_ITEM -#define SIMPLE_HASHTABLE_NAME _DICTIONARY -#define SIMPLE_HASHTABLE_VALUE2KEY_FUNCTION item_to_key -#define SIMPLE_HASHTABLE_COMPARE_KEYS_FUNCTION compare_keys -#include "..//simple_hashtable.h" - -static inline size_t hashtable_init_hashtable(DICTIONARY *dict) { - SIMPLE_HASHTABLE_DICTIONARY *ht = callocz(1, sizeof(*ht)); - simple_hashtable_init_DICTIONARY(ht, 4); - dict->index.JudyHSArray = ht; - return 0; -} - -static inline size_t hashtable_destroy_hashtable(DICTIONARY *dict) { - SIMPLE_HASHTABLE_DICTIONARY *ht = dict->index.JudyHSArray; - if(unlikely(!ht)) return 0; - - size_t mem = sizeof(*ht) + ht->size * sizeof(SIMPLE_HASHTABLE_SLOT_DICTIONARY); - simple_hashtable_destroy_DICTIONARY(ht); - freez(ht); - dict->index.JudyHSArray = NULL; - - return mem; -} - -static inline void *hashtable_insert_hashtable(DICTIONARY *dict, const char *name, size_t name_len) { - SIMPLE_HASHTABLE_DICTIONARY *ht = dict->index.JudyHSArray; - - char key[name_len+1]; - memcpy(key, name, name_len); - key[name_len] = '\0'; - - XXH64_hash_t hash = XXH3_64bits(name, name_len); - SIMPLE_HASHTABLE_SLOT_DICTIONARY *sl = simple_hashtable_get_slot_DICTIONARY(ht, hash, key, true); - sl->hash = hash; // we will need it in insert later - it is ok to overwrite - it is the same already - return sl; -} - -static inline DICTIONARY_ITEM *hashtable_insert_handle_to_item_hashtable(DICTIONARY *dict, void *handle) { - (void)dict; - SIMPLE_HASHTABLE_SLOT_DICTIONARY *sl = handle; - DICTIONARY_ITEM *item = SIMPLE_HASHTABLE_SLOT_DATA(sl); - return item; -} - -static inline void hashtable_set_item_hashtable(DICTIONARY *dict, void *handle, DICTIONARY_ITEM *item) { - SIMPLE_HASHTABLE_DICTIONARY *ht = dict->index.JudyHSArray; - SIMPLE_HASHTABLE_SLOT_DICTIONARY *sl = handle; - simple_hashtable_set_slot_DICTIONARY(ht, sl, sl->hash, item); -} - -static inline int hashtable_delete_hashtable(DICTIONARY *dict, const char *name, size_t name_len, DICTIONARY_ITEM *item_to_delete) { - (void)item_to_delete; - SIMPLE_HASHTABLE_DICTIONARY *ht = dict->index.JudyHSArray; - - char key[name_len+1]; - memcpy(key, name, name_len); - key[name_len] = '\0'; - - XXH64_hash_t hash = XXH3_64bits(name, name_len); - SIMPLE_HASHTABLE_SLOT_DICTIONARY *sl = simple_hashtable_get_slot_DICTIONARY(ht, hash, key, false); - DICTIONARY_ITEM *item = SIMPLE_HASHTABLE_SLOT_DATA(sl); - if(!item) return 0; // return not-found - - simple_hashtable_del_slot_DICTIONARY(ht, sl); - return 1; // return deleted -} - -static inline DICTIONARY_ITEM *hashtable_get_hashtable(DICTIONARY *dict, const char *name, size_t name_len) { - SIMPLE_HASHTABLE_DICTIONARY *ht = dict->index.JudyHSArray; - if(unlikely(!ht)) return NULL; - - char key[name_len+1]; - memcpy(key, name, name_len); - key[name_len] = '\0'; - 
- XXH64_hash_t hash = XXH3_64bits(name, name_len); - SIMPLE_HASHTABLE_SLOT_DICTIONARY *sl = simple_hashtable_get_slot_DICTIONARY(ht, hash, key, true); - return SIMPLE_HASHTABLE_SLOT_DATA(sl); -} +//static inline bool compare_keys(void *key1, void *key2) { +// const char *k1 = key1; +// const char *k2 = key2; +// return strcmp(k1, k2) == 0; +//} +// +//static inline void *item_to_key(DICTIONARY_ITEM *item) { +// return (void *)item_get_name(item); +//} +// +//#define SIMPLE_HASHTABLE_VALUE_TYPE DICTIONARY_ITEM +//#define SIMPLE_HASHTABLE_NAME _DICTIONARY +//#define SIMPLE_HASHTABLE_VALUE2KEY_FUNCTION item_to_key +//#define SIMPLE_HASHTABLE_COMPARE_KEYS_FUNCTION compare_keys +//#include "..//simple_hashtable.h" + +//static inline size_t hashtable_init_hashtable(DICTIONARY *dict) { +// SIMPLE_HASHTABLE_DICTIONARY *ht = callocz(1, sizeof(*ht)); +// simple_hashtable_init_DICTIONARY(ht, 4); +// dict->index.JudyHSArray = ht; +// return 0; +//} +// +//static inline size_t hashtable_destroy_hashtable(DICTIONARY *dict) { +// SIMPLE_HASHTABLE_DICTIONARY *ht = dict->index.JudyHSArray; +// if(unlikely(!ht)) return 0; +// +// size_t mem = sizeof(*ht) + ht->size * sizeof(SIMPLE_HASHTABLE_SLOT_DICTIONARY); +// simple_hashtable_destroy_DICTIONARY(ht); +// freez(ht); +// dict->index.JudyHSArray = NULL; +// +// return mem; +//} +// +//static inline void *hashtable_insert_hashtable(DICTIONARY *dict, const char *name, size_t name_len) { +// SIMPLE_HASHTABLE_DICTIONARY *ht = dict->index.JudyHSArray; +// +// char key[name_len+1]; +// memcpy(key, name, name_len); +// key[name_len] = '\0'; +// +// XXH64_hash_t hash = XXH3_64bits(name, name_len); +// SIMPLE_HASHTABLE_SLOT_DICTIONARY *sl = simple_hashtable_get_slot_DICTIONARY(ht, hash, key, true); +// sl->hash = hash; // we will need it in insert later - it is ok to overwrite - it is the same already +// return sl; +//} +// +//static inline DICTIONARY_ITEM *hashtable_insert_handle_to_item_hashtable(DICTIONARY *dict, void *handle) { +// (void)dict; +// SIMPLE_HASHTABLE_SLOT_DICTIONARY *sl = handle; +// DICTIONARY_ITEM *item = SIMPLE_HASHTABLE_SLOT_DATA(sl); +// return item; +//} +// +//static inline void hashtable_set_item_hashtable(DICTIONARY *dict, void *handle, DICTIONARY_ITEM *item) { +// SIMPLE_HASHTABLE_DICTIONARY *ht = dict->index.JudyHSArray; +// SIMPLE_HASHTABLE_SLOT_DICTIONARY *sl = handle; +// simple_hashtable_set_slot_DICTIONARY(ht, sl, sl->hash, item); +//} +// +//static inline int hashtable_delete_hashtable(DICTIONARY *dict, const char *name, size_t name_len, DICTIONARY_ITEM *item_to_delete) { +// (void)item_to_delete; +// SIMPLE_HASHTABLE_DICTIONARY *ht = dict->index.JudyHSArray; +// +// char key[name_len+1]; +// memcpy(key, name, name_len); +// key[name_len] = '\0'; +// +// XXH64_hash_t hash = XXH3_64bits(name, name_len); +// SIMPLE_HASHTABLE_SLOT_DICTIONARY *sl = simple_hashtable_get_slot_DICTIONARY(ht, hash, key, false); +// DICTIONARY_ITEM *item = SIMPLE_HASHTABLE_SLOT_DATA(sl); +// if(!item) return 0; // return not-found +// +// simple_hashtable_del_slot_DICTIONARY(ht, sl); +// return 1; // return deleted +//} +// +//static inline DICTIONARY_ITEM *hashtable_get_hashtable(DICTIONARY *dict, const char *name, size_t name_len) { +// SIMPLE_HASHTABLE_DICTIONARY *ht = dict->index.JudyHSArray; +// if(unlikely(!ht)) return NULL; +// +// char key[name_len+1]; +// memcpy(key, name, name_len); +// key[name_len] = '\0'; +// +// XXH64_hash_t hash = XXH3_64bits(name, name_len); +// SIMPLE_HASHTABLE_SLOT_DICTIONARY *sl = simple_hashtable_get_slot_DICTIONARY(ht, 
hash, key, true); +// return SIMPLE_HASHTABLE_SLOT_DATA(sl); +//} // ---------------------------------------------------------------------------- // hashtable operations with Judy @@ -201,40 +201,44 @@ static inline DICTIONARY_ITEM *hashtable_get_judy(DICTIONARY *dict, const char * // select the right hashtable static inline size_t hashtable_init_unsafe(DICTIONARY *dict) { - if(dict->options & DICT_OPTION_INDEX_JUDY) - return hashtable_init_judy(dict); - else - return hashtable_init_hashtable(dict); + return hashtable_init_judy(dict); +// if(dict->options & DICT_OPTION_INDEX_JUDY) +// return hashtable_init_judy(dict); +// else +// return hashtable_init_hashtable(dict); } static inline size_t hashtable_destroy_unsafe(DICTIONARY *dict) { pointer_destroy_index(dict); - if(dict->options & DICT_OPTION_INDEX_JUDY) - return hashtable_destroy_judy(dict); - else - return hashtable_destroy_hashtable(dict); +// if(dict->options & DICT_OPTION_INDEX_JUDY) + return hashtable_destroy_judy(dict); +// else +// return hashtable_destroy_hashtable(dict); } static inline void *hashtable_insert_unsafe(DICTIONARY *dict, const char *name, size_t name_len) { - if(dict->options & DICT_OPTION_INDEX_JUDY) - return hashtable_insert_judy(dict, name, name_len); - else - return hashtable_insert_hashtable(dict, name, name_len); + return hashtable_insert_judy(dict, name, name_len); +// if(dict->options & DICT_OPTION_INDEX_JUDY) +// return hashtable_insert_judy(dict, name, name_len); +// else +// return hashtable_insert_hashtable(dict, name, name_len); } static inline DICTIONARY_ITEM *hashtable_insert_handle_to_item_unsafe(DICTIONARY *dict, void *handle) { - if(dict->options & DICT_OPTION_INDEX_JUDY) - return hashtable_insert_handle_to_item_judy(dict, handle); - else - return hashtable_insert_handle_to_item_hashtable(dict, handle); + return hashtable_insert_handle_to_item_judy(dict, handle); +// if(dict->options & DICT_OPTION_INDEX_JUDY) +// return hashtable_insert_handle_to_item_judy(dict, handle); +// else +// return hashtable_insert_handle_to_item_hashtable(dict, handle); } static inline int hashtable_delete_unsafe(DICTIONARY *dict, const char *name, size_t name_len, DICTIONARY_ITEM *item) { - if(dict->options & DICT_OPTION_INDEX_JUDY) - return hashtable_delete_judy(dict, name, name_len, item); - else - return hashtable_delete_hashtable(dict, name, name_len, item); + return hashtable_delete_judy(dict, name, name_len, item); +// if(dict->options & DICT_OPTION_INDEX_JUDY) +// return hashtable_delete_judy(dict, name, name_len, item); +// else +// return hashtable_delete_hashtable(dict, name, name_len, item); } static inline DICTIONARY_ITEM *hashtable_get_unsafe(DICTIONARY *dict, const char *name, size_t name_len) { @@ -242,10 +246,11 @@ static inline DICTIONARY_ITEM *hashtable_get_unsafe(DICTIONARY *dict, const char DICTIONARY_ITEM *item; - if(dict->options & DICT_OPTION_INDEX_JUDY) - item = hashtable_get_judy(dict, name, name_len); - else - item = hashtable_get_hashtable(dict, name, name_len); + item = hashtable_get_judy(dict, name, name_len); +// if(dict->options & DICT_OPTION_INDEX_JUDY) +// item = hashtable_get_judy(dict, name, name_len); +// else +// item = hashtable_get_hashtable(dict, name, name_len); if(item) pointer_check(dict, item); @@ -254,10 +259,11 @@ static inline DICTIONARY_ITEM *hashtable_get_unsafe(DICTIONARY *dict, const char } static inline void hashtable_set_item_unsafe(DICTIONARY *dict, void *handle, DICTIONARY_ITEM *item) { - if(dict->options & DICT_OPTION_INDEX_JUDY) - hashtable_set_item_judy(dict, 
handle, item); - else - hashtable_set_item_hashtable(dict, handle, item); + hashtable_set_item_judy(dict, handle, item); +// if(dict->options & DICT_OPTION_INDEX_JUDY) +// hashtable_set_item_judy(dict, handle, item); +// else +// hashtable_set_item_hashtable(dict, handle, item); } #endif //NETDATA_DICTIONARY_HASHTABLE_H diff --git a/src/libnetdata/dictionary/dictionary.c b/src/libnetdata/dictionary/dictionary.c index 9d50ed62c..ebe67269a 100644 --- a/src/libnetdata/dictionary/dictionary.c +++ b/src/libnetdata/dictionary/dictionary.c @@ -318,10 +318,11 @@ static void dictionary_queue_for_destruction(DICTIONARY *dict) { } void cleanup_destroyed_dictionaries(void) { - if(!dictionaries_waiting_to_be_destroyed) - return; - netdata_mutex_lock(&dictionaries_waiting_to_be_destroyed_mutex); + if (!dictionaries_waiting_to_be_destroyed) { + netdata_mutex_unlock(&dictionaries_waiting_to_be_destroyed_mutex); + return; + } DICTIONARY *dict, *last = NULL, *next = NULL; for(dict = dictionaries_waiting_to_be_destroyed; dict ; dict = next) { @@ -497,8 +498,8 @@ static DICTIONARY *dictionary_create_internal(DICT_OPTIONS options, struct dicti else dict->value_aral = NULL; - if(!(dict->options & (DICT_OPTION_INDEX_JUDY|DICT_OPTION_INDEX_HASHTABLE))) - dict->options |= DICT_OPTION_INDEX_JUDY; +// if(!(dict->options & (DICT_OPTION_INDEX_JUDY|DICT_OPTION_INDEX_HASHTABLE))) + dict->options |= DICT_OPTION_INDEX_JUDY; size_t dict_size = 0; dict_size += sizeof(DICTIONARY); diff --git a/src/libnetdata/dictionary/dictionary.h b/src/libnetdata/dictionary/dictionary.h index 231fbfebd..3d041018d 100644 --- a/src/libnetdata/dictionary/dictionary.h +++ b/src/libnetdata/dictionary/dictionary.h @@ -59,7 +59,7 @@ typedef enum __attribute__((packed)) dictionary_options { DICT_OPTION_ADD_IN_FRONT = (1 << 4), // add dictionary items at the front of the linked list (default: at the end) DICT_OPTION_FIXED_SIZE = (1 << 5), // the items of the dictionary have a fixed size DICT_OPTION_INDEX_JUDY = (1 << 6), // the default, if no other indexing is set - DICT_OPTION_INDEX_HASHTABLE = (1 << 7), // use SIMPLE_HASHTABLE for indexing +// DICT_OPTION_INDEX_HASHTABLE = (1 << 7), // use SIMPLE_HASHTABLE for indexing } DICT_OPTIONS; struct dictionary_stats { @@ -299,7 +299,8 @@ typedef DICTFE_CONST struct dictionary_foreach { #define dfe_start_rw(dict, value, mode) \ do { \ - DICTFE value ## _dfe = {}; \ + /* automatically cleanup DFE, to allow using return from within the loop */ \ + DICTFE _cleanup_(dictionary_foreach_done) value ## _dfe = {}; \ (void)(value); /* needed to avoid warning when looping without using this */ \ for((value) = dictionary_foreach_start_rw(&value ## _dfe, (dict), (mode)); \ (value ## _dfe.item) || (value) ; \ @@ -308,7 +309,6 @@ typedef DICTFE_CONST struct dictionary_foreach { #define dfe_done(value) \ } \ - dictionary_foreach_done(&value ## _dfe); \ } while(0) #define dfe_unlock(value) dictionary_foreach_unlock(&value ## _dfe) diff --git a/src/libnetdata/ebpf/README.md b/src/libnetdata/ebpf/README.md index 8d9edb076..17fc13046 100644 --- a/src/libnetdata/ebpf/README.md +++ b/src/libnetdata/ebpf/README.md @@ -1,12 +1,3 @@ - - # eBPF library Netdata's eBPF library supports the [eBPF collector](/src/collectors/ebpf.plugin/README.md). 
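The dfe_start_rw() change above relies on the GCC/Clang cleanup attribute: the
DICTFE iterator is now declared with _cleanup_(dictionary_foreach_done), so the
release runs on every exit path from the loop's scope and dfe_done() no longer
needs to call dictionary_foreach_done() itself. A minimal sketch of the idiom
this enables (illustrative; my_dict and struct my_type are assumptions):

    struct my_type *value;
    dfe_start_read(my_dict, value) {
        if (value->interesting)
            return value;   // now safe: the cleanup releases the iterator
    }
    dfe_done(value);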
diff --git a/src/libnetdata/ebpf/ebpf.c b/src/libnetdata/ebpf/ebpf.c index 4e7c85943..27042a794 100644 --- a/src/libnetdata/ebpf/ebpf.c +++ b/src/libnetdata/ebpf/ebpf.c @@ -1014,7 +1014,7 @@ int ebpf_load_config(struct config *config, char *filename) } -static netdata_run_mode_t ebpf_select_mode(char *mode) +static netdata_run_mode_t ebpf_select_mode(const char *mode) { if (!strcasecmp(mode,EBPF_CFG_LOAD_MODE_RETURN )) return MODE_RETURN; @@ -1041,7 +1041,7 @@ static void ebpf_select_mode_string(char *output, size_t len, netdata_run_mode_t * * @return It returns the value to be used. */ -netdata_ebpf_load_mode_t epbf_convert_string_to_load_mode(char *str) +netdata_ebpf_load_mode_t epbf_convert_string_to_load_mode(const char *str) { if (!strcasecmp(str, EBPF_CFG_CORE_PROGRAM)) return EBPF_LOAD_CORE; @@ -1094,7 +1094,7 @@ static char *ebpf_convert_collect_pid_to_string(netdata_apps_level_t level) * * @return it returns the level associated to the string or default when it is a wrong value */ -netdata_apps_level_t ebpf_convert_string_to_apps_level(char *str) +netdata_apps_level_t ebpf_convert_string_to_apps_level(const char *str) { if (!strcasecmp(str, EBPF_CFG_PID_REAL_PARENT)) return NETDATA_APPS_LEVEL_REAL_PARENT; @@ -1114,7 +1114,7 @@ netdata_apps_level_t ebpf_convert_string_to_apps_level(char *str) * @param str value read from configuration file. * @param lmode load mode used by collector. */ -netdata_ebpf_program_loaded_t ebpf_convert_core_type(char *str, netdata_run_mode_t lmode) +netdata_ebpf_program_loaded_t ebpf_convert_core_type(const char *str, netdata_run_mode_t lmode) { if (!strcasecmp(str, EBPF_CFG_ATTACH_TRACEPOINT)) return EBPF_LOAD_TRACEPOINT; @@ -1174,7 +1174,7 @@ struct btf *ebpf_parse_btf_file(const char *filename) * @param path is the fullpath * @param filename is the file inside BTF path. 
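 * @return the loaded BTF structure, or NULL when the file cannot be parsed.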
*/ -struct btf *ebpf_load_btf_file(char *path, char *filename) +struct btf *ebpf_load_btf_file(const char *path, const char *filename) { char fullpath[PATH_MAX + 1]; snprintfz(fullpath, PATH_MAX, "%s/%s", path, filename); @@ -1299,7 +1299,7 @@ void ebpf_update_module_using_config(ebpf_module_t *modules, netdata_ebpf_load_m { char default_value[EBPF_MAX_MODE_LENGTH + 1]; ebpf_select_mode_string(default_value, EBPF_MAX_MODE_LENGTH, modules->mode); - char *load_mode = appconfig_get(modules->cfg, EBPF_GLOBAL_SECTION, EBPF_CFG_LOAD_MODE, default_value); + const char *load_mode = appconfig_get(modules->cfg, EBPF_GLOBAL_SECTION, EBPF_CFG_LOAD_MODE, default_value); modules->mode = ebpf_select_mode(load_mode); modules->update_every = (int)appconfig_get_number(modules->cfg, EBPF_GLOBAL_SECTION, @@ -1318,17 +1318,17 @@ void ebpf_update_module_using_config(ebpf_module_t *modules, netdata_ebpf_load_m EBPF_CFG_LIFETIME, EBPF_DEFAULT_LIFETIME); char *value = ebpf_convert_load_mode_to_string(modules->load & NETDATA_EBPF_LOAD_METHODS); - char *type_format = appconfig_get(modules->cfg, EBPF_GLOBAL_SECTION, EBPF_CFG_TYPE_FORMAT, value); + const char *type_format = appconfig_get(modules->cfg, EBPF_GLOBAL_SECTION, EBPF_CFG_TYPE_FORMAT, value); netdata_ebpf_load_mode_t load = epbf_convert_string_to_load_mode(type_format); load = ebpf_select_load_mode(btf_file, load, kver, is_rh); modules->load = origin | load; - char *core_attach = appconfig_get(modules->cfg, EBPF_GLOBAL_SECTION, EBPF_CFG_CORE_ATTACH, EBPF_CFG_ATTACH_TRAMPOLINE); + const char *core_attach = appconfig_get(modules->cfg, EBPF_GLOBAL_SECTION, EBPF_CFG_CORE_ATTACH, EBPF_CFG_ATTACH_TRAMPOLINE); netdata_ebpf_program_loaded_t fill_lm = ebpf_convert_core_type(core_attach, modules->mode); ebpf_update_target_with_conf(modules, fill_lm); value = ebpf_convert_collect_pid_to_string(modules->apps_level); - char *collect_pid = appconfig_get(modules->cfg, EBPF_GLOBAL_SECTION, EBPF_CFG_COLLECT_PID, value); + const char *collect_pid = appconfig_get(modules->cfg, EBPF_GLOBAL_SECTION, EBPF_CFG_COLLECT_PID, value); modules->apps_level = ebpf_convert_string_to_apps_level(collect_pid); modules->maps_per_core = appconfig_get_boolean(modules->cfg, EBPF_GLOBAL_SECTION, EBPF_CFG_MAPS_PER_CORE, diff --git a/src/libnetdata/ebpf/ebpf.h b/src/libnetdata/ebpf/ebpf.h index 1c612ad32..d65dc2205 100644 --- a/src/libnetdata/ebpf/ebpf.h +++ b/src/libnetdata/ebpf/ebpf.h @@ -470,13 +470,13 @@ int ebpf_disable_tracing_values(char *subsys, char *eventname); // BTF helpers #define NETDATA_EBPF_MAX_SYSCALL_LENGTH 255 -netdata_ebpf_load_mode_t epbf_convert_string_to_load_mode(char *str); -netdata_ebpf_program_loaded_t ebpf_convert_core_type(char *str, netdata_run_mode_t lmode); +netdata_ebpf_load_mode_t epbf_convert_string_to_load_mode(const char *str); +netdata_ebpf_program_loaded_t ebpf_convert_core_type(const char *str, netdata_run_mode_t lmode); void ebpf_select_host_prefix(char *output, size_t length, char *syscall, int kver); #ifdef LIBBPF_MAJOR_VERSION void ebpf_adjust_thread_load(ebpf_module_t *mod, struct btf *file); struct btf *ebpf_parse_btf_file(const char *filename); -struct btf *ebpf_load_btf_file(char *path, char *filename); +struct btf *ebpf_load_btf_file(const char *path, const char *filename); int ebpf_is_function_inside_btf(struct btf *file, char *function); void ebpf_update_map_type(struct bpf_map *map, ebpf_local_maps_t *w); void ebpf_define_map_type(ebpf_local_maps_t *maps, int maps_per_core, int kver); @@ -492,4 +492,13 @@ void ebpf_send_data_aral_chart(ARAL 
*memory, ebpf_module_t *em); int ebpf_can_plugin_load_code(int kver, char *plugin_name); int ebpf_adjust_memory_limit(); +#ifdef LIBBPF_MAJOR_VERSION +static inline int netdata_silent_libbpf_vfprintf(enum libbpf_print_level level __maybe_unused, + const char *format __maybe_unused, + va_list args __maybe_unused) +{ + return 0; +} +#endif + #endif /* NETDATA_EBPF_H */ diff --git a/src/libnetdata/eval/eval.c b/src/libnetdata/eval/eval.c index bacac9c17..7c587fa6d 100644 --- a/src/libnetdata/eval/eval.c +++ b/src/libnetdata/eval/eval.c @@ -387,29 +387,63 @@ static inline void skip_spaces(const char **string) { *string = s; } -// what character can appear just after an operator keyword -// like NOT AND OR ? -static inline int isoperatorterm_word(const char s) { - if(isspace(s) || s == '(' || s == '$' || s == '!' || s == '-' || s == '+' || isdigit(s) || !s) - return 1; - - return 0; +//static inline int old_isoperatorterm_word(const char s) { +// if (isspace(s) || s == '(' || s == '$' || s == '!' || s == '-' || s == '+' || isdigit(s) || !s) +// return 1; +// return 0; +//} +// +//static inline int old_isoperatorterm_symbol(const char s) { +// if (old_isoperatorterm_word(s) || isalpha(s)) +// return 1; +// return 0; +//} +// +//// return 1 if the character should never appear in a variable +//static inline int old_isvariableterm(const char s) { +// if (isalnum(s) || s == '.' || s == '_') +// return 0; +// return 1; +//} + +static inline bool is_operator_first_symbol_or_space(const char s) { + return ( + isspace((uint8_t)s) || !s || + s == '&' || s == '|' || s == '!' || s == '>' || s == '<' || + s == '=' || s == '+' || s == '-' || s == '*' || s == '/' || s == '?'); +} + +// what character can appear just after the operators: NOT, AND, OR +static inline bool is_valid_after_operator_word(const char s) { + bool rc = isspace((uint8_t)s) || s == '(' || s == '$' || s == '!' || s == '-' || s == '+' || isdigit((uint8_t)s) || !s; +// bool old = old_isoperatorterm_word(s); +// if(rc != old) { +// int x = 0; +// x++; +// } + return rc; } // what character can appear just after an operator symbol? -static inline int isoperatorterm_symbol(const char s) { - if(isoperatorterm_word(s) || isalpha(s)) - return 1; - - return 0; -} - -// return 1 if the character should never appear in a variable -static inline int isvariableterm(const char s) { - if(isalnum(s) || s == '.' 
|| s == '_') - return 0; - - return 1; +static inline bool is_valid_after_operator_symbol(const char s) { + bool rc = is_valid_after_operator_word(s) || is_operator_first_symbol_or_space(s); +// bool old = old_isoperatorterm_symbol(s); +// if(rc != old) { +// int x = 0; +// x++; +// } + return rc; +} + +// return true if the character may appear in a variable name +static inline bool is_valid_variable_character(const char s) { + bool rc = !is_operator_first_symbol_or_space(s) && s != ')' && s != '}'; +// bool old = !old_isvariableterm(s); +// if(rc != old) { +// int x = 0; +// x++; +// } + return rc; } // ---------------------------------------------------------------------------- @@ -419,13 +453,14 @@ static inline int parse_and(const char **string) { const char *s = *string; // AND - if((s[0] == 'A' || s[0] == 'a') && (s[1] == 'N' || s[1] == 'n') && (s[2] == 'D' || s[2] == 'd') && isoperatorterm_word(s[3])) { + if((s[0] == 'A' || s[0] == 'a') && (s[1] == 'N' || s[1] == 'n') && (s[2] == 'D' || s[2] == 'd') && + is_valid_after_operator_word(s[3])) { *string = &s[4]; return 1; } // && - if(s[0] == '&' && s[1] == '&' && isoperatorterm_symbol(s[2])) { + if(s[0] == '&' && s[1] == '&' && is_valid_after_operator_symbol(s[2])) { *string = &s[2]; return 1; } @@ -437,13 +472,13 @@ static inline int parse_or(const char **string) { const char *s = *string; // OR - if((s[0] == 'O' || s[0] == 'o') && (s[1] == 'R' || s[1] == 'r') && isoperatorterm_word(s[2])) { + if((s[0] == 'O' || s[0] == 'o') && (s[1] == 'R' || s[1] == 'r') && is_valid_after_operator_word(s[2])) { *string = &s[3]; return 1; } // || - if(s[0] == '|' && s[1] == '|' && isoperatorterm_symbol(s[2])) { + if(s[0] == '|' && s[1] == '|' && is_valid_after_operator_symbol(s[2])) { *string = &s[2]; return 1; } @@ -455,7 +490,7 @@ static inline int parse_greater_than_or_equal(const char **string) { const char *s = *string; // >= - if(s[0] == '>' && s[1] == '=' && isoperatorterm_symbol(s[2])) { + if(s[0] == '>' && s[1] == '=' && is_valid_after_operator_symbol(s[2])) { *string = &s[2]; return 1; } @@ -467,7 +502,7 @@ static inline int parse_less_than_or_equal(const char **string) { const char *s = *string; // <= - if (s[0] == '<' && s[1] == '=' && isoperatorterm_symbol(s[2])) { + if (s[0] == '<' && s[1] == '=' && is_valid_after_operator_symbol(s[2])) { *string = &s[2]; return 1; } @@ -479,7 +514,7 @@ static inline int parse_greater(const char **string) { const char *s = *string; // > - if(s[0] == '>' && isoperatorterm_symbol(s[1])) { + if(s[0] == '>' && is_valid_after_operator_symbol(s[1])) { *string = &s[1]; return 1; } @@ -491,7 +526,7 @@ static inline int parse_less(const char **string) { const char *s = *string; // < - if(s[0] == '<' && isoperatorterm_symbol(s[1])) { + if(s[0] == '<' && is_valid_after_operator_symbol(s[1])) { *string = &s[1]; return 1; } @@ -503,13 +538,13 @@ static inline int parse_equal(const char **string) { const char *s = *string; // == - if(s[0] == '=' && s[1] == '=' && isoperatorterm_symbol(s[2])) { + if(s[0] == '=' && s[1] == '=' && is_valid_after_operator_symbol(s[2])) { *string = &s[2]; return 1; } // = - if(s[0] == '=' && isoperatorterm_symbol(s[1])) { + if(s[0] == '=' && is_valid_after_operator_symbol(s[1])) { *string = &s[1]; return 1; } @@ -521,13 +556,13 @@ static inline int parse_not_equal(const char **string) { const char *s = *string; // != - if(s[0] == '!' && s[1] == '=' && isoperatorterm_symbol(s[2])) { + if(s[0] == '!' 
&& s[1] == '=' && is_valid_after_operator_symbol(s[2])) { *string = &s[2]; return 1; } // <> - if(s[0] == '<' && s[1] == '>' && isoperatorterm_symbol(s[2])) { + if(s[0] == '<' && s[1] == '>' && is_valid_after_operator_symbol(s[2])) { *string = &s[2]; } @@ -538,7 +573,8 @@ static inline int parse_not(const char **string) { const char *s = *string; // NOT - if((s[0] == 'N' || s[0] == 'n') && (s[1] == 'O' || s[1] == 'o') && (s[2] == 'T' || s[2] == 't') && isoperatorterm_word(s[3])) { + if((s[0] == 'N' || s[0] == 'n') && (s[1] == 'O' || s[1] == 'o') && (s[2] == 'T' || s[2] == 't') && + is_valid_after_operator_word(s[3])) { *string = &s[3]; return 1; } @@ -555,7 +591,7 @@ static inline int parse_multiply(const char **string) { const char *s = *string; // * - if(s[0] == '*' && isoperatorterm_symbol(s[1])) { + if(s[0] == '*' && is_valid_after_operator_symbol(s[1])) { *string = &s[1]; return 1; } @@ -567,7 +603,7 @@ static inline int parse_divide(const char **string) { const char *s = *string; // / - if(s[0] == '/' && isoperatorterm_symbol(s[1])) { + if(s[0] == '/' && is_valid_after_operator_symbol(s[1])) { *string = &s[1]; return 1; } @@ -579,7 +615,7 @@ static inline int parse_minus(const char **string) { const char *s = *string; // - - if(s[0] == '-' && isoperatorterm_symbol(s[1])) { + if(s[0] == '-' && is_valid_after_operator_symbol(s[1])) { *string = &s[1]; return 1; } @@ -591,7 +627,7 @@ static inline int parse_plus(const char **string) { const char *s = *string; // + - if(s[0] == '+' && isoperatorterm_symbol(s[1])) { + if(s[0] == '+' && is_valid_after_operator_symbol(s[1])) { *string = &s[1]; return 1; } @@ -646,7 +682,7 @@ static inline int parse_variable(const char **string, char *buffer, size_t len) else { // $variable_name - while (*s && !isvariableterm(*s) && i < len) + while (*s && is_valid_variable_character(*s) && i < len) buffer[i++] = *s++; } @@ -1219,7 +1255,7 @@ void expression_hardcode_variable(EVAL_EXPRESSION *expression, STRING *variable, } if (s) { - if (s == s1 && (isalnum((uint8_t)s[len]) || s[len] == '_')) { + if (s == s1 && is_valid_variable_character(s[len])) { // Move past the variable if it's part of a larger word. 
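The rewritten terminator checks above decide whether a keyword such as NOT, AND or OR is really an operator, or merely the prefix of a longer identifier. A self-contained sketch of the word-operator rule (the predicate is re-declared here for illustration, since the original is static to eval.c):

    #include <assert.h>
    #include <ctype.h>
    #include <stdbool.h>
    #include <stdint.h>

    // same rule as is_valid_after_operator_word() above
    static bool example_after_operator_word(const char s) {
        return isspace((uint8_t)s) || s == '(' || s == '$' || s == '!' ||
               s == '-' || s == '+' || isdigit((uint8_t)s) || !s;
    }

    int main(void) {
        assert(example_after_operator_word(' '));   // "$a AND $b": AND is an operator
        assert(!example_after_operator_word('R'));  // "$ANDROID": just a variable name
        return 0;
    }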
source_ptr = s + len; continue; diff --git a/src/libnetdata/facets/facets.c b/src/libnetdata/facets/facets.c index 3c746cbc3..230e03de5 100644 --- a/src/libnetdata/facets/facets.c +++ b/src/libnetdata/facets/facets.c @@ -102,25 +102,25 @@ static inline bool is_valid_string_hash(const char *s) { // hashtable for FACET_VALUE // cleanup hashtable defines -#include "../../libnetdata/simple_hashtable_undef.h" +#include "../simple_hashtable/simple_hashtable_undef.h" struct facet_value; // #define SIMPLE_HASHTABLE_SORT_FUNCTION compare_facet_value #define SIMPLE_HASHTABLE_VALUE_TYPE struct facet_value #define SIMPLE_HASHTABLE_NAME _VALUE -#include "../simple_hashtable.h" +#include "../simple_hashtable/simple_hashtable.h" // ---------------------------------------------------------------------------- // hashtable for FACET_KEY // cleanup hashtable defines -#include "../../libnetdata/simple_hashtable_undef.h" +#include "../simple_hashtable/simple_hashtable_undef.h" struct facet_key; // #define SIMPLE_HASHTABLE_SORT_FUNCTION compare_facet_key #define SIMPLE_HASHTABLE_VALUE_TYPE struct facet_key #define SIMPLE_HASHTABLE_NAME _KEY -#include "../simple_hashtable.h" +#include "../simple_hashtable/simple_hashtable.h" // ---------------------------------------------------------------------------- @@ -222,6 +222,7 @@ struct facets { SIMPLE_PATTERN *visible_keys; SIMPLE_PATTERN *excluded_keys; SIMPLE_PATTERN *included_keys; + bool all_keys_included_by_default; FACETS_OPTIONS options; @@ -255,6 +256,7 @@ struct facets { } keys_in_row; FACET_ROW *base; // double linked list of the selected facets rows + FACET_ROW_BIN_DATA bin_data; uint32_t items_to_return; uint32_t max_items_to_return; @@ -328,6 +330,10 @@ struct facets { struct { size_t searches; } fts; + + struct { + size_t bin_data_inflight; + }; } operations; struct { @@ -353,6 +359,27 @@ uint32_t facets_rows(FACETS *facets) { return facets->items_to_return; } +static const char *facets_key_id(FACET_KEY *k) { + if(k->facets->options & FACETS_OPTION_HASH_IDS) + return hash_to_static_string(k->hash); + else + return k->name ? k->name : hash_to_static_string(k->hash); +} + +static const char *facets_key_value_id(FACET_KEY *k, FACET_VALUE *v) { + if(k->facets->options & FACETS_OPTION_HASH_IDS) + return hash_to_static_string(v->hash); + else + return v->name ? v->name : hash_to_static_string(v->hash); +} + +void facets_use_hashes_for_ids(FACETS *facets, bool set) { + if(set) + facets->options |= FACETS_OPTION_HASH_IDS; + else + facets->options &= ~(FACETS_OPTION_HASH_IDS); +} + // ---------------------------------------------------------------------------- static void facets_row_free(FACETS *facets __maybe_unused, FACET_ROW *row); @@ -570,12 +597,12 @@ static inline void FACET_VALUE_ADD_CURRENT_VALUE_TO_INDEX(FACET_KEY *k) { k->facets->operations.values.indexed++; } -static inline void FACET_VALUE_ADD_OR_UPDATE_SELECTED(FACET_KEY *k, FACETS_HASH hash) { +static inline void FACET_VALUE_ADD_OR_UPDATE_SELECTED(FACET_KEY *k, const char *name, FACETS_HASH hash) { FACET_VALUE tv = { .hash = hash, .selected = true, - .name = NULL, - .name_len = 0, + .name = name, + .name_len = name ? strlen(name) : 0, }; FACET_VALUE_ADD_TO_INDEX(k, &tv); } @@ -643,6 +670,35 @@ bool facets_key_name_value_length_is_selected(FACETS *facets, const char *key, s return (v && v->selected) ? 
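facets_key_id() and facets_key_value_id() above make the wire format switchable: with FACETS_OPTION_HASH_IDS set, keys and values are identified by their hash; otherwise by their plain name, falling back to the hash when no name is known. A hedged usage sketch ('facets' assumed to exist):

    // GET requests keep the old hash-based ids,
    // POST requests switch to plain field names
    facets_use_hashes_for_ids(facets, true);   // ids are hashes
    facets_use_hashes_for_ids(facets, false);  // ids are names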
true : false; } +bool facets_foreach_selected_value_in_key(FACETS *facets, const char *key, size_t key_length, DICTIONARY *used_hashes_registry, facets_foreach_selected_value_in_key_t cb, void *data) { + FACETS_HASH hash = FACETS_HASH_FUNCTION(key, key_length); + FACET_KEY *k = FACETS_KEY_GET_FROM_INDEX(facets, hash); + if(!k || k->default_selected_for_values) + return false; + + size_t selected = 0; + for(FACET_VALUE *v = k->values.ll; v ;v = v->next) { + if(!v->selected) continue; + + const char *value = v->name; + if(!value) { + if(used_hashes_registry) { + char hash_str[FACET_STRING_HASH_SIZE]; + facets_hash_to_str(v->hash, hash_str); + value = dictionary_get(used_hashes_registry, hash_str); + } + + if(!value) + return false; + } + + if(!cb(facets, selected++, k->name, value, data)) + return false; + } + + return selected > 0; +} + void facets_add_possible_value_name_to_key(FACETS *facets, const char *key, size_t key_length, const char *value, size_t value_length) { FACETS_HASH hash = FACETS_HASH_FUNCTION(key, key_length); FACET_KEY *k = FACETS_KEY_GET_FROM_INDEX(facets, hash); @@ -691,7 +747,7 @@ static inline FACET_KEY *FACETS_KEY_CREATE(FACETS *facets, FACETS_HASH hash, con k->current_value.b = buffer_create(sizeof(FACET_VALUE_UNSET), NULL); k->default_selected_for_values = true; - if(!(k->options & FACET_KEY_OPTION_REORDER)) + if(unlikely((k->options & (FACET_KEY_OPTION_REORDER | FACET_KEY_OPTION_REORDER_DONE)) == 0)) k->order = facets->order++; if((k->options & FACET_KEY_OPTION_FTS) || (facets->options & FACETS_OPTION_ALL_KEYS_FTS)) @@ -724,10 +780,11 @@ static inline FACET_KEY *FACETS_KEY_ADD_TO_INDEX(FACETS *facets, FACETS_HASH has FACET_KEY *k = SIMPLE_HASHTABLE_SLOT_DATA(slot); facet_key_set_name(k, name, name_length); + k->options |= options; - if(unlikely(k->options & FACET_KEY_OPTION_REORDER)) { + if(unlikely((k->options & (FACET_KEY_OPTION_REORDER | FACET_KEY_OPTION_REORDER_DONE)) == FACET_KEY_OPTION_REORDER)) { k->order = facets->order++; - k->options &= ~FACET_KEY_OPTION_REORDER; + k->options |= FACET_KEY_OPTION_REORDER_DONE; } return k; @@ -1547,7 +1604,7 @@ static inline void facet_value_is_used(FACET_KEY *k, FACET_VALUE *v) { } static inline bool facets_key_is_facet(FACETS *facets, FACET_KEY *k) { - bool included = true, excluded = false, never = false; + bool included = facets->all_keys_included_by_default, excluded = false, never = false; if(k->options & (FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_NO_FACET | FACET_KEY_OPTION_NEVER_FACET)) { if(k->options & FACET_KEY_OPTION_FACET) { @@ -1590,10 +1647,40 @@ static inline bool facets_key_is_facet(FACETS *facets, FACET_KEY *k) { return false; } +// ---------------------------------------------------------------------------- +// bin_data management + +static inline void facets_row_bin_data_cleanup(FACETS *facets, FACET_ROW_BIN_DATA *bin_data) { + if(!bin_data->data) + return; + + bin_data->cleanup_cb(bin_data->data); + *bin_data = FACET_ROW_BIN_DATA_EMPTY; + + fatal_assert(facets->operations.bin_data_inflight > 0); + facets->operations.bin_data_inflight--; +} + +void facets_row_bin_data_set(FACETS *facets, void (*cleanup_cb)(void *data), void *data) { + // in case the caller tries to register bin_data multiple times + // for the same row. 
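facets_foreach_selected_value_in_key() above gives callers access to the currently selected values of a key without exposing the value list itself. A sketch of a conforming callback (the key name "PRIORITY" and the printing are made up for illustration):

    #include <stdio.h>
    #include <string.h>

    // return true to continue the iteration, false to stop early
    static bool example_print_selected(FACETS *facets __maybe_unused, size_t id,
                                       const char *key, const char *value,
                                       void *data __maybe_unused) {
        printf("selection %zu: %s = %s\n", id, key, value);
        return true;
    }

    // returns true only if at least one selected value was visited
    static bool example_dump(FACETS *facets, DICTIONARY *used_hashes_registry) {
        return facets_foreach_selected_value_in_key(
            facets, "PRIORITY", strlen("PRIORITY"),
            used_hashes_registry, example_print_selected, NULL);
    }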
+ facets_row_bin_data_cleanup(facets, &facets->bin_data); + + // set the new values + facets->bin_data.cleanup_cb = cleanup_cb; + facets->bin_data.data = data; + facets->operations.bin_data_inflight++; +} + +void *facets_row_bin_data_get(FACETS *facets __maybe_unused, FACET_ROW *row) { + return row->bin_data.data; +} + // ---------------------------------------------------------------------------- FACETS *facets_create(uint32_t items_to_return, FACETS_OPTIONS options, const char *visible_keys, const char *facet_keys, const char *non_facet_keys) { FACETS *facets = callocz(1, sizeof(FACETS)); + facets->all_keys_included_by_default = true; facets->options = options; FACETS_KEYS_INDEX_CREATE(facets); @@ -1616,6 +1703,8 @@ FACETS *facets_create(uint32_t items_to_return, FACETS_OPTIONS options, const ch } void facets_destroy(FACETS *facets) { + if(!facets) return; + dictionary_destroy(facets->accepted_params); FACETS_KEYS_INDEX_DESTROY(facets); simple_pattern_free(facets->visible_keys); @@ -1629,6 +1718,13 @@ void facets_destroy(FACETS *facets) { facets_row_free(facets, r); } + // in case the caller did not call facets_row_finished() + // on the last row. + facets_row_bin_data_cleanup(facets, &facets->bin_data); + + // make sure we didn't lose any data + fatal_assert(facets->operations.bin_data_inflight == 0); + freez(facets->histogram.chart); freez(facets); } @@ -1691,6 +1787,40 @@ void facets_enable_slice_mode(FACETS *facets) { facets->options |= FACETS_OPTION_DONT_SEND_EMPTY_VALUE_FACETS | FACETS_OPTION_SORT_FACETS_ALPHABETICALLY; } +void facets_reset_and_disable_all_facets(FACETS *facets) { + facets->all_keys_included_by_default = false; + + simple_pattern_free(facets->included_keys); + facets->included_keys = NULL; + +// We need this, because the exclusions are good for controlling which key can become a facet. +// The excluded ones are not offered for facets at all. 
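The bin_data hooks above let a caller attach an opaque payload to the row currently being built; facets takes ownership and runs the cleanup callback exactly once, whether the row is kept, reused or discarded. A hedged sketch of the intended call pattern ('facets', 'entry_ut' and 'row' assumed to exist; the payload type is made up):

    struct example_blob { char *bytes; };        // hypothetical payload

    static void example_blob_cleanup(void *data) {
        struct example_blob *b = data;
        freez(b->bytes);
        freez(b);
    }

    static void example_ingest_one_entry(FACETS *facets, usec_t entry_ut) {
        struct example_blob *b = callocz(1, sizeof(*b));
        facets_row_bin_data_set(facets, example_blob_cleanup, b);  // facets owns it now
        facets_row_finished(facets, entry_ut);   // kept rows carry the payload along
    }

    static void *example_render_row(FACETS *facets, FACET_ROW *row) {
        return facets_row_bin_data_get(facets, row);  // NULL if nothing was attached
    }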
+// simple_pattern_free(facets->excluded_keys); +// facets->excluded_keys = NULL; + + simple_pattern_free(facets->visible_keys); + facets->visible_keys = NULL; + + FACET_KEY *k; + foreach_key_in_facets(facets, k) { + k->options |= FACET_KEY_OPTION_NO_FACET; + k->options &= ~FACET_KEY_OPTION_FACET; + } + foreach_key_in_facets_done(k); +} + +inline FACET_KEY *facets_register_facet(FACETS *facets, const char *name, FACET_KEY_OPTIONS options) { + size_t name_length = strlen(name); + FACETS_HASH hash = FACETS_HASH_FUNCTION(name, name_length); + + FACET_KEY *k = FACETS_KEY_ADD_TO_INDEX(facets, hash, name, name_length, options); + k->options |= FACET_KEY_OPTION_FACET; + k->options &= ~FACET_KEY_OPTION_NO_FACET; + facet_key_late_init(facets, k); + + return k; +} + inline FACET_KEY *facets_register_facet_id(FACETS *facets, const char *key_id, FACET_KEY_OPTIONS options) { if(!is_valid_string_hash(key_id)) return NULL; @@ -1708,16 +1838,25 @@ inline FACET_KEY *facets_register_facet_id(FACETS *facets, const char *key_id, F return k; } -void facets_register_facet_id_filter(FACETS *facets, const char *key_id, char *value_id, FACET_KEY_OPTIONS options) { +void facets_register_facet_filter_id(FACETS *facets, const char *key_id, const char *value_id, FACET_KEY_OPTIONS options) { FACET_KEY *k = facets_register_facet_id(facets, key_id, options); if(k) { if(is_valid_string_hash(value_id)) { k->default_selected_for_values = false; - FACET_VALUE_ADD_OR_UPDATE_SELECTED(k, str_to_facets_hash(value_id)); + FACET_VALUE_ADD_OR_UPDATE_SELECTED(k, NULL, str_to_facets_hash(value_id)); } } } +void facets_register_facet_filter(FACETS *facets, const char *key, const char *value, FACET_KEY_OPTIONS options) { + FACET_KEY *k = facets_register_facet(facets, key, options); + if(k) { + FACETS_HASH hash = FACETS_HASH_FUNCTION(value, strlen(value)); + k->default_selected_for_values = false; + FACET_VALUE_ADD_OR_UPDATE_SELECTED(k, value, hash); + } +} + void facets_set_current_row_severity(FACETS *facets, FACET_ROW_SEVERITY severity) { facets->current_row.severity = severity; } @@ -1834,6 +1973,10 @@ void facets_add_key_value(FACETS *facets, const char *key, const char *value) { } void facets_add_key_value_length(FACETS *facets, const char *key, size_t key_len, const char *value, size_t value_len) { + if(!key || !*key || !key_len || !value || !*value || !value_len) + // adding empty values, makes the rows unmatched + return; + FACET_KEY *k = facets_register_key_name_length(facets, key, key_len, 0); k->current_value.raw = value; k->current_value.raw_len = value_len; @@ -1879,7 +2022,9 @@ static void facet_row_key_value_delete_callback(const DICTIONARY_ITEM *item __ma // FACET_ROW management static void facets_row_free(FACETS *facets __maybe_unused, FACET_ROW *row) { + facets_row_bin_data_cleanup(facets, &row->bin_data); dictionary_destroy(row->dict); + row->dict = NULL; freez(row); } @@ -1889,6 +2034,7 @@ static FACET_ROW *facets_row_create(FACETS *facets, usec_t usec, FACET_ROW *into if(into) { row = into; facets->operations.rows.reused++; + facets_row_bin_data_cleanup(facets, &row->bin_data); } else { row = callocz(1, sizeof(FACET_ROW)); @@ -1899,6 +2045,11 @@ static FACET_ROW *facets_row_create(FACETS *facets, usec_t usec, FACET_ROW *into facets->operations.rows.created++; } + // copy the bin_data to the row + // and forget about them in facets + row->bin_data = facets->bin_data; + facets->bin_data = FACET_ROW_BIN_DATA_EMPTY; + row->severity = facets->current_row.severity; row->usec = usec; @@ -2083,6 +2234,8 @@ static void 
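facets_register_facet_filter() and facets_register_facet_filter_id() above are the name-based and hash-based ways to pre-select a value for a key; the id variant silently ignores anything that is not a valid hash string. A sketch with made-up key/value names and hash ids:

    // POST path: plain names are available
    facets_register_facet_filter(facets, "UNIT", "ssh.service",
        FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_FTS | FACET_KEY_OPTION_REORDER);

    // GET path: only the hashed ids from a previous response are available
    facets_register_facet_filter_id(facets, "AbCdEfGhIjKl", "MnOpQrStUvWx",
        FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_FTS | FACET_KEY_OPTION_REORDER);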
facets_reset_keys_with_value_and_row(FACETS *facets) { facets->current_row.keys_matched_by_query_positive = 0; facets->current_row.keys_matched_by_query_negative = 0; facets->keys_in_row.used = 0; + + facets_row_bin_data_cleanup(facets, &facets->bin_data); } void facets_rows_begin(FACETS *facets) { @@ -2097,6 +2250,9 @@ void facets_rows_begin(FACETS *facets) { } bool facets_row_finished(FACETS *facets, usec_t usec) { +// char buf[RFC3339_MAX_LENGTH]; +// rfc3339_datetime_ut(buf, sizeof(buf), usec, 3, false); + facets->operations.rows.evaluated++; if(unlikely((facets->query && facets->keys_filtered_by_query && @@ -2207,11 +2363,11 @@ void facets_accepted_parameters_to_json_array(FACETS *facets, BUFFER *wb, bool w if(with_keys) { FACET_KEY *k; - foreach_key_in_facets(facets, k){ - if (!k->values.enabled) + foreach_key_in_facets(facets, k) { + if (!k->values.enabled || k->options & FACET_KEY_OPTION_HIDDEN) continue; - buffer_json_add_array_item_string(wb, hash_to_static_string(k->hash)); + buffer_json_add_array_item_string(wb, facets_key_id(k)); } foreach_key_in_facets_done(k); } @@ -2391,8 +2547,8 @@ static uint32_t facets_sort_and_reorder_values(FACET_KEY *k) { return ret; } -void facets_table_config(BUFFER *wb) { - buffer_json_member_add_boolean(wb, "show_ids", false); // do not show the column ids to the user +void facets_table_config(FACETS *facets, BUFFER *wb) { + buffer_json_member_add_boolean(wb, "show_ids", (facets->options & FACETS_OPTION_HASH_IDS) ? false : true); buffer_json_member_add_boolean(wb, "has_history", true); // enable date-time picker with after-before buffer_json_member_add_object(wb, "pagination"); @@ -2408,8 +2564,9 @@ void facets_table_config(BUFFER *wb) { void facets_report(FACETS *facets, BUFFER *wb, DICTIONARY *used_hashes_registry) { facets->report.used_hashes_registry = used_hashes_registry; + facets_table_config(facets, wb); + if(!(facets->options & FACETS_OPTION_DATA_ONLY)) { - facets_table_config(wb); facets_accepted_parameters_to_json_array(facets, wb, true); } @@ -2434,19 +2591,21 @@ void facets_report(FACETS *facets, BUFFER *wb, DICTIONARY *used_hashes_registry) CLEAN_BUFFER *tb = buffer_create(0, NULL); FACET_KEY *k; foreach_key_in_facets(facets, k) { - if(!k->values.enabled) + if(!k->values.enabled || k->options & FACET_KEY_OPTION_HIDDEN) continue; - if(!facets_sort_and_reorder_values(k)) - // no values for this key - continue; + facets_sort_and_reorder_values(k); buffer_json_add_array_item_object(wb); // key { - buffer_json_member_add_string(wb, "id", hash_to_static_string(k->hash)); - buffer_json_member_add_string(wb, "name", facets_key_name_cached(k - , facets->report.used_hashes_registry - )); + buffer_json_member_add_string( + wb, "id", facets_key_id(k)); + + buffer_json_member_add_string( + wb, "name", + facets_key_name_cached(k, facets->report.used_hashes_registry)); + + // buffer_json_member_add_string(wb, "raw", k->name); if(!k->order) k->order = facets->order++; buffer_json_member_add_uint64(wb, "order", k->order); @@ -2463,10 +2622,11 @@ void facets_report(FACETS *facets, BUFFER *wb, DICTIONARY *used_hashes_registry) buffer_json_add_array_item_object(wb); { - buffer_json_member_add_string(wb, "id", hash_to_static_string(v->hash)); + buffer_json_member_add_string(wb, "id", facets_key_value_id(k, v)); facets_key_value_transformed(facets, k, v, tb, FACETS_TRANSFORM_FACET); buffer_json_member_add_string(wb, "name", buffer_tostring(tb)); + // buffer_json_member_add_string(wb, "raw", v->name); buffer_json_member_add_uint64(wb, "count", 
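For orientation, the JSON emitted by the report loop above has roughly this shape per enabled key (values illustrative; "id" is the hash only when FACETS_OPTION_HASH_IDS is set, per facets_key_id() and facets_key_value_id()):

    /*
      { "id": "P3qV...", "name": "PRIORITY", "order": 7, ... }
        with one nested object per facet value:
      { "id": "Xk42...", "name": "6", "count": 123, "order": 1 }
    */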
v->final_facet_value_counter); buffer_json_member_add_uint64(wb, "order", v->order); } @@ -2517,38 +2677,40 @@ void facets_report(FACETS *facets, BUFFER *wb, DICTIONARY *used_hashes_registry) FACET_KEY *k; foreach_key_in_facets(facets, k) { - RRDF_FIELD_OPTIONS options = RRDF_FIELD_OPTS_WRAP; - bool visible = k->options & (FACET_KEY_OPTION_VISIBLE | FACET_KEY_OPTION_STICKY); + if(k->options & FACET_KEY_OPTION_HIDDEN) + continue; - if ((facets->options & FACETS_OPTION_ALL_FACETS_VISIBLE && k->values.enabled)) - visible = true; + RRDF_FIELD_OPTIONS options = RRDF_FIELD_OPTS_WRAP; + RRDF_FIELD_VISUAL visual = (k->options & FACET_KEY_OPTION_RICH_TEXT) ? RRDF_FIELD_VISUAL_RICH : RRDF_FIELD_VISUAL_VALUE; + RRDF_FIELD_TRANSFORM transform = RRDF_FIELD_TRANSFORM_NONE; - if (!visible) - visible = simple_pattern_matches(facets->visible_keys, k->name); + if (k->options & (FACET_KEY_OPTION_VISIBLE | FACET_KEY_OPTION_STICKY) || + ((facets->options & FACETS_OPTION_ALL_FACETS_VISIBLE) && k->values.enabled) || + simple_pattern_matches(facets->visible_keys, k->name)) + options |= RRDF_FIELD_OPTS_VISIBLE; - if (visible) - options |= RRDF_FIELD_OPTS_VISIBLE; + if (k->options & FACET_KEY_OPTION_MAIN_TEXT) + options |= RRDF_FIELD_OPTS_FULL_WIDTH | RRDF_FIELD_OPTS_WRAP; - if (k->options & FACET_KEY_OPTION_MAIN_TEXT) - options |= RRDF_FIELD_OPTS_FULL_WIDTH | RRDF_FIELD_OPTS_WRAP; + if (k->options & FACET_KEY_OPTION_EXPANDED_FILTER) + options |= RRDF_FIELD_OPTS_EXPANDED_FILTER; - if (k->options & FACET_KEY_OPTION_EXPANDED_FILTER) - options |= RRDF_FIELD_OPTS_EXPANDED_FILTER; + if (k->options & FACET_KEY_OPTION_PRETTY_XML) + transform = RRDF_FIELD_TRANSFORM_XML; - const char *hash_str = hash_to_static_string(k->hash); + const char *key_id = facets_key_id(k); - buffer_rrdf_table_add_field( - wb, field_id++, - hash_str, k->name ? k->name : hash_str, - RRDF_FIELD_TYPE_STRING, - (k->options & FACET_KEY_OPTION_RICH_TEXT) ? RRDF_FIELD_VISUAL_RICH : RRDF_FIELD_VISUAL_VALUE, - RRDF_FIELD_TRANSFORM_NONE, 0, NULL, NAN, - RRDF_FIELD_SORT_FIXED, - NULL, - RRDF_FIELD_SUMMARY_COUNT, - (k->options & FACET_KEY_OPTION_NEVER_FACET) ? RRDF_FIELD_FILTER_NONE : RRDF_FIELD_FILTER_FACET, - options, FACET_VALUE_UNSET); - } + buffer_rrdf_table_add_field( + wb, field_id++, + key_id, k->name ? k->name : key_id, + RRDF_FIELD_TYPE_STRING, + visual, transform, 0, NULL, NAN, + RRDF_FIELD_SORT_FIXED, + NULL, + RRDF_FIELD_SUMMARY_COUNT, + (k->options & FACET_KEY_OPTION_NEVER_FACET) ? 
RRDF_FIELD_FILTER_NONE : RRDF_FIELD_FILTER_FACET, + options, FACET_VALUE_UNSET); + } foreach_key_in_facets_done(k); } buffer_json_object_close(wb); // columns @@ -2585,6 +2747,9 @@ void facets_report(FACETS *facets, BUFFER *wb, DICTIONARY *used_hashes_registry) FACET_KEY *k; foreach_key_in_facets(facets, k) { + if(k->options & FACET_KEY_OPTION_HIDDEN) + continue; + FACET_ROW_KEY_VALUE *rkv = dictionary_get(row->dict, k->name); if(unlikely(k->dynamic.cb)) { @@ -2627,14 +2792,14 @@ void facets_report(FACETS *facets, BUFFER *wb, DICTIONARY *used_hashes_registry) { FACET_KEY *k; foreach_key_in_facets(facets, k) { - if (!k->values.enabled) + if (!k->values.enabled || k->options & FACET_KEY_OPTION_HIDDEN) continue; if(unlikely(!first_histogram_hash)) first_histogram_hash = k->hash; buffer_json_add_array_item_object(wb); - buffer_json_member_add_string(wb, "id", hash_to_static_string(k->hash)); + buffer_json_member_add_string(wb, "id", facets_key_id(k)); buffer_json_member_add_string(wb, "name", k->name); buffer_json_member_add_uint64(wb, "order", k->order); buffer_json_object_close(wb); @@ -2662,7 +2827,7 @@ void facets_report(FACETS *facets, BUFFER *wb, DICTIONARY *used_hashes_registry) } if(show_histogram) { - buffer_json_member_add_string(wb, "id", k ? hash_to_static_string(k->hash) : ""); + buffer_json_member_add_string(wb, "id", k ? facets_key_id(k) : ""); buffer_json_member_add_string(wb, "name", k ? k->name : ""); buffer_json_member_add_object(wb, "chart"); { diff --git a/src/libnetdata/facets/facets.h b/src/libnetdata/facets/facets.h index 8364d8612..1d2b89c2b 100644 --- a/src/libnetdata/facets/facets.h +++ b/src/libnetdata/facets/facets.h @@ -23,6 +23,7 @@ typedef enum __attribute__((packed)) { } FACETS_TRANSFORMATION_SCOPE; typedef enum __attribute__((packed)) { + FACET_KEY_OPTION_NONE = 0, FACET_KEY_OPTION_FACET = (1 << 0), // filterable values FACET_KEY_OPTION_NO_FACET = (1 << 1), // non-filterable value FACET_KEY_OPTION_NEVER_FACET = (1 << 2), // never enable this field as facet @@ -32,8 +33,11 @@ typedef enum __attribute__((packed)) { FACET_KEY_OPTION_MAIN_TEXT = (1 << 6), // full width and wrap FACET_KEY_OPTION_RICH_TEXT = (1 << 7), FACET_KEY_OPTION_REORDER = (1 << 8), // give the key a new order id on first encounter - FACET_KEY_OPTION_TRANSFORM_VIEW = (1 << 9), // when registering the transformation, do it only at the view, not on all data - FACET_KEY_OPTION_EXPANDED_FILTER = (1 << 10), // the presentation should have this filter expanded by default + FACET_KEY_OPTION_REORDER_DONE = (1 << 9), // done re-ordering for this field + FACET_KEY_OPTION_TRANSFORM_VIEW = (1 << 10), // when registering the transformation, do it only at the view, not on all data + FACET_KEY_OPTION_EXPANDED_FILTER = (1 << 11), // the presentation should have this filter expanded by default + FACET_KEY_OPTION_PRETTY_XML = (1 << 12), // instruct the UI to parse this as an XML document + FACET_KEY_OPTION_HIDDEN = (1 << 13), // do not include this field in the response } FACET_KEY_OPTIONS; typedef enum __attribute__((packed)) { @@ -51,10 +55,18 @@ typedef struct facet_row_key_value { BUFFER *wb; } FACET_ROW_KEY_VALUE; +typedef struct facet_row_bin_data { + void (*cleanup_cb)(void *data); + void *data; +} FACET_ROW_BIN_DATA; + +#define FACET_ROW_BIN_DATA_EMPTY (FACET_ROW_BIN_DATA){.data = NULL, .cleanup_cb = NULL} + typedef struct facet_row { usec_t usec; DICTIONARY *dict; FACET_ROW_SEVERITY severity; + FACET_ROW_BIN_DATA bin_data; struct facet_row *prev, *next; } FACET_ROW; @@ -77,6 +89,7 @@ typedef enum 
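The new FACET_KEY_OPTION_REORDER_DONE bit replaces the earlier approach of clearing FACET_KEY_OPTION_REORDER after use, so the original request survives repeated indexing. The resulting two-bit test, as used in FACETS_KEY_CREATE() and FACETS_KEY_ADD_TO_INDEX() above:

    // act only when a reorder was requested AND not yet performed,
    // then record completion without clearing the request bit
    if ((k->options & (FACET_KEY_OPTION_REORDER | FACET_KEY_OPTION_REORDER_DONE))
            == FACET_KEY_OPTION_REORDER) {
        k->order = facets->order++;
        k->options |= FACET_KEY_OPTION_REORDER_DONE;
    }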
__attribute__((packed)) { FACETS_OPTION_DONT_SEND_EMPTY_VALUE_FACETS = (1 << 5), // empty facet values will not be included in the report FACETS_OPTION_SORT_FACETS_ALPHABETICALLY = (1 << 6), FACETS_OPTION_SHOW_DELTAS = (1 << 7), + FACETS_OPTION_HASH_IDS = (1 << 8), // when set, the id of the facets, keys and values will be their hash } FACETS_OPTIONS; FACETS *facets_create(uint32_t items_to_return, FACETS_OPTIONS options, const char *visible_keys, const char *facet_keys, const char *non_facet_keys); @@ -98,8 +111,14 @@ void facets_set_anchor(FACETS *facets, usec_t start_ut, usec_t stop_ut, FACETS_A void facets_enable_slice_mode(FACETS *facets); bool facets_row_candidate_to_keep(FACETS *facets, usec_t usec); +void facets_reset_and_disable_all_facets(FACETS *facets); + +FACET_KEY *facets_register_facet(FACETS *facets, const char *name, FACET_KEY_OPTIONS options); FACET_KEY *facets_register_facet_id(FACETS *facets, const char *key_id, FACET_KEY_OPTIONS options); -void facets_register_facet_id_filter(FACETS *facets, const char *key_id, char *value_id, FACET_KEY_OPTIONS options); + +void facets_register_facet_filter(FACETS *facets, const char *key, const char *value, FACET_KEY_OPTIONS options); +void facets_register_facet_filter_id(FACETS *facets, const char *key_id, const char *value_id, FACET_KEY_OPTIONS options); + void facets_set_timeframe_and_histogram_by_id(FACETS *facets, const char *key_id, usec_t after_ut, usec_t before_ut); void facets_set_timeframe_and_histogram_by_name(FACETS *facets, const char *key_name, usec_t after_ut, usec_t before_ut); @@ -121,8 +140,16 @@ usec_t facets_row_oldest_ut(FACETS *facets); usec_t facets_row_newest_ut(FACETS *facets); uint32_t facets_rows(FACETS *facets); -void facets_table_config(BUFFER *wb); +void facets_table_config(FACETS *facets, BUFFER *wb); const char *facets_severity_to_string(FACET_ROW_SEVERITY severity); +typedef bool (*facets_foreach_selected_value_in_key_t)(FACETS *facets, size_t id, const char *key, const char *value, void *data); +bool facets_foreach_selected_value_in_key(FACETS *facets, const char *key, size_t key_length, DICTIONARY *used_hashes_registry, facets_foreach_selected_value_in_key_t cb, void *data); + +void facets_row_bin_data_set(FACETS *facets, void (*cleanup_cb)(void *data), void *data); +void *facets_row_bin_data_get(FACETS *facets __maybe_unused, FACET_ROW *row); + +void facets_use_hashes_for_ids(FACETS *facets, bool set); + #endif diff --git a/src/libnetdata/facets/logs_query_status.h b/src/libnetdata/facets/logs_query_status.h new file mode 100644 index 000000000..4fde24998 --- /dev/null +++ b/src/libnetdata/facets/logs_query_status.h @@ -0,0 +1,868 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_LOGS_QUERY_STATUS_H +#define NETDATA_LOGS_QUERY_STATUS_H + +#include "../libnetdata.h" + +#define LQS_PARAMETER_HELP "help" +#define LQS_PARAMETER_AFTER "after" +#define LQS_PARAMETER_BEFORE "before" +#define LQS_PARAMETER_ANCHOR "anchor" +#define LQS_PARAMETER_LAST "last" +#define LQS_PARAMETER_QUERY "query" +#define LQS_PARAMETER_FACETS "facets" +#define LQS_PARAMETER_HISTOGRAM "histogram" +#define LQS_PARAMETER_DIRECTION "direction" +#define LQS_PARAMETER_IF_MODIFIED_SINCE "if_modified_since" +#define LQS_PARAMETER_DATA_ONLY "data_only" +#define LQS_PARAMETER_SOURCE "__logs_sources" // this must never conflict with user fields +#define LQS_PARAMETER_INFO "info" +#define LQS_PARAMETER_SLICE "slice" +#define LQS_PARAMETER_DELTA "delta" +#define LQS_PARAMETER_TAIL "tail" +#define LQS_PARAMETER_SAMPLING 
"sampling" + +#define LQS_MAX_PARAMS 1000 +#define LQS_DEFAULT_QUERY_DURATION (1 * 3600) + +#undef LQS_SLICE_PARAMETER +#if LQS_DEFAULT_SLICE_MODE == 1 +#define LQS_SLICE_PARAMETER 1 +#endif + +typedef struct { + const char *transaction; + + FACET_KEY_OPTIONS default_facet; // the option to be used for internal fields. + // when the requests set facets, we disable all default facets, + // so that the UI has full control over them. + + bool fields_are_ids; // POST works with field names, GET works with field hashes (IDs) + bool info; // the request is an INFO request, do not execute a query. + + bool data_only; // return as fast as possible, with the requested amount of data, + // without scanning the entire duration. + + bool slice; // apply native backend filters to slice the events database. + bool delta; // return incremental data for the histogram (used with data_only) + bool tail; // return NOT MODIFIED if no more data are available after the anchor given. + + time_t after_s; // the starting timestamp of the query + time_t before_s; // the ending timestamp of the query + usec_t after_ut; // in microseconds + usec_t before_ut; // in microseconds + + usec_t anchor; // the anchor to seek to + FACETS_ANCHOR_DIRECTION direction; // the direction based on the anchor (or the query timeframe) + + usec_t if_modified_since; // the timestamp to check with tail == true + + size_t entries; // the number of log events to return in a single response + + const char *query; // full text search query string + const char *histogram; // the field to use for the histogram + + SIMPLE_PATTERN *sources; // custom log sources to query + LQS_SOURCE_TYPE source_type; // pre-defined log sources to query + + size_t filters; // the number of filters (facets selected) in the query + size_t sampling; // the number of log events to sample, when the query is too big + + time_t now_s; // the timestamp the query was received + time_t expires_s; // the timestamp the response expires +} LOGS_QUERY_REQUEST; + +#define LOGS_QUERY_REQUEST_DEFAULTS(function_transaction, default_slice, default_direction) \ + (LOGS_QUERY_REQUEST) { \ + .transaction = (function_transaction), \ + .default_facet = FACET_KEY_OPTION_FACET, \ + .info = false, \ + .data_only = false, \ + .slice = (default_slice), \ + .delta = false, \ + .tail = false, \ + .after_s = 0, \ + .before_s = 0, \ + .anchor = 0, \ + .if_modified_since = 0, \ + .entries = 0, \ + .direction = (default_direction), \ + .query = NULL, \ + .histogram = NULL, \ + .sources = NULL, \ + .source_type = LQS_SOURCE_TYPE_ALL, \ + .filters = 0, \ + .sampling = LQS_DEFAULT_ITEMS_SAMPLING, \ +} + +typedef struct { + FACETS *facets; + + LOGS_QUERY_REQUEST rq; + + bool *cancelled; // a pointer to the cancelling boolean + usec_t *stop_monotonic_ut; + + struct { + usec_t start_ut; + usec_t stop_ut; + usec_t delta_ut; + } anchor; + + struct { + usec_t start_ut; + usec_t stop_ut; + bool stop_when_full; + } query; + + usec_t last_modified; + + struct lqs_extension c; +} LOGS_QUERY_STATUS; + +struct logs_query_data { + const char *transaction; + FACETS *facets; + LOGS_QUERY_REQUEST *rq; + BUFFER *wb; +}; + +static inline FACETS_ANCHOR_DIRECTION lgs_get_direction(const char *value) { + return strcasecmp(value, "forward") == 0 ? 
FACETS_ANCHOR_DIRECTION_FORWARD : FACETS_ANCHOR_DIRECTION_BACKWARD; +} + +static inline void lqs_log_error(LOGS_QUERY_STATUS *lqs, const char *msg) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "LOGS QUERY ERROR: %s, on query " + "timeframe [%"PRIu64" - %"PRIu64"], " + "anchor [%"PRIu64" - %"PRIu64"], " + "if_modified_since %"PRIu64", " + "data_only:%s, delta:%s, tail:%s, direction:%s" + , msg + , lqs->rq.after_ut + , lqs->rq.before_ut + , lqs->anchor.start_ut + , lqs->anchor.stop_ut + , lqs->rq.if_modified_since + , lqs->rq.data_only ? "true" : "false" + , lqs->rq.delta ? "true" : "false" + , lqs->rq.tail ? "true" : "false" + , lqs->rq.direction == FACETS_ANCHOR_DIRECTION_FORWARD ? "forward" : "backward"); +} + +static inline void lqs_query_timeframe(LOGS_QUERY_STATUS *lqs, usec_t anchor_delta_ut) { + lqs->anchor.delta_ut = anchor_delta_ut; + + if(lqs->rq.direction == FACETS_ANCHOR_DIRECTION_FORWARD) { + lqs->query.start_ut = (lqs->rq.data_only && lqs->anchor.start_ut) ? lqs->anchor.start_ut : lqs->rq.after_ut; + lqs->query.stop_ut = ((lqs->rq.data_only && lqs->anchor.stop_ut) ? lqs->anchor.stop_ut : lqs->rq.before_ut) + lqs->anchor.delta_ut; + } + else { + lqs->query.start_ut = ((lqs->rq.data_only && lqs->anchor.start_ut) ? lqs->anchor.start_ut : lqs->rq.before_ut) + lqs->anchor.delta_ut; + lqs->query.stop_ut = (lqs->rq.data_only && lqs->anchor.stop_ut) ? lqs->anchor.stop_ut : lqs->rq.after_ut; + } + + lqs->query.stop_when_full = (lqs->rq.data_only && !lqs->anchor.stop_ut); +} + +static inline void lqs_function_help(LOGS_QUERY_STATUS *lqs, BUFFER *wb) { + buffer_reset(wb); + wb->content_type = CT_TEXT_PLAIN; + wb->response_code = HTTP_RESP_OK; + + buffer_sprintf(wb, + "%s / %s\n" + "\n" + "%s\n" + "\n" + "The following parameters are supported:\n" + "\n" + , program_name + , LQS_FUNCTION_NAME + , LQS_FUNCTION_DESCRIPTION + ); + + buffer_sprintf(wb, + " " LQS_PARAMETER_HELP "\n" + " Shows this help message.\n" + "\n" + ); + + buffer_sprintf(wb, + " " LQS_PARAMETER_INFO "\n" + " Request initial configuration information about the plugin.\n" + " The key entity returned is the required_params array, which includes\n" + " all the available log sources.\n" + " When `" LQS_PARAMETER_INFO "` is requested, all other parameters are ignored.\n" + "\n" + ); + + buffer_sprintf(wb, + " " LQS_PARAMETER_DATA_ONLY ":true or " LQS_PARAMETER_DATA_ONLY ":false\n" + " Quickly respond with the data requested, without generating the\n" + " `histogram`, `facets` counters and `items`.\n" + "\n" + ); + + buffer_sprintf(wb, + " " LQS_PARAMETER_DELTA ":true or " LQS_PARAMETER_DELTA ":false\n" + " When doing data-only queries, include deltas for histogram, facets and items.\n" + "\n" + ); + + buffer_sprintf(wb, + " " LQS_PARAMETER_TAIL ":true or " LQS_PARAMETER_TAIL ":false\n" + " When doing data-only queries, respond with the newest messages,\n" + " and up to the anchor, but calculate deltas (if requested) for\n" + " the duration [anchor - before].\n" + "\n" + ); + +#ifdef LQS_SLICE_PARAMETER + buffer_sprintf(wb, + " " LQS_PARAMETER_SLICE ":true or " LQS_PARAMETER_SLICE ":false\n" + " When it is turned on, the plugin slices the logs database,\n" + " utilizing the underlying available indexes.\n" + " When it is off, all filtering is done by the plugin.\n" + " The default is: %s\n" + "\n" + , lqs->rq.slice ?
"true" : "false" + ); +#endif + buffer_sprintf(wb, + " " LQS_PARAMETER_SOURCE ":SOURCE\n" + " Query only the specified log sources.\n" + " Do an `" LQS_PARAMETER_INFO "` query to find the sources.\n" + "\n" + ); + + buffer_sprintf(wb, + " " LQS_PARAMETER_BEFORE ":TIMESTAMP_IN_SECONDS\n" + " Absolute or relative (to now) timestamp in seconds, to start the query.\n" + " The query is always executed from the most recent to the oldest log entry.\n" + " If not given the default is: now.\n" + "\n" + ); + + buffer_sprintf(wb, + " " LQS_PARAMETER_AFTER ":TIMESTAMP_IN_SECONDS\n" + " Absolute or relative (to `before`) timestamp in seconds, to end the query.\n" + " If not given, the default is %d.\n" + "\n" + , -LQS_DEFAULT_QUERY_DURATION + ); + + buffer_sprintf(wb, + " " LQS_PARAMETER_LAST ":ITEMS\n" + " The number of items to return.\n" + " The default is %zu.\n" + "\n" + , lqs->rq.entries + ); + + buffer_sprintf(wb, + " " LQS_PARAMETER_SAMPLING ":ITEMS\n" + " The number of log entries to sample to estimate facets counters and histogram.\n" + " The default is %zu.\n" + "\n" + , lqs->rq.sampling + ); + + buffer_sprintf(wb, + " " LQS_PARAMETER_ANCHOR ":TIMESTAMP_IN_MICROSECONDS\n" + " Return items relative to this timestamp.\n" + " The exact items to be returned depend on the query `" LQS_PARAMETER_DIRECTION "`.\n" + "\n" + ); + + buffer_sprintf(wb, + " " LQS_PARAMETER_DIRECTION ":forward or " LQS_PARAMETER_DIRECTION ":backward\n" + " When set to `backward` (default) the items returned are the newest before the\n" + " `" LQS_PARAMETER_ANCHOR "`, (or `" LQS_PARAMETER_BEFORE "` if `" LQS_PARAMETER_ANCHOR "` is not set)\n" + " When set to `forward` the items returned are the oldest after the\n" + " `" LQS_PARAMETER_ANCHOR "`, (or `" LQS_PARAMETER_AFTER "` if `" LQS_PARAMETER_ANCHOR "` is not set)\n" + " The default is: %s\n" + "\n" + , lqs->rq.direction == FACETS_ANCHOR_DIRECTION_FORWARD ? 
"forward" : "backward" + ); + + buffer_sprintf(wb, + " " LQS_PARAMETER_QUERY ":SIMPLE_PATTERN\n" + " Do a full text search to find the log entries matching the pattern given.\n" + " The plugin is searching for matches on all fields of the database.\n" + "\n" + ); + + buffer_sprintf(wb, + " " LQS_PARAMETER_IF_MODIFIED_SINCE ":TIMESTAMP_IN_MICROSECONDS\n" + " Each successful response, includes a `last_modified` field.\n" + " By providing the timestamp to the `" LQS_PARAMETER_IF_MODIFIED_SINCE "` parameter,\n" + " the plugin will return 200 with a successful response, or 304 if the source has not\n" + " been modified since that timestamp.\n" + "\n" + ); + + buffer_sprintf(wb, + " " LQS_PARAMETER_HISTOGRAM ":facet_id\n" + " Use the given `facet_id` for the histogram.\n" + " This parameter is ignored in `" LQS_PARAMETER_DATA_ONLY "` mode.\n" + "\n" + ); + + buffer_sprintf(wb, + " " LQS_PARAMETER_FACETS ":facet_id1,facet_id2,facet_id3,...\n" + " Add the given facets to the list of fields for which analysis is required.\n" + " The plugin will offer both a histogram and facet value counters for its values.\n" + " This parameter is ignored in `" LQS_PARAMETER_DATA_ONLY "` mode.\n" + "\n" + ); + + buffer_sprintf(wb, + " facet_id:value_id1,value_id2,value_id3,...\n" + " Apply filters to the query, based on the facet IDs returned.\n" + " Each `facet_id` can be given once, but multiple `facet_ids` can be given.\n" + "\n" + ); +} + +static inline bool lqs_request_parse_json_payload(json_object *jobj, const char *path, void *data, BUFFER *error) { + struct logs_query_data *qd = data; + LOGS_QUERY_REQUEST *rq = qd->rq; + BUFFER *wb = qd->wb; + FACETS *facets = qd->facets; + // const char *transaction = qd->transaction; + + buffer_flush(error); + + JSONC_PARSE_BOOL_OR_ERROR_AND_RETURN(jobj, path, LQS_PARAMETER_INFO, rq->info, error, false); + JSONC_PARSE_BOOL_OR_ERROR_AND_RETURN(jobj, path, LQS_PARAMETER_DELTA, rq->delta, error, false); + JSONC_PARSE_BOOL_OR_ERROR_AND_RETURN(jobj, path, LQS_PARAMETER_TAIL, rq->tail, error, false); + JSONC_PARSE_BOOL_OR_ERROR_AND_RETURN(jobj, path, LQS_PARAMETER_SLICE, rq->slice, error, false); + JSONC_PARSE_BOOL_OR_ERROR_AND_RETURN(jobj, path, LQS_PARAMETER_DATA_ONLY, rq->data_only, error, false); + JSONC_PARSE_UINT64_OR_ERROR_AND_RETURN(jobj, path, LQS_PARAMETER_SAMPLING, rq->sampling, error, false); + JSONC_PARSE_INT64_OR_ERROR_AND_RETURN(jobj, path, LQS_PARAMETER_AFTER, rq->after_s, error, false); + JSONC_PARSE_INT64_OR_ERROR_AND_RETURN(jobj, path, LQS_PARAMETER_BEFORE, rq->before_s, error, false); + JSONC_PARSE_UINT64_OR_ERROR_AND_RETURN(jobj, path, LQS_PARAMETER_IF_MODIFIED_SINCE, rq->if_modified_since, error, false); + JSONC_PARSE_UINT64_OR_ERROR_AND_RETURN(jobj, path, LQS_PARAMETER_ANCHOR, rq->anchor, error, false); + JSONC_PARSE_UINT64_OR_ERROR_AND_RETURN(jobj, path, LQS_PARAMETER_LAST, rq->entries, error, false); + JSONC_PARSE_TXT2ENUM_OR_ERROR_AND_RETURN(jobj, path, LQS_PARAMETER_DIRECTION, lgs_get_direction, rq->direction, error, false); + JSONC_PARSE_TXT2STRDUPZ_OR_ERROR_AND_RETURN(jobj, path, LQS_PARAMETER_QUERY, rq->query, error, false); + JSONC_PARSE_TXT2STRDUPZ_OR_ERROR_AND_RETURN(jobj, path, LQS_PARAMETER_HISTOGRAM, rq->histogram, error, false); + + json_object *fcts; + if (json_object_object_get_ex(jobj, LQS_PARAMETER_FACETS, &fcts)) { + if (json_object_get_type(fcts) != json_type_array) { + buffer_sprintf(error, "member '%s' is not an array.", LQS_PARAMETER_FACETS); + // nd_log(NDLS_COLLECTORS, NDLP_ERR, "POST payload: '%s' is not an array", 
LQS_PARAMETER_FACETS); + return false; + } + + rq->default_facet = FACET_KEY_OPTION_NONE; + facets_reset_and_disable_all_facets(facets); + + buffer_json_member_add_array(wb, LQS_PARAMETER_FACETS); + + size_t facets_len = json_object_array_length(fcts); + for (size_t i = 0; i < facets_len; i++) { + json_object *fct = json_object_array_get_idx(fcts, i); + + if (json_object_get_type(fct) != json_type_string) { + buffer_sprintf(error, "facets array item %zu is not a string", i); + // nd_log(NDLS_COLLECTORS, NDLP_ERR, "POST payload: facets array item %zu is not a string", i); + return false; + } + + const char *value = json_object_get_string(fct); + facets_register_facet(facets, value, FACET_KEY_OPTION_FACET|FACET_KEY_OPTION_FTS|FACET_KEY_OPTION_REORDER); + buffer_json_add_array_item_string(wb, value); + } + + buffer_json_array_close(wb); // facets + } + + json_object *selections; + if (json_object_object_get_ex(jobj, "selections", &selections)) { + if (json_object_get_type(selections) != json_type_object) { + buffer_sprintf(error, "member 'selections' is not an object"); + // nd_log(NDLS_COLLECTORS, NDLP_ERR, "POST payload: '%s' is not an object", "selections"); + return false; + } + + buffer_json_member_add_object(wb, "selections"); + + CLEAN_BUFFER *sources_list = buffer_create(0, NULL); + + json_object_object_foreach(selections, key, val) { + if(strcmp(key, "query") == 0) continue; + + if (json_object_get_type(val) != json_type_array) { + buffer_sprintf(error, "selection '%s' is not an array", key); + // nd_log(NDLS_COLLECTORS, NDLP_ERR, "POST payload: selection '%s' is not an array", key); + return false; + } + + bool is_source = false; + if(strcmp(key, LQS_PARAMETER_SOURCE) == 0) { + // reset the sources, so that only what the user selects will be shown + is_source = true; + rq->source_type = LQS_SOURCE_TYPE_NONE; + } + + buffer_json_member_add_array(wb, key); + + size_t values_len = json_object_array_length(val); + for (size_t i = 0; i < values_len; i++) { + json_object *value_obj = json_object_array_get_idx(val, i); + + if (json_object_get_type(value_obj) != json_type_string) { + buffer_sprintf(error, "selection '%s' array item %zu is not a string", key, i); + // nd_log(NDLS_COLLECTORS, NDLP_ERR, "POST payload: selection '%s' array item %zu is not a string", key, i); + return false; + } + + const char *value = json_object_get_string(value_obj); + + if(is_source) { + // processing sources + LQS_SOURCE_TYPE t = LQS_FUNCTION_GET_INTERNAL_SOURCE_TYPE(value); + if(t != LQS_SOURCE_TYPE_NONE) { + rq->source_type |= t; + value = NULL; + } + else { + // else, match the source, whatever it is + if(buffer_strlen(sources_list)) + buffer_putc(sources_list, '|'); + + buffer_strcat(sources_list, value); + } + } + else { + // register a facet filter (facets_register_facet_filter) for each value + facets_register_facet_filter( + facets, key, value, FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_FTS | FACET_KEY_OPTION_REORDER); + + rq->filters++; + } + + buffer_json_add_array_item_string(wb, value); + } + + buffer_json_array_close(wb); // key + } + + if(buffer_strlen(sources_list)) { + simple_pattern_free(rq->sources); + rq->sources = simple_pattern_create(buffer_tostring(sources_list), "|", SIMPLE_PATTERN_EXACT, false); + } + + buffer_json_object_close(wb); // selections + } + + facets_use_hashes_for_ids(facets, false); + rq->fields_are_ids = false; + return true; +} + +static inline bool lqs_request_parse_POST(LOGS_QUERY_STATUS *lqs, BUFFER *wb, BUFFER *payload, const char *transaction) { + FACETS *facets = lqs->facets; +
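Note how the loop above splits source selections: names recognised by LQS_FUNCTION_GET_INTERNAL_SOURCE_TYPE() are OR-ed into rq->source_type, and everything else is collected into a '|'-separated list compiled once at the end, e.g. (hypothetical values):

    // selection ["all", "my-app"]  ->  rq->source_type |= <type of "all">
    //                                  rq->sources = simple_pattern_create("my-app", "|",
    //                                                    SIMPLE_PATTERN_EXACT, false)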
LOGS_QUERY_REQUEST *rq = &lqs->rq; + + buffer_json_member_add_object(wb, "_request"); + + struct logs_query_data qd = { + .transaction = transaction, + .facets = facets, + .rq = rq, + .wb = wb, + }; + + int code; + CLEAN_JSON_OBJECT *jobj = + json_parse_function_payload_or_error(wb, payload, &code, lqs_request_parse_json_payload, &qd); + wb->response_code = code; + + return (jobj && code == HTTP_RESP_OK); +} + +static inline bool lqs_request_parse_GET(LOGS_QUERY_STATUS *lqs, BUFFER *wb, char *function) { + FACETS *facets = lqs->facets; + LOGS_QUERY_REQUEST *rq = &lqs->rq; + + buffer_json_member_add_object(wb, "_request"); + + char func_copy[strlen(function) + 1]; + memcpy(func_copy, function, sizeof(func_copy)); + + char *words[LQS_MAX_PARAMS] = { NULL }; + size_t num_words = quoted_strings_splitter_whitespace(func_copy, words, LQS_MAX_PARAMS); + for(int i = 1; i < LQS_MAX_PARAMS;i++) { + char *keyword = get_word(words, num_words, i); + if(!keyword) break; + + if(strcmp(keyword, LQS_PARAMETER_HELP) == 0) { + lqs_function_help(lqs, wb); + return false; + } + else if(strcmp(keyword, LQS_PARAMETER_INFO) == 0) { + rq->info = true; + } + else if(strncmp(keyword, LQS_PARAMETER_DELTA ":", sizeof(LQS_PARAMETER_DELTA ":") - 1) == 0) { + char *v = &keyword[sizeof(LQS_PARAMETER_DELTA ":") - 1]; + + if(strcmp(v, "false") == 0 || strcmp(v, "no") == 0 || strcmp(v, "0") == 0) + rq->delta = false; + else + rq->delta = true; + } + else if(strncmp(keyword, LQS_PARAMETER_TAIL ":", sizeof(LQS_PARAMETER_TAIL ":") - 1) == 0) { + char *v = &keyword[sizeof(LQS_PARAMETER_TAIL ":") - 1]; + + if(strcmp(v, "false") == 0 || strcmp(v, "no") == 0 || strcmp(v, "0") == 0) + rq->tail = false; + else + rq->tail = true; + } + else if(strncmp(keyword, LQS_PARAMETER_SAMPLING ":", sizeof(LQS_PARAMETER_SAMPLING ":") - 1) == 0) { + rq->sampling = str2ul(&keyword[sizeof(LQS_PARAMETER_SAMPLING ":") - 1]); + } + else if(strncmp(keyword, LQS_PARAMETER_DATA_ONLY ":", sizeof(LQS_PARAMETER_DATA_ONLY ":") - 1) == 0) { + char *v = &keyword[sizeof(LQS_PARAMETER_DATA_ONLY ":") - 1]; + + if(strcmp(v, "false") == 0 || strcmp(v, "no") == 0 || strcmp(v, "0") == 0) + rq->data_only = false; + else + rq->data_only = true; + } + else if(strncmp(keyword, LQS_PARAMETER_SLICE ":", sizeof(LQS_PARAMETER_SLICE ":") - 1) == 0) { + char *v = &keyword[sizeof(LQS_PARAMETER_SLICE ":") - 1]; + + if(strcmp(v, "false") == 0 || strcmp(v, "no") == 0 || strcmp(v, "0") == 0) + rq->slice = false; + else + rq->slice = true; + } + else if(strncmp(keyword, LQS_PARAMETER_SOURCE ":", sizeof(LQS_PARAMETER_SOURCE ":") - 1) == 0) { + const char *value = &keyword[sizeof(LQS_PARAMETER_SOURCE ":") - 1]; + + buffer_json_member_add_array(wb, LQS_PARAMETER_SOURCE); + + CLEAN_BUFFER *sources_list = buffer_create(0, NULL); + + rq->source_type = LQS_SOURCE_TYPE_NONE; + while(value) { + char *sep = strchr(value, ','); + if(sep) + *sep++ = '\0'; + + buffer_json_add_array_item_string(wb, value); + + LQS_SOURCE_TYPE t = LQS_FUNCTION_GET_INTERNAL_SOURCE_TYPE(value); + if(t != LQS_SOURCE_TYPE_NONE) { + rq->source_type |= t; + value = NULL; + } + else { + // else, match the source, whatever it is + if(buffer_strlen(sources_list)) + buffer_putc(sources_list, '|'); + + buffer_strcat(sources_list, value); + } + + value = sep; + } + + if(buffer_strlen(sources_list)) { + simple_pattern_free(rq->sources); + rq->sources = simple_pattern_create(buffer_tostring(sources_list), "|", SIMPLE_PATTERN_EXACT, false); + } + + buffer_json_array_close(wb); // source + } + else if(strncmp(keyword, 
LQS_PARAMETER_AFTER ":", sizeof(LQS_PARAMETER_AFTER ":") - 1) == 0) { + rq->after_s = str2l(&keyword[sizeof(LQS_PARAMETER_AFTER ":") - 1]); + } + else if(strncmp(keyword, LQS_PARAMETER_BEFORE ":", sizeof(LQS_PARAMETER_BEFORE ":") - 1) == 0) { + rq->before_s = str2l(&keyword[sizeof(LQS_PARAMETER_BEFORE ":") - 1]); + } + else if(strncmp(keyword, LQS_PARAMETER_IF_MODIFIED_SINCE ":", sizeof(LQS_PARAMETER_IF_MODIFIED_SINCE ":") - 1) == 0) { + rq->if_modified_since = str2ull(&keyword[sizeof(LQS_PARAMETER_IF_MODIFIED_SINCE ":") - 1], NULL); + } + else if(strncmp(keyword, LQS_PARAMETER_ANCHOR ":", sizeof(LQS_PARAMETER_ANCHOR ":") - 1) == 0) { + rq->anchor = str2ull(&keyword[sizeof(LQS_PARAMETER_ANCHOR ":") - 1], NULL); + } + else if(strncmp(keyword, LQS_PARAMETER_DIRECTION ":", sizeof(LQS_PARAMETER_DIRECTION ":") - 1) == 0) { + rq->direction = lgs_get_direction(&keyword[sizeof(LQS_PARAMETER_DIRECTION ":") - 1]); + } + else if(strncmp(keyword, LQS_PARAMETER_LAST ":", sizeof(LQS_PARAMETER_LAST ":") - 1) == 0) { + rq->entries = str2ul(&keyword[sizeof(LQS_PARAMETER_LAST ":") - 1]); + } + else if(strncmp(keyword, LQS_PARAMETER_QUERY ":", sizeof(LQS_PARAMETER_QUERY ":") - 1) == 0) { + freez((void *)rq->query); + rq->query= strdupz(&keyword[sizeof(LQS_PARAMETER_QUERY ":") - 1]); + } + else if(strncmp(keyword, LQS_PARAMETER_HISTOGRAM ":", sizeof(LQS_PARAMETER_HISTOGRAM ":") - 1) == 0) { + freez((void *)rq->histogram); + rq->histogram = strdupz(&keyword[sizeof(LQS_PARAMETER_HISTOGRAM ":") - 1]); + } + else if(strncmp(keyword, LQS_PARAMETER_FACETS ":", sizeof(LQS_PARAMETER_FACETS ":") - 1) == 0) { + rq->default_facet = FACET_KEY_OPTION_NONE; + facets_reset_and_disable_all_facets(facets); + + char *value = &keyword[sizeof(LQS_PARAMETER_FACETS ":") - 1]; + if(*value) { + buffer_json_member_add_array(wb, LQS_PARAMETER_FACETS); + + while(value) { + char *sep = strchr(value, ','); + if(sep) + *sep++ = '\0'; + + facets_register_facet_id(facets, value, FACET_KEY_OPTION_FACET|FACET_KEY_OPTION_FTS|FACET_KEY_OPTION_REORDER); + buffer_json_add_array_item_string(wb, value); + + value = sep; + } + + buffer_json_array_close(wb); // facets + } + } + else { + char *value = strchr(keyword, ':'); + if(value) { + *value++ = '\0'; + + buffer_json_member_add_array(wb, keyword); + + while(value) { + char *sep = strchr(value, ','); + if(sep) + *sep++ = '\0'; + + facets_register_facet_filter_id( + facets, keyword, value, + FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_FTS | FACET_KEY_OPTION_REORDER); + + buffer_json_add_array_item_string(wb, value); + rq->filters++; + + value = sep; + } + + buffer_json_array_close(wb); // keyword + } + } + } + + facets_use_hashes_for_ids(facets, true); + rq->fields_are_ids = true; + return true; +} + +static inline void lqs_info_response(BUFFER *wb, FACETS *facets) { + // the buffer already has the request in it + // DO NOT FLUSH IT + + buffer_json_member_add_uint64(wb, "v", 3); + facets_accepted_parameters_to_json_array(facets, wb, false); + buffer_json_member_add_array(wb, "required_params"); + { + buffer_json_add_array_item_object(wb); + { + buffer_json_member_add_string(wb, "id", LQS_PARAMETER_SOURCE); + buffer_json_member_add_string(wb, "name", LQS_PARAMETER_SOURCE_NAME); + buffer_json_member_add_string(wb, "help", "Select the logs source to query"); + buffer_json_member_add_string(wb, "type", "multiselect"); + buffer_json_member_add_array(wb, "options"); + { + LQS_FUNCTION_SOURCE_TO_JSON_ARRAY(wb); + } + buffer_json_array_close(wb); // options array + } + buffer_json_object_close(wb); // 
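For comparison, the GET parser above consumes the same information as space-separated keyword:value words, e.g. (function name and facet/value ids hypothetical):

    <function> info
    <function> after:-3600 before:0 last:200 direction:backward slice:true
    <function> facets:AbCdEfGhIjKl AbCdEfGhIjKl:MnOpQrStUvWx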
required params object + } + buffer_json_array_close(wb); // required_params array + + facets_table_config(facets, wb); + + buffer_json_member_add_uint64(wb, "status", HTTP_RESP_OK); + buffer_json_member_add_string(wb, "type", "table"); + buffer_json_member_add_string(wb, "help", LQS_FUNCTION_DESCRIPTION); + buffer_json_finalize(wb); + + wb->content_type = CT_APPLICATION_JSON; + wb->response_code = HTTP_RESP_OK; +} + +static inline BUFFER *lqs_create_output_buffer(void) { + BUFFER *wb = buffer_create(0, NULL); + buffer_reset(wb); + buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_MINIFY); + return wb; +} + +static inline FACETS *lqs_facets_create(uint32_t items_to_return, FACETS_OPTIONS options, const char *visible_keys, const char *facet_keys, const char *non_facet_keys, bool have_slice) { + FACETS *facets = facets_create(items_to_return, options, + visible_keys, facet_keys, non_facet_keys); + + facets_accepted_param(facets, LQS_PARAMETER_INFO); + facets_accepted_param(facets, LQS_PARAMETER_SOURCE); + facets_accepted_param(facets, LQS_PARAMETER_AFTER); + facets_accepted_param(facets, LQS_PARAMETER_BEFORE); + facets_accepted_param(facets, LQS_PARAMETER_ANCHOR); + facets_accepted_param(facets, LQS_PARAMETER_DIRECTION); + facets_accepted_param(facets, LQS_PARAMETER_LAST); + facets_accepted_param(facets, LQS_PARAMETER_QUERY); + facets_accepted_param(facets, LQS_PARAMETER_FACETS); + facets_accepted_param(facets, LQS_PARAMETER_HISTOGRAM); + facets_accepted_param(facets, LQS_PARAMETER_IF_MODIFIED_SINCE); + facets_accepted_param(facets, LQS_PARAMETER_DATA_ONLY); + facets_accepted_param(facets, LQS_PARAMETER_DELTA); + facets_accepted_param(facets, LQS_PARAMETER_TAIL); + facets_accepted_param(facets, LQS_PARAMETER_SAMPLING); + + if(have_slice) + facets_accepted_param(facets, LQS_PARAMETER_SLICE); + + return facets; +} + +static inline bool lqs_request_parse_and_validate(LOGS_QUERY_STATUS *lqs, BUFFER *wb, char *function, BUFFER *payload, bool have_slice, const char *default_histogram) { + LOGS_QUERY_REQUEST *rq = &lqs->rq; + FACETS *facets = lqs->facets; + + if( (payload && !lqs_request_parse_POST(lqs, wb, payload, rq->transaction)) || + (!payload && !lqs_request_parse_GET(lqs, wb, function)) ) + return false; + + // ---------------------------------------------------------------------------------------------------------------- + // validate parameters + + if(rq->query && !*rq->query) { + freez((void *)rq->query); + rq->query = NULL; + } + + if(rq->histogram && !*rq->histogram) { + freez((void *)rq->histogram); + rq->histogram = NULL; + } + + if(!rq->data_only) + rq->delta = false; + + if(!rq->data_only || !rq->if_modified_since) + rq->tail = false; + + rq->now_s = now_realtime_sec(); + rq->expires_s = rq->now_s + 1; + wb->expires = rq->expires_s; + + if(!rq->after_s && !rq->before_s) { + rq->before_s = rq->now_s; + rq->after_s = rq->before_s - LQS_DEFAULT_QUERY_DURATION; + } + else + rrdr_relative_window_to_absolute(&rq->after_s, &rq->before_s, rq->now_s); + + if(rq->after_s > rq->before_s) { + time_t tmp = rq->after_s; + rq->after_s = rq->before_s; + rq->before_s = tmp; + } + + if(rq->after_s == rq->before_s) + rq->after_s = rq->before_s - LQS_DEFAULT_QUERY_DURATION; + + rq->after_ut = rq->after_s * USEC_PER_SEC; + rq->before_ut = (rq->before_s * USEC_PER_SEC) + USEC_PER_SEC - 1; + + if(!rq->entries) + rq->entries = LQS_DEFAULT_ITEMS_PER_QUERY; + + // ---------------------------------------------------------------------------------------------------------------- + // validate 
the anchor + + lqs->last_modified = 0; + lqs->anchor.start_ut = lqs->rq.anchor; + lqs->anchor.stop_ut = 0; + + if(lqs->anchor.start_ut && lqs->rq.tail) { + // a tail request + // we need the top X entries from BEFORE + // but, we need to calculate the facets and the + // histogram up to the anchor + lqs->rq.direction = FACETS_ANCHOR_DIRECTION_BACKWARD; + lqs->anchor.start_ut = 0; + lqs->anchor.stop_ut = lqs->rq.anchor; + } + + if(lqs->rq.anchor && lqs->rq.anchor < lqs->rq.after_ut) { + lqs_log_error(lqs, "received anchor is too small for query timeframe, ignoring anchor"); + lqs->rq.anchor = 0; + lqs->anchor.start_ut = 0; + lqs->anchor.stop_ut = 0; + lqs->rq.direction = FACETS_ANCHOR_DIRECTION_BACKWARD; + } + else if(lqs->rq.anchor > lqs->rq.before_ut) { + lqs_log_error(lqs, "received anchor is too big for query timeframe, ignoring anchor"); + lqs->rq.anchor = 0; + lqs->anchor.start_ut = 0; + lqs->anchor.stop_ut = 0; + lqs->rq.direction = FACETS_ANCHOR_DIRECTION_BACKWARD; + } + + facets_set_anchor(facets, lqs->anchor.start_ut, lqs->anchor.stop_ut, lqs->rq.direction); + + facets_set_additional_options(facets, + ((lqs->rq.data_only) ? FACETS_OPTION_DATA_ONLY : 0) | + ((lqs->rq.delta) ? FACETS_OPTION_SHOW_DELTAS : 0)); + + facets_set_items(facets, lqs->rq.entries); + facets_set_query(facets, lqs->rq.query); + + if(lqs->rq.slice && have_slice) + facets_enable_slice_mode(facets); + else + lqs->rq.slice = false; + + if(lqs->rq.histogram) { + if(lqs->rq.fields_are_ids) + facets_set_timeframe_and_histogram_by_id(facets, lqs->rq.histogram, lqs->rq.after_ut, lqs->rq.before_ut); + else + facets_set_timeframe_and_histogram_by_name(facets, lqs->rq.histogram, lqs->rq.after_ut, lqs->rq.before_ut); + } + else if(default_histogram) + facets_set_timeframe_and_histogram_by_name(facets, default_histogram, lqs->rq.after_ut, lqs->rq.before_ut); + + // complete the request object + buffer_json_member_add_boolean(wb, LQS_PARAMETER_INFO, lqs->rq.info); + buffer_json_member_add_boolean(wb, LQS_PARAMETER_SLICE, lqs->rq.slice); + buffer_json_member_add_boolean(wb, LQS_PARAMETER_DATA_ONLY, lqs->rq.data_only); + buffer_json_member_add_boolean(wb, LQS_PARAMETER_DELTA, lqs->rq.delta); + buffer_json_member_add_boolean(wb, LQS_PARAMETER_TAIL, lqs->rq.tail); + buffer_json_member_add_uint64(wb, LQS_PARAMETER_SAMPLING, lqs->rq.sampling); + buffer_json_member_add_uint64(wb, "source_type", lqs->rq.source_type); + buffer_json_member_add_uint64(wb, LQS_PARAMETER_AFTER, lqs->rq.after_ut / USEC_PER_SEC); + buffer_json_member_add_uint64(wb, LQS_PARAMETER_BEFORE, lqs->rq.before_ut / USEC_PER_SEC); + buffer_json_member_add_uint64(wb, "if_modified_since", lqs->rq.if_modified_since); + buffer_json_member_add_uint64(wb, LQS_PARAMETER_ANCHOR, lqs->rq.anchor); + buffer_json_member_add_string(wb, LQS_PARAMETER_DIRECTION, lqs->rq.direction == FACETS_ANCHOR_DIRECTION_FORWARD ? 
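A worked example of the timeframe normalisation above (timestamps hypothetical, now_s = 1700000000):

    // request: after:-3600 before:0
    //   -> rrdr_relative_window_to_absolute() yields
    //      after_s  = 1699996400, before_s = 1700000000
    //   -> after_ut  = 1699996400 * USEC_PER_SEC
    //      before_ut = 1700000000 * USEC_PER_SEC + USEC_PER_SEC - 1
    // an anchor outside [after_ut, before_ut] is dropped and the
    // direction falls back to FACETS_ANCHOR_DIRECTION_BACKWARD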
"forward" : "backward"); + buffer_json_member_add_uint64(wb, LQS_PARAMETER_LAST, lqs->rq.entries); + buffer_json_member_add_string(wb, LQS_PARAMETER_QUERY, lqs->rq.query); + buffer_json_member_add_string(wb, LQS_PARAMETER_HISTOGRAM, lqs->rq.histogram); + buffer_json_object_close(wb); // request + + return true; +} + +static inline void lqs_cleanup(LOGS_QUERY_STATUS *lqs) { + freez((void *)lqs->rq.query); + freez((void *)lqs->rq.histogram); + simple_pattern_free(lqs->rq.sources); + facets_destroy(lqs->facets); +} + +#endif //NETDATA_LOGS_QUERY_STATUS_H diff --git a/src/libnetdata/functions_evloop/functions_evloop.c b/src/libnetdata/functions_evloop/functions_evloop.c index 5000d038f..fd0061844 100644 --- a/src/libnetdata/functions_evloop/functions_evloop.c +++ b/src/libnetdata/functions_evloop/functions_evloop.c @@ -137,6 +137,8 @@ static void worker_add_job(struct functions_evloop_globals *wg, const char *keyw function?function:"(unset)"); } else { + // nd_log(NDLS_COLLECTORS, NDLP_INFO, "WORKER JOB WITH PAYLOAD '%s'", payload ? buffer_tostring(payload) : "NONE"); + int timeout = str2i(timeout_s); const char *msg = "No function with this name found"; @@ -222,6 +224,8 @@ static void *rrd_functions_worker_globals_reader_main(void *arg) { char *s = (char *)buffer_tostring(buffer); if(strstr(&s[deferred.last_len], PLUGINSD_CALL_FUNCTION_PAYLOAD_END "\n") != NULL) { + // nd_log(NDLS_COLLECTORS, NDLP_INFO, "FUNCTION PAYLOAD END"); + if(deferred.last_len > 0) // remove the trailing newline from the buffer deferred.last_len--; @@ -249,11 +253,12 @@ static void *rrd_functions_worker_globals_reader_main(void *arg) { } char *words[MAX_FUNCTION_PARAMETERS] = { NULL }; - size_t num_words = quoted_strings_splitter_pluginsd((char *)buffer_tostring(buffer), words, MAX_FUNCTION_PARAMETERS); + size_t num_words = quoted_strings_splitter_whitespace((char *)buffer_tostring(buffer), words, MAX_FUNCTION_PARAMETERS); const char *keyword = get_word(words, num_words, 0); if(keyword && (strcmp(keyword, PLUGINSD_CALL_FUNCTION) == 0)) { + // nd_log(NDLS_COLLECTORS, NDLP_INFO, "FUNCTION CALL"); char *transaction = get_word(words, num_words, 1); char *timeout_s = get_word(words, num_words, 2); char *function = get_word(words, num_words, 3); @@ -262,6 +267,7 @@ static void *rrd_functions_worker_globals_reader_main(void *arg) { worker_add_job(wg, keyword, transaction, function, timeout_s, NULL, access, source); } else if(keyword && (strcmp(keyword, PLUGINSD_CALL_FUNCTION_PAYLOAD_BEGIN) == 0)) { + // nd_log(NDLS_COLLECTORS, NDLP_INFO, "FUNCTION PAYLOAD CALL"); char *transaction = get_word(words, num_words, 1); char *timeout_s = get_word(words, num_words, 2); char *function = get_word(words, num_words, 3); @@ -279,6 +285,7 @@ static void *rrd_functions_worker_globals_reader_main(void *arg) { deferred.enabled = true; } else if(keyword && strcmp(keyword, PLUGINSD_CALL_FUNCTION_CANCEL) == 0) { + // nd_log(NDLS_COLLECTORS, NDLP_INFO, "FUNCTION CANCEL"); char *transaction = get_word(words, num_words, 1); const DICTIONARY_ITEM *acquired = dictionary_get_and_acquire_item(wg->worker_queue, transaction); if(acquired) { @@ -292,6 +299,7 @@ static void *rrd_functions_worker_globals_reader_main(void *arg) { nd_log(NDLS_COLLECTORS, NDLP_NOTICE, "Received CANCEL for transaction '%s', but it not available here", transaction); } else if(keyword && strcmp(keyword, PLUGINSD_CALL_FUNCTION_PROGRESS) == 0) { + // nd_log(NDLS_COLLECTORS, NDLP_INFO, "FUNCTION PROGRESS"); char *transaction = get_word(words, num_words, 1); const DICTIONARY_ITEM 
*acquired = dictionary_get_and_acquire_item(wg->worker_queue, transaction); if(acquired) { @@ -305,7 +313,7 @@ static void *rrd_functions_worker_globals_reader_main(void *arg) { nd_log(NDLS_COLLECTORS, NDLP_NOTICE, "Received PROGRESS for transaction '%s', but it not available here", transaction); } else - nd_log(NDLS_COLLECTORS, NDLP_NOTICE, "Received unknown command: %s", keyword?keyword:"(unset)"); + nd_log(NDLS_COLLECTORS, NDLP_NOTICE, "Received unknown command: %s", keyword ? keyword : "(unset)"); buffer_flush(buffer); } diff --git a/src/libnetdata/functions_evloop/functions_evloop.h b/src/libnetdata/functions_evloop/functions_evloop.h index 5c575bd17..35defe355 100644 --- a/src/libnetdata/functions_evloop/functions_evloop.h +++ b/src/libnetdata/functions_evloop/functions_evloop.h @@ -71,6 +71,14 @@ #define PLUGINSD_KEYWORD_CONFIG_ACTION_STATUS "status" #define PLUGINSD_FUNCTION_CONFIG "config" +// claiming +#define PLUGINSD_KEYWORD_NODE_ID "NODE_ID" +#define PLUGINSD_KEYWORD_CLAIMED_ID "CLAIMED_ID" + +#define PLUGINSD_KEYWORD_JSON "JSON" +#define PLUGINSD_KEYWORD_JSON_END "JSON_PAYLOAD_END" +#define PLUGINSD_KEYWORD_STREAM_PATH "STREAM_PATH" + typedef void (*functions_evloop_worker_execute_t)(const char *transaction, char *function, usec_t *stop_monotonic_ut, bool *cancelled, BUFFER *payload, HTTP_ACCESS access, const char *source, void *data); @@ -125,9 +133,13 @@ static inline void pluginsd_function_json_error_to_stdout(const char *transactio fflush(stdout); } -static inline void pluginsd_function_result_to_stdout(const char *transaction, int code, const char *content_type, time_t expires, BUFFER *result) { - pluginsd_function_result_begin_to_stdout(transaction, code, content_type, expires); +static inline void pluginsd_function_result_to_stdout(const char *transaction, BUFFER *result) { + pluginsd_function_result_begin_to_stdout(transaction, result->response_code, + content_type_id2string(result->content_type), + result->expires); + fwrite(buffer_tostring(result), buffer_strlen(result), 1, stdout); + pluginsd_function_result_end_to_stdout(); fflush(stdout); } diff --git a/src/libnetdata/http/content_type.c b/src/libnetdata/http/content_type.c index 3e388a1da..e8f273912 100644 --- a/src/libnetdata/http/content_type.c +++ b/src/libnetdata/http/content_type.c @@ -10,14 +10,14 @@ static struct { const char *options; } content_types[] = { // primary - preferred during id-to-string conversions - { .format = "text/html", CT_TEXT_HTML, true }, + { .format = "application/json", CT_APPLICATION_JSON, true }, { .format = "text/plain", CT_TEXT_PLAIN, true }, + { .format = "text/html", CT_TEXT_HTML, true }, { .format = "text/css", CT_TEXT_CSS, true }, { .format = "text/yaml", CT_TEXT_YAML, true }, { .format = "application/yaml", CT_APPLICATION_YAML, true }, { .format = "text/xml", CT_TEXT_XML, true }, { .format = "text/xsl", CT_TEXT_XSL, true }, - { .format = "application/json", CT_APPLICATION_JSON, true }, { .format = "application/xml", CT_APPLICATION_XML, true }, { .format = "application/javascript", CT_APPLICATION_X_JAVASCRIPT, true }, { .format = "application/octet-stream", CT_APPLICATION_OCTET_STREAM, false }, @@ -42,16 +42,16 @@ static struct { // secondary - overlapping with primary - { .format = "text/plain", CT_PROMETHEUS, false, "version=0.0.4" }, - { .format = "prometheus", CT_PROMETHEUS }, - { .format = "text", CT_TEXT_PLAIN }, - { .format = "txt", CT_TEXT_PLAIN }, - { .format = "json", CT_APPLICATION_JSON }, - { .format = "html", CT_TEXT_HTML }, - { .format = "xml", 
CT_APPLICATION_XML }, + { .format = "text/plain", CT_PROMETHEUS, true, "version=0.0.4" }, + { .format = "prometheus", CT_PROMETHEUS, true }, + { .format = "text", CT_TEXT_PLAIN, true }, + { .format = "txt", CT_TEXT_PLAIN, true }, + { .format = "json", CT_APPLICATION_JSON, true }, + { .format = "html", CT_TEXT_HTML, true }, + { .format = "xml", CT_APPLICATION_XML, true }, // terminator - { .format = NULL, CT_TEXT_PLAIN }, + { .format = NULL, CT_TEXT_PLAIN, true }, }; HTTP_CONTENT_TYPE content_type_string2id(const char *format) { diff --git a/src/libnetdata/http/http_access.c b/src/libnetdata/http/http_access.c index 5be63bb19..398015cd3 100644 --- a/src/libnetdata/http/http_access.c +++ b/src/libnetdata/http/http_access.c @@ -3,24 +3,24 @@ #include "../libnetdata.h" static struct { - HTTP_USER_ROLE access; + HTTP_USER_ROLE role; const char *name; } user_roles[] = { - { .access = HTTP_USER_ROLE_NONE, .name = "none" }, - { .access = HTTP_USER_ROLE_ADMIN, .name = "admin" }, - { .access = HTTP_USER_ROLE_MANAGER, .name = "manager" }, - { .access = HTTP_USER_ROLE_TROUBLESHOOTER, .name = "troubleshooter" }, - { .access = HTTP_USER_ROLE_OBSERVER, .name = "observer" }, - { .access = HTTP_USER_ROLE_MEMBER, .name = "member" }, - { .access = HTTP_USER_ROLE_BILLING, .name = "billing" }, - { .access = HTTP_USER_ROLE_ANY, .name = "any" }, - - { .access = HTTP_USER_ROLE_MEMBER, .name = "members" }, - { .access = HTTP_USER_ROLE_ADMIN, .name = "admins" }, - { .access = HTTP_USER_ROLE_ANY, .name = "all" }, + { .role = HTTP_USER_ROLE_NONE, .name = "none" }, + { .role = HTTP_USER_ROLE_ADMIN, .name = "admin" }, + { .role = HTTP_USER_ROLE_MANAGER, .name = "manager" }, + { .role = HTTP_USER_ROLE_TROUBLESHOOTER, .name = "troubleshooter" }, + { .role = HTTP_USER_ROLE_OBSERVER, .name = "observer" }, + { .role = HTTP_USER_ROLE_MEMBER, .name = "member" }, + { .role = HTTP_USER_ROLE_BILLING, .name = "billing" }, + { .role = HTTP_USER_ROLE_ANY, .name = "any" }, + + { .role = HTTP_USER_ROLE_MEMBER, .name = "members" }, + { .role = HTTP_USER_ROLE_ADMIN, .name = "admins" }, + { .role = HTTP_USER_ROLE_ANY, .name = "all" }, // terminator - { .access = 0, .name = NULL }, + { .role = 0, .name = NULL }, }; HTTP_USER_ROLE http_user_role2id(const char *role) { @@ -29,7 +29,7 @@ HTTP_USER_ROLE http_user_role2id(const char *role) { for(size_t i = 0; user_roles[i].name ;i++) { if(strcmp(user_roles[i].name, role) == 0) - return user_roles[i].access; + return user_roles[i].role; } nd_log(NDLS_DAEMON, NDLP_WARNING, "HTTP user role '%s' is not valid", role); @@ -38,7 +38,7 @@ HTTP_USER_ROLE http_user_role2id(const char *role) { const char *http_id2user_role(HTTP_USER_ROLE role) { for(size_t i = 0; user_roles[i].name ;i++) { - if(role == user_roles[i].access) + if(role == user_roles[i].role) return user_roles[i].name; } diff --git a/src/libnetdata/http/http_access.h b/src/libnetdata/http/http_access.h index afc2e1dc7..00929f9b4 100644 --- a/src/libnetdata/http/http_access.h +++ b/src/libnetdata/http/http_access.h @@ -93,12 +93,16 @@ typedef enum __attribute__((packed)) { HTTP_ACL_WEBRTC = (1 << 6), // from WebRTC // HTTP_ACL_API takes the following additional ACLs, based on pattern matching of the client IP - HTTP_ACL_DASHBOARD = (1 << 10), - HTTP_ACL_REGISTRY = (1 << 11), - HTTP_ACL_BADGES = (1 << 12), - HTTP_ACL_MANAGEMENT = (1 << 13), - HTTP_ACL_STREAMING = (1 << 14), - HTTP_ACL_NETDATACONF = (1 << 15), + HTTP_ACL_METRICS = (1 << 10), + HTTP_ACL_FUNCTIONS = (1 << 11), + HTTP_ACL_NODES = (1 << 12), + HTTP_ACL_ALERTS = (1 << 13), + 
HTTP_ACL_DYNCFG = (1 << 14), + HTTP_ACL_REGISTRY = (1 << 15), + HTTP_ACL_BADGES = (1 << 16), + HTTP_ACL_MANAGEMENT = (1 << 17), + HTTP_ACL_STREAMING = (1 << 18), + HTTP_ACL_NETDATACONF = (1 << 19), // SSL related HTTP_ACL_SSL_OPTIONAL = (1 << 28), @@ -106,6 +110,14 @@ typedef enum __attribute__((packed)) { HTTP_ACL_SSL_DEFAULT = (1 << 30), } HTTP_ACL; +#define HTTP_ACL_DASHBOARD (HTTP_ACL)( \ + HTTP_ACL_METRICS \ + | HTTP_ACL_FUNCTIONS \ + | HTTP_ACL_ALERTS \ + | HTTP_ACL_NODES \ + | HTTP_ACL_DYNCFG \ + ) + #define HTTP_ACL_TRANSPORTS (HTTP_ACL)( \ HTTP_ACL_API \ | HTTP_ACL_API_UDP \ @@ -121,7 +133,11 @@ typedef enum __attribute__((packed)) { ) #define HTTP_ACL_ALL_FEATURES (HTTP_ACL)( \ - HTTP_ACL_DASHBOARD \ + HTTP_ACL_METRICS \ + | HTTP_ACL_FUNCTIONS \ + | HTTP_ACL_NODES \ + | HTTP_ACL_ALERTS \ + | HTTP_ACL_DYNCFG \ | HTTP_ACL_REGISTRY \ | HTTP_ACL_BADGES \ | HTTP_ACL_MANAGEMENT \ @@ -129,20 +145,24 @@ typedef enum __attribute__((packed)) { | HTTP_ACL_NETDATACONF \ ) +#define HTTP_ACL_ACLK_LICENSE_MANAGER (HTTP_ACL)( \ + HTTP_ACL_NODES \ +) + #ifdef NETDATA_DEV_MODE #define ACL_DEV_OPEN_ACCESS HTTP_ACL_NOCHECK #else #define ACL_DEV_OPEN_ACCESS 0 #endif -#define http_can_access_dashboard(w) ((w)->acl & HTTP_ACL_DASHBOARD) -#define http_can_access_registry(w) ((w)->acl & HTTP_ACL_REGISTRY) -#define http_can_access_badges(w) ((w)->acl & HTTP_ACL_BADGES) -#define http_can_access_mgmt(w) ((w)->acl & HTTP_ACL_MANAGEMENT) -#define http_can_access_stream(w) ((w)->acl & HTTP_ACL_STREAMING) -#define http_can_access_netdataconf(w) ((w)->acl & HTTP_ACL_NETDATACONF) -#define http_is_using_ssl_optional(w) ((w)->port_acl & HTTP_ACL_SSL_OPTIONAL) -#define http_is_using_ssl_force(w) ((w)->port_acl & HTTP_ACL_SSL_FORCE) -#define http_is_using_ssl_default(w) ((w)->port_acl & HTTP_ACL_SSL_DEFAULT) +#define http_can_access_dashboard(w) (((w)->acl & HTTP_ACL_DASHBOARD) == HTTP_ACL_DASHBOARD) +#define http_can_access_registry(w) (((w)->acl & HTTP_ACL_REGISTRY) == HTTP_ACL_REGISTRY) +#define http_can_access_badges(w) (((w)->acl & HTTP_ACL_BADGES) == HTTP_ACL_BADGES) +#define http_can_access_mgmt(w) (((w)->acl & HTTP_ACL_MANAGEMENT) == HTTP_ACL_MANAGEMENT) +#define http_can_access_stream(w) (((w)->acl & HTTP_ACL_STREAMING) == HTTP_ACL_STREAMING) +#define http_can_access_netdataconf(w) (((w)->acl & HTTP_ACL_NETDATACONF) == HTTP_ACL_NETDATACONF) +#define http_is_using_ssl_optional(w) (((w)->port_acl & HTTP_ACL_SSL_OPTIONAL) == HTTP_ACL_SSL_OPTIONAL) +#define http_is_using_ssl_force(w) (((w)->port_acl & HTTP_ACL_SSL_FORCE) == HTTP_ACL_SSL_FORCE) +#define http_is_using_ssl_default(w) (((w)->port_acl & HTTP_ACL_SSL_DEFAULT) == HTTP_ACL_SSL_DEFAULT) #endif //NETDATA_HTTP_ACCESS_H diff --git a/src/libnetdata/inlined.h b/src/libnetdata/inlined.h index 6b71590c9..50bc5e269 100644 --- a/src/libnetdata/inlined.h +++ b/src/libnetdata/inlined.h @@ -106,17 +106,6 @@ static inline uint64_t murmur64(uint64_t k) { return k; } -static inline size_t indexing_partition(Word_t ptr, Word_t modulo) __attribute__((const)); -static inline size_t indexing_partition(Word_t ptr, Word_t modulo) { -#ifdef ENV64BIT - uint64_t hash = murmur64(ptr); - return hash % modulo; -#else - uint32_t hash = murmur32(ptr); - return hash % modulo; -#endif -} - static inline unsigned int str2u(const char *s) { unsigned int n = 0; @@ -506,6 +495,43 @@ static inline int read_txt_file(const char *filename, char *buffer, size_t size) return 0; } +static inline bool read_txt_file_to_buffer(const char *filename, BUFFER *wb, size_t max_size) { + // Open the file 
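+    //
+    // [Editor's annotation — comments only, not part of the upstream change.]
+    // The sequence below sizes the file with fstat() on the already-open fd and
+    // rejects anything larger than max_size before allocating, so callers get a
+    // hard memory bound; sizing and reading through the same fd also avoids a
+    // race with a separate path-based stat(). Note that it reads into
+    // &wb->buffer[wb->len] but then sets wb->len = r, so it appears to assume
+    // the BUFFER is empty (any previous content length is discarded).
+    //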
+    int fd = open(filename, O_RDONLY | O_CLOEXEC);
+    if (fd == -1)
+        return false;
+
+    // Get the file size
+    struct stat st;
+    if (fstat(fd, &st) == -1) {
+        close(fd);
+        return false;
+    }
+
+    size_t file_size = st.st_size;
+
+    // Check if the file size exceeds the maximum allowed size
+    if (file_size > max_size) {
+        close(fd);
+        return false; // File size too large
+    }
+
+    buffer_need_bytes(wb, file_size + 1);
+
+    // Read the file contents into the buffer
+    ssize_t r = read(fd, &wb->buffer[wb->len], file_size);
+    if (r != (ssize_t)file_size) {
+        close(fd);
+        return false; // Read error
+    }
+    wb->len = r;
+
+    // Close the file descriptor
+    close(fd);
+
+    return true; // Success
+}
+
 static inline int read_proc_cmdline(const char *filename, char *buffer, size_t size) {
     if (unlikely(!size)) return 3;
diff --git a/src/libnetdata/json/README.md b/src/libnetdata/json/README.md
index 9ae5ff382..21cd42a79 100644
--- a/src/libnetdata/json/README.md
+++ b/src/libnetdata/json/README.md
@@ -1,12 +1,3 @@
-
-
 # json
 
 `json` contains a parser for JSON strings, based on `jsmn`. If the JSON-C library is installed, the installation script will prefer it; its use can also be forced at compile time with `--enable-jsonc`.
diff --git a/src/libnetdata/json/json-c-parser-inline.c b/src/libnetdata/json/json-c-parser-inline.c
new file mode 100644
index 000000000..a17847a3e
--- /dev/null
+++ b/src/libnetdata/json/json-c-parser-inline.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+int rrd_call_function_error(BUFFER *wb, const char *msg, int code) {
+    buffer_reset(wb);
+    buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_MINIFY);
+    buffer_json_member_add_int64(wb, "status", code);
+    buffer_json_member_add_string(wb, "error_message", msg);
+    buffer_json_finalize(wb);
+    wb->date = now_realtime_sec();
+    wb->expires = wb->date + 1;
+    wb->response_code = code;
+    return code;
+}
+
+struct json_object *json_parse_function_payload_or_error(BUFFER *output, BUFFER *payload, int *code, json_parse_function_payload_t cb, void *cb_data) {
+    if(!payload || !buffer_strlen(payload)) {
+        *code = rrd_call_function_error(output, "No payload given, but a payload is required for this feature.", HTTP_RESP_BAD_REQUEST);
+        return NULL;
+    }
+
+    struct json_tokener *tokener = json_tokener_new();
+    if (!tokener) {
+        *code = rrd_call_function_error(output, "Failed to initialize json parser.", HTTP_RESP_INTERNAL_SERVER_ERROR);
+        return NULL;
+    }
+
+    struct json_object *jobj = json_tokener_parse_ex(tokener, buffer_tostring(payload), (int)buffer_strlen(payload));
+    if (json_tokener_get_error(tokener) != json_tokener_success) {
+        const char *error_msg = json_tokener_error_desc(json_tokener_get_error(tokener));
+        char tmp[strlen(error_msg) + 100];
+        snprintf(tmp, sizeof(tmp), "JSON parser failed: %s", error_msg);
+        json_tokener_free(tokener);
+        *code = rrd_call_function_error(output, tmp, HTTP_RESP_INTERNAL_SERVER_ERROR);
+        return NULL;
+    }
+    json_tokener_free(tokener);
+
+    CLEAN_BUFFER *error = buffer_create(0, NULL);
+    if(!cb(jobj, "", cb_data, error)) {
+        char tmp[buffer_strlen(error) + 100];
+        snprintfz(tmp, sizeof(tmp), "JSON parser failed: %s", buffer_tostring(error));
+        *code = rrd_call_function_error(output, tmp, HTTP_RESP_BAD_REQUEST);
+        json_object_put(jobj);
+        return NULL;
+    }
+
+    *code = HTTP_RESP_OK;
+
+    return jobj;
+}
diff --git a/src/libnetdata/json/json-c-parser-inline.h b/src/libnetdata/json/json-c-parser-inline.h
index c1d60ca45..e51cb232e
100644 --- a/src/libnetdata/json/json-c-parser-inline.h +++ b/src/libnetdata/json/json-c-parser-inline.h @@ -25,6 +25,45 @@ } \ } while(0) +#define JSONC_PARSE_TXT2STRDUPZ_OR_ERROR_AND_RETURN(jobj, path, member, dst, error, required) do { \ + json_object *_j; \ + if (json_object_object_get_ex(jobj, member, &_j) && json_object_is_type(_j, json_type_string)) { \ + freez((void *)dst); \ + dst = strdupz(json_object_get_string(_j)); \ + } \ + else if(required) { \ + buffer_sprintf(error, "missing or invalid type for '%s.%s' string", path, member); \ + return false; \ + } \ +} while(0) + +#define JSONC_PARSE_TXT2UUID_OR_ERROR_AND_RETURN(jobj, path, member, dst, error, required) do { \ + json_object *_j; \ + if (json_object_object_get_ex(jobj, member, &_j)) { \ + if (json_object_is_type(_j, json_type_string)) { \ + if (uuid_parse(json_object_get_string(_j), dst) != 0) { \ + if(required) { \ + buffer_sprintf(error, "invalid UUID '%s.%s'", path, member); \ + return false; \ + } \ + else \ + uuid_clear(dst); \ + } \ + } \ + else if (json_object_is_type(_j, json_type_null)) { \ + uuid_clear(dst); \ + } \ + else if (required) { \ + buffer_sprintf(error, "expected UUID or null '%s.%s'", path, member); \ + return false; \ + } \ + } \ + else if (required) { \ + buffer_sprintf(error, "missing UUID '%s.%s'", path, member); \ + return false; \ + } \ +} while(0) + #define JSONC_PARSE_TXT2BUFFER_OR_ERROR_AND_RETURN(jobj, path, member, dst, error, required) do { \ json_object *_j; \ if (json_object_object_get_ex(jobj, member, &_j) && json_object_is_type(_j, json_type_string)) { \ @@ -111,7 +150,6 @@ } \ } while(0) - #define JSONC_PARSE_TXT2ENUM_OR_ERROR_AND_RETURN(jobj, path, member, converter, dst, error, required) do { \ json_object *_j; \ if (json_object_object_get_ex(jobj, member, &_j) && json_object_is_type(_j, json_type_string)) \ @@ -122,11 +160,11 @@ } \ } while(0) -#define JSONC_PARSE_INT_OR_ERROR_AND_RETURN(jobj, path, member, dst, error, required) do { \ +#define JSONC_PARSE_INT64_OR_ERROR_AND_RETURN(jobj, path, member, dst, error, required) do { \ json_object *_j; \ if (json_object_object_get_ex(jobj, member, &_j)) { \ if (_j != NULL && json_object_is_type(_j, json_type_int)) \ - dst = json_object_get_int(_j); \ + dst = json_object_get_int64(_j); \ else if (_j != NULL && json_object_is_type(_j, json_type_double)) \ dst = (typeof(dst))json_object_get_double(_j); \ else if (_j == NULL) \ @@ -136,7 +174,26 @@ return false; \ } \ } else if(required) { \ - buffer_sprintf(error, "missing or invalid type (expected int value or null) for '%s.%s'", path, member); \ + buffer_sprintf(error, "missing or invalid type (expected int value or null) for '%s.%s'", path, member);\ + return false; \ + } \ +} while(0) + +#define JSONC_PARSE_UINT64_OR_ERROR_AND_RETURN(jobj, path, member, dst, error, required) do { \ + json_object *_j; \ + if (json_object_object_get_ex(jobj, member, &_j)) { \ + if (_j != NULL && json_object_is_type(_j, json_type_int)) \ + dst = json_object_get_uint64(_j); \ + else if (_j != NULL && json_object_is_type(_j, json_type_double)) \ + dst = (typeof(dst))json_object_get_double(_j); \ + else if (_j == NULL) \ + dst = 0; \ + else { \ + buffer_sprintf(error, "not supported type (expected int) for '%s.%s'", path, member); \ + return false; \ + } \ + } else if(required) { \ + buffer_sprintf(error, "missing or invalid type (expected int value or null) for '%s.%s'", path, member);\ return false; \ } \ } while(0) @@ -174,4 +231,8 @@ } \ } while(0) +typedef bool 
(*json_parse_function_payload_t)(json_object *jobj, const char *path, void *data, BUFFER *error); +int rrd_call_function_error(BUFFER *wb, const char *msg, int code); +struct json_object *json_parse_function_payload_or_error(BUFFER *output, BUFFER *payload, int *code, json_parse_function_payload_t cb, void *cb_data); + #endif //NETDATA_JSON_C_PARSER_INLINE_H diff --git a/src/libnetdata/libjudy/judy-malloc.c b/src/libnetdata/libjudy/judy-malloc.c new file mode 100644 index 000000000..ec736393d --- /dev/null +++ b/src/libnetdata/libjudy/judy-malloc.c @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "judy-malloc.h" + +#define MAX_JUDY_SIZE_TO_ARAL 24 +static bool judy_sizes_config[MAX_JUDY_SIZE_TO_ARAL + 1] = { + [3] = true, + [4] = true, + [5] = true, + [6] = true, + [7] = true, + [8] = true, + [10] = true, + [11] = true, + [15] = true, + [23] = true, +}; +static ARAL *judy_sizes_aral[MAX_JUDY_SIZE_TO_ARAL + 1] = {}; + +struct aral_statistics judy_sizes_aral_statistics = {}; + +__attribute__((constructor)) void aral_judy_init(void) { + for(size_t Words = 0; Words <= MAX_JUDY_SIZE_TO_ARAL; Words++) + if(judy_sizes_config[Words]) { + char buf[30+1]; + snprintfz(buf, sizeof(buf) - 1, "judy-%zu", Words * sizeof(Word_t)); + judy_sizes_aral[Words] = aral_create( + buf, + Words * sizeof(Word_t), + 0, + 65536, + &judy_sizes_aral_statistics, + NULL, NULL, false, false); + } +} + +size_t judy_aral_overhead(void) { + return aral_overhead_from_stats(&judy_sizes_aral_statistics); +} + +size_t judy_aral_structures(void) { + return aral_structures_from_stats(&judy_sizes_aral_statistics); +} + +static ARAL *judy_size_aral(Word_t Words) { + if(Words <= MAX_JUDY_SIZE_TO_ARAL && judy_sizes_aral[Words]) + return judy_sizes_aral[Words]; + + return NULL; +} + +inline Word_t JudyMalloc(Word_t Words) { + Word_t Addr; + + ARAL *ar = judy_size_aral(Words); + if(ar) + Addr = (Word_t) aral_mallocz(ar); + else + Addr = (Word_t) mallocz(Words * sizeof(Word_t)); + + return(Addr); +} + +inline void JudyFree(void * PWord, Word_t Words) { + ARAL *ar = judy_size_aral(Words); + if(ar) + aral_freez(ar, PWord); + else + freez(PWord); +} + +Word_t JudyMallocVirtual(Word_t Words) { + return JudyMalloc(Words); +} + +void JudyFreeVirtual(void * PWord, Word_t Words) { + JudyFree(PWord, Words); +} diff --git a/src/libnetdata/libjudy/judy-malloc.h b/src/libnetdata/libjudy/judy-malloc.h new file mode 100644 index 000000000..65cba982b --- /dev/null +++ b/src/libnetdata/libjudy/judy-malloc.h @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_JUDY_MALLOC_H +#define NETDATA_JUDY_MALLOC_H + +#include "../libnetdata.h" + +size_t judy_aral_overhead(void); +size_t judy_aral_structures(void); + +#endif //NETDATA_JUDY_MALLOC_H diff --git a/src/libnetdata/libnetdata.c b/src/libnetdata/libnetdata.c index b36a139d2..e21bf119d 100644 --- a/src/libnetdata/libnetdata.c +++ b/src/libnetdata/libnetdata.c @@ -2,6 +2,10 @@ #include "libnetdata.h" +#define MALLOC_ALIGNMENT (sizeof(uintptr_t) * 2) +#define size_t_atomic_count(op, var, size) __atomic_## op ##_fetch(&(var), size, __ATOMIC_RELAXED) +#define size_t_atomic_bytes(op, var, size) __atomic_## op ##_fetch(&(var), ((size) % MALLOC_ALIGNMENT)?((size) + MALLOC_ALIGNMENT - ((size) % MALLOC_ALIGNMENT)):(size), __ATOMIC_RELAXED) + #if !defined(MADV_DONTFORK) #define MADV_DONTFORK 0 #endif @@ -13,88 +17,13 @@ struct rlimit rlimit_nofile = { .rlim_cur = 1024, .rlim_max = 1024 }; #if defined(MADV_MERGEABLE) -int enable_ksm = 1; +int enable_ksm = 
CONFIG_BOOLEAN_AUTO; #else int enable_ksm = 0; #endif volatile sig_atomic_t netdata_exit = 0; -#define MAX_JUDY_SIZE_TO_ARAL 24 -static bool judy_sizes_config[MAX_JUDY_SIZE_TO_ARAL + 1] = { - [3] = true, - [4] = true, - [5] = true, - [6] = true, - [7] = true, - [8] = true, - [10] = true, - [11] = true, - [15] = true, - [23] = true, -}; -static ARAL *judy_sizes_aral[MAX_JUDY_SIZE_TO_ARAL + 1] = {}; - -struct aral_statistics judy_sizes_aral_statistics = {}; - -void aral_judy_init(void) { - for(size_t Words = 0; Words <= MAX_JUDY_SIZE_TO_ARAL; Words++) - if(judy_sizes_config[Words]) { - char buf[30+1]; - snprintfz(buf, sizeof(buf) - 1, "judy-%zu", Words * sizeof(Word_t)); - judy_sizes_aral[Words] = aral_create( - buf, - Words * sizeof(Word_t), - 0, - 65536, - &judy_sizes_aral_statistics, - NULL, NULL, false, false); - } -} - -size_t judy_aral_overhead(void) { - return aral_overhead_from_stats(&judy_sizes_aral_statistics); -} - -size_t judy_aral_structures(void) { - return aral_structures_from_stats(&judy_sizes_aral_statistics); -} - -static ARAL *judy_size_aral(Word_t Words) { - if(Words <= MAX_JUDY_SIZE_TO_ARAL && judy_sizes_aral[Words]) - return judy_sizes_aral[Words]; - - return NULL; -} - -inline Word_t JudyMalloc(Word_t Words) { - Word_t Addr; - - ARAL *ar = judy_size_aral(Words); - if(ar) - Addr = (Word_t) aral_mallocz(ar); - else - Addr = (Word_t) mallocz(Words * sizeof(Word_t)); - - return(Addr); -} - -inline void JudyFree(void * PWord, Word_t Words) { - ARAL *ar = judy_size_aral(Words); - if(ar) - aral_freez(ar, PWord); - else - freez(PWord); -} - -Word_t JudyMallocVirtual(Word_t Words) { - return JudyMalloc(Words); -} - -void JudyFreeVirtual(void * PWord, Word_t Words) { - JudyFree(PWord, Words); -} - // ---------------------------------------------------------------------------- // memory allocation functions that handle failures @@ -553,536 +482,6 @@ void json_fix_string(char *s) { } } -unsigned char netdata_map_chart_names[256] = { - [0] = '\0', // - [1] = '_', // - [2] = '_', // - [3] = '_', // - [4] = '_', // - [5] = '_', // - [6] = '_', // - [7] = '_', // - [8] = '_', // - [9] = '_', // - [10] = '_', // - [11] = '_', // - [12] = '_', // - [13] = '_', // - [14] = '_', // - [15] = '_', // - [16] = '_', // - [17] = '_', // - [18] = '_', // - [19] = '_', // - [20] = '_', // - [21] = '_', // - [22] = '_', // - [23] = '_', // - [24] = '_', // - [25] = '_', // - [26] = '_', // - [27] = '_', // - [28] = '_', // - [29] = '_', // - [30] = '_', // - [31] = '_', // - [32] = '_', // - [33] = '_', // ! - [34] = '_', // " - [35] = '_', // # - [36] = '_', // $ - [37] = '_', // % - [38] = '_', // & - [39] = '_', // ' - [40] = '_', // ( - [41] = '_', // ) - [42] = '_', // * - [43] = '_', // + - [44] = '.', // , - [45] = '-', // - - [46] = '.', // . - [47] = '/', // / - [48] = '0', // 0 - [49] = '1', // 1 - [50] = '2', // 2 - [51] = '3', // 3 - [52] = '4', // 4 - [53] = '5', // 5 - [54] = '6', // 6 - [55] = '7', // 7 - [56] = '8', // 8 - [57] = '9', // 9 - [58] = '_', // : - [59] = '_', // ; - [60] = '_', // < - [61] = '_', // = - [62] = '_', // > - [63] = '_', // ? 
- [64] = '_', // @ - [65] = 'a', // A - [66] = 'b', // B - [67] = 'c', // C - [68] = 'd', // D - [69] = 'e', // E - [70] = 'f', // F - [71] = 'g', // G - [72] = 'h', // H - [73] = 'i', // I - [74] = 'j', // J - [75] = 'k', // K - [76] = 'l', // L - [77] = 'm', // M - [78] = 'n', // N - [79] = 'o', // O - [80] = 'p', // P - [81] = 'q', // Q - [82] = 'r', // R - [83] = 's', // S - [84] = 't', // T - [85] = 'u', // U - [86] = 'v', // V - [87] = 'w', // W - [88] = 'x', // X - [89] = 'y', // Y - [90] = 'z', // Z - [91] = '_', // [ - [92] = '/', // backslash - [93] = '_', // ] - [94] = '_', // ^ - [95] = '_', // _ - [96] = '_', // ` - [97] = 'a', // a - [98] = 'b', // b - [99] = 'c', // c - [100] = 'd', // d - [101] = 'e', // e - [102] = 'f', // f - [103] = 'g', // g - [104] = 'h', // h - [105] = 'i', // i - [106] = 'j', // j - [107] = 'k', // k - [108] = 'l', // l - [109] = 'm', // m - [110] = 'n', // n - [111] = 'o', // o - [112] = 'p', // p - [113] = 'q', // q - [114] = 'r', // r - [115] = 's', // s - [116] = 't', // t - [117] = 'u', // u - [118] = 'v', // v - [119] = 'w', // w - [120] = 'x', // x - [121] = 'y', // y - [122] = 'z', // z - [123] = '_', // { - [124] = '_', // | - [125] = '_', // } - [126] = '_', // ~ - [127] = '_', // - [128] = '_', // - [129] = '_', // - [130] = '_', // - [131] = '_', // - [132] = '_', // - [133] = '_', // - [134] = '_', // - [135] = '_', // - [136] = '_', // - [137] = '_', // - [138] = '_', // - [139] = '_', // - [140] = '_', // - [141] = '_', // - [142] = '_', // - [143] = '_', // - [144] = '_', // - [145] = '_', // - [146] = '_', // - [147] = '_', // - [148] = '_', // - [149] = '_', // - [150] = '_', // - [151] = '_', // - [152] = '_', // - [153] = '_', // - [154] = '_', // - [155] = '_', // - [156] = '_', // - [157] = '_', // - [158] = '_', // - [159] = '_', // - [160] = '_', // - [161] = '_', // - [162] = '_', // - [163] = '_', // - [164] = '_', // - [165] = '_', // - [166] = '_', // - [167] = '_', // - [168] = '_', // - [169] = '_', // - [170] = '_', // - [171] = '_', // - [172] = '_', // - [173] = '_', // - [174] = '_', // - [175] = '_', // - [176] = '_', // - [177] = '_', // - [178] = '_', // - [179] = '_', // - [180] = '_', // - [181] = '_', // - [182] = '_', // - [183] = '_', // - [184] = '_', // - [185] = '_', // - [186] = '_', // - [187] = '_', // - [188] = '_', // - [189] = '_', // - [190] = '_', // - [191] = '_', // - [192] = '_', // - [193] = '_', // - [194] = '_', // - [195] = '_', // - [196] = '_', // - [197] = '_', // - [198] = '_', // - [199] = '_', // - [200] = '_', // - [201] = '_', // - [202] = '_', // - [203] = '_', // - [204] = '_', // - [205] = '_', // - [206] = '_', // - [207] = '_', // - [208] = '_', // - [209] = '_', // - [210] = '_', // - [211] = '_', // - [212] = '_', // - [213] = '_', // - [214] = '_', // - [215] = '_', // - [216] = '_', // - [217] = '_', // - [218] = '_', // - [219] = '_', // - [220] = '_', // - [221] = '_', // - [222] = '_', // - [223] = '_', // - [224] = '_', // - [225] = '_', // - [226] = '_', // - [227] = '_', // - [228] = '_', // - [229] = '_', // - [230] = '_', // - [231] = '_', // - [232] = '_', // - [233] = '_', // - [234] = '_', // - [235] = '_', // - [236] = '_', // - [237] = '_', // - [238] = '_', // - [239] = '_', // - [240] = '_', // - [241] = '_', // - [242] = '_', // - [243] = '_', // - [244] = '_', // - [245] = '_', // - [246] = '_', // - [247] = '_', // - [248] = '_', // - [249] = '_', // - [250] = '_', // - [251] = '_', // - [252] = '_', // - [253] = '_', // - [254] = '_', // - [255] = '_' // 
-}; - -// make sure the supplied string -// is good for a netdata chart/dimension ID/NAME -void netdata_fix_chart_name(char *s) { - while ((*s = netdata_map_chart_names[(unsigned char) *s])) s++; -} - -unsigned char netdata_map_chart_ids[256] = { - [0] = '\0', // - [1] = '_', // - [2] = '_', // - [3] = '_', // - [4] = '_', // - [5] = '_', // - [6] = '_', // - [7] = '_', // - [8] = '_', // - [9] = '_', // - [10] = '_', // - [11] = '_', // - [12] = '_', // - [13] = '_', // - [14] = '_', // - [15] = '_', // - [16] = '_', // - [17] = '_', // - [18] = '_', // - [19] = '_', // - [20] = '_', // - [21] = '_', // - [22] = '_', // - [23] = '_', // - [24] = '_', // - [25] = '_', // - [26] = '_', // - [27] = '_', // - [28] = '_', // - [29] = '_', // - [30] = '_', // - [31] = '_', // - [32] = '_', // - [33] = '_', // ! - [34] = '_', // " - [35] = '_', // # - [36] = '_', // $ - [37] = '_', // % - [38] = '_', // & - [39] = '_', // ' - [40] = '_', // ( - [41] = '_', // ) - [42] = '_', // * - [43] = '_', // + - [44] = '.', // , - [45] = '-', // - - [46] = '.', // . - [47] = '_', // / - [48] = '0', // 0 - [49] = '1', // 1 - [50] = '2', // 2 - [51] = '3', // 3 - [52] = '4', // 4 - [53] = '5', // 5 - [54] = '6', // 6 - [55] = '7', // 7 - [56] = '8', // 8 - [57] = '9', // 9 - [58] = '_', // : - [59] = '_', // ; - [60] = '_', // < - [61] = '_', // = - [62] = '_', // > - [63] = '_', // ? - [64] = '_', // @ - [65] = 'a', // A - [66] = 'b', // B - [67] = 'c', // C - [68] = 'd', // D - [69] = 'e', // E - [70] = 'f', // F - [71] = 'g', // G - [72] = 'h', // H - [73] = 'i', // I - [74] = 'j', // J - [75] = 'k', // K - [76] = 'l', // L - [77] = 'm', // M - [78] = 'n', // N - [79] = 'o', // O - [80] = 'p', // P - [81] = 'q', // Q - [82] = 'r', // R - [83] = 's', // S - [84] = 't', // T - [85] = 'u', // U - [86] = 'v', // V - [87] = 'w', // W - [88] = 'x', // X - [89] = 'y', // Y - [90] = 'z', // Z - [91] = '_', // [ - [92] = '_', // backslash - [93] = '_', // ] - [94] = '_', // ^ - [95] = '_', // _ - [96] = '_', // ` - [97] = 'a', // a - [98] = 'b', // b - [99] = 'c', // c - [100] = 'd', // d - [101] = 'e', // e - [102] = 'f', // f - [103] = 'g', // g - [104] = 'h', // h - [105] = 'i', // i - [106] = 'j', // j - [107] = 'k', // k - [108] = 'l', // l - [109] = 'm', // m - [110] = 'n', // n - [111] = 'o', // o - [112] = 'p', // p - [113] = 'q', // q - [114] = 'r', // r - [115] = 's', // s - [116] = 't', // t - [117] = 'u', // u - [118] = 'v', // v - [119] = 'w', // w - [120] = 'x', // x - [121] = 'y', // y - [122] = 'z', // z - [123] = '_', // { - [124] = '_', // | - [125] = '_', // } - [126] = '_', // ~ - [127] = '_', // - [128] = '_', // - [129] = '_', // - [130] = '_', // - [131] = '_', // - [132] = '_', // - [133] = '_', // - [134] = '_', // - [135] = '_', // - [136] = '_', // - [137] = '_', // - [138] = '_', // - [139] = '_', // - [140] = '_', // - [141] = '_', // - [142] = '_', // - [143] = '_', // - [144] = '_', // - [145] = '_', // - [146] = '_', // - [147] = '_', // - [148] = '_', // - [149] = '_', // - [150] = '_', // - [151] = '_', // - [152] = '_', // - [153] = '_', // - [154] = '_', // - [155] = '_', // - [156] = '_', // - [157] = '_', // - [158] = '_', // - [159] = '_', // - [160] = '_', // - [161] = '_', // - [162] = '_', // - [163] = '_', // - [164] = '_', // - [165] = '_', // - [166] = '_', // - [167] = '_', // - [168] = '_', // - [169] = '_', // - [170] = '_', // - [171] = '_', // - [172] = '_', // - [173] = '_', // - [174] = '_', // - [175] = '_', // - [176] = '_', // - [177] = '_', // - [178] = '_', 
// - [179] = '_', // - [180] = '_', // - [181] = '_', // - [182] = '_', // - [183] = '_', // - [184] = '_', // - [185] = '_', // - [186] = '_', // - [187] = '_', // - [188] = '_', // - [189] = '_', // - [190] = '_', // - [191] = '_', // - [192] = '_', // - [193] = '_', // - [194] = '_', // - [195] = '_', // - [196] = '_', // - [197] = '_', // - [198] = '_', // - [199] = '_', // - [200] = '_', // - [201] = '_', // - [202] = '_', // - [203] = '_', // - [204] = '_', // - [205] = '_', // - [206] = '_', // - [207] = '_', // - [208] = '_', // - [209] = '_', // - [210] = '_', // - [211] = '_', // - [212] = '_', // - [213] = '_', // - [214] = '_', // - [215] = '_', // - [216] = '_', // - [217] = '_', // - [218] = '_', // - [219] = '_', // - [220] = '_', // - [221] = '_', // - [222] = '_', // - [223] = '_', // - [224] = '_', // - [225] = '_', // - [226] = '_', // - [227] = '_', // - [228] = '_', // - [229] = '_', // - [230] = '_', // - [231] = '_', // - [232] = '_', // - [233] = '_', // - [234] = '_', // - [235] = '_', // - [236] = '_', // - [237] = '_', // - [238] = '_', // - [239] = '_', // - [240] = '_', // - [241] = '_', // - [242] = '_', // - [243] = '_', // - [244] = '_', // - [245] = '_', // - [246] = '_', // - [247] = '_', // - [248] = '_', // - [249] = '_', // - [250] = '_', // - [251] = '_', // - [252] = '_', // - [253] = '_', // - [254] = '_', // - [255] = '_' // -}; - -// make sure the supplied string -// is good for a netdata chart/dimension ID/NAME -void netdata_fix_chart_id(char *s) { - while ((*s = netdata_map_chart_ids[(unsigned char) *s])) s++; -} - static int memory_file_open(const char *filename, size_t size) { // netdata_log_info("memory_file_open('%s', %zu", filename, size); @@ -1302,315 +701,6 @@ int snprintfz(char *dst, size_t n, const char *fmt, ...) 
{ return ret; } -static int is_procfs(const char *path, char **reason) { -#if defined(__APPLE__) || defined(__FreeBSD__) - (void)path; - (void)reason; -#else - struct statfs stat; - - if (statfs(path, &stat) == -1) { - if (reason) - *reason = "failed to statfs()"; - return -1; - } - -#if defined PROC_SUPER_MAGIC - if (stat.f_type != PROC_SUPER_MAGIC) { - if (reason) - *reason = "type is not procfs"; - return -1; - } -#endif - -#endif - - return 0; -} - -static int is_sysfs(const char *path, char **reason) { -#if defined(__APPLE__) || defined(__FreeBSD__) - (void)path; - (void)reason; -#else - struct statfs stat; - - if (statfs(path, &stat) == -1) { - if (reason) - *reason = "failed to statfs()"; - return -1; - } - -#if defined SYSFS_MAGIC - if (stat.f_type != SYSFS_MAGIC) { - if (reason) - *reason = "type is not sysfs"; - return -1; - } -#endif - -#endif - - return 0; -} - -int verify_netdata_host_prefix(bool log_msg) { - if(!netdata_configured_host_prefix) - netdata_configured_host_prefix = ""; - - if(!*netdata_configured_host_prefix) - return 0; - - char buffer[FILENAME_MAX + 1]; - char *path = netdata_configured_host_prefix; - char *reason = "unknown reason"; - errno_clear(); - - struct stat sb; - if (stat(path, &sb) == -1) { - reason = "failed to stat()"; - goto failed; - } - - if((sb.st_mode & S_IFMT) != S_IFDIR) { - errno = EINVAL; - reason = "is not a directory"; - goto failed; - } - - path = buffer; - snprintfz(path, FILENAME_MAX, "%s/proc", netdata_configured_host_prefix); - if(is_procfs(path, &reason) == -1) - goto failed; - - snprintfz(path, FILENAME_MAX, "%s/sys", netdata_configured_host_prefix); - if(is_sysfs(path, &reason) == -1) - goto failed; - - if (netdata_configured_host_prefix && *netdata_configured_host_prefix) { - if (log_msg) - netdata_log_info("Using host prefix directory '%s'", netdata_configured_host_prefix); - } - - return 0; - -failed: - if (log_msg) - netdata_log_error("Ignoring host prefix '%s': path '%s' %s", netdata_configured_host_prefix, path, reason); - netdata_configured_host_prefix = ""; - return -1; -} - -char *strdupz_path_subpath(const char *path, const char *subpath) { - if(unlikely(!path || !*path)) path = "."; - if(unlikely(!subpath)) subpath = ""; - - // skip trailing slashes in path - size_t len = strlen(path); - while(len > 0 && path[len - 1] == '/') len--; - - // skip leading slashes in subpath - while(subpath[0] == '/') subpath++; - - // if the last character in path is / and (there is a subpath or path is now empty) - // keep the trailing slash in path and remove the additional slash - char *slash = "/"; - if(path[len] == '/' && (*subpath || len == 0)) { - slash = ""; - len++; - } - else if(!*subpath) { - // there is no subpath - // no need for trailing slash - slash = ""; - } - - char buffer[FILENAME_MAX + 1]; - snprintfz(buffer, FILENAME_MAX, "%.*s%s%s", (int)len, path, slash, subpath); - return strdupz(buffer); -} - -int path_is_dir(const char *path, const char *subpath) { - char *s = strdupz_path_subpath(path, subpath); - - size_t max_links = 100; - - int is_dir = 0; - struct stat statbuf; - while(max_links-- && stat(s, &statbuf) == 0) { - if((statbuf.st_mode & S_IFMT) == S_IFDIR) { - is_dir = 1; - break; - } - else if((statbuf.st_mode & S_IFMT) == S_IFLNK) { - char buffer[FILENAME_MAX + 1]; - ssize_t l = readlink(s, buffer, FILENAME_MAX); - if(l > 0) { - buffer[l] = '\0'; - freez(s); - s = strdupz(buffer); - continue; - } - else { - is_dir = 0; - break; - } - } - else { - is_dir = 0; - break; - } - } - - freez(s); - return is_dir; -} - 
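Editor's note: path_is_dir() above and path_is_file() / recursive_config_double_dir_load() below (with strdupz_path_subpath()) are deleted from libnetdata.c here, and later in this patch libnetdata.h drops their declarations while gaining #include "paths/paths.h" — so these helpers appear to be relocated into the new paths module rather than removed outright. The technique they share is a stat() walk that resolves symbolic links manually with a bounded hop count, so a link loop cannot hang the caller. A minimal self-contained sketch of that pattern, for readers skimming the deletion (illustrative only: the function name is hypothetical, it uses lstat() so the link itself is visible, and — like the removed helpers — it does not resolve relative link targets against the link's directory):

    #include <limits.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/stat.h>
    #include <unistd.h>

    // Returns 1 if 'path' ultimately points to a regular file, following at
    // most 100 symbolic link hops to guard against link loops.
    static int sketch_is_regular_file(const char *path) {
        char current[PATH_MAX];
        snprintf(current, sizeof(current), "%s", path);

        for(int hops = 0; hops < 100; hops++) {
            struct stat sb;
            if(lstat(current, &sb) != 0)
                return 0;                   // cannot stat the path

            if(S_ISREG(sb.st_mode))
                return 1;                   // found a regular file

            if(!S_ISLNK(sb.st_mode))
                return 0;                   // directory, device, etc.

            char target[PATH_MAX];
            ssize_t len = readlink(current, target, sizeof(target) - 1);
            if(len <= 0)
                return 0;                   // broken or unreadable link

            target[len] = '\0';             // readlink() does not NUL-terminate
            memcpy(current, target, (size_t)len + 1);
        }

        return 0;                           // too many hops: assume a link loop
    }

Bounding the walk at a fixed hop count, as the removed helpers do with max_links, trades completeness for robustness: cyclic links terminate deterministically without tracking visited paths.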
-int path_is_file(const char *path, const char *subpath) { - char *s = strdupz_path_subpath(path, subpath); - - size_t max_links = 100; - - int is_file = 0; - struct stat statbuf; - while(max_links-- && stat(s, &statbuf) == 0) { - if((statbuf.st_mode & S_IFMT) == S_IFREG) { - is_file = 1; - break; - } - else if((statbuf.st_mode & S_IFMT) == S_IFLNK) { - char buffer[FILENAME_MAX + 1]; - ssize_t l = readlink(s, buffer, FILENAME_MAX); - if(l > 0) { - buffer[l] = '\0'; - freez(s); - s = strdupz(buffer); - continue; - } - else { - is_file = 0; - break; - } - } - else { - is_file = 0; - break; - } - } - - freez(s); - return is_file; -} - -void recursive_config_double_dir_load(const char *user_path, const char *stock_path, const char *subpath, int (*callback)(const char *filename, void *data, bool stock_config), void *data, size_t depth) { - if(depth > 3) { - netdata_log_error("CONFIG: Max directory depth reached while reading user path '%s', stock path '%s', subpath '%s'", user_path, stock_path, subpath); - return; - } - - if(!stock_path) - stock_path = user_path; - - char *udir = strdupz_path_subpath(user_path, subpath); - char *sdir = strdupz_path_subpath(stock_path, subpath); - - netdata_log_debug(D_HEALTH, "CONFIG traversing user-config directory '%s', stock config directory '%s'", udir, sdir); - - DIR *dir = opendir(udir); - if (!dir) { - netdata_log_error("CONFIG cannot open user-config directory '%s'.", udir); - } - else { - struct dirent *de = NULL; - while((de = readdir(dir))) { - if(de->d_type == DT_DIR || de->d_type == DT_LNK) { - if( !de->d_name[0] || - (de->d_name[0] == '.' && de->d_name[1] == '\0') || - (de->d_name[0] == '.' && de->d_name[1] == '.' && de->d_name[2] == '\0') - ) { - netdata_log_debug(D_HEALTH, "CONFIG ignoring user-config directory '%s/%s'", udir, de->d_name); - continue; - } - - if(path_is_dir(udir, de->d_name)) { - recursive_config_double_dir_load(udir, sdir, de->d_name, callback, data, depth + 1); - continue; - } - } - - if(de->d_type == DT_UNKNOWN || de->d_type == DT_REG || de->d_type == DT_LNK) { - size_t len = strlen(de->d_name); - if(path_is_file(udir, de->d_name) && - len > 5 && !strcmp(&de->d_name[len - 5], ".conf")) { - char *filename = strdupz_path_subpath(udir, de->d_name); - netdata_log_debug(D_HEALTH, "CONFIG calling callback for user file '%s'", filename); - callback(filename, data, false); - freez(filename); - continue; - } - } - - netdata_log_debug(D_HEALTH, "CONFIG ignoring user-config file '%s/%s' of type %d", udir, de->d_name, (int)de->d_type); - } - - closedir(dir); - } - - netdata_log_debug(D_HEALTH, "CONFIG traversing stock config directory '%s', user config directory '%s'", sdir, udir); - - dir = opendir(sdir); - if (!dir) { - netdata_log_error("CONFIG cannot open stock config directory '%s'.", sdir); - } - else { - if (strcmp(udir, sdir)) { - struct dirent *de = NULL; - while((de = readdir(dir))) { - if(de->d_type == DT_DIR || de->d_type == DT_LNK) { - if( !de->d_name[0] || - (de->d_name[0] == '.' && de->d_name[1] == '\0') || - (de->d_name[0] == '.' && de->d_name[1] == '.' 
&& de->d_name[2] == '\0') - ) { - netdata_log_debug(D_HEALTH, "CONFIG ignoring stock config directory '%s/%s'", sdir, de->d_name); - continue; - } - - if(path_is_dir(sdir, de->d_name)) { - // we recurse in stock subdirectory, only when there is no corresponding - // user subdirectory - to avoid reading the files twice - - if(!path_is_dir(udir, de->d_name)) - recursive_config_double_dir_load(udir, sdir, de->d_name, callback, data, depth + 1); - - continue; - } - } - - if(de->d_type == DT_UNKNOWN || de->d_type == DT_REG || de->d_type == DT_LNK) { - size_t len = strlen(de->d_name); - if(path_is_file(sdir, de->d_name) && !path_is_file(udir, de->d_name) && - len > 5 && !strcmp(&de->d_name[len - 5], ".conf")) { - char *filename = strdupz_path_subpath(sdir, de->d_name); - netdata_log_debug(D_HEALTH, "CONFIG calling callback for stock file '%s'", filename); - callback(filename, data, true); - freez(filename); - continue; - } - - } - - netdata_log_debug(D_HEALTH, "CONFIG ignoring stock-config file '%s/%s' of type %d", udir, de->d_name, (int)de->d_type); - } - } - closedir(dir); - } - - netdata_log_debug(D_HEALTH, "CONFIG done traversing user-config directory '%s', stock config directory '%s'", udir, sdir); - - freez(udir); - freez(sdir); -} - // Returns the number of bytes read from the file if file_size is not NULL. // The actual buffer has an extra byte set to zero (not included in the count). char *read_by_filename(const char *filename, long *file_size) @@ -1618,34 +708,37 @@ char *read_by_filename(const char *filename, long *file_size) FILE *f = fopen(filename, "r"); if (!f) return NULL; + if (fseek(f, 0, SEEK_END) < 0) { fclose(f); return NULL; } + long size = ftell(f); if (size <= 0 || fseek(f, 0, SEEK_END) < 0) { fclose(f); return NULL; } + char *contents = callocz(size + 1, 1); - if (!contents) { - fclose(f); - return NULL; - } if (fseek(f, 0, SEEK_SET) < 0) { fclose(f); freez(contents); return NULL; } + size_t res = fread(contents, 1, size, f); if ( res != (size_t)size) { freez(contents); fclose(f); return NULL; } + fclose(f); + if (file_size) *file_size = size; + return contents; } @@ -1685,7 +778,7 @@ BUFFER *run_command_and_get_output_to_buffer(const char *command, int max_line_l POPEN_INSTANCE *pi = spawn_popen_run(command); if(pi) { char buffer[max_line_length + 1]; - while (fgets(buffer, max_line_length, pi->child_stdout_fp)) { + while (fgets(buffer, max_line_length, spawn_popen_stdout(pi))) { buffer[max_line_length] = '\0'; buffer_strcat(wb, buffer); } @@ -1705,7 +798,7 @@ bool run_command_and_copy_output_to_stdout(const char *command, int max_line_len if(pi) { char buffer[max_line_length + 1]; - while (fgets(buffer, max_line_length, pi->child_stdout_fp)) + while (fgets(buffer, max_line_length, spawn_popen_stdout(pi))) fprintf(stdout, "%s", buffer); spawn_popen_kill(pi); @@ -1831,7 +924,6 @@ void timing_action(TIMING_ACTION action, TIMING_STEP step) { } } -#ifdef ENABLE_HTTPS int hash256_string(const unsigned char *string, size_t size, char *hash) { EVP_MD_CTX *ctx; ctx = EVP_MD_CTX_create(); @@ -1856,7 +948,6 @@ int hash256_string(const unsigned char *string, size_t size, char *hash) { EVP_MD_CTX_destroy(ctx); return 1; } -#endif bool rrdr_relative_window_to_absolute(time_t *after, time_t *before, time_t now) { @@ -1953,52 +1044,93 @@ bool rrdr_relative_window_to_absolute_query(time_t *after, time_t *before, time_ return (absolute_period_requested != 1); } -int netdata_base64_decode(const char *encoded, char *decoded, size_t decoded_size) { - static const unsigned char 
base64_table[256] = { - ['A'] = 0, ['B'] = 1, ['C'] = 2, ['D'] = 3, ['E'] = 4, ['F'] = 5, ['G'] = 6, ['H'] = 7, - ['I'] = 8, ['J'] = 9, ['K'] = 10, ['L'] = 11, ['M'] = 12, ['N'] = 13, ['O'] = 14, ['P'] = 15, - ['Q'] = 16, ['R'] = 17, ['S'] = 18, ['T'] = 19, ['U'] = 20, ['V'] = 21, ['W'] = 22, ['X'] = 23, - ['Y'] = 24, ['Z'] = 25, ['a'] = 26, ['b'] = 27, ['c'] = 28, ['d'] = 29, ['e'] = 30, ['f'] = 31, - ['g'] = 32, ['h'] = 33, ['i'] = 34, ['j'] = 35, ['k'] = 36, ['l'] = 37, ['m'] = 38, ['n'] = 39, - ['o'] = 40, ['p'] = 41, ['q'] = 42, ['r'] = 43, ['s'] = 44, ['t'] = 45, ['u'] = 46, ['v'] = 47, - ['w'] = 48, ['x'] = 49, ['y'] = 50, ['z'] = 51, ['0'] = 52, ['1'] = 53, ['2'] = 54, ['3'] = 55, - ['4'] = 56, ['5'] = 57, ['6'] = 58, ['7'] = 59, ['8'] = 60, ['9'] = 61, ['+'] = 62, ['/'] = 63, - [0 ... '+' - 1] = 255, - ['+' + 1 ... '/' - 1] = 255, - ['9' + 1 ... 'A' - 1] = 255, - ['Z' + 1 ... 'a' - 1] = 255, - ['z' + 1 ... 255] = 255 - }; - size_t count = 0; - unsigned int tmp = 0; - int i, bit; - - if (decoded_size < 1) - return 0; // Buffer size must be at least 1 for null termination - - for (i = 0, bit = 0; encoded[i]; i++) { - unsigned char value = base64_table[(unsigned char)encoded[i]]; - if (value > 63) - return -1; // Invalid character in input - - tmp = tmp << 6 | value; - if (++bit == 4) { - if (count + 3 >= decoded_size) break; // Stop decoding if buffer is full - decoded[count++] = (tmp >> 16) & 0xFF; - decoded[count++] = (tmp >> 8) & 0xFF; - decoded[count++] = tmp & 0xFF; - tmp = 0; - bit = 0; - } - } +#if defined(OPENSSL_VERSION_NUMBER) && OPENSSL_VERSION_NUMBER < OPENSSL_VERSION_110 +static inline EVP_ENCODE_CTX *EVP_ENCODE_CTX_new(void) +{ + EVP_ENCODE_CTX *ctx = OPENSSL_malloc(sizeof(*ctx)); - if (bit > 0 && count + 1 < decoded_size) { - tmp <<= 6 * (4 - bit); - if (bit > 2 && count + 1 < decoded_size) decoded[count++] = (tmp >> 16) & 0xFF; - if (bit > 3 && count + 1 < decoded_size) decoded[count++] = (tmp >> 8) & 0xFF; + if (ctx != NULL) { + memset(ctx, 0, sizeof(*ctx)); } + return ctx; +} + +static void EVP_ENCODE_CTX_free(EVP_ENCODE_CTX *ctx) +{ + OPENSSL_free(ctx); +} +#endif - decoded[count] = '\0'; // Null terminate the output string - return count; +int netdata_base64_decode(unsigned char *out, const unsigned char *in, const int in_len) +{ + int outl; + unsigned char remaining_data[256]; + + EVP_ENCODE_CTX *ctx = EVP_ENCODE_CTX_new(); + EVP_DecodeInit(ctx); + EVP_DecodeUpdate(ctx, out, &outl, in, in_len); + int remainder = 0; + EVP_DecodeFinal(ctx, remaining_data, &remainder); + EVP_ENCODE_CTX_free(ctx); + if (remainder) + return -1; + + return outl; } + +int netdata_base64_encode(unsigned char *encoded, const unsigned char *input, size_t input_size) +{ + return EVP_EncodeBlock(encoded, input, input_size); +} + +// Keep internal implementation +// int netdata_base64_decode_internal(const char *encoded, char *decoded, size_t decoded_size) { +// static const unsigned char base64_table[256] = { +// ['A'] = 0, ['B'] = 1, ['C'] = 2, ['D'] = 3, ['E'] = 4, ['F'] = 5, ['G'] = 6, ['H'] = 7, +// ['I'] = 8, ['J'] = 9, ['K'] = 10, ['L'] = 11, ['M'] = 12, ['N'] = 13, ['O'] = 14, ['P'] = 15, +// ['Q'] = 16, ['R'] = 17, ['S'] = 18, ['T'] = 19, ['U'] = 20, ['V'] = 21, ['W'] = 22, ['X'] = 23, +// ['Y'] = 24, ['Z'] = 25, ['a'] = 26, ['b'] = 27, ['c'] = 28, ['d'] = 29, ['e'] = 30, ['f'] = 31, +// ['g'] = 32, ['h'] = 33, ['i'] = 34, ['j'] = 35, ['k'] = 36, ['l'] = 37, ['m'] = 38, ['n'] = 39, +// ['o'] = 40, ['p'] = 41, ['q'] = 42, ['r'] = 43, ['s'] = 44, ['t'] = 45, ['u'] = 46, ['v'] = 47, 
+// ['w'] = 48, ['x'] = 49, ['y'] = 50, ['z'] = 51, ['0'] = 52, ['1'] = 53, ['2'] = 54, ['3'] = 55, +// ['4'] = 56, ['5'] = 57, ['6'] = 58, ['7'] = 59, ['8'] = 60, ['9'] = 61, ['+'] = 62, ['/'] = 63, +// [0 ... '+' - 1] = 255, +// ['+' + 1 ... '/' - 1] = 255, +// ['9' + 1 ... 'A' - 1] = 255, +// ['Z' + 1 ... 'a' - 1] = 255, +// ['z' + 1 ... 255] = 255 +// }; +// +// size_t count = 0; +// unsigned int tmp = 0; +// int i, bit; +// +// if (decoded_size < 1) +// return 0; // Buffer size must be at least 1 for null termination +// +// for (i = 0, bit = 0; encoded[i]; i++) { +// unsigned char value = base64_table[(unsigned char)encoded[i]]; +// if (value > 63) +// return -1; // Invalid character in input +// +// tmp = tmp << 6 | value; +// if (++bit == 4) { +// if (count + 3 >= decoded_size) break; // Stop decoding if buffer is full +// decoded[count++] = (tmp >> 16) & 0xFF; +// decoded[count++] = (tmp >> 8) & 0xFF; +// decoded[count++] = tmp & 0xFF; +// tmp = 0; +// bit = 0; +// } +// } +// +// if (bit > 0 && count + 1 < decoded_size) { +// tmp <<= 6 * (4 - bit); +// if (bit > 2 && count + 1 < decoded_size) decoded[count++] = (tmp >> 16) & 0xFF; +// if (bit > 3 && count + 1 < decoded_size) decoded[count++] = (tmp >> 8) & 0xFF; +// } +// +// decoded[count] = '\0'; // Null terminate the output string +// return count; +// } diff --git a/src/libnetdata/libnetdata.h b/src/libnetdata/libnetdata.h index b4bddb70a..acee0675f 100644 --- a/src/libnetdata/libnetdata.h +++ b/src/libnetdata/libnetdata.h @@ -7,333 +7,19 @@ extern "C" { # endif -#include "config.h" - -#ifdef ENABLE_OPENSSL -#define ENABLE_HTTPS 1 -#endif - -#ifdef HAVE_LIBDATACHANNEL -#define ENABLE_WEBRTC 1 -#endif - -#define STRINGIFY(x) #x -#define TOSTRING(x) STRINGIFY(x) +#include "common.h" #define JUDYHS_INDEX_SIZE_ESTIMATE(key_bytes) (((key_bytes) + sizeof(Word_t) - 1) / sizeof(Word_t) * 4) -#if defined(NETDATA_DEV_MODE) && !defined(NETDATA_INTERNAL_CHECKS) -#define NETDATA_INTERNAL_CHECKS 1 -#endif - -#ifndef SIZEOF_VOID_P -#error SIZEOF_VOID_P is not defined -#endif - -#if SIZEOF_VOID_P == 4 -#define ENV32BIT 1 -#else -#define ENV64BIT 1 -#endif - // NETDATA_TRACE_ALLOCATIONS does not work under musl libc, so don't enable it //#if defined(NETDATA_INTERNAL_CHECKS) && !defined(NETDATA_TRACE_ALLOCATIONS) //#define NETDATA_TRACE_ALLOCATIONS 1 //#endif -#define MALLOC_ALIGNMENT (sizeof(uintptr_t) * 2) -#define size_t_atomic_count(op, var, size) __atomic_## op ##_fetch(&(var), size, __ATOMIC_RELAXED) -#define size_t_atomic_bytes(op, var, size) __atomic_## op ##_fetch(&(var), ((size) % MALLOC_ALIGNMENT)?((size) + MALLOC_ALIGNMENT - ((size) % MALLOC_ALIGNMENT)):(size), __ATOMIC_RELAXED) - -// ---------------------------------------------------------------------------- -// system include files for all netdata C programs - -/* select the memory allocator, based on autoconf findings */ -#if defined(ENABLE_JEMALLOC) - -#if defined(HAVE_JEMALLOC_JEMALLOC_H) -#include -#else // !defined(HAVE_JEMALLOC_JEMALLOC_H) -#include -#endif // !defined(HAVE_JEMALLOC_JEMALLOC_H) - -#elif defined(ENABLE_TCMALLOC) - -#include - -#else /* !defined(ENABLE_JEMALLOC) && !defined(ENABLE_TCMALLOC) */ - -#if !(defined(__FreeBSD__) || defined(__APPLE__)) -#include -#endif /* __FreeBSD__ || __APPLE__ */ - -#endif /* !defined(ENABLE_JEMALLOC) && !defined(ENABLE_TCMALLOC) */ - -// ---------------------------------------------------------------------------- - -#if defined(__FreeBSD__) -#include -#define NETDATA_OS_TYPE "freebsd" -#elif defined(__APPLE__) -#define 
NETDATA_OS_TYPE "macos" -#elif defined(OS_WINDOWS) -#define NETDATA_OS_TYPE "windows" -#else -#define NETDATA_OS_TYPE "linux" -#endif /* __FreeBSD__, __APPLE__*/ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#ifdef HAVE_ARPA_INET_H -#include -#endif - -#ifdef HAVE_NETINET_TCP_H -#include -#endif - -#ifdef HAVE_SYS_IOCTL_H -#include -#endif - -#ifdef HAVE_GRP_H -#include -#else -typedef uint32_t gid_t; -#endif - -#ifdef HAVE_PWD_H -#include -#else -typedef uint32_t uid_t; -#endif - -#ifdef HAVE_NET_IF_H -#include -#endif - -#ifdef HAVE_POLL_H -#include -#endif - -#ifdef HAVE_SYSLOG_H -#include -#else -/* priorities */ -#define LOG_EMERG 0 /* system is unusable */ -#define LOG_ALERT 1 /* action must be taken immediately */ -#define LOG_CRIT 2 /* critical conditions */ -#define LOG_ERR 3 /* error conditions */ -#define LOG_WARNING 4 /* warning conditions */ -#define LOG_NOTICE 5 /* normal but significant condition */ -#define LOG_INFO 6 /* informational */ -#define LOG_DEBUG 7 /* debug-level messages */ - -/* facility codes */ -#define LOG_KERN (0<<3) /* kernel messages */ -#define LOG_USER (1<<3) /* random user-level messages */ -#define LOG_MAIL (2<<3) /* mail system */ -#define LOG_DAEMON (3<<3) /* system daemons */ -#define LOG_AUTH (4<<3) /* security/authorization messages */ -#define LOG_SYSLOG (5<<3) /* messages generated internally by syslogd */ -#define LOG_LPR (6<<3) /* line printer subsystem */ -#define LOG_NEWS (7<<3) /* network news subsystem */ -#define LOG_UUCP (8<<3) /* UUCP subsystem */ -#define LOG_CRON (9<<3) /* clock daemon */ -#define LOG_AUTHPRIV (10<<3) /* security/authorization messages (private) */ -#define LOG_FTP (11<<3) /* ftp daemon */ - -/* other codes through 15 reserved for system use */ -#define LOG_LOCAL0 (16<<3) /* reserved for local use */ -#define LOG_LOCAL1 (17<<3) /* reserved for local use */ -#define LOG_LOCAL2 (18<<3) /* reserved for local use */ -#define LOG_LOCAL3 (19<<3) /* reserved for local use */ -#define LOG_LOCAL4 (20<<3) /* reserved for local use */ -#define LOG_LOCAL5 (21<<3) /* reserved for local use */ -#define LOG_LOCAL6 (22<<3) /* reserved for local use */ -#define LOG_LOCAL7 (23<<3) /* reserved for local use */ -#endif - -#ifdef HAVE_SYS_MMAN_H -#include -#endif - -#ifdef HAVE_SYS_RESOURCE_H -#include -#endif - -#ifdef HAVE_SYS_SOCKET_H -#include -#endif - -#ifdef HAVE_SYS_WAIT_H -#include -#endif - -#ifdef HAVE_SYS_UN_H -#include -#endif - -#ifdef HAVE_SPAWN_H -#include -#endif - -#ifdef HAVE_NETINET_IN_H -#include -#endif - -#ifdef HAVE_RESOLV_H -#include -#endif - -#ifdef HAVE_NETDB_H -#include -#endif - -#ifdef HAVE_SYS_PRCTL_H -#include -#endif - -#ifdef HAVE_SYS_STAT_H -#include -#endif - -#ifdef HAVE_SYS_VFS_H -#include -#endif - -#ifdef HAVE_SYS_STATFS_H -#include -#endif - -#ifdef HAVE_LINUX_MAGIC_H -#include -#endif - -#ifdef HAVE_SYS_MOUNT_H -#include -#endif - -#ifdef HAVE_SYS_STATVFS_H -#include -#endif - -// #1408 -#ifdef MAJOR_IN_MKDEV -#include -#endif -#ifdef MAJOR_IN_SYSMACROS -#include -#endif - -#include -#include - -#if defined(HAVE_INTTYPES_H) -#include -#elif defined(HAVE_STDINT_H) -#include -#endif - -#include - -#ifdef HAVE_SYS_CAPABILITY_H -#include -#endif - +#include "libjudy/judy-malloc.h" -#ifndef O_CLOEXEC -#define O_CLOEXEC (0) -#endif - -// ---------------------------------------------------------------------------- -// 
netdata common definitions - -#define _cleanup_(x) __attribute__((__cleanup__(x))) - -#ifdef HAVE_FUNC_ATTRIBUTE_RETURNS_NONNULL -#define NEVERNULL __attribute__((returns_nonnull)) -#else -#define NEVERNULL -#endif - -#ifdef HAVE_FUNC_ATTRIBUTE_NOINLINE -#define NOINLINE __attribute__((noinline)) -#else -#define NOINLINE -#endif - -#ifdef HAVE_FUNC_ATTRIBUTE_MALLOC -#define MALLOCLIKE __attribute__((malloc)) -#else -#define MALLOCLIKE -#endif - -#if defined(HAVE_FUNC_ATTRIBUTE_FORMAT_GNU_PRINTF) -#define PRINTFLIKE(f, a) __attribute__ ((format(gnu_printf, f, a))) -#elif defined(HAVE_FUNC_ATTRIBUTE_FORMAT_PRINTF) -#define PRINTFLIKE(f, a) __attribute__ ((format(printf, f, a))) -#else -#define PRINTFLIKE(f, a) -#endif - -#ifdef HAVE_FUNC_ATTRIBUTE_NORETURN -#define NORETURN __attribute__ ((noreturn)) -#else -#define NORETURN -#endif - -#ifdef HAVE_FUNC_ATTRIBUTE_WARN_UNUSED_RESULT -#define WARNUNUSED __attribute__ ((warn_unused_result)) -#else -#define WARNUNUSED -#endif - -void aral_judy_init(void); -size_t judy_aral_overhead(void); -size_t judy_aral_structures(void); - -#define ABS(x) (((x) < 0)? (-(x)) : (x)) -#define MIN(a,b) (((a)<(b))?(a):(b)) -#define MAX(a,b) (((a)>(b))?(a):(b)) -#define SWAP(a, b) do { \ - typeof(a) _tmp = b; \ - b = a; \ - a = _tmp; \ -} while(0) - -#define GUID_LEN 36 - -#define PIPE_READ 0 -#define PIPE_WRITE 1 - -#include "linked-lists.h" #include "storage-point.h" - -void netdata_fix_chart_id(char *s); -void netdata_fix_chart_name(char *s); +#include "paths/paths.h" int madvise_sequential(void *mem, size_t len); int madvise_random(void *mem, size_t len); @@ -394,46 +80,15 @@ int verify_netdata_host_prefix(bool log_msg); extern volatile sig_atomic_t netdata_exit; -char *strdupz_path_subpath(const char *path, const char *subpath); -int path_is_dir(const char *path, const char *subpath); -int path_is_file(const char *path, const char *subpath); -void recursive_config_double_dir_load( - const char *user_path - , const char *stock_path - , const char *subpath - , int (*callback)(const char *filename, void *data, bool stock_config) - , void *data - , size_t depth -); char *read_by_filename(const char *filename, long *file_size); char *find_and_replace(const char *src, const char *find, const char *replace, const char *where); -/* fix for alpine linux */ -#ifndef RUSAGE_THREAD -#ifdef RUSAGE_CHILDREN -#define RUSAGE_THREAD RUSAGE_CHILDREN -#endif -#endif - #define BITS_IN_A_KILOBIT 1000 #define KILOBITS_IN_A_MEGABIT 1000 -/* misc. */ - -#define UNUSED(x) (void)(x) - -#ifdef __GNUC__ -#define UNUSED_FUNCTION(x) __attribute__((unused)) UNUSED_##x -#else -#define UNUSED_FUNCTION(x) UNUSED_##x -#endif - #define error_report(x, args...) 
do { errno_clear(); netdata_log_error(x, ##args); } while(0) -// Taken from linux kernel -#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) - -#include "bitmap64.h" +#include "bitmap/bitmap64.h" #define COMPRESSION_MAX_CHUNK 0x4000 #define COMPRESSION_MAX_OVERHEAD 128 @@ -449,47 +104,58 @@ void netdata_cleanup_and_exit(int ret, const char *action, const char *action_re void netdata_cleanup_and_exit(int ret, const char *action, const char *action_result, const char *action_data) NORETURN; #endif -extern char *netdata_configured_host_prefix; +extern const char *netdata_configured_host_prefix; -#include "os/os.h" +// safe includes before O/S specific functions +#include "template-enum.h" +#include "libjudy/src/Judy.h" +#include "july/july.h" -#define XXH_INLINE_ALL -#include "xxhash.h" +#include "string/string.h" +#include "buffer/buffer.h" #include "uuid/uuid.h" -#include "template-enum.h" -#include "http/http_access.h" #include "http/content_type.h" -#include "config/dyncfg.h" -#include "libjudy/src/Judy.h" -#include "july/july.h" +#include "http/http_access.h" + +#include "inlined.h" +#include "parsers/parsers.h" + #include "threads/threads.h" -#include "buffer/buffer.h" #include "locks/locks.h" -#include "circular_buffer/circular_buffer.h" +#include "completion/completion.h" +#include "clocks/clocks.h" +#include "simple_pattern/simple_pattern.h" +#include "libnetdata/log/nd_log.h" + +#include "socket/security.h" // must be before windows.h + +// this may include windows.h +#include "os/os.h" + +#include "socket/socket.h" #include "avl/avl.h" -#include "inlined.h" + #include "line_splitter/line_splitter.h" -#include "clocks/clocks.h" +#include "c_rhash/c_rhash.h" +#include "ringbuffer/ringbuffer.h" +#include "circular_buffer/circular_buffer.h" +#include "buffered_reader/buffered_reader.h" #include "datetime/iso8601.h" #include "datetime/rfc3339.h" #include "datetime/rfc7231.h" -#include "completion/completion.h" -#include "log/log.h" +#include "sanitizers/sanitizers.h" + +#include "config/dyncfg.h" +#include "config/appconfig.h" #include "spawn_server/spawn_server.h" #include "spawn_server/spawn_popen.h" -#include "simple_pattern/simple_pattern.h" -#ifdef ENABLE_HTTPS -# include "socket/security.h" -#endif -#include "socket/socket.h" -#include "config/appconfig.h" -#include "log/journal.h" -#include "buffered_reader/buffered_reader.h" #include "procfile/procfile.h" -#include "string/string.h" #include "dictionary/dictionary.h" #include "dictionary/thread-cache.h" + +#include "log/systemd-journal-helpers.h" + #if defined(HAVE_LIBBPF) && !defined(__cplusplus) #include "ebpf/ebpf.h" #endif @@ -510,11 +176,6 @@ extern char *netdata_configured_host_prefix; #include "functions_evloop/functions_evloop.h" #include "query_progress/progress.h" -// BEWARE: this exists in alarm-notify.sh -#define DEFAULT_CLOUD_BASE_URL "https://app.netdata.cloud" - -#define RRD_STORAGE_TIERS 5 - static inline size_t struct_natural_alignment(size_t size) __attribute__((const)); #define STRUCT_NATURAL_ALIGNMENT (sizeof(uintptr_t) * 2) @@ -653,12 +314,14 @@ void timing_action(TIMING_ACTION action, TIMING_STEP step); int hash256_string(const unsigned char *string, size_t size, char *hash); extern bool unittest_running; -#define API_RELATIVE_TIME_MAX (3 * 365 * 86400) bool rrdr_relative_window_to_absolute(time_t *after, time_t *before, time_t now); bool rrdr_relative_window_to_absolute_query(time_t *after, time_t *before, time_t *now_ptr, bool unittest); -int netdata_base64_decode(const char 
*encoded, char *decoded, size_t decoded_size); +int netdata_base64_decode(unsigned char *out, const unsigned char *in, int in_len); +int netdata_base64_encode(unsigned char *encoded, const unsigned char *input, size_t input_size); + +// -------------------------------------------------------------------------------------------------------------------- static inline void freez_charp(char **p) { freez(*p); diff --git a/src/libnetdata/line_splitter/README.md b/src/libnetdata/line_splitter/README.md index b391a492c..1c8263421 100644 --- a/src/libnetdata/line_splitter/README.md +++ b/src/libnetdata/line_splitter/README.md @@ -1,12 +1,3 @@ - - # Log The netdata log library supports debug, info, error and fatal error logging. diff --git a/src/libnetdata/line_splitter/line_splitter.c b/src/libnetdata/line_splitter/line_splitter.c index 6726d9096..aa56e9b4e 100644 --- a/src/libnetdata/line_splitter/line_splitter.c +++ b/src/libnetdata/line_splitter/line_splitter.c @@ -21,12 +21,29 @@ bool line_splitter_reconstruct_line(BUFFER *wb, void *ptr) { return added > 0; } -inline int pluginsd_isspace(char c) { +inline int isspace_whitespace(char c) { switch(c) { case ' ': case '\t': case '\r': case '\n': + case '\f': + case '\v': + return 1; + + default: + return 0; + } +} + +inline int isspace_pluginsd(char c) { + switch(c) { + case ' ': + case '\t': + case '\r': + case '\n': + case '\f': + case '\v': case '=': return 1; @@ -35,12 +52,14 @@ inline int pluginsd_isspace(char c) { } } -inline int config_isspace(char c) { +inline int isspace_config(char c) { switch (c) { case ' ': case '\t': case '\r': case '\n': + case '\f': + case '\v': case ',': return 1; @@ -49,20 +68,21 @@ inline int config_isspace(char c) { } } -inline int group_by_label_isspace(char c) { +inline int isspace_group_by_label(char c) { if(c == ',' || c == '|') return 1; return 0; } -inline int dyncfg_id_isspace(char c) { +inline int isspace_dyncfg_id(char c) { if(c == ':') return 1; return 0; } +bool isspace_map_whitespace[256] = {}; bool isspace_map_pluginsd[256] = {}; bool isspace_map_config[256] = {}; bool isspace_map_group_by_label[256] = {}; @@ -70,9 +90,10 @@ bool isspace_dyncfg_id_map[256] = {}; __attribute__((constructor)) void initialize_is_space_arrays(void) { for(int c = 0; c < 256 ; c++) { - isspace_map_pluginsd[c] = pluginsd_isspace((char) c); - isspace_map_config[c] = config_isspace((char) c); - isspace_map_group_by_label[c] = group_by_label_isspace((char) c); - isspace_dyncfg_id_map[c] = dyncfg_id_isspace((char)c); + isspace_map_whitespace[c] = isspace_whitespace((char) c); + isspace_map_pluginsd[c] = isspace_pluginsd((char) c); + isspace_map_config[c] = isspace_config((char) c); + isspace_map_group_by_label[c] = isspace_group_by_label((char) c); + isspace_dyncfg_id_map[c] = isspace_dyncfg_id((char) c); } } diff --git a/src/libnetdata/line_splitter/line_splitter.h b/src/libnetdata/line_splitter/line_splitter.h index 968930410..157d91786 100644 --- a/src/libnetdata/line_splitter/line_splitter.h +++ b/src/libnetdata/line_splitter/line_splitter.h @@ -19,11 +19,13 @@ static inline void line_splitter_reset(struct line_splitter *line) { line->num_words = 0; } -int pluginsd_isspace(char c); -int config_isspace(char c); -int group_by_label_isspace(char c); -int dyncfg_id_isspace(char c); +int isspace_pluginsd(char c); +int isspace_config(char c); +int isspace_group_by_label(char c); +int isspace_dyncfg_id(char c); +int isspace_whitespace(char c); +extern bool isspace_map_whitespace[256]; extern bool isspace_map_pluginsd[256]; extern 
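// The isspace_map_* tables above trade a switch per character for a single
// array lookup: each 256-entry bool map is filled once by a constructor at
// program start, so the word-splitting hot path stays branch-light. The same
// technique as a standalone sketch (is_csv_delim is hypothetical):
//
//     static bool is_csv_delim[256];
//
//     __attribute__((constructor))
//     static void init_csv_delim(void) {
//         for (int c = 0; c < 256; c++)
//             is_csv_delim[c] = (c == ',' || c == '\n');
//     }
//
//     // hot path: if (is_csv_delim[(unsigned char)*s]) { ...split here... }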
bool isspace_map_config[256]; extern bool isspace_map_group_by_label[256]; @@ -55,6 +57,9 @@ static inline size_t quoted_strings_splitter(char *str, char **words, size_t max while (likely(*s)) { // if it is an escape if (unlikely(*s == '\\' && s[1])) { + // IMPORTANT: support for escaping is incomplete! + // The backslash character needs to be removed + // from the parsed string. s += 2; continue; } @@ -103,6 +108,9 @@ static inline size_t quoted_strings_splitter(char *str, char **words, size_t max return i; } +#define quoted_strings_splitter_whitespace(str, words, max_words) \ + quoted_strings_splitter(str, words, max_words, isspace_map_whitespace) + #define quoted_strings_splitter_query_group_by_label(str, words, max_words) \ quoted_strings_splitter(str, words, max_words, isspace_map_group_by_label) diff --git a/src/libnetdata/linked-lists.h b/src/libnetdata/linked-lists.h deleted file mode 100644 index 033d11226..000000000 --- a/src/libnetdata/linked-lists.h +++ /dev/null @@ -1,133 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#ifndef NETDATA_LINKED_LISTS_H -#define NETDATA_LINKED_LISTS_H - -// --------------------------------------------------------------------------------------------- -// double linked list management -// inspired by https://github.com/troydhanson/uthash/blob/master/src/utlist.h - -#define DOUBLE_LINKED_LIST_PREPEND_ITEM_UNSAFE(head, item, prev, next) \ - do { \ - (item)->next = (head); \ - \ - if(likely(head)) { \ - (item)->prev = (head)->prev; \ - (head)->prev = (item); \ - } \ - else \ - (item)->prev = (item); \ - \ - (head) = (item); \ - } while (0) - -#define DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(head, item, prev, next) \ - do { \ - \ - (item)->next = NULL; \ - \ - if(likely(head)) { \ - (item)->prev = (head)->prev; \ - (head)->prev->next = (item); \ - (head)->prev = (item); \ - } \ - else { \ - (item)->prev = (item); \ - (head) = (item); \ - } \ - \ - } while (0) - -#define DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(head, item, prev, next) \ - do { \ - fatal_assert((head) != NULL); \ - fatal_assert((item)->prev != NULL); \ - \ - if((item)->prev == (item)) \ - /* it is the only item in the list */ \ - (head) = NULL; \ - \ - else if((item) == (head)) { \ - /* it is the first item */ \ - fatal_assert((item)->next != NULL); \ - (item)->next->prev = (item)->prev; \ - (head) = (item)->next; \ - } \ - else { \ - /* it is any other item */ \ - (item)->prev->next = (item)->next; \ - \ - if ((item)->next) \ - (item)->next->prev = (item)->prev; \ - else \ - (head)->prev = (item)->prev; \ - } \ - \ - (item)->next = NULL; \ - (item)->prev = NULL; \ - } while (0) - -#define DOUBLE_LINKED_LIST_INSERT_ITEM_BEFORE_UNSAFE(head, existing, item, prev, next) \ - do { \ - if (existing) { \ - fatal_assert((head) != NULL); \ - fatal_assert((item) != NULL); \ - \ - (item)->next = (existing); \ - (item)->prev = (existing)->prev; \ - (existing)->prev = (item); \ - \ - if ((head) == (existing)) \ - (head) = (item); \ - else \ - (item)->prev->next = (item); \ - \ - } \ - else \ - DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(head, item, prev, next); \ - \ - } while (0) - -#define DOUBLE_LINKED_LIST_INSERT_ITEM_AFTER_UNSAFE(head, existing, item, prev, next) \ - do { \ - if (existing) { \ - fatal_assert((head) != NULL); \ - fatal_assert((item) != NULL); \ - \ - (item)->next = (existing)->next; \ - (item)->prev = (existing); \ - (existing)->next = (item); \ - \ - if ((item)->next) \ - (item)->next->prev = (item); \ - else \ - (head)->prev = (item); \ - } \ - else \ - 
DOUBLE_LINKED_LIST_PREPEND_ITEM_UNSAFE(head, item, prev, next); \ - \ - } while (0) - -#define DOUBLE_LINKED_LIST_APPEND_LIST_UNSAFE(head, head2, prev, next) \ - do { \ - if (head2) { \ - if (head) { \ - __typeof(head2) _head2_last_item = (head2)->prev; \ - \ - (head2)->prev = (head)->prev; \ - (head)->prev->next = (head2); \ - \ - (head)->prev = _head2_last_item; \ - } \ - else \ - (head) = (head2); \ - } \ - } while (0) - -#define DOUBLE_LINKED_LIST_FOREACH_FORWARD(head, var, prev, next) \ - for ((var) = (head); (var) ; (var) = (var)->next) - -#define DOUBLE_LINKED_LIST_FOREACH_BACKWARD(head, var, prev, next) \ - for ((var) = (head) ? (head)->prev : NULL ; (var) ; (var) = ((var) == (head)) ? NULL : (var)->prev) - -#endif //NETDATA_LINKED_LISTS_H diff --git a/src/libnetdata/linked_lists/linked_lists.h b/src/libnetdata/linked_lists/linked_lists.h new file mode 100644 index 000000000..033d11226 --- /dev/null +++ b/src/libnetdata/linked_lists/linked_lists.h @@ -0,0 +1,133 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_LINKED_LISTS_H +#define NETDATA_LINKED_LISTS_H + +// --------------------------------------------------------------------------------------------- +// double linked list management +// inspired by https://github.com/troydhanson/uthash/blob/master/src/utlist.h + +#define DOUBLE_LINKED_LIST_PREPEND_ITEM_UNSAFE(head, item, prev, next) \ + do { \ + (item)->next = (head); \ + \ + if(likely(head)) { \ + (item)->prev = (head)->prev; \ + (head)->prev = (item); \ + } \ + else \ + (item)->prev = (item); \ + \ + (head) = (item); \ + } while (0) + +#define DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(head, item, prev, next) \ + do { \ + \ + (item)->next = NULL; \ + \ + if(likely(head)) { \ + (item)->prev = (head)->prev; \ + (head)->prev->next = (item); \ + (head)->prev = (item); \ + } \ + else { \ + (item)->prev = (item); \ + (head) = (item); \ + } \ + \ + } while (0) + +#define DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(head, item, prev, next) \ + do { \ + fatal_assert((head) != NULL); \ + fatal_assert((item)->prev != NULL); \ + \ + if((item)->prev == (item)) \ + /* it is the only item in the list */ \ + (head) = NULL; \ + \ + else if((item) == (head)) { \ + /* it is the first item */ \ + fatal_assert((item)->next != NULL); \ + (item)->next->prev = (item)->prev; \ + (head) = (item)->next; \ + } \ + else { \ + /* it is any other item */ \ + (item)->prev->next = (item)->next; \ + \ + if ((item)->next) \ + (item)->next->prev = (item)->prev; \ + else \ + (head)->prev = (item)->prev; \ + } \ + \ + (item)->next = NULL; \ + (item)->prev = NULL; \ + } while (0) + +#define DOUBLE_LINKED_LIST_INSERT_ITEM_BEFORE_UNSAFE(head, existing, item, prev, next) \ + do { \ + if (existing) { \ + fatal_assert((head) != NULL); \ + fatal_assert((item) != NULL); \ + \ + (item)->next = (existing); \ + (item)->prev = (existing)->prev; \ + (existing)->prev = (item); \ + \ + if ((head) == (existing)) \ + (head) = (item); \ + else \ + (item)->prev->next = (item); \ + \ + } \ + else \ + DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(head, item, prev, next); \ + \ + } while (0) + +#define DOUBLE_LINKED_LIST_INSERT_ITEM_AFTER_UNSAFE(head, existing, item, prev, next) \ + do { \ + if (existing) { \ + fatal_assert((head) != NULL); \ + fatal_assert((item) != NULL); \ + \ + (item)->next = (existing)->next; \ + (item)->prev = (existing); \ + (existing)->next = (item); \ + \ + if ((item)->next) \ + (item)->next->prev = (item); \ + else \ + (head)->prev = (item); \ + } \ + else \ + DOUBLE_LINKED_LIST_PREPEND_ITEM_UNSAFE(head, 
item, prev, next); \ + \ + } while (0) + +#define DOUBLE_LINKED_LIST_APPEND_LIST_UNSAFE(head, head2, prev, next) \ + do { \ + if (head2) { \ + if (head) { \ + __typeof(head2) _head2_last_item = (head2)->prev; \ + \ + (head2)->prev = (head)->prev; \ + (head)->prev->next = (head2); \ + \ + (head)->prev = _head2_last_item; \ + } \ + else \ + (head) = (head2); \ + } \ + } while (0) + +#define DOUBLE_LINKED_LIST_FOREACH_FORWARD(head, var, prev, next) \ + for ((var) = (head); (var) ; (var) = (var)->next) + +#define DOUBLE_LINKED_LIST_FOREACH_BACKWARD(head, var, prev, next) \ + for ((var) = (head) ? (head)->prev : NULL ; (var) ; (var) = ((var) == (head)) ? NULL : (var)->prev) + +#endif //NETDATA_LINKED_LISTS_H diff --git a/src/libnetdata/local-sockets/local-sockets.h b/src/libnetdata/local-sockets/local-sockets.h new file mode 100644 index 000000000..06ac08767 --- /dev/null +++ b/src/libnetdata/local-sockets/local-sockets.h @@ -0,0 +1,1821 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_LOCAL_SOCKETS_H +#define NETDATA_LOCAL_SOCKETS_H + +#include "libnetdata/libnetdata.h" + +#ifndef _countof +#define _countof(x) (sizeof(x) / sizeof(*(x))) +#endif + +#define LOCAL_SOCKETS_USE_SETNS +#define USE_LIBMNL_AFTER_SETNS + +#if defined(HAVE_LIBMNL) +#include +#include +#include +#include +#include +#include +#endif + +#define UID_UNSET (uid_t)(UINT32_MAX) + +// -------------------------------------------------------------------------------------------------------------------- +// hashtable for keeping the namespaces +// key and value is the namespace inode + +#define SIMPLE_HASHTABLE_VALUE_TYPE uint64_t +#define SIMPLE_HASHTABLE_NAME _NET_NS +#include "libnetdata/simple_hashtable/simple_hashtable.h" + +// -------------------------------------------------------------------------------------------------------------------- +// hashtable for keeping the sockets of PIDs +// key is the inode + +struct pid_socket; +#define SIMPLE_HASHTABLE_VALUE_TYPE struct pid_socket +#define SIMPLE_HASHTABLE_NAME _PID_SOCKET +#include "libnetdata/simple_hashtable/simple_hashtable.h" + +// -------------------------------------------------------------------------------------------------------------------- +// hashtable for keeping all the sockets +// key is the inode + +struct local_socket; +#define SIMPLE_HASHTABLE_VALUE_TYPE struct local_socket +#define SIMPLE_HASHTABLE_NAME _LOCAL_SOCKET +#include "libnetdata/simple_hashtable/simple_hashtable.h" + +// -------------------------------------------------------------------------------------------------------------------- +// hashtable for keeping all local IPs +// key is XXH3_64bits hash of the IP + +union ipv46; +#define SIMPLE_HASHTABLE_VALUE_TYPE union ipv46 +#define SIMPLE_HASHTABLE_NAME _LOCAL_IP +#include "libnetdata/simple_hashtable/simple_hashtable.h" + +// -------------------------------------------------------------------------------------------------------------------- +// hashtable for keeping all listening ports +// key is XXH3_64bits hash of the family, protocol, port number, namespace + +struct local_port; +#define SIMPLE_HASHTABLE_VALUE_TYPE struct local_port +#define SIMPLE_HASHTABLE_NAME _LISTENING_PORT +#include "libnetdata/simple_hashtable/simple_hashtable.h" + +// -------------------------------------------------------------------------------------------------------------------- + +struct local_socket_state; +typedef void (*local_sockets_cb_t)(struct local_socket_state *state, const struct local_socket *n, void *data); + +struct 
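// (The linked-list macros ending above are a verbatim re-add of the deleted
// linked-lists.h at its new path -- same blob index 033d11226, i.e. a pure
// file move. Note the utlist-style invariant: head->prev always points at
// the list tail, which is what makes APPEND O(1) without a tail pointer.)
//
// The hashtable includes above instantiate one generic hashtable several
// times by redefining SIMPLE_HASHTABLE_VALUE_TYPE and SIMPLE_HASHTABLE_NAME
// before each #include -- the usual C-preprocessor substitute for generics.
// One more keyed table would follow the same pattern (struct my_value and
// _MY_VALUE are hypothetical):
//
//     struct my_value;
//     #define SIMPLE_HASHTABLE_VALUE_TYPE struct my_value
//     #define SIMPLE_HASHTABLE_NAME _MY_VALUE
//     #include "libnetdata/simple_hashtable/simple_hashtable.h"
//
// which generates the name-suffixed API used throughout this file, e.g.
// simple_hashtable_init_MY_VALUE() and simple_hashtable_get_slot_MY_VALUE().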
local_sockets_config { + bool listening; + bool inbound; + bool outbound; + bool local; + bool tcp4; + bool tcp6; + bool udp4; + bool udp6; + bool pid; + bool cmdline; + bool comm; + bool uid; + bool namespaces; + bool tcp_info; + bool no_mnl; + bool procfile; + bool report; + + size_t max_errors; + size_t max_concurrent_namespaces; + + local_sockets_cb_t cb; + void *data; + + const char *host_prefix; +}; + +struct local_sockets_state { + uint32_t nl_seq; + uint64_t net_ns_inode; + pid_t net_ns_pid; +}; + +struct timing_work { + usec_t start_ut; + usec_t end_ut; + const char *name; +}; + +struct local_sockets_ns_req { + struct local_sockets_config config; + struct local_sockets_state ns_state; +}; + +typedef struct local_socket_state { + struct local_sockets_config config; + struct local_sockets_state ns_state; + + struct { + size_t mnl_sends; + size_t tcp_info_received; + size_t pid_fds_processed; + size_t pid_fds_opendir_failed; + size_t pid_fds_readlink_failed; + size_t pid_fds_parse_failed; + size_t errors_encountered; + + size_t sockets_added; + + size_t namespaces_found; + size_t namespaces_absent; + size_t namespaces_invalid; +#if defined(LOCAL_SOCKETS_USE_SETNS) + size_t namespaces_forks_attempted; + size_t namespaces_forks_failed; + size_t namespaces_forks_unresponsive; + size_t namespaces_sockets_new; + size_t namespaces_sockets_existing; +#endif + + struct procfile_stats ff; + } stats; + + size_t timings_idx; + struct timing_work timings[30]; + +#if defined(LOCAL_SOCKETS_USE_SETNS) + bool spawn_server_is_mine; + SPAWN_SERVER *spawn_server; +#endif + +#if defined(HAVE_LIBMNL) + uint16_t tmp_protocol; +#endif + + procfile *ff; + + ARAL *local_socket_aral; + ARAL *pid_socket_aral; + SPINLOCK spinlock; // for namespaces + + uint64_t proc_self_net_ns_inode; + + SIMPLE_HASHTABLE_NET_NS ns_hashtable; + SIMPLE_HASHTABLE_PID_SOCKET pid_sockets_hashtable; + SIMPLE_HASHTABLE_LOCAL_SOCKET sockets_hashtable; + SIMPLE_HASHTABLE_LOCAL_IP local_ips_hashtable; + SIMPLE_HASHTABLE_LISTENING_PORT listening_ports_hashtable; +} LS_STATE; + +// -------------------------------------------------------------------------------------------------------------------- + +typedef enum __attribute__((packed)) { + SOCKET_DIRECTION_NONE = 0, + SOCKET_DIRECTION_LISTEN = (1 << 0), // a listening socket + SOCKET_DIRECTION_INBOUND = (1 << 1), // an inbound socket connecting a remote system to a local listening socket + SOCKET_DIRECTION_OUTBOUND = (1 << 2), // a socket initiated by this system, connecting to another system + SOCKET_DIRECTION_LOCAL_INBOUND = (1 << 3), // the socket connecting 2 localhost applications + SOCKET_DIRECTION_LOCAL_OUTBOUND = (1 << 4), // the socket connecting 2 localhost applications +} SOCKET_DIRECTION; + +#ifndef TASK_COMM_LEN +#define TASK_COMM_LEN 16 +#endif + +struct pid_socket { + uint64_t inode; + pid_t pid; + uid_t uid; + uint64_t net_ns_inode; + char *cmdline; + char comm[TASK_COMM_LEN]; +}; + +struct local_port { + uint16_t protocol; + uint16_t family; + uint16_t port; + uint64_t net_ns_inode; +}; + +union ipv46 { + uint32_t ipv4; + struct in6_addr ipv6; +}; + +struct socket_endpoint { + uint16_t protocol; + uint16_t family; + uint16_t port; + union ipv46 ip; +}; + +static inline void ipv6_to_in6_addr(const char *ipv6_str, struct in6_addr *d) { + char buf[9]; + + for (size_t k = 0; k < 4; ++k) { + memcpy(buf, ipv6_str + (k * 8), 8); + buf[sizeof(buf) - 1] = '\0'; + d->s6_addr32[k] = str2uint32_hex(buf, NULL); + } +} + +typedef struct local_socket { + uint64_t inode; + uint64_t 
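// ipv6_to_in6_addr() above parses the 32-hex-digit form that /proc/net/tcp6
// uses: four 32-bit groups, each printed as eight hex digits of the
// native-endian word -- e.g. on little-endian systems ::1 appears as
// "00000000000000000000000001000000". Hence the four str2uint32_hex() calls
// over consecutive 8-character windows.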
net_ns_inode; + + int state; + struct socket_endpoint local; + struct socket_endpoint remote; + pid_t pid; + + SOCKET_DIRECTION direction; + + uint8_t timer; + uint8_t retransmits; // the # of packets currently queued for retransmission (not yet acknowledged) + uint32_t expires; + uint32_t rqueue; + uint32_t wqueue; + uid_t uid; + + struct { + bool checked; + bool ipv46; + } ipv6ony; + + union { + struct tcp_info tcp; + } info; + + char comm[TASK_COMM_LEN]; + STRING *cmdline; + + struct local_port local_port_key; + + XXH64_hash_t local_ip_hash; + XXH64_hash_t remote_ip_hash; + XXH64_hash_t local_port_hash; + +#ifdef LOCAL_SOCKETS_EXTENDED_MEMBERS + LOCAL_SOCKETS_EXTENDED_MEMBERS +#endif +} LOCAL_SOCKET; + +#if defined(LOCAL_SOCKETS_USE_SETNS) +static inline int local_sockets_spawn_server_callback(SPAWN_REQUEST *request); +#endif + +// -------------------------------------------------------------------------------------------------------------------- + +static inline void local_sockets_log(LS_STATE *ls, const char *format, ...) PRINTFLIKE(2, 3); +static inline void local_sockets_log(LS_STATE *ls, const char *format, ...) { + if(ls && ++ls->stats.errors_encountered == ls->config.max_errors) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "LOCAL-SOCKETS: max number of logs reached. Not logging anymore"); + return; + } + + if(ls && ls->stats.errors_encountered > ls->config.max_errors) + return; + + char buf[16384]; + va_list args; + va_start(args, format); + vsnprintf(buf, sizeof(buf), format, args); + va_end(args); + + nd_log(NDLS_COLLECTORS, NDLP_ERR, "LOCAL-SOCKETS: %s", buf); +} + +// -------------------------------------------------------------------------------------------------------------------- + +static bool local_sockets_is_ipv4_mapped_ipv6_address(const struct in6_addr *addr) { + // An IPv4-mapped IPv6 address starts with 80 bits of zeros followed by 16 bits of ones + static const unsigned char ipv4_mapped_prefix[12] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF, 0xFF }; + return memcmp(addr->s6_addr, ipv4_mapped_prefix, 12) == 0; +} + +static bool local_sockets_is_loopback_address(const struct socket_endpoint *se) { + if (se->family == AF_INET) { + // For IPv4, loopback addresses are in the 127.0.0.0/8 range + return (ntohl(se->ip.ipv4) >> 24) == 127; // Check if the first byte is 127 + } else if (se->family == AF_INET6) { + // Check if the address is an IPv4-mapped IPv6 address + if (local_sockets_is_ipv4_mapped_ipv6_address(&se->ip.ipv6)) { + // Extract the last 32 bits (IPv4 address) and check if it's in the 127.0.0.0/8 range + uint8_t *ip6 = (uint8_t *)&se->ip.ipv6; + const uint32_t ipv4_addr = *((const uint32_t *)(ip6 + 12)); + return (ntohl(ipv4_addr) >> 24) == 127; + } + + // For IPv6, loopback address is ::1 + return memcmp(&se->ip.ipv6, &in6addr_loopback, sizeof(se->ip.ipv6)) == 0; + } + + return false; +} + +static inline bool local_sockets_is_ipv4_reserved_address(uint32_t ip) { + // Check for the reserved address ranges + ip = ntohl(ip); + return ( + (ip >> 24 == 10) || // Private IP range (A class) + (ip >> 20 == (172 << 4) + 1) || // Private IP range (B class) + (ip >> 16 == (192 << 8) + 168) || // Private IP range (C class) + (ip >> 24 == 127) || // Loopback address (127.0.0.0) + (ip >> 24 == 0) || // Reserved (0.0.0.0) + (ip >> 24 == 169 && (ip >> 16) == 254) || // Link-local address (169.254.0.0) + (ip >> 16 == (192 << 8) + 0) // Test-Net (192.0.0.0) + ); +} + +static inline bool local_sockets_is_private_address(const struct socket_endpoint *se) { + if (se->family == AF_INET) { + 
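// For reference: an IPv4-mapped IPv6 address is ::ffff:a.b.c.d -- 80 zero
// bits, 16 one bits, then the IPv4 address in the last four bytes
// (s6_addr[12..15]). That is why local_sockets_is_loopback_address() above
// and the AF_INET6 branch below peel off those last 32 bits and re-run the
// plain IPv4 classification on them.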
return local_sockets_is_ipv4_reserved_address(se->ip.ipv4); + } + else if (se->family == AF_INET6) { + uint8_t *ip6 = (uint8_t *)&se->ip.ipv6; + + // Check if the address is an IPv4-mapped IPv6 address + if (local_sockets_is_ipv4_mapped_ipv6_address(&se->ip.ipv6)) { + // Extract the last 32 bits (IPv4 address) and check if it's in the 127.0.0.0/8 range + const uint32_t ipv4_addr = *((const uint32_t *)(ip6 + 12)); + return local_sockets_is_ipv4_reserved_address(ipv4_addr); + } + + // Check for link-local addresses (fe80::/10) + if ((ip6[0] == 0xFE) && ((ip6[1] & 0xC0) == 0x80)) + return true; + + // Check for Unique Local Addresses (ULA) (fc00::/7) + if ((ip6[0] & 0xFE) == 0xFC) + return true; + + // Check for multicast addresses (ff00::/8) + if (ip6[0] == 0xFF) + return true; + + // For IPv6, loopback address is :: or ::1 + return memcmp(&se->ip.ipv6, &in6addr_any, sizeof(se->ip.ipv6)) == 0 || + memcmp(&se->ip.ipv6, &in6addr_loopback, sizeof(se->ip.ipv6)) == 0; + } + + return false; +} + +static bool local_sockets_is_multicast_address(const struct socket_endpoint *se) { + if (se->family == AF_INET) { + // For IPv4, check if the address is 0.0.0.0 + uint32_t ip = htonl(se->ip.ipv4); + return (ip >= 0xE0000000 && ip <= 0xEFFFFFFF); // Multicast address range (224.0.0.0/4) + } + else if (se->family == AF_INET6) { + // For IPv6, check if the address is ff00::/8 + uint8_t *ip6 = (uint8_t *)&se->ip.ipv6; + return ip6[0] == 0xff; + } + + return false; +} + +static bool local_sockets_is_zero_address(const struct socket_endpoint *se) { + if (se->family == AF_INET) { + // For IPv4, check if the address is 0.0.0.0 + return se->ip.ipv4 == 0; + } + else if (se->family == AF_INET6) { + // For IPv6, check if the address is :: + return memcmp(&se->ip.ipv6, &in6addr_any, sizeof(se->ip.ipv6)) == 0; + } + + return false; +} + +static inline const char *local_sockets_address_space(const struct socket_endpoint *se) { + if(local_sockets_is_zero_address(se)) + return "zero"; + else if(local_sockets_is_loopback_address(se)) + return "loopback"; + else if(local_sockets_is_multicast_address(se)) + return "multicast"; + else if(local_sockets_is_private_address(se)) + return "private"; + else + return "public"; +} + +static inline void ipv6_address_to_txt(const struct in6_addr *in6_addr, char *dst) { + struct sockaddr_in6 sa = { 0 }; + + sa.sin6_family = AF_INET6; + sa.sin6_port = htons(0); + sa.sin6_addr = *in6_addr; + + // Convert to human-readable format + if (inet_ntop(AF_INET6, &(sa.sin6_addr), dst, INET6_ADDRSTRLEN) == NULL) + *dst = '\0'; +} + +static inline void ipv4_address_to_txt(uint32_t ip, char *dst) { + uint8_t octets[4]; + octets[0] = ip & 0xFF; + octets[1] = (ip >> 8) & 0xFF; + octets[2] = (ip >> 16) & 0xFF; + octets[3] = (ip >> 24) & 0xFF; + sprintf(dst, "%u.%u.%u.%u", octets[0], octets[1], octets[2], octets[3]); +} + +static inline bool is_local_socket_ipv46(const LOCAL_SOCKET *n) { + return n->local.family == AF_INET6 && + n->direction == SOCKET_DIRECTION_LISTEN && + local_sockets_is_zero_address(&n->local) && + n->ipv6ony.checked && + n->ipv6ony.ipv46; +} + +static inline const char *local_sockets_protocol_name(LOCAL_SOCKET *n) { + if(n->local.family == AF_INET) { + if(n->local.protocol == IPPROTO_TCP) + return "TCP"; + else if(n->local.protocol == IPPROTO_UDP) + return "UDP"; + else + return "UNKNOWN_IPV4"; + } + else if(is_local_socket_ipv46(n)) { + if (n->local.protocol == IPPROTO_TCP) + return "TCP46"; + else if(n->local.protocol == IPPROTO_UDP) + return "UDP46"; + else + return 
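// Note the order in local_sockets_address_space() above: zero -> loopback ->
// multicast -> private -> public, first match wins. So 127.0.0.1 reports
// "loopback" even though it also falls inside the reserved ranges the
// "private" test checks, and 224.0.0.1 reports "multicast" (224.0.0.0/4 for
// IPv4, ff00::/8 for IPv6).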
"UNKNOWN_IPV46"; + } + else if(n->local.family == AF_INET6) { + if (n->local.protocol == IPPROTO_TCP) + return "TCP6"; + else if(n->local.protocol == IPPROTO_UDP) + return "UDP6"; + else + return "UNKNOWN_IPV6"; + } + else + return "UNKNOWN"; +} + +static inline void local_listeners_print_socket(LS_STATE *ls __maybe_unused, const LOCAL_SOCKET *nn, void *data __maybe_unused) { + LOCAL_SOCKET *n = (LOCAL_SOCKET *)nn; + + char local_address[INET6_ADDRSTRLEN]; + char remote_address[INET6_ADDRSTRLEN]; + + if(n->local.family == AF_INET) { + ipv4_address_to_txt(n->local.ip.ipv4, local_address); + ipv4_address_to_txt(n->remote.ip.ipv4, remote_address); + } + else if(n->local.family == AF_INET6) { + ipv6_address_to_txt(&n->local.ip.ipv6, local_address); + ipv6_address_to_txt(&n->remote.ip.ipv6, remote_address); + } + + printf("%s, direction=%s%s%s%s%s pid=%d, state=0x%0x, ns=%"PRIu64", local=%s[:%u], remote=%s[:%u], uid=%u, inode=%"PRIu64", comm=%s\n", + local_sockets_protocol_name(n), + (n->direction & SOCKET_DIRECTION_LISTEN) ? "LISTEN," : "", + (n->direction & SOCKET_DIRECTION_INBOUND) ? "INBOUND," : "", + (n->direction & SOCKET_DIRECTION_OUTBOUND) ? "OUTBOUND," : "", + (n->direction & (SOCKET_DIRECTION_LOCAL_INBOUND|SOCKET_DIRECTION_LOCAL_OUTBOUND)) ? "LOCAL," : "", + (n->direction == 0) ? "NONE," : "", + n->pid, + (unsigned int)n->state, + n->net_ns_inode, + local_address, n->local.port, + remote_address, n->remote.port, + n->uid, + n->inode, + n->comm); +} + +// -------------------------------------------------------------------------------------------------------------------- + +static void local_sockets_foreach_local_socket_call_cb(LS_STATE *ls) { + for(SIMPLE_HASHTABLE_SLOT_LOCAL_SOCKET *sl = simple_hashtable_first_read_only_LOCAL_SOCKET(&ls->sockets_hashtable); + sl; + sl = simple_hashtable_next_read_only_LOCAL_SOCKET(&ls->sockets_hashtable, sl)) { + LOCAL_SOCKET *n = SIMPLE_HASHTABLE_SLOT_DATA(sl); + if(!n) continue; + + if((ls->config.listening && n->direction & SOCKET_DIRECTION_LISTEN) || + (ls->config.local && n->direction & (SOCKET_DIRECTION_LOCAL_INBOUND|SOCKET_DIRECTION_LOCAL_OUTBOUND)) || + (ls->config.inbound && n->direction & SOCKET_DIRECTION_INBOUND) || + (ls->config.outbound && n->direction & SOCKET_DIRECTION_OUTBOUND) + ) { + // we have to call the callback for this socket + if (ls->config.cb) + ls->config.cb(ls, n, ls->config.data); + } + } +} + +// -------------------------------------------------------------------------------------------------------------------- + +static inline void local_sockets_fix_cmdline(char* str) { + char *s = str; + + // map invalid characters to underscores + while(*s) { + if(*s == '|' || iscntrl(*s)) *s = '_'; + s++; + } +} + +// -------------------------------------------------------------------------------------------------------------------- + +static inline bool +local_sockets_read_proc_inode_link(LS_STATE *ls, const char *filename, uint64_t *inode, const char *type) { + char link_target[FILENAME_MAX + 1]; + + *inode = 0; + + ssize_t len = readlink(filename, link_target, sizeof(link_target) - 1); + if (len == -1) { + local_sockets_log(ls, "cannot read '%s' link '%s'", type, filename); + + ls->stats.pid_fds_readlink_failed++; + return false; + } + link_target[len] = '\0'; + + len = strlen(type); + if(strncmp(link_target, type, len) == 0 && link_target[len] == ':' && link_target[len + 1] == '[' && isdigit(link_target[len + 2])) { + *inode = strtoull(&link_target[len + 2], NULL, 10); + // ll_log(ls, "read link of type '%s' '%s' from '%s', inode 
= %"PRIu64, type, link_target, filename, *inode); + return true; + } + else { + // ll_log(ls, "cannot read '%s' link '%s' from '%s'", type, link_target, filename); + ls->stats.pid_fds_processed++; + return false; + } +} + +static inline bool local_sockets_is_path_a_pid(const char *s) { + if(!s || !*s) return false; + + while(*s) { + if(!isdigit(*s++)) + return false; + } + + return true; +} + +static inline bool local_sockets_find_all_sockets_in_proc(LS_STATE *ls, const char *proc_filename) { + DIR *proc_dir; + struct dirent *proc_entry; + char filename[FILENAME_MAX + 1]; + char comm[TASK_COMM_LEN]; + char cmdline[8192]; + const char *cmdline_trimmed; + uint64_t net_ns_inode; + + proc_dir = opendir(proc_filename); + if (proc_dir == NULL) { + local_sockets_log(ls, "cannot opendir() '%s'", proc_filename); + ls->stats.pid_fds_readlink_failed++; + return false; + } + + while ((proc_entry = readdir(proc_dir)) != NULL) { + if(proc_entry->d_type != DT_DIR) + continue; + + if(!strcmp(proc_entry->d_name, ".") || !strcmp(proc_entry->d_name, "..")) + continue; + + if(!local_sockets_is_path_a_pid(proc_entry->d_name)) + continue; + + // Build the path to the fd directory of the process + snprintfz(filename, FILENAME_MAX, "%s/%s/fd/", proc_filename, proc_entry->d_name); + DIR *fd_dir = opendir(filename); + if (fd_dir == NULL) { + local_sockets_log(ls, "cannot opendir() '%s'", filename); + ls->stats.pid_fds_opendir_failed++; + continue; + } + + comm[0] = '\0'; + cmdline[0] = '\0'; + cmdline_trimmed = NULL; + pid_t pid = (pid_t)strtoul(proc_entry->d_name, NULL, 10); + if(!pid) { + local_sockets_log(ls, "cannot parse pid of '%s'", proc_entry->d_name); + closedir(fd_dir); + continue; + } + net_ns_inode = 0; + uid_t uid = UID_UNSET; + + struct dirent *fd_entry; + while ((fd_entry = readdir(fd_dir)) != NULL) { + if(fd_entry->d_type != DT_LNK) + continue; + + snprintfz(filename, sizeof(filename), "%s/%s/fd/%s", proc_filename, proc_entry->d_name, fd_entry->d_name); + uint64_t inode = 0; + if(!local_sockets_read_proc_inode_link(ls, filename, &inode, "socket")) + continue; + + // fprintf(stderr, "%d: PID %d is using socket inode %"PRIu64"\n", gettid_uncached(), pid, inode); + XXH64_hash_t inode_hash = XXH3_64bits(&inode, sizeof(inode)); + SIMPLE_HASHTABLE_SLOT_PID_SOCKET *sl = simple_hashtable_get_slot_PID_SOCKET(&ls->pid_sockets_hashtable, inode_hash, &inode, true); + struct pid_socket *ps = SIMPLE_HASHTABLE_SLOT_DATA(sl); + if(!ps || (ps->pid == 1 && pid != 1)) { + if(uid == UID_UNSET && ls->config.uid) { + char status_buf[512]; + snprintfz(filename, sizeof(filename), "%s/%s/status", proc_filename, proc_entry->d_name); + if (read_txt_file(filename, status_buf, sizeof(status_buf))) + local_sockets_log(ls, "cannot open file: %s\n", filename); + else { + char *u = strstr(status_buf, "Uid:"); + if(u) { + u += 4; + while(isspace(*u)) u++; // skip spaces + while(*u >= '0' && *u <= '9') u++; // skip the first number (real uid) + while(isspace(*u)) u++; // skip spaces again + uid = strtol(u, NULL, 10); // parse the 2nd number (effective uid) + } + } + } + if(!comm[0] && ls->config.comm) { + snprintfz(filename, sizeof(filename), "%s/%s/comm", proc_filename, proc_entry->d_name); + if (read_txt_file(filename, comm, sizeof(comm))) + local_sockets_log(ls, "cannot open file: %s\n", filename); + else { + size_t clen = strlen(comm); + if(comm[clen - 1] == '\n') + comm[clen - 1] = '\0'; + } + } + if(!cmdline[0] && ls->config.cmdline) { + snprintfz(filename, sizeof(filename), "%s/%s/cmdline", proc_filename, proc_entry->d_name); 
+ if (read_proc_cmdline(filename, cmdline, sizeof(cmdline))) + local_sockets_log(ls, "cannot open file: %s\n", filename); + else { + local_sockets_fix_cmdline(cmdline); + cmdline_trimmed = trim(cmdline); + } + } + if(!net_ns_inode && ls->config.namespaces) { + snprintfz(filename, sizeof(filename), "%s/%s/ns/net", proc_filename, proc_entry->d_name); + if(local_sockets_read_proc_inode_link(ls, filename, &net_ns_inode, "net")) { + XXH64_hash_t net_ns_inode_hash = XXH3_64bits(&net_ns_inode, sizeof(net_ns_inode)); + SIMPLE_HASHTABLE_SLOT_NET_NS *sl_ns = simple_hashtable_get_slot_NET_NS(&ls->ns_hashtable, net_ns_inode_hash, (uint64_t *)net_ns_inode, true); + simple_hashtable_set_slot_NET_NS(&ls->ns_hashtable, sl_ns, net_ns_inode, (uint64_t *)net_ns_inode); + } + } + + if(!ps) + ps = aral_callocz(ls->pid_socket_aral); + + ps->inode = inode; + ps->pid = pid; + ps->uid = uid; + ps->net_ns_inode = net_ns_inode; + strncpyz(ps->comm, comm, sizeof(ps->comm) - 1); + + if(ps->cmdline) + freez(ps->cmdline); + + ps->cmdline = cmdline_trimmed ? strdupz(cmdline_trimmed) : NULL; + simple_hashtable_set_slot_PID_SOCKET(&ls->pid_sockets_hashtable, sl, inode_hash, ps); + // fprintf(stderr, "%d: PID %d indexed for using socket inode %"PRIu64"\n", gettid_uncached(), pid, inode); + } + } + + closedir(fd_dir); + } + + closedir(proc_dir); + return true; +} + +// -------------------------------------------------------------------------------------------------------------------- + +static inline void local_sockets_index_listening_port(LS_STATE *ls, LOCAL_SOCKET *n) { + if(n->direction & SOCKET_DIRECTION_LISTEN) { + // for the listening sockets, keep a hashtable with all the local ports + // so that we will be able to detect INBOUND sockets + + SIMPLE_HASHTABLE_SLOT_LISTENING_PORT *sl_port = + simple_hashtable_get_slot_LISTENING_PORT(&ls->listening_ports_hashtable, n->local_port_hash, &n->local_port_key, true); + + struct local_port *port = SIMPLE_HASHTABLE_SLOT_DATA(sl_port); + if(!port) + simple_hashtable_set_slot_LISTENING_PORT(&ls->listening_ports_hashtable, sl_port, n->local_port_hash, &n->local_port_key); + } +} + +static inline bool local_sockets_add_socket(LS_STATE *ls, LOCAL_SOCKET *tmp) { + if(!tmp->inode) return false; + + XXH64_hash_t inode_hash = XXH3_64bits(&tmp->inode, sizeof(tmp->inode)); + SIMPLE_HASHTABLE_SLOT_LOCAL_SOCKET *sl = simple_hashtable_get_slot_LOCAL_SOCKET(&ls->sockets_hashtable, inode_hash, &tmp->inode, true); + LOCAL_SOCKET *n = SIMPLE_HASHTABLE_SLOT_DATA(sl); + if(n) { + local_sockets_log(ls, "inode %" PRIu64" already exists in hashtable - ignoring duplicate", tmp->inode); + return false; + } + + ls->stats.sockets_added++; + + n = aral_mallocz(ls->local_socket_aral); + *n = *tmp; // copy all contents + + // fix the key + n->local_port_key.port = n->local.port; + n->local_port_key.family = n->local.family; + n->local_port_key.protocol = n->local.protocol; + n->local_port_key.net_ns_inode = ls->proc_self_net_ns_inode; + + n->local_ip_hash = XXH3_64bits(&n->local.ip, sizeof(n->local.ip)); + n->remote_ip_hash = XXH3_64bits(&n->remote.ip, sizeof(n->remote.ip)); + n->local_port_hash = XXH3_64bits(&n->local_port_key, sizeof(n->local_port_key)); + + // --- look up a pid for it ----------------------------------------------------------------------------------- + + SIMPLE_HASHTABLE_SLOT_PID_SOCKET *sl_pid = simple_hashtable_get_slot_PID_SOCKET(&ls->pid_sockets_hashtable, inode_hash, &n->inode, false); + struct pid_socket *ps = SIMPLE_HASHTABLE_SLOT_DATA(sl_pid); + if(ps) { + n->net_ns_inode = 
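// The socket inode is the join key between the two data sources: the
// /proc/<pid>/fd scan above maps inode -> (pid, uid, comm, cmdline, netns)
// into pid_sockets_hashtable, while the /proc/net/* readers further down
// produce inode -> (addresses, ports, state). local_sockets_add_socket()
// looks each inode up in the pid table to attach process identity to the
// socket record.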
ps->net_ns_inode; + n->pid = ps->pid; + + if(ps->uid != UID_UNSET && n->uid == UID_UNSET) + n->uid = ps->uid; + + if(ps->cmdline) { + if(n->cmdline) string_freez(n->cmdline); + n->cmdline = string_strdupz(ps->cmdline); + } + + strncpyz(n->comm, ps->comm, sizeof(n->comm) - 1); + } +// else +// fprintf(stderr, "%d: No PID found for inode %"PRIu64"\n", gettid_uncached(), n->inode); + + // --- index it ----------------------------------------------------------------------------------------------- + + simple_hashtable_set_slot_LOCAL_SOCKET(&ls->sockets_hashtable, sl, inode_hash, n); + + if(!local_sockets_is_zero_address(&n->local)) { + // put all the local IPs into the local_ips hashtable + // so, we learn all local IPs the system has + + SIMPLE_HASHTABLE_SLOT_LOCAL_IP *sl_ip = + simple_hashtable_get_slot_LOCAL_IP(&ls->local_ips_hashtable, n->local_ip_hash, &n->local.ip, true); + + union ipv46 *ip = SIMPLE_HASHTABLE_SLOT_DATA(sl_ip); + if(!ip) + simple_hashtable_set_slot_LOCAL_IP(&ls->local_ips_hashtable, sl_ip, n->local_ip_hash, &n->local.ip); + } + + // --- 1st phase for direction detection ---------------------------------------------------------------------- + + if((n->local.protocol == IPPROTO_TCP && n->state == TCP_LISTEN) || + local_sockets_is_zero_address(&n->local) || + local_sockets_is_zero_address(&n->remote)) { + // the socket is either in a TCP LISTEN, or + // the remote address is zero + n->direction |= SOCKET_DIRECTION_LISTEN; + } + else { + // we can't say yet if it is inbound or outboud + // so, mark it as both inbound and outbound + n->direction |= SOCKET_DIRECTION_INBOUND | SOCKET_DIRECTION_OUTBOUND; + } + + // --- index it in LISTENING_PORT ----------------------------------------------------------------------------- + + local_sockets_index_listening_port(ls, n); + + return true; +} + +#if defined(HAVE_LIBMNL) + +static inline int local_sockets_libmnl_cb_data(const struct nlmsghdr *nlh, void *data) { + LS_STATE *ls = data; + + struct inet_diag_msg *diag_msg = mnl_nlmsg_get_payload(nlh); + + LOCAL_SOCKET n = { + .inode = diag_msg->idiag_inode, + .direction = SOCKET_DIRECTION_NONE, + .state = diag_msg->idiag_state, + .ipv6ony = { + .checked = false, + .ipv46 = false, + }, + .local = { + .protocol = ls->tmp_protocol, + .family = diag_msg->idiag_family, + .port = ntohs(diag_msg->id.idiag_sport), + }, + .remote = { + .protocol = ls->tmp_protocol, + .family = diag_msg->idiag_family, + .port = ntohs(diag_msg->id.idiag_dport), + }, + .timer = diag_msg->idiag_timer, + .retransmits = diag_msg->idiag_retrans, + .expires = diag_msg->idiag_expires, + .rqueue = diag_msg->idiag_rqueue, + .wqueue = diag_msg->idiag_wqueue, + .uid = diag_msg->idiag_uid, + }; + + if (diag_msg->idiag_family == AF_INET) { + memcpy(&n.local.ip.ipv4, diag_msg->id.idiag_src, sizeof(n.local.ip.ipv4)); + memcpy(&n.remote.ip.ipv4, diag_msg->id.idiag_dst, sizeof(n.remote.ip.ipv4)); + } + else if (diag_msg->idiag_family == AF_INET6) { + memcpy(&n.local.ip.ipv6, diag_msg->id.idiag_src, sizeof(n.local.ip.ipv6)); + memcpy(&n.remote.ip.ipv6, diag_msg->id.idiag_dst, sizeof(n.remote.ip.ipv6)); + } + + struct rtattr *attr = (struct rtattr *)(diag_msg + 1); + int rtattrlen = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*diag_msg)); + for (; !n.ipv6ony.checked && RTA_OK(attr, rtattrlen); attr = RTA_NEXT(attr, rtattrlen)) { + switch (attr->rta_type) { + case INET_DIAG_INFO: { + if(ls->tmp_protocol == IPPROTO_TCP) { + struct tcp_info *info = (struct tcp_info *)RTA_DATA(attr); + n.info.tcp = *info; + ls->stats.tcp_info_received++; + } + } 
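// First-phase direction marking happens in local_sockets_add_socket() above:
// TCP sockets in LISTEN state (or with a zero local/remote address) are
// flagged SOCKET_DIRECTION_LISTEN outright; everything else is provisionally
// flagged both INBOUND and OUTBOUND. The second phase,
// local_sockets_detect_directions() further down, clears the wrong bit by
// checking the local port against the listening-ports hashtable and the
// remote IP against the local-IPs hashtable.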
+ break; + + case INET_DIAG_SKV6ONLY: { + n.ipv6ony.checked = true; + int ipv6only = *(int *)RTA_DATA(attr); + n.ipv6ony.ipv46 = !ipv6only; + } + break; + + default: + break; + } + } + + local_sockets_add_socket(ls, &n); + + return MNL_CB_OK; +} + +static inline bool local_sockets_libmnl_get_sockets(LS_STATE *ls, uint16_t family, uint16_t protocol) { + ls->tmp_protocol = protocol; + + struct mnl_socket *nl = mnl_socket_open(NETLINK_INET_DIAG); + if (nl == NULL) { + local_sockets_log(ls, "mnl_socket_open() failed"); + return false; + } + + if (mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID) < 0) { + local_sockets_log(ls, "mnl_socket_bind() failed"); + mnl_socket_close(nl); + return false; + } + + char buf[MNL_SOCKET_BUFFER_SIZE]; + struct nlmsghdr *nlh = mnl_nlmsg_put_header(buf); + nlh->nlmsg_type = SOCK_DIAG_BY_FAMILY; + nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP; + nlh->nlmsg_seq = ls->ns_state.nl_seq ? ls->ns_state.nl_seq++ : time(NULL); + + struct inet_diag_req_v2 req = { + .sdiag_family = family, + .sdiag_protocol = protocol, + .idiag_states = ~0, // Request all socket states + .idiag_ext = 0, + }; + + if(family == AF_INET6) + req.idiag_ext |= 1 << (INET_DIAG_SKV6ONLY - 1); + + if(protocol == IPPROTO_TCP && ls->config.tcp_info) + req.idiag_ext |= 1 << (INET_DIAG_INFO - 1); + + mnl_nlmsg_put_extra_header(nlh, sizeof(req)); + memcpy(mnl_nlmsg_get_payload(nlh), &req, sizeof(req)); + + ls->stats.mnl_sends++; + if (mnl_socket_sendto(nl, nlh, nlh->nlmsg_len) < 0) { + local_sockets_log(ls, "mnl_socket_sendto() failed"); + mnl_socket_close(nl); + return false; + } + + bool rc = true; + size_t received = 0; + ssize_t ret; + while ((ret = mnl_socket_recvfrom(nl, buf, sizeof(buf))) > 0) { + ret = mnl_cb_run(buf, ret, 0, 0, local_sockets_libmnl_cb_data, ls); + if (ret == MNL_CB_ERROR) { + local_sockets_log(ls, "mnl_cb_run() failed"); + rc = false; + break; + } + else if (ret <= MNL_CB_STOP) + break; + + received++; + } + mnl_socket_close(nl); + + if (ret == -1) { + local_sockets_log(ls, "mnl_socket_recvfrom() failed"); + rc = false; + } + + return rc; +} +#endif // HAVE_LIBMNL + +static inline bool local_sockets_process_proc_line(LS_STATE *ls, const char *filename, uint16_t family, uint16_t protocol, size_t line, char **words, size_t num_words) { + // char *sl_txt = get_word(words, num_words, 0); + char *local_ip_txt = get_word(words, num_words, 1); + char *local_port_txt = get_word(words, num_words, 2); + char *remote_ip_txt = get_word(words, num_words, 3); + char *remote_port_txt = get_word(words, num_words, 4); + char *state_txt = get_word(words, num_words, 5); + char *tx_queue_txt = get_word(words, num_words, 6); + char *rx_queue_txt = get_word(words, num_words, 7); + char *tr_txt = get_word(words, num_words, 8); + char *tm_when_txt = get_word(words, num_words, 9); + char *retrans_txt = get_word(words, num_words, 10); + char *uid_txt = get_word(words, num_words, 11); + // char *timeout_txt = get_word(words, num_words, 12); + char *inode_txt = get_word(words, num_words, 13); + + if(!local_ip_txt || !local_port_txt || !remote_ip_txt || !remote_port_txt || !state_txt || + !tx_queue_txt || !rx_queue_txt || !tr_txt || !tm_when_txt || !retrans_txt || !uid_txt || !inode_txt) { + local_sockets_log(ls, "cannot parse ipv4 line No %zu of filename '%s'", line, filename); + return false; + } + + LOCAL_SOCKET n = { + .direction = SOCKET_DIRECTION_NONE, + .ipv6ony = { + .checked = false, + .ipv46 = false, + }, + .local = { + .family = family, + .protocol = protocol, + }, + .remote = { + .family = family, + 
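// The fields extracted above follow the classic /proc/net/{tcp,tcp6,udp,udp6}
// layout:
//
//   sl  local_address rem_address   st tx_queue rx_queue tr tm->when
//   retrnsmt   uid  timeout inode
//
// Addresses and ports are hex: on little-endian systems "0100007F:0035" is
// 127.0.0.1 port 53 -- hence the str2uint32_hex() conversions below and the
// hex parsing in ipv6_to_in6_addr().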
.protocol = protocol, + }, + .uid = UID_UNSET, + }; + + n.local.port = str2uint32_hex(local_port_txt, NULL); + n.remote.port = str2uint32_hex(remote_port_txt, NULL); + n.state = str2uint32_hex(state_txt, NULL); + n.wqueue = str2uint32_hex(tx_queue_txt, NULL); + n.rqueue = str2uint32_hex(rx_queue_txt, NULL); + n.timer = str2uint32_hex(tr_txt, NULL); + n.expires = str2uint32_hex(tm_when_txt, NULL); + n.retransmits = str2uint32_hex(retrans_txt, NULL); + n.uid = str2uint32_t(uid_txt, NULL); + n.inode = str2uint64_t(inode_txt, NULL); + + if(family == AF_INET) { + n.local.ip.ipv4 = str2uint32_hex(local_ip_txt, NULL); + n.remote.ip.ipv4 = str2uint32_hex(remote_ip_txt, NULL); + } + else if(family == AF_INET6) { + ipv6_to_in6_addr(local_ip_txt, &n.local.ip.ipv6); + ipv6_to_in6_addr(remote_ip_txt, &n.remote.ip.ipv6); + } + + local_sockets_add_socket(ls, &n); + return true; +} + +static inline bool local_sockets_read_proc_net_x_getline(LS_STATE *ls, const char *filename, uint16_t family, uint16_t protocol) { + static bool is_space[256] = { + [':'] = true, + [' '] = true, + }; + + if(family != AF_INET && family != AF_INET6) + return false; + + FILE *fp = fopen(filename, "r"); + if (fp == NULL) + return false; + + char *line = malloc(1024); // no mallocz() here because getline() may resize + if(!line) { + fclose(fp); + return false; + } + + size_t len = 1024; + ssize_t read; + + ssize_t min_line_length = (family == AF_INET) ? 105 : 155; + size_t counter = 0; + + // Read line by line + while ((read = getline(&line, &len, fp)) != -1) { + if(counter++ == 0) continue; // skip the first line + + if(read < min_line_length) { + local_sockets_log(ls, "too small line No %zu of filename '%s': %s", counter, filename, line); + continue; + } + + char *words[32]; + size_t num_words = quoted_strings_splitter(line, words, 32, is_space); + local_sockets_process_proc_line(ls, filename, family, protocol, counter, words, num_words); + } + + fclose(fp); + + if (line) + free(line); // no freez() here because getline() may resize + + return true; +} + +#define INITIALLY_EXPECTED_PROC_NET_LINES 16384 +#define PROC_NET_BYTES_PER_LINE 155 // 105 for IPv4, 155 for IPv6 +#define PROC_NET_WORDS_PER_LINE 22 +#define INITIALLY_EXPECTED_PROC_NET_WORDS (INITIALLY_EXPECTED_PROC_NET_LINES * PROC_NET_WORDS_PER_LINE) +#define INITIALLY_EXPECTED_PROC_NET_BYTES (INITIALLY_EXPECTED_PROC_NET_LINES * PROC_NET_BYTES_PER_LINE) + +static inline bool local_sockets_read_proc_net_x_procfile(LS_STATE *ls, const char *filename, uint16_t family, uint16_t protocol) { + if(family != AF_INET && family != AF_INET6) + return false; + + procfile_set_adaptive_allocation(true, INITIALLY_EXPECTED_PROC_NET_BYTES, INITIALLY_EXPECTED_PROC_NET_LINES, INITIALLY_EXPECTED_PROC_NET_WORDS); + + bool copy_initial_ff_stats = ls->ff == NULL && ls->stats.ff.memory > 0; + ls->ff = procfile_reopen(ls->ff, filename, ls->ff ? 
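// Two /proc readers coexist: the getline() variant above allocates and frees
// its line buffer with plain malloc()/free() (getline() may realloc it, so
// mallocz()/freez() are deliberately avoided), while this procfile-based
// variant keeps one adaptively-sized buffer in ls->ff across scans, which is
// cheaper for a periodically repeating scan. In both, the separator set " :"
// makes space and ':' word breaks, so "0100007F:0035" splits into an address
// word and a port word.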
NULL :" :", PROCFILE_FLAG_DEFAULT); + + // we just created ff, copy our old stats to it + if(ls->ff && copy_initial_ff_stats) ls->ff->stats = ls->stats.ff; + + ls->ff = procfile_readall(ls->ff); + if(!ls->ff) return false; + + // get the latest stats from ff; + ls->stats.ff = ls->ff->stats; + + for(size_t l = 1; l < procfile_lines(ls->ff) ;l++) { + size_t w = procfile_linewords(ls->ff, l); + if(!w) continue; + if(w < 14) { + local_sockets_log(ls, "too small line No %zu of filename '%s' (has %zu words)", l, filename, w); + continue; + } + + char *words[14] = { 0 }; + words[0] = procfile_lineword(ls->ff, l, 0); + words[1] = procfile_lineword(ls->ff, l, 1); + words[2] = procfile_lineword(ls->ff, l, 2); + words[3] = procfile_lineword(ls->ff, l, 3); + words[4] = procfile_lineword(ls->ff, l, 4); + words[5] = procfile_lineword(ls->ff, l, 5); + words[6] = procfile_lineword(ls->ff, l, 6); + words[7] = procfile_lineword(ls->ff, l, 7); + words[8] = procfile_lineword(ls->ff, l, 8); + words[9] = procfile_lineword(ls->ff, l, 9); + words[10] = procfile_lineword(ls->ff, l, 10); + words[11] = procfile_lineword(ls->ff, l, 11); + words[12] = procfile_lineword(ls->ff, l, 12); + words[13] = procfile_lineword(ls->ff, l, 13); + local_sockets_process_proc_line(ls, filename, family, protocol, l, words, _countof(words)); + } + + return true; +} + +// -------------------------------------------------------------------------------------------------------------------- + +static inline void local_sockets_detect_directions(LS_STATE *ls) { + for(SIMPLE_HASHTABLE_SLOT_LOCAL_SOCKET *sl = simple_hashtable_first_read_only_LOCAL_SOCKET(&ls->sockets_hashtable); + sl ; + sl = simple_hashtable_next_read_only_LOCAL_SOCKET(&ls->sockets_hashtable, sl)) { + LOCAL_SOCKET *n = SIMPLE_HASHTABLE_SLOT_DATA(sl); + if (!n) continue; + + if ((n->direction & (SOCKET_DIRECTION_INBOUND|SOCKET_DIRECTION_OUTBOUND)) != + (SOCKET_DIRECTION_INBOUND|SOCKET_DIRECTION_OUTBOUND)) + continue; + + // check if the local port is one of our listening ports + { + SIMPLE_HASHTABLE_SLOT_LISTENING_PORT *sl_port = + simple_hashtable_get_slot_LISTENING_PORT(&ls->listening_ports_hashtable, n->local_port_hash, &n->local_port_key, false); + + struct local_port *port = SIMPLE_HASHTABLE_SLOT_DATA(sl_port); // do not reference this pointer - is invalid + if(port) { + // the local port of this socket is a port we listen to + n->direction &= ~SOCKET_DIRECTION_OUTBOUND; + } + else + n->direction &= ~SOCKET_DIRECTION_INBOUND; + } + + // check if the remote IP is one of our local IPs + { + SIMPLE_HASHTABLE_SLOT_LOCAL_IP *sl_ip = + simple_hashtable_get_slot_LOCAL_IP(&ls->local_ips_hashtable, n->remote_ip_hash, &n->remote.ip, false); + + union ipv46 *d = SIMPLE_HASHTABLE_SLOT_DATA(sl_ip); + if (d) { + // the remote IP of this socket is one of our local IPs + if(n->direction & SOCKET_DIRECTION_INBOUND) { + n->direction &= ~SOCKET_DIRECTION_INBOUND; + n->direction |= SOCKET_DIRECTION_LOCAL_INBOUND; + } + else if(n->direction & SOCKET_DIRECTION_OUTBOUND) { + n->direction &= ~SOCKET_DIRECTION_OUTBOUND; + n->direction |= SOCKET_DIRECTION_LOCAL_OUTBOUND; + } + continue; + } + } + + if (local_sockets_is_loopback_address(&n->local) || + local_sockets_is_loopback_address(&n->remote)) { + // both IP addresses are loopback + if(n->direction & SOCKET_DIRECTION_INBOUND) { + n->direction &= ~SOCKET_DIRECTION_INBOUND; + n->direction |= SOCKET_DIRECTION_LOCAL_INBOUND; + } + else if(n->direction & SOCKET_DIRECTION_OUTBOUND) { + n->direction &= ~SOCKET_DIRECTION_OUTBOUND; + n->direction |= 
SOCKET_DIRECTION_LOCAL_OUTBOUND; + } + } + } +} + +// -------------------------------------------------------------------------------------------------------------------- + +static inline void local_sockets_init(LS_STATE *ls) { + ls->config.host_prefix = netdata_configured_host_prefix; + + spinlock_init(&ls->spinlock); + + simple_hashtable_init_NET_NS(&ls->ns_hashtable, 1024); + simple_hashtable_init_PID_SOCKET(&ls->pid_sockets_hashtable, 65535); + simple_hashtable_init_LOCAL_SOCKET(&ls->sockets_hashtable, 65535); + simple_hashtable_init_LOCAL_IP(&ls->local_ips_hashtable, 4096); + simple_hashtable_init_LISTENING_PORT(&ls->listening_ports_hashtable, 4096); + + ls->local_socket_aral = aral_create( + "local-sockets", + sizeof(LOCAL_SOCKET), + 65536, + 65536, + NULL, NULL, NULL, false, true); + + ls->pid_socket_aral = aral_create( + "pid-sockets", + sizeof(struct pid_socket), + 65536, + 65536, + NULL, NULL, NULL, false, true); + + memset(&ls->stats, 0, sizeof(ls->stats)); + +#if defined(HAVE_LIBMNL) + ls->tmp_protocol = 0; +#endif + +#if defined(LOCAL_SOCKETS_USE_SETNS) + if(ls->config.namespaces && ls->spawn_server == NULL) { + ls->spawn_server = spawn_server_create(SPAWN_SERVER_OPTION_CALLBACK, NULL, local_sockets_spawn_server_callback, 0, NULL); + ls->spawn_server_is_mine = true; + } + else + ls->spawn_server_is_mine = false; +#endif +} + +static inline void local_sockets_cleanup(LS_STATE *ls) { + if(ls->ff) { + ls->stats.ff = ls->ff->stats; + procfile_close(ls->ff); + ls->ff = NULL; + } + +#if defined(LOCAL_SOCKETS_USE_SETNS) + if(ls->spawn_server_is_mine) { + spawn_server_destroy(ls->spawn_server); + ls->spawn_server = NULL; + ls->spawn_server_is_mine = false; + } +#endif + + // free the sockets hashtable data + for(SIMPLE_HASHTABLE_SLOT_LOCAL_SOCKET *sl = simple_hashtable_first_read_only_LOCAL_SOCKET(&ls->sockets_hashtable); + sl; + sl = simple_hashtable_next_read_only_LOCAL_SOCKET(&ls->sockets_hashtable, sl)) { + LOCAL_SOCKET *n = SIMPLE_HASHTABLE_SLOT_DATA(sl); + if(!n) continue; + + string_freez(n->cmdline); + aral_freez(ls->local_socket_aral, n); + } + + // free the pid_socket hashtable data + for(SIMPLE_HASHTABLE_SLOT_PID_SOCKET *sl = simple_hashtable_first_read_only_PID_SOCKET(&ls->pid_sockets_hashtable); + sl; + sl = simple_hashtable_next_read_only_PID_SOCKET(&ls->pid_sockets_hashtable, sl)) { + struct pid_socket *ps = SIMPLE_HASHTABLE_SLOT_DATA(sl); + if(!ps) continue; + + freez(ps->cmdline); + aral_freez(ls->pid_socket_aral, ps); + } + + // free the hashtable + simple_hashtable_destroy_NET_NS(&ls->ns_hashtable); + simple_hashtable_destroy_PID_SOCKET(&ls->pid_sockets_hashtable); + simple_hashtable_destroy_LISTENING_PORT(&ls->listening_ports_hashtable); + simple_hashtable_destroy_LOCAL_IP(&ls->local_ips_hashtable); + simple_hashtable_destroy_LOCAL_SOCKET(&ls->sockets_hashtable); + + aral_destroy(ls->local_socket_aral); + aral_destroy(ls->pid_socket_aral); +} + +// -------------------------------------------------------------------------------------------------------------------- + +static inline void local_sockets_track_time(LS_STATE *ls, const char *name) { + if(!ls->config.report || ls->timings_idx >= _countof(ls->timings)) + return; + + usec_t now_ut = now_monotonic_usec(); + + if(ls->timings_idx == 0 && !ls->timings[0].start_ut) { + ls->timings[0].start_ut = now_ut; + ls->timings[0].name = name; + } + else if(ls->timings_idx + 1 < _countof(ls->timings)) { + ls->timings[ls->timings_idx].end_ut = now_ut; + ls->timings_idx++; + ls->timings[ls->timings_idx].start_ut = now_ut; + 
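// Typical lifecycle of this API, pieced together from the functions in this
// header (a sketch -- error handling omitted, on_socket is a hypothetical
// local_sockets_cb_t callback):
//
//     LS_STATE ls = { 0 };
//     ls.config.listening = true;              // report listeners only
//     ls.config.tcp4 = ls.config.tcp6 = true;
//     ls.config.cb = on_socket;
//
//     local_sockets_init(&ls);
//     local_sockets_read_all_system_sockets(&ls);
//     local_sockets_foreach_local_socket_call_cb(&ls);
//     local_sockets_cleanup(&ls);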
ls->timings[ls->timings_idx].name = name; + } + else if(ls->timings_idx + 1 == _countof(ls->timings)) { + ls->timings[ls->timings_idx].end_ut = now_ut; + ls->timings_idx++; // out of bounds + } +} + +static void local_sockets_track_time_by_protocol(LS_STATE *ls, bool mnl, uint16_t family, uint16_t protocol) { + if(mnl) { + if(family == AF_INET) { + if(protocol == IPPROTO_TCP) + local_sockets_track_time(ls, "mnl_read_tcp4"); + else if(protocol == IPPROTO_UDP) + local_sockets_track_time(ls, "mnl_read_udp4"); + } + else if(family == AF_INET6) { + if(protocol == IPPROTO_TCP) + local_sockets_track_time(ls, "mnl_read_tcp6"); + else if(protocol == IPPROTO_UDP) + local_sockets_track_time(ls, "mnl_read_udp6"); + } + else + local_sockets_track_time(ls, "mnl_read_unknown"); + } + else { + if(family == AF_INET) { + if(protocol == IPPROTO_TCP) + local_sockets_track_time(ls, "proc_read_tcp4"); + else if(protocol == IPPROTO_UDP) + local_sockets_track_time(ls, "proc_read_udp4"); + } + else if(family == AF_INET6) { + if(protocol == IPPROTO_TCP) + local_sockets_track_time(ls, "proc_read_tcp6"); + else if(protocol == IPPROTO_UDP) + local_sockets_track_time(ls, "proc_read_udp6"); + } + else + local_sockets_track_time(ls, "proc_read_unknown"); + } +} + +static inline void local_sockets_do_family_protocol(LS_STATE *ls, const char *filename, uint16_t family, uint16_t protocol) { +#if defined(HAVE_LIBMNL) + if(!ls->config.no_mnl) { + local_sockets_track_time_by_protocol(ls, true, family, protocol); + if(local_sockets_libmnl_get_sockets(ls, family, protocol)) + return; + + // else, do proc + } +#endif + + local_sockets_track_time_by_protocol(ls, false, family, protocol); + + if(ls->config.procfile) + local_sockets_read_proc_net_x_procfile(ls, filename, family, protocol); + else + local_sockets_read_proc_net_x_getline(ls, filename, family, protocol); +} + +static inline void local_sockets_read_all_system_sockets(LS_STATE *ls) { + char path[FILENAME_MAX + 1]; + + if(ls->config.namespaces) { + local_sockets_track_time(ls, "read_namespaces"); + snprintfz(path, sizeof(path), "%s/proc/self/ns/net", ls->config.host_prefix); + local_sockets_read_proc_inode_link(ls, path, &ls->proc_self_net_ns_inode, "net"); + } + + if(ls->config.cmdline || ls->config.comm || ls->config.pid || ls->config.namespaces) { + local_sockets_track_time(ls, "proc_read_pids"); + snprintfz(path, sizeof(path), "%s/proc", ls->config.host_prefix); + local_sockets_find_all_sockets_in_proc(ls, path); + } + + if(ls->config.tcp4) { + snprintfz(path, sizeof(path), "%s/proc/net/tcp", ls->config.host_prefix); + local_sockets_do_family_protocol(ls, path, AF_INET, IPPROTO_TCP); + } + + if(ls->config.udp4) { + snprintfz(path, sizeof(path), "%s/proc/net/udp", ls->config.host_prefix); + local_sockets_do_family_protocol(ls, path, AF_INET, IPPROTO_UDP); + } + + if(ls->config.tcp6) { + snprintfz(path, sizeof(path), "%s/proc/net/tcp6", ls->config.host_prefix); + local_sockets_do_family_protocol(ls, path, AF_INET6, IPPROTO_TCP); + } + + if(ls->config.udp6) { + snprintfz(path, sizeof(path), "%s/proc/net/udp6", ls->config.host_prefix); + local_sockets_do_family_protocol(ls, path, AF_INET6, IPPROTO_UDP); + } +} + +// -------------------------------------------------------------------------------------------------------------------- +// switch namespaces to read namespace sockets + +#if defined(LOCAL_SOCKETS_USE_SETNS) + +struct local_sockets_child_work { + int fd; + uint64_t net_ns_inode; +}; + +#define LOCAL_SOCKET_TERMINATOR (struct local_socket) { \ + .expires = 
UINT32_MAX, \ + .timer = UINT8_MAX, \ + .inode = UINT64_MAX, \ + .net_ns_inode = UINT64_MAX, \ +} + +static inline bool local_socket_is_terminator(const struct local_socket *n) { + static const struct local_socket t = LOCAL_SOCKET_TERMINATOR; + return (n->expires == t.expires && + n->timer == t.timer && + n->inode == t.inode && + n->net_ns_inode == t.net_ns_inode); +} + +static inline void local_sockets_send_to_parent(struct local_socket_state *ls, const struct local_socket *n, void *data) { + struct local_sockets_child_work *cw = data; + int fd = cw->fd; + + if(!local_socket_is_terminator(n)) { + ls->stats.errors_encountered = 0; +// local_sockets_log( +// ls, +// "child is sending inode %"PRIu64" of namespace %"PRIu64", from namespace %"PRIu64" for pid %d", +// n->inode, n->net_ns_inode, ls->proc_self_net_ns_inode, ls->ns_state.net_ns_pid); + } + + if(write(fd, n, sizeof(*n)) != sizeof(*n)) + local_sockets_log(ls, "failed to write local socket to pipe"); + + size_t len = n->cmdline ? string_strlen(n->cmdline) + 1 : 0; + if(write(fd, &len, sizeof(len)) != sizeof(len)) + local_sockets_log(ls, "failed to write cmdline length to pipe"); + + if(len) + if(write(fd, string2str(n->cmdline), len) != (ssize_t)len) + local_sockets_log(ls, "failed to write cmdline to pipe"); +} + +static inline int local_sockets_spawn_server_callback(SPAWN_REQUEST *request) { + static const struct local_socket terminator = LOCAL_SOCKET_TERMINATOR; + + struct local_sockets_ns_req *req = (struct local_sockets_ns_req *)request->data; + + LS_STATE ls = { 0 }; + ls.config = req->config; + ls.ns_state = req->ns_state; + ls.ns_state.nl_seq += gettid_uncached() * 10; + + // we don't need these inside namespaces + ls.config.cmdline = false; + ls.config.comm = false; + ls.config.pid = false; + ls.config.namespaces = false; + +#if !defined(USE_LIBMNL_AFTER_SETNS) + ls.config.no_mnl = true; // disable mnl since this collects all sockets from the entire system +#endif + + // initialize local sockets + local_sockets_init(&ls); + ls.proc_self_net_ns_inode = ls.ns_state.net_ns_inode; + ls.config.host_prefix = ""; // we need the /proc of the container + + struct local_sockets_child_work cw = { + .net_ns_inode = ls.proc_self_net_ns_inode, + .fd = request->fds[1], // stdout + }; + + ls.config.cb = local_sockets_send_to_parent; + ls.config.data = &cw; + + // switch namespace using the custom fd passed via the spawn server + if (setns(request->fds[3], CLONE_NEWNET) == -1) { + local_sockets_log(&ls, "failed to switch network namespace at child process using fd %d", request->fds[3]); + return EXIT_FAILURE; + } + + // close the custom fd + close(request->fds[3]); request->fds[3] = -1; + + // read all sockets from /proc + local_sockets_read_all_system_sockets(&ls); + + // send all sockets to parent + local_sockets_foreach_local_socket_call_cb(&ls); + + // send the terminating socket + local_sockets_send_to_parent(&ls, &terminator, &cw); + + local_sockets_cleanup(&ls); + + return EXIT_SUCCESS; +} + +static inline bool local_sockets_get_namespace_sockets_with_pid(LS_STATE *ls, struct pid_socket *ps) { + char filename[1024]; + snprintfz(filename, sizeof(filename), "%s/proc/%d/ns/net", ls->config.host_prefix, ps->pid); + + // verify the pid is in the target namespace + int fd = open(filename, O_RDONLY | O_CLOEXEC); + if (fd == -1) { + local_sockets_log(ls, "cannot open file '%s'", filename); + if(ls->config.report) + __atomic_add_fetch(&ls->stats.namespaces_absent, 1, __ATOMIC_RELAXED); + return false; + } + + struct stat statbuf; + if 
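// Wire protocol between the setns() child and the parent, as implemented by
// local_sockets_send_to_parent() above and consumed by the read loop below:
// for each socket the child writes (1) the fixed-size struct local_socket,
// (2) a size_t cmdline length (0 if absent), (3) the cmdline bytes, if any;
// it then ends the stream with the LOCAL_SOCKET_TERMINATOR sentinel, whose
// all-maxed fields are treated as impossible for a real socket record.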
(fstat(fd, &statbuf) == -1) { + close(fd); + local_sockets_log(ls, "failed to get file statistics for '%s'", filename); + if(ls->config.report) + __atomic_add_fetch(&ls->stats.namespaces_absent, 1, __ATOMIC_RELAXED); + return false; + } + + if (statbuf.st_ino != ps->net_ns_inode) { + close(fd); + local_sockets_log(ls, "pid %d is not in the wanted network namespace", ps->pid); + if(ls->config.report) + __atomic_add_fetch(&ls->stats.namespaces_invalid, 1, __ATOMIC_RELAXED); + return false; + } + + if(ls->spawn_server == NULL) { + close(fd); + local_sockets_log(ls, "spawn server is not available"); + if(ls->config.report) + __atomic_add_fetch(&ls->stats.namespaces_forks_failed, 1, __ATOMIC_RELAXED); + return false; + } + + struct local_sockets_ns_req req = { + .config = ls->config, + .ns_state = ls->ns_state, + }; + req.ns_state.net_ns_pid = ps->pid; + req.ns_state.net_ns_inode = ps->net_ns_inode; + + SPAWN_INSTANCE *si = spawn_server_exec(ls->spawn_server, STDERR_FILENO, fd, NULL, &req, sizeof(req), SPAWN_INSTANCE_TYPE_CALLBACK); + close(fd); fd = -1; + + if(ls->config.report) + __atomic_add_fetch(&ls->stats.namespaces_forks_attempted, 1, __ATOMIC_RELAXED); + + if(si == NULL) { + local_sockets_log(ls, "cannot create spawn instance"); + + if(ls->config.report) + __atomic_add_fetch(&ls->stats.namespaces_forks_failed, 1, __ATOMIC_RELAXED); + + return false; + } + + size_t received = 0; + struct local_socket buf; + while(read(spawn_server_instance_read_fd(si), &buf, sizeof(buf)) == sizeof(buf)) { + size_t len = 0; + if(read(spawn_server_instance_read_fd(si), &len, sizeof(len)) != sizeof(len)) + local_sockets_log(ls, "failed to read cmdline length from pipe"); + + if(len) { + char cmdline[len + 1]; + if(read(spawn_server_instance_read_fd(si), cmdline, len) != (ssize_t)len) + local_sockets_log(ls, "failed to read cmdline from pipe"); + else { + cmdline[len] = '\0'; + buf.cmdline = string_strdupz(cmdline); + } + } + else + buf.cmdline = NULL; + + received++; + + if(local_socket_is_terminator(&buf)) + // the child finished + break; + + // overwrite the net_ns_inode we receive + buf.net_ns_inode = ps->net_ns_inode; + + spinlock_lock(&ls->spinlock); + + if(!local_sockets_add_socket(ls, &buf)) { + // fprintf(stderr, "Failed to add duplicate namespace socket inode %"PRIu64"\n", buf.inode); + string_freez(buf.cmdline); + if(ls->config.report) + __atomic_add_fetch(&ls->stats.namespaces_sockets_existing, 1, __ATOMIC_RELAXED); + } + else { + // fprintf(stderr, "Added namespace socket inode %"PRIu64"\n", buf.inode); + if(ls->config.report) + __atomic_add_fetch(&ls->stats.namespaces_sockets_new, 1, __ATOMIC_RELAXED); + } + + spinlock_unlock(&ls->spinlock); + } + + spawn_server_exec_kill(ls->spawn_server, si); + + if(ls->config.report && received == 0) + __atomic_add_fetch(&ls->stats.namespaces_forks_unresponsive, 1, __ATOMIC_RELAXED); + + return received > 0; +} + +struct local_sockets_namespace_worker { + LS_STATE *ls; + uint64_t inode; +}; + +static inline void *local_sockets_get_namespace_sockets_worker(void *arg) { + struct local_sockets_namespace_worker *data = arg; + LS_STATE *ls = data->ls; + const uint64_t inode = data->inode; + + spinlock_lock(&ls->spinlock); + + // find a pid_socket that has this namespace + for(SIMPLE_HASHTABLE_SLOT_PID_SOCKET *sl_pid = simple_hashtable_first_read_only_PID_SOCKET(&ls->pid_sockets_hashtable) ; + sl_pid ; + sl_pid = simple_hashtable_next_read_only_PID_SOCKET(&ls->pid_sockets_hashtable, sl_pid)) { + struct pid_socket *ps = SIMPLE_HASHTABLE_SLOT_DATA(sl_pid); + if(!ps 
|| ps->net_ns_inode != inode) continue; + + // now we have a pid that has the same namespace inode + + spinlock_unlock(&ls->spinlock); + const bool worked = local_sockets_get_namespace_sockets_with_pid(ls, ps); + spinlock_lock(&ls->spinlock); + + if(worked) + break; + } + + spinlock_unlock(&ls->spinlock); + + return NULL; +} + +static inline void local_sockets_namespaces(LS_STATE *ls) { + size_t threads = ls->config.max_concurrent_namespaces; + if(threads == 0) threads = 5; + if(threads > 100) threads = 100; + + size_t last_thread = 0; + ND_THREAD *workers[threads]; + struct local_sockets_namespace_worker workers_data[threads]; + memset(workers, 0, sizeof(workers)); + memset(workers_data, 0, sizeof(workers_data)); + + spinlock_lock(&ls->spinlock); + + for(SIMPLE_HASHTABLE_SLOT_NET_NS *sl = simple_hashtable_first_read_only_NET_NS(&ls->ns_hashtable); + sl; + sl = simple_hashtable_next_read_only_NET_NS(&ls->ns_hashtable, sl)) { + const uint64_t inode = (uint64_t)SIMPLE_HASHTABLE_SLOT_DATA(sl); + + if(inode == ls->proc_self_net_ns_inode) + // skip our own namespace, we already have them + continue; + + spinlock_unlock(&ls->spinlock); + + ls->stats.namespaces_found++; + + if(workers[last_thread] != NULL) { + if(++last_thread >= threads) + last_thread = 0; + + if(workers[last_thread]) { + nd_thread_join(workers[last_thread]); + workers[last_thread] = NULL; + } + } + + workers_data[last_thread].ls = ls; + workers_data[last_thread].inode = inode; + workers[last_thread] = nd_thread_create( + "local-sockets-worker", + NETDATA_THREAD_OPTION_JOINABLE, + local_sockets_get_namespace_sockets_worker, + &workers_data[last_thread]); + + spinlock_lock(&ls->spinlock); + } + + spinlock_unlock(&ls->spinlock); + + // wait for all running worker threads to finish + for(size_t i = 0; i < threads ;i++) { + if(workers[i]) + nd_thread_join(workers[i]); + } +} + +#endif // LOCAL_SOCKETS_USE_SETNS + +// -------------------------------------------------------------------------------------------------------------------- +// read namespace sockets from the host's /proc + +#if !defined(LOCAL_SOCKETS_USE_SETNS) + +static inline bool local_sockets_namespaces_from_proc_with_pid(LS_STATE *ls, struct pid_socket *ps) { + char filename[1024]; + snprintfz(filename, sizeof(filename), "%s/proc/%d/ns/net", ls->config.host_prefix, ps->pid); + + // verify the pid is in the target namespace + int fd = open(filename, O_RDONLY | O_CLOEXEC); + if (fd == -1) { + local_sockets_log(ls, "cannot open file '%s'", filename); + if(ls->config.report) + __atomic_add_fetch(&ls->stats.namespaces_absent, 1, __ATOMIC_RELAXED); + return false; + } + + struct stat statbuf; + if (fstat(fd, &statbuf) == -1) { + close(fd); + local_sockets_log(ls, "failed to get file statistics for '%s'", filename); + if(ls->config.report) + __atomic_add_fetch(&ls->stats.namespaces_absent, 1, __ATOMIC_RELAXED); + return false; + } + + if (statbuf.st_ino != ps->net_ns_inode) { + close(fd); + local_sockets_log(ls, "pid %d is not in the wanted network namespace", ps->pid); + if(ls->config.report) + __atomic_add_fetch(&ls->stats.namespaces_invalid, 1, __ATOMIC_RELAXED); + return false; + } + + char path[FILENAME_MAX + 1]; + + if(ls->config.tcp4) { + snprintfz(path, sizeof(path), "%s/proc/%d/net/tcp", ls->config.host_prefix, ps->pid); + if(!local_sockets_read_proc_net_x(ls, path, AF_INET, IPPROTO_TCP)) + return false; + } + + if(ls->config.udp4) { + snprintfz(path, sizeof(path), "%s/proc/%d/net/udp", ls->config.host_prefix, ps->pid); + if(!local_sockets_read_proc_net_x(ls, path, AF_INET, 
IPPROTO_UDP)) + return false; + } + + if(ls->config.tcp6) { + snprintfz(path, sizeof(path), "%s/proc/%d/net/tcp6", ls->config.host_prefix, ps->pid); + if(!local_sockets_read_proc_net_x(ls, path, AF_INET6, IPPROTO_TCP)) + return false; + } + + if(ls->config.udp6) { + snprintfz(path, sizeof(path), "%s/proc/%d/net/udp6", ls->config.host_prefix, ps->pid); + if(!local_sockets_read_proc_net_x(ls, path, AF_INET6, IPPROTO_UDP)) + return false; + } + + return true; +} + +static inline void local_sockets_namespaces_from_proc(LS_STATE *ls) { + for(SIMPLE_HASHTABLE_SLOT_NET_NS *sl = simple_hashtable_first_read_only_NET_NS(&ls->ns_hashtable); + sl; + sl = simple_hashtable_next_read_only_NET_NS(&ls->ns_hashtable, sl)) { + const uint64_t inode = (uint64_t)SIMPLE_HASHTABLE_SLOT_DATA(sl); + + if (inode == ls->proc_self_net_ns_inode) + // skip our own namespace, we already have them + continue; + + ls->stats.namespaces_found++; + + for(SIMPLE_HASHTABLE_SLOT_PID_SOCKET *sl_pid = simple_hashtable_first_read_only_PID_SOCKET(&ls->pid_sockets_hashtable) ; + sl_pid ; + sl_pid = simple_hashtable_next_read_only_PID_SOCKET(&ls->pid_sockets_hashtable, sl_pid)) { + struct pid_socket *ps = SIMPLE_HASHTABLE_SLOT_DATA(sl_pid); + if(!ps || ps->net_ns_inode != inode) continue; + + // now we have a pid that has the same namespace inode + + if(local_sockets_namespaces_from_proc_with_pid(ls, ps)) + break; + } + } +} + +#endif + +// -------------------------------------------------------------------------------------------------------------------- + +static inline void local_sockets_process(LS_STATE *ls) { + ls->timings_idx = 0; + local_sockets_track_time(ls, "init"); + + // initialize our hashtables + local_sockets_init(ls); + + local_sockets_track_time(ls, "all_sockets"); + + // read all sockets from /proc + local_sockets_read_all_system_sockets(ls); + + // check all socket namespaces + if(ls->config.namespaces) { + local_sockets_track_time(ls, "switch_namespaces"); +#if defined(LOCAL_SOCKETS_USE_SETNS) + local_sockets_namespaces(ls); +#else + local_sockets_namespaces_from_proc(ls); +#endif + } + + // detect the directions of the sockets + if(ls->config.inbound || ls->config.outbound || ls->config.local) { + local_sockets_track_time(ls, "detect_direction"); + local_sockets_detect_directions(ls); + } + + // call the callback for each socket + local_sockets_track_time(ls, "output"); + local_sockets_foreach_local_socket_call_cb(ls); + + // free all memory + local_sockets_track_time(ls, "cleanup"); + local_sockets_cleanup(ls); +} + +#endif //NETDATA_LOCAL_SOCKETS_H diff --git a/src/libnetdata/locks/README.md b/src/libnetdata/locks/README.md index 35d602f2a..25916b002 100644 --- a/src/libnetdata/locks/README.md +++ b/src/libnetdata/locks/README.md @@ -1,12 +1,3 @@ - - # Locks ## How to trace netdata locks @@ -58,13 +49,13 @@ If any call is expected to pause the caller (ie the caller is attempting a read ``` RW_LOCK ON LOCK 0x0x5651c9fcce20: 4190039 'HEALTH' (function health_execute_pending_updates() 661@health/health.c) WANTS a 'W' lock (while holding 1 rwlocks and 1 mutexes). There are 7 readers and 0 writers are holding the lock: - => 1: RW_LOCK: process 4190091 'WEB_SERVER[static14]' (function web_client_api_request_v1_data() 526@web/api/web_api_v1.c) is having 1 'R' lock for 709847 usec. - => 2: RW_LOCK: process 4190079 'WEB_SERVER[static6]' (function web_client_api_request_v1_data() 526@web/api/web_api_v1.c) is having 1 'R' lock for 709869 usec. 
- => 3: RW_LOCK: process 4190084 'WEB_SERVER[static10]' (function web_client_api_request_v1_data() 526@web/api/web_api_v1.c) is having 1 'R' lock for 709948 usec. - => 4: RW_LOCK: process 4190076 'WEB_SERVER[static3]' (function web_client_api_request_v1_data() 526@web/api/web_api_v1.c) is having 1 'R' lock for 710190 usec. - => 5: RW_LOCK: process 4190092 'WEB_SERVER[static15]' (function web_client_api_request_v1_data() 526@web/api/web_api_v1.c) is having 1 'R' lock for 710195 usec. - => 6: RW_LOCK: process 4190077 'WEB_SERVER[static4]' (function web_client_api_request_v1_data() 526@web/api/web_api_v1.c) is having 1 'R' lock for 710208 usec. - => 7: RW_LOCK: process 4190044 'WEB_SERVER[static1]' (function web_client_api_request_v1_data() 526@web/api/web_api_v1.c) is having 1 'R' lock for 710221 usec. + => 1: RW_LOCK: process 4190091 'WEB_SERVER[static14]' (function api_v1_data() 526@web/api/web_api_v1.c) is having 1 'R' lock for 709847 usec. + => 2: RW_LOCK: process 4190079 'WEB_SERVER[static6]' (function api_v1_data() 526@web/api/web_api_v1.c) is having 1 'R' lock for 709869 usec. + => 3: RW_LOCK: process 4190084 'WEB_SERVER[static10]' (function api_v1_data() 526@web/api/web_api_v1.c) is having 1 'R' lock for 709948 usec. + => 4: RW_LOCK: process 4190076 'WEB_SERVER[static3]' (function api_v1_data() 526@web/api/web_api_v1.c) is having 1 'R' lock for 710190 usec. + => 5: RW_LOCK: process 4190092 'WEB_SERVER[static15]' (function api_v1_data() 526@web/api/web_api_v1.c) is having 1 'R' lock for 710195 usec. + => 6: RW_LOCK: process 4190077 'WEB_SERVER[static4]' (function api_v1_data() 526@web/api/web_api_v1.c) is having 1 'R' lock for 710208 usec. + => 7: RW_LOCK: process 4190044 'WEB_SERVER[static1]' (function api_v1_data() 526@web/api/web_api_v1.c) is having 1 'R' lock for 710221 usec. ``` And each of the above is paired with a `GOT` log, like this: diff --git a/src/libnetdata/log/README.md b/src/libnetdata/log/README.md index ef9ca1ef3..c7a42f28b 100644 --- a/src/libnetdata/log/README.md +++ b/src/libnetdata/log/README.md @@ -1,12 +1,3 @@ - - # Netdata Logging This document describes how Netdata generates its own logs, not how Netdata manages and queries logs databases. @@ -26,14 +17,15 @@ For each log source, Netdata supports the following output methods: - **off**, to disable this log source - **journal**, to send the logs to systemd-journal. +- **etw**, to send the logs to Event Tracing for Windows (ETW). +- **wel**, to send the logs to the Windows Event Log (WEL). - **syslog**, to send the logs to syslog. - **system**, to send the output to `stderr` or `stdout` depending on the log source. - **stdout**, to write the logs to Netdata's `stdout`. - **stderr**, to write the logs to Netdata's `stderr`. - **filename**, to send the logs to a file. -For `daemon` and `collector` the default is `journal` when systemd-journal is available. -To decide if systemd-journal is available, Netdata checks: +On Linux, when systemd-journal is available, the default is `journal` for `daemon` and `collector`, and `filename` for the rest. To decide if systemd-journal is available, Netdata checks: 1. `stderr` is connected to systemd-journald 2. `/run/systemd/journal/socket` exists @@ -41,13 +33,16 @@ To decide if systemd-journal is available, Netdata checks: If any of the above is detected, Netdata will select `journal` for `daemon` and `collector` sources. -All other sources default to a file. +On Windows, the default is `etw`, and if that is not available it falls back to `wel`. 
The availability of `etw` is decided at compile time. ## Log formats | Format | Description | |---------|--------------------------------------------------------------------------------------------------------| | journal | journald-specific log format. Automatically selected when logging to systemd-journal. | +| etw | Event Tracing for Windows specific format. Structured logging in Event Viewer. | +| wel | Windows Event Log specific format. Basic field-based logging in Event Viewer. | | logfmt | logs data as a series of key/value pairs. The default when logging to any output other than `journal`. | | json | logs data in JSON format. | @@ -66,6 +61,9 @@ Each time Netdata logs, it assigns a priority to the log. It can be one of this | info | the default log level about information the user should know. | | debug | these are more verbose logs that can be ignored. | +For `etw` these are mapped to `Verbose`, `Informational`, `Warning`, `Error` and `Critical`. +For `wel` these are mapped to `Informational`, `Warning`, `Error`. + ## Logs Configuration In `netdata.conf`, there are the following settings: @@ -73,7 +71,7 @@ In `netdata.conf`, there are the following settings: ``` [logs] # logs to trigger flood protection = 1000 - # logs flood protection period = 60 + # logs flood protection period = 1m # facility = daemon # level = info # daemon = journal @@ -117,66 +115,69 @@ Sending a `SIGHUP` to Netdata will instruct it to re-open all its log files.
    All fields exposed by Netdata -| journal | logfmt | json | Description | -|:--------------------------------------:|:------------------------------:|:------------------------------:|:---------------------------------------------------------------------------------------------------------:| -| `_SOURCE_REALTIME_TIMESTAMP` | `time` | `time` | the timestamp of the event | -| `SYSLOG_IDENTIFIER` | `comm` | `comm` | the program logging the event | -| `ND_LOG_SOURCE` | `source` | `source` | one of the [log sources](#log-sources) | -| `PRIORITY`
    numeric | `level`
    text | `level`
    numeric | one of the [log levels](#log-levels) | -| `ERRNO` | `errno` | `errno` | the numeric value of `errno` | -| `INVOCATION_ID` | - | - | a unique UUID of the Netdata session, reset on every Netdata restart, inherited by systemd when available | -| `CODE_LINE` | - | - | the line number of of the source code logging this event | -| `CODE_FILE` | - | - | the filename of the source code logging this event | -| `CODE_FUNCTION` | - | - | the function name of the source code logging this event | -| `TID` | `tid` | `tid` | the thread id of the thread logging this event | -| `THREAD_TAG` | `thread` | `thread` | the name of the thread logging this event | -| `MESSAGE_ID` | `msg_id` | `msg_id` | see [message IDs](#message-ids) | -| `ND_MODULE` | `module` | `module` | the Netdata module logging this event | -| `ND_NIDL_NODE` | `node` | `node` | the hostname of the node the event is related to | -| `ND_NIDL_INSTANCE` | `instance` | `instance` | the instance of the node the event is related to | -| `ND_NIDL_CONTEXT` | `context` | `context` | the context the event is related to (this is usually the chart name, as shown on netdata dashboards | -| `ND_NIDL_DIMENSION` | `dimension` | `dimension` | the dimension the event is related to | -| `ND_SRC_TRANSPORT` | `src_transport` | `src_transport` | when the event happened during a request, this is the request transport | -| `ND_SRC_IP` | `src_ip` | `src_ip` | when the event happened during an inbound request, this is the IP the request came from | -| `ND_SRC_PORT` | `src_port` | `src_port` | when the event happened during an inbound request, this is the port the request came from | -| `ND_SRC_FORWARDED_HOST` | `src_forwarded_host` | `src_forwarded_host` | the contents of the HTTP header `X-Forwarded-Host` | -| `ND_SRC_FORWARDED_FOR` | `src_forwarded_for` | `src_forwarded_for` | the contents of the HTTP header `X-Forwarded-For` | -| `ND_SRC_CAPABILITIES` | `src_capabilities` | `src_capabilities` | when the request came from a child, this is the communication capabilities of the child | -| `ND_DST_TRANSPORT` | `dst_transport` | `dst_transport` | when the event happened during an outbound request, this is the outbound request transport | -| `ND_DST_IP` | `dst_ip` | `dst_ip` | when the event happened during an outbound request, this is the IP the request destination | -| `ND_DST_PORT` | `dst_port` | `dst_port` | when the event happened during an outbound request, this is the port the request destination | -| `ND_DST_CAPABILITIES` | `dst_capabilities` | `dst_capabilities` | when the request goes to a parent, this is the communication capabilities of the parent | -| `ND_REQUEST_METHOD` | `req_method` | `req_method` | when the event happened during an inbound request, this is the method the request was received | -| `ND_RESPONSE_CODE` | `code` | `code` | when responding to a request, this this the response code | -| `ND_CONNECTION_ID` | `conn` | `conn` | when there is a connection id for an inbound connection, this is the connection id | -| `ND_TRANSACTION_ID` | `transaction` | `transaction` | the transaction id (UUID) of all API requests | -| `ND_RESPONSE_SENT_BYTES` | `sent_bytes` | `sent_bytes` | the bytes we sent to API responses | -| `ND_RESPONSE_SIZE_BYTES` | `size_bytes` | `size_bytes` | the uncompressed bytes of the API responses | -| `ND_RESPONSE_PREP_TIME_USEC` | `prep_ut` | `prep_ut` | the time needed to prepare a response | -| `ND_RESPONSE_SENT_TIME_USEC` | `sent_ut` | `sent_ut` | the time needed to send a response | -| 
`ND_RESPONSE_TOTAL_TIME_USEC` | `total_ut` | `total_ut` | the total time needed to complete a response | -| `ND_ALERT_ID` | `alert_id` | `alert_id` | the alert id this event is related to | -| `ND_ALERT_EVENT_ID` | `alert_event_id` | `alert_event_id` | a sequential number of the alert transition (per host) | -| `ND_ALERT_UNIQUE_ID` | `alert_unique_id` | `alert_unique_id` | a sequential number of the alert transition (per alert) | -| `ND_ALERT_TRANSITION_ID` | `alert_transition_id` | `alert_transition_id` | the unique UUID of this alert transition | -| `ND_ALERT_CONFIG` | `alert_config` | `alert_config` | the alert configuration hash (UUID) | -| `ND_ALERT_NAME` | `alert` | `alert` | the alert name | -| `ND_ALERT_CLASS` | `alert_class` | `alert_class` | the alert classification | -| `ND_ALERT_COMPONENT` | `alert_component` | `alert_component` | the alert component | -| `ND_ALERT_TYPE` | `alert_type` | `alert_type` | the alert type | -| `ND_ALERT_EXEC` | `alert_exec` | `alert_exec` | the alert notification program | -| `ND_ALERT_RECIPIENT` | `alert_recipient` | `alert_recipient` | the alert recipient(s) | -| `ND_ALERT_VALUE` | `alert_value` | `alert_value` | the current alert value | -| `ND_ALERT_VALUE_OLD` | `alert_value_old` | `alert_value_old` | the previous alert value | -| `ND_ALERT_STATUS` | `alert_status` | `alert_status` | the current alert status | -| `ND_ALERT_STATUS_OLD` | `alert_value_old` | `alert_value_old` | the previous alert value | -| `ND_ALERT_UNITS` | `alert_units` | `alert_units` | the units of the alert | -| `ND_ALERT_SUMMARY` | `alert_summary` | `alert_summary` | the summary text of the alert | -| `ND_ALERT_INFO` | `alert_info` | `alert_info` | the info text of the alert | -| `ND_ALERT_DURATION` | `alert_duration` | `alert_duration` | the duration the alert was in its previous state | -| `ND_ALERT_NOTIFICATION_TIMESTAMP_USEC` | `alert_notification_timestamp` | `alert_notification_timestamp` | the timestamp the notification delivery is scheduled | -| `ND_REQUEST` | `request` | `request` | the full request during which the event happened | -| `MESSAGE` | `msg` | `msg` | the event message | +| `journal` | `logfmt` and `json` | `etw` | `wel` | Description | +|:--------------------------------------:|:------------------------------:|:-----------------------------:|:-----:|:----------------------------------------------------------------------------------------------------------| +| `_SOURCE_REALTIME_TIMESTAMP` | `time` | `Timestamp` | 1 | the timestamp of the event | +| `SYSLOG_IDENTIFIER` | `comm` | `Program` | 2 | the program logging the event | +| `ND_LOG_SOURCE` | `source` | `NetdataLogSource` | 3 | one of the [log sources](#log-sources) | +| `PRIORITY`
    numeric | `level`
    text | `Level`
text | 4 | one of the [log levels](#log-levels) | +| `ERRNO` | `errno` | `UnixErrno` | 5 | the numeric value of `errno` | +| `INVOCATION_ID` | - | `InvocationID` | 7 | a unique UUID of the Netdata session, reset on every Netdata restart, inherited from systemd when available | +| `CODE_LINE` | - | `CodeLine` | 8 | the line number of the source code logging this event | +| `CODE_FILE` | - | `CodeFile` | 9 | the filename of the source code logging this event | +| `CODE_FUNCTION` | - | `CodeFunction` | 10 | the function name of the source code logging this event | +| `TID` | `tid` | `ThreadID` | 11 | the thread id of the thread logging this event | +| `THREAD_TAG` | `thread` | `ThreadName` | 12 | the name of the thread logging this event | +| `MESSAGE_ID` | `msg_id` | `MessageID` | 13 | see [message IDs](#message-ids) | +| `ND_MODULE` | `module` | `Module` | 14 | the Netdata module logging this event | +| `ND_NIDL_NODE` | `node` | `Node` | 15 | the hostname of the node the event is related to | +| `ND_NIDL_INSTANCE` | `instance` | `Instance` | 16 | the instance of the node the event is related to | +| `ND_NIDL_CONTEXT` | `context` | `Context` | 17 | the context the event is related to (this is usually the chart name, as shown on netdata dashboards) | +| `ND_NIDL_DIMENSION` | `dimension` | `Dimension` | 18 | the dimension the event is related to | +| `ND_SRC_TRANSPORT` | `src_transport` | `SourceTransport` | 19 | when the event happened during a request, this is the request transport | +| `ND_SRC_IP` | `src_ip` | `SourceIP` | 24 | when the event happened during an inbound request, this is the IP the request came from | +| `ND_SRC_PORT` | `src_port` | `SourcePort` | 25 | when the event happened during an inbound request, this is the port the request came from | +| `ND_SRC_FORWARDED_HOST` | `src_forwarded_host` | `SourceForwardedHost` | 26 | the contents of the HTTP header `X-Forwarded-Host` | +| `ND_SRC_FORWARDED_FOR` | `src_forwarded_for` | `SourceForwardedFor` | 27 | the contents of the HTTP header `X-Forwarded-For` | +| `ND_SRC_CAPABILITIES` | `src_capabilities` | `SourceCapabilities` | 28 | when the request came from a child, this is the communication capabilities of the child | +| `ND_DST_TRANSPORT` | `dst_transport` | `DestinationTransport` | 29 | when the event happened during an outbound request, this is the outbound request transport | +| `ND_DST_IP` | `dst_ip` | `DestinationIP` | 30 | when the event happened during an outbound request, this is the IP of the request destination | +| `ND_DST_PORT` | `dst_port` | `DestinationPort` | 31 | when the event happened during an outbound request, this is the port of the request destination | +| `ND_DST_CAPABILITIES` | `dst_capabilities` | `DestinationCapabilities` | 32 | when the request goes to a parent, this is the communication capabilities of the parent | +| `ND_REQUEST_METHOD` | `req_method` | `RequestMethod` | 33 | when the event happened during an inbound request, this is the method with which the request was received | +| `ND_RESPONSE_CODE` | `code` | `ResponseCode` | 34 | when responding to a request, this is the response code | +| `ND_CONNECTION_ID` | `conn` | `ConnectionID` | 35 | when there is a connection id for an inbound connection, this is the connection id | +| `ND_TRANSACTION_ID` | `transaction` | `TransactionID` | 36 | the transaction id (UUID) of all API requests | +| `ND_RESPONSE_SENT_BYTES` | `sent_bytes` | `ResponseSentBytes` | 37 | the bytes we sent in API responses | +| `ND_RESPONSE_SIZE_BYTES` | `size_bytes` | `ResponseSizeBytes` 
| 38 | the uncompressed bytes of the API responses | +| `ND_RESPONSE_PREP_TIME_USEC` | `prep_ut` | `ResponsePreparationTimeUsec` | 39 | the time needed to prepare a response | +| `ND_RESPONSE_SENT_TIME_USEC` | `sent_ut` | `ResponseSentTimeUsec` | 40 | the time needed to send a response | +| `ND_RESPONSE_TOTAL_TIME_USEC` | `total_ut` | `ResponseTotalTimeUsec` | 41 | the total time needed to complete a response | +| `ND_ALERT_ID` | `alert_id` | `AlertID` | 42 | the alert id this event is related to | +| `ND_ALERT_EVENT_ID` | `alert_event_id` | `AlertEventID` | 44 | a sequential number of the alert transition (per host) | +| `ND_ALERT_UNIQUE_ID` | `alert_unique_id` | `AlertUniqueID` | 43 | a sequential number of the alert transition (per alert) | +| `ND_ALERT_TRANSITION_ID` | `alert_transition_id` | `AlertTransitionID` | 45 | the unique UUID of this alert transition | +| `ND_ALERT_CONFIG` | `alert_config` | `AlertConfig` | 46 | the alert configuration hash (UUID) | +| `ND_ALERT_NAME` | `alert` | `AlertName` | 47 | the alert name | +| `ND_ALERT_CLASS` | `alert_class` | `AlertClass` | 48 | the alert classification | +| `ND_ALERT_COMPONENT` | `alert_component` | `AlertComponent` | 49 | the alert component | +| `ND_ALERT_TYPE` | `alert_type` | `AlertType` | 50 | the alert type | +| `ND_ALERT_EXEC` | `alert_exec` | `AlertExec` | 51 | the alert notification program | +| `ND_ALERT_RECIPIENT` | `alert_recipient` | `AlertRecipient` | 52 | the alert recipient(s) | +| `ND_ALERT_VALUE` | `alert_value` | `AlertValue` | 54 | the current alert value | +| `ND_ALERT_VALUE_OLD` | `alert_value_old` | `AlertOldValue` | 55 | the previous alert value | +| `ND_ALERT_STATUS` | `alert_status` | `AlertStatus` | 56 | the current alert status | +| `ND_ALERT_STATUS_OLD` | `alert_status_old` | `AlertOldStatus` | 57 | the previous alert status | +| `ND_ALERT_UNITS` | `alert_units` | `AlertUnits` | 59 | the units of the alert | +| `ND_ALERT_SUMMARY` | `alert_summary` | `AlertSummary` | 60 | the summary text of the alert | +| `ND_ALERT_INFO` | `alert_info` | `AlertInfo` | 61 | the info text of the alert | +| `ND_ALERT_DURATION` | `alert_duration` | `AlertDuration` | 53 | the duration the alert was in its previous state | +| `ND_ALERT_NOTIFICATION_TIMESTAMP_USEC` | `alert_notification_timestamp` | `AlertNotificationTimeUsec` | 62 | the timestamp at which the notification delivery is scheduled | +| `ND_REQUEST` | `request` | `Request` | 63 | the full request during which the event happened | +| `MESSAGE` | `msg` | `Message` | 64 | the event message | + +For `wel` (Windows Event Logs), all logs carry an array of 64 string fields, and the index number of each field provides its meaning. +For `etw` (Event Tracing for Windows), Netdata logs in a structured way, and field names are available.
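+
+As a hypothetical illustration of how these fields get populated, consider a collector logging a failure
+through `nd_log()` (the call follows the Netdata logging API used throughout the collectors; the file name
+and every field value shown in the comments are made up for the example):
+
+```c
+// A collector fails to open a configuration file and logs the error.
+// The logger fills time/comm/source/level/errno/tid/thread on its own;
+// the caller provides only the message and its arguments.
+nd_log(NDLS_COLLECTORS, NDLP_ERR, "cannot open file '%s'", filename);
+
+// In logfmt output, such an event could be rendered along these lines
+// (illustrative values only):
+//   time="2024-11-25 18:33:56" comm=example-plugin source=collector level=error
+//   errno="2, No such file or directory" tid=12345 thread=PLUGIN[example]
+//   msg="cannot open file '/etc/netdata/example.conf'"
+```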
@@ -221,3 +222,117 @@ journalctl -u netdata --namespace=netdata # All netdata logs, the newest entries are displayed first journalctl -u netdata --namespace=netdata -r ``` + +## Using Event Tracing for Windows (ETW) + +ETW requires the publisher `Netdata` to be registered. Our Windows installer does this automatically. + +Registering the publisher is done via a manifest (`%SystemRoot%\System32\wevt_netdata_manifest.xml`) +and its message resources DLL (`%SystemRoot%\System32\wevt_netdata.dll`). + +If needed, the publisher can be registered and unregistered manually using these commands: + +```bat +REM register the Netdata publisher +wevtutil im "%SystemRoot%\System32\wevt_netdata_manifest.xml" "/mf:%SystemRoot%\System32\wevt_netdata.dll" "/rf:%SystemRoot%\System32\wevt_netdata.dll" + +REM unregister the Netdata publisher +wevtutil um "%SystemRoot%\System32\wevt_netdata_manifest.xml" +``` + +The structure of the logs is as follows: + + - Publisher `Netdata` + - Channel `Netdata/Daemon`: general messages about the Netdata service + - Channel `Netdata/Collector`: general messages about Netdata external plugins + - Channel `Netdata/Health`: alert transitions and general messages generated by Netdata's health engine + - Channel `Netdata/Access`: all accesses to Netdata APIs + - Channel `Netdata/Aclk`: for cloud connectivity tracing (disabled by default) + +Retention can be configured per Channel via the Event Viewer. Netdata does not set a default, so the system default is used. + +> **IMPORTANT**
+> Event Tracing for Windows (ETW) does not allow logging the percentage character `%`. +> A `%` followed by a number is recursively used for field expansion, and ETW provides +> no way to escape the character to prevent further expansion.
    +>
+> To work around this limitation, Netdata replaces every `%` that is followed by a number with `℅` +> (the Unicode character `care of`). Visually, they look similar, but when copying IPv6 addresses +> or URLs from the logs, you have to be careful to manually replace `℅` with `%` before using them. + +## Using Windows Event Logs (WEL) + +WEL has a different log structure, and unfortunately WEL and ETW must use different names if they are to be used +concurrently. + +For WEL, Netdata logs as follows: + + - Channel `NetdataWEL` (unfortunately `Netdata` cannot be used, it conflicts with the ETW Publisher name) + - Publisher `NetdataDaemon`: general messages about the Netdata service + - Publisher `NetdataCollector`: general messages about Netdata external plugins + - Publisher `NetdataHealth`: alert transitions and general messages generated by Netdata's health engine + - Publisher `NetdataAccess`: all accesses to Netdata APIs + - Publisher `NetdataAclk`: for cloud connectivity tracing (disabled by default) + +Publishers must have unique names system-wide, so we had to prefix them with `Netdata`. + +Retention can be configured per Publisher via the Event Viewer or the Registry. +By default, Netdata sets 20MiB for each of them, except `NetdataAclk` (5MiB) and `NetdataAccess` (35MiB), +for a total of 100MiB. + +For WEL, some registry entries are needed. Netdata automatically takes care of them when it starts. + +WEL does not have the problem ETW has with the percent character `%`, so Netdata logs it as-is. + +## Differences between ETW and WEL + +There are key differences between ETW and WEL. + +### Publishers and Providers +**Publishers** are collections of ETW Providers. A Publisher is implied by a manifest file (each manifest +file is considered a Publisher), and each manifest file can define multiple **Providers**. +Other than that, there is no entity related to **Publishers** in the system. + +**Publishers** are not defined for WEL. + +**Providers** are the applications or modules that log. Provider names must be unique across the system, +for ETW and WEL together. + +To define a **Provider**: + +- ETW requires a **Publisher** manifest coupled with resources DLLs and must be registered + via `wevtutil` (handled by the Netdata Windows installer automatically). +- WEL requires some registry entries and a message resources DLL (handled by Netdata automatically on startup). + +The Provider appears as `Source` in the Event Viewer, for both WEL and ETW. + +### Channels +- **Channels** for WEL are collections of WEL Providers (each WEL Provider is a single Stream of logs). +- **Channels** for ETW slice the logs of each Provider into multiple Streams. + +WEL Channels cannot have the same name as ETW Providers. This is why Netdata's ETW provider is +called `Netdata`, and its WEL channel is called `NetdataWEL`. + +Despite the fact that ETW **Publishers** and WEL **Channels** are both collections of Providers, +they are not similar. In ETW, a Publisher is a collection of its own Providers, but in WEL +a Channel may include independent WEL Providers (e.g. the "Applications" Channel). Additionally, +WEL Channels cannot include ETW Providers. + +### Retention +Retention is always defined per Stream. + +- Retention in ETW is defined per ETW Channel (ETW Provider Stream). +- Retention in WEL is defined per WEL Provider (each WEL Provider is a single Stream). 
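+
+To make the `%` to `℅` replacement described in the ETW section above concrete (it is revisited under
+Messages Formatting below), here is a minimal sketch of such a substitution. This is an illustration
+only: the function name and buffer handling are our own assumptions, not Netdata's actual implementation.
+
+```c
+#include <ctype.h>
+#include <stddef.h>
+
+// Hypothetical sketch: copy `src` into `dst`, replacing every '%' that is
+// followed by a digit with the UTF-8 encoding of U+2105 CARE OF (the bytes
+// 0xE2 0x84 0x85), so that ETW cannot expand it as a field reference.
+// `dst_size` must allow up to 3 output bytes per input byte, plus the null.
+static void etw_escape_percent(const char *src, char *dst, size_t dst_size) {
+    if (!dst_size) return;
+
+    size_t d = 0;
+    for (const char *s = src; *s && d + 4 <= dst_size; s++) {
+        if (*s == '%' && isdigit((unsigned char)s[1])) {
+            dst[d++] = (char)0xE2;   // UTF-8 lead byte of U+2105
+            dst[d++] = (char)0x84;
+            dst[d++] = (char)0x85;
+        }
+        else
+            dst[d++] = *s;
+    }
+    dst[d] = '\0';
+}
+```
+
+With this sketch, `%1` becomes `℅1`, while a `%` that is not followed by a digit is left untouched.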
+ +### Messages Formatting +- ETW supports recursive fields expansion, and therefore `%N` in fields is expanded recursively + (or replaced with an error message if expansion fails). Netdata replaces `%N` with `℅N` to stop + recursive expansion (since `%N` cannot be logged otherwise). +- WEL performs a single field expansion, and therefore the `%` character in fields is never expanded. + +### Usability + +- ETW names all the fields and allows multiple datatypes per field, enabling log consumers to know + what each field means and its datatype. +- WEL uses a simple string table for fields, and consumers need to map these string fields based on + their index. diff --git a/src/libnetdata/log/journal.c b/src/libnetdata/log/journal.c deleted file mode 100644 index 2182212f6..000000000 --- a/src/libnetdata/log/journal.c +++ /dev/null @@ -1,142 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include "journal.h" - -bool is_path_unix_socket(const char *path) { - // Check if the path is valid - if(!path || !*path) - return false; - - struct stat statbuf; - - // Use stat to check if the file exists and is a socket - if (stat(path, &statbuf) == -1) - // The file does not exist or cannot be accessed - return false; - - // Check if the file is a socket - if (S_ISSOCK(statbuf.st_mode)) - return true; - - return false; -} - -bool is_stderr_connected_to_journal(void) { - const char *journal_stream = getenv("JOURNAL_STREAM"); - if (!journal_stream) - return false; // JOURNAL_STREAM is not set - - struct stat stderr_stat; - if (fstat(STDERR_FILENO, &stderr_stat) < 0) - return false; // Error in getting stderr info - - // Parse device and inode from JOURNAL_STREAM - char *endptr; - long journal_dev = strtol(journal_stream, &endptr, 10); - if (*endptr != ':') - return false; // Format error in JOURNAL_STREAM - - long journal_ino = strtol(endptr + 1, NULL, 10); - - return (stderr_stat.st_dev == (dev_t)journal_dev) && (stderr_stat.st_ino == (ino_t)journal_ino); -} - -int journal_direct_fd(const char *path) { - if(!path || !*path) - path = JOURNAL_DIRECT_SOCKET; - - if(!is_path_unix_socket(path)) - return -1; - - int fd = socket(AF_UNIX, SOCK_DGRAM| DEFAULT_SOCKET_FLAGS, 0); - if (fd < 0) return -1; - - sock_setcloexec(fd); - - struct sockaddr_un addr; - memset(&addr, 0, sizeof(struct sockaddr_un)); - addr.sun_family = AF_UNIX; - strncpy(addr.sun_path, path, sizeof(addr.sun_path) - 1); - - // Connect the socket (optional, but can simplify send operations) - if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) { - close(fd); - return -1; - } - - return fd; -} - -static inline bool journal_send_with_memfd(int fd __maybe_unused, const char *msg __maybe_unused, size_t msg_len __maybe_unused) { -#if defined(__NR_memfd_create) && defined(MFD_ALLOW_SEALING) && defined(F_ADD_SEALS) && defined(F_SEAL_SHRINK) && defined(F_SEAL_GROW) && defined(F_SEAL_WRITE) - // Create a memory file descriptor - int memfd = (int)syscall(__NR_memfd_create, "journald", MFD_ALLOW_SEALING); - if (memfd < 0) return false; - - // Write data to the memfd - if (write(memfd, msg, msg_len) != (ssize_t)msg_len) { - close(memfd); - return false; - } - - // Seal the memfd to make it immutable - if (fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_GROW | F_SEAL_WRITE) < 0) { - close(memfd); - return false; - } - - struct iovec iov = {0}; - struct msghdr msghdr = {0}; - struct cmsghdr *cmsghdr; - char cmsgbuf[CMSG_SPACE(sizeof(int))]; - - msghdr.msg_iov = &iov; - msghdr.msg_iovlen = 1; - msghdr.msg_control = cmsgbuf; - 
msghdr.msg_controllen = sizeof(cmsgbuf); - - cmsghdr = CMSG_FIRSTHDR(&msghdr); - if(!cmsghdr) { - close(memfd); - return false; - } - - cmsghdr->cmsg_level = SOL_SOCKET; - cmsghdr->cmsg_type = SCM_RIGHTS; - cmsghdr->cmsg_len = CMSG_LEN(sizeof(int)); - memcpy(CMSG_DATA(cmsghdr), &memfd, sizeof(int)); - - ssize_t r = sendmsg(fd, &msghdr, 0); - - close(memfd); - return r >= 0; -#else - return false; -#endif -} - -bool journal_direct_send(int fd, const char *msg, size_t msg_len) { - // Send the datagram - if (send(fd, msg, msg_len, 0) < 0) { - if(errno != EMSGSIZE) - return false; - - // datagram is too large, fallback to memfd - if(!journal_send_with_memfd(fd, msg, msg_len)) - return false; - } - - return true; -} - -void journal_construct_path(char *dst, size_t dst_len, const char *host_prefix, const char *namespace_str) { - if(!host_prefix) - host_prefix = ""; - - if(namespace_str) - snprintfz(dst, dst_len, "%s/run/systemd/journal.%s/socket", - host_prefix, namespace_str); - else - snprintfz(dst, dst_len, "%s" JOURNAL_DIRECT_SOCKET, - host_prefix); -} diff --git a/src/libnetdata/log/journal.h b/src/libnetdata/log/journal.h deleted file mode 100644 index df8ece18b..000000000 --- a/src/libnetdata/log/journal.h +++ /dev/null @@ -1,18 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include "../libnetdata.h" - -#ifndef NETDATA_LOG_JOURNAL_H -#define NETDATA_LOG_JOURNAL_H - -#define JOURNAL_DIRECT_SOCKET "/run/systemd/journal/socket" - -void journal_construct_path(char *dst, size_t dst_len, const char *host_prefix, const char *namespace_str); - -int journal_direct_fd(const char *path); -bool journal_direct_send(int fd, const char *msg, size_t msg_len); - -bool is_path_unix_socket(const char *path); -bool is_stderr_connected_to_journal(void); - -#endif //NETDATA_LOG_JOURNAL_H diff --git a/src/libnetdata/log/log.c b/src/libnetdata/log/log.c deleted file mode 100644 index a31127c42..000000000 --- a/src/libnetdata/log/log.c +++ /dev/null @@ -1,2545 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -// do not REMOVE this, it is used by systemd-journal includes to prevent saving the file, function, line of the -// source code that makes the calls, allowing our loggers to log the lines of source code that actually log -#define SD_JOURNAL_SUPPRESS_LOCATION - -#include "../libnetdata.h" - -#if defined(OS_WINDOWS) -#include -#endif - -#ifdef __FreeBSD__ -#include -#endif - -#ifdef __APPLE__ -#include -#endif - -#if !defined(ENABLE_SENTRY) && defined(HAVE_BACKTRACE) -#include -#endif - -#ifdef HAVE_SYSTEMD -#include -#endif - -const char *program_name = ""; - -uint64_t debug_flags = 0; - -#ifdef ENABLE_ACLK -int aclklog_enabled = 0; -#endif - -// ---------------------------------------------------------------------------- - -struct nd_log_source; -static bool nd_log_limit_reached(struct nd_log_source *source); - -// ---------------------------------------------------------------------------- - -void errno_clear(void) { - errno = 0; - -#if defined(OS_WINDOWS) - SetLastError(ERROR_SUCCESS); -#endif -} - -// ---------------------------------------------------------------------------- -// logging method - -typedef enum __attribute__((__packed__)) { - NDLM_DISABLED = 0, - NDLM_DEVNULL, - NDLM_DEFAULT, - NDLM_JOURNAL, - NDLM_SYSLOG, - NDLM_STDOUT, - NDLM_STDERR, - NDLM_FILE, -} ND_LOG_METHOD; - -static struct { - ND_LOG_METHOD method; - const char *name; -} nd_log_methods[] = { - { .method = NDLM_DISABLED, .name = "none" }, - { .method = NDLM_DEVNULL, .name = "/dev/null" }, - { .method = 
NDLM_DEFAULT, .name = "default" }, - { .method = NDLM_JOURNAL, .name = "journal" }, - { .method = NDLM_SYSLOG, .name = "syslog" }, - { .method = NDLM_STDOUT, .name = "stdout" }, - { .method = NDLM_STDERR, .name = "stderr" }, - { .method = NDLM_FILE, .name = "file" }, -}; - -static ND_LOG_METHOD nd_log_method2id(const char *method) { - if(!method || !*method) - return NDLM_DEFAULT; - - size_t entries = sizeof(nd_log_methods) / sizeof(nd_log_methods[0]); - for(size_t i = 0; i < entries ;i++) { - if(strcmp(nd_log_methods[i].name, method) == 0) - return nd_log_methods[i].method; - } - - return NDLM_FILE; -} - -static const char *nd_log_id2method(ND_LOG_METHOD method) { - size_t entries = sizeof(nd_log_methods) / sizeof(nd_log_methods[0]); - for(size_t i = 0; i < entries ;i++) { - if(method == nd_log_methods[i].method) - return nd_log_methods[i].name; - } - - return "unknown"; -} - -#define IS_VALID_LOG_METHOD_FOR_EXTERNAL_PLUGINS(ndlo) ((ndlo) == NDLM_JOURNAL || (ndlo) == NDLM_SYSLOG || (ndlo) == NDLM_STDERR) - -const char *nd_log_method_for_external_plugins(const char *s) { - if(s && *s) { - ND_LOG_METHOD method = nd_log_method2id(s); - if(IS_VALID_LOG_METHOD_FOR_EXTERNAL_PLUGINS(method)) - return nd_log_id2method(method); - } - - return nd_log_id2method(NDLM_STDERR); -} - -// ---------------------------------------------------------------------------- -// workaround strerror_r() - -#if defined(STRERROR_R_CHAR_P) -// GLIBC version of strerror_r -static const char *strerror_result(const char *a, const char *b) { (void)b; return a; } -#elif defined(HAVE_STRERROR_R) -// POSIX version of strerror_r -static const char *strerror_result(int a, const char *b) { (void)a; return b; } -#elif defined(HAVE_C__GENERIC) - -// what a trick! -// http://stackoverflow.com/questions/479207/function-overloading-in-c -static const char *strerror_result_int(int a, const char *b) { (void)a; return b; } -static const char *strerror_result_string(const char *a, const char *b) { (void)b; return a; } - -#define strerror_result(a, b) _Generic((a), \ - int: strerror_result_int, \ - char *: strerror_result_string \ - )(a, b) - -#else -#error "cannot detect the format of function strerror_r()" -#endif - -static const char *errno2str(int errnum, char *buf, size_t size) { - return strerror_result(strerror_r(errnum, buf, size), buf); -} - -// ---------------------------------------------------------------------------- -// facilities -// -// sys/syslog.h (Linux) -// sys/sys/syslog.h (FreeBSD) -// bsd/sys/syslog.h (darwin-xnu) - -static struct { - int facility; - const char *name; -} nd_log_facilities[] = { - { LOG_AUTH, "auth" }, - { LOG_AUTHPRIV, "authpriv" }, - { LOG_CRON, "cron" }, - { LOG_DAEMON, "daemon" }, - { LOG_FTP, "ftp" }, - { LOG_KERN, "kern" }, - { LOG_LPR, "lpr" }, - { LOG_MAIL, "mail" }, - { LOG_NEWS, "news" }, - { LOG_SYSLOG, "syslog" }, - { LOG_USER, "user" }, - { LOG_UUCP, "uucp" }, - { LOG_LOCAL0, "local0" }, - { LOG_LOCAL1, "local1" }, - { LOG_LOCAL2, "local2" }, - { LOG_LOCAL3, "local3" }, - { LOG_LOCAL4, "local4" }, - { LOG_LOCAL5, "local5" }, - { LOG_LOCAL6, "local6" }, - { LOG_LOCAL7, "local7" }, - -#ifdef __FreeBSD__ - { LOG_CONSOLE, "console" }, - { LOG_NTP, "ntp" }, - - // FreeBSD does not consider 'security' as deprecated. - { LOG_SECURITY, "security" }, -#else - // For all other O/S 'security' is mapped to 'auth'. 
- { LOG_AUTH, "security" }, -#endif - -#ifdef __APPLE__ - { LOG_INSTALL, "install" }, - { LOG_NETINFO, "netinfo" }, - { LOG_RAS, "ras" }, - { LOG_REMOTEAUTH, "remoteauth" }, - { LOG_LAUNCHD, "launchd" }, - -#endif -}; - -static int nd_log_facility2id(const char *facility) { - size_t entries = sizeof(nd_log_facilities) / sizeof(nd_log_facilities[0]); - for(size_t i = 0; i < entries ;i++) { - if(strcmp(nd_log_facilities[i].name, facility) == 0) - return nd_log_facilities[i].facility; - } - - return LOG_DAEMON; -} - -static const char *nd_log_id2facility(int facility) { - size_t entries = sizeof(nd_log_facilities) / sizeof(nd_log_facilities[0]); - for(size_t i = 0; i < entries ;i++) { - if(nd_log_facilities[i].facility == facility) - return nd_log_facilities[i].name; - } - - return "daemon"; -} - -// ---------------------------------------------------------------------------- -// priorities - -static struct { - ND_LOG_FIELD_PRIORITY priority; - const char *name; -} nd_log_priorities[] = { - { .priority = NDLP_EMERG, .name = "emergency" }, - { .priority = NDLP_EMERG, .name = "emerg" }, - { .priority = NDLP_ALERT, .name = "alert" }, - { .priority = NDLP_CRIT, .name = "critical" }, - { .priority = NDLP_CRIT, .name = "crit" }, - { .priority = NDLP_ERR, .name = "error" }, - { .priority = NDLP_ERR, .name = "err" }, - { .priority = NDLP_WARNING, .name = "warning" }, - { .priority = NDLP_WARNING, .name = "warn" }, - { .priority = NDLP_NOTICE, .name = "notice" }, - { .priority = NDLP_INFO, .name = NDLP_INFO_STR }, - { .priority = NDLP_DEBUG, .name = "debug" }, -}; - -int nd_log_priority2id(const char *priority) { - size_t entries = sizeof(nd_log_priorities) / sizeof(nd_log_priorities[0]); - for(size_t i = 0; i < entries ;i++) { - if(strcmp(nd_log_priorities[i].name, priority) == 0) - return nd_log_priorities[i].priority; - } - - return NDLP_INFO; -} - -const char *nd_log_id2priority(ND_LOG_FIELD_PRIORITY priority) { - size_t entries = sizeof(nd_log_priorities) / sizeof(nd_log_priorities[0]); - for(size_t i = 0; i < entries ;i++) { - if(priority == nd_log_priorities[i].priority) - return nd_log_priorities[i].name; - } - - return NDLP_INFO_STR; -} - -// ---------------------------------------------------------------------------- -// log sources - -const char *nd_log_sources[] = { - [NDLS_UNSET] = "UNSET", - [NDLS_ACCESS] = "access", - [NDLS_ACLK] = "aclk", - [NDLS_COLLECTORS] = "collector", - [NDLS_DAEMON] = "daemon", - [NDLS_HEALTH] = "health", - [NDLS_DEBUG] = "debug", -}; - -size_t nd_log_source2id(const char *source, ND_LOG_SOURCES def) { - size_t entries = sizeof(nd_log_sources) / sizeof(nd_log_sources[0]); - for(size_t i = 0; i < entries ;i++) { - if(strcmp(nd_log_sources[i], source) == 0) - return i; - } - - return def; -} - - -static const char *nd_log_id2source(ND_LOG_SOURCES source) { - size_t entries = sizeof(nd_log_sources) / sizeof(nd_log_sources[0]); - if(source < entries) - return nd_log_sources[source]; - - return nd_log_sources[NDLS_COLLECTORS]; -} - -// ---------------------------------------------------------------------------- -// log output formats - -typedef enum __attribute__((__packed__)) { - NDLF_JOURNAL, - NDLF_LOGFMT, - NDLF_JSON, -} ND_LOG_FORMAT; - -static struct { - ND_LOG_FORMAT format; - const char *name; -} nd_log_formats[] = { - { .format = NDLF_JOURNAL, .name = "journal" }, - { .format = NDLF_LOGFMT, .name = "logfmt" }, - { .format = NDLF_JSON, .name = "json" }, -}; - -static ND_LOG_FORMAT nd_log_format2id(const char *format) { - if(!format || !*format) - return 
NDLF_LOGFMT; - - size_t entries = sizeof(nd_log_formats) / sizeof(nd_log_formats[0]); - for(size_t i = 0; i < entries ;i++) { - if(strcmp(nd_log_formats[i].name, format) == 0) - return nd_log_formats[i].format; - } - - return NDLF_LOGFMT; -} - -static const char *nd_log_id2format(ND_LOG_FORMAT format) { - size_t entries = sizeof(nd_log_formats) / sizeof(nd_log_formats[0]); - for(size_t i = 0; i < entries ;i++) { - if(format == nd_log_formats[i].format) - return nd_log_formats[i].name; - } - - return "logfmt"; -} - -// ---------------------------------------------------------------------------- -// format dates - -void log_date(char *buffer, size_t len, time_t now) { - if(unlikely(!buffer || !len)) - return; - - time_t t = now; - struct tm *tmp, tmbuf; - - tmp = localtime_r(&t, &tmbuf); - - if (unlikely(!tmp)) { - buffer[0] = '\0'; - return; - } - - if (unlikely(strftime(buffer, len, "%Y-%m-%d %H:%M:%S", tmp) == 0)) - buffer[0] = '\0'; - - buffer[len - 1] = '\0'; -} - -// ---------------------------------------------------------------------------- - -struct nd_log_limit { - usec_t started_monotonic_ut; - uint32_t counter; - uint32_t prevented; - - uint32_t throttle_period; - uint32_t logs_per_period; - uint32_t logs_per_period_backup; -}; - -#define ND_LOG_LIMITS_DEFAULT (struct nd_log_limit){ .logs_per_period = ND_LOG_DEFAULT_THROTTLE_LOGS, .logs_per_period_backup = ND_LOG_DEFAULT_THROTTLE_LOGS, .throttle_period = ND_LOG_DEFAULT_THROTTLE_PERIOD, } -#define ND_LOG_LIMITS_UNLIMITED (struct nd_log_limit){ .logs_per_period = 0, .logs_per_period_backup = 0, .throttle_period = 0, } - -struct nd_log_source { - SPINLOCK spinlock; - ND_LOG_METHOD method; - ND_LOG_FORMAT format; - const char *filename; - int fd; - FILE *fp; - - ND_LOG_FIELD_PRIORITY min_priority; - const char *pending_msg; - struct nd_log_limit limits; -}; - -static struct { - nd_uuid_t invocation_id; - - ND_LOG_SOURCES overwrite_process_source; - - struct nd_log_source sources[_NDLS_MAX]; - - struct { - bool initialized; - } journal; - - struct { - bool initialized; - int fd; - char filename[FILENAME_MAX + 1]; - } journal_direct; - - struct { - bool initialized; - int facility; - } syslog; - - struct { - SPINLOCK spinlock; - bool initialized; - } std_output; - - struct { - SPINLOCK spinlock; - bool initialized; - } std_error; - -} nd_log = { - .overwrite_process_source = 0, - .journal = { - .initialized = false, - }, - .journal_direct = { - .initialized = false, - .fd = -1, - }, - .syslog = { - .initialized = false, - .facility = LOG_DAEMON, - }, - .std_output = { - .spinlock = NETDATA_SPINLOCK_INITIALIZER, - .initialized = false, - }, - .std_error = { - .spinlock = NETDATA_SPINLOCK_INITIALIZER, - .initialized = false, - }, - .sources = { - [NDLS_UNSET] = { - .spinlock = NETDATA_SPINLOCK_INITIALIZER, - .method = NDLM_DISABLED, - .format = NDLF_JOURNAL, - .filename = NULL, - .fd = -1, - .fp = NULL, - .min_priority = NDLP_EMERG, - .limits = ND_LOG_LIMITS_UNLIMITED, - }, - [NDLS_ACCESS] = { - .spinlock = NETDATA_SPINLOCK_INITIALIZER, - .method = NDLM_DEFAULT, - .format = NDLF_LOGFMT, - .filename = LOG_DIR "/access.log", - .fd = -1, - .fp = NULL, - .min_priority = NDLP_DEBUG, - .limits = ND_LOG_LIMITS_UNLIMITED, - }, - [NDLS_ACLK] = { - .spinlock = NETDATA_SPINLOCK_INITIALIZER, - .method = NDLM_FILE, - .format = NDLF_LOGFMT, - .filename = LOG_DIR "/aclk.log", - .fd = -1, - .fp = NULL, - .min_priority = NDLP_DEBUG, - .limits = ND_LOG_LIMITS_UNLIMITED, - }, - [NDLS_COLLECTORS] = { - .spinlock = NETDATA_SPINLOCK_INITIALIZER, - .method = 
NDLM_DEFAULT, - .format = NDLF_LOGFMT, - .filename = LOG_DIR "/collectors.log", - .fd = STDERR_FILENO, - .fp = NULL, - .min_priority = NDLP_INFO, - .limits = ND_LOG_LIMITS_DEFAULT, - }, - [NDLS_DEBUG] = { - .spinlock = NETDATA_SPINLOCK_INITIALIZER, - .method = NDLM_DISABLED, - .format = NDLF_LOGFMT, - .filename = LOG_DIR "/debug.log", - .fd = STDOUT_FILENO, - .fp = NULL, - .min_priority = NDLP_DEBUG, - .limits = ND_LOG_LIMITS_UNLIMITED, - }, - [NDLS_DAEMON] = { - .spinlock = NETDATA_SPINLOCK_INITIALIZER, - .method = NDLM_DEFAULT, - .filename = LOG_DIR "/daemon.log", - .format = NDLF_LOGFMT, - .fd = -1, - .fp = NULL, - .min_priority = NDLP_INFO, - .limits = ND_LOG_LIMITS_DEFAULT, - }, - [NDLS_HEALTH] = { - .spinlock = NETDATA_SPINLOCK_INITIALIZER, - .method = NDLM_DEFAULT, - .format = NDLF_LOGFMT, - .filename = LOG_DIR "/health.log", - .fd = -1, - .fp = NULL, - .min_priority = NDLP_DEBUG, - .limits = ND_LOG_LIMITS_UNLIMITED, - }, - }, -}; - -__attribute__((constructor)) void initialize_invocation_id(void) { - // check for a NETDATA_INVOCATION_ID - if(uuid_parse_flexi(getenv("NETDATA_INVOCATION_ID"), nd_log.invocation_id) != 0) { - // not found, check for systemd set INVOCATION_ID - if(uuid_parse_flexi(getenv("INVOCATION_ID"), nd_log.invocation_id) != 0) { - // not found, generate a new one - uuid_generate_random(nd_log.invocation_id); - } - } - - char uuid[UUID_COMPACT_STR_LEN]; - uuid_unparse_lower_compact(nd_log.invocation_id, uuid); - setenv("NETDATA_INVOCATION_ID", uuid, 1); -} - -int nd_log_health_fd(void) { - if(nd_log.sources[NDLS_HEALTH].method == NDLM_FILE && nd_log.sources[NDLS_HEALTH].fd != -1) - return nd_log.sources[NDLS_HEALTH].fd; - - return STDERR_FILENO; -} - -int nd_log_collectors_fd(void) { - if(nd_log.sources[NDLS_COLLECTORS].method == NDLM_FILE && nd_log.sources[NDLS_COLLECTORS].fd != -1) - return nd_log.sources[NDLS_COLLECTORS].fd; - - return STDERR_FILENO; -} - -void nd_log_set_user_settings(ND_LOG_SOURCES source, const char *setting) { - char buf[FILENAME_MAX + 100]; - if(setting && *setting) - strncpyz(buf, setting, sizeof(buf) - 1); - else - buf[0] = '\0'; - - struct nd_log_source *ls = &nd_log.sources[source]; - char *output = strrchr(buf, '@'); - - if(!output) - // all of it is the output - output = buf; - else { - // we found an '@', the next char is the output - *output = '\0'; - output++; - - // parse the other params - char *remaining = buf; - while(remaining) { - char *value = strsep_skip_consecutive_separators(&remaining, ","); - if (!value || !*value) continue; - - char *name = strsep_skip_consecutive_separators(&value, "="); - if (!name || !*name) continue; - - if(strcmp(name, "logfmt") == 0) - ls->format = NDLF_LOGFMT; - else if(strcmp(name, "json") == 0) - ls->format = NDLF_JSON; - else if(strcmp(name, "journal") == 0) - ls->format = NDLF_JOURNAL; - else if(strcmp(name, "level") == 0 && value && *value) - ls->min_priority = nd_log_priority2id(value); - else if(strcmp(name, "protection") == 0 && value && *value) { - if(strcmp(value, "off") == 0 || strcmp(value, "none") == 0) { - ls->limits = ND_LOG_LIMITS_UNLIMITED; - ls->limits.counter = 0; - ls->limits.prevented = 0; - } - else { - ls->limits = ND_LOG_LIMITS_DEFAULT; - - char *slash = strchr(value, '/'); - if(slash) { - *slash = '\0'; - slash++; - ls->limits.logs_per_period = ls->limits.logs_per_period_backup = str2u(value); - ls->limits.throttle_period = str2u(slash); - } - else { - ls->limits.logs_per_period = ls->limits.logs_per_period_backup = str2u(value); - ls->limits.throttle_period = 
ND_LOG_DEFAULT_THROTTLE_PERIOD; - } - } - } - else - nd_log(NDLS_DAEMON, NDLP_ERR, "Error while parsing configuration of log source '%s'. " - "In config '%s', '%s' is not understood.", - nd_log_id2source(source), setting, name); - } - } - - if(!output || !*output || strcmp(output, "none") == 0 || strcmp(output, "off") == 0) { - ls->method = NDLM_DISABLED; - ls->filename = "/dev/null"; - } - else if(strcmp(output, "journal") == 0) { - ls->method = NDLM_JOURNAL; - ls->filename = NULL; - } - else if(strcmp(output, "syslog") == 0) { - ls->method = NDLM_SYSLOG; - ls->filename = NULL; - } - else if(strcmp(output, "/dev/null") == 0) { - ls->method = NDLM_DEVNULL; - ls->filename = "/dev/null"; - } - else if(strcmp(output, "system") == 0) { - if(ls->fd == STDERR_FILENO) { - ls->method = NDLM_STDERR; - ls->filename = NULL; - ls->fd = STDERR_FILENO; - } - else { - ls->method = NDLM_STDOUT; - ls->filename = NULL; - ls->fd = STDOUT_FILENO; - } - } - else if(strcmp(output, "stderr") == 0) { - ls->method = NDLM_STDERR; - ls->filename = NULL; - ls->fd = STDERR_FILENO; - } - else if(strcmp(output, "stdout") == 0) { - ls->method = NDLM_STDOUT; - ls->filename = NULL; - ls->fd = STDOUT_FILENO; - } - else { - ls->method = NDLM_FILE; - ls->filename = strdupz(output); - } - -#if defined(NETDATA_INTERNAL_CHECKS) || defined(NETDATA_DEV_MODE) - ls->min_priority = NDLP_DEBUG; -#endif - - if(source == NDLS_COLLECTORS) { - // set the method for the collector processes we will spawn - - ND_LOG_METHOD method; - ND_LOG_FORMAT format = ls->format; - ND_LOG_FIELD_PRIORITY priority = ls->min_priority; - - if(ls->method == NDLM_SYSLOG || ls->method == NDLM_JOURNAL) - method = ls->method; - else - method = NDLM_STDERR; - - setenv("NETDATA_LOG_METHOD", nd_log_id2method(method), 1); - setenv("NETDATA_LOG_FORMAT", nd_log_id2format(format), 1); - setenv("NETDATA_LOG_LEVEL", nd_log_id2priority(priority), 1); - } -} - -void nd_log_set_priority_level(const char *setting) { - if(!setting || !*setting) - setting = "info"; - - ND_LOG_FIELD_PRIORITY priority = nd_log_priority2id(setting); - -#if defined(NETDATA_INTERNAL_CHECKS) || defined(NETDATA_DEV_MODE) - priority = NDLP_DEBUG; -#endif - - for (size_t i = 0; i < _NDLS_MAX; i++) { - if (i != NDLS_DEBUG) - nd_log.sources[i].min_priority = priority; - } - - // the right one - setenv("NETDATA_LOG_LEVEL", nd_log_id2priority(priority), 1); -} - -void nd_log_set_facility(const char *facility) { - if(!facility || !*facility) - facility = "daemon"; - - nd_log.syslog.facility = nd_log_facility2id(facility); - setenv("NETDATA_SYSLOG_FACILITY", nd_log_id2facility(nd_log.syslog.facility), 1); -} - -void nd_log_set_flood_protection(size_t logs, time_t period) { - nd_log.sources[NDLS_DAEMON].limits.logs_per_period = - nd_log.sources[NDLS_DAEMON].limits.logs_per_period_backup; - nd_log.sources[NDLS_COLLECTORS].limits.logs_per_period = - nd_log.sources[NDLS_COLLECTORS].limits.logs_per_period_backup = logs; - - nd_log.sources[NDLS_DAEMON].limits.throttle_period = - nd_log.sources[NDLS_COLLECTORS].limits.throttle_period = period; - - char buf[100]; - snprintfz(buf, sizeof(buf), "%" PRIu64, (uint64_t )period); - setenv("NETDATA_ERRORS_THROTTLE_PERIOD", buf, 1); - snprintfz(buf, sizeof(buf), "%" PRIu64, (uint64_t )logs); - setenv("NETDATA_ERRORS_PER_PERIOD", buf, 1); -} - -static bool nd_log_journal_systemd_init(void) { -#ifdef HAVE_SYSTEMD - nd_log.journal.initialized = true; -#else - nd_log.journal.initialized = false; -#endif - - return nd_log.journal.initialized; -} - -static void 
nd_log_journal_direct_set_env(void) { - if(nd_log.sources[NDLS_COLLECTORS].method == NDLM_JOURNAL) - setenv("NETDATA_SYSTEMD_JOURNAL_PATH", nd_log.journal_direct.filename, 1); -} - -static bool nd_log_journal_direct_init(const char *path) { - if(nd_log.journal_direct.initialized) { - nd_log_journal_direct_set_env(); - return true; - } - - int fd; - char filename[FILENAME_MAX + 1]; - if(!is_path_unix_socket(path)) { - - journal_construct_path(filename, sizeof(filename), netdata_configured_host_prefix, "netdata"); - if (!is_path_unix_socket(filename) || (fd = journal_direct_fd(filename)) == -1) { - - journal_construct_path(filename, sizeof(filename), netdata_configured_host_prefix, NULL); - if (!is_path_unix_socket(filename) || (fd = journal_direct_fd(filename)) == -1) { - - journal_construct_path(filename, sizeof(filename), NULL, "netdata"); - if (!is_path_unix_socket(filename) || (fd = journal_direct_fd(filename)) == -1) { - - journal_construct_path(filename, sizeof(filename), NULL, NULL); - if (!is_path_unix_socket(filename) || (fd = journal_direct_fd(filename)) == -1) - return false; - } - } - } - } - else { - snprintfz(filename, sizeof(filename), "%s", path); - fd = journal_direct_fd(filename); - } - - if(fd < 0) - return false; - - nd_log.journal_direct.fd = fd; - nd_log.journal_direct.initialized = true; - - strncpyz(nd_log.journal_direct.filename, filename, sizeof(nd_log.journal_direct.filename) - 1); - nd_log_journal_direct_set_env(); - - return true; -} - -static void nd_log_syslog_init() { - if(nd_log.syslog.initialized) - return; - - openlog(program_name, LOG_PID, nd_log.syslog.facility); - nd_log.syslog.initialized = true; -} - -void nd_log_initialize_for_external_plugins(const char *name) { - // if we don't run under Netdata, log to stderr, - // otherwise, use the logging method Netdata wants us to use. - setenv("NETDATA_LOG_METHOD", "stderr", 0); - setenv("NETDATA_LOG_FORMAT", "logfmt", 0); - - nd_log.overwrite_process_source = NDLS_COLLECTORS; - program_name = name; - - for(size_t i = 0; i < _NDLS_MAX ;i++) { - nd_log.sources[i].method = STDERR_FILENO; - nd_log.sources[i].fd = -1; - nd_log.sources[i].fp = NULL; - } - - nd_log_set_priority_level(getenv("NETDATA_LOG_LEVEL")); - nd_log_set_facility(getenv("NETDATA_SYSLOG_FACILITY")); - - time_t period = 1200; - size_t logs = 200; - const char *s = getenv("NETDATA_ERRORS_THROTTLE_PERIOD"); - if(s && *s >= '0' && *s <= '9') { - period = str2l(s); - if(period < 0) period = 0; - } - - s = getenv("NETDATA_ERRORS_PER_PERIOD"); - if(s && *s >= '0' && *s <= '9') - logs = str2u(s); - - nd_log_set_flood_protection(logs, period); - - if(!netdata_configured_host_prefix) { - s = getenv("NETDATA_HOST_PREFIX"); - if(s && *s) - netdata_configured_host_prefix = (char *)s; - } - - ND_LOG_METHOD method = nd_log_method2id(getenv("NETDATA_LOG_METHOD")); - ND_LOG_FORMAT format = nd_log_format2id(getenv("NETDATA_LOG_FORMAT")); - - if(!IS_VALID_LOG_METHOD_FOR_EXTERNAL_PLUGINS(method)) { - if(is_stderr_connected_to_journal()) { - nd_log(NDLS_COLLECTORS, NDLP_WARNING, "NETDATA_LOG_METHOD is not set. Using journal."); - method = NDLM_JOURNAL; - } - else { - nd_log(NDLS_COLLECTORS, NDLP_WARNING, "NETDATA_LOG_METHOD is not set. Using stderr."); - method = NDLM_STDERR; - } - } - - switch(method) { - case NDLM_JOURNAL: - if(!nd_log_journal_direct_init(getenv("NETDATA_SYSTEMD_JOURNAL_PATH")) || - !nd_log_journal_direct_init(NULL) || !nd_log_journal_systemd_init()) { - nd_log(NDLS_COLLECTORS, NDLP_WARNING, "Failed to initialize journal. 
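
The fallback chain above only accepts a candidate journal path when it is an existing unix socket. A standalone sketch of that test (the helper name here is hypothetical; the real code uses is_path_unix_socket()):

    #include <stdbool.h>
    #include <sys/stat.h>

    // A candidate journald endpoint is usable only if it exists
    // and is a socket inode.
    static bool looks_like_unix_socket(const char *path) {
        struct stat st;
        if (!path || stat(path, &st) != 0)
            return false;
        return S_ISSOCK(st.st_mode);
    }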
Using stderr."); - method = NDLM_STDERR; - } - break; - - case NDLM_SYSLOG: - nd_log_syslog_init(); - break; - - default: - method = NDLM_STDERR; - break; - } - - for(size_t i = 0; i < _NDLS_MAX ;i++) { - nd_log.sources[i].method = method; - nd_log.sources[i].format = format; - nd_log.sources[i].fd = -1; - nd_log.sources[i].fp = NULL; - } - -// nd_log(NDLS_COLLECTORS, NDLP_NOTICE, "FINAL_LOG_METHOD: %s", nd_log_id2method(method)); -} - -static bool nd_log_replace_existing_fd(struct nd_log_source *e, int new_fd) { - if(new_fd == -1 || e->fd == -1 || - (e->fd == STDOUT_FILENO && nd_log.std_output.initialized) || - (e->fd == STDERR_FILENO && nd_log.std_error.initialized)) - return false; - - if(new_fd != e->fd) { - int t = dup2(new_fd, e->fd); - - bool ret = true; - if (t == -1) { - netdata_log_error("Cannot dup2() new fd %d to old fd %d for '%s'", new_fd, e->fd, e->filename); - ret = false; - } - else - close(new_fd); - - if(e->fd == STDOUT_FILENO) - nd_log.std_output.initialized = true; - else if(e->fd == STDERR_FILENO) - nd_log.std_error.initialized = true; - - return ret; - } - - return false; -} - -static void nd_log_open(struct nd_log_source *e, ND_LOG_SOURCES source) { - if(e->method == NDLM_DEFAULT) - nd_log_set_user_settings(source, e->filename); - - if((e->method == NDLM_FILE && !e->filename) || - (e->method == NDLM_DEVNULL && e->fd == -1)) - e->method = NDLM_DISABLED; - - if(e->fp) - fflush(e->fp); - - switch(e->method) { - case NDLM_SYSLOG: - nd_log_syslog_init(); - break; - - case NDLM_JOURNAL: - nd_log_journal_direct_init(NULL); - nd_log_journal_systemd_init(); - break; - - case NDLM_STDOUT: - e->fp = stdout; - e->fd = STDOUT_FILENO; - break; - - case NDLM_DISABLED: - break; - - case NDLM_DEFAULT: - case NDLM_STDERR: - e->method = NDLM_STDERR; - e->fp = stderr; - e->fd = STDERR_FILENO; - break; - - case NDLM_DEVNULL: - case NDLM_FILE: { - int fd = open(e->filename, O_WRONLY | O_APPEND | O_CREAT, 0664); - if(fd == -1) { - if(e->fd != STDOUT_FILENO && e->fd != STDERR_FILENO) { - e->fd = STDERR_FILENO; - e->method = NDLM_STDERR; - netdata_log_error("Cannot open log file '%s'. Falling back to stderr.", e->filename); - } - else - netdata_log_error("Cannot open log file '%s'. 
Leaving fd %d as-is.", e->filename, e->fd); - } - else { - if (!nd_log_replace_existing_fd(e, fd)) { - if(e->fd == STDOUT_FILENO || e->fd == STDERR_FILENO) { - if(e->fd == STDOUT_FILENO) - e->method = NDLM_STDOUT; - else if(e->fd == STDERR_FILENO) - e->method = NDLM_STDERR; - - // we have dup2() fd, so we can close the one we opened - if(fd != STDOUT_FILENO && fd != STDERR_FILENO) - close(fd); - } - else - e->fd = fd; - } - } - - // at this point we have e->fd set properly - - if(e->fd == STDOUT_FILENO) - e->fp = stdout; - else if(e->fd == STDERR_FILENO) - e->fp = stderr; - - if(!e->fp) { - e->fp = fdopen(e->fd, "a"); - if (!e->fp) { - netdata_log_error("Cannot fdopen() fd %d ('%s')", e->fd, e->filename); - - if(e->fd != STDOUT_FILENO && e->fd != STDERR_FILENO) - close(e->fd); - - e->fp = stderr; - e->fd = STDERR_FILENO; - } - } - else { - if (setvbuf(e->fp, NULL, _IOLBF, 0) != 0) - netdata_log_error("Cannot set line buffering on fd %d ('%s')", e->fd, e->filename); - } - } - break; - } -} - -static void nd_log_stdin_init(int fd, const char *filename) { - int f = open(filename, O_WRONLY | O_APPEND | O_CREAT, 0664); - if(f == -1) - return; - - if(f != fd) { - dup2(f, fd); - close(f); - } -} - -void nd_log_initialize(void) { - nd_log_stdin_init(STDIN_FILENO, "/dev/null"); - - for(size_t i = 0 ; i < _NDLS_MAX ; i++) - nd_log_open(&nd_log.sources[i], i); -} - -void nd_log_reopen_log_files(bool log) { - if(log) - netdata_log_info("Reopening all log files."); - - nd_log.std_output.initialized = false; - nd_log.std_error.initialized = false; - nd_log_initialize(); - - if(log) - netdata_log_info("Log files re-opened."); -} - -void nd_log_reopen_log_files_for_spawn_server(void) { - if(nd_log.syslog.initialized) { - closelog(); - nd_log.syslog.initialized = false; - nd_log_syslog_init(); - } - - if(nd_log.journal_direct.initialized) { - close(nd_log.journal_direct.fd); - nd_log.journal_direct.fd = -1; - nd_log.journal_direct.initialized = false; - nd_log_journal_direct_init(NULL); - } - - nd_log.sources[NDLS_UNSET].method = NDLM_DISABLED; - nd_log.sources[NDLS_ACCESS].method = NDLM_DISABLED; - nd_log.sources[NDLS_ACLK].method = NDLM_DISABLED; - nd_log.sources[NDLS_DEBUG].method = NDLM_DISABLED; - nd_log.sources[NDLS_HEALTH].method = NDLM_DISABLED; - nd_log_reopen_log_files(false); -} - -void chown_open_file(int fd, uid_t uid, gid_t gid) { - if(fd == -1) return; - - struct stat buf; - - if(fstat(fd, &buf) == -1) { - netdata_log_error("Cannot fstat() fd %d", fd); - return; - } - - if((buf.st_uid != uid || buf.st_gid != gid) && S_ISREG(buf.st_mode)) { - if(fchown(fd, uid, gid) == -1) - netdata_log_error("Cannot fchown() fd %d.", fd); - } -} - -void nd_log_chown_log_files(uid_t uid, gid_t gid) { - for(size_t i = 0 ; i < _NDLS_MAX ; i++) { - if(nd_log.sources[i].fd != -1 && nd_log.sources[i].fd != STDIN_FILENO) - chown_open_file(nd_log.sources[i].fd, uid, gid); - } -} - -// ---------------------------------------------------------------------------- -// annotators -struct log_field; -static void errno_annotator(BUFFER *wb, const char *key, struct log_field *lf); -static void priority_annotator(BUFFER *wb, const char *key, struct log_field *lf); -static void timestamp_usec_annotator(BUFFER *wb, const char *key, struct log_field *lf); - -#if defined(OS_WINDOWS) -static void winerror_annotator(BUFFER *wb, const char *key, struct log_field *lf); -#endif - -// ---------------------------------------------------------------------------- - -typedef void (*annotator_t)(BUFFER *wb, const char *key, struct 
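
A typical way a daemon drives the reopen logic above is from SIGHUP, so logrotate can move files out from under it safely. This driver is a hypothetical sketch, not part of the patch:

    #include <signal.h>

    static volatile sig_atomic_t reopen_requested = 0;

    static void on_sighup(int signo) {
        (void)signo;
        reopen_requested = 1;   // async-signal-safe: only set a flag
    }

    // in the main loop:
    //   if(reopen_requested) { reopen_requested = 0; nd_log_reopen_log_files(true); }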
log_field *lf); - -struct log_field { - const char *journal; - const char *logfmt; - annotator_t logfmt_annotator; - struct log_stack_entry entry; -}; - -#define THREAD_LOG_STACK_MAX 50 - -static __thread struct log_stack_entry *thread_log_stack_base[THREAD_LOG_STACK_MAX]; -static __thread size_t thread_log_stack_next = 0; - -static __thread struct log_field thread_log_fields[_NDF_MAX] = { - // THE ORDER DEFINES THE ORDER FIELDS WILL APPEAR IN logfmt - - [NDF_STOP] = { // processing will not stop on this - so it is ok to be first - .journal = NULL, - .logfmt = NULL, - .logfmt_annotator = NULL, - }, - [NDF_TIMESTAMP_REALTIME_USEC] = { - .journal = NULL, - .logfmt = "time", - .logfmt_annotator = timestamp_usec_annotator, - }, - [NDF_SYSLOG_IDENTIFIER] = { - .journal = "SYSLOG_IDENTIFIER", // standard journald field - .logfmt = "comm", - }, - [NDF_LOG_SOURCE] = { - .journal = "ND_LOG_SOURCE", - .logfmt = "source", - }, - [NDF_PRIORITY] = { - .journal = "PRIORITY", // standard journald field - .logfmt = "level", - .logfmt_annotator = priority_annotator, - }, - [NDF_ERRNO] = { - .journal = "ERRNO", // standard journald field - .logfmt = "errno", - .logfmt_annotator = errno_annotator, - }, -#if defined(OS_WINDOWS) - [NDF_WINERROR] = { - .journal = "WINERROR", - .logfmt = "winerror", - .logfmt_annotator = winerror_annotator, - }, -#endif - [NDF_INVOCATION_ID] = { - .journal = "INVOCATION_ID", // standard journald field - .logfmt = NULL, - }, - [NDF_LINE] = { - .journal = "CODE_LINE", // standard journald field - .logfmt = NULL, - }, - [NDF_FILE] = { - .journal = "CODE_FILE", // standard journald field - .logfmt = NULL, - }, - [NDF_FUNC] = { - .journal = "CODE_FUNC", // standard journald field - .logfmt = NULL, - }, - [NDF_TID] = { - .journal = "TID", // standard journald field - .logfmt = "tid", - }, - [NDF_THREAD_TAG] = { - .journal = "THREAD_TAG", - .logfmt = "thread", - }, - [NDF_MESSAGE_ID] = { - .journal = "MESSAGE_ID", - .logfmt = "msg_id", - }, - [NDF_MODULE] = { - .journal = "ND_MODULE", - .logfmt = "module", - }, - [NDF_NIDL_NODE] = { - .journal = "ND_NIDL_NODE", - .logfmt = "node", - }, - [NDF_NIDL_INSTANCE] = { - .journal = "ND_NIDL_INSTANCE", - .logfmt = "instance", - }, - [NDF_NIDL_CONTEXT] = { - .journal = "ND_NIDL_CONTEXT", - .logfmt = "context", - }, - [NDF_NIDL_DIMENSION] = { - .journal = "ND_NIDL_DIMENSION", - .logfmt = "dimension", - }, - [NDF_SRC_TRANSPORT] = { - .journal = "ND_SRC_TRANSPORT", - .logfmt = "src_transport", - }, - [NDF_ACCOUNT_ID] = { - .journal = "ND_ACCOUNT_ID", - .logfmt = "account", - }, - [NDF_USER_NAME] = { - .journal = "ND_USER_NAME", - .logfmt = "user", - }, - [NDF_USER_ROLE] = { - .journal = "ND_USER_ROLE", - .logfmt = "role", - }, - [NDF_USER_ACCESS] = { - .journal = "ND_USER_PERMISSIONS", - .logfmt = "permissions", - }, - [NDF_SRC_IP] = { - .journal = "ND_SRC_IP", - .logfmt = "src_ip", - }, - [NDF_SRC_FORWARDED_HOST] = { - .journal = "ND_SRC_FORWARDED_HOST", - .logfmt = "src_forwarded_host", - }, - [NDF_SRC_FORWARDED_FOR] = { - .journal = "ND_SRC_FORWARDED_FOR", - .logfmt = "src_forwarded_for", - }, - [NDF_SRC_PORT] = { - .journal = "ND_SRC_PORT", - .logfmt = "src_port", - }, - [NDF_SRC_CAPABILITIES] = { - .journal = "ND_SRC_CAPABILITIES", - .logfmt = "src_capabilities", - }, - [NDF_DST_TRANSPORT] = { - .journal = "ND_DST_TRANSPORT", - .logfmt = "dst_transport", - }, - [NDF_DST_IP] = { - .journal = "ND_DST_IP", - .logfmt = "dst_ip", - }, - [NDF_DST_PORT] = { - .journal = "ND_DST_PORT", - .logfmt = "dst_port", - }, - [NDF_DST_CAPABILITIES] = { - 
.journal = "ND_DST_CAPABILITIES", - .logfmt = "dst_capabilities", - }, - [NDF_REQUEST_METHOD] = { - .journal = "ND_REQUEST_METHOD", - .logfmt = "req_method", - }, - [NDF_RESPONSE_CODE] = { - .journal = "ND_RESPONSE_CODE", - .logfmt = "code", - }, - [NDF_CONNECTION_ID] = { - .journal = "ND_CONNECTION_ID", - .logfmt = "conn", - }, - [NDF_TRANSACTION_ID] = { - .journal = "ND_TRANSACTION_ID", - .logfmt = "transaction", - }, - [NDF_RESPONSE_SENT_BYTES] = { - .journal = "ND_RESPONSE_SENT_BYTES", - .logfmt = "sent_bytes", - }, - [NDF_RESPONSE_SIZE_BYTES] = { - .journal = "ND_RESPONSE_SIZE_BYTES", - .logfmt = "size_bytes", - }, - [NDF_RESPONSE_PREPARATION_TIME_USEC] = { - .journal = "ND_RESPONSE_PREP_TIME_USEC", - .logfmt = "prep_ut", - }, - [NDF_RESPONSE_SENT_TIME_USEC] = { - .journal = "ND_RESPONSE_SENT_TIME_USEC", - .logfmt = "sent_ut", - }, - [NDF_RESPONSE_TOTAL_TIME_USEC] = { - .journal = "ND_RESPONSE_TOTAL_TIME_USEC", - .logfmt = "total_ut", - }, - [NDF_ALERT_ID] = { - .journal = "ND_ALERT_ID", - .logfmt = "alert_id", - }, - [NDF_ALERT_UNIQUE_ID] = { - .journal = "ND_ALERT_UNIQUE_ID", - .logfmt = "alert_unique_id", - }, - [NDF_ALERT_TRANSITION_ID] = { - .journal = "ND_ALERT_TRANSITION_ID", - .logfmt = "alert_transition_id", - }, - [NDF_ALERT_EVENT_ID] = { - .journal = "ND_ALERT_EVENT_ID", - .logfmt = "alert_event_id", - }, - [NDF_ALERT_CONFIG_HASH] = { - .journal = "ND_ALERT_CONFIG", - .logfmt = "alert_config", - }, - [NDF_ALERT_NAME] = { - .journal = "ND_ALERT_NAME", - .logfmt = "alert", - }, - [NDF_ALERT_CLASS] = { - .journal = "ND_ALERT_CLASS", - .logfmt = "alert_class", - }, - [NDF_ALERT_COMPONENT] = { - .journal = "ND_ALERT_COMPONENT", - .logfmt = "alert_component", - }, - [NDF_ALERT_TYPE] = { - .journal = "ND_ALERT_TYPE", - .logfmt = "alert_type", - }, - [NDF_ALERT_EXEC] = { - .journal = "ND_ALERT_EXEC", - .logfmt = "alert_exec", - }, - [NDF_ALERT_RECIPIENT] = { - .journal = "ND_ALERT_RECIPIENT", - .logfmt = "alert_recipient", - }, - [NDF_ALERT_VALUE] = { - .journal = "ND_ALERT_VALUE", - .logfmt = "alert_value", - }, - [NDF_ALERT_VALUE_OLD] = { - .journal = "ND_ALERT_VALUE_OLD", - .logfmt = "alert_value_old", - }, - [NDF_ALERT_STATUS] = { - .journal = "ND_ALERT_STATUS", - .logfmt = "alert_status", - }, - [NDF_ALERT_STATUS_OLD] = { - .journal = "ND_ALERT_STATUS_OLD", - .logfmt = "alert_value_old", - }, - [NDF_ALERT_UNITS] = { - .journal = "ND_ALERT_UNITS", - .logfmt = "alert_units", - }, - [NDF_ALERT_SUMMARY] = { - .journal = "ND_ALERT_SUMMARY", - .logfmt = "alert_summary", - }, - [NDF_ALERT_INFO] = { - .journal = "ND_ALERT_INFO", - .logfmt = "alert_info", - }, - [NDF_ALERT_DURATION] = { - .journal = "ND_ALERT_DURATION", - .logfmt = "alert_duration", - }, - [NDF_ALERT_NOTIFICATION_REALTIME_USEC] = { - .journal = "ND_ALERT_NOTIFICATION_TIMESTAMP_USEC", - .logfmt = "alert_notification_timestamp", - .logfmt_annotator = timestamp_usec_annotator, - }, - - // put new items here - // leave the request URL and the message last - - [NDF_REQUEST] = { - .journal = "ND_REQUEST", - .logfmt = "request", - }, - [NDF_MESSAGE] = { - .journal = "MESSAGE", - .logfmt = "msg", - }, -}; - -#define THREAD_FIELDS_MAX (sizeof(thread_log_fields) / sizeof(thread_log_fields[0])) - -ND_LOG_FIELD_ID nd_log_field_id_by_name(const char *field, size_t len) { - for(size_t i = 0; i < THREAD_FIELDS_MAX ;i++) { - if(thread_log_fields[i].journal && strlen(thread_log_fields[i].journal) == len && strncmp(field, thread_log_fields[i].journal, len) == 0) - return i; - } - - return NDF_STOP; -} - -void log_stack_pop(void *ptr) { 
- if(!ptr) return; - - struct log_stack_entry *lgs = *(struct log_stack_entry (*)[])ptr; - - if(unlikely(!thread_log_stack_next || lgs != thread_log_stack_base[thread_log_stack_next - 1])) { - fatal("You cannot pop in the middle of the stack, or an item not in the stack"); - return; - } - - thread_log_stack_next--; -} - -void log_stack_push(struct log_stack_entry *lgs) { - if(!lgs || thread_log_stack_next >= THREAD_LOG_STACK_MAX) return; - thread_log_stack_base[thread_log_stack_next++] = lgs; -} - -// ---------------------------------------------------------------------------- -// json formatter - -static void nd_logger_json(BUFFER *wb, struct log_field *fields, size_t fields_max) { - - // --- FIELD_PARSER_VERSIONS --- - // - // IMPORTANT: - // THERE ARE 6 VERSIONS OF THIS CODE - // - // 1. journal (direct socket API), - // 2. journal (libsystemd API), - // 3. logfmt, - // 4. json, - // 5. convert to uint64 - // 6. convert to int64 - // - // UPDATE ALL OF THEM FOR NEW FEATURES OR FIXES - - buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_MINIFY); - CLEAN_BUFFER *tmp = NULL; - - for (size_t i = 0; i < fields_max; i++) { - if (!fields[i].entry.set || !fields[i].logfmt) - continue; - - const char *key = fields[i].logfmt; - - const char *s = NULL; - switch(fields[i].entry.type) { - case NDFT_TXT: - s = fields[i].entry.txt; - break; - case NDFT_STR: - s = string2str(fields[i].entry.str); - break; - case NDFT_BFR: - s = buffer_tostring(fields[i].entry.bfr); - break; - case NDFT_U64: - buffer_json_member_add_uint64(wb, key, fields[i].entry.u64); - break; - case NDFT_I64: - buffer_json_member_add_int64(wb, key, fields[i].entry.i64); - break; - case NDFT_DBL: - buffer_json_member_add_double(wb, key, fields[i].entry.dbl); - break; - case NDFT_UUID: - if(!uuid_is_null(*fields[i].entry.uuid)) { - char u[UUID_COMPACT_STR_LEN]; - uuid_unparse_lower_compact(*fields[i].entry.uuid, u); - buffer_json_member_add_string(wb, key, u); - } - break; - case NDFT_CALLBACK: { - if(!tmp) - tmp = buffer_create(1024, NULL); - else - buffer_flush(tmp); - if(fields[i].entry.cb.formatter(tmp, fields[i].entry.cb.formatter_data)) - s = buffer_tostring(tmp); - else - s = NULL; - } - break; - default: - s = "UNHANDLED"; - break; - } - - if(s && *s) - buffer_json_member_add_string(wb, key, s); - } - - buffer_json_finalize(wb); -} - -// ---------------------------------------------------------------------------- -// logfmt formatter - - -static int64_t log_field_to_int64(struct log_field *lf) { - - // --- FIELD_PARSER_VERSIONS --- - // - // IMPORTANT: - // THERE ARE 6 VERSIONS OF THIS CODE - // - // 1. journal (direct socket API), - // 2. journal (libsystemd API), - // 3. logfmt, - // 4. json, - // 5. convert to uint64 - // 6. 
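
The push/pop pair above is normally used through the ND_LOG_STACK helpers declared in log.h (further below in this patch). A usage sketch, with a hypothetical caller:

    void handle_request(const char *transaction_id) {
        ND_LOG_STACK lgs[] = {
            ND_LOG_FIELD_TXT(NDF_TRANSACTION_ID, transaction_id),
            ND_LOG_FIELD_TXT(NDF_REQUEST_METHOD, "GET"),
            ND_LOG_FIELD_END(),
        };
        ND_LOG_STACK_PUSH(lgs);

        // every log call in this scope now carries both fields
        nd_log(NDLS_ACCESS, NDLP_INFO, "request started");
    }   // lgs is popped automatically via the cleanup attribute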
convert to int64 - // - // UPDATE ALL OF THEM FOR NEW FEATURES OR FIXES - - CLEAN_BUFFER *tmp = NULL; - const char *s = NULL; - - switch(lf->entry.type) { - case NDFT_UUID: - case NDFT_UNSET: - return 0; - - case NDFT_TXT: - s = lf->entry.txt; - break; - - case NDFT_STR: - s = string2str(lf->entry.str); - break; - - case NDFT_BFR: - s = buffer_tostring(lf->entry.bfr); - break; - - case NDFT_CALLBACK: - tmp = buffer_create(0, NULL); - - if(lf->entry.cb.formatter(tmp, lf->entry.cb.formatter_data)) - s = buffer_tostring(tmp); - else - s = NULL; - break; - - case NDFT_U64: - return (int64_t)lf->entry.u64; - - case NDFT_I64: - return (int64_t)lf->entry.i64; - - case NDFT_DBL: - return (int64_t)lf->entry.dbl; - } - - if(s && *s) - return str2ll(s, NULL); - - return 0; -} - -static uint64_t log_field_to_uint64(struct log_field *lf) { - - // --- FIELD_PARSER_VERSIONS --- - // - // IMPORTANT: - // THERE ARE 6 VERSIONS OF THIS CODE - // - // 1. journal (direct socket API), - // 2. journal (libsystemd API), - // 3. logfmt, - // 4. json, - // 5. convert to uint64 - // 6. convert to int64 - // - // UPDATE ALL OF THEM FOR NEW FEATURES OR FIXES - - CLEAN_BUFFER *tmp = NULL; - const char *s = NULL; - - switch(lf->entry.type) { - case NDFT_UUID: - case NDFT_UNSET: - return 0; - - case NDFT_TXT: - s = lf->entry.txt; - break; - - case NDFT_STR: - s = string2str(lf->entry.str); - break; - - case NDFT_BFR: - s = buffer_tostring(lf->entry.bfr); - break; - - case NDFT_CALLBACK: - tmp = buffer_create(0, NULL); - - if(lf->entry.cb.formatter(tmp, lf->entry.cb.formatter_data)) - s = buffer_tostring(tmp); - else - s = NULL; - break; - - case NDFT_U64: - return lf->entry.u64; - - case NDFT_I64: - return lf->entry.i64; - - case NDFT_DBL: - return (uint64_t) lf->entry.dbl; - } - - if(s && *s) - return str2uint64_t(s, NULL); - - return 0; -} - -static void timestamp_usec_annotator(BUFFER *wb, const char *key, struct log_field *lf) { - usec_t ut = log_field_to_uint64(lf); - - if(!ut) - return; - - char datetime[RFC3339_MAX_LENGTH]; - rfc3339_datetime_ut(datetime, sizeof(datetime), ut, 3, false); - - if(buffer_strlen(wb)) - buffer_fast_strcat(wb, " ", 1); - - buffer_strcat(wb, key); - buffer_fast_strcat(wb, "=", 1); - buffer_json_strcat(wb, datetime); -} - -static void errno_annotator(BUFFER *wb, const char *key, struct log_field *lf) { - int64_t errnum = log_field_to_int64(lf); - - if(errnum == 0) - return; - - char buf[1024]; - const char *s = errno2str((int)errnum, buf, sizeof(buf)); - - if(buffer_strlen(wb)) - buffer_fast_strcat(wb, " ", 1); - - buffer_strcat(wb, key); - buffer_fast_strcat(wb, "=\"", 2); - buffer_print_int64(wb, errnum); - buffer_fast_strcat(wb, ", ", 2); - buffer_json_strcat(wb, s); - buffer_fast_strcat(wb, "\"", 1); -} - -#if defined(OS_WINDOWS) -static void winerror_annotator(BUFFER *wb, const char *key, struct log_field *lf) { - DWORD errnum = log_field_to_uint64(lf); - - if(errnum == 0) - return; - - char buf[1024]; - DWORD size = FormatMessageA( - FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, - NULL, - errnum, - MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), - buf, - (DWORD)(sizeof(buf) - 1), - NULL - ); - if(size > 0) { - // remove \r\n at the end - while(size > 0 && (buf[size - 1] == '\r' || buf[size - 1] == '\n')) - buf[--size] = '\0'; - } - else - size = snprintf(buf, sizeof(buf) - 1, "unknown error code"); - - buf[size] = '\0'; - - if(buffer_strlen(wb)) - buffer_fast_strcat(wb, " ", 1); - - buffer_strcat(wb, key); - buffer_fast_strcat(wb, "=\"", 2); - buffer_print_int64(wb, 
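
Illustrative logfmt fragments produced by the annotators in this block (example values, not captured output):

    time=2024-11-25T18:33:56.123Z                 <- timestamp_usec_annotator
    errno="2, No such file or directory"          <- errno_annotator
    level=info                                    <- priority_annotator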
errnum); - buffer_fast_strcat(wb, ", ", 2); - buffer_json_strcat(wb, buf); - buffer_fast_strcat(wb, "\"", 1); -} -#endif - -static void priority_annotator(BUFFER *wb, const char *key, struct log_field *lf) { - uint64_t pri = log_field_to_uint64(lf); - - if(buffer_strlen(wb)) - buffer_fast_strcat(wb, " ", 1); - - buffer_strcat(wb, key); - buffer_fast_strcat(wb, "=", 1); - buffer_strcat(wb, nd_log_id2priority(pri)); -} - -static bool needs_quotes_for_logfmt(const char *s) -{ - static bool safe_for_logfmt[256] = { - [' '] = true, ['!'] = true, ['"'] = false, ['#'] = true, ['$'] = true, ['%'] = true, ['&'] = true, - ['\''] = true, ['('] = true, [')'] = true, ['*'] = true, ['+'] = true, [','] = true, ['-'] = true, - ['.'] = true, ['/'] = true, ['0'] = true, ['1'] = true, ['2'] = true, ['3'] = true, ['4'] = true, - ['5'] = true, ['6'] = true, ['7'] = true, ['8'] = true, ['9'] = true, [':'] = true, [';'] = true, - ['<'] = true, ['='] = true, ['>'] = true, ['?'] = true, ['@'] = true, ['A'] = true, ['B'] = true, - ['C'] = true, ['D'] = true, ['E'] = true, ['F'] = true, ['G'] = true, ['H'] = true, ['I'] = true, - ['J'] = true, ['K'] = true, ['L'] = true, ['M'] = true, ['N'] = true, ['O'] = true, ['P'] = true, - ['Q'] = true, ['R'] = true, ['S'] = true, ['T'] = true, ['U'] = true, ['V'] = true, ['W'] = true, - ['X'] = true, ['Y'] = true, ['Z'] = true, ['['] = true, ['\\'] = false, [']'] = true, ['^'] = true, - ['_'] = true, ['`'] = true, ['a'] = true, ['b'] = true, ['c'] = true, ['d'] = true, ['e'] = true, - ['f'] = true, ['g'] = true, ['h'] = true, ['i'] = true, ['j'] = true, ['k'] = true, ['l'] = true, - ['m'] = true, ['n'] = true, ['o'] = true, ['p'] = true, ['q'] = true, ['r'] = true, ['s'] = true, - ['t'] = true, ['u'] = true, ['v'] = true, ['w'] = true, ['x'] = true, ['y'] = true, ['z'] = true, - ['{'] = true, ['|'] = true, ['}'] = true, ['~'] = true, [0x7f] = true, - }; - - if(!*s) - return true; - - while(*s) { - if(*s == '=' || isspace((uint8_t)*s) || !safe_for_logfmt[(uint8_t)*s]) - return true; - - s++; - } - - return false; -} - -static void string_to_logfmt(BUFFER *wb, const char *s) -{ - bool spaces = needs_quotes_for_logfmt(s); - - if(spaces) - buffer_fast_strcat(wb, "\"", 1); - - buffer_json_strcat(wb, s); - - if(spaces) - buffer_fast_strcat(wb, "\"", 1); -} - -static void nd_logger_logfmt(BUFFER *wb, struct log_field *fields, size_t fields_max) -{ - - // --- FIELD_PARSER_VERSIONS --- - // - // IMPORTANT: - // THERE ARE 6 VERSIONS OF THIS CODE - // - // 1. journal (direct socket API), - // 2. journal (libsystemd API), - // 3. logfmt, - // 4. json, - // 5. convert to uint64 - // 6. 
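
The quoting rule above leaves a value bare only when every byte is in the safe set and there is no '=' or whitespace; everything else is quoted and JSON-escaped. A few illustrative results:

    // string_to_logfmt() output for a few inputs (illustrative):
    //   ok            ->  msg=ok
    //   hello world   ->  msg="hello world"     (space forces quoting)
    //   a=b           ->  msg="a=b"             ('=' forces quoting)
    //   say "hi"      ->  msg="say \"hi\""      (escaped by buffer_json_strcat)
    //   ""            ->  msg=""                (empty values are quoted)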
convert to int64 - // - // UPDATE ALL OF THEM FOR NEW FEATURES OR FIXES - - CLEAN_BUFFER *tmp = NULL; - - for (size_t i = 0; i < fields_max; i++) { - if (!fields[i].entry.set || !fields[i].logfmt) - continue; - - const char *key = fields[i].logfmt; - - if(fields[i].logfmt_annotator) - fields[i].logfmt_annotator(wb, key, &fields[i]); - else { - if(buffer_strlen(wb)) - buffer_fast_strcat(wb, " ", 1); - - switch(fields[i].entry.type) { - case NDFT_TXT: - if(*fields[i].entry.txt) { - buffer_strcat(wb, key); - buffer_fast_strcat(wb, "=", 1); - string_to_logfmt(wb, fields[i].entry.txt); - } - break; - case NDFT_STR: - buffer_strcat(wb, key); - buffer_fast_strcat(wb, "=", 1); - string_to_logfmt(wb, string2str(fields[i].entry.str)); - break; - case NDFT_BFR: - if(buffer_strlen(fields[i].entry.bfr)) { - buffer_strcat(wb, key); - buffer_fast_strcat(wb, "=", 1); - string_to_logfmt(wb, buffer_tostring(fields[i].entry.bfr)); - } - break; - case NDFT_U64: - buffer_strcat(wb, key); - buffer_fast_strcat(wb, "=", 1); - buffer_print_uint64(wb, fields[i].entry.u64); - break; - case NDFT_I64: - buffer_strcat(wb, key); - buffer_fast_strcat(wb, "=", 1); - buffer_print_int64(wb, fields[i].entry.i64); - break; - case NDFT_DBL: - buffer_strcat(wb, key); - buffer_fast_strcat(wb, "=", 1); - buffer_print_netdata_double(wb, fields[i].entry.dbl); - break; - case NDFT_UUID: - if(!uuid_is_null(*fields[i].entry.uuid)) { - char u[UUID_COMPACT_STR_LEN]; - uuid_unparse_lower_compact(*fields[i].entry.uuid, u); - buffer_strcat(wb, key); - buffer_fast_strcat(wb, "=", 1); - buffer_fast_strcat(wb, u, sizeof(u) - 1); - } - break; - case NDFT_CALLBACK: { - if(!tmp) - tmp = buffer_create(1024, NULL); - else - buffer_flush(tmp); - if(fields[i].entry.cb.formatter(tmp, fields[i].entry.cb.formatter_data)) { - buffer_strcat(wb, key); - buffer_fast_strcat(wb, "=", 1); - string_to_logfmt(wb, buffer_tostring(tmp)); - } - } - break; - default: - buffer_strcat(wb, "UNHANDLED"); - break; - } - } - } -} - -// ---------------------------------------------------------------------------- -// journal logger - -bool nd_log_journal_socket_available(void) { - if(netdata_configured_host_prefix && *netdata_configured_host_prefix) { - char filename[FILENAME_MAX + 1]; - - snprintfz(filename, sizeof(filename), "%s%s", - netdata_configured_host_prefix, "/run/systemd/journal/socket"); - - if(is_path_unix_socket(filename)) - return true; - } - - return is_path_unix_socket("/run/systemd/journal/socket"); -} - -static bool nd_logger_journal_libsystemd(struct log_field *fields __maybe_unused, size_t fields_max __maybe_unused) { -#ifdef HAVE_SYSTEMD - - // --- FIELD_PARSER_VERSIONS --- - // - // IMPORTANT: - // THERE ARE 6 VERSIONS OF THIS CODE - // - // 1. journal (direct socket API), - // 2. journal (libsystemd API), - // 3. logfmt, - // 4. json, - // 5. convert to uint64 - // 6. 
convert to int64 - // - // UPDATE ALL OF THEM FOR NEW FEATURES OR FIXES - - struct iovec iov[fields_max]; - int iov_count = 0; - - memset(iov, 0, sizeof(iov)); - - CLEAN_BUFFER *tmp = NULL; - - for (size_t i = 0; i < fields_max; i++) { - if (!fields[i].entry.set || !fields[i].journal) - continue; - - const char *key = fields[i].journal; - char *value = NULL; - int rc = 0; - switch (fields[i].entry.type) { - case NDFT_TXT: - if(*fields[i].entry.txt) - rc = asprintf(&value, "%s=%s", key, fields[i].entry.txt); - break; - case NDFT_STR: - rc = asprintf(&value, "%s=%s", key, string2str(fields[i].entry.str)); - break; - case NDFT_BFR: - if(buffer_strlen(fields[i].entry.bfr)) - rc = asprintf(&value, "%s=%s", key, buffer_tostring(fields[i].entry.bfr)); - break; - case NDFT_U64: - rc = asprintf(&value, "%s=%" PRIu64, key, fields[i].entry.u64); - break; - case NDFT_I64: - rc = asprintf(&value, "%s=%" PRId64, key, fields[i].entry.i64); - break; - case NDFT_DBL: - rc = asprintf(&value, "%s=%f", key, fields[i].entry.dbl); - break; - case NDFT_UUID: - if(!uuid_is_null(*fields[i].entry.uuid)) { - char u[UUID_COMPACT_STR_LEN]; - uuid_unparse_lower_compact(*fields[i].entry.uuid, u); - rc = asprintf(&value, "%s=%s", key, u); - } - break; - case NDFT_CALLBACK: { - if(!tmp) - tmp = buffer_create(1024, NULL); - else - buffer_flush(tmp); - if(fields[i].entry.cb.formatter(tmp, fields[i].entry.cb.formatter_data)) - rc = asprintf(&value, "%s=%s", key, buffer_tostring(tmp)); - } - break; - default: - rc = asprintf(&value, "%s=%s", key, "UNHANDLED"); - break; - } - - if (rc != -1 && value) { - iov[iov_count].iov_base = value; - iov[iov_count].iov_len = strlen(value); - iov_count++; - } - } - - int r = sd_journal_sendv(iov, iov_count); - - // Clean up allocated memory - for (int i = 0; i < iov_count; i++) { - if (iov[i].iov_base != NULL) { - free(iov[i].iov_base); - } - } - - return r == 0; -#else - return false; -#endif -} - -static bool nd_logger_journal_direct(struct log_field *fields, size_t fields_max) { - if(!nd_log.journal_direct.initialized) - return false; - - // --- FIELD_PARSER_VERSIONS --- - // - // IMPORTANT: - // THERE ARE 6 VERSIONS OF THIS CODE - // - // 1. journal (direct socket API), - // 2. journal (libsystemd API), - // 3. logfmt, - // 4. json, - // 5. convert to uint64 - // 6. 
convert to int64 - // - // UPDATE ALL OF THEM FOR NEW FEATURES OR FIXES - - CLEAN_BUFFER *wb = buffer_create(4096, NULL); - CLEAN_BUFFER *tmp = NULL; - - for (size_t i = 0; i < fields_max; i++) { - if (!fields[i].entry.set || !fields[i].journal) - continue; - - const char *key = fields[i].journal; - - const char *s = NULL; - switch(fields[i].entry.type) { - case NDFT_TXT: - s = fields[i].entry.txt; - break; - case NDFT_STR: - s = string2str(fields[i].entry.str); - break; - case NDFT_BFR: - s = buffer_tostring(fields[i].entry.bfr); - break; - case NDFT_U64: - buffer_strcat(wb, key); - buffer_putc(wb, '='); - buffer_print_uint64(wb, fields[i].entry.u64); - buffer_putc(wb, '\n'); - break; - case NDFT_I64: - buffer_strcat(wb, key); - buffer_putc(wb, '='); - buffer_print_int64(wb, fields[i].entry.i64); - buffer_putc(wb, '\n'); - break; - case NDFT_DBL: - buffer_strcat(wb, key); - buffer_putc(wb, '='); - buffer_print_netdata_double(wb, fields[i].entry.dbl); - buffer_putc(wb, '\n'); - break; - case NDFT_UUID: - if(!uuid_is_null(*fields[i].entry.uuid)) { - char u[UUID_COMPACT_STR_LEN]; - uuid_unparse_lower_compact(*fields[i].entry.uuid, u); - buffer_strcat(wb, key); - buffer_putc(wb, '='); - buffer_fast_strcat(wb, u, sizeof(u) - 1); - buffer_putc(wb, '\n'); - } - break; - case NDFT_CALLBACK: { - if(!tmp) - tmp = buffer_create(1024, NULL); - else - buffer_flush(tmp); - if(fields[i].entry.cb.formatter(tmp, fields[i].entry.cb.formatter_data)) - s = buffer_tostring(tmp); - else - s = NULL; - } - break; - default: - s = "UNHANDLED"; - break; - } - - if(s && *s) { - buffer_strcat(wb, key); - if(!strchr(s, '\n')) { - buffer_putc(wb, '='); - buffer_strcat(wb, s); - buffer_putc(wb, '\n'); - } - else { - buffer_putc(wb, '\n'); - size_t size = strlen(s); - uint64_t le_size = htole64(size); - buffer_memcat(wb, &le_size, sizeof(le_size)); - buffer_memcat(wb, s, size); - buffer_putc(wb, '\n'); - } - } - } - - return journal_direct_send(nd_log.journal_direct.fd, buffer_tostring(wb), buffer_strlen(wb)); -} - -// ---------------------------------------------------------------------------- -// syslog logger - uses logfmt - -static bool nd_logger_syslog(int priority, ND_LOG_FORMAT format __maybe_unused, struct log_field *fields, size_t fields_max) { - CLEAN_BUFFER *wb = buffer_create(1024, NULL); - - nd_logger_logfmt(wb, fields, fields_max); - syslog(priority, "%s", buffer_tostring(wb)); - - return true; -} - -// ---------------------------------------------------------------------------- -// file logger - uses logfmt - -static bool nd_logger_file(FILE *fp, ND_LOG_FORMAT format, struct log_field *fields, size_t fields_max) { - BUFFER *wb = buffer_create(1024, NULL); - - if(format == NDLF_JSON) - nd_logger_json(wb, fields, fields_max); - else - nd_logger_logfmt(wb, fields, fields_max); - - int r = fprintf(fp, "%s\n", buffer_tostring(wb)); - fflush(fp); - - buffer_free(wb); - return r > 0; -} - -// ---------------------------------------------------------------------------- -// logger router - -static ND_LOG_METHOD nd_logger_select_output(ND_LOG_SOURCES source, FILE **fpp, SPINLOCK **spinlock) { - *spinlock = NULL; - ND_LOG_METHOD output = nd_log.sources[source].method; - - switch(output) { - case NDLM_JOURNAL: - if(unlikely(!nd_log.journal_direct.initialized && !nd_log.journal.initialized)) { - output = NDLM_FILE; - *fpp = stderr; - *spinlock = &nd_log.std_error.spinlock; - } - else { - *fpp = NULL; - *spinlock = NULL; - } - break; - - case NDLM_SYSLOG: - if(unlikely(!nd_log.syslog.initialized)) { - output = 
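
The serialization above follows journald's native protocol: single-line values are written as KEY=value\n, while values containing a newline are framed as KEY\n, a 64-bit little-endian length, the raw data, and a trailing \n. An illustrative datagram:

    PRIORITY=6\n                       <- single-line value: KEY=value\n
    MESSAGE\n                          <- multi-line value starts: KEY\n
    \x0b\x00\x00\x00\x00\x00\x00\x00   <- le64 length of "line1\nline2" (11)
    line1\nline2                       <- raw data
    \n                                 <- trailing newline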
NDLM_FILE; - *spinlock = &nd_log.std_error.spinlock; - *fpp = stderr; - } - else { - *spinlock = NULL; - *fpp = NULL; - } - break; - - case NDLM_FILE: - if(!nd_log.sources[source].fp) { - *fpp = stderr; - *spinlock = &nd_log.std_error.spinlock; - } - else { - *fpp = nd_log.sources[source].fp; - *spinlock = &nd_log.sources[source].spinlock; - } - break; - - case NDLM_STDOUT: - output = NDLM_FILE; - *fpp = stdout; - *spinlock = &nd_log.std_output.spinlock; - break; - - default: - case NDLM_DEFAULT: - case NDLM_STDERR: - output = NDLM_FILE; - *fpp = stderr; - *spinlock = &nd_log.std_error.spinlock; - break; - - case NDLM_DISABLED: - case NDLM_DEVNULL: - output = NDLM_DISABLED; - *fpp = NULL; - *spinlock = NULL; - break; - } - - return output; -} - -// ---------------------------------------------------------------------------- -// high level logger - -static void nd_logger_log_fields(SPINLOCK *spinlock, FILE *fp, bool limit, ND_LOG_FIELD_PRIORITY priority, - ND_LOG_METHOD output, struct nd_log_source *source, - struct log_field *fields, size_t fields_max) { - if(spinlock) - spinlock_lock(spinlock); - - // check the limits - if(limit && nd_log_limit_reached(source)) - goto cleanup; - - if(output == NDLM_JOURNAL) { - if(!nd_logger_journal_direct(fields, fields_max) && !nd_logger_journal_libsystemd(fields, fields_max)) { - // we can't log to journal, let's log to stderr - if(spinlock) - spinlock_unlock(spinlock); - - output = NDLM_FILE; - spinlock = &nd_log.std_error.spinlock; - fp = stderr; - - if(spinlock) - spinlock_lock(spinlock); - } - } - - if(output == NDLM_SYSLOG) - nd_logger_syslog(priority, source->format, fields, fields_max); - - if(output == NDLM_FILE) - nd_logger_file(fp, source->format, fields, fields_max); - - -cleanup: - if(spinlock) - spinlock_unlock(spinlock); -} - -static void nd_logger_unset_all_thread_fields(void) { - size_t fields_max = THREAD_FIELDS_MAX; - for(size_t i = 0; i < fields_max ; i++) - thread_log_fields[i].entry.set = false; -} - -static void nd_logger_merge_log_stack_to_thread_fields(void) { - for(size_t c = 0; c < thread_log_stack_next ;c++) { - struct log_stack_entry *lgs = thread_log_stack_base[c]; - - for(size_t i = 0; lgs[i].id != NDF_STOP ; i++) { - if(lgs[i].id >= _NDF_MAX || !lgs[i].set) - continue; - - struct log_stack_entry *e = &lgs[i]; - ND_LOG_STACK_FIELD_TYPE type = lgs[i].type; - - // do not add empty / unset fields - if((type == NDFT_TXT && (!e->txt || !*e->txt)) || - (type == NDFT_BFR && (!e->bfr || !buffer_strlen(e->bfr))) || - (type == NDFT_STR && !e->str) || - (type == NDFT_UUID && (!e->uuid || uuid_is_null(*e->uuid))) || - (type == NDFT_CALLBACK && !e->cb.formatter) || - type == NDFT_UNSET) - continue; - - thread_log_fields[lgs[i].id].entry = *e; - } - } -} - -static void nd_logger(const char *file, const char *function, const unsigned long line, - ND_LOG_SOURCES source, ND_LOG_FIELD_PRIORITY priority, bool limit, - int saved_errno, size_t saved_winerror __maybe_unused, const char *fmt, va_list ap) { - - SPINLOCK *spinlock; - FILE *fp; - ND_LOG_METHOD output = nd_logger_select_output(source, &fp, &spinlock); - if(output != NDLM_FILE && output != NDLM_JOURNAL && output != NDLM_SYSLOG) - return; - - // mark all fields as unset - nd_logger_unset_all_thread_fields(); - - // flatten the log stack into the fields - nd_logger_merge_log_stack_to_thread_fields(); - - // set the common fields that are automatically set by the logging subsystem - - if(likely(!thread_log_fields[NDF_INVOCATION_ID].entry.set)) - 
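
In short, the router above degrades every requested method to something usable (summary derived from the switch above):

    requested method    unavailable when...                 falls back to
    ----------------    --------------------------------    ---------------
    journal             neither direct nor libsystemd up    stderr (FILE)
    syslog              openlog() not yet called             stderr (FILE)
    file                no FILE* opened for the source       stderr (FILE)
    stdout / stderr     (always available)                   -
    disabled/devnull    (always)                             output dropped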
thread_log_fields[NDF_INVOCATION_ID].entry = ND_LOG_FIELD_UUID(NDF_INVOCATION_ID, &nd_log.invocation_id); - - if(likely(!thread_log_fields[NDF_LOG_SOURCE].entry.set)) - thread_log_fields[NDF_LOG_SOURCE].entry = ND_LOG_FIELD_TXT(NDF_LOG_SOURCE, nd_log_id2source(source)); - else { - ND_LOG_SOURCES src = source; - - if(thread_log_fields[NDF_LOG_SOURCE].entry.type == NDFT_TXT) - src = nd_log_source2id(thread_log_fields[NDF_LOG_SOURCE].entry.txt, source); - else if(thread_log_fields[NDF_LOG_SOURCE].entry.type == NDFT_U64) - src = thread_log_fields[NDF_LOG_SOURCE].entry.u64; - - if(src != source && src < _NDLS_MAX) { - source = src; - output = nd_logger_select_output(source, &fp, &spinlock); - if(output != NDLM_FILE && output != NDLM_JOURNAL && output != NDLM_SYSLOG) - return; - } - } - - if(likely(!thread_log_fields[NDF_SYSLOG_IDENTIFIER].entry.set)) - thread_log_fields[NDF_SYSLOG_IDENTIFIER].entry = ND_LOG_FIELD_TXT(NDF_SYSLOG_IDENTIFIER, program_name); - - if(likely(!thread_log_fields[NDF_LINE].entry.set)) { - thread_log_fields[NDF_LINE].entry = ND_LOG_FIELD_U64(NDF_LINE, line); - thread_log_fields[NDF_FILE].entry = ND_LOG_FIELD_TXT(NDF_FILE, file); - thread_log_fields[NDF_FUNC].entry = ND_LOG_FIELD_TXT(NDF_FUNC, function); - } - - if(likely(!thread_log_fields[NDF_PRIORITY].entry.set)) { - thread_log_fields[NDF_PRIORITY].entry = ND_LOG_FIELD_U64(NDF_PRIORITY, priority); - } - - if(likely(!thread_log_fields[NDF_TID].entry.set)) - thread_log_fields[NDF_TID].entry = ND_LOG_FIELD_U64(NDF_TID, gettid_cached()); - - if(likely(!thread_log_fields[NDF_THREAD_TAG].entry.set)) { - const char *thread_tag = nd_thread_tag(); - thread_log_fields[NDF_THREAD_TAG].entry = ND_LOG_FIELD_TXT(NDF_THREAD_TAG, thread_tag); - - // TODO: fix the ND_MODULE in logging by setting proper module name in threads -// if(!thread_log_fields[NDF_MODULE].entry.set) -// thread_log_fields[NDF_MODULE].entry = ND_LOG_FIELD_CB(NDF_MODULE, thread_tag_to_module, (void *)thread_tag); - } - - if(likely(!thread_log_fields[NDF_TIMESTAMP_REALTIME_USEC].entry.set)) - thread_log_fields[NDF_TIMESTAMP_REALTIME_USEC].entry = ND_LOG_FIELD_U64(NDF_TIMESTAMP_REALTIME_USEC, now_realtime_usec()); - - if(saved_errno != 0 && !thread_log_fields[NDF_ERRNO].entry.set) - thread_log_fields[NDF_ERRNO].entry = ND_LOG_FIELD_I64(NDF_ERRNO, saved_errno); - -#if defined(OS_WINDOWS) - if(saved_winerror != 0 && !thread_log_fields[NDF_WINERROR].entry.set) - thread_log_fields[NDF_WINERROR].entry = ND_LOG_FIELD_U64(NDF_WINERROR, saved_winerror); -#endif - - CLEAN_BUFFER *wb = NULL; - if(fmt && !thread_log_fields[NDF_MESSAGE].entry.set) { - wb = buffer_create(1024, NULL); - buffer_vsprintf(wb, fmt, ap); - thread_log_fields[NDF_MESSAGE].entry = ND_LOG_FIELD_TXT(NDF_MESSAGE, buffer_tostring(wb)); - } - - nd_logger_log_fields(spinlock, fp, limit, priority, output, &nd_log.sources[source], - thread_log_fields, THREAD_FIELDS_MAX); - - if(nd_log.sources[source].pending_msg) { - // log a pending message - - nd_logger_unset_all_thread_fields(); - - thread_log_fields[NDF_TIMESTAMP_REALTIME_USEC].entry = (struct log_stack_entry){ - .set = true, - .type = NDFT_U64, - .u64 = now_realtime_usec(), - }; - - thread_log_fields[NDF_LOG_SOURCE].entry = (struct log_stack_entry){ - .set = true, - .type = NDFT_TXT, - .txt = nd_log_id2source(source), - }; - - thread_log_fields[NDF_SYSLOG_IDENTIFIER].entry = (struct log_stack_entry){ - .set = true, - .type = NDFT_TXT, - .txt = program_name, - }; - - thread_log_fields[NDF_MESSAGE].entry = (struct log_stack_entry){ - .set = true, - .type = 
NDFT_TXT, - .txt = nd_log.sources[source].pending_msg, - }; - - nd_logger_log_fields(spinlock, fp, false, priority, output, - &nd_log.sources[source], - thread_log_fields, THREAD_FIELDS_MAX); - - freez((void *)nd_log.sources[source].pending_msg); - nd_log.sources[source].pending_msg = NULL; - } - - errno_clear(); -} - -static ND_LOG_SOURCES nd_log_validate_source(ND_LOG_SOURCES source) { - if(source >= _NDLS_MAX) - source = NDLS_DAEMON; - - if(nd_log.overwrite_process_source) - source = nd_log.overwrite_process_source; - - return source; -} - -// ---------------------------------------------------------------------------- -// public API for loggers - -void netdata_logger(ND_LOG_SOURCES source, ND_LOG_FIELD_PRIORITY priority, const char *file, const char *function, unsigned long line, const char *fmt, ... ) -{ - int saved_errno = errno; - - size_t saved_winerror = 0; -#if defined(OS_WINDOWS) - saved_winerror = GetLastError(); -#endif - - source = nd_log_validate_source(source); - - if (source != NDLS_DEBUG && priority > nd_log.sources[source].min_priority) - return; - - va_list args; - va_start(args, fmt); - nd_logger(file, function, line, source, priority, - source == NDLS_DAEMON || source == NDLS_COLLECTORS, - saved_errno, saved_winerror, fmt, args); - va_end(args); -} - -void netdata_logger_with_limit(ERROR_LIMIT *erl, ND_LOG_SOURCES source, ND_LOG_FIELD_PRIORITY priority, const char *file __maybe_unused, const char *function __maybe_unused, const unsigned long line __maybe_unused, const char *fmt, ... ) { - int saved_errno = errno; - - size_t saved_winerror = 0; -#if defined(OS_WINDOWS) - saved_winerror = GetLastError(); -#endif - - source = nd_log_validate_source(source); - - if (source != NDLS_DEBUG && priority > nd_log.sources[source].min_priority) - return; - - if(erl->sleep_ut) - sleep_usec(erl->sleep_ut); - - spinlock_lock(&erl->spinlock); - - erl->count++; - time_t now = now_boottime_sec(); - if(now - erl->last_logged < erl->log_every) { - spinlock_unlock(&erl->spinlock); - return; - } - - spinlock_unlock(&erl->spinlock); - - va_list args; - va_start(args, fmt); - nd_logger(file, function, line, source, priority, - source == NDLS_DAEMON || source == NDLS_COLLECTORS, - saved_errno, saved_winerror, fmt, args); - va_end(args); - erl->last_logged = now; - erl->count = 0; -} - -void netdata_logger_fatal( const char *file, const char *function, const unsigned long line, const char *fmt, ... 
) { - int saved_errno = errno; - - size_t saved_winerror = 0; -#if defined(OS_WINDOWS) - saved_winerror = GetLastError(); -#endif - - ND_LOG_SOURCES source = NDLS_DAEMON; - source = nd_log_validate_source(source); - - va_list args; - va_start(args, fmt); - nd_logger(file, function, line, source, NDLP_ALERT, true, saved_errno, saved_winerror, fmt, args); - va_end(args); - - char date[LOG_DATE_LENGTH]; - log_date(date, LOG_DATE_LENGTH, now_realtime_sec()); - - char action_data[70+1]; - snprintfz(action_data, 70, "%04lu@%-10.10s:%-15.15s/%d", line, file, function, saved_errno); - - const char *thread_tag = nd_thread_tag(); - const char *tag_to_send = thread_tag; - - // anonymize thread names - if(strncmp(thread_tag, THREAD_TAG_STREAM_RECEIVER, strlen(THREAD_TAG_STREAM_RECEIVER)) == 0) - tag_to_send = THREAD_TAG_STREAM_RECEIVER; - if(strncmp(thread_tag, THREAD_TAG_STREAM_SENDER, strlen(THREAD_TAG_STREAM_SENDER)) == 0) - tag_to_send = THREAD_TAG_STREAM_SENDER; - - char action_result[60+1]; - snprintfz(action_result, 60, "%s:%s", program_name, tag_to_send); - -#if !defined(ENABLE_SENTRY) && defined(HAVE_BACKTRACE) - int fd = nd_log.sources[NDLS_DAEMON].fd; - if(fd == -1) - fd = STDERR_FILENO; - - int nptrs; - void *buffer[10000]; - - nptrs = backtrace(buffer, sizeof(buffer)); - if(nptrs) - backtrace_symbols_fd(buffer, nptrs, fd); -#endif - -#ifdef NETDATA_INTERNAL_CHECKS - abort(); -#endif - - netdata_cleanup_and_exit(1, "FATAL", action_result, action_data); -} - -// ---------------------------------------------------------------------------- -// log limits - -void nd_log_limits_reset(void) { - usec_t now_ut = now_monotonic_usec(); - - spinlock_lock(&nd_log.std_output.spinlock); - spinlock_lock(&nd_log.std_error.spinlock); - - for(size_t i = 0; i < _NDLS_MAX ;i++) { - spinlock_lock(&nd_log.sources[i].spinlock); - nd_log.sources[i].limits.prevented = 0; - nd_log.sources[i].limits.counter = 0; - nd_log.sources[i].limits.started_monotonic_ut = now_ut; - nd_log.sources[i].limits.logs_per_period = nd_log.sources[i].limits.logs_per_period_backup; - spinlock_unlock(&nd_log.sources[i].spinlock); - } - - spinlock_unlock(&nd_log.std_output.spinlock); - spinlock_unlock(&nd_log.std_error.spinlock); -} - -void nd_log_limits_unlimited(void) { - nd_log_limits_reset(); - for(size_t i = 0; i < _NDLS_MAX ;i++) { - nd_log.sources[i].limits.logs_per_period = 0; - } -} - -static bool nd_log_limit_reached(struct nd_log_source *source) { - if(source->limits.throttle_period == 0 || source->limits.logs_per_period == 0) - return false; - - usec_t now_ut = now_monotonic_usec(); - if(!source->limits.started_monotonic_ut) - source->limits.started_monotonic_ut = now_ut; - - source->limits.counter++; - - if(now_ut - source->limits.started_monotonic_ut > (usec_t)source->limits.throttle_period) { - if(source->limits.prevented) { - BUFFER *wb = buffer_create(1024, NULL); - buffer_sprintf(wb, - "LOG FLOOD PROTECTION: resuming logging " - "(prevented %"PRIu32" logs in the last %"PRIu32" seconds).", - source->limits.prevented, - source->limits.throttle_period); - - if(source->pending_msg) - freez((void *)source->pending_msg); - - source->pending_msg = strdupz(buffer_tostring(wb)); - - buffer_free(wb); - } - - // restart the period accounting - source->limits.started_monotonic_ut = now_ut; - source->limits.counter = 1; - source->limits.prevented = 0; - - // log this error - return false; - } - - if(source->limits.counter > source->limits.logs_per_period) { - if(!source->limits.prevented) { - BUFFER *wb = buffer_create(1024, NULL); - 
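
A worked example of the throttling logic here, using the defaults of 1000 logs per 60 seconds: if 4500 messages arrive within one 60-second window, the first 1000 are written, the remaining 3500 are counted as prevented, and on the first message after the window expires a single pending line is queued reporting "resuming logging (prevented 3500 logs in the last 60 seconds)" before normal logging continues.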
buffer_sprintf(wb, - "LOG FLOOD PROTECTION: too many logs (%"PRIu32" logs in %"PRId64" seconds, threshold is set to %"PRIu32" logs " - "in %"PRIu32" seconds). Preventing more logs from process '%s' for %"PRId64" seconds.", - source->limits.counter, - (int64_t)((now_ut - source->limits.started_monotonic_ut) / USEC_PER_SEC), - source->limits.logs_per_period, - source->limits.throttle_period, - program_name, - (int64_t)(((source->limits.started_monotonic_ut + (source->limits.throttle_period * USEC_PER_SEC) - now_ut)) / USEC_PER_SEC) - ); - - if(source->pending_msg) - freez((void *)source->pending_msg); - - source->pending_msg = strdupz(buffer_tostring(wb)); - - buffer_free(wb); - } - - source->limits.prevented++; - - // prevent logging this error -#ifdef NETDATA_INTERNAL_CHECKS - return false; -#else - return true; -#endif - } - - return false; -} diff --git a/src/libnetdata/log/log.h b/src/libnetdata/log/log.h deleted file mode 100644 index 015c02eb6..000000000 --- a/src/libnetdata/log/log.h +++ /dev/null @@ -1,313 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#ifndef NETDATA_LOG_H -#define NETDATA_LOG_H 1 - -# ifdef __cplusplus -extern "C" { -# endif - -#include "../libnetdata.h" - -#define ND_LOG_DEFAULT_THROTTLE_LOGS 1000 -#define ND_LOG_DEFAULT_THROTTLE_PERIOD 60 - -typedef enum __attribute__((__packed__)) { - NDLS_UNSET = 0, // internal use only - NDLS_ACCESS, // access.log - NDLS_ACLK, // aclk.log - NDLS_COLLECTORS, // collectors.log - NDLS_DAEMON, // error.log - NDLS_HEALTH, // health.log - NDLS_DEBUG, // debug.log - - // terminator - _NDLS_MAX, -} ND_LOG_SOURCES; - -typedef enum __attribute__((__packed__)) { - NDLP_EMERG = LOG_EMERG, - NDLP_ALERT = LOG_ALERT, - NDLP_CRIT = LOG_CRIT, - NDLP_ERR = LOG_ERR, - NDLP_WARNING = LOG_WARNING, - NDLP_NOTICE = LOG_NOTICE, - NDLP_INFO = LOG_INFO, - NDLP_DEBUG = LOG_DEBUG, -} ND_LOG_FIELD_PRIORITY; - -typedef enum __attribute__((__packed__)) { - // KEEP THESE IN THE SAME ORDER AS in thread_log_fields (log.c) - // so that it easy to audit for missing fields - - NDF_STOP = 0, - NDF_TIMESTAMP_REALTIME_USEC, // the timestamp of the log message - added automatically - NDF_SYSLOG_IDENTIFIER, // the syslog identifier of the application - added automatically - NDF_LOG_SOURCE, // DAEMON, COLLECTORS, HEALTH, ACCESS, ACLK - set at the log call - NDF_PRIORITY, // the syslog priority (severity) - set at the log call - NDF_ERRNO, // the ERRNO at the time of the log call - added automatically -#if defined(OS_WINDOWS) - NDF_WINERROR, // Windows GetLastError() -#endif - NDF_INVOCATION_ID, // the INVOCATION_ID of Netdata - added automatically - NDF_LINE, // the source code file line number - added automatically - NDF_FILE, // the source code filename - added automatically - NDF_FUNC, // the source code function - added automatically - NDF_TID, // the thread ID of the thread logging - added automatically - NDF_THREAD_TAG, // the thread tag of the thread logging - added automatically - NDF_MESSAGE_ID, // for specific events - NDF_MODULE, // for internal plugin module, all other get the NDF_THREAD_TAG - - NDF_NIDL_NODE, // the node / rrdhost currently being worked - NDF_NIDL_INSTANCE, // the instance / rrdset currently being worked - NDF_NIDL_CONTEXT, // the context of the instance currently being worked - NDF_NIDL_DIMENSION, // the dimension / rrddim currently being worked - - // web server, aclk and stream receiver - NDF_SRC_TRANSPORT, // the transport we received the request, one of: http, https, pluginsd - - // Netdata Cloud Related - NDF_ACCOUNT_ID, - 
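
Because the NDLP_* values above alias the syslog.h levels, a Netdata priority can be handed to syslog() unchanged. A minimal sketch (the message is illustrative):

    #include <syslog.h>

    // NDLP_WARNING has the same numeric value as LOG_WARNING,
    // so this is equivalent to syslog(LOG_WARNING, ...).
    syslog(NDLP_WARNING, "%s", "disk almost full");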
NDF_USER_NAME, - NDF_USER_ROLE, - NDF_USER_ACCESS, - - // web server and stream receiver - NDF_SRC_IP, // the streaming / web server source IP - NDF_SRC_PORT, // the streaming / web server source Port - NDF_SRC_FORWARDED_HOST, - NDF_SRC_FORWARDED_FOR, - NDF_SRC_CAPABILITIES, // the stream receiver capabilities - - // stream sender (established links) - NDF_DST_TRANSPORT, // the transport we send the request, one of: http, https - NDF_DST_IP, // the destination streaming IP - NDF_DST_PORT, // the destination streaming Port - NDF_DST_CAPABILITIES, // the destination streaming capabilities - - // web server, aclk and stream receiver - NDF_REQUEST_METHOD, // for http like requests, the http request method - NDF_RESPONSE_CODE, // for http like requests, the http response code, otherwise a status string - - // web server (all), aclk (queries) - NDF_CONNECTION_ID, // the web server connection ID - NDF_TRANSACTION_ID, // the web server and API transaction ID - NDF_RESPONSE_SENT_BYTES, // for http like requests, the response bytes - NDF_RESPONSE_SIZE_BYTES, // for http like requests, the uncompressed response size - NDF_RESPONSE_PREPARATION_TIME_USEC, // for http like requests, the preparation time - NDF_RESPONSE_SENT_TIME_USEC, // for http like requests, the time to send the response back - NDF_RESPONSE_TOTAL_TIME_USEC, // for http like requests, the total time to complete the response - - // health alerts - NDF_ALERT_ID, - NDF_ALERT_UNIQUE_ID, - NDF_ALERT_EVENT_ID, - NDF_ALERT_TRANSITION_ID, - NDF_ALERT_CONFIG_HASH, - NDF_ALERT_NAME, - NDF_ALERT_CLASS, - NDF_ALERT_COMPONENT, - NDF_ALERT_TYPE, - NDF_ALERT_EXEC, - NDF_ALERT_RECIPIENT, - NDF_ALERT_DURATION, - NDF_ALERT_VALUE, - NDF_ALERT_VALUE_OLD, - NDF_ALERT_STATUS, - NDF_ALERT_STATUS_OLD, - NDF_ALERT_SOURCE, - NDF_ALERT_UNITS, - NDF_ALERT_SUMMARY, - NDF_ALERT_INFO, - NDF_ALERT_NOTIFICATION_REALTIME_USEC, - // NDF_ALERT_FLAGS, - - // put new items here - // leave the request URL and the message last - - NDF_REQUEST, // the request we are currently working on - NDF_MESSAGE, // the log message, if any - - // terminator - _NDF_MAX, -} ND_LOG_FIELD_ID; - -typedef enum __attribute__((__packed__)) { - NDFT_UNSET = 0, - NDFT_TXT, - NDFT_STR, - NDFT_BFR, - NDFT_U64, - NDFT_I64, - NDFT_DBL, - NDFT_UUID, - NDFT_CALLBACK, -} ND_LOG_STACK_FIELD_TYPE; - -void errno_clear(void); -void nd_log_set_user_settings(ND_LOG_SOURCES source, const char *setting); -void nd_log_set_facility(const char *facility); -void nd_log_set_priority_level(const char *setting); -void nd_log_initialize(void); -void nd_log_reopen_log_files(bool log); -void chown_open_file(int fd, uid_t uid, gid_t gid); -void nd_log_chown_log_files(uid_t uid, gid_t gid); -void nd_log_set_flood_protection(size_t logs, time_t period); -void nd_log_initialize_for_external_plugins(const char *name); -void nd_log_reopen_log_files_for_spawn_server(void); -bool nd_log_journal_socket_available(void); -ND_LOG_FIELD_ID nd_log_field_id_by_name(const char *field, size_t len); -int nd_log_priority2id(const char *priority); -const char *nd_log_id2priority(ND_LOG_FIELD_PRIORITY priority); -const char *nd_log_method_for_external_plugins(const char *s); - -int nd_log_health_fd(void); -int nd_log_collectors_fd(void); -typedef bool (*log_formatter_callback_t)(BUFFER *wb, void *data); - -struct log_stack_entry { - ND_LOG_FIELD_ID id; - ND_LOG_STACK_FIELD_TYPE type; - bool set; - union { - const char *txt; - struct netdata_string *str; - BUFFER *bfr; - uint64_t u64; - int64_t i64; - double dbl; - const nd_uuid_t *uuid; - 
struct { - log_formatter_callback_t formatter; - void *formatter_data; - } cb; - }; -}; - -#define ND_LOG_STACK _cleanup_(log_stack_pop) struct log_stack_entry -#define ND_LOG_STACK_PUSH(lgs) log_stack_push(lgs) - -#define ND_LOG_FIELD_TXT(field, value) (struct log_stack_entry){ .id = (field), .type = NDFT_TXT, .txt = (value), .set = true, } -#define ND_LOG_FIELD_STR(field, value) (struct log_stack_entry){ .id = (field), .type = NDFT_STR, .str = (value), .set = true, } -#define ND_LOG_FIELD_BFR(field, value) (struct log_stack_entry){ .id = (field), .type = NDFT_BFR, .bfr = (value), .set = true, } -#define ND_LOG_FIELD_U64(field, value) (struct log_stack_entry){ .id = (field), .type = NDFT_U64, .u64 = (value), .set = true, } -#define ND_LOG_FIELD_I64(field, value) (struct log_stack_entry){ .id = (field), .type = NDFT_I64, .i64 = (value), .set = true, } -#define ND_LOG_FIELD_DBL(field, value) (struct log_stack_entry){ .id = (field), .type = NDFT_DBL, .dbl = (value), .set = true, } -#define ND_LOG_FIELD_CB(field, func, data) (struct log_stack_entry){ .id = (field), .type = NDFT_CALLBACK, .cb = { .formatter = (func), .formatter_data = (data) }, .set = true, } -#define ND_LOG_FIELD_UUID(field, value) (struct log_stack_entry){ .id = (field), .type = NDFT_UUID, .uuid = (value), .set = true, } -#define ND_LOG_FIELD_END() (struct log_stack_entry){ .id = NDF_STOP, .type = NDFT_UNSET, .set = false, } - -void log_stack_pop(void *ptr); -void log_stack_push(struct log_stack_entry *lgs); - -#define D_WEB_BUFFER 0x0000000000000001 -#define D_WEB_CLIENT 0x0000000000000002 -#define D_LISTENER 0x0000000000000004 -#define D_WEB_DATA 0x0000000000000008 -#define D_OPTIONS 0x0000000000000010 -#define D_PROCNETDEV_LOOP 0x0000000000000020 -#define D_RRD_STATS 0x0000000000000040 -#define D_WEB_CLIENT_ACCESS 0x0000000000000080 -#define D_TC_LOOP 0x0000000000000100 -#define D_DEFLATE 0x0000000000000200 -#define D_CONFIG 0x0000000000000400 -#define D_PLUGINSD 0x0000000000000800 -#define D_CHILDS 0x0000000000001000 -#define D_EXIT 0x0000000000002000 -#define D_CHECKS 0x0000000000004000 -#define D_NFACCT_LOOP 0x0000000000008000 -#define D_PROCFILE 0x0000000000010000 -#define D_RRD_CALLS 0x0000000000020000 -#define D_DICTIONARY 0x0000000000040000 -#define D_MEMORY 0x0000000000080000 -#define D_CGROUP 0x0000000000100000 -#define D_REGISTRY 0x0000000000200000 -#define D_VARIABLES 0x0000000000400000 -#define D_HEALTH 0x0000000000800000 -#define D_CONNECT_TO 0x0000000001000000 -#define D_RRDHOST 0x0000000002000000 -#define D_LOCKS 0x0000000004000000 -#define D_EXPORTING 0x0000000008000000 -#define D_STATSD 0x0000000010000000 -#define D_POLLFD 0x0000000020000000 -#define D_STREAM 0x0000000040000000 -#define D_ANALYTICS 0x0000000080000000 -#define D_RRDENGINE 0x0000000100000000 -#define D_ACLK 0x0000000200000000 -#define D_REPLICATION 0x0000002000000000 -#define D_SYSTEM 0x8000000000000000 - -extern uint64_t debug_flags; - -extern const char *program_name; - -#ifdef ENABLE_ACLK -extern int aclklog_enabled; -#endif - -#define LOG_DATE_LENGTH 26 -void log_date(char *buffer, size_t len, time_t now); - -static inline void debug_dummy(void) {} - -void nd_log_limits_reset(void); -void nd_log_limits_unlimited(void); - -#define NDLP_INFO_STR "info" - -#ifdef NETDATA_INTERNAL_CHECKS -#define netdata_log_debug(type, args...) do { if(unlikely(debug_flags & type)) netdata_logger(NDLS_DEBUG, NDLP_DEBUG, __FILE__, __FUNCTION__, __LINE__, ##args); } while(0) -#define internal_error(condition, args...) 
do { if(unlikely(condition)) netdata_logger(NDLS_DAEMON, NDLP_DEBUG, __FILE__, __FUNCTION__, __LINE__, ##args); } while(0) -#define internal_fatal(condition, args...) do { if(unlikely(condition)) netdata_logger_fatal(__FILE__, __FUNCTION__, __LINE__, ##args); } while(0) -#else -#define netdata_log_debug(type, args...) debug_dummy() -#define internal_error(args...) debug_dummy() -#define internal_fatal(args...) debug_dummy() -#endif - -#define fatal(args...) netdata_logger_fatal(__FILE__, __FUNCTION__, __LINE__, ##args) -#define fatal_assert(expr) ((expr) ? (void)(0) : netdata_logger_fatal(__FILE__, __FUNCTION__, __LINE__, "Assertion `%s' failed", #expr)) - -// ---------------------------------------------------------------------------- -// normal logging - -void netdata_logger(ND_LOG_SOURCES source, ND_LOG_FIELD_PRIORITY priority, const char *file, const char *function, unsigned long line, const char *fmt, ... ) PRINTFLIKE(6, 7); -#define nd_log(NDLS, NDLP, args...) netdata_logger(NDLS, NDLP, __FILE__, __FUNCTION__, __LINE__, ##args) -#define nd_log_daemon(NDLP, args...) netdata_logger(NDLS_DAEMON, NDLP, __FILE__, __FUNCTION__, __LINE__, ##args) -#define nd_log_collector(NDLP, args...) netdata_logger(NDLS_COLLECTORS, NDLP, __FILE__, __FUNCTION__, __LINE__, ##args) - -#define netdata_log_info(args...) netdata_logger(NDLS_DAEMON, NDLP_INFO, __FILE__, __FUNCTION__, __LINE__, ##args) -#define netdata_log_error(args...) netdata_logger(NDLS_DAEMON, NDLP_ERR, __FILE__, __FUNCTION__, __LINE__, ##args) -#define collector_info(args...) netdata_logger(NDLS_COLLECTORS, NDLP_INFO, __FILE__, __FUNCTION__, __LINE__, ##args) -#define collector_error(args...) netdata_logger(NDLS_COLLECTORS, NDLP_ERR, __FILE__, __FUNCTION__, __LINE__, ##args) - -#define log_aclk_message_bin(__data, __data_len, __tx, __mqtt_topic, __message_name) \ - nd_log(NDLS_ACLK, NDLP_INFO, \ - "direction:%s message:'%s' topic:'%s' json:'%.*s'", \ - (__tx) ? "OUTGOING" : "INCOMING", __message_name, __mqtt_topic, (int)(__data_len), __data) - -// ---------------------------------------------------------------------------- -// logging with limits - -typedef struct error_with_limit { - SPINLOCK spinlock; - time_t log_every; - size_t count; - time_t last_logged; - usec_t sleep_ut; -} ERROR_LIMIT; - -#define nd_log_limit_static_global_var(var, log_every_secs, sleep_usecs) static ERROR_LIMIT var = { .last_logged = 0, .count = 0, .log_every = (log_every_secs), .sleep_ut = (sleep_usecs) } -#define nd_log_limit_static_thread_var(var, log_every_secs, sleep_usecs) static __thread ERROR_LIMIT var = { .last_logged = 0, .count = 0, .log_every = (log_every_secs), .sleep_ut = (sleep_usecs) } -void netdata_logger_with_limit(ERROR_LIMIT *erl, ND_LOG_SOURCES source, ND_LOG_FIELD_PRIORITY priority, const char *file, const char *function, unsigned long line, const char *fmt, ... ) PRINTFLIKE(7, 8); -#define nd_log_limit(erl, NDLS, NDLP, args...) netdata_logger_with_limit(erl, NDLS, NDLP, __FILE__, __FUNCTION__, __LINE__, ##args) - -// ---------------------------------------------------------------------------- - -void netdata_logger_fatal( const char *file, const char *function, unsigned long line, const char *fmt, ... 
) NORETURN PRINTFLIKE(4, 5); - -# ifdef __cplusplus -} -# endif - -#endif /* NETDATA_LOG_H */ diff --git a/src/libnetdata/log/nd_log-annotators.c b/src/libnetdata/log/nd_log-annotators.c new file mode 100644 index 000000000..92e9bf310 --- /dev/null +++ b/src/libnetdata/log/nd_log-annotators.c @@ -0,0 +1,84 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "nd_log-internals.h" + +const char *timestamp_usec_annotator(struct log_field *lf) { + usec_t ut = log_field_to_uint64(lf); + + if(!ut) + return NULL; + + static __thread char datetime[RFC3339_MAX_LENGTH]; + rfc3339_datetime_ut(datetime, sizeof(datetime), ut, 3, false); + return datetime; +} + +const char *errno_annotator(struct log_field *lf) { + int64_t errnum = log_field_to_int64(lf); + + if(errnum == 0) + return NULL; + + static __thread char buf[256]; + size_t len = print_uint64(buf, errnum); + buf[len++] = ','; + buf[len++] = ' '; + + char *msg_to = &buf[len]; + size_t msg_size = sizeof(buf) - len; + + const char *s = errno2str((int)errnum, msg_to, msg_size); + if(s != msg_to) + strncpyz(msg_to, s, msg_size - 1); + + return buf; +} + +#if defined(OS_WINDOWS) +const char *winerror_annotator(struct log_field *lf) { + DWORD errnum = log_field_to_uint64(lf); + + if (errnum == 0) + return NULL; + + static __thread char buf[256]; + size_t len = print_uint64(buf, errnum); + buf[len++] = ','; + buf[len++] = ' '; + + char *msg_to = &buf[len]; + size_t msg_size = sizeof(buf) - len; + + wchar_t wbuf[1024]; + DWORD size = FormatMessageW( + FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, + NULL, + errnum, + MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), + wbuf, + (DWORD)(sizeof(wbuf) / sizeof(wchar_t) - 1), + NULL + ); + + if (size > 0) { + // Remove \r\n at the end + while (size > 0 && (wbuf[size - 1] == L'\r' || wbuf[size - 1] == L'\n')) + wbuf[--size] = L'\0'; + + // Convert wide string to UTF-8 + int utf8_size = WideCharToMultiByte(CP_UTF8, 0, wbuf, -1, msg_to, (int)msg_size, NULL, NULL); + if (utf8_size == 0) + snprintf(msg_to, msg_size - 1, "unknown error code"); + msg_to[msg_size - 1] = '\0'; + } + else + snprintf(msg_to, msg_size - 1, "unknown error code"); + + return buf; +} +#endif + +const char *priority_annotator(struct log_field *lf) { + uint64_t pri = log_field_to_uint64(lf); + return nd_log_id2priority(pri); +} diff --git a/src/libnetdata/log/nd_log-common.h b/src/libnetdata/log/nd_log-common.h new file mode 100644 index 000000000..d06bbbd16 --- /dev/null +++ b/src/libnetdata/log/nd_log-common.h @@ -0,0 +1,147 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_ND_LOG_COMMON_H +#define NETDATA_ND_LOG_COMMON_H + +#include <syslog.h> + +typedef enum __attribute__((__packed__)) { + NDLS_UNSET = 0, // internal use only + NDLS_ACCESS, // access.log + NDLS_ACLK, // aclk.log + NDLS_COLLECTORS, // collector.log + NDLS_DAEMON, // error.log + NDLS_HEALTH, // health.log + NDLS_DEBUG, // debug.log + + // terminator + _NDLS_MAX, +} ND_LOG_SOURCES; + +typedef enum __attribute__((__packed__)) { + NDLP_EMERG = LOG_EMERG, // from syslog.h + NDLP_ALERT = LOG_ALERT, // from syslog.h + NDLP_CRIT = LOG_CRIT, // from syslog.h + NDLP_ERR = LOG_ERR, // from syslog.h + NDLP_WARNING = LOG_WARNING, // from syslog.h + NDLP_NOTICE = LOG_NOTICE, // from syslog.h + NDLP_INFO = LOG_INFO, // from syslog.h + NDLP_DEBUG = LOG_DEBUG, // from syslog.h + + // terminator + _NDLP_MAX, +} ND_LOG_FIELD_PRIORITY; + +typedef enum __attribute__((__packed__)) { + // KEEP THESE IN THE SAME ORDER AS in thread_log_fields (log.c) + // so that it is easy to
audit for missing fields + + // NEVER RENUMBER THIS LIST + // The Windows Events Log has them at fixed positions + + NDF_STOP = 0, + NDF_TIMESTAMP_REALTIME_USEC = 1, // the timestamp of the log message - added automatically + NDF_SYSLOG_IDENTIFIER = 2, // the syslog identifier of the application - added automatically + NDF_LOG_SOURCE = 3, // DAEMON, COLLECTORS, HEALTH, MSGID_ACCESS, ACLK - set at the log call + NDF_PRIORITY = 4, // the syslog priority (severity) - set at the log call + NDF_ERRNO = 5, // the ERRNO at the time of the log call - added automatically + NDF_WINERROR = 6, // Windows GetLastError() + NDF_INVOCATION_ID = 7, // the INVOCATION_ID of Netdata - added automatically + NDF_LINE = 8, // the source code file line number - added automatically + NDF_FILE = 9, // the source code filename - added automatically + NDF_FUNC = 10, // the source code function - added automatically + NDF_TID = 11, // the thread ID of the thread logging - added automatically + NDF_THREAD_TAG = 12, // the thread tag of the thread logging - added automatically + NDF_MESSAGE_ID = 13, // for specific events + NDF_MODULE = 14, // for internal plugin module, all other get the NDF_THREAD_TAG + + NDF_NIDL_NODE = 15, // the node / rrdhost currently being worked + NDF_NIDL_INSTANCE = 16, // the instance / rrdset currently being worked + NDF_NIDL_CONTEXT = 17, // the context of the instance currently being worked + NDF_NIDL_DIMENSION = 18, // the dimension / rrddim currently being worked + + // web server, aclk and stream receiver + NDF_SRC_TRANSPORT = 19, // the transport we received the request, one of: http, https, pluginsd + + // Netdata Cloud Related + NDF_ACCOUNT_ID = 20, + NDF_USER_NAME = 21, + NDF_USER_ROLE = 22, + NDF_USER_ACCESS = 23, + + // web server and stream receiver + NDF_SRC_IP = 24, // the streaming / web server source IP + NDF_SRC_PORT = 25, // the streaming / web server source Port + NDF_SRC_FORWARDED_HOST = 26, + NDF_SRC_FORWARDED_FOR = 27, + NDF_SRC_CAPABILITIES = 28, // the stream receiver capabilities + + // stream sender (established links) + NDF_DST_TRANSPORT = 29, // the transport we send the request, one of: http, https + NDF_DST_IP = 30, // the destination streaming IP + NDF_DST_PORT = 31, // the destination streaming Port + NDF_DST_CAPABILITIES = 32, // the destination streaming capabilities + + // web server, aclk and stream receiver + NDF_REQUEST_METHOD = 33, // for http like requests, the http request method + NDF_RESPONSE_CODE = 34, // for http like requests, the http response code, otherwise a status string + + // web server (all), aclk (queries) + NDF_CONNECTION_ID = 35, // the web server connection ID + NDF_TRANSACTION_ID = 36, // the web server and API transaction ID + NDF_RESPONSE_SENT_BYTES = 37, // for http like requests, the response bytes + NDF_RESPONSE_SIZE_BYTES = 38, // for http like requests, the uncompressed response size + NDF_RESPONSE_PREPARATION_TIME_USEC = 39, // for http like requests, the preparation time + NDF_RESPONSE_SENT_TIME_USEC = 40, // for http like requests, the time to send the response back + NDF_RESPONSE_TOTAL_TIME_USEC = 41, // for http like requests, the total time to complete the response + + // health alerts + NDF_ALERT_ID = 42, + NDF_ALERT_UNIQUE_ID = 43, + NDF_ALERT_EVENT_ID = 44, + NDF_ALERT_TRANSITION_ID = 45, + NDF_ALERT_CONFIG_HASH = 46, + NDF_ALERT_NAME = 47, + NDF_ALERT_CLASS = 48, + NDF_ALERT_COMPONENT = 49, + NDF_ALERT_TYPE = 50, + NDF_ALERT_EXEC = 51, + NDF_ALERT_RECIPIENT = 52, + NDF_ALERT_DURATION = 53, + NDF_ALERT_VALUE = 54, + 
NDF_ALERT_VALUE_OLD = 55, + NDF_ALERT_STATUS = 56, + NDF_ALERT_STATUS_OLD = 57, + NDF_ALERT_SOURCE = 58, + NDF_ALERT_UNITS = 59, + NDF_ALERT_SUMMARY = 60, + NDF_ALERT_INFO = 61, + NDF_ALERT_NOTIFICATION_REALTIME_USEC = 62, + // NDF_ALERT_FLAGS, + + // put new items here + // leave the request URL and the message last + + NDF_REQUEST = 63, // the request we are currently working on + NDF_MESSAGE = 64, // the log message, if any + + // terminator + _NDF_MAX, +} ND_LOG_FIELD_ID; + +typedef enum __attribute__((__packed__)) { + NDFT_UNSET = 0, + NDFT_TXT, + NDFT_STR, + NDFT_BFR, + NDFT_U64, + NDFT_I64, + NDFT_DBL, + NDFT_UUID, + NDFT_CALLBACK, + + // terminator + _NDFT_MAX, +} ND_LOG_STACK_FIELD_TYPE; + +#endif //NETDATA_ND_LOG_COMMON_H diff --git a/src/libnetdata/log/nd_log-config.c b/src/libnetdata/log/nd_log-config.c new file mode 100644 index 000000000..c8e17402e --- /dev/null +++ b/src/libnetdata/log/nd_log-config.c @@ -0,0 +1,207 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "nd_log-internals.h" + +void nd_log_set_user_settings(ND_LOG_SOURCES source, const char *setting) { + char buf[FILENAME_MAX + 100]; + if(setting && *setting) + strncpyz(buf, setting, sizeof(buf) - 1); + else + buf[0] = '\0'; + + struct nd_log_source *ls = &nd_log.sources[source]; + char *output = strrchr(buf, '@'); + + if(!output) + // all of it is the output + output = buf; + else { + // we found an '@', the next char is the output + *output = '\0'; + output++; + + // parse the other params + char *remaining = buf; + while(remaining) { + char *value = strsep_skip_consecutive_separators(&remaining, ","); + if (!value || !*value) continue; + + char *name = strsep_skip_consecutive_separators(&value, "="); + if (!name || !*name) continue; + + if(strcmp(name, "logfmt") == 0) + ls->format = NDLF_LOGFMT; + else if(strcmp(name, "json") == 0) + ls->format = NDLF_JSON; + else if(strcmp(name, "journal") == 0) + ls->format = NDLF_JOURNAL; +#if defined(OS_WINDOWS) +#if defined(HAVE_ETW) + else if(strcmp(name, ETW_NAME) == 0) + ls->format = NDLF_ETW; +#endif +#if defined(HAVE_WEL) + else if(strcmp(name, WEL_NAME) == 0) + ls->format = NDLF_WEL; +#endif +#endif + else if(strcmp(name, "level") == 0 && value && *value) + ls->min_priority = nd_log_priority2id(value); + else if(strcmp(name, "protection") == 0 && value && *value) { + if(strcmp(value, "off") == 0 || strcmp(value, "none") == 0) { + ls->limits = ND_LOG_LIMITS_UNLIMITED; + ls->limits.counter = 0; + ls->limits.prevented = 0; + } + else { + ls->limits = ND_LOG_LIMITS_DEFAULT; + + char *slash = strchr(value, '/'); + if(slash) { + *slash = '\0'; + slash++; + ls->limits.logs_per_period = ls->limits.logs_per_period_backup = str2u(value); + + int period; + if(!duration_parse_seconds(slash, &period)) { + nd_log(NDLS_DAEMON, NDLP_ERR, "Error while parsing period '%s'", slash); + period = ND_LOG_DEFAULT_THROTTLE_PERIOD; + } + + ls->limits.throttle_period = period; + } + else { + ls->limits.logs_per_period = ls->limits.logs_per_period_backup = str2u(value); + ls->limits.throttle_period = ND_LOG_DEFAULT_THROTTLE_PERIOD; + } + } + } + else + nd_log(NDLS_DAEMON, NDLP_ERR, + "Error while parsing configuration of log source '%s'. 
" + "In config '%s', '%s' is not understood.", + nd_log_id2source(source), setting, name); + } + } + + if(!output || !*output || strcmp(output, "none") == 0 || strcmp(output, "off") == 0) { + ls->method = NDLM_DISABLED; + ls->filename = "/dev/null"; + } + else if(strcmp(output, "journal") == 0) { + ls->method = NDLM_JOURNAL; + ls->filename = NULL; + } +#if defined(OS_WINDOWS) +#if defined(HAVE_ETW) + else if(strcmp(output, ETW_NAME) == 0) { + ls->method = NDLM_ETW; + ls->filename = NULL; + } +#endif +#if defined(HAVE_WEL) + else if(strcmp(output, WEL_NAME) == 0) { + ls->method = NDLM_WEL; + ls->filename = NULL; + } +#endif +#endif + else if(strcmp(output, "syslog") == 0) { + ls->method = NDLM_SYSLOG; + ls->filename = NULL; + } + else if(strcmp(output, "/dev/null") == 0) { + ls->method = NDLM_DEVNULL; + ls->filename = "/dev/null"; + } + else if(strcmp(output, "system") == 0) { + if(ls->fd == STDERR_FILENO) { + ls->method = NDLM_STDERR; + ls->filename = NULL; + ls->fd = STDERR_FILENO; + } + else { + ls->method = NDLM_STDOUT; + ls->filename = NULL; + ls->fd = STDOUT_FILENO; + } + } + else if(strcmp(output, "stderr") == 0) { + ls->method = NDLM_STDERR; + ls->filename = NULL; + ls->fd = STDERR_FILENO; + } + else if(strcmp(output, "stdout") == 0) { + ls->method = NDLM_STDOUT; + ls->filename = NULL; + ls->fd = STDOUT_FILENO; + } + else { + ls->method = NDLM_FILE; + ls->filename = strdupz(output); + } + +#if defined(NETDATA_INTERNAL_CHECKS) || defined(NETDATA_DEV_MODE) + ls->min_priority = NDLP_DEBUG; +#endif + + if(source == NDLS_COLLECTORS) { + // set the method for the collector processes we will spawn + + ND_LOG_METHOD method = NDLM_STDERR; + ND_LOG_FORMAT format = NDLF_LOGFMT; + ND_LOG_FIELD_PRIORITY priority = ls->min_priority; + + if(IS_VALID_LOG_METHOD_FOR_EXTERNAL_PLUGINS(ls->method)) { + method = ls->method; + format = ls->format; + } + + nd_setenv("NETDATA_LOG_METHOD", nd_log_id2method(method), 1); + nd_setenv("NETDATA_LOG_FORMAT", nd_log_id2format(format), 1); + nd_setenv("NETDATA_LOG_LEVEL", nd_log_id2priority(priority), 1); + } +} + +void nd_log_set_priority_level(const char *setting) { + if(!setting || !*setting) + setting = "info"; + + ND_LOG_FIELD_PRIORITY priority = nd_log_priority2id(setting); + +#if defined(NETDATA_INTERNAL_CHECKS) || defined(NETDATA_DEV_MODE) + priority = NDLP_DEBUG; +#endif + + for (size_t i = 0; i < _NDLS_MAX; i++) { + if (i != NDLS_DEBUG) + nd_log.sources[i].min_priority = priority; + } + + // the right one + nd_setenv("NETDATA_LOG_LEVEL", nd_log_id2priority(priority), 1); +} + +void nd_log_set_facility(const char *facility) { + if(!facility || !*facility) + facility = "daemon"; + + nd_log.syslog.facility = nd_log_facility2id(facility); + nd_setenv("NETDATA_SYSLOG_FACILITY", nd_log_id2facility(nd_log.syslog.facility), 1); +} + +void nd_log_set_flood_protection(size_t logs, time_t period) { + nd_log.sources[NDLS_DAEMON].limits.logs_per_period = + nd_log.sources[NDLS_DAEMON].limits.logs_per_period_backup; + nd_log.sources[NDLS_COLLECTORS].limits.logs_per_period = + nd_log.sources[NDLS_COLLECTORS].limits.logs_per_period_backup = logs; + + nd_log.sources[NDLS_DAEMON].limits.throttle_period = + nd_log.sources[NDLS_COLLECTORS].limits.throttle_period = period; + + char buf[100]; + snprintfz(buf, sizeof(buf), "%" PRIu64, (uint64_t )period); + nd_setenv("NETDATA_ERRORS_THROTTLE_PERIOD", buf, 1); + snprintfz(buf, sizeof(buf), "%" PRIu64, (uint64_t )logs); + nd_setenv("NETDATA_ERRORS_PER_PERIOD", buf, 1); +} diff --git a/src/libnetdata/log/nd_log-field-formatters.c 
b/src/libnetdata/log/nd_log-field-formatters.c new file mode 100644 index 000000000..e1b3c0d08 --- /dev/null +++ b/src/libnetdata/log/nd_log-field-formatters.c @@ -0,0 +1,127 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "nd_log-internals.h" + +int64_t log_field_to_int64(struct log_field *lf) { + + // --- FIELD_PARSER_VERSIONS --- + // + // IMPORTANT: + // THERE ARE 6 VERSIONS OF THIS CODE + // + // 1. journal (direct socket API), + // 2. journal (libsystemd API), + // 3. logfmt, + // 4. json, + // 5. convert to uint64 + // 6. convert to int64 + // + // UPDATE ALL OF THEM FOR NEW FEATURES OR FIXES + + CLEAN_BUFFER *tmp = NULL; + const char *s = NULL; + + switch(lf->entry.type) { + default: + case NDFT_UUID: + case NDFT_UNSET: + return 0; + + case NDFT_TXT: + s = lf->entry.txt; + break; + + case NDFT_STR: + s = string2str(lf->entry.str); + break; + + case NDFT_BFR: + s = buffer_tostring(lf->entry.bfr); + break; + + case NDFT_CALLBACK: + tmp = buffer_create(0, NULL); + + if(lf->entry.cb.formatter(tmp, lf->entry.cb.formatter_data)) + s = buffer_tostring(tmp); + else + s = NULL; + break; + + case NDFT_U64: + return (int64_t)lf->entry.u64; + + case NDFT_I64: + return (int64_t)lf->entry.i64; + + case NDFT_DBL: + return (int64_t)lf->entry.dbl; + } + + if(s && *s) + return str2ll(s, NULL); + + return 0; +} + +uint64_t log_field_to_uint64(struct log_field *lf) { + + // --- FIELD_PARSER_VERSIONS --- + // + // IMPORTANT: + // THERE ARE 6 VERSIONS OF THIS CODE + // + // 1. journal (direct socket API), + // 2. journal (libsystemd API), + // 3. logfmt, + // 4. json, + // 5. convert to uint64 + // 6. convert to int64 + // + // UPDATE ALL OF THEM FOR NEW FEATURES OR FIXES + + CLEAN_BUFFER *tmp = NULL; + const char *s = NULL; + + switch(lf->entry.type) { + default: + case NDFT_UUID: + case NDFT_UNSET: + return 0; + + case NDFT_TXT: + s = lf->entry.txt; + break; + + case NDFT_STR: + s = string2str(lf->entry.str); + break; + + case NDFT_BFR: + s = buffer_tostring(lf->entry.bfr); + break; + + case NDFT_CALLBACK: + tmp = buffer_create(0, NULL); + + if(lf->entry.cb.formatter(tmp, lf->entry.cb.formatter_data)) + s = buffer_tostring(tmp); + else + s = NULL; + break; + + case NDFT_U64: + return lf->entry.u64; + + case NDFT_I64: + return lf->entry.i64; + + case NDFT_DBL: + return (uint64_t) lf->entry.dbl; + } + + if(s && *s) + return str2uint64_t(s, NULL); + + return 0; +} diff --git a/src/libnetdata/log/nd_log-format-json.c b/src/libnetdata/log/nd_log-format-json.c new file mode 100644 index 000000000..c25bf19c5 --- /dev/null +++ b/src/libnetdata/log/nd_log-format-json.c @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "nd_log-internals.h" + +void nd_logger_json(BUFFER *wb, struct log_field *fields, size_t fields_max) { + + // --- FIELD_PARSER_VERSIONS --- + // + // IMPORTANT: + // THERE ARE 6 VERSIONS OF THIS CODE + // + // 1. journal (direct socket API), + // 2. journal (libsystemd API), + // 3. logfmt, + // 4. json, + // 5. convert to uint64 + // 6. 
convert to int64 + // + // UPDATE ALL OF THEM FOR NEW FEATURES OR FIXES + + buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_MINIFY); + CLEAN_BUFFER *tmp = NULL; + + for (size_t i = 0; i < fields_max; i++) { + if (!fields[i].entry.set || !fields[i].logfmt) + continue; + + const char *key = fields[i].logfmt; + + const char *s = NULL; + switch(fields[i].entry.type) { + case NDFT_TXT: + s = fields[i].entry.txt; + break; + case NDFT_STR: + s = string2str(fields[i].entry.str); + break; + case NDFT_BFR: + s = buffer_tostring(fields[i].entry.bfr); + break; + case NDFT_U64: + buffer_json_member_add_uint64(wb, key, fields[i].entry.u64); + break; + case NDFT_I64: + buffer_json_member_add_int64(wb, key, fields[i].entry.i64); + break; + case NDFT_DBL: + buffer_json_member_add_double(wb, key, fields[i].entry.dbl); + break; + case NDFT_UUID: + if(!uuid_is_null(*fields[i].entry.uuid)) { + char u[UUID_COMPACT_STR_LEN]; + uuid_unparse_lower_compact(*fields[i].entry.uuid, u); + buffer_json_member_add_string(wb, key, u); + } + break; + case NDFT_CALLBACK: { + if(!tmp) + tmp = buffer_create(1024, NULL); + else + buffer_flush(tmp); + if(fields[i].entry.cb.formatter(tmp, fields[i].entry.cb.formatter_data)) + s = buffer_tostring(tmp); + else + s = NULL; + } + break; + default: + s = "UNHANDLED"; + break; + } + + if(s && *s) + buffer_json_member_add_string(wb, key, s); + } + + buffer_json_finalize(wb); +} diff --git a/src/libnetdata/log/nd_log-format-logfmt.c b/src/libnetdata/log/nd_log-format-logfmt.c new file mode 100644 index 000000000..d65211dfc --- /dev/null +++ b/src/libnetdata/log/nd_log-format-logfmt.c @@ -0,0 +1,151 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "nd_log-internals.h" + +static bool needs_quotes_for_logfmt(const char *s) +{ + static bool safe_for_logfmt[256] = { + [' '] = true, ['!'] = true, ['"'] = false, ['#'] = true, ['$'] = true, ['%'] = true, ['&'] = true, + ['\''] = true, ['('] = true, [')'] = true, ['*'] = true, ['+'] = true, [','] = true, ['-'] = true, + ['.'] = true, ['/'] = true, ['0'] = true, ['1'] = true, ['2'] = true, ['3'] = true, ['4'] = true, + ['5'] = true, ['6'] = true, ['7'] = true, ['8'] = true, ['9'] = true, [':'] = true, [';'] = true, + ['<'] = true, ['='] = true, ['>'] = true, ['?'] = true, ['@'] = true, ['A'] = true, ['B'] = true, + ['C'] = true, ['D'] = true, ['E'] = true, ['F'] = true, ['G'] = true, ['H'] = true, ['I'] = true, + ['J'] = true, ['K'] = true, ['L'] = true, ['M'] = true, ['N'] = true, ['O'] = true, ['P'] = true, + ['Q'] = true, ['R'] = true, ['S'] = true, ['T'] = true, ['U'] = true, ['V'] = true, ['W'] = true, + ['X'] = true, ['Y'] = true, ['Z'] = true, ['['] = true, ['\\'] = false, [']'] = true, ['^'] = true, + ['_'] = true, ['`'] = true, ['a'] = true, ['b'] = true, ['c'] = true, ['d'] = true, ['e'] = true, + ['f'] = true, ['g'] = true, ['h'] = true, ['i'] = true, ['j'] = true, ['k'] = true, ['l'] = true, + ['m'] = true, ['n'] = true, ['o'] = true, ['p'] = true, ['q'] = true, ['r'] = true, ['s'] = true, + ['t'] = true, ['u'] = true, ['v'] = true, ['w'] = true, ['x'] = true, ['y'] = true, ['z'] = true, + ['{'] = true, ['|'] = true, ['}'] = true, ['~'] = true, [0x7f] = true, + }; + + if(!*s) + return true; + + while(*s) { + if(*s == '=' || isspace((uint8_t)*s) || !safe_for_logfmt[(uint8_t)*s]) + return true; + + s++; + } + + return false; +} + +static void string_to_logfmt(BUFFER *wb, const char *s) +{ + bool spaces = needs_quotes_for_logfmt(s); + + if(spaces) + buffer_fast_strcat(wb, "\"", 1); + + 
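+ // logfmt has no escape syntax of its own, so the value is written with JSON escaping; + // e.g. the value hello "world" is emitted as "hello \"world\"" + // (needs_quotes_for_logfmt() forced the surrounding quotes here).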
buffer_json_strcat(wb, s); + + if(spaces) + buffer_fast_strcat(wb, "\"", 1); +} + +void nd_logger_logfmt(BUFFER *wb, struct log_field *fields, size_t fields_max) { + + // --- FIELD_PARSER_VERSIONS --- + // + // IMPORTANT: + // THERE ARE 6 VERSIONS OF THIS CODE + // + // 1. journal (direct socket API), + // 2. journal (libsystemd API), + // 3. logfmt, + // 4. json, + // 5. convert to uint64 + // 6. convert to int64 + // + // UPDATE ALL OF THEM FOR NEW FEATURES OR FIXES + + CLEAN_BUFFER *tmp = NULL; + + for (size_t i = 0; i < fields_max; i++) { + if (!fields[i].entry.set || !fields[i].logfmt) + continue; + + const char *key = fields[i].logfmt; + + if(fields[i].annotator) { + const char *s = fields[i].annotator(&fields[i]); + if(!s) continue; + + if(buffer_strlen(wb)) + buffer_fast_strcat(wb, " ", 1); + + buffer_strcat(wb, key); + buffer_fast_strcat(wb, "=", 1); + string_to_logfmt(wb, s); + } + else { + if(buffer_strlen(wb)) + buffer_fast_strcat(wb, " ", 1); + + switch(fields[i].entry.type) { + case NDFT_TXT: + if(*fields[i].entry.txt) { + buffer_strcat(wb, key); + buffer_fast_strcat(wb, "=", 1); + string_to_logfmt(wb, fields[i].entry.txt); + } + break; + case NDFT_STR: + buffer_strcat(wb, key); + buffer_fast_strcat(wb, "=", 1); + string_to_logfmt(wb, string2str(fields[i].entry.str)); + break; + case NDFT_BFR: + if(buffer_strlen(fields[i].entry.bfr)) { + buffer_strcat(wb, key); + buffer_fast_strcat(wb, "=", 1); + string_to_logfmt(wb, buffer_tostring(fields[i].entry.bfr)); + } + break; + case NDFT_U64: + buffer_strcat(wb, key); + buffer_fast_strcat(wb, "=", 1); + buffer_print_uint64(wb, fields[i].entry.u64); + break; + case NDFT_I64: + buffer_strcat(wb, key); + buffer_fast_strcat(wb, "=", 1); + buffer_print_int64(wb, fields[i].entry.i64); + break; + case NDFT_DBL: + buffer_strcat(wb, key); + buffer_fast_strcat(wb, "=", 1); + buffer_print_netdata_double(wb, fields[i].entry.dbl); + break; + case NDFT_UUID: + if(!uuid_is_null(*fields[i].entry.uuid)) { + char u[UUID_COMPACT_STR_LEN]; + uuid_unparse_lower_compact(*fields[i].entry.uuid, u); + buffer_strcat(wb, key); + buffer_fast_strcat(wb, "=", 1); + buffer_fast_strcat(wb, u, sizeof(u) - 1); + } + break; + case NDFT_CALLBACK: { + if(!tmp) + tmp = buffer_create(1024, NULL); + else + buffer_flush(tmp); + if(fields[i].entry.cb.formatter(tmp, fields[i].entry.cb.formatter_data)) { + buffer_strcat(wb, key); + buffer_fast_strcat(wb, "=", 1); + string_to_logfmt(wb, buffer_tostring(tmp)); + } + } + break; + default: + buffer_strcat(wb, "UNHANDLED"); + break; + } + } + } +} diff --git a/src/libnetdata/log/nd_log-init.c b/src/libnetdata/log/nd_log-init.c new file mode 100644 index 000000000..7f846b136 --- /dev/null +++ b/src/libnetdata/log/nd_log-init.c @@ -0,0 +1,313 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "nd_log-internals.h" + +// -------------------------------------------------------------------------------------------------------------------- + +__attribute__((constructor)) void initialize_invocation_id(void) { + // check for a NETDATA_INVOCATION_ID + if(uuid_parse_flexi(getenv("NETDATA_INVOCATION_ID"), nd_log.invocation_id) != 0) { + // not found, check for systemd set INVOCATION_ID + if(uuid_parse_flexi(getenv("INVOCATION_ID"), nd_log.invocation_id) != 0) { + // not found, generate a new one + uuid_generate_random(nd_log.invocation_id); + } + } + + char uuid[UUID_COMPACT_STR_LEN]; + uuid_unparse_lower_compact(nd_log.invocation_id, uuid); + nd_setenv("NETDATA_INVOCATION_ID", uuid, 1); +} + +// 
-------------------------------------------------------------------------------------------------------------------- + +void nd_log_initialize_for_external_plugins(const char *name) { + // if we don't run under Netdata, log to stderr, + // otherwise, use the logging method Netdata wants us to use. +#if defined(OS_WINDOWS) +#if defined(HAVE_ETW) + nd_setenv("NETDATA_LOG_METHOD", ETW_NAME, 0); + nd_setenv("NETDATA_LOG_FORMAT", ETW_NAME, 0); +#elif defined(HAVE_WEL) + nd_setenv("NETDATA_LOG_METHOD", WEL_NAME, 0); + nd_setenv("NETDATA_LOG_FORMAT", WEL_NAME, 0); +#else + nd_setenv("NETDATA_LOG_METHOD", "stderr", 0); + nd_setenv("NETDATA_LOG_FORMAT", "logfmt", 0); +#endif +#else + nd_setenv("NETDATA_LOG_METHOD", "stderr", 0); + nd_setenv("NETDATA_LOG_FORMAT", "logfmt", 0); +#endif + + nd_log.overwrite_process_source = NDLS_COLLECTORS; + program_name = name; + + for(size_t i = 0; i < _NDLS_MAX ;i++) { + nd_log.sources[i].method = NDLM_DEFAULT; + nd_log.sources[i].fd = -1; + nd_log.sources[i].fp = NULL; + } + + nd_log_set_priority_level(getenv("NETDATA_LOG_LEVEL")); + nd_log_set_facility(getenv("NETDATA_SYSLOG_FACILITY")); + + time_t period = 1200; + size_t logs = 200; + const char *s = getenv("NETDATA_ERRORS_THROTTLE_PERIOD"); + if(s && *s >= '0' && *s <= '9') { + period = str2l(s); + if(period < 0) period = 0; + } + + s = getenv("NETDATA_ERRORS_PER_PERIOD"); + if(s && *s >= '0' && *s <= '9') + logs = str2u(s); + + nd_log_set_flood_protection(logs, period); + + if(!netdata_configured_host_prefix) { + s = getenv("NETDATA_HOST_PREFIX"); + if(s && *s) + netdata_configured_host_prefix = (char *)s; + } + + ND_LOG_METHOD method = nd_log_method2id(getenv("NETDATA_LOG_METHOD")); + ND_LOG_FORMAT format = nd_log_format2id(getenv("NETDATA_LOG_FORMAT")); + + if(!IS_VALID_LOG_METHOD_FOR_EXTERNAL_PLUGINS(method)) { + if(is_stderr_connected_to_journal()) { + nd_log(NDLS_COLLECTORS, NDLP_WARNING, "NETDATA_LOG_METHOD is not set. Using journal."); + method = NDLM_JOURNAL; + } + else { + nd_log(NDLS_COLLECTORS, NDLP_WARNING, "NETDATA_LOG_METHOD is not set. Using stderr."); + method = NDLM_STDERR; + } + } + + switch(method) { + case NDLM_JOURNAL: + if(!nd_log_journal_direct_init(getenv("NETDATA_SYSTEMD_JOURNAL_PATH")) || + !nd_log_journal_direct_init(NULL) || !nd_log_journal_systemd_init()) { + nd_log(NDLS_COLLECTORS, NDLP_WARNING, "Failed to initialize journal. Using stderr."); + method = NDLM_STDERR; + } + break; + +#if defined(OS_WINDOWS) +#if defined(HAVE_ETW) + case NDLM_ETW: + if(!nd_log_init_etw()) { + nd_log(NDLS_COLLECTORS, NDLP_WARNING, "Failed to initialize Events Tracing for Windows (ETW). Using stderr."); + method = NDLM_STDERR; + } + break; +#endif +#if defined(HAVE_WEL) + case NDLM_WEL: + if(!nd_log_init_wel()) { + nd_log(NDLS_COLLECTORS, NDLP_WARNING, "Failed to initialize Windows Event Log (WEL). 
Using stderr."); + method = NDLM_STDERR; + } + break; +#endif +#endif + + case NDLM_SYSLOG: + nd_log_init_syslog(); + break; + + default: + method = NDLM_STDERR; + break; + } + + nd_log.sources[NDLS_COLLECTORS].method = method; + nd_log.sources[NDLS_COLLECTORS].format = format; + nd_log.sources[NDLS_COLLECTORS].fd = -1; + nd_log.sources[NDLS_COLLECTORS].fp = NULL; + + // nd_log(NDLS_COLLECTORS, NDLP_NOTICE, "FINAL_LOG_METHOD: %s", nd_log_id2method(method)); +} + +// -------------------------------------------------------------------------------------------------------------------- + +void nd_log_open(struct nd_log_source *e, ND_LOG_SOURCES source) { + if(e->method == NDLM_DEFAULT) + nd_log_set_user_settings(source, e->filename); + + if((e->method == NDLM_FILE && !e->filename) || + (e->method == NDLM_DEVNULL && e->fd == -1)) + e->method = NDLM_DISABLED; + + if(e->fp) + fflush(e->fp); + + switch(e->method) { + case NDLM_SYSLOG: + nd_log_init_syslog(); + break; + + case NDLM_JOURNAL: + nd_log_journal_direct_init(NULL); + nd_log_journal_systemd_init(); + break; + +#if defined(OS_WINDOWS) +#if defined(HAVE_ETW) + case NDLM_ETW: + nd_log_init_etw(); + break; +#endif +#if defined(HAVE_WEL) + case NDLM_WEL: + nd_log_init_wel(); + break; +#endif +#endif + + case NDLM_STDOUT: + e->fp = stdout; + e->fd = STDOUT_FILENO; + break; + + case NDLM_DISABLED: + break; + + case NDLM_DEFAULT: + case NDLM_STDERR: + e->method = NDLM_STDERR; + e->fp = stderr; + e->fd = STDERR_FILENO; + break; + + case NDLM_DEVNULL: + case NDLM_FILE: { + int fd = open(e->filename, O_WRONLY | O_APPEND | O_CREAT, 0664); + if(fd == -1) { + if(e->fd != STDOUT_FILENO && e->fd != STDERR_FILENO) { + e->fd = STDERR_FILENO; + e->method = NDLM_STDERR; + netdata_log_error("Cannot open log file '%s'. Falling back to stderr.", e->filename); + } + else + netdata_log_error("Cannot open log file '%s'. 
Leaving fd %d as-is.", e->filename, e->fd); + } + else { + if (!nd_log_replace_existing_fd(e, fd)) { + if(e->fd == STDOUT_FILENO || e->fd == STDERR_FILENO) { + if(e->fd == STDOUT_FILENO) + e->method = NDLM_STDOUT; + else if(e->fd == STDERR_FILENO) + e->method = NDLM_STDERR; + + // we have dup2() fd, so we can close the one we opened + if(fd != STDOUT_FILENO && fd != STDERR_FILENO) + close(fd); + } + else + e->fd = fd; + } + } + + // at this point we have e->fd set properly + + if(e->fd == STDOUT_FILENO) + e->fp = stdout; + else if(e->fd == STDERR_FILENO) + e->fp = stderr; + + if(!e->fp) { + e->fp = fdopen(e->fd, "a"); + if (!e->fp) { + netdata_log_error("Cannot fdopen() fd %d ('%s')", e->fd, e->filename); + + if(e->fd != STDOUT_FILENO && e->fd != STDERR_FILENO) + close(e->fd); + + e->fp = stderr; + e->fd = STDERR_FILENO; + } + } + else { + if (setvbuf(e->fp, NULL, _IOLBF, 0) != 0) + netdata_log_error("Cannot set line buffering on fd %d ('%s')", e->fd, e->filename); + } + } + break; + } +} + +// -------------------------------------------------------------------------------------------------------------------- + +void nd_log_stdin_init(int fd, const char *filename) { + int f = open(filename, O_WRONLY | O_APPEND | O_CREAT, 0664); + if(f == -1) + return; + + if(f != fd) { + dup2(f, fd); + close(f); + } +} + +void nd_log_initialize(void) { + nd_log_stdin_init(STDIN_FILENO, "/dev/null"); + + for(size_t i = 0 ; i < _NDLS_MAX ; i++) + nd_log_open(&nd_log.sources[i], i); +} + +void nd_log_reopen_log_files(bool log) { + if(log) + netdata_log_info("Reopening all log files."); + + nd_log_initialize(); + + if(log) + netdata_log_info("Log files re-opened."); +} + +int nd_log_systemd_journal_fd(void) { + return nd_log.journal.fd; +} + +void nd_log_reopen_log_files_for_spawn_server(const char *name) { + gettid_uncached(); + + if(nd_log.syslog.initialized) { + closelog(); + nd_log.syslog.initialized = false; + nd_log_init_syslog(); + } + + if(nd_log.journal_direct.initialized) { + close(nd_log.journal_direct.fd); + nd_log.journal_direct.fd = -1; + nd_log.journal_direct.initialized = false; + } + + for(size_t i = 0; i < _NDLS_MAX ;i++) { + spinlock_init(&nd_log.sources[i].spinlock); + nd_log.sources[i].method = NDLM_DEFAULT; + nd_log.sources[i].fd = -1; + nd_log.sources[i].fp = NULL; + nd_log.sources[i].pending_msg = NULL; +#if defined(OS_WINDOWS) + nd_log.sources[i].hEventLog = NULL; +#endif + } + + // initialize spinlocks + spinlock_init(&nd_log.std_output.spinlock); + spinlock_init(&nd_log.std_error.spinlock); + + nd_log.syslog.initialized = false; + nd_log.eventlog.initialized = false; + nd_log.std_output.initialized = false; + nd_log.std_error.initialized = false; + + nd_log_initialize_for_external_plugins(name); +} + diff --git a/src/libnetdata/log/nd_log-internals.c b/src/libnetdata/log/nd_log-internals.c new file mode 100644 index 000000000..97f521fad --- /dev/null +++ b/src/libnetdata/log/nd_log-internals.c @@ -0,0 +1,823 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "nd_log-internals.h" + +// -------------------------------------------------------------------------------------------------------------------- +// workaround strerror_r() + +#if defined(STRERROR_R_CHAR_P) +// GLIBC version of strerror_r +static const char *strerror_result(const char *a, const char *b) { (void)b; return a; } +#elif defined(HAVE_STRERROR_R) +// POSIX version of strerror_r +static const char *strerror_result(int a, const char *b) { (void)a; return b; } +#elif defined(HAVE_C__GENERIC) + +// what a trick! 
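+// _Generic() dispatches on the return type of strerror_r(): the GNU variant returns char *, +// while the POSIX variant returns int, so the matching wrapper is selected at compile time.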
+// http://stackoverflow.com/questions/479207/function-overloading-in-c +static const char *strerror_result_int(int a, const char *b) { (void)a; return b; } +static const char *strerror_result_string(const char *a, const char *b) { (void)b; return a; } + +#define strerror_result(a, b) _Generic((a), \ + int: strerror_result_int, \ + char *: strerror_result_string \ + )(a, b) + +#else +#error "cannot detect the format of function strerror_r()" +#endif + +const char *errno2str(int errnum, char *buf, size_t size) { + return strerror_result(strerror_r(errnum, buf, size), buf); +} + +// -------------------------------------------------------------------------------------------------------------------- +// logging method + +static struct { + ND_LOG_METHOD method; + const char *name; +} nd_log_methods[] = { + { .method = NDLM_DISABLED, .name = "none" }, + { .method = NDLM_DEVNULL, .name = "/dev/null" }, + { .method = NDLM_DEFAULT, .name = "default" }, + { .method = NDLM_JOURNAL, .name = "journal" }, + { .method = NDLM_SYSLOG, .name = "syslog" }, + { .method = NDLM_STDOUT, .name = "stdout" }, + { .method = NDLM_STDERR, .name = "stderr" }, + { .method = NDLM_FILE, .name = "file" }, +#if defined(OS_WINDOWS) +#if defined(HAVE_ETW) + { .method = NDLM_ETW, .name = ETW_NAME }, +#endif +#if defined(HAVE_WEL) + { .method = NDLM_WEL, .name = WEL_NAME }, +#endif +#endif +}; + +ND_LOG_METHOD nd_log_method2id(const char *method) { + if(!method || !*method) + return NDLM_DEFAULT; + + size_t entries = sizeof(nd_log_methods) / sizeof(nd_log_methods[0]); + for(size_t i = 0; i < entries ;i++) { + if(strcmp(nd_log_methods[i].name, method) == 0) + return nd_log_methods[i].method; + } + + return NDLM_FILE; +} + +const char *nd_log_id2method(ND_LOG_METHOD method) { + size_t entries = sizeof(nd_log_methods) / sizeof(nd_log_methods[0]); + for(size_t i = 0; i < entries ;i++) { + if(method == nd_log_methods[i].method) + return nd_log_methods[i].name; + } + + return "unknown"; +} + +const char *nd_log_method_for_external_plugins(const char *s) { + if(s && *s) { + ND_LOG_METHOD method = nd_log_method2id(s); + if(IS_VALID_LOG_METHOD_FOR_EXTERNAL_PLUGINS(method)) + return nd_log_id2method(method); + } + + return nd_log_id2method(NDLM_STDERR); +} + +// -------------------------------------------------------------------------------------------------------------------- +// facilities +// +// sys/syslog.h (Linux) +// sys/sys/syslog.h (FreeBSD) +// bsd/sys/syslog.h (darwin-xnu) + +static struct { + int facility; + const char *name; +} nd_log_facilities[] = { + { LOG_AUTH, "auth" }, + { LOG_AUTHPRIV, "authpriv" }, + { LOG_CRON, "cron" }, + { LOG_DAEMON, "daemon" }, + { LOG_FTP, "ftp" }, + { LOG_KERN, "kern" }, + { LOG_LPR, "lpr" }, + { LOG_MAIL, "mail" }, + { LOG_NEWS, "news" }, + { LOG_SYSLOG, "syslog" }, + { LOG_USER, "user" }, + { LOG_UUCP, "uucp" }, + { LOG_LOCAL0, "local0" }, + { LOG_LOCAL1, "local1" }, + { LOG_LOCAL2, "local2" }, + { LOG_LOCAL3, "local3" }, + { LOG_LOCAL4, "local4" }, + { LOG_LOCAL5, "local5" }, + { LOG_LOCAL6, "local6" }, + { LOG_LOCAL7, "local7" }, + +#ifdef __FreeBSD__ + { LOG_CONSOLE, "console" }, + { LOG_NTP, "ntp" }, + + // FreeBSD does not consider 'security' as deprecated. + { LOG_SECURITY, "security" }, +#else + // For all other O/S 'security' is mapped to 'auth'. 
+ { LOG_AUTH, "security" }, +#endif + +#ifdef __APPLE__ + { LOG_INSTALL, "install" }, + { LOG_NETINFO, "netinfo" }, + { LOG_RAS, "ras" }, + { LOG_REMOTEAUTH, "remoteauth" }, + { LOG_LAUNCHD, "launchd" }, + +#endif +}; + +int nd_log_facility2id(const char *facility) { + size_t entries = sizeof(nd_log_facilities) / sizeof(nd_log_facilities[0]); + for(size_t i = 0; i < entries ;i++) { + if(strcmp(nd_log_facilities[i].name, facility) == 0) + return nd_log_facilities[i].facility; + } + + return LOG_DAEMON; +} + +const char *nd_log_id2facility(int facility) { + size_t entries = sizeof(nd_log_facilities) / sizeof(nd_log_facilities[0]); + for(size_t i = 0; i < entries ;i++) { + if(nd_log_facilities[i].facility == facility) + return nd_log_facilities[i].name; + } + + return "daemon"; +} + +// -------------------------------------------------------------------------------------------------------------------- +// priorities + +static struct { + ND_LOG_FIELD_PRIORITY priority; + const char *name; +} nd_log_priorities[] = { + { .priority = NDLP_EMERG, .name = "emergency" }, + { .priority = NDLP_EMERG, .name = "emerg" }, + { .priority = NDLP_ALERT, .name = "alert" }, + { .priority = NDLP_CRIT, .name = "critical" }, + { .priority = NDLP_CRIT, .name = "crit" }, + { .priority = NDLP_ERR, .name = "error" }, + { .priority = NDLP_ERR, .name = "err" }, + { .priority = NDLP_WARNING, .name = "warning" }, + { .priority = NDLP_WARNING, .name = "warn" }, + { .priority = NDLP_NOTICE, .name = "notice" }, + { .priority = NDLP_INFO, .name = NDLP_INFO_STR }, + { .priority = NDLP_DEBUG, .name = "debug" }, +}; + +int nd_log_priority2id(const char *priority) { + size_t entries = sizeof(nd_log_priorities) / sizeof(nd_log_priorities[0]); + for(size_t i = 0; i < entries ;i++) { + if(strcmp(nd_log_priorities[i].name, priority) == 0) + return nd_log_priorities[i].priority; + } + + return NDLP_INFO; +} + +const char *nd_log_id2priority(ND_LOG_FIELD_PRIORITY priority) { + size_t entries = sizeof(nd_log_priorities) / sizeof(nd_log_priorities[0]); + for(size_t i = 0; i < entries ;i++) { + if(priority == nd_log_priorities[i].priority) + return nd_log_priorities[i].name; + } + + return NDLP_INFO_STR; +} + +// -------------------------------------------------------------------------------------------------------------------- +// log sources + +const char *nd_log_sources[] = { + [NDLS_UNSET] = "UNSET", + [NDLS_ACCESS] = "access", + [NDLS_ACLK] = "aclk", + [NDLS_COLLECTORS] = "collector", + [NDLS_DAEMON] = "daemon", + [NDLS_HEALTH] = "health", + [NDLS_DEBUG] = "debug", +}; + +size_t nd_log_source2id(const char *source, ND_LOG_SOURCES def) { + size_t entries = sizeof(nd_log_sources) / sizeof(nd_log_sources[0]); + for(size_t i = 0; i < entries ;i++) { + if(strcmp(nd_log_sources[i], source) == 0) + return i; + } + + return def; +} + + +const char *nd_log_id2source(ND_LOG_SOURCES source) { + size_t entries = sizeof(nd_log_sources) / sizeof(nd_log_sources[0]); + if(source < entries) + return nd_log_sources[source]; + + return nd_log_sources[NDLS_COLLECTORS]; +} + +// -------------------------------------------------------------------------------------------------------------------- +// log output formats + +static struct { + ND_LOG_FORMAT format; + const char *name; +} nd_log_formats[] = { + { .format = NDLF_JOURNAL, .name = "journal" }, + { .format = NDLF_LOGFMT, .name = "logfmt" }, + { .format = NDLF_JSON, .name = "json" }, +#if defined(OS_WINDOWS) +#if defined(HAVE_ETW) + { .format = NDLF_ETW, .name = ETW_NAME }, +#endif +#if 
defined(HAVE_WEL) + { .format = NDLF_WEL, .name = WEL_NAME }, +#endif +#endif +}; + +ND_LOG_FORMAT nd_log_format2id(const char *format) { + if(!format || !*format) + return NDLF_LOGFMT; + + size_t entries = sizeof(nd_log_formats) / sizeof(nd_log_formats[0]); + for(size_t i = 0; i < entries ;i++) { + if(strcmp(nd_log_formats[i].name, format) == 0) + return nd_log_formats[i].format; + } + + return NDLF_LOGFMT; +} + +const char *nd_log_id2format(ND_LOG_FORMAT format) { + size_t entries = sizeof(nd_log_formats) / sizeof(nd_log_formats[0]); + for(size_t i = 0; i < entries ;i++) { + if(format == nd_log_formats[i].format) + return nd_log_formats[i].name; + } + + return "logfmt"; +} + +// -------------------------------------------------------------------------------------------------------------------- + +struct nd_log nd_log = { + .overwrite_process_source = 0, + .journal = { + .initialized = false, + .first_msg = false, + .fd = -1, + }, + .journal_direct = { + .initialized = false, + .fd = -1, + }, + .syslog = { + .initialized = false, + .facility = LOG_DAEMON, + }, +#if defined(OS_WINDOWS) + .eventlog = { + .initialized = false, + }, +#endif + .std_output = { + .spinlock = NETDATA_SPINLOCK_INITIALIZER, + .initialized = false, + }, + .std_error = { + .spinlock = NETDATA_SPINLOCK_INITIALIZER, + .initialized = false, + }, + .sources = { + [NDLS_UNSET] = { + .spinlock = NETDATA_SPINLOCK_INITIALIZER, + .method = NDLM_DISABLED, + .format = NDLF_JOURNAL, + .filename = NULL, + .fd = -1, + .fp = NULL, + .min_priority = NDLP_EMERG, + .limits = ND_LOG_LIMITS_UNLIMITED, + }, + [NDLS_ACCESS] = { + .spinlock = NETDATA_SPINLOCK_INITIALIZER, + .method = NDLM_DEFAULT, + .format = NDLF_LOGFMT, + .filename = LOG_DIR "/access.log", + .fd = -1, + .fp = NULL, + .min_priority = NDLP_DEBUG, + .limits = ND_LOG_LIMITS_UNLIMITED, + }, + [NDLS_ACLK] = { + .spinlock = NETDATA_SPINLOCK_INITIALIZER, + .method = NDLM_FILE, + .format = NDLF_LOGFMT, + .filename = LOG_DIR "/aclk.log", + .fd = -1, + .fp = NULL, + .min_priority = NDLP_DEBUG, + .limits = ND_LOG_LIMITS_UNLIMITED, + }, + [NDLS_COLLECTORS] = { + .spinlock = NETDATA_SPINLOCK_INITIALIZER, + .method = NDLM_DEFAULT, + .format = NDLF_LOGFMT, + .filename = LOG_DIR "/collector.log", + .fd = STDERR_FILENO, + .fp = NULL, + .min_priority = NDLP_INFO, + .limits = ND_LOG_LIMITS_DEFAULT, + }, + [NDLS_DEBUG] = { + .spinlock = NETDATA_SPINLOCK_INITIALIZER, + .method = NDLM_DISABLED, + .format = NDLF_LOGFMT, + .filename = LOG_DIR "/debug.log", + .fd = STDOUT_FILENO, + .fp = NULL, + .min_priority = NDLP_DEBUG, + .limits = ND_LOG_LIMITS_UNLIMITED, + }, + [NDLS_DAEMON] = { + .spinlock = NETDATA_SPINLOCK_INITIALIZER, + .method = NDLM_DEFAULT, + .filename = LOG_DIR "/daemon.log", + .format = NDLF_LOGFMT, + .fd = -1, + .fp = NULL, + .min_priority = NDLP_INFO, + .limits = ND_LOG_LIMITS_DEFAULT, + }, + [NDLS_HEALTH] = { + .spinlock = NETDATA_SPINLOCK_INITIALIZER, + .method = NDLM_DEFAULT, + .format = NDLF_LOGFMT, + .filename = LOG_DIR "/health.log", + .fd = -1, + .fp = NULL, + .min_priority = NDLP_DEBUG, + .limits = ND_LOG_LIMITS_UNLIMITED, + }, + }, +}; + +// -------------------------------------------------------------------------------------------------------------------- + +__thread struct log_stack_entry *thread_log_stack_base[THREAD_LOG_STACK_MAX]; +__thread size_t thread_log_stack_next = 0; +__thread struct log_field thread_log_fields[_NDF_MAX] = { + // THE ORDER HERE IS IRRELEVANT (but keep them sorted by their number) + + [NDF_STOP] = { // processing will not stop on this - so it 
is ok to be first + .journal = NULL, + .logfmt = NULL, + .eventlog = NULL, + .annotator = NULL, + }, + [NDF_TIMESTAMP_REALTIME_USEC] = { + .journal = NULL, + .eventlog = "Timestamp", + .logfmt = "time", + .annotator = timestamp_usec_annotator, + }, + [NDF_SYSLOG_IDENTIFIER] = { + .journal = "SYSLOG_IDENTIFIER", // standard journald field + .eventlog = "Program", + .logfmt = "comm", + }, + [NDF_LOG_SOURCE] = { + .journal = "ND_LOG_SOURCE", + .eventlog = "NetdataLogSource", + .logfmt = "source", + }, + [NDF_PRIORITY] = { + .journal = "PRIORITY", // standard journald field + .eventlog = "Level", + .logfmt = "level", + .annotator = priority_annotator, + }, + [NDF_ERRNO] = { + .journal = "ERRNO", // standard journald field + .eventlog = "UnixErrno", + .logfmt = "errno", + .annotator = errno_annotator, + }, + [NDF_WINERROR] = { +#if defined(OS_WINDOWS) + .journal = "WINERROR", + .eventlog = "WindowsLastError", + .logfmt = "winerror", + .annotator = winerror_annotator, +#endif + }, + [NDF_INVOCATION_ID] = { + .journal = "INVOCATION_ID", // standard journald field + .eventlog = "InvocationID", + .logfmt = NULL, + }, + [NDF_LINE] = { + .journal = "CODE_LINE", // standard journald field + .eventlog = "CodeLine", + .logfmt = NULL, + }, + [NDF_FILE] = { + .journal = "CODE_FILE", // standard journald field + .eventlog = "CodeFile", + .logfmt = NULL, + }, + [NDF_FUNC] = { + .journal = "CODE_FUNC", // standard journald field + .eventlog = "CodeFunction", + .logfmt = NULL, + }, + [NDF_TID] = { + .journal = "TID", // standard journald field + .eventlog = "ThreadID", + .logfmt = "tid", + }, + [NDF_THREAD_TAG] = { + .journal = "THREAD_TAG", + .eventlog = "ThreadName", + .logfmt = "thread", + }, + [NDF_MESSAGE_ID] = { + .journal = "MESSAGE_ID", + .eventlog = "MessageID", + .logfmt = "msg_id", + }, + [NDF_MODULE] = { + .journal = "ND_MODULE", + .eventlog = "Module", + .logfmt = "module", + }, + [NDF_NIDL_NODE] = { + .journal = "ND_NIDL_NODE", + .eventlog = "Node", + .logfmt = "node", + }, + [NDF_NIDL_INSTANCE] = { + .journal = "ND_NIDL_INSTANCE", + .eventlog = "Instance", + .logfmt = "instance", + }, + [NDF_NIDL_CONTEXT] = { + .journal = "ND_NIDL_CONTEXT", + .eventlog = "Context", + .logfmt = "context", + }, + [NDF_NIDL_DIMENSION] = { + .journal = "ND_NIDL_DIMENSION", + .eventlog = "Dimension", + .logfmt = "dimension", + }, + [NDF_SRC_TRANSPORT] = { + .journal = "ND_SRC_TRANSPORT", + .eventlog = "SourceTransport", + .logfmt = "src_transport", + }, + [NDF_ACCOUNT_ID] = { + .journal = "ND_ACCOUNT_ID", + .eventlog = "AccountID", + .logfmt = "account", + }, + [NDF_USER_NAME] = { + .journal = "ND_USER_NAME", + .eventlog = "UserName", + .logfmt = "user", + }, + [NDF_USER_ROLE] = { + .journal = "ND_USER_ROLE", + .eventlog = "UserRole", + .logfmt = "role", + }, + [NDF_USER_ACCESS] = { + .journal = "ND_USER_PERMISSIONS", + .eventlog = "UserPermissions", + .logfmt = "permissions", + }, + [NDF_SRC_IP] = { + .journal = "ND_SRC_IP", + .eventlog = "SourceIP", + .logfmt = "src_ip", + }, + [NDF_SRC_FORWARDED_HOST] = { + .journal = "ND_SRC_FORWARDED_HOST", + .eventlog = "SourceForwardedHost", + .logfmt = "src_forwarded_host", + }, + [NDF_SRC_FORWARDED_FOR] = { + .journal = "ND_SRC_FORWARDED_FOR", + .eventlog = "SourceForwardedFor", + .logfmt = "src_forwarded_for", + }, + [NDF_SRC_PORT] = { + .journal = "ND_SRC_PORT", + .eventlog = "SourcePort", + .logfmt = "src_port", + }, + [NDF_SRC_CAPABILITIES] = { + .journal = "ND_SRC_CAPABILITIES", + .eventlog = "SourceCapabilities", + .logfmt = "src_capabilities", + }, + 
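+ // stream sender (established links), the same grouping as in ND_LOG_FIELD_ID; + // e.g. NDF_DST_IP is written as ND_DST_IP to systemd-journal, DestinationIP to the + // Windows event log, and dst_ip in logfmt output.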
[NDF_DST_TRANSPORT] = { + .journal = "ND_DST_TRANSPORT", + .eventlog = "DestinationTransport", + .logfmt = "dst_transport", + }, + [NDF_DST_IP] = { + .journal = "ND_DST_IP", + .eventlog = "DestinationIP", + .logfmt = "dst_ip", + }, + [NDF_DST_PORT] = { + .journal = "ND_DST_PORT", + .eventlog = "DestinationPort", + .logfmt = "dst_port", + }, + [NDF_DST_CAPABILITIES] = { + .journal = "ND_DST_CAPABILITIES", + .eventlog = "DestinationCapabilities", + .logfmt = "dst_capabilities", + }, + [NDF_REQUEST_METHOD] = { + .journal = "ND_REQUEST_METHOD", + .eventlog = "RequestMethod", + .logfmt = "req_method", + }, + [NDF_RESPONSE_CODE] = { + .journal = "ND_RESPONSE_CODE", + .eventlog = "ResponseCode", + .logfmt = "code", + }, + [NDF_CONNECTION_ID] = { + .journal = "ND_CONNECTION_ID", + .eventlog = "ConnectionID", + .logfmt = "conn", + }, + [NDF_TRANSACTION_ID] = { + .journal = "ND_TRANSACTION_ID", + .eventlog = "TransactionID", + .logfmt = "transaction", + }, + [NDF_RESPONSE_SENT_BYTES] = { + .journal = "ND_RESPONSE_SENT_BYTES", + .eventlog = "ResponseSentBytes", + .logfmt = "sent_bytes", + }, + [NDF_RESPONSE_SIZE_BYTES] = { + .journal = "ND_RESPONSE_SIZE_BYTES", + .eventlog = "ResponseSizeBytes", + .logfmt = "size_bytes", + }, + [NDF_RESPONSE_PREPARATION_TIME_USEC] = { + .journal = "ND_RESPONSE_PREP_TIME_USEC", + .eventlog = "ResponsePreparationTimeUsec", + .logfmt = "prep_ut", + }, + [NDF_RESPONSE_SENT_TIME_USEC] = { + .journal = "ND_RESPONSE_SENT_TIME_USEC", + .eventlog = "ResponseSentTimeUsec", + .logfmt = "sent_ut", + }, + [NDF_RESPONSE_TOTAL_TIME_USEC] = { + .journal = "ND_RESPONSE_TOTAL_TIME_USEC", + .eventlog = "ResponseTotalTimeUsec", + .logfmt = "total_ut", + }, + [NDF_ALERT_ID] = { + .journal = "ND_ALERT_ID", + .eventlog = "AlertID", + .logfmt = "alert_id", + }, + [NDF_ALERT_UNIQUE_ID] = { + .journal = "ND_ALERT_UNIQUE_ID", + .eventlog = "AlertUniqueID", + .logfmt = "alert_unique_id", + }, + [NDF_ALERT_TRANSITION_ID] = { + .journal = "ND_ALERT_TRANSITION_ID", + .eventlog = "AlertTransitionID", + .logfmt = "alert_transition_id", + }, + [NDF_ALERT_EVENT_ID] = { + .journal = "ND_ALERT_EVENT_ID", + .eventlog = "AlertEventID", + .logfmt = "alert_event_id", + }, + [NDF_ALERT_CONFIG_HASH] = { + .journal = "ND_ALERT_CONFIG", + .eventlog = "AlertConfig", + .logfmt = "alert_config", + }, + [NDF_ALERT_NAME] = { + .journal = "ND_ALERT_NAME", + .eventlog = "AlertName", + .logfmt = "alert", + }, + [NDF_ALERT_CLASS] = { + .journal = "ND_ALERT_CLASS", + .eventlog = "AlertClass", + .logfmt = "alert_class", + }, + [NDF_ALERT_COMPONENT] = { + .journal = "ND_ALERT_COMPONENT", + .eventlog = "AlertComponent", + .logfmt = "alert_component", + }, + [NDF_ALERT_TYPE] = { + .journal = "ND_ALERT_TYPE", + .eventlog = "AlertType", + .logfmt = "alert_type", + }, + [NDF_ALERT_EXEC] = { + .journal = "ND_ALERT_EXEC", + .eventlog = "AlertExec", + .logfmt = "alert_exec", + }, + [NDF_ALERT_RECIPIENT] = { + .journal = "ND_ALERT_RECIPIENT", + .eventlog = "AlertRecipient", + .logfmt = "alert_recipient", + }, + [NDF_ALERT_VALUE] = { + .journal = "ND_ALERT_VALUE", + .eventlog = "AlertValue", + .logfmt = "alert_value", + }, + [NDF_ALERT_VALUE_OLD] = { + .journal = "ND_ALERT_VALUE_OLD", + .eventlog = "AlertOldValue", + .logfmt = "alert_value_old", + }, + [NDF_ALERT_STATUS] = { + .journal = "ND_ALERT_STATUS", + .eventlog = "AlertStatus", + .logfmt = "alert_status", + }, + [NDF_ALERT_STATUS_OLD] = { + .journal = "ND_ALERT_STATUS_OLD", + .eventlog = "AlertOldStatus", + .logfmt = "alert_status_old", + }, + [NDF_ALERT_UNITS] = { + .journal 
= "ND_ALERT_UNITS", + .eventlog = "AlertUnits", + .logfmt = "alert_units", + }, + [NDF_ALERT_SUMMARY] = { + .journal = "ND_ALERT_SUMMARY", + .eventlog = "AlertSummary", + .logfmt = "alert_summary", + }, + [NDF_ALERT_INFO] = { + .journal = "ND_ALERT_INFO", + .eventlog = "AlertInfo", + .logfmt = "alert_info", + }, + [NDF_ALERT_DURATION] = { + .journal = "ND_ALERT_DURATION", + .eventlog = "AlertDuration", + .logfmt = "alert_duration", + }, + [NDF_ALERT_NOTIFICATION_REALTIME_USEC] = { + .journal = "ND_ALERT_NOTIFICATION_TIMESTAMP_USEC", + .eventlog = "AlertNotificationTime", + .logfmt = "alert_notification_timestamp", + .annotator = timestamp_usec_annotator, + }, + + // put new items here + // leave the request URL and the message last + + [NDF_REQUEST] = { + .journal = "ND_REQUEST", + .eventlog = "Request", + .logfmt = "request", + }, + [NDF_MESSAGE] = { + .journal = "MESSAGE", + .eventlog = "Message", + .logfmt = "msg", + }, +}; + +// -------------------------------------------------------------------------------------------------------------------- + +void log_stack_pop(void *ptr) { + if(!ptr) return; + + struct log_stack_entry *lgs = *(struct log_stack_entry (*)[])ptr; + + if(unlikely(!thread_log_stack_next || lgs != thread_log_stack_base[thread_log_stack_next - 1])) { + fatal("You cannot pop in the middle of the stack, or an item not in the stack"); + return; + } + + thread_log_stack_next--; +} + +void log_stack_push(struct log_stack_entry *lgs) { + if(!lgs || thread_log_stack_next >= THREAD_LOG_STACK_MAX) return; + thread_log_stack_base[thread_log_stack_next++] = lgs; +} + +// -------------------------------------------------------------------------------------------------------------------- + +ND_LOG_FIELD_ID nd_log_field_id_by_journal_name(const char *field, size_t len) { + for(size_t i = 0; i < THREAD_FIELDS_MAX ;i++) { + if(thread_log_fields[i].journal && strlen(thread_log_fields[i].journal) == len && strncmp(field, thread_log_fields[i].journal, len) == 0) + return i; + } + + return NDF_STOP; +} + +// -------------------------------------------------------------------------------------------------------------------- + +int nd_log_health_fd(void) { + if(nd_log.sources[NDLS_HEALTH].method == NDLM_FILE && nd_log.sources[NDLS_HEALTH].fd != -1) + return nd_log.sources[NDLS_HEALTH].fd; + + return STDERR_FILENO; +} + +int nd_log_collectors_fd(void) { + if(nd_log.sources[NDLS_COLLECTORS].method == NDLM_FILE && nd_log.sources[NDLS_COLLECTORS].fd != -1) + return nd_log.sources[NDLS_COLLECTORS].fd; + + return STDERR_FILENO; +} + +// -------------------------------------------------------------------------------------------------------------------- + +void log_date(char *buffer, size_t len, time_t now) { + if(unlikely(!buffer || !len)) + return; + + time_t t = now; + struct tm *tmp, tmbuf; + + tmp = localtime_r(&t, &tmbuf); + + if (unlikely(!tmp)) { + buffer[0] = '\0'; + return; + } + + if (unlikely(strftime(buffer, len, "%Y-%m-%d %H:%M:%S", tmp) == 0)) + buffer[0] = '\0'; + + buffer[len - 1] = '\0'; +} + +// -------------------------------------------------------------------------------------------------------------------- + +bool nd_log_replace_existing_fd(struct nd_log_source *e, int new_fd) { + if(new_fd == -1 || e->fd == -1 || + (e->fd == STDOUT_FILENO && nd_log.std_output.initialized) || + (e->fd == STDERR_FILENO && nd_log.std_error.initialized)) + return false; + + if(new_fd != e->fd) { + int t = dup2(new_fd, e->fd); + + bool ret = true; + if (t == -1) { + netdata_log_error("Cannot dup2() 
new fd %d to old fd %d for '%s'", new_fd, e->fd, e->filename); + ret = false; + } + else + close(new_fd); + + if(e->fd == STDOUT_FILENO) + nd_log.std_output.initialized = true; + else if(e->fd == STDERR_FILENO) + nd_log.std_error.initialized = true; + + return ret; + } + + return false; +} diff --git a/src/libnetdata/log/nd_log-internals.h b/src/libnetdata/log/nd_log-internals.h new file mode 100644 index 000000000..7bebf5a4a --- /dev/null +++ b/src/libnetdata/log/nd_log-internals.h @@ -0,0 +1,249 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_ND_LOG_INTERNALS_H +#define NETDATA_ND_LOG_INTERNALS_H + +#include "../libnetdata.h" + +#ifdef __FreeBSD__ +#include <sys/endian.h> +#endif + +#ifdef __APPLE__ +#include <machine/endian.h> +#endif + +#if !defined(ENABLE_SENTRY) && defined(HAVE_BACKTRACE) +#include <execinfo.h> +#endif + +#ifdef HAVE_SYSTEMD +#include <systemd/sd-journal.h> +#endif + +const char *errno2str(int errnum, char *buf, size_t size); + +// -------------------------------------------------------------------------------------------------------------------- +// ND_LOG_METHOD + +typedef enum __attribute__((__packed__)) { + NDLM_DISABLED = 0, + NDLM_DEVNULL, + NDLM_DEFAULT, + NDLM_JOURNAL, + NDLM_SYSLOG, + NDLM_STDOUT, + NDLM_STDERR, + NDLM_FILE, +#if defined(OS_WINDOWS) +#if defined(HAVE_ETW) + NDLM_ETW, +#endif +#if defined(HAVE_WEL) + NDLM_WEL, +#endif +#endif +} ND_LOG_METHOD; + +// all the log methods are finally mapped to these +#if defined(HAVE_ETW) +#define ETW_CONDITION(ndlo) ((ndlo) == NDLM_ETW) +#else +#define ETW_CONDITION(ndlo) (false) +#endif + +#if defined(HAVE_WEL) +#define WEL_CONDITION(ndlo) ((ndlo) == NDLM_WEL) +#else +#define WEL_CONDITION(ndlo) (false) +#endif + +#define IS_VALID_LOG_METHOD_FOR_EXTERNAL_PLUGINS(ndlo) ((ndlo) == NDLM_JOURNAL || (ndlo) == NDLM_SYSLOG || (ndlo) == NDLM_STDERR || ETW_CONDITION(ndlo) || WEL_CONDITION(ndlo)) +#define IS_FINAL_LOG_METHOD(ndlo) ((ndlo) == NDLM_FILE || (ndlo) == NDLM_JOURNAL || (ndlo) == NDLM_SYSLOG || ETW_CONDITION(ndlo) || WEL_CONDITION(ndlo)) + +ND_LOG_METHOD nd_log_method2id(const char *method); +const char *nd_log_id2method(ND_LOG_METHOD method); + +// -------------------------------------------------------------------------------------------------------------------- +// ND_LOG_FORMAT + +typedef enum __attribute__((__packed__)) { + NDLF_JOURNAL, + NDLF_LOGFMT, + NDLF_JSON, +#if defined(OS_WINDOWS) +#if defined(HAVE_ETW) + NDLF_ETW, // Event Tracing for Windows +#endif +#if defined(HAVE_WEL) + NDLF_WEL, // Windows Event Log +#endif +#endif +} ND_LOG_FORMAT; + +#define ETW_NAME "etw" +#define WEL_NAME "wel" + +const char *nd_log_id2format(ND_LOG_FORMAT format); +ND_LOG_FORMAT nd_log_format2id(const char *format); + +size_t nd_log_source2id(const char *source, ND_LOG_SOURCES def); +const char *nd_log_id2source(ND_LOG_SOURCES source); + +const char *nd_log_id2priority(ND_LOG_FIELD_PRIORITY priority); +int nd_log_priority2id(const char *priority); + +const char *nd_log_id2facility(int facility); +int nd_log_facility2id(const char *facility); + +#include "nd_log_limit.h" + +struct nd_log_source { + SPINLOCK spinlock; + ND_LOG_METHOD method; + ND_LOG_FORMAT format; + const char *filename; + int fd; + FILE *fp; + + ND_LOG_FIELD_PRIORITY min_priority; + const char *pending_msg; + struct nd_log_limit limits; + +#if defined(OS_WINDOWS) + ND_LOG_SOURCES source; + HANDLE hEventLog; + USHORT channelID; + UCHAR Opcode; + USHORT Task; + ULONGLONG Keyword; +#endif +}; + +struct nd_log { + nd_uuid_t invocation_id; + + ND_LOG_SOURCES overwrite_process_source; + + struct 
nd_log_source sources[_NDLS_MAX]; + + struct { + bool initialized; + bool first_msg; + int fd; // we don't control this, we just detect it to keep it open + } journal; + + struct { + bool initialized; + int fd; + char filename[FILENAME_MAX]; + } journal_direct; + + struct { + bool initialized; + int facility; + } syslog; + + struct { + bool etw; // when set use etw, otherwise wel + bool initialized; + } eventlog; + + struct { + SPINLOCK spinlock; + bool initialized; + } std_output; + + struct { + SPINLOCK spinlock; + bool initialized; + } std_error; + +}; + +// -------------------------------------------------------------------------------------------------------------------- + +struct log_field; +typedef const char *(*annotator_t)(struct log_field *lf); + +struct log_field { + const char *journal; + const char *logfmt; + const char *eventlog; + annotator_t annotator; + struct log_stack_entry entry; +}; + +#define THREAD_LOG_STACK_MAX 50 +#define THREAD_FIELDS_MAX (sizeof(thread_log_fields) / sizeof(thread_log_fields[0])) + +extern __thread struct log_stack_entry *thread_log_stack_base[THREAD_LOG_STACK_MAX]; +extern __thread size_t thread_log_stack_next; +extern __thread struct log_field thread_log_fields[_NDF_MAX]; + +// -------------------------------------------------------------------------------------------------------------------- + +extern struct nd_log nd_log; +bool nd_log_replace_existing_fd(struct nd_log_source *e, int new_fd); +void nd_log_open(struct nd_log_source *e, ND_LOG_SOURCES source); +void nd_log_stdin_init(int fd, const char *filename); + +// -------------------------------------------------------------------------------------------------------------------- +// annotators + +struct log_field; +const char *errno_annotator(struct log_field *lf); +const char *priority_annotator(struct log_field *lf); +const char *timestamp_usec_annotator(struct log_field *lf); + +#if defined(OS_WINDOWS) +const char *winerror_annotator(struct log_field *lf); +#endif + +// -------------------------------------------------------------------------------------------------------------------- +// field formatters + +uint64_t log_field_to_uint64(struct log_field *lf); +int64_t log_field_to_int64(struct log_field *lf); + +// -------------------------------------------------------------------------------------------------------------------- +// common text formatters + +void nd_logger_logfmt(BUFFER *wb, struct log_field *fields, size_t fields_max); +void nd_logger_json(BUFFER *wb, struct log_field *fields, size_t fields_max); + +// -------------------------------------------------------------------------------------------------------------------- +// output to syslog + +void nd_log_init_syslog(void); +void nd_log_reset_syslog(void); +bool nd_logger_syslog(int priority, ND_LOG_FORMAT format, struct log_field *fields, size_t fields_max); + +// -------------------------------------------------------------------------------------------------------------------- +// output to systemd-journal + +bool nd_log_journal_systemd_init(void); +bool nd_log_journal_direct_init(const char *path); +bool nd_logger_journal_direct(struct log_field *fields, size_t fields_max); +bool nd_logger_journal_libsystemd(struct log_field *fields, size_t fields_max); + +// -------------------------------------------------------------------------------------------------------------------- +// output to file + +bool nd_logger_file(FILE *fp, ND_LOG_FORMAT format, struct log_field *fields, size_t fields_max); + +// 
-------------------------------------------------------------------------------------------------------------------- +// output to windows events log + +#if defined(OS_WINDOWS) +#if defined(HAVE_ETW) +bool nd_log_init_etw(void); +bool nd_logger_etw(struct nd_log_source *source, struct log_field *fields, size_t fields_max); +#endif +#if defined(HAVE_WEL) +bool nd_log_init_wel(void); +bool nd_logger_wel(struct nd_log_source *source, struct log_field *fields, size_t fields_max); +#endif +#endif + +#endif //NETDATA_ND_LOG_INTERNALS_H diff --git a/src/libnetdata/log/nd_log-to-file.c b/src/libnetdata/log/nd_log-to-file.c new file mode 100644 index 000000000..2de76536b --- /dev/null +++ b/src/libnetdata/log/nd_log-to-file.c @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "nd_log-internals.h" + +void chown_open_file(int fd, uid_t uid, gid_t gid) { + if(fd == -1) return; + + struct stat buf; + + if(fstat(fd, &buf) == -1) { + netdata_log_error("Cannot fstat() fd %d", fd); + return; + } + + if((buf.st_uid != uid || buf.st_gid != gid) && S_ISREG(buf.st_mode)) { + if(fchown(fd, uid, gid) == -1) + netdata_log_error("Cannot fchown() fd %d.", fd); + } +} + +void nd_log_chown_log_files(uid_t uid, gid_t gid) { + for(size_t i = 0 ; i < _NDLS_MAX ; i++) { + if(nd_log.sources[i].fd != -1 && nd_log.sources[i].fd != STDIN_FILENO) + chown_open_file(nd_log.sources[i].fd, uid, gid); + } +} + +bool nd_logger_file(FILE *fp, ND_LOG_FORMAT format, struct log_field *fields, size_t fields_max) { + BUFFER *wb = buffer_create(1024, NULL); + + if(format == NDLF_JSON) + nd_logger_json(wb, fields, fields_max); + else + nd_logger_logfmt(wb, fields, fields_max); + + int r = fprintf(fp, "%s\n", buffer_tostring(wb)); + fflush(fp); + + buffer_free(wb); + return r > 0; +} diff --git a/src/libnetdata/log/nd_log-to-syslog.c b/src/libnetdata/log/nd_log-to-syslog.c new file mode 100644 index 000000000..2903bf591 --- /dev/null +++ b/src/libnetdata/log/nd_log-to-syslog.c @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "nd_log-internals.h" + +void nd_log_init_syslog(void) { + if(nd_log.syslog.initialized) + return; + + openlog(program_name, LOG_PID, nd_log.syslog.facility); + nd_log.syslog.initialized = true; +} + +bool nd_logger_syslog(int priority, ND_LOG_FORMAT format __maybe_unused, struct log_field *fields, size_t fields_max) { + CLEAN_BUFFER *wb = buffer_create(1024, NULL); + + nd_logger_logfmt(wb, fields, fields_max); + syslog(priority, "%s", buffer_tostring(wb)); + + return true; +} diff --git a/src/libnetdata/log/nd_log-to-systemd-journal.c b/src/libnetdata/log/nd_log-to-systemd-journal.c new file mode 100644 index 000000000..922427777 --- /dev/null +++ b/src/libnetdata/log/nd_log-to-systemd-journal.c @@ -0,0 +1,296 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "nd_log-internals.h" + +bool nd_log_journal_systemd_init(void) { +#ifdef HAVE_SYSTEMD + nd_log.journal.initialized = true; +#else + nd_log.journal.initialized = false; +#endif + + return nd_log.journal.initialized; +} + +static int nd_log_journal_direct_fd_find_and_open(char *filename, size_t size) { + int fd; + + if(netdata_configured_host_prefix && *netdata_configured_host_prefix) { + journal_construct_path(filename, size, netdata_configured_host_prefix, "netdata"); + if (is_path_unix_socket(filename) && (fd = journal_direct_fd(filename)) != -1) + return fd; + + journal_construct_path(filename, size, netdata_configured_host_prefix, NULL); + if (is_path_unix_socket(filename) && (fd = 
journal_direct_fd(filename)) != -1) + return fd; + } + + journal_construct_path(filename, size, NULL, "netdata"); + if (is_path_unix_socket(filename) && (fd = journal_direct_fd(filename)) != -1) + return fd; + + journal_construct_path(filename, size, NULL, NULL); + if (is_path_unix_socket(filename) && (fd = journal_direct_fd(filename)) != -1) + return fd; + + return -1; +} + +bool nd_log_journal_socket_available(void) { + char filename[FILENAME_MAX]; + int fd = nd_log_journal_direct_fd_find_and_open(filename, sizeof(filename)); + if(fd == -1) return false; + close(fd); + return true; +} + +static void nd_log_journal_direct_set_env(void) { + if(nd_log.sources[NDLS_COLLECTORS].method == NDLM_JOURNAL) + nd_setenv("NETDATA_SYSTEMD_JOURNAL_PATH", nd_log.journal_direct.filename, 1); +} + +bool nd_log_journal_direct_init(const char *path) { + if(nd_log.journal_direct.initialized) { + nd_log_journal_direct_set_env(); + return true; + } + + int fd; + char filename[FILENAME_MAX]; + if(!is_path_unix_socket(path)) + fd = nd_log_journal_direct_fd_find_and_open(filename, sizeof(filename)); + else { + snprintfz(filename, sizeof(filename), "%s", path); + fd = journal_direct_fd(filename); + } + + if(fd < 0) + return false; + + nd_log.journal_direct.fd = fd; + nd_log.journal_direct.initialized = true; + + strncpyz(nd_log.journal_direct.filename, filename, sizeof(nd_log.journal_direct.filename) - 1); + nd_log_journal_direct_set_env(); + + return true; +} + +bool nd_logger_journal_libsystemd(struct log_field *fields __maybe_unused, size_t fields_max __maybe_unused) { +#ifdef HAVE_SYSTEMD + + // --- FIELD_PARSER_VERSIONS --- + // + // IMPORTANT: + // THERE ARE 6 VERSIONS OF THIS CODE + // + // 1. journal (direct socket API), + // 2. journal (libsystemd API), + // 3. logfmt, + // 4. json, + // 5. convert to uint64 + // 6. 
convert to int64 + // + // UPDATE ALL OF THEM FOR NEW FEATURES OR FIXES + + struct iovec iov[fields_max]; + int iov_count = 0; + + memset(iov, 0, sizeof(iov)); + + CLEAN_BUFFER *tmp = NULL; + + for (size_t i = 0; i < fields_max; i++) { + if (!fields[i].entry.set || !fields[i].journal) + continue; + + const char *key = fields[i].journal; + char *value = NULL; + int rc = 0; + switch (fields[i].entry.type) { + case NDFT_TXT: + if(*fields[i].entry.txt) + rc = asprintf(&value, "%s=%s", key, fields[i].entry.txt); + break; + case NDFT_STR: + rc = asprintf(&value, "%s=%s", key, string2str(fields[i].entry.str)); + break; + case NDFT_BFR: + if(buffer_strlen(fields[i].entry.bfr)) + rc = asprintf(&value, "%s=%s", key, buffer_tostring(fields[i].entry.bfr)); + break; + case NDFT_U64: + rc = asprintf(&value, "%s=%" PRIu64, key, fields[i].entry.u64); + break; + case NDFT_I64: + rc = asprintf(&value, "%s=%" PRId64, key, fields[i].entry.i64); + break; + case NDFT_DBL: + rc = asprintf(&value, "%s=%f", key, fields[i].entry.dbl); + break; + case NDFT_UUID: + if(!uuid_is_null(*fields[i].entry.uuid)) { + char u[UUID_COMPACT_STR_LEN]; + uuid_unparse_lower_compact(*fields[i].entry.uuid, u); + rc = asprintf(&value, "%s=%s", key, u); + } + break; + case NDFT_CALLBACK: { + if(!tmp) + tmp = buffer_create(1024, NULL); + else + buffer_flush(tmp); + if(fields[i].entry.cb.formatter(tmp, fields[i].entry.cb.formatter_data)) + rc = asprintf(&value, "%s=%s", key, buffer_tostring(tmp)); + } + break; + default: + rc = asprintf(&value, "%s=%s", key, "UNHANDLED"); + break; + } + + if (rc != -1 && value) { + iov[iov_count].iov_base = value; + iov[iov_count].iov_len = strlen(value); + iov_count++; + } + } + + static bool sockets_before[1024]; + bool detect_systemd_socket = __atomic_load_n(&nd_log.journal.first_msg, __ATOMIC_RELAXED) == false; + if(detect_systemd_socket) { + for(int i = 3 ; (size_t)i < _countof(sockets_before); i++) + sockets_before[i] = fd_is_socket(i); + } + + int r = sd_journal_sendv(iov, iov_count); + + if(r == 0 && detect_systemd_socket) { + __atomic_store_n(&nd_log.journal.first_msg, true, __ATOMIC_RELAXED); + + // this is the first successful libsystemd log + // let's detect its fd number (we need it for the spawn server) + + for(int i = 3 ; (size_t)i < _countof(sockets_before); i++) { + if (!sockets_before[i] && fd_is_socket(i)) { + nd_log.journal.fd = i; + break; + } + } + } + + // Clean up allocated memory + for (int i = 0; i < iov_count; i++) { + if (iov[i].iov_base != NULL) { + free(iov[i].iov_base); + } + } + + return r == 0; +#else + return false; +#endif +} + +bool nd_logger_journal_direct(struct log_field *fields, size_t fields_max) { + if(!nd_log.journal_direct.initialized) + return false; + + // --- FIELD_PARSER_VERSIONS --- + // + // IMPORTANT: + // THERE ARE 6 VERSIONS OF THIS CODE + // + // 1. journal (direct socket API), + // 2. journal (libsystemd API), + // 3. logfmt, + // 4. json, + // 5. convert to uint64 + // 6. 
convert to int64
+    //
+    // UPDATE ALL OF THEM FOR NEW FEATURES OR FIXES
+
+    CLEAN_BUFFER *wb = buffer_create(4096, NULL);
+    CLEAN_BUFFER *tmp = NULL;
+
+    for (size_t i = 0; i < fields_max; i++) {
+        if (!fields[i].entry.set || !fields[i].journal)
+            continue;
+
+        const char *key = fields[i].journal;
+
+        const char *s = NULL;
+        switch(fields[i].entry.type) {
+            case NDFT_TXT:
+                s = fields[i].entry.txt;
+                break;
+            case NDFT_STR:
+                s = string2str(fields[i].entry.str);
+                break;
+            case NDFT_BFR:
+                s = buffer_tostring(fields[i].entry.bfr);
+                break;
+            case NDFT_U64:
+                buffer_strcat(wb, key);
+                buffer_putc(wb, '=');
+                buffer_print_uint64(wb, fields[i].entry.u64);
+                buffer_putc(wb, '\n');
+                break;
+            case NDFT_I64:
+                buffer_strcat(wb, key);
+                buffer_putc(wb, '=');
+                buffer_print_int64(wb, fields[i].entry.i64);
+                buffer_putc(wb, '\n');
+                break;
+            case NDFT_DBL:
+                buffer_strcat(wb, key);
+                buffer_putc(wb, '=');
+                buffer_print_netdata_double(wb, fields[i].entry.dbl);
+                buffer_putc(wb, '\n');
+                break;
+            case NDFT_UUID:
+                if(!uuid_is_null(*fields[i].entry.uuid)) {
+                    char u[UUID_COMPACT_STR_LEN];
+                    uuid_unparse_lower_compact(*fields[i].entry.uuid, u);
+                    buffer_strcat(wb, key);
+                    buffer_putc(wb, '=');
+                    buffer_fast_strcat(wb, u, sizeof(u) - 1);
+                    buffer_putc(wb, '\n');
+                }
+                break;
+            case NDFT_CALLBACK: {
+                if(!tmp)
+                    tmp = buffer_create(1024, NULL);
+                else
+                    buffer_flush(tmp);
+                if(fields[i].entry.cb.formatter(tmp, fields[i].entry.cb.formatter_data))
+                    s = buffer_tostring(tmp);
+                else
+                    s = NULL;
+            }
+            break;
+            default:
+                s = "UNHANDLED";
+                break;
+        }
+
+        if(s && *s) {
+            buffer_strcat(wb, key);
+            if(!strchr(s, '\n')) {
+                buffer_putc(wb, '=');
+                buffer_strcat(wb, s);
+                buffer_putc(wb, '\n');
+            }
+            else {
+                buffer_putc(wb, '\n');
+                size_t size = strlen(s);
+                uint64_t le_size = htole64(size);
+                buffer_memcat(wb, &le_size, sizeof(le_size));
+                buffer_memcat(wb, s, size);
+                buffer_putc(wb, '\n');
+            }
+        }
+    }
+
+    return journal_direct_send(nd_log.journal_direct.fd, buffer_tostring(wb), buffer_strlen(wb));
+}
diff --git a/src/libnetdata/log/nd_log-to-windows-common.h b/src/libnetdata/log/nd_log-to-windows-common.h
new file mode 100644
index 000000000..2b2833ed1
--- /dev/null
+++ b/src/libnetdata/log/nd_log-to-windows-common.h
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_ND_LOG_TO_WINDOWS_COMMON_H
+#define NETDATA_ND_LOG_TO_WINDOWS_COMMON_H
+
+// Helper macro to create wide string literals
+#define WIDEN2(x) L ## x
+#define WIDEN(x) WIDEN2(x)
+
+#define NETDATA_ETW_PROVIDER_GUID_STR "{96c5ca72-9bd8-4634-81e5-000014e7da7a}"
+#define NETDATA_ETW_PROVIDER_GUID_STR_W WIDEN(NETDATA_ETW_PROVIDER_GUID_STR)
+
+#define NETDATA_CHANNEL_NAME "Netdata"
+#define NETDATA_CHANNEL_NAME_W WIDEN(NETDATA_CHANNEL_NAME)
+
+#define NETDATA_WEL_CHANNEL_NAME "NetdataWEL"
+#define NETDATA_WEL_CHANNEL_NAME_W WIDEN(NETDATA_WEL_CHANNEL_NAME)
+
+#define NETDATA_ETW_CHANNEL_NAME "Netdata"
+#define NETDATA_ETW_CHANNEL_NAME_W WIDEN(NETDATA_ETW_CHANNEL_NAME)
+
+#define NETDATA_ETW_PROVIDER_NAME "Netdata"
+#define NETDATA_ETW_PROVIDER_NAME_W WIDEN(NETDATA_ETW_PROVIDER_NAME)
+
+#define NETDATA_WEL_PROVIDER_PREFIX "Netdata"
+#define NETDATA_WEL_PROVIDER_PREFIX_W WIDEN(NETDATA_WEL_PROVIDER_PREFIX)
+
+#define NETDATA_WEL_PROVIDER_ACCESS NETDATA_WEL_PROVIDER_PREFIX "Access"
+#define NETDATA_WEL_PROVIDER_ACCESS_W WIDEN(NETDATA_WEL_PROVIDER_ACCESS)
+
+#define NETDATA_WEL_PROVIDER_ACLK NETDATA_WEL_PROVIDER_PREFIX "Aclk"
+#define NETDATA_WEL_PROVIDER_ACLK_W WIDEN(NETDATA_WEL_PROVIDER_ACLK)
+
+#define 
NETDATA_WEL_PROVIDER_COLLECTORS NETDATA_WEL_PROVIDER_PREFIX "Collectors" +#define NETDATA_WEL_PROVIDER_COLLECTORS_W WIDEN(NETDATA_WEL_PROVIDER_COLLECTORS) + +#define NETDATA_WEL_PROVIDER_DAEMON NETDATA_WEL_PROVIDER_PREFIX "Daemon" +#define NETDATA_WEL_PROVIDER_DAEMON_W WIDEN(NETDATA_WEL_PROVIDER_DAEMON) + +#define NETDATA_WEL_PROVIDER_HEALTH NETDATA_WEL_PROVIDER_PREFIX "Health" +#define NETDATA_WEL_PROVIDER_HEALTH_W WIDEN(NETDATA_WEL_PROVIDER_HEALTH) + + +#define NETDATA_ETW_SUBCHANNEL_ACCESS "Access" +#define NETDATA_ETW_SUBCHANNEL_ACCESS_W WIDEN(NETDATA_ETW_SUBCHANNEL_ACCESS) + +#define NETDATA_ETW_SUBCHANNEL_ACLK "Aclk" +#define NETDATA_ETW_SUBCHANNEL_ACLK_W WIDEN(NETDATA_ETW_SUBCHANNEL_ACLK) + +#define NETDATA_ETW_SUBCHANNEL_COLLECTORS "Collectors" +#define NETDATA_ETW_SUBCHANNEL_COLLECTORS_W WIDEN(NETDATA_ETW_SUBCHANNEL_COLLECTORS) + +#define NETDATA_ETW_SUBCHANNEL_DAEMON "Daemon" +#define NETDATA_ETW_SUBCHANNEL_DAEMON_W WIDEN(NETDATA_ETW_SUBCHANNEL_DAEMON) + +#define NETDATA_ETW_SUBCHANNEL_HEALTH "Health" +#define NETDATA_ETW_SUBCHANNEL_HEALTH_W WIDEN(NETDATA_ETW_SUBCHANNEL_HEALTH) + +// Define shift values +#define EVENT_ID_SEV_SHIFT 30 +#define EVENT_ID_C_SHIFT 29 +#define EVENT_ID_R_SHIFT 28 +#define EVENT_ID_FACILITY_SHIFT 16 +#define EVENT_ID_CODE_SHIFT 0 + +#define EVENT_ID_PRIORITY_SHIFT 0 // Shift 0 bits +#define EVENT_ID_SOURCE_SHIFT 4 // Shift 4 bits + +// Define masks +#define EVENT_ID_SEV_MASK 0xC0000000 // Bits 31-30 +#define EVENT_ID_C_MASK 0x20000000 // Bit 29 +#define EVENT_ID_R_MASK 0x10000000 // Bit 28 +#define EVENT_ID_FACILITY_MASK 0x0FFF0000 // Bits 27-16 +#define EVENT_ID_CODE_MASK 0x0000FFFF // Bits 15-0 + +#define EVENT_ID_PRIORITY_MASK 0x000F // Bits 0-3 +#define EVENT_ID_SOURCE_MASK 0x00F0 // Bits 4-7 + +typedef enum __attribute__((packed)) { + MSGID_MESSAGE_ONLY = 1, + MSGID_MESSAGE_ERRNO, + MSGID_REQUEST_ONLY, + MSGID_ALERT_TRANSITION, + MSGID_ACCESS, + MSGID_ACCESS_FORWARDER, + MSGID_ACCESS_USER, + MSGID_ACCESS_FORWARDER_USER, + MSGID_ACCESS_MESSAGE, + MSGID_ACCESS_MESSAGE_REQUEST, + MSGID_ACCESS_MESSAGE_USER, + + // terminator + _MSGID_MAX, +} MESSAGE_ID; + +static inline uint32_t get_event_type_from_priority(ND_LOG_FIELD_PRIORITY priority) { + switch (priority) { + case NDLP_EMERG: + case NDLP_ALERT: + case NDLP_CRIT: + case NDLP_ERR: + return EVENTLOG_ERROR_TYPE; + + case NDLP_WARNING: + return EVENTLOG_WARNING_TYPE; + + case NDLP_NOTICE: + case NDLP_INFO: + case NDLP_DEBUG: + default: + return EVENTLOG_INFORMATION_TYPE; + } +} + +static inline uint8_t get_severity_from_priority(ND_LOG_FIELD_PRIORITY priority) { + switch (priority) { + case NDLP_EMERG: + case NDLP_ALERT: + case NDLP_CRIT: + case NDLP_ERR: + return STATUS_SEVERITY_ERROR; + + case NDLP_WARNING: + return STATUS_SEVERITY_WARNING; + + case NDLP_NOTICE: + case NDLP_INFO: + case NDLP_DEBUG: + default: + return STATUS_SEVERITY_INFORMATIONAL; + } +} + +static inline uint8_t get_level_from_priority(ND_LOG_FIELD_PRIORITY priority) { + switch (priority) { + // return 0 = log an event regardless of any filtering applied + + case NDLP_EMERG: + case NDLP_ALERT: + case NDLP_CRIT: + return 1; + + case NDLP_ERR: + return 2; + + case NDLP_WARNING: + return 3; + + case NDLP_NOTICE: + case NDLP_INFO: + return 4; + + case NDLP_DEBUG: + default: + return 5; + } +} + +static inline const char *get_level_from_priority_str(ND_LOG_FIELD_PRIORITY priority) { + switch (priority) { + // return "win:LogAlways" to log an event regardless of any filtering applied + + case NDLP_EMERG: + case NDLP_ALERT: + case 
NDLP_CRIT: + return "win:Critical"; + + case NDLP_ERR: + return "win:Error"; + + case NDLP_WARNING: + return "win:Warning"; + + case NDLP_NOTICE: + case NDLP_INFO: + return "win:Informational"; + + case NDLP_DEBUG: + default: + return "win:Verbose"; + } +} + +static inline uint16_t construct_event_code(ND_LOG_SOURCES source, ND_LOG_FIELD_PRIORITY priority, MESSAGE_ID messageID) { + return (source << 12 | priority << 8 | messageID << 0); +} + +#endif //NETDATA_ND_LOG_TO_WINDOWS_COMMON_H diff --git a/src/libnetdata/log/nd_log-to-windows-events.c b/src/libnetdata/log/nd_log-to-windows-events.c new file mode 100644 index 000000000..f32289daa --- /dev/null +++ b/src/libnetdata/log/nd_log-to-windows-events.c @@ -0,0 +1,554 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "nd_log-internals.h" + +#if defined(OS_WINDOWS) && (defined(HAVE_ETW) || defined(HAVE_WEL)) + +// -------------------------------------------------------------------------------------------------------------------- +// construct an event id + +// load message resources generated header +#include "wevt_netdata.h" + +// include the common definitions with the message resources and manifest generator +#include "nd_log-to-windows-common.h" + +#if defined(HAVE_ETW) +// we need the manifest, only in ETW mode + +// eliminate compiler warnings and load manifest generated header +#undef EXTERN_C +#define EXTERN_C +#undef __declspec +#define __declspec(x) +#include "wevt_netdata_manifest.h" + +static REGHANDLE regHandle; +#endif + +// Function to construct EventID +static DWORD complete_event_id(DWORD facility, DWORD severity, DWORD event_code) { + DWORD event_id = 0; + + // Set Severity + event_id |= ((DWORD)(severity) << EVENT_ID_SEV_SHIFT) & EVENT_ID_SEV_MASK; + + // Set Customer Code Flag (C) + event_id |= (0x0 << EVENT_ID_C_SHIFT) & EVENT_ID_C_MASK; + + // Set Reserved Bit (R) - typically 0 + event_id |= (0x0 << EVENT_ID_R_SHIFT) & EVENT_ID_R_MASK; + + // Set Facility + event_id |= ((DWORD)(facility) << EVENT_ID_FACILITY_SHIFT) & EVENT_ID_FACILITY_MASK; + + // Set Code + event_id |= ((DWORD)(event_code) << EVENT_ID_CODE_SHIFT) & EVENT_ID_CODE_MASK; + + return event_id; +} + +DWORD construct_event_id(ND_LOG_SOURCES source, ND_LOG_FIELD_PRIORITY priority, MESSAGE_ID messageID) { + DWORD event_code = construct_event_code(source, priority, messageID); + return complete_event_id(FACILITY_NETDATA, get_severity_from_priority(priority), event_code); +} + +static bool check_event_id(ND_LOG_SOURCES source __maybe_unused, ND_LOG_FIELD_PRIORITY priority __maybe_unused, MESSAGE_ID messageID __maybe_unused, DWORD event_code __maybe_unused) { +#ifdef NETDATA_INTERNAL_CHECKS + DWORD generated = construct_event_id(source, priority, messageID); + if(generated != event_code) { + + // this is just used for a break point, to see the values in hex + char current[UINT64_HEX_MAX_LENGTH]; + print_uint64_hex(current, generated); + + char wanted[UINT64_HEX_MAX_LENGTH]; + print_uint64_hex(wanted, event_code); + + const char *got = current; + const char *good = wanted; + internal_fatal(true, "EventIDs mismatch, expected %s, got %s", good, got); + } +#endif + + return true; +} + +// -------------------------------------------------------------------------------------------------------------------- +// initialization + +// Define provider names per source (only when not using ETW) +static const wchar_t *wel_provider_per_source[_NDLS_MAX] = { + [NDLS_UNSET] = NULL, // not used, linked to NDLS_DAEMON + [NDLS_ACCESS] = NETDATA_WEL_PROVIDER_ACCESS_W, // + 
[NDLS_ACLK] = NETDATA_WEL_PROVIDER_ACLK_W,             //
+        [NDLS_COLLECTORS] = NETDATA_WEL_PROVIDER_COLLECTORS_W, //
+        [NDLS_DAEMON] = NETDATA_WEL_PROVIDER_DAEMON_W,         //
+        [NDLS_HEALTH] = NETDATA_WEL_PROVIDER_HEALTH_W,         //
+        [NDLS_DEBUG] = NULL, // used, linked to NDLS_DAEMON
+};
+
+bool wel_replace_program_with_wevt_netdata_dll(wchar_t *str, size_t size) {
+    const wchar_t *replacement = L"\\wevt_netdata.dll";
+
+    // Find the last occurrence of '\\' to isolate the filename
+    wchar_t *lastBackslash = wcsrchr(str, L'\\');
+
+    if (lastBackslash != NULL) {
+        // Calculate new length after replacement
+        size_t newLen = (lastBackslash - str) + wcslen(replacement);
+
+        // Ensure new length does not exceed buffer size
+        if (newLen >= size)
+            return false; // Not enough space in the buffer
+
+        // Terminate the string at the last backslash
+        *lastBackslash = L'\0';
+
+        // Append the replacement filename
+        wcsncat(str, replacement, size - wcslen(str) - 1);
+
+        // Check if the new file exists
+        if (GetFileAttributesW(str) != INVALID_FILE_ATTRIBUTES)
+            return true; // The file exists
+        else
+            return false; // The file does not exist
+    }
+
+    return false; // No backslash found (likely invalid input)
+}
+
+static bool wel_add_to_registry(const wchar_t *channel, const wchar_t *provider, DWORD defaultMaxSize) {
+    // Build the registry path: SYSTEM\CurrentControlSet\Services\EventLog\<Channel>\<Provider>
+    wchar_t key[MAX_PATH];
+    if(!provider)
+        swprintf(key, MAX_PATH, L"SYSTEM\\CurrentControlSet\\Services\\EventLog\\%ls", channel);
+    else
+        swprintf(key, MAX_PATH, L"SYSTEM\\CurrentControlSet\\Services\\EventLog\\%ls\\%ls", channel, provider);
+
+    HKEY hRegKey;
+    DWORD disposition;
+    LONG result = RegCreateKeyExW(HKEY_LOCAL_MACHINE, key,
+                                  0, NULL, REG_OPTION_NON_VOLATILE, KEY_SET_VALUE, NULL, &hRegKey, &disposition);
+
+    if (result != ERROR_SUCCESS)
+        return false; // Could not create the registry key
+
+    // Check if MaxSize is already set
+    DWORD maxSize = 0;
+    DWORD size = sizeof(maxSize);
+    if (RegQueryValueExW(hRegKey, L"MaxSize", NULL, NULL, (LPBYTE)&maxSize, &size) != ERROR_SUCCESS) {
+        // MaxSize is not set, set it to the default value
+        RegSetValueExW(hRegKey, L"MaxSize", 0, REG_DWORD, (const BYTE*)&defaultMaxSize, sizeof(defaultMaxSize));
+    }
+
+    wchar_t modulePath[MAX_PATH];
+    if (GetModuleFileNameW(NULL, modulePath, MAX_PATH) == 0) {
+        RegCloseKey(hRegKey);
+        return false;
+    }
+
+    if(wel_replace_program_with_wevt_netdata_dll(modulePath, _countof(modulePath))) {
+        RegSetValueExW(hRegKey, L"EventMessageFile", 0, REG_EXPAND_SZ,
+                       (LPBYTE)modulePath, (wcslen(modulePath) + 1) * sizeof(wchar_t));
+
+        DWORD types_supported = EVENTLOG_SUCCESS | EVENTLOG_ERROR_TYPE | EVENTLOG_WARNING_TYPE | EVENTLOG_INFORMATION_TYPE;
+        RegSetValueExW(hRegKey, L"TypesSupported", 0, REG_DWORD, (LPBYTE)&types_supported, sizeof(DWORD));
+    }
+
+    RegCloseKey(hRegKey);
+    return true;
+}
+
+#if defined(HAVE_ETW)
+static void etw_set_source_meta(struct nd_log_source *source, USHORT channelID, const EVENT_DESCRIPTOR *ed) {
+    // It turns out that the Keyword varies only per channel!
+    // So, to log with the right Keyword, Task and Opcode, we copy the ids from the header
+    // the messages compiler (mc.exe) generated from the manifest.
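+    //
+    // A minimal sketch of what such a generated descriptor provides; all the
+    // member values below are hypothetical, the real ones come from the
+    // generated header wevt_netdata_manifest.h:
+    //
+    //   const EVENT_DESCRIPTOR ED_DAEMON_INFO_MESSAGE_ONLY = {
+    //       .Id      = 1,                  // hypothetical event id
+    //       .Version = 0,
+    //       .Channel = CHANNEL_DAEMON,
+    //       .Level   = 4,                  // win:Informational
+    //       .Opcode  = 0,
+    //       .Task    = 1,                  // hypothetical task id
+    //       .Keyword = 0x8000000000000001, // hypothetical channel keyword
+    //   };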
+ + source->channelID = channelID; + source->Opcode = ed->Opcode; + source->Task = ed->Task; + source->Keyword = ed->Keyword; +} + +static bool etw_register_provider(void) { + // Register the ETW provider + if (EventRegister(&NETDATA_ETW_PROVIDER_GUID, NULL, NULL, ®Handle) != ERROR_SUCCESS) + return false; + + etw_set_source_meta(&nd_log.sources[NDLS_DAEMON], CHANNEL_DAEMON, &ED_DAEMON_INFO_MESSAGE_ONLY); + etw_set_source_meta(&nd_log.sources[NDLS_COLLECTORS], CHANNEL_COLLECTORS, &ED_COLLECTORS_INFO_MESSAGE_ONLY); + etw_set_source_meta(&nd_log.sources[NDLS_ACCESS], CHANNEL_ACCESS, &ED_ACCESS_INFO_MESSAGE_ONLY); + etw_set_source_meta(&nd_log.sources[NDLS_HEALTH], CHANNEL_HEALTH, &ED_HEALTH_INFO_MESSAGE_ONLY); + etw_set_source_meta(&nd_log.sources[NDLS_ACLK], CHANNEL_ACLK, &ED_ACLK_INFO_MESSAGE_ONLY); + etw_set_source_meta(&nd_log.sources[NDLS_UNSET], CHANNEL_DAEMON, &ED_DAEMON_INFO_MESSAGE_ONLY); + etw_set_source_meta(&nd_log.sources[NDLS_DEBUG], CHANNEL_DAEMON, &ED_DAEMON_INFO_MESSAGE_ONLY); + + return true; +} +#endif + +bool nd_log_init_windows(void) { + if(nd_log.eventlog.initialized) + return true; + + // validate we have the right keys + if( + !check_event_id(NDLS_COLLECTORS, NDLP_INFO, MSGID_MESSAGE_ONLY, MC_COLLECTORS_INFO_MESSAGE_ONLY) || + !check_event_id(NDLS_DAEMON, NDLP_ERR, MSGID_MESSAGE_ONLY, MC_DAEMON_ERR_MESSAGE_ONLY) || + !check_event_id(NDLS_ACCESS, NDLP_WARNING, MSGID_ACCESS_USER, MC_ACCESS_WARN_ACCESS_USER) || + !check_event_id(NDLS_HEALTH, NDLP_CRIT, MSGID_ALERT_TRANSITION, MC_HEALTH_CRIT_ALERT_TRANSITION) || + !check_event_id(NDLS_DEBUG, NDLP_ALERT, MSGID_ACCESS_FORWARDER_USER, MC_DEBUG_ALERT_ACCESS_FORWARDER_USER)) + return false; + +#if defined(HAVE_ETW) + if(nd_log.eventlog.etw && !etw_register_provider()) + return false; +#endif + +// if(!nd_log.eventlog.etw && !wel_add_to_registry(NETDATA_WEL_CHANNEL_NAME_W, NULL, 50 * 1024 * 1024)) +// return false; + + // Loop through each source and add it to the registry + for(size_t i = 0; i < _NDLS_MAX; i++) { + nd_log.sources[i].source = i; + + const wchar_t *sub_channel = wel_provider_per_source[i]; + + if(!sub_channel) + // we will map these to NDLS_DAEMON + continue; + + DWORD defaultMaxSize = 0; + switch (i) { + case NDLS_ACLK: + defaultMaxSize = 5 * 1024 * 1024; + break; + + case NDLS_HEALTH: + defaultMaxSize = 35 * 1024 * 1024; + break; + + default: + case NDLS_ACCESS: + case NDLS_COLLECTORS: + case NDLS_DAEMON: + defaultMaxSize = 20 * 1024 * 1024; + break; + } + + if(!nd_log.eventlog.etw) { + if(!wel_add_to_registry(NETDATA_WEL_CHANNEL_NAME_W, sub_channel, defaultMaxSize)) + return false; + + // when not using a manifest, each source is a provider + nd_log.sources[i].hEventLog = RegisterEventSourceW(NULL, sub_channel); + if (!nd_log.sources[i].hEventLog) + return false; + } + } + + if(!nd_log.eventlog.etw) { + // Map the unset ones to NDLS_DAEMON + for (size_t i = 0; i < _NDLS_MAX; i++) { + if (!nd_log.sources[i].hEventLog) + nd_log.sources[i].hEventLog = nd_log.sources[NDLS_DAEMON].hEventLog; + } + } + + nd_log.eventlog.initialized = true; + return true; +} + +bool nd_log_init_etw(void) { + nd_log.eventlog.etw = true; + return nd_log_init_windows(); +} + +bool nd_log_init_wel(void) { + nd_log.eventlog.etw = false; + return nd_log_init_windows(); +} + +// -------------------------------------------------------------------------------------------------------------------- +// we pass all our fields to the windows events logs +// numbered the same way we have them in memory. 
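+//
+// as an illustration (the numeric id is hypothetical, assuming NDF_MESSAGE == 48):
+// the wide buffer of field id i is passed as insertion string number i of the
+// event, so an event message template can reference the log message as %48,
+// and any other field the same way, by its numeric field id.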
+//
+// to avoid runtime memory allocations, we use static allocations with ready-to-use buffers
+// which are immediately available for logging.
+
+#define SMALL_WIDE_BUFFERS_SIZE 256
+#define MEDIUM_WIDE_BUFFERS_SIZE 2048
+#define BIG_WIDE_BUFFERS_SIZE 16384
+static wchar_t small_wide_buffers[_NDF_MAX][SMALL_WIDE_BUFFERS_SIZE];
+static wchar_t medium_wide_buffers[2][MEDIUM_WIDE_BUFFERS_SIZE];
+static wchar_t big_wide_buffers[2][BIG_WIDE_BUFFERS_SIZE];
+
+static struct {
+    size_t size;
+    wchar_t *buf;
+} fields_buffers[_NDF_MAX] = { 0 };
+
+#if defined(HAVE_ETW)
+static EVENT_DATA_DESCRIPTOR etw_eventData[_NDF_MAX - 1];
+#endif
+
+static LPCWSTR wel_messages[_NDF_MAX - 1];
+
+__attribute__((constructor)) void wevents_initialize_buffers(void) {
+    for(size_t i = 0; i < _NDF_MAX ;i++) {
+        fields_buffers[i].buf = small_wide_buffers[i];
+        fields_buffers[i].size = SMALL_WIDE_BUFFERS_SIZE;
+    }
+
+    fields_buffers[NDF_NIDL_INSTANCE].buf = medium_wide_buffers[0];
+    fields_buffers[NDF_NIDL_INSTANCE].size = MEDIUM_WIDE_BUFFERS_SIZE;
+
+    fields_buffers[NDF_REQUEST].buf = big_wide_buffers[0];
+    fields_buffers[NDF_REQUEST].size = BIG_WIDE_BUFFERS_SIZE;
+    fields_buffers[NDF_MESSAGE].buf = big_wide_buffers[1];
+    fields_buffers[NDF_MESSAGE].size = BIG_WIDE_BUFFERS_SIZE;
+
+    for(size_t i = 1; i < _NDF_MAX ;i++)
+        wel_messages[i - 1] = fields_buffers[i].buf;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+
+#define is_field_set(fields, fields_max, field) ((field) < (fields_max) && (fields)[field].entry.set)
+
+static const char *get_field_value_unsafe(struct log_field *fields, ND_LOG_FIELD_ID i, size_t fields_max, BUFFER **tmp) {
+    if(!is_field_set(fields, fields_max, i) || !fields[i].eventlog)
+        return "";
+
+    static char number_str[MAX(MAX(UINT64_MAX_LENGTH, DOUBLE_MAX_LENGTH), UUID_STR_LEN)];
+
+    const char *s = NULL;
+    if (fields[i].annotator)
+        s = fields[i].annotator(&fields[i]);
+
+    else
+        switch (fields[i].entry.type) {
+            case NDFT_TXT:
+                s = fields[i].entry.txt;
+                break;
+            case NDFT_STR:
+                s = string2str(fields[i].entry.str);
+                break;
+            case NDFT_BFR:
+                s = buffer_tostring(fields[i].entry.bfr);
+                break;
+            case NDFT_U64:
+                print_uint64(number_str, fields[i].entry.u64);
+                s = number_str;
+                break;
+            case NDFT_I64:
+                print_int64(number_str, fields[i].entry.i64);
+                s = number_str;
+                break;
+            case NDFT_DBL:
+                print_netdata_double(number_str, fields[i].entry.dbl);
+                s = number_str;
+                break;
+            case NDFT_UUID:
+                if (!uuid_is_null(*fields[i].entry.uuid)) {
+                    uuid_unparse_lower(*fields[i].entry.uuid, number_str);
+                    s = number_str;
+                }
+                break;
+            case NDFT_CALLBACK:
+                if (!*tmp)
+                    *tmp = buffer_create(1024, NULL);
+                else
+                    buffer_flush(*tmp);
+
+                if (fields[i].entry.cb.formatter(*tmp, fields[i].entry.cb.formatter_data))
+                    s = buffer_tostring(*tmp);
+                else
+                    s = NULL;
+                break;
+
+            default:
+                s = "UNHANDLED";
+                break;
+        }
+
+    if(!s || !*s) return "";
+    return s;
+}
+static void etw_replace_percent_with_unicode(wchar_t *s, size_t size) {
+    size_t original_len = wcslen(s);
+
+    // Traverse the string, replacing '%' with the Unicode fullwidth percent sign
+    for (size_t i = 0; i < original_len && i < size - 1; i++) {
+        if (s[i] == L'%' && iswdigit(s[i + 1])) {
+            // s[i] = 0xFF05; // Replace '%' with fullwidth percent sign '％'
+            // s[i] = 0x29BC; // ⦼
+            s[i] = 0x2105;    // ℅
+        }
+    }
+
+    // Ensure null termination if needed
+    s[size - 1] = L'\0';
+}
+
+static void wevt_generate_all_fields_unsafe(struct log_field *fields, size_t fields_max, 
BUFFER **tmp) { + for (size_t i = 0; i < fields_max; i++) { + fields_buffers[i].buf[0] = L'\0'; + + if (!fields[i].entry.set || !fields[i].eventlog) + continue; + + const char *s = get_field_value_unsafe(fields, i, fields_max, tmp); + if (s && *s) { + utf8_to_utf16(fields_buffers[i].buf, (int) fields_buffers[i].size, s, -1); + + if(nd_log.eventlog.etw) + // UNBELIEVABLE! they do recursive parameter expansion in ETW... + etw_replace_percent_with_unicode(fields_buffers[i].buf, fields_buffers[i].size); + } + } +} + +static bool has_user_role_permissions(struct log_field *fields, size_t fields_max, BUFFER **tmp) { + const char *t; + + t = get_field_value_unsafe(fields, NDF_USER_NAME, fields_max, tmp); + if (*t) return true; + + t = get_field_value_unsafe(fields, NDF_USER_ROLE, fields_max, tmp); + if (*t && strcmp(t, "none") != 0) return true; + + t = get_field_value_unsafe(fields, NDF_USER_ACCESS, fields_max, tmp); + if (*t && strcmp(t, "0x0") != 0) return true; + + return false; +} + +static bool nd_logger_windows(struct nd_log_source *source, struct log_field *fields, size_t fields_max) { + if (!nd_log.eventlog.initialized) + return false; + + ND_LOG_FIELD_PRIORITY priority = NDLP_INFO; + if (fields[NDF_PRIORITY].entry.set) + priority = (ND_LOG_FIELD_PRIORITY) fields[NDF_PRIORITY].entry.u64; + + DWORD wType = get_event_type_from_priority(priority); + (void) wType; + + CLEAN_BUFFER *tmp = NULL; + + static SPINLOCK spinlock = NETDATA_SPINLOCK_INITIALIZER; + spinlock_lock(&spinlock); + wevt_generate_all_fields_unsafe(fields, fields_max, &tmp); + + MESSAGE_ID messageID; + switch (source->source) { + default: + case NDLS_DEBUG: + case NDLS_DAEMON: + case NDLS_COLLECTORS: + messageID = MSGID_MESSAGE_ONLY; + break; + + case NDLS_HEALTH: + messageID = MSGID_ALERT_TRANSITION; + break; + + case NDLS_ACCESS: + if (is_field_set(fields, fields_max, NDF_MESSAGE)) { + messageID = MSGID_ACCESS_MESSAGE; + + if (has_user_role_permissions(fields, fields_max, &tmp)) + messageID = MSGID_ACCESS_MESSAGE_USER; + else if (*get_field_value_unsafe(fields, NDF_REQUEST, fields_max, &tmp)) + messageID = MSGID_ACCESS_MESSAGE_REQUEST; + } else if (is_field_set(fields, fields_max, NDF_RESPONSE_CODE)) { + messageID = MSGID_ACCESS; + + if (*get_field_value_unsafe(fields, NDF_SRC_FORWARDED_FOR, fields_max, &tmp)) + messageID = MSGID_ACCESS_FORWARDER; + + if (has_user_role_permissions(fields, fields_max, &tmp)) { + if (messageID == MSGID_ACCESS) + messageID = MSGID_ACCESS_USER; + else + messageID = MSGID_ACCESS_FORWARDER_USER; + } + } else + messageID = MSGID_REQUEST_ONLY; + break; + + case NDLS_ACLK: + messageID = MSGID_MESSAGE_ONLY; + break; + } + + if (messageID == MSGID_MESSAGE_ONLY && ( + *get_field_value_unsafe(fields, NDF_ERRNO, fields_max, &tmp) || + *get_field_value_unsafe(fields, NDF_WINERROR, fields_max, &tmp))) { + messageID = MSGID_MESSAGE_ERRNO; + } + + DWORD eventID = construct_event_id(source->source, priority, messageID); + + // wType + // + // without a manifest => this determines the Level of the event + // with a manifest => Level from the manifest is used (wType ignored) + // [however it is good to have, in case the manifest is not accessible somehow] + // + + // wCategory + // + // without a manifest => numeric Task values appear + // with a manifest => Task from the manifest is used (wCategory ignored) + + BOOL rc; +#if defined(HAVE_ETW) + if (nd_log.eventlog.etw) { + // metadata based logging - ETW + + for (size_t i = 1; i < _NDF_MAX; i++) + EventDataDescCreate(&etw_eventData[i - 1], 
fields_buffers[i].buf, + (wcslen(fields_buffers[i].buf) + 1) * sizeof(WCHAR)); + + EVENT_DESCRIPTOR EventDesc = { + .Id = eventID & EVENT_ID_CODE_MASK, // ETW needs the raw event id + .Version = 0, + .Channel = source->channelID, + .Level = get_level_from_priority(priority), + .Opcode = source->Opcode, + .Task = source->Task, + .Keyword = source->Keyword, + }; + + rc = ERROR_SUCCESS == EventWrite(regHandle, &EventDesc, _NDF_MAX - 1, etw_eventData); + + } + else +#endif + { + // eventID based logging - WEL + rc = ReportEventW(source->hEventLog, wType, 0, eventID, NULL, _NDF_MAX - 1, 0, wel_messages, NULL); + } + + spinlock_unlock(&spinlock); + + return rc == TRUE; +} + +#if defined(HAVE_ETW) +bool nd_logger_etw(struct nd_log_source *source, struct log_field *fields, size_t fields_max) { + return nd_logger_windows(source, fields, fields_max); +} +#endif + +#if defined(HAVE_WEL) +bool nd_logger_wel(struct nd_log_source *source, struct log_field *fields, size_t fields_max) { + return nd_logger_windows(source, fields, fields_max); +} +#endif + +#endif diff --git a/src/libnetdata/log/nd_log.c b/src/libnetdata/log/nd_log.c new file mode 100644 index 000000000..a605fe460 --- /dev/null +++ b/src/libnetdata/log/nd_log.c @@ -0,0 +1,465 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +// do not REMOVE this, it is used by systemd-journal includes to prevent saving the file, function, line of the +// source code that makes the calls, allowing our loggers to log the lines of source code that actually log +#define SD_JOURNAL_SUPPRESS_LOCATION + +#include "../libnetdata.h" +#include "nd_log-internals.h" + +const char *program_name = ""; +uint64_t debug_flags = 0; +int aclklog_enabled = 0; + +// -------------------------------------------------------------------------------------------------------------------- + +void errno_clear(void) { + errno = 0; + +#if defined(OS_WINDOWS) + SetLastError(ERROR_SUCCESS); +#endif +} + +// -------------------------------------------------------------------------------------------------------------------- +// logger router + +static ND_LOG_METHOD nd_logger_select_output(ND_LOG_SOURCES source, FILE **fpp, SPINLOCK **spinlock) { + *spinlock = NULL; + ND_LOG_METHOD output = nd_log.sources[source].method; + + switch(output) { + case NDLM_JOURNAL: + if(unlikely(!nd_log.journal_direct.initialized && !nd_log.journal.initialized)) { + output = NDLM_FILE; + *fpp = stderr; + *spinlock = &nd_log.std_error.spinlock; + } + else { + *fpp = NULL; + *spinlock = NULL; + } + break; + +#if defined(OS_WINDOWS) && (defined(HAVE_ETW) || defined(HAVE_WEL)) +#if defined(HAVE_ETW) + case NDLM_ETW: +#endif +#if defined(HAVE_WEL) + case NDLM_WEL: +#endif + if(unlikely(!nd_log.eventlog.initialized)) { + output = NDLM_FILE; + *fpp = stderr; + *spinlock = &nd_log.std_error.spinlock; + } + else { + *fpp = NULL; + *spinlock = NULL; + } + break; +#endif + + case NDLM_SYSLOG: + if(unlikely(!nd_log.syslog.initialized)) { + output = NDLM_FILE; + *spinlock = &nd_log.std_error.spinlock; + *fpp = stderr; + } + else { + *spinlock = NULL; + *fpp = NULL; + } + break; + + case NDLM_FILE: + if(!nd_log.sources[source].fp) { + *fpp = stderr; + *spinlock = &nd_log.std_error.spinlock; + } + else { + *fpp = nd_log.sources[source].fp; + *spinlock = &nd_log.sources[source].spinlock; + } + break; + + case NDLM_STDOUT: + output = NDLM_FILE; + *fpp = stdout; + *spinlock = &nd_log.std_output.spinlock; + break; + + default: + case NDLM_DEFAULT: + case NDLM_STDERR: + output = NDLM_FILE; + *fpp = stderr; + *spinlock = 
&nd_log.std_error.spinlock; + break; + + case NDLM_DISABLED: + case NDLM_DEVNULL: + output = NDLM_DISABLED; + *fpp = NULL; + *spinlock = NULL; + break; + } + + return output; +} + +// -------------------------------------------------------------------------------------------------------------------- +// high level logger + +static void nd_logger_log_fields(SPINLOCK *spinlock, FILE *fp, bool limit, ND_LOG_FIELD_PRIORITY priority, + ND_LOG_METHOD output, struct nd_log_source *source, + struct log_field *fields, size_t fields_max) { + if(spinlock) + spinlock_lock(spinlock); + + // check the limits + if(limit && nd_log_limit_reached(source)) + goto cleanup; + + if(output == NDLM_JOURNAL) { + if(!nd_logger_journal_direct(fields, fields_max) && !nd_logger_journal_libsystemd(fields, fields_max)) { + // we can't log to journal, let's log to stderr + if(spinlock) + spinlock_unlock(spinlock); + + output = NDLM_FILE; + spinlock = &nd_log.std_error.spinlock; + fp = stderr; + + if(spinlock) + spinlock_lock(spinlock); + } + } + +#if defined(OS_WINDOWS) +#if defined(HAVE_ETW) + if(output == NDLM_ETW) { + if(!nd_logger_etw(source, fields, fields_max)) { + // we can't log to windows events, let's log to stderr + if(spinlock) + spinlock_unlock(spinlock); + + output = NDLM_FILE; + spinlock = &nd_log.std_error.spinlock; + fp = stderr; + + if(spinlock) + spinlock_lock(spinlock); + } + } +#endif +#if defined(HAVE_WEL) + if(output == NDLM_WEL) { + if(!nd_logger_wel(source, fields, fields_max)) { + // we can't log to windows events, let's log to stderr + if(spinlock) + spinlock_unlock(spinlock); + + output = NDLM_FILE; + spinlock = &nd_log.std_error.spinlock; + fp = stderr; + + if(spinlock) + spinlock_lock(spinlock); + } + } +#endif +#endif + + if(output == NDLM_SYSLOG) + nd_logger_syslog(priority, source->format, fields, fields_max); + + if(output == NDLM_FILE) + nd_logger_file(fp, source->format, fields, fields_max); + + +cleanup: + if(spinlock) + spinlock_unlock(spinlock); +} + +static void nd_logger_unset_all_thread_fields(void) { + size_t fields_max = THREAD_FIELDS_MAX; + for(size_t i = 0; i < fields_max ; i++) + thread_log_fields[i].entry.set = false; +} + +static void nd_logger_merge_log_stack_to_thread_fields(void) { + for(size_t c = 0; c < thread_log_stack_next ;c++) { + struct log_stack_entry *lgs = thread_log_stack_base[c]; + + for(size_t i = 0; lgs[i].id != NDF_STOP ; i++) { + if(lgs[i].id >= _NDF_MAX || !lgs[i].set) + continue; + + struct log_stack_entry *e = &lgs[i]; + ND_LOG_STACK_FIELD_TYPE type = lgs[i].type; + + // do not add empty / unset fields + if((type == NDFT_TXT && (!e->txt || !*e->txt)) || + (type == NDFT_BFR && (!e->bfr || !buffer_strlen(e->bfr))) || + (type == NDFT_STR && !e->str) || + (type == NDFT_UUID && (!e->uuid || uuid_is_null(*e->uuid))) || + (type == NDFT_CALLBACK && !e->cb.formatter) || + type == NDFT_UNSET) + continue; + + thread_log_fields[lgs[i].id].entry = *e; + } + } +} + +static void nd_logger(const char *file, const char *function, const unsigned long line, + ND_LOG_SOURCES source, ND_LOG_FIELD_PRIORITY priority, bool limit, + int saved_errno, size_t saved_winerror __maybe_unused, const char *fmt, va_list ap) { + + SPINLOCK *spinlock; + FILE *fp; + ND_LOG_METHOD output = nd_logger_select_output(source, &fp, &spinlock); + if(!IS_FINAL_LOG_METHOD(output)) + return; + + // mark all fields as unset + nd_logger_unset_all_thread_fields(); + + // flatten the log stack into the fields + nd_logger_merge_log_stack_to_thread_fields(); + + // set the common fields that are 
automatically set by the logging subsystem + + if(likely(!thread_log_fields[NDF_INVOCATION_ID].entry.set)) + thread_log_fields[NDF_INVOCATION_ID].entry = ND_LOG_FIELD_UUID(NDF_INVOCATION_ID, &nd_log.invocation_id); + + if(likely(!thread_log_fields[NDF_LOG_SOURCE].entry.set)) + thread_log_fields[NDF_LOG_SOURCE].entry = ND_LOG_FIELD_TXT(NDF_LOG_SOURCE, nd_log_id2source(source)); + else { + ND_LOG_SOURCES src = source; + + if(thread_log_fields[NDF_LOG_SOURCE].entry.type == NDFT_TXT) + src = nd_log_source2id(thread_log_fields[NDF_LOG_SOURCE].entry.txt, source); + else if(thread_log_fields[NDF_LOG_SOURCE].entry.type == NDFT_U64) + src = thread_log_fields[NDF_LOG_SOURCE].entry.u64; + + if(src != source && src < _NDLS_MAX) { + source = src; + output = nd_logger_select_output(source, &fp, &spinlock); + if(output != NDLM_FILE && output != NDLM_JOURNAL && output != NDLM_SYSLOG) + return; + } + } + + if(likely(!thread_log_fields[NDF_SYSLOG_IDENTIFIER].entry.set)) + thread_log_fields[NDF_SYSLOG_IDENTIFIER].entry = ND_LOG_FIELD_TXT(NDF_SYSLOG_IDENTIFIER, program_name); + + if(likely(!thread_log_fields[NDF_LINE].entry.set)) { + thread_log_fields[NDF_LINE].entry = ND_LOG_FIELD_U64(NDF_LINE, line); + thread_log_fields[NDF_FILE].entry = ND_LOG_FIELD_TXT(NDF_FILE, file); + thread_log_fields[NDF_FUNC].entry = ND_LOG_FIELD_TXT(NDF_FUNC, function); + } + + if(likely(!thread_log_fields[NDF_PRIORITY].entry.set)) { + thread_log_fields[NDF_PRIORITY].entry = ND_LOG_FIELD_U64(NDF_PRIORITY, priority); + } + + if(likely(!thread_log_fields[NDF_TID].entry.set)) + thread_log_fields[NDF_TID].entry = ND_LOG_FIELD_U64(NDF_TID, gettid_cached()); + + if(likely(!thread_log_fields[NDF_THREAD_TAG].entry.set)) { + const char *thread_tag = nd_thread_tag(); + thread_log_fields[NDF_THREAD_TAG].entry = ND_LOG_FIELD_TXT(NDF_THREAD_TAG, thread_tag); + + // TODO: fix the ND_MODULE in logging by setting proper module name in threads +// if(!thread_log_fields[NDF_MODULE].entry.set) +// thread_log_fields[NDF_MODULE].entry = ND_LOG_FIELD_CB(NDF_MODULE, thread_tag_to_module, (void *)thread_tag); + } + + if(likely(!thread_log_fields[NDF_TIMESTAMP_REALTIME_USEC].entry.set)) + thread_log_fields[NDF_TIMESTAMP_REALTIME_USEC].entry = ND_LOG_FIELD_U64(NDF_TIMESTAMP_REALTIME_USEC, now_realtime_usec()); + + if(saved_errno != 0 && !thread_log_fields[NDF_ERRNO].entry.set) + thread_log_fields[NDF_ERRNO].entry = ND_LOG_FIELD_I64(NDF_ERRNO, saved_errno); + + if(saved_winerror != 0 && !thread_log_fields[NDF_WINERROR].entry.set) + thread_log_fields[NDF_WINERROR].entry = ND_LOG_FIELD_U64(NDF_WINERROR, saved_winerror); + + CLEAN_BUFFER *wb = NULL; + if(fmt && !thread_log_fields[NDF_MESSAGE].entry.set) { + wb = buffer_create(1024, NULL); + buffer_vsprintf(wb, fmt, ap); + thread_log_fields[NDF_MESSAGE].entry = ND_LOG_FIELD_TXT(NDF_MESSAGE, buffer_tostring(wb)); + } + + nd_logger_log_fields(spinlock, fp, limit, priority, output, &nd_log.sources[source], + thread_log_fields, THREAD_FIELDS_MAX); + + if(nd_log.sources[source].pending_msg) { + // log a pending message + + nd_logger_unset_all_thread_fields(); + + thread_log_fields[NDF_TIMESTAMP_REALTIME_USEC].entry = (struct log_stack_entry){ + .set = true, + .type = NDFT_U64, + .u64 = now_realtime_usec(), + }; + + thread_log_fields[NDF_LOG_SOURCE].entry = (struct log_stack_entry){ + .set = true, + .type = NDFT_TXT, + .txt = nd_log_id2source(source), + }; + + thread_log_fields[NDF_SYSLOG_IDENTIFIER].entry = (struct log_stack_entry){ + .set = true, + .type = NDFT_TXT, + .txt = program_name, + }; + + 
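+        // NDF_MESSAGE carries the message queued by the flood protection code
+        // (nd_log_limit_reached() stores it in pending_msg when logging is
+        // suppressed or resumed); it is logged with the minimal set of fields
+        // prepared above, with the rate limit disabled (limit = false below).
+        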
thread_log_fields[NDF_MESSAGE].entry = (struct log_stack_entry){ + .set = true, + .type = NDFT_TXT, + .txt = nd_log.sources[source].pending_msg, + }; + + nd_logger_log_fields(spinlock, fp, false, priority, output, + &nd_log.sources[source], + thread_log_fields, THREAD_FIELDS_MAX); + + freez((void *)nd_log.sources[source].pending_msg); + nd_log.sources[source].pending_msg = NULL; + } + + errno_clear(); +} + +static ND_LOG_SOURCES nd_log_validate_source(ND_LOG_SOURCES source) { + if(source >= _NDLS_MAX) + source = NDLS_DAEMON; + + if(nd_log.overwrite_process_source) + source = nd_log.overwrite_process_source; + + return source; +} + +// -------------------------------------------------------------------------------------------------------------------- +// public API for loggers + +void netdata_logger(ND_LOG_SOURCES source, ND_LOG_FIELD_PRIORITY priority, const char *file, const char *function, unsigned long line, const char *fmt, ... ) +{ + int saved_errno = errno; + + size_t saved_winerror = 0; +#if defined(OS_WINDOWS) + saved_winerror = GetLastError(); +#endif + + source = nd_log_validate_source(source); + + if (source != NDLS_DEBUG && priority > nd_log.sources[source].min_priority) + return; + + va_list args; + va_start(args, fmt); + nd_logger(file, function, line, source, priority, + source == NDLS_DAEMON || source == NDLS_COLLECTORS, + saved_errno, saved_winerror, fmt, args); + va_end(args); +} + +void netdata_logger_with_limit(ERROR_LIMIT *erl, ND_LOG_SOURCES source, ND_LOG_FIELD_PRIORITY priority, const char *file __maybe_unused, const char *function __maybe_unused, const unsigned long line __maybe_unused, const char *fmt, ... ) { + int saved_errno = errno; + + size_t saved_winerror = 0; +#if defined(OS_WINDOWS) + saved_winerror = GetLastError(); +#endif + + source = nd_log_validate_source(source); + + if (source != NDLS_DEBUG && priority > nd_log.sources[source].min_priority) + return; + + if(erl->sleep_ut) + sleep_usec(erl->sleep_ut); + + spinlock_lock(&erl->spinlock); + + erl->count++; + time_t now = now_boottime_sec(); + if(now - erl->last_logged < erl->log_every) { + spinlock_unlock(&erl->spinlock); + return; + } + + spinlock_unlock(&erl->spinlock); + + va_list args; + va_start(args, fmt); + nd_logger(file, function, line, source, priority, + source == NDLS_DAEMON || source == NDLS_COLLECTORS, + saved_errno, saved_winerror, fmt, args); + va_end(args); + erl->last_logged = now; + erl->count = 0; +} + +void netdata_logger_fatal( const char *file, const char *function, const unsigned long line, const char *fmt, ... 
) { + int saved_errno = errno; + + size_t saved_winerror = 0; +#if defined(OS_WINDOWS) + saved_winerror = GetLastError(); +#endif + + ND_LOG_SOURCES source = NDLS_DAEMON; + source = nd_log_validate_source(source); + + va_list args; + va_start(args, fmt); + nd_logger(file, function, line, source, NDLP_ALERT, true, saved_errno, saved_winerror, fmt, args); + va_end(args); + + char date[LOG_DATE_LENGTH]; + log_date(date, LOG_DATE_LENGTH, now_realtime_sec()); + + char action_data[70+1]; + snprintfz(action_data, 70, "%04lu@%-10.10s:%-15.15s/%d", line, file, function, saved_errno); + + const char *thread_tag = nd_thread_tag(); + const char *tag_to_send = thread_tag; + + // anonymize thread names + if(strncmp(thread_tag, THREAD_TAG_STREAM_RECEIVER, strlen(THREAD_TAG_STREAM_RECEIVER)) == 0) + tag_to_send = THREAD_TAG_STREAM_RECEIVER; + if(strncmp(thread_tag, THREAD_TAG_STREAM_SENDER, strlen(THREAD_TAG_STREAM_SENDER)) == 0) + tag_to_send = THREAD_TAG_STREAM_SENDER; + + char action_result[60+1]; + snprintfz(action_result, 60, "%s:%s", program_name, tag_to_send); + +#if !defined(ENABLE_SENTRY) && defined(HAVE_BACKTRACE) + int fd = nd_log.sources[NDLS_DAEMON].fd; + if(fd == -1) + fd = STDERR_FILENO; + + int nptrs; + void *buffer[10000]; + + nptrs = backtrace(buffer, sizeof(buffer)); + if(nptrs) + backtrace_symbols_fd(buffer, nptrs, fd); +#endif + +#ifdef NETDATA_INTERNAL_CHECKS + abort(); +#endif + + netdata_cleanup_and_exit(1, "FATAL", action_result, action_data); +} + diff --git a/src/libnetdata/log/nd_log.h b/src/libnetdata/log/nd_log.h new file mode 100644 index 000000000..1fefbe328 --- /dev/null +++ b/src/libnetdata/log/nd_log.h @@ -0,0 +1,179 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_ND_LOG_H +#define NETDATA_ND_LOG_H 1 + +# ifdef __cplusplus +extern "C" { +# endif + +#include "../libnetdata.h" +#include "nd_log-common.h" + +#define ND_LOG_DEFAULT_THROTTLE_LOGS 1000 +#define ND_LOG_DEFAULT_THROTTLE_PERIOD 60 + +void errno_clear(void); +int nd_log_systemd_journal_fd(void); +void nd_log_set_user_settings(ND_LOG_SOURCES source, const char *setting); +void nd_log_set_facility(const char *facility); +void nd_log_set_priority_level(const char *setting); +void nd_log_initialize(void); +void nd_log_reopen_log_files(bool log); +void chown_open_file(int fd, uid_t uid, gid_t gid); +void nd_log_chown_log_files(uid_t uid, gid_t gid); +void nd_log_set_flood_protection(size_t logs, time_t period); +void nd_log_initialize_for_external_plugins(const char *name); +void nd_log_reopen_log_files_for_spawn_server(const char *name); +bool nd_log_journal_socket_available(void); +ND_LOG_FIELD_ID nd_log_field_id_by_journal_name(const char *field, size_t len); +int nd_log_priority2id(const char *priority); +const char *nd_log_id2priority(ND_LOG_FIELD_PRIORITY priority); +const char *nd_log_method_for_external_plugins(const char *s); + +int nd_log_health_fd(void); +int nd_log_collectors_fd(void); +typedef bool (*log_formatter_callback_t)(BUFFER *wb, void *data); + +struct log_stack_entry { + ND_LOG_FIELD_ID id; + ND_LOG_STACK_FIELD_TYPE type; + bool set; + union { + const char *txt; + struct netdata_string *str; + BUFFER *bfr; + uint64_t u64; + int64_t i64; + double dbl; + const nd_uuid_t *uuid; + struct { + log_formatter_callback_t formatter; + void *formatter_data; + } cb; + }; +}; + +#define ND_LOG_STACK _cleanup_(log_stack_pop) struct log_stack_entry +#define ND_LOG_STACK_PUSH(lgs) log_stack_push(lgs) + +#define ND_LOG_FIELD_TXT(field, value) (struct log_stack_entry){ .id = (field), .type = 
NDFT_TXT, .txt = (value), .set = true, } +#define ND_LOG_FIELD_STR(field, value) (struct log_stack_entry){ .id = (field), .type = NDFT_STR, .str = (value), .set = true, } +#define ND_LOG_FIELD_BFR(field, value) (struct log_stack_entry){ .id = (field), .type = NDFT_BFR, .bfr = (value), .set = true, } +#define ND_LOG_FIELD_U64(field, value) (struct log_stack_entry){ .id = (field), .type = NDFT_U64, .u64 = (value), .set = true, } +#define ND_LOG_FIELD_I64(field, value) (struct log_stack_entry){ .id = (field), .type = NDFT_I64, .i64 = (value), .set = true, } +#define ND_LOG_FIELD_DBL(field, value) (struct log_stack_entry){ .id = (field), .type = NDFT_DBL, .dbl = (value), .set = true, } +#define ND_LOG_FIELD_CB(field, func, data) (struct log_stack_entry){ .id = (field), .type = NDFT_CALLBACK, .cb = { .formatter = (func), .formatter_data = (data) }, .set = true, } +#define ND_LOG_FIELD_UUID(field, value) (struct log_stack_entry){ .id = (field), .type = NDFT_UUID, .uuid = (value), .set = true, } +#define ND_LOG_FIELD_END() (struct log_stack_entry){ .id = NDF_STOP, .type = NDFT_UNSET, .set = false, } + +void log_stack_pop(void *ptr); +void log_stack_push(struct log_stack_entry *lgs); + +#define D_WEB_BUFFER 0x0000000000000001 +#define D_WEB_CLIENT 0x0000000000000002 +#define D_LISTENER 0x0000000000000004 +#define D_WEB_DATA 0x0000000000000008 +#define D_OPTIONS 0x0000000000000010 +#define D_PROCNETDEV_LOOP 0x0000000000000020 +#define D_RRD_STATS 0x0000000000000040 +#define D_WEB_CLIENT_ACCESS 0x0000000000000080 +#define D_TC_LOOP 0x0000000000000100 +#define D_DEFLATE 0x0000000000000200 +#define D_CONFIG 0x0000000000000400 +#define D_PLUGINSD 0x0000000000000800 +#define D_CHILDS 0x0000000000001000 +#define D_EXIT 0x0000000000002000 +#define D_CHECKS 0x0000000000004000 +#define D_NFACCT_LOOP 0x0000000000008000 +#define D_PROCFILE 0x0000000000010000 +#define D_RRD_CALLS 0x0000000000020000 +#define D_DICTIONARY 0x0000000000040000 +#define D_MEMORY 0x0000000000080000 +#define D_CGROUP 0x0000000000100000 +#define D_REGISTRY 0x0000000000200000 +#define D_VARIABLES 0x0000000000400000 +#define D_HEALTH 0x0000000000800000 +#define D_CONNECT_TO 0x0000000001000000 +#define D_RRDHOST 0x0000000002000000 +#define D_LOCKS 0x0000000004000000 +#define D_EXPORTING 0x0000000008000000 +#define D_STATSD 0x0000000010000000 +#define D_POLLFD 0x0000000020000000 +#define D_STREAM 0x0000000040000000 +#define D_ANALYTICS 0x0000000080000000 +#define D_RRDENGINE 0x0000000100000000 +#define D_ACLK 0x0000000200000000 +#define D_REPLICATION 0x0000002000000000 +#define D_SYSTEM 0x8000000000000000 + +extern uint64_t debug_flags; +extern const char *program_name; +extern int aclklog_enabled; + +#define LOG_DATE_LENGTH 26 +void log_date(char *buffer, size_t len, time_t now); + +static inline void debug_dummy(void) {} + +void nd_log_limits_reset(void); +void nd_log_limits_unlimited(void); + +#define NDLP_INFO_STR "info" + +#ifdef NETDATA_INTERNAL_CHECKS +#define netdata_log_debug(type, args...) do { if(unlikely(debug_flags & type)) netdata_logger(NDLS_DEBUG, NDLP_DEBUG, __FILE__, __FUNCTION__, __LINE__, ##args); } while(0) +#define internal_error(condition, args...) do { if(unlikely(condition)) netdata_logger(NDLS_DAEMON, NDLP_DEBUG, __FILE__, __FUNCTION__, __LINE__, ##args); } while(0) +#define internal_fatal(condition, args...) do { if(unlikely(condition)) netdata_logger_fatal(__FILE__, __FUNCTION__, __LINE__, ##args); } while(0) +#else +#define netdata_log_debug(type, args...) debug_dummy() +#define internal_error(args...) 
debug_dummy() +#define internal_fatal(args...) debug_dummy() +#endif + +#define fatal(args...) netdata_logger_fatal(__FILE__, __FUNCTION__, __LINE__, ##args) +#define fatal_assert(expr) ((expr) ? (void)(0) : netdata_logger_fatal(__FILE__, __FUNCTION__, __LINE__, "Assertion `%s' failed", #expr)) + +// ---------------------------------------------------------------------------- +// normal logging + +void netdata_logger(ND_LOG_SOURCES source, ND_LOG_FIELD_PRIORITY priority, const char *file, const char *function, unsigned long line, const char *fmt, ... ) PRINTFLIKE(6, 7); +#define nd_log(NDLS, NDLP, args...) netdata_logger(NDLS, NDLP, __FILE__, __FUNCTION__, __LINE__, ##args) +#define nd_log_daemon(NDLP, args...) netdata_logger(NDLS_DAEMON, NDLP, __FILE__, __FUNCTION__, __LINE__, ##args) +#define nd_log_collector(NDLP, args...) netdata_logger(NDLS_COLLECTORS, NDLP, __FILE__, __FUNCTION__, __LINE__, ##args) + +#define netdata_log_info(args...) netdata_logger(NDLS_DAEMON, NDLP_INFO, __FILE__, __FUNCTION__, __LINE__, ##args) +#define netdata_log_error(args...) netdata_logger(NDLS_DAEMON, NDLP_ERR, __FILE__, __FUNCTION__, __LINE__, ##args) +#define collector_info(args...) netdata_logger(NDLS_COLLECTORS, NDLP_INFO, __FILE__, __FUNCTION__, __LINE__, ##args) +#define collector_error(args...) netdata_logger(NDLS_COLLECTORS, NDLP_ERR, __FILE__, __FUNCTION__, __LINE__, ##args) + +#define log_aclk_message_bin(__data, __data_len, __tx, __mqtt_topic, __message_name) \ + nd_log(NDLS_ACLK, NDLP_INFO, \ + "direction:%s message:'%s' topic:'%s' json:'%.*s'", \ + (__tx) ? "OUTGOING" : "INCOMING", __message_name, __mqtt_topic, (int)(__data_len), __data) + +// ---------------------------------------------------------------------------- +// logging with limits + +typedef struct error_with_limit { + SPINLOCK spinlock; + time_t log_every; + size_t count; + time_t last_logged; + usec_t sleep_ut; +} ERROR_LIMIT; + +#define nd_log_limit_static_global_var(var, log_every_secs, sleep_usecs) static ERROR_LIMIT var = { .last_logged = 0, .count = 0, .log_every = (log_every_secs), .sleep_ut = (sleep_usecs) } +#define nd_log_limit_static_thread_var(var, log_every_secs, sleep_usecs) static __thread ERROR_LIMIT var = { .last_logged = 0, .count = 0, .log_every = (log_every_secs), .sleep_ut = (sleep_usecs) } +void netdata_logger_with_limit(ERROR_LIMIT *erl, ND_LOG_SOURCES source, ND_LOG_FIELD_PRIORITY priority, const char *file, const char *function, unsigned long line, const char *fmt, ... ) PRINTFLIKE(7, 8); +#define nd_log_limit(erl, NDLS, NDLP, args...) netdata_logger_with_limit(erl, NDLS, NDLP, __FILE__, __FUNCTION__, __LINE__, ##args) + +// ---------------------------------------------------------------------------- + +void netdata_logger_fatal( const char *file, const char *function, unsigned long line, const char *fmt, ... 
) NORETURN PRINTFLIKE(4, 5); + +# ifdef __cplusplus +} +# endif + +#endif /* NETDATA_ND_LOG_H */ diff --git a/src/libnetdata/log/nd_log_limit.c b/src/libnetdata/log/nd_log_limit.c new file mode 100644 index 000000000..272138196 --- /dev/null +++ b/src/libnetdata/log/nd_log_limit.c @@ -0,0 +1,100 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "nd_log_limit.h" + +void nd_log_limits_reset(void) { + usec_t now_ut = now_monotonic_usec(); + + spinlock_lock(&nd_log.std_output.spinlock); + spinlock_lock(&nd_log.std_error.spinlock); + + for(size_t i = 0; i < _NDLS_MAX ;i++) { + spinlock_lock(&nd_log.sources[i].spinlock); + nd_log.sources[i].limits.prevented = 0; + nd_log.sources[i].limits.counter = 0; + nd_log.sources[i].limits.started_monotonic_ut = now_ut; + nd_log.sources[i].limits.logs_per_period = nd_log.sources[i].limits.logs_per_period_backup; + spinlock_unlock(&nd_log.sources[i].spinlock); + } + + spinlock_unlock(&nd_log.std_output.spinlock); + spinlock_unlock(&nd_log.std_error.spinlock); +} + +void nd_log_limits_unlimited(void) { + nd_log_limits_reset(); + for(size_t i = 0; i < _NDLS_MAX ;i++) { + nd_log.sources[i].limits.logs_per_period = 0; + } +} + +bool nd_log_limit_reached(struct nd_log_source *source) { + if(source->limits.throttle_period == 0 || source->limits.logs_per_period == 0) + return false; + + usec_t now_ut = now_monotonic_usec(); + if(!source->limits.started_monotonic_ut) + source->limits.started_monotonic_ut = now_ut; + + source->limits.counter++; + + if(now_ut - source->limits.started_monotonic_ut > (usec_t)source->limits.throttle_period) { + if(source->limits.prevented) { + BUFFER *wb = buffer_create(1024, NULL); + buffer_sprintf(wb, + "LOG FLOOD PROTECTION: resuming logging " + "(prevented %"PRIu32" logs in the last %"PRIu32" seconds).", + source->limits.prevented, + source->limits.throttle_period); + + if(source->pending_msg) + freez((void *)source->pending_msg); + + source->pending_msg = strdupz(buffer_tostring(wb)); + + buffer_free(wb); + } + + // restart the period accounting + source->limits.started_monotonic_ut = now_ut; + source->limits.counter = 1; + source->limits.prevented = 0; + + // log this error + return false; + } + + if(source->limits.counter > source->limits.logs_per_period) { + if(!source->limits.prevented) { + BUFFER *wb = buffer_create(1024, NULL); + buffer_sprintf(wb, + "LOG FLOOD PROTECTION: too many logs (%"PRIu32" logs in %"PRId64" seconds, threshold is set to %"PRIu32" logs " + "in %"PRIu32" seconds). 
Preventing more logs from process '%s' for %"PRId64" seconds.",
+                       source->limits.counter,
+                       (int64_t)((now_ut - source->limits.started_monotonic_ut) / USEC_PER_SEC),
+                       source->limits.logs_per_period,
+                       source->limits.throttle_period,
+                       program_name,
+                       (int64_t)(((source->limits.started_monotonic_ut + (source->limits.throttle_period * USEC_PER_SEC) - now_ut)) / USEC_PER_SEC)
+                       );
+
+            if(source->pending_msg)
+                freez((void *)source->pending_msg);
+
+            source->pending_msg = strdupz(buffer_tostring(wb));
+
+            buffer_free(wb);
+        }
+
+        source->limits.prevented++;
+
+        // prevent logging this error
+#ifdef NETDATA_INTERNAL_CHECKS
+        return false;
+#else
+        return true;
+#endif
+    }
+
+    return false;
+}
diff --git a/src/libnetdata/log/nd_log_limit.h b/src/libnetdata/log/nd_log_limit.h
new file mode 100644
index 000000000..5486abde9
--- /dev/null
+++ b/src/libnetdata/log/nd_log_limit.h
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_ND_LOG_LIMIT_H
+#define NETDATA_ND_LOG_LIMIT_H
+
+#include "../libnetdata.h"
+
+struct nd_log_source;
+bool nd_log_limit_reached(struct nd_log_source *source);
+
+struct nd_log_limit {
+    usec_t started_monotonic_ut;
+    uint32_t counter;
+    uint32_t prevented;
+
+    uint32_t throttle_period;
+    uint32_t logs_per_period;
+    uint32_t logs_per_period_backup;
+};
+
+#define ND_LOG_LIMITS_DEFAULT (struct nd_log_limit){ .logs_per_period = ND_LOG_DEFAULT_THROTTLE_LOGS, .logs_per_period_backup = ND_LOG_DEFAULT_THROTTLE_LOGS, .throttle_period = ND_LOG_DEFAULT_THROTTLE_PERIOD, }
+#define ND_LOG_LIMITS_UNLIMITED (struct nd_log_limit){ .logs_per_period = 0, .logs_per_period_backup = 0, .throttle_period = 0, }
+
+#include "nd_log-internals.h"
+
+#endif //NETDATA_ND_LOG_LIMIT_H
diff --git a/src/libnetdata/log/nd_wevents_manifest.xml b/src/libnetdata/log/nd_wevents_manifest.xml
new file mode 100644
index 000000000..9e326c1cb
--- /dev/null
+++ b/src/libnetdata/log/nd_wevents_manifest.xml
@@ -0,0 +1,295 @@
[... 295 added lines: Windows Event Log provider manifest XML; the markup was lost in extraction and is not reproduced here ...]
diff --git a/src/libnetdata/log/systemd-cat-native.c b/src/libnetdata/log/systemd-cat-native.c
index 74d3728a3..2e4f55e97 100644
--- a/src/libnetdata/log/systemd-cat-native.c
+++ b/src/libnetdata/log/systemd-cat-native.c
@@ -11,7 +11,9 @@
 #include
 #endif
 
-static inline void log_message_to_stderr(BUFFER *msg) {
+bool verbose = false;
+
+static inline void log_message_to_stderr(BUFFER *msg, const char *scope) {
     CLEAN_BUFFER *tmp = buffer_create(0, NULL);
 
     for(size_t i = 0; i < msg->len ;i++) {
@@ -24,13 +26,13 @@ static inline void log_message_to_stderr(BUFFER *msg, const char *scope) {
         }
     }
 
-    fprintf(stderr, "SENDING: %s\n", buffer_tostring(tmp));
+    fprintf(stderr, "SENDING %s: %s\n", scope, buffer_tostring(tmp));
 }
 
 static inline buffered_reader_ret_t get_next_line(struct buffered_reader *reader, BUFFER *line, int timeout_ms) {
     while(true) {
         if(unlikely(!buffered_reader_next_line(reader, line))) {
-            buffered_reader_ret_t ret = buffered_reader_read_timeout(reader, STDIN_FILENO, timeout_ms, false);
+            buffered_reader_ret_t ret = buffered_reader_read_timeout(reader, STDIN_FILENO, timeout_ms, verbose);
             if(unlikely(ret != BUFFERED_READER_READ_OK))
                 return ret;
 
@@ -126,7 +128,7 @@ static inline void buffer_memcat_replacing_newlines(BUFFER *wb, const char *src,
 // ----------------------------------------------------------------------------
 // log to a systemd-journal-remote
 
-#ifdef HAVE_CURL
+#ifdef
HAVE_LIBCURL #include #ifndef HOST_NAME_MAX @@ -203,8 +205,8 @@ static void journal_remote_complete_event(BUFFER *msg, usec_t *monotonic_ut) { buffer_sprintf(msg, "" - "__REALTIME_TIMESTAMP=%llu\n" - "__MONOTONIC_TIMESTAMP=%llu\n" + "__REALTIME_TIMESTAMP=%"PRIu64"\n" + "__MONOTONIC_TIMESTAMP=%"PRIu64"\n" "_MACHINE_ID=%s\n" "_BOOT_ID=%s\n" "_HOSTNAME=%s\n" @@ -226,7 +228,8 @@ static void journal_remote_complete_event(BUFFER *msg, usec_t *monotonic_ut) { static CURLcode journal_remote_send_buffer(CURL* curl, BUFFER *msg) { - // log_message_to_stderr(msg); + if(verbose) + log_message_to_stderr(msg, "REMOTE"); struct upload_data upload = {0}; @@ -260,8 +263,8 @@ static log_to_journal_remote_ret_t log_input_to_journal_remote(const char *url, global_boot_id[0] = '\0'; char buffer[1024]; - if(read_file(BOOT_ID_PATH, buffer, sizeof(buffer)) == 0) { - uuid_t uuid; + if(read_txt_file(BOOT_ID_PATH, buffer, sizeof(buffer)) == 0) { + nd_uuid_t uuid; if(uuid_parse_flexi(buffer, uuid) == 0) uuid_unparse_lower_compact(uuid, global_boot_id); else @@ -270,13 +273,13 @@ static log_to_journal_remote_ret_t log_input_to_journal_remote(const char *url, if(global_boot_id[0] == '\0') { fprintf(stderr, "WARNING: cannot read '%s'. Will generate a random _BOOT_ID.\n", BOOT_ID_PATH); - uuid_t uuid; + nd_uuid_t uuid; uuid_generate_random(uuid); uuid_unparse_lower_compact(uuid, global_boot_id); } - if(read_file(MACHINE_ID_PATH, buffer, sizeof(buffer)) == 0) { - uuid_t uuid; + if(read_txt_file(MACHINE_ID_PATH, buffer, sizeof(buffer)) == 0) { + nd_uuid_t uuid; if(uuid_parse_flexi(buffer, uuid) == 0) uuid_unparse_lower_compact(uuid, global_machine_id); else @@ -285,13 +288,13 @@ static log_to_journal_remote_ret_t log_input_to_journal_remote(const char *url, if(global_machine_id[0] == '\0') { fprintf(stderr, "WARNING: cannot read '%s'. Will generate a random _MACHINE_ID.\n", MACHINE_ID_PATH); - uuid_t uuid; + nd_uuid_t uuid; uuid_generate_random(uuid); uuid_unparse_lower_compact(uuid, global_boot_id); } if(global_stream_id[0] == '\0') { - uuid_t uuid; + nd_uuid_t uuid; uuid_generate_random(uuid); uuid_unparse_lower_compact(uuid, global_stream_id); } @@ -456,10 +459,11 @@ static int help(void) { "Usage:\n" "\n" " %s\n" + " [--verbose|-v]\n" " [--newline=STRING]\n" " [--log-as-netdata|-N]\n" " [--namespace=NAMESPACE] [--socket=PATH]\n" -#ifdef HAVE_CURL +#ifdef HAVE_LIBCURL " [--url=URL [--key=FILENAME] [--cert=FILENAME] [--trust=FILENAME|all]]\n" #endif "\n" @@ -488,7 +492,7 @@ static int help(void) { " the log destination. 
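The remote-journal path above streams the export-format buffer to systemd-journal-remote over HTTP with libcurl. A rough sketch of the read-callback upload pattern it relies on follows; the struct layout and exact option set here are assumptions for illustration, not the patch's code:

    // Minimal sketch, assuming a libcurl read-callback upload.
    #include <curl/curl.h>
    #include <string.h>

    struct upload_state { const char *data; size_t len, sent; };

    static size_t read_cb(char *dst, size_t size, size_t nmemb, void *userp) {
        struct upload_state *u = userp;
        size_t room = size * nmemb, left = u->len - u->sent;
        size_t n = left < room ? left : room;
        memcpy(dst, u->data + u->sent, n);   // feed the next chunk to libcurl
        u->sent += n;
        return n;                            // 0 signals end of upload
    }

    static CURLcode send_to_journal_remote(CURL *curl, const char *msg, size_t len) {
        struct upload_state u = { .data = msg, .len = len, .sent = 0 };
        curl_easy_setopt(curl, CURLOPT_UPLOAD, 1L);
        curl_easy_setopt(curl, CURLOPT_READFUNCTION, read_cb);
        curl_easy_setopt(curl, CURLOPT_READDATA, &u);
        curl_easy_setopt(curl, CURLOPT_INFILESIZE_LARGE, (curl_off_t)len);
        return curl_easy_perform(curl);
    }
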
Only log fields defined by Netdata are accepted.\n" " If the environment variables expected by Netdata are not found, it\n" " falls back to stderr logging in logfmt format.\n" -#ifdef HAVE_CURL +#ifdef HAVE_LIBCURL "\n" " * Log to a systemd-journal-remote TCP socket, enabled with --url=URL\n" "\n" @@ -585,15 +589,16 @@ static int log_input_as_netdata(const char *newline, int timeout_ms) { ND_LOG_STACK_PUSH(lgs); lgs_reset(lgs); + ND_LOG_SOURCES source = NDLS_HEALTH; + ND_LOG_FIELD_PRIORITY priority = NDLP_INFO; size_t fields_added = 0; size_t messages_logged = 0; - ND_LOG_FIELD_PRIORITY priority = NDLP_INFO; while(get_next_line(&reader, line, timeout_ms) == BUFFERED_READER_READ_OK) { if(!line->len) { // an empty line - we are done for this message - nd_log(NDLS_HEALTH, priority, + nd_log(source, priority, "added %zu fields", // if the user supplied a MESSAGE, this will be ignored fields_added); @@ -606,7 +611,7 @@ static int log_input_as_netdata(const char *newline, int timeout_ms) { if(equal) { const char *field = line->buffer; size_t field_len = equal - line->buffer; - ND_LOG_FIELD_ID id = nd_log_field_id_by_name(field, field_len); + ND_LOG_FIELD_ID id = nd_log_field_id_by_journal_name(field, field_len); if(id != NDF_STOP) { const char *value = ++equal; @@ -625,7 +630,7 @@ static int log_input_as_netdata(const char *newline, int timeout_ms) { struct log_stack_entry backup = lgs[NDF_MESSAGE]; lgs[NDF_MESSAGE] = ND_LOG_FIELD_TXT(NDF_MESSAGE, NULL); - nd_log(NDLS_COLLECTORS, NDLP_ERR, + nd_log(source, NDLP_ERR, "Field '%.*s' is not a Netdata field. Ignoring it.", (int)field_len, field); @@ -636,7 +641,7 @@ static int log_input_as_netdata(const char *newline, int timeout_ms) { struct log_stack_entry backup = lgs[NDF_MESSAGE]; lgs[NDF_MESSAGE] = ND_LOG_FIELD_TXT(NDF_MESSAGE, NULL); - nd_log(NDLS_COLLECTORS, NDLP_ERR, + nd_log(source, NDLP_ERR, "Line does not contain an = sign; ignoring it: %s", line->buffer); @@ -648,7 +653,7 @@ static int log_input_as_netdata(const char *newline, int timeout_ms) { } if(fields_added) { - nd_log(NDLS_HEALTH, priority, "added %zu fields", fields_added); + nd_log(source, priority, "added %zu fields", fields_added); messages_logged++; } @@ -659,7 +664,8 @@ static int log_input_as_netdata(const char *newline, int timeout_ms) { // log to a local systemd-journald static bool journal_local_send_buffer(int fd, BUFFER *msg) { - // log_message_to_stderr(msg); + if(verbose) + log_message_to_stderr(msg, "LOCAL"); bool ret = journal_direct_send(fd, msg->buffer, msg->len); if (!ret) @@ -720,19 +726,25 @@ static int log_input_to_journal(const char *socket, const char *namespace, const } cleanup: + if(verbose) { + if(failed_messages) + fprintf(stderr, "%zu messages failed to be logged\n", failed_messages); + if(!messages_logged) + fprintf(stderr, "No messages were logged!\n"); + } + return !failed_messages && messages_logged ? 
0 : 1; } int main(int argc, char *argv[]) { - clocks_init(); nd_log_initialize_for_external_plugins(argv[0]); - int timeout_ms = -1; // wait forever + int timeout_ms = 0; // wait forever bool log_as_netdata = false; const char *newline = NULL; const char *namespace = NULL; const char *socket = getenv("NETDATA_SYSTEMD_JOURNAL_PATH"); -#ifdef HAVE_CURL +#ifdef HAVE_LIBCURL const char *url = NULL; const char *key = NULL; const char *cert = NULL; @@ -746,6 +758,9 @@ int main(int argc, char *argv[]) { if(strcmp(k, "--help") == 0 || strcmp(k, "-h") == 0) return help(); + else if(strcmp(k, "--verbose") == 0 || strcmp(k, "-v") == 0) + verbose = true; + else if(strcmp(k, "--log-as-netdata") == 0 || strcmp(k, "-N") == 0) log_as_netdata = true; @@ -758,7 +773,7 @@ int main(int argc, char *argv[]) { else if(strncmp(k, "--newline=", 10) == 0) newline = &k[10]; -#ifdef HAVE_CURL +#ifdef HAVE_LIBCURL else if (strncmp(k, "--url=", 6) == 0) url = &k[6]; @@ -780,7 +795,7 @@ int main(int argc, char *argv[]) { } } -#ifdef HAVE_CURL +#ifdef HAVE_LIBCURL if(log_as_netdata && url) { fprintf(stderr, "Cannot log to a systemd-journal-remote URL as Netdata. " "Please either give --url or --log-as-netdata, not both.\n"); @@ -804,7 +819,7 @@ int main(int argc, char *argv[]) { if(log_as_netdata) return log_input_as_netdata(newline, timeout_ms); -#ifdef HAVE_CURL +#ifdef HAVE_LIBCURL if(url) { if(url && namespace && *namespace) snprintfz(global_namespace, sizeof(global_namespace), "_NAMESPACE=%s\n", namespace); diff --git a/src/libnetdata/log/systemd-journal-helpers.c b/src/libnetdata/log/systemd-journal-helpers.c new file mode 100644 index 000000000..24553364b --- /dev/null +++ b/src/libnetdata/log/systemd-journal-helpers.c @@ -0,0 +1,142 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "systemd-journal-helpers.h" + +bool is_path_unix_socket(const char *path) { + // Check if the path is valid + if(!path || !*path) + return false; + + struct stat statbuf; + + // Use stat to check if the file exists and is a socket + if (stat(path, &statbuf) == -1) + // The file does not exist or cannot be accessed + return false; + + // Check if the file is a socket + if (S_ISSOCK(statbuf.st_mode)) + return true; + + return false; +} + +bool is_stderr_connected_to_journal(void) { + const char *journal_stream = getenv("JOURNAL_STREAM"); + if (!journal_stream) + return false; // JOURNAL_STREAM is not set + + struct stat stderr_stat; + if (fstat(STDERR_FILENO, &stderr_stat) < 0) + return false; // Error in getting stderr info + + // Parse device and inode from JOURNAL_STREAM + char *endptr; + long journal_dev = strtol(journal_stream, &endptr, 10); + if (*endptr != ':') + return false; // Format error in JOURNAL_STREAM + + long journal_ino = strtol(endptr + 1, NULL, 10); + + return (stderr_stat.st_dev == (dev_t)journal_dev) && (stderr_stat.st_ino == (ino_t)journal_ino); +} + +int journal_direct_fd(const char *path) { + if(!path || !*path) + path = JOURNAL_DIRECT_SOCKET; + + if(!is_path_unix_socket(path)) + return -1; + + int fd = socket(AF_UNIX, SOCK_DGRAM| DEFAULT_SOCKET_FLAGS, 0); + if (fd < 0) return -1; + + sock_setcloexec(fd); + + struct sockaddr_un addr; + memset(&addr, 0, sizeof(struct sockaddr_un)); + addr.sun_family = AF_UNIX; + strncpy(addr.sun_path, path, sizeof(addr.sun_path) - 1); + + // Connect the socket (optional, but can simplify send operations) + if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) { + close(fd); + return -1; + } + + return fd; +} + +static inline bool journal_send_with_memfd(int fd 
__maybe_unused, const char *msg __maybe_unused, size_t msg_len __maybe_unused) { +#if defined(__NR_memfd_create) && defined(MFD_ALLOW_SEALING) && defined(F_ADD_SEALS) && defined(F_SEAL_SHRINK) && defined(F_SEAL_GROW) && defined(F_SEAL_WRITE) + // Create a memory file descriptor + int memfd = (int)syscall(__NR_memfd_create, "journald", MFD_ALLOW_SEALING); + if (memfd < 0) return false; + + // Write data to the memfd + if (write(memfd, msg, msg_len) != (ssize_t)msg_len) { + close(memfd); + return false; + } + + // Seal the memfd to make it immutable + if (fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_GROW | F_SEAL_WRITE) < 0) { + close(memfd); + return false; + } + + struct iovec iov = {0}; + struct msghdr msghdr = {0}; + struct cmsghdr *cmsghdr; + char cmsgbuf[CMSG_SPACE(sizeof(int))]; + + msghdr.msg_iov = &iov; + msghdr.msg_iovlen = 1; + msghdr.msg_control = cmsgbuf; + msghdr.msg_controllen = sizeof(cmsgbuf); + + cmsghdr = CMSG_FIRSTHDR(&msghdr); + if(!cmsghdr) { + close(memfd); + return false; + } + + cmsghdr->cmsg_level = SOL_SOCKET; + cmsghdr->cmsg_type = SCM_RIGHTS; + cmsghdr->cmsg_len = CMSG_LEN(sizeof(int)); + memcpy(CMSG_DATA(cmsghdr), &memfd, sizeof(int)); + + ssize_t r = sendmsg(fd, &msghdr, 0); + + close(memfd); + return r >= 0; +#else + return false; +#endif +} + +bool journal_direct_send(int fd, const char *msg, size_t msg_len) { + // Send the datagram + if (send(fd, msg, msg_len, 0) < 0) { + if(errno != EMSGSIZE) + return false; + + // datagram is too large, fallback to memfd + if(!journal_send_with_memfd(fd, msg, msg_len)) + return false; + } + + return true; +} + +void journal_construct_path(char *dst, size_t dst_len, const char *host_prefix, const char *namespace_str) { + if(!host_prefix) + host_prefix = ""; + + if(namespace_str) + snprintfz(dst, dst_len, "%s/run/systemd/journal.%s/socket", + host_prefix, namespace_str); + else + snprintfz(dst, dst_len, "%s" JOURNAL_DIRECT_SOCKET, + host_prefix); +} diff --git a/src/libnetdata/log/systemd-journal-helpers.h b/src/libnetdata/log/systemd-journal-helpers.h new file mode 100644 index 000000000..a85f8e85a --- /dev/null +++ b/src/libnetdata/log/systemd-journal-helpers.h @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "../libnetdata.h" + +#ifndef NETDATA_LOG_SYSTEMD_JOURNAL_HELPERS_H +#define NETDATA_LOG_SYSTEMD_JOURNAL_HELPERS_H + +#define JOURNAL_DIRECT_SOCKET "/run/systemd/journal/socket" + +void journal_construct_path(char *dst, size_t dst_len, const char *host_prefix, const char *namespace_str); + +int journal_direct_fd(const char *path); +bool journal_direct_send(int fd, const char *msg, size_t msg_len); + +bool is_path_unix_socket(const char *path); +bool is_stderr_connected_to_journal(void); + +#endif // NETDATA_LOG_SYSTEMD_JOURNAL_HELPERS_H diff --git a/src/libnetdata/log/wevt_netdata_compile.bat b/src/libnetdata/log/wevt_netdata_compile.bat new file mode 100644 index 000000000..279b6c31b --- /dev/null +++ b/src/libnetdata/log/wevt_netdata_compile.bat @@ -0,0 +1,121 @@ +@echo off +setlocal enabledelayedexpansion + +echo PATH=%PATH% + +if "%~1"=="" ( + echo Error: Missing .mc file path. + goto :usage +) +if "%~2"=="" ( + echo Error: Missing destination directory. 
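Taken together, the journal helpers above are enough to emit one record straight to systemd-journald. A minimal sketch, with error handling reduced to a single fallback (the field names follow the journald native protocol; this is illustrative, not part of the patch):

    #include <stdbool.h>
    #include <string.h>
    #include <unistd.h>

    static int send_one_entry(void) {
        char path[1024];

        // NULL prefix and NULL namespace select the default /run/systemd/journal/socket
        journal_construct_path(path, sizeof(path), NULL, NULL);

        int fd = journal_direct_fd(path);
        if(fd < 0)
            return 1; // no journald socket available

        // journald native protocol: one FIELD=value per line; an oversized
        // datagram falls back to the sealed-memfd path inside journal_direct_send()
        const char *msg = "MESSAGE=hello from netdata\nPRIORITY=6\n";
        bool ok = journal_direct_send(fd, msg, strlen(msg));

        close(fd);
        return ok ? 0 : 1;
    }
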
+ goto :usage +) + +REM Set variables +set "SRC_DIR=%~1" +set "BIN_DIR=%~2" +set "MC_FILE=%BIN_DIR%\wevt_netdata.mc" +set "MAN_FILE=%BIN_DIR%\wevt_netdata_manifest.xml" +set "BASE_NAME=wevt_netdata" +set "SDK_PATH=C:\Program Files (x86)\Windows Kits\10\bin\10.0.26100.0\x64" +set "VS_PATH=C:\Program Files\Microsoft Visual Studio\2022\Community\VC\Tools\MSVC\14.39.33519\bin\Hostx64\x64" + +if not exist "%SRC_DIR%" ( + echo Error: Source directory does not exist. + exit /b 1 +) + +if not exist "%BIN_DIR%" ( + echo Error: Destination directory does not exist. + exit /b 1 +) + +if not exist "%MC_FILE%" ( + echo Error: %MC_FILE% not found. + exit /b 1 +) + +if not exist "%MAN_FILE%" ( + echo Error: %MAN_FILE% not found. + exit /b 1 +) + +REM Add SDK paths to PATH +set "PATH=C:\Windows\System32;%SDK_PATH%;%VS_PATH%;%PATH%" + +REM Check if commands are available +where mc >nul 2>nul +if %errorlevel% neq 0 ( + echo Error: mc.exe not found in PATH. + exit /b 1 +) +where rc >nul 2>nul +if %errorlevel% neq 0 ( + echo Error: rc.exe not found in PATH. + exit /b 1 +) +where link >nul 2>nul +if %errorlevel% neq 0 ( + echo Error: link.exe not found in PATH. + exit /b 1 +) +where wevtutil >nul 2>nul +if %errorlevel% neq 0 ( + echo Error: wevtutil.exe not found in PATH. + exit /b 1 +) + +REM Change to the destination directory +cd /d "%BIN_DIR%" + +echo. +echo Running mc.exe... +mc -v -b -U "%MC_FILE%" "%MAN_FILE%" +if %errorlevel% neq 0 ( + echo Error: mc.exe failed on messages. + exit /b 1 +) + +if not exist "%BASE_NAME%.rc" ( + echo Error: %BASE_NAME%.rc not found. + exit /b 1 +) + +echo. +echo Modifying %BASE_NAME%.rc to include the manifest... +copy "%MAN_FILE%" %BASE_NAME%_manifest.man +echo 1 2004 "%BASE_NAME%_manifest.man" >> %BASE_NAME%.rc + +echo. +echo %BASE_NAME%.rc contents: +type %BASE_NAME%.rc + +echo. +echo Running rc.exe... +rc /v /fo %BASE_NAME%.res %BASE_NAME%.rc +if %errorlevel% neq 0 ( + echo Error: rc.exe failed. + exit /b 1 +) + +if not exist "%BASE_NAME%.res" ( + echo Error: %BASE_NAME%.res not found. + exit /b 1 +) + +echo. +echo Running link.exe... +link /dll /noentry /machine:x64 /out:%BASE_NAME%.dll %BASE_NAME%.res +if %errorlevel% neq 0 ( + echo Error: link.exe failed. + exit /b 1 +) + +echo. +echo Process completed successfully. +exit /b 0 + +:usage +echo Usage: %~nx0 [path_to_mc_file] [destination_directory] +exit /b 1 diff --git a/src/libnetdata/log/wevt_netdata_compile.sh b/src/libnetdata/log/wevt_netdata_compile.sh new file mode 100644 index 000000000..eae510645 --- /dev/null +++ b/src/libnetdata/log/wevt_netdata_compile.sh @@ -0,0 +1,48 @@ +#!/bin/bash + +mylocation=$(dirname "${0}") + +# Check if both parameters are provided +if [ $# -ne 2 ]; then + echo "Error: Incorrect number of parameters." + echo "Usage: $0 " + exit 1 +fi + +# Get the parameters +src_dir="$1" +dest_dir="$2" + +# Get the directory of this script +SCRIPT_DIR="$(dirname "$0")" + +# Create a temporary batch file +temp_bat=$(mktemp --suffix=.bat) + +# Write the contents to the temporary batch file +# Use cygpath directly within the heredoc +cat << EOF > "$temp_bat" +@echo off +set "PATH=%SYSTEMROOT%;$("${mylocation}/../../../packaging/windows/find-sdk-path.sh" --sdk -w);$("${mylocation}/../../../packaging/windows/find-sdk-path.sh" --visualstudio -w)" +call "$(cygpath -w -a "$SCRIPT_DIR/wevt_netdata_compile.bat")" "$(cygpath -w -a "$src_dir")" "$(cygpath -w -a "$dest_dir")" +EOF + +# Execute the temporary batch file +echo +echo "Executing Windows Batch File..." 
+echo
+cat "$temp_bat"
+cmd.exe //c "$(cygpath -w -a "$temp_bat")"
+exit_status=$?
+
+# Remove the temporary batch file
+rm "$temp_bat"
+
+# Check the exit status
+if [ $exit_status -eq 0 ]; then
+    echo "wevt_netdata_compile.bat executed successfully."
+else
+    echo "wevt_netdata_compile.bat failed with exit status $exit_status."
+fi
+
+exit $exit_status
diff --git a/src/libnetdata/log/wevt_netdata_install.bat b/src/libnetdata/log/wevt_netdata_install.bat
new file mode 100644
index 000000000..515607592
--- /dev/null
+++ b/src/libnetdata/log/wevt_netdata_install.bat
@@ -0,0 +1,52 @@
+@echo off
+setlocal enabledelayedexpansion
+
+set "MAN_SRC=%~dp0wevt_netdata_manifest.xml"
+set "DLL_SRC=%~dp0wevt_netdata.dll"
+set "DLL_DST=%SystemRoot%\System32\wevt_netdata.dll"
+
+where wevtutil >nul 2>nul
+if %errorlevel% neq 0 (
+    echo Error: wevtutil.exe not found in PATH.
+    exit /b 1
+)
+
+echo.
+echo Uninstalling previous manifest (if any)...
+wevtutil um "%MAN_SRC%"
+
+echo.
+echo Copying %DLL_SRC% to %DLL_DST%
+copy /y "%DLL_SRC%" "%DLL_DST%"
+if %errorlevel% neq 0 (
+    echo Error: Failed to copy %DLL_SRC% to %DLL_DST%
+    exit /b 1
+)
+
+echo.
+echo Granting access to %DLL_DST% for Windows Event Logging...
+icacls "%DLL_DST%" /grant "NT SERVICE\EventLog":R
+if %errorlevel% neq 0 (
+    echo Error: Failed to grant access to %DLL_DST%.
+    exit /b 1
+)
+
+echo.
+echo Importing the manifest...
+wevtutil im "%MAN_SRC%" /rf:"%DLL_DST%" /mf:"%DLL_DST%"
+if %errorlevel% neq 0 (
+    echo Error: Failed to import the manifest.
+    exit /b 1
+)
+
+echo.
+echo Verifying Netdata Publisher for Event Tracing for Windows (ETW)...
+wevtutil gp "Netdata"
+if %errorlevel% neq 0 (
+    echo Error: Failed to get publisher Netdata.
+    exit /b 1
+)
+
+echo.
+echo Netdata Event Tracing for Windows manifest installed successfully.
+exit /b 0 diff --git a/src/libnetdata/log/wevt_netdata_mc_generate.c b/src/libnetdata/log/wevt_netdata_mc_generate.c new file mode 100644 index 000000000..5ab2bdf17 --- /dev/null +++ b/src/libnetdata/log/wevt_netdata_mc_generate.c @@ -0,0 +1,518 @@ +#include +#include +#include +#include +#include +#include + +// from winnt.h +#define EVENTLOG_SUCCESS 0x0000 +#define EVENTLOG_ERROR_TYPE 0x0001 +#define EVENTLOG_WARNING_TYPE 0x0002 +#define EVENTLOG_INFORMATION_TYPE 0x0004 +#define EVENTLOG_AUDIT_SUCCESS 0x0008 +#define EVENTLOG_AUDIT_FAILURE 0x0010 + +// the severities we define in .mc file +#define STATUS_SEVERITY_INFORMATIONAL 0x1 +#define STATUS_SEVERITY_WARNING 0x2 +#define STATUS_SEVERITY_ERROR 0x3 + +#define FACILITY_APPLICATION 0x0fff + +#include "nd_log-common.h" +#include "nd_log-to-windows-common.h" + +const char *get_msg_symbol(MESSAGE_ID msg) { + switch(msg) { + case MSGID_MESSAGE_ONLY: + return "MESSAGE_ONLY"; + + case MSGID_MESSAGE_ERRNO: + return "MESSAGE_ERRNO"; + + case MSGID_REQUEST_ONLY: + return "REQUEST_ONLY"; + + case MSGID_ACCESS_MESSAGE: + return "ACCESS_MESSAGE"; + + case MSGID_ACCESS_MESSAGE_REQUEST: + return "ACCESS_MESSAGE_REQUEST"; + + case MSGID_ACCESS_MESSAGE_USER: + return "ACCESS_MESSAGE_USER"; + + case MSGID_ACCESS: + return "ACCESS"; + + case MSGID_ACCESS_USER: + return "ACCESS_USER"; + + case MSGID_ACCESS_FORWARDER: + return "ACCESS_FORWARDER"; + + case MSGID_ACCESS_FORWARDER_USER: + return "ACCESS_FORWARDER_USER"; + + case MSGID_ALERT_TRANSITION: + return "ALERT_TRANSITION"; + + default: + fprintf(stderr, "\n\nInvalid message id %d!\n\n\n", msg); + exit(1); + } +} + +const char *get_msg_format(MESSAGE_ID msg) { + switch(msg) { + case MSGID_MESSAGE_ONLY: + return "%2(%12): %64\r\n"; + + case MSGID_MESSAGE_ERRNO: + return "%2(%12): %64%n\r\n" + "%n\r\n" + " Unix Errno : %5%n\r\n" + " Windows Error: %6%n\r\n" + ; + + case MSGID_REQUEST_ONLY: + return "%2(%12): %63\r\n"; + + case MSGID_ACCESS_MESSAGE: + return "%64\r\n"; + + case MSGID_ACCESS_MESSAGE_REQUEST: + return "%64%n\r\n" + "%n\r\n" + " Request: %63%n\r\n" + ; + + case MSGID_ACCESS_MESSAGE_USER: + return "%64%n\r\n" + "%n\r\n" + " User: %21, role: %22, permissions: %23%n\r\n" + ; + + case MSGID_ACCESS: + return "%33 %63%n\r\n" + "%n\r\n" + " Response Code : %34%n\r\n" + " Transaction ID: %36%n\r\n" + " Source IP : %24%n\r\n" + ; + + case MSGID_ACCESS_USER: + return "%33 %63%n\r\n" + "%n\r\n" + " Response Code : %34%n\r\n" + " Transaction ID: %36%n\r\n" + " Source IP : %24%n\r\n" + " User : %21, role: %22, permissions: %23%n\r\n" + ; + + case MSGID_ACCESS_FORWARDER: + return "%33 %63%n\r\n" + "%n\r\n" + " Response Code : %34%n\r\n" + " Transaction ID: %36%n\r\n" + " Source IP : %24, For %27%n\r\n" + ; + + case MSGID_ACCESS_FORWARDER_USER: + return "%33 %63%n\r\n" + "%n\r\n" + " Response Code : %34%n\r\n" + " Transaction ID: %36%n\r\n" + " Source IP : %24, For %27%n\r\n" + " User : %21, role: %22, permissions: %23%n\r\n" + ; + + case MSGID_ALERT_TRANSITION: + return "Alert '%47' of instance '%16' on node '%15' transitioned from %57 to %56\r\n"; + + default: + fprintf(stderr, "\n\nInvalid message id %d!\n\n\n", msg); + exit(1); + } +} + +int main(int argc, const char **argv) { + (void)argc; (void)argv; + + const char *header = NULL, *footer = NULL, *s_header = NULL, *s_footer = NULL; + + bool manifest = false; + if(argc == 2 && strcmp(argv[1], "--manifest") == 0) { + manifest = true; + + header = "\r\n" + "\r\n" + "\r\n" + " \r\n" + " \r\n" + "\r\n" + " \r\n" + "\r\n" + " \r\n" + " \r\n" + " \r\n" + 
"\r\n" + " \r\n" + "\r\n" + " \r\n" + "\r\n" + " \r\n" + "\r\n" + " \r\n" + " \r\n" + "\r\n" + " \r\n" + " \r\n" + "\r\n" + " \r\n" + " \r\n" + "\r\n" + " \r\n" + " \r\n" + "\r\n" + " \r\n" + " \r\n" + " \r\n" + "\r\n" + " \r\n" + ; + + footer = " \r\n" + " \r\n" + " \r\n" + " \r\n" + ; + + s_header = " \r\n" + " \r\n" + " \r\n" + " \r\n" + "\r\n" + " \r\n" + " \r\n" + " \r\n" + " \r\n" + " \r\n" + "\r\n" + ; + + s_footer = " \r\n" + " \r\n" + " \r\n" + "\r\n" + ; + } + else { + header = ";// THIS FILE IS AUTOMATICALLY GENERATED - DO NOT EDIT\r\n" + "\r\n" + "MessageIdTypedef=DWORD\r\n" + "\r\n" + "SeverityNames=(\r\n" + " Informational=0x1:STATUS_SEVERITY_INFORMATIONAL\r\n" + " Warning=0x2:STATUS_SEVERITY_WARNING\r\n" + " Error=0x3:STATUS_SEVERITY_ERROR\r\n" + " )\r\n" + "\r\n" + "FacilityNames=(\r\n" + " " NETDATA_CHANNEL_NAME "=0x0FFF:FACILITY_NETDATA\r\n" + " )\r\n" + "\r\n" + "LanguageNames=(\r\n" + " English=0x409:MSG00409\r\n" + " )\r\n" + "\r\n" + ; + + footer = ""; + } + + bool done[UINT16_MAX] = { 0 }; + char symbol[1024]; + + printf("%s", header); + for(size_t src = 1; src < _NDLS_MAX ;src++) { + for(size_t pri = 0; pri < _NDLP_MAX ;pri++) { + uint8_t severity = get_severity_from_priority(pri); + + for(size_t msg = 1; msg < _MSGID_MAX ;msg++) { + + if(src >= 16) { + fprintf(stderr, "\n\nSource %zu is bigger than 4 bits!\n\n", src); + return 1; + } + + if(pri >= 16) { + fprintf(stderr, "\n\nPriority %zu is bigger than 4 bits!\n\n", pri); + return 1; + } + + if(msg >= 256) { + fprintf(stderr, "\n\nMessageID %zu is bigger than 8 bits!\n\n", msg); + return 1; + } + + uint16_t eventID = construct_event_code(src, pri, msg); + if((eventID & 0xFFFF) != eventID) { + fprintf(stderr, "\n\nEventID 0x%x is bigger than 16 bits!\n\n", eventID); + return 1; + } + + if(done[eventID]) continue; + done[eventID] = true; + + const char *level = get_level_from_priority_str(pri); + const char *pri_txt; + switch(pri) { + case NDLP_EMERG: + pri_txt = "EMERG"; + break; + + case NDLP_CRIT: + pri_txt = "CRIT"; + break; + + case NDLP_ALERT: + pri_txt = "ALERT"; + break; + + case NDLP_ERR: + pri_txt = "ERR"; + break; + + case NDLP_WARNING: + pri_txt = "WARN"; + break; + + case NDLP_INFO: + pri_txt = "INFO"; + break; + + case NDLP_NOTICE: + pri_txt = "NOTICE"; + break; + + case NDLP_DEBUG: + pri_txt = "DEBUG"; + break; + + default: + fprintf(stderr, "\n\nInvalid priority %zu!\n\n\n", pri); + return 1; + } + + const char *channel; + const char *src_txt; + switch(src) { + case NDLS_COLLECTORS: + src_txt = "COLLECTORS"; + channel = NETDATA_ETW_CHANNEL_NAME "/" NETDATA_ETW_SUBCHANNEL_COLLECTORS; + break; + + case NDLS_ACCESS: + src_txt = "ACCESS"; + channel = NETDATA_ETW_CHANNEL_NAME "/" NETDATA_ETW_SUBCHANNEL_ACCESS; + break; + + case NDLS_HEALTH: + src_txt = "HEALTH"; + channel = NETDATA_ETW_CHANNEL_NAME "/" NETDATA_ETW_SUBCHANNEL_HEALTH; + break; + + case NDLS_DEBUG: + src_txt = "DEBUG"; + channel = NETDATA_ETW_CHANNEL_NAME "/" NETDATA_ETW_SUBCHANNEL_DAEMON; + break; + + case NDLS_DAEMON: + src_txt = "DAEMON"; + channel = NETDATA_ETW_CHANNEL_NAME "/" NETDATA_ETW_SUBCHANNEL_DAEMON; + break; + + case NDLS_ACLK: + src_txt = "ACLK"; + channel = NETDATA_ETW_CHANNEL_NAME "/" NETDATA_ETW_SUBCHANNEL_ACLK; + break; + + default: + fprintf(stderr, "\n\nInvalid source %zu!\n\n\n", src); + return 1; + } + + const char *msg_txt = get_msg_symbol(msg); + const char *format = get_msg_format(msg); + + const char *severity_txt; + switch (severity) { + case STATUS_SEVERITY_INFORMATIONAL: + severity_txt = "Informational"; + break; 
+ + case STATUS_SEVERITY_ERROR: + severity_txt = "Error"; + break; + + case STATUS_SEVERITY_WARNING: + severity_txt = "Warning"; + break; + + default: + fprintf(stderr, "\n\nInvalid severity id %u!\n\n\n", severity); + return 1; + } + + if(manifest) + snprintf(symbol, sizeof(symbol), "ED_%s_%s_%s", src_txt, pri_txt, msg_txt); + else + snprintf(symbol, sizeof(symbol), "MC_%s_%s_%s", src_txt, pri_txt, msg_txt); + + if(manifest) + printf(" \r\n\r\n", + symbol, eventID, msg_txt, channel, level); + else + printf("MessageId=0x%x\r\n" + "Severity=%s\r\n" + "Facility=" NETDATA_CHANNEL_NAME "\r\n" + "SymbolicName=%s\r\n" + "Language=English\r\n" + "%s" + ".\r\n" + "\r\n", + eventID, severity_txt, symbol, format); + } + } + } + printf("%s", footer); + + if(s_header) { + printf("%s", s_header); + + for(size_t msg = 1; msg < _MSGID_MAX ;msg++) { + const char *msg_txt = get_msg_symbol(msg); + const char *format = get_msg_format(msg); + printf(" \r\n", msg_txt, format); + } + + printf("%s", s_footer); + } +} + diff --git a/src/libnetdata/maps/local-sockets.h b/src/libnetdata/maps/local-sockets.h deleted file mode 100644 index 6f2ffd81a..000000000 --- a/src/libnetdata/maps/local-sockets.h +++ /dev/null @@ -1,1419 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#ifndef NETDATA_LOCAL_SOCKETS_H -#define NETDATA_LOCAL_SOCKETS_H - -#include "libnetdata/libnetdata.h" - -#ifdef HAVE_LIBMNL -#include -#include -#include -#include -#include -#include -#endif - -#define UID_UNSET (uid_t)(UINT32_MAX) - -// -------------------------------------------------------------------------------------------------------------------- -// hashtable for keeping the namespaces -// key and value is the namespace inode - -#define SIMPLE_HASHTABLE_VALUE_TYPE uint64_t -#define SIMPLE_HASHTABLE_NAME _NET_NS -#include "libnetdata/simple_hashtable.h" - -// -------------------------------------------------------------------------------------------------------------------- -// hashtable for keeping the sockets of PIDs -// key is the inode - -struct pid_socket; -#define SIMPLE_HASHTABLE_VALUE_TYPE struct pid_socket -#define SIMPLE_HASHTABLE_NAME _PID_SOCKET -#include "libnetdata/simple_hashtable.h" - -// -------------------------------------------------------------------------------------------------------------------- -// hashtable for keeping all the sockets -// key is the inode - -struct local_socket; -#define SIMPLE_HASHTABLE_VALUE_TYPE struct local_socket -#define SIMPLE_HASHTABLE_NAME _LOCAL_SOCKET -#include "libnetdata/simple_hashtable.h" - -// -------------------------------------------------------------------------------------------------------------------- -// hashtable for keeping all local IPs -// key is XXH3_64bits hash of the IP - -union ipv46; -#define SIMPLE_HASHTABLE_VALUE_TYPE union ipv46 -#define SIMPLE_HASHTABLE_NAME _LOCAL_IP -#include "libnetdata/simple_hashtable.h" - -// -------------------------------------------------------------------------------------------------------------------- -// hashtable for keeping all listening ports -// key is XXH3_64bits hash of the family, protocol, port number, namespace - -struct local_port; -#define SIMPLE_HASHTABLE_VALUE_TYPE struct local_port -#define SIMPLE_HASHTABLE_NAME _LISTENING_PORT -#include "libnetdata/simple_hashtable.h" - -// -------------------------------------------------------------------------------------------------------------------- - -struct local_socket_state; -typedef void (*local_sockets_cb_t)(struct local_socket_state *state, struct local_socket 
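The generator above packs the log source, priority, and message id into a 16-bit Windows event id through construct_event_code(), which lives in nd_log-to-windows-common.h and is not shown in this patch. A packing consistent with its range checks (4-bit source, 4-bit priority, 8-bit message id) would be the following; this is an assumption for illustration only:

    #include <stdint.h>

    // Hypothetical equivalent of construct_event_code(): 4 + 4 + 8 = 16 bits.
    static inline uint16_t example_event_code(uint8_t source, uint8_t priority, uint8_t msgid) {
        return (uint16_t)(((source & 0x0Fu) << 12) | ((priority & 0x0Fu) << 8) | msgid);
    }
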
*n, void *data); - -struct local_sockets_config { - bool listening; - bool inbound; - bool outbound; - bool local; - bool tcp4; - bool tcp6; - bool udp4; - bool udp6; - bool pid; - bool cmdline; - bool comm; - bool uid; - bool namespaces; - bool tcp_info; - - size_t max_errors; - size_t max_concurrent_namespaces; - - local_sockets_cb_t cb; - void *data; - - const char *host_prefix; - - // internal use - uint64_t net_ns_inode; -}; - -typedef struct local_socket_state { - struct local_sockets_config config; - - struct { - size_t mnl_sends; - size_t namespaces_found; - size_t tcp_info_received; - size_t pid_fds_processed; - size_t pid_fds_opendir_failed; - size_t pid_fds_readlink_failed; - size_t pid_fds_parse_failed; - size_t errors_encountered; - } stats; - - bool spawn_server_is_mine; - SPAWN_SERVER *spawn_server; - -#ifdef HAVE_LIBMNL - bool use_nl; - struct mnl_socket *nl; - uint16_t tmp_protocol; -#endif - - ARAL *local_socket_aral; - ARAL *pid_socket_aral; - SPINLOCK spinlock; // for namespaces - - uint64_t proc_self_net_ns_inode; - - SIMPLE_HASHTABLE_NET_NS ns_hashtable; - SIMPLE_HASHTABLE_PID_SOCKET pid_sockets_hashtable; - SIMPLE_HASHTABLE_LOCAL_SOCKET sockets_hashtable; - SIMPLE_HASHTABLE_LOCAL_IP local_ips_hashtable; - SIMPLE_HASHTABLE_LISTENING_PORT listening_ports_hashtable; -} LS_STATE; - -// -------------------------------------------------------------------------------------------------------------------- - -typedef enum __attribute__((packed)) { - SOCKET_DIRECTION_NONE = 0, - SOCKET_DIRECTION_LISTEN = (1 << 0), // a listening socket - SOCKET_DIRECTION_INBOUND = (1 << 1), // an inbound socket connecting a remote system to a local listening socket - SOCKET_DIRECTION_OUTBOUND = (1 << 2), // a socket initiated by this system, connecting to another system - SOCKET_DIRECTION_LOCAL_INBOUND = (1 << 3), // the socket connecting 2 localhost applications - SOCKET_DIRECTION_LOCAL_OUTBOUND = (1 << 4), // the socket connecting 2 localhost applications -} SOCKET_DIRECTION; - -#ifndef TASK_COMM_LEN -#define TASK_COMM_LEN 16 -#endif - -struct pid_socket { - uint64_t inode; - pid_t pid; - uid_t uid; - uint64_t net_ns_inode; - char *cmdline; - char comm[TASK_COMM_LEN]; -}; - -struct local_port { - uint16_t protocol; - uint16_t family; - uint16_t port; - uint64_t net_ns_inode; -}; - -union ipv46 { - uint32_t ipv4; - struct in6_addr ipv6; -}; - -struct socket_endpoint { - uint16_t protocol; - uint16_t family; - uint16_t port; - union ipv46 ip; -}; - -static inline void ipv6_to_in6_addr(const char *ipv6_str, struct in6_addr *d) { - char buf[9]; - - for (size_t k = 0; k < 4; ++k) { - memcpy(buf, ipv6_str + (k * 8), 8); - buf[sizeof(buf) - 1] = '\0'; - d->s6_addr32[k] = str2uint32_hex(buf, NULL); - } -} - -typedef struct local_socket { - uint64_t inode; - uint64_t net_ns_inode; - - int state; - struct socket_endpoint local; - struct socket_endpoint remote; - pid_t pid; - - SOCKET_DIRECTION direction; - - uint8_t timer; - uint8_t retransmits; // the # of packets currently queued for retransmission (not yet acknowledged) - uint32_t expires; - uint32_t rqueue; - uint32_t wqueue; - uid_t uid; - - struct { - bool checked; - bool ipv46; - } ipv6ony; - - union { - struct tcp_info tcp; - } info; - - char comm[TASK_COMM_LEN]; - STRING *cmdline; - - struct local_port local_port_key; - - XXH64_hash_t local_ip_hash; - XXH64_hash_t remote_ip_hash; - XXH64_hash_t local_port_hash; - -#ifdef LOCAL_SOCKETS_EXTENDED_MEMBERS - LOCAL_SOCKETS_EXTENDED_MEMBERS -#endif -} LOCAL_SOCKET; - -static inline void 
local_sockets_spawn_server_callback(SPAWN_REQUEST *request); - -// -------------------------------------------------------------------------------------------------------------------- - -static inline void local_sockets_log(LS_STATE *ls, const char *format, ...) PRINTFLIKE(2, 3); -static inline void local_sockets_log(LS_STATE *ls, const char *format, ...) { - if(ls && ++ls->stats.errors_encountered == ls->config.max_errors) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, "LOCAL-SOCKETS: max number of logs reached. Not logging anymore"); - return; - } - - if(ls && ls->stats.errors_encountered > ls->config.max_errors) - return; - - char buf[16384]; - va_list args; - va_start(args, format); - vsnprintf(buf, sizeof(buf), format, args); - va_end(args); - - nd_log(NDLS_COLLECTORS, NDLP_ERR, "LOCAL-SOCKETS: %s", buf); -} - -// -------------------------------------------------------------------------------------------------------------------- - -static bool local_sockets_is_ipv4_mapped_ipv6_address(const struct in6_addr *addr) { - // An IPv4-mapped IPv6 address starts with 80 bits of zeros followed by 16 bits of ones - static const unsigned char ipv4_mapped_prefix[12] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF, 0xFF }; - return memcmp(addr->s6_addr, ipv4_mapped_prefix, 12) == 0; -} - -static bool local_sockets_is_loopback_address(struct socket_endpoint *se) { - if (se->family == AF_INET) { - // For IPv4, loopback addresses are in the 127.0.0.0/8 range - return (ntohl(se->ip.ipv4) >> 24) == 127; // Check if the first byte is 127 - } else if (se->family == AF_INET6) { - // Check if the address is an IPv4-mapped IPv6 address - if (local_sockets_is_ipv4_mapped_ipv6_address(&se->ip.ipv6)) { - // Extract the last 32 bits (IPv4 address) and check if it's in the 127.0.0.0/8 range - uint8_t *ip6 = (uint8_t *)&se->ip.ipv6; - const uint32_t ipv4_addr = *((const uint32_t *)(ip6 + 12)); - return (ntohl(ipv4_addr) >> 24) == 127; - } - - // For IPv6, loopback address is ::1 - return memcmp(&se->ip.ipv6, &in6addr_loopback, sizeof(se->ip.ipv6)) == 0; - } - - return false; -} - -static inline bool local_sockets_is_ipv4_reserved_address(uint32_t ip) { - // Check for the reserved address ranges - ip = ntohl(ip); - return ( - (ip >> 24 == 10) || // Private IP range (A class) - (ip >> 20 == (172 << 4) + 1) || // Private IP range (B class) - (ip >> 16 == (192 << 8) + 168) || // Private IP range (C class) - (ip >> 24 == 127) || // Loopback address (127.0.0.0) - (ip >> 24 == 0) || // Reserved (0.0.0.0) - (ip >> 24 == 169 && (ip >> 16) == 254) || // Link-local address (169.254.0.0) - (ip >> 16 == (192 << 8) + 0) // Test-Net (192.0.0.0) - ); -} - -static inline bool local_sockets_is_private_address(struct socket_endpoint *se) { - if (se->family == AF_INET) { - return local_sockets_is_ipv4_reserved_address(se->ip.ipv4); - } - else if (se->family == AF_INET6) { - uint8_t *ip6 = (uint8_t *)&se->ip.ipv6; - - // Check if the address is an IPv4-mapped IPv6 address - if (local_sockets_is_ipv4_mapped_ipv6_address(&se->ip.ipv6)) { - // Extract the last 32 bits (IPv4 address) and check if it's in the 127.0.0.0/8 range - const uint32_t ipv4_addr = *((const uint32_t *)(ip6 + 12)); - return local_sockets_is_ipv4_reserved_address(ipv4_addr); - } - - // Check for link-local addresses (fe80::/10) - if ((ip6[0] == 0xFE) && ((ip6[1] & 0xC0) == 0x80)) - return true; - - // Check for Unique Local Addresses (ULA) (fc00::/7) - if ((ip6[0] & 0xFE) == 0xFC) - return true; - - // Check for multicast addresses (ff00::/8) - if (ip6[0] == 0xFF) - return true; - - 
// For IPv6, loopback address is :: or ::1 - return memcmp(&se->ip.ipv6, &in6addr_any, sizeof(se->ip.ipv6)) == 0 || - memcmp(&se->ip.ipv6, &in6addr_loopback, sizeof(se->ip.ipv6)) == 0; - } - - return false; -} - -static bool local_sockets_is_multicast_address(struct socket_endpoint *se) { - if (se->family == AF_INET) { - // For IPv4, check if the address is 0.0.0.0 - uint32_t ip = htonl(se->ip.ipv4); - return (ip >= 0xE0000000 && ip <= 0xEFFFFFFF); // Multicast address range (224.0.0.0/4) - } - else if (se->family == AF_INET6) { - // For IPv6, check if the address is ff00::/8 - uint8_t *ip6 = (uint8_t *)&se->ip.ipv6; - return ip6[0] == 0xff; - } - - return false; -} - -static bool local_sockets_is_zero_address(struct socket_endpoint *se) { - if (se->family == AF_INET) { - // For IPv4, check if the address is 0.0.0.0 - return se->ip.ipv4 == 0; - } - else if (se->family == AF_INET6) { - // For IPv6, check if the address is :: - return memcmp(&se->ip.ipv6, &in6addr_any, sizeof(se->ip.ipv6)) == 0; - } - - return false; -} - -static inline const char *local_sockets_address_space(struct socket_endpoint *se) { - if(local_sockets_is_zero_address(se)) - return "zero"; - else if(local_sockets_is_loopback_address(se)) - return "loopback"; - else if(local_sockets_is_multicast_address(se)) - return "multicast"; - else if(local_sockets_is_private_address(se)) - return "private"; - else - return "public"; -} - -// -------------------------------------------------------------------------------------------------------------------- - -static inline bool is_local_socket_ipv46(LOCAL_SOCKET *n) { - return n->local.family == AF_INET6 && - n->direction == SOCKET_DIRECTION_LISTEN && - local_sockets_is_zero_address(&n->local) && - n->ipv6ony.checked && - n->ipv6ony.ipv46; -} - -// -------------------------------------------------------------------------------------------------------------------- - -static void local_sockets_foreach_local_socket_call_cb(LS_STATE *ls) { - for(SIMPLE_HASHTABLE_SLOT_LOCAL_SOCKET *sl = simple_hashtable_first_read_only_LOCAL_SOCKET(&ls->sockets_hashtable); - sl; - sl = simple_hashtable_next_read_only_LOCAL_SOCKET(&ls->sockets_hashtable, sl)) { - LOCAL_SOCKET *n = SIMPLE_HASHTABLE_SLOT_DATA(sl); - if(!n) continue; - - if((ls->config.listening && n->direction & SOCKET_DIRECTION_LISTEN) || - (ls->config.local && n->direction & (SOCKET_DIRECTION_LOCAL_INBOUND|SOCKET_DIRECTION_LOCAL_OUTBOUND)) || - (ls->config.inbound && n->direction & SOCKET_DIRECTION_INBOUND) || - (ls->config.outbound && n->direction & SOCKET_DIRECTION_OUTBOUND) - ) { - // we have to call the callback for this socket - if (ls->config.cb) - ls->config.cb(ls, n, ls->config.data); - } - } -} - -// -------------------------------------------------------------------------------------------------------------------- - -static inline void local_sockets_fix_cmdline(char* str) { - char *s = str; - - // map invalid characters to underscores - while(*s) { - if(*s == '|' || iscntrl(*s)) *s = '_'; - s++; - } -} - -// -------------------------------------------------------------------------------------------------------------------- - -static inline bool -local_sockets_read_proc_inode_link(LS_STATE *ls, const char *filename, uint64_t *inode, const char *type) { - char link_target[FILENAME_MAX + 1]; - - *inode = 0; - - ssize_t len = readlink(filename, link_target, sizeof(link_target) - 1); - if (len == -1) { - local_sockets_log(ls, "cannot read '%s' link '%s'", type, filename); - - ls->stats.pid_fds_readlink_failed++; - return false; - } 
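Since these classification helpers are being removed wholesale with this file, a tiny illustration of what they computed (hypothetical values; IPv4 addresses are stored in network byte order, as the code expects):

    #include <arpa/inet.h>

    // Illustrative only: how the (removed) helpers labeled endpoints.
    static void example_address_space(void) {
        struct socket_endpoint se = { .family = AF_INET };

        se.ip.ipv4 = htonl((127u << 24) | 1);              // 127.0.0.1
        const char *a = local_sockets_address_space(&se);  // -> "loopback"

        se.ip.ipv4 = htonl((10u << 24) | 1);               // 10.0.0.1
        const char *b = local_sockets_address_space(&se);  // -> "private"

        (void)a; (void)b;
    }
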
- link_target[len] = '\0'; - - len = strlen(type); - if(strncmp(link_target, type, len) == 0 && link_target[len] == ':' && link_target[len + 1] == '[' && isdigit(link_target[len + 2])) { - *inode = strtoull(&link_target[len + 2], NULL, 10); - // ll_log(ls, "read link of type '%s' '%s' from '%s', inode = %"PRIu64, type, link_target, filename, *inode); - return true; - } - else { - // ll_log(ls, "cannot read '%s' link '%s' from '%s'", type, link_target, filename); - ls->stats.pid_fds_processed++; - return false; - } -} - -static inline bool local_sockets_is_path_a_pid(const char *s) { - if(!s || !*s) return false; - - while(*s) { - if(!isdigit(*s++)) - return false; - } - - return true; -} - -static inline bool local_sockets_find_all_sockets_in_proc(LS_STATE *ls, const char *proc_filename) { - DIR *proc_dir; - struct dirent *proc_entry; - char filename[FILENAME_MAX + 1]; - char comm[TASK_COMM_LEN]; - char cmdline[8192]; - const char *cmdline_trimmed; - uint64_t net_ns_inode; - - proc_dir = opendir(proc_filename); - if (proc_dir == NULL) { - local_sockets_log(ls, "cannot opendir() '%s'", proc_filename); - ls->stats.pid_fds_readlink_failed++; - return false; - } - - while ((proc_entry = readdir(proc_dir)) != NULL) { - if(proc_entry->d_type != DT_DIR) - continue; - - if(!strcmp(proc_entry->d_name, ".") || !strcmp(proc_entry->d_name, "..")) - continue; - - if(!local_sockets_is_path_a_pid(proc_entry->d_name)) - continue; - - // Build the path to the fd directory of the process - snprintfz(filename, FILENAME_MAX, "%s/%s/fd/", proc_filename, proc_entry->d_name); - DIR *fd_dir = opendir(filename); - if (fd_dir == NULL) { - local_sockets_log(ls, "cannot opendir() '%s'", filename); - ls->stats.pid_fds_opendir_failed++; - continue; - } - - comm[0] = '\0'; - cmdline[0] = '\0'; - cmdline_trimmed = NULL; - pid_t pid = (pid_t)strtoul(proc_entry->d_name, NULL, 10); - if(!pid) { - local_sockets_log(ls, "cannot parse pid of '%s'", proc_entry->d_name); - closedir(fd_dir); - continue; - } - net_ns_inode = 0; - uid_t uid = UID_UNSET; - - struct dirent *fd_entry; - while ((fd_entry = readdir(fd_dir)) != NULL) { - if(fd_entry->d_type != DT_LNK) - continue; - - snprintfz(filename, sizeof(filename), "%s/%s/fd/%s", proc_filename, proc_entry->d_name, fd_entry->d_name); - uint64_t inode = 0; - if(!local_sockets_read_proc_inode_link(ls, filename, &inode, "socket")) - continue; - - SIMPLE_HASHTABLE_SLOT_PID_SOCKET *sl = simple_hashtable_get_slot_PID_SOCKET(&ls->pid_sockets_hashtable, inode, &inode, true); - struct pid_socket *ps = SIMPLE_HASHTABLE_SLOT_DATA(sl); - if(!ps || (ps->pid == 1 && pid != 1)) { - if(uid == UID_UNSET && ls->config.uid) { - char status_buf[512]; - snprintfz(filename, sizeof(filename), "%s/%s/status", proc_filename, proc_entry->d_name); - if (read_txt_file(filename, status_buf, sizeof(status_buf))) - local_sockets_log(ls, "cannot open file: %s\n", filename); - else { - char *u = strstr(status_buf, "Uid:"); - if(u) { - u += 4; - while(isspace(*u)) u++; // skip spaces - while(*u >= '0' && *u <= '9') u++; // skip the first number (real uid) - while(isspace(*u)) u++; // skip spaces again - uid = strtol(u, NULL, 10); // parse the 2nd number (effective uid) - } - } - } - if(!comm[0] && ls->config.comm) { - snprintfz(filename, sizeof(filename), "%s/%s/comm", proc_filename, proc_entry->d_name); - if (read_txt_file(filename, comm, sizeof(comm))) - local_sockets_log(ls, "cannot open file: %s\n", filename); - else { - size_t clen = strlen(comm); - if(comm[clen - 1] == '\n') - comm[clen - 1] = '\0'; - } - } - 
if(!cmdline[0] && ls->config.cmdline) { - snprintfz(filename, sizeof(filename), "%s/%s/cmdline", proc_filename, proc_entry->d_name); - if (read_proc_cmdline(filename, cmdline, sizeof(cmdline))) - local_sockets_log(ls, "cannot open file: %s\n", filename); - else { - local_sockets_fix_cmdline(cmdline); - cmdline_trimmed = trim(cmdline); - } - } - if(!net_ns_inode && ls->config.namespaces) { - snprintfz(filename, sizeof(filename), "%s/%s/ns/net", proc_filename, proc_entry->d_name); - if(local_sockets_read_proc_inode_link(ls, filename, &net_ns_inode, "net")) { - SIMPLE_HASHTABLE_SLOT_NET_NS *sl_ns = simple_hashtable_get_slot_NET_NS(&ls->ns_hashtable, net_ns_inode, (uint64_t *)net_ns_inode, true); - simple_hashtable_set_slot_NET_NS(&ls->ns_hashtable, sl_ns, net_ns_inode, (uint64_t *)net_ns_inode); - } - } - - if(!ps) - ps = aral_callocz(ls->pid_socket_aral); - - ps->inode = inode; - ps->pid = pid; - ps->uid = uid; - ps->net_ns_inode = net_ns_inode; - strncpyz(ps->comm, comm, sizeof(ps->comm) - 1); - - if(ps->cmdline) - freez(ps->cmdline); - - ps->cmdline = cmdline_trimmed ? strdupz(cmdline_trimmed) : NULL; - simple_hashtable_set_slot_PID_SOCKET(&ls->pid_sockets_hashtable, sl, inode, ps); - } - } - - closedir(fd_dir); - } - - closedir(proc_dir); - return true; -} - -// -------------------------------------------------------------------------------------------------------------------- - -static inline void local_sockets_index_listening_port(LS_STATE *ls, LOCAL_SOCKET *n) { - if(n->direction & SOCKET_DIRECTION_LISTEN) { - // for the listening sockets, keep a hashtable with all the local ports - // so that we will be able to detect INBOUND sockets - - SIMPLE_HASHTABLE_SLOT_LISTENING_PORT *sl_port = - simple_hashtable_get_slot_LISTENING_PORT(&ls->listening_ports_hashtable, n->local_port_hash, &n->local_port_key, true); - - struct local_port *port = SIMPLE_HASHTABLE_SLOT_DATA(sl_port); - if(!port) - simple_hashtable_set_slot_LISTENING_PORT(&ls->listening_ports_hashtable, sl_port, n->local_port_hash, &n->local_port_key); - } -} - -static inline bool local_sockets_add_socket(LS_STATE *ls, LOCAL_SOCKET *tmp) { - if(!tmp->inode) return false; - - SIMPLE_HASHTABLE_SLOT_LOCAL_SOCKET *sl = simple_hashtable_get_slot_LOCAL_SOCKET(&ls->sockets_hashtable, tmp->inode, &tmp->inode, true); - LOCAL_SOCKET *n = SIMPLE_HASHTABLE_SLOT_DATA(sl); - if(n) { - local_sockets_log(ls, "inode %" PRIu64" already exists in hashtable - ignoring duplicate", tmp->inode); - return false; - } - - n = aral_mallocz(ls->local_socket_aral); - *n = *tmp; // copy all contents - - // fix the key - n->local_port_key.port = n->local.port; - n->local_port_key.family = n->local.family; - n->local_port_key.protocol = n->local.protocol; - n->local_port_key.net_ns_inode = ls->proc_self_net_ns_inode; - - n->local_ip_hash = XXH3_64bits(&n->local.ip, sizeof(n->local.ip)); - n->remote_ip_hash = XXH3_64bits(&n->remote.ip, sizeof(n->remote.ip)); - n->local_port_hash = XXH3_64bits(&n->local_port_key, sizeof(n->local_port_key)); - - // --- look up a pid for it ----------------------------------------------------------------------------------- - - SIMPLE_HASHTABLE_SLOT_PID_SOCKET *sl_pid = simple_hashtable_get_slot_PID_SOCKET(&ls->pid_sockets_hashtable, n->inode, &n->inode, false); - struct pid_socket *ps = SIMPLE_HASHTABLE_SLOT_DATA(sl_pid); - if(ps) { - n->net_ns_inode = ps->net_ns_inode; - n->pid = ps->pid; - - if(ps->uid != UID_UNSET && n->uid == UID_UNSET) - n->uid = ps->uid; - - if(ps->cmdline) - n->cmdline = string_strdupz(ps->cmdline); - - 
strncpyz(n->comm, ps->comm, sizeof(n->comm) - 1); - } - - // --- index it ----------------------------------------------------------------------------------------------- - - simple_hashtable_set_slot_LOCAL_SOCKET(&ls->sockets_hashtable, sl, n->inode, n); - - if(!local_sockets_is_zero_address(&n->local)) { - // put all the local IPs into the local_ips hashtable - // so, we learn all local IPs the system has - - SIMPLE_HASHTABLE_SLOT_LOCAL_IP *sl_ip = - simple_hashtable_get_slot_LOCAL_IP(&ls->local_ips_hashtable, n->local_ip_hash, &n->local.ip, true); - - union ipv46 *ip = SIMPLE_HASHTABLE_SLOT_DATA(sl_ip); - if(!ip) - simple_hashtable_set_slot_LOCAL_IP(&ls->local_ips_hashtable, sl_ip, n->local_ip_hash, &n->local.ip); - } - - // --- 1st phase for direction detection ---------------------------------------------------------------------- - - if((n->local.protocol == IPPROTO_TCP && n->state == TCP_LISTEN) || - local_sockets_is_zero_address(&n->local) || - local_sockets_is_zero_address(&n->remote)) { - // the socket is either in a TCP LISTEN, or - // the remote address is zero - n->direction |= SOCKET_DIRECTION_LISTEN; - } - else { - // we can't say yet if it is inbound or outboud - // so, mark it as both inbound and outbound - n->direction |= SOCKET_DIRECTION_INBOUND | SOCKET_DIRECTION_OUTBOUND; - } - - // --- index it in LISTENING_PORT ----------------------------------------------------------------------------- - - local_sockets_index_listening_port(ls, n); - - return true; -} - -#ifdef HAVE_LIBMNL - -static inline void local_sockets_libmnl_init(LS_STATE *ls) { - ls->nl = mnl_socket_open(NETLINK_INET_DIAG); - if (ls->nl == NULL) { - local_sockets_log(ls, "cannot open libmnl netlink socket"); - ls->use_nl = false; - } - else if (mnl_socket_bind(ls->nl, 0, MNL_SOCKET_AUTOPID) < 0) { - local_sockets_log(ls, "cannot bind libmnl netlink socket"); - mnl_socket_close(ls->nl); - ls->nl = NULL; - ls->use_nl = false; - } - else - ls->use_nl = true; -} - -static inline void local_sockets_libmnl_cleanup(LS_STATE *ls) { - if(ls->nl) { - mnl_socket_close(ls->nl); - ls->nl = NULL; - ls->use_nl = false; - } -} - -static inline int local_sockets_libmnl_cb_data(const struct nlmsghdr *nlh, void *data) { - LS_STATE *ls = data; - - struct inet_diag_msg *diag_msg = mnl_nlmsg_get_payload(nlh); - - LOCAL_SOCKET n = { - .inode = diag_msg->idiag_inode, - .direction = SOCKET_DIRECTION_NONE, - .state = diag_msg->idiag_state, - .ipv6ony = { - .checked = false, - .ipv46 = false, - }, - .local = { - .protocol = ls->tmp_protocol, - .family = diag_msg->idiag_family, - .port = ntohs(diag_msg->id.idiag_sport), - }, - .remote = { - .protocol = ls->tmp_protocol, - .family = diag_msg->idiag_family, - .port = ntohs(diag_msg->id.idiag_dport), - }, - .timer = diag_msg->idiag_timer, - .retransmits = diag_msg->idiag_retrans, - .expires = diag_msg->idiag_expires, - .rqueue = diag_msg->idiag_rqueue, - .wqueue = diag_msg->idiag_wqueue, - .uid = diag_msg->idiag_uid, - }; - - if (diag_msg->idiag_family == AF_INET) { - memcpy(&n.local.ip.ipv4, diag_msg->id.idiag_src, sizeof(n.local.ip.ipv4)); - memcpy(&n.remote.ip.ipv4, diag_msg->id.idiag_dst, sizeof(n.remote.ip.ipv4)); - } - else if (diag_msg->idiag_family == AF_INET6) { - memcpy(&n.local.ip.ipv6, diag_msg->id.idiag_src, sizeof(n.local.ip.ipv6)); - memcpy(&n.remote.ip.ipv6, diag_msg->id.idiag_dst, sizeof(n.remote.ip.ipv6)); - } - - struct rtattr *attr = (struct rtattr *)(diag_msg + 1); - int rtattrlen = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*diag_msg)); - for (; !n.ipv6ony.checked && 
RTA_OK(attr, rtattrlen); attr = RTA_NEXT(attr, rtattrlen)) { - switch (attr->rta_type) { - case INET_DIAG_INFO: { - if(ls->tmp_protocol == IPPROTO_TCP) { - struct tcp_info *info = (struct tcp_info *)RTA_DATA(attr); - n.info.tcp = *info; - ls->stats.tcp_info_received++; - } - } - break; - - case INET_DIAG_SKV6ONLY: { - n.ipv6ony.checked = true; - int ipv6only = *(int *)RTA_DATA(attr); - n.ipv6ony.ipv46 = !ipv6only; - } - break; - - default: - break; - } - } - - local_sockets_add_socket(ls, &n); - - return MNL_CB_OK; -} - -static inline bool local_sockets_libmnl_get_sockets(LS_STATE *ls, uint16_t family, uint16_t protocol) { - ls->tmp_protocol = protocol; - - char buf[MNL_SOCKET_BUFFER_SIZE]; - struct nlmsghdr *nlh; - struct inet_diag_req_v2 req; - unsigned int seq, portid = mnl_socket_get_portid(ls->nl); - - memset(&req, 0, sizeof(req)); - req.sdiag_family = family; - req.sdiag_protocol = protocol; - req.idiag_states = -1; - req.idiag_ext = 0; - - if(family == AF_INET6) - req.idiag_ext |= 1 << (INET_DIAG_SKV6ONLY - 1); - - if(protocol == IPPROTO_TCP && ls->config.tcp_info) - req.idiag_ext |= 1 << (INET_DIAG_INFO - 1); - - nlh = mnl_nlmsg_put_header(buf); - nlh->nlmsg_type = SOCK_DIAG_BY_FAMILY; - nlh->nlmsg_flags = NLM_F_ROOT | NLM_F_MATCH | NLM_F_REQUEST; - nlh->nlmsg_seq = seq = time(NULL); - mnl_nlmsg_put_extra_header(nlh, sizeof(req)); - memcpy(mnl_nlmsg_get_payload(nlh), &req, sizeof(req)); - - ls->stats.mnl_sends++; - if (mnl_socket_sendto(ls->nl, nlh, nlh->nlmsg_len) < 0) { - local_sockets_log(ls, "mnl_socket_send failed"); - return false; - } - - ssize_t ret; - while ((ret = mnl_socket_recvfrom(ls->nl, buf, sizeof(buf))) > 0) { - ret = mnl_cb_run(buf, ret, seq, portid, local_sockets_libmnl_cb_data, ls); - if (ret <= MNL_CB_STOP) - break; - } - if (ret == -1) { - local_sockets_log(ls, "mnl_socket_recvfrom"); - return false; - } - - return true; -} -#endif // HAVE_LIBMNL - -static inline bool local_sockets_read_proc_net_x(LS_STATE *ls, const char *filename, uint16_t family, uint16_t protocol) { - static bool is_space[256] = { - [':'] = true, - [' '] = true, - }; - - if(family != AF_INET && family != AF_INET6) - return false; - - FILE *fp = fopen(filename, "r"); - if (fp == NULL) - return false; - - char *line = malloc(1024); // no mallocz() here because getline() may resize - if(!line) { - fclose(fp); - return false; - } - - size_t len = 1024; - ssize_t read; - - ssize_t min_line_length = (family == AF_INET) ? 
105 : 155; - size_t counter = 0; - - // Read line by line - while ((read = getline(&line, &len, fp)) != -1) { - if(counter++ == 0) continue; // skip the first line - - if(read < min_line_length) { - local_sockets_log(ls, "too small line No %zu of filename '%s': %s", counter, filename, line); - continue; - } - - LOCAL_SOCKET n = { - .direction = SOCKET_DIRECTION_NONE, - .ipv6ony = { - .checked = false, - .ipv46 = false, - }, - .local = { - .family = family, - .protocol = protocol, - }, - .remote = { - .family = family, - .protocol = protocol, - }, - .uid = UID_UNSET, - }; - - char *words[32]; - size_t num_words = quoted_strings_splitter(line, words, 32, is_space); - // char *sl_txt = get_word(words, num_words, 0); - char *local_ip_txt = get_word(words, num_words, 1); - char *local_port_txt = get_word(words, num_words, 2); - char *remote_ip_txt = get_word(words, num_words, 3); - char *remote_port_txt = get_word(words, num_words, 4); - char *state_txt = get_word(words, num_words, 5); - char *tx_queue_txt = get_word(words, num_words, 6); - char *rx_queue_txt = get_word(words, num_words, 7); - char *tr_txt = get_word(words, num_words, 8); - char *tm_when_txt = get_word(words, num_words, 9); - char *retrans_txt = get_word(words, num_words, 10); - char *uid_txt = get_word(words, num_words, 11); - // char *timeout_txt = get_word(words, num_words, 12); - char *inode_txt = get_word(words, num_words, 13); - - if(!local_ip_txt || !local_port_txt || !remote_ip_txt || !remote_port_txt || !state_txt || - !tx_queue_txt || !rx_queue_txt || !tr_txt || !tm_when_txt || !retrans_txt || !uid_txt || !inode_txt) { - local_sockets_log(ls, "cannot parse ipv4 line No %zu of filename '%s'", counter, filename); - continue; - } - - n.local.port = str2uint32_hex(local_port_txt, NULL); - n.remote.port = str2uint32_hex(remote_port_txt, NULL); - n.state = str2uint32_hex(state_txt, NULL); - n.wqueue = str2uint32_hex(tx_queue_txt, NULL); - n.rqueue = str2uint32_hex(rx_queue_txt, NULL); - n.timer = str2uint32_hex(tr_txt, NULL); - n.expires = str2uint32_hex(tm_when_txt, NULL); - n.retransmits = str2uint32_hex(retrans_txt, NULL); - n.uid = str2uint32_t(uid_txt, NULL); - n.inode = str2uint64_t(inode_txt, NULL); - - if(family == AF_INET) { - n.local.ip.ipv4 = str2uint32_hex(local_ip_txt, NULL); - n.remote.ip.ipv4 = str2uint32_hex(remote_ip_txt, NULL); - } - else if(family == AF_INET6) { - ipv6_to_in6_addr(local_ip_txt, &n.local.ip.ipv6); - ipv6_to_in6_addr(remote_ip_txt, &n.remote.ip.ipv6); - } - - local_sockets_add_socket(ls, &n); - } - - fclose(fp); - - if (line) - free(line); // no freez() here because getline() may resize - - return true; -} - -// -------------------------------------------------------------------------------------------------------------------- - -static inline void local_sockets_detect_directions(LS_STATE *ls) { - for(SIMPLE_HASHTABLE_SLOT_LOCAL_SOCKET *sl = simple_hashtable_first_read_only_LOCAL_SOCKET(&ls->sockets_hashtable); - sl ; - sl = simple_hashtable_next_read_only_LOCAL_SOCKET(&ls->sockets_hashtable, sl)) { - LOCAL_SOCKET *n = SIMPLE_HASHTABLE_SLOT_DATA(sl); - if (!n) continue; - - if ((n->direction & (SOCKET_DIRECTION_INBOUND|SOCKET_DIRECTION_OUTBOUND)) != - (SOCKET_DIRECTION_INBOUND|SOCKET_DIRECTION_OUTBOUND)) - continue; - - // check if the local port is one of our listening ports - { - SIMPLE_HASHTABLE_SLOT_LISTENING_PORT *sl_port = - simple_hashtable_get_slot_LISTENING_PORT(&ls->listening_ports_hashtable, n->local_port_hash, &n->local_port_key, false); - - struct local_port *port = 
SIMPLE_HASHTABLE_SLOT_DATA(sl_port); // do not dereference this pointer; it may be invalid, check it only for NULL - if(port) { - // the local port of this socket is a port we listen to - n->direction &= ~SOCKET_DIRECTION_OUTBOUND; - } - else - n->direction &= ~SOCKET_DIRECTION_INBOUND; - } - - // check if the remote IP is one of our local IPs - { - SIMPLE_HASHTABLE_SLOT_LOCAL_IP *sl_ip = - simple_hashtable_get_slot_LOCAL_IP(&ls->local_ips_hashtable, n->remote_ip_hash, &n->remote.ip, false); - - union ipv46 *d = SIMPLE_HASHTABLE_SLOT_DATA(sl_ip); - if (d) { - // the remote IP of this socket is one of our local IPs - if(n->direction & SOCKET_DIRECTION_INBOUND) { - n->direction &= ~SOCKET_DIRECTION_INBOUND; - n->direction |= SOCKET_DIRECTION_LOCAL_INBOUND; - } - else if(n->direction & SOCKET_DIRECTION_OUTBOUND) { - n->direction &= ~SOCKET_DIRECTION_OUTBOUND; - n->direction |= SOCKET_DIRECTION_LOCAL_OUTBOUND; - } - continue; - } - } - - if (local_sockets_is_loopback_address(&n->local) || - local_sockets_is_loopback_address(&n->remote)) { - // at least one of the two IP addresses is loopback, so this is a local connection - if(n->direction & SOCKET_DIRECTION_INBOUND) { - n->direction &= ~SOCKET_DIRECTION_INBOUND; - n->direction |= SOCKET_DIRECTION_LOCAL_INBOUND; - } - else if(n->direction & SOCKET_DIRECTION_OUTBOUND) { - n->direction &= ~SOCKET_DIRECTION_OUTBOUND; - n->direction |= SOCKET_DIRECTION_LOCAL_OUTBOUND; - } - } - } -} - -// --------------------------------------------------------------------------------------------------------------------- - -static inline void local_sockets_init(LS_STATE *ls) { - ls->config.host_prefix = netdata_configured_host_prefix; - - spinlock_init(&ls->spinlock); - - simple_hashtable_init_NET_NS(&ls->ns_hashtable, 1024); - simple_hashtable_init_PID_SOCKET(&ls->pid_sockets_hashtable, 65535); - simple_hashtable_init_LOCAL_SOCKET(&ls->sockets_hashtable, 65535); - simple_hashtable_init_LOCAL_IP(&ls->local_ips_hashtable, 4096); - simple_hashtable_init_LISTENING_PORT(&ls->listening_ports_hashtable, 4096); - - ls->local_socket_aral = aral_create( - "local-sockets", - sizeof(LOCAL_SOCKET), - 65536, - 65536, - NULL, NULL, NULL, false, true); - - ls->pid_socket_aral = aral_create( - "pid-sockets", - sizeof(struct pid_socket), - 65536, - 65536, - NULL, NULL, NULL, false, true); - - memset(&ls->stats, 0, sizeof(ls->stats)); - -#ifdef HAVE_LIBMNL - ls->use_nl = false; - ls->nl = NULL; - ls->tmp_protocol = 0; - local_sockets_libmnl_init(ls); -#endif - - if(ls->config.namespaces && ls->spawn_server == NULL) { - ls->spawn_server = spawn_server_create(SPAWN_SERVER_OPTION_CALLBACK, NULL, local_sockets_spawn_server_callback, 0, NULL); - ls->spawn_server_is_mine = true; - } - else - ls->spawn_server_is_mine = false; -} - -static inline void local_sockets_cleanup(LS_STATE *ls) { - - if(ls->spawn_server_is_mine) { - spawn_server_destroy(ls->spawn_server); - ls->spawn_server = NULL; - ls->spawn_server_is_mine = false; - } - -#ifdef HAVE_LIBMNL - local_sockets_libmnl_cleanup(ls); -#endif - - // free the sockets hashtable data - for(SIMPLE_HASHTABLE_SLOT_LOCAL_SOCKET *sl = simple_hashtable_first_read_only_LOCAL_SOCKET(&ls->sockets_hashtable); - sl; - sl = simple_hashtable_next_read_only_LOCAL_SOCKET(&ls->sockets_hashtable, sl)) { - LOCAL_SOCKET *n = SIMPLE_HASHTABLE_SLOT_DATA(sl); - if(!n) continue; - - string_freez(n->cmdline); - aral_freez(ls->local_socket_aral, n); - } - - // free the pid_socket hashtable data - for(SIMPLE_HASHTABLE_SLOT_PID_SOCKET *sl = simple_hashtable_first_read_only_PID_SOCKET(&ls->pid_sockets_hashtable); - sl; - sl =
simple_hashtable_next_read_only_PID_SOCKET(&ls->pid_sockets_hashtable, sl)) { - struct pid_socket *ps = SIMPLE_HASHTABLE_SLOT_DATA(sl); - if(!ps) continue; - - freez(ps->cmdline); - aral_freez(ls->pid_socket_aral, ps); - } - - // free the hashtable - simple_hashtable_destroy_NET_NS(&ls->ns_hashtable); - simple_hashtable_destroy_PID_SOCKET(&ls->pid_sockets_hashtable); - simple_hashtable_destroy_LISTENING_PORT(&ls->listening_ports_hashtable); - simple_hashtable_destroy_LOCAL_IP(&ls->local_ips_hashtable); - simple_hashtable_destroy_LOCAL_SOCKET(&ls->sockets_hashtable); - - aral_destroy(ls->local_socket_aral); - aral_destroy(ls->pid_socket_aral); -} - -// -------------------------------------------------------------------------------------------------------------------- - -static inline void local_sockets_do_family_protocol(LS_STATE *ls, const char *filename, uint16_t family, uint16_t protocol) { -#ifdef HAVE_LIBMNL - if(ls->nl && ls->use_nl) { - ls->use_nl = local_sockets_libmnl_get_sockets(ls, family, protocol); - - if(ls->use_nl) - return; - } -#endif - - local_sockets_read_proc_net_x(ls, filename, family, protocol); -} - -static inline void local_sockets_read_all_system_sockets(LS_STATE *ls) { - char path[FILENAME_MAX + 1]; - - if(ls->config.namespaces) { - snprintfz(path, sizeof(path), "%s/proc/self/ns/net", ls->config.host_prefix); - local_sockets_read_proc_inode_link(ls, path, &ls->proc_self_net_ns_inode, "net"); - } - - if(ls->config.cmdline || ls->config.comm || ls->config.pid || ls->config.namespaces) { - snprintfz(path, sizeof(path), "%s/proc", ls->config.host_prefix); - local_sockets_find_all_sockets_in_proc(ls, path); - } - - if(ls->config.tcp4) { - snprintfz(path, sizeof(path), "%s/proc/net/tcp", ls->config.host_prefix); - local_sockets_do_family_protocol(ls, path, AF_INET, IPPROTO_TCP); - } - - if(ls->config.udp4) { - snprintfz(path, sizeof(path), "%s/proc/net/udp", ls->config.host_prefix); - local_sockets_do_family_protocol(ls, path, AF_INET, IPPROTO_UDP); - } - - if(ls->config.tcp6) { - snprintfz(path, sizeof(path), "%s/proc/net/tcp6", ls->config.host_prefix); - local_sockets_do_family_protocol(ls, path, AF_INET6, IPPROTO_TCP); - } - - if(ls->config.udp6) { - snprintfz(path, sizeof(path), "%s/proc/net/udp6", ls->config.host_prefix); - local_sockets_do_family_protocol(ls, path, AF_INET6, IPPROTO_UDP); - } -} - -// -------------------------------------------------------------------------------------------------------------------- - -struct local_sockets_child_work { - int fd; - uint64_t net_ns_inode; -}; - -static inline void local_sockets_send_to_parent(struct local_socket_state *ls __maybe_unused, struct local_socket *n, void *data) { - struct local_sockets_child_work *cw = data; - int fd = cw->fd; - - if(n->net_ns_inode != cw->net_ns_inode) - return; - - // local_sockets_log(ls, "child is sending inode %"PRIu64" of namespace %"PRIu64, n->inode, n->net_ns_inode); - - if(write(fd, n, sizeof(*n)) != sizeof(*n)) - local_sockets_log(ls, "failed to write local socket to pipe"); - - size_t len = n->cmdline ? 
string_strlen(n->cmdline) + 1 : 0; - if(write(fd, &len, sizeof(len)) != sizeof(len)) - local_sockets_log(ls, "failed to write cmdline length to pipe"); - - if(len) - if(write(fd, string2str(n->cmdline), len) != (ssize_t)len) - local_sockets_log(ls, "failed to write cmdline to pipe"); -} - -static inline void local_sockets_spawn_server_callback(SPAWN_REQUEST *request) { - LS_STATE ls = { 0 }; - ls.config = *((struct local_sockets_config *)request->data); - - // we don't need these inside namespaces - ls.config.cmdline = false; - ls.config.comm = false; - ls.config.pid = false; - ls.config.namespaces = false; - - // initialize local sockets - local_sockets_init(&ls); - - ls.config.host_prefix = ""; // we need the /proc of the container - - struct local_sockets_child_work cw = { - .net_ns_inode = ls.proc_self_net_ns_inode, - .fd = request->fds[1], // stdout - }; - - ls.config.cb = local_sockets_send_to_parent; - ls.config.data = &cw; - ls.proc_self_net_ns_inode = ls.config.net_ns_inode; - - // switch namespace using the custom fd passed via the spawn server - if (setns(request->fds[3], CLONE_NEWNET) == -1) { - local_sockets_log(&ls, "failed to switch network namespace at child process using fd %d", request->fds[3]); - exit(EXIT_FAILURE); - } - - // read all sockets from /proc - local_sockets_read_all_system_sockets(&ls); - - // send all sockets to parent - local_sockets_foreach_local_socket_call_cb(&ls); - - // send the terminating socket - struct local_socket zero = { - .net_ns_inode = ls.config.net_ns_inode, - }; - local_sockets_send_to_parent(&ls, &zero, &cw); - - exit(EXIT_SUCCESS); -} - -static inline bool local_sockets_get_namespace_sockets_with_pid(LS_STATE *ls, struct pid_socket *ps) { - char filename[1024]; - snprintfz(filename, sizeof(filename), "%s/proc/%d/ns/net", ls->config.host_prefix, ps->pid); - - // verify the pid is in the target namespace - int fd = open(filename, O_RDONLY | O_CLOEXEC); - if (fd == -1) { - local_sockets_log(ls, "cannot open file '%s'", filename); - return false; - } - - struct stat statbuf; - if (fstat(fd, &statbuf) == -1) { - close(fd); - local_sockets_log(ls, "failed to get file statistics for '%s'", filename); - return false; - } - - if (statbuf.st_ino != ps->net_ns_inode) { - close(fd); - local_sockets_log(ls, "pid %d is not in the wanted network namespace", ps->pid); - return false; - } - - if(ls->spawn_server == NULL) { - close(fd); - local_sockets_log(ls, "spawn server is not available"); - return false; - } - - struct local_sockets_config config = ls->config; - config.net_ns_inode = ps->net_ns_inode; - SPAWN_INSTANCE *si = spawn_server_exec(ls->spawn_server, STDERR_FILENO, fd, NULL, &config, sizeof(config), SPAWN_INSTANCE_TYPE_CALLBACK); - close(fd); fd = -1; - - if(si == NULL) { - local_sockets_log(ls, "cannot create spawn instance"); - return false; - } - - size_t received = 0; - struct local_socket buf; - while(read(spawn_server_instance_read_fd(si), &buf, sizeof(buf)) == sizeof(buf)) { - size_t len = 0; - if(read(spawn_server_instance_read_fd(si), &len, sizeof(len)) != sizeof(len)) - local_sockets_log(ls, "failed to read cmdline length from pipe"); - - if(len) { - char cmdline[len + 1]; - if(read(spawn_server_instance_read_fd(si), cmdline, len) != (ssize_t)len) - local_sockets_log(ls, "failed to read cmdline from pipe"); - else { - cmdline[len] = '\0'; - buf.cmdline = string_strdupz(cmdline); - } - } - else - buf.cmdline = NULL; - - received++; - - struct local_socket zero = { - .net_ns_inode = ps->net_ns_inode, - }; - if(memcmp(&buf, &zero, 
sizeof(buf)) == 0) { - // the terminator - break; - } - - spinlock_lock(&ls->spinlock); - - SIMPLE_HASHTABLE_SLOT_LOCAL_SOCKET *sl = simple_hashtable_get_slot_LOCAL_SOCKET(&ls->sockets_hashtable, buf.inode, &buf, true); - LOCAL_SOCKET *n = SIMPLE_HASHTABLE_SLOT_DATA(sl); - if(n) { - string_freez(buf.cmdline); -// local_sockets_log(ls, -// "ns inode %" PRIu64" (comm: '%s', pid: %u, ns: %"PRIu64") already exists in hashtable (comm: '%s', pid: %u, ns: %"PRIu64") - ignoring duplicate", -// buf.inode, buf.comm, buf.pid, buf.net_ns_inode, n->comm, n->pid, n->net_ns_inode); - } - else { - n = aral_mallocz(ls->local_socket_aral); - memcpy(n, &buf, sizeof(*n)); - simple_hashtable_set_slot_LOCAL_SOCKET(&ls->sockets_hashtable, sl, n->inode, n); - - local_sockets_index_listening_port(ls, n); - } - - spinlock_unlock(&ls->spinlock); - } - - spawn_server_exec_kill(ls->spawn_server, si); - return received > 0; -} - -struct local_sockets_namespace_worker { - LS_STATE *ls; - uint64_t inode; -}; - -static inline void *local_sockets_get_namespace_sockets(void *arg) { - struct local_sockets_namespace_worker *data = arg; - LS_STATE *ls = data->ls; - const uint64_t inode = data->inode; - - spinlock_lock(&ls->spinlock); - - // find a pid_socket that has this namespace - for(SIMPLE_HASHTABLE_SLOT_PID_SOCKET *sl_pid = simple_hashtable_first_read_only_PID_SOCKET(&ls->pid_sockets_hashtable) ; - sl_pid ; - sl_pid = simple_hashtable_next_read_only_PID_SOCKET(&ls->pid_sockets_hashtable, sl_pid)) { - struct pid_socket *ps = SIMPLE_HASHTABLE_SLOT_DATA(sl_pid); - if(!ps || ps->net_ns_inode != inode) continue; - - // now we have a pid that has the same namespace inode - - spinlock_unlock(&ls->spinlock); - const bool worked = local_sockets_get_namespace_sockets_with_pid(ls, ps); - spinlock_lock(&ls->spinlock); - - if(worked) - break; - } - - spinlock_unlock(&ls->spinlock); - - return NULL; -} - -static inline void local_sockets_namespaces(LS_STATE *ls) { - size_t threads = ls->config.max_concurrent_namespaces; - if(threads == 0) threads = 5; - if(threads > 100) threads = 100; - - size_t last_thread = 0; - ND_THREAD *workers[threads]; - struct local_sockets_namespace_worker workers_data[threads]; - memset(workers, 0, sizeof(workers)); - memset(workers_data, 0, sizeof(workers_data)); - - spinlock_lock(&ls->spinlock); - - for(SIMPLE_HASHTABLE_SLOT_NET_NS *sl = simple_hashtable_first_read_only_NET_NS(&ls->ns_hashtable); - sl; - sl = simple_hashtable_next_read_only_NET_NS(&ls->ns_hashtable, sl)) { - const uint64_t inode = (uint64_t)SIMPLE_HASHTABLE_SLOT_DATA(sl); - - if(inode == ls->proc_self_net_ns_inode) - continue; - - spinlock_unlock(&ls->spinlock); - - ls->stats.namespaces_found++; - - if(workers[last_thread] != NULL) { - if(++last_thread >= threads) - last_thread = 0; - - if(workers[last_thread]) { - nd_thread_join(workers[last_thread]); - workers[last_thread] = NULL; - } - } - - workers_data[last_thread].ls = ls; - workers_data[last_thread].inode = inode; - workers[last_thread] = nd_thread_create( - "local-sockets-worker", NETDATA_THREAD_OPTION_JOINABLE, - local_sockets_get_namespace_sockets, &workers_data[last_thread]); - - spinlock_lock(&ls->spinlock); - } - - spinlock_unlock(&ls->spinlock); - - // wait for all the running threads to finish - for(size_t i = 0; i < threads ;i++) { - if(workers[i]) - nd_thread_join(workers[i]); - } -} - -// --------------------------------------------------------------------------------------------------------------------- - -static inline void local_sockets_process(LS_STATE *ls) { - // initialize our
hashtables - local_sockets_init(ls); - - // read all sockets from /proc - local_sockets_read_all_system_sockets(ls); - - // check all socket namespaces - if(ls->config.namespaces) - local_sockets_namespaces(ls); - - // detect the directions of the sockets - if(ls->config.inbound || ls->config.outbound || ls->config.local) - local_sockets_detect_directions(ls); - - // call the callback for each socket - local_sockets_foreach_local_socket_call_cb(ls); - - // free all memory - local_sockets_cleanup(ls); -} - -static inline void ipv6_address_to_txt(struct in6_addr *in6_addr, char *dst) { - struct sockaddr_in6 sa = { 0 }; - - sa.sin6_family = AF_INET6; - sa.sin6_port = htons(0); - sa.sin6_addr = *in6_addr; - - // Convert to human-readable format - if (inet_ntop(AF_INET6, &(sa.sin6_addr), dst, INET6_ADDRSTRLEN) == NULL) - *dst = '\0'; -} - -static inline void ipv4_address_to_txt(uint32_t ip, char *dst) { - uint8_t octets[4]; - octets[0] = ip & 0xFF; - octets[1] = (ip >> 8) & 0xFF; - octets[2] = (ip >> 16) & 0xFF; - octets[3] = (ip >> 24) & 0xFF; - sprintf(dst, "%u.%u.%u.%u", octets[0], octets[1], octets[2], octets[3]); -} - -#endif //NETDATA_LOCAL_SOCKETS_H diff --git a/src/libnetdata/maps/system-groups.h b/src/libnetdata/maps/system-groups.h deleted file mode 100644 index fd042cd4e..000000000 --- a/src/libnetdata/maps/system-groups.h +++ /dev/null @@ -1,67 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#ifndef NETDATA_SYSTEM_GROUPS_H -#define NETDATA_SYSTEM_GROUPS_H - -#include "libnetdata/libnetdata.h" - -// -------------------------------------------------------------------------------------------------------------------- -// hashtable for caching uid to username mappings -// key is the uid, value is username (STRING) - -#define SIMPLE_HASHTABLE_VALUE_TYPE STRING -#define SIMPLE_HASHTABLE_NAME _GROUPNAMES_CACHE -#include "libnetdata/simple_hashtable.h" - -typedef struct groupnames_cache { - SPINLOCK spinlock; - SIMPLE_HASHTABLE_GROUPNAMES_CACHE ht; -} GROUPNAMES_CACHE; - -static inline STRING *system_groupnames_cache_lookup_gid(GROUPNAMES_CACHE *gc, gid_t gid) { - spinlock_lock(&gc->spinlock); - - SIMPLE_HASHTABLE_SLOT_GROUPNAMES_CACHE *sl = simple_hashtable_get_slot_GROUPNAMES_CACHE(&gc->ht, gid, &gid, true); - STRING *g = SIMPLE_HASHTABLE_SLOT_DATA(sl); - if(!g) { - char tmp[1024 + 1]; - struct group grp, *result = NULL; - - if (getgrgid_r(gid, &grp, tmp, sizeof(tmp), &result) != 0 || !result || !grp.gr_name || !(*grp.gr_name)) { - char name[50]; - snprintfz(name, sizeof(name), "%u", gid); - g = string_strdupz(name); - } - else - g = string_strdupz(grp.gr_name); - - simple_hashtable_set_slot_GROUPNAMES_CACHE(&gc->ht, sl, gid, g); - } - - g = string_dup(g); - spinlock_unlock(&gc->spinlock); - return g; -} - -static inline GROUPNAMES_CACHE *system_groupnames_cache_init(void) { - GROUPNAMES_CACHE *gc = callocz(1, sizeof(*gc)); - spinlock_init(&gc->spinlock); - simple_hashtable_init_GROUPNAMES_CACHE(&gc->ht, 100); - return gc; -} - -static inline void system_groupnames_cache_destroy(GROUPNAMES_CACHE *gc) { - spinlock_lock(&gc->spinlock); - - for(SIMPLE_HASHTABLE_SLOT_GROUPNAMES_CACHE *sl = simple_hashtable_first_read_only_GROUPNAMES_CACHE(&gc->ht); - sl; - sl = simple_hashtable_next_read_only_GROUPNAMES_CACHE(&gc->ht, sl)) { - STRING *u = SIMPLE_HASHTABLE_SLOT_DATA(sl); - string_freez(u); - } - - simple_hashtable_destroy_GROUPNAMES_CACHE(&gc->ht); - freez(gc); -} - -#endif //NETDATA_SYSTEM_GROUPS_H diff --git a/src/libnetdata/maps/system-services.h 
b/src/libnetdata/maps/system-services.h deleted file mode 100644 index 123f4f10b..000000000 --- a/src/libnetdata/maps/system-services.h +++ /dev/null @@ -1,92 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#ifndef NETDATA_SYSTEM_SERVICES_H -#define NETDATA_SYSTEM_SERVICES_H - -#include "libnetdata/libnetdata.h" -#include - -// -------------------------------------------------------------------------------------------------------------------- -// hashtable for caching port and protocol to service name mappings -// key is the combination of protocol and port packed into a uint64_t, value is service name (STRING) - -#define SIMPLE_HASHTABLE_VALUE_TYPE STRING -#define SIMPLE_HASHTABLE_NAME _SERVICENAMES_CACHE -#include "libnetdata/simple_hashtable.h" - -typedef struct servicenames_cache { - SPINLOCK spinlock; - SIMPLE_HASHTABLE_SERVICENAMES_CACHE ht; -} SERVICENAMES_CACHE; - -static inline uint64_t system_servicenames_key(uint16_t port, uint16_t ipproto) { - return ((uint64_t)ipproto << 16) | (uint64_t)port; -} - -static inline const char *system_servicenames_ipproto2str(uint16_t ipproto) { - return (ipproto == IPPROTO_TCP) ? "tcp" : "udp"; -} - -static inline const char *static_portnames(uint16_t port, uint16_t ipproto) { - if(port == 19999 && ipproto == IPPROTO_TCP) - return "netdata"; - - if(port == 8125) - return "statsd"; - - return NULL; -} - -static inline STRING *system_servicenames_cache_lookup(SERVICENAMES_CACHE *sc, uint16_t port, uint16_t ipproto) { - uint64_t key = system_servicenames_key(port, ipproto); - spinlock_lock(&sc->spinlock); - - SIMPLE_HASHTABLE_SLOT_SERVICENAMES_CACHE *sl = simple_hashtable_get_slot_SERVICENAMES_CACHE(&sc->ht, key, &key, true); - STRING *s = SIMPLE_HASHTABLE_SLOT_DATA(sl); - if (!s) { - const char *st = static_portnames(port, ipproto); - if(st) { - s = string_strdupz(st); - } - else { - struct servent *se = getservbyport(htons(port), system_servicenames_ipproto2str(ipproto)); - - if (!se || !se->s_name) { - char name[50]; - snprintfz(name, sizeof(name), "%u/%s", port, system_servicenames_ipproto2str(ipproto)); - s = string_strdupz(name); - } - else - s = string_strdupz(se->s_name); - } - - simple_hashtable_set_slot_SERVICENAMES_CACHE(&sc->ht, sl, key, s); - } - - s = string_dup(s); - spinlock_unlock(&sc->spinlock); - return s; -} - -static inline SERVICENAMES_CACHE *system_servicenames_cache_init(void) { - SERVICENAMES_CACHE *sc = callocz(1, sizeof(*sc)); - spinlock_init(&sc->spinlock); - simple_hashtable_init_SERVICENAMES_CACHE(&sc->ht, 100); - return sc; -} - -static inline void system_servicenames_cache_destroy(SERVICENAMES_CACHE *sc) { - spinlock_lock(&sc->spinlock); - - for (SIMPLE_HASHTABLE_SLOT_SERVICENAMES_CACHE *sl = simple_hashtable_first_read_only_SERVICENAMES_CACHE(&sc->ht); - sl; - sl = simple_hashtable_next_read_only_SERVICENAMES_CACHE(&sc->ht, sl)) { - STRING *s = SIMPLE_HASHTABLE_SLOT_DATA(sl); - string_freez(s); - } - - simple_hashtable_destroy_SERVICENAMES_CACHE(&sc->ht); - freez(sc); -} - -#endif //NETDATA_SYSTEM_SERVICES_H diff --git a/src/libnetdata/maps/system-users.h b/src/libnetdata/maps/system-users.h deleted file mode 100644 index 5f7dfae1a..000000000 --- a/src/libnetdata/maps/system-users.h +++ /dev/null @@ -1,67 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#ifndef NETDATA_SYSTEM_USERS_H -#define NETDATA_SYSTEM_USERS_H - -#include "libnetdata/libnetdata.h" - -// -------------------------------------------------------------------------------------------------------------------- -// hashtable for caching 
uid to username mappings -// key is the uid, value is username (STRING) - -#define SIMPLE_HASHTABLE_VALUE_TYPE STRING -#define SIMPLE_HASHTABLE_NAME _USERNAMES_CACHE -#include "libnetdata/simple_hashtable.h" - -typedef struct usernames_cache { - SPINLOCK spinlock; - SIMPLE_HASHTABLE_USERNAMES_CACHE ht; -} USERNAMES_CACHE; - -static inline STRING *system_usernames_cache_lookup_uid(USERNAMES_CACHE *uc, uid_t uid) { - spinlock_lock(&uc->spinlock); - - SIMPLE_HASHTABLE_SLOT_USERNAMES_CACHE *sl = simple_hashtable_get_slot_USERNAMES_CACHE(&uc->ht, uid, &uid, true); - STRING *u = SIMPLE_HASHTABLE_SLOT_DATA(sl); - if(!u) { - char tmp[1024 + 1]; - struct passwd pw, *result = NULL; - - if (getpwuid_r(uid, &pw, tmp, sizeof(tmp), &result) != 0 || !result || !pw.pw_name || !(*pw.pw_name)) { - char name[50]; - snprintfz(name, sizeof(name), "%u", uid); - u = string_strdupz(name); - } - else - u = string_strdupz(pw.pw_name); - - simple_hashtable_set_slot_USERNAMES_CACHE(&uc->ht, sl, uid, u); - } - - u = string_dup(u); - spinlock_unlock(&uc->spinlock); - return u; -} - -static inline USERNAMES_CACHE *system_usernames_cache_init(void) { - USERNAMES_CACHE *uc = callocz(1, sizeof(*uc)); - spinlock_init(&uc->spinlock); - simple_hashtable_init_USERNAMES_CACHE(&uc->ht, 100); - return uc; -} - -static inline void system_usernames_cache_destroy(USERNAMES_CACHE *uc) { - spinlock_lock(&uc->spinlock); - - for(SIMPLE_HASHTABLE_SLOT_USERNAMES_CACHE *sl = simple_hashtable_first_read_only_USERNAMES_CACHE(&uc->ht); - sl; - sl = simple_hashtable_next_read_only_USERNAMES_CACHE(&uc->ht, sl)) { - STRING *u = SIMPLE_HASHTABLE_SLOT_DATA(sl); - string_freez(u); - } - - simple_hashtable_destroy_USERNAMES_CACHE(&uc->ht); - freez(uc); -} - -#endif //NETDATA_SYSTEM_USERS_H diff --git a/src/libnetdata/onewayalloc/README.md b/src/libnetdata/onewayalloc/README.md index 082085db0..fbaad0aea 100644 --- a/src/libnetdata/onewayalloc/README.md +++ b/src/libnetdata/onewayalloc/README.md @@ -1,12 +1,3 @@ - - # One Way Allocator This is a very fast single-threaded-only memory allocator, that minimized system calls diff --git a/src/libnetdata/os/close_range.c b/src/libnetdata/os/close_range.c index 56d5c2527..2ee5837ee 100644 --- a/src/libnetdata/os/close_range.c +++ b/src/libnetdata/os/close_range.c @@ -7,6 +7,12 @@ static int fd_is_valid(int fd) { return fcntl(fd, F_GETFD) != -1 || errno != EBADF; } +static void setcloexec(int fd) { + int flags = fcntl(fd, F_GETFD); + if (flags != -1) + (void) fcntl(fd, F_SETFD, flags | FD_CLOEXEC); +} + int os_get_fd_open_max(void) { static int fd_open_max = CLOSE_RANGE_FD_MAX; @@ -33,9 +39,9 @@ int os_get_fd_open_max(void) { return fd_open_max; } -void os_close_range(int first, int last) { +void os_close_range(int first, int last, int flags) { #if defined(HAVE_CLOSE_RANGE) - if(close_range(first, last, 0) == 0) return; + if(close_range(first, last, flags) == 0) return; #endif #if defined(OS_LINUX) @@ -44,8 +50,12 @@ void os_close_range(int first, int last) { struct dirent *entry; while ((entry = readdir(dir)) != NULL) { int fd = str2i(entry->d_name); - if (fd >= first && (last == CLOSE_RANGE_FD_MAX || fd <= last) && fd_is_valid(fd)) - (void)close(fd); + if (fd >= first && (last == CLOSE_RANGE_FD_MAX || fd <= last) && fd_is_valid(fd)) { + if(flags & CLOSE_RANGE_CLOEXEC) + setcloexec(fd); + else + (void)close(fd); + } } closedir(dir); return; @@ -57,7 +67,12 @@ void os_close_range(int first, int last) { last = os_get_fd_open_max(); for (int fd = first; fd <= last; fd++) { - if (fd_is_valid(fd)) 
(void)close(fd); + if (fd_is_valid(fd)) { + if(flags & CLOSE_RANGE_CLOEXEC) + setcloexec(fd); + else + (void)close(fd); + } } } @@ -67,9 +82,9 @@ static int compare_ints(const void *a, const void *b) { return (int_a > int_b) - (int_a < int_b); } -void os_close_all_non_std_open_fds_except(const int fds[], size_t fds_num) { +void os_close_all_non_std_open_fds_except(const int fds[], size_t fds_num, int flags) { if (fds_num == 0 || fds == NULL) { - os_close_range(STDERR_FILENO + 1, CLOSE_RANGE_FD_MAX); + os_close_range(STDERR_FILENO + 1, CLOSE_RANGE_FD_MAX, flags); return; } @@ -89,10 +104,10 @@ void os_close_all_non_std_open_fds_except(const int fds[], size_t fds_num) { // call os_close_range() as many times as needed for (; i < fds_num; i++) { if (fds_copy[i] > start) - os_close_range(start, fds_copy[i] - 1); + os_close_range(start, fds_copy[i] - 1, flags); start = fds_copy[i] + 1; } - os_close_range(start, CLOSE_RANGE_FD_MAX); + os_close_range(start, CLOSE_RANGE_FD_MAX, flags); } diff --git a/src/libnetdata/os/close_range.h b/src/libnetdata/os/close_range.h index e3cb93798..7914ac3f6 100644 --- a/src/libnetdata/os/close_range.h +++ b/src/libnetdata/os/close_range.h @@ -5,8 +5,16 @@ #define CLOSE_RANGE_FD_MAX (int)(~0U) +#ifndef CLOSE_RANGE_UNSHARE +#define CLOSE_RANGE_UNSHARE (1U << 1) +#endif + +#ifndef CLOSE_RANGE_CLOEXEC +#define CLOSE_RANGE_CLOEXEC (1U << 2) +#endif + int os_get_fd_open_max(void); -void os_close_range(int first, int last); -void os_close_all_non_std_open_fds_except(const int fds[], size_t fds_num); +void os_close_range(int first, int last, int flags); +void os_close_all_non_std_open_fds_except(const int fds[], size_t fds_num, int flags); #endif //CLOSE_RANGE_H diff --git a/src/libnetdata/os/get_system_cpus.c b/src/libnetdata/os/get_system_cpus.c index 5a76d8aa5..f8234d8bc 100644 --- a/src/libnetdata/os/get_system_cpus.c +++ b/src/libnetdata/os/get_system_cpus.c @@ -2,10 +2,6 @@ #include "../libnetdata.h" -#if defined(OS_WINDOWS) -#include -#endif - #define CPUS_FOR_COLLECTORS 0 #define CPUS_FOR_NETDATA 1 @@ -82,7 +78,14 @@ long os_get_system_cpus_cached(bool cache, bool for_netdata) { SYSTEM_INFO sysInfo; GetSystemInfo(&sysInfo); - return (long) sysInfo.dwNumberOfProcessors; + processors[index] = sysInfo.dwNumberOfProcessors; + + if(processors[index] < 1) { + processors[index] = 1; + netdata_log_error("Assuming system has %ld processors.", processors[index]); + } + + return processors[index]; #else diff --git a/src/libnetdata/os/gettid.c b/src/libnetdata/os/gettid.c index 273c428f8..d61819445 100644 --- a/src/libnetdata/os/gettid.c +++ b/src/libnetdata/os/gettid.c @@ -2,10 +2,6 @@ #include "../libnetdata.h" -#if defined(OS_WINDOWS) -#include -#endif - pid_t os_gettid(void) { #if defined(HAVE_GETTID) return gettid(); @@ -30,4 +26,9 @@ pid_t gettid_cached(void) { gettid_cached_tid = os_gettid(); return gettid_cached_tid; -} \ No newline at end of file +} + +pid_t gettid_uncached(void) { + gettid_cached_tid = 0; + return gettid_cached(); +} diff --git a/src/libnetdata/os/gettid.h b/src/libnetdata/os/gettid.h index f04d9c365..6debfd928 100644 --- a/src/libnetdata/os/gettid.h +++ b/src/libnetdata/os/gettid.h @@ -7,5 +7,6 @@ pid_t os_gettid(void); pid_t gettid_cached(void); +pid_t gettid_uncached(void); #endif //NETDATA_GETTID_H diff --git a/src/libnetdata/os/os-windows-wrappers.c b/src/libnetdata/os/os-windows-wrappers.c index 64076eae2..a79ae41f2 100644 --- a/src/libnetdata/os/os-windows-wrappers.c +++ b/src/libnetdata/os/os-windows-wrappers.c @@ -3,8 +3,6 @@ #include 
"../libnetdata.h" #if defined(OS_WINDOWS) -#include - long netdata_registry_get_dword_from_open_key(unsigned int *out, void *lKey, char *name) { DWORD length = 260; @@ -58,4 +56,42 @@ bool netdata_registry_get_string(char *out, unsigned int length, void *hKey, cha return status; } +bool EnableWindowsPrivilege(const char *privilegeName) { + HANDLE hToken; + LUID luid; + TOKEN_PRIVILEGES tkp; + + // Open the process token with appropriate access rights + if (!OpenProcessToken(GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY, &hToken)) + return false; + + // Lookup the LUID for the specified privilege + if (!LookupPrivilegeValue(NULL, privilegeName, &luid)) { + CloseHandle(hToken); // Close the token handle before returning + return false; + } + + // Set up the TOKEN_PRIVILEGES structure + tkp.PrivilegeCount = 1; + tkp.Privileges[0].Luid = luid; + tkp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED; + + // Adjust the token's privileges + if (!AdjustTokenPrivileges(hToken, FALSE, &tkp, sizeof(tkp), NULL, NULL)) { + CloseHandle(hToken); // Close the token handle before returning + return false; + } + + // Check if AdjustTokenPrivileges succeeded + if (GetLastError() == ERROR_NOT_ALL_ASSIGNED) { + CloseHandle(hToken); // Close the token handle before returning + return false; + } + + // Close the handle to the token after success + CloseHandle(hToken); + + return true; +} + #endif diff --git a/src/libnetdata/os/os-windows-wrappers.h b/src/libnetdata/os/os-windows-wrappers.h index 5ae73043a..30e1fc50d 100644 --- a/src/libnetdata/os/os-windows-wrappers.h +++ b/src/libnetdata/os/os-windows-wrappers.h @@ -14,5 +14,7 @@ bool netdata_registry_get_dword(unsigned int *out, void *hKey, char *subKey, cha long netdata_registry_get_string_from_open_key(char *out, unsigned int length, void *lKey, char *name); bool netdata_registry_get_string(char *out, unsigned int length, void *hKey, char *subKey, char *name); +bool EnableWindowsPrivilege(const char *privilegeName); + #endif // OS_WINDOWS #endif //NETDATA_OS_WINDOWS_WRAPPERS_H diff --git a/src/libnetdata/os/os.c b/src/libnetdata/os/os.c index 1caa25f85..780801fa1 100644 --- a/src/libnetdata/os/os.c +++ b/src/libnetdata/os/os.c @@ -6,12 +6,13 @@ // system functions // to retrieve settings of the system -unsigned int system_hz; +unsigned int system_hz = 100; void os_get_system_HZ(void) { long ticks; if ((ticks = sysconf(_SC_CLK_TCK)) == -1) { netdata_log_error("Cannot get system clock ticks"); + ticks = 100; } system_hz = (unsigned int) ticks; diff --git a/src/libnetdata/os/os.h b/src/libnetdata/os/os.h index 15e74faa7..1846afb6d 100644 --- a/src/libnetdata/os/os.h +++ b/src/libnetdata/os/os.h @@ -7,6 +7,8 @@ #include #endif +#include "random.h" +#include "timestamps.h" #include "setproctitle.h" #include "close_range.h" #include "setresuid.h" @@ -16,12 +18,21 @@ #include "gettid.h" #include "get_pid_max.h" #include "get_system_cpus.h" -#include "tinysleep.h" +#include "sleep.h" #include "uuid_generate.h" #include "setenv.h" #include "os-freebsd-wrappers.h" #include "os-macos-wrappers.h" #include "os-windows-wrappers.h" +#include "system-maps/cached-uid-username.h" +#include "system-maps/cached-gid-groupname.h" +#include "system-maps/cache-host-users-and-groups.h" +#include "system-maps/cached-sid-username.h" +#include "windows-perflib/perflib.h" + +// this includes windows.h to the whole of netdata +// so various conflicts arise +// #include "windows-wmi/windows-wmi.h" // 
===================================================================================================================== // common defs for Apple/FreeBSD/Linux diff --git a/src/libnetdata/os/random.c b/src/libnetdata/os/random.c new file mode 100644 index 000000000..125e1cdb5 --- /dev/null +++ b/src/libnetdata/os/random.c @@ -0,0 +1,200 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "libnetdata/libnetdata.h" + +#if !defined(HAVE_ARC4RANDOM_BUF) && !defined(HAVE_RAND_S) +static SPINLOCK random_lock = NETDATA_SPINLOCK_INITIALIZER; +static __attribute__((constructor)) void random_seed() { + // Use current time and process ID to create a high-entropy seed + struct timeval tv; + gettimeofday(&tv, NULL); + + uint32_t seed = (uint32_t)(tv.tv_sec ^ tv.tv_usec ^ getpid()); + + // Seed the random number generator + srandom(seed); +} + +static inline void random_bytes(void *buf, size_t bytes) { + spinlock_lock(&random_lock); + while (bytes > 0) { + if (bytes >= sizeof(uint32_t)) { + // Generate 4 bytes at a time + uint32_t temp = random(); + memcpy(buf, &temp, sizeof(uint32_t)); + buf = (uint8_t *)buf + sizeof(uint32_t); + bytes -= sizeof(uint32_t); + } else if (bytes >= sizeof(uint16_t)) { + // Generate 2 bytes at a time + uint16_t temp = random(); + memcpy(buf, &temp, sizeof(uint16_t)); + buf = (uint8_t *)buf + sizeof(uint16_t); + bytes -= sizeof(uint16_t); + } else { + // Generate remaining bytes + uint32_t temp = random(); + for (size_t i = 0; i < bytes; i++) { + ((uint8_t *)buf)[i] = temp & 0xFF; + temp >>= 8; + } + bytes = 0; + } + } + spinlock_unlock(&random_lock); +} + +#if defined(HAVE_GETRANDOM) +#include <sys/random.h> +static inline void getrandom_bytes(void *buf, size_t bytes) { + ssize_t result; + while (bytes > 0) { + result = getrandom(buf, bytes, 0); + if (result == -1) { + if (errno == EINTR) { + // Interrupted, retry + continue; + } else if (errno == EAGAIN) { + // Insufficient entropy; wait and retry + tinysleep(); + continue; + } else { + // unrecoverable error; fall back to random_bytes() + random_bytes(buf, bytes); + return; + } + } + buf = (uint8_t *)buf + result; + bytes -= result; + } +} +#endif // HAVE_GETRANDOM +#endif // !HAVE_ARC4RANDOM_BUF && !HAVE_RAND_S + +#if defined(HAVE_RAND_S) +static inline void rand_s_bytes(void *buf, size_t bytes) { + while (bytes > 0) { + if (bytes >= sizeof(unsigned int)) { + unsigned int temp; + rand_s(&temp); + memcpy(buf, &temp, sizeof(unsigned int)); + buf = (uint8_t *)buf + sizeof(unsigned int); + bytes -= sizeof(unsigned int); + } else if (bytes >= sizeof(uint16_t)) { + // Generate 2 bytes at a time + unsigned int t; + rand_s(&t); + uint16_t temp = t; + memcpy(buf, &temp, sizeof(uint16_t)); + buf = (uint8_t *)buf + sizeof(uint16_t); + bytes -= sizeof(uint16_t); + } else { + // Generate remaining bytes + unsigned int temp; + rand_s(&temp); + for (size_t i = 0; i < sizeof(temp) && i < bytes; i++) { + ((uint8_t *)buf)[0] = temp & 0xFF; + temp >>= 8; + buf = (uint8_t *)buf + 1; + bytes--; + } + } + } +} +#endif + +inline void os_random_bytes(void *buf, size_t bytes) { +#if defined(HAVE_ARC4RANDOM_BUF) + arc4random_buf(buf, bytes); +#else + + if(RAND_bytes((unsigned char *)buf, bytes) == 1) + return; + +#if defined(HAVE_GETRANDOM) + getrandom_bytes(buf, bytes); +#elif defined(HAVE_RAND_S) + rand_s_bytes(buf, bytes); +#else + random_bytes(buf, bytes); +#endif +#endif +} + +// Generate an 8-bit random number +uint8_t os_random8(void) { + uint8_t value; + os_random_bytes(&value, sizeof(value)); + return value; +} + +// Generate a 16-bit random number +uint16_t
os_random16(void) { + uint16_t value; + os_random_bytes(&value, sizeof(value)); + return value; +} + +// Generate a 32-bit random number +uint32_t os_random32(void) { + uint32_t value; + os_random_bytes(&value, sizeof(value)); + return value; +} + +// Generate a 64-bit random number +uint64_t os_random64(void) { + uint64_t value; + os_random_bytes(&value, sizeof(value)); + return value; +} + +/* + * Rejection Sampling + * To reduce bias, we can use rejection sampling without creating an infinite loop. + * This technique works by discarding values that would introduce bias, but limiting + * the number of retries to avoid infinite loops. +*/ + +// Calculate an upper limit so that the range evenly divides into max. +// Any values greater than this limit would introduce bias, so we discard them. +#define MAX_RETRIES 10 +#define os_random_rejection_sampling_X(type, type_max, func, max) \ + ({ \ + size_t retries = 0; \ + type value, upper_limit = type_max - (type_max % (max)); \ + while ((value = func()) >= upper_limit && retries++ < MAX_RETRIES); \ + value % (max); \ + }) + +uint64_t os_random(uint64_t max) { + if (max <= 1) return 0; + +#if defined(HAVE_ARC4RANDOM_UNIFORM) + if(max <= UINT32_MAX) + // this is not biased + return arc4random_uniform(max); +#endif + + if ((max & (max - 1)) == 0) { + // max is a power of 2 + // use bitmasking to directly generate an unbiased random number + + if (max <= UINT8_MAX) + return os_random8() & (max - 1); + else if (max <= UINT16_MAX) + return os_random16() & (max - 1); + else if (max <= UINT32_MAX) + return os_random32() & (max - 1); + else + return os_random64() & (max - 1); + } + + if (max <= UINT8_MAX) + return os_random_rejection_sampling_X(uint8_t, UINT8_MAX, os_random8, max); + else if (max <= UINT16_MAX) + return os_random_rejection_sampling_X(uint16_t, UINT16_MAX, os_random16, max); + else if (max <= UINT32_MAX) + return os_random_rejection_sampling_X(uint32_t, UINT32_MAX, os_random32, max); + else + return os_random_rejection_sampling_X(uint64_t, UINT64_MAX, os_random64, max); +} diff --git a/src/libnetdata/os/random.h b/src/libnetdata/os/random.h new file mode 100644 index 000000000..d09cee5ea --- /dev/null +++ b/src/libnetdata/os/random.h @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_RANDOM_H +#define NETDATA_RANDOM_H + +#include "libnetdata/common.h" + +// fill a buffer with random bytes +void os_random_bytes(void *buf, size_t bytes); + +// return a random number 0 to max - 1 +uint64_t os_random(uint64_t max); + +uint8_t os_random8(void); +uint16_t os_random16(void); +uint32_t os_random32(void); +uint64_t os_random64(void); + +#endif //NETDATA_RANDOM_H diff --git a/src/libnetdata/os/setenv.c b/src/libnetdata/os/setenv.c index 5aa4302b8..c0de1b4b6 100644 --- a/src/libnetdata/os/setenv.c +++ b/src/libnetdata/os/setenv.c @@ -1,13 +1,8 @@ // SPDX-License-Identifier: GPL-3.0-or-later -#include "config.h" +#include "libnetdata/libnetdata.h" #ifndef HAVE_SETENV - -#include -#include -#include - int os_setenv(const char *name, const char *value, int overwrite) { char *env_var; int result; @@ -28,3 +23,21 @@ int os_setenv(const char *name, const char *value, int overwrite) { } #endif + +void nd_setenv(const char *name, const char *value, int overwrite) { +#if defined(OS_WINDOWS) + if(overwrite) + SetEnvironmentVariable(name, value); + else { + char buf[1024]; + if(GetEnvironmentVariable(name, buf, sizeof(buf)) == 0) + SetEnvironmentVariable(name, value); + } +#endif + +#ifdef HAVE_SETENV + setenv(name, value, 
overwrite); +#else + os_setenv(name, value, overwrite); +#endif +} diff --git a/src/libnetdata/os/setenv.h b/src/libnetdata/os/setenv.h index 3ed63714c..78e7224de 100644 --- a/src/libnetdata/os/setenv.h +++ b/src/libnetdata/os/setenv.h @@ -10,4 +10,6 @@ int os_setenv(const char *name, const char *value, int overwrite); #define setenv(name, value, overwrite) os_setenv(name, value, overwrite) #endif +void nd_setenv(const char *name, const char *value, int overwrite); + #endif //NETDATA_SETENV_H diff --git a/src/libnetdata/os/sleep.c b/src/libnetdata/os/sleep.c new file mode 100644 index 000000000..131b47c44 --- /dev/null +++ b/src/libnetdata/os/sleep.c @@ -0,0 +1,36 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "../libnetdata.h" + +#ifdef OS_WINDOWS +void tinysleep(void) { + Sleep(1); +} +#else +void tinysleep(void) { + static const struct timespec ns = { .tv_sec = 0, .tv_nsec = 1 }; + nanosleep(&ns, NULL); +} +#endif + +#ifdef OS_WINDOWS +void microsleep(usec_t ut) { + size_t ms = ut / USEC_PER_MS + ((ut == 0 || (ut % USEC_PER_MS)) ? 1 : 0); + Sleep(ms); +} +#else +void microsleep(usec_t ut) { + time_t secs = (time_t)(ut / USEC_PER_SEC); + nsec_t nsec = (ut % USEC_PER_SEC) * NSEC_PER_USEC + ((ut == 0) ? 1 : 0); + + struct timespec remaining = { + .tv_sec = secs, + .tv_nsec = nsec, + }; + + errno_clear(); + while (nanosleep(&remaining, &remaining) == -1 && errno == EINTR && (remaining.tv_sec || remaining.tv_nsec)) { + // Loop continues if interrupted by a signal + } +} +#endif diff --git a/src/libnetdata/os/sleep.h b/src/libnetdata/os/sleep.h new file mode 100644 index 000000000..358238762 --- /dev/null +++ b/src/libnetdata/os/sleep.h @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_SLEEP_H +#define NETDATA_SLEEP_H + +void tinysleep(void); +void microsleep(usec_t ut); + +#endif //NETDATA_SLEEP_H diff --git a/src/libnetdata/os/system-maps/cache-host-users-and-groups.c b/src/libnetdata/os/system-maps/cache-host-users-and-groups.c new file mode 100644 index 000000000..53825fd35 --- /dev/null +++ b/src/libnetdata/os/system-maps/cache-host-users-and-groups.c @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "libnetdata/libnetdata.h" + +static bool file_changed(const struct stat *statbuf __maybe_unused, struct timespec *last_modification_time __maybe_unused) { +#if defined(OS_MACOS) || defined(OS_WINDOWS) + return false; +#else + if(likely(statbuf->st_mtim.tv_sec == last_modification_time->tv_sec && + statbuf->st_mtim.tv_nsec == last_modification_time->tv_nsec)) return false; + + last_modification_time->tv_sec = statbuf->st_mtim.tv_sec; + last_modification_time->tv_nsec = statbuf->st_mtim.tv_nsec; + + return true; +#endif +} + +static size_t read_passwd_or_group(const char *filename, struct timespec *last_modification_time, void (*cb)(uint32_t gid, const char *name, uint32_t version), uint32_t version) { + struct stat statbuf; + if(unlikely(stat(filename, &statbuf) || !file_changed(&statbuf, last_modification_time))) + return 0; + + procfile *ff = procfile_open(filename, " :\t", PROCFILE_FLAG_DEFAULT); + if(unlikely(!ff)) return 0; + + ff = procfile_readall(ff); + if(unlikely(!ff)) return 0; + + size_t line, lines = procfile_lines(ff); + + size_t added = 0; + for(line = 0; line < lines ;line++) { + size_t words = procfile_linewords(ff, line); + if(unlikely(words < 3)) continue; + + char *name = procfile_lineword(ff, line, 0); + if(unlikely(!name || !*name)) continue; + + char *id_string = procfile_lineword(ff, line, 2); +
if(unlikely(!id_string || !*id_string)) continue; + + uint32_t id = str2ull(id_string, NULL); + + cb(id, name, version); + added++; + } + + procfile_close(ff); + return added; +} + +void update_cached_host_users(void) { + if(!netdata_configured_host_prefix || !*netdata_configured_host_prefix) return; + + static SPINLOCK spinlock = NETDATA_SPINLOCK_INITIALIZER; + if(!spinlock_trylock(&spinlock)) return; + + char filename[FILENAME_MAX]; + static bool initialized = false; + + size_t added = 0; + + if(!initialized) { + initialized = true; + cached_usernames_init(); + } + + static uint32_t passwd_version = 0; + static struct timespec passwd_ts = { 0 }; + snprintfz(filename, FILENAME_MAX, "%s/etc/passwd", netdata_configured_host_prefix); + added = read_passwd_or_group(filename, &passwd_ts, cached_username_populate_by_uid, ++passwd_version); + if(added) cached_usernames_delete_old_versions(passwd_version); + + spinlock_unlock(&spinlock); +} + +void update_cached_host_groups(void) { + if(!netdata_configured_host_prefix || !*netdata_configured_host_prefix) return; + + static SPINLOCK spinlock = NETDATA_SPINLOCK_INITIALIZER; + if(!spinlock_trylock(&spinlock)) return; + + char filename[FILENAME_MAX]; + static bool initialized = false; + + size_t added = 0; + + if(!initialized) { + initialized = true; + cached_groupnames_init(); + } + + static uint32_t group_version = 0; + static struct timespec group_ts = { 0 }; + snprintfz(filename, FILENAME_MAX, "%s/etc/group", netdata_configured_host_prefix); + added = read_passwd_or_group(filename, &group_ts, cached_groupname_populate_by_gid, ++group_version); + if(added) cached_groupnames_delete_old_versions(group_version); + + spinlock_unlock(&spinlock); +} diff --git a/src/libnetdata/os/system-maps/cache-host-users-and-groups.h b/src/libnetdata/os/system-maps/cache-host-users-and-groups.h new file mode 100644 index 000000000..7a84bcadf --- /dev/null +++ b/src/libnetdata/os/system-maps/cache-host-users-and-groups.h @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_CACHE_HOST_USERS_AND_GROUPS_H +#define NETDATA_CACHE_HOST_USERS_AND_GROUPS_H + +void update_cached_host_users(void); +void update_cached_host_groups(void); + +#endif //NETDATA_CACHE_HOST_USERS_AND_GROUPS_H diff --git a/src/libnetdata/os/system-maps/cached-gid-groupname.c b/src/libnetdata/os/system-maps/cached-gid-groupname.c new file mode 100644 index 000000000..3fabe94a2 --- /dev/null +++ b/src/libnetdata/os/system-maps/cached-gid-groupname.c @@ -0,0 +1,149 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "cached-gid-groupname.h" + +// -------------------------------------------------------------------------------------------------------------------- +// hashtable for caching gid to groupname mappings +// key is the gid, value is groupname (STRING) + +#define SIMPLE_HASHTABLE_KEY_TYPE gid_t +#define SIMPLE_HASHTABLE_VALUE_TYPE CACHED_GROUPNAME +#define SIMPLE_HASHTABLE_VALUE2KEY_FUNCTION cached_groupname_to_gid_ptr +#define SIMPLE_HASHTABLE_COMPARE_KEYS_FUNCTION compar_gid_ptr +#define SIMPLE_HASHTABLE_NAME _GROUPNAMES_CACHE +#include "libnetdata/simple_hashtable/simple_hashtable.h" + +static struct { + bool initialized; + SPINLOCK spinlock; + SIMPLE_HASHTABLE_GROUPNAMES_CACHE ht; +} group_cache = { + .initialized = false, + .spinlock = NETDATA_SPINLOCK_INITIALIZER, + .ht = { 0 }, +}; + +static gid_t *cached_groupname_to_gid_ptr(CACHED_GROUPNAME *cu) { + return &cu->gid; +} + +static bool compar_gid_ptr(gid_t *a, gid_t *b) { + return *a == *b; +} + +void 
cached_groupname_populate_by_gid(gid_t gid, const char *groupname, uint32_t version) { + internal_fatal(!group_cache.initialized, "system-users cache needs to be initialized"); + if(!groupname || !*groupname) return; + + spinlock_lock(&group_cache.spinlock); + + XXH64_hash_t hash = XXH3_64bits(&gid, sizeof(gid)); + SIMPLE_HASHTABLE_SLOT_GROUPNAMES_CACHE *sl = simple_hashtable_get_slot_GROUPNAMES_CACHE(&group_cache.ht, hash, &gid, true); + CACHED_GROUPNAME *cg = SIMPLE_HASHTABLE_SLOT_DATA(sl); + if(!cg || (cg->version && version > cg->version)) { + internal_fatal(cg && cg->gid != gid, "invalid gid matched from cache"); + + if(cg) + string_freez(cg->groupname); + else + cg = callocz(1, sizeof(*cg)); + + cg->version = version; + cg->gid = gid; + cg->groupname = string_strdupz(groupname); + simple_hashtable_set_slot_GROUPNAMES_CACHE(&group_cache.ht, sl, hash, cg); + } + + spinlock_unlock(&group_cache.spinlock); +} + +CACHED_GROUPNAME cached_groupname_get_by_gid(gid_t gid) { + internal_fatal(!group_cache.initialized, "system-users cache needs to be initialized"); + + spinlock_lock(&group_cache.spinlock); + + XXH64_hash_t hash = XXH3_64bits(&gid, sizeof(gid)); + SIMPLE_HASHTABLE_SLOT_GROUPNAMES_CACHE *sl = simple_hashtable_get_slot_GROUPNAMES_CACHE(&group_cache.ht, hash, &gid, true); + CACHED_GROUPNAME *cg = SIMPLE_HASHTABLE_SLOT_DATA(sl); + if(!cg) { + cg = callocz(1, sizeof(*cg)); + + static char tmp[1024]; // we are inside a global spinlock - it is ok to be static + struct group gr, *result = NULL; + + if (getgrgid_r(gid, &gr, tmp, sizeof(tmp), &result) != 0 || !result || !gr.gr_name || !(*gr.gr_name)) { + char name[UINT64_MAX_LENGTH]; + print_uint64(name, gid); + cg->groupname = string_strdupz(name); + } + else + cg->groupname = string_strdupz(gr.gr_name); + + cg->gid = gid; + simple_hashtable_set_slot_GROUPNAMES_CACHE(&group_cache.ht, sl, hash, cg); + } + + internal_fatal(cg->gid != gid, "invalid gid matched from cache"); + + CACHED_GROUPNAME rc = { + .version = cg->version, + .gid = cg->gid, + .groupname = string_dup(cg->groupname), + }; + + spinlock_unlock(&group_cache.spinlock); + return rc; +} + +void cached_groupname_release(CACHED_GROUPNAME cg) { + string_freez(cg.groupname); +} + +void cached_groupnames_init(void) { + if(group_cache.initialized) return; + group_cache.initialized = true; + + spinlock_init(&group_cache.spinlock); + simple_hashtable_init_GROUPNAMES_CACHE(&group_cache.ht, 100); +} + +void cached_groupnames_destroy(void) { + if(!group_cache.initialized) return; + + spinlock_lock(&group_cache.spinlock); + + for(SIMPLE_HASHTABLE_SLOT_GROUPNAMES_CACHE *sl = simple_hashtable_first_read_only_GROUPNAMES_CACHE(&group_cache.ht); + sl; + sl = simple_hashtable_next_read_only_GROUPNAMES_CACHE(&group_cache.ht, sl)) { + CACHED_GROUPNAME *u = SIMPLE_HASHTABLE_SLOT_DATA(sl); + if(u) { + string_freez(u->groupname); + freez(u); + // simple_hashtable_del_slot_GROUPNAMES_CACHE(&uc.ht, sl); + } + } + + simple_hashtable_destroy_GROUPNAMES_CACHE(&group_cache.ht); + group_cache.initialized = false; + + spinlock_unlock(&group_cache.spinlock); +} + +void cached_groupnames_delete_old_versions(uint32_t version) { + if(!group_cache.initialized) return; + + spinlock_lock(&group_cache.spinlock); + + for(SIMPLE_HASHTABLE_SLOT_GROUPNAMES_CACHE *sl = simple_hashtable_first_read_only_GROUPNAMES_CACHE(&group_cache.ht); + sl; + sl = simple_hashtable_next_read_only_GROUPNAMES_CACHE(&group_cache.ht, sl)) { + CACHED_GROUPNAME *cg = SIMPLE_HASHTABLE_SLOT_DATA(sl); + if(cg && cg->version && cg->version < 
version) { + string_freez(cg->groupname); + freez(cg); + simple_hashtable_del_slot_GROUPNAMES_CACHE(&group_cache.ht, sl); + } + } + + spinlock_unlock(&group_cache.spinlock); +} diff --git a/src/libnetdata/os/system-maps/cached-gid-groupname.h b/src/libnetdata/os/system-maps/cached-gid-groupname.h new file mode 100644 index 000000000..81a62523e --- /dev/null +++ b/src/libnetdata/os/system-maps/cached-gid-groupname.h @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_CACHED_UID_GROUPNAME_H +#define NETDATA_CACHED_UID_GROUPNAME_H + +#include "libnetdata/libnetdata.h" + +struct netdata_string; + +typedef struct { + uint32_t version; + gid_t gid; + struct netdata_string *groupname; +} CACHED_GROUPNAME; + +void cached_groupname_populate_by_gid(gid_t gid, const char *groupname, uint32_t version); +CACHED_GROUPNAME cached_groupname_get_by_gid(gid_t gid); +void cached_groupname_release(CACHED_GROUPNAME cg); +void cached_groupnames_delete_old_versions(uint32_t version); + +void cached_groupnames_init(void); +void cached_groupnames_destroy(void); + +#endif //NETDATA_CACHED_UID_GROUPNAME_H diff --git a/src/libnetdata/os/system-maps/cached-sid-username.c b/src/libnetdata/os/system-maps/cached-sid-username.c new file mode 100644 index 000000000..a0f90c546 --- /dev/null +++ b/src/libnetdata/os/system-maps/cached-sid-username.c @@ -0,0 +1,200 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "../../libnetdata.h" + +#if defined(OS_WINDOWS) +#include "cached-sid-username.h" + +typedef struct { + size_t len; + uint8_t sid[]; +} SID_KEY; + +typedef struct { + // IMPORTANT: + // This is malloc'd ! You have to manually set fields to zero. + + STRING *account; + STRING *domain; + STRING *full; + STRING *sid_str; + + // this needs to be last, because of its variable size + SID_KEY key; +} SID_VALUE; + +#define SIMPLE_HASHTABLE_NAME _SID +#define SIMPLE_HASHTABLE_VALUE_TYPE SID_VALUE +#define SIMPLE_HASHTABLE_KEY_TYPE SID_KEY +#define SIMPLE_HASHTABLE_VALUE2KEY_FUNCTION sid_value_to_key +#define SIMPLE_HASHTABLE_COMPARE_KEYS_FUNCTION sid_cache_compar +#define SIMPLE_HASHTABLE_SAMPLE_IMPLEMENTATION 1 +#include "libnetdata/simple_hashtable/simple_hashtable.h" + +static struct { + SPINLOCK spinlock; + struct simple_hashtable_SID hashtable; +} sid_globals = { + .spinlock = NETDATA_SPINLOCK_INITIALIZER, + .hashtable = { 0 }, +}; + +static inline SID_KEY *sid_value_to_key(SID_VALUE *s) { + return &s->key; +} + +static inline bool sid_cache_compar(SID_KEY *a, SID_KEY *b) { + return a->len == b->len && memcmp(&a->sid, &b->sid, a->len) == 0; +} + +void cached_sid_username_init(void) { + simple_hashtable_init_SID(&sid_globals.hashtable, 100); +} + +static char *account2utf8(const wchar_t *user) { + static __thread char buffer[256]; + if(utf16_to_utf8(buffer, sizeof(buffer), user, -1, NULL) == 0) + buffer[0] = '\0'; + return buffer; +} + +static char *domain2utf8(const wchar_t *domain) { + static __thread char buffer[256]; + if(utf16_to_utf8(buffer, sizeof(buffer), domain, -1, NULL) == 0) + buffer[0] = '\0'; + return buffer; +} + +static void lookup_user_in_system(SID_VALUE *sv) { + static __thread wchar_t account_unicode[256]; + static __thread wchar_t domain_unicode[256]; + static __thread char tmp[512 + 2]; + + DWORD account_name_size = sizeof(account_unicode) / sizeof(account_unicode[0]); + DWORD domain_name_size = sizeof(domain_unicode) / sizeof(domain_unicode[0]); + SID_NAME_USE sid_type; + + if (LookupAccountSidW(NULL, sv->key.sid, account_unicode, &account_name_size, 
domain_unicode, &domain_name_size, &sid_type)) { + const char *account = account2utf8(account_unicode); + const char *domain = domain2utf8(domain_unicode); + snprintfz(tmp, sizeof(tmp), "%s\\%s", domain, account); + sv->domain = string_strdupz(domain); + sv->account = string_strdupz(account); + sv->full = string_strdupz(tmp); + } + else { + sv->domain = NULL; + sv->account = NULL; + sv->full = NULL; + } + + wchar_t *sid_string = NULL; + if (ConvertSidToStringSidW(sv->key.sid, &sid_string)) + sv->sid_str = string_strdupz(account2utf8(sid_string)); + else + sv->sid_str = NULL; +} + +static SID_VALUE *lookup_or_convert_user_id_to_name_lookup(PSID sid) { + if(!sid || !IsValidSid(sid)) + return NULL; + + size_t size = GetLengthSid(sid); + + size_t tmp_size = sizeof(SID_VALUE) + size; + size_t tmp_key_size = sizeof(SID_KEY) + size; + uint8_t buf[tmp_size]; + SID_VALUE *tmp = (SID_VALUE *)&buf; + memcpy(&tmp->key.sid, sid, size); + tmp->key.len = size; + + spinlock_lock(&sid_globals.spinlock); + SID_VALUE *found = simple_hashtable_get_SID(&sid_globals.hashtable, &tmp->key, tmp_key_size); + spinlock_unlock(&sid_globals.spinlock); + if(found) return found; + + // allocate the SID_VALUE + found = mallocz(tmp_size); + memcpy(found, buf, tmp_size); + + lookup_user_in_system(found); + + // add it to the cache + spinlock_lock(&sid_globals.spinlock); + simple_hashtable_set_SID(&sid_globals.hashtable, &found->key, tmp_key_size, found); + spinlock_unlock(&sid_globals.spinlock); + + return found; +} + +bool cached_sid_to_account_domain_sidstr(PSID sid, TXT_UTF8 *dst_account, TXT_UTF8 *dst_domain, TXT_UTF8 *dst_sid_str) { + SID_VALUE *found = lookup_or_convert_user_id_to_name_lookup(sid); + + if(found) { + if (found->account) { + txt_utf8_resize(dst_account, string_strlen(found->account) + 1, false); + memcpy(dst_account->data, string2str(found->account), string_strlen(found->account) + 1); + dst_account->used = string_strlen(found->account) + 1; + } + else + txt_utf8_empty(dst_account); + + if (found->domain) { + txt_utf8_resize(dst_domain, string_strlen(found->domain) + 1, false); + memcpy(dst_domain->data, string2str(found->domain), string_strlen(found->domain) + 1); + dst_domain->used = string_strlen(found->domain) + 1; + } + else + txt_utf8_empty(dst_domain); + + if (found->sid_str) { + txt_utf8_resize(dst_sid_str, string_strlen(found->sid_str) + 1, false); + memcpy(dst_sid_str->data, string2str(found->sid_str), string_strlen(found->sid_str) + 1); + dst_sid_str->used = string_strlen(found->sid_str) + 1; + } + else + txt_utf8_empty(dst_sid_str); + + return true; + } + + txt_utf8_empty(dst_account); + txt_utf8_empty(dst_domain); + txt_utf8_empty(dst_sid_str); + return false; +} + +bool cached_sid_to_buffer_append(PSID sid, BUFFER *dst, const char *prefix) { + SID_VALUE *found = lookup_or_convert_user_id_to_name_lookup(sid); + size_t added = 0; + + if(found) { + if (found->full) { + if (prefix && *prefix) + buffer_strcat(dst, prefix); + + buffer_fast_strcat(dst, string2str(found->full), string_strlen(found->full)); + added++; + } + if (found->sid_str) { + if (prefix && *prefix) + buffer_strcat(dst, prefix); + + buffer_fast_strcat(dst, string2str(found->sid_str), string_strlen(found->sid_str)); + added++; + } + } + + return added > 0; +} + +STRING *cached_sid_fullname_or_sid_str(PSID sid) { + SID_VALUE *found = lookup_or_convert_user_id_to_name_lookup(sid); + if(found) { + if(found->full) return string_dup(found->full); + return string_dup(found->sid_str); + } + return NULL; +} + +#endif \ No newline at end 
of file diff --git a/src/libnetdata/os/system-maps/cached-sid-username.h b/src/libnetdata/os/system-maps/cached-sid-username.h new file mode 100644 index 000000000..4077cad11 --- /dev/null +++ b/src/libnetdata/os/system-maps/cached-sid-username.h @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_CACHED_SID_USERNAME_H +#define NETDATA_CACHED_SID_USERNAME_H + +#include "../../libnetdata.h" + +#if defined(OS_WINDOWS) +#include "../../string/utf8.h" + +bool cached_sid_to_account_domain_sidstr(void *sid, TXT_UTF8 *dst_account, TXT_UTF8 *dst_domain, TXT_UTF8 *dst_sid_str); +bool cached_sid_to_buffer_append(void *sid, BUFFER *dst, const char *prefix); +void cached_sid_username_init(void); +STRING *cached_sid_fullname_or_sid_str(void *sid); +#endif + +#endif //NETDATA_CACHED_SID_USERNAME_H diff --git a/src/libnetdata/os/system-maps/cached-uid-username.c b/src/libnetdata/os/system-maps/cached-uid-username.c new file mode 100644 index 000000000..35d93f2f0 --- /dev/null +++ b/src/libnetdata/os/system-maps/cached-uid-username.c @@ -0,0 +1,149 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "cached-uid-username.h" + +// -------------------------------------------------------------------------------------------------------------------- +// hashtable for caching uid to username mappings +// key is the uid, value is username (STRING) + +#define SIMPLE_HASHTABLE_KEY_TYPE uid_t +#define SIMPLE_HASHTABLE_VALUE_TYPE CACHED_USERNAME +#define SIMPLE_HASHTABLE_VALUE2KEY_FUNCTION cached_username_to_uid_ptr +#define SIMPLE_HASHTABLE_COMPARE_KEYS_FUNCTION compar_uid_ptr +#define SIMPLE_HASHTABLE_NAME _USERNAMES_CACHE +#include "libnetdata/simple_hashtable/simple_hashtable.h" + +static struct { + bool initialized; + SPINLOCK spinlock; + SIMPLE_HASHTABLE_USERNAMES_CACHE ht; +} user_cache = { + .initialized = false, + .spinlock = NETDATA_SPINLOCK_INITIALIZER, + .ht = { 0 }, +}; + +static uid_t *cached_username_to_uid_ptr(CACHED_USERNAME *cu) { + return &cu->uid; +} + +static bool compar_uid_ptr(uid_t *a, uid_t *b) { + return *a == *b; +} + +void cached_username_populate_by_uid(uid_t uid, const char *username, uint32_t version) { + internal_fatal(!user_cache.initialized, "system-users cache needs to be initialized"); + if(!username || !*username) return; + + spinlock_lock(&user_cache.spinlock); + + XXH64_hash_t hash = XXH3_64bits(&uid, sizeof(uid)); + SIMPLE_HASHTABLE_SLOT_USERNAMES_CACHE *sl = simple_hashtable_get_slot_USERNAMES_CACHE(&user_cache.ht, hash, &uid, true); + CACHED_USERNAME *cu = SIMPLE_HASHTABLE_SLOT_DATA(sl); + if(!cu || (cu->version && version > cu->version)) { + internal_fatal(cu && cu->uid != uid, "invalid uid matched from cache"); + + if(cu) + string_freez(cu->username); + else + cu = callocz(1, sizeof(*cu)); + + cu->version = version; + cu->uid = uid; + cu->username = string_strdupz(username); + simple_hashtable_set_slot_USERNAMES_CACHE(&user_cache.ht, sl, hash, cu); + } + + spinlock_unlock(&user_cache.spinlock); +} + +CACHED_USERNAME cached_username_get_by_uid(uid_t uid) { + internal_fatal(!user_cache.initialized, "system-users cache needs to be initialized"); + + spinlock_lock(&user_cache.spinlock); + + XXH64_hash_t hash = XXH3_64bits(&uid, sizeof(uid)); + SIMPLE_HASHTABLE_SLOT_USERNAMES_CACHE *sl = simple_hashtable_get_slot_USERNAMES_CACHE(&user_cache.ht, hash, &uid, true); + CACHED_USERNAME *cu = SIMPLE_HASHTABLE_SLOT_DATA(sl); + if(!cu) { + cu = callocz(1, sizeof(*cu)); + + static char tmp[1024]; // we are inside a global spinlock - it is ok to be 
static +    struct passwd pw, *result = NULL; + +    if (getpwuid_r(uid, &pw, tmp, sizeof(tmp), &result) != 0 || !result || !pw.pw_name || !(*pw.pw_name)) { +        char name[UINT64_MAX_LENGTH]; +        print_uint64(name, uid); +        cu->username = string_strdupz(name); +    } +    else +        cu->username = string_strdupz(pw.pw_name); + +        cu->uid = uid; +        simple_hashtable_set_slot_USERNAMES_CACHE(&user_cache.ht, sl, hash, cu); +    } + +    internal_fatal(cu->uid != uid, "invalid uid matched from cache"); + +    CACHED_USERNAME rc = { +        .version = cu->version, +        .uid = cu->uid, +        .username = string_dup(cu->username), +    }; + +    spinlock_unlock(&user_cache.spinlock); +    return rc; +} + +void cached_username_release(CACHED_USERNAME cu) { +    string_freez(cu.username); +} + +void cached_usernames_init(void) { +    if(user_cache.initialized) return; +    user_cache.initialized = true; + +    spinlock_init(&user_cache.spinlock); +    simple_hashtable_init_USERNAMES_CACHE(&user_cache.ht, 100); +} + +void cached_usernames_destroy(void) { +    if(!user_cache.initialized) return; + +    spinlock_lock(&user_cache.spinlock); + +    for(SIMPLE_HASHTABLE_SLOT_USERNAMES_CACHE *sl = simple_hashtable_first_read_only_USERNAMES_CACHE(&user_cache.ht); +        sl; +        sl = simple_hashtable_next_read_only_USERNAMES_CACHE(&user_cache.ht, sl)) { +        CACHED_USERNAME *u = SIMPLE_HASHTABLE_SLOT_DATA(sl); +        if(u) { +            string_freez(u->username); +            freez(u); +            // simple_hashtable_del_slot_USERNAMES_CACHE(&user_cache.ht, sl); +        } +    } + +    simple_hashtable_destroy_USERNAMES_CACHE(&user_cache.ht); +    user_cache.initialized = false; + +    spinlock_unlock(&user_cache.spinlock); +} + +void cached_usernames_delete_old_versions(uint32_t version) { +    if(!user_cache.initialized) return; + +    spinlock_lock(&user_cache.spinlock); + +    for(SIMPLE_HASHTABLE_SLOT_USERNAMES_CACHE *sl = simple_hashtable_first_read_only_USERNAMES_CACHE(&user_cache.ht); +        sl; +        sl = simple_hashtable_next_read_only_USERNAMES_CACHE(&user_cache.ht, sl)) { +        CACHED_USERNAME *cu = SIMPLE_HASHTABLE_SLOT_DATA(sl); +        if(cu && cu->version && cu->version < version) { +            string_freez(cu->username); +            freez(cu); +            simple_hashtable_del_slot_USERNAMES_CACHE(&user_cache.ht, sl); +        } +    } + +    spinlock_unlock(&user_cache.spinlock); +} diff --git a/src/libnetdata/os/system-maps/cached-uid-username.h b/src/libnetdata/os/system-maps/cached-uid-username.h new file mode 100644 index 000000000..b7c52c7c4 --- /dev/null +++ b/src/libnetdata/os/system-maps/cached-uid-username.h @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_CACHED_UID_USERNAME_H +#define NETDATA_CACHED_UID_USERNAME_H + +#include "libnetdata/libnetdata.h" + +struct netdata_string; + +typedef struct { +    uint32_t version; +    uid_t uid; +    struct netdata_string *username; +} CACHED_USERNAME; + +void cached_username_populate_by_uid(uid_t uid, const char *username, uint32_t version); +CACHED_USERNAME cached_username_get_by_uid(uid_t uid); +void cached_username_release(CACHED_USERNAME cu); + +void cached_usernames_init(void); +void cached_usernames_destroy(void); +void cached_usernames_delete_old_versions(uint32_t version); + +#endif //NETDATA_CACHED_UID_USERNAME_H diff --git a/src/libnetdata/os/system-maps/system-services.h b/src/libnetdata/os/system-maps/system-services.h new file mode 100644 index 000000000..5d3592bbf --- /dev/null +++ b/src/libnetdata/os/system-maps/system-services.h @@ -0,0 +1,96 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_SYSTEM_SERVICES_H +#define NETDATA_SYSTEM_SERVICES_H + +#include "libnetdata/libnetdata.h" +#include <netdb.h> + +// 
-------------------------------------------------------------------------------------------------------------------- +// hashtable for caching port and protocol to service name mappings +// key is the combination of protocol and port packed into an uint64_t, value is service name (STRING) + +#define SIMPLE_HASHTABLE_VALUE_TYPE STRING +#define SIMPLE_HASHTABLE_NAME _SERVICENAMES_CACHE +#include "libnetdata/simple_hashtable/simple_hashtable.h" + +typedef struct servicenames_cache { + SPINLOCK spinlock; + SIMPLE_HASHTABLE_SERVICENAMES_CACHE ht; +} SERVICENAMES_CACHE; + +static inline const char *system_servicenames_ipproto2str(uint16_t ipproto) { + return (ipproto == IPPROTO_TCP) ? "tcp" : "udp"; +} + +static inline const char *static_portnames(uint16_t port, uint16_t ipproto) { + if(port == 19999 && ipproto == IPPROTO_TCP) + return "netdata"; + + if(port == 8125) + return "statsd"; + + return NULL; +} + +static inline STRING *system_servicenames_cache_lookup(SERVICENAMES_CACHE *sc, uint16_t port, uint16_t ipproto) { + struct { + uint16_t ipproto; + uint16_t port; + } key = { + .ipproto = ipproto, + .port = port, + }; + XXH64_hash_t hash = XXH3_64bits(&key, sizeof(key)); + + spinlock_lock(&sc->spinlock); + + SIMPLE_HASHTABLE_SLOT_SERVICENAMES_CACHE *sl = simple_hashtable_get_slot_SERVICENAMES_CACHE(&sc->ht, hash, &key, true); + STRING *s = SIMPLE_HASHTABLE_SLOT_DATA(sl); + if (!s) { + const char *st = static_portnames(port, ipproto); + if(st) { + s = string_strdupz(st); + } + else { + struct servent *se = getservbyport(htons(port), system_servicenames_ipproto2str(ipproto)); + + if (!se || !se->s_name) { + char name[50]; + snprintfz(name, sizeof(name), "%u/%s", port, system_servicenames_ipproto2str(ipproto)); + s = string_strdupz(name); + } + else + s = string_strdupz(se->s_name); + } + + simple_hashtable_set_slot_SERVICENAMES_CACHE(&sc->ht, sl, hash, s); + } + + s = string_dup(s); + spinlock_unlock(&sc->spinlock); + return s; +} + +static inline SERVICENAMES_CACHE *system_servicenames_cache_init(void) { + SERVICENAMES_CACHE *sc = callocz(1, sizeof(*sc)); + spinlock_init(&sc->spinlock); + simple_hashtable_init_SERVICENAMES_CACHE(&sc->ht, 100); + return sc; +} + +static inline void system_servicenames_cache_destroy(SERVICENAMES_CACHE *sc) { + spinlock_lock(&sc->spinlock); + + for (SIMPLE_HASHTABLE_SLOT_SERVICENAMES_CACHE *sl = simple_hashtable_first_read_only_SERVICENAMES_CACHE(&sc->ht); + sl; + sl = simple_hashtable_next_read_only_SERVICENAMES_CACHE(&sc->ht, sl)) { + STRING *s = SIMPLE_HASHTABLE_SLOT_DATA(sl); + string_freez(s); + } + + simple_hashtable_destroy_SERVICENAMES_CACHE(&sc->ht); + freez(sc); +} + +#endif //NETDATA_SYSTEM_SERVICES_H diff --git a/src/libnetdata/os/timestamps.c b/src/libnetdata/os/timestamps.c new file mode 100644 index 000000000..602899d34 --- /dev/null +++ b/src/libnetdata/os/timestamps.c @@ -0,0 +1,4 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "libnetdata/libnetdata.h" +#include "timestamps.h" diff --git a/src/libnetdata/os/timestamps.h b/src/libnetdata/os/timestamps.h new file mode 100644 index 000000000..3737a4f40 --- /dev/null +++ b/src/libnetdata/os/timestamps.h @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef LIBNETDATA_OS_TIMESTAMPS_H +#define LIBNETDATA_OS_TIMESTAMPS_H + +// Windows file time starts on January 1, 1601, Unix epoch starts on January 1, 1970 +// Difference in 100-nanosecond intervals between these two dates is 116444736000000000ULL + +// Convert Windows file time (in 100-nanosecond intervals) to Unix 
epoch in nanoseconds +#define os_windows_ulonglong_to_unix_epoch_ns(ft) (((uint64_t)(ft) - 116444736000000000ULL) * 100ULL) + +// Convert Unix epoch time (in nanoseconds) to Windows file time (in 100-nanosecond intervals) +#define os_unix_epoch_ns_to_windows_ulonglong(ns) (((uint64_t)(ns) / 100ULL) + 116444736000000000ULL) + +#if defined(OS_WINDOWS) +// Convert FILETIME to Unix epoch in nanoseconds +#define os_filetime_to_unix_epoch_ns(ft) \ + ((((uint64_t)(ft).dwHighDateTime << 32 | (ft).dwLowDateTime) - 116444736000000000ULL) * 100ULL) + +// Convert Unix epoch in nanoseconds to FILETIME (returns FILETIME) +#define os_unix_epoch_ns_to_filetime(ns) \ + ({ \ + uint64_t temp = ((uint64_t)(ns) / 100ULL) + 116444736000000000ULL; \ + FILETIME ft; \ + ft.dwLowDateTime = (uint32_t)(temp & 0xFFFFFFFF); \ + ft.dwHighDateTime = (uint32_t)(temp >> 32); \ + ft; \ + }) + +// Convert Unix epoch in microseconds to FILETIME (returns FILETIME) +#define os_unix_epoch_ut_to_filetime(ns) \ + ({ \ + uint64_t temp = ((uint64_t)(ns) * 10ULL) + 116444736000000000ULL; \ + FILETIME ft; \ + ft.dwLowDateTime = (uint32_t)(temp & 0xFFFFFFFF); \ + ft.dwHighDateTime = (uint32_t)(temp >> 32); \ + ft; \ + }) + +#endif //OS_WINDOWS + +#endif //LIBNETDATA_OS_TIMESTAMPS_H diff --git a/src/libnetdata/os/tinysleep.c b/src/libnetdata/os/tinysleep.c deleted file mode 100644 index f04cbdadc..000000000 --- a/src/libnetdata/os/tinysleep.c +++ /dev/null @@ -1,23 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include "../libnetdata.h" - -#ifdef OS_WINDOWS -#include - -void tinysleep(void) { - // Improve the system timer resolution to 1 ms - timeBeginPeriod(1); - - // Sleep for the desired duration - Sleep(1); - - // Reset the system timer resolution - timeEndPeriod(1); -} -#else -void tinysleep(void) { - static const struct timespec ns = { .tv_sec = 0, .tv_nsec = 1 }; - nanosleep(&ns, NULL); -} -#endif diff --git a/src/libnetdata/os/tinysleep.h b/src/libnetdata/os/tinysleep.h deleted file mode 100644 index 480575a3a..000000000 --- a/src/libnetdata/os/tinysleep.h +++ /dev/null @@ -1,8 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#ifndef NETDATA_TINYSLEEP_H -#define NETDATA_TINYSLEEP_H - -void tinysleep(void); - -#endif //NETDATA_TINYSLEEP_H diff --git a/src/libnetdata/os/uuid_generate.c b/src/libnetdata/os/uuid_generate.c index 4a7a9b6bc..6019f8844 100644 --- a/src/libnetdata/os/uuid_generate.c +++ b/src/libnetdata/os/uuid_generate.c @@ -6,8 +6,6 @@ #undef uuid_generate_time #ifdef OS_WINDOWS -#include - void os_uuid_generate(void *out) { RPC_STATUS status = UuidCreate(out); while (status != RPC_S_OK && status != RPC_S_UUID_LOCAL_ONLY) { diff --git a/src/libnetdata/os/windows-perflib/perflib-dump.c b/src/libnetdata/os/windows-perflib/perflib-dump.c new file mode 100644 index 000000000..eaccb7827 --- /dev/null +++ b/src/libnetdata/os/windows-perflib/perflib-dump.c @@ -0,0 +1,531 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "perflib.h" + +#if defined(OS_WINDOWS) +static const char *getCounterType(DWORD CounterType) { + switch (CounterType) { + case PERF_COUNTER_COUNTER: + return "PERF_COUNTER_COUNTER"; + + case PERF_COUNTER_TIMER: + return "PERF_COUNTER_TIMER"; + + case PERF_COUNTER_QUEUELEN_TYPE: + return "PERF_COUNTER_QUEUELEN_TYPE"; + + case PERF_COUNTER_LARGE_QUEUELEN_TYPE: + return "PERF_COUNTER_LARGE_QUEUELEN_TYPE"; + + case PERF_COUNTER_100NS_QUEUELEN_TYPE: + return "PERF_COUNTER_100NS_QUEUELEN_TYPE"; + + case PERF_COUNTER_OBJ_TIME_QUEUELEN_TYPE: + return 
"PERF_COUNTER_OBJ_TIME_QUEUELEN_TYPE"; + + case PERF_COUNTER_BULK_COUNT: + return "PERF_COUNTER_BULK_COUNT"; + + case PERF_COUNTER_TEXT: + return "PERF_COUNTER_TEXT"; + + case PERF_COUNTER_RAWCOUNT: + return "PERF_COUNTER_RAWCOUNT"; + + case PERF_COUNTER_LARGE_RAWCOUNT: + return "PERF_COUNTER_LARGE_RAWCOUNT"; + + case PERF_COUNTER_RAWCOUNT_HEX: + return "PERF_COUNTER_RAWCOUNT_HEX"; + + case PERF_COUNTER_LARGE_RAWCOUNT_HEX: + return "PERF_COUNTER_LARGE_RAWCOUNT_HEX"; + + case PERF_SAMPLE_FRACTION: + return "PERF_SAMPLE_FRACTION"; + + case PERF_SAMPLE_COUNTER: + return "PERF_SAMPLE_COUNTER"; + + case PERF_COUNTER_NODATA: + return "PERF_COUNTER_NODATA"; + + case PERF_COUNTER_TIMER_INV: + return "PERF_COUNTER_TIMER_INV"; + + case PERF_SAMPLE_BASE: + return "PERF_SAMPLE_BASE"; + + case PERF_AVERAGE_TIMER: + return "PERF_AVERAGE_TIMER"; + + case PERF_AVERAGE_BASE: + return "PERF_AVERAGE_BASE"; + + case PERF_AVERAGE_BULK: + return "PERF_AVERAGE_BULK"; + + case PERF_OBJ_TIME_TIMER: + return "PERF_OBJ_TIME_TIMER"; + + case PERF_100NSEC_TIMER: + return "PERF_100NSEC_TIMER"; + + case PERF_100NSEC_TIMER_INV: + return "PERF_100NSEC_TIMER_INV"; + + case PERF_COUNTER_MULTI_TIMER: + return "PERF_COUNTER_MULTI_TIMER"; + + case PERF_COUNTER_MULTI_TIMER_INV: + return "PERF_COUNTER_MULTI_TIMER_INV"; + + case PERF_COUNTER_MULTI_BASE: + return "PERF_COUNTER_MULTI_BASE"; + + case PERF_100NSEC_MULTI_TIMER: + return "PERF_100NSEC_MULTI_TIMER"; + + case PERF_100NSEC_MULTI_TIMER_INV: + return "PERF_100NSEC_MULTI_TIMER_INV"; + + case PERF_RAW_FRACTION: + return "PERF_RAW_FRACTION"; + + case PERF_LARGE_RAW_FRACTION: + return "PERF_LARGE_RAW_FRACTION"; + + case PERF_RAW_BASE: + return "PERF_RAW_BASE"; + + case PERF_LARGE_RAW_BASE: + return "PERF_LARGE_RAW_BASE"; + + case PERF_ELAPSED_TIME: + return "PERF_ELAPSED_TIME"; + + case PERF_COUNTER_HISTOGRAM_TYPE: + return "PERF_COUNTER_HISTOGRAM_TYPE"; + + case PERF_COUNTER_DELTA: + return "PERF_COUNTER_DELTA"; + + case PERF_COUNTER_LARGE_DELTA: + return "PERF_COUNTER_LARGE_DELTA"; + + case PERF_PRECISION_SYSTEM_TIMER: + return "PERF_PRECISION_SYSTEM_TIMER"; + + case PERF_PRECISION_100NS_TIMER: + return "PERF_PRECISION_100NS_TIMER"; + + case PERF_PRECISION_OBJECT_TIMER: + return "PERF_PRECISION_OBJECT_TIMER"; + + default: + return "UNKNOWN_COUNTER_TYPE"; + } +} + +static const char *getCounterDescription(DWORD CounterType) { + switch (CounterType) { + case PERF_COUNTER_COUNTER: + return "32-bit Counter. Divide delta by delta time. Display suffix: \"/sec\""; + + case PERF_COUNTER_TIMER: + return "64-bit Timer. Divide delta by delta time. Display suffix: \"%\""; + + case PERF_COUNTER_QUEUELEN_TYPE: + case PERF_COUNTER_LARGE_QUEUELEN_TYPE: + return "Queue Length Space-Time Product. Divide delta by delta time. No Display Suffix"; + + case PERF_COUNTER_100NS_QUEUELEN_TYPE: + return "Queue Length Space-Time Product using 100 Ns timebase. Divide delta by delta time. No Display Suffix"; + + case PERF_COUNTER_OBJ_TIME_QUEUELEN_TYPE: + return "Queue Length Space-Time Product using Object specific timebase. Divide delta by delta time. No Display Suffix."; + + case PERF_COUNTER_BULK_COUNT: + return "64-bit Counter. Divide delta by delta time. Display Suffix: \"/sec\""; + + case PERF_COUNTER_TEXT: + return "Unicode text Display as text."; + + case PERF_COUNTER_RAWCOUNT: + case PERF_COUNTER_LARGE_RAWCOUNT: + return "A counter which should not be time averaged on display (such as an error counter on a serial line). Display as is. 
No Display Suffix."; + + case PERF_COUNTER_RAWCOUNT_HEX: + case PERF_COUNTER_LARGE_RAWCOUNT_HEX: + return "Special case for RAWCOUNT which should be displayed in hex. A counter which should not be time averaged on display (such as an error counter on a serial line). Display as is. No Display Suffix."; + + case PERF_SAMPLE_FRACTION: + return "A count which is either 1 or 0 on each sampling interrupt (% busy). Divide delta by delta base. Display Suffix: \"%\""; + + case PERF_SAMPLE_COUNTER: + return "A count which is sampled on each sampling interrupt (queue length). Divide delta by delta time. No Display Suffix."; + + case PERF_COUNTER_NODATA: + return "A label: no data is associated with this counter (it has 0 length). Do not display."; + + case PERF_COUNTER_TIMER_INV: + return "64-bit Timer inverse (e.g., idle is measured, but display busy %). Display 100 - delta divided by delta time. Display suffix: \"%\""; + + case PERF_SAMPLE_BASE: + return "The divisor for a sample, used with the previous counter to form a sampled %. You must check for >0 before dividing by this! This counter will directly follow the numerator counter. It should not be displayed to the user."; + + case PERF_AVERAGE_TIMER: + return "A timer which, when divided by an average base, produces a time in seconds which is the average time of some operation. This timer times total operations, and the base is the number of operations. Display Suffix: \"sec\""; + + case PERF_AVERAGE_BASE: + return "Used as the denominator in the computation of time or count averages. Must directly follow the numerator counter. Not displayed to the user."; + + case PERF_AVERAGE_BULK: + return "A bulk count which, when divided (typically) by the number of operations, gives (typically) the number of bytes per operation. No Display Suffix."; + + case PERF_OBJ_TIME_TIMER: + return "64-bit Timer in object specific units. Display delta divided by delta time as returned in the object type header structure. Display suffix: \"%\""; + + case PERF_100NSEC_TIMER: + return "64-bit Timer in 100 nsec units. Display delta divided by delta time. Display suffix: \"%\""; + + case PERF_100NSEC_TIMER_INV: + return "64-bit Timer inverse (e.g., idle is measured, but display busy %). Display 100 - delta divided by delta time. Display suffix: \"%\""; + + case PERF_COUNTER_MULTI_TIMER: + return "64-bit Timer. Divide delta by delta time. Display suffix: \"%\". Timer for multiple instances, so result can exceed 100%."; + + case PERF_COUNTER_MULTI_TIMER_INV: + return "64-bit Timer inverse (e.g., idle is measured, but display busy %). Display 100 * _MULTI_BASE - delta divided by delta time. Display suffix: \"%\" Timer for multiple instances, so result can exceed 100%. Followed by a counter of type _MULTI_BASE."; + + case PERF_COUNTER_MULTI_BASE: + return "Number of instances to which the preceding _MULTI_..._INV counter applies. Used as a factor to get the percentage."; + + case PERF_100NSEC_MULTI_TIMER: + return "64-bit Timer in 100 nsec units. Display delta divided by delta time. Display suffix: \"%\" Timer for multiple instances, so result can exceed 100%."; + + case PERF_100NSEC_MULTI_TIMER_INV: + return "64-bit Timer inverse (e.g., idle is measured, but display busy %). Display 100 * _MULTI_BASE - delta divided by delta time. Display suffix: \"%\" Timer for multiple instances, so result can exceed 100%. 
Followed by a counter of type _MULTI_BASE."; + + case PERF_LARGE_RAW_FRACTION: + case PERF_RAW_FRACTION: + return "Indicates the data is a fraction of the following counter which should not be time averaged on display (such as free space over total space.) Display as is. Display the quotient as \"%\""; + + case PERF_RAW_BASE: + case PERF_LARGE_RAW_BASE: + return "Indicates the data is a base for the preceding counter which should not be time averaged on display (such as free space over total space.)"; + + case PERF_ELAPSED_TIME: + return "The data collected in this counter is actually the start time of the item being measured. For display, this data is subtracted from the sample time to yield the elapsed time as the difference between the two. In the definition below, the PerfTime field of the Object contains the sample time as indicated by the PERF_OBJECT_TIMER bit and the difference is scaled by the PerfFreq of the Object to convert the time units into seconds."; + + case PERF_COUNTER_HISTOGRAM_TYPE: + return "Counter type can be used with the preceding types to define a range of values to be displayed in a histogram."; + + case PERF_COUNTER_DELTA: + case PERF_COUNTER_LARGE_DELTA: + return "This counter is used to display the difference from one sample to the next. The counter value is a constantly increasing number and the value displayed is the difference between the current value and the previous value. Negative numbers are not allowed which shouldn't be a problem as long as the counter value is increasing or unchanged."; + + case PERF_PRECISION_SYSTEM_TIMER: + return "The precision counters are timers that consist of two counter values:\r\n\t1) the count of elapsed time of the event being monitored\r\n\t2) the \"clock\" time in the same units\r\nthe precision timers are used where the standard system timers are not precise enough for accurate readings. It's assumed that the service providing the data is also providing a timestamp at the same time which will eliminate any error that may occur since some small and variable time elapses between the time the system timestamp is captured and when the data is collected from the performance DLL. Only in extreme cases has this been observed to be problematic.\r\nwhen using this type of timer, the definition of the PERF_PRECISION_TIMESTAMP counter must immediately follow the definition of the PERF_PRECISION_*_TIMER in the Object header\r\nThe timer used has the same frequency as the System Performance Timer"; + + case PERF_PRECISION_100NS_TIMER: + return "The precision counters are timers that consist of two counter values:\r\n\t1) the count of elapsed time of the event being monitored\r\n\t2) the \"clock\" time in the same units\r\nthe precision timers are used where the standard system timers are not precise enough for accurate readings. It's assumed that the service providing the data is also providing a timestamp at the same time which will eliminate any error that may occur since some small and variable time elapses between the time the system timestamp is captured and when the data is collected from the performance DLL. 
Only in extreme cases has this been observed to be problematic.\r\nwhen using this type of timer, the definition of the PERF_PRECISION_TIMESTAMP counter must immediately follow the definition of the PERF_PRECISION_*_TIMER in the Object header\r\nThe timer used has the same frequency as the 100 NanoSecond Timer"; + + case PERF_PRECISION_OBJECT_TIMER: + return "The precision counters are timers that consist of two counter values:\r\n\t1) the count of elapsed time of the event being monitored\r\n\t2) the \"clock\" time in the same units\r\nthe precision timers are used where the standard system timers are not precise enough for accurate readings. It's assumed that the service providing the data is also providing a timestamp at the same time which will eliminate any error that may occur since some small and variable time elapses between the time the system timestamp is captured and when the data is collected from the performance DLL. Only in extreme cases has this been observed to be problematic.\r\nwhen using this type of timer, the definition of the PERF_PRECISION_TIMESTAMP counter must immediately follow the definition of the PERF_PRECISION_*_TIMER in the Object header\r\nThe timer used is of the frequency specified in the Object header's. PerfFreq field (PerfTime is ignored)"; + + default: + return ""; + } +} + +static const char *getCounterAlgorithm(DWORD CounterType) { + switch (CounterType) + { + case PERF_COUNTER_COUNTER: + case PERF_SAMPLE_COUNTER: + case PERF_COUNTER_BULK_COUNT: + return "(data1 - data0) / ((time1 - time0) / frequency)"; + + case PERF_COUNTER_QUEUELEN_TYPE: + case PERF_COUNTER_100NS_QUEUELEN_TYPE: + case PERF_COUNTER_OBJ_TIME_QUEUELEN_TYPE: + case PERF_COUNTER_LARGE_QUEUELEN_TYPE: + case PERF_AVERAGE_BULK: // normally not displayed + return "(data1 - data0) / (time1 - time0)"; + + case PERF_OBJ_TIME_TIMER: + case PERF_COUNTER_TIMER: + case PERF_100NSEC_TIMER: + case PERF_PRECISION_SYSTEM_TIMER: + case PERF_PRECISION_100NS_TIMER: + case PERF_PRECISION_OBJECT_TIMER: + case PERF_SAMPLE_FRACTION: + return "100 * (data1 - data0) / (time1 - time0)"; + + case PERF_COUNTER_TIMER_INV: + return "100 * (1 - ((data1 - data0) / (time1 - time0)))"; + + case PERF_100NSEC_TIMER_INV: + return "100 * (1- (data1 - data0) / (time1 - time0))"; + + case PERF_COUNTER_MULTI_TIMER: + return "100 * ((data1 - data0) / ((time1 - time0) / frequency1)) / multi1"; + + case PERF_100NSEC_MULTI_TIMER: + return "100 * ((data1 - data0) / (time1 - time0)) / multi1"; + + case PERF_COUNTER_MULTI_TIMER_INV: + case PERF_100NSEC_MULTI_TIMER_INV: + return "100 * (multi1 - ((data1 - data0) / (time1 - time0)))"; + + case PERF_COUNTER_RAWCOUNT: + case PERF_COUNTER_LARGE_RAWCOUNT: + return "data0"; + + case PERF_COUNTER_RAWCOUNT_HEX: + case PERF_COUNTER_LARGE_RAWCOUNT_HEX: + return "hex(data0)"; + + case PERF_COUNTER_DELTA: + case PERF_COUNTER_LARGE_DELTA: + return "data1 - data0"; + + case PERF_RAW_FRACTION: + case PERF_LARGE_RAW_FRACTION: + return "100 * data0 / time0"; + + case PERF_AVERAGE_TIMER: + return "((data1 - data0) / frequency1) / (time1 - time0)"; + + case PERF_ELAPSED_TIME: + return "(time0 - data0) / frequency0"; + + case PERF_COUNTER_TEXT: + case PERF_SAMPLE_BASE: + case PERF_AVERAGE_BASE: + case PERF_COUNTER_MULTI_BASE: + case PERF_RAW_BASE: + case PERF_COUNTER_NODATA: + case PERF_PRECISION_TIMESTAMP: + default: + return ""; + } +} + +void dumpSystemTime(BUFFER *wb, SYSTEMTIME *st) { + buffer_json_member_add_uint64(wb, "Year", st->wYear); + buffer_json_member_add_uint64(wb, "Month", st->wMonth); 
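+    // Note: SYSTEMTIME.wDayOfWeek is 0-based, with 0 = Sunday.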
+ buffer_json_member_add_uint64(wb, "DayOfWeek", st->wDayOfWeek); + buffer_json_member_add_uint64(wb, "Day", st->wDay); + buffer_json_member_add_uint64(wb, "Hour", st->wHour); + buffer_json_member_add_uint64(wb, "Minute", st->wMinute); + buffer_json_member_add_uint64(wb, "Second", st->wSecond); + buffer_json_member_add_uint64(wb, "Milliseconds", st->wMilliseconds); +} + +bool dumpDataCb(PERF_DATA_BLOCK *pDataBlock, void *data) { + char name[4096]; + if(!getSystemName(pDataBlock, name, sizeof(name))) + strncpyz(name, "[failed]", sizeof(name) - 1); + + BUFFER *wb = data; + buffer_json_member_add_string(wb, "SystemName", name); + + // Number of types of objects being reported + // Type: DWORD + buffer_json_member_add_int64(wb, "NumObjectTypes", pDataBlock->NumObjectTypes); + + buffer_json_member_add_int64(wb, "LittleEndian", pDataBlock->LittleEndian); + + // Version and Revision of these data structures. + // Version starts at 1. + // Revision starts at 0 for each Version. + // Type: DWORD + buffer_json_member_add_int64(wb, "Version", pDataBlock->Version); + buffer_json_member_add_int64(wb, "Revision", pDataBlock->Revision); + + // Object Title Index of default object to display when data from this system is retrieved + // (-1 = none, but this is not expected to be used) + // Type: LONG + buffer_json_member_add_int64(wb, "DefaultObject", pDataBlock->DefaultObject); + + // Performance counter frequency at the system under measurement + // Type: LARGE_INTEGER + buffer_json_member_add_int64(wb, "PerfFreq", pDataBlock->PerfFreq.QuadPart); + + // Performance counter value at the system under measurement + // Type: LARGE_INTEGER + buffer_json_member_add_int64(wb, "PerfTime", pDataBlock->PerfTime.QuadPart); + + // Performance counter time in 100 nsec units at the system under measurement + // Type: LARGE_INTEGER + buffer_json_member_add_int64(wb, "PerfTime100nSec", pDataBlock->PerfTime100nSec.QuadPart); + + // Time at the system under measurement in UTC + // Type: SYSTEMTIME + buffer_json_member_add_object(wb, "SystemTime"); + dumpSystemTime(wb, &pDataBlock->SystemTime); + buffer_json_object_close(wb); + + if(pDataBlock->NumObjectTypes) + buffer_json_member_add_array(wb, "Objects"); + + return true; +} + +static const char *GetDetailLevel(DWORD num) { + switch (num) { + case 100: + return "Novice (100)"; + case 200: + return "Advanced (200)"; + case 300: + return "Expert (300)"; + case 400: + return "Wizard (400)"; + + default: + return "Unknown"; + } +} + +bool dumpObjectCb(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, void *data) { + (void)pDataBlock; + BUFFER *wb = data; + if(!pObjectType) { + buffer_json_array_close(wb); // instances or counters + buffer_json_object_close(wb); // objectType + return true; + } + + buffer_json_add_array_item_object(wb); // objectType + buffer_json_member_add_int64(wb, "NameId", pObjectType->ObjectNameTitleIndex); + buffer_json_member_add_string(wb, "Name", RegistryFindNameByID(pObjectType->ObjectNameTitleIndex)); + buffer_json_member_add_int64(wb, "HelpId", pObjectType->ObjectHelpTitleIndex); + buffer_json_member_add_string(wb, "Help", RegistryFindHelpByID(pObjectType->ObjectHelpTitleIndex)); + buffer_json_member_add_int64(wb, "NumInstances", pObjectType->NumInstances); + buffer_json_member_add_int64(wb, "NumCounters", pObjectType->NumCounters); + buffer_json_member_add_int64(wb, "PerfTime", pObjectType->PerfTime.QuadPart); + buffer_json_member_add_int64(wb, "PerfFreq", pObjectType->PerfFreq.QuadPart); + buffer_json_member_add_int64(wb, "CodePage", 
pObjectType->CodePage); +    buffer_json_member_add_int64(wb, "DefaultCounter", pObjectType->DefaultCounter); +    buffer_json_member_add_string(wb, "DetailLevel", GetDetailLevel(pObjectType->DetailLevel)); + +    if(ObjectTypeHasInstances(pDataBlock, pObjectType)) +        buffer_json_member_add_array(wb, "Instances"); +    else +        buffer_json_member_add_array(wb, "Counters"); + +    return true; +} + +bool dumpInstanceCb(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, PERF_INSTANCE_DEFINITION *pInstance, void *data) { +    (void)pDataBlock; +    BUFFER *wb = data; +    if(!pInstance) { +        buffer_json_array_close(wb); // counters +        buffer_json_object_close(wb); // instance +        return true; +    } + +    char name[4096]; +    if(!getInstanceName(pDataBlock, pObjectType, pInstance, name, sizeof(name))) +        strncpyz(name, "[failed]", sizeof(name) - 1); + +    buffer_json_add_array_item_object(wb); +    buffer_json_member_add_string(wb, "Instance", name); +    buffer_json_member_add_int64(wb, "UniqueID", pInstance->UniqueID); +    buffer_json_member_add_array(wb, "Labels"); +    { +        buffer_json_add_array_item_object(wb); +        { +            buffer_json_member_add_string(wb, "key", RegistryFindNameByID(pObjectType->ObjectNameTitleIndex)); +            buffer_json_member_add_string(wb, "value", name); +        } +        buffer_json_object_close(wb); + +        if(pInstance->ParentObjectTitleIndex) { +            PERF_INSTANCE_DEFINITION *pi = pInstance; +            while(pi->ParentObjectTitleIndex) { +                PERF_OBJECT_TYPE *po = getObjectTypeByIndex(pDataBlock, pi->ParentObjectTitleIndex); +                pi = getInstanceByPosition(pDataBlock, po, pi->ParentObjectInstance); + +                if(!getInstanceName(pDataBlock, po, pi, name, sizeof(name))) +                    strncpyz(name, "[failed]", sizeof(name) - 1); + +                buffer_json_add_array_item_object(wb); +                { +                    buffer_json_member_add_string(wb, "key", RegistryFindNameByID(po->ObjectNameTitleIndex)); +                    buffer_json_member_add_string(wb, "value", name); +                } +                buffer_json_object_close(wb); +            } +        } +    } +    buffer_json_array_close(wb); // Labels + +    buffer_json_member_add_array(wb, "Counters"); +    return true; +} + +void dumpSample(BUFFER *wb, RAW_DATA *d) { +    buffer_json_member_add_object(wb, "Value"); +    buffer_json_member_add_uint64(wb, "data", d->Data); +    buffer_json_member_add_int64(wb, "time", d->Time); +    buffer_json_member_add_uint64(wb, "type", d->CounterType); +    buffer_json_member_add_int64(wb, "multi", d->MultiCounterData); +    buffer_json_member_add_int64(wb, "frequency", d->Frequency); +    buffer_json_object_close(wb); +} + +bool dumpCounterCb(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, PERF_COUNTER_DEFINITION *pCounter, RAW_DATA *sample, void *data) { +    (void)pDataBlock; +    (void)pObjectType; +    BUFFER *wb = data; +    buffer_json_add_array_item_object(wb); +    buffer_json_member_add_string(wb, "Counter", RegistryFindNameByID(pCounter->CounterNameTitleIndex)); +    dumpSample(wb, sample); +    buffer_json_member_add_string(wb, "Help", RegistryFindHelpByID(pCounter->CounterHelpTitleIndex)); +    buffer_json_member_add_string(wb, "Type", getCounterType(pCounter->CounterType)); +    buffer_json_member_add_string(wb, "Algorithm", getCounterAlgorithm(pCounter->CounterType)); +    buffer_json_member_add_string(wb, "Description", getCounterDescription(pCounter->CounterType)); +    buffer_json_object_close(wb); +    return true; +} + +bool dumpInstanceCounterCb(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, PERF_INSTANCE_DEFINITION *pInstance, PERF_COUNTER_DEFINITION *pCounter, RAW_DATA *sample, void *data) { +    (void)pInstance; +    return dumpCounterCb(pDataBlock, pObjectType, pCounter, sample, data); 
+} + + +int windows_perflib_dump(const char *key) { + if(key && !*key) + key = NULL; + + PerflibNamesRegistryInitialize(); + + DWORD id = 0; + if(key) { + id = RegistryFindIDByName(key); + if(id == PERFLIB_REGISTRY_NAME_NOT_FOUND) { + fprintf(stderr, "Cannot find key '%s' in Windows Performance Counters Registry.\n", key); + exit(1); + } + } + + CLEAN_BUFFER *wb = buffer_create(0, NULL); + buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_MINIFY); + + perflibQueryAndTraverse(id, dumpDataCb, dumpObjectCb, dumpInstanceCb, dumpInstanceCounterCb, dumpCounterCb, wb); + + buffer_json_finalize(wb); + printf("\n%s\n", buffer_tostring(wb)); + + perflibFreePerformanceData(); + + return 0; +} + +#endif // OS_WINDOWS \ No newline at end of file diff --git a/src/libnetdata/os/windows-perflib/perflib-names.c b/src/libnetdata/os/windows-perflib/perflib-names.c new file mode 100644 index 000000000..18ff2af65 --- /dev/null +++ b/src/libnetdata/os/windows-perflib/perflib-names.c @@ -0,0 +1,245 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "perflib.h" + +#if defined(OS_WINDOWS) +#define REGISTRY_KEY "SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Perflib\\009" + +typedef struct perflib_registry { + DWORD id; + char *key; + char *help; +} perfLibRegistryEntry; + +static inline bool compare_perfLibRegistryEntry(const char *k1, const char *k2) { + return strcmp(k1, k2) == 0; +} + +static inline const char *value2key_perfLibRegistryEntry(perfLibRegistryEntry *entry) { + return entry->key; +} + +#define SIMPLE_HASHTABLE_COMPARE_KEYS_FUNCTION compare_perfLibRegistryEntry +#define SIMPLE_HASHTABLE_VALUE2KEY_FUNCTION value2key_perfLibRegistryEntry +#define SIMPLE_HASHTABLE_KEY_TYPE const char +#define SIMPLE_HASHTABLE_VALUE_TYPE perfLibRegistryEntry +#define SIMPLE_HASHTABLE_NAME _PERFLIB +#include "libnetdata/simple_hashtable/simple_hashtable.h" + +static struct { + SPINLOCK spinlock; + size_t size; + perfLibRegistryEntry **array; + struct simple_hashtable_PERFLIB hashtable; + FILETIME lastWriteTime; +} names_globals = { + .spinlock = NETDATA_SPINLOCK_INITIALIZER, + .size = 0, + .array = NULL, +}; + +DWORD RegistryFindIDByName(const char *name) { + DWORD rc = PERFLIB_REGISTRY_NAME_NOT_FOUND; + + spinlock_lock(&names_globals.spinlock); + XXH64_hash_t hash = XXH3_64bits((void *)name, strlen(name)); + SIMPLE_HASHTABLE_SLOT_PERFLIB *sl = simple_hashtable_get_slot_PERFLIB(&names_globals.hashtable, hash, name, false); + perfLibRegistryEntry *e = SIMPLE_HASHTABLE_SLOT_DATA(sl); + if(e) rc = e->id; + spinlock_unlock(&names_globals.spinlock); + + return rc; +} + +static inline void RegistryAddToHashTable_unsafe(perfLibRegistryEntry *entry) { + XXH64_hash_t hash = XXH3_64bits((void *)entry->key, strlen(entry->key)); + SIMPLE_HASHTABLE_SLOT_PERFLIB *sl = simple_hashtable_get_slot_PERFLIB(&names_globals.hashtable, hash, entry->key, true); + perfLibRegistryEntry *e = SIMPLE_HASHTABLE_SLOT_DATA(sl); + if(!e || e->id > entry->id) + simple_hashtable_set_slot_PERFLIB(&names_globals.hashtable, sl, hash, entry); +} + +static void RegistrySetData_unsafe(DWORD id, const char *key, const char *help) { + if(id >= names_globals.size) { + // increase the size of the array + + size_t old_size = names_globals.size; + + if(!names_globals.size) + names_globals.size = 20000; + else + names_globals.size *= 2; + + names_globals.array = reallocz(names_globals.array, names_globals.size * sizeof(perfLibRegistryEntry *)); + + memset(names_globals.array + old_size, 0, (names_globals.size - old_size) * 
sizeof(perfLibRegistryEntry *)); + } + + perfLibRegistryEntry *entry = names_globals.array[id]; + if(!entry) + entry = names_globals.array[id] = (perfLibRegistryEntry *)calloc(1, sizeof(perfLibRegistryEntry)); + + bool add_to_hash = false; + if(key && !entry->key) { + entry->key = strdup(key); + add_to_hash = true; + } + + if(help && !entry->help) + entry->help = strdup(help); + + entry->id = id; + + if(add_to_hash) + RegistryAddToHashTable_unsafe(entry); +} + +const char *RegistryFindNameByID(DWORD id) { + const char *s = ""; + spinlock_lock(&names_globals.spinlock); + + if(id < names_globals.size) { + perfLibRegistryEntry *titleEntry = names_globals.array[id]; + if(titleEntry && titleEntry->key) + s = titleEntry->key; + } + + spinlock_unlock(&names_globals.spinlock); + return s; +} + +const char *RegistryFindHelpByID(DWORD id) { + const char *s = ""; + spinlock_lock(&names_globals.spinlock); + + if(id < names_globals.size) { + perfLibRegistryEntry *titleEntry = names_globals.array[id]; + if(titleEntry && titleEntry->help) + s = titleEntry->help; + } + + spinlock_unlock(&names_globals.spinlock); + return s; +} + +// ---------------------------------------------------------- + +static inline void readRegistryKeys_unsafe(BOOL helps) { + TCHAR *pData = NULL; + + HKEY hKey; + DWORD dwType; + DWORD dwSize = 0; + LONG lStatus; + + LPCSTR valueName; + if(helps) + valueName = TEXT("help"); + else + valueName = TEXT("CounterDefinition"); + + // Open the key for the English counters + lStatus = RegOpenKeyEx(HKEY_LOCAL_MACHINE, TEXT(REGISTRY_KEY), 0, KEY_READ, &hKey); + if (lStatus != ERROR_SUCCESS) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "Failed to open registry key HKEY_LOCAL_MACHINE, subkey '%s', error %ld\n", REGISTRY_KEY, (long)lStatus); + return; + } + + // Get the size of the 'Counters' data + lStatus = RegQueryValueEx(hKey, valueName, NULL, &dwType, NULL, &dwSize); + if (lStatus != ERROR_SUCCESS) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "Failed to get registry key HKEY_LOCAL_MACHINE, subkey '%s', value '%s', size of data, error %ld\n", + REGISTRY_KEY, (const char *)valueName, (long)lStatus); + goto cleanup; + } + + // Allocate memory for the data + pData = mallocz(dwSize); + + // Read the 'Counters' data + lStatus = RegQueryValueEx(hKey, valueName, NULL, &dwType, (LPBYTE)pData, &dwSize); + if (lStatus != ERROR_SUCCESS || dwType != REG_MULTI_SZ) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "Failed to get registry key HKEY_LOCAL_MACHINE, subkey '%s', value '%s', data, error %ld\n", + REGISTRY_KEY, (const char *)valueName, (long)lStatus); + goto cleanup; + } + + // Process the counter data + TCHAR *ptr = pData; + while (*ptr) { + TCHAR *sid = ptr; // First string is the ID + ptr += lstrlen(ptr) + 1; // Move to the next string + TCHAR *name = ptr; // Second string is the name + ptr += lstrlen(ptr) + 1; // Move to the next pair + + DWORD id = strtoul(sid, NULL, 10); + + if(helps) + RegistrySetData_unsafe(id, NULL, name); + else + RegistrySetData_unsafe(id, name, NULL); + } + +cleanup: + if(pData) freez(pData); + RegCloseKey(hKey); +} + +static BOOL RegistryKeyModification(FILETIME *lastWriteTime) { + HKEY hKey; + LONG lResult; + BOOL ret = FALSE; + + // Open the registry key + lResult = RegOpenKeyEx(HKEY_LOCAL_MACHINE, TEXT(REGISTRY_KEY), 0, KEY_READ, &hKey); + if (lResult != ERROR_SUCCESS) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "Failed to open registry key HKEY_LOCAL_MACHINE, subkey '%s', error %ld\n", REGISTRY_KEY, (long)lResult); + return FALSE; + } + + // Get the last write time + lResult = 
RegQueryInfoKey(hKey, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, lastWriteTime); +    if (lResult != ERROR_SUCCESS) { +        nd_log(NDLS_COLLECTORS, NDLP_ERR, +               "Failed to query registry key HKEY_LOCAL_MACHINE, subkey '%s', last write time, error %ld\n", REGISTRY_KEY, (long)lResult); +        ret = FALSE; +    } +    else +        ret = TRUE; + +    RegCloseKey(hKey); +    return ret; +} + +static inline void RegistryFetchAll_unsafe(void) { +    readRegistryKeys_unsafe(FALSE); +    readRegistryKeys_unsafe(TRUE); +} + +void PerflibNamesRegistryInitialize(void) { +    spinlock_lock(&names_globals.spinlock); +    simple_hashtable_init_PERFLIB(&names_globals.hashtable, 20000); +    RegistryKeyModification(&names_globals.lastWriteTime); +    RegistryFetchAll_unsafe(); +    spinlock_unlock(&names_globals.spinlock); +} + +void PerflibNamesRegistryUpdate(void) { +    FILETIME lastWriteTime = { 0 }; +    RegistryKeyModification(&lastWriteTime); + +    if(CompareFileTime(&lastWriteTime, &names_globals.lastWriteTime) > 0) { +        spinlock_lock(&names_globals.spinlock); +        if(CompareFileTime(&lastWriteTime, &names_globals.lastWriteTime) > 0) { +            names_globals.lastWriteTime = lastWriteTime; +            RegistryFetchAll_unsafe(); +        } +        spinlock_unlock(&names_globals.spinlock); +    } +} + +#endif // OS_WINDOWS diff --git a/src/libnetdata/os/windows-perflib/perflib.c b/src/libnetdata/os/windows-perflib/perflib.c new file mode 100644 index 000000000..413a202fa --- /dev/null +++ b/src/libnetdata/os/windows-perflib/perflib.c @@ -0,0 +1,687 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "perflib.h" + +#if defined(OS_WINDOWS) +// -------------------------------------------------------------------------------- + +// Retrieve a buffer that contains the specified performance data. +// The pwszSource parameter determines the data that getPerformanceData returns. +// +// Typically, when calling RegQueryValueEx, you can specify zero for the size of the buffer +// and RegQueryValueEx will set your size variable to the required buffer size. However, +// if the source is "Global" or one or more object index values, you will need to increment +// the buffer size in a loop until RegQueryValueEx does not return ERROR_MORE_DATA. +static LPBYTE getPerformanceData(const char *pwszSource) { +    static __thread DWORD size = 0; +    static __thread LPBYTE buffer = NULL; + +    if(pwszSource == (const char *)0x01) { +        freez(buffer); +        buffer = NULL; +        size = 0; +        return NULL; +    } + +    if(!size) { +        size = 32 * 1024; +        buffer = mallocz(size); +    } + +    LONG status = ERROR_SUCCESS; +    while ((status = RegQueryValueEx(HKEY_PERFORMANCE_DATA, pwszSource, +                                     NULL, NULL, buffer, &size)) == ERROR_MORE_DATA) { +        size *= 2; +        buffer = reallocz(buffer, size); +    } + +    if (status != ERROR_SUCCESS) { +        nd_log(NDLS_COLLECTORS, NDLP_ERR, "RegQueryValueEx failed with 0x%x.\n", status); +        return NULL; +    } + +    return buffer; +} + +void perflibFreePerformanceData(void) { +    getPerformanceData((const char *)0x01); +} + +// -------------------------------------------------------------------------------------------------------------------- + +// Retrieve the raw counter value and any supporting data needed to calculate +// a displayable counter value. Use the counter type to determine the information +// needed to calculate the value. 
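+// For example, a PERF_COUNTER_COUNTER is displayed as a rate: collect two RAW_DATA
+// samples and compute (data1 - data0) / ((time1 - time0) / frequency), the same
+// formula that getCounterAlgorithm() in perflib-dump.c reports for this type.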
+ +static BOOL getCounterData( + PERF_DATA_BLOCK *pDataBlock, + PERF_OBJECT_TYPE* pObject, + PERF_COUNTER_DEFINITION* pCounter, + PERF_COUNTER_BLOCK* pCounterDataBlock, + PRAW_DATA pRawData) +{ + PVOID pData = NULL; + UNALIGNED ULONGLONG* pullData = NULL; + PERF_COUNTER_DEFINITION* pBaseCounter = NULL; + BOOL fSuccess = TRUE; + + //Point to the raw counter data. + pData = (PVOID)((LPBYTE)pCounterDataBlock + pCounter->CounterOffset); + + //Now use the PERF_COUNTER_DEFINITION.CounterType value to figure out what + //other information you need to calculate a displayable value. + switch (pCounter->CounterType) { + + case PERF_COUNTER_COUNTER: + case PERF_COUNTER_QUEUELEN_TYPE: + case PERF_SAMPLE_COUNTER: + pRawData->Data = (ULONGLONG)(*(DWORD*)pData); + pRawData->Time = pDataBlock->PerfTime.QuadPart; + if (PERF_COUNTER_COUNTER == pCounter->CounterType || PERF_SAMPLE_COUNTER == pCounter->CounterType) + pRawData->Frequency = pDataBlock->PerfFreq.QuadPart; + break; + + case PERF_OBJ_TIME_TIMER: + pRawData->Data = (ULONGLONG)(*(DWORD*)pData); + pRawData->Time = pObject->PerfTime.QuadPart; + break; + + case PERF_COUNTER_100NS_QUEUELEN_TYPE: + pRawData->Data = *(UNALIGNED ULONGLONG *)pData; + pRawData->Time = pDataBlock->PerfTime100nSec.QuadPart; + break; + + case PERF_COUNTER_OBJ_TIME_QUEUELEN_TYPE: + pRawData->Data = *(UNALIGNED ULONGLONG *)pData; + pRawData->Time = pObject->PerfTime.QuadPart; + break; + + case PERF_COUNTER_TIMER: + case PERF_COUNTER_TIMER_INV: + case PERF_COUNTER_BULK_COUNT: + case PERF_COUNTER_LARGE_QUEUELEN_TYPE: + pullData = (UNALIGNED ULONGLONG *)pData; + pRawData->Data = *pullData; + pRawData->Time = pDataBlock->PerfTime.QuadPart; + if (pCounter->CounterType == PERF_COUNTER_BULK_COUNT) + pRawData->Frequency = pDataBlock->PerfFreq.QuadPart; + break; + + case PERF_COUNTER_MULTI_TIMER: + case PERF_COUNTER_MULTI_TIMER_INV: + pullData = (UNALIGNED ULONGLONG *)pData; + pRawData->Data = *pullData; + pRawData->Frequency = pDataBlock->PerfFreq.QuadPart; + pRawData->Time = pDataBlock->PerfTime.QuadPart; + + //These counter types have a second counter value that is adjacent to + //this counter value in the counter data block. The value is needed for + //the calculation. + if ((pCounter->CounterType & PERF_MULTI_COUNTER) == PERF_MULTI_COUNTER) { + ++pullData; + pRawData->MultiCounterData = *(DWORD*)pullData; + } + break; + + //These counters do not use any time reference. + case PERF_COUNTER_RAWCOUNT: + case PERF_COUNTER_RAWCOUNT_HEX: + case PERF_COUNTER_DELTA: + // some counters in these categories, have CounterSize = sizeof(ULONGLONG) + // but the official documentation always uses them as sizeof(DWORD) + pRawData->Data = (ULONGLONG)(*(DWORD*)pData); + pRawData->Time = 0; + break; + + case PERF_COUNTER_LARGE_RAWCOUNT: + case PERF_COUNTER_LARGE_RAWCOUNT_HEX: + case PERF_COUNTER_LARGE_DELTA: + pRawData->Data = *(UNALIGNED ULONGLONG*)pData; + pRawData->Time = 0; + break; + + //These counters use the 100ns time base in their calculation. + case PERF_100NSEC_TIMER: + case PERF_100NSEC_TIMER_INV: + case PERF_100NSEC_MULTI_TIMER: + case PERF_100NSEC_MULTI_TIMER_INV: + pullData = (UNALIGNED ULONGLONG*)pData; + pRawData->Data = *pullData; + pRawData->Time = pDataBlock->PerfTime100nSec.QuadPart; + + //These counter types have a second counter value that is adjacent to + //this counter value in the counter data block. The value is needed for + //the calculation. 
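+            //(this adjacent value is the instance multiplier: stored below in
+            //MultiCounterData, it is the "multi1" term in the display formulas)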
+ if ((pCounter->CounterType & PERF_MULTI_COUNTER) == PERF_MULTI_COUNTER) { + ++pullData; + pRawData->MultiCounterData = *(DWORD*)pullData; + } + break; + + //These counters use two data points, this value and one from this counter's + //base counter. The base counter should be the next counter in the object's + //list of counters. + case PERF_SAMPLE_FRACTION: + case PERF_RAW_FRACTION: + pRawData->Data = (ULONGLONG)(*(DWORD*)pData); + pBaseCounter = pCounter + 1; //Get base counter + if ((pBaseCounter->CounterType & PERF_COUNTER_BASE) == PERF_COUNTER_BASE) { + pData = (PVOID)((LPBYTE)pCounterDataBlock + pBaseCounter->CounterOffset); + pRawData->Time = (LONGLONG)(*(DWORD*)pData); + } + else + fSuccess = FALSE; + break; + + case PERF_LARGE_RAW_FRACTION: + case PERF_PRECISION_SYSTEM_TIMER: + case PERF_PRECISION_100NS_TIMER: + case PERF_PRECISION_OBJECT_TIMER: + pRawData->Data = *(UNALIGNED ULONGLONG*)pData; + pBaseCounter = pCounter + 1; + if ((pBaseCounter->CounterType & PERF_COUNTER_BASE) == PERF_COUNTER_BASE) { + pData = (PVOID)((LPBYTE)pCounterDataBlock + pBaseCounter->CounterOffset); + pRawData->Time = *(LONGLONG*)pData; + } + else + fSuccess = FALSE; + break; + + case PERF_AVERAGE_TIMER: + case PERF_AVERAGE_BULK: + pRawData->Data = *(UNALIGNED ULONGLONG*)pData; + pBaseCounter = pCounter+1; + if ((pBaseCounter->CounterType & PERF_COUNTER_BASE) == PERF_COUNTER_BASE) { + pData = (PVOID)((LPBYTE)pCounterDataBlock + pBaseCounter->CounterOffset); + pRawData->Time = *(DWORD*)pData; + } + else + fSuccess = FALSE; + + if (pCounter->CounterType == PERF_AVERAGE_TIMER) + pRawData->Frequency = pDataBlock->PerfFreq.QuadPart; + break; + + //These are base counters and are used in calculations for other counters. + //This case should never be entered. + case PERF_SAMPLE_BASE: + case PERF_AVERAGE_BASE: + case PERF_COUNTER_MULTI_BASE: + case PERF_RAW_BASE: + case PERF_LARGE_RAW_BASE: + pRawData->Data = 0; + pRawData->Time = 0; + fSuccess = FALSE; + break; + + case PERF_ELAPSED_TIME: + pRawData->Data = *(UNALIGNED ULONGLONG*)pData; + pRawData->Time = pObject->PerfTime.QuadPart; + pRawData->Frequency = pObject->PerfFreq.QuadPart; + break; + + //These counters are currently not supported. + case PERF_COUNTER_TEXT: + case PERF_COUNTER_NODATA: + case PERF_COUNTER_HISTOGRAM_TYPE: + default: // unknown counter types + pRawData->Data = 0; + pRawData->Time = 0; + fSuccess = FALSE; + break; + } + + return fSuccess; +} + +// -------------------------------------------------------------------------------------------------------------------- + +static inline BOOL isValidPointer(PERF_DATA_BLOCK *pDataBlock __maybe_unused, void *ptr __maybe_unused) { +#ifdef NETDATA_INTERNAL_CHECKS + return (PBYTE)ptr >= (PBYTE)pDataBlock + pDataBlock->TotalByteLength ? FALSE : TRUE; +#else + return TRUE; +#endif +} + +static inline BOOL isValidStructure(PERF_DATA_BLOCK *pDataBlock __maybe_unused, void *ptr __maybe_unused, size_t length __maybe_unused) { +#ifdef NETDATA_INTERNAL_CHECKS + return (PBYTE)ptr + length > (PBYTE)pDataBlock + pDataBlock->TotalByteLength ? 
FALSE : TRUE; +#else + return TRUE; +#endif +} + +static inline PERF_DATA_BLOCK *getDataBlock(BYTE *pBuffer) { + PERF_DATA_BLOCK *pDataBlock = (PERF_DATA_BLOCK *)pBuffer; + + static WCHAR signature[] = { 'P', 'E', 'R', 'F' }; + + if(memcmp(pDataBlock->Signature, signature, sizeof(signature)) != 0) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "WINDOWS: PERFLIB: Invalid data block signature."); + return NULL; + } + + if(!isValidPointer(pDataBlock, (PBYTE)pDataBlock + pDataBlock->SystemNameOffset) || + !isValidStructure(pDataBlock, (PBYTE)pDataBlock + pDataBlock->SystemNameOffset, pDataBlock->SystemNameLength)) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "WINDOWS: PERFLIB: Invalid system name array."); + return NULL; + } + + return pDataBlock; +} + +static inline PERF_OBJECT_TYPE *getObjectType(PERF_DATA_BLOCK* pDataBlock, PERF_OBJECT_TYPE *lastObjectType) { + PERF_OBJECT_TYPE* pObjectType = NULL; + + if(!lastObjectType) + pObjectType = (PERF_OBJECT_TYPE *)((PBYTE)pDataBlock + pDataBlock->HeaderLength); + else if (lastObjectType->TotalByteLength != 0) + pObjectType = (PERF_OBJECT_TYPE *)((PBYTE)lastObjectType + lastObjectType->TotalByteLength); + + if(pObjectType && (!isValidPointer(pDataBlock, pObjectType) || !isValidStructure(pDataBlock, pObjectType, pObjectType->TotalByteLength))) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "WINDOWS: PERFLIB: Invalid ObjectType!"); + pObjectType = NULL; + } + + return pObjectType; +} + +inline PERF_OBJECT_TYPE *getObjectTypeByIndex(PERF_DATA_BLOCK *pDataBlock, DWORD ObjectNameTitleIndex) { + PERF_OBJECT_TYPE *po = NULL; + for(DWORD o = 0; o < pDataBlock->NumObjectTypes ; o++) { + po = getObjectType(pDataBlock, po); + if(po->ObjectNameTitleIndex == ObjectNameTitleIndex) + return po; + } + + return NULL; +} + +static inline PERF_INSTANCE_DEFINITION *getInstance( + PERF_DATA_BLOCK *pDataBlock, + PERF_OBJECT_TYPE *pObjectType, + PERF_COUNTER_BLOCK *lastCounterBlock +) { + PERF_INSTANCE_DEFINITION *pInstance; + + if(!lastCounterBlock) + pInstance = (PERF_INSTANCE_DEFINITION *)((PBYTE)pObjectType + pObjectType->DefinitionLength); + else + pInstance = (PERF_INSTANCE_DEFINITION *)((PBYTE)lastCounterBlock + lastCounterBlock->ByteLength); + + if(pInstance && (!isValidPointer(pDataBlock, pInstance) || !isValidStructure(pDataBlock, pInstance, pInstance->ByteLength))) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "WINDOWS: PERFLIB: Invalid Instance Definition!"); + pInstance = NULL; + } + + return pInstance; +} + +static inline PERF_COUNTER_BLOCK *getObjectTypeCounterBlock( + PERF_DATA_BLOCK *pDataBlock, + PERF_OBJECT_TYPE *pObjectType +) { + PERF_COUNTER_BLOCK *pCounterBlock = (PERF_COUNTER_BLOCK *)((PBYTE)pObjectType + pObjectType->DefinitionLength); + + if(pCounterBlock && (!isValidPointer(pDataBlock, pCounterBlock) || !isValidStructure(pDataBlock, pCounterBlock, pCounterBlock->ByteLength))) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "WINDOWS: PERFLIB: Invalid ObjectType CounterBlock!"); + pCounterBlock = NULL; + } + + return pCounterBlock; +} + +static inline PERF_COUNTER_BLOCK *getInstanceCounterBlock( + PERF_DATA_BLOCK *pDataBlock, + PERF_OBJECT_TYPE *pObjectType, + PERF_INSTANCE_DEFINITION *pInstance +) { + (void)pObjectType; + PERF_COUNTER_BLOCK *pCounterBlock = (PERF_COUNTER_BLOCK *)((PBYTE)pInstance + pInstance->ByteLength); + + if(pCounterBlock && (!isValidPointer(pDataBlock, pCounterBlock) || !isValidStructure(pDataBlock, pCounterBlock, pCounterBlock->ByteLength))) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "WINDOWS: PERFLIB: Invalid Instance CounterBlock!"); + pCounterBlock = 
NULL; + } + + return pCounterBlock; +} + +inline PERF_INSTANCE_DEFINITION *getInstanceByPosition(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, DWORD instancePosition) { + PERF_INSTANCE_DEFINITION *pi = NULL; + PERF_COUNTER_BLOCK *pc = NULL; + for(DWORD i = 0; i <= instancePosition ;i++) { + pi = getInstance(pDataBlock, pObjectType, pc); + pc = getInstanceCounterBlock(pDataBlock, pObjectType, pi); + } + return pi; +} + +static inline PERF_COUNTER_DEFINITION *getCounterDefinition(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, PERF_COUNTER_DEFINITION *lastCounterDefinition) { + PERF_COUNTER_DEFINITION *pCounterDefinition = NULL; + + if(!lastCounterDefinition) + pCounterDefinition = (PERF_COUNTER_DEFINITION *)((PBYTE)pObjectType + pObjectType->HeaderLength); + else + pCounterDefinition = (PERF_COUNTER_DEFINITION *)((PBYTE)lastCounterDefinition + lastCounterDefinition->ByteLength); + + if(pCounterDefinition && (!isValidPointer(pDataBlock, pCounterDefinition) || !isValidStructure(pDataBlock, pCounterDefinition, pCounterDefinition->ByteLength))) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "WINDOWS: PERFLIB: Invalid Counter Definition!"); + pCounterDefinition = NULL; + } + + return pCounterDefinition; +} + +// -------------------------------------------------------------------------------------------------------------------- + +static inline BOOL getEncodedStringToUTF8(char *dst, size_t dst_len, DWORD CodePage, char *start, DWORD length) { + static __thread wchar_t unicode[PERFLIB_MAX_NAME_LENGTH]; + + WCHAR *tempBuffer; // Temporary buffer for Unicode data + DWORD charsCopied = 0; + + if (CodePage == 0) { + // Input is already Unicode (UTF-16) + tempBuffer = (WCHAR *)start; + charsCopied = length / sizeof(WCHAR); // Convert byte length to number of WCHARs + } + else { + tempBuffer = unicode; + charsCopied = any_to_utf16(CodePage, unicode, _countof(unicode), start, (int)length, NULL); + if(!charsCopied) return FALSE; + } + + // Now convert from Unicode (UTF-16) to UTF-8 + int bytesCopied = WideCharToMultiByte(CP_UTF8, 0, tempBuffer, (int)charsCopied, dst, (int)dst_len, NULL, NULL); + if (bytesCopied == 0) { + dst[0] = '\0'; // Ensure the buffer is null-terminated even on failure + return FALSE; + } + + dst[bytesCopied - 1] = '\0'; // Ensure buffer is null-terminated + return TRUE; +} + +inline BOOL getInstanceName(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, PERF_INSTANCE_DEFINITION *pInstance, + char *buffer, size_t bufferLen) { + (void)pDataBlock; + if (!pInstance || !buffer || !bufferLen) return FALSE; + + return getEncodedStringToUTF8(buffer, bufferLen, pObjectType->CodePage, + ((char *)pInstance + pInstance->NameOffset), pInstance->NameLength); +} + +inline BOOL getSystemName(PERF_DATA_BLOCK *pDataBlock, char *buffer, size_t bufferLen) { + return getEncodedStringToUTF8(buffer, bufferLen, 0, + ((char *)pDataBlock + pDataBlock->SystemNameOffset), pDataBlock->SystemNameLength); +} + +inline bool ObjectTypeHasInstances(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType) { + (void)pDataBlock; + return pObjectType->NumInstances != PERF_NO_INSTANCES && pObjectType->NumInstances > 0; +} + +PERF_OBJECT_TYPE *perflibFindObjectTypeByName(PERF_DATA_BLOCK *pDataBlock, const char *name) { + PERF_OBJECT_TYPE* pObjectType = NULL; + for(DWORD o = 0; o < pDataBlock->NumObjectTypes; o++) { + pObjectType = getObjectType(pDataBlock, pObjectType); + if(strcmp(name, RegistryFindNameByID(pObjectType->ObjectNameTitleIndex)) == 0) + return pObjectType; + } + + return NULL; 
+} + +PERF_INSTANCE_DEFINITION *perflibForEachInstance(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, PERF_INSTANCE_DEFINITION *lastInstance) { + if(!ObjectTypeHasInstances(pDataBlock, pObjectType)) + return NULL; + + return getInstance(pDataBlock, pObjectType, + lastInstance ? + getInstanceCounterBlock(pDataBlock, pObjectType, lastInstance) : + NULL ); +} + +bool perflibGetInstanceCounter(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, PERF_INSTANCE_DEFINITION *pInstance, COUNTER_DATA *cd) { + DWORD id = cd->id; + const char *key = cd->key; + internal_fatal(key == NULL, "You have to set a key for this call."); + + if(unlikely(cd->failures >= PERFLIB_MAX_FAILURES_TO_FIND_METRIC)) { + // we don't want to lookup and compare strings all the time + // when a metric is not there, so we try to find it for + // XX times, and then we give up. + + if(cd->failures == PERFLIB_MAX_FAILURES_TO_FIND_METRIC) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "WINDOWS: PERFLIB: Giving up on metric '%s' (tried to find it %u times).", + cd->key, cd->failures); + + cd->failures++; // increment it once, so that we will not log this again + } + + goto failed; + } + + PERF_COUNTER_DEFINITION *pCounterDefinition = NULL; + for(DWORD c = 0; c < pObjectType->NumCounters ;c++) { + pCounterDefinition = getCounterDefinition(pDataBlock, pObjectType, pCounterDefinition); + if(!pCounterDefinition) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "WINDOWS: PERFLIB: Cannot read counter definition No %u (out of %u)", + c, pObjectType->NumCounters); + break; + } + + if(id) { + if(id != pCounterDefinition->CounterNameTitleIndex) + continue; + } + else { + const char *name = RegistryFindNameByID(pCounterDefinition->CounterNameTitleIndex); + if(strcmp(name, key) != 0) + continue; + + cd->id = pCounterDefinition->CounterNameTitleIndex; + } + + cd->current.CounterType = cd->OverwriteCounterType ? cd->OverwriteCounterType : pCounterDefinition->CounterType; + PERF_COUNTER_BLOCK *pCounterBlock = getInstanceCounterBlock(pDataBlock, pObjectType, pInstance); + + cd->previous = cd->current; + if(likely(getCounterData(pDataBlock, pObjectType, pCounterDefinition, pCounterBlock, &cd->current))) { + cd->updated = true; + cd->failures = 0; + return true; + } + } + + cd->failures++; + +failed: + cd->previous = cd->current; + cd->current = RAW_DATA_EMPTY; + cd->updated = false; + return false; +} + +bool perflibGetObjectCounter(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, COUNTER_DATA *cd) { + PERF_COUNTER_DEFINITION *pCounterDefinition = NULL; + for(DWORD c = 0; c < pObjectType->NumCounters ;c++) { + pCounterDefinition = getCounterDefinition(pDataBlock, pObjectType, pCounterDefinition); + if(!pCounterDefinition) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "WINDOWS: PERFLIB: Cannot read counter definition No %u (out of %u)", + c, pObjectType->NumCounters); + break; + } + + if(cd->id) { + if(cd->id != pCounterDefinition->CounterNameTitleIndex) + continue; + } + else { + if(strcmp(RegistryFindNameByID(pCounterDefinition->CounterNameTitleIndex), cd->key) != 0) + continue; + + cd->id = pCounterDefinition->CounterNameTitleIndex; + } + + cd->current.CounterType = cd->OverwriteCounterType ? 
cd->OverwriteCounterType : pCounterDefinition->CounterType; + PERF_COUNTER_BLOCK *pCounterBlock = getObjectTypeCounterBlock(pDataBlock, pObjectType); + + cd->previous = cd->current; + cd->updated = getCounterData(pDataBlock, pObjectType, pCounterDefinition, pCounterBlock, &cd->current); + return cd->updated; + } + + cd->previous = cd->current; + cd->current = RAW_DATA_EMPTY; + cd->updated = false; + return false; +} + +PERF_DATA_BLOCK *perflibGetPerformanceData(DWORD id) { + char source[24]; + snprintfz(source, sizeof(source), "%u", id); + + LPBYTE pData = (LPBYTE)getPerformanceData((id > 0) ? source : NULL); + if (!pData) return NULL; + + PERF_DATA_BLOCK *pDataBlock = getDataBlock(pData); + if(!pDataBlock) return NULL; + + return pDataBlock; +} + +int perflibQueryAndTraverse(DWORD id, + perflib_data_cb dataCb, + perflib_object_cb objectCb, + perflib_instance_cb instanceCb, + perflib_instance_counter_cb instanceCounterCb, + perflib_counter_cb counterCb, + void *data) { + int counters = -1; + + PERF_DATA_BLOCK *pDataBlock = perflibGetPerformanceData(id); + if(!pDataBlock) goto cleanup; + + bool do_data = true; + if(dataCb) + do_data = dataCb(pDataBlock, data); + + PERF_OBJECT_TYPE* pObjectType = NULL; + for(DWORD o = 0; do_data && o < pDataBlock->NumObjectTypes; o++) { + pObjectType = getObjectType(pDataBlock, pObjectType); + if(!pObjectType) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "WINDOWS: PERFLIB: Cannot read object type No %d (out of %d)", + o, pDataBlock->NumObjectTypes); + break; + } + + bool do_object = true; + if(objectCb) + do_object = objectCb(pDataBlock, pObjectType, data); + + if(!do_object) + continue; + + if(ObjectTypeHasInstances(pDataBlock, pObjectType)) { + PERF_INSTANCE_DEFINITION *pInstance = NULL; + PERF_COUNTER_BLOCK *pCounterBlock = NULL; + for(LONG i = 0; i < pObjectType->NumInstances ;i++) { + pInstance = getInstance(pDataBlock, pObjectType, pCounterBlock); + if(!pInstance) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "WINDOWS: PERFLIB: Cannot read Instance No %d (out of %d)", + i, pObjectType->NumInstances); + break; + } + + pCounterBlock = getInstanceCounterBlock(pDataBlock, pObjectType, pInstance); + if(!pCounterBlock) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "WINDOWS: PERFLIB: Cannot read CounterBlock of instance No %d (out of %d)", + i, pObjectType->NumInstances); + break; + } + + bool do_instance = true; + if(instanceCb) + do_instance = instanceCb(pDataBlock, pObjectType, pInstance, data); + + if(!do_instance) + continue; + + PERF_COUNTER_DEFINITION *pCounterDefinition = NULL; + for(DWORD c = 0; c < pObjectType->NumCounters ;c++) { + pCounterDefinition = getCounterDefinition(pDataBlock, pObjectType, pCounterDefinition); + if(!pCounterDefinition) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "WINDOWS: PERFLIB: Cannot read counter definition No %u (out of %u)", + c, pObjectType->NumCounters); + break; + } + + RAW_DATA sample = { + .CounterType = pCounterDefinition->CounterType, + }; + if(getCounterData(pDataBlock, pObjectType, pCounterDefinition, pCounterBlock, &sample)) { + // DisplayCalculatedValue(&sample, &sample); + + if(instanceCounterCb) { + instanceCounterCb(pDataBlock, pObjectType, pInstance, pCounterDefinition, &sample, data); + counters++; + } + } + } + + if(instanceCb) + instanceCb(pDataBlock, pObjectType, NULL, data); + } + } + else { + PERF_COUNTER_BLOCK *pCounterBlock = getObjectTypeCounterBlock(pDataBlock, pObjectType); + PERF_COUNTER_DEFINITION *pCounterDefinition = NULL; + for(DWORD c = 0; c < pObjectType->NumCounters ;c++) { + pCounterDefinition = 
getCounterDefinition(pDataBlock, pObjectType, pCounterDefinition); + if(!pCounterDefinition) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "WINDOWS: PERFLIB: Cannot read counter definition No %u (out of %u)", + c, pObjectType->NumCounters); + break; + } + + RAW_DATA sample = { + .CounterType = pCounterDefinition->CounterType, + }; + if(getCounterData(pDataBlock, pObjectType, pCounterDefinition, pCounterBlock, &sample)) { + // DisplayCalculatedValue(&sample, &sample); + + if(counterCb) { + counterCb(pDataBlock, pObjectType, pCounterDefinition, &sample, data); + counters++; + } + } + } + } + + if(objectCb) + objectCb(pDataBlock, NULL, data); + } + +cleanup: + return counters; +} + +#endif // OS_WINDOWS \ No newline at end of file diff --git a/src/libnetdata/os/windows-perflib/perflib.h b/src/libnetdata/os/windows-perflib/perflib.h new file mode 100644 index 000000000..650e5503b --- /dev/null +++ b/src/libnetdata/os/windows-perflib/perflib.h @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_PERFLIB_H +#define NETDATA_PERFLIB_H + +#include "libnetdata/libnetdata.h" + +#if defined(OS_WINDOWS) + +typedef uint32_t DWORD; +typedef long long LONGLONG; +typedef unsigned long long ULONGLONG; +typedef int BOOL; + +struct _PERF_DATA_BLOCK; +typedef struct _PERF_DATA_BLOCK PERF_DATA_BLOCK; +struct _PERF_OBJECT_TYPE; +typedef struct _PERF_OBJECT_TYPE PERF_OBJECT_TYPE; +struct _PERF_INSTANCE_DEFINITION; +typedef struct _PERF_INSTANCE_DEFINITION PERF_INSTANCE_DEFINITION; +struct _PERF_COUNTER_DEFINITION; +typedef struct _PERF_COUNTER_DEFINITION PERF_COUNTER_DEFINITION; + +const char *RegistryFindNameByID(DWORD id); +const char *RegistryFindHelpByID(DWORD id); +DWORD RegistryFindIDByName(const char *name); +#define PERFLIB_REGISTRY_NAME_NOT_FOUND (DWORD)-1 +#define PERFLIB_MAX_NAME_LENGTH 1024 + +PERF_DATA_BLOCK *perflibGetPerformanceData(DWORD id); +void perflibFreePerformanceData(void); +PERF_OBJECT_TYPE *perflibFindObjectTypeByName(PERF_DATA_BLOCK *pDataBlock, const char *name); +PERF_INSTANCE_DEFINITION *perflibForEachInstance(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, PERF_INSTANCE_DEFINITION *lastInstance); + +typedef struct _rawdata { + DWORD CounterType; + DWORD MultiCounterData; // Second raw counter value for multi-valued counters + ULONGLONG Data; // Raw counter data + LONGLONG Time; // Is a time value or a base value + LONGLONG Frequency; +} RAW_DATA, *PRAW_DATA; + +typedef struct _counterdata { + DWORD id; + bool updated; + uint8_t failures; // counts the number of failures to find this key + const char *key; + DWORD OverwriteCounterType; // if set, the counter type will be overwritten once read + RAW_DATA current; + RAW_DATA previous; +} COUNTER_DATA; + +#define PERFLIB_MAX_FAILURES_TO_FIND_METRIC 10 + +#define RAW_DATA_EMPTY (RAW_DATA){ 0 } + +bool perflibGetInstanceCounter(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, PERF_INSTANCE_DEFINITION *pInstance, COUNTER_DATA *cd); +bool perflibGetObjectCounter(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, COUNTER_DATA *cd); + +typedef bool (*perflib_data_cb)(PERF_DATA_BLOCK *pDataBlock, void *data); +typedef bool (*perflib_object_cb)(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, void *data); +typedef bool (*perflib_instance_cb)(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, PERF_INSTANCE_DEFINITION *pInstance, void *data); +typedef bool (*perflib_instance_counter_cb)(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, PERF_INSTANCE_DEFINITION 
*pInstance, PERF_COUNTER_DEFINITION *pCounter, RAW_DATA *sample, void *data); +typedef bool (*perflib_counter_cb)(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, PERF_COUNTER_DEFINITION *pCounter, RAW_DATA *sample, void *data); + +int perflibQueryAndTraverse(DWORD id, + perflib_data_cb dataCb, + perflib_object_cb objectCb, + perflib_instance_cb instanceCb, + perflib_instance_counter_cb instanceCounterCb, + perflib_counter_cb counterCb, + void *data); + +bool ObjectTypeHasInstances(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType); + +BOOL getInstanceName(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, PERF_INSTANCE_DEFINITION *pInstance, + char *buffer, size_t bufferLen); + +BOOL getSystemName(PERF_DATA_BLOCK *pDataBlock, char *buffer, size_t bufferLen); + +PERF_OBJECT_TYPE *getObjectTypeByIndex(PERF_DATA_BLOCK *pDataBlock, DWORD ObjectNameTitleIndex); + +PERF_INSTANCE_DEFINITION *getInstanceByPosition( + PERF_DATA_BLOCK *pDataBlock, + PERF_OBJECT_TYPE *pObjectType, + DWORD instancePosition); + +void PerflibNamesRegistryInitialize(void); +void PerflibNamesRegistryUpdate(void); + +#endif // OS_WINDOWS +#endif //NETDATA_PERFLIB_H diff --git a/src/libnetdata/os/windows-wmi/windows-wmi-GetDiskDriveInfo.c b/src/libnetdata/os/windows-wmi/windows-wmi-GetDiskDriveInfo.c new file mode 100644 index 000000000..283c6f09e --- /dev/null +++ b/src/libnetdata/os/windows-wmi/windows-wmi-GetDiskDriveInfo.c @@ -0,0 +1,144 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "windows-wmi-GetDiskDriveInfo.h" + +#if defined(OS_WINDOWS) + +size_t GetDiskDriveInfo(DiskDriveInfoWMI *diskInfoArray, size_t array_size) { + if (InitializeWMI() != S_OK) return 0; + + HRESULT hr; + IEnumWbemClassObject* pEnumerator = NULL; + + // Execute the query, including new properties + BSTR query = SysAllocString(L"SELECT DeviceID, Model, Caption, Name, Partitions, Size, Status, Availability, Index, Manufacturer, InstallDate, MediaType, NeedsCleaning FROM WIN32_DiskDrive"); + BSTR wql = SysAllocString(L"WQL"); + hr = nd_wmi.pSvc->lpVtbl->ExecQuery( + nd_wmi.pSvc, + wql, + query, + WBEM_FLAG_FORWARD_ONLY | WBEM_FLAG_RETURN_IMMEDIATELY, + NULL, + &pEnumerator + ); + SysFreeString(query); + SysFreeString(wql); + + if (FAILED(hr)) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "GetDiskDriveInfo() WMI query failed. 
Error code = 0x%X", hr); + return 0; + } + + // Iterate through the results + IWbemClassObject *pclsObj = NULL; + ULONG uReturn = 0; + size_t index = 0; + while (pEnumerator && index < array_size) { + hr = pEnumerator->lpVtbl->Next(pEnumerator, WBEM_INFINITE, 1, &pclsObj, &uReturn); + if (0 == uReturn) break; + + VARIANT vtProp; + + // Extract DeviceID + hr = pclsObj->lpVtbl->Get(pclsObj, L"DeviceID", 0, &vtProp, 0, 0); + if (SUCCEEDED(hr) && vtProp.vt == VT_BSTR) { + wcstombs(diskInfoArray[index].DeviceID, vtProp.bstrVal, sizeof(diskInfoArray[index].DeviceID)); + } + VariantClear(&vtProp); + + // Extract Model + hr = pclsObj->lpVtbl->Get(pclsObj, L"Model", 0, &vtProp, 0, 0); + if (SUCCEEDED(hr) && vtProp.vt == VT_BSTR) { + wcstombs(diskInfoArray[index].Model, vtProp.bstrVal, sizeof(diskInfoArray[index].Model)); + } + VariantClear(&vtProp); + + // Extract Caption + hr = pclsObj->lpVtbl->Get(pclsObj, L"Caption", 0, &vtProp, 0, 0); + if (SUCCEEDED(hr) && vtProp.vt == VT_BSTR) { + wcstombs(diskInfoArray[index].Caption, vtProp.bstrVal, sizeof(diskInfoArray[index].Caption)); + } + VariantClear(&vtProp); + + // Extract Name + hr = pclsObj->lpVtbl->Get(pclsObj, L"Name", 0, &vtProp, 0, 0); + if (SUCCEEDED(hr) && vtProp.vt == VT_BSTR) { + wcstombs(diskInfoArray[index].Name, vtProp.bstrVal, sizeof(diskInfoArray[index].Name)); + } + VariantClear(&vtProp); + + // Extract Partitions + hr = pclsObj->lpVtbl->Get(pclsObj, L"Partitions", 0, &vtProp, 0, 0); + if (SUCCEEDED(hr) && (vtProp.vt == VT_I4 || vtProp.vt == VT_UI4)) { + diskInfoArray[index].Partitions = vtProp.intVal; + } + VariantClear(&vtProp); + + // Extract Size (convert BSTR to uint64) + hr = pclsObj->lpVtbl->Get(pclsObj, L"Size", 0, &vtProp, 0, 0); + if (SUCCEEDED(hr) && vtProp.vt == VT_BSTR) { + char sizeStr[64]; + wcstombs(sizeStr, vtProp.bstrVal, sizeof(sizeStr)); + diskInfoArray[index].Size = strtoull(sizeStr, NULL, 10); + } + VariantClear(&vtProp); + + // Extract Status + hr = pclsObj->lpVtbl->Get(pclsObj, L"Status", 0, &vtProp, 0, 0); + if (SUCCEEDED(hr) && vtProp.vt == VT_BSTR) { + wcstombs(diskInfoArray[index].Status, vtProp.bstrVal, sizeof(diskInfoArray[index].Status)); + } + VariantClear(&vtProp); + + // Extract Availability + hr = pclsObj->lpVtbl->Get(pclsObj, L"Availability", 0, &vtProp, 0, 0); + if (SUCCEEDED(hr) && (vtProp.vt == VT_I4 || vtProp.vt == VT_UI4)) { + diskInfoArray[index].Availability = vtProp.intVal; + } + VariantClear(&vtProp); + + // Extract Index + hr = pclsObj->lpVtbl->Get(pclsObj, L"Index", 0, &vtProp, 0, 0); + if (SUCCEEDED(hr) && (vtProp.vt == VT_I4 || vtProp.vt == VT_UI4)) { + diskInfoArray[index].Index = vtProp.intVal; + } + VariantClear(&vtProp); + + // Extract Manufacturer + hr = pclsObj->lpVtbl->Get(pclsObj, L"Manufacturer", 0, &vtProp, 0, 0); + if (SUCCEEDED(hr) && vtProp.vt == VT_BSTR) { + wcstombs(diskInfoArray[index].Manufacturer, vtProp.bstrVal, sizeof(diskInfoArray[index].Manufacturer)); + } + VariantClear(&vtProp); + + // Extract InstallDate + hr = pclsObj->lpVtbl->Get(pclsObj, L"InstallDate", 0, &vtProp, 0, 0); + if (SUCCEEDED(hr) && vtProp.vt == VT_BSTR) { + wcstombs(diskInfoArray[index].InstallDate, vtProp.bstrVal, sizeof(diskInfoArray[index].InstallDate)); + } + VariantClear(&vtProp); + + // Extract MediaType + hr = pclsObj->lpVtbl->Get(pclsObj, L"MediaType", 0, &vtProp, 0, 0); + if (SUCCEEDED(hr) && vtProp.vt == VT_BSTR) { + wcstombs(diskInfoArray[index].MediaType, vtProp.bstrVal, sizeof(diskInfoArray[index].MediaType)); + } + VariantClear(&vtProp); + + // Extract NeedsCleaning + hr = 
pclsObj->lpVtbl->Get(pclsObj, L"NeedsCleaning", 0, &vtProp, 0, 0); + if (SUCCEEDED(hr) && (vtProp.vt == VT_BOOL)) { + diskInfoArray[index].NeedsCleaning = vtProp.boolVal; + } + VariantClear(&vtProp); + + pclsObj->lpVtbl->Release(pclsObj); + index++; + } + + pEnumerator->lpVtbl->Release(pEnumerator); + + return index; +} + +#endif \ No newline at end of file diff --git a/src/libnetdata/os/windows-wmi/windows-wmi-GetDiskDriveInfo.h b/src/libnetdata/os/windows-wmi/windows-wmi-GetDiskDriveInfo.h new file mode 100644 index 000000000..cc9b46067 --- /dev/null +++ b/src/libnetdata/os/windows-wmi/windows-wmi-GetDiskDriveInfo.h @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_WINDOWS_WMI_GETDISKDRIVEINFO_H +#define NETDATA_WINDOWS_WMI_GETDISKDRIVEINFO_H + +#include "windows-wmi.h" + +#if defined(OS_WINDOWS) + +typedef struct { + char DeviceID[256]; + char Model[256]; + char Caption[256]; + char Name[256]; + int Partitions; + unsigned long long Size; + char Status[64]; + int Availability; + int Index; + char Manufacturer[256]; + char InstallDate[64]; + char MediaType[128]; + bool NeedsCleaning; +} DiskDriveInfoWMI; + +size_t GetDiskDriveInfo(DiskDriveInfoWMI *diskInfoArray, size_t array_size); + +#endif + +#endif //NETDATA_WINDOWS_WMI_GETDISKDRIVEINFO_H diff --git a/src/libnetdata/os/windows-wmi/windows-wmi.c b/src/libnetdata/os/windows-wmi/windows-wmi.c new file mode 100644 index 000000000..02d3faa7c --- /dev/null +++ b/src/libnetdata/os/windows-wmi/windows-wmi.c @@ -0,0 +1,110 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "windows-wmi.h" + +#if defined(OS_WINDOWS) + +__thread ND_WMI nd_wmi = { 0 }; + +HRESULT InitializeWMI(void) { + if(nd_wmi.pLoc && nd_wmi.pSvc) return S_OK; + CleanupWMI(); + + IWbemLocator **pLoc = &nd_wmi.pLoc; + IWbemServices **pSvc = &nd_wmi.pSvc; + + HRESULT hr; + + // Initialize COM + hr = CoInitializeEx(0, COINIT_MULTITHREADED); + if (FAILED(hr)) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "Failed to initialize COM library. Error code = 0x%X", hr); + CleanupWMI(); + return hr; + } + + // Set COM security levels + hr = CoInitializeSecurity( + NULL, + -1, + NULL, + NULL, + RPC_C_AUTHN_LEVEL_DEFAULT, + RPC_C_IMP_LEVEL_IMPERSONATE, + NULL, + EOAC_NONE, + NULL + ); + if (FAILED(hr)) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "Failed to initialize security. Error code = 0x%X", hr); + CleanupWMI(); + return hr; + } + + // Obtain the initial locator to WMI + hr = CoCreateInstance( + &CLSID_WbemLocator, 0, + CLSCTX_INPROC_SERVER, + &IID_IWbemLocator, (LPVOID *)pLoc + ); + if (FAILED(hr)) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "Failed to create IWbemLocator object. Error code = 0x%X", hr); + CleanupWMI(); + return hr; + } + + // Connect to WMI + BSTR namespacePath = SysAllocString(L"ROOT\\CIMV2"); + hr = (*pLoc)->lpVtbl->ConnectServer( + *pLoc, + namespacePath, + NULL, + NULL, + 0, + 0, + 0, + 0, + pSvc + ); + SysFreeString(namespacePath); + + if (FAILED(hr)) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "Could not connect to WMI server. Error code = 0x%X", hr); + CleanupWMI(); + return hr; + } + + // Set security levels on the proxy + hr = CoSetProxyBlanket( + (IUnknown *)*pSvc, + RPC_C_AUTHN_WINNT, + RPC_C_AUTHZ_NONE, + NULL, + RPC_C_AUTHN_LEVEL_CALL, + RPC_C_IMP_LEVEL_IMPERSONATE, + NULL, + EOAC_NONE + ); + if (FAILED(hr)) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "Could not set proxy blanket. 
Error code = 0x%X", hr); + CleanupWMI(); + return hr; + } + + return S_OK; +} + +void CleanupWMI(void) { + if(nd_wmi.pLoc) + nd_wmi.pLoc->lpVtbl->Release(nd_wmi.pLoc); + + if (nd_wmi.pSvc) + nd_wmi.pSvc->lpVtbl->Release(nd_wmi.pSvc); + + nd_wmi.pLoc = NULL; + nd_wmi.pSvc = NULL; + + CoUninitialize(); +} + +#endif \ No newline at end of file diff --git a/src/libnetdata/os/windows-wmi/windows-wmi.h b/src/libnetdata/os/windows-wmi/windows-wmi.h new file mode 100644 index 000000000..69d7244aa --- /dev/null +++ b/src/libnetdata/os/windows-wmi/windows-wmi.h @@ -0,0 +1,23 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_WINDOWS_WMI_H +#define NETDATA_WINDOWS_WMI_H + +#include "../../libnetdata.h" + +#if defined(OS_WINDOWS) +typedef struct { + IWbemLocator *pLoc; + IWbemServices *pSvc; +} ND_WMI; + +extern __thread ND_WMI nd_wmi; + +HRESULT InitializeWMI(void); +void CleanupWMI(void); + +#include "windows-wmi-GetDiskDriveInfo.h" + +#endif + +#endif //NETDATA_WINDOWS_WMI_H diff --git a/src/libnetdata/parsers/duration.c b/src/libnetdata/parsers/duration.c new file mode 100644 index 000000000..16dc5170c --- /dev/null +++ b/src/libnetdata/parsers/duration.c @@ -0,0 +1,252 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "duration.h" + +#ifdef NSEC_PER_USEC +#undef NSEC_PER_USEC +#endif +#define NSEC_PER_USEC (1000ULL) + +#ifdef USEC_PER_MS +#undef USEC_PER_MS +#endif +#define USEC_PER_MS (1000ULL) + +#ifdef NSEC_PER_SEC +#undef NSEC_PER_SEC +#endif +#define NSEC_PER_SEC (1000000000ULL) + +#define NSEC_PER_MS (USEC_PER_MS * NSEC_PER_USEC) +#define NSEC_PER_MIN (NSEC_PER_SEC * 60ULL) +#define NSEC_PER_HOUR (NSEC_PER_MIN * 60ULL) +#define NSEC_PER_DAY (NSEC_PER_HOUR * 24ULL) +#define NSEC_PER_WEEK (NSEC_PER_DAY * 7ULL) +#define NSEC_PER_MONTH (NSEC_PER_DAY * 30ULL) +#define NSEC_PER_QUARTER (NSEC_PER_MONTH * 3ULL) + +// more accurate, but not an integer multiple of days, weeks, months +#define NSEC_PER_YEAR (NSEC_PER_DAY * 365ULL) + +// Define a structure to map time units to their multipliers +static const struct duration_unit { + const char *unit; + const bool formatter; // true when this unit should be used when formatting to string + const snsec_t multiplier; +} units[] = { + + // IMPORTANT: the order of this array is crucial! + // The array should be sorted from the smaller unit to the biggest unit. 
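+    // Both the parser and the formatter rely on this table: duration_find_unit()
+    // does a linear scan over it, while duration_snprintf() walks it from the
+    // largest unit down to the smallest, emitting only entries flagged with
+    // .formatter = true — so both the ordering and the flags shape its output.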
+ + { .unit = "ns", .formatter = true, .multiplier = 1 }, // UCUM + { .unit = "us", .formatter = true, .multiplier = NSEC_PER_USEC }, // UCUM + { .unit = "ms", .formatter = true, .multiplier = NSEC_PER_MS }, // UCUM + { .unit = "s", .formatter = true, .multiplier = NSEC_PER_SEC }, // UCUM + { .unit = "m", .formatter = true, .multiplier = NSEC_PER_MIN }, // - + { .unit = "min", .formatter = false, .multiplier = NSEC_PER_MIN }, // UCUM + { .unit = "h", .formatter = true, .multiplier = NSEC_PER_HOUR }, // UCUM + { .unit = "d", .formatter = true, .multiplier = NSEC_PER_DAY }, // UCUM + { .unit = "w", .formatter = false, .multiplier = NSEC_PER_WEEK }, // - + { .unit = "wk", .formatter = false, .multiplier = NSEC_PER_WEEK }, // UCUM + { .unit = "mo", .formatter = true, .multiplier = NSEC_PER_MONTH }, // UCUM + { .unit = "M", .formatter = false, .multiplier = NSEC_PER_MONTH }, // compatibility + { .unit = "q", .formatter = false, .multiplier = NSEC_PER_QUARTER }, // - + { .unit = "y", .formatter = true, .multiplier = NSEC_PER_YEAR }, // - + { .unit = "Y", .formatter = false, .multiplier = NSEC_PER_YEAR }, // compatibility + { .unit = "a", .formatter = false, .multiplier = NSEC_PER_YEAR }, // UCUM +}; + +static inline const struct duration_unit *duration_find_unit(const char *unit) { + if(!unit || !*unit) + unit = "ns"; + + for (size_t i = 0; i < sizeof(units) / sizeof(units[0]); i++) { + const struct duration_unit *du = &units[i]; + if ((uint8_t)unit[0] == (uint8_t)du->unit[0] && strcmp(unit, du->unit) == 0) + return du; + } + + return NULL; +} + +inline int64_t duration_round_to_resolution(int64_t value, int64_t resolution) { + if(value > 0) + return (value + ((resolution - 1) / 2)) / resolution; + + if(value < 0) + return (value - ((resolution - 1) / 2)) / resolution; + + return 0; +} + +// ------------------------------------------------------------------------------------------------------------------- +// parse a duration string + +bool duration_parse(const char *duration, int64_t *result, const char *default_unit) { + if (!duration || !*duration) { + *result = 0; + return false; + } + + const struct duration_unit *du_def = duration_find_unit(default_unit); + if(!du_def) { + *result = 0; + return false; + } + + int64_t sign = 1; + const char *s = duration; + while (isspace((uint8_t)*s)) s++; + if(*s == '-') { + s++; + sign = -1; + } + + int64_t v = 0; + + while (*s) { + // Skip leading spaces + while (isspace((uint8_t)*s)) s++; + + // compatibility + if(*s == 'n' && strcmp(s, "never") == 0) { + *result = 0; + return true; + } + + if(*s == 'o' && strcmp(s, "off") == 0) { + *result = 0; + return true; + } + + // Parse the number + const char *number_start = s; + NETDATA_DOUBLE value = str2ndd(s, (char **)&s); + + // If no valid number found, return default + if (s == number_start) { + *result = 0; + return false; + } + + // Skip spaces between number and unit + while (isspace((uint8_t)*s)) s++; + + const char *unit_start = s; + while (isalpha((uint8_t)*s)) s++; + + char unit[4]; + size_t unit_len = s - unit_start; + const struct duration_unit *du; + if (unit_len == 0) + du = du_def; + else { + if (unit_len >= sizeof(unit)) unit_len = sizeof(unit) - 1; + strncpyz(unit, unit_start, unit_len); + du = duration_find_unit(unit); + if(!du) { + *result = 0; + return false; + } + } + + v += (int64_t)round(value * (NETDATA_DOUBLE)du->multiplier); + } + + v *= sign; + + if(du_def->multiplier == 1) + *result = v; + else + *result = duration_round_to_resolution(v, du_def->multiplier); + + return true; +} + 
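For illustration, here is a minimal usage sketch of the parser above (an editor's addition, not part of the patch; it assumes only the `duration_parse()` API declared in `duration.h` later in this patch):

```c
#include <stdio.h>
#include <inttypes.h>
#include "duration.h"   // declares duration_parse()

int main(void) {
    int64_t secs = 0;

    // "1h-15m" is 1 hour minus 15 minutes; with "s" as the default unit,
    // the nanosecond total is rounded to whole seconds: 2700.
    if (duration_parse("1h-15m", &secs, "s"))
        printf("%" PRIi64 "s\n", secs);   // prints: 2700s

    // "off" and "never" are accepted for compatibility and both yield 0.
    if (duration_parse("never", &secs, "s"))
        printf("%" PRIi64 "s\n", secs);   // prints: 0s

    return 0;
}
```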
+// -------------------------------------------------------------------------------------------------------------------- +// generate a string to represent a duration + +ssize_t duration_snprintf(char *dst, size_t dst_size, int64_t value, const char *unit, bool add_spaces) { + if (!dst || dst_size == 0) return -1; + if (dst_size == 1) { + dst[0] = '\0'; + return -2; + } + + if(value == 0) + return snprintfz(dst, dst_size, "off"); + + const char *sign = ""; + if(value < 0) { + sign = "-"; + value = -value; + } + + const struct duration_unit *du_min = duration_find_unit(unit); + size_t offset = 0; + + int64_t nsec = value * du_min->multiplier; + + // Iterate through units from largest to smallest + for (size_t i = sizeof(units) / sizeof(units[0]) - 1; i > 0 && nsec > 0; i--) { + const struct duration_unit *du = &units[i]; + if(!units[i].formatter && du != du_min) + continue; + + // IMPORTANT: + // The week (7 days) is not aligned to the quarter (~91 days) or the year (365.25 days). + // To make sure that the value returned can be parsed back without loss, + // we have to round the value per unit (inside this loop), not globally. + // Otherwise, we have to make sure that all larger units are integer multiples of the smaller ones. + + int64_t multiplier = units[i].multiplier; + int64_t rounded = (du == du_min) ? (duration_round_to_resolution(nsec, multiplier) * multiplier) : nsec; + + int64_t unit_count = rounded / multiplier; + if (unit_count > 0) { + const char *space = (add_spaces && offset) ? " " : ""; + int written = snprintfz(dst + offset, dst_size - offset, + "%s%s%" PRIi64 "%s", space, sign, unit_count, units[i].unit); + + if (written < 0) + return -3; + + sign = ""; + offset += written; + + if (offset >= dst_size) { + // buffer overflow + return (ssize_t)offset; + } + + if(unit_count * multiplier >= nsec) + break; + else + nsec -= unit_count * multiplier; + } + + if(du == du_min) + // we should not go to smaller units + break; + } + + if (offset == 0) + // nothing has been written + offset = snprintfz(dst, dst_size, "off"); + + return (ssize_t)offset; +} + +// -------------------------------------------------------------------------------------------------------------------- +// compatibility for parsing seconds in int. 
+
+bool duration_parse_seconds(const char *str, int *result) {
+    int64_t v;
+
+    if(duration_parse_time_t(str, &v)) {
+        *result = (int)v;
+        return true;
+    }
+
+    return false;
+}
diff --git a/src/libnetdata/parsers/duration.h b/src/libnetdata/parsers/duration.h
new file mode 100644
index 000000000..b95da5d2f
--- /dev/null
+++ b/src/libnetdata/parsers/duration.h
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef LIBNETDATA_PARSERS_DURATION_H
+#define LIBNETDATA_PARSERS_DURATION_H
+
+#include "parsers.h"
+
+int64_t duration_round_to_resolution(int64_t value, int64_t resolution);
+
+// duration (string to number)
+bool duration_parse(const char *duration, int64_t *result, const char *default_unit);
+#define duration_parse_nsec_t(duration, ns_ptr) duration_parse(duration, ns_ptr, "ns")
+#define duration_parse_usec_t(duration, us_ptr) duration_parse(duration, us_ptr, "us")
+#define duration_parse_msec_t(duration, ms_ptr) duration_parse(duration, ms_ptr, "ms")
+#define duration_parse_time_t(duration, secs_ptr) duration_parse(duration, secs_ptr, "s")
+#define duration_parse_mins(duration, mins_ptr) duration_parse(duration, mins_ptr, "m")
+#define duration_parse_hours(duration, hours_ptr) duration_parse(duration, hours_ptr, "h")
+#define duration_parse_days(duration, days_ptr) duration_parse(duration, days_ptr, "d")
+
+// duration (number to string)
+ssize_t duration_snprintf(char *dst, size_t dst_size, int64_t value, const char *unit, bool add_spaces);
+#define duration_snprintf_nsec_t(dst, dst_size, ns) duration_snprintf(dst, dst_size, ns, "ns", false)
+#define duration_snprintf_usec_t(dst, dst_size, us) duration_snprintf(dst, dst_size, us, "us", false)
+#define duration_snprintf_msec_t(dst, dst_size, ms) duration_snprintf(dst, dst_size, ms, "ms", false)
+#define duration_snprintf_time_t(dst, dst_size, secs) duration_snprintf(dst, dst_size, secs, "s", false)
+#define duration_snprintf_mins(dst, dst_size, mins) duration_snprintf(dst, dst_size, mins, "m", false)
+#define duration_snprintf_hours(dst, dst_size, hours) duration_snprintf(dst, dst_size, hours, "h", false)
+#define duration_snprintf_days(dst, dst_size, days) duration_snprintf(dst, dst_size, days, "d", false)
+
+bool duration_parse_seconds(const char *str, int *result);
+
+#endif //LIBNETDATA_PARSERS_DURATION_H
diff --git a/src/libnetdata/parsers/duration.html b/src/libnetdata/parsers/duration.html
new file mode 100644
index 000000000..8f6f8a416
--- /dev/null
+++ b/src/libnetdata/parsers/duration.html
@@ -0,0 +1,205 @@
[duration.html: a 205-line interactive "Duration Converter" test page. Its HTML/JavaScript markup was stripped from this extract; only the page title and the table headers (Unit, Value, Formatted, Check) survive.]
diff --git a/src/libnetdata/parsers/durations.md b/src/libnetdata/parsers/durations.md
new file mode 100644
index 000000000..e952faa1a
--- /dev/null
+++ b/src/libnetdata/parsers/durations.md
@@ -0,0 +1,94 @@
+## Durations in Netdata
+
+Netdata provides a flexible and powerful way to specify durations for configuration options and operations such as alert definitions and database retention. Durations can be expressed in a variety of units, ranging from nanoseconds to years, allowing users to define time intervals in a human-readable format.
+
+### Supported Duration Units
+
+Netdata supports a wide range of duration units. The system follows the Unified Code for Units of Measure (UCUM) standard where applicable. Below is a table of all the supported units, their corresponding representations, and their compatibility:
+
+| Symbol | Description  |  Value   | Compatibility | Formatter |
+|:------:|:------------:|:--------:|:-------------:|:---------:|
+|  `ns`  | Nanoseconds  |  `1ns`   |     UCUM      |  **Yes**  |
+|  `us`  | Microseconds | `1000ns` |     UCUM      |  **Yes**  |
+|  `ms`  | Milliseconds | `1000us` |     UCUM      |  **Yes**  |
+|  `s`   | Seconds      | `1000ms` |     UCUM      |  **Yes**  |
+|  `m`   | Minutes      |  `60s`   |    Natural    |  **Yes**  |
+|  `min` | Minutes      |  `60s`   |     UCUM      |    No     |
+|  `h`   | Hours        |  `60m`   |     UCUM      |  **Yes**  |
+|  `d`   | Days         |  `24h`   |     UCUM      |  **Yes**  |
+|  `w`   | Weeks        |  `7d`    |    Natural    |    No     |
+|  `wk`  | Weeks        |  `7d`    |     UCUM      |    No     |
+|  `mo`  | Months       |  `30d`   |     UCUM      |  **Yes**  |
+|  `M`   | Months       |  `30d`   |   Backwards   |    No     |
+|  `q`   | Quarters     |  `3mo`   |    Natural    |    No     |
+|  `y`   | Years        |  `365d`  |    Natural    |  **Yes**  |
+|  `Y`   | Years        |  `365d`  |   Backwards   |    No     |
+|  `a`   | Years        |  `365d`  |     UCUM      |    No     |
+
+- **UCUM**: The unit is specified in the Unified Code for Units of Measure (UCUM) standard.
+- **Natural**: We feel that this is more natural for expressing durations with single-letter units.
+- **Backwards**: This unit has been used in the past by Netdata, and we support it for backwards compatibility.
+
+### Duration Expression Format
+
+Netdata allows users to express durations in both simple and complex formats.
+
+- **Simple Formats**: A duration can be specified using a number followed by a unit, such as `5m` (5 minutes), `2h` (2 hours), or `1d` (1 day). Fractional numbers are also supported, such as `1.5d`, `3.5mo` or `1.2y`.
+
+- **Complex Formats**: A duration can also be composed of multiple units added together. For example:
+  - `1y2mo3w4d` represents 1 year, 2 months, 3 weeks, and 4 days.
+  - `15d-12h` represents 15 days minus 12 hours (which equals 14 days and 12 hours).
+
+Each number given in a duration can be either positive or negative. For example, `1h15m` is 1 hour and 15 minutes, but `1h-15m` results in `45m`.
+
+The same unit can be given multiple times, so that `1d0.5d` is `1d12h` and `1d-0.5d` is `12h`.
+
+The order of units in an expression is irrelevant, so `1m2h3d` is the same as `3d2h1m`.
+
+The system will parse durations that contain spaces, but we suggest writing them in compact form, without spaces. This is especially important in alert configurations, since spaces in durations affect how the surrounding expressions are tokenized.
+
+### Duration Rounding
+
+Netdata provides various functions to parse and round durations according to specific needs:
+
+- **Default Rounding to Seconds**: Most durations in Netdata are rounded to the nearest second. For example, a duration of `1.4s` rounds to `1s`, while `1.5s` rounds to `2s`.
+
+- **Rounding to Larger Units**: In some cases, such as database retention, durations are rounded to larger units, like days. Even when rounding to a larger unit, durations can still be expressed in smaller units (e.g., `24h86400s` parses to `2d`).
+
+### Maximum and Minimum Duration Limits
+
+Netdata's duration expressions can handle durations ranging from the minimum possible value of `-INT64_MAX` to the maximum of `INT64_MAX` in nanoseconds. This range translates to durations of approximately -292 years to +292 years.
+
+### Inconsistencies in Duration Units
+
+While Netdata provides a flexible system for specifying durations, some inconsistencies arise from the way different units are defined:
+
+- **1 Year (`y`) = 365 Days (`d`)**: In Netdata, a year is defined as 365 days. This is an approximation, since the average year is about 365.25 days.
+
+- **1 Month (`mo`) = 30 Days (`d`)**: Similarly, a month in Netdata is defined as 30 days, which is also an approximation. In reality, months vary in length (28 to 31 days).
+
+- **1 Quarter (`q`) = 3 Months (`mo`) = 90 Days (`d`)**: A quarter is defined as 3 months, or 90 days, which aligns with the approximation of each month being 30 days.
+
+These definitions can lead to some unexpected results when performing arithmetic with durations:
+
+**Example of Inconsistency**:
+
+`1y-1d` in Netdata calculates to `364d`, which can also be expressed as `12mo4d`, because `1y = 365d` and `1mo = 30d`. This is inconsistent with calendar intuition, because under these approximations `1y` equals `12mo5d` or `4q5d`, not exactly twelve months.
+
+### Negative Durations
+
+When the first character of a duration expression is the minus sign, Netdata parses the entire expression as positive and then negates the result. For example, `-1m15s` is `-75s`, not `-45s`. To get `-45s`, the expression should be `-1m-15s`. So the initial `-` is treated like `-(expression)`.
+
+The same rule is applied when generating duration expressions.
+
+### Example Duration Expressions
+
+Here are some examples of valid duration expressions:
+
+1. **`30s`**: 30 seconds.
+2. **`5m`**: 5 minutes.
+3. **`2h30m`**: 2 hours and 30 minutes.
+4. **`1.5d`**: 1 day and 12 hours.
+5. **`1w3d4h`**: 1 week, 3 days, and 4 hours.
+6. **`1y2mo3d`**: 1 year, 2 months, and 3 days.
+7. **`15d-12h`**: 14 days and 12 hours.
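As an editor's illustration of the round-trip behavior this document describes (not part of the patch; it assumes only the `duration_parse()`/`duration_snprintf()` API introduced above):

```c
#include <stdio.h>
#include <inttypes.h>
#include "duration.h"   // the header added by this patch

int main(void) {
    int64_t days = 0;

    // With "d" as the default unit, "1y2mo3w4d" parses to
    // 365 + 60 + 21 + 4 = 450 days.
    if (duration_parse("1y2mo3w4d", &days, "d"))
        printf("parsed:    %" PRIi64 "d\n", days);   // parsed:    450d

    // Formatting picks the largest formatter units that reproduce the
    // value exactly: 450d = 1y (365d) + 2mo (60d) + 25d.
    char buf[64];
    if (duration_snprintf(buf, sizeof(buf), days, "d", false) > 0)
        printf("formatted: %s\n", buf);              // formatted: 1y2mo25d

    return 0;
}
```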
diff --git a/src/libnetdata/parsers/entries.c b/src/libnetdata/parsers/entries.c new file mode 100644 index 000000000..d6ed31de1 --- /dev/null +++ b/src/libnetdata/parsers/entries.c @@ -0,0 +1,183 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "entries.h" + +// Define multipliers for base 10 (decimal) units +#define ENTRIES_MULTIPLIER_BASE10 1000ULL +#define ENTRIES_MULTIPLIER_K (ENTRIES_MULTIPLIER_BASE10) +#define ENTRIES_MULTIPLIER_M (ENTRIES_MULTIPLIER_K * ENTRIES_MULTIPLIER_BASE10) +#define ENTRIES_MULTIPLIER_G (ENTRIES_MULTIPLIER_M * ENTRIES_MULTIPLIER_BASE10) +#define ENTRIES_MULTIPLIER_T (ENTRIES_MULTIPLIER_G * ENTRIES_MULTIPLIER_BASE10) +#define ENTRIES_MULTIPLIER_P (ENTRIES_MULTIPLIER_T * ENTRIES_MULTIPLIER_BASE10) +#define ENTRIES_MULTIPLIER_E (ENTRIES_MULTIPLIER_P * ENTRIES_MULTIPLIER_BASE10) +#define ENTRIES_MULTIPLIER_Z (ENTRIES_MULTIPLIER_E * ENTRIES_MULTIPLIER_BASE10) +#define ENTRIES_MULTIPLIER_Y (ENTRIES_MULTIPLIER_Z * ENTRIES_MULTIPLIER_BASE10) + +// Define a structure to map size units to their multipliers +static const struct size_unit { + const char *unit; + const bool formatter; // true when this unit should be used when formatting to string + const uint64_t multiplier; +} entries_units[] = { + // the order of this table is important: smaller to bigger units! + + { .unit = "", .formatter = true, .multiplier = 1ULL }, + { .unit = "k", .formatter = false, .multiplier = ENTRIES_MULTIPLIER_K }, + { .unit = "K", .formatter = true, .multiplier = ENTRIES_MULTIPLIER_K }, + { .unit = "M", .formatter = true, .multiplier = ENTRIES_MULTIPLIER_M }, + { .unit = "G", .formatter = true, .multiplier = ENTRIES_MULTIPLIER_G }, + { .unit = "T", .formatter = true, .multiplier = ENTRIES_MULTIPLIER_T }, + { .unit = "P", .formatter = true, .multiplier = ENTRIES_MULTIPLIER_P }, + { .unit = "E", .formatter = true, .multiplier = ENTRIES_MULTIPLIER_E }, + { .unit = "Z", .formatter = true, .multiplier = ENTRIES_MULTIPLIER_Z }, + { .unit = "Y", .formatter = true, .multiplier = ENTRIES_MULTIPLIER_Y }, +}; + +static inline const struct size_unit *entries_find_unit(const char *unit) { + if (!unit || !*unit) unit = ""; + + for (size_t i = 0; i < sizeof(entries_units) / sizeof(entries_units[0]); i++) { + const struct size_unit *su = &entries_units[i]; + if ((uint8_t)unit[0] == (uint8_t)su->unit[0] && strcmp(unit, su->unit) == 0) + return su; + } + + return NULL; +} + +static inline double entries_round_to_resolution_dbl2(uint64_t value, uint64_t resolution) { + double converted = (double)value / (double)resolution; + return round(converted * 100.0) / 100.0; +} + +static inline uint64_t entries_round_to_resolution_int(uint64_t value, uint64_t resolution) { + return (value + (resolution / 2)) / resolution; +} + +// ------------------------------------------------------------------------------------------------------------------- +// parse a size string + +bool entries_parse(const char *entries_str, uint64_t *result, const char *default_unit) { + if (!entries_str || !*entries_str) { + *result = 0; + return false; + } + + const struct size_unit *su_def = entries_find_unit(default_unit); + if(!su_def) { + *result = 0; + return false; + } + + const char *s = entries_str; + + // Skip leading spaces + while (isspace((uint8_t)*s)) s++; + + if(strcmp(s, "off") == 0) { + *result = 0; + return true; + } + + // Parse the number + const char *number_start = s; + NETDATA_DOUBLE value = strtondd(s, (char **)&s); + + // If no valid number found, return false + if (s == number_start || value < 0) { + 
*result = 0;
+        return false;
+    }
+
+    // Skip spaces between number and unit
+    while (isspace((uint8_t)*s)) s++;
+
+    const char *unit_start = s;
+    while (isalpha((uint8_t)*s)) s++;
+
+    char unit[4];
+    size_t unit_len = s - unit_start;
+    const struct size_unit *su;
+    if (unit_len == 0)
+        su = su_def;
+    else {
+        if (unit_len >= sizeof(unit)) unit_len = sizeof(unit) - 1;
+        strncpy(unit, unit_start, unit_len);
+        unit[unit_len] = '\0';
+        su = entries_find_unit(unit);
+        if (!su) {
+            *result = 0;
+            return false;
+        }
+    }
+
+    uint64_t entries = (uint64_t)round(value * (NETDATA_DOUBLE)su->multiplier);
+    *result = entries_round_to_resolution_int(entries, su_def->multiplier);
+
+    return true;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// generate a string to represent an entries count
+
+ssize_t entries_snprintf(char *dst, size_t dst_size, uint64_t value, const char *unit, bool accurate) {
+    if (!dst || dst_size == 0) return -1;
+    if (dst_size == 1) {
+        dst[0] = '\0';
+        return -2;
+    }
+
+    if (value == 0)
+        return snprintfz(dst, dst_size, "off");
+
+    const struct size_unit *su_def = entries_find_unit(unit);
+    if(!su_def) return -3;
+
+    // use the unit's multiplier to convert to a raw entries count
+    uint64_t entries = value * su_def->multiplier;
+
+    // Find the best unit to represent the value with up to 2 fractional digits
+    const struct size_unit *su_best = su_def;
+    for (size_t i = 0; i < sizeof(entries_units) / sizeof(entries_units[0]); i++) {
+        const struct size_unit *su = &entries_units[i];
+        if (su->multiplier < su_def->multiplier ||       // the multiplier is too small
+            (!su->formatter && su != su_def) ||          // it is not to be used in formatting (except our unit)
+            (entries < su->multiplier && su != su_def) ) // the converted value will be <1.0
+            continue;
+
+        double converted = entries_round_to_resolution_dbl2(entries, su->multiplier);
+
+        uint64_t reversed = (uint64_t)(converted * (double)su->multiplier);
+
+        if(accurate) {
+            // no precision loss is required
+            if (reversed == entries)
+                // no precision loss, this is good to use
+                su_best = su;
+        }
+        else {
+            if(converted > 1.0)
+                su_best = su;
+        }
+    }
+
+    double converted = entries_round_to_resolution_dbl2(entries, su_best->multiplier);
+
+    // print it with 0, 1 or 2 fractional digits
+    int written;
+    if(converted == (double)((uint64_t)converted))
+        written = snprintfz(dst, dst_size, "%.0f%s", converted, su_best->unit);
+    else if(converted * 10.0 == (double)((uint64_t)(converted * 10.0)))
+        written = snprintfz(dst, dst_size, "%.1f%s", converted, su_best->unit);
+    else
+        written = snprintfz(dst, dst_size, "%.2f%s", converted, su_best->unit);
+
+    if (written < 0)
+        return -4;
+
+    if ((size_t)written >= dst_size)
+        return (ssize_t)(dst_size - 1);
+
+    return written;
+}
+
diff --git a/src/libnetdata/parsers/entries.h b/src/libnetdata/parsers/entries.h
new file mode 100644
index 000000000..a90b8f6f4
--- /dev/null
+++ b/src/libnetdata/parsers/entries.h
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef LIBNETDATA_PARSERS_ENTRIES_H
+#define LIBNETDATA_PARSERS_ENTRIES_H
+
+#include "parsers.h"
+
+bool entries_parse(const char *entries_str, uint64_t *result, const char *default_unit);
+#define entries_parse_k(entries_str, k) entries_parse(entries_str, k, "K")
+#define entries_parse_m(entries_str, m) entries_parse(entries_str, m, "M")
+#define entries_parse_g(entries_str, g) entries_parse(entries_str, g, "G")
+
+ssize_t entries_snprintf(char *dst, size_t dst_size, uint64_t value, const char *unit, bool accurate);
+#define entries_snprintf_n(dst, dst_size, value) entries_snprintf(dst, dst_size, value, "", true)
+#define entries_snprintf_k(dst, dst_size, value) entries_snprintf(dst, dst_size, value, "K", true)
+#define entries_snprintf_m(dst, dst_size, value) entries_snprintf(dst, dst_size, value, "M", true)
+#define entries_snprintf_g(dst, dst_size, value) entries_snprintf(dst, dst_size, value, "G", true)
+
+#endif //LIBNETDATA_PARSERS_ENTRIES_H
diff --git a/src/libnetdata/parsers/parsers.h b/src/libnetdata/parsers/parsers.h
new file mode 100644
index 000000000..27b60b040
--- /dev/null
+++ b/src/libnetdata/parsers/parsers.h
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_PARSERS_H
+#define NETDATA_PARSERS_H
+
+#include "../libnetdata.h"
+#include "size.h"
+#include "entries.h"
+#include "duration.h"
+#include "timeframe.h"
+
+#endif //NETDATA_PARSERS_H
diff --git a/src/libnetdata/parsers/size.c b/src/libnetdata/parsers/size.c
new file mode 100644
index 000000000..d3a24c540
--- /dev/null
+++ b/src/libnetdata/parsers/size.c
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "size.h"
+
+// Define multipliers for base 2 (binary) units
+#define SIZE_MULTIPLIER_BASE2 1024ULL
+#define SIZE_MULTIPLIER_KiB (SIZE_MULTIPLIER_BASE2)
+#define SIZE_MULTIPLIER_MiB (SIZE_MULTIPLIER_KiB * SIZE_MULTIPLIER_BASE2)
+#define SIZE_MULTIPLIER_GiB (SIZE_MULTIPLIER_MiB * SIZE_MULTIPLIER_BASE2)
+#define SIZE_MULTIPLIER_TiB (SIZE_MULTIPLIER_GiB * SIZE_MULTIPLIER_BASE2)
+#define SIZE_MULTIPLIER_PiB (SIZE_MULTIPLIER_TiB * SIZE_MULTIPLIER_BASE2)
+//#define SIZE_MULTIPLIER_EiB (SIZE_MULTIPLIER_PiB * SIZE_MULTIPLIER_BASE2)
+//#define SIZE_MULTIPLIER_ZiB (SIZE_MULTIPLIER_EiB * SIZE_MULTIPLIER_BASE2)
+//#define SIZE_MULTIPLIER_YiB (SIZE_MULTIPLIER_ZiB * SIZE_MULTIPLIER_BASE2)
+
+// Define multipliers for base 10 (decimal) units
+#define SIZE_MULTIPLIER_BASE10 1000ULL
+#define SIZE_MULTIPLIER_K (SIZE_MULTIPLIER_BASE10)
+#define SIZE_MULTIPLIER_M (SIZE_MULTIPLIER_K * SIZE_MULTIPLIER_BASE10)
+#define SIZE_MULTIPLIER_G (SIZE_MULTIPLIER_M * SIZE_MULTIPLIER_BASE10)
+#define SIZE_MULTIPLIER_T (SIZE_MULTIPLIER_G * SIZE_MULTIPLIER_BASE10)
+#define SIZE_MULTIPLIER_P (SIZE_MULTIPLIER_T * SIZE_MULTIPLIER_BASE10)
+//#define SIZE_MULTIPLIER_E (SIZE_MULTIPLIER_P * SIZE_MULTIPLIER_BASE10)
+//#define SIZE_MULTIPLIER_Z (SIZE_MULTIPLIER_E * SIZE_MULTIPLIER_BASE10)
+//#define SIZE_MULTIPLIER_Y (SIZE_MULTIPLIER_Z * SIZE_MULTIPLIER_BASE10)
+
+// Define a structure to map size units to their multipliers
+static const struct size_unit {
+    const char *unit;
+    const uint8_t base;
+    const bool formatter; // true when this unit should be used when formatting to string
+    const uint64_t multiplier;
+} size_units[] = {
+    // the order of this table is important: smaller to bigger units!
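+    // Note: size_snprintf() skips any unit whose .base differs from that of the
+    // caller's default unit, so base-2 (KiB, MiB, ...) and base-10 (K, M, ...)
+    // units are never mixed when formatting a value.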
+ + { .unit = "B", .base = 2, .formatter = true, .multiplier = 1ULL }, + { .unit = "k", .base = 10, .formatter = false, .multiplier = SIZE_MULTIPLIER_K }, + { .unit = "K", .base = 10, .formatter = true, .multiplier = SIZE_MULTIPLIER_K }, + { .unit = "KB", .base = 10, .formatter = false, .multiplier = SIZE_MULTIPLIER_K }, + { .unit = "KiB", .base = 2, .formatter = true, .multiplier = SIZE_MULTIPLIER_KiB }, + { .unit = "M", .base = 10, .formatter = true, .multiplier = SIZE_MULTIPLIER_M }, + { .unit = "MB", .base = 10, .formatter = false, .multiplier = SIZE_MULTIPLIER_M }, + { .unit = "MiB", .base = 2, .formatter = true, .multiplier = SIZE_MULTIPLIER_MiB }, + { .unit = "G", .base = 10, .formatter = true, .multiplier = SIZE_MULTIPLIER_G }, + { .unit = "GB", .base = 10, .formatter = false, .multiplier = SIZE_MULTIPLIER_G }, + { .unit = "GiB", .base = 2, .formatter = true, .multiplier = SIZE_MULTIPLIER_GiB }, + { .unit = "T", .base = 10, .formatter = true, .multiplier = SIZE_MULTIPLIER_T }, + { .unit = "TB", .base = 10, .formatter = false, .multiplier = SIZE_MULTIPLIER_T }, + { .unit = "TiB", .base = 2, .formatter = true, .multiplier = SIZE_MULTIPLIER_TiB }, + { .unit = "P", .base = 10, .formatter = true, .multiplier = SIZE_MULTIPLIER_P }, + { .unit = "PB", .base = 10, .formatter = false, .multiplier = SIZE_MULTIPLIER_P }, + { .unit = "PiB", .base = 2, .formatter = true, .multiplier = SIZE_MULTIPLIER_PiB }, +// { .unit = "E", .base = 10, .formatter = true, .multiplier = SIZE_MULTIPLIER_E }, +// { .unit = "EB", .base = 10, .formatter = false, .multiplier = SIZE_MULTIPLIER_E }, +// { .unit = "EiB", .base = 2, .formatter = true, .multiplier = SIZE_MULTIPLIER_EiB }, +// { .unit = "Z", .base = 10, .formatter = true, .multiplier = SIZE_MULTIPLIER_Z }, +// { .unit = "ZB", .base = 10, .formatter = false, .multiplier = SIZE_MULTIPLIER_Z }, +// { .unit = "ZiB", .base = 2, .formatter = true, .multiplier = SIZE_MULTIPLIER_ZiB }, +// { .unit = "Y", .base = 10, .formatter = true, .multiplier = SIZE_MULTIPLIER_Y }, +// { .unit = "YB", .base = 10, .formatter = false, .multiplier = SIZE_MULTIPLIER_Y }, +// { .unit = "YiB", .base = 2, .formatter = true, .multiplier = SIZE_MULTIPLIER_YiB }, +}; + +static inline const struct size_unit *size_find_unit(const char *unit) { + if (!unit || !*unit) unit = "B"; + + for (size_t i = 0; i < sizeof(size_units) / sizeof(size_units[0]); i++) { + const struct size_unit *su = &size_units[i]; + if ((uint8_t)unit[0] == (uint8_t)su->unit[0] && strcmp(unit, su->unit) == 0) + return su; + } + + return NULL; +} + +static inline double size_round_to_resolution_dbl2(uint64_t value, uint64_t resolution) { + double converted = (double)value / (double)resolution; + return round(converted * 100.0) / 100.0; +} + +static inline uint64_t size_round_to_resolution_int(uint64_t value, uint64_t resolution) { + return (value + (resolution / 2)) / resolution; +} + +// ------------------------------------------------------------------------------------------------------------------- +// parse a size string + +bool size_parse(const char *size_str, uint64_t *result, const char *default_unit) { + if (!size_str || !*size_str) { + *result = 0; + return false; + } + + const struct size_unit *su_def = size_find_unit(default_unit); + if(!su_def) { + *result = 0; + return false; + } + + const char *s = size_str; + + // Skip leading spaces + while (isspace((uint8_t)*s)) s++; + + if(strcmp(s, "off") == 0) { + *result = 0; + return true; + } + + // Parse the number + const char *number_start = s; + NETDATA_DOUBLE 
value = strtondd(s, (char **)&s); + + // If no valid number found, return false + if (s == number_start || value < 0) { + *result = 0; + return false; + } + + // Skip spaces between number and unit + while (isspace((uint8_t)*s)) s++; + + const char *unit_start = s; + while (isalpha((uint8_t)*s)) s++; + + char unit[4]; + size_t unit_len = s - unit_start; + const struct size_unit *su; + if (unit_len == 0) + su = su_def; + else { + if (unit_len >= sizeof(unit)) unit_len = sizeof(unit) - 1; + strncpy(unit, unit_start, unit_len); + unit[unit_len] = '\0'; + su = size_find_unit(unit); + if (!su) { + *result = 0; + return false; + } + } + + uint64_t bytes = (uint64_t)round(value * (NETDATA_DOUBLE)su->multiplier); + *result = size_round_to_resolution_int(bytes, su_def->multiplier); + + return true; +} + +// -------------------------------------------------------------------------------------------------------------------- +// generate a string to represent a size + +ssize_t size_snprintf(char *dst, size_t dst_size, uint64_t value, const char *unit, bool accurate) { + if (!dst || dst_size == 0) return -1; + if (dst_size == 1) { + dst[0] = '\0'; + return -2; + } + + if (value == 0) + return snprintfz(dst, dst_size, "off"); + + const struct size_unit *su_def = size_find_unit(unit); + if(!su_def) return -3; + + // use the units multiplier to find the units + uint64_t bytes = value * su_def->multiplier; + + // Find the best unit to represent the size with up to 2 fractional digits + const struct size_unit *su_best = su_def; + for (size_t i = 0; i < sizeof(size_units) / sizeof(size_units[0]); i++) { + const struct size_unit *su = &size_units[i]; + if (su->base != su_def->base || // not the right base + su->multiplier < su_def->multiplier || // the multiplier is too small + (!su->formatter && su != su_def) || // it is not to be used in formatting (except our unit) + (bytes < su->multiplier && su != su_def) ) // the converted value will be <1.0 + continue; + + double converted = size_round_to_resolution_dbl2(bytes, su->multiplier); + + uint64_t reversed_bytes = (uint64_t)(converted * (double)su->multiplier); + + if(accurate) { + // no precision loss is required + if (reversed_bytes == bytes) + // no precision loss, this is good to use + su_best = su; + } + else { + if(converted > 1.0) + su_best = su; + } + } + + double converted = size_round_to_resolution_dbl2(bytes, su_best->multiplier); + + // print it either with 0, 1 or 2 fractional digits + int written; + if(converted == (double)((uint64_t)converted)) + written = snprintfz(dst, dst_size, "%.0f%s", converted, su_best->unit); + else if(converted * 10.0 == (double)((uint64_t)(converted * 10.0))) + written = snprintfz(dst, dst_size, "%.1f%s", converted, su_best->unit); + else + written = snprintfz(dst, dst_size, "%.2f%s", converted, su_best->unit); + + if (written < 0) + return -4; + + if ((size_t)written >= dst_size) + return (ssize_t)(dst_size - 1); + + return written; +} + diff --git a/src/libnetdata/parsers/size.h b/src/libnetdata/parsers/size.h new file mode 100644 index 000000000..6abfe7235 --- /dev/null +++ b/src/libnetdata/parsers/size.h @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef LIBNETDATA_PARSERS_SIZE_H +#define LIBNETDATA_PARSERS_SIZE_H + +#include "parsers.h" + +bool size_parse(const char *size_str, uint64_t *result, const char *default_unit); +#define size_parse_bytes(size_str, bytes) size_parse(size_str, bytes, "B") +#define size_parse_kb(size_str, kb) size_parse(size_str, kb, "KiB") +#define 
size_parse_mb(size_str, mb) size_parse(size_str, mb, "MiB")
+#define size_parse_gb(size_str, gb) size_parse(size_str, gb, "GiB")
+
+ssize_t size_snprintf(char *dst, size_t dst_size, uint64_t value, const char *unit, bool accurate);
+#define size_snprintf_bytes(dst, dst_size, value) size_snprintf(dst, dst_size, value, "B", true)
+#define size_snprintf_kb(dst, dst_size, value) size_snprintf(dst, dst_size, value, "KiB", true)
+#define size_snprintf_mb(dst, dst_size, value) size_snprintf(dst, dst_size, value, "MiB", true)
+#define size_snprintf_gb(dst, dst_size, value) size_snprintf(dst, dst_size, value, "GiB", true)
+
+#endif //LIBNETDATA_PARSERS_SIZE_H
diff --git a/src/libnetdata/parsers/sizes.md b/src/libnetdata/parsers/sizes.md
new file mode 100644
index 000000000..ac9e09053
--- /dev/null
+++ b/src/libnetdata/parsers/sizes.md
@@ -0,0 +1,52 @@
+## Data Sizes in Netdata
+
+Netdata provides a flexible system for specifying and formatting data sizes, used in various configurations and operations such as disk space management and memory usage. This system allows users to specify data sizes in a human-readable format using multiple units from bytes to pebibytes, supporting both binary (base-2) and decimal (base-10) standards. All units are UCUM-based for consistency and clarity.
+
+### Supported Size Units
+
+The following table lists all supported units and their corresponding values:
+
+| Symbol | Description |   Value   |  Base   | Formatter |
+|:------:|:-----------:|:---------:|:-------:|:---------:|
+|  `B`   |    Bytes    |   `1B`    |    -    |  **Yes**  |
+|  `k`   |  Kilobytes  |  `1000B`  | Base-10 |    No     |
+|  `K`   |  Kilobytes  |  `1000B`  | Base-10 |    No     |
+|  `KB`  |  Kilobytes  |  `1000B`  | Base-10 |    No     |
|  `KiB` |  Kibibytes  |  `1024B`  | Base-2  |  **Yes**  |
+|  `M`   |  Megabytes  |  `1000K`  | Base-10 |    No     |
+|  `MB`  |  Megabytes  |  `1000K`  | Base-10 |    No     |
+|  `MiB` |  Mebibytes  | `1024KiB` | Base-2  |  **Yes**  |
+|  `G`   |  Gigabytes  |  `1000M`  | Base-10 |    No     |
+|  `GB`  |  Gigabytes  |  `1000M`  | Base-10 |    No     |
+|  `GiB` |  Gibibytes  | `1024MiB` | Base-2  |  **Yes**  |
+|  `T`   |  Terabytes  |  `1000G`  | Base-10 |    No     |
+|  `TB`  |  Terabytes  |  `1000G`  | Base-10 |    No     |
+|  `TiB` |  Tebibytes  | `1024GiB` | Base-2  |  **Yes**  |
+|  `P`   |  Petabytes  |  `1000T`  | Base-10 |    No     |
+|  `PB`  |  Petabytes  |  `1000T`  | Base-10 |    No     |
+|  `PiB` |  Pebibytes  | `1024TiB` | Base-2  |  **Yes**  |
+
+### Size Expression Format
+
+Netdata allows users to express sizes using a number followed by a unit, such as `500MiB` (500 Mebibytes), `1GB` (1 Gigabyte), or `256K` (256 Kilobytes).
+
+- **Case Sensitivity**: Note that the parsing of units is case-sensitive.
+
+### Size Formatting
+
+Netdata formats a numeric size value (in bytes) into a human-readable string with an appropriate unit. The formatter's goal is to select the largest unit that can represent the size exactly, using up to two fractional digits. If two fractional digits are not enough to precisely represent the byte count, the formatter will use a smaller unit until it can accurately express the size, eventually falling back to bytes (`B`) if necessary.
+
+When formatting, Netdata prefers Base-2 units (`KiB`, `MiB`, `GiB`, etc.).
+
+- **Examples of Size Formatting**:
+  - **10,485,760 bytes** is formatted as `10MiB` (10 Mebibytes).
+  - **1,024 bytes** is formatted as `1KiB` (1 Kibibyte).
+  - **1,500 bytes** remains formatted as `1500B` because it cannot be precisely represented in `KiB` or any larger unit using up to two fractional digits.
+
+### Example Size Expressions
+
+Here are some examples of valid size expressions:
+
+1. 
`1024B`: 1024 bytes. +2. `1KiB`: 1024 bytes. +3. `5MiB`: 5 mebibytes (5 * 1024 * 1024 bytes). diff --git a/src/libnetdata/parsers/timeframe.c b/src/libnetdata/parsers/timeframe.c new file mode 100644 index 000000000..33ea69750 --- /dev/null +++ b/src/libnetdata/parsers/timeframe.c @@ -0,0 +1,128 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "timeframe.h" + +// -------------------------------------------------------------------------------------------------------------------- +// timeframe +/* +TIMEFRAME timeframe_parse(const char *txt) { + if(!txt || !*txt) + return TIMEFRAME_INVALID; + +char buf[strlen(txt) + 1]; +memcpy(buf, txt, strlen(txt) + 1); +char *s = trim_all(buf); +if(!s) + return TIMEFRAME_INVALID; + +while(isspace(*s)) s++; + +if(strcasecmp(s, "this minute") == 0) { + return (TIMEFRAME) { + .after = API_RELATIVE_TIME_THIS_MINUTE, + .before = 0, + }; +} +if(strcasecmp(s, "this hour") == 0) { + return (TIMEFRAME) { + .after = API_RELATIVE_TIME_THIS_HOUR, + .before = 0, + }; +} +if(strcasecmp(s, "today") == 0) { + return (TIMEFRAME) { + .after = API_RELATIVE_TIME_TODAY, + .before = 0, + }; +} +if(strcasecmp(s, "this week") == 0) { + return (TIMEFRAME) { + .after = API_RELATIVE_TIME_THIS_WEEK, + .before = 0, + }; +} +if(strcasecmp(s, "this month") == 0) { + return (TIMEFRAME) { + .after = API_RELATIVE_TIME_THIS_MONTH, + .before = 0, + }; +} +if(strcasecmp(s, "this year") == 0) { + return (TIMEFRAME) { + .after = API_RELATIVE_TIME_THIS_YEAR, + .before = 0, + }; +} + +if(strcasecmp(s, "last minute") == 0) { + return (TIMEFRAME) { + .after = -60, + .before = API_RELATIVE_TIME_THIS_MINUTE, + }; +} +if(strcasecmp(s, "last hour") == 0) { + return (TIMEFRAME) { + .after = -3600, + .before = API_RELATIVE_TIME_THIS_HOUR, + }; +} +if(strcasecmp(s, "yesterday") == 0) { + return (TIMEFRAME) { + .after = -86400, + .before = API_RELATIVE_TIME_TODAY, + }; +} +if(strcasecmp(s, "this week") == 0) { + return (TIMEFRAME) { + .after = -86400 * 7, + .before = API_RELATIVE_TIME_THIS_WEEK, + }; +} +if(strcasecmp(s, "this month") == 0) { + return (TIMEFRAME) { + .after = API_RELATIVE_TIME_LAST_MONTH, + .before = API_RELATIVE_TIME_THIS_MONTH, + }; +} +if(strcasecmp(s, "this year") == 0) { + return (TIMEFRAME) { + .after = API_RELATIVE_TIME_LAST_YEAR, + .before = API_RELATIVE_TIME_THIS_YEAR, + }; +} + +const char *end; +double after = strtondd(s, (char **)&end); + +if(end == s) + return TIMEFRAME_INVALID; + +s = end; +while(isspace(*s)) s++; + +time_t multiplier = 1; +if(!isdigit(*s) && *s != '-') { + // after has units + bool found = false; + + for (size_t i = 0; i < sizeof(units) / sizeof(units[0]); i++) { + size_t len = strlen(units[i].unit); + + if (units[i].multiplier >= 1 * NSEC_PER_USEC && + strncmp(s, units[i].unit, len) == 0 && + (isspace(s[len]) || s[len] == '-')) { + multiplier = units[i].multiplier / NSEC_PER_SEC; + found = true; + s += len; + } + } + + if(!found) + return TIMEFRAME_INVALID; +} + +const char *dash = strchr(s, '-'); +if(!dash) return TIMEFRAME_INVALID; + +} +*/ diff --git a/src/libnetdata/parsers/timeframe.h b/src/libnetdata/parsers/timeframe.h new file mode 100644 index 000000000..a176dd30a --- /dev/null +++ b/src/libnetdata/parsers/timeframe.h @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_TIMEFRAME_H +#define NETDATA_TIMEFRAME_H + +#include "parsers.h" + +typedef struct { + time_t after; + time_t before; +} TIMEFRAME; + +#define API_RELATIVE_TIME_MAX (3 * 365 * 86400) + +#define API_RELATIVE_TIME_INVALID (-1000000000) + 
+#define API_RELATIVE_TIME_THIS_MINUTE (API_RELATIVE_TIME_INVALID - 1) // this minute at 00 seconds +#define API_RELATIVE_TIME_THIS_HOUR (API_RELATIVE_TIME_INVALID - 2) // this hour at 00 minutes, 00 seconds +#define API_RELATIVE_TIME_TODAY (API_RELATIVE_TIME_INVALID - 3) // today at 00:00:00 +#define API_RELATIVE_TIME_THIS_WEEK (API_RELATIVE_TIME_INVALID - 4) // this Monday, 00:00:00 +#define API_RELATIVE_TIME_THIS_MONTH (API_RELATIVE_TIME_INVALID - 5) // this month's 1st at 00:00:00 +#define API_RELATIVE_TIME_THIS_YEAR (API_RELATIVE_TIME_INVALID - 6) // this year's Jan 1st, at 00:00:00 +#define API_RELATIVE_TIME_LAST_MONTH (API_RELATIVE_TIME_INVALID - 7) // last month's 1st, at 00:00:00 +#define API_RELATIVE_TIME_LAST_YEAR (API_RELATIVE_TIME_INVALID - 8) // last year's Jan 1st, at 00:00:00 + +#define TIMEFRAME_INVALID (TIMEFRAME){ .after = API_RELATIVE_TIME_INVALID, .before = API_RELATIVE_TIME_INVALID } + +#endif //NETDATA_TIMEFRAME_H diff --git a/src/libnetdata/paths/paths.c b/src/libnetdata/paths/paths.c new file mode 100644 index 000000000..c68ee805f --- /dev/null +++ b/src/libnetdata/paths/paths.c @@ -0,0 +1,327 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "paths.h" + +static int is_procfs(const char *path, char **reason) { +#if defined(__APPLE__) || defined(__FreeBSD__) + (void)path; + (void)reason; +#else + struct statfs stat; + + if (statfs(path, &stat) == -1) { + if (reason) + *reason = "failed to statfs()"; + return -1; + } + +#if defined PROC_SUPER_MAGIC + if (stat.f_type != PROC_SUPER_MAGIC) { + if (reason) + *reason = "type is not procfs"; + return -1; + } +#endif + +#endif + + return 0; +} + +static int is_sysfs(const char *path, char **reason) { +#if defined(__APPLE__) || defined(__FreeBSD__) + (void)path; + (void)reason; +#else + struct statfs stat; + + if (statfs(path, &stat) == -1) { + if (reason) + *reason = "failed to statfs()"; + return -1; + } + +#if defined SYSFS_MAGIC + if (stat.f_type != SYSFS_MAGIC) { + if (reason) + *reason = "type is not sysfs"; + return -1; + } +#endif + +#endif + + return 0; +} + +int verify_netdata_host_prefix(bool log_msg) { + if(!netdata_configured_host_prefix) + netdata_configured_host_prefix = ""; + + if(!*netdata_configured_host_prefix) + return 0; + + char path[FILENAME_MAX]; + char *reason = "unknown reason"; + errno_clear(); + + strncpyz(path, netdata_configured_host_prefix, sizeof(path) - 1); + + struct stat sb; + if (stat(path, &sb) == -1) { + reason = "failed to stat()"; + goto failed; + } + + if((sb.st_mode & S_IFMT) != S_IFDIR) { + errno = EINVAL; + reason = "is not a directory"; + goto failed; + } + + snprintfz(path, sizeof(path), "%s/proc", netdata_configured_host_prefix); + if(is_procfs(path, &reason) == -1) + goto failed; + + snprintfz(path, sizeof(path), "%s/sys", netdata_configured_host_prefix); + if(is_sysfs(path, &reason) == -1) + goto failed; + + if (netdata_configured_host_prefix && *netdata_configured_host_prefix) { + if (log_msg) + netdata_log_info("Using host prefix directory '%s'", netdata_configured_host_prefix); + } + + return 0; + +failed: + if (log_msg) + netdata_log_error("Ignoring host prefix '%s': path '%s' %s", netdata_configured_host_prefix, path, reason); + + netdata_configured_host_prefix = ""; + return -1; +} + +size_t filename_from_path_entry(char out[FILENAME_MAX], const char *path, const char *entry, const char *extension) { + if(unlikely(!path || !*path)) path = "."; + if(unlikely(!entry)) entry = ""; + + // skip trailing slashes in path + size_t len = strlen(path); + while(len > 0 
&& path[len - 1] == '/') len--; + + // skip leading slashes in subpath + while(entry[0] == '/') entry++; + + // if the last character in path is / and (there is a subpath or path is now empty) + // keep the trailing slash in path and remove the additional slash + char *slash = "/"; + if(path[len] == '/' && (*entry || len == 0)) { + slash = ""; + len++; + } + else if(!*entry) { + // there is no entry + // no need for trailing slash + slash = ""; + } + + return snprintfz(out, FILENAME_MAX, "%.*s%s%s%s%s", (int)len, path, slash, entry, + extension && *extension ? "." : "", + extension && *extension ? extension : ""); +} + +char *filename_from_path_entry_strdupz(const char *path, const char *entry) { + char filename[FILENAME_MAX]; + filename_from_path_entry(filename, path, entry, NULL); + return strdupz(filename); +} + +bool filename_is_dir(const char *filename, bool create_it) { + CLEAN_CHAR_P *buffer = NULL; + + size_t max_links = 100; + + bool is_dir = false; + struct stat st; + while(max_links && stat(filename, &st) == 0) { + if ((st.st_mode & S_IFMT) == S_IFDIR) + is_dir = true; + else if ((st.st_mode & S_IFMT) == S_IFLNK) { + max_links--; + + if(!buffer) + buffer = mallocz(FILENAME_MAX); + + char link_dst[FILENAME_MAX]; + ssize_t l = readlink(filename, link_dst, FILENAME_MAX - 1); + if (l > 0) { + link_dst[l] = '\0'; + strncpyz(buffer, link_dst, FILENAME_MAX - 1); + filename = buffer; + continue; + } + } + + break; + } + + if(!is_dir && create_it && max_links == 100 && mkdir(filename, 0750) == 0) + is_dir = true; + + return is_dir; +} + +bool path_entry_is_dir(const char *path, const char *entry, bool create_it) { + char filename[FILENAME_MAX]; + filename_from_path_entry(filename, path, entry, NULL); + return filename_is_dir(filename, create_it); +} + +bool filename_is_file(const char *filename) { + CLEAN_CHAR_P *buffer = NULL; + + size_t max_links = 100; + + bool is_file = false; + struct stat st; + while(max_links && stat(filename, &st) == 0) { + if((st.st_mode & S_IFMT) == S_IFREG) + is_file = true; + else if((st.st_mode & S_IFMT) == S_IFLNK) { + max_links--; + + if(!buffer) + buffer = mallocz(FILENAME_MAX); + + char link_dst[FILENAME_MAX]; + ssize_t l = readlink(filename, link_dst, FILENAME_MAX - 1); + if(l > 0) { + link_dst[l] = '\0'; + strncpyz(buffer, link_dst, FILENAME_MAX - 1); + filename = buffer; + continue; + } + } + + break; + } + + return is_file; +} + +bool path_entry_is_file(const char *path, const char *entry) { + char filename[FILENAME_MAX]; + filename_from_path_entry(filename, path, entry, NULL); + return filename_is_file(filename); +} + +void recursive_config_double_dir_load(const char *user_path, const char *stock_path, const char *entry, int (*callback)(const char *filename, void *data, bool stock_config), void *data, size_t depth) { + if(depth > 3) { + netdata_log_error("CONFIG: Max directory depth reached while reading user path '%s', stock path '%s', subpath '%s'", user_path, stock_path, + entry); + return; + } + + if(!stock_path) + stock_path = user_path; + + char *udir = filename_from_path_entry_strdupz(user_path, entry); + char *sdir = filename_from_path_entry_strdupz(stock_path, entry); + + netdata_log_debug(D_HEALTH, "CONFIG traversing user-config directory '%s', stock config directory '%s'", udir, sdir); + + DIR *dir = opendir(udir); + if (!dir) { + netdata_log_error("CONFIG cannot open user-config directory '%s'.", udir); + } + else { + struct dirent *de = NULL; + while((de = readdir(dir))) { + if(de->d_type == DT_DIR || de->d_type == DT_LNK) { + if( 
+                    !de->d_name[0] ||
+                    (de->d_name[0] == '.' && de->d_name[1] == '\0') ||
+                    (de->d_name[0] == '.' && de->d_name[1] == '.' && de->d_name[2] == '\0')
+                    ) {
+                    netdata_log_debug(D_HEALTH, "CONFIG ignoring user-config directory '%s/%s'", udir, de->d_name);
+                    continue;
+                }
+
+                if(path_entry_is_dir(udir, de->d_name, false)) {
+                    recursive_config_double_dir_load(udir, sdir, de->d_name, callback, data, depth + 1);
+                    continue;
+                }
+            }
+
+            if(de->d_type == DT_UNKNOWN || de->d_type == DT_REG || de->d_type == DT_LNK) {
+                size_t len = strlen(de->d_name);
+                if(path_entry_is_file(udir, de->d_name) &&
+                    len > 5 && !strcmp(&de->d_name[len - 5], ".conf")) {
+                    char *filename = filename_from_path_entry_strdupz(udir, de->d_name);
+                    netdata_log_debug(D_HEALTH, "CONFIG calling callback for user file '%s'", filename);
+                    callback(filename, data, false);
+                    freez(filename);
+                    continue;
+                }
+            }
+
+            netdata_log_debug(D_HEALTH, "CONFIG ignoring user-config file '%s/%s' of type %d", udir, de->d_name, (int)de->d_type);
+        }
+
+        closedir(dir);
+    }
+
+    netdata_log_debug(D_HEALTH, "CONFIG traversing stock config directory '%s', user config directory '%s'", sdir, udir);
+
+    dir = opendir(sdir);
+    if (!dir) {
+        netdata_log_error("CONFIG cannot open stock config directory '%s'.", sdir);
+    }
+    else {
+        if (strcmp(udir, sdir)) {
+            struct dirent *de = NULL;
+            while((de = readdir(dir))) {
+                if(de->d_type == DT_DIR || de->d_type == DT_LNK) {
+                    if( !de->d_name[0] ||
+                        (de->d_name[0] == '.' && de->d_name[1] == '\0') ||
+                        (de->d_name[0] == '.' && de->d_name[1] == '.' && de->d_name[2] == '\0')
+                        ) {
+                        netdata_log_debug(D_HEALTH, "CONFIG ignoring stock config directory '%s/%s'", sdir, de->d_name);
+                        continue;
+                    }
+
+                    if(path_entry_is_dir(sdir, de->d_name, false)) {
+                        // recurse into a stock subdirectory only when there is no corresponding
+                        // user subdirectory - to avoid reading the files twice
+                        if(!path_entry_is_dir(udir, de->d_name, false))
+                            recursive_config_double_dir_load(udir, sdir, de->d_name, callback, data, depth + 1);
+
+                        continue;
+                    }
+                }
+
+                if(de->d_type == DT_UNKNOWN || de->d_type == DT_REG || de->d_type == DT_LNK) {
+                    size_t len = strlen(de->d_name);
+                    if(path_entry_is_file(sdir, de->d_name) && !path_entry_is_file(udir, de->d_name) &&
+                        len > 5 && !strcmp(&de->d_name[len - 5], ".conf")) {
+                        char *filename = filename_from_path_entry_strdupz(sdir, de->d_name);
+                        netdata_log_debug(D_HEALTH, "CONFIG calling callback for stock file '%s'", filename);
+                        callback(filename, data, true);
+                        freez(filename);
+                        continue;
+                    }
+                }
+
+                netdata_log_debug(D_HEALTH, "CONFIG ignoring stock-config file '%s/%s' of type %d", sdir, de->d_name, (int)de->d_type);
+            }
+        }
+        closedir(dir);
+    }
+
+    netdata_log_debug(D_HEALTH, "CONFIG done traversing user-config directory '%s', stock config directory '%s'", udir, sdir);
+
+    freez(udir);
+    freez(sdir);
+}
diff --git a/src/libnetdata/paths/paths.h b/src/libnetdata/paths/paths.h
new file mode 100644
index 000000000..9c5a8a748
--- /dev/null
+++ b/src/libnetdata/paths/paths.h
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_PATHS_H
+#define NETDATA_PATHS_H
+
+#include "../libnetdata.h"
+
+size_t filename_from_path_entry(char out[FILENAME_MAX], const char *path, const char *entry, const char *extension);
+char *filename_from_path_entry_strdupz(const char *path, const char *entry);
+
+bool filename_is_file(const char *filename);
+bool filename_is_dir(const char *filename, bool create_it);
+
+bool path_entry_is_file(const char *path, const char *entry);
+bool
path_entry_is_dir(const char *path, const char *entry, bool create_it); + +void recursive_config_double_dir_load( + const char *user_path + , const char *stock_path + , const char *entry + , int (*callback)(const char *filename, void *data, bool stock_config) + , void *data + , size_t depth +); + +#endif //NETDATA_PATHS_H diff --git a/src/libnetdata/procfile/README.md b/src/libnetdata/procfile/README.md index 9e737a511..faa00c6a2 100644 --- a/src/libnetdata/procfile/README.md +++ b/src/libnetdata/procfile/README.md @@ -1,12 +1,3 @@ - - # PROCFILE procfile is a library for reading text data files (i.e `/proc` files) in the fastest possible way. diff --git a/src/libnetdata/procfile/procfile.c b/src/libnetdata/procfile/procfile.c index 2b7eeeb56..fb6b0f8c3 100644 --- a/src/libnetdata/procfile/procfile.c +++ b/src/libnetdata/procfile/procfile.c @@ -10,14 +10,23 @@ int procfile_open_flags = O_RDONLY | O_CLOEXEC; -int procfile_adaptive_initial_allocation = 0; - // if adaptive allocation is set, these store the // max values we have seen so far -size_t procfile_max_lines = PFLINES_INCREASE_STEP; -size_t procfile_max_words = PFWORDS_INCREASE_STEP; -size_t procfile_max_allocation = PROCFILE_INCREMENT_BUFFER; - +static bool procfile_adaptive_initial_allocation = false; +static size_t procfile_max_lines = PFLINES_INCREASE_STEP; +static size_t procfile_max_words = PFWORDS_INCREASE_STEP; +static size_t procfile_max_allocation = PROCFILE_INCREMENT_BUFFER; + +void procfile_set_adaptive_allocation(bool enable, size_t bytes, size_t lines, size_t words) { + procfile_adaptive_initial_allocation = enable; + + if(bytes > procfile_max_allocation) + procfile_max_allocation = bytes; + if(lines > procfile_max_lines) + procfile_max_lines = lines; + if(words > procfile_max_words) + procfile_max_words = words; +} // ---------------------------------------------------------------------------- @@ -59,6 +68,8 @@ static inline void procfile_words_add(procfile *ff, char *str) { ff->words = fw = reallocz(fw, sizeof(pfwords) + (fw->size + wanted) * sizeof(char *)); fw->size += wanted; + ff->stats.memory += wanted * sizeof(char *); + ff->stats.resizes++; } fw->words[fw->len++] = str; @@ -92,7 +103,7 @@ static inline void procfile_words_free(pfwords *fw) { // An array of lines NEVERNULL -static inline size_t *procfile_lines_add(procfile *ff) { +static inline uint32_t *procfile_lines_add(procfile *ff) { // netdata_log_debug(D_PROCFILE, PF_PREFIX ": adding line %d at word %d", fl->len, first_word); pflines *fl = ff->lines; @@ -104,6 +115,8 @@ static inline size_t *procfile_lines_add(procfile *ff) { ff->lines = fl = reallocz(fl, sizeof(pflines) + (fl->size + wanted) * sizeof(ffline)); fl->size += wanted; + ff->stats.memory += wanted * sizeof(ffline); + ff->stats.resizes++; } ffline *ffl = &fl->lines[fl->len++]; @@ -168,7 +181,7 @@ static void procfile_parser(procfile *ff) { char quote = 0; // the quote character - only when in quoted string size_t opened = 0; // counts the number of open parenthesis - size_t *line_words = procfile_lines_add(ff); + uint32_t *line_words = procfile_lines_add(ff); while(s < e) { PF_CHAR_TYPE ct = separators[(unsigned char)(*s)]; @@ -230,8 +243,12 @@ static void procfile_parser(procfile *ff) { } else if(likely(ct == PF_CHAR_IS_OPEN)) { if(s == t) { + if(!opened) + t = ++s; + else + ++s; + opened++; - t = ++s; } else if(opened) { opened++; @@ -275,6 +292,8 @@ static void procfile_parser(procfile *ff) { } procfile *procfile_readall(procfile *ff) { + if(!ff) return NULL; + // 
netdata_log_debug(D_PROCFILE, PF_PREFIX ": Reading file '%s'.", ff->filename); ff->len = 0; // zero the used size @@ -291,9 +310,12 @@ procfile *procfile_readall(procfile *ff) { netdata_log_debug(D_PROCFILE, PF_PREFIX ": Expanding data buffer for file '%s' by %zu bytes.", procfile_filename(ff), wanted); ff = reallocz(ff, sizeof(procfile) + ff->size + wanted); ff->size += wanted; + ff->stats.memory += wanted; + ff->stats.resizes++; } - netdata_log_debug(D_PROCFILE, "Reading file '%s', from position %zd with length %zd", procfile_filename(ff), s, (ssize_t)(ff->size - s)); + // netdata_log_info("Reading file '%s', from position %zd with length %zd", procfile_filename(ff), s, (ssize_t)(ff->size - s)); + ff->stats.reads++; r = read(ff->fd, &ff->data[s], ff->size - s); if(unlikely(r == -1)) { if(unlikely(!(ff->flags & PROCFILE_FLAG_NO_ERROR_ON_FILE_IO))) collector_error(PF_PREFIX ": Cannot read from file '%s' on fd %d", procfile_filename(ff), ff->fd); @@ -303,6 +325,9 @@ procfile *procfile_readall(procfile *ff) { return NULL; } + if((ssize_t)ff->stats.max_read_size < r) + ff->stats.max_read_size = r; + ff->len += r; } @@ -325,6 +350,17 @@ procfile *procfile_readall(procfile *ff) { if(unlikely(ff->words->len > procfile_max_words)) procfile_max_words = ff->words->len; } + if(ff->stats.max_source_bytes < ff->len) + ff->stats.max_source_bytes = ff->len; + + if(ff->stats.max_lines < ff->lines->len) + ff->stats.max_lines = ff->lines->len; + + if(ff->stats.max_words < ff->words->len) + ff->stats.max_words = ff->words->len; + + ff->stats.total_read_bytes += ff->len; + // netdata_log_debug(D_PROCFILE, "File '%s' updated.", ff->filename); return ff; } @@ -429,10 +465,18 @@ procfile *procfile_open(const char *filename, const char *separators, uint32_t f ff->size = size; ff->len = 0; ff->flags = flags; + ff->stats.opens = 1; + ff->stats.reads = ff->stats.resizes = 0; + ff->stats.max_lines = ff->stats.max_words = ff->stats.max_source_bytes = 0; + ff->stats.total_read_bytes = ff->stats.max_read_size = 0; ff->lines = procfile_lines_create(); ff->words = procfile_words_create(); + ff->stats.memory = sizeof(procfile) + size + + (sizeof(pflines) + ff->lines->size * sizeof(ffline)) + + (sizeof(pfwords) + ff->words->size * sizeof(char *)); + procfile_set_separators(ff, separators); netdata_log_debug(D_PROCFILE, "File '%s' opened.", filename); @@ -452,6 +496,7 @@ procfile *procfile_reopen(procfile *ff, const char *filename, const char *separa procfile_close(ff); return NULL; } + ff->stats.opens++; // netdata_log_info("PROCFILE: opened '%s' on fd %d", filename, ff->fd); @@ -479,7 +524,7 @@ void procfile_print(procfile *ff) { for(l = 0; likely(l < lines) ;l++) { size_t words = procfile_linewords(ff, l); - netdata_log_debug(D_PROCFILE, " line %zu starts at word %zu and has %zu words", l, ff->lines->lines[l].first, ff->lines->lines[l].words); + netdata_log_debug(D_PROCFILE, " line %zu starts at word %zu and has %zu words", l, (size_t)ff->lines->lines[l].first, (size_t)ff->lines->lines[l].words); size_t w; for(w = 0; likely(w < words) ;w++) { diff --git a/src/libnetdata/procfile/procfile.h b/src/libnetdata/procfile/procfile.h index 8db5b45f4..25b976988 100644 --- a/src/libnetdata/procfile/procfile.h +++ b/src/libnetdata/procfile/procfile.h @@ -19,9 +19,8 @@ typedef struct { // An array of lines typedef struct { - size_t words; // how many words this line has - size_t first; // the id of the first word of this line - // in the words array + uint32_t words; // how many words this line has + uint32_t first; // the id of the 
first word of this line in the words array
 } ffline;
 
 typedef struct {
@@ -35,7 +34,7 @@ typedef struct {
 // The procfile
 #define PROCFILE_FLAG_DEFAULT             0x00000000 // To store inside `collector.log`
-#define PROCFILE_FLAG_NO_ERROR_ON_FILE_IO 0x00000001 // Do not store nothing
+#define PROCFILE_FLAG_NO_ERROR_ON_FILE_IO 0x00000001 // Do not log anything
 #define PROCFILE_FLAG_ERROR_ON_ERROR_LOG  0x00000002 // Store inside `error.log`
 
 typedef enum __attribute__ ((__packed__)) procfile_separator {
@@ -47,7 +46,22 @@
     PF_CHAR_IS_CLOSE
 } PF_CHAR_TYPE;
 
+struct procfile_stats {
+    size_t opens;
+    size_t reads;
+    size_t resizes;
+    size_t memory;
+    size_t total_read_bytes;
+    size_t max_source_bytes;
+    size_t max_lines;
+    size_t max_words;
+    size_t max_read_size;
+};
+
 typedef struct procfile {
+    // this structure is malloc'd (initialize all of it in procfile_open())
+    char *filename; // not populated until procfile_filename() is called
     uint32_t flags;
     int fd; // the file descriptor
@@ -56,6 +70,7 @@ typedef struct procfile {
     pflines *lines;
     pfwords *words;
     PF_CHAR_TYPE separators[256];
+    struct procfile_stats stats;
     char data[]; // allocated buffer to keep file contents
 } procfile;
 
@@ -85,8 +100,8 @@ char *procfile_filename(procfile *ff);
 
 // set to the O_XXXX flags, to have procfile_open and procfile_reopen use them when opening proc files
 extern int procfile_open_flags;
 
-// set this to 1, to have procfile adapt its initial buffer allocation to the max allocation used so far
-extern int procfile_adaptive_initial_allocation;
+// call this with true and the expected initial sizes to let procfile learn the sizes needed
+void procfile_set_adaptive_allocation(bool enable, size_t bytes, size_t lines, size_t words);
 
 // return the number of lines present
 #define procfile_lines(ff) ((ff)->lines->len)
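+// Usage sketch (illustrative, not part of this header): enable adaptive
+// sizing once at startup, then open and (re)read a /proc file as usual:
+//
+//     procfile_set_adaptive_allocation(true, 4096, 256, 2048);
+//
+//     procfile *ff = procfile_open("/proc/stat", NULL, PROCFILE_FLAG_DEFAULT);
+//     ff = procfile_readall(ff);
+//     if(ff) {
+//         for(size_t l = 0; l < procfile_lines(ff); l++)
+//             /* process the procfile_linewords(ff, l) words of line l */;
+//     }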
diff --git a/src/libnetdata/query_progress/progress.c b/src/libnetdata/query_progress/progress.c
index 10e083e0c..157f20f11 100644
--- a/src/libnetdata/query_progress/progress.c
+++ b/src/libnetdata/query_progress/progress.c
@@ -8,7 +8,7 @@
 // hashtable for HASHED_KEY
 
 // cleanup hashtable defines
-#include "../simple_hashtable_undef.h"
+#include "../simple_hashtable/simple_hashtable_undef.h"
 
 struct query;
 #define SIMPLE_HASHTABLE_VALUE_TYPE struct query
@@ -16,7 +16,7 @@ struct query;
 #define SIMPLE_HASHTABLE_NAME _QUERY
 #define SIMPLE_HASHTABLE_VALUE2KEY_FUNCTION query_transaction
 #define SIMPLE_HASHTABLE_COMPARE_KEYS_FUNCTION query_compare_keys
-#include "../simple_hashtable.h"
+#include "../simple_hashtable/simple_hashtable.h"
 
 // ----------------------------------------------------------------------------
 
@@ -76,12 +76,7 @@ static struct progress {
 };
 
 SIMPLE_HASHTABLE_HASH query_hash(nd_uuid_t *transaction) {
-    struct uuid_hi_lo_t {
-        uint64_t hi;
-        uint64_t lo;
-    } *parts = (struct uuid_hi_lo_t *)transaction;
-
-    return parts->lo;
+    return XXH3_64bits(transaction, sizeof(*transaction));
 }
 
 static void query_progress_init_unsafe(void) {
diff --git a/src/libnetdata/required_dummies.h b/src/libnetdata/required_dummies.h
index 3b23b87f7..cff4c563a 100644
--- a/src/libnetdata/required_dummies.h
+++ b/src/libnetdata/required_dummies.h
@@ -13,11 +13,6 @@ void netdata_cleanup_and_exit(int ret, const char *action, const char *action_re
     exit(ret);
 }
 
-// callbacks required by popen()
-void signals_block(void){}
-void signals_unblock(void){}
-void signals_reset(void){}
-
 void rrdset_thread_rda_free(void){}
 void sender_thread_buffer_free(void){}
 void query_target_free(void){}
@@ -25,6 +20,6 @@ void service_exits(void){}
 void rrd_collector_finished(void){}
 
 // required by get_system_cpus()
-char *netdata_configured_host_prefix = "";
+const char *netdata_configured_host_prefix = "";
 
 #endif // NETDATA_LIB_DUMMIES_H
diff --git a/src/libnetdata/ringbuffer/ringbuffer.c b/src/libnetdata/ringbuffer/ringbuffer.c
new file mode 100644
index 000000000..b30b3c39a
--- /dev/null
+++ b/src/libnetdata/ringbuffer/ringbuffer.c
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+#include "ringbuffer_internal.h"
+
+rbuf_t rbuf_create(size_t size)
+{
+    rbuf_t buffer = mallocz(sizeof(struct rbuf) + size);
+    memset(buffer, 0, sizeof(struct rbuf));
+
+    buffer->data = ((char*)buffer) + sizeof(struct rbuf);
+
+    buffer->head = buffer->data;
+    buffer->tail = buffer->data;
+    buffer->size = size;
+    buffer->end = buffer->data + size;
+
+    return buffer;
+}
+
+void rbuf_free(rbuf_t buffer)
+{
+    freez(buffer);
+}
+
+void rbuf_flush(rbuf_t buffer)
+{
+    buffer->head = buffer->data;
+    buffer->tail = buffer->data;
+    buffer->size_data = 0;
+}
+
+char *rbuf_get_linear_insert_range(rbuf_t buffer, size_t *bytes)
+{
+    *bytes = 0;
+    if (buffer->head == buffer->tail && buffer->size_data)
+        return NULL;
+
+    *bytes = ((buffer->head >= buffer->tail) ? buffer->end : buffer->tail) - buffer->head;
+    return buffer->head;
+}
+
+char *rbuf_get_linear_read_range(rbuf_t buffer, size_t *bytes)
+{
+    *bytes = 0;
+    if(buffer->head == buffer->tail && !buffer->size_data)
+        return NULL;
+
+    *bytes = ((buffer->tail >= buffer->head) ? buffer->end : buffer->head) - buffer->tail;
+
+    return buffer->tail;
+}
+
+int rbuf_bump_head(rbuf_t buffer, size_t bytes)
+{
+    size_t free_bytes = rbuf_bytes_free(buffer);
+    if (bytes > free_bytes)
+        return 0;
+    int i = buffer->head - buffer->data;
+    buffer->head = &buffer->data[(i + bytes) % buffer->size];
+    buffer->size_data += bytes;
+    return 1;
+}
+
+int rbuf_bump_tail_noopt(rbuf_t buffer, size_t bytes)
+{
+    if (bytes > buffer->size_data)
+        return 0;
+    int i = buffer->tail - buffer->data;
+    buffer->tail = &buffer->data[(i + bytes) % buffer->size];
+    buffer->size_data -= bytes;
+
+    return 1;
+}
+
+int rbuf_bump_tail(rbuf_t buffer, size_t bytes)
+{
+    if(!rbuf_bump_tail_noopt(buffer, bytes))
+        return 0;
+
+    // if the tail caught up with the head,
+    // start writing the buffer from the beginning.
+    // this is not necessary (rbuf must work well without it)
+    // but it helps to optimize big writes, as rbuf_get_linear_insert_range
+    // will return a bigger continuous region
+    if(buffer->tail == buffer->head) {
+        assert(buffer->size_data == 0);
+        rbuf_flush(buffer);
+    }
+
+    return 1;
+}
+
+size_t rbuf_get_capacity(rbuf_t buffer)
+{
+    return buffer->size;
+}
+
+size_t rbuf_bytes_available(rbuf_t buffer)
+{
+    return buffer->size_data;
+}
+
+size_t rbuf_bytes_free(rbuf_t buffer)
+{
+    return buffer->size - buffer->size_data;
+}
+
+size_t rbuf_push(rbuf_t buffer, const char *data, size_t len)
+{
+    size_t to_cpy;
+    char *w_ptr = rbuf_get_linear_insert_range(buffer, &to_cpy);
+    if(!to_cpy)
+        return to_cpy;
+
+    to_cpy = MIN(to_cpy, len);
+    memcpy(w_ptr, data, to_cpy);
+    rbuf_bump_head(buffer, to_cpy);
+    if(to_cpy < len)
+        to_cpy += rbuf_push(buffer, &data[to_cpy], len - to_cpy);
+    return to_cpy;
+}
+
+size_t rbuf_pop(rbuf_t buffer, char *data, size_t len)
+{
+    size_t to_cpy;
+    const char *r_ptr = rbuf_get_linear_read_range(buffer, &to_cpy);
+    if(!to_cpy)
+        return to_cpy;
+
+    to_cpy = MIN(to_cpy, len);
+    memcpy(data, r_ptr, to_cpy);
+    rbuf_bump_tail(buffer, to_cpy);
+    if(to_cpy < len)
+        to_cpy += rbuf_pop(buffer, &data[to_cpy], len - to_cpy);
+    return to_cpy;
+}
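+
+// Usage sketch (illustrative): rbuf_push()/rbuf_pop() copy data in and out,
+// handling the wrap-around internally:
+//
+//     rbuf_t rb = rbuf_create(1024);
+//     size_t pushed = rbuf_push(rb, "hello", 5);       // up to 5 bytes copied in
+//     char out[8];
+//     size_t popped = rbuf_pop(rb, out, sizeof(out));  // popped == pushed == 5
+//     rbuf_free(rb);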
+
+static inline void rbuf_ptr_inc(rbuf_t buffer, const char **ptr)
+{
+    (*ptr)++;
+    if(*ptr >= buffer->end)
+        *ptr = buffer->data;
+}
+
+int rbuf_memcmp(rbuf_t buffer, const char *haystack, const char *needle, size_t needle_bytes)
+{
+    const char *end = needle + needle_bytes;
+
+    // as head==tail can mean 2 things here
+    if (haystack == buffer->head && buffer->size_data) {
+        if (*haystack != *needle)
+            return (*haystack - *needle);
+        rbuf_ptr_inc(buffer, &haystack);
+        needle++;
+    }
+
+    while (haystack != buffer->head && needle != end) {
+        if (*haystack != *needle)
+            return (*haystack - *needle);
+        rbuf_ptr_inc(buffer, &haystack);
+        needle++;
+    }
+    return 0;
+}
+
+int rbuf_memcmp_n(rbuf_t buffer, const char *to_cmp, size_t to_cmp_bytes)
+{
+    return rbuf_memcmp(buffer, buffer->tail, to_cmp, to_cmp_bytes);
+}
+
+char *rbuf_find_bytes(rbuf_t buffer, const char *needle, size_t needle_bytes, int *found_idx)
+{
+    const char *ptr = buffer->tail;
+    *found_idx = 0;
+
+    if (!rbuf_bytes_available(buffer))
+        return NULL;
+
+    if (buffer->head == buffer->tail && buffer->size_data) {
+        if(!rbuf_memcmp(buffer, ptr, needle, needle_bytes))
+            return (char *)ptr;
+        rbuf_ptr_inc(buffer, &ptr);
+        (*found_idx)++;
+    }
+
+    while (ptr != buffer->head)
+    {
+        if(!rbuf_memcmp(buffer, ptr, needle, needle_bytes))
+            return (char *)ptr;
+        rbuf_ptr_inc(buffer, &ptr);
+        (*found_idx)++;
+    }
+    return NULL;
+}
diff --git a/src/libnetdata/ringbuffer/ringbuffer.h b/src/libnetdata/ringbuffer/ringbuffer.h
new file mode 100644
index 000000000..340112a8f
--- /dev/null
+++ b/src/libnetdata/ringbuffer/ringbuffer.h
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef RINGBUFFER_H
+#define RINGBUFFER_H
+#include "../libnetdata.h"
+
+typedef struct rbuf *rbuf_t;
+
+rbuf_t rbuf_create(size_t size);
+void rbuf_free(rbuf_t buffer);
+void rbuf_flush(rbuf_t buffer);
+
+/* @param bytes how many bytes can be copied into the returned pointer
+ * @return pointer where data can be copied to, or NULL if the buffer is full
+ */
+char *rbuf_get_linear_insert_range(rbuf_t buffer, size_t *bytes);
+char *rbuf_get_linear_read_range(rbuf_t buffer, size_t *bytes);
+
+int rbuf_bump_head(rbuf_t buffer, size_t bytes);
+int rbuf_bump_tail(rbuf_t buffer, size_t bytes);
+
+/* @param buffer related buffer instance
+ * @returns total capacity of buffer in bytes (not free/used)
+ */
+size_t rbuf_get_capacity(rbuf_t buffer);
+
+/* @param buffer related buffer instance
+ * @returns count of bytes stored in the buffer
+ */
+size_t rbuf_bytes_available(rbuf_t buffer);
+
+/* @param buffer related buffer instance
+ * @returns count of bytes available/free in the buffer (how many more bytes you can store in this buffer)
+ */
+size_t rbuf_bytes_free(rbuf_t buffer);
+
+/* writes as many bytes from `data` into the `buffer` as possible,
+ * up to a maximum of `len` bytes
+ */
+size_t rbuf_push(rbuf_t buffer, const char *data, size_t len);
+size_t rbuf_pop(rbuf_t buffer, char *data, size_t len);
+
+char *rbuf_find_bytes(rbuf_t buffer, const char *needle, size_t needle_bytes, int *found_idx);
+int rbuf_memcmp_n(rbuf_t buffer, const char *to_cmp, size_t to_cmp_bytes);
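+
+/* Typical zero-copy fill pattern (illustrative):
+ *
+ *     size_t avail;
+ *     char *dst = rbuf_get_linear_insert_range(rb, &avail);
+ *     if (dst) {
+ *         ssize_t r = read(fd, dst, avail);  // read() straight into the buffer
+ *         if (r > 0)
+ *             rbuf_bump_head(rb, r);         // commit what was actually read
+ *     }
+ */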
+
+#endif
diff --git a/src/libnetdata/ringbuffer/ringbuffer_internal.h b/src/libnetdata/ringbuffer/ringbuffer_internal.h
new file mode 100644
index 000000000..0cc254aa8
--- /dev/null
+++ b/src/libnetdata/ringbuffer/ringbuffer_internal.h
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef RINGBUFFER_INTERNAL_H
+#define RINGBUFFER_INTERNAL_H
+
+#include "ringbuffer.h"
+
+struct rbuf {
+    char *data;
+
+    // points to the next byte where we can write
+    char *head;
+    // points to the oldest (next to be popped) readable byte
+    char *tail;
+
+    // to avoid calculating data + size
+    // all the time
+    char *end;
+
+    size_t size;
+    size_t size_data;
+};
+
+typedef struct rbuf *rbuf_t;
+
+#endif
diff --git a/src/libnetdata/sanitizers/chart_id_and_name.c b/src/libnetdata/sanitizers/chart_id_and_name.c
new file mode 100644
index 000000000..5af8aa686
--- /dev/null
+++ b/src/libnetdata/sanitizers/chart_id_and_name.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+/*
+ * control characters become spaces, which are deduplicated.
+ *
+ * Character Name   Sym To  Why
+ * ---------------- --- --- -------------------------------------------------------------------------------------
+ * space            [ ] -> [_]
+ * exclamation mark [!] -> [_] (only when it is the first character) simple patterns negation
+ * double quotes    ["] -> [_] needs escaping when parsing
+ * dollar           [$] -> [_] health variables and security in alarm-notify.sh, cgroup-name.sh, etc.
+ * percent          [%] -> [_] http GET encoded characters
+ * ampersand        [&] -> [_] http GET fields separator
+ * single quote     ['] -> [_] needs escaping when parsing
+ * asterisk         [*] -> [_] simple pattern wildcard
+ * plus             [+] -> [_] http GET space
+ * comma            [,] -> [.] list separator (probably not used today)
+ * equal            [=] -> [_] plugins.d protocol separator
+ * question mark    [?] -> [_] http GET query string separator
+ * at               [@] -> [_] hostname separator (on the UI)
+ * backtick         [`] -> [_] bash expansion (security in alarm-notify.sh and other shell scripts)
+ * pipe             [|] -> [_] list separator (simple patterns and http GET)
+ * backslash        [\] -> [/] to avoid interfering with escaping logic
+ */
+
+unsigned char chart_names_allowed_chars[256] = {
+    [0] = '\0', [1] = ' ', [2] = ' ', [3] = ' ', [4] = ' ', [5] = ' ', [6] = ' ', [7] = ' ', [8] = ' ',
+
+    // control characters to be treated as spaces
+    ['\t'] = ' ', ['\n'] = ' ', ['\v'] = ' ', ['\f'] = ' ', ['\r'] = ' ',
+
+    [14] = ' ', [15] = ' ', [16] = ' ', [17] = ' ', [18] = ' ', [19] = ' ', [20] = ' ', [21] = ' ',
+    [22] = ' ', [23] = ' ', [24] = ' ', [25] = ' ', [26] = ' ', [27] = ' ', [28] = ' ', [29] = ' ',
+    [30] = ' ', [31] = ' ',
+
+    // symbols
+    [' '] = ' ', ['!'] = '!', ['"'] = '_', ['#'] = '#', ['$'] = '_', ['%'] = '_', ['&'] = '_', ['\''] = '_',
+    ['('] = '(', [')'] = ')', ['*'] = '_', ['+'] = '_', [','] = '.', ['-'] = '-', ['.'] = '.', ['/'] = '/',
+
+    // numbers
+    ['0'] = '0', ['1'] = '1', ['2'] = '2', ['3'] = '3', ['4'] = '4', ['5'] = '5', ['6'] = '6', ['7'] = '7',
+    ['8'] = '8', ['9'] = '9',
+
+    // symbols
+    [':'] = ':', [';'] = ';', ['<'] = '<', ['='] = '_', ['>'] = '>', ['?'] = '_', ['@'] = '_',
+
+    // capitals
+    ['A'] = 'A', ['B'] = 'B', ['C'] = 'C', ['D'] = 'D', ['E'] = 'E', ['F'] = 'F', ['G'] = 'G', ['H'] = 'H',
+    ['I'] = 'I', ['J'] = 'J', ['K'] = 'K', ['L'] = 'L', ['M'] = 'M', ['N'] = 'N', ['O'] = 'O', ['P'] = 'P',
+    ['Q'] = 'Q', ['R'] = 'R', ['S'] = 'S', ['T'] = 'T', ['U'] = 'U', ['V'] = 'V', ['W'] = 'W', ['X'] = 'X',
+    ['Y'] = 'Y', ['Z'] = 'Z',
+
+    // symbols
+    ['['] = '[', ['\\'] = '/', [']'] = ']', ['^'] = '_', ['_'] = '_', ['`'] = '_',
+
+    // lower
+    ['a'] = 'a', ['b'] = 'b', ['c'] = 'c', ['d'] = 'd', ['e'] = 'e', ['f'] = 'f', ['g'] = 'g', ['h'] = 'h',
+    ['i'] = 'i', ['j'] = 'j', ['k'] = 'k', ['l'] = 'l', ['m'] = 'm', ['n'] = 'n', ['o'] = 'o', ['p'] = 'p',
+ ['q'] = 'q', ['r'] = 'r', ['s'] = 's', ['t'] = 't', ['u'] = 'u', ['v'] = 'v', ['w'] = 'w', ['x'] = 'x', + ['y'] = 'y', ['z'] = 'z', + + // symbols + ['{'] = '{', ['|'] = '_', ['}'] = '}', ['~'] = '~', + + // rest + [127] = ' ', [128] = ' ', [129] = ' ', [130] = ' ', [131] = ' ', [132] = ' ', [133] = ' ', [134] = ' ', + [135] = ' ', [136] = ' ', [137] = ' ', [138] = ' ', [139] = ' ', [140] = ' ', [141] = ' ', [142] = ' ', + [143] = ' ', [144] = ' ', [145] = ' ', [146] = ' ', [147] = ' ', [148] = ' ', [149] = ' ', [150] = ' ', + [151] = ' ', [152] = ' ', [153] = ' ', [154] = ' ', [155] = ' ', [156] = ' ', [157] = ' ', [158] = ' ', + [159] = ' ', [160] = ' ', [161] = ' ', [162] = ' ', [163] = ' ', [164] = ' ', [165] = ' ', [166] = ' ', + [167] = ' ', [168] = ' ', [169] = ' ', [170] = ' ', [171] = ' ', [172] = ' ', [173] = ' ', [174] = ' ', + [175] = ' ', [176] = ' ', [177] = ' ', [178] = ' ', [179] = ' ', [180] = ' ', [181] = ' ', [182] = ' ', + [183] = ' ', [184] = ' ', [185] = ' ', [186] = ' ', [187] = ' ', [188] = ' ', [189] = ' ', [190] = ' ', + [191] = ' ', [192] = ' ', [193] = ' ', [194] = ' ', [195] = ' ', [196] = ' ', [197] = ' ', [198] = ' ', + [199] = ' ', [200] = ' ', [201] = ' ', [202] = ' ', [203] = ' ', [204] = ' ', [205] = ' ', [206] = ' ', + [207] = ' ', [208] = ' ', [209] = ' ', [210] = ' ', [211] = ' ', [212] = ' ', [213] = ' ', [214] = ' ', + [215] = ' ', [216] = ' ', [217] = ' ', [218] = ' ', [219] = ' ', [220] = ' ', [221] = ' ', [222] = ' ', + [223] = ' ', [224] = ' ', [225] = ' ', [226] = ' ', [227] = ' ', [228] = ' ', [229] = ' ', [230] = ' ', + [231] = ' ', [232] = ' ', [233] = ' ', [234] = ' ', [235] = ' ', [236] = ' ', [237] = ' ', [238] = ' ', + [239] = ' ', [240] = ' ', [241] = ' ', [242] = ' ', [243] = ' ', [244] = ' ', [245] = ' ', [246] = ' ', + [247] = ' ', [248] = ' ', [249] = ' ', [250] = ' ', [251] = ' ', [252] = ' ', [253] = ' ', [254] = ' ', + [255] = ' ' +}; + +static inline void sanitize_chart_name(char *dst, const char *src, size_t dst_size) { + // text_sanitize deduplicates spaces + text_sanitize((unsigned char *)dst, (const unsigned char *)src, dst_size, + chart_names_allowed_chars, true, "", NULL); + + char *d = dst; + + // do not accept ! as the first character + if(*d == '!') *d = '_'; + + // convert remaining spaces to underscores + while(*d) { + if(*d == ' ') *d = '_'; + d++; + } +} + +// make sure the supplied string +// is good for a netdata chart/dimension ID/NAME +void netdata_fix_chart_name(char *s) { + sanitize_chart_name(s, s, strlen(s) + 1); +} + +void netdata_fix_chart_id(char *s) { + sanitize_chart_name(s, s, strlen(s) + 1); +// size_t len = strlen(s); +// char buf[len + 1]; +// +// text_sanitize((unsigned char *)buf, (const unsigned char *)s, sizeof(buf), +// chart_names_allowed_chars, true, "", NULL); +// +// if(memcmp(s, buf, sizeof(buf)) == 0) +// // they are the same +// return; +// +// // they differ +// XXH128_hash_t hash = XXH3_128bits(s, len); +// ND_UUID *uuid = (ND_UUID *)&hash; +// internal_fatal(sizeof(hash) != sizeof(ND_UUID), "XXH128 and ND_UUID do not have the same size"); +// buf[0] = 'x'; +// buf[1] = 'x'; +// buf[2] = 'h'; +// buf[3] = '_'; +// uuid_unparse_lower_compact(uuid->uuid, &buf[4]); +} + +char *rrdset_strncpyz_name(char *dst, const char *src, size_t dst_size_minus_1) { + // src starts with "type." 
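+    // e.g. (illustrative): "disk.sda1 total util" becomes "disk.sda1_total_util";
+    // dots and the "type." prefix survive, and runs of spaces collapse to a single underscore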
+ sanitize_chart_name(dst, src, dst_size_minus_1 + 1); + return dst; +} + +bool rrdvar_fix_name(char *variable) { + size_t len = strlen(variable); + char buf[len + 1]; + memcpy(buf, variable, sizeof(buf)); + sanitize_chart_name(variable, variable, len + 1); + return memcmp(buf, variable, sizeof(buf)) != 0; +} diff --git a/src/libnetdata/sanitizers/chart_id_and_name.h b/src/libnetdata/sanitizers/chart_id_and_name.h new file mode 100644 index 000000000..eda6e3f30 --- /dev/null +++ b/src/libnetdata/sanitizers/chart_id_and_name.h @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_CHART_ID_AND_NAME_H +#define NETDATA_CHART_ID_AND_NAME_H + +#include "../libnetdata.h" + +void netdata_fix_chart_id(char *s); +void netdata_fix_chart_name(char *s); +char *rrdset_strncpyz_name(char *dst, const char *src, size_t dst_size_minus_1); +bool rrdvar_fix_name(char *variable); + +extern unsigned char chart_names_allowed_chars[256]; +static inline bool is_netdata_api_valid_character(char c) { + if(IS_UTF8_BYTE(c)) return true; + unsigned char t = chart_names_allowed_chars[(unsigned char)c]; + // the translation converts space to space + // so we have to check explicitly + return t == (unsigned char)c && t != ' ' && t != '!'; +} + +#endif //NETDATA_CHART_ID_AND_NAME_H diff --git a/src/libnetdata/sanitizers/sanitizers-functions.c b/src/libnetdata/sanitizers/sanitizers-functions.c new file mode 100644 index 000000000..5e1d87c35 --- /dev/null +++ b/src/libnetdata/sanitizers/sanitizers-functions.c @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "sanitizers-functions.h" + +static unsigned char functions_allowed_chars[256] = { + [0] = '\0', [1] = ' ', [2] = ' ', [3] = ' ', [4] = ' ', [5] = ' ', [6] = ' ', [7] = ' ', [8] = ' ', + + // control characters to be treated as spaces + ['\t'] = ' ', ['\n'] = ' ', ['\v'] = ' ', ['\f'] = ' ', ['\r'] = ' ', + + [14] = ' ', [15] = ' ', [16] = ' ', [17] = ' ', [18] = ' ', [19] = ' ', [20] = ' ', [21] = ' ', + [22] = ' ', [23] = ' ', [24] = ' ', [25] = ' ', [26] = ' ', [27] = ' ', [28] = ' ', [29] = ' ', + [30] = ' ', [31] = ' ', + + // symbols + [' '] = ' ', ['!'] = '!', ['"'] = '\'', ['#'] = '#', ['$'] = '$', ['%'] = '%', ['&'] = '&', ['\''] = '\'', + ['('] = '(', [')'] = ')', ['*'] = '*', ['+'] = '+', [','] = ',', ['-'] = '-', ['.'] = '.', ['/'] = '/', + + // numbers + ['0'] = '0', ['1'] = '1', ['2'] = '2', ['3'] = '3', ['4'] = '4', ['5'] = '5', ['6'] = '6', ['7'] = '7', + ['8'] = '8', ['9'] = '9', + + // symbols + [':'] = ':', [';'] = ';', ['<'] = '<', ['='] = '=', ['>'] = '>', ['?'] = '?', ['@'] = '@', + + // capitals + ['A'] = 'A', ['B'] = 'B', ['C'] = 'C', ['D'] = 'D', ['E'] = 'E', ['F'] = 'F', ['G'] = 'G', ['H'] = 'H', + ['I'] = 'I', ['J'] = 'J', ['K'] = 'K', ['L'] = 'L', ['M'] = 'M', ['N'] = 'N', ['O'] = 'O', ['P'] = 'P', + ['Q'] = 'Q', ['R'] = 'R', ['S'] = 'S', ['T'] = 'T', ['U'] = 'U', ['V'] = 'V', ['W'] = 'W', ['X'] = 'X', + ['Y'] = 'Y', ['Z'] = 'Z', + + // symbols + ['['] = '[', ['\\'] = '\\', [']'] = ']', ['^'] = '^', ['_'] = '_', ['`'] = '`', + + // lower + ['a'] = 'a', ['b'] = 'b', ['c'] = 'c', ['d'] = 'd', ['e'] = 'e', ['f'] = 'f', ['g'] = 'g', ['h'] = 'h', + ['i'] = 'i', ['j'] = 'j', ['k'] = 'k', ['l'] = 'l', ['m'] = 'm', ['n'] = 'n', ['o'] = 'o', ['p'] = 'p', + ['q'] = 'q', ['r'] = 'r', ['s'] = 's', ['t'] = 't', ['u'] = 'u', ['v'] = 'v', ['w'] = 'w', ['x'] = 'x', + ['y'] = 'y', ['z'] = 'z', + + // symbols + ['{'] = '{', ['|'] = '|', ['}'] = '}', ['~'] = '~', + + // rest + [127] = ' ', [128] = ' ', [129] = 
' ', [130] = ' ', [131] = ' ', [132] = ' ', [133] = ' ', [134] = ' ', + [135] = ' ', [136] = ' ', [137] = ' ', [138] = ' ', [139] = ' ', [140] = ' ', [141] = ' ', [142] = ' ', + [143] = ' ', [144] = ' ', [145] = ' ', [146] = ' ', [147] = ' ', [148] = ' ', [149] = ' ', [150] = ' ', + [151] = ' ', [152] = ' ', [153] = ' ', [154] = ' ', [155] = ' ', [156] = ' ', [157] = ' ', [158] = ' ', + [159] = ' ', [160] = ' ', [161] = ' ', [162] = ' ', [163] = ' ', [164] = ' ', [165] = ' ', [166] = ' ', + [167] = ' ', [168] = ' ', [169] = ' ', [170] = ' ', [171] = ' ', [172] = ' ', [173] = ' ', [174] = ' ', + [175] = ' ', [176] = ' ', [177] = ' ', [178] = ' ', [179] = ' ', [180] = ' ', [181] = ' ', [182] = ' ', + [183] = ' ', [184] = ' ', [185] = ' ', [186] = ' ', [187] = ' ', [188] = ' ', [189] = ' ', [190] = ' ', + [191] = ' ', [192] = ' ', [193] = ' ', [194] = ' ', [195] = ' ', [196] = ' ', [197] = ' ', [198] = ' ', + [199] = ' ', [200] = ' ', [201] = ' ', [202] = ' ', [203] = ' ', [204] = ' ', [205] = ' ', [206] = ' ', + [207] = ' ', [208] = ' ', [209] = ' ', [210] = ' ', [211] = ' ', [212] = ' ', [213] = ' ', [214] = ' ', + [215] = ' ', [216] = ' ', [217] = ' ', [218] = ' ', [219] = ' ', [220] = ' ', [221] = ' ', [222] = ' ', + [223] = ' ', [224] = ' ', [225] = ' ', [226] = ' ', [227] = ' ', [228] = ' ', [229] = ' ', [230] = ' ', + [231] = ' ', [232] = ' ', [233] = ' ', [234] = ' ', [235] = ' ', [236] = ' ', [237] = ' ', [238] = ' ', + [239] = ' ', [240] = ' ', [241] = ' ', [242] = ' ', [243] = ' ', [244] = ' ', [245] = ' ', [246] = ' ', + [247] = ' ', [248] = ' ', [249] = ' ', [250] = ' ', [251] = ' ', [252] = ' ', [253] = ' ', [254] = ' ', + [255] = ' ' +}; + +size_t rrd_functions_sanitize(char *dst, const char *src, size_t dst_len) { + return text_sanitize((unsigned char *)dst, (const unsigned char *)src, dst_len, + functions_allowed_chars, true, "", NULL); +} + diff --git a/src/libnetdata/sanitizers/sanitizers-functions.h b/src/libnetdata/sanitizers/sanitizers-functions.h new file mode 100644 index 000000000..f4c934040 --- /dev/null +++ b/src/libnetdata/sanitizers/sanitizers-functions.h @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_SANITIZERS_FUNCTIONS_H +#define NETDATA_SANITIZERS_FUNCTIONS_H + +#include "../libnetdata.h" + +size_t rrd_functions_sanitize(char *dst, const char *src, size_t dst_len); + +#endif //NETDATA_SANITIZERS_FUNCTIONS_H diff --git a/src/libnetdata/sanitizers/sanitizers-labels.c b/src/libnetdata/sanitizers/sanitizers-labels.c new file mode 100644 index 000000000..714897a88 --- /dev/null +++ b/src/libnetdata/sanitizers/sanitizers-labels.c @@ -0,0 +1,157 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "sanitizers-labels.h" + +/* + * All labels follow these rules: + * + * Character Symbol Names Values + * UTF-8 characters UTF-8 -> _ yes + * Lower case letter [a-z] yes yes + * Upper case letter [A-Z] yes yes + * Digit [0-9] yes yes + * Underscore _ yes yes + * Minus - yes yes + * Plus + -> _ yes + * Colon : -> _ yes + * Semicolon ; -> _ -> : + * Equal = -> _ -> : + * Period . yes yes + * Comma , -> . -> . + * Slash / yes yes + * Backslash \ -> / -> / + * At @ -> _ yes + * Space -> _ yes + * Opening parenthesis ( -> _ yes + * Closing parenthesis ) -> _ yes + * anything else -> _ -> space +* + * The above rules should allow users to set in tags (indicative): + * + * 1. hostnames and domain names as-is + * 2. email addresses as-is + * 3. 
floating point numbers, converted to always use a dot as the decimal point + * + * Leading and trailing spaces and control characters are removed from both label + * names and values. + * + * Multiple spaces inside the label name or the value are removed (only 1 is retained). + * In names spaces are also converted to underscores. + * + * Names that are only underscores are rejected (they do not enter the dictionary). + * + * The above rules do not require any conversion to be included in JSON strings. + * + * Label names and values are truncated to LABELS_MAX_LENGTH (200) characters. + * + * When parsing, label key and value are separated by the first colon (:) found. + * So label:value1:value2 is parsed as key = "label", value = "value1:value2" + * + * This means a label key cannot contain a colon (:) - it is converted to + * underscore if it does. + * + */ + +static unsigned char prometheus_label_names_char_map[256]; +static unsigned char label_names_char_map[256]; +static unsigned char label_values_char_map[256] = { + [0] = '\0', [1] = ' ', [2] = ' ', [3] = ' ', [4] = ' ', [5] = ' ', [6] = ' ', [7] = ' ', [8] = ' ', + + // control characters to be treated as spaces + ['\t'] = ' ', ['\n'] = ' ', ['\v'] = ' ', ['\f'] = ' ', ['\r'] = ' ', + + [14] = ' ', [15] = ' ', [16] = ' ', [17] = ' ', [18] = ' ', [19] = ' ', [20] = ' ', [21] = ' ', + [22] = ' ', [23] = ' ', [24] = ' ', [25] = ' ', [26] = ' ', [27] = ' ', [28] = ' ', [29] = ' ', + [30] = ' ', [31] = ' ', + + // symbols + [' '] = ' ', ['!'] = '_', ['"'] = '_', ['#'] = '_', ['$'] = '_', ['%'] = '_', ['&'] = '_', ['\''] = '_', + ['('] = '(', [')'] = ')', ['*'] = '_', ['+'] = '+', [','] = '.', ['-'] = '-', ['.'] = '.', ['/'] = '/', + + // numbers + ['0'] = '0', ['1'] = '1', ['2'] = '2', ['3'] = '3', ['4'] = '4', ['5'] = '5', ['6'] = '6', ['7'] = '7', + ['8'] = '8', ['9'] = '9', + + // symbols + [':'] = ':', [';'] = ':', ['<'] = '_', ['='] = ':', ['>'] = '_', ['?'] = '_', ['@'] = '@', + + // capitals + ['A'] = 'A', ['B'] = 'B', ['C'] = 'C', ['D'] = 'D', ['E'] = 'E', ['F'] = 'F', ['G'] = 'G', ['H'] = 'H', + ['I'] = 'I', ['J'] = 'J', ['K'] = 'K', ['L'] = 'L', ['M'] = 'M', ['N'] = 'N', ['O'] = 'O', ['P'] = 'P', + ['Q'] = 'Q', ['R'] = 'R', ['S'] = 'S', ['T'] = 'T', ['U'] = 'U', ['V'] = 'V', ['W'] = 'W', ['X'] = 'X', + ['Y'] = 'Y', ['Z'] = 'Z', + + // symbols + ['['] = '[', ['\\'] = '/', [']'] = ']', ['^'] = '_', ['_'] = '_', ['`'] = '_', + + // lower + ['a'] = 'a', ['b'] = 'b', ['c'] = 'c', ['d'] = 'd', ['e'] = 'e', ['f'] = 'f', ['g'] = 'g', ['h'] = 'h', + ['i'] = 'i', ['j'] = 'j', ['k'] = 'k', ['l'] = 'l', ['m'] = 'm', ['n'] = 'n', ['o'] = 'o', ['p'] = 'p', + ['q'] = 'q', ['r'] = 'r', ['s'] = 's', ['t'] = 't', ['u'] = 'u', ['v'] = 'v', ['w'] = 'w', ['x'] = 'x', + ['y'] = 'y', ['z'] = 'z', + + // symbols + ['{'] = '_', ['|'] = '_', ['}'] = '_', ['~'] = '_', + + // rest + [127] = ' ', [128] = ' ', [129] = ' ', [130] = ' ', [131] = ' ', [132] = ' ', [133] = ' ', [134] = ' ', + [135] = ' ', [136] = ' ', [137] = ' ', [138] = ' ', [139] = ' ', [140] = ' ', [141] = ' ', [142] = ' ', + [143] = ' ', [144] = ' ', [145] = ' ', [146] = ' ', [147] = ' ', [148] = ' ', [149] = ' ', [150] = ' ', + [151] = ' ', [152] = ' ', [153] = ' ', [154] = ' ', [155] = ' ', [156] = ' ', [157] = ' ', [158] = ' ', + [159] = ' ', [160] = ' ', [161] = ' ', [162] = ' ', [163] = ' ', [164] = ' ', [165] = ' ', [166] = ' ', + [167] = ' ', [168] = ' ', [169] = ' ', [170] = ' ', [171] = ' ', [172] = ' ', [173] = ' ', [174] = ' ', + [175] = ' ', [176] = ' ', [177] = ' ', [178] = ' 
', [179] = ' ', [180] = ' ', [181] = ' ', [182] = ' ', + [183] = ' ', [184] = ' ', [185] = ' ', [186] = ' ', [187] = ' ', [188] = ' ', [189] = ' ', [190] = ' ', + [191] = ' ', [192] = ' ', [193] = ' ', [194] = ' ', [195] = ' ', [196] = ' ', [197] = ' ', [198] = ' ', + [199] = ' ', [200] = ' ', [201] = ' ', [202] = ' ', [203] = ' ', [204] = ' ', [205] = ' ', [206] = ' ', + [207] = ' ', [208] = ' ', [209] = ' ', [210] = ' ', [211] = ' ', [212] = ' ', [213] = ' ', [214] = ' ', + [215] = ' ', [216] = ' ', [217] = ' ', [218] = ' ', [219] = ' ', [220] = ' ', [221] = ' ', [222] = ' ', + [223] = ' ', [224] = ' ', [225] = ' ', [226] = ' ', [227] = ' ', [228] = ' ', [229] = ' ', [230] = ' ', + [231] = ' ', [232] = ' ', [233] = ' ', [234] = ' ', [235] = ' ', [236] = ' ', [237] = ' ', [238] = ' ', + [239] = ' ', [240] = ' ', [241] = ' ', [242] = ' ', [243] = ' ', [244] = ' ', [245] = ' ', [246] = ' ', + [247] = ' ', [248] = ' ', [249] = ' ', [250] = ' ', [251] = ' ', [252] = ' ', [253] = ' ', [254] = ' ', + [255] = ' ' +}; + +__attribute__((constructor)) void initialize_labels_keys_char_map(void) { + // copy the values char map to the names char map + size_t i; + for(i = 0; i < 256 ;i++) + label_names_char_map[i] = label_values_char_map[i]; + + // apply overrides to the label names map + label_names_char_map['='] = '_'; + label_names_char_map[':'] = '_'; + label_names_char_map['+'] = '_'; + label_names_char_map[';'] = '_'; + label_names_char_map['@'] = '_'; + label_names_char_map['('] = '_'; + label_names_char_map[')'] = '_'; + label_names_char_map['\\'] = '/'; + + // prometheus label names + for(i = 0; i < 256 ;i++) prometheus_label_names_char_map[i] = '_'; + for(int s = 'A' ; s <= 'Z' ; s++) prometheus_label_names_char_map[s] = s; + for(int s = 'a' ; s <= 'z' ; s++) prometheus_label_names_char_map[s] = s; + for(int s = '0' ; s <= '9' ; s++) prometheus_label_names_char_map[s] = s; + prometheus_label_names_char_map[0] = '\0'; + prometheus_label_names_char_map[':'] = ':'; + prometheus_label_names_char_map['_'] = '_'; +} + +size_t rrdlabels_sanitize_name(char *dst, const char *src, size_t dst_size) { + size_t rc = text_sanitize((unsigned char *)dst, (const unsigned char *)src, dst_size, label_names_char_map, 0, "", NULL); + + for(size_t i = 0; i < rc ; i++) + if(dst[i] == ' ') dst[i] = '_'; + + return rc; +} + +size_t rrdlabels_sanitize_value(char *dst, const char *src, size_t dst_size) { + return text_sanitize((unsigned char *)dst, (const unsigned char *)src, dst_size, label_values_char_map, 1, "[none]", NULL); +} + +size_t prometheus_rrdlabels_sanitize_name(char *dst, const char *src, size_t dst_size) { + return text_sanitize((unsigned char *)dst, (const unsigned char *)src, dst_size, prometheus_label_names_char_map, 0, "", NULL); +} diff --git a/src/libnetdata/sanitizers/sanitizers-labels.h b/src/libnetdata/sanitizers/sanitizers-labels.h new file mode 100644 index 000000000..39fd6a67a --- /dev/null +++ b/src/libnetdata/sanitizers/sanitizers-labels.h @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_SANITIZERS_LABELS_H +#define NETDATA_SANITIZERS_LABELS_H + +#include "../libnetdata.h" + +size_t rrdlabels_sanitize_name(char *dst, const char *src, size_t dst_size); +size_t rrdlabels_sanitize_value(char *dst, const char *src, size_t dst_size); + +size_t prometheus_rrdlabels_sanitize_name(char *dst, const char *src, size_t dst_size); + +#endif //NETDATA_SANITIZERS_LABELS_H diff --git a/src/libnetdata/sanitizers/sanitizers-pluginsd.c 
b/src/libnetdata/sanitizers/sanitizers-pluginsd.c new file mode 100644 index 000000000..2659cffee --- /dev/null +++ b/src/libnetdata/sanitizers/sanitizers-pluginsd.c @@ -0,0 +1,79 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "sanitizers-pluginsd.h" + +/* + * Undefined and control characters become underscores + * ! -> _ + * " -> _ + * ' -> _ + * ` -> _ + * \ -> / + * = -> _ + * | -> _ + */ + +static unsigned char external_plugins_map[256] = { + [0] = '\0', [1] = ' ', [2] = ' ', [3] = ' ', [4] = ' ', [5] = ' ', [6] = ' ', [7] = ' ', [8] = ' ', + + // control characters to be treated as spaces + ['\t'] = ' ', ['\n'] = ' ', ['\v'] = ' ', ['\f'] = ' ', ['\r'] = ' ', + + [14] = ' ', [15] = ' ', [16] = ' ', [17] = ' ', [18] = ' ', [19] = ' ', [20] = ' ', [21] = ' ', + [22] = ' ', [23] = ' ', [24] = ' ', [25] = ' ', [26] = ' ', [27] = ' ', [28] = ' ', [29] = ' ', + [30] = ' ', [31] = ' ', + + // symbols + [' '] = ' ', ['!'] = '_', ['"'] = '_', ['#'] = '#', ['$'] = '$', ['%'] = '%', ['&'] = '&', ['\''] = '_', + ['('] = '(', [')'] = ')', ['*'] = '*', ['+'] = '+', [','] = ',', ['-'] = '-', ['.'] = '.', ['/'] = '/', + + // numbers + ['0'] = '0', ['1'] = '1', ['2'] = '2', ['3'] = '3', ['4'] = '4', ['5'] = '5', ['6'] = '6', ['7'] = '7', + ['8'] = '8', ['9'] = '9', + + // symbols + [':'] = ':', [';'] = ';', ['<'] = '<', ['='] = '_', ['>'] = '>', ['?'] = '?', ['@'] = '@', + + // capitals + ['A'] = 'A', ['B'] = 'B', ['C'] = 'C', ['D'] = 'D', ['E'] = 'E', ['F'] = 'F', ['G'] = 'G', ['H'] = 'H', + ['I'] = 'I', ['J'] = 'J', ['K'] = 'K', ['L'] = 'L', ['M'] = 'M', ['N'] = 'N', ['O'] = 'O', ['P'] = 'P', + ['Q'] = 'Q', ['R'] = 'R', ['S'] = 'S', ['T'] = 'T', ['U'] = 'U', ['V'] = 'V', ['W'] = 'W', ['X'] = 'X', + ['Y'] = 'Y', ['Z'] = 'Z', + + // symbols + ['['] = '[', ['\\'] = '/', [']'] = ']', ['^'] = '^', ['_'] = '_', ['`'] = '_', + + // lower + ['a'] = 'a', ['b'] = 'b', ['c'] = 'c', ['d'] = 'd', ['e'] = 'e', ['f'] = 'f', ['g'] = 'g', ['h'] = 'h', + ['i'] = 'i', ['j'] = 'j', ['k'] = 'k', ['l'] = 'l', ['m'] = 'm', ['n'] = 'n', ['o'] = 'o', ['p'] = 'p', + ['q'] = 'q', ['r'] = 'r', ['s'] = 's', ['t'] = 't', ['u'] = 'u', ['v'] = 'v', ['w'] = 'w', ['x'] = 'x', + ['y'] = 'y', ['z'] = 'z', + + // symbols + ['{'] = '{', ['|'] = '_', ['}'] = '}', ['~'] = '~', + + // rest + [127] = ' ', [128] = ' ', [129] = ' ', [130] = ' ', [131] = ' ', [132] = ' ', [133] = ' ', [134] = ' ', + [135] = ' ', [136] = ' ', [137] = ' ', [138] = ' ', [139] = ' ', [140] = ' ', [141] = ' ', [142] = ' ', + [143] = ' ', [144] = ' ', [145] = ' ', [146] = ' ', [147] = ' ', [148] = ' ', [149] = ' ', [150] = ' ', + [151] = ' ', [152] = ' ', [153] = ' ', [154] = ' ', [155] = ' ', [156] = ' ', [157] = ' ', [158] = ' ', + [159] = ' ', [160] = ' ', [161] = ' ', [162] = ' ', [163] = ' ', [164] = ' ', [165] = ' ', [166] = ' ', + [167] = ' ', [168] = ' ', [169] = ' ', [170] = ' ', [171] = ' ', [172] = ' ', [173] = ' ', [174] = ' ', + [175] = ' ', [176] = ' ', [177] = ' ', [178] = ' ', [179] = ' ', [180] = ' ', [181] = ' ', [182] = ' ', + [183] = ' ', [184] = ' ', [185] = ' ', [186] = ' ', [187] = ' ', [188] = ' ', [189] = ' ', [190] = ' ', + [191] = ' ', [192] = ' ', [193] = ' ', [194] = ' ', [195] = ' ', [196] = ' ', [197] = ' ', [198] = ' ', + [199] = ' ', [200] = ' ', [201] = ' ', [202] = ' ', [203] = ' ', [204] = ' ', [205] = ' ', [206] = ' ', + [207] = ' ', [208] = ' ', [209] = ' ', [210] = ' ', [211] = ' ', [212] = ' ', [213] = ' ', [214] = ' ', + [215] = ' ', [216] = ' ', [217] = ' ', [218] = ' ', [219] = ' ', [220] = ' ', [221] = ' ', 
[222] = ' ',
+    [223] = ' ', [224] = ' ', [225] = ' ', [226] = ' ', [227] = ' ', [228] = ' ', [229] = ' ', [230] = ' ',
+    [231] = ' ', [232] = ' ', [233] = ' ', [234] = ' ', [235] = ' ', [236] = ' ', [237] = ' ', [238] = ' ',
+    [239] = ' ', [240] = ' ', [241] = ' ', [242] = ' ', [243] = ' ', [244] = ' ', [245] = ' ', [246] = ' ',
+    [247] = ' ', [248] = ' ', [249] = ' ', [250] = ' ', [251] = ' ', [252] = ' ', [253] = ' ', [254] = ' ',
+    [255] = ' '
+};
+
+size_t external_plugins_sanitize(char *dst, const char *src, size_t dst_len) {
+    return text_sanitize((unsigned char *)dst, (const unsigned char *)src, dst_len,
+                         external_plugins_map, true, "", NULL);
+}
+
diff --git a/src/libnetdata/sanitizers/sanitizers-pluginsd.h b/src/libnetdata/sanitizers/sanitizers-pluginsd.h
new file mode 100644
index 000000000..1779a1451
--- /dev/null
+++ b/src/libnetdata/sanitizers/sanitizers-pluginsd.h
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_SANITIZERS_PLUGINSD_H
+#define NETDATA_SANITIZERS_PLUGINSD_H
+
+#include "../libnetdata.h"
+
+size_t external_plugins_sanitize(char *dst, const char *src, size_t dst_len);
+
+#endif //NETDATA_SANITIZERS_PLUGINSD_H
diff --git a/src/libnetdata/sanitizers/sanitizers.h b/src/libnetdata/sanitizers/sanitizers.h
new file mode 100644
index 000000000..d76b18f7d
--- /dev/null
+++ b/src/libnetdata/sanitizers/sanitizers.h
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_SANITIZERS_H
+#define NETDATA_SANITIZERS_H
+
+#include "utf8-sanitizer.h"
+#include "sanitizers-labels.h"
+#include "sanitizers-functions.h"
+#include "sanitizers-pluginsd.h"
+#include "chart_id_and_name.h"
+
+#endif //NETDATA_SANITIZERS_H
diff --git a/src/libnetdata/sanitizers/utf8-sanitizer.c b/src/libnetdata/sanitizers/utf8-sanitizer.c
new file mode 100644
index 000000000..e10d88f41
--- /dev/null
+++ b/src/libnetdata/sanitizers/utf8-sanitizer.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+size_t text_sanitize(unsigned char *dst, const unsigned char *src, size_t dst_size, const unsigned char *char_map, bool utf, const char *empty, size_t *multibyte_length) {
+    if(unlikely(!dst || !dst_size)) return 0;
+
+    // skip leading spaces and invalid characters
+    while(src && *src && !IS_UTF8_BYTE(*src) && (isspace(*src) || iscntrl(*src) || !isprint(*src)))
+        src++;
+
+    if(unlikely(!src || !*src)) {
+        strncpyz((char *)dst, empty, dst_size);
+        dst[dst_size - 1] = '\0';
+        size_t len = strlen((char *)dst);
+        if(multibyte_length) *multibyte_length = len;
+        return len;
+    }
+
+    unsigned char *d = dst;
+
+    // make room for the final string termination
+    unsigned char *end = &dst[dst_size - 1];
+
+    // copy while converting, but keep only one space;
+    // we start with last_is_space = 1 to skip leading spaces
+    int last_is_space = 1;
+
+    size_t mblen = 0;
+
+    while(*src && d < end) {
+        unsigned char c = *src;
+
+        if(IS_UTF8_STARTBYTE(c) && IS_UTF8_BYTE(src[1]) && d + 2 <= end) {
+            // UTF-8 multi-byte encoded character
+
+            // find how big this character is (2-4 bytes)
+            size_t utf_character_size = 2;
+            while(utf_character_size < 4 &&
+                  d + utf_character_size <= end &&
+                  IS_UTF8_BYTE(src[utf_character_size]) &&
+                  !IS_UTF8_STARTBYTE(src[utf_character_size]))
+                utf_character_size++;
+
+            if(utf) {
+                while(utf_character_size) {
+                    utf_character_size--;
+                    *d++ = *src++;
+                }
+            }
+            else {
+                // UTF-8 characters are not allowed.
+ // Assume it is an underscore + // and skip all except the first byte + *d++ = '_'; + src += (utf_character_size - 1); + } + + last_is_space = 0; + mblen++; + continue; + } + + c = char_map[c]; + if(c == ' ') { + // a space character + + if(!last_is_space) { + // add one space + *d++ = c; + mblen++; + } + + last_is_space++; + } + else { + *d++ = c; + last_is_space = 0; + mblen++; + } + + src++; + } + + // remove trailing spaces + while(d > dst && !IS_UTF8_BYTE(*(d - 1)) && *(d - 1) == ' ') { + d--; + mblen--; + } + + // put a termination at the end of what we copied + *d = '\0'; + + // check if dst is all underscores and empty it if it is + if(*dst == '_') { + unsigned char *t = dst; + while (*t == '_') t++; + if (unlikely(*t == '\0')) { + *dst = '\0'; + mblen = 0; + } + } + + // check if it is empty + if(unlikely(*dst == '\0')) { + strncpyz((char *)dst, empty, dst_size); + dst[dst_size - 1] = '\0'; + mblen = strlen((char *)dst); + if(multibyte_length) *multibyte_length = mblen; + return mblen; + } + + if(multibyte_length) *multibyte_length = mblen; + + return d - dst; +} diff --git a/src/libnetdata/sanitizers/utf8-sanitizer.h b/src/libnetdata/sanitizers/utf8-sanitizer.h new file mode 100644 index 000000000..8b5f73a7f --- /dev/null +++ b/src/libnetdata/sanitizers/utf8-sanitizer.h @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_UTF8_SANITIZER_H +#define NETDATA_UTF8_SANITIZER_H + +#include "../libnetdata.h" + +size_t text_sanitize(unsigned char *dst, const unsigned char *src, size_t dst_size, const unsigned char *char_map, bool utf, const char *empty, size_t *multibyte_length); + +#endif //NETDATA_UTF8_SANITIZER_H diff --git a/src/libnetdata/simple_hashtable.h b/src/libnetdata/simple_hashtable.h deleted file mode 100644 index 13cdcd10e..000000000 --- a/src/libnetdata/simple_hashtable.h +++ /dev/null @@ -1,544 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#ifndef NETDATA_SIMPLE_HASHTABLE_H -#define NETDATA_SIMPLE_HASHTABLE_H - -typedef uint64_t SIMPLE_HASHTABLE_HASH; -#define SIMPLE_HASHTABLE_HASH_SECOND_HASH_SHIFTS 32 - -/* - * CONFIGURATION - * - * SIMPLE_HASHTABLE_NAME - * The name of the hashtable - all functions and defines will have this name appended - * Example: #define SIMPLE_HASHTABLE_NAME _FACET_KEY - * - * SIMPLE_HASHTABLE_VALUE_TYPE and SIMPLE_HASHTABLE_KEY_TYPE - * The data types of values and keys - optional - setting them will enable strict type checking by the compiler. - * If undefined, they both default to void. - * - * SIMPLE_HASHTABLE_SORT_FUNCTION - * A function name that accepts 2x values and compares them for sorting (returning -1, 0, 1). - * When set, the hashtable will maintain an always sorted array of the values in the hashtable. - * Do not use this for non-static hashtables. So, if your data is changing all the time, this can make the - * hashtable quite slower (it memmove()s an array of pointers to keep it sorted, on every single change). - * - * SIMPLE_HASHTABLE_VALUE2KEY_FUNCTION and SIMPLE_HASHTABLE_COMPARE_KEYS_FUNCTION - * The hashtable can either compare just hashes (the default), or hashes and keys (when these are set). - * Both need to be set for this feature to be enabled. - * - * - SIMPLE_HASHTABLE_VALUE2KEY_FUNCTION - * The name of a function accepting SIMPLE_HASHTABLE_VALUE_TYPE pointer. - * It should return a pointer to SIMPLE_HASHTABLE_KEY_TYPE. - * This function is called prior to SIMPLE_HASHTABLE_COMPARE_KEYS_FUNCTION to extract the key from a value. 
- * It is also called during hashtable resize, to rehash all values in the hashtable. - * - * - SIMPLE_HASHTABLE_COMPARE_KEYS_FUNCTION - * The name of a function accepting 2x SIMPLE_HASHTABLE_KEY_TYPE pointers. - * It should return true when the keys match. - * This function is only called when the hashes match, to verify that the keys also match. - * - * SIMPLE_HASHTABLE_SAMPLE_IMPLEMENTATION - * If defined, 3x functions will be injected for easily working with the hashtable. - * - */ - - -#ifndef SIMPLE_HASHTABLE_NAME -#define SIMPLE_HASHTABLE_NAME -#endif - -#ifndef SIMPLE_HASHTABLE_VALUE_TYPE -#define SIMPLE_HASHTABLE_VALUE_TYPE void -#endif - -#ifndef SIMPLE_HASHTABLE_KEY_TYPE -#define SIMPLE_HASHTABLE_KEY_TYPE void -#endif - -#ifndef SIMPLE_HASHTABLE_VALUE2KEY_FUNCTION -#undef SIMPLE_HASHTABLE_COMPARE_KEYS_FUNCTION -#endif - -#if defined(SIMPLE_HASHTABLE_VALUE2KEY_FUNCTION) -static inline SIMPLE_HASHTABLE_KEY_TYPE *SIMPLE_HASHTABLE_VALUE2KEY_FUNCTION(SIMPLE_HASHTABLE_VALUE_TYPE *); -#endif - -#if defined(SIMPLE_HASHTABLE_COMPARE_KEYS_FUNCTION) -static inline bool SIMPLE_HASHTABLE_COMPARE_KEYS_FUNCTION(SIMPLE_HASHTABLE_KEY_TYPE *, SIMPLE_HASHTABLE_KEY_TYPE *); -#endif - -// First layer of macro for token concatenation -#define CONCAT_INTERNAL(a, b) a ## b -// Second layer of macro, which ensures proper expansion -#define CONCAT(a, b) CONCAT_INTERNAL(a, b) - -// define names for all structures and structures -#define simple_hashtable_init_named CONCAT(simple_hashtable_init, SIMPLE_HASHTABLE_NAME) -#define simple_hashtable_destroy_named CONCAT(simple_hashtable_destroy, SIMPLE_HASHTABLE_NAME) - -#define simple_hashtable_slot_named CONCAT(simple_hashtable_slot, SIMPLE_HASHTABLE_NAME) -#define SIMPLE_HASHTABLE_SLOT_NAMED CONCAT(SIMPLE_HASHTABLE_SLOT, SIMPLE_HASHTABLE_NAME) -#define simple_hashtable_named CONCAT(simple_hashtable, SIMPLE_HASHTABLE_NAME) -#define SIMPLE_HASHTABLE_NAMED CONCAT(SIMPLE_HASHTABLE, SIMPLE_HASHTABLE_NAME) -#define simple_hashtable_resize_named CONCAT(simple_hashtable_resize, SIMPLE_HASHTABLE_NAME) -#define simple_hashtable_can_use_slot_named CONCAT(simple_hashtable_keys_match, SIMPLE_HASHTABLE_NAME) -#define simple_hashtable_get_slot_named CONCAT(simple_hashtable_get_slot, SIMPLE_HASHTABLE_NAME) -#define simple_hashtable_del_slot_named CONCAT(simple_hashtable_del_slot, SIMPLE_HASHTABLE_NAME) -#define simple_hashtable_set_slot_named CONCAT(simple_hashtable_set_slot, SIMPLE_HASHTABLE_NAME) -#define simple_hashtable_first_read_only_named CONCAT(simple_hashtable_first_read_only, SIMPLE_HASHTABLE_NAME) -#define simple_hashtable_next_read_only_named CONCAT(simple_hashtable_next_read_only, SIMPLE_HASHTABLE_NAME) - -#define simple_hashtable_sorted_binary_search_named CONCAT(simple_hashtable_sorted_binary_search, SIMPLE_HASHTABLE_NAME) -#define simple_hashtable_add_value_sorted_named CONCAT(simple_hashtable_add_value_sorted, SIMPLE_HASHTABLE_NAME) -#define simple_hashtable_del_value_sorted_named CONCAT(simple_hashtable_del_value_sorted, SIMPLE_HASHTABLE_NAME) -#define simple_hashtable_replace_value_sorted_named CONCAT(simple_hashtable_replace_value_sorted, SIMPLE_HASHTABLE_NAME) -#define simple_hashtable_sorted_array_first_read_only_named CONCAT(simple_hashtable_sorted_array_first_read_only, SIMPLE_HASHTABLE_NAME) -#define simple_hashtable_sorted_array_next_read_only_named CONCAT(simple_hashtable_sorted_array_next_read_only, SIMPLE_HASHTABLE_NAME) - -typedef struct simple_hashtable_slot_named { - SIMPLE_HASHTABLE_HASH hash; - SIMPLE_HASHTABLE_VALUE_TYPE *data; -} 
SIMPLE_HASHTABLE_SLOT_NAMED; - -typedef struct simple_hashtable_named { - size_t resizes; - size_t searches; - size_t collisions; - size_t additions; - size_t deletions; - size_t deleted; - size_t used; - size_t size; - bool needs_cleanup; - SIMPLE_HASHTABLE_SLOT_NAMED *hashtable; - -#ifdef SIMPLE_HASHTABLE_SORT_FUNCTION - struct { - size_t used; - size_t size; - SIMPLE_HASHTABLE_VALUE_TYPE **array; - } sorted; -#endif -} SIMPLE_HASHTABLE_NAMED; - -#ifdef SIMPLE_HASHTABLE_SORT_FUNCTION -static inline size_t simple_hashtable_sorted_binary_search_named(SIMPLE_HASHTABLE_NAMED *ht, SIMPLE_HASHTABLE_VALUE_TYPE *value) { - size_t left = 0, right = ht->sorted.used; - - while (left < right) { - size_t mid = left + (right - left) / 2; - if (SIMPLE_HASHTABLE_SORT_FUNCTION(ht->sorted.array[mid], value) < 0) - left = mid + 1; - else - right = mid; - } - - return left; -} - -static inline void simple_hashtable_add_value_sorted_named(SIMPLE_HASHTABLE_NAMED *ht, SIMPLE_HASHTABLE_VALUE_TYPE *value) { - size_t index = simple_hashtable_sorted_binary_search_named(ht, value); - - // Ensure there's enough space in the sorted array - if (ht->sorted.used >= ht->sorted.size) { - size_t size = ht->sorted.size ? ht->sorted.size * 2 : 64; - SIMPLE_HASHTABLE_VALUE_TYPE **array = mallocz(size * sizeof(SIMPLE_HASHTABLE_VALUE_TYPE *)); - if(ht->sorted.array) { - memcpy(array, ht->sorted.array, ht->sorted.size * sizeof(SIMPLE_HASHTABLE_VALUE_TYPE *)); - freez(ht->sorted.array); - } - ht->sorted.array = array; - ht->sorted.size = size; - } - - // Use memmove to shift elements and create space for the new element - memmove(&ht->sorted.array[index + 1], &ht->sorted.array[index], (ht->sorted.used - index) * sizeof(SIMPLE_HASHTABLE_VALUE_TYPE *)); - - ht->sorted.array[index] = value; - ht->sorted.used++; -} - -static inline void simple_hashtable_del_value_sorted_named(SIMPLE_HASHTABLE_NAMED *ht, SIMPLE_HASHTABLE_VALUE_TYPE *value) { - size_t index = simple_hashtable_sorted_binary_search_named(ht, value); - - // Check if the value exists at the found index - assert(index < ht->sorted.used && ht->sorted.array[index] == value); - - // Use memmove to shift elements and close the gap - memmove(&ht->sorted.array[index], &ht->sorted.array[index + 1], (ht->sorted.used - index - 1) * sizeof(SIMPLE_HASHTABLE_VALUE_TYPE *)); - ht->sorted.used--; -} - -static inline void simple_hashtable_replace_value_sorted_named(SIMPLE_HASHTABLE_NAMED *ht, SIMPLE_HASHTABLE_VALUE_TYPE *old_value, SIMPLE_HASHTABLE_VALUE_TYPE *new_value) { - if(new_value == old_value) - return; - - size_t old_value_index = simple_hashtable_sorted_binary_search_named(ht, old_value); - assert(old_value_index < ht->sorted.used && ht->sorted.array[old_value_index] == old_value); - - int r = SIMPLE_HASHTABLE_SORT_FUNCTION(old_value, new_value); - if(r == 0) { - // Same value, so use the same index - ht->sorted.array[old_value_index] = new_value; - return; - } - - size_t new_value_index = simple_hashtable_sorted_binary_search_named(ht, new_value); - if(old_value_index == new_value_index) { - // Not the same value, but still at the same index - ht->sorted.array[old_value_index] = new_value; - return; - } - else if (old_value_index < new_value_index) { - // The old value is before the new value - size_t shift_start = old_value_index + 1; - size_t shift_end = new_value_index - 1; - size_t shift_size = shift_end - old_value_index; - - memmove(&ht->sorted.array[old_value_index], &ht->sorted.array[shift_start], shift_size * sizeof(SIMPLE_HASHTABLE_VALUE_TYPE *)); - 
ht->sorted.array[shift_end] = new_value; - } - else { - // The old value is after the new value - size_t shift_start = new_value_index; - size_t shift_end = old_value_index; - size_t shift_size = shift_end - new_value_index; - - memmove(&ht->sorted.array[new_value_index + 1], &ht->sorted.array[shift_start], shift_size * sizeof(SIMPLE_HASHTABLE_VALUE_TYPE *)); - ht->sorted.array[new_value_index] = new_value; - } -} - -static inline SIMPLE_HASHTABLE_VALUE_TYPE **simple_hashtable_sorted_array_first_read_only_named(SIMPLE_HASHTABLE_NAMED *ht) { - if (ht->sorted.used > 0) { - return &ht->sorted.array[0]; - } - return NULL; -} - -static inline SIMPLE_HASHTABLE_VALUE_TYPE **simple_hashtable_sorted_array_next_read_only_named(SIMPLE_HASHTABLE_NAMED *ht, SIMPLE_HASHTABLE_VALUE_TYPE **last) { - if (!last) return NULL; - - // Calculate the current position in the sorted array - size_t currentIndex = last - ht->sorted.array; - - // Proceed to the next element if it exists - if (currentIndex + 1 < ht->sorted.used) { - return &ht->sorted.array[currentIndex + 1]; - } - - // If no more elements, return NULL - return NULL; -} - -#define SIMPLE_HASHTABLE_SORTED_FOREACH_READ_ONLY(ht, var, type, name) \ - for (type **(var) = simple_hashtable_sorted_array_first_read_only ## name(ht); \ - var; \ - (var) = simple_hashtable_sorted_array_next_read_only ## name(ht, var)) - -#define SIMPLE_HASHTABLE_SORTED_FOREACH_READ_ONLY_VALUE(var) (*(var)) - -#else -static inline void simple_hashtable_add_value_sorted_named(SIMPLE_HASHTABLE_NAMED *ht __maybe_unused, SIMPLE_HASHTABLE_VALUE_TYPE *value __maybe_unused) { ; } -static inline void simple_hashtable_del_value_sorted_named(SIMPLE_HASHTABLE_NAMED *ht __maybe_unused, SIMPLE_HASHTABLE_VALUE_TYPE *value __maybe_unused) { ; } -static inline void simple_hashtable_replace_value_sorted_named(SIMPLE_HASHTABLE_NAMED *ht __maybe_unused, SIMPLE_HASHTABLE_VALUE_TYPE *old_value __maybe_unused, SIMPLE_HASHTABLE_VALUE_TYPE *new_value __maybe_unused) { ; } -#endif - -static inline void simple_hashtable_init_named(SIMPLE_HASHTABLE_NAMED *ht, size_t size) { - memset(ht, 0, sizeof(*ht)); - ht->size = size; - ht->hashtable = callocz(ht->size, sizeof(*ht->hashtable)); -} - -static inline void simple_hashtable_destroy_named(SIMPLE_HASHTABLE_NAMED *ht) { -#ifdef SIMPLE_HASHTABLE_SORT_FUNCTION - freez(ht->sorted.array); -#endif - - freez(ht->hashtable); - memset(ht, 0, sizeof(*ht)); -} - -static inline void simple_hashtable_resize_named(SIMPLE_HASHTABLE_NAMED *ht); - -#define simple_hashtable_data_unset ((void *)NULL) -#define simple_hashtable_data_deleted ((void *)UINT64_MAX) -#define simple_hashtable_data_usernull ((void *)(UINT64_MAX - 1)) -#define simple_hashtable_is_slot_unset(sl) ((sl)->data == simple_hashtable_data_unset) -#define simple_hashtable_is_slot_deleted(sl) ((sl)->data == simple_hashtable_data_deleted) -#define simple_hashtable_is_slot_usernull(sl) ((sl)->data == simple_hashtable_data_usernull) -#define SIMPLE_HASHTABLE_SLOT_DATA(sl) ((simple_hashtable_is_slot_unset(sl) || simple_hashtable_is_slot_deleted(sl) || simple_hashtable_is_slot_usernull(sl)) ? 
NULL : (sl)->data) - -static inline bool simple_hashtable_can_use_slot_named( - SIMPLE_HASHTABLE_SLOT_NAMED *sl, SIMPLE_HASHTABLE_HASH hash, - SIMPLE_HASHTABLE_KEY_TYPE *key __maybe_unused) { - - if(simple_hashtable_is_slot_unset(sl)) - return true; - - if(simple_hashtable_is_slot_deleted(sl)) - return false; - - if(sl->hash == hash) { -#if defined(SIMPLE_HASHTABLE_COMPARE_KEYS_FUNCTION) && defined(SIMPLE_HASHTABLE_VALUE2KEY_FUNCTION) - return SIMPLE_HASHTABLE_COMPARE_KEYS_FUNCTION(SIMPLE_HASHTABLE_VALUE2KEY_FUNCTION(SIMPLE_HASHTABLE_SLOT_DATA(sl)), key); -#else - return true; -#endif - } - - return false; -} - -#define SIMPLE_HASHTABLE_NEEDS_RESIZE(ht) ((ht)->size <= ((ht)->used - (ht)->deleted) << 1 || (ht)->used >= (ht)->size) - -// IMPORTANT: the pointer returned by this call is valid up to the next call of this function (or the resize one). -// If you need to cache something, cache the hash, not the slot pointer. -static inline SIMPLE_HASHTABLE_SLOT_NAMED *simple_hashtable_get_slot_named( - SIMPLE_HASHTABLE_NAMED *ht, SIMPLE_HASHTABLE_HASH hash, - SIMPLE_HASHTABLE_KEY_TYPE *key, bool resize) { - - // This function finds the requested hash and key in the hashtable. - // It uses a second version of the hash in case of collisions, and then linear probing. - // It may resize the hashtable if it is more than 50% full. - - // Deleted items remain in the hashtable, but they are marked as DELETED. - // Reuse of DELETED slots happens only if the slot to be returned is UNSET. - // So, when looking up for an item, it tries to find it, assuming DELETED - // slots are occupied. If the item to be returned is UNSET, and it has - // encountered a DELETED slot, it returns the DELETED one instead of the UNSET. - - ht->searches++; - - size_t slot; - SIMPLE_HASHTABLE_SLOT_NAMED *sl; - SIMPLE_HASHTABLE_SLOT_NAMED *deleted; - - slot = hash % ht->size; - sl = &ht->hashtable[slot]; - deleted = simple_hashtable_is_slot_deleted(sl) ? sl : NULL; - if(likely(simple_hashtable_can_use_slot_named(sl, hash, key))) - return (simple_hashtable_is_slot_unset(sl) && deleted) ? deleted : sl; - - ht->collisions++; - - if(unlikely(resize && (ht->needs_cleanup || SIMPLE_HASHTABLE_NEEDS_RESIZE(ht)))) { - simple_hashtable_resize_named(ht); - deleted = NULL; // our deleted pointer is not valid anymore - - slot = hash % ht->size; - sl = &ht->hashtable[slot]; - if(likely(simple_hashtable_can_use_slot_named(sl, hash, key))) - return sl; - - ht->collisions++; - } - - slot = ((hash >> SIMPLE_HASHTABLE_HASH_SECOND_HASH_SHIFTS) + 1) % ht->size; - sl = &ht->hashtable[slot]; - deleted = (!deleted && simple_hashtable_is_slot_deleted(sl)) ? sl : deleted; - - // Linear probing until we find it - SIMPLE_HASHTABLE_SLOT_NAMED *sl_started = sl; - size_t collisions_started = ht->collisions; - while (!simple_hashtable_can_use_slot_named(sl, hash, key)) { - slot = (slot + 1) % ht->size; // Wrap around if necessary - sl = &ht->hashtable[slot]; - deleted = (!deleted && simple_hashtable_is_slot_deleted(sl)) ? sl : deleted; - ht->collisions++; - - if(sl == sl_started) { - if(deleted) { - // we looped through all items, and we didn't find a free slot, - // but we have found a deleted slot, so return it. - return deleted; - } - else if(resize) { - // the hashtable is full, without any deleted slots. - // we need to resize it now. - simple_hashtable_resize_named(ht); - return simple_hashtable_get_slot_named(ht, hash, key, false); - } - else { - // the hashtable is full, but resize is false. - // this should never happen. 
- assert(sl != sl_started); - } - } - } - - if((ht->collisions - collisions_started) > (ht->size / 2) && ht->deleted >= (ht->size / 3)) { - // we traversed through half of the hashtable to find a slot, - // but we have more than 1/3 deleted items - ht->needs_cleanup = true; - } - - return (simple_hashtable_is_slot_unset(sl) && deleted) ? deleted : sl; -} - -static inline bool simple_hashtable_del_slot_named(SIMPLE_HASHTABLE_NAMED *ht, SIMPLE_HASHTABLE_SLOT_NAMED *sl) { - if(simple_hashtable_is_slot_unset(sl) || simple_hashtable_is_slot_deleted(sl)) - return false; - - ht->deletions++; - ht->deleted++; - - simple_hashtable_del_value_sorted_named(ht, SIMPLE_HASHTABLE_SLOT_DATA(sl)); - - sl->data = simple_hashtable_data_deleted; - return true; -} - -static inline void simple_hashtable_set_slot_named( - SIMPLE_HASHTABLE_NAMED *ht, SIMPLE_HASHTABLE_SLOT_NAMED *sl, - SIMPLE_HASHTABLE_HASH hash, SIMPLE_HASHTABLE_VALUE_TYPE *data) { - - if(data == NULL) - data = simple_hashtable_data_usernull; - - if(unlikely(data == simple_hashtable_data_unset || data == simple_hashtable_data_deleted)) { - simple_hashtable_del_slot_named(ht, sl); - return; - } - - if(likely(simple_hashtable_is_slot_unset(sl))) { - simple_hashtable_add_value_sorted_named(ht, data); - ht->used++; - } - - else if(unlikely(simple_hashtable_is_slot_deleted(sl))) { - ht->deleted--; - } - - else - simple_hashtable_replace_value_sorted_named(ht, SIMPLE_HASHTABLE_SLOT_DATA(sl), data); - - sl->hash = hash; - sl->data = data; - ht->additions++; -} - -// IMPORTANT -// this call invalidates all SIMPLE_HASHTABLE_SLOT_NAMED pointers -static inline void simple_hashtable_resize_named(SIMPLE_HASHTABLE_NAMED *ht) { - SIMPLE_HASHTABLE_SLOT_NAMED *old = ht->hashtable; - size_t old_size = ht->size; - - size_t new_size = ht->size; - - if(SIMPLE_HASHTABLE_NEEDS_RESIZE(ht)) - new_size = (ht->size << 1) - ((ht->size > 16) ? 
1 : 0); - - ht->resizes++; - ht->size = new_size; - ht->hashtable = callocz(new_size, sizeof(*ht->hashtable)); - size_t used = 0; - for(size_t i = 0 ; i < old_size ; i++) { - SIMPLE_HASHTABLE_SLOT_NAMED *slot = &old[i]; - if(simple_hashtable_is_slot_unset(slot) || simple_hashtable_is_slot_deleted(slot)) - continue; - - SIMPLE_HASHTABLE_KEY_TYPE *key = NULL; - -#if defined(SIMPLE_HASHTABLE_COMPARE_KEYS_FUNCTION) && defined(SIMPLE_HASHTABLE_VALUE2KEY_FUNCTION) - SIMPLE_HASHTABLE_VALUE_TYPE *value = SIMPLE_HASHTABLE_SLOT_DATA(slot); - key = SIMPLE_HASHTABLE_VALUE2KEY_FUNCTION(value); -#endif - - SIMPLE_HASHTABLE_SLOT_NAMED *slot2 = simple_hashtable_get_slot_named(ht, slot->hash, key, false); - *slot2 = *slot; - used++; - } - - assert(used == ht->used - ht->deleted); - - ht->used = used; - ht->deleted = 0; - ht->needs_cleanup = false; - - freez(old); -} - -// ---------------------------------------------------------------------------- -// hashtable traversal, in read-only mode -// the hashtable should not be modified while the traversal is taking place - -static inline SIMPLE_HASHTABLE_SLOT_NAMED *simple_hashtable_first_read_only_named(SIMPLE_HASHTABLE_NAMED *ht) { - for(size_t i = 0; i < ht->size ;i++) { - SIMPLE_HASHTABLE_SLOT_NAMED *sl = &ht->hashtable[i]; - if(!simple_hashtable_is_slot_unset(sl) && !simple_hashtable_is_slot_deleted(sl)) - return sl; - } - - return NULL; -} - -static inline SIMPLE_HASHTABLE_SLOT_NAMED *simple_hashtable_next_read_only_named(SIMPLE_HASHTABLE_NAMED *ht, SIMPLE_HASHTABLE_SLOT_NAMED *last) { - if (!last) return NULL; - - // Calculate the current position in the array - size_t index = last - ht->hashtable; - - // Iterate over the hashtable starting from the next element - for (size_t i = index + 1; i < ht->size; i++) { - SIMPLE_HASHTABLE_SLOT_NAMED *sl = &ht->hashtable[i]; - if (!simple_hashtable_is_slot_unset(sl) && !simple_hashtable_is_slot_deleted(sl)) { - return sl; - } - } - - // If no more data slots are found, return NULL - return NULL; -} - -#define SIMPLE_HASHTABLE_FOREACH_READ_ONLY(ht, var, name) \ - for(struct simple_hashtable_slot ## name *(var) = simple_hashtable_first_read_only ## name(ht); \ - var; \ - (var) = simple_hashtable_next_read_only ## name(ht, var)) - -#define SIMPLE_HASHTABLE_FOREACH_READ_ONLY_VALUE(var) SIMPLE_HASHTABLE_SLOT_DATA(var) - -// ---------------------------------------------------------------------------- -// high level implementation - -#ifdef SIMPLE_HASHTABLE_SAMPLE_IMPLEMENTATION - -#ifndef XXH_INLINE_ALL -#define XXH_INLINE_ALL -#endif -#include "xxhash.h" - -#define simple_hashtable_set_named CONCAT(simple_hashtable_set, SIMPLE_HASHTABLE_NAME) -#define simple_hashtable_get_named CONCAT(simple_hashtable_get, SIMPLE_HASHTABLE_NAME) -#define simple_hashtable_del_named CONCAT(simple_hashtable_del, SIMPLE_HASHTABLE_NAME) - -static inline SIMPLE_HASHTABLE_VALUE_TYPE *simple_hashtable_set_named(SIMPLE_HASHTABLE_NAMED *ht, SIMPLE_HASHTABLE_KEY_TYPE *key, size_t key_len, SIMPLE_HASHTABLE_VALUE_TYPE *data) { - XXH64_hash_t hash = XXH3_64bits((void *)key, key_len); - SIMPLE_HASHTABLE_SLOT_NAMED *sl = simple_hashtable_get_slot_named(ht, hash, key, true); - simple_hashtable_set_slot_named(ht, sl, hash, data); - return SIMPLE_HASHTABLE_SLOT_DATA(sl); -} - -static inline SIMPLE_HASHTABLE_VALUE_TYPE *simple_hashtable_get_named(SIMPLE_HASHTABLE_NAMED *ht, SIMPLE_HASHTABLE_KEY_TYPE *key, size_t key_len, SIMPLE_HASHTABLE_VALUE_TYPE *data) { - XXH64_hash_t hash = XXH3_64bits((void *)key, key_len); - SIMPLE_HASHTABLE_SLOT_NAMED *sl = 
simple_hashtable_get_slot_named(ht, hash, key, true);
-    return SIMPLE_HASHTABLE_SLOT_DATA(sl);
-}
-
-static inline bool simple_hashtable_del_named(SIMPLE_HASHTABLE_NAMED *ht, SIMPLE_HASHTABLE_KEY_TYPE *key, size_t key_len, SIMPLE_HASHTABLE_VALUE_TYPE *data) {
-    XXH64_hash_t hash = XXH3_64bits((void *)key, key_len);
-    SIMPLE_HASHTABLE_SLOT_NAMED *sl = simple_hashtable_get_slot_named(ht, hash, key, true);
-    return simple_hashtable_del_slot_named(ht, sl);
-}
-
-#endif // SIMPLE_HASHTABLE_SAMPLE_IMPLEMENTATION
-
-// ----------------------------------------------------------------------------
-// Clear the preprocessor defines of simple_hashtable.h
-// allowing simple_hashtable.h to be included multiple times
-// with different configuration each time.
-
-#include "simple_hashtable_undef.h"
-
-#endif //NETDATA_SIMPLE_HASHTABLE_H
diff --git a/src/libnetdata/simple_hashtable/simple_hashtable.h b/src/libnetdata/simple_hashtable/simple_hashtable.h
new file mode 100644
index 000000000..fe88d23f8
--- /dev/null
+++ b/src/libnetdata/simple_hashtable/simple_hashtable.h
@@ -0,0 +1,544 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_SIMPLE_HASHTABLE_H
+#define NETDATA_SIMPLE_HASHTABLE_H
+
+typedef uint64_t SIMPLE_HASHTABLE_HASH;
+#define SIMPLE_HASHTABLE_HASH_SECOND_HASH_SHIFTS 32
+
+/*
+ * CONFIGURATION
+ *
+ * SIMPLE_HASHTABLE_NAME
+ *    The name of the hashtable - all functions and defines will have this name appended
+ *    Example: #define SIMPLE_HASHTABLE_NAME _FACET_KEY
+ *
+ * SIMPLE_HASHTABLE_VALUE_TYPE and SIMPLE_HASHTABLE_KEY_TYPE
+ *    The data types of values and keys - optional - setting them will enable strict type checking by the compiler.
+ *    If undefined, they both default to void.
+ *
+ * SIMPLE_HASHTABLE_SORT_FUNCTION
+ *    A function name that accepts 2x values and compares them for sorting (returning -1, 0, 1).
+ *    When set, the hashtable will maintain an always sorted array of the values in the hashtable.
+ *    Do not use this for non-static hashtables; if your data is changing all the time, this can make the
+ *    hashtable considerably slower (it memmove()s an array of pointers to keep it sorted, on every single change).
+ *
+ * SIMPLE_HASHTABLE_VALUE2KEY_FUNCTION and SIMPLE_HASHTABLE_COMPARE_KEYS_FUNCTION
+ *    The hashtable can either compare just hashes (the default), or hashes and keys (when these are set).
+ *    Both need to be set for this feature to be enabled.
+ *
+ *    - SIMPLE_HASHTABLE_VALUE2KEY_FUNCTION
+ *      The name of a function accepting SIMPLE_HASHTABLE_VALUE_TYPE pointer.
+ *      It should return a pointer to SIMPLE_HASHTABLE_KEY_TYPE.
+ *      This function is called prior to SIMPLE_HASHTABLE_COMPARE_KEYS_FUNCTION to extract the key from a value.
+ *      It is also called during hashtable resize, to rehash all values in the hashtable.
+ *
+ *    - SIMPLE_HASHTABLE_COMPARE_KEYS_FUNCTION
+ *      The name of a function accepting 2x SIMPLE_HASHTABLE_KEY_TYPE pointers.
+ *      It should return true when the keys match.
+ *      This function is only called when the hashes match, to verify that the keys also match.
+ *
+ * SIMPLE_HASHTABLE_SAMPLE_IMPLEMENTATION
+ *    If defined, 3x functions will be injected for easily working with the hashtable.
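+ *
+ *    A minimal usage sketch (hypothetical names; it assumes the sample implementation
+ *    is enabled and that the xxHash header it includes is available):
+ *
+ *        #define SIMPLE_HASHTABLE_NAME _MYTABLE
+ *        #define SIMPLE_HASHTABLE_VALUE_TYPE struct my_value
+ *        #define SIMPLE_HASHTABLE_SAMPLE_IMPLEMENTATION 1
+ *        #include "simple_hashtable.h"
+ *
+ *        SIMPLE_HASHTABLE_MYTABLE ht;
+ *        simple_hashtable_init_MYTABLE(&ht, 1024);                     // initial number of slots
+ *        struct my_value v = { 0 };
+ *        char key[] = "mykey";
+ *        simple_hashtable_set_MYTABLE(&ht, key, sizeof(key) - 1, &v);  // add or replace
+ *        struct my_value *found = simple_hashtable_get_MYTABLE(&ht, key, sizeof(key) - 1);
+ *        simple_hashtable_del_MYTABLE(&ht, key, sizeof(key) - 1);      // true when it existed
+ *        simple_hashtable_destroy_MYTABLE(&ht);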
+ *
+ */
+
+
+#ifndef SIMPLE_HASHTABLE_NAME
+#define SIMPLE_HASHTABLE_NAME
+#endif
+
+#ifndef SIMPLE_HASHTABLE_VALUE_TYPE
+#define SIMPLE_HASHTABLE_VALUE_TYPE void
+#endif
+
+#ifndef SIMPLE_HASHTABLE_KEY_TYPE
+#define SIMPLE_HASHTABLE_KEY_TYPE void
+#endif
+
+#ifndef SIMPLE_HASHTABLE_VALUE2KEY_FUNCTION
+#undef SIMPLE_HASHTABLE_COMPARE_KEYS_FUNCTION
+#endif
+
+#if defined(SIMPLE_HASHTABLE_VALUE2KEY_FUNCTION)
+static inline SIMPLE_HASHTABLE_KEY_TYPE *SIMPLE_HASHTABLE_VALUE2KEY_FUNCTION(SIMPLE_HASHTABLE_VALUE_TYPE *);
+#endif
+
+#if defined(SIMPLE_HASHTABLE_COMPARE_KEYS_FUNCTION)
+static inline bool SIMPLE_HASHTABLE_COMPARE_KEYS_FUNCTION(SIMPLE_HASHTABLE_KEY_TYPE *, SIMPLE_HASHTABLE_KEY_TYPE *);
+#endif
+
+// First layer of macro for token concatenation
+#define CONCAT_INTERNAL(a, b) a ## b
+// Second layer of macro, which ensures proper expansion
+#define CONCAT(a, b) CONCAT_INTERNAL(a, b)
+
+// define names for all structures and functions
+#define simple_hashtable_init_named CONCAT(simple_hashtable_init, SIMPLE_HASHTABLE_NAME)
+#define simple_hashtable_destroy_named CONCAT(simple_hashtable_destroy, SIMPLE_HASHTABLE_NAME)
+
+#define simple_hashtable_slot_named CONCAT(simple_hashtable_slot, SIMPLE_HASHTABLE_NAME)
+#define SIMPLE_HASHTABLE_SLOT_NAMED CONCAT(SIMPLE_HASHTABLE_SLOT, SIMPLE_HASHTABLE_NAME)
+#define simple_hashtable_named CONCAT(simple_hashtable, SIMPLE_HASHTABLE_NAME)
+#define SIMPLE_HASHTABLE_NAMED CONCAT(SIMPLE_HASHTABLE, SIMPLE_HASHTABLE_NAME)
+#define simple_hashtable_resize_named CONCAT(simple_hashtable_resize, SIMPLE_HASHTABLE_NAME)
+#define simple_hashtable_can_use_slot_named CONCAT(simple_hashtable_keys_match, SIMPLE_HASHTABLE_NAME)
+#define simple_hashtable_get_slot_named CONCAT(simple_hashtable_get_slot, SIMPLE_HASHTABLE_NAME)
+#define simple_hashtable_del_slot_named CONCAT(simple_hashtable_del_slot, SIMPLE_HASHTABLE_NAME)
+#define simple_hashtable_set_slot_named CONCAT(simple_hashtable_set_slot, SIMPLE_HASHTABLE_NAME)
+#define simple_hashtable_first_read_only_named CONCAT(simple_hashtable_first_read_only, SIMPLE_HASHTABLE_NAME)
+#define simple_hashtable_next_read_only_named CONCAT(simple_hashtable_next_read_only, SIMPLE_HASHTABLE_NAME)
+
+#define simple_hashtable_sorted_binary_search_named CONCAT(simple_hashtable_sorted_binary_search, SIMPLE_HASHTABLE_NAME)
+#define simple_hashtable_add_value_sorted_named CONCAT(simple_hashtable_add_value_sorted, SIMPLE_HASHTABLE_NAME)
+#define simple_hashtable_del_value_sorted_named CONCAT(simple_hashtable_del_value_sorted, SIMPLE_HASHTABLE_NAME)
+#define simple_hashtable_replace_value_sorted_named CONCAT(simple_hashtable_replace_value_sorted, SIMPLE_HASHTABLE_NAME)
+#define simple_hashtable_sorted_array_first_read_only_named CONCAT(simple_hashtable_sorted_array_first_read_only, SIMPLE_HASHTABLE_NAME)
+#define simple_hashtable_sorted_array_next_read_only_named CONCAT(simple_hashtable_sorted_array_next_read_only, SIMPLE_HASHTABLE_NAME)
+
+typedef struct simple_hashtable_slot_named {
+    SIMPLE_HASHTABLE_HASH hash;
+    SIMPLE_HASHTABLE_VALUE_TYPE *data;
+} SIMPLE_HASHTABLE_SLOT_NAMED;
+
+typedef struct simple_hashtable_named {
+    size_t resizes;
+    size_t searches;
+    size_t collisions;
+    size_t additions;
+    size_t deletions;
+    size_t deleted;
+    size_t used;
+    size_t size;
+    bool needs_cleanup;
+    SIMPLE_HASHTABLE_SLOT_NAMED *hashtable;
+
+#ifdef SIMPLE_HASHTABLE_SORT_FUNCTION
+    struct {
+        size_t used;
+        size_t size;
+        SIMPLE_HASHTABLE_VALUE_TYPE **array;
+    } sorted;
+#endif
+} SIMPLE_HASHTABLE_NAMED;
+
+#ifdef SIMPLE_HASHTABLE_SORT_FUNCTION
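+// Lower-bound binary search over the sorted array: it returns the first index whose
+// element does not sort before 'value', i.e. the insertion point that keeps the array
+// ordered, which is also the position of 'value' when it is already present.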
+static inline size_t simple_hashtable_sorted_binary_search_named(SIMPLE_HASHTABLE_NAMED *ht, SIMPLE_HASHTABLE_VALUE_TYPE *value) { + size_t left = 0, right = ht->sorted.used; + + while (left < right) { + size_t mid = left + (right - left) / 2; + if (SIMPLE_HASHTABLE_SORT_FUNCTION(ht->sorted.array[mid], value) < 0) + left = mid + 1; + else + right = mid; + } + + return left; +} + +static inline void simple_hashtable_add_value_sorted_named(SIMPLE_HASHTABLE_NAMED *ht, SIMPLE_HASHTABLE_VALUE_TYPE *value) { + size_t index = simple_hashtable_sorted_binary_search_named(ht, value); + + // Ensure there's enough space in the sorted array + if (ht->sorted.used >= ht->sorted.size) { + size_t size = ht->sorted.size ? ht->sorted.size * 2 : 64; + SIMPLE_HASHTABLE_VALUE_TYPE **array = mallocz(size * sizeof(SIMPLE_HASHTABLE_VALUE_TYPE *)); + if(ht->sorted.array) { + memcpy(array, ht->sorted.array, ht->sorted.size * sizeof(SIMPLE_HASHTABLE_VALUE_TYPE *)); + freez(ht->sorted.array); + } + ht->sorted.array = array; + ht->sorted.size = size; + } + + // Use memmove to shift elements and create space for the new element + memmove(&ht->sorted.array[index + 1], &ht->sorted.array[index], (ht->sorted.used - index) * sizeof(SIMPLE_HASHTABLE_VALUE_TYPE *)); + + ht->sorted.array[index] = value; + ht->sorted.used++; +} + +static inline void simple_hashtable_del_value_sorted_named(SIMPLE_HASHTABLE_NAMED *ht, SIMPLE_HASHTABLE_VALUE_TYPE *value) { + size_t index = simple_hashtable_sorted_binary_search_named(ht, value); + + // Check if the value exists at the found index + assert(index < ht->sorted.used && ht->sorted.array[index] == value); + + // Use memmove to shift elements and close the gap + memmove(&ht->sorted.array[index], &ht->sorted.array[index + 1], (ht->sorted.used - index - 1) * sizeof(SIMPLE_HASHTABLE_VALUE_TYPE *)); + ht->sorted.used--; +} + +static inline void simple_hashtable_replace_value_sorted_named(SIMPLE_HASHTABLE_NAMED *ht, SIMPLE_HASHTABLE_VALUE_TYPE *old_value, SIMPLE_HASHTABLE_VALUE_TYPE *new_value) { + if(new_value == old_value) + return; + + size_t old_value_index = simple_hashtable_sorted_binary_search_named(ht, old_value); + assert(old_value_index < ht->sorted.used && ht->sorted.array[old_value_index] == old_value); + + int r = SIMPLE_HASHTABLE_SORT_FUNCTION(old_value, new_value); + if(r == 0) { + // Same value, so use the same index + ht->sorted.array[old_value_index] = new_value; + return; + } + + size_t new_value_index = simple_hashtable_sorted_binary_search_named(ht, new_value); + if(old_value_index == new_value_index) { + // Not the same value, but still at the same index + ht->sorted.array[old_value_index] = new_value; + return; + } + else if (old_value_index < new_value_index) { + // The old value is before the new value + size_t shift_start = old_value_index + 1; + size_t shift_end = new_value_index - 1; + size_t shift_size = shift_end - old_value_index; + + memmove(&ht->sorted.array[old_value_index], &ht->sorted.array[shift_start], shift_size * sizeof(SIMPLE_HASHTABLE_VALUE_TYPE *)); + ht->sorted.array[shift_end] = new_value; + } + else { + // The old value is after the new value + size_t shift_start = new_value_index; + size_t shift_end = old_value_index; + size_t shift_size = shift_end - new_value_index; + + memmove(&ht->sorted.array[new_value_index + 1], &ht->sorted.array[shift_start], shift_size * sizeof(SIMPLE_HASHTABLE_VALUE_TYPE *)); + ht->sorted.array[new_value_index] = new_value; + } +} + +static inline SIMPLE_HASHTABLE_VALUE_TYPE 
**simple_hashtable_sorted_array_first_read_only_named(SIMPLE_HASHTABLE_NAMED *ht) { + if (ht->sorted.used > 0) { + return &ht->sorted.array[0]; + } + return NULL; +} + +static inline SIMPLE_HASHTABLE_VALUE_TYPE **simple_hashtable_sorted_array_next_read_only_named(SIMPLE_HASHTABLE_NAMED *ht, SIMPLE_HASHTABLE_VALUE_TYPE **last) { + if (!last) return NULL; + + // Calculate the current position in the sorted array + size_t currentIndex = last - ht->sorted.array; + + // Proceed to the next element if it exists + if (currentIndex + 1 < ht->sorted.used) { + return &ht->sorted.array[currentIndex + 1]; + } + + // If no more elements, return NULL + return NULL; +} + +#define SIMPLE_HASHTABLE_SORTED_FOREACH_READ_ONLY(ht, var, type, name) \ + for (type **(var) = simple_hashtable_sorted_array_first_read_only ## name(ht); \ + var; \ + (var) = simple_hashtable_sorted_array_next_read_only ## name(ht, var)) + +#define SIMPLE_HASHTABLE_SORTED_FOREACH_READ_ONLY_VALUE(var) (*(var)) + +#else +static inline void simple_hashtable_add_value_sorted_named(SIMPLE_HASHTABLE_NAMED *ht __maybe_unused, SIMPLE_HASHTABLE_VALUE_TYPE *value __maybe_unused) { ; } +static inline void simple_hashtable_del_value_sorted_named(SIMPLE_HASHTABLE_NAMED *ht __maybe_unused, SIMPLE_HASHTABLE_VALUE_TYPE *value __maybe_unused) { ; } +static inline void simple_hashtable_replace_value_sorted_named(SIMPLE_HASHTABLE_NAMED *ht __maybe_unused, SIMPLE_HASHTABLE_VALUE_TYPE *old_value __maybe_unused, SIMPLE_HASHTABLE_VALUE_TYPE *new_value __maybe_unused) { ; } +#endif + +static inline void simple_hashtable_init_named(SIMPLE_HASHTABLE_NAMED *ht, size_t size) { + memset(ht, 0, sizeof(*ht)); + ht->size = size; + ht->hashtable = callocz(ht->size, sizeof(*ht->hashtable)); +} + +static inline void simple_hashtable_destroy_named(SIMPLE_HASHTABLE_NAMED *ht) { +#ifdef SIMPLE_HASHTABLE_SORT_FUNCTION + freez(ht->sorted.array); +#endif + + freez(ht->hashtable); + memset(ht, 0, sizeof(*ht)); +} + +static inline void simple_hashtable_resize_named(SIMPLE_HASHTABLE_NAMED *ht); + +#define simple_hashtable_data_unset ((void *)NULL) +#define simple_hashtable_data_deleted ((void *)UINT64_MAX) +#define simple_hashtable_data_usernull ((void *)(UINT64_MAX - 1)) +#define simple_hashtable_is_slot_unset(sl) ((sl)->data == simple_hashtable_data_unset) +#define simple_hashtable_is_slot_deleted(sl) ((sl)->data == simple_hashtable_data_deleted) +#define simple_hashtable_is_slot_usernull(sl) ((sl)->data == simple_hashtable_data_usernull) +#define SIMPLE_HASHTABLE_SLOT_DATA(sl) ((simple_hashtable_is_slot_unset(sl) || simple_hashtable_is_slot_deleted(sl) || simple_hashtable_is_slot_usernull(sl)) ? NULL : (sl)->data) + +static inline bool simple_hashtable_can_use_slot_named( + SIMPLE_HASHTABLE_SLOT_NAMED *sl, SIMPLE_HASHTABLE_HASH hash, + SIMPLE_HASHTABLE_KEY_TYPE *key __maybe_unused) { + + if(simple_hashtable_is_slot_unset(sl)) + return true; + + if(simple_hashtable_is_slot_deleted(sl)) + return false; + + if(sl->hash == hash) { +#if defined(SIMPLE_HASHTABLE_COMPARE_KEYS_FUNCTION) && defined(SIMPLE_HASHTABLE_VALUE2KEY_FUNCTION) + return SIMPLE_HASHTABLE_COMPARE_KEYS_FUNCTION(SIMPLE_HASHTABLE_VALUE2KEY_FUNCTION(SIMPLE_HASHTABLE_SLOT_DATA(sl)), key); +#else + return true; +#endif + } + + return false; +} + +#define SIMPLE_HASHTABLE_NEEDS_RESIZE(ht) ((ht)->size <= ((ht)->used - (ht)->deleted) << 1 || (ht)->used >= (ht)->size) + +// IMPORTANT: the pointer returned by this call is valid up to the next call of this function (or the resize one). 
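+// Any other call that probes, cleans up or resizes may move or reuse slots, so a
+// hypothetical caller that needs to revisit a slot later should redo the lookup:
+//
+//     SIMPLE_HASHTABLE_HASH hash = XXH3_64bits(key, key_len);       // compute once
+//     SIMPLE_HASHTABLE_SLOT_NAMED *sl = simple_hashtable_get_slot_named(ht, hash, key, true);
+//     ... use sl immediately ...
+//     sl = simple_hashtable_get_slot_named(ht, hash, key, false);   // later: re-resolve from the hash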
+// If you need to cache something, cache the hash, not the slot pointer.
+static inline SIMPLE_HASHTABLE_SLOT_NAMED *simple_hashtable_get_slot_named(
+    SIMPLE_HASHTABLE_NAMED *ht, SIMPLE_HASHTABLE_HASH hash,
+    SIMPLE_HASHTABLE_KEY_TYPE *key, bool resize) {
+
+    // This function finds the requested hash and key in the hashtable.
+    // It uses a second version of the hash in case of collisions, and then linear probing.
+    // It may resize the hashtable if it is more than 50% full.
+
+    // Deleted items remain in the hashtable, but they are marked as DELETED.
+    // Reuse of DELETED slots happens only if the slot to be returned is UNSET.
+    // So, when looking up an item, it tries to find it, assuming DELETED
+    // slots are occupied. If the item to be returned is UNSET, and it has
+    // encountered a DELETED slot, it returns the DELETED one instead of the UNSET.
+
+    ht->searches++;
+
+    size_t slot;
+    SIMPLE_HASHTABLE_SLOT_NAMED *sl;
+    SIMPLE_HASHTABLE_SLOT_NAMED *deleted;
+
+    slot = hash % ht->size;
+    sl = &ht->hashtable[slot];
+    deleted = simple_hashtable_is_slot_deleted(sl) ? sl : NULL;
+    if(likely(simple_hashtable_can_use_slot_named(sl, hash, key)))
+        return (simple_hashtable_is_slot_unset(sl) && deleted) ? deleted : sl;
+
+    ht->collisions++;
+
+    if(unlikely(resize && (ht->needs_cleanup || SIMPLE_HASHTABLE_NEEDS_RESIZE(ht)))) {
+        simple_hashtable_resize_named(ht);
+        deleted = NULL; // our deleted pointer is not valid anymore
+
+        slot = hash % ht->size;
+        sl = &ht->hashtable[slot];
+        if(likely(simple_hashtable_can_use_slot_named(sl, hash, key)))
+            return sl;
+
+        ht->collisions++;
+    }
+
+    slot = ((hash >> SIMPLE_HASHTABLE_HASH_SECOND_HASH_SHIFTS) + 1) % ht->size;
+    sl = &ht->hashtable[slot];
+    deleted = (!deleted && simple_hashtable_is_slot_deleted(sl)) ? sl : deleted;
+
+    // Linear probing until we find it
+    SIMPLE_HASHTABLE_SLOT_NAMED *sl_started = sl;
+    size_t collisions_started = ht->collisions;
+    while (!simple_hashtable_can_use_slot_named(sl, hash, key)) {
+        slot = (slot + 1) % ht->size; // Wrap around if necessary
+        sl = &ht->hashtable[slot];
+        deleted = (!deleted && simple_hashtable_is_slot_deleted(sl)) ? sl : deleted;
+        ht->collisions++;
+
+        if(sl == sl_started) {
+            if(deleted) {
+                // we looped through all items, and we didn't find a free slot,
+                // but we have found a deleted slot, so return it.
+                return deleted;
+            }
+            else if(resize) {
+                // the hashtable is full, without any deleted slots.
+                // we need to resize it now.
+                simple_hashtable_resize_named(ht);
+                return simple_hashtable_get_slot_named(ht, hash, key, false);
+            }
+            else {
+                // the hashtable is full, but resize is false.
+                // this should never happen.
+                assert(sl != sl_started);
+            }
+        }
+    }
+
+    if((ht->collisions - collisions_started) > (ht->size / 2) && ht->deleted >= (ht->size / 3)) {
+        // we traversed through half of the hashtable to find a slot,
+        // but we have more than 1/3 deleted items
+        ht->needs_cleanup = true;
+    }
+
+    return (simple_hashtable_is_slot_unset(sl) && deleted) ?
deleted : sl; +} + +static inline bool simple_hashtable_del_slot_named(SIMPLE_HASHTABLE_NAMED *ht, SIMPLE_HASHTABLE_SLOT_NAMED *sl) { + if(simple_hashtable_is_slot_unset(sl) || simple_hashtable_is_slot_deleted(sl)) + return false; + + ht->deletions++; + ht->deleted++; + + simple_hashtable_del_value_sorted_named(ht, SIMPLE_HASHTABLE_SLOT_DATA(sl)); + + sl->data = simple_hashtable_data_deleted; + return true; +} + +static inline void simple_hashtable_set_slot_named( + SIMPLE_HASHTABLE_NAMED *ht, SIMPLE_HASHTABLE_SLOT_NAMED *sl, + SIMPLE_HASHTABLE_HASH hash, SIMPLE_HASHTABLE_VALUE_TYPE *data) { + + if(data == NULL) + data = simple_hashtable_data_usernull; + + if(unlikely(data == simple_hashtable_data_unset || data == simple_hashtable_data_deleted)) { + simple_hashtable_del_slot_named(ht, sl); + return; + } + + if(likely(simple_hashtable_is_slot_unset(sl))) { + simple_hashtable_add_value_sorted_named(ht, data); + ht->used++; + } + + else if(unlikely(simple_hashtable_is_slot_deleted(sl))) { + ht->deleted--; + } + + else + simple_hashtable_replace_value_sorted_named(ht, SIMPLE_HASHTABLE_SLOT_DATA(sl), data); + + sl->hash = hash; + sl->data = data; + ht->additions++; +} + +// IMPORTANT +// this call invalidates all SIMPLE_HASHTABLE_SLOT_NAMED pointers +static inline void simple_hashtable_resize_named(SIMPLE_HASHTABLE_NAMED *ht) { + SIMPLE_HASHTABLE_SLOT_NAMED *old = ht->hashtable; + size_t old_size = ht->size; + + size_t new_size = ht->size; + + if(SIMPLE_HASHTABLE_NEEDS_RESIZE(ht)) + new_size = (ht->size << 1) - ((ht->size > 16) ? 1 : 0); + + ht->resizes++; + ht->size = new_size; + ht->hashtable = callocz(new_size, sizeof(*ht->hashtable)); + size_t used = 0; + for(size_t i = 0 ; i < old_size ; i++) { + SIMPLE_HASHTABLE_SLOT_NAMED *slot = &old[i]; + if(simple_hashtable_is_slot_unset(slot) || simple_hashtable_is_slot_deleted(slot)) + continue; + + SIMPLE_HASHTABLE_KEY_TYPE *key = NULL; + +#if defined(SIMPLE_HASHTABLE_COMPARE_KEYS_FUNCTION) && defined(SIMPLE_HASHTABLE_VALUE2KEY_FUNCTION) + SIMPLE_HASHTABLE_VALUE_TYPE *value = SIMPLE_HASHTABLE_SLOT_DATA(slot); + key = SIMPLE_HASHTABLE_VALUE2KEY_FUNCTION(value); +#endif + + SIMPLE_HASHTABLE_SLOT_NAMED *slot2 = simple_hashtable_get_slot_named(ht, slot->hash, key, false); + *slot2 = *slot; + used++; + } + + assert(used == ht->used - ht->deleted); + + ht->used = used; + ht->deleted = 0; + ht->needs_cleanup = false; + + freez(old); +} + +// ---------------------------------------------------------------------------- +// hashtable traversal, in read-only mode +// the hashtable should not be modified while the traversal is taking place + +static inline SIMPLE_HASHTABLE_SLOT_NAMED *simple_hashtable_first_read_only_named(SIMPLE_HASHTABLE_NAMED *ht) { + for(size_t i = 0; i < ht->size ;i++) { + SIMPLE_HASHTABLE_SLOT_NAMED *sl = &ht->hashtable[i]; + if(!simple_hashtable_is_slot_unset(sl) && !simple_hashtable_is_slot_deleted(sl)) + return sl; + } + + return NULL; +} + +static inline SIMPLE_HASHTABLE_SLOT_NAMED *simple_hashtable_next_read_only_named(SIMPLE_HASHTABLE_NAMED *ht, SIMPLE_HASHTABLE_SLOT_NAMED *last) { + if (!last) return NULL; + + // Calculate the current position in the array + size_t index = last - ht->hashtable; + + // Iterate over the hashtable starting from the next element + for (size_t i = index + 1; i < ht->size; i++) { + SIMPLE_HASHTABLE_SLOT_NAMED *sl = &ht->hashtable[i]; + if (!simple_hashtable_is_slot_unset(sl) && !simple_hashtable_is_slot_deleted(sl)) { + return sl; + } + } + + // If no more data slots are found, return NULL + return 
NULL; +} + +#define SIMPLE_HASHTABLE_FOREACH_READ_ONLY(ht, var, name) \ + for(struct simple_hashtable_slot ## name *(var) = simple_hashtable_first_read_only ## name(ht); \ + var; \ + (var) = simple_hashtable_next_read_only ## name(ht, var)) + +#define SIMPLE_HASHTABLE_FOREACH_READ_ONLY_VALUE(var) SIMPLE_HASHTABLE_SLOT_DATA(var) + +// ---------------------------------------------------------------------------- +// high level implementation + +#ifdef SIMPLE_HASHTABLE_SAMPLE_IMPLEMENTATION + +#ifndef XXH_INLINE_ALL +#define XXH_INLINE_ALL +#endif +#include "../xxHash/xxhash.h" + +#define simple_hashtable_set_named CONCAT(simple_hashtable_set, SIMPLE_HASHTABLE_NAME) +#define simple_hashtable_get_named CONCAT(simple_hashtable_get, SIMPLE_HASHTABLE_NAME) +#define simple_hashtable_del_named CONCAT(simple_hashtable_del, SIMPLE_HASHTABLE_NAME) + +static inline SIMPLE_HASHTABLE_VALUE_TYPE *simple_hashtable_set_named(SIMPLE_HASHTABLE_NAMED *ht, SIMPLE_HASHTABLE_KEY_TYPE *key, size_t key_len, SIMPLE_HASHTABLE_VALUE_TYPE *data) { + XXH64_hash_t hash = XXH3_64bits((void *)key, key_len); + SIMPLE_HASHTABLE_SLOT_NAMED *sl = simple_hashtable_get_slot_named(ht, hash, key, true); + simple_hashtable_set_slot_named(ht, sl, hash, data); + return SIMPLE_HASHTABLE_SLOT_DATA(sl); +} + +static inline SIMPLE_HASHTABLE_VALUE_TYPE *simple_hashtable_get_named(SIMPLE_HASHTABLE_NAMED *ht, SIMPLE_HASHTABLE_KEY_TYPE *key, size_t key_len) { + XXH64_hash_t hash = XXH3_64bits((void *)key, key_len); + SIMPLE_HASHTABLE_SLOT_NAMED *sl = simple_hashtable_get_slot_named(ht, hash, key, true); + return SIMPLE_HASHTABLE_SLOT_DATA(sl); +} + +static inline bool simple_hashtable_del_named(SIMPLE_HASHTABLE_NAMED *ht, SIMPLE_HASHTABLE_KEY_TYPE *key, size_t key_len) { + XXH64_hash_t hash = XXH3_64bits((void *)key, key_len); + SIMPLE_HASHTABLE_SLOT_NAMED *sl = simple_hashtable_get_slot_named(ht, hash, key, true); + return simple_hashtable_del_slot_named(ht, sl); +} + +#endif // SIMPLE_HASHTABLE_SAMPLE_IMPLEMENTATION + +// ---------------------------------------------------------------------------- +// Clear the preprocessor defines of simple_hashtable.h +// allowing simple_hashtable.h to be included multiple times +// with different configuration each time. + +#include "simple_hashtable_undef.h" + +#endif //NETDATA_SIMPLE_HASHTABLE_H diff --git a/src/libnetdata/simple_hashtable/simple_hashtable_undef.h b/src/libnetdata/simple_hashtable/simple_hashtable_undef.h new file mode 100644 index 000000000..3fe5a708d --- /dev/null +++ b/src/libnetdata/simple_hashtable/simple_hashtable_undef.h @@ -0,0 +1,35 @@ + +// this file clears the preprocessor defines of simple_hashtable.h +// allowing simple_hashtable.h to be included multiple times +// with different configuration each time. 
+ +#undef SIMPLE_HASHTABLE_HASH_SECOND_HASH_SHIFTS + +#undef simple_hashtable_init_named +#undef simple_hashtable_destroy_named +#undef simple_hashtable_slot_named +#undef SIMPLE_HASHTABLE_SLOT_NAMED +#undef simple_hashtable_named +#undef SIMPLE_HASHTABLE_NAMED +#undef simple_hashtable_resize_named +#undef simple_hashtable_can_use_slot_named +#undef simple_hashtable_get_slot_named +#undef simple_hashtable_del_slot_named +#undef simple_hashtable_set_slot_named +#undef simple_hashtable_first_read_only_named +#undef simple_hashtable_next_read_only_named +#undef simple_hashtable_sorted_binary_search_named +#undef simple_hashtable_add_value_sorted_named +#undef simple_hashtable_del_value_sorted_named +#undef simple_hashtable_replace_value_sorted_named +#undef simple_hashtable_sorted_array_first_read_only_named +#undef simple_hashtable_sorted_array_next_read_only_named + +#undef SIMPLE_HASHTABLE_SAMPLE_IMPLEMENTATION +#undef SIMPLE_HASHTABLE_SORT_FUNCTION +#undef SIMPLE_HASHTABLE_VALUE_TYPE +#undef SIMPLE_HASHTABLE_KEY_TYPE +#undef SIMPLE_HASHTABLE_VALUE2KEY_FUNCTION +#undef SIMPLE_HASHTABLE_COMPARE_KEYS_FUNCTION +#undef SIMPLE_HASHTABLE_NAME +#undef NETDATA_SIMPLE_HASHTABLE_H diff --git a/src/libnetdata/simple_hashtable_undef.h b/src/libnetdata/simple_hashtable_undef.h deleted file mode 100644 index 3fe5a708d..000000000 --- a/src/libnetdata/simple_hashtable_undef.h +++ /dev/null @@ -1,35 +0,0 @@ - -// this file clears the preprocessor defines of simple_hashtable.h -// allowing simple_hashtable.h to be included multiple times -// with different configuration each time. - -#undef SIMPLE_HASHTABLE_HASH_SECOND_HASH_SHIFTS - -#undef simple_hashtable_init_named -#undef simple_hashtable_destroy_named -#undef simple_hashtable_slot_named -#undef SIMPLE_HASHTABLE_SLOT_NAMED -#undef simple_hashtable_named -#undef SIMPLE_HASHTABLE_NAMED -#undef simple_hashtable_resize_named -#undef simple_hashtable_can_use_slot_named -#undef simple_hashtable_get_slot_named -#undef simple_hashtable_del_slot_named -#undef simple_hashtable_set_slot_named -#undef simple_hashtable_first_read_only_named -#undef simple_hashtable_next_read_only_named -#undef simple_hashtable_sorted_binary_search_named -#undef simple_hashtable_add_value_sorted_named -#undef simple_hashtable_del_value_sorted_named -#undef simple_hashtable_replace_value_sorted_named -#undef simple_hashtable_sorted_array_first_read_only_named -#undef simple_hashtable_sorted_array_next_read_only_named - -#undef SIMPLE_HASHTABLE_SAMPLE_IMPLEMENTATION -#undef SIMPLE_HASHTABLE_SORT_FUNCTION -#undef SIMPLE_HASHTABLE_VALUE_TYPE -#undef SIMPLE_HASHTABLE_KEY_TYPE -#undef SIMPLE_HASHTABLE_VALUE2KEY_FUNCTION -#undef SIMPLE_HASHTABLE_COMPARE_KEYS_FUNCTION -#undef SIMPLE_HASHTABLE_NAME -#undef NETDATA_SIMPLE_HASHTABLE_H diff --git a/src/libnetdata/simple_pattern/README.md b/src/libnetdata/simple_pattern/README.md index cf8a0f640..93a6f2d8a 100644 --- a/src/libnetdata/simple_pattern/README.md +++ b/src/libnetdata/simple_pattern/README.md @@ -1,13 +1,3 @@ - - # Simple patterns Unix prefers regular expressions. 
But they are just too hard, too cryptic diff --git a/src/libnetdata/simple_pattern/simple_pattern.c b/src/libnetdata/simple_pattern/simple_pattern.c index 7a7f41b1c..d0feefb4d 100644 --- a/src/libnetdata/simple_pattern/simple_pattern.c +++ b/src/libnetdata/simple_pattern/simple_pattern.c @@ -78,18 +78,21 @@ SIMPLE_PATTERN *simple_pattern_create(const char *list, const char *separators, if(unlikely(!list || !*list)) return root; - char isseparator[256] = { - [' '] = 1 // space - , ['\t'] = 1 // tab - , ['\r'] = 1 // carriage return - , ['\n'] = 1 // new line - , ['\f'] = 1 // form feed - , ['\v'] = 1 // vertical tab + bool isseparator[256] = { + [' '] = true // space + , ['\t'] = true // tab + , ['\r'] = true // carriage return + , ['\n'] = true // new line + , ['\f'] = true // form feed + , ['\v'] = true // vertical tab }; - if (unlikely(separators && *separators)) { - memset(&isseparator[0], 0, sizeof(isseparator)); - while(*separators) isseparator[(unsigned char)*separators++] = 1; + if (unlikely(separators == SIMPLE_PATTERN_NO_SEPARATORS)) + memset(isseparator, false, sizeof(isseparator)); + + else if (unlikely(separators && *separators)) { + memset(isseparator, false, sizeof(isseparator)); + while(*separators) isseparator[(unsigned char)*separators++] = true; } char *buf = mallocz(strlen(list) + 1); diff --git a/src/libnetdata/simple_pattern/simple_pattern.h b/src/libnetdata/simple_pattern/simple_pattern.h index 1af0f87b9..2c105c54b 100644 --- a/src/libnetdata/simple_pattern/simple_pattern.h +++ b/src/libnetdata/simple_pattern/simple_pattern.h @@ -21,6 +21,8 @@ typedef enum __attribute__ ((__packed__)) { struct simple_pattern; typedef struct simple_pattern SIMPLE_PATTERN; +#define SIMPLE_PATTERN_NO_SEPARATORS (const char *)(0xFFFFFFFF) + // create a simple_pattern from the string given // default_mode is used in cases where EXACT matches, without an asterisk, // should be considered PREFIX matches. diff --git a/src/libnetdata/socket/security.c b/src/libnetdata/socket/security.c index 502998b79..33bf22d75 100644 --- a/src/libnetdata/socket/security.c +++ b/src/libnetdata/socket/security.c @@ -1,7 +1,5 @@ #include "../libnetdata.h" -#ifdef ENABLE_HTTPS - SSL_CTX *netdata_ssl_exporting_ctx =NULL; SSL_CTX *netdata_ssl_streaming_sender_ctx =NULL; SSL_CTX *netdata_ssl_web_server_ctx =NULL; @@ -732,7 +730,7 @@ int security_test_certificate(SSL *ssl) { * * @return It returns 0 on success and -1 otherwise. 
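 * Note: either 'file' or 'path' may be NULL, but not both when custom locations
 * are to be loaded; OpenSSL's SSL_CTX_load_verify_locations() needs at least one of them.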
*/ -int ssl_security_location_for_context(SSL_CTX *ctx, char *file, char *path) { +int ssl_security_location_for_context(SSL_CTX *ctx, const char *file, const char *path) { int load_custom = 1, load_default = 1; if (file || path) { if(!SSL_CTX_load_verify_locations(ctx, file, path)) { @@ -751,4 +749,3 @@ int ssl_security_location_for_context(SSL_CTX *ctx, char *file, char *path) { return 0; } -#endif diff --git a/src/libnetdata/socket/security.h b/src/libnetdata/socket/security.h index 283d81db8..7deb1d797 100644 --- a/src/libnetdata/socket/security.h +++ b/src/libnetdata/socket/security.h @@ -1,5 +1,5 @@ #ifndef NETDATA_SECURITY_H -# define NETDATA_SECURITY_H +#define NETDATA_SECURITY_H typedef enum __attribute__((packed)) { NETDATA_SSL_STATE_NOT_SSL = 1, // This connection is not SSL @@ -12,27 +12,6 @@ typedef enum __attribute__((packed)) { #define NETDATA_SSL_STREAMING_SENDER_CTX 1 #define NETDATA_SSL_EXPORTING_CTX 2 -# ifdef ENABLE_HTTPS - -#define OPENSSL_VERSION_095 0x00905100L -#define OPENSSL_VERSION_097 0x0907000L -#define OPENSSL_VERSION_110 0x10100000L -#define OPENSSL_VERSION_111 0x10101000L -#define OPENSSL_VERSION_300 0x30000000L - -# include -# include -# include -# include -# if (SSLEAY_VERSION_NUMBER >= OPENSSL_VERSION_097) && (OPENSSL_VERSION_NUMBER < OPENSSL_VERSION_110) -# include -# endif - -#if OPENSSL_VERSION_NUMBER >= OPENSSL_VERSION_300 -#include -#include -#endif - typedef struct netdata_ssl { SSL *conn; // SSL connection NETDATA_SSL_STATE state; // The state for SSL connection @@ -52,7 +31,7 @@ extern const char *tls_version; extern const char *tls_ciphers; extern bool netdata_ssl_validate_certificate; extern bool netdata_ssl_validate_certificate_sender; -int ssl_security_location_for_context(SSL_CTX *ctx,char *file,char *path); +int ssl_security_location_for_context(SSL_CTX *ctx, const char *file, const char *path); void netdata_ssl_initialize_openssl(); void netdata_ssl_cleanup(); @@ -73,5 +52,4 @@ ssize_t netdata_ssl_write(NETDATA_SSL *ssl, const void *buf, size_t num); ssize_t netdata_ssl_pending(NETDATA_SSL *ssl); bool netdata_ssl_has_pending(NETDATA_SSL *ssl); -# endif //ENABLE_HTTPS #endif //NETDATA_SECURITY_H diff --git a/src/libnetdata/socket/socket.c b/src/libnetdata/socket/socket.c index f907fefeb..3b0d1f824 100644 --- a/src/libnetdata/socket/socket.c +++ b/src/libnetdata/socket/socket.c @@ -119,22 +119,17 @@ bool fd_is_socket(int fd) { return true; } -bool sock_has_output_error(int fd) { - if(fd < 0) { - //internal_error(true, "invalid socket %d", fd); - return false; - } +#ifdef POLLRDHUP +bool is_socket_closed(int fd) { + if(fd < 0) + return true; // if(!fd_is_socket(fd)) { // //internal_error(true, "fd %d is not a socket", fd); // return false; // } - short int errors = POLLERR | POLLHUP | POLLNVAL; - -#ifdef POLLRDHUP - errors |= POLLRDHUP; -#endif + short int errors = POLLERR | POLLHUP | POLLNVAL | POLLRDHUP; struct pollfd pfd = { .fd = fd, @@ -149,6 +144,31 @@ bool sock_has_output_error(int fd) { return ((pfd.revents & errors) || !(pfd.revents & POLLOUT)); } +#else +bool is_socket_closed(int fd) { + if(fd < 0) + return true; + + char buffer; + ssize_t result = recv(fd, &buffer, 1, MSG_PEEK | MSG_DONTWAIT); + if (result == 0) { + // Connection closed + return true; + } + else if (result < 0) { + if (errno == EAGAIN || errno == EWOULDBLOCK) { + // No data available, but socket is still open + return false; + } else { + // An error occurred + return true; + } + } + + // Data is available, socket is open + return false; +} +#endif int 
sock_setnonblock(int fd) { int flags; @@ -515,7 +535,6 @@ HTTP_ACL socket_ssl_acl(char *acl) { //Due the format of the SSL command it is always the last command, //we finish it here to avoid problems with the ACLs *ssl = '\0'; -#ifdef ENABLE_HTTPS ssl++; if (!strncmp("SSL=",ssl,4)) { ssl += 4; @@ -526,7 +545,6 @@ HTTP_ACL socket_ssl_acl(char *acl) { return HTTP_ACL_SSL_FORCE; } } -#endif } return HTTP_ACL_NONE; @@ -558,7 +576,7 @@ static inline int bind_to_this(LISTEN_SOCKETS *sockets, const char *definition, char buffer2[10 + 1]; snprintfz(buffer2, 10, "%d", default_port); - char *ip = buffer, *port = buffer2, *interface = "", *portconfig; + char *ip = buffer, *port = buffer2, *iface = "", *portconfig; int protocol = IPPROTO_TCP, socktype = SOCK_STREAM; const char *protocol_str = "tcp"; @@ -613,7 +631,7 @@ static inline int bind_to_this(LISTEN_SOCKETS *sockets, const char *definition, if(*e == '%') { *e = '\0'; e++; - interface = e; + iface = e; while(*e && *e != ':' && *e != '=') e++; } @@ -650,13 +668,13 @@ static inline int bind_to_this(LISTEN_SOCKETS *sockets, const char *definition, } uint32_t scope_id = 0; - if(*interface) { - scope_id = if_nametoindex(interface); + if(*iface) { + scope_id = if_nametoindex(iface); if(!scope_id) nd_log(NDLS_DAEMON, NDLP_ERR, "LISTENER: Cannot find a network interface named '%s'. " "Continuing with limiting the network interface", - interface); + iface); } if(!*ip || *ip == '*' || !strcmp(ip, "any") || !strcmp(ip, "all")) @@ -750,9 +768,9 @@ int listen_sockets_setup(LISTEN_SOCKETS *sockets) { } else sockets->default_port = (uint16_t)new_port; - char *s = appconfig_get(sockets->config, sockets->config_section, "bind to", sockets->default_bind_to); + const char *s = appconfig_get(sockets->config, sockets->config_section, "bind to", sockets->default_bind_to); while(*s) { - char *e = s; + const char *e = s; // skip separators, moving both s(tart) and e(nd) while(isspace((uint8_t)*e) || *e == ',') s = ++e; @@ -935,12 +953,10 @@ int connect_to_this_ip46( hostBfr, servBfr); // Convert 'struct timeval' to milliseconds for poll(): - int timeout_ms = timeout->tv_sec * 1000 + timeout->tv_usec / 1000; + int timeout_ms = timeout ? (timeout->tv_sec * 1000 + timeout->tv_usec / 1000) : 1000; switch(wait_on_socket_or_cancel_with_timeout( -#ifdef ENABLE_HTTPS - NULL, -#endif + NULL, fd, timeout_ms, POLLOUT, NULL)) { case 0: // proceed nd_log(NDLS_DAEMON, NDLP_DEBUG, @@ -1019,7 +1035,7 @@ int connect_to_this(const char *definition, int default_port, struct timeval *ti char default_service[10 + 1]; snprintfz(default_service, 10, "%d", default_port); - char *host = buffer, *service = default_service, *interface = ""; + char *host = buffer, *service = default_service, *iface = ""; int protocol = IPPROTO_TCP, socktype = SOCK_STREAM; uint32_t scope_id = 0; @@ -1058,7 +1074,7 @@ int connect_to_this(const char *definition, int default_port, struct timeval *ti if(*e == '%') { *e = '\0'; e++; - interface = e; + iface = e; while(*e && *e != ':') e++; } @@ -1076,12 +1092,12 @@ int connect_to_this(const char *definition, int default_port, struct timeval *ti return -1; } - if(*interface) { - scope_id = if_nametoindex(interface); + if(*iface) { + scope_id = if_nametoindex(iface); if(!scope_id) nd_log(NDLS_DAEMON, NDLP_ERR, "Cannot find a network interface named '%s'. 
Continuing with limiting the network interface", - interface); + iface); } if(!*service) @@ -1186,9 +1202,7 @@ int connect_to_one_of_urls(const char *destination, int default_port, struct tim // returns: -1 = thread cancelled, 0 = proceed to read/write, 1 = time exceeded, 2 = error on fd // timeout parameter can be zero to wait forever inline int wait_on_socket_or_cancel_with_timeout( -#ifdef ENABLE_HTTPS NETDATA_SSL *ssl, -#endif int fd, int timeout_ms, short int poll_events, short int *revents) { struct pollfd pfd = { .fd = fd, @@ -1204,10 +1218,8 @@ inline int wait_on_socket_or_cancel_with_timeout( return -1; } -#ifdef ENABLE_HTTPS if(poll_events == POLLIN && ssl && SSL_connection(ssl) && netdata_ssl_has_pending(ssl)) return 0; -#endif const int wait_ms = (timeout_ms >= ND_CHECK_CANCELLABILITY_WHILE_WAITING_EVERY_MS || forever) ? ND_CHECK_CANCELLABILITY_WHILE_WAITING_EVERY_MS : timeout_ms; @@ -1247,16 +1259,10 @@ inline int wait_on_socket_or_cancel_with_timeout( return 1; } -ssize_t recv_timeout( -#ifdef ENABLE_HTTPS - NETDATA_SSL *ssl, -#endif - int sockfd, void *buf, size_t len, int flags, int timeout) { +ssize_t recv_timeout(NETDATA_SSL *ssl, int sockfd, void *buf, size_t len, int flags, int timeout) { switch(wait_on_socket_or_cancel_with_timeout( -#ifdef ENABLE_HTTPS - ssl, -#endif + ssl, sockfd, timeout * 1000, POLLIN, NULL)) { case 0: // data are waiting break; @@ -1270,25 +1276,16 @@ ssize_t recv_timeout( return -1; } -#ifdef ENABLE_HTTPS - if (SSL_connection(ssl)) { + if (SSL_connection(ssl)) return netdata_ssl_read(ssl, buf, len); - } -#endif return recv(sockfd, buf, len, flags); } -ssize_t send_timeout( -#ifdef ENABLE_HTTPS - NETDATA_SSL *ssl, -#endif - int sockfd, void *buf, size_t len, int flags, int timeout) { +ssize_t send_timeout(NETDATA_SSL *ssl, int sockfd, void *buf, size_t len, int flags, int timeout) { switch(wait_on_socket_or_cancel_with_timeout( -#ifdef ENABLE_HTTPS - ssl, -#endif + ssl, sockfd, timeout * 1000, POLLOUT, NULL)) { case 0: // data are waiting break; @@ -1302,7 +1299,6 @@ ssize_t send_timeout( return -1; } -#ifdef ENABLE_HTTPS if(ssl->conn) { if (SSL_connection(ssl)) { return netdata_ssl_write(ssl, buf, len); @@ -1314,7 +1310,7 @@ ssize_t send_timeout( return -1; } } -#endif + return send(sockfd, buf, len, flags); } diff --git a/src/libnetdata/socket/socket.h b/src/libnetdata/socket/socket.h index 8147c9774..2c282c4c6 100644 --- a/src/libnetdata/socket/socket.h +++ b/src/libnetdata/socket/socket.h @@ -46,18 +46,12 @@ int connect_to_one_of(const char *destination, int default_port, struct timeval int connect_to_one_of_urls(const char *destination, int default_port, struct timeval *timeout, size_t *reconnects_counter, char *connected_to, size_t connected_to_size); -#ifdef ENABLE_HTTPS ssize_t recv_timeout(NETDATA_SSL *ssl,int sockfd, void *buf, size_t len, int flags, int timeout); ssize_t send_timeout(NETDATA_SSL *ssl,int sockfd, void *buf, size_t len, int flags, int timeout); int wait_on_socket_or_cancel_with_timeout(NETDATA_SSL *ssl, int fd, int timeout_ms, short int poll_events, short int *revents); -#else -ssize_t recv_timeout(int sockfd, void *buf, size_t len, int flags, int timeout); -ssize_t send_timeout(int sockfd, void *buf, size_t len, int flags, int timeout); -int wait_on_socket_or_cancel_with_timeout(int fd, int timeout_ms, short int poll_events, short int *revents); -#endif bool fd_is_socket(int fd); -bool sock_has_output_error(int fd); +bool is_socket_closed(int fd); int sock_setnonblock(int fd); int sock_delnonblock(int fd); @@ -200,7 
+194,7 @@ void poll_events(LISTEN_SOCKETS *sockets #define INET6_ADDRSTRLEN 46 #endif -typedef struct socket_peers { +typedef struct { struct { char ip[INET6_ADDRSTRLEN]; int port; diff --git a/src/libnetdata/spawn_server/log-forwarder.c b/src/libnetdata/spawn_server/log-forwarder.c new file mode 100644 index 000000000..5c4db55ea --- /dev/null +++ b/src/libnetdata/spawn_server/log-forwarder.c @@ -0,0 +1,322 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "../libnetdata.h" +#include "log-forwarder.h" + +typedef struct LOG_FORWARDER_ENTRY { + int fd; + char *cmd; + pid_t pid; + BUFFER *wb; + size_t pfds_idx; + bool delete; + + struct LOG_FORWARDER_ENTRY *prev; + struct LOG_FORWARDER_ENTRY *next; +} LOG_FORWARDER_ENTRY; + +typedef struct LOG_FORWARDER { + LOG_FORWARDER_ENTRY *entries; + ND_THREAD *thread; + SPINLOCK spinlock; + int pipe_fds[2]; // Pipe for notifications + bool running; +} LOG_FORWARDER; + +static void *log_forwarder_thread_func(void *arg); + +// -------------------------------------------------------------------------------------------------------------------- +// helper functions + +static inline LOG_FORWARDER_ENTRY *log_forwarder_find_entry_unsafe(LOG_FORWARDER *lf, int fd) { + for (LOG_FORWARDER_ENTRY *entry = lf->entries; entry; entry = entry->next) { + if (entry->fd == fd) + return entry; + } + + return NULL; +} + +static inline void log_forwarder_del_entry_unsafe(LOG_FORWARDER *lf, LOG_FORWARDER_ENTRY *entry) { + DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(lf->entries, entry, prev, next); + buffer_free(entry->wb); + freez(entry->cmd); + close(entry->fd); + freez(entry); +} + +static inline void log_forwarder_wake_up_worker(LOG_FORWARDER *lf) { + char ch = 0; + ssize_t bytes_written = write(lf->pipe_fds[PIPE_WRITE], &ch, 1); + if (bytes_written != 1) + nd_log(NDLS_COLLECTORS, NDLP_ERR, "Failed to write to notification pipe"); +} + +// -------------------------------------------------------------------------------------------------------------------- +// starting / stopping + +LOG_FORWARDER *log_forwarder_start(void) { + LOG_FORWARDER *lf = callocz(1, sizeof(LOG_FORWARDER)); + + spinlock_init(&lf->spinlock); + if (pipe(lf->pipe_fds) != 0) { + freez(lf); + return NULL; + } + + // make sure read() will not block on this pipe + sock_setnonblock(lf->pipe_fds[PIPE_READ]); + + lf->running = true; + lf->thread = nd_thread_create("log-fw", NETDATA_THREAD_OPTION_JOINABLE, log_forwarder_thread_func, lf); + + return lf; +} + +static inline void mark_all_entries_for_deletion_unsafe(LOG_FORWARDER *lf) { + for(LOG_FORWARDER_ENTRY *entry = lf->entries; entry ;entry = entry->next) + entry->delete = true; +} + +void log_forwarder_stop(LOG_FORWARDER *lf) { + if(!lf || !lf->running) return; + + // Signal the thread to stop + spinlock_lock(&lf->spinlock); + lf->running = false; + + // mark them all for deletion + mark_all_entries_for_deletion_unsafe(lf); + + // Send a byte to the pipe to wake up the thread + char ch = 0; + write(lf->pipe_fds[PIPE_WRITE], &ch, 1); + spinlock_unlock(&lf->spinlock); + + // Wait for the thread to finish + close(lf->pipe_fds[PIPE_WRITE]); // force it to quit + nd_thread_join(lf->thread); + close(lf->pipe_fds[PIPE_READ]); + + freez(lf); +} + +// -------------------------------------------------------------------------------------------------------------------- +// managing entries + +void log_forwarder_add_fd(LOG_FORWARDER *lf, int fd) { + if(!lf || !lf->running || fd < 0) return; + + LOG_FORWARDER_ENTRY *entry = callocz(1, sizeof(LOG_FORWARDER_ENTRY)); + 
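
The notification pipe above is the classic self-pipe pattern: the worker thread blocks in poll() on the pipe's read end, and any thread that mutates the entry list writes a single byte to the write end, forcing the worker to wake up and rebuild its pollfd set. A minimal sketch of the pattern, assuming only POSIX pipe(2), poll(2) and fcntl(2); all names here are illustrative, not part of the patch:

#include <poll.h>
#include <fcntl.h>
#include <unistd.h>

static int wakeup_pipe[2]; // [0] = read end polled by the worker, [1] = write end

static int wakeup_init(void) {
    if (pipe(wakeup_pipe) != 0) return -1;
    // non-blocking read end, so draining the pipe can never stall the worker
    int flags = fcntl(wakeup_pipe[0], F_GETFL, 0);
    return fcntl(wakeup_pipe[0], F_SETFL, flags | O_NONBLOCK);
}

static void wakeup_worker(void) {
    char ch = 0;
    (void)write(wakeup_pipe[1], &ch, 1); // one byte is enough to wake poll()
}

static void worker_loop(void) {
    struct pollfd pfd = { .fd = wakeup_pipe[0], .events = POLLIN };
    for (;;) {
        if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
            char buf[256];
            while (read(wakeup_pipe[0], buf, sizeof(buf)) > 0) ; // drain all pending wake-ups
            // take the lock and rebuild the fd set from the entry list here
        }
    }
}

Writing the wake-up byte while holding the spinlock, as the add/delete/annotate functions below do, means the worker can never observe a mutated list without a pending wake-up.
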
entry->fd = fd; + entry->cmd = NULL; + entry->pid = 0; + entry->pfds_idx = 0; + entry->delete = false; + entry->wb = buffer_create(0, NULL); + + spinlock_lock(&lf->spinlock); + + // Prepend to the entries list + DOUBLE_LINKED_LIST_PREPEND_ITEM_UNSAFE(lf->entries, entry, prev, next); + + // Send a byte to the pipe to wake up the thread + log_forwarder_wake_up_worker(lf); + + spinlock_unlock(&lf->spinlock); +} + +bool log_forwarder_del_and_close_fd(LOG_FORWARDER *lf, int fd) { + if(!lf || !lf->running || fd < 0) return false; + + bool ret = false; + + spinlock_lock(&lf->spinlock); + + LOG_FORWARDER_ENTRY *entry = log_forwarder_find_entry_unsafe(lf, fd); + if(entry) { + entry->delete = true; + + // Send a byte to the pipe to wake up the thread + log_forwarder_wake_up_worker(lf); + + ret = true; + } + + spinlock_unlock(&lf->spinlock); + + return ret; +} + +void log_forwarder_annotate_fd_name(LOG_FORWARDER *lf, int fd, const char *cmd) { + if(!lf || !lf->running || fd < 0 || !cmd || !*cmd) return; + + spinlock_lock(&lf->spinlock); + + LOG_FORWARDER_ENTRY *entry = log_forwarder_find_entry_unsafe(lf, fd); + if (entry) { + freez(entry->cmd); + entry->cmd = strdupz(cmd); + } + + spinlock_unlock(&lf->spinlock); +} + +void log_forwarder_annotate_fd_pid(LOG_FORWARDER *lf, int fd, pid_t pid) { + if(!lf || !lf->running || fd < 0) return; + + spinlock_lock(&lf->spinlock); + + LOG_FORWARDER_ENTRY *entry = log_forwarder_find_entry_unsafe(lf, fd); + if (entry) + entry->pid = pid; + + spinlock_unlock(&lf->spinlock); +} + +// -------------------------------------------------------------------------------------------------------------------- +// log forwarder thread + +static inline void log_forwarder_log(LOG_FORWARDER *lf __maybe_unused, LOG_FORWARDER_ENTRY *entry, const char *msg) { + const char *s = msg; + while(*s && isspace((uint8_t)*s)) s++; + if(*s == '\0') return; // do not log empty lines + + ND_LOG_STACK lgs[] = { + ND_LOG_FIELD_TXT(NDF_SYSLOG_IDENTIFIER, entry->cmd ? 
entry->cmd : "unknown"), + ND_LOG_FIELD_I64(NDF_TID, entry->pid), + ND_LOG_FIELD_END(), + }; + ND_LOG_STACK_PUSH(lgs); + + nd_log(NDLS_COLLECTORS, NDLP_WARNING, "STDERR: %s", msg); +} + +// returns the number of active entries +static inline size_t log_forwarder_remove_deleted_unsafe(LOG_FORWARDER *lf) { + size_t entries = 0; + + LOG_FORWARDER_ENTRY *entry = lf->entries; + while(entry) { + LOG_FORWARDER_ENTRY *next = entry->next; + + if(entry->delete) { + if (buffer_strlen(entry->wb)) + // there is something not logged in it - log it + log_forwarder_log(lf, entry, buffer_tostring(entry->wb)); + + log_forwarder_del_entry_unsafe(lf, entry); + } + else + entries++; + + entry = next; + } + + return entries; +} + +static void *log_forwarder_thread_func(void *arg) { + LOG_FORWARDER *lf = (LOG_FORWARDER *)arg; + + while (1) { + spinlock_lock(&lf->spinlock); + if (!lf->running) { + mark_all_entries_for_deletion_unsafe(lf); + log_forwarder_remove_deleted_unsafe(lf); + spinlock_unlock(&lf->spinlock); + break; + } + + // Count the number of fds + size_t nfds = 1 + log_forwarder_remove_deleted_unsafe(lf); + + struct pollfd pfds[nfds]; + + // First, the notification pipe + pfds[0].fd = lf->pipe_fds[PIPE_READ]; + pfds[0].events = POLLIN; + + int idx = 1; + for(LOG_FORWARDER_ENTRY *entry = lf->entries; entry ; entry = entry->next, idx++) { + pfds[idx].fd = entry->fd; + pfds[idx].events = POLLIN; + entry->pfds_idx = idx; + } + + spinlock_unlock(&lf->spinlock); + + int timeout = 200; // 200ms + int ret = poll(pfds, nfds, timeout); + + if (ret > 0) { + // Check the notification pipe + if (pfds[0].revents & POLLIN) { + // Read and discard the data + char buf[256]; + ssize_t bytes_read = read(lf->pipe_fds[PIPE_READ], buf, sizeof(buf)); + // Ignore the data; proceed regardless of the result + if (bytes_read == -1) { + if (errno != EAGAIN && errno != EWOULDBLOCK) { + // Handle read error if necessary + nd_log(NDLS_COLLECTORS, NDLP_ERR, "Failed to read from notification pipe"); + return NULL; + } + } + } + + // Now check the other fds + spinlock_lock(&lf->spinlock); + + size_t to_remove = 0; + + // read or mark them for deletion + for(LOG_FORWARDER_ENTRY *entry = lf->entries; entry ; entry = entry->next) { + if (entry->pfds_idx < 1 || entry->pfds_idx >= nfds || !(pfds[entry->pfds_idx].revents & POLLIN)) + continue; + + BUFFER *wb = entry->wb; + buffer_need_bytes(wb, 1024); + + ssize_t bytes_read = read(entry->fd, &wb->buffer[wb->len], wb->size - wb->len - 1); + if(bytes_read > 0) + wb->len += bytes_read; + else if(bytes_read == 0 || (bytes_read == -1 && errno != EINTR && errno != EAGAIN)) { + // EOF or error + entry->delete = true; + to_remove++; + } + + // log as many complete lines as have been received + char *start = (char *)buffer_tostring(wb); + char *newline = strchr(start, '\n'); + while(newline) { + *newline = '\0'; + log_forwarder_log(lf, entry, start); + + start = ++newline; + newline = strchr(newline, '\n'); + } + + if(start != wb->buffer) { + wb->len = strlen(start); + if (wb->len) + memmove(wb->buffer, start, wb->len); + } + + entry->pfds_idx = 0; + } + + spinlock_unlock(&lf->spinlock); + } + else if (ret == 0) { + // Timeout, nothing to do + continue; + + } + else + nd_log(NDLS_COLLECTORS, NDLP_ERR, "Log forwarder: poll() error"); + } + + return NULL; +} diff --git a/src/libnetdata/spawn_server/log-forwarder.h b/src/libnetdata/spawn_server/log-forwarder.h new file mode 100644 index 000000000..344601c1f --- /dev/null +++ b/src/libnetdata/spawn_server/log-forwarder.h @@ -0,0 +1,17 @@ +// 
SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_LOG_FORWARDER_H +#define NETDATA_LOG_FORWARDER_H + +#include "../libnetdata.h" + +typedef struct LOG_FORWARDER LOG_FORWARDER; + +LOG_FORWARDER *log_forwarder_start(void); // done once, at spawn_server_create() +void log_forwarder_add_fd(LOG_FORWARDER *lf, int fd); // to add a new fd +void log_forwarder_annotate_fd_name(LOG_FORWARDER *lf, int fd, const char *cmd); // set the syslog identifier +void log_forwarder_annotate_fd_pid(LOG_FORWARDER *lf, int fd, pid_t pid); // set the pid of the child process +bool log_forwarder_del_and_close_fd(LOG_FORWARDER *lf, int fd); // to remove an fd +void log_forwarder_stop(LOG_FORWARDER *lf); // done once, at spawn_server_destroy() + +#endif //NETDATA_LOG_FORWARDER_H diff --git a/src/libnetdata/spawn_server/spawn-tester.c b/src/libnetdata/spawn_server/spawn-tester.c new file mode 100644 index 000000000..fbd9431ac --- /dev/null +++ b/src/libnetdata/spawn_server/spawn-tester.c @@ -0,0 +1,493 @@ +#include "libnetdata/libnetdata.h" +#include "libnetdata/required_dummies.h" + +#define ENV_VAR_KEY "SPAWN_TESTER" +#define ENV_VAR_VALUE "1234567890" + +size_t warnings = 0; + +void child_check_environment(void) { + const char *s = getenv(ENV_VAR_KEY); + if(!s || !*s || strcmp(s, ENV_VAR_VALUE) != 0) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "Wrong environment. Variable '%s' should have value '%s' but it has '%s'", + ENV_VAR_KEY, ENV_VAR_VALUE, s ? s : "(unset)"); + + exit(1); + } +} + +static bool is_valid_fd(int fd) { + errno_clear(); + return fcntl(fd, F_GETFD) != -1 || errno != EBADF; +} + +void child_check_fds(void) { + for(int fd = 0; fd < 3; fd++) { + if(!is_valid_fd(fd)) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "fd No %d should be a valid file descriptor - but it isn't.", fd); + + exit(1); + } + } + + for(int fd = 3; fd < /* os_get_fd_open_max() */ 1024; fd++) { + if(is_valid_fd(fd)) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "fd No %d is a valid file descriptor - it shouldn't.", fd); + + exit(1); + } + } + + errno_clear(); +} + +// -------------------------------------------------------------------------------------------------------------------- +// kill to stop + +int plugin_kill_to_stop() { + child_check_fds(); + child_check_environment(); + + char buffer[1024]; + while (fgets(buffer, sizeof(buffer), stdin) != NULL) { + fprintf(stderr, "+"); + printf("%s", buffer); + fflush(stdout); + } + + return 0; +} + +void test_int_fds_plugin_kill_to_stop(SPAWN_SERVER *server, const char *argv0) { + const char *params[] = { + argv0, + "plugin-kill-to-stop", + NULL, + }; + + SPAWN_INSTANCE *si = spawn_server_exec(server, STDERR_FILENO, 0, params, NULL, 0, SPAWN_INSTANCE_TYPE_EXEC); + if(!si) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "Cannot run myself as plugin (spawn)"); + exit(1); + } + + const char *msg = "Hello World!\n"; + ssize_t len = strlen(msg); + char buffer[len * 2]; + + for(size_t j = 0; j < 30 ;j++) { + fprintf(stderr, "-"); + memset(buffer, 0, sizeof(buffer)); + + ssize_t rc = write(spawn_server_instance_write_fd(si), msg, len); + + if (rc != len) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "Cannot write to plugin. Expected to write %zd bytes, wrote %zd bytes", + len, rc); + exit(1); + } + + rc = read(spawn_server_instance_read_fd(si), buffer, sizeof(buffer)); + if (rc != len) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "Cannot read from plugin. 
Expected to read %zd bytes, read %zd bytes", + len, rc); + exit(1); + } + + if (memcmp(msg, buffer, len) != 0) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "Read corrupted data. Expected '%s', Read '%s'", + msg, buffer); + exit(1); + } + } + fprintf(stderr, "\n"); + + int code = spawn_server_exec_kill(server, si); + + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "child exited with code %d", + code); + + if(code != 15 && code != 0) { + nd_log(NDLS_COLLECTORS, NDLP_WARNING, "child should exit with code 0 or 15, but exited with code %d", code); + warnings++; + } +} + +void test_popen_plugin_kill_to_stop(const char *argv0) { + char cmd[FILENAME_MAX + 100]; + snprintfz(cmd, sizeof(cmd), "exec %s plugin-kill-to-stop", argv0); + POPEN_INSTANCE *pi = spawn_popen_run(cmd); + if(!pi) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "Cannot run myself as plugin (popen)"); + exit(1); + } + + const char *msg = "Hello World!\n"; + size_t len = strlen(msg); + char buffer[len * 2]; + + for(size_t j = 0; j < 30 ;j++) { + fprintf(stderr, "-"); + memset(buffer, 0, sizeof(buffer)); + + size_t rc = fwrite(msg, 1, len, spawn_popen_stdin(pi)); + if (rc != len) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "Cannot write to plugin. Expected to write %zu bytes, wrote %zu bytes", + len, rc); + exit(1); + } + fflush(spawn_popen_stdin(pi)); + + char *s = fgets(buffer, sizeof(buffer), spawn_popen_stdout(pi)); + if (!s || strlen(s) != len) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "Cannot read from plugin. Expected to read %zu bytes, read %zu bytes", + len, (size_t)(s ? strlen(s) : 0)); + exit(1); + } + if (memcmp(msg, buffer, len) != 0) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "Read corrupted data. Expected '%s', Read '%s'", + msg, buffer); + exit(1); + } + } + fprintf(stderr, "\n"); + + int code = spawn_popen_kill(pi); + + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "child exited with code %d", + code); + + if(code != 0) { + nd_log(NDLS_COLLECTORS, NDLP_WARNING, "child should exit with code 0, but exited with code %d", code); + warnings++; + } +} + +// -------------------------------------------------------------------------------------------------------------------- +// close to stop + +int plugin_close_to_stop() { + child_check_fds(); + child_check_environment(); + + char buffer[1024]; + while (fgets(buffer, sizeof(buffer), stdin) != NULL) { + fprintf(stderr, "+"); + printf("%s", buffer); + fflush(stdout); + } + + nd_log(NDLS_COLLECTORS, NDLP_ERR, "child detected a closed pipe."); + exit(1); +} + +void test_int_fds_plugin_close_to_stop(SPAWN_SERVER *server, const char *argv0) { + const char *params[] = { + argv0, + "plugin-close-to-stop", + NULL, + }; + + SPAWN_INSTANCE *si = spawn_server_exec(server, STDERR_FILENO, 0, params, NULL, 0, SPAWN_INSTANCE_TYPE_EXEC); + if(!si) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "Cannot run myself as plugin (spawn)"); + exit(1); + } + + const char *msg = "Hello World!\n"; + ssize_t len = strlen(msg); + char buffer[len * 2]; + + for(size_t j = 0; j < 30 ;j++) { + fprintf(stderr, "-"); + memset(buffer, 0, sizeof(buffer)); + + ssize_t rc = write(spawn_server_instance_write_fd(si), msg, len); + if (rc != len) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "Cannot write to plugin. Expected to write %zd bytes, wrote %zd bytes", + len, rc); + exit(1); + } + + rc = read(spawn_server_instance_read_fd(si), buffer, sizeof(buffer)); + if (rc != len) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "Cannot read from plugin. 
Expected to read %zd bytes, read %zd bytes", + len, rc); + exit(1); + } + if (memcmp(msg, buffer, len) != 0) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "Read corrupted data. Expected '%s', Read '%s'", + msg, buffer); + exit(1); + } + + break; + } + fprintf(stderr, "\n"); + + int code = spawn_server_exec_wait(server, si); + + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "child exited with code %d", + code); + + if(!WIFEXITED(code) || WEXITSTATUS(code) != 1) { + nd_log(NDLS_COLLECTORS, NDLP_WARNING, "child should exit with code 1, but exited with code %d", code); + warnings++; + } +} + +void test_popen_plugin_close_to_stop(const char *argv0) { + char cmd[FILENAME_MAX + 100]; + snprintfz(cmd, sizeof(cmd), "exec %s plugin-close-to-stop", argv0); + POPEN_INSTANCE *pi = spawn_popen_run(cmd); + if(!pi) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "Cannot run myself as plugin (popen)"); + exit(1); + } + + const char *msg = "Hello World!\n"; + size_t len = strlen(msg); + char buffer[len * 2]; + + for(size_t j = 0; j < 30 ;j++) { + fprintf(stderr, "-"); + memset(buffer, 0, sizeof(buffer)); + + size_t rc = fwrite(msg, 1, len, spawn_popen_stdin(pi)); + if (rc != len) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "Cannot write to plugin. Expected to write %zu bytes, wrote %zu bytes", + len, rc); + exit(1); + } + fflush(spawn_popen_stdin(pi)); + + char *s = fgets(buffer, sizeof(buffer), spawn_popen_stdout(pi)); + if (!s || strlen(s) != len) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "Cannot read from plugin. Expected to read %zu bytes, read %zu bytes", + len, (size_t)(s ? strlen(s) : 0)); + exit(1); + } + if (memcmp(msg, buffer, len) != 0) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "Read corrupted data. Expected '%s', Read '%s'", + msg, buffer); + exit(1); + } + + break; + } + fprintf(stderr, "\n"); + + int code = spawn_popen_wait(pi); + + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "child exited with code %d", + code); + + if(code != 1) { + nd_log(NDLS_COLLECTORS, NDLP_WARNING, "child should exit with code 1, but exited with code %d", code); + warnings++; + } +} + +// -------------------------------------------------------------------------------------------------------------------- +// echo and exit + +#define ECHO_AND_EXIT_MSG "GOODBYE\n" + +int plugin_echo_and_exit() { + child_check_fds(); + child_check_environment(); + + printf(ECHO_AND_EXIT_MSG); + exit(0); +} + +void test_int_fds_plugin_echo_and_exit(SPAWN_SERVER *server, const char *argv0) { + const char *params[] = { + argv0, + "plugin-echo-and-exit", + NULL, + }; + + SPAWN_INSTANCE *si = spawn_server_exec(server, STDERR_FILENO, 0, params, NULL, 0, SPAWN_INSTANCE_TYPE_EXEC); + if(!si) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "Cannot run myself as plugin (spawn)"); + exit(1); + } + + char buffer[1024]; + size_t reads = 0; + + for(size_t j = 0; j < 30 ;j++) { + fprintf(stderr, "-"); + memset(buffer, 0, sizeof(buffer)); + + ssize_t rc = read(spawn_server_instance_read_fd(si), buffer, sizeof(buffer)); + if(rc <= 0) + break; + + reads++; + + if (rc != strlen(ECHO_AND_EXIT_MSG)) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "Cannot read from plugin. Expected to read %zu bytes, read %zd bytes", + strlen(ECHO_AND_EXIT_MSG), rc); + exit(1); + } + if (memcmp(ECHO_AND_EXIT_MSG, buffer, strlen(ECHO_AND_EXIT_MSG)) != 0) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "Read corrupted data. Expected '%s', Read '%s'", + ECHO_AND_EXIT_MSG, buffer); + exit(1); + } + } + fprintf(stderr, "\n"); + + if(reads != 1) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "Cannot read from plugin. 
Expected to read %d times, but read %zu", + 1, reads); + exit(1); + } + + int code = spawn_server_exec_wait(server, si); + + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "child exited with code %d", + code); + + if(code != 0) { + nd_log(NDLS_COLLECTORS, NDLP_WARNING, "child should exit with code 0, but exited with code %d", code); + warnings++; + } +} + +void test_popen_plugin_echo_and_exit(const char *argv0) { + char cmd[FILENAME_MAX + 100]; + snprintfz(cmd, sizeof(cmd), "exec %s plugin-echo-and-exit", argv0); + POPEN_INSTANCE *pi = spawn_popen_run(cmd); + if(!pi) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "Cannot run myself as plugin (popen)"); + exit(1); + } + + char buffer[1024]; + size_t reads = 0; + for(size_t j = 0; j < 30 ;j++) { + fprintf(stderr, "-"); + memset(buffer, 0, sizeof(buffer)); + + char *s = fgets(buffer, sizeof(buffer), spawn_popen_stdout(pi)); + if(!s) break; + reads++; + if (strlen(s) != strlen(ECHO_AND_EXIT_MSG)) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "Cannot read from plugin. Expected to read %zu bytes, read %zu bytes", + strlen(ECHO_AND_EXIT_MSG), (size_t)(s ? strlen(s) : 0)); + exit(1); + } + if (memcmp(ECHO_AND_EXIT_MSG, buffer, strlen(ECHO_AND_EXIT_MSG)) != 0) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "Read corrupted data. Expected '%s', Read '%s'", + ECHO_AND_EXIT_MSG, buffer); + exit(1); + } + } + fprintf(stderr, "\n"); + + if(reads != 1) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "Cannot read from plugin. Expected to read %d times, but read %zu", + 1, reads); + exit(1); + } + + int code = spawn_popen_wait(pi); + + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "child exited with code %d", + code); + + if(code != 0) { + nd_log(NDLS_COLLECTORS, NDLP_WARNING, "child should exit with code 0, but exited with code %d", code); + warnings++; + } +} + +// -------------------------------------------------------------------------------------------------------------------- + +int main(int argc, const char **argv) { + if(argc > 1 && strcmp(argv[1], "plugin-kill-to-stop") == 0) + return plugin_kill_to_stop(); + + if(argc > 1 && strcmp(argv[1], "plugin-echo-and-exit") == 0) + return plugin_echo_and_exit(); + + if(argc > 1 && strcmp(argv[1], "plugin-close-to-stop") == 0) + return plugin_close_to_stop(); + + if(argc <= 1 || strcmp(argv[1], "test") != 0) { + fprintf(stderr, "Run me with 'test' parameter!\n"); + exit(1); + } + + nd_setenv(ENV_VAR_KEY, ENV_VAR_VALUE, 1); + + fprintf(stderr, "\n\nTESTING fds\n\n"); + SPAWN_SERVER *server = spawn_server_create(SPAWN_SERVER_OPTION_EXEC, "test", NULL, argc, argv); + if(!server) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "Cannot create spawn server"); + exit(1); + } + for(size_t i = 0; i < 5; i++) { + fprintf(stderr, "\n\nTESTING fds No %zu (kill to stop)\n\n", i + 1); + test_int_fds_plugin_kill_to_stop(server, argv[0]); + } + for(size_t i = 0; i < 5; i++) { + fprintf(stderr, "\n\nTESTING fds No %zu (echo and exit)\n\n", i + 1); + test_int_fds_plugin_echo_and_exit(server, argv[0]); + } + for(size_t i = 0; i < 5; i++) { + fprintf(stderr, "\n\nTESTING fds No %zu (close to stop)\n\n", i + 1); + test_int_fds_plugin_close_to_stop(server, argv[0]); + } + spawn_server_destroy(server); + + fprintf(stderr, "\n\nTESTING popen\n\n"); + netdata_main_spawn_server_init("test", argc, argv); + for(size_t i = 0; i < 5; i++) { + fprintf(stderr, "\n\nTESTING popen No %zu (kill to stop)\n\n", i + 1); + test_popen_plugin_kill_to_stop(argv[0]); + } + for(size_t i = 0; i < 5; i++) { + fprintf(stderr, "\n\nTESTING popen No %zu (echo and exit)\n\n", i + 1); + 
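
spawn-tester exercises both transports by re-executing its own binary: main() dispatches on argv[1], so the same executable acts as the test driver when run with 'test' and as a cooperating plugin when run with one of the plugin-* modes. A stripped-down sketch of that dispatch-and-respawn idiom, using plain fork()/execl() instead of the spawn server (all names illustrative):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/wait.h>

static int child_mode(void) {
    puts("hello from the re-executed child");
    return 0;
}

int main(int argc, char **argv) {
    if (argc > 1 && strcmp(argv[1], "child") == 0)
        return child_mode(); // we were re-executed in child mode

    pid_t pid = fork();
    if (pid < 0) return 1;
    if (pid == 0) {
        execl(argv[0], argv[0], "child", (char *)NULL); // re-exec ourselves
        _exit(127); // only reached if execl() fails
    }

    int status = 0;
    waitpid(pid, &status, 0);
    return WIFEXITED(status) ? WEXITSTATUS(status) : 1;
}

Re-executing argv[0] keeps the parent and child in lock-step on the protocol under test, which is why the tester can assert exact byte counts and exit codes.
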
test_popen_plugin_echo_and_exit(argv[0]); + } + for(size_t i = 0; i < 5; i++) { + fprintf(stderr, "\n\nTESTING popen No %zu (close to stop)\n\n", i + 1); + test_popen_plugin_close_to_stop(argv[0]); + } + netdata_main_spawn_server_cleanup(); + + fprintf(stderr, "\n\nTests passed! (%zu warnings)\n\n", warnings); + + exit(0); +} diff --git a/src/libnetdata/spawn_server/spawn_library.c b/src/libnetdata/spawn_server/spawn_library.c new file mode 100644 index 000000000..bdf64544c --- /dev/null +++ b/src/libnetdata/spawn_server/spawn_library.c @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "spawn_library.h" + +BUFFER *argv_to_cmdline_buffer(const char **argv) { + BUFFER *wb = buffer_create(0, NULL); + + for(size_t i = 0; argv[i] ;i++) { + const char *s = argv[i]; + size_t len = strlen(s); + buffer_need_bytes(wb, len * 2 + 1); + + bool needs_quotes = false; + for(const char *c = s; !needs_quotes && *c ; c++) { + switch(*c) { + case ' ': + case '\v': + case '\t': + case '\n': + case '"': + needs_quotes = true; + break; + + default: + break; + } + } + + if(needs_quotes && buffer_strlen(wb)) + buffer_strcat(wb, " \""); + else if(buffer_strlen(wb)) + buffer_putc(wb, ' '); + + for(const char *c = s; *c ; c++) { + switch(*c) { + case '"': + buffer_putc(wb, '\\'); + // fall through + + default: + buffer_putc(wb, *c); + break; + } + } + + if(needs_quotes) + buffer_strcat(wb, "\""); + } + + return wb; +} diff --git a/src/libnetdata/spawn_server/spawn_library.h b/src/libnetdata/spawn_server/spawn_library.h new file mode 100644 index 000000000..a9b9dc14d --- /dev/null +++ b/src/libnetdata/spawn_server/spawn_library.h @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_SPAWN_LIBRARY_H +#define NETDATA_SPAWN_LIBRARY_H + +#include "../libnetdata.h" + +BUFFER *argv_to_cmdline_buffer(const char **argv); + +#endif //NETDATA_SPAWN_LIBRARY_H diff --git a/src/libnetdata/spawn_server/spawn_popen.c b/src/libnetdata/spawn_server/spawn_popen.c index f354b1f2a..b8ea0afe6 100644 --- a/src/libnetdata/spawn_server/spawn_popen.c +++ b/src/libnetdata/spawn_server/spawn_popen.c @@ -2,6 +2,12 @@ #include "spawn_popen.h" +struct popen_instance { + SPAWN_INSTANCE *si; + FILE *child_stdin_fp; + FILE *child_stdout_fp; +}; + SPAWN_SERVER *netdata_main_spawn_server = NULL; static SPINLOCK netdata_main_spawn_server_spinlock = NETDATA_SPINLOCK_INITIALIZER; @@ -27,6 +33,30 @@ void netdata_main_spawn_server_cleanup(void) { } } +FILE *spawn_popen_stdin(POPEN_INSTANCE *pi) { + if(!pi->child_stdin_fp) + pi->child_stdin_fp = fdopen(spawn_server_instance_write_fd(pi->si), "w"); + + if(!pi->child_stdin_fp) + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "Cannot open FILE on child's stdin on fd %d.", + spawn_server_instance_write_fd(pi->si)); + + return pi->child_stdin_fp; +} + +FILE *spawn_popen_stdout(POPEN_INSTANCE *pi) { + if(!pi->child_stdout_fp) + pi->child_stdout_fp = fdopen(spawn_server_instance_read_fd(pi->si), "r"); + + if(!pi->child_stdout_fp) + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "Cannot open FILE on child's stdout on fd %d.", + spawn_server_instance_read_fd(pi->si)); + + return pi->child_stdout_fp; +} + POPEN_INSTANCE *spawn_popen_run_argv(const char **argv) { netdata_main_spawn_server_init(NULL, 0, NULL); @@ -35,29 +65,9 @@ POPEN_INSTANCE *spawn_popen_run_argv(const char **argv) { if(si == NULL) return NULL; - POPEN_INSTANCE *pi = mallocz(sizeof(*pi)); + POPEN_INSTANCE *pi = callocz(1, sizeof(*pi)); pi->si = si; - pi->child_stdin_fp = fdopen(spawn_server_instance_write_fd(si), "w"); - 
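
The spawn_popen changes above make POPEN_INSTANCE opaque and defer wrapping the child's pipe fds in FILE streams until spawn_popen_stdin()/spawn_popen_stdout() are first called, so callers that only need the raw fds never pay for an fdopen(). A generic sketch of that lazy-initialization idiom (illustrative, not the patch's code):

#include <stdio.h>

struct handle {
    int fd;    // always valid, owned by the handle
    FILE *fp;  // created on first use
};

// hypothetical accessor mirroring the lazy fdopen() pattern above
static FILE *handle_stream(struct handle *h, const char *mode) {
    if (!h->fp)
        h->fp = fdopen(h->fd, mode); // may return NULL; callers must check
    return h->fp;
}

Because fclose() also closes the underlying descriptor, spawn_popen_close_files() unsets the fd on the SPAWN_INSTANCE after closing each stream, so the instance never double-closes it.
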
pi->child_stdout_fp = fdopen(spawn_server_instance_read_fd(si), "r"); - - if(!pi->child_stdin_fp) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, "Cannot open FILE on child's stdin on fd %d.", spawn_server_instance_write_fd(si)); - goto cleanup; - } - - if(!pi->child_stdout_fp) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, "Cannot open FILE on child's stdout on fd %d.", spawn_server_instance_read_fd(si)); - goto cleanup; - } - return pi; - -cleanup: - if(pi->child_stdin_fp) { fclose(pi->child_stdin_fp); spawn_server_instance_write_fd(si); } - if(pi->child_stdout_fp) { fclose(pi->child_stdout_fp); spawn_server_instance_read_fd_unset(si); } - spawn_server_exec_kill(netdata_main_spawn_server, si); - freez(pi); - return NULL; } POPEN_INSTANCE *spawn_popen_run_variadic(const char *cmd, ...) { @@ -92,7 +102,33 @@ POPEN_INSTANCE *spawn_popen_run_variadic(const char *cmd, ...) { POPEN_INSTANCE *spawn_popen_run(const char *cmd) { if(!cmd || !*cmd) return NULL; - + +//#if defined(OS_WINDOWS) +// if(strncmp(cmd, "exec ", 5) == 0) { +// size_t len = strlen(cmd); +// char cmd_copy[strlen(cmd) + 1]; +// memcpy(cmd_copy, cmd, len + 1); +// char *words[100]; +// size_t num_words = quoted_strings_splitter(cmd_copy, words, 100, isspace_map_pluginsd); +// char *exec = get_word(words, num_words, 0); +// char *prog = get_word(words, num_words, 1); +// if (strcmp(exec, "exec") == 0 && +// prog && +// strendswith(prog, ".plugin") && +// !strendswith(prog, "charts.d.plugin") && +// !strendswith(prog, "ioping.plugin")) { +// const char *argv[num_words - 1 + 1]; // remove exec, add terminator +// +// size_t dst = 0; +// for (size_t i = 1; i < num_words; i++) +// argv[dst++] = get_word(words, num_words, i); +// +// argv[dst] = NULL; +// return spawn_popen_run_argv(argv); +// } +// } +//#endif + const char *argv[] = { "/bin/sh", "-c", @@ -121,11 +157,24 @@ static int spawn_popen_status_rc(int status) { return -1; } +static void spawn_popen_close_files(POPEN_INSTANCE *pi) { + if(pi->child_stdin_fp) { + fclose(pi->child_stdin_fp); + pi->child_stdin_fp = NULL; + spawn_server_instance_write_fd_unset(pi->si); + } + + if(pi->child_stdout_fp) { + fclose(pi->child_stdout_fp); + pi->child_stdout_fp = NULL; + spawn_server_instance_read_fd_unset(pi->si); + } +} + int spawn_popen_wait(POPEN_INSTANCE *pi) { if(!pi) return -1; - fclose(pi->child_stdin_fp); pi->child_stdin_fp = NULL; spawn_server_instance_write_fd_unset(pi->si); - fclose(pi->child_stdout_fp); pi->child_stdout_fp = NULL; spawn_server_instance_read_fd_unset(pi->si); + spawn_popen_close_files(pi); int status = spawn_server_exec_wait(netdata_main_spawn_server, pi->si); freez(pi); return spawn_popen_status_rc(status); @@ -134,9 +183,23 @@ int spawn_popen_wait(POPEN_INSTANCE *pi) { int spawn_popen_kill(POPEN_INSTANCE *pi) { if(!pi) return -1; - fclose(pi->child_stdin_fp); pi->child_stdin_fp = NULL; spawn_server_instance_write_fd_unset(pi->si); - fclose(pi->child_stdout_fp); pi->child_stdout_fp = NULL; spawn_server_instance_read_fd_unset(pi->si); + spawn_popen_close_files(pi); int status = spawn_server_exec_kill(netdata_main_spawn_server, pi->si); freez(pi); return spawn_popen_status_rc(status); } + +pid_t spawn_popen_pid(POPEN_INSTANCE *pi) { + if(!pi) return -1; + return spawn_server_instance_pid(pi->si); +} + +int spawn_popen_read_fd(POPEN_INSTANCE *pi) { + if(!pi) return -1; + return spawn_server_instance_read_fd(pi->si); +} + +int spawn_popen_write_fd(POPEN_INSTANCE *pi) { + if(!pi) return -1; + return spawn_server_instance_write_fd(pi->si); +} diff --git 
a/src/libnetdata/spawn_server/spawn_popen.h b/src/libnetdata/spawn_server/spawn_popen.h index 253d1f34b..5c00f32ff 100644 --- a/src/libnetdata/spawn_server/spawn_popen.h +++ b/src/libnetdata/spawn_server/spawn_popen.h @@ -9,11 +9,7 @@ extern SPAWN_SERVER *netdata_main_spawn_server; bool netdata_main_spawn_server_init(const char *name, int argc, const char **argv); void netdata_main_spawn_server_cleanup(void); -typedef struct { - SPAWN_INSTANCE *si; - FILE *child_stdin_fp; - FILE *child_stdout_fp; -} POPEN_INSTANCE; +typedef struct popen_instance POPEN_INSTANCE; POPEN_INSTANCE *spawn_popen_run(const char *cmd); POPEN_INSTANCE *spawn_popen_run_argv(const char **argv); @@ -21,4 +17,10 @@ POPEN_INSTANCE *spawn_popen_run_variadic(const char *cmd, ...); int spawn_popen_wait(POPEN_INSTANCE *pi); int spawn_popen_kill(POPEN_INSTANCE *pi); +pid_t spawn_popen_pid(POPEN_INSTANCE *pi); +int spawn_popen_read_fd(POPEN_INSTANCE *pi); +int spawn_popen_write_fd(POPEN_INSTANCE *pi); +FILE *spawn_popen_stdin(POPEN_INSTANCE *pi); +FILE *spawn_popen_stdout(POPEN_INSTANCE *pi); + #endif //SPAWN_POPEN_H diff --git a/src/libnetdata/spawn_server/spawn_server.c b/src/libnetdata/spawn_server/spawn_server.c deleted file mode 100644 index ef6755c32..000000000 --- a/src/libnetdata/spawn_server/spawn_server.c +++ /dev/null @@ -1,1533 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include "../libnetdata.h" - -#include "spawn_server.h" - -#if defined(OS_WINDOWS) -#include -#include -#include -#include -#include -#endif - -struct spawn_server { - size_t id; - size_t request_id; - const char *name; -#if !defined(OS_WINDOWS) - SPAWN_SERVER_OPTIONS options; - - ND_UUID magic; // for authorizing requests, the client needs to know our random UUID - // it is ignored for PING requests - - int pipe[2]; - int sock; // the listening socket of the server - pid_t server_pid; - char *path; - spawn_request_callback_t cb; - - int argc; - const char **argv; -#endif -}; - -struct spawm_instance { - size_t request_id; - int sock; - int write_fd; - int read_fd; - pid_t child_pid; - -#if defined(OS_WINDOWS) - HANDLE process_handle; - HANDLE read_handle; - HANDLE write_handle; -#endif -}; - -int spawn_server_instance_read_fd(SPAWN_INSTANCE *si) { return si->read_fd; } -int spawn_server_instance_write_fd(SPAWN_INSTANCE *si) { return si->write_fd; } -pid_t spawn_server_instance_pid(SPAWN_INSTANCE *si) { return si->child_pid; } -void spawn_server_instance_read_fd_unset(SPAWN_INSTANCE *si) { si->read_fd = -1; } -void spawn_server_instance_write_fd_unset(SPAWN_INSTANCE *si) { si->write_fd = -1; } - -#if defined(OS_WINDOWS) - -SPAWN_SERVER* spawn_server_create(SPAWN_SERVER_OPTIONS options __maybe_unused, const char *name, spawn_request_callback_t cb __maybe_unused, int argc __maybe_unused, const char **argv __maybe_unused) { - SPAWN_SERVER* server = callocz(1, sizeof(SPAWN_SERVER)); - if(name) - server->name = strdupz(name); - else - server->name = strdupz("unnamed"); - return server; -} - -void spawn_server_destroy(SPAWN_SERVER *server) { - if (server) { - freez((void *)server->name); - freez(server); - } -} - -static BUFFER *argv_to_windows(const char **argv) { - BUFFER *wb = buffer_create(0, NULL); - - // argv[0] is the path - char b[strlen(argv[0]) * 2 + 1024]; - cygwin_conv_path(CCP_POSIX_TO_WIN_A | CCP_ABSOLUTE, argv[0], b, sizeof(b)); - - buffer_strcat(wb, "cmd.exe /C "); - - for(size_t i = 0; argv[i] ;i++) { - const char *s = (i == 0) ? 
b : argv[i]; - size_t len = strlen(s); - buffer_need_bytes(wb, len * 2 + 1); - - bool needs_quotes = false; - for(const char *c = s; !needs_quotes && *c ; c++) { - switch(*c) { - case ' ': - case '\v': - case '\t': - case '\n': - case '"': - needs_quotes = true; - break; - - default: - break; - } - } - - if(needs_quotes && buffer_strlen(wb)) - buffer_strcat(wb, " \""); - else - buffer_putc(wb, ' '); - - for(const char *c = s; *c ; c++) { - switch(*c) { - case '"': - buffer_putc(wb, '\\'); - // fall through - - default: - buffer_putc(wb, *c); - break; - } - } - - if(needs_quotes) - buffer_strcat(wb, "\""); - } - - return wb; -} - -SPAWN_INSTANCE* spawn_server_exec(SPAWN_SERVER *server, int stderr_fd, int custom_fd __maybe_unused, const char **argv, const void *data __maybe_unused, size_t data_size __maybe_unused, SPAWN_INSTANCE_TYPE type) { - static SPINLOCK spinlock = NETDATA_SPINLOCK_INITIALIZER; - - if (type != SPAWN_INSTANCE_TYPE_EXEC) - return NULL; - - int pipe_stdin[2] = { -1, -1 }, pipe_stdout[2] = { -1, -1 }; - - errno_clear(); - - SPAWN_INSTANCE *instance = callocz(1, sizeof(*instance)); - instance->request_id = __atomic_add_fetch(&server->request_id, 1, __ATOMIC_RELAXED); - - CLEAN_BUFFER *wb = argv_to_windows(argv); - char *command = (char *)buffer_tostring(wb); - - if (pipe(pipe_stdin) == -1) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "SPAWN PARENT: Cannot create stdin pipe() for request No %zu, command: %s", - instance->request_id, command); - goto cleanup; - } - - if (pipe(pipe_stdout) == -1) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "SPAWN PARENT: Cannot create stdout pipe() for request No %zu, command: %s", - instance->request_id, command); - goto cleanup; - } - - // do not run multiple times this section - // to prevent handles leaking - spinlock_lock(&spinlock); - - // Convert POSIX file descriptors to Windows handles - HANDLE stdin_read_handle = (HANDLE)_get_osfhandle(pipe_stdin[0]); - HANDLE stdout_write_handle = (HANDLE)_get_osfhandle(pipe_stdout[1]); - HANDLE stderr_handle = (HANDLE)_get_osfhandle(stderr_fd); - - if (stdin_read_handle == INVALID_HANDLE_VALUE || stdout_write_handle == INVALID_HANDLE_VALUE || stderr_handle == INVALID_HANDLE_VALUE) { - spinlock_unlock(&spinlock); - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "SPAWN PARENT: Invalid handle value(s) for request No %zu, command: %s", - instance->request_id, command); - goto cleanup; - } - - // Set handle inheritance - if (!SetHandleInformation(stdin_read_handle, HANDLE_FLAG_INHERIT, HANDLE_FLAG_INHERIT) || - !SetHandleInformation(stdout_write_handle, HANDLE_FLAG_INHERIT, HANDLE_FLAG_INHERIT) || - !SetHandleInformation(stderr_handle, HANDLE_FLAG_INHERIT, HANDLE_FLAG_INHERIT)) { - spinlock_unlock(&spinlock); - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "SPAWN PARENT: Cannot set handle(s) inheritance for request No %zu, command: %s", - instance->request_id, command); - goto cleanup; - } - - // Set up the STARTUPINFO structure - STARTUPINFO si; - PROCESS_INFORMATION pi; - ZeroMemory(&si, sizeof(si)); - si.cb = sizeof(si); - si.dwFlags = STARTF_USESTDHANDLES; - si.hStdInput = stdin_read_handle; - si.hStdOutput = stdout_write_handle; - si.hStdError = stderr_handle; - - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "SPAWN PARENT: Running request No %zu, command: %s", - instance->request_id, command); - - // Spawn the process - if (!CreateProcess(NULL, command, NULL, NULL, TRUE, 0, NULL, NULL, &si, &pi)) { - spinlock_unlock(&spinlock); - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "SPAWN PARENT: cannot CreateProcess() for request No %zu, command: %s", - 
instance->request_id, command); - goto cleanup; - } - - CloseHandle(pi.hThread); - - // end of the critical section - spinlock_unlock(&spinlock); - - // Close unused pipe ends - close(pipe_stdin[0]); pipe_stdin[0] = -1; - close(pipe_stdout[1]); pipe_stdout[1] = -1; - - // Store process information in instance - instance->child_pid = cygwin_winpid_to_pid(pi.dwProcessId); - if(instance->child_pid == -1) instance->child_pid = pi.dwProcessId; - - instance->process_handle = pi.hProcess; - - // Convert handles to POSIX file descriptors - instance->write_fd = pipe_stdin[1]; - instance->read_fd = pipe_stdout[0]; - - errno_clear(); - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "SPAWN PARENT: created process for request No %zu, pid %d, command: %s", - instance->request_id, (int)instance->child_pid, command); - - return instance; - -cleanup: - if (pipe_stdin[0] >= 0) close(pipe_stdin[0]); - if (pipe_stdin[1] >= 0) close(pipe_stdin[1]); - if (pipe_stdout[0] >= 0) close(pipe_stdout[0]); - if (pipe_stdout[1] >= 0) close(pipe_stdout[1]); - freez(instance); - return NULL; -} - -int spawn_server_exec_kill(SPAWN_SERVER *server __maybe_unused, SPAWN_INSTANCE *instance) { - if(instance->read_fd != -1) { close(instance->read_fd); instance->read_fd = -1; } - if(instance->write_fd != -1) { close(instance->write_fd); instance->write_fd = -1; } - CloseHandle(instance->read_handle); instance->read_handle = NULL; - CloseHandle(instance->write_handle); instance->write_handle = NULL; - - TerminateProcess(instance->process_handle, 0); - - DWORD exit_code; - GetExitCodeProcess(instance->process_handle, &exit_code); - CloseHandle(instance->process_handle); - - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "SPAWN PARENT: child of request No %zu, pid %d, killed and exited with code %d", - instance->request_id, (int)instance->child_pid, (int)exit_code); - - freez(instance); - return (int)exit_code; -} - -int spawn_server_exec_wait(SPAWN_SERVER *server __maybe_unused, SPAWN_INSTANCE *instance) { - if(instance->read_fd != -1) { close(instance->read_fd); instance->read_fd = -1; } - if(instance->write_fd != -1) { close(instance->write_fd); instance->write_fd = -1; } - CloseHandle(instance->read_handle); instance->read_handle = NULL; - CloseHandle(instance->write_handle); instance->write_handle = NULL; - - WaitForSingleObject(instance->process_handle, INFINITE); - - DWORD exit_code = -1; - GetExitCodeProcess(instance->process_handle, &exit_code); - CloseHandle(instance->process_handle); - - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "SPAWN PARENT: child of request No %zu, pid %d, waited and exited with code %d", - instance->request_id, (int)instance->child_pid, (int)exit_code); - - freez(instance); - return (int)exit_code; -} - -#else // !OS_WINDOWS - -#ifdef __APPLE__ -#include -#define environ (*_NSGetEnviron()) -#else -extern char **environ; -#endif - -static size_t spawn_server_id = 0; -static volatile bool spawn_server_exit = false; -static volatile bool spawn_server_sigchld = false; -static SPAWN_REQUEST *spawn_server_requests = NULL; - -// -------------------------------------------------------------------------------------------------------------------- - -static int connect_to_spawn_server(const char *path, bool log) { - int sock = -1; - - if ((sock = socket(AF_UNIX, SOCK_STREAM, 0)) == -1) { - if(log) - nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN PARENT: cannot create socket() to connect to spawn server."); - return -1; - } - - struct sockaddr_un server_addr = { - .sun_family = AF_UNIX, - }; - strcpy(server_addr.sun_path, path); - - if 
(connect(sock, (struct sockaddr *)&server_addr, sizeof(server_addr)) == -1) { - if(log) - nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN PARENT: Cannot connect() to spawn server."); - close(sock); - return -1; - } - - return sock; -} - -// -------------------------------------------------------------------------------------------------------------------- -// the child created by the spawn server - -static void spawn_server_run_child(SPAWN_SERVER *server, SPAWN_REQUEST *rq) { - // close the server sockets; - close(server->sock); server->sock = -1; - if(server->pipe[0] != -1) { close(server->pipe[0]); server->pipe[0] = -1; } - if(server->pipe[1] != -1) { close(server->pipe[1]); server->pipe[1] = -1; } - - // set the process name - os_setproctitle("spawn-child", server->argc, server->argv); - - // get the fds from the request - int stdin_fd = rq->fds[0]; - int stdout_fd = rq->fds[1]; - int stderr_fd = rq->fds[2]; - int custom_fd = rq->fds[3]; (void)custom_fd; - - // change stdio fds to the ones in the request - if (dup2(stdin_fd, STDIN_FILENO) == -1) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "SPAWN SERVER: cannot dup2(%d) stdin of request No %zu: %s", - stdin_fd, rq->request_id, rq->cmdline); - exit(1); - } - if (dup2(stdout_fd, STDOUT_FILENO) == -1) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "SPAWN SERVER: cannot dup2(%d) stdin of request No %zu: %s", - stdout_fd, rq->request_id, rq->cmdline); - exit(1); - } - if (dup2(stderr_fd, STDERR_FILENO) == -1) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "SPAWN SERVER: cannot dup2(%d) stderr of request No %zu: %s", - stderr_fd, rq->request_id, rq->cmdline); - exit(1); - } - - // close the excess fds - close(stdin_fd); stdin_fd = rq->fds[0] = STDIN_FILENO; - close(stdout_fd); stdout_fd = rq->fds[1] = STDOUT_FILENO; - close(stderr_fd); stderr_fd = rq->fds[2] = STDERR_FILENO; - - // overwrite the process environment - environ = (char **)rq->environment; - - // Perform different actions based on the type - switch (rq->type) { - - case SPAWN_INSTANCE_TYPE_EXEC: - // close all fds except the ones we need - os_close_all_non_std_open_fds_except(NULL, 0); - - // run the command - execvp(rq->argv[0], (char **)rq->argv); - - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "SPAWN SERVER: Failed to execute command of request No %zu: %s", - rq->request_id, rq->cmdline); - - exit(1); - break; - - case SPAWN_INSTANCE_TYPE_CALLBACK: - server->cb(rq); - exit(0); - break; - - default: - nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: unknown request type %u", rq->type); - exit(1); - } -} - -// -------------------------------------------------------------------------------------------------------------------- -// Encoding and decoding of spawn server request argv type of data - -// Function to encode argv or envp -static void* argv_encode(const char **argv, size_t *out_size) { - size_t buffer_size = 1024; // Initial buffer size - size_t buffer_used = 0; - char *buffer = mallocz(buffer_size); - - if(argv) { - for (const char **p = argv; *p != NULL; p++) { - if (strlen(*p) == 0) - continue; // Skip empty strings - - size_t len = strlen(*p) + 1; - size_t wanted_size = buffer_used + len + 1; - - if (wanted_size >= buffer_size) { - buffer_size *= 2; - - if(buffer_size < wanted_size) - buffer_size = wanted_size; - - buffer = reallocz(buffer, buffer_size); - } - - memcpy(&buffer[buffer_used], *p, len); - buffer_used += len; - } - } - - buffer[buffer_used++] = '\0'; // Final empty string - *out_size = buffer_used; - - return buffer; -} - -// Function to decode argv or envp -static const char** 
argv_decode(const char *buffer, size_t size) { - size_t count = 0; - const char *ptr = buffer; - while (ptr < buffer + size) { - if(ptr && *ptr) { - count++; - ptr += strlen(ptr) + 1; - } - else - break; - } - - const char **argv = mallocz((count + 1) * sizeof(char *)); - - ptr = buffer; - for (size_t i = 0; i < count; i++) { - argv[i] = ptr; - ptr += strlen(ptr) + 1; - } - argv[count] = NULL; // Null-terminate the array - - return argv; -} - -static BUFFER *argv_to_cmdline_buffer(const char **argv) { - BUFFER *wb = buffer_create(0, NULL); - - for(size_t i = 0; argv[i] ;i++) { - const char *s = argv[i]; - size_t len = strlen(s); - buffer_need_bytes(wb, len * 2 + 1); - - bool needs_quotes = false; - for(const char *c = s; !needs_quotes && *c ; c++) { - switch(*c) { - case ' ': - case '\v': - case '\t': - case '\n': - case '"': - needs_quotes = true; - break; - - default: - break; - } - } - - if(needs_quotes && buffer_strlen(wb)) - buffer_strcat(wb, " \""); - else - buffer_putc(wb, ' '); - - for(const char *c = s; *c ; c++) { - switch(*c) { - case '"': - buffer_putc(wb, '\\'); - // fall through - - default: - buffer_putc(wb, *c); - break; - } - } - - if(needs_quotes) - buffer_strcat(wb, "\""); - } - - return wb; -} - -// -------------------------------------------------------------------------------------------------------------------- -// status reports - -typedef enum __attribute__((packed)) { - STATUS_REPORT_NONE = 0, - STATUS_REPORT_STARTED, - STATUS_REPORT_FAILED, - STATUS_REPORT_EXITED, - STATUS_REPORT_PING, -} STATUS_REPORT; - -#define STATUS_REPORT_MAGIC 0xBADA55EE - -struct status_report { - uint32_t magic; - STATUS_REPORT status; - union { - struct { - pid_t pid; - } started; - - struct { - int err_no; - } failed; - - struct { - int waitpid_status; - } exited; - }; -}; - -static void spawn_server_send_status_ping(int sock) { - struct status_report sr = { - .magic = STATUS_REPORT_MAGIC, - .status = STATUS_REPORT_PING, - }; - - if(write(sock, &sr, sizeof(sr)) != sizeof(sr)) - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "SPAWN SERVER: Cannot send ping reply."); -} - -static void spawn_server_send_status_success(SPAWN_REQUEST *rq) { - const struct status_report sr = { - .magic = STATUS_REPORT_MAGIC, - .status = STATUS_REPORT_STARTED, - .started = { - .pid = rq->pid, - }, - }; - - if(write(rq->sock, &sr, sizeof(sr)) != sizeof(sr)) - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "SPAWN SERVER: Cannot send success status report for pid %d, request %zu: %s", - rq->pid, rq->request_id, rq->cmdline); -} - -static void spawn_server_send_status_failure(SPAWN_REQUEST *rq) { - struct status_report sr = { - .magic = STATUS_REPORT_MAGIC, - .status = STATUS_REPORT_FAILED, - .failed = { - .err_no = errno, - }, - }; - - if(write(rq->sock, &sr, sizeof(sr)) != sizeof(sr)) - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "SPAWN SERVER: Cannot send failure status report for request %zu: %s", - rq->request_id, rq->cmdline); -} - -static void spawn_server_send_status_exit(SPAWN_REQUEST *rq, int waitpid_status) { - struct status_report sr = { - .magic = STATUS_REPORT_MAGIC, - .status = STATUS_REPORT_EXITED, - .exited = { - .waitpid_status = waitpid_status, - }, - }; - - if(write(rq->sock, &sr, sizeof(sr)) != sizeof(sr)) - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "SPAWN SERVER: Cannot send exit status (%d) report for pid %d, request %zu: %s", - waitpid_status, rq->pid, rq->request_id, rq->cmdline); -} - -// -------------------------------------------------------------------------------------------------------------------- -// execute a 
received request - -static void request_free(SPAWN_REQUEST *rq) { - if(rq->fds[0] != -1) close(rq->fds[0]); - if(rq->fds[1] != -1) close(rq->fds[1]); - if(rq->fds[2] != -1) close(rq->fds[2]); - if(rq->fds[3] != -1) close(rq->fds[3]); - if(rq->sock != -1) close(rq->sock); - freez((void *)rq->argv); - freez((void *)rq->environment); - freez((void *)rq->data); - freez((void *)rq->cmdline); - freez((void *)rq); -} - -static void spawn_server_execute_request(SPAWN_SERVER *server, SPAWN_REQUEST *rq) { - switch(rq->type) { - case SPAWN_INSTANCE_TYPE_EXEC: - // close custom_fd - it is not needed for exec mode - if(rq->fds[3] != -1) { close(rq->fds[3]); rq->fds[3] = -1; } - - // create the cmdline for logs - if(rq->argv) { - CLEAN_BUFFER *wb = argv_to_cmdline_buffer(rq->argv); - rq->cmdline = strdupz(buffer_tostring(wb)); - } - break; - - case SPAWN_INSTANCE_TYPE_CALLBACK: - if(server->cb == NULL) { - errno = ENOSYS; - spawn_server_send_status_failure(rq); - request_free(rq); - return; - } - rq->cmdline = strdupz("callback() function"); - break; - - default: - errno = EINVAL; - spawn_server_send_status_failure(rq); - request_free(rq); - return; - } - - pid_t pid = fork(); - if (pid < 0) { - // fork failed - - nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: Failed to fork() child."); - spawn_server_send_status_failure(rq); - request_free(rq); - return; - } - else if (pid == 0) { - // the child - - spawn_server_run_child(server, rq); - exit(63); - } - - // the parent - rq->pid = pid; - - // let the parent know - spawn_server_send_status_success(rq); - - // do not keep data we don't need at the parent - freez((void *)rq->environment); rq->environment = NULL; - freez((void *)rq->argv); rq->argv = NULL; - freez((void *)rq->data); rq->data = NULL; - rq->data_size = 0; - - // do not keep fds we don't need at the parent - if(rq->fds[0] != -1) { close(rq->fds[0]); rq->fds[0] = -1; } - if(rq->fds[1] != -1) { close(rq->fds[1]); rq->fds[1] = -1; } - if(rq->fds[2] != -1) { close(rq->fds[2]); rq->fds[2] = -1; } - if(rq->fds[3] != -1) { close(rq->fds[3]); rq->fds[3] = -1; } - - // keep it in the list - DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(spawn_server_requests, rq, prev, next); -} - -// -------------------------------------------------------------------------------------------------------------------- -// Sending and receiving requests - -typedef enum __attribute__((packed)) { - SPAWN_SERVER_MSG_INVALID = 0, - SPAWN_SERVER_MSG_REQUEST, - SPAWN_SERVER_MSG_PING, -} SPAWN_SERVER_MSG; - -static bool spawn_server_is_running(const char *path) { - struct msghdr msg = {0}; - struct iovec iov[7]; - SPAWN_SERVER_MSG msg_type = SPAWN_SERVER_MSG_PING; - size_t dummy_size = 0; - SPAWN_INSTANCE_TYPE dummy_type = 0; - ND_UUID magic = UUID_ZERO; - char cmsgbuf[CMSG_SPACE(sizeof(int))]; - - iov[0].iov_base = &msg_type; - iov[0].iov_len = sizeof(msg_type); - - iov[1].iov_base = magic.uuid; - iov[1].iov_len = sizeof(magic.uuid); - - iov[2].iov_base = &dummy_size; - iov[2].iov_len = sizeof(dummy_size); - - iov[3].iov_base = &dummy_size; - iov[3].iov_len = sizeof(dummy_size); - - iov[4].iov_base = &dummy_size; - iov[4].iov_len = sizeof(dummy_size); - - iov[5].iov_base = &dummy_size; - iov[5].iov_len = sizeof(dummy_size); - - iov[6].iov_base = &dummy_type; - iov[6].iov_len = sizeof(dummy_type); - - msg.msg_iov = iov; - msg.msg_iovlen = 7; - msg.msg_control = cmsgbuf; - msg.msg_controllen = sizeof(cmsgbuf); - - int sock = connect_to_spawn_server(path, false); - if(sock == -1) - return false; - - int rc = sendmsg(sock, &msg, 0); - if 
(rc < 0) { - // cannot send the message - close(sock); - return false; - } - - // Receive response - struct status_report sr = { 0 }; - if (read(sock, &sr, sizeof(sr)) != sizeof(sr)) { - // cannot receive a ping reply - close(sock); - return false; - } - - close(sock); - return sr.status == STATUS_REPORT_PING; -} - -static bool spawn_server_send_request(ND_UUID *magic, SPAWN_REQUEST *request) { - bool ret = false; - - size_t env_size = 0; - void *encoded_env = argv_encode(request->environment, &env_size); - if (!encoded_env) - goto cleanup; - - size_t argv_size = 0; - void *encoded_argv = argv_encode(request->argv, &argv_size); - if (!encoded_argv) - goto cleanup; - - struct msghdr msg = {0}; - struct cmsghdr *cmsg; - SPAWN_SERVER_MSG msg_type = SPAWN_SERVER_MSG_REQUEST; - char cmsgbuf[CMSG_SPACE(sizeof(int) * SPAWN_SERVER_TRANSFER_FDS)]; - struct iovec iov[11]; - - - // We send 1 request with 10 iovec in it - // The request will be received in 2 parts - // 1. the first 6 iovec which include the sizes of the memory allocations required - // 2. the last 4 iovec which require the memory allocations to be received - - iov[0].iov_base = &msg_type; - iov[0].iov_len = sizeof(msg_type); - - iov[1].iov_base = magic->uuid; - iov[1].iov_len = sizeof(magic->uuid); - - iov[2].iov_base = &request->request_id; - iov[2].iov_len = sizeof(request->request_id); - - iov[3].iov_base = &env_size; - iov[3].iov_len = sizeof(env_size); - - iov[4].iov_base = &argv_size; - iov[4].iov_len = sizeof(argv_size); - - iov[5].iov_base = &request->data_size; - iov[5].iov_len = sizeof(request->data_size); - - iov[6].iov_base = &request->type; // Added this line - iov[6].iov_len = sizeof(request->type); - - iov[7].iov_base = encoded_env; - iov[7].iov_len = env_size; - - iov[8].iov_base = encoded_argv; - iov[8].iov_len = argv_size; - - iov[9].iov_base = (char *)request->data; - iov[9].iov_len = request->data_size; - - iov[10].iov_base = NULL; - iov[10].iov_len = 0; - - msg.msg_iov = iov; - msg.msg_iovlen = 11; - msg.msg_control = cmsgbuf; - msg.msg_controllen = CMSG_SPACE(sizeof(int) * SPAWN_SERVER_TRANSFER_FDS); - - cmsg = CMSG_FIRSTHDR(&msg); - cmsg->cmsg_level = SOL_SOCKET; - cmsg->cmsg_type = SCM_RIGHTS; - cmsg->cmsg_len = CMSG_LEN(sizeof(int) * SPAWN_SERVER_TRANSFER_FDS); - - memcpy(CMSG_DATA(cmsg), request->fds, sizeof(int) * SPAWN_SERVER_TRANSFER_FDS); - - int rc = sendmsg(request->sock, &msg, 0); - - if (rc < 0) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN PARENT: Failed to sendmsg() request to spawn server using socket %d.", request->sock); - goto cleanup; - } - else { - ret = true; - // fprintf(stderr, "PARENT: sent request %zu on socket %d (fds: %d, %d, %d, %d) from tid %d\n", - // request->request_id, request->socket, request->fds[0], request->fds[1], request->fds[2], request->fds[3], os_gettid()); - } - -cleanup: - freez(encoded_env); - freez(encoded_argv); - return ret; -} - -static void spawn_server_receive_request(int sock, SPAWN_SERVER *server) { - struct msghdr msg = {0}; - struct iovec iov[7]; - SPAWN_SERVER_MSG msg_type = SPAWN_SERVER_MSG_INVALID; - size_t request_id; - size_t env_size; - size_t argv_size; - size_t data_size; - ND_UUID magic = UUID_ZERO; - SPAWN_INSTANCE_TYPE type; - char cmsgbuf[CMSG_SPACE(sizeof(int) * SPAWN_SERVER_TRANSFER_FDS)]; - char *envp_encoded = NULL, *argv_encoded = NULL, *data = NULL; - int stdin_fd = -1, stdout_fd = -1, stderr_fd = -1, custom_fd = -1; - - // First recvmsg() to read sizes and control message - iov[0].iov_base = &msg_type; - iov[0].iov_len = sizeof(msg_type); - 
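
The request protocol shown above rides on an AF_UNIX socket: the iovec array carries the fixed-size fields (message type, magic, sizes) first, so the receiver can allocate buffers before pulling the variable-size payload, and the four stdio/custom descriptors travel in a single SCM_RIGHTS control message. A minimal, self-contained sketch of SCM_RIGHTS descriptor passing, reduced to one fd (the function name is illustrative):

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

// send one fd over a connected AF_UNIX socket
static int send_one_fd(int sock, int fd) {
    char byte = 0;
    struct iovec iov = { .iov_base = &byte, .iov_len = 1 }; // at least one byte of data is required
    char cmsgbuf[CMSG_SPACE(sizeof(int))];
    struct msghdr msg = {
        .msg_iov = &iov,
        .msg_iovlen = 1,
        .msg_control = cmsgbuf,
        .msg_controllen = sizeof(cmsgbuf),
    };

    struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
    cmsg->cmsg_level = SOL_SOCKET;
    cmsg->cmsg_type = SCM_RIGHTS;              // mark the payload as file descriptors
    cmsg->cmsg_len = CMSG_LEN(sizeof(int));
    memcpy(CMSG_DATA(cmsg), &fd, sizeof(int)); // the kernel duplicates fd into the receiver

    return sendmsg(sock, &msg, 0) == 1 ? 0 : -1;
}

The receiver mirrors this with recvmsg() and CMSG_FIRSTHDR(), which is how spawn_server_receive_request() recovers the stdin/stdout/stderr/custom descriptors before validating them.
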
- iov[1].iov_base = magic.uuid; - iov[1].iov_len = sizeof(magic.uuid); - - iov[2].iov_base = &request_id; - iov[2].iov_len = sizeof(request_id); - - iov[3].iov_base = &env_size; - iov[3].iov_len = sizeof(env_size); - - iov[4].iov_base = &argv_size; - iov[4].iov_len = sizeof(argv_size); - - iov[5].iov_base = &data_size; - iov[5].iov_len = sizeof(data_size); - - iov[6].iov_base = &type; - iov[6].iov_len = sizeof(type); - - msg.msg_iov = iov; - msg.msg_iovlen = 7; - msg.msg_control = cmsgbuf; - msg.msg_controllen = sizeof(cmsgbuf); - - if (recvmsg(sock, &msg, 0) < 0) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "SPAWN SERVER: failed to recvmsg() the first part of the request."); - close(sock); - return; - } - - if(msg_type == SPAWN_SERVER_MSG_PING) { - spawn_server_send_status_ping(sock); - close(sock); - return; - } - - if(!UUIDeq(magic, server->magic)) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "SPAWN SERVER: Invalid authorization key for request %zu. " - "Rejecting request.", - request_id); - close(sock); - return; - } - - if(type == SPAWN_INSTANCE_TYPE_EXEC && !(server->options & SPAWN_SERVER_OPTION_EXEC)) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "SPAWN SERVER: Request %zu wants to exec, but exec is not allowed for this spawn server. " - "Rejecting request.", - request_id); - close(sock); - return; - } - - if(type == SPAWN_INSTANCE_TYPE_CALLBACK && !(server->options & SPAWN_SERVER_OPTION_CALLBACK)) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "SPAWN SERVER: Request %zu wants to run a callback, but callbacks are not allowed for this spawn server. " - "Rejecting request.", - request_id); - close(sock); - return; - } - - // Extract file descriptors from control message - struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg); - if (cmsg == NULL || cmsg->cmsg_len != CMSG_LEN(sizeof(int) * SPAWN_SERVER_TRANSFER_FDS)) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "SPAWN SERVER: Received invalid control message (expected %zu bytes, received %zu bytes)", - CMSG_LEN(sizeof(int) * SPAWN_SERVER_TRANSFER_FDS), cmsg?cmsg->cmsg_len:0); - close(sock); - return; - } - - if (cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: Received unexpected control message type."); - close(sock); - return; - } - - int *fds = (int *)CMSG_DATA(cmsg); - stdin_fd = fds[0]; - stdout_fd = fds[1]; - stderr_fd = fds[2]; - custom_fd = fds[3]; - - if (stdin_fd < 0 || stdout_fd < 0 || stderr_fd < 0) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "SPAWN SERVER: invalid file descriptors received, stdin = %d, stdout = %d, stderr = %d", - stdin_fd, stdout_fd, stderr_fd); - goto cleanup; - } - - // Second recvmsg() to read buffer contents - iov[0].iov_base = envp_encoded = mallocz(env_size); - iov[0].iov_len = env_size; - iov[1].iov_base = argv_encoded = mallocz(argv_size); - iov[1].iov_len = argv_size; - iov[2].iov_base = data = mallocz(data_size); - iov[2].iov_len = data_size; - - msg.msg_iov = iov; - msg.msg_iovlen = 3; - msg.msg_control = NULL; - msg.msg_controllen = 0; - - ssize_t total_bytes_received = recvmsg(sock, &msg, 0); - if (total_bytes_received < 0) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: failed to recvmsg() the second part of the request."); - goto cleanup; - } - - // fprintf(stderr, "SPAWN SERVER: received request %zu (fds: %d, %d, %d, %d)\n", request_id, - // stdin_fd, stdout_fd, stderr_fd, custom_fd); - - SPAWN_REQUEST *rq = mallocz(sizeof(*rq)); - *rq = (SPAWN_REQUEST){ - .pid = 0, - .request_id = request_id, - .sock = sock, - .fds = { - [0] = stdin_fd, - [1] = stdout_fd, - 
[2] = stderr_fd, - [3] = custom_fd, - }, - .environment = argv_decode(envp_encoded, env_size), - .argv = argv_decode(argv_encoded, argv_size), - .data = data, - .data_size = data_size, - .type = type - }; - - // all allocations given to the request are now handled by this - spawn_server_execute_request(server, rq); - - // since we make rq->argv and rq->environment NULL when we keep it, - // we don't need these anymore. - freez(envp_encoded); - freez(argv_encoded); - return; - -cleanup: - close(sock); - if(stdin_fd != -1) close(stdin_fd); - if(stdout_fd != -1) close(stdout_fd); - if(stderr_fd != -1) close(stderr_fd); - if(custom_fd != -1) close(custom_fd); - freez(envp_encoded); - freez(argv_encoded); - freez(data); -} - -// -------------------------------------------------------------------------------------------------------------------- -// the spawn server main event loop - -static void spawn_server_sigchld_handler(int signo __maybe_unused) { - spawn_server_sigchld = true; -} - -static void spawn_server_sigterm_handler(int signo __maybe_unused) { - spawn_server_exit = true; -} - -static SPAWN_REQUEST *find_request_by_pid(pid_t pid) { - for(SPAWN_REQUEST *rq = spawn_server_requests; rq ;rq = rq->next) - if(rq->pid == pid) - return rq; - - return NULL; -} - -static void spawn_server_process_sigchld(void) { - // nd_log(NDLS_COLLECTORS, NDLP_INFO, "SPAWN SERVER: checking for exited children"); - - int status; - pid_t pid; - - // Loop to check for exited child processes - while ((pid = waitpid((pid_t)(-1), &status, WNOHANG)) != 0) { - if(pid == -1) - break; - - errno_clear(); - - SPAWN_REQUEST *rq = find_request_by_pid(pid); - size_t request_id = rq ? rq->request_id : 0; - bool send_report_remove_request = false; - - if(WIFEXITED(status)) { - if(WEXITSTATUS(status)) - nd_log(NDLS_COLLECTORS, NDLP_INFO, - "SPAWN SERVER: child with pid %d (request %zu) exited with exit code %d: %s", - pid, request_id, WEXITSTATUS(status), rq ? rq->cmdline : "[request not found]"); - send_report_remove_request = true; - } - else if(WIFSIGNALED(status)) { - if(WCOREDUMP(status)) - nd_log(NDLS_COLLECTORS, NDLP_INFO, - "SPAWN SERVER: child with pid %d (request %zu) coredump'd due to signal %d: %s", - pid, request_id, WTERMSIG(status), rq ? rq->cmdline : "[request not found]"); - else - nd_log(NDLS_COLLECTORS, NDLP_INFO, - "SPAWN SERVER: child with pid %d (request %zu) killed by signal %d: %s", - pid, request_id, WTERMSIG(status), rq ? rq->cmdline : "[request not found]"); - send_report_remove_request = true; - } - else if(WIFSTOPPED(status)) { - nd_log(NDLS_COLLECTORS, NDLP_INFO, - "SPAWN SERVER: child with pid %d (request %zu) stopped due to signal %d: %s", - pid, request_id, WSTOPSIG(status), rq ? rq->cmdline : "[request not found]"); - send_report_remove_request = false; - } - else if(WIFCONTINUED(status)) { - nd_log(NDLS_COLLECTORS, NDLP_INFO, - "SPAWN SERVER: child with pid %d (request %zu) continued due to signal %d: %s", - pid, request_id, SIGCONT, rq ? rq->cmdline : "[request not found]"); - send_report_remove_request = false; - } - else { - nd_log(NDLS_COLLECTORS, NDLP_INFO, - "SPAWN SERVER: child with pid %d (request %zu) reports unhandled status: %s", - pid, request_id, rq ? 
rq->cmdline : "[request not found]"); - send_report_remove_request = false; - } - - if(send_report_remove_request && rq) { - spawn_server_send_status_exit(rq, status); - DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(spawn_server_requests, rq, prev, next); - request_free(rq); - } - } -} - -static void signals_unblock(void) { - sigset_t sigset; - sigfillset(&sigset); - - if(pthread_sigmask(SIG_UNBLOCK, &sigset, NULL) == -1) { - netdata_log_error("SPAWN SERVER: Could not unblock signals for threads"); - } -} - -static void spawn_server_event_loop(SPAWN_SERVER *server) { - int pipe_fd = server->pipe[1]; - close(server->pipe[0]); server->pipe[0] = -1; - - signals_unblock(); - - // Set up the signal handler for SIGCHLD and SIGTERM - struct sigaction sa; - sa.sa_handler = spawn_server_sigchld_handler; - sigemptyset(&sa.sa_mask); - sa.sa_flags = SA_RESTART | SA_NOCLDSTOP; - if (sigaction(SIGCHLD, &sa, NULL) == -1) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: sigaction() failed for SIGCHLD"); - exit(1); - } - - sa.sa_handler = spawn_server_sigterm_handler; - if (sigaction(SIGTERM, &sa, NULL) == -1) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: sigaction() failed for SIGTERM"); - exit(1); - } - - struct status_report sr = { - .status = STATUS_REPORT_STARTED, - .started = { - .pid = getpid(), - }, - }; - if (write(pipe_fd, &sr, sizeof(sr)) != sizeof(sr)) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: failed to write initial status report."); - exit(1); - } - - struct pollfd fds[2]; - fds[0].fd = server->sock; - fds[0].events = POLLIN; - fds[1].fd = pipe_fd; - fds[1].events = POLLHUP | POLLERR; - - while(!spawn_server_exit) { - int ret = poll(fds, 2, -1); - if (spawn_server_sigchld) { - spawn_server_sigchld = false; - spawn_server_process_sigchld(); - errno_clear(); - - if(ret == -1) - continue; - } - - if (ret == -1) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: poll() failed"); - break; - } - - if (fds[1].revents & (POLLHUP|POLLERR)) { - // Pipe has been closed (parent has exited) - nd_log(NDLS_COLLECTORS, NDLP_DEBUG, "SPAWN SERVER: Parent process has exited"); - break; - } - - if (fds[0].revents & POLLIN) { - int sock = accept(server->sock, NULL, NULL); - if (sock == -1) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: accept() failed"); - continue; - } - - // do not fork this socket - sock_setcloexec(sock); - - // receive the request and process it - spawn_server_receive_request(sock, server); - } - } - - // Cleanup before exiting - unlink(server->path); - - // stop all children - if(spawn_server_requests) { - // nd_log(NDLS_COLLECTORS, NDLP_INFO, "SPAWN SERVER: killing all children..."); - size_t killed = 0; - for(SPAWN_REQUEST *rq = spawn_server_requests; rq ; rq = rq->next) { - kill(rq->pid, SIGTERM); - killed++; - } - while(spawn_server_requests) { - spawn_server_process_sigchld(); - tinysleep(); - } - // nd_log(NDLS_COLLECTORS, NDLP_INFO, "SPAWN SERVER: all %zu children finished", killed); - } - - exit(1); -} - -// -------------------------------------------------------------------------------------------------------------------- -// management of the spawn server - -void spawn_server_destroy(SPAWN_SERVER *server) { - if(server->pipe[0] != -1) close(server->pipe[0]); - if(server->pipe[1] != -1) close(server->pipe[1]); - if(server->sock != -1) close(server->sock); - - if(server->server_pid) { - kill(server->server_pid, SIGTERM); - waitpid(server->server_pid, NULL, 0); - } - - if(server->path) { - unlink(server->path); - freez(server->path); - } - - freez((void 
*)server->name); - freez(server); -} - -static bool spawn_server_create_listening_socket(SPAWN_SERVER *server) { - if(spawn_server_is_running(server->path)) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: Server is already listening on path '%s'", server->path); - return false; - } - - if ((server->sock = socket(AF_UNIX, SOCK_STREAM, 0)) == -1) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: Failed to create socket()"); - return false; - } - - struct sockaddr_un server_addr = { - .sun_family = AF_UNIX, - }; - strcpy(server_addr.sun_path, server->path); - unlink(server->path); - errno = 0; - - if (bind(server->sock, (struct sockaddr *)&server_addr, sizeof(server_addr)) == -1) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: Failed to bind()"); - return false; - } - - if (listen(server->sock, 5) == -1) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: Failed to listen()"); - return false; - } - - return true; -} - -static void replace_stdio_with_dev_null() { - int dev_null_fd = open("/dev/null", O_RDWR); - if (dev_null_fd == -1) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: Failed to open /dev/null: %s", strerror(errno)); - return; - } - - // Redirect stdin (fd 0) - if (dup2(dev_null_fd, STDIN_FILENO) == -1) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: Failed to redirect stdin to /dev/null: %s", strerror(errno)); - close(dev_null_fd); - return; - } - - // Redirect stdout (fd 1) - if (dup2(dev_null_fd, STDOUT_FILENO) == -1) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: Failed to redirect stdout to /dev/null: %s", strerror(errno)); - close(dev_null_fd); - return; - } - - // Close the original /dev/null file descriptor - close(dev_null_fd); -} - -SPAWN_SERVER* spawn_server_create(SPAWN_SERVER_OPTIONS options, const char *name, spawn_request_callback_t child_callback, int argc, const char **argv) { - SPAWN_SERVER *server = callocz(1, sizeof(SPAWN_SERVER)); - server->pipe[0] = -1; - server->pipe[1] = -1; - server->sock = -1; - server->cb = child_callback; - server->argc = argc; - server->argv = argv; - server->options = options; - server->id = __atomic_add_fetch(&spawn_server_id, 1, __ATOMIC_RELAXED); - os_uuid_generate_random(server->magic.uuid); - - char *runtime_directory = getenv("NETDATA_CACHE_DIR"); - if(runtime_directory && !*runtime_directory) runtime_directory = NULL; - if (runtime_directory) { - struct stat statbuf; - - if(!*runtime_directory) - // it is empty - runtime_directory = NULL; - - else if (stat(runtime_directory, &statbuf) == 0 && S_ISDIR(statbuf.st_mode)) { - // it exists and it is a directory - - if (access(runtime_directory, W_OK) != 0) { - // it is not writable by us - nd_log(NDLS_COLLECTORS, NDLP_ERR, "Runtime directory '%s' is not writable, falling back to '/tmp'", runtime_directory); - runtime_directory = NULL; - } - } - else { - // it does not exist - nd_log(NDLS_COLLECTORS, NDLP_ERR, "Runtime directory '%s' does not exist, falling back to '/tmp'", runtime_directory); - runtime_directory = NULL; - } - } - if(!runtime_directory) - runtime_directory = "/tmp"; - - char path[1024]; - if(name && *name) { - server->name = strdupz(name); - snprintf(path, sizeof(path), "%s/.netdata-spawn-%s.sock", runtime_directory, name); - } - else { - server->name = strdupz("unnamed"); - snprintf(path, sizeof(path), "%s/.netdata-spawn-%d-%zu.sock", runtime_directory, getpid(), server->id); - } - - server->path = strdupz(path); - - if (!spawn_server_create_listening_socket(server)) - goto cleanup; - - if (pipe(server->pipe) == -1) { - 
nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: Cannot create status pipe()"); - goto cleanup; - } - - pid_t pid = fork(); - if (pid == 0) { - // the child - the spawn server - - { - char buf[15]; - snprintfz(buf, sizeof(buf), "spawn-%s", server->name); - os_setproctitle(buf, server->argc, server->argv); - } - - replace_stdio_with_dev_null(); - os_close_all_non_std_open_fds_except((int[]){ server->sock, server->pipe[1] }, 2); - nd_log_reopen_log_files_for_spawn_server(); - spawn_server_event_loop(server); - } - else if (pid > 0) { - // the parent - server->server_pid = pid; - close(server->sock); server->sock = -1; - close(server->pipe[1]); server->pipe[1] = -1; - - struct status_report sr = { 0 }; - if (read(server->pipe[0], &sr, sizeof(sr)) != sizeof(sr)) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: cannot read() initial status report from spawn server"); - goto cleanup; - } - - if(sr.status != STATUS_REPORT_STARTED) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: server did not respond with success."); - goto cleanup; - } - - if(sr.started.pid != server->server_pid) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: server sent pid %d but we have created %d.", sr.started.pid, server->server_pid); - goto cleanup; - } - - return server; - } - - nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: Cannot fork()"); - -cleanup: - spawn_server_destroy(server); - return NULL; -} - -// -------------------------------------------------------------------------------------------------------------------- -// creating spawn server instances - -void spawn_server_exec_destroy(SPAWN_INSTANCE *instance) { - if(instance->child_pid) kill(instance->child_pid, SIGTERM); - if(instance->write_fd != -1) close(instance->write_fd); - if(instance->read_fd != -1) close(instance->read_fd); - if(instance->sock != -1) close(instance->sock); - freez(instance); -} - -int spawn_server_exec_wait(SPAWN_SERVER *server __maybe_unused, SPAWN_INSTANCE *instance) { - int rc = -1; - - // close the child pipes, to make it exit - if(instance->write_fd != -1) { close(instance->write_fd); instance->write_fd = -1; } - if(instance->read_fd != -1) { close(instance->read_fd); instance->read_fd = -1; } - - // get the result - struct status_report sr = { 0 }; - if(read(instance->sock, &sr, sizeof(sr)) != sizeof(sr)) - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "SPAWN PARENT: failed to read final status report for child %d, request %zu", - instance->child_pid, instance->request_id); - - else if(sr.magic != STATUS_REPORT_MAGIC) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "SPAWN PARENT: invalid final status report for child %d, request %zu (invalid magic %#x in response)", - instance->child_pid, instance->request_id, sr.magic); - } - else switch(sr.status) { - case STATUS_REPORT_EXITED: - rc = sr.exited.waitpid_status; - break; - - case STATUS_REPORT_STARTED: - case STATUS_REPORT_FAILED: - default: - errno = 0; - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "SPAWN PARENT: invalid status report to exec spawn request %zu for pid %d (status = %u)", - instance->request_id, instance->child_pid, sr.status); - break; - } - - instance->child_pid = 0; - spawn_server_exec_destroy(instance); - return rc; -} - -int spawn_server_exec_kill(SPAWN_SERVER *server, SPAWN_INSTANCE *instance) { - // kill the child, if it is still running - if(instance->child_pid) kill(instance->child_pid, SIGTERM); - return spawn_server_exec_wait(server, instance); -} - -SPAWN_INSTANCE* spawn_server_exec(SPAWN_SERVER *server, int stderr_fd, int custom_fd, const char **argv, const void 
*data, size_t data_size, SPAWN_INSTANCE_TYPE type) { - int pipe_stdin[2] = { -1, -1 }, pipe_stdout[2] = { -1, -1 }; - - SPAWN_INSTANCE *instance = callocz(1, sizeof(SPAWN_INSTANCE)); - instance->read_fd = -1; - instance->write_fd = -1; - - instance->sock = connect_to_spawn_server(server->path, true); - if(instance->sock == -1) - goto cleanup; - - if (pipe(pipe_stdin) == -1) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN PARENT: Cannot create stdin pipe()"); - goto cleanup; - } - - if (pipe(pipe_stdout) == -1) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN PARENT: Cannot create stdout pipe()"); - goto cleanup; - } - - SPAWN_REQUEST request = { - .request_id = __atomic_add_fetch(&server->request_id, 1, __ATOMIC_RELAXED), - .sock = instance->sock, - .fds = { - [0] = pipe_stdin[0], - [1] = pipe_stdout[1], - [2] = stderr_fd, - [3] = custom_fd, - }, - .environment = (const char **)environ, - .argv = argv, - .data = data, - .data_size = data_size, - .type = type - }; - - if(!spawn_server_send_request(&server->magic, &request)) - goto cleanup; - - close(pipe_stdin[0]); pipe_stdin[0] = -1; - instance->write_fd = pipe_stdin[1]; pipe_stdin[1] = -1; - - close(pipe_stdout[1]); pipe_stdout[1] = -1; - instance->read_fd = pipe_stdout[0]; pipe_stdout[0] = -1; - - // copy the request id to the instance - instance->request_id = request.request_id; - - struct status_report sr = { 0 }; - if(read(instance->sock, &sr, sizeof(sr)) != sizeof(sr)) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "SPAWN PARENT: Failed to exec spawn request %zu (cannot get initial status report)", - request.request_id); - goto cleanup; - } - - if(sr.magic != STATUS_REPORT_MAGIC) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "SPAWN PARENT: Failed to exec spawn request %zu (invalid magic %#x in response)", - request.request_id, sr.magic); - goto cleanup; - } - - switch(sr.status) { - case STATUS_REPORT_STARTED: - instance->child_pid = sr.started.pid; - return instance; - - case STATUS_REPORT_FAILED: - errno = sr.failed.err_no; - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "SPAWN PARENT: Failed to exec spawn request %zu (server reports failure, errno is updated)", - request.request_id); - errno = 0; - break; - - case STATUS_REPORT_EXITED: - errno = ENOEXEC; - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "SPAWN PARENT: Failed to exec spawn request %zu (server reports exit, errno is updated)", - request.request_id); - errno = 0; - break; - - default: - errno = 0; - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "SPAWN PARENT: Invalid status report to exec spawn request %zu (received invalid data)", - request.request_id); - break; - } - -cleanup: - if (pipe_stdin[0] >= 0) close(pipe_stdin[0]); - if (pipe_stdin[1] >= 0) close(pipe_stdin[1]); - if (pipe_stdout[0] >= 0) close(pipe_stdout[0]); - if (pipe_stdout[1] >= 0) close(pipe_stdout[1]); - spawn_server_exec_destroy(instance); - return NULL; -} - -#endif // !OS_WINDOWS diff --git a/src/libnetdata/spawn_server/spawn_server.h b/src/libnetdata/spawn_server/spawn_server.h index 5ba66ae38..e68a53ab4 100644 --- a/src/libnetdata/spawn_server/spawn_server.h +++ b/src/libnetdata/spawn_server/spawn_server.h @@ -7,16 +7,12 @@ typedef enum __attribute__((packed)) { SPAWN_INSTANCE_TYPE_EXEC = 0, -#if !defined(OS_WINDOWS) SPAWN_INSTANCE_TYPE_CALLBACK = 1 -#endif } SPAWN_INSTANCE_TYPE; typedef enum __attribute__((packed)) { SPAWN_SERVER_OPTION_EXEC = (1 << 0), -#if !defined(OS_WINDOWS) SPAWN_SERVER_OPTION_CALLBACK = (1 << 1), -#endif } SPAWN_SERVER_OPTIONS; // this is only used publicly for SPAWN_INSTANCE_TYPE_CALLBACK @@ -27,7 +23,7 @@ typedef 
struct spawn_request { pid_t pid; // the pid of the child int sock; // the socket for this request int fds[SPAWN_SERVER_TRANSFER_FDS]; // 0 = stdin, 1 = stdout, 2 = stderr, 3 = custom - const char **environment; // the environment of the parent process + const char **envp; // the environment of the parent process const char **argv; // the command line and its parameters const void *data; // the data structure for the callback size_t data_size; // the data structure size @@ -36,17 +32,18 @@ typedef struct spawn_request { struct spawn_request *prev, *next; // linking of active requests at the spawn server } SPAWN_REQUEST; -typedef void (*spawn_request_callback_t)(SPAWN_REQUEST *request); +typedef int (*spawn_request_callback_t)(SPAWN_REQUEST *request); -typedef struct spawm_instance SPAWN_INSTANCE; +typedef struct spawn_instance SPAWN_INSTANCE; typedef struct spawn_server SPAWN_SERVER; SPAWN_SERVER* spawn_server_create(SPAWN_SERVER_OPTIONS options, const char *name, spawn_request_callback_t child_callback, int argc, const char **argv); void spawn_server_destroy(SPAWN_SERVER *server); +pid_t spawn_server_pid(SPAWN_SERVER *server); SPAWN_INSTANCE* spawn_server_exec(SPAWN_SERVER *server, int stderr_fd, int custom_fd, const char **argv, const void *data, size_t data_size, SPAWN_INSTANCE_TYPE type); -int spawn_server_exec_kill(SPAWN_SERVER *server, SPAWN_INSTANCE *instance); -int spawn_server_exec_wait(SPAWN_SERVER *server, SPAWN_INSTANCE *instance); +int spawn_server_exec_kill(SPAWN_SERVER *server, SPAWN_INSTANCE *si); +int spawn_server_exec_wait(SPAWN_SERVER *server, SPAWN_INSTANCE *si); int spawn_server_instance_read_fd(SPAWN_INSTANCE *si); int spawn_server_instance_write_fd(SPAWN_INSTANCE *si); diff --git a/src/libnetdata/spawn_server/spawn_server_internals.h b/src/libnetdata/spawn_server/spawn_server_internals.h new file mode 100644 index 000000000..1031e3b1a --- /dev/null +++ b/src/libnetdata/spawn_server/spawn_server_internals.h @@ -0,0 +1,90 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_SPAWN_SERVER_INTERNALS_H +#define NETDATA_SPAWN_SERVER_INTERNALS_H + +#include "../libnetdata.h" +#include "spawn_server.h" +#include "spawn_library.h" +#include "log-forwarder.h" + +#if defined(OS_WINDOWS) +#define SPAWN_SERVER_VERSION_WINDOWS 1 +// #define SPAWN_SERVER_VERSION_UV 1 +// #define SPAWN_SERVER_VERSION_POSIX_SPAWN 1 +#else +#define SPAWN_SERVER_VERSION_NOFORK 1 +// #define SPAWN_SERVER_VERSION_UV 1 +// #define SPAWN_SERVER_VERSION_POSIX_SPAWN 1 +#endif + +struct spawn_server { + size_t id; + size_t request_id; + const char *name; + +#if defined(SPAWN_SERVER_VERSION_UV) + uv_loop_t *loop; + uv_thread_t thread; + uv_async_t async; + bool stopping; + + SPINLOCK spinlock; + struct work_item *work_queue; +#endif + +#if defined(SPAWN_SERVER_VERSION_NOFORK) + SPAWN_SERVER_OPTIONS options; + + ND_UUID magic; // for authorizing requests, the client needs to know our random UUID + // it is ignored for PING requests + + int pipe[2]; + int sock; // the listening socket of the server + pid_t server_pid; + char *path; + spawn_request_callback_t cb; + + int argc; + const char **argv; +#endif + +#if defined(SPAWN_SERVER_VERSION_POSIX_SPAWN) +#endif + +#if defined(SPAWN_SERVER_VERSION_WINDOWS) + LOG_FORWARDER *log_forwarder; +#endif +}; + +struct spawn_instance { + size_t request_id; + int sock; + int write_fd; // the child's input pipe, writing side + int read_fd; // the child's output pipe, reading side + int stderr_fd; + pid_t child_pid; + +#if defined(SPAWN_SERVER_VERSION_UV) + 
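+ // libuv backend only: on_process_exit() records the child's exit code here and posts 'sem', + // so spawn_server_exec_wait() can block on the semaphore until the event loop has reaped the child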
uv_process_t process; + int exit_code; + uv_sem_t sem; +#endif + +#if defined(SPAWN_SERVER_VERSION_NOFORK) +#endif + +#if defined(SPAWN_SERVER_VERSION_POSIX_SPAWN) + const char *cmdline; + bool exited; + int waitpid_status; + struct spawn_instance *prev, *next; +#endif + +#if defined(SPAWN_SERVER_VERSION_WINDOWS) + HANDLE process_handle; + DWORD dwProcessId; +#endif +}; + +#endif //NETDATA_SPAWN_SERVER_INTERNALS_H diff --git a/src/libnetdata/spawn_server/spawn_server_libuv.c b/src/libnetdata/spawn_server/spawn_server_libuv.c new file mode 100644 index 000000000..e01c5407e --- /dev/null +++ b/src/libnetdata/spawn_server/spawn_server_libuv.c @@ -0,0 +1,395 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "spawn_server_internals.h" + +#if defined(SPAWN_SERVER_VERSION_UV) + +int spawn_server_instance_read_fd(SPAWN_INSTANCE *si) { return si->read_fd; } +int spawn_server_instance_write_fd(SPAWN_INSTANCE *si) { return si->write_fd; } +void spawn_server_instance_read_fd_unset(SPAWN_INSTANCE *si) { si->read_fd = -1; } +void spawn_server_instance_write_fd_unset(SPAWN_INSTANCE *si) { si->write_fd = -1; } +pid_t spawn_server_instance_pid(SPAWN_INSTANCE *si) { return uv_process_get_pid(&si->process); } + +typedef struct work_item { + int stderr_fd; + const char **argv; + uv_sem_t sem; + SPAWN_INSTANCE *instance; + struct work_item *prev; + struct work_item *next; +} work_item; + +int uv_errno_to_errno(int uv_err) { + switch (uv_err) { + case 0: return 0; + case UV_E2BIG: return E2BIG; + case UV_EACCES: return EACCES; + case UV_EADDRINUSE: return EADDRINUSE; + case UV_EADDRNOTAVAIL: return EADDRNOTAVAIL; + case UV_EAFNOSUPPORT: return EAFNOSUPPORT; + case UV_EAGAIN: return EAGAIN; + case UV_EAI_ADDRFAMILY: return EAI_ADDRFAMILY; + case UV_EAI_AGAIN: return EAI_AGAIN; + case UV_EAI_BADFLAGS: return EAI_BADFLAGS; +#if defined(EAI_CANCELED) + case UV_EAI_CANCELED: return EAI_CANCELED; +#endif + case UV_EAI_FAIL: return EAI_FAIL; + case UV_EAI_FAMILY: return EAI_FAMILY; + case UV_EAI_MEMORY: return EAI_MEMORY; + case UV_EAI_NODATA: return EAI_NODATA; + case UV_EAI_NONAME: return EAI_NONAME; + case UV_EAI_OVERFLOW: return EAI_OVERFLOW; + case UV_EAI_SERVICE: return EAI_SERVICE; + case UV_EAI_SOCKTYPE: return EAI_SOCKTYPE; + case UV_EALREADY: return EALREADY; + case UV_EBADF: return EBADF; + case UV_EBUSY: return EBUSY; + case UV_ECANCELED: return ECANCELED; + case UV_ECHARSET: return EILSEQ; // No direct mapping, using EILSEQ + case UV_ECONNABORTED: return ECONNABORTED; + case UV_ECONNREFUSED: return ECONNREFUSED; + case UV_ECONNRESET: return ECONNRESET; + case UV_EDESTADDRREQ: return EDESTADDRREQ; + case UV_EEXIST: return EEXIST; + case UV_EFAULT: return EFAULT; + case UV_EFBIG: return EFBIG; + case UV_EHOSTUNREACH: return EHOSTUNREACH; + case UV_EINTR: return EINTR; + case UV_EINVAL: return EINVAL; + case UV_EIO: return EIO; + case UV_EISCONN: return EISCONN; + case UV_EISDIR: return EISDIR; + case UV_ELOOP: return ELOOP; + case UV_EMFILE: return EMFILE; + case UV_EMSGSIZE: return EMSGSIZE; + case UV_ENAMETOOLONG: return ENAMETOOLONG; + case UV_ENETDOWN: return ENETDOWN; + case UV_ENETUNREACH: return ENETUNREACH; + case UV_ENFILE: return ENFILE; + case UV_ENOBUFS: return ENOBUFS; + case UV_ENODEV: return ENODEV; + case UV_ENOENT: return ENOENT; + case UV_ENOMEM: return ENOMEM; + case UV_ENONET: return ENONET; + case UV_ENOSPC: return ENOSPC; + case UV_ENOSYS: return ENOSYS; + case UV_ENOTCONN: return ENOTCONN; + case UV_ENOTDIR: return ENOTDIR; + case UV_ENOTEMPTY: return ENOTEMPTY; + case 
UV_ENOTSOCK: return ENOTSOCK; + case UV_ENOTSUP: return ENOTSUP; + case UV_ENOTTY: return ENOTTY; + case UV_ENXIO: return ENXIO; + case UV_EPERM: return EPERM; + case UV_EPIPE: return EPIPE; + case UV_EPROTO: return EPROTO; + case UV_EPROTONOSUPPORT: return EPROTONOSUPPORT; + case UV_EPROTOTYPE: return EPROTOTYPE; + case UV_ERANGE: return ERANGE; + case UV_EROFS: return EROFS; + case UV_ESHUTDOWN: return ESHUTDOWN; + case UV_ESPIPE: return ESPIPE; + case UV_ESRCH: return ESRCH; + case UV_ETIMEDOUT: return ETIMEDOUT; + case UV_ETXTBSY: return ETXTBSY; + case UV_EXDEV: return EXDEV; + default: return EINVAL; // Use EINVAL for unknown libuv errors + } +} + +static void posix_unmask_sigchld_on_thread(void) { + sigset_t sigset; + sigemptyset(&sigset); // Initialize the signal set to empty + sigaddset(&sigset, SIGCHLD); // Add SIGCHLD to the set + + if(pthread_sigmask(SIG_UNBLOCK, &sigset, NULL) != 0) + netdata_log_error("SPAWN SERVER: cannot unmask SIGCHLD"); +} + +static void server_thread(void *arg) { + SPAWN_SERVER *server = (SPAWN_SERVER *)arg; + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "SPAWN SERVER: started"); + + // this thread needs to process SIGCHLD (by libuv) + // otherwise the on_exit() callback is never run + posix_unmask_sigchld_on_thread(); + + // run the event loop + uv_run(server->loop, UV_RUN_DEFAULT); + + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "SPAWN SERVER: ended"); +} + +static void on_process_exit(uv_process_t *req, int64_t exit_status, int term_signal) { + SPAWN_INSTANCE *si = (SPAWN_INSTANCE *)req->data; + si->exit_code = (int)(term_signal ? term_signal : exit_status << 8); + uv_close((uv_handle_t *)req, NULL); // Properly close the process handle + + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "SPAWN SERVER: process with pid %d exited with code %d and term_signal %d", + si->child_pid, (int)exit_status, term_signal); + + uv_sem_post(&si->sem); // Signal that the process has exited +} + +static SPAWN_INSTANCE *spawn_process_with_libuv(uv_loop_t *loop, int stderr_fd, const char **argv) { + SPAWN_INSTANCE *si = NULL; + bool si_sem_init = false; + + int stdin_pipe[2] = { -1, -1 }; + int stdout_pipe[2] = { -1, -1 }; + + if (pipe(stdin_pipe) == -1) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: stdin pipe() failed"); + goto cleanup; + } + + if (pipe(stdout_pipe) == -1) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: stdout pipe() failed"); + goto cleanup; + } + + si = callocz(1, sizeof(SPAWN_INSTANCE)); + si->exit_code = -1; + + if (uv_sem_init(&si->sem, 0)) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: uv_sem_init() failed"); + goto cleanup; + } + si_sem_init = true; + + uv_stdio_container_t stdio[3] = { 0 }; + stdio[0].flags = UV_INHERIT_FD; + stdio[0].data.fd = stdin_pipe[PIPE_READ]; + stdio[1].flags = UV_INHERIT_FD; + stdio[1].data.fd = stdout_pipe[PIPE_WRITE]; + stdio[2].flags = UV_INHERIT_FD; + stdio[2].data.fd = stderr_fd; + + uv_process_options_t options = { 0 }; + options.stdio_count = 3; + options.stdio = stdio; + options.exit_cb = on_process_exit; + options.file = argv[0]; + options.args = (char **)argv; + options.env = (char **)environ; + + // uv_spawn() does not close all other open file descriptors + // we have to close them manually + int fds[3] = { stdio[0].data.fd, stdio[1].data.fd, stdio[2].data.fd }; + os_close_all_non_std_open_fds_except(fds, 3, CLOSE_RANGE_CLOEXEC); + + int rc = uv_spawn(loop, &si->process, &options); + if (rc) { + errno = uv_errno_to_errno(rc); + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "SPAWN SERVER: uv_spawn() failed with error %s, %s", + 
uv_err_name(rc), uv_strerror(rc)); + goto cleanup; + } + + // Successfully spawned + + // get the pid of the process spawned + si->child_pid = uv_process_get_pid(&si->process); + + // on_process_exit() needs this to find the si + si->process.data = si; + + nd_log(NDLS_COLLECTORS, NDLP_INFO, + "SPAWN SERVER: process created with pid %d", si->child_pid); + + // close the child sides of the pipes + close(stdin_pipe[PIPE_READ]); + si->write_fd = stdin_pipe[PIPE_WRITE]; + si->read_fd = stdout_pipe[PIPE_READ]; + close(stdout_pipe[PIPE_WRITE]); + + return si; + +cleanup: + if(stdin_pipe[PIPE_READ] != -1) close(stdin_pipe[PIPE_READ]); + if(stdin_pipe[PIPE_WRITE] != -1) close(stdin_pipe[PIPE_WRITE]); + if(stdout_pipe[PIPE_READ] != -1) close(stdout_pipe[PIPE_READ]); + if(stdout_pipe[PIPE_WRITE] != -1) close(stdout_pipe[PIPE_WRITE]); + if(si) { + if(si_sem_init) + uv_sem_destroy(&si->sem); + + freez(si); + } + return NULL; +} + +static void async_callback(uv_async_t *handle) { + nd_log(NDLS_COLLECTORS, NDLP_INFO, "SPAWN SERVER: dequeue commands started"); + SPAWN_SERVER *server = (SPAWN_SERVER *)handle->data; + + // Check if the server is stopping + if (__atomic_load_n(&server->stopping, __ATOMIC_RELAXED)) { + nd_log(NDLS_COLLECTORS, NDLP_INFO, "SPAWN SERVER: stopping..."); + uv_stop(server->loop); + return; + } + + work_item *item; + spinlock_lock(&server->spinlock); + while (server->work_queue) { + item = server->work_queue; + DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(server->work_queue, item, prev, next); + spinlock_unlock(&server->spinlock); + + item->instance = spawn_process_with_libuv(server->loop, item->stderr_fd, item->argv); + uv_sem_post(&item->sem); + + spinlock_lock(&server->spinlock); + } + spinlock_unlock(&server->spinlock); + + nd_log(NDLS_COLLECTORS, NDLP_INFO, "SPAWN SERVER: dequeue commands done"); +} + + +SPAWN_SERVER* spawn_server_create(SPAWN_SERVER_OPTIONS options __maybe_unused, const char *name, spawn_request_callback_t cb __maybe_unused, int argc __maybe_unused, const char **argv __maybe_unused) { + SPAWN_SERVER* server = callocz(1, sizeof(SPAWN_SERVER)); + spinlock_init(&server->spinlock); + + if (name) + server->name = strdupz(name); + else + server->name = strdupz("unnamed"); + + server->loop = callocz(1, sizeof(uv_loop_t)); + if (uv_loop_init(server->loop)) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN PARENT: uv_loop_init() failed"); + freez(server->loop); + freez((void *)server->name); + freez(server); + return NULL; + } + + if (uv_async_init(server->loop, &server->async, async_callback)) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN PARENT: uv_async_init() failed"); + uv_loop_close(server->loop); + freez(server->loop); + freez((void *)server->name); + freez(server); + return NULL; + } + server->async.data = server; + + if (uv_thread_create(&server->thread, server_thread, server)) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN PARENT: uv_thread_create() failed"); + uv_close((uv_handle_t*)&server->async, NULL); + uv_loop_close(server->loop); + freez(server->loop); + freez((void *)server->name); + freez(server); + return NULL; + } + + return server; +} + +static void close_handle(uv_handle_t* handle, void* arg __maybe_unused) { + if (!uv_is_closing(handle)) { + uv_close(handle, NULL); + } +} + +void spawn_server_destroy(SPAWN_SERVER *server) { + if (!server) return; + + __atomic_store_n(&server->stopping, true, __ATOMIC_RELAXED); + + // Trigger the async callback to stop the event loop + uv_async_send(&server->async); + + // Wait for the server thread to finish + 
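+ // the uv_async_send() above wakes the loop thread; async_callback() sees the 'stopping' flag + // and calls uv_stop(), so uv_run() in server_thread() returns and the join below can complete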
uv_thread_join(&server->thread); + + uv_stop(server->loop); + uv_close((uv_handle_t*)&server->async, NULL); + + // Walk through and close any remaining handles + uv_walk(server->loop, close_handle, NULL); + + uv_loop_close(server->loop); + freez(server->loop); + freez((void *)server->name); + freez(server); +} + +SPAWN_INSTANCE* spawn_server_exec(SPAWN_SERVER *server, int stderr_fd __maybe_unused, int custom_fd __maybe_unused, const char **argv, const void *data __maybe_unused, size_t data_size __maybe_unused, SPAWN_INSTANCE_TYPE type) { + if (type != SPAWN_INSTANCE_TYPE_EXEC) + return NULL; + + work_item item = { 0 }; + item.stderr_fd = stderr_fd; + item.argv = argv; + + if (uv_sem_init(&item.sem, 0)) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN PARENT: uv_sem_init() failed"); + return NULL; + } + + spinlock_lock(&server->spinlock); + // item is in the stack, but the server will remove it before sending to us + // the semaphore, so it is safe to have the item in the stack. + work_item *item_ptr = &item; + DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(server->work_queue, item_ptr, prev, next); + spinlock_unlock(&server->spinlock); + + uv_async_send(&server->async); + + nd_log(NDLS_COLLECTORS, NDLP_INFO, "SPAWN PARENT: queued command"); + + // Wait for the command to be executed + uv_sem_wait(&item.sem); + uv_sem_destroy(&item.sem); + + if (!item.instance) { + nd_log(NDLS_COLLECTORS, NDLP_INFO, "SPAWN PARENT: process failed to be started"); + return NULL; + } + + nd_log(NDLS_COLLECTORS, NDLP_INFO, "SPAWN PARENT: process started"); + + return item.instance; +} + +int spawn_server_exec_kill(SPAWN_SERVER *server __maybe_unused, SPAWN_INSTANCE *si) { + if(!si) return -1; + + // close all pipe descriptors to force the child to exit + if(si->read_fd != -1) { close(si->read_fd); si->read_fd = -1; } + if(si->write_fd != -1) { close(si->write_fd); si->write_fd = -1; } + + if (uv_process_kill(&si->process, SIGTERM)) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN PARENT: uv_process_kill() failed"); + return -1; + } + + return spawn_server_exec_wait(server, si); +} + +int spawn_server_exec_wait(SPAWN_SERVER *server __maybe_unused, SPAWN_INSTANCE *si) { + if (!si) return -1; + + // close all pipe descriptors to force the child to exit + if(si->read_fd != -1) { close(si->read_fd); si->read_fd = -1; } + if(si->write_fd != -1) { close(si->write_fd); si->write_fd = -1; } + + // Wait for the process to exit + uv_sem_wait(&si->sem); + int exit_code = si->exit_code; + + uv_sem_destroy(&si->sem); + freez(si); + return exit_code; +} + +#endif diff --git a/src/libnetdata/spawn_server/spawn_server_nofork.c b/src/libnetdata/spawn_server/spawn_server_nofork.c new file mode 100644 index 000000000..9986740de --- /dev/null +++ b/src/libnetdata/spawn_server/spawn_server_nofork.c @@ -0,0 +1,1308 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "spawn_server_internals.h" + +#if defined(SPAWN_SERVER_VERSION_NOFORK) + +// the child's output pipe, reading side +int spawn_server_instance_read_fd(SPAWN_INSTANCE *si) { return si->read_fd; } + +// the child's input pipe, writing side +int spawn_server_instance_write_fd(SPAWN_INSTANCE *si) { return si->write_fd; } + +void spawn_server_instance_read_fd_unset(SPAWN_INSTANCE *si) { si->read_fd = -1; } +void spawn_server_instance_write_fd_unset(SPAWN_INSTANCE *si) { si->write_fd = -1; } +pid_t spawn_server_instance_pid(SPAWN_INSTANCE *si) { return si->child_pid; } + +pid_t spawn_server_pid(SPAWN_SERVER *server) { return server->server_pid; } + +#ifdef __APPLE__ +#include <crt_externs.h> +#define
environ (*_NSGetEnviron()) +#else +extern char **environ; +#endif + +static size_t spawn_server_id = 0; +static volatile bool spawn_server_exit = false; +static volatile bool spawn_server_sigchld = false; +static SPAWN_REQUEST *spawn_server_requests = NULL; + +// -------------------------------------------------------------------------------------------------------------------- + +static int connect_to_spawn_server(const char *path, bool log) { + int sock = -1; + + if ((sock = socket(AF_UNIX, SOCK_STREAM, 0)) == -1) { + if(log) + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN PARENT: cannot create socket() to connect to spawn server."); + return -1; + } + + struct sockaddr_un server_addr = { + .sun_family = AF_UNIX, + }; + strcpy(server_addr.sun_path, path); + + if (connect(sock, (struct sockaddr *)&server_addr, sizeof(server_addr)) == -1) { + if(log) + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN PARENT: Cannot connect() to spawn server on path '%s'.", path); + close(sock); + return -1; + } + + return sock; +} + +// -------------------------------------------------------------------------------------------------------------------- +// Encoding and decoding of spawn server request argv type of data + +// Function to encode argv or envp +static void* argv_encode(const char **argv, size_t *out_size) { + size_t buffer_size = 1024; // Initial buffer size + size_t buffer_used = 0; + char *buffer = mallocz(buffer_size); + + if(argv) { + for (const char **p = argv; *p != NULL; p++) { + if (strlen(*p) == 0) + continue; // Skip empty strings + + size_t len = strlen(*p) + 1; + size_t wanted_size = buffer_used + len + 1; + + if (wanted_size >= buffer_size) { + buffer_size *= 2; + + if(buffer_size < wanted_size) + buffer_size = wanted_size; + + buffer = reallocz(buffer, buffer_size); + } + + memcpy(&buffer[buffer_used], *p, len); + buffer_used += len; + } + } + + buffer[buffer_used++] = '\0'; // Final empty string + *out_size = buffer_used; + + return buffer; +} + +// Function to decode argv or envp +static const char** argv_decode(const char *buffer, size_t size) { + size_t count = 0; + const char *ptr = buffer; + while (ptr < buffer + size) { + if(ptr && *ptr) { + count++; + ptr += strlen(ptr) + 1; + } + else + break; + } + + const char **argv = mallocz((count + 1) * sizeof(char *)); + + ptr = buffer; + for (size_t i = 0; i < count; i++) { + argv[i] = ptr; + ptr += strlen(ptr) + 1; + } + argv[count] = NULL; // Null-terminate the array + + return argv; +} + +// -------------------------------------------------------------------------------------------------------------------- +// status reports + +typedef enum __attribute__((packed)) { + STATUS_REPORT_NONE = 0, + STATUS_REPORT_STARTED, + STATUS_REPORT_FAILED, + STATUS_REPORT_EXITED, + STATUS_REPORT_PING, +} STATUS_REPORT; + +#define STATUS_REPORT_MAGIC 0xBADA55EE + +struct status_report { + uint32_t magic; + STATUS_REPORT status; + union { + struct { + pid_t pid; + } started; + + struct { + int err_no; + } failed; + + struct { + int waitpid_status; + } exited; + }; +}; + +static void spawn_server_send_status_ping(int sock) { + struct status_report sr = { + .magic = STATUS_REPORT_MAGIC, + .status = STATUS_REPORT_PING, + }; + + if(write(sock, &sr, sizeof(sr)) != sizeof(sr)) + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "SPAWN SERVER: Cannot send ping reply."); +} + +static void spawn_server_send_status_success(SPAWN_REQUEST *rq) { + const struct status_report sr = { + .magic = STATUS_REPORT_MAGIC, + .status = STATUS_REPORT_STARTED, + .started = { + .pid = rq->pid, + }, 
+ }; + + if(write(rq->sock, &sr, sizeof(sr)) != sizeof(sr)) + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "SPAWN SERVER: Cannot send success status report for pid %d, request %zu: %s", + rq->pid, rq->request_id, rq->cmdline); +} + +static void spawn_server_send_status_failure(SPAWN_REQUEST *rq) { + struct status_report sr = { + .magic = STATUS_REPORT_MAGIC, + .status = STATUS_REPORT_FAILED, + .failed = { + .err_no = errno, + }, + }; + + if(write(rq->sock, &sr, sizeof(sr)) != sizeof(sr)) + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "SPAWN SERVER: Cannot send failure status report for request %zu: %s", + rq->request_id, rq->cmdline); +} + +static void spawn_server_send_status_exit(SPAWN_REQUEST *rq, int waitpid_status) { + struct status_report sr = { + .magic = STATUS_REPORT_MAGIC, + .status = STATUS_REPORT_EXITED, + .exited = { + .waitpid_status = waitpid_status, + }, + }; + + if(write(rq->sock, &sr, sizeof(sr)) != sizeof(sr)) + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "SPAWN SERVER: Cannot send exit status (%d) report for pid %d, request %zu: %s", + waitpid_status, rq->pid, rq->request_id, rq->cmdline); +} + +// -------------------------------------------------------------------------------------------------------------------- +// execute a received request + +static void request_free(SPAWN_REQUEST *rq) { + if(rq->fds[0] != -1) close(rq->fds[0]); + if(rq->fds[1] != -1) close(rq->fds[1]); + if(rq->fds[2] != -1) close(rq->fds[2]); + if(rq->fds[3] != -1) close(rq->fds[3]); + if(rq->sock != -1) close(rq->sock); + freez((void *)rq->argv); + freez((void *)rq->envp); + freez((void *)rq->data); + freez((void *)rq->cmdline); + freez((void *)rq); +} + +static bool spawn_external_command(SPAWN_SERVER *server __maybe_unused, SPAWN_REQUEST *rq) { + // Close custom_fd - it is not needed for exec mode + if(rq->fds[3] != -1) { close(rq->fds[3]); rq->fds[3] = -1; } + + if(!rq->argv) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN PARENT: there is no argv pointer to exec"); + return false; + } + + if(rq->fds[0] == -1 || rq->fds[1] == -1 || rq->fds[2] == -1) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN PARENT: stdio fds are missing from the request"); + return false; + } + + CLEAN_BUFFER *wb = argv_to_cmdline_buffer(rq->argv); + rq->cmdline = strdupz(buffer_tostring(wb)); + + posix_spawn_file_actions_t file_actions; + if (posix_spawn_file_actions_init(&file_actions) != 0) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN PARENT: posix_spawn_file_actions_init() failed: %s", rq->cmdline); + return false; + } + + posix_spawn_file_actions_adddup2(&file_actions, rq->fds[0], STDIN_FILENO); + posix_spawn_file_actions_adddup2(&file_actions, rq->fds[1], STDOUT_FILENO); + posix_spawn_file_actions_adddup2(&file_actions, rq->fds[2], STDERR_FILENO); + posix_spawn_file_actions_addclose(&file_actions, rq->fds[0]); + posix_spawn_file_actions_addclose(&file_actions, rq->fds[1]); + posix_spawn_file_actions_addclose(&file_actions, rq->fds[2]); + + posix_spawnattr_t attr; + if (posix_spawnattr_init(&attr) != 0) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN PARENT: posix_spawnattr_init() failed: %s", rq->cmdline); + posix_spawn_file_actions_destroy(&file_actions); + return false; + } + + // Set the flags to reset the signal mask and signal actions + sigset_t empty_mask; + sigemptyset(&empty_mask); + if (posix_spawnattr_setsigmask(&attr, &empty_mask) != 0) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN PARENT: posix_spawnattr_setsigmask() failed: %s", rq->cmdline); + posix_spawn_file_actions_destroy(&file_actions); + posix_spawnattr_destroy(&attr); + return 
false; + } + + short flags = POSIX_SPAWN_SETSIGMASK | POSIX_SPAWN_SETSIGDEF; + if (posix_spawnattr_setflags(&attr, flags) != 0) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN PARENT: posix_spawnattr_setflags() failed: %s", rq->cmdline); + posix_spawn_file_actions_destroy(&file_actions); + posix_spawnattr_destroy(&attr); + return false; + } + + int fds_to_keep[] = { + rq->fds[0], + rq->fds[1], + rq->fds[2], + nd_log_systemd_journal_fd(), + }; + os_close_all_non_std_open_fds_except(fds_to_keep, _countof(fds_to_keep), CLOSE_RANGE_CLOEXEC); + + errno_clear(); + if (posix_spawn(&rq->pid, rq->argv[0], &file_actions, &attr, (char * const *)rq->argv, (char * const *)rq->envp) != 0) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: posix_spawn() failed: %s", rq->cmdline); + + posix_spawnattr_destroy(&attr); + posix_spawn_file_actions_destroy(&file_actions); + return false; + } + + // Destroy the posix_spawnattr_t and posix_spawn_file_actions_t structures + posix_spawnattr_destroy(&attr); + posix_spawn_file_actions_destroy(&file_actions); + + // Close the read end of the stdin pipe and the write end of the stdout pipe in the parent process + close(rq->fds[0]); rq->fds[0] = -1; + close(rq->fds[1]); rq->fds[1] = -1; + close(rq->fds[2]); rq->fds[2] = -1; + + nd_log(NDLS_COLLECTORS, NDLP_DEBUG, "SPAWN SERVER: process created with pid %d: %s", rq->pid, rq->cmdline); + return true; +} + +static bool spawn_server_run_callback(SPAWN_SERVER *server __maybe_unused, SPAWN_REQUEST *rq) { + rq->cmdline = strdupz("callback() function"); + + if(server->cb == NULL) { + errno = ENOSYS; + return false; + } + + pid_t pid = fork(); + if (pid < 0) { + // fork failed + + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: Failed to fork() child for callback."); + return false; + } + else if (pid == 0) { + // the child + + // close the server sockets; + close(server->sock); server->sock = -1; + if(server->pipe[0] != -1) { close(server->pipe[0]); server->pipe[0] = -1; } + if(server->pipe[1] != -1) { close(server->pipe[1]); server->pipe[1] = -1; } + + // set the process name + os_setproctitle("spawn-callback", server->argc, server->argv); + + // close all open file descriptors of the parent, but keep ours + int fds_to_keep[] = { + rq->fds[0], + rq->fds[1], + rq->fds[2], + rq->fds[3], + nd_log_systemd_journal_fd(), + }; + os_close_all_non_std_open_fds_except(fds_to_keep, _countof(fds_to_keep), 0); + nd_log_reopen_log_files_for_spawn_server("spawn-callback"); + + // get the fds from the request + int stdin_fd = rq->fds[0]; + int stdout_fd = rq->fds[1]; + int stderr_fd = rq->fds[2]; + int custom_fd = rq->fds[3]; (void)custom_fd; + + // change stdio fds to the ones in the request + if (dup2(stdin_fd, STDIN_FILENO) == -1) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "SPAWN SERVER: cannot dup2(%d) stdin of request No %zu: %s", + stdin_fd, rq->request_id, rq->cmdline); + exit(EXIT_FAILURE); + } + if (dup2(stdout_fd, STDOUT_FILENO) == -1) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "SPAWN SERVER: cannot dup2(%d) stdout of request No %zu: %s", + stdout_fd, rq->request_id, rq->cmdline); + exit(EXIT_FAILURE); + } + if (dup2(stderr_fd, STDERR_FILENO) == -1) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "SPAWN SERVER: cannot dup2(%d) stderr of request No %zu: %s", + stderr_fd, rq->request_id, rq->cmdline); + exit(EXIT_FAILURE); + } + + // close the excess fds + close(stdin_fd); stdin_fd = rq->fds[0] = STDIN_FILENO; + close(stdout_fd); stdout_fd = rq->fds[1] = STDOUT_FILENO; + close(stderr_fd); stderr_fd = rq->fds[2] = STDERR_FILENO; + + // overwrite the
process environment + environ = (char **)rq->envp; + + // run the callback and return its code + exit(server->cb(rq)); + } + + // the parent + rq->pid = pid; + + return true; +} + +static void spawn_server_execute_request(SPAWN_SERVER *server, SPAWN_REQUEST *rq) { + bool done; + switch(rq->type) { + case SPAWN_INSTANCE_TYPE_EXEC: + done = spawn_external_command(server, rq); + break; + + case SPAWN_INSTANCE_TYPE_CALLBACK: + done = spawn_server_run_callback(server, rq); + break; + + default: + errno = EINVAL; + done = false; + break; + } + + if(!done) { + spawn_server_send_status_failure(rq); + request_free(rq); + return; + } + + // let the parent know + spawn_server_send_status_success(rq); + + // do not keep data we don't need at the parent + freez((void *)rq->envp); rq->envp = NULL; + freez((void *)rq->argv); rq->argv = NULL; + freez((void *)rq->data); rq->data = NULL; + rq->data_size = 0; + + // do not keep fds we don't need at the parent + if(rq->fds[0] != -1) { close(rq->fds[0]); rq->fds[0] = -1; } + if(rq->fds[1] != -1) { close(rq->fds[1]); rq->fds[1] = -1; } + if(rq->fds[2] != -1) { close(rq->fds[2]); rq->fds[2] = -1; } + if(rq->fds[3] != -1) { close(rq->fds[3]); rq->fds[3] = -1; } + + // keep it in the list + DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(spawn_server_requests, rq, prev, next); +} + +// -------------------------------------------------------------------------------------------------------------------- +// Sending and receiving requests + +typedef enum __attribute__((packed)) { + SPAWN_SERVER_MSG_INVALID = 0, + SPAWN_SERVER_MSG_REQUEST, + SPAWN_SERVER_MSG_PING, +} SPAWN_SERVER_MSG; + +static bool spawn_server_is_running(const char *path) { + struct msghdr msg = {0}; + struct iovec iov[7]; + SPAWN_SERVER_MSG msg_type = SPAWN_SERVER_MSG_PING; + size_t dummy_size = 0; + SPAWN_INSTANCE_TYPE dummy_type = 0; + ND_UUID magic = UUID_ZERO; + char cmsgbuf[CMSG_SPACE(sizeof(int))]; + + iov[0].iov_base = &msg_type; + iov[0].iov_len = sizeof(msg_type); + + iov[1].iov_base = magic.uuid; + iov[1].iov_len = sizeof(magic.uuid); + + iov[2].iov_base = &dummy_size; + iov[2].iov_len = sizeof(dummy_size); + + iov[3].iov_base = &dummy_size; + iov[3].iov_len = sizeof(dummy_size); + + iov[4].iov_base = &dummy_size; + iov[4].iov_len = sizeof(dummy_size); + + iov[5].iov_base = &dummy_size; + iov[5].iov_len = sizeof(dummy_size); + + iov[6].iov_base = &dummy_type; + iov[6].iov_len = sizeof(dummy_type); + + msg.msg_iov = iov; + msg.msg_iovlen = 7; + msg.msg_control = cmsgbuf; + msg.msg_controllen = sizeof(cmsgbuf); + + int sock = connect_to_spawn_server(path, false); + if(sock == -1) + return false; + + int rc = sendmsg(sock, &msg, 0); + if (rc < 0) { + // cannot send the message + close(sock); + return false; + } + + // Receive response + struct status_report sr = { 0 }; + if (read(sock, &sr, sizeof(sr)) != sizeof(sr)) { + // cannot receive a ping reply + close(sock); + return false; + } + + close(sock); + return sr.status == STATUS_REPORT_PING; +} + +static bool spawn_server_send_request(ND_UUID *magic, SPAWN_REQUEST *request) { + bool ret = false; + + size_t env_size = 0; + void *encoded_env = argv_encode(request->envp, &env_size); + if (!encoded_env) + goto cleanup; + + size_t argv_size = 0; + void *encoded_argv = argv_encode(request->argv, &argv_size); + if (!encoded_argv) + goto cleanup; + + struct msghdr msg = {0}; + struct cmsghdr *cmsg; + SPAWN_SERVER_MSG msg_type = SPAWN_SERVER_MSG_REQUEST; + char cmsgbuf[CMSG_SPACE(sizeof(int) * SPAWN_SERVER_TRANSFER_FDS)]; + struct iovec iov[11]; + + + // We 
send 1 request with 10 iovec in it + // The request will be received in 2 parts + // 1. the first 6 iovec which include the sizes of the memory allocations required + // 2. the last 4 iovec which require the memory allocations to be received + + iov[0].iov_base = &msg_type; + iov[0].iov_len = sizeof(msg_type); + + iov[1].iov_base = magic->uuid; + iov[1].iov_len = sizeof(magic->uuid); + + iov[2].iov_base = &request->request_id; + iov[2].iov_len = sizeof(request->request_id); + + iov[3].iov_base = &env_size; + iov[3].iov_len = sizeof(env_size); + + iov[4].iov_base = &argv_size; + iov[4].iov_len = sizeof(argv_size); + + iov[5].iov_base = &request->data_size; + iov[5].iov_len = sizeof(request->data_size); + + iov[6].iov_base = &request->type; // Added this line + iov[6].iov_len = sizeof(request->type); + + iov[7].iov_base = encoded_env; + iov[7].iov_len = env_size; + + iov[8].iov_base = encoded_argv; + iov[8].iov_len = argv_size; + + iov[9].iov_base = (char *)request->data; + iov[9].iov_len = request->data_size; + + iov[10].iov_base = NULL; + iov[10].iov_len = 0; + + msg.msg_iov = iov; + msg.msg_iovlen = 11; + msg.msg_control = cmsgbuf; + msg.msg_controllen = CMSG_SPACE(sizeof(int) * SPAWN_SERVER_TRANSFER_FDS); + + cmsg = CMSG_FIRSTHDR(&msg); + cmsg->cmsg_level = SOL_SOCKET; + cmsg->cmsg_type = SCM_RIGHTS; + cmsg->cmsg_len = CMSG_LEN(sizeof(int) * SPAWN_SERVER_TRANSFER_FDS); + + memcpy(CMSG_DATA(cmsg), request->fds, sizeof(int) * SPAWN_SERVER_TRANSFER_FDS); + + int rc = sendmsg(request->sock, &msg, 0); + + if (rc < 0) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN PARENT: Failed to sendmsg() request to spawn server using socket %d.", request->sock); + goto cleanup; + } + else { + ret = true; + // fprintf(stderr, "PARENT: sent request %zu on socket %d (fds: %d, %d, %d, %d) from tid %d\n", + // request->request_id, request->socket, request->fds[0], request->fds[1], request->fds[2], request->fds[3], os_gettid()); + } + +cleanup: + freez(encoded_env); + freez(encoded_argv); + return ret; +} + +static void spawn_server_receive_request(int sock, SPAWN_SERVER *server) { + struct msghdr msg = {0}; + struct iovec iov[7]; + SPAWN_SERVER_MSG msg_type = SPAWN_SERVER_MSG_INVALID; + size_t request_id; + size_t env_size; + size_t argv_size; + size_t data_size; + ND_UUID magic = UUID_ZERO; + SPAWN_INSTANCE_TYPE type; + char cmsgbuf[CMSG_SPACE(sizeof(int) * SPAWN_SERVER_TRANSFER_FDS)]; + char *envp_encoded = NULL, *argv_encoded = NULL, *data = NULL; + int stdin_fd = -1, stdout_fd = -1, stderr_fd = -1, custom_fd = -1; + + // First recvmsg() to read sizes and control message + iov[0].iov_base = &msg_type; + iov[0].iov_len = sizeof(msg_type); + + iov[1].iov_base = magic.uuid; + iov[1].iov_len = sizeof(magic.uuid); + + iov[2].iov_base = &request_id; + iov[2].iov_len = sizeof(request_id); + + iov[3].iov_base = &env_size; + iov[3].iov_len = sizeof(env_size); + + iov[4].iov_base = &argv_size; + iov[4].iov_len = sizeof(argv_size); + + iov[5].iov_base = &data_size; + iov[5].iov_len = sizeof(data_size); + + iov[6].iov_base = &type; + iov[6].iov_len = sizeof(type); + + msg.msg_iov = iov; + msg.msg_iovlen = 7; + msg.msg_control = cmsgbuf; + msg.msg_controllen = sizeof(cmsgbuf); + + if (recvmsg(sock, &msg, 0) < 0) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "SPAWN SERVER: failed to recvmsg() the first part of the request."); + close(sock); + return; + } + + if(msg_type == SPAWN_SERVER_MSG_PING) { + spawn_server_send_status_ping(sock); + close(sock); + return; + } + + if(!UUIDeq(magic, server->magic)) { + nd_log(NDLS_COLLECTORS, 
NDLP_ERR, + "SPAWN SERVER: Invalid authorization key for request %zu. " + "Rejecting request.", + request_id); + close(sock); + return; + } + + if(type == SPAWN_INSTANCE_TYPE_EXEC && !(server->options & SPAWN_SERVER_OPTION_EXEC)) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "SPAWN SERVER: Request %zu wants to exec, but exec is not allowed for this spawn server. " + "Rejecting request.", + request_id); + close(sock); + return; + } + + if(type == SPAWN_INSTANCE_TYPE_CALLBACK && !(server->options & SPAWN_SERVER_OPTION_CALLBACK)) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "SPAWN SERVER: Request %zu wants to run a callback, but callbacks are not allowed for this spawn server. " + "Rejecting request.", + request_id); + close(sock); + return; + } + + // Extract file descriptors from control message + struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg); + if (cmsg == NULL || cmsg->cmsg_len != CMSG_LEN(sizeof(int) * SPAWN_SERVER_TRANSFER_FDS)) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "SPAWN SERVER: Received invalid control message (expected %zu bytes, received %zu bytes)", + CMSG_LEN(sizeof(int) * SPAWN_SERVER_TRANSFER_FDS), cmsg?cmsg->cmsg_len:0); + close(sock); + return; + } + + if (cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: Received unexpected control message type."); + close(sock); + return; + } + + int *fds = (int *)CMSG_DATA(cmsg); + stdin_fd = fds[0]; + stdout_fd = fds[1]; + stderr_fd = fds[2]; + custom_fd = fds[3]; + + if (stdin_fd < 0 || stdout_fd < 0 || stderr_fd < 0) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "SPAWN SERVER: invalid file descriptors received, stdin = %d, stdout = %d, stderr = %d", + stdin_fd, stdout_fd, stderr_fd); + goto cleanup; + } + + // Second recvmsg() to read buffer contents + iov[0].iov_base = envp_encoded = mallocz(env_size); + iov[0].iov_len = env_size; + iov[1].iov_base = argv_encoded = mallocz(argv_size); + iov[1].iov_len = argv_size; + iov[2].iov_base = data = mallocz(data_size); + iov[2].iov_len = data_size; + + msg.msg_iov = iov; + msg.msg_iovlen = 3; + msg.msg_control = NULL; + msg.msg_controllen = 0; + + ssize_t total_bytes_received = recvmsg(sock, &msg, 0); + if (total_bytes_received < 0) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: failed to recvmsg() the second part of the request."); + goto cleanup; + } + + // fprintf(stderr, "SPAWN SERVER: received request %zu (fds: %d, %d, %d, %d)\n", request_id, + // stdin_fd, stdout_fd, stderr_fd, custom_fd); + + SPAWN_REQUEST *rq = mallocz(sizeof(*rq)); + *rq = (SPAWN_REQUEST){ + .pid = 0, + .request_id = request_id, + .sock = sock, + .fds = { + [0] = stdin_fd, + [1] = stdout_fd, + [2] = stderr_fd, + [3] = custom_fd, + }, + .envp = argv_decode(envp_encoded, env_size), + .argv = argv_decode(argv_encoded, argv_size), + .data = data, + .data_size = data_size, + .type = type + }; + + // all allocations given to the request are now handled by this + spawn_server_execute_request(server, rq); + + // since we make rq->argv and rq->envp NULL when we keep it, + // we don't need these anymore.
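+ // the argv/envp arrays built by argv_decode() point into these encoded buffers, + // so the buffers can only be released now that spawn_server_execute_request() is done with them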
+ freez(envp_encoded); + freez(argv_encoded); + return; + +cleanup: + close(sock); + if(stdin_fd != -1) close(stdin_fd); + if(stdout_fd != -1) close(stdout_fd); + if(stderr_fd != -1) close(stderr_fd); + if(custom_fd != -1) close(custom_fd); + freez(envp_encoded); + freez(argv_encoded); + freez(data); +} + +// -------------------------------------------------------------------------------------------------------------------- +// the spawn server main event loop + +static void spawn_server_sigchld_handler(int signo __maybe_unused) { + spawn_server_sigchld = true; +} + +static void spawn_server_sigterm_handler(int signo __maybe_unused) { + spawn_server_exit = true; +} + +static SPAWN_REQUEST *find_request_by_pid(pid_t pid) { + for(SPAWN_REQUEST *rq = spawn_server_requests; rq ;rq = rq->next) + if(rq->pid == pid) + return rq; + + return NULL; +} + +static void spawn_server_process_sigchld(void) { + // nd_log(NDLS_COLLECTORS, NDLP_INFO, "SPAWN SERVER: checking for exited children"); + + spawn_server_sigchld = false; + + int status; + pid_t pid; + + // Loop to check for exited child processes + while ((pid = waitpid((pid_t)(-1), &status, WNOHANG)) != 0) { + if(pid == -1) + break; + + errno_clear(); + + SPAWN_REQUEST *rq = find_request_by_pid(pid); + size_t request_id = rq ? rq->request_id : 0; + bool send_report_remove_request = false; + + if(WIFEXITED(status)) { + if(WEXITSTATUS(status)) + nd_log(NDLS_COLLECTORS, NDLP_WARNING, + "SPAWN SERVER: child with pid %d (request %zu) exited with exit code %d: %s", + pid, request_id, WEXITSTATUS(status), rq ? rq->cmdline : "[request not found]"); + send_report_remove_request = true; + } + else if(WIFSIGNALED(status)) { + if(WCOREDUMP(status)) + nd_log(NDLS_COLLECTORS, NDLP_WARNING, + "SPAWN SERVER: child with pid %d (request %zu) coredump'd due to signal %d: %s", + pid, request_id, WTERMSIG(status), rq ? rq->cmdline : "[request not found]"); + else + nd_log(NDLS_COLLECTORS, NDLP_WARNING, + "SPAWN SERVER: child with pid %d (request %zu) killed by signal %d: %s", + pid, request_id, WTERMSIG(status), rq ? rq->cmdline : "[request not found]"); + send_report_remove_request = true; + } + else if(WIFSTOPPED(status)) { + nd_log(NDLS_COLLECTORS, NDLP_WARNING, + "SPAWN SERVER: child with pid %d (request %zu) stopped due to signal %d: %s", + pid, request_id, WSTOPSIG(status), rq ? rq->cmdline : "[request not found]"); + send_report_remove_request = false; + } + else if(WIFCONTINUED(status)) { + nd_log(NDLS_COLLECTORS, NDLP_WARNING, + "SPAWN SERVER: child with pid %d (request %zu) continued due to signal %d: %s", + pid, request_id, SIGCONT, rq ? rq->cmdline : "[request not found]"); + send_report_remove_request = false; + } + else { + nd_log(NDLS_COLLECTORS, NDLP_WARNING, + "SPAWN SERVER: child with pid %d (request %zu) reports unhandled status: %s", + pid, request_id, rq ? 
rq->cmdline : "[request not found]"); + send_report_remove_request = false; + } + + if(send_report_remove_request && rq) { + spawn_server_send_status_exit(rq, status); + DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(spawn_server_requests, rq, prev, next); + request_free(rq); + } + } +} + +static void posix_unmask_sigchld_on_thread(void) { + sigset_t sigset; + sigemptyset(&sigset); // Initialize the signal set to empty + sigaddset(&sigset, SIGCHLD); // Add SIGCHLD to the set + + if(pthread_sigmask(SIG_UNBLOCK, &sigset, NULL) != 0) + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "SPAWN SERVER: cannot unmask SIGCHLD"); +} + +static int spawn_server_event_loop(SPAWN_SERVER *server) { + int pipe_fd = server->pipe[1]; + close(server->pipe[0]); server->pipe[0] = -1; + + posix_unmask_sigchld_on_thread(); + + // Set up the signal handler for SIGCHLD and SIGTERM + struct sigaction sa; + sa.sa_handler = spawn_server_sigchld_handler; + sigemptyset(&sa.sa_mask); + sa.sa_flags = SA_RESTART | SA_NOCLDSTOP; + if (sigaction(SIGCHLD, &sa, NULL) == -1) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: sigaction() failed for SIGCHLD"); + return 1; + } + + sa.sa_handler = spawn_server_sigterm_handler; + if (sigaction(SIGTERM, &sa, NULL) == -1) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: sigaction() failed for SIGTERM"); + return 1; + } + + struct status_report sr = { + .status = STATUS_REPORT_STARTED, + .started = { + .pid = getpid(), + }, + }; + if (write(pipe_fd, &sr, sizeof(sr)) != sizeof(sr)) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: failed to write initial status report."); + return 1; + } + + struct pollfd fds[2]; + fds[0].fd = server->sock; + fds[0].events = POLLIN; + fds[1].fd = pipe_fd; + fds[1].events = POLLHUP | POLLERR; + + while(!spawn_server_exit) { + int ret = poll(fds, 2, 500); + if (spawn_server_sigchld || ret == 0) { + spawn_server_process_sigchld(); + errno_clear(); + + if(ret == -1 || ret == 0) + continue; + } + + if (ret == -1) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: poll() failed"); + break; + } + + if (fds[1].revents & (POLLHUP|POLLERR)) { + // Pipe has been closed (parent has exited) + nd_log(NDLS_COLLECTORS, NDLP_DEBUG, "SPAWN SERVER: Parent process closed socket (exited?)"); + break; + } + + if (fds[0].revents & POLLIN) { + int sock = accept(server->sock, NULL, NULL); + if (sock == -1) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: accept() failed"); + continue; + } + + // do not fork this socket + sock_setcloexec(sock); + + // receive the request and process it + spawn_server_receive_request(sock, server); + } + } + + // Cleanup before exiting + unlink(server->path); + + // stop all children + if(spawn_server_requests) { + // nd_log(NDLS_COLLECTORS, NDLP_INFO, "SPAWN SERVER: killing all children..."); + size_t killed = 0; + for(SPAWN_REQUEST *rq = spawn_server_requests; rq ; rq = rq->next) { + kill(rq->pid, SIGTERM); + killed++; + } + while(spawn_server_requests) { + spawn_server_process_sigchld(); + tinysleep(); + } + // nd_log(NDLS_COLLECTORS, NDLP_INFO, "SPAWN SERVER: all %zu children finished", killed); + } + + return 0; +} + +// -------------------------------------------------------------------------------------------------------------------- +// management of the spawn server + +void spawn_server_destroy(SPAWN_SERVER *server) { + if(server->pipe[0] != -1) close(server->pipe[0]); + if(server->pipe[1] != -1) close(server->pipe[1]); + if(server->sock != -1) close(server->sock); + + if(server->server_pid) { + kill(server->server_pid, SIGTERM); + 
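+        // reap the server process synchronously, so it does not linger as a zombie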
waitpid(server->server_pid, NULL, 0);
+    }
+
+    if(server->path) {
+        unlink(server->path);
+        freez(server->path);
+    }
+
+    freez((void *)server->name);
+    freez(server);
+}
+
+static bool spawn_server_create_listening_socket(SPAWN_SERVER *server) {
+    if(spawn_server_is_running(server->path)) {
+        nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: Server is already listening on path '%s'", server->path);
+        return false;
+    }
+
+    if ((server->sock = socket(AF_UNIX, SOCK_STREAM, 0)) == -1) {
+        nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: Failed to create socket()");
+        return false;
+    }
+
+    struct sockaddr_un server_addr = {
+        .sun_family = AF_UNIX,
+    };
+
+    if (strlen(server->path) >= sizeof(server_addr.sun_path)) {
+        // an unchecked strcpy() here would overflow sun_path (typically 104-108 bytes)
+        nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: unix socket path is too long: '%s'", server->path);
+        return false;
+    }
+    strcpy(server_addr.sun_path, server->path);
+    unlink(server->path);
+    errno = 0;
+
+    if (bind(server->sock, (struct sockaddr *)&server_addr, sizeof(server_addr)) == -1) {
+        nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: Failed to bind()");
+        return false;
+    }
+
+    if (listen(server->sock, 5) == -1) {
+        nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: Failed to listen()");
+        return false;
+    }
+
+    return true;
+}
+
+static void replace_stdio_with_dev_null(void) {
+    // we cannot log in this function - the logger is not yet initialized after fork()
+
+    int dev_null_fd = open("/dev/null", O_RDWR);
+    if (dev_null_fd == -1) {
+        // nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: Failed to open /dev/null: %s", strerror(errno));
+        return;
+    }
+
+    // Redirect stdin (fd 0)
+    if (dup2(dev_null_fd, STDIN_FILENO) == -1) {
+        // nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: Failed to redirect stdin to /dev/null: %s", strerror(errno));
+        close(dev_null_fd);
+        return;
+    }
+
+    // Redirect stdout (fd 1)
+    if (dup2(dev_null_fd, STDOUT_FILENO) == -1) {
+        // nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: Failed to redirect stdout to /dev/null: %s", strerror(errno));
+        close(dev_null_fd);
+        return;
+    }
+
+    // Close the original /dev/null file descriptor
+    close(dev_null_fd);
+}
+
+SPAWN_SERVER* spawn_server_create(SPAWN_SERVER_OPTIONS options, const char *name, spawn_request_callback_t child_callback, int argc, const char **argv) {
+    SPAWN_SERVER *server = callocz(1, sizeof(SPAWN_SERVER));
+    server->pipe[0] = -1;
+    server->pipe[1] = -1;
+    server->sock = -1;
+    server->cb = child_callback;
+    server->argc = argc;
+    server->argv = argv;
+    server->options = options;
+    server->id = __atomic_add_fetch(&spawn_server_id, 1, __ATOMIC_RELAXED);
+    os_uuid_generate_random(server->magic.uuid);
+
+    char *runtime_directory = getenv("NETDATA_CACHE_DIR");
+    if(runtime_directory && !*runtime_directory)
+        // it is empty - treat it as unset
+        runtime_directory = NULL;
+
+    if (runtime_directory) {
+        struct stat statbuf;
+
+        if (stat(runtime_directory, &statbuf) == 0 && S_ISDIR(statbuf.st_mode)) {
+            // it exists and it is a directory
+
+            if (access(runtime_directory, W_OK) != 0) {
+                // it is not writable by us
+                nd_log(NDLS_COLLECTORS, NDLP_ERR, "Runtime directory '%s' is not writable, falling back to '/tmp'", runtime_directory);
+                runtime_directory = NULL;
+            }
+        }
+        else {
+            // it does not exist
+            nd_log(NDLS_COLLECTORS, NDLP_ERR, "Runtime directory '%s' does not exist, falling back to '/tmp'", runtime_directory);
+            runtime_directory = NULL;
+        }
+    }
+    if(!runtime_directory)
+        runtime_directory = "/tmp";
+
+    char path[1024];
+    if(name && *name) {
+        server->name = strdupz(name);
+        snprintf(path, sizeof(path), "%s/.netdata-spawn-%s.sock", runtime_directory, name);
+    }
+    else {
+        server->name = strdupz("unnamed");
+        snprintf(path, sizeof(path),
"%s/.netdata-spawn-%d-%zu.sock", runtime_directory, getpid(), server->id); + } + + server->path = strdupz(path); + + if (!spawn_server_create_listening_socket(server)) + goto cleanup; + + if (pipe(server->pipe) == -1) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: Cannot create status pipe()"); + goto cleanup; + } + + pid_t pid = fork(); + if (pid == 0) { + // the child - the spawn server + + char buf[16]; + snprintfz(buf, sizeof(buf), "spawn-%s", server->name); + os_setproctitle(buf, server->argc, server->argv); + + replace_stdio_with_dev_null(); + int fds_to_keep[] = { + server->sock, + server->pipe[1], + nd_log_systemd_journal_fd(), + }; + os_close_all_non_std_open_fds_except(fds_to_keep, _countof(fds_to_keep), 0); + nd_log_reopen_log_files_for_spawn_server(buf); + exit(spawn_server_event_loop(server)); + } + else if (pid > 0) { + // the parent + server->server_pid = pid; + close(server->sock); server->sock = -1; + close(server->pipe[1]); server->pipe[1] = -1; + + struct status_report sr = { 0 }; + if (read(server->pipe[0], &sr, sizeof(sr)) != sizeof(sr)) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: cannot read() initial status report from spawn server"); + goto cleanup; + } + + if(sr.status != STATUS_REPORT_STARTED) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: server did not respond with success."); + goto cleanup; + } + + if(sr.started.pid != server->server_pid) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: server sent pid %d but we have created %d.", sr.started.pid, server->server_pid); + goto cleanup; + } + + nd_log(NDLS_COLLECTORS, NDLP_DEBUG, "SPAWN SERVER: server created on pid %d", server->server_pid); + + return server; + } + + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: Cannot fork()"); + +cleanup: + spawn_server_destroy(server); + return NULL; +} + +// -------------------------------------------------------------------------------------------------------------------- +// creating spawn server instances + +void spawn_server_exec_destroy(SPAWN_INSTANCE *instance) { + if(instance->child_pid) kill(instance->child_pid, SIGTERM); + if(instance->write_fd != -1) close(instance->write_fd); + if(instance->read_fd != -1) close(instance->read_fd); + if(instance->sock != -1) close(instance->sock); + freez(instance); +} + +static void log_invalid_magic(SPAWN_INSTANCE *instance, struct status_report *sr) { + unsigned char buf[sizeof(*sr) + 1]; + memcpy(buf, sr, sizeof(*sr)); + buf[sizeof(buf) - 1] = '\0'; + + for(size_t i = 0; i < sizeof(buf) - 1; i++) { + if (iscntrl(buf[i]) || !isprint(buf[i])) + buf[i] = '_'; + } + + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "SPAWN PARENT: invalid final status report for child %d, request %zu (invalid magic %#x in response, reads like '%s')", + instance->child_pid, instance->request_id, sr->magic, buf); +} + +int spawn_server_exec_wait(SPAWN_SERVER *server __maybe_unused, SPAWN_INSTANCE *instance) { + int rc = -1; + + // close the child pipes, to make it exit + if(instance->write_fd != -1) { close(instance->write_fd); instance->write_fd = -1; } + if(instance->read_fd != -1) { close(instance->read_fd); instance->read_fd = -1; } + + // get the result + struct status_report sr = { 0 }; + if(read(instance->sock, &sr, sizeof(sr)) != sizeof(sr)) + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "SPAWN PARENT: failed to read final status report for child %d, request %zu", + instance->child_pid, instance->request_id); + + else if(sr.magic != STATUS_REPORT_MAGIC) + log_invalid_magic(instance, &sr); + else { + switch (sr.status) { + case 
STATUS_REPORT_EXITED: + rc = sr.exited.waitpid_status; + break; + + case STATUS_REPORT_STARTED: + case STATUS_REPORT_FAILED: + default: + errno = 0; + nd_log( + NDLS_COLLECTORS, NDLP_ERR, + "SPAWN PARENT: invalid status report to exec spawn request %zu for pid %d (status = %u)", + instance->request_id, instance->child_pid, sr.status); + break; + } + } + + instance->child_pid = 0; + spawn_server_exec_destroy(instance); + return rc; +} + +int spawn_server_exec_kill(SPAWN_SERVER *server, SPAWN_INSTANCE *instance) { + // kill the child, if it is still running + if(instance->child_pid) kill(instance->child_pid, SIGTERM); + return spawn_server_exec_wait(server, instance); +} + +SPAWN_INSTANCE* spawn_server_exec(SPAWN_SERVER *server, int stderr_fd, int custom_fd, const char **argv, const void *data, size_t data_size, SPAWN_INSTANCE_TYPE type) { + int pipe_stdin[2] = { -1, -1 }, pipe_stdout[2] = { -1, -1 }; + + SPAWN_INSTANCE *instance = callocz(1, sizeof(SPAWN_INSTANCE)); + instance->read_fd = -1; + instance->write_fd = -1; + + instance->sock = connect_to_spawn_server(server->path, true); + if(instance->sock == -1) + goto cleanup; + + if (pipe(pipe_stdin) == -1) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN PARENT: Cannot create stdin pipe()"); + goto cleanup; + } + + if (pipe(pipe_stdout) == -1) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN PARENT: Cannot create stdout pipe()"); + goto cleanup; + } + + SPAWN_REQUEST request = { + .request_id = __atomic_add_fetch(&server->request_id, 1, __ATOMIC_RELAXED), + .sock = instance->sock, + .fds = { + [0] = pipe_stdin[0], + [1] = pipe_stdout[1], + [2] = stderr_fd, + [3] = custom_fd, + }, + .envp = (const char **)environ, + .argv = argv, + .data = data, + .data_size = data_size, + .type = type + }; + + if(!spawn_server_send_request(&server->magic, &request)) + goto cleanup; + + close(pipe_stdin[0]); pipe_stdin[0] = -1; + instance->write_fd = pipe_stdin[1]; pipe_stdin[1] = -1; + + close(pipe_stdout[1]); pipe_stdout[1] = -1; + instance->read_fd = pipe_stdout[0]; pipe_stdout[0] = -1; + + // copy the request id to the instance + instance->request_id = request.request_id; + + struct status_report sr = { 0 }; + if(read(instance->sock, &sr, sizeof(sr)) != sizeof(sr)) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "SPAWN PARENT: Failed to exec spawn request %zu (cannot get initial status report)", + request.request_id); + goto cleanup; + } + + if(sr.magic != STATUS_REPORT_MAGIC) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "SPAWN PARENT: Failed to exec spawn request %zu (invalid magic %#x in response)", + request.request_id, sr.magic); + goto cleanup; + } + + switch(sr.status) { + case STATUS_REPORT_STARTED: + instance->child_pid = sr.started.pid; + return instance; + + case STATUS_REPORT_FAILED: + errno = sr.failed.err_no; + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "SPAWN PARENT: Failed to exec spawn request %zu (server reports failure, errno is updated)", + request.request_id); + errno = 0; + break; + + case STATUS_REPORT_EXITED: + errno = ENOEXEC; + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "SPAWN PARENT: Failed to exec spawn request %zu (server reports exit, errno is updated)", + request.request_id); + errno = 0; + break; + + default: + errno = 0; + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "SPAWN PARENT: Invalid status report to exec spawn request %zu (received invalid data)", + request.request_id); + break; + } + +cleanup: + if (pipe_stdin[0] >= 0) close(pipe_stdin[0]); + if (pipe_stdin[1] >= 0) close(pipe_stdin[1]); + if (pipe_stdout[0] >= 0) close(pipe_stdout[0]); + if (pipe_stdout[1] 
>= 0) close(pipe_stdout[1]);
+    spawn_server_exec_destroy(instance);
+    return NULL;
+}
+
+#endif
diff --git a/src/libnetdata/spawn_server/spawn_server_posix.c b/src/libnetdata/spawn_server/spawn_server_posix.c
new file mode 100644
index 000000000..f96921bb9
--- /dev/null
+++ b/src/libnetdata/spawn_server/spawn_server_posix.c
@@ -0,0 +1,299 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "spawn_server_internals.h"
+
+#if defined(SPAWN_SERVER_VERSION_POSIX_SPAWN)
+
+#ifdef __APPLE__
+#include <crt_externs.h>
+#define environ (*_NSGetEnviron())
+#else
+extern char **environ;
+#endif
+
+int spawn_server_instance_read_fd(SPAWN_INSTANCE *si) { return si->read_fd; }
+int spawn_server_instance_write_fd(SPAWN_INSTANCE *si) { return si->write_fd; }
+void spawn_server_instance_read_fd_unset(SPAWN_INSTANCE *si) { si->read_fd = -1; }
+void spawn_server_instance_write_fd_unset(SPAWN_INSTANCE *si) { si->write_fd = -1; }
+pid_t spawn_server_instance_pid(SPAWN_INSTANCE *si) { return si->child_pid; }
+
+static struct {
+    bool sigchld_initialized;
+    SPINLOCK spinlock;
+    SPAWN_INSTANCE *instances;
+} spawn_globals = {
+    .spinlock = NETDATA_SPINLOCK_INITIALIZER,
+    .instances = NULL,
+};
+
+//static void sigchld_handler(int signum __maybe_unused) {
+//    pid_t pid;
+//    int status;
+//
+//    while ((pid = waitpid(-1, &status, WNOHANG)) > 0) {
+//        // Find the SPAWN_INSTANCE corresponding to this pid
+//        spinlock_lock(&spawn_globals.spinlock);
+//        for(SPAWN_INSTANCE *si = spawn_globals.instances; si ;si = si->next) {
+//            if (si->child_pid == pid) {
+//                __atomic_store_n(&si->waitpid_status, status, __ATOMIC_RELAXED);
+//                __atomic_store_n(&si->exited, true, __ATOMIC_RELAXED);
+//                break;
+//            }
+//        }
+//        spinlock_unlock(&spawn_globals.spinlock);
+//    }
+//}
+
+SPAWN_SERVER* spawn_server_create(SPAWN_SERVER_OPTIONS options __maybe_unused, const char *name, spawn_request_callback_t cb __maybe_unused, int argc __maybe_unused, const char **argv __maybe_unused) {
+    SPAWN_SERVER* server = callocz(1, sizeof(SPAWN_SERVER));
+
+    if (name)
+        server->name = strdupz(name);
+    else
+        server->name = strdupz("unnamed");
+
+    if(!spawn_globals.sigchld_initialized) {
+        spawn_globals.sigchld_initialized = true;
+
+//        struct sigaction sa;
+//        sa.sa_handler = sigchld_handler;
+//        sigemptyset(&sa.sa_mask);
+//        sa.sa_flags = SA_RESTART | SA_NOCLDSTOP;
+//        if (sigaction(SIGCHLD, &sa, NULL) == -1) {
+//            nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN PARENT: Failed to set SIGCHLD handler");
+//            freez((void *)server->name);
+//            freez(server);
+//            return NULL;
+//        }
+    }
+
+    return server;
+}
+
+void spawn_server_destroy(SPAWN_SERVER *server) {
+    if (!server) return;
+    freez((void *)server->name);
+    freez(server);
+}
+
+SPAWN_INSTANCE* spawn_server_exec(SPAWN_SERVER *server, int stderr_fd, int custom_fd __maybe_unused, const char **argv, const void *data __maybe_unused, size_t data_size __maybe_unused, SPAWN_INSTANCE_TYPE type) {
+    if (type != SPAWN_INSTANCE_TYPE_EXEC)
+        return NULL;
+
+    CLEAN_BUFFER *cmdline_wb = argv_to_cmdline_buffer(argv);
+    const char *cmdline = buffer_tostring(cmdline_wb);
+
+    SPAWN_INSTANCE *si = callocz(1, sizeof(SPAWN_INSTANCE));
+    si->child_pid = -1;
+    si->request_id = __atomic_add_fetch(&server->request_id, 1, __ATOMIC_RELAXED);
+
+    int stdin_pipe[2] = { -1, -1 };
+    int stdout_pipe[2] = { -1, -1 };
+
+    if (pipe(stdin_pipe) == -1) {
+        nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN PARENT: stdin pipe() failed: %s", cmdline);
+        freez(si);
+        return NULL;
+    }
+
+    if (pipe(stdout_pipe) == -1) {
+        nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN PARENT: stdout pipe() failed: %s", cmdline);
+        close(stdin_pipe[PIPE_READ]);
+        close(stdin_pipe[PIPE_WRITE]);
+        freez(si);
+        return NULL;
+    }
+
+    posix_spawn_file_actions_t file_actions;
+    posix_spawnattr_t attr;
+
+    if (posix_spawn_file_actions_init(&file_actions) != 0) {
+        nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN PARENT: posix_spawn_file_actions_init() failed: %s", cmdline);
+        close(stdin_pipe[PIPE_READ]);
+        close(stdin_pipe[PIPE_WRITE]);
+        close(stdout_pipe[PIPE_READ]);
+        close(stdout_pipe[PIPE_WRITE]);
+        freez(si);
+        return NULL;
+    }
+
+    posix_spawn_file_actions_adddup2(&file_actions, stdin_pipe[PIPE_READ], STDIN_FILENO);
+    posix_spawn_file_actions_adddup2(&file_actions, stdout_pipe[PIPE_WRITE], STDOUT_FILENO);
+    posix_spawn_file_actions_addclose(&file_actions, stdin_pipe[PIPE_READ]);
+    posix_spawn_file_actions_addclose(&file_actions, stdin_pipe[PIPE_WRITE]);
+    posix_spawn_file_actions_addclose(&file_actions, stdout_pipe[PIPE_READ]);
+    posix_spawn_file_actions_addclose(&file_actions, stdout_pipe[PIPE_WRITE]);
+    if(stderr_fd != STDERR_FILENO) {
+        posix_spawn_file_actions_adddup2(&file_actions, stderr_fd, STDERR_FILENO);
+        posix_spawn_file_actions_addclose(&file_actions, stderr_fd);
+    }
+
+    if (posix_spawnattr_init(&attr) != 0) {
+        nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN PARENT: posix_spawnattr_init() failed: %s", cmdline);
+        posix_spawn_file_actions_destroy(&file_actions);
+        close(stdin_pipe[PIPE_READ]);
+        close(stdin_pipe[PIPE_WRITE]);
+        close(stdout_pipe[PIPE_READ]);
+        close(stdout_pipe[PIPE_WRITE]);
+        freez(si);
+        return NULL;
+    }
+
+    // Set the flags to reset the signal mask and signal actions
+    sigset_t empty_mask;
+    sigemptyset(&empty_mask);
+    if (posix_spawnattr_setsigmask(&attr, &empty_mask) != 0) {
+        nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN PARENT: posix_spawnattr_setsigmask() failed: %s", cmdline);
+        posix_spawn_file_actions_destroy(&file_actions);
+        posix_spawnattr_destroy(&attr);
+        close(stdin_pipe[PIPE_READ]);
+        close(stdin_pipe[PIPE_WRITE]);
+        close(stdout_pipe[PIPE_READ]);
+        close(stdout_pipe[PIPE_WRITE]);
+        freez(si);
+        return NULL;  // this function returns a SPAWN_INSTANCE pointer, so fail with NULL after releasing everything
+    }
+
+    short flags = POSIX_SPAWN_SETSIGMASK | POSIX_SPAWN_SETSIGDEF;
+    if (posix_spawnattr_setflags(&attr, flags) != 0) {
+        nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN PARENT: posix_spawnattr_setflags() failed: %s", cmdline);
+        posix_spawn_file_actions_destroy(&file_actions);
+        posix_spawnattr_destroy(&attr);
+        close(stdin_pipe[PIPE_READ]);
+        close(stdin_pipe[PIPE_WRITE]);
+        close(stdout_pipe[PIPE_READ]);
+        close(stdout_pipe[PIPE_WRITE]);
+        freez(si);
+        return NULL;
+    }
+
+    spinlock_lock(&spawn_globals.spinlock);
+    DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(spawn_globals.instances, si, prev, next);
+    spinlock_unlock(&spawn_globals.spinlock);
+
+    // unfortunately, on CYGWIN/MSYS posix_spawn() is not thread-safe,
+    // so we serialize all calls to it.
+ static SPINLOCK spinlock = NETDATA_SPINLOCK_INITIALIZER; + spinlock_lock(&spinlock); + + int fds[3] = { stdin_pipe[PIPE_READ], stdout_pipe[PIPE_WRITE], stderr_fd }; + os_close_all_non_std_open_fds_except(fds, 3, CLOSE_RANGE_CLOEXEC); + + errno_clear(); + if (posix_spawn(&si->child_pid, argv[0], &file_actions, &attr, (char * const *)argv, environ) != 0) { + spinlock_unlock(&spinlock); + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: posix_spawn() failed: %s", cmdline); + + spinlock_lock(&spawn_globals.spinlock); + DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(spawn_globals.instances, si, prev, next); + spinlock_unlock(&spawn_globals.spinlock); + + posix_spawnattr_destroy(&attr); + posix_spawn_file_actions_destroy(&file_actions); + + close(stdin_pipe[PIPE_READ]); + close(stdin_pipe[PIPE_WRITE]); + close(stdout_pipe[PIPE_READ]); + close(stdout_pipe[PIPE_WRITE]); + freez(si); + return NULL; + } + spinlock_unlock(&spinlock); + + // Destroy the posix_spawnattr_t and posix_spawn_file_actions_t structures + posix_spawnattr_destroy(&attr); + posix_spawn_file_actions_destroy(&file_actions); + + // Close the read end of the stdin pipe and the write end of the stdout pipe in the parent process + close(stdin_pipe[PIPE_READ]); + close(stdout_pipe[PIPE_WRITE]); + + si->write_fd = stdin_pipe[PIPE_WRITE]; + si->read_fd = stdout_pipe[PIPE_READ]; + si->cmdline = strdupz(cmdline); + + nd_log(NDLS_COLLECTORS, NDLP_INFO, + "SPAWN SERVER: process created with pid %d: %s", + si->child_pid, cmdline); + return si; +} + +int spawn_server_exec_kill(SPAWN_SERVER *server, SPAWN_INSTANCE *si) { + if (!si) return -1; + + if (kill(si->child_pid, SIGTERM)) + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "SPAWN PARENT: kill() of pid %d failed: %s", + si->child_pid, si->cmdline); + + return spawn_server_exec_wait(server, si); +} + +static int spawn_server_waitpid(SPAWN_INSTANCE *si) { + int status; + pid_t pid; + + pid = waitpid(si->child_pid, &status, 0); + + if(pid != si->child_pid) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "SPAWN PARENT: failed to wait for pid %d: %s", + si->child_pid, si->cmdline); + + return -1; + } + + errno_clear(); + + if(WIFEXITED(status)) { + if(WEXITSTATUS(status)) + nd_log(NDLS_COLLECTORS, NDLP_INFO, + "SPAWN SERVER: child with pid %d (request %zu) exited with exit code %d: %s", + pid, si->request_id, WEXITSTATUS(status), si->cmdline); + } + else if(WIFSIGNALED(status)) { + if(WCOREDUMP(status)) + nd_log(NDLS_COLLECTORS, NDLP_INFO, + "SPAWN SERVER: child with pid %d (request %zu) coredump'd due to signal %d: %s", + pid, si->request_id, WTERMSIG(status), si->cmdline); + else + nd_log(NDLS_COLLECTORS, NDLP_INFO, + "SPAWN SERVER: child with pid %d (request %zu) killed by signal %d: %s", + pid, si->request_id, WTERMSIG(status), si->cmdline); + } + else if(WIFSTOPPED(status)) { + nd_log(NDLS_COLLECTORS, NDLP_INFO, + "SPAWN SERVER: child with pid %d (request %zu) stopped due to signal %d: %s", + pid, si->request_id, WSTOPSIG(status), si->cmdline); + } + else if(WIFCONTINUED(status)) { + nd_log(NDLS_COLLECTORS, NDLP_INFO, + "SPAWN SERVER: child with pid %d (request %zu) continued due to signal %d: %s", + pid, si->request_id, SIGCONT, si->cmdline); + } + else { + nd_log(NDLS_COLLECTORS, NDLP_INFO, + "SPAWN SERVER: child with pid %d (request %zu) reports unhandled status: %s", + pid, si->request_id, si->cmdline); + } + + return status; +} + +int spawn_server_exec_wait(SPAWN_SERVER *server __maybe_unused, SPAWN_INSTANCE *si) { + if (!si) return -1; + + // Close all pipe descriptors to force the child to exit + if 
(si->read_fd != -1) close(si->read_fd); + if (si->write_fd != -1) close(si->write_fd); + + // Wait for the process to exit + int status = __atomic_load_n(&si->waitpid_status, __ATOMIC_RELAXED); + bool exited = __atomic_load_n(&si->exited, __ATOMIC_RELAXED); + if(!exited) + status = spawn_server_waitpid(si); + else + nd_log(NDLS_COLLECTORS, NDLP_INFO, + "SPAWN PARENT: child with pid %d exited with status %d (sighandler): %s", + si->child_pid, status, si->cmdline); + + spinlock_lock(&spawn_globals.spinlock); + DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(spawn_globals.instances, si, prev, next); + spinlock_unlock(&spawn_globals.spinlock); + + freez((void *)si->cmdline); + freez(si); + return status; +} + +#endif diff --git a/src/libnetdata/spawn_server/spawn_server_windows.c b/src/libnetdata/spawn_server/spawn_server_windows.c new file mode 100644 index 000000000..f80925a24 --- /dev/null +++ b/src/libnetdata/spawn_server/spawn_server_windows.c @@ -0,0 +1,456 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "spawn_server_internals.h" + +#if defined(SPAWN_SERVER_VERSION_WINDOWS) + +int spawn_server_instance_read_fd(SPAWN_INSTANCE *si) { return si->read_fd; } +int spawn_server_instance_write_fd(SPAWN_INSTANCE *si) { return si->write_fd; } +void spawn_server_instance_read_fd_unset(SPAWN_INSTANCE *si) { si->read_fd = -1; } +void spawn_server_instance_write_fd_unset(SPAWN_INSTANCE *si) { si->write_fd = -1; } + +pid_t spawn_server_instance_pid(SPAWN_INSTANCE *si) { + if(si->child_pid != -1) + return si->child_pid; + + return (pid_t)si->dwProcessId; +} + +static void update_cygpath_env(void) { + static volatile bool done = false; + + if(done) return; + done = true; + + char win_path[MAX_PATH]; + + // Convert Cygwin root path to Windows path + cygwin_conv_path(CCP_POSIX_TO_WIN_A, "/", win_path, sizeof(win_path)); + + nd_setenv("NETDATA_CYGWIN_BASE_PATH", win_path, 1); + + nd_log(NDLS_COLLECTORS, NDLP_INFO, "Cygwin/MSYS2 base path set to '%s'", win_path); +} + +SPAWN_SERVER* spawn_server_create(SPAWN_SERVER_OPTIONS options __maybe_unused, const char *name, spawn_request_callback_t cb __maybe_unused, int argc __maybe_unused, const char **argv __maybe_unused) { + update_cygpath_env(); + + SPAWN_SERVER* server = callocz(1, sizeof(SPAWN_SERVER)); + if(name) + server->name = strdupz(name); + else + server->name = strdupz("unnamed"); + + server->log_forwarder = log_forwarder_start(); + + return server; +} + +void spawn_server_destroy(SPAWN_SERVER *server) { + if (server) { + if (server->log_forwarder) { + log_forwarder_stop(server->log_forwarder); + server->log_forwarder = NULL; + } + freez((void *)server->name); + freez(server); + } +} + +static BUFFER *argv_to_windows(const char **argv) { + BUFFER *wb = buffer_create(0, NULL); + + // argv[0] is the path + char b[strlen(argv[0]) * 2 + FILENAME_MAX]; + cygwin_conv_path(CCP_POSIX_TO_WIN_A | CCP_ABSOLUTE, argv[0], b, sizeof(b)); + + for(size_t i = 0; argv[i] ;i++) { + const char *s = (i == 0) ? 
b : argv[i]; + size_t len = strlen(s); + buffer_need_bytes(wb, len * 2 + 1); + + bool needs_quotes = false; + for(const char *c = s; !needs_quotes && *c ; c++) { + switch(*c) { + case ' ': + case '\v': + case '\t': + case '\n': + case '"': + needs_quotes = true; + break; + + default: + break; + } + } + + if(buffer_strlen(wb)) { + if (needs_quotes) + buffer_strcat(wb, " \""); + else + buffer_putc(wb, ' '); + } + else if (needs_quotes) + buffer_putc(wb, '"'); + + for(const char *c = s; *c ; c++) { + switch(*c) { + case '"': + buffer_putc(wb, '\\'); + // fall through + + default: + buffer_putc(wb, *c); + break; + } + } + + if(needs_quotes) + buffer_strcat(wb, "\""); + } + + return wb; +} + +int set_fd_blocking(int fd) { + int flags = fcntl(fd, F_GETFL, 0); + if (flags == -1) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN PARENT: fcntl(F_GETFL) failed"); + return -1; + } + + flags &= ~O_NONBLOCK; + if (fcntl(fd, F_SETFL, flags) == -1) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN PARENT: fcntl(F_SETFL) failed"); + return -1; + } + + return 0; +} + +//static void print_environment_block(char *env_block) { +// if (env_block == NULL) { +// fprintf(stderr, "Environment block is NULL\n"); +// return; +// } +// +// char *env = env_block; +// while (*env) { +// fprintf(stderr, "ENVIRONMENT: %s\n", env); +// // Move to the next string in the block +// env += strlen(env) + 1; +// } +//} + +SPAWN_INSTANCE* spawn_server_exec(SPAWN_SERVER *server, int stderr_fd __maybe_unused, int custom_fd __maybe_unused, const char **argv, const void *data __maybe_unused, size_t data_size __maybe_unused, SPAWN_INSTANCE_TYPE type) { + static SPINLOCK spinlock = NETDATA_SPINLOCK_INITIALIZER; + + if (type != SPAWN_INSTANCE_TYPE_EXEC) + return NULL; + + int pipe_stdin[2] = { -1, -1 }, pipe_stdout[2] = { -1, -1 }, pipe_stderr[2] = { -1, -1 }; + + errno_clear(); + + SPAWN_INSTANCE *instance = callocz(1, sizeof(*instance)); + instance->request_id = __atomic_add_fetch(&server->request_id, 1, __ATOMIC_RELAXED); + + CLEAN_BUFFER *wb = argv_to_windows(argv); + char *command = (char *)buffer_tostring(wb); + + if (pipe(pipe_stdin) == -1) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "SPAWN PARENT: Cannot create stdin pipe() for request No %zu, command: %s", + instance->request_id, command); + goto cleanup; + } + + if (pipe(pipe_stdout) == -1) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "SPAWN PARENT: Cannot create stdout pipe() for request No %zu, command: %s", + instance->request_id, command); + goto cleanup; + } + + if (pipe(pipe_stderr) == -1) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "SPAWN PARENT: Cannot create stderr pipe() for request No %zu, command: %s", + instance->request_id, command); + goto cleanup; + } + + // Ensure pipes are in blocking mode + if (set_fd_blocking(pipe_stdin[PIPE_READ]) == -1 || set_fd_blocking(pipe_stdin[PIPE_WRITE]) == -1 || + set_fd_blocking(pipe_stdout[PIPE_READ]) == -1 || set_fd_blocking(pipe_stdout[PIPE_WRITE]) == -1 || + set_fd_blocking(pipe_stderr[PIPE_READ]) == -1 || set_fd_blocking(pipe_stderr[PIPE_WRITE]) == -1) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "SPAWN PARENT: Failed to set blocking I/O on pipes for request No %zu, command: %s", + instance->request_id, command); + goto cleanup; + } + + // do not run multiple times this section + // to prevent handles leaking + spinlock_lock(&spinlock); + + // Convert POSIX file descriptors to Windows handles + HANDLE stdin_read_handle = (HANDLE)_get_osfhandle(pipe_stdin[PIPE_READ]); + HANDLE stdout_write_handle = (HANDLE)_get_osfhandle(pipe_stdout[PIPE_WRITE]); + 
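+    // (_get_osfhandle() returns the native Windows HANDLE behind a CRT file descriptor)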
HANDLE stderr_write_handle = (HANDLE)_get_osfhandle(pipe_stderr[PIPE_WRITE]); + + if (stdin_read_handle == INVALID_HANDLE_VALUE || stdout_write_handle == INVALID_HANDLE_VALUE || stderr_write_handle == INVALID_HANDLE_VALUE) { + spinlock_unlock(&spinlock); + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "SPAWN PARENT: Invalid handle value(s) for request No %zu, command: %s", + instance->request_id, command); + goto cleanup; + } + + // Set handle inheritance + if (!SetHandleInformation(stdin_read_handle, HANDLE_FLAG_INHERIT, HANDLE_FLAG_INHERIT) || + !SetHandleInformation(stdout_write_handle, HANDLE_FLAG_INHERIT, HANDLE_FLAG_INHERIT) || + !SetHandleInformation(stderr_write_handle, HANDLE_FLAG_INHERIT, HANDLE_FLAG_INHERIT)) { + spinlock_unlock(&spinlock); + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "SPAWN PARENT: Cannot set handle(s) inheritance for request No %zu, command: %s", + instance->request_id, command); + goto cleanup; + } + + // Set up the STARTUPINFO structure + STARTUPINFO si; + PROCESS_INFORMATION pi; + ZeroMemory(&si, sizeof(si)); + si.cb = sizeof(si); + si.dwFlags = STARTF_USESTDHANDLES; + si.hStdInput = stdin_read_handle; + si.hStdOutput = stdout_write_handle; + si.hStdError = stderr_write_handle; + + // Retrieve the current environment block + char* env_block = GetEnvironmentStrings(); +// print_environment_block(env_block); + + nd_log(NDLS_COLLECTORS, NDLP_INFO, + "SPAWN PARENT: Running request No %zu, command: '%s'", + instance->request_id, command); + + int fds_to_keep_open[] = { pipe_stdin[PIPE_READ], pipe_stdout[PIPE_WRITE], pipe_stderr[PIPE_WRITE] }; + os_close_all_non_std_open_fds_except(fds_to_keep_open, 3, CLOSE_RANGE_CLOEXEC); + + // Spawn the process + errno_clear(); + if (!CreateProcess(NULL, command, NULL, NULL, TRUE, 0, env_block, NULL, &si, &pi)) { + spinlock_unlock(&spinlock); + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "SPAWN PARENT: cannot CreateProcess() for request No %zu, command: %s", + instance->request_id, command); + goto cleanup; + } + + FreeEnvironmentStrings(env_block); + + // When we create a process with the CreateProcess function, it returns two handles: + // - one for the process (pi.hProcess) and + // - one for the primary thread of the new process (pi.hThread). + // Both of these handles need to be explicitly closed when they are no longer needed. 
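+    // hThread is closed immediately below; hProcess is stored in the instance and
+    // closed later by spawn_server_exec_wait().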
+ CloseHandle(pi.hThread); + + // end of the critical section + spinlock_unlock(&spinlock); + + // Close unused pipe ends + close(pipe_stdin[PIPE_READ]); pipe_stdin[PIPE_READ] = -1; + close(pipe_stdout[PIPE_WRITE]); pipe_stdout[PIPE_WRITE] = -1; + close(pipe_stderr[PIPE_WRITE]); pipe_stderr[PIPE_WRITE] = -1; + + // Store process information in instance + instance->dwProcessId = pi.dwProcessId; + instance->child_pid = cygwin_winpid_to_pid((pid_t)pi.dwProcessId); + instance->process_handle = pi.hProcess; + + // Convert handles to POSIX file descriptors + instance->write_fd = pipe_stdin[PIPE_WRITE]; + instance->read_fd = pipe_stdout[PIPE_READ]; + instance->stderr_fd = pipe_stderr[PIPE_READ]; + + // Add stderr_fd to the log forwarder + log_forwarder_add_fd(server->log_forwarder, instance->stderr_fd); + log_forwarder_annotate_fd_name(server->log_forwarder, instance->stderr_fd, command); + log_forwarder_annotate_fd_pid(server->log_forwarder, instance->stderr_fd, spawn_server_instance_pid(instance)); + + errno_clear(); + nd_log(NDLS_COLLECTORS, NDLP_INFO, + "SPAWN PARENT: created process for request No %zu, pid %d (winpid %d), command: %s", + instance->request_id, (int)instance->child_pid, (int)pi.dwProcessId, command); + + return instance; + + cleanup: + if (pipe_stdin[PIPE_READ] >= 0) close(pipe_stdin[PIPE_READ]); + if (pipe_stdin[PIPE_WRITE] >= 0) close(pipe_stdin[PIPE_WRITE]); + if (pipe_stdout[PIPE_READ] >= 0) close(pipe_stdout[PIPE_READ]); + if (pipe_stdout[PIPE_WRITE] >= 0) close(pipe_stdout[PIPE_WRITE]); + if (pipe_stderr[PIPE_READ] >= 0) close(pipe_stderr[PIPE_READ]); + if (pipe_stderr[PIPE_WRITE] >= 0) close(pipe_stderr[PIPE_WRITE]); + freez(instance); + return NULL; +} + +static char* GetErrorString(DWORD errorCode) { + DWORD lastError = GetLastError(); + + LPVOID lpMsgBuf; + DWORD bufLen = FormatMessage( + FORMAT_MESSAGE_ALLOCATE_BUFFER | + FORMAT_MESSAGE_FROM_SYSTEM | + FORMAT_MESSAGE_IGNORE_INSERTS, + NULL, + errorCode, + MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), + (LPTSTR) &lpMsgBuf, + 0, NULL ); + + SetLastError(lastError); + + if (bufLen) { + char* errorString = (char*)LocalAlloc(LMEM_FIXED, bufLen + 1); + if (errorString) { + strcpy(errorString, (char*)lpMsgBuf); + } + LocalFree(lpMsgBuf); + return errorString; + } + + return NULL; +} + +static void TerminateChildProcesses(SPAWN_INSTANCE *si) { + HANDLE hSnapshot = CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0); + if (hSnapshot == INVALID_HANDLE_VALUE) + return; + + PROCESSENTRY32 pe; + pe.dwSize = sizeof(PROCESSENTRY32); + + if (Process32First(hSnapshot, &pe)) { + do { + if (pe.th32ParentProcessID == si->dwProcessId) { + HANDLE hChildProcess = OpenProcess(PROCESS_TERMINATE, FALSE, pe.th32ProcessID); + if (hChildProcess) { + nd_log(NDLS_COLLECTORS, NDLP_WARNING, + "SPAWN PARENT: killing subprocess %u of request No %zu, pid %d (winpid %u)", + pe.th32ProcessID, si->request_id, (int)si->child_pid, si->dwProcessId); + + TerminateProcess(hChildProcess, STATUS_CONTROL_C_EXIT); + CloseHandle(hChildProcess); + } + } + } while (Process32Next(hSnapshot, &pe)); + } + + CloseHandle(hSnapshot); +} + +int map_status_code_to_signal(DWORD status_code) { + switch (status_code) { + case STATUS_ACCESS_VIOLATION: + return SIGSEGV; + case STATUS_ILLEGAL_INSTRUCTION: + return SIGILL; + case STATUS_FLOAT_DIVIDE_BY_ZERO: + case STATUS_INTEGER_DIVIDE_BY_ZERO: + case STATUS_ARRAY_BOUNDS_EXCEEDED: + case STATUS_FLOAT_OVERFLOW: + case STATUS_FLOAT_UNDERFLOW: + case STATUS_FLOAT_INVALID_OPERATION: + return SIGFPE; + case STATUS_BREAKPOINT: + case 
STATUS_SINGLE_STEP: + return SIGTRAP; + case STATUS_STACK_OVERFLOW: + case STATUS_INVALID_HANDLE: + case STATUS_INVALID_PARAMETER: + case STATUS_NO_MEMORY: + case STATUS_PRIVILEGED_INSTRUCTION: + case STATUS_DLL_NOT_FOUND: + case STATUS_DLL_INIT_FAILED: + case STATUS_ORDINAL_NOT_FOUND: + case STATUS_ENTRYPOINT_NOT_FOUND: + case STATUS_CONTROL_STACK_VIOLATION: + case STATUS_STACK_BUFFER_OVERRUN: + case STATUS_ASSERTION_FAILURE: + case STATUS_INVALID_CRUNTIME_PARAMETER: + case STATUS_HEAP_CORRUPTION: + return SIGABRT; + case STATUS_CONTROL_C_EXIT: + return SIGTERM; // we use this internally as such + case STATUS_FATAL_APP_EXIT: + return SIGTERM; + default: + return (status_code & 0xFF) << 8; + } +} + +int spawn_server_exec_kill(SPAWN_SERVER *server __maybe_unused, SPAWN_INSTANCE *si) { + if(si->child_pid != -1 && kill(si->child_pid, SIGTERM) != 0) + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "SPAWN PARENT: child of request No %zu, pid %d (winpid %u), failed to be killed", + si->request_id, (int)si->child_pid, si->dwProcessId); + + // this gives some warnings at the spawn-tester, but it is generally better + // to have them, to avoid abnormal shutdown of the plugins + if(si->read_fd != -1) { close(si->read_fd); si->read_fd = -1; } + if(si->write_fd != -1) { close(si->write_fd); si->write_fd = -1; } + if(si->stderr_fd != -1) { + if(!log_forwarder_del_and_close_fd(server->log_forwarder, si->stderr_fd)) + close(si->stderr_fd); + + si->stderr_fd = -1; + } + + errno_clear(); + if(TerminateProcess(si->process_handle, STATUS_CONTROL_C_EXIT) == 0) + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "SPAWN PARENT: child of request No %zu, pid %d (winpid %u), failed to be terminated", + si->request_id, (int)si->child_pid, si->dwProcessId); + + errno_clear(); + TerminateChildProcesses(si); + + return spawn_server_exec_wait(server, si); +} + +int spawn_server_exec_wait(SPAWN_SERVER *server __maybe_unused, SPAWN_INSTANCE *si) { + if(si->read_fd != -1) { close(si->read_fd); si->read_fd = -1; } + if(si->write_fd != -1) { close(si->write_fd); si->write_fd = -1; } + if(si->stderr_fd != -1) { + if(!log_forwarder_del_and_close_fd(server->log_forwarder, si->stderr_fd)) + close(si->stderr_fd); + + si->stderr_fd = -1; + } + + // wait for the process to end + WaitForSingleObject(si->process_handle, INFINITE); + + DWORD exit_code = -1; + GetExitCodeProcess(si->process_handle, &exit_code); + CloseHandle(si->process_handle); + + char *err = GetErrorString(exit_code); + + nd_log(NDLS_COLLECTORS, NDLP_INFO, + "SPAWN PARENT: child of request No %zu, pid %d (winpid %u), exited with code %u (0x%x): %s", + si->request_id, (int)si->child_pid, si->dwProcessId, + (unsigned)exit_code, (unsigned)exit_code, err ? err : "(no reason text)"); + + if(err) + LocalFree(err); + + freez(si); + return map_status_code_to_signal(exit_code); +} + +#endif diff --git a/src/libnetdata/statistical/README.md b/src/libnetdata/statistical/README.md index 1d1d2afd4..03a91dc45 100644 --- a/src/libnetdata/statistical/README.md +++ b/src/libnetdata/statistical/README.md @@ -1,12 +1,3 @@ - - # Statistical functions A library for easy and fast calculations of statistical measurements like average, median etc. 
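To give a rough feel for the kind of calculations meant here, a minimal illustrative sketch follows; the helper names (`series_average()`, `series_median_sorted()`) are hypothetical, not this library's actual API (see `statistical.h` for the real functions):

```c
#include <stddef.h>

// arithmetic mean of a series (illustrative only)
static double series_average(const double *values, size_t n) {
    double sum = 0.0;
    for (size_t i = 0; i < n; i++)
        sum += values[i];
    return n ? sum / (double)n : 0.0;
}

// median of an already-sorted series (illustrative only)
static double series_median_sorted(const double *sorted, size_t n) {
    if (!n) return 0.0;
    return (n % 2) ? sorted[n / 2]
                   : (sorted[n / 2 - 1] + sorted[n / 2]) / 2.0;
}
```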
diff --git a/src/libnetdata/storage_number/README.md b/src/libnetdata/storage_number/README.md index f0096fb9b..360d44a0a 100644 --- a/src/libnetdata/storage_number/README.md +++ b/src/libnetdata/storage_number/README.md @@ -1,12 +1,3 @@ - - # Netdata storage number Although `netdata` does all its calculations using `long double`, it stores all values using diff --git a/src/libnetdata/string/README.md b/src/libnetdata/string/README.md index 54c905946..c23160233 100644 --- a/src/libnetdata/string/README.md +++ b/src/libnetdata/string/README.md @@ -1,12 +1,3 @@ - - # STRING STRING provides a way to allocate and free text strings, while de-duplicating them. diff --git a/src/libnetdata/string/string.c b/src/libnetdata/string/string.c index 257a3cc4b..107c7eea5 100644 --- a/src/libnetdata/string/string.c +++ b/src/libnetdata/string/string.c @@ -347,16 +347,34 @@ void string_freez(STRING *string) { string_stats_atomic_increment(partition, releases); } -inline size_t string_strlen(STRING *string) { +inline size_t string_strlen(const STRING *string) { if(unlikely(!string)) return 0; return string->length - 1; } -inline const char *string2str(STRING *string) { +inline const char *string2str(const STRING *string) { if(unlikely(!string)) return ""; return string->str; } +bool string_ends_with_string(const STRING *whole, const STRING *end) { + if(whole == end) return true; + if(!whole || !end) return false; + if(end->length > whole->length) return false; + if(end->length == whole->length) return strcmp(string2str(whole), string2str(end)) == 0; + const char *we = string2str(whole); + we = &we[string_strlen(whole) - string_strlen(end)]; + return strncmp(we, end->str, string_strlen(end)) == 0; +} + +bool string_starts_with_string(const STRING *whole, const STRING *end) { + if(whole == end) return true; + if(!whole || !end) return false; + if(end->length > whole->length) return false; + if(end->length == whole->length) return strcmp(string2str(whole), string2str(end)) == 0; + return strncmp(string2str(whole), string2str(end), string_strlen(end)) == 0; +} + STRING *string_2way_merge(STRING *a, STRING *b) { static STRING *X = NULL; diff --git a/src/libnetdata/string/string.h b/src/libnetdata/string/string.h index c44696be2..e86ac6fb5 100644 --- a/src/libnetdata/string/string.h +++ b/src/libnetdata/string/string.h @@ -14,8 +14,10 @@ STRING *string_strndupz(const char *str, size_t len); STRING *string_dup(STRING *string); void string_freez(STRING *string); -size_t string_strlen(STRING *string); -const char *string2str(STRING *string) NEVERNULL; +size_t string_strlen(const STRING *string); +const char *string2str(const STRING *string) NEVERNULL; +bool string_ends_with_string(const STRING *whole, const STRING *end); +bool string_starts_with_string(const STRING *whole, const STRING *end); // keep common prefix/suffix and replace everything else with [x] STRING *string_2way_merge(STRING *a, STRING *b); @@ -30,10 +32,21 @@ static inline int string_strcmp(STRING *string, const char *s) { return strcmp(string2str(string), s); } +static inline int string_strncmp(STRING *string, const char *s, size_t n) { + return strncmp(string2str(string), s, n); +} + void string_statistics(size_t *inserts, size_t *deletes, size_t *searches, size_t *entries, size_t *references, size_t *memory, size_t *duplications, size_t *releases); int string_unittest(size_t entries); void string_init(void); +static inline void cleanup_string_pp(STRING **stringpp) { + if(stringpp) + string_freez(*stringpp); +} + +#define CLEAN_STRING 
_cleanup_(cleanup_string_pp) STRING + #endif diff --git a/src/libnetdata/string/utf8.c b/src/libnetdata/string/utf8.c new file mode 100644 index 000000000..0b4f138a6 --- /dev/null +++ b/src/libnetdata/string/utf8.c @@ -0,0 +1,408 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "../libnetdata.h" + +#if defined(OS_WINDOWS) +/* + * Convert any CodePage to UTF16 + * Goals: + * 1. Destination is always NULL terminated + * 2. If the destination buffer is not enough, return as much as possible data (truncate) + * 3. Always return the number of wide characters written, including the null terminator + */ + +size_t any_to_utf16(uint32_t CodePage, wchar_t *dst, size_t dst_size, const char *src, int src_len, bool *truncated) { + if(!src || src_len == 0) { + // invalid input + if(truncated) + *truncated = true; + + if(dst && dst_size) + *dst = L'\0'; + return 0; + } + + if(!dst || !dst_size) { + // the caller wants to know the buffer to allocate for the conversion + + if(truncated) + *truncated = true; + + int required = MultiByteToWideChar(CodePage, 0, src, src_len, NULL, 0); + if(required <= 0) return 0; // error in the conversion + + // Add 1 for null terminator only if src_len is not -1 + // so that the caller can call us again to get the entire string (not truncated) + return (size_t)required + ((src_len != -1) ? 1 : 0); + } + + // do the conversion directly to the destination buffer + int rc = MultiByteToWideChar(CodePage, 0, src, src_len, dst, (int)dst_size); + if(rc <= 0) { + if(truncated) + *truncated = true; + + // conversion failed, let's see why... + DWORD status = GetLastError(); + if(status == ERROR_INSUFFICIENT_BUFFER) { + // it cannot fit entirely, let's allocate a new buffer to convert it + // and then truncate it to the destination buffer + + // clear errno and LastError to clear the error of the + // MultiByteToWideChar() that failed + errno_clear(); + + // get the required size + int required_size = MultiByteToWideChar(CodePage, 0, src, src_len, NULL, 0); + + // mallocz() never fails (exits the program on NULL) + wchar_t *tmp = mallocz(required_size * sizeof(wchar_t)); + + // convert it, now it should fit + rc = MultiByteToWideChar(CodePage, 0, src, src_len, tmp, required_size); + if (rc <= 0) { + // it failed! + *dst = L'\0'; + freez(tmp); + return 0; + } + + size_t len = rc; + + // copy as much as we can + memcpy(dst, tmp, MIN(len, (dst_size - 1)) * sizeof(wchar_t)); + + // null terminate it + dst[MIN(len, (dst_size - 1))] = L'\0'; + + // free the temporary buffer + freez(tmp); + + // return the actual bytes written + return MIN(len, dst_size); + } + + // empty the destination + *dst = L'\0'; + return 0; + } + + size_t len = rc; + + if(truncated) + *truncated = false; + + if(len >= dst_size) { + if(dst[dst_size - 1] != L'\0') { + if (truncated) + *truncated = true; + + // Truncate it to fit the null terminator + dst[dst_size - 1] = L'\0'; + } + return dst_size; + } + + if(dst[len - 1] != L'\0') { + // the result is not null terminated + // append the null + dst[len] = L'\0'; + return len + 1; + } + + // the result is already null terminated + return len; +} + +/* + * Convert UTF16 (wide-character string) to UTF8 + * Goals: + * 1. Destination is always NULL terminated + * 2. If the destination buffer is not enough, return as much as possible data (truncate) + * 3. 
Always return the number of bytes written, including the null terminator + */ + +size_t utf16_to_utf8(char *dst, size_t dst_size, const wchar_t *src, int src_len, bool *truncated) { + if (!src || src_len == 0) { + // invalid input + if(truncated) + *truncated = true; + + if(dst && dst_size) + *dst = '\0'; + + return 0; + } + + if (!dst || dst_size == 0) { + // The caller wants to know the buffer size required for the conversion + + if(truncated) + *truncated = true; + + int required = WideCharToMultiByte(CP_UTF8, 0, src, src_len, NULL, 0, NULL, NULL); + if (required <= 0) return 0; // error in the conversion + + // Add 1 for null terminator only if src_len is not -1 + return (size_t)required + ((src_len != -1) ? 1 : 0); + } + + // Perform the conversion directly into the destination buffer + int rc = WideCharToMultiByte(CP_UTF8, 0, src, src_len, dst, (int)dst_size, NULL, NULL); + if (rc <= 0) { + if(truncated) + *truncated = true; + + // Conversion failed, let's see why... + DWORD status = GetLastError(); + if (status == ERROR_INSUFFICIENT_BUFFER) { + // It cannot fit entirely, let's allocate a new buffer to convert it + // and then truncate it to the destination buffer + + // Clear errno and LastError to clear the error of the + // WideCharToMultiByte() that failed + errno_clear(); + + // Get the required size + int required_size = WideCharToMultiByte(CP_UTF8, 0, src, src_len, NULL, 0, NULL, NULL); + + // mallocz() never fails (exits the program on NULL) + char *tmp = mallocz(required_size * sizeof(char)); + + // Convert it, now it should fit + rc = WideCharToMultiByte(CP_UTF8, 0, src, src_len, tmp, required_size, NULL, NULL); + if (rc <= 0) { + // Conversion failed + *dst = '\0'; + freez(tmp); + return 0; + } + + size_t len = rc; + + // Copy as much as we can + memcpy(dst, tmp, MIN(len, (dst_size - 1)) * sizeof(char)); + + // Null-terminate it + dst[MIN(len, (dst_size - 1))] = '\0'; + + // Free the temporary buffer + freez(tmp); + + // Return the actual bytes written + return MIN(len, dst_size); + } + + // Empty the destination + *dst = '\0'; + return 0; + } + + size_t len = rc; + + if(truncated) + *truncated = false; + + if (len >= dst_size) { + if(dst[dst_size - 1] != '\0') { + if (truncated) + *truncated = true; + + // Truncate it to fit the null terminator + dst[dst_size - 1] = '\0'; + } + return dst_size; + } + + if (dst[len - 1] != '\0') { + // The result is not null-terminated + // Append the null terminator + dst[len] = '\0'; + return len + 1; + } + + // The result is already null-terminated + return len; +} + +// -------------------------------------------------------------------------------------------------------------------- + +size_t txt_compute_new_size(size_t old_size, size_t required_size) { + size_t size = (required_size % 2048 == 0) ? 
required_size : required_size + 2048; + size = (size / 2048) * 2048; + + if(size < old_size * 2) + size = old_size * 2; + + return size; +} + +// -------------------------------------------------------------------------------------------------------------------- +// TXT_UTF8 + +void txt_utf8_cleanup(TXT_UTF8 *dst) { + freez(dst->data); + dst->data = NULL; + dst->used = 0; +} + +void txt_utf8_resize(TXT_UTF8 *dst, size_t required_size, bool keep) { + if(required_size <= dst->size) + return; + + size_t new_size = txt_compute_new_size(dst->size, required_size); + + if(keep && dst->data) + dst->data = reallocz(dst->data, new_size); + else { + txt_utf8_cleanup(dst); + dst->data = mallocz(new_size); + dst->used = 0; + } + + dst->size = new_size; +} + +void txt_utf8_empty(TXT_UTF8 *dst) { + txt_utf8_resize(dst, 1, false); + dst->data[0] = '\0'; + dst->used = 1; +} + +void txt_utf8_set(TXT_UTF8 *dst, const char *txt, size_t txt_len) { + txt_utf8_resize(dst, txt_len + 1, false); + memcpy(dst->data, txt, txt_len); + dst->used = txt_len + 1; + dst->data[dst->used - 1] = '\0'; +} + +void txt_utf8_append(TXT_UTF8 *dst, const char *txt, size_t txt_len) { + if(dst->used <= 1) { + // the destination is empty + txt_utf8_set(dst, txt, txt_len); + } + else { + // there is something already in the buffer + txt_utf8_resize(dst, dst->used + txt_len, true); + memcpy(&dst->data[dst->used - 1], txt, txt_len); + dst->used += txt_len; // the null was already counted + dst->data[dst->used - 1] = '\0'; + } +} + +// -------------------------------------------------------------------------------------------------------------------- +// TXT_UTF16 + +void txt_utf16_cleanup(TXT_UTF16 *dst) { + freez(dst->data); +} + +void txt_utf16_resize(TXT_UTF16 *dst, size_t required_size, bool keep) { + if(required_size <= dst->size) + return; + + size_t new_size = txt_compute_new_size(dst->size, required_size); + + if (keep && dst->data) { + dst->data = reallocz(dst->data, new_size * sizeof(wchar_t)); + } else { + txt_utf16_cleanup(dst); + dst->data = mallocz(new_size * sizeof(wchar_t)); + dst->used = 0; + } + + dst->size = new_size; +} + +void txt_utf16_set(TXT_UTF16 *dst, const wchar_t *txt, size_t txt_len) { + txt_utf16_resize(dst, dst->used + txt_len + 1, true); + memcpy(dst->data, txt, txt_len * sizeof(wchar_t)); + dst->used = txt_len + 1; + dst->data[dst->used - 1] = '\0'; +} + +void txt_utf16_append(TXT_UTF16 *dst, const wchar_t *txt, size_t txt_len) { + if(dst->used <= 1) { + // the destination is empty + txt_utf16_set(dst, txt, txt_len); + } + else { + // there is something already in the buffer + txt_utf16_resize(dst, dst->used + txt_len, true); + memcpy(&dst->data[dst->used - 1], txt, txt_len * sizeof(wchar_t)); + dst->used += txt_len; // the null was already counted + dst->data[dst->used - 1] = '\0'; + } +} + +// -------------------------------------------------------------------------------------------------------------------- + +bool wchar_to_txt_utf8(TXT_UTF8 *dst, const wchar_t *src, int src_len) { + if(!src || !src_len) { + txt_utf8_empty(dst); + return false; + } + + if(!dst->data && !dst->size) { + size_t size = utf16_to_utf8(NULL, 0, src, src_len, NULL); + if(!size) { + txt_utf8_empty(dst); + return false; + } + + // we +1 here to avoid entering the next condition below + txt_utf8_resize(dst, size, false); + } + + bool truncated = false; + dst->used = utf16_to_utf8(dst->data, dst->size, src, src_len, &truncated); + if(truncated) { + // we need to resize + size_t needed = utf16_to_utf8(NULL, 0, src, src_len, NULL); 
// find the size needed + if(!needed) { + txt_utf8_empty(dst); + return false; + } + + txt_utf8_resize(dst, needed, false); + dst->used = utf16_to_utf8(dst->data, dst->size, src, src_len, NULL); + } + + // Make sure it is not zero padded at the end + while(dst->used >= 2 && dst->data[dst->used - 2] == 0) + dst->used--; + + internal_fatal(strlen(dst->data) + 1 != dst->used, + "Wrong UTF8 string length"); + + return true; +} + +bool txt_utf16_to_utf8(TXT_UTF8 *utf8, TXT_UTF16 *utf16) { + fatal_assert(utf8 && ((utf8->data && utf8->size) || (!utf8->data && !utf8->size))); + fatal_assert(utf16 && ((utf16->data && utf16->size) || (!utf16->data && !utf16->size))); + + // pass the entire utf16 size, including the null terminator + // so that the resulting utf8 message will be null terminated too. + return wchar_to_txt_utf8(utf8, utf16->data, (int)utf16->used - 1); +} + +char *utf16_to_utf8_strdupz(const wchar_t *src, size_t *dst_len) { + size_t size = utf16_to_utf8(NULL, 0, src, -1, NULL); + if (size) { + char *dst = mallocz(size); + + size = utf16_to_utf8(dst, size, src, -1, NULL); + if(dst_len) + *dst_len = size - 1; + + return dst; + } + + if(dst_len) + *dst_len = 0; + + return NULL; +} + +#endif diff --git a/src/libnetdata/string/utf8.h b/src/libnetdata/string/utf8.h index 3e6c8c288..f27ba5447 100644 --- a/src/libnetdata/string/utf8.h +++ b/src/libnetdata/string/utf8.h @@ -3,7 +3,81 @@ #ifndef NETDATA_STRING_UTF8_H #define NETDATA_STRING_UTF8_H 1 -#define IS_UTF8_BYTE(x) ((x) & 0x80) -#define IS_UTF8_STARTBYTE(x) (IS_UTF8_BYTE(x)&&((x) & 0x40)) +#include "../libnetdata.h" + +#define IS_UTF8_BYTE(x) ((uint8_t)(x) & (uint8_t)0x80) +#define IS_UTF8_STARTBYTE(x) (IS_UTF8_BYTE(x) && ((uint8_t)(x) & (uint8_t)0x40)) + +#ifndef _countof +#define _countof(x) (sizeof(x) / sizeof(*(x))) +#endif + +#if defined(OS_WINDOWS) + +// return an always null terminated wide string, truncate to given size if destination is not big enough, +// src_len can be -1 use all of it. +// returns zero on errors, > 0 otherwise (including the null, even if src is not null terminated). +size_t any_to_utf16(uint32_t CodePage, wchar_t *dst, size_t dst_size, const char *src, int src_len, bool *truncated); + +// always null terminated, truncated if it does not fit, src_len can be -1 to use all of it. +// returns zero on errors, > 0 otherwise (including the null, even if src is not null terminated). +#define utf8_to_utf16(utf16, utf16_count, src, src_len) any_to_utf16(CP_UTF8, utf16, utf16_count, src, src_len, NULL) + +// always null terminated, truncated if it does not fit, src_len can be -1 to use all of it. +// returns zero on errors, > 0 otherwise (including the null, even if src is not null terminated). 
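+// e.g. (sketch): converting a null-terminated wide string into a fixed buffer:
+//     bool trunc; char out[256];
+//     size_t n = utf16_to_utf8(out, sizeof(out), wide_str, -1, &trunc);  // n includes the null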
+size_t utf16_to_utf8(char *dst, size_t dst_size, const wchar_t *src, int src_len, bool *truncated); + +// -------------------------------------------------------------------------------------------------------------------- +// TXT_UTF8 + +typedef enum __attribute__((packed)) { + TXT_SOURCE_UNKNOWN = 0, + TXT_SOURCE_PROVIDER, + TXT_SOURCE_FIELD_CACHE, + TXT_SOURCE_EVENT_LOG, + TXT_SOURCE_HARDCODED, + + // terminator + TXT_SOURCE_MAX, +} TXT_SOURCE; + +typedef struct { + char *data; + uint32_t size; // the allocated size of data buffer + uint32_t used; // the used size of the data buffer (including null terminators, if any) + TXT_SOURCE src; +} TXT_UTF8; + +void txt_utf8_append(TXT_UTF8 *dst, const char *txt, size_t txt_len); +void txt_utf8_set(TXT_UTF8 *dst, const char *txt, size_t txt_len); +void txt_utf8_empty(TXT_UTF8 *dst); +void txt_utf8_resize(TXT_UTF8 *dst, size_t required_size, bool keep); +void txt_utf8_cleanup(TXT_UTF8 *dst); + +// -------------------------------------------------------------------------------------------------------------------- +// TXT_UTF16 + +typedef struct { + wchar_t *data; + uint32_t size; // the allocated size of data buffer + uint32_t used; // the used size of the data buffer (including null terminators, if any) +} TXT_UTF16; + +void txt_utf16_cleanup(TXT_UTF16 *dst); +void txt_utf16_resize(TXT_UTF16 *dst, size_t required_size, bool keep); +void txt_utf16_set(TXT_UTF16 *dst, const wchar_t *txt, size_t txt_len); +void txt_utf16_append(TXT_UTF16 *dst, const wchar_t *txt, size_t txt_len); + +// -------------------------------------------------------------------------------------------------------------------- + +size_t txt_compute_new_size(size_t old_size, size_t required_size); + +bool txt_utf16_to_utf8(TXT_UTF8 *utf8, TXT_UTF16 *utf16); +bool wchar_to_txt_utf8(TXT_UTF8 *dst, const wchar_t *src, int src_len); +char *utf16_to_utf8_strdupz(const wchar_t *src, size_t *dst_len); + +// -------------------------------------------------------------------------------------------------------------------- + +#endif // OS_WINDOWS #endif /* NETDATA_STRING_UTF8_H */ diff --git a/src/libnetdata/template-enum.h b/src/libnetdata/template-enum.h index 393a6a945..2170ee86b 100644 --- a/src/libnetdata/template-enum.h +++ b/src/libnetdata/template-enum.h @@ -37,4 +37,47 @@ return def_str; \ } +// -------------------------------------------------------------------------------------------------------------------- + +#define BITMAP_STR_DEFINE_FUNCTIONS_EXTERN(type) \ + type type ## _2id_one(const char *str); \ + const char *type##_2str_one(type id); \ + const char *type##_2json(BUFFER *wb, const char *key, type id); + +#define BITMAP_STR_DEFINE_FUNCTIONS(type, def, def_str) \ + type type##_2id_one(const char *str) \ + { \ + if (!str || !*str) \ + return def; \ + \ + for (size_t i = 0; type ## _names[i].name; i++) { \ + if (strcmp(type ## _names[i].name, str) == 0) \ + return type ## _names[i].id; \ + } \ + \ + return def; \ + } \ + \ + const char *type##_2str_one(type id) \ + { \ + for (size_t i = 0; type ## _names[i].name; i++) { \ + if (id == type ## _names[i].id) \ + return type ## _names[i].name; \ + } \ + \ + return def_str; \ + } \ + \ + const char *type##_2json(BUFFER *wb, const char *key, type id) \ + { \ + buffer_json_member_add_array(wb, key); \ + for (size_t i = 0; type ## _names[i].name; i++) { \ + if ((id & type ## _names[i].id) == type ## _names[i].id) \ + buffer_json_add_array_item_string(wb, type ## _names[i].name); \ + } \ + buffer_json_array_close(wb); \ + \ + 
return def_str; \ + } + #endif //NETDATA_TEMPLATE_ENUM_H diff --git a/src/libnetdata/threads/README.md b/src/libnetdata/threads/README.md index 906f47952..adf38be74 100644 --- a/src/libnetdata/threads/README.md +++ b/src/libnetdata/threads/README.md @@ -1,12 +1,3 @@ - - # Threads Netdata uses a custom threads library diff --git a/src/libnetdata/url/README.md b/src/libnetdata/url/README.md index 01a2dddb6..35bdb19b9 100644 --- a/src/libnetdata/url/README.md +++ b/src/libnetdata/url/README.md @@ -1,12 +1,3 @@ - - # URL The URL library contains common functions useful for URLs, like conversion from/to hex, diff --git a/src/libnetdata/uuid/README.md b/src/libnetdata/uuid/README.md index a0da380a9..6bc1c7d27 100644 --- a/src/libnetdata/uuid/README.md +++ b/src/libnetdata/uuid/README.md @@ -1,11 +1,3 @@ - - # UUID Netdata uses libuuid for managing UUIDs. diff --git a/src/libnetdata/uuid/uuid.h b/src/libnetdata/uuid/uuid.h index cde457616..5fb1bce68 100644 --- a/src/libnetdata/uuid/uuid.h +++ b/src/libnetdata/uuid/uuid.h @@ -37,6 +37,8 @@ ND_UUID UUID_generate_from_hash(const void *payload, size_t payload_len); #define UUIDeq(a, b) ((a).parts.hig64 == (b).parts.hig64 && (a).parts.low64 == (b).parts.low64) +#define UUIDiszero(a) ((a).parts.hig64 == 0 && (a).parts.low64 == 0) + static inline ND_UUID uuid2UUID(const nd_uuid_t uu1) { // uu1 may not be aligned, so copy it to the output ND_UUID copy; diff --git a/src/libnetdata/worker_utilization/README.md b/src/libnetdata/worker_utilization/README.md index 1a354376c..17dd85e3e 100644 --- a/src/libnetdata/worker_utilization/README.md +++ b/src/libnetdata/worker_utilization/README.md @@ -1,12 +1,3 @@ - - # Worker Utilization This library is to be used when there are 1 or more worker threads accepting requests diff --git a/src/libnetdata/xxHash/xxhash.h b/src/libnetdata/xxHash/xxhash.h new file mode 100644 index 000000000..5e2c0ed24 --- /dev/null +++ b/src/libnetdata/xxHash/xxhash.h @@ -0,0 +1,6773 @@ +/* + * xxHash - Extremely Fast Hash algorithm + * Header File + * Copyright (C) 2012-2023 Yann Collet + * + * BSD 2-Clause License (https://www.opensource.org/licenses/bsd-license.php) + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *
+ * You can contact the author at:
+ *   - xxHash homepage: https://www.xxhash.com
+ *   - xxHash source repository: https://github.com/Cyan4973/xxHash
+ */
+
+/*!
+ * @mainpage xxHash
+ *
+ * xxHash is an extremely fast non-cryptographic hash algorithm, working at RAM speed
+ * limits.
+ *
+ * It is proposed in four flavors, in three families:
+ * 1. @ref XXH32_family
+ *   - Classic 32-bit hash function. Simple, compact, and runs on almost all
+ *     32-bit and 64-bit systems.
+ * 2. @ref XXH64_family
+ *   - Classic 64-bit adaptation of XXH32. Just as simple, and runs well on most
+ *     64-bit systems (but _not_ 32-bit systems).
+ * 3. @ref XXH3_family
+ *   - Modern 64-bit and 128-bit hash function family which features improved
+ *     strength and performance across the board, especially on smaller data.
+ *     It benefits greatly from SIMD and 64-bit without requiring it.
+ *
+ * Benchmarks
+ * ---
+ * The reference system uses an Intel i7-9700K CPU, and runs Ubuntu x64 20.04.
+ * The open source benchmark program is compiled with clang v10.0 using -O3 flag.
+ *
+ * | Hash Name            | ISA ext | Width | Large Data Speed | Small Data Velocity |
+ * | -------------------- | ------- | ----: | ---------------: | ------------------: |
+ * | XXH3_64bits()        | @b AVX2 |    64 |        59.4 GB/s |               133.1 |
+ * | MeowHash             | AES-NI  |   128 |        58.2 GB/s |                52.5 |
+ * | XXH3_128bits()       | @b AVX2 |   128 |        57.9 GB/s |               118.1 |
+ * | CLHash               | PCLMUL  |    64 |        37.1 GB/s |                58.1 |
+ * | XXH3_64bits()        | @b SSE2 |    64 |        31.5 GB/s |               133.1 |
+ * | XXH3_128bits()       | @b SSE2 |   128 |        29.6 GB/s |               118.1 |
+ * | RAM sequential read  |         |   N/A |        28.0 GB/s |                 N/A |
+ * | ahash                | AES-NI  |    64 |        22.5 GB/s |               107.2 |
+ * | City64               |         |    64 |        22.0 GB/s |                76.6 |
+ * | T1ha2                |         |    64 |        22.0 GB/s |                99.0 |
+ * | City128              |         |   128 |        21.7 GB/s |                57.7 |
+ * | FarmHash             | AES-NI  |    64 |        21.3 GB/s |                71.9 |
+ * | XXH64()              |         |    64 |        19.4 GB/s |                71.0 |
+ * | SpookyHash           |         |    64 |        19.3 GB/s |                53.2 |
+ * | Mum                  |         |    64 |        18.0 GB/s |                67.0 |
+ * | CRC32C               | SSE4.2  |    32 |        13.0 GB/s |                57.9 |
+ * | XXH32()              |         |    32 |         9.7 GB/s |                71.9 |
+ * | City32               |         |    32 |         9.1 GB/s |                66.0 |
+ * | Blake3*              | @b AVX2 |   256 |         4.4 GB/s |                 8.1 |
+ * | Murmur3              |         |    32 |         3.9 GB/s |                56.1 |
+ * | SipHash*             |         |    64 |         3.0 GB/s |                43.2 |
+ * | Blake3*              | @b SSE2 |   256 |         2.4 GB/s |                 8.1 |
+ * | HighwayHash          |         |    64 |         1.4 GB/s |                 6.0 |
+ * | FNV64                |         |    64 |         1.2 GB/s |                62.7 |
+ * | Blake2*              |         |   256 |         1.1 GB/s |                 5.1 |
+ * | SHA1*                |         |   160 |         0.8 GB/s |                 5.6 |
+ * | MD5*                 |         |   128 |         0.6 GB/s |                 7.8 |
+ * @note
+ *   - Hashes which require a specific ISA extension are noted. SSE2 is also noted,
+ *     even though it is mandatory on x64.
+ *   - Hashes with an asterisk are cryptographic. Note that MD5 is non-cryptographic
+ *     by modern standards.
+ *   - Small data velocity is a rough average of the algorithm's efficiency for small
+ *     data. For more accurate information, see the wiki.
+ *   - More benchmarks and strength tests are found on the wiki:
+ *     https://github.com/Cyan4973/xxHash/wiki
+ *
+ * Usage
+ * ------
+ * All xxHash variants use a similar API. Changing the algorithm is a trivial
+ * substitution.
+ *
+ * @pre
+ *    For functions which take an input and length parameter, the following
+ *    requirements are assumed:
+ *    - The range from [`input`, `input + length`) is valid, readable memory.
+ *      - The only exception is if the `length` is `0`, `input` may be `NULL`.
+ *    - For C++, the objects must have the *TriviallyCopyable* property, as the
+ *      functions access bytes directly as if it was an array of `unsigned char`.
+ *
+ * @anchor single_shot_example
+ * **Single Shot**
+ *
+ * These functions are stateless functions which hash a contiguous block of memory,
+ * immediately returning the result. They are the easiest and usually the fastest
+ * option.
+ *
+ * XXH32(), XXH64(), XXH3_64bits(), XXH3_128bits()
+ *
+ * @code{.c}
+ *   #include <string.h>
+ *   #include "xxhash.h"
+ *
+ *   // Example for a function which hashes a null terminated string with XXH32().
+ *   XXH32_hash_t hash_string(const char* string, XXH32_hash_t seed)
+ *   {
+ *       // NULL pointers are only valid if the length is zero
+ *       size_t length = (string == NULL) ? 0 : strlen(string);
+ *       return XXH32(string, length, seed);
+ *   }
+ * @endcode
+ *
+ * @anchor streaming_example
+ * **Streaming**
+ *
+ * These groups of functions allow incremental hashing of unknown size, even
+ * more than what would fit in a size_t.
+ *
+ * XXH32_reset(), XXH64_reset(), XXH3_64bits_reset(), XXH3_128bits_reset()
+ *
+ * @code{.c}
+ *   #include <stdio.h>
+ *   #include <assert.h>
+ *   #include "xxhash.h"
+ *   // Example for a function which hashes a FILE incrementally with XXH3_64bits().
+ *   XXH64_hash_t hashFile(FILE* f)
+ *   {
+ *       // Allocate a state struct. Do not just use malloc() or new.
+ *       XXH3_state_t* state = XXH3_createState();
+ *       assert(state != NULL && "Out of memory!");
+ *       // Reset the state to start a new hashing session.
+ *       XXH3_64bits_reset(state);
+ *       char buffer[4096];
+ *       size_t count;
+ *       // Read the file in chunks
+ *       while ((count = fread(buffer, 1, sizeof(buffer), f)) != 0) {
+ *           // Run update() as many times as necessary to process the data
+ *           XXH3_64bits_update(state, buffer, count);
+ *       }
+ *       // Retrieve the finalized hash. This will not change the state.
+ *       XXH64_hash_t result = XXH3_64bits_digest(state);
+ *       // Free the state. Do not use free().
+ *       XXH3_freeState(state);
+ *       return result;
+ *   }
+ * @endcode
+ *
+ * @file xxhash.h
+ * xxHash prototypes and implementation
+ */
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/* ****************************
+ *  INLINE mode
+ ******************************/
+/*!
+ * @defgroup public Public API
+ * Contains details on the public xxHash functions.
+ * @{
+ */
+#ifdef XXH_DOXYGEN
+/*!
+ * @brief Gives access to internal state declaration, required for static allocation.
+ *
+ * Incompatible with dynamic linking, due to risks of ABI changes.
+ *
+ * Usage:
+ * @code{.c}
+ *   #define XXH_STATIC_LINKING_ONLY
+ *   #include "xxhash.h"
+ * @endcode
+ */
+#  define XXH_STATIC_LINKING_ONLY
+/* Do not undef XXH_STATIC_LINKING_ONLY for Doxygen */
+
+/*!
+ * @brief Gives access to internal definitions.
+ *
+ * Usage:
+ * @code{.c}
+ *   #define XXH_STATIC_LINKING_ONLY
+ *   #define XXH_IMPLEMENTATION
+ *   #include "xxhash.h"
+ * @endcode
+ */
+#  define XXH_IMPLEMENTATION
+/* Do not undef XXH_IMPLEMENTATION for Doxygen */
+
+/*!
+ * @brief Exposes the implementation and marks all functions as `inline`.
+ *
+ * Use these build macros to inline xxhash into the target unit.
+ * Inlining improves performance on small inputs, especially when the length is
+ * expressed as a compile-time constant:
+ *
+ *   https://fastcompression.blogspot.com/2018/03/xxhash-for-small-keys-impressive-power.html
+ *
+ * It also keeps xxHash symbols private to the unit, so they are not exported.
+ *
+ * Usage:
+ * @code{.c}
+ *   #define XXH_INLINE_ALL
+ *   #include "xxhash.h"
+ * @endcode
+ * Do not compile and link xxhash.o as a separate object, as it is not useful.
+ */
+#  define XXH_INLINE_ALL
+#  undef XXH_INLINE_ALL
+/*!
+ * @brief Exposes the implementation without marking functions as inline. + */ +# define XXH_PRIVATE_API +# undef XXH_PRIVATE_API +/*! + * @brief Emulate a namespace by transparently prefixing all symbols. + * + * If you want to include _and expose_ xxHash functions from within your own + * library, but also want to avoid symbol collisions with other libraries which + * may also include xxHash, you can use @ref XXH_NAMESPACE to automatically prefix + * any public symbol from xxhash library with the value of @ref XXH_NAMESPACE + * (therefore, avoid empty or numeric values). + * + * Note that no change is required within the calling program as long as it + * includes `xxhash.h`: Regular symbol names will be automatically translated + * by this header. + */ +# define XXH_NAMESPACE /* YOUR NAME HERE */ +# undef XXH_NAMESPACE +#endif + +#if (defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)) \ + && !defined(XXH_INLINE_ALL_31684351384) + /* this section should be traversed only once */ +# define XXH_INLINE_ALL_31684351384 + /* give access to the advanced API, required to compile implementations */ +# undef XXH_STATIC_LINKING_ONLY /* avoid macro redef */ +# define XXH_STATIC_LINKING_ONLY + /* make all functions private */ +# undef XXH_PUBLIC_API +# if defined(__GNUC__) +# define XXH_PUBLIC_API static __inline __attribute__((unused)) +# elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) +# define XXH_PUBLIC_API static inline +# elif defined(_MSC_VER) +# define XXH_PUBLIC_API static __inline +# else + /* note: this version may generate warnings for unused static functions */ +# define XXH_PUBLIC_API static +# endif + + /* + * This part deals with the special case where a unit wants to inline xxHash, + * but "xxhash.h" has previously been included without XXH_INLINE_ALL, + * such as part of some previously included *.h header file. + * Without further action, the new include would just be ignored, + * and functions would effectively _not_ be inlined (silent failure). + * The following macros solve this situation by prefixing all inlined names, + * avoiding naming collision with previous inclusions. + */ + /* Before that, we unconditionally #undef all symbols, + * in case they were already defined with XXH_NAMESPACE. 
+ * They will then be redefined for XXH_INLINE_ALL + */ +# undef XXH_versionNumber + /* XXH32 */ +# undef XXH32 +# undef XXH32_createState +# undef XXH32_freeState +# undef XXH32_reset +# undef XXH32_update +# undef XXH32_digest +# undef XXH32_copyState +# undef XXH32_canonicalFromHash +# undef XXH32_hashFromCanonical + /* XXH64 */ +# undef XXH64 +# undef XXH64_createState +# undef XXH64_freeState +# undef XXH64_reset +# undef XXH64_update +# undef XXH64_digest +# undef XXH64_copyState +# undef XXH64_canonicalFromHash +# undef XXH64_hashFromCanonical + /* XXH3_64bits */ +# undef XXH3_64bits +# undef XXH3_64bits_withSecret +# undef XXH3_64bits_withSeed +# undef XXH3_64bits_withSecretandSeed +# undef XXH3_createState +# undef XXH3_freeState +# undef XXH3_copyState +# undef XXH3_64bits_reset +# undef XXH3_64bits_reset_withSeed +# undef XXH3_64bits_reset_withSecret +# undef XXH3_64bits_update +# undef XXH3_64bits_digest +# undef XXH3_generateSecret + /* XXH3_128bits */ +# undef XXH128 +# undef XXH3_128bits +# undef XXH3_128bits_withSeed +# undef XXH3_128bits_withSecret +# undef XXH3_128bits_reset +# undef XXH3_128bits_reset_withSeed +# undef XXH3_128bits_reset_withSecret +# undef XXH3_128bits_reset_withSecretandSeed +# undef XXH3_128bits_update +# undef XXH3_128bits_digest +# undef XXH128_isEqual +# undef XXH128_cmp +# undef XXH128_canonicalFromHash +# undef XXH128_hashFromCanonical + /* Finally, free the namespace itself */ +# undef XXH_NAMESPACE + + /* employ the namespace for XXH_INLINE_ALL */ +# define XXH_NAMESPACE XXH_INLINE_ + /* + * Some identifiers (enums, type names) are not symbols, + * but they must nonetheless be renamed to avoid redeclaration. + * Alternative solution: do not redeclare them. + * However, this requires some #ifdefs, and has a more dispersed impact. + * Meanwhile, renaming can be achieved in a single place. + */ +# define XXH_IPREF(Id) XXH_NAMESPACE ## Id +# define XXH_OK XXH_IPREF(XXH_OK) +# define XXH_ERROR XXH_IPREF(XXH_ERROR) +# define XXH_errorcode XXH_IPREF(XXH_errorcode) +# define XXH32_canonical_t XXH_IPREF(XXH32_canonical_t) +# define XXH64_canonical_t XXH_IPREF(XXH64_canonical_t) +# define XXH128_canonical_t XXH_IPREF(XXH128_canonical_t) +# define XXH32_state_s XXH_IPREF(XXH32_state_s) +# define XXH32_state_t XXH_IPREF(XXH32_state_t) +# define XXH64_state_s XXH_IPREF(XXH64_state_s) +# define XXH64_state_t XXH_IPREF(XXH64_state_t) +# define XXH3_state_s XXH_IPREF(XXH3_state_s) +# define XXH3_state_t XXH_IPREF(XXH3_state_t) +# define XXH128_hash_t XXH_IPREF(XXH128_hash_t) + /* Ensure the header is parsed again, even if it was previously included */ +# undef XXHASH_H_5627135585666179 +# undef XXHASH_H_STATIC_13879238742 +#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */ + +/* **************************************************************** + * Stable API + *****************************************************************/ +#ifndef XXHASH_H_5627135585666179 +#define XXHASH_H_5627135585666179 1 + +/*! @brief Marks a global symbol. 
 */
+#if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API)
+#  if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT))
+#    ifdef XXH_EXPORT
+#      define XXH_PUBLIC_API __declspec(dllexport)
+#    elif XXH_IMPORT
+#      define XXH_PUBLIC_API __declspec(dllimport)
+#    endif
+#  else
+#    define XXH_PUBLIC_API   /* do nothing */
+#  endif
+#endif
+
+#ifdef XXH_NAMESPACE
+# define XXH_CAT(A,B) A##B
+# define XXH_NAME2(A,B) XXH_CAT(A,B)
+# define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber)
+/* XXH32 */
+# define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32)
+# define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState)
+# define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState)
+# define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset)
+# define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update)
+# define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest)
+# define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState)
+# define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash)
+# define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical)
+/* XXH64 */
+# define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64)
+# define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState)
+# define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState)
+# define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset)
+# define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update)
+# define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest)
+# define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState)
+# define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash)
+# define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical)
+/* XXH3_64bits */
+# define XXH3_64bits XXH_NAME2(XXH_NAMESPACE, XXH3_64bits)
+# define XXH3_64bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecret)
+# define XXH3_64bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSeed)
+# define XXH3_64bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecretandSeed)
+# define XXH3_createState XXH_NAME2(XXH_NAMESPACE, XXH3_createState)
+# define XXH3_freeState XXH_NAME2(XXH_NAMESPACE, XXH3_freeState)
+# define XXH3_copyState XXH_NAME2(XXH_NAMESPACE, XXH3_copyState)
+# define XXH3_64bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset)
+# define XXH3_64bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSeed)
+# define XXH3_64bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecret)
+# define XXH3_64bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecretandSeed)
+# define XXH3_64bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_update)
+# define XXH3_64bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_digest)
+# define XXH3_generateSecret XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret)
+# define XXH3_generateSecret_fromSeed XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret_fromSeed)
+/* XXH3_128bits */
+# define XXH128 XXH_NAME2(XXH_NAMESPACE, XXH128)
+# define XXH3_128bits XXH_NAME2(XXH_NAMESPACE, XXH3_128bits)
+# define XXH3_128bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSeed)
+# define XXH3_128bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecret)
+# define XXH3_128bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecretandSeed)
+# define XXH3_128bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset)
+# define XXH3_128bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSeed)
+# define XXH3_128bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecret)
+# define XXH3_128bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecretandSeed)
+# define XXH3_128bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_update)
+# define XXH3_128bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_digest)
+# define XXH128_isEqual XXH_NAME2(XXH_NAMESPACE, XXH128_isEqual)
+# define XXH128_cmp XXH_NAME2(XXH_NAMESPACE, XXH128_cmp)
+# define XXH128_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH128_canonicalFromHash)
+# define XXH128_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH128_hashFromCanonical)
+#endif
+
+
+/* *************************************
+*  Compiler specifics
+***************************************/
+
+/* specific declaration modes for Windows */
+#if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API)
+#  if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT))
+#    ifdef XXH_EXPORT
+#      define XXH_PUBLIC_API __declspec(dllexport)
+#    elif XXH_IMPORT
+#      define XXH_PUBLIC_API __declspec(dllimport)
+#    endif
+#  else
+#    define XXH_PUBLIC_API   /* do nothing */
+#  endif
+#endif
+
+#if defined (__GNUC__)
+# define XXH_CONSTF  __attribute__((const))
+# define XXH_PUREF   __attribute__((pure))
+# define XXH_MALLOCF __attribute__((malloc))
+#else
+# define XXH_CONSTF  /* disable */
+# define XXH_PUREF
+# define XXH_MALLOCF
+#endif
+
+/* *************************************
+*  Version
+***************************************/
+#define XXH_VERSION_MAJOR    0
+#define XXH_VERSION_MINOR    8
+#define XXH_VERSION_RELEASE  2
+/*! @brief Version number, encoded as two digits each */
+#define XXH_VERSION_NUMBER  (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)
+
+/*!
+ * @brief Obtains the xxHash version.
+ *
+ * This is mostly useful when xxHash is compiled as a shared library,
+ * since the returned value comes from the library, as opposed to the header file.
+ *
+ * @return @ref XXH_VERSION_NUMBER of the invoked library.
+ */
+XXH_PUBLIC_API XXH_CONSTF unsigned XXH_versionNumber (void);
+
+
+/* ****************************
+*  Common basic types
+******************************/
+#include <stddef.h>   /* size_t */
+/*!
+ * @brief Exit code for the streaming API.
+ */
+typedef enum {
+    XXH_OK = 0, /*!< OK */
+    XXH_ERROR   /*!< Error */
+} XXH_errorcode;
+
+
+/*-**********************************************************************
+*  32-bit hash
+************************************************************************/
+#if defined(XXH_DOXYGEN) /* Don't show <stdint.h> include */
+/*!
+ * @brief An unsigned 32-bit integer.
+ *
+ * Not necessarily defined to `uint32_t` but functionally equivalent.
+ */
+typedef uint32_t XXH32_hash_t;
+
+#elif !defined (__VMS) \
+  && (defined (__cplusplus) \
+  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+#   include <stdint.h>
+    typedef uint32_t XXH32_hash_t;
+
+#else
+#   include <limits.h>
+#   if UINT_MAX == 0xFFFFFFFFUL
+      typedef unsigned int XXH32_hash_t;
+#   elif ULONG_MAX == 0xFFFFFFFFUL
+      typedef unsigned long XXH32_hash_t;
+#   else
+#     error "unsupported platform: need a 32-bit type"
+#   endif
+#endif
+
+/*!
+ * @}
+ *
+ * @defgroup XXH32_family XXH32 family
+ * @ingroup public
+ * Contains functions used in the classic 32-bit xxHash algorithm.
+ *
+ * @note
+ *   XXH32 is useful for older platforms, with no or poor 64-bit performance.
+ *   Note that the @ref XXH3_family provides competitive speed for both 32-bit
+ *   and 64-bit systems, and offers true 64/128 bit hash results.
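+ *
+ * As a quick illustration (editor's sketch, not upstream text; `buf` and
+ * `buf_len` stand for any readable byte buffer and its length):
+ * @code{.c}
+ *   XXH32_hash_t h = XXH32(buf, buf_len, 0);   // one-shot, seed of 0
+ * @endcode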
+ * + * @see @ref XXH64_family, @ref XXH3_family : Other xxHash families + * @see @ref XXH32_impl for implementation details + * @{ + */ + +/*! + * @brief Calculates the 32-bit hash of @p input using xxHash32. + * + * Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark): 5.4 GB/s + * + * See @ref single_shot_example "Single Shot Example" for an example. + * + * @param input The block of data to be hashed, at least @p length bytes in size. + * @param length The length of @p input, in bytes. + * @param seed The 32-bit seed to alter the hash's output predictably. + * + * @pre + * The memory between @p input and @p input + @p length must be valid, + * readable, contiguous memory. However, if @p length is `0`, @p input may be + * `NULL`. In C++, this also must be *TriviallyCopyable*. + * + * @return The calculated 32-bit hash value. + * + * @see + * XXH64(), XXH3_64bits_withSeed(), XXH3_128bits_withSeed(), XXH128(): + * Direct equivalents for the other variants of xxHash. + * @see + * XXH32_createState(), XXH32_update(), XXH32_digest(): Streaming version. + */ +XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32 (const void* input, size_t length, XXH32_hash_t seed); + +#ifndef XXH_NO_STREAM +/*! + * Streaming functions generate the xxHash value from an incremental input. + * This method is slower than single-call functions, due to state management. + * For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized. + * + * An XXH state must first be allocated using `XXH*_createState()`. + * + * Start a new hash by initializing the state with a seed using `XXH*_reset()`. + * + * Then, feed the hash state by calling `XXH*_update()` as many times as necessary. + * + * The function returns an error code, with 0 meaning OK, and any other value + * meaning there is an error. + * + * Finally, a hash value can be produced anytime, by using `XXH*_digest()`. + * This function returns the nn-bits hash as an int or long long. + * + * It's still possible to continue inserting input into the hash state after a + * digest, and generate new hash values later on by invoking `XXH*_digest()`. + * + * When done, release the state using `XXH*_freeState()`. + * + * @see streaming_example at the top of @ref xxhash.h for an example. + */ + +/*! + * @typedef struct XXH32_state_s XXH32_state_t + * @brief The opaque state struct for the XXH32 streaming API. + * + * @see XXH32_state_s for details. + */ +typedef struct XXH32_state_s XXH32_state_t; + +/*! + * @brief Allocates an @ref XXH32_state_t. + * + * Must be freed with XXH32_freeState(). + * @return An allocated XXH32_state_t on success, `NULL` on failure. + */ +XXH_PUBLIC_API XXH_MALLOCF XXH32_state_t* XXH32_createState(void); +/*! + * @brief Frees an @ref XXH32_state_t. + * + * Must be allocated with XXH32_createState(). + * @param statePtr A pointer to an @ref XXH32_state_t allocated with @ref XXH32_createState(). + * @return XXH_OK. + */ +XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr); +/*! + * @brief Copies one @ref XXH32_state_t to another. + * + * @param dst_state The state to copy to. + * @param src_state The state to copy from. + * @pre + * @p dst_state and @p src_state must not be `NULL` and must not overlap. + */ +XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_t* src_state); + +/*! + * @brief Resets an @ref XXH32_state_t to begin a new hash. + * + * This function resets and seeds a state. Call it before @ref XXH32_update(). + * + * @param statePtr The state struct to reset. 
+ * @param seed The 32-bit seed to alter the hash result predictably.
+ *
+ * @pre
+ *   @p statePtr must not be `NULL`.
+ *
+ * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
+ */
+XXH_PUBLIC_API XXH_errorcode XXH32_reset (XXH32_state_t* statePtr, XXH32_hash_t seed);
+
+/*!
+ * @brief Consumes a block of @p input to an @ref XXH32_state_t.
+ *
+ * Call this to incrementally consume blocks of data.
+ *
+ * @param statePtr The state struct to update.
+ * @param input The block of data to be hashed, at least @p length bytes in size.
+ * @param length The length of @p input, in bytes.
+ *
+ * @pre
+ *   @p statePtr must not be `NULL`.
+ * @pre
+ *   The memory between @p input and @p input + @p length must be valid,
+ *   readable, contiguous memory. However, if @p length is `0`, @p input may be
+ *   `NULL`. In C++, this also must be *TriviallyCopyable*.
+ *
+ * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
+ */
+XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length);
+
+/*!
+ * @brief Returns the calculated hash value from an @ref XXH32_state_t.
+ *
+ * @note
+ *   Calling XXH32_digest() will not affect @p statePtr, so you can update,
+ *   digest, and update again.
+ *
+ * @param statePtr The state struct to calculate the hash from.
+ *
+ * @pre
+ *   @p statePtr must not be `NULL`.
+ *
+ * @return The calculated xxHash32 value from that state.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr);
+#endif /* !XXH_NO_STREAM */
+
+/*******   Canonical representation   *******/
+
+/*
+ * The default return values from XXH functions are unsigned 32 and 64 bit
+ * integers.
+ * This is the simplest and fastest format for further post-processing.
+ *
+ * However, this leaves open the question of what is the order on the byte level,
+ * since little and big endian conventions will store the same number differently.
+ *
+ * The canonical representation settles this issue by mandating big-endian
+ * convention, the same convention as human-readable numbers (large digits first).
+ *
+ * When writing hash values to storage, sending them over a network, or printing
+ * them, it's highly recommended to use the canonical representation to ensure
+ * portability across a wider range of systems, present and future.
+ *
+ * The following functions allow transformation of hash values to and from
+ * canonical format.
+ */
+
+/*!
+ * @brief Canonical (big endian) representation of @ref XXH32_hash_t.
+ */
+typedef struct {
+    unsigned char digest[4]; /*!< Hash bytes, big endian */
+} XXH32_canonical_t;
+
+/*!
+ * @brief Converts an @ref XXH32_hash_t to a big endian @ref XXH32_canonical_t.
+ *
+ * @param dst  The @ref XXH32_canonical_t pointer to be stored to.
+ * @param hash The @ref XXH32_hash_t to be converted.
+ *
+ * @pre
+ *   @p dst must not be `NULL`.
+ */
+XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash);
+
+/*!
+ * @brief Converts an @ref XXH32_canonical_t to a native @ref XXH32_hash_t.
+ *
+ * @param src The @ref XXH32_canonical_t to convert.
+ *
+ * @pre
+ *   @p src must not be `NULL`.
+ *
+ * @return The converted hash.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src);
+
+
+/*! @cond Doxygen ignores this part */
+#ifdef __has_attribute
+# define XXH_HAS_ATTRIBUTE(x) __has_attribute(x)
+#else
+# define XXH_HAS_ATTRIBUTE(x) 0
+#endif
+/*! @endcond */
+
+/*! @cond Doxygen ignores this part */
+/*
+ * C23 __STDC_VERSION__ number hasn't been specified yet. For now
+ * leave as `201711L` (C17 + 1).
+ * TODO: Update to correct value when it's been specified.
+ */
+#define XXH_C23_VN 201711L
+/*! @endcond */
+
+/*! @cond Doxygen ignores this part */
+/* C-language Attributes are added in C23. */
+#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= XXH_C23_VN) && defined(__has_c_attribute)
+# define XXH_HAS_C_ATTRIBUTE(x) __has_c_attribute(x)
+#else
+# define XXH_HAS_C_ATTRIBUTE(x) 0
+#endif
+/*! @endcond */
+
+/*! @cond Doxygen ignores this part */
+#if defined(__cplusplus) && defined(__has_cpp_attribute)
+# define XXH_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)
+#else
+# define XXH_HAS_CPP_ATTRIBUTE(x) 0
+#endif
+/*! @endcond */
+
+/*! @cond Doxygen ignores this part */
+/*
+ * Define XXH_FALLTHROUGH macro for annotating switch case with the 'fallthrough' attribute
+ * introduced in CPP17 and C23.
+ * CPP17 : https://en.cppreference.com/w/cpp/language/attributes/fallthrough
+ * C23   : https://en.cppreference.com/w/c/language/attributes/fallthrough
+ */
+#if XXH_HAS_C_ATTRIBUTE(fallthrough) || XXH_HAS_CPP_ATTRIBUTE(fallthrough)
+# define XXH_FALLTHROUGH [[fallthrough]]
+#elif XXH_HAS_ATTRIBUTE(__fallthrough__)
+# define XXH_FALLTHROUGH __attribute__ ((__fallthrough__))
+#else
+# define XXH_FALLTHROUGH /* fallthrough */
+#endif
+/*! @endcond */
+
+/*! @cond Doxygen ignores this part */
+/*
+ * Define XXH_NOESCAPE for annotated pointers in public API.
+ * https://clang.llvm.org/docs/AttributeReference.html#noescape
+ * As of writing this, only supported by clang.
+ */
+#if XXH_HAS_ATTRIBUTE(noescape)
+# define XXH_NOESCAPE __attribute__((noescape))
+#else
+# define XXH_NOESCAPE
+#endif
+/*! @endcond */
+
+
+/*!
+ * @}
+ * @ingroup public
+ * @{
+ */
+
+#ifndef XXH_NO_LONG_LONG
+/*-**********************************************************************
+*  64-bit hash
+************************************************************************/
+#if defined(XXH_DOXYGEN) /* don't include <stdint.h> */
+/*!
+ * @brief An unsigned 64-bit integer.
+ *
+ * Not necessarily defined to `uint64_t` but functionally equivalent.
+ */
+typedef uint64_t XXH64_hash_t;
+#elif !defined (__VMS) \
+  && (defined (__cplusplus) \
+  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+#  include <stdint.h>
+   typedef uint64_t XXH64_hash_t;
+#else
+#  include <limits.h>
+#  if defined(__LP64__) && ULONG_MAX == 0xFFFFFFFFFFFFFFFFULL
+     /* LP64 ABI says uint64_t is unsigned long */
+     typedef unsigned long XXH64_hash_t;
+#  else
+     /* the following type must have a width of 64-bit */
+     typedef unsigned long long XXH64_hash_t;
+#  endif
+#endif
+
+/*!
+ * @}
+ *
+ * @defgroup XXH64_family XXH64 family
+ * @ingroup public
+ * @{
+ * Contains functions used in the classic 64-bit xxHash algorithm.
+ *
+ * @note
+ *   XXH3 provides competitive speed for both 32-bit and 64-bit systems,
+ *   and offers true 64/128 bit hash results.
+ *   It provides better speed for systems with vector processing capabilities.
+ */
+
+/*!
+ * @brief Calculates the 64-bit hash of @p input using xxHash64.
+ *
+ * This function usually runs faster on 64-bit systems, but slower on 32-bit
+ * systems (see benchmark).
+ *
+ * @param input The block of data to be hashed, at least @p length bytes in size.
+ * @param length The length of @p input, in bytes.
+ * @param seed The 64-bit seed to alter the hash's output predictably.
+ * + * @pre + * The memory between @p input and @p input + @p length must be valid, + * readable, contiguous memory. However, if @p length is `0`, @p input may be + * `NULL`. In C++, this also must be *TriviallyCopyable*. + * + * @return The calculated 64-bit hash. + * + * @see + * XXH32(), XXH3_64bits_withSeed(), XXH3_128bits_withSeed(), XXH128(): + * Direct equivalents for the other variants of xxHash. + * @see + * XXH64_createState(), XXH64_update(), XXH64_digest(): Streaming version. + */ +XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed); + +/******* Streaming *******/ +#ifndef XXH_NO_STREAM +/*! + * @brief The opaque state struct for the XXH64 streaming API. + * + * @see XXH64_state_s for details. + */ +typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */ + +/*! + * @brief Allocates an @ref XXH64_state_t. + * + * Must be freed with XXH64_freeState(). + * @return An allocated XXH64_state_t on success, `NULL` on failure. + */ +XXH_PUBLIC_API XXH_MALLOCF XXH64_state_t* XXH64_createState(void); + +/*! + * @brief Frees an @ref XXH64_state_t. + * + * Must be allocated with XXH64_createState(). + * @param statePtr A pointer to an @ref XXH64_state_t allocated with @ref XXH64_createState(). + * @return XXH_OK. + */ +XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr); + +/*! + * @brief Copies one @ref XXH64_state_t to another. + * + * @param dst_state The state to copy to. + * @param src_state The state to copy from. + * @pre + * @p dst_state and @p src_state must not be `NULL` and must not overlap. + */ +XXH_PUBLIC_API void XXH64_copyState(XXH_NOESCAPE XXH64_state_t* dst_state, const XXH64_state_t* src_state); + +/*! + * @brief Resets an @ref XXH64_state_t to begin a new hash. + * + * This function resets and seeds a state. Call it before @ref XXH64_update(). + * + * @param statePtr The state struct to reset. + * @param seed The 64-bit seed to alter the hash result predictably. + * + * @pre + * @p statePtr must not be `NULL`. + * + * @return @ref XXH_OK on success, @ref XXH_ERROR on failure. + */ +XXH_PUBLIC_API XXH_errorcode XXH64_reset (XXH_NOESCAPE XXH64_state_t* statePtr, XXH64_hash_t seed); + +/*! + * @brief Consumes a block of @p input to an @ref XXH64_state_t. + * + * Call this to incrementally consume blocks of data. + * + * @param statePtr The state struct to update. + * @param input The block of data to be hashed, at least @p length bytes in size. + * @param length The length of @p input, in bytes. + * + * @pre + * @p statePtr must not be `NULL`. + * @pre + * The memory between @p input and @p input + @p length must be valid, + * readable, contiguous memory. However, if @p length is `0`, @p input may be + * `NULL`. In C++, this also must be *TriviallyCopyable*. + * + * @return @ref XXH_OK on success, @ref XXH_ERROR on failure. + */ +XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH_NOESCAPE XXH64_state_t* statePtr, XXH_NOESCAPE const void* input, size_t length); + +/*! + * @brief Returns the calculated hash value from an @ref XXH64_state_t. + * + * @note + * Calling XXH64_digest() will not affect @p statePtr, so you can update, + * digest, and update again. + * + * @param statePtr The state struct to calculate the hash from. + * + * @pre + * @p statePtr must not be `NULL`. + * + * @return The calculated xxHash64 value from that state. 
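+ *
+ * A complete XXH64 streaming round trip, as an editor's sketch (`data` and
+ * `len` are assumed to be a valid buffer and its length):
+ * @code{.c}
+ *   XXH64_state_t* st = XXH64_createState();
+ *   if (st != NULL) {
+ *       XXH64_reset(st, 0);
+ *       XXH64_update(st, data, len);
+ *       XXH64_hash_t h = XXH64_digest(st);   // st stays valid for more updates
+ *       XXH64_freeState(st);
+ *   }
+ * @endcode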
+ */ +XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64_digest (XXH_NOESCAPE const XXH64_state_t* statePtr); +#endif /* !XXH_NO_STREAM */ +/******* Canonical representation *******/ + +/*! + * @brief Canonical (big endian) representation of @ref XXH64_hash_t. + */ +typedef struct { unsigned char digest[sizeof(XXH64_hash_t)]; } XXH64_canonical_t; + +/*! + * @brief Converts an @ref XXH64_hash_t to a big endian @ref XXH64_canonical_t. + * + * @param dst The @ref XXH64_canonical_t pointer to be stored to. + * @param hash The @ref XXH64_hash_t to be converted. + * + * @pre + * @p dst must not be `NULL`. + */ +XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH_NOESCAPE XXH64_canonical_t* dst, XXH64_hash_t hash); + +/*! + * @brief Converts an @ref XXH64_canonical_t to a native @ref XXH64_hash_t. + * + * @param src The @ref XXH64_canonical_t to convert. + * + * @pre + * @p src must not be `NULL`. + * + * @return The converted hash. + */ +XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64_hashFromCanonical(XXH_NOESCAPE const XXH64_canonical_t* src); + +#ifndef XXH_NO_XXH3 + +/*! + * @} + * ************************************************************************ + * @defgroup XXH3_family XXH3 family + * @ingroup public + * @{ + * + * XXH3 is a more recent hash algorithm featuring: + * - Improved speed for both small and large inputs + * - True 64-bit and 128-bit outputs + * - SIMD acceleration + * - Improved 32-bit viability + * + * Speed analysis methodology is explained here: + * + * https://fastcompression.blogspot.com/2019/03/presenting-xxh3.html + * + * Compared to XXH64, expect XXH3 to run approximately + * ~2x faster on large inputs and >3x faster on small ones, + * exact differences vary depending on platform. + * + * XXH3's speed benefits greatly from SIMD and 64-bit arithmetic, + * but does not require it. + * Most 32-bit and 64-bit targets that can run XXH32 smoothly can run XXH3 + * at competitive speeds, even without vector support. Further details are + * explained in the implementation. + * + * XXH3 has a fast scalar implementation, but it also includes accelerated SIMD + * implementations for many common platforms: + * - AVX512 + * - AVX2 + * - SSE2 + * - ARM NEON + * - WebAssembly SIMD128 + * - POWER8 VSX + * - s390x ZVector + * This can be controlled via the @ref XXH_VECTOR macro, but it automatically + * selects the best version according to predefined macros. For the x86 family, an + * automatic runtime dispatcher is included separately in @ref xxh_x86dispatch.c. + * + * XXH3 implementation is portable: + * it has a generic C90 formulation that can be compiled on any platform, + * all implementations generate exactly the same hash value on all platforms. + * Starting from v0.8.0, it's also labelled "stable", meaning that + * any future version will also generate the same hash value. + * + * XXH3 offers 2 variants, _64bits and _128bits. + * + * When only 64 bits are needed, prefer invoking the _64bits variant, as it + * reduces the amount of mixing, resulting in faster speed on small inputs. + * It's also generally simpler to manipulate a scalar return type than a struct. + * + * The API supports one-shot hashing, streaming mode, and custom secrets. + */ +/*-********************************************************************** +* XXH3 64-bit variant +************************************************************************/ + +/*! + * @brief 64-bit unseeded variant of XXH3. 
+ * + * This is equivalent to @ref XXH3_64bits_withSeed() with a seed of 0, however + * it may have slightly better performance due to constant propagation of the + * defaults. + * + * @see + * XXH32(), XXH64(), XXH3_128bits(): equivalent for the other xxHash algorithms + * @see + * XXH3_64bits_withSeed(), XXH3_64bits_withSecret(): other seeding variants + * @see + * XXH3_64bits_reset(), XXH3_64bits_update(), XXH3_64bits_digest(): Streaming version. + */ +XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits(XXH_NOESCAPE const void* input, size_t length); + +/*! + * @brief 64-bit seeded variant of XXH3 + * + * This variant generates a custom secret on the fly based on default secret + * altered using the `seed` value. + * + * While this operation is decently fast, note that it's not completely free. + * + * @note + * seed == 0 produces the same results as @ref XXH3_64bits(). + * + * @param input The data to hash + * @param length The length + * @param seed The 64-bit seed to alter the state. + */ +XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_withSeed(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed); + +/*! + * The bare minimum size for a custom secret. + * + * @see + * XXH3_64bits_withSecret(), XXH3_64bits_reset_withSecret(), + * XXH3_128bits_withSecret(), XXH3_128bits_reset_withSecret(). + */ +#define XXH3_SECRET_SIZE_MIN 136 + +/*! + * @brief 64-bit variant of XXH3 with a custom "secret". + * + * It's possible to provide any blob of bytes as a "secret" to generate the hash. + * This makes it more difficult for an external actor to prepare an intentional collision. + * The main condition is that secretSize *must* be large enough (>= XXH3_SECRET_SIZE_MIN). + * However, the quality of the secret impacts the dispersion of the hash algorithm. + * Therefore, the secret _must_ look like a bunch of random bytes. + * Avoid "trivial" or structured data such as repeated sequences or a text document. + * Whenever in doubt about the "randomness" of the blob of bytes, + * consider employing "XXH3_generateSecret()" instead (see below). + * It will generate a proper high entropy secret derived from the blob of bytes. + * Another advantage of using XXH3_generateSecret() is that + * it guarantees that all bits within the initial blob of bytes + * will impact every bit of the output. + * This is not necessarily the case when using the blob of bytes directly + * because, when hashing _small_ inputs, only a portion of the secret is employed. + */ +XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_withSecret(XXH_NOESCAPE const void* data, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize); + + +/******* Streaming *******/ +#ifndef XXH_NO_STREAM +/* + * Streaming requires state maintenance. + * This operation costs memory and CPU. + * As a consequence, streaming is slower than one-shot hashing. + * For better performance, prefer one-shot functions whenever applicable. + */ + +/*! + * @brief The state struct for the XXH3 streaming API. + * + * @see XXH3_state_s for details. + */ +typedef struct XXH3_state_s XXH3_state_t; +XXH_PUBLIC_API XXH_MALLOCF XXH3_state_t* XXH3_createState(void); +XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr); + +/*! + * @brief Copies one @ref XXH3_state_t to another. + * + * @param dst_state The state to copy to. + * @param src_state The state to copy from. + * @pre + * @p dst_state and @p src_state must not be `NULL` and must not overlap. 
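+ *
+ * An editor's sketch of a common pattern, forking two hashes that share a
+ * prefix (`prefix` and `prefix_len` are assumed inputs):
+ * @code{.c}
+ *   XXH3_state_t* a = XXH3_createState();
+ *   XXH3_state_t* b = XXH3_createState();
+ *   XXH3_64bits_reset(a);
+ *   XXH3_64bits_update(a, prefix, prefix_len);   // hash the shared prefix once
+ *   XXH3_copyState(b, a);                        // b continues from the same point
+ * @endcode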
+ */
+XXH_PUBLIC_API void XXH3_copyState(XXH_NOESCAPE XXH3_state_t* dst_state, XXH_NOESCAPE const XXH3_state_t* src_state);
+
+/*!
+ * @brief Resets an @ref XXH3_state_t to begin a new hash.
+ *
+ * This function resets `statePtr` and generates a secret with default parameters. Call it before @ref XXH3_64bits_update().
+ * Digest will be equivalent to `XXH3_64bits()`.
+ *
+ * @param statePtr The state struct to reset.
+ *
+ * @pre
+ *   @p statePtr must not be `NULL`.
+ *
+ * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
+ *
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr);
+
+/*!
+ * @brief Resets an @ref XXH3_state_t with 64-bit seed to begin a new hash.
+ *
+ * This function resets `statePtr` and generates a secret from `seed`. Call it before @ref XXH3_64bits_update().
+ * Digest will be equivalent to `XXH3_64bits_withSeed()`.
+ *
+ * @param statePtr The state struct to reset.
+ * @param seed     The 64-bit seed to alter the state.
+ *
+ * @pre
+ *   @p statePtr must not be `NULL`.
+ *
+ * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
+ *
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed);
+
+/*!
+ * XXH3_64bits_reset_withSecret():
+ * `secret` is referenced, it _must outlive_ the hash streaming session.
+ * Similar to one-shot API, `secretSize` must be >= `XXH3_SECRET_SIZE_MIN`,
+ * and the quality of produced hash values depends on secret's entropy
+ * (secret's content should look like a bunch of random bytes).
+ * When in doubt about the randomness of a candidate `secret`,
+ * consider employing `XXH3_generateSecret()` instead (see below).
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize);
+
+/*!
+ * @brief Consumes a block of @p input to an @ref XXH3_state_t.
+ *
+ * Call this to incrementally consume blocks of data.
+ *
+ * @param statePtr The state struct to update.
+ * @param input The block of data to be hashed, at least @p length bytes in size.
+ * @param length The length of @p input, in bytes.
+ *
+ * @pre
+ *   @p statePtr must not be `NULL`.
+ * @pre
+ *   The memory between @p input and @p input + @p length must be valid,
+ *   readable, contiguous memory. However, if @p length is `0`, @p input may be
+ *   `NULL`. In C++, this also must be *TriviallyCopyable*.
+ *
+ * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_64bits_update (XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* input, size_t length);
+
+/*!
+ * @brief Returns the calculated XXH3 64-bit hash value from an @ref XXH3_state_t.
+ *
+ * @note
+ *   Calling XXH3_64bits_digest() will not affect @p statePtr, so you can update,
+ *   digest, and update again.
+ *
+ * @param statePtr The state struct to calculate the hash from.
+ *
+ * @pre
+ *   @p statePtr must not be `NULL`.
+ *
+ * @return The calculated XXH3 64-bit hash value from that state.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_digest (XXH_NOESCAPE const XXH3_state_t* statePtr);
+#endif /* !XXH_NO_STREAM */
+
+/* note : canonical representation of XXH3 is the same as XXH64
+ * since they both produce XXH64_hash_t values */
+
+
+/*-**********************************************************************
+*  XXH3 128-bit variant
+************************************************************************/
+
+/*!
+ * @brief The return value from 128-bit hashes.
+ *
+ * Stored in little endian order, although the fields themselves are in native
+ * endianness.
+ */
+typedef struct {
+    XXH64_hash_t low64;   /*!< `value & 0xFFFFFFFFFFFFFFFF` */
+    XXH64_hash_t high64;  /*!< `value >> 64` */
+} XXH128_hash_t;
+
+/*!
+ * @brief Unseeded 128-bit variant of XXH3
+ *
+ * The 128-bit variant of XXH3 has more strength, but it has a bit of overhead
+ * for shorter inputs.
+ *
+ * This is equivalent to @ref XXH3_128bits_withSeed() with a seed of 0, however
+ * it may have slightly better performance due to constant propagation of the
+ * defaults.
+ *
+ * @see
+ *   XXH32(), XXH64(), XXH3_64bits(): equivalent for the other xxHash algorithms
+ * @see
+ *   XXH3_128bits_withSeed(), XXH3_128bits_withSecret(): other seeding variants
+ * @see
+ *   XXH3_128bits_reset(), XXH3_128bits_update(), XXH3_128bits_digest(): Streaming version.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits(XXH_NOESCAPE const void* data, size_t len);
+/*! @brief Seeded 128-bit variant of XXH3. @see XXH3_64bits_withSeed(). */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_withSeed(XXH_NOESCAPE const void* data, size_t len, XXH64_hash_t seed);
+/*! @brief Custom secret 128-bit variant of XXH3. @see XXH3_64bits_withSecret(). */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_withSecret(XXH_NOESCAPE const void* data, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize);
+
+/*******   Streaming   *******/
+#ifndef XXH_NO_STREAM
+/*
+ * Streaming requires state maintenance.
+ * This operation costs memory and CPU.
+ * As a consequence, streaming is slower than one-shot hashing.
+ * For better performance, prefer one-shot functions whenever applicable.
+ *
+ * XXH3_128bits uses the same XXH3_state_t as XXH3_64bits().
+ * Use already declared XXH3_createState() and XXH3_freeState().
+ *
+ * All reset and streaming functions have same meaning as their 64-bit counterpart.
+ */
+
+/*!
+ * @brief Resets an @ref XXH3_state_t to begin a new hash.
+ *
+ * This function resets `statePtr` and generates a secret with default parameters. Call it before @ref XXH3_128bits_update().
+ * Digest will be equivalent to `XXH3_128bits()`.
+ *
+ * @param statePtr The state struct to reset.
+ *
+ * @pre
+ *   @p statePtr must not be `NULL`.
+ *
+ * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
+ *
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr);
+
+/*!
+ * @brief Resets an @ref XXH3_state_t with 64-bit seed to begin a new hash.
+ *
+ * This function resets `statePtr` and generates a secret from `seed`. Call it before @ref XXH3_128bits_update().
+ * Digest will be equivalent to `XXH3_128bits_withSeed()`.
+ *
+ * @param statePtr The state struct to reset.
+ * @param seed     The 64-bit seed to alter the state.
+ *
+ * @pre
+ *   @p statePtr must not be `NULL`.
+ *
+ * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
+ *
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed);
+/*! @brief Custom secret 128-bit variant of XXH3. @see XXH3_64bits_reset_withSecret(). */
+XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize);
+
+/*!
+ * @brief Consumes a block of @p input to an @ref XXH3_state_t.
+ *
+ * Call this to incrementally consume blocks of data.
+ *
+ * @param statePtr The state struct to update.
+ * @param input The block of data to be hashed, at least @p length bytes in size.
+ * @param length The length of @p input, in bytes.
+ *
+ * @pre
+ *   @p statePtr must not be `NULL`.
+ * @pre
+ *   The memory between @p input and @p input + @p length must be valid,
+ *   readable, contiguous memory. However, if @p length is `0`, @p input may be
+ *   `NULL`. In C++, this also must be *TriviallyCopyable*.
+ *
+ * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_128bits_update (XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* input, size_t length);
+
+/*!
+ * @brief Returns the calculated XXH3 128-bit hash value from an @ref XXH3_state_t.
+ *
+ * @note
+ *   Calling XXH3_128bits_digest() will not affect @p statePtr, so you can update,
+ *   digest, and update again.
+ *
+ * @param statePtr The state struct to calculate the hash from.
+ *
+ * @pre
+ *   @p statePtr must not be `NULL`.
+ *
+ * @return The calculated XXH3 128-bit hash value from that state.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_digest (XXH_NOESCAPE const XXH3_state_t* statePtr);
+#endif /* !XXH_NO_STREAM */
+
+/* Following helper functions make it possible to compare XXH128_hash_t values.
+ * Since XXH128_hash_t is a structure, this capability is not offered by the language.
+ * Note: For better performance, these functions can be inlined using XXH_INLINE_ALL */
+
+/*!
+ * XXH128_isEqual():
+ * Return: 1 if `h1` and `h2` are equal, 0 if they are not.
+ */
+XXH_PUBLIC_API XXH_PUREF int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2);
+
+/*!
+ * @brief Compares two @ref XXH128_hash_t
+ * This comparator is compatible with stdlib's `qsort()`/`bsearch()`.
+ *
+ * @return: >0 if *h128_1  > *h128_2
+ *          =0 if *h128_1 == *h128_2
+ *          <0 if *h128_1  < *h128_2
+ */
+XXH_PUBLIC_API XXH_PUREF int XXH128_cmp(XXH_NOESCAPE const void* h128_1, XXH_NOESCAPE const void* h128_2);
+
+
+/*******   Canonical representation   *******/
+typedef struct { unsigned char digest[sizeof(XXH128_hash_t)]; } XXH128_canonical_t;
+
+
+/*!
+ * @brief Converts an @ref XXH128_hash_t to a big endian @ref XXH128_canonical_t.
+ *
+ * @param dst  The @ref XXH128_canonical_t pointer to be stored to.
+ * @param hash The @ref XXH128_hash_t to be converted.
+ *
+ * @pre
+ *   @p dst must not be `NULL`.
+ */
+XXH_PUBLIC_API void XXH128_canonicalFromHash(XXH_NOESCAPE XXH128_canonical_t* dst, XXH128_hash_t hash);
+
+/*!
+ * @brief Converts an @ref XXH128_canonical_t to a native @ref XXH128_hash_t.
+ *
+ * @param src The @ref XXH128_canonical_t to convert.
+ *
+ * @pre
+ *   @p src must not be `NULL`.
+ *
+ * @return The converted hash.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH128_hashFromCanonical(XXH_NOESCAPE const XXH128_canonical_t* src);
+
+
+#endif  /* !XXH_NO_XXH3 */
+#endif  /* XXH_NO_LONG_LONG */
+
+/*!
+ * @}
+ */
+#endif /* XXHASH_H_5627135585666179 */
+
+
+
+#if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742)
+#define XXHASH_H_STATIC_13879238742
+/* ****************************************************************************
+ * This section contains declarations which are not guaranteed to remain stable.
+ * They may change in future versions, becoming incompatible with a different
+ * version of the library.
+ * These declarations should only be used with static linking.
+ * Never use them in association with dynamic linking!
+ ***************************************************************************** */
+
+/*
+ * These definitions are only present to allow static allocation
+ * of XXH states, on stack or in a struct, for example.
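+ *
+ * An editor's sketch of such a static (stack) allocation, under the
+ * XXH_STATIC_LINKING_ONLY build mode described above (`p` and `n` are
+ * assumed inputs):
+ *
+ *     XXH3_state_t st;               // the declaration carries the alignment
+ *     XXH3_INITSTATE(&st);           // see XXH3_INITSTATE() below
+ *     XXH3_64bits_reset(&st);
+ *     XXH3_64bits_update(&st, p, n);
+ *     XXH64_hash_t h = XXH3_64bits_digest(&st);  // no freeState() needed
+ *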
+ * Never **ever** access their members directly.
+ */
+
+/*!
+ * @internal
+ * @brief Structure for XXH32 streaming API.
+ *
+ * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
+ * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is
+ * an opaque type. This allows fields to safely be changed.
+ *
+ * Typedef'd to @ref XXH32_state_t.
+ * Do not access the members of this struct directly.
+ * @see XXH64_state_s, XXH3_state_s
+ */
+struct XXH32_state_s {
+   XXH32_hash_t total_len_32; /*!< Total length hashed, modulo 2^32 */
+   XXH32_hash_t large_len;    /*!< Whether the hash is >= 16 (handles @ref total_len_32 overflow) */
+   XXH32_hash_t v[4];         /*!< Accumulator lanes */
+   XXH32_hash_t mem32[4];     /*!< Internal buffer for partial reads. Treated as unsigned char[16]. */
+   XXH32_hash_t memsize;      /*!< Amount of data in @ref mem32 */
+   XXH32_hash_t reserved;     /*!< Reserved field. Do not read nor write to it. */
+};   /* typedef'd to XXH32_state_t */
+
+
+#ifndef XXH_NO_LONG_LONG  /* defined when there is no 64-bit support */
+
+/*!
+ * @internal
+ * @brief Structure for XXH64 streaming API.
+ *
+ * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
+ * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is
+ * an opaque type. This allows fields to safely be changed.
+ *
+ * Typedef'd to @ref XXH64_state_t.
+ * Do not access the members of this struct directly.
+ * @see XXH32_state_s, XXH3_state_s
+ */
+struct XXH64_state_s {
+   XXH64_hash_t total_len;    /*!< Total length hashed. This is always 64-bit. */
+   XXH64_hash_t v[4];         /*!< Accumulator lanes */
+   XXH64_hash_t mem64[4];     /*!< Internal buffer for partial reads. Treated as unsigned char[32]. */
+   XXH32_hash_t memsize;      /*!< Amount of data in @ref mem64 */
+   XXH32_hash_t reserved32;   /*!< Reserved field, needed for padding anyways */
+   XXH64_hash_t reserved64;   /*!< Reserved field. Do not read or write to it. */
+};   /* typedef'd to XXH64_state_t */
+
+#ifndef XXH_NO_XXH3
+
+#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* >= C11 */
+#  include <stdalign.h>
+#  define XXH_ALIGN(n)      alignas(n)
+#elif defined(__cplusplus) && (__cplusplus >= 201103L) /* >= C++11 */
+/* In C++ alignas() is a keyword */
+#  define XXH_ALIGN(n)      alignas(n)
+#elif defined(__GNUC__)
+#  define XXH_ALIGN(n)      __attribute__ ((aligned(n)))
+#elif defined(_MSC_VER)
+#  define XXH_ALIGN(n)      __declspec(align(n))
+#else
+#  define XXH_ALIGN(n)   /* disabled */
+#endif
+
+/* Old GCC versions only accept the attribute after the type in structures. */
+#if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L))   /* C11+ */ \
+    && ! (defined(__cplusplus) && (__cplusplus >= 201103L)) /* >= C++11 */ \
+    && defined(__GNUC__)
+#   define XXH_ALIGN_MEMBER(align, type) type XXH_ALIGN(align)
+#else
+#   define XXH_ALIGN_MEMBER(align, type) XXH_ALIGN(align) type
+#endif
+
+/*!
+ * @brief The size of the internal XXH3 buffer.
+ *
+ * This is the optimal update size for incremental hashing.
+ *
+ * @see XXH3_64b_update(), XXH3_128b_update().
+ */
+#define XXH3_INTERNALBUFFER_SIZE 256
+
+/*!
+ * @internal
+ * @brief Default size of the secret buffer (and @ref XXH3_kSecret).
+ *
+ * This is the size used in @ref XXH3_kSecret and the seeded functions.
+ *
+ * Not to be confused with @ref XXH3_SECRET_SIZE_MIN.
+ */
+#define XXH3_SECRET_DEFAULT_SIZE 192
+
+/*!
+ * @internal
+ * @brief Structure for XXH3 streaming API.
+ *
+ * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
+ * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined.
+ * Otherwise it is an opaque type.
+ * Never use this definition in combination with a dynamic library.
+ * This allows fields to safely be changed in the future.
+ *
+ * @note ** This structure has a strict alignment requirement of 64 bytes!! **
+ * Do not allocate this with `malloc()` or `new`,
+ * it will not be sufficiently aligned.
+ * Use @ref XXH3_createState() and @ref XXH3_freeState(), or stack allocation.
+ *
+ * Typedef'd to @ref XXH3_state_t.
+ * Never access the members of this struct directly.
+ *
+ * @see XXH3_INITSTATE() for stack initialization.
+ * @see XXH3_createState(), XXH3_freeState().
+ * @see XXH32_state_s, XXH64_state_s
+ */
+struct XXH3_state_s {
+   XXH_ALIGN_MEMBER(64, XXH64_hash_t acc[8]);
+       /*!< The 8 accumulators. See @ref XXH32_state_s::v and @ref XXH64_state_s::v */
+   XXH_ALIGN_MEMBER(64, unsigned char customSecret[XXH3_SECRET_DEFAULT_SIZE]);
+       /*!< Used to store a custom secret generated from a seed. */
+   XXH_ALIGN_MEMBER(64, unsigned char buffer[XXH3_INTERNALBUFFER_SIZE]);
+       /*!< The internal buffer. @see XXH32_state_s::mem32 */
+   XXH32_hash_t bufferedSize;
+       /*!< The amount of memory in @ref buffer, @see XXH32_state_s::memsize */
+   XXH32_hash_t useSeed;
+       /*!< Reserved field. Needed for padding on 64-bit. */
+   size_t nbStripesSoFar;
+       /*!< Number of stripes processed. */
+   XXH64_hash_t totalLen;
+       /*!< Total length hashed. 64-bit even on 32-bit targets. */
+   size_t nbStripesPerBlock;
+       /*!< Number of stripes per block. */
+   size_t secretLimit;
+       /*!< Size of @ref customSecret or @ref extSecret */
+   XXH64_hash_t seed;
+       /*!< Seed for _withSeed variants. Must be zero otherwise, @see XXH3_INITSTATE() */
+   XXH64_hash_t reserved64;
+       /*!< Reserved field. */
+   const unsigned char* extSecret;
+       /*!< Reference to an external secret for the _withSecret variants, NULL
+        *   for other variants. */
+   /* note: there may be some padding at the end due to alignment on 64 bytes */
+}; /* typedef'd to XXH3_state_t */
+
+#undef XXH_ALIGN_MEMBER
+
+/*!
+ * @brief Initializes a stack-allocated `XXH3_state_s`.
+ *
+ * When the @ref XXH3_state_t structure is merely emplaced on stack,
+ * it should be initialized with XXH3_INITSTATE() or a memset()
+ * in case its first reset uses XXH3_NNbits_reset_withSeed().
+ * This init can be omitted if the first reset uses default or _withSecret mode.
+ * This operation isn't necessary when the state is created with XXH3_createState().
+ * Note that this doesn't prepare the state for a streaming operation,
+ * it's still necessary to use XXH3_NNbits_reset*() afterwards.
+ */
+#define XXH3_INITSTATE(XXH3_state_ptr)                       \
+    do {                                                     \
+        XXH3_state_t* tmp_xxh3_state_ptr = (XXH3_state_ptr); \
+        tmp_xxh3_state_ptr->seed = 0;                        \
+        tmp_xxh3_state_ptr->extSecret = NULL;                \
+    } while(0)
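+
+/*
+ * A minimal sketch of the pattern described above (the seed value is
+ * illustrative):
+ *
+ * @code{.c}
+ * XXH3_state_t state;                      // stack allocation, aligned by the type
+ * XXH3_INITSTATE(&state);                  // required before a seeded reset
+ * XXH3_64bits_reset_withSeed(&state, 42);  // now ready for update/digest
+ * @endcode
+ */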
+
+
+/*!
+ * Simple alias to the pre-selected XXH3_128bits variant.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH128(XXH_NOESCAPE const void* data, size_t len, XXH64_hash_t seed);
+
+
+/* ===   Experimental API   === */
+/* Symbols defined below must be considered tied to a specific library version. */
+
+/*!
+ * XXH3_generateSecret():
+ *
+ * Derive a high-entropy secret from any user-defined content, named customSeed.
+ * The generated secret can be used in combination with `*_withSecret()` functions.
+ * The `_withSecret()` variants are useful to provide a higher level of protection
+ * than a 64-bit seed, as it becomes much more difficult for an external actor to
+ * guess how to impact the calculation logic.
+ *
+ * The function accepts as input a custom seed of any length and any content,
+ * and derives from it a high-entropy secret of length @p secretSize into an
+ * already allocated buffer @p secretBuffer.
+ *
+ * The generated secret can then be used with any `*_withSecret()` variant.
+ * The functions @ref XXH3_128bits_withSecret(), @ref XXH3_64bits_withSecret(),
+ * @ref XXH3_128bits_reset_withSecret() and @ref XXH3_64bits_reset_withSecret()
+ * are part of this list. They all accept a `secret` parameter
+ * which must be large enough for implementation reasons (>= @ref XXH3_SECRET_SIZE_MIN)
+ * _and_ feature very high entropy (consist of random-looking bytes).
+ * These conditions can be a high bar to meet, so @ref XXH3_generateSecret() can
+ * be employed to ensure proper quality.
+ *
+ * @p customSeed can be anything. It can have any size, even small ones,
+ * and its content can be anything, even "poor entropy" sources such as a bunch
+ * of zeroes. The resulting `secret` will nonetheless provide all required qualities.
+ *
+ * @pre
+ *   - @p secretSize must be >= @ref XXH3_SECRET_SIZE_MIN
+ *   - When @p customSeedSize > 0, supplying NULL as customSeed is undefined behavior.
+ *
+ * Example code:
+ * @code{.c}
+ *    #include <stdio.h>
+ *    #include <stdlib.h>
+ *    #include <string.h>
+ *    #define XXH_STATIC_LINKING_ONLY // expose unstable API
+ *    #include "xxhash.h"
+ *    // Hashes argv[2] using the entropy from argv[1].
+ *    int main(int argc, char* argv[])
+ *    {
+ *        char secret[XXH3_SECRET_SIZE_MIN];
+ *        if (argc != 3) { return 1; }
+ *        XXH3_generateSecret(secret, sizeof(secret), argv[1], strlen(argv[1]));
+ *        XXH64_hash_t h = XXH3_64bits_withSecret(
+ *             argv[2], strlen(argv[2]),
+ *             secret, sizeof(secret)
+ *        );
+ *        printf("%016llx\n", (unsigned long long) h);
+ *    }
+ * @endcode
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_generateSecret(XXH_NOESCAPE void* secretBuffer, size_t secretSize, XXH_NOESCAPE const void* customSeed, size_t customSeedSize);
+
+/*!
+ * @brief Generate the same secret as the _withSeed() variants.
+ *
+ * The generated secret can be used in combination with
+ * `*_withSecret()` and `_withSecretandSeed()` variants.
+ *
+ * Example C++ `std::string` hash class:
+ * @code{.cpp}
+ *    #include <string>
+ *    #define XXH_STATIC_LINKING_ONLY // expose unstable API
+ *    #include "xxhash.h"
+ *    // Slow, seeds each time
+ *    class HashSlow {
+ *        XXH64_hash_t seed;
+ *    public:
+ *        HashSlow(XXH64_hash_t s) : seed{s} {}
+ *        size_t operator()(const std::string& x) const {
+ *            return size_t{XXH3_64bits_withSeed(x.c_str(), x.length(), seed)};
+ *        }
+ *    };
+ *    // Fast, caches the seeded secret for future uses.
+ *    class HashFast {
+ *        unsigned char secret[XXH3_SECRET_DEFAULT_SIZE];
+ *    public:
+ *        HashFast(XXH64_hash_t s) {
+ *            XXH3_generateSecret_fromSeed(secret, s);
+ *        }
+ *        size_t operator()(const std::string& x) const {
+ *            return size_t{
+ *                XXH3_64bits_withSecret(x.c_str(), x.length(), secret, sizeof(secret))
+ *            };
+ *        }
+ *    };
+ * @endcode
+ * @param secretBuffer A writable buffer of @ref XXH3_SECRET_DEFAULT_SIZE bytes
+ * @param seed The seed to seed the state.
+ */
+XXH_PUBLIC_API void XXH3_generateSecret_fromSeed(XXH_NOESCAPE void* secretBuffer, XXH64_hash_t seed);
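+
+/*
+ * A short C sketch of the same caching idea, using the _withSecretandSeed()
+ * variants declared below (`buf` and `len` are illustrative placeholders for
+ * valid input):
+ *
+ * @code{.c}
+ * unsigned char secret[XXH3_SECRET_DEFAULT_SIZE];
+ * XXH3_generateSecret_fromSeed(secret, 42);
+ * XXH64_hash_t h = XXH3_64bits_withSecretandSeed(buf, len,
+ *                      secret, sizeof(secret), 42);
+ * // same result as XXH3_64bits_withSeed(buf, len, 42), but the secret
+ * // is not regenerated for every large input
+ * @endcode
+ */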
+
+/*!
+ * These variants generate hash values using either
+ * @p seed for "short" keys (< XXH3_MIDSIZE_MAX = 240 bytes)
+ * or @p secret for "large" keys (>= XXH3_MIDSIZE_MAX).
+ *
+ * This generally benefits speed, compared to `_withSeed()` or `_withSecret()`.
+ * `_withSeed()` has to generate the secret on the fly for "large" keys.
+ * It's fast, but can be perceptible for "not so large" keys (< 1 KB).
+ * `_withSecret()` has to generate the masks on the fly for "small" keys,
+ * which requires more instructions than _withSeed() variants.
+ * Therefore, the _withSecretandSeed() variant combines the best of both worlds.
+ *
+ * When @p secret has been generated by XXH3_generateSecret_fromSeed(),
+ * this variant produces *exactly* the same results as the `_withSeed()` variant,
+ * hence offering only a pure speed benefit on "large" input,
+ * by skipping the need to regenerate the secret for every large input.
+ *
+ * Another usage scenario is to hash the secret to a 64-bit hash value,
+ * for example with XXH3_64bits(), which then becomes the seed,
+ * and then employ both the seed and the secret in _withSecretandSeed().
+ * On top of speed, an added benefit is that each bit in the secret
+ * has a 50% chance to swap each bit in the output, via its impact on the seed.
+ *
+ * This is not guaranteed when using the secret directly in "small data" scenarios,
+ * because only portions of the secret are employed for small data.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH64_hash_t
+XXH3_64bits_withSecretandSeed(XXH_NOESCAPE const void* data, size_t len,
+                              XXH_NOESCAPE const void* secret, size_t secretSize,
+                              XXH64_hash_t seed);
+/*! @copydoc XXH3_64bits_withSecretandSeed() */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t
+XXH3_128bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t length,
+                               XXH_NOESCAPE const void* secret, size_t secretSize,
+                               XXH64_hash_t seed64);
+#ifndef XXH_NO_STREAM
+/*! @copydoc XXH3_64bits_withSecretandSeed() */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_64bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr,
+                                    XXH_NOESCAPE const void* secret, size_t secretSize,
+                                    XXH64_hash_t seed64);
+/*! @copydoc XXH3_64bits_withSecretandSeed() */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_128bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr,
+                                     XXH_NOESCAPE const void* secret, size_t secretSize,
+                                     XXH64_hash_t seed64);
+#endif /* !XXH_NO_STREAM */
+
+#endif  /* !XXH_NO_XXH3 */
+#endif  /* XXH_NO_LONG_LONG */
+#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
+#  define XXH_IMPLEMENTATION
+#endif
+
+#endif  /* defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742) */
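+
+/*
+ * Illustrative sketch of the common single-translation-unit setup, which
+ * routes through the XXH_IMPLEMENTATION define above (one consumer file):
+ *
+ * @code{.c}
+ * #define XXH_INLINE_ALL   // pulls in the implementation below, all static
+ * #include "xxhash.h"
+ * @endcode
+ */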
+
+
+/* ======================================================================== */
+/* ======================================================================== */
+/* ======================================================================== */
+
+
+/*-**********************************************************************
+ * xxHash implementation
+ *-**********************************************************************
+ * xxHash's implementation used to be hosted inside xxhash.c.
+ *
+ * However, inlining requires implementation to be visible to the compiler,
+ * hence be included alongside the header.
+ * Previously, implementation was hosted inside xxhash.c,
+ * which was then #included when inlining was activated.
+ * This construction created issues with a few build and install systems,
+ * as it required xxhash.c to be stored in /include directory.
+ *
+ * xxHash implementation is now directly integrated within xxhash.h.
+ * As a consequence, xxhash.c is no longer needed in /include.
+ *
+ * xxhash.c is still available and is still useful.
+ * In a "normal" setup, when xxhash is not inlined,
+ * xxhash.h only exposes the prototypes and public symbols,
+ * while xxhash.c can be built into an object file xxhash.o
+ * which can then be linked into the final binary.
+ ************************************************************************/
+
+#if ( defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) \
+   || defined(XXH_IMPLEMENTATION) ) && !defined(XXH_IMPLEM_13a8737387)
+#  define XXH_IMPLEM_13a8737387
+
+/* *************************************
+*  Tuning parameters
+***************************************/
+
+/*!
+ * @defgroup tuning Tuning parameters
+ * @{
+ *
+ * Various macros to control xxHash's behavior.
+ */
+#ifdef XXH_DOXYGEN
+/*!
+ * @brief Define this to disable 64-bit code.
+ *
+ * Useful if only using the @ref XXH32_family and you have a strict C90 compiler.
+ */
+#  define XXH_NO_LONG_LONG
+#  undef XXH_NO_LONG_LONG /* don't actually */
+/*!
+ * @brief Controls how unaligned memory is accessed.
+ *
+ * By default, access to unaligned memory is controlled by `memcpy()`, which is
+ * safe and portable.
+ *
+ * Unfortunately, on some target/compiler combinations, the generated assembly
+ * is sub-optimal.
+ *
+ * The below switch allows selection of a different access method
+ * in the search for improved performance.
+ *
+ * @par Possible options:
+ *
+ *  - `XXH_FORCE_MEMORY_ACCESS=0` (default): `memcpy`
+ *  @par
+ *     Use `memcpy()`. Safe and portable. Note that most modern compilers will
+ *     eliminate the function call and treat it as an unaligned access.
+ *
+ *  - `XXH_FORCE_MEMORY_ACCESS=1`: `__attribute__((aligned(1)))`
+ *  @par
+ *     Depends on compiler extensions and is therefore not portable.
+ *     This method is safe _if_ your compiler supports it,
+ *     and *generally* as fast or faster than `memcpy`.
+ *
+ *  - `XXH_FORCE_MEMORY_ACCESS=2`: Direct cast
+ *  @par
+ *     Casts directly and dereferences. This method doesn't depend on the
+ *     compiler, but it violates the C standard as it directly dereferences an
+ *     unaligned pointer. It can generate buggy code on targets which do not
+ *     support unaligned memory accesses, but in some circumstances, it's the
+ *     only known way to get the most performance.
+ *
+ *  - `XXH_FORCE_MEMORY_ACCESS=3`: Byteshift
+ *  @par
+ *     Also portable. This can generate the best code on old compilers which don't
+ *     inline small `memcpy()` calls, and it might also be faster on big-endian
+ *     systems which lack a native byteswap instruction. However, some compilers
+ *     will emit literal byteshifts even if the target supports unaligned access.
+ *
+ *
+ * @warning
+ *   Methods 1 and 2 rely on implementation-defined behavior. Use these with
+ *   care, as what works on one compiler/platform/optimization level may cause
+ *   another to read garbage data or even crash.
+ *
+ * See https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html for details.
+ *
+ * Prefer these methods in priority order (0 > 3 > 1 > 2)
+ */
+#  define XXH_FORCE_MEMORY_ACCESS 0
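+
+/*
+ * As a usage sketch (the value 3 is just an example): the override is meant
+ * to come from the build system, or from the consumer file before this header
+ * is pulled in, rather than by editing the header itself:
+ *
+ * @code{.c}
+ * #define XXH_FORCE_MEMORY_ACCESS 3   // force the portable byteshift method
+ * #define XXH_IMPLEMENTATION
+ * #include "xxhash.h"
+ * @endcode
+ */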
+
+/*!
+ * @def XXH_SIZE_OPT
+ * @brief Controls how much xxHash optimizes for size.
+ *
+ * xxHash, when compiled, tends to result in a rather large binary size. This
+ * is mostly due to heavy usage of forced inlining and constant folding of the
+ * @ref XXH3_family to increase performance.
+ *
+ * However, some developers prefer size over speed. This option can
+ * significantly reduce the size of the generated code. When using the `-Os`
+ * or `-Oz` options on GCC or Clang, this is defined to 1 by default,
+ * otherwise it is defined to 0.
+ *
+ * Most of these size optimizations can be controlled manually.
+ *
+ * This is a number from 0-2.
+ *  - `XXH_SIZE_OPT` == 0: Default. xxHash makes no size optimizations. Speed
+ *    comes first.
+ *  - `XXH_SIZE_OPT` == 1: Default for `-Os` and `-Oz`. xxHash is more
+ *    conservative and disables hacks that increase code size. It implies the
+ *    options @ref XXH_NO_INLINE_HINTS == 1, @ref XXH_FORCE_ALIGN_CHECK == 0,
+ *    and @ref XXH3_NEON_LANES == 8 if they are not already defined.
+ *  - `XXH_SIZE_OPT` == 2: xxHash tries to make itself as small as possible.
+ *    Performance may cry. For example, the single shot functions just use the
+ *    streaming API.
+ */
+#  define XXH_SIZE_OPT 0
+
+/*!
+ * @def XXH_FORCE_ALIGN_CHECK
+ * @brief If defined to non-zero, adds a special path for aligned inputs (XXH32()
+ * and XXH64() only).
+ *
+ * This is an important performance trick for architectures without decent
+ * unaligned memory access performance.
+ *
+ * It checks for input alignment, and when conditions are met, uses a "fast
+ * path" employing direct 32-bit/64-bit reads, resulting in _dramatically
+ * faster_ read speed.
+ *
+ * The check costs one initial branch per hash, which is generally negligible,
+ * but not zero.
+ *
+ * Moreover, it's not useful to generate an additional code path if memory
+ * access uses the same instruction for both aligned and unaligned
+ * addresses (e.g. x86 and aarch64).
+ *
+ * In these cases, the alignment check can be removed by setting this macro to 0.
+ * Then the code will always use unaligned memory access.
+ * Align check is automatically disabled on x86, x64, ARM64, and some ARM chips,
+ * platforms known to offer good unaligned memory access performance.
+ *
+ * It is also disabled by default when @ref XXH_SIZE_OPT >= 1.
+ *
+ * This option does not affect XXH3 (only XXH32 and XXH64).
+ */
+#  define XXH_FORCE_ALIGN_CHECK 0
+
+/*!
+ * @def XXH_NO_INLINE_HINTS
+ * @brief When non-zero, sets all functions to `static`.
+ *
+ * By default, xxHash tries to force the compiler to inline almost all internal
+ * functions.
+ *
+ * This can usually improve performance due to reduced jumping and improved
+ * constant folding, but significantly increases the size of the binary, which
+ * might not be favorable.
+ *
+ * Additionally, sometimes the forced inlining can be detrimental to performance,
+ * depending on the architecture.
+ *
+ * XXH_NO_INLINE_HINTS marks all internal functions as static, giving the
+ * compiler full control over whether to inline or not.
+ *
+ * When not optimizing (-O0), using `-fno-inline` with GCC or Clang, or if
+ * @ref XXH_SIZE_OPT >= 1, this will automatically be defined.
+ */
+#  define XXH_NO_INLINE_HINTS 0
+
+/*!
+ * @def XXH3_INLINE_SECRET
+ * @brief Determines whether to inline the XXH3 withSecret code.
+ *
+ * When the secret size is known, the compiler can improve the performance
+ * of XXH3_64bits_withSecret() and XXH3_128bits_withSecret().
+ *
+ * However, if the secret size is not known, it doesn't have any benefit. This
+ * happens when xxHash is compiled into a global symbol. Therefore, if
+ * @ref XXH_INLINE_ALL is *not* defined, this will be defined to 0.
+ *
+ * Additionally, this defaults to 0 on GCC 12+, which has an issue with function pointers
+ * that are *sometimes* force-inlined on -Og, and it is impossible to automatically
+ * detect this optimization level.
+ */
+#  define XXH3_INLINE_SECRET 0
+
+/*!
+ * @def XXH32_ENDJMP
+ * @brief Whether to use a jump for `XXH32_finalize`.
+ *
+ * For performance, `XXH32_finalize` uses multiple branches in the finalizer.
+ * This is generally preferable for performance,
+ * but depending on the exact architecture, a jmp may be preferable.
+ *
+ * This setting is only likely to make a difference for very small inputs.
+ */
+#  define XXH32_ENDJMP 0
+
+/*!
+ * @internal
+ * @brief Redefines old internal names.
+ *
+ * For compatibility with code that uses xxHash's internals before the names
+ * were changed to improve namespacing. There is no other reason to use this.
+ */
+#  define XXH_OLD_NAMES
+#  undef XXH_OLD_NAMES /* don't actually use, it is ugly. */
+
+/*!
+ * @def XXH_NO_STREAM
+ * @brief Disables the streaming API.
+ *
+ * When xxHash is not inlined and the streaming functions are not used, disabling
+ * the streaming functions can improve code size significantly, especially with
+ * the @ref XXH3_family which tends to make constant folded copies of itself.
+ */
+#  define XXH_NO_STREAM
+#  undef XXH_NO_STREAM /* don't actually */
+#endif /* XXH_DOXYGEN */
+/*!
+ * @}
+ */
+
+#ifndef XXH_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
+   /* prefer __packed__ structures (method 1) for GCC
+    * < ARMv7 with unaligned access (e.g. Raspbian armhf) still uses byte shifting, so we use memcpy
+    * which for some reason does unaligned loads. */
+#  if defined(__GNUC__) && !(defined(__ARM_ARCH) && __ARM_ARCH < 7 && defined(__ARM_FEATURE_UNALIGNED))
+#    define XXH_FORCE_MEMORY_ACCESS 1
+#  endif
+#endif
+
+#ifndef XXH_SIZE_OPT
+   /* default to 1 for -Os or -Oz */
+#  if (defined(__GNUC__) || defined(__clang__)) && defined(__OPTIMIZE_SIZE__)
+#    define XXH_SIZE_OPT 1
+#  else
+#    define XXH_SIZE_OPT 0
+#  endif
+#endif
+
+#ifndef XXH_FORCE_ALIGN_CHECK  /* can be defined externally */
+   /* don't check on sizeopt, x86, aarch64, or arm when unaligned access is available */
+#  if XXH_SIZE_OPT >= 1 || \
+      defined(__i386)  || defined(__x86_64__) || defined(__aarch64__) || defined(__ARM_FEATURE_UNALIGNED) \
+   || defined(_M_IX86) || defined(_M_X64)     || defined(_M_ARM64)    || defined(_M_ARM) /* visual */
+#    define XXH_FORCE_ALIGN_CHECK 0
+#  else
+#    define XXH_FORCE_ALIGN_CHECK 1
+#  endif
+#endif
+
+#ifndef XXH_NO_INLINE_HINTS
+#  if XXH_SIZE_OPT >= 1 || defined(__NO_INLINE__)  /* -O0, -fno-inline */
+#    define XXH_NO_INLINE_HINTS 1
+#  else
+#    define XXH_NO_INLINE_HINTS 0
+#  endif
+#endif
+
+#ifndef XXH3_INLINE_SECRET
+#  if (defined(__GNUC__) && !defined(__clang__) && __GNUC__ >= 12) \
+     || !defined(XXH_INLINE_ALL)
+#    define XXH3_INLINE_SECRET 0
+#  else
+#    define XXH3_INLINE_SECRET 1
+#  endif
+#endif
+
+#ifndef XXH32_ENDJMP
+/* generally preferable for performance */
+#  define XXH32_ENDJMP 0
+#endif
+
+/*!
+ * @defgroup impl Implementation
+ * @{
+ */
+
+
+/* *************************************
+*  Includes & Memory related functions
+***************************************/
+#if defined(XXH_NO_STREAM)
+/* nothing */
+#elif defined(XXH_NO_STDLIB)
+
+/* When requesting to disable any mention of stdlib,
+ * the library loses the ability to invoke malloc / free.
+ * In practice, it means that functions like `XXH*_createState()`
+ * will always fail, and return NULL.
+ * This flag is useful in situations where
+ * xxhash.h is integrated into some kernel, embedded or limited environment
+ * without access to dynamic allocation.
+ */
+
+static XXH_CONSTF void* XXH_malloc(size_t s) { (void)s; return NULL; }
+static void XXH_free(void* p) { (void)p; }
+
+#else
+
+/*
+ * Modify the local functions below should you wish to use
+ * different memory routines for malloc() and free()
+ */
+#include <stdlib.h>
+
+/*!
+ * @internal
+ * @brief Modify this function to use a different routine than malloc().
+ */
+static XXH_MALLOCF void* XXH_malloc(size_t s) { return malloc(s); }
+
+/*!
+ * @internal
+ * @brief Modify this function to use a different routine than free().
+ */
+static void XXH_free(void* p) { free(p); }
+
+#endif  /* XXH_NO_STDLIB */
+
+#include <string.h>
+
+/*!
+ * @internal
+ * @brief Modify this function to use a different routine than memcpy().
+ */
+static void* XXH_memcpy(void* dest, const void* src, size_t size)
+{
+    return memcpy(dest,src,size);
+}
+
+#include <limits.h>   /* ULLONG_MAX */
+
+
+/* *************************************
+*  Compiler Specific Options
+***************************************/
+#ifdef _MSC_VER /* Visual Studio warning fix */
+#  pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+#endif
+
+#if XXH_NO_INLINE_HINTS  /* disable inlining hints */
+#  if defined(__GNUC__) || defined(__clang__)
+#    define XXH_FORCE_INLINE static __attribute__((unused))
+#  else
+#    define XXH_FORCE_INLINE static
+#  endif
+#  define XXH_NO_INLINE static
+/* enable inlining hints */
+#elif defined(__GNUC__) || defined(__clang__)
+#  define XXH_FORCE_INLINE static __inline__ __attribute__((always_inline, unused))
+#  define XXH_NO_INLINE static __attribute__((noinline))
+#elif defined(_MSC_VER)  /* Visual Studio */
+#  define XXH_FORCE_INLINE static __forceinline
+#  define XXH_NO_INLINE static __declspec(noinline)
+#elif defined (__cplusplus) \
+  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L))   /* C99 */
+#  define XXH_FORCE_INLINE static inline
+#  define XXH_NO_INLINE static
+#else
+#  define XXH_FORCE_INLINE static
+#  define XXH_NO_INLINE static
+#endif
+
+#if XXH3_INLINE_SECRET
+#  define XXH3_WITH_SECRET_INLINE XXH_FORCE_INLINE
+#else
+#  define XXH3_WITH_SECRET_INLINE XXH_NO_INLINE
+#endif
+
+
+/* *************************************
+*  Debug
+***************************************/
+/*!
+ * @ingroup tuning
+ * @def XXH_DEBUGLEVEL
+ * @brief Sets the debugging level.
+ *
+ * XXH_DEBUGLEVEL is expected to be defined externally, typically via the
+ * compiler's command line options. The value must be a number.
+ */
+#ifndef XXH_DEBUGLEVEL
+#  ifdef DEBUGLEVEL /* backwards compat */
+#    define XXH_DEBUGLEVEL DEBUGLEVEL
+#  else
+#    define XXH_DEBUGLEVEL 0
+#  endif
+#endif
+
+#if (XXH_DEBUGLEVEL>=1)
+#  include <assert.h>   /* note: can still be disabled with NDEBUG */
+#  define XXH_ASSERT(c)   assert(c)
+#else
+#  if defined(__INTEL_COMPILER)
+#    define XXH_ASSERT(c)   XXH_ASSUME((unsigned char) (c))
+#  else
+#    define XXH_ASSERT(c)   XXH_ASSUME(c)
+#  endif
+#endif
+
+/* note: use after variable declarations */
+#ifndef XXH_STATIC_ASSERT
+#  if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)    /* C11 */
+#    define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { _Static_assert((c),m); } while(0)
+#  elif defined(__cplusplus) && (__cplusplus >= 201103L)            /* C++11 */
+#    define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { static_assert((c),m); } while(0)
+#  else
+#    define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { struct xxh_sa { char x[(c) ? 1 : -1]; }; } while(0)
+#  endif
+#  define XXH_STATIC_ASSERT(c) XXH_STATIC_ASSERT_WITH_MESSAGE((c),#c)
+#endif
+
+/*!
+ * @internal
+ * @def XXH_COMPILER_GUARD(var)
+ * @brief Used to prevent unwanted optimizations for @p var.
+ *
+ * It uses an empty GCC inline assembly statement with a register constraint
+ * which forces @p var into a general purpose register (e.g. eax, ebx, ecx
+ * on x86) and marks it as modified.
+ *
+ * This is used in a few places to avoid unwanted autovectorization (e.g.
+ * XXH32_round()). All vectorization we want is explicit via intrinsics,
+ * and _usually_ isn't wanted elsewhere.
+ *
+ * We also use it to prevent unwanted constant folding for AArch64 in
+ * XXH3_initCustomSecret_scalar().
+ */
+#if defined(__GNUC__) || defined(__clang__)
+#  define XXH_COMPILER_GUARD(var) __asm__("" : "+r" (var))
+#else
+#  define XXH_COMPILER_GUARD(var) ((void)0)
+#endif
+
+/* Specifically for NEON vectors which use the "w" constraint, on
+ * Clang. */
+#if defined(__clang__) && defined(__ARM_ARCH) && !defined(__wasm__)
+#  define XXH_COMPILER_GUARD_CLANG_NEON(var) __asm__("" : "+w" (var))
+#else
+#  define XXH_COMPILER_GUARD_CLANG_NEON(var) ((void)0)
+#endif
+
+/* *************************************
+*  Basic Types
+***************************************/
+#if !defined (__VMS) \
+ && (defined (__cplusplus) \
+ || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+#  include <stdint.h>
+  typedef uint8_t xxh_u8;
+#else
+  typedef unsigned char xxh_u8;
+#endif
+typedef XXH32_hash_t xxh_u32;
+
+#ifdef XXH_OLD_NAMES
+#  warning "XXH_OLD_NAMES is planned to be removed starting v0.9. If the program depends on it, consider moving away from it by employing newer type names directly"
+#  define BYTE xxh_u8
+#  define U8   xxh_u8
+#  define U32  xxh_u32
+#endif
+
+/* ***   Memory access   *** */
+
+/*!
+ * @internal
+ * @fn xxh_u32 XXH_read32(const void* ptr)
+ * @brief Reads an unaligned 32-bit integer from @p ptr in native endianness.
+ *
+ * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
+ *
+ * @param ptr The pointer to read from.
+ * @return The 32-bit native endian integer from the bytes at @p ptr.
+ */
+
+/*!
+ * @internal
+ * @fn xxh_u32 XXH_readLE32(const void* ptr)
+ * @brief Reads an unaligned 32-bit little endian integer from @p ptr.
+ *
+ * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
+ *
+ * @param ptr The pointer to read from.
+ * @return The 32-bit little endian integer from the bytes at @p ptr.
+ */
+
+/*!
+ * @internal
+ * @fn xxh_u32 XXH_readBE32(const void* ptr)
+ * @brief Reads an unaligned 32-bit big endian integer from @p ptr.
+ *
+ * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
+ *
+ * @param ptr The pointer to read from.
+ * @return The 32-bit big endian integer from the bytes at @p ptr.
+ */
+
+/*!
+ * @internal
+ * @fn xxh_u32 XXH_readLE32_align(const void* ptr, XXH_alignment align)
+ * @brief Like @ref XXH_readLE32(), but has an option for aligned reads.
+ *
+ * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
+ * Note that when @ref XXH_FORCE_ALIGN_CHECK == 0, the @p align parameter is
+ * always @ref XXH_alignment::XXH_unaligned.
+ *
+ * @param ptr The pointer to read from.
+ * @param align Whether @p ptr is aligned.
+ * @pre
+ *   If @p align == @ref XXH_alignment::XXH_aligned, @p ptr must be 4 byte
+ *   aligned.
+ * @return The 32-bit little endian integer from the bytes at @p ptr.
+ */
+
+#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
+/*
+ * Manual byteshift. Best for old compilers which don't inline memcpy.
+ * We actually directly use XXH_readLE32 and XXH_readBE32.
+ */ +#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) + +/* + * Force direct memory access. Only works on CPU which support unaligned memory + * access in hardware. + */ +static xxh_u32 XXH_read32(const void* memPtr) { return *(const xxh_u32*) memPtr; } + +#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) + +/* + * __attribute__((aligned(1))) is supported by gcc and clang. Originally the + * documentation claimed that it only increased the alignment, but actually it + * can decrease it on gcc, clang, and icc: + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=69502, + * https://gcc.godbolt.org/z/xYez1j67Y. + */ +#ifdef XXH_OLD_NAMES +typedef union { xxh_u32 u32; } __attribute__((packed)) unalign; +#endif +static xxh_u32 XXH_read32(const void* ptr) +{ + typedef __attribute__((aligned(1))) xxh_u32 xxh_unalign32; + return *((const xxh_unalign32*)ptr); +} + +#else + +/* + * Portable and safe solution. Generally efficient. + * see: https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html + */ +static xxh_u32 XXH_read32(const void* memPtr) +{ + xxh_u32 val; + XXH_memcpy(&val, memPtr, sizeof(val)); + return val; +} + +#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ + + +/* *** Endianness *** */ + +/*! + * @ingroup tuning + * @def XXH_CPU_LITTLE_ENDIAN + * @brief Whether the target is little endian. + * + * Defined to 1 if the target is little endian, or 0 if it is big endian. + * It can be defined externally, for example on the compiler command line. + * + * If it is not defined, + * a runtime check (which is usually constant folded) is used instead. + * + * @note + * This is not necessarily defined to an integer constant. + * + * @see XXH_isLittleEndian() for the runtime check. + */ +#ifndef XXH_CPU_LITTLE_ENDIAN +/* + * Try to detect endianness automatically, to avoid the nonstandard behavior + * in `XXH_isLittleEndian()` + */ +# if defined(_WIN32) /* Windows is always little endian */ \ + || defined(__LITTLE_ENDIAN__) \ + || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) +# define XXH_CPU_LITTLE_ENDIAN 1 +# elif defined(__BIG_ENDIAN__) \ + || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) +# define XXH_CPU_LITTLE_ENDIAN 0 +# else +/*! + * @internal + * @brief Runtime check for @ref XXH_CPU_LITTLE_ENDIAN. + * + * Most compilers will constant fold this. + */ +static int XXH_isLittleEndian(void) +{ + /* + * Portable and well-defined behavior. + * Don't use static: it is detrimental to performance. + */ + const union { xxh_u32 u; xxh_u8 c[4]; } one = { 1 }; + return one.c[0]; +} +# define XXH_CPU_LITTLE_ENDIAN XXH_isLittleEndian() +# endif +#endif + + + + +/* **************************************** +* Compiler-specific Functions and Macros +******************************************/ +#define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) + +#ifdef __has_builtin +# define XXH_HAS_BUILTIN(x) __has_builtin(x) +#else +# define XXH_HAS_BUILTIN(x) 0 +#endif + + + +/* + * C23 and future versions have standard "unreachable()". 
+ * Once it has been implemented reliably we can add it as an
+ * additional case:
+ *
+ * ```
+ * #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= XXH_C23_VN)
+ * #  include <stddef.h>
+ * #  ifdef unreachable
+ * #    define XXH_UNREACHABLE() unreachable()
+ * #  endif
+ * #endif
+ * ```
+ *
+ * Note C++23 also has std::unreachable() which can be detected
+ * as follows:
+ * ```
+ * #if defined(__cpp_lib_unreachable) && (__cpp_lib_unreachable >= 202202L)
+ * #  include <utility>
+ * #  define XXH_UNREACHABLE() std::unreachable()
+ * #endif
+ * ```
+ * NB: `__cpp_lib_unreachable` is defined in the `<version>` header.
+ * We don't use that as including `<utility>` in `extern "C"` blocks
+ * doesn't work on GCC12
+ */
+
+#if XXH_HAS_BUILTIN(__builtin_unreachable)
+#  define XXH_UNREACHABLE() __builtin_unreachable()
+
+#elif defined(_MSC_VER)
+#  define XXH_UNREACHABLE() __assume(0)
+
+#else
+#  define XXH_UNREACHABLE()
+#endif
+
+#if XXH_HAS_BUILTIN(__builtin_assume)
+#  define XXH_ASSUME(c) __builtin_assume(c)
+#else
+#  define XXH_ASSUME(c) if (!(c)) { XXH_UNREACHABLE(); }
+#endif
+
+/*!
+ * @internal
+ * @def XXH_rotl32(x,r)
+ * @brief 32-bit rotate left.
+ *
+ * @param x The 32-bit integer to be rotated.
+ * @param r The number of bits to rotate.
+ * @pre
+ *   @p r > 0 && @p r < 32
+ * @note
+ *   @p x and @p r may be evaluated multiple times.
+ * @return The rotated result.
+ */
+#if !defined(NO_CLANG_BUILTIN) && XXH_HAS_BUILTIN(__builtin_rotateleft32) \
+                               && XXH_HAS_BUILTIN(__builtin_rotateleft64)
+#  define XXH_rotl32 __builtin_rotateleft32
+#  define XXH_rotl64 __builtin_rotateleft64
+/* Note: although _rotl exists for minGW (GCC under windows), performance seems poor */
+#elif defined(_MSC_VER)
+#  define XXH_rotl32(x,r) _rotl(x,r)
+#  define XXH_rotl64(x,r) _rotl64(x,r)
+#else
+#  define XXH_rotl32(x,r) (((x) << (r)) | ((x) >> (32 - (r))))
+#  define XXH_rotl64(x,r) (((x) << (r)) | ((x) >> (64 - (r))))
+#endif
+
+/*!
+ * @internal
+ * @fn xxh_u32 XXH_swap32(xxh_u32 x)
+ * @brief A 32-bit byteswap.
+ *
+ * @param x The 32-bit integer to byteswap.
+ * @return @p x, byteswapped.
+ */
+#if defined(_MSC_VER)     /* Visual Studio */
+#  define XXH_swap32 _byteswap_ulong
+#elif XXH_GCC_VERSION >= 403
+#  define XXH_swap32 __builtin_bswap32
+#else
+static xxh_u32 XXH_swap32 (xxh_u32 x)
+{
+    return  ((x << 24) & 0xff000000 ) |
+            ((x <<  8) & 0x00ff0000 ) |
+            ((x >>  8) & 0x0000ff00 ) |
+            ((x >> 24) & 0x000000ff );
+}
+#endif
+
+
+/* ***************************
+*  Memory reads
+*****************************/
+
+/*!
+ * @internal
+ * @brief Enum to indicate whether a pointer is aligned.
+ */
+typedef enum {
+    XXH_aligned,  /*!< Aligned */
+    XXH_unaligned /*!< Possibly unaligned */
+} XXH_alignment;
+
+/*
+ * XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load.
+ *
+ * This is ideal for older compilers which don't inline memcpy.
+ */
+#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
+
+XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* memPtr)
+{
+    const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
+    return bytePtr[0]
+         | ((xxh_u32)bytePtr[1] << 8)
+         | ((xxh_u32)bytePtr[2] << 16)
+         | ((xxh_u32)bytePtr[3] << 24);
+}
+
+XXH_FORCE_INLINE xxh_u32 XXH_readBE32(const void* memPtr)
+{
+    const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
+    return bytePtr[3]
+         | ((xxh_u32)bytePtr[2] << 8)
+         | ((xxh_u32)bytePtr[1] << 16)
+         | ((xxh_u32)bytePtr[0] << 24);
+}
+
+#else
+XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* ptr)
+{
+    return XXH_CPU_LITTLE_ENDIAN ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
+}
+
+static xxh_u32 XXH_readBE32(const void* ptr)
+{
+    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
+}
+#endif
+
+XXH_FORCE_INLINE xxh_u32
+XXH_readLE32_align(const void* ptr, XXH_alignment align)
+{
+    if (align==XXH_unaligned) {
+        return XXH_readLE32(ptr);
+    } else {
+        return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u32*)ptr : XXH_swap32(*(const xxh_u32*)ptr);
+    }
+}
+
+
+/* *************************************
+*  Misc
+***************************************/
+/*! @ingroup public */
+XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }
+
+
+/* *******************************************************************
+*  32-bit hash functions
+*********************************************************************/
+/*!
+ * @}
+ * @defgroup XXH32_impl XXH32 implementation
+ * @ingroup impl
+ *
+ * Details on the XXH32 implementation.
+ * @{
+ */
+ /* #define instead of static const, to be used as initializers */
+#define XXH_PRIME32_1  0x9E3779B1U  /*!< 0b10011110001101110111100110110001 */
+#define XXH_PRIME32_2  0x85EBCA77U  /*!< 0b10000101111010111100101001110111 */
+#define XXH_PRIME32_3  0xC2B2AE3DU  /*!< 0b11000010101100101010111000111101 */
+#define XXH_PRIME32_4  0x27D4EB2FU  /*!< 0b00100111110101001110101100101111 */
+#define XXH_PRIME32_5  0x165667B1U  /*!< 0b00010110010101100110011110110001 */
+
+#ifdef XXH_OLD_NAMES
+#  define PRIME32_1 XXH_PRIME32_1
+#  define PRIME32_2 XXH_PRIME32_2
+#  define PRIME32_3 XXH_PRIME32_3
+#  define PRIME32_4 XXH_PRIME32_4
+#  define PRIME32_5 XXH_PRIME32_5
+#endif
+
+/*!
+ * @internal
+ * @brief Normal stripe processing routine.
+ *
+ * This shuffles the bits so that any bit from @p input impacts several bits in
+ * @p acc.
+ *
+ * @param acc The accumulator lane.
+ * @param input The stripe of input to mix.
+ * @return The mixed accumulator lane.
+ */
+static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input)
+{
+    acc += input * XXH_PRIME32_2;
+    acc  = XXH_rotl32(acc, 13);
+    acc *= XXH_PRIME32_1;
+#if (defined(__SSE4_1__) || defined(__aarch64__) || defined(__wasm_simd128__)) && !defined(XXH_ENABLE_AUTOVECTORIZE)
+    /*
+     * UGLY HACK:
+     * A compiler fence is the only thing that prevents GCC and Clang from
+     * autovectorizing the XXH32 loop (pragmas and attributes don't work for some
+     * reason) without globally disabling SSE4.1.
+     *
+     * The reason we want to avoid vectorization is because despite working on
+     * 4 integers at a time, there are multiple factors slowing XXH32 down on
+     * SSE4:
+     * - There's a ridiculous amount of lag from pmulld (10 cycles of latency on
+     *   newer chips!) making it slightly slower to multiply four integers at
+     *   once compared to four integers independently. Even on Sandy/Ivy Bridge,
+     *   where pmulld was fastest, it is still not worth it to go into SSE
+     *   just to multiply unless doing a long operation.
+     *
+     * - Four instructions are required to rotate,
+     *      movdqa tmp, v  // not required with VEX encoding
+     *      pslld tmp, 13  // tmp <<= 13
+     *      psrld v, 19    // v >>= 19
+     *      por v, tmp     // v |= tmp
+     *   compared to one for scalar:
+     *      roll v, 13     // reliably fast across the board
+     *      shldl v, v, 13 // Sandy Bridge and later prefer this for some reason
+     *
+     * - Instruction level parallelism is actually more beneficial here because
+     *   the SIMD actually serializes this operation: While v1 is rotating, v2
+     *   can load data, while v3 can multiply. SSE forces them to operate
+     *   together.
+ * + * This is also enabled on AArch64, as Clang is *very aggressive* in vectorizing + * the loop. NEON is only faster on the A53, and with the newer cores, it is less + * than half the speed. + * + * Additionally, this is used on WASM SIMD128 because it JITs to the same + * SIMD instructions and has the same issue. + */ + XXH_COMPILER_GUARD(acc); +#endif + return acc; +} + +/*! + * @internal + * @brief Mixes all bits to finalize the hash. + * + * The final mix ensures that all input bits have a chance to impact any bit in + * the output digest, resulting in an unbiased distribution. + * + * @param hash The hash to avalanche. + * @return The avalanched hash. + */ +static xxh_u32 XXH32_avalanche(xxh_u32 hash) +{ + hash ^= hash >> 15; + hash *= XXH_PRIME32_2; + hash ^= hash >> 13; + hash *= XXH_PRIME32_3; + hash ^= hash >> 16; + return hash; +} + +#define XXH_get32bits(p) XXH_readLE32_align(p, align) + +/*! + * @internal + * @brief Processes the last 0-15 bytes of @p ptr. + * + * There may be up to 15 bytes remaining to consume from the input. + * This final stage will digest them to ensure that all input bytes are present + * in the final mix. + * + * @param hash The hash to finalize. + * @param ptr The pointer to the remaining input. + * @param len The remaining length, modulo 16. + * @param align Whether @p ptr is aligned. + * @return The finalized hash. + * @see XXH64_finalize(). + */ +static XXH_PUREF xxh_u32 +XXH32_finalize(xxh_u32 hash, const xxh_u8* ptr, size_t len, XXH_alignment align) +{ +#define XXH_PROCESS1 do { \ + hash += (*ptr++) * XXH_PRIME32_5; \ + hash = XXH_rotl32(hash, 11) * XXH_PRIME32_1; \ +} while (0) + +#define XXH_PROCESS4 do { \ + hash += XXH_get32bits(ptr) * XXH_PRIME32_3; \ + ptr += 4; \ + hash = XXH_rotl32(hash, 17) * XXH_PRIME32_4; \ +} while (0) + + if (ptr==NULL) XXH_ASSERT(len == 0); + + /* Compact rerolled version; generally faster */ + if (!XXH32_ENDJMP) { + len &= 15; + while (len >= 4) { + XXH_PROCESS4; + len -= 4; + } + while (len > 0) { + XXH_PROCESS1; + --len; + } + return XXH32_avalanche(hash); + } else { + switch(len&15) /* or switch(bEnd - p) */ { + case 12: XXH_PROCESS4; + XXH_FALLTHROUGH; /* fallthrough */ + case 8: XXH_PROCESS4; + XXH_FALLTHROUGH; /* fallthrough */ + case 4: XXH_PROCESS4; + return XXH32_avalanche(hash); + + case 13: XXH_PROCESS4; + XXH_FALLTHROUGH; /* fallthrough */ + case 9: XXH_PROCESS4; + XXH_FALLTHROUGH; /* fallthrough */ + case 5: XXH_PROCESS4; + XXH_PROCESS1; + return XXH32_avalanche(hash); + + case 14: XXH_PROCESS4; + XXH_FALLTHROUGH; /* fallthrough */ + case 10: XXH_PROCESS4; + XXH_FALLTHROUGH; /* fallthrough */ + case 6: XXH_PROCESS4; + XXH_PROCESS1; + XXH_PROCESS1; + return XXH32_avalanche(hash); + + case 15: XXH_PROCESS4; + XXH_FALLTHROUGH; /* fallthrough */ + case 11: XXH_PROCESS4; + XXH_FALLTHROUGH; /* fallthrough */ + case 7: XXH_PROCESS4; + XXH_FALLTHROUGH; /* fallthrough */ + case 3: XXH_PROCESS1; + XXH_FALLTHROUGH; /* fallthrough */ + case 2: XXH_PROCESS1; + XXH_FALLTHROUGH; /* fallthrough */ + case 1: XXH_PROCESS1; + XXH_FALLTHROUGH; /* fallthrough */ + case 0: return XXH32_avalanche(hash); + } + XXH_ASSERT(0); + return hash; /* reaching this point is deemed impossible */ + } +} + +#ifdef XXH_OLD_NAMES +# define PROCESS1 XXH_PROCESS1 +# define PROCESS4 XXH_PROCESS4 +#else +# undef XXH_PROCESS1 +# undef XXH_PROCESS4 +#endif + +/*! + * @internal + * @brief The implementation for @ref XXH32(). + * + * @param input , len , seed Directly passed from @ref XXH32(). + * @param align Whether @p input is aligned. 
+ * @return The calculated hash.
+ */
+XXH_FORCE_INLINE XXH_PUREF xxh_u32
+XXH32_endian_align(const xxh_u8* input, size_t len, xxh_u32 seed, XXH_alignment align)
+{
+    xxh_u32 h32;
+
+    if (input==NULL) XXH_ASSERT(len == 0);
+
+    if (len>=16) {
+        const xxh_u8* const bEnd = input + len;
+        const xxh_u8* const limit = bEnd - 15;
+        xxh_u32 v1 = seed + XXH_PRIME32_1 + XXH_PRIME32_2;
+        xxh_u32 v2 = seed + XXH_PRIME32_2;
+        xxh_u32 v3 = seed + 0;
+        xxh_u32 v4 = seed - XXH_PRIME32_1;
+
+        do {
+            v1 = XXH32_round(v1, XXH_get32bits(input)); input += 4;
+            v2 = XXH32_round(v2, XXH_get32bits(input)); input += 4;
+            v3 = XXH32_round(v3, XXH_get32bits(input)); input += 4;
+            v4 = XXH32_round(v4, XXH_get32bits(input)); input += 4;
+        } while (input < limit);
+
+        h32 = XXH_rotl32(v1, 1)  + XXH_rotl32(v2, 7)
+            + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
+    } else {
+        h32  = seed + XXH_PRIME32_5;
+    }
+
+    h32 += (xxh_u32)len;
+
+    return XXH32_finalize(h32, input, len&15, align);
+}
+
+/*! @ingroup XXH32_family */
+XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t len, XXH32_hash_t seed)
+{
+#if !defined(XXH_NO_STREAM) && XXH_SIZE_OPT >= 2
+    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
+    XXH32_state_t state;
+    XXH32_reset(&state, seed);
+    XXH32_update(&state, (const xxh_u8*)input, len);
+    return XXH32_digest(&state);
+#else
+    if (XXH_FORCE_ALIGN_CHECK) {
+        if ((((size_t)input) & 3) == 0) {   /* Input is 4-byte aligned, leverage the speed benefit */
+            return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
+    }   }
+
+    return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
+#endif
+}
+
+
+
+/*******   Hash streaming   *******/
+#ifndef XXH_NO_STREAM
+/*! @ingroup XXH32_family */
+XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void)
+{
+    return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
+}
+/*! @ingroup XXH32_family */
+XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
+{
+    XXH_free(statePtr);
+    return XXH_OK;
+}
+
+/*! @ingroup XXH32_family */
+XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState)
+{
+    XXH_memcpy(dstState, srcState, sizeof(*dstState));
+}
+
+/*! @ingroup XXH32_family */
+XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, XXH32_hash_t seed)
+{
+    XXH_ASSERT(statePtr != NULL);
+    memset(statePtr, 0, sizeof(*statePtr));
+    statePtr->v[0] = seed + XXH_PRIME32_1 + XXH_PRIME32_2;
+    statePtr->v[1] = seed + XXH_PRIME32_2;
+    statePtr->v[2] = seed + 0;
+    statePtr->v[3] = seed - XXH_PRIME32_1;
+    return XXH_OK;
+}
+
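+/*
+ * A short streaming sketch tying the functions in this section together
+ * (chunk buffers and sizes are illustrative; error handling elided):
+ *
+ * @code{.c}
+ * XXH32_state_t* st = XXH32_createState();
+ * XXH32_reset(st, 0);
+ * XXH32_update(st, chunk1, chunk1Size);
+ * XXH32_update(st, chunk2, chunk2Size);
+ * XXH32_hash_t h = XXH32_digest(st);   // same value as a one-shot XXH32()
+ * XXH32_freeState(st);
+ * @endcode
+ */
+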
+/*! @ingroup XXH32_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH32_update(XXH32_state_t* state, const void* input, size_t len)
+{
+    if (input==NULL) {
+        XXH_ASSERT(len == 0);
+        return XXH_OK;
+    }
+
+    {   const xxh_u8* p = (const xxh_u8*)input;
+        const xxh_u8* const bEnd = p + len;
+
+        state->total_len_32 += (XXH32_hash_t)len;
+        state->large_len |= (XXH32_hash_t)((len>=16) | (state->total_len_32>=16));
+
+        if (state->memsize + len < 16)  {   /* fill in tmp buffer */
+            XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, len);
+            state->memsize += (XXH32_hash_t)len;
+            return XXH_OK;
+        }
+
+        if (state->memsize) {   /* some data left from previous update */
+            XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, 16-state->memsize);
+            {   const xxh_u32* p32 = state->mem32;
+                state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p32)); p32++;
+                state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p32)); p32++;
+                state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p32)); p32++;
+                state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p32));
+            }
+            p += 16-state->memsize;
+            state->memsize = 0;
+        }
+
+        if (p <= bEnd-16) {
+            const xxh_u8* const limit = bEnd - 16;
+
+            do {
+                state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p)); p+=4;
+                state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p)); p+=4;
+                state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p)); p+=4;
+                state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p)); p+=4;
+            } while (p<=limit);
+
+        }
+
+        if (p < bEnd) {
+            XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
+            state->memsize = (unsigned)(bEnd-p);
+        }
+    }
+
+    return XXH_OK;
+}
+
+
+/*! @ingroup XXH32_family */
+XXH_PUBLIC_API XXH32_hash_t XXH32_digest(const XXH32_state_t* state)
+{
+    xxh_u32 h32;
+
+    if (state->large_len) {
+        h32 = XXH_rotl32(state->v[0], 1)
+            + XXH_rotl32(state->v[1], 7)
+            + XXH_rotl32(state->v[2], 12)
+            + XXH_rotl32(state->v[3], 18);
+    } else {
+        h32 = state->v[2] /* == seed */ + XXH_PRIME32_5;
+    }
+
+    h32 += state->total_len_32;
+
+    return XXH32_finalize(h32, (const xxh_u8*)state->mem32, state->memsize, XXH_aligned);
+}
+#endif /* !XXH_NO_STREAM */
+
+/*******   Canonical representation   *******/
+
+/*!
+ * @ingroup XXH32_family
+ * The default return values from XXH functions are unsigned 32- and 64-bit
+ * integers.
+ *
+ * The canonical representation uses big endian convention, the same convention
+ * as human-readable numbers (large digits first).
+ *
+ * This way, hash values can be written into a file or buffer, remaining
+ * comparable across different systems.
+ *
+ * The following functions allow transformation of hash values to and from their
+ * canonical format.
+ */
+XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
+{
+    XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
+    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);
+    XXH_memcpy(dst, &hash, sizeof(*dst));
+}
+/*! @ingroup XXH32_family */
+XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)
+{
+    return XXH_readBE32(src);
+}
+
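+/*
+ * A minimal sketch of the round trip described above (`data` and `size` are
+ * illustrative; I/O elided):
+ *
+ * @code{.c}
+ * XXH32_canonical_t c;
+ * XXH32_canonicalFromHash(&c, XXH32(data, size, 0));
+ * // c.digest[] now holds the hash in big endian byte order, safe to store
+ * // or transmit; XXH32_hashFromCanonical(&c) recovers the same value on
+ * // any platform.
+ * @endcode
+ */
+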
+
+#ifndef XXH_NO_LONG_LONG
+
+/* *******************************************************************
+*  64-bit hash functions
+*********************************************************************/
+/*!
+ * @}
+ * @ingroup impl
+ * @{
+ */
+/*******   Memory access   *******/
+
+typedef XXH64_hash_t xxh_u64;
+
+#ifdef XXH_OLD_NAMES
+#  define U64 xxh_u64
+#endif
+
+#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
+/*
+ * Manual byteshift. Best for old compilers which don't inline memcpy.
+ * We actually directly use XXH_readLE64 and XXH_readBE64.
+ */
+#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
+
+/* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */
+static xxh_u64 XXH_read64(const void* memPtr)
+{
+    return *(const xxh_u64*) memPtr;
+}
+
+#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
+
+/*
+ * __attribute__((aligned(1))) is supported by gcc and clang. Originally the
+ * documentation claimed that it only increased the alignment, but actually it
+ * can decrease it on gcc, clang, and icc:
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=69502,
+ * https://gcc.godbolt.org/z/xYez1j67Y.
+ */
+#ifdef XXH_OLD_NAMES
+typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) unalign64;
+#endif
+static xxh_u64 XXH_read64(const void* ptr)
+{
+    typedef __attribute__((aligned(1))) xxh_u64 xxh_unalign64;
+    return *((const xxh_unalign64*)ptr);
+}
+
+#else
+
+/*
+ * Portable and safe solution. Generally efficient.
+ * see: https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html
+ */
+static xxh_u64 XXH_read64(const void* memPtr)
+{
+    xxh_u64 val;
+    XXH_memcpy(&val, memPtr, sizeof(val));
+    return val;
+}
+
+#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */
+
+#if defined(_MSC_VER)     /* Visual Studio */
+#  define XXH_swap64 _byteswap_uint64
+#elif XXH_GCC_VERSION >= 403
+#  define XXH_swap64 __builtin_bswap64
+#else
+static xxh_u64 XXH_swap64(xxh_u64 x)
+{
+    return  ((x << 56) & 0xff00000000000000ULL) |
+            ((x << 40) & 0x00ff000000000000ULL) |
+            ((x << 24) & 0x0000ff0000000000ULL) |
+            ((x << 8)  & 0x000000ff00000000ULL) |
+            ((x >> 8)  & 0x00000000ff000000ULL) |
+            ((x >> 24) & 0x0000000000ff0000ULL) |
+            ((x >> 40) & 0x000000000000ff00ULL) |
+            ((x >> 56) & 0x00000000000000ffULL);
+}
+#endif
+
+
+/* XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load. */
+#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
+
+XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* memPtr)
+{
+    const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
+    return bytePtr[0]
+         | ((xxh_u64)bytePtr[1] << 8)
+         | ((xxh_u64)bytePtr[2] << 16)
+         | ((xxh_u64)bytePtr[3] << 24)
+         | ((xxh_u64)bytePtr[4] << 32)
+         | ((xxh_u64)bytePtr[5] << 40)
+         | ((xxh_u64)bytePtr[6] << 48)
+         | ((xxh_u64)bytePtr[7] << 56);
+}
+
+XXH_FORCE_INLINE xxh_u64 XXH_readBE64(const void* memPtr)
+{
+    const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
+    return bytePtr[7]
+         | ((xxh_u64)bytePtr[6] << 8)
+         | ((xxh_u64)bytePtr[5] << 16)
+         | ((xxh_u64)bytePtr[4] << 24)
+         | ((xxh_u64)bytePtr[3] << 32)
+         | ((xxh_u64)bytePtr[2] << 40)
+         | ((xxh_u64)bytePtr[1] << 48)
+         | ((xxh_u64)bytePtr[0] << 56);
+}
+
+#else
+XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* ptr)
+{
+    return XXH_CPU_LITTLE_ENDIAN ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
+}
+
+static xxh_u64 XXH_readBE64(const void* ptr)
+{
+    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
+}
+#endif
+
+XXH_FORCE_INLINE xxh_u64
+XXH_readLE64_align(const void* ptr, XXH_alignment align)
+{
+    if (align==XXH_unaligned)
+        return XXH_readLE64(ptr);
+    else
+        return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u64*)ptr : XXH_swap64(*(const xxh_u64*)ptr);
+}
+
+
+/*******   xxh64   *******/
+/*!
+ * @}
+ * @defgroup XXH64_impl XXH64 implementation
+ * @ingroup impl
+ *
+ * Details on the XXH64 implementation.
+ * @{
+ */
+/* #define rather than static const, to be used as initializers */
+#define XXH_PRIME64_1  0x9E3779B185EBCA87ULL  /*!< 0b1001111000110111011110011011000110000101111010111100101010000111 */
+#define XXH_PRIME64_2  0xC2B2AE3D27D4EB4FULL  /*!< 0b1100001010110010101011100011110100100111110101001110101101001111 */
+#define XXH_PRIME64_3  0x165667B19E3779F9ULL  /*!< 0b0001011001010110011001111011000110011110001101110111100111111001 */
+#define XXH_PRIME64_4  0x85EBCA77C2B2AE63ULL  /*!< 0b1000010111101011110010100111011111000010101100101010111001100011 */
+#define XXH_PRIME64_5  0x27D4EB2F165667C5ULL  /*!< 0b0010011111010100111010110010111100010110010101100110011111000101 */
+
+#ifdef XXH_OLD_NAMES
+#  define PRIME64_1 XXH_PRIME64_1
+#  define PRIME64_2 XXH_PRIME64_2
+#  define PRIME64_3 XXH_PRIME64_3
+#  define PRIME64_4 XXH_PRIME64_4
+#  define PRIME64_5 XXH_PRIME64_5
+#endif
+
+/*! @copydoc XXH32_round */
+static xxh_u64 XXH64_round(xxh_u64 acc, xxh_u64 input)
+{
+    acc += input * XXH_PRIME64_2;
+    acc  = XXH_rotl64(acc, 31);
+    acc *= XXH_PRIME64_1;
+    return acc;
+}
+
+static xxh_u64 XXH64_mergeRound(xxh_u64 acc, xxh_u64 val)
+{
+    val  = XXH64_round(0, val);
+    acc ^= val;
+    acc  = acc * XXH_PRIME64_1 + XXH_PRIME64_4;
+    return acc;
+}
+
+/*! @copydoc XXH32_avalanche */
+static xxh_u64 XXH64_avalanche(xxh_u64 hash)
+{
+    hash ^= hash >> 33;
+    hash *= XXH_PRIME64_2;
+    hash ^= hash >> 29;
+    hash *= XXH_PRIME64_3;
+    hash ^= hash >> 32;
+    return hash;
+}
+
+
+#define XXH_get64bits(p) XXH_readLE64_align(p, align)
+
+/*!
+ * @internal
+ * @brief Processes the last 0-31 bytes of @p ptr.
+ *
+ * There may be up to 31 bytes remaining to consume from the input.
+ * This final stage will digest them to ensure that all input bytes are present
+ * in the final mix.
+ *
+ * @param hash The hash to finalize.
+ * @param ptr The pointer to the remaining input.
+ * @param len The remaining length, modulo 32.
+ * @param align Whether @p ptr is aligned.
+ * @return The finalized hash.
+ * @see XXH32_finalize().
+ */
+static XXH_PUREF xxh_u64
+XXH64_finalize(xxh_u64 hash, const xxh_u8* ptr, size_t len, XXH_alignment align)
+{
+    if (ptr==NULL) XXH_ASSERT(len == 0);
+    len &= 31;
+    while (len >= 8) {
+        xxh_u64 const k1 = XXH64_round(0, XXH_get64bits(ptr));
+        ptr += 8;
+        hash ^= k1;
+        hash  = XXH_rotl64(hash,27) * XXH_PRIME64_1 + XXH_PRIME64_4;
+        len -= 8;
+    }
+    if (len >= 4) {
+        hash ^= (xxh_u64)(XXH_get32bits(ptr)) * XXH_PRIME64_1;
+        ptr += 4;
+        hash = XXH_rotl64(hash, 23) * XXH_PRIME64_2 + XXH_PRIME64_3;
+        len -= 4;
+    }
+    while (len > 0) {
+        hash ^= (*ptr++) * XXH_PRIME64_5;
+        hash = XXH_rotl64(hash, 11) * XXH_PRIME64_1;
+        --len;
+    }
+    return XXH64_avalanche(hash);
+}
+
+#ifdef XXH_OLD_NAMES
+#  define PROCESS1_64 XXH_PROCESS1_64
+#  define PROCESS4_64 XXH_PROCESS4_64
+#  define PROCESS8_64 XXH_PROCESS8_64
+#else
+#  undef XXH_PROCESS1_64
+#  undef XXH_PROCESS4_64
+#  undef XXH_PROCESS8_64
+#endif
+
+/*!
+ * @internal
+ * @brief The implementation for @ref XXH64().
+ *
+ * @param input , len , seed Directly passed from @ref XXH64().
+ * @param align Whether @p input is aligned.
+ * @return The calculated hash.
+ */
+XXH_FORCE_INLINE XXH_PUREF xxh_u64
+XXH64_endian_align(const xxh_u8* input, size_t len, xxh_u64 seed, XXH_alignment align)
+{
+    xxh_u64 h64;
+    if (input==NULL) XXH_ASSERT(len == 0);
+
+    if (len>=32) {
+        const xxh_u8* const bEnd = input + len;
+        const xxh_u8* const limit = bEnd - 31;
+        xxh_u64 v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
+        xxh_u64 v2 = seed + XXH_PRIME64_2;
+        xxh_u64 v3 = seed + 0;
+        xxh_u64 v4 = seed - XXH_PRIME64_1;
+
+        do {
+            v1 = XXH64_round(v1, XXH_get64bits(input)); input+=8;
+            v2 = XXH64_round(v2, XXH_get64bits(input)); input+=8;
+            v3 = XXH64_round(v3, XXH_get64bits(input)); input+=8;
+            v4 = XXH64_round(v4, XXH_get64bits(input)); input+=8;
+        } while (input<limit);
+
+        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7)
+            + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
+        h64 = XXH64_mergeRound(h64, v1);
+        h64 = XXH64_mergeRound(h64, v2);
+        h64 = XXH64_mergeRound(h64, v3);
+        h64 = XXH64_mergeRound(h64, v4);
+
+    } else {
+        h64  = seed + XXH_PRIME64_5;
+    }
+
+    h64 += (xxh_u64) len;
+
+    return XXH64_finalize(h64, input, len, align);
+}
+
+/*! @ingroup XXH64_family */
+XXH_PUBLIC_API XXH64_hash_t XXH64 (XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed)
+{
+#if !defined(XXH_NO_STREAM) && XXH_SIZE_OPT >= 2
+    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
+    XXH64_state_t state;
+    XXH64_reset(&state, seed);
+    XXH64_update(&state, (const xxh_u8*)input, len);
+    return XXH64_digest(&state);
+#else
+    if (XXH_FORCE_ALIGN_CHECK) {
+        if ((((size_t)input) & 7)==0) {  /* Input is aligned, let's leverage the speed advantage */
+            return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
+    }   }
+
+    return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
+
+#endif
+}
+
+/*******   Hash Streaming   *******/
+#ifndef XXH_NO_STREAM
+/*! @ingroup XXH64_family*/
+XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
+{
+    return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
+}
+/*! @ingroup XXH64_family */
+XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
+{
+    XXH_free(statePtr);
+    return XXH_OK;
+}
+
+/*! @ingroup XXH64_family */
+XXH_PUBLIC_API void XXH64_copyState(XXH_NOESCAPE XXH64_state_t* dstState, const XXH64_state_t* srcState)
+{
+    XXH_memcpy(dstState, srcState, sizeof(*dstState));
+}
+
+/*! @ingroup XXH64_family */
+XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH_NOESCAPE XXH64_state_t* statePtr, XXH64_hash_t seed)
+{
+    XXH_ASSERT(statePtr != NULL);
+    memset(statePtr, 0, sizeof(*statePtr));
+    statePtr->v[0] = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
+    statePtr->v[1] = seed + XXH_PRIME64_2;
+    statePtr->v[2] = seed + 0;
+    statePtr->v[3] = seed - XXH_PRIME64_1;
+    return XXH_OK;
+}
+
+/*!
@ingroup XXH64_family */ +XXH_PUBLIC_API XXH_errorcode +XXH64_update (XXH_NOESCAPE XXH64_state_t* state, XXH_NOESCAPE const void* input, size_t len) +{ + if (input==NULL) { + XXH_ASSERT(len == 0); + return XXH_OK; + } + + { const xxh_u8* p = (const xxh_u8*)input; + const xxh_u8* const bEnd = p + len; + + state->total_len += len; + + if (state->memsize + len < 32) { /* fill in tmp buffer */ + XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, len); + state->memsize += (xxh_u32)len; + return XXH_OK; + } + + if (state->memsize) { /* tmp buffer is full */ + XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, 32-state->memsize); + state->v[0] = XXH64_round(state->v[0], XXH_readLE64(state->mem64+0)); + state->v[1] = XXH64_round(state->v[1], XXH_readLE64(state->mem64+1)); + state->v[2] = XXH64_round(state->v[2], XXH_readLE64(state->mem64+2)); + state->v[3] = XXH64_round(state->v[3], XXH_readLE64(state->mem64+3)); + p += 32 - state->memsize; + state->memsize = 0; + } + + if (p+32 <= bEnd) { + const xxh_u8* const limit = bEnd - 32; + + do { + state->v[0] = XXH64_round(state->v[0], XXH_readLE64(p)); p+=8; + state->v[1] = XXH64_round(state->v[1], XXH_readLE64(p)); p+=8; + state->v[2] = XXH64_round(state->v[2], XXH_readLE64(p)); p+=8; + state->v[3] = XXH64_round(state->v[3], XXH_readLE64(p)); p+=8; + } while (p<=limit); + + } + + if (p < bEnd) { + XXH_memcpy(state->mem64, p, (size_t)(bEnd-p)); + state->memsize = (unsigned)(bEnd-p); + } + } + + return XXH_OK; +} + + +/*! @ingroup XXH64_family */ +XXH_PUBLIC_API XXH64_hash_t XXH64_digest(XXH_NOESCAPE const XXH64_state_t* state) +{ + xxh_u64 h64; + + if (state->total_len >= 32) { + h64 = XXH_rotl64(state->v[0], 1) + XXH_rotl64(state->v[1], 7) + XXH_rotl64(state->v[2], 12) + XXH_rotl64(state->v[3], 18); + h64 = XXH64_mergeRound(h64, state->v[0]); + h64 = XXH64_mergeRound(h64, state->v[1]); + h64 = XXH64_mergeRound(h64, state->v[2]); + h64 = XXH64_mergeRound(h64, state->v[3]); + } else { + h64 = state->v[2] /*seed*/ + XXH_PRIME64_5; + } + + h64 += (xxh_u64) state->total_len; + + return XXH64_finalize(h64, (const xxh_u8*)state->mem64, (size_t)state->total_len, XXH_aligned); +} +#endif /* !XXH_NO_STREAM */ + +/******* Canonical representation *******/ + +/*! @ingroup XXH64_family */ +XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH_NOESCAPE XXH64_canonical_t* dst, XXH64_hash_t hash) +{ + XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t)); + if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash); + XXH_memcpy(dst, &hash, sizeof(*dst)); +} + +/*! @ingroup XXH64_family */ +XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(XXH_NOESCAPE const XXH64_canonical_t* src) +{ + return XXH_readBE64(src); +} + +#ifndef XXH_NO_XXH3 + +/* ********************************************************************* +* XXH3 +* New generation hash designed for speed on small keys and vectorization +************************************************************************ */ +/*! + * @} + * @defgroup XXH3_impl XXH3 implementation + * @ingroup impl + * @{ + */ + +/* === Compiler specifics === */ + +#if ((defined(sun) || defined(__sun)) && __cplusplus) /* Solaris includes __STDC_VERSION__ with C++. 
Tested with GCC 5.5 */
+#  define XXH_RESTRICT   /* disable */
+#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* >= C99 */
+#  define XXH_RESTRICT   restrict
+#elif (defined (__GNUC__) && ((__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1))) \
+   || (defined (__clang__)) \
+   || (defined (_MSC_VER) && (_MSC_VER >= 1400)) \
+   || (defined (__INTEL_COMPILER) && (__INTEL_COMPILER >= 1300))
+/*
+ * There are a LOT more compilers that recognize __restrict but this
+ * covers the major ones.
+ */
+#  define XXH_RESTRICT   __restrict
+#else
+#  define XXH_RESTRICT   /* disable */
+#endif
+
+#if (defined(__GNUC__) && (__GNUC__ >= 3))  \
+  || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) \
+  || defined(__clang__)
+#    define XXH_likely(x) __builtin_expect(x, 1)
+#    define XXH_unlikely(x) __builtin_expect(x, 0)
+#else
+#    define XXH_likely(x) (x)
+#    define XXH_unlikely(x) (x)
+#endif
+
+#ifndef XXH_HAS_INCLUDE
+#  ifdef __has_include
+#    define XXH_HAS_INCLUDE(x) __has_include(x)
+#  else
+#    define XXH_HAS_INCLUDE(x) 0
+#  endif
+#endif
+
+#if defined(__GNUC__) || defined(__clang__)
+#  if defined(__ARM_FEATURE_SVE)
+#    include <arm_sve.h>
+#  endif
+#  if defined(__ARM_NEON__) || defined(__ARM_NEON) \
+   || (defined(_M_ARM) && _M_ARM >= 7) \
+   || defined(_M_ARM64) || defined(_M_ARM64EC) \
+   || (defined(__wasm_simd128__) && XXH_HAS_INCLUDE(<arm_neon.h>)) /* WASM SIMD128 via SIMDe */
+#    define inline __inline__  /* circumvent a clang bug */
+#    include <arm_neon.h>
+#    undef inline
+#  elif defined(__AVX2__)
+#    include <immintrin.h>
+#  elif defined(__SSE2__)
+#    include <emmintrin.h>
+#  endif
+#endif
+
+#if defined(_MSC_VER)
+#  include <intrin.h>
+#endif
+
+/*
+ * One goal of XXH3 is to make it fast on both 32-bit and 64-bit, while
+ * remaining a true 64-bit/128-bit hash function.
+ *
+ * This is done by prioritizing a subset of 64-bit operations that can be
+ * emulated without too many steps on the average 32-bit machine.
+ *
+ * For example, these two lines seem similar, and run equally fast on 64-bit:
+ *
+ *   xxh_u64 x;
+ *   x ^= (x >> 47); // good
+ *   x ^= (x >> 13); // bad
+ *
+ * However, to a 32-bit machine, there is a major difference.
+ *
+ * x ^= (x >> 47) looks like this:
+ *
+ *   x.lo ^= (x.hi >> (47 - 32));
+ *
+ * while x ^= (x >> 13) looks like this:
+ *
+ *   // note: funnel shifts are not usually cheap.
+ *   x.lo ^= (x.lo >> 13) | (x.hi << (32 - 13));
+ *   x.hi ^= (x.hi >> 13);
+ *
+ * The first one is significantly faster than the second, simply because the
+ * shift is larger than 32. This means:
+ *  - All the bits we need are in the upper 32 bits, so we can ignore the lower
+ *    32 bits in the shift.
+ *  - The shift result will always fit in the lower 32 bits, and therefore,
+ *    we can ignore the upper 32 bits in the xor.
+ *
+ * Thanks to this optimization, XXH3 only requires these features to be efficient:
+ *
+ *  - Usable unaligned access
+ *  - A 32-bit or 64-bit ALU
+ *      - If 32-bit, a decent ADC instruction
+ *  - A 32 or 64-bit multiply with a 64-bit result
+ *  - For the 128-bit variant, a decent byteswap helps short inputs.
+ *
+ * The first two are already required by XXH32, and almost all 32-bit and 64-bit
+ * platforms which can run XXH32 can run XXH3 efficiently.
+ *
+ * Thumb-1, the classic 16-bit only subset of ARM's instruction set, is one
+ * notable exception.
+ *
+ * First of all, Thumb-1 lacks support for the UMULL instruction which
+ * performs the important long multiply. This means numerous __aeabi_lmul
+ * calls.
+ *
+ * Second of all, the 8 functional registers are just not enough.
+ * Setup for __aeabi_lmul, byteshift loads, pointers, and all arithmetic need + * Lo registers, and this shuffling results in thousands more MOVs than A32. + * + * A32 and T32 don't have this limitation. They can access all 14 registers, + * do a 32->64 multiply with UMULL, and the flexible operand allowing free + * shifts is helpful, too. + * + * Therefore, we do a quick sanity check. + * + * If compiling Thumb-1 for a target which supports ARM instructions, we will + * emit a warning, as it is not a "sane" platform to compile for. + * + * Usually, if this happens, it is because of an accident and you probably need + * to specify -march, as you likely meant to compile for a newer architecture. + * + * Credit: large sections of the vectorial and asm source code paths + * have been contributed by @easyaspi314 + */ +#if defined(__thumb__) && !defined(__thumb2__) && defined(__ARM_ARCH_ISA_ARM) +# warning "XXH3 is highly inefficient without ARM or Thumb-2." +#endif + +/* ========================================== + * Vectorization detection + * ========================================== */ + +#ifdef XXH_DOXYGEN +/*! + * @ingroup tuning + * @brief Overrides the vectorization implementation chosen for XXH3. + * + * Can be defined to 0 to disable SIMD or any of the values mentioned in + * @ref XXH_VECTOR_TYPE. + * + * If this is not defined, it uses predefined macros to determine the best + * implementation. + */ +# define XXH_VECTOR XXH_SCALAR +/*! + * @ingroup tuning + * @brief Possible values for @ref XXH_VECTOR. + * + * Note that these are actually implemented as macros. + * + * If this is not defined, it is detected automatically. + * internal macro XXH_X86DISPATCH overrides this. + */ +enum XXH_VECTOR_TYPE /* fake enum */ { + XXH_SCALAR = 0, /*!< Portable scalar version */ + XXH_SSE2 = 1, /*!< + * SSE2 for Pentium 4, Opteron, all x86_64. + * + * @note SSE2 is also guaranteed on Windows 10, macOS, and + * Android x86. + */ + XXH_AVX2 = 2, /*!< AVX2 for Haswell and Bulldozer */ + XXH_AVX512 = 3, /*!< AVX512 for Skylake and Icelake */ + XXH_NEON = 4, /*!< + * NEON for most ARMv7-A, all AArch64, and WASM SIMD128 + * via the SIMDeverywhere polyfill provided with the + * Emscripten SDK. + */ + XXH_VSX = 5, /*!< VSX and ZVector for POWER8/z13 (64-bit) */ + XXH_SVE = 6, /*!< SVE for some ARMv8-A and ARMv9-A */ +}; +/*! + * @ingroup tuning + * @brief Selects the minimum alignment for XXH3's accumulators. + * + * When using SIMD, this should match the alignment required for said vector + * type, so, for example, 32 for AVX2. + * + * Default: Auto detected. 
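+ *
+ * For example (an illustrative build line, not upstream documentation), both
+ * this and @ref XXH_VECTOR can be pinned from the compiler command line:
+ *
+ *   cc -O3 -DXXH_VECTOR=XXH_AVX2 -DXXH_ACC_ALIGN=32 -c xxhash.c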
+ */
+#  define XXH_ACC_ALIGN 8
+#endif
+
+/* Actual definition */
+#ifndef XXH_DOXYGEN
+#  define XXH_SCALAR 0
+#  define XXH_SSE2   1
+#  define XXH_AVX2   2
+#  define XXH_AVX512 3
+#  define XXH_NEON   4
+#  define XXH_VSX    5
+#  define XXH_SVE    6
+#endif
+
+#ifndef XXH_VECTOR    /* can be defined on command line */
+#  if defined(__ARM_FEATURE_SVE)
+#    define XXH_VECTOR XXH_SVE
+#  elif ( \
+        defined(__ARM_NEON__) || defined(__ARM_NEON) /* gcc */ \
+     || defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC) /* msvc */ \
+     || (defined(__wasm_simd128__) && XXH_HAS_INCLUDE(<arm_neon.h>)) /* wasm simd128 via SIMDe */ \
+   ) && ( \
+        defined(_WIN32) || defined(__LITTLE_ENDIAN__) /* little endian only */ \
+    || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) \
+   )
+#    define XXH_VECTOR XXH_NEON
+#  elif defined(__AVX512F__)
+#    define XXH_VECTOR XXH_AVX512
+#  elif defined(__AVX2__)
+#    define XXH_VECTOR XXH_AVX2
+#  elif defined(__SSE2__) || defined(_M_AMD64) || defined(_M_X64) || (defined(_M_IX86_FP) && (_M_IX86_FP == 2))
+#    define XXH_VECTOR XXH_SSE2
+#  elif (defined(__PPC64__) && defined(__POWER8_VECTOR__)) \
+     || (defined(__s390x__) && defined(__VEC__)) \
+     && defined(__GNUC__) /* TODO: IBM XL */
+#    define XXH_VECTOR XXH_VSX
+#  else
+#    define XXH_VECTOR XXH_SCALAR
+#  endif
+#endif
+
+/* __ARM_FEATURE_SVE is only supported by GCC & Clang. */
+#if (XXH_VECTOR == XXH_SVE) && !defined(__ARM_FEATURE_SVE)
+#  ifdef _MSC_VER
+#    pragma warning(once : 4606)
+#  else
+#    warning "__ARM_FEATURE_SVE isn't supported. Use SCALAR instead."
+#  endif
+#  undef XXH_VECTOR
+#  define XXH_VECTOR XXH_SCALAR
+#endif
+
+/*
+ * Controls the alignment of the accumulator,
+ * for compatibility with aligned vector loads, which are usually faster.
+ */
+#ifndef XXH_ACC_ALIGN
+#  if defined(XXH_X86DISPATCH)
+#    define XXH_ACC_ALIGN 64  /* for compatibility with avx512 */
+#  elif XXH_VECTOR == XXH_SCALAR  /* scalar */
+#    define XXH_ACC_ALIGN 8
+#  elif XXH_VECTOR == XXH_SSE2  /* sse2 */
+#    define XXH_ACC_ALIGN 16
+#  elif XXH_VECTOR == XXH_AVX2  /* avx2 */
+#    define XXH_ACC_ALIGN 32
+#  elif XXH_VECTOR == XXH_NEON  /* neon */
+#    define XXH_ACC_ALIGN 16
+#  elif XXH_VECTOR == XXH_VSX   /* vsx */
+#    define XXH_ACC_ALIGN 16
+#  elif XXH_VECTOR == XXH_AVX512  /* avx512 */
+#    define XXH_ACC_ALIGN 64
+#  elif XXH_VECTOR == XXH_SVE   /* sve */
+#    define XXH_ACC_ALIGN 64
+#  endif
+#endif
+
+#if defined(XXH_X86DISPATCH) || XXH_VECTOR == XXH_SSE2 \
+    || XXH_VECTOR == XXH_AVX2 || XXH_VECTOR == XXH_AVX512
+#  define XXH_SEC_ALIGN XXH_ACC_ALIGN
+#elif XXH_VECTOR == XXH_SVE
+#  define XXH_SEC_ALIGN XXH_ACC_ALIGN
+#else
+#  define XXH_SEC_ALIGN 8
+#endif
+
+#if defined(__GNUC__) || defined(__clang__)
+#  define XXH_ALIASING __attribute__((may_alias))
+#else
+#  define XXH_ALIASING /* nothing */
+#endif
+
+/*
+ * UGLY HACK:
+ * GCC usually generates the best code with -O3 for xxHash.
+ *
+ * However, when targeting AVX2, it is overzealous in its unrolling resulting
+ * in code roughly 3/4 the speed of Clang.
+ *
+ * There are other issues, such as GCC splitting _mm256_loadu_si256 into
+ * _mm_loadu_si128 + _mm256_inserti128_si256. This is an optimization which
+ * only applies to Sandy and Ivy Bridge... which don't even support AVX2.
+ *
+ * That is why when compiling the AVX2 version, it is recommended to use either
+ *   -O2 -mavx2 -march=haswell
+ * or
+ *   -O2 -mavx2 -mno-avx256-split-unaligned-load
+ * for decent performance, or to use Clang instead.
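+ *
+ * For instance (an illustrative build line, not upstream documentation), the
+ * second recommendation corresponds to:
+ *
+ *   gcc -O2 -mavx2 -mno-avx256-split-unaligned-load -c xxhash.c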
+ * + * Fortunately, we can control the first one with a pragma that forces GCC into + * -O2, but the other one we can't control without "failed to inline always + * inline function due to target mismatch" warnings. + */ +#if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \ + && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \ + && defined(__OPTIMIZE__) && XXH_SIZE_OPT <= 0 /* respect -O0 and -Os */ +# pragma GCC push_options +# pragma GCC optimize("-O2") +#endif + +#if XXH_VECTOR == XXH_NEON + +/* + * UGLY HACK: While AArch64 GCC on Linux does not seem to care, on macOS, GCC -O3 + * optimizes out the entire hashLong loop because of the aliasing violation. + * + * However, GCC is also inefficient at load-store optimization with vld1q/vst1q, + * so the only option is to mark it as aliasing. + */ +typedef uint64x2_t xxh_aliasing_uint64x2_t XXH_ALIASING; + +/*! + * @internal + * @brief `vld1q_u64` but faster and alignment-safe. + * + * On AArch64, unaligned access is always safe, but on ARMv7-a, it is only + * *conditionally* safe (`vld1` has an alignment bit like `movdq[ua]` in x86). + * + * GCC for AArch64 sees `vld1q_u8` as an intrinsic instead of a load, so it + * prohibits load-store optimizations. Therefore, a direct dereference is used. + * + * Otherwise, `vld1q_u8` is used with `vreinterpretq_u8_u64` to do a safe + * unaligned load. + */ +#if defined(__aarch64__) && defined(__GNUC__) && !defined(__clang__) +XXH_FORCE_INLINE uint64x2_t XXH_vld1q_u64(void const* ptr) /* silence -Wcast-align */ +{ + return *(xxh_aliasing_uint64x2_t const *)ptr; +} +#else +XXH_FORCE_INLINE uint64x2_t XXH_vld1q_u64(void const* ptr) +{ + return vreinterpretq_u64_u8(vld1q_u8((uint8_t const*)ptr)); +} +#endif + +/*! + * @internal + * @brief `vmlal_u32` on low and high halves of a vector. + * + * This is a workaround for AArch64 GCC < 11 which implemented arm_neon.h with + * inline assembly and were therefore incapable of merging the `vget_{low, high}_u32` + * with `vmlal_u32`. + */ +#if defined(__aarch64__) && defined(__GNUC__) && !defined(__clang__) && __GNUC__ < 11 +XXH_FORCE_INLINE uint64x2_t +XXH_vmlal_low_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs) +{ + /* Inline assembly is the only way */ + __asm__("umlal %0.2d, %1.2s, %2.2s" : "+w" (acc) : "w" (lhs), "w" (rhs)); + return acc; +} +XXH_FORCE_INLINE uint64x2_t +XXH_vmlal_high_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs) +{ + /* This intrinsic works as expected */ + return vmlal_high_u32(acc, lhs, rhs); +} +#else +/* Portable intrinsic versions */ +XXH_FORCE_INLINE uint64x2_t +XXH_vmlal_low_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs) +{ + return vmlal_u32(acc, vget_low_u32(lhs), vget_low_u32(rhs)); +} +/*! @copydoc XXH_vmlal_low_u32 + * Assume the compiler converts this to vmlal_high_u32 on aarch64 */ +XXH_FORCE_INLINE uint64x2_t +XXH_vmlal_high_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs) +{ + return vmlal_u32(acc, vget_high_u32(lhs), vget_high_u32(rhs)); +} +#endif + +/*! + * @ingroup tuning + * @brief Controls the NEON to scalar ratio for XXH3 + * + * This can be set to 2, 4, 6, or 8. + * + * ARM Cortex CPUs are _very_ sensitive to how their pipelines are used. + * + * For example, the Cortex-A73 can dispatch 3 micro-ops per cycle, but only 2 of those + * can be NEON. If you are only using NEON instructions, you are only using 2/3 of the CPU + * bandwidth. 
+ *
+ * This is even more noticeable on the more advanced cores like the Cortex-A76 which
+ * can dispatch 8 micro-ops per cycle, but still only 2 NEON micro-ops at once.
+ *
+ * Therefore, to make the most out of the pipeline, it is beneficial to run 6 NEON lanes
+ * and 2 scalar lanes, which is chosen by default.
+ *
+ * This does not apply to Apple processors or 32-bit processors, which run better with
+ * full NEON. These will default to 8. Additionally, size-optimized builds run 8 lanes.
+ *
+ * This change benefits CPUs with large micro-op buffers without negatively affecting
+ * most other CPUs:
+ *
+ *  | Chipset               | Dispatch type       | NEON only | 6:2 hybrid | Diff. |
+ *  |:----------------------|:--------------------|----------:|-----------:|------:|
+ *  | Snapdragon 730 (A76)  | 2 NEON/8 micro-ops  |  8.8 GB/s |  10.1 GB/s |  ~16% |
+ *  | Snapdragon 835 (A73)  | 2 NEON/3 micro-ops  |  5.1 GB/s |   5.3 GB/s |   ~5% |
+ *  | Marvell PXA1928 (A53) | In-order dual-issue |  1.9 GB/s |   1.9 GB/s |    0% |
+ *  | Apple M1              | 4 NEON/8 micro-ops  | 37.3 GB/s |  36.1 GB/s |  ~-3% |
+ *
+ * It also seems to fix some bad codegen on GCC, making it almost as fast as clang.
+ *
+ * When using WASM SIMD128, if this is 2 or 6, SIMDe will scalarize 2 of the lanes,
+ * meaning it effectively becomes a slower 4-lane configuration.
+ *
+ * @see XXH3_accumulate_512_neon()
+ */
+# ifndef XXH3_NEON_LANES
+#  if (defined(__aarch64__) || defined(__arm64__) || defined(_M_ARM64) || defined(_M_ARM64EC)) \
+   && !defined(__APPLE__) && XXH_SIZE_OPT <= 0
+#   define XXH3_NEON_LANES 6
+#  else
+#   define XXH3_NEON_LANES XXH_ACC_NB
+#  endif
+# endif
+#endif  /* XXH_VECTOR == XXH_NEON */
+
+/*
+ * VSX and Z Vector helpers.
+ *
+ * This is very messy, and any pull requests to clean this up are welcome.
+ *
+ * There are a lot of problems with supporting VSX and s390x, due to
+ * inconsistent intrinsics, spotty coverage, and multiple endiannesses.
+ */
+#if XXH_VECTOR == XXH_VSX
+/* Annoyingly, these headers _may_ define three macros: `bool`, `vector`,
+ * and `pixel`. This is a problem for obvious reasons.
+ *
+ * These keywords are unnecessary; the spec literally says they are
+ * equivalent to `__bool`, `__vector`, and `__pixel` and may be undef'd
+ * after including the header.
+ *
+ * We use pragma push_macro/pop_macro to keep the namespace clean. */
+#  pragma push_macro("bool")
+#  pragma push_macro("vector")
+#  pragma push_macro("pixel")
+/* silence potential macro redefined warnings */
+#  undef bool
+#  undef vector
+#  undef pixel
+
+#  if defined(__s390x__)
+#    include <s390intrin.h>
+#  else
+#    include <altivec.h>
+#  endif
+
+/* Restore the original macro values, if applicable. */
+#  pragma pop_macro("pixel")
+#  pragma pop_macro("vector")
+#  pragma pop_macro("bool")
+
+typedef __vector unsigned long long xxh_u64x2;
+typedef __vector unsigned char xxh_u8x16;
+typedef __vector unsigned xxh_u32x4;
+
+/*
+ * UGLY HACK: Similar to aarch64 macOS GCC, s390x GCC has the same aliasing issue.
+ */
+typedef xxh_u64x2 xxh_aliasing_u64x2 XXH_ALIASING;
+
+# ifndef XXH_VSX_BE
+#  if defined(__BIG_ENDIAN__) \
+  || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
+#    define XXH_VSX_BE 1
+#  elif defined(__VEC_ELEMENT_REG_ORDER__) && __VEC_ELEMENT_REG_ORDER__ == __ORDER_BIG_ENDIAN__
+#    warning "-maltivec=be is not recommended. Please use native endianness."
+#    define XXH_VSX_BE 1
+#  else
+#    define XXH_VSX_BE 0
+#  endif
+# endif /* !defined(XXH_VSX_BE) */
+
+# if XXH_VSX_BE
+#  if defined(__POWER9_VECTOR__) || (defined(__clang__) && defined(__s390x__))
+#    define XXH_vec_revb vec_revb
+#  else
+/*!
+ * A polyfill for POWER9's vec_revb().
+ */
+XXH_FORCE_INLINE xxh_u64x2 XXH_vec_revb(xxh_u64x2 val)
+{
+    xxh_u8x16 const vByteSwap = { 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
+                                  0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08 };
+    return vec_perm(val, val, vByteSwap);
+}
+#  endif
+# endif /* XXH_VSX_BE */
+
+/*!
+ * Performs an unaligned vector load and byte swaps it on big endian.
+ */
+XXH_FORCE_INLINE xxh_u64x2 XXH_vec_loadu(const void *ptr)
+{
+    xxh_u64x2 ret;
+    XXH_memcpy(&ret, ptr, sizeof(xxh_u64x2));
+# if XXH_VSX_BE
+    ret = XXH_vec_revb(ret);
+# endif
+    return ret;
+}
+
+/*
+ * vec_mulo and vec_mule are very problematic intrinsics on PowerPC
+ *
+ * These intrinsics weren't added until GCC 8, despite existing for a while,
+ * and they are endian dependent. Also, their meanings swap depending on the
+ * version.
+ */
+# if defined(__s390x__)
+ /* s390x is always big endian, no issue on this platform */
+#  define XXH_vec_mulo vec_mulo
+#  define XXH_vec_mule vec_mule
+# elif defined(__clang__) && XXH_HAS_BUILTIN(__builtin_altivec_vmuleuw) && !defined(__ibmxl__)
+/* Clang has a better way to control this, we can just use the builtin which doesn't swap. */
+ /* The IBM XL Compiler (which defines __clang__) only implements the vec_* operations */
+#  define XXH_vec_mulo __builtin_altivec_vmulouw
+#  define XXH_vec_mule __builtin_altivec_vmuleuw
+# else
+/* gcc needs inline assembly */
+/* Adapted from https://github.com/google/highwayhash/blob/master/highwayhash/hh_vsx.h. */
+XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mulo(xxh_u32x4 a, xxh_u32x4 b)
+{
+    xxh_u64x2 result;
+    __asm__("vmulouw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b));
+    return result;
+}
+XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mule(xxh_u32x4 a, xxh_u32x4 b)
+{
+    xxh_u64x2 result;
+    __asm__("vmuleuw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b));
+    return result;
+}
+# endif /* XXH_vec_mulo, XXH_vec_mule */
+#endif /* XXH_VECTOR == XXH_VSX */
+
+#if XXH_VECTOR == XXH_SVE
+#define ACCRND(acc, offset) \
+do { \
+    svuint64_t input_vec = svld1_u64(mask, xinput + offset);         \
+    svuint64_t secret_vec = svld1_u64(mask, xsecret + offset);       \
+    svuint64_t mixed = sveor_u64_x(mask, secret_vec, input_vec);     \
+    svuint64_t swapped = svtbl_u64(input_vec, kSwap);                \
+    svuint64_t mixed_lo = svextw_u64_x(mask, mixed);                 \
+    svuint64_t mixed_hi = svlsr_n_u64_x(mask, mixed, 32);            \
+    svuint64_t mul = svmad_u64_x(mask, mixed_lo, mixed_hi, swapped); \
+    acc = svadd_u64_x(mask, acc, mul);                               \
+} while (0)
+#endif /* XXH_VECTOR == XXH_SVE */
+
+/* prefetch
+ * can be disabled, by declaring XXH_NO_PREFETCH build macro */
+#if defined(XXH_NO_PREFETCH)
+#  define XXH_PREFETCH(ptr)  (void)(ptr)  /* disabled */
+#else
+#  if XXH_SIZE_OPT >= 1
+#    define XXH_PREFETCH(ptr) (void)(ptr)
+#  elif defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86))  /* _mm_prefetch() not defined outside of x86/x64 */
+#    include <mmintrin.h>   /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */
+#    define XXH_PREFETCH(ptr)  _mm_prefetch((const char*)(ptr), _MM_HINT_T0)
+#  elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) )
+#    define XXH_PREFETCH(ptr)  __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)
+#  else
+#    define XXH_PREFETCH(ptr) (void)(ptr)  /* disabled */
+#  endif
+#endif  /* XXH_NO_PREFETCH */
+
+
+/* ==========================================
+ * XXH3 default settings
+ * ========================================== */
+
+#define XXH_SECRET_DEFAULT_SIZE 192   /* minimum XXH3_SECRET_SIZE_MIN */
+
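+/*
+ * Illustrative sketch (not part of upstream xxHash): XXH3's one-shot entry
+ * points, including the custom-secret variant, whose secret must be at least
+ * XXH3_SECRET_SIZE_MIN bytes.  The function name and seed string below are
+ * hypothetical; the block is kept under `#if 0` so it never enters the build.
+ */
+#if 0
+#include <string.h>
+
+static void XXH3_usage_example(void)
+{
+    const char msg[] = "hello world";
+    size_t const len = strlen(msg);
+
+    XXH64_hash_t const h_plain  = XXH3_64bits(msg, len);
+    XXH64_hash_t const h_seeded = XXH3_64bits_withSeed(msg, len, 42);
+
+    /* a custom secret derived from arbitrary seed material */
+    unsigned char secret[XXH3_SECRET_SIZE_MIN];
+    XXH3_generateSecret(secret, sizeof(secret), "my seed material", 16);
+    XXH64_hash_t const h_secret = XXH3_64bits_withSecret(msg, len, secret, sizeof(secret));
+
+    (void)h_plain; (void)h_seeded; (void)h_secret;
+}
+#endif
+
+#if (XXH_SECRET_DEFAULT_SIZE <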
XXH3_SECRET_SIZE_MIN) +# error "default keyset is not large enough" +#endif + +/*! Pseudorandom secret taken directly from FARSH. */ +XXH_ALIGN(64) static const xxh_u8 XXH3_kSecret[XXH_SECRET_DEFAULT_SIZE] = { + 0xb8, 0xfe, 0x6c, 0x39, 0x23, 0xa4, 0x4b, 0xbe, 0x7c, 0x01, 0x81, 0x2c, 0xf7, 0x21, 0xad, 0x1c, + 0xde, 0xd4, 0x6d, 0xe9, 0x83, 0x90, 0x97, 0xdb, 0x72, 0x40, 0xa4, 0xa4, 0xb7, 0xb3, 0x67, 0x1f, + 0xcb, 0x79, 0xe6, 0x4e, 0xcc, 0xc0, 0xe5, 0x78, 0x82, 0x5a, 0xd0, 0x7d, 0xcc, 0xff, 0x72, 0x21, + 0xb8, 0x08, 0x46, 0x74, 0xf7, 0x43, 0x24, 0x8e, 0xe0, 0x35, 0x90, 0xe6, 0x81, 0x3a, 0x26, 0x4c, + 0x3c, 0x28, 0x52, 0xbb, 0x91, 0xc3, 0x00, 0xcb, 0x88, 0xd0, 0x65, 0x8b, 0x1b, 0x53, 0x2e, 0xa3, + 0x71, 0x64, 0x48, 0x97, 0xa2, 0x0d, 0xf9, 0x4e, 0x38, 0x19, 0xef, 0x46, 0xa9, 0xde, 0xac, 0xd8, + 0xa8, 0xfa, 0x76, 0x3f, 0xe3, 0x9c, 0x34, 0x3f, 0xf9, 0xdc, 0xbb, 0xc7, 0xc7, 0x0b, 0x4f, 0x1d, + 0x8a, 0x51, 0xe0, 0x4b, 0xcd, 0xb4, 0x59, 0x31, 0xc8, 0x9f, 0x7e, 0xc9, 0xd9, 0x78, 0x73, 0x64, + 0xea, 0xc5, 0xac, 0x83, 0x34, 0xd3, 0xeb, 0xc3, 0xc5, 0x81, 0xa0, 0xff, 0xfa, 0x13, 0x63, 0xeb, + 0x17, 0x0d, 0xdd, 0x51, 0xb7, 0xf0, 0xda, 0x49, 0xd3, 0x16, 0x55, 0x26, 0x29, 0xd4, 0x68, 0x9e, + 0x2b, 0x16, 0xbe, 0x58, 0x7d, 0x47, 0xa1, 0xfc, 0x8f, 0xf8, 0xb8, 0xd1, 0x7a, 0xd0, 0x31, 0xce, + 0x45, 0xcb, 0x3a, 0x8f, 0x95, 0x16, 0x04, 0x28, 0xaf, 0xd7, 0xfb, 0xca, 0xbb, 0x4b, 0x40, 0x7e, +}; + +static const xxh_u64 PRIME_MX1 = 0x165667919E3779F9ULL; /*!< 0b0001011001010110011001111001000110011110001101110111100111111001 */ +static const xxh_u64 PRIME_MX2 = 0x9FB21C651E98DF25ULL; /*!< 0b1001111110110010000111000110010100011110100110001101111100100101 */ + +#ifdef XXH_OLD_NAMES +# define kSecret XXH3_kSecret +#endif + +#ifdef XXH_DOXYGEN +/*! + * @brief Calculates a 32-bit to 64-bit long multiply. + * + * Implemented as a macro. + * + * Wraps `__emulu` on MSVC x86 because it tends to call `__allmul` when it doesn't + * need to (but it shouldn't need to anyways, it is about 7 instructions to do + * a 64x64 multiply...). Since we know that this will _always_ emit `MULL`, we + * use that instead of the normal method. + * + * If you are compiling for platforms like Thumb-1 and don't have a better option, + * you may also want to write your own long multiply routine here. + * + * @param x, y Numbers to be multiplied + * @return 64-bit product of the low 32 bits of @p x and @p y. + */ +XXH_FORCE_INLINE xxh_u64 +XXH_mult32to64(xxh_u64 x, xxh_u64 y) +{ + return (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF); +} +#elif defined(_MSC_VER) && defined(_M_IX86) +# define XXH_mult32to64(x, y) __emulu((unsigned)(x), (unsigned)(y)) +#else +/* + * Downcast + upcast is usually better than masking on older compilers like + * GCC 4.2 (especially 32-bit ones), all without affecting newer compilers. + * + * The other method, (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF), will AND both operands + * and perform a full 64x64 multiply -- entirely redundant on 32-bit. + */ +# define XXH_mult32to64(x, y) ((xxh_u64)(xxh_u32)(x) * (xxh_u64)(xxh_u32)(y)) +#endif + +/*! + * @brief Calculates a 64->128-bit long multiply. + * + * Uses `__uint128_t` and `_umul128` if available, otherwise uses a scalar + * version. + * + * @param lhs , rhs The 64-bit integers to be multiplied + * @return The 128-bit result represented in an @ref XXH128_hash_t. + */ +static XXH128_hash_t +XXH_mult64to128(xxh_u64 lhs, xxh_u64 rhs) +{ + /* + * GCC/Clang __uint128_t method. + * + * On most 64-bit targets, GCC and Clang define a __uint128_t type. 
+ * This is usually the best way as it usually uses a native long 64-bit + * multiply, such as MULQ on x86_64 or MUL + UMULH on aarch64. + * + * Usually. + * + * Despite being a 32-bit platform, Clang (and emscripten) define this type + * despite not having the arithmetic for it. This results in a laggy + * compiler builtin call which calculates a full 128-bit multiply. + * In that case it is best to use the portable one. + * https://github.com/Cyan4973/xxHash/issues/211#issuecomment-515575677 + */ +#if (defined(__GNUC__) || defined(__clang__)) && !defined(__wasm__) \ + && defined(__SIZEOF_INT128__) \ + || (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128) + + __uint128_t const product = (__uint128_t)lhs * (__uint128_t)rhs; + XXH128_hash_t r128; + r128.low64 = (xxh_u64)(product); + r128.high64 = (xxh_u64)(product >> 64); + return r128; + + /* + * MSVC for x64's _umul128 method. + * + * xxh_u64 _umul128(xxh_u64 Multiplier, xxh_u64 Multiplicand, xxh_u64 *HighProduct); + * + * This compiles to single operand MUL on x64. + */ +#elif (defined(_M_X64) || defined(_M_IA64)) && !defined(_M_ARM64EC) + +#ifndef _MSC_VER +# pragma intrinsic(_umul128) +#endif + xxh_u64 product_high; + xxh_u64 const product_low = _umul128(lhs, rhs, &product_high); + XXH128_hash_t r128; + r128.low64 = product_low; + r128.high64 = product_high; + return r128; + + /* + * MSVC for ARM64's __umulh method. + * + * This compiles to the same MUL + UMULH as GCC/Clang's __uint128_t method. + */ +#elif defined(_M_ARM64) || defined(_M_ARM64EC) + +#ifndef _MSC_VER +# pragma intrinsic(__umulh) +#endif + XXH128_hash_t r128; + r128.low64 = lhs * rhs; + r128.high64 = __umulh(lhs, rhs); + return r128; + +#else + /* + * Portable scalar method. Optimized for 32-bit and 64-bit ALUs. + * + * This is a fast and simple grade school multiply, which is shown below + * with base 10 arithmetic instead of base 0x100000000. + * + * 9 3 // D2 lhs = 93 + * x 7 5 // D2 rhs = 75 + * ---------- + * 1 5 // D2 lo_lo = (93 % 10) * (75 % 10) = 15 + * 4 5 | // D2 hi_lo = (93 / 10) * (75 % 10) = 45 + * 2 1 | // D2 lo_hi = (93 % 10) * (75 / 10) = 21 + * + 6 3 | | // D2 hi_hi = (93 / 10) * (75 / 10) = 63 + * --------- + * 2 7 | // D2 cross = (15 / 10) + (45 % 10) + 21 = 27 + * + 6 7 | | // D2 upper = (27 / 10) + (45 / 10) + 63 = 67 + * --------- + * 6 9 7 5 // D4 res = (27 * 10) + (15 % 10) + (67 * 100) = 6975 + * + * The reasons for adding the products like this are: + * 1. It avoids manual carry tracking. Just like how + * (9 * 9) + 9 + 9 = 99, the same applies with this for UINT64_MAX. + * This avoids a lot of complexity. + * + * 2. It hints for, and on Clang, compiles to, the powerful UMAAL + * instruction available in ARM's Digital Signal Processing extension + * in 32-bit ARMv6 and later, which is shown below: + * + * void UMAAL(xxh_u32 *RdLo, xxh_u32 *RdHi, xxh_u32 Rn, xxh_u32 Rm) + * { + * xxh_u64 product = (xxh_u64)*RdLo * (xxh_u64)*RdHi + Rn + Rm; + * *RdLo = (xxh_u32)(product & 0xFFFFFFFF); + * *RdHi = (xxh_u32)(product >> 32); + * } + * + * This instruction was designed for efficient long multiplication, and + * allows this to be calculated in only 4 instructions at speeds + * comparable to some 64-bit ALUs. + * + * 3. It isn't terrible on other platforms. Usually this will be a couple + * of 32-bit ADD/ADCs. + */ + + /* First calculate all of the cross products. 
*/ + xxh_u64 const lo_lo = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs & 0xFFFFFFFF); + xxh_u64 const hi_lo = XXH_mult32to64(lhs >> 32, rhs & 0xFFFFFFFF); + xxh_u64 const lo_hi = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs >> 32); + xxh_u64 const hi_hi = XXH_mult32to64(lhs >> 32, rhs >> 32); + + /* Now add the products together. These will never overflow. */ + xxh_u64 const cross = (lo_lo >> 32) + (hi_lo & 0xFFFFFFFF) + lo_hi; + xxh_u64 const upper = (hi_lo >> 32) + (cross >> 32) + hi_hi; + xxh_u64 const lower = (cross << 32) | (lo_lo & 0xFFFFFFFF); + + XXH128_hash_t r128; + r128.low64 = lower; + r128.high64 = upper; + return r128; +#endif +} + +/*! + * @brief Calculates a 64-bit to 128-bit multiply, then XOR folds it. + * + * The reason for the separate function is to prevent passing too many structs + * around by value. This will hopefully inline the multiply, but we don't force it. + * + * @param lhs , rhs The 64-bit integers to multiply + * @return The low 64 bits of the product XOR'd by the high 64 bits. + * @see XXH_mult64to128() + */ +static xxh_u64 +XXH3_mul128_fold64(xxh_u64 lhs, xxh_u64 rhs) +{ + XXH128_hash_t product = XXH_mult64to128(lhs, rhs); + return product.low64 ^ product.high64; +} + +/*! Seems to produce slightly better code on GCC for some reason. */ +XXH_FORCE_INLINE XXH_CONSTF xxh_u64 XXH_xorshift64(xxh_u64 v64, int shift) +{ + XXH_ASSERT(0 <= shift && shift < 64); + return v64 ^ (v64 >> shift); +} + +/* + * This is a fast avalanche stage, + * suitable when input bits are already partially mixed + */ +static XXH64_hash_t XXH3_avalanche(xxh_u64 h64) +{ + h64 = XXH_xorshift64(h64, 37); + h64 *= PRIME_MX1; + h64 = XXH_xorshift64(h64, 32); + return h64; +} + +/* + * This is a stronger avalanche, + * inspired by Pelle Evensen's rrmxmx + * preferable when input has not been previously mixed + */ +static XXH64_hash_t XXH3_rrmxmx(xxh_u64 h64, xxh_u64 len) +{ + /* this mix is inspired by Pelle Evensen's rrmxmx */ + h64 ^= XXH_rotl64(h64, 49) ^ XXH_rotl64(h64, 24); + h64 *= PRIME_MX2; + h64 ^= (h64 >> 35) + len ; + h64 *= PRIME_MX2; + return XXH_xorshift64(h64, 28); +} + + +/* ========================================== + * Short keys + * ========================================== + * One of the shortcomings of XXH32 and XXH64 was that their performance was + * sub-optimal on short lengths. It used an iterative algorithm which strongly + * favored lengths that were a multiple of 4 or 8. + * + * Instead of iterating over individual inputs, we use a set of single shot + * functions which piece together a range of lengths and operate in constant time. + * + * Additionally, the number of multiplies has been significantly reduced. This + * reduces latency, especially when emulating 64-bit multiplies on 32-bit. + * + * Depending on the platform, this may or may not be faster than XXH32, but it + * is almost guaranteed to be faster than XXH64. + */ + +/* + * At very short lengths, there isn't enough input to fully hide secrets, or use + * the entire secret. + * + * There is also only a limited amount of mixing we can do before significantly + * impacting performance. + * + * Therefore, we use different sections of the secret and always mix two secret + * samples with an XOR. This should have no effect on performance on the + * seedless or withSeed variants because everything _should_ be constant folded + * by modern compilers. + * + * The XOR mixing hides individual parts of the secret and increases entropy. + * + * This adds an extra layer of strength for custom secrets. 
+ */ +XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t +XXH3_len_1to3_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + XXH_ASSERT(input != NULL); + XXH_ASSERT(1 <= len && len <= 3); + XXH_ASSERT(secret != NULL); + /* + * len = 1: combined = { input[0], 0x01, input[0], input[0] } + * len = 2: combined = { input[1], 0x02, input[0], input[1] } + * len = 3: combined = { input[2], 0x03, input[0], input[1] } + */ + { xxh_u8 const c1 = input[0]; + xxh_u8 const c2 = input[len >> 1]; + xxh_u8 const c3 = input[len - 1]; + xxh_u32 const combined = ((xxh_u32)c1 << 16) | ((xxh_u32)c2 << 24) + | ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8); + xxh_u64 const bitflip = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed; + xxh_u64 const keyed = (xxh_u64)combined ^ bitflip; + return XXH64_avalanche(keyed); + } +} + +XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t +XXH3_len_4to8_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + XXH_ASSERT(input != NULL); + XXH_ASSERT(secret != NULL); + XXH_ASSERT(4 <= len && len <= 8); + seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32; + { xxh_u32 const input1 = XXH_readLE32(input); + xxh_u32 const input2 = XXH_readLE32(input + len - 4); + xxh_u64 const bitflip = (XXH_readLE64(secret+8) ^ XXH_readLE64(secret+16)) - seed; + xxh_u64 const input64 = input2 + (((xxh_u64)input1) << 32); + xxh_u64 const keyed = input64 ^ bitflip; + return XXH3_rrmxmx(keyed, len); + } +} + +XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t +XXH3_len_9to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + XXH_ASSERT(input != NULL); + XXH_ASSERT(secret != NULL); + XXH_ASSERT(9 <= len && len <= 16); + { xxh_u64 const bitflip1 = (XXH_readLE64(secret+24) ^ XXH_readLE64(secret+32)) + seed; + xxh_u64 const bitflip2 = (XXH_readLE64(secret+40) ^ XXH_readLE64(secret+48)) - seed; + xxh_u64 const input_lo = XXH_readLE64(input) ^ bitflip1; + xxh_u64 const input_hi = XXH_readLE64(input + len - 8) ^ bitflip2; + xxh_u64 const acc = len + + XXH_swap64(input_lo) + input_hi + + XXH3_mul128_fold64(input_lo, input_hi); + return XXH3_avalanche(acc); + } +} + +XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t +XXH3_len_0to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + XXH_ASSERT(len <= 16); + { if (XXH_likely(len > 8)) return XXH3_len_9to16_64b(input, len, secret, seed); + if (XXH_likely(len >= 4)) return XXH3_len_4to8_64b(input, len, secret, seed); + if (len) return XXH3_len_1to3_64b(input, len, secret, seed); + return XXH64_avalanche(seed ^ (XXH_readLE64(secret+56) ^ XXH_readLE64(secret+64))); + } +} + +/* + * DISCLAIMER: There are known *seed-dependent* multicollisions here due to + * multiplication by zero, affecting hashes of lengths 17 to 240. + * + * However, they are very unlikely. + * + * Keep this in mind when using the unseeded XXH3_64bits() variant: As with all + * unseeded non-cryptographic hashes, it does not attempt to defend itself + * against specially crafted inputs, only random inputs. + * + * Compared to classic UMAC where a 1 in 2^31 chance of 4 consecutive bytes + * cancelling out the secret is taken an arbitrary number of times (addressed + * in XXH3_accumulate_512), this collision is very unlikely with random inputs + * and/or proper seeding: + * + * This only has a 1 in 2^63 chance of 8 consecutive bytes cancelling out, in a + * function that is only called up to 16 times per hash with up to 240 bytes of + * input. 
+ * + * This is not too bad for a non-cryptographic hash function, especially with + * only 64 bit outputs. + * + * The 128-bit variant (which trades some speed for strength) is NOT affected + * by this, although it is always a good idea to use a proper seed if you care + * about strength. + */ +XXH_FORCE_INLINE xxh_u64 XXH3_mix16B(const xxh_u8* XXH_RESTRICT input, + const xxh_u8* XXH_RESTRICT secret, xxh_u64 seed64) +{ +#if defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \ + && defined(__i386__) && defined(__SSE2__) /* x86 + SSE2 */ \ + && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable like XXH32 hack */ + /* + * UGLY HACK: + * GCC for x86 tends to autovectorize the 128-bit multiply, resulting in + * slower code. + * + * By forcing seed64 into a register, we disrupt the cost model and + * cause it to scalarize. See `XXH32_round()` + * + * FIXME: Clang's output is still _much_ faster -- On an AMD Ryzen 3600, + * XXH3_64bits @ len=240 runs at 4.6 GB/s with Clang 9, but 3.3 GB/s on + * GCC 9.2, despite both emitting scalar code. + * + * GCC generates much better scalar code than Clang for the rest of XXH3, + * which is why finding a more optimal codepath is an interest. + */ + XXH_COMPILER_GUARD(seed64); +#endif + { xxh_u64 const input_lo = XXH_readLE64(input); + xxh_u64 const input_hi = XXH_readLE64(input+8); + return XXH3_mul128_fold64( + input_lo ^ (XXH_readLE64(secret) + seed64), + input_hi ^ (XXH_readLE64(secret+8) - seed64) + ); + } +} + +/* For mid range keys, XXH3 uses a Mum-hash variant. */ +XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t +XXH3_len_17to128_64b(const xxh_u8* XXH_RESTRICT input, size_t len, + const xxh_u8* XXH_RESTRICT secret, size_t secretSize, + XXH64_hash_t seed) +{ + XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize; + XXH_ASSERT(16 < len && len <= 128); + + { xxh_u64 acc = len * XXH_PRIME64_1; +#if XXH_SIZE_OPT >= 1 + /* Smaller and cleaner, but slightly slower. 
*/ + unsigned int i = (unsigned int)(len - 1) / 32; + do { + acc += XXH3_mix16B(input+16 * i, secret+32*i, seed); + acc += XXH3_mix16B(input+len-16*(i+1), secret+32*i+16, seed); + } while (i-- != 0); +#else + if (len > 32) { + if (len > 64) { + if (len > 96) { + acc += XXH3_mix16B(input+48, secret+96, seed); + acc += XXH3_mix16B(input+len-64, secret+112, seed); + } + acc += XXH3_mix16B(input+32, secret+64, seed); + acc += XXH3_mix16B(input+len-48, secret+80, seed); + } + acc += XXH3_mix16B(input+16, secret+32, seed); + acc += XXH3_mix16B(input+len-32, secret+48, seed); + } + acc += XXH3_mix16B(input+0, secret+0, seed); + acc += XXH3_mix16B(input+len-16, secret+16, seed); +#endif + return XXH3_avalanche(acc); + } +} + +#define XXH3_MIDSIZE_MAX 240 + +XXH_NO_INLINE XXH_PUREF XXH64_hash_t +XXH3_len_129to240_64b(const xxh_u8* XXH_RESTRICT input, size_t len, + const xxh_u8* XXH_RESTRICT secret, size_t secretSize, + XXH64_hash_t seed) +{ + XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize; + XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX); + + #define XXH3_MIDSIZE_STARTOFFSET 3 + #define XXH3_MIDSIZE_LASTOFFSET 17 + + { xxh_u64 acc = len * XXH_PRIME64_1; + xxh_u64 acc_end; + unsigned int const nbRounds = (unsigned int)len / 16; + unsigned int i; + XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX); + for (i=0; i<8; i++) { + acc += XXH3_mix16B(input+(16*i), secret+(16*i), seed); + } + /* last bytes */ + acc_end = XXH3_mix16B(input + len - 16, secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET, seed); + XXH_ASSERT(nbRounds >= 8); + acc = XXH3_avalanche(acc); +#if defined(__clang__) /* Clang */ \ + && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \ + && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable */ + /* + * UGLY HACK: + * Clang for ARMv7-A tries to vectorize this loop, similar to GCC x86. + * In everywhere else, it uses scalar code. + * + * For 64->128-bit multiplies, even if the NEON was 100% optimal, it + * would still be slower than UMAAL (see XXH_mult64to128). + * + * Unfortunately, Clang doesn't handle the long multiplies properly and + * converts them to the nonexistent "vmulq_u64" intrinsic, which is then + * scalarized into an ugly mess of VMOV.32 instructions. + * + * This mess is difficult to avoid without turning autovectorization + * off completely, but they are usually relatively minor and/or not + * worth it to fix. + * + * This loop is the easiest to fix, as unlike XXH32, this pragma + * _actually works_ because it is a loop vectorization instead of an + * SLP vectorization. + */ + #pragma clang loop vectorize(disable) +#endif + for (i=8 ; i < nbRounds; i++) { + /* + * Prevents clang for unrolling the acc loop and interleaving with this one. + */ + XXH_COMPILER_GUARD(acc); + acc_end += XXH3_mix16B(input+(16*i), secret+(16*(i-8)) + XXH3_MIDSIZE_STARTOFFSET, seed); + } + return XXH3_avalanche(acc + acc_end); + } +} + + +/* ======= Long Keys ======= */ + +#define XXH_STRIPE_LEN 64 +#define XXH_SECRET_CONSUME_RATE 8 /* nb of secret bytes consumed at each accumulation */ +#define XXH_ACC_NB (XXH_STRIPE_LEN / sizeof(xxh_u64)) + +#ifdef XXH_OLD_NAMES +# define STRIPE_LEN XXH_STRIPE_LEN +# define ACC_NB XXH_ACC_NB +#endif + +#ifndef XXH_PREFETCH_DIST +# ifdef __clang__ +# define XXH_PREFETCH_DIST 320 +# else +# if (XXH_VECTOR == XXH_AVX512) +# define XXH_PREFETCH_DIST 512 +# else +# define XXH_PREFETCH_DIST 384 +# endif +# endif /* __clang__ */ +#endif /* XXH_PREFETCH_DIST */ + +/* + * These macros are to generate an XXH3_accumulate() function. 
+ * The two arguments select the name suffix and target attribute.
+ *
+ * The name of this symbol is XXH3_accumulate_<name>() and it calls
+ * XXH3_accumulate_512_<name>().
+ *
+ * It may be useful to hand implement this function if the compiler fails to
+ * optimize the inline function.
+ */
+#define XXH3_ACCUMULATE_TEMPLATE(name)                      \
+void                                                        \
+XXH3_accumulate_##name(xxh_u64* XXH_RESTRICT acc,           \
+                       const xxh_u8* XXH_RESTRICT input,    \
+                       const xxh_u8* XXH_RESTRICT secret,   \
+                       size_t nbStripes)                    \
+{                                                           \
+    size_t n;                                               \
+    for (n = 0; n < nbStripes; n++ ) {                      \
+        const xxh_u8* const in = input + n*XXH_STRIPE_LEN;  \
+        XXH_PREFETCH(in + XXH_PREFETCH_DIST);               \
+        XXH3_accumulate_512_##name(                         \
+                 acc,                                       \
+                 in,                                        \
+                 secret + n*XXH_SECRET_CONSUME_RATE);       \
+    }                                                       \
+}
+
+
+XXH_FORCE_INLINE void XXH_writeLE64(void* dst, xxh_u64 v64)
+{
+    if (!XXH_CPU_LITTLE_ENDIAN) v64 = XXH_swap64(v64);
+    XXH_memcpy(dst, &v64, sizeof(v64));
+}
+
+/* Several intrinsic functions below are supposed to accept __int64 as argument,
+ * as documented in https://software.intel.com/sites/landingpage/IntrinsicsGuide/ .
+ * However, several environments do not define __int64 type,
+ * requiring a workaround.
+ */
+#if !defined (__VMS) \
+  && (defined (__cplusplus) \
+  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+    typedef int64_t xxh_i64;
+#else
+    /* the following type must have a width of 64-bit */
+    typedef long long xxh_i64;
+#endif
+
+
+/*
+ * XXH3_accumulate_512 is the tightest loop for long inputs, and it is the most optimized.
+ *
+ * It is a hardened version of UMAC, based off of FARSH's implementation.
+ *
+ * This was chosen because it adapts quite well to 32-bit, 64-bit, and SIMD
+ * implementations, and it is ridiculously fast.
+ *
+ * We harden it by mixing the original input to the accumulators as well as the product.
+ *
+ * This means that in the (relatively likely) case of a multiply by zero, the
+ * original input is preserved.
+ *
+ * On 128-bit inputs, we swap 64-bit pairs when we add the input to improve
+ * cross-pollination, as otherwise the upper and lower halves would be
+ * essentially independent.
+ *
+ * This doesn't matter on 64-bit hashes since they all get merged together in
+ * the end, so we skip the extra step.
+ *
+ * Both XXH3_64bits and XXH3_128bits use this subroutine.
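+ *
+ * A rough scalar sketch of one lane of this step (illustrative; the canonical
+ * version is XXH3_scalarRound() further below):
+ *
+ *   xxh_u64 const data_val = XXH_readLE64(input  + 8*lane);
+ *   xxh_u64 const data_key = data_val ^ XXH_readLE64(secret + 8*lane);
+ *   acc[lane ^ 1] += data_val;  // mix the original input into the swapped lane
+ *   acc[lane]     += XXH_mult32to64(data_key & 0xFFFFFFFF, data_key >> 32);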
+ */ + +#if (XXH_VECTOR == XXH_AVX512) \ + || (defined(XXH_DISPATCH_AVX512) && XXH_DISPATCH_AVX512 != 0) + +#ifndef XXH_TARGET_AVX512 +# define XXH_TARGET_AVX512 /* disable attribute target */ +#endif + +XXH_FORCE_INLINE XXH_TARGET_AVX512 void +XXH3_accumulate_512_avx512(void* XXH_RESTRICT acc, + const void* XXH_RESTRICT input, + const void* XXH_RESTRICT secret) +{ + __m512i* const xacc = (__m512i *) acc; + XXH_ASSERT((((size_t)acc) & 63) == 0); + XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i)); + + { + /* data_vec = input[0]; */ + __m512i const data_vec = _mm512_loadu_si512 (input); + /* key_vec = secret[0]; */ + __m512i const key_vec = _mm512_loadu_si512 (secret); + /* data_key = data_vec ^ key_vec; */ + __m512i const data_key = _mm512_xor_si512 (data_vec, key_vec); + /* data_key_lo = data_key >> 32; */ + __m512i const data_key_lo = _mm512_srli_epi64 (data_key, 32); + /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */ + __m512i const product = _mm512_mul_epu32 (data_key, data_key_lo); + /* xacc[0] += swap(data_vec); */ + __m512i const data_swap = _mm512_shuffle_epi32(data_vec, (_MM_PERM_ENUM)_MM_SHUFFLE(1, 0, 3, 2)); + __m512i const sum = _mm512_add_epi64(*xacc, data_swap); + /* xacc[0] += product; */ + *xacc = _mm512_add_epi64(product, sum); + } +} +XXH_FORCE_INLINE XXH_TARGET_AVX512 XXH3_ACCUMULATE_TEMPLATE(avx512) + +/* + * XXH3_scrambleAcc: Scrambles the accumulators to improve mixing. + * + * Multiplication isn't perfect, as explained by Google in HighwayHash: + * + * // Multiplication mixes/scrambles bytes 0-7 of the 64-bit result to + * // varying degrees. In descending order of goodness, bytes + * // 3 4 2 5 1 6 0 7 have quality 228 224 164 160 100 96 36 32. + * // As expected, the upper and lower bytes are much worse. + * + * Source: https://github.com/google/highwayhash/blob/0aaf66b/highwayhash/hh_avx2.h#L291 + * + * Since our algorithm uses a pseudorandom secret to add some variance into the + * mix, we don't need to (or want to) mix as often or as much as HighwayHash does. + * + * This isn't as tight as XXH3_accumulate, but still written in SIMD to avoid + * extraction. + * + * Both XXH3_64bits and XXH3_128bits use this subroutine. 
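+ *
+ * A rough scalar sketch of one lane of the scramble (illustrative; the
+ * canonical version is XXH3_scalarScrambleRound()):
+ *
+ *   acc[lane] ^= acc[lane] >> 47;
+ *   acc[lane] ^= XXH_readLE64(secret + 8*lane);
+ *   acc[lane] *= XXH_PRIME32_1;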
+ */ + +XXH_FORCE_INLINE XXH_TARGET_AVX512 void +XXH3_scrambleAcc_avx512(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) +{ + XXH_ASSERT((((size_t)acc) & 63) == 0); + XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i)); + { __m512i* const xacc = (__m512i*) acc; + const __m512i prime32 = _mm512_set1_epi32((int)XXH_PRIME32_1); + + /* xacc[0] ^= (xacc[0] >> 47) */ + __m512i const acc_vec = *xacc; + __m512i const shifted = _mm512_srli_epi64 (acc_vec, 47); + /* xacc[0] ^= secret; */ + __m512i const key_vec = _mm512_loadu_si512 (secret); + __m512i const data_key = _mm512_ternarylogic_epi32(key_vec, acc_vec, shifted, 0x96 /* key_vec ^ acc_vec ^ shifted */); + + /* xacc[0] *= XXH_PRIME32_1; */ + __m512i const data_key_hi = _mm512_srli_epi64 (data_key, 32); + __m512i const prod_lo = _mm512_mul_epu32 (data_key, prime32); + __m512i const prod_hi = _mm512_mul_epu32 (data_key_hi, prime32); + *xacc = _mm512_add_epi64(prod_lo, _mm512_slli_epi64(prod_hi, 32)); + } +} + +XXH_FORCE_INLINE XXH_TARGET_AVX512 void +XXH3_initCustomSecret_avx512(void* XXH_RESTRICT customSecret, xxh_u64 seed64) +{ + XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 63) == 0); + XXH_STATIC_ASSERT(XXH_SEC_ALIGN == 64); + XXH_ASSERT(((size_t)customSecret & 63) == 0); + (void)(&XXH_writeLE64); + { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m512i); + __m512i const seed_pos = _mm512_set1_epi64((xxh_i64)seed64); + __m512i const seed = _mm512_mask_sub_epi64(seed_pos, 0xAA, _mm512_set1_epi8(0), seed_pos); + + const __m512i* const src = (const __m512i*) ((const void*) XXH3_kSecret); + __m512i* const dest = ( __m512i*) customSecret; + int i; + XXH_ASSERT(((size_t)src & 63) == 0); /* control alignment */ + XXH_ASSERT(((size_t)dest & 63) == 0); + for (i=0; i < nbRounds; ++i) { + dest[i] = _mm512_add_epi64(_mm512_load_si512(src + i), seed); + } } +} + +#endif + +#if (XXH_VECTOR == XXH_AVX2) \ + || (defined(XXH_DISPATCH_AVX2) && XXH_DISPATCH_AVX2 != 0) + +#ifndef XXH_TARGET_AVX2 +# define XXH_TARGET_AVX2 /* disable attribute target */ +#endif + +XXH_FORCE_INLINE XXH_TARGET_AVX2 void +XXH3_accumulate_512_avx2( void* XXH_RESTRICT acc, + const void* XXH_RESTRICT input, + const void* XXH_RESTRICT secret) +{ + XXH_ASSERT((((size_t)acc) & 31) == 0); + { __m256i* const xacc = (__m256i *) acc; + /* Unaligned. This is mainly for pointer arithmetic, and because + * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */ + const __m256i* const xinput = (const __m256i *) input; + /* Unaligned. This is mainly for pointer arithmetic, and because + * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. 
*/ + const __m256i* const xsecret = (const __m256i *) secret; + + size_t i; + for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) { + /* data_vec = xinput[i]; */ + __m256i const data_vec = _mm256_loadu_si256 (xinput+i); + /* key_vec = xsecret[i]; */ + __m256i const key_vec = _mm256_loadu_si256 (xsecret+i); + /* data_key = data_vec ^ key_vec; */ + __m256i const data_key = _mm256_xor_si256 (data_vec, key_vec); + /* data_key_lo = data_key >> 32; */ + __m256i const data_key_lo = _mm256_srli_epi64 (data_key, 32); + /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */ + __m256i const product = _mm256_mul_epu32 (data_key, data_key_lo); + /* xacc[i] += swap(data_vec); */ + __m256i const data_swap = _mm256_shuffle_epi32(data_vec, _MM_SHUFFLE(1, 0, 3, 2)); + __m256i const sum = _mm256_add_epi64(xacc[i], data_swap); + /* xacc[i] += product; */ + xacc[i] = _mm256_add_epi64(product, sum); + } } +} +XXH_FORCE_INLINE XXH_TARGET_AVX2 XXH3_ACCUMULATE_TEMPLATE(avx2) + +XXH_FORCE_INLINE XXH_TARGET_AVX2 void +XXH3_scrambleAcc_avx2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) +{ + XXH_ASSERT((((size_t)acc) & 31) == 0); + { __m256i* const xacc = (__m256i*) acc; + /* Unaligned. This is mainly for pointer arithmetic, and because + * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */ + const __m256i* const xsecret = (const __m256i *) secret; + const __m256i prime32 = _mm256_set1_epi32((int)XXH_PRIME32_1); + + size_t i; + for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) { + /* xacc[i] ^= (xacc[i] >> 47) */ + __m256i const acc_vec = xacc[i]; + __m256i const shifted = _mm256_srli_epi64 (acc_vec, 47); + __m256i const data_vec = _mm256_xor_si256 (acc_vec, shifted); + /* xacc[i] ^= xsecret; */ + __m256i const key_vec = _mm256_loadu_si256 (xsecret+i); + __m256i const data_key = _mm256_xor_si256 (data_vec, key_vec); + + /* xacc[i] *= XXH_PRIME32_1; */ + __m256i const data_key_hi = _mm256_srli_epi64 (data_key, 32); + __m256i const prod_lo = _mm256_mul_epu32 (data_key, prime32); + __m256i const prod_hi = _mm256_mul_epu32 (data_key_hi, prime32); + xacc[i] = _mm256_add_epi64(prod_lo, _mm256_slli_epi64(prod_hi, 32)); + } + } +} + +XXH_FORCE_INLINE XXH_TARGET_AVX2 void XXH3_initCustomSecret_avx2(void* XXH_RESTRICT customSecret, xxh_u64 seed64) +{ + XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 31) == 0); + XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE / sizeof(__m256i)) == 6); + XXH_STATIC_ASSERT(XXH_SEC_ALIGN <= 64); + (void)(&XXH_writeLE64); + XXH_PREFETCH(customSecret); + { __m256i const seed = _mm256_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64, (xxh_i64)(0U - seed64), (xxh_i64)seed64); + + const __m256i* const src = (const __m256i*) ((const void*) XXH3_kSecret); + __m256i* dest = ( __m256i*) customSecret; + +# if defined(__GNUC__) || defined(__clang__) + /* + * On GCC & Clang, marking 'dest' as modified will cause the compiler: + * - do not extract the secret from sse registers in the internal loop + * - use less common registers, and avoid pushing these reg into stack + */ + XXH_COMPILER_GUARD(dest); +# endif + XXH_ASSERT(((size_t)src & 31) == 0); /* control alignment */ + XXH_ASSERT(((size_t)dest & 31) == 0); + + /* GCC -O2 need unroll loop manually */ + dest[0] = _mm256_add_epi64(_mm256_load_si256(src+0), seed); + dest[1] = _mm256_add_epi64(_mm256_load_si256(src+1), seed); + dest[2] = _mm256_add_epi64(_mm256_load_si256(src+2), seed); + dest[3] = _mm256_add_epi64(_mm256_load_si256(src+3), seed); + dest[4] = _mm256_add_epi64(_mm256_load_si256(src+4), seed); + dest[5] 
= _mm256_add_epi64(_mm256_load_si256(src+5), seed); + } +} + +#endif + +/* x86dispatch always generates SSE2 */ +#if (XXH_VECTOR == XXH_SSE2) || defined(XXH_X86DISPATCH) + +#ifndef XXH_TARGET_SSE2 +# define XXH_TARGET_SSE2 /* disable attribute target */ +#endif + +XXH_FORCE_INLINE XXH_TARGET_SSE2 void +XXH3_accumulate_512_sse2( void* XXH_RESTRICT acc, + const void* XXH_RESTRICT input, + const void* XXH_RESTRICT secret) +{ + /* SSE2 is just a half-scale version of the AVX2 version. */ + XXH_ASSERT((((size_t)acc) & 15) == 0); + { __m128i* const xacc = (__m128i *) acc; + /* Unaligned. This is mainly for pointer arithmetic, and because + * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */ + const __m128i* const xinput = (const __m128i *) input; + /* Unaligned. This is mainly for pointer arithmetic, and because + * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */ + const __m128i* const xsecret = (const __m128i *) secret; + + size_t i; + for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) { + /* data_vec = xinput[i]; */ + __m128i const data_vec = _mm_loadu_si128 (xinput+i); + /* key_vec = xsecret[i]; */ + __m128i const key_vec = _mm_loadu_si128 (xsecret+i); + /* data_key = data_vec ^ key_vec; */ + __m128i const data_key = _mm_xor_si128 (data_vec, key_vec); + /* data_key_lo = data_key >> 32; */ + __m128i const data_key_lo = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1)); + /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */ + __m128i const product = _mm_mul_epu32 (data_key, data_key_lo); + /* xacc[i] += swap(data_vec); */ + __m128i const data_swap = _mm_shuffle_epi32(data_vec, _MM_SHUFFLE(1,0,3,2)); + __m128i const sum = _mm_add_epi64(xacc[i], data_swap); + /* xacc[i] += product; */ + xacc[i] = _mm_add_epi64(product, sum); + } } +} +XXH_FORCE_INLINE XXH_TARGET_SSE2 XXH3_ACCUMULATE_TEMPLATE(sse2) + +XXH_FORCE_INLINE XXH_TARGET_SSE2 void +XXH3_scrambleAcc_sse2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) +{ + XXH_ASSERT((((size_t)acc) & 15) == 0); + { __m128i* const xacc = (__m128i*) acc; + /* Unaligned. This is mainly for pointer arithmetic, and because + * _mm_loadu_si128 requires a const __m128i * pointer for some reason. 
*/ + const __m128i* const xsecret = (const __m128i *) secret; + const __m128i prime32 = _mm_set1_epi32((int)XXH_PRIME32_1); + + size_t i; + for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) { + /* xacc[i] ^= (xacc[i] >> 47) */ + __m128i const acc_vec = xacc[i]; + __m128i const shifted = _mm_srli_epi64 (acc_vec, 47); + __m128i const data_vec = _mm_xor_si128 (acc_vec, shifted); + /* xacc[i] ^= xsecret[i]; */ + __m128i const key_vec = _mm_loadu_si128 (xsecret+i); + __m128i const data_key = _mm_xor_si128 (data_vec, key_vec); + + /* xacc[i] *= XXH_PRIME32_1; */ + __m128i const data_key_hi = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1)); + __m128i const prod_lo = _mm_mul_epu32 (data_key, prime32); + __m128i const prod_hi = _mm_mul_epu32 (data_key_hi, prime32); + xacc[i] = _mm_add_epi64(prod_lo, _mm_slli_epi64(prod_hi, 32)); + } + } +} + +XXH_FORCE_INLINE XXH_TARGET_SSE2 void XXH3_initCustomSecret_sse2(void* XXH_RESTRICT customSecret, xxh_u64 seed64) +{ + XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0); + (void)(&XXH_writeLE64); + { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m128i); + +# if defined(_MSC_VER) && defined(_M_IX86) && _MSC_VER < 1900 + /* MSVC 32bit mode does not support _mm_set_epi64x before 2015 */ + XXH_ALIGN(16) const xxh_i64 seed64x2[2] = { (xxh_i64)seed64, (xxh_i64)(0U - seed64) }; + __m128i const seed = _mm_load_si128((__m128i const*)seed64x2); +# else + __m128i const seed = _mm_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64); +# endif + int i; + + const void* const src16 = XXH3_kSecret; + __m128i* dst16 = (__m128i*) customSecret; +# if defined(__GNUC__) || defined(__clang__) + /* + * On GCC & Clang, marking 'dest' as modified will cause the compiler: + * - do not extract the secret from sse registers in the internal loop + * - use less common registers, and avoid pushing these reg into stack + */ + XXH_COMPILER_GUARD(dst16); +# endif + XXH_ASSERT(((size_t)src16 & 15) == 0); /* control alignment */ + XXH_ASSERT(((size_t)dst16 & 15) == 0); + + for (i=0; i < nbRounds; ++i) { + dst16[i] = _mm_add_epi64(_mm_load_si128((const __m128i *)src16+i), seed); + } } +} + +#endif + +#if (XXH_VECTOR == XXH_NEON) + +/* forward declarations for the scalar routines */ +XXH_FORCE_INLINE void +XXH3_scalarRound(void* XXH_RESTRICT acc, void const* XXH_RESTRICT input, + void const* XXH_RESTRICT secret, size_t lane); + +XXH_FORCE_INLINE void +XXH3_scalarScrambleRound(void* XXH_RESTRICT acc, + void const* XXH_RESTRICT secret, size_t lane); + +/*! + * @internal + * @brief The bulk processing loop for NEON and WASM SIMD128. + * + * The NEON code path is actually partially scalar when running on AArch64. This + * is to optimize the pipelining and can have up to 15% speedup depending on the + * CPU, and it also mitigates some GCC codegen issues. + * + * @see XXH3_NEON_LANES for configuring this and details about this optimization. + * + * NEON's 32-bit to 64-bit long multiply takes a half vector of 32-bit + * integers instead of the other platforms which mask full 64-bit vectors, + * so the setup is more complicated than just shifting right. + * + * Additionally, there is an optimization for 4 lanes at once noted below. + * + * Since, as stated, the most optimal amount of lanes for Cortexes is 6, + * there needs to be *three* versions of the accumulate operation used + * for the remaining 2 lanes. + * + * WASM's SIMD128 uses SIMDe's arm_neon.h polyfill because the intrinsics overlap + * nearly perfectly. 
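+ *
+ * For instance (an illustrative build line, not upstream documentation), a
+ * build can force full-NEON operation on cores where the 6:2 hybrid split
+ * does not help:
+ *
+ *   cc -O3 -DXXH3_NEON_LANES=8 -c xxhash.c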
+ */ + +XXH_FORCE_INLINE void +XXH3_accumulate_512_neon( void* XXH_RESTRICT acc, + const void* XXH_RESTRICT input, + const void* XXH_RESTRICT secret) +{ + XXH_ASSERT((((size_t)acc) & 15) == 0); + XXH_STATIC_ASSERT(XXH3_NEON_LANES > 0 && XXH3_NEON_LANES <= XXH_ACC_NB && XXH3_NEON_LANES % 2 == 0); + { /* GCC for darwin arm64 does not like aliasing here */ + xxh_aliasing_uint64x2_t* const xacc = (xxh_aliasing_uint64x2_t*) acc; + /* We don't use a uint32x4_t pointer because it causes bus errors on ARMv7. */ + uint8_t const* xinput = (const uint8_t *) input; + uint8_t const* xsecret = (const uint8_t *) secret; + + size_t i; +#ifdef __wasm_simd128__ + /* + * On WASM SIMD128, Clang emits direct address loads when XXH3_kSecret + * is constant propagated, which results in it converting it to this + * inside the loop: + * + * a = v128.load(XXH3_kSecret + 0 + $secret_offset, offset = 0) + * b = v128.load(XXH3_kSecret + 16 + $secret_offset, offset = 0) + * ... + * + * This requires a full 32-bit address immediate (and therefore a 6 byte + * instruction) as well as an add for each offset. + * + * Putting an asm guard prevents it from folding (at the cost of losing + * the alignment hint), and uses the free offset in `v128.load` instead + * of adding secret_offset each time which overall reduces code size by + * about a kilobyte and improves performance. + */ + XXH_COMPILER_GUARD(xsecret); +#endif + /* Scalar lanes use the normal scalarRound routine */ + for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) { + XXH3_scalarRound(acc, input, secret, i); + } + i = 0; + /* 4 NEON lanes at a time. */ + for (; i+1 < XXH3_NEON_LANES / 2; i+=2) { + /* data_vec = xinput[i]; */ + uint64x2_t data_vec_1 = XXH_vld1q_u64(xinput + (i * 16)); + uint64x2_t data_vec_2 = XXH_vld1q_u64(xinput + ((i+1) * 16)); + /* key_vec = xsecret[i]; */ + uint64x2_t key_vec_1 = XXH_vld1q_u64(xsecret + (i * 16)); + uint64x2_t key_vec_2 = XXH_vld1q_u64(xsecret + ((i+1) * 16)); + /* data_swap = swap(data_vec) */ + uint64x2_t data_swap_1 = vextq_u64(data_vec_1, data_vec_1, 1); + uint64x2_t data_swap_2 = vextq_u64(data_vec_2, data_vec_2, 1); + /* data_key = data_vec ^ key_vec; */ + uint64x2_t data_key_1 = veorq_u64(data_vec_1, key_vec_1); + uint64x2_t data_key_2 = veorq_u64(data_vec_2, key_vec_2); + + /* + * If we reinterpret the 64x2 vectors as 32x4 vectors, we can use a + * de-interleave operation for 4 lanes in 1 step with `vuzpq_u32` to + * get one vector with the low 32 bits of each lane, and one vector + * with the high 32 bits of each lane. + * + * The intrinsic returns a double vector because the original ARMv7-a + * instruction modified both arguments in place. AArch64 and SIMD128 emit + * two instructions from this intrinsic. + * + * [ dk11L | dk11H | dk12L | dk12H ] -> [ dk11L | dk12L | dk21L | dk22L ] + * [ dk21L | dk21H | dk22L | dk22H ] -> [ dk11H | dk12H | dk21H | dk22H ] + */ + uint32x4x2_t unzipped = vuzpq_u32( + vreinterpretq_u32_u64(data_key_1), + vreinterpretq_u32_u64(data_key_2) + ); + /* data_key_lo = data_key & 0xFFFFFFFF */ + uint32x4_t data_key_lo = unzipped.val[0]; + /* data_key_hi = data_key >> 32 */ + uint32x4_t data_key_hi = unzipped.val[1]; + /* + * Then, we can split the vectors horizontally and multiply which, as for most + * widening intrinsics, have a variant that works on both high half vectors + * for free on AArch64. A similar instruction is available on SIMD128. 
+ * + * sum = data_swap + (u64x2) data_key_lo * (u64x2) data_key_hi + */ + uint64x2_t sum_1 = XXH_vmlal_low_u32(data_swap_1, data_key_lo, data_key_hi); + uint64x2_t sum_2 = XXH_vmlal_high_u32(data_swap_2, data_key_lo, data_key_hi); + /* + * Clang reorders + * a += b * c; // umlal swap.2d, dkl.2s, dkh.2s + * c += a; // add acc.2d, acc.2d, swap.2d + * to + * c += a; // add acc.2d, acc.2d, swap.2d + * c += b * c; // umlal acc.2d, dkl.2s, dkh.2s + * + * While it would make sense in theory since the addition is faster, + * for reasons likely related to umlal being limited to certain NEON + * pipelines, this is worse. A compiler guard fixes this. + */ + XXH_COMPILER_GUARD_CLANG_NEON(sum_1); + XXH_COMPILER_GUARD_CLANG_NEON(sum_2); + /* xacc[i] = acc_vec + sum; */ + xacc[i] = vaddq_u64(xacc[i], sum_1); + xacc[i+1] = vaddq_u64(xacc[i+1], sum_2); + } + /* Operate on the remaining NEON lanes 2 at a time. */ + for (; i < XXH3_NEON_LANES / 2; i++) { + /* data_vec = xinput[i]; */ + uint64x2_t data_vec = XXH_vld1q_u64(xinput + (i * 16)); + /* key_vec = xsecret[i]; */ + uint64x2_t key_vec = XXH_vld1q_u64(xsecret + (i * 16)); + /* acc_vec_2 = swap(data_vec) */ + uint64x2_t data_swap = vextq_u64(data_vec, data_vec, 1); + /* data_key = data_vec ^ key_vec; */ + uint64x2_t data_key = veorq_u64(data_vec, key_vec); + /* For two lanes, just use VMOVN and VSHRN. */ + /* data_key_lo = data_key & 0xFFFFFFFF; */ + uint32x2_t data_key_lo = vmovn_u64(data_key); + /* data_key_hi = data_key >> 32; */ + uint32x2_t data_key_hi = vshrn_n_u64(data_key, 32); + /* sum = data_swap + (u64x2) data_key_lo * (u64x2) data_key_hi; */ + uint64x2_t sum = vmlal_u32(data_swap, data_key_lo, data_key_hi); + /* Same Clang workaround as before */ + XXH_COMPILER_GUARD_CLANG_NEON(sum); + /* xacc[i] = acc_vec + sum; */ + xacc[i] = vaddq_u64 (xacc[i], sum); + } + } +} +XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(neon) + +XXH_FORCE_INLINE void +XXH3_scrambleAcc_neon(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) +{ + XXH_ASSERT((((size_t)acc) & 15) == 0); + + { xxh_aliasing_uint64x2_t* xacc = (xxh_aliasing_uint64x2_t*) acc; + uint8_t const* xsecret = (uint8_t const*) secret; + + size_t i; + /* WASM uses operator overloads and doesn't need these. 
*/ +#ifndef __wasm_simd128__ + /* { prime32_1, prime32_1 } */ + uint32x2_t const kPrimeLo = vdup_n_u32(XXH_PRIME32_1); + /* { 0, prime32_1, 0, prime32_1 } */ + uint32x4_t const kPrimeHi = vreinterpretq_u32_u64(vdupq_n_u64((xxh_u64)XXH_PRIME32_1 << 32)); +#endif + + /* AArch64 uses both scalar and neon at the same time */ + for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) { + XXH3_scalarScrambleRound(acc, secret, i); + } + for (i=0; i < XXH3_NEON_LANES / 2; i++) { + /* xacc[i] ^= (xacc[i] >> 47); */ + uint64x2_t acc_vec = xacc[i]; + uint64x2_t shifted = vshrq_n_u64(acc_vec, 47); + uint64x2_t data_vec = veorq_u64(acc_vec, shifted); + + /* xacc[i] ^= xsecret[i]; */ + uint64x2_t key_vec = XXH_vld1q_u64(xsecret + (i * 16)); + uint64x2_t data_key = veorq_u64(data_vec, key_vec); + /* xacc[i] *= XXH_PRIME32_1 */ +#ifdef __wasm_simd128__ + /* SIMD128 has multiply by u64x2, use it instead of expanding and scalarizing */ + xacc[i] = data_key * XXH_PRIME32_1; +#else + /* + * Expanded version with portable NEON intrinsics + * + * lo(x) * lo(y) + (hi(x) * lo(y) << 32) + * + * prod_hi = hi(data_key) * lo(prime) << 32 + * + * Since we only need 32 bits of this multiply a trick can be used, reinterpreting the vector + * as a uint32x4_t and multiplying by { 0, prime, 0, prime } to cancel out the unwanted bits + * and avoid the shift. + */ + uint32x4_t prod_hi = vmulq_u32 (vreinterpretq_u32_u64(data_key), kPrimeHi); + /* Extract low bits for vmlal_u32 */ + uint32x2_t data_key_lo = vmovn_u64(data_key); + /* xacc[i] = prod_hi + lo(data_key) * XXH_PRIME32_1; */ + xacc[i] = vmlal_u32(vreinterpretq_u64_u32(prod_hi), data_key_lo, kPrimeLo); +#endif + } + } +} +#endif + +#if (XXH_VECTOR == XXH_VSX) + +XXH_FORCE_INLINE void +XXH3_accumulate_512_vsx( void* XXH_RESTRICT acc, + const void* XXH_RESTRICT input, + const void* XXH_RESTRICT secret) +{ + /* presumed aligned */ + xxh_aliasing_u64x2* const xacc = (xxh_aliasing_u64x2*) acc; + xxh_u8 const* const xinput = (xxh_u8 const*) input; /* no alignment restriction */ + xxh_u8 const* const xsecret = (xxh_u8 const*) secret; /* no alignment restriction */ + xxh_u64x2 const v32 = { 32, 32 }; + size_t i; + for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) { + /* data_vec = xinput[i]; */ + xxh_u64x2 const data_vec = XXH_vec_loadu(xinput + 16*i); + /* key_vec = xsecret[i]; */ + xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + 16*i); + xxh_u64x2 const data_key = data_vec ^ key_vec; + /* shuffled = (data_key << 32) | (data_key >> 32); */ + xxh_u32x4 const shuffled = (xxh_u32x4)vec_rl(data_key, v32); + /* product = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)shuffled & 0xFFFFFFFF); */ + xxh_u64x2 const product = XXH_vec_mulo((xxh_u32x4)data_key, shuffled); + /* acc_vec = xacc[i]; */ + xxh_u64x2 acc_vec = xacc[i]; + acc_vec += product; + + /* swap high and low halves */ +#ifdef __s390x__ + acc_vec += vec_permi(data_vec, data_vec, 2); +#else + acc_vec += vec_xxpermdi(data_vec, data_vec, 2); +#endif + xacc[i] = acc_vec; + } +} +XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(vsx) + +XXH_FORCE_INLINE void +XXH3_scrambleAcc_vsx(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) +{ + XXH_ASSERT((((size_t)acc) & 15) == 0); + + { xxh_aliasing_u64x2* const xacc = (xxh_aliasing_u64x2*) acc; + const xxh_u8* const xsecret = (const xxh_u8*) secret; + /* constants */ + xxh_u64x2 const v32 = { 32, 32 }; + xxh_u64x2 const v47 = { 47, 47 }; + xxh_u32x4 const prime = { XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1 }; + size_t i; + for (i = 0; i < XXH_STRIPE_LEN / 
sizeof(xxh_u64x2); i++) { + /* xacc[i] ^= (xacc[i] >> 47); */ + xxh_u64x2 const acc_vec = xacc[i]; + xxh_u64x2 const data_vec = acc_vec ^ (acc_vec >> v47); + + /* xacc[i] ^= xsecret[i]; */ + xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + 16*i); + xxh_u64x2 const data_key = data_vec ^ key_vec; + + /* xacc[i] *= XXH_PRIME32_1 */ + /* prod_lo = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)prime & 0xFFFFFFFF); */ + xxh_u64x2 const prod_even = XXH_vec_mule((xxh_u32x4)data_key, prime); + /* prod_hi = ((xxh_u64x2)data_key >> 32) * ((xxh_u64x2)prime >> 32); */ + xxh_u64x2 const prod_odd = XXH_vec_mulo((xxh_u32x4)data_key, prime); + xacc[i] = prod_odd + (prod_even << v32); + } } +} + +#endif + +#if (XXH_VECTOR == XXH_SVE) + +XXH_FORCE_INLINE void +XXH3_accumulate_512_sve( void* XXH_RESTRICT acc, + const void* XXH_RESTRICT input, + const void* XXH_RESTRICT secret) +{ + uint64_t *xacc = (uint64_t *)acc; + const uint64_t *xinput = (const uint64_t *)(const void *)input; + const uint64_t *xsecret = (const uint64_t *)(const void *)secret; + svuint64_t kSwap = sveor_n_u64_z(svptrue_b64(), svindex_u64(0, 1), 1); + uint64_t element_count = svcntd(); + if (element_count >= 8) { + svbool_t mask = svptrue_pat_b64(SV_VL8); + svuint64_t vacc = svld1_u64(mask, xacc); + ACCRND(vacc, 0); + svst1_u64(mask, xacc, vacc); + } else if (element_count == 2) { /* sve128 */ + svbool_t mask = svptrue_pat_b64(SV_VL2); + svuint64_t acc0 = svld1_u64(mask, xacc + 0); + svuint64_t acc1 = svld1_u64(mask, xacc + 2); + svuint64_t acc2 = svld1_u64(mask, xacc + 4); + svuint64_t acc3 = svld1_u64(mask, xacc + 6); + ACCRND(acc0, 0); + ACCRND(acc1, 2); + ACCRND(acc2, 4); + ACCRND(acc3, 6); + svst1_u64(mask, xacc + 0, acc0); + svst1_u64(mask, xacc + 2, acc1); + svst1_u64(mask, xacc + 4, acc2); + svst1_u64(mask, xacc + 6, acc3); + } else { + svbool_t mask = svptrue_pat_b64(SV_VL4); + svuint64_t acc0 = svld1_u64(mask, xacc + 0); + svuint64_t acc1 = svld1_u64(mask, xacc + 4); + ACCRND(acc0, 0); + ACCRND(acc1, 4); + svst1_u64(mask, xacc + 0, acc0); + svst1_u64(mask, xacc + 4, acc1); + } +} + +XXH_FORCE_INLINE void +XXH3_accumulate_sve(xxh_u64* XXH_RESTRICT acc, + const xxh_u8* XXH_RESTRICT input, + const xxh_u8* XXH_RESTRICT secret, + size_t nbStripes) +{ + if (nbStripes != 0) { + uint64_t *xacc = (uint64_t *)acc; + const uint64_t *xinput = (const uint64_t *)(const void *)input; + const uint64_t *xsecret = (const uint64_t *)(const void *)secret; + svuint64_t kSwap = sveor_n_u64_z(svptrue_b64(), svindex_u64(0, 1), 1); + uint64_t element_count = svcntd(); + if (element_count >= 8) { + svbool_t mask = svptrue_pat_b64(SV_VL8); + svuint64_t vacc = svld1_u64(mask, xacc + 0); + do { + /* svprfd(svbool_t, void *, enum svfprop); */ + svprfd(mask, xinput + 128, SV_PLDL1STRM); + ACCRND(vacc, 0); + xinput += 8; + xsecret += 1; + nbStripes--; + } while (nbStripes != 0); + + svst1_u64(mask, xacc + 0, vacc); + } else if (element_count == 2) { /* sve128 */ + svbool_t mask = svptrue_pat_b64(SV_VL2); + svuint64_t acc0 = svld1_u64(mask, xacc + 0); + svuint64_t acc1 = svld1_u64(mask, xacc + 2); + svuint64_t acc2 = svld1_u64(mask, xacc + 4); + svuint64_t acc3 = svld1_u64(mask, xacc + 6); + do { + svprfd(mask, xinput + 128, SV_PLDL1STRM); + ACCRND(acc0, 0); + ACCRND(acc1, 2); + ACCRND(acc2, 4); + ACCRND(acc3, 6); + xinput += 8; + xsecret += 1; + nbStripes--; + } while (nbStripes != 0); + + svst1_u64(mask, xacc + 0, acc0); + svst1_u64(mask, xacc + 2, acc1); + svst1_u64(mask, xacc + 4, acc2); + svst1_u64(mask, xacc + 6, acc3); + } else { + svbool_t mask = 
svptrue_pat_b64(SV_VL4); + svuint64_t acc0 = svld1_u64(mask, xacc + 0); + svuint64_t acc1 = svld1_u64(mask, xacc + 4); + do { + svprfd(mask, xinput + 128, SV_PLDL1STRM); + ACCRND(acc0, 0); + ACCRND(acc1, 4); + xinput += 8; + xsecret += 1; + nbStripes--; + } while (nbStripes != 0); + + svst1_u64(mask, xacc + 0, acc0); + svst1_u64(mask, xacc + 4, acc1); + } + } +} + +#endif + +/* scalar variants - universal */ + +#if defined(__aarch64__) && (defined(__GNUC__) || defined(__clang__)) +/* + * In XXH3_scalarRound(), GCC and Clang have a similar codegen issue, where they + * emit an excess mask and a full 64-bit multiply-add (MADD X-form). + * + * While this might not seem like much, as AArch64 is a 64-bit architecture, only + * big Cortex designs have a full 64-bit multiplier. + * + * On the little cores, the smaller 32-bit multiplier is used, and full 64-bit + * multiplies expand to 2-3 multiplies in microcode. This has a major penalty + * of up to 4 latency cycles and 2 stall cycles in the multiply pipeline. + * + * Thankfully, AArch64 still provides the 32-bit long multiply-add (UMADDL) which does + * not have this penalty and does the mask automatically. + */ +XXH_FORCE_INLINE xxh_u64 +XXH_mult32to64_add64(xxh_u64 lhs, xxh_u64 rhs, xxh_u64 acc) +{ + xxh_u64 ret; + /* note: %x = 64-bit register, %w = 32-bit register */ + __asm__("umaddl %x0, %w1, %w2, %x3" : "=r" (ret) : "r" (lhs), "r" (rhs), "r" (acc)); + return ret; +} +#else +XXH_FORCE_INLINE xxh_u64 +XXH_mult32to64_add64(xxh_u64 lhs, xxh_u64 rhs, xxh_u64 acc) +{ + return XXH_mult32to64((xxh_u32)lhs, (xxh_u32)rhs) + acc; +} +#endif + +/*! + * @internal + * @brief Scalar round for @ref XXH3_accumulate_512_scalar(). + * + * This is extracted to its own function because the NEON path uses a combination + * of NEON and scalar. + */ +XXH_FORCE_INLINE void +XXH3_scalarRound(void* XXH_RESTRICT acc, + void const* XXH_RESTRICT input, + void const* XXH_RESTRICT secret, + size_t lane) +{ + xxh_u64* xacc = (xxh_u64*) acc; + xxh_u8 const* xinput = (xxh_u8 const*) input; + xxh_u8 const* xsecret = (xxh_u8 const*) secret; + XXH_ASSERT(lane < XXH_ACC_NB); + XXH_ASSERT(((size_t)acc & (XXH_ACC_ALIGN-1)) == 0); + { + xxh_u64 const data_val = XXH_readLE64(xinput + lane * 8); + xxh_u64 const data_key = data_val ^ XXH_readLE64(xsecret + lane * 8); + xacc[lane ^ 1] += data_val; /* swap adjacent lanes */ + xacc[lane] = XXH_mult32to64_add64(data_key /* & 0xFFFFFFFF */, data_key >> 32, xacc[lane]); + } +} + +/*! + * @internal + * @brief Processes a 64 byte block of data using the scalar path. + */ +XXH_FORCE_INLINE void +XXH3_accumulate_512_scalar(void* XXH_RESTRICT acc, + const void* XXH_RESTRICT input, + const void* XXH_RESTRICT secret) +{ + size_t i; + /* ARM GCC refuses to unroll this loop, resulting in a 24% slowdown on ARMv6. */ +#if defined(__GNUC__) && !defined(__clang__) \ + && (defined(__arm__) || defined(__thumb2__)) \ + && defined(__ARM_FEATURE_UNALIGNED) /* no unaligned access just wastes bytes */ \ + && XXH_SIZE_OPT <= 0 +# pragma GCC unroll 8 +#endif + for (i=0; i < XXH_ACC_NB; i++) { + XXH3_scalarRound(acc, input, secret, i); + } +} +XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(scalar) + +/*! + * @internal + * @brief Scalar scramble step for @ref XXH3_scrambleAcc_scalar(). + * + * This is extracted to its own function because the NEON path uses a combination + * of NEON and scalar. 
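+ *
+ * Per lane, the scramble step amounts to (a sketch matching the body below):
+ * @code{.c}
+ *   acc64 = (acc64 ^ (acc64 >> 47) ^ key64) * XXH_PRIME32_1;
+ * @endcode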
+ */
+XXH_FORCE_INLINE void
+XXH3_scalarScrambleRound(void* XXH_RESTRICT acc,
+                         void const* XXH_RESTRICT secret,
+                         size_t lane)
+{
+    xxh_u64* const xacc = (xxh_u64*) acc;   /* presumed aligned */
+    const xxh_u8* const xsecret = (const xxh_u8*) secret;   /* no alignment restriction */
+    XXH_ASSERT((((size_t)acc) & (XXH_ACC_ALIGN-1)) == 0);
+    XXH_ASSERT(lane < XXH_ACC_NB);
+    {
+        xxh_u64 const key64 = XXH_readLE64(xsecret + lane * 8);
+        xxh_u64 acc64 = xacc[lane];
+        acc64 = XXH_xorshift64(acc64, 47);
+        acc64 ^= key64;
+        acc64 *= XXH_PRIME32_1;
+        xacc[lane] = acc64;
+    }
+}
+
+/*!
+ * @internal
+ * @brief Scrambles the accumulators after a large chunk has been read
+ */
+XXH_FORCE_INLINE void
+XXH3_scrambleAcc_scalar(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
+{
+    size_t i;
+    for (i=0; i < XXH_ACC_NB; i++) {
+        XXH3_scalarScrambleRound(acc, secret, i);
+    }
+}
+
+XXH_FORCE_INLINE void
+XXH3_initCustomSecret_scalar(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
+{
+    /*
+     * We need a separate pointer for the hack below,
+     * which requires a non-const pointer.
+     * Any decent compiler will optimize this out otherwise.
+     */
+    const xxh_u8* kSecretPtr = XXH3_kSecret;
+    XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);
+
+#if defined(__GNUC__) && defined(__aarch64__)
+    /*
+     * UGLY HACK:
+     * GCC and Clang generate a bunch of MOV/MOVK pairs for aarch64, and they are
+     * placed sequentially, in order, at the top of the unrolled loop.
+     *
+     * While MOVK is great for generating constants (2 cycles for a 64-bit
+     * constant compared to 4 cycles for LDR), it fights for bandwidth with
+     * the arithmetic instructions.
+     *
+     *   I   L   S
+     *  MOVK
+     *  MOVK
+     *  MOVK
+     *  MOVK
+     *  ADD
+     *  SUB      STR
+     *           STR
+     * By forcing loads from memory (as the asm line causes the compiler to assume
+     * that kSecretPtr has been changed), the pipelines are used more
+     * efficiently:
+     *   I   L   S
+     *      LDR
+     *  ADD LDR
+     *  SUB      STR
+     *           STR
+     *
+     * See XXH3_NEON_LANES for details on the pipeline.
+     *
+     * XXH3_64bits_withSeed, len == 256, Snapdragon 835
+     *   without hack: 2654.4 MB/s
+     *   with hack:    3202.9 MB/s
+     */
+    XXH_COMPILER_GUARD(kSecretPtr);
+#endif
+    {   int const nbRounds = XXH_SECRET_DEFAULT_SIZE / 16;
+        int i;
+        for (i=0; i < nbRounds; i++) {
+            /*
+             * The asm hack causes the compiler to assume that kSecretPtr aliases with
+             * customSecret, and on aarch64, this prevented LDP from merging two
+             * loads together for free. Putting the loads together before the stores
+             * properly generates LDP.
+ */ + xxh_u64 lo = XXH_readLE64(kSecretPtr + 16*i) + seed64; + xxh_u64 hi = XXH_readLE64(kSecretPtr + 16*i + 8) - seed64; + XXH_writeLE64((xxh_u8*)customSecret + 16*i, lo); + XXH_writeLE64((xxh_u8*)customSecret + 16*i + 8, hi); + } } +} + + +typedef void (*XXH3_f_accumulate)(xxh_u64* XXH_RESTRICT, const xxh_u8* XXH_RESTRICT, const xxh_u8* XXH_RESTRICT, size_t); +typedef void (*XXH3_f_scrambleAcc)(void* XXH_RESTRICT, const void*); +typedef void (*XXH3_f_initCustomSecret)(void* XXH_RESTRICT, xxh_u64); + + +#if (XXH_VECTOR == XXH_AVX512) + +#define XXH3_accumulate_512 XXH3_accumulate_512_avx512 +#define XXH3_accumulate XXH3_accumulate_avx512 +#define XXH3_scrambleAcc XXH3_scrambleAcc_avx512 +#define XXH3_initCustomSecret XXH3_initCustomSecret_avx512 + +#elif (XXH_VECTOR == XXH_AVX2) + +#define XXH3_accumulate_512 XXH3_accumulate_512_avx2 +#define XXH3_accumulate XXH3_accumulate_avx2 +#define XXH3_scrambleAcc XXH3_scrambleAcc_avx2 +#define XXH3_initCustomSecret XXH3_initCustomSecret_avx2 + +#elif (XXH_VECTOR == XXH_SSE2) + +#define XXH3_accumulate_512 XXH3_accumulate_512_sse2 +#define XXH3_accumulate XXH3_accumulate_sse2 +#define XXH3_scrambleAcc XXH3_scrambleAcc_sse2 +#define XXH3_initCustomSecret XXH3_initCustomSecret_sse2 + +#elif (XXH_VECTOR == XXH_NEON) + +#define XXH3_accumulate_512 XXH3_accumulate_512_neon +#define XXH3_accumulate XXH3_accumulate_neon +#define XXH3_scrambleAcc XXH3_scrambleAcc_neon +#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar + +#elif (XXH_VECTOR == XXH_VSX) + +#define XXH3_accumulate_512 XXH3_accumulate_512_vsx +#define XXH3_accumulate XXH3_accumulate_vsx +#define XXH3_scrambleAcc XXH3_scrambleAcc_vsx +#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar + +#elif (XXH_VECTOR == XXH_SVE) +#define XXH3_accumulate_512 XXH3_accumulate_512_sve +#define XXH3_accumulate XXH3_accumulate_sve +#define XXH3_scrambleAcc XXH3_scrambleAcc_scalar +#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar + +#else /* scalar */ + +#define XXH3_accumulate_512 XXH3_accumulate_512_scalar +#define XXH3_accumulate XXH3_accumulate_scalar +#define XXH3_scrambleAcc XXH3_scrambleAcc_scalar +#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar + +#endif + +#if XXH_SIZE_OPT >= 1 /* don't do SIMD for initialization */ +# undef XXH3_initCustomSecret +# define XXH3_initCustomSecret XXH3_initCustomSecret_scalar +#endif + +XXH_FORCE_INLINE void +XXH3_hashLong_internal_loop(xxh_u64* XXH_RESTRICT acc, + const xxh_u8* XXH_RESTRICT input, size_t len, + const xxh_u8* XXH_RESTRICT secret, size_t secretSize, + XXH3_f_accumulate f_acc, + XXH3_f_scrambleAcc f_scramble) +{ + size_t const nbStripesPerBlock = (secretSize - XXH_STRIPE_LEN) / XXH_SECRET_CONSUME_RATE; + size_t const block_len = XXH_STRIPE_LEN * nbStripesPerBlock; + size_t const nb_blocks = (len - 1) / block_len; + + size_t n; + + XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); + + for (n = 0; n < nb_blocks; n++) { + f_acc(acc, input + n*block_len, secret, nbStripesPerBlock); + f_scramble(acc, secret + secretSize - XXH_STRIPE_LEN); + } + + /* last partial block */ + XXH_ASSERT(len > XXH_STRIPE_LEN); + { size_t const nbStripes = ((len - 1) - (block_len * nb_blocks)) / XXH_STRIPE_LEN; + XXH_ASSERT(nbStripes <= (secretSize / XXH_SECRET_CONSUME_RATE)); + f_acc(acc, input + nb_blocks*block_len, secret, nbStripes); + + /* last stripe */ + { const xxh_u8* const p = input + len - XXH_STRIPE_LEN; +#define XXH_SECRET_LASTACC_START 7 /* not aligned on 8, last secret is different from acc & scrambler */ + XXH3_accumulate_512(acc, p, 
secret + secretSize - XXH_STRIPE_LEN - XXH_SECRET_LASTACC_START);
+    }   }
+}
+
+XXH_FORCE_INLINE xxh_u64
+XXH3_mix2Accs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret)
+{
+    return XXH3_mul128_fold64(
+               acc[0] ^ XXH_readLE64(secret),
+               acc[1] ^ XXH_readLE64(secret+8) );
+}
+
+static XXH64_hash_t
+XXH3_mergeAccs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret, xxh_u64 start)
+{
+    xxh_u64 result64 = start;
+    size_t i = 0;
+
+    for (i = 0; i < 4; i++) {
+        result64 += XXH3_mix2Accs(acc+2*i, secret + 16*i);
+#if defined(__clang__)                                /* Clang */ \
+    && (defined(__arm__) || defined(__thumb__))       /* ARMv7 */ \
+    && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */  \
+    && !defined(XXH_ENABLE_AUTOVECTORIZE)             /* Define to disable */
+        /*
+         * UGLY HACK:
+         * Prevent autovectorization on Clang ARMv7-a. Exact same problem as
+         * the one in XXH3_len_129to240_64b. Speeds up shorter keys > 240b.
+         * XXH3_64bits, len == 256, Snapdragon 835:
+         *   without hack: 2063.7 MB/s
+         *   with hack:    2560.7 MB/s
+         */
+        XXH_COMPILER_GUARD(result64);
+#endif
+    }
+
+    return XXH3_avalanche(result64);
+}
+
+#define XXH3_INIT_ACC { XXH_PRIME32_3, XXH_PRIME64_1, XXH_PRIME64_2, XXH_PRIME64_3, \
+                        XXH_PRIME64_4, XXH_PRIME32_2, XXH_PRIME64_5, XXH_PRIME32_1 }
+
+XXH_FORCE_INLINE XXH64_hash_t
+XXH3_hashLong_64b_internal(const void* XXH_RESTRICT input, size_t len,
+                           const void* XXH_RESTRICT secret, size_t secretSize,
+                           XXH3_f_accumulate f_acc,
+                           XXH3_f_scrambleAcc f_scramble)
+{
+    XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;
+
+    XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, (const xxh_u8*)secret, secretSize, f_acc, f_scramble);
+
+    /* converge into final hash */
+    XXH_STATIC_ASSERT(sizeof(acc) == 64);
+    /* do not align on 8, so that the secret is different from the accumulator */
+#define XXH_SECRET_MERGEACCS_START 11
+    XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
+    return XXH3_mergeAccs(acc, (const xxh_u8*)secret + XXH_SECRET_MERGEACCS_START, (xxh_u64)len * XXH_PRIME64_1);
+}
+
+/*
+ * It's important for performance to transmit the secret's size (when it's static)
+ * so that the compiler can properly optimize the vectorized loop.
+ * This makes a big performance difference for "medium" keys (<1 KB) when using the AVX instruction set.
+ * When the secret size is unknown, or on GCC 12 where the mix of NO_INLINE and FORCE_INLINE
+ * breaks -Og, this is XXH_NO_INLINE.
+ */
+XXH3_WITH_SECRET_INLINE XXH64_hash_t
+XXH3_hashLong_64b_withSecret(const void* XXH_RESTRICT input, size_t len,
+                             XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
+{
+    (void)seed64;
+    return XXH3_hashLong_64b_internal(input, len, secret, secretLen, XXH3_accumulate, XXH3_scrambleAcc);
+}
+
+/*
+ * It's preferable for performance that XXH3_hashLong is not inlined,
+ * as it results in a smaller function for small data, which is easier on the instruction cache.
+ * Note that inside this no_inline function, we do inline the internal loop,
+ * and provide a statically defined secret size to allow optimization of the vector loop.
+ */ +XXH_NO_INLINE XXH_PUREF XXH64_hash_t +XXH3_hashLong_64b_default(const void* XXH_RESTRICT input, size_t len, + XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen) +{ + (void)seed64; (void)secret; (void)secretLen; + return XXH3_hashLong_64b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_accumulate, XXH3_scrambleAcc); +} + +/* + * XXH3_hashLong_64b_withSeed(): + * Generate a custom key based on alteration of default XXH3_kSecret with the seed, + * and then use this key for long mode hashing. + * + * This operation is decently fast but nonetheless costs a little bit of time. + * Try to avoid it whenever possible (typically when seed==0). + * + * It's important for performance that XXH3_hashLong is not inlined. Not sure + * why (uop cache maybe?), but the difference is large and easily measurable. + */ +XXH_FORCE_INLINE XXH64_hash_t +XXH3_hashLong_64b_withSeed_internal(const void* input, size_t len, + XXH64_hash_t seed, + XXH3_f_accumulate f_acc, + XXH3_f_scrambleAcc f_scramble, + XXH3_f_initCustomSecret f_initSec) +{ +#if XXH_SIZE_OPT <= 0 + if (seed == 0) + return XXH3_hashLong_64b_internal(input, len, + XXH3_kSecret, sizeof(XXH3_kSecret), + f_acc, f_scramble); +#endif + { XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE]; + f_initSec(secret, seed); + return XXH3_hashLong_64b_internal(input, len, secret, sizeof(secret), + f_acc, f_scramble); + } +} + +/* + * It's important for performance that XXH3_hashLong is not inlined. + */ +XXH_NO_INLINE XXH64_hash_t +XXH3_hashLong_64b_withSeed(const void* XXH_RESTRICT input, size_t len, + XXH64_hash_t seed, const xxh_u8* XXH_RESTRICT secret, size_t secretLen) +{ + (void)secret; (void)secretLen; + return XXH3_hashLong_64b_withSeed_internal(input, len, seed, + XXH3_accumulate, XXH3_scrambleAcc, XXH3_initCustomSecret); +} + + +typedef XXH64_hash_t (*XXH3_hashLong64_f)(const void* XXH_RESTRICT, size_t, + XXH64_hash_t, const xxh_u8* XXH_RESTRICT, size_t); + +XXH_FORCE_INLINE XXH64_hash_t +XXH3_64bits_internal(const void* XXH_RESTRICT input, size_t len, + XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen, + XXH3_hashLong64_f f_hashLong) +{ + XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN); + /* + * If an action is to be taken if `secretLen` condition is not respected, + * it should be done here. + * For now, it's a contract pre-condition. + * Adding a check and a branch here would cost performance at every hash. + * Also, note that function signature doesn't offer room to return an error. + */ + if (len <= 16) + return XXH3_len_0to16_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64); + if (len <= 128) + return XXH3_len_17to128_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64); + if (len <= XXH3_MIDSIZE_MAX) + return XXH3_len_129to240_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64); + return f_hashLong(input, len, seed64, (const xxh_u8*)secret, secretLen); +} + + +/* === Public entry point === */ + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(XXH_NOESCAPE const void* input, size_t length) +{ + return XXH3_64bits_internal(input, length, 0, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_default); +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH64_hash_t +XXH3_64bits_withSecret(XXH_NOESCAPE const void* input, size_t length, XXH_NOESCAPE const void* secret, size_t secretSize) +{ + return XXH3_64bits_internal(input, length, 0, secret, secretSize, XXH3_hashLong_64b_withSecret); +} + +/*! 
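+ * Example (a minimal sketch; `buf` and `len` stand for hypothetical
+ * caller-supplied data):
+ * @code{.c}
+ *   XXH64_hash_t hash_with_seed(const void* buf, size_t len)
+ *   {
+ *       return XXH3_64bits_withSeed(buf, len, (XXH64_hash_t)42);
+ *   }
+ * @endcode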
@ingroup XXH3_family */
+XXH_PUBLIC_API XXH64_hash_t
+XXH3_64bits_withSeed(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed)
+{
+    return XXH3_64bits_internal(input, length, seed, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_withSeed);
+}
+
+XXH_PUBLIC_API XXH64_hash_t
+XXH3_64bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t length, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed)
+{
+    if (length <= XXH3_MIDSIZE_MAX)
+        return XXH3_64bits_internal(input, length, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL);
+    return XXH3_hashLong_64b_withSecret(input, length, seed, (const xxh_u8*)secret, secretSize);
+}
+
+
+/* ===   XXH3 streaming   === */
+#ifndef XXH_NO_STREAM
+/*
+ * Allocates a block of memory that is always aligned to `align`.
+ *
+ * This must be freed with `XXH_alignedFree()`.
+ *
+ * malloc typically guarantees 16 byte alignment on 64-bit systems and 8 byte
+ * alignment on 32-bit. This isn't enough for the 32-byte aligned loads in AVX2,
+ * nor, on 32-bit, for the 16-byte aligned loads in SSE2 and NEON.
+ *
+ * This underalignment previously caused a rather obvious crash which went
+ * completely unnoticed due to XXH3_createState() not actually being tested.
+ * Credit to RedSpah for noticing this bug.
+ *
+ * The alignment is done manually: functions like posix_memalign or _mm_malloc
+ * are avoided because, to maintain portability, we would have to write a
+ * fallback like this anyway, and testing for the existence of library
+ * functions without relying on external build tools is impossible.
+ *
+ * The method is simple: overallocate, manually align, and store the offset
+ * to the original behind the returned pointer.
+ *
+ * Align must be a power of 2 and 8 <= align <= 128.
+ */
+static XXH_MALLOCF void* XXH_alignedMalloc(size_t s, size_t align)
+{
+    XXH_ASSERT(align <= 128 && align >= 8); /* range check */
+    XXH_ASSERT((align & (align-1)) == 0);   /* power of 2 */
+    XXH_ASSERT(s != 0 && s < (s + align));  /* empty/overflow */
+    {   /* Overallocate to make room for manual realignment and an offset byte */
+        xxh_u8* base = (xxh_u8*)XXH_malloc(s + align);
+        if (base != NULL) {
+            /*
+             * Get the offset needed to align this pointer.
+             *
+             * Even if the returned pointer is aligned, there will always be
+             * at least one byte to store the offset to the original pointer.
+             */
+            size_t offset = align - ((size_t)base & (align - 1)); /* base % align */
+            /* Add the offset for the now-aligned pointer */
+            xxh_u8* ptr = base + offset;
+
+            XXH_ASSERT((size_t)ptr % align == 0);
+
+            /* Store the offset immediately before the returned pointer. */
+            ptr[-1] = (xxh_u8)offset;
+            return ptr;
+        }
+        return NULL;
+    }
+}
+/*
+ * Frees an aligned pointer allocated by XXH_alignedMalloc(). Don't pass
+ * normal malloc'd pointers; XXH_alignedMalloc has a specific data layout.
+ */
+static void XXH_alignedFree(void* p)
+{
+    if (p != NULL) {
+        xxh_u8* ptr = (xxh_u8*)p;
+        /* Get the offset byte we added in XXH_alignedMalloc(). */
+        xxh_u8 offset = ptr[-1];
+        /* Free the original malloc'd pointer */
+        xxh_u8* base = ptr - offset;
+        XXH_free(base);
+    }
+}
+/*! @ingroup XXH3_family */
+/*!
+ * @brief Allocate an @ref XXH3_state_t.
+ *
+ * Must be freed with XXH3_freeState().
+ * @return An allocated XXH3_state_t on success, `NULL` on failure.
+ */
+XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void)
+{
+    XXH3_state_t* const state = (XXH3_state_t*)XXH_alignedMalloc(sizeof(XXH3_state_t), 64);
+    if (state==NULL) return NULL;
+    XXH3_INITSTATE(state);
+    return state;
+}
+
+/*!
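+ * A typical create/update/digest/free cycle looks like this (a sketch; the
+ * update calls depend on how the caller's data arrives):
+ * @code{.c}
+ *   XXH3_state_t* const state = XXH3_createState();
+ *   if (state != NULL) {
+ *       XXH3_64bits_reset(state);
+ *       // ... XXH3_64bits_update(state, data, size) for each chunk ...
+ *       XXH64_hash_t const hash = XXH3_64bits_digest(state);
+ *       (void)hash;
+ *       XXH3_freeState(state);
+ *   }
+ * @endcode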
@ingroup XXH3_family */ +/*! + * @brief Frees an @ref XXH3_state_t. + * + * Must be allocated with XXH3_createState(). + * @param statePtr A pointer to an @ref XXH3_state_t allocated with @ref XXH3_createState(). + * @return XXH_OK. + */ +XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr) +{ + XXH_alignedFree(statePtr); + return XXH_OK; +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API void +XXH3_copyState(XXH_NOESCAPE XXH3_state_t* dst_state, XXH_NOESCAPE const XXH3_state_t* src_state) +{ + XXH_memcpy(dst_state, src_state, sizeof(*dst_state)); +} + +static void +XXH3_reset_internal(XXH3_state_t* statePtr, + XXH64_hash_t seed, + const void* secret, size_t secretSize) +{ + size_t const initStart = offsetof(XXH3_state_t, bufferedSize); + size_t const initLength = offsetof(XXH3_state_t, nbStripesPerBlock) - initStart; + XXH_ASSERT(offsetof(XXH3_state_t, nbStripesPerBlock) > initStart); + XXH_ASSERT(statePtr != NULL); + /* set members from bufferedSize to nbStripesPerBlock (excluded) to 0 */ + memset((char*)statePtr + initStart, 0, initLength); + statePtr->acc[0] = XXH_PRIME32_3; + statePtr->acc[1] = XXH_PRIME64_1; + statePtr->acc[2] = XXH_PRIME64_2; + statePtr->acc[3] = XXH_PRIME64_3; + statePtr->acc[4] = XXH_PRIME64_4; + statePtr->acc[5] = XXH_PRIME32_2; + statePtr->acc[6] = XXH_PRIME64_5; + statePtr->acc[7] = XXH_PRIME32_1; + statePtr->seed = seed; + statePtr->useSeed = (seed != 0); + statePtr->extSecret = (const unsigned char*)secret; + XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); + statePtr->secretLimit = secretSize - XXH_STRIPE_LEN; + statePtr->nbStripesPerBlock = statePtr->secretLimit / XXH_SECRET_CONSUME_RATE; +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_64bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr) +{ + if (statePtr == NULL) return XXH_ERROR; + XXH3_reset_internal(statePtr, 0, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE); + return XXH_OK; +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_64bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize) +{ + if (statePtr == NULL) return XXH_ERROR; + XXH3_reset_internal(statePtr, 0, secret, secretSize); + if (secret == NULL) return XXH_ERROR; + if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR; + return XXH_OK; +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_64bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed) +{ + if (statePtr == NULL) return XXH_ERROR; + if (seed==0) return XXH3_64bits_reset(statePtr); + if ((seed != statePtr->seed) || (statePtr->extSecret != NULL)) + XXH3_initCustomSecret(statePtr->customSecret, seed); + XXH3_reset_internal(statePtr, seed, NULL, XXH_SECRET_DEFAULT_SIZE); + return XXH_OK; +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_64bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed64) +{ + if (statePtr == NULL) return XXH_ERROR; + if (secret == NULL) return XXH_ERROR; + if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR; + XXH3_reset_internal(statePtr, seed64, secret, secretSize); + statePtr->useSeed = 1; /* always, even if seed64==0 */ + return XXH_OK; +} + +/*! + * @internal + * @brief Processes a large input for XXH3_update() and XXH3_digest_long(). + * + * Unlike XXH3_hashLong_internal_loop(), this can process data that overlaps a block. 
+ *
+ * @param acc Pointer to the 8 accumulator lanes
+ * @param nbStripesSoFarPtr In/out pointer to the number of leftover stripes in the block
+ * @param nbStripesPerBlock Number of stripes in a block
+ * @param input Input pointer
+ * @param nbStripes Number of stripes to process
+ * @param secret Secret pointer
+ * @param secretLimit Offset of the last block in @p secret
+ * @param f_acc Pointer to an XXH3_accumulate implementation
+ * @param f_scramble Pointer to an XXH3_scrambleAcc implementation
+ * @return Pointer past the end of @p input after processing
+ */
+XXH_FORCE_INLINE const xxh_u8 *
+XXH3_consumeStripes(xxh_u64* XXH_RESTRICT acc,
+                    size_t* XXH_RESTRICT nbStripesSoFarPtr, size_t nbStripesPerBlock,
+                    const xxh_u8* XXH_RESTRICT input, size_t nbStripes,
+                    const xxh_u8* XXH_RESTRICT secret, size_t secretLimit,
+                    XXH3_f_accumulate f_acc,
+                    XXH3_f_scrambleAcc f_scramble)
+{
+    const xxh_u8* initialSecret = secret + *nbStripesSoFarPtr * XXH_SECRET_CONSUME_RATE;
+    /* Process full blocks */
+    if (nbStripes >= (nbStripesPerBlock - *nbStripesSoFarPtr)) {
+        /* Process the initial partial block... */
+        size_t nbStripesThisIter = nbStripesPerBlock - *nbStripesSoFarPtr;
+
+        do {
+            /* Accumulate and scramble */
+            f_acc(acc, input, initialSecret, nbStripesThisIter);
+            f_scramble(acc, secret + secretLimit);
+            input += nbStripesThisIter * XXH_STRIPE_LEN;
+            nbStripes -= nbStripesThisIter;
+            /* Then continue the loop with the full block size */
+            nbStripesThisIter = nbStripesPerBlock;
+            initialSecret = secret;
+        } while (nbStripes >= nbStripesPerBlock);
+        *nbStripesSoFarPtr = 0;
+    }
+    /* Process a partial block */
+    if (nbStripes > 0) {
+        f_acc(acc, input, initialSecret, nbStripes);
+        input += nbStripes * XXH_STRIPE_LEN;
+        *nbStripesSoFarPtr += nbStripes;
+    }
+    /* Return end pointer */
+    return input;
+}
+
+#ifndef XXH3_STREAM_USE_STACK
+# if XXH_SIZE_OPT <= 0 && !defined(__clang__) /* clang doesn't need additional stack space */
+#   define XXH3_STREAM_USE_STACK 1
+# endif
+#endif
+/*
+ * Both XXH3_64bits_update and XXH3_128bits_update use this routine.
+ */
+XXH_FORCE_INLINE XXH_errorcode
+XXH3_update(XXH3_state_t* XXH_RESTRICT const state,
+            const xxh_u8* XXH_RESTRICT input, size_t len,
+            XXH3_f_accumulate f_acc,
+            XXH3_f_scrambleAcc f_scramble)
+{
+    if (input==NULL) {
+        XXH_ASSERT(len == 0);
+        return XXH_OK;
+    }
+
+    XXH_ASSERT(state != NULL);
+    {   const xxh_u8* const bEnd = input + len;
+        const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
+#if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1
+        /* For some reason, gcc and MSVC seem to suffer greatly
+         * when operating on accumulators directly in state memory.
+         * Operating on stack space seems to enable proper optimization.
+ * clang, on the other hand, doesn't seem to need this trick */ + XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[8]; + XXH_memcpy(acc, state->acc, sizeof(acc)); +#else + xxh_u64* XXH_RESTRICT const acc = state->acc; +#endif + state->totalLen += len; + XXH_ASSERT(state->bufferedSize <= XXH3_INTERNALBUFFER_SIZE); + + /* small input : just fill in tmp buffer */ + if (len <= XXH3_INTERNALBUFFER_SIZE - state->bufferedSize) { + XXH_memcpy(state->buffer + state->bufferedSize, input, len); + state->bufferedSize += (XXH32_hash_t)len; + return XXH_OK; + } + + /* total input is now > XXH3_INTERNALBUFFER_SIZE */ + #define XXH3_INTERNALBUFFER_STRIPES (XXH3_INTERNALBUFFER_SIZE / XXH_STRIPE_LEN) + XXH_STATIC_ASSERT(XXH3_INTERNALBUFFER_SIZE % XXH_STRIPE_LEN == 0); /* clean multiple */ + + /* + * Internal buffer is partially filled (always, except at beginning) + * Complete it, then consume it. + */ + if (state->bufferedSize) { + size_t const loadSize = XXH3_INTERNALBUFFER_SIZE - state->bufferedSize; + XXH_memcpy(state->buffer + state->bufferedSize, input, loadSize); + input += loadSize; + XXH3_consumeStripes(acc, + &state->nbStripesSoFar, state->nbStripesPerBlock, + state->buffer, XXH3_INTERNALBUFFER_STRIPES, + secret, state->secretLimit, + f_acc, f_scramble); + state->bufferedSize = 0; + } + XXH_ASSERT(input < bEnd); + if (bEnd - input > XXH3_INTERNALBUFFER_SIZE) { + size_t nbStripes = (size_t)(bEnd - 1 - input) / XXH_STRIPE_LEN; + input = XXH3_consumeStripes(acc, + &state->nbStripesSoFar, state->nbStripesPerBlock, + input, nbStripes, + secret, state->secretLimit, + f_acc, f_scramble); + XXH_memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN, input - XXH_STRIPE_LEN, XXH_STRIPE_LEN); + + } + /* Some remaining input (always) : buffer it */ + XXH_ASSERT(input < bEnd); + XXH_ASSERT(bEnd - input <= XXH3_INTERNALBUFFER_SIZE); + XXH_ASSERT(state->bufferedSize == 0); + XXH_memcpy(state->buffer, input, (size_t)(bEnd-input)); + state->bufferedSize = (XXH32_hash_t)(bEnd-input); +#if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1 + /* save stack accumulators into state */ + XXH_memcpy(state->acc, acc, sizeof(acc)); +#endif + } + + return XXH_OK; +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_64bits_update(XXH_NOESCAPE XXH3_state_t* state, XXH_NOESCAPE const void* input, size_t len) +{ + return XXH3_update(state, (const xxh_u8*)input, len, + XXH3_accumulate, XXH3_scrambleAcc); +} + + +XXH_FORCE_INLINE void +XXH3_digest_long (XXH64_hash_t* acc, + const XXH3_state_t* state, + const unsigned char* secret) +{ + xxh_u8 lastStripe[XXH_STRIPE_LEN]; + const xxh_u8* lastStripePtr; + + /* + * Digest on a local copy. This way, the state remains unaltered, and it can + * continue ingesting more input afterwards. 
+ */ + XXH_memcpy(acc, state->acc, sizeof(state->acc)); + if (state->bufferedSize >= XXH_STRIPE_LEN) { + /* Consume remaining stripes then point to remaining data in buffer */ + size_t const nbStripes = (state->bufferedSize - 1) / XXH_STRIPE_LEN; + size_t nbStripesSoFar = state->nbStripesSoFar; + XXH3_consumeStripes(acc, + &nbStripesSoFar, state->nbStripesPerBlock, + state->buffer, nbStripes, + secret, state->secretLimit, + XXH3_accumulate, XXH3_scrambleAcc); + lastStripePtr = state->buffer + state->bufferedSize - XXH_STRIPE_LEN; + } else { /* bufferedSize < XXH_STRIPE_LEN */ + /* Copy to temp buffer */ + size_t const catchupSize = XXH_STRIPE_LEN - state->bufferedSize; + XXH_ASSERT(state->bufferedSize > 0); /* there is always some input buffered */ + XXH_memcpy(lastStripe, state->buffer + sizeof(state->buffer) - catchupSize, catchupSize); + XXH_memcpy(lastStripe + catchupSize, state->buffer, state->bufferedSize); + lastStripePtr = lastStripe; + } + /* Last stripe */ + XXH3_accumulate_512(acc, + lastStripePtr, + secret + state->secretLimit - XXH_SECRET_LASTACC_START); +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest (XXH_NOESCAPE const XXH3_state_t* state) +{ + const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret; + if (state->totalLen > XXH3_MIDSIZE_MAX) { + XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB]; + XXH3_digest_long(acc, state, secret); + return XXH3_mergeAccs(acc, + secret + XXH_SECRET_MERGEACCS_START, + (xxh_u64)state->totalLen * XXH_PRIME64_1); + } + /* totalLen <= XXH3_MIDSIZE_MAX: digesting a short input */ + if (state->useSeed) + return XXH3_64bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed); + return XXH3_64bits_withSecret(state->buffer, (size_t)(state->totalLen), + secret, state->secretLimit + XXH_STRIPE_LEN); +} +#endif /* !XXH_NO_STREAM */ + + +/* ========================================== + * XXH3 128 bits (a.k.a XXH128) + * ========================================== + * XXH3's 128-bit variant has better mixing and strength than the 64-bit variant, + * even without counting the significantly larger output size. + * + * For example, extra steps are taken to avoid the seed-dependent collisions + * in 17-240 byte inputs (See XXH3_mix16B and XXH128_mix32B). + * + * This strength naturally comes at the cost of some speed, especially on short + * lengths. Note that longer hashes are about as fast as the 64-bit version + * due to it using only a slight modification of the 64-bit loop. + * + * XXH128 is also more oriented towards 64-bit machines. It is still extremely + * fast for a _128-bit_ hash on 32-bit (it usually clears XXH64). + */ + +XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t +XXH3_len_1to3_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + /* A doubled version of 1to3_64b with different constants. 
*/ + XXH_ASSERT(input != NULL); + XXH_ASSERT(1 <= len && len <= 3); + XXH_ASSERT(secret != NULL); + /* + * len = 1: combinedl = { input[0], 0x01, input[0], input[0] } + * len = 2: combinedl = { input[1], 0x02, input[0], input[1] } + * len = 3: combinedl = { input[2], 0x03, input[0], input[1] } + */ + { xxh_u8 const c1 = input[0]; + xxh_u8 const c2 = input[len >> 1]; + xxh_u8 const c3 = input[len - 1]; + xxh_u32 const combinedl = ((xxh_u32)c1 <<16) | ((xxh_u32)c2 << 24) + | ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8); + xxh_u32 const combinedh = XXH_rotl32(XXH_swap32(combinedl), 13); + xxh_u64 const bitflipl = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed; + xxh_u64 const bitfliph = (XXH_readLE32(secret+8) ^ XXH_readLE32(secret+12)) - seed; + xxh_u64 const keyed_lo = (xxh_u64)combinedl ^ bitflipl; + xxh_u64 const keyed_hi = (xxh_u64)combinedh ^ bitfliph; + XXH128_hash_t h128; + h128.low64 = XXH64_avalanche(keyed_lo); + h128.high64 = XXH64_avalanche(keyed_hi); + return h128; + } +} + +XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t +XXH3_len_4to8_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + XXH_ASSERT(input != NULL); + XXH_ASSERT(secret != NULL); + XXH_ASSERT(4 <= len && len <= 8); + seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32; + { xxh_u32 const input_lo = XXH_readLE32(input); + xxh_u32 const input_hi = XXH_readLE32(input + len - 4); + xxh_u64 const input_64 = input_lo + ((xxh_u64)input_hi << 32); + xxh_u64 const bitflip = (XXH_readLE64(secret+16) ^ XXH_readLE64(secret+24)) + seed; + xxh_u64 const keyed = input_64 ^ bitflip; + + /* Shift len to the left to ensure it is even, this avoids even multiplies. */ + XXH128_hash_t m128 = XXH_mult64to128(keyed, XXH_PRIME64_1 + (len << 2)); + + m128.high64 += (m128.low64 << 1); + m128.low64 ^= (m128.high64 >> 3); + + m128.low64 = XXH_xorshift64(m128.low64, 35); + m128.low64 *= PRIME_MX2; + m128.low64 = XXH_xorshift64(m128.low64, 28); + m128.high64 = XXH3_avalanche(m128.high64); + return m128; + } +} + +XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t +XXH3_len_9to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + XXH_ASSERT(input != NULL); + XXH_ASSERT(secret != NULL); + XXH_ASSERT(9 <= len && len <= 16); + { xxh_u64 const bitflipl = (XXH_readLE64(secret+32) ^ XXH_readLE64(secret+40)) - seed; + xxh_u64 const bitfliph = (XXH_readLE64(secret+48) ^ XXH_readLE64(secret+56)) + seed; + xxh_u64 const input_lo = XXH_readLE64(input); + xxh_u64 input_hi = XXH_readLE64(input + len - 8); + XXH128_hash_t m128 = XXH_mult64to128(input_lo ^ input_hi ^ bitflipl, XXH_PRIME64_1); + /* + * Put len in the middle of m128 to ensure that the length gets mixed to + * both the low and high bits in the 128x64 multiply below. + */ + m128.low64 += (xxh_u64)(len - 1) << 54; + input_hi ^= bitfliph; + /* + * Add the high 32 bits of input_hi to the high 32 bits of m128, then + * add the long product of the low 32 bits of input_hi and XXH_PRIME32_2 to + * the high 64 bits of m128. + * + * The best approach to this operation is different on 32-bit and 64-bit. + */ + if (sizeof(void *) < sizeof(xxh_u64)) { /* 32-bit */ + /* + * 32-bit optimized version, which is more readable. + * + * On 32-bit, it removes an ADC and delays a dependency between the two + * halves of m128.high64, but it generates an extra mask on 64-bit. + */ + m128.high64 += (input_hi & 0xFFFFFFFF00000000ULL) + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2); + } else { + /* + * 64-bit optimized (albeit more confusing) version. 
+ * + * Uses some properties of addition and multiplication to remove the mask: + * + * Let: + * a = input_hi.lo = (input_hi & 0x00000000FFFFFFFF) + * b = input_hi.hi = (input_hi & 0xFFFFFFFF00000000) + * c = XXH_PRIME32_2 + * + * a + (b * c) + * Inverse Property: x + y - x == y + * a + (b * (1 + c - 1)) + * Distributive Property: x * (y + z) == (x * y) + (x * z) + * a + (b * 1) + (b * (c - 1)) + * Identity Property: x * 1 == x + * a + b + (b * (c - 1)) + * + * Substitute a, b, and c: + * input_hi.hi + input_hi.lo + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1)) + * + * Since input_hi.hi + input_hi.lo == input_hi, we get this: + * input_hi + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1)) + */ + m128.high64 += input_hi + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2 - 1); + } + /* m128 ^= XXH_swap64(m128 >> 64); */ + m128.low64 ^= XXH_swap64(m128.high64); + + { /* 128x64 multiply: h128 = m128 * XXH_PRIME64_2; */ + XXH128_hash_t h128 = XXH_mult64to128(m128.low64, XXH_PRIME64_2); + h128.high64 += m128.high64 * XXH_PRIME64_2; + + h128.low64 = XXH3_avalanche(h128.low64); + h128.high64 = XXH3_avalanche(h128.high64); + return h128; + } } +} + +/* + * Assumption: `secret` size is >= XXH3_SECRET_SIZE_MIN + */ +XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t +XXH3_len_0to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + XXH_ASSERT(len <= 16); + { if (len > 8) return XXH3_len_9to16_128b(input, len, secret, seed); + if (len >= 4) return XXH3_len_4to8_128b(input, len, secret, seed); + if (len) return XXH3_len_1to3_128b(input, len, secret, seed); + { XXH128_hash_t h128; + xxh_u64 const bitflipl = XXH_readLE64(secret+64) ^ XXH_readLE64(secret+72); + xxh_u64 const bitfliph = XXH_readLE64(secret+80) ^ XXH_readLE64(secret+88); + h128.low64 = XXH64_avalanche(seed ^ bitflipl); + h128.high64 = XXH64_avalanche( seed ^ bitfliph); + return h128; + } } +} + +/* + * A bit slower than XXH3_mix16B, but handles multiply by zero better. + */ +XXH_FORCE_INLINE XXH128_hash_t +XXH128_mix32B(XXH128_hash_t acc, const xxh_u8* input_1, const xxh_u8* input_2, + const xxh_u8* secret, XXH64_hash_t seed) +{ + acc.low64 += XXH3_mix16B (input_1, secret+0, seed); + acc.low64 ^= XXH_readLE64(input_2) + XXH_readLE64(input_2 + 8); + acc.high64 += XXH3_mix16B (input_2, secret+16, seed); + acc.high64 ^= XXH_readLE64(input_1) + XXH_readLE64(input_1 + 8); + return acc; +} + + +XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t +XXH3_len_17to128_128b(const xxh_u8* XXH_RESTRICT input, size_t len, + const xxh_u8* XXH_RESTRICT secret, size_t secretSize, + XXH64_hash_t seed) +{ + XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize; + XXH_ASSERT(16 < len && len <= 128); + + { XXH128_hash_t acc; + acc.low64 = len * XXH_PRIME64_1; + acc.high64 = 0; + +#if XXH_SIZE_OPT >= 1 + { + /* Smaller, but slightly slower. 
*/
+        unsigned int i = (unsigned int)(len - 1) / 32;
+        do {
+            acc = XXH128_mix32B(acc, input+16*i, input+len-16*(i+1), secret+32*i, seed);
+        } while (i-- != 0);
+    }
+#else
+    if (len > 32) {
+        if (len > 64) {
+            if (len > 96) {
+                acc = XXH128_mix32B(acc, input+48, input+len-64, secret+96, seed);
+            }
+            acc = XXH128_mix32B(acc, input+32, input+len-48, secret+64, seed);
+        }
+        acc = XXH128_mix32B(acc, input+16, input+len-32, secret+32, seed);
+    }
+    acc = XXH128_mix32B(acc, input, input+len-16, secret, seed);
+#endif
+    {   XXH128_hash_t h128;
+        h128.low64  = acc.low64 + acc.high64;
+        h128.high64 = (acc.low64    * XXH_PRIME64_1)
+                    + (acc.high64   * XXH_PRIME64_4)
+                    + ((len - seed) * XXH_PRIME64_2);
+        h128.low64  = XXH3_avalanche(h128.low64);
+        h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
+        return h128;
+    }
+    }
+}
+
+XXH_NO_INLINE XXH_PUREF XXH128_hash_t
+XXH3_len_129to240_128b(const xxh_u8* XXH_RESTRICT input, size_t len,
+                       const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
+                       XXH64_hash_t seed)
+{
+    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
+    XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);
+
+    {   XXH128_hash_t acc;
+        unsigned i;
+        acc.low64 = len * XXH_PRIME64_1;
+        acc.high64 = 0;
+        /*
+         * We set `i` to offset + 32, so that the unchanged `len`
+         * can be used as the upper bound. This reaches a sweet spot
+         * where both x86 and aarch64 get simple address generation (agen)
+         * and good codegen for the loop.
+         */
+        for (i = 32; i < 160; i += 32) {
+            acc = XXH128_mix32B(acc,
+                                input  + i - 32,
+                                input  + i - 16,
+                                secret + i - 32,
+                                seed);
+        }
+        acc.low64 = XXH3_avalanche(acc.low64);
+        acc.high64 = XXH3_avalanche(acc.high64);
+        /*
+         * NB: `i <= len` will duplicate the last 32 bytes if
+         * len % 32 was zero. This is an unfortunate necessity to keep
+         * the hash result stable.
+         */
+        for (i=160; i <= len; i += 32) {
+            acc = XXH128_mix32B(acc,
+                                input + i - 32,
+                                input + i - 16,
+                                secret + XXH3_MIDSIZE_STARTOFFSET + i - 160,
+                                seed);
+        }
+        /* last bytes */
+        acc = XXH128_mix32B(acc,
+                            input + len - 16,
+                            input + len - 32,
+                            secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET - 16,
+                            (XXH64_hash_t)0 - seed);
+
+        {   XXH128_hash_t h128;
+            h128.low64  = acc.low64 + acc.high64;
+            h128.high64 = (acc.low64    * XXH_PRIME64_1)
+                        + (acc.high64   * XXH_PRIME64_4)
+                        + ((len - seed) * XXH_PRIME64_2);
+            h128.low64  = XXH3_avalanche(h128.low64);
+            h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
+            return h128;
+        }
+    }
+}
+
+XXH_FORCE_INLINE XXH128_hash_t
+XXH3_hashLong_128b_internal(const void* XXH_RESTRICT input, size_t len,
+                            const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
+                            XXH3_f_accumulate f_acc,
+                            XXH3_f_scrambleAcc f_scramble)
+{
+    XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;
+
+    XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, secret, secretSize, f_acc, f_scramble);
+
+    /* converge into final hash */
+    XXH_STATIC_ASSERT(sizeof(acc) == 64);
+    XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
+    {   XXH128_hash_t h128;
+        h128.low64  = XXH3_mergeAccs(acc,
+                                     secret + XXH_SECRET_MERGEACCS_START,
+                                     (xxh_u64)len * XXH_PRIME64_1);
+        h128.high64 = XXH3_mergeAccs(acc,
+                                     secret + secretSize
+                                            - sizeof(acc) - XXH_SECRET_MERGEACCS_START,
+                                     ~((xxh_u64)len * XXH_PRIME64_2));
+        return h128;
+    }
+}
+
+/*
+ * It's important for performance that XXH3_hashLong() is not inlined.
+ */ +XXH_NO_INLINE XXH_PUREF XXH128_hash_t +XXH3_hashLong_128b_default(const void* XXH_RESTRICT input, size_t len, + XXH64_hash_t seed64, + const void* XXH_RESTRICT secret, size_t secretLen) +{ + (void)seed64; (void)secret; (void)secretLen; + return XXH3_hashLong_128b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret), + XXH3_accumulate, XXH3_scrambleAcc); +} + +/* + * It's important for performance to pass @p secretLen (when it's static) + * to the compiler, so that it can properly optimize the vectorized loop. + * + * When the secret size is unknown, or on GCC 12 where the mix of NO_INLINE and FORCE_INLINE + * breaks -Og, this is XXH_NO_INLINE. + */ +XXH3_WITH_SECRET_INLINE XXH128_hash_t +XXH3_hashLong_128b_withSecret(const void* XXH_RESTRICT input, size_t len, + XXH64_hash_t seed64, + const void* XXH_RESTRICT secret, size_t secretLen) +{ + (void)seed64; + return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, secretLen, + XXH3_accumulate, XXH3_scrambleAcc); +} + +XXH_FORCE_INLINE XXH128_hash_t +XXH3_hashLong_128b_withSeed_internal(const void* XXH_RESTRICT input, size_t len, + XXH64_hash_t seed64, + XXH3_f_accumulate f_acc, + XXH3_f_scrambleAcc f_scramble, + XXH3_f_initCustomSecret f_initSec) +{ + if (seed64 == 0) + return XXH3_hashLong_128b_internal(input, len, + XXH3_kSecret, sizeof(XXH3_kSecret), + f_acc, f_scramble); + { XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE]; + f_initSec(secret, seed64); + return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, sizeof(secret), + f_acc, f_scramble); + } +} + +/* + * It's important for performance that XXH3_hashLong is not inlined. + */ +XXH_NO_INLINE XXH128_hash_t +XXH3_hashLong_128b_withSeed(const void* input, size_t len, + XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen) +{ + (void)secret; (void)secretLen; + return XXH3_hashLong_128b_withSeed_internal(input, len, seed64, + XXH3_accumulate, XXH3_scrambleAcc, XXH3_initCustomSecret); +} + +typedef XXH128_hash_t (*XXH3_hashLong128_f)(const void* XXH_RESTRICT, size_t, + XXH64_hash_t, const void* XXH_RESTRICT, size_t); + +XXH_FORCE_INLINE XXH128_hash_t +XXH3_128bits_internal(const void* input, size_t len, + XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen, + XXH3_hashLong128_f f_hl128) +{ + XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN); + /* + * If an action is to be taken if `secret` conditions are not respected, + * it should be done here. + * For now, it's a contract pre-condition. + * Adding a check and a branch here would cost performance at every hash. + */ + if (len <= 16) + return XXH3_len_0to16_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64); + if (len <= 128) + return XXH3_len_17to128_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64); + if (len <= XXH3_MIDSIZE_MAX) + return XXH3_len_129to240_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64); + return f_hl128(input, len, seed64, secret, secretLen); +} + + +/* === Public XXH128 API === */ + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(XXH_NOESCAPE const void* input, size_t len) +{ + return XXH3_128bits_internal(input, len, 0, + XXH3_kSecret, sizeof(XXH3_kSecret), + XXH3_hashLong_128b_default); +} + +/*! 
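+ * Example (a minimal sketch; `buf`, `len`, `secret`, and `secretSize` are
+ * hypothetical caller-supplied values, with secretSize >= XXH3_SECRET_SIZE_MIN
+ * as required by the contract above):
+ * @code{.c}
+ *   XXH128_hash_t hash_with_secret(const void* buf, size_t len,
+ *                                  const void* secret, size_t secretSize)
+ *   {
+ *       return XXH3_128bits_withSecret(buf, len, secret, secretSize);
+ *   }
+ * @endcode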
@ingroup XXH3_family */ +XXH_PUBLIC_API XXH128_hash_t +XXH3_128bits_withSecret(XXH_NOESCAPE const void* input, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize) +{ + return XXH3_128bits_internal(input, len, 0, + (const xxh_u8*)secret, secretSize, + XXH3_hashLong_128b_withSecret); +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH128_hash_t +XXH3_128bits_withSeed(XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed) +{ + return XXH3_128bits_internal(input, len, seed, + XXH3_kSecret, sizeof(XXH3_kSecret), + XXH3_hashLong_128b_withSeed); +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH128_hash_t +XXH3_128bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed) +{ + if (len <= XXH3_MIDSIZE_MAX) + return XXH3_128bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL); + return XXH3_hashLong_128b_withSecret(input, len, seed, secret, secretSize); +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH128_hash_t +XXH128(XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed) +{ + return XXH3_128bits_withSeed(input, len, seed); +} + + +/* === XXH3 128-bit streaming === */ +#ifndef XXH_NO_STREAM +/* + * All initialization and update functions are identical to 64-bit streaming variant. + * The only difference is the finalization routine. + */ + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_128bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr) +{ + return XXH3_64bits_reset(statePtr); +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_128bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize) +{ + return XXH3_64bits_reset_withSecret(statePtr, secret, secretSize); +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_128bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed) +{ + return XXH3_64bits_reset_withSeed(statePtr, seed); +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_128bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed) +{ + return XXH3_64bits_reset_withSecretandSeed(statePtr, secret, secretSize, seed); +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_128bits_update(XXH_NOESCAPE XXH3_state_t* state, XXH_NOESCAPE const void* input, size_t len) +{ + return XXH3_64bits_update(state, input, len); +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (XXH_NOESCAPE const XXH3_state_t* state) +{ + const unsigned char* const secret = (state->extSecret == NULL) ? 
state->customSecret : state->extSecret;
+    if (state->totalLen > XXH3_MIDSIZE_MAX) {
+        XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
+        XXH3_digest_long(acc, state, secret);
+        XXH_ASSERT(state->secretLimit + XXH_STRIPE_LEN >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
+        {   XXH128_hash_t h128;
+            h128.low64  = XXH3_mergeAccs(acc,
+                                         secret + XXH_SECRET_MERGEACCS_START,
+                                         (xxh_u64)state->totalLen * XXH_PRIME64_1);
+            h128.high64 = XXH3_mergeAccs(acc,
+                                         secret + state->secretLimit + XXH_STRIPE_LEN
+                                                - sizeof(acc) - XXH_SECRET_MERGEACCS_START,
+                                         ~((xxh_u64)state->totalLen * XXH_PRIME64_2));
+            return h128;
+        }
+    }
+    /* len <= XXH3_MIDSIZE_MAX : short code */
+    if (state->seed)
+        return XXH3_128bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed);
+    return XXH3_128bits_withSecret(state->buffer, (size_t)(state->totalLen),
+                                   secret, state->secretLimit + XXH_STRIPE_LEN);
+}
+#endif /* !XXH_NO_STREAM */
+/* 128-bit utility functions */
+
+#include <string.h>   /* memcmp, memcpy */
+
+/* return : 1 if equal, 0 if different */
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2)
+{
+    /* note : XXH128_hash_t is compact, it has no padding byte */
+    return !(memcmp(&h1, &h2, sizeof(h1)));
+}
+
+/* This prototype is compatible with stdlib's qsort().
+ * @return : >0 if *h128_1  > *h128_2
+ *           <0 if *h128_1  < *h128_2
+ *           =0 if *h128_1 == *h128_2 */
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API int XXH128_cmp(XXH_NOESCAPE const void* h128_1, XXH_NOESCAPE const void* h128_2)
+{
+    XXH128_hash_t const h1 = *(const XXH128_hash_t*)h128_1;
+    XXH128_hash_t const h2 = *(const XXH128_hash_t*)h128_2;
+    int const hcmp = (h1.high64 > h2.high64) - (h2.high64 > h1.high64);
+    /* note : assumes that, in most cases, hash values are different */
+    if (hcmp) return hcmp;
+    return (h1.low64 > h2.low64) - (h2.low64 > h1.low64);
+}
+
+
+/*======   Canonical representation   ======*/
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API void
+XXH128_canonicalFromHash(XXH_NOESCAPE XXH128_canonical_t* dst, XXH128_hash_t hash)
+{
+    XXH_STATIC_ASSERT(sizeof(XXH128_canonical_t) == sizeof(XXH128_hash_t));
+    if (XXH_CPU_LITTLE_ENDIAN) {
+        hash.high64 = XXH_swap64(hash.high64);
+        hash.low64  = XXH_swap64(hash.low64);
+    }
+    XXH_memcpy(dst, &hash.high64, sizeof(hash.high64));
+    XXH_memcpy((char*)dst + sizeof(hash.high64), &hash.low64, sizeof(hash.low64));
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH128_hash_t
+XXH128_hashFromCanonical(XXH_NOESCAPE const XXH128_canonical_t* src)
+{
+    XXH128_hash_t h;
+    h.high64 = XXH_readBE64(src);
+    h.low64  = XXH_readBE64(src->digest + 8);
+    return h;
+}
+
+
+
+/* ==========================================
+ * Secret generators
+ * ==========================================
+ */
+#define XXH_MIN(x, y) (((x) > (y)) ? (y) : (x))
+
+XXH_FORCE_INLINE void XXH3_combine16(void* dst, XXH128_hash_t h128)
+{
+    XXH_writeLE64( dst,          XXH_readLE64(dst)          ^ h128.low64 );
+    XXH_writeLE64( (char*)dst+8, XXH_readLE64((char*)dst+8) ^ h128.high64 );
+}
+
+/*!
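+ * Example pairing this generator with XXH3_128bits_withSecret() (a sketch;
+ * `entropy` and `entropySize` stand for hypothetical caller-supplied seed
+ * material, `buf`/`len` for the data to hash):
+ * @code{.c}
+ *   unsigned char secret[XXH3_SECRET_SIZE_MIN];
+ *   if (XXH3_generateSecret(secret, sizeof(secret), entropy, entropySize) == XXH_OK) {
+ *       XXH128_hash_t const h = XXH3_128bits_withSecret(buf, len, secret, sizeof(secret));
+ *       (void)h;
+ *   }
+ * @endcode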
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_generateSecret(XXH_NOESCAPE void* secretBuffer, size_t secretSize, XXH_NOESCAPE const void* customSeed, size_t customSeedSize)
+{
+#if (XXH_DEBUGLEVEL >= 1)
+    XXH_ASSERT(secretBuffer != NULL);
+    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
+#else
+    /* production mode, assert() is disabled */
+    if (secretBuffer == NULL) return XXH_ERROR;
+    if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
+#endif
+
+    if (customSeedSize == 0) {
+        customSeed = XXH3_kSecret;
+        customSeedSize = XXH_SECRET_DEFAULT_SIZE;
+    }
+#if (XXH_DEBUGLEVEL >= 1)
+    XXH_ASSERT(customSeed != NULL);
+#else
+    if (customSeed == NULL) return XXH_ERROR;
+#endif
+
+    /* Fill secretBuffer with a copy of customSeed - repeat as needed */
+    { size_t pos = 0;
+      while (pos < secretSize) {
+          size_t const toCopy = XXH_MIN((secretSize - pos), customSeedSize);
+          memcpy((char*)secretBuffer + pos, customSeed, toCopy);
+          pos += toCopy;
+    } }
+
+    { size_t const nbSeg16 = secretSize / 16;
+      size_t n;
+      XXH128_canonical_t scrambler;
+      XXH128_canonicalFromHash(&scrambler, XXH128(customSeed, customSeedSize, 0));
+      for (n=0; n<nbSeg16; n++) {
+          XXH128_hash_t const h128 = XXH128(&scrambler, sizeof(scrambler), n);
+          XXH3_combine16((char*)secretBuffer + n*16, h128);
+      }
+      /* last segment */
+      XXH3_combine16((char*)secretBuffer + secretSize - 16, XXH128_hashFromCanonical(&scrambler));
+    }
+    return XXH_OK;
+}
- * #include <string.h>
- * #include "xxhash.h"
- *
- * // Example for a function which hashes a null terminated string with XXH32().
- * XXH32_hash_t hash_string(const char* string, XXH32_hash_t seed)
- * {
- *     // NULL pointers are only valid if the length is zero
- *     size_t length = (string == NULL) ? 0 : strlen(string);
- *     return XXH32(string, length, seed);
- * }
- * @endcode
- *
- * @anchor streaming_example
- * **Streaming**
- *
- * These groups of functions allow incremental hashing of unknown size, even
- * more than what would fit in a size_t.
- *
- * XXH32_reset(), XXH64_reset(), XXH3_64bits_reset(), XXH3_128bits_reset()
- *
- * @code{.c}
- * #include <stdio.h>
- * #include <assert.h>
- * #include "xxhash.h"
- * // Example for a function which hashes a FILE incrementally with XXH3_64bits().
- * XXH64_hash_t hashFile(FILE* f)
- * {
- *     // Allocate a state struct. Do not just use malloc() or new.
- *     XXH3_state_t* state = XXH3_createState();
- *     assert(state != NULL && "Out of memory!");
- *     // Reset the state to start a new hashing session.
- *     XXH3_64bits_reset(state);
- *     char buffer[4096];
- *     size_t count;
- *     // Read the file in chunks
- *     while ((count = fread(buffer, 1, sizeof(buffer), f)) != 0) {
- *         // Run update() as many times as necessary to process the data
- *         XXH3_64bits_update(state, buffer, count);
- *     }
- *     // Retrieve the finalized hash. This will not change the state.
- *     XXH64_hash_t result = XXH3_64bits_digest(state);
- *     // Free the state. Do not use free().
- *     XXH3_freeState(state);
- *     return result;
- * }
- * @endcode
- *
- * @file xxhash.h
- * xxHash prototypes and implementation
- */
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-/* ****************************
- *  INLINE mode
- ******************************/
-/*!
- * @defgroup public Public API
- * Contains details on the public xxHash functions.
- * @{
- */
-#ifdef XXH_DOXYGEN
-/*!
- * @brief Gives access to internal state declaration, required for static allocation.
- *
- * Incompatible with dynamic linking, due to risks of ABI changes.
- *
- * Usage:
- * @code{.c}
- *     #define XXH_STATIC_LINKING_ONLY
- *     #include "xxhash.h"
- * @endcode
- */
-# define XXH_STATIC_LINKING_ONLY
-/* Do not undef XXH_STATIC_LINKING_ONLY for Doxygen */
-
-/*!
- * @brief Gives access to internal definitions.
- * - * Usage: - * @code{.c} - * #define XXH_STATIC_LINKING_ONLY - * #define XXH_IMPLEMENTATION - * #include "xxhash.h" - * @endcode - */ -# define XXH_IMPLEMENTATION -/* Do not undef XXH_IMPLEMENTATION for Doxygen */ - -/*! - * @brief Exposes the implementation and marks all functions as `inline`. - * - * Use these build macros to inline xxhash into the target unit. - * Inlining improves performance on small inputs, especially when the length is - * expressed as a compile-time constant: - * - * https://fastcompression.blogspot.com/2018/03/xxhash-for-small-keys-impressive-power.html - * - * It also keeps xxHash symbols private to the unit, so they are not exported. - * - * Usage: - * @code{.c} - * #define XXH_INLINE_ALL - * #include "xxhash.h" - * @endcode - * Do not compile and link xxhash.o as a separate object, as it is not useful. - */ -# define XXH_INLINE_ALL -# undef XXH_INLINE_ALL -/*! - * @brief Exposes the implementation without marking functions as inline. - */ -# define XXH_PRIVATE_API -# undef XXH_PRIVATE_API -/*! - * @brief Emulate a namespace by transparently prefixing all symbols. - * - * If you want to include _and expose_ xxHash functions from within your own - * library, but also want to avoid symbol collisions with other libraries which - * may also include xxHash, you can use @ref XXH_NAMESPACE to automatically prefix - * any public symbol from xxhash library with the value of @ref XXH_NAMESPACE - * (therefore, avoid empty or numeric values). - * - * Note that no change is required within the calling program as long as it - * includes `xxhash.h`: Regular symbol names will be automatically translated - * by this header. - */ -# define XXH_NAMESPACE /* YOUR NAME HERE */ -# undef XXH_NAMESPACE -#endif - -#if (defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)) \ - && !defined(XXH_INLINE_ALL_31684351384) - /* this section should be traversed only once */ -# define XXH_INLINE_ALL_31684351384 - /* give access to the advanced API, required to compile implementations */ -# undef XXH_STATIC_LINKING_ONLY /* avoid macro redef */ -# define XXH_STATIC_LINKING_ONLY - /* make all functions private */ -# undef XXH_PUBLIC_API -# if defined(__GNUC__) -# define XXH_PUBLIC_API static __inline __attribute__((unused)) -# elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) -# define XXH_PUBLIC_API static inline -# elif defined(_MSC_VER) -# define XXH_PUBLIC_API static __inline -# else - /* note: this version may generate warnings for unused static functions */ -# define XXH_PUBLIC_API static -# endif - - /* - * This part deals with the special case where a unit wants to inline xxHash, - * but "xxhash.h" has previously been included without XXH_INLINE_ALL, - * such as part of some previously included *.h header file. - * Without further action, the new include would just be ignored, - * and functions would effectively _not_ be inlined (silent failure). - * The following macros solve this situation by prefixing all inlined names, - * avoiding naming collision with previous inclusions. - */ - /* Before that, we unconditionally #undef all symbols, - * in case they were already defined with XXH_NAMESPACE. 
- * They will then be redefined for XXH_INLINE_ALL - */ -# undef XXH_versionNumber - /* XXH32 */ -# undef XXH32 -# undef XXH32_createState -# undef XXH32_freeState -# undef XXH32_reset -# undef XXH32_update -# undef XXH32_digest -# undef XXH32_copyState -# undef XXH32_canonicalFromHash -# undef XXH32_hashFromCanonical - /* XXH64 */ -# undef XXH64 -# undef XXH64_createState -# undef XXH64_freeState -# undef XXH64_reset -# undef XXH64_update -# undef XXH64_digest -# undef XXH64_copyState -# undef XXH64_canonicalFromHash -# undef XXH64_hashFromCanonical - /* XXH3_64bits */ -# undef XXH3_64bits -# undef XXH3_64bits_withSecret -# undef XXH3_64bits_withSeed -# undef XXH3_64bits_withSecretandSeed -# undef XXH3_createState -# undef XXH3_freeState -# undef XXH3_copyState -# undef XXH3_64bits_reset -# undef XXH3_64bits_reset_withSeed -# undef XXH3_64bits_reset_withSecret -# undef XXH3_64bits_update -# undef XXH3_64bits_digest -# undef XXH3_generateSecret - /* XXH3_128bits */ -# undef XXH128 -# undef XXH3_128bits -# undef XXH3_128bits_withSeed -# undef XXH3_128bits_withSecret -# undef XXH3_128bits_reset -# undef XXH3_128bits_reset_withSeed -# undef XXH3_128bits_reset_withSecret -# undef XXH3_128bits_reset_withSecretandSeed -# undef XXH3_128bits_update -# undef XXH3_128bits_digest -# undef XXH128_isEqual -# undef XXH128_cmp -# undef XXH128_canonicalFromHash -# undef XXH128_hashFromCanonical - /* Finally, free the namespace itself */ -# undef XXH_NAMESPACE - - /* employ the namespace for XXH_INLINE_ALL */ -# define XXH_NAMESPACE XXH_INLINE_ - /* - * Some identifiers (enums, type names) are not symbols, - * but they must nonetheless be renamed to avoid redeclaration. - * Alternative solution: do not redeclare them. - * However, this requires some #ifdefs, and has a more dispersed impact. - * Meanwhile, renaming can be achieved in a single place. - */ -# define XXH_IPREF(Id) XXH_NAMESPACE ## Id -# define XXH_OK XXH_IPREF(XXH_OK) -# define XXH_ERROR XXH_IPREF(XXH_ERROR) -# define XXH_errorcode XXH_IPREF(XXH_errorcode) -# define XXH32_canonical_t XXH_IPREF(XXH32_canonical_t) -# define XXH64_canonical_t XXH_IPREF(XXH64_canonical_t) -# define XXH128_canonical_t XXH_IPREF(XXH128_canonical_t) -# define XXH32_state_s XXH_IPREF(XXH32_state_s) -# define XXH32_state_t XXH_IPREF(XXH32_state_t) -# define XXH64_state_s XXH_IPREF(XXH64_state_s) -# define XXH64_state_t XXH_IPREF(XXH64_state_t) -# define XXH3_state_s XXH_IPREF(XXH3_state_s) -# define XXH3_state_t XXH_IPREF(XXH3_state_t) -# define XXH128_hash_t XXH_IPREF(XXH128_hash_t) - /* Ensure the header is parsed again, even if it was previously included */ -# undef XXHASH_H_5627135585666179 -# undef XXHASH_H_STATIC_13879238742 -#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */ - -/* **************************************************************** - * Stable API - *****************************************************************/ -#ifndef XXHASH_H_5627135585666179 -#define XXHASH_H_5627135585666179 1 - -/*! @brief Marks a global symbol. 
*/ -#if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API) -# if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT)) -# ifdef XXH_EXPORT -# define XXH_PUBLIC_API __declspec(dllexport) -# elif XXH_IMPORT -# define XXH_PUBLIC_API __declspec(dllimport) -# endif -# else -# define XXH_PUBLIC_API /* do nothing */ -# endif -#endif - -#ifdef XXH_NAMESPACE -# define XXH_CAT(A,B) A##B -# define XXH_NAME2(A,B) XXH_CAT(A,B) -# define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber) -/* XXH32 */ -# define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32) -# define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState) -# define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState) -# define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset) -# define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update) -# define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest) -# define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState) -# define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash) -# define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical) -/* XXH64 */ -# define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64) -# define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState) -# define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState) -# define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset) -# define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update) -# define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest) -# define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState) -# define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash) -# define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical) -/* XXH3_64bits */ -# define XXH3_64bits XXH_NAME2(XXH_NAMESPACE, XXH3_64bits) -# define XXH3_64bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecret) -# define XXH3_64bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSeed) -# define XXH3_64bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecretandSeed) -# define XXH3_createState XXH_NAME2(XXH_NAMESPACE, XXH3_createState) -# define XXH3_freeState XXH_NAME2(XXH_NAMESPACE, XXH3_freeState) -# define XXH3_copyState XXH_NAME2(XXH_NAMESPACE, XXH3_copyState) -# define XXH3_64bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset) -# define XXH3_64bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSeed) -# define XXH3_64bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecret) -# define XXH3_64bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecretandSeed) -# define XXH3_64bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_update) -# define XXH3_64bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_digest) -# define XXH3_generateSecret XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret) -# define XXH3_generateSecret_fromSeed XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret_fromSeed) -/* XXH3_128bits */ -# define XXH128 XXH_NAME2(XXH_NAMESPACE, XXH128) -# define XXH3_128bits XXH_NAME2(XXH_NAMESPACE, XXH3_128bits) -# define XXH3_128bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSeed) -# define XXH3_128bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecret) -# define XXH3_128bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecretandSeed) -# define XXH3_128bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset) -# define XXH3_128bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSeed) -# define 
XXH3_128bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecret)
-#  define XXH3_128bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecretandSeed)
-#  define XXH3_128bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_update)
-#  define XXH3_128bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_digest)
-#  define XXH128_isEqual XXH_NAME2(XXH_NAMESPACE, XXH128_isEqual)
-#  define XXH128_cmp XXH_NAME2(XXH_NAMESPACE, XXH128_cmp)
-#  define XXH128_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH128_canonicalFromHash)
-#  define XXH128_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH128_hashFromCanonical)
-#endif
-
-
-/* *************************************
-*  Compiler specifics
-***************************************/
-
-/* specific declaration modes for Windows */
-#if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API)
-#  if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT))
-#    ifdef XXH_EXPORT
-#      define XXH_PUBLIC_API __declspec(dllexport)
-#    elif XXH_IMPORT
-#      define XXH_PUBLIC_API __declspec(dllimport)
-#    endif
-#  else
-#    define XXH_PUBLIC_API   /* do nothing */
-#  endif
-#endif
-
-#if defined (__GNUC__)
-# define XXH_CONSTF  __attribute__((const))
-# define XXH_PUREF   __attribute__((pure))
-# define XXH_MALLOCF __attribute__((malloc))
-#else
-# define XXH_CONSTF  /* disable */
-# define XXH_PUREF
-# define XXH_MALLOCF
-#endif
-
-/* *************************************
-*  Version
-***************************************/
-#define XXH_VERSION_MAJOR    0
-#define XXH_VERSION_MINOR    8
-#define XXH_VERSION_RELEASE  2
-/*! @brief Version number, encoded as two digits each */
-#define XXH_VERSION_NUMBER  (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)
-
-/*!
- * @brief Obtains the xxHash version.
- *
- * This is mostly useful when xxHash is compiled as a shared library,
- * since the returned value comes from the library, as opposed to header file.
- *
- * @return @ref XXH_VERSION_NUMBER of the invoked library.
- */
-XXH_PUBLIC_API XXH_CONSTF unsigned XXH_versionNumber (void);
-
-
-/* ****************************
-*  Common basic types
-******************************/
-#include <stddef.h>   /* size_t */
-/*!
- * @brief Exit code for the streaming API.
- */
-typedef enum {
-    XXH_OK = 0, /*!< OK */
-    XXH_ERROR   /*!< Error */
-} XXH_errorcode;
-
-
-/*-**********************************************************************
-*  32-bit hash
-************************************************************************/
-#if defined(XXH_DOXYGEN) /* Don't show <stdint.h> include */
-/*!
- * @brief An unsigned 32-bit integer.
- *
- * Not necessarily defined to `uint32_t` but functionally equivalent.
- */
-typedef uint32_t XXH32_hash_t;
-
-#elif !defined (__VMS) \
-  && (defined (__cplusplus) \
-  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
-#   include <stdint.h>
-    typedef uint32_t XXH32_hash_t;
-
-#else
-#   include <limits.h>
-#   if UINT_MAX == 0xFFFFFFFFUL
-      typedef unsigned int XXH32_hash_t;
-#   elif ULONG_MAX == 0xFFFFFFFFUL
-      typedef unsigned long XXH32_hash_t;
-#   else
-#     error "unsupported platform: need a 32-bit type"
-#   endif
-#endif
-
-/*!
- * @}
- *
- * @defgroup XXH32_family XXH32 family
- * @ingroup public
- * Contains functions used in the classic 32-bit xxHash algorithm.
- *
- * @note
- *   XXH32 is useful for older platforms, with no or poor 64-bit performance.
- *   Note that the @ref XXH3_family provides competitive speed for both 32-bit
- *   and 64-bit systems, and offers true 64/128 bit hash results.
- * - * @see @ref XXH64_family, @ref XXH3_family : Other xxHash families - * @see @ref XXH32_impl for implementation details - * @{ - */ - -/*! - * @brief Calculates the 32-bit hash of @p input using xxHash32. - * - * Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark): 5.4 GB/s - * - * See @ref single_shot_example "Single Shot Example" for an example. - * - * @param input The block of data to be hashed, at least @p length bytes in size. - * @param length The length of @p input, in bytes. - * @param seed The 32-bit seed to alter the hash's output predictably. - * - * @pre - * The memory between @p input and @p input + @p length must be valid, - * readable, contiguous memory. However, if @p length is `0`, @p input may be - * `NULL`. In C++, this also must be *TriviallyCopyable*. - * - * @return The calculated 32-bit hash value. - * - * @see - * XXH64(), XXH3_64bits_withSeed(), XXH3_128bits_withSeed(), XXH128(): - * Direct equivalents for the other variants of xxHash. - * @see - * XXH32_createState(), XXH32_update(), XXH32_digest(): Streaming version. - */ -XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32 (const void* input, size_t length, XXH32_hash_t seed); - -#ifndef XXH_NO_STREAM -/*! - * Streaming functions generate the xxHash value from an incremental input. - * This method is slower than single-call functions, due to state management. - * For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized. - * - * An XXH state must first be allocated using `XXH*_createState()`. - * - * Start a new hash by initializing the state with a seed using `XXH*_reset()`. - * - * Then, feed the hash state by calling `XXH*_update()` as many times as necessary. - * - * The function returns an error code, with 0 meaning OK, and any other value - * meaning there is an error. - * - * Finally, a hash value can be produced anytime, by using `XXH*_digest()`. - * This function returns the nn-bits hash as an int or long long. - * - * It's still possible to continue inserting input into the hash state after a - * digest, and generate new hash values later on by invoking `XXH*_digest()`. - * - * When done, release the state using `XXH*_freeState()`. - * - * @see streaming_example at the top of @ref xxhash.h for an example. - */ - -/*! - * @typedef struct XXH32_state_s XXH32_state_t - * @brief The opaque state struct for the XXH32 streaming API. - * - * @see XXH32_state_s for details. - */ -typedef struct XXH32_state_s XXH32_state_t; - -/*! - * @brief Allocates an @ref XXH32_state_t. - * - * Must be freed with XXH32_freeState(). - * @return An allocated XXH32_state_t on success, `NULL` on failure. - */ -XXH_PUBLIC_API XXH_MALLOCF XXH32_state_t* XXH32_createState(void); -/*! - * @brief Frees an @ref XXH32_state_t. - * - * Must be allocated with XXH32_createState(). - * @param statePtr A pointer to an @ref XXH32_state_t allocated with @ref XXH32_createState(). - * @return XXH_OK. - */ -XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr); -/*! - * @brief Copies one @ref XXH32_state_t to another. - * - * @param dst_state The state to copy to. - * @param src_state The state to copy from. - * @pre - * @p dst_state and @p src_state must not be `NULL` and must not overlap. - */ -XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_t* src_state); - -/*! - * @brief Resets an @ref XXH32_state_t to begin a new hash. - * - * This function resets and seeds a state. Call it before @ref XXH32_update(). - * - * @param statePtr The state struct to reset. 
- * @param seed The 32-bit seed to alter the hash result predictably.
- *
- * @pre
- *   @p statePtr must not be `NULL`.
- *
- * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
- */
-XXH_PUBLIC_API XXH_errorcode XXH32_reset (XXH32_state_t* statePtr, XXH32_hash_t seed);
-
-/*!
- * @brief Consumes a block of @p input to an @ref XXH32_state_t.
- *
- * Call this to incrementally consume blocks of data.
- *
- * @param statePtr The state struct to update.
- * @param input The block of data to be hashed, at least @p length bytes in size.
- * @param length The length of @p input, in bytes.
- *
- * @pre
- *   @p statePtr must not be `NULL`.
- * @pre
- *   The memory between @p input and @p input + @p length must be valid,
- *   readable, contiguous memory. However, if @p length is `0`, @p input may be
- *   `NULL`. In C++, this also must be *TriviallyCopyable*.
- *
- * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
- */
-XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length);
-
-/*!
- * @brief Returns the calculated hash value from an @ref XXH32_state_t.
- *
- * @note
- *   Calling XXH32_digest() will not affect @p statePtr, so you can update,
- *   digest, and update again.
- *
- * @param statePtr The state struct to calculate the hash from.
- *
- * @pre
- *   @p statePtr must not be `NULL`.
- *
- * @return The calculated xxHash32 value from that state.
- */
-XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr);
-#endif /* !XXH_NO_STREAM */
-
-/*******   Canonical representation   *******/
-
-/*
- * The default return values from XXH functions are unsigned 32 and 64 bit
- * integers.
- * This is the simplest and fastest format for further post-processing.
- *
- * However, this leaves open the question of what is the order on the byte level,
- * since little and big endian conventions will store the same number differently.
- *
- * The canonical representation settles this issue by mandating big-endian
- * convention, the same convention as human-readable numbers (large digits first).
- *
- * When writing hash values to storage, sending them over a network, or printing
- * them, it's highly recommended to use the canonical representation to ensure
- * portability across a wider range of systems, present and future.
- *
- * The following functions allow transformation of hash values to and from
- * canonical format.
- */
-
-/*!
- * @brief Canonical (big endian) representation of @ref XXH32_hash_t.
- */
-typedef struct {
-    unsigned char digest[4]; /*!< Hash bytes, big endian */
-} XXH32_canonical_t;
-
-/*!
- * @brief Converts an @ref XXH32_hash_t to a big endian @ref XXH32_canonical_t.
- *
- * @param dst  The @ref XXH32_canonical_t pointer to be stored to.
- * @param hash The @ref XXH32_hash_t to be converted.
- *
- * @pre
- *   @p dst must not be `NULL`.
- */
-XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash);
-
-/*!
- * @brief Converts an @ref XXH32_canonical_t to a native @ref XXH32_hash_t.
- *
- * @param src The @ref XXH32_canonical_t to convert.
- *
- * @pre
- *   @p src must not be `NULL`.
- *
- * @return The converted hash.
- */
-XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src);
-
-
-/*! @cond Doxygen ignores this part */
-#ifdef __has_attribute
-# define XXH_HAS_ATTRIBUTE(x) __has_attribute(x)
-#else
-# define XXH_HAS_ATTRIBUTE(x) 0
-#endif
-/*! @endcond */
-
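
/*
 * Illustrative sketch (not from the patched sources): serializing a hash
 * through the canonical form described above, so the printed bytes are
 * identical on little- and big-endian hosts. print_xxh32_hex() is a
 * hypothetical helper name.
 */
#include <stdio.h>
#include <string.h>
#include "xxhash.h"

static void print_xxh32_hex(XXH32_hash_t h)
{
    XXH32_canonical_t c;
    size_t i;
    XXH32_canonicalFromHash(&c, h);          /* big-endian byte order */
    for (i = 0; i < sizeof(c.digest); i++)
        printf("%02x", c.digest[i]);
    printf("\n");
}

int main(void)
{
    const char msg[] = "abc";
    print_xxh32_hex(XXH32(msg, strlen(msg), 0));
    return 0;
}
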
-/*! @cond Doxygen ignores this part */
-/*
- * C23 __STDC_VERSION__ number hasn't been specified yet. For now
- * leave as `201711L` (C17 + 1).
- * TODO: Update to correct value when it's been specified.
- */
-#define XXH_C23_VN 201711L
-/*! @endcond */
-
-/*! @cond Doxygen ignores this part */
-/* C-language Attributes are added in C23. */
-#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= XXH_C23_VN) && defined(__has_c_attribute)
-# define XXH_HAS_C_ATTRIBUTE(x) __has_c_attribute(x)
-#else
-# define XXH_HAS_C_ATTRIBUTE(x) 0
-#endif
-/*! @endcond */
-
-/*! @cond Doxygen ignores this part */
-#if defined(__cplusplus) && defined(__has_cpp_attribute)
-# define XXH_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)
-#else
-# define XXH_HAS_CPP_ATTRIBUTE(x) 0
-#endif
-/*! @endcond */
-
-/*! @cond Doxygen ignores this part */
-/*
- * Define XXH_FALLTHROUGH macro for annotating switch case with the 'fallthrough' attribute
- * introduced in CPP17 and C23.
- * CPP17 : https://en.cppreference.com/w/cpp/language/attributes/fallthrough
- * C23   : https://en.cppreference.com/w/c/language/attributes/fallthrough
- */
-#if XXH_HAS_C_ATTRIBUTE(fallthrough) || XXH_HAS_CPP_ATTRIBUTE(fallthrough)
-# define XXH_FALLTHROUGH [[fallthrough]]
-#elif XXH_HAS_ATTRIBUTE(__fallthrough__)
-# define XXH_FALLTHROUGH __attribute__ ((__fallthrough__))
-#else
-# define XXH_FALLTHROUGH /* fallthrough */
-#endif
-/*! @endcond */
-
-/*! @cond Doxygen ignores this part */
-/*
- * Define XXH_NOESCAPE for annotated pointers in public API.
- * https://clang.llvm.org/docs/AttributeReference.html#noescape
- * As of writing this, only supported by clang.
- */
-#if XXH_HAS_ATTRIBUTE(noescape)
-# define XXH_NOESCAPE __attribute__((noescape))
-#else
-# define XXH_NOESCAPE
-#endif
-/*! @endcond */
-
-
-/*!
- * @}
- * @ingroup public
- * @{
- */
-
-#ifndef XXH_NO_LONG_LONG
-/*-**********************************************************************
-*  64-bit hash
-************************************************************************/
-#if defined(XXH_DOXYGEN) /* don't include <stdint.h> */
-/*!
- * @brief An unsigned 64-bit integer.
- *
- * Not necessarily defined to `uint64_t` but functionally equivalent.
- */
-typedef uint64_t XXH64_hash_t;
-#elif !defined (__VMS) \
-  && (defined (__cplusplus) \
-  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
-#  include <stdint.h>
-   typedef uint64_t XXH64_hash_t;
-#else
-#  include <limits.h>
-#  if defined(__LP64__) && ULONG_MAX == 0xFFFFFFFFFFFFFFFFULL
-     /* LP64 ABI says uint64_t is unsigned long */
-     typedef unsigned long XXH64_hash_t;
-#  else
-     /* the following type must have a width of 64-bit */
-     typedef unsigned long long XXH64_hash_t;
-#  endif
-#endif
-
-/*!
- * @}
- *
- * @defgroup XXH64_family XXH64 family
- * @ingroup public
- * @{
- * Contains functions used in the classic 64-bit xxHash algorithm.
- *
- * @note
- *   XXH3 provides competitive speed for both 32-bit and 64-bit systems,
- *   and offers true 64/128 bit hash results.
- *   It provides better speed for systems with vector processing capabilities.
- */
-
-/*!
- * @brief Calculates the 64-bit hash of @p input using xxHash64.
- *
- * This function usually runs faster on 64-bit systems, but slower on 32-bit
- * systems (see benchmark).
- *
- * @param input The block of data to be hashed, at least @p length bytes in size.
- * @param length The length of @p input, in bytes.
- * @param seed The 64-bit seed to alter the hash's output predictably.
- * - * @pre - * The memory between @p input and @p input + @p length must be valid, - * readable, contiguous memory. However, if @p length is `0`, @p input may be - * `NULL`. In C++, this also must be *TriviallyCopyable*. - * - * @return The calculated 64-bit hash. - * - * @see - * XXH32(), XXH3_64bits_withSeed(), XXH3_128bits_withSeed(), XXH128(): - * Direct equivalents for the other variants of xxHash. - * @see - * XXH64_createState(), XXH64_update(), XXH64_digest(): Streaming version. - */ -XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed); - -/******* Streaming *******/ -#ifndef XXH_NO_STREAM -/*! - * @brief The opaque state struct for the XXH64 streaming API. - * - * @see XXH64_state_s for details. - */ -typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */ - -/*! - * @brief Allocates an @ref XXH64_state_t. - * - * Must be freed with XXH64_freeState(). - * @return An allocated XXH64_state_t on success, `NULL` on failure. - */ -XXH_PUBLIC_API XXH_MALLOCF XXH64_state_t* XXH64_createState(void); - -/*! - * @brief Frees an @ref XXH64_state_t. - * - * Must be allocated with XXH64_createState(). - * @param statePtr A pointer to an @ref XXH64_state_t allocated with @ref XXH64_createState(). - * @return XXH_OK. - */ -XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr); - -/*! - * @brief Copies one @ref XXH64_state_t to another. - * - * @param dst_state The state to copy to. - * @param src_state The state to copy from. - * @pre - * @p dst_state and @p src_state must not be `NULL` and must not overlap. - */ -XXH_PUBLIC_API void XXH64_copyState(XXH_NOESCAPE XXH64_state_t* dst_state, const XXH64_state_t* src_state); - -/*! - * @brief Resets an @ref XXH64_state_t to begin a new hash. - * - * This function resets and seeds a state. Call it before @ref XXH64_update(). - * - * @param statePtr The state struct to reset. - * @param seed The 64-bit seed to alter the hash result predictably. - * - * @pre - * @p statePtr must not be `NULL`. - * - * @return @ref XXH_OK on success, @ref XXH_ERROR on failure. - */ -XXH_PUBLIC_API XXH_errorcode XXH64_reset (XXH_NOESCAPE XXH64_state_t* statePtr, XXH64_hash_t seed); - -/*! - * @brief Consumes a block of @p input to an @ref XXH64_state_t. - * - * Call this to incrementally consume blocks of data. - * - * @param statePtr The state struct to update. - * @param input The block of data to be hashed, at least @p length bytes in size. - * @param length The length of @p input, in bytes. - * - * @pre - * @p statePtr must not be `NULL`. - * @pre - * The memory between @p input and @p input + @p length must be valid, - * readable, contiguous memory. However, if @p length is `0`, @p input may be - * `NULL`. In C++, this also must be *TriviallyCopyable*. - * - * @return @ref XXH_OK on success, @ref XXH_ERROR on failure. - */ -XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH_NOESCAPE XXH64_state_t* statePtr, XXH_NOESCAPE const void* input, size_t length); - -/*! - * @brief Returns the calculated hash value from an @ref XXH64_state_t. - * - * @note - * Calling XXH64_digest() will not affect @p statePtr, so you can update, - * digest, and update again. - * - * @param statePtr The state struct to calculate the hash from. - * - * @pre - * @p statePtr must not be `NULL`. - * - * @return The calculated xxHash64 value from that state. 
- */ -XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64_digest (XXH_NOESCAPE const XXH64_state_t* statePtr); -#endif /* !XXH_NO_STREAM */ -/******* Canonical representation *******/ - -/*! - * @brief Canonical (big endian) representation of @ref XXH64_hash_t. - */ -typedef struct { unsigned char digest[sizeof(XXH64_hash_t)]; } XXH64_canonical_t; - -/*! - * @brief Converts an @ref XXH64_hash_t to a big endian @ref XXH64_canonical_t. - * - * @param dst The @ref XXH64_canonical_t pointer to be stored to. - * @param hash The @ref XXH64_hash_t to be converted. - * - * @pre - * @p dst must not be `NULL`. - */ -XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH_NOESCAPE XXH64_canonical_t* dst, XXH64_hash_t hash); - -/*! - * @brief Converts an @ref XXH64_canonical_t to a native @ref XXH64_hash_t. - * - * @param src The @ref XXH64_canonical_t to convert. - * - * @pre - * @p src must not be `NULL`. - * - * @return The converted hash. - */ -XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64_hashFromCanonical(XXH_NOESCAPE const XXH64_canonical_t* src); - -#ifndef XXH_NO_XXH3 - -/*! - * @} - * ************************************************************************ - * @defgroup XXH3_family XXH3 family - * @ingroup public - * @{ - * - * XXH3 is a more recent hash algorithm featuring: - * - Improved speed for both small and large inputs - * - True 64-bit and 128-bit outputs - * - SIMD acceleration - * - Improved 32-bit viability - * - * Speed analysis methodology is explained here: - * - * https://fastcompression.blogspot.com/2019/03/presenting-xxh3.html - * - * Compared to XXH64, expect XXH3 to run approximately - * ~2x faster on large inputs and >3x faster on small ones, - * exact differences vary depending on platform. - * - * XXH3's speed benefits greatly from SIMD and 64-bit arithmetic, - * but does not require it. - * Most 32-bit and 64-bit targets that can run XXH32 smoothly can run XXH3 - * at competitive speeds, even without vector support. Further details are - * explained in the implementation. - * - * XXH3 has a fast scalar implementation, but it also includes accelerated SIMD - * implementations for many common platforms: - * - AVX512 - * - AVX2 - * - SSE2 - * - ARM NEON - * - WebAssembly SIMD128 - * - POWER8 VSX - * - s390x ZVector - * This can be controlled via the @ref XXH_VECTOR macro, but it automatically - * selects the best version according to predefined macros. For the x86 family, an - * automatic runtime dispatcher is included separately in @ref xxh_x86dispatch.c. - * - * XXH3 implementation is portable: - * it has a generic C90 formulation that can be compiled on any platform, - * all implementations generate exactly the same hash value on all platforms. - * Starting from v0.8.0, it's also labelled "stable", meaning that - * any future version will also generate the same hash value. - * - * XXH3 offers 2 variants, _64bits and _128bits. - * - * When only 64 bits are needed, prefer invoking the _64bits variant, as it - * reduces the amount of mixing, resulting in faster speed on small inputs. - * It's also generally simpler to manipulate a scalar return type than a struct. - * - * The API supports one-shot hashing, streaming mode, and custom secrets. - */ -/*-********************************************************************** -* XXH3 64-bit variant -************************************************************************/ - -/*! - * @brief 64-bit unseeded variant of XXH3. 
- * - * This is equivalent to @ref XXH3_64bits_withSeed() with a seed of 0, however - * it may have slightly better performance due to constant propagation of the - * defaults. - * - * @see - * XXH32(), XXH64(), XXH3_128bits(): equivalent for the other xxHash algorithms - * @see - * XXH3_64bits_withSeed(), XXH3_64bits_withSecret(): other seeding variants - * @see - * XXH3_64bits_reset(), XXH3_64bits_update(), XXH3_64bits_digest(): Streaming version. - */ -XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits(XXH_NOESCAPE const void* input, size_t length); - -/*! - * @brief 64-bit seeded variant of XXH3 - * - * This variant generates a custom secret on the fly based on default secret - * altered using the `seed` value. - * - * While this operation is decently fast, note that it's not completely free. - * - * @note - * seed == 0 produces the same results as @ref XXH3_64bits(). - * - * @param input The data to hash - * @param length The length - * @param seed The 64-bit seed to alter the state. - */ -XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_withSeed(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed); - -/*! - * The bare minimum size for a custom secret. - * - * @see - * XXH3_64bits_withSecret(), XXH3_64bits_reset_withSecret(), - * XXH3_128bits_withSecret(), XXH3_128bits_reset_withSecret(). - */ -#define XXH3_SECRET_SIZE_MIN 136 - -/*! - * @brief 64-bit variant of XXH3 with a custom "secret". - * - * It's possible to provide any blob of bytes as a "secret" to generate the hash. - * This makes it more difficult for an external actor to prepare an intentional collision. - * The main condition is that secretSize *must* be large enough (>= XXH3_SECRET_SIZE_MIN). - * However, the quality of the secret impacts the dispersion of the hash algorithm. - * Therefore, the secret _must_ look like a bunch of random bytes. - * Avoid "trivial" or structured data such as repeated sequences or a text document. - * Whenever in doubt about the "randomness" of the blob of bytes, - * consider employing "XXH3_generateSecret()" instead (see below). - * It will generate a proper high entropy secret derived from the blob of bytes. - * Another advantage of using XXH3_generateSecret() is that - * it guarantees that all bits within the initial blob of bytes - * will impact every bit of the output. - * This is not necessarily the case when using the blob of bytes directly - * because, when hashing _small_ inputs, only a portion of the secret is employed. - */ -XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_withSecret(XXH_NOESCAPE const void* data, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize); - - -/******* Streaming *******/ -#ifndef XXH_NO_STREAM -/* - * Streaming requires state maintenance. - * This operation costs memory and CPU. - * As a consequence, streaming is slower than one-shot hashing. - * For better performance, prefer one-shot functions whenever applicable. - */ - -/*! - * @brief The state struct for the XXH3 streaming API. - * - * @see XXH3_state_s for details. - */ -typedef struct XXH3_state_s XXH3_state_t; -XXH_PUBLIC_API XXH_MALLOCF XXH3_state_t* XXH3_createState(void); -XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr); - -/*! - * @brief Copies one @ref XXH3_state_t to another. - * - * @param dst_state The state to copy to. - * @param src_state The state to copy from. - * @pre - * @p dst_state and @p src_state must not be `NULL` and must not overlap. 
- */
-XXH_PUBLIC_API void XXH3_copyState(XXH_NOESCAPE XXH3_state_t* dst_state, XXH_NOESCAPE const XXH3_state_t* src_state);
-
-/*!
- * @brief Resets an @ref XXH3_state_t to begin a new hash.
- *
- * This function resets `statePtr` and generates a secret with default parameters. Call it before @ref XXH3_64bits_update().
- * Digest will be equivalent to `XXH3_64bits()`.
- *
- * @param statePtr The state struct to reset.
- *
- * @pre
- *   @p statePtr must not be `NULL`.
- *
- * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
- *
- */
-XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr);
-
-/*!
- * @brief Resets an @ref XXH3_state_t with 64-bit seed to begin a new hash.
- *
- * This function resets `statePtr` and generates a secret from `seed`. Call it before @ref XXH3_64bits_update().
- * Digest will be equivalent to `XXH3_64bits_withSeed()`.
- *
- * @param statePtr The state struct to reset.
- * @param seed     The 64-bit seed to alter the state.
- *
- * @pre
- *   @p statePtr must not be `NULL`.
- *
- * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
- *
- */
-XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed);
-
-/*!
- * XXH3_64bits_reset_withSecret():
- * `secret` is referenced, it _must outlive_ the hash streaming session.
- * Similar to one-shot API, `secretSize` must be >= `XXH3_SECRET_SIZE_MIN`,
- * and the quality of produced hash values depends on secret's entropy
- * (secret's content should look like a bunch of random bytes).
- * When in doubt about the randomness of a candidate `secret`,
- * consider employing `XXH3_generateSecret()` instead (see below).
- */
-XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize);
-
-/*!
- * @brief Consumes a block of @p input to an @ref XXH3_state_t.
- *
- * Call this to incrementally consume blocks of data.
- *
- * @param statePtr The state struct to update.
- * @param input The block of data to be hashed, at least @p length bytes in size.
- * @param length The length of @p input, in bytes.
- *
- * @pre
- *   @p statePtr must not be `NULL`.
- * @pre
- *   The memory between @p input and @p input + @p length must be valid,
- *   readable, contiguous memory. However, if @p length is `0`, @p input may be
- *   `NULL`. In C++, this also must be *TriviallyCopyable*.
- *
- * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
- */
-XXH_PUBLIC_API XXH_errorcode XXH3_64bits_update (XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* input, size_t length);
-
-/*!
- * @brief Returns the calculated XXH3 64-bit hash value from an @ref XXH3_state_t.
- *
- * @note
- *   Calling XXH3_64bits_digest() will not affect @p statePtr, so you can update,
- *   digest, and update again.
- *
- * @param statePtr The state struct to calculate the hash from.
- *
- * @pre
- *   @p statePtr must not be `NULL`.
- *
- * @return The calculated XXH3 64-bit hash value from that state.
- */
-XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_digest (XXH_NOESCAPE const XXH3_state_t* statePtr);
-#endif /* !XXH_NO_STREAM */
-
-/* note : canonical representation of XXH3 is the same as XXH64
- * since they both produce XXH64_hash_t values */
-
-
-/*-**********************************************************************
-*  XXH3 128-bit variant
-************************************************************************/
-
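
/*
 * Illustrative sketch (not from the patched sources): the XXH3 64-bit
 * streaming session described above. How the input is split across
 * update() calls does not change the result.
 */
#include <assert.h>
#include "xxhash.h"

int main(void)
{
    XXH3_state_t* const st = XXH3_createState();
    assert(st != NULL);
    XXH3_64bits_reset(st);
    XXH3_64bits_update(st, "hello ", 6);
    XXH3_64bits_update(st, "world", 5);
    {   XXH64_hash_t const h = XXH3_64bits_digest(st);  /* state unchanged */
        assert(h == XXH3_64bits("hello world", 11));    /* matches one-shot */
    }
    XXH3_freeState(st);
    return 0;
}
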
-/*!
- * @brief The return value from 128-bit hashes.
- *
- * Stored in little endian order, although the fields themselves are in native
- * endianness.
- */
-typedef struct {
-    XXH64_hash_t low64;   /*!< `value & 0xFFFFFFFFFFFFFFFF` */
-    XXH64_hash_t high64;  /*!< `value >> 64` */
-} XXH128_hash_t;
-
-/*!
- * @brief Unseeded 128-bit variant of XXH3
- *
- * The 128-bit variant of XXH3 has more strength, but it has a bit of overhead
- * for shorter inputs.
- *
- * This is equivalent to @ref XXH3_128bits_withSeed() with a seed of 0, however
- * it may have slightly better performance due to constant propagation of the
- * defaults.
- *
- * @see
- *    XXH32(), XXH64(), XXH3_64bits(): equivalent for the other xxHash algorithms
- * @see
- *    XXH3_128bits_withSeed(), XXH3_128bits_withSecret(): other seeding variants
- * @see
- *    XXH3_128bits_reset(), XXH3_128bits_update(), XXH3_128bits_digest(): Streaming version.
- */
-XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits(XXH_NOESCAPE const void* data, size_t len);
-/*! @brief Seeded 128-bit variant of XXH3. @see XXH3_64bits_withSeed(). */
-XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_withSeed(XXH_NOESCAPE const void* data, size_t len, XXH64_hash_t seed);
-/*! @brief Custom secret 128-bit variant of XXH3. @see XXH3_64bits_withSecret(). */
-XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_withSecret(XXH_NOESCAPE const void* data, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize);
-
-/*******   Streaming   *******/
-#ifndef XXH_NO_STREAM
-/*
- * Streaming requires state maintenance.
- * This operation costs memory and CPU.
- * As a consequence, streaming is slower than one-shot hashing.
- * For better performance, prefer one-shot functions whenever applicable.
- *
- * XXH3_128bits uses the same XXH3_state_t as XXH3_64bits().
- * Use already declared XXH3_createState() and XXH3_freeState().
- *
- * All reset and streaming functions have same meaning as their 64-bit counterpart.
- */
-
-/*!
- * @brief Resets an @ref XXH3_state_t to begin a new hash.
- *
- * This function resets `statePtr` and generates a secret with default parameters. Call it before @ref XXH3_128bits_update().
- * Digest will be equivalent to `XXH3_128bits()`.
- *
- * @param statePtr The state struct to reset.
- *
- * @pre
- *   @p statePtr must not be `NULL`.
- *
- * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
- *
- */
-XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr);
-
-/*!
- * @brief Resets an @ref XXH3_state_t with 64-bit seed to begin a new hash.
- *
- * This function resets `statePtr` and generates a secret from `seed`. Call it before @ref XXH3_128bits_update().
- * Digest will be equivalent to `XXH3_128bits_withSeed()`.
- *
- * @param statePtr The state struct to reset.
- * @param seed     The 64-bit seed to alter the state.
- *
- * @pre
- *   @p statePtr must not be `NULL`.
- *
- * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
- *
- */
-XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed);
-/*! @brief Custom secret 128-bit variant of XXH3. @see XXH3_64bits_reset_withSecret(). */
-XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize);
-
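
/*
 * Illustrative sketch (not from the patched sources): the 128-bit streaming
 * variant reuses the same XXH3_state_t as the 64-bit one, as noted above.
 * hash128_two_parts() is a hypothetical helper name.
 */
#include <assert.h>
#include <stddef.h>
#include "xxhash.h"

static XXH128_hash_t hash128_two_parts(const void* a, size_t la,
                                       const void* b, size_t lb,
                                       XXH64_hash_t seed)
{
    XXH128_hash_t h;
    XXH3_state_t* const st = XXH3_createState();
    assert(st != NULL);
    XXH3_128bits_reset_withSeed(st, seed);
    XXH3_128bits_update(st, a, la);
    XXH3_128bits_update(st, b, lb);
    h = XXH3_128bits_digest(st);
    XXH3_freeState(st);
    return h;
}
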
-/*!
- * @brief Consumes a block of @p input to an @ref XXH3_state_t.
- *
- * Call this to incrementally consume blocks of data.
- *
- * @param statePtr The state struct to update.
- * @param input The block of data to be hashed, at least @p length bytes in size.
- * @param length The length of @p input, in bytes.
- *
- * @pre
- *   @p statePtr must not be `NULL`.
- * @pre
- *   The memory between @p input and @p input + @p length must be valid,
- *   readable, contiguous memory. However, if @p length is `0`, @p input may be
- *   `NULL`. In C++, this also must be *TriviallyCopyable*.
- *
- * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
- */
-XXH_PUBLIC_API XXH_errorcode XXH3_128bits_update (XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* input, size_t length);
-
-/*!
- * @brief Returns the calculated XXH3 128-bit hash value from an @ref XXH3_state_t.
- *
- * @note
- *   Calling XXH3_128bits_digest() will not affect @p statePtr, so you can update,
- *   digest, and update again.
- *
- * @param statePtr The state struct to calculate the hash from.
- *
- * @pre
- *   @p statePtr must not be `NULL`.
- *
- * @return The calculated XXH3 128-bit hash value from that state.
- */
-XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_digest (XXH_NOESCAPE const XXH3_state_t* statePtr);
-#endif /* !XXH_NO_STREAM */
-
-/* Following helper functions make it possible to compare XXH128_hash_t values.
- * Since XXH128_hash_t is a structure, this capability is not offered by the language.
- * Note: For better performance, these functions can be inlined using XXH_INLINE_ALL */
-
-/*!
- * XXH128_isEqual():
- * Return: 1 if `h1` and `h2` are equal, 0 if they are not.
- */
-XXH_PUBLIC_API XXH_PUREF int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2);
-
-/*!
- * @brief Compares two @ref XXH128_hash_t
- * This comparator is compatible with stdlib's `qsort()`/`bsearch()`.
- *
- * @return: >0 if *h128_1  > *h128_2
- *          =0 if *h128_1 == *h128_2
- *          <0 if *h128_1  < *h128_2
- */
-XXH_PUBLIC_API XXH_PUREF int XXH128_cmp(XXH_NOESCAPE const void* h128_1, XXH_NOESCAPE const void* h128_2);
-
-
-/*******   Canonical representation   *******/
-typedef struct { unsigned char digest[sizeof(XXH128_hash_t)]; } XXH128_canonical_t;
-
-
-/*!
- * @brief Converts an @ref XXH128_hash_t to a big endian @ref XXH128_canonical_t.
- *
- * @param dst  The @ref XXH128_canonical_t pointer to be stored to.
- * @param hash The @ref XXH128_hash_t to be converted.
- *
- * @pre
- *   @p dst must not be `NULL`.
- */
-XXH_PUBLIC_API void XXH128_canonicalFromHash(XXH_NOESCAPE XXH128_canonical_t* dst, XXH128_hash_t hash);
-
-/*!
- * @brief Converts an @ref XXH128_canonical_t to a native @ref XXH128_hash_t.
- *
- * @param src The @ref XXH128_canonical_t to convert.
- *
- * @pre
- *   @p src must not be `NULL`.
- *
- * @return The converted hash.
- */
-XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH128_hashFromCanonical(XXH_NOESCAPE const XXH128_canonical_t* src);
-
-
-#endif  /* !XXH_NO_XXH3 */
-#endif  /* XXH_NO_LONG_LONG */
-
-/*!
- * @}
- */
-#endif /* XXHASH_H_5627135585666179 */
-
-
-
-#if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742)
-#define XXHASH_H_STATIC_13879238742
-/* ****************************************************************************
- * This section contains declarations which are not guaranteed to remain stable.
- * They may change in future versions, becoming incompatible with a different
- * version of the library.
- * These declarations should only be used with static linking.
- * Never use them in association with dynamic linking!
- ***************************************************************************** */
-
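
/*
 * Illustrative sketch (not from the patched sources): using the
 * qsort()/bsearch()-compatible comparator declared above. sort_hashes()
 * and find_hash() are hypothetical helper names.
 */
#include <stdlib.h>
#include "xxhash.h"

static void sort_hashes(XXH128_hash_t* arr, size_t n)
{
    qsort(arr, n, sizeof(*arr), XXH128_cmp);
}

static XXH128_hash_t* find_hash(XXH128_hash_t key, const XXH128_hash_t* arr, size_t n)
{
    return (XXH128_hash_t*)bsearch(&key, arr, n, sizeof(*arr), XXH128_cmp);
}
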
-/*
- * These definitions are only present to allow static allocation
- * of XXH states, on stack or in a struct, for example.
- * Never **ever** access their members directly.
- */
-
-/*!
- * @internal
- * @brief Structure for XXH32 streaming API.
- *
- * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
- * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is
- * an opaque type. This allows fields to safely be changed.
- *
- * Typedef'd to @ref XXH32_state_t.
- * Do not access the members of this struct directly.
- * @see XXH64_state_s, XXH3_state_s
- */
-struct XXH32_state_s {
-   XXH32_hash_t total_len_32; /*!< Total length hashed, modulo 2^32 */
-   XXH32_hash_t large_len;    /*!< Whether the hash is >= 16 (handles @ref total_len_32 overflow) */
-   XXH32_hash_t v[4];         /*!< Accumulator lanes */
-   XXH32_hash_t mem32[4];     /*!< Internal buffer for partial reads. Treated as unsigned char[16]. */
-   XXH32_hash_t memsize;      /*!< Amount of data in @ref mem32 */
-   XXH32_hash_t reserved;     /*!< Reserved field. Do not read nor write to it. */
-};   /* typedef'd to XXH32_state_t */
-
-
-#ifndef XXH_NO_LONG_LONG  /* defined when there is no 64-bit support */
-
-/*!
- * @internal
- * @brief Structure for XXH64 streaming API.
- *
- * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
- * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is
- * an opaque type. This allows fields to safely be changed.
- *
- * Typedef'd to @ref XXH64_state_t.
- * Do not access the members of this struct directly.
- * @see XXH32_state_s, XXH3_state_s
- */
-struct XXH64_state_s {
-   XXH64_hash_t total_len;    /*!< Total length hashed. This is always 64-bit. */
-   XXH64_hash_t v[4];         /*!< Accumulator lanes */
-   XXH64_hash_t mem64[4];     /*!< Internal buffer for partial reads. Treated as unsigned char[32]. */
-   XXH32_hash_t memsize;      /*!< Amount of data in @ref mem64 */
-   XXH32_hash_t reserved32;   /*!< Reserved field, needed for padding anyway */
-   XXH64_hash_t reserved64;   /*!< Reserved field. Do not read or write to it. */
-};   /* typedef'd to XXH64_state_t */
-
-#ifndef XXH_NO_XXH3
-
-#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* >= C11 */
-#  include <stdalign.h>
-#  define XXH_ALIGN(n)      alignas(n)
-#elif defined(__cplusplus) && (__cplusplus >= 201103L) /* >= C++11 */
-/* In C++ alignas() is a keyword */
-#  define XXH_ALIGN(n)      alignas(n)
-#elif defined(__GNUC__)
-#  define XXH_ALIGN(n)      __attribute__ ((aligned(n)))
-#elif defined(_MSC_VER)
-#  define XXH_ALIGN(n)      __declspec(align(n))
-#else
-#  define XXH_ALIGN(n)   /* disabled */
-#endif
-
-/* Old GCC versions only accept the attribute after the type in structures. */
-#if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L))   /* C11+ */ \
-    && ! (defined(__cplusplus) && (__cplusplus >= 201103L)) /* >= C++11 */ \
-    && defined(__GNUC__)
-#   define XXH_ALIGN_MEMBER(align, type) type XXH_ALIGN(align)
-#else
-#   define XXH_ALIGN_MEMBER(align, type) XXH_ALIGN(align) type
-#endif
-
-/*!
- * @brief The size of the internal XXH3 buffer.
- *
- * This is the optimal update size for incremental hashing.
- *
- * @see XXH3_64b_update(), XXH3_128b_update().
- */
-#define XXH3_INTERNALBUFFER_SIZE 256
-
-/*!
- * @internal
- * @brief Default size of the secret buffer (and @ref XXH3_kSecret).
- *
- * This is the size used in @ref XXH3_kSecret and the seeded functions.
- *
- * Not to be confused with @ref XXH3_SECRET_SIZE_MIN.
- */
-#define XXH3_SECRET_DEFAULT_SIZE 192
-
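
/*
 * Illustrative sketch (not from the patched sources): static allocation of
 * a streaming state, which is exactly what this section enables. Requires
 * XXH_STATIC_LINKING_ONLY so the struct definition is visible.
 * hash32_on_stack() is a hypothetical helper name.
 */
#define XXH_STATIC_LINKING_ONLY
#include "xxhash.h"

XXH32_hash_t hash32_on_stack(const void* data, size_t len, XXH32_hash_t seed)
{
    XXH32_state_t st;          /* lives on the stack, no heap allocation */
    XXH32_reset(&st, seed);
    XXH32_update(&st, data, len);
    return XXH32_digest(&st);
}
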
-/*!
- * @internal
- * @brief Structure for XXH3 streaming API.
- *
- * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
- * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined.
- * Otherwise it is an opaque type.
- * Never use this definition in combination with a dynamic library.
- * This allows fields to safely be changed in the future.
- *
- * @note ** This structure has a strict alignment requirement of 64 bytes!! **
- * Do not allocate this with `malloc()` or `new`,
- * it will not be sufficiently aligned.
- * Use @ref XXH3_createState() and @ref XXH3_freeState(), or stack allocation.
- *
- * Typedef'd to @ref XXH3_state_t.
- * Never access the members of this struct directly.
- *
- * @see XXH3_INITSTATE() for stack initialization.
- * @see XXH3_createState(), XXH3_freeState().
- * @see XXH32_state_s, XXH64_state_s
- */
-struct XXH3_state_s {
-   XXH_ALIGN_MEMBER(64, XXH64_hash_t acc[8]);
-       /*!< The 8 accumulators. See @ref XXH32_state_s::v and @ref XXH64_state_s::v */
-   XXH_ALIGN_MEMBER(64, unsigned char customSecret[XXH3_SECRET_DEFAULT_SIZE]);
-       /*!< Used to store a custom secret generated from a seed. */
-   XXH_ALIGN_MEMBER(64, unsigned char buffer[XXH3_INTERNALBUFFER_SIZE]);
-       /*!< The internal buffer. @see XXH32_state_s::mem32 */
-   XXH32_hash_t bufferedSize;
-       /*!< The amount of memory in @ref buffer, @see XXH32_state_s::memsize */
-   XXH32_hash_t useSeed;
-       /*!< Reserved field. Needed for padding on 64-bit. */
-   size_t nbStripesSoFar;
-       /*!< Number of stripes processed. */
-   XXH64_hash_t totalLen;
-       /*!< Total length hashed. 64-bit even on 32-bit targets. */
-   size_t nbStripesPerBlock;
-       /*!< Number of stripes per block. */
-   size_t secretLimit;
-       /*!< Size of @ref customSecret or @ref extSecret */
-   XXH64_hash_t seed;
-       /*!< Seed for _withSeed variants. Must be zero otherwise, @see XXH3_INITSTATE() */
-   XXH64_hash_t reserved64;
-       /*!< Reserved field. */
-   const unsigned char* extSecret;
-       /*!< Reference to an external secret for the _withSecret variants, NULL
-        *   for other variants. */
-   /* note: there may be some padding at the end due to alignment on 64 bytes */
-}; /* typedef'd to XXH3_state_t */
-
-#undef XXH_ALIGN_MEMBER
-
-/*!
- * @brief Initializes a stack-allocated `XXH3_state_s`.
- *
- * When the @ref XXH3_state_t structure is merely emplaced on stack,
- * it should be initialized with XXH3_INITSTATE() or a memset()
- * in case its first reset uses XXH3_NNbits_reset_withSeed().
- * This init can be omitted if the first reset uses default or _withSecret mode.
- * This operation isn't necessary when the state is created with XXH3_createState().
- * Note that this doesn't prepare the state for a streaming operation,
- * it's still necessary to use XXH3_NNbits_reset*() afterwards.
- */
-#define XXH3_INITSTATE(XXH3_state_ptr)                       \
-    do {                                                     \
-        XXH3_state_t* tmp_xxh3_state_ptr = (XXH3_state_ptr); \
-        tmp_xxh3_state_ptr->seed = 0;                        \
-        tmp_xxh3_state_ptr->extSecret = NULL;                \
-    } while(0)
-
-
-/*!
- * simple alias to pre-selected XXH3_128bits variant
- */
-XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH128(XXH_NOESCAPE const void* data, size_t len, XXH64_hash_t seed);
-
-
-/* ===   Experimental API   === */
-/* Symbols defined below must be considered tied to a specific library version. */
-
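
/*
 * Illustrative sketch (not from the patched sources): a stack-allocated
 * XXH3 state, initialized with XXH3_INITSTATE() as the comment above
 * requires when the first reset is a seeded one. hash3_on_stack() is a
 * hypothetical helper name.
 */
#define XXH_STATIC_LINKING_ONLY
#include "xxhash.h"

XXH64_hash_t hash3_on_stack(const void* data, size_t len, XXH64_hash_t seed)
{
    XXH3_state_t st;           /* 64-byte alignment is carried by the type */
    XXH3_INITSTATE(&st);       /* mandatory before a _withSeed reset */
    XXH3_64bits_reset_withSeed(&st, seed);
    XXH3_64bits_update(&st, data, len);
    return XXH3_64bits_digest(&st);
}
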
-/*!
- * XXH3_generateSecret():
- *
- * Derive a high-entropy secret from any user-defined content, named customSeed.
- * The generated secret can be used in combination with `*_withSecret()` functions.
- * The `_withSecret()` variants are useful to provide a higher level of protection
- * than a 64-bit seed, as it becomes much more difficult for an external actor to
- * guess how to impact the calculation logic.
- *
- * The function accepts as input a custom seed of any length and any content,
- * and derives from it a high-entropy secret of length @p secretSize into an
- * already allocated buffer @p secretBuffer.
- *
- * The generated secret can then be used with any `*_withSecret()` variant.
- * The functions @ref XXH3_128bits_withSecret(), @ref XXH3_64bits_withSecret(),
- * @ref XXH3_128bits_reset_withSecret() and @ref XXH3_64bits_reset_withSecret()
- * are part of this list. They all accept a `secret` parameter
- * which must be large enough for implementation reasons (>= @ref XXH3_SECRET_SIZE_MIN)
- * _and_ feature very high entropy (consist of random-looking bytes).
- * These conditions can be a high bar to meet, so @ref XXH3_generateSecret() can
- * be employed to ensure proper quality.
- *
- * @p customSeed can be anything. It can have any size, even small ones,
- * and its content can be anything, even "poor entropy" sources such as a bunch
- * of zeroes. The resulting `secret` will nonetheless provide all required qualities.
- *
- * @pre
- *   - @p secretSize must be >= @ref XXH3_SECRET_SIZE_MIN
- *   - When @p customSeedSize > 0, supplying NULL as customSeed is undefined behavior.
- *
- * Example code:
- * @code{.c}
- *    #include <stdio.h>
- *    #include <string.h>
- *    #include <stdlib.h>
- *    #define XXH_STATIC_LINKING_ONLY // expose unstable API
- *    #include "xxhash.h"
- *    // Hashes argv[2] using the entropy from argv[1].
- *    int main(int argc, char* argv[])
- *    {
- *        char secret[XXH3_SECRET_SIZE_MIN];
- *        if (argc != 3) { return 1; }
- *        XXH3_generateSecret(secret, sizeof(secret), argv[1], strlen(argv[1]));
- *        XXH64_hash_t h = XXH3_64bits_withSecret(
- *             argv[2], strlen(argv[2]),
- *             secret, sizeof(secret)
- *        );
- *        printf("%016llx\n", (unsigned long long) h);
- *    }
- * @endcode
- */
-XXH_PUBLIC_API XXH_errorcode XXH3_generateSecret(XXH_NOESCAPE void* secretBuffer, size_t secretSize, XXH_NOESCAPE const void* customSeed, size_t customSeedSize);
-
-/*!
- * @brief Generate the same secret as the _withSeed() variants.
- *
- * The generated secret can be used in combination with
- *`*_withSecret()` and `_withSecretandSeed()` variants.
- *
- * Example C++ `std::string` hash class:
- * @code{.cpp}
- *    #include <string>
- *    #define XXH_STATIC_LINKING_ONLY // expose unstable API
- *    #include "xxhash.h"
- *    // Slow, seeds each time
- *    class HashSlow {
- *        XXH64_hash_t seed;
- *    public:
- *        HashSlow(XXH64_hash_t s) : seed{s} {}
- *        size_t operator()(const std::string& x) const {
- *            return size_t{XXH3_64bits_withSeed(x.c_str(), x.length(), seed)};
- *        }
- *    };
- *    // Fast, caches the seeded secret for future uses.
- *    class HashFast {
- *        unsigned char secret[XXH3_SECRET_DEFAULT_SIZE];
- *    public:
- *        HashFast(XXH64_hash_t s) {
- *            XXH3_generateSecret_fromSeed(secret, s);
- *        }
- *        size_t operator()(const std::string& x) const {
- *            return size_t{
- *                XXH3_64bits_withSecret(x.c_str(), x.length(), secret, sizeof(secret))
- *            };
- *        }
- *    };
- * @endcode
- * @param secretBuffer A writable buffer of @ref XXH3_SECRET_DEFAULT_SIZE bytes
- * @param seed The seed to seed the state.
- */
-XXH_PUBLIC_API void XXH3_generateSecret_fromSeed(XXH_NOESCAPE void* secretBuffer, XXH64_hash_t seed);
-
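
/*
 * Illustrative sketch (not from the patched sources): caching the seeded
 * secret once with XXH3_generateSecret_fromSeed(), then reusing it with the
 * _withSecretandSeed() variants explained next. init_hashing() and
 * seeded_hash() are hypothetical helper names.
 */
#include <stddef.h>
#define XXH_STATIC_LINKING_ONLY   /* for XXH3_SECRET_DEFAULT_SIZE */
#include "xxhash.h"

static unsigned char g_secret[XXH3_SECRET_DEFAULT_SIZE];  /* 192 bytes */
static XXH64_hash_t  g_seed;

void init_hashing(XXH64_hash_t seed)
{
    g_seed = seed;
    XXH3_generateSecret_fromSeed(g_secret, seed);
}

XXH64_hash_t seeded_hash(const void* p, size_t len)
{
    /* per the documentation, this matches XXH3_64bits_withSeed(p, len, g_seed)
     * while skipping the on-the-fly secret generation for large inputs */
    return XXH3_64bits_withSecretandSeed(p, len, g_secret, sizeof(g_secret), g_seed);
}
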
- * It's fast, but its cost can be perceptible for "not so large" keys (< 1 KB).
- * `_withSecret()` has to generate the masks on the fly for "small" keys,
- * which requires more instructions than _withSeed() variants.
- * Therefore, the _withSecretandSeed() variant combines the best of both worlds.
- *
- * When @p secret has been generated by XXH3_generateSecret_fromSeed(),
- * this variant produces *exactly* the same results as the `_withSeed()` variant,
- * hence offering only a pure speed benefit on "large" input,
- * by skipping the need to regenerate the secret for every large input.
- *
- * Another usage scenario is to hash the secret to a 64-bit hash value,
- * for example with XXH3_64bits(), which then becomes the seed,
- * and then employ both the seed and the secret in _withSecretandSeed().
- * On top of speed, an added benefit is that each bit in the secret
- * has a 50% chance to swap each bit in the output, via its impact on the seed.
- *
- * This is not guaranteed when using the secret directly in "small data" scenarios,
- * because only portions of the secret are employed for small data.
- */
-XXH_PUBLIC_API XXH_PUREF XXH64_hash_t
-XXH3_64bits_withSecretandSeed(XXH_NOESCAPE const void* data, size_t len,
- XXH_NOESCAPE const void* secret, size_t secretSize,
- XXH64_hash_t seed);
-/*! @copydoc XXH3_64bits_withSecretandSeed() */
-XXH_PUBLIC_API XXH_PUREF XXH128_hash_t
-XXH3_128bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t length,
- XXH_NOESCAPE const void* secret, size_t secretSize,
- XXH64_hash_t seed64);
-#ifndef XXH_NO_STREAM
-/*! @copydoc XXH3_64bits_withSecretandSeed() */
-XXH_PUBLIC_API XXH_errorcode
-XXH3_64bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr,
- XXH_NOESCAPE const void* secret, size_t secretSize,
- XXH64_hash_t seed64);
-/*! @copydoc XXH3_64bits_withSecretandSeed() */
-XXH_PUBLIC_API XXH_errorcode
-XXH3_128bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr,
- XXH_NOESCAPE const void* secret, size_t secretSize,
- XXH64_hash_t seed64);
-#endif /* !XXH_NO_STREAM */
-
-#endif /* !XXH_NO_XXH3 */
-#endif /* XXH_NO_LONG_LONG */
-#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
-# define XXH_IMPLEMENTATION
-#endif
-
-#endif /* defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742) */
-
-
-/* ======================================================================== */
-/* ======================================================================== */
-/* ======================================================================== */
-
-
-/*-**********************************************************************
- * xxHash implementation
- *-**********************************************************************
- * xxHash's implementation used to be hosted inside xxhash.c.
- *
- * However, inlining requires implementation to be visible to the compiler,
- * hence be included alongside the header.
- * Previously, implementation was hosted inside xxhash.c,
- * which was then #included when inlining was activated.
- * This construction created issues with a few build and install systems,
- * as it required xxhash.c to be stored in the /include directory.
- *
- * xxHash implementation is now directly integrated within xxhash.h.
- * As a consequence, xxhash.c is no longer needed in /include.
- *
- * xxhash.c is still available and is still useful.
- * In a "normal" setup,
- * xxhash.h only exposes the prototypes and public symbols,
- * while xxhash.c can be built into an object file xxhash.o
- * which can then be linked into the final binary.
- ************************************************************************/
-
-#if ( defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) \
- || defined(XXH_IMPLEMENTATION) ) && !defined(XXH_IMPLEM_13a8737387)
-# define XXH_IMPLEM_13a8737387
-
-/* *************************************
-* Tuning parameters
-***************************************/
-
-/*!
- * @defgroup tuning Tuning parameters
- * @{
- *
- * Various macros to control xxHash's behavior.
- */
-#ifdef XXH_DOXYGEN
-/*!
- * @brief Define this to disable 64-bit code.
- *
- * Useful if only using the @ref XXH32_family and you have a strict C90 compiler.
- */
-# define XXH_NO_LONG_LONG
-# undef XXH_NO_LONG_LONG /* don't actually */
-/*!
- * @brief Controls how unaligned memory is accessed.
- *
- * By default, access to unaligned memory is controlled by `memcpy()`, which is
- * safe and portable.
- *
- * Unfortunately, on some target/compiler combinations, the generated assembly
- * is sub-optimal.
- *
- * The switch below allows selection of a different access method
- * in the search for improved performance.
- *
- * @par Possible options:
- *
- * - `XXH_FORCE_MEMORY_ACCESS=0` (default): `memcpy`
- * @par
- * Use `memcpy()`. Safe and portable. Note that most modern compilers will
- * eliminate the function call and treat it as an unaligned access.
- *
- * - `XXH_FORCE_MEMORY_ACCESS=1`: `__attribute__((aligned(1)))`
- * @par
- * Depends on compiler extensions and is therefore not portable.
- * This method is safe _if_ your compiler supports it,
- * and *generally* as fast or faster than `memcpy`.
- *
- * - `XXH_FORCE_MEMORY_ACCESS=2`: Direct cast
- * @par
- * Casts directly and dereferences. This method doesn't depend on the
- * compiler, but it violates the C standard as it directly dereferences an
- * unaligned pointer. It can generate buggy code on targets which do not
- * support unaligned memory accesses, but in some circumstances, it's the
- * only known way to get the most performance.
- *
- * - `XXH_FORCE_MEMORY_ACCESS=3`: Byteshift
- * @par
- * Also portable. This can generate the best code on old compilers which don't
- * inline small `memcpy()` calls, and it might also be faster on big-endian
- * systems which lack a native byteswap instruction. However, some compilers
- * will emit literal byteshifts even if the target supports unaligned access.
- *
- *
- * @warning
- * Methods 1 and 2 rely on implementation-defined behavior. Use these with
- * care, as what works on one compiler/platform/optimization level may cause
- * another to read garbage data or even crash.
- *
- * See https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html for details.
- *
- * Prefer these methods in priority order (0 > 3 > 1 > 2)
- */
-# define XXH_FORCE_MEMORY_ACCESS 0
-
-/*!
- * @def XXH_SIZE_OPT
- * @brief Controls how much xxHash optimizes for size.
- *
- * xxHash, when compiled, tends to result in a rather large binary size. This
- * is mostly due to heavy usage of forced inlining and constant folding of the
- * @ref XXH3_family to increase performance.
- *
- * However, some developers prefer size over speed. This option can
- * significantly reduce the size of the generated code.
- * When using the `-Os` or `-Oz` options on GCC or Clang, this is defined
- * to 1 by default, otherwise it is defined to 0.
- *
- * Most of these size optimizations can be controlled manually.
- *
- * This is a number from 0 to 2.
- * - `XXH_SIZE_OPT` == 0: Default. xxHash makes no size optimizations. Speed
- * comes first.
- * - `XXH_SIZE_OPT` == 1: Default for `-Os` and `-Oz`. xxHash is more
- * conservative and disables hacks that increase code size. It implies the
- * options @ref XXH_NO_INLINE_HINTS == 1, @ref XXH_FORCE_ALIGN_CHECK == 0,
- * and @ref XXH3_NEON_LANES == 8 if they are not already defined.
- * - `XXH_SIZE_OPT` == 2: xxHash tries to make itself as small as possible.
- * Performance may cry. For example, the single shot functions just use the
- * streaming API.
- */
-# define XXH_SIZE_OPT 0
-
-/*!
- * @def XXH_FORCE_ALIGN_CHECK
- * @brief If defined to non-zero, adds a special path for aligned inputs (XXH32()
- * and XXH64() only).
- *
- * This is an important performance trick for architectures without decent
- * unaligned memory access performance.
- *
- * It checks for input alignment, and when conditions are met, uses a "fast
- * path" employing direct 32-bit/64-bit reads, resulting in _dramatically
- * faster_ read speed.
- *
- * The check costs one initial branch per hash, which is generally negligible,
- * but not zero.
- *
- * Moreover, it's not useful to generate an additional code path if memory
- * access uses the same instruction for both aligned and unaligned
- * addresses (e.g. x86 and aarch64).
- *
- * In these cases, the alignment check can be removed by setting this macro to 0.
- * Then the code will always use unaligned memory access.
- * Align check is automatically disabled on x86, x64, ARM64, and some ARM chips
- * which are platforms known to offer good unaligned memory access performance.
- *
- * It is also disabled by default when @ref XXH_SIZE_OPT >= 1.
- *
- * This option does not affect XXH3 (only XXH32 and XXH64).
- */
-# define XXH_FORCE_ALIGN_CHECK 0
-
-/*!
- * @def XXH_NO_INLINE_HINTS
- * @brief When non-zero, sets all functions to `static`.
- *
- * By default, xxHash tries to force the compiler to inline almost all internal
- * functions.
- *
- * This can usually improve performance due to reduced jumping and improved
- * constant folding, but significantly increases the size of the binary which
- * might not be favorable.
- *
- * Additionally, sometimes the forced inlining can be detrimental to performance,
- * depending on the architecture.
- *
- * XXH_NO_INLINE_HINTS marks all internal functions as static, giving the
- * compiler full control on whether to inline or not.
- *
- * When not optimizing (-O0), using `-fno-inline` with GCC or Clang, or if
- * @ref XXH_SIZE_OPT >= 1, this will automatically be defined.
- */
-# define XXH_NO_INLINE_HINTS 0
-
-/*!
- * @def XXH3_INLINE_SECRET
- * @brief Determines whether to inline the XXH3 withSecret code.
- *
- * When the secret size is known, the compiler can improve the performance
- * of XXH3_64bits_withSecret() and XXH3_128bits_withSecret().
- *
- * However, if the secret size is not known, it doesn't have any benefit. This
- * happens when xxHash is compiled into a global symbol. Therefore, if
- * @ref XXH_INLINE_ALL is *not* defined, this will be defined to 0.
- *
- * Additionally, this defaults to 0 on GCC 12+, which has an issue with function pointers
- * that are *sometimes* force inline on -Og, and it is impossible to automatically
- * detect this optimization level.
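- *
- * A minimal sketch of the favorable case (assuming an XXH_INLINE_ALL build;
- * the names below are illustrative):
- * @code{.c}
- * #define XXH_INLINE_ALL
- * #include "xxhash.h"
- * static unsigned char kSecret[XXH3_SECRET_SIZE_MIN];
- * XXH64_hash_t hashBuf(const void* p, size_t n)
- * {
- * // sizeof(kSecret) is a compile-time constant, so the inlined
- * // withSecret code path can be specialized for this secret size:
- * return XXH3_64bits_withSecret(p, n, kSecret, sizeof(kSecret));
- * }
- * @endcode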
- */
-# define XXH3_INLINE_SECRET 0
-
-/*!
- * @def XXH32_ENDJMP
- * @brief Whether to use a jump for `XXH32_finalize`.
- *
- * For performance, `XXH32_finalize` uses multiple branches in the finalizer.
- * This is generally faster, but depending on the exact architecture, a jmp
- * may be preferable.
- *
- * This setting is only likely to make a difference for very small inputs.
- */
-# define XXH32_ENDJMP 0
-
-/*!
- * @internal
- * @brief Redefines old internal names.
- *
- * For compatibility with code that uses xxHash's internals before the names
- * were changed to improve namespacing. There is no other reason to use this.
- */
-# define XXH_OLD_NAMES
-# undef XXH_OLD_NAMES /* don't actually use, it is ugly. */
-
-/*!
- * @def XXH_NO_STREAM
- * @brief Disables the streaming API.
- *
- * When xxHash is not inlined and the streaming functions are not used, disabling
- * the streaming functions can improve code size significantly, especially with
- * the @ref XXH3_family which tends to make constant folded copies of itself.
- */
-# define XXH_NO_STREAM
-# undef XXH_NO_STREAM /* don't actually */
-#endif /* XXH_DOXYGEN */
-/*!
- * @}
- */
-
-#ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
- /* prefer __packed__ structures (method 1) for GCC
- * < ARMv7 with unaligned access (e.g. Raspbian armhf) still uses byte shifting, so we use memcpy
- * which for some reason does unaligned loads. */
-# if defined(__GNUC__) && !(defined(__ARM_ARCH) && __ARM_ARCH < 7 && defined(__ARM_FEATURE_UNALIGNED))
-# define XXH_FORCE_MEMORY_ACCESS 1
-# endif
-#endif
-
-#ifndef XXH_SIZE_OPT
- /* default to 1 for -Os or -Oz */
-# if (defined(__GNUC__) || defined(__clang__)) && defined(__OPTIMIZE_SIZE__)
-# define XXH_SIZE_OPT 1
-# else
-# define XXH_SIZE_OPT 0
-# endif
-#endif
-
-#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */
- /* don't check on sizeopt, x86, aarch64, or arm when unaligned access is available */
-# if XXH_SIZE_OPT >= 1 || \
- defined(__i386) || defined(__x86_64__) || defined(__aarch64__) || defined(__ARM_FEATURE_UNALIGNED) \
- || defined(_M_IX86) || defined(_M_X64) || defined(_M_ARM64) || defined(_M_ARM) /* visual */
-# define XXH_FORCE_ALIGN_CHECK 0
-# else
-# define XXH_FORCE_ALIGN_CHECK 1
-# endif
-#endif
-
-#ifndef XXH_NO_INLINE_HINTS
-# if XXH_SIZE_OPT >= 1 || defined(__NO_INLINE__) /* -O0, -fno-inline */
-# define XXH_NO_INLINE_HINTS 1
-# else
-# define XXH_NO_INLINE_HINTS 0
-# endif
-#endif
-
-#ifndef XXH3_INLINE_SECRET
-# if (defined(__GNUC__) && !defined(__clang__) && __GNUC__ >= 12) \
- || !defined(XXH_INLINE_ALL)
-# define XXH3_INLINE_SECRET 0
-# else
-# define XXH3_INLINE_SECRET 1
-# endif
-#endif
-
-#ifndef XXH32_ENDJMP
-/* generally preferable for performance */
-# define XXH32_ENDJMP 0
-#endif
-
-/*!
- * @defgroup impl Implementation
- * @{
- */
-
-
-/* *************************************
-* Includes & Memory related functions
-***************************************/
-#if defined(XXH_NO_STREAM)
-/* nothing */
-#elif defined(XXH_NO_STDLIB)
-
-/* When requesting to disable any mention of stdlib,
- * the library loses the ability to invoke malloc() / free().
- * In practice, it means that functions like `XXH*_createState()`
- * will always fail, and return NULL.
- * This flag is useful in situations where
- * xxhash.h is integrated into some kernel, embedded or limited environment
- * without access to dynamic allocation.
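- *
- * A minimal sketch of coping with this restriction (assuming XXH_NO_STDLIB
- * is defined; error handling elided):
- * @code{.c}
- * XXH3_state_t* s = XXH3_createState(); // always NULL under XXH_NO_STDLIB
- * if (s == NULL) {
- * XXH3_state_t st; // fall back to a stack-allocated state
- * XXH3_INITSTATE(&st);
- * XXH3_64bits_reset(&st);
- * // ... then XXH3_64bits_update(&st, buf, len) and XXH3_64bits_digest(&st)
- * }
- * @endcode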
- */
-
-static XXH_CONSTF void* XXH_malloc(size_t s) { (void)s; return NULL; }
-static void XXH_free(void* p) { (void)p; }
-
-#else
-
-/*
- * Modify the local functions below should you wish to use
- * different memory routines for malloc() and free()
- */
-#include <stdlib.h>
-
-/*!
- * @internal
- * @brief Modify this function to use a different routine than malloc().
- */
-static XXH_MALLOCF void* XXH_malloc(size_t s) { return malloc(s); }
-
-/*!
- * @internal
- * @brief Modify this function to use a different routine than free().
- */
-static void XXH_free(void* p) { free(p); }
-
-#endif /* XXH_NO_STDLIB */
-
-#include <string.h>
-
-/*!
- * @internal
- * @brief Modify this function to use a different routine than memcpy().
- */
-static void* XXH_memcpy(void* dest, const void* src, size_t size)
-{
- return memcpy(dest,src,size);
-}
-
-#include <limits.h> /* ULLONG_MAX */
-
-
-/* *************************************
-* Compiler Specific Options
-***************************************/
-#ifdef _MSC_VER /* Visual Studio warning fix */
-# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
-#endif
-
-#if XXH_NO_INLINE_HINTS /* disable inlining hints */
-# if defined(__GNUC__) || defined(__clang__)
-# define XXH_FORCE_INLINE static __attribute__((unused))
-# else
-# define XXH_FORCE_INLINE static
-# endif
-# define XXH_NO_INLINE static
-/* enable inlining hints */
-#elif defined(__GNUC__) || defined(__clang__)
-# define XXH_FORCE_INLINE static __inline__ __attribute__((always_inline, unused))
-# define XXH_NO_INLINE static __attribute__((noinline))
-#elif defined(_MSC_VER) /* Visual Studio */
-# define XXH_FORCE_INLINE static __forceinline
-# define XXH_NO_INLINE static __declspec(noinline)
-#elif defined (__cplusplus) \
- || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) /* C99 */
-# define XXH_FORCE_INLINE static inline
-# define XXH_NO_INLINE static
-#else
-# define XXH_FORCE_INLINE static
-# define XXH_NO_INLINE static
-#endif
-
-#if XXH3_INLINE_SECRET
-# define XXH3_WITH_SECRET_INLINE XXH_FORCE_INLINE
-#else
-# define XXH3_WITH_SECRET_INLINE XXH_NO_INLINE
-#endif
-
-
-/* *************************************
-* Debug
-***************************************/
-/*!
- * @ingroup tuning
- * @def XXH_DEBUGLEVEL
- * @brief Sets the debugging level.
- *
- * XXH_DEBUGLEVEL is expected to be defined externally, typically via the
- * compiler's command line options. The value must be a number.
- */
-#ifndef XXH_DEBUGLEVEL
-# ifdef DEBUGLEVEL /* backwards compat */
-# define XXH_DEBUGLEVEL DEBUGLEVEL
-# else
-# define XXH_DEBUGLEVEL 0
-# endif
-#endif
-
-#if (XXH_DEBUGLEVEL>=1)
-# include <assert.h> /* note: can still be disabled with NDEBUG */
-# define XXH_ASSERT(c) assert(c)
-#else
-# if defined(__INTEL_COMPILER)
-# define XXH_ASSERT(c) XXH_ASSUME((unsigned char) (c))
-# else
-# define XXH_ASSERT(c) XXH_ASSUME(c)
-# endif
-#endif
-
-/* note: use after variable declarations */
-#ifndef XXH_STATIC_ASSERT
-# if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* C11 */
-# define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { _Static_assert((c),m); } while(0)
-# elif defined(__cplusplus) && (__cplusplus >= 201103L) /* C++11 */
-# define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { static_assert((c),m); } while(0)
-# else
-# define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { struct xxh_sa { char x[(c) ? 1 : -1]; }; } while(0)
-# endif
-# define XXH_STATIC_ASSERT(c) XXH_STATIC_ASSERT_WITH_MESSAGE((c),#c)
-#endif
-
-/*!
- * @internal
- * @def XXH_COMPILER_GUARD(var)
- * @brief Used to prevent unwanted optimizations for @p var.
- *
- * It uses an empty GCC inline assembly statement with a register constraint
- * which forces @p var into a general purpose register (e.g. eax, ebx, ecx
- * on x86) and marks it as modified.
- *
- * This is used in a few places to avoid unwanted autovectorization (e.g.
- * XXH32_round()). All vectorization we want is explicit via intrinsics,
- * and _usually_ isn't wanted elsewhere.
- *
- * We also use it to prevent unwanted constant folding for AArch64 in
- * XXH3_initCustomSecret_scalar().
- */
-#if defined(__GNUC__) || defined(__clang__)
-# define XXH_COMPILER_GUARD(var) __asm__("" : "+r" (var))
-#else
-# define XXH_COMPILER_GUARD(var) ((void)0)
-#endif
-
-/* Specifically for NEON vectors which use the "w" constraint, on
- * Clang. */
-#if defined(__clang__) && defined(__ARM_ARCH) && !defined(__wasm__)
-# define XXH_COMPILER_GUARD_CLANG_NEON(var) __asm__("" : "+w" (var))
-#else
-# define XXH_COMPILER_GUARD_CLANG_NEON(var) ((void)0)
-#endif
-
-/* *************************************
-* Basic Types
-***************************************/
-#if !defined (__VMS) \
- && (defined (__cplusplus) \
- || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
-# include <stdint.h>
- typedef uint8_t xxh_u8;
-#else
- typedef unsigned char xxh_u8;
-#endif
-typedef XXH32_hash_t xxh_u32;
-
-#ifdef XXH_OLD_NAMES
-# warning "XXH_OLD_NAMES is planned to be removed starting v0.9. If the program depends on it, consider moving away from it by employing newer type names directly"
-# define BYTE xxh_u8
-# define U8 xxh_u8
-# define U32 xxh_u32
-#endif
-
-/* *** Memory access *** */
-
-/*!
- * @internal
- * @fn xxh_u32 XXH_read32(const void* ptr)
- * @brief Reads an unaligned 32-bit integer from @p ptr in native endianness.
- *
- * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
- *
- * @param ptr The pointer to read from.
- * @return The 32-bit native endian integer from the bytes at @p ptr.
- */
-
-/*!
- * @internal
- * @fn xxh_u32 XXH_readLE32(const void* ptr)
- * @brief Reads an unaligned 32-bit little endian integer from @p ptr.
- *
- * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
- *
- * @param ptr The pointer to read from.
- * @return The 32-bit little endian integer from the bytes at @p ptr.
- */
-
-/*!
- * @internal
- * @fn xxh_u32 XXH_readBE32(const void* ptr)
- * @brief Reads an unaligned 32-bit big endian integer from @p ptr.
- *
- * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
- *
- * @param ptr The pointer to read from.
- * @return The 32-bit big endian integer from the bytes at @p ptr.
- */
-
-/*!
- * @internal
- * @fn xxh_u32 XXH_readLE32_align(const void* ptr, XXH_alignment align)
- * @brief Like @ref XXH_readLE32(), but has an option for aligned reads.
- *
- * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
- * Note that when @ref XXH_FORCE_ALIGN_CHECK == 0, the @p align parameter is
- * always @ref XXH_alignment::XXH_unaligned.
- *
- * @param ptr The pointer to read from.
- * @param align Whether @p ptr is aligned.
- * @pre
- * If @p align == @ref XXH_alignment::XXH_aligned, @p ptr must be 4-byte
- * aligned.
- * @return The 32-bit little endian integer from the bytes at @p ptr.
- */
-
-#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
-/*
- * Manual byteshift. Best for old compilers which don't inline memcpy.
- * We actually directly use XXH_readLE32 and XXH_readBE32.
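- * (Their byteshift definitions appear a few lines below, in the
- * "Memory reads" section.)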
- */ -#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) - -/* - * Force direct memory access. Only works on CPU which support unaligned memory - * access in hardware. - */ -static xxh_u32 XXH_read32(const void* memPtr) { return *(const xxh_u32*) memPtr; } - -#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) - -/* - * __attribute__((aligned(1))) is supported by gcc and clang. Originally the - * documentation claimed that it only increased the alignment, but actually it - * can decrease it on gcc, clang, and icc: - * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=69502, - * https://gcc.godbolt.org/z/xYez1j67Y. - */ -#ifdef XXH_OLD_NAMES -typedef union { xxh_u32 u32; } __attribute__((packed)) unalign; -#endif -static xxh_u32 XXH_read32(const void* ptr) -{ - typedef __attribute__((aligned(1))) xxh_u32 xxh_unalign32; - return *((const xxh_unalign32*)ptr); -} - -#else - -/* - * Portable and safe solution. Generally efficient. - * see: https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html - */ -static xxh_u32 XXH_read32(const void* memPtr) -{ - xxh_u32 val; - XXH_memcpy(&val, memPtr, sizeof(val)); - return val; -} - -#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ - - -/* *** Endianness *** */ - -/*! - * @ingroup tuning - * @def XXH_CPU_LITTLE_ENDIAN - * @brief Whether the target is little endian. - * - * Defined to 1 if the target is little endian, or 0 if it is big endian. - * It can be defined externally, for example on the compiler command line. - * - * If it is not defined, - * a runtime check (which is usually constant folded) is used instead. - * - * @note - * This is not necessarily defined to an integer constant. - * - * @see XXH_isLittleEndian() for the runtime check. - */ -#ifndef XXH_CPU_LITTLE_ENDIAN -/* - * Try to detect endianness automatically, to avoid the nonstandard behavior - * in `XXH_isLittleEndian()` - */ -# if defined(_WIN32) /* Windows is always little endian */ \ - || defined(__LITTLE_ENDIAN__) \ - || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) -# define XXH_CPU_LITTLE_ENDIAN 1 -# elif defined(__BIG_ENDIAN__) \ - || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) -# define XXH_CPU_LITTLE_ENDIAN 0 -# else -/*! - * @internal - * @brief Runtime check for @ref XXH_CPU_LITTLE_ENDIAN. - * - * Most compilers will constant fold this. - */ -static int XXH_isLittleEndian(void) -{ - /* - * Portable and well-defined behavior. - * Don't use static: it is detrimental to performance. - */ - const union { xxh_u32 u; xxh_u8 c[4]; } one = { 1 }; - return one.c[0]; -} -# define XXH_CPU_LITTLE_ENDIAN XXH_isLittleEndian() -# endif -#endif - - - - -/* **************************************** -* Compiler-specific Functions and Macros -******************************************/ -#define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) - -#ifdef __has_builtin -# define XXH_HAS_BUILTIN(x) __has_builtin(x) -#else -# define XXH_HAS_BUILTIN(x) 0 -#endif - - - -/* - * C23 and future versions have standard "unreachable()". 
- * Once it has been implemented reliably, we can add it as an
- * additional case:
- *
- * ```
- * #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= XXH_C23_VN)
- * # include <stddef.h>
- * # ifdef unreachable
- * # define XXH_UNREACHABLE() unreachable()
- * # endif
- * #endif
- * ```
- *
- * Note C++23 also has std::unreachable() which can be detected
- * as follows:
- * ```
- * #if defined(__cpp_lib_unreachable) && (__cpp_lib_unreachable >= 202202L)
- * # include <utility>
- * # define XXH_UNREACHABLE() std::unreachable()
- * #endif
- * ```
- * NB: `__cpp_lib_unreachable` is defined in the `<version>` header.
- * We don't use that as including `<utility>` in `extern "C"` blocks
- * doesn't work on GCC 12.
- */
-
-#if XXH_HAS_BUILTIN(__builtin_unreachable)
-# define XXH_UNREACHABLE() __builtin_unreachable()
-
-#elif defined(_MSC_VER)
-# define XXH_UNREACHABLE() __assume(0)
-
-#else
-# define XXH_UNREACHABLE()
-#endif
-
-#if XXH_HAS_BUILTIN(__builtin_assume)
-# define XXH_ASSUME(c) __builtin_assume(c)
-#else
-# define XXH_ASSUME(c) if (!(c)) { XXH_UNREACHABLE(); }
-#endif
-
-/*!
- * @internal
- * @def XXH_rotl32(x,r)
- * @brief 32-bit rotate left.
- *
- * @param x The 32-bit integer to be rotated.
- * @param r The number of bits to rotate.
- * @pre
- * @p r > 0 && @p r < 32
- * @note
- * @p x and @p r may be evaluated multiple times.
- * @return The rotated result.
- */
-#if !defined(NO_CLANG_BUILTIN) && XXH_HAS_BUILTIN(__builtin_rotateleft32) \
- && XXH_HAS_BUILTIN(__builtin_rotateleft64)
-# define XXH_rotl32 __builtin_rotateleft32
-# define XXH_rotl64 __builtin_rotateleft64
-/* Note: although _rotl exists for minGW (GCC under windows), performance seems poor */
-#elif defined(_MSC_VER)
-# define XXH_rotl32(x,r) _rotl(x,r)
-# define XXH_rotl64(x,r) _rotl64(x,r)
-#else
-# define XXH_rotl32(x,r) (((x) << (r)) | ((x) >> (32 - (r))))
-# define XXH_rotl64(x,r) (((x) << (r)) | ((x) >> (64 - (r))))
-#endif
-
-/*!
- * @internal
- * @fn xxh_u32 XXH_swap32(xxh_u32 x)
- * @brief A 32-bit byteswap.
- *
- * @param x The 32-bit integer to byteswap.
- * @return @p x, byteswapped.
- */
-#if defined(_MSC_VER) /* Visual Studio */
-# define XXH_swap32 _byteswap_ulong
-#elif XXH_GCC_VERSION >= 403
-# define XXH_swap32 __builtin_bswap32
-#else
-static xxh_u32 XXH_swap32 (xxh_u32 x)
-{
- return ((x << 24) & 0xff000000 ) |
- ((x << 8) & 0x00ff0000 ) |
- ((x >> 8) & 0x0000ff00 ) |
- ((x >> 24) & 0x000000ff );
-}
-#endif
-
-
-/* ***************************
-* Memory reads
-*****************************/
-
-/*!
- * @internal
- * @brief Enum to indicate whether a pointer is aligned.
- */
-typedef enum {
- XXH_aligned, /*!< Aligned */
- XXH_unaligned /*!< Possibly unaligned */
-} XXH_alignment;
-
-/*
- * XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load.
- *
- * This is ideal for older compilers which don't inline memcpy.
- */
-#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
-
-XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* memPtr)
-{
- const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
- return bytePtr[0]
- | ((xxh_u32)bytePtr[1] << 8)
- | ((xxh_u32)bytePtr[2] << 16)
- | ((xxh_u32)bytePtr[3] << 24);
-}
-
-XXH_FORCE_INLINE xxh_u32 XXH_readBE32(const void* memPtr)
-{
- const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
- return bytePtr[3]
- | ((xxh_u32)bytePtr[2] << 8)
- | ((xxh_u32)bytePtr[1] << 16)
- | ((xxh_u32)bytePtr[0] << 24);
-}
-
-#else
-XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* ptr)
-{
- return XXH_CPU_LITTLE_ENDIAN ?
XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr)); -} - -static xxh_u32 XXH_readBE32(const void* ptr) -{ - return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr); -} -#endif - -XXH_FORCE_INLINE xxh_u32 -XXH_readLE32_align(const void* ptr, XXH_alignment align) -{ - if (align==XXH_unaligned) { - return XXH_readLE32(ptr); - } else { - return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u32*)ptr : XXH_swap32(*(const xxh_u32*)ptr); - } -} - - -/* ************************************* -* Misc -***************************************/ -/*! @ingroup public */ -XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; } - - -/* ******************************************************************* -* 32-bit hash functions -*********************************************************************/ -/*! - * @} - * @defgroup XXH32_impl XXH32 implementation - * @ingroup impl - * - * Details on the XXH32 implementation. - * @{ - */ - /* #define instead of static const, to be used as initializers */ -#define XXH_PRIME32_1 0x9E3779B1U /*!< 0b10011110001101110111100110110001 */ -#define XXH_PRIME32_2 0x85EBCA77U /*!< 0b10000101111010111100101001110111 */ -#define XXH_PRIME32_3 0xC2B2AE3DU /*!< 0b11000010101100101010111000111101 */ -#define XXH_PRIME32_4 0x27D4EB2FU /*!< 0b00100111110101001110101100101111 */ -#define XXH_PRIME32_5 0x165667B1U /*!< 0b00010110010101100110011110110001 */ - -#ifdef XXH_OLD_NAMES -# define PRIME32_1 XXH_PRIME32_1 -# define PRIME32_2 XXH_PRIME32_2 -# define PRIME32_3 XXH_PRIME32_3 -# define PRIME32_4 XXH_PRIME32_4 -# define PRIME32_5 XXH_PRIME32_5 -#endif - -/*! - * @internal - * @brief Normal stripe processing routine. - * - * This shuffles the bits so that any bit from @p input impacts several bits in - * @p acc. - * - * @param acc The accumulator lane. - * @param input The stripe of input to mix. - * @return The mixed accumulator lane. - */ -static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input) -{ - acc += input * XXH_PRIME32_2; - acc = XXH_rotl32(acc, 13); - acc *= XXH_PRIME32_1; -#if (defined(__SSE4_1__) || defined(__aarch64__) || defined(__wasm_simd128__)) && !defined(XXH_ENABLE_AUTOVECTORIZE) - /* - * UGLY HACK: - * A compiler fence is the only thing that prevents GCC and Clang from - * autovectorizing the XXH32 loop (pragmas and attributes don't work for some - * reason) without globally disabling SSE4.1. - * - * The reason we want to avoid vectorization is because despite working on - * 4 integers at a time, there are multiple factors slowing XXH32 down on - * SSE4: - * - There's a ridiculous amount of lag from pmulld (10 cycles of latency on - * newer chips!) making it slightly slower to multiply four integers at - * once compared to four integers independently. Even when pmulld was - * fastest, Sandy/Ivy Bridge, it is still not worth it to go into SSE - * just to multiply unless doing a long operation. - * - * - Four instructions are required to rotate, - * movqda tmp, v // not required with VEX encoding - * pslld tmp, 13 // tmp <<= 13 - * psrld v, 19 // x >>= 19 - * por v, tmp // x |= tmp - * compared to one for scalar: - * roll v, 13 // reliably fast across the board - * shldl v, v, 13 // Sandy Bridge and later prefer this for some reason - * - * - Instruction level parallelism is actually more beneficial here because - * the SIMD actually serializes this operation: While v1 is rotating, v2 - * can load data, while v3 can multiply. SSE forces them to operate - * together. 
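- *
- * Mechanically, XXH_COMPILER_GUARD(acc) below expands (on GCC and Clang) to
- * an empty inline-asm statement with a "+r" constraint, which makes `acc`
- * opaque to the vectorizer without emitting any actual instruction.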
- * - * This is also enabled on AArch64, as Clang is *very aggressive* in vectorizing - * the loop. NEON is only faster on the A53, and with the newer cores, it is less - * than half the speed. - * - * Additionally, this is used on WASM SIMD128 because it JITs to the same - * SIMD instructions and has the same issue. - */ - XXH_COMPILER_GUARD(acc); -#endif - return acc; -} - -/*! - * @internal - * @brief Mixes all bits to finalize the hash. - * - * The final mix ensures that all input bits have a chance to impact any bit in - * the output digest, resulting in an unbiased distribution. - * - * @param hash The hash to avalanche. - * @return The avalanched hash. - */ -static xxh_u32 XXH32_avalanche(xxh_u32 hash) -{ - hash ^= hash >> 15; - hash *= XXH_PRIME32_2; - hash ^= hash >> 13; - hash *= XXH_PRIME32_3; - hash ^= hash >> 16; - return hash; -} - -#define XXH_get32bits(p) XXH_readLE32_align(p, align) - -/*! - * @internal - * @brief Processes the last 0-15 bytes of @p ptr. - * - * There may be up to 15 bytes remaining to consume from the input. - * This final stage will digest them to ensure that all input bytes are present - * in the final mix. - * - * @param hash The hash to finalize. - * @param ptr The pointer to the remaining input. - * @param len The remaining length, modulo 16. - * @param align Whether @p ptr is aligned. - * @return The finalized hash. - * @see XXH64_finalize(). - */ -static XXH_PUREF xxh_u32 -XXH32_finalize(xxh_u32 hash, const xxh_u8* ptr, size_t len, XXH_alignment align) -{ -#define XXH_PROCESS1 do { \ - hash += (*ptr++) * XXH_PRIME32_5; \ - hash = XXH_rotl32(hash, 11) * XXH_PRIME32_1; \ -} while (0) - -#define XXH_PROCESS4 do { \ - hash += XXH_get32bits(ptr) * XXH_PRIME32_3; \ - ptr += 4; \ - hash = XXH_rotl32(hash, 17) * XXH_PRIME32_4; \ -} while (0) - - if (ptr==NULL) XXH_ASSERT(len == 0); - - /* Compact rerolled version; generally faster */ - if (!XXH32_ENDJMP) { - len &= 15; - while (len >= 4) { - XXH_PROCESS4; - len -= 4; - } - while (len > 0) { - XXH_PROCESS1; - --len; - } - return XXH32_avalanche(hash); - } else { - switch(len&15) /* or switch(bEnd - p) */ { - case 12: XXH_PROCESS4; - XXH_FALLTHROUGH; /* fallthrough */ - case 8: XXH_PROCESS4; - XXH_FALLTHROUGH; /* fallthrough */ - case 4: XXH_PROCESS4; - return XXH32_avalanche(hash); - - case 13: XXH_PROCESS4; - XXH_FALLTHROUGH; /* fallthrough */ - case 9: XXH_PROCESS4; - XXH_FALLTHROUGH; /* fallthrough */ - case 5: XXH_PROCESS4; - XXH_PROCESS1; - return XXH32_avalanche(hash); - - case 14: XXH_PROCESS4; - XXH_FALLTHROUGH; /* fallthrough */ - case 10: XXH_PROCESS4; - XXH_FALLTHROUGH; /* fallthrough */ - case 6: XXH_PROCESS4; - XXH_PROCESS1; - XXH_PROCESS1; - return XXH32_avalanche(hash); - - case 15: XXH_PROCESS4; - XXH_FALLTHROUGH; /* fallthrough */ - case 11: XXH_PROCESS4; - XXH_FALLTHROUGH; /* fallthrough */ - case 7: XXH_PROCESS4; - XXH_FALLTHROUGH; /* fallthrough */ - case 3: XXH_PROCESS1; - XXH_FALLTHROUGH; /* fallthrough */ - case 2: XXH_PROCESS1; - XXH_FALLTHROUGH; /* fallthrough */ - case 1: XXH_PROCESS1; - XXH_FALLTHROUGH; /* fallthrough */ - case 0: return XXH32_avalanche(hash); - } - XXH_ASSERT(0); - return hash; /* reaching this point is deemed impossible */ - } -} - -#ifdef XXH_OLD_NAMES -# define PROCESS1 XXH_PROCESS1 -# define PROCESS4 XXH_PROCESS4 -#else -# undef XXH_PROCESS1 -# undef XXH_PROCESS4 -#endif - -/*! - * @internal - * @brief The implementation for @ref XXH32(). - * - * @param input , len , seed Directly passed from @ref XXH32(). - * @param align Whether @p input is aligned. 
- * @return The calculated hash. - */ -XXH_FORCE_INLINE XXH_PUREF xxh_u32 -XXH32_endian_align(const xxh_u8* input, size_t len, xxh_u32 seed, XXH_alignment align) -{ - xxh_u32 h32; - - if (input==NULL) XXH_ASSERT(len == 0); - - if (len>=16) { - const xxh_u8* const bEnd = input + len; - const xxh_u8* const limit = bEnd - 15; - xxh_u32 v1 = seed + XXH_PRIME32_1 + XXH_PRIME32_2; - xxh_u32 v2 = seed + XXH_PRIME32_2; - xxh_u32 v3 = seed + 0; - xxh_u32 v4 = seed - XXH_PRIME32_1; - - do { - v1 = XXH32_round(v1, XXH_get32bits(input)); input += 4; - v2 = XXH32_round(v2, XXH_get32bits(input)); input += 4; - v3 = XXH32_round(v3, XXH_get32bits(input)); input += 4; - v4 = XXH32_round(v4, XXH_get32bits(input)); input += 4; - } while (input < limit); - - h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) - + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18); - } else { - h32 = seed + XXH_PRIME32_5; - } - - h32 += (xxh_u32)len; - - return XXH32_finalize(h32, input, len&15, align); -} - -/*! @ingroup XXH32_family */ -XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t len, XXH32_hash_t seed) -{ -#if !defined(XXH_NO_STREAM) && XXH_SIZE_OPT >= 2 - /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ - XXH32_state_t state; - XXH32_reset(&state, seed); - XXH32_update(&state, (const xxh_u8*)input, len); - return XXH32_digest(&state); -#else - if (XXH_FORCE_ALIGN_CHECK) { - if ((((size_t)input) & 3) == 0) { /* Input is 4-bytes aligned, leverage the speed benefit */ - return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_aligned); - } } - - return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned); -#endif -} - - - -/******* Hash streaming *******/ -#ifndef XXH_NO_STREAM -/*! @ingroup XXH32_family */ -XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void) -{ - return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t)); -} -/*! @ingroup XXH32_family */ -XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr) -{ - XXH_free(statePtr); - return XXH_OK; -} - -/*! @ingroup XXH32_family */ -XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState) -{ - XXH_memcpy(dstState, srcState, sizeof(*dstState)); -} - -/*! @ingroup XXH32_family */ -XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, XXH32_hash_t seed) -{ - XXH_ASSERT(statePtr != NULL); - memset(statePtr, 0, sizeof(*statePtr)); - statePtr->v[0] = seed + XXH_PRIME32_1 + XXH_PRIME32_2; - statePtr->v[1] = seed + XXH_PRIME32_2; - statePtr->v[2] = seed + 0; - statePtr->v[3] = seed - XXH_PRIME32_1; - return XXH_OK; -} - - -/*! 
@ingroup XXH32_family */ -XXH_PUBLIC_API XXH_errorcode -XXH32_update(XXH32_state_t* state, const void* input, size_t len) -{ - if (input==NULL) { - XXH_ASSERT(len == 0); - return XXH_OK; - } - - { const xxh_u8* p = (const xxh_u8*)input; - const xxh_u8* const bEnd = p + len; - - state->total_len_32 += (XXH32_hash_t)len; - state->large_len |= (XXH32_hash_t)((len>=16) | (state->total_len_32>=16)); - - if (state->memsize + len < 16) { /* fill in tmp buffer */ - XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, len); - state->memsize += (XXH32_hash_t)len; - return XXH_OK; - } - - if (state->memsize) { /* some data left from previous update */ - XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, 16-state->memsize); - { const xxh_u32* p32 = state->mem32; - state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p32)); p32++; - state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p32)); p32++; - state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p32)); p32++; - state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p32)); - } - p += 16-state->memsize; - state->memsize = 0; - } - - if (p <= bEnd-16) { - const xxh_u8* const limit = bEnd - 16; - - do { - state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p)); p+=4; - state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p)); p+=4; - state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p)); p+=4; - state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p)); p+=4; - } while (p<=limit); - - } - - if (p < bEnd) { - XXH_memcpy(state->mem32, p, (size_t)(bEnd-p)); - state->memsize = (unsigned)(bEnd-p); - } - } - - return XXH_OK; -} - - -/*! @ingroup XXH32_family */ -XXH_PUBLIC_API XXH32_hash_t XXH32_digest(const XXH32_state_t* state) -{ - xxh_u32 h32; - - if (state->large_len) { - h32 = XXH_rotl32(state->v[0], 1) - + XXH_rotl32(state->v[1], 7) - + XXH_rotl32(state->v[2], 12) - + XXH_rotl32(state->v[3], 18); - } else { - h32 = state->v[2] /* == seed */ + XXH_PRIME32_5; - } - - h32 += state->total_len_32; - - return XXH32_finalize(h32, (const xxh_u8*)state->mem32, state->memsize, XXH_aligned); -} -#endif /* !XXH_NO_STREAM */ - -/******* Canonical representation *******/ - -/*! - * @ingroup XXH32_family - * The default return values from XXH functions are unsigned 32 and 64 bit - * integers. - * - * The canonical representation uses big endian convention, the same convention - * as human-readable numbers (large digits first). - * - * This way, hash values can be written into a file or buffer, remaining - * comparable across different systems. - * - * The following functions allow transformation of hash values to and from their - * canonical format. - */ -XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash) -{ - XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t)); - if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash); - XXH_memcpy(dst, &hash, sizeof(*dst)); -} -/*! @ingroup XXH32_family */ -XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src) -{ - return XXH_readBE32(src); -} - - -#ifndef XXH_NO_LONG_LONG - -/* ******************************************************************* -* 64-bit hash functions -*********************************************************************/ -/*! - * @} - * @ingroup impl - * @{ - */ -/******* Memory access *******/ - -typedef XXH64_hash_t xxh_u64; - -#ifdef XXH_OLD_NAMES -# define U64 xxh_u64 -#endif - -#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3)) -/* - * Manual byteshift. 
Best for old compilers which don't inline memcpy. - * We actually directly use XXH_readLE64 and XXH_readBE64. - */ -#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) - -/* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */ -static xxh_u64 XXH_read64(const void* memPtr) -{ - return *(const xxh_u64*) memPtr; -} - -#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) - -/* - * __attribute__((aligned(1))) is supported by gcc and clang. Originally the - * documentation claimed that it only increased the alignment, but actually it - * can decrease it on gcc, clang, and icc: - * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=69502, - * https://gcc.godbolt.org/z/xYez1j67Y. - */ -#ifdef XXH_OLD_NAMES -typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) unalign64; -#endif -static xxh_u64 XXH_read64(const void* ptr) -{ - typedef __attribute__((aligned(1))) xxh_u64 xxh_unalign64; - return *((const xxh_unalign64*)ptr); -} - -#else - -/* - * Portable and safe solution. Generally efficient. - * see: https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html - */ -static xxh_u64 XXH_read64(const void* memPtr) -{ - xxh_u64 val; - XXH_memcpy(&val, memPtr, sizeof(val)); - return val; -} - -#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ - -#if defined(_MSC_VER) /* Visual Studio */ -# define XXH_swap64 _byteswap_uint64 -#elif XXH_GCC_VERSION >= 403 -# define XXH_swap64 __builtin_bswap64 -#else -static xxh_u64 XXH_swap64(xxh_u64 x) -{ - return ((x << 56) & 0xff00000000000000ULL) | - ((x << 40) & 0x00ff000000000000ULL) | - ((x << 24) & 0x0000ff0000000000ULL) | - ((x << 8) & 0x000000ff00000000ULL) | - ((x >> 8) & 0x00000000ff000000ULL) | - ((x >> 24) & 0x0000000000ff0000ULL) | - ((x >> 40) & 0x000000000000ff00ULL) | - ((x >> 56) & 0x00000000000000ffULL); -} -#endif - - -/* XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load. */ -#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3)) - -XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* memPtr) -{ - const xxh_u8* bytePtr = (const xxh_u8 *)memPtr; - return bytePtr[0] - | ((xxh_u64)bytePtr[1] << 8) - | ((xxh_u64)bytePtr[2] << 16) - | ((xxh_u64)bytePtr[3] << 24) - | ((xxh_u64)bytePtr[4] << 32) - | ((xxh_u64)bytePtr[5] << 40) - | ((xxh_u64)bytePtr[6] << 48) - | ((xxh_u64)bytePtr[7] << 56); -} - -XXH_FORCE_INLINE xxh_u64 XXH_readBE64(const void* memPtr) -{ - const xxh_u8* bytePtr = (const xxh_u8 *)memPtr; - return bytePtr[7] - | ((xxh_u64)bytePtr[6] << 8) - | ((xxh_u64)bytePtr[5] << 16) - | ((xxh_u64)bytePtr[4] << 24) - | ((xxh_u64)bytePtr[3] << 32) - | ((xxh_u64)bytePtr[2] << 40) - | ((xxh_u64)bytePtr[1] << 48) - | ((xxh_u64)bytePtr[0] << 56); -} - -#else -XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* ptr) -{ - return XXH_CPU_LITTLE_ENDIAN ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr)); -} - -static xxh_u64 XXH_readBE64(const void* ptr) -{ - return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr); -} -#endif - -XXH_FORCE_INLINE xxh_u64 -XXH_readLE64_align(const void* ptr, XXH_alignment align) -{ - if (align==XXH_unaligned) - return XXH_readLE64(ptr); - else - return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u64*)ptr : XXH_swap64(*(const xxh_u64*)ptr); -} - - -/******* xxh64 *******/ -/*! - * @} - * @defgroup XXH64_impl XXH64 implementation - * @ingroup impl - * - * Details on the XXH64 implementation. 
- * @{
- */
-/* #define rather than static const, to be used as initializers */
-#define XXH_PRIME64_1 0x9E3779B185EBCA87ULL /*!< 0b1001111000110111011110011011000110000101111010111100101010000111 */
-#define XXH_PRIME64_2 0xC2B2AE3D27D4EB4FULL /*!< 0b1100001010110010101011100011110100100111110101001110101101001111 */
-#define XXH_PRIME64_3 0x165667B19E3779F9ULL /*!< 0b0001011001010110011001111011000110011110001101110111100111111001 */
-#define XXH_PRIME64_4 0x85EBCA77C2B2AE63ULL /*!< 0b1000010111101011110010100111011111000010101100101010111001100011 */
-#define XXH_PRIME64_5 0x27D4EB2F165667C5ULL /*!< 0b0010011111010100111010110010111100010110010101100110011111000101 */
-
-#ifdef XXH_OLD_NAMES
-# define PRIME64_1 XXH_PRIME64_1
-# define PRIME64_2 XXH_PRIME64_2
-# define PRIME64_3 XXH_PRIME64_3
-# define PRIME64_4 XXH_PRIME64_4
-# define PRIME64_5 XXH_PRIME64_5
-#endif
-
-/*! @copydoc XXH32_round */
-static xxh_u64 XXH64_round(xxh_u64 acc, xxh_u64 input)
-{
- acc += input * XXH_PRIME64_2;
- acc = XXH_rotl64(acc, 31);
- acc *= XXH_PRIME64_1;
- return acc;
-}
-
-static xxh_u64 XXH64_mergeRound(xxh_u64 acc, xxh_u64 val)
-{
- val = XXH64_round(0, val);
- acc ^= val;
- acc = acc * XXH_PRIME64_1 + XXH_PRIME64_4;
- return acc;
-}
-
-/*! @copydoc XXH32_avalanche */
-static xxh_u64 XXH64_avalanche(xxh_u64 hash)
-{
- hash ^= hash >> 33;
- hash *= XXH_PRIME64_2;
- hash ^= hash >> 29;
- hash *= XXH_PRIME64_3;
- hash ^= hash >> 32;
- return hash;
-}
-
-
-#define XXH_get64bits(p) XXH_readLE64_align(p, align)
-
-/*!
- * @internal
- * @brief Processes the last 0-31 bytes of @p ptr.
- *
- * There may be up to 31 bytes remaining to consume from the input.
- * This final stage will digest them to ensure that all input bytes are present
- * in the final mix.
- *
- * @param hash The hash to finalize.
- * @param ptr The pointer to the remaining input.
- * @param len The remaining length, modulo 32.
- * @param align Whether @p ptr is aligned.
- * @return The finalized hash.
- * @see XXH32_finalize().
- */
-static XXH_PUREF xxh_u64
-XXH64_finalize(xxh_u64 hash, const xxh_u8* ptr, size_t len, XXH_alignment align)
-{
- if (ptr==NULL) XXH_ASSERT(len == 0);
- len &= 31;
- while (len >= 8) {
- xxh_u64 const k1 = XXH64_round(0, XXH_get64bits(ptr));
- ptr += 8;
- hash ^= k1;
- hash = XXH_rotl64(hash,27) * XXH_PRIME64_1 + XXH_PRIME64_4;
- len -= 8;
- }
- if (len >= 4) {
- hash ^= (xxh_u64)(XXH_get32bits(ptr)) * XXH_PRIME64_1;
- ptr += 4;
- hash = XXH_rotl64(hash, 23) * XXH_PRIME64_2 + XXH_PRIME64_3;
- len -= 4;
- }
- while (len > 0) {
- hash ^= (*ptr++) * XXH_PRIME64_5;
- hash = XXH_rotl64(hash, 11) * XXH_PRIME64_1;
- --len;
- }
- return XXH64_avalanche(hash);
-}
-
-#ifdef XXH_OLD_NAMES
-# define PROCESS1_64 XXH_PROCESS1_64
-# define PROCESS4_64 XXH_PROCESS4_64
-# define PROCESS8_64 XXH_PROCESS8_64
-#else
-# undef XXH_PROCESS1_64
-# undef XXH_PROCESS4_64
-# undef XXH_PROCESS8_64
-#endif
-
-/*!
- * @internal
- * @brief The implementation for @ref XXH64().
- *
- * @param input , len , seed Directly passed from @ref XXH64().
- * @param align Whether @p input is aligned.
- * @return The calculated hash.
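- * @see XXH32_endian_align() for the equivalent 32-bit logic.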
- */
-XXH_FORCE_INLINE XXH_PUREF xxh_u64
-XXH64_endian_align(const xxh_u8* input, size_t len, xxh_u64 seed, XXH_alignment align)
-{
- xxh_u64 h64;
- if (input==NULL) XXH_ASSERT(len == 0);
-
- if (len>=32) {
- const xxh_u8* const bEnd = input + len;
- const xxh_u8* const limit = bEnd - 31;
- xxh_u64 v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
- xxh_u64 v2 = seed + XXH_PRIME64_2;
- xxh_u64 v3 = seed + 0;
- xxh_u64 v4 = seed - XXH_PRIME64_1;
-
- do {
- v1 = XXH64_round(v1, XXH_get64bits(input)); input+=8;
- v2 = XXH64_round(v2, XXH_get64bits(input)); input+=8;
- v3 = XXH64_round(v3, XXH_get64bits(input)); input+=8;
- v4 = XXH64_round(v4, XXH_get64bits(input)); input+=8;
- } while (input<limit);
-
- h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7)
- + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
- h64 = XXH64_mergeRound(h64, v1);
- h64 = XXH64_mergeRound(h64, v2);
- h64 = XXH64_mergeRound(h64, v3);
- h64 = XXH64_mergeRound(h64, v4);
-
- } else {
- h64 = seed + XXH_PRIME64_5;
- }
-
- h64 += (xxh_u64) len;
-
- return XXH64_finalize(h64, input, len, align);
-}
-
-/*! @ingroup XXH64_family */
-XXH_PUBLIC_API XXH64_hash_t XXH64 (XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed)
-{
-#if !defined(XXH_NO_STREAM) && XXH_SIZE_OPT >= 2
- /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
- XXH64_state_t state;
- XXH64_reset(&state, seed);
- XXH64_update(&state, (const xxh_u8*)input, len);
- return XXH64_digest(&state);
-#else
- if (XXH_FORCE_ALIGN_CHECK) {
- if ((((size_t)input) & 7)==0) { /* Input is aligned, let's leverage the speed advantage */
- return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
- } }
-
- return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
-
-#endif
-}
-
-/******* Hash Streaming *******/
-#ifndef XXH_NO_STREAM
-/*! @ingroup XXH64_family*/
-XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
-{
- return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
-}
-/*! @ingroup XXH64_family */
-XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
-{
- XXH_free(statePtr);
- return XXH_OK;
-}
-
-/*! @ingroup XXH64_family */
-XXH_PUBLIC_API void XXH64_copyState(XXH_NOESCAPE XXH64_state_t* dstState, const XXH64_state_t* srcState)
-{
- XXH_memcpy(dstState, srcState, sizeof(*dstState));
-}
-
-/*! @ingroup XXH64_family */
-XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH_NOESCAPE XXH64_state_t* statePtr, XXH64_hash_t seed)
-{
- XXH_ASSERT(statePtr != NULL);
- memset(statePtr, 0, sizeof(*statePtr));
- statePtr->v[0] = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
- statePtr->v[1] = seed + XXH_PRIME64_2;
- statePtr->v[2] = seed + 0;
- statePtr->v[3] = seed - XXH_PRIME64_1;
- return XXH_OK;
-}
-
-/*!
@ingroup XXH64_family */ -XXH_PUBLIC_API XXH_errorcode -XXH64_update (XXH_NOESCAPE XXH64_state_t* state, XXH_NOESCAPE const void* input, size_t len) -{ - if (input==NULL) { - XXH_ASSERT(len == 0); - return XXH_OK; - } - - { const xxh_u8* p = (const xxh_u8*)input; - const xxh_u8* const bEnd = p + len; - - state->total_len += len; - - if (state->memsize + len < 32) { /* fill in tmp buffer */ - XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, len); - state->memsize += (xxh_u32)len; - return XXH_OK; - } - - if (state->memsize) { /* tmp buffer is full */ - XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, 32-state->memsize); - state->v[0] = XXH64_round(state->v[0], XXH_readLE64(state->mem64+0)); - state->v[1] = XXH64_round(state->v[1], XXH_readLE64(state->mem64+1)); - state->v[2] = XXH64_round(state->v[2], XXH_readLE64(state->mem64+2)); - state->v[3] = XXH64_round(state->v[3], XXH_readLE64(state->mem64+3)); - p += 32 - state->memsize; - state->memsize = 0; - } - - if (p+32 <= bEnd) { - const xxh_u8* const limit = bEnd - 32; - - do { - state->v[0] = XXH64_round(state->v[0], XXH_readLE64(p)); p+=8; - state->v[1] = XXH64_round(state->v[1], XXH_readLE64(p)); p+=8; - state->v[2] = XXH64_round(state->v[2], XXH_readLE64(p)); p+=8; - state->v[3] = XXH64_round(state->v[3], XXH_readLE64(p)); p+=8; - } while (p<=limit); - - } - - if (p < bEnd) { - XXH_memcpy(state->mem64, p, (size_t)(bEnd-p)); - state->memsize = (unsigned)(bEnd-p); - } - } - - return XXH_OK; -} - - -/*! @ingroup XXH64_family */ -XXH_PUBLIC_API XXH64_hash_t XXH64_digest(XXH_NOESCAPE const XXH64_state_t* state) -{ - xxh_u64 h64; - - if (state->total_len >= 32) { - h64 = XXH_rotl64(state->v[0], 1) + XXH_rotl64(state->v[1], 7) + XXH_rotl64(state->v[2], 12) + XXH_rotl64(state->v[3], 18); - h64 = XXH64_mergeRound(h64, state->v[0]); - h64 = XXH64_mergeRound(h64, state->v[1]); - h64 = XXH64_mergeRound(h64, state->v[2]); - h64 = XXH64_mergeRound(h64, state->v[3]); - } else { - h64 = state->v[2] /*seed*/ + XXH_PRIME64_5; - } - - h64 += (xxh_u64) state->total_len; - - return XXH64_finalize(h64, (const xxh_u8*)state->mem64, (size_t)state->total_len, XXH_aligned); -} -#endif /* !XXH_NO_STREAM */ - -/******* Canonical representation *******/ - -/*! @ingroup XXH64_family */ -XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH_NOESCAPE XXH64_canonical_t* dst, XXH64_hash_t hash) -{ - XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t)); - if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash); - XXH_memcpy(dst, &hash, sizeof(*dst)); -} - -/*! @ingroup XXH64_family */ -XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(XXH_NOESCAPE const XXH64_canonical_t* src) -{ - return XXH_readBE64(src); -} - -#ifndef XXH_NO_XXH3 - -/* ********************************************************************* -* XXH3 -* New generation hash designed for speed on small keys and vectorization -************************************************************************ */ -/*! - * @} - * @defgroup XXH3_impl XXH3 implementation - * @ingroup impl - * @{ - */ - -/* === Compiler specifics === */ - -#if ((defined(sun) || defined(__sun)) && __cplusplus) /* Solaris includes __STDC_VERSION__ with C++. 
Tested with GCC 5.5 */
-# define XXH_RESTRICT /* disable */
-#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* >= C99 */
-# define XXH_RESTRICT restrict
-#elif (defined (__GNUC__) && ((__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1))) \
- || (defined (__clang__)) \
- || (defined (_MSC_VER) && (_MSC_VER >= 1400)) \
- || (defined (__INTEL_COMPILER) && (__INTEL_COMPILER >= 1300))
-/*
- * There are a LOT more compilers that recognize __restrict but this
- * covers the major ones.
- */
-# define XXH_RESTRICT __restrict
-#else
-# define XXH_RESTRICT /* disable */
-#endif
-
-#if (defined(__GNUC__) && (__GNUC__ >= 3)) \
- || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) \
- || defined(__clang__)
-# define XXH_likely(x) __builtin_expect(x, 1)
-# define XXH_unlikely(x) __builtin_expect(x, 0)
-#else
-# define XXH_likely(x) (x)
-# define XXH_unlikely(x) (x)
-#endif
-
-#ifndef XXH_HAS_INCLUDE
-# ifdef __has_include
-# define XXH_HAS_INCLUDE(x) __has_include(x)
-# else
-# define XXH_HAS_INCLUDE(x) 0
-# endif
-#endif
-
-#if defined(__GNUC__) || defined(__clang__)
-# if defined(__ARM_FEATURE_SVE)
-# include <arm_sve.h>
-# endif
-# if defined(__ARM_NEON__) || defined(__ARM_NEON) \
- || (defined(_M_ARM) && _M_ARM >= 7) \
- || defined(_M_ARM64) || defined(_M_ARM64EC) \
- || (defined(__wasm_simd128__) && XXH_HAS_INCLUDE(<wasm_simd128.h>)) /* WASM SIMD128 via SIMDe */
-# define inline __inline__ /* circumvent a clang bug */
-# include <arm_neon.h>
-# undef inline
-# elif defined(__AVX2__)
-# include <immintrin.h>
-# elif defined(__SSE2__)
-# include <emmintrin.h>
-# endif
-#endif
-
-#if defined(_MSC_VER)
-# include <intrin.h>
-#endif
-
-/*
- * One goal of XXH3 is to make it fast on both 32-bit and 64-bit, while
- * remaining a true 64-bit/128-bit hash function.
- *
- * This is done by prioritizing a subset of 64-bit operations that can be
- * emulated without too many steps on the average 32-bit machine.
- *
- * For example, these two lines seem similar, and run equally fast on 64-bit:
- *
- * xxh_u64 x;
- * x ^= (x >> 47); // good
- * x ^= (x >> 13); // bad
- *
- * However, to a 32-bit machine, there is a major difference.
- *
- * x ^= (x >> 47) looks like this:
- *
- * x.lo ^= (x.hi >> (47 - 32));
- *
- * while x ^= (x >> 13) looks like this:
- *
- * // note: funnel shifts are not usually cheap.
- * x.lo ^= (x.lo >> 13) | (x.hi << (32 - 13));
- * x.hi ^= (x.hi >> 13);
- *
- * The first one is significantly faster than the second, simply because the
- * shift is larger than 32. This means:
- * - All the bits we need are in the upper 32 bits, so we can ignore the lower
- * 32 bits in the shift.
- * - The shift result will always fit in the lower 32 bits, and therefore,
- * we can ignore the upper 32 bits in the xor.
- *
- * Thanks to this optimization, XXH3 only requires these features to be efficient:
- *
- * - Usable unaligned access
- * - A 32-bit or 64-bit ALU
- * - If 32-bit, a decent ADC instruction
- * - A 32 or 64-bit multiply with a 64-bit result
- * - For the 128-bit variant, a decent byteswap helps short inputs.
- *
- * The first two are already required by XXH32, and almost all 32-bit and 64-bit
- * platforms which can run XXH32 can run XXH3 efficiently.
- *
- * Thumb-1, the classic 16-bit only subset of ARM's instruction set, is one
- * notable exception.
- *
- * First of all, Thumb-1 lacks support for the UMULL instruction which
- * performs the important long multiply. This means numerous __aeabi_lmul
- * calls.
- *
- * Second of all, the 8 functional registers are just not enough.
- * Setup for __aeabi_lmul, byteshift loads, pointers, and all arithmetic need - * Lo registers, and this shuffling results in thousands more MOVs than A32. - * - * A32 and T32 don't have this limitation. They can access all 14 registers, - * do a 32->64 multiply with UMULL, and the flexible operand allowing free - * shifts is helpful, too. - * - * Therefore, we do a quick sanity check. - * - * If compiling Thumb-1 for a target which supports ARM instructions, we will - * emit a warning, as it is not a "sane" platform to compile for. - * - * Usually, if this happens, it is because of an accident and you probably need - * to specify -march, as you likely meant to compile for a newer architecture. - * - * Credit: large sections of the vectorial and asm source code paths - * have been contributed by @easyaspi314 - */ -#if defined(__thumb__) && !defined(__thumb2__) && defined(__ARM_ARCH_ISA_ARM) -# warning "XXH3 is highly inefficient without ARM or Thumb-2." -#endif - -/* ========================================== - * Vectorization detection - * ========================================== */ - -#ifdef XXH_DOXYGEN -/*! - * @ingroup tuning - * @brief Overrides the vectorization implementation chosen for XXH3. - * - * Can be defined to 0 to disable SIMD or any of the values mentioned in - * @ref XXH_VECTOR_TYPE. - * - * If this is not defined, it uses predefined macros to determine the best - * implementation. - */ -# define XXH_VECTOR XXH_SCALAR -/*! - * @ingroup tuning - * @brief Possible values for @ref XXH_VECTOR. - * - * Note that these are actually implemented as macros. - * - * If this is not defined, it is detected automatically. - * internal macro XXH_X86DISPATCH overrides this. - */ -enum XXH_VECTOR_TYPE /* fake enum */ { - XXH_SCALAR = 0, /*!< Portable scalar version */ - XXH_SSE2 = 1, /*!< - * SSE2 for Pentium 4, Opteron, all x86_64. - * - * @note SSE2 is also guaranteed on Windows 10, macOS, and - * Android x86. - */ - XXH_AVX2 = 2, /*!< AVX2 for Haswell and Bulldozer */ - XXH_AVX512 = 3, /*!< AVX512 for Skylake and Icelake */ - XXH_NEON = 4, /*!< - * NEON for most ARMv7-A, all AArch64, and WASM SIMD128 - * via the SIMDeverywhere polyfill provided with the - * Emscripten SDK. - */ - XXH_VSX = 5, /*!< VSX and ZVector for POWER8/z13 (64-bit) */ - XXH_SVE = 6, /*!< SVE for some ARMv8-A and ARMv9-A */ -}; -/*! - * @ingroup tuning - * @brief Selects the minimum alignment for XXH3's accumulators. - * - * When using SIMD, this should match the alignment required for said vector - * type, so, for example, 32 for AVX2. - * - * Default: Auto detected. 
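- *
- * For example, the SSE2 and NEON paths use 16-byte alignment, AVX2 uses 32,
- * and AVX512 uses 64, matching the defaults selected just below.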
- */ -# define XXH_ACC_ALIGN 8 -#endif - -/* Actual definition */ -#ifndef XXH_DOXYGEN -# define XXH_SCALAR 0 -# define XXH_SSE2 1 -# define XXH_AVX2 2 -# define XXH_AVX512 3 -# define XXH_NEON 4 -# define XXH_VSX 5 -# define XXH_SVE 6 -#endif - -#ifndef XXH_VECTOR /* can be defined on command line */ -# if defined(__ARM_FEATURE_SVE) -# define XXH_VECTOR XXH_SVE -# elif ( \ - defined(__ARM_NEON__) || defined(__ARM_NEON) /* gcc */ \ - || defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC) /* msvc */ \ - || (defined(__wasm_simd128__) && XXH_HAS_INCLUDE(<arm_neon.h>)) /* wasm simd128 via SIMDe */ \ - ) && ( \ - defined(_WIN32) || defined(__LITTLE_ENDIAN__) /* little endian only */ \ - || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) \ - ) -# define XXH_VECTOR XXH_NEON -# elif defined(__AVX512F__) -# define XXH_VECTOR XXH_AVX512 -# elif defined(__AVX2__) -# define XXH_VECTOR XXH_AVX2 -# elif defined(__SSE2__) || defined(_M_AMD64) || defined(_M_X64) || (defined(_M_IX86_FP) && (_M_IX86_FP == 2)) -# define XXH_VECTOR XXH_SSE2 -# elif (defined(__PPC64__) && defined(__POWER8_VECTOR__)) \ - || (defined(__s390x__) && defined(__VEC__)) \ - && defined(__GNUC__) /* TODO: IBM XL */ -# define XXH_VECTOR XXH_VSX -# else -# define XXH_VECTOR XXH_SCALAR -# endif -#endif - -/* __ARM_FEATURE_SVE is only supported by GCC & Clang. */ -#if (XXH_VECTOR == XXH_SVE) && !defined(__ARM_FEATURE_SVE) -# ifdef _MSC_VER -# pragma warning(once : 4606) -# else -# warning "__ARM_FEATURE_SVE isn't supported. Use SCALAR instead." -# endif -# undef XXH_VECTOR -# define XXH_VECTOR XXH_SCALAR -#endif - -/* - * Controls the alignment of the accumulator, - * for compatibility with aligned vector loads, which are usually faster. - */ -#ifndef XXH_ACC_ALIGN -# if defined(XXH_X86DISPATCH) -# define XXH_ACC_ALIGN 64 /* for compatibility with avx512 */ -# elif XXH_VECTOR == XXH_SCALAR /* scalar */ -# define XXH_ACC_ALIGN 8 -# elif XXH_VECTOR == XXH_SSE2 /* sse2 */ -# define XXH_ACC_ALIGN 16 -# elif XXH_VECTOR == XXH_AVX2 /* avx2 */ -# define XXH_ACC_ALIGN 32 -# elif XXH_VECTOR == XXH_NEON /* neon */ -# define XXH_ACC_ALIGN 16 -# elif XXH_VECTOR == XXH_VSX /* vsx */ -# define XXH_ACC_ALIGN 16 -# elif XXH_VECTOR == XXH_AVX512 /* avx512 */ -# define XXH_ACC_ALIGN 64 -# elif XXH_VECTOR == XXH_SVE /* sve */ -# define XXH_ACC_ALIGN 64 -# endif -#endif - -#if defined(XXH_X86DISPATCH) || XXH_VECTOR == XXH_SSE2 \ - || XXH_VECTOR == XXH_AVX2 || XXH_VECTOR == XXH_AVX512 -# define XXH_SEC_ALIGN XXH_ACC_ALIGN -#elif XXH_VECTOR == XXH_SVE -# define XXH_SEC_ALIGN XXH_ACC_ALIGN -#else -# define XXH_SEC_ALIGN 8 -#endif - -#if defined(__GNUC__) || defined(__clang__) -# define XXH_ALIASING __attribute__((may_alias)) -#else -# define XXH_ALIASING /* nothing */ -#endif - -/* - * UGLY HACK: - * GCC usually generates the best code with -O3 for xxHash. - * - * However, when targeting AVX2, it is overzealous in its unrolling, resulting - * in code roughly 3/4 the speed of Clang. - * - * There are other issues, such as GCC splitting _mm256_loadu_si256 into - * _mm_loadu_si128 + _mm256_inserti128_si256. This is an optimization which - * only applies to Sandy and Ivy Bridge... which don't even support AVX2. - * - * That is why when compiling the AVX2 version, it is recommended to use either - * -O2 -mavx2 -march=haswell - * or - * -O2 -mavx2 -mno-avx256-split-unaligned-load - * for decent performance, or to use Clang instead.
- * - * Fortunately, we can control the first one with a pragma that forces GCC into - * -O2, but the other one we can't control without "failed to inline always - * inline function due to target mismatch" warnings. - */ -#if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \ - && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \ - && defined(__OPTIMIZE__) && XXH_SIZE_OPT <= 0 /* respect -O0 and -Os */ -# pragma GCC push_options -# pragma GCC optimize("-O2") -#endif - -#if XXH_VECTOR == XXH_NEON - -/* - * UGLY HACK: While AArch64 GCC on Linux does not seem to care, on macOS, GCC -O3 - * optimizes out the entire hashLong loop because of the aliasing violation. - * - * However, GCC is also inefficient at load-store optimization with vld1q/vst1q, - * so the only option is to mark it as aliasing. - */ -typedef uint64x2_t xxh_aliasing_uint64x2_t XXH_ALIASING; - -/*! - * @internal - * @brief `vld1q_u64` but faster and alignment-safe. - * - * On AArch64, unaligned access is always safe, but on ARMv7-a, it is only - * *conditionally* safe (`vld1` has an alignment bit like `movdq[ua]` in x86). - * - * GCC for AArch64 sees `vld1q_u8` as an intrinsic instead of a load, so it - * prohibits load-store optimizations. Therefore, a direct dereference is used. - * - * Otherwise, `vld1q_u8` is used with `vreinterpretq_u8_u64` to do a safe - * unaligned load. - */ -#if defined(__aarch64__) && defined(__GNUC__) && !defined(__clang__) -XXH_FORCE_INLINE uint64x2_t XXH_vld1q_u64(void const* ptr) /* silence -Wcast-align */ -{ - return *(xxh_aliasing_uint64x2_t const *)ptr; -} -#else -XXH_FORCE_INLINE uint64x2_t XXH_vld1q_u64(void const* ptr) -{ - return vreinterpretq_u64_u8(vld1q_u8((uint8_t const*)ptr)); -} -#endif - -/*! - * @internal - * @brief `vmlal_u32` on low and high halves of a vector. - * - * This is a workaround for AArch64 GCC < 11 which implemented arm_neon.h with - * inline assembly and were therefore incapable of merging the `vget_{low, high}_u32` - * with `vmlal_u32`. - */ -#if defined(__aarch64__) && defined(__GNUC__) && !defined(__clang__) && __GNUC__ < 11 -XXH_FORCE_INLINE uint64x2_t -XXH_vmlal_low_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs) -{ - /* Inline assembly is the only way */ - __asm__("umlal %0.2d, %1.2s, %2.2s" : "+w" (acc) : "w" (lhs), "w" (rhs)); - return acc; -} -XXH_FORCE_INLINE uint64x2_t -XXH_vmlal_high_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs) -{ - /* This intrinsic works as expected */ - return vmlal_high_u32(acc, lhs, rhs); -} -#else -/* Portable intrinsic versions */ -XXH_FORCE_INLINE uint64x2_t -XXH_vmlal_low_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs) -{ - return vmlal_u32(acc, vget_low_u32(lhs), vget_low_u32(rhs)); -} -/*! @copydoc XXH_vmlal_low_u32 - * Assume the compiler converts this to vmlal_high_u32 on aarch64 */ -XXH_FORCE_INLINE uint64x2_t -XXH_vmlal_high_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs) -{ - return vmlal_u32(acc, vget_high_u32(lhs), vget_high_u32(rhs)); -} -#endif - -/*! - * @ingroup tuning - * @brief Controls the NEON to scalar ratio for XXH3 - * - * This can be set to 2, 4, 6, or 8. - * - * ARM Cortex CPUs are _very_ sensitive to how their pipelines are used. - * - * For example, the Cortex-A73 can dispatch 3 micro-ops per cycle, but only 2 of those - * can be NEON. If you are only using NEON instructions, you are only using 2/3 of the CPU - * bandwidth. 
- * - * This is even more noticeable on the more advanced cores like the Cortex-A76, which - * can dispatch 8 micro-ops per cycle, but still only 2 NEON micro-ops at once. - * - * Therefore, to make the most out of the pipeline, it is beneficial to run 6 NEON lanes - * and 2 scalar lanes, which is chosen by default. - * - * This does not apply to Apple processors or 32-bit processors, which run better with - * full NEON. These will default to 8. Additionally, size-optimized builds run 8 lanes. - * - * This change benefits CPUs with large micro-op buffers without negatively affecting - * most other CPUs: - * - * | Chipset | Dispatch type | NEON only | 6:2 hybrid | Diff. | - * |:----------------------|:--------------------|----------:|-----------:|------:| - * | Snapdragon 730 (A76) | 2 NEON/8 micro-ops | 8.8 GB/s | 10.1 GB/s | ~16% | - * | Snapdragon 835 (A73) | 2 NEON/3 micro-ops | 5.1 GB/s | 5.3 GB/s | ~5% | - * | Marvell PXA1928 (A53) | In-order dual-issue | 1.9 GB/s | 1.9 GB/s | 0% | - * | Apple M1 | 4 NEON/8 micro-ops | 37.3 GB/s | 36.1 GB/s | ~-3% | - * - * It also seems to fix some bad codegen on GCC, making it almost as fast as clang. - * - * When using WASM SIMD128, if this is 2 or 6, SIMDe will scalarize 2 of the lanes, meaning - * it effectively becomes a worse version of 4. - * - * @see XXH3_accumulate_512_neon() - */ -# ifndef XXH3_NEON_LANES -# if (defined(__aarch64__) || defined(__arm64__) || defined(_M_ARM64) || defined(_M_ARM64EC)) \ - && !defined(__APPLE__) && XXH_SIZE_OPT <= 0 -# define XXH3_NEON_LANES 6 -# else -# define XXH3_NEON_LANES XXH_ACC_NB -# endif -# endif -#endif /* XXH_VECTOR == XXH_NEON */ - -/* - * VSX and Z Vector helpers. - * - * This is very messy, and any pull requests to clean this up are welcome. - * - * There are a lot of problems with supporting VSX and s390x, due to - * inconsistent intrinsics, spotty coverage, and multiple endiannesses. - */ -#if XXH_VECTOR == XXH_VSX -/* Annoyingly, these headers _may_ define three macros: `bool`, `vector`, - * and `pixel`. This is a problem for obvious reasons. - * - * These keywords are unnecessary; the spec literally says they are - * equivalent to `__bool`, `__vector`, and `__pixel` and may be undef'd - * after including the header. - * - * We use pragma push_macro/pop_macro to keep the namespace clean. */ -# pragma push_macro("bool") -# pragma push_macro("vector") -# pragma push_macro("pixel") -/* silence potential macro redefined warnings */ -# undef bool -# undef vector -# undef pixel - -# if defined(__s390x__) -# include <vecintrin.h> -# else -# include <altivec.h> -# endif - -/* Restore the original macro values, if applicable. */ -# pragma pop_macro("pixel") -# pragma pop_macro("vector") -# pragma pop_macro("bool") - -typedef __vector unsigned long long xxh_u64x2; -typedef __vector unsigned char xxh_u8x16; -typedef __vector unsigned xxh_u32x4; - -/* - * UGLY HACK: Similar to aarch64 macOS GCC, s390x GCC has the same aliasing issue. - */ -typedef xxh_u64x2 xxh_aliasing_u64x2 XXH_ALIASING; - -# ifndef XXH_VSX_BE -# if defined(__BIG_ENDIAN__) \ - || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) -# define XXH_VSX_BE 1 -# elif defined(__VEC_ELEMENT_REG_ORDER__) && __VEC_ELEMENT_REG_ORDER__ == __ORDER_BIG_ENDIAN__ -# warning "-maltivec=be is not recommended. Please use native endianness." -# define XXH_VSX_BE 1 -# else -# define XXH_VSX_BE 0 -# endif -# endif /* !defined(XXH_VSX_BE) */ - -# if XXH_VSX_BE -# if defined(__POWER9_VECTOR__) || (defined(__clang__) && defined(__s390x__)) -# define XXH_vec_revb vec_revb -# else -/*!
- * A polyfill for POWER9's vec_revb(). - */ -XXH_FORCE_INLINE xxh_u64x2 XXH_vec_revb(xxh_u64x2 val) -{ - xxh_u8x16 const vByteSwap = { 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00, - 0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08 }; - return vec_perm(val, val, vByteSwap); -} -# endif -# endif /* XXH_VSX_BE */ - -/*! - * Performs an unaligned vector load and byte swaps it on big endian. - */ -XXH_FORCE_INLINE xxh_u64x2 XXH_vec_loadu(const void *ptr) -{ - xxh_u64x2 ret; - XXH_memcpy(&ret, ptr, sizeof(xxh_u64x2)); -# if XXH_VSX_BE - ret = XXH_vec_revb(ret); -# endif - return ret; -} - -/* - * vec_mulo and vec_mule are very problematic intrinsics on PowerPC. - * - * These intrinsics weren't added until GCC 8, despite existing for a while, - * and they are endian dependent. Also, their meanings swap depending on the version. - * */ -# if defined(__s390x__) - /* s390x is always big endian, no issue on this platform */ -# define XXH_vec_mulo vec_mulo -# define XXH_vec_mule vec_mule -# elif defined(__clang__) && XXH_HAS_BUILTIN(__builtin_altivec_vmuleuw) && !defined(__ibmxl__) -/* Clang has a better way to control this, we can just use the builtin which doesn't swap. */ - /* The IBM XL Compiler (which defines __clang__) only implements the vec_* operations */ -# define XXH_vec_mulo __builtin_altivec_vmulouw -# define XXH_vec_mule __builtin_altivec_vmuleuw -# else -/* gcc needs inline assembly */ -/* Adapted from https://github.com/google/highwayhash/blob/master/highwayhash/hh_vsx.h. */ -XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mulo(xxh_u32x4 a, xxh_u32x4 b) -{ - xxh_u64x2 result; - __asm__("vmulouw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b)); - return result; -} -XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mule(xxh_u32x4 a, xxh_u32x4 b) -{ - xxh_u64x2 result; - __asm__("vmuleuw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b)); - return result; -} -# endif /* XXH_vec_mulo, XXH_vec_mule */ -#endif /* XXH_VECTOR == XXH_VSX */ - -#if XXH_VECTOR == XXH_SVE -#define ACCRND(acc, offset) \ -do { \ - svuint64_t input_vec = svld1_u64(mask, xinput + offset); \ - svuint64_t secret_vec = svld1_u64(mask, xsecret + offset); \ - svuint64_t mixed = sveor_u64_x(mask, secret_vec, input_vec); \ - svuint64_t swapped = svtbl_u64(input_vec, kSwap); \ - svuint64_t mixed_lo = svextw_u64_x(mask, mixed); \ - svuint64_t mixed_hi = svlsr_n_u64_x(mask, mixed, 32); \ - svuint64_t mul = svmad_u64_x(mask, mixed_lo, mixed_hi, swapped); \ - acc = svadd_u64_x(mask, acc, mul); \ -} while (0) -#endif /* XXH_VECTOR == XXH_SVE */ - -/* prefetch - * can be disabled by declaring the XXH_NO_PREFETCH build macro */ -#if defined(XXH_NO_PREFETCH) -# define XXH_PREFETCH(ptr) (void)(ptr) /* disabled */ -#else -# if XXH_SIZE_OPT >= 1 -# define XXH_PREFETCH(ptr) (void)(ptr) -# elif defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86)) /* _mm_prefetch() not defined outside of x86/x64 */ -# include <mmintrin.h> /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */ -# define XXH_PREFETCH(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T0) -# elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) ) -# define XXH_PREFETCH(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */) -# else -# define XXH_PREFETCH(ptr) (void)(ptr) /* disabled */ -# endif -#endif /* XXH_NO_PREFETCH */ - - -/* ========================================== - * XXH3 default settings - * ========================================== */ - -#define XXH_SECRET_DEFAULT_SIZE 192 /* minimum XXH3_SECRET_SIZE_MIN */ - -#if (XXH_SECRET_DEFAULT_SIZE <
XXH3_SECRET_SIZE_MIN) -# error "default keyset is not large enough" -#endif - -/*! Pseudorandom secret taken directly from FARSH. */ -XXH_ALIGN(64) static const xxh_u8 XXH3_kSecret[XXH_SECRET_DEFAULT_SIZE] = { - 0xb8, 0xfe, 0x6c, 0x39, 0x23, 0xa4, 0x4b, 0xbe, 0x7c, 0x01, 0x81, 0x2c, 0xf7, 0x21, 0xad, 0x1c, - 0xde, 0xd4, 0x6d, 0xe9, 0x83, 0x90, 0x97, 0xdb, 0x72, 0x40, 0xa4, 0xa4, 0xb7, 0xb3, 0x67, 0x1f, - 0xcb, 0x79, 0xe6, 0x4e, 0xcc, 0xc0, 0xe5, 0x78, 0x82, 0x5a, 0xd0, 0x7d, 0xcc, 0xff, 0x72, 0x21, - 0xb8, 0x08, 0x46, 0x74, 0xf7, 0x43, 0x24, 0x8e, 0xe0, 0x35, 0x90, 0xe6, 0x81, 0x3a, 0x26, 0x4c, - 0x3c, 0x28, 0x52, 0xbb, 0x91, 0xc3, 0x00, 0xcb, 0x88, 0xd0, 0x65, 0x8b, 0x1b, 0x53, 0x2e, 0xa3, - 0x71, 0x64, 0x48, 0x97, 0xa2, 0x0d, 0xf9, 0x4e, 0x38, 0x19, 0xef, 0x46, 0xa9, 0xde, 0xac, 0xd8, - 0xa8, 0xfa, 0x76, 0x3f, 0xe3, 0x9c, 0x34, 0x3f, 0xf9, 0xdc, 0xbb, 0xc7, 0xc7, 0x0b, 0x4f, 0x1d, - 0x8a, 0x51, 0xe0, 0x4b, 0xcd, 0xb4, 0x59, 0x31, 0xc8, 0x9f, 0x7e, 0xc9, 0xd9, 0x78, 0x73, 0x64, - 0xea, 0xc5, 0xac, 0x83, 0x34, 0xd3, 0xeb, 0xc3, 0xc5, 0x81, 0xa0, 0xff, 0xfa, 0x13, 0x63, 0xeb, - 0x17, 0x0d, 0xdd, 0x51, 0xb7, 0xf0, 0xda, 0x49, 0xd3, 0x16, 0x55, 0x26, 0x29, 0xd4, 0x68, 0x9e, - 0x2b, 0x16, 0xbe, 0x58, 0x7d, 0x47, 0xa1, 0xfc, 0x8f, 0xf8, 0xb8, 0xd1, 0x7a, 0xd0, 0x31, 0xce, - 0x45, 0xcb, 0x3a, 0x8f, 0x95, 0x16, 0x04, 0x28, 0xaf, 0xd7, 0xfb, 0xca, 0xbb, 0x4b, 0x40, 0x7e, -}; - -static const xxh_u64 PRIME_MX1 = 0x165667919E3779F9ULL; /*!< 0b0001011001010110011001111001000110011110001101110111100111111001 */ -static const xxh_u64 PRIME_MX2 = 0x9FB21C651E98DF25ULL; /*!< 0b1001111110110010000111000110010100011110100110001101111100100101 */ - -#ifdef XXH_OLD_NAMES -# define kSecret XXH3_kSecret -#endif - -#ifdef XXH_DOXYGEN -/*! - * @brief Calculates a 32-bit to 64-bit long multiply. - * - * Implemented as a macro. - * - * Wraps `__emulu` on MSVC x86 because it tends to call `__allmul` when it doesn't - * need to (but it shouldn't need to anyways, it is about 7 instructions to do - * a 64x64 multiply...). Since we know that this will _always_ emit `MULL`, we - * use that instead of the normal method. - * - * If you are compiling for platforms like Thumb-1 and don't have a better option, - * you may also want to write your own long multiply routine here. - * - * @param x, y Numbers to be multiplied - * @return 64-bit product of the low 32 bits of @p x and @p y. - */ -XXH_FORCE_INLINE xxh_u64 -XXH_mult32to64(xxh_u64 x, xxh_u64 y) -{ - return (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF); -} -#elif defined(_MSC_VER) && defined(_M_IX86) -# define XXH_mult32to64(x, y) __emulu((unsigned)(x), (unsigned)(y)) -#else -/* - * Downcast + upcast is usually better than masking on older compilers like - * GCC 4.2 (especially 32-bit ones), all without affecting newer compilers. - * - * The other method, (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF), will AND both operands - * and perform a full 64x64 multiply -- entirely redundant on 32-bit. - */ -# define XXH_mult32to64(x, y) ((xxh_u64)(xxh_u32)(x) * (xxh_u64)(xxh_u32)(y)) -#endif - -/*! - * @brief Calculates a 64->128-bit long multiply. - * - * Uses `__uint128_t` and `_umul128` if available, otherwise uses a scalar - * version. - * - * @param lhs , rhs The 64-bit integers to be multiplied - * @return The 128-bit result represented in an @ref XXH128_hash_t. - */ -static XXH128_hash_t -XXH_mult64to128(xxh_u64 lhs, xxh_u64 rhs) -{ - /* - * GCC/Clang __uint128_t method. - * - * On most 64-bit targets, GCC and Clang define a __uint128_t type. 
- * This is usually the best way as it usually uses a native long 64-bit - * multiply, such as MULQ on x86_64 or MUL + UMULH on aarch64. - * - * Usually. - * - * Despite being a 32-bit platform, Clang (and emscripten) define this type - * despite not having the arithmetic for it. This results in a laggy - * compiler builtin call which calculates a full 128-bit multiply. - * In that case it is best to use the portable one. - * https://github.com/Cyan4973/xxHash/issues/211#issuecomment-515575677 - */ -#if (defined(__GNUC__) || defined(__clang__)) && !defined(__wasm__) \ - && defined(__SIZEOF_INT128__) \ - || (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128) - - __uint128_t const product = (__uint128_t)lhs * (__uint128_t)rhs; - XXH128_hash_t r128; - r128.low64 = (xxh_u64)(product); - r128.high64 = (xxh_u64)(product >> 64); - return r128; - - /* - * MSVC for x64's _umul128 method. - * - * xxh_u64 _umul128(xxh_u64 Multiplier, xxh_u64 Multiplicand, xxh_u64 *HighProduct); - * - * This compiles to single operand MUL on x64. - */ -#elif (defined(_M_X64) || defined(_M_IA64)) && !defined(_M_ARM64EC) - -#ifndef _MSC_VER -# pragma intrinsic(_umul128) -#endif - xxh_u64 product_high; - xxh_u64 const product_low = _umul128(lhs, rhs, &product_high); - XXH128_hash_t r128; - r128.low64 = product_low; - r128.high64 = product_high; - return r128; - - /* - * MSVC for ARM64's __umulh method. - * - * This compiles to the same MUL + UMULH as GCC/Clang's __uint128_t method. - */ -#elif defined(_M_ARM64) || defined(_M_ARM64EC) - -#ifndef _MSC_VER -# pragma intrinsic(__umulh) -#endif - XXH128_hash_t r128; - r128.low64 = lhs * rhs; - r128.high64 = __umulh(lhs, rhs); - return r128; - -#else - /* - * Portable scalar method. Optimized for 32-bit and 64-bit ALUs. - * - * This is a fast and simple grade school multiply, which is shown below - * with base 10 arithmetic instead of base 0x100000000. - * - * 9 3 // D2 lhs = 93 - * x 7 5 // D2 rhs = 75 - * ---------- - * 1 5 // D2 lo_lo = (93 % 10) * (75 % 10) = 15 - * 4 5 | // D2 hi_lo = (93 / 10) * (75 % 10) = 45 - * 2 1 | // D2 lo_hi = (93 % 10) * (75 / 10) = 21 - * + 6 3 | | // D2 hi_hi = (93 / 10) * (75 / 10) = 63 - * --------- - * 2 7 | // D2 cross = (15 / 10) + (45 % 10) + 21 = 27 - * + 6 7 | | // D2 upper = (27 / 10) + (45 / 10) + 63 = 67 - * --------- - * 6 9 7 5 // D4 res = (27 * 10) + (15 % 10) + (67 * 100) = 6975 - * - * The reasons for adding the products like this are: - * 1. It avoids manual carry tracking. Just like how - * (9 * 9) + 9 + 9 = 99, the same applies with this for UINT64_MAX. - * This avoids a lot of complexity. - * - * 2. It hints for, and on Clang, compiles to, the powerful UMAAL - * instruction available in ARM's Digital Signal Processing extension - * in 32-bit ARMv6 and later, which is shown below: - * - * void UMAAL(xxh_u32 *RdLo, xxh_u32 *RdHi, xxh_u32 Rn, xxh_u32 Rm) - * { - * xxh_u64 product = (xxh_u64)*RdLo * (xxh_u64)*RdHi + Rn + Rm; - * *RdLo = (xxh_u32)(product & 0xFFFFFFFF); - * *RdHi = (xxh_u32)(product >> 32); - * } - * - * This instruction was designed for efficient long multiplication, and - * allows this to be calculated in only 4 instructions at speeds - * comparable to some 64-bit ALUs. - * - * 3. It isn't terrible on other platforms. Usually this will be a couple - * of 32-bit ADD/ADCs. - */ - - /* First calculate all of the cross products. 
*/ - xxh_u64 const lo_lo = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs & 0xFFFFFFFF); - xxh_u64 const hi_lo = XXH_mult32to64(lhs >> 32, rhs & 0xFFFFFFFF); - xxh_u64 const lo_hi = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs >> 32); - xxh_u64 const hi_hi = XXH_mult32to64(lhs >> 32, rhs >> 32); - - /* Now add the products together. These will never overflow. */ - xxh_u64 const cross = (lo_lo >> 32) + (hi_lo & 0xFFFFFFFF) + lo_hi; - xxh_u64 const upper = (hi_lo >> 32) + (cross >> 32) + hi_hi; - xxh_u64 const lower = (cross << 32) | (lo_lo & 0xFFFFFFFF); - - XXH128_hash_t r128; - r128.low64 = lower; - r128.high64 = upper; - return r128; -#endif -} - -/*! - * @brief Calculates a 64-bit to 128-bit multiply, then XOR folds it. - * - * The reason for the separate function is to prevent passing too many structs - * around by value. This will hopefully inline the multiply, but we don't force it. - * - * @param lhs , rhs The 64-bit integers to multiply - * @return The low 64 bits of the product XOR'd by the high 64 bits. - * @see XXH_mult64to128() - */ -static xxh_u64 -XXH3_mul128_fold64(xxh_u64 lhs, xxh_u64 rhs) -{ - XXH128_hash_t product = XXH_mult64to128(lhs, rhs); - return product.low64 ^ product.high64; -} - -/*! Seems to produce slightly better code on GCC for some reason. */ -XXH_FORCE_INLINE XXH_CONSTF xxh_u64 XXH_xorshift64(xxh_u64 v64, int shift) -{ - XXH_ASSERT(0 <= shift && shift < 64); - return v64 ^ (v64 >> shift); -} - -/* - * This is a fast avalanche stage, - * suitable when input bits are already partially mixed - */ -static XXH64_hash_t XXH3_avalanche(xxh_u64 h64) -{ - h64 = XXH_xorshift64(h64, 37); - h64 *= PRIME_MX1; - h64 = XXH_xorshift64(h64, 32); - return h64; -} - -/* - * This is a stronger avalanche, - * inspired by Pelle Evensen's rrmxmx - * preferable when input has not been previously mixed - */ -static XXH64_hash_t XXH3_rrmxmx(xxh_u64 h64, xxh_u64 len) -{ - /* this mix is inspired by Pelle Evensen's rrmxmx */ - h64 ^= XXH_rotl64(h64, 49) ^ XXH_rotl64(h64, 24); - h64 *= PRIME_MX2; - h64 ^= (h64 >> 35) + len ; - h64 *= PRIME_MX2; - return XXH_xorshift64(h64, 28); -} - - -/* ========================================== - * Short keys - * ========================================== - * One of the shortcomings of XXH32 and XXH64 was that their performance was - * sub-optimal on short lengths. It used an iterative algorithm which strongly - * favored lengths that were a multiple of 4 or 8. - * - * Instead of iterating over individual inputs, we use a set of single shot - * functions which piece together a range of lengths and operate in constant time. - * - * Additionally, the number of multiplies has been significantly reduced. This - * reduces latency, especially when emulating 64-bit multiplies on 32-bit. - * - * Depending on the platform, this may or may not be faster than XXH32, but it - * is almost guaranteed to be faster than XXH64. - */ - -/* - * At very short lengths, there isn't enough input to fully hide secrets, or use - * the entire secret. - * - * There is also only a limited amount of mixing we can do before significantly - * impacting performance. - * - * Therefore, we use different sections of the secret and always mix two secret - * samples with an XOR. This should have no effect on performance on the - * seedless or withSeed variants because everything _should_ be constant folded - * by modern compilers. - * - * The XOR mixing hides individual parts of the secret and increases entropy. - * - * This adds an extra layer of strength for custom secrets. 
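- * - * For example, the 1..3 byte path below derives its mixing constant as - * (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed, so no single - * 32-bit window of the secret is ever exposed on its own.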
- */ -XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t -XXH3_len_1to3_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) -{ - XXH_ASSERT(input != NULL); - XXH_ASSERT(1 <= len && len <= 3); - XXH_ASSERT(secret != NULL); - /* - * len = 1: combined = { input[0], 0x01, input[0], input[0] } - * len = 2: combined = { input[1], 0x02, input[0], input[1] } - * len = 3: combined = { input[2], 0x03, input[0], input[1] } - */ - { xxh_u8 const c1 = input[0]; - xxh_u8 const c2 = input[len >> 1]; - xxh_u8 const c3 = input[len - 1]; - xxh_u32 const combined = ((xxh_u32)c1 << 16) | ((xxh_u32)c2 << 24) - | ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8); - xxh_u64 const bitflip = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed; - xxh_u64 const keyed = (xxh_u64)combined ^ bitflip; - return XXH64_avalanche(keyed); - } -} - -XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t -XXH3_len_4to8_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) -{ - XXH_ASSERT(input != NULL); - XXH_ASSERT(secret != NULL); - XXH_ASSERT(4 <= len && len <= 8); - seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32; - { xxh_u32 const input1 = XXH_readLE32(input); - xxh_u32 const input2 = XXH_readLE32(input + len - 4); - xxh_u64 const bitflip = (XXH_readLE64(secret+8) ^ XXH_readLE64(secret+16)) - seed; - xxh_u64 const input64 = input2 + (((xxh_u64)input1) << 32); - xxh_u64 const keyed = input64 ^ bitflip; - return XXH3_rrmxmx(keyed, len); - } -} - -XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t -XXH3_len_9to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) -{ - XXH_ASSERT(input != NULL); - XXH_ASSERT(secret != NULL); - XXH_ASSERT(9 <= len && len <= 16); - { xxh_u64 const bitflip1 = (XXH_readLE64(secret+24) ^ XXH_readLE64(secret+32)) + seed; - xxh_u64 const bitflip2 = (XXH_readLE64(secret+40) ^ XXH_readLE64(secret+48)) - seed; - xxh_u64 const input_lo = XXH_readLE64(input) ^ bitflip1; - xxh_u64 const input_hi = XXH_readLE64(input + len - 8) ^ bitflip2; - xxh_u64 const acc = len - + XXH_swap64(input_lo) + input_hi - + XXH3_mul128_fold64(input_lo, input_hi); - return XXH3_avalanche(acc); - } -} - -XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t -XXH3_len_0to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) -{ - XXH_ASSERT(len <= 16); - { if (XXH_likely(len > 8)) return XXH3_len_9to16_64b(input, len, secret, seed); - if (XXH_likely(len >= 4)) return XXH3_len_4to8_64b(input, len, secret, seed); - if (len) return XXH3_len_1to3_64b(input, len, secret, seed); - return XXH64_avalanche(seed ^ (XXH_readLE64(secret+56) ^ XXH_readLE64(secret+64))); - } -} - -/* - * DISCLAIMER: There are known *seed-dependent* multicollisions here due to - * multiplication by zero, affecting hashes of lengths 17 to 240. - * - * However, they are very unlikely. - * - * Keep this in mind when using the unseeded XXH3_64bits() variant: As with all - * unseeded non-cryptographic hashes, it does not attempt to defend itself - * against specially crafted inputs, only random inputs. - * - * Compared to classic UMAC where a 1 in 2^31 chance of 4 consecutive bytes - * cancelling out the secret is taken an arbitrary number of times (addressed - * in XXH3_accumulate_512), this collision is very unlikely with random inputs - * and/or proper seeding: - * - * This only has a 1 in 2^63 chance of 8 consecutive bytes cancelling out, in a - * function that is only called up to 16 times per hash with up to 240 bytes of - * input. 
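- * - * (As a rough union bound, 16 chances at 2^-63 each still only amount to - * about a 2^-59 chance per hash.)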
- * - * This is not too bad for a non-cryptographic hash function, especially with - * only 64-bit outputs. - * - * The 128-bit variant (which trades some speed for strength) is NOT affected - * by this, although it is always a good idea to use a proper seed if you care - * about strength. - */ -XXH_FORCE_INLINE xxh_u64 XXH3_mix16B(const xxh_u8* XXH_RESTRICT input, - const xxh_u8* XXH_RESTRICT secret, xxh_u64 seed64) -{ -#if defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \ - && defined(__i386__) && defined(__SSE2__) /* x86 + SSE2 */ \ - && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable like XXH32 hack */ - /* - * UGLY HACK: - * GCC for x86 tends to autovectorize the 128-bit multiply, resulting in - * slower code. - * - * By forcing seed64 into a register, we disrupt the cost model and - * cause it to scalarize. See `XXH32_round()`. - * - * FIXME: Clang's output is still _much_ faster -- On an AMD Ryzen 3600, - * XXH3_64bits @ len=240 runs at 4.6 GB/s with Clang 9, but 3.3 GB/s on - * GCC 9.2, despite both emitting scalar code. - * - * GCC generates much better scalar code than Clang for the rest of XXH3, - * which is why finding a more optimal codepath is of interest. - */ - XXH_COMPILER_GUARD(seed64); -#endif - { xxh_u64 const input_lo = XXH_readLE64(input); - xxh_u64 const input_hi = XXH_readLE64(input+8); - return XXH3_mul128_fold64( - input_lo ^ (XXH_readLE64(secret) + seed64), - input_hi ^ (XXH_readLE64(secret+8) - seed64) - ); - } -} - -/* For mid-range keys, XXH3 uses a Mum-hash variant. */ -XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t -XXH3_len_17to128_64b(const xxh_u8* XXH_RESTRICT input, size_t len, - const xxh_u8* XXH_RESTRICT secret, size_t secretSize, - XXH64_hash_t seed) -{ - XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize; - XXH_ASSERT(16 < len && len <= 128); - - { xxh_u64 acc = len * XXH_PRIME64_1; -#if XXH_SIZE_OPT >= 1 - /* Smaller and cleaner, but slightly slower.
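- * (This loop pairs the i-th 16 bytes from the front of the input with the - * i-th 16 bytes from the back, mirroring the unrolled version below.)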
*/ - unsigned int i = (unsigned int)(len - 1) / 32; - do { - acc += XXH3_mix16B(input+16 * i, secret+32*i, seed); - acc += XXH3_mix16B(input+len-16*(i+1), secret+32*i+16, seed); - } while (i-- != 0); -#else - if (len > 32) { - if (len > 64) { - if (len > 96) { - acc += XXH3_mix16B(input+48, secret+96, seed); - acc += XXH3_mix16B(input+len-64, secret+112, seed); - } - acc += XXH3_mix16B(input+32, secret+64, seed); - acc += XXH3_mix16B(input+len-48, secret+80, seed); - } - acc += XXH3_mix16B(input+16, secret+32, seed); - acc += XXH3_mix16B(input+len-32, secret+48, seed); - } - acc += XXH3_mix16B(input+0, secret+0, seed); - acc += XXH3_mix16B(input+len-16, secret+16, seed); -#endif - return XXH3_avalanche(acc); - } -} - -#define XXH3_MIDSIZE_MAX 240 - -XXH_NO_INLINE XXH_PUREF XXH64_hash_t -XXH3_len_129to240_64b(const xxh_u8* XXH_RESTRICT input, size_t len, - const xxh_u8* XXH_RESTRICT secret, size_t secretSize, - XXH64_hash_t seed) -{ - XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize; - XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX); - - #define XXH3_MIDSIZE_STARTOFFSET 3 - #define XXH3_MIDSIZE_LASTOFFSET 17 - - { xxh_u64 acc = len * XXH_PRIME64_1; - xxh_u64 acc_end; - unsigned int const nbRounds = (unsigned int)len / 16; - unsigned int i; - XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX); - for (i=0; i<8; i++) { - acc += XXH3_mix16B(input+(16*i), secret+(16*i), seed); - } - /* last bytes */ - acc_end = XXH3_mix16B(input + len - 16, secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET, seed); - XXH_ASSERT(nbRounds >= 8); - acc = XXH3_avalanche(acc); -#if defined(__clang__) /* Clang */ \ - && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \ - && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable */ - /* - * UGLY HACK: - * Clang for ARMv7-A tries to vectorize this loop, similar to GCC x86. - * Everywhere else, it uses scalar code. - * - * For 64->128-bit multiplies, even if the NEON was 100% optimal, it - * would still be slower than UMAAL (see XXH_mult64to128). - * - * Unfortunately, Clang doesn't handle the long multiplies properly and - * converts them to the nonexistent "vmulq_u64" intrinsic, which is then - * scalarized into an ugly mess of VMOV.32 instructions. - * - * This mess is difficult to avoid without turning autovectorization - * off completely, but the issues are usually relatively minor and/or not - * worth fixing. - * - * This loop is the easiest to fix, as unlike XXH32, this pragma - * _actually works_ because it is a loop vectorization instead of an - * SLP vectorization. - */ - #pragma clang loop vectorize(disable) -#endif - for (i=8 ; i < nbRounds; i++) { - /* - * Prevents clang from unrolling the acc loop and interleaving it with this one. - */ - XXH_COMPILER_GUARD(acc); - acc_end += XXH3_mix16B(input+(16*i), secret+(16*(i-8)) + XXH3_MIDSIZE_STARTOFFSET, seed); - } - return XXH3_avalanche(acc + acc_end); - } -} - - -/* ======= Long Keys ======= */ - -#define XXH_STRIPE_LEN 64 -#define XXH_SECRET_CONSUME_RATE 8 /* nb of secret bytes consumed at each accumulation */ -#define XXH_ACC_NB (XXH_STRIPE_LEN / sizeof(xxh_u64)) - -#ifdef XXH_OLD_NAMES -# define STRIPE_LEN XXH_STRIPE_LEN -# define ACC_NB XXH_ACC_NB -#endif - -#ifndef XXH_PREFETCH_DIST -# ifdef __clang__ -# define XXH_PREFETCH_DIST 320 -# else -# if (XXH_VECTOR == XXH_AVX512) -# define XXH_PREFETCH_DIST 512 -# else -# define XXH_PREFETCH_DIST 384 -# endif -# endif /* __clang__ */ -#endif /* XXH_PREFETCH_DIST */ - -/* - * These macros are to generate an XXH3_accumulate() function.
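- * (For example, XXH3_ACCUMULATE_TEMPLATE(avx2) below expands to an - * XXH3_accumulate_avx2() that calls XXH3_accumulate_512_avx2() once per stripe.)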
- * The two arguments select the name suffix and target attribute. - * - * The name of this symbol is XXH3_accumulate_<name>() and it calls - * XXH3_accumulate_512_<name>(). - * - * It may be useful to hand-implement this function if the compiler fails to - * optimize the inline function. - */ -#define XXH3_ACCUMULATE_TEMPLATE(name) \ -void \ -XXH3_accumulate_##name(xxh_u64* XXH_RESTRICT acc, \ - const xxh_u8* XXH_RESTRICT input, \ - const xxh_u8* XXH_RESTRICT secret, \ - size_t nbStripes) \ -{ \ - size_t n; \ - for (n = 0; n < nbStripes; n++ ) { \ - const xxh_u8* const in = input + n*XXH_STRIPE_LEN; \ - XXH_PREFETCH(in + XXH_PREFETCH_DIST); \ - XXH3_accumulate_512_##name( \ - acc, \ - in, \ - secret + n*XXH_SECRET_CONSUME_RATE); \ - } \ -} - - -XXH_FORCE_INLINE void XXH_writeLE64(void* dst, xxh_u64 v64) -{ - if (!XXH_CPU_LITTLE_ENDIAN) v64 = XXH_swap64(v64); - XXH_memcpy(dst, &v64, sizeof(v64)); -} - -/* Several intrinsic functions below are supposed to accept __int64 as argument, - * as documented in https://software.intel.com/sites/landingpage/IntrinsicsGuide/ . - * However, several environments do not define __int64 type, - * requiring a workaround. - */ -#if !defined (__VMS) \ - && (defined (__cplusplus) \ - || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) - typedef int64_t xxh_i64; -#else - /* the following type must have a width of 64 bits */ - typedef long long xxh_i64; -#endif - - -/* - * XXH3_accumulate_512 is the tightest loop for long inputs, and it is the most optimized. - * - * It is a hardened version of UMAC, based off of FARSH's implementation. - * - * This was chosen because it adapts quite well to 32-bit, 64-bit, and SIMD - * implementations, and it is ridiculously fast. - * - * We harden it by mixing the original input to the accumulators as well as the product. - * - * This means that in the (relatively likely) case of a multiply by zero, the - * original input is preserved. - * - * On 128-bit inputs, we swap 64-bit pairs when we add the input to improve - * cross-pollination, as otherwise the upper and lower halves would be - * essentially independent. - * - * This doesn't matter on 64-bit hashes since they all get merged together in - * the end, so we skip the extra step. - * - * Both XXH3_64bits and XXH3_128bits use this subroutine.
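- * - * As a scalar sketch (mirroring XXH3_scalarRound() further below), one stripe - * updates each 64-bit lane i roughly as: - * - * xxh_u64 dk = input[i] ^ secret[i]; - * acc[i ^ 1] += input[i]; // preserve the input, swapped pairwise - * acc[i] += (dk & 0xFFFFFFFF) * (dk >> 32); // 32x32->64 multiply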
- */ - -#if (XXH_VECTOR == XXH_AVX512) \ - || (defined(XXH_DISPATCH_AVX512) && XXH_DISPATCH_AVX512 != 0) - -#ifndef XXH_TARGET_AVX512 -# define XXH_TARGET_AVX512 /* disable attribute target */ -#endif - -XXH_FORCE_INLINE XXH_TARGET_AVX512 void -XXH3_accumulate_512_avx512(void* XXH_RESTRICT acc, - const void* XXH_RESTRICT input, - const void* XXH_RESTRICT secret) -{ - __m512i* const xacc = (__m512i *) acc; - XXH_ASSERT((((size_t)acc) & 63) == 0); - XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i)); - - { - /* data_vec = input[0]; */ - __m512i const data_vec = _mm512_loadu_si512 (input); - /* key_vec = secret[0]; */ - __m512i const key_vec = _mm512_loadu_si512 (secret); - /* data_key = data_vec ^ key_vec; */ - __m512i const data_key = _mm512_xor_si512 (data_vec, key_vec); - /* data_key_lo = data_key >> 32; */ - __m512i const data_key_lo = _mm512_srli_epi64 (data_key, 32); - /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */ - __m512i const product = _mm512_mul_epu32 (data_key, data_key_lo); - /* xacc[0] += swap(data_vec); */ - __m512i const data_swap = _mm512_shuffle_epi32(data_vec, (_MM_PERM_ENUM)_MM_SHUFFLE(1, 0, 3, 2)); - __m512i const sum = _mm512_add_epi64(*xacc, data_swap); - /* xacc[0] += product; */ - *xacc = _mm512_add_epi64(product, sum); - } -} -XXH_FORCE_INLINE XXH_TARGET_AVX512 XXH3_ACCUMULATE_TEMPLATE(avx512) - -/* - * XXH3_scrambleAcc: Scrambles the accumulators to improve mixing. - * - * Multiplication isn't perfect, as explained by Google in HighwayHash: - * - * // Multiplication mixes/scrambles bytes 0-7 of the 64-bit result to - * // varying degrees. In descending order of goodness, bytes - * // 3 4 2 5 1 6 0 7 have quality 228 224 164 160 100 96 36 32. - * // As expected, the upper and lower bytes are much worse. - * - * Source: https://github.com/google/highwayhash/blob/0aaf66b/highwayhash/hh_avx2.h#L291 - * - * Since our algorithm uses a pseudorandom secret to add some variance into the - * mix, we don't need to (or want to) mix as often or as much as HighwayHash does. - * - * This isn't as tight as XXH3_accumulate, but still written in SIMD to avoid - * extraction. - * - * Both XXH3_64bits and XXH3_128bits use this subroutine. 
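- * - * As a scalar sketch, each 64-bit lane i is scrambled roughly as: - * - * acc[i] = (acc[i] ^ (acc[i] >> 47) ^ secret[i]) * XXH_PRIME32_1; // low 64 bits kept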
- */ - -XXH_FORCE_INLINE XXH_TARGET_AVX512 void -XXH3_scrambleAcc_avx512(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) -{ - XXH_ASSERT((((size_t)acc) & 63) == 0); - XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i)); - { __m512i* const xacc = (__m512i*) acc; - const __m512i prime32 = _mm512_set1_epi32((int)XXH_PRIME32_1); - - /* xacc[0] ^= (xacc[0] >> 47) */ - __m512i const acc_vec = *xacc; - __m512i const shifted = _mm512_srli_epi64 (acc_vec, 47); - /* xacc[0] ^= secret; */ - __m512i const key_vec = _mm512_loadu_si512 (secret); - __m512i const data_key = _mm512_ternarylogic_epi32(key_vec, acc_vec, shifted, 0x96 /* key_vec ^ acc_vec ^ shifted */); - - /* xacc[0] *= XXH_PRIME32_1; */ - __m512i const data_key_hi = _mm512_srli_epi64 (data_key, 32); - __m512i const prod_lo = _mm512_mul_epu32 (data_key, prime32); - __m512i const prod_hi = _mm512_mul_epu32 (data_key_hi, prime32); - *xacc = _mm512_add_epi64(prod_lo, _mm512_slli_epi64(prod_hi, 32)); - } -} - -XXH_FORCE_INLINE XXH_TARGET_AVX512 void -XXH3_initCustomSecret_avx512(void* XXH_RESTRICT customSecret, xxh_u64 seed64) -{ - XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 63) == 0); - XXH_STATIC_ASSERT(XXH_SEC_ALIGN == 64); - XXH_ASSERT(((size_t)customSecret & 63) == 0); - (void)(&XXH_writeLE64); - { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m512i); - __m512i const seed_pos = _mm512_set1_epi64((xxh_i64)seed64); - __m512i const seed = _mm512_mask_sub_epi64(seed_pos, 0xAA, _mm512_set1_epi8(0), seed_pos); - - const __m512i* const src = (const __m512i*) ((const void*) XXH3_kSecret); - __m512i* const dest = ( __m512i*) customSecret; - int i; - XXH_ASSERT(((size_t)src & 63) == 0); /* control alignment */ - XXH_ASSERT(((size_t)dest & 63) == 0); - for (i=0; i < nbRounds; ++i) { - dest[i] = _mm512_add_epi64(_mm512_load_si512(src + i), seed); - } } -} - -#endif - -#if (XXH_VECTOR == XXH_AVX2) \ - || (defined(XXH_DISPATCH_AVX2) && XXH_DISPATCH_AVX2 != 0) - -#ifndef XXH_TARGET_AVX2 -# define XXH_TARGET_AVX2 /* disable attribute target */ -#endif - -XXH_FORCE_INLINE XXH_TARGET_AVX2 void -XXH3_accumulate_512_avx2( void* XXH_RESTRICT acc, - const void* XXH_RESTRICT input, - const void* XXH_RESTRICT secret) -{ - XXH_ASSERT((((size_t)acc) & 31) == 0); - { __m256i* const xacc = (__m256i *) acc; - /* Unaligned. This is mainly for pointer arithmetic, and because - * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */ - const __m256i* const xinput = (const __m256i *) input; - /* Unaligned. This is mainly for pointer arithmetic, and because - * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. 
*/ - const __m256i* const xsecret = (const __m256i *) secret; - - size_t i; - for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) { - /* data_vec = xinput[i]; */ - __m256i const data_vec = _mm256_loadu_si256 (xinput+i); - /* key_vec = xsecret[i]; */ - __m256i const key_vec = _mm256_loadu_si256 (xsecret+i); - /* data_key = data_vec ^ key_vec; */ - __m256i const data_key = _mm256_xor_si256 (data_vec, key_vec); - /* data_key_lo = data_key >> 32; */ - __m256i const data_key_lo = _mm256_srli_epi64 (data_key, 32); - /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */ - __m256i const product = _mm256_mul_epu32 (data_key, data_key_lo); - /* xacc[i] += swap(data_vec); */ - __m256i const data_swap = _mm256_shuffle_epi32(data_vec, _MM_SHUFFLE(1, 0, 3, 2)); - __m256i const sum = _mm256_add_epi64(xacc[i], data_swap); - /* xacc[i] += product; */ - xacc[i] = _mm256_add_epi64(product, sum); - } } -} -XXH_FORCE_INLINE XXH_TARGET_AVX2 XXH3_ACCUMULATE_TEMPLATE(avx2) - -XXH_FORCE_INLINE XXH_TARGET_AVX2 void -XXH3_scrambleAcc_avx2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) -{ - XXH_ASSERT((((size_t)acc) & 31) == 0); - { __m256i* const xacc = (__m256i*) acc; - /* Unaligned. This is mainly for pointer arithmetic, and because - * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */ - const __m256i* const xsecret = (const __m256i *) secret; - const __m256i prime32 = _mm256_set1_epi32((int)XXH_PRIME32_1); - - size_t i; - for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) { - /* xacc[i] ^= (xacc[i] >> 47) */ - __m256i const acc_vec = xacc[i]; - __m256i const shifted = _mm256_srli_epi64 (acc_vec, 47); - __m256i const data_vec = _mm256_xor_si256 (acc_vec, shifted); - /* xacc[i] ^= xsecret; */ - __m256i const key_vec = _mm256_loadu_si256 (xsecret+i); - __m256i const data_key = _mm256_xor_si256 (data_vec, key_vec); - - /* xacc[i] *= XXH_PRIME32_1; */ - __m256i const data_key_hi = _mm256_srli_epi64 (data_key, 32); - __m256i const prod_lo = _mm256_mul_epu32 (data_key, prime32); - __m256i const prod_hi = _mm256_mul_epu32 (data_key_hi, prime32); - xacc[i] = _mm256_add_epi64(prod_lo, _mm256_slli_epi64(prod_hi, 32)); - } - } -} - -XXH_FORCE_INLINE XXH_TARGET_AVX2 void XXH3_initCustomSecret_avx2(void* XXH_RESTRICT customSecret, xxh_u64 seed64) -{ - XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 31) == 0); - XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE / sizeof(__m256i)) == 6); - XXH_STATIC_ASSERT(XXH_SEC_ALIGN <= 64); - (void)(&XXH_writeLE64); - XXH_PREFETCH(customSecret); - { __m256i const seed = _mm256_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64, (xxh_i64)(0U - seed64), (xxh_i64)seed64); - - const __m256i* const src = (const __m256i*) ((const void*) XXH3_kSecret); - __m256i* dest = ( __m256i*) customSecret; - -# if defined(__GNUC__) || defined(__clang__) - /* - * On GCC & Clang, marking 'dest' as modified will cause the compiler: - * - do not extract the secret from sse registers in the internal loop - * - use less common registers, and avoid pushing these reg into stack - */ - XXH_COMPILER_GUARD(dest); -# endif - XXH_ASSERT(((size_t)src & 31) == 0); /* control alignment */ - XXH_ASSERT(((size_t)dest & 31) == 0); - - /* GCC -O2 need unroll loop manually */ - dest[0] = _mm256_add_epi64(_mm256_load_si256(src+0), seed); - dest[1] = _mm256_add_epi64(_mm256_load_si256(src+1), seed); - dest[2] = _mm256_add_epi64(_mm256_load_si256(src+2), seed); - dest[3] = _mm256_add_epi64(_mm256_load_si256(src+3), seed); - dest[4] = _mm256_add_epi64(_mm256_load_si256(src+4), seed); - dest[5] 
= _mm256_add_epi64(_mm256_load_si256(src+5), seed); - } -} - -#endif - -/* x86dispatch always generates SSE2 */ -#if (XXH_VECTOR == XXH_SSE2) || defined(XXH_X86DISPATCH) - -#ifndef XXH_TARGET_SSE2 -# define XXH_TARGET_SSE2 /* disable attribute target */ -#endif - -XXH_FORCE_INLINE XXH_TARGET_SSE2 void -XXH3_accumulate_512_sse2( void* XXH_RESTRICT acc, - const void* XXH_RESTRICT input, - const void* XXH_RESTRICT secret) -{ - /* SSE2 is just a half-scale version of the AVX2 version. */ - XXH_ASSERT((((size_t)acc) & 15) == 0); - { __m128i* const xacc = (__m128i *) acc; - /* Unaligned. This is mainly for pointer arithmetic, and because - * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */ - const __m128i* const xinput = (const __m128i *) input; - /* Unaligned. This is mainly for pointer arithmetic, and because - * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */ - const __m128i* const xsecret = (const __m128i *) secret; - - size_t i; - for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) { - /* data_vec = xinput[i]; */ - __m128i const data_vec = _mm_loadu_si128 (xinput+i); - /* key_vec = xsecret[i]; */ - __m128i const key_vec = _mm_loadu_si128 (xsecret+i); - /* data_key = data_vec ^ key_vec; */ - __m128i const data_key = _mm_xor_si128 (data_vec, key_vec); - /* data_key_lo = data_key >> 32; */ - __m128i const data_key_lo = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1)); - /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */ - __m128i const product = _mm_mul_epu32 (data_key, data_key_lo); - /* xacc[i] += swap(data_vec); */ - __m128i const data_swap = _mm_shuffle_epi32(data_vec, _MM_SHUFFLE(1,0,3,2)); - __m128i const sum = _mm_add_epi64(xacc[i], data_swap); - /* xacc[i] += product; */ - xacc[i] = _mm_add_epi64(product, sum); - } } -} -XXH_FORCE_INLINE XXH_TARGET_SSE2 XXH3_ACCUMULATE_TEMPLATE(sse2) - -XXH_FORCE_INLINE XXH_TARGET_SSE2 void -XXH3_scrambleAcc_sse2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) -{ - XXH_ASSERT((((size_t)acc) & 15) == 0); - { __m128i* const xacc = (__m128i*) acc; - /* Unaligned. This is mainly for pointer arithmetic, and because - * _mm_loadu_si128 requires a const __m128i * pointer for some reason. 
*/ - const __m128i* const xsecret = (const __m128i *) secret; - const __m128i prime32 = _mm_set1_epi32((int)XXH_PRIME32_1); - - size_t i; - for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) { - /* xacc[i] ^= (xacc[i] >> 47) */ - __m128i const acc_vec = xacc[i]; - __m128i const shifted = _mm_srli_epi64 (acc_vec, 47); - __m128i const data_vec = _mm_xor_si128 (acc_vec, shifted); - /* xacc[i] ^= xsecret[i]; */ - __m128i const key_vec = _mm_loadu_si128 (xsecret+i); - __m128i const data_key = _mm_xor_si128 (data_vec, key_vec); - - /* xacc[i] *= XXH_PRIME32_1; */ - __m128i const data_key_hi = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1)); - __m128i const prod_lo = _mm_mul_epu32 (data_key, prime32); - __m128i const prod_hi = _mm_mul_epu32 (data_key_hi, prime32); - xacc[i] = _mm_add_epi64(prod_lo, _mm_slli_epi64(prod_hi, 32)); - } - } -} - -XXH_FORCE_INLINE XXH_TARGET_SSE2 void XXH3_initCustomSecret_sse2(void* XXH_RESTRICT customSecret, xxh_u64 seed64) -{ - XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0); - (void)(&XXH_writeLE64); - { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m128i); - -# if defined(_MSC_VER) && defined(_M_IX86) && _MSC_VER < 1900 - /* MSVC 32bit mode does not support _mm_set_epi64x before 2015 */ - XXH_ALIGN(16) const xxh_i64 seed64x2[2] = { (xxh_i64)seed64, (xxh_i64)(0U - seed64) }; - __m128i const seed = _mm_load_si128((__m128i const*)seed64x2); -# else - __m128i const seed = _mm_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64); -# endif - int i; - - const void* const src16 = XXH3_kSecret; - __m128i* dst16 = (__m128i*) customSecret; -# if defined(__GNUC__) || defined(__clang__) - /* - * On GCC & Clang, marking 'dest' as modified will cause the compiler: - * - do not extract the secret from sse registers in the internal loop - * - use less common registers, and avoid pushing these reg into stack - */ - XXH_COMPILER_GUARD(dst16); -# endif - XXH_ASSERT(((size_t)src16 & 15) == 0); /* control alignment */ - XXH_ASSERT(((size_t)dst16 & 15) == 0); - - for (i=0; i < nbRounds; ++i) { - dst16[i] = _mm_add_epi64(_mm_load_si128((const __m128i *)src16+i), seed); - } } -} - -#endif - -#if (XXH_VECTOR == XXH_NEON) - -/* forward declarations for the scalar routines */ -XXH_FORCE_INLINE void -XXH3_scalarRound(void* XXH_RESTRICT acc, void const* XXH_RESTRICT input, - void const* XXH_RESTRICT secret, size_t lane); - -XXH_FORCE_INLINE void -XXH3_scalarScrambleRound(void* XXH_RESTRICT acc, - void const* XXH_RESTRICT secret, size_t lane); - -/*! - * @internal - * @brief The bulk processing loop for NEON and WASM SIMD128. - * - * The NEON code path is actually partially scalar when running on AArch64. This - * is to optimize the pipelining and can have up to 15% speedup depending on the - * CPU, and it also mitigates some GCC codegen issues. - * - * @see XXH3_NEON_LANES for configuring this and details about this optimization. - * - * NEON's 32-bit to 64-bit long multiply takes a half vector of 32-bit - * integers instead of the other platforms which mask full 64-bit vectors, - * so the setup is more complicated than just shifting right. - * - * Additionally, there is an optimization for 4 lanes at once noted below. - * - * Since, as stated, the most optimal amount of lanes for Cortexes is 6, - * there needs to be *three* versions of the accumulate operation used - * for the remaining 2 lanes. - * - * WASM's SIMD128 uses SIMDe's arm_neon.h polyfill because the intrinsics overlap - * nearly perfectly. 
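- * - * As a sketch: with the default XXH3_NEON_LANES == 6 on a non-Apple AArch64 - * build, the 8 accumulator lanes are split roughly as - * - * lanes 0..5 : three uint64x2_t NEON vectors (the two loops below) - * lanes 6..7 : XXH3_scalarRound(), issued on the scalar pipeline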
- */ - -XXH_FORCE_INLINE void -XXH3_accumulate_512_neon( void* XXH_RESTRICT acc, - const void* XXH_RESTRICT input, - const void* XXH_RESTRICT secret) -{ - XXH_ASSERT((((size_t)acc) & 15) == 0); - XXH_STATIC_ASSERT(XXH3_NEON_LANES > 0 && XXH3_NEON_LANES <= XXH_ACC_NB && XXH3_NEON_LANES % 2 == 0); - { /* GCC for darwin arm64 does not like aliasing here */ - xxh_aliasing_uint64x2_t* const xacc = (xxh_aliasing_uint64x2_t*) acc; - /* We don't use a uint32x4_t pointer because it causes bus errors on ARMv7. */ - uint8_t const* xinput = (const uint8_t *) input; - uint8_t const* xsecret = (const uint8_t *) secret; - - size_t i; -#ifdef __wasm_simd128__ - /* - * On WASM SIMD128, Clang emits direct address loads when XXH3_kSecret - * is constant propagated, which results in it converting it to this - * inside the loop: - * - * a = v128.load(XXH3_kSecret + 0 + $secret_offset, offset = 0) - * b = v128.load(XXH3_kSecret + 16 + $secret_offset, offset = 0) - * ... - * - * This requires a full 32-bit address immediate (and therefore a 6 byte - * instruction) as well as an add for each offset. - * - * Putting an asm guard prevents it from folding (at the cost of losing - * the alignment hint), and uses the free offset in `v128.load` instead - * of adding secret_offset each time which overall reduces code size by - * about a kilobyte and improves performance. - */ - XXH_COMPILER_GUARD(xsecret); -#endif - /* Scalar lanes use the normal scalarRound routine */ - for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) { - XXH3_scalarRound(acc, input, secret, i); - } - i = 0; - /* 4 NEON lanes at a time. */ - for (; i+1 < XXH3_NEON_LANES / 2; i+=2) { - /* data_vec = xinput[i]; */ - uint64x2_t data_vec_1 = XXH_vld1q_u64(xinput + (i * 16)); - uint64x2_t data_vec_2 = XXH_vld1q_u64(xinput + ((i+1) * 16)); - /* key_vec = xsecret[i]; */ - uint64x2_t key_vec_1 = XXH_vld1q_u64(xsecret + (i * 16)); - uint64x2_t key_vec_2 = XXH_vld1q_u64(xsecret + ((i+1) * 16)); - /* data_swap = swap(data_vec) */ - uint64x2_t data_swap_1 = vextq_u64(data_vec_1, data_vec_1, 1); - uint64x2_t data_swap_2 = vextq_u64(data_vec_2, data_vec_2, 1); - /* data_key = data_vec ^ key_vec; */ - uint64x2_t data_key_1 = veorq_u64(data_vec_1, key_vec_1); - uint64x2_t data_key_2 = veorq_u64(data_vec_2, key_vec_2); - - /* - * If we reinterpret the 64x2 vectors as 32x4 vectors, we can use a - * de-interleave operation for 4 lanes in 1 step with `vuzpq_u32` to - * get one vector with the low 32 bits of each lane, and one vector - * with the high 32 bits of each lane. - * - * The intrinsic returns a double vector because the original ARMv7-a - * instruction modified both arguments in place. AArch64 and SIMD128 emit - * two instructions from this intrinsic. - * - * [ dk11L | dk11H | dk12L | dk12H ] -> [ dk11L | dk12L | dk21L | dk22L ] - * [ dk21L | dk21H | dk22L | dk22H ] -> [ dk11H | dk12H | dk21H | dk22H ] - */ - uint32x4x2_t unzipped = vuzpq_u32( - vreinterpretq_u32_u64(data_key_1), - vreinterpretq_u32_u64(data_key_2) - ); - /* data_key_lo = data_key & 0xFFFFFFFF */ - uint32x4_t data_key_lo = unzipped.val[0]; - /* data_key_hi = data_key >> 32 */ - uint32x4_t data_key_hi = unzipped.val[1]; - /* - * Then, we can split the vectors horizontally and multiply which, as for most - * widening intrinsics, have a variant that works on both high half vectors - * for free on AArch64. A similar instruction is available on SIMD128. 
- * - * sum = data_swap + (u64x2) data_key_lo * (u64x2) data_key_hi - */ - uint64x2_t sum_1 = XXH_vmlal_low_u32(data_swap_1, data_key_lo, data_key_hi); - uint64x2_t sum_2 = XXH_vmlal_high_u32(data_swap_2, data_key_lo, data_key_hi); - /* - * Clang reorders - * a += b * c; // umlal swap.2d, dkl.2s, dkh.2s - * c += a; // add acc.2d, acc.2d, swap.2d - * to - * c += a; // add acc.2d, acc.2d, swap.2d - * c += b * c; // umlal acc.2d, dkl.2s, dkh.2s - * - * While it would make sense in theory since the addition is faster, - * for reasons likely related to umlal being limited to certain NEON - * pipelines, this is worse. A compiler guard fixes this. - */ - XXH_COMPILER_GUARD_CLANG_NEON(sum_1); - XXH_COMPILER_GUARD_CLANG_NEON(sum_2); - /* xacc[i] = acc_vec + sum; */ - xacc[i] = vaddq_u64(xacc[i], sum_1); - xacc[i+1] = vaddq_u64(xacc[i+1], sum_2); - } - /* Operate on the remaining NEON lanes 2 at a time. */ - for (; i < XXH3_NEON_LANES / 2; i++) { - /* data_vec = xinput[i]; */ - uint64x2_t data_vec = XXH_vld1q_u64(xinput + (i * 16)); - /* key_vec = xsecret[i]; */ - uint64x2_t key_vec = XXH_vld1q_u64(xsecret + (i * 16)); - /* acc_vec_2 = swap(data_vec) */ - uint64x2_t data_swap = vextq_u64(data_vec, data_vec, 1); - /* data_key = data_vec ^ key_vec; */ - uint64x2_t data_key = veorq_u64(data_vec, key_vec); - /* For two lanes, just use VMOVN and VSHRN. */ - /* data_key_lo = data_key & 0xFFFFFFFF; */ - uint32x2_t data_key_lo = vmovn_u64(data_key); - /* data_key_hi = data_key >> 32; */ - uint32x2_t data_key_hi = vshrn_n_u64(data_key, 32); - /* sum = data_swap + (u64x2) data_key_lo * (u64x2) data_key_hi; */ - uint64x2_t sum = vmlal_u32(data_swap, data_key_lo, data_key_hi); - /* Same Clang workaround as before */ - XXH_COMPILER_GUARD_CLANG_NEON(sum); - /* xacc[i] = acc_vec + sum; */ - xacc[i] = vaddq_u64 (xacc[i], sum); - } - } -} -XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(neon) - -XXH_FORCE_INLINE void -XXH3_scrambleAcc_neon(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) -{ - XXH_ASSERT((((size_t)acc) & 15) == 0); - - { xxh_aliasing_uint64x2_t* xacc = (xxh_aliasing_uint64x2_t*) acc; - uint8_t const* xsecret = (uint8_t const*) secret; - - size_t i; - /* WASM uses operator overloads and doesn't need these. 
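- * (With SIMDe on WASM, plain vector operators work, so the - * `data_key * XXH_PRIME32_1` branch below compiles to a native - * 64-bit lane multiply.)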
*/ -#ifndef __wasm_simd128__ - /* { prime32_1, prime32_1 } */ - uint32x2_t const kPrimeLo = vdup_n_u32(XXH_PRIME32_1); - /* { 0, prime32_1, 0, prime32_1 } */ - uint32x4_t const kPrimeHi = vreinterpretq_u32_u64(vdupq_n_u64((xxh_u64)XXH_PRIME32_1 << 32)); -#endif - - /* AArch64 uses both scalar and neon at the same time */ - for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) { - XXH3_scalarScrambleRound(acc, secret, i); - } - for (i=0; i < XXH3_NEON_LANES / 2; i++) { - /* xacc[i] ^= (xacc[i] >> 47); */ - uint64x2_t acc_vec = xacc[i]; - uint64x2_t shifted = vshrq_n_u64(acc_vec, 47); - uint64x2_t data_vec = veorq_u64(acc_vec, shifted); - - /* xacc[i] ^= xsecret[i]; */ - uint64x2_t key_vec = XXH_vld1q_u64(xsecret + (i * 16)); - uint64x2_t data_key = veorq_u64(data_vec, key_vec); - /* xacc[i] *= XXH_PRIME32_1 */ -#ifdef __wasm_simd128__ - /* SIMD128 has multiply by u64x2, use it instead of expanding and scalarizing */ - xacc[i] = data_key * XXH_PRIME32_1; -#else - /* - * Expanded version with portable NEON intrinsics - * - * lo(x) * lo(y) + (hi(x) * lo(y) << 32) - * - * prod_hi = hi(data_key) * lo(prime) << 32 - * - * Since we only need 32 bits of this multiply a trick can be used, reinterpreting the vector - * as a uint32x4_t and multiplying by { 0, prime, 0, prime } to cancel out the unwanted bits - * and avoid the shift. - */ - uint32x4_t prod_hi = vmulq_u32 (vreinterpretq_u32_u64(data_key), kPrimeHi); - /* Extract low bits for vmlal_u32 */ - uint32x2_t data_key_lo = vmovn_u64(data_key); - /* xacc[i] = prod_hi + lo(data_key) * XXH_PRIME32_1; */ - xacc[i] = vmlal_u32(vreinterpretq_u64_u32(prod_hi), data_key_lo, kPrimeLo); -#endif - } - } -} -#endif - -#if (XXH_VECTOR == XXH_VSX) - -XXH_FORCE_INLINE void -XXH3_accumulate_512_vsx( void* XXH_RESTRICT acc, - const void* XXH_RESTRICT input, - const void* XXH_RESTRICT secret) -{ - /* presumed aligned */ - xxh_aliasing_u64x2* const xacc = (xxh_aliasing_u64x2*) acc; - xxh_u8 const* const xinput = (xxh_u8 const*) input; /* no alignment restriction */ - xxh_u8 const* const xsecret = (xxh_u8 const*) secret; /* no alignment restriction */ - xxh_u64x2 const v32 = { 32, 32 }; - size_t i; - for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) { - /* data_vec = xinput[i]; */ - xxh_u64x2 const data_vec = XXH_vec_loadu(xinput + 16*i); - /* key_vec = xsecret[i]; */ - xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + 16*i); - xxh_u64x2 const data_key = data_vec ^ key_vec; - /* shuffled = (data_key << 32) | (data_key >> 32); */ - xxh_u32x4 const shuffled = (xxh_u32x4)vec_rl(data_key, v32); - /* product = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)shuffled & 0xFFFFFFFF); */ - xxh_u64x2 const product = XXH_vec_mulo((xxh_u32x4)data_key, shuffled); - /* acc_vec = xacc[i]; */ - xxh_u64x2 acc_vec = xacc[i]; - acc_vec += product; - - /* swap high and low halves */ -#ifdef __s390x__ - acc_vec += vec_permi(data_vec, data_vec, 2); -#else - acc_vec += vec_xxpermdi(data_vec, data_vec, 2); -#endif - xacc[i] = acc_vec; - } -} -XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(vsx) - -XXH_FORCE_INLINE void -XXH3_scrambleAcc_vsx(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) -{ - XXH_ASSERT((((size_t)acc) & 15) == 0); - - { xxh_aliasing_u64x2* const xacc = (xxh_aliasing_u64x2*) acc; - const xxh_u8* const xsecret = (const xxh_u8*) secret; - /* constants */ - xxh_u64x2 const v32 = { 32, 32 }; - xxh_u64x2 const v47 = { 47, 47 }; - xxh_u32x4 const prime = { XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1 }; - size_t i; - for (i = 0; i < XXH_STRIPE_LEN / 
sizeof(xxh_u64x2); i++) { - /* xacc[i] ^= (xacc[i] >> 47); */ - xxh_u64x2 const acc_vec = xacc[i]; - xxh_u64x2 const data_vec = acc_vec ^ (acc_vec >> v47); - - /* xacc[i] ^= xsecret[i]; */ - xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + 16*i); - xxh_u64x2 const data_key = data_vec ^ key_vec; - - /* xacc[i] *= XXH_PRIME32_1 */ - /* prod_lo = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)prime & 0xFFFFFFFF); */ - xxh_u64x2 const prod_even = XXH_vec_mule((xxh_u32x4)data_key, prime); - /* prod_hi = ((xxh_u64x2)data_key >> 32) * ((xxh_u64x2)prime >> 32); */ - xxh_u64x2 const prod_odd = XXH_vec_mulo((xxh_u32x4)data_key, prime); - xacc[i] = prod_odd + (prod_even << v32); - } } -} - -#endif - -#if (XXH_VECTOR == XXH_SVE) - -XXH_FORCE_INLINE void -XXH3_accumulate_512_sve( void* XXH_RESTRICT acc, - const void* XXH_RESTRICT input, - const void* XXH_RESTRICT secret) -{ - uint64_t *xacc = (uint64_t *)acc; - const uint64_t *xinput = (const uint64_t *)(const void *)input; - const uint64_t *xsecret = (const uint64_t *)(const void *)secret; - svuint64_t kSwap = sveor_n_u64_z(svptrue_b64(), svindex_u64(0, 1), 1); - uint64_t element_count = svcntd(); - if (element_count >= 8) { - svbool_t mask = svptrue_pat_b64(SV_VL8); - svuint64_t vacc = svld1_u64(mask, xacc); - ACCRND(vacc, 0); - svst1_u64(mask, xacc, vacc); - } else if (element_count == 2) { /* sve128 */ - svbool_t mask = svptrue_pat_b64(SV_VL2); - svuint64_t acc0 = svld1_u64(mask, xacc + 0); - svuint64_t acc1 = svld1_u64(mask, xacc + 2); - svuint64_t acc2 = svld1_u64(mask, xacc + 4); - svuint64_t acc3 = svld1_u64(mask, xacc + 6); - ACCRND(acc0, 0); - ACCRND(acc1, 2); - ACCRND(acc2, 4); - ACCRND(acc3, 6); - svst1_u64(mask, xacc + 0, acc0); - svst1_u64(mask, xacc + 2, acc1); - svst1_u64(mask, xacc + 4, acc2); - svst1_u64(mask, xacc + 6, acc3); - } else { - svbool_t mask = svptrue_pat_b64(SV_VL4); - svuint64_t acc0 = svld1_u64(mask, xacc + 0); - svuint64_t acc1 = svld1_u64(mask, xacc + 4); - ACCRND(acc0, 0); - ACCRND(acc1, 4); - svst1_u64(mask, xacc + 0, acc0); - svst1_u64(mask, xacc + 4, acc1); - } -} - -XXH_FORCE_INLINE void -XXH3_accumulate_sve(xxh_u64* XXH_RESTRICT acc, - const xxh_u8* XXH_RESTRICT input, - const xxh_u8* XXH_RESTRICT secret, - size_t nbStripes) -{ - if (nbStripes != 0) { - uint64_t *xacc = (uint64_t *)acc; - const uint64_t *xinput = (const uint64_t *)(const void *)input; - const uint64_t *xsecret = (const uint64_t *)(const void *)secret; - svuint64_t kSwap = sveor_n_u64_z(svptrue_b64(), svindex_u64(0, 1), 1); - uint64_t element_count = svcntd(); - if (element_count >= 8) { - svbool_t mask = svptrue_pat_b64(SV_VL8); - svuint64_t vacc = svld1_u64(mask, xacc + 0); - do { - /* svprfd(svbool_t, void *, enum svfprop); */ - svprfd(mask, xinput + 128, SV_PLDL1STRM); - ACCRND(vacc, 0); - xinput += 8; - xsecret += 1; - nbStripes--; - } while (nbStripes != 0); - - svst1_u64(mask, xacc + 0, vacc); - } else if (element_count == 2) { /* sve128 */ - svbool_t mask = svptrue_pat_b64(SV_VL2); - svuint64_t acc0 = svld1_u64(mask, xacc + 0); - svuint64_t acc1 = svld1_u64(mask, xacc + 2); - svuint64_t acc2 = svld1_u64(mask, xacc + 4); - svuint64_t acc3 = svld1_u64(mask, xacc + 6); - do { - svprfd(mask, xinput + 128, SV_PLDL1STRM); - ACCRND(acc0, 0); - ACCRND(acc1, 2); - ACCRND(acc2, 4); - ACCRND(acc3, 6); - xinput += 8; - xsecret += 1; - nbStripes--; - } while (nbStripes != 0); - - svst1_u64(mask, xacc + 0, acc0); - svst1_u64(mask, xacc + 2, acc1); - svst1_u64(mask, xacc + 4, acc2); - svst1_u64(mask, xacc + 6, acc3); - } else { - svbool_t mask = 
svptrue_pat_b64(SV_VL4); - svuint64_t acc0 = svld1_u64(mask, xacc + 0); - svuint64_t acc1 = svld1_u64(mask, xacc + 4); - do { - svprfd(mask, xinput + 128, SV_PLDL1STRM); - ACCRND(acc0, 0); - ACCRND(acc1, 4); - xinput += 8; - xsecret += 1; - nbStripes--; - } while (nbStripes != 0); - - svst1_u64(mask, xacc + 0, acc0); - svst1_u64(mask, xacc + 4, acc1); - } - } -} - -#endif - -/* scalar variants - universal */ - -#if defined(__aarch64__) && (defined(__GNUC__) || defined(__clang__)) -/* - * In XXH3_scalarRound(), GCC and Clang have a similar codegen issue, where they - * emit an excess mask and a full 64-bit multiply-add (MADD X-form). - * - * While this might not seem like much, as AArch64 is a 64-bit architecture, only - * big Cortex designs have a full 64-bit multiplier. - * - * On the little cores, the smaller 32-bit multiplier is used, and full 64-bit - * multiplies expand to 2-3 multiplies in microcode. This has a major penalty - * of up to 4 latency cycles and 2 stall cycles in the multiply pipeline. - * - * Thankfully, AArch64 still provides the 32-bit long multiply-add (UMADDL) which does - * not have this penalty and does the mask automatically. - */ -XXH_FORCE_INLINE xxh_u64 -XXH_mult32to64_add64(xxh_u64 lhs, xxh_u64 rhs, xxh_u64 acc) -{ - xxh_u64 ret; - /* note: %x = 64-bit register, %w = 32-bit register */ - __asm__("umaddl %x0, %w1, %w2, %x3" : "=r" (ret) : "r" (lhs), "r" (rhs), "r" (acc)); - return ret; -} -#else -XXH_FORCE_INLINE xxh_u64 -XXH_mult32to64_add64(xxh_u64 lhs, xxh_u64 rhs, xxh_u64 acc) -{ - return XXH_mult32to64((xxh_u32)lhs, (xxh_u32)rhs) + acc; -} -#endif - -/*! - * @internal - * @brief Scalar round for @ref XXH3_accumulate_512_scalar(). - * - * This is extracted to its own function because the NEON path uses a combination - * of NEON and scalar. - */ -XXH_FORCE_INLINE void -XXH3_scalarRound(void* XXH_RESTRICT acc, - void const* XXH_RESTRICT input, - void const* XXH_RESTRICT secret, - size_t lane) -{ - xxh_u64* xacc = (xxh_u64*) acc; - xxh_u8 const* xinput = (xxh_u8 const*) input; - xxh_u8 const* xsecret = (xxh_u8 const*) secret; - XXH_ASSERT(lane < XXH_ACC_NB); - XXH_ASSERT(((size_t)acc & (XXH_ACC_ALIGN-1)) == 0); - { - xxh_u64 const data_val = XXH_readLE64(xinput + lane * 8); - xxh_u64 const data_key = data_val ^ XXH_readLE64(xsecret + lane * 8); - xacc[lane ^ 1] += data_val; /* swap adjacent lanes */ - xacc[lane] = XXH_mult32to64_add64(data_key /* & 0xFFFFFFFF */, data_key >> 32, xacc[lane]); - } -} - -/*! - * @internal - * @brief Processes a 64 byte block of data using the scalar path. - */ -XXH_FORCE_INLINE void -XXH3_accumulate_512_scalar(void* XXH_RESTRICT acc, - const void* XXH_RESTRICT input, - const void* XXH_RESTRICT secret) -{ - size_t i; - /* ARM GCC refuses to unroll this loop, resulting in a 24% slowdown on ARMv6. */ -#if defined(__GNUC__) && !defined(__clang__) \ - && (defined(__arm__) || defined(__thumb2__)) \ - && defined(__ARM_FEATURE_UNALIGNED) /* no unaligned access just wastes bytes */ \ - && XXH_SIZE_OPT <= 0 -# pragma GCC unroll 8 -#endif - for (i=0; i < XXH_ACC_NB; i++) { - XXH3_scalarRound(acc, input, secret, i); - } -} -XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(scalar) - -/*! - * @internal - * @brief Scalar scramble step for @ref XXH3_scrambleAcc_scalar(). - * - * This is extracted to its own function because the NEON path uses a combination - * of NEON and scalar. 
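For reference, both definitions of `XXH_mult32to64_add64` above compute the same thing: the low 32 bits of each operand multiplied into a 64-bit product, plus a 64-bit accumulator, which is exactly what a single AArch64 `UMADDL` does. A throwaway self-check of that contract (illustrative, not part of xxHash):

```c
#include <assert.h>
#include <stdint.h>

/* Hypothetical standalone copy of the portable fallback's semantics. */
static uint64_t mult32to64_add64_ref(uint64_t lhs, uint64_t rhs, uint64_t acc)
{
    return (uint64_t)(uint32_t)lhs * (uint64_t)(uint32_t)rhs + acc;
}

int main(void)
{
    /* (2^32 - 1)^2 + 1 == 0xFFFFFFFE00000002 */
    assert(mult32to64_add64_ref(0xFFFFFFFFu, 0xFFFFFFFFu, 1) == 0xFFFFFFFE00000002ull);
    return 0;
}
```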
- */ -XXH_FORCE_INLINE void -XXH3_scalarScrambleRound(void* XXH_RESTRICT acc, - void const* XXH_RESTRICT secret, - size_t lane) -{ - xxh_u64* const xacc = (xxh_u64*) acc; /* presumed aligned */ - const xxh_u8* const xsecret = (const xxh_u8*) secret; /* no alignment restriction */ - XXH_ASSERT((((size_t)acc) & (XXH_ACC_ALIGN-1)) == 0); - XXH_ASSERT(lane < XXH_ACC_NB); - { - xxh_u64 const key64 = XXH_readLE64(xsecret + lane * 8); - xxh_u64 acc64 = xacc[lane]; - acc64 = XXH_xorshift64(acc64, 47); - acc64 ^= key64; - acc64 *= XXH_PRIME32_1; - xacc[lane] = acc64; - } -} - -/*! - * @internal - * @brief Scrambles the accumulators after a large chunk has been read - */ -XXH_FORCE_INLINE void -XXH3_scrambleAcc_scalar(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) -{ - size_t i; - for (i=0; i < XXH_ACC_NB; i++) { - XXH3_scalarScrambleRound(acc, secret, i); - } -} - -XXH_FORCE_INLINE void -XXH3_initCustomSecret_scalar(void* XXH_RESTRICT customSecret, xxh_u64 seed64) -{ - /* - * We need a separate pointer for the hack below, - * which requires a non-const pointer. - * Any decent compiler will optimize this out otherwise. - */ - const xxh_u8* kSecretPtr = XXH3_kSecret; - XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0); - -#if defined(__GNUC__) && defined(__aarch64__) - /* - * UGLY HACK: - * GCC and Clang generate a bunch of MOV/MOVK pairs for aarch64, and they are - * placed sequentially, in order, at the top of the unrolled loop. - * - * While MOVK is great for generating constants (2 cycles for a 64-bit - * constant compared to 4 cycles for LDR), it fights for bandwidth with - * the arithmetic instructions. - * - * I L S - * MOVK - * MOVK - * MOVK - * MOVK - * ADD - * SUB STR - * STR - * By forcing loads from memory (as the asm line causes the compiler to assume - * that XXH3_kSecretPtr has been changed), the pipelines are used more - * efficiently: - * I L S - * LDR - * ADD LDR - * SUB STR - * STR - * - * See XXH3_NEON_LANES for details on the pipeline. - * - * XXH3_64bits_withSeed, len == 256, Snapdragon 835 - * without hack: 2654.4 MB/s - * with hack: 3202.9 MB/s - */ - XXH_COMPILER_GUARD(kSecretPtr); -#endif - { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / 16; - int i; - for (i=0; i < nbRounds; i++) { - /* - * The asm hack causes the compiler to assume that kSecretPtr aliases with - * customSecret, and on aarch64, this prevented LDP from merging two - * loads together for free. Putting the loads together before the stores - * properly generates LDP.
- */ - xxh_u64 lo = XXH_readLE64(kSecretPtr + 16*i) + seed64; - xxh_u64 hi = XXH_readLE64(kSecretPtr + 16*i + 8) - seed64; - XXH_writeLE64((xxh_u8*)customSecret + 16*i, lo); - XXH_writeLE64((xxh_u8*)customSecret + 16*i + 8, hi); - } } -} - - -typedef void (*XXH3_f_accumulate)(xxh_u64* XXH_RESTRICT, const xxh_u8* XXH_RESTRICT, const xxh_u8* XXH_RESTRICT, size_t); -typedef void (*XXH3_f_scrambleAcc)(void* XXH_RESTRICT, const void*); -typedef void (*XXH3_f_initCustomSecret)(void* XXH_RESTRICT, xxh_u64); - - -#if (XXH_VECTOR == XXH_AVX512) - -#define XXH3_accumulate_512 XXH3_accumulate_512_avx512 -#define XXH3_accumulate XXH3_accumulate_avx512 -#define XXH3_scrambleAcc XXH3_scrambleAcc_avx512 -#define XXH3_initCustomSecret XXH3_initCustomSecret_avx512 - -#elif (XXH_VECTOR == XXH_AVX2) - -#define XXH3_accumulate_512 XXH3_accumulate_512_avx2 -#define XXH3_accumulate XXH3_accumulate_avx2 -#define XXH3_scrambleAcc XXH3_scrambleAcc_avx2 -#define XXH3_initCustomSecret XXH3_initCustomSecret_avx2 - -#elif (XXH_VECTOR == XXH_SSE2) - -#define XXH3_accumulate_512 XXH3_accumulate_512_sse2 -#define XXH3_accumulate XXH3_accumulate_sse2 -#define XXH3_scrambleAcc XXH3_scrambleAcc_sse2 -#define XXH3_initCustomSecret XXH3_initCustomSecret_sse2 - -#elif (XXH_VECTOR == XXH_NEON) - -#define XXH3_accumulate_512 XXH3_accumulate_512_neon -#define XXH3_accumulate XXH3_accumulate_neon -#define XXH3_scrambleAcc XXH3_scrambleAcc_neon -#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar - -#elif (XXH_VECTOR == XXH_VSX) - -#define XXH3_accumulate_512 XXH3_accumulate_512_vsx -#define XXH3_accumulate XXH3_accumulate_vsx -#define XXH3_scrambleAcc XXH3_scrambleAcc_vsx -#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar - -#elif (XXH_VECTOR == XXH_SVE) -#define XXH3_accumulate_512 XXH3_accumulate_512_sve -#define XXH3_accumulate XXH3_accumulate_sve -#define XXH3_scrambleAcc XXH3_scrambleAcc_scalar -#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar - -#else /* scalar */ - -#define XXH3_accumulate_512 XXH3_accumulate_512_scalar -#define XXH3_accumulate XXH3_accumulate_scalar -#define XXH3_scrambleAcc XXH3_scrambleAcc_scalar -#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar - -#endif - -#if XXH_SIZE_OPT >= 1 /* don't do SIMD for initialization */ -# undef XXH3_initCustomSecret -# define XXH3_initCustomSecret XXH3_initCustomSecret_scalar -#endif - -XXH_FORCE_INLINE void -XXH3_hashLong_internal_loop(xxh_u64* XXH_RESTRICT acc, - const xxh_u8* XXH_RESTRICT input, size_t len, - const xxh_u8* XXH_RESTRICT secret, size_t secretSize, - XXH3_f_accumulate f_acc, - XXH3_f_scrambleAcc f_scramble) -{ - size_t const nbStripesPerBlock = (secretSize - XXH_STRIPE_LEN) / XXH_SECRET_CONSUME_RATE; - size_t const block_len = XXH_STRIPE_LEN * nbStripesPerBlock; - size_t const nb_blocks = (len - 1) / block_len; - - size_t n; - - XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); - - for (n = 0; n < nb_blocks; n++) { - f_acc(acc, input + n*block_len, secret, nbStripesPerBlock); - f_scramble(acc, secret + secretSize - XXH_STRIPE_LEN); - } - - /* last partial block */ - XXH_ASSERT(len > XXH_STRIPE_LEN); - { size_t const nbStripes = ((len - 1) - (block_len * nb_blocks)) / XXH_STRIPE_LEN; - XXH_ASSERT(nbStripes <= (secretSize / XXH_SECRET_CONSUME_RATE)); - f_acc(acc, input + nb_blocks*block_len, secret, nbStripes); - - /* last stripe */ - { const xxh_u8* const p = input + len - XXH_STRIPE_LEN; -#define XXH_SECRET_LASTACC_START 7 /* not aligned on 8, last secret is different from acc & scrambler */ - XXH3_accumulate_512(acc, p, 
secret + secretSize - XXH_STRIPE_LEN - XXH_SECRET_LASTACC_START); - } } -} - -XXH_FORCE_INLINE xxh_u64 -XXH3_mix2Accs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret) -{ - return XXH3_mul128_fold64( - acc[0] ^ XXH_readLE64(secret), - acc[1] ^ XXH_readLE64(secret+8) ); -} - -static XXH64_hash_t -XXH3_mergeAccs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret, xxh_u64 start) -{ - xxh_u64 result64 = start; - size_t i = 0; - - for (i = 0; i < 4; i++) { - result64 += XXH3_mix2Accs(acc+2*i, secret + 16*i); -#if defined(__clang__) /* Clang */ \ - && (defined(__arm__) || defined(__thumb__)) /* ARMv7 */ \ - && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \ - && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable */ - /* - * UGLY HACK: - * Prevent autovectorization on Clang ARMv7-a. Exact same problem as - * the one in XXH3_len_129to240_64b. Speeds up shorter keys > 240b. - * XXH3_64bits, len == 256, Snapdragon 835: - * without hack: 2063.7 MB/s - * with hack: 2560.7 MB/s - */ - XXH_COMPILER_GUARD(result64); -#endif - } - - return XXH3_avalanche(result64); -} - -#define XXH3_INIT_ACC { XXH_PRIME32_3, XXH_PRIME64_1, XXH_PRIME64_2, XXH_PRIME64_3, \ - XXH_PRIME64_4, XXH_PRIME32_2, XXH_PRIME64_5, XXH_PRIME32_1 } - -XXH_FORCE_INLINE XXH64_hash_t -XXH3_hashLong_64b_internal(const void* XXH_RESTRICT input, size_t len, - const void* XXH_RESTRICT secret, size_t secretSize, - XXH3_f_accumulate f_acc, - XXH3_f_scrambleAcc f_scramble) -{ - XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC; - - XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, (const xxh_u8*)secret, secretSize, f_acc, f_scramble); - - /* converge into final hash */ - XXH_STATIC_ASSERT(sizeof(acc) == 64); - /* do not align on 8, so that the secret is different from the accumulator */ -#define XXH_SECRET_MERGEACCS_START 11 - XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START); - return XXH3_mergeAccs(acc, (const xxh_u8*)secret + XXH_SECRET_MERGEACCS_START, (xxh_u64)len * XXH_PRIME64_1); -} - -/* - * It's important for performance to transmit secret's size (when it's static) - * so that the compiler can properly optimize the vectorized loop. - * This makes a big performance difference for "medium" keys (<1 KB) when using AVX instruction set. - * When the secret size is unknown, or on GCC 12 where the mix of NO_INLINE and FORCE_INLINE - * breaks -Og, this is XXH_NO_INLINE. - */ -XXH3_WITH_SECRET_INLINE XXH64_hash_t -XXH3_hashLong_64b_withSecret(const void* XXH_RESTRICT input, size_t len, - XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen) -{ - (void)seed64; - return XXH3_hashLong_64b_internal(input, len, secret, secretLen, XXH3_accumulate, XXH3_scrambleAcc); -} - -/* - * It's preferable for performance that XXH3_hashLong is not inlined, - * as it results in a smaller function for small data, easier to the instruction cache. - * Note that inside this no_inline function, we do inline the internal loop, - * and provide a statically defined secret size to allow optimization of vector loop. 
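All of this dispatch machinery is consumed through the short public entry points that follow. A minimal one-shot usage sketch (assuming the public `xxhash.h` header is on the include path):

```c
#include <stdio.h>
#include <string.h>
#include "xxhash.h"

int main(void)
{
    const char msg[] = "hello world";
    /* unseeded and seeded one-shot 64-bit hashes of the same input */
    XXH64_hash_t h0 = XXH3_64bits(msg, strlen(msg));
    XXH64_hash_t h1 = XXH3_64bits_withSeed(msg, strlen(msg), 42);
    printf("%016llx %016llx\n", (unsigned long long)h0, (unsigned long long)h1);
    return 0;
}
```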
- */ -XXH_NO_INLINE XXH_PUREF XXH64_hash_t -XXH3_hashLong_64b_default(const void* XXH_RESTRICT input, size_t len, - XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen) -{ - (void)seed64; (void)secret; (void)secretLen; - return XXH3_hashLong_64b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_accumulate, XXH3_scrambleAcc); -} - -/* - * XXH3_hashLong_64b_withSeed(): - * Generate a custom key based on alteration of default XXH3_kSecret with the seed, - * and then use this key for long mode hashing. - * - * This operation is decently fast but nonetheless costs a little bit of time. - * Try to avoid it whenever possible (typically when seed==0). - * - * It's important for performance that XXH3_hashLong is not inlined. Not sure - * why (uop cache maybe?), but the difference is large and easily measurable. - */ -XXH_FORCE_INLINE XXH64_hash_t -XXH3_hashLong_64b_withSeed_internal(const void* input, size_t len, - XXH64_hash_t seed, - XXH3_f_accumulate f_acc, - XXH3_f_scrambleAcc f_scramble, - XXH3_f_initCustomSecret f_initSec) -{ -#if XXH_SIZE_OPT <= 0 - if (seed == 0) - return XXH3_hashLong_64b_internal(input, len, - XXH3_kSecret, sizeof(XXH3_kSecret), - f_acc, f_scramble); -#endif - { XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE]; - f_initSec(secret, seed); - return XXH3_hashLong_64b_internal(input, len, secret, sizeof(secret), - f_acc, f_scramble); - } -} - -/* - * It's important for performance that XXH3_hashLong is not inlined. - */ -XXH_NO_INLINE XXH64_hash_t -XXH3_hashLong_64b_withSeed(const void* XXH_RESTRICT input, size_t len, - XXH64_hash_t seed, const xxh_u8* XXH_RESTRICT secret, size_t secretLen) -{ - (void)secret; (void)secretLen; - return XXH3_hashLong_64b_withSeed_internal(input, len, seed, - XXH3_accumulate, XXH3_scrambleAcc, XXH3_initCustomSecret); -} - - -typedef XXH64_hash_t (*XXH3_hashLong64_f)(const void* XXH_RESTRICT, size_t, - XXH64_hash_t, const xxh_u8* XXH_RESTRICT, size_t); - -XXH_FORCE_INLINE XXH64_hash_t -XXH3_64bits_internal(const void* XXH_RESTRICT input, size_t len, - XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen, - XXH3_hashLong64_f f_hashLong) -{ - XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN); - /* - * If an action is to be taken if `secretLen` condition is not respected, - * it should be done here. - * For now, it's a contract pre-condition. - * Adding a check and a branch here would cost performance at every hash. - * Also, note that function signature doesn't offer room to return an error. - */ - if (len <= 16) - return XXH3_len_0to16_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64); - if (len <= 128) - return XXH3_len_17to128_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64); - if (len <= XXH3_MIDSIZE_MAX) - return XXH3_len_129to240_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64); - return f_hashLong(input, len, seed64, (const xxh_u8*)secret, secretLen); -} - - -/* === Public entry point === */ - -/*! @ingroup XXH3_family */ -XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(XXH_NOESCAPE const void* input, size_t length) -{ - return XXH3_64bits_internal(input, length, 0, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_default); -} - -/*! @ingroup XXH3_family */ -XXH_PUBLIC_API XXH64_hash_t -XXH3_64bits_withSecret(XXH_NOESCAPE const void* input, size_t length, XXH_NOESCAPE const void* secret, size_t secretSize) -{ - return XXH3_64bits_internal(input, length, 0, secret, secretSize, XXH3_hashLong_64b_withSecret); -} - -/*! 
@ingroup XXH3_family */ -XXH_PUBLIC_API XXH64_hash_t -XXH3_64bits_withSeed(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed) -{ - return XXH3_64bits_internal(input, length, seed, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_withSeed); -} - -XXH_PUBLIC_API XXH64_hash_t -XXH3_64bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t length, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed) -{ - if (length <= XXH3_MIDSIZE_MAX) - return XXH3_64bits_internal(input, length, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL); - return XXH3_hashLong_64b_withSecret(input, length, seed, (const xxh_u8*)secret, secretSize); -} - - -/* === XXH3 streaming === */ -#ifndef XXH_NO_STREAM -/* - * Malloc's a pointer that is always aligned to align. - * - * This must be freed with `XXH_alignedFree()`. - * - * malloc typically guarantees 16 byte alignment on 64-bit systems and 8 byte - * alignment on 32-bit. This isn't enough for the 32 byte aligned loads in AVX2 - * or on 32-bit, the 16 byte aligned loads in SSE2 and NEON. - * - * This underalignment previously caused a rather obvious crash which went - * completely unnoticed due to XXH3_createState() not actually being tested. - * Credit to RedSpah for noticing this bug. - * - * The alignment is done manually: Functions like posix_memalign or _mm_malloc - * are avoided: To maintain portability, we would have to write a fallback - * like this anyways, and besides, testing for the existence of library - * functions without relying on external build tools is impossible. - * - * The method is simple: Overallocate, manually align, and store the offset - * to the original behind the returned pointer. - * - * Align must be a power of 2 and 8 <= align <= 128. - */ -static XXH_MALLOCF void* XXH_alignedMalloc(size_t s, size_t align) -{ - XXH_ASSERT(align <= 128 && align >= 8); /* range check */ - XXH_ASSERT((align & (align-1)) == 0); /* power of 2 */ - XXH_ASSERT(s != 0 && s < (s + align)); /* empty/overflow */ - { /* Overallocate to make room for manual realignment and an offset byte */ - xxh_u8* base = (xxh_u8*)XXH_malloc(s + align); - if (base != NULL) { - /* - * Get the offset needed to align this pointer. - * - * Even if the returned pointer is aligned, there will always be - * at least one byte to store the offset to the original pointer. - */ - size_t offset = align - ((size_t)base & (align - 1)); /* base % align */ - /* Add the offset for the now-aligned pointer */ - xxh_u8* ptr = base + offset; - - XXH_ASSERT((size_t)ptr % align == 0); - - /* Store the offset immediately before the returned pointer. */ - ptr[-1] = (xxh_u8)offset; - return ptr; - } - return NULL; - } -} -/* - * Frees an aligned pointer allocated by XXH_alignedMalloc(). Don't pass - * normal malloc'd pointers, XXH_alignedMalloc has a specific data layout. - */ -static void XXH_alignedFree(void* p) -{ - if (p != NULL) { - xxh_u8* ptr = (xxh_u8*)p; - /* Get the offset byte we added in XXH_malloc. */ - xxh_u8 offset = ptr[-1]; - /* Free the original malloc'd pointer */ - xxh_u8* base = ptr - offset; - XXH_free(base); - } -} -/*! @ingroup XXH3_family */ -/*! - * @brief Allocate an @ref XXH3_state_t. - * - * Must be freed with XXH3_freeState(). - * @return An allocated XXH3_state_t on success, `NULL` on failure. - */ -XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void) -{ - XXH3_state_t* const state = (XXH3_state_t*)XXH_alignedMalloc(sizeof(XXH3_state_t), 64); - if (state==NULL) return NULL; - XXH3_INITSTATE(state); - return state; -} - -/*! 
@ingroup XXH3_family */ -/*! - * @brief Frees an @ref XXH3_state_t. - * - * Must be allocated with XXH3_createState(). - * @param statePtr A pointer to an @ref XXH3_state_t allocated with @ref XXH3_createState(). - * @return XXH_OK. - */ -XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr) -{ - XXH_alignedFree(statePtr); - return XXH_OK; -} - -/*! @ingroup XXH3_family */ -XXH_PUBLIC_API void -XXH3_copyState(XXH_NOESCAPE XXH3_state_t* dst_state, XXH_NOESCAPE const XXH3_state_t* src_state) -{ - XXH_memcpy(dst_state, src_state, sizeof(*dst_state)); -} - -static void -XXH3_reset_internal(XXH3_state_t* statePtr, - XXH64_hash_t seed, - const void* secret, size_t secretSize) -{ - size_t const initStart = offsetof(XXH3_state_t, bufferedSize); - size_t const initLength = offsetof(XXH3_state_t, nbStripesPerBlock) - initStart; - XXH_ASSERT(offsetof(XXH3_state_t, nbStripesPerBlock) > initStart); - XXH_ASSERT(statePtr != NULL); - /* set members from bufferedSize to nbStripesPerBlock (excluded) to 0 */ - memset((char*)statePtr + initStart, 0, initLength); - statePtr->acc[0] = XXH_PRIME32_3; - statePtr->acc[1] = XXH_PRIME64_1; - statePtr->acc[2] = XXH_PRIME64_2; - statePtr->acc[3] = XXH_PRIME64_3; - statePtr->acc[4] = XXH_PRIME64_4; - statePtr->acc[5] = XXH_PRIME32_2; - statePtr->acc[6] = XXH_PRIME64_5; - statePtr->acc[7] = XXH_PRIME32_1; - statePtr->seed = seed; - statePtr->useSeed = (seed != 0); - statePtr->extSecret = (const unsigned char*)secret; - XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); - statePtr->secretLimit = secretSize - XXH_STRIPE_LEN; - statePtr->nbStripesPerBlock = statePtr->secretLimit / XXH_SECRET_CONSUME_RATE; -} - -/*! @ingroup XXH3_family */ -XXH_PUBLIC_API XXH_errorcode -XXH3_64bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr) -{ - if (statePtr == NULL) return XXH_ERROR; - XXH3_reset_internal(statePtr, 0, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE); - return XXH_OK; -} - -/*! @ingroup XXH3_family */ -XXH_PUBLIC_API XXH_errorcode -XXH3_64bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize) -{ - if (statePtr == NULL) return XXH_ERROR; - XXH3_reset_internal(statePtr, 0, secret, secretSize); - if (secret == NULL) return XXH_ERROR; - if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR; - return XXH_OK; -} - -/*! @ingroup XXH3_family */ -XXH_PUBLIC_API XXH_errorcode -XXH3_64bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed) -{ - if (statePtr == NULL) return XXH_ERROR; - if (seed==0) return XXH3_64bits_reset(statePtr); - if ((seed != statePtr->seed) || (statePtr->extSecret != NULL)) - XXH3_initCustomSecret(statePtr->customSecret, seed); - XXH3_reset_internal(statePtr, seed, NULL, XXH_SECRET_DEFAULT_SIZE); - return XXH_OK; -} - -/*! @ingroup XXH3_family */ -XXH_PUBLIC_API XXH_errorcode -XXH3_64bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed64) -{ - if (statePtr == NULL) return XXH_ERROR; - if (secret == NULL) return XXH_ERROR; - if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR; - XXH3_reset_internal(statePtr, seed64, secret, secretSize); - statePtr->useSeed = 1; /* always, even if seed64==0 */ - return XXH_OK; -} - -/*! - * @internal - * @brief Processes a large input for XXH3_update() and XXH3_digest_long(). - * - * Unlike XXH3_hashLong_internal_loop(), this can process data that overlaps a block. 
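For the default 192-byte secret, the geometry handled by `XXH3_consumeStripes()` below works out to small fixed numbers, which helps when reading the stripe accounting. A quick arithmetic sketch (the values follow from the constants in this file: `XXH_STRIPE_LEN` = 64, `XXH_SECRET_CONSUME_RATE` = 8, `XXH_SECRET_DEFAULT_SIZE` = 192):

```c
#include <stdio.h>

int main(void)
{
    const int stripe_len   = 64;   /* XXH_STRIPE_LEN */
    const int consume_rate = 8;    /* XXH_SECRET_CONSUME_RATE */
    const int secret_size  = 192;  /* XXH_SECRET_DEFAULT_SIZE */
    int stripes_per_block = (secret_size - stripe_len) / consume_rate;  /* 16 */
    int block_len = stripe_len * stripes_per_block;                     /* 1024 */
    printf("%d stripes per block, %d-byte blocks\n", stripes_per_block, block_len);
    return 0;
}
```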
- * - * @param acc Pointer to the 8 accumulator lanes - * @param nbStripesSoFarPtr In/out pointer to the number of leftover stripes in the block* - * @param nbStripesPerBlock Number of stripes in a block - * @param input Input pointer - * @param nbStripes Number of stripes to process - * @param secret Secret pointer - * @param secretLimit Offset of the last block in @p secret - * @param f_acc Pointer to an XXH3_accumulate implementation - * @param f_scramble Pointer to an XXH3_scrambleAcc implementation - * @return Pointer past the end of @p input after processing - */ -XXH_FORCE_INLINE const xxh_u8 * -XXH3_consumeStripes(xxh_u64* XXH_RESTRICT acc, - size_t* XXH_RESTRICT nbStripesSoFarPtr, size_t nbStripesPerBlock, - const xxh_u8* XXH_RESTRICT input, size_t nbStripes, - const xxh_u8* XXH_RESTRICT secret, size_t secretLimit, - XXH3_f_accumulate f_acc, - XXH3_f_scrambleAcc f_scramble) -{ - const xxh_u8* initialSecret = secret + *nbStripesSoFarPtr * XXH_SECRET_CONSUME_RATE; - /* Process full blocks */ - if (nbStripes >= (nbStripesPerBlock - *nbStripesSoFarPtr)) { - /* Process the initial partial block... */ - size_t nbStripesThisIter = nbStripesPerBlock - *nbStripesSoFarPtr; - - do { - /* Accumulate and scramble */ - f_acc(acc, input, initialSecret, nbStripesThisIter); - f_scramble(acc, secret + secretLimit); - input += nbStripesThisIter * XXH_STRIPE_LEN; - nbStripes -= nbStripesThisIter; - /* Then continue the loop with the full block size */ - nbStripesThisIter = nbStripesPerBlock; - initialSecret = secret; - } while (nbStripes >= nbStripesPerBlock); - *nbStripesSoFarPtr = 0; - } - /* Process a partial block */ - if (nbStripes > 0) { - f_acc(acc, input, initialSecret, nbStripes); - input += nbStripes * XXH_STRIPE_LEN; - *nbStripesSoFarPtr += nbStripes; - } - /* Return end pointer */ - return input; -} - -#ifndef XXH3_STREAM_USE_STACK -# if XXH_SIZE_OPT <= 0 && !defined(__clang__) /* clang doesn't need additional stack space */ -# define XXH3_STREAM_USE_STACK 1 -# endif -#endif -/* - * Both XXH3_64bits_update and XXH3_128bits_update use this routine. - */ -XXH_FORCE_INLINE XXH_errorcode -XXH3_update(XXH3_state_t* XXH_RESTRICT const state, - const xxh_u8* XXH_RESTRICT input, size_t len, - XXH3_f_accumulate f_acc, - XXH3_f_scrambleAcc f_scramble) -{ - if (input==NULL) { - XXH_ASSERT(len == 0); - return XXH_OK; - } - - XXH_ASSERT(state != NULL); - { const xxh_u8* const bEnd = input + len; - const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret; -#if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1 - /* For some reason, gcc and MSVC seem to suffer greatly - * when operating accumulators directly into state. - * Operating into stack space seems to enable proper optimization. 
- * clang, on the other hand, doesn't seem to need this trick */ - XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[8]; - XXH_memcpy(acc, state->acc, sizeof(acc)); -#else - xxh_u64* XXH_RESTRICT const acc = state->acc; -#endif - state->totalLen += len; - XXH_ASSERT(state->bufferedSize <= XXH3_INTERNALBUFFER_SIZE); - - /* small input : just fill in tmp buffer */ - if (len <= XXH3_INTERNALBUFFER_SIZE - state->bufferedSize) { - XXH_memcpy(state->buffer + state->bufferedSize, input, len); - state->bufferedSize += (XXH32_hash_t)len; - return XXH_OK; - } - - /* total input is now > XXH3_INTERNALBUFFER_SIZE */ - #define XXH3_INTERNALBUFFER_STRIPES (XXH3_INTERNALBUFFER_SIZE / XXH_STRIPE_LEN) - XXH_STATIC_ASSERT(XXH3_INTERNALBUFFER_SIZE % XXH_STRIPE_LEN == 0); /* clean multiple */ - - /* - * Internal buffer is partially filled (always, except at beginning) - * Complete it, then consume it. - */ - if (state->bufferedSize) { - size_t const loadSize = XXH3_INTERNALBUFFER_SIZE - state->bufferedSize; - XXH_memcpy(state->buffer + state->bufferedSize, input, loadSize); - input += loadSize; - XXH3_consumeStripes(acc, - &state->nbStripesSoFar, state->nbStripesPerBlock, - state->buffer, XXH3_INTERNALBUFFER_STRIPES, - secret, state->secretLimit, - f_acc, f_scramble); - state->bufferedSize = 0; - } - XXH_ASSERT(input < bEnd); - if (bEnd - input > XXH3_INTERNALBUFFER_SIZE) { - size_t nbStripes = (size_t)(bEnd - 1 - input) / XXH_STRIPE_LEN; - input = XXH3_consumeStripes(acc, - &state->nbStripesSoFar, state->nbStripesPerBlock, - input, nbStripes, - secret, state->secretLimit, - f_acc, f_scramble); - XXH_memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN, input - XXH_STRIPE_LEN, XXH_STRIPE_LEN); - - } - /* Some remaining input (always) : buffer it */ - XXH_ASSERT(input < bEnd); - XXH_ASSERT(bEnd - input <= XXH3_INTERNALBUFFER_SIZE); - XXH_ASSERT(state->bufferedSize == 0); - XXH_memcpy(state->buffer, input, (size_t)(bEnd-input)); - state->bufferedSize = (XXH32_hash_t)(bEnd-input); -#if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1 - /* save stack accumulators into state */ - XXH_memcpy(state->acc, acc, sizeof(acc)); -#endif - } - - return XXH_OK; -} - -/*! @ingroup XXH3_family */ -XXH_PUBLIC_API XXH_errorcode -XXH3_64bits_update(XXH_NOESCAPE XXH3_state_t* state, XXH_NOESCAPE const void* input, size_t len) -{ - return XXH3_update(state, (const xxh_u8*)input, len, - XXH3_accumulate, XXH3_scrambleAcc); -} - - -XXH_FORCE_INLINE void -XXH3_digest_long (XXH64_hash_t* acc, - const XXH3_state_t* state, - const unsigned char* secret) -{ - xxh_u8 lastStripe[XXH_STRIPE_LEN]; - const xxh_u8* lastStripePtr; - - /* - * Digest on a local copy. This way, the state remains unaltered, and it can - * continue ingesting more input afterwards. 
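Because `XXH3_update()` buffers input internally, chunked updates yield the same digest as a single one-shot call over the concatenated data. A minimal sketch of the streaming API (assuming the public header):

```c
#include <stdio.h>
#include "xxhash.h"

int main(void)
{
    XXH3_state_t* st = XXH3_createState();
    if (st == NULL) return 1;
    XXH3_64bits_reset(st);
    /* two updates, same result as XXH3_64bits("hello world", 11) */
    XXH3_64bits_update(st, "hello ", 6);
    XXH3_64bits_update(st, "world", 5);
    printf("%016llx\n", (unsigned long long)XXH3_64bits_digest(st));
    XXH3_freeState(st);
    return 0;
}
```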
- */ - XXH_memcpy(acc, state->acc, sizeof(state->acc)); - if (state->bufferedSize >= XXH_STRIPE_LEN) { - /* Consume remaining stripes then point to remaining data in buffer */ - size_t const nbStripes = (state->bufferedSize - 1) / XXH_STRIPE_LEN; - size_t nbStripesSoFar = state->nbStripesSoFar; - XXH3_consumeStripes(acc, - &nbStripesSoFar, state->nbStripesPerBlock, - state->buffer, nbStripes, - secret, state->secretLimit, - XXH3_accumulate, XXH3_scrambleAcc); - lastStripePtr = state->buffer + state->bufferedSize - XXH_STRIPE_LEN; - } else { /* bufferedSize < XXH_STRIPE_LEN */ - /* Copy to temp buffer */ - size_t const catchupSize = XXH_STRIPE_LEN - state->bufferedSize; - XXH_ASSERT(state->bufferedSize > 0); /* there is always some input buffered */ - XXH_memcpy(lastStripe, state->buffer + sizeof(state->buffer) - catchupSize, catchupSize); - XXH_memcpy(lastStripe + catchupSize, state->buffer, state->bufferedSize); - lastStripePtr = lastStripe; - } - /* Last stripe */ - XXH3_accumulate_512(acc, - lastStripePtr, - secret + state->secretLimit - XXH_SECRET_LASTACC_START); -} - -/*! @ingroup XXH3_family */ -XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest (XXH_NOESCAPE const XXH3_state_t* state) -{ - const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret; - if (state->totalLen > XXH3_MIDSIZE_MAX) { - XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB]; - XXH3_digest_long(acc, state, secret); - return XXH3_mergeAccs(acc, - secret + XXH_SECRET_MERGEACCS_START, - (xxh_u64)state->totalLen * XXH_PRIME64_1); - } - /* totalLen <= XXH3_MIDSIZE_MAX: digesting a short input */ - if (state->useSeed) - return XXH3_64bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed); - return XXH3_64bits_withSecret(state->buffer, (size_t)(state->totalLen), - secret, state->secretLimit + XXH_STRIPE_LEN); -} -#endif /* !XXH_NO_STREAM */ - - -/* ========================================== - * XXH3 128 bits (a.k.a XXH128) - * ========================================== - * XXH3's 128-bit variant has better mixing and strength than the 64-bit variant, - * even without counting the significantly larger output size. - * - * For example, extra steps are taken to avoid the seed-dependent collisions - * in 17-240 byte inputs (See XXH3_mix16B and XXH128_mix32B). - * - * This strength naturally comes at the cost of some speed, especially on short - * lengths. Note that longer hashes are about as fast as the 64-bit version - * due to it using only a slight modification of the 64-bit loop. - * - * XXH128 is also more oriented towards 64-bit machines. It is still extremely - * fast for a _128-bit_ hash on 32-bit (it usually clears XXH64). - */ - -XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t -XXH3_len_1to3_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) -{ - /* A doubled version of 1to3_64b with different constants. 
*/ - XXH_ASSERT(input != NULL); - XXH_ASSERT(1 <= len && len <= 3); - XXH_ASSERT(secret != NULL); - /* - * len = 1: combinedl = { input[0], 0x01, input[0], input[0] } - * len = 2: combinedl = { input[1], 0x02, input[0], input[1] } - * len = 3: combinedl = { input[2], 0x03, input[0], input[1] } - */ - { xxh_u8 const c1 = input[0]; - xxh_u8 const c2 = input[len >> 1]; - xxh_u8 const c3 = input[len - 1]; - xxh_u32 const combinedl = ((xxh_u32)c1 <<16) | ((xxh_u32)c2 << 24) - | ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8); - xxh_u32 const combinedh = XXH_rotl32(XXH_swap32(combinedl), 13); - xxh_u64 const bitflipl = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed; - xxh_u64 const bitfliph = (XXH_readLE32(secret+8) ^ XXH_readLE32(secret+12)) - seed; - xxh_u64 const keyed_lo = (xxh_u64)combinedl ^ bitflipl; - xxh_u64 const keyed_hi = (xxh_u64)combinedh ^ bitfliph; - XXH128_hash_t h128; - h128.low64 = XXH64_avalanche(keyed_lo); - h128.high64 = XXH64_avalanche(keyed_hi); - return h128; - } -} - -XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t -XXH3_len_4to8_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) -{ - XXH_ASSERT(input != NULL); - XXH_ASSERT(secret != NULL); - XXH_ASSERT(4 <= len && len <= 8); - seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32; - { xxh_u32 const input_lo = XXH_readLE32(input); - xxh_u32 const input_hi = XXH_readLE32(input + len - 4); - xxh_u64 const input_64 = input_lo + ((xxh_u64)input_hi << 32); - xxh_u64 const bitflip = (XXH_readLE64(secret+16) ^ XXH_readLE64(secret+24)) + seed; - xxh_u64 const keyed = input_64 ^ bitflip; - - /* Shift len to the left to ensure it is even, this avoids even multiplies. */ - XXH128_hash_t m128 = XXH_mult64to128(keyed, XXH_PRIME64_1 + (len << 2)); - - m128.high64 += (m128.low64 << 1); - m128.low64 ^= (m128.high64 >> 3); - - m128.low64 = XXH_xorshift64(m128.low64, 35); - m128.low64 *= PRIME_MX2; - m128.low64 = XXH_xorshift64(m128.low64, 28); - m128.high64 = XXH3_avalanche(m128.high64); - return m128; - } -} - -XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t -XXH3_len_9to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) -{ - XXH_ASSERT(input != NULL); - XXH_ASSERT(secret != NULL); - XXH_ASSERT(9 <= len && len <= 16); - { xxh_u64 const bitflipl = (XXH_readLE64(secret+32) ^ XXH_readLE64(secret+40)) - seed; - xxh_u64 const bitfliph = (XXH_readLE64(secret+48) ^ XXH_readLE64(secret+56)) + seed; - xxh_u64 const input_lo = XXH_readLE64(input); - xxh_u64 input_hi = XXH_readLE64(input + len - 8); - XXH128_hash_t m128 = XXH_mult64to128(input_lo ^ input_hi ^ bitflipl, XXH_PRIME64_1); - /* - * Put len in the middle of m128 to ensure that the length gets mixed to - * both the low and high bits in the 128x64 multiply below. - */ - m128.low64 += (xxh_u64)(len - 1) << 54; - input_hi ^= bitfliph; - /* - * Add the high 32 bits of input_hi to the high 32 bits of m128, then - * add the long product of the low 32 bits of input_hi and XXH_PRIME32_2 to - * the high 64 bits of m128. - * - * The best approach to this operation is different on 32-bit and 64-bit. - */ - if (sizeof(void *) < sizeof(xxh_u64)) { /* 32-bit */ - /* - * 32-bit optimized version, which is more readable. - * - * On 32-bit, it removes an ADC and delays a dependency between the two - * halves of m128.high64, but it generates an extra mask on 64-bit. - */ - m128.high64 += (input_hi & 0xFFFFFFFF00000000ULL) + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2); - } else { - /* - * 64-bit optimized (albeit more confusing) version. 
- * - * Uses some properties of addition and multiplication to remove the mask: - * - * Let: - * a = input_hi.lo = (input_hi & 0x00000000FFFFFFFF) - * b = input_hi.hi = (input_hi & 0xFFFFFFFF00000000) - * c = XXH_PRIME32_2 - * - * a + (b * c) - * Inverse Property: x + y - x == y - * a + (b * (1 + c - 1)) - * Distributive Property: x * (y + z) == (x * y) + (x * z) - * a + (b * 1) + (b * (c - 1)) - * Identity Property: x * 1 == x - * a + b + (b * (c - 1)) - * - * Substitute a, b, and c: - * input_hi.hi + input_hi.lo + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1)) - * - * Since input_hi.hi + input_hi.lo == input_hi, we get this: - * input_hi + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1)) - */ - m128.high64 += input_hi + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2 - 1); - } - /* m128 ^= XXH_swap64(m128 >> 64); */ - m128.low64 ^= XXH_swap64(m128.high64); - - { /* 128x64 multiply: h128 = m128 * XXH_PRIME64_2; */ - XXH128_hash_t h128 = XXH_mult64to128(m128.low64, XXH_PRIME64_2); - h128.high64 += m128.high64 * XXH_PRIME64_2; - - h128.low64 = XXH3_avalanche(h128.low64); - h128.high64 = XXH3_avalanche(h128.high64); - return h128; - } } -} - -/* - * Assumption: `secret` size is >= XXH3_SECRET_SIZE_MIN - */ -XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t -XXH3_len_0to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) -{ - XXH_ASSERT(len <= 16); - { if (len > 8) return XXH3_len_9to16_128b(input, len, secret, seed); - if (len >= 4) return XXH3_len_4to8_128b(input, len, secret, seed); - if (len) return XXH3_len_1to3_128b(input, len, secret, seed); - { XXH128_hash_t h128; - xxh_u64 const bitflipl = XXH_readLE64(secret+64) ^ XXH_readLE64(secret+72); - xxh_u64 const bitfliph = XXH_readLE64(secret+80) ^ XXH_readLE64(secret+88); - h128.low64 = XXH64_avalanche(seed ^ bitflipl); - h128.high64 = XXH64_avalanche( seed ^ bitfliph); - return h128; - } } -} - -/* - * A bit slower than XXH3_mix16B, but handles multiply by zero better. - */ -XXH_FORCE_INLINE XXH128_hash_t -XXH128_mix32B(XXH128_hash_t acc, const xxh_u8* input_1, const xxh_u8* input_2, - const xxh_u8* secret, XXH64_hash_t seed) -{ - acc.low64 += XXH3_mix16B (input_1, secret+0, seed); - acc.low64 ^= XXH_readLE64(input_2) + XXH_readLE64(input_2 + 8); - acc.high64 += XXH3_mix16B (input_2, secret+16, seed); - acc.high64 ^= XXH_readLE64(input_1) + XXH_readLE64(input_1 + 8); - return acc; -} - - -XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t -XXH3_len_17to128_128b(const xxh_u8* XXH_RESTRICT input, size_t len, - const xxh_u8* XXH_RESTRICT secret, size_t secretSize, - XXH64_hash_t seed) -{ - XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize; - XXH_ASSERT(16 < len && len <= 128); - - { XXH128_hash_t acc; - acc.low64 = len * XXH_PRIME64_1; - acc.high64 = 0; - -#if XXH_SIZE_OPT >= 1 - { - /* Smaller, but slightly slower. 
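The mask-removal rewrite derived above is easy to sanity-check numerically: with `lo = (xxh_u32)input_hi` and `hi = input_hi & 0xFFFFFFFF00000000`, the claim is that `hi + lo*c` equals `input_hi + lo*(c-1)` modulo 2^64. A throwaway check (illustrative only; the constant is XXH_PRIME32_2):

```c
#include <assert.h>
#include <stdint.h>

int main(void)
{
    const uint64_t input_hi = 0x0123456789ABCDEFull;
    const uint64_t c  = 0x85EBCA77ull;  /* XXH_PRIME32_2 */
    const uint64_t lo = (uint32_t)input_hi;
    const uint64_t hi = input_hi & 0xFFFFFFFF00000000ull;
    /* hi + lo*c == (hi + lo) + lo*(c - 1) == input_hi + lo*(c - 1) (mod 2^64) */
    assert(hi + lo * c == input_hi + lo * (c - 1));
    return 0;
}
```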
*/ - unsigned int i = (unsigned int)(len - 1) / 32; - do { - acc = XXH128_mix32B(acc, input+16*i, input+len-16*(i+1), secret+32*i, seed); - } while (i-- != 0); - } -#else - if (len > 32) { - if (len > 64) { - if (len > 96) { - acc = XXH128_mix32B(acc, input+48, input+len-64, secret+96, seed); - } - acc = XXH128_mix32B(acc, input+32, input+len-48, secret+64, seed); - } - acc = XXH128_mix32B(acc, input+16, input+len-32, secret+32, seed); - } - acc = XXH128_mix32B(acc, input, input+len-16, secret, seed); -#endif - { XXH128_hash_t h128; - h128.low64 = acc.low64 + acc.high64; - h128.high64 = (acc.low64 * XXH_PRIME64_1) - + (acc.high64 * XXH_PRIME64_4) - + ((len - seed) * XXH_PRIME64_2); - h128.low64 = XXH3_avalanche(h128.low64); - h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64); - return h128; - } - } -} - -XXH_NO_INLINE XXH_PUREF XXH128_hash_t -XXH3_len_129to240_128b(const xxh_u8* XXH_RESTRICT input, size_t len, - const xxh_u8* XXH_RESTRICT secret, size_t secretSize, - XXH64_hash_t seed) -{ - XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize; - XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX); - - { XXH128_hash_t acc; - unsigned i; - acc.low64 = len * XXH_PRIME64_1; - acc.high64 = 0; - /* - * We set `i` to offset + 32. We do this so that unchanged - * `len` can be used as upper bound. This reaches a sweet spot - * where both x86 and aarch64 get simple agen and good codegen - * for the loop. - */ - for (i = 32; i < 160; i += 32) { - acc = XXH128_mix32B(acc, - input + i - 32, - input + i - 16, - secret + i - 32, - seed); - } - acc.low64 = XXH3_avalanche(acc.low64); - acc.high64 = XXH3_avalanche(acc.high64); - /* - * NB: `i <= len` will duplicate the last 32 bytes if - * len % 32 was zero. This is an unfortunate necessity to keep - * the hash result stable. - */ - for (i=160; i <= len; i += 32) { - acc = XXH128_mix32B(acc, - input + i - 32, - input + i - 16, - secret + XXH3_MIDSIZE_STARTOFFSET + i - 160, - seed); - } - /* last bytes */ - acc = XXH128_mix32B(acc, - input + len - 16, - input + len - 32, - secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET - 16, - (XXH64_hash_t)0 - seed); - - { XXH128_hash_t h128; - h128.low64 = acc.low64 + acc.high64; - h128.high64 = (acc.low64 * XXH_PRIME64_1) - + (acc.high64 * XXH_PRIME64_4) - + ((len - seed) * XXH_PRIME64_2); - h128.low64 = XXH3_avalanche(h128.low64); - h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64); - return h128; - } - } -} - -XXH_FORCE_INLINE XXH128_hash_t -XXH3_hashLong_128b_internal(const void* XXH_RESTRICT input, size_t len, - const xxh_u8* XXH_RESTRICT secret, size_t secretSize, - XXH3_f_accumulate f_acc, - XXH3_f_scrambleAcc f_scramble) -{ - XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC; - - XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, secret, secretSize, f_acc, f_scramble); - - /* converge into final hash */ - XXH_STATIC_ASSERT(sizeof(acc) == 64); - XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START); - { XXH128_hash_t h128; - h128.low64 = XXH3_mergeAccs(acc, - secret + XXH_SECRET_MERGEACCS_START, - (xxh_u64)len * XXH_PRIME64_1); - h128.high64 = XXH3_mergeAccs(acc, - secret + secretSize - - sizeof(acc) - XXH_SECRET_MERGEACCS_START, - ~((xxh_u64)len * XXH_PRIME64_2)); - return h128; - } -} - -/* - * It's important for performance that XXH3_hashLong() is not inlined.
- */ -XXH_NO_INLINE XXH_PUREF XXH128_hash_t -XXH3_hashLong_128b_default(const void* XXH_RESTRICT input, size_t len, - XXH64_hash_t seed64, - const void* XXH_RESTRICT secret, size_t secretLen) -{ - (void)seed64; (void)secret; (void)secretLen; - return XXH3_hashLong_128b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret), - XXH3_accumulate, XXH3_scrambleAcc); -} - -/* - * It's important for performance to pass @p secretLen (when it's static) - * to the compiler, so that it can properly optimize the vectorized loop. - * - * When the secret size is unknown, or on GCC 12 where the mix of NO_INLINE and FORCE_INLINE - * breaks -Og, this is XXH_NO_INLINE. - */ -XXH3_WITH_SECRET_INLINE XXH128_hash_t -XXH3_hashLong_128b_withSecret(const void* XXH_RESTRICT input, size_t len, - XXH64_hash_t seed64, - const void* XXH_RESTRICT secret, size_t secretLen) -{ - (void)seed64; - return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, secretLen, - XXH3_accumulate, XXH3_scrambleAcc); -} - -XXH_FORCE_INLINE XXH128_hash_t -XXH3_hashLong_128b_withSeed_internal(const void* XXH_RESTRICT input, size_t len, - XXH64_hash_t seed64, - XXH3_f_accumulate f_acc, - XXH3_f_scrambleAcc f_scramble, - XXH3_f_initCustomSecret f_initSec) -{ - if (seed64 == 0) - return XXH3_hashLong_128b_internal(input, len, - XXH3_kSecret, sizeof(XXH3_kSecret), - f_acc, f_scramble); - { XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE]; - f_initSec(secret, seed64); - return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, sizeof(secret), - f_acc, f_scramble); - } -} - -/* - * It's important for performance that XXH3_hashLong is not inlined. - */ -XXH_NO_INLINE XXH128_hash_t -XXH3_hashLong_128b_withSeed(const void* input, size_t len, - XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen) -{ - (void)secret; (void)secretLen; - return XXH3_hashLong_128b_withSeed_internal(input, len, seed64, - XXH3_accumulate, XXH3_scrambleAcc, XXH3_initCustomSecret); -} - -typedef XXH128_hash_t (*XXH3_hashLong128_f)(const void* XXH_RESTRICT, size_t, - XXH64_hash_t, const void* XXH_RESTRICT, size_t); - -XXH_FORCE_INLINE XXH128_hash_t -XXH3_128bits_internal(const void* input, size_t len, - XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen, - XXH3_hashLong128_f f_hl128) -{ - XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN); - /* - * If an action is to be taken if `secret` conditions are not respected, - * it should be done here. - * For now, it's a contract pre-condition. - * Adding a check and a branch here would cost performance at every hash. - */ - if (len <= 16) - return XXH3_len_0to16_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64); - if (len <= 128) - return XXH3_len_17to128_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64); - if (len <= XXH3_MIDSIZE_MAX) - return XXH3_len_129to240_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64); - return f_hl128(input, len, seed64, secret, secretLen); -} - - -/* === Public XXH128 API === */ - -/*! @ingroup XXH3_family */ -XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(XXH_NOESCAPE const void* input, size_t len) -{ - return XXH3_128bits_internal(input, len, 0, - XXH3_kSecret, sizeof(XXH3_kSecret), - XXH3_hashLong_128b_default); -} - -/*! 
@ingroup XXH3_family */ -XXH_PUBLIC_API XXH128_hash_t -XXH3_128bits_withSecret(XXH_NOESCAPE const void* input, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize) -{ - return XXH3_128bits_internal(input, len, 0, - (const xxh_u8*)secret, secretSize, - XXH3_hashLong_128b_withSecret); -} - -/*! @ingroup XXH3_family */ -XXH_PUBLIC_API XXH128_hash_t -XXH3_128bits_withSeed(XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed) -{ - return XXH3_128bits_internal(input, len, seed, - XXH3_kSecret, sizeof(XXH3_kSecret), - XXH3_hashLong_128b_withSeed); -} - -/*! @ingroup XXH3_family */ -XXH_PUBLIC_API XXH128_hash_t -XXH3_128bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed) -{ - if (len <= XXH3_MIDSIZE_MAX) - return XXH3_128bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL); - return XXH3_hashLong_128b_withSecret(input, len, seed, secret, secretSize); -} - -/*! @ingroup XXH3_family */ -XXH_PUBLIC_API XXH128_hash_t -XXH128(XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed) -{ - return XXH3_128bits_withSeed(input, len, seed); -} - - -/* === XXH3 128-bit streaming === */ -#ifndef XXH_NO_STREAM -/* - * All initialization and update functions are identical to 64-bit streaming variant. - * The only difference is the finalization routine. - */ - -/*! @ingroup XXH3_family */ -XXH_PUBLIC_API XXH_errorcode -XXH3_128bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr) -{ - return XXH3_64bits_reset(statePtr); -} - -/*! @ingroup XXH3_family */ -XXH_PUBLIC_API XXH_errorcode -XXH3_128bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize) -{ - return XXH3_64bits_reset_withSecret(statePtr, secret, secretSize); -} - -/*! @ingroup XXH3_family */ -XXH_PUBLIC_API XXH_errorcode -XXH3_128bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed) -{ - return XXH3_64bits_reset_withSeed(statePtr, seed); -} - -/*! @ingroup XXH3_family */ -XXH_PUBLIC_API XXH_errorcode -XXH3_128bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed) -{ - return XXH3_64bits_reset_withSecretandSeed(statePtr, secret, secretSize, seed); -} - -/*! @ingroup XXH3_family */ -XXH_PUBLIC_API XXH_errorcode -XXH3_128bits_update(XXH_NOESCAPE XXH3_state_t* state, XXH_NOESCAPE const void* input, size_t len) -{ - return XXH3_64bits_update(state, input, len); -} - -/*! @ingroup XXH3_family */ -XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (XXH_NOESCAPE const XXH3_state_t* state) -{ - const unsigned char* const secret = (state->extSecret == NULL) ? 
state->customSecret : state->extSecret; - if (state->totalLen > XXH3_MIDSIZE_MAX) { - XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB]; - XXH3_digest_long(acc, state, secret); - XXH_ASSERT(state->secretLimit + XXH_STRIPE_LEN >= sizeof(acc) + XXH_SECRET_MERGEACCS_START); - { XXH128_hash_t h128; - h128.low64 = XXH3_mergeAccs(acc, - secret + XXH_SECRET_MERGEACCS_START, - (xxh_u64)state->totalLen * XXH_PRIME64_1); - h128.high64 = XXH3_mergeAccs(acc, - secret + state->secretLimit + XXH_STRIPE_LEN - - sizeof(acc) - XXH_SECRET_MERGEACCS_START, - ~((xxh_u64)state->totalLen * XXH_PRIME64_2)); - return h128; - } - } - /* len <= XXH3_MIDSIZE_MAX : short code */ - if (state->seed) - return XXH3_128bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed); - return XXH3_128bits_withSecret(state->buffer, (size_t)(state->totalLen), - secret, state->secretLimit + XXH_STRIPE_LEN); -} -#endif /* !XXH_NO_STREAM */ -/* 128-bit utility functions */ - -#include <string.h> /* memcmp, memcpy */ - -/* return : 1 if equal, 0 if different */ -/*! @ingroup XXH3_family */ -XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2) -{ - /* note : XXH128_hash_t is compact, it has no padding byte */ - return !(memcmp(&h1, &h2, sizeof(h1))); -} - -/* This prototype is compatible with stdlib's qsort(). - * @return : >0 if *h128_1 > *h128_2 - * <0 if *h128_1 < *h128_2 - * =0 if *h128_1 == *h128_2 */ -/*! @ingroup XXH3_family */ -XXH_PUBLIC_API int XXH128_cmp(XXH_NOESCAPE const void* h128_1, XXH_NOESCAPE const void* h128_2) -{ - XXH128_hash_t const h1 = *(const XXH128_hash_t*)h128_1; - XXH128_hash_t const h2 = *(const XXH128_hash_t*)h128_2; - int const hcmp = (h1.high64 > h2.high64) - (h2.high64 > h1.high64); - /* note : bets that, in most cases, hash values are different */ - if (hcmp) return hcmp; - return (h1.low64 > h2.low64) - (h2.low64 > h1.low64); -} - - -/*====== Canonical representation ======*/ -/*! @ingroup XXH3_family */ -XXH_PUBLIC_API void -XXH128_canonicalFromHash(XXH_NOESCAPE XXH128_canonical_t* dst, XXH128_hash_t hash) -{ - XXH_STATIC_ASSERT(sizeof(XXH128_canonical_t) == sizeof(XXH128_hash_t)); - if (XXH_CPU_LITTLE_ENDIAN) { - hash.high64 = XXH_swap64(hash.high64); - hash.low64 = XXH_swap64(hash.low64); - } - XXH_memcpy(dst, &hash.high64, sizeof(hash.high64)); - XXH_memcpy((char*)dst + sizeof(hash.high64), &hash.low64, sizeof(hash.low64)); -} - -/*! @ingroup XXH3_family */ -XXH_PUBLIC_API XXH128_hash_t -XXH128_hashFromCanonical(XXH_NOESCAPE const XXH128_canonical_t* src) -{ - XXH128_hash_t h; - h.high64 = XXH_readBE64(src); - h.low64 = XXH_readBE64(src->digest + 8); - return h; -} - - - -/* ========================================== - * Secret generators - * ========================================== - */ -#define XXH_MIN(x, y) (((x) > (y)) ? (y) : (x)) - -XXH_FORCE_INLINE void XXH3_combine16(void* dst, XXH128_hash_t h128) -{ - XXH_writeLE64( dst, XXH_readLE64(dst) ^ h128.low64 ); - XXH_writeLE64( (char*)dst+8, XXH_readLE64((char*)dst+8) ^ h128.high64 ); -} - -/*!
@ingroup XXH3_family */ -XXH_PUBLIC_API XXH_errorcode -XXH3_generateSecret(XXH_NOESCAPE void* secretBuffer, size_t secretSize, XXH_NOESCAPE const void* customSeed, size_t customSeedSize) -{ -#if (XXH_DEBUGLEVEL >= 1) - XXH_ASSERT(secretBuffer != NULL); - XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); -#else - /* production mode, assert() are disabled */ - if (secretBuffer == NULL) return XXH_ERROR; - if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR; -#endif - - if (customSeedSize == 0) { - customSeed = XXH3_kSecret; - customSeedSize = XXH_SECRET_DEFAULT_SIZE; - } -#if (XXH_DEBUGLEVEL >= 1) - XXH_ASSERT(customSeed != NULL); -#else - if (customSeed == NULL) return XXH_ERROR; -#endif - - /* Fill secretBuffer with a copy of customSeed - repeat as needed */ - { size_t pos = 0; - while (pos < secretSize) { - size_t const toCopy = XXH_MIN((secretSize - pos), customSeedSize); - memcpy((char*)secretBuffer + pos, customSeed, toCopy); - pos += toCopy; - } } - - { size_t const nbSeg16 = secretSize / 16; - size_t n; - XXH128_canonical_t scrambler; - XXH128_canonicalFromHash(&scrambler, XXH128(customSeed, customSeedSize, 0)); - for (n=0; n<nbSeg16; n++) { - XXH128_hash_t const h128 = XXH128(&scrambler, sizeof(scrambler), n); - XXH3_combine16((char*)secretBuffer + n*16, h128); - } - /* last segment */ - XXH3_combine16((char*)secretBuffer + secretSize - 16, XXH128_hashFromCanonical(&scrambler)); - } - return XXH_OK; -} cfg->suppression_threshold = suppression_threshold; cfg->enable_statistics_charts = enable_statistics_charts; + + if (cfg->enable_anomaly_detection == CONFIG_BOOLEAN_AUTO && default_rrd_memory_mode != RRD_MEMORY_MODE_DBENGINE) { + Cfg.enable_anomaly_detection = 0; + config_set_boolean(config_section_ml, "enabled", CONFIG_BOOLEAN_NO); + return; + } } diff --git a/src/ml/ml-configuration.md b/src/ml/ml-configuration.md index 12cc20a47..dc5d535db 100644 --- a/src/ml/ml-configuration.md +++ b/src/ml/ml-configuration.md @@ -1,21 +1,21 @@ # ML Configuration -Netdata's [Machine Learning](/src/ml/README.md) capabilities are enabled by default. +Netdata's [Machine Learning](/src/ml/README.md) capabilities are enabled by default if the [Database mode](/src/database/README.md) is set to `db = dbengine`. To enable or disable Machine Learning capabilities on a node: -1. [Edit `netdata.conf`](/docs/netdata-agent/configuration/README.md#edit-netdataconf) -2. In the `[ml]` section, set `enabled = yes` to enable or `enabled = no` to disable +1. [Edit `netdata.conf`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) +2. In the `[ml]` section, set `enabled` to `yes` to enable ML, `no` to disable it, or leave it at the default `auto` to enable ML only when [Database mode](/src/database/README.md) is set to `dbengine` 3. [Restart Netdata](/docs/netdata-agent/start-stop-restart.md) Below is a list of all the available configuration params and their default values.
```bash [ml] - # enabled = yes + # enabled = auto # maximum num samples to train = 21600 # minimum num samples to train = 900 - # train every = 10800 + # train every = 3h # number of models per dimension = 18 # dbengine anomaly rate every = 30 # num samples to diff = 1 @@ -26,12 +26,12 @@ Below is a list of all the available configuration params and their default valu # dimension anomaly score threshold = 0.99 # host anomaly rate threshold = 1.0 # anomaly detection grouping method = average - # anomaly detection grouping duration = 300 + # anomaly detection grouping duration = 5m # hosts to skip from training = !* # charts to skip from training = netdata.* - # dimension anomaly rate suppression window = 900 + # dimension anomaly rate suppression window = 15m # dimension anomaly rate suppression threshold = 450 - # delete models older than = 604800 + # delete models older than = 7d ``` ## Configuration Examples @@ -85,11 +85,11 @@ flowchart BT ## Descriptions (min/max) -- `enabled`: `yes` to enable, `no` to disable. +- `enabled`: `yes` to enable, `no` to disable, or `auto` to let Netdata decide when to enable ML. - `maximum num samples to train`: (`3600`/`86400`) This is the maximum amount of time you would like to train each model on. For example, the default of `21600` trains on the preceding 6 hours of data, assuming an `update every` of 1 second. - `minimum num samples to train`: (`900`/`21600`) This is the minimum amount of data required to be able to train a model. For example, the default of `900` implies that once at least 15 minutes of data is available for training, a model is trained, otherwise it is skipped and checked again at the next training run. -- `train every`: (`1800`/`21600`) This is how often each model will be retrained. For example, the default of `10800` means that each model is retrained every 3 hours. Note: The training of all models is spread out across the `train every` period for efficiency, so in reality, it means that each model will be trained in a staggered manner within each `train every` period. -- `number of models per dimension`: (`1`/`168`) This is the number of trained models that will be used for scoring. For example the default `number of models per dimension = 18` means that the most recently trained 18 models for the dimension will be used to determine the corresponding anomaly bit. This means that under default settings of `maximum num samples to train = 21600`, `train every = 10800` and `number of models per dimension = 18`, netdata will store and use the last 18 trained models for each dimension when determining the anomaly bit. This means that for the latest feature vector in this configuration to be considered anomalous it would need to look anomalous across _all_ the models trained for that dimension in the last 18*(10800/3600) ~= 54 hours. As such, increasing `number of models per dimension` may reduce some false positives since it will result in more models (covering a wider time frame of training) being used during scoring. +- `train every`: (`3h`/`6h`) This is how often each model will be retrained. For example, the default of `3h` means that each model is retrained every 3 hours. Note: The training of all models is spread out across the `train every` period for efficiency, so in reality, it means that each model will be trained in a staggered manner within each `train every` period. +- `number of models per dimension`: (`1`/`168`) This is the number of trained models that will be used for scoring. 
For example, the default `number of models per dimension = 18` means that the most recently trained 18 models for the dimension will be used to determine the corresponding anomaly bit. This means that under default settings of `maximum num samples to train = 21600`, `train every = 3h` and `number of models per dimension = 18`, netdata will store and use the last 18 trained models for each dimension when determining the anomaly bit. This means that for the latest feature vector in this configuration to be considered anomalous it would need to look anomalous across _all_ the models trained for that dimension in the last 18*(10800/3600) ~= 54 hours. As such, increasing `number of models per dimension` may reduce some false positives since it will result in more models (covering a wider time frame of training) being used during scoring. - `dbengine anomaly rate every`: (`30`/`900`) This is how often netdata will aggregate all the anomaly bits into a single chart (`anomaly_detection.anomaly_rates`). The aggregation into a single chart allows enabling anomaly rate ranking over _all_ metrics with one API call as opposed to a call per chart. - `num samples to diff`: (`0`/`1`) This is a `0` or `1` to determine if you want the model to operate on differences of the raw data or just the raw data. For example, the default of `1` means that we take differences of the raw values. Using differences is more general and works on dimensions that might naturally tend to have some trends or cycles in them, which is normal behavior to which we don't want to be too sensitive. - `num samples to smooth`: (`0`/`5`) This is a small integer that controls the amount of smoothing applied as part of the feature processing used by the model. For example, the default of `3` means that the rolling average of the last 3 values is used. Smoothing like this helps the model be a little more robust to spiky types of dimensions that naturally "jump" up or down as part of their normal behavior. @@ -99,7 +99,7 @@ flowchart BT - `dimension anomaly score threshold`: (`0.01`/`5.00`) This is the threshold at which an individual dimension at a specific timestep is considered anomalous or not. For example, the default of `0.99` means that a dimension with an anomaly score of 99% or higher is flagged as anomalous. This is a normalized probability based on the training data, so the default of 99% means that anything that is as strange as (based on the distance measure), or stranger than, the strangest 1% of data observed during training will be flagged as anomalous. If you want to make the anomaly detection on individual dimensions more sensitive, you could try a value like `0.90` (90%); to make it less sensitive, you could try `1.5` (150%). - `host anomaly rate threshold`: (`0.1`/`10.0`) This is the percentage of dimensions (based on all those enabled for anomaly detection) that need to be considered anomalous at a specific timestep for the host itself to be considered anomalous. For example, the default value of `1.0` means that if more than 1% of dimensions are anomalous at the same time then the host itself is considered in an anomalous state. - `anomaly detection grouping method`: The grouping method used when calculating node level anomaly rate. -- `anomaly detection grouping duration`: (`60`/`900`) The duration across which to calculate the node level anomaly rate, the default of `900` means that the node level anomaly rate is calculated across a rolling 5 minute window.
+- `anomaly detection grouping duration`: (`1m`/`15m`) The duration across which to calculate the node level anomaly rate; the default of `5m` means that the node level anomaly rate is calculated across a rolling 5 minute window. - `hosts to skip from training`: This parameter allows you to turn off anomaly detection for any child hosts on a parent host by defining those you would like to skip from training here. For example, a value like `dev-*` skips all hosts on a parent that begin with the "dev-" prefix. The default value of `!*` means "don't skip any". - `charts to skip from training`: This parameter allows you to exclude certain charts from anomaly detection. By default, only netdata-related charts are excluded. This is to avoid the scenario where accessing the netdata dashboard could itself trigger some anomalies if you don't access it regularly. If you want to include charts that are excluded by default, add them in small groups and then measure any impact on performance before adding additional ones. Example: If you want to include system, apps, and user charts: `!system.* !apps.* !user.* *`. -- `delete models older than`: (`86400`/`604800`) Delete old models from the database that are unused, by default models will be deleted after 7 days. +- `delete models older than`: (`1d`/`7d`) Delete old models from the database that are unused; by default, models will be deleted after 7 days. diff --git a/src/ml/ml-private.h b/src/ml/ml-private.h index fc90589b3..cda24d0ed 100644 --- a/src/ml/ml-private.h +++ b/src/ml/ml-private.h @@ -313,7 +313,7 @@ typedef struct { } ml_training_thread_t; typedef struct { - bool enable_anomaly_detection; + int enable_anomaly_detection; unsigned max_train_samples; unsigned min_train_samples; diff --git a/src/ml/ml.cc b/src/ml/ml.cc index 7ecdce418..61574b287 100644 --- a/src/ml/ml.cc +++ b/src/ml/ml.cc @@ -1218,11 +1218,11 @@ ml_detect_main(void *arg) worker_register_job_name(WORKER_JOB_DETECTION_STATS, "training stats"); heartbeat_t hb; - heartbeat_init(&hb); + heartbeat_init(&hb, USEC_PER_SEC); while (!Cfg.detection_stop && service_running(SERVICE_COLLECTORS)) { worker_is_idle(); - heartbeat_next(&hb, USEC_PER_SEC); + heartbeat_next(&hb); RRDHOST *rh; rrd_rdlock(); diff --git a/src/plugins.d/README.md b/src/plugins.d/README.md new file mode 100644 index 000000000..d82a7cd9d --- /dev/null +++ b/src/plugins.d/README.md @@ -0,0 +1,868 @@ +# External plugins + +`plugins.d` is the Netdata internal plugin that collects metrics +from external processes, thus allowing Netdata to use **external plugins**. + +## Provided External Plugins + +| plugin | language | O/S | description | +|:------------------------------------------------------------------------------------------------------:|:--------:|:--------------:|:----------------------------------------------------------------------------------------------------------------------------------------| +| [apps.plugin](/src/collectors/apps.plugin/README.md) | `C` | linux, freebsd | monitors the whole process tree on Linux and FreeBSD and breaks down system resource usage by **process**, **user** and **user group**. | +| [charts.d.plugin](/src/collectors/charts.d.plugin/README.md) | `BASH` | all | a **plugin orchestrator** for data collection modules written in `BASH` v4+. | +| [cups.plugin](/src/collectors/cups.plugin/README.md) | `C` | all | monitors **CUPS** | +| [ebpf.plugin](/src/collectors/ebpf.plugin/README.md) | `C` | linux | monitors different metrics on environments using kernel internal functions.
| +| [go.d.plugin](/src/go/plugin/go.d/README.md) | `GO` | all | collects metrics from the system, applications, or third-party APIs. | +| [ioping.plugin](/src/collectors/ioping.plugin/README.md) | `C` | all | measures disk latency. | +| [freeipmi.plugin](/src/collectors/freeipmi.plugin/README.md) | `C` | linux | collects metrics from enterprise hardware sensors on Linux servers. | +| [nfacct.plugin](/src/collectors/nfacct.plugin/README.md) | `C` | linux | collects netfilter firewall, connection tracker and accounting metrics using `libmnl` and `libnetfilter_acct`. | +| [xenstat.plugin](/src/collectors/xenstat.plugin/README.md) | `C` | linux | collects XenServer and XCP-ng metrics using `libxenstat`. | +| [perf.plugin](/src/collectors/perf.plugin/README.md) | `C` | linux | collects CPU performance metrics using performance monitoring units (PMU). | +| [python.d.plugin](/src/collectors/python.d.plugin/README.md) | `python` | all | a **plugin orchestrator** for data collection modules written in `python` v2 or v3 (both are supported). | +| [slabinfo.plugin](/src/collectors/slabinfo.plugin/README.md) | `C` | linux | collects kernel internal cache objects (SLAB) metrics. | + +Plugin orchestrators may also be described as **modular plugins**. They are modular since they accept custom-made modules to be included. Writing modules for these plugins is easier than accessing the native Netdata API directly. You will find modules already available for each orchestrator under the directory of the particular modular plugin (e.g. under python.d.plugin for the python orchestrator). +Each of these modular plugins has its own methods for defining modules. Please check the examples and their documentation. + +## Motivation + +This plugin allows Netdata to use **external plugins** for data collection: + +1. external data collection plugins may be written in any computer language. + +2. external data collection plugins may use O/S capabilities or `setuid` to + run with escalated privileges (compared to the `netdata` daemon). + The communication between the external plugin and Netdata is unidirectional + (from the plugin to Netdata), so that Netdata cannot manipulate an external + plugin running with escalated privileges. + +## Operation + +Each of the external plugins is expected to run forever. +Netdata will start it when it starts and stop it when it exits. + +If the external plugin exits or crashes, Netdata will log an error. +If the external plugin exits or crashes without pushing metrics to Netdata, Netdata will not start it again. + +- Plugins that exit with any value other than zero will be disabled. Plugins that exit with zero will be restarted after some time. - Plugins may also be disabled by Netdata if they output things that Netdata does not understand. + +The `stdout` of external plugins is connected to Netdata to receive metrics, +with the API defined below. + +The `stderr` of external plugins is connected to Netdata's `error.log`. + +Plugins can create any number of charts with any number of dimensions each. Each chart can have its own characteristics independently of the others generated by the same plugin. For example, one chart may have an update frequency of 1 second, another may have 5 seconds and a third may have 10 seconds. + +## Configuration + +Netdata will supply the environment variables `NETDATA_USER_CONFIG_DIR` (for user-supplied) and `NETDATA_STOCK_CONFIG_DIR` (for Netdata-supplied) configuration files to identify the directory where configuration files are stored.
It is up to the plugin to read the configuration it needs. + +The `[plugins]` section of `netdata.conf` contains a list of all the plugins found on the system where Netdata runs, with a boolean setting to enable them or not. + +Example: + +``` +[plugins] + # enable running new plugins = yes + # check for new plugins every = 60 + + # charts.d = yes + # ioping = yes + # python.d = yes +``` + +The setting `enable running new plugins` sets the default behavior for all external plugins. It can be +overridden for distinct plugins by modifying the appropriate plugin value configuration to either `yes` or `no`. + +The setting `check for new plugins every` sets the interval between scans of the directory +`/usr/libexec/netdata/plugins.d`. New plugins can be added any time, and Netdata will detect them in a timely manner. + +For each of the external plugins enabled, another `netdata.conf` section +is created, in the form of `[plugin:NAME]`, where `NAME` is the name of the external plugin. +This section allows controlling the update frequency of the plugin and providing +additional command line arguments to it. + +For example, for `apps.plugin` the following section is available: + +``` +[plugin:apps] + # update every = 1 + # command options = +``` + +- `update every` controls the granularity of the external plugin. +- `command options` allows giving additional command line options to the plugin. + +Netdata will provide to the external plugins the environment variable `NETDATA_UPDATE_EVERY`, in seconds (the default is 1). This is the **minimum update frequency** for all charts. A plugin that updates values more frequently than this is just wasting resources. + +Netdata will call the plugin with just one command line parameter: the number of seconds the user requested this plugin to update its data (by default this is also 1). + +Other than the above, the plugin configuration is up to the plugin. + +Keep in mind that the user may use Netdata configuration to overwrite chart and dimension parameters. This is transparent to the plugin. + +### Autoconfiguration + +Plugins should attempt to autoconfigure themselves when possible. + +For example, if your plugin wants to monitor `squid`, you can search for it on port `3128` or `8080`. If either succeeds, you can proceed. If both fail, you can output an error (on stderr) saying that you cannot find `squid` running and giving instructions about the plugin configuration. Then you can stop (exit with non-zero value), so that Netdata will not attempt to start the plugin again. + +## External Plugins API + +Any program that can print a few values to its standard output can become a Netdata external plugin.
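+For illustration only, a trivial standalone plugin could be a shell script like the following sketch (the `example.random` chart, `random` dimension and priority are made up for this example; the commands it prints are documented in the rest of this section):
+
+```sh
+#!/bin/sh
+# example.plugin - a hypothetical minimal external plugin.
+# Netdata passes the requested update frequency (in seconds) as the first argument.
+update_every="${1:-1}"
+
+# define the chart and its single dimension, once, at startup
+echo "CHART example.random '' 'A random number' 'number' random random line 90000 ${update_every}"
+echo "DIMENSION random '' absolute 1 1"
+
+# collect and ship one value per iteration, forever,
+# making sure each iteration's output reaches Netdata promptly
+while true; do
+  value=$(od -An -N2 -tu2 /dev/urandom | tr -d ' ')
+  echo "BEGIN example.random"
+  echo "SET random = ${value}"
+  echo "END"
+  sleep "${update_every}"
+done
+```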
+ +Netdata parses lines starting with: + +- `CHART` - create or update a chart +- `DIMENSION` - add or update a dimension to the chart just created +- `VARIABLE` - define a variable (to be used in health calculations) +- `CLABEL` - add a label to a chart +- `CLABEL_COMMIT` - commit added labels to the chart +- `FUNCTION` - define a function that can be called later to execute it +- `BEGIN` - initialize data collection for a chart +- `SET` - set the value of a dimension for the initialized chart +- `END` - complete data collection for the initialized chart +- `FLUSH` - ignore the last collected values +- `DISABLE` - disable this plugin +- `FUNCTION_PROGRESS` - report the progress of a function execution +- `FUNCTION_RESULT_BEGIN` - to initiate the transmission of function results +- `FUNCTION_RESULT_END` - to end the transmission of function results +- `CONFIG` - to define dynamic configuration entities + +A single program can produce any number of charts with any number of dimensions each. + +Charts can be added any time (not just at the beginning). + +Netdata may send the following commands to the plugin's `stdin`: + +- `FUNCTION` - to call a specific function, with all parameters inline +- `FUNCTION_PAYLOAD` - to call a specific function, with a payload of parameters +- `FUNCTION_PAYLOAD_END` - to end the payload of parameters +- `FUNCTION_CANCEL` - to cancel a running function transaction - no response is required +- `FUNCTION_PROGRESS` - to report that a user asked for the progress of a running function call - no response is required + +### Command line parameters + +The plugin **MUST** accept just **one** parameter: **the number of seconds it is +expected to update the values for its charts**. The value passed by Netdata +to the plugin is controlled via its configuration file (so there is no need +for the plugin to handle this configuration option). + +The external plugin can overwrite the update frequency. For example, the server may +request per second updates, but the plugin may ignore it and update its charts +every 5 seconds. + +### Environment variables + +There are a few environment variables that are set by `netdata` and are +available for the plugin to use. + +| variable | description | +|:--------------------------------:|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `NETDATA_USER_CONFIG_DIR` | The directory where all Netdata-related user configuration should be stored. If the plugin requires custom user configuration, this is the place the user has saved it (normally under `/etc/netdata`). | +| `NETDATA_STOCK_CONFIG_DIR` | The directory where all Netdata-related stock configuration should be stored. If the plugin is shipped with configuration files, this is the place they can be found (normally under `/usr/lib/netdata/conf.d`). | +| `NETDATA_PLUGINS_DIR` | The directory where all Netdata plugins are stored. | +| `NETDATA_USER_PLUGINS_DIRS` | The list of directories where custom plugins are stored. | +| `NETDATA_WEB_DIR` | The directory where the web files of Netdata are saved. | +| `NETDATA_CACHE_DIR` | The directory where the cache files of Netdata are stored. Use this directory if the plugin requires a place to store data. A new directory should be created for the plugin for this purpose, inside this directory.
| +| `NETDATA_LOG_DIR` | The directory where the log files are stored. By default, the `stderr` output of the plugin will be saved in the `error.log` file of Netdata. | +| `NETDATA_HOST_PREFIX` | This is used in environments where system directories like `/sys` and `/proc` have to be accessed at a different path. | +| `NETDATA_DEBUG_FLAGS` | This is a number (probably in hex, starting with `0x`) that enables certain Netdata debugging features. Check **\[[Tracing Options]]** for more information. | +| `NETDATA_UPDATE_EVERY` | The minimum number of seconds between chart refreshes. This is like the **internal clock** of Netdata (it is user configurable, defaulting to `1`). There is no point in a plugin updating its values more frequently than this number of seconds. | +| `NETDATA_INVOCATION_ID` | A random UUID in compact form, representing the unique invocation identifier of Netdata. When running under systemd, Netdata uses the `INVOCATION_ID` set by systemd. | +| `NETDATA_LOG_METHOD` | One of `syslog`, `journal`, `stderr` or `none`, indicating the preferred log method of external plugins. | +| `NETDATA_LOG_FORMAT` | One of `journal`, `logfmt` or `json`, indicating the format of the logs. Plugins can use the Netdata `systemd-cat-native` command to always log in `journal` format, and have it automatically converted to the format expected by netdata. | +| `NETDATA_LOG_LEVEL` | One of `emergency`, `alert`, `critical`, `error`, `warning`, `notice`, `info`, `debug`. Plugins are expected to log events with the given priority and the more important ones. | +| `NETDATA_SYSLOG_FACILITY` | Set only when the `NETDATA_LOG_METHOD` is `syslog`. Possible values are `auth`, `authpriv`, `cron`, `daemon`, `ftp`, `kern`, `lpr`, `mail`, `news`, `syslog`, `user`, `uucp` and `local0` to `local7` | +| `NETDATA_ERRORS_THROTTLE_PERIOD` | The log throttling period in seconds. | +| `NETDATA_ERRORS_PER_PERIOD` | The allowed number of log events per period. | +| `NETDATA_SYSTEMD_JOURNAL_PATH` | When `NETDATA_LOG_METHOD` is set to `journal`, this is the systemd-journald socket path to use. | + +### The output of the plugin + +The plugin should output instructions for Netdata to its output (`stdout`). Since this uses pipes, please make sure you flush stdout after every iteration. + +#### DISABLE + +`DISABLE` will disable this plugin. This will prevent Netdata from restarting the plugin. You can also exit with the value `1` to have the same effect. + +#### HOST_DEFINE + +`HOST_DEFINE` defines a new (or updates an existing) virtual host. + +The template is: + +> HOST_DEFINE machine_guid hostname + +where: + +- `machine_guid` + + uniquely identifies the host; this is what will be needed to add charts to the host. + +- `hostname` + + is the hostname of the virtual host + +#### HOST_LABEL + +`HOST_LABEL` adds a key-value pair to the virtual host labels. It has to be given between `HOST_DEFINE` and `HOST_DEFINE_END`.
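+For example, a complete virtual host definition, combining the commands described in this and the following sections, could look like this (the guid, hostname and label are illustrative):
+
+```
+HOST_DEFINE 11111111-2222-3333-4444-555555555555 my-virtual-host
+HOST_LABEL type virtual
+HOST_DEFINE_END
+HOST 11111111-2222-3333-4444-555555555555
+```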
+ +The template is: + +> HOST_LABEL key value + +where: + +- `key` + + uniquely identifies the key of the label + +- `value` + + is the value associated with this key + +There are a few special keys that are used to define the system information of the monitored system: + +- `_cloud_provider_type` +- `_cloud_instance_type` +- `_cloud_instance_region` +- `_os_name` +- `_os_version` +- `_kernel_version` +- `_system_cores` +- `_system_cpu_freq` +- `_system_ram_total` +- `_system_disk_space` +- `_architecture` +- `_virtualization` +- `_container` +- `_container_detection` +- `_virt_detection` +- `_is_k8s_node` +- `_install_type` +- `_prebuilt_arch` +- `_prebuilt_dist` + +#### HOST_DEFINE_END + +`HOST_DEFINE_END` commits the host information, creating a new host entity, or updating an existing one with the same `machine_guid`. + +#### HOST + +`HOST` switches data collection between hosts. + +The template is: + +> HOST machine_guid + +where: + +- `machine_guid` + + is the UUID of the host to switch to. After this command, every other command following it is assumed to be associated with this host. + Setting machine_guid to `localhost` switches data collection to the local host. + +#### CHART + +`CHART` defines a new chart. + +The template is: + +> CHART type.id name title units \[family \[context \[charttype \[priority \[update_every \[options \[plugin [module]]]]]]]] + + where: + +- `type.id` + + uniquely identifies the chart; this is what will be needed to add values to the chart + + the `type` part controls the menu the charts will appear in + +- `name` + + is the name that will be presented to the user instead of `id` in `type.id`. This means that only the `id` part of + `type.id` is changed. When a name has been given, the chart is indexed (and can be referred to) as both `type.id` and + `type.name`. You can set name to `''`, or `null`, or `(null)` to disable it. If a chart with the same name already + exists, a serial number is automatically attached to the name to avoid naming collisions. + +- `title` + + the text above the chart + +- `units` + + the label of the vertical axis of the chart, + all dimensions added to a chart should have the same units + of measurement + +- `family` + + is used to group charts together + (for example all eth0 charts should say: eth0), + if empty or missing, the `id` part of `type.id` will be used + + this controls the sub-menu on the dashboard + +- `context` + + the context gives the template of the chart. For example, if multiple charts present the same information for a different family, they should have the same `context` + + this is used for looking up rendering information for the chart (colors, sizes, informational texts) and also for applying alerts to it + +- `charttype` + + one of `line`, `area` or `stacked`, + if empty or missing, `line` will be used + +- `priority` + + is the relative priority of the charts as rendered on the web page, + lower numbers make the charts appear before the ones with higher numbers, + if empty or missing, `1000` will be used + +- `update_every` + + overwrites the update frequency set by the server, + if empty or missing, the user configured value will be used + +- `options` + + a space separated list of options, enclosed in quotes.
The following options are currently supported: `obsolete` to mark a chart as obsolete (Netdata will hide it and delete it after some time), `store_first` to make Netdata store the first collected value, assuming there was an invisible previous value set to zero (this is used by statsd charts - if the first data collected value of incremental dimensions is not zero based, unrealistic spikes will appear with this option set) and `hidden` to perform all operations on a chart, but not offer it on dashboards (the chart will be sent to external databases). `CHART` options have been added in Netdata v1.7 and the `hidden` option was added in 1.10. + +- `plugin` and `module` + + both are just names that are used to let the user identify the plugin and the module that generated the chart. If `plugin` is unset or empty, Netdata will automatically set the filename of the plugin that generated the chart. `module` has no default. + +#### DIMENSION + +`DIMENSION` defines a new dimension for the chart + +The template is: + +> DIMENSION id \[name \[algorithm \[multiplier \[divisor [options]]]]] + + where: + +- `id` + + the `id` of this dimension (it is a text value, not numeric), + this will be needed later to add values to the dimension + + We suggest avoiding the use of `.` in dimension ids. External databases expect metrics to be `.` separated and people will get confused if a dimension id contains a dot. + +- `name` + + the name of the dimension as it will appear at the legend of the chart, + if empty or missing the `id` will be used + +- `algorithm` + + one of: + + - `absolute` + + the value is to be drawn as-is (interpolated to second boundary), + if `algorithm` is empty, invalid or missing, `absolute` is used + + - `incremental` + + the value increases over time, + the difference from the last value is presented in the chart, + the server interpolates the value and calculates a per second figure + + - `percentage-of-absolute-row` + + the % of this value compared to the total of all dimensions + + - `percentage-of-incremental-row` + + the % of this value compared to the incremental total of + all dimensions + +- `multiplier` + + an integer value to multiply the collected value, + if empty or missing, `1` is used + +- `divisor` + + an integer value to divide the collected value, + if empty or missing, `1` is used + +- `options` + + a space separated list of options, enclosed in quotes. Options supported: `obsolete` to mark a dimension as obsolete (Netdata will delete it after some time) and `hidden` to make this dimension hidden; it will take part in the calculations but will not be presented in the chart. + +#### VARIABLE + +> VARIABLE [SCOPE] name = value + +`VARIABLE` defines a variable that can be used in alerts. This is to be used for setting constants (like the max connections a server may accept). + +Variables support 2 scopes: + +- `GLOBAL` or `HOST` to define the variable at the host level. +- `LOCAL` or `CHART` to define the variable at the chart level. Use chart-local variables when the same variable may exist for different charts (e.g. Netdata monitors 2 MySQL servers, and you need to set the `max_connections` each server accepts). Using chart-local variables is ideal for building alert templates. + +The position of the `VARIABLE` line sets its default scope (in case you do not specify a scope).
So, defining a `VARIABLE` before any `CHART`, or between `END` and `BEGIN` (outside any chart), sets `GLOBAL` scope, while defining a `VARIABLE` just after a `CHART` or a `DIMENSION`, or within the `BEGIN` - `END` block of a chart, sets `LOCAL` scope. + +These variables can be set and updated at any point. + +Variable names should use alphanumeric characters, the `.` and the `_`. + +The `value` is floating point (Netdata uses `long double`). + +Variables are transferred to upstream Netdata servers (streaming and database replication). + +#### CLABEL + +> CLABEL name value source + +`CLABEL` defines a label used to organize and identify a chart. + +Name and value accept characters according to the following table: + +| Character | Symbol | Label Name | Label Value | +|---------------------|:------:|:----------:|:-----------:| +| UTF-8 character | UTF-8 | _ | keep | +| Lower case letter | [a-z] | keep | keep | +| Upper case letter | [A-Z] | keep | [a-z] | +| Digit | [0-9] | keep | keep | +| Underscore | _ | keep | keep | +| Minus | - | keep | keep | +| Plus | + | _ | keep | +| Colon | : | _ | keep | +| Semicolon | ; | _ | : | +| Equal | = | _ | : | +| Period | . | keep | keep | +| Comma | , | . | . | +| Slash | / | keep | keep | +| Backslash | \ | / | / | +| At | @ | _ | keep | +| Space | ' ' | _ | keep | +| Opening parenthesis | ( | _ | keep | +| Closing parenthesis | ) | _ | keep | +| Anything else | | _ | _ | + +The `source` is an integer field that can have the following values: - `1`: The value was set automatically. - `2`: The value was set manually. - `4`: This is a Kubernetes label. - `8`: This is a label defined using `netdata` agent cloud link. + +#### CLABEL_COMMIT + +`CLABEL_COMMIT` indicates that all labels were defined and the chart can be updated. + +#### FUNCTION + +The plugin can register functions to Netdata, like this: + +> FUNCTION [GLOBAL] "name and parameters of the function" timeout "help string for users" "tags" "access" priority version + +- Tags currently recognized are either `top` or `logs` (or both, space separated). +- Access is one of `any`, `member`, or `admin`: + - `any` to offer the function to all users of Netdata, even if they are not authenticated. + - `member` to offer the function to all authenticated members of Netdata. + - `admin` to offer the function only to authenticated administrators. +- Priority defines the position of the function relative to the other functions (default is 100). +- Version defines the version of the function (default is 0). + +Users can use a function to ask for more information from the collector. Netdata maintains a registry of functions in 2 levels: + +- per node +- per chart + +Both node and chart functions are exactly the same, but chart functions allow Netdata to relate functions with charts and therefore present a context-sensitive menu of functions related to the chart the user is using. + +Users can get a list of all the registered functions using the `/api/v1/functions` endpoint of Netdata and call functions using the `/api/v1/function` API call of Netdata.
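+As an illustration (the function name, help text and endpoint parameters below are hypothetical), a plugin could register a node-level function like this:
+
+```
+FUNCTION GLOBAL "myplugin-status" 10 "Show the current status of myplugin" "top" "member" 100 0
+```
+
+and a user could then invoke it with something like `curl 'http://localhost:19999/api/v1/function?function=myplugin-status'`.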
+ +Once a function is called, the plugin will receive at its standard input a command that looks like this: + +``` +FUNCTION transaction_id timeout "name and parameters of the function as one quoted parameter" "user permissions value" "source of request" +``` + +When the function to be called is to receive a payload of parameters, the call looks like this: + +``` +FUNCTION_PAYLOAD transaction_id timeout "name and parameters of the function as one quoted parameter" "user permissions value" "source of request" "content/type" +body of the payload, formatted according to content/type +FUNCTION_PAYLOAD_END +``` + +In this case, Netdata will send: + +- A line starting with `FUNCTION_PAYLOAD` together with the required metadata for the function, like the transaction id, the function name and its parameters, the timeout and the content type. This line ends with a newline. +- Then, the payload itself (which may or may not have newlines in it). The payload should be parsed according to the content type parameter. +- Finally, a line starting with `FUNCTION_PAYLOAD_END`, so it is expected like `\nFUNCTION_PAYLOAD_END\n`. + +Note 1: The plugins.d protocol allows parameters without single or double quotes if they don't contain spaces. However, the plugin should be able to parse parameters even if they are enclosed in single or double quotes. If the first character of a parameter is a single quote, its last character should also be a single quote, and similarly for double quotes. + +Note 2: Netdata always sends the function and its parameters enclosed in double quotes. If the function command and its parameters contain quotes, they are converted to single quotes. + +The plugin is expected to parse and validate `name and parameters of the function as one quoted parameter`. Netdata allows the user interface to manipulate this string by appending more parameters. + +If the plugin rejects the request, it should respond with this: + +``` +FUNCTION_RESULT_BEGIN transaction_id 400 application/json +{ + "status": 400, + "error_message": "description of the rejection reasons" +} +FUNCTION_RESULT_END +``` + +If the plugin prepares a response, it should send (via its standard output, together with the collected data, but not interleaved with them): + +``` +FUNCTION_RESULT_BEGIN transaction_id http_response_code content_type expiration +``` + +Where: + + - `transaction_id` is the transaction id that Netdata sent for this function execution + - `http_response_code` is the HTTP response code Netdata should respond with; 200 is the "ok" response + - `content_type` is the content type of the response + - `expiration` is the absolute timestamp (number, unix epoch) this response expires + +Immediately after this, all text is assumed to be the response content. +The content is text and line oriented. The maximum line length accepted is 15kb. Longer lines will be truncated. +The type of the content itself depends on the plugin and the UI. + +To terminate the message, Netdata seeks a line with just this: + +``` +FUNCTION_RESULT_END +``` + +This defines the end of the message. `FUNCTION_RESULT_END` should appear in a line alone, without any other text, so it is wise to add `\n` before and after it. + +After this line, Netdata resumes processing collected metrics from the plugin. + +The maximum uncompressed payload size Netdata will accept is 100MB. + +##### Functions cancellation + +Netdata is able to detect when a user made an API request but abandoned it before it was completed.
If this happens to an API called for a function served by the plugin, Netdata will generate a `FUNCTION_CANCEL` request to let the plugin know that it can stop processing the query. + +After receiving such a command, the plugin **must still send a response for the original function request**, to wake up any waiting threads before they time out. The HTTP response code is not important, since the response will be discarded; however, for auditing reasons we suggest sending back a 499 HTTP response code. This is not a standard response code according to the HTTP protocol, but web servers like `nginx` use it to indicate that a request was abandoned by a user. + +##### Functions progress + +When a request takes too long to be processed, Netdata allows the plugin to report progress to Netdata, which in turn will report progress to the caller. + +The plugin can send `FUNCTION_PROGRESS` like this: + +``` +FUNCTION_PROGRESS transaction_id done all +``` + +Where: + +- `transaction_id` is the transaction id of the function request +- `done` is an integer value indicating the amount of work done +- `all` is an integer value indicating the total amount of work to be done + +Netdata supports two kinds of progress: +- progress as a percentage, which is calculated as `done * 100 / all` +- progress without knowing the total amount of work to be done, which is enabled when the plugin reports `all` as zero. + +##### Functions timeout + +All function calls specify a timeout, at which all the intermediate routing nodes (parents, web server threads) will time out and abort the call. + +However, all intermediate routing nodes are configured to extend the timeout when the caller asks for progress. This works like this: + +When a progress request is received, if the expected timeout of the request is less than or equal to 10 seconds, the expected timeout is extended by 10 seconds. + +Usually, the user interface asks for progress every second. So, during the last 10 seconds of the timeout, every progress request made shifts the timeout 10 seconds to the future. + +To accomplish this, when Netdata receives a progress request from a user, it generates progress requests to the plugin, updating all the intermediate nodes to extend their timeout if necessary. + +The plugin will receive progress requests like this: + +``` +FUNCTION_PROGRESS transaction_id +``` + +There is no need to respond to this command. It is only there to let the plugin know that a user is still waiting for the query to finish. + +#### CONFIG + +`CONFIG` commands sent from the plugin to Netdata define dynamic configuration entities. These configurable entities are exposed to the user interface, allowing users to change configuration at runtime. + +Dynamic configurations made this way are saved to disk by Netdata and are replayed automatically when Netdata or the plugin restarts. + +`CONFIG` commands look like this: + +``` +CONFIG id action ... +``` + +Where: + +- `id` is a unique identifier for the configurable entity. This should by design be unique across Netdata. It should be something like `plugin:module:jobs`, e.g. `go.d:postgresql:jobs:masterdb`. This is assumed to be colon-separated with the last part (`masterdb` in our example) being the one displayed to users when there are no conflicts under the same configuration path.
+- `action` can be: + - `create`, to declare the dynamic configuration entity + - `delete`, to delete the dynamic configuration entity - this does not delete user configuration, so if an entity with the same id is created in the future, the saved configuration will be given to it. + - `status`, to update the dynamic configuration entity status + +> IMPORTANT:
+> The plugin should blindly create, delete and update the status of its dynamic configuration entities, without any special logic applied to it. Netdata needs to be kept up to date with what is actually happening at the plugin. Keep in mind that creating dynamic configuration entities triggers responses from Netdata, depending on their type and status. Re-creating a job triggers the same responses every time, so make sure you create jobs only when they are actually added. + +When the `action` is `create`, the following additional parameters are expected: + +``` +CONFIG id action status type "path" source_type "source" "supported commands" "view permissions" "edit permissions" +``` + +Where: + +- `action` should be `create` +- `status` can be: + - `accepted`, the plugin accepted the configuration, but it is not running yet. + - `running`, the plugin accepted and runs the configuration. + - `failed`, the plugin tries to run the configuration but it fails. + - `incomplete`, the plugin needs additional settings to run this configuration. This is usually used for cases where the plugin discovered a job, but important information is missing for it to work. + - `disabled`, the configuration has been disabled by a user. + - `orphan`, the configuration is not claimed by any plugin. This is used internally by Netdata to mark configuration nodes as available when there is no plugin related to them. Do not use in plugins directly. +- `type` can be `single`, `template` or `job`: + - `single` is used when the configurable entity is fixed and users should never be able to add or delete it. + - `template` is used to define a template based on which users can add multiple configurations, like adding data collection jobs. So, the plugin defines the template of the jobs and users are presented with a `[+]` button to add such configuration jobs. The plugin can define multiple templates by giving different `id`s to them. + - `job` is used to define a job of a template. The plugin should always add all its jobs, independently of the way they have been discovered. It is important to note the relation between `template` and `job` when it comes to the `id`: The `id` of the template should be the prefix of the `job`'s `id`. For example, if the template is `go.d:postgresql:jobs`, then all its jobs should be like `go.d:postgresql:jobs:jobname`. +- `path` is the absolute path of the configurable entity inside the tree of Netdata configurations. Usually, this should be `/collectors`. +- `source_type` can be `internal`, `stock`, `user`, `discovered` or `dyncfg`: + - `internal` is used for configurations that are based on internal code settings + - `stock` is used for default configurations + - `discovered` is used for dynamic configurations the plugin discovers on its own + - `user` is used for user configurations, usually via a configuration file + - `dyncfg` is used for configuration received via this dynamic configuration mechanism +- `source` should provide more details about the exact source of the configuration, like `line@file`, or `user@ip`, etc. +- `supported_commands` is a space separated list of the following keywords, enclosed in single or double quotes. These commands are used by the user interface to determine the actions the users can take: + - `schema`, to expose the JSON schema for the user interface. This is mandatory for all configurable entities. When `schema` requests are received, Netdata will first attempt to load the schema from `/etc/netdata/schema.d/` and `/var/lib/netdata/conf.d/schema.d`.
For jobs, it will serve the schema of their template. If no schema is found for the required `id`, the `schema` request will be forwarded to the plugin, which is expected to send back the relevant schema. + - `get`, to expose the current configuration values, according to the defined schema. `templates` cannot support `get`, since they don't maintain any data. + - `update`, to receive configuration updates for this entity. `templates` cannot support `update`, since they don't maintain any data. + - `test`, like `update`, but only tests the configuration and reports success or failure. + - `add`, to receive job creation commands for templates. Only `templates` should support this command. + - `remove`, to remove a configuration. Only `jobs` should support this command. + - `enable` and `disable`, to receive user requests to enable and disable this entity. If only one of `enable` or `disable` is added to the supported commands, Netdata will add both of them. The plugin should expose these commands on `templates` only when it wants to receive `enable` and `disable` commands for all the `jobs` of this `template`. + - `restart`, to restart a job. +- `view permissions` and `edit permissions` are bitmaps of the Netdata permission system to control access to the configuration. If set to zero, Netdata will require a signed-in user with view and edit permissions to Netdata's configuration system. + +The plugin receives commands as if it had exposed a `FUNCTION` named `config`. Netdata formats all these calls like this: + +``` +config id command +``` + +Where `id` is the unique id of the configurable entity and `command` is one of the supported commands the plugin sent to Netdata. + +The plugin will receive (for commands: `schema`, `get`, `remove`, `enable`, `disable` and `restart`): + +``` +FUNCTION transaction_id timeout "config id command" "user permissions value" "source string" +``` + +or (for commands: `update`, `add` and `test`): + +``` +FUNCTION_PAYLOAD transaction_id timeout "config id command" "user permissions value" "source string" "content/type" +body of the payload formatted according to content/type +FUNCTION_PAYLOAD_END +``` + +Once received, the plugin should process it and respond accordingly. + +Immediately after the plugin adds a configuration entity, if the commands `enable` and `disable` are supported by it, Netdata will send either `enable` or `disable` for it, based on the last user action, which has been persisted to disk. + +Plugin responses follow the same format `FUNCTIONS` do: + +``` +FUNCTION_RESULT_BEGIN transaction_id http_response_code content/type expiration +body of the response formatted according to content/type +FUNCTION_RESULT_END +``` + +Successful responses (HTTP response code 200) to `schema` and `get` should send back the relevant JSON object. +All other responses should have the following response body: + +```json +{ + "status" : 404, + "message" : "some text" +} +``` + +The user interface presents the message to users, even when the response is successful (HTTP code 200). + +When responding to additions and updates, Netdata uses the following success response codes to derive additional information: + +- `200`, responding with 200, means the configuration has been accepted and it is running. +- `202`, responding with 202, means the configuration has been accepted but it is not yet running. A subsequent `status` action will update it.
+- `298`, responding with 298, means the configuration has been accepted but it is disabled for some reason (probably because it matches nothing or the contents are not useful - use the `message` to provide additional information). +- `299`, responding with 299, means the configuration has been accepted but a restart is required to apply it. + +## Data collection + +Data collection is defined as a series of `BEGIN` -> `SET` -> `END` lines + +> BEGIN type.id [microseconds] + +- `type.id` + + is the unique identification of the chart (as given in `CHART`) + +- `microseconds` + + is the number of microseconds since the last update of the chart. It is optional. + + Under heavy system load, the system may have some latency transferring + data from the plugins to Netdata via the pipe. This number improves + accuracy significantly, since the plugin is able to calculate the + duration between its iterations better than Netdata. + + The first time the plugin is started, no microseconds should be given + to Netdata. + +> SET id = value + +- `id` + + is the unique identification of the dimension (of the chart just begun) + +- `value` + + is the collected value; only integer values are collected. If you want to push fractional values, multiply this value by 100 or 1000 and set the `DIMENSION` divisor to the same number. + +> END + + END does not take any parameters; it commits the collected values for all dimensions to the chart. If a dimension was not `SET`, its value will be empty for this commit. + +More `SET` lines may appear to update all the dimensions of the chart. +All of them in one `BEGIN` -> `END` block. + +All `SET` lines within a single `BEGIN` -> `END` block have to refer to the +same chart. + +If more charts need to be updated, each chart should have its own +`BEGIN` -> `SET` -> `END` block. + +If, for any reason, a plugin has issued a `BEGIN` but wants to cancel it, +it can issue a `FLUSH`. The `FLUSH` command will instruct Netdata to ignore +all the values collected since the last `BEGIN` command. + +If a plugin does not behave properly (outputs invalid lines, or does not +follow these guidelines), it will be disabled by Netdata. + +### collected values + +Netdata will collect any **signed** value in the 64-bit range: +`-9,223,372,036,854,775,808` to `+9,223,372,036,854,775,807` + +If a value is not collected, leave it empty, like this: + +`SET id =` + +or do not output the line at all. + +## Modular Plugins + +1. **python**, use `python.d.plugin`, there are many examples in the [python.d + directory](/src/collectors/python.d.plugin/README.md) + + python is ideal for Netdata plugins. It is a simple, yet powerful way to collect data; it has a very small memory footprint, although it is not the most CPU-efficient way to do it. + +2. **BASH**, use `charts.d.plugin`, there are many examples in the [charts.d + directory](/src/collectors/charts.d.plugin/README.md) + + BASH is the simplest scripting language for collecting values. It is the least efficient, though, in terms of CPU resources. You can use it to collect data quickly, but extensive use of it might use a lot of system resources. + +3. **C** + + Of course, C is the most efficient way of collecting data. This is why Netdata itself is written in C. + +## Writing Plugins Properly + +There are a few rules for writing plugins properly: + +1. Respect system resources + + Pay special attention to efficiency: + + - Initialize everything once, at the beginning. Initialization is not an expensive operation.
Your plugin will most probably be started once and run forever. So, do whatever heavy operation is needed at the beginning, just once. + - Do the absolute minimum while iterating to collect values repeatedly. + - If you need to connect to another server to collect values, avoid re-connects if possible. Connect just once, with keep-alive (for HTTP) enabled and collect values using the same connection. + - Avoid any CPU or memory heavy operation while collecting data. If you control memory allocation, avoid any memory allocation while iterating to collect values. + - Avoid running external commands when possible. If you are writing shell scripts, especially avoid pipes (each pipe is another fork, a very expensive operation). + +2. The best way to iterate at a constant pace is this pseudocode: + +```js + var update_every = argv[1] * 1000; /* seconds * 1000 = milliseconds */ + + readConfiguration(); + + if(!verifyWeCanCollectValues()) { + print("DISABLE"); + exit(1); + } + + createCharts(); /* print CHART and DIMENSION statements */ + + var loops = 0; + var last_run = 0; + var next_run = 0; + var dt_since_last_run = 0; + var now = 0; + + while(true) { + /* find the current time in milliseconds */ + now = currentTimeStampInMilliseconds(); + + /* + * find the time of the next loop + * this makes sure we are always aligned + * with the Netdata daemon + */ + next_run = now - (now % update_every) + update_every; + + /* + * wait until it is time + * it is important to do it in a loop + * since many wait functions can be interrupted + */ + while( now < next_run ) { + sleepMilliseconds(next_run - now); + now = currentTimeStampInMilliseconds(); + } + + /* calculate the time passed since the last run */ + if ( loops > 0 ) + dt_since_last_run = (now - last_run) * 1000; /* in microseconds */ + + /* prepare for the next loop */ + last_run = now; + loops++; + + /* do your magic here to collect values */ + collectValues(); + + /* send the collected data to Netdata */ + printValues(dt_since_last_run); /* print BEGIN, SET, END statements */ + } +``` + + Using the above procedure, your plugin will be synchronized to start data collection on steps of `update_every`. There will be no need to keep track of latencies in data collection. + + Netdata interpolates values to second boundaries, so even if your plugin is not perfectly aligned it does not matter. Netdata will find out. When your plugin works in increments of `update_every`, there will be no gaps in the charts due to the possible cumulative micro-delays in data collection. Gaps will only appear if the data collection is really delayed. + +3. If you are not sure about memory leaks, exit every hour. Netdata will restart your process. + +4. If possible, try to autodetect if your plugin should be enabled, without any configuration. + + diff --git a/src/plugins.d/functions-table.md b/src/plugins.d/functions-table.md new file mode 100644 index 000000000..f3a8bcf36 --- /dev/null +++ b/src/plugins.d/functions-table.md @@ -0,0 +1,418 @@ + +> This document is a work in progress. + +Plugin functions can support any kind of response. However, the UI of Netdata has defined some structures as responses it can parse, understand and visualize. + +One of these responses is the `table`. This is used in almost all functions implemented today. + +# Functions Tables + +Tables are defined when `"type": "table"` is set.
The following is the standard header that should be available on all `table` responses: + +```json +{ + "type": "table", + "status": 200, + "update_every": 1, + "help": "help text", + "hostname": "the hostname of the server sending this response, to appear at the title of the UI.", + "expires": "UNIX epoch timestamp that the response expires", + "has_history": "boolean: true when the datetime picker plays a role in the result set", + // rest of the response +} +``` + +## Preflight `info` request + +The UI, before making the first call to a function, does a preflight request to understand what the function supports. The plugin receives this request as a FUNCTION call specifying the `info` parameter (possibly among others). + +The response from the plugin is expected to have the following: + +```json +{ + // standard table header - as above + "accepted_params": [ "a", "b", "c", ...], + "required_params": [ + { + "id": "the keyword to use when sending / receiving this parameter", + "name": "the name to present to users for this parameter", + "help": "a help string to help users understand this parameter", + "type": "the type of the parameter, either: 'select' or 'multiselect'", + "options": [ + { + "id": "the keyword to use when sending / receiving this option", + "name": "the name to present to users for this option", + "pill": "a short text to show next to this option as a pill", + "info": "a longer text to show on a tooltip when the user is hovering this option" + }, + // more options for this required parameter + ] + }, + // more required parameters + ] +} +``` + +If there are no required parameters, `required_params` can be omitted. +If there are no accepted parameters, `accepted_params` can be omitted. `accepted_params` can also be sent in normal responses to update the UI with a new set of available parameters between calls. + +For `logs`, the UI requires this set of `accepted_params`. + +Ref [Pagination](#pagination), [Deltas](#incremental-responses) +```json +[ + "info", // boolean: requests the preflight `info` request + "after", // interval start timestamp + "before", // interval end timestamp + "direction", // sort direction [backward,forward] + "last", // number of records to retrieve + "anchor", // timestamp to divide records in pages + "facets", + "histogram", // selects facet to be used on the histogram + "if_modified_since", // used in PLAY mode, to indicate that the UI wants data newer than the specified timestamp + "data_only", // boolean: requests data (logs) only + "delta", // boolean: requests incremental responses + "tail", + "sampling", + "slice" +] +``` + +If there are `required_params`, the UI by default selects the first option.
[](VERIFY_WITH_UI) + +## Table data + +To define table data, the UI expects this: + +```json +{ + // header + "columns": { + "id": { + "index": "number: the sort order for the columns, lower numbers are first", + "name": "string: the name of the column as it should be presented to users", + "unique_key": "boolean: true when the column uniquely identifies the row", + "visible": "boolean: true when the column should be visible by default", + "type": "enum: see column types", + "units": "string: the units of the value, if any - this item can be omitted if the column does not have units [](VERIFY_WITH_UI)", + "visualization": "enum: see visualization types", + "value_options": { + "units": "string: the units of the value [](VERIFY_WITH_UI)", + "transform": "enum: see transformation types", + "decimal_points": "number: the number of fractional digits for the number", + "default_value": "whatever the value is: when the value is null, show this instead" + }, + "max": "number: when the column is numeric, this is the max value the data has - this is used when range filtering is set and for value bars", + "pointer_to": "id of another field: this is used when detail-string is set, to point to the column this column is detail of", + "sort": "enum: sorting order", + "sortable": "boolean: whether the column is sortable by users", + "sticky": "boolean: whether the column should always be visible in the UI", + "summary": "string: ???", + "filter": "enum: the filtering type for this column", + "full_width": "boolean: the value is expected to get most of the available column space. When multiple columns are full_width, the available space is given to all of them.", + "wrap": "boolean: true when the entire value should be shown, even when it occupies a big space.", + "default_expanded_filter": "boolean: true when the filter of this column should be expanded by default.", + "dummy": "boolean: when set to true, the column is not to be presented to users." + }, + // more IDs + }, + "data": [ // array of rows + [ // array of columns + // values for each column linked to their "index" in the columns + ], + // next row + ], + "default_sort_column": "id: the id of the column that should be sorted by default" +} +``` + +**IMPORTANT** + +On data values, the `timestamp` column value must be in unix epoch microseconds. + + +### Sorting order + +- `ascending` +- `descending` + +### Transformation types + +- `none`, just show the value, without any processing +- `number`, just show a number with its units, respecting `decimal_points` +- `duration`, makes the UI show a human readable duration of the seconds given +- `datetime`, makes the UI show a human readable datetime of the timestamp in UNIX epoch +- `datetime_usec`, makes the UI show a human readable datetime of the timestamp in USEC UNIX epoch + +### Visualization types + +- `value` +- `bar` +- `pill` +- `richValue`, this is not used yet, it is supposed to be a structure that will provide a value and options for it +- `rowOptions`, defines options for the entire row - this column is hidden from the UI + +### rowOptions + +TBD + +### Column types + +- `none` +- `integer` +- `boolean` +- `string` +- `detail-string` +- `bar-with-integer` +- `duration` +- `timestamp` +- `array` + +### Filter types + +- `none`, this facet is not selectable by users +- `multiselect`, the user can select any number of the available options +- `facet`, similar to `multiselect`, but it also indicates that the column has been indexed and has values with counters.
Columns set to `facet` must appear in the `facets` list. +- `range`, the user can select a range of values (numeric) + +The plugin may send non-visible columns with filter type `facet`. This means that the plugin can enable indexing on these columns, but it has not done so. Then the UI may send `facets:{ID1},{ID2},{ID3},...` to enable indexing of the columns specified. + +What is the default? + +#### Facets + +Facets are a special case of `multiselect` fields. They are used to provide additional information about each possible value, including their relative sort order and the number of times each value appears in the result set. Facets are filters handled by the plugin. So, the plugin will receive user-selected filters like: `{KEY}:{VALUE1},{VALUE2},...`, where `{KEY}` is the id of the column and `{VALUEX}` is the id of the facet option the user selected. + +```json +{ + // header, + "columns": ..., + "data": ..., + "facets": [ + { + "id": "string: the unique id of the facet", + "name": "string: the human readable name of the facet", + "order": "integer: the sorting order of this facet - lower numbers move items above others", + "options": [ + { + "id": "string: the unique id of the facet value", + "name": "string: the human readable version of the facet value", + "count": "integer: the number of times this value appears in the result set", + "order": "integer: the sorting order of this facet value - lower numbers move items above others" + }, + // next option + ], + }, + // next facet + ] +} +``` + +## Charts + +```json +{ + // header, + "charts": { + + }, + "default_charts": [ + + ] +} +``` + + +## Histogram + +```json +{ + "available_histograms": [ + { + "id": "string: the unique id of the histogram", + "name": "string: the human readable name of the histogram", + "order": "integer: the sorting order of available histograms - lower numbers move items above others" + } + ], + "histogram": { + "id": "string: the unique id of the histogram", + "name": "string: the human readable name of the histogram", + "chart": { + "summary": { + "nodes": [ + { + "mg": "string", + "nm": "string: node name", + "ni": "integer: node index" + } + ], + "contexts": [ + { + "id": "string: context id" + } + ], + "instances": [ + { + "id": "string: instance id", + "ni": "integer: instance index" + } + ], + "dimensions": [ + { + "id": "string: dimension id", + "pri": "integer", + "sts": { + "min": "float: dimension min value", + "max": "float: dimension max value", + "avg": "float: dimension average value", + "arp": "float", + "con": "float" + } + } + ] + }, + "result": { + "labels": [ + // histogram labels + ], + "point": { + "value": "integer", + "arp": "integer", + "pa": "integer" + }, + "data": [ + [ + "timestamp" // unix milli + // one array per label + [ + // values + ], + ] + ] + }, + "view": { + "title": "string: histogram title", + "update_every": "integer", + "after": "timestamp: histogram window start", + "before": "timestamp: histogram window end", + "units": "string: histogram units", + "chart_type": "string: histogram chart type", + "min": "integer: histogram min value", + "max": "integer: histogram max value", + "dimensions": { + "grouped_by": [ + // "string: histogram grouped by", + ], + "ids": [ + // "string: histogram label id", + ], + "names": [ + // "string: histogram human readable label name", + ], + "colors": [], + "units": [ + // "string: histogram label unit", + ], + "sts": { + "min": [ + // "float: label min value", + ], + "max": [ + // "float: label max value", + ], + "avg": [ + // "float:
label avarage value", + ], + "arp": [ + // "float", + ], + "con": [ + // "float", + ] + } + } + }, + "totals": { + "nodes": { + "sl": "integer", + "qr": "integer" + }, + "contexts": { + "sl": "integer", + "qr": "integer" + }, + "instances": { + "sl": "integer", + "qr": "integer" + }, + "dimensions": { + "sl": "integer", + "qr": "integer" + } + }, + "db": { + "update_every": "integer" + } + } + } +} +``` + +**IMPORTANT** + +On Result Data, `timestamps` must be in unix milli. + +## Grouping + +```json +{ + // header, + "group_by": { + + } +} +``` + +## Datetime picker + +When `has_history: true`, the plugin must accept `after:TIMESTAMP_IN_SECONDS` and `before:TIMESTAMP_IN_SECONDS` parameters. +The plugin can also turn pagination on, so that only a small set of the data are sent to the UI at a time. + + +## Pagination + +The UI supports paginating results when `has_history: true`. So, when the result depends on the datetime picker and it is too big to be sent to the UI in one response, the plugin can enable datetime pagination like this: + +```json +{ + // header, + "columns": ..., + "data": ..., + "has_history": true, + "pagination": { + "enabled": "boolean: true to enable it", + "column": "string: the column id that is used for pagination", + "key": "string: the accepted_param that is used as the pagination anchor", + "units": "enum: a transformation of the datetime picker to make it compatible with the anchor: timestamp, timestamp_usec" + } +} +``` + +Once pagination is enabled, the plugin must support the following parameters: + +- `{ANCHOR}:{VALUE}`, `{ANCHOR}` is the `pagination.key`, `{VALUE}` is the point the user wants to see entries at, formatted according to `pagination.units`. +- `direction:backward` or `direction:forward` to specify if the data to be returned if before are after the anchor. +- `last:NUMER`, the number of entries the plugin should return in the table data. +- `query:STRING`, the full text search string the user wants to search for. +- `if_modified_since:TIMESTAMP_USEC` and `tail:true`, used in PLAY mode, to indicate that the UI wants data newer than the specified timestamp. If there are no new data, the plugin must respond with 304 (Not Modified). + +### Incremental Responses + +- `delta:true` or `delta:false`, when the plugin supports incremental queries, it can accept the parameter `delta`. When set to true, the response of the plugin will be "added" to the previous response already available. This is used in combination with `if_modified_since` to optimize the amount of work the plugin has to do to respond. 
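+
+For illustration only, here is a sketch of how these pieces could fit together; every concrete value below is hypothetical, and `anchor` simply stands for whatever `pagination.key` the plugin declared. A plugin could enable microsecond-based pagination like this:
+
+```json
+{
+  // header,
+  "has_history": true,
+  "pagination": {
+    "enabled": true,
+    "column": "timestamp",
+    "key": "anchor",
+    "units": "timestamp_usec"
+  }
+}
+```
+
+The UI could then ask for the 200 entries just before a given point in time with parameters like `after:1700000000 before:1700003600 anchor:1700003599123456 direction:backward last:200 query:error delta:false`, and, while in PLAY mode, poll with `if_modified_since:{TIMESTAMP_USEC} tail:true` for anything newer.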
+ + +### Other + +- `slice:BOOLEAN` [](VERIFY_WITH_UI) +- `sampling:NUMBER` + diff --git a/src/plugins.d/gperf-config.txt b/src/plugins.d/gperf-config.txt new file mode 100644 index 000000000..bfaa4eecc --- /dev/null +++ b/src/plugins.d/gperf-config.txt @@ -0,0 +1,118 @@ +%{ + +#define PLUGINSD_KEYWORD_ID_FLUSH 97 +#define PLUGINSD_KEYWORD_ID_DISABLE 98 +#define PLUGINSD_KEYWORD_ID_EXIT 99 +#define PLUGINSD_KEYWORD_ID_HOST 71 +#define PLUGINSD_KEYWORD_ID_HOST_DEFINE 72 +#define PLUGINSD_KEYWORD_ID_HOST_DEFINE_END 73 +#define PLUGINSD_KEYWORD_ID_HOST_LABEL 74 + +#define PLUGINSD_KEYWORD_ID_BEGIN 12 +#define PLUGINSD_KEYWORD_ID_CHART 32 +#define PLUGINSD_KEYWORD_ID_CLABEL 34 +#define PLUGINSD_KEYWORD_ID_CLABEL_COMMIT 35 +#define PLUGINSD_KEYWORD_ID_DIMENSION 31 +#define PLUGINSD_KEYWORD_ID_END 13 +#define PLUGINSD_KEYWORD_ID_FUNCTION 41 +#define PLUGINSD_KEYWORD_ID_FUNCTION_RESULT_BEGIN 42 +#define PLUGINSD_KEYWORD_ID_FUNCTION_PROGRESS 43 +#define PLUGINSD_KEYWORD_ID_LABEL 51 +#define PLUGINSD_KEYWORD_ID_OVERWRITE 52 +#define PLUGINSD_KEYWORD_ID_SET 11 +#define PLUGINSD_KEYWORD_ID_VARIABLE 53 +#define PLUGINSD_KEYWORD_ID_CONFIG 100 + +#define PLUGINSD_KEYWORD_ID_CLAIMED_ID 61 +#define PLUGINSD_KEYWORD_ID_BEGIN2 2 +#define PLUGINSD_KEYWORD_ID_SET2 1 +#define PLUGINSD_KEYWORD_ID_END2 3 + +#define PLUGINSD_KEYWORD_ID_CHART_DEFINITION_END 33 +#define PLUGINSD_KEYWORD_ID_RBEGIN 22 +#define PLUGINSD_KEYWORD_ID_RDSTATE 23 +#define PLUGINSD_KEYWORD_ID_REND 25 +#define PLUGINSD_KEYWORD_ID_RSET 21 +#define PLUGINSD_KEYWORD_ID_RSSTATE 24 + +#define PLUGINSD_KEYWORD_ID_JSON 80 + +#define PLUGINSD_KEYWORD_ID_DYNCFG_ENABLE 901 +#define PLUGINSD_KEYWORD_ID_DYNCFG_REGISTER_MODULE 902 +#define PLUGINSD_KEYWORD_ID_DYNCFG_REGISTER_JOB 903 +#define PLUGINSD_KEYWORD_ID_DYNCFG_RESET 904 +#define PLUGINSD_KEYWORD_ID_REPORT_JOB_STATUS 905 +#define PLUGINSD_KEYWORD_ID_DELETE_JOB 906 + +%} + +%struct-type +%omit-struct-type +%define hash-function-name gperf_keyword_hash_function +%define lookup-function-name gperf_lookup_keyword +%define word-array-name gperf_keywords +%define constants-prefix GPERF_PARSER_ +%define slot-name keyword +%define initializer-suffix ,0,PARSER_INIT_PLUGINSD,0 +%global-table +%readonly-tables +%null-strings +PARSER_KEYWORD; + +%% +# +# Plugins Only Keywords +# +FLUSH, PLUGINSD_KEYWORD_ID_FLUSH, PARSER_INIT_PLUGINSD, WORKER_PARSER_FIRST_JOB + 1 +DISABLE, PLUGINSD_KEYWORD_ID_DISABLE, PARSER_INIT_PLUGINSD, WORKER_PARSER_FIRST_JOB + 2 +EXIT, PLUGINSD_KEYWORD_ID_EXIT, PARSER_INIT_PLUGINSD, WORKER_PARSER_FIRST_JOB + 3 +HOST, PLUGINSD_KEYWORD_ID_HOST, PARSER_INIT_PLUGINSD|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 4 +HOST_DEFINE, PLUGINSD_KEYWORD_ID_HOST_DEFINE, PARSER_INIT_PLUGINSD|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 5 +HOST_DEFINE_END, PLUGINSD_KEYWORD_ID_HOST_DEFINE_END, PARSER_INIT_PLUGINSD|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 6 +HOST_LABEL, PLUGINSD_KEYWORD_ID_HOST_LABEL, PARSER_INIT_PLUGINSD|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 7 +# +# Common keywords +# +BEGIN, PLUGINSD_KEYWORD_ID_BEGIN, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 8 +CHART, PLUGINSD_KEYWORD_ID_CHART, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 9 +CLABEL, PLUGINSD_KEYWORD_ID_CLABEL, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 10 +CLABEL_COMMIT, PLUGINSD_KEYWORD_ID_CLABEL_COMMIT, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 11 
+DIMENSION, PLUGINSD_KEYWORD_ID_DIMENSION, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 12 +END, PLUGINSD_KEYWORD_ID_END, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 13 +FUNCTION, PLUGINSD_KEYWORD_ID_FUNCTION, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 14 +FUNCTION_RESULT_BEGIN, PLUGINSD_KEYWORD_ID_FUNCTION_RESULT_BEGIN, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 15 +FUNCTION_PROGRESS, PLUGINSD_KEYWORD_ID_FUNCTION_PROGRESS, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 16 +LABEL, PLUGINSD_KEYWORD_ID_LABEL, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 17 +OVERWRITE, PLUGINSD_KEYWORD_ID_OVERWRITE, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 18 +SET, PLUGINSD_KEYWORD_ID_SET, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 19 +VARIABLE, PLUGINSD_KEYWORD_ID_VARIABLE, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 20 +CONFIG, PLUGINSD_KEYWORD_ID_CONFIG, PARSER_INIT_PLUGINSD|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 21 +# +# Streaming only keywords +# +CLAIMED_ID, PLUGINSD_KEYWORD_ID_CLAIMED_ID, PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 22 +BEGIN2, PLUGINSD_KEYWORD_ID_BEGIN2, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 23 +SET2, PLUGINSD_KEYWORD_ID_SET2, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 24 +END2, PLUGINSD_KEYWORD_ID_END2, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 25 +# +# Streaming Replication keywords +# +CHART_DEFINITION_END, PLUGINSD_KEYWORD_ID_CHART_DEFINITION_END, PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 26 +RBEGIN, PLUGINSD_KEYWORD_ID_RBEGIN, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 27 +RDSTATE, PLUGINSD_KEYWORD_ID_RDSTATE, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 28 +REND, PLUGINSD_KEYWORD_ID_REND, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 29 +RSET, PLUGINSD_KEYWORD_ID_RSET, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 30 +RSSTATE, PLUGINSD_KEYWORD_ID_RSSTATE, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 31 +# +# JSON +# +JSON, PLUGINSD_KEYWORD_ID_JSON, PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 32 +# +# obsolete - do nothing commands +# +DYNCFG_ENABLE, PLUGINSD_KEYWORD_ID_DYNCFG_ENABLE, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 33 +DYNCFG_REGISTER_MODULE, PLUGINSD_KEYWORD_ID_DYNCFG_REGISTER_MODULE, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 34 +DYNCFG_REGISTER_JOB, PLUGINSD_KEYWORD_ID_DYNCFG_REGISTER_JOB, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 35 +DYNCFG_RESET, PLUGINSD_KEYWORD_ID_DYNCFG_RESET, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 36 +REPORT_JOB_STATUS, PLUGINSD_KEYWORD_ID_REPORT_JOB_STATUS, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 37 +DELETE_JOB, PLUGINSD_KEYWORD_ID_DELETE_JOB, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 38 diff --git a/src/plugins.d/gperf-hashtable.h b/src/plugins.d/gperf-hashtable.h new file mode 100644 index 000000000..5375de832 --- /dev/null +++ b/src/plugins.d/gperf-hashtable.h @@ -0,0 +1,241 @@ +/* ANSI-C code produced by gperf version 3.1 */ +/* Command-line: gperf --multiple-iterations=1000 --output-file=gperf-hashtable.h 
gperf-config.txt */ +/* Computed positions: -k'1-2' */ + +#if !((' ' == 32) && ('!' == 33) && ('"' == 34) && ('#' == 35) \ + && ('%' == 37) && ('&' == 38) && ('\'' == 39) && ('(' == 40) \ + && (')' == 41) && ('*' == 42) && ('+' == 43) && (',' == 44) \ + && ('-' == 45) && ('.' == 46) && ('/' == 47) && ('0' == 48) \ + && ('1' == 49) && ('2' == 50) && ('3' == 51) && ('4' == 52) \ + && ('5' == 53) && ('6' == 54) && ('7' == 55) && ('8' == 56) \ + && ('9' == 57) && (':' == 58) && (';' == 59) && ('<' == 60) \ + && ('=' == 61) && ('>' == 62) && ('?' == 63) && ('A' == 65) \ + && ('B' == 66) && ('C' == 67) && ('D' == 68) && ('E' == 69) \ + && ('F' == 70) && ('G' == 71) && ('H' == 72) && ('I' == 73) \ + && ('J' == 74) && ('K' == 75) && ('L' == 76) && ('M' == 77) \ + && ('N' == 78) && ('O' == 79) && ('P' == 80) && ('Q' == 81) \ + && ('R' == 82) && ('S' == 83) && ('T' == 84) && ('U' == 85) \ + && ('V' == 86) && ('W' == 87) && ('X' == 88) && ('Y' == 89) \ + && ('Z' == 90) && ('[' == 91) && ('\\' == 92) && (']' == 93) \ + && ('^' == 94) && ('_' == 95) && ('a' == 97) && ('b' == 98) \ + && ('c' == 99) && ('d' == 100) && ('e' == 101) && ('f' == 102) \ + && ('g' == 103) && ('h' == 104) && ('i' == 105) && ('j' == 106) \ + && ('k' == 107) && ('l' == 108) && ('m' == 109) && ('n' == 110) \ + && ('o' == 111) && ('p' == 112) && ('q' == 113) && ('r' == 114) \ + && ('s' == 115) && ('t' == 116) && ('u' == 117) && ('v' == 118) \ + && ('w' == 119) && ('x' == 120) && ('y' == 121) && ('z' == 122) \ + && ('{' == 123) && ('|' == 124) && ('}' == 125) && ('~' == 126)) +/* The character set is not based on ISO-646. */ +#error "gperf generated tables don't work with this execution character set. Please report a bug to ." +#endif + +#line 1 "gperf-config.txt" + + +#define PLUGINSD_KEYWORD_ID_FLUSH 97 +#define PLUGINSD_KEYWORD_ID_DISABLE 98 +#define PLUGINSD_KEYWORD_ID_EXIT 99 +#define PLUGINSD_KEYWORD_ID_HOST 71 +#define PLUGINSD_KEYWORD_ID_HOST_DEFINE 72 +#define PLUGINSD_KEYWORD_ID_HOST_DEFINE_END 73 +#define PLUGINSD_KEYWORD_ID_HOST_LABEL 74 + +#define PLUGINSD_KEYWORD_ID_BEGIN 12 +#define PLUGINSD_KEYWORD_ID_CHART 32 +#define PLUGINSD_KEYWORD_ID_CLABEL 34 +#define PLUGINSD_KEYWORD_ID_CLABEL_COMMIT 35 +#define PLUGINSD_KEYWORD_ID_DIMENSION 31 +#define PLUGINSD_KEYWORD_ID_END 13 +#define PLUGINSD_KEYWORD_ID_FUNCTION 41 +#define PLUGINSD_KEYWORD_ID_FUNCTION_RESULT_BEGIN 42 +#define PLUGINSD_KEYWORD_ID_FUNCTION_PROGRESS 43 +#define PLUGINSD_KEYWORD_ID_LABEL 51 +#define PLUGINSD_KEYWORD_ID_OVERWRITE 52 +#define PLUGINSD_KEYWORD_ID_SET 11 +#define PLUGINSD_KEYWORD_ID_VARIABLE 53 +#define PLUGINSD_KEYWORD_ID_CONFIG 100 + +#define PLUGINSD_KEYWORD_ID_CLAIMED_ID 61 +#define PLUGINSD_KEYWORD_ID_BEGIN2 2 +#define PLUGINSD_KEYWORD_ID_SET2 1 +#define PLUGINSD_KEYWORD_ID_END2 3 + +#define PLUGINSD_KEYWORD_ID_CHART_DEFINITION_END 33 +#define PLUGINSD_KEYWORD_ID_RBEGIN 22 +#define PLUGINSD_KEYWORD_ID_RDSTATE 23 +#define PLUGINSD_KEYWORD_ID_REND 25 +#define PLUGINSD_KEYWORD_ID_RSET 21 +#define PLUGINSD_KEYWORD_ID_RSSTATE 24 + +#define PLUGINSD_KEYWORD_ID_JSON 80 + +#define PLUGINSD_KEYWORD_ID_DYNCFG_ENABLE 901 +#define PLUGINSD_KEYWORD_ID_DYNCFG_REGISTER_MODULE 902 +#define PLUGINSD_KEYWORD_ID_DYNCFG_REGISTER_JOB 903 +#define PLUGINSD_KEYWORD_ID_DYNCFG_RESET 904 +#define PLUGINSD_KEYWORD_ID_REPORT_JOB_STATUS 905 +#define PLUGINSD_KEYWORD_ID_DELETE_JOB 906 + + +#define GPERF_PARSER_TOTAL_KEYWORDS 38 +#define GPERF_PARSER_MIN_WORD_LENGTH 3 +#define GPERF_PARSER_MAX_WORD_LENGTH 22 +#define GPERF_PARSER_MIN_HASH_VALUE 4 +#define 
GPERF_PARSER_MAX_HASH_VALUE 53 +/* maximum key range = 50, duplicates = 0 */ + +#ifdef __GNUC__ +__inline +#else +#ifdef __cplusplus +inline +#endif +#endif +static unsigned int +gperf_keyword_hash_function (register const char *str, register size_t len) +{ + static const unsigned char asso_values[] = + { + 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, + 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, + 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, + 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, + 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, + 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, + 54, 54, 54, 54, 54, 31, 28, 2, 4, 0, + 5, 54, 0, 25, 20, 54, 17, 54, 27, 0, + 54, 54, 1, 16, 54, 15, 0, 54, 2, 0, + 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, + 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, + 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, + 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, + 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, + 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, + 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, + 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, + 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, + 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, + 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, + 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, + 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, + 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, + 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, + 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, + 54, 54, 54, 54, 54, 54 + }; + return len + asso_values[(unsigned char)str[1]] + asso_values[(unsigned char)str[0]]; +} + +static const PARSER_KEYWORD gperf_keywords[] = + { + {(char*)0,0,PARSER_INIT_PLUGINSD,0}, + {(char*)0,0,PARSER_INIT_PLUGINSD,0}, + {(char*)0,0,PARSER_INIT_PLUGINSD,0}, + {(char*)0,0,PARSER_INIT_PLUGINSD,0}, +#line 69 "gperf-config.txt" + {"HOST", PLUGINSD_KEYWORD_ID_HOST, PARSER_INIT_PLUGINSD|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 4}, +#line 103 "gperf-config.txt" + {"REND", PLUGINSD_KEYWORD_ID_REND, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 29}, +#line 68 "gperf-config.txt" + {"EXIT", PLUGINSD_KEYWORD_ID_EXIT, PARSER_INIT_PLUGINSD, WORKER_PARSER_FIRST_JOB + 3}, +#line 77 "gperf-config.txt" + {"CHART", PLUGINSD_KEYWORD_ID_CHART, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 9}, +#line 89 "gperf-config.txt" + {"CONFIG", PLUGINSD_KEYWORD_ID_CONFIG, PARSER_INIT_PLUGINSD|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 21}, +#line 86 "gperf-config.txt" + {"OVERWRITE", PLUGINSD_KEYWORD_ID_OVERWRITE, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 18}, +#line 72 "gperf-config.txt" + {"HOST_LABEL", PLUGINSD_KEYWORD_ID_HOST_LABEL, PARSER_INIT_PLUGINSD|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 7}, +#line 70 "gperf-config.txt" + {"HOST_DEFINE", PLUGINSD_KEYWORD_ID_HOST_DEFINE, PARSER_INIT_PLUGINSD|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 5}, +#line 102 "gperf-config.txt" + {"RDSTATE", PLUGINSD_KEYWORD_ID_RDSTATE, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 28}, + {(char*)0,0,PARSER_INIT_PLUGINSD,0}, +#line 118 "gperf-config.txt" + {"DELETE_JOB", PLUGINSD_KEYWORD_ID_DELETE_JOB, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 38}, +#line 71 "gperf-config.txt" + {"HOST_DEFINE_END", PLUGINSD_KEYWORD_ID_HOST_DEFINE_END, PARSER_INIT_PLUGINSD|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 6}, +#line 116 "gperf-config.txt" + {"DYNCFG_RESET", PLUGINSD_KEYWORD_ID_DYNCFG_RESET, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 36}, +#line 113 "gperf-config.txt" + {"DYNCFG_ENABLE", 
PLUGINSD_KEYWORD_ID_DYNCFG_ENABLE, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 33}, +#line 117 "gperf-config.txt" + {"REPORT_JOB_STATUS", PLUGINSD_KEYWORD_ID_REPORT_JOB_STATUS, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 37}, +#line 87 "gperf-config.txt" + {"SET", PLUGINSD_KEYWORD_ID_SET, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 19}, +#line 95 "gperf-config.txt" + {"SET2", PLUGINSD_KEYWORD_ID_SET2, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 24}, +#line 104 "gperf-config.txt" + {"RSET", PLUGINSD_KEYWORD_ID_RSET, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 30}, +#line 100 "gperf-config.txt" + {"CHART_DEFINITION_END", PLUGINSD_KEYWORD_ID_CHART_DEFINITION_END, PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 26}, +#line 115 "gperf-config.txt" + {"DYNCFG_REGISTER_JOB", PLUGINSD_KEYWORD_ID_DYNCFG_REGISTER_JOB, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 35}, +#line 105 "gperf-config.txt" + {"RSSTATE", PLUGINSD_KEYWORD_ID_RSSTATE, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 31}, +#line 78 "gperf-config.txt" + {"CLABEL", PLUGINSD_KEYWORD_ID_CLABEL, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 10}, +#line 114 "gperf-config.txt" + {"DYNCFG_REGISTER_MODULE", PLUGINSD_KEYWORD_ID_DYNCFG_REGISTER_MODULE, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 34}, +#line 66 "gperf-config.txt" + {"FLUSH", PLUGINSD_KEYWORD_ID_FLUSH, PARSER_INIT_PLUGINSD, WORKER_PARSER_FIRST_JOB + 1}, +#line 82 "gperf-config.txt" + {"FUNCTION", PLUGINSD_KEYWORD_ID_FUNCTION, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 14}, +#line 93 "gperf-config.txt" + {"CLAIMED_ID", PLUGINSD_KEYWORD_ID_CLAIMED_ID, PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 22}, +#line 81 "gperf-config.txt" + {"END", PLUGINSD_KEYWORD_ID_END, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 13}, +#line 96 "gperf-config.txt" + {"END2", PLUGINSD_KEYWORD_ID_END2, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 25}, +#line 79 "gperf-config.txt" + {"CLABEL_COMMIT", PLUGINSD_KEYWORD_ID_CLABEL_COMMIT, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 11}, +#line 76 "gperf-config.txt" + {"BEGIN", PLUGINSD_KEYWORD_ID_BEGIN, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 8}, +#line 94 "gperf-config.txt" + {"BEGIN2", PLUGINSD_KEYWORD_ID_BEGIN2, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 23}, +#line 101 "gperf-config.txt" + {"RBEGIN", PLUGINSD_KEYWORD_ID_RBEGIN, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 27}, +#line 67 "gperf-config.txt" + {"DISABLE", PLUGINSD_KEYWORD_ID_DISABLE, PARSER_INIT_PLUGINSD, WORKER_PARSER_FIRST_JOB + 2}, +#line 84 "gperf-config.txt" + {"FUNCTION_PROGRESS", PLUGINSD_KEYWORD_ID_FUNCTION_PROGRESS, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 16}, +#line 80 "gperf-config.txt" + {"DIMENSION", PLUGINSD_KEYWORD_ID_DIMENSION, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 12}, +#line 88 "gperf-config.txt" + {"VARIABLE", PLUGINSD_KEYWORD_ID_VARIABLE, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 20}, +#line 109 "gperf-config.txt" + {"JSON", PLUGINSD_KEYWORD_ID_JSON, PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 32}, +#line 83 "gperf-config.txt" + 
{"FUNCTION_RESULT_BEGIN", PLUGINSD_KEYWORD_ID_FUNCTION_RESULT_BEGIN, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 15}, + {(char*)0,0,PARSER_INIT_PLUGINSD,0}, + {(char*)0,0,PARSER_INIT_PLUGINSD,0}, + {(char*)0,0,PARSER_INIT_PLUGINSD,0}, + {(char*)0,0,PARSER_INIT_PLUGINSD,0}, + {(char*)0,0,PARSER_INIT_PLUGINSD,0}, + {(char*)0,0,PARSER_INIT_PLUGINSD,0}, + {(char*)0,0,PARSER_INIT_PLUGINSD,0}, + {(char*)0,0,PARSER_INIT_PLUGINSD,0}, + {(char*)0,0,PARSER_INIT_PLUGINSD,0}, + {(char*)0,0,PARSER_INIT_PLUGINSD,0}, + {(char*)0,0,PARSER_INIT_PLUGINSD,0}, +#line 85 "gperf-config.txt" + {"LABEL", PLUGINSD_KEYWORD_ID_LABEL, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 17} + }; + +const PARSER_KEYWORD * +gperf_lookup_keyword (register const char *str, register size_t len) +{ + if (len <= GPERF_PARSER_MAX_WORD_LENGTH && len >= GPERF_PARSER_MIN_WORD_LENGTH) + { + register unsigned int key = gperf_keyword_hash_function (str, len); + + if (key <= GPERF_PARSER_MAX_HASH_VALUE) + { + register const char *s = gperf_keywords[key].keyword; + + if (s && *str == *s && !strcmp (str + 1, s + 1)) + return &gperf_keywords[key]; + } + } + return 0; +} diff --git a/src/plugins.d/plugins_d.c b/src/plugins.d/plugins_d.c new file mode 100644 index 000000000..09be1ffc6 --- /dev/null +++ b/src/plugins.d/plugins_d.c @@ -0,0 +1,375 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "plugins_d.h" +#include "pluginsd_parser.h" + +char *plugin_directories[PLUGINSD_MAX_DIRECTORIES] = { [0] = PLUGINS_DIR, }; +struct plugind *pluginsd_root = NULL; + +static inline void pluginsd_sleep(const int seconds) { + int timeout_ms = seconds * 1000; + int waited_ms = 0; + while(waited_ms < timeout_ms) { + if(!service_running(SERVICE_COLLECTORS)) break; + sleep_usec(ND_CHECK_CANCELLABILITY_WHILE_WAITING_EVERY_MS * USEC_PER_MS); + waited_ms += ND_CHECK_CANCELLABILITY_WHILE_WAITING_EVERY_MS; + } +} + +inline size_t pluginsd_initialize_plugin_directories() +{ + char plugins_dirs[(FILENAME_MAX * 2) + 1]; + static char *plugins_dir_list = NULL; + + // Get the configuration entry + if (likely(!plugins_dir_list)) { + snprintfz(plugins_dirs, FILENAME_MAX * 2, "\"%s\" \"%s/custom-plugins.d\"", PLUGINS_DIR, CONFIG_DIR); + plugins_dir_list = strdupz(config_get(CONFIG_SECTION_DIRECTORIES, "plugins", plugins_dirs)); + } + + // Parse it and store it to plugin directories + return quoted_strings_splitter_config(plugins_dir_list, plugin_directories, PLUGINSD_MAX_DIRECTORIES); +} + +static inline void plugin_set_disabled(struct plugind *cd) { + spinlock_lock(&cd->unsafe.spinlock); + cd->unsafe.enabled = false; + spinlock_unlock(&cd->unsafe.spinlock); +} + +bool plugin_is_enabled(struct plugind *cd) { + spinlock_lock(&cd->unsafe.spinlock); + bool ret = cd->unsafe.enabled; + spinlock_unlock(&cd->unsafe.spinlock); + return ret; +} + +static inline void plugin_set_running(struct plugind *cd) { + spinlock_lock(&cd->unsafe.spinlock); + cd->unsafe.running = true; + spinlock_unlock(&cd->unsafe.spinlock); +} + +static inline bool plugin_is_running(struct plugind *cd) { + spinlock_lock(&cd->unsafe.spinlock); + bool ret = cd->unsafe.running; + spinlock_unlock(&cd->unsafe.spinlock); + return ret; +} + +static void pluginsd_worker_thread_cleanup(void *pptr) { + struct plugind *cd = CLEANUP_FUNCTION_GET_PTR(pptr); + if(!cd) return; + + worker_unregister(); + + spinlock_lock(&cd->unsafe.spinlock); + + cd->unsafe.running = false; + cd->unsafe.thread = 0; + + cd->unsafe.pid = 0; + + POPEN_INSTANCE *pi = 
cd->unsafe.pi;
+    cd->unsafe.pi = NULL;
+
+    spinlock_unlock(&cd->unsafe.spinlock);
+
+    if (pi)
+        spawn_popen_kill(pi);
+}
+
+#define SERIAL_FAILURES_THRESHOLD 10
+static void pluginsd_worker_thread_handle_success(struct plugind *cd) {
+    if (likely(cd->successful_collections)) {
+        pluginsd_sleep(cd->update_every);
+        return;
+    }
+
+    if (likely(cd->serial_failures <= SERIAL_FAILURES_THRESHOLD)) {
+        netdata_log_info("PLUGINSD: 'host:%s', '%s' (pid %d) does not generate useful output but it reports success (exits with 0). %s.",
+                         rrdhost_hostname(cd->host), cd->fullfilename, cd->unsafe.pid,
+                         plugin_is_enabled(cd) ? "Waiting a bit before starting it again." : "Will not start it again - it is now disabled.");
+
+        pluginsd_sleep(cd->update_every * 10);
+        return;
+    }
+
+    if (cd->serial_failures > SERIAL_FAILURES_THRESHOLD) {
+        netdata_log_error("PLUGINSD: 'host:%s', '%s' (pid %d) does not generate useful output, "
+                          "although it reports success (exits with 0). "
+                          "We have tried to collect something %zu times - unsuccessfully. Disabling it.",
+                          rrdhost_hostname(cd->host), cd->fullfilename, cd->unsafe.pid, cd->serial_failures);
+        plugin_set_disabled(cd);
+        return;
+    }
+}
+
+static void pluginsd_worker_thread_handle_error(struct plugind *cd, int worker_ret_code) {
+    if (worker_ret_code == -1) {
+        netdata_log_info("PLUGINSD: 'host:%s', '%s' (pid %d) was killed with SIGTERM. Disabling it.",
+                         rrdhost_hostname(cd->host), cd->fullfilename, cd->unsafe.pid);
+        plugin_set_disabled(cd);
+        return;
+    }
+
+    if (!cd->successful_collections) {
+        netdata_log_error("PLUGINSD: 'host:%s', '%s' (pid %d) exited with error code %d and hasn't collected any data. Disabling it.",
+                          rrdhost_hostname(cd->host), cd->fullfilename, cd->unsafe.pid, worker_ret_code);
+        plugin_set_disabled(cd);
+        return;
+    }
+
+    if (cd->serial_failures <= SERIAL_FAILURES_THRESHOLD) {
+        netdata_log_error("PLUGINSD: 'host:%s', '%s' (pid %d) exited with error code %d, but has given useful output in the past (%zu times). %s",
+                          rrdhost_hostname(cd->host), cd->fullfilename, cd->unsafe.pid, worker_ret_code, cd->successful_collections,
+                          plugin_is_enabled(cd) ? "Waiting a bit before starting it again." : "Will not start it again - it is disabled.");
+
+        pluginsd_sleep(cd->update_every * 10);
+        return;
+    }
+
+    if (cd->serial_failures > SERIAL_FAILURES_THRESHOLD) {
+        netdata_log_error("PLUGINSD: 'host:%s', '%s' (pid %d) exited with error code %d, but has given useful output in the past (%zu times). "
+                          "We tried to restart it %zu times, but it failed to generate data. 
Disabling it.", + rrdhost_hostname(cd->host), cd->fullfilename, cd->unsafe.pid, worker_ret_code, + cd->successful_collections, cd->serial_failures); + plugin_set_disabled(cd); + return; + } +} + +#undef SERIAL_FAILURES_THRESHOLD + +static void *pluginsd_worker_thread(void *arg) { + struct plugind *cd = (struct plugind *) arg; + CLEANUP_FUNCTION_REGISTER(pluginsd_worker_thread_cleanup) cleanup_ptr = cd; + + worker_register("PLUGINSD"); + + plugin_set_running(cd); + + size_t count = 0; + + while(service_running(SERVICE_COLLECTORS)) { + cd->unsafe.pi = spawn_popen_run(cd->cmd); + if(!cd->unsafe.pi) { + netdata_log_error("PLUGINSD: 'host:%s', cannot popen(\"%s\", \"r\").", + rrdhost_hostname(cd->host), cd->cmd); + break; + } + cd->unsafe.pid = spawn_popen_pid(cd->unsafe.pi); + + nd_log(NDLS_DAEMON, NDLP_DEBUG, + "PLUGINSD: 'host:%s' connected to '%s' running on pid %d", + rrdhost_hostname(cd->host), + cd->fullfilename, cd->unsafe.pid); + + const char *plugin = strrchr(cd->fullfilename, '/'); + if(plugin) + plugin++; + else + plugin = cd->fullfilename; + + char module[100]; + snprintfz(module, sizeof(module), "plugins.d[%s]", plugin); + ND_LOG_STACK lgs[] = { + ND_LOG_FIELD_TXT(NDF_MODULE, module), + ND_LOG_FIELD_TXT(NDF_NIDL_NODE, rrdhost_hostname(cd->host)), + ND_LOG_FIELD_TXT(NDF_SRC_TRANSPORT, "pluginsd"), + ND_LOG_FIELD_END(), + }; + ND_LOG_STACK_PUSH(lgs); + + count = pluginsd_process(cd->host, cd, + spawn_popen_read_fd(cd->unsafe.pi), + spawn_popen_write_fd(cd->unsafe.pi), + 0); + + nd_log(NDLS_COLLECTORS, NDLP_WARNING, + "PLUGINSD: 'host:%s', '%s' (pid %d) disconnected after %zu successful data collections.", + rrdhost_hostname(cd->host), cd->fullfilename, cd->unsafe.pid, count); + + int worker_ret_code = spawn_popen_kill(cd->unsafe.pi); + cd->unsafe.pi = NULL; + + if(likely(worker_ret_code == 0)) + pluginsd_worker_thread_handle_success(cd); + else + pluginsd_worker_thread_handle_error(cd, worker_ret_code); + + cd->unsafe.pid = 0; + + if(unlikely(!plugin_is_enabled(cd))) + break; + } + return NULL; +} + +static void pluginsd_main_cleanup(void *pptr) { + struct netdata_static_thread *static_thread = CLEANUP_FUNCTION_GET_PTR(pptr); + if(!static_thread) return; + + static_thread->enabled = NETDATA_MAIN_THREAD_EXITING; + netdata_log_info("PLUGINSD: cleaning up..."); + + struct plugind *cd; + for (cd = pluginsd_root; cd; cd = cd->next) { + spinlock_lock(&cd->unsafe.spinlock); + if (cd->unsafe.enabled && cd->unsafe.running && cd->unsafe.thread != 0) { + netdata_log_info("PLUGINSD: 'host:%s', stopping plugin thread: %s", + rrdhost_hostname(cd->host), cd->id); + + nd_thread_signal_cancel(cd->unsafe.thread); + } + spinlock_unlock(&cd->unsafe.spinlock); + } + + netdata_log_info("PLUGINSD: cleanup completed."); + static_thread->enabled = NETDATA_MAIN_THREAD_EXITED; + + worker_unregister(); +} + +static bool is_plugin(char *dst, size_t dst_size, const char *filename) { + size_t len = strlen(filename); + + const char *suffix; + size_t suffix_len; + + suffix = ".plugin"; + suffix_len = strlen(suffix); + if (len > suffix_len && + strcmp(suffix, &filename[len - suffix_len]) == 0) { + snprintfz(dst, dst_size, "%.*s", (int)(len - suffix_len), filename); + return true; + } + +#if defined(OS_WINDOWS) + suffix = ".plugin.exe"; + suffix_len = strlen(suffix); + if (len > suffix_len && + strcmp(suffix, &filename[len - suffix_len]) == 0) { + snprintfz(dst, dst_size, "%.*s", (int)(len - suffix_len), filename); + return true; + } +#endif + + return false; +} + +void *pluginsd_main(void *ptr) { + 
CLEANUP_FUNCTION_REGISTER(pluginsd_main_cleanup) cleanup_ptr = ptr; + + int automatic_run = config_get_boolean(CONFIG_SECTION_PLUGINS, "enable running new plugins", 1); + int scan_frequency = (int)config_get_number(CONFIG_SECTION_PLUGINS, "check for new plugins every", 60); + if (scan_frequency < 1) + scan_frequency = 1; + + // disable some plugins by default + config_get_boolean(CONFIG_SECTION_PLUGINS, "slabinfo", CONFIG_BOOLEAN_NO); + // it crashes (both threads) on Alpine after we made it multi-threaded + // works with "--device /dev/ipmi0", but this is not default + // see https://github.com/netdata/netdata/pull/15564 for details + if (getenv("NETDATA_LISTENER_PORT")) + config_get_boolean(CONFIG_SECTION_PLUGINS, "freeipmi", CONFIG_BOOLEAN_NO); + + // store the errno for each plugins directory + // so that we don't log broken directories on each loop + int directory_errors[PLUGINSD_MAX_DIRECTORIES] = { 0 }; + + while (service_running(SERVICE_COLLECTORS)) { + int idx; + const char *directory_name; + + for (idx = 0; idx < PLUGINSD_MAX_DIRECTORIES && (directory_name = plugin_directories[idx]); idx++) { + if (unlikely(!service_running(SERVICE_COLLECTORS))) + break; + + errno_clear(); + DIR *dir = opendir(directory_name); + if (unlikely(!dir)) { + if (directory_errors[idx] != errno) { + directory_errors[idx] = errno; + netdata_log_error("cannot open plugins directory '%s'", directory_name); + } + continue; + } + + struct dirent *file = NULL; + while (likely((file = readdir(dir)))) { + if (unlikely(!service_running(SERVICE_COLLECTORS))) + break; + + netdata_log_debug(D_PLUGINSD, "examining file '%s'", file->d_name); + + if (unlikely(strcmp(file->d_name, ".") == 0 || strcmp(file->d_name, "..") == 0)) + continue; + + char pluginname[CONFIG_MAX_NAME + 1]; + if(!is_plugin(pluginname, sizeof(pluginname), file->d_name)) { + netdata_log_debug(D_PLUGINSD, "file '%s' does not look like a plugin", file->d_name); + continue; + } + + int enabled = config_get_boolean(CONFIG_SECTION_PLUGINS, pluginname, automatic_run); + if (unlikely(!enabled)) { + netdata_log_debug(D_PLUGINSD, "plugin '%s' is not enabled", file->d_name); + continue; + } + + // check if it runs already + struct plugind *cd; + for (cd = pluginsd_root; cd; cd = cd->next) + if (unlikely(strcmp(cd->filename, file->d_name) == 0)) + break; + + if (likely(cd && plugin_is_running(cd))) { + netdata_log_debug(D_PLUGINSD, "plugin '%s' is already running", cd->filename); + continue; + } + + // it is not running + // allocate a new one, or use the obsolete one + if (unlikely(!cd)) { + cd = callocz(sizeof(struct plugind), 1); + + snprintfz(cd->id, CONFIG_MAX_NAME, "plugin:%s", pluginname); + + strncpyz(cd->filename, file->d_name, FILENAME_MAX); + snprintfz(cd->fullfilename, FILENAME_MAX, "%s/%s", directory_name, cd->filename); + + cd->host = localhost; + cd->unsafe.enabled = enabled; + cd->unsafe.running = false; + + cd->update_every = (int)config_get_duration_seconds(cd->id, "update every", localhost->rrd_update_every); + cd->started_t = now_realtime_sec(); + + char *def = ""; + snprintfz( + cd->cmd, PLUGINSD_CMD_MAX, "exec %s %d %s", cd->fullfilename, cd->update_every, + config_get(cd->id, "command options", def)); + + // link it + DOUBLE_LINKED_LIST_PREPEND_ITEM_UNSAFE(pluginsd_root, cd, prev, next); + + if (plugin_is_enabled(cd)) { + char tag[NETDATA_THREAD_TAG_MAX + 1]; + snprintfz(tag, NETDATA_THREAD_TAG_MAX, "PD[%s]", pluginname); + + // spawn a new thread for it + cd->unsafe.thread = nd_thread_create(tag, NETDATA_THREAD_OPTION_DEFAULT, + 
pluginsd_worker_thread, cd); + } + } + } + + closedir(dir); + } + + pluginsd_sleep(scan_frequency); + } + + return NULL; +} diff --git a/src/plugins.d/plugins_d.h b/src/plugins.d/plugins_d.h new file mode 100644 index 000000000..4da7448bf --- /dev/null +++ b/src/plugins.d/plugins_d.h @@ -0,0 +1,55 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_PLUGINS_D_H +#define NETDATA_PLUGINS_D_H 1 + +#include "daemon/common.h" + + #define PLUGINSD_CMD_MAX (FILENAME_MAX*2) +#define PLUGINSD_STOCK_PLUGINS_DIRECTORY_PATH 0 + +#define PLUGINSD_MAX_DIRECTORIES 20 +extern char *plugin_directories[PLUGINSD_MAX_DIRECTORIES]; + +struct plugind { + char id[CONFIG_MAX_NAME+1]; // config node id + + char filename[FILENAME_MAX+1]; // just the filename + char fullfilename[FILENAME_MAX+1]; // with path + char cmd[PLUGINSD_CMD_MAX+1]; // the command that it executes + + size_t successful_collections; // the number of times we have seen + // values collected from this plugin + + size_t serial_failures; // the number of times the plugin started + // without collecting values + + RRDHOST *host; // the host the plugin collects data for + int update_every; // the plugin default data collection frequency + + struct { + SPINLOCK spinlock; + bool running; // do not touch this structure after setting this to 1 + bool enabled; // if this is enabled or not + ND_THREAD *thread; + POPEN_INSTANCE *pi; + pid_t pid; + } unsafe; + + time_t started_t; + + struct plugind *prev; + struct plugind *next; +}; + +extern struct plugind *pluginsd_root; + +size_t pluginsd_process(RRDHOST *host, struct plugind *cd, int fd_input, int fd_output, int trust_durations); + +struct parser; +void pluginsd_process_cleanup(struct parser *parser); +void pluginsd_process_thread_cleanup(void *pptr); + +size_t pluginsd_initialize_plugin_directories(); + +#endif /* NETDATA_PLUGINS_D_H */ diff --git a/src/plugins.d/pluginsd_dyncfg.c b/src/plugins.d/pluginsd_dyncfg.c new file mode 100644 index 000000000..c4dd42a73 --- /dev/null +++ b/src/plugins.d/pluginsd_dyncfg.c @@ -0,0 +1,69 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "pluginsd_dyncfg.h" + + +// ---------------------------------------------------------------------------- + +PARSER_RC pluginsd_config(char **words, size_t num_words, PARSER *parser) { + RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_CONFIG); + if(!host) return PARSER_RC_ERROR; + + size_t i = 1; + char *id = get_word(words, num_words, i++); + char *action = get_word(words, num_words, i++); + + if(strcmp(action, PLUGINSD_KEYWORD_CONFIG_ACTION_CREATE) == 0) { + char *status_str = get_word(words, num_words, i++); + char *type_str = get_word(words, num_words, i++); + char *path = get_word(words, num_words, i++); + char *source_type_str = get_word(words, num_words, i++); + char *source = get_word(words, num_words, i++); + char *supported_cmds_str = get_word(words, num_words, i++); + char *view_permissions_str = get_word(words, num_words, i++); + char *edit_permissions_str = get_word(words, num_words, i++); + + DYNCFG_STATUS status = dyncfg_status2id(status_str); + DYNCFG_TYPE type = dyncfg_type2id(type_str); + DYNCFG_SOURCE_TYPE source_type = dyncfg_source_type2id(source_type_str); + DYNCFG_CMDS cmds = dyncfg_cmds2id(supported_cmds_str); + HTTP_ACCESS view_access = http_access_from_hex(view_permissions_str); + HTTP_ACCESS edit_access = http_access_from_hex(edit_permissions_str); + + if(!dyncfg_add_low_level( + host, + id, + path, + status, + type, + source_type, + source, + cmds, + 0, + 0, 
+ false, + view_access, + edit_access, + pluginsd_function_execute_cb, + parser)) + return PARSER_RC_ERROR; + } + else if(strcmp(action, PLUGINSD_KEYWORD_CONFIG_ACTION_DELETE) == 0) { + dyncfg_del_low_level(host, id); + } + else if(strcmp(action, PLUGINSD_KEYWORD_CONFIG_ACTION_STATUS) == 0) { + char *status_str = get_word(words, num_words, i++); + dyncfg_status_low_level(host, id, dyncfg_status2id(status_str)); + } + else + nd_log(NDLS_COLLECTORS, NDLP_WARNING, "DYNCFG: unknown action '%s' received from plugin", action); + + parser->user.data_collections_count++; + return PARSER_RC_OK; +} + +// ---------------------------------------------------------------------------- + +PARSER_RC pluginsd_dyncfg_noop(char **words __maybe_unused, size_t num_words __maybe_unused, PARSER *parser __maybe_unused) { + return PARSER_RC_OK; +} diff --git a/src/plugins.d/pluginsd_dyncfg.h b/src/plugins.d/pluginsd_dyncfg.h new file mode 100644 index 000000000..fd35a3c36 --- /dev/null +++ b/src/plugins.d/pluginsd_dyncfg.h @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_PLUGINSD_DYNCFG_H +#define NETDATA_PLUGINSD_DYNCFG_H + +#include "pluginsd_internals.h" + +PARSER_RC pluginsd_config(char **words, size_t num_words, PARSER *parser); +PARSER_RC pluginsd_dyncfg_noop(char **words, size_t num_words, PARSER *parser); + +#endif //NETDATA_PLUGINSD_DYNCFG_H diff --git a/src/plugins.d/pluginsd_functions.c b/src/plugins.d/pluginsd_functions.c new file mode 100644 index 000000000..26477a7db --- /dev/null +++ b/src/plugins.d/pluginsd_functions.c @@ -0,0 +1,419 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "pluginsd_functions.h" + +#define LOG_FUNCTIONS false + +// ---------------------------------------------------------------------------- +// execution of functions + +static void inflight_functions_insert_callback(const DICTIONARY_ITEM *item, void *func, void *parser_ptr) { + struct inflight_function *pf = func; + + PARSER *parser = parser_ptr; + + // leave this code as default, so that when the dictionary is destroyed this will be sent back to the caller + pf->code = HTTP_RESP_SERVICE_UNAVAILABLE; + + const char *transaction = dictionary_acquired_item_name(item); + + int rc = uuid_parse_flexi(transaction, pf->transaction); + if(rc != 0) + netdata_log_error("FUNCTION: '%s': cannot parse transaction UUID", string2str(pf->function)); + + CLEAN_BUFFER *buffer = buffer_create(1024, NULL); + if(pf->payload && buffer_strlen(pf->payload)) { + buffer_sprintf( + buffer, + PLUGINSD_CALL_FUNCTION_PAYLOAD_BEGIN " %s %d \"%s\" \""HTTP_ACCESS_FORMAT"\" \"%s\" \"%s\"\n", + transaction, + pf->timeout_s, + string2str(pf->function), + (HTTP_ACCESS_FORMAT_CAST)pf->access, + pf->source ? pf->source : "", + content_type_id2string(pf->payload->content_type) + ); + + buffer_fast_strcat(buffer, buffer_tostring(pf->payload), buffer_strlen(pf->payload)); + buffer_strcat(buffer, "\nFUNCTION_PAYLOAD_END\n"); + } + else { + buffer_sprintf( + buffer, + PLUGINSD_CALL_FUNCTION " %s %d \"%s\" \""HTTP_ACCESS_FORMAT"\" \"%s\"\n", + transaction, + pf->timeout_s, + string2str(pf->function), + (HTTP_ACCESS_FORMAT_CAST)pf->access, + pf->source ? 
pf->source : "" + ); + } + + // send the command to the plugin + // IMPORTANT: make sure all commands are sent in 1 call, because in streaming they may interfere with others + ssize_t ret = send_to_plugin(buffer_tostring(buffer), parser); + pf->sent_monotonic_ut = now_monotonic_usec(); + + if(ret < 0) { + pf->sent_successfully = false; + + pf->code = HTTP_RESP_SERVICE_UNAVAILABLE; + netdata_log_error("FUNCTION '%s': failed to send it to the plugin, error %zd", string2str(pf->function), ret); + rrd_call_function_error(pf->result_body_wb, "Failed to send this request to the plugin that offered it.", pf->code); + } + else { + pf->sent_successfully = true; + + internal_error(LOG_FUNCTIONS, + "FUNCTION '%s' with transaction '%s' sent to collector (%zd bytes, in %"PRIu64" usec)", + string2str(pf->function), dictionary_acquired_item_name(item), ret, + pf->sent_monotonic_ut - pf->started_monotonic_ut); + } +} + +static bool inflight_functions_conflict_callback(const DICTIONARY_ITEM *item __maybe_unused, void *func __maybe_unused, void *new_func, void *parser_ptr __maybe_unused) { + struct inflight_function *pf = new_func; + + netdata_log_error("PLUGINSD_PARSER: duplicate UUID on pending function '%s' detected. Ignoring the second one.", string2str(pf->function)); + pf->code = rrd_call_function_error(pf->result_body_wb, "This transaction is already in progress.", HTTP_RESP_BAD_REQUEST); + pf->result.cb(pf->result_body_wb, pf->code, pf->result.data); + string_freez(pf->function); + + return false; +} + +static void inflight_functions_delete_callback(const DICTIONARY_ITEM *item __maybe_unused, void *func, void *parser_ptr) { + struct inflight_function *pf = func; + struct parser *parser = (struct parser *)parser_ptr; (void)parser; + + internal_error(LOG_FUNCTIONS, + "FUNCTION '%s' result of transaction '%s' received from collector " + "(%zu bytes, request %"PRIu64" usec, response %"PRIu64" usec)", + string2str(pf->function), dictionary_acquired_item_name(item), + buffer_strlen(pf->result_body_wb), + pf->sent_monotonic_ut - pf->started_monotonic_ut, now_realtime_usec() - pf->sent_monotonic_ut); + + if(pf->code == HTTP_RESP_SERVICE_UNAVAILABLE && !buffer_strlen(pf->result_body_wb)) + rrd_call_function_error(pf->result_body_wb, "The plugin that was servicing this request, exited before responding.", pf->code); + + pf->result.cb(pf->result_body_wb, pf->code, pf->result.data); + + string_freez(pf->function); + buffer_free((void *)pf->payload); + freez((void *)pf->source); +} + +void pluginsd_inflight_functions_init(PARSER *parser) { + parser->inflight.functions = dictionary_create_advanced(DICT_OPTION_DONT_OVERWRITE_VALUE, &dictionary_stats_category_functions, 0); + dictionary_register_insert_callback(parser->inflight.functions, inflight_functions_insert_callback, parser); + dictionary_register_delete_callback(parser->inflight.functions, inflight_functions_delete_callback, parser); + dictionary_register_conflict_callback(parser->inflight.functions, inflight_functions_conflict_callback, parser); +} + +void pluginsd_inflight_functions_cleanup(PARSER *parser) { + dictionary_destroy(parser->inflight.functions); +} + +// ---------------------------------------------------------------------------- + +void pluginsd_inflight_functions_garbage_collect(PARSER *parser, usec_t now_ut) { + parser->inflight.smaller_monotonic_timeout_ut = 0; + struct inflight_function *pf; + dfe_start_write(parser->inflight.functions, pf) { + if (*pf->stop_monotonic_ut + RRDFUNCTIONS_TIMEOUT_EXTENSION_UT < now_ut) { + 
internal_error(true,
+                           "FUNCTION '%s' removing expired transaction '%s', after %"PRIu64" usec.",
+                           string2str(pf->function), pf_dfe.name, now_ut - pf->started_monotonic_ut);
+
+            if(!buffer_strlen(pf->result_body_wb) || pf->code == HTTP_RESP_OK)
+                pf->code = rrd_call_function_error(pf->result_body_wb,
+                                                   "Timeout waiting for a response.",
+                                                   HTTP_RESP_GATEWAY_TIMEOUT);
+
+            dictionary_del(parser->inflight.functions, pf_dfe.name);
+        }
+
+        else if(!parser->inflight.smaller_monotonic_timeout_ut || *pf->stop_monotonic_ut + RRDFUNCTIONS_TIMEOUT_EXTENSION_UT < parser->inflight.smaller_monotonic_timeout_ut)
+            parser->inflight.smaller_monotonic_timeout_ut = *pf->stop_monotonic_ut + RRDFUNCTIONS_TIMEOUT_EXTENSION_UT;
+    }
+    dfe_done(pf);
+}
+
+// ----------------------------------------------------------------------------
+
+static void pluginsd_function_cancel(void *data) {
+    struct inflight_function *look_for = data, *t;
+
+    bool sent = false;
+    dfe_start_read(look_for->parser->inflight.functions, t) {
+        if(look_for == t) {
+            const char *transaction = t_dfe.name;
+
+            internal_error(true, "PLUGINSD: sending function cancellation to plugin for transaction '%s'", transaction);
+
+            char buffer[2048];
+            snprintfz(buffer, sizeof(buffer), PLUGINSD_CALL_FUNCTION_CANCEL " %s\n", transaction);
+
+            // send the command to the plugin
+            // mark it as sent only when the write to the plugin succeeded
+            ssize_t ret = send_to_plugin(buffer, t->parser);
+            if(ret >= 0)
+                sent = true;
+
+            break;
+        }
+    }
+    dfe_done(t);
+
+    if(!sent)
+        nd_log(NDLS_DAEMON, NDLP_DEBUG,
+               "PLUGINSD: FUNCTION_CANCEL request didn't match any pending function requests in pluginsd.d.");
+}
+
+static void pluginsd_function_progress_to_plugin(void *data) {
+    struct inflight_function *look_for = data, *t;
+
+    bool sent = false;
+    dfe_start_read(look_for->parser->inflight.functions, t) {
+        if(look_for == t) {
+            const char *transaction = t_dfe.name;
+
+            internal_error(true, "PLUGINSD: sending function progress to plugin for transaction '%s'", transaction);
+
+            char buffer[2048];
+            snprintfz(buffer, sizeof(buffer), PLUGINSD_CALL_FUNCTION_PROGRESS " %s\n", transaction);
+
+            // send the command to the plugin
+            // mark it as sent only when the write to the plugin succeeded
+            ssize_t ret = send_to_plugin(buffer, t->parser);
+            if(ret >= 0)
+                sent = true;
+
+            break;
+        }
+    }
+    dfe_done(t);
+
+    if(!sent)
+        nd_log(NDLS_DAEMON, NDLP_DEBUG,
+               "PLUGINSD: FUNCTION_PROGRESS request didn't match any pending function requests in pluginsd.d.");
+}
+
+// this is the function called from
+// rrd_call_function_and_wait() and rrd_call_function_async()
+int pluginsd_function_execute_cb(struct rrd_function_execute *rfe, void *data) {
+
+    // IMPORTANT: this function MUST call the result_cb even on failures
+
+    PARSER *parser = data;
+
+    usec_t now_ut = now_monotonic_usec();
+
+    int timeout_s = (int)((*rfe->stop_monotonic_ut - now_ut + USEC_PER_SEC / 2) / USEC_PER_SEC);
+
+    struct inflight_function tmp = {
+        .started_monotonic_ut = now_ut,
+        .stop_monotonic_ut = rfe->stop_monotonic_ut,
+        .result_body_wb = rfe->result.wb,
+        .timeout_s = timeout_s,
+        .function = string_strdupz(rfe->function),
+        .payload = buffer_dup(rfe->payload),
+        .access = rfe->user_access,
+        .source = rfe->source ?
strdupz(rfe->source) : NULL, + .parser = parser, + + .result = { + .cb = rfe->result.cb, + .data = rfe->result.data, + }, + .progress = { + .cb = rfe->progress.cb, + .data = rfe->progress.data, + }, + }; + uuid_copy(tmp.transaction, *rfe->transaction); + + char transaction_str[UUID_COMPACT_STR_LEN]; + uuid_unparse_lower_compact(tmp.transaction, transaction_str); + + dictionary_write_lock(parser->inflight.functions); + + // if there is any error, our dictionary callbacks will call the caller callback to notify + // the caller about the error - no need for error handling here. + struct inflight_function *t = dictionary_set(parser->inflight.functions, transaction_str, &tmp, sizeof(struct inflight_function)); + if(!t->sent_successfully) { + int code = t->code; + dictionary_write_unlock(parser->inflight.functions); + dictionary_del(parser->inflight.functions, transaction_str); + pluginsd_inflight_functions_garbage_collect(parser, now_ut); + return code; + } + else { + if (rfe->register_canceller.cb) + rfe->register_canceller.cb(rfe->register_canceller.data, pluginsd_function_cancel, t); + + if (rfe->register_progresser.cb && + (parser->repertoire == PARSER_INIT_PLUGINSD || (parser->repertoire == PARSER_INIT_STREAMING && + stream_has_capability(&parser->user, STREAM_CAP_PROGRESS)))) + rfe->register_progresser.cb(rfe->register_progresser.data, pluginsd_function_progress_to_plugin, t); + + if (!parser->inflight.smaller_monotonic_timeout_ut || + *tmp.stop_monotonic_ut + RRDFUNCTIONS_TIMEOUT_EXTENSION_UT < parser->inflight.smaller_monotonic_timeout_ut) + parser->inflight.smaller_monotonic_timeout_ut = *tmp.stop_monotonic_ut + RRDFUNCTIONS_TIMEOUT_EXTENSION_UT; + + // garbage collect stale inflight functions + if (parser->inflight.smaller_monotonic_timeout_ut < now_ut) + pluginsd_inflight_functions_garbage_collect(parser, now_ut); + + dictionary_write_unlock(parser->inflight.functions); + + return HTTP_RESP_OK; + } +} + +PARSER_RC pluginsd_function(char **words, size_t num_words, PARSER *parser) { + // a plugin or a child is registering a function + + bool global = false; + size_t i = 1; + if(num_words >= 2 && strcmp(get_word(words, num_words, 1), "GLOBAL") == 0) { + i++; + global = true; + } + + char *name = get_word(words, num_words, i++); + char *timeout_str = get_word(words, num_words, i++); + char *help = get_word(words, num_words, i++); + char *tags = get_word(words, num_words, i++); + char *access_str = get_word(words, num_words, i++); + char *priority_str = get_word(words, num_words, i++); + char *version_str = get_word(words, num_words, i++); + + RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_FUNCTION); + if(!host) return PARSER_RC_ERROR; + + RRDSET *st = (global)? NULL: pluginsd_require_scope_chart(parser, PLUGINSD_KEYWORD_FUNCTION, PLUGINSD_KEYWORD_CHART); + if(!st) global = true; + + if (unlikely(!timeout_str || !name || !help || (!global && !st))) { + netdata_log_error("PLUGINSD: 'host:%s/chart:%s' got a FUNCTION, without providing the required data (global = '%s', name = '%s', timeout = '%s', priority = '%s', version = '%s', help = '%s'). Ignoring it.", + rrdhost_hostname(host), + st?rrdset_id(st):"(unset)", + global?"yes":"no", + name?name:"(unset)", + timeout_str ? timeout_str : "(unset)", + priority_str ? priority_str : "(unset)", + version_str ? 
version_str : "(unset)", + help?help:"(unset)" + ); + return PARSER_RC_ERROR; + } + + int timeout_s = PLUGINS_FUNCTIONS_TIMEOUT_DEFAULT; + if (timeout_str && *timeout_str) { + timeout_s = str2i(timeout_str); + if (unlikely(timeout_s <= 0)) + timeout_s = PLUGINS_FUNCTIONS_TIMEOUT_DEFAULT; + } + + int priority = RRDFUNCTIONS_PRIORITY_DEFAULT; + if(priority_str && *priority_str) { + priority = str2i(priority_str); + if(priority <= 0) + priority = RRDFUNCTIONS_PRIORITY_DEFAULT; + } + + uint32_t version = RRDFUNCTIONS_VERSION_DEFAULT; + if(version_str && *version_str) + version = str2u(version_str); + + rrd_function_add(host, st, name, timeout_s, priority, version, help, tags, + http_access_from_hex_mapping_old_roles(access_str), false, + pluginsd_function_execute_cb, parser); + + parser->user.data_collections_count++; + + return PARSER_RC_OK; +} + +static void pluginsd_function_result_end(struct parser *parser, void *action_data) { + STRING *key = action_data; + if(key) + dictionary_del(parser->inflight.functions, string2str(key)); + string_freez(key); + + parser->user.data_collections_count++; +} + +static inline struct inflight_function *inflight_function_find(PARSER *parser, const char *transaction) { + struct inflight_function *pf = NULL; + + if(transaction && *transaction) + pf = (struct inflight_function *)dictionary_get(parser->inflight.functions, transaction); + + if(!pf) + netdata_log_error("got a " PLUGINSD_KEYWORD_FUNCTION_RESULT_BEGIN " for transaction '%s', but the transaction is not found.", transaction ? transaction : "(unset)"); + + return pf; +} + +PARSER_RC pluginsd_function_result_begin(char **words, size_t num_words, PARSER *parser) { + char *transaction = get_word(words, num_words, 1); + char *status = get_word(words, num_words, 2); + char *format = get_word(words, num_words, 3); + char *expires = get_word(words, num_words, 4); + + if (unlikely(!transaction || !*transaction || !status || !*status || !format || !*format || !expires || !*expires)) { + netdata_log_error("got a " PLUGINSD_KEYWORD_FUNCTION_RESULT_BEGIN " without providing the required data (key = '%s', status = '%s', format = '%s', expires = '%s')." + , transaction ? transaction : "(unset)" + , status ? status : "(unset)" + , format ? format : "(unset)" + , expires ? expires : "(unset)" + ); + } + + int code = (status && *status) ? str2i(status) : 0; + if (code <= 0) + code = HTTP_RESP_BACKEND_RESPONSE_INVALID; + + time_t expiration = (expires && *expires) ? str2l(expires) : 0; + + struct inflight_function *pf = inflight_function_find(parser, transaction); + if(pf) { + if(format && *format) + pf->result_body_wb->content_type = content_type_string2id(format); + + pf->code = code; + + pf->result_body_wb->expires = expiration; + if(expiration <= now_realtime_sec()) + buffer_no_cacheable(pf->result_body_wb); + else + buffer_cacheable(pf->result_body_wb); + } + + parser->defer.response = (pf) ? 
pf->result_body_wb : NULL;
+    parser->defer.end_keyword = PLUGINSD_KEYWORD_FUNCTION_RESULT_END;
+    parser->defer.action = pluginsd_function_result_end;
+    parser->defer.action_data = string_strdupz(transaction); // it is ok if the key is NULL
+    parser->flags |= PARSER_DEFER_UNTIL_KEYWORD;
+
+    return PARSER_RC_OK;
+}
+
+PARSER_RC pluginsd_function_progress(char **words, size_t num_words, PARSER *parser) {
+    size_t i = 1;
+
+    char *transaction = get_word(words, num_words, i++);
+    char *done_str = get_word(words, num_words, i++);
+    char *all_str = get_word(words, num_words, i++);
+
+    struct inflight_function *pf = inflight_function_find(parser, transaction);
+    if(pf) {
+        size_t done = done_str && *done_str ? str2u(done_str) : 0;
+        size_t all = all_str && *all_str ? str2u(all_str) : 0;
+
+        if(pf->progress.cb)
+            pf->progress.cb(pf->progress.data, done, all);
+    }
+
+    return PARSER_RC_OK;
+}
diff --git a/src/plugins.d/pluginsd_functions.h b/src/plugins.d/pluginsd_functions.h
new file mode 100644
index 000000000..ad47dc23a
--- /dev/null
+++ b/src/plugins.d/pluginsd_functions.h
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_PLUGINSD_FUNCTIONS_H
+#define NETDATA_PLUGINSD_FUNCTIONS_H
+
+#include "pluginsd_internals.h"
+
+struct inflight_function {
+    nd_uuid_t transaction;
+
+    int code;
+    int timeout_s;
+    STRING *function;
+    BUFFER *payload;
+    HTTP_ACCESS access;
+    const char *source;
+
+    BUFFER *result_body_wb;
+
+    usec_t *stop_monotonic_ut; // pointer to caller data
+    usec_t started_monotonic_ut;
+    usec_t sent_monotonic_ut;
+    PARSER *parser;
+
+    bool sent_successfully;
+
+    struct {
+        rrd_function_result_callback_t cb;
+        void *data;
+    } result;
+
+    struct {
+        rrd_function_progress_cb_t cb;
+        void *data;
+    } progress;
+};
+
+PARSER_RC pluginsd_function(char **words, size_t num_words, PARSER *parser);
+PARSER_RC pluginsd_function_result_begin(char **words, size_t num_words, PARSER *parser);
+PARSER_RC pluginsd_function_progress(char **words, size_t num_words, PARSER *parser);
+
+void pluginsd_inflight_functions_init(PARSER *parser);
+void pluginsd_inflight_functions_cleanup(PARSER *parser);
+void pluginsd_inflight_functions_garbage_collect(PARSER *parser, usec_t now_ut);
+
+int pluginsd_function_execute_cb(struct rrd_function_execute *rfe, void *data);
+
+#endif //NETDATA_PLUGINSD_FUNCTIONS_H
diff --git a/src/plugins.d/pluginsd_internals.c b/src/plugins.d/pluginsd_internals.c
new file mode 100644
index 000000000..c57362506
--- /dev/null
+++ b/src/plugins.d/pluginsd_internals.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "pluginsd_internals.h"
+
+ssize_t send_to_plugin(const char *txt, PARSER *parser) {
+    if(!txt || !*txt || !parser)
+        return 0;
+
+#ifdef ENABLE_H2O
+    if(parser->h2o_ctx)
+        return h2o_stream_write(parser->h2o_ctx, txt, strlen(txt));
+#endif
+
+    errno_clear();
+    spinlock_lock(&parser->writer.spinlock);
+    ssize_t bytes = -1;
+
+    NETDATA_SSL *ssl = parser->ssl_output;
+    if(ssl) {
+
+        if(SSL_connection(ssl))
+            bytes = netdata_ssl_write(ssl, (void *) txt, strlen(txt));
+
+        else
+            netdata_log_error("PLUGINSD: cannot send command (SSL)");
+
+        spinlock_unlock(&parser->writer.spinlock);
+        return bytes;
+    }
+
+    if(parser->fd_output != -1) {
+        bytes = 0;
+        ssize_t total = (ssize_t)strlen(txt);
+        ssize_t sent;
+
+        do {
+            sent = write(parser->fd_output, &txt[bytes], total - bytes);
+            if(sent <= 0) {
+                netdata_log_error("PLUGINSD: cannot send command (fd)");
+                spinlock_unlock(&parser->writer.spinlock);
+                return -3;
+            }
+            bytes += sent;
+        }
while(bytes < total); + + spinlock_unlock(&parser->writer.spinlock); + return (int)bytes; + } + + spinlock_unlock(&parser->writer.spinlock); + netdata_log_error("PLUGINSD: cannot send command (no output socket/pipe/file given to plugins.d parser)"); + return -4; +} + +PARSER_RC PLUGINSD_DISABLE_PLUGIN(PARSER *parser, const char *keyword, const char *msg) { + parser->user.enabled = 0; + + if(keyword && msg) { + nd_log_limit_static_global_var(erl, 1, 0); + nd_log_limit(&erl, NDLS_COLLECTORS, NDLP_INFO, + "PLUGINSD: keyword %s: %s", keyword, msg); + } + + return PARSER_RC_ERROR; +} + +void pluginsd_keywords_init(PARSER *parser, PARSER_REPERTOIRE repertoire) { + parser_init_repertoire(parser, repertoire); + + if (repertoire & (PARSER_INIT_PLUGINSD | PARSER_INIT_STREAMING)) + pluginsd_inflight_functions_init(parser); +} + +void parser_destroy(PARSER *parser) { + if (unlikely(!parser)) + return; + + pluginsd_inflight_functions_cleanup(parser); + + freez(parser); +} + + +PARSER *parser_init(struct parser_user_object *user, int fd_input, int fd_output, + PARSER_INPUT_TYPE flags, void *ssl __maybe_unused) { + PARSER *parser; + + parser = callocz(1, sizeof(*parser)); + if(user) + parser->user = *user; + parser->fd_input = fd_input; + parser->fd_output = fd_output; + parser->ssl_output = ssl; + parser->flags = flags; + + spinlock_init(&parser->writer.spinlock); + return parser; +} diff --git a/src/plugins.d/pluginsd_internals.h b/src/plugins.d/pluginsd_internals.h new file mode 100644 index 000000000..ed0714dd2 --- /dev/null +++ b/src/plugins.d/pluginsd_internals.h @@ -0,0 +1,355 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_PLUGINSD_INTERNALS_H +#define NETDATA_PLUGINSD_INTERNALS_H + +#include "pluginsd_parser.h" +#include "pluginsd_functions.h" +#include "pluginsd_dyncfg.h" +#include "pluginsd_replication.h" + +#define SERVING_STREAMING(parser) ((parser)->repertoire == PARSER_INIT_STREAMING) +#define SERVING_PLUGINSD(parser) ((parser)->repertoire == PARSER_INIT_PLUGINSD) + +PARSER_RC PLUGINSD_DISABLE_PLUGIN(PARSER *parser, const char *keyword, const char *msg); + +ssize_t send_to_plugin(const char *txt, PARSER *parser); + +static inline RRDHOST *pluginsd_require_scope_host(PARSER *parser, const char *cmd) { + RRDHOST *host = parser->user.host; + + if(unlikely(!host)) + netdata_log_error("PLUGINSD: command %s requires a host, but is not set.", cmd); + + return host; +} + +static inline RRDSET *pluginsd_require_scope_chart(PARSER *parser, const char *cmd, const char *parent_cmd) { + RRDSET *st = parser->user.st; + + if(unlikely(!st)) + netdata_log_error("PLUGINSD: command %s requires a chart defined via command %s, but is not set.", cmd, parent_cmd); + + return st; +} + +static inline RRDSET *pluginsd_get_scope_chart(PARSER *parser) { + return parser->user.st; +} + +static inline void pluginsd_lock_rrdset_data_collection(PARSER *parser) { + if(parser->user.st && !parser->user.v2.locked_data_collection) { + spinlock_lock(&parser->user.st->data_collection_lock); + parser->user.v2.locked_data_collection = true; + } +} + +static inline bool pluginsd_unlock_rrdset_data_collection(PARSER *parser) { + if(parser->user.st && parser->user.v2.locked_data_collection) { + spinlock_unlock(&parser->user.st->data_collection_lock); + parser->user.v2.locked_data_collection = false; + return true; + } + + return false; +} + +static inline void pluginsd_unlock_previous_scope_chart(PARSER *parser, const char *keyword, bool stale) { + if(unlikely(pluginsd_unlock_rrdset_data_collection(parser))) { + 
if(stale) + netdata_log_error("PLUGINSD: 'host:%s/chart:%s' stale data collection lock found during %s; it has been unlocked", + rrdhost_hostname(parser->user.st->rrdhost), + rrdset_id(parser->user.st), + keyword); + } + + if(unlikely(parser->user.v2.ml_locked)) { + ml_chart_update_end(parser->user.st); + parser->user.v2.ml_locked = false; + + if(stale) + netdata_log_error("PLUGINSD: 'host:%s/chart:%s' stale ML lock found during %s; it has been unlocked", + rrdhost_hostname(parser->user.st->rrdhost), + rrdset_id(parser->user.st), + keyword); + } +} + +static inline void pluginsd_clear_scope_chart(PARSER *parser, const char *keyword) { + pluginsd_unlock_previous_scope_chart(parser, keyword, true); + + if(parser->user.cleanup_slots && parser->user.st) + rrdset_pluginsd_receive_unslot(parser->user.st); + + parser->user.st = NULL; + parser->user.cleanup_slots = false; +} + +static inline bool pluginsd_set_scope_chart(PARSER *parser, RRDSET *st, const char *keyword) { + RRDSET *old_st = parser->user.st; + pid_t old_collector_tid = (old_st) ? old_st->pluginsd.collector_tid : 0; + pid_t my_collector_tid = gettid_cached(); + + if(unlikely(old_collector_tid)) { + if(old_collector_tid != my_collector_tid) { + nd_log_limit_static_global_var(erl, 1, 0); + nd_log_limit(&erl, NDLS_COLLECTORS, NDLP_WARNING, + "PLUGINSD: keyword %s: 'host:%s/chart:%s' is collected twice (my tid %d, other collector tid %d)", + keyword ? keyword : "UNKNOWN", + rrdhost_hostname(st->rrdhost), rrdset_id(st), + my_collector_tid, old_collector_tid); + + return false; + } + + old_st->pluginsd.collector_tid = 0; + } + + st->pluginsd.collector_tid = my_collector_tid; + + pluginsd_clear_scope_chart(parser, keyword); + + st->pluginsd.pos = 0; + parser->user.st = st; + parser->user.cleanup_slots = false; + + return true; +} + +static inline void pluginsd_rrddim_put_to_slot(PARSER *parser, RRDSET *st, RRDDIM *rd, ssize_t slot, bool obsolete) { + size_t wanted_size = st->pluginsd.size; + + if(slot >= 1) { + st->pluginsd.dims_with_slots = true; + wanted_size = slot; + } + else { + st->pluginsd.dims_with_slots = false; + wanted_size = dictionary_entries(st->rrddim_root_index); + } + + if(wanted_size > st->pluginsd.size) { + st->pluginsd.prd_array = reallocz(st->pluginsd.prd_array, wanted_size * sizeof(struct pluginsd_rrddim)); + + // initialize the empty slots + for(ssize_t i = (ssize_t) wanted_size - 1; i >= (ssize_t) st->pluginsd.size; i--) { + st->pluginsd.prd_array[i].rda = NULL; + st->pluginsd.prd_array[i].rd = NULL; + st->pluginsd.prd_array[i].id = NULL; + } + + st->pluginsd.size = wanted_size; + } + + if(st->pluginsd.dims_with_slots) { + struct pluginsd_rrddim *prd = &st->pluginsd.prd_array[slot - 1]; + + if(prd->rd != rd) { + prd->rda = rrddim_find_and_acquire(st, string2str(rd->id)); + prd->rd = rrddim_acquired_to_rrddim(prd->rda); + prd->id = string2str(prd->rd->id); + } + + if(obsolete) + parser->user.cleanup_slots = true; + } +} + +static inline RRDDIM *pluginsd_acquire_dimension(RRDHOST *host, RRDSET *st, const char *dimension, ssize_t slot, const char *cmd) { + if (unlikely(!dimension || !*dimension)) { + netdata_log_error("PLUGINSD: 'host:%s/chart:%s' got a %s without a dimension.", + rrdhost_hostname(host), rrdset_id(st), cmd); + return NULL; + } + + if (unlikely(!st->pluginsd.size)) { + netdata_log_error("PLUGINSD: 'host:%s/chart:%s' got a %s, but the chart has no dimensions.", + rrdhost_hostname(host), rrdset_id(st), cmd); + return NULL; + } + + struct pluginsd_rrddim *prd; + RRDDIM *rd; + 
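+ // two lookup paths follow: when the collector sent slot numbers, prd_array is indexed directly by slot; otherwise it is scanned round-robin, falling back to the rrddim dictionary when the cached entry does not match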
if(likely(st->pluginsd.dims_with_slots)) { + // caching with slots + + if(unlikely(slot < 1 || slot > st->pluginsd.size)) { + netdata_log_error("PLUGINSD: 'host:%s/chart:%s' got a %s with slot %zd, but slots in the range [1 - %u] are expected.", + rrdhost_hostname(host), rrdset_id(st), cmd, slot, st->pluginsd.size); + return NULL; + } + + prd = &st->pluginsd.prd_array[slot - 1]; + + rd = prd->rd; + if(likely(rd)) { +#ifdef NETDATA_INTERNAL_CHECKS + if(strcmp(prd->id, dimension) != 0) { + ssize_t t; + for(t = 0; t < st->pluginsd.size ;t++) { + if (strcmp(st->pluginsd.prd_array[t].id, dimension) == 0) + break; + } + if(t >= st->pluginsd.size) + t = -1; + + internal_fatal(true, + "PLUGINSD: expected to find dimension '%s' on slot %zd, but found '%s', " + "the right slot is %zd", + dimension, slot, prd->id, t); + } +#endif + return rd; + } + } + else { + // caching without slots + + if(unlikely(st->pluginsd.pos >= st->pluginsd.size)) + st->pluginsd.pos = 0; + + prd = &st->pluginsd.prd_array[st->pluginsd.pos++]; + + rd = prd->rd; + if(likely(rd)) { + const char *id = prd->id; + + if(strcmp(id, dimension) == 0) { + // we found it cached + return rd; + } + else { + // the cached one is not good for us + rrddim_acquired_release(prd->rda); + prd->rda = NULL; + prd->rd = NULL; + prd->id = NULL; + } + } + } + + // we need to find the dimension and set it to prd + + RRDDIM_ACQUIRED *rda = rrddim_find_and_acquire(st, dimension); + if (unlikely(!rda)) { + netdata_log_error("PLUGINSD: 'host:%s/chart:%s/dim:%s' got a %s but dimension does not exist.", + rrdhost_hostname(host), rrdset_id(st), dimension, cmd); + + return NULL; + } + + prd->rda = rda; + prd->rd = rd = rrddim_acquired_to_rrddim(rda); + prd->id = string2str(rd->id); + + return rd; +} + +static inline RRDSET *pluginsd_find_chart(RRDHOST *host, const char *chart, const char *cmd) { + if (unlikely(!chart || !*chart)) { + netdata_log_error("PLUGINSD: 'host:%s' got a %s without a chart id.", + rrdhost_hostname(host), cmd); + return NULL; + } + + RRDSET *st = rrdset_find(host, chart); + if (unlikely(!st)) + netdata_log_error("PLUGINSD: 'host:%s/chart:%s' got a %s but chart does not exist.", + rrdhost_hostname(host), chart, cmd); + + return st; +} + +static inline ssize_t pluginsd_parse_rrd_slot(char **words, size_t num_words) { + ssize_t slot = -1; + char *id = get_word(words, num_words, 1); + if(id && id[0] == PLUGINSD_KEYWORD_SLOT[0] && id[1] == PLUGINSD_KEYWORD_SLOT[1] && + id[2] == PLUGINSD_KEYWORD_SLOT[2] && id[3] == PLUGINSD_KEYWORD_SLOT[3] && id[4] == ':') { + slot = (ssize_t) str2ull_encoded(&id[5]); + if(slot < 0) slot = 0; // to make the caller increment its idx of the words + } + + return slot; +} + +static inline void pluginsd_rrdset_cache_put_to_slot(PARSER *parser, RRDSET *st, ssize_t slot, bool obsolete) { + // clean possible old cached data + rrdset_pluginsd_receive_unslot(st); + + if(unlikely(slot < 1 || slot >= INT32_MAX)) + return; + + RRDHOST *host = st->rrdhost; + + if(unlikely((size_t)slot > host->rrdpush.receive.pluginsd_chart_slots.size)) { + spinlock_lock(&host->rrdpush.receive.pluginsd_chart_slots.spinlock); + size_t old_slots = host->rrdpush.receive.pluginsd_chart_slots.size; + size_t new_slots = (old_slots < PLUGINSD_MIN_RRDSET_POINTERS_CACHE) ? 
PLUGINSD_MIN_RRDSET_POINTERS_CACHE : old_slots * 2; + + if(new_slots < (size_t)slot) + new_slots = slot; + + host->rrdpush.receive.pluginsd_chart_slots.array = + reallocz(host->rrdpush.receive.pluginsd_chart_slots.array, new_slots * sizeof(RRDSET *)); + + for(size_t i = old_slots; i < new_slots ;i++) + host->rrdpush.receive.pluginsd_chart_slots.array[i] = NULL; + + host->rrdpush.receive.pluginsd_chart_slots.size = new_slots; + spinlock_unlock(&host->rrdpush.receive.pluginsd_chart_slots.spinlock); + } + + host->rrdpush.receive.pluginsd_chart_slots.array[slot - 1] = st; + st->pluginsd.last_slot = (int32_t)slot - 1; + parser->user.cleanup_slots = obsolete; +} + +static inline RRDSET *pluginsd_rrdset_cache_get_from_slot(PARSER *parser, RRDHOST *host, const char *id, ssize_t slot, const char *keyword) { + if(unlikely(slot < 1 || (size_t)slot > host->rrdpush.receive.pluginsd_chart_slots.size)) + return pluginsd_find_chart(host, id, keyword); + + RRDSET *st = host->rrdpush.receive.pluginsd_chart_slots.array[slot - 1]; + + if(!st) { + st = pluginsd_find_chart(host, id, keyword); + if(st) + pluginsd_rrdset_cache_put_to_slot(parser, st, slot, rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE)); + } + else { + internal_fatal(string_strcmp(st->id, id) != 0, + "PLUGINSD: wrong chart in slot %zd, expected '%s', found '%s'", + slot - 1, id, string2str(st->id)); + } + + return st; +} + +static inline SN_FLAGS pluginsd_parse_storage_number_flags(const char *flags_str) { + SN_FLAGS flags = SN_FLAG_NONE; + + char c; + while ((c = *flags_str++)) { + switch (c) { + case 'A': + flags |= SN_FLAG_NOT_ANOMALOUS; + break; + + case 'R': + flags |= SN_FLAG_RESET; + break; + + case 'E': + flags = SN_EMPTY_SLOT; + return flags; + + default: + internal_error(true, "Unknown SN_FLAGS flag '%c'", c); + break; + } + } + + return flags; +} + +#endif //NETDATA_PLUGINSD_INTERNALS_H diff --git a/src/plugins.d/pluginsd_parser.c b/src/plugins.d/pluginsd_parser.c new file mode 100644 index 000000000..62f56d309 --- /dev/null +++ b/src/plugins.d/pluginsd_parser.c @@ -0,0 +1,1372 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "pluginsd_internals.h" + +static inline PARSER_RC pluginsd_set(char **words, size_t num_words, PARSER *parser) { + int idx = 1; + ssize_t slot = pluginsd_parse_rrd_slot(words, num_words); + if(slot >= 0) idx++; + + char *dimension = get_word(words, num_words, idx++); + char *value = get_word(words, num_words, idx++); + + RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_SET); + if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); + + RRDSET *st = pluginsd_require_scope_chart(parser, PLUGINSD_KEYWORD_SET, PLUGINSD_KEYWORD_CHART); + if(!st) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); + + RRDDIM *rd = pluginsd_acquire_dimension(host, st, dimension, slot, PLUGINSD_KEYWORD_SET); + if(!rd) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); + + st->pluginsd.set = true; + + if (unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG))) + netdata_log_debug(D_PLUGINSD, "PLUGINSD: 'host:%s/chart:%s/dim:%s' SET is setting value to '%s'", + rrdhost_hostname(host), rrdset_id(st), dimension, value && *value ? 
value : "UNSET"); + + if (value && *value) + rrddim_set_by_pointer(st, rd, str2ll_encoded(value)); + + return PARSER_RC_OK; +} + +static inline PARSER_RC pluginsd_begin(char **words, size_t num_words, PARSER *parser) { + int idx = 1; + ssize_t slot = pluginsd_parse_rrd_slot(words, num_words); + if(slot >= 0) idx++; + + char *id = get_word(words, num_words, idx++); + char *microseconds_txt = get_word(words, num_words, idx++); + + RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_BEGIN); + if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); + + RRDSET *st = pluginsd_rrdset_cache_get_from_slot(parser, host, id, slot, PLUGINSD_KEYWORD_BEGIN); + if(!st) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); + + if(!pluginsd_set_scope_chart(parser, st, PLUGINSD_KEYWORD_BEGIN)) + return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); + + usec_t microseconds = 0; + if (microseconds_txt && *microseconds_txt) { + long long t = str2ll(microseconds_txt, NULL); + if(t >= 0) + microseconds = t; + } + +#ifdef NETDATA_LOG_REPLICATION_REQUESTS + if(st->replay.log_next_data_collection) { + st->replay.log_next_data_collection = false; + + internal_error(true, + "REPLAY: 'host:%s/chart:%s' first BEGIN after replication, last collected %llu, last updated %llu, microseconds %llu", + rrdhost_hostname(host), rrdset_id(st), + st->last_collected_time.tv_sec * USEC_PER_SEC + st->last_collected_time.tv_usec, + st->last_updated.tv_sec * USEC_PER_SEC + st->last_updated.tv_usec, + microseconds + ); + } +#endif + + if (likely(st->counter_done)) { + if (likely(microseconds)) { + if (parser->user.trust_durations) + rrdset_next_usec_unfiltered(st, microseconds); + else + rrdset_next_usec(st, microseconds); + } + else + rrdset_next(st); + } + return PARSER_RC_OK; +} + +static inline PARSER_RC pluginsd_end(char **words, size_t num_words, PARSER *parser) { + char *tv_sec = get_word(words, num_words, 1); + char *tv_usec = get_word(words, num_words, 2); + char *pending_rrdset_next = get_word(words, num_words, 3); + + RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_END); + if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); + + RRDSET *st = pluginsd_require_scope_chart(parser, PLUGINSD_KEYWORD_END, PLUGINSD_KEYWORD_BEGIN); + if(!st) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); + + if (unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG))) + netdata_log_debug(D_PLUGINSD, "requested an END on chart '%s'", rrdset_id(st)); + + pluginsd_clear_scope_chart(parser, PLUGINSD_KEYWORD_END); + parser->user.data_collections_count++; + + struct timeval tv = { + .tv_sec = (tv_sec && *tv_sec) ? str2ll(tv_sec, NULL) : 0, + .tv_usec = (tv_usec && *tv_usec) ? str2ll(tv_usec, NULL) : 0 + }; + + if(!tv.tv_sec) + now_realtime_timeval(&tv); + + rrdset_timed_done(st, tv, pending_rrdset_next && *pending_rrdset_next ? 
true : false); + + return PARSER_RC_OK; +} + +static void pluginsd_host_define_cleanup(PARSER *parser) { + string_freez(parser->user.host_define.hostname); + rrdlabels_destroy(parser->user.host_define.rrdlabels); + + parser->user.host_define.hostname = NULL; + parser->user.host_define.rrdlabels = NULL; + parser->user.host_define.parsing_host = false; +} + +static inline bool pluginsd_validate_machine_guid(const char *guid, nd_uuid_t *uuid, char *output) { + if(uuid_parse(guid, *uuid)) + return false; + + uuid_unparse_lower(*uuid, output); + + return true; +} + +static inline PARSER_RC pluginsd_host_define(char **words, size_t num_words, PARSER *parser) { + char *guid = get_word(words, num_words, 1); + char *hostname = get_word(words, num_words, 2); + + if(unlikely(!guid || !*guid || !hostname || !*hostname)) + return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_HOST_DEFINE, "missing parameters"); + + if(unlikely(parser->user.host_define.parsing_host)) + return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_HOST_DEFINE, + "another host definition is already open - did you send " PLUGINSD_KEYWORD_HOST_DEFINE_END "?"); + + if(!pluginsd_validate_machine_guid(guid, &parser->user.host_define.machine_guid, parser->user.host_define.machine_guid_str)) + return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_HOST_DEFINE, "cannot parse MACHINE_GUID - is it a valid UUID?"); + + parser->user.host_define.hostname = string_strdupz(hostname); + parser->user.host_define.rrdlabels = rrdlabels_create(); + parser->user.host_define.parsing_host = true; + + return PARSER_RC_OK; +} + +static inline PARSER_RC pluginsd_host_dictionary(char **words, size_t num_words, PARSER *parser, RRDLABELS *labels, const char *keyword) { + char *name = get_word(words, num_words, 1); + char *value = get_word(words, num_words, 2); + + if(!name || !*name || !value) + return PLUGINSD_DISABLE_PLUGIN(parser, keyword, "missing parameters"); + + if(!parser->user.host_define.parsing_host || !labels) + return PLUGINSD_DISABLE_PLUGIN(parser, keyword, "host is not defined, send " PLUGINSD_KEYWORD_HOST_DEFINE " before this"); + + rrdlabels_add(labels, name, value, RRDLABEL_SRC_CONFIG); + + return PARSER_RC_OK; +} + +static inline PARSER_RC pluginsd_host_labels(char **words, size_t num_words, PARSER *parser) { + return pluginsd_host_dictionary(words, num_words, parser, + parser->user.host_define.rrdlabels, + PLUGINSD_KEYWORD_HOST_LABEL); +} + +static inline PARSER_RC pluginsd_host_define_end(char **words __maybe_unused, size_t num_words __maybe_unused, PARSER *parser) { + if(!parser->user.host_define.parsing_host) + return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_HOST_DEFINE_END, "missing initialization, send " PLUGINSD_KEYWORD_HOST_DEFINE " before this"); + + RRDHOST *host = rrdhost_find_or_create( + string2str(parser->user.host_define.hostname), + string2str(parser->user.host_define.hostname), + parser->user.host_define.machine_guid_str, + "Netdata Virtual Host 1.0", + netdata_configured_timezone, + netdata_configured_abbrev_timezone, + netdata_configured_utc_offset, + program_name, + NETDATA_VERSION, + default_rrd_update_every, + default_rrd_history_entries, + default_rrd_memory_mode, + health_plugin_enabled(), + stream_conf_send_enabled, + stream_conf_send_destination, + stream_conf_send_api_key, + stream_conf_send_charts_matching, + stream_conf_replication_enabled, + stream_conf_replication_period, + stream_conf_replication_step, + rrdhost_labels_to_system_info(parser->user.host_define.rrdlabels), + false); + + 
rrdhost_option_set(host, RRDHOST_OPTION_VIRTUAL_HOST); + dyncfg_host_init(host); + + if(host->rrdlabels) { + rrdlabels_migrate_to_these(host->rrdlabels, parser->user.host_define.rrdlabels); + } + else { + host->rrdlabels = parser->user.host_define.rrdlabels; + parser->user.host_define.rrdlabels = NULL; + } + + pluginsd_host_define_cleanup(parser); + + parser->user.host = host; + pluginsd_clear_scope_chart(parser, PLUGINSD_KEYWORD_HOST_DEFINE_END); + + rrdhost_flag_clear(host, RRDHOST_FLAG_ORPHAN); + rrdcontext_host_child_connected(host); + schedule_node_state_update(host, 100); + + return PARSER_RC_OK; +} + +static inline PARSER_RC pluginsd_host(char **words, size_t num_words, PARSER *parser) { + char *guid = get_word(words, num_words, 1); + + if(!guid || !*guid || strcmp(guid, "localhost") == 0) { + parser->user.host = localhost; + return PARSER_RC_OK; + } + + nd_uuid_t uuid; + char uuid_str[UUID_STR_LEN]; + if(!pluginsd_validate_machine_guid(guid, &uuid, uuid_str)) + return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_HOST, "cannot parse MACHINE_GUID - is it a valid UUID?"); + + RRDHOST *host = rrdhost_find_by_guid(uuid_str); + if(unlikely(!host)) + return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_HOST, "cannot find a host with this machine guid - have you created it?"); + + parser->user.host = host; + + return PARSER_RC_OK; +} + +static inline PARSER_RC pluginsd_chart(char **words, size_t num_words, PARSER *parser) { + RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_CHART); + if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); + + int idx = 1; + ssize_t slot = pluginsd_parse_rrd_slot(words, num_words); + if(slot >= 0) idx++; + + char *type = get_word(words, num_words, idx++); + char *name = get_word(words, num_words, idx++); + char *title = get_word(words, num_words, idx++); + char *units = get_word(words, num_words, idx++); + char *family = get_word(words, num_words, idx++); + char *context = get_word(words, num_words, idx++); + char *chart = get_word(words, num_words, idx++); + char *priority_s = get_word(words, num_words, idx++); + char *update_every_s = get_word(words, num_words, idx++); + char *options = get_word(words, num_words, idx++); + char *plugin = get_word(words, num_words, idx++); + char *module = get_word(words, num_words, idx++); + + // parse the id from type + char *id = NULL; + if (likely(type && (id = strchr(type, '.')))) { + *id = '\0'; + id++; + } + + // make sure we have the required variables + if (unlikely((!type || !*type || !id || !*id))) + return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_CHART, "missing parameters"); + + // parse the name, and make sure it does not include 'type.' + if (unlikely(name && *name)) { + // when data are streamed from child nodes + // name will be type.name + // so, we have to remove 'type.' from name too + size_t len = strlen(type); + if (strncmp(type, name, len) == 0 && name[len] == '.') + name = &name[len + 1]; + + // if the name is the same as the id, + // or is just 'NULL', clear it. 
+ if (unlikely(strcmp(name, id) == 0 || strcasecmp(name, "NULL") == 0 || strcasecmp(name, "(NULL)") == 0)) + name = NULL; + } + + int priority = 1000; + if (likely(priority_s && *priority_s)) + priority = str2i(priority_s); + + int update_every = parser->user.cd->update_every; + if (likely(update_every_s && *update_every_s)) + update_every = str2i(update_every_s); + if (unlikely(!update_every)) + update_every = parser->user.cd->update_every; + + RRDSET_TYPE chart_type = RRDSET_TYPE_LINE; + if (unlikely(chart)) + chart_type = rrdset_type_id(chart); + + if (unlikely(name && !*name)) + name = NULL; + if (unlikely(family && !*family)) + family = NULL; + if (unlikely(context && !*context)) + context = NULL; + if (unlikely(!title)) + title = ""; + if (unlikely(!units)) + units = "unknown"; + + netdata_log_debug( + D_PLUGINSD, + "creating chart type='%s', id='%s', name='%s', family='%s', context='%s', chart='%s', priority=%d, update_every=%d", + type, id, name ? name : "", family ? family : "", context ? context : "", rrdset_type_name(chart_type), + priority, update_every); + + RRDSET *st = NULL; + + st = rrdset_create( + host, type, id, name, family, context, title, units, + (plugin && *plugin) ? plugin : parser->user.cd->filename, + module, priority, update_every, + chart_type); + + bool obsolete = false; + if (likely(st)) { + if (options && *options) { + if (strstr(options, "obsolete")) { + rrdset_is_obsolete___safe_from_collector_thread(st); + obsolete = true; + } + else + rrdset_isnot_obsolete___safe_from_collector_thread(st); + + if (strstr(options, "hidden")) + rrdset_flag_set(st, RRDSET_FLAG_HIDDEN); + else + rrdset_flag_clear(st, RRDSET_FLAG_HIDDEN); + + if (strstr(options, "store_first")) + rrdset_flag_set(st, RRDSET_FLAG_STORE_FIRST); + else + rrdset_flag_clear(st, RRDSET_FLAG_STORE_FIRST); + } + else { + rrdset_isnot_obsolete___safe_from_collector_thread(st); + rrdset_flag_clear(st, RRDSET_FLAG_STORE_FIRST); + } + + if(!pluginsd_set_scope_chart(parser, st, PLUGINSD_KEYWORD_CHART)) + return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); + + pluginsd_rrdset_cache_put_to_slot(parser, st, slot, obsolete); + } + else + pluginsd_clear_scope_chart(parser, PLUGINSD_KEYWORD_CHART); + + return PARSER_RC_OK; +} + +static inline PARSER_RC pluginsd_chart_definition_end(char **words, size_t num_words, PARSER *parser) { + const char *first_entry_txt = get_word(words, num_words, 1); + const char *last_entry_txt = get_word(words, num_words, 2); + const char *wall_clock_time_txt = get_word(words, num_words, 3); + + RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_CHART_DEFINITION_END); + if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); + + RRDSET *st = pluginsd_require_scope_chart(parser, PLUGINSD_KEYWORD_CHART_DEFINITION_END, PLUGINSD_KEYWORD_CHART); + if(!st) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); + + time_t first_entry_child = (first_entry_txt && *first_entry_txt) ? (time_t)str2ul(first_entry_txt) : 0; + time_t last_entry_child = (last_entry_txt && *last_entry_txt) ? (time_t)str2ul(last_entry_txt) : 0; + time_t child_wall_clock_time = (wall_clock_time_txt && *wall_clock_time_txt) ? 
(time_t)str2ul(wall_clock_time_txt) : now_realtime_sec(); + + bool ok = true; + if(!rrdset_flag_check(st, RRDSET_FLAG_RECEIVER_REPLICATION_IN_PROGRESS)) { + +#ifdef NETDATA_LOG_REPLICATION_REQUESTS + st->replay.start_streaming = false; + st->replay.after = 0; + st->replay.before = 0; +#endif + + rrdset_flag_set(st, RRDSET_FLAG_RECEIVER_REPLICATION_IN_PROGRESS); + rrdset_flag_clear(st, RRDSET_FLAG_RECEIVER_REPLICATION_FINISHED); + rrdhost_receiver_replicating_charts_plus_one(st->rrdhost); + + ok = replicate_chart_request(send_to_plugin, parser, host, st, + first_entry_child, last_entry_child, child_wall_clock_time, + 0, 0); + } +#ifdef NETDATA_LOG_REPLICATION_REQUESTS + else { + internal_error(true, "REPLAY: 'host:%s/chart:%s' not sending duplicate replication request", + rrdhost_hostname(st->rrdhost), rrdset_id(st)); + } +#endif + + return ok ? PARSER_RC_OK : PARSER_RC_ERROR; +} + +static inline PARSER_RC pluginsd_dimension(char **words, size_t num_words, PARSER *parser) { + int idx = 1; + ssize_t slot = pluginsd_parse_rrd_slot(words, num_words); + if(slot >= 0) idx++; + + char *id = get_word(words, num_words, idx++); + char *name = get_word(words, num_words, idx++); + char *algorithm = get_word(words, num_words, idx++); + char *multiplier_s = get_word(words, num_words, idx++); + char *divisor_s = get_word(words, num_words, idx++); + char *options = get_word(words, num_words, idx++); + + RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_DIMENSION); + if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); + + RRDSET *st = pluginsd_require_scope_chart(parser, PLUGINSD_KEYWORD_DIMENSION, PLUGINSD_KEYWORD_CHART); + if(!st) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); + + if (unlikely(!id || !*id)) + return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_DIMENSION, "missing dimension id"); + + long multiplier = 1; + if (multiplier_s && *multiplier_s) { + multiplier = str2ll_encoded(multiplier_s); + if (unlikely(!multiplier)) + multiplier = 1; + } + + long divisor = 1; + if (likely(divisor_s && *divisor_s)) { + divisor = str2ll_encoded(divisor_s); + if (unlikely(!divisor)) + divisor = 1; + } + + if (unlikely(!algorithm || !*algorithm)) + algorithm = "absolute"; + + if (unlikely(st && rrdset_flag_check(st, RRDSET_FLAG_DEBUG))) + netdata_log_debug( + D_PLUGINSD, + "creating dimension in chart %s, id='%s', name='%s', algorithm='%s', multiplier=%ld, divisor=%ld, hidden='%s'", + rrdset_id(st), id, name ? name : "", rrd_algorithm_name(rrd_algorithm_id(algorithm)), multiplier, divisor, + options ? 
options : ""); + + RRDDIM *rd = rrddim_add(st, id, name, multiplier, divisor, rrd_algorithm_id(algorithm)); + if (unlikely(!rd)) + return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_DIMENSION, "failed to create dimension"); + + int unhide_dimension = 1; + + rrddim_option_clear(rd, RRDDIM_OPTION_DONT_DETECT_RESETS_OR_OVERFLOWS); + bool obsolete = false; + if (options && *options) { + if (strstr(options, "obsolete") != NULL) { + obsolete = true; + rrddim_is_obsolete___safe_from_collector_thread(st, rd); + } + else + rrddim_isnot_obsolete___safe_from_collector_thread(st, rd); + + unhide_dimension = !strstr(options, "hidden"); + + if (strstr(options, "noreset") != NULL) + rrddim_option_set(rd, RRDDIM_OPTION_DONT_DETECT_RESETS_OR_OVERFLOWS); + if (strstr(options, "nooverflow") != NULL) + rrddim_option_set(rd, RRDDIM_OPTION_DONT_DETECT_RESETS_OR_OVERFLOWS); + } + else + rrddim_isnot_obsolete___safe_from_collector_thread(st, rd); + + bool should_update_dimension = false; + + if (likely(unhide_dimension)) { + rrddim_option_clear(rd, RRDDIM_OPTION_HIDDEN); + should_update_dimension = rrddim_flag_check(rd, RRDDIM_FLAG_META_HIDDEN); + } + else { + rrddim_option_set(rd, RRDDIM_OPTION_HIDDEN); + should_update_dimension = !rrddim_flag_check(rd, RRDDIM_FLAG_META_HIDDEN); + } + + if (should_update_dimension) { + rrddim_flag_set(rd, RRDDIM_FLAG_METADATA_UPDATE); + rrdhost_flag_set(rd->rrdset->rrdhost, RRDHOST_FLAG_METADATA_UPDATE); + } + + pluginsd_rrddim_put_to_slot(parser, st, rd, slot, obsolete); + + return PARSER_RC_OK; +} + +// ---------------------------------------------------------------------------- + +static inline PARSER_RC pluginsd_variable(char **words, size_t num_words, PARSER *parser) { + char *name = get_word(words, num_words, 1); + char *value = get_word(words, num_words, 2); + NETDATA_DOUBLE v; + + RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_VARIABLE); + if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); + + RRDSET *st = pluginsd_get_scope_chart(parser); + + int global = (st) ? 0 : 1; + + if (name && *name) { + if ((strcmp(name, "GLOBAL") == 0 || strcmp(name, "HOST") == 0)) { + global = 1; + name = get_word(words, num_words, 2); + value = get_word(words, num_words, 3); + } else if ((strcmp(name, "LOCAL") == 0 || strcmp(name, "CHART") == 0)) { + global = 0; + name = get_word(words, num_words, 2); + value = get_word(words, num_words, 3); + } + } + + if (unlikely(!name || !*name)) + return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_VARIABLE, "missing variable name"); + + if (unlikely(!value || !*value)) + value = NULL; + + if (unlikely(!value)) { + netdata_log_error("PLUGINSD: 'host:%s/chart:%s' cannot set %s VARIABLE '%s' to an empty value", + rrdhost_hostname(host), + st ? rrdset_id(st):"UNSET", + (global) ? "HOST" : "CHART", + name); + return PARSER_RC_OK; + } + + if (!global && !st) + return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_VARIABLE, "no chart is defined and no GLOBAL is given"); + + char *endptr = NULL; + v = (NETDATA_DOUBLE) str2ndd_encoded(value, &endptr); + if (unlikely(endptr && *endptr)) { + if (endptr == value) + netdata_log_error("PLUGINSD: 'host:%s/chart:%s' the value '%s' of VARIABLE '%s' cannot be parsed as a number", + rrdhost_hostname(host), + st ? rrdset_id(st):"UNSET", + value, + name); + else + netdata_log_error("PLUGINSD: 'host:%s/chart:%s' the value '%s' of VARIABLE '%s' has leftovers: '%s'", + rrdhost_hostname(host), + st ? 
rrdset_id(st):"UNSET", + value, + name, + endptr); + } + + if (global) { + const RRDVAR_ACQUIRED *rva = rrdvar_host_variable_add_and_acquire(host, name); + if (rva) { + rrdvar_host_variable_set(host, rva, v); + rrdvar_host_variable_release(host, rva); + } + else + netdata_log_error("PLUGINSD: 'host:%s' cannot find/create HOST VARIABLE '%s'", + rrdhost_hostname(host), + name); + } else { + const RRDVAR_ACQUIRED *rsa = rrdvar_chart_variable_add_and_acquire(st, name); + if (rsa) { + rrdvar_chart_variable_set(st, rsa, v); + rrdvar_chart_variable_release(st, rsa); + } + else + netdata_log_error("PLUGINSD: 'host:%s/chart:%s' cannot find/create CHART VARIABLE '%s'", + rrdhost_hostname(host), rrdset_id(st), name); + } + + return PARSER_RC_OK; +} + +static inline PARSER_RC pluginsd_flush(char **words __maybe_unused, size_t num_words __maybe_unused, PARSER *parser) { + netdata_log_debug(D_PLUGINSD, "requested a " PLUGINSD_KEYWORD_FLUSH); + pluginsd_clear_scope_chart(parser, PLUGINSD_KEYWORD_FLUSH); + parser->user.replay.start_time = 0; + parser->user.replay.end_time = 0; + parser->user.replay.start_time_ut = 0; + parser->user.replay.end_time_ut = 0; + return PARSER_RC_OK; +} + +static inline PARSER_RC pluginsd_disable(char **words __maybe_unused, size_t num_words __maybe_unused, PARSER *parser) { + netdata_log_info("PLUGINSD: plugin called DISABLE. Disabling it."); + parser->user.enabled = 0; + return PARSER_RC_STOP; +} + +static inline PARSER_RC pluginsd_label(char **words, size_t num_words, PARSER *parser) { + const char *name = get_word(words, num_words, 1); + const char *label_source = get_word(words, num_words, 2); + const char *value = get_word(words, num_words, 3); + + if (!name || !label_source || !value) + return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_LABEL, "missing parameters"); + + char *store = (char *)value; + bool allocated_store = false; + + if(unlikely(num_words > 4)) { + allocated_store = true; + store = mallocz(PLUGINSD_LINE_MAX + 1); + size_t remaining = PLUGINSD_LINE_MAX; + char *move = store; + char *word; + for(size_t i = 3; i < num_words && remaining > 2 && (word = get_word(words, num_words, i)) ;i++) { + if(i > 3) { + *move++ = ' '; + *move = '\0'; + remaining--; + } + + size_t length = strlen(word); + if (length > remaining) + length = remaining; + + remaining -= length; + memcpy(move, word, length); + move += length; + *move = '\0'; + } + } + + if(unlikely(!(parser->user.new_host_labels))) + parser->user.new_host_labels = rrdlabels_create(); + + if (strcmp(name,HOST_LABEL_IS_EPHEMERAL) == 0) { + int is_ephemeral = appconfig_test_boolean_value((char *) value); + RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_LABEL); + if (host) { + if (is_ephemeral) + rrdhost_option_set(host, RRDHOST_OPTION_EPHEMERAL_HOST); + else + rrdhost_option_clear(host, RRDHOST_OPTION_EPHEMERAL_HOST); + } + } + + rrdlabels_add(parser->user.new_host_labels, name, store, str2l(label_source)); + + if (allocated_store) + freez(store); + + return PARSER_RC_OK; +} + +static inline PARSER_RC pluginsd_overwrite(char **words __maybe_unused, size_t num_words __maybe_unused, PARSER *parser) { + RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_OVERWRITE); + if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); + + netdata_log_debug(D_PLUGINSD, "requested to OVERWRITE host labels"); + + if(unlikely(!host->rrdlabels)) + host->rrdlabels = rrdlabels_create(); + + rrdlabels_migrate_to_these(host->rrdlabels, parser->user.new_host_labels); + if 
(rrdhost_option_check(host, RRDHOST_OPTION_EPHEMERAL_HOST)) + rrdlabels_add(host->rrdlabels, HOST_LABEL_IS_EPHEMERAL, "true", RRDLABEL_SRC_CONFIG); + + if(!rrdlabels_exist(host->rrdlabels, "_os")) + rrdlabels_add(host->rrdlabels, "_os", string2str(host->os), RRDLABEL_SRC_AUTO); + + if(!rrdlabels_exist(host->rrdlabels, "_hostname")) + rrdlabels_add(host->rrdlabels, "_hostname", string2str(host->hostname), RRDLABEL_SRC_AUTO); + + rrdhost_flag_set(host, RRDHOST_FLAG_METADATA_LABELS | RRDHOST_FLAG_METADATA_UPDATE); + + rrdlabels_destroy(parser->user.new_host_labels); + parser->user.new_host_labels = NULL; + return PARSER_RC_OK; +} + +static inline PARSER_RC pluginsd_clabel(char **words, size_t num_words, PARSER *parser) { + const char *name = get_word(words, num_words, 1); + const char *value = get_word(words, num_words, 2); + const char *label_source = get_word(words, num_words, 3); + + if (!name || !value || !label_source) { + netdata_log_error("Ignoring malformed or empty CHART LABEL command."); + return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); + } + + if(unlikely(!parser->user.chart_rrdlabels_linked_temporarily)) { + RRDSET *st = pluginsd_get_scope_chart(parser); + parser->user.chart_rrdlabels_linked_temporarily = st->rrdlabels; + rrdlabels_unmark_all(parser->user.chart_rrdlabels_linked_temporarily); + } + + rrdlabels_add(parser->user.chart_rrdlabels_linked_temporarily, name, value, str2l(label_source)); + + return PARSER_RC_OK; +} + +static inline PARSER_RC pluginsd_clabel_commit(char **words __maybe_unused, size_t num_words __maybe_unused, PARSER *parser) { + RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_CLABEL_COMMIT); + if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); + + RRDSET *st = pluginsd_require_scope_chart(parser, PLUGINSD_KEYWORD_CLABEL_COMMIT, PLUGINSD_KEYWORD_BEGIN); + if(!st) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); + + netdata_log_debug(D_PLUGINSD, "requested to commit chart labels"); + + if(!parser->user.chart_rrdlabels_linked_temporarily) { + netdata_log_error("PLUGINSD: 'host:%s' got CLABEL_COMMIT, without a CHART or BEGIN. 
Ignoring it.", rrdhost_hostname(host)); + return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); + } + + rrdlabels_remove_all_unmarked(parser->user.chart_rrdlabels_linked_temporarily); + + rrdset_flag_set(st, RRDSET_FLAG_METADATA_UPDATE); + rrdhost_flag_set(st->rrdhost, RRDHOST_FLAG_METADATA_UPDATE); + rrdset_metadata_updated(st); + + parser->user.chart_rrdlabels_linked_temporarily = NULL; + return PARSER_RC_OK; +} + +static inline PARSER_RC pluginsd_begin_v2(char **words, size_t num_words, PARSER *parser) { + timing_init(); + + int idx = 1; + ssize_t slot = pluginsd_parse_rrd_slot(words, num_words); + if(slot >= 0) idx++; + + char *id = get_word(words, num_words, idx++); + char *update_every_str = get_word(words, num_words, idx++); + char *end_time_str = get_word(words, num_words, idx++); + char *wall_clock_time_str = get_word(words, num_words, idx++); + + if(unlikely(!id || !update_every_str || !end_time_str || !wall_clock_time_str)) + return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_BEGIN_V2, "missing parameters"); + + RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_BEGIN_V2); + if(unlikely(!host)) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); + + timing_step(TIMING_STEP_BEGIN2_PREPARE); + + RRDSET *st = pluginsd_rrdset_cache_get_from_slot(parser, host, id, slot, PLUGINSD_KEYWORD_BEGIN_V2); + + if(unlikely(!st)) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); + + if(!pluginsd_set_scope_chart(parser, st, PLUGINSD_KEYWORD_BEGIN_V2)) + return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); + + if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE))) + rrdset_isnot_obsolete___safe_from_collector_thread(st); + + timing_step(TIMING_STEP_BEGIN2_FIND_CHART); + + // ------------------------------------------------------------------------ + // parse the parameters + + time_t update_every = (time_t) str2ull_encoded(update_every_str); + time_t end_time = (time_t) str2ull_encoded(end_time_str); + + time_t wall_clock_time; + if(likely(*wall_clock_time_str == '#')) + wall_clock_time = end_time; + else + wall_clock_time = (time_t) str2ull_encoded(wall_clock_time_str); + + if (unlikely(update_every != st->update_every)) + rrdset_set_update_every_s(st, update_every); + + timing_step(TIMING_STEP_BEGIN2_PARSE); + + // ------------------------------------------------------------------------ + // prepare our state + + pluginsd_lock_rrdset_data_collection(parser); + + parser->user.v2.update_every = update_every; + parser->user.v2.end_time = end_time; + parser->user.v2.wall_clock_time = wall_clock_time; + parser->user.v2.ml_locked = ml_chart_update_begin(st); + + timing_step(TIMING_STEP_BEGIN2_ML); + + // ------------------------------------------------------------------------ + // propagate it forward in v2 + + if(!parser->user.v2.stream_buffer.wb && rrdhost_has_rrdpush_sender_enabled(st->rrdhost)) + parser->user.v2.stream_buffer = rrdset_push_metric_initialize(parser->user.st, wall_clock_time); + + if(parser->user.v2.stream_buffer.v2 && parser->user.v2.stream_buffer.wb) { + // check receiver capabilities + bool can_copy = stream_has_capability(&parser->user, STREAM_CAP_IEEE754) == stream_has_capability(&parser->user.v2.stream_buffer, STREAM_CAP_IEEE754); + + // check sender capabilities + bool with_slots = stream_has_capability(&parser->user.v2.stream_buffer, STREAM_CAP_SLOTS) ? true : false; + NUMBER_ENCODING integer_encoding = stream_has_capability(&parser->user.v2.stream_buffer, STREAM_CAP_IEEE754) ? 
NUMBER_ENCODING_BASE64 : NUMBER_ENCODING_HEX; + + BUFFER *wb = parser->user.v2.stream_buffer.wb; + + buffer_need_bytes(wb, 1024); + + if(unlikely(parser->user.v2.stream_buffer.begin_v2_added)) + buffer_fast_strcat(wb, PLUGINSD_KEYWORD_END_V2 "\n", sizeof(PLUGINSD_KEYWORD_END_V2) - 1 + 1); + + buffer_fast_strcat(wb, PLUGINSD_KEYWORD_BEGIN_V2, sizeof(PLUGINSD_KEYWORD_BEGIN_V2) - 1); + + if(with_slots) { + buffer_fast_strcat(wb, " "PLUGINSD_KEYWORD_SLOT":", sizeof(PLUGINSD_KEYWORD_SLOT) - 1 + 2); + buffer_print_uint64_encoded(wb, integer_encoding, st->rrdpush.sender.chart_slot); + } + + buffer_fast_strcat(wb, " '", 2); + buffer_fast_strcat(wb, rrdset_id(st), string_strlen(st->id)); + buffer_fast_strcat(wb, "' ", 2); + + if(can_copy) + buffer_strcat(wb, update_every_str); + else + buffer_print_uint64_encoded(wb, integer_encoding, update_every); + + buffer_fast_strcat(wb, " ", 1); + + if(can_copy) + buffer_strcat(wb, end_time_str); + else + buffer_print_uint64_encoded(wb, integer_encoding, end_time); + + buffer_fast_strcat(wb, " ", 1); + + if(can_copy) + buffer_strcat(wb, wall_clock_time_str); + else + buffer_print_uint64_encoded(wb, integer_encoding, wall_clock_time); + + buffer_fast_strcat(wb, "\n", 1); + + parser->user.v2.stream_buffer.last_point_end_time_s = end_time; + parser->user.v2.stream_buffer.begin_v2_added = true; + } + + timing_step(TIMING_STEP_BEGIN2_PROPAGATE); + + // ------------------------------------------------------------------------ + // store it + + st->last_collected_time.tv_sec = end_time; + st->last_collected_time.tv_usec = 0; + st->last_updated.tv_sec = end_time; + st->last_updated.tv_usec = 0; + st->counter++; + st->counter_done++; + + // these are only needed for db mode RAM, ALLOC + st->db.current_entry++; + if(st->db.current_entry >= st->db.entries) + st->db.current_entry -= st->db.entries; + + timing_step(TIMING_STEP_BEGIN2_STORE); + + return PARSER_RC_OK; +} + +static inline PARSER_RC pluginsd_set_v2(char **words, size_t num_words, PARSER *parser) { + timing_init(); + + int idx = 1; + ssize_t slot = pluginsd_parse_rrd_slot(words, num_words); + if(slot >= 0) idx++; + + char *dimension = get_word(words, num_words, idx++); + char *collected_str = get_word(words, num_words, idx++); + char *value_str = get_word(words, num_words, idx++); + char *flags_str = get_word(words, num_words, idx++); + + if(unlikely(!dimension || !collected_str || !value_str || !flags_str)) + return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_SET_V2, "missing parameters"); + + RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_SET_V2); + if(unlikely(!host)) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); + + RRDSET *st = pluginsd_require_scope_chart(parser, PLUGINSD_KEYWORD_SET_V2, PLUGINSD_KEYWORD_BEGIN_V2); + if(unlikely(!st)) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); + + timing_step(TIMING_STEP_SET2_PREPARE); + + RRDDIM *rd = pluginsd_acquire_dimension(host, st, dimension, slot, PLUGINSD_KEYWORD_SET_V2); + if(unlikely(!rd)) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); + + st->pluginsd.set = true; + + if(unlikely(rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE | RRDDIM_FLAG_ARCHIVED))) + rrddim_isnot_obsolete___safe_from_collector_thread(st, rd); + + timing_step(TIMING_STEP_SET2_LOOKUP_DIMENSION); + + // ------------------------------------------------------------------------ + // parse the parameters + + collected_number collected_value = (collected_number) str2ll_encoded(collected_str); + + NETDATA_DOUBLE value; + if(*value_str == '#') + value = 
(NETDATA_DOUBLE)collected_value; + else + value = str2ndd_encoded(value_str, NULL); + + SN_FLAGS flags = pluginsd_parse_storage_number_flags(flags_str); + + timing_step(TIMING_STEP_SET2_PARSE); + + // ------------------------------------------------------------------------ + // check value and ML + + if (unlikely(!netdata_double_isnumber(value) || (flags == SN_EMPTY_SLOT))) { + value = NAN; + flags = SN_EMPTY_SLOT; + + if(parser->user.v2.ml_locked) + ml_dimension_is_anomalous(rd, parser->user.v2.end_time, 0, false); + } + else if(parser->user.v2.ml_locked) { + if (ml_dimension_is_anomalous(rd, parser->user.v2.end_time, value, true)) { + // clear anomaly bit: 0 -> is anomalous, 1 -> not anomalous + flags &= ~((storage_number) SN_FLAG_NOT_ANOMALOUS); + } + else + flags |= SN_FLAG_NOT_ANOMALOUS; + } + + timing_step(TIMING_STEP_SET2_ML); + + // ------------------------------------------------------------------------ + // propagate it forward in v2 + + if(parser->user.v2.stream_buffer.v2 && parser->user.v2.stream_buffer.begin_v2_added && parser->user.v2.stream_buffer.wb) { + // check if receiver and sender have the same number parsing capabilities + bool can_copy = stream_has_capability(&parser->user, STREAM_CAP_IEEE754) == stream_has_capability(&parser->user.v2.stream_buffer, STREAM_CAP_IEEE754); + + // check the sender capabilities + bool with_slots = stream_has_capability(&parser->user.v2.stream_buffer, STREAM_CAP_SLOTS) ? true : false; + NUMBER_ENCODING integer_encoding = stream_has_capability(&parser->user.v2.stream_buffer, STREAM_CAP_IEEE754) ? NUMBER_ENCODING_BASE64 : NUMBER_ENCODING_HEX; + NUMBER_ENCODING doubles_encoding = stream_has_capability(&parser->user.v2.stream_buffer, STREAM_CAP_IEEE754) ? NUMBER_ENCODING_BASE64 : NUMBER_ENCODING_DECIMAL; + + BUFFER *wb = parser->user.v2.stream_buffer.wb; + buffer_need_bytes(wb, 1024); + buffer_fast_strcat(wb, PLUGINSD_KEYWORD_SET_V2, sizeof(PLUGINSD_KEYWORD_SET_V2) - 1); + + if(with_slots) { + buffer_fast_strcat(wb, " "PLUGINSD_KEYWORD_SLOT":", sizeof(PLUGINSD_KEYWORD_SLOT) - 1 + 2); + buffer_print_uint64_encoded(wb, integer_encoding, rd->rrdpush.sender.dim_slot); + } + + buffer_fast_strcat(wb, " '", 2); + buffer_fast_strcat(wb, rrddim_id(rd), string_strlen(rd->id)); + buffer_fast_strcat(wb, "' ", 2); + if(can_copy) + buffer_strcat(wb, collected_str); + else + buffer_print_int64_encoded(wb, integer_encoding, collected_value); // original v2 had hex + buffer_fast_strcat(wb, " ", 1); + if(can_copy) + buffer_strcat(wb, value_str); + else + buffer_print_netdata_double_encoded(wb, doubles_encoding, value); // original v2 had decimal + buffer_fast_strcat(wb, " ", 1); + buffer_print_sn_flags(wb, flags, true); + buffer_fast_strcat(wb, "\n", 1); + } + + timing_step(TIMING_STEP_SET2_PROPAGATE); + + // ------------------------------------------------------------------------ + // store it + + rrddim_store_metric(rd, parser->user.v2.end_time * USEC_PER_SEC, value, flags); + rd->collector.last_collected_time.tv_sec = parser->user.v2.end_time; + rd->collector.last_collected_time.tv_usec = 0; + rd->collector.last_collected_value = collected_value; + rd->collector.last_stored_value = value; + rd->collector.last_calculated_value = value; + rd->collector.counter++; + rrddim_set_updated(rd); + + timing_step(TIMING_STEP_SET2_STORE); + + return PARSER_RC_OK; +} + +static inline PARSER_RC pluginsd_end_v2(char **words __maybe_unused, size_t num_words __maybe_unused, PARSER *parser) { + timing_init(); + + RRDHOST *host = pluginsd_require_scope_host(parser, 
PLUGINSD_KEYWORD_END_V2); + if(unlikely(!host)) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); + + RRDSET *st = pluginsd_require_scope_chart(parser, PLUGINSD_KEYWORD_END_V2, PLUGINSD_KEYWORD_BEGIN_V2); + if(unlikely(!st)) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); + + parser->user.data_collections_count++; + + timing_step(TIMING_STEP_END2_PREPARE); + + // ------------------------------------------------------------------------ + // propagate the whole chart update in v1 + + if(unlikely(!parser->user.v2.stream_buffer.v2 && !parser->user.v2.stream_buffer.begin_v2_added && parser->user.v2.stream_buffer.wb)) + rrdset_push_metrics_v1(&parser->user.v2.stream_buffer, st); + + timing_step(TIMING_STEP_END2_PUSH_V1); + + // ------------------------------------------------------------------------ + // unblock data collection + + pluginsd_unlock_previous_scope_chart(parser, PLUGINSD_KEYWORD_END_V2, false); + rrdcontext_collected_rrdset(st); + store_metric_collection_completed(); + + timing_step(TIMING_STEP_END2_RRDSET); + + // ------------------------------------------------------------------------ + // propagate it forward + + rrdset_push_metrics_finished(&parser->user.v2.stream_buffer, st); + + timing_step(TIMING_STEP_END2_PROPAGATE); + + // ------------------------------------------------------------------------ + // cleanup RRDSET / RRDDIM + + if(likely(st->pluginsd.dims_with_slots)) { + for(size_t i = 0; i < st->pluginsd.size ;i++) { + RRDDIM *rd = st->pluginsd.prd_array[i].rd; + + if(!rd) + continue; + + rd->collector.calculated_value = 0; + rd->collector.collected_value = 0; + rrddim_clear_updated(rd); + } + } + else { + RRDDIM *rd; + rrddim_foreach_read(rd, st){ + rd->collector.calculated_value = 0; + rd->collector.collected_value = 0; + rrddim_clear_updated(rd); + } + rrddim_foreach_done(rd); + } + + // ------------------------------------------------------------------------ + // reset state + + parser->user.v2 = (struct parser_user_object_v2){ 0 }; + + timing_step(TIMING_STEP_END2_STORE); + timing_report(); + + return PARSER_RC_OK; +} + +static inline PARSER_RC pluginsd_exit(char **words __maybe_unused, size_t num_words __maybe_unused, PARSER *parser __maybe_unused) { + netdata_log_info("PLUGINSD: plugin called EXIT."); + return PARSER_RC_STOP; +} + +static void pluginsd_json_stream_paths(PARSER *parser, void *action_data __maybe_unused) { + stream_path_set_from_json(parser->user.host, buffer_tostring(parser->defer.response), false); + buffer_free(parser->defer.response); +} + +static void pluginsd_json_dev_null(PARSER *parser, void *action_data __maybe_unused) { + buffer_free(parser->defer.response); +} + +static PARSER_RC pluginsd_json(char **words __maybe_unused, size_t num_words __maybe_unused, PARSER *parser) { + RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_JSON); + if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); + + char *keyword = get_word(words, num_words, 1); + + parser->defer.response = buffer_create(0, NULL); + parser->defer.end_keyword = PLUGINSD_KEYWORD_JSON_END; + parser->defer.action = pluginsd_json_dev_null; + parser->defer.action_data = NULL; + parser->flags |= PARSER_DEFER_UNTIL_KEYWORD; + + if(strcmp(keyword, PLUGINSD_KEYWORD_STREAM_PATH) == 0) + parser->defer.action = pluginsd_json_stream_paths; + else + netdata_log_error("PLUGINSD: invalid JSON payload keyword '%s'", keyword); + + return PARSER_RC_OK; +} + +PARSER_RC rrdpush_receiver_pluginsd_claimed_id(char **words, size_t num_words, PARSER *parser); + +// 
---------------------------------------------------------------------------- + +void pluginsd_cleanup_v2(PARSER *parser) { + // this is called when the thread is stopped while processing + pluginsd_clear_scope_chart(parser, "THREAD CLEANUP"); +} + +void pluginsd_process_cleanup(PARSER *parser) { + if(!parser) return; + + pluginsd_cleanup_v2(parser); + pluginsd_host_define_cleanup(parser); + + rrd_collector_finished(); + +#ifdef NETDATA_LOG_STREAM_RECEIVE + if(parser->user.stream_log_fp) { + fclose(parser->user.stream_log_fp); + parser->user.stream_log_fp = NULL; + } +#endif + + parser_destroy(parser); +} + +void pluginsd_process_thread_cleanup(void *pptr) { + PARSER *parser = CLEANUP_FUNCTION_GET_PTR(pptr); + pluginsd_process_cleanup(parser); +} + +bool parser_reconstruct_node(BUFFER *wb, void *ptr) { + PARSER *parser = ptr; + if(!parser || !parser->user.host) + return false; + + buffer_strcat(wb, rrdhost_hostname(parser->user.host)); + return true; +} + +bool parser_reconstruct_instance(BUFFER *wb, void *ptr) { + PARSER *parser = ptr; + if(!parser || !parser->user.st) + return false; + + buffer_strcat(wb, rrdset_name(parser->user.st)); + return true; +} + +bool parser_reconstruct_context(BUFFER *wb, void *ptr) { + PARSER *parser = ptr; + if(!parser || !parser->user.st) + return false; + + buffer_strcat(wb, string2str(parser->user.st->context)); + return true; +} + +inline size_t pluginsd_process(RRDHOST *host, struct plugind *cd, int fd_input, int fd_output, int trust_durations) +{ + int enabled = cd->unsafe.enabled; + + if (fd_input == -1 || fd_output == -1 || !enabled) { + cd->unsafe.enabled = 0; + return 0; + } + + PARSER *parser; + { + PARSER_USER_OBJECT user = { + .enabled = cd->unsafe.enabled, + .host = host, + .cd = cd, + .trust_durations = trust_durations + }; + + parser = parser_init(&user, fd_input, fd_output, PARSER_INPUT_SPLIT, NULL); + } + + pluginsd_keywords_init(parser, PARSER_INIT_PLUGINSD); + + rrd_collector_started(); + + size_t count = 0; + + ND_LOG_STACK lgs[] = { + ND_LOG_FIELD_CB(NDF_REQUEST, line_splitter_reconstruct_line, &parser->line), + ND_LOG_FIELD_CB(NDF_NIDL_NODE, parser_reconstruct_node, parser), + ND_LOG_FIELD_CB(NDF_NIDL_INSTANCE, parser_reconstruct_instance, parser), + ND_LOG_FIELD_CB(NDF_NIDL_CONTEXT, parser_reconstruct_context, parser), + ND_LOG_FIELD_END(), + }; + ND_LOG_STACK_PUSH(lgs); + + CLEANUP_FUNCTION_REGISTER(pluginsd_process_thread_cleanup) cleanup_parser = parser; + buffered_reader_init(&parser->reader); + CLEAN_BUFFER *buffer = buffer_create(sizeof(parser->reader.read_buffer) + 2, NULL); + while(likely(service_running(SERVICE_COLLECTORS))) { + + if(unlikely(!buffered_reader_next_line(&parser->reader, buffer))) { + buffered_reader_ret_t ret = buffered_reader_read_timeout( + &parser->reader, parser->fd_input, + 2 * 60 * MSEC_PER_SEC, true); + + if(unlikely(ret != BUFFERED_READER_READ_OK)) { + nd_log(NDLS_COLLECTORS, NDLP_INFO, "Buffered reader not OK"); + break; + } + + continue; + } + + if(unlikely(parser_action(parser, buffer->buffer))) + break; + + buffer->len = 0; + buffer->buffer[0] = '\0'; + } + + cd->unsafe.enabled = parser->user.enabled; + count = parser->user.data_collections_count; + + if(likely(count)) { + cd->successful_collections += count; + cd->serial_failures = 0; + } + else + cd->serial_failures++; + + return count; +} + +#include "gperf-hashtable.h" + +PARSER_RC parser_execute(PARSER *parser, const PARSER_KEYWORD *keyword, char **words, size_t num_words) { + // put all the keywords ordered by the frequency they are used + + 
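+ // (the hot data-collection keywords SET2/BEGIN2/END2 come first)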
switch(keyword->id) { + case PLUGINSD_KEYWORD_ID_SET2: + return pluginsd_set_v2(words, num_words, parser); + case PLUGINSD_KEYWORD_ID_BEGIN2: + return pluginsd_begin_v2(words, num_words, parser); + case PLUGINSD_KEYWORD_ID_END2: + return pluginsd_end_v2(words, num_words, parser); + case PLUGINSD_KEYWORD_ID_SET: + return pluginsd_set(words, num_words, parser); + case PLUGINSD_KEYWORD_ID_BEGIN: + return pluginsd_begin(words, num_words, parser); + case PLUGINSD_KEYWORD_ID_END: + return pluginsd_end(words, num_words, parser); + case PLUGINSD_KEYWORD_ID_RSET: + return pluginsd_replay_set(words, num_words, parser); + case PLUGINSD_KEYWORD_ID_RBEGIN: + return pluginsd_replay_begin(words, num_words, parser); + case PLUGINSD_KEYWORD_ID_RDSTATE: + return pluginsd_replay_rrddim_collection_state(words, num_words, parser); + case PLUGINSD_KEYWORD_ID_RSSTATE: + return pluginsd_replay_rrdset_collection_state(words, num_words, parser); + case PLUGINSD_KEYWORD_ID_REND: + return pluginsd_replay_end(words, num_words, parser); + case PLUGINSD_KEYWORD_ID_DIMENSION: + return pluginsd_dimension(words, num_words, parser); + case PLUGINSD_KEYWORD_ID_CHART: + return pluginsd_chart(words, num_words, parser); + case PLUGINSD_KEYWORD_ID_CHART_DEFINITION_END: + return pluginsd_chart_definition_end(words, num_words, parser); + case PLUGINSD_KEYWORD_ID_CLABEL: + return pluginsd_clabel(words, num_words, parser); + case PLUGINSD_KEYWORD_ID_CLABEL_COMMIT: + return pluginsd_clabel_commit(words, num_words, parser); + case PLUGINSD_KEYWORD_ID_FUNCTION: + return pluginsd_function(words, num_words, parser); + case PLUGINSD_KEYWORD_ID_FUNCTION_RESULT_BEGIN: + return pluginsd_function_result_begin(words, num_words, parser); + case PLUGINSD_KEYWORD_ID_FUNCTION_PROGRESS: + return pluginsd_function_progress(words, num_words, parser); + case PLUGINSD_KEYWORD_ID_JSON: + return pluginsd_json(words, num_words, parser); + case PLUGINSD_KEYWORD_ID_LABEL: + return pluginsd_label(words, num_words, parser); + case PLUGINSD_KEYWORD_ID_OVERWRITE: + return pluginsd_overwrite(words, num_words, parser); + case PLUGINSD_KEYWORD_ID_VARIABLE: + return pluginsd_variable(words, num_words, parser); + case PLUGINSD_KEYWORD_ID_CLAIMED_ID: + return rrdpush_receiver_pluginsd_claimed_id(words, num_words, parser); + case PLUGINSD_KEYWORD_ID_HOST: + return pluginsd_host(words, num_words, parser); + case PLUGINSD_KEYWORD_ID_HOST_DEFINE: + return pluginsd_host_define(words, num_words, parser); + case PLUGINSD_KEYWORD_ID_HOST_DEFINE_END: + return pluginsd_host_define_end(words, num_words, parser); + case PLUGINSD_KEYWORD_ID_HOST_LABEL: + return pluginsd_host_labels(words, num_words, parser); + case PLUGINSD_KEYWORD_ID_FLUSH: + return pluginsd_flush(words, num_words, parser); + case PLUGINSD_KEYWORD_ID_DISABLE: + return pluginsd_disable(words, num_words, parser); + case PLUGINSD_KEYWORD_ID_EXIT: + return pluginsd_exit(words, num_words, parser); + case PLUGINSD_KEYWORD_ID_CONFIG: + return pluginsd_config(words, num_words, parser); + + case PLUGINSD_KEYWORD_ID_DYNCFG_ENABLE: + case PLUGINSD_KEYWORD_ID_DYNCFG_REGISTER_MODULE: + case PLUGINSD_KEYWORD_ID_DYNCFG_REGISTER_JOB: + case PLUGINSD_KEYWORD_ID_DYNCFG_RESET: + case PLUGINSD_KEYWORD_ID_REPORT_JOB_STATUS: + case PLUGINSD_KEYWORD_ID_DELETE_JOB: + return pluginsd_dyncfg_noop(words, num_words, parser); + + default: + netdata_log_error("Unknown keyword '%s' with id %zu", keyword->keyword, keyword->id); + return PARSER_RC_ERROR; + } +} + +void parser_init_repertoire(PARSER *parser, PARSER_REPERTOIRE repertoire) { + 
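+ // remember which keyword families this parser serves, and register a worker job name for every gperf keyword in the repertoire, so the keyword being processed shows up in the workers charts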
parser->repertoire = repertoire; + + for(size_t i = GPERF_PARSER_MIN_HASH_VALUE ; i <= GPERF_PARSER_MAX_HASH_VALUE ;i++) { + if(gperf_keywords[i].keyword && *gperf_keywords[i].keyword && (parser->repertoire & gperf_keywords[i].repertoire)) + worker_register_job_name(gperf_keywords[i].worker_job_id, gperf_keywords[i].keyword); + } +} + +int pluginsd_parser_unittest(void) { + PARSER *p = parser_init(NULL, -1, -1, PARSER_INPUT_SPLIT, NULL); + pluginsd_keywords_init(p, PARSER_INIT_PLUGINSD | PARSER_INIT_STREAMING); + + char *lines[] = { + "BEGIN2 abcdefghijklmnopqr 123", + "SET2 abcdefg 0x12345678 0 0", + "SET2 hijklmnoqr 0x12345678 0 0", + "SET2 stuvwxyz 0x12345678 0 0", + "END2", + NULL, + }; + + char *words[PLUGINSD_MAX_WORDS]; + size_t iterations = 1000000; + size_t count = 0; + char input[PLUGINSD_LINE_MAX + 1]; + + usec_t started = now_realtime_usec(); + while(--iterations) { + for(size_t line = 0; lines[line] ;line++) { + strncpyz(input, lines[line], PLUGINSD_LINE_MAX); + size_t num_words = quoted_strings_splitter_pluginsd(input, words, PLUGINSD_MAX_WORDS); + const char *command = get_word(words, num_words, 0); + const PARSER_KEYWORD *keyword = parser_find_keyword(p, command); + if(unlikely(!keyword)) + fatal("Cannot parse the line '%s'", lines[line]); + count++; + } + } + usec_t ended = now_realtime_usec(); + + netdata_log_info("Parsed %zu lines in %0.2f secs, %0.2f klines/sec", count, + (double)(ended - started) / (double)USEC_PER_SEC, + (double)count / ((double)(ended - started) / (double)USEC_PER_SEC) / 1000.0); + + parser_destroy(p); + return 0; +} diff --git a/src/plugins.d/pluginsd_parser.h b/src/plugins.d/pluginsd_parser.h new file mode 100644 index 000000000..983da7d13 --- /dev/null +++ b/src/plugins.d/pluginsd_parser.h @@ -0,0 +1,245 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_PLUGINSD_PARSER_H +#define NETDATA_PLUGINSD_PARSER_H + +#include "daemon/common.h" + +#define WORKER_PARSER_FIRST_JOB 3 + +// this has to be in-sync with the same at receiver.c +#define WORKER_RECEIVER_JOB_REPLICATION_COMPLETION (WORKER_PARSER_FIRST_JOB - 3) + +// this controls the max response size of a function +#define PLUGINSD_MAX_DEFERRED_SIZE (100 * 1024 * 1024) + +#define PLUGINSD_MIN_RRDSET_POINTERS_CACHE 1024 + +#define HOST_LABEL_IS_EPHEMERAL "_is_ephemeral" +// PARSER return codes +typedef enum __attribute__ ((__packed__)) parser_rc { + PARSER_RC_OK, // Callback was successful, go on + PARSER_RC_STOP, // Callback says STOP + PARSER_RC_ERROR // Callback failed (abort rest of callbacks) +} PARSER_RC; + +typedef enum __attribute__ ((__packed__)) parser_input_type { + PARSER_INPUT_SPLIT = (1 << 1), + PARSER_DEFER_UNTIL_KEYWORD = (1 << 2), +} PARSER_INPUT_TYPE; + +typedef enum __attribute__ ((__packed__)) { + PARSER_INIT_PLUGINSD = (1 << 1), + PARSER_INIT_STREAMING = (1 << 2), + PARSER_REP_METADATA = (1 << 3), +} PARSER_REPERTOIRE; + +struct parser; +typedef PARSER_RC (*keyword_function)(char **words, size_t num_words, struct parser *parser); + +typedef struct parser_keyword { + char *keyword; + size_t id; + PARSER_REPERTOIRE repertoire; + size_t worker_job_id; +} PARSER_KEYWORD; + +typedef struct parser_user_object { + bool cleanup_slots; + RRDSET *st; + RRDHOST *host; + void *opaque; + struct plugind *cd; + int trust_durations; + RRDLABELS *new_host_labels; + RRDLABELS *chart_rrdlabels_linked_temporarily; + size_t data_collections_count; + int enabled; + +#ifdef NETDATA_LOG_STREAM_RECEIVE + FILE *stream_log_fp; + PARSER_REPERTOIRE stream_log_repertoire; +#endif + + 
STREAM_CAPABILITIES capabilities; // receiver capabilities + + struct { + bool parsing_host; + nd_uuid_t machine_guid; + char machine_guid_str[UUID_STR_LEN]; + STRING *hostname; + RRDLABELS *rrdlabels; + } host_define; + + struct parser_user_object_replay { + time_t start_time; + time_t end_time; + + usec_t start_time_ut; + usec_t end_time_ut; + + time_t wall_clock_time; + + bool rset_enabled; + } replay; + + struct parser_user_object_v2 { + bool locked_data_collection; + RRDSET_STREAM_BUFFER stream_buffer; // sender capabilities in this + time_t update_every; + time_t end_time; + time_t wall_clock_time; + bool ml_locked; + } v2; +} PARSER_USER_OBJECT; + +typedef void (*parser_deferred_action_t)(struct parser *parser, void *action_data); + +struct parser { + uint8_t version; // Parser version + PARSER_REPERTOIRE repertoire; + uint32_t flags; + int fd_input; + int fd_output; + + NETDATA_SSL *ssl_output; + +#ifdef ENABLE_H2O + void *h2o_ctx; // if set we use h2o_stream functions to send data +#endif + + PARSER_USER_OBJECT user; // User defined structure to hold extra state between calls + + struct buffered_reader reader; + struct line_splitter line; + const PARSER_KEYWORD *keyword; + + struct { + const char *end_keyword; + BUFFER *response; + parser_deferred_action_t action; + void *action_data; + } defer; + + struct { + DICTIONARY *functions; + usec_t smaller_monotonic_timeout_ut; + } inflight; + + struct { + SPINLOCK spinlock; + } writer; +}; + +typedef struct parser PARSER; + +PARSER *parser_init(struct parser_user_object *user, int fd_input, int fd_output, PARSER_INPUT_TYPE flags, void *ssl); +void parser_init_repertoire(PARSER *parser, PARSER_REPERTOIRE repertoire); +void parser_destroy(PARSER *working_parser); +void pluginsd_cleanup_v2(PARSER *parser); +void pluginsd_keywords_init(PARSER *parser, PARSER_REPERTOIRE repertoire); +PARSER_RC parser_execute(PARSER *parser, const PARSER_KEYWORD *keyword, char **words, size_t num_words); + +static inline int find_first_keyword(const char *src, char *dst, int dst_size, bool *isspace_map) { + const char *s = src, *keyword_start; + + while (unlikely(isspace_map[(uint8_t)*s])) s++; + keyword_start = s; + + while (likely(*s && !isspace_map[(uint8_t)*s]) && dst_size > 1) { + *dst++ = *s++; + dst_size--; + } + *dst = '\0'; + return dst_size == 0 ? 
0 : (int) (s - keyword_start); +} + +const PARSER_KEYWORD *gperf_lookup_keyword(register const char *str, register size_t len); + +static inline const PARSER_KEYWORD *parser_find_keyword(PARSER *parser, const char *command) { + const PARSER_KEYWORD *t = gperf_lookup_keyword(command, strlen(command)); + if(t && (t->repertoire & parser->repertoire)) + return t; + + return NULL; +} + +bool parser_reconstruct_node(BUFFER *wb, void *ptr); +bool parser_reconstruct_instance(BUFFER *wb, void *ptr); +bool parser_reconstruct_context(BUFFER *wb, void *ptr); + +static inline int parser_action(PARSER *parser, char *input) { +#ifdef NETDATA_LOG_STREAM_RECEIVE + static __thread char line[PLUGINSD_LINE_MAX + 1]; + strncpyz(line, input, sizeof(line) - 1); +#endif + + parser->line.count++; + + if(unlikely(parser->flags & PARSER_DEFER_UNTIL_KEYWORD)) { + char command[100 + 1]; + bool has_keyword = find_first_keyword(input, command, 100, isspace_map_pluginsd); + + if(!has_keyword || strcmp(command, parser->defer.end_keyword) != 0) { + if(parser->defer.response) { + buffer_strcat(parser->defer.response, input); + if(buffer_strlen(parser->defer.response) > PLUGINSD_MAX_DEFERRED_SIZE) { + // more than PLUGINSD_MAX_DEFERRED_SIZE of data, + // or a bad plugin that did not send the end_keyword + nd_log(NDLS_DAEMON, NDLP_ERR, "PLUGINSD: deferred response is too big (%zu bytes). Stopping this plugin.", buffer_strlen(parser->defer.response)); + return 1; + } + } + return 0; + } + else { + // call the action + parser->defer.action(parser, parser->defer.action_data); + + // empty everything + parser->defer.action = NULL; + parser->defer.action_data = NULL; + parser->defer.end_keyword = NULL; + parser->defer.response = NULL; + parser->flags &= ~PARSER_DEFER_UNTIL_KEYWORD; + } + return 0; + } + + parser->line.num_words = quoted_strings_splitter_pluginsd(input, parser->line.words, PLUGINSD_MAX_WORDS); + const char *command = get_word(parser->line.words, parser->line.num_words, 0); + + if(unlikely(!command)) { + line_splitter_reset(&parser->line); + return 0; + } + + PARSER_RC rc; + parser->keyword = parser_find_keyword(parser, command); + if(likely(parser->keyword)) { + worker_is_busy(parser->keyword->worker_job_id); + +#ifdef NETDATA_LOG_STREAM_RECEIVE + if(parser->user.stream_log_fp && parser->keyword->repertoire & parser->user.stream_log_repertoire) + fprintf(parser->user.stream_log_fp, "%s", line); +#endif + + rc = parser_execute(parser, parser->keyword, parser->line.words, parser->line.num_words); + // rc = (*t->func)(words, num_words, parser); + worker_is_idle(); + } + else + rc = PARSER_RC_ERROR; + + if(rc == PARSER_RC_ERROR) { + CLEAN_BUFFER *wb = buffer_create(1024, NULL); + line_splitter_reconstruct_line(wb, &parser->line); + netdata_log_error("PLUGINSD: parser_action('%s') failed on line %zu: { %s } (quotes added to show parsing)", + command, parser->line.count, buffer_tostring(wb)); + } + + line_splitter_reset(&parser->line); + return (rc == PARSER_RC_ERROR || rc == PARSER_RC_STOP); +} + +#endif //NETDATA_PLUGINSD_PARSER_H diff --git a/src/plugins.d/pluginsd_replication.c b/src/plugins.d/pluginsd_replication.c new file mode 100644 index 000000000..8d0975210 --- /dev/null +++ b/src/plugins.d/pluginsd_replication.c @@ -0,0 +1,371 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "pluginsd_replication.h" + +PARSER_RC pluginsd_replay_begin(char **words, size_t num_words, PARSER *parser) { + int idx = 1; + ssize_t slot = pluginsd_parse_rrd_slot(words, num_words); + if(slot >= 0) idx++; + + char *id = 
get_word(words, num_words, idx++); + char *start_time_str = get_word(words, num_words, idx++); + char *end_time_str = get_word(words, num_words, idx++); + char *child_now_str = get_word(words, num_words, idx++); + + RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_REPLAY_BEGIN); + if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); + + RRDSET *st; + if (likely(!id || !*id)) + st = pluginsd_require_scope_chart(parser, PLUGINSD_KEYWORD_REPLAY_BEGIN, PLUGINSD_KEYWORD_REPLAY_BEGIN); + else + st = pluginsd_rrdset_cache_get_from_slot(parser, host, id, slot, PLUGINSD_KEYWORD_REPLAY_BEGIN); + + if(!st) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); + + if(!pluginsd_set_scope_chart(parser, st, PLUGINSD_KEYWORD_REPLAY_BEGIN)) + return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); + + if(start_time_str && end_time_str) { + time_t start_time = (time_t) str2ull_encoded(start_time_str); + time_t end_time = (time_t) str2ull_encoded(end_time_str); + + time_t wall_clock_time = 0, tolerance; + bool wall_clock_comes_from_child; (void)wall_clock_comes_from_child; + if(child_now_str) { + wall_clock_time = (time_t) str2ull_encoded(child_now_str); + tolerance = st->update_every + 1; + wall_clock_comes_from_child = true; + } + + if(wall_clock_time <= 0) { + wall_clock_time = now_realtime_sec(); + tolerance = st->update_every + 5; + wall_clock_comes_from_child = false; + } + +#ifdef NETDATA_LOG_REPLICATION_REQUESTS + internal_error( + (!st->replay.start_streaming && (end_time < st->replay.after || start_time > st->replay.before)), + "REPLAY ERROR: 'host:%s/chart:%s' got a " PLUGINSD_KEYWORD_REPLAY_BEGIN " from %ld to %ld, which does not match our request (%ld to %ld).", + rrdhost_hostname(st->rrdhost), rrdset_id(st), start_time, end_time, st->replay.after, st->replay.before); + + internal_error( + true, + "REPLAY: 'host:%s/chart:%s' got a " PLUGINSD_KEYWORD_REPLAY_BEGIN " from %ld to %ld, child wall clock is %ld (%s), had requested %ld to %ld", + rrdhost_hostname(st->rrdhost), rrdset_id(st), + start_time, end_time, wall_clock_time, wall_clock_comes_from_child ? "from child" : "parent time", + st->replay.after, st->replay.before); +#endif + + if(start_time && end_time && start_time < wall_clock_time + tolerance && end_time < wall_clock_time + tolerance && start_time < end_time) { + if (unlikely(end_time - start_time != st->update_every)) + rrdset_set_update_every_s(st, end_time - start_time); + + st->last_collected_time.tv_sec = end_time; + st->last_collected_time.tv_usec = 0; + + st->last_updated.tv_sec = end_time; + st->last_updated.tv_usec = 0; + + st->counter++; + st->counter_done++; + + // these are only needed for db mode RAM, ALLOC + st->db.current_entry++; + if(st->db.current_entry >= st->db.entries) + st->db.current_entry -= st->db.entries; + + parser->user.replay.start_time = start_time; + parser->user.replay.end_time = end_time; + parser->user.replay.start_time_ut = (usec_t) start_time * USEC_PER_SEC; + parser->user.replay.end_time_ut = (usec_t) end_time * USEC_PER_SEC; + parser->user.replay.wall_clock_time = wall_clock_time; + parser->user.replay.rset_enabled = true; + + return PARSER_RC_OK; + } + + netdata_log_error("PLUGINSD REPLAY ERROR: 'host:%s/chart:%s' got a " PLUGINSD_KEYWORD_REPLAY_BEGIN + " from %ld to %ld, but timestamps are invalid " + "(now is %ld [%s], tolerance %ld). Ignoring " PLUGINSD_KEYWORD_REPLAY_SET, + rrdhost_hostname(st->rrdhost), rrdset_id(st), start_time, end_time, + wall_clock_time, wall_clock_comes_from_child ? 
"child wall clock" : "parent wall clock", + tolerance); + } + + // the child sends an RBEGIN without any parameters initially + // setting rset_enabled to false, means the RSET should not store any metrics + // to store metrics, the RBEGIN needs to have timestamps + parser->user.replay.start_time = 0; + parser->user.replay.end_time = 0; + parser->user.replay.start_time_ut = 0; + parser->user.replay.end_time_ut = 0; + parser->user.replay.wall_clock_time = 0; + parser->user.replay.rset_enabled = false; + return PARSER_RC_OK; +} + +PARSER_RC pluginsd_replay_set(char **words, size_t num_words, PARSER *parser) { + int idx = 1; + ssize_t slot = pluginsd_parse_rrd_slot(words, num_words); + if(slot >= 0) idx++; + + char *dimension = get_word(words, num_words, idx++); + char *value_str = get_word(words, num_words, idx++); + char *flags_str = get_word(words, num_words, idx++); + + RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_REPLAY_SET); + if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); + + RRDSET *st = pluginsd_require_scope_chart(parser, PLUGINSD_KEYWORD_REPLAY_SET, PLUGINSD_KEYWORD_REPLAY_BEGIN); + if(!st) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); + + if(!parser->user.replay.rset_enabled) { + nd_log_limit_static_thread_var(erl, 1, 0); + nd_log_limit(&erl, NDLS_COLLECTORS, NDLP_ERR, + "PLUGINSD: 'host:%s/chart:%s' got a %s but it is disabled by %s errors", + rrdhost_hostname(host), rrdset_id(st), PLUGINSD_KEYWORD_REPLAY_SET, PLUGINSD_KEYWORD_REPLAY_BEGIN); + + // we have to return OK here + return PARSER_RC_OK; + } + + RRDDIM *rd = pluginsd_acquire_dimension(host, st, dimension, slot, PLUGINSD_KEYWORD_REPLAY_SET); + if(!rd) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); + + st->pluginsd.set = true; + + if (unlikely(!parser->user.replay.start_time || !parser->user.replay.end_time)) { + netdata_log_error("PLUGINSD: 'host:%s/chart:%s/dim:%s' got a %s with invalid timestamps %ld to %ld from a %s. Disabling it.", + rrdhost_hostname(host), + rrdset_id(st), + dimension, + PLUGINSD_KEYWORD_REPLAY_SET, + parser->user.replay.start_time, + parser->user.replay.end_time, + PLUGINSD_KEYWORD_REPLAY_BEGIN); + return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); + } + + if (unlikely(!value_str || !*value_str)) + value_str = "NAN"; + + if(unlikely(!flags_str)) + flags_str = ""; + + if (likely(value_str)) { + RRDDIM_FLAGS rd_flags = rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE | RRDDIM_FLAG_ARCHIVED); + + if(!(rd_flags & RRDDIM_FLAG_ARCHIVED)) { + NETDATA_DOUBLE value = str2ndd_encoded(value_str, NULL); + SN_FLAGS flags = pluginsd_parse_storage_number_flags(flags_str); + + if (!netdata_double_isnumber(value) || (flags == SN_EMPTY_SLOT)) { + value = NAN; + flags = SN_EMPTY_SLOT; + } + + rrddim_store_metric(rd, parser->user.replay.end_time_ut, value, flags); + rd->collector.last_collected_time.tv_sec = parser->user.replay.end_time; + rd->collector.last_collected_time.tv_usec = 0; + rd->collector.counter++; + } + else { + nd_log_limit_static_global_var(erl, 1, 0); + nd_log_limit(&erl, NDLS_COLLECTORS, NDLP_WARNING, + "PLUGINSD: 'host:%s/chart:%s/dim:%s' has the ARCHIVED flag set, but it is replicated. 
" + "Ignoring data.", + rrdhost_hostname(st->rrdhost), rrdset_id(st), rrddim_name(rd)); + } + } + + return PARSER_RC_OK; +} + +PARSER_RC pluginsd_replay_rrddim_collection_state(char **words, size_t num_words, PARSER *parser) { + if(parser->user.replay.rset_enabled == false) + return PARSER_RC_OK; + + int idx = 1; + ssize_t slot = pluginsd_parse_rrd_slot(words, num_words); + if(slot >= 0) idx++; + + char *dimension = get_word(words, num_words, idx++); + char *last_collected_ut_str = get_word(words, num_words, idx++); + char *last_collected_value_str = get_word(words, num_words, idx++); + char *last_calculated_value_str = get_word(words, num_words, idx++); + char *last_stored_value_str = get_word(words, num_words, idx++); + + RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_REPLAY_RRDDIM_STATE); + if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); + + RRDSET *st = pluginsd_require_scope_chart(parser, PLUGINSD_KEYWORD_REPLAY_RRDDIM_STATE, PLUGINSD_KEYWORD_REPLAY_BEGIN); + if(!st) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); + + if(st->pluginsd.set) { + // reset pos to reuse the same RDAs + st->pluginsd.pos = 0; + st->pluginsd.set = false; + } + + RRDDIM *rd = pluginsd_acquire_dimension(host, st, dimension, slot, PLUGINSD_KEYWORD_REPLAY_RRDDIM_STATE); + if(!rd) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); + + usec_t dim_last_collected_ut = (usec_t)rd->collector.last_collected_time.tv_sec * USEC_PER_SEC + (usec_t)rd->collector.last_collected_time.tv_usec; + usec_t last_collected_ut = last_collected_ut_str ? str2ull_encoded(last_collected_ut_str) : 0; + if(last_collected_ut > dim_last_collected_ut) { + rd->collector.last_collected_time.tv_sec = (time_t)(last_collected_ut / USEC_PER_SEC); + rd->collector.last_collected_time.tv_usec = (last_collected_ut % USEC_PER_SEC); + } + + rd->collector.last_collected_value = last_collected_value_str ? str2ll_encoded(last_collected_value_str) : 0; + rd->collector.last_calculated_value = last_calculated_value_str ? str2ndd_encoded(last_calculated_value_str, NULL) : 0; + rd->collector.last_stored_value = last_stored_value_str ? str2ndd_encoded(last_stored_value_str, NULL) : 0.0; + + return PARSER_RC_OK; +} + +PARSER_RC pluginsd_replay_rrdset_collection_state(char **words, size_t num_words, PARSER *parser) { + if(parser->user.replay.rset_enabled == false) + return PARSER_RC_OK; + + char *last_collected_ut_str = get_word(words, num_words, 1); + char *last_updated_ut_str = get_word(words, num_words, 2); + + RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_REPLAY_RRDSET_STATE); + if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); + + RRDSET *st = pluginsd_require_scope_chart(parser, PLUGINSD_KEYWORD_REPLAY_RRDSET_STATE, + PLUGINSD_KEYWORD_REPLAY_BEGIN); + if(!st) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); + + usec_t chart_last_collected_ut = (usec_t)st->last_collected_time.tv_sec * USEC_PER_SEC + (usec_t)st->last_collected_time.tv_usec; + usec_t last_collected_ut = last_collected_ut_str ? str2ull_encoded(last_collected_ut_str) : 0; + if(last_collected_ut > chart_last_collected_ut) { + st->last_collected_time.tv_sec = (time_t)(last_collected_ut / USEC_PER_SEC); + st->last_collected_time.tv_usec = (last_collected_ut % USEC_PER_SEC); + } + + usec_t chart_last_updated_ut = (usec_t)st->last_updated.tv_sec * USEC_PER_SEC + (usec_t)st->last_updated.tv_usec; + usec_t last_updated_ut = last_updated_ut_str ? 
str2ull_encoded(last_updated_ut_str) : 0; + if(last_updated_ut > chart_last_updated_ut) { + st->last_updated.tv_sec = (time_t)(last_updated_ut / USEC_PER_SEC); + st->last_updated.tv_usec = (last_updated_ut % USEC_PER_SEC); + } + + st->counter++; + st->counter_done++; + + return PARSER_RC_OK; +} + +PARSER_RC pluginsd_replay_end(char **words, size_t num_words, PARSER *parser) { + if (num_words < 7) { // accepts 7, but the 7th is optional + netdata_log_error("REPLAY: malformed " PLUGINSD_KEYWORD_REPLAY_END " command"); + return PARSER_RC_ERROR; + } + + const char *update_every_child_txt = get_word(words, num_words, 1); + const char *first_entry_child_txt = get_word(words, num_words, 2); + const char *last_entry_child_txt = get_word(words, num_words, 3); + const char *start_streaming_txt = get_word(words, num_words, 4); + const char *first_entry_requested_txt = get_word(words, num_words, 5); + const char *last_entry_requested_txt = get_word(words, num_words, 6); + const char *child_world_time_txt = get_word(words, num_words, 7); // optional + + time_t update_every_child = (time_t) str2ull_encoded(update_every_child_txt); + time_t first_entry_child = (time_t) str2ull_encoded(first_entry_child_txt); + time_t last_entry_child = (time_t) str2ull_encoded(last_entry_child_txt); + + bool start_streaming = (strcmp(start_streaming_txt, "true") == 0); + time_t first_entry_requested = (time_t) str2ull_encoded(first_entry_requested_txt); + time_t last_entry_requested = (time_t) str2ull_encoded(last_entry_requested_txt); + + // the optional child world time + time_t child_world_time = (child_world_time_txt && *child_world_time_txt) ? (time_t) str2ull_encoded( + child_world_time_txt) : now_realtime_sec(); + + RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_REPLAY_END); + if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); + + RRDSET *st = pluginsd_require_scope_chart(parser, PLUGINSD_KEYWORD_REPLAY_END, PLUGINSD_KEYWORD_REPLAY_BEGIN); + if(!st) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); + +#ifdef NETDATA_LOG_REPLICATION_REQUESTS + internal_error(true, + "PLUGINSD REPLAY: 'host:%s/chart:%s': got a " PLUGINSD_KEYWORD_REPLAY_END " child db from %llu to %llu, start_streaming %s, had requested from %llu to %llu, wall clock %llu", + rrdhost_hostname(host), rrdset_id(st), + (unsigned long long)first_entry_child, (unsigned long long)last_entry_child, + start_streaming?"true":"false", + (unsigned long long)first_entry_requested, (unsigned long long)last_entry_requested, + (unsigned long long)child_world_time + ); +#endif + + parser->user.data_collections_count++; + + if(parser->user.replay.rset_enabled && st->rrdhost->receiver) { + time_t now = now_realtime_sec(); + time_t started = st->rrdhost->receiver->replication_first_time_t; + time_t current = parser->user.replay.end_time; + + if(started && current > started) { + host->rrdpush_receiver_replication_percent = (NETDATA_DOUBLE) (current - started) * 100.0 / (NETDATA_DOUBLE) (now - started); + worker_set_metric(WORKER_RECEIVER_JOB_REPLICATION_COMPLETION, + host->rrdpush_receiver_replication_percent); + } + } + + parser->user.replay.start_time = 0; + parser->user.replay.end_time = 0; + parser->user.replay.start_time_ut = 0; + parser->user.replay.end_time_ut = 0; + parser->user.replay.wall_clock_time = 0; + parser->user.replay.rset_enabled = false; + + st->counter++; + st->counter_done++; + store_metric_collection_completed(); + +#ifdef NETDATA_LOG_REPLICATION_REQUESTS + st->replay.start_streaming = false; + st->replay.after = 
0; + st->replay.before = 0; + if(start_streaming) + st->replay.log_next_data_collection = true; +#endif + + if (start_streaming) { + if (st->update_every != update_every_child) + rrdset_set_update_every_s(st, update_every_child); + + if(rrdset_flag_check(st, RRDSET_FLAG_RECEIVER_REPLICATION_IN_PROGRESS)) { + rrdset_flag_set(st, RRDSET_FLAG_RECEIVER_REPLICATION_FINISHED); + rrdset_flag_clear(st, RRDSET_FLAG_RECEIVER_REPLICATION_IN_PROGRESS); + rrdset_flag_clear(st, RRDSET_FLAG_SYNC_CLOCK); + rrdhost_receiver_replicating_charts_minus_one(st->rrdhost); + } +#ifdef NETDATA_LOG_REPLICATION_REQUESTS + else + internal_error(true, "REPLAY ERROR: 'host:%s/chart:%s' got a " PLUGINSD_KEYWORD_REPLAY_END " with enable_streaming = true, but there is no replication in progress for this chart.", + rrdhost_hostname(host), rrdset_id(st)); +#endif + + pluginsd_clear_scope_chart(parser, PLUGINSD_KEYWORD_REPLAY_END); + + host->rrdpush_receiver_replication_percent = 100.0; + worker_set_metric(WORKER_RECEIVER_JOB_REPLICATION_COMPLETION, host->rrdpush_receiver_replication_percent); + + return PARSER_RC_OK; + } + + pluginsd_clear_scope_chart(parser, PLUGINSD_KEYWORD_REPLAY_END); + + rrdcontext_updated_retention_rrdset(st); + + bool ok = replicate_chart_request(send_to_plugin, parser, host, st, + first_entry_child, last_entry_child, child_world_time, + first_entry_requested, last_entry_requested); + return ok ? PARSER_RC_OK : PARSER_RC_ERROR; +} diff --git a/src/plugins.d/pluginsd_replication.h b/src/plugins.d/pluginsd_replication.h new file mode 100644 index 000000000..1c6f617e6 --- /dev/null +++ b/src/plugins.d/pluginsd_replication.h @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_PLUGINSD_REPLICATION_H +#define NETDATA_PLUGINSD_REPLICATION_H + +#include "pluginsd_internals.h" + +PARSER_RC pluginsd_replay_begin(char **words, size_t num_words, PARSER *parser); +PARSER_RC pluginsd_replay_set(char **words, size_t num_words, PARSER *parser); +PARSER_RC pluginsd_replay_rrddim_collection_state(char **words, size_t num_words, PARSER *parser); +PARSER_RC pluginsd_replay_rrdset_collection_state(char **words, size_t num_words, PARSER *parser); +PARSER_RC pluginsd_replay_end(char **words, size_t num_words, PARSER *parser); + +#endif //NETDATA_PLUGINSD_REPLICATION_H diff --git a/src/registry/README.md b/src/registry/README.md index d976528c7..97db113f7 100644 --- a/src/registry/README.md +++ b/src/registry/README.md @@ -1,12 +1,3 @@ - - # Registry Netdata provides distributed monitoring. @@ -14,21 +5,21 @@ Netdata provides distributed monitoring. Traditional monitoring solutions centralize all the data to provide unified dashboards across all servers. Before Netdata, this was the standard practice. However it has a few issues: -1. due to the resources required, the number of metrics collected is limited. -2. for the same reason, the data collection frequency is not that high, at best it will be once every 10 or 15 seconds, +1. due to the resources required, the number of metrics collected is limited. +2. for the same reason, the data collection frequency is not that high, at best it will be once every 10 or 15 seconds, at worst every 5 or 10 mins. -3. the central monitoring solution needs dedicated resources, thus becoming "another bottleneck" in the whole +3. the central monitoring solution needs dedicated resources, thus becoming "another bottleneck" in the whole ecosystem. It also requires maintenance, administration, etc. -4. 
most centralized monitoring solutions are usually only good for presenting _statistics of past performance_ (i.e. +4. most centralized monitoring solutions are usually only good for presenting _statistics of past performance_ (i.e. cannot be used for real-time performance troubleshooting). Netdata follows a different approach: -1. data collection happens per second -2. thousands of metrics per server are collected -3. data do not leave the server where they are collected -4. Netdata servers do not talk to each other -5. your browser connects all the Netdata servers +1. data collection happens per second +2. thousands of metrics per server are collected +3. data do not leave the server where they are collected +4. Netdata servers do not talk to each other +5. your browser connects all the Netdata servers Using Netdata, your monitoring infrastructure is embedded on each server, limiting significantly the need for additional resources. Netdata is blazingly fast, very resource efficient and utilizes server resources that already exist and are @@ -46,31 +37,30 @@ etc.) are propagated to the new server, so that the new dashboard will come with The registry keeps track of 4 entities: -1. **machines**: i.e. the Netdata installations (a random GUID generated by each Netdata the first time it starts; we +1. **machines**: i.e. the Netdata installations (a random GUID generated by each Netdata the first time it starts; we call this **machine_guid**) - For each Netdata installation (each `machine_guid`) the registry keeps track of the different URLs it has accessed. + For each Netdata installation (each `machine_guid`) the registry keeps track of the different URLs it has accessed. -2. **persons**: i.e. the web browsers accessing the Netdata installations (a random GUID generated by the registry the +2. **persons**: i.e. the web browsers accessing the Netdata installations (a random GUID generated by the registry the first time it sees a new web browser; we call this **person_guid**) - For each person, the registry keeps track of the Netdata installations it has accessed and their URLs. + For each person, the registry keeps track of the Netdata installations it has accessed and their URLs. -3. **URLs** of Netdata installations (as seen by the web browsers) +3. **URLs** of Netdata installations (as seen by the web browsers) - For each URL, the registry keeps the URL and nothing more. Each URL is linked to _persons_ and _machines_. The only + For each URL, the registry keeps the URL and nothing more. Each URL is linked to _persons_ and _machines_. The only way to find a URL is to know its **machine_guid** or have a **person_guid** it is linked to. -4. **accounts**: i.e. the information used to sign-in via one of the available sign-in methods. Depending on the - method, this may include an email, or an email and a profile picture or avatar. +4. **accounts**: i.e. the information used to sign in via one of the available sign-in methods. Depending on the method, this may include an email, or an email and a profile picture or avatar. For _persons_/_accounts_ and _machines_, the registry keeps links to _URLs_, each link with 2 timestamps (first time seen, last time seen) and a counter (number of times it has been seen). _machines_, _persons_ and timestamps are stored -in the Netdata registry regardless of whether you sign in or not. +in the Netdata registry regardless of whether you sign in or not. ## Who talks to the registry? -Your web browser **only**!
If sending this information is against your policies, you +Your web browser **only**! If sending this information is against your policies, you can [run your own registry](#run-your-own-registry) Your Netdata servers do not talk to the registry. This is a UML diagram of its operation: @@ -95,7 +85,7 @@ We believe, it can do it... **To turn any Netdata into a registry**, edit `/etc/netdata/netdata.conf` and set: -```conf +```text [registry] enabled = yes registry to announce = http://your.registry:19999 @@ -106,7 +96,7 @@ Restart your Netdata to activate it. Then, you need to tell **all your other Netdata servers to advertise your registry**, instead of the default. To do this, on each of your Netdata servers, edit `/etc/netdata/netdata.conf` and set: -```conf +```text [registry] enabled = no registry to announce = http://your.registry:19999 @@ -120,7 +110,7 @@ This is it. You have your registry now. You may also want to give your server different names under the node menu (i.e. to have them sorted / grouped). You can change its registry name, by setting on each Netdata server: -```conf +```text [registry] registry hostname = Group1 - Master DB ``` @@ -131,7 +121,7 @@ So this server will appear in the node menu as `Group1 - Master DB`. The max nam Netdata v1.9+ support limiting access to the registry from given IPs, like this: -```conf +```text [registry] allow from = * ``` @@ -152,15 +142,16 @@ against the name-pattern. Please note that this process can be expensive on a machine that is serving many connections. The behaviour of the pattern matching can be controlled with the following setting: -```conf +```text [registry] allow by dns = heuristic ``` The settings are: -- `yes` allows the pattern to match DNS names. -- `no` disables DNS matching for the patterns (they only match IP addresses). -- `heuristic` will estimate if the patterns should match FQDNs by the presence or absence of `:`s or alpha-characters. + +- `yes` allows the pattern to match DNS names. +- `no` disables DNS matching for the patterns (they only match IP addresses). +- `heuristic` will estimate if the patterns should match FQDNs by the presence or absence of `:`s or alpha-characters. ### Where is the registry database stored? @@ -168,14 +159,13 @@ The settings are: There can be up to 2 files: -- `registry-log.db`, the transaction log - - all incoming requests that affect the registry are saved in this file in real-time. +- `registry-log.db`, the transaction log -- `registry.db`, the database + all incoming requests that affect the registry are saved in this file in real-time. - every `[registry].registry save db every new entries` entries in `registry-log.db`, Netdata will save its database - to `registry.db` and empty `registry-log.db`. +- `registry.db`, the database + + every `[registry].registry save db every new entries` entries in `registry-log.db`, Netdata will save its database to `registry.db` and empty `registry-log.db`. Both files are machine readable text files. @@ -183,9 +173,9 @@ Both files are machine readable text files. Beginning with `v1.30.0`, when the Netdata Agent's web server processes a request, it delivers the `SameSite=none` and `Secure` cookies. 
If you have problems accessing the local Agent dashboard or Netdata Cloud, disable these -cookies by [editing `netdata.conf`](/docs/netdata-agent/configuration/README.md#edit-netdataconf): +cookies by [editing `netdata.conf`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config): -```conf +```text [registry] enable cookies SameSite and Secure = no ``` @@ -208,10 +198,8 @@ redirecting the browser back to itself hoping that it will receive the cookie. I registry will keep redirecting your web browser back to itself, which after a few redirects will fail with an error like this: -```conf +```text ERROR 409: Cannot ACCESS netdata registry: https://registry.my-netdata.io responded with: {"status":"redirect","registry":"https://registry.my-netdata.io"} ``` This error is printed on your web browser console (press F12 on your browser to see it). - - diff --git a/src/registry/registry.c b/src/registry/registry.c index 803115231..be8d6948f 100644 --- a/src/registry/registry.c +++ b/src/registry/registry.c @@ -154,8 +154,8 @@ static inline int registry_person_url_callback_verify_machine_exists(REGISTRY_PE // that could make this safe, so try to be as atomic as possible. void registry_update_cloud_base_url() { - registry.cloud_base_url = appconfig_get(&cloud_config, CONFIG_SECTION_GLOBAL, "cloud base url", DEFAULT_CLOUD_BASE_URL); - setenv("NETDATA_REGISTRY_CLOUD_BASE_URL", registry.cloud_base_url, 1); + registry.cloud_base_url = cloud_config_url_get(); + nd_setenv("NETDATA_REGISTRY_CLOUD_BASE_URL", registry.cloud_base_url, 1); } // ---------------------------------------------------------------------------- @@ -164,21 +164,19 @@ void registry_update_cloud_base_url() { int registry_request_hello_json(RRDHOST *host, struct web_client *w, bool do_not_track) { registry_json_header(host, w, "hello", REGISTRY_STATUS_OK); - if(host->node_id) - buffer_json_member_add_uuid(w->response.data, "node_id", host->node_id); + if(!UUIDiszero(host->node_id)) + buffer_json_member_add_uuid(w->response.data, "node_id", host->node_id.uuid); buffer_json_member_add_object(w->response.data, "agent"); { buffer_json_member_add_string(w->response.data, "machine_guid", localhost->machine_guid); - if(localhost->node_id) - buffer_json_member_add_uuid(w->response.data, "node_id", localhost->node_id); + if(!UUIDiszero(localhost->node_id)) + buffer_json_member_add_uuid(w->response.data, "node_id", localhost->node_id.uuid); - char *claim_id = get_agent_claimid(); - if (claim_id) { - buffer_json_member_add_string(w->response.data, "claim_id", claim_id); - freez(claim_id); - } + CLAIM_ID claim_id = claim_id_get(); + if (claim_id_is_set(claim_id)) + buffer_json_member_add_string(w->response.data, "claim_id", claim_id.str); buffer_json_member_add_boolean(w->response.data, "bearer_protection", netdata_is_protected_by_bearer); } @@ -198,8 +196,8 @@ int registry_request_hello_json(RRDHOST *host, struct web_client *w, bool do_not buffer_json_add_array_item_object(w->response.data); buffer_json_member_add_string(w->response.data, "machine_guid", h->machine_guid); - if(h->node_id) - buffer_json_member_add_uuid(w->response.data, "node_id", h->node_id); + if(!UUIDiszero(h->node_id)) + buffer_json_member_add_uuid(w->response.data, "node_id", h->node_id.uuid); buffer_json_member_add_string(w->response.data, "hostname", rrdhost_registry_hostname(h)); buffer_json_object_close(w->response.data); @@ -519,16 +517,16 @@ void registry_statistics(void) { rrddim_add(stm, "machines_urls", NULL, 1, 1024, 
RRD_ALGORITHM_ABSOLUTE); } - struct aral_statistics *p_aral_stats = aral_statistics(registry.persons_aral); + struct aral_statistics *p_aral_stats = aral_get_statistics(registry.persons_aral); rrddim_set(stm, "persons", (collected_number)p_aral_stats->structures.allocated_bytes + (collected_number)p_aral_stats->malloc.allocated_bytes + (collected_number)p_aral_stats->mmap.allocated_bytes); - struct aral_statistics *m_aral_stats = aral_statistics(registry.machines_aral); + struct aral_statistics *m_aral_stats = aral_get_statistics(registry.machines_aral); rrddim_set(stm, "machines", (collected_number)m_aral_stats->structures.allocated_bytes + (collected_number)m_aral_stats->malloc.allocated_bytes + (collected_number)m_aral_stats->mmap.allocated_bytes); - struct aral_statistics *pu_aral_stats = aral_statistics(registry.person_urls_aral); + struct aral_statistics *pu_aral_stats = aral_get_statistics(registry.person_urls_aral); rrddim_set(stm, "persons_urls", (collected_number)pu_aral_stats->structures.allocated_bytes + (collected_number)pu_aral_stats->malloc.allocated_bytes + (collected_number)pu_aral_stats->mmap.allocated_bytes); - struct aral_statistics *mu_aral_stats = aral_statistics(registry.machine_urls_aral); + struct aral_statistics *mu_aral_stats = aral_get_statistics(registry.machine_urls_aral); rrddim_set(stm, "machines_urls", (collected_number)mu_aral_stats->structures.allocated_bytes + (collected_number)mu_aral_stats->malloc.allocated_bytes + (collected_number)mu_aral_stats->mmap.allocated_bytes); rrdset_done(stm); diff --git a/src/registry/registry.h b/src/registry/registry.h index 848eb0ac0..b2eb7c00d 100644 --- a/src/registry/registry.h +++ b/src/registry/registry.h @@ -76,7 +76,7 @@ void registry_statistics(void); const char *registry_get_this_machine_guid(void); char *registry_get_mgmt_api_key(void); -char *registry_get_this_machine_hostname(void); +const char *registry_get_this_machine_hostname(void); int regenerate_guid(const char *guid, char *result); diff --git a/src/registry/registry_init.c b/src/registry/registry_init.c index c291c6f82..b98c04bea 100644 --- a/src/registry/registry_init.c +++ b/src/registry/registry_init.c @@ -93,7 +93,7 @@ int registry_init(void) { // configuration options registry.save_registry_every_entries = (unsigned long long)config_get_number(CONFIG_SECTION_REGISTRY, "registry save db every new entries", 1000000); - registry.persons_expiration = config_get_number(CONFIG_SECTION_REGISTRY, "registry expire idle persons days", 365) * 86400; + registry.persons_expiration = config_get_duration_days(CONFIG_SECTION_REGISTRY, "registry expire idle persons", 365) * 86400; registry.registry_domain = config_get(CONFIG_SECTION_REGISTRY, "registry domain", ""); registry.registry_to_announce = config_get(CONFIG_SECTION_REGISTRY, "registry to announce", "https://registry.my-netdata.io"); registry.hostname = config_get(CONFIG_SECTION_REGISTRY, "registry hostname", netdata_configured_hostname); @@ -101,8 +101,8 @@ int registry_init(void) { registry.enable_cookies_samesite_secure = config_get_boolean(CONFIG_SECTION_REGISTRY, "enable cookies SameSite and Secure", 1); registry_update_cloud_base_url(); - setenv("NETDATA_REGISTRY_HOSTNAME", registry.hostname, 1); - setenv("NETDATA_REGISTRY_URL", registry.registry_to_announce, 1); + nd_setenv("NETDATA_REGISTRY_HOSTNAME", registry.hostname, 1); + nd_setenv("NETDATA_REGISTRY_URL", registry.registry_to_announce, 1); registry.max_url_length = (size_t)config_get_number(CONFIG_SECTION_REGISTRY, "max URL length", 1024); 
if(registry.max_url_length < 10) { diff --git a/src/registry/registry_internals.c b/src/registry/registry_internals.c index 54fad4254..51a861866 100644 --- a/src/registry/registry_internals.c +++ b/src/registry/registry_internals.c @@ -266,7 +266,7 @@ static inline int is_machine_guid_blacklisted(const char *guid) { return 0; } -char *registry_get_this_machine_hostname(void) { +const char *registry_get_this_machine_hostname(void) { return registry.hostname; } @@ -315,7 +315,7 @@ const char *registry_get_this_machine_guid(void) { close(fd); } - setenv("NETDATA_REGISTRY_UNIQUE_ID", guid, 1); + nd_setenv("NETDATA_REGISTRY_UNIQUE_ID", guid, 1); return guid; } diff --git a/src/registry/registry_internals.h b/src/registry/registry_internals.h index c7f8f43dd..39d37e4f0 100644 --- a/src/registry/registry_internals.h +++ b/src/registry/registry_internals.h @@ -30,10 +30,10 @@ struct registry { // configuration unsigned long long save_registry_every_entries; - char *registry_domain; - char *hostname; - char *registry_to_announce; - char *cloud_base_url; + const char *registry_domain; + const char *hostname; + const char *registry_to_announce; + const char *cloud_base_url; time_t persons_expiration; // seconds to expire idle persons int verify_cookies_redirects; int enable_cookies_samesite_secure; @@ -42,10 +42,10 @@ struct registry { size_t max_name_length; // file/path names - char *pathname; - char *db_filename; - char *log_filename; - char *machine_guid_filename; + const char *pathname; + const char *db_filename; + const char *log_filename; + const char *machine_guid_filename; // open files FILE *log_fp; diff --git a/src/streaming/README.md b/src/streaming/README.md index fe4e01bae..74b5691d0 100644 --- a/src/streaming/README.md +++ b/src/streaming/README.md @@ -30,6 +30,8 @@ node**. This file is automatically generated by Netdata the first time it is sta #### `[stream]` section +This section is used by the sending Netdata. + | Setting | Default | Description | |-------------------------------------------------|---------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | `enabled` | `no` | Whether this node streams metrics to any parent. Change to `yes` to enable streaming. | @@ -38,34 +40,62 @@ node**. This file is automatically generated by Netdata the first time it is sta | `CApath` | `/etc/ssl/certs/` | The directory where known certificates are found. Defaults to OpenSSL's default path. | | `CAfile` | `/etc/ssl/certs/cert.pem` | Add a parent node certificate to the list of known certificates in `CAPath`. | | `api key` | | The `API_KEY` to use as the child node. | -| `timeout seconds` | `60` | The timeout to connect and send metrics to a parent. | +| `timeout` | `1m` | The timeout to connect and send metrics to a parent. | | `default port` | `19999` | The port to use if `destination` does not specify one. | | [`send charts matching`](#send-charts-matching) | `*` | A space-separated list of [Netdata simple patterns](/src/libnetdata/simple_pattern/README.md) to filter which charts are streamed. [Read more →](#send-charts-matching) | | `buffer size bytes` | `10485760` | The size of the buffer to use when sending metrics. The default `10485760` equals a buffer of 10MB, which is good for 60 seconds of data. Increase this if you expect latencies higher than that. 
The buffer is flushed on reconnect. | -| `reconnect delay seconds` | `5` | How long to wait until retrying to connect to the parent node. | +| `reconnect delay` | `5s` | How long to wait until retrying to connect to the parent node. | | `initial clock resync iterations` | `60` | Sync the clock of charts for how many seconds when starting. | | `parent using h2o` | `no` | Set to yes if you are connecting to the parent through its h2o webserver/port. Currently there is no reason to set this to `yes` unless you are testing the new h2o-based netdata webserver. When production-ready, this will be set to `yes` by default. | -### `[API_KEY]` and `[MACHINE_GUID]` sections - -| Setting | Default | Description | -|-----------------------------------------------|----------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `enabled` | `no` | Whether this API KEY enabled or disabled. | -| [`allow from`](#allow-from) | `*` | A space-separated list of [Netdata simple patterns](/src/libnetdata/simple_pattern/README.md) matching the IPs of nodes that will stream metrics using this API key. [Read more →](#allow-from) | -| `default history` | `3600` | The default amount of child metrics history to retain when using the `ram` memory mode. | -| [`default memory mode`](#default-memory-mode) | `ram` | The [database](/src/database/README.md) to use for all nodes using this `API_KEY`. Valid settings are `dbengine`, `ram`, or `none`. [Read more →](#default-memory-mode) | -| `health enabled by default` | `auto` | Whether alerts and notifications should be enabled for nodes using this `API_KEY`. `auto` enables alerts when the child is connected. `yes` enables alerts always, and `no` disables alerts. | -| `default postpone alarms on connect seconds` | `60` | Postpone alerts and notifications for a period of time after the child connects. | -| `default health log history` | `432000` | History of health log events (in seconds) kept in the database. | -| `default proxy enabled` | | Route metrics through a proxy. | -| `default proxy destination` | | Space-separated list of `IP:PORT` for proxies. | -| `default proxy api key` | | The `API_KEY` of the proxy. | -| `default send charts matching` | `*` | See [`send charts matching`](#send-charts-matching). | -| `enable compression` | `yes` | Enable/disable stream compression. | -| `enable replication` | `yes` | Enable/disable replication. | -| `seconds to replicate` | `86400` | How many seconds of data to replicate from each child at a time | -| `seconds per replication step` | `600` | The duration we want to replicate per each replication step. | -| `is ephemeral node` | `no` | Indicate whether this child is an ephemeral node. An ephemeral node will become unavailable after the specified duration of "cleanup ephemeral hosts after secs" from the time of the node's last connection. | +### `[API_KEY]` sections + +This section defines an API key for other agents to connect to this Netdata. + +| Setting | Default | Description | +|------------------------------|------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `enabled` | `no` | Whether this API key is enabled or disabled. | +| `type` | `api` | This section defines an API key.
| +| [`allow from`](#allow-from) | `*` | A space-separated list of [Netdata simple patterns](/src/libnetdata/simple_pattern/README.md) matching the IPs of nodes that will stream metrics using this API key. [Read more →](#allow-from) | +| `retention` | `1h` | The default amount of child metrics history to retain when using the `ram` db. | +| [`db`](#default-memory-mode) | `dbengine` | The [database](/src/database/README.md) to use for all nodes using this `API_KEY`. Valid settings are `dbengine`, `ram`, or `none`. [Read more →](#default-memory-mode) | +| `health enabled by default` | `auto` | Whether alerts and notifications should be enabled for nodes using this `API_KEY`. `auto` enables alerts when the child is connected. `yes` enables alerts always, and `no` disables alerts. | +| `postpone alerts on connect` | `1m` | Postpone alerts and notifications for a period of time after the child connects. | +| `health log retention` | `5d` | History of health log events (in seconds) kept in the database. | +| `proxy enabled` | | Route metrics through a proxy. | +| `proxy destination` | | Space-separated list of `IP:PORT` for proxies. | +| `proxy api key` | | The `API_KEY` of the proxy. | +| `send charts matching` | `*` | See [`send charts matching`](#send-charts-matching). | +| `enable compression` | `yes` | Enable/disable stream compression. | +| `enable replication` | `yes` | Enable/disable replication. | +| `replication period` | `1d` | Limits the maximum window that will be replicated from each child. | +| `replication step` | `10m` | The duration we want to replicate per each replication step. | +| `is ephemeral node` | `no` | Indicate whether this child is an ephemeral node. An ephemeral node will become unavailable after the specified duration of "cleanup ephemeral hosts after" from the time of the node's last connection. | + + +### `[MACHINE_GUID]` sections + +This section is about customizing configuration for specific agents. It allows many agents to share the same API key, while allowing per-agent customization. + +| Setting | Default | Description | +|------------------------------|------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `enabled` | `no` | Whether this MACHINE_GUID is enabled or disabled. | +| `type` | `machine` | This section defines the configuration for a specific agent. | +| [`allow from`](#allow-from) | `*` | A space-separated list of [Netdata simple patterns](/src/libnetdata/simple_pattern/README.md) matching the IPs of nodes that will stream metrics using this API key. [Read more →](#allow-from) | +| `retention` | `3600` | The default amount of child metrics history to retain when using the `ram` db. | +| [`db`](#default-memory-mode) | `dbengine` | The [database](/src/database/README.md) to use for all nodes using this `API_KEY`. Valid settings are `dbengine`, `ram`, or `none`. [Read more →](#default-memory-mode) | +| `health enabled` | `auto` | Whether alerts and notifications should be enabled for nodes using this `API_KEY`. `auto` enables alerts when the child is connected. `yes` enables alerts always, and `no` disables alerts. | +| `postpone alerts on connect` | `1m` | Postpone alerts and notifications for a period of time after the child connects. | +| `health log retention` | `5d` | History of health log events (in seconds) kept in the database.
| +| `proxy enabled` | | Route metrics through a proxy. | +| `proxy destination` | | Space-separated list of `IP:PORT` for proxies. | +| `proxy api key` | | The `API_KEY` of the proxy. | +| `send charts matching` | `*` | See [`send charts matching`](#send-charts-matching). | +| `enable compression` | `yes` | Enable/disable stream compression. | +| `enable replication` | `yes` | Enable/disable replication. | +| `replication period` | `1d` | Limits the maximum window that will be replicated from each child. | +| `replication step` | `10m` | The duration we want to replicate per each replication step. | +| `is ephemeral node` | `no` | Indicate whether this child is an ephemeral node. An ephemeral node will become unavailable after the specified duration of "cleanup ephemeral hosts after" from the time of the node's last connection. | #### `destination` @@ -81,7 +111,7 @@ the following format: `[PROTOCOL:]HOST[%INTERFACE][:PORT][:SSL]`. To enable TCP streaming to a parent node at `203.0.113.0` on port `20000` and with TLS/SSL encryption: -```conf +```text [stream] destination = tcp:203.0.113.0:20000:SSL ``` @@ -95,14 +125,14 @@ The default is a single wildcard `*`, which streams all charts. To send only a few charts, list them explicitly, or list a group using a wildcard. To send _only_ the `apps.cpu` chart and charts with contexts beginning with `system.`: -```conf +```text [stream] send charts matching = apps.cpu system.* ``` To send all but a few charts, use `!` to create a negative match. To send _all_ charts _but_ `apps.cpu`: -```conf +```text [stream] send charts matching = !apps.cpu * ``` @@ -116,14 +146,14 @@ The default is `*`, which accepts all requests including the `API_KEY`. To allow from only a specific IP address: -```conf +```text [API_KEY] allow from = 203.0.113.10 ``` To allow all IPs starting with `10.*`, except `10.1.2.3`: -```conf +```text [API_KEY] allow from = !10.1.2.3 10.* ``` @@ -131,7 +161,7 @@ To allow all IPs starting with `10.*`, except `10.1.2.3`: > If you set specific IP addresses here, and also use the `allow connections` setting in the `[web]` section of > `netdata.conf`, be sure to add the IP address there so that it can access the API port. -#### `default memory mode` +#### `db` The [database](/src/database/README.md) to use for all nodes using this `API_KEY`. Valid settings are `dbengine`, `ram`, or `none`. @@ -142,19 +172,15 @@ Valid settings are `dbengine`, `ram`, or `none`. streaming configurations that use ephemeral nodes. - `none`: No database. -When using `default memory mode = dbengine`, the parent node creates a separate instance of the TSDB to store metrics -from child nodes. The [size of _each_ instance is configurable](/docs/netdata-agent/configuration/optimizing-metrics-database/change-metrics-storage.md) with the `page -cache size` and `dbengine multihost disk space` settings in the `[global]` section in `netdata.conf`. - ### `netdata.conf` -| Setting | Default | Description | -|--------------------------------------------|-------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `[global]` section | | | -| `memory mode` | `dbengine` | Determines the [database type](/src/database/README.md) to be used on that node. Other options settings include `none`, and `ram`.
`none` disables the database at this host. This also disables alerts and notifications, as those can't run without a database. | -| `[web]` section | | | -| `mode` | `static-threaded` | Determines the [web server](/src/web/server/README.md) type. The other option is `none`, which disables the dashboard, API, and registry. | -| `accept a streaming request every seconds` | `0` | Set a limit on how often a parent node accepts streaming requests from child nodes. `0` equals no limit. If this is set, you may see `... too busy to accept new streaming request. Will be allowed in X secs` in Netdata's `error.log`. | +| Setting | Default | Description | +|------------------------------------|-------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `[db]` section | | | +| `mode` | `dbengine` | Determines the [database type](/src/database/README.md) to be used on that node. Other options include `none` and `ram`. `none` disables the database at this host. This also disables alerts and notifications, as those can't run without a database. | +| `[web]` section | | | +| `mode` | `static-threaded` | Determines the [web server](/src/web/server/README.md) type. The other option is `none`, which disables the dashboard, API, and registry. | +| `accept a streaming request every` | `off` | Set a limit on how often a parent node accepts streaming requests from child nodes. `0` equals no limit. If this is set, you may see `... too busy to accept new streaming request. Will be allowed in X secs` in Netdata's `error.log`. | ### Basic use cases @@ -175,16 +201,16 @@ with the `[MACHINE_GUID]` section. For example, the metrics streamed from only the child node with `MACHINE_GUID` are saved in memory, not using the default `dbengine` as specified by the `API_KEY`, and alerts are disabled. -```conf +```text [API_KEY] enabled = yes - default memory mode = dbengine - health enabled by default = auto + db = dbengine + health enabled = auto allow from = * [MACHINE_GUID] enabled = yes - memory mode = ram + db = ram health enabled = no ``` @@ -405,7 +431,7 @@ In the following example, the proxy receives metrics from a child node using the `66666666-7777-8888-9999-000000000000`, then stores metrics using `dbengine`. It then uses the `API_KEY` of `11111111-2222-3333-4444-555555555555` to proxy those same metrics on to a parent node at `203.0.113.0`. -```conf +```text [stream] enabled = yes destination = 203.0.113.0 @@ -413,7 +439,7 @@ In the following example, the proxy receives metrics from a child node using the [66666666-7777-8888-9999-000000000000] enabled = yes - default memory mode = dbengine + db = dbengine ``` ### Ephemeral nodes @@ -423,13 +449,13 @@ metrics to any number of permanently-running parent nodes. On the parent, set the following in `stream.conf`: -```conf +```text [11111111-2222-3333-4444-555555555555] # enable/disable this API key enabled = yes # one hour of data for each of the child nodes - default history = 3600 + history = 1h # do not save child metrics on disk default memory = ram @@ -455,9 +481,9 @@ On the child nodes, set the following in `stream.conf`: In addition, edit `netdata.conf` on each child node to disable the database and alerts.
```bash -[global] +[db] # disable the local database - memory mode = none + db = none [health] # disable health checks @@ -471,16 +497,16 @@ This replication process ensures data continuity even if child nodes temporarily Replication is enabled by default in Netdata, but you can customize the replication behavior by modifying the `[API_KEY]` section of the `stream.conf` file. Here's an example configuration: -```conf +```text [11111111-2222-3333-4444-555555555555] # Enable replication for all hosts using this api key. Default: yes. enable replication = yes - # How many seconds of data to replicate from each child at a time. Default: a day (86400 seconds). - seconds to replicate = 86400 + # How many seconds of data to replicate from each child at a time. Default: a day. + replication period = 1d - # The duration we want to replicate per each replication step. Default: 600 seconds (10 minutes). - seconds per replication step = 600 + # The duration we want to replicate per each replication step. Default: 10 minutes. + replication step = 10m ``` You can monitor the replication process in two ways: @@ -597,9 +623,9 @@ ERROR : STREAM_SENDER[CHILD HOSTNAME] : STREAM child HOSTNAME [send to PARENT HO ### Stream charts wrong Chart data needs to be consistent between child and parent nodes. If there are differences between chart data on -a parent and a child, such as gaps in metrics collection, it most often means your child's `memory mode` +a parent and a child, such as gaps in metrics collection, it most often means your child's `[db].db` setting does not match the parent's. To learn more about the different ways Netdata can store metrics, and thus keep chart -data consistent, read our [memory mode documentation](/src/database/README.md). +data consistent, read our [db documentation](/src/database/README.md). 
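+For a sense of the arithmetic behind the replication completion metric mentioned above, the percentage that `pluginsd_replay_end()` computes in this patch is simply the replicated portion of the requested window over the elapsed window. A minimal standalone sketch (illustrative names only, not part of the agent's API):
+
+```c
+#include <stdio.h>
+#include <time.h>
+
+// Fraction of the replication window covered so far, as a percentage.
+// Mirrors (current - started) * 100.0 / (now - started) from pluginsd_replay_end().
+static double replication_completion_percent(time_t started, time_t current, time_t now) {
+    if (started <= 0 || current <= started || now <= started)
+        return 0.0;
+    return (double)(current - started) * 100.0 / (double)(now - started);
+}
+
+int main(void) {
+    time_t now = time(NULL);
+    time_t started = now - 3600;  // replication of this chart began an hour ago
+    time_t current = now - 900;   // samples replicated up to 15 minutes ago
+    printf("replication completion: %.2f%%\n",
+           replication_completion_percent(started, current, now));  // prints 75.00%
+    return 0;
+}
+```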
### Forbidding access diff --git a/src/streaming/common.h b/src/streaming/common.h deleted file mode 100644 index b7292f4d0..000000000 --- a/src/streaming/common.h +++ /dev/null @@ -1,9 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#ifndef STREAMING_COMMON_H -#define STREAMING_COMMON_H - -#define NETDATA_STREAM_URL "/stream" -#define NETDATA_STREAM_PROTO_NAME "netdata_stream/2.0" - -#endif /* STREAMING_COMMON_H */ diff --git a/src/streaming/compression.c b/src/streaming/compression.c deleted file mode 100644 index a94c8a0a6..000000000 --- a/src/streaming/compression.c +++ /dev/null @@ -1,707 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include "compression.h" - -#include "compression_gzip.h" - -#ifdef ENABLE_LZ4 -#include "compression_lz4.h" -#endif - -#ifdef ENABLE_ZSTD -#include "compression_zstd.h" -#endif - -#ifdef ENABLE_BROTLI -#include "compression_brotli.h" -#endif - -int rrdpush_compression_levels[COMPRESSION_ALGORITHM_MAX] = { - [COMPRESSION_ALGORITHM_NONE] = 0, - [COMPRESSION_ALGORITHM_ZSTD] = 3, // 1 (faster) - 22 (smaller) - [COMPRESSION_ALGORITHM_LZ4] = 1, // 1 (smaller) - 9 (faster) - [COMPRESSION_ALGORITHM_BROTLI] = 3, // 0 (faster) - 11 (smaller) - [COMPRESSION_ALGORITHM_GZIP] = 1, // 1 (faster) - 9 (smaller) -}; - -void rrdpush_parse_compression_order(struct receiver_state *rpt, const char *order) { - // empty all slots - for(size_t i = 0; i < COMPRESSION_ALGORITHM_MAX ;i++) - rpt->config.compression_priorities[i] = STREAM_CAP_NONE; - - char *s = strdupz(order); - - char *words[COMPRESSION_ALGORITHM_MAX + 100] = { NULL }; - size_t num_words = quoted_strings_splitter_pluginsd(s, words, COMPRESSION_ALGORITHM_MAX + 100); - size_t slot = 0; - STREAM_CAPABILITIES added = STREAM_CAP_NONE; - for(size_t i = 0; i < num_words && slot < COMPRESSION_ALGORITHM_MAX ;i++) { - if((STREAM_CAP_ZSTD_AVAILABLE) && strcasecmp(words[i], "zstd") == 0 && !(added & STREAM_CAP_ZSTD)) { - rpt->config.compression_priorities[slot++] = STREAM_CAP_ZSTD; - added |= STREAM_CAP_ZSTD; - } - else if((STREAM_CAP_LZ4_AVAILABLE) && strcasecmp(words[i], "lz4") == 0 && !(added & STREAM_CAP_LZ4)) { - rpt->config.compression_priorities[slot++] = STREAM_CAP_LZ4; - added |= STREAM_CAP_LZ4; - } - else if((STREAM_CAP_BROTLI_AVAILABLE) && strcasecmp(words[i], "brotli") == 0 && !(added & STREAM_CAP_BROTLI)) { - rpt->config.compression_priorities[slot++] = STREAM_CAP_BROTLI; - added |= STREAM_CAP_BROTLI; - } - else if(strcasecmp(words[i], "gzip") == 0 && !(added & STREAM_CAP_GZIP)) { - rpt->config.compression_priorities[slot++] = STREAM_CAP_GZIP; - added |= STREAM_CAP_GZIP; - } - } - - freez(s); - - // make sure all participate - if((STREAM_CAP_ZSTD_AVAILABLE) && slot < COMPRESSION_ALGORITHM_MAX && !(added & STREAM_CAP_ZSTD)) - rpt->config.compression_priorities[slot++] = STREAM_CAP_ZSTD; - if((STREAM_CAP_LZ4_AVAILABLE) && slot < COMPRESSION_ALGORITHM_MAX && !(added & STREAM_CAP_LZ4)) - rpt->config.compression_priorities[slot++] = STREAM_CAP_LZ4; - if((STREAM_CAP_BROTLI_AVAILABLE) && slot < COMPRESSION_ALGORITHM_MAX && !(added & STREAM_CAP_BROTLI)) - rpt->config.compression_priorities[slot++] = STREAM_CAP_BROTLI; - if(slot < COMPRESSION_ALGORITHM_MAX && !(added & STREAM_CAP_GZIP)) - rpt->config.compression_priorities[slot++] = STREAM_CAP_GZIP; -} - -void rrdpush_select_receiver_compression_algorithm(struct receiver_state *rpt) { - if (!rpt->config.rrdpush_compression) - rpt->capabilities &= ~STREAM_CAP_COMPRESSIONS_AVAILABLE; - - // select the right compression before sending our capabilities to 
the child - if(stream_has_more_than_one_capability_of(rpt->capabilities, STREAM_CAP_COMPRESSIONS_AVAILABLE)) { - STREAM_CAPABILITIES compressions = rpt->capabilities & STREAM_CAP_COMPRESSIONS_AVAILABLE; - for(int i = 0; i < COMPRESSION_ALGORITHM_MAX; i++) { - STREAM_CAPABILITIES c = rpt->config.compression_priorities[i]; - - if(!(c & STREAM_CAP_COMPRESSIONS_AVAILABLE)) - continue; - - if(compressions & c) { - STREAM_CAPABILITIES exclude = compressions; - exclude &= ~c; - - rpt->capabilities &= ~exclude; - break; - } - } - } -} - -bool rrdpush_compression_initialize(struct sender_state *s) { - rrdpush_compressor_destroy(&s->compressor); - - // IMPORTANT - // KEEP THE SAME ORDER IN DECOMPRESSION - - if(stream_has_capability(s, STREAM_CAP_ZSTD)) - s->compressor.algorithm = COMPRESSION_ALGORITHM_ZSTD; - else if(stream_has_capability(s, STREAM_CAP_LZ4)) - s->compressor.algorithm = COMPRESSION_ALGORITHM_LZ4; - else if(stream_has_capability(s, STREAM_CAP_BROTLI)) - s->compressor.algorithm = COMPRESSION_ALGORITHM_BROTLI; - else if(stream_has_capability(s, STREAM_CAP_GZIP)) - s->compressor.algorithm = COMPRESSION_ALGORITHM_GZIP; - else - s->compressor.algorithm = COMPRESSION_ALGORITHM_NONE; - - if(s->compressor.algorithm != COMPRESSION_ALGORITHM_NONE) { - s->compressor.level = rrdpush_compression_levels[s->compressor.algorithm]; - rrdpush_compressor_init(&s->compressor); - return true; - } - - return false; -} - -bool rrdpush_decompression_initialize(struct receiver_state *rpt) { - rrdpush_decompressor_destroy(&rpt->decompressor); - - // IMPORTANT - // KEEP THE SAME ORDER IN COMPRESSION - - if(stream_has_capability(rpt, STREAM_CAP_ZSTD)) - rpt->decompressor.algorithm = COMPRESSION_ALGORITHM_ZSTD; - else if(stream_has_capability(rpt, STREAM_CAP_LZ4)) - rpt->decompressor.algorithm = COMPRESSION_ALGORITHM_LZ4; - else if(stream_has_capability(rpt, STREAM_CAP_BROTLI)) - rpt->decompressor.algorithm = COMPRESSION_ALGORITHM_BROTLI; - else if(stream_has_capability(rpt, STREAM_CAP_GZIP)) - rpt->decompressor.algorithm = COMPRESSION_ALGORITHM_GZIP; - else - rpt->decompressor.algorithm = COMPRESSION_ALGORITHM_NONE; - - if(rpt->decompressor.algorithm != COMPRESSION_ALGORITHM_NONE) { - rrdpush_decompressor_init(&rpt->decompressor); - return true; - } - - return false; -} - -/* -* In case of stream compression buffer overflow -* Inform the user through the error log file and -* deactivate compression by downgrading the stream protocol. -*/ -void rrdpush_compression_deactivate(struct sender_state *s) { - switch(s->compressor.algorithm) { - case COMPRESSION_ALGORITHM_MAX: - case COMPRESSION_ALGORITHM_NONE: - netdata_log_error("STREAM_COMPRESSION: compression error on 'host:%s' without any compression enabled. Ignoring error.", - rrdhost_hostname(s->host)); - break; - - case COMPRESSION_ALGORITHM_GZIP: - netdata_log_error("STREAM_COMPRESSION: GZIP compression error on 'host:%s'. Disabling GZIP for this node.", - rrdhost_hostname(s->host)); - s->disabled_capabilities |= STREAM_CAP_GZIP; - break; - - case COMPRESSION_ALGORITHM_LZ4: - netdata_log_error("STREAM_COMPRESSION: LZ4 compression error on 'host:%s'. Disabling LZ4 for this node.", - rrdhost_hostname(s->host)); - s->disabled_capabilities |= STREAM_CAP_LZ4; - break; - - case COMPRESSION_ALGORITHM_ZSTD: - netdata_log_error("STREAM_COMPRESSION: ZSTD compression error on 'host:%s'.
Disabling ZSTD for this node.", - rrdhost_hostname(s->host)); - s->disabled_capabilities |= STREAM_CAP_ZSTD; - break; - - case COMPRESSION_ALGORITHM_BROTLI: - netdata_log_error("STREAM_COMPRESSION: BROTLI compression error on 'host:%s'. Disabling BROTLI for this node.", - rrdhost_hostname(s->host)); - s->disabled_capabilities |= STREAM_CAP_BROTLI; - break; - } -} - -// ---------------------------------------------------------------------------- -// compressor public API - -void rrdpush_compressor_init(struct compressor_state *state) { - switch(state->algorithm) { -#ifdef ENABLE_ZSTD - case COMPRESSION_ALGORITHM_ZSTD: - rrdpush_compressor_init_zstd(state); - break; -#endif - -#ifdef ENABLE_LZ4 - case COMPRESSION_ALGORITHM_LZ4: - rrdpush_compressor_init_lz4(state); - break; -#endif - -#ifdef ENABLE_BROTLI - case COMPRESSION_ALGORITHM_BROTLI: - rrdpush_compressor_init_brotli(state); - break; -#endif - - default: - case COMPRESSION_ALGORITHM_GZIP: - rrdpush_compressor_init_gzip(state); - break; - } - - simple_ring_buffer_reset(&state->input); - simple_ring_buffer_reset(&state->output); -} - -void rrdpush_compressor_destroy(struct compressor_state *state) { - switch(state->algorithm) { -#ifdef ENABLE_ZSTD - case COMPRESSION_ALGORITHM_ZSTD: - rrdpush_compressor_destroy_zstd(state); - break; -#endif - -#ifdef ENABLE_LZ4 - case COMPRESSION_ALGORITHM_LZ4: - rrdpush_compressor_destroy_lz4(state); - break; -#endif - -#ifdef ENABLE_BROTLI - case COMPRESSION_ALGORITHM_BROTLI: - rrdpush_compressor_destroy_brotli(state); - break; -#endif - - default: - case COMPRESSION_ALGORITHM_GZIP: - rrdpush_compressor_destroy_gzip(state); - break; - } - - state->initialized = false; - - simple_ring_buffer_destroy(&state->input); - simple_ring_buffer_destroy(&state->output); -} - -size_t rrdpush_compress(struct compressor_state *state, const char *data, size_t size, const char **out) { - size_t ret = 0; - - switch(state->algorithm) { -#ifdef ENABLE_ZSTD - case COMPRESSION_ALGORITHM_ZSTD: - ret = rrdpush_compress_zstd(state, data, size, out); - break; -#endif - -#ifdef ENABLE_LZ4 - case COMPRESSION_ALGORITHM_LZ4: - ret = rrdpush_compress_lz4(state, data, size, out); - break; -#endif - -#ifdef ENABLE_BROTLI - case COMPRESSION_ALGORITHM_BROTLI: - ret = rrdpush_compress_brotli(state, data, size, out); - break; -#endif - - default: - case COMPRESSION_ALGORITHM_GZIP: - ret = rrdpush_compress_gzip(state, data, size, out); - break; - } - - if(unlikely(ret >= COMPRESSION_MAX_CHUNK)) { - netdata_log_error("RRDPUSH_COMPRESS: compressed data is %zu bytes, which is >= than the max chunk size %d", - ret, COMPRESSION_MAX_CHUNK); - return 0; - } - - return ret; -} - -// ---------------------------------------------------------------------------- -// decompressor public API - -void rrdpush_decompressor_destroy(struct decompressor_state *state) { - if(unlikely(!state->initialized)) - return; - - switch(state->algorithm) { -#ifdef ENABLE_ZSTD - case COMPRESSION_ALGORITHM_ZSTD: - rrdpush_decompressor_destroy_zstd(state); - break; -#endif - -#ifdef ENABLE_LZ4 - case COMPRESSION_ALGORITHM_LZ4: - rrdpush_decompressor_destroy_lz4(state); - break; -#endif - -#ifdef ENABLE_BROTLI - case COMPRESSION_ALGORITHM_BROTLI: - rrdpush_decompressor_destroy_brotli(state); - break; -#endif - - default: - case COMPRESSION_ALGORITHM_GZIP: - rrdpush_decompressor_destroy_gzip(state); - break; - } - - simple_ring_buffer_destroy(&state->output); - - state->initialized = false; -} - -void rrdpush_decompressor_init(struct decompressor_state *state) { - 
switch(state->algorithm) { -#ifdef ENABLE_ZSTD - case COMPRESSION_ALGORITHM_ZSTD: - rrdpush_decompressor_init_zstd(state); - break; -#endif - -#ifdef ENABLE_LZ4 - case COMPRESSION_ALGORITHM_LZ4: - rrdpush_decompressor_init_lz4(state); - break; -#endif - -#ifdef ENABLE_BROTLI - case COMPRESSION_ALGORITHM_BROTLI: - rrdpush_decompressor_init_brotli(state); - break; -#endif - - default: - case COMPRESSION_ALGORITHM_GZIP: - rrdpush_decompressor_init_gzip(state); - break; - } - - state->signature_size = RRDPUSH_COMPRESSION_SIGNATURE_SIZE; - simple_ring_buffer_reset(&state->output); -} - -size_t rrdpush_decompress(struct decompressor_state *state, const char *compressed_data, size_t compressed_size) { - if (unlikely(state->output.read_pos != state->output.write_pos)) - fatal("RRDPUSH_DECOMPRESS: asked to decompress new data, while there are unread data in the decompression buffer!"); - - size_t ret = 0; - - switch(state->algorithm) { -#ifdef ENABLE_ZSTD - case COMPRESSION_ALGORITHM_ZSTD: - ret = rrdpush_decompress_zstd(state, compressed_data, compressed_size); - break; -#endif - -#ifdef ENABLE_LZ4 - case COMPRESSION_ALGORITHM_LZ4: - ret = rrdpush_decompress_lz4(state, compressed_data, compressed_size); - break; -#endif - -#ifdef ENABLE_BROTLI - case COMPRESSION_ALGORITHM_BROTLI: - ret = rrdpush_decompress_brotli(state, compressed_data, compressed_size); - break; -#endif - - default: - case COMPRESSION_ALGORITHM_GZIP: - ret = rrdpush_decompress_gzip(state, compressed_data, compressed_size); - break; - } - - // for backwards compatibility we cannot check for COMPRESSION_MAX_MSG_SIZE, - // because old children may send this big payloads. - if(unlikely(ret > COMPRESSION_MAX_CHUNK)) { - netdata_log_error("RRDPUSH_DECOMPRESS: decompressed data is %zu bytes, which is bigger than the max msg size %d", - ret, COMPRESSION_MAX_CHUNK); - return 0; - } - - return ret; -} - -// ---------------------------------------------------------------------------- -// unit test - -static inline long int my_random (void) { - return random(); -} - -void unittest_generate_random_name(char *dst, size_t size) { - if(size < 7) - size = 7; - - size_t len = 5 + my_random() % (size - 6); - - for(size_t i = 0; i < len ; i++) { - if(my_random() % 2 == 0) - dst[i] = 'A' + my_random() % 26; - else - dst[i] = 'a' + my_random() % 26; - } - - dst[len] = '\0'; -} - -void unittest_generate_message(BUFFER *wb, time_t now_s, size_t counter) { - bool with_slots = true; - NUMBER_ENCODING integer_encoding = NUMBER_ENCODING_BASE64; - NUMBER_ENCODING doubles_encoding = NUMBER_ENCODING_BASE64; - time_t update_every = 1; - time_t point_end_time_s = now_s; - time_t wall_clock_time_s = now_s; - size_t chart_slot = counter + 1; - size_t dimensions = 2 + my_random() % 5; - char chart[RRD_ID_LENGTH_MAX + 1] = "name"; - unittest_generate_random_name(chart, 5 + my_random() % 30); - - buffer_fast_strcat(wb, PLUGINSD_KEYWORD_BEGIN_V2, sizeof(PLUGINSD_KEYWORD_BEGIN_V2) - 1); - - if(with_slots) { - buffer_fast_strcat(wb, " "PLUGINSD_KEYWORD_SLOT":", sizeof(PLUGINSD_KEYWORD_SLOT) - 1 + 2); - buffer_print_uint64_encoded(wb, integer_encoding, chart_slot); - } - - buffer_fast_strcat(wb, " '", 2); - buffer_strcat(wb, chart); - buffer_fast_strcat(wb, "' ", 2); - buffer_print_uint64_encoded(wb, integer_encoding, update_every); - buffer_fast_strcat(wb, " ", 1); - buffer_print_uint64_encoded(wb, integer_encoding, point_end_time_s); - buffer_fast_strcat(wb, " ", 1); - if(point_end_time_s == wall_clock_time_s) - buffer_fast_strcat(wb, "#", 1); - else - 
buffer_print_uint64_encoded(wb, integer_encoding, wall_clock_time_s); - buffer_fast_strcat(wb, "\n", 1); - - - for(size_t d = 0; d < dimensions ;d++) { - size_t dim_slot = d + 1; - char dim_id[RRD_ID_LENGTH_MAX + 1] = "dimension"; - unittest_generate_random_name(dim_id, 10 + my_random() % 20); - int64_t last_collected_value = (my_random() % 2 == 0) ? (int64_t)(counter + d) : (int64_t)my_random(); - NETDATA_DOUBLE value = (my_random() % 2 == 0) ? (NETDATA_DOUBLE)my_random() / ((NETDATA_DOUBLE)my_random() + 1) : (NETDATA_DOUBLE)last_collected_value; - SN_FLAGS flags = (my_random() % 1000 == 0) ? SN_FLAG_NONE : SN_FLAG_NOT_ANOMALOUS; - - buffer_fast_strcat(wb, PLUGINSD_KEYWORD_SET_V2, sizeof(PLUGINSD_KEYWORD_SET_V2) - 1); - - if(with_slots) { - buffer_fast_strcat(wb, " "PLUGINSD_KEYWORD_SLOT":", sizeof(PLUGINSD_KEYWORD_SLOT) - 1 + 2); - buffer_print_uint64_encoded(wb, integer_encoding, dim_slot); - } - - buffer_fast_strcat(wb, " '", 2); - buffer_strcat(wb, dim_id); - buffer_fast_strcat(wb, "' ", 2); - buffer_print_int64_encoded(wb, integer_encoding, last_collected_value); - buffer_fast_strcat(wb, " ", 1); - - if((NETDATA_DOUBLE)last_collected_value == value) - buffer_fast_strcat(wb, "#", 1); - else - buffer_print_netdata_double_encoded(wb, doubles_encoding, value); - - buffer_fast_strcat(wb, " ", 1); - buffer_print_sn_flags(wb, flags, true); - buffer_fast_strcat(wb, "\n", 1); - } - - buffer_fast_strcat(wb, PLUGINSD_KEYWORD_END_V2 "\n", sizeof(PLUGINSD_KEYWORD_END_V2) - 1 + 1); -} - -int unittest_rrdpush_compression_speed(compression_algorithm_t algorithm, const char *name) { - fprintf(stderr, "\nTesting streaming compression speed with %s\n", name); - - struct compressor_state cctx = { - .initialized = false, - .algorithm = algorithm, - }; - struct decompressor_state dctx = { - .initialized = false, - .algorithm = algorithm, - }; - - rrdpush_compressor_init(&cctx); - rrdpush_decompressor_init(&dctx); - - int errors = 0; - - BUFFER *wb = buffer_create(COMPRESSION_MAX_MSG_SIZE, NULL); - time_t now_s = now_realtime_sec(); - usec_t compression_ut = 0; - usec_t decompression_ut = 0; - size_t bytes_compressed = 0; - size_t bytes_uncompressed = 0; - - usec_t compression_started_ut = now_monotonic_usec(); - usec_t decompression_started_ut = compression_started_ut; - - for(int i = 0; i < 10000 ;i++) { - compression_started_ut = now_monotonic_usec(); - decompression_ut += compression_started_ut - decompression_started_ut; - - buffer_flush(wb); - while(buffer_strlen(wb) < COMPRESSION_MAX_MSG_SIZE - 1024) - unittest_generate_message(wb, now_s, i); - - const char *txt = buffer_tostring(wb); - size_t txt_len = buffer_strlen(wb); - bytes_uncompressed += txt_len; - - const char *out; - size_t size = rrdpush_compress(&cctx, txt, txt_len, &out); - - bytes_compressed += size; - decompression_started_ut = now_monotonic_usec(); - compression_ut += decompression_started_ut - compression_started_ut; - - if(size == 0) { - fprintf(stderr, "iteration %d: compressed size %zu is zero\n", - i, size); - errors++; - goto cleanup; - } - else if(size >= COMPRESSION_MAX_CHUNK) { - fprintf(stderr, "iteration %d: compressed size %zu exceeds max allowed size\n", - i, size); - errors++; - goto cleanup; - } - else { - size_t dtxt_len = rrdpush_decompress(&dctx, out, size); - char *dtxt = (char *) &dctx.output.data[dctx.output.read_pos]; - - if(rrdpush_decompressed_bytes_in_buffer(&dctx) != dtxt_len) { - fprintf(stderr, "iteration %d: decompressed size %zu does not rrdpush_decompressed_bytes_in_buffer() %zu\n", - i, dtxt_len, 
rrdpush_decompressed_bytes_in_buffer(&dctx) - ); - errors++; - goto cleanup; - } - - if(!dtxt_len) { - fprintf(stderr, "iteration %d: decompressed size is zero\n", i); - errors++; - goto cleanup; - } - else if(dtxt_len != txt_len) { - fprintf(stderr, "iteration %d: decompressed size %zu does not match original size %zu\n", - i, dtxt_len, txt_len - ); - errors++; - goto cleanup; - } - else { - if(memcmp(txt, dtxt, txt_len) != 0) { - fprintf(stderr, "iteration %d: decompressed data '%s' do not match original data length %zu\n", - i, dtxt, txt_len); - errors++; - goto cleanup; - } - } - } - - // here we are supposed to copy the data and advance the position - dctx.output.read_pos += rrdpush_decompressed_bytes_in_buffer(&dctx); - } - -cleanup: - rrdpush_compressor_destroy(&cctx); - rrdpush_decompressor_destroy(&dctx); - - if(errors) - fprintf(stderr, "Compression with %s: FAILED (%d errors)\n", name, errors); - else - fprintf(stderr, "Compression with %s: OK " - "(compression %zu usec, decompression %zu usec, bytes raw %zu, compressed %zu, savings ratio %0.2f%%)\n", - name, compression_ut, decompression_ut, - bytes_uncompressed, bytes_compressed, - 100.0 - (double)bytes_compressed * 100.0 / (double)bytes_uncompressed); - - return errors; -} - -int unittest_rrdpush_compression(compression_algorithm_t algorithm, const char *name) { - fprintf(stderr, "\nTesting streaming compression with %s\n", name); - - struct compressor_state cctx = { - .initialized = false, - .algorithm = algorithm, - }; - struct decompressor_state dctx = { - .initialized = false, - .algorithm = algorithm, - }; - - char txt[COMPRESSION_MAX_MSG_SIZE]; - - rrdpush_compressor_init(&cctx); - rrdpush_decompressor_init(&dctx); - - int errors = 0; - - memset(txt, '=', COMPRESSION_MAX_MSG_SIZE); - - for(int i = 0; i < COMPRESSION_MAX_MSG_SIZE ;i++) { - txt[i] = 'A' + (i % 26); - size_t txt_len = i + 1; - - const char *out; - size_t size = rrdpush_compress(&cctx, txt, txt_len, &out); - - if(size == 0) { - fprintf(stderr, "iteration %d: compressed size %zu is zero\n", - i, size); - errors++; - goto cleanup; - } - else if(size >= COMPRESSION_MAX_CHUNK) { - fprintf(stderr, "iteration %d: compressed size %zu exceeds max allowed size\n", - i, size); - errors++; - goto cleanup; - } - else { - size_t dtxt_len = rrdpush_decompress(&dctx, out, size); - char *dtxt = (char *) &dctx.output.data[dctx.output.read_pos]; - - if(rrdpush_decompressed_bytes_in_buffer(&dctx) != dtxt_len) { - fprintf(stderr, "iteration %d: decompressed size %zu does not rrdpush_decompressed_bytes_in_buffer() %zu\n", - i, dtxt_len, rrdpush_decompressed_bytes_in_buffer(&dctx) - ); - errors++; - goto cleanup; - } - - if(!dtxt_len) { - fprintf(stderr, "iteration %d: decompressed size is zero\n", i); - errors++; - goto cleanup; - } - else if(dtxt_len != txt_len) { - fprintf(stderr, "iteration %d: decompressed size %zu does not match original size %zu\n", - i, dtxt_len, txt_len - ); - errors++; - goto cleanup; - } - else { - if(memcmp(txt, dtxt, txt_len) != 0) { - txt[txt_len] = '\0'; - dtxt[txt_len + 5] = '\0'; - - fprintf(stderr, "iteration %d: decompressed data '%s' do not match original data '%s' of length %zu\n", - i, dtxt, txt, txt_len); - errors++; - goto cleanup; - } - } - } - - // fill the compressed buffer with garbage - memset((void *)out, 'x', size); - - // here we are supposed to copy the data and advance the position - dctx.output.read_pos += rrdpush_decompressed_bytes_in_buffer(&dctx); - } - -cleanup: - rrdpush_compressor_destroy(&cctx); - 
rrdpush_decompressor_destroy(&dctx); - - if(errors) - fprintf(stderr, "Compression with %s: FAILED (%d errors)\n", name, errors); - else - fprintf(stderr, "Compression with %s: OK\n", name); - - return errors; -} - -int unittest_rrdpush_compressions(void) { - int ret = 0; - - ret += unittest_rrdpush_compression(COMPRESSION_ALGORITHM_ZSTD, "ZSTD"); - ret += unittest_rrdpush_compression(COMPRESSION_ALGORITHM_LZ4, "LZ4"); - ret += unittest_rrdpush_compression(COMPRESSION_ALGORITHM_BROTLI, "BROTLI"); - ret += unittest_rrdpush_compression(COMPRESSION_ALGORITHM_GZIP, "GZIP"); - - ret += unittest_rrdpush_compression_speed(COMPRESSION_ALGORITHM_ZSTD, "ZSTD"); - ret += unittest_rrdpush_compression_speed(COMPRESSION_ALGORITHM_LZ4, "LZ4"); - ret += unittest_rrdpush_compression_speed(COMPRESSION_ALGORITHM_BROTLI, "BROTLI"); - ret += unittest_rrdpush_compression_speed(COMPRESSION_ALGORITHM_GZIP, "GZIP"); - - return ret; -} diff --git a/src/streaming/compression.h b/src/streaming/compression.h deleted file mode 100644 index 285fb2cf6..000000000 --- a/src/streaming/compression.h +++ /dev/null @@ -1,175 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include "rrdpush.h" - -#ifndef NETDATA_RRDPUSH_COMPRESSION_H -#define NETDATA_RRDPUSH_COMPRESSION_H 1 - -// signature MUST end with a newline - -#if COMPRESSION_MAX_MSG_SIZE >= (COMPRESSION_MAX_CHUNK - COMPRESSION_MAX_OVERHEAD) -#error "COMPRESSION_MAX_MSG_SIZE >= (COMPRESSION_MAX_CHUNK - COMPRESSION_MAX_OVERHEAD)" -#endif - -typedef uint32_t rrdpush_signature_t; -#define RRDPUSH_COMPRESSION_SIGNATURE ((rrdpush_signature_t)('z' | 0x80) | (0x80 << 8) | (0x80 << 16) | ('\n' << 24)) -#define RRDPUSH_COMPRESSION_SIGNATURE_MASK ((rrdpush_signature_t) 0xffU | (0x80U << 8) | (0x80U << 16) | (0xffU << 24)) -#define RRDPUSH_COMPRESSION_SIGNATURE_SIZE sizeof(rrdpush_signature_t) - -static inline rrdpush_signature_t rrdpush_compress_encode_signature(size_t compressed_data_size) { - rrdpush_signature_t len = ((compressed_data_size & 0x7f) | 0x80 | (((compressed_data_size & (0x7f << 7)) << 1) | 0x8000)) << 8; - return len | RRDPUSH_COMPRESSION_SIGNATURE; -} - -typedef enum { - COMPRESSION_ALGORITHM_NONE = 0, - COMPRESSION_ALGORITHM_ZSTD, - COMPRESSION_ALGORITHM_LZ4, - COMPRESSION_ALGORITHM_GZIP, - COMPRESSION_ALGORITHM_BROTLI, - - // terminator - COMPRESSION_ALGORITHM_MAX, -} compression_algorithm_t; - -extern int rrdpush_compression_levels[COMPRESSION_ALGORITHM_MAX]; - -// this defines the order the algorithms will be selected by the receiver (parent) -#define RRDPUSH_COMPRESSION_ALGORITHMS_ORDER "zstd lz4 brotli gzip" - -// ---------------------------------------------------------------------------- - -typedef struct simple_ring_buffer { - const char *data; - size_t size; - size_t read_pos; - size_t write_pos; -} SIMPLE_RING_BUFFER; - -static inline void simple_ring_buffer_reset(SIMPLE_RING_BUFFER *b) { - b->read_pos = b->write_pos = 0; -} - -static inline void simple_ring_buffer_make_room(SIMPLE_RING_BUFFER *b, size_t size) { - if(b->write_pos + size > b->size) { - if(!b->size) - b->size = COMPRESSION_MAX_CHUNK; - else - b->size *= 2; - - if(b->write_pos + size > b->size) - b->size += size; - - b->data = (const char *)reallocz((void *)b->data, b->size); - } -} - -static inline void simple_ring_buffer_append_data(SIMPLE_RING_BUFFER *b, const void *data, size_t size) { - simple_ring_buffer_make_room(b, size); - memcpy((void *)(b->data + b->write_pos), data, size); - b->write_pos += size; -} - -static inline void simple_ring_buffer_destroy(SIMPLE_RING_BUFFER *b) 
{ - freez((void *)b->data); - b->data = NULL; - b->read_pos = b->write_pos = b->size = 0; -} - -// ---------------------------------------------------------------------------- - -struct compressor_state { - bool initialized; - compression_algorithm_t algorithm; - - SIMPLE_RING_BUFFER input; - SIMPLE_RING_BUFFER output; - - int level; - void *stream; - - struct { - size_t total_compressed; - size_t total_uncompressed; - size_t total_compressions; - } sender_locked; -}; - -void rrdpush_compressor_init(struct compressor_state *state); -void rrdpush_compressor_destroy(struct compressor_state *state); -size_t rrdpush_compress(struct compressor_state *state, const char *data, size_t size, const char **out); - -// ---------------------------------------------------------------------------- - -struct decompressor_state { - bool initialized; - compression_algorithm_t algorithm; - size_t signature_size; - - size_t total_compressed; - size_t total_uncompressed; - size_t total_compressions; - - SIMPLE_RING_BUFFER output; - - void *stream; -}; - -void rrdpush_decompressor_destroy(struct decompressor_state *state); -void rrdpush_decompressor_init(struct decompressor_state *state); -size_t rrdpush_decompress(struct decompressor_state *state, const char *compressed_data, size_t compressed_size); - -static inline size_t rrdpush_decompress_decode_signature(const char *data, size_t data_size) { - if (unlikely(!data || !data_size)) - return 0; - - if (unlikely(data_size != RRDPUSH_COMPRESSION_SIGNATURE_SIZE)) - return 0; - - rrdpush_signature_t sign = *(rrdpush_signature_t *)data; - if (unlikely((sign & RRDPUSH_COMPRESSION_SIGNATURE_MASK) != RRDPUSH_COMPRESSION_SIGNATURE)) - return 0; - - size_t length = ((sign >> 8) & 0x7f) | ((sign >> 9) & (0x7f << 7)); - return length; -} - -static inline size_t rrdpush_decompressor_start(struct decompressor_state *state, const char *header, size_t header_size) { - if(unlikely(state->output.read_pos != state->output.write_pos)) - fatal("RRDPUSH DECOMPRESS: asked to decompress new data, while there are unread data in the decompression buffer!"); - - return rrdpush_decompress_decode_signature(header, header_size); -} - -static inline size_t rrdpush_decompressed_bytes_in_buffer(struct decompressor_state *state) { - if(unlikely(state->output.read_pos > state->output.write_pos)) - fatal("RRDPUSH DECOMPRESS: invalid read/write stream positions"); - - return state->output.write_pos - state->output.read_pos; -} - -static inline size_t rrdpush_decompressor_get(struct decompressor_state *state, char *dst, size_t size) { - if (unlikely(!state || !size || !dst)) - return 0; - - size_t remaining = rrdpush_decompressed_bytes_in_buffer(state); - - if(unlikely(!remaining)) - return 0; - - size_t bytes_to_return = size; - if(bytes_to_return > remaining) - bytes_to_return = remaining; - - memcpy(dst, state->output.data + state->output.read_pos, bytes_to_return); - state->output.read_pos += bytes_to_return; - - if(unlikely(state->output.read_pos > state->output.write_pos)) - fatal("RRDPUSH DECOMPRESS: invalid read/write stream positions"); - - return bytes_to_return; -} - -// ---------------------------------------------------------------------------- - -#endif // NETDATA_RRDPUSH_COMPRESSION_H 1 diff --git a/src/streaming/compression_brotli.c b/src/streaming/compression_brotli.c deleted file mode 100644 index cf52f3bca..000000000 --- a/src/streaming/compression_brotli.c +++ /dev/null @@ -1,142 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include "compression_brotli.h" - -#ifdef 
ENABLE_BROTLI -#include -#include - -void rrdpush_compressor_init_brotli(struct compressor_state *state) { - if (!state->initialized) { - state->initialized = true; - state->stream = BrotliEncoderCreateInstance(NULL, NULL, NULL); - - if (state->level < BROTLI_MIN_QUALITY) { - state->level = BROTLI_MIN_QUALITY; - } else if (state->level > BROTLI_MAX_QUALITY) { - state->level = BROTLI_MAX_QUALITY; - } - - BrotliEncoderSetParameter(state->stream, BROTLI_PARAM_QUALITY, state->level); - } -} - -void rrdpush_compressor_destroy_brotli(struct compressor_state *state) { - if (state->stream) { - BrotliEncoderDestroyInstance(state->stream); - state->stream = NULL; - } -} - -size_t rrdpush_compress_brotli(struct compressor_state *state, const char *data, size_t size, const char **out) { - if (unlikely(!state || !size || !out)) - return 0; - - simple_ring_buffer_make_room(&state->output, MAX(BrotliEncoderMaxCompressedSize(size), COMPRESSION_MAX_CHUNK)); - - size_t available_out = state->output.size; - - size_t available_in = size; - const uint8_t *next_in = (const uint8_t *)data; - uint8_t *next_out = (uint8_t *)state->output.data; - - if (!BrotliEncoderCompressStream(state->stream, BROTLI_OPERATION_FLUSH, &available_in, &next_in, &available_out, &next_out, NULL)) { - netdata_log_error("STREAM: Brotli compression failed."); - return 0; - } - - if(available_in != 0) { - netdata_log_error("STREAM: BrotliEncoderCompressStream() did not use all the input buffer, %zu bytes out of %zu remain", - available_in, size); - return 0; - } - - size_t compressed_size = state->output.size - available_out; - if(available_out == 0) { - netdata_log_error("STREAM: BrotliEncoderCompressStream() needs a bigger output buffer than the one we provided " - "(output buffer %zu bytes, compressed payload %zu bytes)", - state->output.size, size); - return 0; - } - - if(compressed_size == 0) { - netdata_log_error("STREAM: BrotliEncoderCompressStream() did not produce any output from the input provided " - "(input buffer %zu bytes)", - size); - return 0; - } - - state->sender_locked.total_compressions++; - state->sender_locked.total_uncompressed += size - available_in; - state->sender_locked.total_compressed += compressed_size; - - *out = state->output.data; - return compressed_size; -} - -void rrdpush_decompressor_init_brotli(struct decompressor_state *state) { - if (!state->initialized) { - state->initialized = true; - state->stream = BrotliDecoderCreateInstance(NULL, NULL, NULL); - - simple_ring_buffer_make_room(&state->output, COMPRESSION_MAX_CHUNK); - } -} - -void rrdpush_decompressor_destroy_brotli(struct decompressor_state *state) { - if (state->stream) { - BrotliDecoderDestroyInstance(state->stream); - state->stream = NULL; - } -} - -size_t rrdpush_decompress_brotli(struct decompressor_state *state, const char *compressed_data, size_t compressed_size) { - if (unlikely(!state || !compressed_data || !compressed_size)) - return 0; - - // The state.output ring buffer is always EMPTY at this point, - // meaning that (state->output.read_pos == state->output.write_pos) - // However, THEY ARE NOT ZERO. 
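/*
 * Editor's note, illustrative and not part of the original source: for this
 * SIMPLE_RING_BUFFER, "empty" means read_pos == write_pos, not that either
 * position is zero. A consumer drains the buffer by advancing read_pos up to
 * write_pos, so after draining N bytes both positions equal N until the next
 * simple_ring_buffer_reset(). The invariant could be made explicit as:
 *
 *     assert(state->output.read_pos == state->output.write_pos);
 */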
- - size_t available_out = state->output.size; - size_t available_in = compressed_size; - const uint8_t *next_in = (const uint8_t *)compressed_data; - uint8_t *next_out = (uint8_t *)state->output.data; - - if (BrotliDecoderDecompressStream(state->stream, &available_in, &next_in, &available_out, &next_out, NULL) == BROTLI_DECODER_RESULT_ERROR) { - netdata_log_error("STREAM: Brotli decompression failed."); - return 0; - } - - if(available_in != 0) { - netdata_log_error("STREAM: BrotliDecoderDecompressStream() did not use all the input buffer, %zu bytes out of %zu remain", - available_in, compressed_size); - return 0; - } - - size_t decompressed_size = state->output.size - available_out; - if(available_out == 0) { - netdata_log_error("STREAM: BrotliDecoderDecompressStream() needs a bigger output buffer than the one we provided " - "(output buffer %zu bytes, compressed payload %zu bytes)", - state->output.size, compressed_size); - return 0; - } - - if(decompressed_size == 0) { - netdata_log_error("STREAM: BrotliDecoderDecompressStream() did not produce any output from the input provided " - "(input buffer %zu bytes)", - compressed_size); - return 0; - } - - state->output.read_pos = 0; - state->output.write_pos = decompressed_size; - - state->total_compressed += compressed_size - available_in; - state->total_uncompressed += decompressed_size; - state->total_compressions++; - - return decompressed_size; -} - -#endif // ENABLE_BROTLI diff --git a/src/streaming/compression_brotli.h b/src/streaming/compression_brotli.h deleted file mode 100644 index 4955e5a82..000000000 --- a/src/streaming/compression_brotli.h +++ /dev/null @@ -1,15 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include "compression.h" - -#ifndef NETDATA_STREAMING_COMPRESSION_BROTLI_H -#define NETDATA_STREAMING_COMPRESSION_BROTLI_H - -void rrdpush_compressor_init_brotli(struct compressor_state *state); -void rrdpush_compressor_destroy_brotli(struct compressor_state *state); -size_t rrdpush_compress_brotli(struct compressor_state *state, const char *data, size_t size, const char **out); -size_t rrdpush_decompress_brotli(struct decompressor_state *state, const char *compressed_data, size_t compressed_size); -void rrdpush_decompressor_init_brotli(struct decompressor_state *state); -void rrdpush_decompressor_destroy_brotli(struct decompressor_state *state); - -#endif //NETDATA_STREAMING_COMPRESSION_BROTLI_H diff --git a/src/streaming/compression_gzip.c b/src/streaming/compression_gzip.c deleted file mode 100644 index c4ef3af05..000000000 --- a/src/streaming/compression_gzip.c +++ /dev/null @@ -1,164 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include "compression_gzip.h" -#include - -void rrdpush_compressor_init_gzip(struct compressor_state *state) { - if (!state->initialized) { - state->initialized = true; - - // Initialize deflate stream - z_stream *strm = state->stream = (z_stream *) mallocz(sizeof(z_stream)); - strm->zalloc = Z_NULL; - strm->zfree = Z_NULL; - strm->opaque = Z_NULL; - - if(state->level < Z_BEST_SPEED) - state->level = Z_BEST_SPEED; - - if(state->level > Z_BEST_COMPRESSION) - state->level = Z_BEST_COMPRESSION; - - // int r = deflateInit2(strm, Z_BEST_COMPRESSION, Z_DEFLATED, 15 + 16, 8, Z_DEFAULT_STRATEGY); - int r = deflateInit2(strm, state->level, Z_DEFLATED, 15 + 16, 8, Z_DEFAULT_STRATEGY); - if (r != Z_OK) { - netdata_log_error("Failed to initialize deflate with error: %d", r); - freez(state->stream); - state->initialized = false; - return; - } - - } -} - -void 
rrdpush_compressor_destroy_gzip(struct compressor_state *state) { - if (state->stream) { - deflateEnd(state->stream); - freez(state->stream); - state->stream = NULL; - } -} - -size_t rrdpush_compress_gzip(struct compressor_state *state, const char *data, size_t size, const char **out) { - if (unlikely(!state || !size || !out)) - return 0; - - simple_ring_buffer_make_room(&state->output, deflateBound(state->stream, size)); - - z_stream *strm = state->stream; - strm->avail_in = (uInt)size; - strm->next_in = (Bytef *)data; - strm->avail_out = (uInt)state->output.size; - strm->next_out = (Bytef *)state->output.data; - - int ret = deflate(strm, Z_SYNC_FLUSH); - if (ret != Z_OK && ret != Z_STREAM_END) { - netdata_log_error("STREAM: deflate() failed with error %d", ret); - return 0; - } - - if(strm->avail_in != 0) { - netdata_log_error("STREAM: deflate() did not use all the input buffer, %u bytes out of %zu remain", - strm->avail_in, size); - return 0; - } - - if(strm->avail_out == 0) { - netdata_log_error("STREAM: deflate() needs a bigger output buffer than the one we provided " - "(output buffer %zu bytes, compressed payload %zu bytes)", - state->output.size, size); - return 0; - } - - size_t compressed_data_size = state->output.size - strm->avail_out; - - if(compressed_data_size == 0) { - netdata_log_error("STREAM: deflate() did not produce any output " - "(output buffer %zu bytes, compressed payload %zu bytes)", - state->output.size, size); - return 0; - } - - state->sender_locked.total_compressions++; - state->sender_locked.total_uncompressed += size; - state->sender_locked.total_compressed += compressed_data_size; - - *out = state->output.data; - return compressed_data_size; -} - -void rrdpush_decompressor_init_gzip(struct decompressor_state *state) { - if (!state->initialized) { - state->initialized = true; - - // Initialize inflate stream - z_stream *strm = state->stream = (z_stream *)mallocz(sizeof(z_stream)); - strm->zalloc = Z_NULL; - strm->zfree = Z_NULL; - strm->opaque = Z_NULL; - - int r = inflateInit2(strm, 15 + 16); - if (r != Z_OK) { - netdata_log_error("Failed to initialize inflateInit2() with error: %d", r); - freez(state->stream); - state->initialized = false; - return; - } - - simple_ring_buffer_make_room(&state->output, COMPRESSION_MAX_CHUNK); - } -} - -void rrdpush_decompressor_destroy_gzip(struct decompressor_state *state) { - if (state->stream) { - inflateEnd(state->stream); - freez(state->stream); - state->stream = NULL; - } -} - -size_t rrdpush_decompress_gzip(struct decompressor_state *state, const char *compressed_data, size_t compressed_size) { - if (unlikely(!state || !compressed_data || !compressed_size)) - return 0; - - // The state.output ring buffer is always EMPTY at this point, - // meaning that (state->output.read_pos == state->output.write_pos) - // However, THEY ARE NOT ZERO. 
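/*
 * Editor's sketch (illustrative, not in the original): every compressed
 * chunk handed to the rrdpush_decompress_*() functions was framed by the
 * 4-byte signature defined in compression.h above, which packs a 14-bit
 * payload length (sizes up to 16383 bytes). Encoding and decoding it is an
 * exact round trip:
 *
 *     rrdpush_signature_t sig = rrdpush_compress_encode_signature(1234);
 *     size_t len = rrdpush_decompress_decode_signature((const char *)&sig,
 *                                                      sizeof(sig));
 *     // len == 1234, and (sig & RRDPUSH_COMPRESSION_SIGNATURE_MASK)
 *     // equals RRDPUSH_COMPRESSION_SIGNATURE
 */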
- - z_stream *strm = state->stream; - strm->avail_in = (uInt)compressed_size; - strm->next_in = (Bytef *)compressed_data; - strm->avail_out = (uInt)state->output.size; - strm->next_out = (Bytef *)state->output.data; - - int ret = inflate(strm, Z_SYNC_FLUSH); - if (ret != Z_STREAM_END && ret != Z_OK) { - netdata_log_error("RRDPUSH DECOMPRESS: inflate() failed with error %d", ret); - return 0; - } - - if(strm->avail_in != 0) { - netdata_log_error("RRDPUSH DECOMPRESS: inflate() did not use all compressed data we provided " - "(compressed payload %zu bytes, remaining to be uncompressed %u)" - , compressed_size, strm->avail_in); - return 0; - } - - if(strm->avail_out == 0) { - netdata_log_error("RRDPUSH DECOMPRESS: inflate() needs a bigger output buffer than the one we provided " - "(compressed payload %zu bytes, output buffer size %zu bytes)" - , compressed_size, state->output.size); - return 0; - } - - size_t decompressed_size = state->output.size - strm->avail_out; - - state->output.read_pos = 0; - state->output.write_pos = decompressed_size; - - state->total_compressed += compressed_size; - state->total_uncompressed += decompressed_size; - state->total_compressions++; - - return decompressed_size; -} diff --git a/src/streaming/compression_gzip.h b/src/streaming/compression_gzip.h deleted file mode 100644 index 85f34bc6d..000000000 --- a/src/streaming/compression_gzip.h +++ /dev/null @@ -1,15 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include "compression.h" - -#ifndef NETDATA_STREAMING_COMPRESSION_GZIP_H -#define NETDATA_STREAMING_COMPRESSION_GZIP_H - -void rrdpush_compressor_init_gzip(struct compressor_state *state); -void rrdpush_compressor_destroy_gzip(struct compressor_state *state); -size_t rrdpush_compress_gzip(struct compressor_state *state, const char *data, size_t size, const char **out); -size_t rrdpush_decompress_gzip(struct decompressor_state *state, const char *compressed_data, size_t compressed_size); -void rrdpush_decompressor_init_gzip(struct decompressor_state *state); -void rrdpush_decompressor_destroy_gzip(struct decompressor_state *state); - -#endif //NETDATA_STREAMING_COMPRESSION_GZIP_H diff --git a/src/streaming/compression_lz4.c b/src/streaming/compression_lz4.c deleted file mode 100644 index f5174134e..000000000 --- a/src/streaming/compression_lz4.c +++ /dev/null @@ -1,143 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include "compression_lz4.h" - -#ifdef ENABLE_LZ4 -#include "lz4.h" - -// ---------------------------------------------------------------------------- -// compress - -void rrdpush_compressor_init_lz4(struct compressor_state *state) { - if(!state->initialized) { - state->initialized = true; - state->stream = LZ4_createStream(); - - // LZ4 needs access to the last 64KB of source data - // so, we keep twice the size of each message - simple_ring_buffer_make_room(&state->input, 65536 + COMPRESSION_MAX_CHUNK * 2); - } -} - -void rrdpush_compressor_destroy_lz4(struct compressor_state *state) { - if (state->stream) { - LZ4_freeStream(state->stream); - state->stream = NULL; - } -} - -/* - * Compress the given block of data - * Compressed data will remain in the internal buffer until the next invocation - * Return the size of compressed data block as result and the pointer to internal buffer using the last argument - * or 0 in case of error - */ -size_t rrdpush_compress_lz4(struct compressor_state *state, const char *data, size_t size, const char **out) { - if(unlikely(!state || !size || !out)) - return 0; - - // we need to keep the 
last 64K of our previous source data - // as they were in the ring buffer - - simple_ring_buffer_make_room(&state->output, LZ4_COMPRESSBOUND(size)); - - if(state->input.write_pos + size > state->input.size) - // the input buffer cannot fit out data, restart from zero - simple_ring_buffer_reset(&state->input); - - simple_ring_buffer_append_data(&state->input, data, size); - - long int compressed_data_size = LZ4_compress_fast_continue( - state->stream, - state->input.data + state->input.read_pos, - (char *)state->output.data, - (int)(state->input.write_pos - state->input.read_pos), - (int)state->output.size, - state->level); - - if (compressed_data_size <= 0) { - netdata_log_error("STREAM: LZ4_compress_fast_continue() returned %ld " - "(source is %zu bytes, output buffer can fit %zu bytes)", - compressed_data_size, size, state->output.size); - return 0; - } - - state->input.read_pos = state->input.write_pos; - - state->sender_locked.total_compressions++; - state->sender_locked.total_uncompressed += size; - state->sender_locked.total_compressed += compressed_data_size; - - *out = state->output.data; - return compressed_data_size; -} - -// ---------------------------------------------------------------------------- -// decompress - -void rrdpush_decompressor_init_lz4(struct decompressor_state *state) { - if(!state->initialized) { - state->initialized = true; - state->stream = LZ4_createStreamDecode(); - simple_ring_buffer_make_room(&state->output, 65536 + COMPRESSION_MAX_CHUNK * 2); - } -} - -void rrdpush_decompressor_destroy_lz4(struct decompressor_state *state) { - if (state->stream) { - LZ4_freeStreamDecode(state->stream); - state->stream = NULL; - } -} - -/* - * Decompress the compressed data in the internal buffer - * Return the size of uncompressed data or 0 for error - */ -size_t rrdpush_decompress_lz4(struct decompressor_state *state, const char *compressed_data, size_t compressed_size) { - if (unlikely(!state || !compressed_data || !compressed_size)) - return 0; - - // The state.output ring buffer is always EMPTY at this point, - // meaning that (state->output.read_pos == state->output.write_pos) - // However, THEY ARE NOT ZERO. 
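/*
 * Editor's note (illustrative, not part of the original): LZ4's streaming
 * API resolves back-references against the previous 64KiB of decompressed
 * output, which is why this ring is sized 65536 + COMPRESSION_MAX_CHUNK * 2
 * and is reset only when the next chunk would no longer fit. After a wrap,
 * LZ4_decompress_safe_continue() can still match into the pre-wrap bytes by
 * treating them as an external dictionary, so no history is lost.
 */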
- - if (unlikely(state->output.write_pos + COMPRESSION_MAX_CHUNK > state->output.size)) - // the input buffer cannot fit out data, restart from zero - simple_ring_buffer_reset(&state->output); - - long int decompressed_size = LZ4_decompress_safe_continue( - state->stream - , compressed_data - , (char *)(state->output.data + state->output.write_pos) - , (int)compressed_size - , (int)(state->output.size - state->output.write_pos) - ); - - if (unlikely(decompressed_size < 0)) { - netdata_log_error("RRDPUSH DECOMPRESS: LZ4_decompress_safe_continue() returned negative value: %ld " - "(compressed chunk is %zu bytes)" - , decompressed_size, compressed_size); - return 0; - } - - if(unlikely(decompressed_size + state->output.write_pos > state->output.size)) - fatal("RRDPUSH DECOMPRESS: LZ4_decompress_safe_continue() overflown the stream_buffer " - "(size: %zu, pos: %zu, added: %ld, exceeding the buffer by %zu)" - , state->output.size - , state->output.write_pos - , decompressed_size - , (size_t)(state->output.write_pos + decompressed_size - state->output.size) - ); - - state->output.write_pos += decompressed_size; - - // statistics - state->total_compressed += compressed_size; - state->total_uncompressed += decompressed_size; - state->total_compressions++; - - return decompressed_size; -} - -#endif // ENABLE_LZ4 diff --git a/src/streaming/compression_lz4.h b/src/streaming/compression_lz4.h deleted file mode 100644 index 69f0fadcc..000000000 --- a/src/streaming/compression_lz4.h +++ /dev/null @@ -1,19 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include "compression.h" - -#ifndef NETDATA_STREAMING_COMPRESSION_LZ4_H -#define NETDATA_STREAMING_COMPRESSION_LZ4_H - -#ifdef ENABLE_LZ4 - -void rrdpush_compressor_init_lz4(struct compressor_state *state); -void rrdpush_compressor_destroy_lz4(struct compressor_state *state); -size_t rrdpush_compress_lz4(struct compressor_state *state, const char *data, size_t size, const char **out); -size_t rrdpush_decompress_lz4(struct decompressor_state *state, const char *compressed_data, size_t compressed_size); -void rrdpush_decompressor_init_lz4(struct decompressor_state *state); -void rrdpush_decompressor_destroy_lz4(struct decompressor_state *state); - -#endif // ENABLE_LZ4 - -#endif //NETDATA_STREAMING_COMPRESSION_LZ4_H diff --git a/src/streaming/compression_zstd.c b/src/streaming/compression_zstd.c deleted file mode 100644 index dabc044f7..000000000 --- a/src/streaming/compression_zstd.c +++ /dev/null @@ -1,163 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include "compression_zstd.h" - -#ifdef ENABLE_ZSTD -#include - -void rrdpush_compressor_init_zstd(struct compressor_state *state) { - if(!state->initialized) { - state->initialized = true; - state->stream = ZSTD_createCStream(); - - if(state->level < 1) - state->level = 1; - - if(state->level > ZSTD_maxCLevel()) - state->level = ZSTD_maxCLevel(); - - size_t ret = ZSTD_initCStream(state->stream, state->level); - if(ZSTD_isError(ret)) - netdata_log_error("STREAM: ZSTD_initCStream() returned error: %s", ZSTD_getErrorName(ret)); - - // ZSTD_CCtx_setParameter(state->stream, ZSTD_c_compressionLevel, 1); - // ZSTD_CCtx_setParameter(state->stream, ZSTD_c_strategy, ZSTD_fast); - } -} - -void rrdpush_compressor_destroy_zstd(struct compressor_state *state) { - if(state->stream) { - ZSTD_freeCStream(state->stream); - state->stream = NULL; - } -} - -size_t rrdpush_compress_zstd(struct compressor_state *state, const char *data, size_t size, const char **out) { - if(unlikely(!state || !size || !out)) - 
return 0; - - ZSTD_inBuffer inBuffer = { - .pos = 0, - .size = size, - .src = data, - }; - - size_t wanted_size = MAX(ZSTD_compressBound(inBuffer.size - inBuffer.pos), ZSTD_CStreamOutSize()); - simple_ring_buffer_make_room(&state->output, wanted_size); - - ZSTD_outBuffer outBuffer = { - .pos = 0, - .size = state->output.size, - .dst = (void *)state->output.data, - }; - - // compress - size_t ret = ZSTD_compressStream(state->stream, &outBuffer, &inBuffer); - - // error handling - if(ZSTD_isError(ret)) { - netdata_log_error("STREAM: ZSTD_compressStream() return error: %s", ZSTD_getErrorName(ret)); - return 0; - } - - if(inBuffer.pos < inBuffer.size) { - netdata_log_error("STREAM: ZSTD_compressStream() left unprocessed input (source payload %zu bytes, consumed %zu bytes)", - inBuffer.size, inBuffer.pos); - return 0; - } - - if(outBuffer.pos == 0) { - // ZSTD needs more input to flush the output, so let's flush it manually - ret = ZSTD_flushStream(state->stream, &outBuffer); - - if(ZSTD_isError(ret)) { - netdata_log_error("STREAM: ZSTD_flushStream() return error: %s", ZSTD_getErrorName(ret)); - return 0; - } - - if(outBuffer.pos == 0) { - netdata_log_error("STREAM: ZSTD_compressStream() returned zero compressed bytes " - "(source is %zu bytes, output buffer can fit %zu bytes) " - , size, outBuffer.size); - return 0; - } - } - - state->sender_locked.total_compressions++; - state->sender_locked.total_uncompressed += size; - state->sender_locked.total_compressed += outBuffer.pos; - - // return values - *out = state->output.data; - return outBuffer.pos; -} - -void rrdpush_decompressor_init_zstd(struct decompressor_state *state) { - if(!state->initialized) { - state->initialized = true; - state->stream = ZSTD_createDStream(); - - size_t ret = ZSTD_initDStream(state->stream); - if(ZSTD_isError(ret)) - netdata_log_error("STREAM: ZSTD_initDStream() returned error: %s", ZSTD_getErrorName(ret)); - - simple_ring_buffer_make_room(&state->output, MAX(COMPRESSION_MAX_CHUNK, ZSTD_DStreamOutSize())); - } -} - -void rrdpush_decompressor_destroy_zstd(struct decompressor_state *state) { - if (state->stream) { - ZSTD_freeDStream(state->stream); - state->stream = NULL; - } -} - -size_t rrdpush_decompress_zstd(struct decompressor_state *state, const char *compressed_data, size_t compressed_size) { - if (unlikely(!state || !compressed_data || !compressed_size)) - return 0; - - // The state.output ring buffer is always EMPTY at this point, - // meaning that (state->output.read_pos == state->output.write_pos) - // However, THEY ARE NOT ZERO. 
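/*
 * Editor's sketch (uses only the documented zstd streaming API; not part of
 * the original): the compress path above pairs ZSTD_compressStream() with an
 * explicit ZSTD_flushStream() because a streaming compressor may buffer a
 * small input internally and emit zero output bytes. The minimal pattern is:
 *
 *     ZSTD_inBuffer  in  = { .src = data, .size = size, .pos = 0 };
 *     ZSTD_outBuffer out = { .dst = buf,  .size = cap,  .pos = 0 };
 *     size_t ret = ZSTD_compressStream(cctx, &out, &in);
 *     if(!ZSTD_isError(ret) && out.pos == 0)
 *         ret = ZSTD_flushStream(cctx, &out); // force a block boundary
 *
 * so that each streamed message can be decompressed as soon as it arrives.
 */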
- - ZSTD_inBuffer inBuffer = { - .pos = 0, - .size = compressed_size, - .src = compressed_data, - }; - - ZSTD_outBuffer outBuffer = { - .pos = 0, - .dst = (char *)state->output.data, - .size = state->output.size, - }; - - size_t ret = ZSTD_decompressStream( - state->stream - , &outBuffer - , &inBuffer); - - if(ZSTD_isError(ret)) { - netdata_log_error("STREAM: ZSTD_decompressStream() return error: %s", ZSTD_getErrorName(ret)); - return 0; - } - - if(inBuffer.pos < inBuffer.size) - fatal("RRDPUSH DECOMPRESS: ZSTD ZSTD_decompressStream() decompressed %zu bytes, " - "but %zu bytes of compressed data remain", - inBuffer.pos, inBuffer.size); - - size_t decompressed_size = outBuffer.pos; - - state->output.read_pos = 0; - state->output.write_pos = outBuffer.pos; - - // statistics - state->total_compressed += compressed_size; - state->total_uncompressed += decompressed_size; - state->total_compressions++; - - return decompressed_size; -} - -#endif // ENABLE_ZSTD diff --git a/src/streaming/compression_zstd.h b/src/streaming/compression_zstd.h deleted file mode 100644 index bfabbf89d..000000000 --- a/src/streaming/compression_zstd.h +++ /dev/null @@ -1,19 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include "compression.h" - -#ifndef NETDATA_STREAMING_COMPRESSION_ZSTD_H -#define NETDATA_STREAMING_COMPRESSION_ZSTD_H - -#ifdef ENABLE_ZSTD - -void rrdpush_compressor_init_zstd(struct compressor_state *state); -void rrdpush_compressor_destroy_zstd(struct compressor_state *state); -size_t rrdpush_compress_zstd(struct compressor_state *state, const char *data, size_t size, const char **out); -size_t rrdpush_decompress_zstd(struct decompressor_state *state, const char *compressed_data, size_t compressed_size); -void rrdpush_decompressor_init_zstd(struct decompressor_state *state); -void rrdpush_decompressor_destroy_zstd(struct decompressor_state *state); - -#endif // ENABLE_ZSTD - -#endif //NETDATA_STREAMING_COMPRESSION_ZSTD_H diff --git a/src/streaming/h2o-common.h b/src/streaming/h2o-common.h new file mode 100644 index 000000000..b7292f4d0 --- /dev/null +++ b/src/streaming/h2o-common.h @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef STREAMING_COMMON_H +#define STREAMING_COMMON_H + +#define NETDATA_STREAM_URL "/stream" +#define NETDATA_STREAM_PROTO_NAME "netdata_stream/2.0" + +#endif /* STREAMING_COMMON_H */ diff --git a/src/streaming/protocol/command-begin-set-end.c b/src/streaming/protocol/command-begin-set-end.c new file mode 100644 index 000000000..17daef776 --- /dev/null +++ b/src/streaming/protocol/command-begin-set-end.c @@ -0,0 +1,126 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "commands.h" +#include "plugins.d/pluginsd_internals.h" + +static void rrdpush_send_chart_metrics(BUFFER *wb, RRDSET *st, struct sender_state *s __maybe_unused, RRDSET_FLAGS flags) { + buffer_fast_strcat(wb, "BEGIN \"", 7); + buffer_fast_strcat(wb, rrdset_id(st), string_strlen(st->id)); + buffer_fast_strcat(wb, "\" ", 2); + + if(st->last_collected_time.tv_sec > st->rrdpush.sender.resync_time_s) + buffer_print_uint64(wb, st->usec_since_last_update); + else + buffer_fast_strcat(wb, "0", 1); + + buffer_fast_strcat(wb, "\n", 1); + + RRDDIM *rd; + rrddim_foreach_read(rd, st) { + if(unlikely(!rrddim_check_updated(rd))) + continue; + + if(likely(rrddim_check_upstream_exposed_collector(rd))) { + buffer_fast_strcat(wb, "SET \"", 5); + buffer_fast_strcat(wb, rrddim_id(rd), string_strlen(rd->id)); + buffer_fast_strcat(wb, "\" = ", 4); + buffer_print_int64(wb, 
rd->collector.collected_value); + buffer_fast_strcat(wb, "\n", 1); + } + else { + internal_error(true, "STREAM: 'host:%s/chart:%s/dim:%s' flag 'exposed' is updated but not exposed", + rrdhost_hostname(st->rrdhost), rrdset_id(st), rrddim_id(rd)); + // we will include it in the next iteration + rrddim_metadata_updated(rd); + } + } + rrddim_foreach_done(rd); + + if(unlikely(flags & RRDSET_FLAG_UPSTREAM_SEND_VARIABLES)) + rrdvar_print_to_streaming_custom_chart_variables(st, wb); + + buffer_fast_strcat(wb, "END\n", 4); +} + +void rrdset_push_metrics_v1(RRDSET_STREAM_BUFFER *rsb, RRDSET *st) { + RRDHOST *host = st->rrdhost; + rrdpush_send_chart_metrics(rsb->wb, st, host->sender, rsb->rrdset_flags); +} + +void rrddim_push_metrics_v2(RRDSET_STREAM_BUFFER *rsb, RRDDIM *rd, usec_t point_end_time_ut, NETDATA_DOUBLE n, SN_FLAGS flags) { + if(!rsb->wb || !rsb->v2 || !netdata_double_isnumber(n) || !does_storage_number_exist(flags)) + return; + + bool with_slots = stream_has_capability(rsb, STREAM_CAP_SLOTS) ? true : false; + NUMBER_ENCODING integer_encoding = stream_has_capability(rsb, STREAM_CAP_IEEE754) ? NUMBER_ENCODING_BASE64 : NUMBER_ENCODING_HEX; + NUMBER_ENCODING doubles_encoding = stream_has_capability(rsb, STREAM_CAP_IEEE754) ? NUMBER_ENCODING_BASE64 : NUMBER_ENCODING_DECIMAL; + BUFFER *wb = rsb->wb; + time_t point_end_time_s = (time_t)(point_end_time_ut / USEC_PER_SEC); + if(unlikely(rsb->last_point_end_time_s != point_end_time_s)) { + + if(unlikely(rsb->begin_v2_added)) + buffer_fast_strcat(wb, PLUGINSD_KEYWORD_END_V2 "\n", sizeof(PLUGINSD_KEYWORD_END_V2) - 1 + 1); + + buffer_fast_strcat(wb, PLUGINSD_KEYWORD_BEGIN_V2, sizeof(PLUGINSD_KEYWORD_BEGIN_V2) - 1); + + if(with_slots) { + buffer_fast_strcat(wb, " "PLUGINSD_KEYWORD_SLOT":", sizeof(PLUGINSD_KEYWORD_SLOT) - 1 + 2); + buffer_print_uint64_encoded(wb, integer_encoding, rd->rrdset->rrdpush.sender.chart_slot); + } + + buffer_fast_strcat(wb, " '", 2); + buffer_fast_strcat(wb, rrdset_id(rd->rrdset), string_strlen(rd->rrdset->id)); + buffer_fast_strcat(wb, "' ", 2); + buffer_print_uint64_encoded(wb, integer_encoding, rd->rrdset->update_every); + buffer_fast_strcat(wb, " ", 1); + buffer_print_uint64_encoded(wb, integer_encoding, point_end_time_s); + buffer_fast_strcat(wb, " ", 1); + if(point_end_time_s == rsb->wall_clock_time) + buffer_fast_strcat(wb, "#", 1); + else + buffer_print_uint64_encoded(wb, integer_encoding, rsb->wall_clock_time); + buffer_fast_strcat(wb, "\n", 1); + + rsb->last_point_end_time_s = point_end_time_s; + rsb->begin_v2_added = true; + } + + buffer_fast_strcat(wb, PLUGINSD_KEYWORD_SET_V2, sizeof(PLUGINSD_KEYWORD_SET_V2) - 1); + + if(with_slots) { + buffer_fast_strcat(wb, " "PLUGINSD_KEYWORD_SLOT":", sizeof(PLUGINSD_KEYWORD_SLOT) - 1 + 2); + buffer_print_uint64_encoded(wb, integer_encoding, rd->rrdpush.sender.dim_slot); + } + + buffer_fast_strcat(wb, " '", 2); + buffer_fast_strcat(wb, rrddim_id(rd), string_strlen(rd->id)); + buffer_fast_strcat(wb, "' ", 2); + buffer_print_int64_encoded(wb, integer_encoding, rd->collector.last_collected_value); + buffer_fast_strcat(wb, " ", 1); + + if((NETDATA_DOUBLE)rd->collector.last_collected_value == n) + buffer_fast_strcat(wb, "#", 1); + else + buffer_print_netdata_double_encoded(wb, doubles_encoding, n); + + buffer_fast_strcat(wb, " ", 1); + buffer_print_sn_flags(wb, flags, true); + buffer_fast_strcat(wb, "\n", 1); +} + +void rrdset_push_metrics_finished(RRDSET_STREAM_BUFFER *rsb, RRDSET *st) { + if(!rsb->wb) + return; + + if(rsb->v2 && rsb->begin_v2_added) { + 
if(unlikely(rsb->rrdset_flags & RRDSET_FLAG_UPSTREAM_SEND_VARIABLES)) + rrdvar_print_to_streaming_custom_chart_variables(st, rsb->wb); + + buffer_fast_strcat(rsb->wb, PLUGINSD_KEYWORD_END_V2 "\n", sizeof(PLUGINSD_KEYWORD_END_V2) - 1 + 1); + } + + sender_commit(st->rrdhost->sender, rsb->wb, STREAM_TRAFFIC_TYPE_DATA); + + *rsb = (RRDSET_STREAM_BUFFER){ .wb = NULL, }; +} + diff --git a/src/streaming/protocol/command-chart-definition.c b/src/streaming/protocol/command-chart-definition.c new file mode 100644 index 000000000..864d13242 --- /dev/null +++ b/src/streaming/protocol/command-chart-definition.c @@ -0,0 +1,206 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "commands.h" +#include "plugins.d/pluginsd_internals.h" + +// chart labels +static int send_clabels_callback(const char *name, const char *value, RRDLABEL_SRC ls, void *data) { + BUFFER *wb = (BUFFER *)data; + buffer_sprintf(wb, PLUGINSD_KEYWORD_CLABEL " \"%s\" \"%s\" %d\n", name, value, ls & ~(RRDLABEL_FLAG_INTERNAL)); + return 1; +} + +static void rrdpush_send_clabels(BUFFER *wb, RRDSET *st) { + if (st->rrdlabels) { + if(rrdlabels_walkthrough_read(st->rrdlabels, send_clabels_callback, wb) > 0) + buffer_sprintf(wb, PLUGINSD_KEYWORD_CLABEL_COMMIT "\n"); + } +} + +// Send the current chart definition. +// Assumes that collector thread has already called sender_start for mutex / buffer state. +bool rrdpush_send_chart_definition(BUFFER *wb, RRDSET *st) { + uint32_t version = rrdset_metadata_version(st); + + RRDHOST *host = st->rrdhost; + NUMBER_ENCODING integer_encoding = stream_has_capability(host->sender, STREAM_CAP_IEEE754) ? NUMBER_ENCODING_BASE64 : NUMBER_ENCODING_HEX; + bool with_slots = stream_has_capability(host->sender, STREAM_CAP_SLOTS) ? true : false; + + bool replication_progress = false; + + // properly set the name for the remote end to parse it + char *name = ""; + if(likely(st->name)) { + if(unlikely(st->id != st->name)) { + // they differ + name = strchr(rrdset_name(st), '.'); + if(name) + name++; + else + name = ""; + } + } + + buffer_fast_strcat(wb, PLUGINSD_KEYWORD_CHART, sizeof(PLUGINSD_KEYWORD_CHART) - 1); + + if(with_slots) { + buffer_fast_strcat(wb, " "PLUGINSD_KEYWORD_SLOT":", sizeof(PLUGINSD_KEYWORD_SLOT) - 1 + 2); + buffer_print_uint64_encoded(wb, integer_encoding, st->rrdpush.sender.chart_slot); + } + + // send the chart + buffer_sprintf( + wb + , " \"%s\" \"%s\" \"%s\" \"%s\" \"%s\" \"%s\" \"%s\" %d %d \"%s %s %s\" \"%s\" \"%s\"\n" + , rrdset_id(st) + , name + , rrdset_title(st) + , rrdset_units(st) + , rrdset_family(st) + , rrdset_context(st) + , rrdset_type_name(st->chart_type) + , st->priority + , st->update_every + , rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE)?"obsolete":"" + , rrdset_flag_check(st, RRDSET_FLAG_STORE_FIRST)?"store_first":"" + , rrdset_flag_check(st, RRDSET_FLAG_HIDDEN)?"hidden":"" + , rrdset_plugin_name(st) + , rrdset_module_name(st) + ); + + // send the chart labels + if (stream_has_capability(host->sender, STREAM_CAP_CLABELS)) + rrdpush_send_clabels(wb, st); + + // send the dimensions + RRDDIM *rd; + rrddim_foreach_read(rd, st) { + buffer_fast_strcat(wb, PLUGINSD_KEYWORD_DIMENSION, sizeof(PLUGINSD_KEYWORD_DIMENSION) - 1); + + if(with_slots) { + buffer_fast_strcat(wb, " "PLUGINSD_KEYWORD_SLOT":", sizeof(PLUGINSD_KEYWORD_SLOT) - 1 + 2); + buffer_print_uint64_encoded(wb, integer_encoding, rd->rrdpush.sender.dim_slot); + } + + buffer_sprintf( + wb + , " \"%s\" \"%s\" \"%s\" %d %d \"%s %s %s\"\n" + , rrddim_id(rd) + , rrddim_name(rd) + , rrd_algorithm_name(rd->algorithm) + , 
rd->multiplier + , rd->divisor + , rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE)?"obsolete":"" + , rrddim_option_check(rd, RRDDIM_OPTION_HIDDEN)?"hidden":"" + , rrddim_option_check(rd, RRDDIM_OPTION_DONT_DETECT_RESETS_OR_OVERFLOWS)?"noreset":"" + ); + } + rrddim_foreach_done(rd); + + // send the chart functions + if(stream_has_capability(host->sender, STREAM_CAP_FUNCTIONS)) + rrd_chart_functions_expose_rrdpush(st, wb); + + // send the chart local custom variables + rrdvar_print_to_streaming_custom_chart_variables(st, wb); + + if (stream_has_capability(host->sender, STREAM_CAP_REPLICATION)) { + time_t db_first_time_t, db_last_time_t; + + time_t now = now_realtime_sec(); + rrdset_get_retention_of_tier_for_collected_chart(st, &db_first_time_t, &db_last_time_t, now, 0); + + buffer_sprintf(wb, PLUGINSD_KEYWORD_CHART_DEFINITION_END " %llu %llu %llu\n", + (unsigned long long)db_first_time_t, + (unsigned long long)db_last_time_t, + (unsigned long long)now); + + if(!rrdset_flag_check(st, RRDSET_FLAG_SENDER_REPLICATION_IN_PROGRESS)) { + rrdset_flag_set(st, RRDSET_FLAG_SENDER_REPLICATION_IN_PROGRESS); + rrdset_flag_clear(st, RRDSET_FLAG_SENDER_REPLICATION_FINISHED); + rrdhost_sender_replicating_charts_plus_one(st->rrdhost); + } + replication_progress = true; + +#ifdef NETDATA_LOG_REPLICATION_REQUESTS + internal_error(true, "REPLAY: 'host:%s/chart:%s' replication starts", + rrdhost_hostname(st->rrdhost), rrdset_id(st)); +#endif + } + + sender_commit(host->sender, wb, STREAM_TRAFFIC_TYPE_METADATA); + + // we can set the exposed flag, after we commit the buffer + // because replication may pick it up prematurely + rrddim_foreach_read(rd, st) { + rrddim_metadata_exposed_upstream(rd, version); + } + rrddim_foreach_done(rd); + rrdset_metadata_exposed_upstream(st, version); + + st->rrdpush.sender.resync_time_s = st->last_collected_time.tv_sec + (stream_conf_initial_clock_resync_iterations * st->update_every); + return replication_progress; +} + +bool should_send_chart_matching(RRDSET *st, RRDSET_FLAGS flags) { + if(!(flags & RRDSET_FLAG_RECEIVER_REPLICATION_FINISHED)) + return false; + + if(unlikely(!(flags & (RRDSET_FLAG_UPSTREAM_SEND | RRDSET_FLAG_UPSTREAM_IGNORE)))) { + RRDHOST *host = st->rrdhost; + + if (flags & RRDSET_FLAG_ANOMALY_DETECTION) { + if(ml_streaming_enabled()) + rrdset_flag_set(st, RRDSET_FLAG_UPSTREAM_SEND); + else + rrdset_flag_set(st, RRDSET_FLAG_UPSTREAM_IGNORE); + } + else { + int negative = 0, positive = 0; + SIMPLE_PATTERN_RESULT r; + + r = simple_pattern_matches_string_extract(host->rrdpush.send.charts_matching, st->context, NULL, 0); + if(r == SP_MATCHED_POSITIVE) positive++; + else if(r == SP_MATCHED_NEGATIVE) negative++; + + if(!negative) { + r = simple_pattern_matches_string_extract(host->rrdpush.send.charts_matching, st->name, NULL, 0); + if (r == SP_MATCHED_POSITIVE) positive++; + else if (r == SP_MATCHED_NEGATIVE) negative++; + } + + if(!negative) { + r = simple_pattern_matches_string_extract(host->rrdpush.send.charts_matching, st->id, NULL, 0); + if (r == SP_MATCHED_POSITIVE) positive++; + else if (r == SP_MATCHED_NEGATIVE) negative++; + } + + if(!negative && positive) + rrdset_flag_set(st, RRDSET_FLAG_UPSTREAM_SEND); + else + rrdset_flag_set(st, RRDSET_FLAG_UPSTREAM_IGNORE); + } + + // get the flags again, to know how to respond + flags = rrdset_flag_check(st, RRDSET_FLAG_UPSTREAM_SEND|RRDSET_FLAG_UPSTREAM_IGNORE); + } + + return flags & RRDSET_FLAG_UPSTREAM_SEND; +} + +// Called from the internal collectors to push a chart definition to the parent immediately.
+bool rrdset_push_chart_definition_now(RRDSET *st) {
+    RRDHOST *host = st->rrdhost;
+
+    if(unlikely(!rrdhost_can_send_definitions_to_parent(host)
+        || !should_send_chart_matching(st, rrdset_flag_get(st)))) {
+        return false;
+    }
+
+    BUFFER *wb = sender_start(host->sender);
+    rrdpush_send_chart_definition(wb, st);
+    sender_thread_buffer_free();
+
+    return true;
+}
+
diff --git a/src/streaming/protocol/command-claimed_id.c b/src/streaming/protocol/command-claimed_id.c
new file mode 100644
index 000000000..5392e1d3b
--- /dev/null
+++ b/src/streaming/protocol/command-claimed_id.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "commands.h"
+#include "plugins.d/pluginsd_internals.h"
+
+PARSER_RC rrdpush_receiver_pluginsd_claimed_id(char **words, size_t num_words, PARSER *parser) {
+    const char *machine_guid_str = get_word(words, num_words, 1);
+    const char *claim_id_str = get_word(words, num_words, 2);
+
+    if (!machine_guid_str || !claim_id_str) {
+        netdata_log_error("PLUGINSD: command CLAIMED_ID is malformed, machine_guid '%s', claim_id '%s'",
+                          machine_guid_str ? machine_guid_str : "[unset]",
+                          claim_id_str ? claim_id_str : "[unset]");
+        return PARSER_RC_ERROR;
+    }
+
+    RRDHOST *host = parser->user.host;
+
+    nd_uuid_t machine_uuid;
+    if(uuid_parse(machine_guid_str, machine_uuid)) {
+        netdata_log_error("PLUGINSD: the machine guid parameter of the CLAIMED_ID command is not a valid UUID. "
+                          "Received: '%s'.", machine_guid_str);
+        return PARSER_RC_ERROR;
+    }
+
+    nd_uuid_t claim_uuid;
+    if(strcmp(claim_id_str, "NULL") == 0)
+        uuid_clear(claim_uuid);
+
+    else if(uuid_parse(claim_id_str, claim_uuid) != 0) {
+        netdata_log_error("PLUGINSD: the claim id parameter of the CLAIMED_ID command is not a valid UUID. "
+                          "Received: '%s'.", claim_id_str);
+        return PARSER_RC_ERROR;
+    }
+
+    if(strcmp(machine_guid_str, host->machine_guid) != 0) {
+        netdata_log_error("PLUGINSD: received claim id for host '%s' but it came over the connection of '%s'",
+                          machine_guid_str, host->machine_guid);
+        return PARSER_RC_OK; // the message itself is OK; the problem must be somewhere else
+    }
+
+    if(host == localhost) {
+        netdata_log_error("PLUGINSD: CLAIMED_ID command cannot be used to set the claimed id of localhost. 
" + "Received: '%s'.", claim_id_str); + return PARSER_RC_OK; + } + + if(!uuid_is_null(claim_uuid)) { + uuid_copy(host->aclk.claim_id_of_origin.uuid, claim_uuid); + rrdpush_sender_send_claimed_id(host); + } + + return PARSER_RC_OK; +} + +void rrdpush_sender_send_claimed_id(RRDHOST *host) { + if(!stream_has_capability(host->sender, STREAM_CAP_CLAIM)) + return; + + if(unlikely(!rrdhost_can_send_definitions_to_parent(host))) + return; + + BUFFER *wb = sender_start(host->sender); + + char str[UUID_STR_LEN] = ""; + ND_UUID uuid = host->aclk.claim_id_of_origin; + if(!UUIDiszero(uuid)) + uuid_unparse_lower(uuid.uuid, str); + else + strncpyz(str, "NULL", sizeof(str) - 1); + + buffer_sprintf(wb, PLUGINSD_KEYWORD_CLAIMED_ID " '%s' '%s'\n", + host->machine_guid, str); + + sender_commit(host->sender, wb, STREAM_TRAFFIC_TYPE_METADATA); + + sender_thread_buffer_free(); +} diff --git a/src/streaming/protocol/command-function.c b/src/streaming/protocol/command-function.c new file mode 100644 index 000000000..d9b28eb4e --- /dev/null +++ b/src/streaming/protocol/command-function.c @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "commands.h" +#include "plugins.d/pluginsd_internals.h" + +void rrdpush_send_global_functions(RRDHOST *host) { + if(!stream_has_capability(host->sender, STREAM_CAP_FUNCTIONS)) + return; + + if(unlikely(!rrdhost_can_send_definitions_to_parent(host))) + return; + + BUFFER *wb = sender_start(host->sender); + + rrd_global_functions_expose_rrdpush(host, wb, stream_has_capability(host->sender, STREAM_CAP_DYNCFG)); + + sender_commit(host->sender, wb, STREAM_TRAFFIC_TYPE_FUNCTIONS); + + sender_thread_buffer_free(); +} diff --git a/src/streaming/protocol/command-host-labels.c b/src/streaming/protocol/command-host-labels.c new file mode 100644 index 000000000..7c2a2d0dd --- /dev/null +++ b/src/streaming/protocol/command-host-labels.c @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "commands.h" +#include "plugins.d/pluginsd_internals.h" + +static int send_labels_callback(const char *name, const char *value, RRDLABEL_SRC ls, void *data) { + BUFFER *wb = (BUFFER *)data; + buffer_sprintf(wb, "LABEL \"%s\" = %d \"%s\"\n", name, ls, value); + return 1; +} + +void rrdpush_send_host_labels(RRDHOST *host) { + if(unlikely(!rrdhost_can_send_definitions_to_parent(host) + || !stream_has_capability(host->sender, STREAM_CAP_HLABELS))) + return; + + BUFFER *wb = sender_start(host->sender); + + rrdlabels_walkthrough_read(host->rrdlabels, send_labels_callback, wb); + buffer_sprintf(wb, "OVERWRITE %s\n", "labels"); + + sender_commit(host->sender, wb, STREAM_TRAFFIC_TYPE_METADATA); + + sender_thread_buffer_free(); +} diff --git a/src/streaming/protocol/command-host-variables.c b/src/streaming/protocol/command-host-variables.c new file mode 100644 index 000000000..83e4990d6 --- /dev/null +++ b/src/streaming/protocol/command-host-variables.c @@ -0,0 +1,52 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "commands.h" +#include "plugins.d/pluginsd_internals.h" + +static inline void rrdpush_sender_add_host_variable_to_buffer(BUFFER *wb, const RRDVAR_ACQUIRED *rva) { + buffer_sprintf( + wb + , "VARIABLE HOST %s = " NETDATA_DOUBLE_FORMAT "\n" + , rrdvar_name(rva) + , rrdvar2number(rva) + ); + + netdata_log_debug(D_STREAM, "RRDVAR pushed HOST VARIABLE %s = " NETDATA_DOUBLE_FORMAT, rrdvar_name(rva), rrdvar2number(rva)); +} + +void rrdpush_sender_send_this_host_variable_now(RRDHOST *host, const RRDVAR_ACQUIRED *rva) { + 
if(rrdhost_can_send_definitions_to_parent(host)) { + BUFFER *wb = sender_start(host->sender); + rrdpush_sender_add_host_variable_to_buffer(wb, rva); + sender_commit(host->sender, wb, STREAM_TRAFFIC_TYPE_METADATA); + sender_thread_buffer_free(); + } +} + +struct custom_host_variables_callback { + BUFFER *wb; +}; + +static int rrdpush_sender_thread_custom_host_variables_callback(const DICTIONARY_ITEM *item __maybe_unused, void *rrdvar_ptr __maybe_unused, void *struct_ptr) { + const RRDVAR_ACQUIRED *rv = (const RRDVAR_ACQUIRED *)item; + struct custom_host_variables_callback *tmp = struct_ptr; + BUFFER *wb = tmp->wb; + + rrdpush_sender_add_host_variable_to_buffer(wb, rv); + return 1; +} + +void rrdpush_sender_thread_send_custom_host_variables(RRDHOST *host) { + if(rrdhost_can_send_definitions_to_parent(host)) { + BUFFER *wb = sender_start(host->sender); + struct custom_host_variables_callback tmp = { + .wb = wb + }; + int ret = rrdvar_walkthrough_read(host->rrdvars, rrdpush_sender_thread_custom_host_variables_callback, &tmp); + (void)ret; + sender_commit(host->sender, wb, STREAM_TRAFFIC_TYPE_METADATA); + sender_thread_buffer_free(); + + netdata_log_debug(D_STREAM, "RRDVAR sent %d VARIABLES", ret); + } +} diff --git a/src/streaming/protocol/command-nodeid.c b/src/streaming/protocol/command-nodeid.c new file mode 100644 index 000000000..85ace83c8 --- /dev/null +++ b/src/streaming/protocol/command-nodeid.c @@ -0,0 +1,128 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "commands.h" +#include "plugins.d/pluginsd_internals.h" + +// the child disconnected from the parent, and it has to clear the parent's claim id +void rrdpush_sender_clear_parent_claim_id(RRDHOST *host) { + host->aclk.claim_id_of_parent = UUID_ZERO; +} + +// the parent sends to the child its claim id, node id and cloud url +void rrdpush_receiver_send_node_and_claim_id_to_child(RRDHOST *host) { + if(host == localhost || UUIDiszero(host->node_id)) return; + + spinlock_lock(&host->receiver_lock); + if(host->receiver && stream_has_capability(host->receiver, STREAM_CAP_NODE_ID)) { + char node_id_str[UUID_STR_LEN] = ""; + uuid_unparse_lower(host->node_id.uuid, node_id_str); + + CLAIM_ID claim_id = claim_id_get(); + + if((!claim_id_is_set(claim_id) || !aclk_online())) { + // the agent is not claimed or not connected, just use parent claim id + // to allow the connection flow. + // this may be zero and it is ok. + claim_id.uuid = host->aclk.claim_id_of_parent; + uuid_unparse_lower(claim_id.uuid.uuid, claim_id.str); + } + + char buf[4096]; + snprintfz(buf, sizeof(buf), + PLUGINSD_KEYWORD_NODE_ID " '%s' '%s' '%s'\n", + claim_id.str, node_id_str, cloud_config_url_get()); + + send_to_plugin(buf, __atomic_load_n(&host->receiver->parser, __ATOMIC_RELAXED)); + } + spinlock_unlock(&host->receiver_lock); +} + +// the sender of the child receives node id, claim id and cloud url from the receiver of the parent +void rrdpush_sender_get_node_and_claim_id_from_parent(struct sender_state *s) { + char *claim_id_str = get_word(s->line.words, s->line.num_words, 1); + char *node_id_str = get_word(s->line.words, s->line.num_words, 2); + char *url = get_word(s->line.words, s->line.num_words, 3); + + bool claimed = is_agent_claimed(); + bool update_node_id = false; + + ND_UUID claim_id; + if (uuid_parse(claim_id_str ? claim_id_str : "", claim_id.uuid) != 0) { + nd_log(NDLS_DAEMON, NDLP_ERR, + "STREAM %s [send to %s] received invalid claim id '%s'", + rrdhost_hostname(s->host), s->connected_to, + claim_id_str ? 
claim_id_str : "(unset)"); + return; + } + + ND_UUID node_id; + if(uuid_parse(node_id_str ? node_id_str : "", node_id.uuid) != 0) { + nd_log(NDLS_DAEMON, NDLP_ERR, + "STREAM %s [send to %s] received an invalid node id '%s'", + rrdhost_hostname(s->host), s->connected_to, + node_id_str ? node_id_str : "(unset)"); + return; + } + + if (!UUIDiszero(s->host->aclk.claim_id_of_parent) && !UUIDeq(s->host->aclk.claim_id_of_parent, claim_id)) + nd_log(NDLS_DAEMON, NDLP_INFO, + "STREAM %s [send to %s] changed parent's claim id to %s", + rrdhost_hostname(s->host), s->connected_to, + claim_id_str ? claim_id_str : "(unset)"); + + if(!UUIDiszero(s->host->node_id) && !UUIDeq(s->host->node_id, node_id)) { + if(claimed) { + nd_log(NDLS_DAEMON, NDLP_ERR, + "STREAM %s [send to %s] parent reports different node id '%s', but we are claimed. Ignoring it.", + rrdhost_hostname(s->host), s->connected_to, + node_id_str ? node_id_str : "(unset)"); + return; + } + else { + update_node_id = true; + nd_log(NDLS_DAEMON, NDLP_WARNING, + "STREAM %s [send to %s] changed node id to %s", + rrdhost_hostname(s->host), s->connected_to, + node_id_str ? node_id_str : "(unset)"); + } + } + + if(!url || !*url) { + nd_log(NDLS_DAEMON, NDLP_ERR, + "STREAM %s [send to %s] received an invalid cloud URL '%s'", + rrdhost_hostname(s->host), s->connected_to, + url ? url : "(unset)"); + return; + } + + s->host->aclk.claim_id_of_parent = claim_id; + + // There are some very strange corner cases here: + // + // - Agent is claimed but offline, and it receives node_id and cloud_url from a different Netdata Cloud. + // - Agent is configured to talk to an on-prem Netdata Cloud, it is offline, but the parent is connected + // to a different Netdata Cloud. + // + // The solution below, tries to get the agent online, using the latest information. + // So, if the agent is not claimed or not connected, we inherit whatever information sent from the parent, + // to allow the user to work with it. + + if(claimed && aclk_online()) + // we are directly claimed and connected, ignore node id and cloud url + return; + + bool node_id_updated = false; + if(UUIDiszero(s->host->node_id) || update_node_id) { + s->host->node_id = node_id; + node_id_updated = true; + } + + // we change the URL, to allow the agent dashboard to work with Netdata Cloud on-prem, if any. 
+ cloud_config_url_set(url); + + // send it down the line (to children) + rrdpush_receiver_send_node_and_claim_id_to_child(s->host); + + if(node_id_updated) + stream_path_node_id_updated(s->host); +} diff --git a/src/streaming/protocol/commands.c b/src/streaming/protocol/commands.c new file mode 100644 index 000000000..e9e16bdac --- /dev/null +++ b/src/streaming/protocol/commands.c @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "commands.h" + +RRDSET_STREAM_BUFFER rrdset_push_metric_initialize(RRDSET *st, time_t wall_clock_time) { + RRDHOST *host = st->rrdhost; + + // fetch the flags we need to check with one atomic operation + RRDHOST_FLAGS host_flags = __atomic_load_n(&host->flags, __ATOMIC_SEQ_CST); + + // check if we are not connected + if(unlikely(!(host_flags & RRDHOST_FLAG_RRDPUSH_SENDER_READY_4_METRICS))) { + + if(unlikely(!(host_flags & (RRDHOST_FLAG_RRDPUSH_SENDER_SPAWN | RRDHOST_FLAG_RRDPUSH_RECEIVER_DISCONNECTED)))) + rrdpush_sender_thread_spawn(host); + + if(unlikely(!(host_flags & RRDHOST_FLAG_RRDPUSH_SENDER_LOGGED_STATUS))) { + rrdhost_flag_set(host, RRDHOST_FLAG_RRDPUSH_SENDER_LOGGED_STATUS); + nd_log_daemon(NDLP_NOTICE, "STREAM %s [send]: not ready - collected metrics are not sent to parent.", rrdhost_hostname(host)); + } + + return (RRDSET_STREAM_BUFFER) { .wb = NULL, }; + } + else if(unlikely(host_flags & RRDHOST_FLAG_RRDPUSH_SENDER_LOGGED_STATUS)) { + nd_log_daemon(NDLP_INFO, "STREAM %s [send]: sending metrics to parent...", rrdhost_hostname(host)); + rrdhost_flag_clear(host, RRDHOST_FLAG_RRDPUSH_SENDER_LOGGED_STATUS); + } + + if(unlikely(host_flags & RRDHOST_FLAG_GLOBAL_FUNCTIONS_UPDATED)) { + BUFFER *wb = sender_start(host->sender); + rrd_global_functions_expose_rrdpush(host, wb, stream_has_capability(host->sender, STREAM_CAP_DYNCFG)); + sender_commit(host->sender, wb, STREAM_TRAFFIC_TYPE_FUNCTIONS); + } + + bool exposed_upstream = rrdset_check_upstream_exposed(st); + RRDSET_FLAGS rrdset_flags = rrdset_flag_get(st); + bool replication_in_progress = !(rrdset_flags & RRDSET_FLAG_SENDER_REPLICATION_FINISHED); + + if(unlikely((exposed_upstream && replication_in_progress) || + !should_send_chart_matching(st, rrdset_flags))) + return (RRDSET_STREAM_BUFFER) { .wb = NULL, }; + + if(unlikely(!exposed_upstream)) { + BUFFER *wb = sender_start(host->sender); + replication_in_progress = rrdpush_send_chart_definition(wb, st); + } + + if(replication_in_progress) + return (RRDSET_STREAM_BUFFER) { .wb = NULL, }; + + return (RRDSET_STREAM_BUFFER) { + .capabilities = host->sender->capabilities, + .v2 = stream_has_capability(host->sender, STREAM_CAP_INTERPOLATED), + .rrdset_flags = rrdset_flags, + .wb = sender_start(host->sender), + .wall_clock_time = wall_clock_time, + }; +} diff --git a/src/streaming/protocol/commands.h b/src/streaming/protocol/commands.h new file mode 100644 index 000000000..81344175c --- /dev/null +++ b/src/streaming/protocol/commands.h @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_STREAMING_PROTCOL_COMMANDS_H +#define NETDATA_STREAMING_PROTCOL_COMMANDS_H + +#include "database/rrd.h" +#include "../rrdpush.h" + +typedef struct rrdset_stream_buffer { + STREAM_CAPABILITIES capabilities; + bool v2; + bool begin_v2_added; + time_t wall_clock_time; + RRDSET_FLAGS rrdset_flags; + time_t last_point_end_time_s; + BUFFER *wb; +} RRDSET_STREAM_BUFFER; + +RRDSET_STREAM_BUFFER rrdset_push_metric_initialize(RRDSET *st, time_t wall_clock_time); + +void rrdpush_sender_get_node_and_claim_id_from_parent(struct 
sender_state *s); +void rrdpush_receiver_send_node_and_claim_id_to_child(RRDHOST *host); +void rrdpush_sender_clear_parent_claim_id(RRDHOST *host); + +void rrdpush_sender_send_claimed_id(RRDHOST *host); + +void rrdpush_send_global_functions(RRDHOST *host); +void rrdpush_send_host_labels(RRDHOST *host); + +void rrdpush_sender_thread_send_custom_host_variables(RRDHOST *host); +void rrdpush_sender_send_this_host_variable_now(RRDHOST *host, const RRDVAR_ACQUIRED *rva); + +bool rrdpush_send_chart_definition(BUFFER *wb, RRDSET *st); +bool rrdset_push_chart_definition_now(RRDSET *st); +bool should_send_chart_matching(RRDSET *st, RRDSET_FLAGS flags); + +void rrdset_push_metrics_v1(RRDSET_STREAM_BUFFER *rsb, RRDSET *st); +void rrddim_push_metrics_v2(RRDSET_STREAM_BUFFER *rsb, RRDDIM *rd, usec_t point_end_time_ut, NETDATA_DOUBLE n, SN_FLAGS flags); +void rrdset_push_metrics_finished(RRDSET_STREAM_BUFFER *rsb, RRDSET *st); + +#endif //NETDATA_STREAMING_PROTCOL_COMMANDS_H diff --git a/src/streaming/receiver.c b/src/streaming/receiver.c index 0c0da2121..6c15004a3 100644 --- a/src/streaming/receiver.c +++ b/src/streaming/receiver.c @@ -3,12 +3,13 @@ #include "rrdpush.h" #include "web/server/h2o/http_server.h" -extern struct config stream_config; +// When a child disconnects this is the maximum we will wait +// before we update the cloud that the child is offline +#define MAX_CHILD_DISC_DELAY (30000) +#define MAX_CHILD_DISC_TOLERANCE (125 / 100) void receiver_state_free(struct receiver_state *rpt) { -#ifdef ENABLE_HTTPS netdata_ssl_close(&rpt->ssl); -#endif if(rpt->fd != -1) { internal_error(true, "closing socket..."); @@ -36,7 +37,7 @@ void receiver_state_free(struct receiver_state *rpt) { freez(rpt); } -#include "collectors/plugins.d/pluginsd_parser.h" +#include "plugins.d/pluginsd_parser.h" // IMPORTANT: to add workers, you have to edit WORKER_PARSER_FIRST_JOB accordingly #define WORKER_RECEIVER_JOB_BYTES_READ (WORKER_PARSER_FIRST_JOB - 1) @@ -71,9 +72,7 @@ static inline int read_stream(struct receiver_state *r, char* buffer, size_t siz errno_clear(); switch(wait_on_socket_or_cancel_with_timeout( -#ifdef ENABLE_HTTPS &r->ssl, -#endif r->fd, 0, POLLIN, NULL)) { case 0: // data are waiting @@ -93,14 +92,10 @@ static inline int read_stream(struct receiver_state *r, char* buffer, size_t siz return -2; } -#ifdef ENABLE_HTTPS if (SSL_connection(&r->ssl)) bytes_read = netdata_ssl_read(&r->ssl, buffer, size); else bytes_read = read(r->fd, buffer, size); -#else - bytes_read = read(r->fd, buffer, size); -#endif } while(bytes_read < 0 && errno == EINTR && tries--); @@ -325,7 +320,7 @@ static size_t streaming_parser(struct receiver_state *rpt, struct plugind *cd, i .capabilities = rpt->capabilities, }; - parser = parser_init(&user, NULL, NULL, fd, PARSER_INPUT_SPLIT, ssl); + parser = parser_init(&user, fd, fd, PARSER_INPUT_SPLIT, ssl); } #ifdef ENABLE_H2O @@ -336,10 +331,6 @@ static size_t streaming_parser(struct receiver_state *rpt, struct plugind *cd, i rrd_collector_started(); - // this keeps the parser with its current value - // so, parser needs to be allocated before pushing it - CLEANUP_FUNCTION_REGISTER(pluginsd_process_thread_cleanup) parser_ptr = parser; - bool compressed_connection = rrdpush_decompression_initialize(rpt); buffered_reader_init(&rpt->reader); @@ -365,6 +356,9 @@ static size_t streaming_parser(struct receiver_state *rpt, struct plugind *cd, i }; ND_LOG_STACK_PUSH(lgs); + __atomic_store_n(&rpt->parser, parser, __ATOMIC_RELAXED); + 
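    /*
     * Editor's note: the relaxed atomic store above publishes the parser so
     * that rrdpush_receiver_send_node_and_claim_id_to_child() (see
     * command-nodeid.c earlier in this patch) can load it with
     * __atomic_load_n() and hand it to send_to_plugin(). The host's
     * receiver_lock - not the atomic itself - is what keeps the parser alive
     * while it is used; the atomic only guarantees a torn-free read and
     * write of the pointer.
     */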
rrdpush_receiver_send_node_and_claim_id_to_child(rpt->host); + while(!receiver_should_stop(rpt)) { if(!buffered_reader_next_line(&rpt->reader, buffer)) { @@ -389,6 +383,17 @@ static size_t streaming_parser(struct receiver_state *rpt, struct plugind *cd, i buffer->len = 0; buffer->buffer[0] = '\0'; } + + // cleanup the sender buffer, because we may end-up reusing an incomplete buffer + sender_thread_buffer_free(); + parser->user.v2.stream_buffer.wb = NULL; + + // make sure send_to_plugin() will not write any data to the socket + spinlock_lock(&parser->writer.spinlock); + parser->fd_output = -1; + parser->ssl_output = NULL; + spinlock_unlock(&parser->writer.spinlock); + result = parser->user.data_collections_count; return result; } @@ -407,7 +412,7 @@ static bool rrdhost_set_receiver(RRDHOST *host, struct receiver_state *rpt) { bool signal_rrdcontext = false; bool set_this = false; - netdata_mutex_lock(&host->receiver_lock); + spinlock_lock(&host->receiver_lock); if (!host->receiver) { rrdhost_flag_clear(host, RRDHOST_FLAG_ORPHAN); @@ -433,7 +438,7 @@ static bool rrdhost_set_receiver(RRDHOST *host, struct receiver_state *rpt) { } } - host->health_log.health_log_history = rpt->config.alarms_history; + host->health_log.health_log_retention_s = rpt->config.alarms_history; // this is a test // if(rpt->hops <= host->sender->hops) @@ -450,7 +455,7 @@ static bool rrdhost_set_receiver(RRDHOST *host, struct receiver_state *rpt) { set_this = true; } - netdata_mutex_unlock(&host->receiver_lock); + spinlock_unlock(&host->receiver_lock); if(signal_rrdcontext) rrdcontext_host_child_connected(host); @@ -460,47 +465,56 @@ static bool rrdhost_set_receiver(RRDHOST *host, struct receiver_state *rpt) { static void rrdhost_clear_receiver(struct receiver_state *rpt) { RRDHOST *host = rpt->host; - if(host) { - bool signal_rrdcontext = false; - netdata_mutex_lock(&host->receiver_lock); + if(!host) return; + spinlock_lock(&host->receiver_lock); + { // Make sure that we detach this thread and don't kill a freshly arriving receiver - if(host->receiver == rpt) { + + if (host->receiver == rpt) { + spinlock_unlock(&host->receiver_lock); + { + // run all these without having the receiver lock + + stream_path_child_disconnected(host); + rrdpush_sender_thread_stop(host, STREAM_HANDSHAKE_DISCONNECT_RECEIVER_LEFT, false); + rrdpush_receiver_replication_reset(host); + rrdcontext_host_child_disconnected(host); + + if (rpt->config.health_enabled) + rrdcalc_child_disconnected(host); + + rrdpush_reset_destinations_postpone_time(host); + } + spinlock_lock(&host->receiver_lock); + + // now we have the lock again + __atomic_sub_fetch(&localhost->connected_children_count, 1, __ATOMIC_RELAXED); rrdhost_flag_set(rpt->host, RRDHOST_FLAG_RRDPUSH_RECEIVER_DISCONNECTED); host->trigger_chart_obsoletion_check = 0; host->child_connect_time = 0; host->child_disconnected_time = now_realtime_sec(); - host->health.health_enabled = 0; - rrdpush_sender_thread_stop(host, STREAM_HANDSHAKE_DISCONNECT_RECEIVER_LEFT, false); - - signal_rrdcontext = true; - rrdpush_receiver_replication_reset(host); - + host->rrdpush_last_receiver_exit_reason = rpt->exit.reason; rrdhost_flag_set(host, RRDHOST_FLAG_ORPHAN); host->receiver = NULL; - host->rrdpush_last_receiver_exit_reason = rpt->exit.reason; - - if(rpt->config.health_enabled) - rrdcalc_child_disconnected(host); } + } - netdata_mutex_unlock(&host->receiver_lock); - - if(signal_rrdcontext) - rrdcontext_host_child_disconnected(host); + // this must be cleared with the receiver lock + 
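    // Editor's note: holding receiver_lock here closes the race with
    // rrdpush_receiver_send_node_and_claim_id_to_child(), which reads
    // rpt->parser under the same lock - so a reader can never observe a
    // parser that pluginsd_process_cleanup() below has already freed.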
pluginsd_process_cleanup(rpt->parser); + __atomic_store_n(&rpt->parser, NULL, __ATOMIC_RELAXED); - rrdpush_reset_destinations_postpone_time(host); - } + spinlock_unlock(&host->receiver_lock); } bool stop_streaming_receiver(RRDHOST *host, STREAM_HANDSHAKE reason) { bool ret = false; - netdata_mutex_lock(&host->receiver_lock); + spinlock_lock(&host->receiver_lock); if(host->receiver) { if(!host->receiver->exit.shutdown) { @@ -514,12 +528,12 @@ bool stop_streaming_receiver(RRDHOST *host, STREAM_HANDSHAKE reason) { int count = 2000; while (host->receiver && count-- > 0) { - netdata_mutex_unlock(&host->receiver_lock); + spinlock_unlock(&host->receiver_lock); // let the lock for the receiver thread to exit sleep_usec(1 * USEC_PER_MS); - netdata_mutex_lock(&host->receiver_lock); + spinlock_lock(&host->receiver_lock); } if(host->receiver) @@ -531,16 +545,14 @@ bool stop_streaming_receiver(RRDHOST *host, STREAM_HANDSHAKE reason) { else ret = true; - netdata_mutex_unlock(&host->receiver_lock); + spinlock_unlock(&host->receiver_lock); return ret; } static void rrdpush_send_error_on_taken_over_connection(struct receiver_state *rpt, const char *msg) { (void) send_timeout( -#ifdef ENABLE_HTTPS &rpt->ssl, -#endif rpt->fd, (char *)msg, strlen(msg), @@ -548,7 +560,7 @@ static void rrdpush_send_error_on_taken_over_connection(struct receiver_state *r 5); } -void rrdpush_receive_log_status(struct receiver_state *rpt, const char *msg, const char *status, ND_LOG_FIELD_PRIORITY priority) { +static void rrdpush_receive_log_status(struct receiver_state *rpt, const char *msg, const char *status, ND_LOG_FIELD_PRIORITY priority) { // this function may be called BEFORE we spawn the receiver thread // so, we need to add the fields again (it does not harm) ND_LOG_STACK lgs[] = { @@ -582,26 +594,26 @@ static void rrdpush_receive(struct receiver_state *rpt) rpt->config.health_enabled = health_plugin_enabled(); rpt->config.alarms_delay = 60; - rpt->config.alarms_history = HEALTH_LOG_DEFAULT_HISTORY; + rpt->config.alarms_history = HEALTH_LOG_RETENTION_DEFAULT; - rpt->config.rrdpush_enabled = (int)default_rrdpush_enabled; - rpt->config.rrdpush_destination = default_rrdpush_destination; - rpt->config.rrdpush_api_key = default_rrdpush_api_key; - rpt->config.rrdpush_send_charts_matching = default_rrdpush_send_charts_matching; + rpt->config.rrdpush_enabled = (int)stream_conf_send_enabled; + rpt->config.rrdpush_destination = stream_conf_send_destination; + rpt->config.rrdpush_api_key = stream_conf_send_api_key; + rpt->config.rrdpush_send_charts_matching = stream_conf_send_charts_matching; - rpt->config.rrdpush_enable_replication = default_rrdpush_enable_replication; - rpt->config.rrdpush_seconds_to_replicate = default_rrdpush_seconds_to_replicate; - rpt->config.rrdpush_replication_step = default_rrdpush_replication_step; + rpt->config.rrdpush_enable_replication = stream_conf_replication_enabled; + rpt->config.rrdpush_seconds_to_replicate = stream_conf_replication_period; + rpt->config.rrdpush_replication_step = stream_conf_replication_step; - rpt->config.update_every = (int)appconfig_get_number(&stream_config, rpt->machine_guid, "update every", rpt->config.update_every); + rpt->config.update_every = (int)appconfig_get_duration_seconds(&stream_config, rpt->machine_guid, "update every", rpt->config.update_every); if(rpt->config.update_every < 0) rpt->config.update_every = 1; - rpt->config.history = (int)appconfig_get_number(&stream_config, rpt->key, "default history", rpt->config.history); - rpt->config.history = 
(int)appconfig_get_number(&stream_config, rpt->machine_guid, "history", rpt->config.history); + rpt->config.history = (int)appconfig_get_number(&stream_config, rpt->key, "retention", rpt->config.history); + rpt->config.history = (int)appconfig_get_number(&stream_config, rpt->machine_guid, "retention", rpt->config.history); if(rpt->config.history < 5) rpt->config.history = 5; - rpt->config.mode = rrd_memory_mode_id(appconfig_get(&stream_config, rpt->key, "default memory mode", rrd_memory_mode_name(rpt->config.mode))); - rpt->config.mode = rrd_memory_mode_id(appconfig_get(&stream_config, rpt->machine_guid, "memory mode", rrd_memory_mode_name(rpt->config.mode))); + rpt->config.mode = rrd_memory_mode_id(appconfig_get(&stream_config, rpt->key, "db", rrd_memory_mode_name(rpt->config.mode))); + rpt->config.mode = rrd_memory_mode_id(appconfig_get(&stream_config, rpt->machine_guid, "db", rrd_memory_mode_name(rpt->config.mode))); if (unlikely(rpt->config.mode == RRD_MEMORY_MODE_DBENGINE && !dbengine_enabled)) { netdata_log_error("STREAM '%s' [receive from %s:%s]: " @@ -616,34 +628,34 @@ static void rrdpush_receive(struct receiver_state *rpt) rpt->config.health_enabled = appconfig_get_boolean_ondemand(&stream_config, rpt->key, "health enabled by default", rpt->config.health_enabled); rpt->config.health_enabled = appconfig_get_boolean_ondemand(&stream_config, rpt->machine_guid, "health enabled", rpt->config.health_enabled); - rpt->config.alarms_delay = appconfig_get_number(&stream_config, rpt->key, "default postpone alarms on connect seconds", rpt->config.alarms_delay); - rpt->config.alarms_delay = appconfig_get_number(&stream_config, rpt->machine_guid, "postpone alarms on connect seconds", rpt->config.alarms_delay); + rpt->config.alarms_delay = appconfig_get_duration_seconds(&stream_config, rpt->key, "postpone alerts on connect", rpt->config.alarms_delay); + rpt->config.alarms_delay = appconfig_get_duration_seconds(&stream_config, rpt->machine_guid, "postpone alerts on connect", rpt->config.alarms_delay); - rpt->config.alarms_history = appconfig_get_number(&stream_config, rpt->key, "default health log history", rpt->config.alarms_history); - rpt->config.alarms_history = appconfig_get_number(&stream_config, rpt->machine_guid, "health log history", rpt->config.alarms_history); + rpt->config.alarms_history = appconfig_get_duration_seconds(&stream_config, rpt->key, "health log retention", rpt->config.alarms_history); + rpt->config.alarms_history = appconfig_get_duration_seconds(&stream_config, rpt->machine_guid, "health log retention", rpt->config.alarms_history); - rpt->config.rrdpush_enabled = appconfig_get_boolean(&stream_config, rpt->key, "default proxy enabled", rpt->config.rrdpush_enabled); + rpt->config.rrdpush_enabled = appconfig_get_boolean(&stream_config, rpt->key, "proxy enabled", rpt->config.rrdpush_enabled); rpt->config.rrdpush_enabled = appconfig_get_boolean(&stream_config, rpt->machine_guid, "proxy enabled", rpt->config.rrdpush_enabled); - rpt->config.rrdpush_destination = appconfig_get(&stream_config, rpt->key, "default proxy destination", rpt->config.rrdpush_destination); + rpt->config.rrdpush_destination = appconfig_get(&stream_config, rpt->key, "proxy destination", rpt->config.rrdpush_destination); rpt->config.rrdpush_destination = appconfig_get(&stream_config, rpt->machine_guid, "proxy destination", rpt->config.rrdpush_destination); - rpt->config.rrdpush_api_key = appconfig_get(&stream_config, rpt->key, "default proxy api key", rpt->config.rrdpush_api_key); + 
rpt->config.rrdpush_api_key = appconfig_get(&stream_config, rpt->key, "proxy api key", rpt->config.rrdpush_api_key); rpt->config.rrdpush_api_key = appconfig_get(&stream_config, rpt->machine_guid, "proxy api key", rpt->config.rrdpush_api_key); - rpt->config.rrdpush_send_charts_matching = appconfig_get(&stream_config, rpt->key, "default proxy send charts matching", rpt->config.rrdpush_send_charts_matching); + rpt->config.rrdpush_send_charts_matching = appconfig_get(&stream_config, rpt->key, "proxy send charts matching", rpt->config.rrdpush_send_charts_matching); rpt->config.rrdpush_send_charts_matching = appconfig_get(&stream_config, rpt->machine_guid, "proxy send charts matching", rpt->config.rrdpush_send_charts_matching); rpt->config.rrdpush_enable_replication = appconfig_get_boolean(&stream_config, rpt->key, "enable replication", rpt->config.rrdpush_enable_replication); rpt->config.rrdpush_enable_replication = appconfig_get_boolean(&stream_config, rpt->machine_guid, "enable replication", rpt->config.rrdpush_enable_replication); - rpt->config.rrdpush_seconds_to_replicate = appconfig_get_number(&stream_config, rpt->key, "seconds to replicate", rpt->config.rrdpush_seconds_to_replicate); - rpt->config.rrdpush_seconds_to_replicate = appconfig_get_number(&stream_config, rpt->machine_guid, "seconds to replicate", rpt->config.rrdpush_seconds_to_replicate); + rpt->config.rrdpush_seconds_to_replicate = appconfig_get_duration_seconds(&stream_config, rpt->key, "replication period", rpt->config.rrdpush_seconds_to_replicate); + rpt->config.rrdpush_seconds_to_replicate = appconfig_get_duration_seconds(&stream_config, rpt->machine_guid, "replication period", rpt->config.rrdpush_seconds_to_replicate); - rpt->config.rrdpush_replication_step = appconfig_get_number(&stream_config, rpt->key, "seconds per replication step", rpt->config.rrdpush_replication_step); - rpt->config.rrdpush_replication_step = appconfig_get_number(&stream_config, rpt->machine_guid, "seconds per replication step", rpt->config.rrdpush_replication_step); + rpt->config.rrdpush_replication_step = appconfig_get_number(&stream_config, rpt->key, "replication step", rpt->config.rrdpush_replication_step); + rpt->config.rrdpush_replication_step = appconfig_get_number(&stream_config, rpt->machine_guid, "replication step", rpt->config.rrdpush_replication_step); - rpt->config.rrdpush_compression = default_rrdpush_compression_enabled; + rpt->config.rrdpush_compression = stream_conf_compression_enabled; rpt->config.rrdpush_compression = appconfig_get_boolean(&stream_config, rpt->key, "enable compression", rpt->config.rrdpush_compression); rpt->config.rrdpush_compression = appconfig_get_boolean(&stream_config, rpt->machine_guid, "enable compression", rpt->config.rrdpush_compression); @@ -652,7 +664,7 @@ static void rrdpush_receive(struct receiver_state *rpt) is_ephemeral = appconfig_get_boolean(&stream_config, rpt->machine_guid, "is ephemeral node", is_ephemeral); if(rpt->config.rrdpush_compression) { - char *order = appconfig_get(&stream_config, rpt->key, "compression algorithms order", RRDPUSH_COMPRESSION_ALGORITHMS_ORDER); + const char *order = appconfig_get(&stream_config, rpt->key, "compression algorithms order", RRDPUSH_COMPRESSION_ALGORITHMS_ORDER); order = appconfig_get(&stream_config, rpt->machine_guid, "compression algorithms order", order); rrdpush_parse_compression_order(rpt, order); } @@ -730,11 +742,7 @@ static void rrdpush_receive(struct receiver_state *rpt) , rpt->host->rrd_history_entries , 
rrd_memory_mode_name(rpt->host->rrd_memory_mode) , (rpt->config.health_enabled == CONFIG_BOOLEAN_NO)?"disabled":((rpt->config.health_enabled == CONFIG_BOOLEAN_YES)?"enabled":"auto") -#ifdef ENABLE_HTTPS , (rpt->ssl.conn != NULL) ? " SSL," : "" -#else - , "" -#endif ); #endif // NETDATA_INTERNAL_CHECKS @@ -784,9 +792,7 @@ static void rrdpush_receive(struct receiver_state *rpt) } else { #endif ssize_t bytes_sent = send_timeout( -#ifdef ENABLE_HTTPS &rpt->ssl, -#endif rpt->fd, initial_response, strlen(initial_response), 0, 60); if(bytes_sent != (ssize_t)strlen(initial_response)) { @@ -828,13 +834,9 @@ static void rrdpush_receive(struct receiver_state *rpt) rpt, "connected and ready to receive data", RRDPUSH_STATUS_CONNECTED, NDLP_INFO); -#ifdef ENABLE_ACLK // in case we have cloud connection we inform cloud // new child connected - if (netdata_cloud_enabled) - aclk_host_state_update(rpt->host, 1, 1); -#endif - + schedule_node_state_update(rpt->host, 300); rrdhost_set_is_parent_label(); if (is_ephemeral) @@ -843,50 +845,28 @@ static void rrdpush_receive(struct receiver_state *rpt) // let it reconnect to parent immediately rrdpush_reset_destinations_postpone_time(rpt->host); - size_t count = streaming_parser(rpt, &cd, rpt->fd, -#ifdef ENABLE_HTTPS - (rpt->ssl.conn) ? &rpt->ssl : NULL -#else - NULL -#endif - ); + // receive data + size_t count = streaming_parser(rpt, &cd, rpt->fd, (rpt->ssl.conn) ? &rpt->ssl : NULL); + // the parser stopped receiver_set_exit_reason(rpt, STREAM_HANDSHAKE_DISCONNECT_PARSER_EXIT, false); { char msg[100 + 1]; snprintfz(msg, sizeof(msg) - 1, "disconnected (completed %zu updates)", count); - rrdpush_receive_log_status( - rpt, msg, - RRDPUSH_STATUS_DISCONNECTED, NDLP_WARNING); + rrdpush_receive_log_status(rpt, msg, RRDPUSH_STATUS_DISCONNECTED, NDLP_WARNING); } -#ifdef ENABLE_ACLK // in case we have cloud connection we inform cloud // a child disconnected - if (netdata_cloud_enabled) - aclk_host_state_update(rpt->host, 0, 1); -#endif + STREAM_PATH tmp = rrdhost_stream_path_fetch(rpt->host); + uint64_t total_reboot = (tmp.start_time + tmp.shutdown_time); + schedule_node_state_update(rpt->host, MIN((total_reboot * MAX_CHILD_DISC_TOLERANCE), MAX_CHILD_DISC_DELAY)); cleanup: ; } -static void rrdpush_receiver_thread_cleanup(void *pptr) { - struct receiver_state *rpt = CLEANUP_FUNCTION_GET_PTR(pptr); - if(!rpt) return; - - netdata_log_info("STREAM '%s' [receive from [%s]:%s]: " - "receive thread ended (task id %d)" - , rpt->hostname ? rpt->hostname : "-" - , rpt->client_ip ? rpt->client_ip : "-", rpt->client_port ? rpt->client_port : "-", gettid_cached()); - - worker_unregister(); - rrdhost_clear_receiver(rpt); - receiver_state_free(rpt); - rrdhost_set_is_parent_label(); -} - static bool stream_receiver_log_capabilities(BUFFER *wb, void *ptr) { struct receiver_state *rpt = ptr; if(!rpt) @@ -901,16 +881,11 @@ static bool stream_receiver_log_transport(BUFFER *wb, void *ptr) { if(!rpt) return false; -#ifdef ENABLE_HTTPS buffer_strcat(wb, SSL_connection(&rpt->ssl) ? "https" : "http"); -#else - buffer_strcat(wb, "http"); -#endif return true; } void *rrdpush_receiver_thread(void *ptr) { - CLEANUP_FUNCTION_REGISTER(rrdpush_receiver_thread_cleanup) cleanup_ptr = ptr; worker_register("STREAMRCV"); worker_register_job_custom_metric(WORKER_RECEIVER_JOB_BYTES_READ, @@ -942,5 +917,469 @@ void *rrdpush_receiver_thread(void *ptr) { , rpt->client_port); rrdpush_receive(rpt); + + netdata_log_info("STREAM '%s' [receive from [%s]:%s]: " + "receive thread ended (task id %d)" + , rpt->hostname ? 
rpt->hostname : "-" + , rpt->client_ip ? rpt->client_ip : "-", rpt->client_port ? rpt->client_port : "-", gettid_cached()); + + worker_unregister(); + rrdhost_clear_receiver(rpt); + rrdhost_set_is_parent_label(); + receiver_state_free(rpt); return NULL; } + +int rrdpush_receiver_permission_denied(struct web_client *w) { + // we always respond with the same message and error code + // to prevent an attacker from gaining info about the error + buffer_flush(w->response.data); + buffer_strcat(w->response.data, START_STREAMING_ERROR_NOT_PERMITTED); + return HTTP_RESP_UNAUTHORIZED; +} + +int rrdpush_receiver_too_busy_now(struct web_client *w) { + // we always respond with the same message and error code + // to prevent an attacker from gaining info about the error + buffer_flush(w->response.data); + buffer_strcat(w->response.data, START_STREAMING_ERROR_BUSY_TRY_LATER); + return HTTP_RESP_SERVICE_UNAVAILABLE; +} + +static void rrdpush_receiver_takeover_web_connection(struct web_client *w, struct receiver_state *rpt) { + rpt->fd = w->ifd; + + rpt->ssl.conn = w->ssl.conn; + rpt->ssl.state = w->ssl.state; + + w->ssl = NETDATA_SSL_UNSET_CONNECTION; + + WEB_CLIENT_IS_DEAD(w); + + if(web_server_mode == WEB_SERVER_MODE_STATIC_THREADED) { + web_client_flag_set(w, WEB_CLIENT_FLAG_DONT_CLOSE_SOCKET); + } + else { + if(w->ifd == w->ofd) + w->ifd = w->ofd = -1; + else + w->ifd = -1; + } + + buffer_flush(w->response.data); +} + +int rrdpush_receiver_thread_spawn(struct web_client *w, char *decoded_query_string, void *h2o_ctx __maybe_unused) { + + if(!service_running(ABILITY_STREAMING_CONNECTIONS)) + return rrdpush_receiver_too_busy_now(w); + + struct receiver_state *rpt = callocz(1, sizeof(*rpt)); + rpt->connected_since_s = now_realtime_sec(); + rpt->last_msg_t = now_monotonic_sec(); + rpt->hops = 1; + + rpt->capabilities = STREAM_CAP_INVALID; + +#ifdef ENABLE_H2O + rpt->h2o_ctx = h2o_ctx; +#endif + + __atomic_add_fetch(&netdata_buffers_statistics.rrdhost_receivers, sizeof(*rpt), __ATOMIC_RELAXED); + __atomic_add_fetch(&netdata_buffers_statistics.rrdhost_allocations_size, sizeof(struct rrdhost_system_info), __ATOMIC_RELAXED); + + rpt->system_info = callocz(1, sizeof(struct rrdhost_system_info)); + rpt->system_info->hops = rpt->hops; + + rpt->fd = -1; + rpt->client_ip = strdupz(w->client_ip); + rpt->client_port = strdupz(w->client_port); + + rpt->ssl = NETDATA_SSL_UNSET_CONNECTION; + + rpt->config.update_every = default_rrd_update_every; + + // parse the parameters and fill rpt and rpt->system_info + + while(decoded_query_string) { + char *value = strsep_skip_consecutive_separators(&decoded_query_string, "&"); + if(!value || !*value) continue; + + char *name = strsep_skip_consecutive_separators(&value, "="); + if(!name || !*name) continue; + if(!value || !*value) continue; + + if(!strcmp(name, "key") && !rpt->key) + rpt->key = strdupz(value); + + else if(!strcmp(name, "hostname") && !rpt->hostname) + rpt->hostname = strdupz(value); + + else if(!strcmp(name, "registry_hostname") && !rpt->registry_hostname) + rpt->registry_hostname = strdupz(value); + + else if(!strcmp(name, "machine_guid") && !rpt->machine_guid) + rpt->machine_guid = strdupz(value); + + else if(!strcmp(name, "update_every")) + rpt->config.update_every = (int)strtoul(value, NULL, 0); + + else if(!strcmp(name, "os") && !rpt->os) + rpt->os = strdupz(value); + + else if(!strcmp(name, "timezone") && !rpt->timezone) + rpt->timezone = strdupz(value); + + else if(!strcmp(name, "abbrev_timezone") && !rpt->abbrev_timezone) + rpt->abbrev_timezone = 
strdupz(value); + + else if(!strcmp(name, "utc_offset")) + rpt->utc_offset = (int32_t)strtol(value, NULL, 0); + + else if(!strcmp(name, "hops")) + rpt->hops = rpt->system_info->hops = (uint16_t) strtoul(value, NULL, 0); + + else if(!strcmp(name, "ml_capable")) + rpt->system_info->ml_capable = strtoul(value, NULL, 0); + + else if(!strcmp(name, "ml_enabled")) + rpt->system_info->ml_enabled = strtoul(value, NULL, 0); + + else if(!strcmp(name, "mc_version")) + rpt->system_info->mc_version = strtoul(value, NULL, 0); + + else if(!strcmp(name, "ver") && (rpt->capabilities & STREAM_CAP_INVALID)) + rpt->capabilities = convert_stream_version_to_capabilities(strtoul(value, NULL, 0), NULL, false); + + else { + // An old Netdata child does not have a compatible streaming protocol, map to something sane. + if (!strcmp(name, "NETDATA_SYSTEM_OS_NAME")) + name = "NETDATA_HOST_OS_NAME"; + + else if (!strcmp(name, "NETDATA_SYSTEM_OS_ID")) + name = "NETDATA_HOST_OS_ID"; + + else if (!strcmp(name, "NETDATA_SYSTEM_OS_ID_LIKE")) + name = "NETDATA_HOST_OS_ID_LIKE"; + + else if (!strcmp(name, "NETDATA_SYSTEM_OS_VERSION")) + name = "NETDATA_HOST_OS_VERSION"; + + else if (!strcmp(name, "NETDATA_SYSTEM_OS_VERSION_ID")) + name = "NETDATA_HOST_OS_VERSION_ID"; + + else if (!strcmp(name, "NETDATA_SYSTEM_OS_DETECTION")) + name = "NETDATA_HOST_OS_DETECTION"; + + else if(!strcmp(name, "NETDATA_PROTOCOL_VERSION") && (rpt->capabilities & STREAM_CAP_INVALID)) + rpt->capabilities = convert_stream_version_to_capabilities(1, NULL, false); + + if (unlikely(rrdhost_set_system_info_variable(rpt->system_info, name, value))) { + nd_log_daemon(NDLP_NOTICE, "STREAM '%s' [receive from [%s]:%s]: " + "request has parameter '%s' = '%s', which is not used." + , (rpt->hostname && *rpt->hostname) ? rpt->hostname : "-" + , rpt->client_ip, rpt->client_port + , name, value); + } + } + } + + if (rpt->capabilities & STREAM_CAP_INVALID) + // no version is supplied, assume version 0; + rpt->capabilities = convert_stream_version_to_capabilities(0, NULL, false); + + // find the program name and version + if(w->user_agent && w->user_agent[0]) { + char *t = strchr(w->user_agent, '/'); + if(t && *t) { + *t = '\0'; + t++; + } + + rpt->program_name = strdupz(w->user_agent); + if(t && *t) rpt->program_version = strdupz(t); + } + + // check if we should accept this connection + + if(!rpt->key || !*rpt->key) { + rrdpush_receive_log_status( + rpt, "request without an API key, rejecting connection", + RRDPUSH_STATUS_NO_API_KEY, NDLP_WARNING); + + receiver_state_free(rpt); + return rrdpush_receiver_permission_denied(w); + } + + if(!rpt->hostname || !*rpt->hostname) { + rrdpush_receive_log_status( + rpt, "request without a hostname, rejecting connection", + RRDPUSH_STATUS_NO_HOSTNAME, NDLP_WARNING); + + receiver_state_free(rpt); + return rrdpush_receiver_permission_denied(w); + } + + if(!rpt->registry_hostname) + rpt->registry_hostname = strdupz(rpt->hostname); + + if(!rpt->machine_guid || !*rpt->machine_guid) { + rrdpush_receive_log_status( + rpt, "request without a machine GUID, rejecting connection", + RRDPUSH_STATUS_NO_MACHINE_GUID, NDLP_WARNING); + + receiver_state_free(rpt); + return rrdpush_receiver_permission_denied(w); + } + + { + char buf[GUID_LEN + 1]; + + if (regenerate_guid(rpt->key, buf) == -1) { + rrdpush_receive_log_status( + rpt, "API key is not a valid UUID (use the command uuidgen to generate one)", + RRDPUSH_STATUS_INVALID_API_KEY, NDLP_WARNING); + + receiver_state_free(rpt); + return rrdpush_receiver_permission_denied(w); + } + + if 
(regenerate_guid(rpt->machine_guid, buf) == -1) { + rrdpush_receive_log_status( + rpt, "machine GUID is not a valid UUID", + RRDPUSH_STATUS_INVALID_MACHINE_GUID, NDLP_WARNING); + + receiver_state_free(rpt); + return rrdpush_receiver_permission_denied(w); + } + } + + const char *api_key_type = appconfig_get(&stream_config, rpt->key, "type", "api"); + if(!api_key_type || !*api_key_type) api_key_type = "unknown"; + if(strcmp(api_key_type, "api") != 0) { + rrdpush_receive_log_status( + rpt, "API key is a machine GUID", + RRDPUSH_STATUS_INVALID_API_KEY, NDLP_WARNING); + + receiver_state_free(rpt); + return rrdpush_receiver_permission_denied(w); + } + + if(!appconfig_get_boolean(&stream_config, rpt->key, "enabled", 0)) { + rrdpush_receive_log_status( + rpt, "API key is not enabled", + RRDPUSH_STATUS_API_KEY_DISABLED, NDLP_WARNING); + + receiver_state_free(rpt); + return rrdpush_receiver_permission_denied(w); + } + + { + SIMPLE_PATTERN *key_allow_from = simple_pattern_create( + appconfig_get(&stream_config, rpt->key, "allow from", "*"), + NULL, SIMPLE_PATTERN_EXACT, true); + + if(key_allow_from) { + if(!simple_pattern_matches(key_allow_from, w->client_ip)) { + simple_pattern_free(key_allow_from); + + rrdpush_receive_log_status( + rpt, "API key is not allowed from this IP", + RRDPUSH_STATUS_NOT_ALLOWED_IP, NDLP_WARNING); + + receiver_state_free(rpt); + return rrdpush_receiver_permission_denied(w); + } + + simple_pattern_free(key_allow_from); + } + } + + { + const char *machine_guid_type = appconfig_get(&stream_config, rpt->machine_guid, "type", "machine"); + if (!machine_guid_type || !*machine_guid_type) machine_guid_type = "unknown"; + + if (strcmp(machine_guid_type, "machine") != 0) { + rrdpush_receive_log_status( + rpt, "machine GUID is an API key", + RRDPUSH_STATUS_INVALID_MACHINE_GUID, NDLP_WARNING); + + receiver_state_free(rpt); + return rrdpush_receiver_permission_denied(w); + } + } + + if(!appconfig_get_boolean(&stream_config, rpt->machine_guid, "enabled", 1)) { + rrdpush_receive_log_status( + rpt, "machine GUID is not enabled", + RRDPUSH_STATUS_MACHINE_GUID_DISABLED, NDLP_WARNING); + + receiver_state_free(rpt); + return rrdpush_receiver_permission_denied(w); + } + + { + SIMPLE_PATTERN *machine_allow_from = simple_pattern_create( + appconfig_get(&stream_config, rpt->machine_guid, "allow from", "*"), + NULL, SIMPLE_PATTERN_EXACT, true); + + if(machine_allow_from) { + if(!simple_pattern_matches(machine_allow_from, w->client_ip)) { + simple_pattern_free(machine_allow_from); + + rrdpush_receive_log_status( + rpt, "machine GUID is not allowed from this IP", + RRDPUSH_STATUS_NOT_ALLOWED_IP, NDLP_WARNING); + + receiver_state_free(rpt); + return rrdpush_receiver_permission_denied(w); + } + + simple_pattern_free(machine_allow_from); + } + } + + if (strcmp(rpt->machine_guid, localhost->machine_guid) == 0) { + + rrdpush_receiver_takeover_web_connection(w, rpt); + + rrdpush_receive_log_status( + rpt, "machine GUID is my own", + RRDPUSH_STATUS_LOCALHOST, NDLP_DEBUG); + + char initial_response[HTTP_HEADER_SIZE + 1]; + snprintfz(initial_response, HTTP_HEADER_SIZE, "%s", START_STREAMING_ERROR_SAME_LOCALHOST); + + if(send_timeout( + &rpt->ssl, + rpt->fd, initial_response, strlen(initial_response), 0, 60) != (ssize_t)strlen(initial_response)) { + + nd_log_daemon(NDLP_ERR, "STREAM '%s' [receive from [%s]:%s]: " + "failed to reply." 
+ , rpt->hostname + , rpt->client_ip, rpt->client_port + ); + } + + receiver_state_free(rpt); + return HTTP_RESP_OK; + } + + if(unlikely(web_client_streaming_rate_t > 0)) { + static SPINLOCK spinlock = NETDATA_SPINLOCK_INITIALIZER; + static time_t last_stream_accepted_t = 0; + + time_t now = now_realtime_sec(); + spinlock_lock(&spinlock); + + if(unlikely(last_stream_accepted_t == 0)) + last_stream_accepted_t = now; + + if(now - last_stream_accepted_t < web_client_streaming_rate_t) { + spinlock_unlock(&spinlock); + + char msg[100 + 1]; + snprintfz(msg, sizeof(msg) - 1, + "rate limit, will accept new connection in %ld secs", + (long)(web_client_streaming_rate_t - (now - last_stream_accepted_t))); + + rrdpush_receive_log_status( + rpt, msg, + RRDPUSH_STATUS_RATE_LIMIT, NDLP_NOTICE); + + receiver_state_free(rpt); + return rrdpush_receiver_too_busy_now(w); + } + + last_stream_accepted_t = now; + spinlock_unlock(&spinlock); + } + + /* + * Quick path for rejecting multiple connections. The lock taken is fine-grained - it only protects the receiver + * pointer within the host (if a host exists). This protects against multiple concurrent web requests hitting + * separate threads within the web-server and landing here. The lock guards the thread-shutdown sequence that + * detaches the receiver from the host. If the host is being created (first time-access) then we also use the + * lock to prevent race-hazard (two threads try to create the host concurrently, one wins and the other does a + * lookup to the now-attached structure). + */ + + { + time_t age = 0; + bool receiver_stale = false; + bool receiver_working = false; + + rrd_rdlock(); + RRDHOST *host = rrdhost_find_by_guid(rpt->machine_guid); + if (unlikely(host && rrdhost_flag_check(host, RRDHOST_FLAG_ARCHIVED))) /* Ignore archived hosts. */ + host = NULL; + + if (host) { + spinlock_lock(&host->receiver_lock); + if (host->receiver) { + age = now_monotonic_sec() - host->receiver->last_msg_t; + + if (age < 30) + receiver_working = true; + else + receiver_stale = true; + } + spinlock_unlock(&host->receiver_lock); + } + rrd_rdunlock(); + + if (receiver_stale && stop_streaming_receiver(host, STREAM_HANDSHAKE_DISCONNECT_STALE_RECEIVER)) { + // we stopped the receiver + // we can proceed with this connection + receiver_stale = false; + + nd_log_daemon(NDLP_NOTICE, "STREAM '%s' [receive from [%s]:%s]: " + "stopped previous stale receiver to accept this one." + , rpt->hostname + , rpt->client_ip, rpt->client_port + ); + } + + if (receiver_working || receiver_stale) { + // another receiver is already connected + // try again later + + char msg[200 + 1]; + snprintfz(msg, sizeof(msg) - 1, + "multiple connections for same host, " + "old connection was last used %ld secs ago%s", + age, receiver_stale ? 
" (signaled old receiver to stop)" : " (new connection not accepted)"); + + rrdpush_receive_log_status( + rpt, msg, + RRDPUSH_STATUS_ALREADY_CONNECTED, NDLP_DEBUG); + + // Have not set WEB_CLIENT_FLAG_DONT_CLOSE_SOCKET - caller should clean up + buffer_flush(w->response.data); + buffer_strcat(w->response.data, START_STREAMING_ERROR_ALREADY_STREAMING); + receiver_state_free(rpt); + return HTTP_RESP_CONFLICT; + } + } + + rrdpush_receiver_takeover_web_connection(w, rpt); + + char tag[NETDATA_THREAD_TAG_MAX + 1]; + snprintfz(tag, NETDATA_THREAD_TAG_MAX, THREAD_TAG_STREAM_RECEIVER "[%s]", rpt->hostname); + tag[NETDATA_THREAD_TAG_MAX] = '\0'; + + rpt->thread = nd_thread_create(tag, NETDATA_THREAD_OPTION_DEFAULT, rrdpush_receiver_thread, (void *)rpt); + if(!rpt->thread) { + rrdpush_receive_log_status( + rpt, "can't create receiver thread", + RRDPUSH_STATUS_INTERNAL_SERVER_ERROR, NDLP_ERR); + + buffer_flush(w->response.data); + buffer_strcat(w->response.data, "Can't handle this request"); + receiver_state_free(rpt); + return HTTP_RESP_INTERNAL_SERVER_ERROR; + } + + // prevent the caller from closing the streaming socket + return HTTP_RESP_OK; +} diff --git a/src/streaming/receiver.h b/src/streaming/receiver.h new file mode 100644 index 000000000..a1f208608 --- /dev/null +++ b/src/streaming/receiver.h @@ -0,0 +1,93 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_RECEIVER_H +#define NETDATA_RECEIVER_H + +#include "libnetdata/libnetdata.h" +#include "database/rrd.h" + +struct parser; + +struct receiver_state { + RRDHOST *host; + pid_t tid; + ND_THREAD *thread; + int fd; + char *key; + char *hostname; + char *registry_hostname; + char *machine_guid; + char *os; + char *timezone; // Unused? + char *abbrev_timezone; + int32_t utc_offset; + char *client_ip; // Duplicated in pluginsd + char *client_port; // Duplicated in pluginsd + char *program_name; // Duplicated in pluginsd + char *program_version; + struct rrdhost_system_info *system_info; + STREAM_CAPABILITIES capabilities; + time_t last_msg_t; + time_t connected_since_s; + + struct buffered_reader reader; + + uint16_t hops; + + struct { + bool shutdown; // signal the streaming parser to exit + STREAM_HANDSHAKE reason; + } exit; + + struct { + RRD_MEMORY_MODE mode; + int history; + int update_every; + int health_enabled; // CONFIG_BOOLEAN_YES, CONFIG_BOOLEAN_NO, CONFIG_BOOLEAN_AUTO + time_t alarms_delay; + uint32_t alarms_history; + int rrdpush_enabled; + const char *rrdpush_api_key; // DONT FREE - it is allocated in appconfig + const char *rrdpush_send_charts_matching; // DONT FREE - it is allocated in appconfig + bool rrdpush_enable_replication; + time_t rrdpush_seconds_to_replicate; + time_t rrdpush_replication_step; + const char *rrdpush_destination; // DONT FREE - it is allocated in appconfig + unsigned int rrdpush_compression; + STREAM_CAPABILITIES compression_priorities[COMPRESSION_ALGORITHM_MAX]; + } config; + + NETDATA_SSL ssl; + + time_t replication_first_time_t; + + struct decompressor_state decompressor; + /* + struct { + uint32_t count; + STREAM_NODE_INSTANCE *array; + } instances; +*/ + + // The parser pointer is safe to read and use, only when having the host receiver lock. + // Without this lock, the data pointed by the pointer may vanish randomly. + // Also, since the receiver sets it when it starts, it should be read with + // an atomic read. 
+ struct parser *parser; + +#ifdef ENABLE_H2O + void *h2o_ctx; +#endif +}; + +#ifdef ENABLE_H2O +#define is_h2o_rrdpush(x) ((x)->h2o_ctx != NULL) +#define unless_h2o_rrdpush(x) if(!is_h2o_rrdpush(x)) +#endif + +int rrdpush_receiver_thread_spawn(struct web_client *w, char *decoded_query_string, void *h2o_ctx); + +void receiver_state_free(struct receiver_state *rpt); +bool stop_streaming_receiver(RRDHOST *host, STREAM_HANDSHAKE reason); + +#endif //NETDATA_RECEIVER_H diff --git a/src/streaming/replication.c b/src/streaming/replication.c index 1f5aeb34c..1f2c3140d 100644 --- a/src/streaming/replication.c +++ b/src/streaming/replication.c @@ -612,6 +612,7 @@ static struct replication_query *replication_response_prepare( } void replication_response_cancel_and_finalize(struct replication_query *q) { + if(!q) return; replication_query_finalize(NULL, q, false); } @@ -718,7 +719,7 @@ bool replication_response_execute_and_finalize(struct replication_query *q, size struct replication_request_details { struct { send_command callback; - void *data; + struct parser *parser; } caller; RRDHOST *host; @@ -826,7 +827,7 @@ static bool send_replay_chart_cmd(struct replication_request_details *r, const c rrdset_id(st), r->wanted.start_streaming ? "true" : "false", (unsigned long long)r->wanted.after, (unsigned long long)r->wanted.before); - ssize_t ret = r->caller.callback(buffer, r->caller.data); + ssize_t ret = r->caller.callback(buffer, r->caller.parser); if (ret < 0) { netdata_log_error("REPLAY ERROR: 'host:%s/chart:%s' failed to send replication request to child (error %zd)", rrdhost_hostname(r->host), rrdset_id(r->st), ret); @@ -836,14 +837,14 @@ static bool send_replay_chart_cmd(struct replication_request_details *r, const c return true; } -bool replicate_chart_request(send_command callback, void *callback_data, RRDHOST *host, RRDSET *st, +bool replicate_chart_request(send_command callback, struct parser *parser, RRDHOST *host, RRDSET *st, time_t child_first_entry, time_t child_last_entry, time_t child_wall_clock_time, time_t prev_first_entry_wanted, time_t prev_last_entry_wanted) { struct replication_request_details r = { .caller = { .callback = callback, - .data = callback_data, + .parser = parser, }, .host = host, diff --git a/src/streaming/replication.h b/src/streaming/replication.h index 507b7c32f..27baeaf35 100644 --- a/src/streaming/replication.h +++ b/src/streaming/replication.h @@ -5,6 +5,8 @@ #include "daemon/common.h" +struct parser; + struct replication_query_statistics { SPINLOCK spinlock; size_t queries_started; @@ -17,9 +19,9 @@ struct replication_query_statistics replication_get_query_statistics(void); bool replicate_chart_response(RRDHOST *rh, RRDSET *rs, bool start_streaming, time_t after, time_t before); -typedef ssize_t (*send_command)(const char *txt, void *data); +typedef ssize_t (*send_command)(const char *txt, struct parser *parser); -bool replicate_chart_request(send_command callback, void *callback_data, +bool replicate_chart_request(send_command callback, struct parser *parser, RRDHOST *rh, RRDSET *rs, time_t child_first_entry, time_t child_last_entry, time_t child_wall_clock_time, time_t response_first_start_time, time_t response_last_end_time); diff --git a/src/streaming/rrdhost-status.c b/src/streaming/rrdhost-status.c new file mode 100644 index 000000000..c34fa693e --- /dev/null +++ b/src/streaming/rrdhost-status.c @@ -0,0 +1,355 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "rrdhost-status.h" + +const char *rrdhost_db_status_to_string(RRDHOST_DB_STATUS 
status) { + switch(status) { + default: + case RRDHOST_DB_STATUS_INITIALIZING: + return "initializing"; + + case RRDHOST_DB_STATUS_QUERYABLE: + return "online"; + } +} + +const char *rrdhost_db_liveness_to_string(RRDHOST_DB_LIVENESS status) { + switch(status) { + default: + case RRDHOST_DB_LIVENESS_STALE: + return "stale"; + + case RRDHOST_DB_LIVENESS_LIVE: + return "live"; + } +} + +const char *rrdhost_ingest_status_to_string(RRDHOST_INGEST_STATUS status) { + switch(status) { + case RRDHOST_INGEST_STATUS_ARCHIVED: + return "archived"; + + case RRDHOST_INGEST_STATUS_INITIALIZING: + return "initializing"; + + case RRDHOST_INGEST_STATUS_REPLICATING: + return "replicating"; + + case RRDHOST_INGEST_STATUS_ONLINE: + return "online"; + + default: + case RRDHOST_INGEST_STATUS_OFFLINE: + return "offline"; + } +} + +const char *rrdhost_ingest_type_to_string(RRDHOST_INGEST_TYPE type) { + switch(type) { + case RRDHOST_INGEST_TYPE_LOCALHOST: + return "localhost"; + + case RRDHOST_INGEST_TYPE_VIRTUAL: + return "virtual"; + + case RRDHOST_INGEST_TYPE_CHILD: + return "child"; + + default: + case RRDHOST_INGEST_TYPE_ARCHIVED: + return "archived"; + } +} + +const char *rrdhost_streaming_status_to_string(RRDHOST_STREAMING_STATUS status) { + switch(status) { + case RRDHOST_STREAM_STATUS_DISABLED: + return "disabled"; + + case RRDHOST_STREAM_STATUS_REPLICATING: + return "replicating"; + + case RRDHOST_STREAM_STATUS_ONLINE: + return "online"; + + default: + case RRDHOST_STREAM_STATUS_OFFLINE: + return "offline"; + } +} + +const char *rrdhost_ml_status_to_string(RRDHOST_ML_STATUS status) { + switch(status) { + case RRDHOST_ML_STATUS_RUNNING: + return "online"; + + case RRDHOST_ML_STATUS_OFFLINE: + return "offline"; + + default: + case RRDHOST_ML_STATUS_DISABLED: + return "disabled"; + } +} + +const char *rrdhost_ml_type_to_string(RRDHOST_ML_TYPE type) { + switch(type) { + case RRDHOST_ML_TYPE_SELF: + return "self"; + + case RRDHOST_ML_TYPE_RECEIVED: + return "received"; + + default: + case RRDHOST_ML_TYPE_DISABLED: + return "disabled"; + } +} + +const char *rrdhost_health_status_to_string(RRDHOST_HEALTH_STATUS status) { + switch(status) { + default: + case RRDHOST_HEALTH_STATUS_DISABLED: + return "disabled"; + + case RRDHOST_HEALTH_STATUS_INITIALIZING: + return "initializing"; + + case RRDHOST_HEALTH_STATUS_RUNNING: + return "online"; + } +} + +const char *rrdhost_dyncfg_status_to_string(RRDHOST_DYNCFG_STATUS status) { + switch(status) { + default: + case RRDHOST_DYNCFG_STATUS_UNAVAILABLE: + return "unavailable"; + + case RRDHOST_DYNCFG_STATUS_AVAILABLE: + return "online"; + } +} + +static NETDATA_DOUBLE rrdhost_sender_replication_completion_unsafe(RRDHOST *host, time_t now, size_t *instances) { + size_t charts = rrdhost_sender_replicating_charts(host); + NETDATA_DOUBLE completion; + if(!charts || !host->sender || !host->sender->replication.oldest_request_after_t) + completion = 100.0; + else if(!host->sender->replication.latest_completed_before_t || host->sender->replication.latest_completed_before_t < host->sender->replication.oldest_request_after_t) + completion = 0.0; + else { + time_t total = now - host->sender->replication.oldest_request_after_t; + time_t current = host->sender->replication.latest_completed_before_t - host->sender->replication.oldest_request_after_t; + completion = (NETDATA_DOUBLE) current * 100.0 / (NETDATA_DOUBLE) total; + } + + *instances = charts; + + return completion; +} + +void rrdhost_status(RRDHOST *host, time_t now, RRDHOST_STATUS *s) { + memset(s, 0, sizeof(*s)); + + s->host = 
host; + s->now = now; + + RRDHOST_FLAGS flags = __atomic_load_n(&host->flags, __ATOMIC_RELAXED); + + // --- dyncfg --- + + s->dyncfg.status = dyncfg_available_for_rrdhost(host) ? RRDHOST_DYNCFG_STATUS_AVAILABLE : RRDHOST_DYNCFG_STATUS_UNAVAILABLE; + + // --- db --- + + bool online = rrdhost_is_online(host); + + rrdhost_retention(host, now, online, &s->db.first_time_s, &s->db.last_time_s); + s->db.metrics = host->rrdctx.metrics; + s->db.instances = host->rrdctx.instances; + s->db.contexts = dictionary_entries(host->rrdctx.contexts); + if(!s->db.first_time_s || !s->db.last_time_s || !s->db.metrics || !s->db.instances || !s->db.contexts || + (flags & (RRDHOST_FLAG_PENDING_CONTEXT_LOAD))) + s->db.status = RRDHOST_DB_STATUS_INITIALIZING; + else + s->db.status = RRDHOST_DB_STATUS_QUERYABLE; + + s->db.mode = host->rrd_memory_mode; + + // --- ingest --- + + s->ingest.since = MAX(host->child_connect_time, host->child_disconnected_time); + s->ingest.reason = (online) ? STREAM_HANDSHAKE_NEVER : host->rrdpush_last_receiver_exit_reason; + + spinlock_lock(&host->receiver_lock); + s->ingest.hops = (host->system_info ? host->system_info->hops : (host == localhost) ? 0 : 1); + bool has_receiver = false; + if (host->receiver && !rrdhost_flag_check(host, RRDHOST_FLAG_RRDPUSH_RECEIVER_DISCONNECTED)) { + has_receiver = true; + s->ingest.replication.instances = rrdhost_receiver_replicating_charts(host); + s->ingest.replication.completion = host->rrdpush_receiver_replication_percent; + s->ingest.replication.in_progress = s->ingest.replication.instances > 0; + + s->ingest.capabilities = host->receiver->capabilities; + s->ingest.peers = socket_peers(host->receiver->fd); + s->ingest.ssl = SSL_connection(&host->receiver->ssl); + } + spinlock_unlock(&host->receiver_lock); + + if (online) { + if(s->db.status == RRDHOST_DB_STATUS_INITIALIZING) + s->ingest.status = RRDHOST_INGEST_STATUS_INITIALIZING; + + else if (host == localhost || rrdhost_option_check(host, RRDHOST_OPTION_VIRTUAL_HOST)) { + s->ingest.status = RRDHOST_INGEST_STATUS_ONLINE; + s->ingest.since = netdata_start_time; + } + + else if (s->ingest.replication.in_progress) + s->ingest.status = RRDHOST_INGEST_STATUS_REPLICATING; + + else + s->ingest.status = RRDHOST_INGEST_STATUS_ONLINE; + } + else { + if (!s->ingest.since) { + s->ingest.status = RRDHOST_INGEST_STATUS_ARCHIVED; + s->ingest.since = s->db.last_time_s; + } + + else + s->ingest.status = RRDHOST_INGEST_STATUS_OFFLINE; + } + + if(host == localhost) + s->ingest.type = RRDHOST_INGEST_TYPE_LOCALHOST; + else if(has_receiver) + s->ingest.type = RRDHOST_INGEST_TYPE_CHILD; + else if(rrdhost_option_check(host, RRDHOST_OPTION_VIRTUAL_HOST)) + s->ingest.type = RRDHOST_INGEST_TYPE_VIRTUAL; + else + s->ingest.type = RRDHOST_INGEST_TYPE_ARCHIVED; + + s->ingest.id = host->rrdpush_receiver_connection_counter; + + if(!s->ingest.since) + s->ingest.since = netdata_start_time; + + if(s->ingest.status == RRDHOST_INGEST_STATUS_ONLINE) + s->db.liveness = RRDHOST_DB_LIVENESS_LIVE; + else + s->db.liveness = RRDHOST_DB_LIVENESS_STALE; + + // --- stream --- + + if (!host->sender) { + s->stream.status = RRDHOST_STREAM_STATUS_DISABLED; + s->stream.hops = s->ingest.hops + 1; + } + else { + sender_lock(host->sender); + + s->stream.since = host->sender->last_state_since_t; + s->stream.peers = socket_peers(host->sender->rrdpush_sender_socket); + s->stream.ssl = SSL_connection(&host->sender->ssl); + + memcpy(s->stream.sent_bytes_on_this_connection_per_type, + host->sender->sent_bytes_on_this_connection_per_type, + 
MIN(sizeof(s->stream.sent_bytes_on_this_connection_per_type), + sizeof(host->sender->sent_bytes_on_this_connection_per_type))); + + if (rrdhost_flag_check(host, RRDHOST_FLAG_RRDPUSH_SENDER_CONNECTED)) { + s->stream.hops = host->sender->hops; + s->stream.reason = STREAM_HANDSHAKE_NEVER; + s->stream.capabilities = host->sender->capabilities; + + s->stream.replication.completion = rrdhost_sender_replication_completion_unsafe(host, now, &s->stream.replication.instances); + s->stream.replication.in_progress = s->stream.replication.instances > 0; + + if(s->stream.replication.in_progress) + s->stream.status = RRDHOST_STREAM_STATUS_REPLICATING; + else + s->stream.status = RRDHOST_STREAM_STATUS_ONLINE; + + s->stream.compression = host->sender->compressor.initialized; + } + else { + s->stream.status = RRDHOST_STREAM_STATUS_OFFLINE; + s->stream.hops = s->ingest.hops + 1; + s->stream.reason = host->sender->exit.reason; + } + + sender_unlock(host->sender); + } + + s->stream.id = host->rrdpush_sender_connection_counter; + + if(!s->stream.since) + s->stream.since = netdata_start_time; + + // --- ml --- + + if(ml_host_get_host_status(host, &s->ml.metrics)) { + s->ml.type = RRDHOST_ML_TYPE_SELF; + + if(s->ingest.status == RRDHOST_INGEST_STATUS_OFFLINE || s->ingest.status == RRDHOST_INGEST_STATUS_ARCHIVED) + s->ml.status = RRDHOST_ML_STATUS_OFFLINE; + else + s->ml.status = RRDHOST_ML_STATUS_RUNNING; + } + else if(stream_has_capability(&s->ingest, STREAM_CAP_DATA_WITH_ML)) { + s->ml.type = RRDHOST_ML_TYPE_RECEIVED; + s->ml.status = RRDHOST_ML_STATUS_RUNNING; + } + else { + // does not receive ML, does not run ML + s->ml.type = RRDHOST_ML_TYPE_DISABLED; + s->ml.status = RRDHOST_ML_STATUS_DISABLED; + } + + // --- health --- + + if(host->health.health_enabled) { + if(flags & RRDHOST_FLAG_PENDING_HEALTH_INITIALIZATION) + s->health.status = RRDHOST_HEALTH_STATUS_INITIALIZING; + else { + s->health.status = RRDHOST_HEALTH_STATUS_RUNNING; + + RRDCALC *rc; + foreach_rrdcalc_in_rrdhost_read(host, rc) { + if (unlikely(!rc->rrdset || !rc->rrdset->last_collected_time.tv_sec)) + continue; + + switch (rc->status) { + default: + case RRDCALC_STATUS_REMOVED: + break; + + case RRDCALC_STATUS_CLEAR: + s->health.alerts.clear++; + break; + + case RRDCALC_STATUS_WARNING: + s->health.alerts.warning++; + break; + + case RRDCALC_STATUS_CRITICAL: + s->health.alerts.critical++; + break; + + case RRDCALC_STATUS_UNDEFINED: + s->health.alerts.undefined++; + break; + + case RRDCALC_STATUS_UNINITIALIZED: + s->health.alerts.uninitialized++; + break; + } + } + foreach_rrdcalc_in_rrdhost_done(rc); + } + } + else + s->health.status = RRDHOST_HEALTH_STATUS_DISABLED; +} diff --git a/src/streaming/rrdhost-status.h b/src/streaming/rrdhost-status.h new file mode 100644 index 000000000..21298e268 --- /dev/null +++ b/src/streaming/rrdhost-status.h @@ -0,0 +1,161 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_RRDHOST_STATUS_H +#define NETDATA_RRDHOST_STATUS_H + +#include "libnetdata/libnetdata.h" +#include "stream-handshake.h" +#include "stream-capabilities.h" +#include "database/rrd.h" + +typedef enum __attribute__((packed)) { + RRDHOST_DB_STATUS_INITIALIZING = 0, + RRDHOST_DB_STATUS_QUERYABLE, +} RRDHOST_DB_STATUS; + +const char *rrdhost_db_status_to_string(RRDHOST_DB_STATUS status); + +typedef enum __attribute__((packed)) { + RRDHOST_DB_LIVENESS_STALE = 0, + RRDHOST_DB_LIVENESS_LIVE, +} RRDHOST_DB_LIVENESS; + +const char *rrdhost_db_liveness_to_string(RRDHOST_DB_LIVENESS status); + +typedef enum __attribute__((packed)) { + 
RRDHOST_INGEST_STATUS_ARCHIVED = 0, + RRDHOST_INGEST_STATUS_INITIALIZING, + RRDHOST_INGEST_STATUS_REPLICATING, + RRDHOST_INGEST_STATUS_ONLINE, + RRDHOST_INGEST_STATUS_OFFLINE, +} RRDHOST_INGEST_STATUS; + +const char *rrdhost_ingest_status_to_string(RRDHOST_INGEST_STATUS status); + +typedef enum __attribute__((packed)) { + RRDHOST_INGEST_TYPE_LOCALHOST = 0, + RRDHOST_INGEST_TYPE_VIRTUAL, + RRDHOST_INGEST_TYPE_CHILD, + RRDHOST_INGEST_TYPE_ARCHIVED, +} RRDHOST_INGEST_TYPE; + +const char *rrdhost_ingest_type_to_string(RRDHOST_INGEST_TYPE type); + +typedef enum __attribute__((packed)) { + RRDHOST_STREAM_STATUS_DISABLED = 0, + RRDHOST_STREAM_STATUS_REPLICATING, + RRDHOST_STREAM_STATUS_ONLINE, + RRDHOST_STREAM_STATUS_OFFLINE, +} RRDHOST_STREAMING_STATUS; + +const char *rrdhost_streaming_status_to_string(RRDHOST_STREAMING_STATUS status); + +typedef enum __attribute__((packed)) { + RRDHOST_ML_STATUS_DISABLED = 0, + RRDHOST_ML_STATUS_OFFLINE, + RRDHOST_ML_STATUS_RUNNING, +} RRDHOST_ML_STATUS; + +const char *rrdhost_ml_status_to_string(RRDHOST_ML_STATUS status); + +typedef enum __attribute__((packed)) { + RRDHOST_ML_TYPE_DISABLED = 0, + RRDHOST_ML_TYPE_SELF, + RRDHOST_ML_TYPE_RECEIVED, +} RRDHOST_ML_TYPE; + +const char *rrdhost_ml_type_to_string(RRDHOST_ML_TYPE type); + +typedef enum __attribute__((packed)) { + RRDHOST_HEALTH_STATUS_DISABLED = 0, + RRDHOST_HEALTH_STATUS_INITIALIZING, + RRDHOST_HEALTH_STATUS_RUNNING, +} RRDHOST_HEALTH_STATUS; + +const char *rrdhost_health_status_to_string(RRDHOST_HEALTH_STATUS status); + +typedef enum __attribute__((packed)) { + RRDHOST_DYNCFG_STATUS_UNAVAILABLE = 0, + RRDHOST_DYNCFG_STATUS_AVAILABLE, +} RRDHOST_DYNCFG_STATUS; + +const char *rrdhost_dyncfg_status_to_string(RRDHOST_DYNCFG_STATUS status); + +typedef struct { + RRDHOST *host; + time_t now; + + struct { + RRDHOST_DYNCFG_STATUS status; + } dyncfg; + + struct { + RRDHOST_DB_STATUS status; + RRDHOST_DB_LIVENESS liveness; + RRD_MEMORY_MODE mode; + time_t first_time_s; + time_t last_time_s; + size_t metrics; + size_t instances; + size_t contexts; + } db; + + struct { + RRDHOST_ML_STATUS status; + RRDHOST_ML_TYPE type; + struct ml_metrics_statistics metrics; + } ml; + + struct { + size_t hops; + RRDHOST_INGEST_TYPE type; + RRDHOST_INGEST_STATUS status; + SOCKET_PEERS peers; + bool ssl; + STREAM_CAPABILITIES capabilities; + uint32_t id; + time_t since; + STREAM_HANDSHAKE reason; + + struct { + bool in_progress; + NETDATA_DOUBLE completion; + size_t instances; + } replication; + } ingest; + + struct { + size_t hops; + RRDHOST_STREAMING_STATUS status; + SOCKET_PEERS peers; + bool ssl; + bool compression; + STREAM_CAPABILITIES capabilities; + uint32_t id; + time_t since; + STREAM_HANDSHAKE reason; + + struct { + bool in_progress; + NETDATA_DOUBLE completion; + size_t instances; + } replication; + + size_t sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_MAX]; + } stream; + + struct { + RRDHOST_HEALTH_STATUS status; + struct { + uint32_t undefined; + uint32_t uninitialized; + uint32_t clear; + uint32_t warning; + uint32_t critical; + } alerts; + } health; +} RRDHOST_STATUS; + +void rrdhost_status(RRDHOST *host, time_t now, RRDHOST_STATUS *s); + +#endif //NETDATA_RRDHOST_STATUS_H diff --git a/src/streaming/rrdpush.c b/src/streaming/rrdpush.c deleted file mode 100644 index 23a86e720..000000000 --- a/src/streaming/rrdpush.c +++ /dev/null @@ -1,1418 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include "rrdpush.h" - -/* - * rrdpush - * - * 3 threads are involved for all stream operations - * - * 1. 
a random data collection thread, calling rrdset_done_push() - * this is called for each chart. - * - * the output of this work is kept in a thread BUFFER - * the sender thread is signalled via a pipe (in RRDHOST) - * - * 2. a sender thread running at the sending netdata - * this is spawned automatically on the first chart to be pushed - * - * It tries to push the metrics to the remote netdata, as fast - * as possible (i.e. immediately after they are collected). - * - * 3. a receiver thread, running at the receiving netdata - * this is spawned automatically when the sender connects to - * the receiver. - * - */ - -struct config stream_config = { - .first_section = NULL, - .last_section = NULL, - .mutex = NETDATA_MUTEX_INITIALIZER, - .index = { - .avl_tree = { - .root = NULL, - .compar = appconfig_section_compare - }, - .rwlock = AVL_LOCK_INITIALIZER - } -}; - -unsigned int default_rrdpush_enabled = 0; -STREAM_CAPABILITIES globally_disabled_capabilities = STREAM_CAP_NONE; - -unsigned int default_rrdpush_compression_enabled = 1; -char *default_rrdpush_destination = NULL; -char *default_rrdpush_api_key = NULL; -char *default_rrdpush_send_charts_matching = NULL; -bool default_rrdpush_enable_replication = true; -time_t default_rrdpush_seconds_to_replicate = 86400; -time_t default_rrdpush_replication_step = 600; -#ifdef ENABLE_HTTPS -char *netdata_ssl_ca_path = NULL; -char *netdata_ssl_ca_file = NULL; -#endif - -static void load_stream_conf() { - errno_clear(); - char *filename = strdupz_path_subpath(netdata_configured_user_config_dir, "stream.conf"); - if(!appconfig_load(&stream_config, filename, 0, NULL)) { - nd_log_daemon(NDLP_NOTICE, "CONFIG: cannot load user config '%s'. Will try stock config.", filename); - freez(filename); - - filename = strdupz_path_subpath(netdata_configured_stock_config_dir, "stream.conf"); - if(!appconfig_load(&stream_config, filename, 0, NULL)) - nd_log_daemon(NDLP_NOTICE, "CONFIG: cannot load stock config '%s'. 
Running with internal defaults.", filename); - } - freez(filename); -} - -bool rrdpush_receiver_needs_dbengine() { - struct section *co; - - for(co = stream_config.first_section; co; co = co->next) { - if(strcmp(co->name, "stream") == 0) - continue; // the first section is not relevant - - char *s; - - s = appconfig_get_by_section(co, "enabled", NULL); - if(!s || !appconfig_test_boolean_value(s)) - continue; - - s = appconfig_get_by_section(co, "default memory mode", NULL); - if(s && strcmp(s, "dbengine") == 0) - return true; - - s = appconfig_get_by_section(co, "memory mode", NULL); - if(s && strcmp(s, "dbengine") == 0) - return true; - } - - return false; -} - -int rrdpush_init() { - // -------------------------------------------------------------------- - // load stream.conf - load_stream_conf(); - - default_rrdpush_enabled = (unsigned int)appconfig_get_boolean(&stream_config, CONFIG_SECTION_STREAM, "enabled", default_rrdpush_enabled); - default_rrdpush_destination = appconfig_get(&stream_config, CONFIG_SECTION_STREAM, "destination", ""); - default_rrdpush_api_key = appconfig_get(&stream_config, CONFIG_SECTION_STREAM, "api key", ""); - default_rrdpush_send_charts_matching = appconfig_get(&stream_config, CONFIG_SECTION_STREAM, "send charts matching", "*"); - - default_rrdpush_enable_replication = config_get_boolean(CONFIG_SECTION_DB, "enable replication", default_rrdpush_enable_replication); - default_rrdpush_seconds_to_replicate = config_get_number(CONFIG_SECTION_DB, "seconds to replicate", default_rrdpush_seconds_to_replicate); - default_rrdpush_replication_step = config_get_number(CONFIG_SECTION_DB, "seconds per replication step", default_rrdpush_replication_step); - - rrdhost_free_orphan_time_s = config_get_number(CONFIG_SECTION_DB, "cleanup orphan hosts after secs", rrdhost_free_orphan_time_s); - - default_rrdpush_compression_enabled = (unsigned int)appconfig_get_boolean(&stream_config, CONFIG_SECTION_STREAM, - "enable compression", default_rrdpush_compression_enabled); - - rrdpush_compression_levels[COMPRESSION_ALGORITHM_BROTLI] = (int)appconfig_get_number( - &stream_config, CONFIG_SECTION_STREAM, "brotli compression level", - rrdpush_compression_levels[COMPRESSION_ALGORITHM_BROTLI]); - - rrdpush_compression_levels[COMPRESSION_ALGORITHM_ZSTD] = (int)appconfig_get_number( - &stream_config, CONFIG_SECTION_STREAM, "zstd compression level", - rrdpush_compression_levels[COMPRESSION_ALGORITHM_ZSTD]); - - rrdpush_compression_levels[COMPRESSION_ALGORITHM_LZ4] = (int)appconfig_get_number( - &stream_config, CONFIG_SECTION_STREAM, "lz4 compression acceleration", - rrdpush_compression_levels[COMPRESSION_ALGORITHM_LZ4]); - - rrdpush_compression_levels[COMPRESSION_ALGORITHM_GZIP] = (int)appconfig_get_number( - &stream_config, CONFIG_SECTION_STREAM, "gzip compression level", - rrdpush_compression_levels[COMPRESSION_ALGORITHM_GZIP]); - - if(default_rrdpush_enabled && (!default_rrdpush_destination || !*default_rrdpush_destination || !default_rrdpush_api_key || !*default_rrdpush_api_key)) { - nd_log_daemon(NDLP_WARNING, "STREAM [send]: cannot enable sending thread - information is missing."); - default_rrdpush_enabled = 0; - } - -#ifdef ENABLE_HTTPS - netdata_ssl_validate_certificate_sender = !appconfig_get_boolean(&stream_config, CONFIG_SECTION_STREAM, "ssl skip certificate verification", !netdata_ssl_validate_certificate); - - if(!netdata_ssl_validate_certificate_sender) - nd_log_daemon(NDLP_NOTICE, "SSL: streaming senders will skip SSL certificates verification."); - - netdata_ssl_ca_path = 
appconfig_get(&stream_config, CONFIG_SECTION_STREAM, "CApath", NULL); - netdata_ssl_ca_file = appconfig_get(&stream_config, CONFIG_SECTION_STREAM, "CAfile", NULL); -#endif - - return default_rrdpush_enabled; -} - -// data collection happens from multiple threads -// each of these threads calls rrdset_done() -// which in turn calls rrdset_done_push() -// which uses this pipe to notify the streaming thread -// that there are more data ready to be sent -#define PIPE_READ 0 -#define PIPE_WRITE 1 - -// to have the remote netdata re-sync the charts -// to its current clock, we send for this many -// iterations a BEGIN line without microseconds -// this is for the first iterations of each chart -unsigned int remote_clock_resync_iterations = 60; - -static inline bool should_send_chart_matching(RRDSET *st, RRDSET_FLAGS flags) { - if(!(flags & RRDSET_FLAG_RECEIVER_REPLICATION_FINISHED)) - return false; - - if(unlikely(!(flags & (RRDSET_FLAG_UPSTREAM_SEND | RRDSET_FLAG_UPSTREAM_IGNORE)))) { - RRDHOST *host = st->rrdhost; - - if (flags & RRDSET_FLAG_ANOMALY_DETECTION) { - if(ml_streaming_enabled()) - rrdset_flag_set(st, RRDSET_FLAG_UPSTREAM_SEND); - else - rrdset_flag_set(st, RRDSET_FLAG_UPSTREAM_IGNORE); - } - else if(simple_pattern_matches_string(host->rrdpush_send_charts_matching, st->id) || - simple_pattern_matches_string(host->rrdpush_send_charts_matching, st->name)) - - rrdset_flag_set(st, RRDSET_FLAG_UPSTREAM_SEND); - else - rrdset_flag_set(st, RRDSET_FLAG_UPSTREAM_IGNORE); - - // get the flags again, to know how to respond - flags = rrdset_flag_check(st, RRDSET_FLAG_UPSTREAM_SEND|RRDSET_FLAG_UPSTREAM_IGNORE); - } - - return flags & RRDSET_FLAG_UPSTREAM_SEND; -} - -int configured_as_parent() { - struct section *section = NULL; - int is_parent = 0; - - appconfig_wrlock(&stream_config); - for (section = stream_config.first_section; section; section = section->next) { - nd_uuid_t uuid; - - if (uuid_parse(section->name, uuid) != -1 && - appconfig_get_boolean_by_section(section, "enabled", 0)) { - is_parent = 1; - break; - } - } - appconfig_unlock(&stream_config); - - return is_parent; -} - -// chart labels -static int send_clabels_callback(const char *name, const char *value, RRDLABEL_SRC ls, void *data) { - BUFFER *wb = (BUFFER *)data; - buffer_sprintf(wb, PLUGINSD_KEYWORD_CLABEL " \"%s\" \"%s\" %d\n", name, value, ls & ~(RRDLABEL_FLAG_INTERNAL)); - return 1; -} - -static void rrdpush_send_clabels(BUFFER *wb, RRDSET *st) { - if (st->rrdlabels) { - if(rrdlabels_walkthrough_read(st->rrdlabels, send_clabels_callback, wb) > 0) - buffer_sprintf(wb, PLUGINSD_KEYWORD_CLABEL_COMMIT "\n"); - } -} - -// Send the current chart definition. -// Assumes that collector thread has already called sender_start for mutex / buffer state. -static inline bool rrdpush_send_chart_definition(BUFFER *wb, RRDSET *st) { - uint32_t version = rrdset_metadata_version(st); - - RRDHOST *host = st->rrdhost; - NUMBER_ENCODING integer_encoding = stream_has_capability(host->sender, STREAM_CAP_IEEE754) ? NUMBER_ENCODING_BASE64 : NUMBER_ENCODING_HEX; - bool with_slots = stream_has_capability(host->sender, STREAM_CAP_SLOTS) ? 
true : false; - - bool replication_progress = false; - - // properly set the name for the remote end to parse it - char *name = ""; - if(likely(st->name)) { - if(unlikely(st->id != st->name)) { - // they differ - name = strchr(rrdset_name(st), '.'); - if(name) - name++; - else - name = ""; - } - } - - buffer_fast_strcat(wb, PLUGINSD_KEYWORD_CHART, sizeof(PLUGINSD_KEYWORD_CHART) - 1); - - if(with_slots) { - buffer_fast_strcat(wb, " "PLUGINSD_KEYWORD_SLOT":", sizeof(PLUGINSD_KEYWORD_SLOT) - 1 + 2); - buffer_print_uint64_encoded(wb, integer_encoding, st->rrdpush.sender.chart_slot); - } - - // send the chart - buffer_sprintf( - wb - , " \"%s\" \"%s\" \"%s\" \"%s\" \"%s\" \"%s\" \"%s\" %d %d \"%s %s %s %s\" \"%s\" \"%s\"\n" - , rrdset_id(st) - , name - , rrdset_title(st) - , rrdset_units(st) - , rrdset_family(st) - , rrdset_context(st) - , rrdset_type_name(st->chart_type) - , st->priority - , st->update_every - , rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE)?"obsolete":"" - , rrdset_flag_check(st, RRDSET_FLAG_DETAIL)?"detail":"" - , rrdset_flag_check(st, RRDSET_FLAG_STORE_FIRST)?"store_first":"" - , rrdset_flag_check(st, RRDSET_FLAG_HIDDEN)?"hidden":"" - , rrdset_plugin_name(st) - , rrdset_module_name(st) - ); - - // send the chart labels - if (stream_has_capability(host->sender, STREAM_CAP_CLABELS)) - rrdpush_send_clabels(wb, st); - - // send the dimensions - RRDDIM *rd; - rrddim_foreach_read(rd, st) { - buffer_fast_strcat(wb, PLUGINSD_KEYWORD_DIMENSION, sizeof(PLUGINSD_KEYWORD_DIMENSION) - 1); - - if(with_slots) { - buffer_fast_strcat(wb, " "PLUGINSD_KEYWORD_SLOT":", sizeof(PLUGINSD_KEYWORD_SLOT) - 1 + 2); - buffer_print_uint64_encoded(wb, integer_encoding, rd->rrdpush.sender.dim_slot); - } - - buffer_sprintf( - wb - , " \"%s\" \"%s\" \"%s\" %d %d \"%s %s %s\"\n" - , rrddim_id(rd) - , rrddim_name(rd) - , rrd_algorithm_name(rd->algorithm) - , rd->multiplier - , rd->divisor - , rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE)?"obsolete":"" - , rrddim_option_check(rd, RRDDIM_OPTION_HIDDEN)?"hidden":"" - , rrddim_option_check(rd, RRDDIM_OPTION_DONT_DETECT_RESETS_OR_OVERFLOWS)?"noreset":"" - ); - } - rrddim_foreach_done(rd); - - // send the chart functions - if(stream_has_capability(host->sender, STREAM_CAP_FUNCTIONS)) - rrd_chart_functions_expose_rrdpush(st, wb); - - // send the chart local custom variables - rrdvar_print_to_streaming_custom_chart_variables(st, wb); - - if (stream_has_capability(host->sender, STREAM_CAP_REPLICATION)) { - time_t db_first_time_t, db_last_time_t; - - time_t now = now_realtime_sec(); - rrdset_get_retention_of_tier_for_collected_chart(st, &db_first_time_t, &db_last_time_t, now, 0); - - buffer_sprintf(wb, PLUGINSD_KEYWORD_CHART_DEFINITION_END " %llu %llu %llu\n", - (unsigned long long)db_first_time_t, - (unsigned long long)db_last_time_t, - (unsigned long long)now); - - if(!rrdset_flag_check(st, RRDSET_FLAG_SENDER_REPLICATION_IN_PROGRESS)) { - rrdset_flag_set(st, RRDSET_FLAG_SENDER_REPLICATION_IN_PROGRESS); - rrdset_flag_clear(st, RRDSET_FLAG_SENDER_REPLICATION_FINISHED); - rrdhost_sender_replicating_charts_plus_one(st->rrdhost); - } - replication_progress = true; - -#ifdef NETDATA_LOG_REPLICATION_REQUESTS - internal_error(true, "REPLAY: 'host:%s/chart:%s' replication starts", - rrdhost_hostname(st->rrdhost), rrdset_id(st)); -#endif - } - - sender_commit(host->sender, wb, STREAM_TRAFFIC_TYPE_METADATA); - - // we can set the exposed flag, after we commit the buffer - // because replication may pick it up prematurely - rrddim_foreach_read(rd, st) { - 
rrddim_metadata_exposed_upstream(rd, version); - } - rrddim_foreach_done(rd); - rrdset_metadata_exposed_upstream(st, version); - - st->rrdpush.sender.resync_time_s = st->last_collected_time.tv_sec + (remote_clock_resync_iterations * st->update_every); - return replication_progress; -} - -// sends the current chart dimensions -static void rrdpush_send_chart_metrics(BUFFER *wb, RRDSET *st, struct sender_state *s __maybe_unused, RRDSET_FLAGS flags) { - buffer_fast_strcat(wb, "BEGIN \"", 7); - buffer_fast_strcat(wb, rrdset_id(st), string_strlen(st->id)); - buffer_fast_strcat(wb, "\" ", 2); - - if(st->last_collected_time.tv_sec > st->rrdpush.sender.resync_time_s) - buffer_print_uint64(wb, st->usec_since_last_update); - else - buffer_fast_strcat(wb, "0", 1); - - buffer_fast_strcat(wb, "\n", 1); - - RRDDIM *rd; - rrddim_foreach_read(rd, st) { - if(unlikely(!rrddim_check_updated(rd))) - continue; - - if(likely(rrddim_check_upstream_exposed_collector(rd))) { - buffer_fast_strcat(wb, "SET \"", 5); - buffer_fast_strcat(wb, rrddim_id(rd), string_strlen(rd->id)); - buffer_fast_strcat(wb, "\" = ", 4); - buffer_print_int64(wb, rd->collector.collected_value); - buffer_fast_strcat(wb, "\n", 1); - } - else { - internal_error(true, "STREAM: 'host:%s/chart:%s/dim:%s' flag 'exposed' is updated but not exposed", - rrdhost_hostname(st->rrdhost), rrdset_id(st), rrddim_id(rd)); - // we will include it in the next iteration - rrddim_metadata_updated(rd); - } - } - rrddim_foreach_done(rd); - - if(unlikely(flags & RRDSET_FLAG_UPSTREAM_SEND_VARIABLES)) - rrdvar_print_to_streaming_custom_chart_variables(st, wb); - - buffer_fast_strcat(wb, "END\n", 4); -} - -static void rrdpush_sender_thread_spawn(RRDHOST *host); - -// Called from the internal collectors to mark a chart obsolete. -bool rrdset_push_chart_definition_now(RRDSET *st) { - RRDHOST *host = st->rrdhost; - - if(unlikely(!rrdhost_can_send_definitions_to_parent(host) - || !should_send_chart_matching(st, rrdset_flag_get(st)))) { - return false; - } - - BUFFER *wb = sender_start(host->sender); - rrdpush_send_chart_definition(wb, st); - sender_thread_buffer_free(); - - return true; -} - -void rrdset_push_metrics_v1(RRDSET_STREAM_BUFFER *rsb, RRDSET *st) { - RRDHOST *host = st->rrdhost; - rrdpush_send_chart_metrics(rsb->wb, st, host->sender, rsb->rrdset_flags); -} - -void rrddim_push_metrics_v2(RRDSET_STREAM_BUFFER *rsb, RRDDIM *rd, usec_t point_end_time_ut, NETDATA_DOUBLE n, SN_FLAGS flags) { - if(!rsb->wb || !rsb->v2 || !netdata_double_isnumber(n) || !does_storage_number_exist(flags)) - return; - - bool with_slots = stream_has_capability(rsb, STREAM_CAP_SLOTS) ? true : false; - NUMBER_ENCODING integer_encoding = stream_has_capability(rsb, STREAM_CAP_IEEE754) ? NUMBER_ENCODING_BASE64 : NUMBER_ENCODING_HEX; - NUMBER_ENCODING doubles_encoding = stream_has_capability(rsb, STREAM_CAP_IEEE754) ? 
NUMBER_ENCODING_BASE64 : NUMBER_ENCODING_DECIMAL; - BUFFER *wb = rsb->wb; - time_t point_end_time_s = (time_t)(point_end_time_ut / USEC_PER_SEC); - if(unlikely(rsb->last_point_end_time_s != point_end_time_s)) { - - if(unlikely(rsb->begin_v2_added)) - buffer_fast_strcat(wb, PLUGINSD_KEYWORD_END_V2 "\n", sizeof(PLUGINSD_KEYWORD_END_V2) - 1 + 1); - - buffer_fast_strcat(wb, PLUGINSD_KEYWORD_BEGIN_V2, sizeof(PLUGINSD_KEYWORD_BEGIN_V2) - 1); - - if(with_slots) { - buffer_fast_strcat(wb, " "PLUGINSD_KEYWORD_SLOT":", sizeof(PLUGINSD_KEYWORD_SLOT) - 1 + 2); - buffer_print_uint64_encoded(wb, integer_encoding, rd->rrdset->rrdpush.sender.chart_slot); - } - - buffer_fast_strcat(wb, " '", 2); - buffer_fast_strcat(wb, rrdset_id(rd->rrdset), string_strlen(rd->rrdset->id)); - buffer_fast_strcat(wb, "' ", 2); - buffer_print_uint64_encoded(wb, integer_encoding, rd->rrdset->update_every); - buffer_fast_strcat(wb, " ", 1); - buffer_print_uint64_encoded(wb, integer_encoding, point_end_time_s); - buffer_fast_strcat(wb, " ", 1); - if(point_end_time_s == rsb->wall_clock_time) - buffer_fast_strcat(wb, "#", 1); - else - buffer_print_uint64_encoded(wb, integer_encoding, rsb->wall_clock_time); - buffer_fast_strcat(wb, "\n", 1); - - rsb->last_point_end_time_s = point_end_time_s; - rsb->begin_v2_added = true; - } - - buffer_fast_strcat(wb, PLUGINSD_KEYWORD_SET_V2, sizeof(PLUGINSD_KEYWORD_SET_V2) - 1); - - if(with_slots) { - buffer_fast_strcat(wb, " "PLUGINSD_KEYWORD_SLOT":", sizeof(PLUGINSD_KEYWORD_SLOT) - 1 + 2); - buffer_print_uint64_encoded(wb, integer_encoding, rd->rrdpush.sender.dim_slot); - } - - buffer_fast_strcat(wb, " '", 2); - buffer_fast_strcat(wb, rrddim_id(rd), string_strlen(rd->id)); - buffer_fast_strcat(wb, "' ", 2); - buffer_print_int64_encoded(wb, integer_encoding, rd->collector.last_collected_value); - buffer_fast_strcat(wb, " ", 1); - - if((NETDATA_DOUBLE)rd->collector.last_collected_value == n) - buffer_fast_strcat(wb, "#", 1); - else - buffer_print_netdata_double_encoded(wb, doubles_encoding, n); - - buffer_fast_strcat(wb, " ", 1); - buffer_print_sn_flags(wb, flags, true); - buffer_fast_strcat(wb, "\n", 1); -} - -void rrdset_push_metrics_finished(RRDSET_STREAM_BUFFER *rsb, RRDSET *st) { - if(!rsb->wb) - return; - - if(rsb->v2 && rsb->begin_v2_added) { - if(unlikely(rsb->rrdset_flags & RRDSET_FLAG_UPSTREAM_SEND_VARIABLES)) - rrdvar_print_to_streaming_custom_chart_variables(st, rsb->wb); - - buffer_fast_strcat(rsb->wb, PLUGINSD_KEYWORD_END_V2 "\n", sizeof(PLUGINSD_KEYWORD_END_V2) - 1 + 1); - } - - sender_commit(st->rrdhost->sender, rsb->wb, STREAM_TRAFFIC_TYPE_DATA); - - *rsb = (RRDSET_STREAM_BUFFER){ .wb = NULL, }; -} - -RRDSET_STREAM_BUFFER rrdset_push_metric_initialize(RRDSET *st, time_t wall_clock_time) { - RRDHOST *host = st->rrdhost; - - // fetch the flags we need to check with one atomic operation - RRDHOST_FLAGS host_flags = __atomic_load_n(&host->flags, __ATOMIC_SEQ_CST); - - // check if we are not connected - if(unlikely(!(host_flags & RRDHOST_FLAG_RRDPUSH_SENDER_READY_4_METRICS))) { - - if(unlikely(!(host_flags & (RRDHOST_FLAG_RRDPUSH_SENDER_SPAWN | RRDHOST_FLAG_RRDPUSH_RECEIVER_DISCONNECTED)))) - rrdpush_sender_thread_spawn(host); - - if(unlikely(!(host_flags & RRDHOST_FLAG_RRDPUSH_SENDER_LOGGED_STATUS))) { - rrdhost_flag_set(host, RRDHOST_FLAG_RRDPUSH_SENDER_LOGGED_STATUS); - nd_log_daemon(NDLP_NOTICE, "STREAM %s [send]: not ready - collected metrics are not sent to parent.", rrdhost_hostname(host)); - } - - return (RRDSET_STREAM_BUFFER) { .wb = NULL, }; - } - else if(unlikely(host_flags 
& RRDHOST_FLAG_RRDPUSH_SENDER_LOGGED_STATUS)) { - nd_log_daemon(NDLP_INFO, "STREAM %s [send]: sending metrics to parent...", rrdhost_hostname(host)); - rrdhost_flag_clear(host, RRDHOST_FLAG_RRDPUSH_SENDER_LOGGED_STATUS); - } - - if(unlikely(host_flags & RRDHOST_FLAG_GLOBAL_FUNCTIONS_UPDATED)) { - BUFFER *wb = sender_start(host->sender); - rrd_global_functions_expose_rrdpush(host, wb, stream_has_capability(host->sender, STREAM_CAP_DYNCFG)); - sender_commit(host->sender, wb, STREAM_TRAFFIC_TYPE_FUNCTIONS); - } - - bool exposed_upstream = rrdset_check_upstream_exposed(st); - RRDSET_FLAGS rrdset_flags = rrdset_flag_get(st); - bool replication_in_progress = !(rrdset_flags & RRDSET_FLAG_SENDER_REPLICATION_FINISHED); - - if(unlikely((exposed_upstream && replication_in_progress) || - !should_send_chart_matching(st, rrdset_flags))) - return (RRDSET_STREAM_BUFFER) { .wb = NULL, }; - - if(unlikely(!exposed_upstream)) { - BUFFER *wb = sender_start(host->sender); - replication_in_progress = rrdpush_send_chart_definition(wb, st); - } - - if(replication_in_progress) - return (RRDSET_STREAM_BUFFER) { .wb = NULL, }; - - return (RRDSET_STREAM_BUFFER) { - .capabilities = host->sender->capabilities, - .v2 = stream_has_capability(host->sender, STREAM_CAP_INTERPOLATED), - .rrdset_flags = rrdset_flags, - .wb = sender_start(host->sender), - .wall_clock_time = wall_clock_time, - }; -} - -// labels -static int send_labels_callback(const char *name, const char *value, RRDLABEL_SRC ls, void *data) { - BUFFER *wb = (BUFFER *)data; - buffer_sprintf(wb, "LABEL \"%s\" = %d \"%s\"\n", name, ls, value); - return 1; -} - -void rrdpush_send_host_labels(RRDHOST *host) { - if(unlikely(!rrdhost_can_send_definitions_to_parent(host) - || !stream_has_capability(host->sender, STREAM_CAP_HLABELS))) - return; - - BUFFER *wb = sender_start(host->sender); - - rrdlabels_walkthrough_read(host->rrdlabels, send_labels_callback, wb); - buffer_sprintf(wb, "OVERWRITE %s\n", "labels"); - - sender_commit(host->sender, wb, STREAM_TRAFFIC_TYPE_METADATA); - - sender_thread_buffer_free(); -} - -void rrdpush_send_global_functions(RRDHOST *host) { - if(!stream_has_capability(host->sender, STREAM_CAP_FUNCTIONS)) - return; - - if(unlikely(!rrdhost_can_send_definitions_to_parent(host))) - return; - - BUFFER *wb = sender_start(host->sender); - - rrd_global_functions_expose_rrdpush(host, wb, stream_has_capability(host->sender, STREAM_CAP_DYNCFG)); - - sender_commit(host->sender, wb, STREAM_TRAFFIC_TYPE_FUNCTIONS); - - sender_thread_buffer_free(); -} - -void rrdpush_send_claimed_id(RRDHOST *host) { - if(!stream_has_capability(host->sender, STREAM_CAP_CLAIM)) - return; - - if(unlikely(!rrdhost_can_send_definitions_to_parent(host))) - return; - - BUFFER *wb = sender_start(host->sender); - rrdhost_aclk_state_lock(host); - - buffer_sprintf(wb, "CLAIMED_ID %s %s\n", host->machine_guid, (host->aclk_state.claimed_id ? 
host->aclk_state.claimed_id : "NULL") ); - - rrdhost_aclk_state_unlock(host); - sender_commit(host->sender, wb, STREAM_TRAFFIC_TYPE_METADATA); - - sender_thread_buffer_free(); -} - -int connect_to_one_of_destinations( - RRDHOST *host, - int default_port, - struct timeval *timeout, - size_t *reconnects_counter, - char *connected_to, - size_t connected_to_size, - struct rrdpush_destinations **destination) -{ - int sock = -1; - - for (struct rrdpush_destinations *d = host->destinations; d; d = d->next) { - time_t now = now_realtime_sec(); - - if(nd_thread_signaled_to_cancel()) - return -1; - - if(d->postpone_reconnection_until > now) - continue; - - nd_log(NDLS_DAEMON, NDLP_DEBUG, - "STREAM %s: connecting to '%s' (default port: %d)...", - rrdhost_hostname(host), string2str(d->destination), default_port); - - if (reconnects_counter) - *reconnects_counter += 1; - - d->since = now; - d->attempts++; - sock = connect_to_this(string2str(d->destination), default_port, timeout); - - if (sock != -1) { - if (connected_to && connected_to_size) - strncpyz(connected_to, string2str(d->destination), connected_to_size); - - *destination = d; - - // move the current item to the end of the list - // without this, this destination will break the loop again and again - // not advancing the destinations to find one that may work - DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(host->destinations, d, prev, next); - DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(host->destinations, d, prev, next); - - break; - } - } - - return sock; -} - -struct destinations_init_tmp { - RRDHOST *host; - struct rrdpush_destinations *list; - int count; -}; - -bool destinations_init_add_one(char *entry, void *data) { - struct destinations_init_tmp *t = data; - - struct rrdpush_destinations *d = callocz(1, sizeof(struct rrdpush_destinations)); - char *colon_ssl = strstr(entry, ":SSL"); - if(colon_ssl) { - *colon_ssl = '\0'; - d->ssl = true; - } - else - d->ssl = false; - - d->destination = string_strdupz(entry); - - __atomic_add_fetch(&netdata_buffers_statistics.rrdhost_senders, sizeof(struct rrdpush_destinations), __ATOMIC_RELAXED); - - DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(t->list, d, prev, next); - - t->count++; - nd_log_daemon(NDLP_INFO, "STREAM: added streaming destination No %d: '%s' to host '%s'", t->count, string2str(d->destination), rrdhost_hostname(t->host)); - - return false; // we return false, so that we will get all defined destinations -} - -void rrdpush_destinations_init(RRDHOST *host) { - if(!host->rrdpush_send_destination) return; - - rrdpush_destinations_free(host); - - struct destinations_init_tmp t = { - .host = host, - .list = NULL, - .count = 0, - }; - - foreach_entry_in_connection_string(host->rrdpush_send_destination, destinations_init_add_one, &t); - - host->destinations = t.list; -} - -void rrdpush_destinations_free(RRDHOST *host) { - while (host->destinations) { - struct rrdpush_destinations *tmp = host->destinations; - DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(host->destinations, tmp, prev, next); - string_freez(tmp->destination); - freez(tmp); - __atomic_sub_fetch(&netdata_buffers_statistics.rrdhost_senders, sizeof(struct rrdpush_destinations), __ATOMIC_RELAXED); - } - - host->destinations = NULL; -} - -// ---------------------------------------------------------------------------- -// rrdpush sender thread - -// Either the receiver lost the connection or the host is being destroyed. -// The sender mutex guards thread creation, any spurious data is wiped on reconnection. 
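/*
 * Editor's note: the function below (rrdpush_sender_thread_stop) signals the
 * sender thread to exit and, when asked to wait, polls under the lock until
 * the thread clears its tid. A minimal, self-contained sketch of the same
 * stop-and-wait pattern in plain POSIX threads follows. All names here are
 * hypothetical illustrations; this is not Netdata's nd_thread/sender API.
 */
#include <pthread.h>
#include <stdbool.h>
#include <unistd.h>

struct worker {
    pthread_mutex_t lock;
    pthread_t thread;
    bool shutdown;   /* set by the stopper, read by the worker            */
    bool running;    /* cleared by the worker on exit, like tid is above  */
};

static void *worker_main(void *arg) {
    struct worker *w = arg;
    for (;;) {
        pthread_mutex_lock(&w->lock);
        bool stop = w->shutdown;
        pthread_mutex_unlock(&w->lock);
        if (stop)
            break;
        usleep(10 * 1000);               /* stand-in for real sender work */
    }
    pthread_mutex_lock(&w->lock);
    w->running = false;                  /* announce exit, as clearing tid does */
    pthread_mutex_unlock(&w->lock);
    return NULL;
}

static void worker_stop(struct worker *w, bool wait) {
    pthread_mutex_lock(&w->lock);
    w->shutdown = true;                  /* analogous to exit.shutdown + cancel */
    pthread_mutex_unlock(&w->lock);

    if (wait) {
        for (;;) {                       /* mirrors the while(tid) 10ms poll */
            pthread_mutex_lock(&w->lock);
            bool running = w->running;
            pthread_mutex_unlock(&w->lock);
            if (!running)
                break;
            usleep(10 * 1000);
        }
        pthread_join(w->thread, NULL);   /* reap the exited thread */
    }
}

int main(void) {
    struct worker w = { .lock = PTHREAD_MUTEX_INITIALIZER, .running = true };
    pthread_create(&w.thread, NULL, worker_main, &w);
    worker_stop(&w, true);               /* wait == true: block until it exits */
    return 0;
}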
-void rrdpush_sender_thread_stop(RRDHOST *host, STREAM_HANDSHAKE reason, bool wait) { - if (!host->sender) - return; - - sender_lock(host->sender); - - if(rrdhost_flag_check(host, RRDHOST_FLAG_RRDPUSH_SENDER_SPAWN)) { - - host->sender->exit.shutdown = true; - host->sender->exit.reason = reason; - - // signal it to cancel - nd_thread_signal_cancel(host->rrdpush_sender_thread); - } - - sender_unlock(host->sender); - - if(wait) { - sender_lock(host->sender); - while(host->sender->tid) { - sender_unlock(host->sender); - sleep_usec(10 * USEC_PER_MS); - sender_lock(host->sender); - } - sender_unlock(host->sender); - } -} - -// ---------------------------------------------------------------------------- -// rrdpush receiver thread - -static void rrdpush_sender_thread_spawn(RRDHOST *host) { - sender_lock(host->sender); - - if(!rrdhost_flag_check(host, RRDHOST_FLAG_RRDPUSH_SENDER_SPAWN)) { - char tag[NETDATA_THREAD_TAG_MAX + 1]; - snprintfz(tag, NETDATA_THREAD_TAG_MAX, THREAD_TAG_STREAM_SENDER "[%s]", rrdhost_hostname(host)); - - host->rrdpush_sender_thread = nd_thread_create(tag, NETDATA_THREAD_OPTION_DEFAULT, - rrdpush_sender_thread, (void *)host->sender); - if(!host->rrdpush_sender_thread) - nd_log_daemon(NDLP_ERR, "STREAM %s [send]: failed to create new thread for client.", rrdhost_hostname(host)); - else - rrdhost_flag_set(host, RRDHOST_FLAG_RRDPUSH_SENDER_SPAWN); - } - - sender_unlock(host->sender); -} - -int rrdpush_receiver_permission_denied(struct web_client *w) { - // we always respond with the same message and error code - // to prevent an attacker from gaining info about the error - buffer_flush(w->response.data); - buffer_strcat(w->response.data, START_STREAMING_ERROR_NOT_PERMITTED); - return HTTP_RESP_UNAUTHORIZED; -} - -int rrdpush_receiver_too_busy_now(struct web_client *w) { - // we always respond with the same message and error code - // to prevent an attacker from gaining info about the error - buffer_flush(w->response.data); - buffer_strcat(w->response.data, START_STREAMING_ERROR_BUSY_TRY_LATER); - return HTTP_RESP_SERVICE_UNAVAILABLE; -} - -static void rrdpush_receiver_takeover_web_connection(struct web_client *w, struct receiver_state *rpt) { - rpt->fd = w->ifd; - -#ifdef ENABLE_HTTPS - rpt->ssl.conn = w->ssl.conn; - rpt->ssl.state = w->ssl.state; - - w->ssl = NETDATA_SSL_UNSET_CONNECTION; -#endif - - WEB_CLIENT_IS_DEAD(w); - - if(web_server_mode == WEB_SERVER_MODE_STATIC_THREADED) { - web_client_flag_set(w, WEB_CLIENT_FLAG_DONT_CLOSE_SOCKET); - } - else { - if(w->ifd == w->ofd) - w->ifd = w->ofd = -1; - else - w->ifd = -1; - } - - buffer_flush(w->response.data); -} - -void *rrdpush_receiver_thread(void *ptr); -int rrdpush_receiver_thread_spawn(struct web_client *w, char *decoded_query_string, void *h2o_ctx __maybe_unused) { - - if(!service_running(ABILITY_STREAMING_CONNECTIONS)) - return rrdpush_receiver_too_busy_now(w); - - struct receiver_state *rpt = callocz(1, sizeof(*rpt)); - rpt->last_msg_t = now_monotonic_sec(); - rpt->hops = 1; - - rpt->capabilities = STREAM_CAP_INVALID; - -#ifdef ENABLE_H2O - rpt->h2o_ctx = h2o_ctx; -#endif - - __atomic_add_fetch(&netdata_buffers_statistics.rrdhost_receivers, sizeof(*rpt), __ATOMIC_RELAXED); - __atomic_add_fetch(&netdata_buffers_statistics.rrdhost_allocations_size, sizeof(struct rrdhost_system_info), __ATOMIC_RELAXED); - - rpt->system_info = callocz(1, sizeof(struct rrdhost_system_info)); - rpt->system_info->hops = rpt->hops; - - rpt->fd = -1; - rpt->client_ip = strdupz(w->client_ip); - rpt->client_port = strdupz(w->client_port); - 
-#ifdef ENABLE_HTTPS - rpt->ssl = NETDATA_SSL_UNSET_CONNECTION; -#endif - - rpt->config.update_every = default_rrd_update_every; - - // parse the parameters and fill rpt and rpt->system_info - - while(decoded_query_string) { - char *value = strsep_skip_consecutive_separators(&decoded_query_string, "&"); - if(!value || !*value) continue; - - char *name = strsep_skip_consecutive_separators(&value, "="); - if(!name || !*name) continue; - if(!value || !*value) continue; - - if(!strcmp(name, "key") && !rpt->key) - rpt->key = strdupz(value); - - else if(!strcmp(name, "hostname") && !rpt->hostname) - rpt->hostname = strdupz(value); - - else if(!strcmp(name, "registry_hostname") && !rpt->registry_hostname) - rpt->registry_hostname = strdupz(value); - - else if(!strcmp(name, "machine_guid") && !rpt->machine_guid) - rpt->machine_guid = strdupz(value); - - else if(!strcmp(name, "update_every")) - rpt->config.update_every = (int)strtoul(value, NULL, 0); - - else if(!strcmp(name, "os") && !rpt->os) - rpt->os = strdupz(value); - - else if(!strcmp(name, "timezone") && !rpt->timezone) - rpt->timezone = strdupz(value); - - else if(!strcmp(name, "abbrev_timezone") && !rpt->abbrev_timezone) - rpt->abbrev_timezone = strdupz(value); - - else if(!strcmp(name, "utc_offset")) - rpt->utc_offset = (int32_t)strtol(value, NULL, 0); - - else if(!strcmp(name, "hops")) - rpt->hops = rpt->system_info->hops = (uint16_t) strtoul(value, NULL, 0); - - else if(!strcmp(name, "ml_capable")) - rpt->system_info->ml_capable = strtoul(value, NULL, 0); - - else if(!strcmp(name, "ml_enabled")) - rpt->system_info->ml_enabled = strtoul(value, NULL, 0); - - else if(!strcmp(name, "mc_version")) - rpt->system_info->mc_version = strtoul(value, NULL, 0); - - else if(!strcmp(name, "ver") && (rpt->capabilities & STREAM_CAP_INVALID)) - rpt->capabilities = convert_stream_version_to_capabilities(strtoul(value, NULL, 0), NULL, false); - - else { - // An old Netdata child does not have a compatible streaming protocol, map to something sane. - if (!strcmp(name, "NETDATA_SYSTEM_OS_NAME")) - name = "NETDATA_HOST_OS_NAME"; - - else if (!strcmp(name, "NETDATA_SYSTEM_OS_ID")) - name = "NETDATA_HOST_OS_ID"; - - else if (!strcmp(name, "NETDATA_SYSTEM_OS_ID_LIKE")) - name = "NETDATA_HOST_OS_ID_LIKE"; - - else if (!strcmp(name, "NETDATA_SYSTEM_OS_VERSION")) - name = "NETDATA_HOST_OS_VERSION"; - - else if (!strcmp(name, "NETDATA_SYSTEM_OS_VERSION_ID")) - name = "NETDATA_HOST_OS_VERSION_ID"; - - else if (!strcmp(name, "NETDATA_SYSTEM_OS_DETECTION")) - name = "NETDATA_HOST_OS_DETECTION"; - - else if(!strcmp(name, "NETDATA_PROTOCOL_VERSION") && (rpt->capabilities & STREAM_CAP_INVALID)) - rpt->capabilities = convert_stream_version_to_capabilities(1, NULL, false); - - if (unlikely(rrdhost_set_system_info_variable(rpt->system_info, name, value))) { - nd_log_daemon(NDLP_NOTICE, "STREAM '%s' [receive from [%s]:%s]: " - "request has parameter '%s' = '%s', which is not used." - , (rpt->hostname && *rpt->hostname) ? 
rpt->hostname : "-" - , rpt->client_ip, rpt->client_port - , name, value); - } - } - } - - if (rpt->capabilities & STREAM_CAP_INVALID) - // no version is supplied, assume version 0; - rpt->capabilities = convert_stream_version_to_capabilities(0, NULL, false); - - // find the program name and version - if(w->user_agent && w->user_agent[0]) { - char *t = strchr(w->user_agent, '/'); - if(t && *t) { - *t = '\0'; - t++; - } - - rpt->program_name = strdupz(w->user_agent); - if(t && *t) rpt->program_version = strdupz(t); - } - - // check if we should accept this connection - - if(!rpt->key || !*rpt->key) { - rrdpush_receive_log_status( - rpt, "request without an API key, rejecting connection", - RRDPUSH_STATUS_NO_API_KEY, NDLP_WARNING); - - receiver_state_free(rpt); - return rrdpush_receiver_permission_denied(w); - } - - if(!rpt->hostname || !*rpt->hostname) { - rrdpush_receive_log_status( - rpt, "request without a hostname, rejecting connection", - RRDPUSH_STATUS_NO_HOSTNAME, NDLP_WARNING); - - receiver_state_free(rpt); - return rrdpush_receiver_permission_denied(w); - } - - if(!rpt->registry_hostname) - rpt->registry_hostname = strdupz(rpt->hostname); - - if(!rpt->machine_guid || !*rpt->machine_guid) { - rrdpush_receive_log_status( - rpt, "request without a machine GUID, rejecting connection", - RRDPUSH_STATUS_NO_MACHINE_GUID, NDLP_WARNING); - - receiver_state_free(rpt); - return rrdpush_receiver_permission_denied(w); - } - - { - char buf[GUID_LEN + 1]; - - if (regenerate_guid(rpt->key, buf) == -1) { - rrdpush_receive_log_status( - rpt, "API key is not a valid UUID (use the command uuidgen to generate one)", - RRDPUSH_STATUS_INVALID_API_KEY, NDLP_WARNING); - - receiver_state_free(rpt); - return rrdpush_receiver_permission_denied(w); - } - - if (regenerate_guid(rpt->machine_guid, buf) == -1) { - rrdpush_receive_log_status( - rpt, "machine GUID is not a valid UUID", - RRDPUSH_STATUS_INVALID_MACHINE_GUID, NDLP_WARNING); - - receiver_state_free(rpt); - return rrdpush_receiver_permission_denied(w); - } - } - - const char *api_key_type = appconfig_get(&stream_config, rpt->key, "type", "api"); - if(!api_key_type || !*api_key_type) api_key_type = "unknown"; - if(strcmp(api_key_type, "api") != 0) { - rrdpush_receive_log_status( - rpt, "API key is a machine GUID", - RRDPUSH_STATUS_INVALID_API_KEY, NDLP_WARNING); - - receiver_state_free(rpt); - return rrdpush_receiver_permission_denied(w); - } - - if(!appconfig_get_boolean(&stream_config, rpt->key, "enabled", 0)) { - rrdpush_receive_log_status( - rpt, "API key is not enabled", - RRDPUSH_STATUS_API_KEY_DISABLED, NDLP_WARNING); - - receiver_state_free(rpt); - return rrdpush_receiver_permission_denied(w); - } - - { - SIMPLE_PATTERN *key_allow_from = simple_pattern_create( - appconfig_get(&stream_config, rpt->key, "allow from", "*"), - NULL, SIMPLE_PATTERN_EXACT, true); - - if(key_allow_from) { - if(!simple_pattern_matches(key_allow_from, w->client_ip)) { - simple_pattern_free(key_allow_from); - - rrdpush_receive_log_status( - rpt, "API key is not allowed from this IP", - RRDPUSH_STATUS_NOT_ALLOWED_IP, NDLP_WARNING); - - receiver_state_free(rpt); - return rrdpush_receiver_permission_denied(w); - } - - simple_pattern_free(key_allow_from); - } - } - - { - const char *machine_guid_type = appconfig_get(&stream_config, rpt->machine_guid, "type", "machine"); - if (!machine_guid_type || !*machine_guid_type) machine_guid_type = "unknown"; - - if (strcmp(machine_guid_type, "machine") != 0) { - rrdpush_receive_log_status( - rpt, "machine GUID is an API key", - 
RRDPUSH_STATUS_INVALID_MACHINE_GUID, NDLP_WARNING); - - receiver_state_free(rpt); - return rrdpush_receiver_permission_denied(w); - } - } - - if(!appconfig_get_boolean(&stream_config, rpt->machine_guid, "enabled", 1)) { - rrdpush_receive_log_status( - rpt, "machine GUID is not enabled", - RRDPUSH_STATUS_MACHINE_GUID_DISABLED, NDLP_WARNING); - - receiver_state_free(rpt); - return rrdpush_receiver_permission_denied(w); - } - - { - SIMPLE_PATTERN *machine_allow_from = simple_pattern_create( - appconfig_get(&stream_config, rpt->machine_guid, "allow from", "*"), - NULL, SIMPLE_PATTERN_EXACT, true); - - if(machine_allow_from) { - if(!simple_pattern_matches(machine_allow_from, w->client_ip)) { - simple_pattern_free(machine_allow_from); - - rrdpush_receive_log_status( - rpt, "machine GUID is not allowed from this IP", - RRDPUSH_STATUS_NOT_ALLOWED_IP, NDLP_WARNING); - - receiver_state_free(rpt); - return rrdpush_receiver_permission_denied(w); - } - - simple_pattern_free(machine_allow_from); - } - } - - if (strcmp(rpt->machine_guid, localhost->machine_guid) == 0) { - - rrdpush_receiver_takeover_web_connection(w, rpt); - - rrdpush_receive_log_status( - rpt, "machine GUID is my own", - RRDPUSH_STATUS_LOCALHOST, NDLP_DEBUG); - - char initial_response[HTTP_HEADER_SIZE + 1]; - snprintfz(initial_response, HTTP_HEADER_SIZE, "%s", START_STREAMING_ERROR_SAME_LOCALHOST); - - if(send_timeout( -#ifdef ENABLE_HTTPS - &rpt->ssl, -#endif - rpt->fd, initial_response, strlen(initial_response), 0, 60) != (ssize_t)strlen(initial_response)) { - - nd_log_daemon(NDLP_ERR, "STREAM '%s' [receive from [%s]:%s]: " - "failed to reply." - , rpt->hostname - , rpt->client_ip, rpt->client_port - ); - } - - receiver_state_free(rpt); - return HTTP_RESP_OK; - } - - if(unlikely(web_client_streaming_rate_t > 0)) { - static SPINLOCK spinlock = NETDATA_SPINLOCK_INITIALIZER; - static time_t last_stream_accepted_t = 0; - - time_t now = now_realtime_sec(); - spinlock_lock(&spinlock); - - if(unlikely(last_stream_accepted_t == 0)) - last_stream_accepted_t = now; - - if(now - last_stream_accepted_t < web_client_streaming_rate_t) { - spinlock_unlock(&spinlock); - - char msg[100 + 1]; - snprintfz(msg, sizeof(msg) - 1, - "rate limit, will accept new connection in %ld secs", - (long)(web_client_streaming_rate_t - (now - last_stream_accepted_t))); - - rrdpush_receive_log_status( - rpt, msg, - RRDPUSH_STATUS_RATE_LIMIT, NDLP_NOTICE); - - receiver_state_free(rpt); - return rrdpush_receiver_too_busy_now(w); - } - - last_stream_accepted_t = now; - spinlock_unlock(&spinlock); - } - - /* - * Quick path for rejecting multiple connections. The lock taken is fine-grained - it only protects the receiver - * pointer within the host (if a host exists). This protects against multiple concurrent web requests hitting - * separate threads within the web-server and landing here. The lock guards the thread-shutdown sequence that - * detaches the receiver from the host. If the host is being created (first time-access) then we also use the - * lock to prevent race-hazard (two threads try to create the host concurrently, one wins and the other does a - * lookup to the now-attached structure). - */ - - { - time_t age = 0; - bool receiver_stale = false; - bool receiver_working = false; - - rrd_rdlock(); - RRDHOST *host = rrdhost_find_by_guid(rpt->machine_guid); - if (unlikely(host && rrdhost_flag_check(host, RRDHOST_FLAG_ARCHIVED))) /* Ignore archived hosts. 
*/ - host = NULL; - - if (host) { - netdata_mutex_lock(&host->receiver_lock); - if (host->receiver) { - age = now_monotonic_sec() - host->receiver->last_msg_t; - - if (age < 30) - receiver_working = true; - else - receiver_stale = true; - } - netdata_mutex_unlock(&host->receiver_lock); - } - rrd_rdunlock(); - - if (receiver_stale && stop_streaming_receiver(host, STREAM_HANDSHAKE_DISCONNECT_STALE_RECEIVER)) { - // we stopped the receiver - // we can proceed with this connection - receiver_stale = false; - - nd_log_daemon(NDLP_NOTICE, "STREAM '%s' [receive from [%s]:%s]: " - "stopped previous stale receiver to accept this one." - , rpt->hostname - , rpt->client_ip, rpt->client_port - ); - } - - if (receiver_working || receiver_stale) { - // another receiver is already connected - // try again later - - char msg[200 + 1]; - snprintfz(msg, sizeof(msg) - 1, - "multiple connections for same host, " - "old connection was last used %ld secs ago%s", - age, receiver_stale ? " (signaled old receiver to stop)" : " (new connection not accepted)"); - - rrdpush_receive_log_status( - rpt, msg, - RRDPUSH_STATUS_ALREADY_CONNECTED, NDLP_DEBUG); - - // Have not set WEB_CLIENT_FLAG_DONT_CLOSE_SOCKET - caller should clean up - buffer_flush(w->response.data); - buffer_strcat(w->response.data, START_STREAMING_ERROR_ALREADY_STREAMING); - receiver_state_free(rpt); - return HTTP_RESP_CONFLICT; - } - } - - rrdpush_receiver_takeover_web_connection(w, rpt); - - char tag[NETDATA_THREAD_TAG_MAX + 1]; - snprintfz(tag, NETDATA_THREAD_TAG_MAX, THREAD_TAG_STREAM_RECEIVER "[%s]", rpt->hostname); - tag[NETDATA_THREAD_TAG_MAX] = '\0'; - - rpt->thread = nd_thread_create(tag, NETDATA_THREAD_OPTION_DEFAULT, rrdpush_receiver_thread, (void *)rpt); - if(!rpt->thread) { - rrdpush_receive_log_status( - rpt, "can't create receiver thread", - RRDPUSH_STATUS_INTERNAL_SERVER_ERROR, NDLP_ERR); - - buffer_flush(w->response.data); - buffer_strcat(w->response.data, "Can't handle this request"); - receiver_state_free(rpt); - return HTTP_RESP_INTERNAL_SERVER_ERROR; - } - - // prevent the caller from closing the streaming socket - return HTTP_RESP_OK; -} - -void rrdpush_reset_destinations_postpone_time(RRDHOST *host) { - uint32_t wait = (host->sender) ? 
host->sender->reconnect_delay : 5; - time_t now = now_realtime_sec(); - for (struct rrdpush_destinations *d = host->destinations; d; d = d->next) - d->postpone_reconnection_until = now + wait; -} - -static struct { - STREAM_HANDSHAKE err; - const char *str; -} handshake_errors[] = { - { STREAM_HANDSHAKE_OK_V3, "CONNECTED" }, - { STREAM_HANDSHAKE_OK_V2, "CONNECTED" }, - { STREAM_HANDSHAKE_OK_V1, "CONNECTED" }, - { STREAM_HANDSHAKE_NEVER, "" }, - { STREAM_HANDSHAKE_ERROR_BAD_HANDSHAKE, "BAD HANDSHAKE" }, - { STREAM_HANDSHAKE_ERROR_LOCALHOST, "LOCALHOST" }, - { STREAM_HANDSHAKE_ERROR_ALREADY_CONNECTED, "ALREADY CONNECTED" }, - { STREAM_HANDSHAKE_ERROR_DENIED, "DENIED" }, - { STREAM_HANDSHAKE_ERROR_SEND_TIMEOUT, "SEND TIMEOUT" }, - { STREAM_HANDSHAKE_ERROR_RECEIVE_TIMEOUT, "RECEIVE TIMEOUT" }, - { STREAM_HANDSHAKE_ERROR_INVALID_CERTIFICATE, "INVALID CERTIFICATE" }, - { STREAM_HANDSHAKE_ERROR_SSL_ERROR, "SSL ERROR" }, - { STREAM_HANDSHAKE_ERROR_CANT_CONNECT, "CANT CONNECT" }, - { STREAM_HANDSHAKE_BUSY_TRY_LATER, "BUSY TRY LATER" }, - { STREAM_HANDSHAKE_INTERNAL_ERROR, "INTERNAL ERROR" }, - { STREAM_HANDSHAKE_INITIALIZATION, "REMOTE IS INITIALIZING" }, - { STREAM_HANDSHAKE_DISCONNECT_HOST_CLEANUP, "DISCONNECTED HOST CLEANUP" }, - { STREAM_HANDSHAKE_DISCONNECT_STALE_RECEIVER, "DISCONNECTED STALE RECEIVER" }, - { STREAM_HANDSHAKE_DISCONNECT_SHUTDOWN, "DISCONNECTED SHUTDOWN REQUESTED" }, - { STREAM_HANDSHAKE_DISCONNECT_NETDATA_EXIT, "DISCONNECTED NETDATA EXIT" }, - { STREAM_HANDSHAKE_DISCONNECT_PARSER_EXIT, "DISCONNECTED PARSE ENDED" }, - {STREAM_HANDSHAKE_DISCONNECT_UNKNOWN_SOCKET_READ_ERROR, "DISCONNECTED UNKNOWN SOCKET READ ERROR" }, - { STREAM_HANDSHAKE_DISCONNECT_PARSER_FAILED, "DISCONNECTED PARSE ERROR" }, - { STREAM_HANDSHAKE_DISCONNECT_RECEIVER_LEFT, "DISCONNECTED RECEIVER LEFT" }, - { STREAM_HANDSHAKE_DISCONNECT_ORPHAN_HOST, "DISCONNECTED ORPHAN HOST" }, - { STREAM_HANDSHAKE_NON_STREAMABLE_HOST, "NON STREAMABLE HOST" }, - { STREAM_HANDSHAKE_DISCONNECT_NOT_SUFFICIENT_READ_BUFFER, "DISCONNECTED NOT SUFFICIENT READ BUFFER" }, - {STREAM_HANDSHAKE_DISCONNECT_SOCKET_EOF, "DISCONNECTED SOCKET EOF" }, - {STREAM_HANDSHAKE_DISCONNECT_SOCKET_READ_FAILED, "DISCONNECTED SOCKET READ FAILED" }, - {STREAM_HANDSHAKE_DISCONNECT_SOCKET_READ_TIMEOUT, "DISCONNECTED SOCKET READ TIMEOUT" }, - { 0, NULL }, -}; - -const char *stream_handshake_error_to_string(STREAM_HANDSHAKE handshake_error) { - if(handshake_error >= STREAM_HANDSHAKE_OK_V1) - // handshake_error is the whole version / capabilities number - return "CONNECTED"; - - for(size_t i = 0; handshake_errors[i].str ; i++) { - if(handshake_error == handshake_errors[i].err) - return handshake_errors[i].str; - } - - return "UNKNOWN"; -} - -static struct { - STREAM_CAPABILITIES cap; - const char *str; -} capability_names[] = { - {STREAM_CAP_V1, "V1" }, - {STREAM_CAP_V2, "V2" }, - {STREAM_CAP_VN, "VN" }, - {STREAM_CAP_VCAPS, "VCAPS" }, - {STREAM_CAP_HLABELS, "HLABELS" }, - {STREAM_CAP_CLAIM, "CLAIM" }, - {STREAM_CAP_CLABELS, "CLABELS" }, - {STREAM_CAP_LZ4, "LZ4" }, - {STREAM_CAP_FUNCTIONS, "FUNCTIONS" }, - {STREAM_CAP_REPLICATION, "REPLICATION" }, - {STREAM_CAP_BINARY, "BINARY" }, - {STREAM_CAP_INTERPOLATED, "INTERPOLATED" }, - {STREAM_CAP_IEEE754, "IEEE754" }, - {STREAM_CAP_DATA_WITH_ML, "ML" }, - {STREAM_CAP_DYNCFG, "DYNCFG" }, - {STREAM_CAP_SLOTS, "SLOTS" }, - {STREAM_CAP_ZSTD, "ZSTD" }, - {STREAM_CAP_GZIP, "GZIP" }, - {STREAM_CAP_BROTLI, "BROTLI" }, - {STREAM_CAP_PROGRESS, "PROGRESS" }, - {0 , NULL }, -}; - -void stream_capabilities_to_string(BUFFER *wb, 
STREAM_CAPABILITIES caps) { - for(size_t i = 0; capability_names[i].str ; i++) { - if(caps & capability_names[i].cap) { - buffer_strcat(wb, capability_names[i].str); - buffer_strcat(wb, " "); - } - } -} - -void stream_capabilities_to_json_array(BUFFER *wb, STREAM_CAPABILITIES caps, const char *key) { - if(key) - buffer_json_member_add_array(wb, key); - else - buffer_json_add_array_item_array(wb); - - for(size_t i = 0; capability_names[i].str ; i++) { - if(caps & capability_names[i].cap) - buffer_json_add_array_item_string(wb, capability_names[i].str); - } - - buffer_json_array_close(wb); -} - -void log_receiver_capabilities(struct receiver_state *rpt) { - BUFFER *wb = buffer_create(100, NULL); - stream_capabilities_to_string(wb, rpt->capabilities); - - nd_log_daemon(NDLP_INFO, "STREAM %s [receive from [%s]:%s]: established link with negotiated capabilities: %s", - rrdhost_hostname(rpt->host), rpt->client_ip, rpt->client_port, buffer_tostring(wb)); - - buffer_free(wb); -} - -void log_sender_capabilities(struct sender_state *s) { - BUFFER *wb = buffer_create(100, NULL); - stream_capabilities_to_string(wb, s->capabilities); - - nd_log_daemon(NDLP_INFO, "STREAM %s [send to %s]: established link with negotiated capabilities: %s", - rrdhost_hostname(s->host), s->connected_to, buffer_tostring(wb)); - - buffer_free(wb); -} - -STREAM_CAPABILITIES stream_our_capabilities(RRDHOST *host, bool sender) { - STREAM_CAPABILITIES disabled_capabilities = globally_disabled_capabilities; - - if(host && sender) { - // we have DATA_WITH_ML capability - // we should remove the DATA_WITH_ML capability if our database does not have anomaly info - // this can happen under these conditions: 1. we don't run ML, and 2. we don't receive ML - netdata_mutex_lock(&host->receiver_lock); - - if(!ml_host_running(host) && !stream_has_capability(host->receiver, STREAM_CAP_DATA_WITH_ML)) - disabled_capabilities |= STREAM_CAP_DATA_WITH_ML; - - netdata_mutex_unlock(&host->receiver_lock); - - if(host->sender) - disabled_capabilities |= host->sender->disabled_capabilities; - } - - return (STREAM_CAP_V1 | - STREAM_CAP_V2 | - STREAM_CAP_VN | - STREAM_CAP_VCAPS | - STREAM_CAP_HLABELS | - STREAM_CAP_CLAIM | - STREAM_CAP_CLABELS | - STREAM_CAP_FUNCTIONS | - STREAM_CAP_REPLICATION | - STREAM_CAP_BINARY | - STREAM_CAP_INTERPOLATED | - STREAM_CAP_SLOTS | - STREAM_CAP_PROGRESS | - STREAM_CAP_COMPRESSIONS_AVAILABLE | - STREAM_CAP_DYNCFG | - STREAM_CAP_IEEE754 | - STREAM_CAP_DATA_WITH_ML | - 0) & ~disabled_capabilities; -} - -STREAM_CAPABILITIES convert_stream_version_to_capabilities(int32_t version, RRDHOST *host, bool sender) { - STREAM_CAPABILITIES caps = 0; - - if(version <= 1) caps = STREAM_CAP_V1; - else if(version < STREAM_OLD_VERSION_CLAIM) caps = STREAM_CAP_V2 | STREAM_CAP_HLABELS; - else if(version <= STREAM_OLD_VERSION_CLAIM) caps = STREAM_CAP_VN | STREAM_CAP_HLABELS | STREAM_CAP_CLAIM; - else if(version <= STREAM_OLD_VERSION_CLABELS) caps = STREAM_CAP_VN | STREAM_CAP_HLABELS | STREAM_CAP_CLAIM | STREAM_CAP_CLABELS; - else if(version <= STREAM_OLD_VERSION_LZ4) caps = STREAM_CAP_VN | STREAM_CAP_HLABELS | STREAM_CAP_CLAIM | STREAM_CAP_CLABELS | STREAM_CAP_LZ4_AVAILABLE; - else caps = version; - - if(caps & STREAM_CAP_VCAPS) - caps &= ~(STREAM_CAP_V1|STREAM_CAP_V2|STREAM_CAP_VN); - - if(caps & STREAM_CAP_VN) - caps &= ~(STREAM_CAP_V1|STREAM_CAP_V2); - - if(caps & STREAM_CAP_V2) - caps &= ~(STREAM_CAP_V1); - - STREAM_CAPABILITIES common_caps = caps & stream_our_capabilities(host, sender); - - if(!(common_caps & STREAM_CAP_INTERPOLATED)) 
- // DATA WITH ML requires INTERPOLATED - common_caps &= ~STREAM_CAP_DATA_WITH_ML; - - return common_caps; -} - -int32_t stream_capabilities_to_vn(uint32_t caps) { - if(caps & STREAM_CAP_LZ4) return STREAM_OLD_VERSION_LZ4; - if(caps & STREAM_CAP_CLABELS) return STREAM_OLD_VERSION_CLABELS; - return STREAM_OLD_VERSION_CLAIM; // if(caps & STREAM_CAP_CLAIM) -} diff --git a/src/streaming/rrdpush.h b/src/streaming/rrdpush.h index d55a07675..55d0c296c 100644 --- a/src/streaming/rrdpush.h +++ b/src/streaming/rrdpush.h @@ -3,759 +3,16 @@ #ifndef NETDATA_RRDPUSH_H #define NETDATA_RRDPUSH_H 1 -#include "libnetdata/libnetdata.h" -#include "daemon/common.h" -#include "web/server/web_client.h" -#include "database/rrdfunctions.h" -#include "database/rrd.h" +#include "stream-handshake.h" +#include "stream-capabilities.h" +#include "stream-conf.h" +#include "stream-compression/compression.h" -#define CONNECTED_TO_SIZE 100 -#define CBUFFER_INITIAL_SIZE (16 * 1024) -#define THREAD_BUFFER_INITIAL_SIZE (CBUFFER_INITIAL_SIZE / 2) +#include "sender.h" +#include "receiver.h" -// ---------------------------------------------------------------------------- -// obsolete versions - do not use anymore - -#define STREAM_OLD_VERSION_CLAIM 3 -#define STREAM_OLD_VERSION_CLABELS 4 -#define STREAM_OLD_VERSION_LZ4 5 - -// ---------------------------------------------------------------------------- -// capabilities negotiation - -typedef enum { - STREAM_CAP_NONE = 0, - - // do not use the first 3 bits - // they used to be versions 1, 2 and 3 - // before we introduce capabilities - - STREAM_CAP_V1 = (1 << 3), // v1 = the oldest protocol - STREAM_CAP_V2 = (1 << 4), // v2 = the second version of the protocol (with host labels) - STREAM_CAP_VN = (1 << 5), // version negotiation supported (for versions 3, 4, 5 of the protocol) - // v3 = claiming supported - // v4 = chart labels supported - // v5 = lz4 compression supported - STREAM_CAP_VCAPS = (1 << 6), // capabilities negotiation supported - STREAM_CAP_HLABELS = (1 << 7), // host labels supported - STREAM_CAP_CLAIM = (1 << 8), // claiming supported - STREAM_CAP_CLABELS = (1 << 9), // chart labels supported - STREAM_CAP_LZ4 = (1 << 10), // lz4 compression supported - STREAM_CAP_FUNCTIONS = (1 << 11), // plugin functions supported - STREAM_CAP_REPLICATION = (1 << 12), // replication supported - STREAM_CAP_BINARY = (1 << 13), // streaming supports binary data - STREAM_CAP_INTERPOLATED = (1 << 14), // streaming supports interpolated streaming of values - STREAM_CAP_IEEE754 = (1 << 15), // streaming supports binary/hex transfer of double values - STREAM_CAP_DATA_WITH_ML = (1 << 16), // streaming supports transferring anomaly bit - // STREAM_CAP_DYNCFG = (1 << 17), // leave this unused for as long as possible - STREAM_CAP_SLOTS = (1 << 18), // the sender can appoint a unique slot for each chart - STREAM_CAP_ZSTD = (1 << 19), // ZSTD compression supported - STREAM_CAP_GZIP = (1 << 20), // GZIP compression supported - STREAM_CAP_BROTLI = (1 << 21), // BROTLI compression supported - STREAM_CAP_PROGRESS = (1 << 22), // Functions PROGRESS support - STREAM_CAP_DYNCFG = (1 << 23), // support for DYNCFG - - STREAM_CAP_INVALID = (1 << 30), // used as an invalid value for capabilities when this is set - // this must be signed int, so don't use the last bit - // needed for negotiating errors between parent and child -} STREAM_CAPABILITIES; - -#ifdef ENABLE_LZ4 -#define STREAM_CAP_LZ4_AVAILABLE STREAM_CAP_LZ4 -#else -#define STREAM_CAP_LZ4_AVAILABLE 0 -#endif // ENABLE_LZ4 - -#ifdef ENABLE_ZSTD 
-#define STREAM_CAP_ZSTD_AVAILABLE STREAM_CAP_ZSTD -#else -#define STREAM_CAP_ZSTD_AVAILABLE 0 -#endif // ENABLE_ZSTD - -#ifdef ENABLE_BROTLI -#define STREAM_CAP_BROTLI_AVAILABLE STREAM_CAP_BROTLI -#else -#define STREAM_CAP_BROTLI_AVAILABLE 0 -#endif // ENABLE_BROTLI - -#define STREAM_CAP_COMPRESSIONS_AVAILABLE (STREAM_CAP_LZ4_AVAILABLE|STREAM_CAP_ZSTD_AVAILABLE|STREAM_CAP_BROTLI_AVAILABLE|STREAM_CAP_GZIP) - -extern STREAM_CAPABILITIES globally_disabled_capabilities; - -STREAM_CAPABILITIES stream_our_capabilities(RRDHOST *host, bool sender); - -#define stream_has_capability(rpt, capability) ((rpt) && ((rpt)->capabilities & (capability)) == (capability)) - -static inline bool stream_has_more_than_one_capability_of(STREAM_CAPABILITIES caps, STREAM_CAPABILITIES mask) { - STREAM_CAPABILITIES common = (STREAM_CAPABILITIES)(caps & mask); - return (common & (common - 1)) != 0 && common != 0; -} - -// ---------------------------------------------------------------------------- -// stream handshake - -#define HTTP_HEADER_SIZE 8192 - -#define STREAMING_PROTOCOL_VERSION "1.1" -#define START_STREAMING_PROMPT_V1 "Hit me baby, push them over..." -#define START_STREAMING_PROMPT_V2 "Hit me baby, push them over and bring the host labels..." -#define START_STREAMING_PROMPT_VN "Hit me baby, push them over with the version=" - -#define START_STREAMING_ERROR_SAME_LOCALHOST "Don't hit me baby, you are trying to stream my localhost back" -#define START_STREAMING_ERROR_ALREADY_STREAMING "This GUID is already streaming to this server" -#define START_STREAMING_ERROR_NOT_PERMITTED "You are not permitted to access this. Check the logs for more info." -#define START_STREAMING_ERROR_BUSY_TRY_LATER "The server is too busy now to accept this request. Try later." -#define START_STREAMING_ERROR_INTERNAL_ERROR "The server encountered an internal error. Try later." -#define START_STREAMING_ERROR_INITIALIZATION "The server is initializing. Try later." 
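Editorial aside (a sketch, not part of the upstream patch): the negotiation built on the STREAM_CAP_* bits above reduces to a bitwise AND of the two advertised bitmaps, followed by stripping any bit whose prerequisite is missing, as the code does when STREAM_CAP_DATA_WITH_ML survives the intersection without STREAM_CAP_INTERPOLATED. A self-contained illustration with made-up stand-in bits:

#include <stdint.h>
#include <stdio.h>

/* stand-in bits for illustration only; not the real STREAM_CAP_* values */
#define CAP_INTERPOLATED (1u << 0)
#define CAP_DATA_WITH_ML (1u << 1)
#define CAP_ZSTD         (1u << 2)

static uint32_t negotiate(uint32_t ours, uint32_t theirs) {
    uint32_t common = ours & theirs;      /* keep only what both sides support */
    if(!(common & CAP_INTERPOLATED))      /* ML samples ride on interpolated data */
        common &= ~CAP_DATA_WITH_ML;
    return common;
}

int main(void) {
    /* the peer lacks INTERPOLATED, so DATA_WITH_ML is dropped as well */
    uint32_t caps = negotiate(CAP_INTERPOLATED | CAP_DATA_WITH_ML | CAP_ZSTD,
                              CAP_DATA_WITH_ML | CAP_ZSTD);
    printf("negotiated: 0x%x\n", caps);   /* prints 0x4, i.e. CAP_ZSTD only */
    return 0;
}

The same bit trick used by stream_has_more_than_one_capability_of() applies to the result: (caps & (caps - 1)) != 0 clears the lowest set bit and is non-zero exactly when at least two bits survive the intersection.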
- -#define RRDPUSH_STATUS_CONNECTED "CONNECTED" -#define RRDPUSH_STATUS_ALREADY_CONNECTED "ALREADY CONNECTED" -#define RRDPUSH_STATUS_DISCONNECTED "DISCONNECTED" -#define RRDPUSH_STATUS_RATE_LIMIT "RATE LIMIT TRY LATER" -#define RRDPUSH_STATUS_INITIALIZATION_IN_PROGRESS "INITIALIZATION IN PROGRESS RETRY LATER" -#define RRDPUSH_STATUS_INTERNAL_SERVER_ERROR "INTERNAL SERVER ERROR DROPPING CONNECTION" -#define RRDPUSH_STATUS_DUPLICATE_RECEIVER "DUPLICATE RECEIVER DROPPING CONNECTION" -#define RRDPUSH_STATUS_CANT_REPLY "CANT REPLY DROPPING CONNECTION" -#define RRDPUSH_STATUS_NO_HOSTNAME "NO HOSTNAME PERMISSION DENIED" -#define RRDPUSH_STATUS_NO_API_KEY "NO API KEY PERMISSION DENIED" -#define RRDPUSH_STATUS_INVALID_API_KEY "INVALID API KEY PERMISSION DENIED" -#define RRDPUSH_STATUS_NO_MACHINE_GUID "NO MACHINE GUID PERMISSION DENIED" -#define RRDPUSH_STATUS_MACHINE_GUID_DISABLED "MACHINE GUID DISABLED PERMISSION DENIED" -#define RRDPUSH_STATUS_INVALID_MACHINE_GUID "INVALID MACHINE GUID PERMISSION DENIED" -#define RRDPUSH_STATUS_API_KEY_DISABLED "API KEY DISABLED PERMISSION DENIED" -#define RRDPUSH_STATUS_NOT_ALLOWED_IP "NOT ALLOWED IP PERMISSION DENIED" -#define RRDPUSH_STATUS_LOCALHOST "LOCALHOST PERMISSION DENIED" -#define RRDPUSH_STATUS_PERMISSION_DENIED "PERMISSION DENIED" -#define RRDPUSH_STATUS_BAD_HANDSHAKE "BAD HANDSHAKE" -#define RRDPUSH_STATUS_TIMEOUT "TIMEOUT" -#define RRDPUSH_STATUS_CANT_UPGRADE_CONNECTION "CANT UPGRADE CONNECTION" -#define RRDPUSH_STATUS_SSL_ERROR "SSL ERROR" -#define RRDPUSH_STATUS_INVALID_SSL_CERTIFICATE "INVALID SSL CERTIFICATE" -#define RRDPUSH_STATUS_CANT_ESTABLISH_SSL_CONNECTION "CANT ESTABLISH SSL CONNECTION" - -typedef enum { - STREAM_HANDSHAKE_OK_V3 = 3, // v3+ - STREAM_HANDSHAKE_OK_V2 = 2, // v2 - STREAM_HANDSHAKE_OK_V1 = 1, // v1 - STREAM_HANDSHAKE_NEVER = 0, // never tried to connect - STREAM_HANDSHAKE_ERROR_BAD_HANDSHAKE = -1, - STREAM_HANDSHAKE_ERROR_LOCALHOST = -2, - STREAM_HANDSHAKE_ERROR_ALREADY_CONNECTED = -3, - STREAM_HANDSHAKE_ERROR_DENIED = -4, - STREAM_HANDSHAKE_ERROR_SEND_TIMEOUT = -5, - STREAM_HANDSHAKE_ERROR_RECEIVE_TIMEOUT = -6, - STREAM_HANDSHAKE_ERROR_INVALID_CERTIFICATE = -7, - STREAM_HANDSHAKE_ERROR_SSL_ERROR = -8, - STREAM_HANDSHAKE_ERROR_CANT_CONNECT = -9, - STREAM_HANDSHAKE_BUSY_TRY_LATER = -10, - STREAM_HANDSHAKE_INTERNAL_ERROR = -11, - STREAM_HANDSHAKE_INITIALIZATION = -12, - STREAM_HANDSHAKE_DISCONNECT_HOST_CLEANUP = -13, - STREAM_HANDSHAKE_DISCONNECT_STALE_RECEIVER = -14, - STREAM_HANDSHAKE_DISCONNECT_SHUTDOWN = -15, - STREAM_HANDSHAKE_DISCONNECT_NETDATA_EXIT = -16, - STREAM_HANDSHAKE_DISCONNECT_PARSER_EXIT = -17, - STREAM_HANDSHAKE_DISCONNECT_UNKNOWN_SOCKET_READ_ERROR = -18, - STREAM_HANDSHAKE_DISCONNECT_PARSER_FAILED = -19, - STREAM_HANDSHAKE_DISCONNECT_RECEIVER_LEFT = -20, - STREAM_HANDSHAKE_DISCONNECT_ORPHAN_HOST = -21, - STREAM_HANDSHAKE_NON_STREAMABLE_HOST = -22, - STREAM_HANDSHAKE_DISCONNECT_NOT_SUFFICIENT_READ_BUFFER = -23, - STREAM_HANDSHAKE_DISCONNECT_SOCKET_EOF = -24, - STREAM_HANDSHAKE_DISCONNECT_SOCKET_READ_FAILED = -25, - STREAM_HANDSHAKE_DISCONNECT_SOCKET_READ_TIMEOUT = -26, - STREAM_HANDSHAKE_ERROR_HTTP_UPGRADE = -27, - -} STREAM_HANDSHAKE; - - -// ---------------------------------------------------------------------------- - -typedef struct { - char *os_name; - char *os_id; - char *os_version; - char *kernel_name; - char *kernel_version; -} stream_encoded_t; - -#include "compression.h" - -// Thread-local storage -// Metric transmission: collector threads asynchronously fill the buffer, sender thread uses it. 
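Editorial aside (a hedged sketch of the thread-local pattern the comment above describes; tls_buffer and tx_start() are hypothetical stand-ins for netdata's BUFFER and sender_start(), which appear later in this patch): each collector thread lazily creates one private buffer and merely rewinds it per transmission, so formatting metrics needs no lock; synchronization happens only when the finished chunk is committed to the sender's circular buffer.

#include <stdlib.h>

typedef struct { char *data; size_t size; size_t used; } tls_buffer;

static __thread tls_buffer *thread_buffer = NULL; /* one per collector thread */

/* called by a collector thread before formatting metrics;
   error handling is omitted for brevity */
static tls_buffer *tx_start(size_t initial_size) {
    if(!thread_buffer) {                  /* lazily allocate on first use */
        thread_buffer = calloc(1, sizeof(*thread_buffer));
        thread_buffer->data = malloc(initial_size);
        thread_buffer->size = initial_size;
    }
    thread_buffer->used = 0;              /* reuse the allocation, just rewind */
    return thread_buffer;
}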
-
-typedef enum __attribute__((packed)) {
- STREAM_TRAFFIC_TYPE_REPLICATION = 0,
- STREAM_TRAFFIC_TYPE_FUNCTIONS,
- STREAM_TRAFFIC_TYPE_METADATA,
- STREAM_TRAFFIC_TYPE_DATA,
- STREAM_TRAFFIC_TYPE_DYNCFG,
-
- // terminator
- STREAM_TRAFFIC_TYPE_MAX,
-} STREAM_TRAFFIC_TYPE;
-
-typedef enum __attribute__((packed)) {
- SENDER_FLAG_OVERFLOW = (1 << 0), // The buffer has overflowed
-} SENDER_FLAGS;
-
-struct sender_state {
- RRDHOST *host;
- pid_t tid; // the thread id of the sender, from gettid_cached()
- SENDER_FLAGS flags;
- int timeout;
- int default_port;
- uint32_t reconnect_delay;
- char connected_to[CONNECTED_TO_SIZE + 1]; // We don't know which proxy we connect to, passed back from socket.c
- size_t begin;
- size_t reconnects_counter;
- size_t sent_bytes;
- size_t sent_bytes_on_this_connection;
- size_t send_attempts;
- time_t last_traffic_seen_t;
- time_t last_state_since_t; // the timestamp of the last state (online/offline) change
- size_t not_connected_loops;
- // Metrics are collected asynchronously by collector threads calling rrdset_done_push(). This can also trigger
- // the lazy creation of the sender thread - both cases (buffer access and thread creation) are guarded here.
- SPINLOCK spinlock;
- struct circular_buffer *buffer;
- char read_buffer[PLUGINSD_LINE_MAX + 1];
- ssize_t read_len;
- STREAM_CAPABILITIES capabilities;
- STREAM_CAPABILITIES disabled_capabilities;
-
- size_t sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_MAX];
-
- int rrdpush_sender_pipe[2]; // collector to sender thread signaling
- int rrdpush_sender_socket;
-
- uint16_t hops;
-
- struct line_splitter line;
- struct compressor_state compressor;
-
-#ifdef NETDATA_LOG_STREAM_SENDER
- FILE *stream_log_fp;
-#endif
-
-#ifdef ENABLE_HTTPS
- NETDATA_SSL ssl; // structure used to encrypt the connection
-#endif
-
- struct {
- bool shutdown;
- STREAM_HANDSHAKE reason;
- } exit;
-
- struct {
- DICTIONARY *requests; // de-duplication of replication requests, per chart
- time_t oldest_request_after_t; // the timestamp of the oldest replication request
- time_t latest_completed_before_t; // the timestamp of the latest replication request
-
- struct {
- size_t pending_requests; // the currently outstanding replication requests
- size_t charts_replicating; // the number of unique charts with pending replication requests (incremented when a request is added, decremented when the request finishes - it does not track completion of the chart's replication)
- bool reached_max; // true when the sender buffer should not get more replication responses
- } atomic;
-
- } replication;
-
- struct {
- bool pending_data;
- size_t buffer_used_percentage; // the current utilization of the sending buffer
- usec_t last_flush_time_ut; // the last time the sender flushed the sending buffer in USEC
- time_t last_buffer_recreate_s; // the last time the sender buffer was re-created
- } atomic;
-
- struct {
- bool intercept_input;
- const char *transaction;
- const char *timeout_s;
- const char *function;
- const char *access;
- const char *source;
- BUFFER *payload;
- } functions;
-
- int parent_using_h2o;
-};
-
-#define sender_lock(sender) spinlock_lock(&(sender)->spinlock)
-#define sender_unlock(sender) spinlock_unlock(&(sender)->spinlock)
-
-#define rrdpush_sender_pipe_has_pending_data(sender) __atomic_load_n(&(sender)->atomic.pending_data, __ATOMIC_RELAXED)
-#define rrdpush_sender_pipe_set_pending_data(sender) __atomic_store_n(&(sender)->atomic.pending_data, true, __ATOMIC_RELAXED)
-#define
rrdpush_sender_pipe_clear_pending_data(sender) __atomic_store_n(&(sender)->atomic.pending_data, false, __ATOMIC_RELAXED) - -#define rrdpush_sender_last_buffer_recreate_get(sender) __atomic_load_n(&(sender)->atomic.last_buffer_recreate_s, __ATOMIC_RELAXED) -#define rrdpush_sender_last_buffer_recreate_set(sender, value) __atomic_store_n(&(sender)->atomic.last_buffer_recreate_s, value, __ATOMIC_RELAXED) - -#define rrdpush_sender_replication_buffer_full_set(sender, value) __atomic_store_n(&((sender)->replication.atomic.reached_max), value, __ATOMIC_SEQ_CST) -#define rrdpush_sender_replication_buffer_full_get(sender) __atomic_load_n(&((sender)->replication.atomic.reached_max), __ATOMIC_SEQ_CST) - -#define rrdpush_sender_set_buffer_used_percent(sender, value) __atomic_store_n(&((sender)->atomic.buffer_used_percentage), value, __ATOMIC_RELAXED) -#define rrdpush_sender_get_buffer_used_percent(sender) __atomic_load_n(&((sender)->atomic.buffer_used_percentage), __ATOMIC_RELAXED) - -#define rrdpush_sender_set_flush_time(sender) __atomic_store_n(&((sender)->atomic.last_flush_time_ut), now_realtime_usec(), __ATOMIC_RELAXED) -#define rrdpush_sender_get_flush_time(sender) __atomic_load_n(&((sender)->atomic.last_flush_time_ut), __ATOMIC_RELAXED) - -#define rrdpush_sender_replicating_charts(sender) __atomic_load_n(&((sender)->replication.atomic.charts_replicating), __ATOMIC_RELAXED) -#define rrdpush_sender_replicating_charts_plus_one(sender) __atomic_add_fetch(&((sender)->replication.atomic.charts_replicating), 1, __ATOMIC_RELAXED) -#define rrdpush_sender_replicating_charts_minus_one(sender) __atomic_sub_fetch(&((sender)->replication.atomic.charts_replicating), 1, __ATOMIC_RELAXED) -#define rrdpush_sender_replicating_charts_zero(sender) __atomic_store_n(&((sender)->replication.atomic.charts_replicating), 0, __ATOMIC_RELAXED) - -#define rrdpush_sender_pending_replication_requests(sender) __atomic_load_n(&((sender)->replication.atomic.pending_requests), __ATOMIC_RELAXED) -#define rrdpush_sender_pending_replication_requests_plus_one(sender) __atomic_add_fetch(&((sender)->replication.atomic.pending_requests), 1, __ATOMIC_RELAXED) -#define rrdpush_sender_pending_replication_requests_minus_one(sender) __atomic_sub_fetch(&((sender)->replication.atomic.pending_requests), 1, __ATOMIC_RELAXED) -#define rrdpush_sender_pending_replication_requests_zero(sender) __atomic_store_n(&((sender)->replication.atomic.pending_requests), 0, __ATOMIC_RELAXED) - -/* -typedef enum { - STREAM_NODE_INSTANCE_FEATURE_CLOUD_ONLINE = (1 << 0), - STREAM_NODE_INSTANCE_FEATURE_VIRTUAL_HOST = (1 << 1), - STREAM_NODE_INSTANCE_FEATURE_HEALTH_ENABLED = (1 << 2), - STREAM_NODE_INSTANCE_FEATURE_ML_SELF = (1 << 3), - STREAM_NODE_INSTANCE_FEATURE_ML_RECEIVED = (1 << 4), - STREAM_NODE_INSTANCE_FEATURE_SSL = (1 << 5), -} STREAM_NODE_INSTANCE_FEATURES; - -typedef struct stream_node_instance { - uuid_t uuid; - STRING *agent; - STREAM_NODE_INSTANCE_FEATURES features; - uint32_t hops; - - // receiver information on that agent - int32_t capabilities; - uint32_t local_port; - uint32_t remote_port; - STRING *local_ip; - STRING *remote_ip; -} STREAM_NODE_INSTANCE; -*/ - -struct receiver_state { - RRDHOST *host; - pid_t tid; - ND_THREAD *thread; - int fd; - char *key; - char *hostname; - char *registry_hostname; - char *machine_guid; - char *os; - char *timezone; // Unused? 
- char *abbrev_timezone; - int32_t utc_offset; - char *client_ip; // Duplicated in pluginsd - char *client_port; // Duplicated in pluginsd - char *program_name; // Duplicated in pluginsd - char *program_version; - struct rrdhost_system_info *system_info; - STREAM_CAPABILITIES capabilities; - time_t last_msg_t; - - struct buffered_reader reader; - - uint16_t hops; - - struct { - bool shutdown; // signal the streaming parser to exit - STREAM_HANDSHAKE reason; - } exit; - - struct { - RRD_MEMORY_MODE mode; - int history; - int update_every; - int health_enabled; // CONFIG_BOOLEAN_YES, CONFIG_BOOLEAN_NO, CONFIG_BOOLEAN_AUTO - time_t alarms_delay; - uint32_t alarms_history; - int rrdpush_enabled; - char *rrdpush_api_key; // DONT FREE - it is allocated in appconfig - char *rrdpush_send_charts_matching; // DONT FREE - it is allocated in appconfig - bool rrdpush_enable_replication; - time_t rrdpush_seconds_to_replicate; - time_t rrdpush_replication_step; - char *rrdpush_destination; // DONT FREE - it is allocated in appconfig - unsigned int rrdpush_compression; - STREAM_CAPABILITIES compression_priorities[COMPRESSION_ALGORITHM_MAX]; - } config; - -#ifdef ENABLE_HTTPS - NETDATA_SSL ssl; -#endif - - time_t replication_first_time_t; - - struct decompressor_state decompressor; -/* - struct { - uint32_t count; - STREAM_NODE_INSTANCE *array; - } instances; -*/ - -#ifdef ENABLE_H2O - void *h2o_ctx; -#endif -}; - -#ifdef ENABLE_H2O -#define is_h2o_rrdpush(x) ((x)->h2o_ctx != NULL) -#define unless_h2o_rrdpush(x) if(!is_h2o_rrdpush(x)) -#endif - -struct rrdpush_destinations { - STRING *destination; - bool ssl; - uint32_t attempts; - time_t since; - time_t postpone_reconnection_until; - STREAM_HANDSHAKE reason; - - struct rrdpush_destinations *prev; - struct rrdpush_destinations *next; -}; - -extern unsigned int default_rrdpush_enabled; -extern unsigned int default_rrdpush_compression_enabled; -extern char *default_rrdpush_destination; -extern char *default_rrdpush_api_key; -extern char *default_rrdpush_send_charts_matching; -extern bool default_rrdpush_enable_replication; -extern time_t default_rrdpush_seconds_to_replicate; -extern time_t default_rrdpush_replication_step; -extern unsigned int remote_clock_resync_iterations; - -void rrdpush_destinations_init(RRDHOST *host); -void rrdpush_destinations_free(RRDHOST *host); - -BUFFER *sender_start(struct sender_state *s); -void sender_commit(struct sender_state *s, BUFFER *wb, STREAM_TRAFFIC_TYPE type); -int rrdpush_init(); -bool rrdpush_receiver_needs_dbengine(); -int configured_as_parent(); - -typedef struct rrdset_stream_buffer { - STREAM_CAPABILITIES capabilities; - bool v2; - bool begin_v2_added; - time_t wall_clock_time; - uint64_t rrdset_flags; // RRDSET_FLAGS - time_t last_point_end_time_s; - BUFFER *wb; -} RRDSET_STREAM_BUFFER; - -RRDSET_STREAM_BUFFER rrdset_push_metric_initialize(RRDSET *st, time_t wall_clock_time); -void rrdset_push_metrics_v1(RRDSET_STREAM_BUFFER *rsb, RRDSET *st); -void rrdset_push_metrics_finished(RRDSET_STREAM_BUFFER *rsb, RRDSET *st); -void rrddim_push_metrics_v2(RRDSET_STREAM_BUFFER *rsb, RRDDIM *rd, usec_t point_end_time_ut, NETDATA_DOUBLE n, SN_FLAGS flags); - -bool rrdset_push_chart_definition_now(RRDSET *st); -void *rrdpush_sender_thread(void *ptr); -void rrdpush_send_host_labels(RRDHOST *host); -void rrdpush_send_claimed_id(RRDHOST *host); -void rrdpush_send_global_functions(RRDHOST *host); - -int rrdpush_receiver_thread_spawn(struct web_client *w, char *decoded_query_string, void *h2o_ctx); -void 
rrdpush_sender_thread_stop(RRDHOST *host, STREAM_HANDSHAKE reason, bool wait); - -void rrdpush_sender_send_this_host_variable_now(RRDHOST *host, const RRDVAR_ACQUIRED *rva); -int connect_to_one_of_destinations( - RRDHOST *host, - int default_port, - struct timeval *timeout, - size_t *reconnects_counter, - char *connected_to, - size_t connected_to_size, - struct rrdpush_destinations **destination); - -void rrdpush_signal_sender_to_wake_up(struct sender_state *s); - -void rrdpush_reset_destinations_postpone_time(RRDHOST *host); -const char *stream_handshake_error_to_string(STREAM_HANDSHAKE handshake_error); -void stream_capabilities_to_json_array(BUFFER *wb, STREAM_CAPABILITIES caps, const char *key); -void rrdpush_receive_log_status(struct receiver_state *rpt, const char *msg, const char *status, ND_LOG_FIELD_PRIORITY priority); -void log_receiver_capabilities(struct receiver_state *rpt); -void log_sender_capabilities(struct sender_state *s); -STREAM_CAPABILITIES convert_stream_version_to_capabilities(int32_t version, RRDHOST *host, bool sender); -int32_t stream_capabilities_to_vn(uint32_t caps); -void stream_capabilities_to_string(BUFFER *wb, STREAM_CAPABILITIES caps); - -void receiver_state_free(struct receiver_state *rpt); -bool stop_streaming_receiver(RRDHOST *host, STREAM_HANDSHAKE reason); - -void sender_thread_buffer_free(void); - -#include "replication.h" - -typedef enum __attribute__((packed)) { - RRDHOST_DB_STATUS_INITIALIZING = 0, - RRDHOST_DB_STATUS_QUERYABLE, -} RRDHOST_DB_STATUS; - -static inline const char *rrdhost_db_status_to_string(RRDHOST_DB_STATUS status) { - switch(status) { - default: - case RRDHOST_DB_STATUS_INITIALIZING: - return "initializing"; - - case RRDHOST_DB_STATUS_QUERYABLE: - return "online"; - } -} - -typedef enum __attribute__((packed)) { - RRDHOST_DB_LIVENESS_STALE = 0, - RRDHOST_DB_LIVENESS_LIVE, -} RRDHOST_DB_LIVENESS; - -static inline const char *rrdhost_db_liveness_to_string(RRDHOST_DB_LIVENESS status) { - switch(status) { - default: - case RRDHOST_DB_LIVENESS_STALE: - return "stale"; - - case RRDHOST_DB_LIVENESS_LIVE: - return "live"; - } -} - -typedef enum __attribute__((packed)) { - RRDHOST_INGEST_STATUS_ARCHIVED = 0, - RRDHOST_INGEST_STATUS_INITIALIZING, - RRDHOST_INGEST_STATUS_REPLICATING, - RRDHOST_INGEST_STATUS_ONLINE, - RRDHOST_INGEST_STATUS_OFFLINE, -} RRDHOST_INGEST_STATUS; - -static inline const char *rrdhost_ingest_status_to_string(RRDHOST_INGEST_STATUS status) { - switch(status) { - case RRDHOST_INGEST_STATUS_ARCHIVED: - return "archived"; - - case RRDHOST_INGEST_STATUS_INITIALIZING: - return "initializing"; - - case RRDHOST_INGEST_STATUS_REPLICATING: - return "replicating"; - - case RRDHOST_INGEST_STATUS_ONLINE: - return "online"; - - default: - case RRDHOST_INGEST_STATUS_OFFLINE: - return "offline"; - } -} - -typedef enum __attribute__((packed)) { - RRDHOST_INGEST_TYPE_LOCALHOST = 0, - RRDHOST_INGEST_TYPE_VIRTUAL, - RRDHOST_INGEST_TYPE_CHILD, - RRDHOST_INGEST_TYPE_ARCHIVED, -} RRDHOST_INGEST_TYPE; - -static inline const char *rrdhost_ingest_type_to_string(RRDHOST_INGEST_TYPE type) { - switch(type) { - case RRDHOST_INGEST_TYPE_LOCALHOST: - return "localhost"; - - case RRDHOST_INGEST_TYPE_VIRTUAL: - return "virtual"; - - case RRDHOST_INGEST_TYPE_CHILD: - return "child"; - - default: - case RRDHOST_INGEST_TYPE_ARCHIVED: - return "archived"; - } -} - -typedef enum __attribute__((packed)) { - RRDHOST_STREAM_STATUS_DISABLED = 0, - RRDHOST_STREAM_STATUS_REPLICATING, - RRDHOST_STREAM_STATUS_ONLINE, - RRDHOST_STREAM_STATUS_OFFLINE, -} 
RRDHOST_STREAMING_STATUS; - -static inline const char *rrdhost_streaming_status_to_string(RRDHOST_STREAMING_STATUS status) { - switch(status) { - case RRDHOST_STREAM_STATUS_DISABLED: - return "disabled"; - - case RRDHOST_STREAM_STATUS_REPLICATING: - return "replicating"; - - case RRDHOST_STREAM_STATUS_ONLINE: - return "online"; - - default: - case RRDHOST_STREAM_STATUS_OFFLINE: - return "offline"; - } -} - -typedef enum __attribute__((packed)) { - RRDHOST_ML_STATUS_DISABLED = 0, - RRDHOST_ML_STATUS_OFFLINE, - RRDHOST_ML_STATUS_RUNNING, -} RRDHOST_ML_STATUS; - -static inline const char *rrdhost_ml_status_to_string(RRDHOST_ML_STATUS status) { - switch(status) { - case RRDHOST_ML_STATUS_RUNNING: - return "online"; - - case RRDHOST_ML_STATUS_OFFLINE: - return "offline"; - - default: - case RRDHOST_ML_STATUS_DISABLED: - return "disabled"; - } -} - -typedef enum __attribute__((packed)) { - RRDHOST_ML_TYPE_DISABLED = 0, - RRDHOST_ML_TYPE_SELF, - RRDHOST_ML_TYPE_RECEIVED, -} RRDHOST_ML_TYPE; - -static inline const char *rrdhost_ml_type_to_string(RRDHOST_ML_TYPE type) { - switch(type) { - case RRDHOST_ML_TYPE_SELF: - return "self"; - - case RRDHOST_ML_TYPE_RECEIVED: - return "received"; - - default: - case RRDHOST_ML_TYPE_DISABLED: - return "disabled"; - } -} - -typedef enum __attribute__((packed)) { - RRDHOST_HEALTH_STATUS_DISABLED = 0, - RRDHOST_HEALTH_STATUS_INITIALIZING, - RRDHOST_HEALTH_STATUS_RUNNING, -} RRDHOST_HEALTH_STATUS; - -static inline const char *rrdhost_health_status_to_string(RRDHOST_HEALTH_STATUS status) { - switch(status) { - default: - case RRDHOST_HEALTH_STATUS_DISABLED: - return "disabled"; - - case RRDHOST_HEALTH_STATUS_INITIALIZING: - return "initializing"; - - case RRDHOST_HEALTH_STATUS_RUNNING: - return "online"; - } -} - -typedef enum __attribute__((packed)) { - RRDHOST_DYNCFG_STATUS_UNAVAILABLE = 0, - RRDHOST_DYNCFG_STATUS_AVAILABLE, -} RRDHOST_DYNCFG_STATUS; - -static inline const char *rrdhost_dyncfg_status_to_string(RRDHOST_DYNCFG_STATUS status) { - switch(status) { - default: - case RRDHOST_DYNCFG_STATUS_UNAVAILABLE: - return "unavailable"; - - case RRDHOST_DYNCFG_STATUS_AVAILABLE: - return "online"; - } -} - -typedef struct rrdhost_status { - RRDHOST *host; - time_t now; - - struct { - RRDHOST_DYNCFG_STATUS status; - } dyncfg; - - struct { - RRDHOST_DB_STATUS status; - RRDHOST_DB_LIVENESS liveness; - RRD_MEMORY_MODE mode; - time_t first_time_s; - time_t last_time_s; - size_t metrics; - size_t instances; - size_t contexts; - } db; - - struct { - RRDHOST_ML_STATUS status; - RRDHOST_ML_TYPE type; - struct ml_metrics_statistics metrics; - } ml; - - struct { - size_t hops; - RRDHOST_INGEST_TYPE type; - RRDHOST_INGEST_STATUS status; - SOCKET_PEERS peers; - bool ssl; - STREAM_CAPABILITIES capabilities; - uint32_t id; - time_t since; - STREAM_HANDSHAKE reason; - - struct { - bool in_progress; - NETDATA_DOUBLE completion; - size_t instances; - } replication; - } ingest; - - struct { - size_t hops; - RRDHOST_STREAMING_STATUS status; - SOCKET_PEERS peers; - bool ssl; - bool compression; - STREAM_CAPABILITIES capabilities; - uint32_t id; - time_t since; - STREAM_HANDSHAKE reason; - - struct { - bool in_progress; - NETDATA_DOUBLE completion; - size_t instances; - } replication; - - size_t sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_MAX]; - } stream; - - struct { - RRDHOST_HEALTH_STATUS status; - struct { - uint32_t undefined; - uint32_t uninitialized; - uint32_t clear; - uint32_t warning; - uint32_t critical; - } alerts; - } health; -} RRDHOST_STATUS; - -void 
rrdhost_status(RRDHOST *host, time_t now, RRDHOST_STATUS *s); -bool rrdhost_state_cloud_emulation(RRDHOST *host); - -bool rrdpush_compression_initialize(struct sender_state *s); -bool rrdpush_decompression_initialize(struct receiver_state *rpt); -void rrdpush_parse_compression_order(struct receiver_state *rpt, const char *order); -void rrdpush_select_receiver_compression_algorithm(struct receiver_state *rpt); -void rrdpush_compression_deactivate(struct sender_state *s); +#include "rrdhost-status.h" +#include "protocol/commands.h" +#include "stream-path.h" #endif //NETDATA_RRDPUSH_H diff --git a/src/streaming/sender-commit.c b/src/streaming/sender-commit.c new file mode 100644 index 000000000..6ff7cb2ba --- /dev/null +++ b/src/streaming/sender-commit.c @@ -0,0 +1,168 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "sender-internals.h" + +static __thread BUFFER *sender_thread_buffer = NULL; +static __thread bool sender_thread_buffer_used = false; +static __thread time_t sender_thread_buffer_last_reset_s = 0; + +void sender_thread_buffer_free(void) { + buffer_free(sender_thread_buffer); + sender_thread_buffer = NULL; + sender_thread_buffer_used = false; +} + +// Collector thread starting a transmission +BUFFER *sender_start(struct sender_state *s) { + if(unlikely(sender_thread_buffer_used)) + fatal("STREAMING: thread buffer is used multiple times concurrently."); + + if(unlikely(rrdpush_sender_last_buffer_recreate_get(s) > sender_thread_buffer_last_reset_s)) { + if(unlikely(sender_thread_buffer && sender_thread_buffer->size > THREAD_BUFFER_INITIAL_SIZE)) { + buffer_free(sender_thread_buffer); + sender_thread_buffer = NULL; + } + } + + if(unlikely(!sender_thread_buffer)) { + sender_thread_buffer = buffer_create(THREAD_BUFFER_INITIAL_SIZE, &netdata_buffers_statistics.buffers_streaming); + sender_thread_buffer_last_reset_s = rrdpush_sender_last_buffer_recreate_get(s); + } + + sender_thread_buffer_used = true; + buffer_flush(sender_thread_buffer); + return sender_thread_buffer; +} + +#define SENDER_BUFFER_ADAPT_TO_TIMES_MAX_SIZE 3 + +// Collector thread finishing a transmission +void sender_commit(struct sender_state *s, BUFFER *wb, STREAM_TRAFFIC_TYPE type) { + + if(unlikely(wb != sender_thread_buffer)) + fatal("STREAMING: sender is trying to commit a buffer that is not this thread's buffer."); + + if(unlikely(!sender_thread_buffer_used)) + fatal("STREAMING: sender is committing a buffer twice."); + + sender_thread_buffer_used = false; + + char *src = (char *)buffer_tostring(wb); + size_t src_len = buffer_strlen(wb); + + if(unlikely(!src || !src_len)) + return; + + sender_lock(s); + +#ifdef NETDATA_LOG_STREAM_SENDER + if(type == STREAM_TRAFFIC_TYPE_METADATA) { + if(!s->stream_log_fp) { + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "/tmp/stream-sender-%s.txt", s->host ? rrdhost_hostname(s->host) : "unknown"); + + s->stream_log_fp = fopen(filename, "w"); + } + + fprintf(s->stream_log_fp, "\n--- SEND MESSAGE START: %s ----\n" + "%s" + "--- SEND MESSAGE END ----------------------------------------\n" + , rrdhost_hostname(s->host), src + ); + } +#endif + + if(unlikely(s->buffer->max_size < (src_len + 1) * SENDER_BUFFER_ADAPT_TO_TIMES_MAX_SIZE)) { + netdata_log_info("STREAM %s [send to %s]: max buffer size of %zu is too small for a data message of size %zu. 
Increasing the max buffer size to %d times the max data message size.", + rrdhost_hostname(s->host), s->connected_to, s->buffer->max_size, buffer_strlen(wb) + 1, SENDER_BUFFER_ADAPT_TO_TIMES_MAX_SIZE); + + s->buffer->max_size = (src_len + 1) * SENDER_BUFFER_ADAPT_TO_TIMES_MAX_SIZE; + } + + if (s->compressor.initialized) { + while(src_len) { + size_t size_to_compress = src_len; + + if(unlikely(size_to_compress > COMPRESSION_MAX_MSG_SIZE)) { + if (stream_has_capability(s, STREAM_CAP_BINARY)) + size_to_compress = COMPRESSION_MAX_MSG_SIZE; + else { + if (size_to_compress > COMPRESSION_MAX_MSG_SIZE) { + // we need to find the last newline + // so that the decompressor will have a whole line to work with + + const char *t = &src[COMPRESSION_MAX_MSG_SIZE]; + while (--t >= src) + if (unlikely(*t == '\n')) + break; + + if (t <= src) { + size_to_compress = COMPRESSION_MAX_MSG_SIZE; + } else + size_to_compress = t - src + 1; + } + } + } + + const char *dst; + size_t dst_len = rrdpush_compress(&s->compressor, src, size_to_compress, &dst); + if (!dst_len) { + netdata_log_error("STREAM %s [send to %s]: COMPRESSION failed. Resetting compressor and re-trying", + rrdhost_hostname(s->host), s->connected_to); + + rrdpush_compression_initialize(s); + dst_len = rrdpush_compress(&s->compressor, src, size_to_compress, &dst); + if(!dst_len) { + netdata_log_error("STREAM %s [send to %s]: COMPRESSION failed again. Deactivating compression", + rrdhost_hostname(s->host), s->connected_to); + + worker_is_busy(WORKER_SENDER_JOB_DISCONNECT_NO_COMPRESSION); + rrdpush_compression_deactivate(s); + rrdpush_sender_thread_close_socket(s); + sender_unlock(s); + return; + } + } + + rrdpush_signature_t signature = rrdpush_compress_encode_signature(dst_len); + +#ifdef NETDATA_INTERNAL_CHECKS + // check if reversing the signature provides the same length + size_t decoded_dst_len = rrdpush_decompress_decode_signature((const char *)&signature, sizeof(signature)); + if(decoded_dst_len != dst_len) + fatal("RRDPUSH COMPRESSION: invalid signature, original payload %zu bytes, " + "compressed payload length %zu bytes, but signature says payload is %zu bytes", + size_to_compress, dst_len, decoded_dst_len); +#endif + + if(cbuffer_add_unsafe(s->buffer, (const char *)&signature, sizeof(signature))) + s->flags |= SENDER_FLAG_OVERFLOW; + else { + if(cbuffer_add_unsafe(s->buffer, dst, dst_len)) + s->flags |= SENDER_FLAG_OVERFLOW; + else + s->sent_bytes_on_this_connection_per_type[type] += dst_len + sizeof(signature); + } + + src = src + size_to_compress; + src_len -= size_to_compress; + } + } + else if(cbuffer_add_unsafe(s->buffer, src, src_len)) + s->flags |= SENDER_FLAG_OVERFLOW; + else + s->sent_bytes_on_this_connection_per_type[type] += src_len; + + replication_recalculate_buffer_used_ratio_unsafe(s); + + bool signal_sender = false; + if(!rrdpush_sender_pipe_has_pending_data(s)) { + rrdpush_sender_pipe_set_pending_data(s); + signal_sender = true; + } + + sender_unlock(s); + + if(signal_sender && (!stream_has_capability(s, STREAM_CAP_INTERPOLATED) || type != STREAM_TRAFFIC_TYPE_DATA)) + rrdpush_signal_sender_to_wake_up(s); +} diff --git a/src/streaming/sender-connect.c b/src/streaming/sender-connect.c new file mode 100644 index 000000000..ac5f392a0 --- /dev/null +++ b/src/streaming/sender-connect.c @@ -0,0 +1,741 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "sender-internals.h" + +void rrdpush_sender_thread_close_socket(struct sender_state *s) { + rrdhost_flag_clear(s->host, RRDHOST_FLAG_RRDPUSH_SENDER_CONNECTED | 
RRDHOST_FLAG_RRDPUSH_SENDER_READY_4_METRICS); + + netdata_ssl_close(&s->ssl); + + if(s->rrdpush_sender_socket != -1) { + close(s->rrdpush_sender_socket); + s->rrdpush_sender_socket = -1; + } + + // do not flush the circular buffer here + // this function is called sometimes with the sender lock, sometimes without the lock +} + +void rrdpush_encode_variable(stream_encoded_t *se, RRDHOST *host) { + se->os_name = (host->system_info->host_os_name)?url_encode(host->system_info->host_os_name):strdupz(""); + se->os_id = (host->system_info->host_os_id)?url_encode(host->system_info->host_os_id):strdupz(""); + se->os_version = (host->system_info->host_os_version)?url_encode(host->system_info->host_os_version):strdupz(""); + se->kernel_name = (host->system_info->kernel_name)?url_encode(host->system_info->kernel_name):strdupz(""); + se->kernel_version = (host->system_info->kernel_version)?url_encode(host->system_info->kernel_version):strdupz(""); +} + +void rrdpush_clean_encoded(stream_encoded_t *se) { + if (se->os_name) { + freez(se->os_name); + se->os_name = NULL; + } + + if (se->os_id) { + freez(se->os_id); + se->os_id = NULL; + } + + if (se->os_version) { + freez(se->os_version); + se->os_version = NULL; + } + + if (se->kernel_name) { + freez(se->kernel_name); + se->kernel_name = NULL; + } + + if (se->kernel_version) { + freez(se->kernel_version); + se->kernel_version = NULL; + } +} + +struct { + const char *response; + const char *status; + size_t length; + int32_t version; + bool dynamic; + const char *error; + int worker_job_id; + int postpone_reconnect_seconds; + ND_LOG_FIELD_PRIORITY priority; +} stream_responses[] = { + { + .response = START_STREAMING_PROMPT_VN, + .length = sizeof(START_STREAMING_PROMPT_VN) - 1, + .status = RRDPUSH_STATUS_CONNECTED, + .version = STREAM_HANDSHAKE_OK_V3, // and above + .dynamic = true, // dynamic = we will parse the version / capabilities + .error = NULL, + .worker_job_id = 0, + .postpone_reconnect_seconds = 0, + .priority = NDLP_INFO, + }, + { + .response = START_STREAMING_PROMPT_V2, + .length = sizeof(START_STREAMING_PROMPT_V2) - 1, + .status = RRDPUSH_STATUS_CONNECTED, + .version = STREAM_HANDSHAKE_OK_V2, + .dynamic = false, + .error = NULL, + .worker_job_id = 0, + .postpone_reconnect_seconds = 0, + .priority = NDLP_INFO, + }, + { + .response = START_STREAMING_PROMPT_V1, + .length = sizeof(START_STREAMING_PROMPT_V1) - 1, + .status = RRDPUSH_STATUS_CONNECTED, + .version = STREAM_HANDSHAKE_OK_V1, + .dynamic = false, + .error = NULL, + .worker_job_id = 0, + .postpone_reconnect_seconds = 0, + .priority = NDLP_INFO, + }, + { + .response = START_STREAMING_ERROR_SAME_LOCALHOST, + .length = sizeof(START_STREAMING_ERROR_SAME_LOCALHOST) - 1, + .status = RRDPUSH_STATUS_LOCALHOST, + .version = STREAM_HANDSHAKE_ERROR_LOCALHOST, + .dynamic = false, + .error = "remote server rejected this stream, the host we are trying to stream is its localhost", + .worker_job_id = WORKER_SENDER_JOB_DISCONNECT_BAD_HANDSHAKE, + .postpone_reconnect_seconds = 60 * 60, // the IP may change, try it every hour + .priority = NDLP_DEBUG, + }, + { + .response = START_STREAMING_ERROR_ALREADY_STREAMING, + .length = sizeof(START_STREAMING_ERROR_ALREADY_STREAMING) - 1, + .status = RRDPUSH_STATUS_ALREADY_CONNECTED, + .version = STREAM_HANDSHAKE_ERROR_ALREADY_CONNECTED, + .dynamic = false, + .error = "remote server rejected this stream, the host we are trying to stream is already streamed to it", + .worker_job_id = WORKER_SENDER_JOB_DISCONNECT_BAD_HANDSHAKE, + .postpone_reconnect_seconds = 2 * 60, // 2 
minutes
+ .priority = NDLP_DEBUG,
+ },
+ {
+ .response = START_STREAMING_ERROR_NOT_PERMITTED,
+ .length = sizeof(START_STREAMING_ERROR_NOT_PERMITTED) - 1,
+ .status = RRDPUSH_STATUS_PERMISSION_DENIED,
+ .version = STREAM_HANDSHAKE_ERROR_DENIED,
+ .dynamic = false,
+ .error = "remote server denied access, probably we don't have the right API key?",
+ .worker_job_id = WORKER_SENDER_JOB_DISCONNECT_BAD_HANDSHAKE,
+ .postpone_reconnect_seconds = 1 * 60, // 1 minute
+ .priority = NDLP_ERR,
+ },
+ {
+ .response = START_STREAMING_ERROR_BUSY_TRY_LATER,
+ .length = sizeof(START_STREAMING_ERROR_BUSY_TRY_LATER) - 1,
+ .status = RRDPUSH_STATUS_RATE_LIMIT,
+ .version = STREAM_HANDSHAKE_BUSY_TRY_LATER,
+ .dynamic = false,
+ .error = "remote server is currently busy, we should try later",
+ .worker_job_id = WORKER_SENDER_JOB_DISCONNECT_BAD_HANDSHAKE,
+ .postpone_reconnect_seconds = 2 * 60, // 2 minutes
+ .priority = NDLP_NOTICE,
+ },
+ {
+ .response = START_STREAMING_ERROR_INTERNAL_ERROR,
+ .length = sizeof(START_STREAMING_ERROR_INTERNAL_ERROR) - 1,
+ .status = RRDPUSH_STATUS_INTERNAL_SERVER_ERROR,
+ .version = STREAM_HANDSHAKE_INTERNAL_ERROR,
+ .dynamic = false,
+ .error = "remote server encountered an internal error, we should try later",
+ .worker_job_id = WORKER_SENDER_JOB_DISCONNECT_BAD_HANDSHAKE,
+ .postpone_reconnect_seconds = 5 * 60, // 5 minutes
+ .priority = NDLP_CRIT,
+ },
+ {
+ .response = START_STREAMING_ERROR_INITIALIZATION,
+ .length = sizeof(START_STREAMING_ERROR_INITIALIZATION) - 1,
+ .status = RRDPUSH_STATUS_INITIALIZATION_IN_PROGRESS,
+ .version = STREAM_HANDSHAKE_INITIALIZATION,
+ .dynamic = false,
+ .error = "remote server is initializing, we should try later",
+ .worker_job_id = WORKER_SENDER_JOB_DISCONNECT_BAD_HANDSHAKE,
+ .postpone_reconnect_seconds = 2 * 60, // 2 minutes
+ .priority = NDLP_NOTICE,
+ },
+
+ // terminator
+ {
+ .response = NULL,
+ .length = 0,
+ .status = RRDPUSH_STATUS_BAD_HANDSHAKE,
+ .version = STREAM_HANDSHAKE_ERROR_BAD_HANDSHAKE,
+ .dynamic = false,
+ .error = "remote node response is not understood, is it Netdata?",
+ .worker_job_id = WORKER_SENDER_JOB_DISCONNECT_BAD_HANDSHAKE,
+ .postpone_reconnect_seconds = 1 * 60, // 1 minute
+ .priority = NDLP_ERR,
+ }
+};
+
+static inline bool rrdpush_sender_validate_response(RRDHOST *host, struct sender_state *s, char *http, size_t http_length) {
+ int32_t version = STREAM_HANDSHAKE_ERROR_BAD_HANDSHAKE;
+
+ int i;
+ for(i = 0; stream_responses[i].response ; i++) {
+ if(stream_responses[i].dynamic &&
+ http_length > stream_responses[i].length && http_length < (stream_responses[i].length + 30) &&
+ strncmp(http, stream_responses[i].response, stream_responses[i].length) == 0) {
+
+ version = str2i(&http[stream_responses[i].length]);
+ break;
+ }
+ else if(http_length == stream_responses[i].length && strcmp(http, stream_responses[i].response) == 0) {
+ version = stream_responses[i].version;
+
+ break;
+ }
+ }
+
+ if(version >= STREAM_HANDSHAKE_OK_V1) {
+ host->destination->reason = version;
+ host->destination->postpone_reconnection_until = now_realtime_sec() + s->reconnect_delay;
+ s->capabilities = convert_stream_version_to_capabilities(version, host, true);
+ return true;
+ }
+
+ ND_LOG_FIELD_PRIORITY priority = stream_responses[i].priority;
+ const char *error = stream_responses[i].error;
+ const char *status = stream_responses[i].status;
+ int worker_job_id = stream_responses[i].worker_job_id;
+ int delay = stream_responses[i].postpone_reconnect_seconds;
+
+ worker_is_busy(worker_job_id);
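/*
 * Editorial note, an illustration only and not part of the upstream patch:
 * the stream_responses[] table above ends with a catch-all terminator whose
 * .response is NULL, so when the loop finds no match, `i` still indexes a
 * valid row and its BAD_HANDSHAKE status, error text and reconnect delay are
 * used below. For the single .dynamic row, the version is parsed from the
 * response tail; any value above the legacy protocol numbers is later treated
 * by convert_stream_version_to_capabilities() as a capabilities bitmap:
 *
 *     // hypothetical response; 1581056 is an arbitrary example bitmap
 *     const char *resp = "Hit me baby, push them over with the version=1581056";
 *     size_t prefix = sizeof(START_STREAMING_PROMPT_VN) - 1;
 *     int32_t ver = str2i(&resp[prefix]); // -> 1581056, i.e. STREAM_CAP_* bits
 */
+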
rrdpush_sender_thread_close_socket(s); + host->destination->reason = version; + host->destination->postpone_reconnection_until = now_realtime_sec() + delay; + + ND_LOG_STACK lgs[] = { + ND_LOG_FIELD_TXT(NDF_RESPONSE_CODE, status), + ND_LOG_FIELD_END(), + }; + ND_LOG_STACK_PUSH(lgs); + + char buf[RFC3339_MAX_LENGTH]; + rfc3339_datetime_ut(buf, sizeof(buf), host->destination->postpone_reconnection_until * USEC_PER_SEC, 0, false); + + nd_log(NDLS_DAEMON, priority, + "STREAM %s [send to %s]: %s - will retry in %d secs, at %s", + rrdhost_hostname(host), s->connected_to, error, delay, buf); + + return false; +} + +unsigned char alpn_proto_list[] = { + 18, 'n', 'e', 't', 'd', 'a', 't', 'a', '_', 's', 't', 'r', 'e', 'a', 'm', '/', '2', '.', '0', + 8, 'h', 't', 't', 'p', '/', '1', '.', '1' +}; + +#define CONN_UPGRADE_VAL "upgrade" + +static bool rrdpush_sender_connect_ssl(struct sender_state *s) { + RRDHOST *host = s->host; + bool ssl_required = host && host->destination && host->destination->ssl; + + netdata_ssl_close(&host->sender->ssl); + + if(!ssl_required) + return true; + + if (netdata_ssl_open_ext(&host->sender->ssl, netdata_ssl_streaming_sender_ctx, s->rrdpush_sender_socket, alpn_proto_list, sizeof(alpn_proto_list))) { + if(!netdata_ssl_connect(&host->sender->ssl)) { + // couldn't connect + + ND_LOG_STACK lgs[] = { + ND_LOG_FIELD_TXT(NDF_RESPONSE_CODE, RRDPUSH_STATUS_SSL_ERROR), + ND_LOG_FIELD_END(), + }; + ND_LOG_STACK_PUSH(lgs); + + worker_is_busy(WORKER_SENDER_JOB_DISCONNECT_SSL_ERROR); + rrdpush_sender_thread_close_socket(s); + host->destination->reason = STREAM_HANDSHAKE_ERROR_SSL_ERROR; + host->destination->postpone_reconnection_until = now_realtime_sec() + 5 * 60; + return false; + } + + if (netdata_ssl_validate_certificate_sender && + security_test_certificate(host->sender->ssl.conn)) { + // certificate is not valid + + ND_LOG_STACK lgs[] = { + ND_LOG_FIELD_TXT(NDF_RESPONSE_CODE, RRDPUSH_STATUS_INVALID_SSL_CERTIFICATE), + ND_LOG_FIELD_END(), + }; + ND_LOG_STACK_PUSH(lgs); + + worker_is_busy(WORKER_SENDER_JOB_DISCONNECT_SSL_ERROR); + netdata_log_error("SSL: closing the stream connection, because the server SSL certificate is not valid."); + rrdpush_sender_thread_close_socket(s); + host->destination->reason = STREAM_HANDSHAKE_ERROR_INVALID_CERTIFICATE; + host->destination->postpone_reconnection_until = now_realtime_sec() + 5 * 60; + return false; + } + + return true; + } + + ND_LOG_STACK lgs[] = { + ND_LOG_FIELD_TXT(NDF_RESPONSE_CODE, RRDPUSH_STATUS_CANT_ESTABLISH_SSL_CONNECTION), + ND_LOG_FIELD_END(), + }; + ND_LOG_STACK_PUSH(lgs); + + netdata_log_error("SSL: failed to establish connection."); + return false; +} + +static int rrdpush_http_upgrade_prelude(RRDHOST *host, struct sender_state *s) { + + char http[HTTP_HEADER_SIZE + 1]; + snprintfz(http, HTTP_HEADER_SIZE, + "GET " NETDATA_STREAM_URL HTTP_1_1 HTTP_ENDL + "Upgrade: " NETDATA_STREAM_PROTO_NAME HTTP_ENDL + "Connection: Upgrade" + HTTP_HDR_END); + + ssize_t bytes = send_timeout( + &host->sender->ssl, + s->rrdpush_sender_socket, + http, + strlen(http), + 0, + 1000); + + bytes = recv_timeout( + &host->sender->ssl, + s->rrdpush_sender_socket, + http, + HTTP_HEADER_SIZE, + 0, + 1000); + + if (bytes <= 0) { + error_report("Error reading from remote"); + return 1; + } + + rbuf_t buf = rbuf_create(bytes); + rbuf_push(buf, http, bytes); + + http_parse_ctx ctx; + http_parse_ctx_create(&ctx, HTTP_PARSE_INITIAL); + ctx.flags |= HTTP_PARSE_FLAG_DONT_WAIT_FOR_CONTENT; + + int rc; + // while((rc = parse_http_response(buf, &ctx)) == 
HTTP_PARSE_NEED_MORE_DATA); + rc = parse_http_response(buf, &ctx); + + if (rc != HTTP_PARSE_SUCCESS) { + error_report("Failed to parse HTTP response sent. (%d)", rc); + goto err_cleanup; + } + if (ctx.http_code == HTTP_RESP_MOVED_PERM) { + const char *hdr = get_http_header_by_name(&ctx, "location"); + if (hdr) + error_report("HTTP response is %d Moved Permanently (location: \"%s\") instead of expected %d Switching Protocols.", ctx.http_code, hdr, HTTP_RESP_SWITCH_PROTO); + else + error_report("HTTP response is %d instead of expected %d Switching Protocols.", ctx.http_code, HTTP_RESP_SWITCH_PROTO); + goto err_cleanup; + } + if (ctx.http_code == HTTP_RESP_NOT_FOUND) { + error_report("HTTP response is %d instead of expected %d Switching Protocols. Parent version too old.", ctx.http_code, HTTP_RESP_SWITCH_PROTO); + // TODO set some flag here that will signify parent is older version + // and to try connection without rrdpush_http_upgrade_prelude next time + goto err_cleanup; + } + if (ctx.http_code != HTTP_RESP_SWITCH_PROTO) { + error_report("HTTP response is %d instead of expected %d Switching Protocols", ctx.http_code, HTTP_RESP_SWITCH_PROTO); + goto err_cleanup; + } + + const char *hdr = get_http_header_by_name(&ctx, "connection"); + if (!hdr) { + error_report("Missing \"connection\" header in reply"); + goto err_cleanup; + } + if (strncmp(hdr, CONN_UPGRADE_VAL, strlen(CONN_UPGRADE_VAL))) { + error_report("Expected \"connection: " CONN_UPGRADE_VAL "\""); + goto err_cleanup; + } + + hdr = get_http_header_by_name(&ctx, "upgrade"); + if (!hdr) { + error_report("Missing \"upgrade\" header in reply"); + goto err_cleanup; + } + if (strncmp(hdr, NETDATA_STREAM_PROTO_NAME, strlen(NETDATA_STREAM_PROTO_NAME))) { + error_report("Expected \"upgrade: " NETDATA_STREAM_PROTO_NAME "\""); + goto err_cleanup; + } + + netdata_log_debug(D_STREAM, "Stream sender upgrade to \"" NETDATA_STREAM_PROTO_NAME "\" successful"); + rbuf_free(buf); + http_parse_ctx_destroy(&ctx); + return 0; +err_cleanup: + rbuf_free(buf); + http_parse_ctx_destroy(&ctx); + return 1; +} + +static bool sender_send_connection_request(RRDHOST *host, int default_port, int timeout, struct sender_state *s) { + + struct timeval tv = { + .tv_sec = timeout, + .tv_usec = 0 + }; + + // make sure the socket is closed + rrdpush_sender_thread_close_socket(s); + + s->rrdpush_sender_socket = connect_to_one_of_destinations( + host + , default_port + , &tv + , &s->reconnects_counter + , s->connected_to + , sizeof(s->connected_to)-1 + , &host->destination + ); + + if(unlikely(s->rrdpush_sender_socket == -1)) { + // netdata_log_error("STREAM %s [send to %s]: could not connect to parent node at this time.", rrdhost_hostname(host), host->rrdpush_send_destination); + return false; + } + + // netdata_log_info("STREAM %s [send to %s]: initializing communication...", rrdhost_hostname(host), s->connected_to); + + // reset our capabilities to default + s->capabilities = stream_our_capabilities(host, true); + + /* TODO: During the implementation of #7265 switch the set of variables to HOST_* and CONTAINER_* if the + version negotiation resulted in a high enough version. 
+ */
+ stream_encoded_t se;
+ rrdpush_encode_variable(&se, host);
+
+ host->sender->hops = host->system_info->hops + 1;
+
+ char http[HTTP_HEADER_SIZE + 1];
+ int eol = snprintfz(http, HTTP_HEADER_SIZE,
+ "STREAM "
+ "key=%s"
+ "&hostname=%s"
+ "&registry_hostname=%s"
+ "&machine_guid=%s"
+ "&update_every=%d"
+ "&os=%s"
+ "&timezone=%s"
+ "&abbrev_timezone=%s"
+ "&utc_offset=%d"
+ "&hops=%d"
+ "&ml_capable=%d"
+ "&ml_enabled=%d"
+ "&mc_version=%d"
+ "&ver=%u"
+ "&NETDATA_INSTANCE_CLOUD_TYPE=%s"
+ "&NETDATA_INSTANCE_CLOUD_INSTANCE_TYPE=%s"
+ "&NETDATA_INSTANCE_CLOUD_INSTANCE_REGION=%s"
+ "&NETDATA_SYSTEM_OS_NAME=%s"
+ "&NETDATA_SYSTEM_OS_ID=%s"
+ "&NETDATA_SYSTEM_OS_ID_LIKE=%s"
+ "&NETDATA_SYSTEM_OS_VERSION=%s"
+ "&NETDATA_SYSTEM_OS_VERSION_ID=%s"
+ "&NETDATA_SYSTEM_OS_DETECTION=%s"
+ "&NETDATA_HOST_IS_K8S_NODE=%s"
+ "&NETDATA_SYSTEM_KERNEL_NAME=%s"
+ "&NETDATA_SYSTEM_KERNEL_VERSION=%s"
+ "&NETDATA_SYSTEM_ARCHITECTURE=%s"
+ "&NETDATA_SYSTEM_VIRTUALIZATION=%s"
+ "&NETDATA_SYSTEM_VIRT_DETECTION=%s"
+ "&NETDATA_SYSTEM_CONTAINER=%s"
+ "&NETDATA_SYSTEM_CONTAINER_DETECTION=%s"
+ "&NETDATA_CONTAINER_OS_NAME=%s"
+ "&NETDATA_CONTAINER_OS_ID=%s"
+ "&NETDATA_CONTAINER_OS_ID_LIKE=%s"
+ "&NETDATA_CONTAINER_OS_VERSION=%s"
+ "&NETDATA_CONTAINER_OS_VERSION_ID=%s"
+ "&NETDATA_CONTAINER_OS_DETECTION=%s"
+ "&NETDATA_SYSTEM_CPU_LOGICAL_CPU_COUNT=%s"
+ "&NETDATA_SYSTEM_CPU_FREQ=%s"
+ "&NETDATA_SYSTEM_TOTAL_RAM=%s"
+ "&NETDATA_SYSTEM_TOTAL_DISK_SIZE=%s"
+ "&NETDATA_PROTOCOL_VERSION=%s"
+ HTTP_1_1 HTTP_ENDL
+ "User-Agent: %s/%s\r\n"
+ "Accept: */*\r\n\r\n"
+ , host->rrdpush.send.api_key
+ , rrdhost_hostname(host)
+ , rrdhost_registry_hostname(host)
+ , host->machine_guid
+ , default_rrd_update_every
+ , rrdhost_os(host)
+ , rrdhost_timezone(host)
+ , rrdhost_abbrev_timezone(host)
+ , host->utc_offset
+ , host->sender->hops
+ , host->system_info->ml_capable
+ , host->system_info->ml_enabled
+ , host->system_info->mc_version
+ , s->capabilities
+ , (host->system_info->cloud_provider_type) ? host->system_info->cloud_provider_type : ""
+ , (host->system_info->cloud_instance_type) ? host->system_info->cloud_instance_type : ""
+ , (host->system_info->cloud_instance_region) ? host->system_info->cloud_instance_region : ""
+ , se.os_name
+ , se.os_id
+ , (host->system_info->host_os_id_like) ? host->system_info->host_os_id_like : ""
+ , se.os_version
+ , (host->system_info->host_os_version_id) ? host->system_info->host_os_version_id : ""
+ , (host->system_info->host_os_detection) ? host->system_info->host_os_detection : ""
+ , (host->system_info->is_k8s_node) ? host->system_info->is_k8s_node : ""
+ , se.kernel_name
+ , se.kernel_version
+ , (host->system_info->architecture) ? host->system_info->architecture : ""
+ , (host->system_info->virtualization) ? host->system_info->virtualization : ""
+ , (host->system_info->virt_detection) ? host->system_info->virt_detection : ""
+ , (host->system_info->container) ? host->system_info->container : ""
+ , (host->system_info->container_detection) ? host->system_info->container_detection : ""
+ , (host->system_info->container_os_name) ? host->system_info->container_os_name : ""
+ , (host->system_info->container_os_id) ? host->system_info->container_os_id : ""
+ , (host->system_info->container_os_id_like) ? host->system_info->container_os_id_like : ""
+ , (host->system_info->container_os_version) ? host->system_info->container_os_version : ""
+ , (host->system_info->container_os_version_id) ? host->system_info->container_os_version_id : ""
+ , (host->system_info->container_os_detection) ?
host->system_info->container_os_detection : "" + , (host->system_info->host_cores) ? host->system_info->host_cores : "" + , (host->system_info->host_cpu_freq) ? host->system_info->host_cpu_freq : "" + , (host->system_info->host_ram_total) ? host->system_info->host_ram_total : "" + , (host->system_info->host_disk_space) ? host->system_info->host_disk_space : "" + , STREAMING_PROTOCOL_VERSION + , rrdhost_program_name(host) + , rrdhost_program_version(host) + ); + http[eol] = 0x00; + rrdpush_clean_encoded(&se); + + if(!rrdpush_sender_connect_ssl(s)) + return false; + + if (s->parent_using_h2o && rrdpush_http_upgrade_prelude(host, s)) { + ND_LOG_STACK lgs[] = { + ND_LOG_FIELD_TXT(NDF_RESPONSE_CODE, RRDPUSH_STATUS_CANT_UPGRADE_CONNECTION), + ND_LOG_FIELD_END(), + }; + ND_LOG_STACK_PUSH(lgs); + + worker_is_busy(WORKER_SENDER_JOB_DISCONNECT_CANT_UPGRADE_CONNECTION); + rrdpush_sender_thread_close_socket(s); + host->destination->reason = STREAM_HANDSHAKE_ERROR_HTTP_UPGRADE; + host->destination->postpone_reconnection_until = now_realtime_sec() + 1 * 60; + return false; + } + + ssize_t len = (ssize_t)strlen(http); + ssize_t bytes = send_timeout( + &host->sender->ssl, + s->rrdpush_sender_socket, + http, + len, + 0, + timeout); + + if(bytes <= 0) { // timeout is 0 + ND_LOG_STACK lgs[] = { + ND_LOG_FIELD_TXT(NDF_RESPONSE_CODE, RRDPUSH_STATUS_TIMEOUT), + ND_LOG_FIELD_END(), + }; + ND_LOG_STACK_PUSH(lgs); + + worker_is_busy(WORKER_SENDER_JOB_DISCONNECT_TIMEOUT); + rrdpush_sender_thread_close_socket(s); + + nd_log(NDLS_DAEMON, NDLP_ERR, + "STREAM %s [send to %s]: failed to send HTTP header to remote netdata.", + rrdhost_hostname(host), s->connected_to); + + host->destination->reason = STREAM_HANDSHAKE_ERROR_SEND_TIMEOUT; + host->destination->postpone_reconnection_until = now_realtime_sec() + 1 * 60; + return false; + } + + bytes = recv_timeout( + &host->sender->ssl, + s->rrdpush_sender_socket, + http, + HTTP_HEADER_SIZE, + 0, + timeout); + + if(bytes <= 0) { // timeout is 0 + ND_LOG_STACK lgs[] = { + ND_LOG_FIELD_TXT(NDF_RESPONSE_CODE, RRDPUSH_STATUS_TIMEOUT), + ND_LOG_FIELD_END(), + }; + ND_LOG_STACK_PUSH(lgs); + + worker_is_busy(WORKER_SENDER_JOB_DISCONNECT_TIMEOUT); + rrdpush_sender_thread_close_socket(s); + + nd_log(NDLS_DAEMON, NDLP_ERR, + "STREAM %s [send to %s]: remote netdata does not respond.", + rrdhost_hostname(host), s->connected_to); + + host->destination->reason = STREAM_HANDSHAKE_ERROR_RECEIVE_TIMEOUT; + host->destination->postpone_reconnection_until = now_realtime_sec() + 30; + return false; + } + + if(sock_setnonblock(s->rrdpush_sender_socket) < 0) + nd_log(NDLS_DAEMON, NDLP_WARNING, + "STREAM %s [send to %s]: cannot set non-blocking mode for socket.", + rrdhost_hostname(host), s->connected_to); + sock_setcloexec(s->rrdpush_sender_socket); + + if(sock_enlarge_out(s->rrdpush_sender_socket) < 0) + nd_log(NDLS_DAEMON, NDLP_WARNING, + "STREAM %s [send to %s]: cannot enlarge the socket buffer.", + rrdhost_hostname(host), s->connected_to); + + http[bytes] = '\0'; + if(!rrdpush_sender_validate_response(host, s, http, bytes)) + return false; + + rrdpush_compression_initialize(s); + + log_sender_capabilities(s); + + ND_LOG_STACK lgs[] = { + ND_LOG_FIELD_TXT(NDF_RESPONSE_CODE, RRDPUSH_STATUS_CONNECTED), + ND_LOG_FIELD_END(), + }; + ND_LOG_STACK_PUSH(lgs); + + nd_log(NDLS_DAEMON, NDLP_DEBUG, + "STREAM %s: connected to %s...", + rrdhost_hostname(host), s->connected_to); + + return true; +} + +bool attempt_to_connect(struct sender_state *state) { + ND_LOG_STACK lgs[] = { + 
ND_LOG_FIELD_UUID(NDF_MESSAGE_ID, &streaming_to_parent_msgid),
+ ND_LOG_FIELD_END(),
+ };
+ ND_LOG_STACK_PUSH(lgs);
+
+ state->send_attempts = 0;
+
+ // reset the bytes we have sent for this session
+ state->sent_bytes_on_this_connection = 0;
+ memset(state->sent_bytes_on_this_connection_per_type, 0, sizeof(state->sent_bytes_on_this_connection_per_type));
+
+ if(sender_send_connection_request(state->host, state->default_port, state->timeout, state)) {
+ // reset the buffer, to properly send charts and metrics
+ rrdpush_sender_on_connect(state->host);
+
+ // send from the beginning
+ state->begin = 0;
+
+ // make sure the next reconnection will be immediate
+ state->not_connected_loops = 0;
+
+ // let the data collection threads know we are ready
+ rrdhost_flag_set(state->host, RRDHOST_FLAG_RRDPUSH_SENDER_CONNECTED);
+
+ rrdpush_sender_after_connect(state->host);
+
+ return true;
+ }
+
+ // we couldn't connect
+
+ // increase the failed connections counter
+ state->not_connected_loops++;
+
+ // slow re-connection on repeating errors
+ usec_t now_ut = now_monotonic_usec();
+ usec_t end_ut = now_ut + USEC_PER_SEC * state->reconnect_delay;
+ while(now_ut < end_ut) {
+ if(nd_thread_signaled_to_cancel())
+ return false;
+
+ sleep_usec(100 * USEC_PER_MS); // sleep 100ms between cancellation checks
+ now_ut = now_monotonic_usec();
+ }
+
+ return false;
+}
+
+bool rrdpush_sender_connect(struct sender_state *s) {
+ worker_is_busy(WORKER_SENDER_JOB_CONNECT);
+
+ time_t now_s = now_monotonic_sec();
+ rrdpush_sender_cbuffer_recreate_timed(s, now_s, false, true);
+ rrdpush_sender_execute_commands_cleanup(s);
+
+ rrdhost_flag_clear(s->host, RRDHOST_FLAG_RRDPUSH_SENDER_READY_4_METRICS);
+ s->flags &= ~SENDER_FLAG_OVERFLOW;
+ s->read_len = 0;
+ s->buffer->read = 0;
+ s->buffer->write = 0;
+
+ if(!attempt_to_connect(s))
+ return false;
+
+ if(rrdhost_sender_should_exit(s))
+ return false;
+
+ s->last_traffic_seen_t = now_monotonic_sec();
+ stream_path_send_to_parent(s->host);
+ rrdpush_sender_send_claimed_id(s->host);
+ rrdpush_send_host_labels(s->host);
+ rrdpush_send_global_functions(s->host);
+ s->replication.oldest_request_after_t = 0;
+
+ rrdhost_flag_set(s->host, RRDHOST_FLAG_RRDPUSH_SENDER_READY_4_METRICS);
+
+ nd_log(NDLS_DAEMON, NDLP_DEBUG,
+ "STREAM %s [send to %s]: enabling metrics streaming...",
+ rrdhost_hostname(s->host), s->connected_to);
+
+ return true;
+}
+
+// Either the receiver lost the connection or the host is being destroyed.
+// The sender mutex guards thread creation; any spurious data is wiped on reconnection.
+void rrdpush_sender_thread_stop(RRDHOST *host, STREAM_HANDSHAKE reason, bool wait) {
+ if (!host->sender)
+ return;
+
+ sender_lock(host->sender);
+
+ if(rrdhost_flag_check(host, RRDHOST_FLAG_RRDPUSH_SENDER_SPAWN)) {
+
+ host->sender->exit.shutdown = true;
+ host->sender->exit.reason = reason;
+
+ // signal it to cancel
+ nd_thread_signal_cancel(host->rrdpush_sender_thread);
+ }
+
+ sender_unlock(host->sender);
+
+ if(wait) {
+ sender_lock(host->sender);
+ while(host->sender->tid) {
+ sender_unlock(host->sender);
+ sleep_usec(10 * USEC_PER_MS);
+ sender_lock(host->sender);
+ }
+ sender_unlock(host->sender);
+ }
+}
diff --git a/src/streaming/sender-destinations.c b/src/streaming/sender-destinations.c
new file mode 100644
index 000000000..5e67ca039
--- /dev/null
+++ b/src/streaming/sender-destinations.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "sender-internals.h"
+
+void rrdpush_reset_destinations_postpone_time(RRDHOST *host) {
+ uint32_t wait = (host->sender) ?
host->sender->reconnect_delay : 5; + time_t now = now_realtime_sec(); + for (struct rrdpush_destinations *d = host->destinations; d; d = d->next) + d->postpone_reconnection_until = now + wait; +} + +void rrdpush_sender_ssl_init(RRDHOST *host) { + static SPINLOCK sp = NETDATA_SPINLOCK_INITIALIZER; + spinlock_lock(&sp); + + if(netdata_ssl_streaming_sender_ctx || !host) { + spinlock_unlock(&sp); + return; + } + + for(struct rrdpush_destinations *d = host->destinations; d ; d = d->next) { + if (d->ssl) { + // we need to initialize SSL + + netdata_ssl_initialize_ctx(NETDATA_SSL_STREAMING_SENDER_CTX); + ssl_security_location_for_context(netdata_ssl_streaming_sender_ctx, stream_conf_ssl_ca_file, stream_conf_ssl_ca_path); + + // stop the loop + break; + } + } + + spinlock_unlock(&sp); +} + +int connect_to_one_of_destinations( + RRDHOST *host, + int default_port, + struct timeval *timeout, + size_t *reconnects_counter, + char *connected_to, + size_t connected_to_size, + struct rrdpush_destinations **destination) +{ + int sock = -1; + + for (struct rrdpush_destinations *d = host->destinations; d; d = d->next) { + time_t now = now_realtime_sec(); + + if(nd_thread_signaled_to_cancel()) + return -1; + + if(d->postpone_reconnection_until > now) + continue; + + nd_log(NDLS_DAEMON, NDLP_DEBUG, + "STREAM %s: connecting to '%s' (default port: %d)...", + rrdhost_hostname(host), string2str(d->destination), default_port); + + if (reconnects_counter) + *reconnects_counter += 1; + + d->since = now; + d->attempts++; + sock = connect_to_this(string2str(d->destination), default_port, timeout); + + if (sock != -1) { + if (connected_to && connected_to_size) + strncpyz(connected_to, string2str(d->destination), connected_to_size); + + *destination = d; + + // move the current item to the end of the list + // without this, this destination will break the loop again and again + // not advancing the destinations to find one that may work + DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(host->destinations, d, prev, next); + DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(host->destinations, d, prev, next); + + break; + } + } + + return sock; +} + +struct destinations_init_tmp { + RRDHOST *host; + struct rrdpush_destinations *list; + int count; +}; + +static bool destinations_init_add_one(char *entry, void *data) { + struct destinations_init_tmp *t = data; + + struct rrdpush_destinations *d = callocz(1, sizeof(struct rrdpush_destinations)); + char *colon_ssl = strstr(entry, ":SSL"); + if(colon_ssl) { + *colon_ssl = '\0'; + d->ssl = true; + } + else + d->ssl = false; + + d->destination = string_strdupz(entry); + + __atomic_add_fetch(&netdata_buffers_statistics.rrdhost_senders, sizeof(struct rrdpush_destinations), __ATOMIC_RELAXED); + + DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(t->list, d, prev, next); + + t->count++; + nd_log_daemon(NDLP_INFO, "STREAM: added streaming destination No %d: '%s' to host '%s'", t->count, string2str(d->destination), rrdhost_hostname(t->host)); + + return false; // we return false, so that we will get all defined destinations +} + +void rrdpush_destinations_init(RRDHOST *host) { + if(!host->rrdpush.send.destination) return; + + rrdpush_destinations_free(host); + + struct destinations_init_tmp t = { + .host = host, + .list = NULL, + .count = 0, + }; + + foreach_entry_in_connection_string(host->rrdpush.send.destination, destinations_init_add_one, &t); + + host->destinations = t.list; +} + +void rrdpush_destinations_free(RRDHOST *host) { + while (host->destinations) { + struct rrdpush_destinations *tmp = host->destinations; 
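/*
 * Editor's note: each entry of the space-separated "destination" setting may
 * carry a ":SSL" marker, which destinations_init_add_one() above strips in
 * place before storing the address. A self-contained sketch of that parse
 * step, assuming only the standard C library (the helper name is
 * hypothetical):
 *
 *   #include <stdbool.h>
 *   #include <string.h>
 *
 *   // "parent1:19999:SSL" -> entry becomes "parent1:19999", returns true;
 *   // mirrors the source: the entry is truncated at the first ":SSL" found
 *   static bool strip_ssl_marker(char *entry) {
 *       char *marker = strstr(entry, ":SSL");
 *       if(!marker)
 *           return false;
 *       *marker = '\0';
 *       return true;
 *   }
 */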
+ DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(host->destinations, tmp, prev, next); + string_freez(tmp->destination); + freez(tmp); + __atomic_sub_fetch(&netdata_buffers_statistics.rrdhost_senders, sizeof(struct rrdpush_destinations), __ATOMIC_RELAXED); + } + + host->destinations = NULL; +} + diff --git a/src/streaming/sender-destinations.h b/src/streaming/sender-destinations.h new file mode 100644 index 000000000..e7c72cef7 --- /dev/null +++ b/src/streaming/sender-destinations.h @@ -0,0 +1,38 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_SENDER_DESTINATIONS_H +#define NETDATA_SENDER_DESTINATIONS_H + +#include "libnetdata/libnetdata.h" +#include "stream-handshake.h" +#include "database/rrd.h" + +struct rrdpush_destinations { + STRING *destination; + bool ssl; + uint32_t attempts; + time_t since; + time_t postpone_reconnection_until; + STREAM_HANDSHAKE reason; + + struct rrdpush_destinations *prev; + struct rrdpush_destinations *next; +}; + +void rrdpush_sender_ssl_init(RRDHOST *host); + +void rrdpush_reset_destinations_postpone_time(RRDHOST *host); + +void rrdpush_destinations_init(RRDHOST *host); +void rrdpush_destinations_free(RRDHOST *host); + +int connect_to_one_of_destinations( + RRDHOST *host, + int default_port, + struct timeval *timeout, + size_t *reconnects_counter, + char *connected_to, + size_t connected_to_size, + struct rrdpush_destinations **destination); + +#endif //NETDATA_SENDER_DESTINATIONS_H diff --git a/src/streaming/sender-execute.c b/src/streaming/sender-execute.c new file mode 100644 index 000000000..e180710e9 --- /dev/null +++ b/src/streaming/sender-execute.c @@ -0,0 +1,294 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "sender-internals.h" + +struct inflight_stream_function { + struct sender_state *sender; + STRING *transaction; + usec_t received_ut; +}; + +static void stream_execute_function_callback(BUFFER *func_wb, int code, void *data) { + struct inflight_stream_function *tmp = data; + struct sender_state *s = tmp->sender; + + if(rrdhost_can_send_definitions_to_parent(s->host)) { + BUFFER *wb = sender_start(s); + + pluginsd_function_result_begin_to_buffer(wb + , string2str(tmp->transaction) + , code + , content_type_id2string(func_wb->content_type) + , func_wb->expires); + + buffer_fast_strcat(wb, buffer_tostring(func_wb), buffer_strlen(func_wb)); + pluginsd_function_result_end_to_buffer(wb); + + sender_commit(s, wb, STREAM_TRAFFIC_TYPE_FUNCTIONS); + sender_thread_buffer_free(); + + internal_error(true, "STREAM %s [send to %s] FUNCTION transaction %s sending back response (%zu bytes, %"PRIu64" usec).", + rrdhost_hostname(s->host), s->connected_to, + string2str(tmp->transaction), + buffer_strlen(func_wb), + now_realtime_usec() - tmp->received_ut); + } + + string_freez(tmp->transaction); + buffer_free(func_wb); + freez(tmp); +} + +static void stream_execute_function_progress_callback(void *data, size_t done, size_t all) { + struct inflight_stream_function *tmp = data; + struct sender_state *s = tmp->sender; + + if(rrdhost_can_send_definitions_to_parent(s->host)) { + BUFFER *wb = sender_start(s); + + buffer_sprintf(wb, PLUGINSD_KEYWORD_FUNCTION_PROGRESS " '%s' %zu %zu\n", + string2str(tmp->transaction), done, all); + + sender_commit(s, wb, STREAM_TRAFFIC_TYPE_FUNCTIONS); + } +} + +static void execute_commands_function(struct sender_state *s, const char *command, const char *transaction, const char *timeout_s, const char *function, BUFFER *payload, const char *access, const char *source) { + 
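/*
 * Editor's note: this is the child-side handler of the streaming function
 * protocol. Reconstructed from the code in this file and shown only as an
 * illustration (the exact keyword spellings live in the pluginsd headers),
 * the round-trip looks roughly like:
 *
 *   parent -> child:  FUNCTION <transaction> <timeout> "<function name>"
 *   child  -> parent: FUNCTION_RESULT_BEGIN <transaction> <code> <type> <expires>
 *                     ...payload produced by the function...
 *                     FUNCTION_RESULT_END
 *
 * The transaction id is the correlation key: it is held in struct
 * inflight_stream_function until rrd_function_run() completes and
 * stream_execute_function_callback() above sends the result upstream.
 */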
worker_is_busy(WORKER_SENDER_JOB_FUNCTION_REQUEST); + nd_log(NDLS_ACCESS, NDLP_INFO, NULL); + + if(!transaction || !*transaction || !timeout_s || !*timeout_s || !function || !*function) { + netdata_log_error("STREAM %s [send to %s] %s execution command is incomplete (transaction = '%s', timeout = '%s', function = '%s'). Ignoring it.", + rrdhost_hostname(s->host), s->connected_to, + command, + transaction?transaction:"(unset)", + timeout_s?timeout_s:"(unset)", + function?function:"(unset)"); + } + else { + int timeout = str2i(timeout_s); + if(timeout <= 0) timeout = PLUGINS_FUNCTIONS_TIMEOUT_DEFAULT; + + struct inflight_stream_function *tmp = callocz(1, sizeof(struct inflight_stream_function)); + tmp->received_ut = now_realtime_usec(); + tmp->sender = s; + tmp->transaction = string_strdupz(transaction); + BUFFER *wb = buffer_create(1024, &netdata_buffers_statistics.buffers_functions); + + int code = rrd_function_run(s->host, wb, timeout, + http_access_from_hex_mapping_old_roles(access), function, false, transaction, + stream_execute_function_callback, tmp, + stream_has_capability(s, STREAM_CAP_PROGRESS) ? stream_execute_function_progress_callback : NULL, + stream_has_capability(s, STREAM_CAP_PROGRESS) ? tmp : NULL, + NULL, NULL, payload, source, true); + + if(code != HTTP_RESP_OK) { + if (!buffer_strlen(wb)) + rrd_call_function_error(wb, "Failed to route this request to the plugin that offered it.", code); + } + } +} + +struct deferred_function { + const char *transaction; + const char *timeout_s; + const char *function; + const char *access; + const char *source; +}; + +static void execute_deferred_function(struct sender_state *s, void *data) { + struct deferred_function *dfd = data; + execute_commands_function(s, s->defer.end_keyword, + dfd->transaction, dfd->timeout_s, + dfd->function, s->defer.payload, + dfd->access, dfd->source); +} + +static void execute_deferred_json(struct sender_state *s, void *data) { + const char *keyword = data; + + if(strcmp(keyword, PLUGINSD_KEYWORD_STREAM_PATH) == 0) + stream_path_set_from_json(s->host, buffer_tostring(s->defer.payload), true); + else + nd_log(NDLS_DAEMON, NDLP_ERR, "STREAM: unknown JSON keyword '%s' with payload: %s", keyword, buffer_tostring(s->defer.payload)); +} + +static void cleanup_deferred_json(struct sender_state *s __maybe_unused, void *data) { + const char *keyword = data; + freez((void *)keyword); +} + +static void cleanup_deferred_function(struct sender_state *s __maybe_unused, void *data) { + struct deferred_function *dfd = data; + freez((void *)dfd->transaction); + freez((void *)dfd->timeout_s); + freez((void *)dfd->function); + freez((void *)dfd->access); + freez((void *)dfd->source); + freez(dfd); +} + +static void cleanup_deferred_data(struct sender_state *s) { + if(s->defer.cleanup) + s->defer.cleanup(s, s->defer.action_data); + + buffer_free(s->defer.payload); + s->defer.payload = NULL; + s->defer.end_keyword = NULL; + s->defer.action = NULL; + s->defer.cleanup = NULL; + s->defer.action_data = NULL; +} + +void rrdpush_sender_execute_commands_cleanup(struct sender_state *s) { + cleanup_deferred_data(s); +} + +// This is just a placeholder until the gap filling state machine is inserted +void rrdpush_sender_execute_commands(struct sender_state *s) { + worker_is_busy(WORKER_SENDER_JOB_EXECUTE); + + ND_LOG_STACK lgs[] = { + ND_LOG_FIELD_CB(NDF_REQUEST, line_splitter_reconstruct_line, &s->line), + ND_LOG_FIELD_END(), + }; + ND_LOG_STACK_PUSH(lgs); + + char *start = s->read_buffer, *end = &s->read_buffer[s->read_len], *newline; 
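/*
 * Editor's note: the loop below consumes whole '\n'-terminated commands from
 * s->read_buffer and keeps any incomplete trailing line for the next read.
 * A minimal standalone sketch of the same buffering technique (hypothetical
 * helper, standard C library only):
 *
 *   #include <string.h>
 *
 *   // handles every complete line in buf and returns the new length,
 *   // i.e. the bytes of the unfinished last line moved to the front;
 *   // buf must have room for one extra NUL byte beyond len
 *   static size_t consume_lines(char *buf, size_t len, void (*on_line)(char *)) {
 *       buf[len] = '\0';
 *       char *start = buf, *nl;
 *       while((nl = strchr(start, '\n'))) {
 *           *nl = '\0';
 *           on_line(start);                  // one full command
 *           start = nl + 1;
 *       }
 *       size_t rest = len - (size_t)(start - buf);
 *       memmove(buf, start, rest);           // preserve the partial tail
 *       return rest;
 *   }
 */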
+ *end = '\0'; + for( ; start < end ; start = newline + 1) { + newline = strchr(start, '\n'); + + if(!newline) { + if(s->defer.end_keyword) { + buffer_strcat(s->defer.payload, start); + start = end; + } + break; + } + + *newline = '\0'; + s->line.count++; + + if(s->defer.end_keyword) { + if(strcmp(start, s->defer.end_keyword) == 0) { + s->defer.action(s, s->defer.action_data); + cleanup_deferred_data(s); + } + else { + buffer_strcat(s->defer.payload, start); + buffer_putc(s->defer.payload, '\n'); + } + + continue; + } + + s->line.num_words = quoted_strings_splitter_whitespace(start, s->line.words, PLUGINSD_MAX_WORDS); + const char *command = get_word(s->line.words, s->line.num_words, 0); + + if(command && strcmp(command, PLUGINSD_CALL_FUNCTION) == 0) { + char *transaction = get_word(s->line.words, s->line.num_words, 1); + char *timeout_s = get_word(s->line.words, s->line.num_words, 2); + char *function = get_word(s->line.words, s->line.num_words, 3); + char *access = get_word(s->line.words, s->line.num_words, 4); + char *source = get_word(s->line.words, s->line.num_words, 5); + + execute_commands_function(s, command, transaction, timeout_s, function, NULL, access, source); + } + else if(command && strcmp(command, PLUGINSD_CALL_FUNCTION_PAYLOAD_BEGIN) == 0) { + char *transaction = get_word(s->line.words, s->line.num_words, 1); + char *timeout_s = get_word(s->line.words, s->line.num_words, 2); + char *function = get_word(s->line.words, s->line.num_words, 3); + char *access = get_word(s->line.words, s->line.num_words, 4); + char *source = get_word(s->line.words, s->line.num_words, 5); + char *content_type = get_word(s->line.words, s->line.num_words, 6); + + s->defer.end_keyword = PLUGINSD_CALL_FUNCTION_PAYLOAD_END; + s->defer.payload = buffer_create(0, NULL); + s->defer.payload->content_type = content_type_string2id(content_type); + s->defer.action = execute_deferred_function; + s->defer.cleanup = cleanup_deferred_function; + + struct deferred_function *dfd = callocz(1, sizeof(*dfd)); + dfd->transaction = strdupz(transaction ? transaction : ""); + dfd->timeout_s = strdupz(timeout_s ? timeout_s : ""); + dfd->function = strdupz(function ? function : ""); + dfd->access = strdupz(access ? access : ""); + dfd->source = strdupz(source ? 
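/*
 * Editor's note: multi-line payloads are handled by the small deferral state
 * machine in s->defer rather than by recursive parsing. Arming it (as done
 * just above for FUNCTION_PAYLOAD_BEGIN) and draining it (at the top of the
 * command loop) reduces to the following, with all names taken from this
 * file:
 *
 *   // arm: remember how the payload ends and what to do with it
 *   s->defer.end_keyword = PLUGINSD_CALL_FUNCTION_PAYLOAD_END;
 *   s->defer.payload     = buffer_create(0, NULL);
 *   s->defer.action      = execute_deferred_function;
 *   s->defer.cleanup     = cleanup_deferred_function;
 *
 *   // drain: every subsequent line is payload until the end keyword arrives
 *   if(s->defer.end_keyword) {
 *       if(strcmp(line, s->defer.end_keyword) == 0) {
 *           s->defer.action(s, s->defer.action_data);   // run the command
 *           cleanup_deferred_data(s);                   // free and disarm
 *       }
 *       else
 *           buffer_strcat(s->defer.payload, line);      // accumulate
 *   }
 */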
source : ""); + + s->defer.action_data = dfd; + } + else if(command && strcmp(command, PLUGINSD_CALL_FUNCTION_CANCEL) == 0) { + worker_is_busy(WORKER_SENDER_JOB_FUNCTION_REQUEST); + nd_log(NDLS_ACCESS, NDLP_DEBUG, NULL); + + char *transaction = get_word(s->line.words, s->line.num_words, 1); + if(transaction && *transaction) + rrd_function_cancel(transaction); + } + else if(command && strcmp(command, PLUGINSD_CALL_FUNCTION_PROGRESS) == 0) { + worker_is_busy(WORKER_SENDER_JOB_FUNCTION_REQUEST); + nd_log(NDLS_ACCESS, NDLP_DEBUG, NULL); + + char *transaction = get_word(s->line.words, s->line.num_words, 1); + if(transaction && *transaction) + rrd_function_progress(transaction); + } + else if (command && strcmp(command, PLUGINSD_KEYWORD_REPLAY_CHART) == 0) { + worker_is_busy(WORKER_SENDER_JOB_REPLAY_REQUEST); + nd_log(NDLS_ACCESS, NDLP_DEBUG, NULL); + + const char *chart_id = get_word(s->line.words, s->line.num_words, 1); + const char *start_streaming = get_word(s->line.words, s->line.num_words, 2); + const char *after = get_word(s->line.words, s->line.num_words, 3); + const char *before = get_word(s->line.words, s->line.num_words, 4); + + if (!chart_id || !start_streaming || !after || !before) { + netdata_log_error("STREAM %s [send to %s] %s command is incomplete" + " (chart=%s, start_streaming=%s, after=%s, before=%s)", + rrdhost_hostname(s->host), s->connected_to, + command, + chart_id ? chart_id : "(unset)", + start_streaming ? start_streaming : "(unset)", + after ? after : "(unset)", + before ? before : "(unset)"); + } + else { + replication_add_request(s, chart_id, + strtoll(after, NULL, 0), + strtoll(before, NULL, 0), + !strcmp(start_streaming, "true") + ); + } + } + else if(command && strcmp(command, PLUGINSD_KEYWORD_NODE_ID) == 0) { + rrdpush_sender_get_node_and_claim_id_from_parent(s); + } + else if(command && strcmp(command, PLUGINSD_KEYWORD_JSON) == 0) { + char *keyword = get_word(s->line.words, s->line.num_words, 1); + + s->defer.end_keyword = PLUGINSD_KEYWORD_JSON_END; + s->defer.payload = buffer_create(0, NULL); + s->defer.action = execute_deferred_json; + s->defer.cleanup = cleanup_deferred_json; + s->defer.action_data = strdupz(keyword); + } + else { + netdata_log_error("STREAM %s [send to %s] received unknown command over connection: %s", + rrdhost_hostname(s->host), s->connected_to, s->line.words[0]?s->line.words[0]:"(unset)"); + } + + line_splitter_reset(&s->line); + worker_is_busy(WORKER_SENDER_JOB_EXECUTE); + } + + if (start < end) { + memmove(s->read_buffer, start, end-start); + s->read_len = end - start; + } + else { + s->read_buffer[0] = '\0'; + s->read_len = 0; + } +} diff --git a/src/streaming/sender-internals.h b/src/streaming/sender-internals.h new file mode 100644 index 000000000..574369afa --- /dev/null +++ b/src/streaming/sender-internals.h @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_SENDER_INTERNALS_H +#define NETDATA_SENDER_INTERNALS_H + +#include "rrdpush.h" +#include "h2o-common.h" +#include "aclk/https_client.h" + +#define WORKER_SENDER_JOB_CONNECT 0 +#define WORKER_SENDER_JOB_PIPE_READ 1 +#define WORKER_SENDER_JOB_SOCKET_RECEIVE 2 +#define WORKER_SENDER_JOB_EXECUTE 3 +#define WORKER_SENDER_JOB_SOCKET_SEND 4 +#define WORKER_SENDER_JOB_DISCONNECT_BAD_HANDSHAKE 5 +#define WORKER_SENDER_JOB_DISCONNECT_OVERFLOW 6 +#define WORKER_SENDER_JOB_DISCONNECT_TIMEOUT 7 +#define WORKER_SENDER_JOB_DISCONNECT_POLL_ERROR 8 +#define WORKER_SENDER_JOB_DISCONNECT_SOCKET_ERROR 9 +#define WORKER_SENDER_JOB_DISCONNECT_SSL_ERROR 10 +#define 
WORKER_SENDER_JOB_DISCONNECT_PARENT_CLOSED 11 +#define WORKER_SENDER_JOB_DISCONNECT_RECEIVE_ERROR 12 +#define WORKER_SENDER_JOB_DISCONNECT_SEND_ERROR 13 +#define WORKER_SENDER_JOB_DISCONNECT_NO_COMPRESSION 14 +#define WORKER_SENDER_JOB_BUFFER_RATIO 15 +#define WORKER_SENDER_JOB_BYTES_RECEIVED 16 +#define WORKER_SENDER_JOB_BYTES_SENT 17 +#define WORKER_SENDER_JOB_BYTES_COMPRESSED 18 +#define WORKER_SENDER_JOB_BYTES_UNCOMPRESSED 19 +#define WORKER_SENDER_JOB_BYTES_COMPRESSION_RATIO 20 +#define WORKER_SENDER_JOB_REPLAY_REQUEST 21 +#define WORKER_SENDER_JOB_FUNCTION_REQUEST 22 +#define WORKER_SENDER_JOB_REPLAY_DICT_SIZE 23 +#define WORKER_SENDER_JOB_DISCONNECT_CANT_UPGRADE_CONNECTION 24 + +#if WORKER_UTILIZATION_MAX_JOB_TYPES < 25 +#error WORKER_UTILIZATION_MAX_JOB_TYPES has to be at least 25 +#endif + +bool attempt_to_connect(struct sender_state *state); +void rrdpush_sender_on_connect(RRDHOST *host); +void rrdpush_sender_after_connect(RRDHOST *host); +void rrdpush_sender_thread_close_socket(struct sender_state *s); + +void rrdpush_sender_execute_commands_cleanup(struct sender_state *s); +void rrdpush_sender_execute_commands(struct sender_state *s); + +#endif //NETDATA_SENDER_INTERNALS_H diff --git a/src/streaming/sender.c b/src/streaming/sender.c index a5fbe6044..666409b1c 100644 --- a/src/streaming/sender.c +++ b/src/streaming/sender.c @@ -1,257 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later -#include "rrdpush.h" -#include "common.h" -#include "aclk/https_client.h" - -#define WORKER_SENDER_JOB_CONNECT 0 -#define WORKER_SENDER_JOB_PIPE_READ 1 -#define WORKER_SENDER_JOB_SOCKET_RECEIVE 2 -#define WORKER_SENDER_JOB_EXECUTE 3 -#define WORKER_SENDER_JOB_SOCKET_SEND 4 -#define WORKER_SENDER_JOB_DISCONNECT_BAD_HANDSHAKE 5 -#define WORKER_SENDER_JOB_DISCONNECT_OVERFLOW 6 -#define WORKER_SENDER_JOB_DISCONNECT_TIMEOUT 7 -#define WORKER_SENDER_JOB_DISCONNECT_POLL_ERROR 8 -#define WORKER_SENDER_JOB_DISCONNECT_SOCKET_ERROR 9 -#define WORKER_SENDER_JOB_DISCONNECT_SSL_ERROR 10 -#define WORKER_SENDER_JOB_DISCONNECT_PARENT_CLOSED 11 -#define WORKER_SENDER_JOB_DISCONNECT_RECEIVE_ERROR 12 -#define WORKER_SENDER_JOB_DISCONNECT_SEND_ERROR 13 -#define WORKER_SENDER_JOB_DISCONNECT_NO_COMPRESSION 14 -#define WORKER_SENDER_JOB_BUFFER_RATIO 15 -#define WORKER_SENDER_JOB_BYTES_RECEIVED 16 -#define WORKER_SENDER_JOB_BYTES_SENT 17 -#define WORKER_SENDER_JOB_BYTES_COMPRESSED 18 -#define WORKER_SENDER_JOB_BYTES_UNCOMPRESSED 19 -#define WORKER_SENDER_JOB_BYTES_COMPRESSION_RATIO 20 -#define WORKER_SENDER_JOB_REPLAY_REQUEST 21 -#define WORKER_SENDER_JOB_FUNCTION_REQUEST 22 -#define WORKER_SENDER_JOB_REPLAY_DICT_SIZE 23 -#define WORKER_SENDER_JOB_DISCONNECT_CANT_UPGRADE_CONNECTION 24 - -#if WORKER_UTILIZATION_MAX_JOB_TYPES < 25 -#error WORKER_UTILIZATION_MAX_JOB_TYPES has to be at least 25 -#endif - -extern struct config stream_config; -extern char *netdata_ssl_ca_path; -extern char *netdata_ssl_ca_file; - -static __thread BUFFER *sender_thread_buffer = NULL; -static __thread bool sender_thread_buffer_used = false; -static __thread time_t sender_thread_buffer_last_reset_s = 0; - -void sender_thread_buffer_free(void) { - buffer_free(sender_thread_buffer); - sender_thread_buffer = NULL; - sender_thread_buffer_used = false; -} - -// Collector thread starting a transmission -BUFFER *sender_start(struct sender_state *s) { - if(unlikely(sender_thread_buffer_used)) - fatal("STREAMING: thread buffer is used multiple times concurrently."); - - if(unlikely(rrdpush_sender_last_buffer_recreate_get(s) > 
sender_thread_buffer_last_reset_s)) { - if(unlikely(sender_thread_buffer && sender_thread_buffer->size > THREAD_BUFFER_INITIAL_SIZE)) { - buffer_free(sender_thread_buffer); - sender_thread_buffer = NULL; - } - } - - if(unlikely(!sender_thread_buffer)) { - sender_thread_buffer = buffer_create(THREAD_BUFFER_INITIAL_SIZE, &netdata_buffers_statistics.buffers_streaming); - sender_thread_buffer_last_reset_s = rrdpush_sender_last_buffer_recreate_get(s); - } - - sender_thread_buffer_used = true; - buffer_flush(sender_thread_buffer); - return sender_thread_buffer; -} - -static inline void rrdpush_sender_thread_close_socket(RRDHOST *host); - -#define SENDER_BUFFER_ADAPT_TO_TIMES_MAX_SIZE 3 - -// Collector thread finishing a transmission -void sender_commit(struct sender_state *s, BUFFER *wb, STREAM_TRAFFIC_TYPE type) { - - if(unlikely(wb != sender_thread_buffer)) - fatal("STREAMING: sender is trying to commit a buffer that is not this thread's buffer."); - - if(unlikely(!sender_thread_buffer_used)) - fatal("STREAMING: sender is committing a buffer twice."); - - sender_thread_buffer_used = false; - - char *src = (char *)buffer_tostring(wb); - size_t src_len = buffer_strlen(wb); - - if(unlikely(!src || !src_len)) - return; - - sender_lock(s); - -#ifdef NETDATA_LOG_STREAM_SENDER - if(type == STREAM_TRAFFIC_TYPE_METADATA) { - if(!s->stream_log_fp) { - char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, "/tmp/stream-sender-%s.txt", s->host ? rrdhost_hostname(s->host) : "unknown"); - - s->stream_log_fp = fopen(filename, "w"); - } - - fprintf(s->stream_log_fp, "\n--- SEND MESSAGE START: %s ----\n" - "%s" - "--- SEND MESSAGE END ----------------------------------------\n" - , rrdhost_hostname(s->host), src - ); - } -#endif - - if(unlikely(s->buffer->max_size < (src_len + 1) * SENDER_BUFFER_ADAPT_TO_TIMES_MAX_SIZE)) { - netdata_log_info("STREAM %s [send to %s]: max buffer size of %zu is too small for a data message of size %zu. Increasing the max buffer size to %d times the max data message size.", - rrdhost_hostname(s->host), s->connected_to, s->buffer->max_size, buffer_strlen(wb) + 1, SENDER_BUFFER_ADAPT_TO_TIMES_MAX_SIZE); - - s->buffer->max_size = (src_len + 1) * SENDER_BUFFER_ADAPT_TO_TIMES_MAX_SIZE; - } - - if (s->compressor.initialized) { - while(src_len) { - size_t size_to_compress = src_len; - - if(unlikely(size_to_compress > COMPRESSION_MAX_MSG_SIZE)) { - if (stream_has_capability(s, STREAM_CAP_BINARY)) - size_to_compress = COMPRESSION_MAX_MSG_SIZE; - else { - if (size_to_compress > COMPRESSION_MAX_MSG_SIZE) { - // we need to find the last newline - // so that the decompressor will have a whole line to work with - - const char *t = &src[COMPRESSION_MAX_MSG_SIZE]; - while (--t >= src) - if (unlikely(*t == '\n')) - break; - - if (t <= src) { - size_to_compress = COMPRESSION_MAX_MSG_SIZE; - } else - size_to_compress = t - src + 1; - } - } - } - - const char *dst; - size_t dst_len = rrdpush_compress(&s->compressor, src, size_to_compress, &dst); - if (!dst_len) { - netdata_log_error("STREAM %s [send to %s]: COMPRESSION failed. Resetting compressor and re-trying", - rrdhost_hostname(s->host), s->connected_to); - - rrdpush_compression_initialize(s); - dst_len = rrdpush_compress(&s->compressor, src, size_to_compress, &dst); - if(!dst_len) { - netdata_log_error("STREAM %s [send to %s]: COMPRESSION failed again. 
Deactivating compression", - rrdhost_hostname(s->host), s->connected_to); - - worker_is_busy(WORKER_SENDER_JOB_DISCONNECT_NO_COMPRESSION); - rrdpush_compression_deactivate(s); - rrdpush_sender_thread_close_socket(s->host); - sender_unlock(s); - return; - } - } - - rrdpush_signature_t signature = rrdpush_compress_encode_signature(dst_len); - -#ifdef NETDATA_INTERNAL_CHECKS - // check if reversing the signature provides the same length - size_t decoded_dst_len = rrdpush_decompress_decode_signature((const char *)&signature, sizeof(signature)); - if(decoded_dst_len != dst_len) - fatal("RRDPUSH COMPRESSION: invalid signature, original payload %zu bytes, " - "compressed payload length %zu bytes, but signature says payload is %zu bytes", - size_to_compress, dst_len, decoded_dst_len); -#endif - - if(cbuffer_add_unsafe(s->buffer, (const char *)&signature, sizeof(signature))) - s->flags |= SENDER_FLAG_OVERFLOW; - else { - if(cbuffer_add_unsafe(s->buffer, dst, dst_len)) - s->flags |= SENDER_FLAG_OVERFLOW; - else - s->sent_bytes_on_this_connection_per_type[type] += dst_len + sizeof(signature); - } - - src = src + size_to_compress; - src_len -= size_to_compress; - } - } - else if(cbuffer_add_unsafe(s->buffer, src, src_len)) - s->flags |= SENDER_FLAG_OVERFLOW; - else - s->sent_bytes_on_this_connection_per_type[type] += src_len; - - replication_recalculate_buffer_used_ratio_unsafe(s); - - bool signal_sender = false; - if(!rrdpush_sender_pipe_has_pending_data(s)) { - rrdpush_sender_pipe_set_pending_data(s); - signal_sender = true; - } - - sender_unlock(s); - - if(signal_sender && (!stream_has_capability(s, STREAM_CAP_INTERPOLATED) || type != STREAM_TRAFFIC_TYPE_DATA)) - rrdpush_signal_sender_to_wake_up(s); -} - -static inline void rrdpush_sender_add_host_variable_to_buffer(BUFFER *wb, const RRDVAR_ACQUIRED *rva) { - buffer_sprintf( - wb - , "VARIABLE HOST %s = " NETDATA_DOUBLE_FORMAT "\n" - , rrdvar_name(rva) - , rrdvar2number(rva) - ); - - netdata_log_debug(D_STREAM, "RRDVAR pushed HOST VARIABLE %s = " NETDATA_DOUBLE_FORMAT, rrdvar_name(rva), rrdvar2number(rva)); -} - -void rrdpush_sender_send_this_host_variable_now(RRDHOST *host, const RRDVAR_ACQUIRED *rva) { - if(rrdhost_can_send_definitions_to_parent(host)) { - BUFFER *wb = sender_start(host->sender); - rrdpush_sender_add_host_variable_to_buffer(wb, rva); - sender_commit(host->sender, wb, STREAM_TRAFFIC_TYPE_METADATA); - sender_thread_buffer_free(); - } -} - -struct custom_host_variables_callback { - BUFFER *wb; -}; - -static int rrdpush_sender_thread_custom_host_variables_callback(const DICTIONARY_ITEM *item __maybe_unused, void *rrdvar_ptr __maybe_unused, void *struct_ptr) { - const RRDVAR_ACQUIRED *rv = (const RRDVAR_ACQUIRED *)item; - struct custom_host_variables_callback *tmp = struct_ptr; - BUFFER *wb = tmp->wb; - - rrdpush_sender_add_host_variable_to_buffer(wb, rv); - return 1; -} - -static void rrdpush_sender_thread_send_custom_host_variables(RRDHOST *host) { - if(rrdhost_can_send_definitions_to_parent(host)) { - BUFFER *wb = sender_start(host->sender); - struct custom_host_variables_callback tmp = { - .wb = wb - }; - int ret = rrdvar_walkthrough_read(host->rrdvars, rrdpush_sender_thread_custom_host_variables_callback, &tmp); - (void)ret; - sender_commit(host->sender, wb, STREAM_TRAFFIC_TYPE_METADATA); - sender_thread_buffer_free(); - - netdata_log_debug(D_STREAM, "RRDVAR sent %d VARIABLES", ret); - } -} +#include "sender-internals.h" // resets all the chart, so that their definitions // will be resent to the central netdata @@ -275,7 +24,7 @@ 
static void rrdpush_sender_thread_reset_all_charts(RRDHOST *host) { rrdhost_sender_replicating_charts_zero(host); } -static void rrdpush_sender_cbuffer_recreate_timed(struct sender_state *s, time_t now_s, bool have_mutex, bool force) { +void rrdpush_sender_cbuffer_recreate_timed(struct sender_state *s, time_t now_s, bool have_mutex, bool force) { static __thread time_t last_reset_time_s = 0; if(!force && now_s - last_reset_time_s < 300) @@ -324,704 +73,24 @@ static void rrdpush_sender_charts_and_replication_reset(RRDHOST *host) { rrdpush_sender_replicating_charts_zero(host->sender); } -static void rrdpush_sender_on_connect(RRDHOST *host) { +void rrdpush_sender_on_connect(RRDHOST *host) { rrdpush_sender_cbuffer_flush(host); rrdpush_sender_charts_and_replication_reset(host); } -static void rrdpush_sender_after_connect(RRDHOST *host) { +void rrdpush_sender_after_connect(RRDHOST *host) { rrdpush_sender_thread_send_custom_host_variables(host); } -static inline void rrdpush_sender_thread_close_socket(RRDHOST *host) { -#ifdef ENABLE_HTTPS - netdata_ssl_close(&host->sender->ssl); -#endif - - if(host->sender->rrdpush_sender_socket != -1) { - close(host->sender->rrdpush_sender_socket); - host->sender->rrdpush_sender_socket = -1; - } +static void rrdpush_sender_on_disconnect(RRDHOST *host) { + // we have been connected to this parent - let's cleanup - rrdhost_flag_clear(host, RRDHOST_FLAG_RRDPUSH_SENDER_READY_4_METRICS); - rrdhost_flag_clear(host, RRDHOST_FLAG_RRDPUSH_SENDER_CONNECTED); - - // do not flush the circular buffer here - // this function is called sometimes with the mutex lock, sometimes without the lock rrdpush_sender_charts_and_replication_reset(host); -} - -void rrdpush_encode_variable(stream_encoded_t *se, RRDHOST *host) { - se->os_name = (host->system_info->host_os_name)?url_encode(host->system_info->host_os_name):strdupz(""); - se->os_id = (host->system_info->host_os_id)?url_encode(host->system_info->host_os_id):strdupz(""); - se->os_version = (host->system_info->host_os_version)?url_encode(host->system_info->host_os_version):strdupz(""); - se->kernel_name = (host->system_info->kernel_name)?url_encode(host->system_info->kernel_name):strdupz(""); - se->kernel_version = (host->system_info->kernel_version)?url_encode(host->system_info->kernel_version):strdupz(""); -} - -void rrdpush_clean_encoded(stream_encoded_t *se) { - if (se->os_name) { - freez(se->os_name); - se->os_name = NULL; - } - - if (se->os_id) { - freez(se->os_id); - se->os_id = NULL; - } - - if (se->os_version) { - freez(se->os_version); - se->os_version = NULL; - } - - if (se->kernel_name) { - freez(se->kernel_name); - se->kernel_name = NULL; - } - - if (se->kernel_version) { - freez(se->kernel_version); - se->kernel_version = NULL; - } -} - -struct { - const char *response; - const char *status; - size_t length; - int32_t version; - bool dynamic; - const char *error; - int worker_job_id; - int postpone_reconnect_seconds; - ND_LOG_FIELD_PRIORITY priority; -} stream_responses[] = { - { - .response = START_STREAMING_PROMPT_VN, - .length = sizeof(START_STREAMING_PROMPT_VN) - 1, - .status = RRDPUSH_STATUS_CONNECTED, - .version = STREAM_HANDSHAKE_OK_V3, // and above - .dynamic = true, // dynamic = we will parse the version / capabilities - .error = NULL, - .worker_job_id = 0, - .postpone_reconnect_seconds = 0, - .priority = NDLP_INFO, - }, - { - .response = START_STREAMING_PROMPT_V2, - .length = sizeof(START_STREAMING_PROMPT_V2) - 1, - .status = RRDPUSH_STATUS_CONNECTED, - .version = STREAM_HANDSHAKE_OK_V2, - .dynamic = 
false, - .error = NULL, - .worker_job_id = 0, - .postpone_reconnect_seconds = 0, - .priority = NDLP_INFO, - }, - { - .response = START_STREAMING_PROMPT_V1, - .length = sizeof(START_STREAMING_PROMPT_V1) - 1, - .status = RRDPUSH_STATUS_CONNECTED, - .version = STREAM_HANDSHAKE_OK_V1, - .dynamic = false, - .error = NULL, - .worker_job_id = 0, - .postpone_reconnect_seconds = 0, - .priority = NDLP_INFO, - }, - { - .response = START_STREAMING_ERROR_SAME_LOCALHOST, - .length = sizeof(START_STREAMING_ERROR_SAME_LOCALHOST) - 1, - .status = RRDPUSH_STATUS_LOCALHOST, - .version = STREAM_HANDSHAKE_ERROR_LOCALHOST, - .dynamic = false, - .error = "remote server rejected this stream, the host we are trying to stream is its localhost", - .worker_job_id = WORKER_SENDER_JOB_DISCONNECT_BAD_HANDSHAKE, - .postpone_reconnect_seconds = 60 * 60, // the IP may change, try it every hour - .priority = NDLP_DEBUG, - }, - { - .response = START_STREAMING_ERROR_ALREADY_STREAMING, - .length = sizeof(START_STREAMING_ERROR_ALREADY_STREAMING) - 1, - .status = RRDPUSH_STATUS_ALREADY_CONNECTED, - .version = STREAM_HANDSHAKE_ERROR_ALREADY_CONNECTED, - .dynamic = false, - .error = "remote server rejected this stream, the host we are trying to stream is already streamed to it", - .worker_job_id = WORKER_SENDER_JOB_DISCONNECT_BAD_HANDSHAKE, - .postpone_reconnect_seconds = 2 * 60, // 2 minutes - .priority = NDLP_DEBUG, - }, - { - .response = START_STREAMING_ERROR_NOT_PERMITTED, - .length = sizeof(START_STREAMING_ERROR_NOT_PERMITTED) - 1, - .status = RRDPUSH_STATUS_PERMISSION_DENIED, - .version = STREAM_HANDSHAKE_ERROR_DENIED, - .dynamic = false, - .error = "remote server denied access, probably we don't have the right API key?", - .worker_job_id = WORKER_SENDER_JOB_DISCONNECT_BAD_HANDSHAKE, - .postpone_reconnect_seconds = 1 * 60, // 1 minute - .priority = NDLP_ERR, - }, - { - .response = START_STREAMING_ERROR_BUSY_TRY_LATER, - .length = sizeof(START_STREAMING_ERROR_BUSY_TRY_LATER) - 1, - .status = RRDPUSH_STATUS_RATE_LIMIT, - .version = STREAM_HANDSHAKE_BUSY_TRY_LATER, - .dynamic = false, - .error = "remote server is currently busy, we should try later", - .worker_job_id = WORKER_SENDER_JOB_DISCONNECT_BAD_HANDSHAKE, - .postpone_reconnect_seconds = 2 * 60, // 2 minutes - .priority = NDLP_NOTICE, - }, - { - .response = START_STREAMING_ERROR_INTERNAL_ERROR, - .length = sizeof(START_STREAMING_ERROR_INTERNAL_ERROR) - 1, - .status = RRDPUSH_STATUS_INTERNAL_SERVER_ERROR, - .version = STREAM_HANDSHAKE_INTERNAL_ERROR, - .dynamic = false, - .error = "remote server is encountered an internal error, we should try later", - .worker_job_id = WORKER_SENDER_JOB_DISCONNECT_BAD_HANDSHAKE, - .postpone_reconnect_seconds = 5 * 60, // 5 minutes - .priority = NDLP_CRIT, - }, - { - .response = START_STREAMING_ERROR_INITIALIZATION, - .length = sizeof(START_STREAMING_ERROR_INITIALIZATION) - 1, - .status = RRDPUSH_STATUS_INITIALIZATION_IN_PROGRESS, - .version = STREAM_HANDSHAKE_INITIALIZATION, - .dynamic = false, - .error = "remote server is initializing, we should try later", - .worker_job_id = WORKER_SENDER_JOB_DISCONNECT_BAD_HANDSHAKE, - .postpone_reconnect_seconds = 2 * 60, // 2 minute - .priority = NDLP_NOTICE, - }, - - // terminator - { - .response = NULL, - .length = 0, - .status = RRDPUSH_STATUS_BAD_HANDSHAKE, - .version = STREAM_HANDSHAKE_ERROR_BAD_HANDSHAKE, - .dynamic = false, - .error = "remote node response is not understood, is it Netdata?", - .worker_job_id = WORKER_SENDER_JOB_DISCONNECT_BAD_HANDSHAKE, - .postpone_reconnect_seconds = 1 * 60, 
// 1 minute - .priority = NDLP_ERR, - } -}; - -static inline bool rrdpush_sender_validate_response(RRDHOST *host, struct sender_state *s, char *http, size_t http_length) { - int32_t version = STREAM_HANDSHAKE_ERROR_BAD_HANDSHAKE; - - int i; - for(i = 0; stream_responses[i].response ; i++) { - if(stream_responses[i].dynamic && - http_length > stream_responses[i].length && http_length < (stream_responses[i].length + 30) && - strncmp(http, stream_responses[i].response, stream_responses[i].length) == 0) { - - version = str2i(&http[stream_responses[i].length]); - break; - } - else if(http_length == stream_responses[i].length && strcmp(http, stream_responses[i].response) == 0) { - version = stream_responses[i].version; - - break; - } - } - - if(version >= STREAM_HANDSHAKE_OK_V1) { - host->destination->reason = version; - host->destination->postpone_reconnection_until = now_realtime_sec() + s->reconnect_delay; - s->capabilities = convert_stream_version_to_capabilities(version, host, true); - return true; - } - - ND_LOG_FIELD_PRIORITY priority = stream_responses[i].priority; - const char *error = stream_responses[i].error; - const char *status = stream_responses[i].status; - int worker_job_id = stream_responses[i].worker_job_id; - int delay = stream_responses[i].postpone_reconnect_seconds; - - worker_is_busy(worker_job_id); - rrdpush_sender_thread_close_socket(host); - host->destination->reason = version; - host->destination->postpone_reconnection_until = now_realtime_sec() + delay; - - ND_LOG_STACK lgs[] = { - ND_LOG_FIELD_TXT(NDF_RESPONSE_CODE, status), - ND_LOG_FIELD_END(), - }; - ND_LOG_STACK_PUSH(lgs); - - char buf[RFC3339_MAX_LENGTH]; - rfc3339_datetime_ut(buf, sizeof(buf), host->destination->postpone_reconnection_until * USEC_PER_SEC, 0, false); - - nd_log(NDLS_DAEMON, priority, - "STREAM %s [send to %s]: %s - will retry in %d secs, at %s", - rrdhost_hostname(host), s->connected_to, error, delay, buf); - - return false; -} - -unsigned char alpn_proto_list[] = { - 18, 'n', 'e', 't', 'd', 'a', 't', 'a', '_', 's', 't', 'r', 'e', 'a', 'm', '/', '2', '.', '0', - 8, 'h', 't', 't', 'p', '/', '1', '.', '1' -}; - -#define CONN_UPGRADE_VAL "upgrade" - -static bool rrdpush_sender_connect_ssl(struct sender_state *s __maybe_unused) { -#ifdef ENABLE_HTTPS - RRDHOST *host = s->host; - bool ssl_required = host->destination && host->destination->ssl; - - netdata_ssl_close(&host->sender->ssl); - - if(!ssl_required) - return true; - - if (netdata_ssl_open_ext(&host->sender->ssl, netdata_ssl_streaming_sender_ctx, s->rrdpush_sender_socket, alpn_proto_list, sizeof(alpn_proto_list))) { - if(!netdata_ssl_connect(&host->sender->ssl)) { - // couldn't connect - - ND_LOG_STACK lgs[] = { - ND_LOG_FIELD_TXT(NDF_RESPONSE_CODE, RRDPUSH_STATUS_SSL_ERROR), - ND_LOG_FIELD_END(), - }; - ND_LOG_STACK_PUSH(lgs); - - worker_is_busy(WORKER_SENDER_JOB_DISCONNECT_SSL_ERROR); - rrdpush_sender_thread_close_socket(host); - host->destination->reason = STREAM_HANDSHAKE_ERROR_SSL_ERROR; - host->destination->postpone_reconnection_until = now_realtime_sec() + 5 * 60; - return false; - } - - if (netdata_ssl_validate_certificate_sender && - security_test_certificate(host->sender->ssl.conn)) { - // certificate is not valid - - ND_LOG_STACK lgs[] = { - ND_LOG_FIELD_TXT(NDF_RESPONSE_CODE, RRDPUSH_STATUS_INVALID_SSL_CERTIFICATE), - ND_LOG_FIELD_END(), - }; - ND_LOG_STACK_PUSH(lgs); - - worker_is_busy(WORKER_SENDER_JOB_DISCONNECT_SSL_ERROR); - netdata_log_error("SSL: closing the stream connection, because the server SSL certificate is not 
valid."); - rrdpush_sender_thread_close_socket(host); - host->destination->reason = STREAM_HANDSHAKE_ERROR_INVALID_CERTIFICATE; - host->destination->postpone_reconnection_until = now_realtime_sec() + 5 * 60; - return false; - } - - return true; - } - - ND_LOG_STACK lgs[] = { - ND_LOG_FIELD_TXT(NDF_RESPONSE_CODE, RRDPUSH_STATUS_CANT_ESTABLISH_SSL_CONNECTION), - ND_LOG_FIELD_END(), - }; - ND_LOG_STACK_PUSH(lgs); - - netdata_log_error("SSL: failed to establish connection."); - return false; - -#else - // SSL is not enabled - return true; -#endif -} - -static int rrdpush_http_upgrade_prelude(RRDHOST *host, struct sender_state *s) { - - char http[HTTP_HEADER_SIZE + 1]; - snprintfz(http, HTTP_HEADER_SIZE, - "GET " NETDATA_STREAM_URL HTTP_1_1 HTTP_ENDL - "Upgrade: " NETDATA_STREAM_PROTO_NAME HTTP_ENDL - "Connection: Upgrade" - HTTP_HDR_END); - - ssize_t bytes = send_timeout( -#ifdef ENABLE_HTTPS - &host->sender->ssl, -#endif - s->rrdpush_sender_socket, - http, - strlen(http), - 0, - 1000); - - bytes = recv_timeout( -#ifdef ENABLE_HTTPS - &host->sender->ssl, -#endif - s->rrdpush_sender_socket, - http, - HTTP_HEADER_SIZE, - 0, - 1000); - - if (bytes <= 0) { - error_report("Error reading from remote"); - return 1; - } - - rbuf_t buf = rbuf_create(bytes); - rbuf_push(buf, http, bytes); - - http_parse_ctx ctx; - http_parse_ctx_create(&ctx, HTTP_PARSE_INITIAL); - ctx.flags |= HTTP_PARSE_FLAG_DONT_WAIT_FOR_CONTENT; - - int rc; -// while((rc = parse_http_response(buf, &ctx)) == HTTP_PARSE_NEED_MORE_DATA); - rc = parse_http_response(buf, &ctx); - - if (rc != HTTP_PARSE_SUCCESS) { - error_report("Failed to parse HTTP response sent. (%d)", rc); - goto err_cleanup; - } - if (ctx.http_code == HTTP_RESP_MOVED_PERM) { - const char *hdr = get_http_header_by_name(&ctx, "location"); - if (hdr) - error_report("HTTP response is %d Moved Permanently (location: \"%s\") instead of expected %d Switching Protocols.", ctx.http_code, hdr, HTTP_RESP_SWITCH_PROTO); - else - error_report("HTTP response is %d instead of expected %d Switching Protocols.", ctx.http_code, HTTP_RESP_SWITCH_PROTO); - goto err_cleanup; - } - if (ctx.http_code == HTTP_RESP_NOT_FOUND) { - error_report("HTTP response is %d instead of expected %d Switching Protocols. 
Parent version too old.", ctx.http_code, HTTP_RESP_SWITCH_PROTO); - // TODO set some flag here that will signify parent is older version - // and to try connection without rrdpush_http_upgrade_prelude next time - goto err_cleanup; - } - if (ctx.http_code != HTTP_RESP_SWITCH_PROTO) { - error_report("HTTP response is %d instead of expected %d Switching Protocols", ctx.http_code, HTTP_RESP_SWITCH_PROTO); - goto err_cleanup; - } - - const char *hdr = get_http_header_by_name(&ctx, "connection"); - if (!hdr) { - error_report("Missing \"connection\" header in reply"); - goto err_cleanup; - } - if (strncmp(hdr, CONN_UPGRADE_VAL, strlen(CONN_UPGRADE_VAL))) { - error_report("Expected \"connection: " CONN_UPGRADE_VAL "\""); - goto err_cleanup; - } - - hdr = get_http_header_by_name(&ctx, "upgrade"); - if (!hdr) { - error_report("Missing \"upgrade\" header in reply"); - goto err_cleanup; - } - if (strncmp(hdr, NETDATA_STREAM_PROTO_NAME, strlen(NETDATA_STREAM_PROTO_NAME))) { - error_report("Expected \"upgrade: " NETDATA_STREAM_PROTO_NAME "\""); - goto err_cleanup; - } - - netdata_log_debug(D_STREAM, "Stream sender upgrade to \"" NETDATA_STREAM_PROTO_NAME "\" successful"); - rbuf_free(buf); - http_parse_ctx_destroy(&ctx); - return 0; -err_cleanup: - rbuf_free(buf); - http_parse_ctx_destroy(&ctx); - return 1; -} - -static bool rrdpush_sender_thread_connect_to_parent(RRDHOST *host, int default_port, int timeout, struct sender_state *s) { - - struct timeval tv = { - .tv_sec = timeout, - .tv_usec = 0 - }; - - // make sure the socket is closed - rrdpush_sender_thread_close_socket(host); - - s->rrdpush_sender_socket = connect_to_one_of_destinations( - host - , default_port - , &tv - , &s->reconnects_counter - , s->connected_to - , sizeof(s->connected_to)-1 - , &host->destination - ); - - if(unlikely(s->rrdpush_sender_socket == -1)) { - // netdata_log_error("STREAM %s [send to %s]: could not connect to parent node at this time.", rrdhost_hostname(host), host->rrdpush_send_destination); - return false; - } - - // netdata_log_info("STREAM %s [send to %s]: initializing communication...", rrdhost_hostname(host), s->connected_to); - - // reset our capabilities to default - s->capabilities = stream_our_capabilities(host, true); - - /* TODO: During the implementation of #7265 switch the set of variables to HOST_* and CONTAINER_* if the - version negotiation resulted in a high enough version. 
- */ - stream_encoded_t se; - rrdpush_encode_variable(&se, host); - - host->sender->hops = host->system_info->hops + 1; - - char http[HTTP_HEADER_SIZE + 1]; - int eol = snprintfz(http, HTTP_HEADER_SIZE, - "STREAM " - "key=%s" - "&hostname=%s" - "®istry_hostname=%s" - "&machine_guid=%s" - "&update_every=%d" - "&os=%s" - "&timezone=%s" - "&abbrev_timezone=%s" - "&utc_offset=%d" - "&hops=%d" - "&ml_capable=%d" - "&ml_enabled=%d" - "&mc_version=%d" - "&ver=%u" - "&NETDATA_INSTANCE_CLOUD_TYPE=%s" - "&NETDATA_INSTANCE_CLOUD_INSTANCE_TYPE=%s" - "&NETDATA_INSTANCE_CLOUD_INSTANCE_REGION=%s" - "&NETDATA_SYSTEM_OS_NAME=%s" - "&NETDATA_SYSTEM_OS_ID=%s" - "&NETDATA_SYSTEM_OS_ID_LIKE=%s" - "&NETDATA_SYSTEM_OS_VERSION=%s" - "&NETDATA_SYSTEM_OS_VERSION_ID=%s" - "&NETDATA_SYSTEM_OS_DETECTION=%s" - "&NETDATA_HOST_IS_K8S_NODE=%s" - "&NETDATA_SYSTEM_KERNEL_NAME=%s" - "&NETDATA_SYSTEM_KERNEL_VERSION=%s" - "&NETDATA_SYSTEM_ARCHITECTURE=%s" - "&NETDATA_SYSTEM_VIRTUALIZATION=%s" - "&NETDATA_SYSTEM_VIRT_DETECTION=%s" - "&NETDATA_SYSTEM_CONTAINER=%s" - "&NETDATA_SYSTEM_CONTAINER_DETECTION=%s" - "&NETDATA_CONTAINER_OS_NAME=%s" - "&NETDATA_CONTAINER_OS_ID=%s" - "&NETDATA_CONTAINER_OS_ID_LIKE=%s" - "&NETDATA_CONTAINER_OS_VERSION=%s" - "&NETDATA_CONTAINER_OS_VERSION_ID=%s" - "&NETDATA_CONTAINER_OS_DETECTION=%s" - "&NETDATA_SYSTEM_CPU_LOGICAL_CPU_COUNT=%s" - "&NETDATA_SYSTEM_CPU_FREQ=%s" - "&NETDATA_SYSTEM_TOTAL_RAM=%s" - "&NETDATA_SYSTEM_TOTAL_DISK_SIZE=%s" - "&NETDATA_PROTOCOL_VERSION=%s" - HTTP_1_1 HTTP_ENDL - "User-Agent: %s/%s\r\n" - "Accept: */*\r\n\r\n" - , host->rrdpush_send_api_key - , rrdhost_hostname(host) - , rrdhost_registry_hostname(host) - , host->machine_guid - , default_rrd_update_every - , rrdhost_os(host) - , rrdhost_timezone(host) - , rrdhost_abbrev_timezone(host) - , host->utc_offset - , host->sender->hops - , host->system_info->ml_capable - , host->system_info->ml_enabled - , host->system_info->mc_version - , s->capabilities - , (host->system_info->cloud_provider_type) ? host->system_info->cloud_provider_type : "" - , (host->system_info->cloud_instance_type) ? host->system_info->cloud_instance_type : "" - , (host->system_info->cloud_instance_region) ? host->system_info->cloud_instance_region : "" - , se.os_name - , se.os_id - , (host->system_info->host_os_id_like) ? host->system_info->host_os_id_like : "" - , se.os_version - , (host->system_info->host_os_version_id) ? host->system_info->host_os_version_id : "" - , (host->system_info->host_os_detection) ? host->system_info->host_os_detection : "" - , (host->system_info->is_k8s_node) ? host->system_info->is_k8s_node : "" - , se.kernel_name - , se.kernel_version - , (host->system_info->architecture) ? host->system_info->architecture : "" - , (host->system_info->virtualization) ? host->system_info->virtualization : "" - , (host->system_info->virt_detection) ? host->system_info->virt_detection : "" - , (host->system_info->container) ? host->system_info->container : "" - , (host->system_info->container_detection) ? host->system_info->container_detection : "" - , (host->system_info->container_os_name) ? host->system_info->container_os_name : "" - , (host->system_info->container_os_id) ? host->system_info->container_os_id : "" - , (host->system_info->container_os_id_like) ? host->system_info->container_os_id_like : "" - , (host->system_info->container_os_version) ? host->system_info->container_os_version : "" - , (host->system_info->container_os_version_id) ? host->system_info->container_os_version_id : "" - , (host->system_info->container_os_detection) ? 
host->system_info->container_os_detection : "" - , (host->system_info->host_cores) ? host->system_info->host_cores : "" - , (host->system_info->host_cpu_freq) ? host->system_info->host_cpu_freq : "" - , (host->system_info->host_ram_total) ? host->system_info->host_ram_total : "" - , (host->system_info->host_disk_space) ? host->system_info->host_disk_space : "" - , STREAMING_PROTOCOL_VERSION - , rrdhost_program_name(host) - , rrdhost_program_version(host) - ); - http[eol] = 0x00; - rrdpush_clean_encoded(&se); - - if(!rrdpush_sender_connect_ssl(s)) - return false; - - if (s->parent_using_h2o && rrdpush_http_upgrade_prelude(host, s)) { - ND_LOG_STACK lgs[] = { - ND_LOG_FIELD_TXT(NDF_RESPONSE_CODE, RRDPUSH_STATUS_CANT_UPGRADE_CONNECTION), - ND_LOG_FIELD_END(), - }; - ND_LOG_STACK_PUSH(lgs); - - worker_is_busy(WORKER_SENDER_JOB_DISCONNECT_CANT_UPGRADE_CONNECTION); - rrdpush_sender_thread_close_socket(host); - host->destination->reason = STREAM_HANDSHAKE_ERROR_HTTP_UPGRADE; - host->destination->postpone_reconnection_until = now_realtime_sec() + 1 * 60; - return false; - } - - ssize_t len = (ssize_t)strlen(http); - ssize_t bytes = send_timeout( -#ifdef ENABLE_HTTPS - &host->sender->ssl, -#endif - s->rrdpush_sender_socket, - http, - len, - 0, - timeout); - - if(bytes <= 0) { // timeout is 0 - ND_LOG_STACK lgs[] = { - ND_LOG_FIELD_TXT(NDF_RESPONSE_CODE, RRDPUSH_STATUS_TIMEOUT), - ND_LOG_FIELD_END(), - }; - ND_LOG_STACK_PUSH(lgs); - - worker_is_busy(WORKER_SENDER_JOB_DISCONNECT_TIMEOUT); - rrdpush_sender_thread_close_socket(host); - - nd_log(NDLS_DAEMON, NDLP_ERR, - "STREAM %s [send to %s]: failed to send HTTP header to remote netdata.", - rrdhost_hostname(host), s->connected_to); - - host->destination->reason = STREAM_HANDSHAKE_ERROR_SEND_TIMEOUT; - host->destination->postpone_reconnection_until = now_realtime_sec() + 1 * 60; - return false; - } - - bytes = recv_timeout( -#ifdef ENABLE_HTTPS - &host->sender->ssl, -#endif - s->rrdpush_sender_socket, - http, - HTTP_HEADER_SIZE, - 0, - timeout); - - if(bytes <= 0) { // timeout is 0 - ND_LOG_STACK lgs[] = { - ND_LOG_FIELD_TXT(NDF_RESPONSE_CODE, RRDPUSH_STATUS_TIMEOUT), - ND_LOG_FIELD_END(), - }; - ND_LOG_STACK_PUSH(lgs); - - worker_is_busy(WORKER_SENDER_JOB_DISCONNECT_TIMEOUT); - rrdpush_sender_thread_close_socket(host); - - nd_log(NDLS_DAEMON, NDLP_ERR, - "STREAM %s [send to %s]: remote netdata does not respond.", - rrdhost_hostname(host), s->connected_to); - - host->destination->reason = STREAM_HANDSHAKE_ERROR_RECEIVE_TIMEOUT; - host->destination->postpone_reconnection_until = now_realtime_sec() + 30; - return false; - } - - if(sock_setnonblock(s->rrdpush_sender_socket) < 0) - nd_log(NDLS_DAEMON, NDLP_WARNING, - "STREAM %s [send to %s]: cannot set non-blocking mode for socket.", - rrdhost_hostname(host), s->connected_to); - sock_setcloexec(s->rrdpush_sender_socket); - - if(sock_enlarge_out(s->rrdpush_sender_socket) < 0) - nd_log(NDLS_DAEMON, NDLP_WARNING, - "STREAM %s [send to %s]: cannot enlarge the socket buffer.", - rrdhost_hostname(host), s->connected_to); - - http[bytes] = '\0'; - if(!rrdpush_sender_validate_response(host, s, http, bytes)) - return false; - - rrdpush_compression_initialize(s); - - log_sender_capabilities(s); - - ND_LOG_STACK lgs[] = { - ND_LOG_FIELD_TXT(NDF_RESPONSE_CODE, RRDPUSH_STATUS_CONNECTED), - ND_LOG_FIELD_END(), - }; - ND_LOG_STACK_PUSH(lgs); - - nd_log(NDLS_DAEMON, NDLP_DEBUG, - "STREAM %s: connected to %s...", - rrdhost_hostname(host), s->connected_to); - - return true; -} - -static bool attempt_to_connect(struct 
sender_state *state) { - ND_LOG_STACK lgs[] = { - ND_LOG_FIELD_UUID(NDF_MESSAGE_ID, &streaming_to_parent_msgid), - ND_LOG_FIELD_END(), - }; - ND_LOG_STACK_PUSH(lgs); - - state->send_attempts = 0; - - // reset the bytes we have sent for this session - state->sent_bytes_on_this_connection = 0; - memset(state->sent_bytes_on_this_connection_per_type, 0, sizeof(state->sent_bytes_on_this_connection_per_type)); - - if(rrdpush_sender_thread_connect_to_parent(state->host, state->default_port, state->timeout, state)) { - // reset the buffer, to properly send charts and metrics - rrdpush_sender_on_connect(state->host); - - // send from the beginning - state->begin = 0; - - // make sure the next reconnection will be immediate - state->not_connected_loops = 0; - - // let the data collection threads know we are ready - rrdhost_flag_set(state->host, RRDHOST_FLAG_RRDPUSH_SENDER_CONNECTED); - - rrdpush_sender_after_connect(state->host); - - return true; - } - - // we couldn't connect - - // increase the failed connections counter - state->not_connected_loops++; - - // slow re-connection on repeating errors - usec_t now_ut = now_monotonic_usec(); - usec_t end_ut = now_ut + USEC_PER_SEC * state->reconnect_delay; - while(now_ut < end_ut) { - if(nd_thread_signaled_to_cancel()) - return false; - sleep_usec(100 * USEC_PER_MS); // seconds - now_ut = now_monotonic_usec(); - } - - return false; + // clear the parent's claim id + rrdpush_sender_clear_parent_claim_id(host); + rrdpush_receiver_send_node_and_claim_id_to_child(host); + stream_path_parent_disconnected(host); } // TCP window is open, and we have data to transmit. @@ -1037,14 +106,10 @@ static ssize_t attempt_to_send(struct sender_state *s) { size_t outstanding = cbuffer_next_unsafe(s->buffer, &chunk); netdata_log_debug(D_STREAM, "STREAM: Sending data. 
Buffer r=%zu w=%zu s=%zu, next chunk=%zu", cb->read, cb->write, cb->size, outstanding); -#ifdef ENABLE_HTTPS if(SSL_connection(&s->ssl)) ret = netdata_ssl_write(&s->ssl, chunk, outstanding); else ret = send(s->rrdpush_sender_socket, chunk, outstanding, MSG_DONTWAIT); -#else - ret = send(s->rrdpush_sender_socket, chunk, outstanding, MSG_DONTWAIT); -#endif if (likely(ret > 0)) { cbuffer_remove_unsafe(s->buffer, ret); @@ -1058,7 +123,7 @@ static ssize_t attempt_to_send(struct sender_state *s) { worker_is_busy(WORKER_SENDER_JOB_DISCONNECT_SEND_ERROR); netdata_log_debug(D_STREAM, "STREAM: Send failed - closing socket..."); netdata_log_error("STREAM %s [send to %s]: failed to send metrics - closing connection - we have sent %zu bytes on this connection.", rrdhost_hostname(s->host), s->connected_to, s->sent_bytes_on_this_connection); - rrdpush_sender_thread_close_socket(s->host); + rrdpush_sender_thread_close_socket(s); } else netdata_log_debug(D_STREAM, "STREAM: send() returned 0 -> no error but no transmission"); @@ -1072,14 +137,10 @@ static ssize_t attempt_to_send(struct sender_state *s) { static ssize_t attempt_read(struct sender_state *s) { ssize_t ret; -#ifdef ENABLE_HTTPS if (SSL_connection(&s->ssl)) ret = netdata_ssl_read(&s->ssl, s->read_buffer + s->read_len, sizeof(s->read_buffer) - s->read_len - 1); else ret = recv(s->rrdpush_sender_socket, s->read_buffer + s->read_len, sizeof(s->read_buffer) - s->read_len - 1,MSG_DONTWAIT); -#else - ret = recv(s->rrdpush_sender_socket, s->read_buffer + s->read_len, sizeof(s->read_buffer) - s->read_len - 1,MSG_DONTWAIT); -#endif if (ret > 0) { s->read_len += ret; @@ -1089,13 +150,9 @@ static ssize_t attempt_read(struct sender_state *s) { if (ret < 0 && (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR)) return ret; -#ifdef ENABLE_HTTPS if (SSL_connection(&s->ssl)) worker_is_busy(WORKER_SENDER_JOB_DISCONNECT_SSL_ERROR); - else -#endif - - if (ret == 0 || errno == ECONNRESET) { + else if (ret == 0 || errno == ECONNRESET) { worker_is_busy(WORKER_SENDER_JOB_DISCONNECT_PARENT_CLOSED); netdata_log_error("STREAM %s [send to %s]: connection closed by far end.", rrdhost_hostname(s->host), s->connected_to); } @@ -1104,258 +161,11 @@ static ssize_t attempt_read(struct sender_state *s) { netdata_log_error("STREAM %s [send to %s]: error during receive (%zd) - closing connection.", rrdhost_hostname(s->host), s->connected_to, ret); } - rrdpush_sender_thread_close_socket(s->host); + rrdpush_sender_thread_close_socket(s); return ret; } -struct inflight_stream_function { - struct sender_state *sender; - STRING *transaction; - usec_t received_ut; -}; - -static void stream_execute_function_callback(BUFFER *func_wb, int code, void *data) { - struct inflight_stream_function *tmp = data; - struct sender_state *s = tmp->sender; - - if(rrdhost_can_send_definitions_to_parent(s->host)) { - BUFFER *wb = sender_start(s); - - pluginsd_function_result_begin_to_buffer(wb - , string2str(tmp->transaction) - , code - , content_type_id2string(func_wb->content_type) - , func_wb->expires); - - buffer_fast_strcat(wb, buffer_tostring(func_wb), buffer_strlen(func_wb)); - pluginsd_function_result_end_to_buffer(wb); - - sender_commit(s, wb, STREAM_TRAFFIC_TYPE_FUNCTIONS); - sender_thread_buffer_free(); - - internal_error(true, "STREAM %s [send to %s] FUNCTION transaction %s sending back response (%zu bytes, %"PRIu64" usec).", - rrdhost_hostname(s->host), s->connected_to, - string2str(tmp->transaction), - buffer_strlen(func_wb), - now_realtime_usec() - tmp->received_ut); - } - - 
string_freez(tmp->transaction); - buffer_free(func_wb); - freez(tmp); -} - -static void stream_execute_function_progress_callback(void *data, size_t done, size_t all) { - struct inflight_stream_function *tmp = data; - struct sender_state *s = tmp->sender; - - if(rrdhost_can_send_definitions_to_parent(s->host)) { - BUFFER *wb = sender_start(s); - - buffer_sprintf(wb, PLUGINSD_KEYWORD_FUNCTION_PROGRESS " '%s' %zu %zu\n", - string2str(tmp->transaction), done, all); - - sender_commit(s, wb, STREAM_TRAFFIC_TYPE_FUNCTIONS); - } -} - -static void execute_commands_function(struct sender_state *s, const char *command, const char *transaction, const char *timeout_s, const char *function, BUFFER *payload, const char *access, const char *source) { - worker_is_busy(WORKER_SENDER_JOB_FUNCTION_REQUEST); - nd_log(NDLS_ACCESS, NDLP_INFO, NULL); - - if(!transaction || !*transaction || !timeout_s || !*timeout_s || !function || !*function) { - netdata_log_error("STREAM %s [send to %s] %s execution command is incomplete (transaction = '%s', timeout = '%s', function = '%s'). Ignoring it.", - rrdhost_hostname(s->host), s->connected_to, - command, - transaction?transaction:"(unset)", - timeout_s?timeout_s:"(unset)", - function?function:"(unset)"); - } - else { - int timeout = str2i(timeout_s); - if(timeout <= 0) timeout = PLUGINS_FUNCTIONS_TIMEOUT_DEFAULT; - - struct inflight_stream_function *tmp = callocz(1, sizeof(struct inflight_stream_function)); - tmp->received_ut = now_realtime_usec(); - tmp->sender = s; - tmp->transaction = string_strdupz(transaction); - BUFFER *wb = buffer_create(1024, &netdata_buffers_statistics.buffers_functions); - - int code = rrd_function_run(s->host, wb, timeout, - http_access_from_hex_mapping_old_roles(access), function, false, transaction, - stream_execute_function_callback, tmp, - stream_has_capability(s, STREAM_CAP_PROGRESS) ? stream_execute_function_progress_callback : NULL, - stream_has_capability(s, STREAM_CAP_PROGRESS) ? 
tmp : NULL, - NULL, NULL, payload, source); - - if(code != HTTP_RESP_OK) { - if (!buffer_strlen(wb)) - rrd_call_function_error(wb, "Failed to route request to collector", code); - } - } -} - -static void cleanup_intercepting_input(struct sender_state *s) { - freez((void *)s->functions.transaction); - freez((void *)s->functions.timeout_s); - freez((void *)s->functions.function); - freez((void *)s->functions.access); - freez((void *)s->functions.source); - buffer_free(s->functions.payload); - - s->functions.transaction = NULL; - s->functions.timeout_s = NULL; - s->functions.function = NULL; - s->functions.payload = NULL; - s->functions.access = NULL; - s->functions.source = NULL; - s->functions.intercept_input = false; -} - -static void execute_commands_cleanup(struct sender_state *s) { - cleanup_intercepting_input(s); -} - -// This is just a placeholder until the gap filling state machine is inserted -void execute_commands(struct sender_state *s) { - worker_is_busy(WORKER_SENDER_JOB_EXECUTE); - - ND_LOG_STACK lgs[] = { - ND_LOG_FIELD_CB(NDF_REQUEST, line_splitter_reconstruct_line, &s->line), - ND_LOG_FIELD_END(), - }; - ND_LOG_STACK_PUSH(lgs); - - char *start = s->read_buffer, *end = &s->read_buffer[s->read_len], *newline; - *end = '\0'; - for( ; start < end ; start = newline + 1) { - newline = strchr(start, '\n'); - - if(!newline) { - if(s->functions.intercept_input) { - buffer_strcat(s->functions.payload, start); - start = end; - } - break; - } - - *newline = '\0'; - s->line.count++; - - if(s->functions.intercept_input) { - if(strcmp(start, PLUGINSD_CALL_FUNCTION_PAYLOAD_END) == 0) { - execute_commands_function(s, - PLUGINSD_CALL_FUNCTION_PAYLOAD_END, - s->functions.transaction, s->functions.timeout_s, - s->functions.function, s->functions.payload, - s->functions.access, s->functions.source); - - cleanup_intercepting_input(s); - } - else { - buffer_strcat(s->functions.payload, start); - buffer_fast_charcat(s->functions.payload, '\n'); - } - - continue; - } - - s->line.num_words = quoted_strings_splitter_pluginsd(start, s->line.words, PLUGINSD_MAX_WORDS); - const char *command = get_word(s->line.words, s->line.num_words, 0); - - if(command && strcmp(command, PLUGINSD_CALL_FUNCTION) == 0) { - char *transaction = get_word(s->line.words, s->line.num_words, 1); - char *timeout_s = get_word(s->line.words, s->line.num_words, 2); - char *function = get_word(s->line.words, s->line.num_words, 3); - char *access = get_word(s->line.words, s->line.num_words, 4); - char *source = get_word(s->line.words, s->line.num_words, 5); - - execute_commands_function(s, command, transaction, timeout_s, function, NULL, access, source); - } - else if(command && strcmp(command, PLUGINSD_CALL_FUNCTION_PAYLOAD_BEGIN) == 0) { - char *transaction = get_word(s->line.words, s->line.num_words, 1); - char *timeout_s = get_word(s->line.words, s->line.num_words, 2); - char *function = get_word(s->line.words, s->line.num_words, 3); - char *access = get_word(s->line.words, s->line.num_words, 4); - char *source = get_word(s->line.words, s->line.num_words, 5); - char *content_type = get_word(s->line.words, s->line.num_words, 6); - - s->functions.transaction = strdupz(transaction ? transaction : ""); - s->functions.timeout_s = strdupz(timeout_s ? timeout_s : ""); - s->functions.function = strdupz(function ? function : ""); - s->functions.access = strdupz(access ? access : ""); - s->functions.source = strdupz(source ? 
source : ""); - s->functions.payload = buffer_create(0, NULL); - s->functions.payload->content_type = content_type_string2id(content_type); - s->functions.intercept_input = true; - } - else if(command && strcmp(command, PLUGINSD_CALL_FUNCTION_CANCEL) == 0) { - worker_is_busy(WORKER_SENDER_JOB_FUNCTION_REQUEST); - nd_log(NDLS_ACCESS, NDLP_DEBUG, NULL); - - char *transaction = get_word(s->line.words, s->line.num_words, 1); - if(transaction && *transaction) - rrd_function_cancel(transaction); - } - else if(command && strcmp(command, PLUGINSD_CALL_FUNCTION_PROGRESS) == 0) { - worker_is_busy(WORKER_SENDER_JOB_FUNCTION_REQUEST); - nd_log(NDLS_ACCESS, NDLP_DEBUG, NULL); - - char *transaction = get_word(s->line.words, s->line.num_words, 1); - if(transaction && *transaction) - rrd_function_progress(transaction); - } - else if (command && strcmp(command, PLUGINSD_KEYWORD_REPLAY_CHART) == 0) { - worker_is_busy(WORKER_SENDER_JOB_REPLAY_REQUEST); - nd_log(NDLS_ACCESS, NDLP_DEBUG, NULL); - - const char *chart_id = get_word(s->line.words, s->line.num_words, 1); - const char *start_streaming = get_word(s->line.words, s->line.num_words, 2); - const char *after = get_word(s->line.words, s->line.num_words, 3); - const char *before = get_word(s->line.words, s->line.num_words, 4); - - if (!chart_id || !start_streaming || !after || !before) { - netdata_log_error("STREAM %s [send to %s] %s command is incomplete" - " (chart=%s, start_streaming=%s, after=%s, before=%s)", - rrdhost_hostname(s->host), s->connected_to, - command, - chart_id ? chart_id : "(unset)", - start_streaming ? start_streaming : "(unset)", - after ? after : "(unset)", - before ? before : "(unset)"); - } - else { - replication_add_request(s, chart_id, - strtoll(after, NULL, 0), - strtoll(before, NULL, 0), - !strcmp(start_streaming, "true") - ); - } - } - else { - netdata_log_error("STREAM %s [send to %s] received unknown command over connection: %s", rrdhost_hostname(s->host), s->connected_to, s->line.words[0]?s->line.words[0]:"(unset)"); - } - - line_splitter_reset(&s->line); - worker_is_busy(WORKER_SENDER_JOB_EXECUTE); - } - - if (start < end) { - memmove(s->read_buffer, start, end-start); - s->read_len = end - start; - } - else { - s->read_buffer[0] = '\0'; - s->read_len = 0; - } -} - -struct rrdpush_sender_thread_data { - RRDHOST *host; - char *pipe_buffer; -}; - static bool rrdpush_sender_pipe_close(RRDHOST *host, int *pipe_fds, bool reopen) { static netdata_mutex_t mutex = NETDATA_MUTEX_INITIALIZER; @@ -1449,7 +259,7 @@ static void rrdhost_clear_sender___while_having_sender_mutex(RRDHOST *host) { rrdpush_reset_destinations_postpone_time(host); } -static bool rrdhost_sender_should_exit(struct sender_state *s) { +bool rrdhost_sender_should_exit(struct sender_state *s) { if(unlikely(nd_thread_signaled_to_cancel())) { if(!s->exit.reason) s->exit.reason = STREAM_HANDSHAKE_DISCONNECT_SHUTDOWN; @@ -1483,64 +293,6 @@ static bool rrdhost_sender_should_exit(struct sender_state *s) { return false; } -static void rrdpush_sender_thread_cleanup_callback(void *pptr) { - struct rrdpush_sender_thread_data *s = CLEANUP_FUNCTION_GET_PTR(pptr); - if(!s) return; - - worker_unregister(); - - RRDHOST *host = s->host; - - sender_lock(host->sender); - netdata_log_info("STREAM %s [send]: sending thread exits %s", - rrdhost_hostname(host), - host->sender->exit.reason != STREAM_HANDSHAKE_NEVER ? 
stream_handshake_error_to_string(host->sender->exit.reason) : ""); - - rrdpush_sender_thread_close_socket(host); - rrdpush_sender_pipe_close(host, host->sender->rrdpush_sender_pipe, false); - execute_commands_cleanup(host->sender); - - rrdhost_clear_sender___while_having_sender_mutex(host); - -#ifdef NETDATA_LOG_STREAM_SENDER - if(host->sender->stream_log_fp) { - fclose(host->sender->stream_log_fp); - host->sender->stream_log_fp = NULL; - } -#endif - - sender_unlock(host->sender); - - freez(s->pipe_buffer); - freez(s); -} - -void rrdpush_initialize_ssl_ctx(RRDHOST *host __maybe_unused) { -#ifdef ENABLE_HTTPS - static SPINLOCK sp = NETDATA_SPINLOCK_INITIALIZER; - spinlock_lock(&sp); - - if(netdata_ssl_streaming_sender_ctx || !host) { - spinlock_unlock(&sp); - return; - } - - for(struct rrdpush_destinations *d = host->destinations; d ; d = d->next) { - if (d->ssl) { - // we need to initialize SSL - - netdata_ssl_initialize_ctx(NETDATA_SSL_STREAMING_SENDER_CTX); - ssl_security_location_for_context(netdata_ssl_streaming_sender_ctx, netdata_ssl_ca_file, netdata_ssl_ca_path); - - // stop the loop - break; - } - } - - spinlock_unlock(&sp); -#endif -} - static bool stream_sender_log_capabilities(BUFFER *wb, void *ptr) { struct sender_state *state = ptr; if(!state) @@ -1555,11 +307,7 @@ static bool stream_sender_log_transport(BUFFER *wb, void *ptr) { if(!state) return false; -#ifdef ENABLE_HTTPS buffer_strcat(wb, SSL_connection(&state->ssl) ? "https" : "http"); -#else - buffer_strcat(wb, "http"); -#endif return true; } @@ -1627,9 +375,9 @@ void *rrdpush_sender_thread(void *ptr) { worker_register_job_custom_metric(WORKER_SENDER_JOB_BYTES_COMPRESSION_RATIO, "cumulative compression savings ratio", "%", WORKER_METRIC_ABSOLUTE); worker_register_job_custom_metric(WORKER_SENDER_JOB_REPLAY_DICT_SIZE, "replication dict entries", "entries", WORKER_METRIC_ABSOLUTE); - if(!rrdhost_has_rrdpush_sender_enabled(s->host) || !s->host->rrdpush_send_destination || - !*s->host->rrdpush_send_destination || !s->host->rrdpush_send_api_key || - !*s->host->rrdpush_send_api_key) { + if(!rrdhost_has_rrdpush_sender_enabled(s->host) || !s->host->rrdpush.send.destination || + !*s->host->rrdpush.send.destination || !s->host->rrdpush.send.api_key || + !*s->host->rrdpush.send.api_key) { netdata_log_error("STREAM %s [send]: thread created (task id %d), but host has streaming disabled.", rrdhost_hostname(s->host), gettid_cached()); return NULL; @@ -1641,12 +389,12 @@ void *rrdpush_sender_thread(void *ptr) { return NULL; } - rrdpush_initialize_ssl_ctx(s->host); + rrdpush_sender_ssl_init(s->host); netdata_log_info("STREAM %s [send]: thread created (task id %d)", rrdhost_hostname(s->host), gettid_cached()); - s->timeout = (int)appconfig_get_number( - &stream_config, CONFIG_SECTION_STREAM, "timeout seconds", 600); + s->timeout = (int)appconfig_get_duration_seconds( + &stream_config, CONFIG_SECTION_STREAM, "timeout", 600); s->default_port = (int)appconfig_get_number( &stream_config, CONFIG_SECTION_STREAM, "default port", 19999); @@ -1654,20 +402,19 @@ void *rrdpush_sender_thread(void *ptr) { s->buffer->max_size = (size_t)appconfig_get_number( &stream_config, CONFIG_SECTION_STREAM, "buffer size bytes", 1024 * 1024 * 10); - s->reconnect_delay = (unsigned int)appconfig_get_number( - &stream_config, CONFIG_SECTION_STREAM, "reconnect delay seconds", 5); + s->reconnect_delay = (unsigned int)appconfig_get_duration_seconds( + &stream_config, CONFIG_SECTION_STREAM, "reconnect delay", 5); - remote_clock_resync_iterations = (unsigned 
int)appconfig_get_number( + stream_conf_initial_clock_resync_iterations = (unsigned int)appconfig_get_number( &stream_config, CONFIG_SECTION_STREAM, "initial clock resync iterations", - remote_clock_resync_iterations); // TODO: REMOVE FOR SLEW / GAPFILLING + stream_conf_initial_clock_resync_iterations); // TODO: REMOVE FOR SLEW / GAPFILLING s->parent_using_h2o = appconfig_get_boolean( &stream_config, CONFIG_SECTION_STREAM, "parent using h2o", false); // initialize rrdpush globals - rrdhost_flag_clear(s->host, RRDHOST_FLAG_RRDPUSH_SENDER_READY_4_METRICS); - rrdhost_flag_clear(s->host, RRDHOST_FLAG_RRDPUSH_SENDER_CONNECTED); + rrdhost_flag_clear(s->host, RRDHOST_FLAG_RRDPUSH_SENDER_CONNECTED | RRDHOST_FLAG_RRDPUSH_SENDER_READY_4_METRICS); int pipe_buffer_size = 10 * 1024; #ifdef F_GETPIPE_SZ @@ -1682,12 +429,9 @@ void *rrdpush_sender_thread(void *ptr) { return NULL; } - struct rrdpush_sender_thread_data *thread_data = callocz(1, sizeof(struct rrdpush_sender_thread_data)); - thread_data->pipe_buffer = mallocz(pipe_buffer_size); - thread_data->host = s->host; - - CLEANUP_FUNCTION_REGISTER(rrdpush_sender_thread_cleanup_callback) cleanup_ptr = thread_data; + char *pipe_buffer = mallocz(pipe_buffer_size); + bool was_connected = false; size_t iterations = 0; time_t now_s = now_monotonic_sec(); while(!rrdhost_sender_should_exit(s)) { @@ -1695,36 +439,11 @@ void *rrdpush_sender_thread(void *ptr) { // The connection attempt blocks (after which we use the socket in nonblocking) if(unlikely(s->rrdpush_sender_socket == -1)) { - worker_is_busy(WORKER_SENDER_JOB_CONNECT); - - now_s = now_monotonic_sec(); - rrdpush_sender_cbuffer_recreate_timed(s, now_s, false, true); - execute_commands_cleanup(s); - - rrdhost_flag_clear(s->host, RRDHOST_FLAG_RRDPUSH_SENDER_READY_4_METRICS); - s->flags &= ~SENDER_FLAG_OVERFLOW; - s->read_len = 0; - s->buffer->read = 0; - s->buffer->write = 0; - - if(!attempt_to_connect(s)) - continue; - - if(rrdhost_sender_should_exit(s)) - break; - - now_s = s->last_traffic_seen_t = now_monotonic_sec(); - rrdpush_send_claimed_id(s->host); - rrdpush_send_host_labels(s->host); - rrdpush_send_global_functions(s->host); - s->replication.oldest_request_after_t = 0; - - rrdhost_flag_set(s->host, RRDHOST_FLAG_RRDPUSH_SENDER_READY_4_METRICS); - - nd_log(NDLS_DAEMON, NDLP_DEBUG, - "STREAM %s [send to %s]: enabling metrics streaming...", - rrdhost_hostname(s->host), s->connected_to); + if(was_connected) + rrdpush_sender_on_disconnect(s->host); + was_connected = rrdpush_sender_connect(s); + now_s = s->last_traffic_seen_t; continue; } @@ -1738,7 +457,7 @@ void *rrdpush_sender_thread(void *ptr) { )) { worker_is_busy(WORKER_SENDER_JOB_DISCONNECT_TIMEOUT); netdata_log_error("STREAM %s [send to %s]: could not send metrics for %d seconds - closing connection - we have sent %zu bytes on this connection via %zu send attempts.", rrdhost_hostname(s->host), s->connected_to, s->timeout, s->sent_bytes_on_this_connection, s->send_attempts); - rrdpush_sender_thread_close_socket(s->host); + rrdpush_sender_thread_close_socket(s); continue; } @@ -1767,9 +486,9 @@ void *rrdpush_sender_thread(void *ptr) { if(unlikely(s->rrdpush_sender_pipe[PIPE_READ] == -1)) { if(!rrdpush_sender_pipe_close(s->host, s->rrdpush_sender_pipe, true)) { - netdata_log_error("STREAM %s [send]: cannot create inter-thread communication pipe. Disabling streaming.", - rrdhost_hostname(s->host)); - rrdpush_sender_thread_close_socket(s->host); + netdata_log_error("STREAM %s [send]: cannot create inter-thread communication pipe. 
" + "Disabling streaming.", rrdhost_hostname(s->host)); + rrdpush_sender_thread_close_socket(s); break; } } @@ -1820,7 +539,7 @@ void *rrdpush_sender_thread(void *ptr) { worker_is_busy(WORKER_SENDER_JOB_DISCONNECT_POLL_ERROR); netdata_log_error("STREAM %s [send to %s]: failed to poll(). Closing socket.", rrdhost_hostname(s->host), s->connected_to); rrdpush_sender_pipe_close(s->host, s->rrdpush_sender_pipe, true); - rrdpush_sender_thread_close_socket(s->host); + rrdpush_sender_thread_close_socket(s); continue; } @@ -1839,7 +558,7 @@ void *rrdpush_sender_thread(void *ptr) { worker_is_busy(WORKER_SENDER_JOB_PIPE_READ); netdata_log_debug(D_STREAM, "STREAM: Data added to send buffer (current buffer chunk %zu bytes)...", outstanding); - if (read(fds[Collector].fd, thread_data->pipe_buffer, pipe_buffer_size) == -1) + if (read(fds[Collector].fd, pipe_buffer, pipe_buffer_size) == -1) netdata_log_error("STREAM %s [send to %s]: cannot read from internal pipe.", rrdhost_hostname(s->host), s->connected_to); } @@ -1854,7 +573,7 @@ void *rrdpush_sender_thread(void *ptr) { } if(unlikely(s->read_len)) - execute_commands(s); + rrdpush_sender_execute_commands(s); if(unlikely(fds[Collector].revents & (POLLERR|POLLHUP|POLLNVAL))) { char *error = NULL; @@ -1869,7 +588,7 @@ void *rrdpush_sender_thread(void *ptr) { if(error) { rrdpush_sender_pipe_close(s->host, s->rrdpush_sender_pipe, true); netdata_log_error("STREAM %s [send to %s]: restarting internal pipe: %s.", - rrdhost_hostname(s->host), s->connected_to, error); + rrdhost_hostname(s->host), s->connected_to, error); } } @@ -1886,8 +605,8 @@ void *rrdpush_sender_thread(void *ptr) { if(unlikely(error)) { worker_is_busy(WORKER_SENDER_JOB_DISCONNECT_SOCKET_ERROR); netdata_log_error("STREAM %s [send to %s]: restarting connection: %s - %zu bytes transmitted.", - rrdhost_hostname(s->host), s->connected_to, error, s->sent_bytes_on_this_connection); - rrdpush_sender_thread_close_socket(s->host); + rrdhost_hostname(s->host), s->connected_to, error, s->sent_bytes_on_this_connection); + rrdpush_sender_thread_close_socket(s); } } @@ -1896,12 +615,57 @@ void *rrdpush_sender_thread(void *ptr) { worker_is_busy(WORKER_SENDER_JOB_DISCONNECT_OVERFLOW); errno_clear(); netdata_log_error("STREAM %s [send to %s]: buffer full (allocated %zu bytes) after sending %zu bytes. Restarting connection", - rrdhost_hostname(s->host), s->connected_to, s->buffer->size, s->sent_bytes_on_this_connection); - rrdpush_sender_thread_close_socket(s->host); + rrdhost_hostname(s->host), s->connected_to, s->buffer->size, s->sent_bytes_on_this_connection); + rrdpush_sender_thread_close_socket(s); } worker_set_metric(WORKER_SENDER_JOB_REPLAY_DICT_SIZE, (NETDATA_DOUBLE) dictionary_entries(s->replication.requests)); } + if(was_connected) + rrdpush_sender_on_disconnect(s->host); + + netdata_log_info("STREAM %s [send]: sending thread exits %s", + rrdhost_hostname(s->host), + s->exit.reason != STREAM_HANDSHAKE_NEVER ? 
stream_handshake_error_to_string(s->exit.reason) : ""); + + sender_lock(s); + { + rrdpush_sender_thread_close_socket(s); + rrdpush_sender_pipe_close(s->host, s->rrdpush_sender_pipe, false); + rrdpush_sender_execute_commands_cleanup(s); + + rrdhost_clear_sender___while_having_sender_mutex(s->host); + +#ifdef NETDATA_LOG_STREAM_SENDER + if (s->stream_log_fp) { + fclose(s->stream_log_fp); + s->stream_log_fp = NULL; + } +#endif + } + sender_unlock(s); + + freez(pipe_buffer); + worker_unregister(); + return NULL; } + +void rrdpush_sender_thread_spawn(RRDHOST *host) { + sender_lock(host->sender); + + if(!rrdhost_flag_check(host, RRDHOST_FLAG_RRDPUSH_SENDER_SPAWN)) { + char tag[NETDATA_THREAD_TAG_MAX + 1]; + snprintfz(tag, NETDATA_THREAD_TAG_MAX, THREAD_TAG_STREAM_SENDER "[%s]", rrdhost_hostname(host)); + + host->rrdpush_sender_thread = nd_thread_create(tag, NETDATA_THREAD_OPTION_DEFAULT, + rrdpush_sender_thread, (void *)host->sender); + if(!host->rrdpush_sender_thread) + nd_log_daemon(NDLP_ERR, "STREAM %s [send]: failed to create new thread for client.", rrdhost_hostname(host)); + else + rrdhost_flag_set(host, RRDHOST_FLAG_RRDPUSH_SENDER_SPAWN); + } + + sender_unlock(host->sender); +} diff --git a/src/streaming/sender.h b/src/streaming/sender.h new file mode 100644 index 000000000..94d104f5f --- /dev/null +++ b/src/streaming/sender.h @@ -0,0 +1,169 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_SENDER_H +#define NETDATA_SENDER_H + +#include "libnetdata/libnetdata.h" + +#define CONNECTED_TO_SIZE 100 + +#define CBUFFER_INITIAL_SIZE (16 * 1024) +#define THREAD_BUFFER_INITIAL_SIZE (CBUFFER_INITIAL_SIZE / 2) + +typedef enum __attribute__((packed)) { + STREAM_TRAFFIC_TYPE_REPLICATION = 0, + STREAM_TRAFFIC_TYPE_FUNCTIONS, + STREAM_TRAFFIC_TYPE_METADATA, + STREAM_TRAFFIC_TYPE_DATA, + STREAM_TRAFFIC_TYPE_DYNCFG, + + // terminator + STREAM_TRAFFIC_TYPE_MAX, +} STREAM_TRAFFIC_TYPE; + +typedef enum __attribute__((packed)) { + SENDER_FLAG_OVERFLOW = (1 << 0), // The buffer has overflowed +} SENDER_FLAGS; + +typedef struct { + char *os_name; + char *os_id; + char *os_version; + char *kernel_name; + char *kernel_version; +} stream_encoded_t; + +#include "stream-handshake.h" +#include "stream-capabilities.h" +#include "stream-conf.h" +#include "stream-compression/compression.h" + +#include "sender-destinations.h" + +typedef void (*rrdpush_defer_action_t)(struct sender_state *s, void *data); +typedef void (*rrdpush_defer_cleanup_t)(struct sender_state *s, void *data); + +struct sender_state { + RRDHOST *host; + pid_t tid; // the thread id of the sender, from gettid_cached() + SENDER_FLAGS flags; + int timeout; + int default_port; + uint32_t reconnect_delay; + char connected_to[CONNECTED_TO_SIZE + 1]; // We don't know which proxy we connect to, passed back from socket.c + size_t begin; + size_t reconnects_counter; + size_t sent_bytes; + size_t sent_bytes_on_this_connection; + size_t send_attempts; + time_t last_traffic_seen_t; + time_t last_state_since_t; // the timestamp of the last state (online/offline) change + size_t not_connected_loops; + // Metrics are collected asynchronously by collector threads calling rrdset_done_push(). This can also trigger + // the lazy creation of the sender thread - both cases (buffer access and thread creation) are guarded here.
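+ // (the lock is taken via the sender_lock()/sender_unlock() macros defined further below)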
+ SPINLOCK spinlock; + struct circular_buffer *buffer; + char read_buffer[PLUGINSD_LINE_MAX + 1]; + ssize_t read_len; + STREAM_CAPABILITIES capabilities; + STREAM_CAPABILITIES disabled_capabilities; + + size_t sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_MAX]; + + int rrdpush_sender_pipe[2]; // collector to sender thread signaling + int rrdpush_sender_socket; + + uint16_t hops; + + struct line_splitter line; + struct compressor_state compressor; + +#ifdef NETDATA_LOG_STREAM_SENDER + FILE *stream_log_fp; +#endif + + NETDATA_SSL ssl; // structure used to encrypt the connection + + struct { + bool shutdown; + STREAM_HANDSHAKE reason; + } exit; + + struct { + DICTIONARY *requests; // de-duplication of replication requests, per chart + time_t oldest_request_after_t; // the timestamp of the oldest replication request + time_t latest_completed_before_t; // the timestamp of the latest completed replication request + + struct { + size_t pending_requests; // the currently outstanding replication requests + size_t charts_replicating; // the number of unique charts with pending replication requests (one is added on every request and removed when the request finishes - it does not track completion of the replication for this chart) + bool reached_max; // true when the sender buffer should not get more replication responses + } atomic; + + } replication; + + struct { + bool pending_data; + size_t buffer_used_percentage; // the current utilization of the sending buffer + usec_t last_flush_time_ut; // the last time the sender flushed the sending buffer in USEC + time_t last_buffer_recreate_s; // the timestamp (in seconds) of the last sender buffer re-creation + } atomic; + + struct { + const char *end_keyword; + BUFFER *payload; + rrdpush_defer_action_t action; + rrdpush_defer_cleanup_t cleanup; + void *action_data; + } defer; + + bool parent_using_h2o; +}; + +#define sender_lock(sender) spinlock_lock(&(sender)->spinlock) +#define sender_unlock(sender) spinlock_unlock(&(sender)->spinlock) + +#define rrdpush_sender_pipe_has_pending_data(sender) __atomic_load_n(&(sender)->atomic.pending_data, __ATOMIC_RELAXED) +#define rrdpush_sender_pipe_set_pending_data(sender) __atomic_store_n(&(sender)->atomic.pending_data, true, __ATOMIC_RELAXED) +#define rrdpush_sender_pipe_clear_pending_data(sender) __atomic_store_n(&(sender)->atomic.pending_data, false, __ATOMIC_RELAXED) + +#define rrdpush_sender_last_buffer_recreate_get(sender) __atomic_load_n(&(sender)->atomic.last_buffer_recreate_s, __ATOMIC_RELAXED) +#define rrdpush_sender_last_buffer_recreate_set(sender, value) __atomic_store_n(&(sender)->atomic.last_buffer_recreate_s, value, __ATOMIC_RELAXED) + +#define rrdpush_sender_replication_buffer_full_set(sender, value) __atomic_store_n(&((sender)->replication.atomic.reached_max), value, __ATOMIC_SEQ_CST) +#define rrdpush_sender_replication_buffer_full_get(sender) __atomic_load_n(&((sender)->replication.atomic.reached_max), __ATOMIC_SEQ_CST) + +#define rrdpush_sender_set_buffer_used_percent(sender, value) __atomic_store_n(&((sender)->atomic.buffer_used_percentage), value, __ATOMIC_RELAXED) +#define rrdpush_sender_get_buffer_used_percent(sender) __atomic_load_n(&((sender)->atomic.buffer_used_percentage), __ATOMIC_RELAXED) + +#define rrdpush_sender_set_flush_time(sender) __atomic_store_n(&((sender)->atomic.last_flush_time_ut), now_realtime_usec(), __ATOMIC_RELAXED) +#define rrdpush_sender_get_flush_time(sender) __atomic_load_n(&((sender)->atomic.last_flush_time_ut), __ATOMIC_RELAXED) + +#define rrdpush_sender_replicating_charts(sender)
__atomic_load_n(&((sender)->replication.atomic.charts_replicating), __ATOMIC_RELAXED) +#define rrdpush_sender_replicating_charts_plus_one(sender) __atomic_add_fetch(&((sender)->replication.atomic.charts_replicating), 1, __ATOMIC_RELAXED) +#define rrdpush_sender_replicating_charts_minus_one(sender) __atomic_sub_fetch(&((sender)->replication.atomic.charts_replicating), 1, __ATOMIC_RELAXED) +#define rrdpush_sender_replicating_charts_zero(sender) __atomic_store_n(&((sender)->replication.atomic.charts_replicating), 0, __ATOMIC_RELAXED) + +#define rrdpush_sender_pending_replication_requests(sender) __atomic_load_n(&((sender)->replication.atomic.pending_requests), __ATOMIC_RELAXED) +#define rrdpush_sender_pending_replication_requests_plus_one(sender) __atomic_add_fetch(&((sender)->replication.atomic.pending_requests), 1, __ATOMIC_RELAXED) +#define rrdpush_sender_pending_replication_requests_minus_one(sender) __atomic_sub_fetch(&((sender)->replication.atomic.pending_requests), 1, __ATOMIC_RELAXED) +#define rrdpush_sender_pending_replication_requests_zero(sender) __atomic_store_n(&((sender)->replication.atomic.pending_requests), 0, __ATOMIC_RELAXED) + +BUFFER *sender_start(struct sender_state *s); +void sender_commit(struct sender_state *s, BUFFER *wb, STREAM_TRAFFIC_TYPE type); + +void *rrdpush_sender_thread(void *ptr); +void rrdpush_sender_thread_stop(RRDHOST *host, STREAM_HANDSHAKE reason, bool wait); + +void sender_thread_buffer_free(void); + +void rrdpush_signal_sender_to_wake_up(struct sender_state *s); + +bool rrdpush_sender_connect(struct sender_state *s); +void rrdpush_sender_cbuffer_recreate_timed(struct sender_state *s, time_t now_s, bool have_mutex, bool force); +bool rrdhost_sender_should_exit(struct sender_state *s); +void rrdpush_sender_thread_spawn(RRDHOST *host); + +#include "replication.h" + +#endif //NETDATA_SENDER_H diff --git a/src/streaming/stream-capabilities.c b/src/streaming/stream-capabilities.c new file mode 100644 index 000000000..b089e8f9d --- /dev/null +++ b/src/streaming/stream-capabilities.c @@ -0,0 +1,169 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "rrdpush.h" + +static STREAM_CAPABILITIES globally_disabled_capabilities = STREAM_CAP_NONE; + +static struct { + STREAM_CAPABILITIES cap; + const char *str; +} capability_names[] = { + {STREAM_CAP_V1, "V1" }, + {STREAM_CAP_V2, "V2" }, + {STREAM_CAP_VN, "VN" }, + {STREAM_CAP_VCAPS, "VCAPS" }, + {STREAM_CAP_HLABELS, "HLABELS" }, + {STREAM_CAP_CLAIM, "CLAIM" }, + {STREAM_CAP_CLABELS, "CLABELS" }, + {STREAM_CAP_LZ4, "LZ4" }, + {STREAM_CAP_FUNCTIONS, "FUNCTIONS" }, + {STREAM_CAP_REPLICATION, "REPLICATION" }, + {STREAM_CAP_BINARY, "BINARY" }, + {STREAM_CAP_INTERPOLATED, "INTERPOLATED" }, + {STREAM_CAP_IEEE754, "IEEE754" }, + {STREAM_CAP_DATA_WITH_ML, "ML" }, + {STREAM_CAP_DYNCFG, "DYNCFG" }, + {STREAM_CAP_SLOTS, "SLOTS" }, + {STREAM_CAP_ZSTD, "ZSTD" }, + {STREAM_CAP_GZIP, "GZIP" }, + {STREAM_CAP_BROTLI, "BROTLI" }, + {STREAM_CAP_PROGRESS, "PROGRESS" }, + {STREAM_CAP_NODE_ID, "NODEID" }, + {STREAM_CAP_PATHS, "PATHS" }, + {0 , NULL }, +}; + +STREAM_CAPABILITIES stream_capabilities_parse_one(const char *str) { + if (!str || !*str) + return STREAM_CAP_NONE; + + for (size_t i = 0; capability_names[i].str; i++) { + if (strcmp(capability_names[i].str, str) == 0) + return capability_names[i].cap; + } + + return STREAM_CAP_NONE; +} + +void stream_capabilities_to_string(BUFFER *wb, STREAM_CAPABILITIES caps) { + for(size_t i = 0; capability_names[i].str ; i++) { + if(caps & capability_names[i].cap) { + buffer_strcat(wb, 
capability_names[i].str); + buffer_strcat(wb, " "); + } + } +} + +void stream_capabilities_to_json_array(BUFFER *wb, STREAM_CAPABILITIES caps, const char *key) { + if(key) + buffer_json_member_add_array(wb, key); + else + buffer_json_add_array_item_array(wb); + + for(size_t i = 0; capability_names[i].str ; i++) { + if(caps & capability_names[i].cap) + buffer_json_add_array_item_string(wb, capability_names[i].str); + } + + buffer_json_array_close(wb); +} + +void log_receiver_capabilities(struct receiver_state *rpt) { + BUFFER *wb = buffer_create(100, NULL); + stream_capabilities_to_string(wb, rpt->capabilities); + + nd_log_daemon(NDLP_INFO, "STREAM %s [receive from [%s]:%s]: established link with negotiated capabilities: %s", + rrdhost_hostname(rpt->host), rpt->client_ip, rpt->client_port, buffer_tostring(wb)); + + buffer_free(wb); +} + +void log_sender_capabilities(struct sender_state *s) { + BUFFER *wb = buffer_create(100, NULL); + stream_capabilities_to_string(wb, s->capabilities); + + nd_log_daemon(NDLP_INFO, "STREAM %s [send to %s]: established link with negotiated capabilities: %s", + rrdhost_hostname(s->host), s->connected_to, buffer_tostring(wb)); + + buffer_free(wb); +} + +STREAM_CAPABILITIES stream_our_capabilities(RRDHOST *host, bool sender) { + STREAM_CAPABILITIES disabled_capabilities = globally_disabled_capabilities; + + if(host && sender) { + // we have DATA_WITH_ML capability + // we should remove the DATA_WITH_ML capability if our database does not have anomaly info + // this can happen under these conditions: 1. we don't run ML, and 2. we don't receive ML + spinlock_lock(&host->receiver_lock); + + if(!ml_host_running(host) && !stream_has_capability(host->receiver, STREAM_CAP_DATA_WITH_ML)) + disabled_capabilities |= STREAM_CAP_DATA_WITH_ML; + + spinlock_unlock(&host->receiver_lock); + + if(host->sender) + disabled_capabilities |= host->sender->disabled_capabilities; + } + + return (STREAM_CAP_V1 | + STREAM_CAP_V2 | + STREAM_CAP_VN | + STREAM_CAP_VCAPS | + STREAM_CAP_HLABELS | + STREAM_CAP_CLAIM | + STREAM_CAP_CLABELS | + STREAM_CAP_FUNCTIONS | + STREAM_CAP_REPLICATION | + STREAM_CAP_BINARY | + STREAM_CAP_INTERPOLATED | + STREAM_CAP_SLOTS | + STREAM_CAP_PROGRESS | + STREAM_CAP_COMPRESSIONS_AVAILABLE | + STREAM_CAP_DYNCFG | + STREAM_CAP_NODE_ID | + STREAM_CAP_PATHS | + STREAM_CAP_IEEE754 | + STREAM_CAP_DATA_WITH_ML | + 0) & ~disabled_capabilities; +} + +STREAM_CAPABILITIES convert_stream_version_to_capabilities(int32_t version, RRDHOST *host, bool sender) { + STREAM_CAPABILITIES caps = 0; + + if(version <= 1) caps = STREAM_CAP_V1; + else if(version < STREAM_OLD_VERSION_CLAIM) caps = STREAM_CAP_V2 | STREAM_CAP_HLABELS; + else if(version <= STREAM_OLD_VERSION_CLAIM) caps = STREAM_CAP_VN | STREAM_CAP_HLABELS | STREAM_CAP_CLAIM; + else if(version <= STREAM_OLD_VERSION_CLABELS) caps = STREAM_CAP_VN | STREAM_CAP_HLABELS | STREAM_CAP_CLAIM | STREAM_CAP_CLABELS; + else if(version <= STREAM_OLD_VERSION_LZ4) caps = STREAM_CAP_VN | STREAM_CAP_HLABELS | STREAM_CAP_CLAIM | STREAM_CAP_CLABELS | STREAM_CAP_LZ4_AVAILABLE; + else caps = version; + + if(caps & STREAM_CAP_VCAPS) + caps &= ~(STREAM_CAP_V1|STREAM_CAP_V2|STREAM_CAP_VN); + + if(caps & STREAM_CAP_VN) + caps &= ~(STREAM_CAP_V1|STREAM_CAP_V2); + + if(caps & STREAM_CAP_V2) + caps &= ~(STREAM_CAP_V1); + + STREAM_CAPABILITIES common_caps = caps & stream_our_capabilities(host, sender); + + if(!(common_caps & STREAM_CAP_INTERPOLATED)) + // DATA WITH ML requires INTERPOLATED + common_caps &= ~STREAM_CAP_DATA_WITH_ML; + + return 
common_caps; +} + +int32_t stream_capabilities_to_vn(uint32_t caps) { + if(caps & STREAM_CAP_LZ4) return STREAM_OLD_VERSION_LZ4; + if(caps & STREAM_CAP_CLABELS) return STREAM_OLD_VERSION_CLABELS; + return STREAM_OLD_VERSION_CLAIM; // if(caps & STREAM_CAP_CLAIM) +} + +void check_local_streaming_capabilities(void) { + ieee754_doubles = is_system_ieee754_double(); + if(!ieee754_doubles) + globally_disabled_capabilities |= STREAM_CAP_IEEE754; +} diff --git a/src/streaming/stream-capabilities.h b/src/streaming/stream-capabilities.h new file mode 100644 index 000000000..90a0e2190 --- /dev/null +++ b/src/streaming/stream-capabilities.h @@ -0,0 +1,100 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_STREAM_CAPABILITIES_H +#define NETDATA_STREAM_CAPABILITIES_H + +#include "libnetdata/libnetdata.h" + +// ---------------------------------------------------------------------------- +// obsolete versions - do not use anymore + +#define STREAM_OLD_VERSION_CLAIM 3 +#define STREAM_OLD_VERSION_CLABELS 4 +#define STREAM_OLD_VERSION_LZ4 5 + +// ---------------------------------------------------------------------------- +// capabilities negotiation + +typedef enum { + STREAM_CAP_NONE = 0, + + // do not use the first 3 bits + // they used to be versions 1, 2 and 3 + // before we introduced capabilities + + STREAM_CAP_V1 = (1 << 3), // v1 = the oldest protocol + STREAM_CAP_V2 = (1 << 4), // v2 = the second version of the protocol (with host labels) + STREAM_CAP_VN = (1 << 5), // version negotiation supported (for versions 3, 4, 5 of the protocol) + // v3 = claiming supported + // v4 = chart labels supported + // v5 = lz4 compression supported + STREAM_CAP_VCAPS = (1 << 6), // capabilities negotiation supported + STREAM_CAP_HLABELS = (1 << 7), // host labels supported + STREAM_CAP_CLAIM = (1 << 8), // claiming supported + STREAM_CAP_CLABELS = (1 << 9), // chart labels supported + STREAM_CAP_LZ4 = (1 << 10), // lz4 compression supported + STREAM_CAP_FUNCTIONS = (1 << 11), // plugin functions supported + STREAM_CAP_REPLICATION = (1 << 12), // replication supported + STREAM_CAP_BINARY = (1 << 13), // streaming supports binary data + STREAM_CAP_INTERPOLATED = (1 << 14), // streaming supports interpolated streaming of values + STREAM_CAP_IEEE754 = (1 << 15), // streaming supports binary/hex transfer of double values + STREAM_CAP_DATA_WITH_ML = (1 << 16), // streaming supports transferring anomaly bit + // STREAM_CAP_DYNCFG = (1 << 17), // leave this unused for as long as possible + STREAM_CAP_SLOTS = (1 << 18), // the sender can appoint a unique slot for each chart + STREAM_CAP_ZSTD = (1 << 19), // ZSTD compression supported + STREAM_CAP_GZIP = (1 << 20), // GZIP compression supported + STREAM_CAP_BROTLI = (1 << 21), // BROTLI compression supported + STREAM_CAP_PROGRESS = (1 << 22), // Functions PROGRESS support + STREAM_CAP_DYNCFG = (1 << 23), // support for DYNCFG + STREAM_CAP_NODE_ID = (1 << 24), // support for sending NODE_ID back to the child + STREAM_CAP_PATHS = (1 << 25), // support for sending PATHS upstream and downstream + + STREAM_CAP_INVALID = (1 << 30), // used as an invalid value for capabilities when this is set + // this must be signed int, so don't use the last bit + // needed for negotiating errors between parent and child +} STREAM_CAPABILITIES; + +#ifdef ENABLE_LZ4 +#define STREAM_CAP_LZ4_AVAILABLE STREAM_CAP_LZ4 +#else +#define STREAM_CAP_LZ4_AVAILABLE 0 +#endif // ENABLE_LZ4 + +#ifdef ENABLE_ZSTD +#define STREAM_CAP_ZSTD_AVAILABLE STREAM_CAP_ZSTD +#else +#define
STREAM_CAP_ZSTD_AVAILABLE 0 +#endif // ENABLE_ZSTD + +#ifdef ENABLE_BROTLI +#define STREAM_CAP_BROTLI_AVAILABLE STREAM_CAP_BROTLI +#else +#define STREAM_CAP_BROTLI_AVAILABLE 0 +#endif // ENABLE_BROTLI + +#define STREAM_CAP_COMPRESSIONS_AVAILABLE (STREAM_CAP_LZ4_AVAILABLE|STREAM_CAP_ZSTD_AVAILABLE|STREAM_CAP_BROTLI_AVAILABLE|STREAM_CAP_GZIP) + +#define stream_has_capability(rpt, capability) ((rpt) && ((rpt)->capabilities & (capability)) == (capability)) + +static inline bool stream_has_more_than_one_capability_of(STREAM_CAPABILITIES caps, STREAM_CAPABILITIES mask) { + STREAM_CAPABILITIES common = (STREAM_CAPABILITIES)(caps & mask); + return (common & (common - 1)) != 0 && common != 0; +} + +struct sender_state; +struct receiver_state; +struct rrdhost; + +STREAM_CAPABILITIES stream_capabilities_parse_one(const char *str); + +void stream_capabilities_to_string(BUFFER *wb, STREAM_CAPABILITIES caps); +void stream_capabilities_to_json_array(BUFFER *wb, STREAM_CAPABILITIES caps, const char *key); +void log_receiver_capabilities(struct receiver_state *rpt); +void log_sender_capabilities(struct sender_state *s); +STREAM_CAPABILITIES convert_stream_version_to_capabilities(int32_t version, struct rrdhost *host, bool sender); +int32_t stream_capabilities_to_vn(uint32_t caps); +STREAM_CAPABILITIES stream_our_capabilities(struct rrdhost *host, bool sender); + +void check_local_streaming_capabilities(void); + +#endif //NETDATA_STREAM_CAPABILITIES_H diff --git a/src/streaming/stream-compression/brotli.c b/src/streaming/stream-compression/brotli.c new file mode 100644 index 000000000..c2c09cdc5 --- /dev/null +++ b/src/streaming/stream-compression/brotli.c @@ -0,0 +1,142 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "brotli.h" + +#ifdef ENABLE_BROTLI +#include <brotli/encode.h> +#include <brotli/decode.h> + +void rrdpush_compressor_init_brotli(struct compressor_state *state) { + if (!state->initialized) { + state->initialized = true; + state->stream = BrotliEncoderCreateInstance(NULL, NULL, NULL); + + if (state->level < BROTLI_MIN_QUALITY) { + state->level = BROTLI_MIN_QUALITY; + } else if (state->level > BROTLI_MAX_QUALITY) { + state->level = BROTLI_MAX_QUALITY; + } + + BrotliEncoderSetParameter(state->stream, BROTLI_PARAM_QUALITY, state->level); + } +} + +void rrdpush_compressor_destroy_brotli(struct compressor_state *state) { + if (state->stream) { + BrotliEncoderDestroyInstance(state->stream); + state->stream = NULL; + } +} + +size_t rrdpush_compress_brotli(struct compressor_state *state, const char *data, size_t size, const char **out) { + if (unlikely(!state || !size || !out)) + return 0; + + simple_ring_buffer_make_room(&state->output, MAX(BrotliEncoderMaxCompressedSize(size), COMPRESSION_MAX_CHUNK)); + + size_t available_out = state->output.size; + + size_t available_in = size; + const uint8_t *next_in = (const uint8_t *)data; + uint8_t *next_out = (uint8_t *)state->output.data; + + if (!BrotliEncoderCompressStream(state->stream, BROTLI_OPERATION_FLUSH, &available_in, &next_in, &available_out, &next_out, NULL)) { + netdata_log_error("STREAM: Brotli compression failed."); + return 0; + } + + if(available_in != 0) { + netdata_log_error("STREAM: BrotliEncoderCompressStream() did not use all the input buffer, %zu bytes out of %zu remain", + available_in, size); + return 0; + } + + size_t compressed_size = state->output.size - available_out; + if(available_out == 0) { + netdata_log_error("STREAM: BrotliEncoderCompressStream() needs a bigger output buffer than the one we provided " + "(output buffer %zu bytes, input
payload %zu bytes)", + state->output.size, size); + return 0; + } + + if(compressed_size == 0) { + netdata_log_error("STREAM: BrotliEncoderCompressStream() did not produce any output from the input provided " + "(input buffer %zu bytes)", + size); + return 0; + } + + state->sender_locked.total_compressions++; + state->sender_locked.total_uncompressed += size - available_in; + state->sender_locked.total_compressed += compressed_size; + + *out = state->output.data; + return compressed_size; +} + +void rrdpush_decompressor_init_brotli(struct decompressor_state *state) { + if (!state->initialized) { + state->initialized = true; + state->stream = BrotliDecoderCreateInstance(NULL, NULL, NULL); + + simple_ring_buffer_make_room(&state->output, COMPRESSION_MAX_CHUNK); + } +} + +void rrdpush_decompressor_destroy_brotli(struct decompressor_state *state) { + if (state->stream) { + BrotliDecoderDestroyInstance(state->stream); + state->stream = NULL; + } +} + +size_t rrdpush_decompress_brotli(struct decompressor_state *state, const char *compressed_data, size_t compressed_size) { + if (unlikely(!state || !compressed_data || !compressed_size)) + return 0; + + // The state.output ring buffer is always EMPTY at this point, + // meaning that (state->output.read_pos == state->output.write_pos) + // However, THEY ARE NOT ZERO. + + size_t available_out = state->output.size; + size_t available_in = compressed_size; + const uint8_t *next_in = (const uint8_t *)compressed_data; + uint8_t *next_out = (uint8_t *)state->output.data; + + if (BrotliDecoderDecompressStream(state->stream, &available_in, &next_in, &available_out, &next_out, NULL) == BROTLI_DECODER_RESULT_ERROR) { + netdata_log_error("STREAM: Brotli decompression failed."); + return 0; + } + + if(available_in != 0) { + netdata_log_error("STREAM: BrotliDecoderDecompressStream() did not use all the input buffer, %zu bytes out of %zu remain", + available_in, compressed_size); + return 0; + } + + size_t decompressed_size = state->output.size - available_out; + if(available_out == 0) { + netdata_log_error("STREAM: BrotliDecoderDecompressStream() needs a bigger output buffer than the one we provided " + "(output buffer %zu bytes, compressed payload %zu bytes)", + state->output.size, compressed_size); + return 0; + } + + if(decompressed_size == 0) { + netdata_log_error("STREAM: BrotliDecoderDecompressStream() did not produce any output from the input provided " + "(input buffer %zu bytes)", + compressed_size); + return 0; + } + + state->output.read_pos = 0; + state->output.write_pos = decompressed_size; + + state->total_compressed += compressed_size - available_in; + state->total_uncompressed += decompressed_size; + state->total_compressions++; + + return decompressed_size; +} + +#endif // ENABLE_BROTLI diff --git a/src/streaming/stream-compression/brotli.h b/src/streaming/stream-compression/brotli.h new file mode 100644 index 000000000..4955e5a82 --- /dev/null +++ b/src/streaming/stream-compression/brotli.h @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "compression.h" + +#ifndef NETDATA_STREAMING_COMPRESSION_BROTLI_H +#define NETDATA_STREAMING_COMPRESSION_BROTLI_H + +void rrdpush_compressor_init_brotli(struct compressor_state *state); +void rrdpush_compressor_destroy_brotli(struct compressor_state *state); +size_t rrdpush_compress_brotli(struct compressor_state *state, const char *data, size_t size, const char **out); +size_t rrdpush_decompress_brotli(struct decompressor_state *state, const char *compressed_data, size_t 
compressed_size); +void rrdpush_decompressor_init_brotli(struct decompressor_state *state); +void rrdpush_decompressor_destroy_brotli(struct decompressor_state *state); + +#endif //NETDATA_STREAMING_COMPRESSION_BROTLI_H diff --git a/src/streaming/stream-compression/compression.c b/src/streaming/stream-compression/compression.c new file mode 100644 index 000000000..3c9930656 --- /dev/null +++ b/src/streaming/stream-compression/compression.c @@ -0,0 +1,703 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "compression.h" + +#include "gzip.h" + +#ifdef ENABLE_LZ4 +#include "lz4.h" +#endif + +#ifdef ENABLE_ZSTD +#include "zstd.h" +#endif + +#ifdef ENABLE_BROTLI +#include "brotli.h" +#endif + +int rrdpush_compression_levels[COMPRESSION_ALGORITHM_MAX] = { + [COMPRESSION_ALGORITHM_NONE] = 0, + [COMPRESSION_ALGORITHM_ZSTD] = 3, // 1 (faster) - 22 (smaller) + [COMPRESSION_ALGORITHM_LZ4] = 1, // 1 (smaller) - 9 (faster) + [COMPRESSION_ALGORITHM_BROTLI] = 3, // 0 (faster) - 11 (smaller) + [COMPRESSION_ALGORITHM_GZIP] = 1, // 1 (faster) - 9 (smaller) +}; + +void rrdpush_parse_compression_order(struct receiver_state *rpt, const char *order) { + // empty all slots + for(size_t i = 0; i < COMPRESSION_ALGORITHM_MAX ;i++) + rpt->config.compression_priorities[i] = STREAM_CAP_NONE; + + char *s = strdupz(order); + + char *words[COMPRESSION_ALGORITHM_MAX + 100] = { NULL }; + size_t num_words = quoted_strings_splitter_whitespace(s, words, COMPRESSION_ALGORITHM_MAX + 100); + size_t slot = 0; + STREAM_CAPABILITIES added = STREAM_CAP_NONE; + for(size_t i = 0; i < num_words && slot < COMPRESSION_ALGORITHM_MAX ;i++) { + if((STREAM_CAP_ZSTD_AVAILABLE) && strcasecmp(words[i], "zstd") == 0 && !(added & STREAM_CAP_ZSTD)) { + rpt->config.compression_priorities[slot++] = STREAM_CAP_ZSTD; + added |= STREAM_CAP_ZSTD; + } + else if((STREAM_CAP_LZ4_AVAILABLE) && strcasecmp(words[i], "lz4") == 0 && !(added & STREAM_CAP_LZ4)) { + rpt->config.compression_priorities[slot++] = STREAM_CAP_LZ4; + added |= STREAM_CAP_LZ4; + } + else if((STREAM_CAP_BROTLI_AVAILABLE) && strcasecmp(words[i], "brotli") == 0 && !(added & STREAM_CAP_BROTLI)) { + rpt->config.compression_priorities[slot++] = STREAM_CAP_BROTLI; + added |= STREAM_CAP_BROTLI; + } + else if(strcasecmp(words[i], "gzip") == 0 && !(added & STREAM_CAP_GZIP)) { + rpt->config.compression_priorities[slot++] = STREAM_CAP_GZIP; + added |= STREAM_CAP_GZIP; + } + } + + freez(s); + + // make sure all participate + if((STREAM_CAP_ZSTD_AVAILABLE) && slot < COMPRESSION_ALGORITHM_MAX && !(added & STREAM_CAP_ZSTD)) + rpt->config.compression_priorities[slot++] = STREAM_CAP_ZSTD; + if((STREAM_CAP_LZ4_AVAILABLE) && slot < COMPRESSION_ALGORITHM_MAX && !(added & STREAM_CAP_LZ4)) + rpt->config.compression_priorities[slot++] = STREAM_CAP_LZ4; + if((STREAM_CAP_BROTLI_AVAILABLE) && slot < COMPRESSION_ALGORITHM_MAX && !(added & STREAM_CAP_BROTLI)) + rpt->config.compression_priorities[slot++] = STREAM_CAP_BROTLI; + if(slot < COMPRESSION_ALGORITHM_MAX && !(added & STREAM_CAP_GZIP)) + rpt->config.compression_priorities[slot++] = STREAM_CAP_GZIP; +} + +void rrdpush_select_receiver_compression_algorithm(struct receiver_state *rpt) { + if (!rpt->config.rrdpush_compression) + rpt->capabilities &= ~STREAM_CAP_COMPRESSIONS_AVAILABLE; + + // select the right compression before sending our capabilities to the child + if(stream_has_more_than_one_capability_of(rpt->capabilities, STREAM_CAP_COMPRESSIONS_AVAILABLE)) { + STREAM_CAPABILITIES compressions = rpt->capabilities & STREAM_CAP_COMPRESSIONS_AVAILABLE; 
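+ // walk the parent's configured priority order and, at the first compression algorithm the child also offers, + // clear every other compression capability, so only the selected algorithm survives the negotiation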
+ for(int i = 0; i < COMPRESSION_ALGORITHM_MAX; i++) { + STREAM_CAPABILITIES c = rpt->config.compression_priorities[i]; + + if(!(c & STREAM_CAP_COMPRESSIONS_AVAILABLE)) + continue; + + if(compressions & c) { + STREAM_CAPABILITIES exclude = compressions; + exclude &= ~c; + + rpt->capabilities &= ~exclude; + break; + } + } + } +} + +bool rrdpush_compression_initialize(struct sender_state *s) { + rrdpush_compressor_destroy(&s->compressor); + + // IMPORTANT + // KEEP THE SAME ORDER IN DECOMPRESSION + + if(stream_has_capability(s, STREAM_CAP_ZSTD)) + s->compressor.algorithm = COMPRESSION_ALGORITHM_ZSTD; + else if(stream_has_capability(s, STREAM_CAP_LZ4)) + s->compressor.algorithm = COMPRESSION_ALGORITHM_LZ4; + else if(stream_has_capability(s, STREAM_CAP_BROTLI)) + s->compressor.algorithm = COMPRESSION_ALGORITHM_BROTLI; + else if(stream_has_capability(s, STREAM_CAP_GZIP)) + s->compressor.algorithm = COMPRESSION_ALGORITHM_GZIP; + else + s->compressor.algorithm = COMPRESSION_ALGORITHM_NONE; + + if(s->compressor.algorithm != COMPRESSION_ALGORITHM_NONE) { + s->compressor.level = rrdpush_compression_levels[s->compressor.algorithm]; + rrdpush_compressor_init(&s->compressor); + return true; + } + + return false; +} + +bool rrdpush_decompression_initialize(struct receiver_state *rpt) { + rrdpush_decompressor_destroy(&rpt->decompressor); + + // IMPORTANT + // KEEP THE SAME ORDER IN COMPRESSION + + if(stream_has_capability(rpt, STREAM_CAP_ZSTD)) + rpt->decompressor.algorithm = COMPRESSION_ALGORITHM_ZSTD; + else if(stream_has_capability(rpt, STREAM_CAP_LZ4)) + rpt->decompressor.algorithm = COMPRESSION_ALGORITHM_LZ4; + else if(stream_has_capability(rpt, STREAM_CAP_BROTLI)) + rpt->decompressor.algorithm = COMPRESSION_ALGORITHM_BROTLI; + else if(stream_has_capability(rpt, STREAM_CAP_GZIP)) + rpt->decompressor.algorithm = COMPRESSION_ALGORITHM_GZIP; + else + rpt->decompressor.algorithm = COMPRESSION_ALGORITHM_NONE; + + if(rpt->decompressor.algorithm != COMPRESSION_ALGORITHM_NONE) { + rrdpush_decompressor_init(&rpt->decompressor); + return true; + } + + return false; +} + +/* +* In case of a stream compression buffer overflow, +* inform the user through the error log file and +* deactivate compression by downgrading the stream protocol. +*/ +void rrdpush_compression_deactivate(struct sender_state *s) { + switch(s->compressor.algorithm) { + case COMPRESSION_ALGORITHM_MAX: + case COMPRESSION_ALGORITHM_NONE: + netdata_log_error("STREAM_COMPRESSION: compression error on 'host:%s' without any compression enabled. Ignoring error.", + rrdhost_hostname(s->host)); + break; + + case COMPRESSION_ALGORITHM_GZIP: + netdata_log_error("STREAM_COMPRESSION: GZIP compression error on 'host:%s'. Disabling GZIP for this node.", + rrdhost_hostname(s->host)); + s->disabled_capabilities |= STREAM_CAP_GZIP; + break; + + case COMPRESSION_ALGORITHM_LZ4: + netdata_log_error("STREAM_COMPRESSION: LZ4 compression error on 'host:%s'. Disabling LZ4 for this node.", + rrdhost_hostname(s->host)); + s->disabled_capabilities |= STREAM_CAP_LZ4; + break; + + case COMPRESSION_ALGORITHM_ZSTD: + netdata_log_error("STREAM_COMPRESSION: ZSTD compression error on 'host:%s'. Disabling ZSTD for this node.", + rrdhost_hostname(s->host)); + s->disabled_capabilities |= STREAM_CAP_ZSTD; + break; + + case COMPRESSION_ALGORITHM_BROTLI: + netdata_log_error("STREAM_COMPRESSION: BROTLI compression error on 'host:%s'.
Disabling BROTLI for this node.", + rrdhost_hostname(s->host)); + s->disabled_capabilities |= STREAM_CAP_BROTLI; + break; + } +} + +// ---------------------------------------------------------------------------- +// compressor public API + +void rrdpush_compressor_init(struct compressor_state *state) { + switch(state->algorithm) { +#ifdef ENABLE_ZSTD + case COMPRESSION_ALGORITHM_ZSTD: + rrdpush_compressor_init_zstd(state); + break; +#endif + +#ifdef ENABLE_LZ4 + case COMPRESSION_ALGORITHM_LZ4: + rrdpush_compressor_init_lz4(state); + break; +#endif + +#ifdef ENABLE_BROTLI + case COMPRESSION_ALGORITHM_BROTLI: + rrdpush_compressor_init_brotli(state); + break; +#endif + + default: + case COMPRESSION_ALGORITHM_GZIP: + rrdpush_compressor_init_gzip(state); + break; + } + + simple_ring_buffer_reset(&state->input); + simple_ring_buffer_reset(&state->output); +} + +void rrdpush_compressor_destroy(struct compressor_state *state) { + switch(state->algorithm) { +#ifdef ENABLE_ZSTD + case COMPRESSION_ALGORITHM_ZSTD: + rrdpush_compressor_destroy_zstd(state); + break; +#endif + +#ifdef ENABLE_LZ4 + case COMPRESSION_ALGORITHM_LZ4: + rrdpush_compressor_destroy_lz4(state); + break; +#endif + +#ifdef ENABLE_BROTLI + case COMPRESSION_ALGORITHM_BROTLI: + rrdpush_compressor_destroy_brotli(state); + break; +#endif + + default: + case COMPRESSION_ALGORITHM_GZIP: + rrdpush_compressor_destroy_gzip(state); + break; + } + + state->initialized = false; + + simple_ring_buffer_destroy(&state->input); + simple_ring_buffer_destroy(&state->output); +} + +size_t rrdpush_compress(struct compressor_state *state, const char *data, size_t size, const char **out) { + size_t ret = 0; + + switch(state->algorithm) { +#ifdef ENABLE_ZSTD + case COMPRESSION_ALGORITHM_ZSTD: + ret = rrdpush_compress_zstd(state, data, size, out); + break; +#endif + +#ifdef ENABLE_LZ4 + case COMPRESSION_ALGORITHM_LZ4: + ret = rrdpush_compress_lz4(state, data, size, out); + break; +#endif + +#ifdef ENABLE_BROTLI + case COMPRESSION_ALGORITHM_BROTLI: + ret = rrdpush_compress_brotli(state, data, size, out); + break; +#endif + + default: + case COMPRESSION_ALGORITHM_GZIP: + ret = rrdpush_compress_gzip(state, data, size, out); + break; + } + + if(unlikely(ret >= COMPRESSION_MAX_CHUNK)) { + netdata_log_error("RRDPUSH_COMPRESS: compressed data is %zu bytes, which is >= the max chunk size %d", + ret, COMPRESSION_MAX_CHUNK); + return 0; + } + + return ret; +} + +// ---------------------------------------------------------------------------- +// decompressor public API + +void rrdpush_decompressor_destroy(struct decompressor_state *state) { + if(unlikely(!state->initialized)) + return; + + switch(state->algorithm) { +#ifdef ENABLE_ZSTD + case COMPRESSION_ALGORITHM_ZSTD: + rrdpush_decompressor_destroy_zstd(state); + break; +#endif + +#ifdef ENABLE_LZ4 + case COMPRESSION_ALGORITHM_LZ4: + rrdpush_decompressor_destroy_lz4(state); + break; +#endif + +#ifdef ENABLE_BROTLI + case COMPRESSION_ALGORITHM_BROTLI: + rrdpush_decompressor_destroy_brotli(state); + break; +#endif + + default: + case COMPRESSION_ALGORITHM_GZIP: + rrdpush_decompressor_destroy_gzip(state); + break; + } + + simple_ring_buffer_destroy(&state->output); + + state->initialized = false; +} + +void rrdpush_decompressor_init(struct decompressor_state *state) { + switch(state->algorithm) { +#ifdef ENABLE_ZSTD + case COMPRESSION_ALGORITHM_ZSTD: + rrdpush_decompressor_init_zstd(state); + break; +#endif + +#ifdef ENABLE_LZ4 + case COMPRESSION_ALGORITHM_LZ4: + rrdpush_decompressor_init_lz4(state); + break;
+#endif + +#ifdef ENABLE_BROTLI + case COMPRESSION_ALGORITHM_BROTLI: + rrdpush_decompressor_init_brotli(state); + break; +#endif + + default: + case COMPRESSION_ALGORITHM_GZIP: + rrdpush_decompressor_init_gzip(state); + break; + } + + state->signature_size = RRDPUSH_COMPRESSION_SIGNATURE_SIZE; + simple_ring_buffer_reset(&state->output); +} + +size_t rrdpush_decompress(struct decompressor_state *state, const char *compressed_data, size_t compressed_size) { + if (unlikely(state->output.read_pos != state->output.write_pos)) + fatal("RRDPUSH_DECOMPRESS: asked to decompress new data, while there are unread data in the decompression buffer!"); + + size_t ret = 0; + + switch(state->algorithm) { +#ifdef ENABLE_ZSTD + case COMPRESSION_ALGORITHM_ZSTD: + ret = rrdpush_decompress_zstd(state, compressed_data, compressed_size); + break; +#endif + +#ifdef ENABLE_LZ4 + case COMPRESSION_ALGORITHM_LZ4: + ret = rrdpush_decompress_lz4(state, compressed_data, compressed_size); + break; +#endif + +#ifdef ENABLE_BROTLI + case COMPRESSION_ALGORITHM_BROTLI: + ret = rrdpush_decompress_brotli(state, compressed_data, compressed_size); + break; +#endif + + default: + case COMPRESSION_ALGORITHM_GZIP: + ret = rrdpush_decompress_gzip(state, compressed_data, compressed_size); + break; + } + + // for backwards compatibility we cannot check for COMPRESSION_MAX_MSG_SIZE, + // because old children may send payloads this big. + if(unlikely(ret > COMPRESSION_MAX_CHUNK)) { + netdata_log_error("RRDPUSH_DECOMPRESS: decompressed data is %zu bytes, which is bigger than the max chunk size %d", + ret, COMPRESSION_MAX_CHUNK); + return 0; + } + + return ret; +} + +// ---------------------------------------------------------------------------- +// unit test + +void unittest_generate_random_name(char *dst, size_t size) { + if(size < 7) + size = 7; + + size_t len = 5 + os_random32() % (size - 6); + + for(size_t i = 0; i < len ; i++) { + if(os_random8() % 2 == 0) + dst[i] = 'A' + os_random8() % 26; + else + dst[i] = 'a' + os_random8() % 26; + } + + dst[len] = '\0'; +} + +void unittest_generate_message(BUFFER *wb, time_t now_s, size_t counter) { + bool with_slots = true; + NUMBER_ENCODING integer_encoding = NUMBER_ENCODING_BASE64; + NUMBER_ENCODING doubles_encoding = NUMBER_ENCODING_BASE64; + time_t update_every = 1; + time_t point_end_time_s = now_s; + time_t wall_clock_time_s = now_s; + size_t chart_slot = counter + 1; + size_t dimensions = 2 + os_random8() % 5; + char chart[RRD_ID_LENGTH_MAX + 1] = "name"; + unittest_generate_random_name(chart, 5 + os_random8() % 30); + + buffer_fast_strcat(wb, PLUGINSD_KEYWORD_BEGIN_V2, sizeof(PLUGINSD_KEYWORD_BEGIN_V2) - 1); + + if(with_slots) { + buffer_fast_strcat(wb, " "PLUGINSD_KEYWORD_SLOT":", sizeof(PLUGINSD_KEYWORD_SLOT) - 1 + 2); + buffer_print_uint64_encoded(wb, integer_encoding, chart_slot); + } + + buffer_fast_strcat(wb, " '", 2); + buffer_strcat(wb, chart); + buffer_fast_strcat(wb, "' ", 2); + buffer_print_uint64_encoded(wb, integer_encoding, update_every); + buffer_fast_strcat(wb, " ", 1); + buffer_print_uint64_encoded(wb, integer_encoding, point_end_time_s); + buffer_fast_strcat(wb, " ", 1); + if(point_end_time_s == wall_clock_time_s) + buffer_fast_strcat(wb, "#", 1); + else + buffer_print_uint64_encoded(wb, integer_encoding, wall_clock_time_s); + buffer_fast_strcat(wb, "\n", 1); + + + for(size_t d = 0; d < dimensions ;d++) { + size_t dim_slot = d + 1; + char dim_id[RRD_ID_LENGTH_MAX + 1] = "dimension"; + unittest_generate_random_name(dim_id, 10 + os_random8() % 20); + int64_t
last_collected_value = (os_random8() % 2 == 0) ? (int64_t)(counter + d) : (int64_t)os_random32(); + NETDATA_DOUBLE value = (os_random8() % 2 == 0) ? (NETDATA_DOUBLE)os_random64() / ((NETDATA_DOUBLE)os_random64() + 1) : (NETDATA_DOUBLE)last_collected_value; + SN_FLAGS flags = (os_random16() % 1000 == 0) ? SN_FLAG_NONE : SN_FLAG_NOT_ANOMALOUS; + + buffer_fast_strcat(wb, PLUGINSD_KEYWORD_SET_V2, sizeof(PLUGINSD_KEYWORD_SET_V2) - 1); + + if(with_slots) { + buffer_fast_strcat(wb, " "PLUGINSD_KEYWORD_SLOT":", sizeof(PLUGINSD_KEYWORD_SLOT) - 1 + 2); + buffer_print_uint64_encoded(wb, integer_encoding, dim_slot); + } + + buffer_fast_strcat(wb, " '", 2); + buffer_strcat(wb, dim_id); + buffer_fast_strcat(wb, "' ", 2); + buffer_print_int64_encoded(wb, integer_encoding, last_collected_value); + buffer_fast_strcat(wb, " ", 1); + + if((NETDATA_DOUBLE)last_collected_value == value) + buffer_fast_strcat(wb, "#", 1); + else + buffer_print_netdata_double_encoded(wb, doubles_encoding, value); + + buffer_fast_strcat(wb, " ", 1); + buffer_print_sn_flags(wb, flags, true); + buffer_fast_strcat(wb, "\n", 1); + } + + buffer_fast_strcat(wb, PLUGINSD_KEYWORD_END_V2 "\n", sizeof(PLUGINSD_KEYWORD_END_V2) - 1 + 1); +} + +int unittest_rrdpush_compression_speed(compression_algorithm_t algorithm, const char *name) { + fprintf(stderr, "\nTesting streaming compression speed with %s\n", name); + + struct compressor_state cctx = { + .initialized = false, + .algorithm = algorithm, + }; + struct decompressor_state dctx = { + .initialized = false, + .algorithm = algorithm, + }; + + rrdpush_compressor_init(&cctx); + rrdpush_decompressor_init(&dctx); + + int errors = 0; + + BUFFER *wb = buffer_create(COMPRESSION_MAX_MSG_SIZE, NULL); + time_t now_s = now_realtime_sec(); + usec_t compression_ut = 0; + usec_t decompression_ut = 0; + size_t bytes_compressed = 0; + size_t bytes_uncompressed = 0; + + usec_t compression_started_ut = now_monotonic_usec(); + usec_t decompression_started_ut = compression_started_ut; + + for(int i = 0; i < 10000 ;i++) { + compression_started_ut = now_monotonic_usec(); + decompression_ut += compression_started_ut - decompression_started_ut; + + buffer_flush(wb); + while(buffer_strlen(wb) < COMPRESSION_MAX_MSG_SIZE - 1024) + unittest_generate_message(wb, now_s, i); + + const char *txt = buffer_tostring(wb); + size_t txt_len = buffer_strlen(wb); + bytes_uncompressed += txt_len; + + const char *out; + size_t size = rrdpush_compress(&cctx, txt, txt_len, &out); + + bytes_compressed += size; + decompression_started_ut = now_monotonic_usec(); + compression_ut += decompression_started_ut - compression_started_ut; + + if(size == 0) { + fprintf(stderr, "iteration %d: compressed size %zu is zero\n", + i, size); + errors++; + goto cleanup; + } + else if(size >= COMPRESSION_MAX_CHUNK) { + fprintf(stderr, "iteration %d: compressed size %zu exceeds max allowed size\n", + i, size); + errors++; + goto cleanup; + } + else { + size_t dtxt_len = rrdpush_decompress(&dctx, out, size); + char *dtxt = (char *) &dctx.output.data[dctx.output.read_pos]; + + if(rrdpush_decompressed_bytes_in_buffer(&dctx) != dtxt_len) { + fprintf(stderr, "iteration %d: decompressed size %zu does not match rrdpush_decompressed_bytes_in_buffer() %zu\n", + i, dtxt_len, rrdpush_decompressed_bytes_in_buffer(&dctx) + ); + errors++; + goto cleanup; + } + + if(!dtxt_len) { + fprintf(stderr, "iteration %d: decompressed size is zero\n", i); + errors++; + goto cleanup; + } + else if(dtxt_len != txt_len) { + fprintf(stderr, "iteration %d: decompressed size %zu does not
match original size %zu\n", + i, dtxt_len, txt_len + ); + errors++; + goto cleanup; + } + else { + if(memcmp(txt, dtxt, txt_len) != 0) { + fprintf(stderr, "iteration %d: decompressed data '%s' does not match original data of length %zu\n", + i, dtxt, txt_len); + errors++; + goto cleanup; + } + } + } + + // here we are supposed to copy the data and advance the position + dctx.output.read_pos += rrdpush_decompressed_bytes_in_buffer(&dctx); + } + +cleanup: + rrdpush_compressor_destroy(&cctx); + rrdpush_decompressor_destroy(&dctx); + + if(errors) + fprintf(stderr, "Compression with %s: FAILED (%d errors)\n", name, errors); + else + fprintf(stderr, "Compression with %s: OK " + "(compression %zu usec, decompression %zu usec, bytes raw %zu, compressed %zu, savings ratio %0.2f%%)\n", + name, compression_ut, decompression_ut, + bytes_uncompressed, bytes_compressed, + 100.0 - (double)bytes_compressed * 100.0 / (double)bytes_uncompressed); + + return errors; +} + +int unittest_rrdpush_compression(compression_algorithm_t algorithm, const char *name) { + fprintf(stderr, "\nTesting streaming compression with %s\n", name); + + struct compressor_state cctx = { + .initialized = false, + .algorithm = algorithm, + }; + struct decompressor_state dctx = { + .initialized = false, + .algorithm = algorithm, + }; + + char txt[COMPRESSION_MAX_MSG_SIZE]; + + rrdpush_compressor_init(&cctx); + rrdpush_decompressor_init(&dctx); + + int errors = 0; + + memset(txt, '=', COMPRESSION_MAX_MSG_SIZE); + + for(int i = 0; i < COMPRESSION_MAX_MSG_SIZE ;i++) { + txt[i] = 'A' + (i % 26); + size_t txt_len = i + 1; + + const char *out; + size_t size = rrdpush_compress(&cctx, txt, txt_len, &out); + + if(size == 0) { + fprintf(stderr, "iteration %d: compressed size %zu is zero\n", + i, size); + errors++; + goto cleanup; + } + else if(size >= COMPRESSION_MAX_CHUNK) { + fprintf(stderr, "iteration %d: compressed size %zu exceeds max allowed size\n", + i, size); + errors++; + goto cleanup; + } + else { + size_t dtxt_len = rrdpush_decompress(&dctx, out, size); + char *dtxt = (char *) &dctx.output.data[dctx.output.read_pos]; + + if(rrdpush_decompressed_bytes_in_buffer(&dctx) != dtxt_len) { + fprintf(stderr, "iteration %d: decompressed size %zu does not match rrdpush_decompressed_bytes_in_buffer() %zu\n", + i, dtxt_len, rrdpush_decompressed_bytes_in_buffer(&dctx) + ); + errors++; + goto cleanup; + } + + if(!dtxt_len) { + fprintf(stderr, "iteration %d: decompressed size is zero\n", i); + errors++; + goto cleanup; + } + else if(dtxt_len != txt_len) { + fprintf(stderr, "iteration %d: decompressed size %zu does not match original size %zu\n", + i, dtxt_len, txt_len + ); + errors++; + goto cleanup; + } + else { + if(memcmp(txt, dtxt, txt_len) != 0) { + txt[txt_len] = '\0'; + dtxt[txt_len + 5] = '\0'; + + fprintf(stderr, "iteration %d: decompressed data '%s' does not match original data '%s' of length %zu\n", + i, dtxt, txt, txt_len); + errors++; + goto cleanup; + } + } + } + + // fill the compressed buffer with garbage + memset((void *)out, 'x', size); + + // here we are supposed to copy the data and advance the position + dctx.output.read_pos += rrdpush_decompressed_bytes_in_buffer(&dctx); + } + +cleanup: + rrdpush_compressor_destroy(&cctx); + rrdpush_decompressor_destroy(&dctx); + + if(errors) + fprintf(stderr, "Compression with %s: FAILED (%d errors)\n", name, errors); + else + fprintf(stderr, "Compression with %s: OK\n", name); + + return errors; +} + +int unittest_rrdpush_compressions(void) { + int ret = 0; + + ret +=
unittest_rrdpush_compression(COMPRESSION_ALGORITHM_ZSTD, "ZSTD"); + ret += unittest_rrdpush_compression(COMPRESSION_ALGORITHM_LZ4, "LZ4"); + ret += unittest_rrdpush_compression(COMPRESSION_ALGORITHM_BROTLI, "BROTLI"); + ret += unittest_rrdpush_compression(COMPRESSION_ALGORITHM_GZIP, "GZIP"); + + ret += unittest_rrdpush_compression_speed(COMPRESSION_ALGORITHM_ZSTD, "ZSTD"); + ret += unittest_rrdpush_compression_speed(COMPRESSION_ALGORITHM_LZ4, "LZ4"); + ret += unittest_rrdpush_compression_speed(COMPRESSION_ALGORITHM_BROTLI, "BROTLI"); + ret += unittest_rrdpush_compression_speed(COMPRESSION_ALGORITHM_GZIP, "GZIP"); + + return ret; +} diff --git a/src/streaming/stream-compression/compression.h b/src/streaming/stream-compression/compression.h new file mode 100644 index 000000000..37f589b85 --- /dev/null +++ b/src/streaming/stream-compression/compression.h @@ -0,0 +1,183 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_RRDPUSH_COMPRESSION_H +#define NETDATA_RRDPUSH_COMPRESSION_H 1 + +#include "libnetdata/libnetdata.h" + +// signature MUST end with a newline + +#if COMPRESSION_MAX_MSG_SIZE >= (COMPRESSION_MAX_CHUNK - COMPRESSION_MAX_OVERHEAD) +#error "COMPRESSION_MAX_MSG_SIZE >= (COMPRESSION_MAX_CHUNK - COMPRESSION_MAX_OVERHEAD)" +#endif + +typedef uint32_t rrdpush_signature_t; +#define RRDPUSH_COMPRESSION_SIGNATURE ((rrdpush_signature_t)('z' | 0x80) | (0x80 << 8) | (0x80 << 16) | ('\n' << 24)) +#define RRDPUSH_COMPRESSION_SIGNATURE_MASK ((rrdpush_signature_t) 0xffU | (0x80U << 8) | (0x80U << 16) | (0xffU << 24)) +#define RRDPUSH_COMPRESSION_SIGNATURE_SIZE sizeof(rrdpush_signature_t) + +static inline rrdpush_signature_t rrdpush_compress_encode_signature(size_t compressed_data_size) { + rrdpush_signature_t len = ((compressed_data_size & 0x7f) | 0x80 | (((compressed_data_size & (0x7f << 7)) << 1) | 0x8000)) << 8; + return len | RRDPUSH_COMPRESSION_SIGNATURE; +} + +typedef enum { + COMPRESSION_ALGORITHM_NONE = 0, + COMPRESSION_ALGORITHM_ZSTD, + COMPRESSION_ALGORITHM_LZ4, + COMPRESSION_ALGORITHM_GZIP, + COMPRESSION_ALGORITHM_BROTLI, + + // terminator + COMPRESSION_ALGORITHM_MAX, +} compression_algorithm_t; + +extern int rrdpush_compression_levels[COMPRESSION_ALGORITHM_MAX]; + +// this defines the order the algorithms will be selected by the receiver (parent) +#define RRDPUSH_COMPRESSION_ALGORITHMS_ORDER "zstd lz4 brotli gzip" + +// ---------------------------------------------------------------------------- + +typedef struct simple_ring_buffer { + const char *data; + size_t size; + size_t read_pos; + size_t write_pos; +} SIMPLE_RING_BUFFER; + +static inline void simple_ring_buffer_reset(SIMPLE_RING_BUFFER *b) { + b->read_pos = b->write_pos = 0; +} + +static inline void simple_ring_buffer_make_room(SIMPLE_RING_BUFFER *b, size_t size) { + if(b->write_pos + size > b->size) { + if(!b->size) + b->size = COMPRESSION_MAX_CHUNK; + else + b->size *= 2; + + if(b->write_pos + size > b->size) + b->size += size; + + b->data = (const char *)reallocz((void *)b->data, b->size); + } +} + +static inline void simple_ring_buffer_append_data(SIMPLE_RING_BUFFER *b, const void *data, size_t size) { + simple_ring_buffer_make_room(b, size); + memcpy((void *)(b->data + b->write_pos), data, size); + b->write_pos += size; +} + +static inline void simple_ring_buffer_destroy(SIMPLE_RING_BUFFER *b) { + freez((void *)b->data); + b->data = NULL; + b->read_pos = b->write_pos = b->size = 0; +} + +// ---------------------------------------------------------------------------- + +struct compressor_state { + bool 
initialized; + compression_algorithm_t algorithm; + + SIMPLE_RING_BUFFER input; + SIMPLE_RING_BUFFER output; + + int level; + void *stream; + + struct { + size_t total_compressed; + size_t total_uncompressed; + size_t total_compressions; + } sender_locked; +}; + +void rrdpush_compressor_init(struct compressor_state *state); +void rrdpush_compressor_destroy(struct compressor_state *state); +size_t rrdpush_compress(struct compressor_state *state, const char *data, size_t size, const char **out); + +// ---------------------------------------------------------------------------- + +struct decompressor_state { + bool initialized; + compression_algorithm_t algorithm; + size_t signature_size; + + size_t total_compressed; + size_t total_uncompressed; + size_t total_compressions; + + SIMPLE_RING_BUFFER output; + + void *stream; +}; + +void rrdpush_decompressor_destroy(struct decompressor_state *state); +void rrdpush_decompressor_init(struct decompressor_state *state); +size_t rrdpush_decompress(struct decompressor_state *state, const char *compressed_data, size_t compressed_size); + +static inline size_t rrdpush_decompress_decode_signature(const char *data, size_t data_size) { + if (unlikely(!data || !data_size)) + return 0; + + if (unlikely(data_size != RRDPUSH_COMPRESSION_SIGNATURE_SIZE)) + return 0; + + rrdpush_signature_t sign = *(rrdpush_signature_t *)data; + if (unlikely((sign & RRDPUSH_COMPRESSION_SIGNATURE_MASK) != RRDPUSH_COMPRESSION_SIGNATURE)) + return 0; + + size_t length = ((sign >> 8) & 0x7f) | ((sign >> 9) & (0x7f << 7)); + return length; +} + +static inline size_t rrdpush_decompressor_start(struct decompressor_state *state, const char *header, size_t header_size) { + if(unlikely(state->output.read_pos != state->output.write_pos)) + fatal("RRDPUSH DECOMPRESS: asked to decompress new data, while there are unread data in the decompression buffer!"); + + return rrdpush_decompress_decode_signature(header, header_size); +} + +static inline size_t rrdpush_decompressed_bytes_in_buffer(struct decompressor_state *state) { + if(unlikely(state->output.read_pos > state->output.write_pos)) + fatal("RRDPUSH DECOMPRESS: invalid read/write stream positions"); + + return state->output.write_pos - state->output.read_pos; +} + +static inline size_t rrdpush_decompressor_get(struct decompressor_state *state, char *dst, size_t size) { + if (unlikely(!state || !size || !dst)) + return 0; + + size_t remaining = rrdpush_decompressed_bytes_in_buffer(state); + + if(unlikely(!remaining)) + return 0; + + size_t bytes_to_return = size; + if(bytes_to_return > remaining) + bytes_to_return = remaining; + + memcpy(dst, state->output.data + state->output.read_pos, bytes_to_return); + state->output.read_pos += bytes_to_return; + + if(unlikely(state->output.read_pos > state->output.write_pos)) + fatal("RRDPUSH DECOMPRESS: invalid read/write stream positions"); + + return bytes_to_return; +} + +// ---------------------------------------------------------------------------- + +#include "../rrdpush.h" + +bool rrdpush_compression_initialize(struct sender_state *s); +bool rrdpush_decompression_initialize(struct receiver_state *rpt); +void rrdpush_parse_compression_order(struct receiver_state *rpt, const char *order); +void rrdpush_select_receiver_compression_algorithm(struct receiver_state *rpt); +void rrdpush_compression_deactivate(struct sender_state *s); + +#endif // NETDATA_RRDPUSH_COMPRESSION_H 1 diff --git a/src/streaming/stream-compression/gzip.c b/src/streaming/stream-compression/gzip.c new file mode 100644 index 
000000000..d63e9afbe --- /dev/null +++ b/src/streaming/stream-compression/gzip.c @@ -0,0 +1,164 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "gzip.h" +#include <zlib.h> + +void rrdpush_compressor_init_gzip(struct compressor_state *state) { + if (!state->initialized) { + state->initialized = true; + + // Initialize deflate stream + z_stream *strm = state->stream = (z_stream *) mallocz(sizeof(z_stream)); + strm->zalloc = Z_NULL; + strm->zfree = Z_NULL; + strm->opaque = Z_NULL; + + if(state->level < Z_BEST_SPEED) + state->level = Z_BEST_SPEED; + + if(state->level > Z_BEST_COMPRESSION) + state->level = Z_BEST_COMPRESSION; + + // int r = deflateInit2(strm, Z_BEST_COMPRESSION, Z_DEFLATED, 15 + 16, 8, Z_DEFAULT_STRATEGY); + int r = deflateInit2(strm, state->level, Z_DEFLATED, 15 + 16, 8, Z_DEFAULT_STRATEGY); + if (r != Z_OK) { + netdata_log_error("Failed to initialize deflate with error: %d", r); + freez(state->stream); + state->initialized = false; + return; + } + + } +} + +void rrdpush_compressor_destroy_gzip(struct compressor_state *state) { + if (state->stream) { + deflateEnd(state->stream); + freez(state->stream); + state->stream = NULL; + } +} + +size_t rrdpush_compress_gzip(struct compressor_state *state, const char *data, size_t size, const char **out) { + if (unlikely(!state || !size || !out)) + return 0; + + simple_ring_buffer_make_room(&state->output, deflateBound(state->stream, size)); + + z_stream *strm = state->stream; + strm->avail_in = (uInt)size; + strm->next_in = (Bytef *)data; + strm->avail_out = (uInt)state->output.size; + strm->next_out = (Bytef *)state->output.data; + + int ret = deflate(strm, Z_SYNC_FLUSH); + if (ret != Z_OK && ret != Z_STREAM_END) { + netdata_log_error("STREAM: deflate() failed with error %d", ret); + return 0; + } + + if(strm->avail_in != 0) { + netdata_log_error("STREAM: deflate() did not use all the input buffer, %u bytes out of %zu remain", + strm->avail_in, size); + return 0; + } + + if(strm->avail_out == 0) { + netdata_log_error("STREAM: deflate() needs a bigger output buffer than the one we provided " + "(output buffer %zu bytes, compressed payload %zu bytes)", + state->output.size, size); + return 0; + } + + size_t compressed_data_size = state->output.size - strm->avail_out; + + if(compressed_data_size == 0) { + netdata_log_error("STREAM: deflate() did not produce any output " + "(output buffer %zu bytes, compressed payload %zu bytes)", + state->output.size, size); + return 0; + } + + state->sender_locked.total_compressions++; + state->sender_locked.total_uncompressed += size; + state->sender_locked.total_compressed += compressed_data_size; + + *out = state->output.data; + return compressed_data_size; +} + +void rrdpush_decompressor_init_gzip(struct decompressor_state *state) { + if (!state->initialized) { + state->initialized = true; + + // Initialize inflate stream + z_stream *strm = state->stream = (z_stream *)mallocz(sizeof(z_stream)); + strm->zalloc = Z_NULL; + strm->zfree = Z_NULL; + strm->opaque = Z_NULL; + + int r = inflateInit2(strm, 15 + 16); + if (r != Z_OK) { + netdata_log_error("Failed to initialize inflate with error: %d", r); + freez(state->stream); + state->initialized = false; + return; + } + + simple_ring_buffer_make_room(&state->output, COMPRESSION_MAX_CHUNK); + } +} + +void rrdpush_decompressor_destroy_gzip(struct decompressor_state *state) { + if (state->stream) { + inflateEnd(state->stream); + freez(state->stream); + state->stream = NULL; + } +} + +size_t rrdpush_decompress_gzip(struct decompressor_state *state,
const char *compressed_data, size_t compressed_size) { + if (unlikely(!state || !compressed_data || !compressed_size)) + return 0; + + // The state.output ring buffer is always EMPTY at this point, + // meaning that (state->output.read_pos == state->output.write_pos) + // However, THEY ARE NOT ZERO. + + z_stream *strm = state->stream; + strm->avail_in = (uInt)compressed_size; + strm->next_in = (Bytef *)compressed_data; + strm->avail_out = (uInt)state->output.size; + strm->next_out = (Bytef *)state->output.data; + + int ret = inflate(strm, Z_SYNC_FLUSH); + if (ret != Z_STREAM_END && ret != Z_OK) { + netdata_log_error("RRDPUSH DECOMPRESS: inflate() failed with error %d", ret); + return 0; + } + + if(strm->avail_in != 0) { + netdata_log_error("RRDPUSH DECOMPRESS: inflate() did not use all compressed data we provided " + "(compressed payload %zu bytes, remaining to be uncompressed %u)" + , compressed_size, strm->avail_in); + return 0; + } + + if(strm->avail_out == 0) { + netdata_log_error("RRDPUSH DECOMPRESS: inflate() needs a bigger output buffer than the one we provided " + "(compressed payload %zu bytes, output buffer size %zu bytes)" + , compressed_size, state->output.size); + return 0; + } + + size_t decompressed_size = state->output.size - strm->avail_out; + + state->output.read_pos = 0; + state->output.write_pos = decompressed_size; + + state->total_compressed += compressed_size; + state->total_uncompressed += decompressed_size; + state->total_compressions++; + + return decompressed_size; +} diff --git a/src/streaming/stream-compression/gzip.h b/src/streaming/stream-compression/gzip.h new file mode 100644 index 000000000..85f34bc6d --- /dev/null +++ b/src/streaming/stream-compression/gzip.h @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "compression.h" + +#ifndef NETDATA_STREAMING_COMPRESSION_GZIP_H +#define NETDATA_STREAMING_COMPRESSION_GZIP_H + +void rrdpush_compressor_init_gzip(struct compressor_state *state); +void rrdpush_compressor_destroy_gzip(struct compressor_state *state); +size_t rrdpush_compress_gzip(struct compressor_state *state, const char *data, size_t size, const char **out); +size_t rrdpush_decompress_gzip(struct decompressor_state *state, const char *compressed_data, size_t compressed_size); +void rrdpush_decompressor_init_gzip(struct decompressor_state *state); +void rrdpush_decompressor_destroy_gzip(struct decompressor_state *state); + +#endif //NETDATA_STREAMING_COMPRESSION_GZIP_H diff --git a/src/streaming/stream-compression/lz4.c b/src/streaming/stream-compression/lz4.c new file mode 100644 index 000000000..284192153 --- /dev/null +++ b/src/streaming/stream-compression/lz4.c @@ -0,0 +1,143 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "lz4.h" + +#ifdef ENABLE_LZ4 +#include <lz4.h> + +// ---------------------------------------------------------------------------- +// compress + +void rrdpush_compressor_init_lz4(struct compressor_state *state) { + if(!state->initialized) { + state->initialized = true; + state->stream = LZ4_createStream(); + + // LZ4 needs access to the last 64KB of source data + // so, we keep twice the size of each message + simple_ring_buffer_make_room(&state->input, 65536 + COMPRESSION_MAX_CHUNK * 2); + } +} + +void rrdpush_compressor_destroy_lz4(struct compressor_state *state) { + if (state->stream) { + LZ4_freeStream(state->stream); + state->stream = NULL; + } +} + +/* + * Compress the given block of data + * Compressed data will remain in the internal buffer until the next invocation + * Return the
size of the compressed data block and set the pointer to the internal buffer via the last argument, + * or 0 in case of error + */ +size_t rrdpush_compress_lz4(struct compressor_state *state, const char *data, size_t size, const char **out) { + if(unlikely(!state || !size || !out)) + return 0; + + // we need to keep the last 64K of our previous source data + // as they were in the ring buffer + + simple_ring_buffer_make_room(&state->output, LZ4_COMPRESSBOUND(size)); + + if(state->input.write_pos + size > state->input.size) + // the input buffer cannot fit our data, restart from zero + simple_ring_buffer_reset(&state->input); + + simple_ring_buffer_append_data(&state->input, data, size); + + long int compressed_data_size = LZ4_compress_fast_continue( + state->stream, + state->input.data + state->input.read_pos, + (char *)state->output.data, + (int)(state->input.write_pos - state->input.read_pos), + (int)state->output.size, + state->level); + + if (compressed_data_size <= 0) { + netdata_log_error("STREAM: LZ4_compress_fast_continue() returned %ld " + "(source is %zu bytes, output buffer can fit %zu bytes)", + compressed_data_size, size, state->output.size); + return 0; + } + + state->input.read_pos = state->input.write_pos; + + state->sender_locked.total_compressions++; + state->sender_locked.total_uncompressed += size; + state->sender_locked.total_compressed += compressed_data_size; + + *out = state->output.data; + return compressed_data_size; +} + +// ---------------------------------------------------------------------------- +// decompress + +void rrdpush_decompressor_init_lz4(struct decompressor_state *state) { + if(!state->initialized) { + state->initialized = true; + state->stream = LZ4_createStreamDecode(); + simple_ring_buffer_make_room(&state->output, 65536 + COMPRESSION_MAX_CHUNK * 2); + } +} + +void rrdpush_decompressor_destroy_lz4(struct decompressor_state *state) { + if (state->stream) { + LZ4_freeStreamDecode(state->stream); + state->stream = NULL; + } +} + +/* + * Decompress the compressed data in the internal buffer + * Return the size of uncompressed data or 0 for error + */ +size_t rrdpush_decompress_lz4(struct decompressor_state *state, const char *compressed_data, size_t compressed_size) { + if (unlikely(!state || !compressed_data || !compressed_size)) + return 0; + + // The state.output ring buffer is always EMPTY at this point, + // meaning that (state->output.read_pos == state->output.write_pos) + // However, THEY ARE NOT ZERO.
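+    // NOTE: unlike the gzip and zstd decompressors, this one does not rewind
+    // read_pos/write_pos on every call: LZ4_decompress_safe_continue() may need
+    // up to the last 64KB of previously decompressed data to still be present
+    // in this buffer, so it is rewound only when the next chunk would not fit
+    // (checked immediately below).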
+ + if (unlikely(state->output.write_pos + COMPRESSION_MAX_CHUNK > state->output.size)) + // the output buffer cannot fit our data, restart from zero + simple_ring_buffer_reset(&state->output); + + long int decompressed_size = LZ4_decompress_safe_continue( + state->stream + , compressed_data + , (char *)(state->output.data + state->output.write_pos) + , (int)compressed_size + , (int)(state->output.size - state->output.write_pos) + ); + + if (unlikely(decompressed_size < 0)) { + netdata_log_error("RRDPUSH DECOMPRESS: LZ4_decompress_safe_continue() returned negative value: %ld " + "(compressed chunk is %zu bytes)" + , decompressed_size, compressed_size); + return 0; + } + + if(unlikely(decompressed_size + state->output.write_pos > state->output.size)) + fatal("RRDPUSH DECOMPRESS: LZ4_decompress_safe_continue() overflowed the stream buffer " + "(size: %zu, pos: %zu, added: %ld, exceeding the buffer by %zu)" + , state->output.size + , state->output.write_pos + , decompressed_size + , (size_t)(state->output.write_pos + decompressed_size - state->output.size) + ); + + state->output.write_pos += decompressed_size; + + // statistics + state->total_compressed += compressed_size; + state->total_uncompressed += decompressed_size; + state->total_compressions++; + + return decompressed_size; +} + +#endif // ENABLE_LZ4 diff --git a/src/streaming/stream-compression/lz4.h b/src/streaming/stream-compression/lz4.h new file mode 100644 index 000000000..69f0fadcc --- /dev/null +++ b/src/streaming/stream-compression/lz4.h @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "compression.h" + +#ifndef NETDATA_STREAMING_COMPRESSION_LZ4_H +#define NETDATA_STREAMING_COMPRESSION_LZ4_H + +#ifdef ENABLE_LZ4 + +void rrdpush_compressor_init_lz4(struct compressor_state *state); +void rrdpush_compressor_destroy_lz4(struct compressor_state *state); +size_t rrdpush_compress_lz4(struct compressor_state *state, const char *data, size_t size, const char **out); +size_t rrdpush_decompress_lz4(struct decompressor_state *state, const char *compressed_data, size_t compressed_size); +void rrdpush_decompressor_init_lz4(struct decompressor_state *state); +void rrdpush_decompressor_destroy_lz4(struct decompressor_state *state); + +#endif // ENABLE_LZ4 + +#endif //NETDATA_STREAMING_COMPRESSION_LZ4_H diff --git a/src/streaming/stream-compression/zstd.c b/src/streaming/stream-compression/zstd.c new file mode 100644 index 000000000..0ce27c0d3 --- /dev/null +++ b/src/streaming/stream-compression/zstd.c @@ -0,0 +1,163 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "zstd.h" + +#ifdef ENABLE_ZSTD +#include <zstd.h> + +void rrdpush_compressor_init_zstd(struct compressor_state *state) { + if(!state->initialized) { + state->initialized = true; + state->stream = ZSTD_createCStream(); + + if(state->level < 1) + state->level = 1; + + if(state->level > ZSTD_maxCLevel()) + state->level = ZSTD_maxCLevel(); + + size_t ret = ZSTD_initCStream(state->stream, state->level); + if(ZSTD_isError(ret)) + netdata_log_error("STREAM: ZSTD_initCStream() returned error: %s", ZSTD_getErrorName(ret)); + + // ZSTD_CCtx_setParameter(state->stream, ZSTD_c_compressionLevel, 1); + // ZSTD_CCtx_setParameter(state->stream, ZSTD_c_strategy, ZSTD_fast); + } +} + +void rrdpush_compressor_destroy_zstd(struct compressor_state *state) { + if(state->stream) { + ZSTD_freeCStream(state->stream); + state->stream = NULL; + } +} + +size_t rrdpush_compress_zstd(struct compressor_state *state, const char *data, size_t size, const char **out) { +
if(unlikely(!state || !size || !out)) + return 0; + + ZSTD_inBuffer inBuffer = { + .pos = 0, + .size = size, + .src = data, + }; + + size_t wanted_size = MAX(ZSTD_compressBound(inBuffer.size - inBuffer.pos), ZSTD_CStreamOutSize()); + simple_ring_buffer_make_room(&state->output, wanted_size); + + ZSTD_outBuffer outBuffer = { + .pos = 0, + .size = state->output.size, + .dst = (void *)state->output.data, + }; + + // compress + size_t ret = ZSTD_compressStream(state->stream, &outBuffer, &inBuffer); + + // error handling + if(ZSTD_isError(ret)) { + netdata_log_error("STREAM: ZSTD_compressStream() returned error: %s", ZSTD_getErrorName(ret)); + return 0; + } + + if(inBuffer.pos < inBuffer.size) { + netdata_log_error("STREAM: ZSTD_compressStream() left unprocessed input (source payload %zu bytes, consumed %zu bytes)", + inBuffer.size, inBuffer.pos); + return 0; + } + + if(outBuffer.pos == 0) { + // ZSTD needs more input to flush the output, so let's flush it manually + ret = ZSTD_flushStream(state->stream, &outBuffer); + + if(ZSTD_isError(ret)) { + netdata_log_error("STREAM: ZSTD_flushStream() returned error: %s", ZSTD_getErrorName(ret)); + return 0; + } + + if(outBuffer.pos == 0) { + netdata_log_error("STREAM: ZSTD_compressStream() returned zero compressed bytes " + "(source is %zu bytes, output buffer can fit %zu bytes) " + , size, outBuffer.size); + return 0; + } + } + + state->sender_locked.total_compressions++; + state->sender_locked.total_uncompressed += size; + state->sender_locked.total_compressed += outBuffer.pos; + + // return values + *out = state->output.data; + return outBuffer.pos; +} + +void rrdpush_decompressor_init_zstd(struct decompressor_state *state) { + if(!state->initialized) { + state->initialized = true; + state->stream = ZSTD_createDStream(); + + size_t ret = ZSTD_initDStream(state->stream); + if(ZSTD_isError(ret)) + netdata_log_error("STREAM: ZSTD_initDStream() returned error: %s", ZSTD_getErrorName(ret)); + + simple_ring_buffer_make_room(&state->output, MAX(COMPRESSION_MAX_CHUNK, ZSTD_DStreamOutSize())); + } +} + +void rrdpush_decompressor_destroy_zstd(struct decompressor_state *state) { + if (state->stream) { + ZSTD_freeDStream(state->stream); + state->stream = NULL; + } +} + +size_t rrdpush_decompress_zstd(struct decompressor_state *state, const char *compressed_data, size_t compressed_size) { + if (unlikely(!state || !compressed_data || !compressed_size)) + return 0; + + // The state.output ring buffer is always EMPTY at this point, + // meaning that (state->output.read_pos == state->output.write_pos) + // However, THEY ARE NOT ZERO.
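+    // NOTE: zstd maintains its own look-back window inside the DStream, so the
+    // output buffer can safely be overwritten from its start on every call;
+    // read_pos and write_pos are rewritten below, once the decompressed size
+    // is known.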
+ + ZSTD_inBuffer inBuffer = { + .pos = 0, + .size = compressed_size, + .src = compressed_data, + }; + + ZSTD_outBuffer outBuffer = { + .pos = 0, + .dst = (char *)state->output.data, + .size = state->output.size, + }; + + size_t ret = ZSTD_decompressStream( + state->stream + , &outBuffer + , &inBuffer); + + if(ZSTD_isError(ret)) { + netdata_log_error("STREAM: ZSTD_decompressStream() returned error: %s", ZSTD_getErrorName(ret)); + return 0; + } + + if(inBuffer.pos < inBuffer.size) + fatal("RRDPUSH DECOMPRESS: ZSTD_decompressStream() consumed only %zu " + "of the %zu compressed bytes provided", + inBuffer.pos, inBuffer.size); + + size_t decompressed_size = outBuffer.pos; + + state->output.read_pos = 0; + state->output.write_pos = outBuffer.pos; + + // statistics + state->total_compressed += compressed_size; + state->total_uncompressed += decompressed_size; + state->total_compressions++; + + return decompressed_size; +} + +#endif // ENABLE_ZSTD diff --git a/src/streaming/stream-compression/zstd.h b/src/streaming/stream-compression/zstd.h new file mode 100644 index 000000000..bfabbf89d --- /dev/null +++ b/src/streaming/stream-compression/zstd.h @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "compression.h" + +#ifndef NETDATA_STREAMING_COMPRESSION_ZSTD_H +#define NETDATA_STREAMING_COMPRESSION_ZSTD_H + +#ifdef ENABLE_ZSTD + +void rrdpush_compressor_init_zstd(struct compressor_state *state); +void rrdpush_compressor_destroy_zstd(struct compressor_state *state); +size_t rrdpush_compress_zstd(struct compressor_state *state, const char *data, size_t size, const char **out); +size_t rrdpush_decompress_zstd(struct decompressor_state *state, const char *compressed_data, size_t compressed_size); +void rrdpush_decompressor_init_zstd(struct decompressor_state *state); +void rrdpush_decompressor_destroy_zstd(struct decompressor_state *state); + +#endif // ENABLE_ZSTD + +#endif //NETDATA_STREAMING_COMPRESSION_ZSTD_H diff --git a/src/streaming/stream-conf.c b/src/streaming/stream-conf.c new file mode 100644 index 000000000..8fc9e0819 --- /dev/null +++ b/src/streaming/stream-conf.c @@ -0,0 +1,137 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "stream-conf.h" + +struct config stream_config = APPCONFIG_INITIALIZER; + +bool stream_conf_send_enabled = false; +bool stream_conf_compression_enabled = true; +bool stream_conf_replication_enabled = true; + +const char *stream_conf_send_destination = NULL; +const char *stream_conf_send_api_key = NULL; +const char *stream_conf_send_charts_matching = "*"; + +time_t stream_conf_replication_period = 86400; +time_t stream_conf_replication_step = 600; + +const char *stream_conf_ssl_ca_path = NULL; +const char *stream_conf_ssl_ca_file = NULL; + +// to have the remote netdata re-sync the charts +// to its current clock, we send for this many +// iterations a BEGIN line without microseconds +// this is for the first iterations of each chart +unsigned int stream_conf_initial_clock_resync_iterations = 60; + +static void stream_conf_load() { + errno_clear(); + char *filename = filename_from_path_entry_strdupz(netdata_configured_user_config_dir, "stream.conf"); + if(!appconfig_load(&stream_config, filename, 0, NULL)) { + nd_log_daemon(NDLP_NOTICE, "CONFIG: cannot load user config '%s'.
Will try stock config.", filename); + freez(filename); + + filename = filename_from_path_entry_strdupz(netdata_configured_stock_config_dir, "stream.conf"); + if(!appconfig_load(&stream_config, filename, 0, NULL)) + nd_log_daemon(NDLP_NOTICE, "CONFIG: cannot load stock config '%s'. Running with internal defaults.", filename); + } + + freez(filename); + + appconfig_move(&stream_config, + CONFIG_SECTION_STREAM, "timeout seconds", + CONFIG_SECTION_STREAM, "timeout"); + + appconfig_move(&stream_config, + CONFIG_SECTION_STREAM, "reconnect delay seconds", + CONFIG_SECTION_STREAM, "reconnect delay"); + + appconfig_move_everywhere(&stream_config, "default memory mode", "db"); + appconfig_move_everywhere(&stream_config, "memory mode", "db"); + appconfig_move_everywhere(&stream_config, "db mode", "db"); + appconfig_move_everywhere(&stream_config, "default history", "retention"); + appconfig_move_everywhere(&stream_config, "history", "retention"); + appconfig_move_everywhere(&stream_config, "default proxy enabled", "proxy enabled"); + appconfig_move_everywhere(&stream_config, "default proxy destination", "proxy destination"); + appconfig_move_everywhere(&stream_config, "default proxy api key", "proxy api key"); + appconfig_move_everywhere(&stream_config, "default proxy send charts matching", "proxy send charts matching"); + appconfig_move_everywhere(&stream_config, "default health log history", "health log retention"); + appconfig_move_everywhere(&stream_config, "health log history", "health log retention"); + appconfig_move_everywhere(&stream_config, "seconds to replicate", "replication period"); + appconfig_move_everywhere(&stream_config, "seconds per replication step", "replication step"); + appconfig_move_everywhere(&stream_config, "default postpone alarms on connect seconds", "postpone alerts on connect"); + appconfig_move_everywhere(&stream_config, "postpone alarms on connect seconds", "postpone alerts on connect"); +} + +bool stream_conf_receiver_needs_dbengine(void) { + return stream_conf_needs_dbengine(&stream_config); +} + +bool stream_conf_init() { + // -------------------------------------------------------------------- + // load stream.conf + stream_conf_load(); + + stream_conf_send_enabled = + appconfig_get_boolean(&stream_config, CONFIG_SECTION_STREAM, "enabled", stream_conf_send_enabled); + + stream_conf_send_destination = + appconfig_get(&stream_config, CONFIG_SECTION_STREAM, "destination", ""); + + stream_conf_send_api_key = + appconfig_get(&stream_config, CONFIG_SECTION_STREAM, "api key", ""); + + stream_conf_send_charts_matching = + appconfig_get(&stream_config, CONFIG_SECTION_STREAM, "send charts matching", stream_conf_send_charts_matching); + + stream_conf_replication_enabled = + config_get_boolean(CONFIG_SECTION_DB, "enable replication", stream_conf_replication_enabled); + + stream_conf_replication_period = + config_get_duration_seconds(CONFIG_SECTION_DB, "replication period", stream_conf_replication_period); + + stream_conf_replication_step = + config_get_duration_seconds(CONFIG_SECTION_DB, "replication step", stream_conf_replication_step); + + rrdhost_free_orphan_time_s = + config_get_duration_seconds(CONFIG_SECTION_DB, "cleanup orphan hosts after", rrdhost_free_orphan_time_s); + + stream_conf_compression_enabled = + appconfig_get_boolean(&stream_config, CONFIG_SECTION_STREAM, + "enable compression", stream_conf_compression_enabled); + + rrdpush_compression_levels[COMPRESSION_ALGORITHM_BROTLI] = (int)appconfig_get_number( + &stream_config, CONFIG_SECTION_STREAM, "brotli 
compression level", + rrdpush_compression_levels[COMPRESSION_ALGORITHM_BROTLI]); + + rrdpush_compression_levels[COMPRESSION_ALGORITHM_ZSTD] = (int)appconfig_get_number( + &stream_config, CONFIG_SECTION_STREAM, "zstd compression level", + rrdpush_compression_levels[COMPRESSION_ALGORITHM_ZSTD]); + + rrdpush_compression_levels[COMPRESSION_ALGORITHM_LZ4] = (int)appconfig_get_number( + &stream_config, CONFIG_SECTION_STREAM, "lz4 compression acceleration", + rrdpush_compression_levels[COMPRESSION_ALGORITHM_LZ4]); + + rrdpush_compression_levels[COMPRESSION_ALGORITHM_GZIP] = (int)appconfig_get_number( + &stream_config, CONFIG_SECTION_STREAM, "gzip compression level", + rrdpush_compression_levels[COMPRESSION_ALGORITHM_GZIP]); + + if(stream_conf_send_enabled && (!stream_conf_send_destination || !*stream_conf_send_destination || !stream_conf_send_api_key || !*stream_conf_send_api_key)) { + nd_log_daemon(NDLP_WARNING, "STREAM [send]: cannot enable sending thread - information is missing."); + stream_conf_send_enabled = false; + } + + netdata_ssl_validate_certificate_sender = !appconfig_get_boolean(&stream_config, CONFIG_SECTION_STREAM, "ssl skip certificate verification", !netdata_ssl_validate_certificate); + + if(!netdata_ssl_validate_certificate_sender) + nd_log_daemon(NDLP_NOTICE, "SSL: streaming senders will skip SSL certificates verification."); + + stream_conf_ssl_ca_path = appconfig_get(&stream_config, CONFIG_SECTION_STREAM, "CApath", NULL); + stream_conf_ssl_ca_file = appconfig_get(&stream_config, CONFIG_SECTION_STREAM, "CAfile", NULL); + + return stream_conf_send_enabled; +} + +bool stream_conf_configured_as_parent() { + return stream_conf_has_uuid_section(&stream_config); +} diff --git a/src/streaming/stream-conf.h b/src/streaming/stream-conf.h new file mode 100644 index 000000000..da7a88123 --- /dev/null +++ b/src/streaming/stream-conf.h @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_STREAM_CONF_H +#define NETDATA_STREAM_CONF_H + +#include "libnetdata/libnetdata.h" +#include "daemon/common.h" + +extern bool stream_conf_send_enabled; +extern bool stream_conf_compression_enabled; +extern bool stream_conf_replication_enabled; + +extern const char *stream_conf_send_destination; +extern const char *stream_conf_send_api_key; +extern const char *stream_conf_send_charts_matching; +extern time_t stream_conf_replication_period; +extern time_t stream_conf_replication_step; +extern unsigned int stream_conf_initial_clock_resync_iterations; + +extern struct config stream_config; +extern const char *stream_conf_ssl_ca_path; +extern const char *stream_conf_ssl_ca_file; + +bool stream_conf_init(); +bool stream_conf_receiver_needs_dbengine(); +bool stream_conf_configured_as_parent(); + +#endif //NETDATA_STREAM_CONF_H diff --git a/src/streaming/stream-handshake.c b/src/streaming/stream-handshake.c new file mode 100644 index 000000000..e338df950 --- /dev/null +++ b/src/streaming/stream-handshake.c @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "rrdpush.h" + +static struct { + STREAM_HANDSHAKE err; + const char *str; +} handshake_errors[] = { + { STREAM_HANDSHAKE_OK_V3, "CONNECTED" }, + { STREAM_HANDSHAKE_OK_V2, "CONNECTED" }, + { STREAM_HANDSHAKE_OK_V1, "CONNECTED" }, + { STREAM_HANDSHAKE_NEVER, "" }, + { STREAM_HANDSHAKE_ERROR_BAD_HANDSHAKE, "BAD HANDSHAKE" }, + { STREAM_HANDSHAKE_ERROR_LOCALHOST, "LOCALHOST" }, + { STREAM_HANDSHAKE_ERROR_ALREADY_CONNECTED, "ALREADY CONNECTED" }, + { STREAM_HANDSHAKE_ERROR_DENIED, "DENIED" }, + { 
STREAM_HANDSHAKE_ERROR_SEND_TIMEOUT, "SEND TIMEOUT" }, + { STREAM_HANDSHAKE_ERROR_RECEIVE_TIMEOUT, "RECEIVE TIMEOUT" }, + { STREAM_HANDSHAKE_ERROR_INVALID_CERTIFICATE, "INVALID CERTIFICATE" }, + { STREAM_HANDSHAKE_ERROR_SSL_ERROR, "SSL ERROR" }, + { STREAM_HANDSHAKE_ERROR_CANT_CONNECT, "CANT CONNECT" }, + { STREAM_HANDSHAKE_BUSY_TRY_LATER, "BUSY TRY LATER" }, + { STREAM_HANDSHAKE_INTERNAL_ERROR, "INTERNAL ERROR" }, + { STREAM_HANDSHAKE_INITIALIZATION, "REMOTE IS INITIALIZING" }, + { STREAM_HANDSHAKE_DISCONNECT_HOST_CLEANUP, "DISCONNECTED HOST CLEANUP" }, + { STREAM_HANDSHAKE_DISCONNECT_STALE_RECEIVER, "DISCONNECTED STALE RECEIVER" }, + { STREAM_HANDSHAKE_DISCONNECT_SHUTDOWN, "DISCONNECTED SHUTDOWN REQUESTED" }, + { STREAM_HANDSHAKE_DISCONNECT_NETDATA_EXIT, "DISCONNECTED NETDATA EXIT" }, + { STREAM_HANDSHAKE_DISCONNECT_PARSER_EXIT, "DISCONNECTED PARSE ENDED" }, + {STREAM_HANDSHAKE_DISCONNECT_UNKNOWN_SOCKET_READ_ERROR, "DISCONNECTED UNKNOWN SOCKET READ ERROR" }, + { STREAM_HANDSHAKE_DISCONNECT_PARSER_FAILED, "DISCONNECTED PARSE ERROR" }, + { STREAM_HANDSHAKE_DISCONNECT_RECEIVER_LEFT, "DISCONNECTED RECEIVER LEFT" }, + { STREAM_HANDSHAKE_DISCONNECT_ORPHAN_HOST, "DISCONNECTED ORPHAN HOST" }, + { STREAM_HANDSHAKE_NON_STREAMABLE_HOST, "NON STREAMABLE HOST" }, + { STREAM_HANDSHAKE_DISCONNECT_NOT_SUFFICIENT_READ_BUFFER, "DISCONNECTED NOT SUFFICIENT READ BUFFER" }, + {STREAM_HANDSHAKE_DISCONNECT_SOCKET_EOF, "DISCONNECTED SOCKET EOF" }, + {STREAM_HANDSHAKE_DISCONNECT_SOCKET_READ_FAILED, "DISCONNECTED SOCKET READ FAILED" }, + {STREAM_HANDSHAKE_DISCONNECT_SOCKET_READ_TIMEOUT, "DISCONNECTED SOCKET READ TIMEOUT" }, + { 0, NULL }, +}; + +const char *stream_handshake_error_to_string(STREAM_HANDSHAKE handshake_error) { + if(handshake_error >= STREAM_HANDSHAKE_OK_V1) + // handshake_error is the whole version / capabilities number + return "CONNECTED"; + + for(size_t i = 0; handshake_errors[i].str ; i++) { + if(handshake_error == handshake_errors[i].err) + return handshake_errors[i].str; + } + + return "UNKNOWN"; +} diff --git a/src/streaming/stream-handshake.h b/src/streaming/stream-handshake.h new file mode 100644 index 000000000..9b66cab97 --- /dev/null +++ b/src/streaming/stream-handshake.h @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_STREAM_HANDSHAKE_H +#define NETDATA_STREAM_HANDSHAKE_H + +#define HTTP_HEADER_SIZE 8192 + +#define STREAMING_PROTOCOL_VERSION "1.1" +#define START_STREAMING_PROMPT_V1 "Hit me baby, push them over..." +#define START_STREAMING_PROMPT_V2 "Hit me baby, push them over and bring the host labels..." +#define START_STREAMING_PROMPT_VN "Hit me baby, push them over with the version=" + +#define START_STREAMING_ERROR_SAME_LOCALHOST "Don't hit me baby, you are trying to stream my localhost back" +#define START_STREAMING_ERROR_ALREADY_STREAMING "This GUID is already streaming to this server" +#define START_STREAMING_ERROR_NOT_PERMITTED "You are not permitted to access this. Check the logs for more info." +#define START_STREAMING_ERROR_BUSY_TRY_LATER "The server is too busy now to accept this request. Try later." +#define START_STREAMING_ERROR_INTERNAL_ERROR "The server encountered an internal error. Try later." +#define START_STREAMING_ERROR_INITIALIZATION "The server is initializing. Try later." 
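+
+// Illustrative usage sketch (not a declaration from this header): any value
+// >= STREAM_HANDSHAKE_OK_V1 reports as "CONNECTED", while negative codes map
+// to the table in stream-handshake.c, e.g.:
+//
+//    const char *reason = stream_handshake_error_to_string(STREAM_HANDSHAKE_ERROR_DENIED);
+//    // reason is now "DENIED"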
+ +#define RRDPUSH_STATUS_CONNECTED "CONNECTED" +#define RRDPUSH_STATUS_ALREADY_CONNECTED "ALREADY CONNECTED" +#define RRDPUSH_STATUS_DISCONNECTED "DISCONNECTED" +#define RRDPUSH_STATUS_RATE_LIMIT "RATE LIMIT TRY LATER" +#define RRDPUSH_STATUS_INITIALIZATION_IN_PROGRESS "INITIALIZATION IN PROGRESS RETRY LATER" +#define RRDPUSH_STATUS_INTERNAL_SERVER_ERROR "INTERNAL SERVER ERROR DROPPING CONNECTION" +#define RRDPUSH_STATUS_DUPLICATE_RECEIVER "DUPLICATE RECEIVER DROPPING CONNECTION" +#define RRDPUSH_STATUS_CANT_REPLY "CANT REPLY DROPPING CONNECTION" +#define RRDPUSH_STATUS_NO_HOSTNAME "NO HOSTNAME PERMISSION DENIED" +#define RRDPUSH_STATUS_NO_API_KEY "NO API KEY PERMISSION DENIED" +#define RRDPUSH_STATUS_INVALID_API_KEY "INVALID API KEY PERMISSION DENIED" +#define RRDPUSH_STATUS_NO_MACHINE_GUID "NO MACHINE GUID PERMISSION DENIED" +#define RRDPUSH_STATUS_MACHINE_GUID_DISABLED "MACHINE GUID DISABLED PERMISSION DENIED" +#define RRDPUSH_STATUS_INVALID_MACHINE_GUID "INVALID MACHINE GUID PERMISSION DENIED" +#define RRDPUSH_STATUS_API_KEY_DISABLED "API KEY DISABLED PERMISSION DENIED" +#define RRDPUSH_STATUS_NOT_ALLOWED_IP "NOT ALLOWED IP PERMISSION DENIED" +#define RRDPUSH_STATUS_LOCALHOST "LOCALHOST PERMISSION DENIED" +#define RRDPUSH_STATUS_PERMISSION_DENIED "PERMISSION DENIED" +#define RRDPUSH_STATUS_BAD_HANDSHAKE "BAD HANDSHAKE" +#define RRDPUSH_STATUS_TIMEOUT "TIMEOUT" +#define RRDPUSH_STATUS_CANT_UPGRADE_CONNECTION "CANT UPGRADE CONNECTION" +#define RRDPUSH_STATUS_SSL_ERROR "SSL ERROR" +#define RRDPUSH_STATUS_INVALID_SSL_CERTIFICATE "INVALID SSL CERTIFICATE" +#define RRDPUSH_STATUS_CANT_ESTABLISH_SSL_CONNECTION "CANT ESTABLISH SSL CONNECTION" + +typedef enum { + STREAM_HANDSHAKE_OK_V3 = 3, // v3+ + STREAM_HANDSHAKE_OK_V2 = 2, // v2 + STREAM_HANDSHAKE_OK_V1 = 1, // v1 + STREAM_HANDSHAKE_NEVER = 0, // never tried to connect + STREAM_HANDSHAKE_ERROR_BAD_HANDSHAKE = -1, + STREAM_HANDSHAKE_ERROR_LOCALHOST = -2, + STREAM_HANDSHAKE_ERROR_ALREADY_CONNECTED = -3, + STREAM_HANDSHAKE_ERROR_DENIED = -4, + STREAM_HANDSHAKE_ERROR_SEND_TIMEOUT = -5, + STREAM_HANDSHAKE_ERROR_RECEIVE_TIMEOUT = -6, + STREAM_HANDSHAKE_ERROR_INVALID_CERTIFICATE = -7, + STREAM_HANDSHAKE_ERROR_SSL_ERROR = -8, + STREAM_HANDSHAKE_ERROR_CANT_CONNECT = -9, + STREAM_HANDSHAKE_BUSY_TRY_LATER = -10, + STREAM_HANDSHAKE_INTERNAL_ERROR = -11, + STREAM_HANDSHAKE_INITIALIZATION = -12, + STREAM_HANDSHAKE_DISCONNECT_HOST_CLEANUP = -13, + STREAM_HANDSHAKE_DISCONNECT_STALE_RECEIVER = -14, + STREAM_HANDSHAKE_DISCONNECT_SHUTDOWN = -15, + STREAM_HANDSHAKE_DISCONNECT_NETDATA_EXIT = -16, + STREAM_HANDSHAKE_DISCONNECT_PARSER_EXIT = -17, + STREAM_HANDSHAKE_DISCONNECT_UNKNOWN_SOCKET_READ_ERROR = -18, + STREAM_HANDSHAKE_DISCONNECT_PARSER_FAILED = -19, + STREAM_HANDSHAKE_DISCONNECT_RECEIVER_LEFT = -20, + STREAM_HANDSHAKE_DISCONNECT_ORPHAN_HOST = -21, + STREAM_HANDSHAKE_NON_STREAMABLE_HOST = -22, + STREAM_HANDSHAKE_DISCONNECT_NOT_SUFFICIENT_READ_BUFFER = -23, + STREAM_HANDSHAKE_DISCONNECT_SOCKET_EOF = -24, + STREAM_HANDSHAKE_DISCONNECT_SOCKET_READ_FAILED = -25, + STREAM_HANDSHAKE_DISCONNECT_SOCKET_READ_TIMEOUT = -26, + STREAM_HANDSHAKE_ERROR_HTTP_UPGRADE = -27, + +} STREAM_HANDSHAKE; + +const char *stream_handshake_error_to_string(STREAM_HANDSHAKE handshake_error); + +#endif //NETDATA_STREAM_HANDSHAKE_H diff --git a/src/streaming/stream-path.c b/src/streaming/stream-path.c new file mode 100644 index 000000000..7aad9a0bf --- /dev/null +++ b/src/streaming/stream-path.c @@ -0,0 +1,353 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "stream-path.h" 
+#include "rrdpush.h" +#include "plugins.d/pluginsd_internals.h" + +ENUM_STR_MAP_DEFINE(STREAM_PATH_FLAGS) = { + { .id = STREAM_PATH_FLAG_ACLK, .name = "aclk" }, + + // terminator + { . id = 0, .name = NULL } +}; + +BITMAP_STR_DEFINE_FUNCTIONS(STREAM_PATH_FLAGS, STREAM_PATH_FLAG_NONE, ""); + +static void stream_path_clear(STREAM_PATH *p) { + string_freez(p->hostname); + p->hostname = NULL; + p->host_id = UUID_ZERO; + p->node_id = UUID_ZERO; + p->claim_id = UUID_ZERO; + p->hops = 0; + p->since = 0; + p->first_time_t = 0; + p->capabilities = 0; + p->flags = STREAM_PATH_FLAG_NONE; + p->start_time = 0; + p->shutdown_time = 0; +} + +static void rrdhost_stream_path_clear_unsafe(RRDHOST *host, bool destroy) { + for(size_t i = 0; i < host->rrdpush.path.used ; i++) + stream_path_clear(&host->rrdpush.path.array[i]); + + host->rrdpush.path.used = 0; + + if(destroy) { + freez(host->rrdpush.path.array); + host->rrdpush.path.array = NULL; + host->rrdpush.path.size = 0; + } +} + +void rrdhost_stream_path_clear(RRDHOST *host, bool destroy) { + spinlock_lock(&host->rrdpush.path.spinlock); + rrdhost_stream_path_clear_unsafe(host, destroy); + spinlock_unlock(&host->rrdpush.path.spinlock); +} + +static void stream_path_to_json_object(BUFFER *wb, STREAM_PATH *p) { + buffer_json_add_array_item_object(wb); + buffer_json_member_add_string(wb, "hostname", string2str(p->hostname)); + buffer_json_member_add_uuid(wb, "host_id", p->host_id.uuid); + buffer_json_member_add_uuid(wb, "node_id", p->node_id.uuid); + buffer_json_member_add_uuid(wb, "claim_id", p->claim_id.uuid); + buffer_json_member_add_int64(wb, "hops", p->hops); + buffer_json_member_add_uint64(wb, "since", p->since); + buffer_json_member_add_uint64(wb, "first_time_t", p->first_time_t); + buffer_json_member_add_uint64(wb, "start_time", p->start_time); + buffer_json_member_add_uint64(wb, "shutdown_time", p->shutdown_time); + stream_capabilities_to_json_array(wb, p->capabilities, "capabilities"); + STREAM_PATH_FLAGS_2json(wb, "flags", p->flags); + buffer_json_object_close(wb); +} + +static STREAM_PATH rrdhost_stream_path_self(RRDHOST *host) { + STREAM_PATH p = { 0 }; + + bool is_localhost = host == localhost || rrdhost_option_check(host, RRDHOST_OPTION_VIRTUAL_HOST); + + p.hostname = string_dup(localhost->hostname); + p.host_id = localhost->host_id; + p.node_id = localhost->node_id; + p.claim_id = claim_id_get_uuid(); + p.start_time = get_agent_event_time_median(EVENT_AGENT_START_TIME) / USEC_PER_MS; + p.shutdown_time = get_agent_event_time_median(EVENT_AGENT_SHUTDOWN_TIME) / USEC_PER_MS; + + p.flags = STREAM_PATH_FLAG_NONE; + if(!UUIDiszero(p.claim_id)) + p.flags |= STREAM_PATH_FLAG_ACLK; + + bool has_receiver = false; + spinlock_lock(&host->receiver_lock); + if(host->receiver) { + has_receiver = true; + p.hops = (int16_t)host->receiver->hops; + p.since = host->receiver->connected_since_s; + } + spinlock_unlock(&host->receiver_lock); + + if(!has_receiver) { + p.hops = (is_localhost) ? 0 : -1; // -1 for stale nodes + p.since = netdata_start_time; + } + + // the following may get the receiver lock again! 
+ p.capabilities = stream_our_capabilities(host, true); + + rrdhost_retention(host, 0, false, &p.first_time_t, NULL); + + return p; +} + +STREAM_PATH rrdhost_stream_path_fetch(RRDHOST *host) { + STREAM_PATH p = { 0 }; + + spinlock_lock(&host->rrdpush.path.spinlock); + for (size_t i = 0; i < host->rrdpush.path.used; i++) { + STREAM_PATH *tmp_path = &host->rrdpush.path.array[i]; + if(UUIDeq(host->host_id, tmp_path->host_id)) { + p = *tmp_path; + break; + } + } + spinlock_unlock(&host->rrdpush.path.spinlock); + return p; +} + +void rrdhost_stream_path_to_json(BUFFER *wb, struct rrdhost *host, const char *key, bool add_version) { + if(add_version) + buffer_json_member_add_uint64(wb, "version", 1); + + spinlock_lock(&host->rrdpush.path.spinlock); + buffer_json_member_add_array(wb, key); + { + { + STREAM_PATH tmp = rrdhost_stream_path_self(host); + + bool found_self = false; + for (size_t i = 0; i < host->rrdpush.path.used; i++) { + STREAM_PATH *p = &host->rrdpush.path.array[i]; + if(UUIDeq(localhost->host_id, p->host_id)) { + // this is us, use the current data + p = &tmp; + found_self = true; + } + stream_path_to_json_object(wb, p); + } + + if(!found_self) { + // we didn't find ourselves in the list. + // append us. + stream_path_to_json_object(wb, &tmp); + } + + stream_path_clear(&tmp); + } + } + buffer_json_array_close(wb); // key + spinlock_unlock(&host->rrdpush.path.spinlock); +} + +static BUFFER *stream_path_payload(RRDHOST *host) { + BUFFER *wb = buffer_create(0, NULL); + buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_MINIFY); + rrdhost_stream_path_to_json(wb, host, STREAM_PATH_JSON_MEMBER, true); + buffer_json_finalize(wb); + return wb; +} + +void stream_path_send_to_parent(RRDHOST *host) { + struct sender_state *s = host->sender; + if(!s || !stream_has_capability(s, STREAM_CAP_PATHS)) return; + + CLEAN_BUFFER *payload = stream_path_payload(host); + + BUFFER *wb = sender_start(s); + buffer_sprintf(wb, PLUGINSD_KEYWORD_JSON " " PLUGINSD_KEYWORD_STREAM_PATH "\n%s\n" PLUGINSD_KEYWORD_JSON_END "\n", buffer_tostring(payload)); + sender_commit(s, wb, STREAM_TRAFFIC_TYPE_METADATA); +} + +void stream_path_send_to_child(RRDHOST *host) { + if(host == localhost) + return; + + CLEAN_BUFFER *payload = stream_path_payload(host); + + spinlock_lock(&host->receiver_lock); + if(host->receiver && stream_has_capability(host->receiver, STREAM_CAP_PATHS)) { + + CLEAN_BUFFER *wb = buffer_create(0, NULL); + buffer_sprintf(wb, PLUGINSD_KEYWORD_JSON " " PLUGINSD_KEYWORD_STREAM_PATH "\n%s\n" PLUGINSD_KEYWORD_JSON_END "\n", buffer_tostring(payload)); + send_to_plugin(buffer_tostring(wb), __atomic_load_n(&host->receiver->parser, __ATOMIC_RELAXED)); + } + spinlock_unlock(&host->receiver_lock); +} + +void stream_path_child_disconnected(RRDHOST *host) { + rrdhost_stream_path_clear(host, true); +} + +void stream_path_parent_disconnected(RRDHOST *host) { + spinlock_lock(&host->rrdpush.path.spinlock); + + size_t cleared = 0; + size_t used = host->rrdpush.path.used; + for (size_t i = 0; i < used; i++) { + STREAM_PATH *p = &host->rrdpush.path.array[i]; + if(UUIDeq(localhost->host_id, p->host_id)) { + host->rrdpush.path.used = i + 1; + + for(size_t j = i + 1; j < used ;j++) { + stream_path_clear(&host->rrdpush.path.array[j]); + cleared++; + } + + break; + } + } + + spinlock_unlock(&host->rrdpush.path.spinlock); + + if(cleared) + stream_path_send_to_child(host); +} + +void stream_path_retention_updated(RRDHOST *host) { + if(!host || !localhost) return; + stream_path_send_to_parent(host); + 
stream_path_send_to_child(host); +} + +void stream_path_node_id_updated(RRDHOST *host) { + if(!host || !localhost) return; + stream_path_send_to_parent(host); + stream_path_send_to_child(host); +} + +// -------------------------------------------------------------------------------------------------------------------- + + +static bool parse_single_path(json_object *jobj, const char *path, STREAM_PATH *p, BUFFER *error) { + JSONC_PARSE_TXT2STRING_OR_ERROR_AND_RETURN(jobj, path, "hostname", p->hostname, error, true); + JSONC_PARSE_TXT2UUID_OR_ERROR_AND_RETURN(jobj, path, "host_id", p->host_id.uuid, error, true); + JSONC_PARSE_TXT2UUID_OR_ERROR_AND_RETURN(jobj, path, "node_id", p->node_id.uuid, error, true); + JSONC_PARSE_TXT2UUID_OR_ERROR_AND_RETURN(jobj, path, "claim_id", p->claim_id.uuid, error, true); + JSONC_PARSE_INT64_OR_ERROR_AND_RETURN(jobj, path, "hops", p->hops, error, true); + JSONC_PARSE_UINT64_OR_ERROR_AND_RETURN(jobj, path, "since", p->since, error, true); + JSONC_PARSE_UINT64_OR_ERROR_AND_RETURN(jobj, path, "first_time_t", p->first_time_t, error, true); + JSONC_PARSE_INT64_OR_ERROR_AND_RETURN(jobj, path, "start_time", p->start_time, error, true); + JSONC_PARSE_INT64_OR_ERROR_AND_RETURN(jobj, path, "shutdown_time", p->shutdown_time, error, true); + JSONC_PARSE_ARRAY_OF_TXT2BITMAP_OR_ERROR_AND_RETURN(jobj, path, "flags", STREAM_PATH_FLAGS_2id_one, p->flags, error, true); + JSONC_PARSE_ARRAY_OF_TXT2BITMAP_OR_ERROR_AND_RETURN(jobj, path, "capabilities", stream_capabilities_parse_one, p->capabilities, error, true); + + if(!p->hostname) { + buffer_strcat(error, "hostname cannot be empty"); + return false; + } + + if(UUIDiszero(p->host_id)) { + buffer_strcat(error, "host_id cannot be zero"); + return false; + } + + if(p->hops < 0) { + buffer_strcat(error, "hops cannot be negative"); + return false; + } + + if(p->capabilities == STREAM_CAP_NONE) { + buffer_strcat(error, "capabilities cannot be empty"); + return false; + } + + if(p->since <= 0) { + buffer_strcat(error, "since cannot be <= 0"); + return false; + } + + return true; +} + +static XXH128_hash_t stream_path_hash_unsafe(RRDHOST *host) { + if(!host->rrdpush.path.used) + return (XXH128_hash_t){ 0 }; + + return XXH3_128bits(host->rrdpush.path.array, sizeof(*host->rrdpush.path.array) * host->rrdpush.path.used); +} + +static int compare_by_hops(const void *a, const void *b) { + const STREAM_PATH *path1 = a; + const STREAM_PATH *path2 = b; + + if (path1->hops < path2->hops) + return -1; + else if (path1->hops > path2->hops) + return 1; + + return 0; +} + +bool stream_path_set_from_json(RRDHOST *host, const char *json, bool from_parent) { + if(!json || !*json) + return false; + + CLEAN_JSON_OBJECT *jobj = json_tokener_parse(json); + if(!jobj) { + nd_log(NDLS_DAEMON, NDLP_ERR, + "STREAM PATH: Cannot parse json: %s", json); + return false; + } + + spinlock_lock(&host->rrdpush.path.spinlock); + XXH128_hash_t old_hash = stream_path_hash_unsafe(host); + rrdhost_stream_path_clear_unsafe(host, true); + + CLEAN_BUFFER *error = buffer_create(0, NULL); + + json_object *_jarray; + if (json_object_object_get_ex(jobj, STREAM_PATH_JSON_MEMBER, &_jarray) && + json_object_is_type(_jarray, json_type_array)) { + size_t items = json_object_array_length(_jarray); + host->rrdpush.path.array = callocz(items, sizeof(*host->rrdpush.path.array)); + host->rrdpush.path.size = items; + + for (size_t i = 0; i < items; ++i) { + json_object *joption = json_object_array_get_idx(_jarray, i); + if (!json_object_is_type(joption, json_type_object)) { + 
nd_log(NDLS_DAEMON, NDLP_ERR, + "STREAM PATH: Array item No %zu is not an object: %s", i, json); + continue; + } + + if(!parse_single_path(joption, "", &host->rrdpush.path.array[host->rrdpush.path.used], error)) { + stream_path_clear(&host->rrdpush.path.array[host->rrdpush.path.used]); + nd_log(NDLS_DAEMON, NDLP_ERR, + "STREAM PATH: Array item No %zu cannot be parsed: %s: %s", i, buffer_tostring(error), json); + } + else + host->rrdpush.path.used++; + } + } + + if(host->rrdpush.path.used > 1) { + // sorting is required in order to support stream_path_parent_disconnected() + qsort(host->rrdpush.path.array, host->rrdpush.path.used, + sizeof(*host->rrdpush.path.array), compare_by_hops); + } + + XXH128_hash_t new_hash = stream_path_hash_unsafe(host); + spinlock_unlock(&host->rrdpush.path.spinlock); + + if(!XXH128_isEqual(old_hash, new_hash)) { + if(!from_parent) + stream_path_send_to_parent(host); + + // when it comes from the child, we still need to send it back to the child + // including our own entry in it. + stream_path_send_to_child(host); + } + + return host->rrdpush.path.used > 0; +} diff --git a/src/streaming/stream-path.h b/src/streaming/stream-path.h new file mode 100644 index 000000000..6dc323bdd --- /dev/null +++ b/src/streaming/stream-path.h @@ -0,0 +1,54 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_STREAM_PATH_H +#define NETDATA_STREAM_PATH_H + +#include "stream-capabilities.h" + +#define STREAM_PATH_JSON_MEMBER "streaming_path" + +typedef enum __attribute__((packed)) { + STREAM_PATH_FLAG_NONE = 0, + STREAM_PATH_FLAG_ACLK = (1 << 0), +} STREAM_PATH_FLAGS; + +typedef struct stream_path { + STRING *hostname; // the hostname of the agent + ND_UUID host_id; // the machine guid of the agent + ND_UUID node_id; // the cloud node id of the agent + ND_UUID claim_id; // the cloud claim id of the agent + time_t since; // the timestamp of the last update + time_t first_time_t; // the oldest timestamp in the db + int16_t hops; // -1 = stale node, 0 = localhost, >0 the hops count + STREAM_PATH_FLAGS flags; // ACLK or NONE for the moment + STREAM_CAPABILITIES capabilities; // streaming connection capabilities + uint32_t start_time; // median time in ms the agent needs to start + uint32_t shutdown_time; // median time in ms the agent needs to shutdown +} STREAM_PATH; + +typedef struct rrdhost_stream_path { + SPINLOCK spinlock; + uint16_t size; + uint16_t used; + STREAM_PATH *array; +} RRDHOST_STREAM_PATH; + + +struct rrdhost; + +void stream_path_send_to_parent(struct rrdhost *host); +void stream_path_send_to_child(struct rrdhost *host); + +void rrdhost_stream_path_to_json(BUFFER *wb, struct rrdhost *host, const char *key, bool add_version); +void rrdhost_stream_path_clear(struct rrdhost *host, bool destroy); + +void stream_path_retention_updated(struct rrdhost *host); +void stream_path_node_id_updated(struct rrdhost *host); + +void stream_path_child_disconnected(struct rrdhost *host); +void stream_path_parent_disconnected(struct rrdhost *host); +STREAM_PATH rrdhost_stream_path_fetch(struct rrdhost *host); + +bool stream_path_set_from_json(struct rrdhost *host, const char *json, bool from_parent); + +#endif //NETDATA_STREAM_PATH_H diff --git a/src/streaming/stream.conf b/src/streaming/stream.conf index 475d5eac2..659bd830d 100644 --- a/src/streaming/stream.conf +++ b/src/streaming/stream.conf @@ -62,32 +62,33 @@ #enable compression = yes # The timeout to connect and send metrics - timeout seconds = 60 + #timeout = 1m # If the destination line above does not specify a port, 
use this - default port = 19999 + #default port = 19999 - # filter the charts to be streamed + # filter the charts and contexts to be streamed # netdata SIMPLE PATTERN: # - space separated list of patterns (use \ to include spaces in patterns) # - use * as wildcard, any number of times within each pattern # - prefix a pattern with ! for a negative match (ie not stream the charts it matches) # - the order of patterns is important (left to right) # To send all except a few, use: !this !that * (ie append a wildcard pattern) - send charts matching = * + # The pattern is matched against the context, the chart name and the chart id. + #send charts matching = * # The buffer to use for sending metrics. # 10MB is good for 60 seconds of data, so increase this if you expect latencies. # The buffer is flushed on reconnects (this will not prevent gaps at the charts). - buffer size bytes = 10485760 + #buffer size bytes = 10485760 # If the connection fails, or it disconnects, # retry after that many seconds. - reconnect delay seconds = 5 + #reconnect delay = 5s # Sync the clock of the charts for that many iterations, when starting. # It is ignored when replication is enabled - initial clock resync iterations = 60 + #initial clock resync iterations = 60 # ----------------------------------------------------------------------------- # 2. ON PARENT NETDATA - THE ONE THAT WILL BE RECEIVING METRICS @@ -124,21 +125,21 @@ # will be pushing metrics using this API key. # The metrics are received via the API port, so the same IPs # should also be matched at netdata.conf [web].allow connections from - allow from = * + #allow from = * - # The default history in entries, for all hosts using this API key. + # The history in entries (for db alloc or ram), for all hosts using this API key. # You can also set it per host below. - # For the default db mode (dbengine), this is ignored. - #default history = 3600 + # For the default db (dbengine), this is ignored. + #retention = 3600 - # The default memory mode to be used for all hosts using this API key. + # The database to be used for all hosts using this API key. # You can also set it per host below. # If you don't set it here, the memory mode of netdata.conf will be used. # Valid modes: # ram keep it in RAM, don't touch the disk # none no database at all (use this on headless proxies) - # dbengine like a traditional database - #default memory mode = dbengine + # dbengine Netdata's high performance database + #db = dbengine # Shall we enable health monitoring for the hosts using this API key? # 3 possible values: @@ -150,18 +151,18 @@ # The default is taken from [health].enabled of netdata.conf #health enabled by default = auto - # postpone alarms for a short period after the sender is connected - default postpone alarms on connect seconds = 60 + # postpone alerts for a short period after the sender is connected + #postpone alerts on connect = 1m - # seconds of health log events to keep - #default health log history = 432000 + # the duration to maintain health log events + #health log retention = 5d # need to route metrics differently? set these. # the defaults are the ones at the [stream] section (above) - #default proxy enabled = yes | no - #default proxy destination = IP:PORT IP:PORT ... - #default proxy api key = API_KEY - #default proxy send charts matching = * + #proxy enabled = yes | no + #proxy destination = IP:PORT IP:PORT ... + #proxy api key = API_KEY + #proxy send charts matching = * # Stream Compression # By default it is enabled. 
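 # For example, the per-algorithm tuning keys read at startup are
 # (the values below are illustrative placeholders, not verified defaults):
 #zstd compression level = 3
 #lz4 compression acceleration = 1
 #brotli compression level = 3
 #gzip compression level = 3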
@@ -176,13 +177,13 @@ #enable replication = yes # How many seconds to replicate from each child. Default: a day - #seconds to replicate = 86400 + #replication period = 1d # The duration we want to replicate per each step. - #seconds per replication step = 600 + #replication step = 10m # Indicate whether this child is an ephemeral node. An ephemeral node will become unavailable - # after the specified duration of "cleanup ephemeral hosts after secs" (as defined in the db section of netdata.conf) + # after the specified duration of "cleanup ephemeral hosts after" (as defined in the db section of netdata.conf) # from the time of the node's last connection. #is ephemeral node = no @@ -217,23 +218,23 @@ # The metrics are received via the API port, so the same IPs # should also be matched at netdata.conf [web].allow connections from # and at stream.conf [API_KEY].allow from - allow from = * + #allow from = * # The number of entries in the database. - # This is ignored for db mode dbengine. - #history = 3600 + # This is ignored for db dbengine. + #retention = 3600 # The memory mode of the database: ram | none | dbengine - #memory mode = dbengine + #db = dbengine # Health / alarms control: yes | no | auto #health enabled = auto - # postpone alarms when the sender connects - postpone alarms on connect seconds = 60 + # postpone alerts when the sender connects + #postpone alerts on connect = 1m - # seconds of health log events to keep - #health log history = 432000 + # the duration to maintain health log events + #health log retention = 5d # need to route metrics differently? # the defaults are the ones at the [API KEY] section @@ -252,12 +253,12 @@ #enable replication = yes # How many seconds to replicate from each child. - #seconds to replicate = 86400 + #replication period = 1d # The duration we want to replicate per each step. - #seconds per replication step = 600 + #replication step = 10m # Indicate whether this child is an ephemeral node. An ephemeral node will become unavailable - # after the specified duration of "cleanup ephemeral hosts after secs" (as defined in the db section of netdata.conf) + # after the specified duration of "cleanup ephemeral hosts after" (as defined in the db section of netdata.conf) # from the time of the node's last connection. #is ephemeral node = no diff --git a/src/web/api/badges/README.md b/src/web/api/badges/README.md deleted file mode 100644 index 1388fe0c3..000000000 --- a/src/web/api/badges/README.md +++ /dev/null @@ -1,369 +0,0 @@ - - -# Netdata badges - -**Badges are cool!** - -Netdata can generate badges for any chart and any dimension at any time-frame. Badges come in `SVG` and can be added to any web page using an `<img>` HTML tag. - -**Netdata badges are powerful**! - -Given that Netdata collects from **1.000** to **5.000** metrics per server (depending on the number of network interfaces, disks, cpu cores, applications running, users logged in, containers running, etc) and that Netdata already has data reduction/aggregation functions embedded, the badges can be quite powerful. - -For each metric/dimension and for arbitrary time-frames badges can show **min**, **max** or **average** value, but also **sum** or **incremental-sum** to have their **volume**. - -For example, there is [a chart in Netdata that shows the current requests/s of nginx](http://london.my-netdata.io/#nginx_local_nginx). Using this chart alone we can show the following badges (we could add more time-frames, like **today**, **yesterday**, etc): -
    - -Similarly, there is [a chart that shows outbound bandwidth per class](http://london.my-netdata.io/#tc_eth0), using QoS data. So it shows `kilobits/s` per class. Using this chart we can show: - - - -The right one is a **volume** calculation. Netdata calculated the total of the last 86.400 seconds (a day) which gives `kilobits`, then divided it by 8 to make it KB, then by 1024 to make it MB and then by 1024 to make it GB. Calculations like this are quite accurate, since for every value collected, every second, Netdata interpolates it to second boundary using microsecond calculations. - -Let's see a few more badge examples (they come from the [Netdata registry](/src/registry/README.md)): - -- **cpu usage of user `root`** (you can pick any user; 100% = 1 core). This will be `green <10%`, `yellow <20%`, `orange <50%`, `blue <100%` (1 core), `red` otherwise (you define thresholds and colors on the URL). - - - -- **mysql queries per second** - - - - niche ones: **mysql SELECT statements with JOIN, which did full table scans**: - - - ---- - -> So, every single line on the charts of a [Netdata dashboard](http://london.my-netdata.io/) can become a badge and this badge can calculate **average**, **min**, **max**, or **volume** for any time-frame! And you can also vary the badge color using conditions on the calculated value. - ---- - -## How to create badges - -The basic URL is `http://your.netdata:19999/api/v1/badge.svg?option1&option2&option3&...`. - -Here is what you can put for `options` (these are standard Netdata API options): - -- `chart=CHART.NAME` - - The chart to get the values from. - - **This is the only parameter required** and with just this parameter, Netdata will return the sum of the latest values of all chart dimensions. - - Example: - -```html - - - -``` - - Which produces this: - - - - - -- `alarm=NAME` - - Render the current value and status of an alert linked to the chart. This option can be ignored if the badge to be generated is not related to an alert. - - The current value of the alert will be rendered. The color of the badge will indicate the status of the alert. - - For alert badges, **both `chart` and `alarm` parameters are required**. - -- `dimensions=DIMENSION1|DIMENSION2|...` - - The dimensions of the chart to use. If you don't set any dimension, all will be used. When multiple dimensions are used, Netdata will sum their values. You can append `options=absolute` if you want this sum to convert all values to positive before adding them. - - Pipes in HTML have to be escaped with `%7C`. - - Example: - -```html - - - -``` - - Which produces this: - - - - - -- `before=SECONDS` and `after=SECONDS` - - The timeframe. These can be absolute unix timestamps, or a number of seconds relative to now. By default `before=0` and `after=-1` (1 second in the past). - - To get the last minute set `after=-60`. This will give the average of the last complete minute (XX:XX:00 - XX:XX:59). - - To get the max of the last hour set `after=-3600&group=max`.
This will give the maximum value of the last complete hour (XX:00:00 - XX:59:59). - - Example: - -```html - - - -``` - - Which produces the average of the last complete minute (XX:XX:00 - XX:XX:59): - - - - - - While this is the previous minute (one minute before the last one, again aligned XX:XX:00 - XX:XX:59): - -```html - - - -``` - - It produces this: - - - - - -- `group=min` or `group=max` or `group=average` (the default) or `group=sum` or `group=incremental-sum` - - If Netdata will have to reduce (aggregate) the data to calculate the value, which aggregation method to use. - - - `max` will find the max value for the timeframe. This works on both positive and negative dimensions. It will find the most extreme value. - - - `min` will find the min value for the timeframe. This works on both positive and negative dimensions. It will find the number closest to zero. - - - `average` will calculate the average value for the timeframe. - - - `sum` will sum all the values for the timeframe. This is nice for finding the volume of dimensions for a timeframe. So if you have a dimension that reports `X per second`, you can find the volume of the dimension in a timeframe, by adding its values in that timeframe. - - - `incremental-sum` will sum the differences between adjacent values. Let's assume you have a dimension that does not measure the rate of something, but the absolute value of it. So it has values like this "1, 5, 3, 7, 4". `incremental-sum` will calculate the difference of adjacent values. In this example, they will be `(5 - 1) + (3 - 5) + (7 - 3) + (4 - 7) = 3` (which is equal to the last value minus the first = 4 - 1). - -- `options=opt1|opt2|opt3|...` - - These fine-tune various options of the API. Here is what you can use for badges (the API has more options, but only these are useful for badges): - - - `percentage`, instead of returning a value, calculate the percentage of the sum of the values of the selected dimensions (selected sum / total sum * 100). This also sets the units to `%`. - - - `absolute` or `abs`, turn all values positive and then sum them. - - - `display_absolute` or `display-absolute`, to use the signed value during color calculation, but display the absolute value on the badge. - - - `min2max`, when multiple dimensions are given, do not sum them, but take their `max - min`. - - - `unaligned`, when data are reduced / aggregated (e.g. the request is about the average of the last minute, or hour), Netdata by default aligns them so that the charts will have a constant shape (so average per minute returns always XX:XX:00 - XX:XX:59). Setting the `unaligned` option, Netdata will aggregate data without any alignment, so if the request is for 60 seconds, it will aggregate the latest 60 seconds of collected data. - -These are options dedicated to badges: - -- `label=TEXT` - - The label of the badge. - -- `units=TEXT` - - The units of the badge. If you want to put a `/`, please put a `\`. This is because Netdata allows badge parameters to be given as a path in the URL, instead of a query string. You can also use `null` or `empty` to show it without any units. - - The units `seconds`, `minutes` and `hours` trigger special formatting. The value has to be in this unit, and Netdata will automatically change it to show a prettier duration. - -- `multiply=NUMBER` - - Multiply the value by this number. The default is `1`. - -- `divide=NUMBER` - - Divide the value by this number. The default is `1`.
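  Example — a sketch combining the query options above (it assumes a reachable agent at `localhost:19999` and the common `system.cpu` chart; the dimension, label and units are illustrative):

```sh
# render the average user CPU of the last complete minute as a percentage badge
curl -s 'http://localhost:19999/api/v1/badge.svg?chart=system.cpu&dimensions=user&after=-60&group=average&label=user%20cpu&units=%25' -o user-cpu.svg
```

  Swapping `group=average` for `group=sum` (together with `options=unaligned`) would return the volume of the latest 60 seconds instead of the aligned per-minute average.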
- -- Color customization parameters - - The following parameters specify colors of each individual part of the badge. Each parameter is documented in detail - below. - - | Area of badge | Background color parameter | Text color parameter | - | ---: | :------------------------: | :------------------: | - | Label (left) part | `label_color` | `text_color_lbl` | - | Value (right) part | `value_color` | `text_color_val` | - - - `label_color=COLOR` - - The color of the label (the left part). You can use any HTML color in `RGB` or `RRGGBB` hex notation (without - the `#` character at the beginning). Additionally, you can use one of the following predefined colors (and you - can use them by their name): - - - `green` - - `brightgreen` - - `yellow` - - `yellowgreen` - - `orange` - - `red` - - `blue` - - `grey` - - `gray` - - `lightgrey` - - `lightgray` - - These colors are taken from <https://github.com/badges/shields>, which makes them compatible with standard - badges. - - - `value_color=COLOR:null|COLORVALUE|COLOR>=VALUE|COLOR<=VALUE|...` - - You can add a pipe delimited list of conditions to pick the value color. The first matching (left to right) will - be used. - - Example: `value_color=grey:null|green<10|yellow<100|orange<1000|blue<10000|red` - - The above will set `grey` if no value exists (not collected within the `gap when lost iterations above` in - `netdata.conf` for the chart), `green` if the value is less than 10, `yellow` if the value is less than 100, and - so on. Netdata will use `red` if no other conditions match. Only integers are supported as values. - - The supported operators are `<`, `>`, `<=`, `>=`, `=` (or `:`), and `!=` (or `<>`). - - You can also use the same syntax as the `label_color` parameter to define each of these colors. You can - reference a predefined color by name or `RGB`/`RRGGBB` hex notation. - - - `text_color_lbl=RGB` or `text_color_lbl=RRGGBB` or `text_color_lbl=color_by_name` - - This value specifies the font color for the left/label side of the badge. The syntax is the same as the - `label_color` parameter. If not given, or given with an empty value, Netdata will use the default color. - - - `text_color_val=RGB` or `text_color_val=RRGGBB` or `text_color_val=color_by_name` - - This value specifies the font color for the right/value side of the badge. The syntax is the same as the - `label_color` parameter. If not given, or given with an empty value, Netdata will use the default color. - -- `precision=NUMBER` - - The number of decimal digits of the value. By default Netdata will add: - - - no decimal digits for values > 1000 - - 1 decimal digit for values > 100 - - 2 decimal digits for values > 1 - - 3 decimal digits for values > 0.1 - - 4 decimal digits for values \<= 0.1 - - Using the `precision=NUMBER` you can set your preference per badge. - -- `scale=XXX` - - This option scales the SVG image. It accepts values greater than or equal to 100 (100% is the default scale). For example, let's get a few different sizes: - - original
    - `scale=125`
    - `scale=150`
    - `scale=175`
    - `scale=200` - -- `fixed_width_lbl=NUMBER` and `fixed_width_val=NUMBER` - - This parameter overrides auto-sizing of badges and displays them at fixed widths. `fixed_width_lbl` determines the size of the label's left side (label/name). `fixed_width_val` determines the size of the label's right side (value). You must set both parameters together, or they will be ignored. - - You should set the label/value widths wide enough to provide space for all the possible values/contents of the badge you're requesting. If the text cannot fit the given space, it will be clipped. - - The `scale` parameter still applies on the values you give to `fixed_width_lbl` and `fixed_width_val`. - -- `refresh=auto` or `refresh=SECONDS` - - This option enables auto-refreshing of images. Netdata will send the HTTP header `Refresh: SECONDS` to the web browser, thus requesting automatic refresh of the images at regular intervals. - - `auto` will calculate the proper `SECONDS` to avoid unnecessary refreshes. If `SECONDS` is zero, this feature is disabled (it is also disabled by default). - - Auto-refreshing like this works only if you access the badge directly. So, you may have to put it in an `embed` or `iframe` for it to be auto-refreshed. Use something like this: - -```html - -``` - - Another way is to use JavaScript to auto-refresh them. You can auto-refresh all the Netdata badges on a page using JavaScript. You have to add a class to all the Netdata badges, like this ``. Then add this JavaScript code to your page (it requires jQuery): - -```html - -``` - -A more advanced badge refresh method is to include `http://your.netdata.ip:19999/refresh-badges.js` in your page. - ---- - -## Escaping URLs - -Keep in mind that if you add badge URLs to your HTML pages you have to escape the special characters: - -|character|name|escape sequence| -|:-------:|:--:|:-------------:| -|``|space (in labels and units)|`%20`| -|`#`|hash (for colors)|`%23`| -|`%`|percent (in units)|`%25`| -|`<`|less than|`%3C`| -|`>`|greater than|`%3E`| -|`\`|backslash (when you need a `/`)|`%5C`| -|`\|`|pipe (delimiting parameters)|`%7C`| - -## FAQ - -#### Is it fast? - -On modern hardware, Netdata can generate about **2.000 badges per second per core**, before noticing any delays. It generates a badge in about half a millisecond! - -Of course these timings are for badges that use recent data. If you need badges that do calculations over long durations (a day, or more), timing will differ. Netdata logs its timings in its `access.log`, so take a look there before adding a heavy badge on a busy web site. Of course, you can cache such badges or have a cron job get them from Netdata and save them at your web server at regular intervals. - -#### Embedding badges in GitHub - -You have 2 options: -- SVG images with markdown -- SVG images with HTML (directly in .md files) - -For example, this is the CPU badge shown above: - -- Markdown example: - -```md -[![A nice name](https://registry.my-netdata.io/api/v1/badge.svg?chart=users.cpu&dimensions=root&value_color=grey:null%7Cgreen%3C10%7Cyellow%3C20%7Corange%3C50%7Cblue%3C100%7Cred&label=root%20user%20cpu%20now&units=%25)](https://registry.my-netdata.io/#apps_cpu) -``` - -- HTML example: - -```html - - - -``` - -Both produce this: - - - - - -#### Auto-refreshing badges in GitHub - -Unfortunately it cannot be done. GitHub fetches all the images using a proxy and rewrites all the URLs to be served by the proxy. - -You can refresh them from your browser console though.
Press F12 to open the web browser console (switch to the console too), paste the following and press enter. They will refresh: - -```js -var len = document.images.length; while(len--) { document.images[len].src = document.images[len].src.replace(/\?cacheBuster=\d*/, "") + "?cacheBuster=" + new Date().getTime().toString(); }; -``` - - diff --git a/src/web/api/badges/web_buffer_svg.c b/src/web/api/badges/web_buffer_svg.c deleted file mode 100644 index 747c46d5e..000000000 --- a/src/web/api/badges/web_buffer_svg.c +++ /dev/null @@ -1,1159 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include "web_buffer_svg.h" - -#define BADGE_HORIZONTAL_PADDING 4 -#define VERDANA_KERNING 0.2 -#define VERDANA_PADDING 1.0 - -/* - * verdana11_widths[] has been generated with this method: - * https://github.com/badges/shields/blob/master/measure-text.js -*/ - -static double verdana11_widths[128] = { - [0] = 0.0, - [1] = 0.0, - [2] = 0.0, - [3] = 0.0, - [4] = 0.0, - [5] = 0.0, - [6] = 0.0, - [7] = 0.0, - [8] = 0.0, - [9] = 0.0, - [10] = 0.0, - [11] = 0.0, - [12] = 0.0, - [13] = 0.0, - [14] = 0.0, - [15] = 0.0, - [16] = 0.0, - [17] = 0.0, - [18] = 0.0, - [19] = 0.0, - [20] = 0.0, - [21] = 0.0, - [22] = 0.0, - [23] = 0.0, - [24] = 0.0, - [25] = 0.0, - [26] = 0.0, - [27] = 0.0, - [28] = 0.0, - [29] = 0.0, - [30] = 0.0, - [31] = 0.0, - [32] = 3.8671874999999996, // - [33] = 4.3291015625, // ! - [34] = 5.048828125, // " - [35] = 9.001953125, // # - [36] = 6.9931640625, // $ - [37] = 11.837890625, // % - [38] = 7.992187499999999, // & - [39] = 2.9541015625, // ' - [40] = 4.9951171875, // ( - [41] = 4.9951171875, // ) - [42] = 6.9931640625, // * - [43] = 9.001953125, // + - [44] = 4.00146484375, // , - [45] = 4.9951171875, // - - [46] = 4.00146484375, // . - [47] = 4.9951171875, // / - [48] = 6.9931640625, // 0 - [49] = 6.9931640625, // 1 - [50] = 6.9931640625, // 2 - [51] = 6.9931640625, // 3 - [52] = 6.9931640625, // 4 - [53] = 6.9931640625, // 5 - [54] = 6.9931640625, // 6 - [55] = 6.9931640625, // 7 - [56] = 6.9931640625, // 8 - [57] = 6.9931640625, // 9 - [58] = 4.9951171875, // : - [59] = 4.9951171875, // ; - [60] = 9.001953125, // < - [61] = 9.001953125, // = - [62] = 9.001953125, // > - [63] = 5.99951171875, // ? 
- [64] = 11.0, // @ - [65] = 7.51953125, // A - [66] = 7.541015625, // B - [67] = 7.680664062499999, // C - [68] = 8.4755859375, // D - [69] = 6.95556640625, // E - [70] = 6.32177734375, // F - [71] = 8.529296875, // G - [72] = 8.26611328125, // H - [73] = 4.6298828125, // I - [74] = 5.00048828125, // J - [75] = 7.62158203125, // K - [76] = 6.123046875, // L - [77] = 9.2705078125, // M - [78] = 8.228515625, // N - [79] = 8.658203125, // O - [80] = 6.63330078125, // P - [81] = 8.658203125, // Q - [82] = 7.6484375, // R - [83] = 7.51953125, // S - [84] = 6.7783203125, // T - [85] = 8.05126953125, // U - [86] = 7.51953125, // V - [87] = 10.87646484375, // W - [88] = 7.53564453125, // X - [89] = 6.767578125, // Y - [90] = 7.53564453125, // Z - [91] = 4.9951171875, // [ - [92] = 4.9951171875, // backslash - [93] = 4.9951171875, // ] - [94] = 9.001953125, // ^ - [95] = 6.9931640625, // _ - [96] = 6.9931640625, // ` - [97] = 6.6064453125, // a - [98] = 6.853515625, // b - [99] = 5.73095703125, // c - [100] = 6.853515625, // d - [101] = 6.552734375, // e - [102] = 3.8671874999999996, // f - [103] = 6.853515625, // g - [104] = 6.9609375, // h - [105] = 3.0185546875, // i - [106] = 3.78662109375, // j - [107] = 6.509765625, // k - [108] = 3.0185546875, // l - [109] = 10.69921875, // m - [110] = 6.9609375, // n - [111] = 6.67626953125, // o - [112] = 6.853515625, // p - [113] = 6.853515625, // q - [114] = 4.6943359375, // r - [115] = 5.73095703125, // s - [116] = 4.33447265625, // t - [117] = 6.9609375, // u - [118] = 6.509765625, // v - [119] = 9.001953125, // w - [120] = 6.509765625, // x - [121] = 6.509765625, // y - [122] = 5.779296875, // z - [123] = 6.982421875, // { - [124] = 4.9951171875, // | - [125] = 6.982421875, // } - [126] = 9.001953125, // ~ - [127] = 0.0 -}; - -// find the width of the string using the verdana 11points font -static inline double verdana11_width(const char *s, float em_size) { - double w = 0.0; - - while(*s) { - // if UTF8 multibyte char found and guess it's width equal 1em - // as label width will be updated with JavaScript this is not so important - - // TODO: maybe move UTF8 functions from url.c to separate util in libnetdata - // then use url_utf8_get_byte_length etc. 
- if(IS_UTF8_STARTBYTE(*s)) { - s++; - while(IS_UTF8_BYTE(*s) && !IS_UTF8_STARTBYTE(*s)){ - s++; - } - w += em_size; - } - else { - if(likely(!(*s & 0x80))){ // Byte 1XXX XXXX is not valid in UTF8 - double t = verdana11_widths[(unsigned char)*s]; - if(t != 0.0) - w += t + VERDANA_KERNING; - } - s++; - } - } - - w -= VERDANA_KERNING; - w += VERDANA_PADDING; - return w; -} - -static inline size_t escape_xmlz(char *dst, const char *src, size_t len) { - size_t i = len; - - // required escapes from - // https://github.com/badges/shields/blob/master/badge.js - while(*src && i) { - switch(*src) { - case '\\': - *dst++ = '/'; - src++; - i--; - break; - - case '&': - if(i > 5) { - strcpy(dst, "&"); - i -= 5; - dst += 5; - src++; - } - else goto cleanup; - break; - - case '<': - if(i > 4) { - strcpy(dst, "<"); - i -= 4; - dst += 4; - src++; - } - else goto cleanup; - break; - - case '>': - if(i > 4) { - strcpy(dst, ">"); - i -= 4; - dst += 4; - src++; - } - else goto cleanup; - break; - - case '"': - if(i > 6) { - strcpy(dst, """); - i -= 6; - dst += 6; - src++; - } - else goto cleanup; - break; - - case '\'': - if(i > 6) { - strcpy(dst, "'"); - i -= 6; - dst += 6; - src++; - } - else goto cleanup; - break; - - default: - i--; - *dst++ = *src++; - break; - } - } - -cleanup: - *dst = '\0'; - return len - i; -} - -static inline char *format_value_with_precision_and_unit(char *value_string, size_t value_string_len, - NETDATA_DOUBLE value, const char *units, int precision) { - if(unlikely(isnan(value) || isinf(value))) - value = 0.0; - - char *separator = ""; - if(unlikely(isalnum((uint8_t)*units))) - separator = " "; - - if(precision < 0) { - int len, lstop = 0, trim_zeros = 1; - - NETDATA_DOUBLE abs = value; - if(isless(value, 0)) { - lstop = 1; - abs = fabsndd(value); - } - - if(isgreaterequal(abs, 1000)) { - len = snprintfz(value_string, value_string_len, "%0.0" NETDATA_DOUBLE_MODIFIER, (NETDATA_DOUBLE) value); - trim_zeros = 0; - } - else if(isgreaterequal(abs, 10)) len = snprintfz(value_string, value_string_len, "%0.1" NETDATA_DOUBLE_MODIFIER, (NETDATA_DOUBLE) value); - else if(isgreaterequal(abs, 1)) len = snprintfz(value_string, value_string_len, "%0.2" NETDATA_DOUBLE_MODIFIER, (NETDATA_DOUBLE) value); - else if(isgreaterequal(abs, 0.1)) len = snprintfz(value_string, value_string_len, "%0.2" NETDATA_DOUBLE_MODIFIER, (NETDATA_DOUBLE) value); - else if(isgreaterequal(abs, 0.01)) len = snprintfz(value_string, value_string_len, "%0.4" NETDATA_DOUBLE_MODIFIER, (NETDATA_DOUBLE) value); - else if(isgreaterequal(abs, 0.001)) len = snprintfz(value_string, value_string_len, "%0.5" NETDATA_DOUBLE_MODIFIER, (NETDATA_DOUBLE) value); - else if(isgreaterequal(abs, 0.0001)) len = snprintfz(value_string, value_string_len, "%0.6" NETDATA_DOUBLE_MODIFIER, (NETDATA_DOUBLE) value); - else len = snprintfz(value_string, value_string_len, "%0.7" NETDATA_DOUBLE_MODIFIER, (NETDATA_DOUBLE) value); - - if(unlikely(trim_zeros)) { - int l; - // remove trailing zeros from the decimal part - for(l = len - 1; l > lstop; l--) { - if(likely(value_string[l] == '0')) { - value_string[l] = '\0'; - len--; - } - - else if(unlikely(value_string[l] == '.')) { - value_string[l] = '\0'; - len--; - break; - } - - else - break; - } - } - - if(unlikely(len <= 0)) len = 1; - snprintfz(&value_string[len], value_string_len - len, "%s%s", separator, units); - } - else { - if(precision > 50) precision = 50; - snprintfz(value_string, value_string_len, "%0.*" NETDATA_DOUBLE_MODIFIER "%s%s", precision, (NETDATA_DOUBLE) value, separator, units); - } 
- - return value_string; -} - -typedef enum badge_units_format { - UNITS_FORMAT_NONE, - UNITS_FORMAT_SECONDS, - UNITS_FORMAT_SECONDS_AGO, - UNITS_FORMAT_MINUTES, - UNITS_FORMAT_MINUTES_AGO, - UNITS_FORMAT_HOURS, - UNITS_FORMAT_HOURS_AGO, - UNITS_FORMAT_ONOFF, - UNITS_FORMAT_UPDOWN, - UNITS_FORMAT_OKERROR, - UNITS_FORMAT_OKFAILED, - UNITS_FORMAT_EMPTY, - UNITS_FORMAT_PERCENT -} UNITS_FORMAT; - - -static struct units_formatter { - const char *units; - uint32_t hash; - UNITS_FORMAT format; -} badge_units_formatters[] = { - { "seconds", 0, UNITS_FORMAT_SECONDS }, - { "seconds ago", 0, UNITS_FORMAT_SECONDS_AGO }, - { "minutes", 0, UNITS_FORMAT_MINUTES }, - { "minutes ago", 0, UNITS_FORMAT_MINUTES_AGO }, - { "hours", 0, UNITS_FORMAT_HOURS }, - { "hours ago", 0, UNITS_FORMAT_HOURS_AGO }, - { "on/off", 0, UNITS_FORMAT_ONOFF }, - { "on-off", 0, UNITS_FORMAT_ONOFF }, - { "onoff", 0, UNITS_FORMAT_ONOFF }, - { "up/down", 0, UNITS_FORMAT_UPDOWN }, - { "up-down", 0, UNITS_FORMAT_UPDOWN }, - { "updown", 0, UNITS_FORMAT_UPDOWN }, - { "ok/error", 0, UNITS_FORMAT_OKERROR }, - { "ok-error", 0, UNITS_FORMAT_OKERROR }, - { "okerror", 0, UNITS_FORMAT_OKERROR }, - { "ok/failed", 0, UNITS_FORMAT_OKFAILED }, - { "ok-failed", 0, UNITS_FORMAT_OKFAILED }, - { "okfailed", 0, UNITS_FORMAT_OKFAILED }, - { "empty", 0, UNITS_FORMAT_EMPTY }, - { "null", 0, UNITS_FORMAT_EMPTY }, - { "percentage", 0, UNITS_FORMAT_PERCENT }, - { "percent", 0, UNITS_FORMAT_PERCENT }, - { "pcent", 0, UNITS_FORMAT_PERCENT }, - - // terminator - { NULL, 0, UNITS_FORMAT_NONE } -}; - -inline char *format_value_and_unit(char *value_string, size_t value_string_len, - NETDATA_DOUBLE value, const char *units, int precision) { - static int max = -1; - int i; - - if(unlikely(max == -1)) { - for(i = 0; badge_units_formatters[i].units; i++) - badge_units_formatters[i].hash = simple_hash(badge_units_formatters[i].units); - - max = i; - } - - if(unlikely(!units)) units = ""; - uint32_t hash_units = simple_hash(units); - - UNITS_FORMAT format = UNITS_FORMAT_NONE; - for(i = 0; i < max; i++) { - struct units_formatter *ptr = &badge_units_formatters[i]; - - if(hash_units == ptr->hash && !strcmp(units, ptr->units)) { - format = ptr->format; - break; - } - } - - if(unlikely(format == UNITS_FORMAT_SECONDS || format == UNITS_FORMAT_SECONDS_AGO)) { - if(value == 0.0) { - snprintfz(value_string, value_string_len, "%s", "now"); - return value_string; - } - else if(isnan(value) || isinf(value)) { - snprintfz(value_string, value_string_len, "%s", "undefined"); - return value_string; - } - - const char *suffix = (format == UNITS_FORMAT_SECONDS_AGO)?" ago":""; - - size_t s = (size_t)value; - size_t d = s / 86400; - s = s % 86400; - - size_t h = s / 3600; - s = s % 3600; - - size_t m = s / 60; - s = s % 60; - - if(d) - snprintfz(value_string, value_string_len, "%zu %s %02zu:%02zu:%02zu%s", d, (d == 1)?"day":"days", h, m, s, suffix); - else - snprintfz(value_string, value_string_len, "%02zu:%02zu:%02zu%s", h, m, s, suffix); - - return value_string; - } - - else if(unlikely(format == UNITS_FORMAT_MINUTES || format == UNITS_FORMAT_MINUTES_AGO)) { - if(value == 0.0) { - snprintfz(value_string, value_string_len, "%s", "now"); - return value_string; - } - else if(isnan(value) || isinf(value)) { - snprintfz(value_string, value_string_len, "%s", "undefined"); - return value_string; - } - - const char *suffix = (format == UNITS_FORMAT_MINUTES_AGO)?" 
ago":""; - - size_t m = (size_t)value; - size_t d = m / (60 * 24); - m = m % (60 * 24); - - size_t h = m / 60; - m = m % 60; - - if(d) - snprintfz(value_string, value_string_len, "%zud %02zuh %02zum%s", d, h, m, suffix); - else - snprintfz(value_string, value_string_len, "%zuh %zum%s", h, m, suffix); - - return value_string; - } - - else if(unlikely(format == UNITS_FORMAT_HOURS || format == UNITS_FORMAT_HOURS_AGO)) { - if(value == 0.0) { - snprintfz(value_string, value_string_len, "%s", "now"); - return value_string; - } - else if(isnan(value) || isinf(value)) { - snprintfz(value_string, value_string_len, "%s", "undefined"); - return value_string; - } - - const char *suffix = (format == UNITS_FORMAT_HOURS_AGO)?" ago":""; - - size_t h = (size_t)value; - size_t d = h / 24; - h = h % 24; - - if(d) - snprintfz(value_string, value_string_len, "%zud %zuh%s", d, h, suffix); - else - snprintfz(value_string, value_string_len, "%zuh%s", h, suffix); - - return value_string; - } - - else if(unlikely(format == UNITS_FORMAT_ONOFF)) { - snprintfz(value_string, value_string_len, "%s", (value != 0.0)?"on":"off"); - return value_string; - } - - else if(unlikely(format == UNITS_FORMAT_UPDOWN)) { - snprintfz(value_string, value_string_len, "%s", (value != 0.0)?"up":"down"); - return value_string; - } - - else if(unlikely(format == UNITS_FORMAT_OKERROR)) { - snprintfz(value_string, value_string_len, "%s", (value != 0.0)?"ok":"error"); - return value_string; - } - - else if(unlikely(format == UNITS_FORMAT_OKFAILED)) { - snprintfz(value_string, value_string_len, "%s", (value != 0.0)?"ok":"failed"); - return value_string; - } - - else if(unlikely(format == UNITS_FORMAT_EMPTY)) - units = ""; - - else if(unlikely(format == UNITS_FORMAT_PERCENT)) - units = "%"; - - if(unlikely(isnan(value) || isinf(value))) { - strcpy(value_string, "-"); - return value_string; - } - - return format_value_with_precision_and_unit(value_string, value_string_len, value, units, precision); -} - -static struct badge_color { - const char *name; - uint32_t hash; - const char *color; -} badge_colors[] = { - - // colors from: - // https://github.com/badges/shields/blob/master/colorscheme.json - - { "brightgreen", 0, "4c1" }, - { "green", 0, "97CA00" }, - { "yellow", 0, "dfb317" }, - { "yellowgreen", 0, "a4a61d" }, - { "orange", 0, "fe7d37" }, - { "red", 0, "e05d44" }, - { "blue", 0, "007ec6" }, - { "grey", 0, "555" }, - { "gray", 0, "555" }, - { "lightgrey", 0, "9f9f9f" }, - { "lightgray", 0, "9f9f9f" }, - - // terminator - { NULL, 0, NULL } -}; - -static inline const char *color_map(const char *color, const char *def) { - static int max = -1; - int i; - - if(unlikely(max == -1)) { - for(i = 0; badge_colors[i].name ;i++) - badge_colors[i].hash = simple_hash(badge_colors[i].name); - - max = i; - } - - uint32_t hash = simple_hash(color); - - for(i = 0; i < max; i++) { - struct badge_color *ptr = &badge_colors[i]; - - if(hash == ptr->hash && !strcmp(color, ptr->name)) - return ptr->color; - } - - return def; -} - -typedef enum color_comparison { - COLOR_COMPARE_EQUAL, - COLOR_COMPARE_NOTEQUAL, - COLOR_COMPARE_LESS, - COLOR_COMPARE_LESSEQUAL, - COLOR_COMPARE_GREATER, - COLOR_COMPARE_GREATEREQUAL, -} BADGE_COLOR_COMPARISON; - -static inline void calc_colorz(const char *color, char *final, size_t len, NETDATA_DOUBLE value) { - if(isnan(value) || isinf(value)) - value = NAN; - - char color_buffer[256 + 1] = ""; - char value_buffer[256 + 1] = ""; - BADGE_COLOR_COMPARISON comparison = COLOR_COMPARE_GREATER; - - // example input: - // 
colormin|color:null... - - const char *c = color; - while(*c) { - char *dc = color_buffer, *dv = NULL; - size_t ci = 0, vi = 0; - - const char *t = c; - - while(*t && *t != '|') { - switch(*t) { - case '!': - if(t[1] == '=') t++; - comparison = COLOR_COMPARE_NOTEQUAL; - dv = value_buffer; - break; - - case '=': - case ':': - comparison = COLOR_COMPARE_EQUAL; - dv = value_buffer; - break; - - case '}': - case ')': - case '>': - if(t[1] == '=') { - comparison = COLOR_COMPARE_GREATEREQUAL; - t++; - } - else - comparison = COLOR_COMPARE_GREATER; - dv = value_buffer; - break; - - case '{': - case '(': - case '<': - if(t[1] == '=') { - comparison = COLOR_COMPARE_LESSEQUAL; - t++; - } - else if(t[1] == '>' || t[1] == ')' || t[1] == '}') { - comparison = COLOR_COMPARE_NOTEQUAL; - t++; - } - else - comparison = COLOR_COMPARE_LESS; - dv = value_buffer; - break; - - default: - if(dv) { - if(vi < 256) { - vi++; - *dv++ = *t; - } - } - else { - if(ci < 256) { - ci++; - *dc++ = *t; - } - } - break; - } - - t++; - } - - // prepare for next iteration - if(*t == '|') t++; - c = t; - - // do the math - *dc = '\0'; - if(dv) { - *dv = '\0'; - NETDATA_DOUBLE v; - - if(!*value_buffer || !strcmp(value_buffer, "null")) { - v = NAN; - } - else { - v = str2l(value_buffer); - if(isnan(v) || isinf(v)) - v = NAN; - } - - if(unlikely(isnan(value) || isnan(v))) { - if(isnan(value) && isnan(v)) - break; - } - else { - if (unlikely(comparison == COLOR_COMPARE_LESS && isless(value, v))) break; - else if (unlikely(comparison == COLOR_COMPARE_LESSEQUAL && islessequal(value, v))) break; - else if (unlikely(comparison == COLOR_COMPARE_GREATER && isgreater(value, v))) break; - else if (unlikely(comparison == COLOR_COMPARE_GREATEREQUAL && isgreaterequal(value, v))) break; - else if (unlikely(comparison == COLOR_COMPARE_EQUAL && !islessgreater(value, v))) break; - else if (unlikely(comparison == COLOR_COMPARE_NOTEQUAL && islessgreater(value, v))) break; - } - } - else - break; - } - - const char *b; - if(color_buffer[0]) - b = color_buffer; - else - b = color; - - strncpyz(final, b, len); -} - -// value + units -#define VALUE_STRING_SIZE 100 - -// label -#define LABEL_STRING_SIZE 200 - -// colors -#define COLOR_STRING_SIZE 100 - -static inline int allowed_hexa_char(char x) { - return ( (x >= '0' && x <= '9') || - (x >= 'a' && x <= 'f') || - (x >= 'A' && x <= 'F') - ); -} - -static int html_color_check(const char *str) { - int i = 0; - while(str[i]) { - if(!allowed_hexa_char(str[i])) - return 0; - if(unlikely(i >= 6)) - return 0; - i++; - } - // want to allow either RGB or RRGGBB - return ( i == 6 || i == 3 ); -} - -// Will parse color arg as #RRGGBB or #RGB or one of the colors -// from color_map hash table -// if parsing fails (argument error) it will return default color -// given as default parameter (def) -// in any case it will return either color in "RRGGBB" or "RGB" format as string -// or whatever is given as def (without checking - caller responsible to give sensible -// safely escaped default) as default if it fails -// in any case this function must always return something we can put directly in XML -// so no escaping is necessary anymore (with exception of default where caller is responsible) -// to give sensible default -#define BADGE_SVG_COLOR_ARG_MAXLEN 20 - -static const char *parse_color_argument(const char *arg, const char *def) -{ - if( !arg ) - return def; - size_t len = strnlen(arg, BADGE_SVG_COLOR_ARG_MAXLEN); - if( len < 2 || len >= BADGE_SVG_COLOR_ARG_MAXLEN ) - return def; - if( html_color_check(arg) ) - 
return arg; - return color_map(arg, def); -} - -void buffer_svg(BUFFER *wb, const char *label, - NETDATA_DOUBLE value, const char *units, const char *label_color, const char *value_color, int precision, int scale, uint32_t options, int fixed_width_lbl, int fixed_width_val, const char* text_color_lbl, const char* text_color_val) { - char value_color_buffer[COLOR_STRING_SIZE + 1] - , value_string[VALUE_STRING_SIZE + 1] - , label_escaped[LABEL_STRING_SIZE + 1] - , value_escaped[VALUE_STRING_SIZE + 1]; - - const char *label_color_parsed; - const char *value_color_parsed; - - double label_width = (double)fixed_width_lbl, value_width = (double)fixed_width_val, total_width; - double height = 20.0, font_size = 11.0, text_offset = 5.8, round_corner = 3.0; - - if(scale < 100) scale = 100; - - if(unlikely(!value_color || !*value_color)) - value_color = (isnan(value) || isinf(value))?"999":"4c1"; - - calc_colorz(value_color, value_color_buffer, COLOR_STRING_SIZE, value); - format_value_and_unit(value_string, VALUE_STRING_SIZE, (options & RRDR_OPTION_DISPLAY_ABS)? fabsndd(value):value, units, precision); - - if(fixed_width_lbl <= 0 || fixed_width_val <= 0) { - label_width = verdana11_width(label, font_size) + (BADGE_HORIZONTAL_PADDING * 2); - value_width = verdana11_width(value_string, font_size) + (BADGE_HORIZONTAL_PADDING * 2); - } - total_width = label_width + value_width; - - escape_xmlz(label_escaped, label, LABEL_STRING_SIZE); - escape_xmlz(value_escaped, value_string, VALUE_STRING_SIZE); - - label_color_parsed = parse_color_argument(label_color, "555"); - value_color_parsed = parse_color_argument(value_color_buffer, "555"); - - wb->content_type = CT_IMAGE_SVG_XML; - - total_width = total_width * scale / 100.0; - height = height * scale / 100.0; - font_size = font_size * scale / 100.0; - text_offset = text_offset * scale / 100.0; - label_width = label_width * scale / 100.0; - value_width = value_width * scale / 100.0; - round_corner = round_corner * scale / 100.0; - - // svg template from: - // https://raw.githubusercontent.com/badges/shields/master/templates/flat-template.svg - buffer_sprintf(wb, - "" - "" - "" - "" - "" - "" - "" - "" - "" - "", - total_width, height, - total_width, height, round_corner, - label_width, height, label_color_parsed); // 0 && fixed_width_val > 0) { - buffer_sprintf(wb, - "" - "" - "", - label_width, height); // ", - label_width, value_width, height, value_color_parsed); - - if(fixed_width_lbl > 0 && fixed_width_val > 0) { - buffer_sprintf(wb, - "" - "" - "", - label_width, value_width, height); - } - - buffer_sprintf(wb, - "" - "" - "" - "%s" - "%s" - "%s" - "%s" - "", - total_width, height, - font_size, - label_width / 2, ceil(height - text_offset), label_escaped, - label_width / 2, ceil(height - text_offset - 1.0), parse_color_argument(text_color_lbl, "fff"), label_escaped, - label_width + value_width / 2 -1, ceil(height - text_offset), value_escaped, - label_width + value_width / 2 -1, ceil(height - text_offset - 1.0), parse_color_argument(text_color_val, "fff"), value_escaped); - - if(fixed_width_lbl <= 0 || fixed_width_val <= 0){ - buffer_sprintf(wb, - "", - BADGE_HORIZONTAL_PADDING); - } - buffer_sprintf(wb, ""); -} - -#define BADGE_URL_ARG_LBL_COLOR "text_color_lbl" -#define BADGE_URL_ARG_VAL_COLOR "text_color_val" - -int web_client_api_request_v1_badge(RRDHOST *host, struct web_client *w, char *url) { - int ret = HTTP_RESP_BAD_REQUEST; - buffer_flush(w->response.data); - - BUFFER *dimensions = NULL; - - const char *chart = NULL - , *before_str = NULL - , 
*after_str = NULL - , *points_str = NULL - , *multiply_str = NULL - , *divide_str = NULL - , *label = NULL - , *units = NULL - , *label_color = NULL - , *value_color = NULL - , *refresh_str = NULL - , *precision_str = NULL - , *scale_str = NULL - , *alarm = NULL - , *fixed_width_lbl_str = NULL - , *fixed_width_val_str = NULL - , *text_color_lbl_str = NULL - , *text_color_val_str = NULL - , *group_options = NULL; - - int group = RRDR_GROUPING_AVERAGE; - uint32_t options = 0x00000000; - - const RRDCALC_ACQUIRED *rca = NULL; - RRDCALC *rc = NULL; - RRDSET *st = NULL; - - while(url) { - char *value = strsep_skip_consecutive_separators(&url, "&"); - if(!value || !*value) continue; - - char *name = strsep_skip_consecutive_separators(&value, "="); - if(!name || !*name) continue; - if(!value || !*value) continue; - - netdata_log_debug(D_WEB_CLIENT, "%llu: API v1 badge.svg query param '%s' with value '%s'", w->id, name, value); - - // name and value are now the parameters - // they are not null and not empty - - if(!strcmp(name, "chart")) chart = value; - else if(!strcmp(name, "dimension") || !strcmp(name, "dim") || !strcmp(name, "dimensions") || !strcmp(name, "dims")) { - if(!dimensions) - dimensions = buffer_create(100, &netdata_buffers_statistics.buffers_api); - - buffer_strcat(dimensions, "|"); - buffer_strcat(dimensions, value); - } - else if(!strcmp(name, "after")) after_str = value; - else if(!strcmp(name, "before")) before_str = value; - else if(!strcmp(name, "points")) points_str = value; - else if(!strcmp(name, "group_options")) group_options = value; - else if(!strcmp(name, "group")) { - group = time_grouping_parse(value, RRDR_GROUPING_AVERAGE); - } - else if(!strcmp(name, "options")) { - options |= rrdr_options_parse(value); - } - else if(!strcmp(name, "label")) label = value; - else if(!strcmp(name, "units")) units = value; - else if(!strcmp(name, "label_color")) label_color = value; - else if(!strcmp(name, "value_color")) value_color = value; - else if(!strcmp(name, "multiply")) multiply_str = value; - else if(!strcmp(name, "divide")) divide_str = value; - else if(!strcmp(name, "refresh")) refresh_str = value; - else if(!strcmp(name, "precision")) precision_str = value; - else if(!strcmp(name, "scale")) scale_str = value; - else if(!strcmp(name, "fixed_width_lbl")) fixed_width_lbl_str = value; - else if(!strcmp(name, "fixed_width_val")) fixed_width_val_str = value; - else if(!strcmp(name, "alarm")) alarm = value; - else if(!strcmp(name, BADGE_URL_ARG_LBL_COLOR)) text_color_lbl_str = value; - else if(!strcmp(name, BADGE_URL_ARG_VAL_COLOR)) text_color_val_str = value; - } - - int fixed_width_lbl = -1; - int fixed_width_val = -1; - - if(fixed_width_lbl_str && *fixed_width_lbl_str - && fixed_width_val_str && *fixed_width_val_str) { - fixed_width_lbl = str2i(fixed_width_lbl_str); - fixed_width_val = str2i(fixed_width_val_str); - } - - if(!chart || !*chart) { - buffer_no_cacheable(w->response.data); - buffer_sprintf(w->response.data, "No chart id is given at the request."); - goto cleanup; - } - - int scale = (scale_str && *scale_str)?str2i(scale_str):100; - - st = rrdset_find(host, chart); - if(!st) st = rrdset_find_byname(host, chart); - if(!st) { - buffer_no_cacheable(w->response.data); - buffer_svg(w->response.data, "chart not found", NAN, "", NULL, NULL, -1, scale, 0, -1, -1, NULL, NULL); - ret = HTTP_RESP_OK; - goto cleanup; - } - st->last_accessed_time_s = now_realtime_sec(); - - if(alarm) { - rca = rrdcalc_from_rrdset_get(st, alarm); - rc = rrdcalc_acquired_to_rrdcalc(rca); - - if 
(!rc) { - buffer_no_cacheable(w->response.data); - buffer_svg(w->response.data, "alarm not found", NAN, "", NULL, NULL, -1, scale, 0, -1, -1, NULL, NULL); - ret = HTTP_RESP_OK; - goto cleanup; - } - } - - long long multiply = (multiply_str && *multiply_str )?str2l(multiply_str):1; - long long divide = (divide_str && *divide_str )?str2l(divide_str):1; - long long before = (before_str && *before_str )?str2l(before_str):0; - long long after = (after_str && *after_str )?str2l(after_str):-st->update_every; - int points = (points_str && *points_str )?str2i(points_str):1; - int precision = (precision_str && *precision_str)?str2i(precision_str):-1; - - if(!multiply) multiply = 1; - if(!divide) divide = 1; - - int refresh = 0; - if(refresh_str && *refresh_str) { - if(!strcmp(refresh_str, "auto")) { - if(rc) refresh = rc->config.update_every; - else if(options & RRDR_OPTION_NOT_ALIGNED) - refresh = st->update_every; - else { - refresh = (int)(before - after); - if(refresh < 0) refresh = -refresh; - } - } - else { - refresh = str2i(refresh_str); - if(refresh < 0) refresh = -refresh; - } - } - - if(!label) { - if(alarm) { - char *s = (char *)alarm; - while(*s) { - if(*s == '_') *s = ' '; - s++; - } - label = alarm; - } - else if(dimensions) { - const char *dim = buffer_tostring(dimensions); - if(*dim == '|') dim++; - label = dim; - } - else - label = rrdset_name(st); - } - if(!units) { - if(alarm) { - if(rc->config.units) - units = rrdcalc_units(rc); - else - units = ""; - } - else if(options & RRDR_OPTION_PERCENTAGE) - units = "%"; - else - units = rrdset_units(st); - } - - netdata_log_debug(D_WEB_CLIENT, "%llu: API command 'badge.svg' for chart '%s', alarm '%s', dimensions '%s', after '%lld', before '%lld', points '%d', group '%d', options '0x%08x'" - , w->id - , chart - , alarm?alarm:"" - , (dimensions)?buffer_tostring(dimensions):"" - , after - , before - , points - , group - , options - ); - - if(rc) { - if (refresh > 0) { - buffer_sprintf(w->response.header, "Refresh: %d\r\n", refresh); - w->response.data->date = now_realtime_sec(); - w->response.data->expires = w->response.data->date + refresh; - buffer_cacheable(w->response.data); - } - else - buffer_no_cacheable(w->response.data); - - if(!value_color) { - switch(rc->status) { - case RRDCALC_STATUS_CRITICAL: - value_color = "red"; - break; - - case RRDCALC_STATUS_WARNING: - value_color = "orange"; - break; - - case RRDCALC_STATUS_CLEAR: - value_color = "brightgreen"; - break; - - case RRDCALC_STATUS_UNDEFINED: - value_color = "lightgrey"; - break; - - case RRDCALC_STATUS_UNINITIALIZED: - value_color = "#000"; - break; - - default: - value_color = "grey"; - break; - } - } - - buffer_svg(w->response.data, - label, - (isnan(rc->value)||isinf(rc->value)) ? rc->value : rc->value * multiply / divide, - units, - label_color, - value_color, - precision, - scale, - options, - fixed_width_lbl, - fixed_width_val, - text_color_lbl_str, - text_color_val_str - ); - ret = HTTP_RESP_OK; - } - else { - time_t latest_timestamp = 0; - int value_is_null = 1; - NETDATA_DOUBLE n = NAN; - ret = HTTP_RESP_INTERNAL_SERVER_ERROR; - - // if the collected value is too old, don't calculate its value - if (rrdset_last_entry_s(st) >= (now_realtime_sec() - (st->update_every * gap_when_lost_iterations_above))) - ret = rrdset2value_api_v1(st, w->response.data, &n, - (dimensions) ? 
buffer_tostring(dimensions) : NULL, - points, after, before, group, group_options, 0, options, - NULL, &latest_timestamp, - NULL, NULL, NULL, - &value_is_null, NULL, 0, 0, - QUERY_SOURCE_API_BADGE, STORAGE_PRIORITY_NORMAL); - - // if the value cannot be calculated, show empty badge - if (ret != HTTP_RESP_OK) { - buffer_no_cacheable(w->response.data); - value_is_null = 1; - n = 0; - ret = HTTP_RESP_OK; - } - else if (refresh > 0) { - buffer_sprintf(w->response.header, "Refresh: %d\r\n", refresh); - w->response.data->expires = now_realtime_sec() + refresh; - } - else buffer_no_cacheable(w->response.data); - - // render the badge - buffer_svg(w->response.data, - label, - (value_is_null)?NAN:(n * multiply / divide), - units, - label_color, - value_color, - precision, - scale, - options, - fixed_width_lbl, - fixed_width_val, - text_color_lbl_str, - text_color_val_str - ); - } - -cleanup: - rrdcalc_from_rrdset_release(st, rca); - buffer_free(dimensions); - return ret; -} diff --git a/src/web/api/badges/web_buffer_svg.h b/src/web/api/badges/web_buffer_svg.h deleted file mode 100644 index 71857811f..000000000 --- a/src/web/api/badges/web_buffer_svg.h +++ /dev/null @@ -1,18 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#ifndef NETDATA_WEB_BUFFER_SVG_H -#define NETDATA_WEB_BUFFER_SVG_H 1 - -#include "libnetdata/libnetdata.h" -#include "web/server/web_client.h" - -void buffer_svg(BUFFER *wb, const char *label, - NETDATA_DOUBLE value, const char *units, const char *label_color, const char *value_color, int precision, int scale, uint32_t options, int fixed_width_lbl, int fixed_width_val, const char* text_color_lbl, const char* text_color_val); -char *format_value_and_unit(char *value_string, size_t value_string_len, - NETDATA_DOUBLE value, const char *units, int precision); - -int web_client_api_request_v1_badge(struct rrdhost *host, struct web_client *w, char *url); - -#include "web/api/web_api_v1.h" - -#endif /* NETDATA_WEB_BUFFER_SVG_H */ diff --git a/src/web/api/exporters/README.md b/src/web/api/exporters/README.md index 206937967..47b443487 100644 --- a/src/web/api/exporters/README.md +++ b/src/web/api/exporters/README.md @@ -1,12 +1,3 @@ - - # Exporters TBD diff --git a/src/web/api/exporters/allmetrics.c b/src/web/api/exporters/allmetrics.c deleted file mode 100644 index 55179c0ae..000000000 --- a/src/web/api/exporters/allmetrics.c +++ /dev/null @@ -1,132 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include "allmetrics.h" - -struct prometheus_output_options { - char *name; - PROMETHEUS_OUTPUT_OPTIONS flag; -} prometheus_output_flags_root[] = { - { "names", PROMETHEUS_OUTPUT_NAMES }, - { "timestamps", PROMETHEUS_OUTPUT_TIMESTAMPS }, - { "variables", PROMETHEUS_OUTPUT_VARIABLES }, - { "oldunits", PROMETHEUS_OUTPUT_OLDUNITS }, - { "hideunits", PROMETHEUS_OUTPUT_HIDEUNITS }, - // terminator - { NULL, PROMETHEUS_OUTPUT_NONE }, -}; - -inline int web_client_api_request_v1_allmetrics(RRDHOST *host, struct web_client *w, char *url) { - int format = ALLMETRICS_SHELL; - const char *filter = NULL; - const char *prometheus_server = w->client_ip; - - uint32_t prometheus_exporting_options; - if (prometheus_exporter_instance) - prometheus_exporting_options = prometheus_exporter_instance->config.options; - else - prometheus_exporting_options = global_exporting_options; - - PROMETHEUS_OUTPUT_OPTIONS prometheus_output_options = - PROMETHEUS_OUTPUT_TIMESTAMPS | - ((prometheus_exporting_options & EXPORTING_OPTION_SEND_NAMES) ? 
PROMETHEUS_OUTPUT_NAMES : 0); - - const char *prometheus_prefix; - if (prometheus_exporter_instance) - prometheus_prefix = prometheus_exporter_instance->config.prefix; - else - prometheus_prefix = global_exporting_prefix; - - while(url) { - char *value = strsep_skip_consecutive_separators(&url, "&"); - if (!value || !*value) continue; - - char *name = strsep_skip_consecutive_separators(&value, "="); - if(!name || !*name) continue; - if(!value || !*value) continue; - - if(!strcmp(name, "format")) { - if(!strcmp(value, ALLMETRICS_FORMAT_SHELL)) - format = ALLMETRICS_SHELL; - else if(!strcmp(value, ALLMETRICS_FORMAT_PROMETHEUS)) - format = ALLMETRICS_PROMETHEUS; - else if(!strcmp(value, ALLMETRICS_FORMAT_PROMETHEUS_ALL_HOSTS)) - format = ALLMETRICS_PROMETHEUS_ALL_HOSTS; - else if(!strcmp(value, ALLMETRICS_FORMAT_JSON)) - format = ALLMETRICS_JSON; - else - format = 0; - } - else if(!strcmp(name, "filter")) { - filter = value; - } - else if(!strcmp(name, "server")) { - prometheus_server = value; - } - else if(!strcmp(name, "prefix")) { - prometheus_prefix = value; - } - else if(!strcmp(name, "data") || !strcmp(name, "source") || !strcmp(name, "data source") || !strcmp(name, "data-source") || !strcmp(name, "data_source") || !strcmp(name, "datasource")) { - prometheus_exporting_options = exporting_parse_data_source(value, prometheus_exporting_options); - } - else { - int i; - for(i = 0; prometheus_output_flags_root[i].name ; i++) { - if(!strcmp(name, prometheus_output_flags_root[i].name)) { - if(!strcmp(value, "yes") || !strcmp(value, "1") || !strcmp(value, "true")) - prometheus_output_options |= prometheus_output_flags_root[i].flag; - else { - prometheus_output_options &= ~prometheus_output_flags_root[i].flag; - } - - break; - } - } - } - } - - buffer_flush(w->response.data); - buffer_no_cacheable(w->response.data); - - switch(format) { - case ALLMETRICS_JSON: - w->response.data->content_type = CT_APPLICATION_JSON; - rrd_stats_api_v1_charts_allmetrics_json(host, filter, w->response.data); - return HTTP_RESP_OK; - - case ALLMETRICS_SHELL: - w->response.data->content_type = CT_TEXT_PLAIN; - rrd_stats_api_v1_charts_allmetrics_shell(host, filter, w->response.data); - return HTTP_RESP_OK; - - case ALLMETRICS_PROMETHEUS: - w->response.data->content_type = CT_PROMETHEUS; - rrd_stats_api_v1_charts_allmetrics_prometheus_single_host( - host - , filter - , w->response.data - , prometheus_server - , prometheus_prefix - , prometheus_exporting_options - , prometheus_output_options - ); - return HTTP_RESP_OK; - - case ALLMETRICS_PROMETHEUS_ALL_HOSTS: - w->response.data->content_type = CT_PROMETHEUS; - rrd_stats_api_v1_charts_allmetrics_prometheus_all_hosts( - host - , filter - , w->response.data - , prometheus_server - , prometheus_prefix - , prometheus_exporting_options - , prometheus_output_options - ); - return HTTP_RESP_OK; - - default: - w->response.data->content_type = CT_TEXT_PLAIN; - buffer_strcat(w->response.data, "Which format? 
'" ALLMETRICS_FORMAT_SHELL "', '" ALLMETRICS_FORMAT_PROMETHEUS "', '" ALLMETRICS_FORMAT_PROMETHEUS_ALL_HOSTS "' and '" ALLMETRICS_FORMAT_JSON "' are currently supported."); - return HTTP_RESP_BAD_REQUEST; - } -} diff --git a/src/web/api/exporters/allmetrics.h b/src/web/api/exporters/allmetrics.h deleted file mode 100644 index 3afc42e28..000000000 --- a/src/web/api/exporters/allmetrics.h +++ /dev/null @@ -1,12 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#ifndef NETDATA_API_ALLMETRICS_H -#define NETDATA_API_ALLMETRICS_H - -#include "web/api/formatters/rrd2json.h" -#include "shell/allmetrics_shell.h" -#include "web/server/web_client.h" - -int web_client_api_request_v1_allmetrics(RRDHOST *host, struct web_client *w, char *url); - -#endif //NETDATA_API_ALLMETRICS_H diff --git a/src/web/api/exporters/prometheus/README.md b/src/web/api/exporters/prometheus/README.md index 6c6bad3a7..f4b1602cb 100644 --- a/src/web/api/exporters/prometheus/README.md +++ b/src/web/api/exporters/prometheus/README.md @@ -1,12 +1,3 @@ - - # Prometheus exporter Read the Prometheus exporter documentation: [Using Netdata with Prometheus](/src/exporting/prometheus/README.md). diff --git a/src/web/api/exporters/shell/README.md b/src/web/api/exporters/shell/README.md index 86b774f1b..14faf1fbc 100644 --- a/src/web/api/exporters/shell/README.md +++ b/src/web/api/exporters/shell/README.md @@ -1,25 +1,16 @@ - - # Shell exporter Shell scripts can now query Netdata: ```sh -eval "$(curl -s 'http://localhost:19999/api/v1/allmetrics')" +eval "$(curl -s 'http://localhost:19999/api/v3/allmetrics')" ``` after this command, all the Netdata metrics are exposed to shell. Check: ```sh # source the metrics -eval "$(curl -s 'http://localhost:19999/api/v1/allmetrics')" +eval "$(curl -s 'http://localhost:19999/api/v3/allmetrics')" # let's see if there are variables exposed by Netdata for system.cpu set | grep "^NETDATA_SYSTEM_CPU" @@ -50,7 +41,7 @@ echo ${NETDATA_ALARM_SYSTEM_SWAP_USED_SWAP_STATUS} CLEAR # is it fast? -time curl -s 'http://localhost:19999/api/v1/allmetrics' >/dev/null +time curl -s 'http://localhost:19999/api/v3/allmetrics' >/dev/null real 0m0,070s user 0m0,000s diff --git a/src/web/api/exporters/shell/allmetrics_shell.c b/src/web/api/exporters/shell/allmetrics_shell.c deleted file mode 100644 index c8248c148..000000000 --- a/src/web/api/exporters/shell/allmetrics_shell.c +++ /dev/null @@ -1,170 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include "allmetrics_shell.h" - -// ---------------------------------------------------------------------------- -// BASH -// /api/v1/allmetrics?format=bash - -static inline size_t shell_name_copy(char *d, const char *s, size_t usable) { - size_t n; - - for(n = 0; *s && n < usable ; d++, s++, n++) { - register char c = *s; - - if(unlikely(!isalnum(c))) *d = '_'; - else *d = (char)toupper(c); - } - *d = '\0'; - - return n; -} - -#define SHELL_ELEMENT_MAX 100 - -void rrd_stats_api_v1_charts_allmetrics_shell(RRDHOST *host, const char *filter_string, BUFFER *wb) { - analytics_log_shell(); - SIMPLE_PATTERN *filter = simple_pattern_create(filter_string, NULL, SIMPLE_PATTERN_EXACT, true); - - // for each chart - RRDSET *st; - rrdset_foreach_read(st, host) { - if (filter && !simple_pattern_matches_string(filter, st->name)) - continue; - if (rrdset_is_available_for_viewers(st)) { - NETDATA_DOUBLE total = 0.0; - - char chart[SHELL_ELEMENT_MAX + 1]; - shell_name_copy(chart, st->name ? 
rrdset_name(st) : rrdset_id(st), SHELL_ELEMENT_MAX); - - buffer_sprintf(wb, "\n# chart: %s (name: %s)\n", rrdset_id(st), rrdset_name(st)); - - // for each dimension - RRDDIM *rd; - rrddim_foreach_read(rd, st) { - if(rd->collector.counter && !rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE)) { - char dimension[SHELL_ELEMENT_MAX + 1]; - shell_name_copy(dimension, rd->name?rrddim_name(rd):rrddim_id(rd), SHELL_ELEMENT_MAX); - - NETDATA_DOUBLE n = rd->collector.last_stored_value; - - if(isnan(n) || isinf(n)) - buffer_sprintf(wb, "NETDATA_%s_%s=\"\" # %s\n", chart, dimension, rrdset_units(st)); - else { - if(rd->multiplier < 0 || rd->divisor < 0) n = -n; - n = roundndd(n); - if(!rrddim_option_check(rd, RRDDIM_OPTION_HIDDEN)) total += n; - buffer_sprintf(wb, "NETDATA_%s_%s=\"" NETDATA_DOUBLE_FORMAT_ZERO "\" # %s\n", chart, dimension, n, rrdset_units(st)); - } - } - } - rrddim_foreach_done(rd); - - total = roundndd(total); - buffer_sprintf(wb, "NETDATA_%s_VISIBLETOTAL=\"" NETDATA_DOUBLE_FORMAT_ZERO "\" # %s\n", chart, total, rrdset_units(st)); - } - } - rrdset_foreach_done(st); - - buffer_strcat(wb, "\n# NETDATA ALARMS RUNNING\n"); - - RRDCALC *rc; - foreach_rrdcalc_in_rrdhost_read(host, rc) { - if(!rc->rrdset) continue; - - char chart[SHELL_ELEMENT_MAX + 1]; - shell_name_copy(chart, rc->rrdset->name?rrdset_name(rc->rrdset):rrdset_id(rc->rrdset), SHELL_ELEMENT_MAX); - - char alarm[SHELL_ELEMENT_MAX + 1]; - shell_name_copy(alarm, rrdcalc_name(rc), SHELL_ELEMENT_MAX); - - NETDATA_DOUBLE n = rc->value; - - if(isnan(n) || isinf(n)) - buffer_sprintf(wb, "NETDATA_ALARM_%s_%s_VALUE=\"\" # %s\n", chart, alarm, rrdcalc_units(rc)); - else { - n = roundndd(n); - buffer_sprintf(wb, "NETDATA_ALARM_%s_%s_VALUE=\"" NETDATA_DOUBLE_FORMAT_ZERO "\" # %s\n", chart, alarm, n, rrdcalc_units(rc)); - } - - buffer_sprintf(wb, "NETDATA_ALARM_%s_%s_STATUS=\"%s\"\n", chart, alarm, rrdcalc_status2string(rc->status)); - } - foreach_rrdcalc_in_rrdhost_done(rc); - - simple_pattern_free(filter); -} - -// ---------------------------------------------------------------------------- - -void rrd_stats_api_v1_charts_allmetrics_json(RRDHOST *host, const char *filter_string, BUFFER *wb) { - analytics_log_json(); - SIMPLE_PATTERN *filter = simple_pattern_create(filter_string, NULL, SIMPLE_PATTERN_EXACT, true); - - buffer_strcat(wb, "{"); - - size_t chart_counter = 0; - size_t dimension_counter = 0; - - // for each chart - RRDSET *st; - rrdset_foreach_read(st, host) { - if (filter && !(simple_pattern_matches_string(filter, st->id) || simple_pattern_matches_string(filter, st->name))) - continue; - - if(rrdset_is_available_for_viewers(st)) { - buffer_sprintf( - wb, - "%s\n" - "\t\"%s\": {\n" - "\t\t\"name\":\"%s\",\n" - "\t\t\"family\":\"%s\",\n" - "\t\t\"context\":\"%s\",\n" - "\t\t\"units\":\"%s\",\n" - "\t\t\"last_updated\": %"PRId64",\n" - "\t\t\"dimensions\": {", - chart_counter ? "," : "", - rrdset_id(st), - rrdset_name(st), - rrdset_family(st), - rrdset_context(st), - rrdset_units(st), - (int64_t) rrdset_last_entry_s(st)); - - chart_counter++; - dimension_counter = 0; - - // for each dimension - RRDDIM *rd; - rrddim_foreach_read(rd, st) { - if(rd->collector.counter && !rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE)) { - buffer_sprintf( - wb, - "%s\n" - "\t\t\t\"%s\": {\n" - "\t\t\t\t\"name\": \"%s\",\n" - "\t\t\t\t\"value\": ", - dimension_counter ? 
"," : "", - rrddim_id(rd), - rrddim_name(rd)); - - if(isnan(rd->collector.last_stored_value)) - buffer_strcat(wb, "null"); - else - buffer_sprintf(wb, NETDATA_DOUBLE_FORMAT, rd->collector.last_stored_value); - - buffer_strcat(wb, "\n\t\t\t}"); - - dimension_counter++; - } - } - rrddim_foreach_done(rd); - - buffer_strcat(wb, "\n\t\t}\n\t}"); - } - } - rrdset_foreach_done(st); - - buffer_strcat(wb, "\n}"); - simple_pattern_free(filter); -} - diff --git a/src/web/api/exporters/shell/allmetrics_shell.h b/src/web/api/exporters/shell/allmetrics_shell.h deleted file mode 100644 index d6598e08d..000000000 --- a/src/web/api/exporters/shell/allmetrics_shell.h +++ /dev/null @@ -1,21 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#ifndef NETDATA_API_ALLMETRICS_SHELL_H -#define NETDATA_API_ALLMETRICS_SHELL_H - -#include "../allmetrics.h" - -#define ALLMETRICS_FORMAT_SHELL "shell" -#define ALLMETRICS_FORMAT_PROMETHEUS "prometheus" -#define ALLMETRICS_FORMAT_PROMETHEUS_ALL_HOSTS "prometheus_all_hosts" -#define ALLMETRICS_FORMAT_JSON "json" - -#define ALLMETRICS_SHELL 1 -#define ALLMETRICS_PROMETHEUS 2 -#define ALLMETRICS_JSON 3 -#define ALLMETRICS_PROMETHEUS_ALL_HOSTS 4 - -void rrd_stats_api_v1_charts_allmetrics_json(RRDHOST *host, const char *filter_string, BUFFER *wb); -void rrd_stats_api_v1_charts_allmetrics_shell(RRDHOST *host, const char *filter_string, BUFFER *wb); - -#endif //NETDATA_API_ALLMETRICS_SHELL_H diff --git a/src/web/api/formatters/README.md b/src/web/api/formatters/README.md index 6347f5fb4..27d80d403 100644 --- a/src/web/api/formatters/README.md +++ b/src/web/api/formatters/README.md @@ -1,12 +1,3 @@ - - # Query formatting API data queries need to be formatted before returned to the caller. diff --git a/src/web/api/formatters/charts2json.c b/src/web/api/formatters/charts2json.c index 0b45d77c4..9407f224b 100644 --- a/src/web/api/formatters/charts2json.c +++ b/src/web/api/formatters/charts2json.c @@ -37,7 +37,7 @@ const char* get_release_channel() { } void charts2json(RRDHOST *host, BUFFER *wb) { - static char *custom_dashboard_info_js_filename = NULL; + static const char *custom_dashboard_info_js_filename = NULL; size_t c = 0, dimensions = 0, memory = 0, alarms = 0; RRDSET *st; diff --git a/src/web/api/formatters/csv/README.md b/src/web/api/formatters/csv/README.md index e60aab57b..435d23808 100644 --- a/src/web/api/formatters/csv/README.md +++ b/src/web/api/formatters/csv/README.md @@ -1,12 +1,3 @@ - - # CSV formatter The CSV formatter presents [results of database queries](/src/web/api/queries/README.md) in the following formats: diff --git a/src/web/api/formatters/json/README.md b/src/web/api/formatters/json/README.md index 4137b0372..b1d025559 100644 --- a/src/web/api/formatters/json/README.md +++ b/src/web/api/formatters/json/README.md @@ -1,12 +1,3 @@ - - # JSON formatter The CSV formatter presents [results of database queries](/src/web/api/queries/README.md) in the following formats: diff --git a/src/web/api/formatters/rrd2json.c b/src/web/api/formatters/rrd2json.c index 81c9ad5c7..a80275487 100644 --- a/src/web/api/formatters/rrd2json.c +++ b/src/web/api/formatters/rrd2json.c @@ -10,46 +10,6 @@ void rrd_stats_api_v1_chart(RRDSET *st, BUFFER *wb) buffer_json_finalize(wb); } -const char *rrdr_format_to_string(DATASOURCE_FORMAT format) { - switch(format) { - case DATASOURCE_JSON: - return DATASOURCE_FORMAT_JSON; - - case DATASOURCE_JSON2: - return DATASOURCE_FORMAT_JSON2; - - case DATASOURCE_DATATABLE_JSON: - return DATASOURCE_FORMAT_DATATABLE_JSON; - - case 
DATASOURCE_DATATABLE_JSONP: - return DATASOURCE_FORMAT_DATATABLE_JSONP; - - case DATASOURCE_JSONP: - return DATASOURCE_FORMAT_JSONP; - - case DATASOURCE_SSV: - return DATASOURCE_FORMAT_SSV; - - case DATASOURCE_CSV: - return DATASOURCE_FORMAT_CSV; - - case DATASOURCE_TSV: - return DATASOURCE_FORMAT_TSV; - - case DATASOURCE_HTML: - return DATASOURCE_FORMAT_HTML; - - case DATASOURCE_JS_ARRAY: - return DATASOURCE_FORMAT_JS_ARRAY; - - case DATASOURCE_SSV_COMMA: - return DATASOURCE_FORMAT_SSV_COMMA; - - default: - return "unknown"; - } -} - int rrdset2value_api_v1( RRDSET *st , BUFFER *wb diff --git a/src/web/api/formatters/rrd2json.h b/src/web/api/formatters/rrd2json.h index f0c0c39ba..cf3492ff2 100644 --- a/src/web/api/formatters/rrd2json.h +++ b/src/web/api/formatters/rrd2json.h @@ -3,26 +3,8 @@ #ifndef NETDATA_RRD2JSON_H #define NETDATA_RRD2JSON_H 1 -// type of JSON generations -typedef enum { - DATASOURCE_JSON = 0, - DATASOURCE_DATATABLE_JSON = 1, - DATASOURCE_DATATABLE_JSONP = 2, - DATASOURCE_SSV = 3, - DATASOURCE_CSV = 4, - DATASOURCE_JSONP = 5, - DATASOURCE_TSV = 6, - DATASOURCE_HTML = 7, - DATASOURCE_JS_ARRAY = 8, - DATASOURCE_SSV_COMMA = 9, - DATASOURCE_CSV_JSON_ARRAY = 10, - DATASOURCE_CSV_MARKDOWN = 11, - DATASOURCE_JSON2 = 12, -} DATASOURCE_FORMAT; +#include "web/api/web_api.h" -#include "web/api/web_api_v1.h" - -#include "web/api/exporters/allmetrics.h" #include "web/api/queries/rrdr.h" #include "web/api/formatters/csv/csv.h" @@ -38,22 +20,7 @@ typedef enum { #define HOSTNAME_MAX 1024 -#define DATASOURCE_FORMAT_JSON "json" -#define DATASOURCE_FORMAT_JSON2 "json2" -#define DATASOURCE_FORMAT_DATATABLE_JSON "datatable" -#define DATASOURCE_FORMAT_DATATABLE_JSONP "datasource" -#define DATASOURCE_FORMAT_JSONP "jsonp" -#define DATASOURCE_FORMAT_SSV "ssv" -#define DATASOURCE_FORMAT_CSV "csv" -#define DATASOURCE_FORMAT_TSV "tsv" -#define DATASOURCE_FORMAT_HTML "html" -#define DATASOURCE_FORMAT_JS_ARRAY "array" -#define DATASOURCE_FORMAT_SSV_COMMA "ssvcomma" -#define DATASOURCE_FORMAT_CSV_JSON_ARRAY "csvjsonarray" -#define DATASOURCE_FORMAT_CSV_MARKDOWN "markdown" - void rrd_stats_api_v1_chart(RRDSET *st, BUFFER *wb); -const char *rrdr_format_to_string(DATASOURCE_FORMAT format); int data_query_execute(ONEWAYALLOC *owa, BUFFER *wb, struct query_target *qt, time_t *latest_timestamp); diff --git a/src/web/api/formatters/ssv/README.md b/src/web/api/formatters/ssv/README.md index b32494014..4d07fe5b7 100644 --- a/src/web/api/formatters/ssv/README.md +++ b/src/web/api/formatters/ssv/README.md @@ -1,12 +1,3 @@ - - # SSV formatter The SSV formatter sums all dimensions in [results of database queries](/src/web/api/queries/README.md) diff --git a/src/web/api/formatters/value/README.md b/src/web/api/formatters/value/README.md index 8a2df23c6..1f0af813d 100644 --- a/src/web/api/formatters/value/README.md +++ b/src/web/api/formatters/value/README.md @@ -1,12 +1,3 @@ - - # Value formatter The Value formatter presents [results of database queries](/src/web/api/queries/README.md) as a single value. 
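For context (not introduced by this patch): on the data API, the `format` parameter selects among the formatters documented above. A minimal sketch, assuming a local agent on the default port; the chart name is illustrative:

```sh
# the same query rendered by three of the formatters named above
curl -s 'http://localhost:19999/api/v1/data?chart=system.cpu&after=-10&format=json'
curl -s 'http://localhost:19999/api/v1/data?chart=system.cpu&after=-10&format=csv'

# with points=1, ssv reduces the result to a single value
curl -s 'http://localhost:19999/api/v1/data?chart=system.cpu&after=-10&points=1&format=ssv'
```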
diff --git a/src/web/api/functions/function-bearer_get_token.c b/src/web/api/functions/function-bearer_get_token.c new file mode 100644 index 000000000..8f14e68ae --- /dev/null +++ b/src/web/api/functions/function-bearer_get_token.c @@ -0,0 +1,81 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "function-bearer_get_token.h" +#include "../v2/api_v2_calls.h" + +struct bearer_token_request { + nd_uuid_t claim_id; + nd_uuid_t machine_guid; + nd_uuid_t node_id; + HTTP_USER_ROLE user_role; + HTTP_ACCESS access; + nd_uuid_t cloud_account_id; + STRING *client_name; +}; + +static bool bearer_parse_json_payload(json_object *jobj, const char *path, void *data, BUFFER *error) { + struct bearer_token_request *rq = data; + JSONC_PARSE_TXT2UUID_OR_ERROR_AND_RETURN(jobj, path, "claim_id", rq->claim_id, error, true); + JSONC_PARSE_TXT2UUID_OR_ERROR_AND_RETURN(jobj, path, "machine_guid", rq->machine_guid, error, true); + JSONC_PARSE_TXT2UUID_OR_ERROR_AND_RETURN(jobj, path, "node_id", rq->node_id, error, true); + JSONC_PARSE_TXT2ENUM_OR_ERROR_AND_RETURN(jobj, path, "user_role", http_user_role2id, rq->user_role, error, true); + JSONC_PARSE_ARRAY_OF_TXT2BITMAP_OR_ERROR_AND_RETURN(jobj, path, "access", http_access2id_one, rq->access, error, true); + JSONC_PARSE_TXT2UUID_OR_ERROR_AND_RETURN(jobj, path, "cloud_account_id", rq->cloud_account_id, error, true); + JSONC_PARSE_TXT2STRING_OR_ERROR_AND_RETURN(jobj, path, "client_name", rq->client_name, error, true); + return true; +} + +int function_bearer_get_token(BUFFER *wb, const char *function __maybe_unused, BUFFER *payload, const char *source) { + if(!request_source_is_cloud(source)) + return rrd_call_function_error( + wb, "Bearer tokens can only be provided via NC.", HTTP_RESP_BAD_REQUEST); + + int code; + struct bearer_token_request rq = { 0 }; + CLEAN_JSON_OBJECT *jobj = json_parse_function_payload_or_error(wb, payload, &code, bearer_parse_json_payload, &rq); + if(!jobj || code != HTTP_RESP_OK) { + string_freez(rq.client_name); + return code; + } + + char claim_id[UUID_STR_LEN]; + uuid_unparse_lower(rq.claim_id, claim_id); + + char machine_guid[UUID_STR_LEN]; + uuid_unparse_lower(rq.machine_guid, machine_guid); + + char node_id[UUID_STR_LEN]; + uuid_unparse_lower(rq.node_id, node_id); + + int rc = bearer_get_token_json_response(wb, localhost, claim_id, machine_guid, node_id, + rq.user_role, rq.access, rq.cloud_account_id, + string2str(rq.client_name)); + + string_freez(rq.client_name); + return rc; +} + +int call_function_bearer_get_token(RRDHOST *host, struct web_client *w, const char *claim_id, const char *machine_guid, const char *node_id) { + CLEAN_BUFFER *payload = buffer_create(0, NULL); + buffer_json_initialize(payload, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_MINIFY); + buffer_json_member_add_string(payload, "claim_id", claim_id); + buffer_json_member_add_string(payload, "machine_guid", machine_guid); + buffer_json_member_add_string(payload, "node_id", node_id); + buffer_json_member_add_string(payload, "user_role", http_id2user_role(w->user_role)); + http_access2buffer_json_array(payload, "access", w->access); + buffer_json_member_add_uuid(payload, "cloud_account_id", w->auth.cloud_account_id); + buffer_json_member_add_string(payload, "client_name", w->auth.client_name); + buffer_json_finalize(payload); + + CLEAN_BUFFER *source = buffer_create(0, NULL); + web_client_api_request_vX_source_to_buffer(w, source); + + char transaction_str[UUID_COMPACT_STR_LEN]; + uuid_unparse_lower_compact(w->transaction, transaction_str); + return 
rrd_function_run(host, w->response.data, 10, + w->access, RRDFUNCTIONS_BEARER_GET_TOKEN, true, + transaction_str, NULL, NULL, + NULL, NULL, + NULL, NULL, + payload, buffer_tostring(source), true); +} diff --git a/src/web/api/functions/function-bearer_get_token.h b/src/web/api/functions/function-bearer_get_token.h new file mode 100644 index 000000000..03481ebb8 --- /dev/null +++ b/src/web/api/functions/function-bearer_get_token.h @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_FUNCTION_BEARER_GET_TOKEN_H +#define NETDATA_FUNCTION_BEARER_GET_TOKEN_H + +#include "daemon/common.h" + +int function_bearer_get_token(BUFFER *wb, const char *function, BUFFER *payload, const char *source); +int call_function_bearer_get_token(RRDHOST *host, struct web_client *w, const char *claim_id, const char *machine_guid, const char *node_id); + +#define RRDFUNCTIONS_BEARER_GET_TOKEN "bearer_get_token" +#define RRDFUNCTIONS_BEARER_GET_TOKEN_HELP "Get a bearer token for authenticated direct access to the agent" + +#endif //NETDATA_FUNCTION_BEARER_GET_TOKEN_H diff --git a/src/web/api/functions/function-progress.c b/src/web/api/functions/function-progress.c new file mode 100644 index 000000000..052a9020a --- /dev/null +++ b/src/web/api/functions/function-progress.c @@ -0,0 +1,8 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "function-progress.h" + +int function_progress(BUFFER *wb, const char *function __maybe_unused, BUFFER *payload __maybe_unused, const char *source __maybe_unused) { + return progress_function_result(wb, rrdhost_hostname(localhost)); +} + diff --git a/src/web/api/functions/function-progress.h b/src/web/api/functions/function-progress.h new file mode 100644 index 000000000..7d2d10b9d --- /dev/null +++ b/src/web/api/functions/function-progress.h @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_FUNCTION_PROGRESS_H +#define NETDATA_FUNCTION_PROGRESS_H + +#include "daemon/common.h" + +int function_progress(BUFFER *wb, const char *function, BUFFER *payload, const char *source); + +#endif //NETDATA_FUNCTION_PROGRESS_H diff --git a/src/web/api/functions/function-streaming.c b/src/web/api/functions/function-streaming.c new file mode 100644 index 000000000..11e970441 --- /dev/null +++ b/src/web/api/functions/function-streaming.c @@ -0,0 +1,627 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "function-streaming.h" + +int function_streaming(BUFFER *wb, const char *function __maybe_unused, BUFFER *payload __maybe_unused, const char *source __maybe_unused) { + + time_t now = now_realtime_sec(); + + buffer_flush(wb); + wb->content_type = CT_APPLICATION_JSON; + buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT); + + buffer_json_member_add_string(wb, "hostname", rrdhost_hostname(localhost)); + buffer_json_member_add_uint64(wb, "status", HTTP_RESP_OK); + buffer_json_member_add_string(wb, "type", "table"); + buffer_json_member_add_time_t(wb, "update_every", 1); + buffer_json_member_add_boolean(wb, "has_history", false); + buffer_json_member_add_string(wb, "help", RRDFUNCTIONS_STREAMING_HELP); + buffer_json_member_add_array(wb, "data"); + + size_t max_sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_MAX] = { 0 }; + size_t max_db_metrics = 0, max_db_instances = 0, max_db_contexts = 0; + size_t max_collection_replication_instances = 0, max_streaming_replication_instances = 0; + size_t max_ml_anomalous = 0, max_ml_normal = 0, max_ml_trained = 0, max_ml_pending = 0, max_ml_silenced = 0; + { + 
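+        // One pass over all hosts: emit one data row per host while tracking
+        // per-column maxima, used further below as scaling hints ("max") when
+        // the table columns are defined.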
RRDHOST *host; + dfe_start_read(rrdhost_root_index, host) { + RRDHOST_STATUS s; + rrdhost_status(host, now, &s); + buffer_json_add_array_item_array(wb); + + if(s.db.metrics > max_db_metrics) + max_db_metrics = s.db.metrics; + + if(s.db.instances > max_db_instances) + max_db_instances = s.db.instances; + + if(s.db.contexts > max_db_contexts) + max_db_contexts = s.db.contexts; + + if(s.ingest.replication.instances > max_collection_replication_instances) + max_collection_replication_instances = s.ingest.replication.instances; + + if(s.stream.replication.instances > max_streaming_replication_instances) + max_streaming_replication_instances = s.stream.replication.instances; + + for(int i = 0; i < STREAM_TRAFFIC_TYPE_MAX ;i++) { + if (s.stream.sent_bytes_on_this_connection_per_type[i] > + max_sent_bytes_on_this_connection_per_type[i]) + max_sent_bytes_on_this_connection_per_type[i] = + s.stream.sent_bytes_on_this_connection_per_type[i]; + } + + // retention + buffer_json_add_array_item_string(wb, rrdhost_hostname(s.host)); // Node + buffer_json_add_array_item_uint64(wb, s.db.first_time_s * MSEC_PER_SEC); // dbFrom + buffer_json_add_array_item_uint64(wb, s.db.last_time_s * MSEC_PER_SEC); // dbTo + + if(s.db.first_time_s && s.db.last_time_s && s.db.last_time_s > s.db.first_time_s) + buffer_json_add_array_item_uint64(wb, s.db.last_time_s - s.db.first_time_s); // dbDuration + else + buffer_json_add_array_item_string(wb, NULL); // dbDuration + + buffer_json_add_array_item_uint64(wb, s.db.metrics); // dbMetrics + buffer_json_add_array_item_uint64(wb, s.db.instances); // dbInstances + buffer_json_add_array_item_uint64(wb, s.db.contexts); // dbContexts + + // statuses + buffer_json_add_array_item_string(wb, rrdhost_ingest_status_to_string(s.ingest.status)); // InStatus + buffer_json_add_array_item_string(wb, rrdhost_streaming_status_to_string(s.stream.status)); // OutStatus + buffer_json_add_array_item_string(wb, rrdhost_ml_status_to_string(s.ml.status)); // MLStatus + + // collection + if(s.ingest.since) { + buffer_json_add_array_item_uint64(wb, s.ingest.since * MSEC_PER_SEC); // InSince + buffer_json_add_array_item_time_t(wb, s.now - s.ingest.since); // InAge + } + else { + buffer_json_add_array_item_string(wb, NULL); // InSince + buffer_json_add_array_item_string(wb, NULL); // InAge + } + buffer_json_add_array_item_string(wb, stream_handshake_error_to_string(s.ingest.reason)); // InReason + buffer_json_add_array_item_uint64(wb, s.ingest.hops); // InHops + buffer_json_add_array_item_double(wb, s.ingest.replication.completion); // InReplCompletion + buffer_json_add_array_item_uint64(wb, s.ingest.replication.instances); // InReplInstances + buffer_json_add_array_item_string(wb, s.ingest.peers.local.ip); // InLocalIP + buffer_json_add_array_item_uint64(wb, s.ingest.peers.local.port); // InLocalPort + buffer_json_add_array_item_string(wb, s.ingest.peers.peer.ip); // InRemoteIP + buffer_json_add_array_item_uint64(wb, s.ingest.peers.peer.port); // InRemotePort + buffer_json_add_array_item_string(wb, s.ingest.ssl ? 
"SSL" : "PLAIN"); // InSSL + stream_capabilities_to_json_array(wb, s.ingest.capabilities, NULL); // InCapabilities + + // streaming + if(s.stream.since) { + buffer_json_add_array_item_uint64(wb, s.stream.since * MSEC_PER_SEC); // OutSince + buffer_json_add_array_item_time_t(wb, s.now - s.stream.since); // OutAge + } + else { + buffer_json_add_array_item_string(wb, NULL); // OutSince + buffer_json_add_array_item_string(wb, NULL); // OutAge + } + buffer_json_add_array_item_string(wb, stream_handshake_error_to_string(s.stream.reason)); // OutReason + buffer_json_add_array_item_uint64(wb, s.stream.hops); // OutHops + buffer_json_add_array_item_double(wb, s.stream.replication.completion); // OutReplCompletion + buffer_json_add_array_item_uint64(wb, s.stream.replication.instances); // OutReplInstances + buffer_json_add_array_item_string(wb, s.stream.peers.local.ip); // OutLocalIP + buffer_json_add_array_item_uint64(wb, s.stream.peers.local.port); // OutLocalPort + buffer_json_add_array_item_string(wb, s.stream.peers.peer.ip); // OutRemoteIP + buffer_json_add_array_item_uint64(wb, s.stream.peers.peer.port); // OutRemotePort + buffer_json_add_array_item_string(wb, s.stream.ssl ? "SSL" : "PLAIN"); // OutSSL + buffer_json_add_array_item_string(wb, s.stream.compression ? "COMPRESSED" : "UNCOMPRESSED"); // OutCompression + stream_capabilities_to_json_array(wb, s.stream.capabilities, NULL); // OutCapabilities + buffer_json_add_array_item_uint64(wb, s.stream.sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_DATA]); + buffer_json_add_array_item_uint64(wb, s.stream.sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_METADATA]); + buffer_json_add_array_item_uint64(wb, s.stream.sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_REPLICATION]); + buffer_json_add_array_item_uint64(wb, s.stream.sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_FUNCTIONS]); + + buffer_json_add_array_item_array(wb); // OutAttemptHandshake + time_t last_attempt = 0; + for(struct rrdpush_destinations *d = host->destinations; d ; d = d->next) { + if(d->since > last_attempt) + last_attempt = d->since; + + buffer_json_add_array_item_string(wb, stream_handshake_error_to_string(d->reason)); + } + buffer_json_array_close(wb); // // OutAttemptHandshake + + if(!last_attempt) { + buffer_json_add_array_item_string(wb, NULL); // OutAttemptSince + buffer_json_add_array_item_string(wb, NULL); // OutAttemptAge + } + else { + buffer_json_add_array_item_uint64(wb, last_attempt * 1000); // OutAttemptSince + buffer_json_add_array_item_time_t(wb, s.now - last_attempt); // OutAttemptAge + } + + // ML + if(s.ml.status == RRDHOST_ML_STATUS_RUNNING) { + buffer_json_add_array_item_uint64(wb, s.ml.metrics.anomalous); // MlAnomalous + buffer_json_add_array_item_uint64(wb, s.ml.metrics.normal); // MlNormal + buffer_json_add_array_item_uint64(wb, s.ml.metrics.trained); // MlTrained + buffer_json_add_array_item_uint64(wb, s.ml.metrics.pending); // MlPending + buffer_json_add_array_item_uint64(wb, s.ml.metrics.silenced); // MlSilenced + + if(s.ml.metrics.anomalous > max_ml_anomalous) + max_ml_anomalous = s.ml.metrics.anomalous; + + if(s.ml.metrics.normal > max_ml_normal) + max_ml_normal = s.ml.metrics.normal; + + if(s.ml.metrics.trained > max_ml_trained) + max_ml_trained = s.ml.metrics.trained; + + if(s.ml.metrics.pending > max_ml_pending) + max_ml_pending = s.ml.metrics.pending; + + if(s.ml.metrics.silenced > max_ml_silenced) + max_ml_silenced = s.ml.metrics.silenced; + + } + else { + buffer_json_add_array_item_string(wb, 
NULL); // MlAnomalous + buffer_json_add_array_item_string(wb, NULL); // MlNormal + buffer_json_add_array_item_string(wb, NULL); // MlTrained + buffer_json_add_array_item_string(wb, NULL); // MlPending + buffer_json_add_array_item_string(wb, NULL); // MlSilenced + } + + // close + buffer_json_array_close(wb); + } + dfe_done(host); + } + buffer_json_array_close(wb); // data + buffer_json_member_add_object(wb, "columns"); + { + size_t field_id = 0; + + // Node + buffer_rrdf_table_add_field(wb, field_id++, "Node", "Node's Hostname", + RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_UNIQUE_KEY | RRDF_FIELD_OPTS_STICKY, + NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "dbFrom", "DB Data Retention From", + RRDF_FIELD_TYPE_TIMESTAMP, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_DATETIME_MS, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_MIN, RRDF_FIELD_FILTER_RANGE, + RRDF_FIELD_OPTS_NONE, NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "dbTo", "DB Data Retention To", + RRDF_FIELD_TYPE_TIMESTAMP, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_DATETIME_MS, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_MAX, RRDF_FIELD_FILTER_RANGE, + RRDF_FIELD_OPTS_NONE, NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "dbDuration", "DB Data Retention Duration", + RRDF_FIELD_TYPE_DURATION, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_DURATION_S, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_MAX, RRDF_FIELD_FILTER_RANGE, + RRDF_FIELD_OPTS_VISIBLE, NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "dbMetrics", "Time-series Metrics in the DB", + RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, + 0, NULL, (double)max_db_metrics, RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, + RRDF_FIELD_OPTS_VISIBLE, NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "dbInstances", "Instances in the DB", + RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, + 0, NULL, (double)max_db_instances, RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, + RRDF_FIELD_OPTS_VISIBLE, NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "dbContexts", "Contexts in the DB", + RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, + 0, NULL, (double)max_db_contexts, RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, + RRDF_FIELD_OPTS_VISIBLE, NULL); + + // --- statuses --- + + buffer_rrdf_table_add_field(wb, field_id++, "InStatus", "Data Collection Online Status", + RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_VISIBLE, NULL); + + + buffer_rrdf_table_add_field(wb, field_id++, "OutStatus", "Streaming Online Status", + RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_VISIBLE, NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "MlStatus", "ML Status", + RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + 
RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_VISIBLE, NULL); + + // --- collection --- + + buffer_rrdf_table_add_field(wb, field_id++, "InSince", "Last Data Collection Status Change", + RRDF_FIELD_TYPE_TIMESTAMP, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_DATETIME_MS, + 0, NULL, NAN, RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_MIN, RRDF_FIELD_FILTER_RANGE, + RRDF_FIELD_OPTS_NONE, NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "InAge", "Last Data Collection Online Status Change Age", + RRDF_FIELD_TYPE_DURATION, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_DURATION_S, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_MAX, RRDF_FIELD_FILTER_RANGE, + RRDF_FIELD_OPTS_VISIBLE, NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "InReason", "Data Collection Online Status Reason", + RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_VISIBLE, NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "InHops", "Data Collection Distance Hops from Origin Node", + RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_MIN, RRDF_FIELD_FILTER_RANGE, + RRDF_FIELD_OPTS_VISIBLE, NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "InReplCompletion", "Inbound Replication Completion", + RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, + 1, "%", 100.0, RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_MIN, RRDF_FIELD_FILTER_RANGE, + RRDF_FIELD_OPTS_VISIBLE, NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "InReplInstances", "Inbound Replicating Instances", + RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, + 0, "instances", (double)max_collection_replication_instances, RRDF_FIELD_SORT_DESCENDING, + NULL, + RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, + RRDF_FIELD_OPTS_NONE, NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "InLocalIP", "Inbound Local IP", + RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_NONE, NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "InLocalPort", "Inbound Local Port", + RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_RANGE, + RRDF_FIELD_OPTS_NONE, NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "InRemoteIP", "Inbound Remote IP", + RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_NONE, NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "InRemotePort", "Inbound Remote Port", + RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_RANGE, + RRDF_FIELD_OPTS_NONE, NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "InSSL", "Inbound SSL Connection", + RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, + 
RRDF_FIELD_OPTS_NONE, NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "InCapabilities", "Inbound Connection Capabilities", + RRDF_FIELD_TYPE_ARRAY, RRDF_FIELD_VISUAL_PILL, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_NONE, NULL); + + // --- streaming --- + + buffer_rrdf_table_add_field(wb, field_id++, "OutSince", "Last Streaming Status Change", + RRDF_FIELD_TYPE_TIMESTAMP, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_DATETIME_MS, + 0, NULL, NAN, RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_MAX, RRDF_FIELD_FILTER_RANGE, + RRDF_FIELD_OPTS_NONE, NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "OutAge", "Last Streaming Status Change Age", + RRDF_FIELD_TYPE_DURATION, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_DURATION_S, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_MIN, RRDF_FIELD_FILTER_RANGE, + RRDF_FIELD_OPTS_VISIBLE, NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "OutReason", "Streaming Status Reason", + RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_VISIBLE, NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "OutHops", "Streaming Distance Hops from Origin Node", + RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_MIN, RRDF_FIELD_FILTER_RANGE, + RRDF_FIELD_OPTS_VISIBLE, NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "OutReplCompletion", "Outbound Replication Completion", + RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, + 1, "%", 100.0, RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_MIN, RRDF_FIELD_FILTER_RANGE, + RRDF_FIELD_OPTS_VISIBLE, NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "OutReplInstances", "Outbound Replicating Instances", + RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, + 0, "instances", (double)max_streaming_replication_instances, RRDF_FIELD_SORT_DESCENDING, + NULL, + RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, + RRDF_FIELD_OPTS_NONE, NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "OutLocalIP", "Outbound Local IP", + RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_NONE, NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "OutLocalPort", "Outbound Local Port", + RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_RANGE, + RRDF_FIELD_OPTS_NONE, NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "OutRemoteIP", "Outbound Remote IP", + RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_NONE, NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "OutRemotePort", "Outbound Remote Port", + RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_RANGE, + RRDF_FIELD_OPTS_NONE, NULL); + + buffer_rrdf_table_add_field(wb, 
field_id++, "OutSSL", "Outbound SSL Connection", + RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_NONE, NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "OutCompression", "Outbound Compressed Connection", + RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_NONE, NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "OutCapabilities", "Outbound Connection Capabilities", + RRDF_FIELD_TYPE_ARRAY, RRDF_FIELD_VISUAL_PILL, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_NONE, NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "OutTrafficData", "Outbound Metric Data Traffic", + RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, + 0, "bytes", (double)max_sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_DATA], + RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, + RRDF_FIELD_OPTS_NONE, NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "OutTrafficMetadata", "Outbound Metric Metadata Traffic", + RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, + 0, "bytes", + (double)max_sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_METADATA], + RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, + RRDF_FIELD_OPTS_NONE, NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "OutTrafficReplication", "Outbound Metric Replication Traffic", + RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, + 0, "bytes", + (double)max_sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_REPLICATION], + RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, + RRDF_FIELD_OPTS_NONE, NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "OutTrafficFunctions", "Outbound Metric Functions Traffic", + RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, + 0, "bytes", + (double)max_sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_FUNCTIONS], + RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, + RRDF_FIELD_OPTS_NONE, NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "OutAttemptHandshake", + "Outbound Connection Attempt Handshake Status", + RRDF_FIELD_TYPE_ARRAY, RRDF_FIELD_VISUAL_PILL, RRDF_FIELD_TRANSFORM_NONE, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT, + RRDF_FIELD_OPTS_NONE, NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "OutAttemptSince", + "Last Outbound Connection Attempt Status Change Time", + RRDF_FIELD_TYPE_TIMESTAMP, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_DATETIME_MS, + 0, NULL, NAN, RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_MAX, RRDF_FIELD_FILTER_RANGE, + RRDF_FIELD_OPTS_NONE, NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "OutAttemptAge", + "Last Outbound Connection Attempt Status Change Age", + RRDF_FIELD_TYPE_DURATION, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_DURATION_S, + 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, + RRDF_FIELD_SUMMARY_MIN, RRDF_FIELD_FILTER_RANGE, + RRDF_FIELD_OPTS_VISIBLE, NULL); + + // --- ML --- + + 
buffer_rrdf_table_add_field(wb, field_id++, "MlAnomalous", "Number of Anomalous Metrics", + RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, + 0, "metrics", + (double)max_ml_anomalous, + RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, + RRDF_FIELD_OPTS_NONE, NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "MlNormal", "Number of Not Anomalous Metrics", + RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, + 0, "metrics", + (double)max_ml_normal, + RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, + RRDF_FIELD_OPTS_NONE, NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "MlTrained", "Number of Trained Metrics", + RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, + 0, "metrics", + (double)max_ml_trained, + RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, + RRDF_FIELD_OPTS_NONE, NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "MlPending", "Number of Pending Metrics", + RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, + 0, "metrics", + (double)max_ml_pending, + RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, + RRDF_FIELD_OPTS_NONE, NULL); + + buffer_rrdf_table_add_field(wb, field_id++, "MlSilenced", "Number of Silenced Metrics", + RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, + 0, "metrics", + (double)max_ml_silenced, + RRDF_FIELD_SORT_DESCENDING, NULL, + RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, + RRDF_FIELD_OPTS_NONE, NULL); + } + buffer_json_object_close(wb); // columns + buffer_json_member_add_string(wb, "default_sort_column", "Node"); + buffer_json_member_add_object(wb, "charts"); + { + // Data Collection Age chart + buffer_json_member_add_object(wb, "InAge"); + { + buffer_json_member_add_string(wb, "name", "Data Collection Age"); + buffer_json_member_add_string(wb, "type", "stacked-bar"); + buffer_json_member_add_array(wb, "columns"); + { + buffer_json_add_array_item_string(wb, "InAge"); + } + buffer_json_array_close(wb); + } + buffer_json_object_close(wb); + + // Streaming Age chart + buffer_json_member_add_object(wb, "OutAge"); + { + buffer_json_member_add_string(wb, "name", "Streaming Age"); + buffer_json_member_add_string(wb, "type", "stacked-bar"); + buffer_json_member_add_array(wb, "columns"); + { + buffer_json_add_array_item_string(wb, "OutAge"); + } + buffer_json_array_close(wb); + } + buffer_json_object_close(wb); + + // DB Duration + buffer_json_member_add_object(wb, "dbDuration"); + { + buffer_json_member_add_string(wb, "name", "Retention Duration"); + buffer_json_member_add_string(wb, "type", "stacked-bar"); + buffer_json_member_add_array(wb, "columns"); + { + buffer_json_add_array_item_string(wb, "dbDuration"); + } + buffer_json_array_close(wb); + } + buffer_json_object_close(wb); + } + buffer_json_object_close(wb); // charts + + buffer_json_member_add_array(wb, "default_charts"); + { + buffer_json_add_array_item_array(wb); + buffer_json_add_array_item_string(wb, "InAge"); + buffer_json_add_array_item_string(wb, "Node"); + buffer_json_array_close(wb); + + buffer_json_add_array_item_array(wb); + buffer_json_add_array_item_string(wb, "OutAge"); + buffer_json_add_array_item_string(wb, "Node"); + buffer_json_array_close(wb); + } + buffer_json_array_close(wb); + + buffer_json_member_add_object(wb, "group_by"); + { + buffer_json_member_add_object(wb, "Node"); + { + 
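+            // each group_by entry names the column(s) the table rows can be aggregated by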
buffer_json_member_add_string(wb, "name", "Node"); + buffer_json_member_add_array(wb, "columns"); + { + buffer_json_add_array_item_string(wb, "Node"); + } + buffer_json_array_close(wb); + } + buffer_json_object_close(wb); + + buffer_json_member_add_object(wb, "InStatus"); + { + buffer_json_member_add_string(wb, "name", "Nodes by Collection Status"); + buffer_json_member_add_array(wb, "columns"); + { + buffer_json_add_array_item_string(wb, "InStatus"); + } + buffer_json_array_close(wb); + } + buffer_json_object_close(wb); + + buffer_json_member_add_object(wb, "OutStatus"); + { + buffer_json_member_add_string(wb, "name", "Nodes by Streaming Status"); + buffer_json_member_add_array(wb, "columns"); + { + buffer_json_add_array_item_string(wb, "OutStatus"); + } + buffer_json_array_close(wb); + } + buffer_json_object_close(wb); + + buffer_json_member_add_object(wb, "MlStatus"); + { + buffer_json_member_add_string(wb, "name", "Nodes by ML Status"); + buffer_json_member_add_array(wb, "columns"); + { + buffer_json_add_array_item_string(wb, "MlStatus"); + } + buffer_json_array_close(wb); + } + buffer_json_object_close(wb); + + buffer_json_member_add_object(wb, "InRemoteIP"); + { + buffer_json_member_add_string(wb, "name", "Nodes by Inbound IP"); + buffer_json_member_add_array(wb, "columns"); + { + buffer_json_add_array_item_string(wb, "InRemoteIP"); + } + buffer_json_array_close(wb); + } + buffer_json_object_close(wb); + + buffer_json_member_add_object(wb, "OutRemoteIP"); + { + buffer_json_member_add_string(wb, "name", "Nodes by Outbound IP"); + buffer_json_member_add_array(wb, "columns"); + { + buffer_json_add_array_item_string(wb, "OutRemoteIP"); + } + buffer_json_array_close(wb); + } + buffer_json_object_close(wb); + } + buffer_json_object_close(wb); // group_by + + buffer_json_member_add_time_t(wb, "expires", now_realtime_sec() + 1); + buffer_json_finalize(wb); + + return HTTP_RESP_OK; +} diff --git a/src/web/api/functions/function-streaming.h b/src/web/api/functions/function-streaming.h new file mode 100644 index 000000000..06da6af9f --- /dev/null +++ b/src/web/api/functions/function-streaming.h @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_FUNCTION_STREAMING_H +#define NETDATA_FUNCTION_STREAMING_H + +#include "daemon/common.h" + +#define RRDFUNCTIONS_STREAMING_HELP "Streaming status for parents and children." 
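+// registered on localhost as the "streaming" inline function (see global_functions_add() in functions.c)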
+ +int function_streaming(BUFFER *wb, const char *function, BUFFER *payload, const char *source); + +#endif //NETDATA_FUNCTION_STREAMING_H diff --git a/src/web/api/functions/functions.c b/src/web/api/functions/functions.c new file mode 100644 index 000000000..c00e04ca0 --- /dev/null +++ b/src/web/api/functions/functions.c @@ -0,0 +1,43 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "functions.h" + +void global_functions_add(void) { + // we register this only on localhost + // for the other nodes, the origin server should register it + rrd_function_add_inline( + localhost, + NULL, + "streaming", + 10, + RRDFUNCTIONS_PRIORITY_DEFAULT + 1, + RRDFUNCTIONS_VERSION_DEFAULT, + RRDFUNCTIONS_STREAMING_HELP, + "top", + HTTP_ACCESS_SIGNED_ID | HTTP_ACCESS_SAME_SPACE | HTTP_ACCESS_SENSITIVE_DATA, + function_streaming); + + rrd_function_add_inline( + localhost, + NULL, + "netdata-api-calls", + 10, + RRDFUNCTIONS_PRIORITY_DEFAULT + 2, + RRDFUNCTIONS_VERSION_DEFAULT, + RRDFUNCTIONS_PROGRESS_HELP, + "top", + HTTP_ACCESS_SIGNED_ID | HTTP_ACCESS_SAME_SPACE | HTTP_ACCESS_SENSITIVE_DATA, + function_progress); + + rrd_function_add_inline( + localhost, + NULL, + RRDFUNCTIONS_BEARER_GET_TOKEN, + 10, + RRDFUNCTIONS_PRIORITY_DEFAULT + 3, + RRDFUNCTIONS_VERSION_DEFAULT, + RRDFUNCTIONS_BEARER_GET_TOKEN_HELP, + RRDFUNCTIONS_TAG_HIDDEN, + HTTP_ACCESS_SIGNED_ID | HTTP_ACCESS_SAME_SPACE | HTTP_ACCESS_SENSITIVE_DATA, + function_bearer_get_token); +} diff --git a/src/web/api/functions/functions.h b/src/web/api/functions/functions.h new file mode 100644 index 000000000..28c483541 --- /dev/null +++ b/src/web/api/functions/functions.h @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_FUNCTIONS_H +#define NETDATA_FUNCTIONS_H + +#include "daemon/common.h" + +#include "function-streaming.h" +#include "function-progress.h" +#include "function-bearer_get_token.h" + +void global_functions_add(void); + +#endif //NETDATA_FUNCTIONS_H diff --git a/src/web/api/health/README.md b/src/web/api/health/README.md index 725b4a36f..a788e9b4d 100644 --- a/src/web/api/health/README.md +++ b/src/web/api/health/README.md @@ -1,13 +1,3 @@ - - # Health API Calls ## Health Read API @@ -33,7 +23,7 @@ The size of the alert log is configured in `netdata.conf`. 
There are 2 settings: ``` [health] in memory max health log entries = 1000 - health log history = 432000 + health log retention = 5d ``` The API call retrieves all entries of the alert log: diff --git a/src/web/api/http_auth.c b/src/web/api/http_auth.c index ec0520304..5c4fffcaf 100644 --- a/src/web/api/http_auth.c +++ b/src/web/api/http_auth.c @@ -2,83 +2,341 @@ #include "http_auth.h" -#define BEARER_TOKEN_EXPIRATION 86400 +#define BEARER_TOKEN_EXPIRATION (86400 * 1) -bool netdata_is_protected_by_bearer = false; // this is controlled by cloud, at the point the agent logs in - this should also be saved to /var/lib/netdata +bool netdata_is_protected_by_bearer = false; static DICTIONARY *netdata_authorized_bearers = NULL; struct bearer_token { nd_uuid_t cloud_account_id; - char cloud_user_name[CLOUD_USER_NAME_LENGTH]; + char client_name[CLOUD_CLIENT_NAME_LENGTH]; HTTP_ACCESS access; HTTP_USER_ROLE user_role; time_t created_s; time_t expires_s; }; -bool web_client_bearer_token_auth(struct web_client *w, const char *v) { - if(!uuid_parse_flexi(v, w->auth.bearer_token)) { - char uuid_str[UUID_COMPACT_STR_LEN]; - uuid_unparse_lower_compact(w->auth.bearer_token, uuid_str); +static void bearer_tokens_path(char out[FILENAME_MAX]) { + filename_from_path_entry(out, netdata_configured_varlib_dir, "bearer_tokens", NULL); +} - struct bearer_token *z = dictionary_get(netdata_authorized_bearers, uuid_str); - if (z && z->expires_s > now_monotonic_sec()) { - strncpyz(w->auth.client_name, z->cloud_user_name, sizeof(w->auth.client_name) - 1); - uuid_copy(w->auth.cloud_account_id, z->cloud_account_id); - web_client_set_permissions(w, z->access, z->user_role, WEB_CLIENT_FLAG_AUTH_BEARER); - return true; - } - } - else - nd_log(NDLS_DAEMON, NDLP_NOTICE, "Invalid bearer token '%s' received.", v); +static void bearer_token_filename(char out[FILENAME_MAX], nd_uuid_t uuid) { + char uuid_str[UUID_STR_LEN]; + uuid_unparse_lower(uuid, uuid_str); + + char path[FILENAME_MAX]; + bearer_tokens_path(path); + filename_from_path_entry(out, path, uuid_str, NULL); +} - return false; +static inline bool bearer_tokens_ensure_path_exists(void) { + char path[FILENAME_MAX]; + bearer_tokens_path(path); + return filename_is_dir(path, true); } -static void bearer_token_cleanup(void) { +static void bearer_token_delete_from_disk(nd_uuid_t *token) { + char filename[FILENAME_MAX]; + bearer_token_filename(filename, *token); + if(unlink(filename) != 0) + nd_log(NDLS_DAEMON, NDLP_ERR, "Failed to unlink() file '%s'", filename); +} + +static void bearer_token_cleanup(bool force) { static time_t attempts = 0; - if(++attempts % 1000 != 0) + if(++attempts % 1000 != 0 && !force) return; - time_t now_s = now_monotonic_sec(); + time_t now_s = now_realtime_sec(); struct bearer_token *z; dfe_start_read(netdata_authorized_bearers, z) { - if(z->expires_s < now_s) + if(z->expires_s < now_s) { + nd_uuid_t uuid; + if(uuid_parse_flexi(z_dfe.name, uuid) == 0) + bearer_token_delete_from_disk(&uuid); + dictionary_del(netdata_authorized_bearers, z_dfe.name); + } } dfe_done(z); dictionary_garbage_collect(netdata_authorized_bearers); } -void bearer_tokens_init(void) { - netdata_authorized_bearers = dictionary_create_advanced( - DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, - NULL, sizeof(struct bearer_token)); +static uint64_t bearer_token_signature(nd_uuid_t token, struct bearer_token *bt) { + // we use a custom structure to make sure that changes in the other code will not affect the signature + + struct { + nd_uuid_t host_uuid; + nd_uuid_t token; + 
nd_uuid_t cloud_account_id; + char client_name[CLOUD_CLIENT_NAME_LENGTH]; + HTTP_ACCESS access; + HTTP_USER_ROLE user_role; + time_t created_s; + time_t expires_s; + } signature_payload = { + .access = bt->access, + .user_role = bt->user_role, + .created_s = bt->created_s, + .expires_s = bt->expires_s, + }; + uuid_copy(signature_payload.host_uuid, localhost->host_id.uuid); + uuid_copy(signature_payload.token, token); + uuid_copy(signature_payload.cloud_account_id, bt->cloud_account_id); + memset(signature_payload.client_name, 0, sizeof(signature_payload.client_name)); + strncpyz(signature_payload.client_name, bt->client_name, sizeof(signature_payload.client_name) - 1); + + return XXH3_64bits(&signature_payload, sizeof(signature_payload)); } -time_t bearer_create_token(nd_uuid_t *uuid, struct web_client *w) { +static bool bearer_token_save_to_file(nd_uuid_t token, struct bearer_token *bt) { + CLEAN_BUFFER *wb = buffer_create(0, NULL); + buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_MINIFY); + buffer_json_member_add_uint64(wb, "version", 1); + buffer_json_member_add_uuid(wb, "host_uuid", localhost->host_id.uuid); + buffer_json_member_add_uuid(wb, "token", token); + buffer_json_member_add_uuid(wb, "cloud_account_id", bt->cloud_account_id); + buffer_json_member_add_string(wb, "client_name", bt->client_name); + http_access2buffer_json_array(wb, "access", bt->access); + buffer_json_member_add_string(wb, "user_role", http_id2user_role(bt->user_role)); + buffer_json_member_add_uint64(wb, "created_s", bt->created_s); + buffer_json_member_add_uint64(wb, "expires_s", bt->expires_s); + buffer_json_member_add_uint64(wb, "signature", bearer_token_signature(token, bt)); + buffer_json_finalize(wb); + + char filename[FILENAME_MAX]; + bearer_token_filename(filename, token); + + FILE *fp = fopen(filename, "w"); + if(!fp) { + nd_log(NDLS_DAEMON, NDLP_ERR, "Cannot create file '%s'", filename); + return false; + } + + if(fwrite(buffer_tostring(wb), 1, buffer_strlen(wb), fp) != buffer_strlen(wb)) { + fclose(fp); + unlink(filename); + nd_log(NDLS_DAEMON, NDLP_ERR, "Cannot save file '%s'", filename); + return false; + } + + fclose(fp); + return true; +} + +static time_t bearer_create_token_internal(nd_uuid_t token, HTTP_USER_ROLE user_role, HTTP_ACCESS access, nd_uuid_t cloud_account_id, const char *client_name, time_t created_s, time_t expires_s, bool save) { char uuid_str[UUID_COMPACT_STR_LEN]; + uuid_unparse_lower_compact(token, uuid_str); + + struct bearer_token t = { 0 }, *bt; + const DICTIONARY_ITEM *item = dictionary_set_and_acquire_item(netdata_authorized_bearers, uuid_str, &t, sizeof(t)); + bt = dictionary_acquired_item_value(item); + + if(!bt->created_s) { + bt->created_s = created_s; + bt->expires_s = expires_s; + bt->user_role = user_role; + bt->access = access; + + uuid_copy(bt->cloud_account_id, cloud_account_id); + strncpyz(bt->client_name, client_name, sizeof(bt->client_name) - 1); + + if(save) + bearer_token_save_to_file(token, bt); + } + + time_t expiration = bt->expires_s; + + dictionary_acquired_item_release(netdata_authorized_bearers, item); + + return expiration; +} + +time_t bearer_create_token(nd_uuid_t *uuid, HTTP_USER_ROLE user_role, HTTP_ACCESS access, nd_uuid_t cloud_account_id, const char *client_name) { + time_t now_s = now_realtime_sec(); + time_t expires_s = 0; + + struct bearer_token *bt; + dfe_start_read(netdata_authorized_bearers, bt) { + if(bt->expires_s > now_s + 3600 * 2 && // expires in more than 2 hours + user_role == bt->user_role && // the
user_role matches + access == bt->access && // the access matches + uuid_eq(cloud_account_id, bt->cloud_account_id) && // the cloud_account_id matches + strncmp(client_name, bt->client_name, sizeof(bt->client_name) - 1) == 0 && // the client_name matches + uuid_parse_flexi(bt_dfe.name, *uuid) == 0) // the token can be parsed + return expires_s; /* dfe will cleanup automatically */ + } + dfe_done(bt); uuid_generate_random(*uuid); - uuid_unparse_lower_compact(*uuid, uuid_str); - - struct bearer_token t = { 0 }, *z; - z = dictionary_set(netdata_authorized_bearers, uuid_str, &t, sizeof(t)); - if(!z->created_s) { - z->created_s = now_monotonic_sec(); - z->expires_s = z->created_s + BEARER_TOKEN_EXPIRATION; - z->user_role = w->user_role; - z->access = w->access; - uuid_copy(z->cloud_account_id, w->auth.cloud_account_id); - strncpyz(z->cloud_user_name, w->auth.client_name, sizeof(z->cloud_account_id) - 1); + expires_s = bearer_create_token_internal( + *uuid, user_role, access, cloud_account_id, client_name, + now_s, now_s + BEARER_TOKEN_EXPIRATION, true); + + bearer_token_cleanup(false); + + return expires_s; +} + +static bool bearer_token_parse_json(nd_uuid_t token, struct json_object *jobj, BUFFER *error) { + int64_t version; + nd_uuid_t token_in_file, cloud_account_id, host_uuid; + CLEAN_STRING *client_name = NULL; + HTTP_USER_ROLE user_role = HTTP_USER_ROLE_NONE; + HTTP_ACCESS access = HTTP_ACCESS_NONE; + time_t created_s = 0, expires_s = 0; + uint64_t signature = 0; + + JSONC_PARSE_INT64_OR_ERROR_AND_RETURN(jobj, ".", "version", version, error, true); + JSONC_PARSE_TXT2UUID_OR_ERROR_AND_RETURN(jobj, ".", "host_uuid", host_uuid, error, true); + JSONC_PARSE_TXT2UUID_OR_ERROR_AND_RETURN(jobj, ".", "token", token_in_file, error, true); + JSONC_PARSE_TXT2UUID_OR_ERROR_AND_RETURN(jobj, ".", "cloud_account_id", cloud_account_id, error, true); + JSONC_PARSE_TXT2STRING_OR_ERROR_AND_RETURN(jobj, ".", "client_name", client_name, error, true); + JSONC_PARSE_ARRAY_OF_TXT2BITMAP_OR_ERROR_AND_RETURN(jobj, ".", "access", http_access2id_one, access, error, true); + JSONC_PARSE_TXT2ENUM_OR_ERROR_AND_RETURN(jobj, ".", "user_role", http_user_role2id, user_role, error, true); + JSONC_PARSE_UINT64_OR_ERROR_AND_RETURN(jobj, ".", "created_s", created_s, error, true); + JSONC_PARSE_UINT64_OR_ERROR_AND_RETURN(jobj, ".", "expires_s", expires_s, error, true); + JSONC_PARSE_UINT64_OR_ERROR_AND_RETURN(jobj, ".", "signature", signature, error, true); + + if(uuid_compare(token, token_in_file) != 0) { + buffer_flush(error); + buffer_strcat(error, "token in JSON file does not match the filename"); + return false; + } + + if(uuid_compare(host_uuid, localhost->host_id.uuid) != 0) { + buffer_flush(error); + buffer_strcat(error, "Host UUID in JSON file does not match our host UUID"); + return false; + } + + if(!created_s || !expires_s || created_s >= expires_s) { + buffer_flush(error); + buffer_strcat(error, "bearer token has invalid dates"); + return false; + } + + struct bearer_token bt = { + .access = access, + .user_role = user_role, + .created_s = created_s, + .expires_s = expires_s, + }; + uuid_copy(bt.cloud_account_id, cloud_account_id); + strncpyz(bt.client_name, string2str(client_name), sizeof(bt.client_name) - 1); + + if(signature != bearer_token_signature(token_in_file, &bt)) { + buffer_flush(error); + buffer_strcat(error, "bearer token has invalid signature"); + return false; + } + + bearer_create_token_internal(token, user_role, access, + cloud_account_id, string2str(client_name), + created_s, expires_s, false); + + 
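+    // save == false: this token was just read back from disk, so register it
+    // in memory without rewriting the file.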
     return true;
+}
+
+static bool bearer_token_load_token(nd_uuid_t token) {
+    char filename[FILENAME_MAX];
+    bearer_token_filename(filename, token);
+
+    CLEAN_BUFFER *wb = buffer_create(0, NULL);
+    if(!read_txt_file_to_buffer(filename, wb, 1 * 1024 * 1024))
+        return false;
+
+    CLEAN_JSON_OBJECT *jobj = json_tokener_parse(buffer_tostring(wb));
+    if (jobj == NULL) {
+        nd_log(NDLS_DAEMON, NDLP_ERR, "Cannot parse bearer token file '%s'", filename);
+        return false;
+    }
+
+    CLEAN_BUFFER *error = buffer_create(0, NULL);
+    bool rc = bearer_token_parse_json(token, jobj, error);
+    if(!rc) {
+        nd_log(NDLS_DAEMON, NDLP_ERR, "Failed to parse bearer token file '%s': %s", filename, buffer_tostring(error));
+        unlink(filename);
+        return false;
+    }
+
+    bearer_token_cleanup(true);
+
+    return true;
+}
+
+static void bearer_tokens_load_from_disk(void) {
+    bearer_tokens_ensure_path_exists();
+
+    char path[FILENAME_MAX];
+    bearer_tokens_path(path);
+
+    DIR *dir = opendir(path);
+    if(!dir) {
+        nd_log(NDLS_DAEMON, NDLP_ERR, "Cannot open directory '%s' to read saved bearer tokens", path);
+        return;
     }
 
-    bearer_token_cleanup();
+    struct dirent *de;
+    while((de = readdir(dir))) {
+        if (strcmp(de->d_name, ".") == 0 || strcmp(de->d_name, "..") == 0)
+            continue;
+
+        ND_UUID uuid = UUID_ZERO;
+        if(uuid_parse_flexi(de->d_name, uuid.uuid) != 0 || UUIDiszero(uuid))
+            continue;
+
+        char filename[FILENAME_MAX];
+        filename_from_path_entry(filename, path, de->d_name, NULL);
+
+        if(de->d_type == DT_REG || (de->d_type == DT_LNK && filename_is_file(filename)))
+            bearer_token_load_token(uuid.uuid);
+    }
+
+    closedir(dir);
+}
+
+bool web_client_bearer_token_auth(struct web_client *w, const char *v) {
+    bool rc = false;
+
+    if(!uuid_parse_flexi(v, w->auth.bearer_token)) {
+        char uuid_str[UUID_COMPACT_STR_LEN];
+        uuid_unparse_lower_compact(w->auth.bearer_token, uuid_str);
+
+        const DICTIONARY_ITEM *item = dictionary_get_and_acquire_item(netdata_authorized_bearers, uuid_str);
+        if(!item && bearer_token_load_token(w->auth.bearer_token))
+            item = dictionary_get_and_acquire_item(netdata_authorized_bearers, uuid_str);
+
+        if(item) {
+            struct bearer_token *bt = dictionary_acquired_item_value(item);
+            if (bt->expires_s > now_realtime_sec()) {
+                strncpyz(w->auth.client_name, bt->client_name, sizeof(w->auth.client_name) - 1);
+                uuid_copy(w->auth.cloud_account_id, bt->cloud_account_id);
+                web_client_set_permissions(w, bt->access, bt->user_role, WEB_CLIENT_FLAG_AUTH_BEARER);
+                rc = true;
+            }
+
+            dictionary_acquired_item_release(netdata_authorized_bearers, item);
+        }
+    }
+    else
+        nd_log(NDLS_DAEMON, NDLP_NOTICE, "Invalid bearer token '%s' received.", v);
+
+    return rc;
+}
+
+void bearer_tokens_init(void) {
+    netdata_is_protected_by_bearer =
+        config_get_boolean(CONFIG_SECTION_WEB, "bearer token protection", netdata_is_protected_by_bearer);
+
+    netdata_authorized_bearers = dictionary_create_advanced(
+        DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE,
+        NULL, sizeof(struct bearer_token));
 
-    return now_realtime_sec() + BEARER_TOKEN_EXPIRATION;
+    bearer_tokens_load_from_disk();
 }
 
 bool extract_bearer_token_from_request(struct web_client *w, char *dst, size_t dst_len) {
diff --git a/src/web/api/http_auth.h b/src/web/api/http_auth.h
index f339a44cf..0b01fdb1e 100644
--- a/src/web/api/http_auth.h
+++ b/src/web/api/http_auth.h
@@ -11,7 +11,7 @@ extern bool netdata_is_protected_by_bearer;
 
 bool extract_bearer_token_from_request(struct web_client *w, char *dst, size_t dst_len);
 
-time_t bearer_create_token(nd_uuid_t *uuid, struct web_client *w);
+time_t bearer_create_token(nd_uuid_t *uuid, HTTP_USER_ROLE user_role, HTTP_ACCESS access, nd_uuid_t cloud_account_id, const char *client_name);
 
 bool web_client_bearer_token_auth(struct web_client *w, const char *v);
 
 static inline bool http_access_user_has_enough_access_level_for_endpoint(HTTP_ACCESS user, HTTP_ACCESS endpoint) {
diff --git a/src/web/api/ilove/README.md b/src/web/api/ilove/README.md
deleted file mode 100644
index e69de29bb..000000000
diff --git a/src/web/api/ilove/ilove.c b/src/web/api/ilove/ilove.c
deleted file mode 100644
index 67489ec42..000000000
--- a/src/web/api/ilove/ilove.c
+++ /dev/null
@@ -1,306 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "ilove.h"
-
-static const unsigned short int ibm_plex_sans_bold_250[128][128] = {
-    /* ... 128 rows of generated per-character-pair kerning values for the printable ASCII range (table body elided) ... */
*/, 15025 /* *@ */, 13775 /* *A */, 15025 /* *B */, 14775 /* *C */, 15025 /* *D */, 15025 /* *E */, 15025 /* *F */, 14775 /* *G */, 15025 /* *H */, 15025 /* *I */, 14025 /* *J */, 15025 /* *K */, 15025 /* *L */, 15025 /* *M */, 15025 /* *N */, 14775 /* *O */, 15025 /* *P */, 14775 /* *Q */, 15025 /* *R */, 15025 /* *S */, 15400 /* *T */, 15025 /* *U */, 15275 /* *V */, 15400 /* *W */, 15025 /* *X */, 15150 /* *Y */, 15025 /* *Z */, 15025 /* *[ */, 15025 /* *\ */, 15025 /* *] */, 15025 /* *^ */, 15025 /* *_ */, 15025 /* *` */, 15025 /* *a */, 15025 /* *b */, 14525 /* *c */, 14525 /* *d */, 14525 /* *e */, 15025 /* *f */, 15025 /* *g */, 15025 /* *h */, 15025 /* *i */, 15025 /* *j */, 15025 /* *k */, 15025 /* *l */, 15025 /* *m */, 15025 /* *n */, 14525 /* *o */, 15025 /* *p */, 14525 /* *q */, 15025 /* *r */, 15025 /* *s */, 15025 /* *t */, 15025 /* *u */, 15650 /* *v */, 15525 /* *w */, 15025 /* *x */, 15525 /* *y */, 15150 /* *z */, 15025 /* *{ */, 15025 /* *| */, 15025 /* *} */, 15025 /* *~ */}, - {15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* +! */, 15000 /* +" */, 15000 /* +# */, 15000 /* +$ */, 15000 /* +% */, 15000 /* +& */, 15000 /* +' */, 15000 /* +( */, 15000 /* +) */, 15000 /* +* */, 15000 /* ++ */, 15000 /* +, */, 15000 /* +- */, 15000 /* +. */, 15000 /* +/ */, 15000 /* +0 */, 15000 /* +1 */, 15000 /* +2 */, 15000 /* +3 */, 15000 /* +4 */, 15000 /* +5 */, 15000 /* +6 */, 15000 /* +7 */, 15000 /* +8 */, 15000 /* +9 */, 15000 /* +: */, 15000 /* +; */, 15000 /* +< */, 15000 /* += */, 15000 /* +> */, 15000 /* +? */, 15000 /* +@ */, 15000 /* +A */, 15000 /* +B */, 15000 /* +C */, 15000 /* +D */, 15000 /* +E */, 15000 /* +F */, 15000 /* +G */, 15000 /* +H */, 15000 /* +I */, 15000 /* +J */, 15000 /* +K */, 15000 /* +L */, 15000 /* +M */, 15000 /* +N */, 15000 /* +O */, 15000 /* +P */, 15000 /* +Q */, 15000 /* +R */, 15000 /* +S */, 15000 /* +T */, 15000 /* +U */, 15000 /* +V */, 15000 /* +W */, 15000 /* +X */, 15000 /* +Y */, 15000 /* +Z */, 15000 /* +[ */, 15000 /* +\ */, 15000 /* +] */, 15000 /* +^ */, 15000 /* +_ */, 15000 /* +` */, 15000 /* +a */, 15000 /* +b */, 15000 /* +c */, 15000 /* +d */, 15000 /* +e */, 15000 /* +f */, 15000 /* +g */, 15000 /* +h */, 15000 /* +i */, 15000 /* +j */, 15000 /* +k */, 15000 /* +l */, 15000 /* +m */, 15000 /* +n */, 15000 /* +o */, 15000 /* +p */, 15000 /* +q */, 15000 /* +r */, 15000 /* +s */, 15000 /* +t */, 15000 /* +u */, 15000 /* +v */, 15000 /* +w */, 15000 /* +x */, 15000 /* +y */, 15000 /* +z */, 15000 /* +{ */, 15000 /* +| */, 15000 /* +} */, 15000 /* +~ */}, - {7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* ,! 
*/, 5500 /* ," */, 7750 /* ,# */, 7750 /* ,$ */, 7750 /* ,% */, 7750 /* ,& */, 5500 /* ,' */, 7750 /* ,( */, 7750 /* ,) */, 5500 /* ,* */, 7750 /* ,+ */, 7750 /* ,, */, 7750 /* ,- */, 7750 /* ,. */, 7750 /* ,/ */, 7750 /* ,0 */, 7750 /* ,1 */, 7750 /* ,2 */, 7750 /* ,3 */, 7750 /* ,4 */, 7750 /* ,5 */, 7750 /* ,6 */, 7750 /* ,7 */, 7750 /* ,8 */, 7750 /* ,9 */, 7750 /* ,: */, 7750 /* ,; */, 7750 /* ,< */, 7750 /* ,= */, 7750 /* ,> */, 5750 /* ,? */, 7750 /* ,@ */, 8250 /* ,A */, 7750 /* ,B */, 6750 /* ,C */, 7750 /* ,D */, 7750 /* ,E */, 7750 /* ,F */, 6750 /* ,G */, 7750 /* ,H */, 7750 /* ,I */, 7750 /* ,J */, 7750 /* ,K */, 7750 /* ,L */, 7750 /* ,M */, 7750 /* ,N */, 6750 /* ,O */, 7750 /* ,P */, 6750 /* ,Q */, 7750 /* ,R */, 7750 /* ,S */, 6000 /* ,T */, 6875 /* ,U */, 6250 /* ,V */, 6750 /* ,W */, 8000 /* ,X */, 6000 /* ,Y */, 8500 /* ,Z */, 7750 /* ,[ */, 7750 /* ,\ */, 7750 /* ,] */, 7750 /* ,^ */, 7750 /* ,_ */, 7750 /* ,` */, 7875 /* ,a */, 7750 /* ,b */, 7375 /* ,c */, 7375 /* ,d */, 7375 /* ,e */, 7375 /* ,f */, 7750 /* ,g */, 7750 /* ,h */, 7750 /* ,i */, 7750 /* ,j */, 7750 /* ,k */, 7750 /* ,l */, 7750 /* ,m */, 7750 /* ,n */, 7375 /* ,o */, 7750 /* ,p */, 7375 /* ,q */, 7750 /* ,r */, 7750 /* ,s */, 7150 /* ,t */, 7550 /* ,u */, 6550 /* ,v */, 6750 /* ,w */, 7750 /* ,x */, 6625 /* ,y */, 8000 /* ,z */, 7750 /* ,{ */, 7750 /* ,| */, 7750 /* ,} */, 7750 /* ,~ */}, - {10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* -! */, 10075 /* -" */, 10075 /* -# */, 10075 /* -$ */, 10075 /* -% */, 10075 /* -& */, 10075 /* -' */, 10075 /* -( */, 10075 /* -) */, 10075 /* -* */, 10075 /* -+ */, 10075 /* -, */, 10075 /* -- */, 10075 /* -. */, 10075 /* -/ */, 10075 /* -0 */, 10075 /* -1 */, 10075 /* -2 */, 10075 /* -3 */, 10075 /* -4 */, 10075 /* -5 */, 10075 /* -6 */, 10075 /* -7 */, 10075 /* -8 */, 10075 /* -9 */, 10075 /* -: */, 10075 /* -; */, 10075 /* -< */, 10075 /* -= */, 10075 /* -> */, 10075 /* -? */, 10075 /* -@ */, 9700 /* -A */, 10075 /* -B */, 10450 /* -C */, 10075 /* -D */, 10075 /* -E */, 10075 /* -F */, 10450 /* -G */, 10075 /* -H */, 9325 /* -I */, 9450 /* -J */, 10075 /* -K */, 10075 /* -L */, 10075 /* -M */, 10075 /* -N */, 10450 /* -O */, 10075 /* -P */, 10450 /* -Q */, 10075 /* -R */, 9575 /* -S */, 8950 /* -T */, 10075 /* -U */, 9450 /* -V */, 9825 /* -W */, 9325 /* -X */, 8825 /* -Y */, 9325 /* -Z */, 10075 /* -[ */, 10075 /* -\ */, 10075 /* -] */, 10075 /* -^ */, 10075 /* -_ */, 10075 /* -` */, 10075 /* -a */, 10075 /* -b */, 10325 /* -c */, 10325 /* -d */, 10325 /* -e */, 10075 /* -f */, 10075 /* -g */, 10075 /* -h */, 10075 /* -i */, 10075 /* -j */, 10075 /* -k */, 10075 /* -l */, 10075 /* -m */, 10075 /* -n */, 10325 /* -o */, 10075 /* -p */, 10325 /* -q */, 10075 /* -r */, 10200 /* -s */, 10075 /* -t */, 10075 /* -u */, 9950 /* -v */, 9975 /* -w */, 9200 /* -x */, 9950 /* -y */, 9700 /* -z */, 10075 /* -{ */, 10075 /* -| */, 10075 /* -} */, 10075 /* -~ */}, - {7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* . 
*/, 7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* .! */, 5500 /* ." */, 7750 /* .# */, 7750 /* .$ */, 7750 /* .% */, 7750 /* .& */, 5500 /* .' */, 7750 /* .( */, 7750 /* .) */, 5500 /* .* */, 7750 /* .+ */, 7750 /* ., */, 7750 /* .- */, 7750 /* .. */, 7750 /* ./ */, 7750 /* .0 */, 7750 /* .1 */, 7750 /* .2 */, 7750 /* .3 */, 7750 /* .4 */, 7750 /* .5 */, 7750 /* .6 */, 7750 /* .7 */, 7750 /* .8 */, 7750 /* .9 */, 7750 /* .: */, 7750 /* .; */, 7750 /* .< */, 7750 /* .= */, 7750 /* .> */, 5750 /* .? */, 7750 /* .@ */, 8250 /* .A */, 7750 /* .B */, 6750 /* .C */, 7750 /* .D */, 7750 /* .E */, 7750 /* .F */, 6750 /* .G */, 7750 /* .H */, 7750 /* .I */, 7750 /* .J */, 7750 /* .K */, 7750 /* .L */, 7750 /* .M */, 7750 /* .N */, 6750 /* .O */, 7750 /* .P */, 6750 /* .Q */, 7750 /* .R */, 7750 /* .S */, 6000 /* .T */, 6875 /* .U */, 6250 /* .V */, 6750 /* .W */, 8000 /* .X */, 6000 /* .Y */, 8500 /* .Z */, 7750 /* .[ */, 7750 /* .\ */, 7750 /* .] */, 7750 /* .^ */, 7750 /* ._ */, 7750 /* .` */, 7875 /* .a */, 7750 /* .b */, 7375 /* .c */, 7375 /* .d */, 7375 /* .e */, 7375 /* .f */, 7750 /* .g */, 7750 /* .h */, 7750 /* .i */, 7750 /* .j */, 7750 /* .k */, 7750 /* .l */, 7750 /* .m */, 7750 /* .n */, 7375 /* .o */, 7750 /* .p */, 7375 /* .q */, 7750 /* .r */, 7750 /* .s */, 7150 /* .t */, 7550 /* .u */, 6550 /* .v */, 6750 /* .w */, 7750 /* .x */, 6625 /* .y */, 8000 /* .z */, 7750 /* .{ */, 7750 /* .| */, 7750 /* .} */, 7750 /* .~ */}, - {11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* /! */, 11500 /* /" */, 11500 /* /# */, 11500 /* /$ */, 11500 /* /% */, 11500 /* /& */, 11500 /* /' */, 11500 /* /( */, 11500 /* /) */, 11500 /* /\* */, 11500 /* /+ */, 11500 /* /, */, 11500 /* /- */, 11500 /* /. */, 6750 /* // */, 11500 /* /0 */, 11500 /* /1 */, 11500 /* /2 */, 11500 /* /3 */, 11500 /* /4 */, 11500 /* /5 */, 11500 /* /6 */, 11500 /* /7 */, 11500 /* /8 */, 11500 /* /9 */, 11500 /* /: */, 11500 /* /; */, 11500 /* /< */, 11500 /* /= */, 11500 /* /> */, 11500 /* /? 
*/, 11500 /* /@ */, 10375 /* /A */, 11500 /* /B */, 10950 /* /C */, 11500 /* /D */, 11500 /* /E */, 11500 /* /F */, 10950 /* /G */, 11500 /* /H */, 11500 /* /I */, 11500 /* /J */, 11500 /* /K */, 11500 /* /L */, 11500 /* /M */, 11500 /* /N */, 10950 /* /O */, 11500 /* /P */, 10950 /* /Q */, 11500 /* /R */, 11000 /* /S */, 12000 /* /T */, 11500 /* /U */, 11750 /* /V */, 11625 /* /W */, 11500 /* /X */, 11750 /* /Y */, 11500 /* /Z */, 11500 /* /[ */, 11500 /* /\ */, 11500 /* /] */, 11500 /* /^ */, 11500 /* /_ */, 11500 /* /` */, 10500 /* /a */, 11500 /* /b */, 10625 /* /c */, 10625 /* /d */, 10625 /* /e */, 11500 /* /f */, 10875 /* /g */, 11500 /* /h */, 11500 /* /i */, 11500 /* /j */, 11500 /* /k */, 11500 /* /l */, 11250 /* /m */, 11250 /* /n */, 10625 /* /o */, 11500 /* /p */, 10625 /* /q */, 11250 /* /r */, 11500 /* /s */, 11500 /* /t */, 11500 /* /u */, 11500 /* /v */, 11500 /* /w */, 11500 /* /x */, 11500 /* /y */, 11500 /* /z */, 11500 /* /{ */, 11500 /* /| */, 11500 /* /} */, 11500 /* /~ */}, - {15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0! */, 15000 /* 0" */, 15000 /* 0# */, 15000 /* 0$ */, 15000 /* 0% */, 15000 /* 0& */, 15000 /* 0' */, 15000 /* 0( */, 15000 /* 0) */, 15000 /* 0* */, 15000 /* 0+ */, 15000 /* 0, */, 15000 /* 0- */, 15000 /* 0. */, 15000 /* 0/ */, 15000 /* 00 */, 15000 /* 01 */, 15000 /* 02 */, 15000 /* 03 */, 15000 /* 04 */, 15000 /* 05 */, 15000 /* 06 */, 15000 /* 07 */, 15000 /* 08 */, 15000 /* 09 */, 15000 /* 0: */, 15000 /* 0; */, 15000 /* 0< */, 15000 /* 0= */, 15000 /* 0> */, 15000 /* 0? */, 15000 /* 0@ */, 15000 /* 0A */, 15000 /* 0B */, 15000 /* 0C */, 15000 /* 0D */, 15000 /* 0E */, 15000 /* 0F */, 15000 /* 0G */, 15000 /* 0H */, 15000 /* 0I */, 15000 /* 0J */, 15000 /* 0K */, 15000 /* 0L */, 15000 /* 0M */, 15000 /* 0N */, 15000 /* 0O */, 15000 /* 0P */, 15000 /* 0Q */, 15000 /* 0R */, 15000 /* 0S */, 15000 /* 0T */, 15000 /* 0U */, 15000 /* 0V */, 15000 /* 0W */, 15000 /* 0X */, 15000 /* 0Y */, 15000 /* 0Z */, 15000 /* 0[ */, 15000 /* 0\ */, 15000 /* 0] */, 15000 /* 0^ */, 15000 /* 0_ */, 15000 /* 0` */, 15000 /* 0a */, 15000 /* 0b */, 15000 /* 0c */, 15000 /* 0d */, 15000 /* 0e */, 15000 /* 0f */, 15000 /* 0g */, 15000 /* 0h */, 15000 /* 0i */, 15000 /* 0j */, 15000 /* 0k */, 15000 /* 0l */, 15000 /* 0m */, 15000 /* 0n */, 15000 /* 0o */, 15000 /* 0p */, 15000 /* 0q */, 15000 /* 0r */, 15000 /* 0s */, 15000 /* 0t */, 15000 /* 0u */, 15000 /* 0v */, 15000 /* 0w */, 15000 /* 0x */, 15000 /* 0y */, 15000 /* 0z */, 15000 /* 0{ */, 15000 /* 0| */, 15000 /* 0} */, 15000 /* 0~ */}, - {15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1! 
*/, 15000 /* 1" */, 15000 /* 1# */, 15000 /* 1$ */, 15000 /* 1% */, 15000 /* 1& */, 15000 /* 1' */, 15000 /* 1( */, 15000 /* 1) */, 15000 /* 1* */, 15000 /* 1+ */, 15000 /* 1, */, 15000 /* 1- */, 15000 /* 1. */, 15000 /* 1/ */, 15000 /* 10 */, 15000 /* 11 */, 15000 /* 12 */, 15000 /* 13 */, 15000 /* 14 */, 15000 /* 15 */, 15000 /* 16 */, 15000 /* 17 */, 15000 /* 18 */, 15000 /* 19 */, 15000 /* 1: */, 15000 /* 1; */, 15000 /* 1< */, 15000 /* 1= */, 15000 /* 1> */, 15000 /* 1? */, 15000 /* 1@ */, 15000 /* 1A */, 15000 /* 1B */, 15000 /* 1C */, 15000 /* 1D */, 15000 /* 1E */, 15000 /* 1F */, 15000 /* 1G */, 15000 /* 1H */, 15000 /* 1I */, 15000 /* 1J */, 15000 /* 1K */, 15000 /* 1L */, 15000 /* 1M */, 15000 /* 1N */, 15000 /* 1O */, 15000 /* 1P */, 15000 /* 1Q */, 15000 /* 1R */, 15000 /* 1S */, 15000 /* 1T */, 15000 /* 1U */, 15000 /* 1V */, 15000 /* 1W */, 15000 /* 1X */, 15000 /* 1Y */, 15000 /* 1Z */, 15000 /* 1[ */, 15000 /* 1\ */, 15000 /* 1] */, 15000 /* 1^ */, 15000 /* 1_ */, 15000 /* 1` */, 15000 /* 1a */, 15000 /* 1b */, 15000 /* 1c */, 15000 /* 1d */, 15000 /* 1e */, 15000 /* 1f */, 15000 /* 1g */, 15000 /* 1h */, 15000 /* 1i */, 15000 /* 1j */, 15000 /* 1k */, 15000 /* 1l */, 15000 /* 1m */, 15000 /* 1n */, 15000 /* 1o */, 15000 /* 1p */, 15000 /* 1q */, 15000 /* 1r */, 15000 /* 1s */, 15000 /* 1t */, 15000 /* 1u */, 15000 /* 1v */, 15000 /* 1w */, 15000 /* 1x */, 15000 /* 1y */, 15000 /* 1z */, 15000 /* 1{ */, 15000 /* 1| */, 15000 /* 1} */, 15000 /* 1~ */}, - {15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2! */, 15000 /* 2" */, 15000 /* 2# */, 15000 /* 2$ */, 15000 /* 2% */, 15000 /* 2& */, 15000 /* 2' */, 15000 /* 2( */, 15000 /* 2) */, 15000 /* 2* */, 15000 /* 2+ */, 15000 /* 2, */, 15000 /* 2- */, 15000 /* 2. */, 15000 /* 2/ */, 15000 /* 20 */, 15000 /* 21 */, 15000 /* 22 */, 15000 /* 23 */, 15000 /* 24 */, 15000 /* 25 */, 15000 /* 26 */, 15000 /* 27 */, 15000 /* 28 */, 15000 /* 29 */, 15000 /* 2: */, 15000 /* 2; */, 15000 /* 2< */, 15000 /* 2= */, 15000 /* 2> */, 15000 /* 2? 
*/, 15000 /* 2@ */, 15000 /* 2A */, 15000 /* 2B */, 15000 /* 2C */, 15000 /* 2D */, 15000 /* 2E */, 15000 /* 2F */, 15000 /* 2G */, 15000 /* 2H */, 15000 /* 2I */, 15000 /* 2J */, 15000 /* 2K */, 15000 /* 2L */, 15000 /* 2M */, 15000 /* 2N */, 15000 /* 2O */, 15000 /* 2P */, 15000 /* 2Q */, 15000 /* 2R */, 15000 /* 2S */, 15000 /* 2T */, 15000 /* 2U */, 15000 /* 2V */, 15000 /* 2W */, 15000 /* 2X */, 15000 /* 2Y */, 15000 /* 2Z */, 15000 /* 2[ */, 15000 /* 2\ */, 15000 /* 2] */, 15000 /* 2^ */, 15000 /* 2_ */, 15000 /* 2` */, 15000 /* 2a */, 15000 /* 2b */, 15000 /* 2c */, 15000 /* 2d */, 15000 /* 2e */, 15000 /* 2f */, 15000 /* 2g */, 15000 /* 2h */, 15000 /* 2i */, 15000 /* 2j */, 15000 /* 2k */, 15000 /* 2l */, 15000 /* 2m */, 15000 /* 2n */, 15000 /* 2o */, 15000 /* 2p */, 15000 /* 2q */, 15000 /* 2r */, 15000 /* 2s */, 15000 /* 2t */, 15000 /* 2u */, 15000 /* 2v */, 15000 /* 2w */, 15000 /* 2x */, 15000 /* 2y */, 15000 /* 2z */, 15000 /* 2{ */, 15000 /* 2| */, 15000 /* 2} */, 15000 /* 2~ */}, - {15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3! */, 15000 /* 3" */, 15000 /* 3# */, 15000 /* 3$ */, 15000 /* 3% */, 15000 /* 3& */, 15000 /* 3' */, 15000 /* 3( */, 15000 /* 3) */, 15000 /* 3* */, 15000 /* 3+ */, 15000 /* 3, */, 15000 /* 3- */, 15000 /* 3. */, 15000 /* 3/ */, 15000 /* 30 */, 15000 /* 31 */, 15000 /* 32 */, 15000 /* 33 */, 15000 /* 34 */, 15000 /* 35 */, 15000 /* 36 */, 15000 /* 37 */, 15000 /* 38 */, 15000 /* 39 */, 15000 /* 3: */, 15000 /* 3; */, 15000 /* 3< */, 15000 /* 3= */, 15000 /* 3> */, 15000 /* 3? */, 15000 /* 3@ */, 15000 /* 3A */, 15000 /* 3B */, 15000 /* 3C */, 15000 /* 3D */, 15000 /* 3E */, 15000 /* 3F */, 15000 /* 3G */, 15000 /* 3H */, 15000 /* 3I */, 15000 /* 3J */, 15000 /* 3K */, 15000 /* 3L */, 15000 /* 3M */, 15000 /* 3N */, 15000 /* 3O */, 15000 /* 3P */, 15000 /* 3Q */, 15000 /* 3R */, 15000 /* 3S */, 15000 /* 3T */, 15000 /* 3U */, 15000 /* 3V */, 15000 /* 3W */, 15000 /* 3X */, 15000 /* 3Y */, 15000 /* 3Z */, 15000 /* 3[ */, 15000 /* 3\ */, 15000 /* 3] */, 15000 /* 3^ */, 15000 /* 3_ */, 15000 /* 3` */, 15000 /* 3a */, 15000 /* 3b */, 15000 /* 3c */, 15000 /* 3d */, 15000 /* 3e */, 15000 /* 3f */, 15000 /* 3g */, 15000 /* 3h */, 15000 /* 3i */, 15000 /* 3j */, 15000 /* 3k */, 15000 /* 3l */, 15000 /* 3m */, 15000 /* 3n */, 15000 /* 3o */, 15000 /* 3p */, 15000 /* 3q */, 15000 /* 3r */, 15000 /* 3s */, 15000 /* 3t */, 15000 /* 3u */, 15000 /* 3v */, 15000 /* 3w */, 15000 /* 3x */, 15000 /* 3y */, 15000 /* 3z */, 15000 /* 3{ */, 15000 /* 3| */, 15000 /* 3} */, 15000 /* 3~ */}, - {15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4! 
*/, 15000 /* 4" */, 15000 /* 4# */, 15000 /* 4$ */, 15000 /* 4% */, 15000 /* 4& */, 15000 /* 4' */, 15000 /* 4( */, 15000 /* 4) */, 15000 /* 4* */, 15000 /* 4+ */, 15000 /* 4, */, 15000 /* 4- */, 15000 /* 4. */, 15000 /* 4/ */, 15000 /* 40 */, 15000 /* 41 */, 15000 /* 42 */, 15000 /* 43 */, 15000 /* 44 */, 15000 /* 45 */, 15000 /* 46 */, 15000 /* 47 */, 15000 /* 48 */, 15000 /* 49 */, 15000 /* 4: */, 15000 /* 4; */, 15000 /* 4< */, 15000 /* 4= */, 15000 /* 4> */, 15000 /* 4? */, 15000 /* 4@ */, 15000 /* 4A */, 15000 /* 4B */, 15000 /* 4C */, 15000 /* 4D */, 15000 /* 4E */, 15000 /* 4F */, 15000 /* 4G */, 15000 /* 4H */, 15000 /* 4I */, 15000 /* 4J */, 15000 /* 4K */, 15000 /* 4L */, 15000 /* 4M */, 15000 /* 4N */, 15000 /* 4O */, 15000 /* 4P */, 15000 /* 4Q */, 15000 /* 4R */, 15000 /* 4S */, 15000 /* 4T */, 15000 /* 4U */, 15000 /* 4V */, 15000 /* 4W */, 15000 /* 4X */, 15000 /* 4Y */, 15000 /* 4Z */, 15000 /* 4[ */, 15000 /* 4\ */, 15000 /* 4] */, 15000 /* 4^ */, 15000 /* 4_ */, 15000 /* 4` */, 15000 /* 4a */, 15000 /* 4b */, 15000 /* 4c */, 15000 /* 4d */, 15000 /* 4e */, 15000 /* 4f */, 15000 /* 4g */, 15000 /* 4h */, 15000 /* 4i */, 15000 /* 4j */, 15000 /* 4k */, 15000 /* 4l */, 15000 /* 4m */, 15000 /* 4n */, 15000 /* 4o */, 15000 /* 4p */, 15000 /* 4q */, 15000 /* 4r */, 15000 /* 4s */, 15000 /* 4t */, 15000 /* 4u */, 15000 /* 4v */, 15000 /* 4w */, 15000 /* 4x */, 15000 /* 4y */, 15000 /* 4z */, 15000 /* 4{ */, 15000 /* 4| */, 15000 /* 4} */, 15000 /* 4~ */}, - {15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5! */, 15000 /* 5" */, 15000 /* 5# */, 15000 /* 5$ */, 15000 /* 5% */, 15000 /* 5& */, 15000 /* 5' */, 15000 /* 5( */, 15000 /* 5) */, 15000 /* 5* */, 15000 /* 5+ */, 15000 /* 5, */, 15000 /* 5- */, 15000 /* 5. */, 15000 /* 5/ */, 15000 /* 50 */, 15000 /* 51 */, 15000 /* 52 */, 15000 /* 53 */, 15000 /* 54 */, 15000 /* 55 */, 15000 /* 56 */, 15000 /* 57 */, 15000 /* 58 */, 15000 /* 59 */, 15000 /* 5: */, 15000 /* 5; */, 15000 /* 5< */, 15000 /* 5= */, 15000 /* 5> */, 15000 /* 5? 
*/, 15000 /* 5@ */, 15000 /* 5A */, 15000 /* 5B */, 15000 /* 5C */, 15000 /* 5D */, 15000 /* 5E */, 15000 /* 5F */, 15000 /* 5G */, 15000 /* 5H */, 15000 /* 5I */, 15000 /* 5J */, 15000 /* 5K */, 15000 /* 5L */, 15000 /* 5M */, 15000 /* 5N */, 15000 /* 5O */, 15000 /* 5P */, 15000 /* 5Q */, 15000 /* 5R */, 15000 /* 5S */, 15000 /* 5T */, 15000 /* 5U */, 15000 /* 5V */, 15000 /* 5W */, 15000 /* 5X */, 15000 /* 5Y */, 15000 /* 5Z */, 15000 /* 5[ */, 15000 /* 5\ */, 15000 /* 5] */, 15000 /* 5^ */, 15000 /* 5_ */, 15000 /* 5` */, 15000 /* 5a */, 15000 /* 5b */, 15000 /* 5c */, 15000 /* 5d */, 15000 /* 5e */, 15000 /* 5f */, 15000 /* 5g */, 15000 /* 5h */, 15000 /* 5i */, 15000 /* 5j */, 15000 /* 5k */, 15000 /* 5l */, 15000 /* 5m */, 15000 /* 5n */, 15000 /* 5o */, 15000 /* 5p */, 15000 /* 5q */, 15000 /* 5r */, 15000 /* 5s */, 15000 /* 5t */, 15000 /* 5u */, 15000 /* 5v */, 15000 /* 5w */, 15000 /* 5x */, 15000 /* 5y */, 15000 /* 5z */, 15000 /* 5{ */, 15000 /* 5| */, 15000 /* 5} */, 15000 /* 5~ */}, - {15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6! */, 15000 /* 6" */, 15000 /* 6# */, 15000 /* 6$ */, 15000 /* 6% */, 15000 /* 6& */, 15000 /* 6' */, 15000 /* 6( */, 15000 /* 6) */, 15000 /* 6* */, 15000 /* 6+ */, 15000 /* 6, */, 15000 /* 6- */, 15000 /* 6. */, 15000 /* 6/ */, 15000 /* 60 */, 15000 /* 61 */, 15000 /* 62 */, 15000 /* 63 */, 15000 /* 64 */, 15000 /* 65 */, 15000 /* 66 */, 15000 /* 67 */, 15000 /* 68 */, 15000 /* 69 */, 15000 /* 6: */, 15000 /* 6; */, 15000 /* 6< */, 15000 /* 6= */, 15000 /* 6> */, 15000 /* 6? */, 15000 /* 6@ */, 15000 /* 6A */, 15000 /* 6B */, 15000 /* 6C */, 15000 /* 6D */, 15000 /* 6E */, 15000 /* 6F */, 15000 /* 6G */, 15000 /* 6H */, 15000 /* 6I */, 15000 /* 6J */, 15000 /* 6K */, 15000 /* 6L */, 15000 /* 6M */, 15000 /* 6N */, 15000 /* 6O */, 15000 /* 6P */, 15000 /* 6Q */, 15000 /* 6R */, 15000 /* 6S */, 15000 /* 6T */, 15000 /* 6U */, 15000 /* 6V */, 15000 /* 6W */, 15000 /* 6X */, 15000 /* 6Y */, 15000 /* 6Z */, 15000 /* 6[ */, 15000 /* 6\ */, 15000 /* 6] */, 15000 /* 6^ */, 15000 /* 6_ */, 15000 /* 6` */, 15000 /* 6a */, 15000 /* 6b */, 15000 /* 6c */, 15000 /* 6d */, 15000 /* 6e */, 15000 /* 6f */, 15000 /* 6g */, 15000 /* 6h */, 15000 /* 6i */, 15000 /* 6j */, 15000 /* 6k */, 15000 /* 6l */, 15000 /* 6m */, 15000 /* 6n */, 15000 /* 6o */, 15000 /* 6p */, 15000 /* 6q */, 15000 /* 6r */, 15000 /* 6s */, 15000 /* 6t */, 15000 /* 6u */, 15000 /* 6v */, 15000 /* 6w */, 15000 /* 6x */, 15000 /* 6y */, 15000 /* 6z */, 15000 /* 6{ */, 15000 /* 6| */, 15000 /* 6} */, 15000 /* 6~ */}, - {15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7! 
*/, 15000 /* 7" */, 15000 /* 7# */, 15000 /* 7$ */, 15000 /* 7% */, 15000 /* 7& */, 15000 /* 7' */, 15000 /* 7( */, 15000 /* 7) */, 15000 /* 7* */, 15000 /* 7+ */, 15000 /* 7, */, 15000 /* 7- */, 15000 /* 7. */, 15000 /* 7/ */, 15000 /* 70 */, 15000 /* 71 */, 15000 /* 72 */, 15000 /* 73 */, 15000 /* 74 */, 15000 /* 75 */, 15000 /* 76 */, 15000 /* 77 */, 15000 /* 78 */, 15000 /* 79 */, 15000 /* 7: */, 15000 /* 7; */, 15000 /* 7< */, 15000 /* 7= */, 15000 /* 7> */, 15000 /* 7? */, 15000 /* 7@ */, 15000 /* 7A */, 15000 /* 7B */, 15000 /* 7C */, 15000 /* 7D */, 15000 /* 7E */, 15000 /* 7F */, 15000 /* 7G */, 15000 /* 7H */, 15000 /* 7I */, 15000 /* 7J */, 15000 /* 7K */, 15000 /* 7L */, 15000 /* 7M */, 15000 /* 7N */, 15000 /* 7O */, 15000 /* 7P */, 15000 /* 7Q */, 15000 /* 7R */, 15000 /* 7S */, 15000 /* 7T */, 15000 /* 7U */, 15000 /* 7V */, 15000 /* 7W */, 15000 /* 7X */, 15000 /* 7Y */, 15000 /* 7Z */, 15000 /* 7[ */, 15000 /* 7\ */, 15000 /* 7] */, 15000 /* 7^ */, 15000 /* 7_ */, 15000 /* 7` */, 15000 /* 7a */, 15000 /* 7b */, 15000 /* 7c */, 15000 /* 7d */, 15000 /* 7e */, 15000 /* 7f */, 15000 /* 7g */, 15000 /* 7h */, 15000 /* 7i */, 15000 /* 7j */, 15000 /* 7k */, 15000 /* 7l */, 15000 /* 7m */, 15000 /* 7n */, 15000 /* 7o */, 15000 /* 7p */, 15000 /* 7q */, 15000 /* 7r */, 15000 /* 7s */, 15000 /* 7t */, 15000 /* 7u */, 15000 /* 7v */, 15000 /* 7w */, 15000 /* 7x */, 15000 /* 7y */, 15000 /* 7z */, 15000 /* 7{ */, 15000 /* 7| */, 15000 /* 7} */, 15000 /* 7~ */}, - {15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8! */, 15000 /* 8" */, 15000 /* 8# */, 15000 /* 8$ */, 15000 /* 8% */, 15000 /* 8& */, 15000 /* 8' */, 15000 /* 8( */, 15000 /* 8) */, 15000 /* 8* */, 15000 /* 8+ */, 15000 /* 8, */, 15000 /* 8- */, 15000 /* 8. */, 15000 /* 8/ */, 15000 /* 80 */, 15000 /* 81 */, 15000 /* 82 */, 15000 /* 83 */, 15000 /* 84 */, 15000 /* 85 */, 15000 /* 86 */, 15000 /* 87 */, 15000 /* 88 */, 15000 /* 89 */, 15000 /* 8: */, 15000 /* 8; */, 15000 /* 8< */, 15000 /* 8= */, 15000 /* 8> */, 15000 /* 8? 
*/, 15000 /* 8@ */, 15000 /* 8A */, 15000 /* 8B */, 15000 /* 8C */, 15000 /* 8D */, 15000 /* 8E */, 15000 /* 8F */, 15000 /* 8G */, 15000 /* 8H */, 15000 /* 8I */, 15000 /* 8J */, 15000 /* 8K */, 15000 /* 8L */, 15000 /* 8M */, 15000 /* 8N */, 15000 /* 8O */, 15000 /* 8P */, 15000 /* 8Q */, 15000 /* 8R */, 15000 /* 8S */, 15000 /* 8T */, 15000 /* 8U */, 15000 /* 8V */, 15000 /* 8W */, 15000 /* 8X */, 15000 /* 8Y */, 15000 /* 8Z */, 15000 /* 8[ */, 15000 /* 8\ */, 15000 /* 8] */, 15000 /* 8^ */, 15000 /* 8_ */, 15000 /* 8` */, 15000 /* 8a */, 15000 /* 8b */, 15000 /* 8c */, 15000 /* 8d */, 15000 /* 8e */, 15000 /* 8f */, 15000 /* 8g */, 15000 /* 8h */, 15000 /* 8i */, 15000 /* 8j */, 15000 /* 8k */, 15000 /* 8l */, 15000 /* 8m */, 15000 /* 8n */, 15000 /* 8o */, 15000 /* 8p */, 15000 /* 8q */, 15000 /* 8r */, 15000 /* 8s */, 15000 /* 8t */, 15000 /* 8u */, 15000 /* 8v */, 15000 /* 8w */, 15000 /* 8x */, 15000 /* 8y */, 15000 /* 8z */, 15000 /* 8{ */, 15000 /* 8| */, 15000 /* 8} */, 15000 /* 8~ */}, - {15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9! */, 15000 /* 9" */, 15000 /* 9# */, 15000 /* 9$ */, 15000 /* 9% */, 15000 /* 9& */, 15000 /* 9' */, 15000 /* 9( */, 15000 /* 9) */, 15000 /* 9* */, 15000 /* 9+ */, 15000 /* 9, */, 15000 /* 9- */, 15000 /* 9. */, 15000 /* 9/ */, 15000 /* 90 */, 15000 /* 91 */, 15000 /* 92 */, 15000 /* 93 */, 15000 /* 94 */, 15000 /* 95 */, 15000 /* 96 */, 15000 /* 97 */, 15000 /* 98 */, 15000 /* 99 */, 15000 /* 9: */, 15000 /* 9; */, 15000 /* 9< */, 15000 /* 9= */, 15000 /* 9> */, 15000 /* 9? */, 15000 /* 9@ */, 15000 /* 9A */, 15000 /* 9B */, 15000 /* 9C */, 15000 /* 9D */, 15000 /* 9E */, 15000 /* 9F */, 15000 /* 9G */, 15000 /* 9H */, 15000 /* 9I */, 15000 /* 9J */, 15000 /* 9K */, 15000 /* 9L */, 15000 /* 9M */, 15000 /* 9N */, 15000 /* 9O */, 15000 /* 9P */, 15000 /* 9Q */, 15000 /* 9R */, 15000 /* 9S */, 15000 /* 9T */, 15000 /* 9U */, 15000 /* 9V */, 15000 /* 9W */, 15000 /* 9X */, 15000 /* 9Y */, 15000 /* 9Z */, 15000 /* 9[ */, 15000 /* 9\ */, 15000 /* 9] */, 15000 /* 9^ */, 15000 /* 9_ */, 15000 /* 9` */, 15000 /* 9a */, 15000 /* 9b */, 15000 /* 9c */, 15000 /* 9d */, 15000 /* 9e */, 15000 /* 9f */, 15000 /* 9g */, 15000 /* 9h */, 15000 /* 9i */, 15000 /* 9j */, 15000 /* 9k */, 15000 /* 9l */, 15000 /* 9m */, 15000 /* 9n */, 15000 /* 9o */, 15000 /* 9p */, 15000 /* 9q */, 15000 /* 9r */, 15000 /* 9s */, 15000 /* 9t */, 15000 /* 9u */, 15000 /* 9v */, 15000 /* 9w */, 15000 /* 9x */, 15000 /* 9y */, 15000 /* 9z */, 15000 /* 9{ */, 15000 /* 9| */, 15000 /* 9} */, 15000 /* 9~ */}, - {8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* :! 
*/, 8250 /* :" */, 8250 /* :# */, 8250 /* :$ */, 8250 /* :% */, 8250 /* :& */, 8250 /* :' */, 8250 /* :( */, 8250 /* :) */, 8250 /* :* */, 8250 /* :+ */, 8250 /* :, */, 8250 /* :- */, 8250 /* :. */, 8250 /* :/ */, 8250 /* :0 */, 8250 /* :1 */, 8250 /* :2 */, 8250 /* :3 */, 8250 /* :4 */, 8250 /* :5 */, 8250 /* :6 */, 8250 /* :7 */, 8250 /* :8 */, 8250 /* :9 */, 8250 /* :: */, 8250 /* :; */, 8250 /* :< */, 8250 /* := */, 8250 /* :> */, 8250 /* :? */, 8250 /* :@ */, 8250 /* :A */, 8250 /* :B */, 8250 /* :C */, 8250 /* :D */, 8250 /* :E */, 8250 /* :F */, 8250 /* :G */, 8250 /* :H */, 8250 /* :I */, 8250 /* :J */, 8250 /* :K */, 8250 /* :L */, 8250 /* :M */, 8250 /* :N */, 8250 /* :O */, 8250 /* :P */, 8250 /* :Q */, 8250 /* :R */, 8250 /* :S */, 7250 /* :T */, 8250 /* :U */, 7500 /* :V */, 8250 /* :W */, 7875 /* :X */, 7000 /* :Y */, 8250 /* :Z */, 8250 /* :[ */, 8250 /* :\ */, 8250 /* :] */, 8250 /* :^ */, 8250 /* :_ */, 8250 /* :` */, 8250 /* :a */, 8250 /* :b */, 8250 /* :c */, 8250 /* :d */, 8250 /* :e */, 8250 /* :f */, 8250 /* :g */, 8250 /* :h */, 8250 /* :i */, 8250 /* :j */, 8250 /* :k */, 8250 /* :l */, 8250 /* :m */, 8250 /* :n */, 8250 /* :o */, 8250 /* :p */, 8250 /* :q */, 8250 /* :r */, 8250 /* :s */, 8250 /* :t */, 8250 /* :u */, 8250 /* :v */, 8250 /* :w */, 8250 /* :x */, 8250 /* :y */, 8250 /* :z */, 8250 /* :{ */, 8250 /* :| */, 8250 /* :} */, 8250 /* :~ */}, - {8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ;! */, 8250 /* ;" */, 8250 /* ;# */, 8250 /* ;$ */, 8250 /* ;% */, 8250 /* ;& */, 8250 /* ;' */, 8250 /* ;( */, 8250 /* ;) */, 8250 /* ;* */, 8250 /* ;+ */, 8250 /* ;, */, 8250 /* ;- */, 8250 /* ;. */, 8250 /* ;/ */, 8250 /* ;0 */, 8250 /* ;1 */, 8250 /* ;2 */, 8250 /* ;3 */, 8250 /* ;4 */, 8250 /* ;5 */, 8250 /* ;6 */, 8250 /* ;7 */, 8250 /* ;8 */, 8250 /* ;9 */, 8250 /* ;: */, 8250 /* ;; */, 8250 /* ;< */, 8250 /* ;= */, 8250 /* ;> */, 8250 /* ;? 
*/, 8250 /* ;@ */, 8250 /* ;A */, 8250 /* ;B */, 8250 /* ;C */, 8250 /* ;D */, 8250 /* ;E */, 8250 /* ;F */, 8250 /* ;G */, 8250 /* ;H */, 8250 /* ;I */, 8250 /* ;J */, 8250 /* ;K */, 8250 /* ;L */, 8250 /* ;M */, 8250 /* ;N */, 8250 /* ;O */, 8250 /* ;P */, 8250 /* ;Q */, 8250 /* ;R */, 8250 /* ;S */, 7250 /* ;T */, 8250 /* ;U */, 7500 /* ;V */, 8250 /* ;W */, 7875 /* ;X */, 7000 /* ;Y */, 8250 /* ;Z */, 8250 /* ;[ */, 8250 /* ;\ */, 8250 /* ;] */, 8250 /* ;^ */, 8250 /* ;_ */, 8250 /* ;` */, 8250 /* ;a */, 8250 /* ;b */, 8250 /* ;c */, 8250 /* ;d */, 8250 /* ;e */, 8250 /* ;f */, 8250 /* ;g */, 8250 /* ;h */, 8250 /* ;i */, 8250 /* ;j */, 8250 /* ;k */, 8250 /* ;l */, 8250 /* ;m */, 8250 /* ;n */, 8250 /* ;o */, 8250 /* ;p */, 8250 /* ;q */, 8250 /* ;r */, 8250 /* ;s */, 8250 /* ;t */, 8250 /* ;u */, 8250 /* ;v */, 8250 /* ;w */, 8250 /* ;x */, 8250 /* ;y */, 8250 /* ;z */, 8250 /* ;{ */, 8250 /* ;| */, 8250 /* ;} */, 8250 /* ;~ */}, - {15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* */, 15000 /* */, 15000 /* =? */, 15000 /* =@ */, 15000 /* =A */, 15000 /* =B */, 15000 /* =C */, 15000 /* =D */, 15000 /* =E */, 15000 /* =F */, 15000 /* =G */, 15000 /* =H */, 15000 /* =I */, 15000 /* =J */, 15000 /* =K */, 15000 /* =L */, 15000 /* =M */, 15000 /* =N */, 15000 /* =O */, 15000 /* =P */, 15000 /* =Q */, 15000 /* =R */, 15000 /* =S */, 15000 /* =T */, 15000 /* =U */, 15000 /* =V */, 15000 /* =W */, 15000 /* =X */, 15000 /* =Y */, 15000 /* =Z */, 15000 /* =[ */, 15000 /* =\ */, 15000 /* =] */, 15000 /* =^ */, 15000 /* =_ */, 15000 /* =` */, 15000 /* =a */, 15000 /* =b */, 15000 /* =c */, 15000 /* =d */, 15000 /* =e */, 15000 /* =f */, 15000 /* =g */, 15000 /* =h */, 15000 /* =i */, 15000 /* =j */, 15000 /* =k */, 15000 /* =l */, 15000 /* =m */, 15000 /* =n */, 15000 /* =o */, 15000 /* =p */, 15000 /* =q */, 15000 /* =r */, 15000 /* =s */, 15000 /* =t */, 15000 /* =u */, 15000 /* =v */, 15000 /* =w */, 15000 /* =x */, 15000 /* =y */, 15000 /* =z */, 15000 /* ={ */, 15000 /* =| */, 15000 /* =} */, 15000 /* =~ */}, - {15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* >! */, 15000 /* >" */, 15000 /* ># */, 15000 /* >$ */, 15000 /* >% */, 15000 /* >& */, 15000 /* >' */, 15000 /* >( */, 15000 /* >) */, 15000 /* >* */, 15000 /* >+ */, 15000 /* >, */, 15000 /* >- */, 15000 /* >. */, 15000 /* >/ */, 15000 /* >0 */, 15000 /* >1 */, 15000 /* >2 */, 15000 /* >3 */, 15000 /* >4 */, 15000 /* >5 */, 15000 /* >6 */, 15000 /* >7 */, 15000 /* >8 */, 15000 /* >9 */, 15000 /* >: */, 15000 /* >; */, 15000 /* >< */, 15000 /* >= */, 15000 /* >> */, 15000 /* >? 
*/, 15000 /* >@ */, 15000 /* >A */, 15000 /* >B */, 15000 /* >C */, 15000 /* >D */, 15000 /* >E */, 15000 /* >F */, 15000 /* >G */, 15000 /* >H */, 15000 /* >I */, 15000 /* >J */, 15000 /* >K */, 15000 /* >L */, 15000 /* >M */, 15000 /* >N */, 15000 /* >O */, 15000 /* >P */, 15000 /* >Q */, 15000 /* >R */, 15000 /* >S */, 15000 /* >T */, 15000 /* >U */, 15000 /* >V */, 15000 /* >W */, 15000 /* >X */, 15000 /* >Y */, 15000 /* >Z */, 15000 /* >[ */, 15000 /* >\ */, 15000 /* >] */, 15000 /* >^ */, 15000 /* >_ */, 15000 /* >` */, 15000 /* >a */, 15000 /* >b */, 15000 /* >c */, 15000 /* >d */, 15000 /* >e */, 15000 /* >f */, 15000 /* >g */, 15000 /* >h */, 15000 /* >i */, 15000 /* >j */, 15000 /* >k */, 15000 /* >l */, 15000 /* >m */, 15000 /* >n */, 15000 /* >o */, 15000 /* >p */, 15000 /* >q */, 15000 /* >r */, 15000 /* >s */, 15000 /* >t */, 15000 /* >u */, 15000 /* >v */, 15000 /* >w */, 15000 /* >x */, 15000 /* >y */, 15000 /* >z */, 15000 /* >{ */, 15000 /* >| */, 15000 /* >} */, 15000 /* >~ */}, - {12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ?! */, 12500 /* ?" */, 12500 /* ?# */, 12500 /* ?$ */, 12500 /* ?% */, 12500 /* ?& */, 12500 /* ?' */, 12500 /* ?( */, 12500 /* ?) */, 12500 /* ?* */, 12500 /* ?+ */, 9500 /* ?, */, 12500 /* ?- */, 9500 /* ?. */, 12500 /* ?/ */, 12500 /* ?0 */, 12500 /* ?1 */, 12500 /* ?2 */, 12500 /* ?3 */, 12500 /* ?4 */, 12500 /* ?5 */, 12500 /* ?6 */, 12500 /* ?7 */, 12500 /* ?8 */, 12500 /* ?9 */, 12500 /* ?: */, 12500 /* ?; */, 12500 /* ?< */, 12500 /* ?= */, 12500 /* ?> */, 12500 /* ?? */, 12500 /* ?@ */, 11750 /* ?A */, 12500 /* ?B */, 12500 /* ?C */, 12500 /* ?D */, 12500 /* ?E */, 12500 /* ?F */, 12500 /* ?G */, 12500 /* ?H */, 12500 /* ?I */, 12500 /* ?J */, 12500 /* ?K */, 12500 /* ?L */, 12500 /* ?M */, 12500 /* ?N */, 12500 /* ?O */, 12500 /* ?P */, 12500 /* ?Q */, 12500 /* ?R */, 12500 /* ?S */, 12500 /* ?T */, 12500 /* ?U */, 12750 /* ?V */, 12875 /* ?W */, 12500 /* ?X */, 12500 /* ?Y */, 12500 /* ?Z */, 12500 /* ?[ */, 12500 /* ?\ */, 12500 /* ?] */, 12500 /* ?^ */, 12500 /* ?_ */, 12500 /* ?` */, 12500 /* ?a */, 12500 /* ?b */, 12500 /* ?c */, 12500 /* ?d */, 12500 /* ?e */, 12500 /* ?f */, 12500 /* ?g */, 12500 /* ?h */, 12500 /* ?i */, 12500 /* ?j */, 12500 /* ?k */, 12500 /* ?l */, 12500 /* ?m */, 12500 /* ?n */, 12500 /* ?o */, 12500 /* ?p */, 12500 /* ?q */, 12500 /* ?r */, 12500 /* ?s */, 12500 /* ?t */, 12500 /* ?u */, 12500 /* ?v */, 12500 /* ?w */, 12500 /* ?x */, 12500 /* ?y */, 12500 /* ?z */, 12500 /* ?{ */, 12500 /* ?| */, 12500 /* ?} */, 12500 /* ?~ */}, - {22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @! 
*/, 22575 /* @" */, 22575 /* @# */, 22575 /* @$ */, 22575 /* @% */, 22575 /* @& */, 22575 /* @' */, 22575 /* @( */, 22575 /* @) */, 22575 /* @* */, 22575 /* @+ */, 22575 /* @, */, 22575 /* @- */, 22575 /* @. */, 22575 /* @/ */, 22575 /* @0 */, 22575 /* @1 */, 22575 /* @2 */, 22575 /* @3 */, 22575 /* @4 */, 22575 /* @5 */, 22575 /* @6 */, 22575 /* @7 */, 22575 /* @8 */, 22575 /* @9 */, 22575 /* @: */, 22575 /* @; */, 22575 /* @< */, 22575 /* @= */, 22575 /* @> */, 22575 /* @? */, 22575 /* @@ */, 22075 /* @A */, 22575 /* @B */, 22575 /* @C */, 22575 /* @D */, 22575 /* @E */, 22575 /* @F */, 22575 /* @G */, 22575 /* @H */, 22075 /* @I */, 21825 /* @J */, 22575 /* @K */, 22575 /* @L */, 22575 /* @M */, 22575 /* @N */, 22575 /* @O */, 22575 /* @P */, 22575 /* @Q */, 22575 /* @R */, 22325 /* @S */, 21825 /* @T */, 22575 /* @U */, 21825 /* @V */, 22075 /* @W */, 21575 /* @X */, 21575 /* @Y */, 22200 /* @Z */, 22575 /* @[ */, 22575 /* @\ */, 22575 /* @] */, 22575 /* @^ */, 22575 /* @_ */, 22575 /* @` */, 22575 /* @a */, 22575 /* @b */, 22575 /* @c */, 22575 /* @d */, 22575 /* @e */, 22575 /* @f */, 22450 /* @g */, 22575 /* @h */, 22575 /* @i */, 22575 /* @j */, 22575 /* @k */, 22575 /* @l */, 22575 /* @m */, 22575 /* @n */, 22575 /* @o */, 22575 /* @p */, 22575 /* @q */, 22575 /* @r */, 22575 /* @s */, 22575 /* @t */, 22575 /* @u */, 22575 /* @v */, 22325 /* @w */, 22075 /* @x */, 22325 /* @y */, 22325 /* @z */, 22575 /* @{ */, 22575 /* @| */, 22575 /* @} */, 22575 /* @~ */}, - {17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A! */, 15875 /* A" */, 17125 /* A# */, 17125 /* A$ */, 17125 /* A% */, 16875 /* A& */, 15875 /* A' */, 17125 /* A( */, 17125 /* A) */, 15875 /* A* */, 17125 /* A+ */, 17625 /* A, */, 16750 /* A- */, 17625 /* A. */, 17125 /* A/ */, 17125 /* A0 */, 17125 /* A1 */, 17125 /* A2 */, 17125 /* A3 */, 17125 /* A4 */, 17125 /* A5 */, 17125 /* A6 */, 17125 /* A7 */, 17125 /* A8 */, 17125 /* A9 */, 17125 /* A: */, 17125 /* A; */, 17125 /* A< */, 17125 /* A= */, 17125 /* A> */, 15875 /* A? 
*/, 16625 /* A@ */, 17375 /* AA */, 17125 /* AB */, 16375 /* AC */, 17125 /* AD */, 17125 /* AE */, 17125 /* AF */, 16375 /* AG */, 17125 /* AH */, 17125 /* AI */, 16775 /* AJ */, 17125 /* AK */, 17125 /* AL */, 17125 /* AM */, 17125 /* AN */, 16375 /* AO */, 17125 /* AP */, 16375 /* AQ */, 17125 /* AR */, 17125 /* AS */, 15500 /* AT */, 16525 /* AU */, 15775 /* AV */, 16450 /* AW */, 17250 /* AX */, 15375 /* AY */, 17125 /* AZ */, 17125 /* A[ */, 17125 /* A\ */, 17125 /* A] */, 17125 /* A^ */, 17500 /* A_ */, 17125 /* A` */, 17200 /* Aa */, 17125 /* Ab */, 17125 /* Ac */, 17125 /* Ad */, 17125 /* Ae */, 17125 /* Af */, 17125 /* Ag */, 17125 /* Ah */, 17125 /* Ai */, 17125 /* Aj */, 17125 /* Ak */, 17125 /* Al */, 17125 /* Am */, 17125 /* An */, 17125 /* Ao */, 17125 /* Ap */, 17125 /* Aq */, 17125 /* Ar */, 17125 /* As */, 16650 /* At */, 17125 /* Au */, 16375 /* Av */, 16625 /* Aw */, 17125 /* Ax */, 16750 /* Ay */, 17375 /* Az */, 17125 /* A{ */, 17125 /* A| */, 17125 /* A} */, 17125 /* A~ */}, - {16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B! */, 16675 /* B" */, 16675 /* B# */, 16675 /* B$ */, 16675 /* B% */, 16675 /* B& */, 16675 /* B' */, 16675 /* B( */, 16675 /* B) */, 16675 /* B* */, 16675 /* B+ */, 16300 /* B, */, 16675 /* B- */, 16300 /* B. */, 16050 /* B/ */, 16675 /* B0 */, 16675 /* B1 */, 16675 /* B2 */, 16675 /* B3 */, 16675 /* B4 */, 16675 /* B5 */, 16675 /* B6 */, 16675 /* B7 */, 16675 /* B8 */, 16675 /* B9 */, 16675 /* B: */, 16675 /* B; */, 16675 /* B< */, 16675 /* B= */, 16675 /* B> */, 16675 /* B? */, 16675 /* B@ */, 16300 /* BA */, 16675 /* BB */, 16675 /* BC */, 16675 /* BD */, 16675 /* BE */, 16675 /* BF */, 16675 /* BG */, 16675 /* BH */, 16675 /* BI */, 16425 /* BJ */, 16675 /* BK */, 16675 /* BL */, 16675 /* BM */, 16675 /* BN */, 16675 /* BO */, 16675 /* BP */, 16675 /* BQ */, 16675 /* BR */, 16675 /* BS */, 16025 /* BT */, 16675 /* BU */, 15975 /* BV */, 16475 /* BW */, 16150 /* BX */, 15600 /* BY */, 16675 /* BZ */, 16675 /* B[ */, 16675 /* B\ */, 16675 /* B] */, 16675 /* B^ */, 15075 /* B_ */, 16675 /* B` */, 16675 /* Ba */, 16675 /* Bb */, 16675 /* Bc */, 16675 /* Bd */, 16675 /* Be */, 16625 /* Bf */, 16600 /* Bg */, 16675 /* Bh */, 16675 /* Bi */, 16675 /* Bj */, 16675 /* Bk */, 16675 /* Bl */, 16675 /* Bm */, 16675 /* Bn */, 16675 /* Bo */, 16675 /* Bp */, 16675 /* Bq */, 16675 /* Br */, 16675 /* Bs */, 16550 /* Bt */, 16675 /* Bu */, 16475 /* Bv */, 16500 /* Bw */, 16375 /* Bx */, 16475 /* By */, 16675 /* Bz */, 16675 /* B{ */, 16675 /* B| */, 16675 /* B} */, 16675 /* B~ */}, - {16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C! 
*/, 16275 /* C" */, 16275 /* C# */, 16275 /* C$ */, 16275 /* C% */, 16025 /* C& */, 16275 /* C' */, 16275 /* C( */, 16275 /* C) */, 16275 /* C* */, 16275 /* C+ */, 16275 /* C, */, 16025 /* C- */, 16275 /* C. */, 15775 /* C/ */, 16275 /* C0 */, 16275 /* C1 */, 16275 /* C2 */, 16275 /* C3 */, 16275 /* C4 */, 16275 /* C5 */, 16275 /* C6 */, 16275 /* C7 */, 16275 /* C8 */, 16275 /* C9 */, 16275 /* C: */, 16275 /* C; */, 16275 /* C< */, 16275 /* C= */, 16275 /* C> */, 16275 /* C? */, 16025 /* C@ */, 16075 /* CA */, 16275 /* CB */, 15950 /* CC */, 16275 /* CD */, 16275 /* CE */, 16275 /* CF */, 15950 /* CG */, 16275 /* CH */, 16275 /* CI */, 16275 /* CJ */, 16275 /* CK */, 16275 /* CL */, 16275 /* CM */, 16275 /* CN */, 15950 /* CO */, 16275 /* CP */, 15950 /* CQ */, 16275 /* CR */, 16275 /* CS */, 16275 /* CT */, 16275 /* CU */, 16275 /* CV */, 16275 /* CW */, 15775 /* CX */, 16025 /* CY */, 16275 /* CZ */, 16275 /* C[ */, 16275 /* C\ */, 16275 /* C] */, 16275 /* C^ */, 15175 /* C_ */, 16275 /* C` */, 16275 /* Ca */, 16275 /* Cb */, 16275 /* Cc */, 16275 /* Cd */, 16275 /* Ce */, 16275 /* Cf */, 16275 /* Cg */, 16275 /* Ch */, 16275 /* Ci */, 16275 /* Cj */, 16275 /* Ck */, 16275 /* Cl */, 16275 /* Cm */, 16275 /* Cn */, 16275 /* Co */, 16275 /* Cp */, 16275 /* Cq */, 16275 /* Cr */, 16275 /* Cs */, 16275 /* Ct */, 16275 /* Cu */, 16275 /* Cv */, 16275 /* Cw */, 16275 /* Cx */, 16275 /* Cy */, 16275 /* Cz */, 16275 /* C{ */, 16275 /* C| */, 16275 /* C} */, 16275 /* C~ */}, - {17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D! */, 17175 /* D" */, 17425 /* D# */, 17425 /* D$ */, 17425 /* D% */, 17425 /* D& */, 17175 /* D' */, 17425 /* D( */, 17425 /* D) */, 17175 /* D* */, 17425 /* D+ */, 16425 /* D, */, 17800 /* D- */, 16425 /* D. */, 16300 /* D/ */, 17425 /* D0 */, 17425 /* D1 */, 17425 /* D2 */, 17425 /* D3 */, 17425 /* D4 */, 17425 /* D5 */, 17425 /* D6 */, 17425 /* D7 */, 17425 /* D8 */, 17425 /* D9 */, 17425 /* D: */, 17425 /* D; */, 17425 /* D< */, 17425 /* D= */, 17425 /* D> */, 17425 /* D? 
*/, 17425 /* D@ */, 16500 /* DA */, 17425 /* DB */, 17425 /* DC */, 17425 /* DD */, 17425 /* DE */, 17425 /* DF */, 17425 /* DG */, 17425 /* DH */, 17425 /* DI */, 16675 /* DJ */, 17425 /* DK */, 17425 /* DL */, 17425 /* DM */, 17425 /* DN */, 17425 /* DO */, 17425 /* DP */, 17425 /* DQ */, 17425 /* DR */, 17425 /* DS */, 16800 /* DT */, 17425 /* DU */, 16650 /* DV */, 16850 /* DW */, 16675 /* DX */, 16275 /* DY */, 16800 /* DZ */, 17425 /* D[ */, 17425 /* D\ */, 17425 /* D] */, 17425 /* D^ */, 17425 /* D_ */, 17425 /* D` */, 17425 /* Da */, 17425 /* Db */, 17425 /* Dc */, 17425 /* Dd */, 17425 /* De */, 17425 /* Df */, 17425 /* Dg */, 17425 /* Dh */, 17425 /* Di */, 17425 /* Dj */, 17425 /* Dk */, 17425 /* Dl */, 17425 /* Dm */, 17425 /* Dn */, 17425 /* Do */, 17425 /* Dp */, 17425 /* Dq */, 17425 /* Dr */, 17425 /* Ds */, 17425 /* Dt */, 17425 /* Du */, 17425 /* Dv */, 17425 /* Dw */, 17425 /* Dx */, 17425 /* Dy */, 17425 /* Dz */, 17425 /* D{ */, 17425 /* D| */, 17425 /* D} */, 17425 /* D~ */}, - {15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E! */, 15175 /* E" */, 15175 /* E# */, 15175 /* E$ */, 15175 /* E% */, 14800 /* E& */, 15175 /* E' */, 15175 /* E( */, 15175 /* E) */, 15175 /* E* */, 15175 /* E+ */, 15175 /* E, */, 15175 /* E- */, 15175 /* E. */, 15175 /* E/ */, 15175 /* E0 */, 15175 /* E1 */, 15175 /* E2 */, 15175 /* E3 */, 15175 /* E4 */, 15175 /* E5 */, 15175 /* E6 */, 15175 /* E7 */, 15175 /* E8 */, 15175 /* E9 */, 15175 /* E: */, 15175 /* E; */, 15175 /* E< */, 15175 /* E= */, 15175 /* E> */, 15175 /* E? */, 14925 /* E@ */, 15175 /* EA */, 15175 /* EB */, 15175 /* EC */, 15175 /* ED */, 15175 /* EE */, 15175 /* EF */, 15175 /* EG */, 15175 /* EH */, 15175 /* EI */, 14675 /* EJ */, 15175 /* EK */, 15175 /* EL */, 15175 /* EM */, 15175 /* EN */, 15175 /* EO */, 15175 /* EP */, 15175 /* EQ */, 15175 /* ER */, 15175 /* ES */, 15175 /* ET */, 15175 /* EU */, 15175 /* EV */, 15175 /* EW */, 15175 /* EX */, 15175 /* EY */, 15175 /* EZ */, 15175 /* E[ */, 15175 /* E\ */, 15175 /* E] */, 15175 /* E^ */, 15175 /* E_ */, 15175 /* E` */, 15175 /* Ea */, 15175 /* Eb */, 14925 /* Ec */, 14925 /* Ed */, 14925 /* Ee */, 15175 /* Ef */, 15050 /* Eg */, 15175 /* Eh */, 15175 /* Ei */, 15175 /* Ej */, 15175 /* Ek */, 15175 /* El */, 15175 /* Em */, 15175 /* En */, 14925 /* Eo */, 15175 /* Ep */, 14925 /* Eq */, 15175 /* Er */, 15175 /* Es */, 15175 /* Et */, 15175 /* Eu */, 15175 /* Ev */, 15175 /* Ew */, 15175 /* Ex */, 15175 /* Ey */, 15175 /* Ez */, 15175 /* E{ */, 15175 /* E| */, 15175 /* E} */, 15175 /* E~ */}, - {14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F! 
*/, 14750 /* F" */, 14625 /* F# */, 14625 /* F$ */, 14625 /* F% */, 13625 /* F& */, 14750 /* F' */, 14625 /* F( */, 15000 /* F) */, 14750 /* F* */, 14625 /* F+ */, 12125 /* F, */, 14625 /* F- */, 12125 /* F. */, 12500 /* F/ */, 14625 /* F0 */, 14625 /* F1 */, 14625 /* F2 */, 14625 /* F3 */, 14625 /* F4 */, 14625 /* F5 */, 14625 /* F6 */, 14625 /* F7 */, 14625 /* F8 */, 14625 /* F9 */, 14625 /* F: */, 14625 /* F; */, 14625 /* F< */, 14625 /* F= */, 14625 /* F> */, 14625 /* F? */, 13875 /* F@ */, 12450 /* FA */, 14625 /* FB */, 13875 /* FC */, 14625 /* FD */, 14625 /* FE */, 14625 /* FF */, 13875 /* FG */, 14625 /* FH */, 14625 /* FI */, 12750 /* FJ */, 14625 /* FK */, 14625 /* FL */, 14625 /* FM */, 14625 /* FN */, 13875 /* FO */, 14625 /* FP */, 13875 /* FQ */, 14625 /* FR */, 14625 /* FS */, 14625 /* FT */, 14625 /* FU */, 14625 /* FV */, 14625 /* FW */, 14625 /* FX */, 14625 /* FY */, 14625 /* FZ */, 14625 /* F[ */, 14625 /* F\ */, 14625 /* F] */, 14625 /* F^ */, 11125 /* F_ */, 14625 /* F` */, 13625 /* Fa */, 14625 /* Fb */, 13825 /* Fc */, 13775 /* Fd */, 13825 /* Fe */, 14450 /* Ff */, 13525 /* Fg */, 14625 /* Fh */, 14625 /* Fi */, 14375 /* Fj */, 14625 /* Fk */, 14625 /* Fl */, 13875 /* Fm */, 13875 /* Fn */, 13825 /* Fo */, 13875 /* Fp */, 13775 /* Fq */, 13875 /* Fr */, 13625 /* Fs */, 14250 /* Ft */, 14000 /* Fu */, 14125 /* Fv */, 14250 /* Fw */, 13625 /* Fx */, 14125 /* Fy */, 14000 /* Fz */, 14625 /* F{ */, 14625 /* F| */, 14625 /* F} */, 14625 /* F~ */}, - {17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G! */, 17725 /* G" */, 17975 /* G# */, 17975 /* G$ */, 17975 /* G% */, 17975 /* G& */, 17725 /* G' */, 17975 /* G( */, 17975 /* G) */, 17725 /* G* */, 17975 /* G+ */, 17975 /* G, */, 18225 /* G- */, 17975 /* G. */, 17975 /* G/ */, 17975 /* G0 */, 17975 /* G1 */, 17975 /* G2 */, 17975 /* G3 */, 17975 /* G4 */, 17975 /* G5 */, 17975 /* G6 */, 17975 /* G7 */, 17975 /* G8 */, 17975 /* G9 */, 17975 /* G: */, 17975 /* G; */, 17975 /* G< */, 17975 /* G= */, 17975 /* G> */, 17975 /* G? 
*/, 17975 /* G@ */, 17975 /* GA */, 17975 /* GB */, 17975 /* GC */, 17975 /* GD */, 17975 /* GE */, 17975 /* GF */, 17975 /* GG */, 17975 /* GH */, 17975 /* GI */, 17675 /* GJ */, 17975 /* GK */, 17975 /* GL */, 17975 /* GM */, 17975 /* GN */, 17975 /* GO */, 17975 /* GP */, 17975 /* GQ */, 17975 /* GR */, 17975 /* GS */, 17025 /* GT */, 17975 /* GU */, 17225 /* GV */, 17650 /* GW */, 17725 /* GX */, 16875 /* GY */, 17975 /* GZ */, 17975 /* G[ */, 17975 /* G\ */, 17975 /* G] */, 17975 /* G^ */, 17975 /* G_ */, 17975 /* G` */, 17975 /* Ga */, 17975 /* Gb */, 17975 /* Gc */, 17975 /* Gd */, 17975 /* Ge */, 17975 /* Gf */, 17975 /* Gg */, 17975 /* Gh */, 17975 /* Gi */, 17975 /* Gj */, 17975 /* Gk */, 17975 /* Gl */, 17975 /* Gm */, 17975 /* Gn */, 17975 /* Go */, 17975 /* Gp */, 17975 /* Gq */, 17975 /* Gr */, 17975 /* Gs */, 17975 /* Gt */, 17975 /* Gu */, 17975 /* Gv */, 17975 /* Gw */, 17975 /* Gx */, 17975 /* Gy */, 17975 /* Gz */, 17975 /* G{ */, 17975 /* G| */, 17975 /* G} */, 17975 /* G~ */}, - {18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H! */, 18100 /* H" */, 18100 /* H# */, 18100 /* H$ */, 18100 /* H% */, 18100 /* H& */, 18100 /* H' */, 18100 /* H( */, 18100 /* H) */, 18100 /* H* */, 18100 /* H+ */, 18100 /* H, */, 18100 /* H- */, 18100 /* H. */, 18100 /* H/ */, 18100 /* H0 */, 18100 /* H1 */, 18100 /* H2 */, 18100 /* H3 */, 18100 /* H4 */, 18100 /* H5 */, 18100 /* H6 */, 18100 /* H7 */, 18100 /* H8 */, 18100 /* H9 */, 18100 /* H: */, 18100 /* H; */, 18100 /* H< */, 18100 /* H= */, 18100 /* H> */, 18100 /* H? */, 18100 /* H@ */, 18100 /* HA */, 18100 /* HB */, 18100 /* HC */, 18100 /* HD */, 18100 /* HE */, 18100 /* HF */, 18100 /* HG */, 18100 /* HH */, 18100 /* HI */, 18100 /* HJ */, 18100 /* HK */, 18100 /* HL */, 18100 /* HM */, 18100 /* HN */, 18100 /* HO */, 18100 /* HP */, 18100 /* HQ */, 18100 /* HR */, 18100 /* HS */, 18100 /* HT */, 18100 /* HU */, 18100 /* HV */, 18100 /* HW */, 18100 /* HX */, 18100 /* HY */, 18100 /* HZ */, 18100 /* H[ */, 18100 /* H\ */, 18100 /* H] */, 18100 /* H^ */, 18100 /* H_ */, 18100 /* H` */, 18100 /* Ha */, 18100 /* Hb */, 18100 /* Hc */, 18100 /* Hd */, 18100 /* He */, 18100 /* Hf */, 18100 /* Hg */, 18100 /* Hh */, 18100 /* Hi */, 18100 /* Hj */, 18100 /* Hk */, 18100 /* Hl */, 18100 /* Hm */, 18100 /* Hn */, 18100 /* Ho */, 18100 /* Hp */, 18100 /* Hq */, 18100 /* Hr */, 18100 /* Hs */, 18100 /* Ht */, 18100 /* Hu */, 18100 /* Hv */, 18100 /* Hw */, 18100 /* Hx */, 18100 /* Hy */, 18100 /* Hz */, 18100 /* H{ */, 18100 /* H| */, 18100 /* H} */, 18100 /* H~ */}, - {10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I! 
*/, 10800 /* I" */, 10800 /* I# */, 10800 /* I$ */, 10800 /* I% */, 10800 /* I& */, 10800 /* I' */, 10800 /* I( */, 10800 /* I) */, 10800 /* I* */, 10800 /* I+ */, 10800 /* I, */, 10050 /* I- */, 10800 /* I. */, 10800 /* I/ */, 10800 /* I0 */, 10800 /* I1 */, 10800 /* I2 */, 10800 /* I3 */, 10800 /* I4 */, 10800 /* I5 */, 10800 /* I6 */, 10800 /* I7 */, 10800 /* I8 */, 10800 /* I9 */, 10800 /* I: */, 10800 /* I; */, 10800 /* I< */, 10800 /* I= */, 10800 /* I> */, 10800 /* I? */, 10300 /* I@ */, 10800 /* IA */, 10800 /* IB */, 10800 /* IC */, 10800 /* ID */, 10800 /* IE */, 10800 /* IF */, 10800 /* IG */, 10800 /* IH */, 10800 /* II */, 10800 /* IJ */, 10800 /* IK */, 10800 /* IL */, 10800 /* IM */, 10800 /* IN */, 10800 /* IO */, 10800 /* IP */, 10800 /* IQ */, 10800 /* IR */, 10800 /* IS */, 10700 /* IT */, 10800 /* IU */, 10800 /* IV */, 10800 /* IW */, 10800 /* IX */, 10800 /* IY */, 10800 /* IZ */, 10800 /* I[ */, 10800 /* I\ */, 10800 /* I] */, 10800 /* I^ */, 10800 /* I_ */, 10800 /* I` */, 10800 /* Ia */, 10800 /* Ib */, 10675 /* Ic */, 10675 /* Id */, 10675 /* Ie */, 10800 /* If */, 10800 /* Ig */, 10800 /* Ih */, 10800 /* Ii */, 10800 /* Ij */, 10800 /* Ik */, 10800 /* Il */, 10800 /* Im */, 10800 /* In */, 10675 /* Io */, 10800 /* Ip */, 10675 /* Iq */, 10800 /* Ir */, 10675 /* Is */, 10800 /* It */, 10800 /* Iu */, 10675 /* Iv */, 10800 /* Iw */, 10800 /* Ix */, 10675 /* Iy */, 10800 /* Iz */, 10800 /* I{ */, 10800 /* I| */, 10800 /* I} */, 10800 /* I~ */}, - {13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J! */, 13975 /* J" */, 13975 /* J# */, 13975 /* J$ */, 13975 /* J% */, 13975 /* J& */, 13975 /* J' */, 13975 /* J( */, 13975 /* J) */, 13975 /* J* */, 13975 /* J+ */, 13350 /* J, */, 13975 /* J- */, 13350 /* J. */, 13100 /* J/ */, 13975 /* J0 */, 13975 /* J1 */, 13975 /* J2 */, 13975 /* J3 */, 13975 /* J4 */, 13975 /* J5 */, 13975 /* J6 */, 13975 /* J7 */, 13975 /* J8 */, 13975 /* J9 */, 13975 /* J: */, 13975 /* J; */, 13975 /* J< */, 13975 /* J= */, 13975 /* J> */, 13975 /* J? 
*/, 13975 /* J@ */, 13450 /* JA */, 13975 /* JB */, 13975 /* JC */, 13975 /* JD */, 13975 /* JE */, 13975 /* JF */, 13975 /* JG */, 13975 /* JH */, 13975 /* JI */, 13675 /* JJ */, 13975 /* JK */, 13975 /* JL */, 13975 /* JM */, 13975 /* JN */, 13975 /* JO */, 13975 /* JP */, 13975 /* JQ */, 13975 /* JR */, 13975 /* JS */, 13975 /* JT */, 13975 /* JU */, 13975 /* JV */, 13975 /* JW */, 13600 /* JX */, 13975 /* JY */, 13975 /* JZ */, 13975 /* J[ */, 13975 /* J\ */, 13975 /* J] */, 13975 /* J^ */, 11850 /* J_ */, 13975 /* J` */, 13975 /* Ja */, 13975 /* Jb */, 13975 /* Jc */, 13975 /* Jd */, 13975 /* Je */, 13975 /* Jf */, 13975 /* Jg */, 13975 /* Jh */, 13975 /* Ji */, 13975 /* Jj */, 13975 /* Jk */, 13975 /* Jl */, 13975 /* Jm */, 13975 /* Jn */, 13975 /* Jo */, 13975 /* Jp */, 13975 /* Jq */, 13975 /* Jr */, 13975 /* Js */, 13975 /* Jt */, 13975 /* Ju */, 13975 /* Jv */, 13975 /* Jw */, 13975 /* Jx */, 13975 /* Jy */, 13975 /* Jz */, 13975 /* J{ */, 13975 /* J| */, 13975 /* J} */, 13975 /* J~ */}, - {17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K! */, 17150 /* K" */, 17400 /* K# */, 17400 /* K$ */, 17400 /* K% */, 16650 /* K& */, 17150 /* K' */, 17400 /* K( */, 17650 /* K) */, 17150 /* K* */, 17400 /* K+ */, 17650 /* K, */, 16275 /* K- */, 17650 /* K. */, 17400 /* K/ */, 17400 /* K0 */, 17400 /* K1 */, 17400 /* K2 */, 17400 /* K3 */, 17400 /* K4 */, 17400 /* K5 */, 17400 /* K6 */, 17400 /* K7 */, 17400 /* K8 */, 17400 /* K9 */, 17400 /* K: */, 17400 /* K; */, 17400 /* K< */, 17400 /* K= */, 17400 /* K> */, 17400 /* K? */, 16400 /* K@ */, 17400 /* KA */, 17400 /* KB */, 16400 /* KC */, 17400 /* KD */, 17400 /* KE */, 17400 /* KF */, 16400 /* KG */, 17400 /* KH */, 17400 /* KI */, 16900 /* KJ */, 17400 /* KK */, 17400 /* KL */, 17400 /* KM */, 17400 /* KN */, 16400 /* KO */, 17400 /* KP */, 16400 /* KQ */, 17400 /* KR */, 17150 /* KS */, 16775 /* KT */, 17400 /* KU */, 16775 /* KV */, 17150 /* KW */, 17400 /* KX */, 16900 /* KY */, 17400 /* KZ */, 17400 /* K[ */, 17400 /* K\ */, 17400 /* K] */, 17400 /* K^ */, 17400 /* K_ */, 17400 /* K` */, 17150 /* Ka */, 17400 /* Kb */, 16775 /* Kc */, 16775 /* Kd */, 16775 /* Ke */, 16900 /* Kf */, 17150 /* Kg */, 17400 /* Kh */, 17400 /* Ki */, 17400 /* Kj */, 17400 /* Kk */, 17150 /* Kl */, 17150 /* Km */, 17150 /* Kn */, 16775 /* Ko */, 17150 /* Kp */, 16775 /* Kq */, 17150 /* Kr */, 17400 /* Ks */, 16650 /* Kt */, 16900 /* Ku */, 16900 /* Kv */, 16900 /* Kw */, 17275 /* Kx */, 16900 /* Ky */, 17650 /* Kz */, 17400 /* K{ */, 17400 /* K| */, 17400 /* K} */, 17400 /* K~ */}, - {13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L! 
*/, 11000 /* L" */, 13250 /* L# */, 13250 /* L$ */, 13250 /* L% */, 13125 /* L& */, 11000 /* L' */, 13250 /* L( */, 13250 /* L) */, 11000 /* L* */, 13250 /* L+ */, 13750 /* L, */, 12375 /* L- */, 13750 /* L. */, 13500 /* L/ */, 13250 /* L0 */, 13250 /* L1 */, 13250 /* L2 */, 13250 /* L3 */, 13250 /* L4 */, 13250 /* L5 */, 13250 /* L6 */, 13250 /* L7 */, 13250 /* L8 */, 13250 /* L9 */, 13250 /* L: */, 13250 /* L; */, 13250 /* L< */, 13250 /* L= */, 13250 /* L> */, 11750 /* L? */, 13000 /* L@ */, 13675 /* LA */, 13250 /* LB */, 12250 /* LC */, 13250 /* LD */, 13250 /* LE */, 13250 /* LF */, 12250 /* LG */, 13250 /* LH */, 13250 /* LI */, 13250 /* LJ */, 13250 /* LK */, 13250 /* LL */, 13250 /* LM */, 13250 /* LN */, 12250 /* LO */, 13250 /* LP */, 12250 /* LQ */, 13250 /* LR */, 13250 /* LS */, 11400 /* LT */, 12425 /* LU */, 11450 /* LV */, 12225 /* LW */, 13250 /* LX */, 11025 /* LY */, 13250 /* LZ */, 13250 /* L[ */, 13250 /* L\ */, 13250 /* L] */, 13250 /* L^ */, 13750 /* L_ */, 13250 /* L` */, 13350 /* La */, 13250 /* Lb */, 13000 /* Lc */, 13025 /* Ld */, 13000 /* Le */, 13250 /* Lf */, 13250 /* Lg */, 13250 /* Lh */, 13250 /* Li */, 13250 /* Lj */, 13250 /* Lk */, 13250 /* Ll */, 13250 /* Lm */, 13250 /* Ln */, 13000 /* Lo */, 13250 /* Lp */, 13025 /* Lq */, 13250 /* Lr */, 13250 /* Ls */, 12750 /* Lt */, 13125 /* Lu */, 12000 /* Lv */, 12500 /* Lw */, 13500 /* Lx */, 12375 /* Ly */, 13525 /* Lz */, 13250 /* L{ */, 13250 /* L| */, 13250 /* L} */, 13250 /* L~ */}, - {20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M! */, 20475 /* M" */, 20475 /* M# */, 20475 /* M$ */, 20475 /* M% */, 20475 /* M& */, 20475 /* M' */, 20475 /* M( */, 20475 /* M) */, 20475 /* M* */, 20475 /* M+ */, 20475 /* M, */, 20475 /* M- */, 20475 /* M. */, 20475 /* M/ */, 20475 /* M0 */, 20475 /* M1 */, 20475 /* M2 */, 20475 /* M3 */, 20475 /* M4 */, 20475 /* M5 */, 20475 /* M6 */, 20475 /* M7 */, 20475 /* M8 */, 20475 /* M9 */, 20475 /* M: */, 20475 /* M; */, 20475 /* M< */, 20475 /* M= */, 20475 /* M> */, 20475 /* M? 
*/, 20475 /* M@ */, 20475 /* MA */, 20475 /* MB */, 20475 /* MC */, 20475 /* MD */, 20475 /* ME */, 20475 /* MF */, 20475 /* MG */, 20475 /* MH */, 20475 /* MI */, 20475 /* MJ */, 20475 /* MK */, 20475 /* ML */, 20475 /* MM */, 20475 /* MN */, 20475 /* MO */, 20475 /* MP */, 20475 /* MQ */, 20475 /* MR */, 20475 /* MS */, 20475 /* MT */, 20475 /* MU */, 20475 /* MV */, 20475 /* MW */, 20475 /* MX */, 20475 /* MY */, 20475 /* MZ */, 20475 /* M[ */, 20475 /* M\ */, 20475 /* M] */, 20475 /* M^ */, 20475 /* M_ */, 20475 /* M` */, 20475 /* Ma */, 20475 /* Mb */, 20475 /* Mc */, 20475 /* Md */, 20475 /* Me */, 20475 /* Mf */, 20475 /* Mg */, 20475 /* Mh */, 20475 /* Mi */, 20475 /* Mj */, 20475 /* Mk */, 20475 /* Ml */, 20475 /* Mm */, 20475 /* Mn */, 20475 /* Mo */, 20475 /* Mp */, 20475 /* Mq */, 20475 /* Mr */, 20475 /* Ms */, 20475 /* Mt */, 20475 /* Mu */, 20475 /* Mv */, 20475 /* Mw */, 20475 /* Mx */, 20475 /* My */, 20475 /* Mz */, 20475 /* M{ */, 20475 /* M| */, 20475 /* M} */, 20475 /* M~ */}, - {18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N! */, 18100 /* N" */, 18100 /* N# */, 18100 /* N$ */, 18100 /* N% */, 18100 /* N& */, 18100 /* N' */, 18100 /* N( */, 18100 /* N) */, 18100 /* N* */, 18100 /* N+ */, 18100 /* N, */, 18100 /* N- */, 18100 /* N. */, 18100 /* N/ */, 18100 /* N0 */, 18100 /* N1 */, 18100 /* N2 */, 18100 /* N3 */, 18100 /* N4 */, 18100 /* N5 */, 18100 /* N6 */, 18100 /* N7 */, 18100 /* N8 */, 18100 /* N9 */, 18100 /* N: */, 18100 /* N; */, 18100 /* N< */, 18100 /* N= */, 18100 /* N> */, 18100 /* N? */, 18100 /* N@ */, 18100 /* NA */, 18100 /* NB */, 18100 /* NC */, 18100 /* ND */, 18100 /* NE */, 18100 /* NF */, 18100 /* NG */, 18100 /* NH */, 18100 /* NI */, 18100 /* NJ */, 18100 /* NK */, 18100 /* NL */, 18100 /* NM */, 18100 /* NN */, 18100 /* NO */, 18100 /* NP */, 18100 /* NQ */, 18100 /* NR */, 18100 /* NS */, 18100 /* NT */, 18100 /* NU */, 18100 /* NV */, 18100 /* NW */, 18100 /* NX */, 18100 /* NY */, 18100 /* NZ */, 18100 /* N[ */, 18100 /* N\ */, 18100 /* N] */, 18100 /* N^ */, 18100 /* N_ */, 18100 /* N` */, 18100 /* Na */, 18100 /* Nb */, 18100 /* Nc */, 18100 /* Nd */, 18100 /* Ne */, 18100 /* Nf */, 18100 /* Ng */, 18100 /* Nh */, 18100 /* Ni */, 18100 /* Nj */, 18100 /* Nk */, 18100 /* Nl */, 18100 /* Nm */, 18100 /* Nn */, 18100 /* No */, 18100 /* Np */, 18100 /* Nq */, 18100 /* Nr */, 18100 /* Ns */, 18100 /* Nt */, 18100 /* Nu */, 18100 /* Nv */, 18100 /* Nw */, 18100 /* Nx */, 18100 /* Ny */, 18100 /* Nz */, 18100 /* N{ */, 18100 /* N| */, 18100 /* N} */, 18100 /* N~ */}, - {17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O! 
*/, 17600 /* O" */, 17850 /* O# */, 17850 /* O$ */, 17850 /* O% */, 17725 /* O& */, 17600 /* O' */, 17850 /* O( */, 17850 /* O) */, 17600 /* O* */, 17850 /* O+ */, 16850 /* O, */, 18225 /* O- */, 16850 /* O. */, 16725 /* O/ */, 17850 /* O0 */, 17850 /* O1 */, 17850 /* O2 */, 17850 /* O3 */, 17850 /* O4 */, 17850 /* O5 */, 17850 /* O6 */, 17850 /* O7 */, 17850 /* O8 */, 17850 /* O9 */, 17850 /* O: */, 17850 /* O; */, 17850 /* O< */, 17850 /* O= */, 17850 /* O> */, 17850 /* O? */, 17850 /* O@ */, 17100 /* OA */, 17850 /* OB */, 17850 /* OC */, 17850 /* OD */, 17850 /* OE */, 17850 /* OF */, 17850 /* OG */, 17850 /* OH */, 17850 /* OI */, 17100 /* OJ */, 17850 /* OK */, 17850 /* OL */, 17850 /* OM */, 17850 /* ON */, 17850 /* OO */, 17850 /* OP */, 17850 /* OQ */, 17850 /* OR */, 17750 /* OS */, 17225 /* OT */, 17850 /* OU */, 17075 /* OV */, 17275 /* OW */, 17100 /* OX */, 16600 /* OY */, 17400 /* OZ */, 17850 /* O[ */, 17850 /* O\ */, 17850 /* O] */, 17850 /* O^ */, 15350 /* O_ */, 17850 /* O` */, 17850 /* Oa */, 17850 /* Ob */, 17850 /* Oc */, 17850 /* Od */, 17850 /* Oe */, 17850 /* Of */, 17850 /* Og */, 17850 /* Oh */, 17850 /* Oi */, 17850 /* Oj */, 17850 /* Ok */, 17850 /* Ol */, 17850 /* Om */, 17850 /* On */, 17850 /* Oo */, 17850 /* Op */, 17850 /* Oq */, 17850 /* Or */, 17850 /* Os */, 17850 /* Ot */, 17850 /* Ou */, 17850 /* Ov */, 17850 /* Ow */, 17850 /* Ox */, 17850 /* Oy */, 17850 /* Oz */, 17850 /* O{ */, 17850 /* O| */, 17850 /* O} */, 17850 /* O~ */}, - {16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P! */, 16525 /* P" */, 16400 /* P# */, 16400 /* P$ */, 16400 /* P% */, 15650 /* P& */, 16525 /* P' */, 16400 /* P( */, 16400 /* P) */, 16525 /* P* */, 16400 /* P+ */, 13900 /* P, */, 16400 /* P- */, 13900 /* P. */, 14150 /* P/ */, 16400 /* P0 */, 16400 /* P1 */, 16400 /* P2 */, 16400 /* P3 */, 16400 /* P4 */, 16400 /* P5 */, 16400 /* P6 */, 16400 /* P7 */, 16400 /* P8 */, 16400 /* P9 */, 16400 /* P: */, 16400 /* P; */, 16400 /* P< */, 16400 /* P= */, 16400 /* P> */, 16400 /* P? 
*/, 16150 /* P@ */, 14575 /* PA */, 16400 /* PB */, 16400 /* PC */, 16400 /* PD */, 16400 /* PE */, 16400 /* PF */, 16400 /* PG */, 16400 /* PH */, 16400 /* PI */, 14650 /* PJ */, 16400 /* PK */, 16400 /* PL */, 16400 /* PM */, 16400 /* PN */, 16400 /* PO */, 16400 /* PP */, 16400 /* PQ */, 16400 /* PR */, 16150 /* PS */, 16400 /* PT */, 16400 /* PU */, 16275 /* PV */, 16150 /* PW */, 15525 /* PX */, 15775 /* PY */, 15775 /* PZ */, 16400 /* P[ */, 16400 /* P\ */, 16400 /* P] */, 16400 /* P^ */, 12650 /* P_ */, 16400 /* P` */, 16025 /* Pa */, 16400 /* Pb */, 15900 /* Pc */, 15900 /* Pd */, 15900 /* Pe */, 16400 /* Pf */, 15650 /* Pg */, 16400 /* Ph */, 16400 /* Pi */, 16400 /* Pj */, 16400 /* Pk */, 16400 /* Pl */, 16150 /* Pm */, 16150 /* Pn */, 15900 /* Po */, 16025 /* Pp */, 15900 /* Pq */, 16150 /* Pr */, 16025 /* Ps */, 16400 /* Pt */, 16400 /* Pu */, 16400 /* Pv */, 16525 /* Pw */, 16025 /* Px */, 16400 /* Py */, 16400 /* Pz */, 16400 /* P{ */, 16400 /* P| */, 16400 /* P} */, 16400 /* P~ */}, - {17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q! */, 17600 /* Q" */, 17850 /* Q# */, 17850 /* Q$ */, 17850 /* Q% */, 17725 /* Q& */, 17600 /* Q' */, 17850 /* Q( */, 17850 /* Q) */, 17600 /* Q* */, 17850 /* Q+ */, 16850 /* Q, */, 18225 /* Q- */, 16850 /* Q. */, 16725 /* Q/ */, 17850 /* Q0 */, 17850 /* Q1 */, 17850 /* Q2 */, 17850 /* Q3 */, 17850 /* Q4 */, 17850 /* Q5 */, 17850 /* Q6 */, 17850 /* Q7 */, 17850 /* Q8 */, 17850 /* Q9 */, 17850 /* Q: */, 17850 /* Q; */, 17850 /* Q< */, 17850 /* Q= */, 17850 /* Q> */, 17850 /* Q? */, 17850 /* Q@ */, 17100 /* QA */, 17850 /* QB */, 17850 /* QC */, 17850 /* QD */, 17850 /* QE */, 17850 /* QF */, 17850 /* QG */, 17850 /* QH */, 17850 /* QI */, 17100 /* QJ */, 17850 /* QK */, 17850 /* QL */, 17850 /* QM */, 17850 /* QN */, 17850 /* QO */, 17850 /* QP */, 17850 /* QQ */, 17850 /* QR */, 17750 /* QS */, 17225 /* QT */, 17850 /* QU */, 17075 /* QV */, 17275 /* QW */, 17100 /* QX */, 16600 /* QY */, 17400 /* QZ */, 17850 /* Q[ */, 17850 /* Q\ */, 17850 /* Q] */, 17850 /* Q^ */, 15350 /* Q_ */, 17850 /* Q` */, 17850 /* Qa */, 17850 /* Qb */, 17850 /* Qc */, 17850 /* Qd */, 17850 /* Qe */, 17850 /* Qf */, 17850 /* Qg */, 17850 /* Qh */, 17850 /* Qi */, 17850 /* Qj */, 17850 /* Qk */, 17850 /* Ql */, 17850 /* Qm */, 17850 /* Qn */, 17850 /* Qo */, 17850 /* Qp */, 17850 /* Qq */, 17850 /* Qr */, 17850 /* Qs */, 17850 /* Qt */, 17850 /* Qu */, 17850 /* Qv */, 17850 /* Qw */, 17850 /* Qx */, 17850 /* Qy */, 17850 /* Qz */, 17850 /* Q{ */, 17850 /* Q| */, 17850 /* Q} */, 17850 /* Q~ */}, - {16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R! 
*/, 16850 /* R" */, 16850 /* R# */, 16850 /* R$ */, 16850 /* R% */, 16225 /* R& */, 16850 /* R' */, 16850 /* R( */, 16850 /* R) */, 16850 /* R* */, 16850 /* R+ */, 16850 /* R, */, 16475 /* R- */, 16850 /* R. */, 16350 /* R/ */, 16850 /* R0 */, 16850 /* R1 */, 16850 /* R2 */, 16850 /* R3 */, 16850 /* R4 */, 16850 /* R5 */, 16850 /* R6 */, 16850 /* R7 */, 16850 /* R8 */, 16850 /* R9 */, 16850 /* R: */, 16850 /* R; */, 16850 /* R< */, 16850 /* R= */, 16850 /* R> */, 16850 /* R? */, 16350 /* R@ */, 16850 /* RA */, 16850 /* RB */, 16600 /* RC */, 16850 /* RD */, 16850 /* RE */, 16850 /* RF */, 16600 /* RG */, 16850 /* RH */, 16850 /* RI */, 15975 /* RJ */, 16850 /* RK */, 16850 /* RL */, 16850 /* RM */, 16850 /* RN */, 16600 /* RO */, 16850 /* RP */, 16600 /* RQ */, 16850 /* RR */, 16850 /* RS */, 16250 /* RT */, 16500 /* RU */, 16100 /* RV */, 16500 /* RW */, 16850 /* RX */, 15850 /* RY */, 16850 /* RZ */, 16850 /* R[ */, 16850 /* R\ */, 16850 /* R] */, 16850 /* R^ */, 16850 /* R_ */, 16850 /* R` */, 16600 /* Ra */, 16850 /* Rb */, 16475 /* Rc */, 16475 /* Rd */, 16475 /* Re */, 16850 /* Rf */, 16225 /* Rg */, 16850 /* Rh */, 16850 /* Ri */, 16850 /* Rj */, 16850 /* Rk */, 16850 /* Rl */, 16850 /* Rm */, 16850 /* Rn */, 16475 /* Ro */, 16850 /* Rp */, 16475 /* Rq */, 16850 /* Rr */, 16850 /* Rs */, 16850 /* Rt */, 16850 /* Ru */, 16850 /* Rv */, 16850 /* Rw */, 16850 /* Rx */, 16750 /* Ry */, 16850 /* Rz */, 16850 /* R{ */, 16850 /* R| */, 16850 /* R} */, 16850 /* R~ */}, - {15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S! */, 15600 /* S" */, 15600 /* S# */, 15600 /* S$ */, 15600 /* S% */, 15350 /* S& */, 15600 /* S' */, 15600 /* S( */, 15600 /* S) */, 15600 /* S* */, 15600 /* S+ */, 15600 /* S, */, 15975 /* S- */, 15600 /* S. */, 14725 /* S/ */, 15600 /* S0 */, 15600 /* S1 */, 15600 /* S2 */, 15600 /* S3 */, 15600 /* S4 */, 15600 /* S5 */, 15600 /* S6 */, 15600 /* S7 */, 15600 /* S8 */, 15600 /* S9 */, 15600 /* S: */, 15600 /* S; */, 15600 /* S< */, 15600 /* S= */, 15600 /* S> */, 15600 /* S? 
*/, 15350 /* S@ */, 15600 /* SA */, 15600 /* SB */, 15600 /* SC */, 15600 /* SD */, 15600 /* SE */, 15600 /* SF */, 15600 /* SG */, 15600 /* SH */, 15600 /* SI */, 15600 /* SJ */, 15600 /* SK */, 15600 /* SL */, 15600 /* SM */, 15600 /* SN */, 15600 /* SO */, 15600 /* SP */, 15600 /* SQ */, 15600 /* SR */, 15250 /* SS */, 15025 /* ST */, 15600 /* SU */, 15150 /* SV */, 15150 /* SW */, 15600 /* SX */, 14600 /* SY */, 15600 /* SZ */, 15600 /* S[ */, 15600 /* S\ */, 15600 /* S] */, 15600 /* S^ */, 14100 /* S_ */, 15600 /* S` */, 15600 /* Sa */, 15600 /* Sb */, 15600 /* Sc */, 15600 /* Sd */, 15600 /* Se */, 15600 /* Sf */, 15600 /* Sg */, 15600 /* Sh */, 15600 /* Si */, 15600 /* Sj */, 15600 /* Sk */, 15600 /* Sl */, 15600 /* Sm */, 15600 /* Sn */, 15600 /* So */, 15600 /* Sp */, 15600 /* Sq */, 15600 /* Sr */, 15600 /* Ss */, 15475 /* St */, 15600 /* Su */, 15600 /* Sv */, 15600 /* Sw */, 15600 /* Sx */, 15600 /* Sy */, 15600 /* Sz */, 15600 /* S{ */, 15600 /* S| */, 15600 /* S} */, 15600 /* S~ */}, - {14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T! */, 14975 /* T" */, 14600 /* T# */, 14600 /* T$ */, 14600 /* T% */, 13850 /* T& */, 14975 /* T' */, 14600 /* T( */, 15100 /* T) */, 14975 /* T* */, 14600 /* T+ */, 12850 /* T, */, 13475 /* T- */, 12850 /* T. */, 12100 /* T/ */, 14600 /* T0 */, 14600 /* T1 */, 14600 /* T2 */, 14600 /* T3 */, 14600 /* T4 */, 14600 /* T5 */, 14600 /* T6 */, 14600 /* T7 */, 14600 /* T8 */, 14600 /* T9 */, 13600 /* T: */, 13600 /* T; */, 14600 /* T< */, 14600 /* T= */, 14600 /* T> */, 14600 /* T? */, 13850 /* T@ */, 12975 /* TA */, 14600 /* TB */, 13975 /* TC */, 14600 /* TD */, 14600 /* TE */, 14600 /* TF */, 13975 /* TG */, 14600 /* TH */, 14500 /* TI */, 13225 /* TJ */, 14600 /* TK */, 14600 /* TL */, 14600 /* TM */, 14600 /* TN */, 13975 /* TO */, 14600 /* TP */, 13975 /* TQ */, 14600 /* TR */, 14300 /* TS */, 14975 /* TT */, 14600 /* TU */, 14675 /* TV */, 14675 /* TW */, 14600 /* TX */, 14675 /* TY */, 14600 /* TZ */, 14600 /* T[ */, 14600 /* T\ */, 14600 /* T] */, 14600 /* T^ */, 12600 /* T_ */, 14600 /* T` */, 12850 /* Ta */, 14600 /* Tb */, 12975 /* Tc */, 12975 /* Td */, 12975 /* Te */, 14225 /* Tf */, 12600 /* Tg */, 14600 /* Th */, 14600 /* Ti */, 14600 /* Tj */, 14600 /* Tk */, 14600 /* Tl */, 13475 /* Tm */, 13475 /* Tn */, 12975 /* To */, 13475 /* Tp */, 12975 /* Tq */, 13475 /* Tr */, 12850 /* Ts */, 14600 /* Tt */, 13475 /* Tu */, 13975 /* Tv */, 13725 /* Tw */, 13725 /* Tx */, 13850 /* Ty */, 13350 /* Tz */, 14600 /* T{ */, 14600 /* T| */, 14600 /* T} */, 14600 /* T~ */}, - {17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U! 
*/, 17350 /* U" */, 17350 /* U# */, 17350 /* U$ */, 17350 /* U% */, 17100 /* U& */, 17350 /* U' */, 17350 /* U( */, 17350 /* U) */, 17350 /* U* */, 17350 /* U+ */, 16475 /* U, */, 17350 /* U- */, 16475 /* U. */, 16350 /* U/ */, 17350 /* U0 */, 17350 /* U1 */, 17350 /* U2 */, 17350 /* U3 */, 17350 /* U4 */, 17350 /* U5 */, 17350 /* U6 */, 17350 /* U7 */, 17350 /* U8 */, 17350 /* U9 */, 17350 /* U: */, 17350 /* U; */, 17350 /* U< */, 17350 /* U= */, 17350 /* U> */, 17350 /* U? */, 17350 /* U@ */, 16725 /* UA */, 17350 /* UB */, 17350 /* UC */, 17350 /* UD */, 17350 /* UE */, 17350 /* UF */, 17350 /* UG */, 17350 /* UH */, 17350 /* UI */, 16975 /* UJ */, 17350 /* UK */, 17350 /* UL */, 17350 /* UM */, 17350 /* UN */, 17350 /* UO */, 17350 /* UP */, 17350 /* UQ */, 17350 /* UR */, 17350 /* US */, 17350 /* UT */, 17350 /* UU */, 17350 /* UV */, 17350 /* UW */, 17350 /* UX */, 17350 /* UY */, 17350 /* UZ */, 17350 /* U[ */, 17350 /* U\ */, 17350 /* U] */, 17350 /* U^ */, 15600 /* U_ */, 17350 /* U` */, 17350 /* Ua */, 17350 /* Ub */, 17350 /* Uc */, 17350 /* Ud */, 17350 /* Ue */, 17350 /* Uf */, 17100 /* Ug */, 17350 /* Uh */, 17350 /* Ui */, 17350 /* Uj */, 17350 /* Uk */, 17350 /* Ul */, 17350 /* Um */, 17350 /* Un */, 17350 /* Uo */, 17350 /* Up */, 17350 /* Uq */, 17350 /* Ur */, 17350 /* Us */, 17350 /* Ut */, 17350 /* Uu */, 17350 /* Uv */, 17350 /* Uw */, 17350 /* Ux */, 17350 /* Uy */, 17350 /* Uz */, 17350 /* U{ */, 17350 /* U| */, 17350 /* U} */, 17350 /* U~ */}, - {16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V! */, 16500 /* V" */, 16250 /* V# */, 16250 /* V$ */, 16250 /* V% */, 15375 /* V& */, 16500 /* V' */, 16250 /* V( */, 16750 /* V) */, 16500 /* V* */, 16250 /* V+ */, 14750 /* V, */, 15625 /* V- */, 14750 /* V. */, 14625 /* V/ */, 16250 /* V0 */, 16250 /* V1 */, 16250 /* V2 */, 16250 /* V3 */, 16250 /* V4 */, 16250 /* V5 */, 16250 /* V6 */, 16250 /* V7 */, 16250 /* V8 */, 16250 /* V9 */, 15500 /* V: */, 15500 /* V; */, 16250 /* V< */, 16250 /* V= */, 16250 /* V> */, 16625 /* V? 
*/, 15500 /* V@ */, 14900 /* VA */, 16250 /* VB */, 15475 /* VC */, 16250 /* VD */, 16250 /* VE */, 16250 /* VF */, 15475 /* VG */, 16250 /* VH */, 16250 /* VI */, 14875 /* VJ */, 16250 /* VK */, 16250 /* VL */, 16250 /* VM */, 16250 /* VN */, 15475 /* VO */, 16250 /* VP */, 15475 /* VQ */, 16250 /* VR */, 15500 /* VS */, 16325 /* VT */, 16250 /* VU */, 16375 /* VV */, 16325 /* VW */, 16250 /* VX */, 16250 /* VY */, 16125 /* VZ */, 16250 /* V[ */, 16250 /* V\ */, 16250 /* V] */, 16250 /* V^ */, 14250 /* V_ */, 16250 /* V` */, 15250 /* Va */, 16250 /* Vb */, 15375 /* Vc */, 15375 /* Vd */, 15375 /* Ve */, 16250 /* Vf */, 15250 /* Vg */, 16250 /* Vh */, 16250 /* Vi */, 16125 /* Vj */, 16250 /* Vk */, 16250 /* Vl */, 15750 /* Vm */, 15750 /* Vn */, 15375 /* Vo */, 15750 /* Vp */, 15375 /* Vq */, 15750 /* Vr */, 15625 /* Vs */, 16250 /* Vt */, 15625 /* Vu */, 16000 /* Vv */, 16000 /* Vw */, 16000 /* Vx */, 16125 /* Vy */, 15875 /* Vz */, 16250 /* V{ */, 16250 /* V| */, 16250 /* V} */, 16250 /* V~ */}, - {24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W! */, 24700 /* W" */, 24325 /* W# */, 24325 /* W$ */, 24325 /* W% */, 23450 /* W& */, 24700 /* W' */, 24325 /* W( */, 24575 /* W) */, 24700 /* W* */, 24325 /* W+ */, 23325 /* W, */, 24075 /* W- */, 23325 /* W. */, 23075 /* W/ */, 24325 /* W0 */, 24325 /* W1 */, 24325 /* W2 */, 24325 /* W3 */, 24325 /* W4 */, 24325 /* W5 */, 24325 /* W6 */, 24325 /* W7 */, 24325 /* W8 */, 24325 /* W9 */, 24325 /* W: */, 24325 /* W; */, 24325 /* W< */, 24325 /* W= */, 24325 /* W> */, 24325 /* W? */, 23825 /* W@ */, 23625 /* WA */, 24325 /* WB */, 23750 /* WC */, 24325 /* WD */, 24325 /* WE */, 24325 /* WF */, 23750 /* WG */, 24325 /* WH */, 24325 /* WI */, 23075 /* WJ */, 24325 /* WK */, 24325 /* WL */, 24325 /* WM */, 24325 /* WN */, 23750 /* WO */, 24325 /* WP */, 23750 /* WQ */, 24325 /* WR */, 24325 /* WS */, 24400 /* WT */, 24325 /* WU */, 24400 /* WV */, 24375 /* WW */, 24325 /* WX */, 24325 /* WY */, 24200 /* WZ */, 24325 /* W[ */, 24325 /* W\ */, 24325 /* W] */, 24325 /* W^ */, 23075 /* W_ */, 24325 /* W` */, 23575 /* Wa */, 24325 /* Wb */, 23700 /* Wc */, 23700 /* Wd */, 23700 /* We */, 24325 /* Wf */, 23825 /* Wg */, 24325 /* Wh */, 24325 /* Wi */, 24200 /* Wj */, 24325 /* Wk */, 24325 /* Wl */, 24075 /* Wm */, 24075 /* Wn */, 23700 /* Wo */, 24200 /* Wp */, 23700 /* Wq */, 24075 /* Wr */, 24075 /* Ws */, 24325 /* Wt */, 24200 /* Wu */, 24325 /* Wv */, 24075 /* Ww */, 24075 /* Wx */, 24325 /* Wy */, 24075 /* Wz */, 24325 /* W{ */, 24325 /* W| */, 24325 /* W} */, 24325 /* W~ */}, - {16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X! 
*/, 16825 /* X" */, 16825 /* X# */, 16825 /* X$ */, 16825 /* X% */, 16200 /* X& */, 16825 /* X' */, 16825 /* X( */, 16825 /* X) */, 16825 /* X* */, 16825 /* X+ */, 17075 /* X, */, 16075 /* X- */, 17075 /* X. */, 16825 /* X/ */, 16825 /* X0 */, 16825 /* X1 */, 16825 /* X2 */, 16825 /* X3 */, 16825 /* X4 */, 16825 /* X5 */, 16825 /* X6 */, 16825 /* X7 */, 16825 /* X8 */, 16825 /* X9 */, 16450 /* X: */, 16450 /* X; */, 16825 /* X< */, 16825 /* X= */, 16825 /* X> */, 16575 /* X? */, 16075 /* X@ */, 16950 /* XA */, 16825 /* XB */, 16075 /* XC */, 16825 /* XD */, 16825 /* XE */, 16825 /* XF */, 16075 /* XG */, 16825 /* XH */, 16825 /* XI */, 16325 /* XJ */, 16825 /* XK */, 16825 /* XL */, 16825 /* XM */, 16825 /* XN */, 16075 /* XO */, 16825 /* XP */, 16075 /* XQ */, 16825 /* XR */, 16825 /* XS */, 16825 /* XT */, 16825 /* XU */, 16825 /* XV */, 16825 /* XW */, 16825 /* XX */, 16825 /* XY */, 16825 /* XZ */, 16825 /* X[ */, 16825 /* X\ */, 16825 /* X] */, 16825 /* X^ */, 17325 /* X_ */, 16825 /* X` */, 16700 /* Xa */, 16825 /* Xb */, 16325 /* Xc */, 16325 /* Xd */, 16325 /* Xe */, 16825 /* Xf */, 16825 /* Xg */, 16825 /* Xh */, 16825 /* Xi */, 16825 /* Xj */, 16825 /* Xk */, 16825 /* Xl */, 16825 /* Xm */, 16825 /* Xn */, 16325 /* Xo */, 16825 /* Xp */, 16325 /* Xq */, 16825 /* Xr */, 16575 /* Xs */, 16325 /* Xt */, 16325 /* Xu */, 16450 /* Xv */, 16325 /* Xw */, 16825 /* Xx */, 16575 /* Xy */, 17075 /* Xz */, 16825 /* X{ */, 16825 /* X| */, 16825 /* X} */, 16825 /* X~ */}, - {16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y! */, 16350 /* Y" */, 16225 /* Y# */, 16225 /* Y$ */, 16225 /* Y% */, 14850 /* Y& */, 16350 /* Y' */, 16225 /* Y( */, 16725 /* Y) */, 16350 /* Y* */, 16225 /* Y+ */, 14475 /* Y, */, 14975 /* Y- */, 14475 /* Y. */, 13975 /* Y/ */, 16225 /* Y0 */, 16225 /* Y1 */, 16225 /* Y2 */, 16225 /* Y3 */, 16225 /* Y4 */, 16225 /* Y5 */, 16225 /* Y6 */, 16225 /* Y7 */, 16225 /* Y8 */, 16225 /* Y9 */, 14975 /* Y: */, 14975 /* Y; */, 16225 /* Y< */, 16225 /* Y= */, 16225 /* Y> */, 16225 /* Y? 
*/, 15225 /* Y@ */, 14475 /* YA */, 16225 /* YB */, 14975 /* YC */, 16225 /* YD */, 16225 /* YE */, 16225 /* YF */, 14975 /* YG */, 16225 /* YH */, 16225 /* YI */, 14475 /* YJ */, 16225 /* YK */, 16225 /* YL */, 16225 /* YM */, 16225 /* YN */, 14975 /* YO */, 16225 /* YP */, 14975 /* YQ */, 16225 /* YR */, 15125 /* YS */, 16300 /* YT */, 16225 /* YU */, 16225 /* YV */, 16225 /* YW */, 16225 /* YX */, 16225 /* YY */, 16225 /* YZ */, 16225 /* Y[ */, 16225 /* Y\ */, 16225 /* Y] */, 16225 /* Y^ */, 13975 /* Y_ */, 16225 /* Y` */, 14600 /* Ya */, 15975 /* Yb */, 14725 /* Yc */, 14725 /* Yd */, 14725 /* Ye */, 15600 /* Yf */, 14700 /* Yg */, 15975 /* Yh */, 15975 /* Yi */, 15475 /* Yj */, 15975 /* Yk */, 16225 /* Yl */, 15100 /* Ym */, 15100 /* Yn */, 14725 /* Yo */, 14975 /* Yp */, 14725 /* Yq */, 15100 /* Yr */, 14725 /* Ys */, 15600 /* Yt */, 15225 /* Yu */, 15350 /* Yv */, 15225 /* Yw */, 15225 /* Yx */, 15350 /* Yy */, 14975 /* Yz */, 16225 /* Y{ */, 16225 /* Y| */, 16225 /* Y} */, 16225 /* Y~ */}, - {15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z! */, 15300 /* Z" */, 15175 /* Z# */, 15175 /* Z$ */, 15175 /* Z% */, 14925 /* Z& */, 15300 /* Z' */, 15175 /* Z( */, 15425 /* Z) */, 15300 /* Z* */, 15175 /* Z+ */, 15925 /* Z, */, 14175 /* Z- */, 15925 /* Z. */, 15175 /* Z/ */, 15175 /* Z0 */, 15175 /* Z1 */, 15175 /* Z2 */, 15175 /* Z3 */, 15175 /* Z4 */, 15175 /* Z5 */, 15175 /* Z6 */, 15175 /* Z7 */, 15175 /* Z8 */, 15175 /* Z9 */, 15175 /* Z: */, 15175 /* Z; */, 15175 /* Z< */, 15175 /* Z= */, 15175 /* Z> */, 15175 /* Z? */, 14800 /* Z@ */, 15175 /* ZA */, 15175 /* ZB */, 14725 /* ZC */, 15175 /* ZD */, 15175 /* ZE */, 15175 /* ZF */, 14725 /* ZG */, 15175 /* ZH */, 15175 /* ZI */, 15000 /* ZJ */, 15175 /* ZK */, 15175 /* ZL */, 15175 /* ZM */, 15175 /* ZN */, 14725 /* ZO */, 15175 /* ZP */, 14725 /* ZQ */, 15175 /* ZR */, 15175 /* ZS */, 15175 /* ZT */, 15175 /* ZU */, 14975 /* ZV */, 15050 /* ZW */, 15175 /* ZX */, 14925 /* ZY */, 15175 /* ZZ */, 15175 /* Z[ */, 15175 /* Z\ */, 15175 /* Z] */, 15175 /* Z^ */, 15550 /* Z_ */, 15175 /* Z` */, 15175 /* Za */, 15175 /* Zb */, 15050 /* Zc */, 15050 /* Zd */, 15050 /* Ze */, 15175 /* Zf */, 15175 /* Zg */, 15175 /* Zh */, 15175 /* Zi */, 15175 /* Zj */, 15175 /* Zk */, 15175 /* Zl */, 15175 /* Zm */, 15175 /* Zn */, 15050 /* Zo */, 15175 /* Zp */, 15050 /* Zq */, 15175 /* Zr */, 15175 /* Zs */, 14925 /* Zt */, 15050 /* Zu */, 15175 /* Zv */, 15175 /* Zw */, 15175 /* Zx */, 15050 /* Zy */, 15175 /* Zz */, 15175 /* Z{ */, 15175 /* Z| */, 15175 /* Z} */, 15175 /* Z~ */}, - {8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [! 
*/, 8350 /* [" */, 8350 /* [# */, 8350 /* [$ */, 8350 /* [% */, 8350 /* [& */, 8350 /* [' */, 8350 /* [( */, 8350 /* [) */, 8350 /* [* */, 8350 /* [+ */, 8350 /* [, */, 8350 /* [- */, 8350 /* [. */, 8350 /* [/ */, 8350 /* [0 */, 8350 /* [1 */, 8350 /* [2 */, 8350 /* [3 */, 8350 /* [4 */, 8350 /* [5 */, 8350 /* [6 */, 8350 /* [7 */, 8350 /* [8 */, 8350 /* [9 */, 8350 /* [: */, 8350 /* [; */, 8350 /* [< */, 8350 /* [= */, 8350 /* [> */, 8350 /* [? */, 8350 /* [@ */, 8350 /* [A */, 8350 /* [B */, 8350 /* [C */, 8350 /* [D */, 8350 /* [E */, 8350 /* [F */, 8350 /* [G */, 8350 /* [H */, 8350 /* [I */, 8350 /* [J */, 8350 /* [K */, 8350 /* [L */, 8350 /* [M */, 8350 /* [N */, 8350 /* [O */, 8350 /* [P */, 8350 /* [Q */, 8350 /* [R */, 8350 /* [S */, 8350 /* [T */, 8350 /* [U */, 8350 /* [V */, 8350 /* [W */, 8350 /* [X */, 8350 /* [Y */, 8350 /* [Z */, 8350 /* [[ */, 8350 /* [\ */, 8350 /* [] */, 8350 /* [^ */, 8350 /* [_ */, 8350 /* [` */, 8350 /* [a */, 8350 /* [b */, 8350 /* [c */, 8350 /* [d */, 8350 /* [e */, 8350 /* [f */, 8350 /* [g */, 8350 /* [h */, 8350 /* [i */, 8600 /* [j */, 8350 /* [k */, 8350 /* [l */, 8350 /* [m */, 8350 /* [n */, 8350 /* [o */, 8350 /* [p */, 8350 /* [q */, 8350 /* [r */, 8350 /* [s */, 8350 /* [t */, 8350 /* [u */, 8350 /* [v */, 8350 /* [w */, 8350 /* [x */, 8350 /* [y */, 8350 /* [z */, 8350 /* [{ */, 8350 /* [| */, 8350 /* [} */, 8350 /* [~ */}, - {11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \! */, 11500 /* \" */, 11500 /* \# */, 11500 /* \$ */, 11500 /* \% */, 11500 /* \& */, 11500 /* \' */, 11500 /* \( */, 11500 /* \) */, 11500 /* \* */, 11500 /* \+ */, 11500 /* \, */, 11500 /* \- */, 11500 /* \. */, 11500 /* \/ */, 11500 /* \0 */, 11500 /* \1 */, 11500 /* \2 */, 11500 /* \3 */, 11500 /* \4 */, 11500 /* \5 */, 11500 /* \6 */, 11500 /* \7 */, 11500 /* \8 */, 11500 /* \9 */, 11500 /* \: */, 11500 /* \; */, 11500 /* \< */, 11500 /* \= */, 11500 /* \> */, 11500 /* \? 
*/, 11500 /* \@ */, 11500 /* \A */, 11500 /* \B */, 11500 /* \C */, 11500 /* \D */, 11500 /* \E */, 11500 /* \F */, 11500 /* \G */, 11500 /* \H */, 11500 /* \I */, 11500 /* \J */, 11500 /* \K */, 11500 /* \L */, 11500 /* \M */, 11500 /* \N */, 11500 /* \O */, 11500 /* \P */, 11500 /* \Q */, 11500 /* \R */, 11500 /* \S */, 11500 /* \T */, 11500 /* \U */, 11500 /* \V */, 11500 /* \W */, 11500 /* \X */, 11500 /* \Y */, 11500 /* \Z */, 11500 /* \[ */, 11500 /* \\ */, 11500 /* \] */, 11500 /* \^ */, 11500 /* \_ */, 11500 /* \` */, 11500 /* \a */, 11500 /* \b */, 11500 /* \c */, 11500 /* \d */, 11500 /* \e */, 11500 /* \f */, 11500 /* \g */, 11500 /* \h */, 11500 /* \i */, 11500 /* \j */, 11500 /* \k */, 11500 /* \l */, 11500 /* \m */, 11500 /* \n */, 11500 /* \o */, 11500 /* \p */, 11500 /* \q */, 11500 /* \r */, 11500 /* \s */, 11500 /* \t */, 11500 /* \u */, 11500 /* \v */, 11500 /* \w */, 11500 /* \x */, 11500 /* \y */, 11500 /* \z */, 11500 /* \{ */, 11500 /* \| */, 11500 /* \} */, 11500 /* \~ */}, - {8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ]! */, 8350 /* ]" */, 8350 /* ]# */, 8350 /* ]$ */, 8350 /* ]% */, 8350 /* ]& */, 8350 /* ]' */, 8350 /* ]( */, 8350 /* ]) */, 8350 /* ]* */, 8350 /* ]+ */, 8350 /* ], */, 8350 /* ]- */, 8350 /* ]. */, 8350 /* ]/ */, 8350 /* ]0 */, 8350 /* ]1 */, 8350 /* ]2 */, 8350 /* ]3 */, 8350 /* ]4 */, 8350 /* ]5 */, 8350 /* ]6 */, 8350 /* ]7 */, 8350 /* ]8 */, 8350 /* ]9 */, 8350 /* ]: */, 8350 /* ]; */, 8350 /* ]< */, 8350 /* ]= */, 8350 /* ]> */, 8350 /* ]? */, 8350 /* ]@ */, 8350 /* ]A */, 8350 /* ]B */, 8350 /* ]C */, 8350 /* ]D */, 8350 /* ]E */, 8350 /* ]F */, 8350 /* ]G */, 8350 /* ]H */, 8350 /* ]I */, 8350 /* ]J */, 8350 /* ]K */, 8350 /* ]L */, 8350 /* ]M */, 8350 /* ]N */, 8350 /* ]O */, 8350 /* ]P */, 8350 /* ]Q */, 8350 /* ]R */, 8350 /* ]S */, 8350 /* ]T */, 8350 /* ]U */, 8350 /* ]V */, 8350 /* ]W */, 8350 /* ]X */, 8350 /* ]Y */, 8350 /* ]Z */, 8350 /* ][ */, 8350 /* ]\ */, 8350 /* ]] */, 8350 /* ]^ */, 8350 /* ]_ */, 8350 /* ]` */, 8350 /* ]a */, 8350 /* ]b */, 8350 /* ]c */, 8350 /* ]d */, 8350 /* ]e */, 8350 /* ]f */, 8350 /* ]g */, 8350 /* ]h */, 8350 /* ]i */, 8350 /* ]j */, 8350 /* ]k */, 8350 /* ]l */, 8350 /* ]m */, 8350 /* ]n */, 8350 /* ]o */, 8350 /* ]p */, 8350 /* ]q */, 8350 /* ]r */, 8350 /* ]s */, 8350 /* ]t */, 8350 /* ]u */, 8350 /* ]v */, 8350 /* ]w */, 8350 /* ]x */, 8350 /* ]y */, 8350 /* ]z */, 8350 /* ]{ */, 8350 /* ]| */, 8350 /* ]} */, 8350 /* ]~ */}, - {15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^! 
*/, 15000 /* ^" */, 15000 /* ^# */, 15000 /* ^$ */, 15000 /* ^% */, 15000 /* ^& */, 15000 /* ^' */, 15000 /* ^( */, 15000 /* ^) */, 15000 /* ^* */, 15000 /* ^+ */, 15000 /* ^, */, 15000 /* ^- */, 15000 /* ^. */, 15000 /* ^/ */, 15000 /* ^0 */, 15000 /* ^1 */, 15000 /* ^2 */, 15000 /* ^3 */, 15000 /* ^4 */, 15000 /* ^5 */, 15000 /* ^6 */, 15000 /* ^7 */, 15000 /* ^8 */, 15000 /* ^9 */, 15000 /* ^: */, 15000 /* ^; */, 15000 /* ^< */, 15000 /* ^= */, 15000 /* ^> */, 15000 /* ^? */, 15000 /* ^@ */, 15000 /* ^A */, 15000 /* ^B */, 15000 /* ^C */, 15000 /* ^D */, 15000 /* ^E */, 15000 /* ^F */, 15000 /* ^G */, 15000 /* ^H */, 15000 /* ^I */, 15000 /* ^J */, 15000 /* ^K */, 15000 /* ^L */, 15000 /* ^M */, 15000 /* ^N */, 15000 /* ^O */, 15000 /* ^P */, 15000 /* ^Q */, 15000 /* ^R */, 15000 /* ^S */, 15000 /* ^T */, 15000 /* ^U */, 15000 /* ^V */, 15000 /* ^W */, 15000 /* ^X */, 15000 /* ^Y */, 15000 /* ^Z */, 15000 /* ^[ */, 15000 /* ^\ */, 15000 /* ^] */, 15000 /* ^^ */, 15000 /* ^_ */, 15000 /* ^` */, 15000 /* ^a */, 15000 /* ^b */, 15000 /* ^c */, 15000 /* ^d */, 15000 /* ^e */, 15000 /* ^f */, 15000 /* ^g */, 15000 /* ^h */, 15000 /* ^i */, 15000 /* ^j */, 15000 /* ^k */, 15000 /* ^l */, 15000 /* ^m */, 15000 /* ^n */, 15000 /* ^o */, 15000 /* ^p */, 15000 /* ^q */, 15000 /* ^r */, 15000 /* ^s */, 15000 /* ^t */, 15000 /* ^u */, 15000 /* ^v */, 15000 /* ^w */, 15000 /* ^x */, 15000 /* ^y */, 15000 /* ^z */, 15000 /* ^{ */, 15000 /* ^| */, 15000 /* ^} */, 15000 /* ^~ */}, - {13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _! */, 13900 /* _" */, 13900 /* _# */, 13900 /* _$ */, 13900 /* _% */, 13900 /* _& */, 13900 /* _' */, 13900 /* _( */, 13900 /* _) */, 13900 /* _* */, 13900 /* _+ */, 13900 /* _, */, 13900 /* _- */, 13900 /* _. */, 13900 /* _/ */, 13900 /* _0 */, 13900 /* _1 */, 13900 /* _2 */, 13900 /* _3 */, 13900 /* _4 */, 13900 /* _5 */, 13900 /* _6 */, 13900 /* _7 */, 13900 /* _8 */, 13900 /* _9 */, 13900 /* _: */, 13900 /* _; */, 13900 /* _< */, 13900 /* _= */, 13900 /* _> */, 13900 /* _? 
*/, 13900 /* _@ */, 14275 /* _A */, 13900 /* _B */, 11400 /* _C */, 13900 /* _D */, 13900 /* _E */, 13900 /* _F */, 11400 /* _G */, 13900 /* _H */, 13900 /* _I */, 12650 /* _J */, 13900 /* _K */, 13900 /* _L */, 13900 /* _M */, 13900 /* _N */, 11400 /* _O */, 13900 /* _P */, 11400 /* _Q */, 13900 /* _R */, 12400 /* _S */, 11900 /* _T */, 12150 /* _U */, 11900 /* _V */, 12650 /* _W */, 14400 /* _X */, 11650 /* _Y */, 14525 /* _Z */, 13900 /* _[ */, 13900 /* _\ */, 13900 /* _] */, 13900 /* _^ */, 13900 /* __ */, 13900 /* _` */, 13150 /* _a */, 13900 /* _b */, 12400 /* _c */, 12650 /* _d */, 12400 /* _e */, 13275 /* _f */, 14525 /* _g */, 13900 /* _h */, 13900 /* _i */, 15275 /* _j */, 13900 /* _k */, 12525 /* _l */, 13900 /* _m */, 13900 /* _n */, 12400 /* _o */, 14150 /* _p */, 12650 /* _q */, 13900 /* _r */, 12900 /* _s */, 12275 /* _t */, 13025 /* _u */, 12150 /* _v */, 12650 /* _w */, 14400 /* _x */, 13525 /* _y */, 14025 /* _z */, 13900 /* _{ */, 13900 /* _| */, 13900 /* _} */, 13900 /* _~ */}, - {15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* `! */, 15000 /* `" */, 15000 /* `# */, 15000 /* `$ */, 15000 /* `% */, 15000 /* `& */, 15000 /* `' */, 15000 /* `( */, 15000 /* `) */, 15000 /* `* */, 15000 /* `+ */, 15000 /* `, */, 15000 /* `- */, 15000 /* `. */, 15000 /* `/ */, 15000 /* `0 */, 15000 /* `1 */, 15000 /* `2 */, 15000 /* `3 */, 15000 /* `4 */, 15000 /* `5 */, 15000 /* `6 */, 15000 /* `7 */, 15000 /* `8 */, 15000 /* `9 */, 15000 /* `: */, 15000 /* `; */, 15000 /* `< */, 15000 /* `= */, 15000 /* `> */, 15000 /* `? */, 15000 /* `@ */, 15000 /* `A */, 15000 /* `B */, 15000 /* `C */, 15000 /* `D */, 15000 /* `E */, 15000 /* `F */, 15000 /* `G */, 15000 /* `H */, 15000 /* `I */, 15000 /* `J */, 15000 /* `K */, 15000 /* `L */, 15000 /* `M */, 15000 /* `N */, 15000 /* `O */, 15000 /* `P */, 15000 /* `Q */, 15000 /* `R */, 15000 /* `S */, 15000 /* `T */, 15000 /* `U */, 15000 /* `V */, 15000 /* `W */, 15000 /* `X */, 15000 /* `Y */, 15000 /* `Z */, 15000 /* `[ */, 15000 /* `\ */, 15000 /* `] */, 15000 /* `^ */, 15000 /* `_ */, 15000 /* `` */, 15000 /* `a */, 15000 /* `b */, 15000 /* `c */, 15000 /* `d */, 15000 /* `e */, 15000 /* `f */, 15000 /* `g */, 15000 /* `h */, 15000 /* `i */, 15000 /* `j */, 15000 /* `k */, 15000 /* `l */, 15000 /* `m */, 15000 /* `n */, 15000 /* `o */, 15000 /* `p */, 15000 /* `q */, 15000 /* `r */, 15000 /* `s */, 15000 /* `t */, 15000 /* `u */, 15000 /* `v */, 15000 /* `w */, 15000 /* `x */, 15000 /* `y */, 15000 /* `z */, 15000 /* `{ */, 15000 /* `| */, 15000 /* `} */, 15000 /* `~ */}, - {14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a! 
*/, 13850 /* a" */, 14225 /* a# */, 14225 /* a$ */, 14225 /* a% */, 14225 /* a& */, 13850 /* a' */, 14225 /* a( */, 14225 /* a) */, 13850 /* a* */, 14225 /* a+ */, 14475 /* a, */, 14100 /* a- */, 14475 /* a. */, 14725 /* a/ */, 14225 /* a0 */, 14225 /* a1 */, 14225 /* a2 */, 14225 /* a3 */, 14225 /* a4 */, 14225 /* a5 */, 14225 /* a6 */, 14225 /* a7 */, 14225 /* a8 */, 14225 /* a9 */, 14225 /* a: */, 14225 /* a; */, 14225 /* a< */, 14225 /* a= */, 14225 /* a> */, 14225 /* a? */, 14225 /* a@ */, 14425 /* aA */, 14225 /* aB */, 14225 /* aC */, 14225 /* aD */, 14225 /* aE */, 14225 /* aF */, 14225 /* aG */, 14225 /* aH */, 14225 /* aI */, 14225 /* aJ */, 14225 /* aK */, 14225 /* aL */, 14225 /* aM */, 14225 /* aN */, 14225 /* aO */, 14225 /* aP */, 14225 /* aQ */, 14225 /* aR */, 14225 /* aS */, 12475 /* aT */, 14225 /* aU */, 13475 /* aV */, 13975 /* aW */, 14225 /* aX */, 13100 /* aY */, 14375 /* aZ */, 14225 /* a[ */, 14225 /* a\ */, 14225 /* a] */, 14225 /* a^ */, 14475 /* a_ */, 14225 /* a` */, 14225 /* aa */, 14225 /* ab */, 14225 /* ac */, 14225 /* ad */, 14225 /* ae */, 14225 /* af */, 14225 /* ag */, 14225 /* ah */, 14225 /* ai */, 14225 /* aj */, 14225 /* ak */, 14225 /* al */, 14225 /* am */, 14225 /* an */, 14225 /* ao */, 14225 /* ap */, 14225 /* aq */, 14225 /* ar */, 14225 /* as */, 14075 /* at */, 14225 /* au */, 13975 /* av */, 14075 /* aw */, 14225 /* ax */, 14000 /* ay */, 14225 /* az */, 14225 /* a{ */, 14225 /* a| */, 14225 /* a} */, 14225 /* a~ */}, - {15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b! */, 14700 /* b" */, 15200 /* b# */, 15200 /* b$ */, 15200 /* b% */, 15200 /* b& */, 14700 /* b' */, 15200 /* b( */, 15200 /* b) */, 14700 /* b* */, 15200 /* b+ */, 14825 /* b, */, 15450 /* b- */, 14825 /* b. */, 15200 /* b/ */, 15200 /* b0 */, 15200 /* b1 */, 15200 /* b2 */, 15200 /* b3 */, 15200 /* b4 */, 15200 /* b5 */, 15200 /* b6 */, 15200 /* b7 */, 15200 /* b8 */, 15200 /* b9 */, 15200 /* b: */, 15200 /* b; */, 15200 /* b< */, 15200 /* b= */, 15200 /* b> */, 15200 /* b? 
*/, 15200 /* b@ */, 15200 /* bA */, 15200 /* bB */, 15200 /* bC */, 15200 /* bD */, 15200 /* bE */, 15200 /* bF */, 15200 /* bG */, 15200 /* bH */, 15075 /* bI */, 15200 /* bJ */, 15200 /* bK */, 15200 /* bL */, 15200 /* bM */, 15200 /* bN */, 15200 /* bO */, 15200 /* bP */, 15200 /* bQ */, 15200 /* bR */, 15200 /* bS */, 13575 /* bT */, 15200 /* bU */, 14325 /* bV */, 14575 /* bW */, 14700 /* bX */, 13700 /* bY */, 15075 /* bZ */, 15200 /* b[ */, 15200 /* b\ */, 15200 /* b] */, 15200 /* b^ */, 13950 /* b_ */, 15200 /* b` */, 15200 /* ba */, 15200 /* bb */, 15200 /* bc */, 15200 /* bd */, 15200 /* be */, 15050 /* bf */, 15200 /* bg */, 15200 /* bh */, 15200 /* bi */, 15200 /* bj */, 15200 /* bk */, 15200 /* bl */, 15200 /* bm */, 15200 /* bn */, 15200 /* bo */, 15200 /* bp */, 15200 /* bq */, 15200 /* br */, 15200 /* bs */, 15050 /* bt */, 15200 /* bu */, 15000 /* bv */, 14950 /* bw */, 14800 /* bx */, 14950 /* by */, 15000 /* bz */, 15200 /* b{ */, 15200 /* b| */, 15200 /* b} */, 15200 /* b~ */}, - {12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c! */, 12925 /* c" */, 12925 /* c# */, 12925 /* c$ */, 12925 /* c% */, 12925 /* c& */, 12925 /* c' */, 12925 /* c( */, 12925 /* c) */, 12925 /* c* */, 12925 /* c+ */, 12925 /* c, */, 12925 /* c- */, 12925 /* c. */, 12800 /* c/ */, 12925 /* c0 */, 12925 /* c1 */, 12925 /* c2 */, 12925 /* c3 */, 12925 /* c4 */, 12925 /* c5 */, 12925 /* c6 */, 12925 /* c7 */, 12925 /* c8 */, 12925 /* c9 */, 12925 /* c: */, 12925 /* c; */, 12925 /* c< */, 12925 /* c= */, 12925 /* c> */, 12925 /* c? */, 12925 /* c@ */, 13125 /* cA */, 12925 /* cB */, 12925 /* cC */, 12925 /* cD */, 12925 /* cE */, 12925 /* cF */, 12925 /* cG */, 12925 /* cH */, 12925 /* cI */, 13050 /* cJ */, 12925 /* cK */, 12925 /* cL */, 12925 /* cM */, 12925 /* cN */, 12925 /* cO */, 12925 /* cP */, 12925 /* cQ */, 12925 /* cR */, 12925 /* cS */, 11775 /* cT */, 12925 /* cU */, 12250 /* cV */, 12675 /* cW */, 12925 /* cX */, 11625 /* cY */, 13075 /* cZ */, 12925 /* c[ */, 12925 /* c\ */, 12925 /* c] */, 12925 /* c^ */, 11925 /* c_ */, 12925 /* c` */, 12925 /* ca */, 12925 /* cb */, 12800 /* cc */, 12800 /* cd */, 12800 /* ce */, 12925 /* cf */, 12925 /* cg */, 12925 /* ch */, 12925 /* ci */, 12925 /* cj */, 12925 /* ck */, 12925 /* cl */, 12925 /* cm */, 12925 /* cn */, 12800 /* co */, 12925 /* cp */, 12800 /* cq */, 12925 /* cr */, 12925 /* cs */, 12925 /* ct */, 12925 /* cu */, 12925 /* cv */, 12925 /* cw */, 12925 /* cx */, 12925 /* cy */, 12925 /* cz */, 12925 /* c{ */, 12925 /* c| */, 12925 /* c} */, 12925 /* c~ */}, - {15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d! 
*/, 15200 /* d" */, 15200 /* d# */, 15200 /* d$ */, 15200 /* d% */, 15200 /* d& */, 15200 /* d' */, 15200 /* d( */, 15200 /* d) */, 15200 /* d* */, 15200 /* d+ */, 15200 /* d, */, 15200 /* d- */, 15200 /* d. */, 15200 /* d/ */, 15200 /* d0 */, 15200 /* d1 */, 15200 /* d2 */, 15200 /* d3 */, 15200 /* d4 */, 15200 /* d5 */, 15200 /* d6 */, 15200 /* d7 */, 15200 /* d8 */, 15200 /* d9 */, 15200 /* d: */, 15200 /* d; */, 15200 /* d< */, 15200 /* d= */, 15200 /* d> */, 15200 /* d? */, 15200 /* d@ */, 15200 /* dA */, 15200 /* dB */, 15200 /* dC */, 15200 /* dD */, 15200 /* dE */, 15200 /* dF */, 15200 /* dG */, 15200 /* dH */, 15200 /* dI */, 15200 /* dJ */, 15200 /* dK */, 15200 /* dL */, 15200 /* dM */, 15200 /* dN */, 15200 /* dO */, 15200 /* dP */, 15200 /* dQ */, 15200 /* dR */, 15200 /* dS */, 15200 /* dT */, 15200 /* dU */, 15200 /* dV */, 15200 /* dW */, 15200 /* dX */, 15200 /* dY */, 15200 /* dZ */, 15200 /* d[ */, 15200 /* d\ */, 15200 /* d] */, 15200 /* d^ */, 15200 /* d_ */, 15200 /* d` */, 15200 /* da */, 15200 /* db */, 15200 /* dc */, 15200 /* dd */, 15200 /* de */, 15200 /* df */, 15200 /* dg */, 15200 /* dh */, 15200 /* di */, 15200 /* dj */, 15200 /* dk */, 15200 /* dl */, 15200 /* dm */, 15200 /* dn */, 15200 /* do */, 15200 /* dp */, 15200 /* dq */, 15200 /* dr */, 15200 /* ds */, 15200 /* dt */, 15200 /* du */, 15200 /* dv */, 15200 /* dw */, 15200 /* dx */, 15200 /* dy */, 15200 /* dz */, 15200 /* d{ */, 15200 /* d| */, 15200 /* d} */, 15200 /* d~ */}, - {14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e! */, 13550 /* e" */, 14050 /* e# */, 14050 /* e$ */, 14050 /* e% */, 14050 /* e& */, 13550 /* e' */, 14050 /* e( */, 14050 /* e) */, 13550 /* e* */, 14050 /* e+ */, 13675 /* e, */, 14300 /* e- */, 13675 /* e. */, 14050 /* e/ */, 14050 /* e0 */, 14050 /* e1 */, 14050 /* e2 */, 14050 /* e3 */, 14050 /* e4 */, 14050 /* e5 */, 14050 /* e6 */, 14050 /* e7 */, 14050 /* e8 */, 14050 /* e9 */, 14050 /* e: */, 14050 /* e; */, 14050 /* e< */, 14050 /* e= */, 14050 /* e> */, 14050 /* e? 
*/, 14050 /* e@ */, 14050 /* eA */, 14050 /* eB */, 14050 /* eC */, 14050 /* eD */, 14050 /* eE */, 14050 /* eF */, 14050 /* eG */, 14050 /* eH */, 13925 /* eI */, 14050 /* eJ */, 14050 /* eK */, 14050 /* eL */, 14050 /* eM */, 14050 /* eN */, 14050 /* eO */, 14050 /* eP */, 14050 /* eQ */, 14050 /* eR */, 13800 /* eS */, 12425 /* eT */, 14050 /* eU */, 13175 /* eV */, 13550 /* eW */, 13550 /* eX */, 12550 /* eY */, 13925 /* eZ */, 14050 /* e[ */, 14050 /* e\ */, 14050 /* e] */, 14050 /* e^ */, 12550 /* e_ */, 14050 /* e` */, 14050 /* ea */, 14050 /* eb */, 14050 /* ec */, 14050 /* ed */, 14050 /* ee */, 14050 /* ef */, 14050 /* eg */, 14050 /* eh */, 14050 /* ei */, 14050 /* ej */, 14050 /* ek */, 14050 /* el */, 14050 /* em */, 14050 /* en */, 14050 /* eo */, 14050 /* ep */, 14050 /* eq */, 14050 /* er */, 14050 /* es */, 14050 /* et */, 14050 /* eu */, 13800 /* ev */, 13925 /* ew */, 13650 /* ex */, 13875 /* ey */, 13925 /* ez */, 14050 /* e{ */, 14050 /* e| */, 14050 /* e} */, 14050 /* e~ */}, - {9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9525 /* f! */, 9775 /* f" */, 9025 /* f# */, 9025 /* f$ */, 9025 /* f% */, 8650 /* f& */, 9775 /* f' */, 9025 /* f( */, 9650 /* f) */, 9775 /* f* */, 9025 /* f+ */, 8275 /* f, */, 9025 /* f- */, 8275 /* f. */, 8525 /* f/ */, 9025 /* f0 */, 9025 /* f1 */, 9025 /* f2 */, 9025 /* f3 */, 9025 /* f4 */, 9025 /* f5 */, 9025 /* f6 */, 9025 /* f7 */, 9025 /* f8 */, 9025 /* f9 */, 9025 /* f: */, 9025 /* f; */, 9025 /* f< */, 9025 /* f= */, 9025 /* f> */, 9775 /* f? */, 9025 /* f@ */, 8875 /* fA */, 9025 /* fB */, 9400 /* fC */, 9025 /* fD */, 9025 /* fE */, 9025 /* fF */, 9400 /* fG */, 9025 /* fH */, 9025 /* fI */, 8450 /* fJ */, 9025 /* fK */, 9025 /* fL */, 9025 /* fM */, 9025 /* fN */, 9400 /* fO */, 9025 /* fP */, 9400 /* fQ */, 9025 /* fR */, 9025 /* fS */, 9650 /* fT */, 9025 /* fU */, 9900 /* fV */, 9650 /* fW */, 9375 /* fX */, 9775 /* fY */, 9075 /* fZ */, 9025 /* f[ */, 9025 /* f\ */, 9400 /* f] */, 9025 /* f^ */, 7475 /* f_ */, 9025 /* f` */, 9025 /* fa */, 9025 /* fb */, 8925 /* fc */, 8925 /* fd */, 8925 /* fe */, 9125 /* ff */, 8925 /* fg */, 9025 /* fh */, 9025 /* fi */, 9025 /* fj */, 9025 /* fk */, 9025 /* fl */, 9025 /* fm */, 9025 /* fn */, 8925 /* fo */, 9025 /* fp */, 8925 /* fq */, 9025 /* fr */, 9025 /* fs */, 9025 /* ft */, 9025 /* fu */, 9275 /* fv */, 9025 /* fw */, 9025 /* fx */, 9275 /* fy */, 9025 /* fz */, 9025 /* f{ */, 9025 /* f| */, 9400 /* f} */, 9025 /* f~ */}, - {13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g! 
*/, 13925 /* g" */, 13800 /* g# */, 13800 /* g$ */, 13800 /* g% */, 13675 /* g& */, 13925 /* g' */, 13800 /* g( */, 14475 /* g) */, 13925 /* g* */, 13800 /* g+ */, 13950 /* g, */, 13800 /* g- */, 13950 /* g. */, 14375 /* g/ */, 13800 /* g0 */, 13800 /* g1 */, 13800 /* g2 */, 13800 /* g3 */, 13800 /* g4 */, 13800 /* g5 */, 13800 /* g6 */, 13800 /* g7 */, 13800 /* g8 */, 13800 /* g9 */, 13800 /* g: */, 13800 /* g; */, 13800 /* g< */, 13800 /* g= */, 13800 /* g> */, 13800 /* g? */, 13800 /* g@ */, 14425 /* gA */, 13800 /* gB */, 13800 /* gC */, 13800 /* gD */, 13800 /* gE */, 13800 /* gF */, 13800 /* gG */, 13800 /* gH */, 13800 /* gI */, 13575 /* gJ */, 13800 /* gK */, 13800 /* gL */, 13800 /* gM */, 13800 /* gN */, 13800 /* gO */, 13800 /* gP */, 13800 /* gQ */, 13800 /* gR */, 13800 /* gS */, 13575 /* gT */, 13800 /* gU */, 13625 /* gV */, 13800 /* gW */, 14075 /* gX */, 13600 /* gY */, 14050 /* gZ */, 13800 /* g[ */, 13800 /* g\ */, 14100 /* g] */, 13800 /* g^ */, 15200 /* g_ */, 13800 /* g` */, 13750 /* ga */, 13800 /* gb */, 13650 /* gc */, 13650 /* gd */, 13650 /* ge */, 13800 /* gf */, 13975 /* gg */, 13800 /* gh */, 13800 /* gi */, 14500 /* gj */, 13800 /* gk */, 13800 /* gl */, 13800 /* gm */, 13800 /* gn */, 13650 /* go */, 13800 /* gp */, 13650 /* gq */, 13800 /* gr */, 13800 /* gs */, 13800 /* gt */, 13700 /* gu */, 13800 /* gv */, 13800 /* gw */, 13800 /* gx */, 13925 /* gy */, 13800 /* gz */, 13800 /* g{ */, 13800 /* g| */, 14100 /* g} */, 13800 /* g~ */}, - {14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h! */, 14525 /* h" */, 14900 /* h# */, 14900 /* h$ */, 14900 /* h% */, 14900 /* h& */, 14525 /* h' */, 14900 /* h( */, 14900 /* h) */, 14525 /* h* */, 14900 /* h+ */, 14900 /* h, */, 14900 /* h- */, 14900 /* h. */, 14900 /* h/ */, 14900 /* h0 */, 14900 /* h1 */, 14900 /* h2 */, 14900 /* h3 */, 14900 /* h4 */, 14900 /* h5 */, 14900 /* h6 */, 14900 /* h7 */, 14900 /* h8 */, 14900 /* h9 */, 14900 /* h: */, 14900 /* h; */, 14900 /* h< */, 14900 /* h= */, 14900 /* h> */, 14900 /* h? 
*/, 14900 /* h@ */, 14900 /* hA */, 14900 /* hB */, 14900 /* hC */, 14900 /* hD */, 14900 /* hE */, 14900 /* hF */, 14900 /* hG */, 14900 /* hH */, 14900 /* hI */, 14900 /* hJ */, 14900 /* hK */, 14900 /* hL */, 14900 /* hM */, 14900 /* hN */, 14900 /* hO */, 14900 /* hP */, 14900 /* hQ */, 14900 /* hR */, 14900 /* hS */, 13525 /* hT */, 14900 /* hU */, 14025 /* hV */, 14275 /* hW */, 14900 /* hX */, 13525 /* hY */, 14900 /* hZ */, 14900 /* h[ */, 14900 /* h\ */, 14900 /* h] */, 14900 /* h^ */, 14900 /* h_ */, 14900 /* h` */, 14900 /* ha */, 14900 /* hb */, 14900 /* hc */, 14900 /* hd */, 14900 /* he */, 14900 /* hf */, 14900 /* hg */, 14900 /* hh */, 14900 /* hi */, 14900 /* hj */, 14900 /* hk */, 14900 /* hl */, 14900 /* hm */, 14900 /* hn */, 14900 /* ho */, 14900 /* hp */, 14900 /* hq */, 14900 /* hr */, 14900 /* hs */, 14900 /* ht */, 14900 /* hu */, 14700 /* hv */, 14750 /* hw */, 14900 /* hx */, 14700 /* hy */, 14900 /* hz */, 14900 /* h{ */, 14900 /* h| */, 14900 /* h} */, 14900 /* h~ */}, - {7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i! */, 7150 /* i" */, 7150 /* i# */, 7150 /* i$ */, 7150 /* i% */, 7150 /* i& */, 7150 /* i' */, 7150 /* i( */, 7150 /* i) */, 7150 /* i* */, 7150 /* i+ */, 7150 /* i, */, 7150 /* i- */, 7150 /* i. */, 7150 /* i/ */, 7150 /* i0 */, 7150 /* i1 */, 7150 /* i2 */, 7150 /* i3 */, 7150 /* i4 */, 7150 /* i5 */, 7150 /* i6 */, 7150 /* i7 */, 7150 /* i8 */, 7150 /* i9 */, 7150 /* i: */, 7150 /* i; */, 7150 /* i< */, 7150 /* i= */, 7150 /* i> */, 7150 /* i? */, 7150 /* i@ */, 7150 /* iA */, 7150 /* iB */, 7150 /* iC */, 7150 /* iD */, 7150 /* iE */, 7150 /* iF */, 7150 /* iG */, 7150 /* iH */, 7150 /* iI */, 6900 /* iJ */, 7150 /* iK */, 7150 /* iL */, 7150 /* iM */, 7150 /* iN */, 7150 /* iO */, 7150 /* iP */, 7150 /* iQ */, 7150 /* iR */, 7150 /* iS */, 7150 /* iT */, 7150 /* iU */, 7150 /* iV */, 7150 /* iW */, 7150 /* iX */, 6900 /* iY */, 7150 /* iZ */, 7150 /* i[ */, 7150 /* i\ */, 7150 /* i] */, 7150 /* i^ */, 7150 /* i_ */, 7150 /* i` */, 7150 /* ia */, 7150 /* ib */, 7150 /* ic */, 7150 /* id */, 7150 /* ie */, 7150 /* if */, 7150 /* ig */, 7150 /* ih */, 7150 /* ii */, 7150 /* ij */, 7150 /* ik */, 7150 /* il */, 7150 /* im */, 7150 /* in */, 7150 /* io */, 7150 /* ip */, 7150 /* iq */, 7150 /* ir */, 7150 /* is */, 7150 /* it */, 7150 /* iu */, 7150 /* iv */, 7150 /* iw */, 7150 /* ix */, 7150 /* iy */, 7150 /* iz */, 7150 /* i{ */, 7150 /* i| */, 7150 /* i} */, 7150 /* i~ */}, - {7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j! 
*/, 7150 /* j" */, 7150 /* j# */, 7150 /* j$ */, 7150 /* j% */, 7150 /* j& */, 7150 /* j' */, 7150 /* j( */, 7150 /* j) */, 7150 /* j* */, 7150 /* j+ */, 7150 /* j, */, 7150 /* j- */, 7150 /* j. */, 7150 /* j/ */, 7150 /* j0 */, 7150 /* j1 */, 7150 /* j2 */, 7150 /* j3 */, 7150 /* j4 */, 7150 /* j5 */, 7150 /* j6 */, 7150 /* j7 */, 7150 /* j8 */, 7150 /* j9 */, 7150 /* j: */, 7150 /* j; */, 7150 /* j< */, 7150 /* j= */, 7150 /* j> */, 7150 /* j? */, 7150 /* j@ */, 7150 /* jA */, 7150 /* jB */, 7150 /* jC */, 7150 /* jD */, 7150 /* jE */, 7150 /* jF */, 7150 /* jG */, 7150 /* jH */, 7150 /* jI */, 7150 /* jJ */, 7150 /* jK */, 7150 /* jL */, 7150 /* jM */, 7150 /* jN */, 7150 /* jO */, 7150 /* jP */, 7150 /* jQ */, 7150 /* jR */, 7150 /* jS */, 6150 /* jT */, 7150 /* jU */, 6525 /* jV */, 6900 /* jW */, 7150 /* jX */, 7150 /* jY */, 7150 /* jZ */, 7150 /* j[ */, 7150 /* j\ */, 7150 /* j] */, 7150 /* j^ */, 7150 /* j_ */, 7150 /* j` */, 7150 /* ja */, 7150 /* jb */, 7150 /* jc */, 7150 /* jd */, 7150 /* je */, 7150 /* jf */, 7150 /* jg */, 7150 /* jh */, 7150 /* ji */, 7150 /* jj */, 7150 /* jk */, 7150 /* jl */, 7150 /* jm */, 7150 /* jn */, 7150 /* jo */, 7150 /* jp */, 7150 /* jq */, 7150 /* jr */, 7150 /* js */, 7150 /* jt */, 7150 /* ju */, 7150 /* jv */, 7150 /* jw */, 7150 /* jx */, 7150 /* jy */, 7150 /* jz */, 7150 /* j{ */, 7150 /* j| */, 7150 /* j} */, 7150 /* j~ */}, - {14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k! */, 14425 /* k" */, 14425 /* k# */, 14425 /* k$ */, 14425 /* k% */, 14175 /* k& */, 14425 /* k' */, 14425 /* k( */, 14425 /* k) */, 14425 /* k* */, 14425 /* k+ */, 14425 /* k, */, 13550 /* k- */, 14425 /* k. */, 14425 /* k/ */, 14425 /* k0 */, 14425 /* k1 */, 14425 /* k2 */, 14425 /* k3 */, 14425 /* k4 */, 14425 /* k5 */, 14425 /* k6 */, 14425 /* k7 */, 14425 /* k8 */, 14425 /* k9 */, 14425 /* k: */, 14425 /* k; */, 14425 /* k< */, 14425 /* k= */, 14425 /* k> */, 14425 /* k? 
*/, 14175 /* k@ */, 14800 /* kA */, 14425 /* kB */, 14100 /* kC */, 14425 /* kD */, 14425 /* kE */, 14425 /* kF */, 14100 /* kG */, 14425 /* kH */, 14425 /* kI */, 14300 /* kJ */, 14425 /* kK */, 14425 /* kL */, 14425 /* kM */, 14425 /* kN */, 14100 /* kO */, 14425 /* kP */, 14100 /* kQ */, 14425 /* kR */, 14250 /* kS */, 13625 /* kT */, 14425 /* kU */, 13975 /* kV */, 14250 /* kW */, 14425 /* kX */, 13275 /* kY */, 14825 /* kZ */, 14425 /* k[ */, 14425 /* k\ */, 14425 /* k] */, 14425 /* k^ */, 14925 /* k_ */, 14425 /* k` */, 14425 /* ka */, 14425 /* kb */, 13750 /* kc */, 13925 /* kd */, 13750 /* ke */, 14425 /* kf */, 14425 /* kg */, 14425 /* kh */, 14425 /* ki */, 14425 /* kj */, 14425 /* kk */, 14425 /* kl */, 14425 /* km */, 14425 /* kn */, 13750 /* ko */, 14425 /* kp */, 13925 /* kq */, 14425 /* kr */, 14175 /* ks */, 14175 /* kt */, 14175 /* ku */, 14125 /* kv */, 14300 /* kw */, 14425 /* kx */, 14300 /* ky */, 14425 /* kz */, 14425 /* k{ */, 14425 /* k| */, 14425 /* k} */, 14425 /* k~ */}, - {7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l! */, 7575 /* l" */, 7575 /* l# */, 7575 /* l$ */, 7575 /* l% */, 7575 /* l& */, 7575 /* l' */, 7575 /* l( */, 7575 /* l) */, 7575 /* l* */, 7575 /* l+ */, 7825 /* l, */, 7325 /* l- */, 7825 /* l. */, 7575 /* l/ */, 7575 /* l0 */, 7575 /* l1 */, 7575 /* l2 */, 7575 /* l3 */, 7575 /* l4 */, 7575 /* l5 */, 7575 /* l6 */, 7575 /* l7 */, 7575 /* l8 */, 7575 /* l9 */, 7575 /* l: */, 7575 /* l; */, 7575 /* l< */, 7575 /* l= */, 7575 /* l> */, 7575 /* l? */, 7575 /* l@ */, 8175 /* lA */, 7575 /* lB */, 7575 /* lC */, 7575 /* lD */, 7575 /* lE */, 7575 /* lF */, 7575 /* lG */, 7575 /* lH */, 7575 /* lI */, 7825 /* lJ */, 7575 /* lK */, 7575 /* lL */, 7575 /* lM */, 7575 /* lN */, 7575 /* lO */, 7575 /* lP */, 7575 /* lQ */, 7575 /* lR */, 7575 /* lS */, 7250 /* lT */, 7575 /* lU */, 7450 /* lV */, 7575 /* lW */, 7950 /* lX */, 7200 /* lY */, 7950 /* lZ */, 7575 /* l[ */, 7575 /* l\ */, 7575 /* l] */, 7575 /* l^ */, 8200 /* l_ */, 7575 /* l` */, 7575 /* la */, 7575 /* lb */, 7575 /* lc */, 7575 /* ld */, 7575 /* le */, 7575 /* lf */, 7575 /* lg */, 7575 /* lh */, 7575 /* li */, 7575 /* lj */, 7575 /* lk */, 7375 /* ll */, 7575 /* lm */, 7575 /* ln */, 7575 /* lo */, 7575 /* lp */, 7575 /* lq */, 7575 /* lr */, 7575 /* ls */, 7450 /* lt */, 7575 /* lu */, 7425 /* lv */, 7475 /* lw */, 7575 /* lx */, 7425 /* ly */, 7725 /* lz */, 7575 /* l{ */, 7575 /* l| */, 7575 /* l} */, 7575 /* l~ */}, - {22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m! 
*/, 21975 /* m" */, 22350 /* m# */, 22350 /* m$ */, 22350 /* m% */, 22350 /* m& */, 21975 /* m' */, 22350 /* m( */, 22350 /* m) */, 21975 /* m* */, 22350 /* m+ */, 22350 /* m, */, 22350 /* m- */, 22350 /* m. */, 22350 /* m/ */, 22350 /* m0 */, 22350 /* m1 */, 22350 /* m2 */, 22350 /* m3 */, 22350 /* m4 */, 22350 /* m5 */, 22350 /* m6 */, 22350 /* m7 */, 22350 /* m8 */, 22350 /* m9 */, 22350 /* m: */, 22350 /* m; */, 22350 /* m< */, 22350 /* m= */, 22350 /* m> */, 22350 /* m? */, 22350 /* m@ */, 22350 /* mA */, 22350 /* mB */, 22350 /* mC */, 22350 /* mD */, 22350 /* mE */, 22350 /* mF */, 22350 /* mG */, 22350 /* mH */, 22350 /* mI */, 22350 /* mJ */, 22350 /* mK */, 22350 /* mL */, 22350 /* mM */, 22350 /* mN */, 22350 /* mO */, 22350 /* mP */, 22350 /* mQ */, 22350 /* mR */, 22350 /* mS */, 20975 /* mT */, 22350 /* mU */, 21475 /* mV */, 21725 /* mW */, 22350 /* mX */, 20975 /* mY */, 22350 /* mZ */, 22350 /* m[ */, 22350 /* m\ */, 22350 /* m] */, 22350 /* m^ */, 22350 /* m_ */, 22350 /* m` */, 22350 /* ma */, 22350 /* mb */, 22350 /* mc */, 22350 /* md */, 22350 /* me */, 22350 /* mf */, 22350 /* mg */, 22350 /* mh */, 22350 /* mi */, 22350 /* mj */, 22350 /* mk */, 22350 /* ml */, 22350 /* mm */, 22350 /* mn */, 22350 /* mo */, 22350 /* mp */, 22350 /* mq */, 22350 /* mr */, 22350 /* ms */, 22350 /* mt */, 22350 /* mu */, 22150 /* mv */, 22200 /* mw */, 22350 /* mx */, 22150 /* my */, 22350 /* mz */, 22350 /* m{ */, 22350 /* m| */, 22350 /* m} */, 22350 /* m~ */}, - {14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n! */, 14525 /* n" */, 14900 /* n# */, 14900 /* n$ */, 14900 /* n% */, 14900 /* n& */, 14525 /* n' */, 14900 /* n( */, 14900 /* n) */, 14525 /* n* */, 14900 /* n+ */, 14900 /* n, */, 14900 /* n- */, 14900 /* n. */, 14900 /* n/ */, 14900 /* n0 */, 14900 /* n1 */, 14900 /* n2 */, 14900 /* n3 */, 14900 /* n4 */, 14900 /* n5 */, 14900 /* n6 */, 14900 /* n7 */, 14900 /* n8 */, 14900 /* n9 */, 14900 /* n: */, 14900 /* n; */, 14900 /* n< */, 14900 /* n= */, 14900 /* n> */, 14900 /* n? 
*/, 14900 /* n@ */, 14900 /* nA */, 14900 /* nB */, 14900 /* nC */, 14900 /* nD */, 14900 /* nE */, 14900 /* nF */, 14900 /* nG */, 14900 /* nH */, 14900 /* nI */, 14900 /* nJ */, 14900 /* nK */, 14900 /* nL */, 14900 /* nM */, 14900 /* nN */, 14900 /* nO */, 14900 /* nP */, 14900 /* nQ */, 14900 /* nR */, 14900 /* nS */, 13525 /* nT */, 14900 /* nU */, 14025 /* nV */, 14275 /* nW */, 14900 /* nX */, 13525 /* nY */, 14900 /* nZ */, 14900 /* n[ */, 14900 /* n\ */, 14900 /* n] */, 14900 /* n^ */, 14900 /* n_ */, 14900 /* n` */, 14900 /* na */, 14900 /* nb */, 14900 /* nc */, 14900 /* nd */, 14900 /* ne */, 14900 /* nf */, 14900 /* ng */, 14900 /* nh */, 14900 /* ni */, 14900 /* nj */, 14900 /* nk */, 14900 /* nl */, 14900 /* nm */, 14900 /* nn */, 14900 /* no */, 14900 /* np */, 14900 /* nq */, 14900 /* nr */, 14900 /* ns */, 14900 /* nt */, 14900 /* nu */, 14700 /* nv */, 14750 /* nw */, 14900 /* nx */, 14700 /* ny */, 14900 /* nz */, 14900 /* n{ */, 14900 /* n| */, 14900 /* n} */, 14900 /* n~ */}, - {14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o! */, 13600 /* o" */, 14100 /* o# */, 14100 /* o$ */, 14100 /* o% */, 14100 /* o& */, 13600 /* o' */, 14100 /* o( */, 14100 /* o) */, 13600 /* o* */, 14100 /* o+ */, 13725 /* o, */, 14350 /* o- */, 13725 /* o. */, 14100 /* o/ */, 14100 /* o0 */, 14100 /* o1 */, 14100 /* o2 */, 14100 /* o3 */, 14100 /* o4 */, 14100 /* o5 */, 14100 /* o6 */, 14100 /* o7 */, 14100 /* o8 */, 14100 /* o9 */, 14100 /* o: */, 14100 /* o; */, 14100 /* o< */, 14100 /* o= */, 14100 /* o> */, 14100 /* o? */, 14100 /* o@ */, 14100 /* oA */, 14100 /* oB */, 14100 /* oC */, 14100 /* oD */, 14100 /* oE */, 14100 /* oF */, 14100 /* oG */, 14100 /* oH */, 13975 /* oI */, 14100 /* oJ */, 14100 /* oK */, 14100 /* oL */, 14100 /* oM */, 14100 /* oN */, 14100 /* oO */, 14100 /* oP */, 14100 /* oQ */, 14100 /* oR */, 14100 /* oS */, 12475 /* oT */, 14100 /* oU */, 13225 /* oV */, 13475 /* oW */, 13600 /* oX */, 12600 /* oY */, 13975 /* oZ */, 14100 /* o[ */, 14100 /* o\ */, 14100 /* o] */, 14100 /* o^ */, 12600 /* o_ */, 14100 /* o` */, 14100 /* oa */, 14100 /* ob */, 14100 /* oc */, 14100 /* od */, 14100 /* oe */, 13950 /* of */, 14100 /* og */, 14100 /* oh */, 14100 /* oi */, 14100 /* oj */, 14100 /* ok */, 14100 /* ol */, 14100 /* om */, 14100 /* on */, 14100 /* oo */, 14100 /* op */, 14100 /* oq */, 14100 /* or */, 14100 /* os */, 13950 /* ot */, 14100 /* ou */, 13900 /* ov */, 13850 /* ow */, 13700 /* ox */, 13850 /* oy */, 13900 /* oz */, 14100 /* o{ */, 14100 /* o| */, 14100 /* o} */, 14100 /* o~ */}, - {15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p! 
*/, 14700 /* p" */, 15200 /* p# */, 15200 /* p$ */, 15200 /* p% */, 15200 /* p& */, 14700 /* p' */, 15200 /* p( */, 15200 /* p) */, 14700 /* p* */, 15200 /* p+ */, 14825 /* p, */, 15450 /* p- */, 14825 /* p. */, 15200 /* p/ */, 15200 /* p0 */, 15200 /* p1 */, 15200 /* p2 */, 15200 /* p3 */, 15200 /* p4 */, 15200 /* p5 */, 15200 /* p6 */, 15200 /* p7 */, 15200 /* p8 */, 15200 /* p9 */, 15200 /* p: */, 15200 /* p; */, 15200 /* p< */, 15200 /* p= */, 15200 /* p> */, 15200 /* p? */, 15200 /* p@ */, 15200 /* pA */, 15200 /* pB */, 15200 /* pC */, 15200 /* pD */, 15200 /* pE */, 15200 /* pF */, 15200 /* pG */, 15200 /* pH */, 15075 /* pI */, 15200 /* pJ */, 15200 /* pK */, 15200 /* pL */, 15200 /* pM */, 15200 /* pN */, 15200 /* pO */, 15200 /* pP */, 15200 /* pQ */, 15200 /* pR */, 15200 /* pS */, 13575 /* pT */, 15200 /* pU */, 14325 /* pV */, 14575 /* pW */, 14700 /* pX */, 13700 /* pY */, 15075 /* pZ */, 15200 /* p[ */, 15200 /* p\ */, 15200 /* p] */, 15200 /* p^ */, 13950 /* p_ */, 15200 /* p` */, 15200 /* pa */, 15200 /* pb */, 15200 /* pc */, 15200 /* pd */, 15200 /* pe */, 15050 /* pf */, 15200 /* pg */, 15200 /* ph */, 15200 /* pi */, 15200 /* pj */, 15200 /* pk */, 15200 /* pl */, 15200 /* pm */, 15200 /* pn */, 15200 /* po */, 15200 /* pp */, 15200 /* pq */, 15200 /* pr */, 15200 /* ps */, 15050 /* pt */, 15200 /* pu */, 15000 /* pv */, 14950 /* pw */, 14800 /* px */, 14950 /* py */, 15000 /* pz */, 15200 /* p{ */, 15200 /* p| */, 15200 /* p} */, 15200 /* p~ */}, - {15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q! */, 15200 /* q" */, 15200 /* q# */, 15200 /* q$ */, 15200 /* q% */, 15200 /* q& */, 15200 /* q' */, 15200 /* q( */, 15200 /* q) */, 15200 /* q* */, 15200 /* q+ */, 15200 /* q, */, 15200 /* q- */, 15200 /* q. */, 15200 /* q/ */, 15200 /* q0 */, 15200 /* q1 */, 15200 /* q2 */, 15200 /* q3 */, 15200 /* q4 */, 15200 /* q5 */, 15200 /* q6 */, 15200 /* q7 */, 15200 /* q8 */, 15200 /* q9 */, 15200 /* q: */, 15200 /* q; */, 15200 /* q< */, 15200 /* q= */, 15200 /* q> */, 15200 /* q? 
*/, 15200 /* q@ */, 15200 /* qA */, 15200 /* qB */, 15200 /* qC */, 15200 /* qD */, 15200 /* qE */, 15200 /* qF */, 15200 /* qG */, 15200 /* qH */, 15200 /* qI */, 15200 /* qJ */, 15200 /* qK */, 15200 /* qL */, 15200 /* qM */, 15200 /* qN */, 15200 /* qO */, 15200 /* qP */, 15200 /* qQ */, 15200 /* qR */, 15200 /* qS */, 14200 /* qT */, 15200 /* qU */, 14575 /* qV */, 14950 /* qW */, 15200 /* qX */, 14450 /* qY */, 15200 /* qZ */, 15200 /* q[ */, 15200 /* q\ */, 15200 /* q] */, 15200 /* q^ */, 15200 /* q_ */, 15200 /* q` */, 15200 /* qa */, 15200 /* qb */, 15200 /* qc */, 15200 /* qd */, 15200 /* qe */, 15200 /* qf */, 15200 /* qg */, 15200 /* qh */, 15200 /* qi */, 15200 /* qj */, 15200 /* qk */, 15200 /* ql */, 15200 /* qm */, 15200 /* qn */, 15200 /* qo */, 15200 /* qp */, 15200 /* qq */, 15200 /* qr */, 15200 /* qs */, 15200 /* qt */, 15200 /* qu */, 15200 /* qv */, 15200 /* qw */, 15200 /* qx */, 15200 /* qy */, 15200 /* qz */, 15200 /* q{ */, 15200 /* q| */, 15200 /* q} */, 15200 /* q~ */}, - {10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r! */, 10850 /* r" */, 10100 /* r# */, 10100 /* r$ */, 10100 /* r% */, 9600 /* r& */, 10850 /* r' */, 10100 /* r( */, 10100 /* r) */, 10850 /* r* */, 10100 /* r+ */, 8100 /* r, */, 10100 /* r- */, 8100 /* r. */, 9550 /* r/ */, 10100 /* r0 */, 10100 /* r1 */, 10100 /* r2 */, 10100 /* r3 */, 10100 /* r4 */, 10100 /* r5 */, 10100 /* r6 */, 10100 /* r7 */, 10100 /* r8 */, 10100 /* r9 */, 10100 /* r: */, 10100 /* r; */, 10100 /* r< */, 10100 /* r= */, 10100 /* r> */, 10100 /* r? */, 10100 /* r@ */, 9100 /* rA */, 10100 /* rB */, 10350 /* rC */, 10100 /* rD */, 10100 /* rE */, 10100 /* rF */, 10350 /* rG */, 10100 /* rH */, 10100 /* rI */, 9500 /* rJ */, 10100 /* rK */, 10100 /* rL */, 10100 /* rM */, 10100 /* rN */, 10350 /* rO */, 10100 /* rP */, 10350 /* rQ */, 10100 /* rR */, 10275 /* rS */, 9850 /* rT */, 10100 /* rU */, 10100 /* rV */, 10100 /* rW */, 9600 /* rX */, 9600 /* rY */, 9825 /* rZ */, 10100 /* r[ */, 10100 /* r\ */, 10100 /* r] */, 10100 /* r^ */, 8100 /* r_ */, 10100 /* r` */, 9750 /* ra */, 10100 /* rb */, 9975 /* rc */, 10000 /* rd */, 9975 /* re */, 10325 /* rf */, 9750 /* rg */, 10100 /* rh */, 10100 /* ri */, 10100 /* rj */, 10100 /* rk */, 10100 /* rl */, 10100 /* rm */, 10100 /* rn */, 9975 /* ro */, 10100 /* rp */, 10000 /* rq */, 10100 /* rr */, 10100 /* rs */, 10200 /* rt */, 10100 /* ru */, 10275 /* rv */, 10225 /* rw */, 10100 /* rx */, 10275 /* ry */, 10100 /* rz */, 10100 /* r{ */, 10100 /* r| */, 10100 /* r} */, 10100 /* r~ */}, - {12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s! 
*/, 12350 /* s" */, 12600 /* s# */, 12600 /* s$ */, 12600 /* s% */, 12600 /* s& */, 12350 /* s' */, 12600 /* s( */, 12600 /* s) */, 12350 /* s* */, 12600 /* s+ */, 12400 /* s, */, 12600 /* s- */, 12400 /* s. */, 12600 /* s/ */, 12600 /* s0 */, 12600 /* s1 */, 12600 /* s2 */, 12600 /* s3 */, 12600 /* s4 */, 12600 /* s5 */, 12600 /* s6 */, 12600 /* s7 */, 12600 /* s8 */, 12600 /* s9 */, 12600 /* s: */, 12600 /* s; */, 12600 /* s< */, 12600 /* s= */, 12600 /* s> */, 12600 /* s? */, 12600 /* s@ */, 12650 /* sA */, 12600 /* sB */, 12600 /* sC */, 12600 /* sD */, 12600 /* sE */, 12600 /* sF */, 12600 /* sG */, 12600 /* sH */, 12600 /* sI */, 12550 /* sJ */, 12600 /* sK */, 12600 /* sL */, 12600 /* sM */, 12600 /* sN */, 12600 /* sO */, 12600 /* sP */, 12600 /* sQ */, 12600 /* sR */, 12600 /* sS */, 10975 /* sT */, 12600 /* sU */, 11775 /* sV */, 12600 /* sW */, 12025 /* sX */, 11250 /* sY */, 12600 /* sZ */, 12600 /* s[ */, 12600 /* s\ */, 12600 /* s] */, 12600 /* s^ */, 11350 /* s_ */, 12600 /* s` */, 12600 /* sa */, 12600 /* sb */, 12600 /* sc */, 12600 /* sd */, 12600 /* se */, 12525 /* sf */, 12600 /* sg */, 12600 /* sh */, 12600 /* si */, 12600 /* sj */, 12600 /* sk */, 12600 /* sl */, 12600 /* sm */, 12600 /* sn */, 12600 /* so */, 12600 /* sp */, 12600 /* sq */, 12600 /* sr */, 12425 /* ss */, 12400 /* st */, 12600 /* su */, 12300 /* sv */, 12350 /* sw */, 12175 /* sx */, 12375 /* sy */, 12350 /* sz */, 12600 /* s{ */, 12600 /* s| */, 12600 /* s} */, 12600 /* s~ */}, - {9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t! */, 9575 /* t" */, 9575 /* t# */, 9575 /* t$ */, 9575 /* t% */, 9325 /* t& */, 9575 /* t' */, 9575 /* t( */, 9575 /* t) */, 9575 /* t* */, 9575 /* t+ */, 9575 /* t, */, 9325 /* t- */, 9575 /* t. */, 9575 /* t/ */, 9575 /* t0 */, 9575 /* t1 */, 9575 /* t2 */, 9575 /* t3 */, 9575 /* t4 */, 9575 /* t5 */, 9575 /* t6 */, 9575 /* t7 */, 9575 /* t8 */, 9575 /* t9 */, 9575 /* t: */, 9575 /* t; */, 9575 /* t< */, 9575 /* t= */, 9575 /* t> */, 9575 /* t? 
*/, 9575 /* t@ */, 9725 /* tA */, 9575 /* tB */, 9575 /* tC */, 9575 /* tD */, 9575 /* tE */, 9575 /* tF */, 9575 /* tG */, 9575 /* tH */, 9575 /* tI */, 9575 /* tJ */, 9575 /* tK */, 9575 /* tL */, 9575 /* tM */, 9575 /* tN */, 9575 /* tO */, 9575 /* tP */, 9575 /* tQ */, 9575 /* tR */, 9575 /* tS */, 8975 /* tT */, 9575 /* tU */, 9575 /* tV */, 9575 /* tW */, 9575 /* tX */, 9075 /* tY */, 9575 /* tZ */, 9575 /* t[ */, 9575 /* t\ */, 9575 /* t] */, 9575 /* t^ */, 10075 /* t_ */, 9575 /* t` */, 9575 /* ta */, 9575 /* tb */, 9475 /* tc */, 9475 /* td */, 9475 /* te */, 9575 /* tf */, 9575 /* tg */, 9575 /* th */, 9575 /* ti */, 9575 /* tj */, 9575 /* tk */, 9575 /* tl */, 9575 /* tm */, 9575 /* tn */, 9475 /* to */, 9575 /* tp */, 9475 /* tq */, 9575 /* tr */, 9575 /* ts */, 9450 /* tt */, 9575 /* tu */, 9575 /* tv */, 9575 /* tw */, 9575 /* tx */, 9575 /* ty */, 9575 /* tz */, 9575 /* t{ */, 9575 /* t| */, 9575 /* t} */, 9575 /* t~ */}, - {14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u! */, 14900 /* u" */, 14900 /* u# */, 14900 /* u$ */, 14900 /* u% */, 14900 /* u& */, 14900 /* u' */, 14900 /* u( */, 14900 /* u) */, 14900 /* u* */, 14900 /* u+ */, 14900 /* u, */, 14900 /* u- */, 14900 /* u. */, 14900 /* u/ */, 14900 /* u0 */, 14900 /* u1 */, 14900 /* u2 */, 14900 /* u3 */, 14900 /* u4 */, 14900 /* u5 */, 14900 /* u6 */, 14900 /* u7 */, 14900 /* u8 */, 14900 /* u9 */, 14900 /* u: */, 14900 /* u; */, 14900 /* u< */, 14900 /* u= */, 14900 /* u> */, 14900 /* u? */, 14900 /* u@ */, 14900 /* uA */, 14900 /* uB */, 14900 /* uC */, 14900 /* uD */, 14900 /* uE */, 14900 /* uF */, 14900 /* uG */, 14900 /* uH */, 14900 /* uI */, 14900 /* uJ */, 14900 /* uK */, 14900 /* uL */, 14900 /* uM */, 14900 /* uN */, 14900 /* uO */, 14900 /* uP */, 14900 /* uQ */, 14900 /* uR */, 14900 /* uS */, 13775 /* uT */, 14900 /* uU */, 14275 /* uV */, 14650 /* uW */, 14900 /* uX */, 13650 /* uY */, 14900 /* uZ */, 14900 /* u[ */, 14900 /* u\ */, 14900 /* u] */, 14900 /* u^ */, 14900 /* u_ */, 14900 /* u` */, 14900 /* ua */, 14900 /* ub */, 14900 /* uc */, 14900 /* ud */, 14900 /* ue */, 14900 /* uf */, 14900 /* ug */, 14900 /* uh */, 14900 /* ui */, 14900 /* uj */, 14900 /* uk */, 14900 /* ul */, 14900 /* um */, 14900 /* un */, 14900 /* uo */, 14900 /* up */, 14900 /* uq */, 14900 /* ur */, 14900 /* us */, 14900 /* ut */, 14900 /* uu */, 14900 /* uv */, 14900 /* uw */, 14900 /* ux */, 14900 /* uy */, 14900 /* uz */, 14900 /* u{ */, 14900 /* u| */, 14900 /* u} */, 14900 /* u~ */}, - {13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v! 
*/, 14075 /* v" */, 13450 /* v# */, 13450 /* v$ */, 13450 /* v% */, 12950 /* v& */, 14075 /* v' */, 13450 /* v( */, 13450 /* v) */, 14075 /* v* */, 13450 /* v+ */, 12250 /* v, */, 13325 /* v- */, 12250 /* v. */, 13075 /* v/ */, 13450 /* v0 */, 13450 /* v1 */, 13450 /* v2 */, 13450 /* v3 */, 13450 /* v4 */, 13450 /* v5 */, 13450 /* v6 */, 13450 /* v7 */, 13450 /* v8 */, 13450 /* v9 */, 13450 /* v: */, 13450 /* v; */, 13450 /* v< */, 13450 /* v= */, 13450 /* v> */, 13450 /* v? */, 13450 /* v@ */, 12700 /* vA */, 13450 /* vB */, 13450 /* vC */, 13450 /* vD */, 13450 /* vE */, 13450 /* vF */, 13450 /* vG */, 13450 /* vH */, 13325 /* vI */, 12700 /* vJ */, 13450 /* vK */, 13450 /* vL */, 13450 /* vM */, 13450 /* vN */, 13450 /* vO */, 13450 /* vP */, 13450 /* vQ */, 13450 /* vR */, 13450 /* vS */, 12825 /* vT */, 13450 /* vU */, 13200 /* vV */, 13450 /* vW */, 13450 /* vX */, 12575 /* vY */, 13450 /* vZ */, 13450 /* v[ */, 13450 /* v\ */, 13450 /* v] */, 13450 /* v^ */, 11700 /* v_ */, 13450 /* v` */, 13225 /* va */, 13450 /* vb */, 13250 /* vc */, 13250 /* vd */, 13250 /* ve */, 13600 /* vf */, 13125 /* vg */, 13450 /* vh */, 13450 /* vi */, 13450 /* vj */, 13450 /* vk */, 13450 /* vl */, 13450 /* vm */, 13450 /* vn */, 13250 /* vo */, 13450 /* vp */, 13250 /* vq */, 13450 /* vr */, 13400 /* vs */, 13450 /* vt */, 13450 /* vu */, 13450 /* vv */, 13450 /* vw */, 13450 /* vx */, 13450 /* vy */, 13350 /* vz */, 13450 /* v{ */, 13450 /* v| */, 13450 /* v} */, 13450 /* v~ */}, - {21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w! */, 21525 /* w" */, 21025 /* w# */, 21025 /* w$ */, 21025 /* w% */, 20525 /* w& */, 21525 /* w' */, 21025 /* w( */, 21025 /* w) */, 21525 /* w* */, 21025 /* w+ */, 20025 /* w, */, 20925 /* w- */, 20025 /* w. */, 21025 /* w/ */, 21025 /* w0 */, 21025 /* w1 */, 21025 /* w2 */, 21025 /* w3 */, 21025 /* w4 */, 21025 /* w5 */, 21025 /* w6 */, 21025 /* w7 */, 21025 /* w8 */, 21025 /* w9 */, 21025 /* w: */, 21025 /* w; */, 21025 /* w< */, 21025 /* w= */, 21025 /* w> */, 21025 /* w? 
*/, 20775 /* w@ */, 20525 /* wA */, 21025 /* wB */, 21025 /* wC */, 21025 /* wD */, 21025 /* wE */, 21025 /* wF */, 21025 /* wG */, 21025 /* wH */, 21025 /* wI */, 20275 /* wJ */, 21025 /* wK */, 21025 /* wL */, 21025 /* wM */, 21025 /* wN */, 21025 /* wO */, 21025 /* wP */, 21025 /* wQ */, 21025 /* wR */, 21025 /* wS */, 20150 /* wT */, 21025 /* wU */, 20775 /* wV */, 20775 /* wW */, 21025 /* wX */, 20025 /* wY */, 21025 /* wZ */, 21025 /* w[ */, 21025 /* w\ */, 21025 /* w] */, 21025 /* w^ */, 19775 /* w_ */, 21025 /* w` */, 20675 /* wa */, 21025 /* wb */, 20775 /* wc */, 20775 /* wd */, 20775 /* we */, 21025 /* wf */, 20600 /* wg */, 21025 /* wh */, 21025 /* wi */, 21025 /* wj */, 21025 /* wk */, 21025 /* wl */, 21025 /* wm */, 21025 /* wn */, 20775 /* wo */, 21025 /* wp */, 20775 /* wq */, 21025 /* wr */, 20925 /* ws */, 21025 /* wt */, 21025 /* wu */, 21025 /* wv */, 21025 /* ww */, 21025 /* wx */, 21025 /* wy */, 21025 /* wz */, 21025 /* w{ */, 21025 /* w| */, 21025 /* w} */, 21025 /* w~ */}, - {14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x! */, 14000 /* x" */, 14000 /* x# */, 14000 /* x$ */, 14000 /* x% */, 13250 /* x& */, 14000 /* x' */, 14000 /* x( */, 14000 /* x) */, 14000 /* x* */, 14000 /* x+ */, 14000 /* x, */, 13125 /* x- */, 14000 /* x. */, 14000 /* x/ */, 14000 /* x0 */, 14000 /* x1 */, 14000 /* x2 */, 14000 /* x3 */, 14000 /* x4 */, 14000 /* x5 */, 14000 /* x6 */, 14000 /* x7 */, 14000 /* x8 */, 14000 /* x9 */, 14000 /* x: */, 14000 /* x; */, 14000 /* x< */, 14000 /* x= */, 14000 /* x> */, 14000 /* x? */, 13500 /* x@ */, 14000 /* xA */, 14000 /* xB */, 14000 /* xC */, 14000 /* xD */, 14000 /* xE */, 14000 /* xF */, 14000 /* xG */, 14000 /* xH */, 14000 /* xI */, 13625 /* xJ */, 14000 /* xK */, 14000 /* xL */, 14000 /* xM */, 14000 /* xN */, 14000 /* xO */, 14000 /* xP */, 14000 /* xQ */, 14000 /* xR */, 14000 /* xS */, 13125 /* xT */, 14000 /* xU */, 13750 /* xV */, 13750 /* xW */, 14000 /* xX */, 13000 /* xY */, 14000 /* xZ */, 14000 /* x[ */, 14000 /* x\ */, 14000 /* x] */, 14000 /* x^ */, 14500 /* x_ */, 14000 /* x` */, 14000 /* xa */, 14000 /* xb */, 13600 /* xc */, 13600 /* xd */, 13600 /* xe */, 14000 /* xf */, 14000 /* xg */, 14000 /* xh */, 14000 /* xi */, 14000 /* xj */, 14000 /* xk */, 14000 /* xl */, 14000 /* xm */, 14000 /* xn */, 13600 /* xo */, 14000 /* xp */, 13600 /* xq */, 14000 /* xr */, 13900 /* xs */, 14000 /* xt */, 14000 /* xu */, 14000 /* xv */, 14000 /* xw */, 14000 /* xx */, 14000 /* xy */, 14000 /* xz */, 14000 /* x{ */, 14000 /* x| */, 14000 /* x} */, 14000 /* x~ */}, - {13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y! 
*/, 13850 /* y" */, 13350 /* y# */, 13350 /* y$ */, 13350 /* y% */, 12600 /* y& */, 13850 /* y' */, 13350 /* y( */, 13350 /* y) */, 13850 /* y* */, 13350 /* y+ */, 12225 /* y, */, 13225 /* y- */, 12225 /* y. */, 13225 /* y/ */, 13350 /* y0 */, 13350 /* y1 */, 13350 /* y2 */, 13350 /* y3 */, 13350 /* y4 */, 13350 /* y5 */, 13350 /* y6 */, 13350 /* y7 */, 13350 /* y8 */, 13350 /* y9 */, 13350 /* y: */, 13350 /* y; */, 13350 /* y< */, 13350 /* y= */, 13350 /* y> */, 13350 /* y? */, 13100 /* y@ */, 12975 /* yA */, 13350 /* yB */, 13350 /* yC */, 13350 /* yD */, 13350 /* yE */, 13350 /* yF */, 13350 /* yG */, 13350 /* yH */, 13225 /* yI */, 12600 /* yJ */, 13350 /* yK */, 13350 /* yL */, 13350 /* yM */, 13350 /* yN */, 13350 /* yO */, 13350 /* yP */, 13350 /* yQ */, 13350 /* yR */, 13350 /* yS */, 12600 /* yT */, 13350 /* yU */, 13100 /* yV */, 13350 /* yW */, 13350 /* yX */, 12475 /* yY */, 13350 /* yZ */, 13350 /* y[ */, 13350 /* y\ */, 13350 /* y] */, 13350 /* y^ */, 11350 /* y_ */, 13350 /* y` */, 13000 /* ya */, 13350 /* yb */, 13150 /* yc */, 13150 /* yd */, 13150 /* ye */, 13525 /* yf */, 12825 /* yg */, 13350 /* yh */, 13350 /* yi */, 13350 /* yj */, 13350 /* yk */, 13350 /* yl */, 13350 /* ym */, 13350 /* yn */, 13150 /* yo */, 13350 /* yp */, 13150 /* yq */, 13350 /* yr */, 13125 /* ys */, 13350 /* yt */, 13350 /* yu */, 13350 /* yv */, 13350 /* yw */, 13350 /* yx */, 13350 /* yy */, 13250 /* yz */, 13350 /* y{ */, 13350 /* y| */, 13350 /* y} */, 13350 /* y~ */}, - {12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z! */, 13075 /* z" */, 12950 /* z# */, 12950 /* z$ */, 12950 /* z% */, 12700 /* z& */, 13075 /* z' */, 12950 /* z( */, 12950 /* z) */, 13075 /* z* */, 12950 /* z+ */, 13200 /* z, */, 12450 /* z- */, 13200 /* z. */, 12950 /* z/ */, 12950 /* z0 */, 12950 /* z1 */, 12950 /* z2 */, 12950 /* z3 */, 12950 /* z4 */, 12950 /* z5 */, 12950 /* z6 */, 12950 /* z7 */, 12950 /* z8 */, 12950 /* z9 */, 12950 /* z: */, 12950 /* z; */, 12950 /* z< */, 12950 /* z= */, 12950 /* z> */, 12950 /* z? 
*/, 12700 /* z@ */, 13200 /* zA */, 12950 /* zB */, 12950 /* zC */, 12950 /* zD */, 12950 /* zE */, 12950 /* zF */, 12950 /* zG */, 12950 /* zH */, 12950 /* zI */, 12950 /* zJ */, 12950 /* zK */, 12950 /* zL */, 12950 /* zM */, 12950 /* zN */, 12950 /* zO */, 12950 /* zP */, 12950 /* zQ */, 12950 /* zR */, 12950 /* zS */, 12075 /* zT */, 12950 /* zU */, 12700 /* zV */, 12825 /* zW */, 13200 /* zX */, 12200 /* zY */, 12950 /* zZ */, 12950 /* z[ */, 12950 /* z\ */, 12950 /* z] */, 12950 /* z^ */, 13075 /* z_ */, 12950 /* z` */, 12950 /* za */, 12950 /* zb */, 12650 /* zc */, 12700 /* zd */, 12650 /* ze */, 12950 /* zf */, 12950 /* zg */, 12950 /* zh */, 12950 /* zi */, 12950 /* zj */, 12950 /* zk */, 12950 /* zl */, 12950 /* zm */, 12950 /* zn */, 12650 /* zo */, 12950 /* zp */, 12700 /* zq */, 12950 /* zr */, 12950 /* zs */, 12950 /* zt */, 12950 /* zu */, 12950 /* zv */, 12950 /* zw */, 12950 /* zx */, 12950 /* zy */, 12950 /* zz */, 12950 /* z{ */, 12950 /* z| */, 12950 /* z} */, 12950 /* z~ */}, - {9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* {! */, 9300 /* {" */, 9300 /* {# */, 9300 /* {$ */, 9300 /* {% */, 9300 /* {& */, 9300 /* {' */, 9300 /* {( */, 9300 /* {) */, 9300 /* {* */, 9300 /* {+ */, 9300 /* {, */, 9300 /* {- */, 9300 /* {. */, 9300 /* {/ */, 9300 /* {0 */, 9300 /* {1 */, 9300 /* {2 */, 9300 /* {3 */, 9300 /* {4 */, 9300 /* {5 */, 9300 /* {6 */, 9300 /* {7 */, 9300 /* {8 */, 9300 /* {9 */, 9300 /* {: */, 9300 /* {; */, 9300 /* {< */, 9300 /* {= */, 9300 /* {> */, 9300 /* {? */, 9300 /* {@ */, 9300 /* {A */, 9300 /* {B */, 9300 /* {C */, 9300 /* {D */, 9300 /* {E */, 9300 /* {F */, 9300 /* {G */, 9300 /* {H */, 9300 /* {I */, 9300 /* {J */, 9300 /* {K */, 9300 /* {L */, 9300 /* {M */, 9300 /* {N */, 9300 /* {O */, 9300 /* {P */, 9300 /* {Q */, 9300 /* {R */, 9300 /* {S */, 9300 /* {T */, 9300 /* {U */, 9300 /* {V */, 9300 /* {W */, 9300 /* {X */, 9300 /* {Y */, 9300 /* {Z */, 9300 /* {[ */, 9300 /* {\ */, 9300 /* {] */, 9300 /* {^ */, 9300 /* {_ */, 9300 /* {` */, 9300 /* {a */, 9300 /* {b */, 9300 /* {c */, 9300 /* {d */, 9300 /* {e */, 9300 /* {f */, 9300 /* {g */, 9300 /* {h */, 9300 /* {i */, 9550 /* {j */, 9300 /* {k */, 9300 /* {l */, 9300 /* {m */, 9300 /* {n */, 9300 /* {o */, 9300 /* {p */, 9300 /* {q */, 9300 /* {r */, 9300 /* {s */, 9300 /* {t */, 9300 /* {u */, 9300 /* {v */, 9300 /* {w */, 9300 /* {x */, 9300 /* {y */, 9300 /* {z */, 9300 /* {{ */, 9300 /* {| */, 9300 /* {} */, 9300 /* {~ */}, - {10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* |! 
*/, 10050 /* |" */, 10050 /* |# */, 10050 /* |$ */, 10050 /* |% */, 10050 /* |& */, 10050 /* |' */, 10050 /* |( */, 10050 /* |) */, 10050 /* |* */, 10050 /* |+ */, 10050 /* |, */, 10050 /* |- */, 10050 /* |. */, 10050 /* |/ */, 10050 /* |0 */, 10050 /* |1 */, 10050 /* |2 */, 10050 /* |3 */, 10050 /* |4 */, 10050 /* |5 */, 10050 /* |6 */, 10050 /* |7 */, 10050 /* |8 */, 10050 /* |9 */, 10050 /* |: */, 10050 /* |; */, 10050 /* |< */, 10050 /* |= */, 10050 /* |> */, 10050 /* |? */, 10050 /* |@ */, 10050 /* |A */, 10050 /* |B */, 10050 /* |C */, 10050 /* |D */, 10050 /* |E */, 10050 /* |F */, 10050 /* |G */, 10050 /* |H */, 10050 /* |I */, 10050 /* |J */, 10050 /* |K */, 10050 /* |L */, 10050 /* |M */, 10050 /* |N */, 10050 /* |O */, 10050 /* |P */, 10050 /* |Q */, 10050 /* |R */, 10050 /* |S */, 10050 /* |T */, 10050 /* |U */, 10050 /* |V */, 10050 /* |W */, 10050 /* |X */, 10050 /* |Y */, 10050 /* |Z */, 10050 /* |[ */, 10050 /* |\ */, 10050 /* |] */, 10050 /* |^ */, 10050 /* |_ */, 10050 /* |` */, 10050 /* |a */, 10050 /* |b */, 10050 /* |c */, 10050 /* |d */, 10050 /* |e */, 10050 /* |f */, 10050 /* |g */, 10050 /* |h */, 10050 /* |i */, 10050 /* |j */, 10050 /* |k */, 10050 /* |l */, 10050 /* |m */, 10050 /* |n */, 10050 /* |o */, 10050 /* |p */, 10050 /* |q */, 10050 /* |r */, 10050 /* |s */, 10050 /* |t */, 10050 /* |u */, 10050 /* |v */, 10050 /* |w */, 10050 /* |x */, 10050 /* |y */, 10050 /* |z */, 10050 /* |{ */, 10050 /* || */, 10050 /* |} */, 10050 /* |~ */}, - {9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* }! */, 9300 /* }" */, 9300 /* }# */, 9300 /* }$ */, 9300 /* }% */, 9300 /* }& */, 9300 /* }' */, 9300 /* }( */, 9300 /* }) */, 9300 /* }* */, 9300 /* }+ */, 9300 /* }, */, 9300 /* }- */, 9300 /* }. */, 9300 /* }/ */, 9300 /* }0 */, 9300 /* }1 */, 9300 /* }2 */, 9300 /* }3 */, 9300 /* }4 */, 9300 /* }5 */, 9300 /* }6 */, 9300 /* }7 */, 9300 /* }8 */, 9300 /* }9 */, 9300 /* }: */, 9300 /* }; */, 9300 /* }< */, 9300 /* }= */, 9300 /* }> */, 9300 /* }? 
*/, 9300 /* }@ */, 9300 /* }A */, 9300 /* }B */, 9300 /* }C */, 9300 /* }D */, 9300 /* }E */, 9300 /* }F */, 9300 /* }G */, 9300 /* }H */, 9300 /* }I */, 9300 /* }J */, 9300 /* }K */, 9300 /* }L */, 9300 /* }M */, 9300 /* }N */, 9300 /* }O */, 9300 /* }P */, 9300 /* }Q */, 9300 /* }R */, 9300 /* }S */, 9300 /* }T */, 9300 /* }U */, 9300 /* }V */, 9300 /* }W */, 9300 /* }X */, 9300 /* }Y */, 9300 /* }Z */, 9300 /* }[ */, 9300 /* }\ */, 9300 /* }] */, 9300 /* }^ */, 9300 /* }_ */, 9300 /* }` */, 9300 /* }a */, 9300 /* }b */, 9300 /* }c */, 9300 /* }d */, 9300 /* }e */, 9300 /* }f */, 9300 /* }g */, 9300 /* }h */, 9300 /* }i */, 9300 /* }j */, 9300 /* }k */, 9300 /* }l */, 9300 /* }m */, 9300 /* }n */, 9300 /* }o */, 9300 /* }p */, 9300 /* }q */, 9300 /* }r */, 9300 /* }s */, 9300 /* }t */, 9300 /* }u */, 9300 /* }v */, 9300 /* }w */, 9300 /* }x */, 9300 /* }y */, 9300 /* }z */, 9300 /* }{ */, 9300 /* }| */, 9300 /* }} */, 9300 /* }~ */}, - {15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~! */, 15000 /* ~" */, 15000 /* ~# */, 15000 /* ~$ */, 15000 /* ~% */, 15000 /* ~& */, 15000 /* ~' */, 15000 /* ~( */, 15000 /* ~) */, 15000 /* ~* */, 15000 /* ~+ */, 15000 /* ~, */, 15000 /* ~- */, 15000 /* ~. */, 15000 /* ~/ */, 15000 /* ~0 */, 15000 /* ~1 */, 15000 /* ~2 */, 15000 /* ~3 */, 15000 /* ~4 */, 15000 /* ~5 */, 15000 /* ~6 */, 15000 /* ~7 */, 15000 /* ~8 */, 15000 /* ~9 */, 15000 /* ~: */, 15000 /* ~; */, 15000 /* ~< */, 15000 /* ~= */, 15000 /* ~> */, 15000 /* ~? 
*/, 15000 /* ~@ */, 15000 /* ~A */, 15000 /* ~B */, 15000 /* ~C */, 15000 /* ~D */, 15000 /* ~E */, 15000 /* ~F */, 15000 /* ~G */, 15000 /* ~H */, 15000 /* ~I */, 15000 /* ~J */, 15000 /* ~K */, 15000 /* ~L */, 15000 /* ~M */, 15000 /* ~N */, 15000 /* ~O */, 15000 /* ~P */, 15000 /* ~Q */, 15000 /* ~R */, 15000 /* ~S */, 15000 /* ~T */, 15000 /* ~U */, 15000 /* ~V */, 15000 /* ~W */, 15000 /* ~X */, 15000 /* ~Y */, 15000 /* ~Z */, 15000 /* ~[ */, 15000 /* ~\ */, 15000 /* ~] */, 15000 /* ~^ */, 15000 /* ~_ */, 15000 /* ~` */, 15000 /* ~a */, 15000 /* ~b */, 15000 /* ~c */, 15000 /* ~d */, 15000 /* ~e */, 15000 /* ~f */, 15000 /* ~g */, 15000 /* ~h */, 15000 /* ~i */, 15000 /* ~j */, 15000 /* ~k */, 15000 /* ~l */, 15000 /* ~m */, 15000 /* ~n */, 15000 /* ~o */, 15000 /* ~p */, 15000 /* ~q */, 15000 /* ~r */, 15000 /* ~s */, 15000 /* ~t */, 15000 /* ~u */, 15000 /* ~v */, 15000 /* ~w */, 15000 /* ~x */, 15000 /* ~y */, 15000 /* ~z */, 15000 /* ~{ */, 15000 /* ~| */, 15000 /* ~} */, 15000 /* ~~ */}, -}; - -static const unsigned short int ibm_plex_sans_bold_250_em_size = 20475; - -static double ibm_plex_sans_bold_word_width(const char *s, double fontSize) { - unsigned long int totalWidth = 0; - - while(*s) { - if (IS_UTF8_STARTBYTE(*s)) { - s++; - - while(IS_UTF8_BYTE(*s) && !IS_UTF8_STARTBYTE(*s)) - s++; - - totalWidth += ibm_plex_sans_bold_250_em_size; - } - else { - if (*s >= 0 && *s <= 126) // Check if it's a valid ASCII character (including '\0') - totalWidth += ibm_plex_sans_bold_250[(unsigned char)*s][(unsigned char)s[1]]; - - s++; - } - } - - // Convert the width from the encoded value to the actual float value - double actualWidth = (double)totalWidth / 100.0; - - // Scale the width proportionally based on the desired font size - double scaledWidth = actualWidth * (fontSize / 250.0); - - return scaledWidth; -} - -/* - - - - - - - - - - - - - - I - - - TROUBLE - - - - */ - -static bool word_goes_below_baseline(const char *love) { - const char *s = love; - while(*s) { - switch(*s) { - case 'g': - case 'j': - case 'p': - case 'q': - case 'y': - case 'Q': - return true; - } - - s++; - } - - return false; -} - -static void generate_ilove_svg(BUFFER *wb, const char *love) { - const char *i = "I"; - const char *stretch = "spacing"; - - double font_size = 250.0; - double border_width = 25.0; - double logo_scale = 0.215; - double logo_width = 1000.0 * logo_scale; - double i_width = ibm_plex_sans_bold_word_width(i, font_size); - double first_line_width = i_width + logo_width; - double second_line_font_size = font_size; - double second_line_width = ibm_plex_sans_bold_word_width(love, second_line_font_size); - bool second_line_needs_height = word_goes_below_baseline(love); - - if(second_line_width <= first_line_width) { - second_line_width = first_line_width; - stretch = "spacingAndGlyphs"; - - if(!second_line_needs_height) - second_line_font_size *= 1.10; - } - else if(second_line_width > first_line_width * 4) { - second_line_width *= 0.80; - stretch = "spacingAndGlyphs"; - second_line_font_size *= 0.90; - } - else if(second_line_width > first_line_width * 2) { - second_line_width *= 0.93; - stretch = "spacing"; - } - - double width = second_line_width + border_width * 4.0; - - buffer_flush(wb); - - buffer_sprintf(wb, "\n", - width); - - // White bounding box with rounded corners - buffer_sprintf(wb, " \n", - width, border_width * 2, border_width * 2); - - // Black background - buffer_sprintf(wb, " \n", - border_width, border_width, width - border_width * 2, border_width * 1.5, border_width * 
1.5); - - // Netdata logo - buffer_sprintf(wb, " \n", - (width - first_line_width) / 2 + i_width, border_width * 2, logo_scale); - - // first line - double first_line_baseline = font_size * 0.70 + border_width * 2; - buffer_sprintf(wb, " %s\n", - (width - first_line_width) / 2, first_line_baseline, font_size, i); - - // second line - double second_line_baseline = first_line_baseline + font_size * 0.85; - if(second_line_needs_height) - second_line_baseline = first_line_baseline + font_size * 0.78; - - buffer_sprintf(wb, " %s\n", - border_width * 2, second_line_baseline, second_line_font_size, second_line_width, stretch, love); - - buffer_sprintf(wb, ""); - - wb->content_type = CT_IMAGE_SVG_XML; -} - -int web_client_api_request_v2_ilove(RRDHOST *host __maybe_unused, struct web_client *w, char *url) { - char *love = "TROUBLE"; - - while(url) { - char *value = strsep_skip_consecutive_separators(&url, "&"); - if(!value || !*value) continue; - - char *name = strsep_skip_consecutive_separators(&value, "="); - if(!name || !*name) continue; - if(!value || !*value) continue; - - // name and value are now the parameters - // they are not null and not empty - - if(!strcmp(name, "love")) love = value; - } - -// char *s = love; -// while(*s) { -// *s = toupper(*s); -// s++; -// } - - generate_ilove_svg(w->response.data, love); - - return HTTP_RESP_OK; -} diff --git a/src/web/api/ilove/ilove.h b/src/web/api/ilove/ilove.h deleted file mode 100644 index 010c19c6b..000000000 --- a/src/web/api/ilove/ilove.h +++ /dev/null @@ -1,13 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#ifndef NETDATA_WEB_API_ILOVE_H -#define NETDATA_WEB_API_ILOVE_H 1 - -#include "libnetdata/libnetdata.h" -#include "web/server/web_client.h" - -int web_client_api_request_v2_ilove(RRDHOST *host, struct web_client *w, char *url); - -#include "web/api/web_api_v1.h" - -#endif /* NETDATA_WEB_API_ILOVE_H */ diff --git a/src/web/api/ilove/measure-text.js b/src/web/api/ilove/measure-text.js deleted file mode 100644 index e2a2a6e94..000000000 --- a/src/web/api/ilove/measure-text.js +++ /dev/null @@ -1,73 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -'use strict'; - -var path = require('path'); -var fs = require('fs'); -var PDFDocument = require('pdfkit'); -var doc = new PDFDocument({size:'A4', layout:'landscape'}); - -function loadFont(fontPaths, callback) { - for (let fontPath of fontPaths) { - try { - doc = doc.font(fontPath); - if (callback) { callback(null); } - return; // Exit once a font is loaded successfully - } catch(err) { - // Log error but continue to next font path - console.error(`Failed to load font from path: ${fontPath}. Error: ${err.message}`); - } - } - - // If we reached here, none of the fonts were loaded successfully. - console.error('All font paths failed. Stopping execution.'); - process.exit(1); // Exit with an error code -} - -loadFont(['IBMPlexSans-Bold.ttf'], function(err) { - if (err) { - console.error('Could not load any of the specified fonts.'); - } -}); - -doc = doc.fontSize(250); - -function measureCombination(charA, charB) { - return doc.widthOfString(charA + charB); -} - -function getCharRepresentation(charCode) { - return (charCode >= 32 && charCode <= 126) ? 
String.fromCharCode(charCode) : ''; -} - -function generateCombinationArray() { - let output = "static const unsigned short int ibm_plex_sans_bold_250[128][128] = {\n"; - - for (let i = 0; i <= 126; i++) { - output += " {"; // Start of inner array - for (let j = 0; j <= 126; j++) { - let charA = getCharRepresentation(i); - let charB = getCharRepresentation(j); - let width = measureCombination(charA, charB) - doc.widthOfString(charB); - let encodedWidth = Math.round(width * 100); // Multiply by 100 and round - - if(charA === '*' && charB == '/') - charB = '\\/'; - - if(charA === '/' && charB == '*') - charB = '\\*'; - - output += `${encodedWidth} /* ${charA}${charB} */`; - if (j < 126) { - output += ", "; - } - } - output += "},\n"; // End of inner array - } - output += "};\n"; // End of 2D array - - return output; -} - -console.log(generateCombinationArray()); -console.log('static const unsigned short int ibm_plex_sans_bold_250_em_size = ' + Math.round(doc.widthOfString('M') * 100) + ';'); diff --git a/src/web/api/maps/contexts_alert_statuses.c b/src/web/api/maps/contexts_alert_statuses.c new file mode 100644 index 000000000..d3565c9e8 --- /dev/null +++ b/src/web/api/maps/contexts_alert_statuses.c @@ -0,0 +1,60 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "contexts_alert_statuses.h" + +static struct { + const char *name; + uint32_t hash; + CONTEXTS_ALERT_STATUS value; +} contexts_alert_status[] = { + {"uninitialized" , 0 , CONTEXT_ALERT_UNINITIALIZED} + , {"undefined" , 0 , CONTEXT_ALERT_UNDEFINED} + , {"clear" , 0 , CONTEXT_ALERT_CLEAR} + , {"raised" , 0 , CONTEXT_ALERT_RAISED} + , {"active" , 0 , CONTEXT_ALERT_RAISED} + , {"warning" , 0 , CONTEXT_ALERT_WARNING} + , {"critical" , 0 , CONTEXT_ALERT_CRITICAL} + , {NULL , 0 , 0} +}; + +CONTEXTS_ALERT_STATUS contexts_alert_status_str_to_id(char *o) { + CONTEXTS_ALERT_STATUS ret = 0; + char *tok; + + while(o && *o && (tok = strsep_skip_consecutive_separators(&o, ", |"))) { + if(!*tok) continue; + + uint32_t hash = simple_hash(tok); + int i; + for(i = 0; contexts_alert_status[i].name ; i++) { + if (unlikely(hash == contexts_alert_status[i].hash && !strcmp(tok, contexts_alert_status[i].name))) { + ret |= contexts_alert_status[i].value; + break; + } + } + } + + return ret; +} + +void contexts_alerts_status_to_buffer_json_array(BUFFER *wb, const char *key, + CONTEXTS_ALERT_STATUS options) { + buffer_json_member_add_array(wb, key); + + CONTEXTS_ALERT_STATUS used = 0; // to prevent adding duplicates + for(int i = 0; contexts_alert_status[i].name ; i++) { + if (unlikely((contexts_alert_status[i].value & options) && !(contexts_alert_status[i].value & used))) { + const char *name = contexts_alert_status[i].name; + used |= contexts_alert_status[i].value; + + buffer_json_add_array_item_string(wb, name); + } + } + + buffer_json_array_close(wb); +} + +void contexts_alert_statuses_init(void) { + for(size_t i = 0; contexts_alert_status[i].name ; i++) + contexts_alert_status[i].hash = simple_hash(contexts_alert_status[i].name); +} diff --git a/src/web/api/maps/contexts_alert_statuses.h b/src/web/api/maps/contexts_alert_statuses.h new file mode 100644 index 000000000..1c38cb976 --- /dev/null +++ b/src/web/api/maps/contexts_alert_statuses.h @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_CONTEXTS_ALERT_STATUSES_H +#define NETDATA_CONTEXTS_ALERT_STATUSES_H + +#include "libnetdata/libnetdata.h" + +typedef enum contexts_alert_status { + CONTEXT_ALERT_UNINITIALIZED = (1 << 6), // include UNINITIALIZED alerts + 
CONTEXT_ALERT_UNDEFINED = (1 << 7), // include UNDEFINED alerts + CONTEXT_ALERT_CLEAR = (1 << 8), // include CLEAR alerts + CONTEXT_ALERT_RAISED = (1 << 9), // include WARNING & CRITICAL alerts + CONTEXT_ALERT_WARNING = (1 << 10), // include WARNING alerts + CONTEXT_ALERT_CRITICAL = (1 << 11), // include CRITICAL alerts +} CONTEXTS_ALERT_STATUS; + +#define CONTEXTS_ALERT_STATUSES (CONTEXT_ALERT_UNINITIALIZED | CONTEXT_ALERT_UNDEFINED | CONTEXT_ALERT_CLEAR | \ + CONTEXT_ALERT_RAISED | CONTEXT_ALERT_WARNING | CONTEXT_ALERT_CRITICAL) + +CONTEXTS_ALERT_STATUS contexts_alert_status_str_to_id(char *o); +void contexts_alerts_status_to_buffer_json_array(BUFFER *wb, const char *key, + CONTEXTS_ALERT_STATUS options); + +void contexts_alert_statuses_init(void); + +#endif //NETDATA_CONTEXTS_ALERT_STATUSES_H diff --git a/src/web/api/maps/contexts_options.c b/src/web/api/maps/contexts_options.c new file mode 100644 index 000000000..22e50e8d7 --- /dev/null +++ b/src/web/api/maps/contexts_options.c @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "contexts_options.h" + +static struct { + const char *name; + uint32_t hash; + CONTEXTS_OPTIONS value; +} contexts_options[] = { + {"minify" , 0 , CONTEXTS_OPTION_MINIFY} + , {"debug" , 0 , CONTEXTS_OPTION_DEBUG} + , {"config" , 0 , CONTEXTS_OPTION_ALERTS_WITH_CONFIGURATIONS} + , {"instances" , 0 , CONTEXTS_OPTION_ALERTS_WITH_INSTANCES} + , {"values" , 0 , CONTEXTS_OPTION_ALERTS_WITH_VALUES} + , {"summary" , 0 , CONTEXTS_OPTION_ALERTS_WITH_SUMMARY} + , {NULL , 0 , 0} +}; + +CONTEXTS_OPTIONS contexts_options_str_to_id(char *o) { + CONTEXTS_OPTIONS ret = 0; + char *tok; + + while(o && *o && (tok = strsep_skip_consecutive_separators(&o, ", |"))) { + if(!*tok) continue; + + uint32_t hash = simple_hash(tok); + int i; + for(i = 0; contexts_options[i].name ; i++) { + if (unlikely(hash == contexts_options[i].hash && !strcmp(tok, contexts_options[i].name))) { + ret |= contexts_options[i].value; + break; + } + } + } + + return ret; +} + +void contexts_options_to_buffer_json_array(BUFFER *wb, const char *key, CONTEXTS_OPTIONS options) { + buffer_json_member_add_array(wb, key); + + CONTEXTS_OPTIONS used = 0; // to prevent adding duplicates + for(int i = 0; contexts_options[i].name ; i++) { + if (unlikely((contexts_options[i].value & options) && !(contexts_options[i].value & used))) { + const char *name = contexts_options[i].name; + used |= contexts_options[i].value; + + buffer_json_add_array_item_string(wb, name); + } + } + + buffer_json_array_close(wb); +} + +void contexts_options_init(void) { + for(size_t i = 0; contexts_options[i].name ; i++) + contexts_options[i].hash = simple_hash(contexts_options[i].name); +} diff --git a/src/web/api/maps/contexts_options.h b/src/web/api/maps/contexts_options.h new file mode 100644 index 000000000..a21bd76ca --- /dev/null +++ b/src/web/api/maps/contexts_options.h @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_CONTEXTS_OPTIONS_H +#define NETDATA_CONTEXTS_OPTIONS_H + +#include "libnetdata/libnetdata.h" + +typedef enum contexts_options { + CONTEXTS_OPTION_MINIFY = (1 << 0), // remove JSON spaces and newlines from JSON output + CONTEXTS_OPTION_DEBUG = (1 << 1), // show the request + CONTEXTS_OPTION_ALERTS_WITH_CONFIGURATIONS = (1 << 2), // include alert configurations (used by /api/v2/alert_transitions) + CONTEXTS_OPTION_ALERTS_WITH_INSTANCES = (1 << 3), // include alert instances (used by /api/v2/alerts) + CONTEXTS_OPTION_ALERTS_WITH_VALUES = (1 << 4), // include alert latest 
values (used by /api/v2/alerts) + CONTEXTS_OPTION_ALERTS_WITH_SUMMARY = (1 << 5), // include alerts summary counters (used by /api/v2/alerts) +} CONTEXTS_OPTIONS; + +CONTEXTS_OPTIONS contexts_options_str_to_id(char *o); +void contexts_options_to_buffer_json_array(BUFFER *wb, const char *key, CONTEXTS_OPTIONS options); + +void contexts_options_init(void); + +#endif //NETDATA_CONTEXTS_OPTIONS_H diff --git a/src/web/api/maps/datasource_formats.c b/src/web/api/maps/datasource_formats.c new file mode 100644 index 000000000..33e1e7457 --- /dev/null +++ b/src/web/api/maps/datasource_formats.c @@ -0,0 +1,89 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "datasource_formats.h" + +static struct { + const char *name; + uint32_t hash; + DATASOURCE_FORMAT value; +} google_data_formats[] = { + // this is not an error - when Google requests json, it expects javascript + // https://developers.google.com/chart/interactive/docs/dev/implementing_data_source#responseformat + {"json", 0, DATASOURCE_DATATABLE_JSONP} + , {"html", 0, DATASOURCE_HTML} + , {"csv", 0, DATASOURCE_CSV} + , {"tsv-excel", 0, DATASOURCE_TSV} + + // terminator + , {NULL, 0, 0} +}; + +inline DATASOURCE_FORMAT google_data_format_str_to_id(char *name) { + uint32_t hash = simple_hash(name); + int i; + + for(i = 0; google_data_formats[i].name ; i++) { + if (unlikely(hash == google_data_formats[i].hash && !strcmp(name, google_data_formats[i].name))) { + return google_data_formats[i].value; + } + } + + return DATASOURCE_JSON; +} + +// -------------------------------------------------------------------------------------------------------------------- + +static struct { + const char *name; + uint32_t hash; + DATASOURCE_FORMAT value; +} datasource_formats[] = { + { "datatable" , 0 , DATASOURCE_DATATABLE_JSON} + , {"datasource" , 0 , DATASOURCE_DATATABLE_JSONP} + , {"json" , 0 , DATASOURCE_JSON} + , {"json2" , 0 , DATASOURCE_JSON2} + , {"jsonp" , 0 , DATASOURCE_JSONP} + , {"ssv" , 0 , DATASOURCE_SSV} + , {"csv" , 0 , DATASOURCE_CSV} + , {"tsv" , 0 , DATASOURCE_TSV} + , {"tsv-excel" , 0 , DATASOURCE_TSV} + , {"html" , 0 , DATASOURCE_HTML} + , {"array" , 0 , DATASOURCE_JS_ARRAY} + , {"ssvcomma" , 0 , DATASOURCE_SSV_COMMA} + , {"csvjsonarray" , 0 , DATASOURCE_CSV_JSON_ARRAY} + , {"markdown" , 0 , DATASOURCE_CSV_MARKDOWN} + + // terminator + , {NULL, 0, 0} +}; + +DATASOURCE_FORMAT datasource_format_str_to_id(char *name) { + uint32_t hash = simple_hash(name); + int i; + + for(i = 0; datasource_formats[i].name ; i++) { + if (unlikely(hash == datasource_formats[i].hash && !strcmp(name, datasource_formats[i].name))) { + return datasource_formats[i].value; + } + } + + return DATASOURCE_JSON; +} + +const char *rrdr_format_to_string(DATASOURCE_FORMAT format) { + for(size_t i = 0; datasource_formats[i].name ;i++) + if(unlikely(datasource_formats[i].value == format)) + return datasource_formats[i].name; + + return "unknown"; +} + +// -------------------------------------------------------------------------------------------------------------------- + +void datasource_formats_init(void) { + for(size_t i = 0; datasource_formats[i].name ; i++) + datasource_formats[i].hash = simple_hash(datasource_formats[i].name); + + for(size_t i = 0; google_data_formats[i].name ; i++) + google_data_formats[i].hash = simple_hash(google_data_formats[i].name); +} diff --git a/src/web/api/maps/datasource_formats.h b/src/web/api/maps/datasource_formats.h new file mode 100644 index 000000000..50d8a82b4 --- /dev/null +++ b/src/web/api/maps/datasource_formats.h @@ -0,0 
+1,32 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_DATASOURCE_FORMATS_H +#define NETDATA_DATASOURCE_FORMATS_H + +#include "libnetdata/libnetdata.h" + +// type of JSON generations +typedef enum { + DATASOURCE_JSON = 0, + DATASOURCE_DATATABLE_JSON, + DATASOURCE_DATATABLE_JSONP, + DATASOURCE_SSV, + DATASOURCE_CSV, + DATASOURCE_JSONP, + DATASOURCE_TSV, + DATASOURCE_HTML, + DATASOURCE_JS_ARRAY, + DATASOURCE_SSV_COMMA, + DATASOURCE_CSV_JSON_ARRAY, + DATASOURCE_CSV_MARKDOWN, + DATASOURCE_JSON2, +} DATASOURCE_FORMAT; + +DATASOURCE_FORMAT datasource_format_str_to_id(char *name); +const char *rrdr_format_to_string(DATASOURCE_FORMAT format); + +DATASOURCE_FORMAT google_data_format_str_to_id(char *name); + +void datasource_formats_init(void); + +#endif //NETDATA_DATASOURCE_FORMATS_H diff --git a/src/web/api/maps/maps.h b/src/web/api/maps/maps.h new file mode 100644 index 000000000..25d210235 --- /dev/null +++ b/src/web/api/maps/maps.h @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_MAPS_H +#define NETDATA_MAPS_H + +#include "libnetdata/libnetdata.h" +#include "datasource_formats.h" +#include "contexts_options.h" +#include "rrdr_options.h" +#include "contexts_alert_statuses.h" + +#endif //NETDATA_MAPS_H diff --git a/src/web/api/maps/rrdr_options.c b/src/web/api/maps/rrdr_options.c new file mode 100644 index 000000000..41161d802 --- /dev/null +++ b/src/web/api/maps/rrdr_options.c @@ -0,0 +1,139 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "rrdr_options.h" + +static struct { + const char *name; + uint32_t hash; + RRDR_OPTIONS value; +} rrdr_options[] = { + { "nonzero" , 0 , RRDR_OPTION_NONZERO} + , {"flip" , 0 , RRDR_OPTION_REVERSED} + , {"reversed" , 0 , RRDR_OPTION_REVERSED} + , {"reverse" , 0 , RRDR_OPTION_REVERSED} + , {"jsonwrap" , 0 , RRDR_OPTION_JSON_WRAP} + , {"min2max" , 0 , RRDR_OPTION_DIMS_MIN2MAX} // rrdr2value() only + , {"average" , 0 , RRDR_OPTION_DIMS_AVERAGE} // rrdr2value() only + , {"min" , 0 , RRDR_OPTION_DIMS_MIN} // rrdr2value() only + , {"max" , 0 , RRDR_OPTION_DIMS_MAX} // rrdr2value() only + , {"ms" , 0 , RRDR_OPTION_MILLISECONDS} + , {"milliseconds" , 0 , RRDR_OPTION_MILLISECONDS} + , {"absolute" , 0 , RRDR_OPTION_ABSOLUTE} + , {"abs" , 0 , RRDR_OPTION_ABSOLUTE} + , {"absolute_sum" , 0 , RRDR_OPTION_ABSOLUTE} + , {"absolute-sum" , 0 , RRDR_OPTION_ABSOLUTE} + , {"display_absolute" , 0 , RRDR_OPTION_DISPLAY_ABS} + , {"display-absolute" , 0 , RRDR_OPTION_DISPLAY_ABS} + , {"seconds" , 0 , RRDR_OPTION_SECONDS} + , {"null2zero" , 0 , RRDR_OPTION_NULL2ZERO} + , {"objectrows" , 0 , RRDR_OPTION_OBJECTSROWS} + , {"google_json" , 0 , RRDR_OPTION_GOOGLE_JSON} + , {"google-json" , 0 , RRDR_OPTION_GOOGLE_JSON} + , {"percentage" , 0 , RRDR_OPTION_PERCENTAGE} + , {"unaligned" , 0 , RRDR_OPTION_NOT_ALIGNED} + , {"match_ids" , 0 , RRDR_OPTION_MATCH_IDS} + , {"match-ids" , 0 , RRDR_OPTION_MATCH_IDS} + , {"match_names" , 0 , RRDR_OPTION_MATCH_NAMES} + , {"match-names" , 0 , RRDR_OPTION_MATCH_NAMES} + , {"anomaly-bit" , 0 , RRDR_OPTION_ANOMALY_BIT} + , {"selected-tier" , 0 , RRDR_OPTION_SELECTED_TIER} + , {"raw" , 0 , RRDR_OPTION_RETURN_RAW} + , {"jw-anomaly-rates" , 0 , RRDR_OPTION_RETURN_JWAR} + , {"natural-points" , 0 , RRDR_OPTION_NATURAL_POINTS} + , {"virtual-points" , 0 , RRDR_OPTION_VIRTUAL_POINTS} + , {"all-dimensions" , 0 , RRDR_OPTION_ALL_DIMENSIONS} + , {"details" , 0 , RRDR_OPTION_SHOW_DETAILS} + , {"debug" , 0 , RRDR_OPTION_DEBUG} + , {"plan" , 0 , RRDR_OPTION_DEBUG} + , {"minify" , 0 , RRDR_OPTION_MINIFY} + , 
{"group-by-labels" , 0 , RRDR_OPTION_GROUP_BY_LABELS} + , {"label-quotes" , 0 , RRDR_OPTION_LABEL_QUOTES} + , {NULL , 0 , 0} +}; + +RRDR_OPTIONS rrdr_options_parse_one(const char *o) { + RRDR_OPTIONS ret = 0; + + if(!o || !*o) return ret; + + uint32_t hash = simple_hash(o); + int i; + for(i = 0; rrdr_options[i].name ; i++) { + if (unlikely(hash == rrdr_options[i].hash && !strcmp(o, rrdr_options[i].name))) { + ret |= rrdr_options[i].value; + break; + } + } + + return ret; +} + +RRDR_OPTIONS rrdr_options_parse(char *o) { + RRDR_OPTIONS ret = 0; + char *tok; + + while(o && *o && (tok = strsep_skip_consecutive_separators(&o, ", |"))) { + if(!*tok) continue; + ret |= rrdr_options_parse_one(tok); + } + + return ret; +} + +void rrdr_options_to_buffer_json_array(BUFFER *wb, const char *key, RRDR_OPTIONS options) { + buffer_json_member_add_array(wb, key); + + RRDR_OPTIONS used = 0; // to prevent adding duplicates + for(int i = 0; rrdr_options[i].name ; i++) { + if (unlikely((rrdr_options[i].value & options) && !(rrdr_options[i].value & used))) { + const char *name = rrdr_options[i].name; + used |= rrdr_options[i].value; + + buffer_json_add_array_item_string(wb, name); + } + } + + buffer_json_array_close(wb); +} + +void rrdr_options_to_buffer(BUFFER *wb, RRDR_OPTIONS options) { + RRDR_OPTIONS used = 0; // to prevent adding duplicates + size_t added = 0; + for(int i = 0; rrdr_options[i].name ; i++) { + if (unlikely((rrdr_options[i].value & options) && !(rrdr_options[i].value & used))) { + const char *name = rrdr_options[i].name; + used |= rrdr_options[i].value; + + if(added++) buffer_strcat(wb, " "); + buffer_strcat(wb, name); + } + } +} + +void web_client_api_request_data_vX_options_to_string(char *buf, size_t size, RRDR_OPTIONS options) { + char *write = buf; + char *end = &buf[size - 1]; + + RRDR_OPTIONS used = 0; // to prevent adding duplicates + int added = 0; + for(int i = 0; rrdr_options[i].name ; i++) { + if (unlikely((rrdr_options[i].value & options) && !(rrdr_options[i].value & used))) { + const char *name = rrdr_options[i].name; + used |= rrdr_options[i].value; + + if(added && write < end) + *write++ = ','; + + while(*name && write < end) + *write++ = *name++; + + added++; + } + } + *write = *end = '\0'; +} + +void rrdr_options_init(void) { + for(size_t i = 0; rrdr_options[i].name ; i++) + rrdr_options[i].hash = simple_hash(rrdr_options[i].name); +} diff --git a/src/web/api/maps/rrdr_options.h b/src/web/api/maps/rrdr_options.h new file mode 100644 index 000000000..4b6697dba --- /dev/null +++ b/src/web/api/maps/rrdr_options.h @@ -0,0 +1,52 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_RRDR_OPTIONS_H +#define NETDATA_RRDR_OPTIONS_H + +#include "libnetdata/libnetdata.h" + +typedef enum rrdr_options { + RRDR_OPTION_NONZERO = (1 << 0), // don't output dimensions with just zero values + RRDR_OPTION_REVERSED = (1 << 1), // output the rows in reverse order (oldest to newest) + RRDR_OPTION_ABSOLUTE = (1 << 2), // values positive, for DATASOURCE_SSV before summing + RRDR_OPTION_DIMS_MIN2MAX = (1 << 3), // when adding dimensions, use max - min, instead of sum + RRDR_OPTION_DIMS_AVERAGE = (1 << 4), // when adding dimensions, use average, instead of sum + RRDR_OPTION_DIMS_MIN = (1 << 5), // when adding dimensions, use minimum, instead of sum + RRDR_OPTION_DIMS_MAX = (1 << 6), // when adding dimensions, use maximum, instead of sum + RRDR_OPTION_SECONDS = (1 << 7), // output seconds, instead of dates + RRDR_OPTION_MILLISECONDS = (1 << 8), // output milliseconds, instead of dates + 
RRDR_OPTION_NULL2ZERO = (1 << 9), // do not show nulls, convert them to zeros + RRDR_OPTION_OBJECTSROWS = (1 << 10), // each row of values should be an object, not an array + RRDR_OPTION_GOOGLE_JSON = (1 << 11), // comply with google JSON/JSONP specs + RRDR_OPTION_JSON_WRAP = (1 << 12), // wrap the response in a JSON header with info about the result + RRDR_OPTION_LABEL_QUOTES = (1 << 13), // in CSV output, wrap header labels in double quotes + RRDR_OPTION_PERCENTAGE = (1 << 14), // give values as percentage of total + RRDR_OPTION_NOT_ALIGNED = (1 << 15), // do not align charts for persistent timeframes + RRDR_OPTION_DISPLAY_ABS = (1 << 16), // for badges, display the absolute value, but calculate colors with sign + RRDR_OPTION_MATCH_IDS = (1 << 17), // when filtering dimensions, match only IDs + RRDR_OPTION_MATCH_NAMES = (1 << 18), // when filtering dimensions, match only names + RRDR_OPTION_NATURAL_POINTS = (1 << 19), // return the natural points of the database + RRDR_OPTION_VIRTUAL_POINTS = (1 << 20), // return virtual points + RRDR_OPTION_ANOMALY_BIT = (1 << 21), // Return the anomaly bit stored in each collected_number + RRDR_OPTION_RETURN_RAW = (1 << 22), // Return raw data for aggregating across multiple nodes + RRDR_OPTION_RETURN_JWAR = (1 << 23), // Return anomaly rates in jsonwrap + RRDR_OPTION_SELECTED_TIER = (1 << 24), // Use the selected tier for the query + RRDR_OPTION_ALL_DIMENSIONS = (1 << 25), // Return the full dimensions list + RRDR_OPTION_SHOW_DETAILS = (1 << 26), // v2 returns detailed object tree + RRDR_OPTION_DEBUG = (1 << 27), // v2 returns request description + RRDR_OPTION_MINIFY = (1 << 28), // remove JSON spaces and newlines from JSON output + RRDR_OPTION_GROUP_BY_LABELS = (1 << 29), // v2 returns flattened labels per dimension of the chart + + // internal ones - not to be exposed to the API + RRDR_OPTION_INTERNAL_AR = (1 << 31), // internal use only, to let the formatters know we want to render the anomaly rate +} RRDR_OPTIONS; + +void rrdr_options_to_buffer(BUFFER *wb, RRDR_OPTIONS options); +void rrdr_options_to_buffer_json_array(BUFFER *wb, const char *key, RRDR_OPTIONS options); +void web_client_api_request_data_vX_options_to_string(char *buf, size_t size, RRDR_OPTIONS options); +void rrdr_options_init(void); + +RRDR_OPTIONS rrdr_options_parse(char *o); +RRDR_OPTIONS rrdr_options_parse_one(const char *o); + +#endif //NETDATA_RRDR_OPTIONS_H diff --git a/src/web/api/queries/average/README.md b/src/web/api/queries/average/README.md index 1ad78bee5..97fb8beb7 100644 --- a/src/web/api/queries/average/README.md +++ b/src/web/api/queries/average/README.md @@ -1,12 +1,3 @@ - - # Average or Mean > This query is available as `average` and `mean`. diff --git a/src/web/api/queries/countif/README.md b/src/web/api/queries/countif/README.md index a40535395..1b7b682c2 100644 --- a/src/web/api/queries/countif/README.md +++ b/src/web/api/queries/countif/README.md @@ -1,12 +1,3 @@ - - # CountIf > This query is available as `countif`. 
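The new `src/web/api/maps/` files above all follow the same pattern: a static name/flag table whose hashes are computed once at startup, a tokenizing parser, and a duplicate-suppressing serializer. As a minimal usage sketch (a hypothetical caller, not part of this patch; `example_rrdr_options_roundtrip` is an invented name and the sketch assumes libnetdata is available):

```c
// Hypothetical caller of the new maps API above (illustration only).
#include "rrdr_options.h"

static void example_rrdr_options_roundtrip(void) {
    rrdr_options_init();                  // hash the option names once, at startup

    // the parser accepts ',', ' ' and '|' as separators and mutates its
    // input, so it needs a writable copy of the request string
    char request[] = "jsonwrap|nonzero,ms";
    RRDR_OPTIONS options = rrdr_options_parse(request);
    // options == RRDR_OPTION_JSON_WRAP | RRDR_OPTION_NONZERO | RRDR_OPTION_MILLISECONDS

    // serialization walks the table in order, emits the first name mapped
    // to each flag, and suppresses duplicate flags via a 'used' accumulator
    BUFFER *wb = buffer_create(0, NULL);
    rrdr_options_to_buffer(wb, options);  // appends: "nonzero jsonwrap ms"
    buffer_free(wb);
}
```

Because aliases share a flag (e.g. `flip`/`reversed`/`reverse`), serializing always emits the first table entry for a flag, so a parse/serialize round trip canonicalizes option names. The same pattern applies to `contexts_options`, `contexts_alert_statuses` and `datasource_formats`.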
diff --git a/src/web/api/queries/des/README.md b/src/web/api/queries/des/README.md index 6dc19e732..fb053de5d 100644 --- a/src/web/api/queries/des/README.md +++ b/src/web/api/queries/des/README.md @@ -1,12 +1,3 @@ - - # double exponential smoothing Exponential smoothing is one of many window functions commonly applied to smooth data in signal diff --git a/src/web/api/queries/incremental_sum/README.md b/src/web/api/queries/incremental_sum/README.md index 6f02abe7d..5e2462893 100644 --- a/src/web/api/queries/incremental_sum/README.md +++ b/src/web/api/queries/incremental_sum/README.md @@ -1,12 +1,3 @@ - - # Incremental Sum (`incremental_sum`) This module finds the incremental sum of a period, which is `last value - first value`. diff --git a/src/web/api/queries/max/README.md b/src/web/api/queries/max/README.md index ae634e05e..6a24a0a57 100644 --- a/src/web/api/queries/max/README.md +++ b/src/web/api/queries/max/README.md @@ -1,12 +1,3 @@ - - # Max This module finds the max value in the time-frame given. diff --git a/src/web/api/queries/median/README.md b/src/web/api/queries/median/README.md index e6f6c04e7..42a9afb1d 100644 --- a/src/web/api/queries/median/README.md +++ b/src/web/api/queries/median/README.md @@ -1,13 +1,3 @@ - - # Median The median is the value separating the higher half from the lower half of a data sample diff --git a/src/web/api/queries/min/README.md b/src/web/api/queries/min/README.md index 35acb8c9e..f2a35d625 100644 --- a/src/web/api/queries/min/README.md +++ b/src/web/api/queries/min/README.md @@ -1,12 +1,3 @@ - - # Min This module finds the min value in the time-frame given. diff --git a/src/web/api/queries/percentile/README.md b/src/web/api/queries/percentile/README.md index 88abf8d5c..0f9a2f398 100644 --- a/src/web/api/queries/percentile/README.md +++ b/src/web/api/queries/percentile/README.md @@ -1,13 +1,3 @@ - - # Percentile The percentile is the average value of a series using only the smaller N percentile of the values. 
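The `incremental_sum` hunk above states the result is `last value - first value`: the sum telescopes, since every intermediate value is added once and subtracted once. A self-contained illustration (not the query engine's code):

```c
// Illustration only: the incremental sum telescopes to last - first.
#include <stdio.h>

int main(void) {
    double v[] = {1, 5, 3, 7, 4};
    int n = (int)(sizeof(v) / sizeof(v[0]));

    double incremental_sum = 0;
    for (int i = 1; i < n; i++)
        incremental_sum += v[i] - v[i - 1];   // (5-1) + (3-5) + (7-3) + (4-7)

    // prints "3 == 3": the loop result equals v[n-1] - v[0] = 4 - 1
    printf("%g == %g\n", incremental_sum, v[n - 1] - v[0]);
    return 0;
}
```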
diff --git a/src/web/api/queries/rrdr.h b/src/web/api/queries/rrdr.h index d36d3f5b3..860a375c9 100644 --- a/src/web/api/queries/rrdr.h +++ b/src/web/api/queries/rrdr.h @@ -17,62 +17,6 @@ typedef enum tier_query_fetch { TIER_QUERY_FETCH_AVERAGE } TIER_QUERY_FETCH; -typedef enum rrdr_options { - RRDR_OPTION_NONZERO = (1 << 0), // don't output dimensions with just zero values - RRDR_OPTION_REVERSED = (1 << 1), // output the rows in reverse order (oldest to newest) - RRDR_OPTION_ABSOLUTE = (1 << 2), // values positive, for DATASOURCE_SSV before summing - RRDR_OPTION_DIMS_MIN2MAX = (1 << 3), // when adding dimensions, use max - min, instead of sum - RRDR_OPTION_DIMS_AVERAGE = (1 << 4), // when adding dimensions, use average, instead of sum - RRDR_OPTION_DIMS_MIN = (1 << 5), // when adding dimensions, use minimum, instead of sum - RRDR_OPTION_DIMS_MAX = (1 << 6), // when adding dimensions, use maximum, instead of sum - RRDR_OPTION_SECONDS = (1 << 7), // output seconds, instead of dates - RRDR_OPTION_MILLISECONDS = (1 << 8), // output milliseconds, instead of dates - RRDR_OPTION_NULL2ZERO = (1 << 9), // do not show nulls, convert them to zeros - RRDR_OPTION_OBJECTSROWS = (1 << 10), // each row of values should be an object, not an array - RRDR_OPTION_GOOGLE_JSON = (1 << 11), // comply with google JSON/JSONP specs - RRDR_OPTION_JSON_WRAP = (1 << 12), // wrap the response in a JSON header with info about the result - RRDR_OPTION_LABEL_QUOTES = (1 << 13), // in CSV output, wrap header labels in double quotes - RRDR_OPTION_PERCENTAGE = (1 << 14), // give values as percentage of total - RRDR_OPTION_NOT_ALIGNED = (1 << 15), // do not align charts for persistent timeframes - RRDR_OPTION_DISPLAY_ABS = (1 << 16), // for badges, display the absolute value, but calculate colors with sign - RRDR_OPTION_MATCH_IDS = (1 << 17), // when filtering dimensions, match only IDs - RRDR_OPTION_MATCH_NAMES = (1 << 18), // when filtering dimensions, match only names - RRDR_OPTION_NATURAL_POINTS = (1 << 19), // return the natural points of the database - RRDR_OPTION_VIRTUAL_POINTS = (1 << 20), // return virtual points - RRDR_OPTION_ANOMALY_BIT = (1 << 21), // Return the anomaly bit stored in each collected_number - RRDR_OPTION_RETURN_RAW = (1 << 22), // Return raw data for aggregating across multiple nodes - RRDR_OPTION_RETURN_JWAR = (1 << 23), // Return anomaly rates in jsonwrap - RRDR_OPTION_SELECTED_TIER = (1 << 24), // Use the selected tier for the query - RRDR_OPTION_ALL_DIMENSIONS = (1 << 25), // Return the full dimensions list - RRDR_OPTION_SHOW_DETAILS = (1 << 26), // v2 returns detailed object tree - RRDR_OPTION_DEBUG = (1 << 27), // v2 returns request description - RRDR_OPTION_MINIFY = (1 << 28), // remove JSON spaces and newlines from JSON output - RRDR_OPTION_GROUP_BY_LABELS = (1 << 29), // v2 returns flattened labels per dimension of the chart - - // internal ones - not to be exposed to the API - RRDR_OPTION_INTERNAL_AR = (1 << 31), // internal use only, to let the formatters know we want to render the anomaly rate -} RRDR_OPTIONS; - -typedef enum context_v2_options { - CONTEXT_V2_OPTION_MINIFY = (1 << 0), // remove JSON spaces and newlines from JSON output - CONTEXT_V2_OPTION_DEBUG = (1 << 1), // show the request - CONTEXT_V2_OPTION_ALERTS_WITH_CONFIGURATIONS = (1 << 2), // include alert configurations (used by /api/v2/alert_transitions) - CONTEXT_V2_OPTION_ALERTS_WITH_INSTANCES = (1 << 3), // include alert instances (used by /api/v2/alerts) - CONTEXT_V2_OPTION_ALERTS_WITH_VALUES = (1 << 4), // include 
alert latest values (used by /api/v2/alerts) - CONTEXT_V2_OPTION_ALERTS_WITH_SUMMARY = (1 << 5), // include alerts summary counters (used by /api/v2/alerts) -} CONTEXTS_V2_OPTIONS; - -typedef enum context_v2_alert_status { - CONTEXT_V2_ALERT_UNINITIALIZED = (1 << 5), // include UNINITIALIZED alerts - CONTEXT_V2_ALERT_UNDEFINED = (1 << 6), // include UNDEFINED alerts - CONTEXT_V2_ALERT_CLEAR = (1 << 7), // include CLEAR alerts - CONTEXT_V2_ALERT_RAISED = (1 << 8), // include WARNING & CRITICAL alerts - CONTEXT_V2_ALERT_WARNING = (1 << 9), // include WARNING alerts - CONTEXT_V2_ALERT_CRITICAL = (1 << 10), // include CRITICAL alerts -} CONTEXTS_V2_ALERT_STATUS; - -#define CONTEXTS_V2_ALERT_STATUSES (CONTEXT_V2_ALERT_UNINITIALIZED|CONTEXT_V2_ALERT_UNDEFINED|CONTEXT_V2_ALERT_CLEAR|CONTEXT_V2_ALERT_RAISED|CONTEXT_V2_ALERT_WARNING|CONTEXT_V2_ALERT_CRITICAL) - typedef enum __attribute__ ((__packed__)) rrdr_value_flag { // IMPORTANT: diff --git a/src/web/api/queries/ses/README.md b/src/web/api/queries/ses/README.md index e2fd65d7a..58afdbe0c 100644 --- a/src/web/api/queries/ses/README.md +++ b/src/web/api/queries/ses/README.md @@ -1,12 +1,3 @@ - - # Single (or Simple) Exponential Smoothing (`ses`) > This query is also available as `ema` and `ewma`. diff --git a/src/web/api/queries/stddev/README.md b/src/web/api/queries/stddev/README.md index 76cfee1f1..62cda84fd 100644 --- a/src/web/api/queries/stddev/README.md +++ b/src/web/api/queries/stddev/README.md @@ -1,12 +1,3 @@ - - # standard deviation (`stddev`) The standard deviation is a measure that is used to quantify the amount of variation or dispersion diff --git a/src/web/api/queries/sum/README.md b/src/web/api/queries/sum/README.md index dd29b9c5b..1bb71a623 100644 --- a/src/web/api/queries/sum/README.md +++ b/src/web/api/queries/sum/README.md @@ -1,12 +1,3 @@ - - # Sum This module sums all the values in the time-frame requested. diff --git a/src/web/api/queries/trimmed_mean/README.md b/src/web/api/queries/trimmed_mean/README.md index 969023292..66d86a535 100644 --- a/src/web/api/queries/trimmed_mean/README.md +++ b/src/web/api/queries/trimmed_mean/README.md @@ -1,13 +1,3 @@ - - # Trimmed Mean The trimmed mean is the average value of a series excluding the smallest and biggest points. 
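For the trimmed mean described above, here is a self-contained sketch (an illustration on a plain array; the engine works on query windows and supports configurable trim percentages):

```c
// Illustration only: trimmed mean = average after dropping the extremes.
#include <stdio.h>
#include <stdlib.h>

static int cmp_double(const void *a, const void *b) {
    double x = *(const double *)a, y = *(const double *)b;
    return (x > y) - (x < y);
}

// drop 'cut' points from each end; assumes n > 2 * cut
static double trimmed_mean(double *v, int n, int cut) {
    qsort(v, n, sizeof(double), cmp_double);

    double sum = 0;
    for (int i = cut; i < n - cut; i++)
        sum += v[i];

    return sum / (double)(n - 2 * cut);
}

int main(void) {
    double v[] = {1, 2, 3, 4, 100};         // 100 is an outlier
    printf("%g\n", trimmed_mean(v, 5, 1));  // prints 3: 1 and 100 are dropped
    return 0;
}
```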
diff --git a/src/web/api/queries/weights.c b/src/web/api/queries/weights.c index 44928fea8..e34774f32 100644 --- a/src/web/api/queries/weights.c +++ b/src/web/api/queries/weights.c @@ -4,9 +4,7 @@ #include "database/KolmogorovSmirnovDist.h" #define MAX_POINTS 10000 -int enable_metric_correlations = CONFIG_BOOLEAN_YES; int metric_correlations_version = 1; -WEIGHTS_METHOD default_metric_correlations_method = WEIGHTS_METHOD_MC_KS2; typedef struct weights_stats { NETDATA_DOUBLE max_base_high_ratio; @@ -36,7 +34,7 @@ WEIGHTS_METHOD weights_string_to_method(const char *method) { if(strcmp(method, weights_methods[i].name) == 0) return weights_methods[i].value; - return default_metric_correlations_method; + return WEIGHTS_METHOD_MC_KS2; } const char *weights_method_to_string(WEIGHTS_METHOD method) { @@ -44,7 +42,7 @@ const char *weights_method_to_string(WEIGHTS_METHOD method) { if(weights_methods[i].value == method) return weights_methods[i].name; - return "unknown"; + return "ks2"; } // ---------------------------------------------------------------------------- @@ -978,6 +976,12 @@ static size_t registered_results_to_json_multinode_group_by( BUFFER *key = buffer_create(0, NULL); BUFFER *name = buffer_create(0, NULL); dfe_start_read(results, t) { + char node_uuid[UUID_STR_LEN]; + + if(UUIDiszero(t->host->node_id)) + uuid_unparse_lower(t->host->host_id.uuid, node_uuid); + else + uuid_unparse_lower(t->host->node_id.uuid, node_uuid); buffer_flush(key); buffer_flush(name); @@ -998,7 +1002,7 @@ static size_t registered_results_to_json_multinode_group_by( if(!(qwd->qwr->group_by.group_by & RRDR_GROUP_BY_NODE)) { buffer_fast_strcat(key, "@", 1); buffer_fast_strcat(name, "@", 1); - buffer_strcat(key, t->host->machine_guid); + buffer_strcat(key, node_uuid); buffer_strcat(name, rrdhost_hostname(t->host)); } } @@ -1008,7 +1012,7 @@ static size_t registered_results_to_json_multinode_group_by( buffer_fast_strcat(name, ",", 1); } - buffer_strcat(key, t->host->machine_guid); + buffer_strcat(key, node_uuid); buffer_strcat(name, rrdhost_hostname(t->host)); } if(qwd->qwr->group_by.group_by & RRDR_GROUP_BY_CONTEXT) { diff --git a/src/web/api/queries/weights.h b/src/web/api/queries/weights.h index be7e5a8b3..6d2bf8e09 100644 --- a/src/web/api/queries/weights.h +++ b/src/web/api/queries/weights.h @@ -18,9 +18,7 @@ typedef enum { WEIGHTS_FORMAT_MULTINODE = 3, } WEIGHTS_FORMAT; -extern int enable_metric_correlations; extern int metric_correlations_version; -extern WEIGHTS_METHOD default_metric_correlations_method; typedef bool (*weights_interrupt_callback_t)(void *data); diff --git a/src/web/api/v1/api_v1_aclk.c b/src/web/api/v1/api_v1_aclk.c new file mode 100644 index 000000000..b9878db2f --- /dev/null +++ b/src/web/api/v1/api_v1_aclk.c @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v1_calls.h" + +int api_v1_aclk(RRDHOST *host, struct web_client *w, char *url) { + UNUSED(url); + UNUSED(host); + if (!netdata_ready) return HTTP_RESP_SERVICE_UNAVAILABLE; + + BUFFER *wb = w->response.data; + buffer_flush(wb); + char *str = aclk_state_json(); + buffer_strcat(wb, str); + freez(str); + + wb->content_type = CT_APPLICATION_JSON; + buffer_no_cacheable(wb); + return HTTP_RESP_OK; +} + diff --git a/src/web/api/v1/api_v1_alarms.c b/src/web/api/v1/api_v1_alarms.c new file mode 100644 index 000000000..4f3af74b5 --- /dev/null +++ b/src/web/api/v1/api_v1_alarms.c @@ -0,0 +1,153 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v1_calls.h" + +static int 
web_client_api_request_v1_alarms_select(char *url) { + int all = 0; + while(url) { + char *value = strsep_skip_consecutive_separators(&url, "&"); + if (!value || !*value) continue; + + if(!strcmp(value, "all") || !strcmp(value, "all=true")) all = 1; + else if(!strcmp(value, "active") || !strcmp(value, "active=true")) all = 0; + } + + return all; +} + +int api_v1_alarms(RRDHOST *host, struct web_client *w, char *url) { + int all = web_client_api_request_v1_alarms_select(url); + + buffer_flush(w->response.data); + w->response.data->content_type = CT_APPLICATION_JSON; + health_alarms2json(host, w->response.data, all); + buffer_no_cacheable(w->response.data); + return HTTP_RESP_OK; +} + +int api_v1_alarms_values(RRDHOST *host, struct web_client *w, char *url) { + int all = web_client_api_request_v1_alarms_select(url); + + buffer_flush(w->response.data); + w->response.data->content_type = CT_APPLICATION_JSON; + health_alarms_values2json(host, w->response.data, all); + buffer_no_cacheable(w->response.data); + return HTTP_RESP_OK; +} + +int api_v1_alarm_count(RRDHOST *host, struct web_client *w, char *url) { + RRDCALC_STATUS status = RRDCALC_STATUS_RAISED; + BUFFER *contexts = NULL; + + buffer_flush(w->response.data); + buffer_sprintf(w->response.data, "["); + + while(url) { + char *value = strsep_skip_consecutive_separators(&url, "&"); + if(!value || !*value) continue; + + char *name = strsep_skip_consecutive_separators(&value, "="); + if(!name || !*name) continue; + if(!value || !*value) continue; + + netdata_log_debug(D_WEB_CLIENT, "%llu: API v1 alarm_count query param '%s' with value '%s'", w->id, name, value); + + char* p = value; + if(!strcmp(name, "status")) { + while ((*p = toupper(*p))) p++; + if (!strcmp("CRITICAL", value)) status = RRDCALC_STATUS_CRITICAL; + else if (!strcmp("WARNING", value)) status = RRDCALC_STATUS_WARNING; + else if (!strcmp("UNINITIALIZED", value)) status = RRDCALC_STATUS_UNINITIALIZED; + else if (!strcmp("UNDEFINED", value)) status = RRDCALC_STATUS_UNDEFINED; + else if (!strcmp("REMOVED", value)) status = RRDCALC_STATUS_REMOVED; + else if (!strcmp("CLEAR", value)) status = RRDCALC_STATUS_CLEAR; + } + else if(!strcmp(name, "context") || !strcmp(name, "ctx")) { + if(!contexts) contexts = buffer_create(255, &netdata_buffers_statistics.buffers_api); + buffer_strcat(contexts, "|"); + buffer_strcat(contexts, value); + } + } + + health_aggregate_alarms(host, w->response.data, contexts, status); + + buffer_sprintf(w->response.data, "]\n"); + w->response.data->content_type = CT_APPLICATION_JSON; + buffer_no_cacheable(w->response.data); + + buffer_free(contexts); + return 200; +} + +int api_v1_alarm_log(RRDHOST *host, struct web_client *w, char *url) { + time_t after = 0; + char *chart = NULL; + + while(url) { + char *value = strsep_skip_consecutive_separators(&url, "&"); + if (!value || !*value) continue; + + char *name = strsep_skip_consecutive_separators(&value, "="); + if(!name || !*name) continue; + if(!value || !*value) continue; + + if (!strcmp(name, "after")) after = (time_t) strtoul(value, NULL, 0); + else if (!strcmp(name, "chart")) chart = value; + } + + buffer_flush(w->response.data); + w->response.data->content_type = CT_APPLICATION_JSON; + sql_health_alarm_log2json(host, w->response.data, after, chart); + return HTTP_RESP_OK; +} + +int api_v1_variable(RRDHOST *host, struct web_client *w, char *url) { + int ret = HTTP_RESP_BAD_REQUEST; + char *chart = NULL; + char *variable = NULL; + + buffer_flush(w->response.data); + + while(url) { + char *value = 
strsep_skip_consecutive_separators(&url, "&"); + if(!value || !*value) continue; + + char *name = strsep_skip_consecutive_separators(&value, "="); + if(!name || !*name) continue; + if(!value || !*value) continue; + + // name and value are now the parameters + // they are not null and not empty + + if(!strcmp(name, "chart")) chart = value; + else if(!strcmp(name, "variable")) variable = value; + } + + if(!chart || !*chart || !variable || !*variable) { + buffer_sprintf(w->response.data, "A chart= and a variable= are required."); + goto cleanup; + } + + RRDSET *st = rrdset_find(host, chart); + if(!st) st = rrdset_find_byname(host, chart); + if(!st) { + buffer_strcat(w->response.data, "Chart is not found: "); + buffer_strcat_htmlescape(w->response.data, chart); + ret = HTTP_RESP_NOT_FOUND; + goto cleanup; + } + + w->response.data->content_type = CT_APPLICATION_JSON; + st->last_accessed_time_s = now_realtime_sec(); + alert_variable_lookup_trace(host, st, variable, w->response.data); + + return HTTP_RESP_OK; + +cleanup: + return ret; +} + +int api_v1_alarm_variables(RRDHOST *host, struct web_client *w, char *url) { + return api_v1_single_chart_helper(host, w, url, health_api_v1_chart_variables2json); +} + diff --git a/src/web/api/v1/api_v1_allmetrics.c b/src/web/api/v1/api_v1_allmetrics.c new file mode 100644 index 000000000..593475efd --- /dev/null +++ b/src/web/api/v1/api_v1_allmetrics.c @@ -0,0 +1,308 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v1_calls.h" + +#define ALLMETRICS_FORMAT_SHELL "shell" +#define ALLMETRICS_FORMAT_PROMETHEUS "prometheus" +#define ALLMETRICS_FORMAT_PROMETHEUS_ALL_HOSTS "prometheus_all_hosts" +#define ALLMETRICS_FORMAT_JSON "json" + +#define ALLMETRICS_SHELL 1 +#define ALLMETRICS_PROMETHEUS 2 +#define ALLMETRICS_JSON 3 +#define ALLMETRICS_PROMETHEUS_ALL_HOSTS 4 + +struct prometheus_output_options { + char *name; + PROMETHEUS_OUTPUT_OPTIONS flag; +} prometheus_output_flags_root[] = { + { "names", PROMETHEUS_OUTPUT_NAMES }, + { "timestamps", PROMETHEUS_OUTPUT_TIMESTAMPS }, + { "variables", PROMETHEUS_OUTPUT_VARIABLES }, + { "oldunits", PROMETHEUS_OUTPUT_OLDUNITS }, + { "hideunits", PROMETHEUS_OUTPUT_HIDEUNITS }, + // terminator + { NULL, PROMETHEUS_OUTPUT_NONE }, +}; + +// ---------------------------------------------------------------------------- +// BASH +// /api/v1/allmetrics?format=bash + +static inline size_t shell_name_copy(char *d, const char *s, size_t usable) { + size_t n; + + for(n = 0; *s && n < usable ; d++, s++, n++) { + register char c = *s; + + if(unlikely(!isalnum(c))) *d = '_'; + else *d = (char)toupper(c); + } + *d = '\0'; + + return n; +} + +#define SHELL_ELEMENT_MAX 100 + +void rrd_stats_api_v1_charts_allmetrics_shell(RRDHOST *host, const char *filter_string, BUFFER *wb) { + analytics_log_shell(); + SIMPLE_PATTERN *filter = simple_pattern_create(filter_string, NULL, SIMPLE_PATTERN_EXACT, true); + + // for each chart + RRDSET *st; + rrdset_foreach_read(st, host) { + if (filter && !simple_pattern_matches_string(filter, st->name)) + continue; + if (rrdset_is_available_for_viewers(st)) { + NETDATA_DOUBLE total = 0.0; + + char chart[SHELL_ELEMENT_MAX + 1]; + shell_name_copy(chart, st->name ? 
rrdset_name(st) : rrdset_id(st), SHELL_ELEMENT_MAX); + + buffer_sprintf(wb, "\n# chart: %s (name: %s)\n", rrdset_id(st), rrdset_name(st)); + + // for each dimension + RRDDIM *rd; + rrddim_foreach_read(rd, st) { + if(rd->collector.counter && !rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE)) { + char dimension[SHELL_ELEMENT_MAX + 1]; + shell_name_copy(dimension, rd->name?rrddim_name(rd):rrddim_id(rd), SHELL_ELEMENT_MAX); + + NETDATA_DOUBLE n = rd->collector.last_stored_value; + + if(isnan(n) || isinf(n)) + buffer_sprintf(wb, "NETDATA_%s_%s=\"\" # %s\n", chart, dimension, rrdset_units(st)); + else { + if(rd->multiplier < 0 || rd->divisor < 0) n = -n; + n = roundndd(n); + if(!rrddim_option_check(rd, RRDDIM_OPTION_HIDDEN)) total += n; + buffer_sprintf(wb, "NETDATA_%s_%s=\"" NETDATA_DOUBLE_FORMAT_ZERO "\" # %s\n", chart, dimension, n, rrdset_units(st)); + } + } + } + rrddim_foreach_done(rd); + + total = roundndd(total); + buffer_sprintf(wb, "NETDATA_%s_VISIBLETOTAL=\"" NETDATA_DOUBLE_FORMAT_ZERO "\" # %s\n", chart, total, rrdset_units(st)); + } + } + rrdset_foreach_done(st); + + buffer_strcat(wb, "\n# NETDATA ALARMS RUNNING\n"); + + RRDCALC *rc; + foreach_rrdcalc_in_rrdhost_read(host, rc) { + if(!rc->rrdset) continue; + + char chart[SHELL_ELEMENT_MAX + 1]; + shell_name_copy(chart, rc->rrdset->name?rrdset_name(rc->rrdset):rrdset_id(rc->rrdset), SHELL_ELEMENT_MAX); + + char alarm[SHELL_ELEMENT_MAX + 1]; + shell_name_copy(alarm, rrdcalc_name(rc), SHELL_ELEMENT_MAX); + + NETDATA_DOUBLE n = rc->value; + + if(isnan(n) || isinf(n)) + buffer_sprintf(wb, "NETDATA_ALARM_%s_%s_VALUE=\"\" # %s\n", chart, alarm, rrdcalc_units(rc)); + else { + n = roundndd(n); + buffer_sprintf(wb, "NETDATA_ALARM_%s_%s_VALUE=\"" NETDATA_DOUBLE_FORMAT_ZERO "\" # %s\n", chart, alarm, n, rrdcalc_units(rc)); + } + + buffer_sprintf(wb, "NETDATA_ALARM_%s_%s_STATUS=\"%s\"\n", chart, alarm, rrdcalc_status2string(rc->status)); + } + foreach_rrdcalc_in_rrdhost_done(rc); + + simple_pattern_free(filter); +} + +// ---------------------------------------------------------------------------- + +void rrd_stats_api_v1_charts_allmetrics_json(RRDHOST *host, const char *filter_string, BUFFER *wb) { + analytics_log_json(); + SIMPLE_PATTERN *filter = simple_pattern_create(filter_string, NULL, SIMPLE_PATTERN_EXACT, true); + + buffer_strcat(wb, "{"); + + size_t chart_counter = 0; + size_t dimension_counter = 0; + + // for each chart + RRDSET *st; + rrdset_foreach_read(st, host) { + if (filter && !(simple_pattern_matches_string(filter, st->id) || simple_pattern_matches_string(filter, st->name))) + continue; + + if(rrdset_is_available_for_viewers(st)) { + buffer_sprintf( + wb, + "%s\n" + "\t\"%s\": {\n" + "\t\t\"name\":\"%s\",\n" + "\t\t\"family\":\"%s\",\n" + "\t\t\"context\":\"%s\",\n" + "\t\t\"units\":\"%s\",\n" + "\t\t\"last_updated\": %"PRId64",\n" + "\t\t\"dimensions\": {", + chart_counter ? "," : "", + rrdset_id(st), + rrdset_name(st), + rrdset_family(st), + rrdset_context(st), + rrdset_units(st), + (int64_t) rrdset_last_entry_s(st)); + + chart_counter++; + dimension_counter = 0; + + // for each dimension + RRDDIM *rd; + rrddim_foreach_read(rd, st) { + if(rd->collector.counter && !rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE)) { + buffer_sprintf( + wb, + "%s\n" + "\t\t\t\"%s\": {\n" + "\t\t\t\t\"name\": \"%s\",\n" + "\t\t\t\t\"value\": ", + dimension_counter ? 
"," : "", + rrddim_id(rd), + rrddim_name(rd)); + + if(isnan(rd->collector.last_stored_value)) + buffer_strcat(wb, "null"); + else + buffer_sprintf(wb, NETDATA_DOUBLE_FORMAT, rd->collector.last_stored_value); + + buffer_strcat(wb, "\n\t\t\t}"); + + dimension_counter++; + } + } + rrddim_foreach_done(rd); + + buffer_strcat(wb, "\n\t\t}\n\t}"); + } + } + rrdset_foreach_done(st); + + buffer_strcat(wb, "\n}"); + simple_pattern_free(filter); +} + +int api_v1_allmetrics(RRDHOST *host, struct web_client *w, char *url) { + int format = ALLMETRICS_SHELL; + const char *filter = NULL; + const char *prometheus_server = w->client_ip; + + uint32_t prometheus_exporting_options; + if (prometheus_exporter_instance) + prometheus_exporting_options = prometheus_exporter_instance->config.options; + else + prometheus_exporting_options = global_exporting_options; + + PROMETHEUS_OUTPUT_OPTIONS prometheus_output_options = + PROMETHEUS_OUTPUT_TIMESTAMPS | + ((prometheus_exporting_options & EXPORTING_OPTION_SEND_NAMES) ? PROMETHEUS_OUTPUT_NAMES : 0); + + const char *prometheus_prefix; + if (prometheus_exporter_instance) + prometheus_prefix = prometheus_exporter_instance->config.prefix; + else + prometheus_prefix = global_exporting_prefix; + + while(url) { + char *value = strsep_skip_consecutive_separators(&url, "&"); + if (!value || !*value) continue; + + char *name = strsep_skip_consecutive_separators(&value, "="); + if(!name || !*name) continue; + if(!value || !*value) continue; + + if(!strcmp(name, "format")) { + if(!strcmp(value, ALLMETRICS_FORMAT_SHELL)) + format = ALLMETRICS_SHELL; + else if(!strcmp(value, ALLMETRICS_FORMAT_PROMETHEUS)) + format = ALLMETRICS_PROMETHEUS; + else if(!strcmp(value, ALLMETRICS_FORMAT_PROMETHEUS_ALL_HOSTS)) + format = ALLMETRICS_PROMETHEUS_ALL_HOSTS; + else if(!strcmp(value, ALLMETRICS_FORMAT_JSON)) + format = ALLMETRICS_JSON; + else + format = 0; + } + else if(!strcmp(name, "filter")) { + filter = value; + } + else if(!strcmp(name, "server")) { + prometheus_server = value; + } + else if(!strcmp(name, "prefix")) { + prometheus_prefix = value; + } + else if(!strcmp(name, "data") || !strcmp(name, "source") || !strcmp(name, "data source") || !strcmp(name, "data-source") || !strcmp(name, "data_source") || !strcmp(name, "datasource")) { + prometheus_exporting_options = exporting_parse_data_source(value, prometheus_exporting_options); + } + else { + int i; + for(i = 0; prometheus_output_flags_root[i].name ; i++) { + if(!strcmp(name, prometheus_output_flags_root[i].name)) { + if(!strcmp(value, "yes") || !strcmp(value, "1") || !strcmp(value, "true")) + prometheus_output_options |= prometheus_output_flags_root[i].flag; + else { + prometheus_output_options &= ~prometheus_output_flags_root[i].flag; + } + + break; + } + } + } + } + + buffer_flush(w->response.data); + buffer_no_cacheable(w->response.data); + + switch(format) { + case ALLMETRICS_JSON: + w->response.data->content_type = CT_APPLICATION_JSON; + rrd_stats_api_v1_charts_allmetrics_json(host, filter, w->response.data); + return HTTP_RESP_OK; + + case ALLMETRICS_SHELL: + w->response.data->content_type = CT_TEXT_PLAIN; + rrd_stats_api_v1_charts_allmetrics_shell(host, filter, w->response.data); + return HTTP_RESP_OK; + + case ALLMETRICS_PROMETHEUS: + w->response.data->content_type = CT_PROMETHEUS; + rrd_stats_api_v1_charts_allmetrics_prometheus_single_host( + host + , filter + , w->response.data + , prometheus_server + , prometheus_prefix + , prometheus_exporting_options + , prometheus_output_options + ); + return HTTP_RESP_OK; + + case 
ALLMETRICS_PROMETHEUS_ALL_HOSTS: + w->response.data->content_type = CT_PROMETHEUS; + rrd_stats_api_v1_charts_allmetrics_prometheus_all_hosts( + host + , filter + , w->response.data + , prometheus_server + , prometheus_prefix + , prometheus_exporting_options + , prometheus_output_options + ); + return HTTP_RESP_OK; + + default: + w->response.data->content_type = CT_TEXT_PLAIN; + buffer_strcat(w->response.data, "Which format? '" ALLMETRICS_FORMAT_SHELL "', '" ALLMETRICS_FORMAT_PROMETHEUS "', '" ALLMETRICS_FORMAT_PROMETHEUS_ALL_HOSTS "' and '" ALLMETRICS_FORMAT_JSON "' are currently supported."); + return HTTP_RESP_BAD_REQUEST; + } +} diff --git a/src/web/api/v1/api_v1_badge/README.md b/src/web/api/v1/api_v1_badge/README.md new file mode 100644 index 000000000..d6deb7994 --- /dev/null +++ b/src/web/api/v1/api_v1_badge/README.md @@ -0,0 +1,360 @@ +# Netdata badges + +**Badges are cool!** + +Netdata can generate badges for any chart and any dimension at any time-frame. Badges come in `SVG` and can be added to any web page using an `<img>` HTML tag. + +**Netdata badges are powerful**! + +Given that Netdata collects from **1.000** to **5.000** metrics per server (depending on the number of network interfaces, disks, cpu cores, applications running, users logged in, containers running, etc) and that Netdata already has data reduction/aggregation functions embedded, the badges can be quite powerful. + +For each metric/dimension and for arbitrary time-frames badges can show **min**, **max** or **average** value, but also **sum** or **incremental-sum** to have their **volume**. + +For example, there is [a chart in Netdata that shows the current requests/s of nginx](http://london.my-netdata.io/#nginx_local_nginx). Using this chart alone we can show the following badges (we could add more time-frames, like **today**, **yesterday**, etc): + + + +Similarly, there is [a chart that shows outbound bandwidth per class](http://london.my-netdata.io/#tc_eth0), using QoS data. So it shows `kilobits/s` per class. Using this chart we can show: + + + +The right one is a **volume** calculation. Netdata calculated the total of the last 86.400 seconds (a day) which gives `kilobits`, then divided it by 8 to make it KB, then by 1024 to make it MB and then by 1024 to make it GB. Calculations like this are quite accurate, since for every value collected, every second, Netdata interpolates it to the second boundary using microsecond calculations. + +Let's see a few more badge examples (they come from the [Netdata registry](/src/registry/README.md)): + +- **cpu usage of user `root`** (you can pick any user; 100% = 1 core). This will be `green <10%`, `yellow <20%`, `orange <50%`, `blue <100%` (1 core), `red` otherwise (you define thresholds and colors on the URL). + + + +- **mysql queries per second** + + + + niche ones: **mysql SELECT statements with JOIN, which did full table scans**: + + + +--- + +> So, every single line on the charts of a [Netdata dashboard](http://london.my-netdata.io/) can become a badge, and this badge can calculate **average**, **min**, **max**, or **volume** for any time-frame! And you can also vary the badge color using conditions on the calculated value. + +--- + +## How to create badges + +The basic URL is `http://your.netdata:19999/api/v1/badge.svg?option1&option2&option3&...`. + +Here is what you can put for `options` (these are standard Netdata API options): + +- `chart=CHART.NAME` + + The chart to get the values from. 
+ + **This is the only parameter required** and with just this parameter, Netdata will return the sum of the latest values of all chart dimensions. + + Example: + +```html + + + +``` + + Which produces this: + + + + + +- `alarm=NAME` + + Render the current value and status of an alert linked to the chart. This option can be ignored if the badge to be generated is not related to an alert. + + The current value of the alert will be rendered. The color of the badge will indicate the status of the alert. + + For alert badges, **both `chart` and `alarm` parameters are required**. + +- `dimensions=DIMENSION1|DIMENSION2|...` + + The dimensions of the chart to use. If you don't set any dimension, all will be used. When multiple dimensions are used, Netdata will sum their values. You can append `options=absolute` if you want this sum to convert all values to positive before adding them. + + Pipes in HTML have to be escaped with `%7C`. + + Example: + +```html + + + +``` + + Which produces this: + + + + + +- `before=SECONDS` and `after=SECONDS` + + The timeframe. These can be absolute unix timestamps, or numbers of seconds relative to now. By default `before=0` and `after=-1` (1 second in the past). + + To get the last minute set `after=-60`. This will give the average of the last complete minute (XX:XX:00 - XX:XX:59). + + To get the max of the last hour set `after=-3600&group=max`. This will give the maximum value of the last complete hour (XX:00:00 - XX:59:59). + + Example: + +```html + + + +``` + + Which produces the average of the last complete minute (XX:XX:00 - XX:XX:59): + + + + + + While this is the previous minute (one minute before the last one, again aligned XX:XX:00 - XX:XX:59): + +```html + + + +``` + + It produces this: + + + + + +- `group=min` or `group=max` or `group=average` (the default) or `group=sum` or `group=incremental-sum` + + If Netdata has to reduce (aggregate) the data to calculate the value, this is the aggregation method to use. + + - `max` will find the max value for the timeframe. This works on both positive and negative dimensions. It will find the most extreme value. + + - `min` will find the min value for the timeframe. This works on both positive and negative dimensions. It will find the number closest to zero. + + - `average` will calculate the average value for the timeframe. + + - `sum` will sum all the values for the timeframe. This is nice for finding the volume of dimensions for a timeframe. So if you have a dimension that reports `X per second`, you can find the volume of the dimension in a timeframe, by adding its values in that timeframe. + + - `incremental-sum` will sum the difference between each value and the next. Let's assume you have a dimension that does not measure the rate of something, but the absolute value of it. So it has values like this "1, 5, 3, 7, 4". `incremental-sum` will calculate the difference of adjacent values. In this example, they will be `(5 - 1) + (3 - 5) + (7 - 3) + (4 - 7) = 3` (which is equal to the last value minus the first = 4 - 1). + +- `options=opt1|opt2|opt3|...` + + These fine-tune various options of the API. Here is what you can use for badges (the API has more options, but only these are useful for badges): + + - `percentage`, instead of returning a value, calculate the percentage of the sum of the values of the selected dimensions (selected sum / total sum * 100). This also sets the units to `%`. + + - `absolute` or `abs`, turn all values positive and then sum them. 
+ + - `display_absolute` or `display-absolute`, to use the signed value during color calculation, but display the absolute value on the badge. + + - `min2max`, when multiple dimensions are given, do not sum them, but take their `max - min`. + + - `unaligned`, when data are reduced / aggregated (e.g. the request is about the average of the last minute, or hour), Netdata by default aligns them so that the charts will have a constant shape (so average per minute returns always XX:XX:00 - XX:XX:59). If you set the `unaligned` option, Netdata will aggregate data without any alignment, so if the request is for 60 seconds, it will aggregate the latest 60 seconds of collected data. + +These are options dedicated to badges: + +- `label=TEXT` + + The label of the badge. + +- `units=TEXT` + + The units of the badge. If you want to put a `/`, please put a `\`. This is because Netdata allows badge parameters to be given as a path in the URL, instead of a query string. You can also use `null` or `empty` to show it without any units. + + The units `seconds`, `minutes` and `hours` trigger special formatting. The value has to be in this unit, and Netdata will automatically change it to show a prettier duration. + +- `multiply=NUMBER` + + Multiply the value with this number. The default is `1`. + +- `divide=NUMBER` + + Divide the value with this number. The default is `1`. + +- Color customization parameters + + The following parameters specify colors of each individual part of the badge. Each parameter is documented in detail + below. + + | Area of badge | Background color parameter | Text color parameter | + | ---: | :------------------------: | :------------------: | + | Label (left) part | `label_color` | `text_color_lbl` | + | Value (right) part | `value_color` | `text_color_val` | + + - `label_color=COLOR` + + The color of the label (the left part). You can use any HTML color in `RGB` or `RRGGBB` hex notation (without + the `#` character at the beginning). Additionally, you can use one of the following predefined colors (and you + can use them by their name): + + - `green` + - `brightgreen` + - `yellow` + - `yellowgreen` + - `orange` + - `red` + - `blue` + - `grey` + - `gray` + - `lightgrey` + - `lightgray` + + These colors are taken from , which makes them compatible with standard + badges. + + - `value_color=COLOR:null|COLOR<VALUE|COLOR>VALUE|COLOR>=VALUE|COLOR<=VALUE|...` + + You can add a pipe-delimited list of conditions to pick the value color. The first matching (left to right) will + be used. + + Example: `value_color=grey:null|green<10|yellow<100|orange<1000|blue<10000|red` + + The above will set `grey` if no value exists (not collected within the `gap when lost iterations above` in + `netdata.conf` for the chart), `green` if the value is less than 10, `yellow` if the value is less than 100, and + so on. Netdata will use `red` if no other conditions match. Only integers are supported as values. + + The supported operators are `<`, `>`, `<=`, `>=`, `=` (or `:`), and `!=` (or `<>`). + + You can also use the same syntax as the `label_color` parameter to define each of these colors. You can + reference a predefined color by name or `RGB`/`RRGGBB` hex notation. + + - `text_color_lbl=RGB` or `text_color_lbl=RRGGBB` or `text_color_lbl=color_by_name` + + This value specifies the font color for the left/label side of the badge. The syntax is the same as the + `label_color` parameter. If not given, or given with an empty value, Netdata will use the default color. 
+ + - `text_color_val=RGB` or `text_color_val=RRGGBB` or `text_color_val=color_by_name` + + This value specifies the font color for the right/value side of the badge. The syntax is the same as the + `label_color` parameter. If not given, or given with an empty value, Netdata will use the default color. + +- `precision=NUMBER` + + The number of decimal digits of the value. By default Netdata will add: + + - no decimal digits for values > 1000 + - 1 decimal digit for values > 100 + - 2 decimal digits for values > 1 + - 3 decimal digits for values > 0.1 + - 4 decimal digits for values \<= 0.1 + + Using the `precision=NUMBER` you can set your preference per badge. + +- `scale=XXX` + + This option scales the SVG image. It accepts values above or equal to 100 (100% is the default scale). For example, let's get a few different sizes: + + original
+  - `scale=125`
+  - `scale=150`
+  - `scale=175`
+  - `scale=200`
+
+- `fixed_width_lbl=NUMBER` and `fixed_width_val=NUMBER`
+
+  These parameters override the auto-sizing of badges and display them at fixed widths. `fixed_width_lbl` sets the width of the left (label) side and `fixed_width_val` sets the width of the right (value) side. You must set both parameters together, or they will be ignored.
+
+  Set the widths wide enough to fit all the possible values/contents of the badge you're requesting. If the text cannot fit the given space, it will be clipped.
+
+  The `scale` parameter still applies to the values you give to `fixed_width_lbl` and `fixed_width_val`.
+
+- `refresh=auto` or `refresh=SECONDS`
+
+  This option enables auto-refreshing of images. Netdata will send the HTTP header `Refresh: SECONDS` to the web browser, thus requesting automatic refresh of the images at regular intervals.
+
+  `auto` will calculate the proper `SECONDS` to avoid unnecessary refreshes. If `SECONDS` is zero, this feature is disabled (it is also disabled by default).
+
+  Auto-refreshing like this works only if you access the badge directly. So, you may have to put it in an `embed` or `iframe` for it to be auto-refreshed. Use something like this (substitute your own badge URL):
+
+```html
+<embed src="BADGE_URL" type="image/svg+xml" height="20"/>
+```
+
+  Another way is to use JavaScript to auto-refresh them. You can auto-refresh all the Netdata badges on a page by adding a class to all of them, like this `<img class="netdata-badge" src="..."/>`, and then adding this JavaScript code to your page (it requires jQuery):
+
+```html
+<script>
+  var NETDATA_BADGES_AUTOREFRESH_SECONDS = 5;
+  function refreshNetdataBadges() {
+    var now = new Date().getTime().toString();
+    $('.netdata-badge').each(function() {
+      this.src = this.src.replace(/\&_=\d*/, '') + '&_=' + now;
+    });
+    setTimeout(refreshNetdataBadges, NETDATA_BADGES_AUTOREFRESH_SECONDS * 1000);
+  }
+  setTimeout(refreshNetdataBadges, NETDATA_BADGES_AUTOREFRESH_SECONDS * 1000);
+</script>
+```
+
+A more advanced badge refresh method is to include `http://your.netdata.ip:19999/refresh-badges.js` in your page.
+
+---
+
+## Escaping URLs
+
+Keep in mind that if you add badge URLs to your HTML pages, you have to escape the special characters:
+
+|character|name|escape sequence|
+|:-------:|:--:|:-------------:|
+|` `|space (in labels and units)|`%20`|
+|`#`|hash (for colors)|`%23`|
+|`%`|percent (in units)|`%25`|
+|`<`|less than|`%3C`|
+|`>`|greater than|`%3E`|
+|`\`|backslash (when you need a `/`)|`%5C`|
+|`\|`|pipe (delimiting parameters)|`%7C`|
+
+## FAQ
+
+#### Is it fast?
+
+On modern hardware, Netdata can generate about **2,000 badges per second per core** before any delays become noticeable. It generates a badge in about half a millisecond!
+
+Of course, these timings are for badges that use recent data. If you need badges that do calculations over long durations (a day, or more), the timing will differ. Netdata logs its timings in its `access.log`, so take a look there before adding a heavy badge to a busy web site. Of course, you can cache such badges, or have a cron job fetch them from Netdata and save them on your web server at regular intervals.
+
+#### Embedding badges in GitHub
+
+You have 2 options:
+
+- SVG images with markdown
+- SVG images with HTML (directly in .md files)
+
+For example, this is the cpu badge shown above:
+
+- Markdown example:
+
+```md
+[![A nice name](https://registry.my-netdata.io/api/v1/badge.svg?chart=users.cpu&dimensions=root&value_color=grey:null%7Cgreen%3C10%7Cyellow%3C20%7Corange%3C50%7Cblue%3C100%7Cred&label=root%20user%20cpu%20now&units=%25)](https://registry.my-netdata.io/#apps_cpu)
+```
+
+- HTML example:
+
+```html
+<a href="https://registry.my-netdata.io/#apps_cpu">
+  <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=users.cpu&dimensions=root&value_color=grey:null%7Cgreen%3C10%7Cyellow%3C20%7Corange%3C50%7Cblue%3C100%7Cred&label=root%20user%20cpu%20now&units=%25"/>
+</a>
+```
+
+Both produce the same badge.
+
+#### Auto-refreshing badges in GitHub
+
+Unfortunately, it cannot be done. GitHub fetches all the images through a proxy and rewrites all the URLs to be served by that proxy.
+
+You can refresh them from your browser console, though.
Press F12 to open the web browser console (switch to the console too), paste the following and press enter. They will refresh: + +```js +var len = document.images.length; while(len--) { document.images[len].src = document.images[len].src.replace(/\?cacheBuster=\d*/, "") + "?cacheBuster=" + new Date().getTime().toString(); }; +``` + + diff --git a/src/web/api/v1/api_v1_badge/web_buffer_svg.c b/src/web/api/v1/api_v1_badge/web_buffer_svg.c new file mode 100644 index 000000000..642261fd3 --- /dev/null +++ b/src/web/api/v1/api_v1_badge/web_buffer_svg.c @@ -0,0 +1,1160 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "libnetdata/libnetdata.h" +#include "../../../server/web_client.h" + +#define BADGE_HORIZONTAL_PADDING 4 +#define VERDANA_KERNING 0.2 +#define VERDANA_PADDING 1.0 + +/* + * verdana11_widths[] has been generated with this method: + * https://github.com/badges/shields/blob/master/measure-text.js +*/ + +static double verdana11_widths[128] = { + [0] = 0.0, + [1] = 0.0, + [2] = 0.0, + [3] = 0.0, + [4] = 0.0, + [5] = 0.0, + [6] = 0.0, + [7] = 0.0, + [8] = 0.0, + [9] = 0.0, + [10] = 0.0, + [11] = 0.0, + [12] = 0.0, + [13] = 0.0, + [14] = 0.0, + [15] = 0.0, + [16] = 0.0, + [17] = 0.0, + [18] = 0.0, + [19] = 0.0, + [20] = 0.0, + [21] = 0.0, + [22] = 0.0, + [23] = 0.0, + [24] = 0.0, + [25] = 0.0, + [26] = 0.0, + [27] = 0.0, + [28] = 0.0, + [29] = 0.0, + [30] = 0.0, + [31] = 0.0, + [32] = 3.8671874999999996, // + [33] = 4.3291015625, // ! + [34] = 5.048828125, // " + [35] = 9.001953125, // # + [36] = 6.9931640625, // $ + [37] = 11.837890625, // % + [38] = 7.992187499999999, // & + [39] = 2.9541015625, // ' + [40] = 4.9951171875, // ( + [41] = 4.9951171875, // ) + [42] = 6.9931640625, // * + [43] = 9.001953125, // + + [44] = 4.00146484375, // , + [45] = 4.9951171875, // - + [46] = 4.00146484375, // . + [47] = 4.9951171875, // / + [48] = 6.9931640625, // 0 + [49] = 6.9931640625, // 1 + [50] = 6.9931640625, // 2 + [51] = 6.9931640625, // 3 + [52] = 6.9931640625, // 4 + [53] = 6.9931640625, // 5 + [54] = 6.9931640625, // 6 + [55] = 6.9931640625, // 7 + [56] = 6.9931640625, // 8 + [57] = 6.9931640625, // 9 + [58] = 4.9951171875, // : + [59] = 4.9951171875, // ; + [60] = 9.001953125, // < + [61] = 9.001953125, // = + [62] = 9.001953125, // > + [63] = 5.99951171875, // ? 
+ [64] = 11.0, // @ + [65] = 7.51953125, // A + [66] = 7.541015625, // B + [67] = 7.680664062499999, // C + [68] = 8.4755859375, // D + [69] = 6.95556640625, // E + [70] = 6.32177734375, // F + [71] = 8.529296875, // G + [72] = 8.26611328125, // H + [73] = 4.6298828125, // I + [74] = 5.00048828125, // J + [75] = 7.62158203125, // K + [76] = 6.123046875, // L + [77] = 9.2705078125, // M + [78] = 8.228515625, // N + [79] = 8.658203125, // O + [80] = 6.63330078125, // P + [81] = 8.658203125, // Q + [82] = 7.6484375, // R + [83] = 7.51953125, // S + [84] = 6.7783203125, // T + [85] = 8.05126953125, // U + [86] = 7.51953125, // V + [87] = 10.87646484375, // W + [88] = 7.53564453125, // X + [89] = 6.767578125, // Y + [90] = 7.53564453125, // Z + [91] = 4.9951171875, // [ + [92] = 4.9951171875, // backslash + [93] = 4.9951171875, // ] + [94] = 9.001953125, // ^ + [95] = 6.9931640625, // _ + [96] = 6.9931640625, // ` + [97] = 6.6064453125, // a + [98] = 6.853515625, // b + [99] = 5.73095703125, // c + [100] = 6.853515625, // d + [101] = 6.552734375, // e + [102] = 3.8671874999999996, // f + [103] = 6.853515625, // g + [104] = 6.9609375, // h + [105] = 3.0185546875, // i + [106] = 3.78662109375, // j + [107] = 6.509765625, // k + [108] = 3.0185546875, // l + [109] = 10.69921875, // m + [110] = 6.9609375, // n + [111] = 6.67626953125, // o + [112] = 6.853515625, // p + [113] = 6.853515625, // q + [114] = 4.6943359375, // r + [115] = 5.73095703125, // s + [116] = 4.33447265625, // t + [117] = 6.9609375, // u + [118] = 6.509765625, // v + [119] = 9.001953125, // w + [120] = 6.509765625, // x + [121] = 6.509765625, // y + [122] = 5.779296875, // z + [123] = 6.982421875, // { + [124] = 4.9951171875, // | + [125] = 6.982421875, // } + [126] = 9.001953125, // ~ + [127] = 0.0 +}; + +// find the width of the string using the verdana 11points font +static inline double verdana11_width(const char *s, float em_size) { + double w = 0.0; + + while(*s) { + // if UTF8 multibyte char found and guess it's width equal 1em + // as label width will be updated with JavaScript this is not so important + + // TODO: maybe move UTF8 functions from url.c to separate util in libnetdata + // then use url_utf8_get_byte_length etc. 
+        if(IS_UTF8_STARTBYTE(*s)) {
+            s++;
+            while(IS_UTF8_BYTE(*s) && !IS_UTF8_STARTBYTE(*s)){
+                s++;
+            }
+            w += em_size;
+        }
+        else {
+            if(likely(!(*s & 0x80))){ // Byte 1XXX XXXX is not valid in UTF8
+                double t = verdana11_widths[(unsigned char)*s];
+                if(t != 0.0)
+                    w += t + VERDANA_KERNING;
+            }
+            s++;
+        }
+    }
+
+    w -= VERDANA_KERNING;
+    w += VERDANA_PADDING;
+    return w;
+}
+
+static inline size_t escape_xmlz(char *dst, const char *src, size_t len) {
+    size_t i = len;
+
+    // required escapes from
+    // https://github.com/badges/shields/blob/master/badge.js
+    while(*src && i) {
+        switch(*src) {
+            case '\\':
+                *dst++ = '/';
+                src++;
+                i--;
+                break;
+
+            case '&':
+                if(i > 5) {
+                    strcpy(dst, "&amp;");
+                    i -= 5;
+                    dst += 5;
+                    src++;
+                }
+                else goto cleanup;
+                break;
+
+            case '<':
+                if(i > 4) {
+                    strcpy(dst, "&lt;");
+                    i -= 4;
+                    dst += 4;
+                    src++;
+                }
+                else goto cleanup;
+                break;
+
+            case '>':
+                if(i > 4) {
+                    strcpy(dst, "&gt;");
+                    i -= 4;
+                    dst += 4;
+                    src++;
+                }
+                else goto cleanup;
+                break;
+
+            case '"':
+                if(i > 6) {
+                    strcpy(dst, "&quot;");
+                    i -= 6;
+                    dst += 6;
+                    src++;
+                }
+                else goto cleanup;
+                break;
+
+            case '\'':
+                if(i > 6) {
+                    strcpy(dst, "&apos;");
+                    i -= 6;
+                    dst += 6;
+                    src++;
+                }
+                else goto cleanup;
+                break;
+
+            default:
+                i--;
+                *dst++ = *src++;
+                break;
+        }
+    }
+
+cleanup:
+    *dst = '\0';
+    return len - i;
+}
+
+static inline char *format_value_with_precision_and_unit(char *value_string, size_t value_string_len,
+                                                         NETDATA_DOUBLE value, const char *units, int precision) {
+    if(unlikely(isnan(value) || isinf(value)))
+        value = 0.0;
+
+    char *separator = "";
+    if(unlikely(isalnum((uint8_t)*units)))
+        separator = " ";
+
+    if(precision < 0) {
+        int len, lstop = 0, trim_zeros = 1;
+
+        NETDATA_DOUBLE abs = value;
+        if(isless(value, 0)) {
+            lstop = 1;
+            abs = fabsndd(value);
+        }
+
+        if(isgreaterequal(abs, 1000)) {
+            len = snprintfz(value_string, value_string_len, "%0.0" NETDATA_DOUBLE_MODIFIER, (NETDATA_DOUBLE) value);
+            trim_zeros = 0;
+        }
+        else if(isgreaterequal(abs, 10))     len = snprintfz(value_string, value_string_len, "%0.1" NETDATA_DOUBLE_MODIFIER, (NETDATA_DOUBLE) value);
+        else if(isgreaterequal(abs, 1))      len = snprintfz(value_string, value_string_len, "%0.2" NETDATA_DOUBLE_MODIFIER, (NETDATA_DOUBLE) value);
+        else if(isgreaterequal(abs, 0.1))    len = snprintfz(value_string, value_string_len, "%0.2" NETDATA_DOUBLE_MODIFIER, (NETDATA_DOUBLE) value);
+        else if(isgreaterequal(abs, 0.01))   len = snprintfz(value_string, value_string_len, "%0.4" NETDATA_DOUBLE_MODIFIER, (NETDATA_DOUBLE) value);
+        else if(isgreaterequal(abs, 0.001))  len = snprintfz(value_string, value_string_len, "%0.5" NETDATA_DOUBLE_MODIFIER, (NETDATA_DOUBLE) value);
+        else if(isgreaterequal(abs, 0.0001)) len = snprintfz(value_string, value_string_len, "%0.6" NETDATA_DOUBLE_MODIFIER, (NETDATA_DOUBLE) value);
+        else                                 len = snprintfz(value_string, value_string_len, "%0.7" NETDATA_DOUBLE_MODIFIER, (NETDATA_DOUBLE) value);
+
+        if(unlikely(trim_zeros)) {
+            int l;
+            // remove trailing zeros from the decimal part
+            for(l = len - 1; l > lstop; l--) {
+                if(likely(value_string[l] == '0')) {
+                    value_string[l] = '\0';
+                    len--;
+                }
+
+                else if(unlikely(value_string[l] == '.')) {
+                    value_string[l] = '\0';
+                    len--;
+                    break;
+                }
+
+                else
+                    break;
+            }
+        }
+
+        if(unlikely(len <= 0)) len = 1;
+        snprintfz(&value_string[len], value_string_len - len, "%s%s", separator, units);
+    }
+    else {
+        if(precision > 50) precision = 50;
+        snprintfz(value_string, value_string_len, "%0.*" NETDATA_DOUBLE_MODIFIER "%s%s", precision, (NETDATA_DOUBLE) value, separator, units);
+    }
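+
+    // in either branch, value_string now holds the formatted number,
+    // followed by the separator and the units suffix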
+ + return value_string; +} + +typedef enum badge_units_format { + UNITS_FORMAT_NONE, + UNITS_FORMAT_SECONDS, + UNITS_FORMAT_SECONDS_AGO, + UNITS_FORMAT_MINUTES, + UNITS_FORMAT_MINUTES_AGO, + UNITS_FORMAT_HOURS, + UNITS_FORMAT_HOURS_AGO, + UNITS_FORMAT_ONOFF, + UNITS_FORMAT_UPDOWN, + UNITS_FORMAT_OKERROR, + UNITS_FORMAT_OKFAILED, + UNITS_FORMAT_EMPTY, + UNITS_FORMAT_PERCENT +} UNITS_FORMAT; + + +static struct units_formatter { + const char *units; + uint32_t hash; + UNITS_FORMAT format; +} badge_units_formatters[] = { + { "seconds", 0, UNITS_FORMAT_SECONDS }, + { "seconds ago", 0, UNITS_FORMAT_SECONDS_AGO }, + { "minutes", 0, UNITS_FORMAT_MINUTES }, + { "minutes ago", 0, UNITS_FORMAT_MINUTES_AGO }, + { "hours", 0, UNITS_FORMAT_HOURS }, + { "hours ago", 0, UNITS_FORMAT_HOURS_AGO }, + { "on/off", 0, UNITS_FORMAT_ONOFF }, + { "on-off", 0, UNITS_FORMAT_ONOFF }, + { "onoff", 0, UNITS_FORMAT_ONOFF }, + { "up/down", 0, UNITS_FORMAT_UPDOWN }, + { "up-down", 0, UNITS_FORMAT_UPDOWN }, + { "updown", 0, UNITS_FORMAT_UPDOWN }, + { "ok/error", 0, UNITS_FORMAT_OKERROR }, + { "ok-error", 0, UNITS_FORMAT_OKERROR }, + { "okerror", 0, UNITS_FORMAT_OKERROR }, + { "ok/failed", 0, UNITS_FORMAT_OKFAILED }, + { "ok-failed", 0, UNITS_FORMAT_OKFAILED }, + { "okfailed", 0, UNITS_FORMAT_OKFAILED }, + { "empty", 0, UNITS_FORMAT_EMPTY }, + { "null", 0, UNITS_FORMAT_EMPTY }, + { "percentage", 0, UNITS_FORMAT_PERCENT }, + { "percent", 0, UNITS_FORMAT_PERCENT }, + { "pcent", 0, UNITS_FORMAT_PERCENT }, + + // terminator + { NULL, 0, UNITS_FORMAT_NONE } +}; + +char *format_value_and_unit(char *value_string, size_t value_string_len, + NETDATA_DOUBLE value, const char *units, int precision) { + static int max = -1; + int i; + + if(unlikely(max == -1)) { + for(i = 0; badge_units_formatters[i].units; i++) + badge_units_formatters[i].hash = simple_hash(badge_units_formatters[i].units); + + max = i; + } + + if(unlikely(!units)) units = ""; + uint32_t hash_units = simple_hash(units); + + UNITS_FORMAT format = UNITS_FORMAT_NONE; + for(i = 0; i < max; i++) { + struct units_formatter *ptr = &badge_units_formatters[i]; + + if(hash_units == ptr->hash && !strcmp(units, ptr->units)) { + format = ptr->format; + break; + } + } + + if(unlikely(format == UNITS_FORMAT_SECONDS || format == UNITS_FORMAT_SECONDS_AGO)) { + if(value == 0.0) { + snprintfz(value_string, value_string_len, "%s", "now"); + return value_string; + } + else if(isnan(value) || isinf(value)) { + snprintfz(value_string, value_string_len, "%s", "undefined"); + return value_string; + } + + const char *suffix = (format == UNITS_FORMAT_SECONDS_AGO)?" ago":""; + + size_t s = (size_t)value; + size_t d = s / 86400; + s = s % 86400; + + size_t h = s / 3600; + s = s % 3600; + + size_t m = s / 60; + s = s % 60; + + if(d) + snprintfz(value_string, value_string_len, "%zu %s %02zu:%02zu:%02zu%s", d, (d == 1)?"day":"days", h, m, s, suffix); + else + snprintfz(value_string, value_string_len, "%02zu:%02zu:%02zu%s", h, m, s, suffix); + + return value_string; + } + + else if(unlikely(format == UNITS_FORMAT_MINUTES || format == UNITS_FORMAT_MINUTES_AGO)) { + if(value == 0.0) { + snprintfz(value_string, value_string_len, "%s", "now"); + return value_string; + } + else if(isnan(value) || isinf(value)) { + snprintfz(value_string, value_string_len, "%s", "undefined"); + return value_string; + } + + const char *suffix = (format == UNITS_FORMAT_MINUTES_AGO)?" 
ago":""; + + size_t m = (size_t)value; + size_t d = m / (60 * 24); + m = m % (60 * 24); + + size_t h = m / 60; + m = m % 60; + + if(d) + snprintfz(value_string, value_string_len, "%zud %02zuh %02zum%s", d, h, m, suffix); + else + snprintfz(value_string, value_string_len, "%zuh %zum%s", h, m, suffix); + + return value_string; + } + + else if(unlikely(format == UNITS_FORMAT_HOURS || format == UNITS_FORMAT_HOURS_AGO)) { + if(value == 0.0) { + snprintfz(value_string, value_string_len, "%s", "now"); + return value_string; + } + else if(isnan(value) || isinf(value)) { + snprintfz(value_string, value_string_len, "%s", "undefined"); + return value_string; + } + + const char *suffix = (format == UNITS_FORMAT_HOURS_AGO)?" ago":""; + + size_t h = (size_t)value; + size_t d = h / 24; + h = h % 24; + + if(d) + snprintfz(value_string, value_string_len, "%zud %zuh%s", d, h, suffix); + else + snprintfz(value_string, value_string_len, "%zuh%s", h, suffix); + + return value_string; + } + + else if(unlikely(format == UNITS_FORMAT_ONOFF)) { + snprintfz(value_string, value_string_len, "%s", (value != 0.0)?"on":"off"); + return value_string; + } + + else if(unlikely(format == UNITS_FORMAT_UPDOWN)) { + snprintfz(value_string, value_string_len, "%s", (value != 0.0)?"up":"down"); + return value_string; + } + + else if(unlikely(format == UNITS_FORMAT_OKERROR)) { + snprintfz(value_string, value_string_len, "%s", (value != 0.0)?"ok":"error"); + return value_string; + } + + else if(unlikely(format == UNITS_FORMAT_OKFAILED)) { + snprintfz(value_string, value_string_len, "%s", (value != 0.0)?"ok":"failed"); + return value_string; + } + + else if(unlikely(format == UNITS_FORMAT_EMPTY)) + units = ""; + + else if(unlikely(format == UNITS_FORMAT_PERCENT)) + units = "%"; + + if(unlikely(isnan(value) || isinf(value))) { + strcpy(value_string, "-"); + return value_string; + } + + return format_value_with_precision_and_unit(value_string, value_string_len, value, units, precision); +} + +static struct badge_color { + const char *name; + uint32_t hash; + const char *color; +} badge_colors[] = { + + // colors from: + // https://github.com/badges/shields/blob/master/colorscheme.json + + { "brightgreen", 0, "4c1" }, + { "green", 0, "97CA00" }, + { "yellow", 0, "dfb317" }, + { "yellowgreen", 0, "a4a61d" }, + { "orange", 0, "fe7d37" }, + { "red", 0, "e05d44" }, + { "blue", 0, "007ec6" }, + { "grey", 0, "555" }, + { "gray", 0, "555" }, + { "lightgrey", 0, "9f9f9f" }, + { "lightgray", 0, "9f9f9f" }, + + // terminator + { NULL, 0, NULL } +}; + +static inline const char *color_map(const char *color, const char *def) { + static int max = -1; + int i; + + if(unlikely(max == -1)) { + for(i = 0; badge_colors[i].name ;i++) + badge_colors[i].hash = simple_hash(badge_colors[i].name); + + max = i; + } + + uint32_t hash = simple_hash(color); + + for(i = 0; i < max; i++) { + struct badge_color *ptr = &badge_colors[i]; + + if(hash == ptr->hash && !strcmp(color, ptr->name)) + return ptr->color; + } + + return def; +} + +typedef enum color_comparison { + COLOR_COMPARE_EQUAL, + COLOR_COMPARE_NOTEQUAL, + COLOR_COMPARE_LESS, + COLOR_COMPARE_LESSEQUAL, + COLOR_COMPARE_GREATER, + COLOR_COMPARE_GREATEREQUAL, +} BADGE_COLOR_COMPARISON; + +static inline void calc_colorz(const char *color, char *final, size_t len, NETDATA_DOUBLE value) { + if(isnan(value) || isinf(value)) + value = NAN; + + char color_buffer[256 + 1] = ""; + char value_buffer[256 + 1] = ""; + BADGE_COLOR_COMPARISON comparison = COLOR_COMPARE_GREATER; + + // example input: + // 
colormin|color:null... + + const char *c = color; + while(*c) { + char *dc = color_buffer, *dv = NULL; + size_t ci = 0, vi = 0; + + const char *t = c; + + while(*t && *t != '|') { + switch(*t) { + case '!': + if(t[1] == '=') t++; + comparison = COLOR_COMPARE_NOTEQUAL; + dv = value_buffer; + break; + + case '=': + case ':': + comparison = COLOR_COMPARE_EQUAL; + dv = value_buffer; + break; + + case '}': + case ')': + case '>': + if(t[1] == '=') { + comparison = COLOR_COMPARE_GREATEREQUAL; + t++; + } + else + comparison = COLOR_COMPARE_GREATER; + dv = value_buffer; + break; + + case '{': + case '(': + case '<': + if(t[1] == '=') { + comparison = COLOR_COMPARE_LESSEQUAL; + t++; + } + else if(t[1] == '>' || t[1] == ')' || t[1] == '}') { + comparison = COLOR_COMPARE_NOTEQUAL; + t++; + } + else + comparison = COLOR_COMPARE_LESS; + dv = value_buffer; + break; + + default: + if(dv) { + if(vi < 256) { + vi++; + *dv++ = *t; + } + } + else { + if(ci < 256) { + ci++; + *dc++ = *t; + } + } + break; + } + + t++; + } + + // prepare for next iteration + if(*t == '|') t++; + c = t; + + // do the math + *dc = '\0'; + if(dv) { + *dv = '\0'; + NETDATA_DOUBLE v; + + if(!*value_buffer || !strcmp(value_buffer, "null")) { + v = NAN; + } + else { + v = str2l(value_buffer); + if(isnan(v) || isinf(v)) + v = NAN; + } + + if(unlikely(isnan(value) || isnan(v))) { + if(isnan(value) && isnan(v)) + break; + } + else { + if (unlikely(comparison == COLOR_COMPARE_LESS && isless(value, v))) break; + else if (unlikely(comparison == COLOR_COMPARE_LESSEQUAL && islessequal(value, v))) break; + else if (unlikely(comparison == COLOR_COMPARE_GREATER && isgreater(value, v))) break; + else if (unlikely(comparison == COLOR_COMPARE_GREATEREQUAL && isgreaterequal(value, v))) break; + else if (unlikely(comparison == COLOR_COMPARE_EQUAL && !islessgreater(value, v))) break; + else if (unlikely(comparison == COLOR_COMPARE_NOTEQUAL && islessgreater(value, v))) break; + } + } + else + break; + } + + const char *b; + if(color_buffer[0]) + b = color_buffer; + else + b = color; + + strncpyz(final, b, len); +} + +// value + units +#define VALUE_STRING_SIZE 100 + +// label +#define LABEL_STRING_SIZE 200 + +// colors +#define COLOR_STRING_SIZE 100 + +static inline int allowed_hexa_char(char x) { + return ( (x >= '0' && x <= '9') || + (x >= 'a' && x <= 'f') || + (x >= 'A' && x <= 'F') + ); +} + +static int html_color_check(const char *str) { + int i = 0; + while(str[i]) { + if(!allowed_hexa_char(str[i])) + return 0; + if(unlikely(i >= 6)) + return 0; + i++; + } + // want to allow either RGB or RRGGBB + return ( i == 6 || i == 3 ); +} + +// Will parse color arg as #RRGGBB or #RGB or one of the colors +// from color_map hash table +// if parsing fails (argument error) it will return default color +// given as default parameter (def) +// in any case it will return either color in "RRGGBB" or "RGB" format as string +// or whatever is given as def (without checking - caller responsible to give sensible +// safely escaped default) as default if it fails +// in any case this function must always return something we can put directly in XML +// so no escaping is necessary anymore (with exception of default where caller is responsible) +// to give sensible default +#define BADGE_SVG_COLOR_ARG_MAXLEN 20 + +static const char *parse_color_argument(const char *arg, const char *def) +{ + if( !arg ) + return def; + size_t len = strnlen(arg, BADGE_SVG_COLOR_ARG_MAXLEN); + if( len < 2 || len >= BADGE_SVG_COLOR_ARG_MAXLEN ) + return def; + if( html_color_check(arg) ) + 
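+        // the argument is already a valid 3- or 6-digit hex color; use it as-is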
return arg; + return color_map(arg, def); +} + +static void buffer_svg(BUFFER *wb, const char *label, + NETDATA_DOUBLE value, const char *units, const char *label_color, const char *value_color, int precision, int scale, uint32_t options, int fixed_width_lbl, int fixed_width_val, const char* text_color_lbl, const char* text_color_val) { + char value_color_buffer[COLOR_STRING_SIZE + 1] + , value_string[VALUE_STRING_SIZE + 1] + , label_escaped[LABEL_STRING_SIZE + 1] + , value_escaped[VALUE_STRING_SIZE + 1]; + + const char *label_color_parsed; + const char *value_color_parsed; + + double label_width = (double)fixed_width_lbl, value_width = (double)fixed_width_val, total_width; + double height = 20.0, font_size = 11.0, text_offset = 5.8, round_corner = 3.0; + + if(scale < 100) scale = 100; + + if(unlikely(!value_color || !*value_color)) + value_color = (isnan(value) || isinf(value))?"999":"4c1"; + + calc_colorz(value_color, value_color_buffer, COLOR_STRING_SIZE, value); + format_value_and_unit(value_string, VALUE_STRING_SIZE, (options & RRDR_OPTION_DISPLAY_ABS)? fabsndd(value):value, units, precision); + + if(fixed_width_lbl <= 0 || fixed_width_val <= 0) { + label_width = verdana11_width(label, font_size) + (BADGE_HORIZONTAL_PADDING * 2); + value_width = verdana11_width(value_string, font_size) + (BADGE_HORIZONTAL_PADDING * 2); + } + total_width = label_width + value_width; + + escape_xmlz(label_escaped, label, LABEL_STRING_SIZE); + escape_xmlz(value_escaped, value_string, VALUE_STRING_SIZE); + + label_color_parsed = parse_color_argument(label_color, "555"); + value_color_parsed = parse_color_argument(value_color_buffer, "555"); + + wb->content_type = CT_IMAGE_SVG_XML; + + total_width = total_width * scale / 100.0; + height = height * scale / 100.0; + font_size = font_size * scale / 100.0; + text_offset = text_offset * scale / 100.0; + label_width = label_width * scale / 100.0; + value_width = value_width * scale / 100.0; + round_corner = round_corner * scale / 100.0; + + // svg template from: + // https://raw.githubusercontent.com/badges/shields/master/templates/flat-template.svg + buffer_sprintf(wb, + "" + "" + "" + "" + "" + "" + "" + "" + "" + "", + total_width, height, + total_width, height, round_corner, + label_width, height, label_color_parsed); // 0 && fixed_width_val > 0) { + buffer_sprintf(wb, + "" + "" + "", + label_width, height); // ", + label_width, value_width, height, value_color_parsed); + + if(fixed_width_lbl > 0 && fixed_width_val > 0) { + buffer_sprintf(wb, + "" + "" + "", + label_width, value_width, height); + } + + buffer_sprintf(wb, + "" + "" + "" + "%s" + "%s" + "%s" + "%s" + "", + total_width, height, + font_size, + label_width / 2, ceil(height - text_offset), label_escaped, + label_width / 2, ceil(height - text_offset - 1.0), parse_color_argument(text_color_lbl, "fff"), label_escaped, + label_width + value_width / 2 -1, ceil(height - text_offset), value_escaped, + label_width + value_width / 2 -1, ceil(height - text_offset - 1.0), parse_color_argument(text_color_val, "fff"), value_escaped); + + if(fixed_width_lbl <= 0 || fixed_width_val <= 0){ + buffer_sprintf(wb, + "", + BADGE_HORIZONTAL_PADDING); + } + buffer_sprintf(wb, ""); +} + +#define BADGE_URL_ARG_LBL_COLOR "text_color_lbl" +#define BADGE_URL_ARG_VAL_COLOR "text_color_val" + +int api_v1_badge(RRDHOST *host, struct web_client *w, char *url) { + int ret = HTTP_RESP_BAD_REQUEST; + buffer_flush(w->response.data); + + BUFFER *dimensions = NULL; + + const char *chart = NULL + , *before_str = NULL + , *after_str = 
NULL + , *points_str = NULL + , *multiply_str = NULL + , *divide_str = NULL + , *label = NULL + , *units = NULL + , *label_color = NULL + , *value_color = NULL + , *refresh_str = NULL + , *precision_str = NULL + , *scale_str = NULL + , *alarm = NULL + , *fixed_width_lbl_str = NULL + , *fixed_width_val_str = NULL + , *text_color_lbl_str = NULL + , *text_color_val_str = NULL + , *group_options = NULL; + + int group = RRDR_GROUPING_AVERAGE; + uint32_t options = 0x00000000; + + const RRDCALC_ACQUIRED *rca = NULL; + RRDCALC *rc = NULL; + RRDSET *st = NULL; + + while(url) { + char *value = strsep_skip_consecutive_separators(&url, "&"); + if(!value || !*value) continue; + + char *name = strsep_skip_consecutive_separators(&value, "="); + if(!name || !*name) continue; + if(!value || !*value) continue; + + netdata_log_debug(D_WEB_CLIENT, "%llu: API v1 badge.svg query param '%s' with value '%s'", w->id, name, value); + + // name and value are now the parameters + // they are not null and not empty + + if(!strcmp(name, "chart")) chart = value; + else if(!strcmp(name, "dimension") || !strcmp(name, "dim") || !strcmp(name, "dimensions") || !strcmp(name, "dims")) { + if(!dimensions) + dimensions = buffer_create(100, &netdata_buffers_statistics.buffers_api); + + buffer_strcat(dimensions, "|"); + buffer_strcat(dimensions, value); + } + else if(!strcmp(name, "after")) after_str = value; + else if(!strcmp(name, "before")) before_str = value; + else if(!strcmp(name, "points")) points_str = value; + else if(!strcmp(name, "group_options")) group_options = value; + else if(!strcmp(name, "group")) { + group = time_grouping_parse(value, RRDR_GROUPING_AVERAGE); + } + else if(!strcmp(name, "options")) { + options |= rrdr_options_parse(value); + } + else if(!strcmp(name, "label")) label = value; + else if(!strcmp(name, "units")) units = value; + else if(!strcmp(name, "label_color")) label_color = value; + else if(!strcmp(name, "value_color")) value_color = value; + else if(!strcmp(name, "multiply")) multiply_str = value; + else if(!strcmp(name, "divide")) divide_str = value; + else if(!strcmp(name, "refresh")) refresh_str = value; + else if(!strcmp(name, "precision")) precision_str = value; + else if(!strcmp(name, "scale")) scale_str = value; + else if(!strcmp(name, "fixed_width_lbl")) fixed_width_lbl_str = value; + else if(!strcmp(name, "fixed_width_val")) fixed_width_val_str = value; + else if(!strcmp(name, "alarm")) alarm = value; + else if(!strcmp(name, BADGE_URL_ARG_LBL_COLOR)) text_color_lbl_str = value; + else if(!strcmp(name, BADGE_URL_ARG_VAL_COLOR)) text_color_val_str = value; + } + + int fixed_width_lbl = -1; + int fixed_width_val = -1; + + if(fixed_width_lbl_str && *fixed_width_lbl_str + && fixed_width_val_str && *fixed_width_val_str) { + fixed_width_lbl = str2i(fixed_width_lbl_str); + fixed_width_val = str2i(fixed_width_val_str); + } + + if(!chart || !*chart) { + buffer_no_cacheable(w->response.data); + buffer_sprintf(w->response.data, "No chart id is given at the request."); + goto cleanup; + } + + int scale = (scale_str && *scale_str)?str2i(scale_str):100; + + st = rrdset_find(host, chart); + if(!st) st = rrdset_find_byname(host, chart); + if(!st) { + buffer_no_cacheable(w->response.data); + buffer_svg(w->response.data, "chart not found", NAN, "", NULL, NULL, -1, scale, 0, -1, -1, NULL, NULL); + ret = HTTP_RESP_OK; + goto cleanup; + } + st->last_accessed_time_s = now_realtime_sec(); + + if(alarm) { + rca = rrdcalc_from_rrdset_get(st, alarm); + rc = rrdcalc_acquired_to_rrdcalc(rca); + + if (!rc) { + 
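+            // the alarm name did not match any alarm attached to this chart;
+            // reply with a valid "alarm not found" badge instead of an HTTP error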
buffer_no_cacheable(w->response.data); + buffer_svg(w->response.data, "alarm not found", NAN, "", NULL, NULL, -1, scale, 0, -1, -1, NULL, NULL); + ret = HTTP_RESP_OK; + goto cleanup; + } + } + + long long multiply = (multiply_str && *multiply_str )?str2l(multiply_str):1; + long long divide = (divide_str && *divide_str )?str2l(divide_str):1; + long long before = (before_str && *before_str )?str2l(before_str):0; + long long after = (after_str && *after_str )?str2l(after_str):-st->update_every; + int points = (points_str && *points_str )?str2i(points_str):1; + int precision = (precision_str && *precision_str)?str2i(precision_str):-1; + + if(!multiply) multiply = 1; + if(!divide) divide = 1; + + int refresh = 0; + if(refresh_str && *refresh_str) { + if(!strcmp(refresh_str, "auto")) { + if(rc) refresh = rc->config.update_every; + else if(options & RRDR_OPTION_NOT_ALIGNED) + refresh = st->update_every; + else { + refresh = (int)(before - after); + if(refresh < 0) refresh = -refresh; + } + } + else { + refresh = str2i(refresh_str); + if(refresh < 0) refresh = -refresh; + } + } + + if(!label) { + if(alarm) { + char *s = (char *)alarm; + while(*s) { + if(*s == '_') *s = ' '; + s++; + } + label = alarm; + } + else if(dimensions) { + const char *dim = buffer_tostring(dimensions); + if(*dim == '|') dim++; + label = dim; + } + else + label = rrdset_name(st); + } + if(!units) { + if(alarm) { + if(rc->config.units) + units = rrdcalc_units(rc); + else + units = ""; + } + else if(options & RRDR_OPTION_PERCENTAGE) + units = "%"; + else + units = rrdset_units(st); + } + + netdata_log_debug(D_WEB_CLIENT, "%llu: API command 'badge.svg' for chart '%s', alarm '%s', dimensions '%s', after '%lld', before '%lld', points '%d', group '%d', options '0x%08x'" + , w->id + , chart + , alarm?alarm:"" + , (dimensions)?buffer_tostring(dimensions):"" + , after + , before + , points + , group + , options + ); + + if(rc) { + if (refresh > 0) { + buffer_sprintf(w->response.header, "Refresh: %d\r\n", refresh); + w->response.data->date = now_realtime_sec(); + w->response.data->expires = w->response.data->date + refresh; + buffer_cacheable(w->response.data); + } + else + buffer_no_cacheable(w->response.data); + + if(!value_color) { + switch(rc->status) { + case RRDCALC_STATUS_CRITICAL: + value_color = "red"; + break; + + case RRDCALC_STATUS_WARNING: + value_color = "orange"; + break; + + case RRDCALC_STATUS_CLEAR: + value_color = "brightgreen"; + break; + + case RRDCALC_STATUS_UNDEFINED: + value_color = "lightgrey"; + break; + + case RRDCALC_STATUS_UNINITIALIZED: + value_color = "#000"; + break; + + default: + value_color = "grey"; + break; + } + } + + buffer_svg(w->response.data, + label, + (isnan(rc->value)||isinf(rc->value)) ? rc->value : rc->value * multiply / divide, + units, + label_color, + value_color, + precision, + scale, + options, + fixed_width_lbl, + fixed_width_val, + text_color_lbl_str, + text_color_val_str + ); + ret = HTTP_RESP_OK; + } + else { + time_t latest_timestamp = 0; + int value_is_null = 1; + NETDATA_DOUBLE n = NAN; + ret = HTTP_RESP_INTERNAL_SERVER_ERROR; + + // if the collected value is too old, don't calculate its value + if (rrdset_last_entry_s(st) >= (now_realtime_sec() - (st->update_every * gap_when_lost_iterations_above))) + ret = rrdset2value_api_v1(st, w->response.data, &n, + (dimensions) ? 
buffer_tostring(dimensions) : NULL, + points, after, before, group, group_options, 0, options, + NULL, &latest_timestamp, + NULL, NULL, NULL, + &value_is_null, NULL, 0, 0, + QUERY_SOURCE_API_BADGE, STORAGE_PRIORITY_NORMAL); + + // if the value cannot be calculated, show empty badge + if (ret != HTTP_RESP_OK) { + buffer_no_cacheable(w->response.data); + value_is_null = 1; + n = 0; + ret = HTTP_RESP_OK; + } + else if (refresh > 0) { + buffer_sprintf(w->response.header, "Refresh: %d\r\n", refresh); + w->response.data->expires = now_realtime_sec() + refresh; + } + else buffer_no_cacheable(w->response.data); + + // render the badge + buffer_svg(w->response.data, + label, + (value_is_null)?NAN:(n * multiply / divide), + units, + label_color, + value_color, + precision, + scale, + options, + fixed_width_lbl, + fixed_width_val, + text_color_lbl_str, + text_color_val_str + ); + } + +cleanup: + rrdcalc_from_rrdset_release(st, rca); + buffer_free(dimensions); + return ret; +} diff --git a/src/web/api/v1/api_v1_calls.h b/src/web/api/v1/api_v1_calls.h new file mode 100644 index 000000000..36a0605cb --- /dev/null +++ b/src/web/api/v1/api_v1_calls.h @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_API_V1_CALLS_H +#define NETDATA_API_V1_CALLS_H + +#include "../web_api_v1.h" + +int api_v1_info(RRDHOST *host, struct web_client *w, char *url); + +int api_v1_config(RRDHOST *host, struct web_client *w, char *url); + +int api_v1_registry(RRDHOST *host, struct web_client *w, char *url); + +int api_v1_manage(RRDHOST *host, struct web_client *w, char *url); + +int api_v1_data(RRDHOST *host, struct web_client *w, char *url); +int api_v1_chart(RRDHOST *host, struct web_client *w, char *url); +int api_v1_charts(RRDHOST *host, struct web_client *w, char *url); + +int api_v1_context(RRDHOST *host, struct web_client *w, char *url); +int api_v1_contexts(RRDHOST *host, struct web_client *w, char *url); + +int api_v1_alarms(RRDHOST *host, struct web_client *w, char *url); +int api_v1_alarms_values(RRDHOST *host, struct web_client *w, char *url); +int api_v1_alarm_count(RRDHOST *host, struct web_client *w, char *url); +int api_v1_alarm_log(RRDHOST *host, struct web_client *w, char *url); +int api_v1_variable(RRDHOST *host, struct web_client *w, char *url); +int api_v1_alarm_variables(RRDHOST *host, struct web_client *w, char *url); + +int api_v1_dbengine_stats(RRDHOST *host, struct web_client *w, char *url); +int api_v1_ml_info(RRDHOST *host, struct web_client *w, char *url); +int api_v1_aclk(RRDHOST *host, struct web_client *w, char *url); + +int api_v1_functions(RRDHOST *host, struct web_client *w, char *url); +int api_v1_function(RRDHOST *host, struct web_client *w, char *url); + +int api_v1_metric_correlations(RRDHOST *host, struct web_client *w, char *url); +int api_v1_weights(RRDHOST *host, struct web_client *w, char *url); + +int api_v1_badge(RRDHOST *host, struct web_client *w, char *url); +int api_v1_allmetrics(RRDHOST *host, struct web_client *w, char *url); + +// common library calls +int api_v1_single_chart_helper(RRDHOST *host, struct web_client *w, char *url, void callback(RRDSET *st, BUFFER *buf)); +void api_v1_management_init(void); + +#endif //NETDATA_API_V1_CALLS_H diff --git a/src/web/api/v1/api_v1_charts.c b/src/web/api/v1/api_v1_charts.c new file mode 100644 index 000000000..afc67af68 --- /dev/null +++ b/src/web/api/v1/api_v1_charts.c @@ -0,0 +1,64 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v1_calls.h" + +int api_v1_single_chart_helper(RRDHOST 
*host, struct web_client *w, char *url, void callback(RRDSET *st, BUFFER *buf)) { + int ret = HTTP_RESP_BAD_REQUEST; + char *chart = NULL; + + buffer_flush(w->response.data); + + while(url) { + char *value = strsep_skip_consecutive_separators(&url, "&"); + if(!value || !*value) continue; + + char *name = strsep_skip_consecutive_separators(&value, "="); + if(!name || !*name) continue; + if(!value || !*value) continue; + + // name and value are now the parameters + // they are not null and not empty + + if(!strcmp(name, "chart")) chart = value; + //else { + /// buffer_sprintf(w->response.data, "Unknown parameter '%s' in request.", name); + // goto cleanup; + //} + } + + if(!chart || !*chart) { + buffer_sprintf(w->response.data, "No chart id is given at the request."); + goto cleanup; + } + + RRDSET *st = rrdset_find(host, chart); + if(!st) st = rrdset_find_byname(host, chart); + if(!st) { + buffer_strcat(w->response.data, "Chart is not found: "); + buffer_strcat_htmlescape(w->response.data, chart); + ret = HTTP_RESP_NOT_FOUND; + goto cleanup; + } + + w->response.data->content_type = CT_APPLICATION_JSON; + st->last_accessed_time_s = now_realtime_sec(); + callback(st, w->response.data); + return HTTP_RESP_OK; + +cleanup: + return ret; +} + +int api_v1_charts(RRDHOST *host, struct web_client *w, char *url) { + (void)url; + + buffer_flush(w->response.data); + w->response.data->content_type = CT_APPLICATION_JSON; + charts2json(host, w->response.data); + return HTTP_RESP_OK; +} + +int api_v1_chart(RRDHOST *host, struct web_client *w, char *url) { + return api_v1_single_chart_helper(host, w, url, rrd_stats_api_v1_chart); +} + diff --git a/src/web/api/v1/api_v1_config.c b/src/web/api/v1/api_v1_config.c new file mode 100644 index 000000000..69bcde760 --- /dev/null +++ b/src/web/api/v1/api_v1_config.c @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "web/api/v2/api_v2_calls.h" + +int api_v1_config(RRDHOST *host, struct web_client *w, char *url __maybe_unused) { + char *action = "tree"; + char *path = "/"; + char *id = NULL; + char *add_name = NULL; + int timeout = 120; + + while(url) { + char *value = strsep_skip_consecutive_separators(&url, "&"); + if(!value || !*value) continue; + + char *name = strsep_skip_consecutive_separators(&value, "="); + if(!name || !*name) continue; + if(!value || !*value) continue; + + // name and value are now the parameters + // they are not null and not empty + + if(!strcmp(name, "action")) + action = value; + else if(!strcmp(name, "path")) + path = value; + else if(!strcmp(name, "id")) + id = value; + else if(!strcmp(name, "name")) + add_name = value; + else if(!strcmp(name, "timeout")) { + timeout = (int)strtol(value, NULL, 10); + if(timeout < 10) + timeout = 10; + } + } + + char transaction[UUID_COMPACT_STR_LEN]; + uuid_unparse_lower_compact(w->transaction, transaction); + + size_t len = strlen(action) + (id ? strlen(id) : 0) + strlen(path) + (add_name ? 
strlen(add_name) : 0) + 100; + + char cmd[len]; + if(strcmp(action, "tree") == 0) + snprintfz(cmd, sizeof(cmd), PLUGINSD_FUNCTION_CONFIG " tree '%s' '%s'", path, id?id:""); + else { + DYNCFG_CMDS c = dyncfg_cmds2id(action); + if(!id || !*id || !dyncfg_is_valid_id(id)) { + rrd_call_function_error(w->response.data, "Invalid id", HTTP_RESP_BAD_REQUEST); + return HTTP_RESP_BAD_REQUEST; + } + + if(c == DYNCFG_CMD_NONE) { + rrd_call_function_error(w->response.data, "Invalid action", HTTP_RESP_BAD_REQUEST); + return HTTP_RESP_BAD_REQUEST; + } + + if(c == DYNCFG_CMD_ADD || c == DYNCFG_CMD_USERCONFIG || c == DYNCFG_CMD_TEST) { + if(c == DYNCFG_CMD_TEST && (!add_name || !*add_name)) { + // backwards compatibility for TEST without a name + char *colon = strrchr(id, ':'); + if(colon) { + *colon = '\0'; + add_name = ++colon; + } + else + add_name = "test"; + } + + if(!add_name || !*add_name || !dyncfg_is_valid_id(add_name)) { + rrd_call_function_error(w->response.data, "Invalid name", HTTP_RESP_BAD_REQUEST); + return HTTP_RESP_BAD_REQUEST; + } + snprintfz(cmd, sizeof(cmd), PLUGINSD_FUNCTION_CONFIG " %s %s %s", id, dyncfg_id2cmd_one(c), add_name); + } + else + snprintfz(cmd, sizeof(cmd), PLUGINSD_FUNCTION_CONFIG " %s %s", id, dyncfg_id2cmd_one(c)); + } + + CLEAN_BUFFER *source = buffer_create(100, NULL); + web_client_api_request_vX_source_to_buffer(w, source); + + buffer_flush(w->response.data); + int code = rrd_function_run(host, w->response.data, timeout, w->access, cmd, + true, transaction, + NULL, NULL, + web_client_progress_functions_update, w, + web_client_interrupt_callback, w, + w->payload, buffer_tostring(source), false); + + return code; +} diff --git a/src/web/api/v1/api_v1_context.c b/src/web/api/v1/api_v1_context.c new file mode 100644 index 000000000..5b7baf80c --- /dev/null +++ b/src/web/api/v1/api_v1_context.c @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v1_calls.h" + +int api_v1_context(RRDHOST *host, struct web_client *w, char *url) { + char *context = NULL; + RRDCONTEXT_TO_JSON_OPTIONS options = RRDCONTEXT_OPTION_NONE; + time_t after = 0, before = 0; + const char *chart_label_key = NULL, *chart_labels_filter = NULL; + BUFFER *dimensions = NULL; + + buffer_flush(w->response.data); + + while(url) { + char *value = strsep_skip_consecutive_separators(&url, "&"); + if(!value || !*value) continue; + + char *name = strsep_skip_consecutive_separators(&value, "="); + if(!name || !*name) continue; + if(!value || !*value) continue; + + // name and value are now the parameters + // they are not null and not empty + + if(!strcmp(name, "context") || !strcmp(name, "ctx")) context = value; + else if(!strcmp(name, "after")) after = str2l(value); + else if(!strcmp(name, "before")) before = str2l(value); + else if(!strcmp(name, "options")) options = rrdcontext_to_json_parse_options(value); + else if(!strcmp(name, "chart_label_key")) chart_label_key = value; + else if(!strcmp(name, "chart_labels_filter")) chart_labels_filter = value; + else if(!strcmp(name, "dimension") || !strcmp(name, "dim") || !strcmp(name, "dimensions") || !strcmp(name, "dims")) { + if(!dimensions) dimensions = buffer_create(100, &netdata_buffers_statistics.buffers_api); + buffer_strcat(dimensions, "|"); + buffer_strcat(dimensions, value); + } + } + + if(!context || !*context) { + buffer_sprintf(w->response.data, "No context is given at the request."); + return HTTP_RESP_BAD_REQUEST; + } + + SIMPLE_PATTERN *chart_label_key_pattern = NULL; + SIMPLE_PATTERN *chart_labels_filter_pattern = NULL; + 
SIMPLE_PATTERN *chart_dimensions_pattern = NULL; + + if(chart_label_key) + chart_label_key_pattern = simple_pattern_create(chart_label_key, ",|\t\r\n\f\v", SIMPLE_PATTERN_EXACT, true); + + if(chart_labels_filter) + chart_labels_filter_pattern = simple_pattern_create(chart_labels_filter, ",|\t\r\n\f\v", SIMPLE_PATTERN_EXACT, + true); + + if(dimensions) { + chart_dimensions_pattern = simple_pattern_create(buffer_tostring(dimensions), ",|\t\r\n\f\v", + SIMPLE_PATTERN_EXACT, true); + buffer_free(dimensions); + } + + w->response.data->content_type = CT_APPLICATION_JSON; + int ret = rrdcontext_to_json(host, w->response.data, after, before, options, context, chart_label_key_pattern, chart_labels_filter_pattern, chart_dimensions_pattern); + + simple_pattern_free(chart_label_key_pattern); + simple_pattern_free(chart_labels_filter_pattern); + simple_pattern_free(chart_dimensions_pattern); + + return ret; +} diff --git a/src/web/api/v1/api_v1_contexts.c b/src/web/api/v1/api_v1_contexts.c new file mode 100644 index 000000000..90d376d47 --- /dev/null +++ b/src/web/api/v1/api_v1_contexts.c @@ -0,0 +1,61 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v1_calls.h" + +int api_v1_contexts(RRDHOST *host, struct web_client *w, char *url) { + RRDCONTEXT_TO_JSON_OPTIONS options = RRDCONTEXT_OPTION_NONE; + time_t after = 0, before = 0; + const char *chart_label_key = NULL, *chart_labels_filter = NULL; + BUFFER *dimensions = NULL; + + buffer_flush(w->response.data); + + while(url) { + char *value = strsep_skip_consecutive_separators(&url, "&"); + if(!value || !*value) continue; + + char *name = strsep_skip_consecutive_separators(&value, "="); + if(!name || !*name) continue; + if(!value || !*value) continue; + + // name and value are now the parameters + // they are not null and not empty + + if(!strcmp(name, "after")) after = str2l(value); + else if(!strcmp(name, "before")) before = str2l(value); + else if(!strcmp(name, "options")) options = rrdcontext_to_json_parse_options(value); + else if(!strcmp(name, "chart_label_key")) chart_label_key = value; + else if(!strcmp(name, "chart_labels_filter")) chart_labels_filter = value; + else if(!strcmp(name, "dimension") || !strcmp(name, "dim") || !strcmp(name, "dimensions") || !strcmp(name, "dims")) { + if(!dimensions) dimensions = buffer_create(100, &netdata_buffers_statistics.buffers_api); + buffer_strcat(dimensions, "|"); + buffer_strcat(dimensions, value); + } + } + + SIMPLE_PATTERN *chart_label_key_pattern = NULL; + SIMPLE_PATTERN *chart_labels_filter_pattern = NULL; + SIMPLE_PATTERN *chart_dimensions_pattern = NULL; + + if(chart_label_key) + chart_label_key_pattern = simple_pattern_create(chart_label_key, ",|\t\r\n\f\v", SIMPLE_PATTERN_EXACT, true); + + if(chart_labels_filter) + chart_labels_filter_pattern = simple_pattern_create(chart_labels_filter, ",|\t\r\n\f\v", SIMPLE_PATTERN_EXACT, + true); + + if(dimensions) { + chart_dimensions_pattern = simple_pattern_create(buffer_tostring(dimensions), ",|\t\r\n\f\v", + SIMPLE_PATTERN_EXACT, true); + buffer_free(dimensions); + } + + w->response.data->content_type = CT_APPLICATION_JSON; + int ret = rrdcontexts_to_json(host, w->response.data, after, before, options, chart_label_key_pattern, chart_labels_filter_pattern, chart_dimensions_pattern); + + simple_pattern_free(chart_label_key_pattern); + simple_pattern_free(chart_labels_filter_pattern); + simple_pattern_free(chart_dimensions_pattern); + + return ret; +} diff --git a/src/web/api/v1/api_v1_data.c b/src/web/api/v1/api_v1_data.c new file mode 100644 
index 000000000..30328ed3e --- /dev/null +++ b/src/web/api/v1/api_v1_data.c @@ -0,0 +1,246 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v1_calls.h" + +int api_v1_data(RRDHOST *host, struct web_client *w, char *url) { + netdata_log_debug(D_WEB_CLIENT, "%llu: API v1 data with URL '%s'", w->id, url); + + int ret = HTTP_RESP_BAD_REQUEST; + BUFFER *dimensions = NULL; + + buffer_flush(w->response.data); + + char *google_version = "0.6", + *google_reqId = "0", + *google_sig = "0", + *google_out = "json", + *responseHandler = NULL, + *outFileName = NULL; + + time_t last_timestamp_in_data = 0, google_timestamp = 0; + + char *chart = NULL; + char *before_str = NULL; + char *after_str = NULL; + char *group_time_str = NULL; + char *points_str = NULL; + char *timeout_str = NULL; + char *context = NULL; + char *chart_label_key = NULL; + char *chart_labels_filter = NULL; + char *group_options = NULL; + size_t tier = 0; + RRDR_TIME_GROUPING group = RRDR_GROUPING_AVERAGE; + DATASOURCE_FORMAT format = DATASOURCE_JSON; + RRDR_OPTIONS options = 0; + + while(url) { + char *value = strsep_skip_consecutive_separators(&url, "&"); + if(!value || !*value) continue; + + char *name = strsep_skip_consecutive_separators(&value, "="); + if(!name || !*name) continue; + if(!value || !*value) continue; + + netdata_log_debug(D_WEB_CLIENT, "%llu: API v1 data query param '%s' with value '%s'", w->id, name, value); + + // name and value are now the parameters + // they are not null and not empty + + if(!strcmp(name, "context")) context = value; + else if(!strcmp(name, "chart_label_key")) chart_label_key = value; + else if(!strcmp(name, "chart_labels_filter")) chart_labels_filter = value; + else if(!strcmp(name, "chart")) chart = value; + else if(!strcmp(name, "dimension") || !strcmp(name, "dim") || !strcmp(name, "dimensions") || !strcmp(name, "dims")) { + if(!dimensions) dimensions = buffer_create(100, &netdata_buffers_statistics.buffers_api); + buffer_strcat(dimensions, "|"); + buffer_strcat(dimensions, value); + } + else if(!strcmp(name, "show_dimensions")) options |= RRDR_OPTION_ALL_DIMENSIONS; + else if(!strcmp(name, "after")) after_str = value; + else if(!strcmp(name, "before")) before_str = value; + else if(!strcmp(name, "points")) points_str = value; + else if(!strcmp(name, "timeout")) timeout_str = value; + else if(!strcmp(name, "gtime")) group_time_str = value; + else if(!strcmp(name, "group_options")) group_options = value; + else if(!strcmp(name, "group")) { + group = time_grouping_parse(value, RRDR_GROUPING_AVERAGE); + } + else if(!strcmp(name, "format")) { + format = datasource_format_str_to_id(value); + } + else if(!strcmp(name, "options")) { + options |= rrdr_options_parse(value); + } + else if(!strcmp(name, "callback")) { + responseHandler = value; + } + else if(!strcmp(name, "filename")) { + outFileName = value; + } + else if(!strcmp(name, "tqx")) { + // parse Google Visualization API options + // https://developers.google.com/chart/interactive/docs/dev/implementing_data_source + char *tqx_name, *tqx_value; + + while(value) { + tqx_value = strsep_skip_consecutive_separators(&value, ";"); + if(!tqx_value || !*tqx_value) continue; + + tqx_name = strsep_skip_consecutive_separators(&tqx_value, ":"); + if(!tqx_name || !*tqx_name) continue; + if(!tqx_value || !*tqx_value) continue; + + if(!strcmp(tqx_name, "version")) + google_version = tqx_value; + else if(!strcmp(tqx_name, "reqId")) + google_reqId = tqx_value; + else if(!strcmp(tqx_name, "sig")) { + google_sig = tqx_value; + google_timestamp = 
strtoul(google_sig, NULL, 0); + } + else if(!strcmp(tqx_name, "out")) { + google_out = tqx_value; + format = google_data_format_str_to_id(google_out); + } + else if(!strcmp(tqx_name, "responseHandler")) + responseHandler = tqx_value; + else if(!strcmp(tqx_name, "outFileName")) + outFileName = tqx_value; + } + } + else if(!strcmp(name, "tier")) { + tier = str2ul(value); + if(tier < storage_tiers) + options |= RRDR_OPTION_SELECTED_TIER; + else + tier = 0; + } + } + + // validate the google parameters given + fix_google_param(google_out); + fix_google_param(google_sig); + fix_google_param(google_reqId); + fix_google_param(google_version); + fix_google_param(responseHandler); + fix_google_param(outFileName); + + RRDSET *st = NULL; + ONEWAYALLOC *owa = onewayalloc_create(0); + QUERY_TARGET *qt = NULL; + + if(!is_valid_sp(chart) && !is_valid_sp(context)) { + buffer_sprintf(w->response.data, "No chart or context is given."); + goto cleanup; + } + + if(chart && !context) { + // check if this is a specific chart + st = rrdset_find(host, chart); + if (!st) st = rrdset_find_byname(host, chart); + } + + long long before = (before_str && *before_str)?str2l(before_str):0; + long long after = (after_str && *after_str) ?str2l(after_str):-600; + int points = (points_str && *points_str)?str2i(points_str):0; + int timeout = (timeout_str && *timeout_str)?str2i(timeout_str): 0; + long group_time = (group_time_str && *group_time_str)?str2l(group_time_str):0; + + QUERY_TARGET_REQUEST qtr = { + .version = 1, + .after = after, + .before = before, + .host = host, + .st = st, + .nodes = NULL, + .contexts = context, + .instances = chart, + .dimensions = (dimensions)?buffer_tostring(dimensions):NULL, + .timeout_ms = timeout, + .points = points, + .format = format, + .options = options, + .time_group_method = group, + .time_group_options = group_options, + .resampling_time = group_time, + .tier = tier, + .chart_label_key = chart_label_key, + .labels = chart_labels_filter, + .query_source = QUERY_SOURCE_API_DATA, + .priority = STORAGE_PRIORITY_NORMAL, + .interrupt_callback = web_client_interrupt_callback, + .interrupt_callback_data = w, + .transaction = &w->transaction, + }; + qt = query_target_create(&qtr); + + if(!qt || !qt->query.used) { + buffer_sprintf(w->response.data, "No metrics where matched to query."); + ret = HTTP_RESP_NOT_FOUND; + goto cleanup; + } + + web_client_timeout_checkpoint_set(w, timeout); + if(web_client_timeout_checkpoint_and_check(w, NULL)) { + ret = w->response.code; + goto cleanup; + } + + if(outFileName && *outFileName) { + buffer_sprintf(w->response.header, "Content-Disposition: attachment; filename=\"%s\"\r\n", outFileName); + netdata_log_debug(D_WEB_CLIENT, "%llu: generating outfilename header: '%s'", w->id, outFileName); + } + + if(format == DATASOURCE_DATATABLE_JSONP) { + if(responseHandler == NULL) + responseHandler = "google.visualization.Query.setResponse"; + + netdata_log_debug(D_WEB_CLIENT_ACCESS, "%llu: GOOGLE JSON/JSONP: version = '%s', reqId = '%s', sig = '%s', out = '%s', responseHandler = '%s', outFileName = '%s'", + w->id, google_version, google_reqId, google_sig, google_out, responseHandler, outFileName + ); + + buffer_sprintf( + w->response.data, + "%s({version:'%s',reqId:'%s',status:'ok',sig:'%"PRId64"',table:", + responseHandler, + google_version, + google_reqId, + (int64_t)(st ? 
st->last_updated.tv_sec : 0)); + } + else if(format == DATASOURCE_JSONP) { + if(responseHandler == NULL) + responseHandler = "callback"; + + buffer_strcat(w->response.data, responseHandler); + buffer_strcat(w->response.data, "("); + } + + ret = data_query_execute(owa, w->response.data, qt, &last_timestamp_in_data); + + if(format == DATASOURCE_DATATABLE_JSONP) { + if(google_timestamp < last_timestamp_in_data) + buffer_strcat(w->response.data, "});"); + + else { + // the client already has the latest data + buffer_flush(w->response.data); + buffer_sprintf(w->response.data, + "%s({version:'%s',reqId:'%s',status:'error',errors:[{reason:'not_modified',message:'Data not modified'}]});", + responseHandler, google_version, google_reqId); + } + } + else if(format == DATASOURCE_JSONP) + buffer_strcat(w->response.data, ");"); + + if(qt->internal.relative) + buffer_no_cacheable(w->response.data); + else + buffer_cacheable(w->response.data); + +cleanup: + query_target_release(qt); + onewayalloc_destroy(owa); + buffer_free(dimensions); + return ret; +} diff --git a/src/web/api/v1/api_v1_dbengine.c b/src/web/api/v1/api_v1_dbengine.c new file mode 100644 index 000000000..89855f88a --- /dev/null +++ b/src/web/api/v1/api_v1_dbengine.c @@ -0,0 +1,97 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v1_calls.h" + +#ifndef ENABLE_DBENGINE +int web_client_api_request_v1_dbengine_stats(RRDHOST *host __maybe_unused, struct web_client *w __maybe_unused, char *url __maybe_unused) { + return HTTP_RESP_NOT_FOUND; +} +#else +static void web_client_api_v1_dbengine_stats_for_tier(BUFFER *wb, size_t tier) { + RRDENG_SIZE_STATS stats = rrdeng_size_statistics(multidb_ctx[tier]); + + buffer_sprintf(wb, + "\n\t\t\"default_granularity_secs\":%zu" + ",\n\t\t\"sizeof_datafile\":%zu" + ",\n\t\t\"sizeof_page_in_cache\":%zu" + ",\n\t\t\"sizeof_point_data\":%zu" + ",\n\t\t\"sizeof_page_data\":%zu" + ",\n\t\t\"pages_per_extent\":%zu" + ",\n\t\t\"datafiles\":%zu" + ",\n\t\t\"extents\":%zu" + ",\n\t\t\"extents_pages\":%zu" + ",\n\t\t\"points\":%zu" + ",\n\t\t\"metrics\":%zu" + ",\n\t\t\"metrics_pages\":%zu" + ",\n\t\t\"extents_compressed_bytes\":%zu" + ",\n\t\t\"pages_uncompressed_bytes\":%zu" + ",\n\t\t\"pages_duration_secs\":%lld" + ",\n\t\t\"single_point_pages\":%zu" + ",\n\t\t\"first_t\":%ld" + ",\n\t\t\"last_t\":%ld" + ",\n\t\t\"database_retention_secs\":%lld" + ",\n\t\t\"average_compression_savings\":%0.2f" + ",\n\t\t\"average_point_duration_secs\":%0.2f" + ",\n\t\t\"average_metric_retention_secs\":%0.2f" + ",\n\t\t\"ephemeral_metrics_per_day_percent\":%0.2f" + ",\n\t\t\"average_page_size_bytes\":%0.2f" + ",\n\t\t\"estimated_concurrently_collected_metrics\":%zu" + ",\n\t\t\"currently_collected_metrics\":%zu" + ",\n\t\t\"disk_space\":%zu" + ",\n\t\t\"max_disk_space\":%zu" + , stats.default_granularity_secs + , stats.sizeof_datafile + , stats.sizeof_page_in_cache + , stats.sizeof_point_data + , stats.sizeof_page_data + , stats.pages_per_extent + , stats.datafiles + , stats.extents + , stats.extents_pages + , stats.points + , stats.metrics + , stats.metrics_pages + , stats.extents_compressed_bytes + , stats.pages_uncompressed_bytes + , (long long)stats.pages_duration_secs + , stats.single_point_pages + , stats.first_time_s + , stats.last_time_s + , (long long)stats.database_retention_secs + , stats.average_compression_savings + , stats.average_point_duration_secs + , stats.average_metric_retention_secs + , stats.ephemeral_metrics_per_day_percent + , stats.average_page_size_bytes + , 
stats.estimated_concurrently_collected_metrics + , stats.currently_collected_metrics + , stats.disk_space + , stats.max_disk_space + ); +} + +int api_v1_dbengine_stats(RRDHOST *host __maybe_unused, struct web_client *w, char *url __maybe_unused) { + if (!netdata_ready) + return HTTP_RESP_SERVICE_UNAVAILABLE; + + BUFFER *wb = w->response.data; + buffer_flush(wb); + + if(!dbengine_enabled) { + buffer_strcat(wb, "dbengine is not enabled"); + return HTTP_RESP_NOT_FOUND; + } + + wb->content_type = CT_APPLICATION_JSON; + buffer_no_cacheable(wb); + buffer_strcat(wb, "{"); + for(size_t tier = 0; tier < storage_tiers ;tier++) { + buffer_sprintf(wb, "%s\n\t\"tier%zu\": {", tier?",":"", tier); + web_client_api_v1_dbengine_stats_for_tier(wb, tier); + buffer_strcat(wb, "\n\t}"); + } + buffer_strcat(wb, "\n}"); + + return HTTP_RESP_OK; +} +#endif diff --git a/src/web/api/v1/api_v1_function.c b/src/web/api/v1/api_v1_function.c new file mode 100644 index 000000000..761164fd0 --- /dev/null +++ b/src/web/api/v1/api_v1_function.c @@ -0,0 +1,44 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v1_calls.h" + +int api_v1_function(RRDHOST *host, struct web_client *w, char *url) { + if (!netdata_ready) + return HTTP_RESP_SERVICE_UNAVAILABLE; + + int timeout = 0; + const char *function = NULL; + + while (url) { + char *value = strsep_skip_consecutive_separators(&url, "&"); + if (!value || !*value) + continue; + + char *name = strsep_skip_consecutive_separators(&value, "="); + if (!name || !*name) + continue; + + if (!strcmp(name, "function")) + function = value; + + else if (!strcmp(name, "timeout")) + timeout = (int) strtoul(value, NULL, 0); + } + + BUFFER *wb = w->response.data; + buffer_flush(wb); + wb->content_type = CT_APPLICATION_JSON; + buffer_no_cacheable(wb); + + char transaction[UUID_COMPACT_STR_LEN]; + uuid_unparse_lower_compact(w->transaction, transaction); + + CLEAN_BUFFER *source = buffer_create(100, NULL); + web_client_api_request_vX_source_to_buffer(w, source); + + return rrd_function_run(host, wb, timeout, w->access, function, true, transaction, + NULL, NULL, + web_client_progress_functions_update, w, + web_client_interrupt_callback, w, w->payload, + buffer_tostring(source), false); +} diff --git a/src/web/api/v1/api_v1_functions.c b/src/web/api/v1/api_v1_functions.c new file mode 100644 index 000000000..bc1c7df8e --- /dev/null +++ b/src/web/api/v1/api_v1_functions.c @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v1_calls.h" + +int api_v1_functions(RRDHOST *host, struct web_client *w, char *url __maybe_unused) { + if (!netdata_ready) + return HTTP_RESP_SERVICE_UNAVAILABLE; + + BUFFER *wb = w->response.data; + buffer_flush(wb); + wb->content_type = CT_APPLICATION_JSON; + buffer_no_cacheable(wb); + + buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT); + host_functions2json(host, wb); + buffer_json_finalize(wb); + + return HTTP_RESP_OK; +} diff --git a/src/web/api/v1/api_v1_info.c b/src/web/api/v1/api_v1_info.c new file mode 100644 index 000000000..2395cea59 --- /dev/null +++ b/src/web/api/v1/api_v1_info.c @@ -0,0 +1,207 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v1_calls.h" + +static void host_collectors(RRDHOST *host, BUFFER *wb) { + buffer_json_member_add_array(wb, "collectors"); + + DICTIONARY *dict = dictionary_create(DICT_OPTION_SINGLE_THREADED|DICT_OPTION_DONT_OVERWRITE_VALUE); + RRDSET *st; + char name[500]; + + time_t now = now_realtime_sec(); + + rrdset_foreach_read(st, host) { + if 
(!rrdset_is_available_for_viewers(st)) + continue; + + sprintf(name, "%s:%s", rrdset_plugin_name(st), rrdset_module_name(st)); + + bool old = 0; + bool *set = dictionary_set(dict, name, &old, sizeof(bool)); + if(!*set) { + *set = true; + st->last_accessed_time_s = now; + buffer_json_add_array_item_object(wb); + buffer_json_member_add_string(wb, "plugin", rrdset_plugin_name(st)); + buffer_json_member_add_string(wb, "module", rrdset_module_name(st)); + buffer_json_object_close(wb); + } + } + rrdset_foreach_done(st); + dictionary_destroy(dict); + + buffer_json_array_close(wb); +} + +static inline void web_client_api_request_v1_info_mirrored_hosts_status(BUFFER *wb, RRDHOST *host) { + buffer_json_add_array_item_object(wb); + + buffer_json_member_add_string(wb, "hostname", rrdhost_hostname(host)); + buffer_json_member_add_uint64(wb, "hops", host->system_info ? host->system_info->hops : (host == localhost) ? 0 : 1); + buffer_json_member_add_boolean(wb, "reachable", (host == localhost || !rrdhost_flag_check(host, RRDHOST_FLAG_ORPHAN))); + + buffer_json_member_add_string(wb, "guid", host->machine_guid); + buffer_json_member_add_uuid(wb, "node_id", host->node_id.uuid); + CLAIM_ID claim_id = rrdhost_claim_id_get(host); + buffer_json_member_add_string(wb, "claim_id", claim_id_is_set(claim_id) ? claim_id.str : NULL); + + buffer_json_object_close(wb); +} + +static inline void web_client_api_request_v1_info_mirrored_hosts(BUFFER *wb) { + RRDHOST *host; + + rrd_rdlock(); + + buffer_json_member_add_array(wb, "mirrored_hosts"); + rrdhost_foreach_read(host) + buffer_json_add_array_item_string(wb, rrdhost_hostname(host)); + buffer_json_array_close(wb); + + buffer_json_member_add_array(wb, "mirrored_hosts_status"); + rrdhost_foreach_read(host) { + if ((host == localhost || !rrdhost_flag_check(host, RRDHOST_FLAG_ORPHAN))) { + web_client_api_request_v1_info_mirrored_hosts_status(wb, host); + } + } + rrdhost_foreach_read(host) { + if ((host != localhost && rrdhost_flag_check(host, RRDHOST_FLAG_ORPHAN))) { + web_client_api_request_v1_info_mirrored_hosts_status(wb, host); + } + } + buffer_json_array_close(wb); + + rrd_rdunlock(); +} + +static void web_client_api_request_v1_info_summary_alarm_statuses(RRDHOST *host, BUFFER *wb, const char *key) { + buffer_json_member_add_object(wb, key); + + size_t normal = 0, warning = 0, critical = 0; + RRDCALC *rc; + foreach_rrdcalc_in_rrdhost_read(host, rc) { + if(unlikely(!rc->rrdset || !rc->rrdset->last_collected_time.tv_sec)) + continue; + + switch(rc->status) { + case RRDCALC_STATUS_WARNING: + warning++; + break; + case RRDCALC_STATUS_CRITICAL: + critical++; + break; + default: + normal++; + } + } + foreach_rrdcalc_in_rrdhost_done(rc); + + buffer_json_member_add_uint64(wb, "normal", normal); + buffer_json_member_add_uint64(wb, "warning", warning); + buffer_json_member_add_uint64(wb, "critical", critical); + + buffer_json_object_close(wb); +} + +static int web_client_api_request_v1_info_fill_buffer(RRDHOST *host, BUFFER *wb) { + buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT); + + buffer_json_member_add_string(wb, "version", rrdhost_program_version(host)); + buffer_json_member_add_string(wb, "uid", host->machine_guid); + + buffer_json_member_add_uint64(wb, "hosts-available", rrdhost_hosts_available()); + web_client_api_request_v1_info_mirrored_hosts(wb); + + web_client_api_request_v1_info_summary_alarm_statuses(host, wb, "alarms"); + + buffer_json_member_add_string_or_empty(wb, "os_name", host->system_info->host_os_name); + 
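/* NULL-safe field dump: the *_or_empty helpers emit the key with an empty string when the value is missing, while the *_or_omit helpers further below drop the key entirely, as the helper names suggest */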
buffer_json_member_add_string_or_empty(wb, "os_id", host->system_info->host_os_id); + buffer_json_member_add_string_or_empty(wb, "os_id_like", host->system_info->host_os_id_like); + buffer_json_member_add_string_or_empty(wb, "os_version", host->system_info->host_os_version); + buffer_json_member_add_string_or_empty(wb, "os_version_id", host->system_info->host_os_version_id); + buffer_json_member_add_string_or_empty(wb, "os_detection", host->system_info->host_os_detection); + buffer_json_member_add_string_or_empty(wb, "cores_total", host->system_info->host_cores); + buffer_json_member_add_string_or_empty(wb, "total_disk_space", host->system_info->host_disk_space); + buffer_json_member_add_string_or_empty(wb, "cpu_freq", host->system_info->host_cpu_freq); + buffer_json_member_add_string_or_empty(wb, "ram_total", host->system_info->host_ram_total); + + buffer_json_member_add_string_or_omit(wb, "container_os_name", host->system_info->container_os_name); + buffer_json_member_add_string_or_omit(wb, "container_os_id", host->system_info->container_os_id); + buffer_json_member_add_string_or_omit(wb, "container_os_id_like", host->system_info->container_os_id_like); + buffer_json_member_add_string_or_omit(wb, "container_os_version", host->system_info->container_os_version); + buffer_json_member_add_string_or_omit(wb, "container_os_version_id", host->system_info->container_os_version_id); + buffer_json_member_add_string_or_omit(wb, "container_os_detection", host->system_info->container_os_detection); + buffer_json_member_add_string_or_omit(wb, "is_k8s_node", host->system_info->is_k8s_node); + + buffer_json_member_add_string_or_empty(wb, "kernel_name", host->system_info->kernel_name); + buffer_json_member_add_string_or_empty(wb, "kernel_version", host->system_info->kernel_version); + buffer_json_member_add_string_or_empty(wb, "architecture", host->system_info->architecture); + buffer_json_member_add_string_or_empty(wb, "virtualization", host->system_info->virtualization); + buffer_json_member_add_string_or_empty(wb, "virt_detection", host->system_info->virt_detection); + buffer_json_member_add_string_or_empty(wb, "container", host->system_info->container); + buffer_json_member_add_string_or_empty(wb, "container_detection", host->system_info->container_detection); + + buffer_json_member_add_string_or_omit(wb, "cloud_provider_type", host->system_info->cloud_provider_type); + buffer_json_member_add_string_or_omit(wb, "cloud_instance_type", host->system_info->cloud_instance_type); + buffer_json_member_add_string_or_omit(wb, "cloud_instance_region", host->system_info->cloud_instance_region); + + host_labels2json(host, wb, "host_labels"); + host_functions2json(host, wb); + host_collectors(host, wb); + + buffer_json_member_add_boolean(wb, "cloud-enabled", true); + buffer_json_member_add_boolean(wb, "cloud-available", true); + buffer_json_member_add_boolean(wb, "agent-claimed", is_agent_claimed()); + buffer_json_member_add_boolean(wb, "aclk-available", aclk_online()); + + buffer_json_member_add_string(wb, "memory-mode", rrd_memory_mode_name(host->rrd_memory_mode)); +#ifdef ENABLE_DBENGINE + buffer_json_member_add_uint64(wb, "multidb-disk-quota", default_multidb_disk_quota_mb); + buffer_json_member_add_uint64(wb, "page-cache-size", default_rrdeng_page_cache_mb); +#endif // ENABLE_DBENGINE + buffer_json_member_add_boolean(wb, "web-enabled", web_server_mode != WEB_SERVER_MODE_NONE); + buffer_json_member_add_boolean(wb, "stream-enabled", stream_conf_send_enabled); + + buffer_json_member_add_boolean(wb, 
"stream-compression", + host->sender && host->sender->compressor.initialized); + + buffer_json_member_add_boolean(wb, "https-enabled", true); + + buffer_json_member_add_quoted_string(wb, "buildinfo", analytics_data.netdata_buildinfo); + buffer_json_member_add_quoted_string(wb, "release-channel", analytics_data.netdata_config_release_channel); + buffer_json_member_add_quoted_string(wb, "notification-methods", analytics_data.netdata_notification_methods); + + buffer_json_member_add_boolean(wb, "exporting-enabled", analytics_data.exporting_enabled); + buffer_json_member_add_quoted_string(wb, "exporting-connectors", analytics_data.netdata_exporting_connectors); + + buffer_json_member_add_uint64(wb, "allmetrics-prometheus-used", analytics_data.prometheus_hits); + buffer_json_member_add_uint64(wb, "allmetrics-shell-used", analytics_data.shell_hits); + buffer_json_member_add_uint64(wb, "allmetrics-json-used", analytics_data.json_hits); + buffer_json_member_add_uint64(wb, "dashboard-used", analytics_data.dashboard_hits); + + buffer_json_member_add_uint64(wb, "charts-count", analytics_data.charts_count); + buffer_json_member_add_uint64(wb, "metrics-count", analytics_data.metrics_count); + +#if defined(ENABLE_ML) + buffer_json_member_add_object(wb, "ml-info"); + ml_host_get_info(host, wb); + buffer_json_object_close(wb); +#endif + + buffer_json_finalize(wb); + return 0; +} + +int api_v1_info(RRDHOST *host, struct web_client *w, char *url) { + (void)url; + if (!netdata_ready) return HTTP_RESP_SERVICE_UNAVAILABLE; + BUFFER *wb = w->response.data; + buffer_flush(wb); + wb->content_type = CT_APPLICATION_JSON; + + web_client_api_request_v1_info_fill_buffer(host, wb); + + buffer_no_cacheable(wb); + return HTTP_RESP_OK; +} diff --git a/src/web/api/v1/api_v1_manage.c b/src/web/api/v1/api_v1_manage.c new file mode 100644 index 000000000..46611fbf5 --- /dev/null +++ b/src/web/api/v1/api_v1_manage.c @@ -0,0 +1,86 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v1_calls.h" + +char *api_secret; + +static char *get_mgmt_api_key(void) { + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "%s/netdata.api.key", netdata_configured_varlib_dir); + const char *api_key_filename = config_get(CONFIG_SECTION_REGISTRY, "netdata management api key file", filename); + static char guid[GUID_LEN + 1] = ""; + + if(likely(guid[0])) + return guid; + + // read it from disk + int fd = open(api_key_filename, O_RDONLY | O_CLOEXEC); + if(fd != -1) { + char buf[GUID_LEN + 1]; + if(read(fd, buf, GUID_LEN) != GUID_LEN) + netdata_log_error("Failed to read management API key from '%s'", api_key_filename); + else { + buf[GUID_LEN] = '\0'; + if(regenerate_guid(buf, guid) == -1) { + netdata_log_error("Failed to validate management API key '%s' from '%s'.", + buf, api_key_filename); + + guid[0] = '\0'; + } + } + close(fd); + } + + // generate a new one? + if(!guid[0]) { + nd_uuid_t uuid; + + uuid_generate_time(uuid); + uuid_unparse_lower(uuid, guid); + guid[GUID_LEN] = '\0'; + + // save it + fd = open(api_key_filename, O_WRONLY|O_CREAT|O_TRUNC | O_CLOEXEC, 444); + if(fd == -1) { + netdata_log_error("Cannot create unique management API key file '%s'. Please adjust config parameter 'netdata management api key file' to a proper path and file.", api_key_filename); + goto temp_key; + } + + if(write(fd, guid, GUID_LEN) != GUID_LEN) { + netdata_log_error("Cannot write the unique management API key file '%s'. 
Please adjust config parameter 'netdata management api key file' to a proper path and file with enough space left.", api_key_filename); + close(fd); + goto temp_key; + } + + close(fd); + } + + return guid; + +temp_key: + netdata_log_info("You can still continue to use the alarm management API using the authorization token %s during this Netdata session only.", guid); + return guid; +} + +void api_v1_management_init(void) { + api_secret = get_mgmt_api_key(); +} + +#define HLT_MGM "manage/health" +int api_v1_manage(RRDHOST *host, struct web_client *w, char *url) { + const char *haystack = buffer_tostring(w->url_path_decoded); + char *needle; + + buffer_flush(w->response.data); + + if ((needle = strstr(haystack, HLT_MGM)) == NULL) { + buffer_strcat(w->response.data, "Invalid management request. Currently only 'health' is supported."); + return HTTP_RESP_NOT_FOUND; + } + needle += strlen(HLT_MGM); + if (*needle != '\0') { + buffer_strcat(w->response.data, "Invalid management request. Currently only 'health' is supported."); + return HTTP_RESP_NOT_FOUND; + } + return web_client_api_request_v1_mgmt_health(host, w, url); +} diff --git a/src/web/api/v1/api_v1_ml_info.c b/src/web/api/v1/api_v1_ml_info.c new file mode 100644 index 000000000..121f9bf9c --- /dev/null +++ b/src/web/api/v1/api_v1_ml_info.c @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v1_calls.h" + +int api_v1_ml_info(RRDHOST *host, struct web_client *w, char *url) { + (void) url; +#if defined(ENABLE_ML) + + if (!netdata_ready) + return HTTP_RESP_SERVICE_UNAVAILABLE; + + BUFFER *wb = w->response.data; + buffer_flush(wb); + wb->content_type = CT_APPLICATION_JSON; + + buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT); + ml_host_get_detection_info(host, wb); + buffer_json_finalize(wb); + + buffer_no_cacheable(wb); + + return HTTP_RESP_OK; +#else + UNUSED(host); + UNUSED(w); + return HTTP_RESP_SERVICE_UNAVAILABLE; +#endif // ENABLE_ML +} diff --git a/src/web/api/v1/api_v1_registry.c b/src/web/api/v1/api_v1_registry.c new file mode 100644 index 000000000..fa4ce4ca4 --- /dev/null +++ b/src/web/api/v1/api_v1_registry.c @@ -0,0 +1,198 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v1_calls.h" + +// Pings a netdata server: +// /api/v1/registry?action=hello +// +// Access to a netdata registry: +// /api/v1/registry?action=access&machine=${machine_guid}&name=${hostname}&url=${url} +// +// Delete from a netdata registry: +// /api/v1/registry?action=delete&machine=${machine_guid}&name=${hostname}&url=${url}&delete_url=${delete_url} +// +// Search for the URLs of a machine: +// /api/v1/registry?action=search&for=${machine_guid} +// +// Impersonate: +// /api/v1/registry?action=switch&machine=${machine_guid}&name=${hostname}&url=${url}&to=${new_person_guid} +int api_v1_registry(RRDHOST *host, struct web_client *w, char *url) { + static uint32_t hash_action = 0, hash_access = 0, hash_hello = 0, hash_delete = 0, hash_search = 0, + hash_switch = 0, hash_machine = 0, hash_url = 0, hash_name = 0, hash_delete_url = 0, hash_for = 0, + hash_to = 0 /*, hash_redirects = 0 */; + + if(unlikely(!hash_action)) { + hash_action = simple_hash("action"); + hash_access = simple_hash("access"); + hash_hello = simple_hash("hello"); + hash_delete = simple_hash("delete"); + hash_search = simple_hash("search"); + hash_switch = simple_hash("switch"); + hash_machine = simple_hash("machine"); + hash_url = simple_hash("url"); + hash_name = simple_hash("name"); + hash_delete_url = 
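/* the hashes are computed once and cached in statics; the parameter loop below compares the cheap hash first and confirms with strcmp() */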
simple_hash("delete_url"); + hash_for = simple_hash("for"); + hash_to = simple_hash("to"); + /* + hash_redirects = simple_hash("redirects"); +*/ + } + + netdata_log_debug(D_WEB_CLIENT, "%llu: API v1 registry with URL '%s'", w->id, url); + + // TODO + // The browser may send multiple cookies with our id + + char person_guid[UUID_STR_LEN] = ""; + char *cookie = strstr(w->response.data->buffer, NETDATA_REGISTRY_COOKIE_NAME "="); + if(cookie) + strncpyz(person_guid, &cookie[sizeof(NETDATA_REGISTRY_COOKIE_NAME)], UUID_STR_LEN - 1); + else if(!extract_bearer_token_from_request(w, person_guid, sizeof(person_guid))) + person_guid[0] = '\0'; + + char action = '\0'; + char *machine_guid = NULL, + *machine_url = NULL, + *url_name = NULL, + *search_machine_guid = NULL, + *delete_url = NULL, + *to_person_guid = NULL; + /* + int redirects = 0; +*/ + + // Don't cache registry responses + buffer_no_cacheable(w->response.data); + + while(url) { + char *value = strsep_skip_consecutive_separators(&url, "&"); + if (!value || !*value) continue; + + char *name = strsep_skip_consecutive_separators(&value, "="); + if (!name || !*name) continue; + if (!value || !*value) continue; + + netdata_log_debug(D_WEB_CLIENT, "%llu: API v1 registry query param '%s' with value '%s'", w->id, name, value); + + uint32_t hash = simple_hash(name); + + if(hash == hash_action && !strcmp(name, "action")) { + uint32_t vhash = simple_hash(value); + + if(vhash == hash_access && !strcmp(value, "access")) action = 'A'; + else if(vhash == hash_hello && !strcmp(value, "hello")) action = 'H'; + else if(vhash == hash_delete && !strcmp(value, "delete")) action = 'D'; + else if(vhash == hash_search && !strcmp(value, "search")) action = 'S'; + else if(vhash == hash_switch && !strcmp(value, "switch")) action = 'W'; +#ifdef NETDATA_INTERNAL_CHECKS + else netdata_log_error("unknown registry action '%s'", value); +#endif /* NETDATA_INTERNAL_CHECKS */ + } + /* + else if(hash == hash_redirects && !strcmp(name, "redirects")) + redirects = atoi(value); +*/ + else if(hash == hash_machine && !strcmp(name, "machine")) + machine_guid = value; + + else if(hash == hash_url && !strcmp(name, "url")) + machine_url = value; + + else if(action == 'A') { + if(hash == hash_name && !strcmp(name, "name")) + url_name = value; + } + else if(action == 'D') { + if(hash == hash_delete_url && !strcmp(name, "delete_url")) + delete_url = value; + } + else if(action == 'S') { + if(hash == hash_for && !strcmp(name, "for")) + search_machine_guid = value; + } + else if(action == 'W') { + if(hash == hash_to && !strcmp(name, "to")) + to_person_guid = value; + } +#ifdef NETDATA_INTERNAL_CHECKS + else netdata_log_error("unused registry URL parameter '%s' with value '%s'", name, value); +#endif /* NETDATA_INTERNAL_CHECKS */ + } + + bool do_not_track = respect_web_browser_do_not_track_policy && web_client_has_donottrack(w); + + if(unlikely(action == 'H')) { + // HELLO request, dashboard ACL + analytics_log_dashboard(); + if(unlikely(!http_can_access_dashboard(w))) + return web_client_permission_denied_acl(w); + } + else { + // everything else, registry ACL + if(unlikely(!http_can_access_registry(w))) + return web_client_permission_denied_acl(w); + + if(unlikely(do_not_track)) { + buffer_flush(w->response.data); + buffer_sprintf(w->response.data, "Your web browser is sending 'DNT: 1' (Do Not Track). 
The registry requires persistent cookies on your browser to work."); + return HTTP_RESP_BAD_REQUEST; + } + } + + buffer_no_cacheable(w->response.data); + + switch(action) { + case 'A': + if(unlikely(!machine_guid || !machine_url || !url_name)) { + netdata_log_error("Invalid registry request - access requires these parameters: machine ('%s'), url ('%s'), name ('%s')", machine_guid ? machine_guid : "UNSET", machine_url ? machine_url : "UNSET", url_name ? url_name : "UNSET"); + buffer_flush(w->response.data); + buffer_strcat(w->response.data, "Invalid registry Access request."); + return HTTP_RESP_BAD_REQUEST; + } + + web_client_enable_tracking_required(w); + return registry_request_access_json(host, w, person_guid, machine_guid, machine_url, url_name, now_realtime_sec()); + + case 'D': + if(unlikely(!machine_guid || !machine_url || !delete_url)) { + netdata_log_error("Invalid registry request - delete requires these parameters: machine ('%s'), url ('%s'), delete_url ('%s')", machine_guid?machine_guid:"UNSET", machine_url?machine_url:"UNSET", delete_url?delete_url:"UNSET"); + buffer_flush(w->response.data); + buffer_strcat(w->response.data, "Invalid registry Delete request."); + return HTTP_RESP_BAD_REQUEST; + } + + web_client_enable_tracking_required(w); + return registry_request_delete_json(host, w, person_guid, machine_guid, machine_url, delete_url, now_realtime_sec()); + + case 'S': + if(unlikely(!search_machine_guid)) { + netdata_log_error("Invalid registry request - search requires these parameters: for ('%s')", search_machine_guid?search_machine_guid:"UNSET"); + buffer_flush(w->response.data); + buffer_strcat(w->response.data, "Invalid registry Search request."); + return HTTP_RESP_BAD_REQUEST; + } + + web_client_enable_tracking_required(w); + return registry_request_search_json(host, w, person_guid, search_machine_guid); + + case 'W': + if(unlikely(!machine_guid || !machine_url || !to_person_guid)) { + netdata_log_error("Invalid registry request - switching identity requires these parameters: machine ('%s'), url ('%s'), to ('%s')", machine_guid?machine_guid:"UNSET", machine_url?machine_url:"UNSET", to_person_guid?to_person_guid:"UNSET"); + buffer_flush(w->response.data); + buffer_strcat(w->response.data, "Invalid registry Switch request."); + return HTTP_RESP_BAD_REQUEST; + } + + web_client_enable_tracking_required(w); + return registry_request_switch_json(host, w, person_guid, machine_guid, machine_url, to_person_guid, now_realtime_sec()); + + case 'H': + return registry_request_hello_json(host, w, do_not_track); + + default: + buffer_flush(w->response.data); + buffer_strcat(w->response.data, "Invalid registry request - you need to set an action: hello, access, delete, search"); + return HTTP_RESP_BAD_REQUEST; + } +} diff --git a/src/web/api/v1/api_v1_weights.c b/src/web/api/v1/api_v1_weights.c new file mode 100644 index 000000000..e39fceae1 --- /dev/null +++ b/src/web/api/v1/api_v1_weights.c @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v1_calls.h" + +int api_v1_metric_correlations(RRDHOST *host, struct web_client *w, char *url) { + return web_client_api_request_weights(host, w, url, WEIGHTS_METHOD_MC_KS2, WEIGHTS_FORMAT_CHARTS, 1); +} + +int api_v1_weights(RRDHOST *host, struct web_client *w, char *url) { + return web_client_api_request_weights(host, w, url, WEIGHTS_METHOD_ANOMALY_RATE, WEIGHTS_FORMAT_CONTEXTS, 1); +} diff --git a/src/web/api/v2/api_v2_alert_config.c b/src/web/api/v2/api_v2_alert_config.c new file mode 100644 index 
000000000..b4f5344e2 --- /dev/null +++ b/src/web/api/v2/api_v2_alert_config.c @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v2_calls.h" + +int api_v2_alert_config(RRDHOST *host __maybe_unused, struct web_client *w, char *url) { + const char *config = NULL; + + while(url) { + char *value = strsep_skip_consecutive_separators(&url, "&"); + if(!value || !*value) continue; + + char *name = strsep_skip_consecutive_separators(&value, "="); + if(!name || !*name) continue; + if(!value || !*value) continue; + + // name and value are now the parameters + // they are not null and not empty + + if(!strcmp(name, "config")) + config = value; + } + + buffer_flush(w->response.data); + + if(!config) { + w->response.data->content_type = CT_TEXT_PLAIN; + buffer_strcat(w->response.data, "A config hash ID is required. Add ?config=UUID query param"); + return HTTP_RESP_BAD_REQUEST; + } + + return contexts_v2_alert_config_to_json(w, config); +} diff --git a/src/web/api/v2/api_v2_alert_transitions.c b/src/web/api/v2/api_v2_alert_transitions.c new file mode 100644 index 000000000..e84b80184 --- /dev/null +++ b/src/web/api/v2/api_v2_alert_transitions.c @@ -0,0 +1,7 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v2_calls.h" + +int api_v2_alert_transitions(RRDHOST *host __maybe_unused, struct web_client *w, char *url) { + return api_v2_contexts_internal(host, w, url, CONTEXTS_V2_ALERT_TRANSITIONS | CONTEXTS_V2_NODES); +} diff --git a/src/web/api/v2/api_v2_alerts.c b/src/web/api/v2/api_v2_alerts.c new file mode 100644 index 000000000..c5d1922e2 --- /dev/null +++ b/src/web/api/v2/api_v2_alerts.c @@ -0,0 +1,7 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v2_calls.h" + +int api_v2_alerts(RRDHOST *host __maybe_unused, struct web_client *w, char *url) { + return api_v2_contexts_internal(host, w, url, CONTEXTS_V2_ALERTS | CONTEXTS_V2_NODES); +} diff --git a/src/web/api/v2/api_v2_bearer.c b/src/web/api/v2/api_v2_bearer.c new file mode 100644 index 000000000..312ca4e48 --- /dev/null +++ b/src/web/api/v2/api_v2_bearer.c @@ -0,0 +1,139 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v2_calls.h" + +static bool verify_host_uuids(RRDHOST *host, const char *machine_guid, const char *node_id) { + if(!machine_guid || !node_id) + return false; + + if(strcmp(machine_guid, host->machine_guid) != 0) + return false; + + if(UUIDiszero(host->node_id)) + return false; + + char buf[UUID_STR_LEN]; + uuid_unparse_lower(host->node_id.uuid, buf); + + return strcmp(node_id, buf) == 0; +} + +int api_v2_bearer_protection(RRDHOST *host __maybe_unused, struct web_client *w __maybe_unused, char *url) { + char *machine_guid = NULL; + char *claim_id = NULL; + char *node_id = NULL; + bool protection = netdata_is_protected_by_bearer; + + while (url) { + char *value = strsep_skip_consecutive_separators(&url, "&"); + if (!value || !*value) continue; + + char *name = strsep_skip_consecutive_separators(&value, "="); + if (!name || !*name) continue; + if (!value || !*value) continue; + + if(!strcmp(name, "bearer_protection")) { + if(!strcmp(value, "on") || !strcmp(value, "true") || !strcmp(value, "yes")) + protection = true; + else + protection = false; + } + else if(!strcmp(name, "machine_guid")) + machine_guid = value; + else if(!strcmp(name, "claim_id")) + claim_id = value; + else if(!strcmp(name, "node_id")) + node_id = value; + } + + if(!claim_id_matches(claim_id)) { + buffer_reset(w->response.data); + buffer_strcat(w->response.data, "The request is for a different 
claimed agent"); + return HTTP_RESP_BAD_REQUEST; + } + + if(!verify_host_uuids(localhost, machine_guid, node_id)) { + buffer_reset(w->response.data); + buffer_strcat(w->response.data, "The request is missing or not matching local UUIDs"); + return HTTP_RESP_BAD_REQUEST; + } + + netdata_is_protected_by_bearer = protection; + + BUFFER *wb = w->response.data; + buffer_reset(wb); + buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT); + buffer_json_member_add_boolean(wb, "bearer_protection", netdata_is_protected_by_bearer); + buffer_json_finalize(wb); + + return HTTP_RESP_OK; +} + +int bearer_get_token_json_response(BUFFER *wb, RRDHOST *host, const char *claim_id, const char *machine_guid, const char *node_id, HTTP_USER_ROLE user_role, HTTP_ACCESS access, nd_uuid_t cloud_account_id, const char *client_name) { + if(!claim_id_matches_any(claim_id)) + return rrd_call_function_error(wb, "The request is for a different agent", HTTP_RESP_BAD_REQUEST); + + if(!verify_host_uuids(host, machine_guid, node_id)) + return rrd_call_function_error(wb, "The request is missing or not matching local node UUIDs", HTTP_RESP_BAD_REQUEST); + + nd_uuid_t uuid; + time_t expires_s = bearer_create_token(&uuid, user_role, access, cloud_account_id, client_name); + + buffer_reset(wb); + buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_MINIFY); + buffer_json_member_add_int64(wb, "status", HTTP_RESP_OK); + buffer_json_member_add_string(wb, "mg", host->machine_guid); + buffer_json_member_add_boolean(wb, "bearer_protection", netdata_is_protected_by_bearer); + buffer_json_member_add_uuid(wb, "token", uuid); + buffer_json_member_add_time_t(wb, "expiration", expires_s); + buffer_json_finalize(wb); + return HTTP_RESP_OK; +} + +int api_v2_bearer_get_token(RRDHOST *host, struct web_client *w, char *url) { + char *machine_guid = NULL; + char *claim_id = NULL; + char *node_id = NULL; + + while(url) { + char *value = strsep_skip_consecutive_separators(&url, "&"); + if (!value || !*value) continue; + + char *name = strsep_skip_consecutive_separators(&value, "="); + if (!name || !*name) continue; + if (!value || !*value) continue; + + if(!strcmp(name, "machine_guid")) + machine_guid = value; + else if(!strcmp(name, "claim_id")) + claim_id = value; + else if(!strcmp(name, "node_id")) + node_id = value; + } + + if(!claim_id_matches(claim_id)) { + buffer_reset(w->response.data); + buffer_strcat(w->response.data, "The request is for a different claimed agent"); + return HTTP_RESP_BAD_REQUEST; + } + + if(!verify_host_uuids(host, machine_guid, node_id)) { + buffer_reset(w->response.data); + buffer_strcat(w->response.data, "The request is missing or not matching local UUIDs"); + return HTTP_RESP_BAD_REQUEST; + } + + if(host != localhost) + return call_function_bearer_get_token(host, w, claim_id, machine_guid, node_id); + + return bearer_get_token_json_response( + w->response.data, + host, + claim_id, + machine_guid, + node_id, + w->user_role, + w->access, + w->auth.cloud_account_id, + w->auth.client_name); +} diff --git a/src/web/api/v2/api_v2_calls.h b/src/web/api/v2/api_v2_calls.h new file mode 100644 index 000000000..809af9669 --- /dev/null +++ b/src/web/api/v2/api_v2_calls.h @@ -0,0 +1,38 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_API_V2_CALLS_H +#define NETDATA_API_V2_CALLS_H + +#include "../web_api_v2.h" + +int api_v2_info(RRDHOST *host, struct web_client *w, char *url); + +int api_v2_data(RRDHOST *host, struct web_client *w, char *url); +int api_v2_weights(RRDHOST 
*host, struct web_client *w, char *url); + +int api_v2_alert_config(RRDHOST *host, struct web_client *w, char *url); + +int api_v2_contexts_internal(RRDHOST *host, struct web_client *w, char *url, CONTEXTS_V2_MODE mode); +int api_v2_contexts(RRDHOST *host, struct web_client *w, char *url); +int api_v2_alert_transitions(RRDHOST *host, struct web_client *w, char *url); +int api_v2_alerts(RRDHOST *host, struct web_client *w, char *url); +int api_v2_functions(RRDHOST *host, struct web_client *w, char *url); +int api_v2_versions(RRDHOST *host, struct web_client *w, char *url); +int api_v2_q(RRDHOST *host, struct web_client *w, char *url); +int api_v2_nodes(RRDHOST *host, struct web_client *w, char *url); +int api_v2_node_instances(RRDHOST *host, struct web_client *w, char *url); + +int api_v2_ilove(RRDHOST *host, struct web_client *w, char *url); + +int api_v2_claim(RRDHOST *host, struct web_client *w, char *url); +int api_v3_claim(RRDHOST *host, struct web_client *w, char *url); + +int api_v2_webrtc(RRDHOST *host, struct web_client *w, char *url); + +int api_v2_progress(RRDHOST *host, struct web_client *w, char *url); + +int api_v2_bearer_get_token(RRDHOST *host, struct web_client *w, char *url); +int bearer_get_token_json_response(BUFFER *wb, RRDHOST *host, const char *claim_id, const char *machine_guid, const char *node_id, HTTP_USER_ROLE user_role, HTTP_ACCESS access, nd_uuid_t cloud_account_id, const char *client_name); +int api_v2_bearer_protection(RRDHOST *host, struct web_client *w, char *url); + +#endif //NETDATA_API_V2_CALLS_H diff --git a/src/web/api/v2/api_v2_claim.c b/src/web/api/v2/api_v2_claim.c new file mode 100644 index 000000000..906986101 --- /dev/null +++ b/src/web/api/v2/api_v2_claim.c @@ -0,0 +1,236 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v2_calls.h" +#include "claim/claim.h" + +static char *netdata_random_session_id_filename = NULL; +static nd_uuid_t netdata_random_session_id = { 0 }; + +bool netdata_random_session_id_generate(void) { + static char guid[UUID_STR_LEN] = ""; + + uuid_generate_random(netdata_random_session_id); + uuid_unparse_lower(netdata_random_session_id, guid); + + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "%s/netdata_random_session_id", netdata_configured_varlib_dir); + + bool ret = true; + + (void)unlink(filename); + + // save it + int fd = open(filename, O_WRONLY|O_CREAT|O_TRUNC|O_CLOEXEC, 0640); + if(fd == -1) { + netdata_log_error("Cannot create random session id file '%s'.", filename); + ret = false; + } + else { + if (write(fd, guid, UUID_STR_LEN - 1) != UUID_STR_LEN - 1) { + netdata_log_error("Cannot write the random session id file '%s'.", filename); + ret = false; + } else { + ssize_t bytes = write(fd, "\n", 1); + UNUSED(bytes); + } + close(fd); + } + + if(ret && (!netdata_random_session_id_filename || strcmp(netdata_random_session_id_filename, filename) != 0)) { + freez(netdata_random_session_id_filename); + netdata_random_session_id_filename = strdupz(filename); + } + + return ret; +} + +static const char *netdata_random_session_id_get_filename(void) { + if(!netdata_random_session_id_filename) + netdata_random_session_id_generate(); + + return netdata_random_session_id_filename; +} + +static bool netdata_random_session_id_matches(const char *guid) { + if(uuid_is_null(netdata_random_session_id)) + return false; + + nd_uuid_t uuid; + + if(uuid_parse(guid, uuid)) + return false; + + if(uuid_compare(netdata_random_session_id, uuid) == 0) + return true; + + return false; +} + +static bool 
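/* allow-list validator: claim parameters may contain only alphanumerics and . , - : / _ ; NULL or empty values pass here and are checked for presence separately */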
check_claim_param(const char *s) { + if(!s || !*s) return true; + + do { + if(isalnum((uint8_t)*s) || *s == '.' || *s == ',' || *s == '-' || *s == ':' || *s == '/' || *s == '_') + ; + else + return false; + + } while(*++s); + + return true; +} + +static bool agent_can_be_claimed(void) { + CLOUD_STATUS status = cloud_status(); + switch(status) { + case CLOUD_STATUS_AVAILABLE: + case CLOUD_STATUS_OFFLINE: + case CLOUD_STATUS_INDIRECT: + return true; + + case CLOUD_STATUS_BANNED: + case CLOUD_STATUS_ONLINE: + return false; + } + + return false; +} + +typedef enum { + CLAIM_RESP_INFO, + CLAIM_RESP_ERROR, + CLAIM_RESP_ACTION_OK, + CLAIM_RESP_ACTION_FAILED, +} CLAIM_RESPONSE; + +static void claim_add_user_info_command(BUFFER *wb) { + const char *filename = netdata_random_session_id_get_filename(); + CLEAN_BUFFER *os_cmd = buffer_create(0, NULL); + + const char *os_filename; + const char *os_prefix; + const char *os_quote; + const char *os_message; + +#if defined(OS_WINDOWS) + char win_path[MAX_PATH]; + cygwin_conv_path(CCP_POSIX_TO_WIN_A, filename, win_path, sizeof(win_path)); + os_filename = win_path; + os_prefix = "more"; + os_message = "We need to verify this Windows server is yours. So, open a Command Prompt on this server to run the command. It will give you a UUID. Copy and paste this UUID to this box:"; +#else + os_filename = filename; + os_prefix = "sudo cat"; + os_message = "We need to verify this server is yours. SSH to this server and run this command. It will give you a UUID. Copy and paste this UUID to this box:"; +#endif + + // add quotes only when the filename has a space + if(strchr(os_filename, ' ')) + os_quote = "\""; + else + os_quote = ""; + + buffer_sprintf(os_cmd, "%s %s%s%s", os_prefix, os_quote, os_filename, os_quote); + + buffer_json_member_add_string(wb, "key_filename", os_filename); + buffer_json_member_add_string(wb, "cmd", buffer_tostring(os_cmd)); + buffer_json_member_add_string(wb, "help", os_message); +} + +static int claim_json_response(BUFFER *wb, CLAIM_RESPONSE response, const char *msg) { + time_t now_s = now_realtime_sec(); + buffer_reset(wb); + buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT); + + if(response != CLAIM_RESP_INFO) { + // this is not an info, so it needs a status report + buffer_json_member_add_boolean(wb, "success", response == CLAIM_RESP_ACTION_OK ? true : false); + buffer_json_member_add_string_or_empty(wb, "message", msg ? msg : ""); + } + + buffer_json_cloud_status(wb, now_s); + + if(response != CLAIM_RESP_ACTION_OK) { + buffer_json_member_add_boolean(wb, "can_be_claimed", agent_can_be_claimed()); + claim_add_user_info_command(wb); + } + + buffer_json_agents_v2(wb, NULL, now_s, false, false); + buffer_json_finalize(wb); + + return (response == CLAIM_RESP_ERROR) ? 
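/* only a malformed request maps to 400; a failed claim action still returns 200 with success:false in the JSON payload */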
HTTP_RESP_BAD_REQUEST : HTTP_RESP_OK; +} + +static int claim_txt_response(BUFFER *wb, const char *msg) { + buffer_reset(wb); + buffer_strcat(wb, msg); + return HTTP_RESP_BAD_REQUEST; +} + +static int api_claim(uint8_t version, struct web_client *w, char *url) { + char *key = NULL; + char *token = NULL; + char *rooms = NULL; + char *base_url = NULL; + + while (url) { + char *value = strsep_skip_consecutive_separators(&url, "&"); + if (!value || !*value) continue; + + char *name = strsep_skip_consecutive_separators(&value, "="); + if (!name || !*name) continue; + if (!value || !*value) continue; + + if(!strcmp(name, "key")) + key = value; + else if(!strcmp(name, "token")) + token = value; + else if(!strcmp(name, "rooms")) + rooms = value; + else if(!strcmp(name, "url")) + base_url = value; + } + + BUFFER *wb = w->response.data; + + CLAIM_RESPONSE response = CLAIM_RESP_INFO; + const char *msg = NULL; + bool can_be_claimed = agent_can_be_claimed(); + + if(can_be_claimed && key) { + if(!netdata_random_session_id_matches(key)) { + netdata_random_session_id_generate(); // generate a new key, to avoid an attack to find it + if(version < 3) return claim_txt_response(wb, "invalid key"); + return claim_json_response(wb, CLAIM_RESP_ERROR, "invalid key"); + } + + if(!token || !base_url || !check_claim_param(token) || !check_claim_param(base_url) || (rooms && !check_claim_param(rooms))) { + netdata_random_session_id_generate(); // generate a new key, to avoid an attack to find it + if(version < 3) return claim_txt_response(wb, "invalid parameters"); + return claim_json_response(wb, CLAIM_RESP_ERROR, "invalid parameters"); + } + + netdata_random_session_id_generate(); // generate a new key, to avoid an attack to find it + + if(claim_agent(base_url, token, rooms, cloud_config_proxy_get(), cloud_config_insecure_get())) { + msg = "ok"; + can_be_claimed = false; + claim_reload_and_wait_online(); + response = CLAIM_RESP_ACTION_OK; + } + else { + msg = claim_agent_failure_reason_get(); + response = CLAIM_RESP_ACTION_FAILED; + } + } + + return claim_json_response(wb, response, msg); +} + +int api_v2_claim(RRDHOST *host __maybe_unused, struct web_client *w, char *url) { + return api_claim(2, w, url); +} + +int api_v3_claim(RRDHOST *host __maybe_unused, struct web_client *w, char *url) { + return api_claim(3, w, url); +} diff --git a/src/web/api/v2/api_v2_contexts.c b/src/web/api/v2/api_v2_contexts.c new file mode 100644 index 000000000..bbe36ab34 --- /dev/null +++ b/src/web/api/v2/api_v2_contexts.c @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v2_calls.h" + +// -------------------------------------------------------------------------------------------------------------------- + +int api_v2_contexts_internal(RRDHOST *host __maybe_unused, struct web_client *w, char *url, CONTEXTS_V2_MODE mode) { + struct api_v2_contexts_request req = { 0 }; + + while(url) { + char *value = strsep_skip_consecutive_separators(&url, "&"); + if(!value || !*value) continue; + + char *name = strsep_skip_consecutive_separators(&value, "="); + if(!name || !*name) continue; + if(!value || !*value) continue; + + // name and value are now the parameters + // they are not null and not empty + + if(!strcmp(name, "scope_nodes")) + req.scope_nodes = value; + else if(!strcmp(name, "nodes")) + req.nodes = value; + else if((mode & (CONTEXTS_V2_CONTEXTS | CONTEXTS_V2_SEARCH | CONTEXTS_V2_ALERTS | CONTEXTS_V2_ALERT_TRANSITIONS)) && !strcmp(name, "scope_contexts")) + req.scope_contexts = value; + else if((mode & 
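/* mode-gated: the contexts filter is only accepted by modes that actually query contexts */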
(CONTEXTS_V2_CONTEXTS | CONTEXTS_V2_SEARCH | CONTEXTS_V2_ALERTS | CONTEXTS_V2_ALERT_TRANSITIONS)) && !strcmp(name, "contexts")) + req.contexts = value; + else if((mode & CONTEXTS_V2_SEARCH) && !strcmp(name, "q")) + req.q = value; + else if(!strcmp(name, "options")) + req.options = contexts_options_str_to_id(value); + else if(!strcmp(name, "after")) + req.after = str2l(value); + else if(!strcmp(name, "before")) + req.before = str2l(value); + else if(!strcmp(name, "timeout")) + req.timeout_ms = str2l(value); + else if(mode & (CONTEXTS_V2_ALERTS | CONTEXTS_V2_ALERT_TRANSITIONS)) { + if (!strcmp(name, "alert")) + req.alerts.alert = value; + else if (!strcmp(name, "transition")) + req.alerts.transition = value; + else if(mode & CONTEXTS_V2_ALERTS) { + if (!strcmp(name, "status")) + req.alerts.status = contexts_alert_status_str_to_id(value); + } + else if(mode & CONTEXTS_V2_ALERT_TRANSITIONS) { + if (!strcmp(name, "last")) + req.alerts.last = strtoul(value, NULL, 0); + else if(!strcmp(name, "context")) + req.contexts = value; + else if (!strcmp(name, "anchor_gi")) { + req.alerts.global_id_anchor = str2ull(value, NULL); + } + else { + for(int i = 0; i < ATF_TOTAL_ENTRIES ;i++) { + if(!strcmp(name, alert_transition_facets[i].query_param)) + req.alerts.facets[i] = value; + } + } + } + } + } + + if ((mode & CONTEXTS_V2_ALERT_TRANSITIONS) && !req.alerts.last) + req.alerts.last = 1; + + buffer_flush(w->response.data); + buffer_no_cacheable(w->response.data); + return rrdcontext_to_json_v2(w->response.data, &req, mode); +} + +int api_v2_contexts(RRDHOST *host __maybe_unused, struct web_client *w, char *url) { + return api_v2_contexts_internal( + host, w, url, CONTEXTS_V2_CONTEXTS | CONTEXTS_V2_NODES | CONTEXTS_V2_AGENTS | CONTEXTS_V2_VERSIONS); +} + diff --git a/src/web/api/v2/api_v2_data.c b/src/web/api/v2/api_v2_data.c new file mode 100644 index 000000000..4eb54e9ad --- /dev/null +++ b/src/web/api/v2/api_v2_data.c @@ -0,0 +1,302 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v2_calls.h" + +#define GROUP_BY_KEY_MAX_LENGTH 30 +static struct { + char group_by[GROUP_BY_KEY_MAX_LENGTH + 1]; + char aggregation[GROUP_BY_KEY_MAX_LENGTH + 1]; + char group_by_label[GROUP_BY_KEY_MAX_LENGTH + 1]; +} group_by_keys[MAX_QUERY_GROUP_BY_PASSES]; + +__attribute__((constructor)) void initialize_group_by_keys(void) { + for(size_t g = 0; g < MAX_QUERY_GROUP_BY_PASSES ;g++) { + snprintfz(group_by_keys[g].group_by, GROUP_BY_KEY_MAX_LENGTH, "group_by[%zu]", g); + snprintfz(group_by_keys[g].aggregation, GROUP_BY_KEY_MAX_LENGTH, "aggregation[%zu]", g); + snprintfz(group_by_keys[g].group_by_label, GROUP_BY_KEY_MAX_LENGTH, "group_by_label[%zu]", g); + } +} + +int api_v2_data(RRDHOST *host __maybe_unused, struct web_client *w, char *url) { + usec_t received_ut = now_monotonic_usec(); + + int ret = HTTP_RESP_BAD_REQUEST; + + buffer_flush(w->response.data); + + char *google_version = "0.6", + *google_reqId = "0", + *google_sig = "0", + *google_out = "json", + *responseHandler = NULL, + *outFileName = NULL; + + time_t last_timestamp_in_data = 0, google_timestamp = 0; + + char *scope_nodes = NULL; + char *scope_contexts = NULL; + char *nodes = NULL; + char *contexts = NULL; + char *instances = NULL; + char *dimensions = NULL; + char *before_str = NULL; + char *after_str = NULL; + char *resampling_time_str = NULL; + char *points_str = NULL; + char *timeout_str = NULL; + char *labels = NULL; + char *alerts = NULL; + char *time_group_options = NULL; + char *tier_str = NULL; + size_t tier = 0; + RRDR_TIME_GROUPING 
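/* query defaults: average time-grouping, JSON2 output, and virtual points with the JSON wrapper */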
time_group = RRDR_GROUPING_AVERAGE; + DATASOURCE_FORMAT format = DATASOURCE_JSON2; + RRDR_OPTIONS options = RRDR_OPTION_VIRTUAL_POINTS | RRDR_OPTION_JSON_WRAP | RRDR_OPTION_RETURN_JWAR; + + struct group_by_pass group_by[MAX_QUERY_GROUP_BY_PASSES] = { + { + .group_by = RRDR_GROUP_BY_DIMENSION, + .group_by_label = NULL, + .aggregation = RRDR_GROUP_BY_FUNCTION_AVERAGE, + }, + }; + + size_t group_by_idx = 0, group_by_label_idx = 0, aggregation_idx = 0; + + while(url) { + char *value = strsep_skip_consecutive_separators(&url, "&"); + if(!value || !*value) continue; + + char *name = strsep_skip_consecutive_separators(&value, "="); + if(!name || !*name) continue; + if(!value || !*value) continue; + + // name and value are now the parameters + // they are not null and not empty + + if(!strcmp(name, "scope_nodes")) scope_nodes = value; + else if(!strcmp(name, "scope_contexts")) scope_contexts = value; + else if(!strcmp(name, "nodes")) nodes = value; + else if(!strcmp(name, "contexts")) contexts = value; + else if(!strcmp(name, "instances")) instances = value; + else if(!strcmp(name, "dimensions")) dimensions = value; + else if(!strcmp(name, "labels")) labels = value; + else if(!strcmp(name, "alerts")) alerts = value; + else if(!strcmp(name, "after")) after_str = value; + else if(!strcmp(name, "before")) before_str = value; + else if(!strcmp(name, "points")) points_str = value; + else if(!strcmp(name, "timeout")) timeout_str = value; + else if(!strcmp(name, "group_by")) { + group_by[group_by_idx++].group_by = group_by_parse(value); + if(group_by_idx >= MAX_QUERY_GROUP_BY_PASSES) + group_by_idx = MAX_QUERY_GROUP_BY_PASSES - 1; + } + else if(!strcmp(name, "group_by_label")) { + group_by[group_by_label_idx++].group_by_label = value; + if(group_by_label_idx >= MAX_QUERY_GROUP_BY_PASSES) + group_by_label_idx = MAX_QUERY_GROUP_BY_PASSES - 1; + } + else if(!strcmp(name, "aggregation")) { + group_by[aggregation_idx++].aggregation = group_by_aggregate_function_parse(value); + if(aggregation_idx >= MAX_QUERY_GROUP_BY_PASSES) + aggregation_idx = MAX_QUERY_GROUP_BY_PASSES - 1; + } + else if(!strcmp(name, "format")) format = datasource_format_str_to_id(value); + else if(!strcmp(name, "options")) options |= rrdr_options_parse(value); + else if(!strcmp(name, "time_group")) time_group = time_grouping_parse(value, RRDR_GROUPING_AVERAGE); + else if(!strcmp(name, "time_group_options")) time_group_options = value; + else if(!strcmp(name, "time_resampling")) resampling_time_str = value; + else if(!strcmp(name, "tier")) tier_str = value; + else if(!strcmp(name, "callback")) responseHandler = value; + else if(!strcmp(name, "filename")) outFileName = value; + else if(!strcmp(name, "tqx")) { + // parse Google Visualization API options + // https://developers.google.com/chart/interactive/docs/dev/implementing_data_source + char *tqx_name, *tqx_value; + + while(value) { + tqx_value = strsep_skip_consecutive_separators(&value, ";"); + if(!tqx_value || !*tqx_value) continue; + + tqx_name = strsep_skip_consecutive_separators(&tqx_value, ":"); + if(!tqx_name || !*tqx_name) continue; + if(!tqx_value || !*tqx_value) continue; + + if(!strcmp(tqx_name, "version")) + google_version = tqx_value; + else if(!strcmp(tqx_name, "reqId")) + google_reqId = tqx_value; + else if(!strcmp(tqx_name, "sig")) { + google_sig = tqx_value; + google_timestamp = strtoul(google_sig, NULL, 0); + } + else if(!strcmp(tqx_name, "out")) { + google_out = tqx_value; + format = google_data_format_str_to_id(google_out); + } + else if(!strcmp(tqx_name, 
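/* tqx may also override the JSONP callback name and the download filename */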
"responseHandler")) + responseHandler = tqx_value; + else if(!strcmp(tqx_name, "outFileName")) + outFileName = tqx_value; + } + } + else { + for(size_t g = 0; g < MAX_QUERY_GROUP_BY_PASSES ;g++) { + if(!strcmp(name, group_by_keys[g].group_by)) + group_by[g].group_by = group_by_parse(value); + else if(!strcmp(name, group_by_keys[g].group_by_label)) + group_by[g].group_by_label = value; + else if(!strcmp(name, group_by_keys[g].aggregation)) + group_by[g].aggregation = group_by_aggregate_function_parse(value); + } + } + } + + // validate the google parameters given + fix_google_param(google_out); + fix_google_param(google_sig); + fix_google_param(google_reqId); + fix_google_param(google_version); + fix_google_param(responseHandler); + fix_google_param(outFileName); + + for(size_t g = 0; g < MAX_QUERY_GROUP_BY_PASSES ;g++) { + if (group_by[g].group_by_label && *group_by[g].group_by_label) + group_by[g].group_by |= RRDR_GROUP_BY_LABEL; + } + + if(group_by[0].group_by == RRDR_GROUP_BY_NONE) + group_by[0].group_by = RRDR_GROUP_BY_DIMENSION; + + for(size_t g = 0; g < MAX_QUERY_GROUP_BY_PASSES ;g++) { + if ((group_by[g].group_by & ~(RRDR_GROUP_BY_DIMENSION)) || (options & RRDR_OPTION_PERCENTAGE)) { + options |= RRDR_OPTION_ABSOLUTE; + break; + } + } + + if(options & RRDR_OPTION_DEBUG) + options &= ~RRDR_OPTION_MINIFY; + + if(tier_str && *tier_str) { + tier = str2ul(tier_str); + if(tier < storage_tiers) + options |= RRDR_OPTION_SELECTED_TIER; + else + tier = 0; + } + + time_t before = (before_str && *before_str)?str2l(before_str):0; + time_t after = (after_str && *after_str) ?str2l(after_str):-600; + size_t points = (points_str && *points_str)?str2u(points_str):0; + int timeout = (timeout_str && *timeout_str)?str2i(timeout_str): 0; + time_t resampling_time = (resampling_time_str && *resampling_time_str) ? 
str2l(resampling_time_str) : 0; + + QUERY_TARGET_REQUEST qtr = { + .version = 2, + .scope_nodes = scope_nodes, + .scope_contexts = scope_contexts, + .after = after, + .before = before, + .host = NULL, + .st = NULL, + .nodes = nodes, + .contexts = contexts, + .instances = instances, + .dimensions = dimensions, + .alerts = alerts, + .timeout_ms = timeout, + .points = points, + .format = format, + .options = options, + .time_group_method = time_group, + .time_group_options = time_group_options, + .resampling_time = resampling_time, + .tier = tier, + .chart_label_key = NULL, + .labels = labels, + .query_source = QUERY_SOURCE_API_DATA, + .priority = STORAGE_PRIORITY_NORMAL, + .received_ut = received_ut, + + .interrupt_callback = web_client_interrupt_callback, + .interrupt_callback_data = w, + + .transaction = &w->transaction, + }; + + for(size_t g = 0; g < MAX_QUERY_GROUP_BY_PASSES ;g++) + qtr.group_by[g] = group_by[g]; + + QUERY_TARGET *qt = query_target_create(&qtr); + ONEWAYALLOC *owa = NULL; + + if(!qt) { + buffer_sprintf(w->response.data, "Failed to prepare the query."); + ret = HTTP_RESP_INTERNAL_SERVER_ERROR; + goto cleanup; + } + + web_client_timeout_checkpoint_set(w, timeout); + if(web_client_timeout_checkpoint_and_check(w, NULL)) { + ret = w->response.code; + goto cleanup; + } + + if(outFileName && *outFileName) { + buffer_sprintf(w->response.header, "Content-Disposition: attachment; filename=\"%s\"\r\n", outFileName); + netdata_log_debug(D_WEB_CLIENT, "%llu: generating outfilename header: '%s'", w->id, outFileName); + } + + if(format == DATASOURCE_DATATABLE_JSONP) { + if(responseHandler == NULL) + responseHandler = "google.visualization.Query.setResponse"; + + netdata_log_debug(D_WEB_CLIENT_ACCESS, "%llu: GOOGLE JSON/JSONP: version = '%s', reqId = '%s', sig = '%s', out = '%s', responseHandler = '%s', outFileName = '%s'", + w->id, google_version, google_reqId, google_sig, google_out, responseHandler, outFileName + ); + + buffer_sprintf( + w->response.data, + "%s({version:'%s',reqId:'%s',status:'ok',sig:'%"PRId64"',table:", + responseHandler, + google_version, + google_reqId, + (int64_t)now_realtime_sec()); + } + else if(format == DATASOURCE_JSONP) { + if(responseHandler == NULL) + responseHandler = "callback"; + + buffer_strcat(w->response.data, responseHandler); + buffer_strcat(w->response.data, "("); + } + + owa = onewayalloc_create(0); + ret = data_query_execute(owa, w->response.data, qt, &last_timestamp_in_data); + + if(format == DATASOURCE_DATATABLE_JSONP) { + if(google_timestamp < last_timestamp_in_data) + buffer_strcat(w->response.data, "});"); + + else { + // the client already has the latest data + buffer_flush(w->response.data); + buffer_sprintf(w->response.data, + "%s({version:'%s',reqId:'%s',status:'error',errors:[{reason:'not_modified',message:'Data not modified'}]});", + responseHandler, google_version, google_reqId); + } + } + else if(format == DATASOURCE_JSONP) + buffer_strcat(w->response.data, ");"); + + if(qt->internal.relative) + buffer_no_cacheable(w->response.data); + else + buffer_cacheable(w->response.data); + +cleanup: + query_target_release(qt); + onewayalloc_destroy(owa); + return ret; +} diff --git a/src/web/api/v2/api_v2_functions.c b/src/web/api/v2/api_v2_functions.c new file mode 100644 index 000000000..286efd130 --- /dev/null +++ b/src/web/api/v2/api_v2_functions.c @@ -0,0 +1,8 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v2_calls.h" + +int api_v2_functions(RRDHOST *host __maybe_unused, struct web_client *w, char *url) { + return 
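/* the functions listing is produced by the same contexts engine, with the FUNCTIONS bit selecting what to report */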
api_v2_contexts_internal( + host, w, url, CONTEXTS_V2_FUNCTIONS | CONTEXTS_V2_NODES | CONTEXTS_V2_AGENTS | CONTEXTS_V2_VERSIONS); +} diff --git a/src/web/api/v2/api_v2_ilove/README.md b/src/web/api/v2/api_v2_ilove/README.md new file mode 100644 index 000000000..e69de29bb diff --git a/src/web/api/v2/api_v2_ilove/ilove.c b/src/web/api/v2/api_v2_ilove/ilove.c new file mode 100644 index 000000000..501e00123 --- /dev/null +++ b/src/web/api/v2/api_v2_ilove/ilove.c @@ -0,0 +1,306 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "../api_v2_calls.h" + +static const unsigned short int ibm_plex_sans_bold_250[128][128] = { + {0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* ! */, 0 /* " */, 0 /* # */, 0 /* $ */, 0 /* % */, 0 /* & */, 0 /* ' */, 0 /* ( */, 0 /* ) */, 0 /* * */, 0 /* + */, 0 /* , */, 0 /* - */, 0 /* . */, 0 /* / */, 0 /* 0 */, 0 /* 1 */, 0 /* 2 */, 0 /* 3 */, 0 /* 4 */, 0 /* 5 */, 0 /* 6 */, 0 /* 7 */, 0 /* 8 */, 0 /* 9 */, 0 /* : */, 0 /* ; */, 0 /* < */, 0 /* = */, 0 /* > */, 0 /* ? */, 0 /* @ */, 0 /* A */, 0 /* B */, 0 /* C */, 0 /* D */, 0 /* E */, 0 /* F */, 0 /* G */, 0 /* H */, 0 /* I */, 0 /* J */, 0 /* K */, 0 /* L */, 0 /* M */, 0 /* N */, 0 /* O */, 0 /* P */, 0 /* Q */, 0 /* R */, 0 /* S */, 0 /* T */, 0 /* U */, 0 /* V */, 0 /* W */, 0 /* X */, 0 /* Y */, 0 /* Z */, 0 /* [ */, 0 /* \ */, 0 /* ] */, 0 /* ^ */, 0 /* _ */, 0 /* ` */, 0 /* a */, 0 /* b */, 0 /* c */, 0 /* d */, 0 /* e */, 0 /* f */, 0 /* g */, 0 /* h */, 0 /* i */, 0 /* j */, 0 /* k */, 0 /* l */, 0 /* m */, 0 /* n */, 0 /* o */, 0 /* p */, 0 /* q */, 0 /* r */, 0 /* s */, 0 /* t */, 0 /* u */, 0 /* v */, 0 /* w */, 0 /* x */, 0 /* y */, 0 /* z */, 0 /* { */, 0 /* | */, 0 /* } */, 0 /* ~ */}, + {0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* ! */, 0 /* " */, 0 /* # */, 0 /* $ */, 0 /* % */, 0 /* & */, 0 /* ' */, 0 /* ( */, 0 /* ) */, 0 /* * */, 0 /* + */, 0 /* , */, 0 /* - */, 0 /* . */, 0 /* / */, 0 /* 0 */, 0 /* 1 */, 0 /* 2 */, 0 /* 3 */, 0 /* 4 */, 0 /* 5 */, 0 /* 6 */, 0 /* 7 */, 0 /* 8 */, 0 /* 9 */, 0 /* : */, 0 /* ; */, 0 /* < */, 0 /* = */, 0 /* > */, 0 /* ? 
*/, 0 /* @ */, 0 /* A */, 0 /* B */, 0 /* C */, 0 /* D */, 0 /* E */, 0 /* F */, 0 /* G */, 0 /* H */, 0 /* I */, 0 /* J */, 0 /* K */, 0 /* L */, 0 /* M */, 0 /* N */, 0 /* O */, 0 /* P */, 0 /* Q */, 0 /* R */, 0 /* S */, 0 /* T */, 0 /* U */, 0 /* V */, 0 /* W */, 0 /* X */, 0 /* Y */, 0 /* Z */, 0 /* [ */, 0 /* \ */, 0 /* ] */, 0 /* ^ */, 0 /* _ */, 0 /* ` */, 0 /* a */, 0 /* b */, 0 /* c */, 0 /* d */, 0 /* e */, 0 /* f */, 0 /* g */, 0 /* h */, 0 /* i */, 0 /* j */, 0 /* k */, 0 /* l */, 0 /* m */, 0 /* n */, 0 /* o */, 0 /* p */, 0 /* q */, 0 /* r */, 0 /* s */, 0 /* t */, 0 /* u */, 0 /* v */, 0 /* w */, 0 /* x */, 0 /* y */, 0 /* z */, 0 /* { */, 0 /* | */, 0 /* } */, 0 /* ~ */}, + {0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* ! */, 0 /* " */, 0 /* # */, 0 /* $ */, 0 /* % */, 0 /* & */, 0 /* ' */, 0 /* ( */, 0 /* ) */, 0 /* * */, 0 /* + */, 0 /* , */, 0 /* - */, 0 /* . */, 0 /* / */, 0 /* 0 */, 0 /* 1 */, 0 /* 2 */, 0 /* 3 */, 0 /* 4 */, 0 /* 5 */, 0 /* 6 */, 0 /* 7 */, 0 /* 8 */, 0 /* 9 */, 0 /* : */, 0 /* ; */, 0 /* < */, 0 /* = */, 0 /* > */, 0 /* ? */, 0 /* @ */, 0 /* A */, 0 /* B */, 0 /* C */, 0 /* D */, 0 /* E */, 0 /* F */, 0 /* G */, 0 /* H */, 0 /* I */, 0 /* J */, 0 /* K */, 0 /* L */, 0 /* M */, 0 /* N */, 0 /* O */, 0 /* P */, 0 /* Q */, 0 /* R */, 0 /* S */, 0 /* T */, 0 /* U */, 0 /* V */, 0 /* W */, 0 /* X */, 0 /* Y */, 0 /* Z */, 0 /* [ */, 0 /* \ */, 0 /* ] */, 0 /* ^ */, 0 /* _ */, 0 /* ` */, 0 /* a */, 0 /* b */, 0 /* c */, 0 /* d */, 0 /* e */, 0 /* f */, 0 /* g */, 0 /* h */, 0 /* i */, 0 /* j */, 0 /* k */, 0 /* l */, 0 /* m */, 0 /* n */, 0 /* o */, 0 /* p */, 0 /* q */, 0 /* r */, 0 /* s */, 0 /* t */, 0 /* u */, 0 /* v */, 0 /* w */, 0 /* x */, 0 /* y */, 0 /* z */, 0 /* { */, 0 /* | */, 0 /* } */, 0 /* ~ */}, + {0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* ! */, 0 /* " */, 0 /* # */, 0 /* $ */, 0 /* % */, 0 /* & */, 0 /* ' */, 0 /* ( */, 0 /* ) */, 0 /* * */, 0 /* + */, 0 /* , */, 0 /* - */, 0 /* . */, 0 /* / */, 0 /* 0 */, 0 /* 1 */, 0 /* 2 */, 0 /* 3 */, 0 /* 4 */, 0 /* 5 */, 0 /* 6 */, 0 /* 7 */, 0 /* 8 */, 0 /* 9 */, 0 /* : */, 0 /* ; */, 0 /* < */, 0 /* = */, 0 /* > */, 0 /* ? 
*/, 0 /* @ */, 0 /* A */, 0 /* B */, 0 /* C */, 0 /* D */, 0 /* E */, 0 /* F */, 0 /* G */, 0 /* H */, 0 /* I */, 0 /* J */, 0 /* K */, 0 /* L */, 0 /* M */, 0 /* N */, 0 /* O */, 0 /* P */, 0 /* Q */, 0 /* R */, 0 /* S */, 0 /* T */, 0 /* U */, 0 /* V */, 0 /* W */, 0 /* X */, 0 /* Y */, 0 /* Z */, 0 /* [ */, 0 /* \ */, 0 /* ] */, 0 /* ^ */, 0 /* _ */, 0 /* ` */, 0 /* a */, 0 /* b */, 0 /* c */, 0 /* d */, 0 /* e */, 0 /* f */, 0 /* g */, 0 /* h */, 0 /* i */, 0 /* j */, 0 /* k */, 0 /* l */, 0 /* m */, 0 /* n */, 0 /* o */, 0 /* p */, 0 /* q */, 0 /* r */, 0 /* s */, 0 /* t */, 0 /* u */, 0 /* v */, 0 /* w */, 0 /* x */, 0 /* y */, 0 /* z */, 0 /* { */, 0 /* | */, 0 /* } */, 0 /* ~ */}, + {0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* ! */, 0 /* " */, 0 /* # */, 0 /* $ */, 0 /* % */, 0 /* & */, 0 /* ' */, 0 /* ( */, 0 /* ) */, 0 /* * */, 0 /* + */, 0 /* , */, 0 /* - */, 0 /* . */, 0 /* / */, 0 /* 0 */, 0 /* 1 */, 0 /* 2 */, 0 /* 3 */, 0 /* 4 */, 0 /* 5 */, 0 /* 6 */, 0 /* 7 */, 0 /* 8 */, 0 /* 9 */, 0 /* : */, 0 /* ; */, 0 /* < */, 0 /* = */, 0 /* > */, 0 /* ? */, 0 /* @ */, 0 /* A */, 0 /* B */, 0 /* C */, 0 /* D */, 0 /* E */, 0 /* F */, 0 /* G */, 0 /* H */, 0 /* I */, 0 /* J */, 0 /* K */, 0 /* L */, 0 /* M */, 0 /* N */, 0 /* O */, 0 /* P */, 0 /* Q */, 0 /* R */, 0 /* S */, 0 /* T */, 0 /* U */, 0 /* V */, 0 /* W */, 0 /* X */, 0 /* Y */, 0 /* Z */, 0 /* [ */, 0 /* \ */, 0 /* ] */, 0 /* ^ */, 0 /* _ */, 0 /* ` */, 0 /* a */, 0 /* b */, 0 /* c */, 0 /* d */, 0 /* e */, 0 /* f */, 0 /* g */, 0 /* h */, 0 /* i */, 0 /* j */, 0 /* k */, 0 /* l */, 0 /* m */, 0 /* n */, 0 /* o */, 0 /* p */, 0 /* q */, 0 /* r */, 0 /* s */, 0 /* t */, 0 /* u */, 0 /* v */, 0 /* w */, 0 /* x */, 0 /* y */, 0 /* z */, 0 /* { */, 0 /* | */, 0 /* } */, 0 /* ~ */}, + {0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* ! */, 0 /* " */, 0 /* # */, 0 /* $ */, 0 /* % */, 0 /* & */, 0 /* ' */, 0 /* ( */, 0 /* ) */, 0 /* * */, 0 /* + */, 0 /* , */, 0 /* - */, 0 /* . */, 0 /* / */, 0 /* 0 */, 0 /* 1 */, 0 /* 2 */, 0 /* 3 */, 0 /* 4 */, 0 /* 5 */, 0 /* 6 */, 0 /* 7 */, 0 /* 8 */, 0 /* 9 */, 0 /* : */, 0 /* ; */, 0 /* < */, 0 /* = */, 0 /* > */, 0 /* ? 
*/, 0 /* @ */, 0 /* A */, 0 /* B */, 0 /* C */, 0 /* D */, 0 /* E */, 0 /* F */, 0 /* G */, 0 /* H */, 0 /* I */, 0 /* J */, 0 /* K */, 0 /* L */, 0 /* M */, 0 /* N */, 0 /* O */, 0 /* P */, 0 /* Q */, 0 /* R */, 0 /* S */, 0 /* T */, 0 /* U */, 0 /* V */, 0 /* W */, 0 /* X */, 0 /* Y */, 0 /* Z */, 0 /* [ */, 0 /* \ */, 0 /* ] */, 0 /* ^ */, 0 /* _ */, 0 /* ` */, 0 /* a */, 0 /* b */, 0 /* c */, 0 /* d */, 0 /* e */, 0 /* f */, 0 /* g */, 0 /* h */, 0 /* i */, 0 /* j */, 0 /* k */, 0 /* l */, 0 /* m */, 0 /* n */, 0 /* o */, 0 /* p */, 0 /* q */, 0 /* r */, 0 /* s */, 0 /* t */, 0 /* u */, 0 /* v */, 0 /* w */, 0 /* x */, 0 /* y */, 0 /* z */, 0 /* { */, 0 /* | */, 0 /* } */, 0 /* ~ */}, + {0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* ! */, 0 /* " */, 0 /* # */, 0 /* $ */, 0 /* % */, 0 /* & */, 0 /* ' */, 0 /* ( */, 0 /* ) */, 0 /* * */, 0 /* + */, 0 /* , */, 0 /* - */, 0 /* . */, 0 /* / */, 0 /* 0 */, 0 /* 1 */, 0 /* 2 */, 0 /* 3 */, 0 /* 4 */, 0 /* 5 */, 0 /* 6 */, 0 /* 7 */, 0 /* 8 */, 0 /* 9 */, 0 /* : */, 0 /* ; */, 0 /* < */, 0 /* = */, 0 /* > */, 0 /* ? */, 0 /* @ */, 0 /* A */, 0 /* B */, 0 /* C */, 0 /* D */, 0 /* E */, 0 /* F */, 0 /* G */, 0 /* H */, 0 /* I */, 0 /* J */, 0 /* K */, 0 /* L */, 0 /* M */, 0 /* N */, 0 /* O */, 0 /* P */, 0 /* Q */, 0 /* R */, 0 /* S */, 0 /* T */, 0 /* U */, 0 /* V */, 0 /* W */, 0 /* X */, 0 /* Y */, 0 /* Z */, 0 /* [ */, 0 /* \ */, 0 /* ] */, 0 /* ^ */, 0 /* _ */, 0 /* ` */, 0 /* a */, 0 /* b */, 0 /* c */, 0 /* d */, 0 /* e */, 0 /* f */, 0 /* g */, 0 /* h */, 0 /* i */, 0 /* j */, 0 /* k */, 0 /* l */, 0 /* m */, 0 /* n */, 0 /* o */, 0 /* p */, 0 /* q */, 0 /* r */, 0 /* s */, 0 /* t */, 0 /* u */, 0 /* v */, 0 /* w */, 0 /* x */, 0 /* y */, 0 /* z */, 0 /* { */, 0 /* | */, 0 /* } */, 0 /* ~ */}, + {0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* ! */, 0 /* " */, 0 /* # */, 0 /* $ */, 0 /* % */, 0 /* & */, 0 /* ' */, 0 /* ( */, 0 /* ) */, 0 /* * */, 0 /* + */, 0 /* , */, 0 /* - */, 0 /* . */, 0 /* / */, 0 /* 0 */, 0 /* 1 */, 0 /* 2 */, 0 /* 3 */, 0 /* 4 */, 0 /* 5 */, 0 /* 6 */, 0 /* 7 */, 0 /* 8 */, 0 /* 9 */, 0 /* : */, 0 /* ; */, 0 /* < */, 0 /* = */, 0 /* > */, 0 /* ? 
*/, 0 /* @ */, 0 /* A */, 0 /* B */, 0 /* C */, 0 /* D */, 0 /* E */, 0 /* F */, 0 /* G */, 0 /* H */, 0 /* I */, 0 /* J */, 0 /* K */, 0 /* L */, 0 /* M */, 0 /* N */, 0 /* O */, 0 /* P */, 0 /* Q */, 0 /* R */, 0 /* S */, 0 /* T */, 0 /* U */, 0 /* V */, 0 /* W */, 0 /* X */, 0 /* Y */, 0 /* Z */, 0 /* [ */, 0 /* \ */, 0 /* ] */, 0 /* ^ */, 0 /* _ */, 0 /* ` */, 0 /* a */, 0 /* b */, 0 /* c */, 0 /* d */, 0 /* e */, 0 /* f */, 0 /* g */, 0 /* h */, 0 /* i */, 0 /* j */, 0 /* k */, 0 /* l */, 0 /* m */, 0 /* n */, 0 /* o */, 0 /* p */, 0 /* q */, 0 /* r */, 0 /* s */, 0 /* t */, 0 /* u */, 0 /* v */, 0 /* w */, 0 /* x */, 0 /* y */, 0 /* z */, 0 /* { */, 0 /* | */, 0 /* } */, 0 /* ~ */}, + {0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* ! */, 0 /* " */, 0 /* # */, 0 /* $ */, 0 /* % */, 0 /* & */, 0 /* ' */, 0 /* ( */, 0 /* ) */, 0 /* * */, 0 /* + */, 0 /* , */, 0 /* - */, 0 /* . */, 0 /* / */, 0 /* 0 */, 0 /* 1 */, 0 /* 2 */, 0 /* 3 */, 0 /* 4 */, 0 /* 5 */, 0 /* 6 */, 0 /* 7 */, 0 /* 8 */, 0 /* 9 */, 0 /* : */, 0 /* ; */, 0 /* < */, 0 /* = */, 0 /* > */, 0 /* ? */, 0 /* @ */, 0 /* A */, 0 /* B */, 0 /* C */, 0 /* D */, 0 /* E */, 0 /* F */, 0 /* G */, 0 /* H */, 0 /* I */, 0 /* J */, 0 /* K */, 0 /* L */, 0 /* M */, 0 /* N */, 0 /* O */, 0 /* P */, 0 /* Q */, 0 /* R */, 0 /* S */, 0 /* T */, 0 /* U */, 0 /* V */, 0 /* W */, 0 /* X */, 0 /* Y */, 0 /* Z */, 0 /* [ */, 0 /* \ */, 0 /* ] */, 0 /* ^ */, 0 /* _ */, 0 /* ` */, 0 /* a */, 0 /* b */, 0 /* c */, 0 /* d */, 0 /* e */, 0 /* f */, 0 /* g */, 0 /* h */, 0 /* i */, 0 /* j */, 0 /* k */, 0 /* l */, 0 /* m */, 0 /* n */, 0 /* o */, 0 /* p */, 0 /* q */, 0 /* r */, 0 /* s */, 0 /* t */, 0 /* u */, 0 /* v */, 0 /* w */, 0 /* x */, 0 /* y */, 0 /* z */, 0 /* { */, 0 /* | */, 0 /* } */, 0 /* ~ */}, + {0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* ! */, 0 /* " */, 0 /* # */, 0 /* $ */, 0 /* % */, 0 /* & */, 0 /* ' */, 0 /* ( */, 0 /* ) */, 0 /* * */, 0 /* + */, 0 /* , */, 0 /* - */, 0 /* . */, 0 /* / */, 0 /* 0 */, 0 /* 1 */, 0 /* 2 */, 0 /* 3 */, 0 /* 4 */, 0 /* 5 */, 0 /* 6 */, 0 /* 7 */, 0 /* 8 */, 0 /* 9 */, 0 /* : */, 0 /* ; */, 0 /* < */, 0 /* = */, 0 /* > */, 0 /* ? 
*/, 0 /* @ */, 0 /* A */, 0 /* B */, 0 /* C */, 0 /* D */, 0 /* E */, 0 /* F */, 0 /* G */, 0 /* H */, 0 /* I */, 0 /* J */, 0 /* K */, 0 /* L */, 0 /* M */, 0 /* N */, 0 /* O */, 0 /* P */, 0 /* Q */, 0 /* R */, 0 /* S */, 0 /* T */, 0 /* U */, 0 /* V */, 0 /* W */, 0 /* X */, 0 /* Y */, 0 /* Z */, 0 /* [ */, 0 /* \ */, 0 /* ] */, 0 /* ^ */, 0 /* _ */, 0 /* ` */, 0 /* a */, 0 /* b */, 0 /* c */, 0 /* d */, 0 /* e */, 0 /* f */, 0 /* g */, 0 /* h */, 0 /* i */, 0 /* j */, 0 /* k */, 0 /* l */, 0 /* m */, 0 /* n */, 0 /* o */, 0 /* p */, 0 /* q */, 0 /* r */, 0 /* s */, 0 /* t */, 0 /* u */, 0 /* v */, 0 /* w */, 0 /* x */, 0 /* y */, 0 /* z */, 0 /* { */, 0 /* | */, 0 /* } */, 0 /* ~ */}, + {0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* ! */, 0 /* " */, 0 /* # */, 0 /* $ */, 0 /* % */, 0 /* & */, 0 /* ' */, 0 /* ( */, 0 /* ) */, 0 /* * */, 0 /* + */, 0 /* , */, 0 /* - */, 0 /* . */, 0 /* / */, 0 /* 0 */, 0 /* 1 */, 0 /* 2 */, 0 /* 3 */, 0 /* 4 */, 0 /* 5 */, 0 /* 6 */, 0 /* 7 */, 0 /* 8 */, 0 /* 9 */, 0 /* : */, 0 /* ; */, 0 /* < */, 0 /* = */, 0 /* > */, 0 /* ? */, 0 /* @ */, 0 /* A */, 0 /* B */, 0 /* C */, 0 /* D */, 0 /* E */, 0 /* F */, 0 /* G */, 0 /* H */, 0 /* I */, 0 /* J */, 0 /* K */, 0 /* L */, 0 /* M */, 0 /* N */, 0 /* O */, 0 /* P */, 0 /* Q */, 0 /* R */, 0 /* S */, 0 /* T */, 0 /* U */, 0 /* V */, 0 /* W */, 0 /* X */, 0 /* Y */, 0 /* Z */, 0 /* [ */, 0 /* \ */, 0 /* ] */, 0 /* ^ */, 0 /* _ */, 0 /* ` */, 0 /* a */, 0 /* b */, 0 /* c */, 0 /* d */, 0 /* e */, 0 /* f */, 0 /* g */, 0 /* h */, 0 /* i */, 0 /* j */, 0 /* k */, 0 /* l */, 0 /* m */, 0 /* n */, 0 /* o */, 0 /* p */, 0 /* q */, 0 /* r */, 0 /* s */, 0 /* t */, 0 /* u */, 0 /* v */, 0 /* w */, 0 /* x */, 0 /* y */, 0 /* z */, 0 /* { */, 0 /* | */, 0 /* } */, 0 /* ~ */}, + {0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* ! */, 0 /* " */, 0 /* # */, 0 /* $ */, 0 /* % */, 0 /* & */, 0 /* ' */, 0 /* ( */, 0 /* ) */, 0 /* * */, 0 /* + */, 0 /* , */, 0 /* - */, 0 /* . */, 0 /* / */, 0 /* 0 */, 0 /* 1 */, 0 /* 2 */, 0 /* 3 */, 0 /* 4 */, 0 /* 5 */, 0 /* 6 */, 0 /* 7 */, 0 /* 8 */, 0 /* 9 */, 0 /* : */, 0 /* ; */, 0 /* < */, 0 /* = */, 0 /* > */, 0 /* ? 
*/, 0 /* @ */, 0 /* A */, 0 /* B */, 0 /* C */, 0 /* D */, 0 /* E */, 0 /* F */, 0 /* G */, 0 /* H */, 0 /* I */, 0 /* J */, 0 /* K */, 0 /* L */, 0 /* M */, 0 /* N */, 0 /* O */, 0 /* P */, 0 /* Q */, 0 /* R */, 0 /* S */, 0 /* T */, 0 /* U */, 0 /* V */, 0 /* W */, 0 /* X */, 0 /* Y */, 0 /* Z */, 0 /* [ */, 0 /* \ */, 0 /* ] */, 0 /* ^ */, 0 /* _ */, 0 /* ` */, 0 /* a */, 0 /* b */, 0 /* c */, 0 /* d */, 0 /* e */, 0 /* f */, 0 /* g */, 0 /* h */, 0 /* i */, 0 /* j */, 0 /* k */, 0 /* l */, 0 /* m */, 0 /* n */, 0 /* o */, 0 /* p */, 0 /* q */, 0 /* r */, 0 /* s */, 0 /* t */, 0 /* u */, 0 /* v */, 0 /* w */, 0 /* x */, 0 /* y */, 0 /* z */, 0 /* { */, 0 /* | */, 0 /* } */, 0 /* ~ */}, + {0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* ! */, 0 /* " */, 0 /* # */, 0 /* $ */, 0 /* % */, 0 /* & */, 0 /* ' */, 0 /* ( */, 0 /* ) */, 0 /* * */, 0 /* + */, 0 /* , */, 0 /* - */, 0 /* . */, 0 /* / */, 0 /* 0 */, 0 /* 1 */, 0 /* 2 */, 0 /* 3 */, 0 /* 4 */, 0 /* 5 */, 0 /* 6 */, 0 /* 7 */, 0 /* 8 */, 0 /* 9 */, 0 /* : */, 0 /* ; */, 0 /* < */, 0 /* = */, 0 /* > */, 0 /* ? */, 0 /* @ */, 0 /* A */, 0 /* B */, 0 /* C */, 0 /* D */, 0 /* E */, 0 /* F */, 0 /* G */, 0 /* H */, 0 /* I */, 0 /* J */, 0 /* K */, 0 /* L */, 0 /* M */, 0 /* N */, 0 /* O */, 0 /* P */, 0 /* Q */, 0 /* R */, 0 /* S */, 0 /* T */, 0 /* U */, 0 /* V */, 0 /* W */, 0 /* X */, 0 /* Y */, 0 /* Z */, 0 /* [ */, 0 /* \ */, 0 /* ] */, 0 /* ^ */, 0 /* _ */, 0 /* ` */, 0 /* a */, 0 /* b */, 0 /* c */, 0 /* d */, 0 /* e */, 0 /* f */, 0 /* g */, 0 /* h */, 0 /* i */, 0 /* j */, 0 /* k */, 0 /* l */, 0 /* m */, 0 /* n */, 0 /* o */, 0 /* p */, 0 /* q */, 0 /* r */, 0 /* s */, 0 /* t */, 0 /* u */, 0 /* v */, 0 /* w */, 0 /* x */, 0 /* y */, 0 /* z */, 0 /* { */, 0 /* | */, 0 /* } */, 0 /* ~ */}, + {0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* ! */, 0 /* " */, 0 /* # */, 0 /* $ */, 0 /* % */, 0 /* & */, 0 /* ' */, 0 /* ( */, 0 /* ) */, 0 /* * */, 0 /* + */, 0 /* , */, 0 /* - */, 0 /* . */, 0 /* / */, 0 /* 0 */, 0 /* 1 */, 0 /* 2 */, 0 /* 3 */, 0 /* 4 */, 0 /* 5 */, 0 /* 6 */, 0 /* 7 */, 0 /* 8 */, 0 /* 9 */, 0 /* : */, 0 /* ; */, 0 /* < */, 0 /* = */, 0 /* > */, 0 /* ? 
*/, 0 /* @ */, 0 /* A */, 0 /* B */, 0 /* C */, 0 /* D */, 0 /* E */, 0 /* F */, 0 /* G */, 0 /* H */, 0 /* I */, 0 /* J */, 0 /* K */, 0 /* L */, 0 /* M */, 0 /* N */, 0 /* O */, 0 /* P */, 0 /* Q */, 0 /* R */, 0 /* S */, 0 /* T */, 0 /* U */, 0 /* V */, 0 /* W */, 0 /* X */, 0 /* Y */, 0 /* Z */, 0 /* [ */, 0 /* \ */, 0 /* ] */, 0 /* ^ */, 0 /* _ */, 0 /* ` */, 0 /* a */, 0 /* b */, 0 /* c */, 0 /* d */, 0 /* e */, 0 /* f */, 0 /* g */, 0 /* h */, 0 /* i */, 0 /* j */, 0 /* k */, 0 /* l */, 0 /* m */, 0 /* n */, 0 /* o */, 0 /* p */, 0 /* q */, 0 /* r */, 0 /* s */, 0 /* t */, 0 /* u */, 0 /* v */, 0 /* w */, 0 /* x */, 0 /* y */, 0 /* z */, 0 /* { */, 0 /* | */, 0 /* } */, 0 /* ~ */}, + {0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* ! */, 0 /* " */, 0 /* # */, 0 /* $ */, 0 /* % */, 0 /* & */, 0 /* ' */, 0 /* ( */, 0 /* ) */, 0 /* * */, 0 /* + */, 0 /* , */, 0 /* - */, 0 /* . */, 0 /* / */, 0 /* 0 */, 0 /* 1 */, 0 /* 2 */, 0 /* 3 */, 0 /* 4 */, 0 /* 5 */, 0 /* 6 */, 0 /* 7 */, 0 /* 8 */, 0 /* 9 */, 0 /* : */, 0 /* ; */, 0 /* < */, 0 /* = */, 0 /* > */, 0 /* ? */, 0 /* @ */, 0 /* A */, 0 /* B */, 0 /* C */, 0 /* D */, 0 /* E */, 0 /* F */, 0 /* G */, 0 /* H */, 0 /* I */, 0 /* J */, 0 /* K */, 0 /* L */, 0 /* M */, 0 /* N */, 0 /* O */, 0 /* P */, 0 /* Q */, 0 /* R */, 0 /* S */, 0 /* T */, 0 /* U */, 0 /* V */, 0 /* W */, 0 /* X */, 0 /* Y */, 0 /* Z */, 0 /* [ */, 0 /* \ */, 0 /* ] */, 0 /* ^ */, 0 /* _ */, 0 /* ` */, 0 /* a */, 0 /* b */, 0 /* c */, 0 /* d */, 0 /* e */, 0 /* f */, 0 /* g */, 0 /* h */, 0 /* i */, 0 /* j */, 0 /* k */, 0 /* l */, 0 /* m */, 0 /* n */, 0 /* o */, 0 /* p */, 0 /* q */, 0 /* r */, 0 /* s */, 0 /* t */, 0 /* u */, 0 /* v */, 0 /* w */, 0 /* x */, 0 /* y */, 0 /* z */, 0 /* { */, 0 /* | */, 0 /* } */, 0 /* ~ */}, + {0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* ! */, 0 /* " */, 0 /* # */, 0 /* $ */, 0 /* % */, 0 /* & */, 0 /* ' */, 0 /* ( */, 0 /* ) */, 0 /* * */, 0 /* + */, 0 /* , */, 0 /* - */, 0 /* . */, 0 /* / */, 0 /* 0 */, 0 /* 1 */, 0 /* 2 */, 0 /* 3 */, 0 /* 4 */, 0 /* 5 */, 0 /* 6 */, 0 /* 7 */, 0 /* 8 */, 0 /* 9 */, 0 /* : */, 0 /* ; */, 0 /* < */, 0 /* = */, 0 /* > */, 0 /* ? 
*/, 0 /* @ */, 0 /* A */, 0 /* B */, 0 /* C */, 0 /* D */, 0 /* E */, 0 /* F */, 0 /* G */, 0 /* H */, 0 /* I */, 0 /* J */, 0 /* K */, 0 /* L */, 0 /* M */, 0 /* N */, 0 /* O */, 0 /* P */, 0 /* Q */, 0 /* R */, 0 /* S */, 0 /* T */, 0 /* U */, 0 /* V */, 0 /* W */, 0 /* X */, 0 /* Y */, 0 /* Z */, 0 /* [ */, 0 /* \ */, 0 /* ] */, 0 /* ^ */, 0 /* _ */, 0 /* ` */, 0 /* a */, 0 /* b */, 0 /* c */, 0 /* d */, 0 /* e */, 0 /* f */, 0 /* g */, 0 /* h */, 0 /* i */, 0 /* j */, 0 /* k */, 0 /* l */, 0 /* m */, 0 /* n */, 0 /* o */, 0 /* p */, 0 /* q */, 0 /* r */, 0 /* s */, 0 /* t */, 0 /* u */, 0 /* v */, 0 /* w */, 0 /* x */, 0 /* y */, 0 /* z */, 0 /* { */, 0 /* | */, 0 /* } */, 0 /* ~ */}, + {0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* ! */, 0 /* " */, 0 /* # */, 0 /* $ */, 0 /* % */, 0 /* & */, 0 /* ' */, 0 /* ( */, 0 /* ) */, 0 /* * */, 0 /* + */, 0 /* , */, 0 /* - */, 0 /* . */, 0 /* / */, 0 /* 0 */, 0 /* 1 */, 0 /* 2 */, 0 /* 3 */, 0 /* 4 */, 0 /* 5 */, 0 /* 6 */, 0 /* 7 */, 0 /* 8 */, 0 /* 9 */, 0 /* : */, 0 /* ; */, 0 /* < */, 0 /* = */, 0 /* > */, 0 /* ? */, 0 /* @ */, 0 /* A */, 0 /* B */, 0 /* C */, 0 /* D */, 0 /* E */, 0 /* F */, 0 /* G */, 0 /* H */, 0 /* I */, 0 /* J */, 0 /* K */, 0 /* L */, 0 /* M */, 0 /* N */, 0 /* O */, 0 /* P */, 0 /* Q */, 0 /* R */, 0 /* S */, 0 /* T */, 0 /* U */, 0 /* V */, 0 /* W */, 0 /* X */, 0 /* Y */, 0 /* Z */, 0 /* [ */, 0 /* \ */, 0 /* ] */, 0 /* ^ */, 0 /* _ */, 0 /* ` */, 0 /* a */, 0 /* b */, 0 /* c */, 0 /* d */, 0 /* e */, 0 /* f */, 0 /* g */, 0 /* h */, 0 /* i */, 0 /* j */, 0 /* k */, 0 /* l */, 0 /* m */, 0 /* n */, 0 /* o */, 0 /* p */, 0 /* q */, 0 /* r */, 0 /* s */, 0 /* t */, 0 /* u */, 0 /* v */, 0 /* w */, 0 /* x */, 0 /* y */, 0 /* z */, 0 /* { */, 0 /* | */, 0 /* } */, 0 /* ~ */}, + {0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* ! */, 0 /* " */, 0 /* # */, 0 /* $ */, 0 /* % */, 0 /* & */, 0 /* ' */, 0 /* ( */, 0 /* ) */, 0 /* * */, 0 /* + */, 0 /* , */, 0 /* - */, 0 /* . */, 0 /* / */, 0 /* 0 */, 0 /* 1 */, 0 /* 2 */, 0 /* 3 */, 0 /* 4 */, 0 /* 5 */, 0 /* 6 */, 0 /* 7 */, 0 /* 8 */, 0 /* 9 */, 0 /* : */, 0 /* ; */, 0 /* < */, 0 /* = */, 0 /* > */, 0 /* ? 
*/, 0 /* @ */, 0 /* A */, 0 /* B */, 0 /* C */, 0 /* D */, 0 /* E */, 0 /* F */, 0 /* G */, 0 /* H */, 0 /* I */, 0 /* J */, 0 /* K */, 0 /* L */, 0 /* M */, 0 /* N */, 0 /* O */, 0 /* P */, 0 /* Q */, 0 /* R */, 0 /* S */, 0 /* T */, 0 /* U */, 0 /* V */, 0 /* W */, 0 /* X */, 0 /* Y */, 0 /* Z */, 0 /* [ */, 0 /* \ */, 0 /* ] */, 0 /* ^ */, 0 /* _ */, 0 /* ` */, 0 /* a */, 0 /* b */, 0 /* c */, 0 /* d */, 0 /* e */, 0 /* f */, 0 /* g */, 0 /* h */, 0 /* i */, 0 /* j */, 0 /* k */, 0 /* l */, 0 /* m */, 0 /* n */, 0 /* o */, 0 /* p */, 0 /* q */, 0 /* r */, 0 /* s */, 0 /* t */, 0 /* u */, 0 /* v */, 0 /* w */, 0 /* x */, 0 /* y */, 0 /* z */, 0 /* { */, 0 /* | */, 0 /* } */, 0 /* ~ */}, + {0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* ! */, 0 /* " */, 0 /* # */, 0 /* $ */, 0 /* % */, 0 /* & */, 0 /* ' */, 0 /* ( */, 0 /* ) */, 0 /* * */, 0 /* + */, 0 /* , */, 0 /* - */, 0 /* . */, 0 /* / */, 0 /* 0 */, 0 /* 1 */, 0 /* 2 */, 0 /* 3 */, 0 /* 4 */, 0 /* 5 */, 0 /* 6 */, 0 /* 7 */, 0 /* 8 */, 0 /* 9 */, 0 /* : */, 0 /* ; */, 0 /* < */, 0 /* = */, 0 /* > */, 0 /* ? */, 0 /* @ */, 0 /* A */, 0 /* B */, 0 /* C */, 0 /* D */, 0 /* E */, 0 /* F */, 0 /* G */, 0 /* H */, 0 /* I */, 0 /* J */, 0 /* K */, 0 /* L */, 0 /* M */, 0 /* N */, 0 /* O */, 0 /* P */, 0 /* Q */, 0 /* R */, 0 /* S */, 0 /* T */, 0 /* U */, 0 /* V */, 0 /* W */, 0 /* X */, 0 /* Y */, 0 /* Z */, 0 /* [ */, 0 /* \ */, 0 /* ] */, 0 /* ^ */, 0 /* _ */, 0 /* ` */, 0 /* a */, 0 /* b */, 0 /* c */, 0 /* d */, 0 /* e */, 0 /* f */, 0 /* g */, 0 /* h */, 0 /* i */, 0 /* j */, 0 /* k */, 0 /* l */, 0 /* m */, 0 /* n */, 0 /* o */, 0 /* p */, 0 /* q */, 0 /* r */, 0 /* s */, 0 /* t */, 0 /* u */, 0 /* v */, 0 /* w */, 0 /* x */, 0 /* y */, 0 /* z */, 0 /* { */, 0 /* | */, 0 /* } */, 0 /* ~ */}, + {0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* ! */, 0 /* " */, 0 /* # */, 0 /* $ */, 0 /* % */, 0 /* & */, 0 /* ' */, 0 /* ( */, 0 /* ) */, 0 /* * */, 0 /* + */, 0 /* , */, 0 /* - */, 0 /* . */, 0 /* / */, 0 /* 0 */, 0 /* 1 */, 0 /* 2 */, 0 /* 3 */, 0 /* 4 */, 0 /* 5 */, 0 /* 6 */, 0 /* 7 */, 0 /* 8 */, 0 /* 9 */, 0 /* : */, 0 /* ; */, 0 /* < */, 0 /* = */, 0 /* > */, 0 /* ? 
*/, 0 /* @ */, 0 /* A */, 0 /* B */, 0 /* C */, 0 /* D */, 0 /* E */, 0 /* F */, 0 /* G */, 0 /* H */, 0 /* I */, 0 /* J */, 0 /* K */, 0 /* L */, 0 /* M */, 0 /* N */, 0 /* O */, 0 /* P */, 0 /* Q */, 0 /* R */, 0 /* S */, 0 /* T */, 0 /* U */, 0 /* V */, 0 /* W */, 0 /* X */, 0 /* Y */, 0 /* Z */, 0 /* [ */, 0 /* \ */, 0 /* ] */, 0 /* ^ */, 0 /* _ */, 0 /* ` */, 0 /* a */, 0 /* b */, 0 /* c */, 0 /* d */, 0 /* e */, 0 /* f */, 0 /* g */, 0 /* h */, 0 /* i */, 0 /* j */, 0 /* k */, 0 /* l */, 0 /* m */, 0 /* n */, 0 /* o */, 0 /* p */, 0 /* q */, 0 /* r */, 0 /* s */, 0 /* t */, 0 /* u */, 0 /* v */, 0 /* w */, 0 /* x */, 0 /* y */, 0 /* z */, 0 /* { */, 0 /* | */, 0 /* } */, 0 /* ~ */}, + {0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* ! */, 0 /* " */, 0 /* # */, 0 /* $ */, 0 /* % */, 0 /* & */, 0 /* ' */, 0 /* ( */, 0 /* ) */, 0 /* * */, 0 /* + */, 0 /* , */, 0 /* - */, 0 /* . */, 0 /* / */, 0 /* 0 */, 0 /* 1 */, 0 /* 2 */, 0 /* 3 */, 0 /* 4 */, 0 /* 5 */, 0 /* 6 */, 0 /* 7 */, 0 /* 8 */, 0 /* 9 */, 0 /* : */, 0 /* ; */, 0 /* < */, 0 /* = */, 0 /* > */, 0 /* ? */, 0 /* @ */, 0 /* A */, 0 /* B */, 0 /* C */, 0 /* D */, 0 /* E */, 0 /* F */, 0 /* G */, 0 /* H */, 0 /* I */, 0 /* J */, 0 /* K */, 0 /* L */, 0 /* M */, 0 /* N */, 0 /* O */, 0 /* P */, 0 /* Q */, 0 /* R */, 0 /* S */, 0 /* T */, 0 /* U */, 0 /* V */, 0 /* W */, 0 /* X */, 0 /* Y */, 0 /* Z */, 0 /* [ */, 0 /* \ */, 0 /* ] */, 0 /* ^ */, 0 /* _ */, 0 /* ` */, 0 /* a */, 0 /* b */, 0 /* c */, 0 /* d */, 0 /* e */, 0 /* f */, 0 /* g */, 0 /* h */, 0 /* i */, 0 /* j */, 0 /* k */, 0 /* l */, 0 /* m */, 0 /* n */, 0 /* o */, 0 /* p */, 0 /* q */, 0 /* r */, 0 /* s */, 0 /* t */, 0 /* u */, 0 /* v */, 0 /* w */, 0 /* x */, 0 /* y */, 0 /* z */, 0 /* { */, 0 /* | */, 0 /* } */, 0 /* ~ */}, + {0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* ! */, 0 /* " */, 0 /* # */, 0 /* $ */, 0 /* % */, 0 /* & */, 0 /* ' */, 0 /* ( */, 0 /* ) */, 0 /* * */, 0 /* + */, 0 /* , */, 0 /* - */, 0 /* . */, 0 /* / */, 0 /* 0 */, 0 /* 1 */, 0 /* 2 */, 0 /* 3 */, 0 /* 4 */, 0 /* 5 */, 0 /* 6 */, 0 /* 7 */, 0 /* 8 */, 0 /* 9 */, 0 /* : */, 0 /* ; */, 0 /* < */, 0 /* = */, 0 /* > */, 0 /* ? 
*/, 0 /* @ */, 0 /* A */, 0 /* B */, 0 /* C */, 0 /* D */, 0 /* E */, 0 /* F */, 0 /* G */, 0 /* H */, 0 /* I */, 0 /* J */, 0 /* K */, 0 /* L */, 0 /* M */, 0 /* N */, 0 /* O */, 0 /* P */, 0 /* Q */, 0 /* R */, 0 /* S */, 0 /* T */, 0 /* U */, 0 /* V */, 0 /* W */, 0 /* X */, 0 /* Y */, 0 /* Z */, 0 /* [ */, 0 /* \ */, 0 /* ] */, 0 /* ^ */, 0 /* _ */, 0 /* ` */, 0 /* a */, 0 /* b */, 0 /* c */, 0 /* d */, 0 /* e */, 0 /* f */, 0 /* g */, 0 /* h */, 0 /* i */, 0 /* j */, 0 /* k */, 0 /* l */, 0 /* m */, 0 /* n */, 0 /* o */, 0 /* p */, 0 /* q */, 0 /* r */, 0 /* s */, 0 /* t */, 0 /* u */, 0 /* v */, 0 /* w */, 0 /* x */, 0 /* y */, 0 /* z */, 0 /* { */, 0 /* | */, 0 /* } */, 0 /* ~ */}, + {0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* ! */, 0 /* " */, 0 /* # */, 0 /* $ */, 0 /* % */, 0 /* & */, 0 /* ' */, 0 /* ( */, 0 /* ) */, 0 /* * */, 0 /* + */, 0 /* , */, 0 /* - */, 0 /* . */, 0 /* / */, 0 /* 0 */, 0 /* 1 */, 0 /* 2 */, 0 /* 3 */, 0 /* 4 */, 0 /* 5 */, 0 /* 6 */, 0 /* 7 */, 0 /* 8 */, 0 /* 9 */, 0 /* : */, 0 /* ; */, 0 /* < */, 0 /* = */, 0 /* > */, 0 /* ? */, 0 /* @ */, 0 /* A */, 0 /* B */, 0 /* C */, 0 /* D */, 0 /* E */, 0 /* F */, 0 /* G */, 0 /* H */, 0 /* I */, 0 /* J */, 0 /* K */, 0 /* L */, 0 /* M */, 0 /* N */, 0 /* O */, 0 /* P */, 0 /* Q */, 0 /* R */, 0 /* S */, 0 /* T */, 0 /* U */, 0 /* V */, 0 /* W */, 0 /* X */, 0 /* Y */, 0 /* Z */, 0 /* [ */, 0 /* \ */, 0 /* ] */, 0 /* ^ */, 0 /* _ */, 0 /* ` */, 0 /* a */, 0 /* b */, 0 /* c */, 0 /* d */, 0 /* e */, 0 /* f */, 0 /* g */, 0 /* h */, 0 /* i */, 0 /* j */, 0 /* k */, 0 /* l */, 0 /* m */, 0 /* n */, 0 /* o */, 0 /* p */, 0 /* q */, 0 /* r */, 0 /* s */, 0 /* t */, 0 /* u */, 0 /* v */, 0 /* w */, 0 /* x */, 0 /* y */, 0 /* z */, 0 /* { */, 0 /* | */, 0 /* } */, 0 /* ~ */}, + {0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* ! */, 0 /* " */, 0 /* # */, 0 /* $ */, 0 /* % */, 0 /* & */, 0 /* ' */, 0 /* ( */, 0 /* ) */, 0 /* * */, 0 /* + */, 0 /* , */, 0 /* - */, 0 /* . */, 0 /* / */, 0 /* 0 */, 0 /* 1 */, 0 /* 2 */, 0 /* 3 */, 0 /* 4 */, 0 /* 5 */, 0 /* 6 */, 0 /* 7 */, 0 /* 8 */, 0 /* 9 */, 0 /* : */, 0 /* ; */, 0 /* < */, 0 /* = */, 0 /* > */, 0 /* ? 
*/, 0 /* @ */, 0 /* A */, 0 /* B */, 0 /* C */, 0 /* D */, 0 /* E */, 0 /* F */, 0 /* G */, 0 /* H */, 0 /* I */, 0 /* J */, 0 /* K */, 0 /* L */, 0 /* M */, 0 /* N */, 0 /* O */, 0 /* P */, 0 /* Q */, 0 /* R */, 0 /* S */, 0 /* T */, 0 /* U */, 0 /* V */, 0 /* W */, 0 /* X */, 0 /* Y */, 0 /* Z */, 0 /* [ */, 0 /* \ */, 0 /* ] */, 0 /* ^ */, 0 /* _ */, 0 /* ` */, 0 /* a */, 0 /* b */, 0 /* c */, 0 /* d */, 0 /* e */, 0 /* f */, 0 /* g */, 0 /* h */, 0 /* i */, 0 /* j */, 0 /* k */, 0 /* l */, 0 /* m */, 0 /* n */, 0 /* o */, 0 /* p */, 0 /* q */, 0 /* r */, 0 /* s */, 0 /* t */, 0 /* u */, 0 /* v */, 0 /* w */, 0 /* x */, 0 /* y */, 0 /* z */, 0 /* { */, 0 /* | */, 0 /* } */, 0 /* ~ */}, + {0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* ! */, 0 /* " */, 0 /* # */, 0 /* $ */, 0 /* % */, 0 /* & */, 0 /* ' */, 0 /* ( */, 0 /* ) */, 0 /* * */, 0 /* + */, 0 /* , */, 0 /* - */, 0 /* . */, 0 /* / */, 0 /* 0 */, 0 /* 1 */, 0 /* 2 */, 0 /* 3 */, 0 /* 4 */, 0 /* 5 */, 0 /* 6 */, 0 /* 7 */, 0 /* 8 */, 0 /* 9 */, 0 /* : */, 0 /* ; */, 0 /* < */, 0 /* = */, 0 /* > */, 0 /* ? */, 0 /* @ */, 0 /* A */, 0 /* B */, 0 /* C */, 0 /* D */, 0 /* E */, 0 /* F */, 0 /* G */, 0 /* H */, 0 /* I */, 0 /* J */, 0 /* K */, 0 /* L */, 0 /* M */, 0 /* N */, 0 /* O */, 0 /* P */, 0 /* Q */, 0 /* R */, 0 /* S */, 0 /* T */, 0 /* U */, 0 /* V */, 0 /* W */, 0 /* X */, 0 /* Y */, 0 /* Z */, 0 /* [ */, 0 /* \ */, 0 /* ] */, 0 /* ^ */, 0 /* _ */, 0 /* ` */, 0 /* a */, 0 /* b */, 0 /* c */, 0 /* d */, 0 /* e */, 0 /* f */, 0 /* g */, 0 /* h */, 0 /* i */, 0 /* j */, 0 /* k */, 0 /* l */, 0 /* m */, 0 /* n */, 0 /* o */, 0 /* p */, 0 /* q */, 0 /* r */, 0 /* s */, 0 /* t */, 0 /* u */, 0 /* v */, 0 /* w */, 0 /* x */, 0 /* y */, 0 /* z */, 0 /* { */, 0 /* | */, 0 /* } */, 0 /* ~ */}, + {0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* ! */, 0 /* " */, 0 /* # */, 0 /* $ */, 0 /* % */, 0 /* & */, 0 /* ' */, 0 /* ( */, 0 /* ) */, 0 /* * */, 0 /* + */, 0 /* , */, 0 /* - */, 0 /* . */, 0 /* / */, 0 /* 0 */, 0 /* 1 */, 0 /* 2 */, 0 /* 3 */, 0 /* 4 */, 0 /* 5 */, 0 /* 6 */, 0 /* 7 */, 0 /* 8 */, 0 /* 9 */, 0 /* : */, 0 /* ; */, 0 /* < */, 0 /* = */, 0 /* > */, 0 /* ? 
*/, 0 /* @ */, 0 /* A */, 0 /* B */, 0 /* C */, 0 /* D */, 0 /* E */, 0 /* F */, 0 /* G */, 0 /* H */, 0 /* I */, 0 /* J */, 0 /* K */, 0 /* L */, 0 /* M */, 0 /* N */, 0 /* O */, 0 /* P */, 0 /* Q */, 0 /* R */, 0 /* S */, 0 /* T */, 0 /* U */, 0 /* V */, 0 /* W */, 0 /* X */, 0 /* Y */, 0 /* Z */, 0 /* [ */, 0 /* \ */, 0 /* ] */, 0 /* ^ */, 0 /* _ */, 0 /* ` */, 0 /* a */, 0 /* b */, 0 /* c */, 0 /* d */, 0 /* e */, 0 /* f */, 0 /* g */, 0 /* h */, 0 /* i */, 0 /* j */, 0 /* k */, 0 /* l */, 0 /* m */, 0 /* n */, 0 /* o */, 0 /* p */, 0 /* q */, 0 /* r */, 0 /* s */, 0 /* t */, 0 /* u */, 0 /* v */, 0 /* w */, 0 /* x */, 0 /* y */, 0 /* z */, 0 /* { */, 0 /* | */, 0 /* } */, 0 /* ~ */}, + {0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* ! */, 0 /* " */, 0 /* # */, 0 /* $ */, 0 /* % */, 0 /* & */, 0 /* ' */, 0 /* ( */, 0 /* ) */, 0 /* * */, 0 /* + */, 0 /* , */, 0 /* - */, 0 /* . */, 0 /* / */, 0 /* 0 */, 0 /* 1 */, 0 /* 2 */, 0 /* 3 */, 0 /* 4 */, 0 /* 5 */, 0 /* 6 */, 0 /* 7 */, 0 /* 8 */, 0 /* 9 */, 0 /* : */, 0 /* ; */, 0 /* < */, 0 /* = */, 0 /* > */, 0 /* ? */, 0 /* @ */, 0 /* A */, 0 /* B */, 0 /* C */, 0 /* D */, 0 /* E */, 0 /* F */, 0 /* G */, 0 /* H */, 0 /* I */, 0 /* J */, 0 /* K */, 0 /* L */, 0 /* M */, 0 /* N */, 0 /* O */, 0 /* P */, 0 /* Q */, 0 /* R */, 0 /* S */, 0 /* T */, 0 /* U */, 0 /* V */, 0 /* W */, 0 /* X */, 0 /* Y */, 0 /* Z */, 0 /* [ */, 0 /* \ */, 0 /* ] */, 0 /* ^ */, 0 /* _ */, 0 /* ` */, 0 /* a */, 0 /* b */, 0 /* c */, 0 /* d */, 0 /* e */, 0 /* f */, 0 /* g */, 0 /* h */, 0 /* i */, 0 /* j */, 0 /* k */, 0 /* l */, 0 /* m */, 0 /* n */, 0 /* o */, 0 /* p */, 0 /* q */, 0 /* r */, 0 /* s */, 0 /* t */, 0 /* u */, 0 /* v */, 0 /* w */, 0 /* x */, 0 /* y */, 0 /* z */, 0 /* { */, 0 /* | */, 0 /* } */, 0 /* ~ */}, + {0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* ! */, 0 /* " */, 0 /* # */, 0 /* $ */, 0 /* % */, 0 /* & */, 0 /* ' */, 0 /* ( */, 0 /* ) */, 0 /* * */, 0 /* + */, 0 /* , */, 0 /* - */, 0 /* . */, 0 /* / */, 0 /* 0 */, 0 /* 1 */, 0 /* 2 */, 0 /* 3 */, 0 /* 4 */, 0 /* 5 */, 0 /* 6 */, 0 /* 7 */, 0 /* 8 */, 0 /* 9 */, 0 /* : */, 0 /* ; */, 0 /* < */, 0 /* = */, 0 /* > */, 0 /* ? 
*/, 0 /* @ */, 0 /* A */, 0 /* B */, 0 /* C */, 0 /* D */, 0 /* E */, 0 /* F */, 0 /* G */, 0 /* H */, 0 /* I */, 0 /* J */, 0 /* K */, 0 /* L */, 0 /* M */, 0 /* N */, 0 /* O */, 0 /* P */, 0 /* Q */, 0 /* R */, 0 /* S */, 0 /* T */, 0 /* U */, 0 /* V */, 0 /* W */, 0 /* X */, 0 /* Y */, 0 /* Z */, 0 /* [ */, 0 /* \ */, 0 /* ] */, 0 /* ^ */, 0 /* _ */, 0 /* ` */, 0 /* a */, 0 /* b */, 0 /* c */, 0 /* d */, 0 /* e */, 0 /* f */, 0 /* g */, 0 /* h */, 0 /* i */, 0 /* j */, 0 /* k */, 0 /* l */, 0 /* m */, 0 /* n */, 0 /* o */, 0 /* p */, 0 /* q */, 0 /* r */, 0 /* s */, 0 /* t */, 0 /* u */, 0 /* v */, 0 /* w */, 0 /* x */, 0 /* y */, 0 /* z */, 0 /* { */, 0 /* | */, 0 /* } */, 0 /* ~ */}, + {0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* ! */, 0 /* " */, 0 /* # */, 0 /* $ */, 0 /* % */, 0 /* & */, 0 /* ' */, 0 /* ( */, 0 /* ) */, 0 /* * */, 0 /* + */, 0 /* , */, 0 /* - */, 0 /* . */, 0 /* / */, 0 /* 0 */, 0 /* 1 */, 0 /* 2 */, 0 /* 3 */, 0 /* 4 */, 0 /* 5 */, 0 /* 6 */, 0 /* 7 */, 0 /* 8 */, 0 /* 9 */, 0 /* : */, 0 /* ; */, 0 /* < */, 0 /* = */, 0 /* > */, 0 /* ? */, 0 /* @ */, 0 /* A */, 0 /* B */, 0 /* C */, 0 /* D */, 0 /* E */, 0 /* F */, 0 /* G */, 0 /* H */, 0 /* I */, 0 /* J */, 0 /* K */, 0 /* L */, 0 /* M */, 0 /* N */, 0 /* O */, 0 /* P */, 0 /* Q */, 0 /* R */, 0 /* S */, 0 /* T */, 0 /* U */, 0 /* V */, 0 /* W */, 0 /* X */, 0 /* Y */, 0 /* Z */, 0 /* [ */, 0 /* \ */, 0 /* ] */, 0 /* ^ */, 0 /* _ */, 0 /* ` */, 0 /* a */, 0 /* b */, 0 /* c */, 0 /* d */, 0 /* e */, 0 /* f */, 0 /* g */, 0 /* h */, 0 /* i */, 0 /* j */, 0 /* k */, 0 /* l */, 0 /* m */, 0 /* n */, 0 /* o */, 0 /* p */, 0 /* q */, 0 /* r */, 0 /* s */, 0 /* t */, 0 /* u */, 0 /* v */, 0 /* w */, 0 /* x */, 0 /* y */, 0 /* z */, 0 /* { */, 0 /* | */, 0 /* } */, 0 /* ~ */}, + {0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* ! */, 0 /* " */, 0 /* # */, 0 /* $ */, 0 /* % */, 0 /* & */, 0 /* ' */, 0 /* ( */, 0 /* ) */, 0 /* * */, 0 /* + */, 0 /* , */, 0 /* - */, 0 /* . */, 0 /* / */, 0 /* 0 */, 0 /* 1 */, 0 /* 2 */, 0 /* 3 */, 0 /* 4 */, 0 /* 5 */, 0 /* 6 */, 0 /* 7 */, 0 /* 8 */, 0 /* 9 */, 0 /* : */, 0 /* ; */, 0 /* < */, 0 /* = */, 0 /* > */, 0 /* ? 
*/, 0 /* @ */, 0 /* A */, 0 /* B */, 0 /* C */, 0 /* D */, 0 /* E */, 0 /* F */, 0 /* G */, 0 /* H */, 0 /* I */, 0 /* J */, 0 /* K */, 0 /* L */, 0 /* M */, 0 /* N */, 0 /* O */, 0 /* P */, 0 /* Q */, 0 /* R */, 0 /* S */, 0 /* T */, 0 /* U */, 0 /* V */, 0 /* W */, 0 /* X */, 0 /* Y */, 0 /* Z */, 0 /* [ */, 0 /* \ */, 0 /* ] */, 0 /* ^ */, 0 /* _ */, 0 /* ` */, 0 /* a */, 0 /* b */, 0 /* c */, 0 /* d */, 0 /* e */, 0 /* f */, 0 /* g */, 0 /* h */, 0 /* i */, 0 /* j */, 0 /* k */, 0 /* l */, 0 /* m */, 0 /* n */, 0 /* o */, 0 /* p */, 0 /* q */, 0 /* r */, 0 /* s */, 0 /* t */, 0 /* u */, 0 /* v */, 0 /* w */, 0 /* x */, 0 /* y */, 0 /* z */, 0 /* { */, 0 /* | */, 0 /* } */, 0 /* ~ */}, + {0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* ! */, 0 /* " */, 0 /* # */, 0 /* $ */, 0 /* % */, 0 /* & */, 0 /* ' */, 0 /* ( */, 0 /* ) */, 0 /* * */, 0 /* + */, 0 /* , */, 0 /* - */, 0 /* . */, 0 /* / */, 0 /* 0 */, 0 /* 1 */, 0 /* 2 */, 0 /* 3 */, 0 /* 4 */, 0 /* 5 */, 0 /* 6 */, 0 /* 7 */, 0 /* 8 */, 0 /* 9 */, 0 /* : */, 0 /* ; */, 0 /* < */, 0 /* = */, 0 /* > */, 0 /* ? */, 0 /* @ */, 0 /* A */, 0 /* B */, 0 /* C */, 0 /* D */, 0 /* E */, 0 /* F */, 0 /* G */, 0 /* H */, 0 /* I */, 0 /* J */, 0 /* K */, 0 /* L */, 0 /* M */, 0 /* N */, 0 /* O */, 0 /* P */, 0 /* Q */, 0 /* R */, 0 /* S */, 0 /* T */, 0 /* U */, 0 /* V */, 0 /* W */, 0 /* X */, 0 /* Y */, 0 /* Z */, 0 /* [ */, 0 /* \ */, 0 /* ] */, 0 /* ^ */, 0 /* _ */, 0 /* ` */, 0 /* a */, 0 /* b */, 0 /* c */, 0 /* d */, 0 /* e */, 0 /* f */, 0 /* g */, 0 /* h */, 0 /* i */, 0 /* j */, 0 /* k */, 0 /* l */, 0 /* m */, 0 /* n */, 0 /* o */, 0 /* p */, 0 /* q */, 0 /* r */, 0 /* s */, 0 /* t */, 0 /* u */, 0 /* v */, 0 /* w */, 0 /* x */, 0 /* y */, 0 /* z */, 0 /* { */, 0 /* | */, 0 /* } */, 0 /* ~ */}, + {0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* ! */, 0 /* " */, 0 /* # */, 0 /* $ */, 0 /* % */, 0 /* & */, 0 /* ' */, 0 /* ( */, 0 /* ) */, 0 /* * */, 0 /* + */, 0 /* , */, 0 /* - */, 0 /* . */, 0 /* / */, 0 /* 0 */, 0 /* 1 */, 0 /* 2 */, 0 /* 3 */, 0 /* 4 */, 0 /* 5 */, 0 /* 6 */, 0 /* 7 */, 0 /* 8 */, 0 /* 9 */, 0 /* : */, 0 /* ; */, 0 /* < */, 0 /* = */, 0 /* > */, 0 /* ? 
*/, 0 /* @ */, 0 /* A */, 0 /* B */, 0 /* C */, 0 /* D */, 0 /* E */, 0 /* F */, 0 /* G */, 0 /* H */, 0 /* I */, 0 /* J */, 0 /* K */, 0 /* L */, 0 /* M */, 0 /* N */, 0 /* O */, 0 /* P */, 0 /* Q */, 0 /* R */, 0 /* S */, 0 /* T */, 0 /* U */, 0 /* V */, 0 /* W */, 0 /* X */, 0 /* Y */, 0 /* Z */, 0 /* [ */, 0 /* \ */, 0 /* ] */, 0 /* ^ */, 0 /* _ */, 0 /* ` */, 0 /* a */, 0 /* b */, 0 /* c */, 0 /* d */, 0 /* e */, 0 /* f */, 0 /* g */, 0 /* h */, 0 /* i */, 0 /* j */, 0 /* k */, 0 /* l */, 0 /* m */, 0 /* n */, 0 /* o */, 0 /* p */, 0 /* q */, 0 /* r */, 0 /* s */, 0 /* t */, 0 /* u */, 0 /* v */, 0 /* w */, 0 /* x */, 0 /* y */, 0 /* z */, 0 /* { */, 0 /* | */, 0 /* } */, 0 /* ~ */}, + {5900 /* */, 5900 /* */, 5900 /* */, 5900 /* */, 5900 /* */, 5900 /* */, 5900 /* */, 5900 /* */, 5900 /* */, 5900 /* */, 5900 /* */, 5900 /* */, 5900 /* */, 5900 /* */, 5900 /* */, 5900 /* */, 5900 /* */, 5900 /* */, 5900 /* */, 5900 /* */, 5900 /* */, 5900 /* */, 5900 /* */, 5900 /* */, 5900 /* */, 5900 /* */, 5900 /* */, 5900 /* */, 5900 /* */, 5900 /* */, 5900 /* */, 5900 /* */, 5900 /* */, 5900 /* ! */, 5900 /* " */, 5900 /* # */, 5900 /* $ */, 5900 /* % */, 5900 /* & */, 5900 /* ' */, 5900 /* ( */, 5900 /* ) */, 5900 /* * */, 5900 /* + */, 5900 /* , */, 5900 /* - */, 5900 /* . */, 5900 /* / */, 5900 /* 0 */, 5900 /* 1 */, 5900 /* 2 */, 5900 /* 3 */, 5900 /* 4 */, 5900 /* 5 */, 5900 /* 6 */, 5900 /* 7 */, 5900 /* 8 */, 5900 /* 9 */, 5900 /* : */, 5900 /* ; */, 5900 /* < */, 5900 /* = */, 5900 /* > */, 5900 /* ? */, 5900 /* @ */, 5900 /* A */, 5900 /* B */, 5900 /* C */, 5900 /* D */, 5900 /* E */, 5900 /* F */, 5900 /* G */, 5900 /* H */, 5900 /* I */, 5900 /* J */, 5900 /* K */, 5900 /* L */, 5900 /* M */, 5900 /* N */, 5900 /* O */, 5900 /* P */, 5900 /* Q */, 5900 /* R */, 5900 /* S */, 5900 /* T */, 5900 /* U */, 5900 /* V */, 5900 /* W */, 5900 /* X */, 5900 /* Y */, 5900 /* Z */, 5900 /* [ */, 5900 /* \ */, 5900 /* ] */, 5900 /* ^ */, 5900 /* _ */, 5900 /* ` */, 5900 /* a */, 5900 /* b */, 5900 /* c */, 5900 /* d */, 5900 /* e */, 5900 /* f */, 5900 /* g */, 5900 /* h */, 5900 /* i */, 5900 /* j */, 5900 /* k */, 5900 /* l */, 5900 /* m */, 5900 /* n */, 5900 /* o */, 5900 /* p */, 5900 /* q */, 5900 /* r */, 5900 /* s */, 5900 /* t */, 5900 /* u */, 5900 /* v */, 5900 /* w */, 5900 /* x */, 5900 /* y */, 5900 /* z */, 5900 /* { */, 5900 /* | */, 5900 /* } */, 5900 /* ~ */}, + {8000 /* ! */, 8000 /* ! */, 8000 /* ! */, 8000 /* ! */, 8000 /* ! */, 8000 /* ! */, 8000 /* ! */, 8000 /* ! */, 8000 /* ! */, 8000 /* ! */, 8000 /* ! */, 8000 /* ! */, 8000 /* ! */, 8000 /* ! */, 8000 /* ! */, 8000 /* ! */, 8000 /* ! */, 8000 /* ! */, 8000 /* ! */, 8000 /* ! */, 8000 /* ! */, 8000 /* ! */, 8000 /* ! */, 8000 /* ! */, 8000 /* ! */, 8000 /* ! */, 8000 /* ! */, 8000 /* ! */, 8000 /* ! */, 8000 /* ! */, 8000 /* ! */, 8000 /* ! */, 8000 /* ! */, 8000 /* !! */, 8000 /* !" */, 8000 /* !# */, 8000 /* !$ */, 8000 /* !% */, 8000 /* !& */, 8000 /* !' */, 8000 /* !( */, 8000 /* !) */, 8000 /* !* */, 8000 /* !+ */, 8000 /* !, */, 8000 /* !- */, 8000 /* !. */, 8000 /* !/ */, 8000 /* !0 */, 8000 /* !1 */, 8000 /* !2 */, 8000 /* !3 */, 8000 /* !4 */, 8000 /* !5 */, 8000 /* !6 */, 8000 /* !7 */, 8000 /* !8 */, 8000 /* !9 */, 8000 /* !: */, 8000 /* !; */, 8000 /* !< */, 8000 /* != */, 8000 /* !> */, 8000 /* !? 
*/, 8000 /* !@ */, 8000 /* !A */, 8000 /* !B */, 8000 /* !C */, 8000 /* !D */, 8000 /* !E */, 8000 /* !F */, 8000 /* !G */, 8000 /* !H */, 8000 /* !I */, 8000 /* !J */, 8000 /* !K */, 8000 /* !L */, 8000 /* !M */, 8000 /* !N */, 8000 /* !O */, 8000 /* !P */, 8000 /* !Q */, 8000 /* !R */, 8000 /* !S */, 8000 /* !T */, 8000 /* !U */, 8000 /* !V */, 8000 /* !W */, 8000 /* !X */, 8000 /* !Y */, 8000 /* !Z */, 8000 /* ![ */, 8000 /* !\ */, 8000 /* !] */, 8000 /* !^ */, 8000 /* !_ */, 8000 /* !` */, 8000 /* !a */, 8000 /* !b */, 8000 /* !c */, 8000 /* !d */, 8000 /* !e */, 8000 /* !f */, 8000 /* !g */, 8000 /* !h */, 8000 /* !i */, 8000 /* !j */, 8000 /* !k */, 8000 /* !l */, 8000 /* !m */, 8000 /* !n */, 8000 /* !o */, 8000 /* !p */, 8000 /* !q */, 8000 /* !r */, 8000 /* !s */, 8000 /* !t */, 8000 /* !u */, 8000 /* !v */, 8000 /* !w */, 8000 /* !x */, 8000 /* !y */, 8000 /* !z */, 8000 /* !{ */, 8000 /* !| */, 8000 /* !} */, 8000 /* !~ */}, + {12325 /* " */, 12325 /* " */, 12325 /* " */, 12325 /* " */, 12325 /* " */, 12325 /* " */, 12325 /* " */, 12325 /* " */, 12325 /* " */, 12325 /* " */, 12325 /* " */, 12325 /* " */, 12325 /* " */, 12325 /* " */, 12325 /* " */, 12325 /* " */, 12325 /* " */, 12325 /* " */, 12325 /* " */, 12325 /* " */, 12325 /* " */, 12325 /* " */, 12325 /* " */, 12325 /* " */, 12325 /* " */, 12325 /* " */, 12325 /* " */, 12325 /* " */, 12325 /* " */, 12325 /* " */, 12325 /* " */, 12325 /* " */, 12325 /* " */, 12325 /* "! */, 12325 /* "" */, 12325 /* "# */, 12325 /* "$ */, 12325 /* "% */, 12325 /* "& */, 12325 /* "' */, 12325 /* "( */, 12325 /* ") */, 12325 /* "* */, 12325 /* "+ */, 10075 /* ", */, 12325 /* "- */, 10075 /* ". */, 12325 /* "/ */, 12325 /* "0 */, 12325 /* "1 */, 12325 /* "2 */, 12325 /* "3 */, 12325 /* "4 */, 12325 /* "5 */, 12325 /* "6 */, 12325 /* "7 */, 12325 /* "8 */, 12325 /* "9 */, 12325 /* ": */, 12325 /* "; */, 12325 /* "< */, 12325 /* "= */, 12325 /* "> */, 12325 /* "? */, 12325 /* "@ */, 11075 /* "A */, 12325 /* "B */, 12075 /* "C */, 12325 /* "D */, 12325 /* "E */, 12325 /* "F */, 12075 /* "G */, 12325 /* "H */, 12325 /* "I */, 11325 /* "J */, 12325 /* "K */, 12325 /* "L */, 12325 /* "M */, 12325 /* "N */, 12075 /* "O */, 12325 /* "P */, 12075 /* "Q */, 12325 /* "R */, 12325 /* "S */, 12700 /* "T */, 12325 /* "U */, 12575 /* "V */, 12700 /* "W */, 12325 /* "X */, 12450 /* "Y */, 12325 /* "Z */, 12325 /* "[ */, 12325 /* "\ */, 12325 /* "] */, 12325 /* "^ */, 12325 /* "_ */, 12325 /* "` */, 12325 /* "a */, 12325 /* "b */, 11825 /* "c */, 11825 /* "d */, 11825 /* "e */, 12325 /* "f */, 12325 /* "g */, 12325 /* "h */, 12325 /* "i */, 12325 /* "j */, 12325 /* "k */, 12325 /* "l */, 12325 /* "m */, 12325 /* "n */, 11825 /* "o */, 12325 /* "p */, 11825 /* "q */, 12325 /* "r */, 12325 /* "s */, 12325 /* "t */, 12325 /* "u */, 12950 /* "v */, 12825 /* "w */, 12325 /* "x */, 12825 /* "y */, 12450 /* "z */, 12325 /* "{ */, 12325 /* "| */, 12325 /* "} */, 12325 /* "~ */}, + {15800 /* # */, 15800 /* # */, 15800 /* # */, 15800 /* # */, 15800 /* # */, 15800 /* # */, 15800 /* # */, 15800 /* # */, 15800 /* # */, 15800 /* # */, 15800 /* # */, 15800 /* # */, 15800 /* # */, 15800 /* # */, 15800 /* # */, 15800 /* # */, 15800 /* # */, 15800 /* # */, 15800 /* # */, 15800 /* # */, 15800 /* # */, 15800 /* # */, 15800 /* # */, 15800 /* # */, 15800 /* # */, 15800 /* # */, 15800 /* # */, 15800 /* # */, 15800 /* # */, 15800 /* # */, 15800 /* # */, 15800 /* # */, 15800 /* # */, 15800 /* #! 
*/, 15800 /* #" */, 15800 /* ## */, 15800 /* #$ */, 15800 /* #% */, 15800 /* #& */, 15800 /* #' */, 15800 /* #( */, 15800 /* #) */, 15800 /* #* */, 15800 /* #+ */, 15800 /* #, */, 15800 /* #- */, 15800 /* #. */, 15800 /* #/ */, 15800 /* #0 */, 15800 /* #1 */, 15800 /* #2 */, 15800 /* #3 */, 15800 /* #4 */, 15800 /* #5 */, 15800 /* #6 */, 15800 /* #7 */, 15800 /* #8 */, 15800 /* #9 */, 15800 /* #: */, 15800 /* #; */, 15800 /* #< */, 15800 /* #= */, 15800 /* #> */, 15800 /* #? */, 15800 /* #@ */, 15800 /* #A */, 15800 /* #B */, 15800 /* #C */, 15800 /* #D */, 15800 /* #E */, 15800 /* #F */, 15800 /* #G */, 15800 /* #H */, 15800 /* #I */, 15800 /* #J */, 15800 /* #K */, 15800 /* #L */, 15800 /* #M */, 15800 /* #N */, 15800 /* #O */, 15800 /* #P */, 15800 /* #Q */, 15800 /* #R */, 15800 /* #S */, 15800 /* #T */, 15800 /* #U */, 15800 /* #V */, 15800 /* #W */, 15800 /* #X */, 15800 /* #Y */, 15800 /* #Z */, 15800 /* #[ */, 15800 /* #\ */, 15800 /* #] */, 15800 /* #^ */, 15800 /* #_ */, 15800 /* #` */, 15800 /* #a */, 15800 /* #b */, 15800 /* #c */, 15800 /* #d */, 15800 /* #e */, 15800 /* #f */, 15800 /* #g */, 15800 /* #h */, 15800 /* #i */, 15800 /* #j */, 15800 /* #k */, 15800 /* #l */, 15800 /* #m */, 15800 /* #n */, 15800 /* #o */, 15800 /* #p */, 15800 /* #q */, 15800 /* #r */, 15800 /* #s */, 15800 /* #t */, 15800 /* #u */, 15800 /* #v */, 15800 /* #w */, 15800 /* #x */, 15800 /* #y */, 15800 /* #z */, 15800 /* #{ */, 15800 /* #| */, 15800 /* #} */, 15800 /* #~ */}, + {15025 /* $ */, 15025 /* $ */, 15025 /* $ */, 15025 /* $ */, 15025 /* $ */, 15025 /* $ */, 15025 /* $ */, 15025 /* $ */, 15025 /* $ */, 15025 /* $ */, 15025 /* $ */, 15025 /* $ */, 15025 /* $ */, 15025 /* $ */, 15025 /* $ */, 15025 /* $ */, 15025 /* $ */, 15025 /* $ */, 15025 /* $ */, 15025 /* $ */, 15025 /* $ */, 15025 /* $ */, 15025 /* $ */, 15025 /* $ */, 15025 /* $ */, 15025 /* $ */, 15025 /* $ */, 15025 /* $ */, 15025 /* $ */, 15025 /* $ */, 15025 /* $ */, 15025 /* $ */, 15025 /* $ */, 15025 /* $! */, 15025 /* $" */, 15025 /* $# */, 15025 /* $$ */, 15025 /* $% */, 15025 /* $& */, 15025 /* $' */, 15025 /* $( */, 15025 /* $) */, 15025 /* $* */, 15025 /* $+ */, 15025 /* $, */, 15025 /* $- */, 15025 /* $. */, 15025 /* $/ */, 15025 /* $0 */, 15025 /* $1 */, 15025 /* $2 */, 15025 /* $3 */, 15025 /* $4 */, 15025 /* $5 */, 15025 /* $6 */, 15025 /* $7 */, 15025 /* $8 */, 15025 /* $9 */, 15025 /* $: */, 15025 /* $; */, 15025 /* $< */, 15025 /* $= */, 15025 /* $> */, 15025 /* $? 
*/, 15025 /* $@ */, 15025 /* $A */, 15025 /* $B */, 15025 /* $C */, 15025 /* $D */, 15025 /* $E */, 15025 /* $F */, 15025 /* $G */, 15025 /* $H */, 15025 /* $I */, 15025 /* $J */, 15025 /* $K */, 15025 /* $L */, 15025 /* $M */, 15025 /* $N */, 15025 /* $O */, 15025 /* $P */, 15025 /* $Q */, 15025 /* $R */, 15025 /* $S */, 15025 /* $T */, 15025 /* $U */, 15025 /* $V */, 15025 /* $W */, 15025 /* $X */, 15025 /* $Y */, 15025 /* $Z */, 15025 /* $[ */, 15025 /* $\ */, 15025 /* $] */, 15025 /* $^ */, 15025 /* $_ */, 15025 /* $` */, 15025 /* $a */, 15025 /* $b */, 15025 /* $c */, 15025 /* $d */, 15025 /* $e */, 15025 /* $f */, 15025 /* $g */, 15025 /* $h */, 15025 /* $i */, 15025 /* $j */, 15025 /* $k */, 15025 /* $l */, 15025 /* $m */, 15025 /* $n */, 15025 /* $o */, 15025 /* $p */, 15025 /* $q */, 15025 /* $r */, 15025 /* $s */, 15025 /* $t */, 15025 /* $u */, 15025 /* $v */, 15025 /* $w */, 15025 /* $x */, 15025 /* $y */, 15025 /* $z */, 15025 /* ${ */, 15025 /* $| */, 15025 /* $} */, 15025 /* $~ */}, + {24350 /* % */, 24350 /* % */, 24350 /* % */, 24350 /* % */, 24350 /* % */, 24350 /* % */, 24350 /* % */, 24350 /* % */, 24350 /* % */, 24350 /* % */, 24350 /* % */, 24350 /* % */, 24350 /* % */, 24350 /* % */, 24350 /* % */, 24350 /* % */, 24350 /* % */, 24350 /* % */, 24350 /* % */, 24350 /* % */, 24350 /* % */, 24350 /* % */, 24350 /* % */, 24350 /* % */, 24350 /* % */, 24350 /* % */, 24350 /* % */, 24350 /* % */, 24350 /* % */, 24350 /* % */, 24350 /* % */, 24350 /* % */, 24350 /* % */, 24350 /* %! */, 22850 /* %" */, 24350 /* %# */, 24350 /* %$ */, 24350 /* %% */, 24350 /* %& */, 22850 /* %' */, 24350 /* %( */, 24350 /* %) */, 22850 /* %* */, 24350 /* %+ */, 24350 /* %, */, 24350 /* %- */, 24350 /* %. */, 24350 /* %/ */, 24350 /* %0 */, 24350 /* %1 */, 24350 /* %2 */, 24350 /* %3 */, 24350 /* %4 */, 24350 /* %5 */, 24350 /* %6 */, 24350 /* %7 */, 24350 /* %8 */, 24350 /* %9 */, 24350 /* %: */, 24350 /* %; */, 24350 /* %< */, 24350 /* %= */, 24350 /* %> */, 24350 /* %? */, 24350 /* %@ */, 24350 /* %A */, 24350 /* %B */, 24350 /* %C */, 24350 /* %D */, 24350 /* %E */, 24350 /* %F */, 24350 /* %G */, 24350 /* %H */, 24350 /* %I */, 24350 /* %J */, 24350 /* %K */, 24350 /* %L */, 24350 /* %M */, 24350 /* %N */, 24350 /* %O */, 24350 /* %P */, 24350 /* %Q */, 24350 /* %R */, 24350 /* %S */, 24350 /* %T */, 24350 /* %U */, 24350 /* %V */, 24350 /* %W */, 24350 /* %X */, 24350 /* %Y */, 24350 /* %Z */, 24350 /* %[ */, 24350 /* %\ */, 24350 /* %] */, 24350 /* %^ */, 24350 /* %_ */, 24350 /* %` */, 24350 /* %a */, 24350 /* %b */, 24350 /* %c */, 24350 /* %d */, 24350 /* %e */, 24350 /* %f */, 24350 /* %g */, 24350 /* %h */, 24350 /* %i */, 24350 /* %j */, 24350 /* %k */, 24350 /* %l */, 24350 /* %m */, 24350 /* %n */, 24350 /* %o */, 24350 /* %p */, 24350 /* %q */, 24350 /* %r */, 24350 /* %s */, 24350 /* %t */, 24350 /* %u */, 24350 /* %v */, 24350 /* %w */, 24350 /* %x */, 24350 /* %y */, 24350 /* %z */, 24350 /* %{ */, 24350 /* %| */, 24350 /* %} */, 24350 /* %~ */}, + {18025 /* & */, 18025 /* & */, 18025 /* & */, 18025 /* & */, 18025 /* & */, 18025 /* & */, 18025 /* & */, 18025 /* & */, 18025 /* & */, 18025 /* & */, 18025 /* & */, 18025 /* & */, 18025 /* & */, 18025 /* & */, 18025 /* & */, 18025 /* & */, 18025 /* & */, 18025 /* & */, 18025 /* & */, 18025 /* & */, 18025 /* & */, 18025 /* & */, 18025 /* & */, 18025 /* & */, 18025 /* & */, 18025 /* & */, 18025 /* & */, 18025 /* & */, 18025 /* & */, 18025 /* & */, 18025 /* & */, 18025 /* & */, 18025 /* & */, 18025 /* &! 
*/, 18025 /* &" */, 18025 /* &# */, 18025 /* &$ */, 18025 /* &% */, 18025 /* && */, 18025 /* &' */, 18025 /* &( */, 18025 /* &) */, 18025 /* &* */, 18025 /* &+ */, 18025 /* &, */, 18025 /* &- */, 18025 /* &. */, 18025 /* &/ */, 18025 /* &0 */, 18025 /* &1 */, 18025 /* &2 */, 18025 /* &3 */, 18025 /* &4 */, 18025 /* &5 */, 18025 /* &6 */, 18025 /* &7 */, 18025 /* &8 */, 18025 /* &9 */, 18025 /* &: */, 18025 /* &; */, 18025 /* &< */, 18025 /* &= */, 18025 /* &> */, 18025 /* &? */, 18025 /* &@ */, 18150 /* &A */, 18025 /* &B */, 18025 /* &C */, 18025 /* &D */, 18025 /* &E */, 18025 /* &F */, 18025 /* &G */, 18025 /* &H */, 18025 /* &I */, 18025 /* &J */, 18025 /* &K */, 18025 /* &L */, 18025 /* &M */, 18025 /* &N */, 18025 /* &O */, 18025 /* &P */, 18025 /* &Q */, 18025 /* &R */, 17900 /* &S */, 17025 /* &T */, 18025 /* &U */, 17150 /* &V */, 17525 /* &W */, 18025 /* &X */, 16525 /* &Y */, 17775 /* &Z */, 18025 /* &[ */, 18025 /* &\ */, 18025 /* &] */, 18025 /* &^ */, 18025 /* &_ */, 18025 /* &` */, 18275 /* &a */, 18025 /* &b */, 18275 /* &c */, 18275 /* &d */, 18275 /* &e */, 18025 /* &f */, 18025 /* &g */, 18025 /* &h */, 18025 /* &i */, 18025 /* &j */, 18025 /* &k */, 18025 /* &l */, 18025 /* &m */, 18025 /* &n */, 18275 /* &o */, 18025 /* &p */, 18275 /* &q */, 18025 /* &r */, 18025 /* &s */, 18025 /* &t */, 18025 /* &u */, 17775 /* &v */, 17775 /* &w */, 18025 /* &x */, 17775 /* &y */, 18025 /* &z */, 18025 /* &{ */, 18025 /* &| */, 18025 /* &} */, 18025 /* &~ */}, + {6700 /* ' */, 6700 /* ' */, 6700 /* ' */, 6700 /* ' */, 6700 /* ' */, 6700 /* ' */, 6700 /* ' */, 6700 /* ' */, 6700 /* ' */, 6700 /* ' */, 6700 /* ' */, 6700 /* ' */, 6700 /* ' */, 6700 /* ' */, 6700 /* ' */, 6700 /* ' */, 6700 /* ' */, 6700 /* ' */, 6700 /* ' */, 6700 /* ' */, 6700 /* ' */, 6700 /* ' */, 6700 /* ' */, 6700 /* ' */, 6700 /* ' */, 6700 /* ' */, 6700 /* ' */, 6700 /* ' */, 6700 /* ' */, 6700 /* ' */, 6700 /* ' */, 6700 /* ' */, 6700 /* ' */, 6700 /* '! */, 6700 /* '" */, 6700 /* '# */, 6700 /* '$ */, 6700 /* '% */, 6700 /* '& */, 6700 /* '' */, 6700 /* '( */, 6700 /* ') */, 6700 /* '* */, 6700 /* '+ */, 4450 /* ', */, 6700 /* '- */, 4450 /* '. */, 6700 /* '/ */, 6700 /* '0 */, 6700 /* '1 */, 6700 /* '2 */, 6700 /* '3 */, 6700 /* '4 */, 6700 /* '5 */, 6700 /* '6 */, 6700 /* '7 */, 6700 /* '8 */, 6700 /* '9 */, 6700 /* ': */, 6700 /* '; */, 6700 /* '< */, 6700 /* '= */, 6700 /* '> */, 6700 /* '? 
*/, 6700 /* '@ */, 5450 /* 'A */, 6700 /* 'B */, 6450 /* 'C */, 6700 /* 'D */, 6700 /* 'E */, 6700 /* 'F */, 6450 /* 'G */, 6700 /* 'H */, 6700 /* 'I */, 5700 /* 'J */, 6700 /* 'K */, 6700 /* 'L */, 6700 /* 'M */, 6700 /* 'N */, 6450 /* 'O */, 6700 /* 'P */, 6450 /* 'Q */, 6700 /* 'R */, 6700 /* 'S */, 7075 /* 'T */, 6700 /* 'U */, 6950 /* 'V */, 7075 /* 'W */, 6700 /* 'X */, 6825 /* 'Y */, 6700 /* 'Z */, 6700 /* '[ */, 6700 /* '\ */, 6700 /* '] */, 6700 /* '^ */, 6700 /* '_ */, 6700 /* '` */, 6700 /* 'a */, 6700 /* 'b */, 6200 /* 'c */, 6200 /* 'd */, 6200 /* 'e */, 6700 /* 'f */, 6700 /* 'g */, 6700 /* 'h */, 6700 /* 'i */, 6700 /* 'j */, 6700 /* 'k */, 6700 /* 'l */, 6700 /* 'm */, 6700 /* 'n */, 6200 /* 'o */, 6700 /* 'p */, 6200 /* 'q */, 6700 /* 'r */, 6700 /* 's */, 6700 /* 't */, 6700 /* 'u */, 7325 /* 'v */, 7200 /* 'w */, 6700 /* 'x */, 7200 /* 'y */, 6825 /* 'z */, 6700 /* '{ */, 6700 /* '| */, 6700 /* '} */, 6700 /* '~ */}, + {8450 /* ( */, 8450 /* ( */, 8450 /* ( */, 8450 /* ( */, 8450 /* ( */, 8450 /* ( */, 8450 /* ( */, 8450 /* ( */, 8450 /* ( */, 8450 /* ( */, 8450 /* ( */, 8450 /* ( */, 8450 /* ( */, 8450 /* ( */, 8450 /* ( */, 8450 /* ( */, 8450 /* ( */, 8450 /* ( */, 8450 /* ( */, 8450 /* ( */, 8450 /* ( */, 8450 /* ( */, 8450 /* ( */, 8450 /* ( */, 8450 /* ( */, 8450 /* ( */, 8450 /* ( */, 8450 /* ( */, 8450 /* ( */, 8450 /* ( */, 8450 /* ( */, 8450 /* ( */, 8450 /* ( */, 8450 /* (! */, 8450 /* (" */, 8450 /* (# */, 8450 /* ($ */, 8450 /* (% */, 8450 /* (& */, 8450 /* (' */, 8450 /* (( */, 8450 /* () */, 8450 /* (* */, 8450 /* (+ */, 8450 /* (, */, 8450 /* (- */, 8450 /* (. */, 8450 /* (/ */, 8450 /* (0 */, 8450 /* (1 */, 8450 /* (2 */, 8450 /* (3 */, 8450 /* (4 */, 8450 /* (5 */, 8450 /* (6 */, 8450 /* (7 */, 8450 /* (8 */, 8450 /* (9 */, 8450 /* (: */, 8450 /* (; */, 8450 /* (< */, 8450 /* (= */, 8450 /* (> */, 8450 /* (? */, 8450 /* (@ */, 8450 /* (A */, 8450 /* (B */, 8450 /* (C */, 8450 /* (D */, 8450 /* (E */, 8450 /* (F */, 8450 /* (G */, 8450 /* (H */, 8450 /* (I */, 8450 /* (J */, 8450 /* (K */, 8450 /* (L */, 8450 /* (M */, 8450 /* (N */, 8450 /* (O */, 8450 /* (P */, 8450 /* (Q */, 8450 /* (R */, 8450 /* (S */, 8950 /* (T */, 8450 /* (U */, 8950 /* (V */, 8950 /* (W */, 8700 /* (X */, 8950 /* (Y */, 8450 /* (Z */, 8450 /* ([ */, 8450 /* (\ */, 8450 /* (] */, 8450 /* (^ */, 8450 /* (_ */, 8450 /* (` */, 8450 /* (a */, 8450 /* (b */, 8450 /* (c */, 8450 /* (d */, 8450 /* (e */, 8450 /* (f */, 8700 /* (g */, 8450 /* (h */, 8450 /* (i */, 10200 /* (j */, 8450 /* (k */, 8450 /* (l */, 8450 /* (m */, 8450 /* (n */, 8450 /* (o */, 8450 /* (p */, 8450 /* (q */, 8450 /* (r */, 8450 /* (s */, 8450 /* (t */, 8450 /* (u */, 8450 /* (v */, 8450 /* (w */, 8450 /* (x */, 8450 /* (y */, 8450 /* (z */, 8450 /* ({ */, 8450 /* (| */, 8450 /* (} */, 8450 /* (~ */}, + {8450 /* ) */, 8450 /* ) */, 8450 /* ) */, 8450 /* ) */, 8450 /* ) */, 8450 /* ) */, 8450 /* ) */, 8450 /* ) */, 8450 /* ) */, 8450 /* ) */, 8450 /* ) */, 8450 /* ) */, 8450 /* ) */, 8450 /* ) */, 8450 /* ) */, 8450 /* ) */, 8450 /* ) */, 8450 /* ) */, 8450 /* ) */, 8450 /* ) */, 8450 /* ) */, 8450 /* ) */, 8450 /* ) */, 8450 /* ) */, 8450 /* ) */, 8450 /* ) */, 8450 /* ) */, 8450 /* ) */, 8450 /* ) */, 8450 /* ) */, 8450 /* ) */, 8450 /* ) */, 8450 /* ) */, 8450 /* )! */, 8450 /* )" */, 8450 /* )# */, 8450 /* )$ */, 8450 /* )% */, 8450 /* )& */, 8450 /* )' */, 8450 /* )( */, 8450 /* )) */, 8450 /* )* */, 8450 /* )+ */, 8450 /* ), */, 8450 /* )- */, 8450 /* ). 
*/, 8450 /* )/ */, 8450 /* )0 */, 8450 /* )1 */, 8450 /* )2 */, 8450 /* )3 */, 8450 /* )4 */, 8450 /* )5 */, 8450 /* )6 */, 8450 /* )7 */, 8450 /* )8 */, 8450 /* )9 */, 8450 /* ): */, 8450 /* ); */, 8450 /* )< */, 8450 /* )= */, 8450 /* )> */, 8450 /* )? */, 8450 /* )@ */, 8450 /* )A */, 8450 /* )B */, 8450 /* )C */, 8450 /* )D */, 8450 /* )E */, 8450 /* )F */, 8450 /* )G */, 8450 /* )H */, 8450 /* )I */, 8450 /* )J */, 8450 /* )K */, 8450 /* )L */, 8450 /* )M */, 8450 /* )N */, 8450 /* )O */, 8450 /* )P */, 8450 /* )Q */, 8450 /* )R */, 8450 /* )S */, 8450 /* )T */, 8450 /* )U */, 8450 /* )V */, 8450 /* )W */, 8450 /* )X */, 8450 /* )Y */, 8450 /* )Z */, 8450 /* )[ */, 8450 /* )\ */, 8450 /* )] */, 8450 /* )^ */, 8450 /* )_ */, 8450 /* )` */, 8450 /* )a */, 8450 /* )b */, 8450 /* )c */, 8450 /* )d */, 8450 /* )e */, 8450 /* )f */, 8450 /* )g */, 8450 /* )h */, 8450 /* )i */, 8450 /* )j */, 8450 /* )k */, 8450 /* )l */, 8450 /* )m */, 8450 /* )n */, 8450 /* )o */, 8450 /* )p */, 8450 /* )q */, 8450 /* )r */, 8450 /* )s */, 8450 /* )t */, 8450 /* )u */, 8450 /* )v */, 8450 /* )w */, 8450 /* )x */, 8450 /* )y */, 8450 /* )z */, 8450 /* ){ */, 8450 /* )| */, 8450 /* )} */, 8450 /* )~ */}, + {15025 /* * */, 15025 /* * */, 15025 /* * */, 15025 /* * */, 15025 /* * */, 15025 /* * */, 15025 /* * */, 15025 /* * */, 15025 /* * */, 15025 /* * */, 15025 /* * */, 15025 /* * */, 15025 /* * */, 15025 /* * */, 15025 /* * */, 15025 /* * */, 15025 /* * */, 15025 /* * */, 15025 /* * */, 15025 /* * */, 15025 /* * */, 15025 /* * */, 15025 /* * */, 15025 /* * */, 15025 /* * */, 15025 /* * */, 15025 /* * */, 15025 /* * */, 15025 /* * */, 15025 /* * */, 15025 /* * */, 15025 /* * */, 15025 /* * */, 15025 /* *! */, 15025 /* *" */, 15025 /* *# */, 15025 /* *$ */, 15025 /* *% */, 15025 /* *& */, 15025 /* *' */, 15025 /* *( */, 15025 /* *) */, 15025 /* ** */, 15025 /* *+ */, 12775 /* *, */, 15025 /* *- */, 12775 /* *. */, 15025 /* *\/ */, 15025 /* *0 */, 15025 /* *1 */, 15025 /* *2 */, 15025 /* *3 */, 15025 /* *4 */, 15025 /* *5 */, 15025 /* *6 */, 15025 /* *7 */, 15025 /* *8 */, 15025 /* *9 */, 15025 /* *: */, 15025 /* *; */, 15025 /* *< */, 15025 /* *= */, 15025 /* *> */, 15025 /* *? 
*/, 15025 /* *@ */, 13775 /* *A */, 15025 /* *B */, 14775 /* *C */, 15025 /* *D */, 15025 /* *E */, 15025 /* *F */, 14775 /* *G */, 15025 /* *H */, 15025 /* *I */, 14025 /* *J */, 15025 /* *K */, 15025 /* *L */, 15025 /* *M */, 15025 /* *N */, 14775 /* *O */, 15025 /* *P */, 14775 /* *Q */, 15025 /* *R */, 15025 /* *S */, 15400 /* *T */, 15025 /* *U */, 15275 /* *V */, 15400 /* *W */, 15025 /* *X */, 15150 /* *Y */, 15025 /* *Z */, 15025 /* *[ */, 15025 /* *\ */, 15025 /* *] */, 15025 /* *^ */, 15025 /* *_ */, 15025 /* *` */, 15025 /* *a */, 15025 /* *b */, 14525 /* *c */, 14525 /* *d */, 14525 /* *e */, 15025 /* *f */, 15025 /* *g */, 15025 /* *h */, 15025 /* *i */, 15025 /* *j */, 15025 /* *k */, 15025 /* *l */, 15025 /* *m */, 15025 /* *n */, 14525 /* *o */, 15025 /* *p */, 14525 /* *q */, 15025 /* *r */, 15025 /* *s */, 15025 /* *t */, 15025 /* *u */, 15650 /* *v */, 15525 /* *w */, 15025 /* *x */, 15525 /* *y */, 15150 /* *z */, 15025 /* *{ */, 15025 /* *| */, 15025 /* *} */, 15025 /* *~ */}, + {15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* + */, 15000 /* +! */, 15000 /* +" */, 15000 /* +# */, 15000 /* +$ */, 15000 /* +% */, 15000 /* +& */, 15000 /* +' */, 15000 /* +( */, 15000 /* +) */, 15000 /* +* */, 15000 /* ++ */, 15000 /* +, */, 15000 /* +- */, 15000 /* +. */, 15000 /* +/ */, 15000 /* +0 */, 15000 /* +1 */, 15000 /* +2 */, 15000 /* +3 */, 15000 /* +4 */, 15000 /* +5 */, 15000 /* +6 */, 15000 /* +7 */, 15000 /* +8 */, 15000 /* +9 */, 15000 /* +: */, 15000 /* +; */, 15000 /* +< */, 15000 /* += */, 15000 /* +> */, 15000 /* +? */, 15000 /* +@ */, 15000 /* +A */, 15000 /* +B */, 15000 /* +C */, 15000 /* +D */, 15000 /* +E */, 15000 /* +F */, 15000 /* +G */, 15000 /* +H */, 15000 /* +I */, 15000 /* +J */, 15000 /* +K */, 15000 /* +L */, 15000 /* +M */, 15000 /* +N */, 15000 /* +O */, 15000 /* +P */, 15000 /* +Q */, 15000 /* +R */, 15000 /* +S */, 15000 /* +T */, 15000 /* +U */, 15000 /* +V */, 15000 /* +W */, 15000 /* +X */, 15000 /* +Y */, 15000 /* +Z */, 15000 /* +[ */, 15000 /* +\ */, 15000 /* +] */, 15000 /* +^ */, 15000 /* +_ */, 15000 /* +` */, 15000 /* +a */, 15000 /* +b */, 15000 /* +c */, 15000 /* +d */, 15000 /* +e */, 15000 /* +f */, 15000 /* +g */, 15000 /* +h */, 15000 /* +i */, 15000 /* +j */, 15000 /* +k */, 15000 /* +l */, 15000 /* +m */, 15000 /* +n */, 15000 /* +o */, 15000 /* +p */, 15000 /* +q */, 15000 /* +r */, 15000 /* +s */, 15000 /* +t */, 15000 /* +u */, 15000 /* +v */, 15000 /* +w */, 15000 /* +x */, 15000 /* +y */, 15000 /* +z */, 15000 /* +{ */, 15000 /* +| */, 15000 /* +} */, 15000 /* +~ */}, + {7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* , */, 7750 /* ,! 
*/, 5500 /* ," */, 7750 /* ,# */, 7750 /* ,$ */, 7750 /* ,% */, 7750 /* ,& */, 5500 /* ,' */, 7750 /* ,( */, 7750 /* ,) */, 5500 /* ,* */, 7750 /* ,+ */, 7750 /* ,, */, 7750 /* ,- */, 7750 /* ,. */, 7750 /* ,/ */, 7750 /* ,0 */, 7750 /* ,1 */, 7750 /* ,2 */, 7750 /* ,3 */, 7750 /* ,4 */, 7750 /* ,5 */, 7750 /* ,6 */, 7750 /* ,7 */, 7750 /* ,8 */, 7750 /* ,9 */, 7750 /* ,: */, 7750 /* ,; */, 7750 /* ,< */, 7750 /* ,= */, 7750 /* ,> */, 5750 /* ,? */, 7750 /* ,@ */, 8250 /* ,A */, 7750 /* ,B */, 6750 /* ,C */, 7750 /* ,D */, 7750 /* ,E */, 7750 /* ,F */, 6750 /* ,G */, 7750 /* ,H */, 7750 /* ,I */, 7750 /* ,J */, 7750 /* ,K */, 7750 /* ,L */, 7750 /* ,M */, 7750 /* ,N */, 6750 /* ,O */, 7750 /* ,P */, 6750 /* ,Q */, 7750 /* ,R */, 7750 /* ,S */, 6000 /* ,T */, 6875 /* ,U */, 6250 /* ,V */, 6750 /* ,W */, 8000 /* ,X */, 6000 /* ,Y */, 8500 /* ,Z */, 7750 /* ,[ */, 7750 /* ,\ */, 7750 /* ,] */, 7750 /* ,^ */, 7750 /* ,_ */, 7750 /* ,` */, 7875 /* ,a */, 7750 /* ,b */, 7375 /* ,c */, 7375 /* ,d */, 7375 /* ,e */, 7375 /* ,f */, 7750 /* ,g */, 7750 /* ,h */, 7750 /* ,i */, 7750 /* ,j */, 7750 /* ,k */, 7750 /* ,l */, 7750 /* ,m */, 7750 /* ,n */, 7375 /* ,o */, 7750 /* ,p */, 7375 /* ,q */, 7750 /* ,r */, 7750 /* ,s */, 7150 /* ,t */, 7550 /* ,u */, 6550 /* ,v */, 6750 /* ,w */, 7750 /* ,x */, 6625 /* ,y */, 8000 /* ,z */, 7750 /* ,{ */, 7750 /* ,| */, 7750 /* ,} */, 7750 /* ,~ */}, + {10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* - */, 10075 /* -! */, 10075 /* -" */, 10075 /* -# */, 10075 /* -$ */, 10075 /* -% */, 10075 /* -& */, 10075 /* -' */, 10075 /* -( */, 10075 /* -) */, 10075 /* -* */, 10075 /* -+ */, 10075 /* -, */, 10075 /* -- */, 10075 /* -. */, 10075 /* -/ */, 10075 /* -0 */, 10075 /* -1 */, 10075 /* -2 */, 10075 /* -3 */, 10075 /* -4 */, 10075 /* -5 */, 10075 /* -6 */, 10075 /* -7 */, 10075 /* -8 */, 10075 /* -9 */, 10075 /* -: */, 10075 /* -; */, 10075 /* -< */, 10075 /* -= */, 10075 /* -> */, 10075 /* -? */, 10075 /* -@ */, 9700 /* -A */, 10075 /* -B */, 10450 /* -C */, 10075 /* -D */, 10075 /* -E */, 10075 /* -F */, 10450 /* -G */, 10075 /* -H */, 9325 /* -I */, 9450 /* -J */, 10075 /* -K */, 10075 /* -L */, 10075 /* -M */, 10075 /* -N */, 10450 /* -O */, 10075 /* -P */, 10450 /* -Q */, 10075 /* -R */, 9575 /* -S */, 8950 /* -T */, 10075 /* -U */, 9450 /* -V */, 9825 /* -W */, 9325 /* -X */, 8825 /* -Y */, 9325 /* -Z */, 10075 /* -[ */, 10075 /* -\ */, 10075 /* -] */, 10075 /* -^ */, 10075 /* -_ */, 10075 /* -` */, 10075 /* -a */, 10075 /* -b */, 10325 /* -c */, 10325 /* -d */, 10325 /* -e */, 10075 /* -f */, 10075 /* -g */, 10075 /* -h */, 10075 /* -i */, 10075 /* -j */, 10075 /* -k */, 10075 /* -l */, 10075 /* -m */, 10075 /* -n */, 10325 /* -o */, 10075 /* -p */, 10325 /* -q */, 10075 /* -r */, 10200 /* -s */, 10075 /* -t */, 10075 /* -u */, 9950 /* -v */, 9975 /* -w */, 9200 /* -x */, 9950 /* -y */, 9700 /* -z */, 10075 /* -{ */, 10075 /* -| */, 10075 /* -} */, 10075 /* -~ */}, + {7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* . 
*/, 7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* . */, 7750 /* .! */, 5500 /* ." */, 7750 /* .# */, 7750 /* .$ */, 7750 /* .% */, 7750 /* .& */, 5500 /* .' */, 7750 /* .( */, 7750 /* .) */, 5500 /* .* */, 7750 /* .+ */, 7750 /* ., */, 7750 /* .- */, 7750 /* .. */, 7750 /* ./ */, 7750 /* .0 */, 7750 /* .1 */, 7750 /* .2 */, 7750 /* .3 */, 7750 /* .4 */, 7750 /* .5 */, 7750 /* .6 */, 7750 /* .7 */, 7750 /* .8 */, 7750 /* .9 */, 7750 /* .: */, 7750 /* .; */, 7750 /* .< */, 7750 /* .= */, 7750 /* .> */, 5750 /* .? */, 7750 /* .@ */, 8250 /* .A */, 7750 /* .B */, 6750 /* .C */, 7750 /* .D */, 7750 /* .E */, 7750 /* .F */, 6750 /* .G */, 7750 /* .H */, 7750 /* .I */, 7750 /* .J */, 7750 /* .K */, 7750 /* .L */, 7750 /* .M */, 7750 /* .N */, 6750 /* .O */, 7750 /* .P */, 6750 /* .Q */, 7750 /* .R */, 7750 /* .S */, 6000 /* .T */, 6875 /* .U */, 6250 /* .V */, 6750 /* .W */, 8000 /* .X */, 6000 /* .Y */, 8500 /* .Z */, 7750 /* .[ */, 7750 /* .\ */, 7750 /* .] */, 7750 /* .^ */, 7750 /* ._ */, 7750 /* .` */, 7875 /* .a */, 7750 /* .b */, 7375 /* .c */, 7375 /* .d */, 7375 /* .e */, 7375 /* .f */, 7750 /* .g */, 7750 /* .h */, 7750 /* .i */, 7750 /* .j */, 7750 /* .k */, 7750 /* .l */, 7750 /* .m */, 7750 /* .n */, 7375 /* .o */, 7750 /* .p */, 7375 /* .q */, 7750 /* .r */, 7750 /* .s */, 7150 /* .t */, 7550 /* .u */, 6550 /* .v */, 6750 /* .w */, 7750 /* .x */, 6625 /* .y */, 8000 /* .z */, 7750 /* .{ */, 7750 /* .| */, 7750 /* .} */, 7750 /* .~ */}, + {11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* / */, 11500 /* /! */, 11500 /* /" */, 11500 /* /# */, 11500 /* /$ */, 11500 /* /% */, 11500 /* /& */, 11500 /* /' */, 11500 /* /( */, 11500 /* /) */, 11500 /* /\* */, 11500 /* /+ */, 11500 /* /, */, 11500 /* /- */, 11500 /* /. */, 6750 /* // */, 11500 /* /0 */, 11500 /* /1 */, 11500 /* /2 */, 11500 /* /3 */, 11500 /* /4 */, 11500 /* /5 */, 11500 /* /6 */, 11500 /* /7 */, 11500 /* /8 */, 11500 /* /9 */, 11500 /* /: */, 11500 /* /; */, 11500 /* /< */, 11500 /* /= */, 11500 /* /> */, 11500 /* /? 
*/, 11500 /* /@ */, 10375 /* /A */, 11500 /* /B */, 10950 /* /C */, 11500 /* /D */, 11500 /* /E */, 11500 /* /F */, 10950 /* /G */, 11500 /* /H */, 11500 /* /I */, 11500 /* /J */, 11500 /* /K */, 11500 /* /L */, 11500 /* /M */, 11500 /* /N */, 10950 /* /O */, 11500 /* /P */, 10950 /* /Q */, 11500 /* /R */, 11000 /* /S */, 12000 /* /T */, 11500 /* /U */, 11750 /* /V */, 11625 /* /W */, 11500 /* /X */, 11750 /* /Y */, 11500 /* /Z */, 11500 /* /[ */, 11500 /* /\ */, 11500 /* /] */, 11500 /* /^ */, 11500 /* /_ */, 11500 /* /` */, 10500 /* /a */, 11500 /* /b */, 10625 /* /c */, 10625 /* /d */, 10625 /* /e */, 11500 /* /f */, 10875 /* /g */, 11500 /* /h */, 11500 /* /i */, 11500 /* /j */, 11500 /* /k */, 11500 /* /l */, 11250 /* /m */, 11250 /* /n */, 10625 /* /o */, 11500 /* /p */, 10625 /* /q */, 11250 /* /r */, 11500 /* /s */, 11500 /* /t */, 11500 /* /u */, 11500 /* /v */, 11500 /* /w */, 11500 /* /x */, 11500 /* /y */, 11500 /* /z */, 11500 /* /{ */, 11500 /* /| */, 11500 /* /} */, 11500 /* /~ */}, + {15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0 */, 15000 /* 0! */, 15000 /* 0" */, 15000 /* 0# */, 15000 /* 0$ */, 15000 /* 0% */, 15000 /* 0& */, 15000 /* 0' */, 15000 /* 0( */, 15000 /* 0) */, 15000 /* 0* */, 15000 /* 0+ */, 15000 /* 0, */, 15000 /* 0- */, 15000 /* 0. */, 15000 /* 0/ */, 15000 /* 00 */, 15000 /* 01 */, 15000 /* 02 */, 15000 /* 03 */, 15000 /* 04 */, 15000 /* 05 */, 15000 /* 06 */, 15000 /* 07 */, 15000 /* 08 */, 15000 /* 09 */, 15000 /* 0: */, 15000 /* 0; */, 15000 /* 0< */, 15000 /* 0= */, 15000 /* 0> */, 15000 /* 0? */, 15000 /* 0@ */, 15000 /* 0A */, 15000 /* 0B */, 15000 /* 0C */, 15000 /* 0D */, 15000 /* 0E */, 15000 /* 0F */, 15000 /* 0G */, 15000 /* 0H */, 15000 /* 0I */, 15000 /* 0J */, 15000 /* 0K */, 15000 /* 0L */, 15000 /* 0M */, 15000 /* 0N */, 15000 /* 0O */, 15000 /* 0P */, 15000 /* 0Q */, 15000 /* 0R */, 15000 /* 0S */, 15000 /* 0T */, 15000 /* 0U */, 15000 /* 0V */, 15000 /* 0W */, 15000 /* 0X */, 15000 /* 0Y */, 15000 /* 0Z */, 15000 /* 0[ */, 15000 /* 0\ */, 15000 /* 0] */, 15000 /* 0^ */, 15000 /* 0_ */, 15000 /* 0` */, 15000 /* 0a */, 15000 /* 0b */, 15000 /* 0c */, 15000 /* 0d */, 15000 /* 0e */, 15000 /* 0f */, 15000 /* 0g */, 15000 /* 0h */, 15000 /* 0i */, 15000 /* 0j */, 15000 /* 0k */, 15000 /* 0l */, 15000 /* 0m */, 15000 /* 0n */, 15000 /* 0o */, 15000 /* 0p */, 15000 /* 0q */, 15000 /* 0r */, 15000 /* 0s */, 15000 /* 0t */, 15000 /* 0u */, 15000 /* 0v */, 15000 /* 0w */, 15000 /* 0x */, 15000 /* 0y */, 15000 /* 0z */, 15000 /* 0{ */, 15000 /* 0| */, 15000 /* 0} */, 15000 /* 0~ */}, + {15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1 */, 15000 /* 1! 
*/, 15000 /* 1" */, 15000 /* 1# */, 15000 /* 1$ */, 15000 /* 1% */, 15000 /* 1& */, 15000 /* 1' */, 15000 /* 1( */, 15000 /* 1) */, 15000 /* 1* */, 15000 /* 1+ */, 15000 /* 1, */, 15000 /* 1- */, 15000 /* 1. */, 15000 /* 1/ */, 15000 /* 10 */, 15000 /* 11 */, 15000 /* 12 */, 15000 /* 13 */, 15000 /* 14 */, 15000 /* 15 */, 15000 /* 16 */, 15000 /* 17 */, 15000 /* 18 */, 15000 /* 19 */, 15000 /* 1: */, 15000 /* 1; */, 15000 /* 1< */, 15000 /* 1= */, 15000 /* 1> */, 15000 /* 1? */, 15000 /* 1@ */, 15000 /* 1A */, 15000 /* 1B */, 15000 /* 1C */, 15000 /* 1D */, 15000 /* 1E */, 15000 /* 1F */, 15000 /* 1G */, 15000 /* 1H */, 15000 /* 1I */, 15000 /* 1J */, 15000 /* 1K */, 15000 /* 1L */, 15000 /* 1M */, 15000 /* 1N */, 15000 /* 1O */, 15000 /* 1P */, 15000 /* 1Q */, 15000 /* 1R */, 15000 /* 1S */, 15000 /* 1T */, 15000 /* 1U */, 15000 /* 1V */, 15000 /* 1W */, 15000 /* 1X */, 15000 /* 1Y */, 15000 /* 1Z */, 15000 /* 1[ */, 15000 /* 1\ */, 15000 /* 1] */, 15000 /* 1^ */, 15000 /* 1_ */, 15000 /* 1` */, 15000 /* 1a */, 15000 /* 1b */, 15000 /* 1c */, 15000 /* 1d */, 15000 /* 1e */, 15000 /* 1f */, 15000 /* 1g */, 15000 /* 1h */, 15000 /* 1i */, 15000 /* 1j */, 15000 /* 1k */, 15000 /* 1l */, 15000 /* 1m */, 15000 /* 1n */, 15000 /* 1o */, 15000 /* 1p */, 15000 /* 1q */, 15000 /* 1r */, 15000 /* 1s */, 15000 /* 1t */, 15000 /* 1u */, 15000 /* 1v */, 15000 /* 1w */, 15000 /* 1x */, 15000 /* 1y */, 15000 /* 1z */, 15000 /* 1{ */, 15000 /* 1| */, 15000 /* 1} */, 15000 /* 1~ */}, + {15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2 */, 15000 /* 2! */, 15000 /* 2" */, 15000 /* 2# */, 15000 /* 2$ */, 15000 /* 2% */, 15000 /* 2& */, 15000 /* 2' */, 15000 /* 2( */, 15000 /* 2) */, 15000 /* 2* */, 15000 /* 2+ */, 15000 /* 2, */, 15000 /* 2- */, 15000 /* 2. */, 15000 /* 2/ */, 15000 /* 20 */, 15000 /* 21 */, 15000 /* 22 */, 15000 /* 23 */, 15000 /* 24 */, 15000 /* 25 */, 15000 /* 26 */, 15000 /* 27 */, 15000 /* 28 */, 15000 /* 29 */, 15000 /* 2: */, 15000 /* 2; */, 15000 /* 2< */, 15000 /* 2= */, 15000 /* 2> */, 15000 /* 2? 
*/, 15000 /* 2@ */, 15000 /* 2A */, 15000 /* 2B */, 15000 /* 2C */, 15000 /* 2D */, 15000 /* 2E */, 15000 /* 2F */, 15000 /* 2G */, 15000 /* 2H */, 15000 /* 2I */, 15000 /* 2J */, 15000 /* 2K */, 15000 /* 2L */, 15000 /* 2M */, 15000 /* 2N */, 15000 /* 2O */, 15000 /* 2P */, 15000 /* 2Q */, 15000 /* 2R */, 15000 /* 2S */, 15000 /* 2T */, 15000 /* 2U */, 15000 /* 2V */, 15000 /* 2W */, 15000 /* 2X */, 15000 /* 2Y */, 15000 /* 2Z */, 15000 /* 2[ */, 15000 /* 2\ */, 15000 /* 2] */, 15000 /* 2^ */, 15000 /* 2_ */, 15000 /* 2` */, 15000 /* 2a */, 15000 /* 2b */, 15000 /* 2c */, 15000 /* 2d */, 15000 /* 2e */, 15000 /* 2f */, 15000 /* 2g */, 15000 /* 2h */, 15000 /* 2i */, 15000 /* 2j */, 15000 /* 2k */, 15000 /* 2l */, 15000 /* 2m */, 15000 /* 2n */, 15000 /* 2o */, 15000 /* 2p */, 15000 /* 2q */, 15000 /* 2r */, 15000 /* 2s */, 15000 /* 2t */, 15000 /* 2u */, 15000 /* 2v */, 15000 /* 2w */, 15000 /* 2x */, 15000 /* 2y */, 15000 /* 2z */, 15000 /* 2{ */, 15000 /* 2| */, 15000 /* 2} */, 15000 /* 2~ */}, + {15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3 */, 15000 /* 3! */, 15000 /* 3" */, 15000 /* 3# */, 15000 /* 3$ */, 15000 /* 3% */, 15000 /* 3& */, 15000 /* 3' */, 15000 /* 3( */, 15000 /* 3) */, 15000 /* 3* */, 15000 /* 3+ */, 15000 /* 3, */, 15000 /* 3- */, 15000 /* 3. */, 15000 /* 3/ */, 15000 /* 30 */, 15000 /* 31 */, 15000 /* 32 */, 15000 /* 33 */, 15000 /* 34 */, 15000 /* 35 */, 15000 /* 36 */, 15000 /* 37 */, 15000 /* 38 */, 15000 /* 39 */, 15000 /* 3: */, 15000 /* 3; */, 15000 /* 3< */, 15000 /* 3= */, 15000 /* 3> */, 15000 /* 3? */, 15000 /* 3@ */, 15000 /* 3A */, 15000 /* 3B */, 15000 /* 3C */, 15000 /* 3D */, 15000 /* 3E */, 15000 /* 3F */, 15000 /* 3G */, 15000 /* 3H */, 15000 /* 3I */, 15000 /* 3J */, 15000 /* 3K */, 15000 /* 3L */, 15000 /* 3M */, 15000 /* 3N */, 15000 /* 3O */, 15000 /* 3P */, 15000 /* 3Q */, 15000 /* 3R */, 15000 /* 3S */, 15000 /* 3T */, 15000 /* 3U */, 15000 /* 3V */, 15000 /* 3W */, 15000 /* 3X */, 15000 /* 3Y */, 15000 /* 3Z */, 15000 /* 3[ */, 15000 /* 3\ */, 15000 /* 3] */, 15000 /* 3^ */, 15000 /* 3_ */, 15000 /* 3` */, 15000 /* 3a */, 15000 /* 3b */, 15000 /* 3c */, 15000 /* 3d */, 15000 /* 3e */, 15000 /* 3f */, 15000 /* 3g */, 15000 /* 3h */, 15000 /* 3i */, 15000 /* 3j */, 15000 /* 3k */, 15000 /* 3l */, 15000 /* 3m */, 15000 /* 3n */, 15000 /* 3o */, 15000 /* 3p */, 15000 /* 3q */, 15000 /* 3r */, 15000 /* 3s */, 15000 /* 3t */, 15000 /* 3u */, 15000 /* 3v */, 15000 /* 3w */, 15000 /* 3x */, 15000 /* 3y */, 15000 /* 3z */, 15000 /* 3{ */, 15000 /* 3| */, 15000 /* 3} */, 15000 /* 3~ */}, + {15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4 */, 15000 /* 4! 
*/, 15000 /* 4" */, 15000 /* 4# */, 15000 /* 4$ */, 15000 /* 4% */, 15000 /* 4& */, 15000 /* 4' */, 15000 /* 4( */, 15000 /* 4) */, 15000 /* 4* */, 15000 /* 4+ */, 15000 /* 4, */, 15000 /* 4- */, 15000 /* 4. */, 15000 /* 4/ */, 15000 /* 40 */, 15000 /* 41 */, 15000 /* 42 */, 15000 /* 43 */, 15000 /* 44 */, 15000 /* 45 */, 15000 /* 46 */, 15000 /* 47 */, 15000 /* 48 */, 15000 /* 49 */, 15000 /* 4: */, 15000 /* 4; */, 15000 /* 4< */, 15000 /* 4= */, 15000 /* 4> */, 15000 /* 4? */, 15000 /* 4@ */, 15000 /* 4A */, 15000 /* 4B */, 15000 /* 4C */, 15000 /* 4D */, 15000 /* 4E */, 15000 /* 4F */, 15000 /* 4G */, 15000 /* 4H */, 15000 /* 4I */, 15000 /* 4J */, 15000 /* 4K */, 15000 /* 4L */, 15000 /* 4M */, 15000 /* 4N */, 15000 /* 4O */, 15000 /* 4P */, 15000 /* 4Q */, 15000 /* 4R */, 15000 /* 4S */, 15000 /* 4T */, 15000 /* 4U */, 15000 /* 4V */, 15000 /* 4W */, 15000 /* 4X */, 15000 /* 4Y */, 15000 /* 4Z */, 15000 /* 4[ */, 15000 /* 4\ */, 15000 /* 4] */, 15000 /* 4^ */, 15000 /* 4_ */, 15000 /* 4` */, 15000 /* 4a */, 15000 /* 4b */, 15000 /* 4c */, 15000 /* 4d */, 15000 /* 4e */, 15000 /* 4f */, 15000 /* 4g */, 15000 /* 4h */, 15000 /* 4i */, 15000 /* 4j */, 15000 /* 4k */, 15000 /* 4l */, 15000 /* 4m */, 15000 /* 4n */, 15000 /* 4o */, 15000 /* 4p */, 15000 /* 4q */, 15000 /* 4r */, 15000 /* 4s */, 15000 /* 4t */, 15000 /* 4u */, 15000 /* 4v */, 15000 /* 4w */, 15000 /* 4x */, 15000 /* 4y */, 15000 /* 4z */, 15000 /* 4{ */, 15000 /* 4| */, 15000 /* 4} */, 15000 /* 4~ */}, + {15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5 */, 15000 /* 5! */, 15000 /* 5" */, 15000 /* 5# */, 15000 /* 5$ */, 15000 /* 5% */, 15000 /* 5& */, 15000 /* 5' */, 15000 /* 5( */, 15000 /* 5) */, 15000 /* 5* */, 15000 /* 5+ */, 15000 /* 5, */, 15000 /* 5- */, 15000 /* 5. */, 15000 /* 5/ */, 15000 /* 50 */, 15000 /* 51 */, 15000 /* 52 */, 15000 /* 53 */, 15000 /* 54 */, 15000 /* 55 */, 15000 /* 56 */, 15000 /* 57 */, 15000 /* 58 */, 15000 /* 59 */, 15000 /* 5: */, 15000 /* 5; */, 15000 /* 5< */, 15000 /* 5= */, 15000 /* 5> */, 15000 /* 5? 
*/, 15000 /* 5@ */, 15000 /* 5A */, 15000 /* 5B */, 15000 /* 5C */, 15000 /* 5D */, 15000 /* 5E */, 15000 /* 5F */, 15000 /* 5G */, 15000 /* 5H */, 15000 /* 5I */, 15000 /* 5J */, 15000 /* 5K */, 15000 /* 5L */, 15000 /* 5M */, 15000 /* 5N */, 15000 /* 5O */, 15000 /* 5P */, 15000 /* 5Q */, 15000 /* 5R */, 15000 /* 5S */, 15000 /* 5T */, 15000 /* 5U */, 15000 /* 5V */, 15000 /* 5W */, 15000 /* 5X */, 15000 /* 5Y */, 15000 /* 5Z */, 15000 /* 5[ */, 15000 /* 5\ */, 15000 /* 5] */, 15000 /* 5^ */, 15000 /* 5_ */, 15000 /* 5` */, 15000 /* 5a */, 15000 /* 5b */, 15000 /* 5c */, 15000 /* 5d */, 15000 /* 5e */, 15000 /* 5f */, 15000 /* 5g */, 15000 /* 5h */, 15000 /* 5i */, 15000 /* 5j */, 15000 /* 5k */, 15000 /* 5l */, 15000 /* 5m */, 15000 /* 5n */, 15000 /* 5o */, 15000 /* 5p */, 15000 /* 5q */, 15000 /* 5r */, 15000 /* 5s */, 15000 /* 5t */, 15000 /* 5u */, 15000 /* 5v */, 15000 /* 5w */, 15000 /* 5x */, 15000 /* 5y */, 15000 /* 5z */, 15000 /* 5{ */, 15000 /* 5| */, 15000 /* 5} */, 15000 /* 5~ */}, + {15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6 */, 15000 /* 6! */, 15000 /* 6" */, 15000 /* 6# */, 15000 /* 6$ */, 15000 /* 6% */, 15000 /* 6& */, 15000 /* 6' */, 15000 /* 6( */, 15000 /* 6) */, 15000 /* 6* */, 15000 /* 6+ */, 15000 /* 6, */, 15000 /* 6- */, 15000 /* 6. */, 15000 /* 6/ */, 15000 /* 60 */, 15000 /* 61 */, 15000 /* 62 */, 15000 /* 63 */, 15000 /* 64 */, 15000 /* 65 */, 15000 /* 66 */, 15000 /* 67 */, 15000 /* 68 */, 15000 /* 69 */, 15000 /* 6: */, 15000 /* 6; */, 15000 /* 6< */, 15000 /* 6= */, 15000 /* 6> */, 15000 /* 6? */, 15000 /* 6@ */, 15000 /* 6A */, 15000 /* 6B */, 15000 /* 6C */, 15000 /* 6D */, 15000 /* 6E */, 15000 /* 6F */, 15000 /* 6G */, 15000 /* 6H */, 15000 /* 6I */, 15000 /* 6J */, 15000 /* 6K */, 15000 /* 6L */, 15000 /* 6M */, 15000 /* 6N */, 15000 /* 6O */, 15000 /* 6P */, 15000 /* 6Q */, 15000 /* 6R */, 15000 /* 6S */, 15000 /* 6T */, 15000 /* 6U */, 15000 /* 6V */, 15000 /* 6W */, 15000 /* 6X */, 15000 /* 6Y */, 15000 /* 6Z */, 15000 /* 6[ */, 15000 /* 6\ */, 15000 /* 6] */, 15000 /* 6^ */, 15000 /* 6_ */, 15000 /* 6` */, 15000 /* 6a */, 15000 /* 6b */, 15000 /* 6c */, 15000 /* 6d */, 15000 /* 6e */, 15000 /* 6f */, 15000 /* 6g */, 15000 /* 6h */, 15000 /* 6i */, 15000 /* 6j */, 15000 /* 6k */, 15000 /* 6l */, 15000 /* 6m */, 15000 /* 6n */, 15000 /* 6o */, 15000 /* 6p */, 15000 /* 6q */, 15000 /* 6r */, 15000 /* 6s */, 15000 /* 6t */, 15000 /* 6u */, 15000 /* 6v */, 15000 /* 6w */, 15000 /* 6x */, 15000 /* 6y */, 15000 /* 6z */, 15000 /* 6{ */, 15000 /* 6| */, 15000 /* 6} */, 15000 /* 6~ */}, + {15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7 */, 15000 /* 7! 
*/, 15000 /* 7" */, 15000 /* 7# */, 15000 /* 7$ */, 15000 /* 7% */, 15000 /* 7& */, 15000 /* 7' */, 15000 /* 7( */, 15000 /* 7) */, 15000 /* 7* */, 15000 /* 7+ */, 15000 /* 7, */, 15000 /* 7- */, 15000 /* 7. */, 15000 /* 7/ */, 15000 /* 70 */, 15000 /* 71 */, 15000 /* 72 */, 15000 /* 73 */, 15000 /* 74 */, 15000 /* 75 */, 15000 /* 76 */, 15000 /* 77 */, 15000 /* 78 */, 15000 /* 79 */, 15000 /* 7: */, 15000 /* 7; */, 15000 /* 7< */, 15000 /* 7= */, 15000 /* 7> */, 15000 /* 7? */, 15000 /* 7@ */, 15000 /* 7A */, 15000 /* 7B */, 15000 /* 7C */, 15000 /* 7D */, 15000 /* 7E */, 15000 /* 7F */, 15000 /* 7G */, 15000 /* 7H */, 15000 /* 7I */, 15000 /* 7J */, 15000 /* 7K */, 15000 /* 7L */, 15000 /* 7M */, 15000 /* 7N */, 15000 /* 7O */, 15000 /* 7P */, 15000 /* 7Q */, 15000 /* 7R */, 15000 /* 7S */, 15000 /* 7T */, 15000 /* 7U */, 15000 /* 7V */, 15000 /* 7W */, 15000 /* 7X */, 15000 /* 7Y */, 15000 /* 7Z */, 15000 /* 7[ */, 15000 /* 7\ */, 15000 /* 7] */, 15000 /* 7^ */, 15000 /* 7_ */, 15000 /* 7` */, 15000 /* 7a */, 15000 /* 7b */, 15000 /* 7c */, 15000 /* 7d */, 15000 /* 7e */, 15000 /* 7f */, 15000 /* 7g */, 15000 /* 7h */, 15000 /* 7i */, 15000 /* 7j */, 15000 /* 7k */, 15000 /* 7l */, 15000 /* 7m */, 15000 /* 7n */, 15000 /* 7o */, 15000 /* 7p */, 15000 /* 7q */, 15000 /* 7r */, 15000 /* 7s */, 15000 /* 7t */, 15000 /* 7u */, 15000 /* 7v */, 15000 /* 7w */, 15000 /* 7x */, 15000 /* 7y */, 15000 /* 7z */, 15000 /* 7{ */, 15000 /* 7| */, 15000 /* 7} */, 15000 /* 7~ */}, + {15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8 */, 15000 /* 8! */, 15000 /* 8" */, 15000 /* 8# */, 15000 /* 8$ */, 15000 /* 8% */, 15000 /* 8& */, 15000 /* 8' */, 15000 /* 8( */, 15000 /* 8) */, 15000 /* 8* */, 15000 /* 8+ */, 15000 /* 8, */, 15000 /* 8- */, 15000 /* 8. */, 15000 /* 8/ */, 15000 /* 80 */, 15000 /* 81 */, 15000 /* 82 */, 15000 /* 83 */, 15000 /* 84 */, 15000 /* 85 */, 15000 /* 86 */, 15000 /* 87 */, 15000 /* 88 */, 15000 /* 89 */, 15000 /* 8: */, 15000 /* 8; */, 15000 /* 8< */, 15000 /* 8= */, 15000 /* 8> */, 15000 /* 8? 
*/, 15000 /* 8@ */, 15000 /* 8A */, 15000 /* 8B */, 15000 /* 8C */, 15000 /* 8D */, 15000 /* 8E */, 15000 /* 8F */, 15000 /* 8G */, 15000 /* 8H */, 15000 /* 8I */, 15000 /* 8J */, 15000 /* 8K */, 15000 /* 8L */, 15000 /* 8M */, 15000 /* 8N */, 15000 /* 8O */, 15000 /* 8P */, 15000 /* 8Q */, 15000 /* 8R */, 15000 /* 8S */, 15000 /* 8T */, 15000 /* 8U */, 15000 /* 8V */, 15000 /* 8W */, 15000 /* 8X */, 15000 /* 8Y */, 15000 /* 8Z */, 15000 /* 8[ */, 15000 /* 8\ */, 15000 /* 8] */, 15000 /* 8^ */, 15000 /* 8_ */, 15000 /* 8` */, 15000 /* 8a */, 15000 /* 8b */, 15000 /* 8c */, 15000 /* 8d */, 15000 /* 8e */, 15000 /* 8f */, 15000 /* 8g */, 15000 /* 8h */, 15000 /* 8i */, 15000 /* 8j */, 15000 /* 8k */, 15000 /* 8l */, 15000 /* 8m */, 15000 /* 8n */, 15000 /* 8o */, 15000 /* 8p */, 15000 /* 8q */, 15000 /* 8r */, 15000 /* 8s */, 15000 /* 8t */, 15000 /* 8u */, 15000 /* 8v */, 15000 /* 8w */, 15000 /* 8x */, 15000 /* 8y */, 15000 /* 8z */, 15000 /* 8{ */, 15000 /* 8| */, 15000 /* 8} */, 15000 /* 8~ */}, + {15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9 */, 15000 /* 9! */, 15000 /* 9" */, 15000 /* 9# */, 15000 /* 9$ */, 15000 /* 9% */, 15000 /* 9& */, 15000 /* 9' */, 15000 /* 9( */, 15000 /* 9) */, 15000 /* 9* */, 15000 /* 9+ */, 15000 /* 9, */, 15000 /* 9- */, 15000 /* 9. */, 15000 /* 9/ */, 15000 /* 90 */, 15000 /* 91 */, 15000 /* 92 */, 15000 /* 93 */, 15000 /* 94 */, 15000 /* 95 */, 15000 /* 96 */, 15000 /* 97 */, 15000 /* 98 */, 15000 /* 99 */, 15000 /* 9: */, 15000 /* 9; */, 15000 /* 9< */, 15000 /* 9= */, 15000 /* 9> */, 15000 /* 9? */, 15000 /* 9@ */, 15000 /* 9A */, 15000 /* 9B */, 15000 /* 9C */, 15000 /* 9D */, 15000 /* 9E */, 15000 /* 9F */, 15000 /* 9G */, 15000 /* 9H */, 15000 /* 9I */, 15000 /* 9J */, 15000 /* 9K */, 15000 /* 9L */, 15000 /* 9M */, 15000 /* 9N */, 15000 /* 9O */, 15000 /* 9P */, 15000 /* 9Q */, 15000 /* 9R */, 15000 /* 9S */, 15000 /* 9T */, 15000 /* 9U */, 15000 /* 9V */, 15000 /* 9W */, 15000 /* 9X */, 15000 /* 9Y */, 15000 /* 9Z */, 15000 /* 9[ */, 15000 /* 9\ */, 15000 /* 9] */, 15000 /* 9^ */, 15000 /* 9_ */, 15000 /* 9` */, 15000 /* 9a */, 15000 /* 9b */, 15000 /* 9c */, 15000 /* 9d */, 15000 /* 9e */, 15000 /* 9f */, 15000 /* 9g */, 15000 /* 9h */, 15000 /* 9i */, 15000 /* 9j */, 15000 /* 9k */, 15000 /* 9l */, 15000 /* 9m */, 15000 /* 9n */, 15000 /* 9o */, 15000 /* 9p */, 15000 /* 9q */, 15000 /* 9r */, 15000 /* 9s */, 15000 /* 9t */, 15000 /* 9u */, 15000 /* 9v */, 15000 /* 9w */, 15000 /* 9x */, 15000 /* 9y */, 15000 /* 9z */, 15000 /* 9{ */, 15000 /* 9| */, 15000 /* 9} */, 15000 /* 9~ */}, + {8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* : */, 8250 /* :! 
*/, 8250 /* :" */, 8250 /* :# */, 8250 /* :$ */, 8250 /* :% */, 8250 /* :& */, 8250 /* :' */, 8250 /* :( */, 8250 /* :) */, 8250 /* :* */, 8250 /* :+ */, 8250 /* :, */, 8250 /* :- */, 8250 /* :. */, 8250 /* :/ */, 8250 /* :0 */, 8250 /* :1 */, 8250 /* :2 */, 8250 /* :3 */, 8250 /* :4 */, 8250 /* :5 */, 8250 /* :6 */, 8250 /* :7 */, 8250 /* :8 */, 8250 /* :9 */, 8250 /* :: */, 8250 /* :; */, 8250 /* :< */, 8250 /* := */, 8250 /* :> */, 8250 /* :? */, 8250 /* :@ */, 8250 /* :A */, 8250 /* :B */, 8250 /* :C */, 8250 /* :D */, 8250 /* :E */, 8250 /* :F */, 8250 /* :G */, 8250 /* :H */, 8250 /* :I */, 8250 /* :J */, 8250 /* :K */, 8250 /* :L */, 8250 /* :M */, 8250 /* :N */, 8250 /* :O */, 8250 /* :P */, 8250 /* :Q */, 8250 /* :R */, 8250 /* :S */, 7250 /* :T */, 8250 /* :U */, 7500 /* :V */, 8250 /* :W */, 7875 /* :X */, 7000 /* :Y */, 8250 /* :Z */, 8250 /* :[ */, 8250 /* :\ */, 8250 /* :] */, 8250 /* :^ */, 8250 /* :_ */, 8250 /* :` */, 8250 /* :a */, 8250 /* :b */, 8250 /* :c */, 8250 /* :d */, 8250 /* :e */, 8250 /* :f */, 8250 /* :g */, 8250 /* :h */, 8250 /* :i */, 8250 /* :j */, 8250 /* :k */, 8250 /* :l */, 8250 /* :m */, 8250 /* :n */, 8250 /* :o */, 8250 /* :p */, 8250 /* :q */, 8250 /* :r */, 8250 /* :s */, 8250 /* :t */, 8250 /* :u */, 8250 /* :v */, 8250 /* :w */, 8250 /* :x */, 8250 /* :y */, 8250 /* :z */, 8250 /* :{ */, 8250 /* :| */, 8250 /* :} */, 8250 /* :~ */}, + {8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ; */, 8250 /* ;! */, 8250 /* ;" */, 8250 /* ;# */, 8250 /* ;$ */, 8250 /* ;% */, 8250 /* ;& */, 8250 /* ;' */, 8250 /* ;( */, 8250 /* ;) */, 8250 /* ;* */, 8250 /* ;+ */, 8250 /* ;, */, 8250 /* ;- */, 8250 /* ;. */, 8250 /* ;/ */, 8250 /* ;0 */, 8250 /* ;1 */, 8250 /* ;2 */, 8250 /* ;3 */, 8250 /* ;4 */, 8250 /* ;5 */, 8250 /* ;6 */, 8250 /* ;7 */, 8250 /* ;8 */, 8250 /* ;9 */, 8250 /* ;: */, 8250 /* ;; */, 8250 /* ;< */, 8250 /* ;= */, 8250 /* ;> */, 8250 /* ;? 
*/, 8250 /* ;@ */, 8250 /* ;A */, 8250 /* ;B */, 8250 /* ;C */, 8250 /* ;D */, 8250 /* ;E */, 8250 /* ;F */, 8250 /* ;G */, 8250 /* ;H */, 8250 /* ;I */, 8250 /* ;J */, 8250 /* ;K */, 8250 /* ;L */, 8250 /* ;M */, 8250 /* ;N */, 8250 /* ;O */, 8250 /* ;P */, 8250 /* ;Q */, 8250 /* ;R */, 8250 /* ;S */, 7250 /* ;T */, 8250 /* ;U */, 7500 /* ;V */, 8250 /* ;W */, 7875 /* ;X */, 7000 /* ;Y */, 8250 /* ;Z */, 8250 /* ;[ */, 8250 /* ;\ */, 8250 /* ;] */, 8250 /* ;^ */, 8250 /* ;_ */, 8250 /* ;` */, 8250 /* ;a */, 8250 /* ;b */, 8250 /* ;c */, 8250 /* ;d */, 8250 /* ;e */, 8250 /* ;f */, 8250 /* ;g */, 8250 /* ;h */, 8250 /* ;i */, 8250 /* ;j */, 8250 /* ;k */, 8250 /* ;l */, 8250 /* ;m */, 8250 /* ;n */, 8250 /* ;o */, 8250 /* ;p */, 8250 /* ;q */, 8250 /* ;r */, 8250 /* ;s */, 8250 /* ;t */, 8250 /* ;u */, 8250 /* ;v */, 8250 /* ;w */, 8250 /* ;x */, 8250 /* ;y */, 8250 /* ;z */, 8250 /* ;{ */, 8250 /* ;| */, 8250 /* ;} */, 8250 /* ;~ */}, + {15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* < */, 15000 /* <! */, 15000 /* <" */, 15000 /* <# */, 15000 /* <$ */, 15000 /* <% */, 15000 /* <& */, 15000 /* <' */, 15000 /* <( */, 15000 /* <) */, 15000 /* <* */, 15000 /* <+ */, 15000 /* <, */, 15000 /* <- */, 15000 /* <. */, 15000 /* </ */, 15000 /* <0 */, 15000 /* <1 */, 15000 /* <2 */, 15000 /* <3 */, 15000 /* <4 */, 15000 /* <5 */, 15000 /* <6 */, 15000 /* <7 */, 15000 /* <8 */, 15000 /* <9 */, 15000 /* <: */, 15000 /* <; */, 15000 /* << */, 15000 /* <= */, 15000 /* <> */, 15000 /* <? */, 15000 /* <@ */, 15000 /* <A */, 15000 /* <B */, 15000 /* <C */, 15000 /* <D */, 15000 /* <E */, 15000 /* <F */, 15000 /* <G */, 15000 /* <H */, 15000 /* <I */, 15000 /* <J */, 15000 /* <K */, 15000 /* <L */, 15000 /* <M */, 15000 /* <N */, 15000 /* <O */, 15000 /* <P */, 15000 /* <Q */, 15000 /* <R */, 15000 /* <S */, 15000 /* <T */, 15000 /* <U */, 15000 /* <V */, 15000 /* <W */, 15000 /* <X */, 15000 /* <Y */, 15000 /* <Z */, 15000 /* <[ */, 15000 /* <\ */, 15000 /* <] */, 15000 /* <^ */, 15000 /* <_ */, 15000 /* <` */, 15000 /* <a */, 15000 /* <b */, 15000 /* <c */, 15000 /* <d */, 15000 /* <e */, 15000 /* <f */, 15000 /* <g */, 15000 /* <h */, 15000 /* <i */, 15000 /* <j */, 15000 /* <k */, 15000 /* <l */, 15000 /* <m */, 15000 /* <n */, 15000 /* <o */, 15000 /* <p */, 15000 /* <q */, 15000 /* <r */, 15000 /* <s */, 15000 /* <t */, 15000 /* <u */, 15000 /* <v */, 15000 /* <w */, 15000 /* <x */, 15000 /* <y */, 15000 /* <z */, 15000 /* <{ */, 15000 /* <| */, 15000 /* <} */, 15000 /* <~ */}, + {15000 /* = */, 15000 /* = */, 15000 /* = */, 15000 /* = */, 15000 /* = */, 15000 /* = */, 15000 /* = */, 15000 /* = */, 15000 /* = */, 15000 /* = */, 15000 /* = */, 15000 /* = */, 15000 /* = */, 15000 /* = */, 15000 /* = */, 15000 /* = */, 15000 /* = */, 15000 /* = */, 15000 /* = */, 15000 /* = */, 15000 /* = */, 15000 /* = */, 15000 /* = */, 15000 /* = */, 15000 /* = */, 15000 /* = */, 15000 /* = */, 15000 /* = */, 15000 /* = */, 15000 /* = */, 15000 /* = */, 15000 /* = */, 15000 /* = */, 15000 /* =! */, 15000 /* =" */, 15000 /* =# */, 15000 /* =$ */, 15000 /* =% */, 15000 /* =& */, 15000 /* =' */, 15000 /* =( */, 15000 /* =) */, 15000 /* =* */, 15000 /* =+ */, 15000 /* =, */, 15000 /* =- */, 15000 /* =. */, 15000 /* =/ */, 15000 /* =0 */, 15000 /* =1 */, 15000 /* =2 */, 15000 /* =3 */, 15000 /* =4 */, 15000 /* =5 */, 15000 /* =6 */, 15000 /* =7 */, 15000 /* =8 */, 15000 /* =9 */, 15000 /* =: */, 15000 /* =; */, 15000 /* =< */, 15000 /* == */, 15000 /* => */, 15000 /* =? */, 15000 /* =@ */, 15000 /* =A */, 15000 /* =B */, 15000 /* =C */, 15000 /* =D */, 15000 /* =E */, 15000 /* =F */, 15000 /* =G */, 15000 /* =H */, 15000 /* =I */, 15000 /* =J */, 15000 /* =K */, 15000 /* =L */, 15000 /* =M */, 15000 /* =N */, 15000 /* =O */, 15000 /* =P */, 15000 /* =Q */, 15000 /* =R */, 15000 /* =S */, 15000 /* =T */, 15000 /* =U */, 15000 /* =V */, 15000 /* =W */, 15000 /* =X */, 15000 /* =Y */, 15000 /* =Z */, 15000 /* =[ */, 15000 /* =\ */, 15000 /* =] */, 15000 /* =^ */, 15000 /* =_ */, 15000 /* =` */, 15000 /* =a */, 15000 /* =b */, 15000 /* =c */, 15000 /* =d */, 15000 /* =e */, 15000 /* =f */, 15000 /* =g */, 15000 /* =h */, 15000 /* =i */, 15000 /* =j */, 15000 /* =k */, 15000 /* =l */, 15000 /* =m */, 15000 /* =n */, 15000 /* =o */, 15000 /* =p */, 15000 /* =q */, 15000 /* =r */, 15000 /* =s */, 15000 /* =t */, 15000 /* =u */, 15000 /* =v */, 15000 /* =w */, 15000 /* =x */, 15000 /* =y */, 15000 /* =z */, 15000 /* ={ */, 15000 /* =| */, 15000 /* =} */, 15000 /* =~ */}, + {15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* > */, 15000 /* >! */, 15000 /* >" */, 15000 /* ># */, 15000 /* >$ */, 15000 /* >% */, 15000 /* >& */, 15000 /* >' */, 15000 /* >( */, 15000 /* >) */, 15000 /* >* */, 15000 /* >+ */, 15000 /* >, */, 15000 /* >- */, 15000 /* >. */, 15000 /* >/ */, 15000 /* >0 */, 15000 /* >1 */, 15000 /* >2 */, 15000 /* >3 */, 15000 /* >4 */, 15000 /* >5 */, 15000 /* >6 */, 15000 /* >7 */, 15000 /* >8 */, 15000 /* >9 */, 15000 /* >: */, 15000 /* >; */, 15000 /* >< */, 15000 /* >= */, 15000 /* >> */, 15000 /* >? 
*/, 15000 /* >@ */, 15000 /* >A */, 15000 /* >B */, 15000 /* >C */, 15000 /* >D */, 15000 /* >E */, 15000 /* >F */, 15000 /* >G */, 15000 /* >H */, 15000 /* >I */, 15000 /* >J */, 15000 /* >K */, 15000 /* >L */, 15000 /* >M */, 15000 /* >N */, 15000 /* >O */, 15000 /* >P */, 15000 /* >Q */, 15000 /* >R */, 15000 /* >S */, 15000 /* >T */, 15000 /* >U */, 15000 /* >V */, 15000 /* >W */, 15000 /* >X */, 15000 /* >Y */, 15000 /* >Z */, 15000 /* >[ */, 15000 /* >\ */, 15000 /* >] */, 15000 /* >^ */, 15000 /* >_ */, 15000 /* >` */, 15000 /* >a */, 15000 /* >b */, 15000 /* >c */, 15000 /* >d */, 15000 /* >e */, 15000 /* >f */, 15000 /* >g */, 15000 /* >h */, 15000 /* >i */, 15000 /* >j */, 15000 /* >k */, 15000 /* >l */, 15000 /* >m */, 15000 /* >n */, 15000 /* >o */, 15000 /* >p */, 15000 /* >q */, 15000 /* >r */, 15000 /* >s */, 15000 /* >t */, 15000 /* >u */, 15000 /* >v */, 15000 /* >w */, 15000 /* >x */, 15000 /* >y */, 15000 /* >z */, 15000 /* >{ */, 15000 /* >| */, 15000 /* >} */, 15000 /* >~ */}, + {12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ? */, 12500 /* ?! */, 12500 /* ?" */, 12500 /* ?# */, 12500 /* ?$ */, 12500 /* ?% */, 12500 /* ?& */, 12500 /* ?' */, 12500 /* ?( */, 12500 /* ?) */, 12500 /* ?* */, 12500 /* ?+ */, 9500 /* ?, */, 12500 /* ?- */, 9500 /* ?. */, 12500 /* ?/ */, 12500 /* ?0 */, 12500 /* ?1 */, 12500 /* ?2 */, 12500 /* ?3 */, 12500 /* ?4 */, 12500 /* ?5 */, 12500 /* ?6 */, 12500 /* ?7 */, 12500 /* ?8 */, 12500 /* ?9 */, 12500 /* ?: */, 12500 /* ?; */, 12500 /* ?< */, 12500 /* ?= */, 12500 /* ?> */, 12500 /* ?? */, 12500 /* ?@ */, 11750 /* ?A */, 12500 /* ?B */, 12500 /* ?C */, 12500 /* ?D */, 12500 /* ?E */, 12500 /* ?F */, 12500 /* ?G */, 12500 /* ?H */, 12500 /* ?I */, 12500 /* ?J */, 12500 /* ?K */, 12500 /* ?L */, 12500 /* ?M */, 12500 /* ?N */, 12500 /* ?O */, 12500 /* ?P */, 12500 /* ?Q */, 12500 /* ?R */, 12500 /* ?S */, 12500 /* ?T */, 12500 /* ?U */, 12750 /* ?V */, 12875 /* ?W */, 12500 /* ?X */, 12500 /* ?Y */, 12500 /* ?Z */, 12500 /* ?[ */, 12500 /* ?\ */, 12500 /* ?] */, 12500 /* ?^ */, 12500 /* ?_ */, 12500 /* ?` */, 12500 /* ?a */, 12500 /* ?b */, 12500 /* ?c */, 12500 /* ?d */, 12500 /* ?e */, 12500 /* ?f */, 12500 /* ?g */, 12500 /* ?h */, 12500 /* ?i */, 12500 /* ?j */, 12500 /* ?k */, 12500 /* ?l */, 12500 /* ?m */, 12500 /* ?n */, 12500 /* ?o */, 12500 /* ?p */, 12500 /* ?q */, 12500 /* ?r */, 12500 /* ?s */, 12500 /* ?t */, 12500 /* ?u */, 12500 /* ?v */, 12500 /* ?w */, 12500 /* ?x */, 12500 /* ?y */, 12500 /* ?z */, 12500 /* ?{ */, 12500 /* ?| */, 12500 /* ?} */, 12500 /* ?~ */}, + {22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @ */, 22575 /* @! 
*/, 22575 /* @" */, 22575 /* @# */, 22575 /* @$ */, 22575 /* @% */, 22575 /* @& */, 22575 /* @' */, 22575 /* @( */, 22575 /* @) */, 22575 /* @* */, 22575 /* @+ */, 22575 /* @, */, 22575 /* @- */, 22575 /* @. */, 22575 /* @/ */, 22575 /* @0 */, 22575 /* @1 */, 22575 /* @2 */, 22575 /* @3 */, 22575 /* @4 */, 22575 /* @5 */, 22575 /* @6 */, 22575 /* @7 */, 22575 /* @8 */, 22575 /* @9 */, 22575 /* @: */, 22575 /* @; */, 22575 /* @< */, 22575 /* @= */, 22575 /* @> */, 22575 /* @? */, 22575 /* @@ */, 22075 /* @A */, 22575 /* @B */, 22575 /* @C */, 22575 /* @D */, 22575 /* @E */, 22575 /* @F */, 22575 /* @G */, 22575 /* @H */, 22075 /* @I */, 21825 /* @J */, 22575 /* @K */, 22575 /* @L */, 22575 /* @M */, 22575 /* @N */, 22575 /* @O */, 22575 /* @P */, 22575 /* @Q */, 22575 /* @R */, 22325 /* @S */, 21825 /* @T */, 22575 /* @U */, 21825 /* @V */, 22075 /* @W */, 21575 /* @X */, 21575 /* @Y */, 22200 /* @Z */, 22575 /* @[ */, 22575 /* @\ */, 22575 /* @] */, 22575 /* @^ */, 22575 /* @_ */, 22575 /* @` */, 22575 /* @a */, 22575 /* @b */, 22575 /* @c */, 22575 /* @d */, 22575 /* @e */, 22575 /* @f */, 22450 /* @g */, 22575 /* @h */, 22575 /* @i */, 22575 /* @j */, 22575 /* @k */, 22575 /* @l */, 22575 /* @m */, 22575 /* @n */, 22575 /* @o */, 22575 /* @p */, 22575 /* @q */, 22575 /* @r */, 22575 /* @s */, 22575 /* @t */, 22575 /* @u */, 22575 /* @v */, 22325 /* @w */, 22075 /* @x */, 22325 /* @y */, 22325 /* @z */, 22575 /* @{ */, 22575 /* @| */, 22575 /* @} */, 22575 /* @~ */}, + {17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A */, 17125 /* A! */, 15875 /* A" */, 17125 /* A# */, 17125 /* A$ */, 17125 /* A% */, 16875 /* A& */, 15875 /* A' */, 17125 /* A( */, 17125 /* A) */, 15875 /* A* */, 17125 /* A+ */, 17625 /* A, */, 16750 /* A- */, 17625 /* A. */, 17125 /* A/ */, 17125 /* A0 */, 17125 /* A1 */, 17125 /* A2 */, 17125 /* A3 */, 17125 /* A4 */, 17125 /* A5 */, 17125 /* A6 */, 17125 /* A7 */, 17125 /* A8 */, 17125 /* A9 */, 17125 /* A: */, 17125 /* A; */, 17125 /* A< */, 17125 /* A= */, 17125 /* A> */, 15875 /* A? 
*/, 16625 /* A@ */, 17375 /* AA */, 17125 /* AB */, 16375 /* AC */, 17125 /* AD */, 17125 /* AE */, 17125 /* AF */, 16375 /* AG */, 17125 /* AH */, 17125 /* AI */, 16775 /* AJ */, 17125 /* AK */, 17125 /* AL */, 17125 /* AM */, 17125 /* AN */, 16375 /* AO */, 17125 /* AP */, 16375 /* AQ */, 17125 /* AR */, 17125 /* AS */, 15500 /* AT */, 16525 /* AU */, 15775 /* AV */, 16450 /* AW */, 17250 /* AX */, 15375 /* AY */, 17125 /* AZ */, 17125 /* A[ */, 17125 /* A\ */, 17125 /* A] */, 17125 /* A^ */, 17500 /* A_ */, 17125 /* A` */, 17200 /* Aa */, 17125 /* Ab */, 17125 /* Ac */, 17125 /* Ad */, 17125 /* Ae */, 17125 /* Af */, 17125 /* Ag */, 17125 /* Ah */, 17125 /* Ai */, 17125 /* Aj */, 17125 /* Ak */, 17125 /* Al */, 17125 /* Am */, 17125 /* An */, 17125 /* Ao */, 17125 /* Ap */, 17125 /* Aq */, 17125 /* Ar */, 17125 /* As */, 16650 /* At */, 17125 /* Au */, 16375 /* Av */, 16625 /* Aw */, 17125 /* Ax */, 16750 /* Ay */, 17375 /* Az */, 17125 /* A{ */, 17125 /* A| */, 17125 /* A} */, 17125 /* A~ */}, + {16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B */, 16675 /* B! */, 16675 /* B" */, 16675 /* B# */, 16675 /* B$ */, 16675 /* B% */, 16675 /* B& */, 16675 /* B' */, 16675 /* B( */, 16675 /* B) */, 16675 /* B* */, 16675 /* B+ */, 16300 /* B, */, 16675 /* B- */, 16300 /* B. */, 16050 /* B/ */, 16675 /* B0 */, 16675 /* B1 */, 16675 /* B2 */, 16675 /* B3 */, 16675 /* B4 */, 16675 /* B5 */, 16675 /* B6 */, 16675 /* B7 */, 16675 /* B8 */, 16675 /* B9 */, 16675 /* B: */, 16675 /* B; */, 16675 /* B< */, 16675 /* B= */, 16675 /* B> */, 16675 /* B? */, 16675 /* B@ */, 16300 /* BA */, 16675 /* BB */, 16675 /* BC */, 16675 /* BD */, 16675 /* BE */, 16675 /* BF */, 16675 /* BG */, 16675 /* BH */, 16675 /* BI */, 16425 /* BJ */, 16675 /* BK */, 16675 /* BL */, 16675 /* BM */, 16675 /* BN */, 16675 /* BO */, 16675 /* BP */, 16675 /* BQ */, 16675 /* BR */, 16675 /* BS */, 16025 /* BT */, 16675 /* BU */, 15975 /* BV */, 16475 /* BW */, 16150 /* BX */, 15600 /* BY */, 16675 /* BZ */, 16675 /* B[ */, 16675 /* B\ */, 16675 /* B] */, 16675 /* B^ */, 15075 /* B_ */, 16675 /* B` */, 16675 /* Ba */, 16675 /* Bb */, 16675 /* Bc */, 16675 /* Bd */, 16675 /* Be */, 16625 /* Bf */, 16600 /* Bg */, 16675 /* Bh */, 16675 /* Bi */, 16675 /* Bj */, 16675 /* Bk */, 16675 /* Bl */, 16675 /* Bm */, 16675 /* Bn */, 16675 /* Bo */, 16675 /* Bp */, 16675 /* Bq */, 16675 /* Br */, 16675 /* Bs */, 16550 /* Bt */, 16675 /* Bu */, 16475 /* Bv */, 16500 /* Bw */, 16375 /* Bx */, 16475 /* By */, 16675 /* Bz */, 16675 /* B{ */, 16675 /* B| */, 16675 /* B} */, 16675 /* B~ */}, + {16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C */, 16275 /* C! 
*/, 16275 /* C" */, 16275 /* C# */, 16275 /* C$ */, 16275 /* C% */, 16025 /* C& */, 16275 /* C' */, 16275 /* C( */, 16275 /* C) */, 16275 /* C* */, 16275 /* C+ */, 16275 /* C, */, 16025 /* C- */, 16275 /* C. */, 15775 /* C/ */, 16275 /* C0 */, 16275 /* C1 */, 16275 /* C2 */, 16275 /* C3 */, 16275 /* C4 */, 16275 /* C5 */, 16275 /* C6 */, 16275 /* C7 */, 16275 /* C8 */, 16275 /* C9 */, 16275 /* C: */, 16275 /* C; */, 16275 /* C< */, 16275 /* C= */, 16275 /* C> */, 16275 /* C? */, 16025 /* C@ */, 16075 /* CA */, 16275 /* CB */, 15950 /* CC */, 16275 /* CD */, 16275 /* CE */, 16275 /* CF */, 15950 /* CG */, 16275 /* CH */, 16275 /* CI */, 16275 /* CJ */, 16275 /* CK */, 16275 /* CL */, 16275 /* CM */, 16275 /* CN */, 15950 /* CO */, 16275 /* CP */, 15950 /* CQ */, 16275 /* CR */, 16275 /* CS */, 16275 /* CT */, 16275 /* CU */, 16275 /* CV */, 16275 /* CW */, 15775 /* CX */, 16025 /* CY */, 16275 /* CZ */, 16275 /* C[ */, 16275 /* C\ */, 16275 /* C] */, 16275 /* C^ */, 15175 /* C_ */, 16275 /* C` */, 16275 /* Ca */, 16275 /* Cb */, 16275 /* Cc */, 16275 /* Cd */, 16275 /* Ce */, 16275 /* Cf */, 16275 /* Cg */, 16275 /* Ch */, 16275 /* Ci */, 16275 /* Cj */, 16275 /* Ck */, 16275 /* Cl */, 16275 /* Cm */, 16275 /* Cn */, 16275 /* Co */, 16275 /* Cp */, 16275 /* Cq */, 16275 /* Cr */, 16275 /* Cs */, 16275 /* Ct */, 16275 /* Cu */, 16275 /* Cv */, 16275 /* Cw */, 16275 /* Cx */, 16275 /* Cy */, 16275 /* Cz */, 16275 /* C{ */, 16275 /* C| */, 16275 /* C} */, 16275 /* C~ */}, + {17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D */, 17425 /* D! */, 17175 /* D" */, 17425 /* D# */, 17425 /* D$ */, 17425 /* D% */, 17425 /* D& */, 17175 /* D' */, 17425 /* D( */, 17425 /* D) */, 17175 /* D* */, 17425 /* D+ */, 16425 /* D, */, 17800 /* D- */, 16425 /* D. */, 16300 /* D/ */, 17425 /* D0 */, 17425 /* D1 */, 17425 /* D2 */, 17425 /* D3 */, 17425 /* D4 */, 17425 /* D5 */, 17425 /* D6 */, 17425 /* D7 */, 17425 /* D8 */, 17425 /* D9 */, 17425 /* D: */, 17425 /* D; */, 17425 /* D< */, 17425 /* D= */, 17425 /* D> */, 17425 /* D? 
*/, 17425 /* D@ */, 16500 /* DA */, 17425 /* DB */, 17425 /* DC */, 17425 /* DD */, 17425 /* DE */, 17425 /* DF */, 17425 /* DG */, 17425 /* DH */, 17425 /* DI */, 16675 /* DJ */, 17425 /* DK */, 17425 /* DL */, 17425 /* DM */, 17425 /* DN */, 17425 /* DO */, 17425 /* DP */, 17425 /* DQ */, 17425 /* DR */, 17425 /* DS */, 16800 /* DT */, 17425 /* DU */, 16650 /* DV */, 16850 /* DW */, 16675 /* DX */, 16275 /* DY */, 16800 /* DZ */, 17425 /* D[ */, 17425 /* D\ */, 17425 /* D] */, 17425 /* D^ */, 17425 /* D_ */, 17425 /* D` */, 17425 /* Da */, 17425 /* Db */, 17425 /* Dc */, 17425 /* Dd */, 17425 /* De */, 17425 /* Df */, 17425 /* Dg */, 17425 /* Dh */, 17425 /* Di */, 17425 /* Dj */, 17425 /* Dk */, 17425 /* Dl */, 17425 /* Dm */, 17425 /* Dn */, 17425 /* Do */, 17425 /* Dp */, 17425 /* Dq */, 17425 /* Dr */, 17425 /* Ds */, 17425 /* Dt */, 17425 /* Du */, 17425 /* Dv */, 17425 /* Dw */, 17425 /* Dx */, 17425 /* Dy */, 17425 /* Dz */, 17425 /* D{ */, 17425 /* D| */, 17425 /* D} */, 17425 /* D~ */}, + {15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E */, 15175 /* E! */, 15175 /* E" */, 15175 /* E# */, 15175 /* E$ */, 15175 /* E% */, 14800 /* E& */, 15175 /* E' */, 15175 /* E( */, 15175 /* E) */, 15175 /* E* */, 15175 /* E+ */, 15175 /* E, */, 15175 /* E- */, 15175 /* E. */, 15175 /* E/ */, 15175 /* E0 */, 15175 /* E1 */, 15175 /* E2 */, 15175 /* E3 */, 15175 /* E4 */, 15175 /* E5 */, 15175 /* E6 */, 15175 /* E7 */, 15175 /* E8 */, 15175 /* E9 */, 15175 /* E: */, 15175 /* E; */, 15175 /* E< */, 15175 /* E= */, 15175 /* E> */, 15175 /* E? */, 14925 /* E@ */, 15175 /* EA */, 15175 /* EB */, 15175 /* EC */, 15175 /* ED */, 15175 /* EE */, 15175 /* EF */, 15175 /* EG */, 15175 /* EH */, 15175 /* EI */, 14675 /* EJ */, 15175 /* EK */, 15175 /* EL */, 15175 /* EM */, 15175 /* EN */, 15175 /* EO */, 15175 /* EP */, 15175 /* EQ */, 15175 /* ER */, 15175 /* ES */, 15175 /* ET */, 15175 /* EU */, 15175 /* EV */, 15175 /* EW */, 15175 /* EX */, 15175 /* EY */, 15175 /* EZ */, 15175 /* E[ */, 15175 /* E\ */, 15175 /* E] */, 15175 /* E^ */, 15175 /* E_ */, 15175 /* E` */, 15175 /* Ea */, 15175 /* Eb */, 14925 /* Ec */, 14925 /* Ed */, 14925 /* Ee */, 15175 /* Ef */, 15050 /* Eg */, 15175 /* Eh */, 15175 /* Ei */, 15175 /* Ej */, 15175 /* Ek */, 15175 /* El */, 15175 /* Em */, 15175 /* En */, 14925 /* Eo */, 15175 /* Ep */, 14925 /* Eq */, 15175 /* Er */, 15175 /* Es */, 15175 /* Et */, 15175 /* Eu */, 15175 /* Ev */, 15175 /* Ew */, 15175 /* Ex */, 15175 /* Ey */, 15175 /* Ez */, 15175 /* E{ */, 15175 /* E| */, 15175 /* E} */, 15175 /* E~ */}, + {14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F */, 14625 /* F! 
*/, 14750 /* F" */, 14625 /* F# */, 14625 /* F$ */, 14625 /* F% */, 13625 /* F& */, 14750 /* F' */, 14625 /* F( */, 15000 /* F) */, 14750 /* F* */, 14625 /* F+ */, 12125 /* F, */, 14625 /* F- */, 12125 /* F. */, 12500 /* F/ */, 14625 /* F0 */, 14625 /* F1 */, 14625 /* F2 */, 14625 /* F3 */, 14625 /* F4 */, 14625 /* F5 */, 14625 /* F6 */, 14625 /* F7 */, 14625 /* F8 */, 14625 /* F9 */, 14625 /* F: */, 14625 /* F; */, 14625 /* F< */, 14625 /* F= */, 14625 /* F> */, 14625 /* F? */, 13875 /* F@ */, 12450 /* FA */, 14625 /* FB */, 13875 /* FC */, 14625 /* FD */, 14625 /* FE */, 14625 /* FF */, 13875 /* FG */, 14625 /* FH */, 14625 /* FI */, 12750 /* FJ */, 14625 /* FK */, 14625 /* FL */, 14625 /* FM */, 14625 /* FN */, 13875 /* FO */, 14625 /* FP */, 13875 /* FQ */, 14625 /* FR */, 14625 /* FS */, 14625 /* FT */, 14625 /* FU */, 14625 /* FV */, 14625 /* FW */, 14625 /* FX */, 14625 /* FY */, 14625 /* FZ */, 14625 /* F[ */, 14625 /* F\ */, 14625 /* F] */, 14625 /* F^ */, 11125 /* F_ */, 14625 /* F` */, 13625 /* Fa */, 14625 /* Fb */, 13825 /* Fc */, 13775 /* Fd */, 13825 /* Fe */, 14450 /* Ff */, 13525 /* Fg */, 14625 /* Fh */, 14625 /* Fi */, 14375 /* Fj */, 14625 /* Fk */, 14625 /* Fl */, 13875 /* Fm */, 13875 /* Fn */, 13825 /* Fo */, 13875 /* Fp */, 13775 /* Fq */, 13875 /* Fr */, 13625 /* Fs */, 14250 /* Ft */, 14000 /* Fu */, 14125 /* Fv */, 14250 /* Fw */, 13625 /* Fx */, 14125 /* Fy */, 14000 /* Fz */, 14625 /* F{ */, 14625 /* F| */, 14625 /* F} */, 14625 /* F~ */}, + {17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G */, 17975 /* G! */, 17725 /* G" */, 17975 /* G# */, 17975 /* G$ */, 17975 /* G% */, 17975 /* G& */, 17725 /* G' */, 17975 /* G( */, 17975 /* G) */, 17725 /* G* */, 17975 /* G+ */, 17975 /* G, */, 18225 /* G- */, 17975 /* G. */, 17975 /* G/ */, 17975 /* G0 */, 17975 /* G1 */, 17975 /* G2 */, 17975 /* G3 */, 17975 /* G4 */, 17975 /* G5 */, 17975 /* G6 */, 17975 /* G7 */, 17975 /* G8 */, 17975 /* G9 */, 17975 /* G: */, 17975 /* G; */, 17975 /* G< */, 17975 /* G= */, 17975 /* G> */, 17975 /* G? 
*/, 17975 /* G@ */, 17975 /* GA */, 17975 /* GB */, 17975 /* GC */, 17975 /* GD */, 17975 /* GE */, 17975 /* GF */, 17975 /* GG */, 17975 /* GH */, 17975 /* GI */, 17675 /* GJ */, 17975 /* GK */, 17975 /* GL */, 17975 /* GM */, 17975 /* GN */, 17975 /* GO */, 17975 /* GP */, 17975 /* GQ */, 17975 /* GR */, 17975 /* GS */, 17025 /* GT */, 17975 /* GU */, 17225 /* GV */, 17650 /* GW */, 17725 /* GX */, 16875 /* GY */, 17975 /* GZ */, 17975 /* G[ */, 17975 /* G\ */, 17975 /* G] */, 17975 /* G^ */, 17975 /* G_ */, 17975 /* G` */, 17975 /* Ga */, 17975 /* Gb */, 17975 /* Gc */, 17975 /* Gd */, 17975 /* Ge */, 17975 /* Gf */, 17975 /* Gg */, 17975 /* Gh */, 17975 /* Gi */, 17975 /* Gj */, 17975 /* Gk */, 17975 /* Gl */, 17975 /* Gm */, 17975 /* Gn */, 17975 /* Go */, 17975 /* Gp */, 17975 /* Gq */, 17975 /* Gr */, 17975 /* Gs */, 17975 /* Gt */, 17975 /* Gu */, 17975 /* Gv */, 17975 /* Gw */, 17975 /* Gx */, 17975 /* Gy */, 17975 /* Gz */, 17975 /* G{ */, 17975 /* G| */, 17975 /* G} */, 17975 /* G~ */}, + {18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H */, 18100 /* H! */, 18100 /* H" */, 18100 /* H# */, 18100 /* H$ */, 18100 /* H% */, 18100 /* H& */, 18100 /* H' */, 18100 /* H( */, 18100 /* H) */, 18100 /* H* */, 18100 /* H+ */, 18100 /* H, */, 18100 /* H- */, 18100 /* H. */, 18100 /* H/ */, 18100 /* H0 */, 18100 /* H1 */, 18100 /* H2 */, 18100 /* H3 */, 18100 /* H4 */, 18100 /* H5 */, 18100 /* H6 */, 18100 /* H7 */, 18100 /* H8 */, 18100 /* H9 */, 18100 /* H: */, 18100 /* H; */, 18100 /* H< */, 18100 /* H= */, 18100 /* H> */, 18100 /* H? */, 18100 /* H@ */, 18100 /* HA */, 18100 /* HB */, 18100 /* HC */, 18100 /* HD */, 18100 /* HE */, 18100 /* HF */, 18100 /* HG */, 18100 /* HH */, 18100 /* HI */, 18100 /* HJ */, 18100 /* HK */, 18100 /* HL */, 18100 /* HM */, 18100 /* HN */, 18100 /* HO */, 18100 /* HP */, 18100 /* HQ */, 18100 /* HR */, 18100 /* HS */, 18100 /* HT */, 18100 /* HU */, 18100 /* HV */, 18100 /* HW */, 18100 /* HX */, 18100 /* HY */, 18100 /* HZ */, 18100 /* H[ */, 18100 /* H\ */, 18100 /* H] */, 18100 /* H^ */, 18100 /* H_ */, 18100 /* H` */, 18100 /* Ha */, 18100 /* Hb */, 18100 /* Hc */, 18100 /* Hd */, 18100 /* He */, 18100 /* Hf */, 18100 /* Hg */, 18100 /* Hh */, 18100 /* Hi */, 18100 /* Hj */, 18100 /* Hk */, 18100 /* Hl */, 18100 /* Hm */, 18100 /* Hn */, 18100 /* Ho */, 18100 /* Hp */, 18100 /* Hq */, 18100 /* Hr */, 18100 /* Hs */, 18100 /* Ht */, 18100 /* Hu */, 18100 /* Hv */, 18100 /* Hw */, 18100 /* Hx */, 18100 /* Hy */, 18100 /* Hz */, 18100 /* H{ */, 18100 /* H| */, 18100 /* H} */, 18100 /* H~ */}, + {10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I */, 10800 /* I! 
*/, 10800 /* I" */, 10800 /* I# */, 10800 /* I$ */, 10800 /* I% */, 10800 /* I& */, 10800 /* I' */, 10800 /* I( */, 10800 /* I) */, 10800 /* I* */, 10800 /* I+ */, 10800 /* I, */, 10050 /* I- */, 10800 /* I. */, 10800 /* I/ */, 10800 /* I0 */, 10800 /* I1 */, 10800 /* I2 */, 10800 /* I3 */, 10800 /* I4 */, 10800 /* I5 */, 10800 /* I6 */, 10800 /* I7 */, 10800 /* I8 */, 10800 /* I9 */, 10800 /* I: */, 10800 /* I; */, 10800 /* I< */, 10800 /* I= */, 10800 /* I> */, 10800 /* I? */, 10300 /* I@ */, 10800 /* IA */, 10800 /* IB */, 10800 /* IC */, 10800 /* ID */, 10800 /* IE */, 10800 /* IF */, 10800 /* IG */, 10800 /* IH */, 10800 /* II */, 10800 /* IJ */, 10800 /* IK */, 10800 /* IL */, 10800 /* IM */, 10800 /* IN */, 10800 /* IO */, 10800 /* IP */, 10800 /* IQ */, 10800 /* IR */, 10800 /* IS */, 10700 /* IT */, 10800 /* IU */, 10800 /* IV */, 10800 /* IW */, 10800 /* IX */, 10800 /* IY */, 10800 /* IZ */, 10800 /* I[ */, 10800 /* I\ */, 10800 /* I] */, 10800 /* I^ */, 10800 /* I_ */, 10800 /* I` */, 10800 /* Ia */, 10800 /* Ib */, 10675 /* Ic */, 10675 /* Id */, 10675 /* Ie */, 10800 /* If */, 10800 /* Ig */, 10800 /* Ih */, 10800 /* Ii */, 10800 /* Ij */, 10800 /* Ik */, 10800 /* Il */, 10800 /* Im */, 10800 /* In */, 10675 /* Io */, 10800 /* Ip */, 10675 /* Iq */, 10800 /* Ir */, 10675 /* Is */, 10800 /* It */, 10800 /* Iu */, 10675 /* Iv */, 10800 /* Iw */, 10800 /* Ix */, 10675 /* Iy */, 10800 /* Iz */, 10800 /* I{ */, 10800 /* I| */, 10800 /* I} */, 10800 /* I~ */}, + {13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J */, 13975 /* J! */, 13975 /* J" */, 13975 /* J# */, 13975 /* J$ */, 13975 /* J% */, 13975 /* J& */, 13975 /* J' */, 13975 /* J( */, 13975 /* J) */, 13975 /* J* */, 13975 /* J+ */, 13350 /* J, */, 13975 /* J- */, 13350 /* J. */, 13100 /* J/ */, 13975 /* J0 */, 13975 /* J1 */, 13975 /* J2 */, 13975 /* J3 */, 13975 /* J4 */, 13975 /* J5 */, 13975 /* J6 */, 13975 /* J7 */, 13975 /* J8 */, 13975 /* J9 */, 13975 /* J: */, 13975 /* J; */, 13975 /* J< */, 13975 /* J= */, 13975 /* J> */, 13975 /* J? 
*/, 13975 /* J@ */, 13450 /* JA */, 13975 /* JB */, 13975 /* JC */, 13975 /* JD */, 13975 /* JE */, 13975 /* JF */, 13975 /* JG */, 13975 /* JH */, 13975 /* JI */, 13675 /* JJ */, 13975 /* JK */, 13975 /* JL */, 13975 /* JM */, 13975 /* JN */, 13975 /* JO */, 13975 /* JP */, 13975 /* JQ */, 13975 /* JR */, 13975 /* JS */, 13975 /* JT */, 13975 /* JU */, 13975 /* JV */, 13975 /* JW */, 13600 /* JX */, 13975 /* JY */, 13975 /* JZ */, 13975 /* J[ */, 13975 /* J\ */, 13975 /* J] */, 13975 /* J^ */, 11850 /* J_ */, 13975 /* J` */, 13975 /* Ja */, 13975 /* Jb */, 13975 /* Jc */, 13975 /* Jd */, 13975 /* Je */, 13975 /* Jf */, 13975 /* Jg */, 13975 /* Jh */, 13975 /* Ji */, 13975 /* Jj */, 13975 /* Jk */, 13975 /* Jl */, 13975 /* Jm */, 13975 /* Jn */, 13975 /* Jo */, 13975 /* Jp */, 13975 /* Jq */, 13975 /* Jr */, 13975 /* Js */, 13975 /* Jt */, 13975 /* Ju */, 13975 /* Jv */, 13975 /* Jw */, 13975 /* Jx */, 13975 /* Jy */, 13975 /* Jz */, 13975 /* J{ */, 13975 /* J| */, 13975 /* J} */, 13975 /* J~ */}, + {17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K */, 17400 /* K! */, 17150 /* K" */, 17400 /* K# */, 17400 /* K$ */, 17400 /* K% */, 16650 /* K& */, 17150 /* K' */, 17400 /* K( */, 17650 /* K) */, 17150 /* K* */, 17400 /* K+ */, 17650 /* K, */, 16275 /* K- */, 17650 /* K. */, 17400 /* K/ */, 17400 /* K0 */, 17400 /* K1 */, 17400 /* K2 */, 17400 /* K3 */, 17400 /* K4 */, 17400 /* K5 */, 17400 /* K6 */, 17400 /* K7 */, 17400 /* K8 */, 17400 /* K9 */, 17400 /* K: */, 17400 /* K; */, 17400 /* K< */, 17400 /* K= */, 17400 /* K> */, 17400 /* K? */, 16400 /* K@ */, 17400 /* KA */, 17400 /* KB */, 16400 /* KC */, 17400 /* KD */, 17400 /* KE */, 17400 /* KF */, 16400 /* KG */, 17400 /* KH */, 17400 /* KI */, 16900 /* KJ */, 17400 /* KK */, 17400 /* KL */, 17400 /* KM */, 17400 /* KN */, 16400 /* KO */, 17400 /* KP */, 16400 /* KQ */, 17400 /* KR */, 17150 /* KS */, 16775 /* KT */, 17400 /* KU */, 16775 /* KV */, 17150 /* KW */, 17400 /* KX */, 16900 /* KY */, 17400 /* KZ */, 17400 /* K[ */, 17400 /* K\ */, 17400 /* K] */, 17400 /* K^ */, 17400 /* K_ */, 17400 /* K` */, 17150 /* Ka */, 17400 /* Kb */, 16775 /* Kc */, 16775 /* Kd */, 16775 /* Ke */, 16900 /* Kf */, 17150 /* Kg */, 17400 /* Kh */, 17400 /* Ki */, 17400 /* Kj */, 17400 /* Kk */, 17150 /* Kl */, 17150 /* Km */, 17150 /* Kn */, 16775 /* Ko */, 17150 /* Kp */, 16775 /* Kq */, 17150 /* Kr */, 17400 /* Ks */, 16650 /* Kt */, 16900 /* Ku */, 16900 /* Kv */, 16900 /* Kw */, 17275 /* Kx */, 16900 /* Ky */, 17650 /* Kz */, 17400 /* K{ */, 17400 /* K| */, 17400 /* K} */, 17400 /* K~ */}, + {13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L */, 13250 /* L! 
*/, 11000 /* L" */, 13250 /* L# */, 13250 /* L$ */, 13250 /* L% */, 13125 /* L& */, 11000 /* L' */, 13250 /* L( */, 13250 /* L) */, 11000 /* L* */, 13250 /* L+ */, 13750 /* L, */, 12375 /* L- */, 13750 /* L. */, 13500 /* L/ */, 13250 /* L0 */, 13250 /* L1 */, 13250 /* L2 */, 13250 /* L3 */, 13250 /* L4 */, 13250 /* L5 */, 13250 /* L6 */, 13250 /* L7 */, 13250 /* L8 */, 13250 /* L9 */, 13250 /* L: */, 13250 /* L; */, 13250 /* L< */, 13250 /* L= */, 13250 /* L> */, 11750 /* L? */, 13000 /* L@ */, 13675 /* LA */, 13250 /* LB */, 12250 /* LC */, 13250 /* LD */, 13250 /* LE */, 13250 /* LF */, 12250 /* LG */, 13250 /* LH */, 13250 /* LI */, 13250 /* LJ */, 13250 /* LK */, 13250 /* LL */, 13250 /* LM */, 13250 /* LN */, 12250 /* LO */, 13250 /* LP */, 12250 /* LQ */, 13250 /* LR */, 13250 /* LS */, 11400 /* LT */, 12425 /* LU */, 11450 /* LV */, 12225 /* LW */, 13250 /* LX */, 11025 /* LY */, 13250 /* LZ */, 13250 /* L[ */, 13250 /* L\ */, 13250 /* L] */, 13250 /* L^ */, 13750 /* L_ */, 13250 /* L` */, 13350 /* La */, 13250 /* Lb */, 13000 /* Lc */, 13025 /* Ld */, 13000 /* Le */, 13250 /* Lf */, 13250 /* Lg */, 13250 /* Lh */, 13250 /* Li */, 13250 /* Lj */, 13250 /* Lk */, 13250 /* Ll */, 13250 /* Lm */, 13250 /* Ln */, 13000 /* Lo */, 13250 /* Lp */, 13025 /* Lq */, 13250 /* Lr */, 13250 /* Ls */, 12750 /* Lt */, 13125 /* Lu */, 12000 /* Lv */, 12500 /* Lw */, 13500 /* Lx */, 12375 /* Ly */, 13525 /* Lz */, 13250 /* L{ */, 13250 /* L| */, 13250 /* L} */, 13250 /* L~ */}, + {20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M */, 20475 /* M! */, 20475 /* M" */, 20475 /* M# */, 20475 /* M$ */, 20475 /* M% */, 20475 /* M& */, 20475 /* M' */, 20475 /* M( */, 20475 /* M) */, 20475 /* M* */, 20475 /* M+ */, 20475 /* M, */, 20475 /* M- */, 20475 /* M. */, 20475 /* M/ */, 20475 /* M0 */, 20475 /* M1 */, 20475 /* M2 */, 20475 /* M3 */, 20475 /* M4 */, 20475 /* M5 */, 20475 /* M6 */, 20475 /* M7 */, 20475 /* M8 */, 20475 /* M9 */, 20475 /* M: */, 20475 /* M; */, 20475 /* M< */, 20475 /* M= */, 20475 /* M> */, 20475 /* M? 
*/, 20475 /* M@ */, 20475 /* MA */, 20475 /* MB */, 20475 /* MC */, 20475 /* MD */, 20475 /* ME */, 20475 /* MF */, 20475 /* MG */, 20475 /* MH */, 20475 /* MI */, 20475 /* MJ */, 20475 /* MK */, 20475 /* ML */, 20475 /* MM */, 20475 /* MN */, 20475 /* MO */, 20475 /* MP */, 20475 /* MQ */, 20475 /* MR */, 20475 /* MS */, 20475 /* MT */, 20475 /* MU */, 20475 /* MV */, 20475 /* MW */, 20475 /* MX */, 20475 /* MY */, 20475 /* MZ */, 20475 /* M[ */, 20475 /* M\ */, 20475 /* M] */, 20475 /* M^ */, 20475 /* M_ */, 20475 /* M` */, 20475 /* Ma */, 20475 /* Mb */, 20475 /* Mc */, 20475 /* Md */, 20475 /* Me */, 20475 /* Mf */, 20475 /* Mg */, 20475 /* Mh */, 20475 /* Mi */, 20475 /* Mj */, 20475 /* Mk */, 20475 /* Ml */, 20475 /* Mm */, 20475 /* Mn */, 20475 /* Mo */, 20475 /* Mp */, 20475 /* Mq */, 20475 /* Mr */, 20475 /* Ms */, 20475 /* Mt */, 20475 /* Mu */, 20475 /* Mv */, 20475 /* Mw */, 20475 /* Mx */, 20475 /* My */, 20475 /* Mz */, 20475 /* M{ */, 20475 /* M| */, 20475 /* M} */, 20475 /* M~ */}, + {18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N */, 18100 /* N! */, 18100 /* N" */, 18100 /* N# */, 18100 /* N$ */, 18100 /* N% */, 18100 /* N& */, 18100 /* N' */, 18100 /* N( */, 18100 /* N) */, 18100 /* N* */, 18100 /* N+ */, 18100 /* N, */, 18100 /* N- */, 18100 /* N. */, 18100 /* N/ */, 18100 /* N0 */, 18100 /* N1 */, 18100 /* N2 */, 18100 /* N3 */, 18100 /* N4 */, 18100 /* N5 */, 18100 /* N6 */, 18100 /* N7 */, 18100 /* N8 */, 18100 /* N9 */, 18100 /* N: */, 18100 /* N; */, 18100 /* N< */, 18100 /* N= */, 18100 /* N> */, 18100 /* N? */, 18100 /* N@ */, 18100 /* NA */, 18100 /* NB */, 18100 /* NC */, 18100 /* ND */, 18100 /* NE */, 18100 /* NF */, 18100 /* NG */, 18100 /* NH */, 18100 /* NI */, 18100 /* NJ */, 18100 /* NK */, 18100 /* NL */, 18100 /* NM */, 18100 /* NN */, 18100 /* NO */, 18100 /* NP */, 18100 /* NQ */, 18100 /* NR */, 18100 /* NS */, 18100 /* NT */, 18100 /* NU */, 18100 /* NV */, 18100 /* NW */, 18100 /* NX */, 18100 /* NY */, 18100 /* NZ */, 18100 /* N[ */, 18100 /* N\ */, 18100 /* N] */, 18100 /* N^ */, 18100 /* N_ */, 18100 /* N` */, 18100 /* Na */, 18100 /* Nb */, 18100 /* Nc */, 18100 /* Nd */, 18100 /* Ne */, 18100 /* Nf */, 18100 /* Ng */, 18100 /* Nh */, 18100 /* Ni */, 18100 /* Nj */, 18100 /* Nk */, 18100 /* Nl */, 18100 /* Nm */, 18100 /* Nn */, 18100 /* No */, 18100 /* Np */, 18100 /* Nq */, 18100 /* Nr */, 18100 /* Ns */, 18100 /* Nt */, 18100 /* Nu */, 18100 /* Nv */, 18100 /* Nw */, 18100 /* Nx */, 18100 /* Ny */, 18100 /* Nz */, 18100 /* N{ */, 18100 /* N| */, 18100 /* N} */, 18100 /* N~ */}, + {17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O */, 17850 /* O! 
*/, 17600 /* O" */, 17850 /* O# */, 17850 /* O$ */, 17850 /* O% */, 17725 /* O& */, 17600 /* O' */, 17850 /* O( */, 17850 /* O) */, 17600 /* O* */, 17850 /* O+ */, 16850 /* O, */, 18225 /* O- */, 16850 /* O. */, 16725 /* O/ */, 17850 /* O0 */, 17850 /* O1 */, 17850 /* O2 */, 17850 /* O3 */, 17850 /* O4 */, 17850 /* O5 */, 17850 /* O6 */, 17850 /* O7 */, 17850 /* O8 */, 17850 /* O9 */, 17850 /* O: */, 17850 /* O; */, 17850 /* O< */, 17850 /* O= */, 17850 /* O> */, 17850 /* O? */, 17850 /* O@ */, 17100 /* OA */, 17850 /* OB */, 17850 /* OC */, 17850 /* OD */, 17850 /* OE */, 17850 /* OF */, 17850 /* OG */, 17850 /* OH */, 17850 /* OI */, 17100 /* OJ */, 17850 /* OK */, 17850 /* OL */, 17850 /* OM */, 17850 /* ON */, 17850 /* OO */, 17850 /* OP */, 17850 /* OQ */, 17850 /* OR */, 17750 /* OS */, 17225 /* OT */, 17850 /* OU */, 17075 /* OV */, 17275 /* OW */, 17100 /* OX */, 16600 /* OY */, 17400 /* OZ */, 17850 /* O[ */, 17850 /* O\ */, 17850 /* O] */, 17850 /* O^ */, 15350 /* O_ */, 17850 /* O` */, 17850 /* Oa */, 17850 /* Ob */, 17850 /* Oc */, 17850 /* Od */, 17850 /* Oe */, 17850 /* Of */, 17850 /* Og */, 17850 /* Oh */, 17850 /* Oi */, 17850 /* Oj */, 17850 /* Ok */, 17850 /* Ol */, 17850 /* Om */, 17850 /* On */, 17850 /* Oo */, 17850 /* Op */, 17850 /* Oq */, 17850 /* Or */, 17850 /* Os */, 17850 /* Ot */, 17850 /* Ou */, 17850 /* Ov */, 17850 /* Ow */, 17850 /* Ox */, 17850 /* Oy */, 17850 /* Oz */, 17850 /* O{ */, 17850 /* O| */, 17850 /* O} */, 17850 /* O~ */}, + {16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P */, 16400 /* P! */, 16525 /* P" */, 16400 /* P# */, 16400 /* P$ */, 16400 /* P% */, 15650 /* P& */, 16525 /* P' */, 16400 /* P( */, 16400 /* P) */, 16525 /* P* */, 16400 /* P+ */, 13900 /* P, */, 16400 /* P- */, 13900 /* P. */, 14150 /* P/ */, 16400 /* P0 */, 16400 /* P1 */, 16400 /* P2 */, 16400 /* P3 */, 16400 /* P4 */, 16400 /* P5 */, 16400 /* P6 */, 16400 /* P7 */, 16400 /* P8 */, 16400 /* P9 */, 16400 /* P: */, 16400 /* P; */, 16400 /* P< */, 16400 /* P= */, 16400 /* P> */, 16400 /* P? 
*/, 16150 /* P@ */, 14575 /* PA */, 16400 /* PB */, 16400 /* PC */, 16400 /* PD */, 16400 /* PE */, 16400 /* PF */, 16400 /* PG */, 16400 /* PH */, 16400 /* PI */, 14650 /* PJ */, 16400 /* PK */, 16400 /* PL */, 16400 /* PM */, 16400 /* PN */, 16400 /* PO */, 16400 /* PP */, 16400 /* PQ */, 16400 /* PR */, 16150 /* PS */, 16400 /* PT */, 16400 /* PU */, 16275 /* PV */, 16150 /* PW */, 15525 /* PX */, 15775 /* PY */, 15775 /* PZ */, 16400 /* P[ */, 16400 /* P\ */, 16400 /* P] */, 16400 /* P^ */, 12650 /* P_ */, 16400 /* P` */, 16025 /* Pa */, 16400 /* Pb */, 15900 /* Pc */, 15900 /* Pd */, 15900 /* Pe */, 16400 /* Pf */, 15650 /* Pg */, 16400 /* Ph */, 16400 /* Pi */, 16400 /* Pj */, 16400 /* Pk */, 16400 /* Pl */, 16150 /* Pm */, 16150 /* Pn */, 15900 /* Po */, 16025 /* Pp */, 15900 /* Pq */, 16150 /* Pr */, 16025 /* Ps */, 16400 /* Pt */, 16400 /* Pu */, 16400 /* Pv */, 16525 /* Pw */, 16025 /* Px */, 16400 /* Py */, 16400 /* Pz */, 16400 /* P{ */, 16400 /* P| */, 16400 /* P} */, 16400 /* P~ */}, + {17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q */, 17850 /* Q! */, 17600 /* Q" */, 17850 /* Q# */, 17850 /* Q$ */, 17850 /* Q% */, 17725 /* Q& */, 17600 /* Q' */, 17850 /* Q( */, 17850 /* Q) */, 17600 /* Q* */, 17850 /* Q+ */, 16850 /* Q, */, 18225 /* Q- */, 16850 /* Q. */, 16725 /* Q/ */, 17850 /* Q0 */, 17850 /* Q1 */, 17850 /* Q2 */, 17850 /* Q3 */, 17850 /* Q4 */, 17850 /* Q5 */, 17850 /* Q6 */, 17850 /* Q7 */, 17850 /* Q8 */, 17850 /* Q9 */, 17850 /* Q: */, 17850 /* Q; */, 17850 /* Q< */, 17850 /* Q= */, 17850 /* Q> */, 17850 /* Q? */, 17850 /* Q@ */, 17100 /* QA */, 17850 /* QB */, 17850 /* QC */, 17850 /* QD */, 17850 /* QE */, 17850 /* QF */, 17850 /* QG */, 17850 /* QH */, 17850 /* QI */, 17100 /* QJ */, 17850 /* QK */, 17850 /* QL */, 17850 /* QM */, 17850 /* QN */, 17850 /* QO */, 17850 /* QP */, 17850 /* QQ */, 17850 /* QR */, 17750 /* QS */, 17225 /* QT */, 17850 /* QU */, 17075 /* QV */, 17275 /* QW */, 17100 /* QX */, 16600 /* QY */, 17400 /* QZ */, 17850 /* Q[ */, 17850 /* Q\ */, 17850 /* Q] */, 17850 /* Q^ */, 15350 /* Q_ */, 17850 /* Q` */, 17850 /* Qa */, 17850 /* Qb */, 17850 /* Qc */, 17850 /* Qd */, 17850 /* Qe */, 17850 /* Qf */, 17850 /* Qg */, 17850 /* Qh */, 17850 /* Qi */, 17850 /* Qj */, 17850 /* Qk */, 17850 /* Ql */, 17850 /* Qm */, 17850 /* Qn */, 17850 /* Qo */, 17850 /* Qp */, 17850 /* Qq */, 17850 /* Qr */, 17850 /* Qs */, 17850 /* Qt */, 17850 /* Qu */, 17850 /* Qv */, 17850 /* Qw */, 17850 /* Qx */, 17850 /* Qy */, 17850 /* Qz */, 17850 /* Q{ */, 17850 /* Q| */, 17850 /* Q} */, 17850 /* Q~ */}, + {16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R */, 16850 /* R! 
*/, 16850 /* R" */, 16850 /* R# */, 16850 /* R$ */, 16850 /* R% */, 16225 /* R& */, 16850 /* R' */, 16850 /* R( */, 16850 /* R) */, 16850 /* R* */, 16850 /* R+ */, 16850 /* R, */, 16475 /* R- */, 16850 /* R. */, 16350 /* R/ */, 16850 /* R0 */, 16850 /* R1 */, 16850 /* R2 */, 16850 /* R3 */, 16850 /* R4 */, 16850 /* R5 */, 16850 /* R6 */, 16850 /* R7 */, 16850 /* R8 */, 16850 /* R9 */, 16850 /* R: */, 16850 /* R; */, 16850 /* R< */, 16850 /* R= */, 16850 /* R> */, 16850 /* R? */, 16350 /* R@ */, 16850 /* RA */, 16850 /* RB */, 16600 /* RC */, 16850 /* RD */, 16850 /* RE */, 16850 /* RF */, 16600 /* RG */, 16850 /* RH */, 16850 /* RI */, 15975 /* RJ */, 16850 /* RK */, 16850 /* RL */, 16850 /* RM */, 16850 /* RN */, 16600 /* RO */, 16850 /* RP */, 16600 /* RQ */, 16850 /* RR */, 16850 /* RS */, 16250 /* RT */, 16500 /* RU */, 16100 /* RV */, 16500 /* RW */, 16850 /* RX */, 15850 /* RY */, 16850 /* RZ */, 16850 /* R[ */, 16850 /* R\ */, 16850 /* R] */, 16850 /* R^ */, 16850 /* R_ */, 16850 /* R` */, 16600 /* Ra */, 16850 /* Rb */, 16475 /* Rc */, 16475 /* Rd */, 16475 /* Re */, 16850 /* Rf */, 16225 /* Rg */, 16850 /* Rh */, 16850 /* Ri */, 16850 /* Rj */, 16850 /* Rk */, 16850 /* Rl */, 16850 /* Rm */, 16850 /* Rn */, 16475 /* Ro */, 16850 /* Rp */, 16475 /* Rq */, 16850 /* Rr */, 16850 /* Rs */, 16850 /* Rt */, 16850 /* Ru */, 16850 /* Rv */, 16850 /* Rw */, 16850 /* Rx */, 16750 /* Ry */, 16850 /* Rz */, 16850 /* R{ */, 16850 /* R| */, 16850 /* R} */, 16850 /* R~ */}, + {15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S */, 15600 /* S! */, 15600 /* S" */, 15600 /* S# */, 15600 /* S$ */, 15600 /* S% */, 15350 /* S& */, 15600 /* S' */, 15600 /* S( */, 15600 /* S) */, 15600 /* S* */, 15600 /* S+ */, 15600 /* S, */, 15975 /* S- */, 15600 /* S. */, 14725 /* S/ */, 15600 /* S0 */, 15600 /* S1 */, 15600 /* S2 */, 15600 /* S3 */, 15600 /* S4 */, 15600 /* S5 */, 15600 /* S6 */, 15600 /* S7 */, 15600 /* S8 */, 15600 /* S9 */, 15600 /* S: */, 15600 /* S; */, 15600 /* S< */, 15600 /* S= */, 15600 /* S> */, 15600 /* S? 
*/, 15350 /* S@ */, 15600 /* SA */, 15600 /* SB */, 15600 /* SC */, 15600 /* SD */, 15600 /* SE */, 15600 /* SF */, 15600 /* SG */, 15600 /* SH */, 15600 /* SI */, 15600 /* SJ */, 15600 /* SK */, 15600 /* SL */, 15600 /* SM */, 15600 /* SN */, 15600 /* SO */, 15600 /* SP */, 15600 /* SQ */, 15600 /* SR */, 15250 /* SS */, 15025 /* ST */, 15600 /* SU */, 15150 /* SV */, 15150 /* SW */, 15600 /* SX */, 14600 /* SY */, 15600 /* SZ */, 15600 /* S[ */, 15600 /* S\ */, 15600 /* S] */, 15600 /* S^ */, 14100 /* S_ */, 15600 /* S` */, 15600 /* Sa */, 15600 /* Sb */, 15600 /* Sc */, 15600 /* Sd */, 15600 /* Se */, 15600 /* Sf */, 15600 /* Sg */, 15600 /* Sh */, 15600 /* Si */, 15600 /* Sj */, 15600 /* Sk */, 15600 /* Sl */, 15600 /* Sm */, 15600 /* Sn */, 15600 /* So */, 15600 /* Sp */, 15600 /* Sq */, 15600 /* Sr */, 15600 /* Ss */, 15475 /* St */, 15600 /* Su */, 15600 /* Sv */, 15600 /* Sw */, 15600 /* Sx */, 15600 /* Sy */, 15600 /* Sz */, 15600 /* S{ */, 15600 /* S| */, 15600 /* S} */, 15600 /* S~ */}, + {14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T */, 14600 /* T! */, 14975 /* T" */, 14600 /* T# */, 14600 /* T$ */, 14600 /* T% */, 13850 /* T& */, 14975 /* T' */, 14600 /* T( */, 15100 /* T) */, 14975 /* T* */, 14600 /* T+ */, 12850 /* T, */, 13475 /* T- */, 12850 /* T. */, 12100 /* T/ */, 14600 /* T0 */, 14600 /* T1 */, 14600 /* T2 */, 14600 /* T3 */, 14600 /* T4 */, 14600 /* T5 */, 14600 /* T6 */, 14600 /* T7 */, 14600 /* T8 */, 14600 /* T9 */, 13600 /* T: */, 13600 /* T; */, 14600 /* T< */, 14600 /* T= */, 14600 /* T> */, 14600 /* T? */, 13850 /* T@ */, 12975 /* TA */, 14600 /* TB */, 13975 /* TC */, 14600 /* TD */, 14600 /* TE */, 14600 /* TF */, 13975 /* TG */, 14600 /* TH */, 14500 /* TI */, 13225 /* TJ */, 14600 /* TK */, 14600 /* TL */, 14600 /* TM */, 14600 /* TN */, 13975 /* TO */, 14600 /* TP */, 13975 /* TQ */, 14600 /* TR */, 14300 /* TS */, 14975 /* TT */, 14600 /* TU */, 14675 /* TV */, 14675 /* TW */, 14600 /* TX */, 14675 /* TY */, 14600 /* TZ */, 14600 /* T[ */, 14600 /* T\ */, 14600 /* T] */, 14600 /* T^ */, 12600 /* T_ */, 14600 /* T` */, 12850 /* Ta */, 14600 /* Tb */, 12975 /* Tc */, 12975 /* Td */, 12975 /* Te */, 14225 /* Tf */, 12600 /* Tg */, 14600 /* Th */, 14600 /* Ti */, 14600 /* Tj */, 14600 /* Tk */, 14600 /* Tl */, 13475 /* Tm */, 13475 /* Tn */, 12975 /* To */, 13475 /* Tp */, 12975 /* Tq */, 13475 /* Tr */, 12850 /* Ts */, 14600 /* Tt */, 13475 /* Tu */, 13975 /* Tv */, 13725 /* Tw */, 13725 /* Tx */, 13850 /* Ty */, 13350 /* Tz */, 14600 /* T{ */, 14600 /* T| */, 14600 /* T} */, 14600 /* T~ */}, + {17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U */, 17350 /* U! 
*/, 17350 /* U" */, 17350 /* U# */, 17350 /* U$ */, 17350 /* U% */, 17100 /* U& */, 17350 /* U' */, 17350 /* U( */, 17350 /* U) */, 17350 /* U* */, 17350 /* U+ */, 16475 /* U, */, 17350 /* U- */, 16475 /* U. */, 16350 /* U/ */, 17350 /* U0 */, 17350 /* U1 */, 17350 /* U2 */, 17350 /* U3 */, 17350 /* U4 */, 17350 /* U5 */, 17350 /* U6 */, 17350 /* U7 */, 17350 /* U8 */, 17350 /* U9 */, 17350 /* U: */, 17350 /* U; */, 17350 /* U< */, 17350 /* U= */, 17350 /* U> */, 17350 /* U? */, 17350 /* U@ */, 16725 /* UA */, 17350 /* UB */, 17350 /* UC */, 17350 /* UD */, 17350 /* UE */, 17350 /* UF */, 17350 /* UG */, 17350 /* UH */, 17350 /* UI */, 16975 /* UJ */, 17350 /* UK */, 17350 /* UL */, 17350 /* UM */, 17350 /* UN */, 17350 /* UO */, 17350 /* UP */, 17350 /* UQ */, 17350 /* UR */, 17350 /* US */, 17350 /* UT */, 17350 /* UU */, 17350 /* UV */, 17350 /* UW */, 17350 /* UX */, 17350 /* UY */, 17350 /* UZ */, 17350 /* U[ */, 17350 /* U\ */, 17350 /* U] */, 17350 /* U^ */, 15600 /* U_ */, 17350 /* U` */, 17350 /* Ua */, 17350 /* Ub */, 17350 /* Uc */, 17350 /* Ud */, 17350 /* Ue */, 17350 /* Uf */, 17100 /* Ug */, 17350 /* Uh */, 17350 /* Ui */, 17350 /* Uj */, 17350 /* Uk */, 17350 /* Ul */, 17350 /* Um */, 17350 /* Un */, 17350 /* Uo */, 17350 /* Up */, 17350 /* Uq */, 17350 /* Ur */, 17350 /* Us */, 17350 /* Ut */, 17350 /* Uu */, 17350 /* Uv */, 17350 /* Uw */, 17350 /* Ux */, 17350 /* Uy */, 17350 /* Uz */, 17350 /* U{ */, 17350 /* U| */, 17350 /* U} */, 17350 /* U~ */}, + {16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V */, 16250 /* V! */, 16500 /* V" */, 16250 /* V# */, 16250 /* V$ */, 16250 /* V% */, 15375 /* V& */, 16500 /* V' */, 16250 /* V( */, 16750 /* V) */, 16500 /* V* */, 16250 /* V+ */, 14750 /* V, */, 15625 /* V- */, 14750 /* V. */, 14625 /* V/ */, 16250 /* V0 */, 16250 /* V1 */, 16250 /* V2 */, 16250 /* V3 */, 16250 /* V4 */, 16250 /* V5 */, 16250 /* V6 */, 16250 /* V7 */, 16250 /* V8 */, 16250 /* V9 */, 15500 /* V: */, 15500 /* V; */, 16250 /* V< */, 16250 /* V= */, 16250 /* V> */, 16625 /* V? 
*/, 15500 /* V@ */, 14900 /* VA */, 16250 /* VB */, 15475 /* VC */, 16250 /* VD */, 16250 /* VE */, 16250 /* VF */, 15475 /* VG */, 16250 /* VH */, 16250 /* VI */, 14875 /* VJ */, 16250 /* VK */, 16250 /* VL */, 16250 /* VM */, 16250 /* VN */, 15475 /* VO */, 16250 /* VP */, 15475 /* VQ */, 16250 /* VR */, 15500 /* VS */, 16325 /* VT */, 16250 /* VU */, 16375 /* VV */, 16325 /* VW */, 16250 /* VX */, 16250 /* VY */, 16125 /* VZ */, 16250 /* V[ */, 16250 /* V\ */, 16250 /* V] */, 16250 /* V^ */, 14250 /* V_ */, 16250 /* V` */, 15250 /* Va */, 16250 /* Vb */, 15375 /* Vc */, 15375 /* Vd */, 15375 /* Ve */, 16250 /* Vf */, 15250 /* Vg */, 16250 /* Vh */, 16250 /* Vi */, 16125 /* Vj */, 16250 /* Vk */, 16250 /* Vl */, 15750 /* Vm */, 15750 /* Vn */, 15375 /* Vo */, 15750 /* Vp */, 15375 /* Vq */, 15750 /* Vr */, 15625 /* Vs */, 16250 /* Vt */, 15625 /* Vu */, 16000 /* Vv */, 16000 /* Vw */, 16000 /* Vx */, 16125 /* Vy */, 15875 /* Vz */, 16250 /* V{ */, 16250 /* V| */, 16250 /* V} */, 16250 /* V~ */}, + {24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W */, 24325 /* W! */, 24700 /* W" */, 24325 /* W# */, 24325 /* W$ */, 24325 /* W% */, 23450 /* W& */, 24700 /* W' */, 24325 /* W( */, 24575 /* W) */, 24700 /* W* */, 24325 /* W+ */, 23325 /* W, */, 24075 /* W- */, 23325 /* W. */, 23075 /* W/ */, 24325 /* W0 */, 24325 /* W1 */, 24325 /* W2 */, 24325 /* W3 */, 24325 /* W4 */, 24325 /* W5 */, 24325 /* W6 */, 24325 /* W7 */, 24325 /* W8 */, 24325 /* W9 */, 24325 /* W: */, 24325 /* W; */, 24325 /* W< */, 24325 /* W= */, 24325 /* W> */, 24325 /* W? */, 23825 /* W@ */, 23625 /* WA */, 24325 /* WB */, 23750 /* WC */, 24325 /* WD */, 24325 /* WE */, 24325 /* WF */, 23750 /* WG */, 24325 /* WH */, 24325 /* WI */, 23075 /* WJ */, 24325 /* WK */, 24325 /* WL */, 24325 /* WM */, 24325 /* WN */, 23750 /* WO */, 24325 /* WP */, 23750 /* WQ */, 24325 /* WR */, 24325 /* WS */, 24400 /* WT */, 24325 /* WU */, 24400 /* WV */, 24375 /* WW */, 24325 /* WX */, 24325 /* WY */, 24200 /* WZ */, 24325 /* W[ */, 24325 /* W\ */, 24325 /* W] */, 24325 /* W^ */, 23075 /* W_ */, 24325 /* W` */, 23575 /* Wa */, 24325 /* Wb */, 23700 /* Wc */, 23700 /* Wd */, 23700 /* We */, 24325 /* Wf */, 23825 /* Wg */, 24325 /* Wh */, 24325 /* Wi */, 24200 /* Wj */, 24325 /* Wk */, 24325 /* Wl */, 24075 /* Wm */, 24075 /* Wn */, 23700 /* Wo */, 24200 /* Wp */, 23700 /* Wq */, 24075 /* Wr */, 24075 /* Ws */, 24325 /* Wt */, 24200 /* Wu */, 24325 /* Wv */, 24075 /* Ww */, 24075 /* Wx */, 24325 /* Wy */, 24075 /* Wz */, 24325 /* W{ */, 24325 /* W| */, 24325 /* W} */, 24325 /* W~ */}, + {16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X */, 16825 /* X! 
*/, 16825 /* X" */, 16825 /* X# */, 16825 /* X$ */, 16825 /* X% */, 16200 /* X& */, 16825 /* X' */, 16825 /* X( */, 16825 /* X) */, 16825 /* X* */, 16825 /* X+ */, 17075 /* X, */, 16075 /* X- */, 17075 /* X. */, 16825 /* X/ */, 16825 /* X0 */, 16825 /* X1 */, 16825 /* X2 */, 16825 /* X3 */, 16825 /* X4 */, 16825 /* X5 */, 16825 /* X6 */, 16825 /* X7 */, 16825 /* X8 */, 16825 /* X9 */, 16450 /* X: */, 16450 /* X; */, 16825 /* X< */, 16825 /* X= */, 16825 /* X> */, 16575 /* X? */, 16075 /* X@ */, 16950 /* XA */, 16825 /* XB */, 16075 /* XC */, 16825 /* XD */, 16825 /* XE */, 16825 /* XF */, 16075 /* XG */, 16825 /* XH */, 16825 /* XI */, 16325 /* XJ */, 16825 /* XK */, 16825 /* XL */, 16825 /* XM */, 16825 /* XN */, 16075 /* XO */, 16825 /* XP */, 16075 /* XQ */, 16825 /* XR */, 16825 /* XS */, 16825 /* XT */, 16825 /* XU */, 16825 /* XV */, 16825 /* XW */, 16825 /* XX */, 16825 /* XY */, 16825 /* XZ */, 16825 /* X[ */, 16825 /* X\ */, 16825 /* X] */, 16825 /* X^ */, 17325 /* X_ */, 16825 /* X` */, 16700 /* Xa */, 16825 /* Xb */, 16325 /* Xc */, 16325 /* Xd */, 16325 /* Xe */, 16825 /* Xf */, 16825 /* Xg */, 16825 /* Xh */, 16825 /* Xi */, 16825 /* Xj */, 16825 /* Xk */, 16825 /* Xl */, 16825 /* Xm */, 16825 /* Xn */, 16325 /* Xo */, 16825 /* Xp */, 16325 /* Xq */, 16825 /* Xr */, 16575 /* Xs */, 16325 /* Xt */, 16325 /* Xu */, 16450 /* Xv */, 16325 /* Xw */, 16825 /* Xx */, 16575 /* Xy */, 17075 /* Xz */, 16825 /* X{ */, 16825 /* X| */, 16825 /* X} */, 16825 /* X~ */}, + {16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y */, 16225 /* Y! */, 16350 /* Y" */, 16225 /* Y# */, 16225 /* Y$ */, 16225 /* Y% */, 14850 /* Y& */, 16350 /* Y' */, 16225 /* Y( */, 16725 /* Y) */, 16350 /* Y* */, 16225 /* Y+ */, 14475 /* Y, */, 14975 /* Y- */, 14475 /* Y. */, 13975 /* Y/ */, 16225 /* Y0 */, 16225 /* Y1 */, 16225 /* Y2 */, 16225 /* Y3 */, 16225 /* Y4 */, 16225 /* Y5 */, 16225 /* Y6 */, 16225 /* Y7 */, 16225 /* Y8 */, 16225 /* Y9 */, 14975 /* Y: */, 14975 /* Y; */, 16225 /* Y< */, 16225 /* Y= */, 16225 /* Y> */, 16225 /* Y? 
*/, 15225 /* Y@ */, 14475 /* YA */, 16225 /* YB */, 14975 /* YC */, 16225 /* YD */, 16225 /* YE */, 16225 /* YF */, 14975 /* YG */, 16225 /* YH */, 16225 /* YI */, 14475 /* YJ */, 16225 /* YK */, 16225 /* YL */, 16225 /* YM */, 16225 /* YN */, 14975 /* YO */, 16225 /* YP */, 14975 /* YQ */, 16225 /* YR */, 15125 /* YS */, 16300 /* YT */, 16225 /* YU */, 16225 /* YV */, 16225 /* YW */, 16225 /* YX */, 16225 /* YY */, 16225 /* YZ */, 16225 /* Y[ */, 16225 /* Y\ */, 16225 /* Y] */, 16225 /* Y^ */, 13975 /* Y_ */, 16225 /* Y` */, 14600 /* Ya */, 15975 /* Yb */, 14725 /* Yc */, 14725 /* Yd */, 14725 /* Ye */, 15600 /* Yf */, 14700 /* Yg */, 15975 /* Yh */, 15975 /* Yi */, 15475 /* Yj */, 15975 /* Yk */, 16225 /* Yl */, 15100 /* Ym */, 15100 /* Yn */, 14725 /* Yo */, 14975 /* Yp */, 14725 /* Yq */, 15100 /* Yr */, 14725 /* Ys */, 15600 /* Yt */, 15225 /* Yu */, 15350 /* Yv */, 15225 /* Yw */, 15225 /* Yx */, 15350 /* Yy */, 14975 /* Yz */, 16225 /* Y{ */, 16225 /* Y| */, 16225 /* Y} */, 16225 /* Y~ */}, + {15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z */, 15175 /* Z! */, 15300 /* Z" */, 15175 /* Z# */, 15175 /* Z$ */, 15175 /* Z% */, 14925 /* Z& */, 15300 /* Z' */, 15175 /* Z( */, 15425 /* Z) */, 15300 /* Z* */, 15175 /* Z+ */, 15925 /* Z, */, 14175 /* Z- */, 15925 /* Z. */, 15175 /* Z/ */, 15175 /* Z0 */, 15175 /* Z1 */, 15175 /* Z2 */, 15175 /* Z3 */, 15175 /* Z4 */, 15175 /* Z5 */, 15175 /* Z6 */, 15175 /* Z7 */, 15175 /* Z8 */, 15175 /* Z9 */, 15175 /* Z: */, 15175 /* Z; */, 15175 /* Z< */, 15175 /* Z= */, 15175 /* Z> */, 15175 /* Z? */, 14800 /* Z@ */, 15175 /* ZA */, 15175 /* ZB */, 14725 /* ZC */, 15175 /* ZD */, 15175 /* ZE */, 15175 /* ZF */, 14725 /* ZG */, 15175 /* ZH */, 15175 /* ZI */, 15000 /* ZJ */, 15175 /* ZK */, 15175 /* ZL */, 15175 /* ZM */, 15175 /* ZN */, 14725 /* ZO */, 15175 /* ZP */, 14725 /* ZQ */, 15175 /* ZR */, 15175 /* ZS */, 15175 /* ZT */, 15175 /* ZU */, 14975 /* ZV */, 15050 /* ZW */, 15175 /* ZX */, 14925 /* ZY */, 15175 /* ZZ */, 15175 /* Z[ */, 15175 /* Z\ */, 15175 /* Z] */, 15175 /* Z^ */, 15550 /* Z_ */, 15175 /* Z` */, 15175 /* Za */, 15175 /* Zb */, 15050 /* Zc */, 15050 /* Zd */, 15050 /* Ze */, 15175 /* Zf */, 15175 /* Zg */, 15175 /* Zh */, 15175 /* Zi */, 15175 /* Zj */, 15175 /* Zk */, 15175 /* Zl */, 15175 /* Zm */, 15175 /* Zn */, 15050 /* Zo */, 15175 /* Zp */, 15050 /* Zq */, 15175 /* Zr */, 15175 /* Zs */, 14925 /* Zt */, 15050 /* Zu */, 15175 /* Zv */, 15175 /* Zw */, 15175 /* Zx */, 15050 /* Zy */, 15175 /* Zz */, 15175 /* Z{ */, 15175 /* Z| */, 15175 /* Z} */, 15175 /* Z~ */}, + {8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [ */, 8350 /* [! 
*/, 8350 /* [" */, 8350 /* [# */, 8350 /* [$ */, 8350 /* [% */, 8350 /* [& */, 8350 /* [' */, 8350 /* [( */, 8350 /* [) */, 8350 /* [* */, 8350 /* [+ */, 8350 /* [, */, 8350 /* [- */, 8350 /* [. */, 8350 /* [/ */, 8350 /* [0 */, 8350 /* [1 */, 8350 /* [2 */, 8350 /* [3 */, 8350 /* [4 */, 8350 /* [5 */, 8350 /* [6 */, 8350 /* [7 */, 8350 /* [8 */, 8350 /* [9 */, 8350 /* [: */, 8350 /* [; */, 8350 /* [< */, 8350 /* [= */, 8350 /* [> */, 8350 /* [? */, 8350 /* [@ */, 8350 /* [A */, 8350 /* [B */, 8350 /* [C */, 8350 /* [D */, 8350 /* [E */, 8350 /* [F */, 8350 /* [G */, 8350 /* [H */, 8350 /* [I */, 8350 /* [J */, 8350 /* [K */, 8350 /* [L */, 8350 /* [M */, 8350 /* [N */, 8350 /* [O */, 8350 /* [P */, 8350 /* [Q */, 8350 /* [R */, 8350 /* [S */, 8350 /* [T */, 8350 /* [U */, 8350 /* [V */, 8350 /* [W */, 8350 /* [X */, 8350 /* [Y */, 8350 /* [Z */, 8350 /* [[ */, 8350 /* [\ */, 8350 /* [] */, 8350 /* [^ */, 8350 /* [_ */, 8350 /* [` */, 8350 /* [a */, 8350 /* [b */, 8350 /* [c */, 8350 /* [d */, 8350 /* [e */, 8350 /* [f */, 8350 /* [g */, 8350 /* [h */, 8350 /* [i */, 8600 /* [j */, 8350 /* [k */, 8350 /* [l */, 8350 /* [m */, 8350 /* [n */, 8350 /* [o */, 8350 /* [p */, 8350 /* [q */, 8350 /* [r */, 8350 /* [s */, 8350 /* [t */, 8350 /* [u */, 8350 /* [v */, 8350 /* [w */, 8350 /* [x */, 8350 /* [y */, 8350 /* [z */, 8350 /* [{ */, 8350 /* [| */, 8350 /* [} */, 8350 /* [~ */}, + {11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \ */, 11500 /* \! */, 11500 /* \" */, 11500 /* \# */, 11500 /* \$ */, 11500 /* \% */, 11500 /* \& */, 11500 /* \' */, 11500 /* \( */, 11500 /* \) */, 11500 /* \* */, 11500 /* \+ */, 11500 /* \, */, 11500 /* \- */, 11500 /* \. */, 11500 /* \/ */, 11500 /* \0 */, 11500 /* \1 */, 11500 /* \2 */, 11500 /* \3 */, 11500 /* \4 */, 11500 /* \5 */, 11500 /* \6 */, 11500 /* \7 */, 11500 /* \8 */, 11500 /* \9 */, 11500 /* \: */, 11500 /* \; */, 11500 /* \< */, 11500 /* \= */, 11500 /* \> */, 11500 /* \? 
*/, 11500 /* \@ */, 11500 /* \A */, 11500 /* \B */, 11500 /* \C */, 11500 /* \D */, 11500 /* \E */, 11500 /* \F */, 11500 /* \G */, 11500 /* \H */, 11500 /* \I */, 11500 /* \J */, 11500 /* \K */, 11500 /* \L */, 11500 /* \M */, 11500 /* \N */, 11500 /* \O */, 11500 /* \P */, 11500 /* \Q */, 11500 /* \R */, 11500 /* \S */, 11500 /* \T */, 11500 /* \U */, 11500 /* \V */, 11500 /* \W */, 11500 /* \X */, 11500 /* \Y */, 11500 /* \Z */, 11500 /* \[ */, 11500 /* \\ */, 11500 /* \] */, 11500 /* \^ */, 11500 /* \_ */, 11500 /* \` */, 11500 /* \a */, 11500 /* \b */, 11500 /* \c */, 11500 /* \d */, 11500 /* \e */, 11500 /* \f */, 11500 /* \g */, 11500 /* \h */, 11500 /* \i */, 11500 /* \j */, 11500 /* \k */, 11500 /* \l */, 11500 /* \m */, 11500 /* \n */, 11500 /* \o */, 11500 /* \p */, 11500 /* \q */, 11500 /* \r */, 11500 /* \s */, 11500 /* \t */, 11500 /* \u */, 11500 /* \v */, 11500 /* \w */, 11500 /* \x */, 11500 /* \y */, 11500 /* \z */, 11500 /* \{ */, 11500 /* \| */, 11500 /* \} */, 11500 /* \~ */}, + {8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ] */, 8350 /* ]! */, 8350 /* ]" */, 8350 /* ]# */, 8350 /* ]$ */, 8350 /* ]% */, 8350 /* ]& */, 8350 /* ]' */, 8350 /* ]( */, 8350 /* ]) */, 8350 /* ]* */, 8350 /* ]+ */, 8350 /* ], */, 8350 /* ]- */, 8350 /* ]. */, 8350 /* ]/ */, 8350 /* ]0 */, 8350 /* ]1 */, 8350 /* ]2 */, 8350 /* ]3 */, 8350 /* ]4 */, 8350 /* ]5 */, 8350 /* ]6 */, 8350 /* ]7 */, 8350 /* ]8 */, 8350 /* ]9 */, 8350 /* ]: */, 8350 /* ]; */, 8350 /* ]< */, 8350 /* ]= */, 8350 /* ]> */, 8350 /* ]? */, 8350 /* ]@ */, 8350 /* ]A */, 8350 /* ]B */, 8350 /* ]C */, 8350 /* ]D */, 8350 /* ]E */, 8350 /* ]F */, 8350 /* ]G */, 8350 /* ]H */, 8350 /* ]I */, 8350 /* ]J */, 8350 /* ]K */, 8350 /* ]L */, 8350 /* ]M */, 8350 /* ]N */, 8350 /* ]O */, 8350 /* ]P */, 8350 /* ]Q */, 8350 /* ]R */, 8350 /* ]S */, 8350 /* ]T */, 8350 /* ]U */, 8350 /* ]V */, 8350 /* ]W */, 8350 /* ]X */, 8350 /* ]Y */, 8350 /* ]Z */, 8350 /* ][ */, 8350 /* ]\ */, 8350 /* ]] */, 8350 /* ]^ */, 8350 /* ]_ */, 8350 /* ]` */, 8350 /* ]a */, 8350 /* ]b */, 8350 /* ]c */, 8350 /* ]d */, 8350 /* ]e */, 8350 /* ]f */, 8350 /* ]g */, 8350 /* ]h */, 8350 /* ]i */, 8350 /* ]j */, 8350 /* ]k */, 8350 /* ]l */, 8350 /* ]m */, 8350 /* ]n */, 8350 /* ]o */, 8350 /* ]p */, 8350 /* ]q */, 8350 /* ]r */, 8350 /* ]s */, 8350 /* ]t */, 8350 /* ]u */, 8350 /* ]v */, 8350 /* ]w */, 8350 /* ]x */, 8350 /* ]y */, 8350 /* ]z */, 8350 /* ]{ */, 8350 /* ]| */, 8350 /* ]} */, 8350 /* ]~ */}, + {15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^ */, 15000 /* ^! 
*/, 15000 /* ^" */, 15000 /* ^# */, 15000 /* ^$ */, 15000 /* ^% */, 15000 /* ^& */, 15000 /* ^' */, 15000 /* ^( */, 15000 /* ^) */, 15000 /* ^* */, 15000 /* ^+ */, 15000 /* ^, */, 15000 /* ^- */, 15000 /* ^. */, 15000 /* ^/ */, 15000 /* ^0 */, 15000 /* ^1 */, 15000 /* ^2 */, 15000 /* ^3 */, 15000 /* ^4 */, 15000 /* ^5 */, 15000 /* ^6 */, 15000 /* ^7 */, 15000 /* ^8 */, 15000 /* ^9 */, 15000 /* ^: */, 15000 /* ^; */, 15000 /* ^< */, 15000 /* ^= */, 15000 /* ^> */, 15000 /* ^? */, 15000 /* ^@ */, 15000 /* ^A */, 15000 /* ^B */, 15000 /* ^C */, 15000 /* ^D */, 15000 /* ^E */, 15000 /* ^F */, 15000 /* ^G */, 15000 /* ^H */, 15000 /* ^I */, 15000 /* ^J */, 15000 /* ^K */, 15000 /* ^L */, 15000 /* ^M */, 15000 /* ^N */, 15000 /* ^O */, 15000 /* ^P */, 15000 /* ^Q */, 15000 /* ^R */, 15000 /* ^S */, 15000 /* ^T */, 15000 /* ^U */, 15000 /* ^V */, 15000 /* ^W */, 15000 /* ^X */, 15000 /* ^Y */, 15000 /* ^Z */, 15000 /* ^[ */, 15000 /* ^\ */, 15000 /* ^] */, 15000 /* ^^ */, 15000 /* ^_ */, 15000 /* ^` */, 15000 /* ^a */, 15000 /* ^b */, 15000 /* ^c */, 15000 /* ^d */, 15000 /* ^e */, 15000 /* ^f */, 15000 /* ^g */, 15000 /* ^h */, 15000 /* ^i */, 15000 /* ^j */, 15000 /* ^k */, 15000 /* ^l */, 15000 /* ^m */, 15000 /* ^n */, 15000 /* ^o */, 15000 /* ^p */, 15000 /* ^q */, 15000 /* ^r */, 15000 /* ^s */, 15000 /* ^t */, 15000 /* ^u */, 15000 /* ^v */, 15000 /* ^w */, 15000 /* ^x */, 15000 /* ^y */, 15000 /* ^z */, 15000 /* ^{ */, 15000 /* ^| */, 15000 /* ^} */, 15000 /* ^~ */}, + {13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _ */, 13900 /* _! */, 13900 /* _" */, 13900 /* _# */, 13900 /* _$ */, 13900 /* _% */, 13900 /* _& */, 13900 /* _' */, 13900 /* _( */, 13900 /* _) */, 13900 /* _* */, 13900 /* _+ */, 13900 /* _, */, 13900 /* _- */, 13900 /* _. */, 13900 /* _/ */, 13900 /* _0 */, 13900 /* _1 */, 13900 /* _2 */, 13900 /* _3 */, 13900 /* _4 */, 13900 /* _5 */, 13900 /* _6 */, 13900 /* _7 */, 13900 /* _8 */, 13900 /* _9 */, 13900 /* _: */, 13900 /* _; */, 13900 /* _< */, 13900 /* _= */, 13900 /* _> */, 13900 /* _? 
*/, 13900 /* _@ */, 14275 /* _A */, 13900 /* _B */, 11400 /* _C */, 13900 /* _D */, 13900 /* _E */, 13900 /* _F */, 11400 /* _G */, 13900 /* _H */, 13900 /* _I */, 12650 /* _J */, 13900 /* _K */, 13900 /* _L */, 13900 /* _M */, 13900 /* _N */, 11400 /* _O */, 13900 /* _P */, 11400 /* _Q */, 13900 /* _R */, 12400 /* _S */, 11900 /* _T */, 12150 /* _U */, 11900 /* _V */, 12650 /* _W */, 14400 /* _X */, 11650 /* _Y */, 14525 /* _Z */, 13900 /* _[ */, 13900 /* _\ */, 13900 /* _] */, 13900 /* _^ */, 13900 /* __ */, 13900 /* _` */, 13150 /* _a */, 13900 /* _b */, 12400 /* _c */, 12650 /* _d */, 12400 /* _e */, 13275 /* _f */, 14525 /* _g */, 13900 /* _h */, 13900 /* _i */, 15275 /* _j */, 13900 /* _k */, 12525 /* _l */, 13900 /* _m */, 13900 /* _n */, 12400 /* _o */, 14150 /* _p */, 12650 /* _q */, 13900 /* _r */, 12900 /* _s */, 12275 /* _t */, 13025 /* _u */, 12150 /* _v */, 12650 /* _w */, 14400 /* _x */, 13525 /* _y */, 14025 /* _z */, 13900 /* _{ */, 13900 /* _| */, 13900 /* _} */, 13900 /* _~ */}, + {15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* ` */, 15000 /* `! */, 15000 /* `" */, 15000 /* `# */, 15000 /* `$ */, 15000 /* `% */, 15000 /* `& */, 15000 /* `' */, 15000 /* `( */, 15000 /* `) */, 15000 /* `* */, 15000 /* `+ */, 15000 /* `, */, 15000 /* `- */, 15000 /* `. */, 15000 /* `/ */, 15000 /* `0 */, 15000 /* `1 */, 15000 /* `2 */, 15000 /* `3 */, 15000 /* `4 */, 15000 /* `5 */, 15000 /* `6 */, 15000 /* `7 */, 15000 /* `8 */, 15000 /* `9 */, 15000 /* `: */, 15000 /* `; */, 15000 /* `< */, 15000 /* `= */, 15000 /* `> */, 15000 /* `? */, 15000 /* `@ */, 15000 /* `A */, 15000 /* `B */, 15000 /* `C */, 15000 /* `D */, 15000 /* `E */, 15000 /* `F */, 15000 /* `G */, 15000 /* `H */, 15000 /* `I */, 15000 /* `J */, 15000 /* `K */, 15000 /* `L */, 15000 /* `M */, 15000 /* `N */, 15000 /* `O */, 15000 /* `P */, 15000 /* `Q */, 15000 /* `R */, 15000 /* `S */, 15000 /* `T */, 15000 /* `U */, 15000 /* `V */, 15000 /* `W */, 15000 /* `X */, 15000 /* `Y */, 15000 /* `Z */, 15000 /* `[ */, 15000 /* `\ */, 15000 /* `] */, 15000 /* `^ */, 15000 /* `_ */, 15000 /* `` */, 15000 /* `a */, 15000 /* `b */, 15000 /* `c */, 15000 /* `d */, 15000 /* `e */, 15000 /* `f */, 15000 /* `g */, 15000 /* `h */, 15000 /* `i */, 15000 /* `j */, 15000 /* `k */, 15000 /* `l */, 15000 /* `m */, 15000 /* `n */, 15000 /* `o */, 15000 /* `p */, 15000 /* `q */, 15000 /* `r */, 15000 /* `s */, 15000 /* `t */, 15000 /* `u */, 15000 /* `v */, 15000 /* `w */, 15000 /* `x */, 15000 /* `y */, 15000 /* `z */, 15000 /* `{ */, 15000 /* `| */, 15000 /* `} */, 15000 /* `~ */}, + {14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a */, 14225 /* a! 
*/, 13850 /* a" */, 14225 /* a# */, 14225 /* a$ */, 14225 /* a% */, 14225 /* a& */, 13850 /* a' */, 14225 /* a( */, 14225 /* a) */, 13850 /* a* */, 14225 /* a+ */, 14475 /* a, */, 14100 /* a- */, 14475 /* a. */, 14725 /* a/ */, 14225 /* a0 */, 14225 /* a1 */, 14225 /* a2 */, 14225 /* a3 */, 14225 /* a4 */, 14225 /* a5 */, 14225 /* a6 */, 14225 /* a7 */, 14225 /* a8 */, 14225 /* a9 */, 14225 /* a: */, 14225 /* a; */, 14225 /* a< */, 14225 /* a= */, 14225 /* a> */, 14225 /* a? */, 14225 /* a@ */, 14425 /* aA */, 14225 /* aB */, 14225 /* aC */, 14225 /* aD */, 14225 /* aE */, 14225 /* aF */, 14225 /* aG */, 14225 /* aH */, 14225 /* aI */, 14225 /* aJ */, 14225 /* aK */, 14225 /* aL */, 14225 /* aM */, 14225 /* aN */, 14225 /* aO */, 14225 /* aP */, 14225 /* aQ */, 14225 /* aR */, 14225 /* aS */, 12475 /* aT */, 14225 /* aU */, 13475 /* aV */, 13975 /* aW */, 14225 /* aX */, 13100 /* aY */, 14375 /* aZ */, 14225 /* a[ */, 14225 /* a\ */, 14225 /* a] */, 14225 /* a^ */, 14475 /* a_ */, 14225 /* a` */, 14225 /* aa */, 14225 /* ab */, 14225 /* ac */, 14225 /* ad */, 14225 /* ae */, 14225 /* af */, 14225 /* ag */, 14225 /* ah */, 14225 /* ai */, 14225 /* aj */, 14225 /* ak */, 14225 /* al */, 14225 /* am */, 14225 /* an */, 14225 /* ao */, 14225 /* ap */, 14225 /* aq */, 14225 /* ar */, 14225 /* as */, 14075 /* at */, 14225 /* au */, 13975 /* av */, 14075 /* aw */, 14225 /* ax */, 14000 /* ay */, 14225 /* az */, 14225 /* a{ */, 14225 /* a| */, 14225 /* a} */, 14225 /* a~ */}, + {15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b */, 15200 /* b! */, 14700 /* b" */, 15200 /* b# */, 15200 /* b$ */, 15200 /* b% */, 15200 /* b& */, 14700 /* b' */, 15200 /* b( */, 15200 /* b) */, 14700 /* b* */, 15200 /* b+ */, 14825 /* b, */, 15450 /* b- */, 14825 /* b. */, 15200 /* b/ */, 15200 /* b0 */, 15200 /* b1 */, 15200 /* b2 */, 15200 /* b3 */, 15200 /* b4 */, 15200 /* b5 */, 15200 /* b6 */, 15200 /* b7 */, 15200 /* b8 */, 15200 /* b9 */, 15200 /* b: */, 15200 /* b; */, 15200 /* b< */, 15200 /* b= */, 15200 /* b> */, 15200 /* b? 
*/, 15200 /* b@ */, 15200 /* bA */, 15200 /* bB */, 15200 /* bC */, 15200 /* bD */, 15200 /* bE */, 15200 /* bF */, 15200 /* bG */, 15200 /* bH */, 15075 /* bI */, 15200 /* bJ */, 15200 /* bK */, 15200 /* bL */, 15200 /* bM */, 15200 /* bN */, 15200 /* bO */, 15200 /* bP */, 15200 /* bQ */, 15200 /* bR */, 15200 /* bS */, 13575 /* bT */, 15200 /* bU */, 14325 /* bV */, 14575 /* bW */, 14700 /* bX */, 13700 /* bY */, 15075 /* bZ */, 15200 /* b[ */, 15200 /* b\ */, 15200 /* b] */, 15200 /* b^ */, 13950 /* b_ */, 15200 /* b` */, 15200 /* ba */, 15200 /* bb */, 15200 /* bc */, 15200 /* bd */, 15200 /* be */, 15050 /* bf */, 15200 /* bg */, 15200 /* bh */, 15200 /* bi */, 15200 /* bj */, 15200 /* bk */, 15200 /* bl */, 15200 /* bm */, 15200 /* bn */, 15200 /* bo */, 15200 /* bp */, 15200 /* bq */, 15200 /* br */, 15200 /* bs */, 15050 /* bt */, 15200 /* bu */, 15000 /* bv */, 14950 /* bw */, 14800 /* bx */, 14950 /* by */, 15000 /* bz */, 15200 /* b{ */, 15200 /* b| */, 15200 /* b} */, 15200 /* b~ */}, + {12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c */, 12925 /* c! */, 12925 /* c" */, 12925 /* c# */, 12925 /* c$ */, 12925 /* c% */, 12925 /* c& */, 12925 /* c' */, 12925 /* c( */, 12925 /* c) */, 12925 /* c* */, 12925 /* c+ */, 12925 /* c, */, 12925 /* c- */, 12925 /* c. */, 12800 /* c/ */, 12925 /* c0 */, 12925 /* c1 */, 12925 /* c2 */, 12925 /* c3 */, 12925 /* c4 */, 12925 /* c5 */, 12925 /* c6 */, 12925 /* c7 */, 12925 /* c8 */, 12925 /* c9 */, 12925 /* c: */, 12925 /* c; */, 12925 /* c< */, 12925 /* c= */, 12925 /* c> */, 12925 /* c? */, 12925 /* c@ */, 13125 /* cA */, 12925 /* cB */, 12925 /* cC */, 12925 /* cD */, 12925 /* cE */, 12925 /* cF */, 12925 /* cG */, 12925 /* cH */, 12925 /* cI */, 13050 /* cJ */, 12925 /* cK */, 12925 /* cL */, 12925 /* cM */, 12925 /* cN */, 12925 /* cO */, 12925 /* cP */, 12925 /* cQ */, 12925 /* cR */, 12925 /* cS */, 11775 /* cT */, 12925 /* cU */, 12250 /* cV */, 12675 /* cW */, 12925 /* cX */, 11625 /* cY */, 13075 /* cZ */, 12925 /* c[ */, 12925 /* c\ */, 12925 /* c] */, 12925 /* c^ */, 11925 /* c_ */, 12925 /* c` */, 12925 /* ca */, 12925 /* cb */, 12800 /* cc */, 12800 /* cd */, 12800 /* ce */, 12925 /* cf */, 12925 /* cg */, 12925 /* ch */, 12925 /* ci */, 12925 /* cj */, 12925 /* ck */, 12925 /* cl */, 12925 /* cm */, 12925 /* cn */, 12800 /* co */, 12925 /* cp */, 12800 /* cq */, 12925 /* cr */, 12925 /* cs */, 12925 /* ct */, 12925 /* cu */, 12925 /* cv */, 12925 /* cw */, 12925 /* cx */, 12925 /* cy */, 12925 /* cz */, 12925 /* c{ */, 12925 /* c| */, 12925 /* c} */, 12925 /* c~ */}, + {15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d */, 15200 /* d! 
*/, 15200 /* d" */, 15200 /* d# */, 15200 /* d$ */, 15200 /* d% */, 15200 /* d& */, 15200 /* d' */, 15200 /* d( */, 15200 /* d) */, 15200 /* d* */, 15200 /* d+ */, 15200 /* d, */, 15200 /* d- */, 15200 /* d. */, 15200 /* d/ */, 15200 /* d0 */, 15200 /* d1 */, 15200 /* d2 */, 15200 /* d3 */, 15200 /* d4 */, 15200 /* d5 */, 15200 /* d6 */, 15200 /* d7 */, 15200 /* d8 */, 15200 /* d9 */, 15200 /* d: */, 15200 /* d; */, 15200 /* d< */, 15200 /* d= */, 15200 /* d> */, 15200 /* d? */, 15200 /* d@ */, 15200 /* dA */, 15200 /* dB */, 15200 /* dC */, 15200 /* dD */, 15200 /* dE */, 15200 /* dF */, 15200 /* dG */, 15200 /* dH */, 15200 /* dI */, 15200 /* dJ */, 15200 /* dK */, 15200 /* dL */, 15200 /* dM */, 15200 /* dN */, 15200 /* dO */, 15200 /* dP */, 15200 /* dQ */, 15200 /* dR */, 15200 /* dS */, 15200 /* dT */, 15200 /* dU */, 15200 /* dV */, 15200 /* dW */, 15200 /* dX */, 15200 /* dY */, 15200 /* dZ */, 15200 /* d[ */, 15200 /* d\ */, 15200 /* d] */, 15200 /* d^ */, 15200 /* d_ */, 15200 /* d` */, 15200 /* da */, 15200 /* db */, 15200 /* dc */, 15200 /* dd */, 15200 /* de */, 15200 /* df */, 15200 /* dg */, 15200 /* dh */, 15200 /* di */, 15200 /* dj */, 15200 /* dk */, 15200 /* dl */, 15200 /* dm */, 15200 /* dn */, 15200 /* do */, 15200 /* dp */, 15200 /* dq */, 15200 /* dr */, 15200 /* ds */, 15200 /* dt */, 15200 /* du */, 15200 /* dv */, 15200 /* dw */, 15200 /* dx */, 15200 /* dy */, 15200 /* dz */, 15200 /* d{ */, 15200 /* d| */, 15200 /* d} */, 15200 /* d~ */}, + {14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e */, 14050 /* e! */, 13550 /* e" */, 14050 /* e# */, 14050 /* e$ */, 14050 /* e% */, 14050 /* e& */, 13550 /* e' */, 14050 /* e( */, 14050 /* e) */, 13550 /* e* */, 14050 /* e+ */, 13675 /* e, */, 14300 /* e- */, 13675 /* e. */, 14050 /* e/ */, 14050 /* e0 */, 14050 /* e1 */, 14050 /* e2 */, 14050 /* e3 */, 14050 /* e4 */, 14050 /* e5 */, 14050 /* e6 */, 14050 /* e7 */, 14050 /* e8 */, 14050 /* e9 */, 14050 /* e: */, 14050 /* e; */, 14050 /* e< */, 14050 /* e= */, 14050 /* e> */, 14050 /* e? 
*/, 14050 /* e@ */, 14050 /* eA */, 14050 /* eB */, 14050 /* eC */, 14050 /* eD */, 14050 /* eE */, 14050 /* eF */, 14050 /* eG */, 14050 /* eH */, 13925 /* eI */, 14050 /* eJ */, 14050 /* eK */, 14050 /* eL */, 14050 /* eM */, 14050 /* eN */, 14050 /* eO */, 14050 /* eP */, 14050 /* eQ */, 14050 /* eR */, 13800 /* eS */, 12425 /* eT */, 14050 /* eU */, 13175 /* eV */, 13550 /* eW */, 13550 /* eX */, 12550 /* eY */, 13925 /* eZ */, 14050 /* e[ */, 14050 /* e\ */, 14050 /* e] */, 14050 /* e^ */, 12550 /* e_ */, 14050 /* e` */, 14050 /* ea */, 14050 /* eb */, 14050 /* ec */, 14050 /* ed */, 14050 /* ee */, 14050 /* ef */, 14050 /* eg */, 14050 /* eh */, 14050 /* ei */, 14050 /* ej */, 14050 /* ek */, 14050 /* el */, 14050 /* em */, 14050 /* en */, 14050 /* eo */, 14050 /* ep */, 14050 /* eq */, 14050 /* er */, 14050 /* es */, 14050 /* et */, 14050 /* eu */, 13800 /* ev */, 13925 /* ew */, 13650 /* ex */, 13875 /* ey */, 13925 /* ez */, 14050 /* e{ */, 14050 /* e| */, 14050 /* e} */, 14050 /* e~ */}, + {9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9025 /* f */, 9525 /* f! */, 9775 /* f" */, 9025 /* f# */, 9025 /* f$ */, 9025 /* f% */, 8650 /* f& */, 9775 /* f' */, 9025 /* f( */, 9650 /* f) */, 9775 /* f* */, 9025 /* f+ */, 8275 /* f, */, 9025 /* f- */, 8275 /* f. */, 8525 /* f/ */, 9025 /* f0 */, 9025 /* f1 */, 9025 /* f2 */, 9025 /* f3 */, 9025 /* f4 */, 9025 /* f5 */, 9025 /* f6 */, 9025 /* f7 */, 9025 /* f8 */, 9025 /* f9 */, 9025 /* f: */, 9025 /* f; */, 9025 /* f< */, 9025 /* f= */, 9025 /* f> */, 9775 /* f? */, 9025 /* f@ */, 8875 /* fA */, 9025 /* fB */, 9400 /* fC */, 9025 /* fD */, 9025 /* fE */, 9025 /* fF */, 9400 /* fG */, 9025 /* fH */, 9025 /* fI */, 8450 /* fJ */, 9025 /* fK */, 9025 /* fL */, 9025 /* fM */, 9025 /* fN */, 9400 /* fO */, 9025 /* fP */, 9400 /* fQ */, 9025 /* fR */, 9025 /* fS */, 9650 /* fT */, 9025 /* fU */, 9900 /* fV */, 9650 /* fW */, 9375 /* fX */, 9775 /* fY */, 9075 /* fZ */, 9025 /* f[ */, 9025 /* f\ */, 9400 /* f] */, 9025 /* f^ */, 7475 /* f_ */, 9025 /* f` */, 9025 /* fa */, 9025 /* fb */, 8925 /* fc */, 8925 /* fd */, 8925 /* fe */, 9125 /* ff */, 8925 /* fg */, 9025 /* fh */, 9025 /* fi */, 9025 /* fj */, 9025 /* fk */, 9025 /* fl */, 9025 /* fm */, 9025 /* fn */, 8925 /* fo */, 9025 /* fp */, 8925 /* fq */, 9025 /* fr */, 9025 /* fs */, 9025 /* ft */, 9025 /* fu */, 9275 /* fv */, 9025 /* fw */, 9025 /* fx */, 9275 /* fy */, 9025 /* fz */, 9025 /* f{ */, 9025 /* f| */, 9400 /* f} */, 9025 /* f~ */}, + {13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g */, 13800 /* g! 
*/, 13925 /* g" */, 13800 /* g# */, 13800 /* g$ */, 13800 /* g% */, 13675 /* g& */, 13925 /* g' */, 13800 /* g( */, 14475 /* g) */, 13925 /* g* */, 13800 /* g+ */, 13950 /* g, */, 13800 /* g- */, 13950 /* g. */, 14375 /* g/ */, 13800 /* g0 */, 13800 /* g1 */, 13800 /* g2 */, 13800 /* g3 */, 13800 /* g4 */, 13800 /* g5 */, 13800 /* g6 */, 13800 /* g7 */, 13800 /* g8 */, 13800 /* g9 */, 13800 /* g: */, 13800 /* g; */, 13800 /* g< */, 13800 /* g= */, 13800 /* g> */, 13800 /* g? */, 13800 /* g@ */, 14425 /* gA */, 13800 /* gB */, 13800 /* gC */, 13800 /* gD */, 13800 /* gE */, 13800 /* gF */, 13800 /* gG */, 13800 /* gH */, 13800 /* gI */, 13575 /* gJ */, 13800 /* gK */, 13800 /* gL */, 13800 /* gM */, 13800 /* gN */, 13800 /* gO */, 13800 /* gP */, 13800 /* gQ */, 13800 /* gR */, 13800 /* gS */, 13575 /* gT */, 13800 /* gU */, 13625 /* gV */, 13800 /* gW */, 14075 /* gX */, 13600 /* gY */, 14050 /* gZ */, 13800 /* g[ */, 13800 /* g\ */, 14100 /* g] */, 13800 /* g^ */, 15200 /* g_ */, 13800 /* g` */, 13750 /* ga */, 13800 /* gb */, 13650 /* gc */, 13650 /* gd */, 13650 /* ge */, 13800 /* gf */, 13975 /* gg */, 13800 /* gh */, 13800 /* gi */, 14500 /* gj */, 13800 /* gk */, 13800 /* gl */, 13800 /* gm */, 13800 /* gn */, 13650 /* go */, 13800 /* gp */, 13650 /* gq */, 13800 /* gr */, 13800 /* gs */, 13800 /* gt */, 13700 /* gu */, 13800 /* gv */, 13800 /* gw */, 13800 /* gx */, 13925 /* gy */, 13800 /* gz */, 13800 /* g{ */, 13800 /* g| */, 14100 /* g} */, 13800 /* g~ */}, + {14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h */, 14900 /* h! */, 14525 /* h" */, 14900 /* h# */, 14900 /* h$ */, 14900 /* h% */, 14900 /* h& */, 14525 /* h' */, 14900 /* h( */, 14900 /* h) */, 14525 /* h* */, 14900 /* h+ */, 14900 /* h, */, 14900 /* h- */, 14900 /* h. */, 14900 /* h/ */, 14900 /* h0 */, 14900 /* h1 */, 14900 /* h2 */, 14900 /* h3 */, 14900 /* h4 */, 14900 /* h5 */, 14900 /* h6 */, 14900 /* h7 */, 14900 /* h8 */, 14900 /* h9 */, 14900 /* h: */, 14900 /* h; */, 14900 /* h< */, 14900 /* h= */, 14900 /* h> */, 14900 /* h? 
*/, 14900 /* h@ */, 14900 /* hA */, 14900 /* hB */, 14900 /* hC */, 14900 /* hD */, 14900 /* hE */, 14900 /* hF */, 14900 /* hG */, 14900 /* hH */, 14900 /* hI */, 14900 /* hJ */, 14900 /* hK */, 14900 /* hL */, 14900 /* hM */, 14900 /* hN */, 14900 /* hO */, 14900 /* hP */, 14900 /* hQ */, 14900 /* hR */, 14900 /* hS */, 13525 /* hT */, 14900 /* hU */, 14025 /* hV */, 14275 /* hW */, 14900 /* hX */, 13525 /* hY */, 14900 /* hZ */, 14900 /* h[ */, 14900 /* h\ */, 14900 /* h] */, 14900 /* h^ */, 14900 /* h_ */, 14900 /* h` */, 14900 /* ha */, 14900 /* hb */, 14900 /* hc */, 14900 /* hd */, 14900 /* he */, 14900 /* hf */, 14900 /* hg */, 14900 /* hh */, 14900 /* hi */, 14900 /* hj */, 14900 /* hk */, 14900 /* hl */, 14900 /* hm */, 14900 /* hn */, 14900 /* ho */, 14900 /* hp */, 14900 /* hq */, 14900 /* hr */, 14900 /* hs */, 14900 /* ht */, 14900 /* hu */, 14700 /* hv */, 14750 /* hw */, 14900 /* hx */, 14700 /* hy */, 14900 /* hz */, 14900 /* h{ */, 14900 /* h| */, 14900 /* h} */, 14900 /* h~ */}, + {7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i */, 7150 /* i! */, 7150 /* i" */, 7150 /* i# */, 7150 /* i$ */, 7150 /* i% */, 7150 /* i& */, 7150 /* i' */, 7150 /* i( */, 7150 /* i) */, 7150 /* i* */, 7150 /* i+ */, 7150 /* i, */, 7150 /* i- */, 7150 /* i. */, 7150 /* i/ */, 7150 /* i0 */, 7150 /* i1 */, 7150 /* i2 */, 7150 /* i3 */, 7150 /* i4 */, 7150 /* i5 */, 7150 /* i6 */, 7150 /* i7 */, 7150 /* i8 */, 7150 /* i9 */, 7150 /* i: */, 7150 /* i; */, 7150 /* i< */, 7150 /* i= */, 7150 /* i> */, 7150 /* i? */, 7150 /* i@ */, 7150 /* iA */, 7150 /* iB */, 7150 /* iC */, 7150 /* iD */, 7150 /* iE */, 7150 /* iF */, 7150 /* iG */, 7150 /* iH */, 7150 /* iI */, 6900 /* iJ */, 7150 /* iK */, 7150 /* iL */, 7150 /* iM */, 7150 /* iN */, 7150 /* iO */, 7150 /* iP */, 7150 /* iQ */, 7150 /* iR */, 7150 /* iS */, 7150 /* iT */, 7150 /* iU */, 7150 /* iV */, 7150 /* iW */, 7150 /* iX */, 6900 /* iY */, 7150 /* iZ */, 7150 /* i[ */, 7150 /* i\ */, 7150 /* i] */, 7150 /* i^ */, 7150 /* i_ */, 7150 /* i` */, 7150 /* ia */, 7150 /* ib */, 7150 /* ic */, 7150 /* id */, 7150 /* ie */, 7150 /* if */, 7150 /* ig */, 7150 /* ih */, 7150 /* ii */, 7150 /* ij */, 7150 /* ik */, 7150 /* il */, 7150 /* im */, 7150 /* in */, 7150 /* io */, 7150 /* ip */, 7150 /* iq */, 7150 /* ir */, 7150 /* is */, 7150 /* it */, 7150 /* iu */, 7150 /* iv */, 7150 /* iw */, 7150 /* ix */, 7150 /* iy */, 7150 /* iz */, 7150 /* i{ */, 7150 /* i| */, 7150 /* i} */, 7150 /* i~ */}, + {7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j */, 7150 /* j! 
*/, 7150 /* j" */, 7150 /* j# */, 7150 /* j$ */, 7150 /* j% */, 7150 /* j& */, 7150 /* j' */, 7150 /* j( */, 7150 /* j) */, 7150 /* j* */, 7150 /* j+ */, 7150 /* j, */, 7150 /* j- */, 7150 /* j. */, 7150 /* j/ */, 7150 /* j0 */, 7150 /* j1 */, 7150 /* j2 */, 7150 /* j3 */, 7150 /* j4 */, 7150 /* j5 */, 7150 /* j6 */, 7150 /* j7 */, 7150 /* j8 */, 7150 /* j9 */, 7150 /* j: */, 7150 /* j; */, 7150 /* j< */, 7150 /* j= */, 7150 /* j> */, 7150 /* j? */, 7150 /* j@ */, 7150 /* jA */, 7150 /* jB */, 7150 /* jC */, 7150 /* jD */, 7150 /* jE */, 7150 /* jF */, 7150 /* jG */, 7150 /* jH */, 7150 /* jI */, 7150 /* jJ */, 7150 /* jK */, 7150 /* jL */, 7150 /* jM */, 7150 /* jN */, 7150 /* jO */, 7150 /* jP */, 7150 /* jQ */, 7150 /* jR */, 7150 /* jS */, 6150 /* jT */, 7150 /* jU */, 6525 /* jV */, 6900 /* jW */, 7150 /* jX */, 7150 /* jY */, 7150 /* jZ */, 7150 /* j[ */, 7150 /* j\ */, 7150 /* j] */, 7150 /* j^ */, 7150 /* j_ */, 7150 /* j` */, 7150 /* ja */, 7150 /* jb */, 7150 /* jc */, 7150 /* jd */, 7150 /* je */, 7150 /* jf */, 7150 /* jg */, 7150 /* jh */, 7150 /* ji */, 7150 /* jj */, 7150 /* jk */, 7150 /* jl */, 7150 /* jm */, 7150 /* jn */, 7150 /* jo */, 7150 /* jp */, 7150 /* jq */, 7150 /* jr */, 7150 /* js */, 7150 /* jt */, 7150 /* ju */, 7150 /* jv */, 7150 /* jw */, 7150 /* jx */, 7150 /* jy */, 7150 /* jz */, 7150 /* j{ */, 7150 /* j| */, 7150 /* j} */, 7150 /* j~ */}, + {14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k */, 14425 /* k! */, 14425 /* k" */, 14425 /* k# */, 14425 /* k$ */, 14425 /* k% */, 14175 /* k& */, 14425 /* k' */, 14425 /* k( */, 14425 /* k) */, 14425 /* k* */, 14425 /* k+ */, 14425 /* k, */, 13550 /* k- */, 14425 /* k. */, 14425 /* k/ */, 14425 /* k0 */, 14425 /* k1 */, 14425 /* k2 */, 14425 /* k3 */, 14425 /* k4 */, 14425 /* k5 */, 14425 /* k6 */, 14425 /* k7 */, 14425 /* k8 */, 14425 /* k9 */, 14425 /* k: */, 14425 /* k; */, 14425 /* k< */, 14425 /* k= */, 14425 /* k> */, 14425 /* k? 
*/, 14175 /* k@ */, 14800 /* kA */, 14425 /* kB */, 14100 /* kC */, 14425 /* kD */, 14425 /* kE */, 14425 /* kF */, 14100 /* kG */, 14425 /* kH */, 14425 /* kI */, 14300 /* kJ */, 14425 /* kK */, 14425 /* kL */, 14425 /* kM */, 14425 /* kN */, 14100 /* kO */, 14425 /* kP */, 14100 /* kQ */, 14425 /* kR */, 14250 /* kS */, 13625 /* kT */, 14425 /* kU */, 13975 /* kV */, 14250 /* kW */, 14425 /* kX */, 13275 /* kY */, 14825 /* kZ */, 14425 /* k[ */, 14425 /* k\ */, 14425 /* k] */, 14425 /* k^ */, 14925 /* k_ */, 14425 /* k` */, 14425 /* ka */, 14425 /* kb */, 13750 /* kc */, 13925 /* kd */, 13750 /* ke */, 14425 /* kf */, 14425 /* kg */, 14425 /* kh */, 14425 /* ki */, 14425 /* kj */, 14425 /* kk */, 14425 /* kl */, 14425 /* km */, 14425 /* kn */, 13750 /* ko */, 14425 /* kp */, 13925 /* kq */, 14425 /* kr */, 14175 /* ks */, 14175 /* kt */, 14175 /* ku */, 14125 /* kv */, 14300 /* kw */, 14425 /* kx */, 14300 /* ky */, 14425 /* kz */, 14425 /* k{ */, 14425 /* k| */, 14425 /* k} */, 14425 /* k~ */}, + {7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l */, 7575 /* l! */, 7575 /* l" */, 7575 /* l# */, 7575 /* l$ */, 7575 /* l% */, 7575 /* l& */, 7575 /* l' */, 7575 /* l( */, 7575 /* l) */, 7575 /* l* */, 7575 /* l+ */, 7825 /* l, */, 7325 /* l- */, 7825 /* l. */, 7575 /* l/ */, 7575 /* l0 */, 7575 /* l1 */, 7575 /* l2 */, 7575 /* l3 */, 7575 /* l4 */, 7575 /* l5 */, 7575 /* l6 */, 7575 /* l7 */, 7575 /* l8 */, 7575 /* l9 */, 7575 /* l: */, 7575 /* l; */, 7575 /* l< */, 7575 /* l= */, 7575 /* l> */, 7575 /* l? */, 7575 /* l@ */, 8175 /* lA */, 7575 /* lB */, 7575 /* lC */, 7575 /* lD */, 7575 /* lE */, 7575 /* lF */, 7575 /* lG */, 7575 /* lH */, 7575 /* lI */, 7825 /* lJ */, 7575 /* lK */, 7575 /* lL */, 7575 /* lM */, 7575 /* lN */, 7575 /* lO */, 7575 /* lP */, 7575 /* lQ */, 7575 /* lR */, 7575 /* lS */, 7250 /* lT */, 7575 /* lU */, 7450 /* lV */, 7575 /* lW */, 7950 /* lX */, 7200 /* lY */, 7950 /* lZ */, 7575 /* l[ */, 7575 /* l\ */, 7575 /* l] */, 7575 /* l^ */, 8200 /* l_ */, 7575 /* l` */, 7575 /* la */, 7575 /* lb */, 7575 /* lc */, 7575 /* ld */, 7575 /* le */, 7575 /* lf */, 7575 /* lg */, 7575 /* lh */, 7575 /* li */, 7575 /* lj */, 7575 /* lk */, 7375 /* ll */, 7575 /* lm */, 7575 /* ln */, 7575 /* lo */, 7575 /* lp */, 7575 /* lq */, 7575 /* lr */, 7575 /* ls */, 7450 /* lt */, 7575 /* lu */, 7425 /* lv */, 7475 /* lw */, 7575 /* lx */, 7425 /* ly */, 7725 /* lz */, 7575 /* l{ */, 7575 /* l| */, 7575 /* l} */, 7575 /* l~ */}, + {22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m */, 22350 /* m! 
*/, 21975 /* m" */, 22350 /* m# */, 22350 /* m$ */, 22350 /* m% */, 22350 /* m& */, 21975 /* m' */, 22350 /* m( */, 22350 /* m) */, 21975 /* m* */, 22350 /* m+ */, 22350 /* m, */, 22350 /* m- */, 22350 /* m. */, 22350 /* m/ */, 22350 /* m0 */, 22350 /* m1 */, 22350 /* m2 */, 22350 /* m3 */, 22350 /* m4 */, 22350 /* m5 */, 22350 /* m6 */, 22350 /* m7 */, 22350 /* m8 */, 22350 /* m9 */, 22350 /* m: */, 22350 /* m; */, 22350 /* m< */, 22350 /* m= */, 22350 /* m> */, 22350 /* m? */, 22350 /* m@ */, 22350 /* mA */, 22350 /* mB */, 22350 /* mC */, 22350 /* mD */, 22350 /* mE */, 22350 /* mF */, 22350 /* mG */, 22350 /* mH */, 22350 /* mI */, 22350 /* mJ */, 22350 /* mK */, 22350 /* mL */, 22350 /* mM */, 22350 /* mN */, 22350 /* mO */, 22350 /* mP */, 22350 /* mQ */, 22350 /* mR */, 22350 /* mS */, 20975 /* mT */, 22350 /* mU */, 21475 /* mV */, 21725 /* mW */, 22350 /* mX */, 20975 /* mY */, 22350 /* mZ */, 22350 /* m[ */, 22350 /* m\ */, 22350 /* m] */, 22350 /* m^ */, 22350 /* m_ */, 22350 /* m` */, 22350 /* ma */, 22350 /* mb */, 22350 /* mc */, 22350 /* md */, 22350 /* me */, 22350 /* mf */, 22350 /* mg */, 22350 /* mh */, 22350 /* mi */, 22350 /* mj */, 22350 /* mk */, 22350 /* ml */, 22350 /* mm */, 22350 /* mn */, 22350 /* mo */, 22350 /* mp */, 22350 /* mq */, 22350 /* mr */, 22350 /* ms */, 22350 /* mt */, 22350 /* mu */, 22150 /* mv */, 22200 /* mw */, 22350 /* mx */, 22150 /* my */, 22350 /* mz */, 22350 /* m{ */, 22350 /* m| */, 22350 /* m} */, 22350 /* m~ */}, + {14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n */, 14900 /* n! */, 14525 /* n" */, 14900 /* n# */, 14900 /* n$ */, 14900 /* n% */, 14900 /* n& */, 14525 /* n' */, 14900 /* n( */, 14900 /* n) */, 14525 /* n* */, 14900 /* n+ */, 14900 /* n, */, 14900 /* n- */, 14900 /* n. */, 14900 /* n/ */, 14900 /* n0 */, 14900 /* n1 */, 14900 /* n2 */, 14900 /* n3 */, 14900 /* n4 */, 14900 /* n5 */, 14900 /* n6 */, 14900 /* n7 */, 14900 /* n8 */, 14900 /* n9 */, 14900 /* n: */, 14900 /* n; */, 14900 /* n< */, 14900 /* n= */, 14900 /* n> */, 14900 /* n? 
*/, 14900 /* n@ */, 14900 /* nA */, 14900 /* nB */, 14900 /* nC */, 14900 /* nD */, 14900 /* nE */, 14900 /* nF */, 14900 /* nG */, 14900 /* nH */, 14900 /* nI */, 14900 /* nJ */, 14900 /* nK */, 14900 /* nL */, 14900 /* nM */, 14900 /* nN */, 14900 /* nO */, 14900 /* nP */, 14900 /* nQ */, 14900 /* nR */, 14900 /* nS */, 13525 /* nT */, 14900 /* nU */, 14025 /* nV */, 14275 /* nW */, 14900 /* nX */, 13525 /* nY */, 14900 /* nZ */, 14900 /* n[ */, 14900 /* n\ */, 14900 /* n] */, 14900 /* n^ */, 14900 /* n_ */, 14900 /* n` */, 14900 /* na */, 14900 /* nb */, 14900 /* nc */, 14900 /* nd */, 14900 /* ne */, 14900 /* nf */, 14900 /* ng */, 14900 /* nh */, 14900 /* ni */, 14900 /* nj */, 14900 /* nk */, 14900 /* nl */, 14900 /* nm */, 14900 /* nn */, 14900 /* no */, 14900 /* np */, 14900 /* nq */, 14900 /* nr */, 14900 /* ns */, 14900 /* nt */, 14900 /* nu */, 14700 /* nv */, 14750 /* nw */, 14900 /* nx */, 14700 /* ny */, 14900 /* nz */, 14900 /* n{ */, 14900 /* n| */, 14900 /* n} */, 14900 /* n~ */}, + {14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o */, 14100 /* o! */, 13600 /* o" */, 14100 /* o# */, 14100 /* o$ */, 14100 /* o% */, 14100 /* o& */, 13600 /* o' */, 14100 /* o( */, 14100 /* o) */, 13600 /* o* */, 14100 /* o+ */, 13725 /* o, */, 14350 /* o- */, 13725 /* o. */, 14100 /* o/ */, 14100 /* o0 */, 14100 /* o1 */, 14100 /* o2 */, 14100 /* o3 */, 14100 /* o4 */, 14100 /* o5 */, 14100 /* o6 */, 14100 /* o7 */, 14100 /* o8 */, 14100 /* o9 */, 14100 /* o: */, 14100 /* o; */, 14100 /* o< */, 14100 /* o= */, 14100 /* o> */, 14100 /* o? */, 14100 /* o@ */, 14100 /* oA */, 14100 /* oB */, 14100 /* oC */, 14100 /* oD */, 14100 /* oE */, 14100 /* oF */, 14100 /* oG */, 14100 /* oH */, 13975 /* oI */, 14100 /* oJ */, 14100 /* oK */, 14100 /* oL */, 14100 /* oM */, 14100 /* oN */, 14100 /* oO */, 14100 /* oP */, 14100 /* oQ */, 14100 /* oR */, 14100 /* oS */, 12475 /* oT */, 14100 /* oU */, 13225 /* oV */, 13475 /* oW */, 13600 /* oX */, 12600 /* oY */, 13975 /* oZ */, 14100 /* o[ */, 14100 /* o\ */, 14100 /* o] */, 14100 /* o^ */, 12600 /* o_ */, 14100 /* o` */, 14100 /* oa */, 14100 /* ob */, 14100 /* oc */, 14100 /* od */, 14100 /* oe */, 13950 /* of */, 14100 /* og */, 14100 /* oh */, 14100 /* oi */, 14100 /* oj */, 14100 /* ok */, 14100 /* ol */, 14100 /* om */, 14100 /* on */, 14100 /* oo */, 14100 /* op */, 14100 /* oq */, 14100 /* or */, 14100 /* os */, 13950 /* ot */, 14100 /* ou */, 13900 /* ov */, 13850 /* ow */, 13700 /* ox */, 13850 /* oy */, 13900 /* oz */, 14100 /* o{ */, 14100 /* o| */, 14100 /* o} */, 14100 /* o~ */}, + {15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p */, 15200 /* p! 
*/, 14700 /* p" */, 15200 /* p# */, 15200 /* p$ */, 15200 /* p% */, 15200 /* p& */, 14700 /* p' */, 15200 /* p( */, 15200 /* p) */, 14700 /* p* */, 15200 /* p+ */, 14825 /* p, */, 15450 /* p- */, 14825 /* p. */, 15200 /* p/ */, 15200 /* p0 */, 15200 /* p1 */, 15200 /* p2 */, 15200 /* p3 */, 15200 /* p4 */, 15200 /* p5 */, 15200 /* p6 */, 15200 /* p7 */, 15200 /* p8 */, 15200 /* p9 */, 15200 /* p: */, 15200 /* p; */, 15200 /* p< */, 15200 /* p= */, 15200 /* p> */, 15200 /* p? */, 15200 /* p@ */, 15200 /* pA */, 15200 /* pB */, 15200 /* pC */, 15200 /* pD */, 15200 /* pE */, 15200 /* pF */, 15200 /* pG */, 15200 /* pH */, 15075 /* pI */, 15200 /* pJ */, 15200 /* pK */, 15200 /* pL */, 15200 /* pM */, 15200 /* pN */, 15200 /* pO */, 15200 /* pP */, 15200 /* pQ */, 15200 /* pR */, 15200 /* pS */, 13575 /* pT */, 15200 /* pU */, 14325 /* pV */, 14575 /* pW */, 14700 /* pX */, 13700 /* pY */, 15075 /* pZ */, 15200 /* p[ */, 15200 /* p\ */, 15200 /* p] */, 15200 /* p^ */, 13950 /* p_ */, 15200 /* p` */, 15200 /* pa */, 15200 /* pb */, 15200 /* pc */, 15200 /* pd */, 15200 /* pe */, 15050 /* pf */, 15200 /* pg */, 15200 /* ph */, 15200 /* pi */, 15200 /* pj */, 15200 /* pk */, 15200 /* pl */, 15200 /* pm */, 15200 /* pn */, 15200 /* po */, 15200 /* pp */, 15200 /* pq */, 15200 /* pr */, 15200 /* ps */, 15050 /* pt */, 15200 /* pu */, 15000 /* pv */, 14950 /* pw */, 14800 /* px */, 14950 /* py */, 15000 /* pz */, 15200 /* p{ */, 15200 /* p| */, 15200 /* p} */, 15200 /* p~ */}, + {15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q */, 15200 /* q! */, 15200 /* q" */, 15200 /* q# */, 15200 /* q$ */, 15200 /* q% */, 15200 /* q& */, 15200 /* q' */, 15200 /* q( */, 15200 /* q) */, 15200 /* q* */, 15200 /* q+ */, 15200 /* q, */, 15200 /* q- */, 15200 /* q. */, 15200 /* q/ */, 15200 /* q0 */, 15200 /* q1 */, 15200 /* q2 */, 15200 /* q3 */, 15200 /* q4 */, 15200 /* q5 */, 15200 /* q6 */, 15200 /* q7 */, 15200 /* q8 */, 15200 /* q9 */, 15200 /* q: */, 15200 /* q; */, 15200 /* q< */, 15200 /* q= */, 15200 /* q> */, 15200 /* q? 
*/, 15200 /* q@ */, 15200 /* qA */, 15200 /* qB */, 15200 /* qC */, 15200 /* qD */, 15200 /* qE */, 15200 /* qF */, 15200 /* qG */, 15200 /* qH */, 15200 /* qI */, 15200 /* qJ */, 15200 /* qK */, 15200 /* qL */, 15200 /* qM */, 15200 /* qN */, 15200 /* qO */, 15200 /* qP */, 15200 /* qQ */, 15200 /* qR */, 15200 /* qS */, 14200 /* qT */, 15200 /* qU */, 14575 /* qV */, 14950 /* qW */, 15200 /* qX */, 14450 /* qY */, 15200 /* qZ */, 15200 /* q[ */, 15200 /* q\ */, 15200 /* q] */, 15200 /* q^ */, 15200 /* q_ */, 15200 /* q` */, 15200 /* qa */, 15200 /* qb */, 15200 /* qc */, 15200 /* qd */, 15200 /* qe */, 15200 /* qf */, 15200 /* qg */, 15200 /* qh */, 15200 /* qi */, 15200 /* qj */, 15200 /* qk */, 15200 /* ql */, 15200 /* qm */, 15200 /* qn */, 15200 /* qo */, 15200 /* qp */, 15200 /* qq */, 15200 /* qr */, 15200 /* qs */, 15200 /* qt */, 15200 /* qu */, 15200 /* qv */, 15200 /* qw */, 15200 /* qx */, 15200 /* qy */, 15200 /* qz */, 15200 /* q{ */, 15200 /* q| */, 15200 /* q} */, 15200 /* q~ */}, + {10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r */, 10100 /* r! */, 10850 /* r" */, 10100 /* r# */, 10100 /* r$ */, 10100 /* r% */, 9600 /* r& */, 10850 /* r' */, 10100 /* r( */, 10100 /* r) */, 10850 /* r* */, 10100 /* r+ */, 8100 /* r, */, 10100 /* r- */, 8100 /* r. */, 9550 /* r/ */, 10100 /* r0 */, 10100 /* r1 */, 10100 /* r2 */, 10100 /* r3 */, 10100 /* r4 */, 10100 /* r5 */, 10100 /* r6 */, 10100 /* r7 */, 10100 /* r8 */, 10100 /* r9 */, 10100 /* r: */, 10100 /* r; */, 10100 /* r< */, 10100 /* r= */, 10100 /* r> */, 10100 /* r? */, 10100 /* r@ */, 9100 /* rA */, 10100 /* rB */, 10350 /* rC */, 10100 /* rD */, 10100 /* rE */, 10100 /* rF */, 10350 /* rG */, 10100 /* rH */, 10100 /* rI */, 9500 /* rJ */, 10100 /* rK */, 10100 /* rL */, 10100 /* rM */, 10100 /* rN */, 10350 /* rO */, 10100 /* rP */, 10350 /* rQ */, 10100 /* rR */, 10275 /* rS */, 9850 /* rT */, 10100 /* rU */, 10100 /* rV */, 10100 /* rW */, 9600 /* rX */, 9600 /* rY */, 9825 /* rZ */, 10100 /* r[ */, 10100 /* r\ */, 10100 /* r] */, 10100 /* r^ */, 8100 /* r_ */, 10100 /* r` */, 9750 /* ra */, 10100 /* rb */, 9975 /* rc */, 10000 /* rd */, 9975 /* re */, 10325 /* rf */, 9750 /* rg */, 10100 /* rh */, 10100 /* ri */, 10100 /* rj */, 10100 /* rk */, 10100 /* rl */, 10100 /* rm */, 10100 /* rn */, 9975 /* ro */, 10100 /* rp */, 10000 /* rq */, 10100 /* rr */, 10100 /* rs */, 10200 /* rt */, 10100 /* ru */, 10275 /* rv */, 10225 /* rw */, 10100 /* rx */, 10275 /* ry */, 10100 /* rz */, 10100 /* r{ */, 10100 /* r| */, 10100 /* r} */, 10100 /* r~ */}, + {12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s */, 12600 /* s! 
*/, 12350 /* s" */, 12600 /* s# */, 12600 /* s$ */, 12600 /* s% */, 12600 /* s& */, 12350 /* s' */, 12600 /* s( */, 12600 /* s) */, 12350 /* s* */, 12600 /* s+ */, 12400 /* s, */, 12600 /* s- */, 12400 /* s. */, 12600 /* s/ */, 12600 /* s0 */, 12600 /* s1 */, 12600 /* s2 */, 12600 /* s3 */, 12600 /* s4 */, 12600 /* s5 */, 12600 /* s6 */, 12600 /* s7 */, 12600 /* s8 */, 12600 /* s9 */, 12600 /* s: */, 12600 /* s; */, 12600 /* s< */, 12600 /* s= */, 12600 /* s> */, 12600 /* s? */, 12600 /* s@ */, 12650 /* sA */, 12600 /* sB */, 12600 /* sC */, 12600 /* sD */, 12600 /* sE */, 12600 /* sF */, 12600 /* sG */, 12600 /* sH */, 12600 /* sI */, 12550 /* sJ */, 12600 /* sK */, 12600 /* sL */, 12600 /* sM */, 12600 /* sN */, 12600 /* sO */, 12600 /* sP */, 12600 /* sQ */, 12600 /* sR */, 12600 /* sS */, 10975 /* sT */, 12600 /* sU */, 11775 /* sV */, 12600 /* sW */, 12025 /* sX */, 11250 /* sY */, 12600 /* sZ */, 12600 /* s[ */, 12600 /* s\ */, 12600 /* s] */, 12600 /* s^ */, 11350 /* s_ */, 12600 /* s` */, 12600 /* sa */, 12600 /* sb */, 12600 /* sc */, 12600 /* sd */, 12600 /* se */, 12525 /* sf */, 12600 /* sg */, 12600 /* sh */, 12600 /* si */, 12600 /* sj */, 12600 /* sk */, 12600 /* sl */, 12600 /* sm */, 12600 /* sn */, 12600 /* so */, 12600 /* sp */, 12600 /* sq */, 12600 /* sr */, 12425 /* ss */, 12400 /* st */, 12600 /* su */, 12300 /* sv */, 12350 /* sw */, 12175 /* sx */, 12375 /* sy */, 12350 /* sz */, 12600 /* s{ */, 12600 /* s| */, 12600 /* s} */, 12600 /* s~ */}, + {9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t */, 9575 /* t! */, 9575 /* t" */, 9575 /* t# */, 9575 /* t$ */, 9575 /* t% */, 9325 /* t& */, 9575 /* t' */, 9575 /* t( */, 9575 /* t) */, 9575 /* t* */, 9575 /* t+ */, 9575 /* t, */, 9325 /* t- */, 9575 /* t. */, 9575 /* t/ */, 9575 /* t0 */, 9575 /* t1 */, 9575 /* t2 */, 9575 /* t3 */, 9575 /* t4 */, 9575 /* t5 */, 9575 /* t6 */, 9575 /* t7 */, 9575 /* t8 */, 9575 /* t9 */, 9575 /* t: */, 9575 /* t; */, 9575 /* t< */, 9575 /* t= */, 9575 /* t> */, 9575 /* t? 
*/, 9575 /* t@ */, 9725 /* tA */, 9575 /* tB */, 9575 /* tC */, 9575 /* tD */, 9575 /* tE */, 9575 /* tF */, 9575 /* tG */, 9575 /* tH */, 9575 /* tI */, 9575 /* tJ */, 9575 /* tK */, 9575 /* tL */, 9575 /* tM */, 9575 /* tN */, 9575 /* tO */, 9575 /* tP */, 9575 /* tQ */, 9575 /* tR */, 9575 /* tS */, 8975 /* tT */, 9575 /* tU */, 9575 /* tV */, 9575 /* tW */, 9575 /* tX */, 9075 /* tY */, 9575 /* tZ */, 9575 /* t[ */, 9575 /* t\ */, 9575 /* t] */, 9575 /* t^ */, 10075 /* t_ */, 9575 /* t` */, 9575 /* ta */, 9575 /* tb */, 9475 /* tc */, 9475 /* td */, 9475 /* te */, 9575 /* tf */, 9575 /* tg */, 9575 /* th */, 9575 /* ti */, 9575 /* tj */, 9575 /* tk */, 9575 /* tl */, 9575 /* tm */, 9575 /* tn */, 9475 /* to */, 9575 /* tp */, 9475 /* tq */, 9575 /* tr */, 9575 /* ts */, 9450 /* tt */, 9575 /* tu */, 9575 /* tv */, 9575 /* tw */, 9575 /* tx */, 9575 /* ty */, 9575 /* tz */, 9575 /* t{ */, 9575 /* t| */, 9575 /* t} */, 9575 /* t~ */}, + {14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u */, 14900 /* u! */, 14900 /* u" */, 14900 /* u# */, 14900 /* u$ */, 14900 /* u% */, 14900 /* u& */, 14900 /* u' */, 14900 /* u( */, 14900 /* u) */, 14900 /* u* */, 14900 /* u+ */, 14900 /* u, */, 14900 /* u- */, 14900 /* u. */, 14900 /* u/ */, 14900 /* u0 */, 14900 /* u1 */, 14900 /* u2 */, 14900 /* u3 */, 14900 /* u4 */, 14900 /* u5 */, 14900 /* u6 */, 14900 /* u7 */, 14900 /* u8 */, 14900 /* u9 */, 14900 /* u: */, 14900 /* u; */, 14900 /* u< */, 14900 /* u= */, 14900 /* u> */, 14900 /* u? */, 14900 /* u@ */, 14900 /* uA */, 14900 /* uB */, 14900 /* uC */, 14900 /* uD */, 14900 /* uE */, 14900 /* uF */, 14900 /* uG */, 14900 /* uH */, 14900 /* uI */, 14900 /* uJ */, 14900 /* uK */, 14900 /* uL */, 14900 /* uM */, 14900 /* uN */, 14900 /* uO */, 14900 /* uP */, 14900 /* uQ */, 14900 /* uR */, 14900 /* uS */, 13775 /* uT */, 14900 /* uU */, 14275 /* uV */, 14650 /* uW */, 14900 /* uX */, 13650 /* uY */, 14900 /* uZ */, 14900 /* u[ */, 14900 /* u\ */, 14900 /* u] */, 14900 /* u^ */, 14900 /* u_ */, 14900 /* u` */, 14900 /* ua */, 14900 /* ub */, 14900 /* uc */, 14900 /* ud */, 14900 /* ue */, 14900 /* uf */, 14900 /* ug */, 14900 /* uh */, 14900 /* ui */, 14900 /* uj */, 14900 /* uk */, 14900 /* ul */, 14900 /* um */, 14900 /* un */, 14900 /* uo */, 14900 /* up */, 14900 /* uq */, 14900 /* ur */, 14900 /* us */, 14900 /* ut */, 14900 /* uu */, 14900 /* uv */, 14900 /* uw */, 14900 /* ux */, 14900 /* uy */, 14900 /* uz */, 14900 /* u{ */, 14900 /* u| */, 14900 /* u} */, 14900 /* u~ */}, + {13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v */, 13450 /* v! 
*/, 14075 /* v" */, 13450 /* v# */, 13450 /* v$ */, 13450 /* v% */, 12950 /* v& */, 14075 /* v' */, 13450 /* v( */, 13450 /* v) */, 14075 /* v* */, 13450 /* v+ */, 12250 /* v, */, 13325 /* v- */, 12250 /* v. */, 13075 /* v/ */, 13450 /* v0 */, 13450 /* v1 */, 13450 /* v2 */, 13450 /* v3 */, 13450 /* v4 */, 13450 /* v5 */, 13450 /* v6 */, 13450 /* v7 */, 13450 /* v8 */, 13450 /* v9 */, 13450 /* v: */, 13450 /* v; */, 13450 /* v< */, 13450 /* v= */, 13450 /* v> */, 13450 /* v? */, 13450 /* v@ */, 12700 /* vA */, 13450 /* vB */, 13450 /* vC */, 13450 /* vD */, 13450 /* vE */, 13450 /* vF */, 13450 /* vG */, 13450 /* vH */, 13325 /* vI */, 12700 /* vJ */, 13450 /* vK */, 13450 /* vL */, 13450 /* vM */, 13450 /* vN */, 13450 /* vO */, 13450 /* vP */, 13450 /* vQ */, 13450 /* vR */, 13450 /* vS */, 12825 /* vT */, 13450 /* vU */, 13200 /* vV */, 13450 /* vW */, 13450 /* vX */, 12575 /* vY */, 13450 /* vZ */, 13450 /* v[ */, 13450 /* v\ */, 13450 /* v] */, 13450 /* v^ */, 11700 /* v_ */, 13450 /* v` */, 13225 /* va */, 13450 /* vb */, 13250 /* vc */, 13250 /* vd */, 13250 /* ve */, 13600 /* vf */, 13125 /* vg */, 13450 /* vh */, 13450 /* vi */, 13450 /* vj */, 13450 /* vk */, 13450 /* vl */, 13450 /* vm */, 13450 /* vn */, 13250 /* vo */, 13450 /* vp */, 13250 /* vq */, 13450 /* vr */, 13400 /* vs */, 13450 /* vt */, 13450 /* vu */, 13450 /* vv */, 13450 /* vw */, 13450 /* vx */, 13450 /* vy */, 13350 /* vz */, 13450 /* v{ */, 13450 /* v| */, 13450 /* v} */, 13450 /* v~ */}, + {21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w */, 21025 /* w! */, 21525 /* w" */, 21025 /* w# */, 21025 /* w$ */, 21025 /* w% */, 20525 /* w& */, 21525 /* w' */, 21025 /* w( */, 21025 /* w) */, 21525 /* w* */, 21025 /* w+ */, 20025 /* w, */, 20925 /* w- */, 20025 /* w. */, 21025 /* w/ */, 21025 /* w0 */, 21025 /* w1 */, 21025 /* w2 */, 21025 /* w3 */, 21025 /* w4 */, 21025 /* w5 */, 21025 /* w6 */, 21025 /* w7 */, 21025 /* w8 */, 21025 /* w9 */, 21025 /* w: */, 21025 /* w; */, 21025 /* w< */, 21025 /* w= */, 21025 /* w> */, 21025 /* w? 
*/, 20775 /* w@ */, 20525 /* wA */, 21025 /* wB */, 21025 /* wC */, 21025 /* wD */, 21025 /* wE */, 21025 /* wF */, 21025 /* wG */, 21025 /* wH */, 21025 /* wI */, 20275 /* wJ */, 21025 /* wK */, 21025 /* wL */, 21025 /* wM */, 21025 /* wN */, 21025 /* wO */, 21025 /* wP */, 21025 /* wQ */, 21025 /* wR */, 21025 /* wS */, 20150 /* wT */, 21025 /* wU */, 20775 /* wV */, 20775 /* wW */, 21025 /* wX */, 20025 /* wY */, 21025 /* wZ */, 21025 /* w[ */, 21025 /* w\ */, 21025 /* w] */, 21025 /* w^ */, 19775 /* w_ */, 21025 /* w` */, 20675 /* wa */, 21025 /* wb */, 20775 /* wc */, 20775 /* wd */, 20775 /* we */, 21025 /* wf */, 20600 /* wg */, 21025 /* wh */, 21025 /* wi */, 21025 /* wj */, 21025 /* wk */, 21025 /* wl */, 21025 /* wm */, 21025 /* wn */, 20775 /* wo */, 21025 /* wp */, 20775 /* wq */, 21025 /* wr */, 20925 /* ws */, 21025 /* wt */, 21025 /* wu */, 21025 /* wv */, 21025 /* ww */, 21025 /* wx */, 21025 /* wy */, 21025 /* wz */, 21025 /* w{ */, 21025 /* w| */, 21025 /* w} */, 21025 /* w~ */}, + {14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x */, 14000 /* x! */, 14000 /* x" */, 14000 /* x# */, 14000 /* x$ */, 14000 /* x% */, 13250 /* x& */, 14000 /* x' */, 14000 /* x( */, 14000 /* x) */, 14000 /* x* */, 14000 /* x+ */, 14000 /* x, */, 13125 /* x- */, 14000 /* x. */, 14000 /* x/ */, 14000 /* x0 */, 14000 /* x1 */, 14000 /* x2 */, 14000 /* x3 */, 14000 /* x4 */, 14000 /* x5 */, 14000 /* x6 */, 14000 /* x7 */, 14000 /* x8 */, 14000 /* x9 */, 14000 /* x: */, 14000 /* x; */, 14000 /* x< */, 14000 /* x= */, 14000 /* x> */, 14000 /* x? */, 13500 /* x@ */, 14000 /* xA */, 14000 /* xB */, 14000 /* xC */, 14000 /* xD */, 14000 /* xE */, 14000 /* xF */, 14000 /* xG */, 14000 /* xH */, 14000 /* xI */, 13625 /* xJ */, 14000 /* xK */, 14000 /* xL */, 14000 /* xM */, 14000 /* xN */, 14000 /* xO */, 14000 /* xP */, 14000 /* xQ */, 14000 /* xR */, 14000 /* xS */, 13125 /* xT */, 14000 /* xU */, 13750 /* xV */, 13750 /* xW */, 14000 /* xX */, 13000 /* xY */, 14000 /* xZ */, 14000 /* x[ */, 14000 /* x\ */, 14000 /* x] */, 14000 /* x^ */, 14500 /* x_ */, 14000 /* x` */, 14000 /* xa */, 14000 /* xb */, 13600 /* xc */, 13600 /* xd */, 13600 /* xe */, 14000 /* xf */, 14000 /* xg */, 14000 /* xh */, 14000 /* xi */, 14000 /* xj */, 14000 /* xk */, 14000 /* xl */, 14000 /* xm */, 14000 /* xn */, 13600 /* xo */, 14000 /* xp */, 13600 /* xq */, 14000 /* xr */, 13900 /* xs */, 14000 /* xt */, 14000 /* xu */, 14000 /* xv */, 14000 /* xw */, 14000 /* xx */, 14000 /* xy */, 14000 /* xz */, 14000 /* x{ */, 14000 /* x| */, 14000 /* x} */, 14000 /* x~ */}, + {13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y */, 13350 /* y! 
*/, 13850 /* y" */, 13350 /* y# */, 13350 /* y$ */, 13350 /* y% */, 12600 /* y& */, 13850 /* y' */, 13350 /* y( */, 13350 /* y) */, 13850 /* y* */, 13350 /* y+ */, 12225 /* y, */, 13225 /* y- */, 12225 /* y. */, 13225 /* y/ */, 13350 /* y0 */, 13350 /* y1 */, 13350 /* y2 */, 13350 /* y3 */, 13350 /* y4 */, 13350 /* y5 */, 13350 /* y6 */, 13350 /* y7 */, 13350 /* y8 */, 13350 /* y9 */, 13350 /* y: */, 13350 /* y; */, 13350 /* y< */, 13350 /* y= */, 13350 /* y> */, 13350 /* y? */, 13100 /* y@ */, 12975 /* yA */, 13350 /* yB */, 13350 /* yC */, 13350 /* yD */, 13350 /* yE */, 13350 /* yF */, 13350 /* yG */, 13350 /* yH */, 13225 /* yI */, 12600 /* yJ */, 13350 /* yK */, 13350 /* yL */, 13350 /* yM */, 13350 /* yN */, 13350 /* yO */, 13350 /* yP */, 13350 /* yQ */, 13350 /* yR */, 13350 /* yS */, 12600 /* yT */, 13350 /* yU */, 13100 /* yV */, 13350 /* yW */, 13350 /* yX */, 12475 /* yY */, 13350 /* yZ */, 13350 /* y[ */, 13350 /* y\ */, 13350 /* y] */, 13350 /* y^ */, 11350 /* y_ */, 13350 /* y` */, 13000 /* ya */, 13350 /* yb */, 13150 /* yc */, 13150 /* yd */, 13150 /* ye */, 13525 /* yf */, 12825 /* yg */, 13350 /* yh */, 13350 /* yi */, 13350 /* yj */, 13350 /* yk */, 13350 /* yl */, 13350 /* ym */, 13350 /* yn */, 13150 /* yo */, 13350 /* yp */, 13150 /* yq */, 13350 /* yr */, 13125 /* ys */, 13350 /* yt */, 13350 /* yu */, 13350 /* yv */, 13350 /* yw */, 13350 /* yx */, 13350 /* yy */, 13250 /* yz */, 13350 /* y{ */, 13350 /* y| */, 13350 /* y} */, 13350 /* y~ */}, + {12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z */, 12950 /* z! */, 13075 /* z" */, 12950 /* z# */, 12950 /* z$ */, 12950 /* z% */, 12700 /* z& */, 13075 /* z' */, 12950 /* z( */, 12950 /* z) */, 13075 /* z* */, 12950 /* z+ */, 13200 /* z, */, 12450 /* z- */, 13200 /* z. */, 12950 /* z/ */, 12950 /* z0 */, 12950 /* z1 */, 12950 /* z2 */, 12950 /* z3 */, 12950 /* z4 */, 12950 /* z5 */, 12950 /* z6 */, 12950 /* z7 */, 12950 /* z8 */, 12950 /* z9 */, 12950 /* z: */, 12950 /* z; */, 12950 /* z< */, 12950 /* z= */, 12950 /* z> */, 12950 /* z? 
*/, 12700 /* z@ */, 13200 /* zA */, 12950 /* zB */, 12950 /* zC */, 12950 /* zD */, 12950 /* zE */, 12950 /* zF */, 12950 /* zG */, 12950 /* zH */, 12950 /* zI */, 12950 /* zJ */, 12950 /* zK */, 12950 /* zL */, 12950 /* zM */, 12950 /* zN */, 12950 /* zO */, 12950 /* zP */, 12950 /* zQ */, 12950 /* zR */, 12950 /* zS */, 12075 /* zT */, 12950 /* zU */, 12700 /* zV */, 12825 /* zW */, 13200 /* zX */, 12200 /* zY */, 12950 /* zZ */, 12950 /* z[ */, 12950 /* z\ */, 12950 /* z] */, 12950 /* z^ */, 13075 /* z_ */, 12950 /* z` */, 12950 /* za */, 12950 /* zb */, 12650 /* zc */, 12700 /* zd */, 12650 /* ze */, 12950 /* zf */, 12950 /* zg */, 12950 /* zh */, 12950 /* zi */, 12950 /* zj */, 12950 /* zk */, 12950 /* zl */, 12950 /* zm */, 12950 /* zn */, 12650 /* zo */, 12950 /* zp */, 12700 /* zq */, 12950 /* zr */, 12950 /* zs */, 12950 /* zt */, 12950 /* zu */, 12950 /* zv */, 12950 /* zw */, 12950 /* zx */, 12950 /* zy */, 12950 /* zz */, 12950 /* z{ */, 12950 /* z| */, 12950 /* z} */, 12950 /* z~ */}, + {9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* { */, 9300 /* {! */, 9300 /* {" */, 9300 /* {# */, 9300 /* {$ */, 9300 /* {% */, 9300 /* {& */, 9300 /* {' */, 9300 /* {( */, 9300 /* {) */, 9300 /* {* */, 9300 /* {+ */, 9300 /* {, */, 9300 /* {- */, 9300 /* {. */, 9300 /* {/ */, 9300 /* {0 */, 9300 /* {1 */, 9300 /* {2 */, 9300 /* {3 */, 9300 /* {4 */, 9300 /* {5 */, 9300 /* {6 */, 9300 /* {7 */, 9300 /* {8 */, 9300 /* {9 */, 9300 /* {: */, 9300 /* {; */, 9300 /* {< */, 9300 /* {= */, 9300 /* {> */, 9300 /* {? */, 9300 /* {@ */, 9300 /* {A */, 9300 /* {B */, 9300 /* {C */, 9300 /* {D */, 9300 /* {E */, 9300 /* {F */, 9300 /* {G */, 9300 /* {H */, 9300 /* {I */, 9300 /* {J */, 9300 /* {K */, 9300 /* {L */, 9300 /* {M */, 9300 /* {N */, 9300 /* {O */, 9300 /* {P */, 9300 /* {Q */, 9300 /* {R */, 9300 /* {S */, 9300 /* {T */, 9300 /* {U */, 9300 /* {V */, 9300 /* {W */, 9300 /* {X */, 9300 /* {Y */, 9300 /* {Z */, 9300 /* {[ */, 9300 /* {\ */, 9300 /* {] */, 9300 /* {^ */, 9300 /* {_ */, 9300 /* {` */, 9300 /* {a */, 9300 /* {b */, 9300 /* {c */, 9300 /* {d */, 9300 /* {e */, 9300 /* {f */, 9300 /* {g */, 9300 /* {h */, 9300 /* {i */, 9550 /* {j */, 9300 /* {k */, 9300 /* {l */, 9300 /* {m */, 9300 /* {n */, 9300 /* {o */, 9300 /* {p */, 9300 /* {q */, 9300 /* {r */, 9300 /* {s */, 9300 /* {t */, 9300 /* {u */, 9300 /* {v */, 9300 /* {w */, 9300 /* {x */, 9300 /* {y */, 9300 /* {z */, 9300 /* {{ */, 9300 /* {| */, 9300 /* {} */, 9300 /* {~ */}, + {10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* | */, 10050 /* |! 
*/, 10050 /* |" */, 10050 /* |# */, 10050 /* |$ */, 10050 /* |% */, 10050 /* |& */, 10050 /* |' */, 10050 /* |( */, 10050 /* |) */, 10050 /* |* */, 10050 /* |+ */, 10050 /* |, */, 10050 /* |- */, 10050 /* |. */, 10050 /* |/ */, 10050 /* |0 */, 10050 /* |1 */, 10050 /* |2 */, 10050 /* |3 */, 10050 /* |4 */, 10050 /* |5 */, 10050 /* |6 */, 10050 /* |7 */, 10050 /* |8 */, 10050 /* |9 */, 10050 /* |: */, 10050 /* |; */, 10050 /* |< */, 10050 /* |= */, 10050 /* |> */, 10050 /* |? */, 10050 /* |@ */, 10050 /* |A */, 10050 /* |B */, 10050 /* |C */, 10050 /* |D */, 10050 /* |E */, 10050 /* |F */, 10050 /* |G */, 10050 /* |H */, 10050 /* |I */, 10050 /* |J */, 10050 /* |K */, 10050 /* |L */, 10050 /* |M */, 10050 /* |N */, 10050 /* |O */, 10050 /* |P */, 10050 /* |Q */, 10050 /* |R */, 10050 /* |S */, 10050 /* |T */, 10050 /* |U */, 10050 /* |V */, 10050 /* |W */, 10050 /* |X */, 10050 /* |Y */, 10050 /* |Z */, 10050 /* |[ */, 10050 /* |\ */, 10050 /* |] */, 10050 /* |^ */, 10050 /* |_ */, 10050 /* |` */, 10050 /* |a */, 10050 /* |b */, 10050 /* |c */, 10050 /* |d */, 10050 /* |e */, 10050 /* |f */, 10050 /* |g */, 10050 /* |h */, 10050 /* |i */, 10050 /* |j */, 10050 /* |k */, 10050 /* |l */, 10050 /* |m */, 10050 /* |n */, 10050 /* |o */, 10050 /* |p */, 10050 /* |q */, 10050 /* |r */, 10050 /* |s */, 10050 /* |t */, 10050 /* |u */, 10050 /* |v */, 10050 /* |w */, 10050 /* |x */, 10050 /* |y */, 10050 /* |z */, 10050 /* |{ */, 10050 /* || */, 10050 /* |} */, 10050 /* |~ */}, + {9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* } */, 9300 /* }! */, 9300 /* }" */, 9300 /* }# */, 9300 /* }$ */, 9300 /* }% */, 9300 /* }& */, 9300 /* }' */, 9300 /* }( */, 9300 /* }) */, 9300 /* }* */, 9300 /* }+ */, 9300 /* }, */, 9300 /* }- */, 9300 /* }. */, 9300 /* }/ */, 9300 /* }0 */, 9300 /* }1 */, 9300 /* }2 */, 9300 /* }3 */, 9300 /* }4 */, 9300 /* }5 */, 9300 /* }6 */, 9300 /* }7 */, 9300 /* }8 */, 9300 /* }9 */, 9300 /* }: */, 9300 /* }; */, 9300 /* }< */, 9300 /* }= */, 9300 /* }> */, 9300 /* }? 
*/, 9300 /* }@ */, 9300 /* }A */, 9300 /* }B */, 9300 /* }C */, 9300 /* }D */, 9300 /* }E */, 9300 /* }F */, 9300 /* }G */, 9300 /* }H */, 9300 /* }I */, 9300 /* }J */, 9300 /* }K */, 9300 /* }L */, 9300 /* }M */, 9300 /* }N */, 9300 /* }O */, 9300 /* }P */, 9300 /* }Q */, 9300 /* }R */, 9300 /* }S */, 9300 /* }T */, 9300 /* }U */, 9300 /* }V */, 9300 /* }W */, 9300 /* }X */, 9300 /* }Y */, 9300 /* }Z */, 9300 /* }[ */, 9300 /* }\ */, 9300 /* }] */, 9300 /* }^ */, 9300 /* }_ */, 9300 /* }` */, 9300 /* }a */, 9300 /* }b */, 9300 /* }c */, 9300 /* }d */, 9300 /* }e */, 9300 /* }f */, 9300 /* }g */, 9300 /* }h */, 9300 /* }i */, 9300 /* }j */, 9300 /* }k */, 9300 /* }l */, 9300 /* }m */, 9300 /* }n */, 9300 /* }o */, 9300 /* }p */, 9300 /* }q */, 9300 /* }r */, 9300 /* }s */, 9300 /* }t */, 9300 /* }u */, 9300 /* }v */, 9300 /* }w */, 9300 /* }x */, 9300 /* }y */, 9300 /* }z */, 9300 /* }{ */, 9300 /* }| */, 9300 /* }} */, 9300 /* }~ */}, + {15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~ */, 15000 /* ~! */, 15000 /* ~" */, 15000 /* ~# */, 15000 /* ~$ */, 15000 /* ~% */, 15000 /* ~& */, 15000 /* ~' */, 15000 /* ~( */, 15000 /* ~) */, 15000 /* ~* */, 15000 /* ~+ */, 15000 /* ~, */, 15000 /* ~- */, 15000 /* ~. */, 15000 /* ~/ */, 15000 /* ~0 */, 15000 /* ~1 */, 15000 /* ~2 */, 15000 /* ~3 */, 15000 /* ~4 */, 15000 /* ~5 */, 15000 /* ~6 */, 15000 /* ~7 */, 15000 /* ~8 */, 15000 /* ~9 */, 15000 /* ~: */, 15000 /* ~; */, 15000 /* ~< */, 15000 /* ~= */, 15000 /* ~> */, 15000 /* ~? 
*/, 15000 /* ~@ */, 15000 /* ~A */, 15000 /* ~B */, 15000 /* ~C */, 15000 /* ~D */, 15000 /* ~E */, 15000 /* ~F */, 15000 /* ~G */, 15000 /* ~H */, 15000 /* ~I */, 15000 /* ~J */, 15000 /* ~K */, 15000 /* ~L */, 15000 /* ~M */, 15000 /* ~N */, 15000 /* ~O */, 15000 /* ~P */, 15000 /* ~Q */, 15000 /* ~R */, 15000 /* ~S */, 15000 /* ~T */, 15000 /* ~U */, 15000 /* ~V */, 15000 /* ~W */, 15000 /* ~X */, 15000 /* ~Y */, 15000 /* ~Z */, 15000 /* ~[ */, 15000 /* ~\ */, 15000 /* ~] */, 15000 /* ~^ */, 15000 /* ~_ */, 15000 /* ~` */, 15000 /* ~a */, 15000 /* ~b */, 15000 /* ~c */, 15000 /* ~d */, 15000 /* ~e */, 15000 /* ~f */, 15000 /* ~g */, 15000 /* ~h */, 15000 /* ~i */, 15000 /* ~j */, 15000 /* ~k */, 15000 /* ~l */, 15000 /* ~m */, 15000 /* ~n */, 15000 /* ~o */, 15000 /* ~p */, 15000 /* ~q */, 15000 /* ~r */, 15000 /* ~s */, 15000 /* ~t */, 15000 /* ~u */, 15000 /* ~v */, 15000 /* ~w */, 15000 /* ~x */, 15000 /* ~y */, 15000 /* ~z */, 15000 /* ~{ */, 15000 /* ~| */, 15000 /* ~} */, 15000 /* ~~ */}, +}; + +static const unsigned short int ibm_plex_sans_bold_250_em_size = 20475; + +static double ibm_plex_sans_bold_word_width(const char *s, double fontSize) { + unsigned long int totalWidth = 0; + + while(*s) { + if (IS_UTF8_STARTBYTE(*s)) { + s++; + + while(IS_UTF8_BYTE(*s) && !IS_UTF8_STARTBYTE(*s)) + s++; + + totalWidth += ibm_plex_sans_bold_250_em_size; + } + else { + if (*s >= 0 && *s <= 126) // Check if it's a valid ASCII character (including '\0') + totalWidth += ibm_plex_sans_bold_250[(unsigned char)*s][(unsigned char)s[1]]; + + s++; + } + } + + // Convert the width from the encoded value to the actual float value + double actualWidth = (double)totalWidth / 100.0; + + // Scale the width proportionally based on the desired font size + double scaledWidth = actualWidth * (fontSize / 250.0); + + return scaledWidth; +} + +/* + + + + + + + + + + + + + + I + + + TROUBLE + + + + */ + +static bool word_goes_below_baseline(const char *love) { + const char *s = love; + while(*s) { + switch(*s) { + case 'g': + case 'j': + case 'p': + case 'q': + case 'y': + case 'Q': + return true; + } + + s++; + } + + return false; +} + +static void generate_ilove_svg(BUFFER *wb, const char *love) { + const char *i = "I"; + const char *stretch = "spacing"; + + double font_size = 250.0; + double border_width = 25.0; + double logo_scale = 0.215; + double logo_width = 1000.0 * logo_scale; + double i_width = ibm_plex_sans_bold_word_width(i, font_size); + double first_line_width = i_width + logo_width; + double second_line_font_size = font_size; + double second_line_width = ibm_plex_sans_bold_word_width(love, second_line_font_size); + bool second_line_needs_height = word_goes_below_baseline(love); + + if(second_line_width <= first_line_width) { + second_line_width = first_line_width; + stretch = "spacingAndGlyphs"; + + if(!second_line_needs_height) + second_line_font_size *= 1.10; + } + else if(second_line_width > first_line_width * 4) { + second_line_width *= 0.80; + stretch = "spacingAndGlyphs"; + second_line_font_size *= 0.90; + } + else if(second_line_width > first_line_width * 2) { + second_line_width *= 0.93; + stretch = "spacing"; + } + + double width = second_line_width + border_width * 4.0; + + buffer_flush(wb); + + buffer_sprintf(wb, "\n", + width); + + // White bounding box with rounded corners + buffer_sprintf(wb, " \n", + width, border_width * 2, border_width * 2); + + // Black background + buffer_sprintf(wb, " \n", + border_width, border_width, width - border_width * 2, border_width * 1.5, border_width * 
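// Layout sketch (an editorial note; all numbers follow from the constants
// above): with font_size = 250 and border_width = 25, the white frame gets
// a corner radius of 50 (border_width * 2) and this black panel is inset
// by 25 on every side with a corner radius of 37.5 (border_width * 1.5).
// The first baseline lands at 250 * 0.70 + 50 = 225 and the second at
// 225 + 212.5 = 437.5, or 225 + 195 = 420 when the word has descenders,
// presumably raising the baseline so the tails of g/j/p/q/y/Q stay inside
// the panel.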
1.5); + + // Netdata logo + buffer_sprintf(wb, " \n", + (width - first_line_width) / 2 + i_width, border_width * 2, logo_scale); + + // first line + double first_line_baseline = font_size * 0.70 + border_width * 2; + buffer_sprintf(wb, " %s\n", + (width - first_line_width) / 2, first_line_baseline, font_size, i); + + // second line + double second_line_baseline = first_line_baseline + font_size * 0.85; + if(second_line_needs_height) + second_line_baseline = first_line_baseline + font_size * 0.78; + + buffer_sprintf(wb, " %s\n", + border_width * 2, second_line_baseline, second_line_font_size, second_line_width, stretch, love); + + buffer_sprintf(wb, ""); + + wb->content_type = CT_IMAGE_SVG_XML; +} + +int api_v2_ilove(RRDHOST *host __maybe_unused, struct web_client *w, char *url) { + char *love = "TROUBLE"; + + while(url) { + char *value = strsep_skip_consecutive_separators(&url, "&"); + if(!value || !*value) continue; + + char *name = strsep_skip_consecutive_separators(&value, "="); + if(!name || !*name) continue; + if(!value || !*value) continue; + + // name and value are now the parameters + // they are not null and not empty + + if(!strcmp(name, "love")) love = value; + } + +// char *s = love; +// while(*s) { +// *s = toupper(*s); +// s++; +// } + + generate_ilove_svg(w->response.data, love); + + return HTTP_RESP_OK; +} diff --git a/src/web/api/v2/api_v2_ilove/measure-text.js b/src/web/api/v2/api_v2_ilove/measure-text.js new file mode 100644 index 000000000..e2a2a6e94 --- /dev/null +++ b/src/web/api/v2/api_v2_ilove/measure-text.js @@ -0,0 +1,73 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +'use strict'; + +var path = require('path'); +var fs = require('fs'); +var PDFDocument = require('pdfkit'); +var doc = new PDFDocument({size:'A4', layout:'landscape'}); + +function loadFont(fontPaths, callback) { + for (let fontPath of fontPaths) { + try { + doc = doc.font(fontPath); + if (callback) { callback(null); } + return; // Exit once a font is loaded successfully + } catch(err) { + // Log error but continue to next font path + console.error(`Failed to load font from path: ${fontPath}. Error: ${err.message}`); + } + } + + // If we reached here, none of the fonts were loaded successfully. + console.error('All font paths failed. Stopping execution.'); + process.exit(1); // Exit with an error code +} + +loadFont(['IBMPlexSans-Bold.ttf'], function(err) { + if (err) { + console.error('Could not load any of the specified fonts.'); + } +}); + +doc = doc.fontSize(250); + +function measureCombination(charA, charB) { + return doc.widthOfString(charA + charB); +} + +function getCharRepresentation(charCode) { + return (charCode >= 32 && charCode <= 126) ? 
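// Editorial note: codes outside 32..126 map to the empty string, so for
// those columns measureCombination(charA, '') - widthOfString('') reduces
// to the bare advance width of charA -- which is why the control-character
// columns of every row in the generated table repeat the same value.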
String.fromCharCode(charCode) : ''; +} + +function generateCombinationArray() { + let output = "static const unsigned short int ibm_plex_sans_bold_250[128][128] = {\n"; + + for (let i = 0; i <= 126; i++) { + output += " {"; // Start of inner array + for (let j = 0; j <= 126; j++) { + let charA = getCharRepresentation(i); + let charB = getCharRepresentation(j); + let width = measureCombination(charA, charB) - doc.widthOfString(charB); + let encodedWidth = Math.round(width * 100); // Multiply by 100 and round + + if(charA === '*' && charB == '/') + charB = '\\/'; + + if(charA === '/' && charB == '*') + charB = '\\*'; + + output += `${encodedWidth} /* ${charA}${charB} */`; + if (j < 126) { + output += ", "; + } + } + output += "},\n"; // End of inner array + } + output += "};\n"; // End of 2D array + + return output; +} + +console.log(generateCombinationArray()); +console.log('static const unsigned short int ibm_plex_sans_bold_250_em_size = ' + Math.round(doc.widthOfString('M') * 100) + ';'); diff --git a/src/web/api/v2/api_v2_info.c b/src/web/api/v2/api_v2_info.c new file mode 100644 index 000000000..fd2aba633 --- /dev/null +++ b/src/web/api/v2/api_v2_info.c @@ -0,0 +1,7 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v2_calls.h" + +int api_v2_info(RRDHOST *host __maybe_unused, struct web_client *w, char *url) { + return api_v2_contexts_internal(host, w, url, CONTEXTS_V2_AGENTS | CONTEXTS_V2_AGENTS_INFO); +} diff --git a/src/web/api/v2/api_v2_node_instances.c b/src/web/api/v2/api_v2_node_instances.c new file mode 100644 index 000000000..037191432 --- /dev/null +++ b/src/web/api/v2/api_v2_node_instances.c @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v2_calls.h" + +int api_v2_node_instances(RRDHOST *host __maybe_unused, struct web_client *w, char *url) { + return api_v2_contexts_internal( + host, w, url, + CONTEXTS_V2_NODES | CONTEXTS_V2_NODE_INSTANCES | CONTEXTS_V2_AGENTS | + CONTEXTS_V2_AGENTS_INFO | CONTEXTS_V2_VERSIONS); +} diff --git a/src/web/api/v2/api_v2_nodes.c b/src/web/api/v2/api_v2_nodes.c new file mode 100644 index 000000000..3880f279f --- /dev/null +++ b/src/web/api/v2/api_v2_nodes.c @@ -0,0 +1,7 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v2_calls.h" + +int api_v2_nodes(RRDHOST *host __maybe_unused, struct web_client *w, char *url) { + return api_v2_contexts_internal(host, w, url, CONTEXTS_V2_NODES | CONTEXTS_V2_NODES_INFO); +} diff --git a/src/web/api/v2/api_v2_progress.c b/src/web/api/v2/api_v2_progress.c new file mode 100644 index 000000000..ebb53ca88 --- /dev/null +++ b/src/web/api/v2/api_v2_progress.c @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v2_calls.h" + +int api_v2_progress(RRDHOST *host __maybe_unused, struct web_client *w, char *url) { + char *transaction = NULL; + + while(url) { + char *value = strsep_skip_consecutive_separators(&url, "&"); + if(!value || !*value) continue; + + char *name = strsep_skip_consecutive_separators(&value, "="); + if(!name || !*name) continue; + if(!value || !*value) continue; + + // name and value are now the parameters + // they are not null and not empty + + if(!strcmp(name, "transaction")) transaction = value; + } + + nd_uuid_t tr; + uuid_parse_flexi(transaction, tr); + + rrd_function_call_progresser(&tr); + + return web_api_v2_report_progress(&tr, w->response.data); +} diff --git a/src/web/api/v2/api_v2_q.c b/src/web/api/v2/api_v2_q.c new file mode 100644 index 000000000..57fcec7dd --- /dev/null +++ 
b/src/web/api/v2/api_v2_q.c @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v2_calls.h" + +int api_v2_q(RRDHOST *host __maybe_unused, struct web_client *w, char *url) { + return api_v2_contexts_internal( + host, w, url, + CONTEXTS_V2_SEARCH | CONTEXTS_V2_CONTEXTS | CONTEXTS_V2_NODES | CONTEXTS_V2_AGENTS | CONTEXTS_V2_VERSIONS); +} diff --git a/src/web/api/v2/api_v2_versions.c b/src/web/api/v2/api_v2_versions.c new file mode 100644 index 000000000..299e7a30c --- /dev/null +++ b/src/web/api/v2/api_v2_versions.c @@ -0,0 +1,7 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v2_calls.h" + +int api_v2_versions(RRDHOST *host __maybe_unused, struct web_client *w, char *url) { + return api_v2_contexts_internal(host, w, url, CONTEXTS_V2_VERSIONS); +} diff --git a/src/web/api/v2/api_v2_webrtc.c b/src/web/api/v2/api_v2_webrtc.c new file mode 100644 index 000000000..dcd383d47 --- /dev/null +++ b/src/web/api/v2/api_v2_webrtc.c @@ -0,0 +1,8 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v2_calls.h" +#include "../../rtc/webrtc.h" + +int api_v2_webrtc(RRDHOST *host __maybe_unused, struct web_client *w, char *url __maybe_unused) { + return webrtc_new_connection(buffer_tostring(w->payload), w->response.data); +} diff --git a/src/web/api/v2/api_v2_weights.c b/src/web/api/v2/api_v2_weights.c new file mode 100644 index 000000000..442c8b75a --- /dev/null +++ b/src/web/api/v2/api_v2_weights.c @@ -0,0 +1,152 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v2_calls.h" + +int web_client_api_request_weights(RRDHOST *host, struct web_client *w, char *url, WEIGHTS_METHOD method, WEIGHTS_FORMAT format, size_t api_version) { + if (!netdata_ready) + return HTTP_RESP_SERVICE_UNAVAILABLE; + + time_t baseline_after = 0, baseline_before = 0, after = 0, before = 0; + size_t points = 0; + RRDR_OPTIONS options = 0; + RRDR_TIME_GROUPING time_group_method = RRDR_GROUPING_AVERAGE; + time_t timeout_ms = 0; + size_t tier = 0; + const char *time_group_options = NULL, *scope_contexts = NULL, *scope_nodes = NULL, *contexts = NULL, *nodes = NULL, + *instances = NULL, *dimensions = NULL, *labels = NULL, *alerts = NULL; + + struct group_by_pass group_by = { + .group_by = RRDR_GROUP_BY_NONE, + .group_by_label = NULL, + .aggregation = RRDR_GROUP_BY_FUNCTION_AVERAGE, + }; + + while (url) { + char *value = strsep_skip_consecutive_separators(&url, "&"); + if (!value || !*value) + continue; + + char *name = strsep_skip_consecutive_separators(&value, "="); + if (!name || !*name) + continue; + if (!value || !*value) + continue; + + if (!strcmp(name, "baseline_after")) + baseline_after = str2l(value); + + else if (!strcmp(name, "baseline_before")) + baseline_before = str2l(value); + + else if (!strcmp(name, "after") || !strcmp(name, "highlight_after")) + after = str2l(value); + + else if (!strcmp(name, "before") || !strcmp(name, "highlight_before")) + before = str2l(value); + + else if (!strcmp(name, "points") || !strcmp(name, "max_points")) + points = str2ul(value); + + else if (!strcmp(name, "timeout")) + timeout_ms = str2l(value); + + else if((api_version == 1 && !strcmp(name, "group")) || (api_version >= 2 && !strcmp(name, "time_group"))) + time_group_method = time_grouping_parse(value, RRDR_GROUPING_AVERAGE); + + else if((api_version == 1 && !strcmp(name, "group_options")) || (api_version >= 2 && !strcmp(name, "time_group_options"))) + time_group_options = value; + + else if(!strcmp(name, "options")) + options |= rrdr_options_parse(value); + + else 
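// "method" overrides the scoring method the caller passed in; note that
// the surrounding parameters keep their v1 spellings ("group", "context")
// only when api_version == 1 and use the v2 names otherwise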
if(!strcmp(name, "method")) + method = weights_string_to_method(value); + + else if(api_version == 1 && (!strcmp(name, "context") || !strcmp(name, "contexts"))) + scope_contexts = value; + + else if(api_version >= 2 && !strcmp(name, "scope_nodes")) scope_nodes = value; + else if(api_version >= 2 && !strcmp(name, "scope_contexts")) scope_contexts = value; + else if(api_version >= 2 && !strcmp(name, "nodes")) nodes = value; + else if(api_version >= 2 && !strcmp(name, "contexts")) contexts = value; + else if(api_version >= 2 && !strcmp(name, "instances")) instances = value; + else if(api_version >= 2 && !strcmp(name, "dimensions")) dimensions = value; + else if(api_version >= 2 && !strcmp(name, "labels")) labels = value; + else if(api_version >= 2 && !strcmp(name, "alerts")) alerts = value; + else if(api_version >= 2 && (!strcmp(name, "group_by") || !strcmp(name, "group_by[0]"))) { + group_by.group_by = group_by_parse(value); + } + else if(api_version >= 2 && (!strcmp(name, "group_by_label") || !strcmp(name, "group_by_label[0]"))) { + group_by.group_by_label = value; + } + else if(api_version >= 2 && (!strcmp(name, "aggregation") || !strcmp(name, "aggregation[0]"))) { + group_by.aggregation = group_by_aggregate_function_parse(value); + } + + else if(!strcmp(name, "tier")) { + tier = str2ul(value); + if(tier < storage_tiers) + options |= RRDR_OPTION_SELECTED_TIER; + else + tier = 0; + } + } + + if(options == 0) + // the user did not set any options + options = RRDR_OPTION_NOT_ALIGNED | RRDR_OPTION_NULL2ZERO | RRDR_OPTION_NONZERO; + else + // the user set some options, add also these + options |= RRDR_OPTION_NOT_ALIGNED | RRDR_OPTION_NULL2ZERO; + + if(options & RRDR_OPTION_PERCENTAGE) + options |= RRDR_OPTION_ABSOLUTE; + + if(options & RRDR_OPTION_DEBUG) + options &= ~RRDR_OPTION_MINIFY; + + BUFFER *wb = w->response.data; + buffer_flush(wb); + wb->content_type = CT_APPLICATION_JSON; + + QUERY_WEIGHTS_REQUEST qwr = { + .version = api_version, + .host = (api_version == 1) ? 
+        .scope_nodes = scope_nodes,
+        .scope_contexts = scope_contexts,
+        .nodes = nodes,
+        .contexts = contexts,
+        .instances = instances,
+        .dimensions = dimensions,
+        .labels = labels,
+        .alerts = alerts,
+        .group_by = {
+            .group_by = group_by.group_by,
+            .group_by_label = group_by.group_by_label,
+            .aggregation = group_by.aggregation,
+        },
+        .method = method,
+        .format = format,
+        .time_group_method = time_group_method,
+        .time_group_options = time_group_options,
+        .baseline_after = baseline_after,
+        .baseline_before = baseline_before,
+        .after = after,
+        .before = before,
+        .points = points,
+        .options = options,
+        .tier = tier,
+        .timeout_ms = timeout_ms,
+
+        .interrupt_callback = web_client_interrupt_callback,
+        .interrupt_callback_data = w,
+
+        .transaction = &w->transaction,
+    };
+
+    return web_api_v12_weights(wb, &qwr);
+}
+
+int api_v2_weights(RRDHOST *host __maybe_unused, struct web_client *w, char *url) {
+    return web_client_api_request_weights(host, w, url, WEIGHTS_METHOD_VALUE, WEIGHTS_FORMAT_MULTINODE, 2);
+}
diff --git a/src/web/api/v3/api_v3_calls.h b/src/web/api/v3/api_v3_calls.h
new file mode 100644
index 000000000..4cee766fd
--- /dev/null
+++ b/src/web/api/v3/api_v3_calls.h
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_API_V3_CALLS_H
+#define NETDATA_API_V3_CALLS_H
+
+#include "../web_api_v3.h"
+
+int api_v3_settings(RRDHOST *host, struct web_client *w, char *url);
+int api_v3_me(RRDHOST *host, struct web_client *w, char *url);
+
+#endif //NETDATA_API_V3_CALLS_H
diff --git a/src/web/api/v3/api_v3_me.c b/src/web/api/v3/api_v3_me.c
new file mode 100644
index 000000000..39ba2c29b
--- /dev/null
+++ b/src/web/api/v3/api_v3_me.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "api_v3_calls.h"
+
+int api_v3_me(RRDHOST *host __maybe_unused, struct web_client *w, char *url __maybe_unused) {
+    BUFFER *wb = w->response.data;
+    buffer_reset(wb);
+    buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_MINIFY);
+
+    const char *auth;
+    switch(web_client_flag_check(w, WEB_CLIENT_FLAG_AUTH_CLOUD|WEB_CLIENT_FLAG_AUTH_BEARER|WEB_CLIENT_FLAG_AUTH_GOD)) {
+        case WEB_CLIENT_FLAG_AUTH_CLOUD:
+            auth = "cloud";
+            break;
+
+        case WEB_CLIENT_FLAG_AUTH_BEARER:
+            auth = "bearer";
+            break;
+
+        case WEB_CLIENT_FLAG_AUTH_GOD:
+            auth = "god";
+            break;
+
+        default:
+            auth = "none";
+            break;
+    }
+    buffer_json_member_add_string(wb, "auth", auth);
+
+    buffer_json_member_add_uuid(wb, "cloud_account_id", w->auth.cloud_account_id);
+    buffer_json_member_add_string(wb, "client_name", w->auth.client_name);
+    http_access2buffer_json_array(wb, "access", w->access);
+    buffer_json_member_add_string(wb, "user_role", http_id2user_role(w->user_role));
+
+    buffer_json_finalize(wb);
+    return HTTP_RESP_OK;
+}
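A response from /api/v3/me, as built above, has roughly the following shape
(the agent emits it minified); every field value shown here is an illustrative
placeholder:

    {
        "auth": "bearer",
        "cloud_account_id": "00000000-0000-0000-0000-000000000000",
        "client_name": "...",
        "access": ["..."],
        "user_role": "member"
    }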
diff --git a/src/web/api/v3/api_v3_settings.c b/src/web/api/v3/api_v3_settings.c
new file mode 100644
index 000000000..3b02e6b61
--- /dev/null
+++ b/src/web/api/v3/api_v3_settings.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+/*
+ * /api/v3/settings
+ *
+ * QUERY STRING PARAMETERS:
+ * - file=a file name (alphanumerics, dashes, underscores)
+ *   When the user is not authenticated with a bearer token,
+ *   only the 'default' file is allowed.
+ *   Authenticated users can create, store and update any
+ *   settings file.
+ *
+ * HTTP METHODS
+ * - GET to retrieve a file
+ * - PUT to create or update a file
+ *
+ * PAYLOAD
+ * - The payload MUST have the member 'version'.
+ * - The payload MAY have anything else.
+ * - The maximum payload size in JSON is 20MiB.
+ * - When updating the payload, the caller must specify the
+ *   version of the existing file. If this check fails,
+ *   Netdata will return 409 (conflict).
+ *   When the caller receives 409, the stored object has been
+ *   updated outside its control, and it MUST be loaded again
+ *   to find its current version.
+ *   After loading it, the caller must reapply its changes and
+ *   PUT it again.
+ * - Netdata will increase the version on every PUT action.
+ *   So, the payload MUST specify the version found on disk,
+ *   but Netdata will increment the version before saving it.
+ */
+
+#include "api_v3_calls.h"
+
+#define MAX_SETTINGS_SIZE_BYTES (20 * 1024 * 1024)
+
+// we need an r/w spinlock to ensure that reads and writes do not happen
+// concurrently for settings files
+static RW_SPINLOCK settings_spinlock = NETDATA_RW_SPINLOCK_INITIALIZER;
+
+static inline void settings_path(char out[FILENAME_MAX]) {
+    filename_from_path_entry(out, netdata_configured_varlib_dir, "settings", NULL);
+}
+
+static inline void settings_filename(char out[FILENAME_MAX], const char *file, const char *extension) {
+    char path[FILENAME_MAX];
+    settings_path(path);
+    filename_from_path_entry(out, path, file, extension);
+}
+
+static inline bool settings_ensure_path_exists(void) {
+    char path[FILENAME_MAX];
+    settings_path(path);
+    return filename_is_dir(path, true);
+}
+
+static inline size_t settings_extract_json_version(const char *json) {
+    if(!json || !*json) return 0;
+
+    // Parse the JSON string into a JSON-C object
+    CLEAN_JSON_OBJECT *jobj = json_tokener_parse(json);
+    if (jobj == NULL)
+        return 0;
+
+    // Access the "version" field
+    struct json_object *version_obj;
+    if (json_object_object_get_ex(jobj, "version", &version_obj))
+        // Extract the integer value of the version
+        return (size_t)json_object_get_int(version_obj);
+
+    return 0;
+}
+
+static inline void settings_initial_version(BUFFER *wb) {
+    buffer_reset(wb);
+    buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_MINIFY);
+    buffer_json_member_add_uint64(wb, "version", 1);
+    buffer_json_finalize(wb);
+}
+
+static inline void settings_get(BUFFER *wb, const char *file, bool have_lock) {
+    char filename[FILENAME_MAX];
+    settings_filename(filename, file, NULL);
+
+    buffer_reset(wb);
+
+    if(!have_lock)
+        rw_spinlock_read_lock(&settings_spinlock);
+
+    bool rc = read_txt_file_to_buffer(filename, wb, MAX_SETTINGS_SIZE_BYTES);
+
+    if(!have_lock)
+        rw_spinlock_read_unlock(&settings_spinlock);
+
+    if(rc) {
+        size_t version = settings_extract_json_version(buffer_tostring(wb));
+        if (!version) {
+            nd_log(NDLS_DAEMON, NDLP_ERR, "file '%s' cannot be parsed to extract version", filename);
+            settings_initial_version(wb);
+        }
+        else {
+            wb->content_type = CT_APPLICATION_JSON;
+            buffer_no_cacheable(wb);
+        }
+    }
+    else
+        settings_initial_version(wb);
+}
+
+static inline size_t settings_get_version(const char *path, bool have_lock) {
+    CLEAN_BUFFER *wb = buffer_create(0, NULL);
+    settings_get(wb, path, have_lock);
+
+    return settings_extract_json_version(buffer_tostring(wb));
+}
+
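settings_put() below implements the optimistic concurrency described in the
header comment. A well-behaved client is expected to follow a read-modify-write
loop along these lines (a sketch of the flow, not part of this patch):

    GET /api/v3/settings?file=default          returns { "version": N, ... }
    modify the object locally, keeping "version": N
    PUT /api/v3/settings?file=default          with the modified object
    on HTTP 409 (version mismatch): GET again, reapply the local changes
        on top of the fresh object, then PUT again

On success, Netdata persists the object with "version": N + 1.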
+static inline int settings_put(struct web_client *w, char *file) {
+    rw_spinlock_write_lock(&settings_spinlock);
+
+    if(!settings_ensure_path_exists()) {
+        rw_spinlock_write_unlock(&settings_spinlock);
+        return rrd_call_function_error(
+            w->response.data,
+            "Settings path cannot be created or accessed.",
+            HTTP_RESP_BAD_REQUEST);
+    }
+
+    size_t old_version = settings_get_version(file, true);
+
+    // Parse the JSON string into a JSON-C object
+    CLEAN_JSON_OBJECT *jobj = json_tokener_parse(buffer_tostring(w->payload));
+    if (jobj == NULL) {
+        rw_spinlock_write_unlock(&settings_spinlock);
+        return rrd_call_function_error(
+            w->response.data,
+            "Payload cannot be parsed as a JSON object",
+            HTTP_RESP_BAD_REQUEST);
+    }
+
+    // Access the "version" field
+    struct json_object *version_obj;
+    if (!json_object_object_get_ex(jobj, "version", &version_obj)) {
+        rw_spinlock_write_unlock(&settings_spinlock);
+        return rrd_call_function_error(
+            w->response.data,
+            "Field 'version' was not found in the payload",
+            HTTP_RESP_BAD_REQUEST);
+    }
+
+    size_t new_version = (size_t)json_object_get_int(version_obj);
+
+    if (old_version != new_version) {
+        rw_spinlock_write_unlock(&settings_spinlock);
+        return rrd_call_function_error(
+            w->response.data,
+            "Payload version does not match the version of the stored object",
+            HTTP_RESP_CONFLICT);
+    }
+
+    new_version++;
+    // Set the new version back into the JSON object
+    json_object_object_add(jobj, "version", json_object_new_int((int)new_version));
+
+    // Convert the updated JSON object back to a string
+    const char *updated_json_str = json_object_to_json_string(jobj);
+
+    char tmp_filename[FILENAME_MAX];
+    settings_filename(tmp_filename, file, "new");
+
+    // Save the updated JSON string to a file
+    FILE *fp = fopen(tmp_filename, "w");
+    if (fp == NULL) {
+        rw_spinlock_write_unlock(&settings_spinlock);
+        nd_log(NDLS_DAEMON, NDLP_ERR, "cannot open/create settings file '%s'", tmp_filename);
+        return rrd_call_function_error(
+            w->response.data,
+            "Cannot create a file to save the payload",
+            HTTP_RESP_INTERNAL_SERVER_ERROR);
+    }
+    size_t len = strlen(updated_json_str);
+    if(fwrite(updated_json_str, 1, len, fp) != len) {
+        fclose(fp);
+        unlink(tmp_filename);
+        rw_spinlock_write_unlock(&settings_spinlock);
+        nd_log(NDLS_DAEMON, NDLP_ERR, "cannot save settings to file '%s'", tmp_filename);
+        return rrd_call_function_error(
+            w->response.data,
+            "Cannot save the payload to a file",
+            HTTP_RESP_INTERNAL_SERVER_ERROR);
+    }
+    fclose(fp);
+
+    char filename[FILENAME_MAX];
+    settings_filename(filename, file, NULL);
+
+    bool renamed = rename(tmp_filename, filename) == 0;
+
+    rw_spinlock_write_unlock(&settings_spinlock);
+
+    if(!renamed) {
+        nd_log(NDLS_DAEMON, NDLP_ERR, "cannot rename file '%s' to '%s'", tmp_filename, filename);
+        return rrd_call_function_error(
+            w->response.data,
+            "Failed to move the payload file to its final location",
+            HTTP_RESP_INTERNAL_SERVER_ERROR);
+    }
+
+    return rrd_call_function_error(
+        w->response.data,
+        "OK",
+        HTTP_RESP_OK);
+}
+
+static inline bool is_settings_file_valid(char *file) {
+    char *s = file;
+
+    if(!s || !*s)
+        return false;
+
+    while(*s) {
+        if(!isalnum((uint8_t)*s) && *s != '-' && *s != '_')
+            return false;
+        s++;
+    }
+
+    return true;
+}
+
+int api_v3_settings(RRDHOST *host, struct web_client *w, char *url) {
+    char *file = NULL;
+
+    while(url) {
+        char *value = strsep_skip_consecutive_separators(&url, "&");
+        if(!value || !*value) continue;
+
+        char *name = strsep_skip_consecutive_separators(&value, "=");
+        if(!name || !*name) continue;
+        if(!value || !*value) continue;
+
+        // name and value are now the parameters
+        // they are not null and not empty
+
+        if(!strcmp(name, "file"))
+            file = value;
+    }
+
+    if(!is_settings_file_valid(file))
+        return rrd_call_function_error(
+            w->response.data,
+            "Invalid settings file given.",
+            HTTP_RESP_BAD_REQUEST);
+
+    if(host != localhost)
+        return rrd_call_function_error(
+            w->response.data,
+            "Settings API is only
allowed for the agent node.", + HTTP_RESP_BAD_REQUEST); + + if(web_client_flags_check_auth(w) != WEB_CLIENT_FLAG_AUTH_BEARER && strcmp(file, "default") != 0) + return rrd_call_function_error( + w->response.data, + "Only the 'default' settings file is allowed for anonymous users", + HTTP_RESP_BAD_REQUEST); + + switch(w->mode) { + case HTTP_REQUEST_MODE_GET: + settings_get(w->response.data, file, false); + return HTTP_RESP_OK; + + case HTTP_REQUEST_MODE_PUT: + if(!w->payload || !buffer_strlen(w->payload)) + return rrd_call_function_error( + w->response.data, + "Settings API PUT action requires a payload.", + HTTP_RESP_BAD_REQUEST); + + return settings_put(w, file); + + default: + return rrd_call_function_error(w->response.data, + "Invalid HTTP mode. HTTP modes GET and PUT are supported.", + HTTP_RESP_BAD_REQUEST); + } +} diff --git a/src/web/api/web_api.c b/src/web/api/web_api.c index 4e936be5b..88be32a40 100644 --- a/src/web/api/web_api.c +++ b/src/web/api/web_api.c @@ -2,6 +2,12 @@ #include "web_api.h" +void host_labels2json(RRDHOST *host, BUFFER *wb, const char *key) { + buffer_json_member_add_object(wb, key); + rrdlabels_to_buffer_json_members(host->rrdlabels, wb); + buffer_json_object_close(wb); +} + int web_client_api_request_vX(RRDHOST *host, struct web_client *w, char *url_path_endpoint, struct web_api_command *api_commands) { buffer_no_cacheable(w->response.data); @@ -11,15 +17,16 @@ int web_client_api_request_vX(RRDHOST *host, struct web_client *w, char *url_pat internal_fatal(!web_client_flags_check_auth(w) && (w->access & HTTP_ACCESS_SIGNED_ID), "signed-in permission is set, but it shouldn't"); +#ifdef NETDATA_GOD_MODE + web_client_set_permissions(w, HTTP_ACCESS_ALL, HTTP_USER_ROLE_ADMIN, WEB_CLIENT_FLAG_AUTH_GOD); +#else if(!web_client_flags_check_auth(w)) { - w->user_role = (netdata_is_protected_by_bearer) ? HTTP_USER_ROLE_NONE : HTTP_USER_ROLE_ANY; - w->access = (netdata_is_protected_by_bearer) ? HTTP_ACCESS_NONE : HTTP_ACCESS_ANONYMOUS_DATA; + web_client_set_permissions( + w, + (netdata_is_protected_by_bearer) ? HTTP_ACCESS_NONE : HTTP_ACCESS_ANONYMOUS_DATA, + (netdata_is_protected_by_bearer) ? 
HTTP_USER_ROLE_NONE : HTTP_USER_ROLE_ANY, + 0); } - -#ifdef NETDATA_GOD_MODE - web_client_flag_set(w, WEB_CLIENT_FLAG_AUTH_GOD); - w->user_role = HTTP_USER_ROLE_ADMIN; - w->access = HTTP_ACCESS_ALL; #endif if(unlikely(!url_path_endpoint || !*url_path_endpoint)) { @@ -110,156 +117,66 @@ RRDCONTEXT_TO_JSON_OPTIONS rrdcontext_to_json_parse_options(char *o) { return options; } -int web_client_api_request_weights(RRDHOST *host, struct web_client *w, char *url, WEIGHTS_METHOD method, WEIGHTS_FORMAT format, size_t api_version) { - if (!netdata_ready) - return HTTP_RESP_SERVICE_UNAVAILABLE; - - time_t baseline_after = 0, baseline_before = 0, after = 0, before = 0; - size_t points = 0; - RRDR_OPTIONS options = 0; - RRDR_TIME_GROUPING time_group_method = RRDR_GROUPING_AVERAGE; - time_t timeout_ms = 0; - size_t tier = 0; - const char *time_group_options = NULL, *scope_contexts = NULL, *scope_nodes = NULL, *contexts = NULL, *nodes = NULL, - *instances = NULL, *dimensions = NULL, *labels = NULL, *alerts = NULL; - - struct group_by_pass group_by = { - .group_by = RRDR_GROUP_BY_NONE, - .group_by_label = NULL, - .aggregation = RRDR_GROUP_BY_FUNCTION_AVERAGE, - }; - - while (url) { - char *value = strsep_skip_consecutive_separators(&url, "&"); - if (!value || !*value) - continue; - - char *name = strsep_skip_consecutive_separators(&value, "="); - if (!name || !*name) - continue; - if (!value || !*value) - continue; - - if (!strcmp(name, "baseline_after")) - baseline_after = str2l(value); - - else if (!strcmp(name, "baseline_before")) - baseline_before = str2l(value); - - else if (!strcmp(name, "after") || !strcmp(name, "highlight_after")) - after = str2l(value); - - else if (!strcmp(name, "before") || !strcmp(name, "highlight_before")) - before = str2l(value); - - else if (!strcmp(name, "points") || !strcmp(name, "max_points")) - points = str2ul(value); - - else if (!strcmp(name, "timeout")) - timeout_ms = str2l(value); - - else if((api_version == 1 && !strcmp(name, "group")) || (api_version >= 2 && !strcmp(name, "time_group"))) - time_group_method = time_grouping_parse(value, RRDR_GROUPING_AVERAGE); - - else if((api_version == 1 && !strcmp(name, "group_options")) || (api_version >= 2 && !strcmp(name, "time_group_options"))) - time_group_options = value; - - else if(!strcmp(name, "options")) - options |= rrdr_options_parse(value); - - else if(!strcmp(name, "method")) - method = weights_string_to_method(value); - - else if(api_version == 1 && (!strcmp(name, "context") || !strcmp(name, "contexts"))) - scope_contexts = value; - - else if(api_version >= 2 && !strcmp(name, "scope_nodes")) scope_nodes = value; - else if(api_version >= 2 && !strcmp(name, "scope_contexts")) scope_contexts = value; - else if(api_version >= 2 && !strcmp(name, "nodes")) nodes = value; - else if(api_version >= 2 && !strcmp(name, "contexts")) contexts = value; - else if(api_version >= 2 && !strcmp(name, "instances")) instances = value; - else if(api_version >= 2 && !strcmp(name, "dimensions")) dimensions = value; - else if(api_version >= 2 && !strcmp(name, "labels")) labels = value; - else if(api_version >= 2 && !strcmp(name, "alerts")) alerts = value; - else if(api_version >= 2 && (!strcmp(name, "group_by") || !strcmp(name, "group_by[0]"))) { - group_by.group_by = group_by_parse(value); - } - else if(api_version >= 2 && (!strcmp(name, "group_by_label") || !strcmp(name, "group_by_label[0]"))) { - group_by.group_by_label = value; - } - else if(api_version >= 2 && (!strcmp(name, "aggregation") || !strcmp(name, "aggregation[0]"))) { - 
group_by.aggregation = group_by_aggregate_function_parse(value); - } - else if(!strcmp(name, "tier")) { - tier = str2ul(value); - if(tier < storage_tiers) - options |= RRDR_OPTION_SELECTED_TIER; - else - tier = 0; - } - } +bool web_client_interrupt_callback(void *data) { + struct web_client *w = data; - if(options == 0) - // the user did not set any options - options = RRDR_OPTION_NOT_ALIGNED | RRDR_OPTION_NULL2ZERO | RRDR_OPTION_NONZERO; + bool ret; + if(w->interrupt.callback) + ret = w->interrupt.callback(w, w->interrupt.callback_data); else - // the user set some options, add also these - options |= RRDR_OPTION_NOT_ALIGNED | RRDR_OPTION_NULL2ZERO; - - if(options & RRDR_OPTION_PERCENTAGE) - options |= RRDR_OPTION_ABSOLUTE; - - if(options & RRDR_OPTION_DEBUG) - options &= ~RRDR_OPTION_MINIFY; - - BUFFER *wb = w->response.data; - buffer_flush(wb); - wb->content_type = CT_APPLICATION_JSON; - - QUERY_WEIGHTS_REQUEST qwr = { - .version = api_version, - .host = (api_version == 1) ? NULL : host, - .scope_nodes = scope_nodes, - .scope_contexts = scope_contexts, - .nodes = nodes, - .contexts = contexts, - .instances = instances, - .dimensions = dimensions, - .labels = labels, - .alerts = alerts, - .group_by = { - .group_by = group_by.group_by, - .group_by_label = group_by.group_by_label, - .aggregation = group_by.aggregation, - }, - .method = method, - .format = format, - .time_group_method = time_group_method, - .time_group_options = time_group_options, - .baseline_after = baseline_after, - .baseline_before = baseline_before, - .after = after, - .before = before, - .points = points, - .options = options, - .tier = tier, - .timeout_ms = timeout_ms, - - .interrupt_callback = web_client_interrupt_callback, - .interrupt_callback_data = w, - - .transaction = &w->transaction, - }; - - return web_api_v12_weights(wb, &qwr); + ret = is_socket_closed(w->ofd); + + return ret; } -bool web_client_interrupt_callback(void *data) { - struct web_client *w = data; +void nd_web_api_init(void) { + contexts_alert_statuses_init(); + rrdr_options_init(); + contexts_options_init(); + datasource_formats_init(); + time_grouping_init(); +} - if(w->interrupt.callback) - return w->interrupt.callback(w, w->interrupt.callback_data); - return sock_has_output_error(w->ofd); +bool request_source_is_cloud(const char *source) { + return source && *source && strstartswith(source, "method=NC,"); +} + +void web_client_api_request_vX_source_to_buffer(struct web_client *w, BUFFER *source) { + if(web_client_flag_check(w, WEB_CLIENT_FLAG_AUTH_CLOUD)) + buffer_sprintf(source, "method=NC"); + else if(web_client_flag_check(w, WEB_CLIENT_FLAG_AUTH_BEARER)) + buffer_sprintf(source, "method=api-bearer"); + else + buffer_sprintf(source, "method=api"); + + if(web_client_flag_check(w, WEB_CLIENT_FLAG_AUTH_GOD)) + buffer_strcat(source, ",role=god"); + else + buffer_sprintf(source, ",role=%s", http_id2user_role(w->user_role)); + + buffer_sprintf(source, ",permissions="HTTP_ACCESS_FORMAT, (HTTP_ACCESS_FORMAT_CAST)w->access); + + if(w->auth.client_name[0]) + buffer_sprintf(source, ",user=%s", w->auth.client_name); + + if(!uuid_is_null(w->auth.cloud_account_id)) { + char uuid_str[UUID_COMPACT_STR_LEN]; + uuid_unparse_lower_compact(w->auth.cloud_account_id, uuid_str); + buffer_sprintf(source, ",account=%s", uuid_str); + } + + if(w->client_ip[0]) + buffer_sprintf(source, ",ip=%s", w->client_ip); + + if(w->forwarded_for) + buffer_sprintf(source, ",forwarded_for=%s", w->forwarded_for); } + +void web_client_progress_functions_update(void *data, size_t 
done, size_t all) { + // handle progress updates from the plugin + struct web_client *w = data; + query_progress_functions_update(&w->transaction, done, all); +} + diff --git a/src/web/api/web_api.h b/src/web/api/web_api.h index 634e59657..cb694a33d 100644 --- a/src/web/api/web_api.h +++ b/src/web/api/web_api.h @@ -3,14 +3,28 @@ #ifndef NETDATA_WEB_API_H #define NETDATA_WEB_API_H 1 +#define ENABLE_API_V1 1 +#define ENABLE_API_v2 1 + +struct web_client; + #include "daemon/common.h" +#include "maps/maps.h" +#include "functions/functions.h" + #include "web/api/http_header.h" #include "web/api/http_auth.h" -#include "web/api/badges/web_buffer_svg.h" -#include "web/api/ilove/ilove.h" #include "web/api/formatters/rrd2json.h" #include "web/api/queries/weights.h" +void nd_web_api_init(void); + +bool request_source_is_cloud(const char *source); +void web_client_api_request_vX_source_to_buffer(struct web_client *w, BUFFER *source); +void web_client_progress_functions_update(void *data, size_t done, size_t all); + +void host_labels2json(RRDHOST *host, BUFFER *wb, const char *key); + struct web_api_command { const char *api; uint32_t hash; @@ -37,7 +51,11 @@ int web_client_api_request_weights(RRDHOST *host, struct web_client *w, char *ur bool web_client_interrupt_callback(void *data); +char *format_value_and_unit(char *value_string, size_t value_string_len, + NETDATA_DOUBLE value, const char *units, int precision); + #include "web_api_v1.h" #include "web_api_v2.h" +#include "web_api_v3.h" #endif //NETDATA_WEB_API_H diff --git a/src/web/api/web_api_v1.c b/src/web/api/web_api_v1.c index bfaa4f6f7..8d38e01db 100644 --- a/src/web/api/web_api_v1.c +++ b/src/web/api/web_api_v1.c @@ -1,1958 +1,232 @@ // SPDX-License-Identifier: GPL-3.0-or-later #include "web_api_v1.h" - -char *api_secret; - -static struct { - const char *name; - uint32_t hash; - RRDR_OPTIONS value; -} rrdr_options[] = { - { "nonzero" , 0 , RRDR_OPTION_NONZERO} - , {"flip" , 0 , RRDR_OPTION_REVERSED} - , {"reversed" , 0 , RRDR_OPTION_REVERSED} - , {"reverse" , 0 , RRDR_OPTION_REVERSED} - , {"jsonwrap" , 0 , RRDR_OPTION_JSON_WRAP} - , {"min2max" , 0 , RRDR_OPTION_DIMS_MIN2MAX} // rrdr2value() only - , {"average" , 0 , RRDR_OPTION_DIMS_AVERAGE} // rrdr2value() only - , {"min" , 0 , RRDR_OPTION_DIMS_MIN} // rrdr2value() only - , {"max" , 0 , RRDR_OPTION_DIMS_MAX} // rrdr2value() only - , {"ms" , 0 , RRDR_OPTION_MILLISECONDS} - , {"milliseconds" , 0 , RRDR_OPTION_MILLISECONDS} - , {"absolute" , 0 , RRDR_OPTION_ABSOLUTE} - , {"abs" , 0 , RRDR_OPTION_ABSOLUTE} - , {"absolute_sum" , 0 , RRDR_OPTION_ABSOLUTE} - , {"absolute-sum" , 0 , RRDR_OPTION_ABSOLUTE} - , {"display_absolute" , 0 , RRDR_OPTION_DISPLAY_ABS} - , {"display-absolute" , 0 , RRDR_OPTION_DISPLAY_ABS} - , {"seconds" , 0 , RRDR_OPTION_SECONDS} - , {"null2zero" , 0 , RRDR_OPTION_NULL2ZERO} - , {"objectrows" , 0 , RRDR_OPTION_OBJECTSROWS} - , {"google_json" , 0 , RRDR_OPTION_GOOGLE_JSON} - , {"google-json" , 0 , RRDR_OPTION_GOOGLE_JSON} - , {"percentage" , 0 , RRDR_OPTION_PERCENTAGE} - , {"unaligned" , 0 , RRDR_OPTION_NOT_ALIGNED} - , {"match_ids" , 0 , RRDR_OPTION_MATCH_IDS} - , {"match-ids" , 0 , RRDR_OPTION_MATCH_IDS} - , {"match_names" , 0 , RRDR_OPTION_MATCH_NAMES} - , {"match-names" , 0 , RRDR_OPTION_MATCH_NAMES} - , {"anomaly-bit" , 0 , RRDR_OPTION_ANOMALY_BIT} - , {"selected-tier" , 0 , RRDR_OPTION_SELECTED_TIER} - , {"raw" , 0 , RRDR_OPTION_RETURN_RAW} - , {"jw-anomaly-rates" , 0 , RRDR_OPTION_RETURN_JWAR} - , {"natural-points" , 0 , RRDR_OPTION_NATURAL_POINTS} - , 
{"virtual-points" , 0 , RRDR_OPTION_VIRTUAL_POINTS} - , {"all-dimensions" , 0 , RRDR_OPTION_ALL_DIMENSIONS} - , {"details" , 0 , RRDR_OPTION_SHOW_DETAILS} - , {"debug" , 0 , RRDR_OPTION_DEBUG} - , {"plan" , 0 , RRDR_OPTION_DEBUG} - , {"minify" , 0 , RRDR_OPTION_MINIFY} - , {"group-by-labels" , 0 , RRDR_OPTION_GROUP_BY_LABELS} - , {"label-quotes" , 0 , RRDR_OPTION_LABEL_QUOTES} - , {NULL , 0 , 0} -}; - -static struct { - const char *name; - uint32_t hash; - CONTEXTS_V2_OPTIONS value; -} contexts_v2_options[] = { - {"minify" , 0 , CONTEXT_V2_OPTION_MINIFY} - , {"debug" , 0 , CONTEXT_V2_OPTION_DEBUG} - , {"config" , 0 , CONTEXT_V2_OPTION_ALERTS_WITH_CONFIGURATIONS} - , {"instances" , 0 , CONTEXT_V2_OPTION_ALERTS_WITH_INSTANCES} - , {"values" , 0 , CONTEXT_V2_OPTION_ALERTS_WITH_VALUES} - , {"summary" , 0 , CONTEXT_V2_OPTION_ALERTS_WITH_SUMMARY} - , {NULL , 0 , 0} -}; - -static struct { - const char *name; - uint32_t hash; - CONTEXTS_V2_ALERT_STATUS value; -} contexts_v2_alert_status[] = { - {"uninitialized" , 0 , CONTEXT_V2_ALERT_UNINITIALIZED} - , {"undefined" , 0 , CONTEXT_V2_ALERT_UNDEFINED} - , {"clear" , 0 , CONTEXT_V2_ALERT_CLEAR} - , {"raised" , 0 , CONTEXT_V2_ALERT_RAISED} - , {"active" , 0 , CONTEXT_V2_ALERT_RAISED} - , {"warning" , 0 , CONTEXT_V2_ALERT_WARNING} - , {"critical" , 0 , CONTEXT_V2_ALERT_CRITICAL} - , {NULL , 0 , 0} -}; - -static struct { - const char *name; - uint32_t hash; - DATASOURCE_FORMAT value; -} api_v1_data_formats[] = { - { DATASOURCE_FORMAT_DATATABLE_JSON , 0 , DATASOURCE_DATATABLE_JSON} - , {DATASOURCE_FORMAT_DATATABLE_JSONP, 0 , DATASOURCE_DATATABLE_JSONP} - , {DATASOURCE_FORMAT_JSON , 0 , DATASOURCE_JSON} - , {DATASOURCE_FORMAT_JSON2 , 0 , DATASOURCE_JSON2} - , {DATASOURCE_FORMAT_JSONP , 0 , DATASOURCE_JSONP} - , {DATASOURCE_FORMAT_SSV , 0 , DATASOURCE_SSV} - , {DATASOURCE_FORMAT_CSV , 0 , DATASOURCE_CSV} - , {DATASOURCE_FORMAT_TSV , 0 , DATASOURCE_TSV} - , {"tsv-excel" , 0 , DATASOURCE_TSV} - , {DATASOURCE_FORMAT_HTML , 0 , DATASOURCE_HTML} - , {DATASOURCE_FORMAT_JS_ARRAY , 0 , DATASOURCE_JS_ARRAY} - , {DATASOURCE_FORMAT_SSV_COMMA , 0 , DATASOURCE_SSV_COMMA} - , {DATASOURCE_FORMAT_CSV_JSON_ARRAY , 0 , DATASOURCE_CSV_JSON_ARRAY} - , {DATASOURCE_FORMAT_CSV_MARKDOWN , 0 , DATASOURCE_CSV_MARKDOWN} - - // terminator - , {NULL, 0, 0} -}; - -static struct { - const char *name; - uint32_t hash; - DATASOURCE_FORMAT value; -} api_v1_data_google_formats[] = { - // this is not an error - when Google requests json, it expects javascript - // https://developers.google.com/chart/interactive/docs/dev/implementing_data_source#responseformat - {"json", 0, DATASOURCE_DATATABLE_JSONP} - , {"html", 0, DATASOURCE_HTML} - , {"csv", 0, DATASOURCE_CSV} - , {"tsv-excel", 0, DATASOURCE_TSV} - - // terminator - , {NULL, 0, 0} -}; - -void web_client_api_v1_init(void) { - int i; - - for(i = 0; contexts_v2_alert_status[i].name ; i++) - contexts_v2_alert_status[i].hash = simple_hash(contexts_v2_alert_status[i].name); - - for(i = 0; rrdr_options[i].name ; i++) - rrdr_options[i].hash = simple_hash(rrdr_options[i].name); - - for(i = 0; contexts_v2_options[i].name ; i++) - contexts_v2_options[i].hash = simple_hash(contexts_v2_options[i].name); - - for(i = 0; api_v1_data_formats[i].name ; i++) - api_v1_data_formats[i].hash = simple_hash(api_v1_data_formats[i].name); - - for(i = 0; api_v1_data_google_formats[i].name ; i++) - api_v1_data_google_formats[i].hash = simple_hash(api_v1_data_google_formats[i].name); - - time_grouping_init(); - - nd_uuid_t uuid; - - // generate - uuid_generate(uuid); - 
- // unparse (to string) - char uuid_str[37]; - uuid_unparse_lower(uuid, uuid_str); -} - -char *get_mgmt_api_key(void) { - char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, "%s/netdata.api.key", netdata_configured_varlib_dir); - char *api_key_filename=config_get(CONFIG_SECTION_REGISTRY, "netdata management api key file", filename); - static char guid[GUID_LEN + 1] = ""; - - if(likely(guid[0])) - return guid; - - // read it from disk - int fd = open(api_key_filename, O_RDONLY | O_CLOEXEC); - if(fd != -1) { - char buf[GUID_LEN + 1]; - if(read(fd, buf, GUID_LEN) != GUID_LEN) - netdata_log_error("Failed to read management API key from '%s'", api_key_filename); - else { - buf[GUID_LEN] = '\0'; - if(regenerate_guid(buf, guid) == -1) { - netdata_log_error("Failed to validate management API key '%s' from '%s'.", - buf, api_key_filename); - - guid[0] = '\0'; - } - } - close(fd); - } - - // generate a new one? - if(!guid[0]) { - nd_uuid_t uuid; - - uuid_generate_time(uuid); - uuid_unparse_lower(uuid, guid); - guid[GUID_LEN] = '\0'; - - // save it - fd = open(api_key_filename, O_WRONLY|O_CREAT|O_TRUNC | O_CLOEXEC, 444); - if(fd == -1) { - netdata_log_error("Cannot create unique management API key file '%s'. Please adjust config parameter 'netdata management api key file' to a proper path and file.", api_key_filename); - goto temp_key; - } - - if(write(fd, guid, GUID_LEN) != GUID_LEN) { - netdata_log_error("Cannot write the unique management API key file '%s'. Please adjust config parameter 'netdata management api key file' to a proper path and file with enough space left.", api_key_filename); - close(fd); - goto temp_key; - } - - close(fd); - } - - return guid; - -temp_key: - netdata_log_info("You can still continue to use the alarm management API using the authorization token %s during this Netdata session only.", guid); - return guid; -} - -void web_client_api_v1_management_init(void) { - api_secret = get_mgmt_api_key(); -} - -inline RRDR_OPTIONS rrdr_options_parse_one(const char *o) { - RRDR_OPTIONS ret = 0; - - if(!o || !*o) return ret; - - uint32_t hash = simple_hash(o); - int i; - for(i = 0; rrdr_options[i].name ; i++) { - if (unlikely(hash == rrdr_options[i].hash && !strcmp(o, rrdr_options[i].name))) { - ret |= rrdr_options[i].value; - break; - } - } - - return ret; -} - -inline RRDR_OPTIONS rrdr_options_parse(char *o) { - RRDR_OPTIONS ret = 0; - char *tok; - - while(o && *o && (tok = strsep_skip_consecutive_separators(&o, ", |"))) { - if(!*tok) continue; - ret |= rrdr_options_parse_one(tok); - } - - return ret; -} - -inline CONTEXTS_V2_OPTIONS web_client_api_request_v2_context_options(char *o) { - CONTEXTS_V2_OPTIONS ret = 0; - char *tok; - - while(o && *o && (tok = strsep_skip_consecutive_separators(&o, ", |"))) { - if(!*tok) continue; - - uint32_t hash = simple_hash(tok); - int i; - for(i = 0; contexts_v2_options[i].name ; i++) { - if (unlikely(hash == contexts_v2_options[i].hash && !strcmp(tok, contexts_v2_options[i].name))) { - ret |= contexts_v2_options[i].value; - break; - } - } - } - - return ret; -} - -inline CONTEXTS_V2_ALERT_STATUS web_client_api_request_v2_alert_status(char *o) { - CONTEXTS_V2_ALERT_STATUS ret = 0; - char *tok; - - while(o && *o && (tok = strsep_skip_consecutive_separators(&o, ", |"))) { - if(!*tok) continue; - - uint32_t hash = simple_hash(tok); - int i; - for(i = 0; contexts_v2_alert_status[i].name ; i++) { - if (unlikely(hash == contexts_v2_alert_status[i].hash && !strcmp(tok, contexts_v2_alert_status[i].name))) { - ret |= 
contexts_v2_alert_status[i].value; - break; - } - } - } - - return ret; -} - -void web_client_api_request_v2_contexts_alerts_status_to_buffer_json_array(BUFFER *wb, const char *key, CONTEXTS_V2_ALERT_STATUS options) { - buffer_json_member_add_array(wb, key); - - RRDR_OPTIONS used = 0; // to prevent adding duplicates - for(int i = 0; contexts_v2_alert_status[i].name ; i++) { - if (unlikely((contexts_v2_alert_status[i].value & options) && !(contexts_v2_alert_status[i].value & used))) { - const char *name = contexts_v2_alert_status[i].name; - used |= contexts_v2_alert_status[i].value; - - buffer_json_add_array_item_string(wb, name); - } - } - - buffer_json_array_close(wb); -} - -void web_client_api_request_v2_contexts_options_to_buffer_json_array(BUFFER *wb, const char *key, CONTEXTS_V2_OPTIONS options) { - buffer_json_member_add_array(wb, key); - - RRDR_OPTIONS used = 0; // to prevent adding duplicates - for(int i = 0; contexts_v2_options[i].name ; i++) { - if (unlikely((contexts_v2_options[i].value & options) && !(contexts_v2_options[i].value & used))) { - const char *name = contexts_v2_options[i].name; - used |= contexts_v2_options[i].value; - - buffer_json_add_array_item_string(wb, name); - } - } - - buffer_json_array_close(wb); -} - -void rrdr_options_to_buffer_json_array(BUFFER *wb, const char *key, RRDR_OPTIONS options) { - buffer_json_member_add_array(wb, key); - - RRDR_OPTIONS used = 0; // to prevent adding duplicates - for(int i = 0; rrdr_options[i].name ; i++) { - if (unlikely((rrdr_options[i].value & options) && !(rrdr_options[i].value & used))) { - const char *name = rrdr_options[i].name; - used |= rrdr_options[i].value; - - buffer_json_add_array_item_string(wb, name); - } - } - - buffer_json_array_close(wb); -} - -void rrdr_options_to_buffer(BUFFER *wb, RRDR_OPTIONS options) { - RRDR_OPTIONS used = 0; // to prevent adding duplicates - size_t added = 0; - for(int i = 0; rrdr_options[i].name ; i++) { - if (unlikely((rrdr_options[i].value & options) && !(rrdr_options[i].value & used))) { - const char *name = rrdr_options[i].name; - used |= rrdr_options[i].value; - - if(added++) buffer_strcat(wb, " "); - buffer_strcat(wb, name); - } - } -} - -void web_client_api_request_v1_data_options_to_string(char *buf, size_t size, RRDR_OPTIONS options) { - char *write = buf; - char *end = &buf[size - 1]; - - RRDR_OPTIONS used = 0; // to prevent adding duplicates - int added = 0; - for(int i = 0; rrdr_options[i].name ; i++) { - if (unlikely((rrdr_options[i].value & options) && !(rrdr_options[i].value & used))) { - const char *name = rrdr_options[i].name; - used |= rrdr_options[i].value; - - if(added && write < end) - *write++ = ','; - - while(*name && write < end) - *write++ = *name++; - - added++; - } - } - *write = *end = '\0'; -} - -inline uint32_t web_client_api_request_v1_data_format(char *name) { - uint32_t hash = simple_hash(name); - int i; - - for(i = 0; api_v1_data_formats[i].name ; i++) { - if (unlikely(hash == api_v1_data_formats[i].hash && !strcmp(name, api_v1_data_formats[i].name))) { - return api_v1_data_formats[i].value; - } - } - - return DATASOURCE_JSON; -} - -inline uint32_t web_client_api_request_v1_data_google_format(char *name) { - uint32_t hash = simple_hash(name); - int i; - - for(i = 0; api_v1_data_google_formats[i].name ; i++) { - if (unlikely(hash == api_v1_data_google_formats[i].hash && !strcmp(name, api_v1_data_google_formats[i].name))) { - return api_v1_data_google_formats[i].value; - } - } - - return DATASOURCE_JSON; -} - -int web_client_api_request_v1_alarms_select 
(char *url) { - int all = 0; - while(url) { - char *value = strsep_skip_consecutive_separators(&url, "&"); - if (!value || !*value) continue; - - if(!strcmp(value, "all") || !strcmp(value, "all=true")) all = 1; - else if(!strcmp(value, "active") || !strcmp(value, "active=true")) all = 0; - } - - return all; -} - -inline int web_client_api_request_v1_alarms(RRDHOST *host, struct web_client *w, char *url) { - int all = web_client_api_request_v1_alarms_select(url); - - buffer_flush(w->response.data); - w->response.data->content_type = CT_APPLICATION_JSON; - health_alarms2json(host, w->response.data, all); - buffer_no_cacheable(w->response.data); - return HTTP_RESP_OK; -} - -inline int web_client_api_request_v1_alarms_values(RRDHOST *host, struct web_client *w, char *url) { - int all = web_client_api_request_v1_alarms_select(url); - - buffer_flush(w->response.data); - w->response.data->content_type = CT_APPLICATION_JSON; - health_alarms_values2json(host, w->response.data, all); - buffer_no_cacheable(w->response.data); - return HTTP_RESP_OK; -} - -inline int web_client_api_request_v1_alarm_count(RRDHOST *host, struct web_client *w, char *url) { - RRDCALC_STATUS status = RRDCALC_STATUS_RAISED; - BUFFER *contexts = NULL; - - buffer_flush(w->response.data); - buffer_sprintf(w->response.data, "["); - - while(url) { - char *value = strsep_skip_consecutive_separators(&url, "&"); - if(!value || !*value) continue; - - char *name = strsep_skip_consecutive_separators(&value, "="); - if(!name || !*name) continue; - if(!value || !*value) continue; - - netdata_log_debug(D_WEB_CLIENT, "%llu: API v1 alarm_count query param '%s' with value '%s'", w->id, name, value); - - char* p = value; - if(!strcmp(name, "status")) { - while ((*p = toupper(*p))) p++; - if (!strcmp("CRITICAL", value)) status = RRDCALC_STATUS_CRITICAL; - else if (!strcmp("WARNING", value)) status = RRDCALC_STATUS_WARNING; - else if (!strcmp("UNINITIALIZED", value)) status = RRDCALC_STATUS_UNINITIALIZED; - else if (!strcmp("UNDEFINED", value)) status = RRDCALC_STATUS_UNDEFINED; - else if (!strcmp("REMOVED", value)) status = RRDCALC_STATUS_REMOVED; - else if (!strcmp("CLEAR", value)) status = RRDCALC_STATUS_CLEAR; - } - else if(!strcmp(name, "context") || !strcmp(name, "ctx")) { - if(!contexts) contexts = buffer_create(255, &netdata_buffers_statistics.buffers_api); - buffer_strcat(contexts, "|"); - buffer_strcat(contexts, value); - } - } - - health_aggregate_alarms(host, w->response.data, contexts, status); - - buffer_sprintf(w->response.data, "]\n"); - w->response.data->content_type = CT_APPLICATION_JSON; - buffer_no_cacheable(w->response.data); - - buffer_free(contexts); - return 200; -} - -inline int web_client_api_request_v1_alarm_log(RRDHOST *host, struct web_client *w, char *url) { - time_t after = 0; - char *chart = NULL; - - while(url) { - char *value = strsep_skip_consecutive_separators(&url, "&"); - if (!value || !*value) continue; - - char *name = strsep_skip_consecutive_separators(&value, "="); - if(!name || !*name) continue; - if(!value || !*value) continue; - - if (!strcmp(name, "after")) after = (time_t) strtoul(value, NULL, 0); - else if (!strcmp(name, "chart")) chart = value; - } - - buffer_flush(w->response.data); - w->response.data->content_type = CT_APPLICATION_JSON; - sql_health_alarm_log2json(host, w->response.data, after, chart); - return HTTP_RESP_OK; -} - -inline int web_client_api_request_single_chart(RRDHOST *host, struct web_client *w, char *url, void callback(RRDSET *st, BUFFER *buf)) { - int ret = 
HTTP_RESP_BAD_REQUEST; - char *chart = NULL; - - buffer_flush(w->response.data); - - while(url) { - char *value = strsep_skip_consecutive_separators(&url, "&"); - if(!value || !*value) continue; - - char *name = strsep_skip_consecutive_separators(&value, "="); - if(!name || !*name) continue; - if(!value || !*value) continue; - - // name and value are now the parameters - // they are not null and not empty - - if(!strcmp(name, "chart")) chart = value; - //else { - /// buffer_sprintf(w->response.data, "Unknown parameter '%s' in request.", name); - // goto cleanup; - //} - } - - if(!chart || !*chart) { - buffer_sprintf(w->response.data, "No chart id is given at the request."); - goto cleanup; - } - - RRDSET *st = rrdset_find(host, chart); - if(!st) st = rrdset_find_byname(host, chart); - if(!st) { - buffer_strcat(w->response.data, "Chart is not found: "); - buffer_strcat_htmlescape(w->response.data, chart); - ret = HTTP_RESP_NOT_FOUND; - goto cleanup; - } - - w->response.data->content_type = CT_APPLICATION_JSON; - st->last_accessed_time_s = now_realtime_sec(); - callback(st, w->response.data); - return HTTP_RESP_OK; - - cleanup: - return ret; -} - -static inline int web_client_api_request_variable(RRDHOST *host, struct web_client *w, char *url) { - int ret = HTTP_RESP_BAD_REQUEST; - char *chart = NULL; - char *variable = NULL; - - buffer_flush(w->response.data); - - while(url) { - char *value = strsep_skip_consecutive_separators(&url, "&"); - if(!value || !*value) continue; - - char *name = strsep_skip_consecutive_separators(&value, "="); - if(!name || !*name) continue; - if(!value || !*value) continue; - - // name and value are now the parameters - // they are not null and not empty - - if(!strcmp(name, "chart")) chart = value; - else if(!strcmp(name, "variable")) variable = value; - } - - if(!chart || !*chart || !variable || !*variable) { - buffer_sprintf(w->response.data, "A chart= and a variable= are required."); - goto cleanup; - } - - RRDSET *st = rrdset_find(host, chart); - if(!st) st = rrdset_find_byname(host, chart); - if(!st) { - buffer_strcat(w->response.data, "Chart is not found: "); - buffer_strcat_htmlescape(w->response.data, chart); - ret = HTTP_RESP_NOT_FOUND; - goto cleanup; - } - - w->response.data->content_type = CT_APPLICATION_JSON; - st->last_accessed_time_s = now_realtime_sec(); - alert_variable_lookup_trace(host, st, variable, w->response.data); - - return HTTP_RESP_OK; - -cleanup: - return ret; -} - -inline int web_client_api_request_v1_alarm_variables(RRDHOST *host, struct web_client *w, char *url) { - return web_client_api_request_single_chart(host, w, url, health_api_v1_chart_variables2json); -} - -static int web_client_api_request_v1_context(RRDHOST *host, struct web_client *w, char *url) { - char *context = NULL; - RRDCONTEXT_TO_JSON_OPTIONS options = RRDCONTEXT_OPTION_NONE; - time_t after = 0, before = 0; - const char *chart_label_key = NULL, *chart_labels_filter = NULL; - BUFFER *dimensions = NULL; - - buffer_flush(w->response.data); - - while(url) { - char *value = strsep_skip_consecutive_separators(&url, "&"); - if(!value || !*value) continue; - - char *name = strsep_skip_consecutive_separators(&value, "="); - if(!name || !*name) continue; - if(!value || !*value) continue; - - // name and value are now the parameters - // they are not null and not empty - - if(!strcmp(name, "context") || !strcmp(name, "ctx")) context = value; - else if(!strcmp(name, "after")) after = str2l(value); - else if(!strcmp(name, "before")) before = str2l(value); - else 
if(!strcmp(name, "options")) options = rrdcontext_to_json_parse_options(value); - else if(!strcmp(name, "chart_label_key")) chart_label_key = value; - else if(!strcmp(name, "chart_labels_filter")) chart_labels_filter = value; - else if(!strcmp(name, "dimension") || !strcmp(name, "dim") || !strcmp(name, "dimensions") || !strcmp(name, "dims")) { - if(!dimensions) dimensions = buffer_create(100, &netdata_buffers_statistics.buffers_api); - buffer_strcat(dimensions, "|"); - buffer_strcat(dimensions, value); - } - } - - if(!context || !*context) { - buffer_sprintf(w->response.data, "No context is given at the request."); - return HTTP_RESP_BAD_REQUEST; - } - - SIMPLE_PATTERN *chart_label_key_pattern = NULL; - SIMPLE_PATTERN *chart_labels_filter_pattern = NULL; - SIMPLE_PATTERN *chart_dimensions_pattern = NULL; - - if(chart_label_key) - chart_label_key_pattern = simple_pattern_create(chart_label_key, ",|\t\r\n\f\v", SIMPLE_PATTERN_EXACT, true); - - if(chart_labels_filter) - chart_labels_filter_pattern = simple_pattern_create(chart_labels_filter, ",|\t\r\n\f\v", SIMPLE_PATTERN_EXACT, - true); - - if(dimensions) { - chart_dimensions_pattern = simple_pattern_create(buffer_tostring(dimensions), ",|\t\r\n\f\v", - SIMPLE_PATTERN_EXACT, true); - buffer_free(dimensions); - } - - w->response.data->content_type = CT_APPLICATION_JSON; - int ret = rrdcontext_to_json(host, w->response.data, after, before, options, context, chart_label_key_pattern, chart_labels_filter_pattern, chart_dimensions_pattern); - - simple_pattern_free(chart_label_key_pattern); - simple_pattern_free(chart_labels_filter_pattern); - simple_pattern_free(chart_dimensions_pattern); - - return ret; -} - -static int web_client_api_request_v1_contexts(RRDHOST *host, struct web_client *w, char *url) { - RRDCONTEXT_TO_JSON_OPTIONS options = RRDCONTEXT_OPTION_NONE; - time_t after = 0, before = 0; - const char *chart_label_key = NULL, *chart_labels_filter = NULL; - BUFFER *dimensions = NULL; - - buffer_flush(w->response.data); - - while(url) { - char *value = strsep_skip_consecutive_separators(&url, "&"); - if(!value || !*value) continue; - - char *name = strsep_skip_consecutive_separators(&value, "="); - if(!name || !*name) continue; - if(!value || !*value) continue; - - // name and value are now the parameters - // they are not null and not empty - - if(!strcmp(name, "after")) after = str2l(value); - else if(!strcmp(name, "before")) before = str2l(value); - else if(!strcmp(name, "options")) options = rrdcontext_to_json_parse_options(value); - else if(!strcmp(name, "chart_label_key")) chart_label_key = value; - else if(!strcmp(name, "chart_labels_filter")) chart_labels_filter = value; - else if(!strcmp(name, "dimension") || !strcmp(name, "dim") || !strcmp(name, "dimensions") || !strcmp(name, "dims")) { - if(!dimensions) dimensions = buffer_create(100, &netdata_buffers_statistics.buffers_api); - buffer_strcat(dimensions, "|"); - buffer_strcat(dimensions, value); - } - } - - SIMPLE_PATTERN *chart_label_key_pattern = NULL; - SIMPLE_PATTERN *chart_labels_filter_pattern = NULL; - SIMPLE_PATTERN *chart_dimensions_pattern = NULL; - - if(chart_label_key) - chart_label_key_pattern = simple_pattern_create(chart_label_key, ",|\t\r\n\f\v", SIMPLE_PATTERN_EXACT, true); - - if(chart_labels_filter) - chart_labels_filter_pattern = simple_pattern_create(chart_labels_filter, ",|\t\r\n\f\v", SIMPLE_PATTERN_EXACT, - true); - - if(dimensions) { - chart_dimensions_pattern = simple_pattern_create(buffer_tostring(dimensions), ",|\t\r\n\f\v", - SIMPLE_PATTERN_EXACT, 
true); - buffer_free(dimensions); - } - - w->response.data->content_type = CT_APPLICATION_JSON; - int ret = rrdcontexts_to_json(host, w->response.data, after, before, options, chart_label_key_pattern, chart_labels_filter_pattern, chart_dimensions_pattern); - - simple_pattern_free(chart_label_key_pattern); - simple_pattern_free(chart_labels_filter_pattern); - simple_pattern_free(chart_dimensions_pattern); - - return ret; -} - -inline int web_client_api_request_v1_charts(RRDHOST *host, struct web_client *w, char *url) { - (void)url; - - buffer_flush(w->response.data); - w->response.data->content_type = CT_APPLICATION_JSON; - charts2json(host, w->response.data); - return HTTP_RESP_OK; -} - -inline int web_client_api_request_v1_chart(RRDHOST *host, struct web_client *w, char *url) { - return web_client_api_request_single_chart(host, w, url, rrd_stats_api_v1_chart); -} - -// returns the HTTP code -static inline int web_client_api_request_v1_data(RRDHOST *host, struct web_client *w, char *url) { - netdata_log_debug(D_WEB_CLIENT, "%llu: API v1 data with URL '%s'", w->id, url); - - int ret = HTTP_RESP_BAD_REQUEST; - BUFFER *dimensions = NULL; - - buffer_flush(w->response.data); - - char *google_version = "0.6", - *google_reqId = "0", - *google_sig = "0", - *google_out = "json", - *responseHandler = NULL, - *outFileName = NULL; - - time_t last_timestamp_in_data = 0, google_timestamp = 0; - - char *chart = NULL; - char *before_str = NULL; - char *after_str = NULL; - char *group_time_str = NULL; - char *points_str = NULL; - char *timeout_str = NULL; - char *context = NULL; - char *chart_label_key = NULL; - char *chart_labels_filter = NULL; - char *group_options = NULL; - size_t tier = 0; - RRDR_TIME_GROUPING group = RRDR_GROUPING_AVERAGE; - DATASOURCE_FORMAT format = DATASOURCE_JSON; - RRDR_OPTIONS options = 0; - - while(url) { - char *value = strsep_skip_consecutive_separators(&url, "&"); - if(!value || !*value) continue; - - char *name = strsep_skip_consecutive_separators(&value, "="); - if(!name || !*name) continue; - if(!value || !*value) continue; - - netdata_log_debug(D_WEB_CLIENT, "%llu: API v1 data query param '%s' with value '%s'", w->id, name, value); - - // name and value are now the parameters - // they are not null and not empty - - if(!strcmp(name, "context")) context = value; - else if(!strcmp(name, "chart_label_key")) chart_label_key = value; - else if(!strcmp(name, "chart_labels_filter")) chart_labels_filter = value; - else if(!strcmp(name, "chart")) chart = value; - else if(!strcmp(name, "dimension") || !strcmp(name, "dim") || !strcmp(name, "dimensions") || !strcmp(name, "dims")) { - if(!dimensions) dimensions = buffer_create(100, &netdata_buffers_statistics.buffers_api); - buffer_strcat(dimensions, "|"); - buffer_strcat(dimensions, value); - } - else if(!strcmp(name, "show_dimensions")) options |= RRDR_OPTION_ALL_DIMENSIONS; - else if(!strcmp(name, "after")) after_str = value; - else if(!strcmp(name, "before")) before_str = value; - else if(!strcmp(name, "points")) points_str = value; - else if(!strcmp(name, "timeout")) timeout_str = value; - else if(!strcmp(name, "gtime")) group_time_str = value; - else if(!strcmp(name, "group_options")) group_options = value; - else if(!strcmp(name, "group")) { - group = time_grouping_parse(value, RRDR_GROUPING_AVERAGE); - } - else if(!strcmp(name, "format")) { - format = web_client_api_request_v1_data_format(value); - } - else if(!strcmp(name, "options")) { - options |= rrdr_options_parse(value); - } - else if(!strcmp(name, "callback")) { - 
responseHandler = value; - } - else if(!strcmp(name, "filename")) { - outFileName = value; - } - else if(!strcmp(name, "tqx")) { - // parse Google Visualization API options - // https://developers.google.com/chart/interactive/docs/dev/implementing_data_source - char *tqx_name, *tqx_value; - - while(value) { - tqx_value = strsep_skip_consecutive_separators(&value, ";"); - if(!tqx_value || !*tqx_value) continue; - - tqx_name = strsep_skip_consecutive_separators(&tqx_value, ":"); - if(!tqx_name || !*tqx_name) continue; - if(!tqx_value || !*tqx_value) continue; - - if(!strcmp(tqx_name, "version")) - google_version = tqx_value; - else if(!strcmp(tqx_name, "reqId")) - google_reqId = tqx_value; - else if(!strcmp(tqx_name, "sig")) { - google_sig = tqx_value; - google_timestamp = strtoul(google_sig, NULL, 0); - } - else if(!strcmp(tqx_name, "out")) { - google_out = tqx_value; - format = web_client_api_request_v1_data_google_format(google_out); - } - else if(!strcmp(tqx_name, "responseHandler")) - responseHandler = tqx_value; - else if(!strcmp(tqx_name, "outFileName")) - outFileName = tqx_value; - } - } - else if(!strcmp(name, "tier")) { - tier = str2ul(value); - if(tier < storage_tiers) - options |= RRDR_OPTION_SELECTED_TIER; - else - tier = 0; - } - } - - // validate the google parameters given - fix_google_param(google_out); - fix_google_param(google_sig); - fix_google_param(google_reqId); - fix_google_param(google_version); - fix_google_param(responseHandler); - fix_google_param(outFileName); - - RRDSET *st = NULL; - ONEWAYALLOC *owa = onewayalloc_create(0); - QUERY_TARGET *qt = NULL; - - if(!is_valid_sp(chart) && !is_valid_sp(context)) { - buffer_sprintf(w->response.data, "No chart or context is given."); - goto cleanup; - } - - if(chart && !context) { - // check if this is a specific chart - st = rrdset_find(host, chart); - if (!st) st = rrdset_find_byname(host, chart); - } - - long long before = (before_str && *before_str)?str2l(before_str):0; - long long after = (after_str && *after_str) ?str2l(after_str):-600; - int points = (points_str && *points_str)?str2i(points_str):0; - int timeout = (timeout_str && *timeout_str)?str2i(timeout_str): 0; - long group_time = (group_time_str && *group_time_str)?str2l(group_time_str):0; - - QUERY_TARGET_REQUEST qtr = { - .version = 1, - .after = after, - .before = before, - .host = host, - .st = st, - .nodes = NULL, - .contexts = context, - .instances = chart, - .dimensions = (dimensions)?buffer_tostring(dimensions):NULL, - .timeout_ms = timeout, - .points = points, - .format = format, - .options = options, - .time_group_method = group, - .time_group_options = group_options, - .resampling_time = group_time, - .tier = tier, - .chart_label_key = chart_label_key, - .labels = chart_labels_filter, - .query_source = QUERY_SOURCE_API_DATA, - .priority = STORAGE_PRIORITY_NORMAL, - .interrupt_callback = web_client_interrupt_callback, - .interrupt_callback_data = w, - .transaction = &w->transaction, - }; - qt = query_target_create(&qtr); - - if(!qt || !qt->query.used) { - buffer_sprintf(w->response.data, "No metrics where matched to query."); - ret = HTTP_RESP_NOT_FOUND; - goto cleanup; - } - - web_client_timeout_checkpoint_set(w, timeout); - if(web_client_timeout_checkpoint_and_check(w, NULL)) { - ret = w->response.code; - goto cleanup; - } - - if(outFileName && *outFileName) { - buffer_sprintf(w->response.header, "Content-Disposition: attachment; filename=\"%s\"\r\n", outFileName); - netdata_log_debug(D_WEB_CLIENT, "%llu: generating outfilename header: '%s'", 
w->id, outFileName); - } - - if(format == DATASOURCE_DATATABLE_JSONP) { - if(responseHandler == NULL) - responseHandler = "google.visualization.Query.setResponse"; - - netdata_log_debug(D_WEB_CLIENT_ACCESS, "%llu: GOOGLE JSON/JSONP: version = '%s', reqId = '%s', sig = '%s', out = '%s', responseHandler = '%s', outFileName = '%s'", - w->id, google_version, google_reqId, google_sig, google_out, responseHandler, outFileName - ); - - buffer_sprintf( - w->response.data, - "%s({version:'%s',reqId:'%s',status:'ok',sig:'%"PRId64"',table:", - responseHandler, - google_version, - google_reqId, - (int64_t)(st ? st->last_updated.tv_sec : 0)); - } - else if(format == DATASOURCE_JSONP) { - if(responseHandler == NULL) - responseHandler = "callback"; - - buffer_strcat(w->response.data, responseHandler); - buffer_strcat(w->response.data, "("); - } - - ret = data_query_execute(owa, w->response.data, qt, &last_timestamp_in_data); - - if(format == DATASOURCE_DATATABLE_JSONP) { - if(google_timestamp < last_timestamp_in_data) - buffer_strcat(w->response.data, "});"); - - else { - // the client already has the latest data - buffer_flush(w->response.data); - buffer_sprintf(w->response.data, - "%s({version:'%s',reqId:'%s',status:'error',errors:[{reason:'not_modified',message:'Data not modified'}]});", - responseHandler, google_version, google_reqId); - } - } - else if(format == DATASOURCE_JSONP) - buffer_strcat(w->response.data, ");"); - - if(qt->internal.relative) - buffer_no_cacheable(w->response.data); - else - buffer_cacheable(w->response.data); - -cleanup: - query_target_release(qt); - onewayalloc_destroy(owa); - buffer_free(dimensions); - return ret; -} - -// Pings a netdata server: -// /api/v1/registry?action=hello -// -// Access to a netdata registry: -// /api/v1/registry?action=access&machine=${machine_guid}&name=${hostname}&url=${url} -// -// Delete from a netdata registry: -// /api/v1/registry?action=delete&machine=${machine_guid}&name=${hostname}&url=${url}&delete_url=${delete_url} -// -// Search for the URLs of a machine: -// /api/v1/registry?action=search&for=${machine_guid} -// -// Impersonate: -// /api/v1/registry?action=switch&machine=${machine_guid}&name=${hostname}&url=${url}&to=${new_person_guid} -inline int web_client_api_request_v1_registry(RRDHOST *host, struct web_client *w, char *url) { - static uint32_t hash_action = 0, hash_access = 0, hash_hello = 0, hash_delete = 0, hash_search = 0, - hash_switch = 0, hash_machine = 0, hash_url = 0, hash_name = 0, hash_delete_url = 0, hash_for = 0, - hash_to = 0 /*, hash_redirects = 0 */; - - if(unlikely(!hash_action)) { - hash_action = simple_hash("action"); - hash_access = simple_hash("access"); - hash_hello = simple_hash("hello"); - hash_delete = simple_hash("delete"); - hash_search = simple_hash("search"); - hash_switch = simple_hash("switch"); - hash_machine = simple_hash("machine"); - hash_url = simple_hash("url"); - hash_name = simple_hash("name"); - hash_delete_url = simple_hash("delete_url"); - hash_for = simple_hash("for"); - hash_to = simple_hash("to"); -/* - hash_redirects = simple_hash("redirects"); -*/ - } - - netdata_log_debug(D_WEB_CLIENT, "%llu: API v1 registry with URL '%s'", w->id, url); - - // TODO - // The browser may send multiple cookies with our id - - char person_guid[UUID_STR_LEN] = ""; - char *cookie = strstr(w->response.data->buffer, NETDATA_REGISTRY_COOKIE_NAME "="); - if(cookie) - strncpyz(person_guid, &cookie[sizeof(NETDATA_REGISTRY_COOKIE_NAME)], UUID_STR_LEN - 1); - else if(!extract_bearer_token_from_request(w, 
-
-// Pings a netdata server:
-// /api/v1/registry?action=hello
-//
-// Access to a netdata registry:
-// /api/v1/registry?action=access&machine=${machine_guid}&name=${hostname}&url=${url}
-//
-// Delete from a netdata registry:
-// /api/v1/registry?action=delete&machine=${machine_guid}&name=${hostname}&url=${url}&delete_url=${delete_url}
-//
-// Search for the URLs of a machine:
-// /api/v1/registry?action=search&for=${machine_guid}
-//
-// Impersonate:
-// /api/v1/registry?action=switch&machine=${machine_guid}&name=${hostname}&url=${url}&to=${new_person_guid}
-inline int web_client_api_request_v1_registry(RRDHOST *host, struct web_client *w, char *url) {
-    static uint32_t hash_action = 0, hash_access = 0, hash_hello = 0, hash_delete = 0, hash_search = 0,
-            hash_switch = 0, hash_machine = 0, hash_url = 0, hash_name = 0, hash_delete_url = 0, hash_for = 0,
-            hash_to = 0 /*, hash_redirects = 0 */;
-
-    if(unlikely(!hash_action)) {
-        hash_action = simple_hash("action");
-        hash_access = simple_hash("access");
-        hash_hello = simple_hash("hello");
-        hash_delete = simple_hash("delete");
-        hash_search = simple_hash("search");
-        hash_switch = simple_hash("switch");
-        hash_machine = simple_hash("machine");
-        hash_url = simple_hash("url");
-        hash_name = simple_hash("name");
-        hash_delete_url = simple_hash("delete_url");
-        hash_for = simple_hash("for");
-        hash_to = simple_hash("to");
-/*
-        hash_redirects = simple_hash("redirects");
-*/
-    }
-
-    netdata_log_debug(D_WEB_CLIENT, "%llu: API v1 registry with URL '%s'", w->id, url);
-
-    // TODO
-    // The browser may send multiple cookies with our id
-
-    char person_guid[UUID_STR_LEN] = "";
-    char *cookie = strstr(w->response.data->buffer, NETDATA_REGISTRY_COOKIE_NAME "=");
-    if(cookie)
-        strncpyz(person_guid, &cookie[sizeof(NETDATA_REGISTRY_COOKIE_NAME)], UUID_STR_LEN - 1);
-    else if(!extract_bearer_token_from_request(w, person_guid, sizeof(person_guid)))
-        person_guid[0] = '\0';
-
-    char action = '\0';
-    char *machine_guid = NULL,
-            *machine_url = NULL,
-            *url_name = NULL,
-            *search_machine_guid = NULL,
-            *delete_url = NULL,
-            *to_person_guid = NULL;
-/*
-    int redirects = 0;
-*/
-
-    // Don't cache registry responses
-    buffer_no_cacheable(w->response.data);
-
-    while(url) {
-        char *value = strsep_skip_consecutive_separators(&url, "&");
-        if (!value || !*value) continue;
-
-        char *name = strsep_skip_consecutive_separators(&value, "=");
-        if (!name || !*name) continue;
-        if (!value || !*value) continue;
-
-        netdata_log_debug(D_WEB_CLIENT, "%llu: API v1 registry query param '%s' with value '%s'", w->id, name, value);
-
-        uint32_t hash = simple_hash(name);
-
-        if(hash == hash_action && !strcmp(name, "action")) {
-            uint32_t vhash = simple_hash(value);
-
-            if(vhash == hash_access && !strcmp(value, "access")) action = 'A';
-            else if(vhash == hash_hello && !strcmp(value, "hello")) action = 'H';
-            else if(vhash == hash_delete && !strcmp(value, "delete")) action = 'D';
-            else if(vhash == hash_search && !strcmp(value, "search")) action = 'S';
-            else if(vhash == hash_switch && !strcmp(value, "switch")) action = 'W';
-#ifdef NETDATA_INTERNAL_CHECKS
-            else netdata_log_error("unknown registry action '%s'", value);
-#endif /* NETDATA_INTERNAL_CHECKS */
-        }
-/*
-        else if(hash == hash_redirects && !strcmp(name, "redirects"))
-            redirects = atoi(value);
-*/
-        else if(hash == hash_machine && !strcmp(name, "machine"))
-            machine_guid = value;
-
-        else if(hash == hash_url && !strcmp(name, "url"))
-            machine_url = value;
-
-        else if(action == 'A') {
-            if(hash == hash_name && !strcmp(name, "name"))
-                url_name = value;
-        }
-        else if(action == 'D') {
-            if(hash == hash_delete_url && !strcmp(name, "delete_url"))
-                delete_url = value;
-        }
-        else if(action == 'S') {
-            if(hash == hash_for && !strcmp(name, "for"))
-                search_machine_guid = value;
-        }
-        else if(action == 'W') {
-            if(hash == hash_to && !strcmp(name, "to"))
-                to_person_guid = value;
-        }
-#ifdef NETDATA_INTERNAL_CHECKS
-        else netdata_log_error("unused registry URL parameter '%s' with value '%s'", name, value);
-#endif /* NETDATA_INTERNAL_CHECKS */
-    }
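
The parameter loop above uses netdata's hash-then-strcmp idiom: a 32-bit hash pre-filters candidates and strcmp() confirms the match, so hash collisions stay harmless. A self-contained sketch of the idiom, with FNV-1a standing in for simple_hash() (an assumption; the real hash lives in libnetdata):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t fnv1a(const char *s) {
    uint32_t h = 0x811c9dc5u;                          // FNV offset basis
    while(*s) { h ^= (uint8_t)*s++; h *= 16777619u; }  // FNV prime
    return h;
}

int main(void) {
    static uint32_t hash_machine = 0, hash_url = 0;
    if(!hash_machine) {                 // primed once, like the statics above
        hash_machine = fnv1a("machine");
        hash_url = fnv1a("url");
    }

    const char *name = "machine";
    uint32_t hash = fnv1a(name);

    if(hash == hash_machine && !strcmp(name, "machine"))
        printf("matched parameter 'machine'\n");
    else if(hash == hash_url && !strcmp(name, "url"))
        printf("matched parameter 'url'\n");
    return 0;
}
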
url_name : "UNSET"); - buffer_flush(w->response.data); - buffer_strcat(w->response.data, "Invalid registry Access request."); - return HTTP_RESP_BAD_REQUEST; - } - - web_client_enable_tracking_required(w); - return registry_request_access_json(host, w, person_guid, machine_guid, machine_url, url_name, now_realtime_sec()); - - case 'D': - if(unlikely(!machine_guid || !machine_url || !delete_url)) { - netdata_log_error("Invalid registry request - delete requires these parameters: machine ('%s'), url ('%s'), delete_url ('%s')", machine_guid?machine_guid:"UNSET", machine_url?machine_url:"UNSET", delete_url?delete_url:"UNSET"); - buffer_flush(w->response.data); - buffer_strcat(w->response.data, "Invalid registry Delete request."); - return HTTP_RESP_BAD_REQUEST; - } - - web_client_enable_tracking_required(w); - return registry_request_delete_json(host, w, person_guid, machine_guid, machine_url, delete_url, now_realtime_sec()); - - case 'S': - if(unlikely(!search_machine_guid)) { - netdata_log_error("Invalid registry request - search requires these parameters: for ('%s')", search_machine_guid?search_machine_guid:"UNSET"); - buffer_flush(w->response.data); - buffer_strcat(w->response.data, "Invalid registry Search request."); - return HTTP_RESP_BAD_REQUEST; - } - - web_client_enable_tracking_required(w); - return registry_request_search_json(host, w, person_guid, search_machine_guid); - - case 'W': - if(unlikely(!machine_guid || !machine_url || !to_person_guid)) { - netdata_log_error("Invalid registry request - switching identity requires these parameters: machine ('%s'), url ('%s'), to ('%s')", machine_guid?machine_guid:"UNSET", machine_url?machine_url:"UNSET", to_person_guid?to_person_guid:"UNSET"); - buffer_flush(w->response.data); - buffer_strcat(w->response.data, "Invalid registry Switch request."); - return HTTP_RESP_BAD_REQUEST; - } - - web_client_enable_tracking_required(w); - return registry_request_switch_json(host, w, person_guid, machine_guid, machine_url, to_person_guid, now_realtime_sec()); - - case 'H': - return registry_request_hello_json(host, w, do_not_track); - - default: - buffer_flush(w->response.data); - buffer_strcat(w->response.data, "Invalid registry request - you need to set an action: hello, access, delete, search"); - return HTTP_RESP_BAD_REQUEST; - } -} - -void web_client_api_request_v1_info_summary_alarm_statuses(RRDHOST *host, BUFFER *wb, const char *key) { - buffer_json_member_add_object(wb, key); - - size_t normal = 0, warning = 0, critical = 0; - RRDCALC *rc; - foreach_rrdcalc_in_rrdhost_read(host, rc) { - if(unlikely(!rc->rrdset || !rc->rrdset->last_collected_time.tv_sec)) - continue; - - switch(rc->status) { - case RRDCALC_STATUS_WARNING: - warning++; - break; - case RRDCALC_STATUS_CRITICAL: - critical++; - break; - default: - normal++; - } - } - foreach_rrdcalc_in_rrdhost_done(rc); - - buffer_json_member_add_uint64(wb, "normal", normal); - buffer_json_member_add_uint64(wb, "warning", warning); - buffer_json_member_add_uint64(wb, "critical", critical); - - buffer_json_object_close(wb); -} - -static inline void web_client_api_request_v1_info_mirrored_hosts_status(BUFFER *wb, RRDHOST *host) { - buffer_json_add_array_item_object(wb); - - buffer_json_member_add_string(wb, "hostname", rrdhost_hostname(host)); - buffer_json_member_add_uint64(wb, "hops", host->system_info ? host->system_info->hops : (host == localhost) ? 
-
-void web_client_api_request_v1_info_summary_alarm_statuses(RRDHOST *host, BUFFER *wb, const char *key) {
-    buffer_json_member_add_object(wb, key);
-
-    size_t normal = 0, warning = 0, critical = 0;
-    RRDCALC *rc;
-    foreach_rrdcalc_in_rrdhost_read(host, rc) {
-        if(unlikely(!rc->rrdset || !rc->rrdset->last_collected_time.tv_sec))
-            continue;
-
-        switch(rc->status) {
-            case RRDCALC_STATUS_WARNING:
-                warning++;
-                break;
-            case RRDCALC_STATUS_CRITICAL:
-                critical++;
-                break;
-            default:
-                normal++;
-        }
-    }
-    foreach_rrdcalc_in_rrdhost_done(rc);
-
-    buffer_json_member_add_uint64(wb, "normal", normal);
-    buffer_json_member_add_uint64(wb, "warning", warning);
-    buffer_json_member_add_uint64(wb, "critical", critical);
-
-    buffer_json_object_close(wb);
-}
-
-static inline void web_client_api_request_v1_info_mirrored_hosts_status(BUFFER *wb, RRDHOST *host) {
-    buffer_json_add_array_item_object(wb);
-
-    buffer_json_member_add_string(wb, "hostname", rrdhost_hostname(host));
-    buffer_json_member_add_uint64(wb, "hops", host->system_info ? host->system_info->hops : (host == localhost) ? 0 : 1);
-    buffer_json_member_add_boolean(wb, "reachable", (host == localhost || !rrdhost_flag_check(host, RRDHOST_FLAG_ORPHAN)));
-
-    buffer_json_member_add_string(wb, "guid", host->machine_guid);
-    buffer_json_member_add_uuid(wb, "node_id", host->node_id);
-    rrdhost_aclk_state_lock(host);
-    buffer_json_member_add_string(wb, "claim_id", host->aclk_state.claimed_id);
-    rrdhost_aclk_state_unlock(host);
-
-    buffer_json_object_close(wb);
-}
-
-static inline void web_client_api_request_v1_info_mirrored_hosts(BUFFER *wb) {
-    RRDHOST *host;
-
-    rrd_rdlock();
-
-    buffer_json_member_add_array(wb, "mirrored_hosts");
-    rrdhost_foreach_read(host)
-        buffer_json_add_array_item_string(wb, rrdhost_hostname(host));
-    buffer_json_array_close(wb);
-
-    buffer_json_member_add_array(wb, "mirrored_hosts_status");
-    rrdhost_foreach_read(host) {
-        if ((host == localhost || !rrdhost_flag_check(host, RRDHOST_FLAG_ORPHAN))) {
-            web_client_api_request_v1_info_mirrored_hosts_status(wb, host);
-        }
-    }
-    rrdhost_foreach_read(host) {
-        if ((host != localhost && rrdhost_flag_check(host, RRDHOST_FLAG_ORPHAN))) {
-            web_client_api_request_v1_info_mirrored_hosts_status(wb, host);
-        }
-    }
-    buffer_json_array_close(wb);
-
-    rrd_rdunlock();
-}
-
-void host_labels2json(RRDHOST *host, BUFFER *wb, const char *key) {
-    buffer_json_member_add_object(wb, key);
-    rrdlabels_to_buffer_json_members(host->rrdlabels, wb);
-    buffer_json_object_close(wb);
-}
-
-static void host_collectors(RRDHOST *host, BUFFER *wb) {
-    buffer_json_member_add_array(wb, "collectors");
-
-    DICTIONARY *dict = dictionary_create(DICT_OPTION_SINGLE_THREADED|DICT_OPTION_DONT_OVERWRITE_VALUE);
-    RRDSET *st;
-    char name[500];
-
-    time_t now = now_realtime_sec();
-
-    rrdset_foreach_read(st, host) {
-        if (!rrdset_is_available_for_viewers(st))
-            continue;
-
-        sprintf(name, "%s:%s", rrdset_plugin_name(st), rrdset_module_name(st));
-
-        bool old = 0;
-        bool *set = dictionary_set(dict, name, &old, sizeof(bool));
-        if(!*set) {
-            *set = true;
-            st->last_accessed_time_s = now;
-            buffer_json_add_array_item_object(wb);
-            buffer_json_member_add_string(wb, "plugin", rrdset_plugin_name(st));
-            buffer_json_member_add_string(wb, "module", rrdset_module_name(st));
-            buffer_json_object_close(wb);
-        }
-    }
-    rrdset_foreach_done(st);
-    dictionary_destroy(dict);
-
-    buffer_json_array_close(wb);
-}
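
The helpers above all follow the same BUFFER JSON-builder pattern: open a member, add scalars, close members in reverse order of opening. A fragment showing the shape (it reuses only calls that appear in this file, so it compiles inside the netdata tree only; the numbers are illustrative):

// Builds: "alarms": { "normal": 12, "warning": 1, "critical": 0 }
static void example_alarm_summary(BUFFER *wb) {
    buffer_json_member_add_object(wb, "alarms");
    buffer_json_member_add_uint64(wb, "normal", 12);
    buffer_json_member_add_uint64(wb, "warning", 1);
    buffer_json_member_add_uint64(wb, "critical", 0);
    buffer_json_object_close(wb);
}
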
-
-extern int aclk_connected;
-inline int web_client_api_request_v1_info_fill_buffer(RRDHOST *host, BUFFER *wb) {
-    buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT);
-
-    buffer_json_member_add_string(wb, "version", rrdhost_program_version(host));
-    buffer_json_member_add_string(wb, "uid", host->machine_guid);
-
-    buffer_json_member_add_uint64(wb, "hosts-available", rrdhost_hosts_available());
-    web_client_api_request_v1_info_mirrored_hosts(wb);
-
-    web_client_api_request_v1_info_summary_alarm_statuses(host, wb, "alarms");
-
-    buffer_json_member_add_string_or_empty(wb, "os_name", host->system_info->host_os_name);
-    buffer_json_member_add_string_or_empty(wb, "os_id", host->system_info->host_os_id);
-    buffer_json_member_add_string_or_empty(wb, "os_id_like", host->system_info->host_os_id_like);
-    buffer_json_member_add_string_or_empty(wb, "os_version", host->system_info->host_os_version);
-    buffer_json_member_add_string_or_empty(wb, "os_version_id", host->system_info->host_os_version_id);
-    buffer_json_member_add_string_or_empty(wb, "os_detection", host->system_info->host_os_detection);
-    buffer_json_member_add_string_or_empty(wb, "cores_total", host->system_info->host_cores);
-    buffer_json_member_add_string_or_empty(wb, "total_disk_space", host->system_info->host_disk_space);
-    buffer_json_member_add_string_or_empty(wb, "cpu_freq", host->system_info->host_cpu_freq);
-    buffer_json_member_add_string_or_empty(wb, "ram_total", host->system_info->host_ram_total);
-
-    buffer_json_member_add_string_or_omit(wb, "container_os_name", host->system_info->container_os_name);
-    buffer_json_member_add_string_or_omit(wb, "container_os_id", host->system_info->container_os_id);
-    buffer_json_member_add_string_or_omit(wb, "container_os_id_like", host->system_info->container_os_id_like);
-    buffer_json_member_add_string_or_omit(wb, "container_os_version", host->system_info->container_os_version);
-    buffer_json_member_add_string_or_omit(wb, "container_os_version_id", host->system_info->container_os_version_id);
-    buffer_json_member_add_string_or_omit(wb, "container_os_detection", host->system_info->container_os_detection);
-    buffer_json_member_add_string_or_omit(wb, "is_k8s_node", host->system_info->is_k8s_node);
-
-    buffer_json_member_add_string_or_empty(wb, "kernel_name", host->system_info->kernel_name);
-    buffer_json_member_add_string_or_empty(wb, "kernel_version", host->system_info->kernel_version);
-    buffer_json_member_add_string_or_empty(wb, "architecture", host->system_info->architecture);
-    buffer_json_member_add_string_or_empty(wb, "virtualization", host->system_info->virtualization);
-    buffer_json_member_add_string_or_empty(wb, "virt_detection", host->system_info->virt_detection);
-    buffer_json_member_add_string_or_empty(wb, "container", host->system_info->container);
-    buffer_json_member_add_string_or_empty(wb, "container_detection", host->system_info->container_detection);
-
-    buffer_json_member_add_string_or_omit(wb, "cloud_provider_type", host->system_info->cloud_provider_type);
-    buffer_json_member_add_string_or_omit(wb, "cloud_instance_type", host->system_info->cloud_instance_type);
-    buffer_json_member_add_string_or_omit(wb, "cloud_instance_region", host->system_info->cloud_instance_region);
-
-    host_labels2json(host, wb, "host_labels");
-    host_functions2json(host, wb);
-    host_collectors(host, wb);
-
-    buffer_json_member_add_boolean(wb, "cloud-enabled", netdata_cloud_enabled);
-
-#ifdef ENABLE_ACLK
-    buffer_json_member_add_boolean(wb, "cloud-available", true);
-#else
-    buffer_json_member_add_boolean(wb, "cloud-available", false);
-#endif
-
-    char *agent_id = get_agent_claimid();
-    buffer_json_member_add_boolean(wb, "agent-claimed", agent_id != NULL);
-    freez(agent_id);
-
-#ifdef ENABLE_ACLK
-    buffer_json_member_add_boolean(wb, "aclk-available", aclk_connected);
-#else
-    buffer_json_member_add_boolean(wb, "aclk-available", false);
-#endif
-
-    buffer_json_member_add_string(wb, "memory-mode", rrd_memory_mode_name(host->rrd_memory_mode));
-#ifdef ENABLE_DBENGINE
-    buffer_json_member_add_uint64(wb, "multidb-disk-quota", default_multidb_disk_quota_mb);
-    buffer_json_member_add_uint64(wb, "page-cache-size", default_rrdeng_page_cache_mb);
-#endif // ENABLE_DBENGINE
-    buffer_json_member_add_boolean(wb, "web-enabled", web_server_mode != WEB_SERVER_MODE_NONE);
-    buffer_json_member_add_boolean(wb, "stream-enabled", default_rrdpush_enabled);
-
-    buffer_json_member_add_boolean(wb, "stream-compression", host->sender && host->sender->compressor.initialized);
-
-#ifdef ENABLE_HTTPS
-    buffer_json_member_add_boolean(wb, "https-enabled", true);
-#else
-    buffer_json_member_add_boolean(wb, "https-enabled", false);
-#endif
-
buffer_json_member_add_quoted_string(wb, "buildinfo", analytics_data.netdata_buildinfo); - buffer_json_member_add_quoted_string(wb, "release-channel", analytics_data.netdata_config_release_channel); - buffer_json_member_add_quoted_string(wb, "notification-methods", analytics_data.netdata_notification_methods); - - buffer_json_member_add_boolean(wb, "exporting-enabled", analytics_data.exporting_enabled); - buffer_json_member_add_quoted_string(wb, "exporting-connectors", analytics_data.netdata_exporting_connectors); - - buffer_json_member_add_uint64(wb, "allmetrics-prometheus-used", analytics_data.prometheus_hits); - buffer_json_member_add_uint64(wb, "allmetrics-shell-used", analytics_data.shell_hits); - buffer_json_member_add_uint64(wb, "allmetrics-json-used", analytics_data.json_hits); - buffer_json_member_add_uint64(wb, "dashboard-used", analytics_data.dashboard_hits); - - buffer_json_member_add_uint64(wb, "charts-count", analytics_data.charts_count); - buffer_json_member_add_uint64(wb, "metrics-count", analytics_data.metrics_count); - -#if defined(ENABLE_ML) - buffer_json_member_add_object(wb, "ml-info"); - ml_host_get_info(host, wb); - buffer_json_object_close(wb); -#endif - - buffer_json_finalize(wb); - return 0; -} - -#if defined(ENABLE_ML) -int web_client_api_request_v1_ml_info(RRDHOST *host, struct web_client *w, char *url) { - (void) url; - - if (!netdata_ready) - return HTTP_RESP_SERVICE_UNAVAILABLE; - - BUFFER *wb = w->response.data; - buffer_flush(wb); - wb->content_type = CT_APPLICATION_JSON; - - buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT); - ml_host_get_detection_info(host, wb); - buffer_json_finalize(wb); - - buffer_no_cacheable(wb); - - return HTTP_RESP_OK; -} -#endif // ENABLE_ML - -inline int web_client_api_request_v1_info(RRDHOST *host, struct web_client *w, char *url) { - (void)url; - if (!netdata_ready) return HTTP_RESP_SERVICE_UNAVAILABLE; - BUFFER *wb = w->response.data; - buffer_flush(wb); - wb->content_type = CT_APPLICATION_JSON; - - web_client_api_request_v1_info_fill_buffer(host, wb); - - buffer_no_cacheable(wb); - return HTTP_RESP_OK; -} - -static int web_client_api_request_v1_aclk_state(RRDHOST *host, struct web_client *w, char *url) { - UNUSED(url); - UNUSED(host); - if (!netdata_ready) return HTTP_RESP_SERVICE_UNAVAILABLE; - - BUFFER *wb = w->response.data; - buffer_flush(wb); -#ifdef ENABLE_ACLK - char *str = aclk_state_json(); - buffer_strcat(wb, str); - freez(str); -#else - buffer_strcat(wb, "{\"aclk-available\":false}"); -#endif - - wb->content_type = CT_APPLICATION_JSON; - buffer_no_cacheable(wb); - return HTTP_RESP_OK; -} - -int web_client_api_request_v1_metric_correlations(RRDHOST *host, struct web_client *w, char *url) { - return web_client_api_request_weights(host, w, url, default_metric_correlations_method, WEIGHTS_FORMAT_CHARTS, 1); -} - -int web_client_api_request_v1_weights(RRDHOST *host, struct web_client *w, char *url) { - return web_client_api_request_weights(host, w, url, WEIGHTS_METHOD_ANOMALY_RATE, WEIGHTS_FORMAT_CONTEXTS, 1); -} - -static void web_client_progress_functions_update(void *data, size_t done, size_t all) { - // handle progress updates from the plugin - struct web_client *w = data; - query_progress_functions_update(&w->transaction, done, all); -} - -int web_client_api_request_v1_function(RRDHOST *host, struct web_client *w, char *url) { - if (!netdata_ready) - return HTTP_RESP_SERVICE_UNAVAILABLE; - - int timeout = 0; - const char *function = NULL; - - while (url) { - char *value = 
strsep_skip_consecutive_separators(&url, "&"); - if (!value || !*value) - continue; - - char *name = strsep_skip_consecutive_separators(&value, "="); - if (!name || !*name) - continue; - - if (!strcmp(name, "function")) - function = value; - - else if (!strcmp(name, "timeout")) - timeout = (int) strtoul(value, NULL, 0); - } - - BUFFER *wb = w->response.data; - buffer_flush(wb); - wb->content_type = CT_APPLICATION_JSON; - buffer_no_cacheable(wb); - - char transaction[UUID_COMPACT_STR_LEN]; - uuid_unparse_lower_compact(w->transaction, transaction); - - CLEAN_BUFFER *source = buffer_create(100, NULL); - web_client_source2buffer(w, source); - - return rrd_function_run(host, wb, timeout, w->access, function, true, transaction, - NULL, NULL, - web_client_progress_functions_update, w, - web_client_interrupt_callback, w, NULL, - buffer_tostring(source)); -} - -int web_client_api_request_v1_functions(RRDHOST *host, struct web_client *w, char *url __maybe_unused) { - if (!netdata_ready) - return HTTP_RESP_SERVICE_UNAVAILABLE; - - BUFFER *wb = w->response.data; - buffer_flush(wb); - wb->content_type = CT_APPLICATION_JSON; - buffer_no_cacheable(wb); - - buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT); - host_functions2json(host, wb); - buffer_json_finalize(wb); - - return HTTP_RESP_OK; -} - -void web_client_source2buffer(struct web_client *w, BUFFER *source) { - if(web_client_flag_check(w, WEB_CLIENT_FLAG_AUTH_CLOUD)) - buffer_sprintf(source, "method=NC"); - else if(web_client_flag_check(w, WEB_CLIENT_FLAG_AUTH_BEARER)) - buffer_sprintf(source, "method=api-bearer"); - else - buffer_sprintf(source, "method=api"); - - if(web_client_flag_check(w, WEB_CLIENT_FLAG_AUTH_GOD)) - buffer_strcat(source, ",role=god"); - else - buffer_sprintf(source, ",role=%s", http_id2user_role(w->user_role)); - - buffer_sprintf(source, ",permissions="HTTP_ACCESS_FORMAT, (HTTP_ACCESS_FORMAT_CAST)w->access); - - if(w->auth.client_name[0]) - buffer_sprintf(source, ",user=%s", w->auth.client_name); - - if(!uuid_is_null(w->auth.cloud_account_id)) { - char uuid_str[UUID_COMPACT_STR_LEN]; - uuid_unparse_lower_compact(w->auth.cloud_account_id, uuid_str); - buffer_sprintf(source, ",account=%s", uuid_str); - } - - if(w->client_ip[0]) - buffer_sprintf(source, ",ip=%s", w->client_ip); - - if(w->forwarded_for) - buffer_sprintf(source, ",forwarded_for=%s", w->forwarded_for); -} - -static int web_client_api_request_v1_config(RRDHOST *host, struct web_client *w, char *url __maybe_unused) { - char *action = "tree"; - char *path = "/"; - char *id = NULL; - char *add_name = NULL; - int timeout = 120; - - while(url) { - char *value = strsep_skip_consecutive_separators(&url, "&"); - if(!value || !*value) continue; - - char *name = strsep_skip_consecutive_separators(&value, "="); - if(!name || !*name) continue; - if(!value || !*value) continue; - - // name and value are now the parameters - // they are not null and not empty - - if(!strcmp(name, "action")) - action = value; - else if(!strcmp(name, "path")) - path = value; - else if(!strcmp(name, "id")) - id = value; - else if(!strcmp(name, "name")) - add_name = value; - else if(!strcmp(name, "timeout")) { - timeout = (int)strtol(value, NULL, 10); - if(timeout < 10) - timeout = 10; - } - } - - char transaction[UUID_COMPACT_STR_LEN]; - uuid_unparse_lower_compact(w->transaction, transaction); - - size_t len = strlen(action) + (id ? strlen(id) : 0) + strlen(path) + (add_name ? 
-
-static int web_client_api_request_v1_config(RRDHOST *host, struct web_client *w, char *url __maybe_unused) {
-    char *action = "tree";
-    char *path = "/";
-    char *id = NULL;
-    char *add_name = NULL;
-    int timeout = 120;
-
-    while(url) {
-        char *value = strsep_skip_consecutive_separators(&url, "&");
-        if(!value || !*value) continue;
-
-        char *name = strsep_skip_consecutive_separators(&value, "=");
-        if(!name || !*name) continue;
-        if(!value || !*value) continue;
-
-        // name and value are now the parameters
-        // they are not null and not empty
-
-        if(!strcmp(name, "action"))
-            action = value;
-        else if(!strcmp(name, "path"))
-            path = value;
-        else if(!strcmp(name, "id"))
-            id = value;
-        else if(!strcmp(name, "name"))
-            add_name = value;
-        else if(!strcmp(name, "timeout")) {
-            timeout = (int)strtol(value, NULL, 10);
-            if(timeout < 10)
-                timeout = 10;
-        }
-    }
-
-    char transaction[UUID_COMPACT_STR_LEN];
-    uuid_unparse_lower_compact(w->transaction, transaction);
-
-    size_t len = strlen(action) + (id ? strlen(id) : 0) + strlen(path) + (add_name ? strlen(add_name) : 0) + 100;
-
-    char cmd[len];
-    if(strcmp(action, "tree") == 0)
-        snprintfz(cmd, sizeof(cmd), PLUGINSD_FUNCTION_CONFIG " tree '%s' '%s'", path, id?id:"");
-    else {
-        DYNCFG_CMDS c = dyncfg_cmds2id(action);
-        if(!id || !*id || !dyncfg_is_valid_id(id)) {
-            rrd_call_function_error(w->response.data, "invalid id given", HTTP_RESP_BAD_REQUEST);
-            return HTTP_RESP_BAD_REQUEST;
-        }
-
-        if(c == DYNCFG_CMD_NONE) {
-            rrd_call_function_error(w->response.data, "invalid action given", HTTP_RESP_BAD_REQUEST);
-            return HTTP_RESP_BAD_REQUEST;
-        }
-
-        if(c == DYNCFG_CMD_ADD || c == DYNCFG_CMD_USERCONFIG || c == DYNCFG_CMD_TEST) {
-            if(c == DYNCFG_CMD_TEST && (!add_name || !*add_name)) {
-                // backwards compatibility for TEST without a name
-                char *colon = strrchr(id, ':');
-                if(colon) {
-                    *colon = '\0';
-                    add_name = ++colon;
-                }
-                else
-                    add_name = "test";
-            }
-
-            if(!add_name || !*add_name || !dyncfg_is_valid_id(add_name)) {
-                rrd_call_function_error(w->response.data, "invalid name given", HTTP_RESP_BAD_REQUEST);
-                return HTTP_RESP_BAD_REQUEST;
-            }
-            snprintfz(cmd, sizeof(cmd), PLUGINSD_FUNCTION_CONFIG " %s %s %s", id, dyncfg_id2cmd_one(c), add_name);
-        }
-        else
-            snprintfz(cmd, sizeof(cmd), PLUGINSD_FUNCTION_CONFIG " %s %s", id, dyncfg_id2cmd_one(c));
-    }
-
-    CLEAN_BUFFER *source = buffer_create(100, NULL);
-    web_client_source2buffer(w, source);
-
-    buffer_flush(w->response.data);
-    int code = rrd_function_run(host, w->response.data, timeout, w->access, cmd,
-                                true, transaction,
-                                NULL, NULL,
-                                web_client_progress_functions_update, w,
-                                web_client_interrupt_callback, w,
-                                w->payload, buffer_tostring(source));
-
-    return code;
-}
-
-#ifndef ENABLE_DBENGINE
-int web_client_api_request_v1_dbengine_stats(RRDHOST *host __maybe_unused, struct web_client *w __maybe_unused, char *url __maybe_unused) {
-    return HTTP_RESP_NOT_FOUND;
-}
-#else
-static void web_client_api_v1_dbengine_stats_for_tier(BUFFER *wb, size_t tier) {
-    RRDENG_SIZE_STATS stats = rrdeng_size_statistics(multidb_ctx[tier]);
-
-    buffer_sprintf(wb,
-                   "\n\t\t\"default_granularity_secs\":%zu"
-                   ",\n\t\t\"sizeof_datafile\":%zu"
-                   ",\n\t\t\"sizeof_page_in_cache\":%zu"
-                   ",\n\t\t\"sizeof_point_data\":%zu"
-                   ",\n\t\t\"sizeof_page_data\":%zu"
-                   ",\n\t\t\"pages_per_extent\":%zu"
-                   ",\n\t\t\"datafiles\":%zu"
-                   ",\n\t\t\"extents\":%zu"
-                   ",\n\t\t\"extents_pages\":%zu"
-                   ",\n\t\t\"points\":%zu"
-                   ",\n\t\t\"metrics\":%zu"
-                   ",\n\t\t\"metrics_pages\":%zu"
-                   ",\n\t\t\"extents_compressed_bytes\":%zu"
-                   ",\n\t\t\"pages_uncompressed_bytes\":%zu"
-                   ",\n\t\t\"pages_duration_secs\":%lld"
-                   ",\n\t\t\"single_point_pages\":%zu"
-                   ",\n\t\t\"first_t\":%ld"
-                   ",\n\t\t\"last_t\":%ld"
-                   ",\n\t\t\"database_retention_secs\":%lld"
-                   ",\n\t\t\"average_compression_savings\":%0.2f"
-                   ",\n\t\t\"average_point_duration_secs\":%0.2f"
-                   ",\n\t\t\"average_metric_retention_secs\":%0.2f"
-                   ",\n\t\t\"ephemeral_metrics_per_day_percent\":%0.2f"
-                   ",\n\t\t\"average_page_size_bytes\":%0.2f"
-                   ",\n\t\t\"estimated_concurrently_collected_metrics\":%zu"
-                   ",\n\t\t\"currently_collected_metrics\":%zu"
-                   ",\n\t\t\"disk_space\":%zu"
-                   ",\n\t\t\"max_disk_space\":%zu"
-                   , stats.default_granularity_secs
-                   , stats.sizeof_datafile
-                   , stats.sizeof_page_in_cache
-                   , stats.sizeof_point_data
-                   , stats.sizeof_page_data
-                   , stats.pages_per_extent
-                   , stats.datafiles
-                   , stats.extents
-                   , stats.extents_pages
-                   , stats.points
-                   , stats.metrics
-                   , stats.metrics_pages
-                   , stats.extents_compressed_bytes
-                   , stats.pages_uncompressed_bytes
-                   , (long long)stats.pages_duration_secs
-                   , stats.single_point_pages
-                   , stats.first_time_s
-                   , stats.last_time_s
-                   , (long long)stats.database_retention_secs
-                   , stats.average_compression_savings
-                   , stats.average_point_duration_secs
-                   , stats.average_metric_retention_secs
-                   , stats.ephemeral_metrics_per_day_percent
-                   , stats.average_page_size_bytes
-                   , stats.estimated_concurrently_collected_metrics
-                   , stats.currently_collected_metrics
-                   , stats.disk_space
-                   , stats.max_disk_space
-    );
-}
-int web_client_api_request_v1_dbengine_stats(RRDHOST *host __maybe_unused, struct web_client *w, char *url __maybe_unused) {
-    if (!netdata_ready)
-        return HTTP_RESP_SERVICE_UNAVAILABLE;
-
-    BUFFER *wb = w->response.data;
-    buffer_flush(wb);
-
-    if(!dbengine_enabled) {
-        buffer_strcat(wb, "dbengine is not enabled");
-        return HTTP_RESP_NOT_FOUND;
-    }
-
-    wb->content_type = CT_APPLICATION_JSON;
-    buffer_no_cacheable(wb);
-    buffer_strcat(wb, "{");
-    for(size_t tier = 0; tier < storage_tiers ;tier++) {
-        buffer_sprintf(wb, "%s\n\t\"tier%zu\": {", tier?",":"", tier);
-        web_client_api_v1_dbengine_stats_for_tier(wb, tier);
-        buffer_strcat(wb, "\n\t}");
-    }
-    buffer_strcat(wb, "\n}");
-
-    return HTTP_RESP_OK;
-}
-#endif
-
-#define HLT_MGM "manage/health"
-int web_client_api_request_v1_mgmt(RRDHOST *host, struct web_client *w, char *url) {
-    const char *haystack = buffer_tostring(w->url_path_decoded);
-    char *needle;
-
-    buffer_flush(w->response.data);
-
-    if ((needle = strstr(haystack, HLT_MGM)) == NULL) {
-        buffer_strcat(w->response.data, "Invalid management request. Curently only 'health' is supported.");
-        return HTTP_RESP_NOT_FOUND;
-    }
-    needle += strlen(HLT_MGM);
-    if (*needle != '\0') {
-        buffer_strcat(w->response.data, "Invalid management request. Currently only 'health' is supported.");
-        return HTTP_RESP_NOT_FOUND;
-    }
-    return web_client_api_request_v1_mgmt_health(host, w, url);
-}
+#include "v1/api_v1_calls.h"
+#include "v2/api_v2_calls.h"
+#include "v3/api_v3_calls.h"
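
The command tables that follow are plain arrays of struct web_api_command, terminated by a NULL .api. How they are walked is not shown in this hunk, but the hash-priming loop at the end of web_api_v3.c below suggests a matcher of roughly this shape (a sketch under that assumption, not the actual implementation elsewhere in web_api.c):

static struct web_api_command *find_command(struct web_api_command *cmds, const char *endpoint) {
    uint32_t hash = simple_hash(endpoint);
    for(size_t i = 0; cmds[i].api; i++)                    // NULL .api terminates the table
        if(cmds[i].hash == hash && !strcmp(cmds[i].api, endpoint))
            return &cmds[i];                               // ACL and access checks follow
    return NULL;
}
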
 
 static struct web_api_command api_commands_v1[] = {
     // time-series data APIs
     {
         .api = "data",
         .hash = 0,
-        .acl = HTTP_ACL_DASHBOARD,
+        .acl = HTTP_ACL_METRICS,
         .access = HTTP_ACCESS_ANONYMOUS_DATA,
-        .callback = web_client_api_request_v1_data,
+        .callback = api_v1_data,
         .allow_subpaths = 0
     },
+#if defined(ENABLE_API_V1)
     {
         .api = "weights",
         .hash = 0,
-        .acl = HTTP_ACL_DASHBOARD,
+        .acl = HTTP_ACL_METRICS,
         .access = HTTP_ACCESS_ANONYMOUS_DATA,
-        .callback = web_client_api_request_v1_weights,
+        .callback = api_v1_weights,
         .allow_subpaths = 0
     },
     {
         // deprecated - do not use anymore - use "weights"
         .api = "metric_correlations",
         .hash = 0,
-        .acl = HTTP_ACL_DASHBOARD,
+        .acl = HTTP_ACL_METRICS,
         .access = HTTP_ACCESS_ANONYMOUS_DATA,
-        .callback = web_client_api_request_v1_metric_correlations,
+        .callback = api_v1_metric_correlations,
         .allow_subpaths = 0
     },
+#endif
     {
-        // exporting API
-        .api = "allmetrics",
+        .api = "badge.svg",
         .hash = 0,
-        .acl = HTTP_ACL_DASHBOARD,
+        .acl = HTTP_ACL_BADGES,
         .access = HTTP_ACCESS_ANONYMOUS_DATA,
-        .callback = web_client_api_request_v1_allmetrics,
+        .callback = api_v1_badge,
         .allow_subpaths = 0
     },
     {
-        // badges can be fetched with both dashboard and badge ACL
-        .api = "badge.svg",
+        // exporting API
+        .api = "allmetrics",
         .hash = 0,
-        .acl = HTTP_ACL_BADGES,
+        .acl = HTTP_ACL_METRICS,
         .access = HTTP_ACCESS_ANONYMOUS_DATA,
-        .callback = web_client_api_request_v1_badge,
+        .callback = api_v1_allmetrics,
         .allow_subpaths = 0
     },
 
     // alerts APIs
+#if defined(ENABLE_API_V1)
     {
         .api = "alarms",
         .hash = 0,
-        .acl = HTTP_ACL_DASHBOARD,
+        .acl = HTTP_ACL_ALERTS,
         .access = HTTP_ACCESS_ANONYMOUS_DATA,
-        .callback = web_client_api_request_v1_alarms,
+        .callback = api_v1_alarms,
         .allow_subpaths = 0
     },
     {
         .api = "alarms_values",
         .hash = 0,
-        .acl = HTTP_ACL_DASHBOARD,
+        .acl = HTTP_ACL_ALERTS,
         .access = HTTP_ACCESS_ANONYMOUS_DATA,
-        .callback = web_client_api_request_v1_alarms_values,
+        .callback = api_v1_alarms_values,
         .allow_subpaths = 0
     },
     {
         .api = "alarm_log",
         .hash = 0,
-        .acl = HTTP_ACL_DASHBOARD,
+        .acl = HTTP_ACL_ALERTS,
         .access = HTTP_ACCESS_ANONYMOUS_DATA,
-        .callback = web_client_api_request_v1_alarm_log,
+        .callback = api_v1_alarm_log,
         .allow_subpaths = 0
     },
     {
         .api = "alarm_variables",
         .hash = 0,
-        .acl = HTTP_ACL_DASHBOARD,
+        .acl = HTTP_ACL_ALERTS,
         .access = HTTP_ACCESS_ANONYMOUS_DATA,
-        .callback = web_client_api_request_v1_alarm_variables,
+        .callback = api_v1_alarm_variables,
         .allow_subpaths = 0
     },
     {
         .api = "variable",
         .hash = 0,
-        .acl = HTTP_ACL_DASHBOARD,
+        .acl = HTTP_ACL_ALERTS,
         .access = HTTP_ACCESS_ANONYMOUS_DATA,
-        .callback = web_client_api_request_variable,
+        .callback = api_v1_variable,
         .allow_subpaths = 0
     },
     {
         .api = "alarm_count",
         .hash = 0,
-        .acl = HTTP_ACL_DASHBOARD,
+        .acl = HTTP_ACL_ALERTS,
         .access = HTTP_ACCESS_ANONYMOUS_DATA,
-        .callback = web_client_api_request_v1_alarm_count,
+        .callback = api_v1_alarm_count,
         .allow_subpaths = 0
     },
+#endif
 
     // functions APIs - they check permissions per function call
     {
         .api = "function",
         .hash = 0,
-        .acl = HTTP_ACL_DASHBOARD,
+        .acl = HTTP_ACL_FUNCTIONS,
         .access = HTTP_ACCESS_ANONYMOUS_DATA,
-        .callback = web_client_api_request_v1_function,
+        .callback = api_v1_function,
         .allow_subpaths = 0
     },
+
+#if defined(ENABLE_API_V1)
     {
         .api = "functions",
         .hash = 0,
-        .acl = HTTP_ACL_DASHBOARD,
+        .acl = HTTP_ACL_FUNCTIONS,
         .access = HTTP_ACCESS_ANONYMOUS_DATA,
-        .callback = web_client_api_request_v1_functions,
+        .callback = api_v1_functions,
         .allow_subpaths = 0
     },
+#endif
 
     // time-series metadata APIs
+#if defined(ENABLE_API_V1)
     {
         .api = "chart",
         .hash = 0,
-        .acl = HTTP_ACL_DASHBOARD,
+        .acl = HTTP_ACL_METRICS,
         .access = HTTP_ACCESS_ANONYMOUS_DATA,
-        .callback = web_client_api_request_v1_chart,
+        .callback = api_v1_chart,
         .allow_subpaths = 0
     },
+#endif
     {
         .api = "charts",
         .hash = 0,
-        .acl = HTTP_ACL_DASHBOARD,
+        .acl = HTTP_ACL_METRICS,
         .access = HTTP_ACCESS_ANONYMOUS_DATA,
-        .callback = web_client_api_request_v1_charts,
+        .callback = api_v1_charts,
         .allow_subpaths = 0
     },
     {
         .api = "context",
         .hash = 0,
-        .acl = HTTP_ACL_DASHBOARD,
+        .acl = HTTP_ACL_METRICS,
         .access = HTTP_ACCESS_ANONYMOUS_DATA,
-        .callback = web_client_api_request_v1_context,
+        .callback = api_v1_context,
         .allow_subpaths = 0
     },
     {
         .api = "contexts",
         .hash = 0,
-        .acl = HTTP_ACL_DASHBOARD,
+        .acl = HTTP_ACL_METRICS,
         .access = HTTP_ACCESS_ANONYMOUS_DATA,
-        .callback = web_client_api_request_v1_contexts,
+        .callback = api_v1_contexts,
         .allow_subpaths = 0
     },
 
     // registry APIs
+#if defined(ENABLE_API_V1)
     {
         // registry checks the ACL by itself, so we allow everything
         .api = "registry",
         .hash = 0,
         .acl = HTTP_ACL_NONE, // it manages acl by itself
         .access = HTTP_ACCESS_NONE, // it manages access by itself
-        .callback = web_client_api_request_v1_registry,
+        .callback = api_v1_registry,
         .allow_subpaths = 0
     },
+#endif
 
     // agent information APIs
+#if defined(ENABLE_API_V1)
     {
         .api = "info",
         .hash = 0,
-        .acl = HTTP_ACL_DASHBOARD,
+        .acl = HTTP_ACL_NODES,
         .access = HTTP_ACCESS_ANONYMOUS_DATA,
-        .callback = web_client_api_request_v1_info,
+        .callback = api_v1_info,
         .allow_subpaths = 0
     },
     {
         .api = "aclk",
         .hash = 0,
-        .acl = HTTP_ACL_DASHBOARD,
+        .acl = HTTP_ACL_NODES,
         .access = HTTP_ACCESS_ANONYMOUS_DATA,
-        .callback = web_client_api_request_v1_aclk_state,
+        .callback = api_v1_aclk,
         .allow_subpaths = 0
     },
     {
         // deprecated - use /api/v2/info
         .api = "dbengine_stats",
         .hash = 0,
-        .acl = HTTP_ACL_DASHBOARD,
+        .acl = HTTP_ACL_NODES,
         .access = HTTP_ACCESS_ANONYMOUS_DATA,
-        .callback = web_client_api_request_v1_dbengine_stats,
+        .callback = api_v1_dbengine_stats,
         .allow_subpaths = 0
     },
-
-    // dyncfg APIs
     {
-        .api = "config",
+        .api = "ml_info",
         .hash = 0,
-        .acl = HTTP_ACL_DASHBOARD,
+        .acl = HTTP_ACL_NODES,
         .access = HTTP_ACCESS_ANONYMOUS_DATA,
-        .callback = web_client_api_request_v1_config,
+        .callback = api_v1_ml_info,
         .allow_subpaths = 0
     },
-
-#if defined(ENABLE_ML)
     {
-        .api = "ml_info",
-        .hash = 0,
-        .acl = HTTP_ACL_DASHBOARD,
-        .access = HTTP_ACCESS_ANONYMOUS_DATA,
-        .callback = web_client_api_request_v1_ml_info,
-        .allow_subpaths = 0
+        .api = "manage",
+        .hash = 0,
+        .acl = HTTP_ACL_MANAGEMENT,
+        .access = HTTP_ACCESS_NONE, // it manages access by itself
+        .callback = api_v1_manage,
+        .allow_subpaths = 1
     },
 #endif
+    // dyncfg APIs
     {
-        // deprecated
-        .api = "manage",
+        .api = "config",
         .hash = 0,
-        .acl = HTTP_ACL_MANAGEMENT,
-        .access = HTTP_ACCESS_NONE, // it manages access by itself
-        .callback = web_client_api_request_v1_mgmt,
-        .allow_subpaths = 1
+        .acl = HTTP_ACL_DYNCFG,
+        .access = HTTP_ACCESS_ANONYMOUS_DATA,
+        .callback = api_v1_config,
+        .allow_subpaths = 0
     },
     {
diff --git a/src/web/api/web_api_v1.h b/src/web/api/web_api_v1.h
index cf0efbd13..c102ac75c 100644
--- a/src/web/api/web_api_v1.h
+++ b/src/web/api/web_api_v1.h
@@ -5,43 +5,7 @@
 #include "web_api.h"
 
-struct web_client;
-
-CONTEXTS_V2_OPTIONS web_client_api_request_v2_context_options(char *o);
-CONTEXTS_V2_ALERT_STATUS web_client_api_request_v2_alert_status(char *o);
-void web_client_api_request_v2_contexts_options_to_buffer_json_array(BUFFER *wb, const char *key, CONTEXTS_V2_OPTIONS options);
-void web_client_api_request_v2_contexts_alerts_status_to_buffer_json_array(BUFFER *wb, const char *key, CONTEXTS_V2_ALERT_STATUS options);
-
-RRDR_OPTIONS rrdr_options_parse(char *o);
-RRDR_OPTIONS rrdr_options_parse_one(const char *o);
-
-void rrdr_options_to_buffer(BUFFER *wb, RRDR_OPTIONS options);
-void rrdr_options_to_buffer_json_array(BUFFER *wb, const char *key, RRDR_OPTIONS options);
-void web_client_api_request_v1_data_options_to_string(char *buf, size_t size, RRDR_OPTIONS options);
-
-uint32_t web_client_api_request_v1_data_format(char *name);
-uint32_t web_client_api_request_v1_data_google_format(char *name);
-
-int web_client_api_request_v1_alarms(RRDHOST *host, struct web_client *w, char *url);
-int web_client_api_request_v1_alarms_values(RRDHOST *host, struct web_client *w, char *url);
-int web_client_api_request_v1_alarm_log(RRDHOST *host, struct web_client *w, char *url);
-int web_client_api_request_single_chart(RRDHOST *host, struct web_client *w, char *url, void callback(RRDSET *st, BUFFER *buf));
-int web_client_api_request_v1_alarm_variables(RRDHOST *host, struct web_client *w, char *url);
-int web_client_api_request_v1_alarm_count(RRDHOST *host, struct web_client *w, char *url);
-int web_client_api_request_v1_charts(RRDHOST *host, struct web_client *w, char *url);
-int web_client_api_request_v1_chart(RRDHOST *host, struct web_client *w, char *url);
-int web_client_api_request_v1_registry(RRDHOST *host, struct web_client *w, char *url);
-int web_client_api_request_v1_info(RRDHOST *host, struct web_client *w, char *url);
 int web_client_api_request_v1(RRDHOST *host, struct web_client *w, char *url_path_endpoint);
-int web_client_api_request_v1_info_fill_buffer(RRDHOST *host, BUFFER *wb);
-
-void web_client_api_v1_init(void);
-void web_client_api_v1_management_init(void);
-
-void host_labels2json(RRDHOST *host, BUFFER *wb, const char *key);
-void web_client_api_request_v1_info_summary_alarm_statuses(RRDHOST *host, BUFFER *wb, const char *key);
-
-void web_client_source2buffer(struct web_client *w, BUFFER *source);
 
 extern char *api_secret;
diff --git a/src/web/api/web_api_v2.c b/src/web/api/web_api_v2.c
index c62ed9ed3..f8e52a94b 100644
--- a/src/web/api/web_api_v2.c
+++ b/src/web/api/web_api_v2.c
@@ -1,604 +1,27 @@
 // SPDX-License-Identifier: GPL-3.0-or-later
 
 #include "web_api_v2.h"
-#include "../rtc/webrtc.h"
-
-static bool verify_agent_uuids(const char *machine_guid, const char *node_id, const char *claim_id) {
-    if(!machine_guid || !node_id || !claim_id)
-        return false;
-
-    if(strcmp(machine_guid, localhost->machine_guid) != 0)
-        return false;
-
-    char *agent_claim_id = get_agent_claimid();
-
-    bool not_verified = (!agent_claim_id || strcmp(claim_id, agent_claim_id) != 0);
-    freez(agent_claim_id);
-
-    if(not_verified || !localhost->node_id)
-        return false;
-
-    char buf[UUID_STR_LEN];
-    uuid_unparse_lower(*localhost->node_id, buf);
-
-    if(strcmp(node_id, buf) != 0)
-        return false;
-
-    return true;
-}
-
-int api_v2_bearer_protection(RRDHOST *host __maybe_unused, struct web_client *w __maybe_unused, char *url) {
-    char *machine_guid = NULL;
-    char *claim_id = NULL;
-    char *node_id = NULL;
-    bool protection = netdata_is_protected_by_bearer;
-
-    while (url) {
-        char *value = strsep_skip_consecutive_separators(&url, "&");
-        if (!value || !*value) continue;
-
-        char *name = strsep_skip_consecutive_separators(&value, "=");
-        if (!name || !*name) continue;
-        if (!value || !*value) continue;
-
-        if(!strcmp(name, "bearer_protection")) {
-            if(!strcmp(value, "on") || !strcmp(value, "true") || !strcmp(value, "yes"))
-                protection = true;
-            else
-                protection = false;
-        }
-        else if(!strcmp(name, "machine_guid"))
-            machine_guid = value;
-        else if(!strcmp(name, "claim_id"))
-            claim_id = value;
-        else if(!strcmp(name, "node_id"))
-            node_id = value;
-    }
-
-    if(!verify_agent_uuids(machine_guid, node_id, claim_id)) {
-        buffer_flush(w->response.data);
-        buffer_strcat(w->response.data, "The request is missing or not matching local UUIDs");
-        return HTTP_RESP_BAD_REQUEST;
-    }
-
-    netdata_is_protected_by_bearer = protection;
-
-    BUFFER *wb = w->response.data;
-    buffer_flush(wb);
-    buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT);
-    buffer_json_member_add_boolean(wb, "bearer_protection", netdata_is_protected_by_bearer);
-    buffer_json_finalize(wb);
-
-    return HTTP_RESP_OK;
-}
-
-int api_v2_bearer_token(RRDHOST *host __maybe_unused, struct web_client *w __maybe_unused, char *url __maybe_unused) {
-    char *machine_guid = NULL;
-    char *claim_id = NULL;
-    char *node_id = NULL;
-
-    while(url) {
-        char *value = strsep_skip_consecutive_separators(&url, "&");
-        if (!value || !*value) continue;
-
-        char *name = strsep_skip_consecutive_separators(&value, "=");
-        if (!name || !*name) continue;
-        if (!value || !*value) continue;
-
-        if(!strcmp(name, "machine_guid"))
-            machine_guid = value;
-        else if(!strcmp(name, "claim_id"))
-            claim_id = value;
-        else if(!strcmp(name, "node_id"))
-            node_id = value;
-    }
-
-    if(!verify_agent_uuids(machine_guid, node_id, claim_id)) {
-        buffer_flush(w->response.data);
-        buffer_strcat(w->response.data, "The request is missing or not matching local UUIDs");
-        return HTTP_RESP_BAD_REQUEST;
-    }
-
-    nd_uuid_t uuid;
-    time_t expires_s = bearer_create_token(&uuid, w);
-
-    BUFFER *wb = w->response.data;
-    buffer_flush(wb);
-    buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT);
-    buffer_json_member_add_string(wb, "mg", localhost->machine_guid);
-    buffer_json_member_add_boolean(wb, "bearer_protection", netdata_is_protected_by_bearer);
-    buffer_json_member_add_uuid(wb, "token", &uuid);
-    buffer_json_member_add_time_t(wb, "expiration", expires_s);
-    buffer_json_finalize(wb);
-
-    return HTTP_RESP_OK;
-}
-
-static int web_client_api_request_v2_contexts_internal(RRDHOST *host __maybe_unused, struct web_client *w, char *url, CONTEXTS_V2_MODE mode) {
-    struct api_v2_contexts_request req = { 0 };
-
-    while(url) {
-        char *value = strsep_skip_consecutive_separators(&url, "&");
-        if(!value || !*value) continue;
-
-        char *name = strsep_skip_consecutive_separators(&value, "=");
-        if(!name || !*name) continue;
-        if(!value || !*value) continue;
-
-        // name and value are now the parameters
-        // they are not null and not empty
-
-        if(!strcmp(name, "scope_nodes"))
-            req.scope_nodes = value;
-        else if(!strcmp(name, "nodes"))
-            req.nodes = value;
-        else if((mode & (CONTEXTS_V2_CONTEXTS | CONTEXTS_V2_SEARCH | CONTEXTS_V2_ALERTS | CONTEXTS_V2_ALERT_TRANSITIONS)) && !strcmp(name, "scope_contexts"))
-            req.scope_contexts = value;
-        else if((mode & (CONTEXTS_V2_CONTEXTS | CONTEXTS_V2_SEARCH | CONTEXTS_V2_ALERTS | CONTEXTS_V2_ALERT_TRANSITIONS)) && !strcmp(name, "contexts"))
-            req.contexts = value;
-        else if((mode & CONTEXTS_V2_SEARCH) && !strcmp(name, "q"))
-            req.q = value;
-        else if(!strcmp(name, "options"))
-            req.options = web_client_api_request_v2_context_options(value);
-        else if(!strcmp(name, "after"))
-            req.after = str2l(value);
-        else if(!strcmp(name, "before"))
-            req.before = str2l(value);
-        else if(!strcmp(name, "timeout"))
-            req.timeout_ms = str2l(value);
-        else if(mode & (CONTEXTS_V2_ALERTS | CONTEXTS_V2_ALERT_TRANSITIONS)) {
-            if (!strcmp(name, "alert"))
-                req.alerts.alert = value;
-            else if (!strcmp(name, "transition"))
-                req.alerts.transition = value;
-            else if(mode & CONTEXTS_V2_ALERTS) {
-                if (!strcmp(name, "status"))
-                    req.alerts.status = web_client_api_request_v2_alert_status(value);
-            }
-            else if(mode & CONTEXTS_V2_ALERT_TRANSITIONS) {
-                if (!strcmp(name, "last"))
-                    req.alerts.last = strtoul(value, NULL, 0);
-                else if(!strcmp(name, "context"))
-                    req.contexts = value;
-                else if (!strcmp(name, "anchor_gi")) {
-                    req.alerts.global_id_anchor = str2ull(value, NULL);
-                }
-                else {
-                    for(int i = 0; i < ATF_TOTAL_ENTRIES ;i++) {
-                        if(!strcmp(name, alert_transition_facets[i].query_param))
-                            req.alerts.facets[i] = value;
-                    }
-                }
-            }
-        }
-    }
-
-    if ((mode & CONTEXTS_V2_ALERT_TRANSITIONS) && !req.alerts.last)
-        req.alerts.last = 1;
-
-    buffer_flush(w->response.data);
-    buffer_no_cacheable(w->response.data);
-    return rrdcontext_to_json_v2(w->response.data, &req, mode);
-}
-
-static int web_client_api_request_v2_alert_transitions(RRDHOST *host __maybe_unused, struct web_client *w, char *url) {
-    return web_client_api_request_v2_contexts_internal(host, w, url, CONTEXTS_V2_ALERT_TRANSITIONS | CONTEXTS_V2_NODES);
-}
-
-static int web_client_api_request_v2_alerts(RRDHOST *host __maybe_unused, struct web_client *w, char *url) {
-    return web_client_api_request_v2_contexts_internal(host, w, url, CONTEXTS_V2_ALERTS | CONTEXTS_V2_NODES);
-}
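
All the thin wrappers around web_client_api_request_v2_contexts_internal() differ only in the CONTEXTS_V2_* mode bits they OR together, which both gates the accepted query parameters (see the mode checks in the loop above) and selects the sections rrdcontext_to_json_v2() emits. A hypothetical extra endpoint would need nothing more than another flag combination (illustrative only, not part of the patch):

// Hypothetical: alerts plus version information in one reply.
static int example_alerts_with_versions(RRDHOST *host, struct web_client *w, char *url) {
    return web_client_api_request_v2_contexts_internal(
        host, w, url, CONTEXTS_V2_ALERTS | CONTEXTS_V2_NODES | CONTEXTS_V2_VERSIONS);
}
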
-
-static int web_client_api_request_v2_functions(RRDHOST *host __maybe_unused, struct web_client *w, char *url) {
-    return web_client_api_request_v2_contexts_internal(host, w, url, CONTEXTS_V2_FUNCTIONS | CONTEXTS_V2_NODES | CONTEXTS_V2_AGENTS | CONTEXTS_V2_VERSIONS);
-}
-
-static int web_client_api_request_v2_versions(RRDHOST *host __maybe_unused, struct web_client *w, char *url) {
-    return web_client_api_request_v2_contexts_internal(host, w, url, CONTEXTS_V2_VERSIONS);
-}
-
-static int web_client_api_request_v2_q(RRDHOST *host __maybe_unused, struct web_client *w, char *url) {
-    return web_client_api_request_v2_contexts_internal(host, w, url, CONTEXTS_V2_SEARCH | CONTEXTS_V2_CONTEXTS | CONTEXTS_V2_NODES | CONTEXTS_V2_AGENTS | CONTEXTS_V2_VERSIONS);
-}
-
-static int web_client_api_request_v2_contexts(RRDHOST *host __maybe_unused, struct web_client *w, char *url) {
-    return web_client_api_request_v2_contexts_internal(host, w, url, CONTEXTS_V2_CONTEXTS | CONTEXTS_V2_NODES | CONTEXTS_V2_AGENTS | CONTEXTS_V2_VERSIONS);
-}
-
-static int web_client_api_request_v2_nodes(RRDHOST *host __maybe_unused, struct web_client *w, char *url) {
-    return web_client_api_request_v2_contexts_internal(host, w, url, CONTEXTS_V2_NODES | CONTEXTS_V2_NODES_INFO);
-}
-
-static int web_client_api_request_v2_info(RRDHOST *host __maybe_unused, struct web_client *w, char *url) {
-    return web_client_api_request_v2_contexts_internal(host, w, url, CONTEXTS_V2_AGENTS | CONTEXTS_V2_AGENTS_INFO);
-}
-
-static int web_client_api_request_v2_node_instances(RRDHOST *host __maybe_unused, struct web_client *w, char *url) {
-    return web_client_api_request_v2_contexts_internal(host, w, url, CONTEXTS_V2_NODES | CONTEXTS_V2_NODE_INSTANCES | CONTEXTS_V2_AGENTS | CONTEXTS_V2_AGENTS_INFO | CONTEXTS_V2_VERSIONS);
-}
-
-static int web_client_api_request_v2_weights(RRDHOST *host __maybe_unused, struct web_client *w, char *url) {
-    return web_client_api_request_weights(host, w, url, WEIGHTS_METHOD_VALUE, WEIGHTS_FORMAT_MULTINODE, 2);
-}
-
-static int web_client_api_request_v2_claim(RRDHOST *host __maybe_unused, struct web_client *w, char *url) {
-    return api_v2_claim(w, url);
-}
-
-static int web_client_api_request_v2_alert_config(RRDHOST *host __maybe_unused, struct web_client *w, char *url) {
-    const char *config = NULL;
-
-    while(url) {
-        char *value = strsep_skip_consecutive_separators(&url, "&");
-        if(!value || !*value) continue;
-
-        char *name = strsep_skip_consecutive_separators(&value, "=");
-        if(!name || !*name) continue;
-        if(!value || !*value) continue;
-
-        // name and value are now the parameters
-        // they are not null and not empty
-
-        if(!strcmp(name, "config"))
-            config = value;
-    }
-
-    buffer_flush(w->response.data);
-
-    if(!config) {
-        w->response.data->content_type = CT_TEXT_PLAIN;
-        buffer_strcat(w->response.data, "A config hash ID is required. Add ?config=UUID query param");
Add ?config=UUID query param"); - return HTTP_RESP_BAD_REQUEST; - } - - return contexts_v2_alert_config_to_json(w, config); -} - - -#define GROUP_BY_KEY_MAX_LENGTH 30 -static struct { - char group_by[GROUP_BY_KEY_MAX_LENGTH + 1]; - char aggregation[GROUP_BY_KEY_MAX_LENGTH + 1]; - char group_by_label[GROUP_BY_KEY_MAX_LENGTH + 1]; -} group_by_keys[MAX_QUERY_GROUP_BY_PASSES]; - -__attribute__((constructor)) void initialize_group_by_keys(void) { - for(size_t g = 0; g < MAX_QUERY_GROUP_BY_PASSES ;g++) { - snprintfz(group_by_keys[g].group_by, GROUP_BY_KEY_MAX_LENGTH, "group_by[%zu]", g); - snprintfz(group_by_keys[g].aggregation, GROUP_BY_KEY_MAX_LENGTH, "aggregation[%zu]", g); - snprintfz(group_by_keys[g].group_by_label, GROUP_BY_KEY_MAX_LENGTH, "group_by_label[%zu]", g); - } -} - -static int web_client_api_request_v2_data(RRDHOST *host __maybe_unused, struct web_client *w, char *url) { - usec_t received_ut = now_monotonic_usec(); - - int ret = HTTP_RESP_BAD_REQUEST; - - buffer_flush(w->response.data); - - char *google_version = "0.6", - *google_reqId = "0", - *google_sig = "0", - *google_out = "json", - *responseHandler = NULL, - *outFileName = NULL; - - time_t last_timestamp_in_data = 0, google_timestamp = 0; - - char *scope_nodes = NULL; - char *scope_contexts = NULL; - char *nodes = NULL; - char *contexts = NULL; - char *instances = NULL; - char *dimensions = NULL; - char *before_str = NULL; - char *after_str = NULL; - char *resampling_time_str = NULL; - char *points_str = NULL; - char *timeout_str = NULL; - char *labels = NULL; - char *alerts = NULL; - char *time_group_options = NULL; - char *tier_str = NULL; - size_t tier = 0; - RRDR_TIME_GROUPING time_group = RRDR_GROUPING_AVERAGE; - DATASOURCE_FORMAT format = DATASOURCE_JSON2; - RRDR_OPTIONS options = RRDR_OPTION_VIRTUAL_POINTS | RRDR_OPTION_JSON_WRAP | RRDR_OPTION_RETURN_JWAR; - - struct group_by_pass group_by[MAX_QUERY_GROUP_BY_PASSES] = { - { - .group_by = RRDR_GROUP_BY_DIMENSION, - .group_by_label = NULL, - .aggregation = RRDR_GROUP_BY_FUNCTION_AVERAGE, - }, - }; - - size_t group_by_idx = 0, group_by_label_idx = 0, aggregation_idx = 0; - - while(url) { - char *value = strsep_skip_consecutive_separators(&url, "&"); - if(!value || !*value) continue; - - char *name = strsep_skip_consecutive_separators(&value, "="); - if(!name || !*name) continue; - if(!value || !*value) continue; - - // name and value are now the parameters - // they are not null and not empty - - if(!strcmp(name, "scope_nodes")) scope_nodes = value; - else if(!strcmp(name, "scope_contexts")) scope_contexts = value; - else if(!strcmp(name, "nodes")) nodes = value; - else if(!strcmp(name, "contexts")) contexts = value; - else if(!strcmp(name, "instances")) instances = value; - else if(!strcmp(name, "dimensions")) dimensions = value; - else if(!strcmp(name, "labels")) labels = value; - else if(!strcmp(name, "alerts")) alerts = value; - else if(!strcmp(name, "after")) after_str = value; - else if(!strcmp(name, "before")) before_str = value; - else if(!strcmp(name, "points")) points_str = value; - else if(!strcmp(name, "timeout")) timeout_str = value; - else if(!strcmp(name, "group_by")) { - group_by[group_by_idx++].group_by = group_by_parse(value); - if(group_by_idx >= MAX_QUERY_GROUP_BY_PASSES) - group_by_idx = MAX_QUERY_GROUP_BY_PASSES - 1; - } - else if(!strcmp(name, "group_by_label")) { - group_by[group_by_label_idx++].group_by_label = value; - if(group_by_label_idx >= MAX_QUERY_GROUP_BY_PASSES) - group_by_label_idx = MAX_QUERY_GROUP_BY_PASSES - 1; - } - else 
if(!strcmp(name, "aggregation")) { - group_by[aggregation_idx++].aggregation = group_by_aggregate_function_parse(value); - if(aggregation_idx >= MAX_QUERY_GROUP_BY_PASSES) - aggregation_idx = MAX_QUERY_GROUP_BY_PASSES - 1; - } - else if(!strcmp(name, "format")) format = web_client_api_request_v1_data_format(value); - else if(!strcmp(name, "options")) options |= rrdr_options_parse(value); - else if(!strcmp(name, "time_group")) time_group = time_grouping_parse(value, RRDR_GROUPING_AVERAGE); - else if(!strcmp(name, "time_group_options")) time_group_options = value; - else if(!strcmp(name, "time_resampling")) resampling_time_str = value; - else if(!strcmp(name, "tier")) tier_str = value; - else if(!strcmp(name, "callback")) responseHandler = value; - else if(!strcmp(name, "filename")) outFileName = value; - else if(!strcmp(name, "tqx")) { - // parse Google Visualization API options - // https://developers.google.com/chart/interactive/docs/dev/implementing_data_source - char *tqx_name, *tqx_value; - - while(value) { - tqx_value = strsep_skip_consecutive_separators(&value, ";"); - if(!tqx_value || !*tqx_value) continue; - - tqx_name = strsep_skip_consecutive_separators(&tqx_value, ":"); - if(!tqx_name || !*tqx_name) continue; - if(!tqx_value || !*tqx_value) continue; - - if(!strcmp(tqx_name, "version")) - google_version = tqx_value; - else if(!strcmp(tqx_name, "reqId")) - google_reqId = tqx_value; - else if(!strcmp(tqx_name, "sig")) { - google_sig = tqx_value; - google_timestamp = strtoul(google_sig, NULL, 0); - } - else if(!strcmp(tqx_name, "out")) { - google_out = tqx_value; - format = web_client_api_request_v1_data_google_format(google_out); - } - else if(!strcmp(tqx_name, "responseHandler")) - responseHandler = tqx_value; - else if(!strcmp(tqx_name, "outFileName")) - outFileName = tqx_value; - } - } - else { - for(size_t g = 0; g < MAX_QUERY_GROUP_BY_PASSES ;g++) { - if(!strcmp(name, group_by_keys[g].group_by)) - group_by[g].group_by = group_by_parse(value); - else if(!strcmp(name, group_by_keys[g].group_by_label)) - group_by[g].group_by_label = value; - else if(!strcmp(name, group_by_keys[g].aggregation)) - group_by[g].aggregation = group_by_aggregate_function_parse(value); - } - } - } - - // validate the google parameters given - fix_google_param(google_out); - fix_google_param(google_sig); - fix_google_param(google_reqId); - fix_google_param(google_version); - fix_google_param(responseHandler); - fix_google_param(outFileName); - - for(size_t g = 0; g < MAX_QUERY_GROUP_BY_PASSES ;g++) { - if (group_by[g].group_by_label && *group_by[g].group_by_label) - group_by[g].group_by |= RRDR_GROUP_BY_LABEL; - } - - if(group_by[0].group_by == RRDR_GROUP_BY_NONE) - group_by[0].group_by = RRDR_GROUP_BY_DIMENSION; - - for(size_t g = 0; g < MAX_QUERY_GROUP_BY_PASSES ;g++) { - if ((group_by[g].group_by & ~(RRDR_GROUP_BY_DIMENSION)) || (options & RRDR_OPTION_PERCENTAGE)) { - options |= RRDR_OPTION_ABSOLUTE; - break; - } - } - - if(options & RRDR_OPTION_DEBUG) - options &= ~RRDR_OPTION_MINIFY; - - if(tier_str && *tier_str) { - tier = str2ul(tier_str); - if(tier < storage_tiers) - options |= RRDR_OPTION_SELECTED_TIER; - else - tier = 0; - } - - time_t before = (before_str && *before_str)?str2l(before_str):0; - time_t after = (after_str && *after_str) ?str2l(after_str):-600; - size_t points = (points_str && *points_str)?str2u(points_str):0; - int timeout = (timeout_str && *timeout_str)?str2i(timeout_str): 0; - time_t resampling_time = (resampling_time_str && *resampling_time_str) ? 
-
-    QUERY_TARGET_REQUEST qtr = {
-        .version = 2,
-        .scope_nodes = scope_nodes,
-        .scope_contexts = scope_contexts,
-        .after = after,
-        .before = before,
-        .host = NULL,
-        .st = NULL,
-        .nodes = nodes,
-        .contexts = contexts,
-        .instances = instances,
-        .dimensions = dimensions,
-        .alerts = alerts,
-        .timeout_ms = timeout,
-        .points = points,
-        .format = format,
-        .options = options,
-        .time_group_method = time_group,
-        .time_group_options = time_group_options,
-        .resampling_time = resampling_time,
-        .tier = tier,
-        .chart_label_key = NULL,
-        .labels = labels,
-        .query_source = QUERY_SOURCE_API_DATA,
-        .priority = STORAGE_PRIORITY_NORMAL,
-        .received_ut = received_ut,
-
-        .interrupt_callback = web_client_interrupt_callback,
-        .interrupt_callback_data = w,
-
-        .transaction = &w->transaction,
-    };
-
-    for(size_t g = 0; g < MAX_QUERY_GROUP_BY_PASSES ;g++)
-        qtr.group_by[g] = group_by[g];
-
-    QUERY_TARGET *qt = query_target_create(&qtr);
-    ONEWAYALLOC *owa = NULL;
-
-    if(!qt) {
-        buffer_sprintf(w->response.data, "Failed to prepare the query.");
-        ret = HTTP_RESP_INTERNAL_SERVER_ERROR;
-        goto cleanup;
-    }
-
-    web_client_timeout_checkpoint_set(w, timeout);
-    if(web_client_timeout_checkpoint_and_check(w, NULL)) {
-        ret = w->response.code;
-        goto cleanup;
-    }
-
-    if(outFileName && *outFileName) {
-        buffer_sprintf(w->response.header, "Content-Disposition: attachment; filename=\"%s\"\r\n", outFileName);
-        netdata_log_debug(D_WEB_CLIENT, "%llu: generating outfilename header: '%s'", w->id, outFileName);
-    }
-
-    if(format == DATASOURCE_DATATABLE_JSONP) {
-        if(responseHandler == NULL)
-            responseHandler = "google.visualization.Query.setResponse";
-
-        netdata_log_debug(D_WEB_CLIENT_ACCESS, "%llu: GOOGLE JSON/JSONP: version = '%s', reqId = '%s', sig = '%s', out = '%s', responseHandler = '%s', outFileName = '%s'",
-                w->id, google_version, google_reqId, google_sig, google_out, responseHandler, outFileName
-        );
-
-        buffer_sprintf(
-            w->response.data,
-            "%s({version:'%s',reqId:'%s',status:'ok',sig:'%"PRId64"',table:",
-            responseHandler,
-            google_version,
-            google_reqId,
-            (int64_t)now_realtime_sec());
-    }
-    else if(format == DATASOURCE_JSONP) {
-        if(responseHandler == NULL)
-            responseHandler = "callback";
-
-        buffer_strcat(w->response.data, responseHandler);
-        buffer_strcat(w->response.data, "(");
-    }
-
-    owa = onewayalloc_create(0);
-    ret = data_query_execute(owa, w->response.data, qt, &last_timestamp_in_data);
-
-    if(format == DATASOURCE_DATATABLE_JSONP) {
-        if(google_timestamp < last_timestamp_in_data)
-            buffer_strcat(w->response.data, "});");
-
-        else {
-            // the client already has the latest data
-            buffer_flush(w->response.data);
-            buffer_sprintf(w->response.data,
-                "%s({version:'%s',reqId:'%s',status:'error',errors:[{reason:'not_modified',message:'Data not modified'}]});",
-                responseHandler, google_version, google_reqId);
-        }
-    }
-    else if(format == DATASOURCE_JSONP)
-        buffer_strcat(w->response.data, ");");
-
-    if(qt->internal.relative)
-        buffer_no_cacheable(w->response.data);
-    else
-        buffer_cacheable(w->response.data);
-
-cleanup:
-    query_target_release(qt);
-    onewayalloc_destroy(owa);
-    return ret;
-}
-
-static int web_client_api_request_v2_webrtc(RRDHOST *host __maybe_unused, struct web_client *w, char *url __maybe_unused) {
-    return webrtc_new_connection(buffer_tostring(w->payload), w->response.data);
-}
-
-static int web_client_api_request_v2_progress(RRDHOST *host __maybe_unused, struct web_client *w, char *url) {
-    char *transaction = NULL;
-
-    while(url) {
-        char *value = strsep_skip_consecutive_separators(&url, "&");
-        if(!value || !*value) continue;
-
-        char *name = strsep_skip_consecutive_separators(&value, "=");
-        if(!name || !*name) continue;
-        if(!value || !*value) continue;
-
-        // name and value are now the parameters
-        // they are not null and not empty
-
-        if(!strcmp(name, "transaction")) transaction = value;
-    }
-
-    nd_uuid_t tr;
-    uuid_parse_flexi(transaction, tr);
-
-    rrd_function_call_progresser(&tr);
-
-    return web_api_v2_report_progress(&tr, w->response.data);
-}
+#include "v1/api_v1_calls.h"
+#include "v2/api_v2_calls.h"
+#include "v3/api_v3_calls.h"
 
 static struct web_api_command api_commands_v2[] = {
+#if defined(ENABLE_API_v2)
     // time-series multi-node multi-instance data APIs
     {
         .api = "data",
         .hash = 0,
-        .acl = HTTP_ACL_DASHBOARD,
+        .acl = HTTP_ACL_METRICS,
         .access = HTTP_ACCESS_ANONYMOUS_DATA,
-        .callback = web_client_api_request_v2_data,
+        .callback = api_v2_data,
         .allow_subpaths = 0
     },
     {
         .api = "weights",
         .hash = 0,
-        .acl = HTTP_ACL_DASHBOARD,
+        .acl = HTTP_ACL_METRICS,
         .access = HTTP_ACCESS_ANONYMOUS_DATA,
-        .callback = web_client_api_request_v2_weights,
+        .callback = api_v2_weights,
         .allow_subpaths = 0
     },
 
@@ -606,18 +29,18 @@ static struct web_api_command api_commands_v2[] = {
     {
         .api = "contexts",
         .hash = 0,
-        .acl = HTTP_ACL_DASHBOARD,
+        .acl = HTTP_ACL_METRICS,
         .access = HTTP_ACCESS_ANONYMOUS_DATA,
-        .callback = web_client_api_request_v2_contexts,
+        .callback = api_v2_contexts,
         .allow_subpaths = 0
     },
     {
         // full text search
         .api = "q",
         .hash = 0,
-        .acl = HTTP_ACL_DASHBOARD,
+        .acl = HTTP_ACL_METRICS,
         .access = HTTP_ACCESS_ANONYMOUS_DATA,
-        .callback = web_client_api_request_v2_q,
+        .callback = api_v2_q,
         .allow_subpaths = 0
     },
 
@@ -625,25 +48,25 @@ static struct web_api_command api_commands_v2[] = {
     {
         .api = "alerts",
         .hash = 0,
-        .acl = HTTP_ACL_DASHBOARD,
HTTP_ACL_DASHBOARD, + .acl = HTTP_ACL_NOCHECK, .access = HTTP_ACCESS_ANONYMOUS_DATA, - .callback = web_client_api_request_v2_progress, + .callback = api_v2_progress, .allow_subpaths = 0 }, @@ -693,9 +116,9 @@ static struct web_api_command api_commands_v2[] = { { .api = "functions", .hash = 0, - .acl = HTTP_ACL_DASHBOARD, + .acl = HTTP_ACL_FUNCTIONS, .access = HTTP_ACCESS_ANONYMOUS_DATA, - .callback = web_client_api_request_v2_functions, + .callback = api_v2_functions, .allow_subpaths = 0 }, @@ -705,7 +128,7 @@ static struct web_api_command api_commands_v2[] = { .hash = 0, .acl = HTTP_ACL_ACLK | ACL_DEV_OPEN_ACCESS, .access = HTTP_ACCESS_SIGNED_ID | HTTP_ACCESS_SAME_SPACE, - .callback = web_client_api_request_v2_webrtc, + .callback = api_v2_webrtc, .allow_subpaths = 0 }, @@ -715,7 +138,7 @@ static struct web_api_command api_commands_v2[] = { .hash = 0, .acl = HTTP_ACL_NOCHECK, .access = HTTP_ACCESS_NONE, - .callback = web_client_api_request_v2_claim, + .callback = api_v2_claim, .allow_subpaths = 0 }, { @@ -731,17 +154,18 @@ static struct web_api_command api_commands_v2[] = { .hash = 0, .acl = HTTP_ACL_ACLK | ACL_DEV_OPEN_ACCESS, .access = HTTP_ACCESS_SIGNED_ID | HTTP_ACCESS_SAME_SPACE, - .callback = api_v2_bearer_token, + .callback = api_v2_bearer_get_token, .allow_subpaths = 0 }, +#endif // Netdata branding APIs { .api = "ilove.svg", .hash = 0, - .acl = HTTP_ACL_DASHBOARD, + .acl = HTTP_ACL_NOCHECK, .access = HTTP_ACCESS_ANONYMOUS_DATA, - .callback = web_client_api_request_v2_ilove, + .callback = api_v2_ilove, .allow_subpaths = 0 }, diff --git a/src/web/api/web_api_v3.c b/src/web/api/web_api_v3.c new file mode 100644 index 000000000..a1092471c --- /dev/null +++ b/src/web/api/web_api_v3.c @@ -0,0 +1,263 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "web_api_v3.h" +#include "v1/api_v1_calls.h" +#include "v2/api_v2_calls.h" +#include "v3/api_v3_calls.h" + +static struct web_api_command api_commands_v3[] = { + // time-series multi-node multi-instance data APIs + { + .api = "data", + .hash = 0, + .acl = HTTP_ACL_METRICS, + .access = HTTP_ACCESS_ANONYMOUS_DATA, + .callback = api_v2_data, + .allow_subpaths = 0 + }, + // badges + { + .api = "badge.svg", + .hash = 0, + .acl = HTTP_ACL_BADGES, + .access = HTTP_ACCESS_ANONYMOUS_DATA, + .callback = api_v1_badge, + .allow_subpaths = 0 + }, + // scoring engine + { + .api = "weights", + .hash = 0, + .acl = HTTP_ACL_METRICS, + .access = HTTP_ACCESS_ANONYMOUS_DATA, + .callback = api_v2_weights, + .allow_subpaths = 0 + }, + // exporting API + { + .api = "allmetrics", + .hash = 0, + .acl = HTTP_ACL_METRICS, + .access = HTTP_ACCESS_ANONYMOUS_DATA, + .callback = api_v1_allmetrics, + .allow_subpaths = 0 + }, + + // time-series multi-node multi-instance metadata APIs + { + .api = "context", + .hash = 0, + .acl = HTTP_ACL_METRICS, + .access = HTTP_ACCESS_ANONYMOUS_DATA, + .callback = api_v1_context, + .allow_subpaths = 0 + }, + { + .api = "contexts", + .hash = 0, + .acl = HTTP_ACL_METRICS, + .access = HTTP_ACCESS_ANONYMOUS_DATA, + .callback = api_v2_contexts, + .allow_subpaths = 0 + }, + + // fulltext search + { + .api = "q", + .hash = 0, + .acl = HTTP_ACL_METRICS, + .access = HTTP_ACCESS_ANONYMOUS_DATA, + .callback = api_v2_q, + .allow_subpaths = 0 + }, + + // multi-node multi-instance alerts APIs + { + .api = "alerts", + .hash = 0, + .acl = HTTP_ACL_ALERTS, + .access = HTTP_ACCESS_ANONYMOUS_DATA, + .callback = api_v2_alerts, + .allow_subpaths = 0 + }, + { + .api = "alert_transitions", + .hash = 0, + .acl = HTTP_ACL_ALERTS, + .access = 
HTTP_ACCESS_ANONYMOUS_DATA, + .callback = api_v2_alert_transitions, + .allow_subpaths = 0 + }, + { + .api = "alert_config", + .hash = 0, + .acl = HTTP_ACL_ALERTS, + .access = HTTP_ACCESS_ANONYMOUS_DATA, + .callback = api_v2_alert_config, + .allow_subpaths = 0 + }, + { + .api = "variable", + .hash = 0, + .acl = HTTP_ACL_ALERTS, + .access = HTTP_ACCESS_ANONYMOUS_DATA, + .callback = api_v1_variable, + .allow_subpaths = 0 + }, + + // agent information APIs + { + .api = "info", + .hash = 0, + .acl = HTTP_ACL_NOCHECK, + .access = HTTP_ACCESS_NONE, + .callback = api_v2_info, + .allow_subpaths = 0 + }, + { + .api = "nodes", + .hash = 0, + .acl = HTTP_ACL_NODES, + .access = HTTP_ACCESS_ANONYMOUS_DATA, + .callback = api_v2_nodes, + .allow_subpaths = 0 + }, + { + .api = "node_instances", + .hash = 0, + .acl = HTTP_ACL_NODES, + .access = HTTP_ACCESS_ANONYMOUS_DATA, + .callback = api_v2_node_instances, + .allow_subpaths = 0 + }, + { + .api = "versions", + .hash = 0, + .acl = HTTP_ACL_NOCHECK, + .access = HTTP_ACCESS_ANONYMOUS_DATA, + .callback = api_v2_versions, + .allow_subpaths = 0 + }, + { + .api = "progress", + .hash = 0, + .acl = HTTP_ACL_NOCHECK, + .access = HTTP_ACCESS_ANONYMOUS_DATA, + .callback = api_v2_progress, + .allow_subpaths = 0 + }, + + // functions APIs + { + .api = "function", + .hash = 0, + .acl = HTTP_ACL_FUNCTIONS, + .access = HTTP_ACCESS_ANONYMOUS_DATA, + .callback = api_v1_function, + .allow_subpaths = 0 + }, + { + .api = "functions", + .hash = 0, + .acl = HTTP_ACL_FUNCTIONS, + .access = HTTP_ACCESS_ANONYMOUS_DATA, + .callback = api_v2_functions, + .allow_subpaths = 0 + }, + + // dyncfg APIs + { + .api = "config", + .hash = 0, + .acl = HTTP_ACL_DYNCFG, + .access = HTTP_ACCESS_ANONYMOUS_DATA, + .callback = api_v1_config, + .allow_subpaths = 0 + }, + + // settings APIs + { + .api = "settings", + .hash = 0, + .acl = HTTP_ACL_NOCHECK, + .access = HTTP_ACCESS_ANONYMOUS_DATA, + .callback = api_v3_settings, + .allow_subpaths = 0 + }, + + // WebRTC APIs + { + .api = "rtc_offer", + .hash = 0, + .acl = HTTP_ACL_ACLK | ACL_DEV_OPEN_ACCESS, + .access = HTTP_ACCESS_SIGNED_ID | HTTP_ACCESS_SAME_SPACE, + .callback = api_v2_webrtc, + .allow_subpaths = 0 + }, + + // management APIs + { + .api = "claim", + .hash = 0, + .acl = HTTP_ACL_NOCHECK, + .access = HTTP_ACCESS_NONE, + .callback = api_v3_claim, + .allow_subpaths = 0 + }, + { + .api = "bearer_protection", + .hash = 0, + .acl = HTTP_ACL_ACLK | ACL_DEV_OPEN_ACCESS, + .access = HTTP_ACCESS_SIGNED_ID | HTTP_ACCESS_SAME_SPACE | HTTP_ACCESS_VIEW_AGENT_CONFIG | HTTP_ACCESS_EDIT_AGENT_CONFIG, + .callback = api_v2_bearer_protection, + .allow_subpaths = 0 + }, + { + .api = "bearer_get_token", + .hash = 0, + .acl = HTTP_ACL_ACLK | ACL_DEV_OPEN_ACCESS, + .access = HTTP_ACCESS_SIGNED_ID | HTTP_ACCESS_SAME_SPACE, + .callback = api_v2_bearer_get_token, + .allow_subpaths = 0 + }, + { + .api = "me", + .hash = 0, + .acl = HTTP_ACL_NOCHECK, + .access = HTTP_ACCESS_NONE, + .callback = api_v3_me, + .allow_subpaths = 0 + }, + + // Netdata branding APIs + { + .api = "ilove.svg", + .hash = 0, + .acl = HTTP_ACL_NOCHECK, + .access = HTTP_ACCESS_NONE, + .callback = api_v2_ilove, + .allow_subpaths = 0 + }, + + {// terminator + .api = NULL, + .hash = 0, + .acl = HTTP_ACL_NONE, + .access = HTTP_ACCESS_NONE, + .callback = NULL, + .allow_subpaths = 0 + }, +}; + +inline int web_client_api_request_v3(RRDHOST *host, struct web_client *w, char *url_path_endpoint) { + static int initialized = 0; + + if(unlikely(initialized == 0)) { + initialized = 1; + + for(int i = 0; 
api_commands_v3[i].api ; i++) + api_commands_v3[i].hash = simple_hash(api_commands_v3[i].api); + } + + return web_client_api_request_vX(host, w, url_path_endpoint, api_commands_v3); +} diff --git a/src/web/api/web_api_v3.h b/src/web/api/web_api_v3.h new file mode 100644 index 000000000..32fa4cd1d --- /dev/null +++ b/src/web/api/web_api_v3.h @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_WEB_API_V3_H +#define NETDATA_WEB_API_V3_H + +#include "web_api.h" + +struct web_client; + +int web_client_api_request_v3(RRDHOST *host, struct web_client *w, char *url_path_endpoint); + +#endif //NETDATA_WEB_API_V3_H diff --git a/src/web/gui/.dashboard-v2-notice.md b/src/web/gui/.dashboard-v2-notice.md deleted file mode 100644 index 2eb4374ce..000000000 --- a/src/web/gui/.dashboard-v2-notice.md +++ /dev/null @@ -1,8 +0,0 @@ -# Do not edit any files in this directory! - -If you spot any errors or bugs in these files, please open a bug report -at https://github.com/netdata/netdata/issues/new/choose. - -These files are maintained in a separate private repository and copied -here when they are updated there, so any changes made in this directory -will eventually be overwritten. diff --git a/src/web/gui/README.md b/src/web/gui/README.md index 248cee4da..2eb9cfb15 100644 --- a/src/web/gui/README.md +++ b/src/web/gui/README.md @@ -1,163 +1,4 @@ # Legacy Agent dashboard -> ⚠️ You're checking the documentation for the legacy Agent dashboard. For the current version please check [Accessing Netdata Dashboards](/docs/dashboards-and-charts/README.md). - - -The local Netdata Agent dashboard is the heart of Netdata's performance troubleshooting toolkit. You've probably seen it -before: - -![The Netdata dashboard in -action](https://user-images.githubusercontent.com/1153921/101513938-fae28380-3939-11eb-9434-8ad86a39be62.gif) - -Learn more about how dashboards work and how they're populated using the `dashboards.js` file in our [web dashboards -overview](/src/web/README.md). - -By default, Netdata starts a web server for its dashboard at port `19999`. Open up your web browser of choice and -navigate to `http://NODE:19999`, replacing `NODE` with the IP address or hostname of your Agent. If installed on localhost, -you can access it through `http://localhost:19999`. - -Netdata uses an [internal, static-threaded web server](/src/web/server/README.md) to host the HTML, CSS, and JavaScript -files that make up the local Agent dashboard. You don't have to configure anything to access it, although you can adjust -[your settings](/src/web/server/README.md#other-netdataconf-web-section-options) in the `netdata.conf` file, or run Netdata -behind an [Nginx proxy](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-nginx.md), and so on.
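
A note on the routing machinery added in `web_api_v3.c` above: `web_client_api_request_v3()` fills in each table entry's `hash` field once, on the first request, evidently so that later lookups can compare a precomputed hash first and only fall back to a full string compare on a match. A minimal, self-contained sketch of that table-driven dispatch pattern follows; the hash below is a djb2-style stand-in for netdata's `simple_hash()`, and the endpoint names and callbacks are invented for illustration:

```c
#include <stdio.h>
#include <string.h>

/* Stand-in for netdata's simple_hash(): any fast string hash works here. */
static unsigned int toy_hash(const char *s) {
    unsigned int h = 5381;
    while (*s) h = (h << 5) + h + (unsigned char)*s++;
    return h;
}

typedef int (*endpoint_cb)(const char *query);

struct command {
    const char *api;
    unsigned int hash;          /* filled in once, on first request */
    endpoint_cb callback;
};

static int cb_info(const char *q)  { (void)q; puts("info called");  return 200; }
static int cb_nodes(const char *q) { (void)q; puts("nodes called"); return 200; }

static struct command commands[] = {
    { .api = "info",  .hash = 0, .callback = cb_info  },
    { .api = "nodes", .hash = 0, .callback = cb_nodes },
    { .api = NULL,    .hash = 0, .callback = NULL     },   /* terminator */
};

/* Dispatch an endpoint: compare hashes first, strcmp only to rule out collisions. */
static int dispatch(const char *endpoint, const char *query) {
    static int initialized = 0;
    if (!initialized) {
        initialized = 1;
        for (int i = 0; commands[i].api; i++)
            commands[i].hash = toy_hash(commands[i].api);
    }

    unsigned int h = toy_hash(endpoint);
    for (int i = 0; commands[i].api; i++)
        if (commands[i].hash == h && strcmp(commands[i].api, endpoint) == 0)
            return commands[i].callback(query);

    return 404;   /* no such endpoint */
}

int main(void) {
    dispatch("nodes", "");                      /* prints "nodes called" */
    return dispatch("bogus", "") == 404 ? 0 : 1;
}
```

The real tables additionally carry `acl` and `access` fields used for permission checks before a callback ever runs; the sketch leaves authorization out entirely.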
- -## Navigating the local dashboard - -Beyond charts, the local dashboard can be broken down into three key areas: - -- [Navigating the local dashboard](#navigating-the-local-dashboard) - - [Sections](#sections) - - [Time \& date picker](#time--date-picker) - - [Metrics menus](#metrics-menus) - - [Cloud menus (Spaces, Rooms, and Visited nodes)](#cloud-menus-spaces-rooms-and-visited-nodes) -- [Customizing the local dashboard](#customizing-the-local-dashboard) - - [Custom dashboards](#custom-dashboards) - -![Annotated screenshot of the local Agent -dashboard](https://user-images.githubusercontent.com/1153921/101509403-f7e59400-3935-11eb-9abd-cbecfa3ee49a.png) - -### Sections - -Netdata is broken up into multiple **sections**, such as **System Overview**, -**CPU**, **Disk**, and more. Inside each section you'll find a number of charts, -broken down into [contexts](/src/web/README.md#contexts) and -[families](/src/web/README.md#families). - -Here's an example of the **Memory** section on a Linux desktop system. - -![Screenshot of the Memory section of the Netdata -dashboard](https://user-images.githubusercontent.com/1153921/101508423-e354cc00-3934-11eb-9b33-3ad57a5988b4.png) - -All sections and their associated charts appear on a single page, so all you need to do to view different sections is -scroll up and down. But it's usually quicker to use the [menus](#metrics-menus). - -### Time & date picker - -The local dashboard features a time & date picker to help you visualize specific timeframes of historical metrics. The -picker chooses an appropriate default to always show per-second granularity based on the width of your browser's -viewport. - -![The time & date picker in the local Agent -dashboard](https://user-images.githubusercontent.com/1153921/101507784-2c585080-3934-11eb-9d6e-eff30b8553e4.png) - -Use the Quick Selector to show metrics from the last 5 minutes, 15 minutes, 30 minutes, 2 hours, 6 hours, or 12 hours. - -Beneath the Quick Selector is an input field and dropdown you use in combination to select a specific timeframe of -minutes, hours, days, or months. Enter a number and choose the appropriate unit of time. - -Use the calendar to select multiple days. Click on a date to begin the timeframe selection, then click on an ending date. - -Click **Apply** to re-render all visualizations with new metrics data, or **Clear** to restore the default timeframe. - -[Increase the metrics retention policy](/docs/netdata-agent/configuration/optimizing-metrics-database/change-metrics-storage.md) for your node to see more historical -timeframes. - -### Metrics menus - -**Metrics menus** appear on the right-hand side of the local Agent dashboard. Netdata generates a menu for each -section, and menus link to the section they're associated with. - -![A screenshot of metrics menus](https://user-images.githubusercontent.com/1153921/80834638-f08f2880-8ba5-11ea-99ae-f610b2885fd6.png) - -Most metrics menu items will contain several **submenu** entries, which represent any -[families](/src/web/README.md#families) from that section. Netdata automatically -generates these submenu entries. - -Here's a **Disks** menu with several submenu entries for each disk drive and -partition Netdata recognizes. - -![Screenshot of some metrics -submenus](https://user-images.githubusercontent.com/1153921/80834697-11577e00-8ba6-11ea-979c-92fd19cdb480.png) - -### Cloud menus (Spaces, Rooms, and Visited nodes) - -The dashboard also features a menu related to Netdata Cloud functionality.
You can view your existing Spaces or create -new ones via the left vertical column of boxes. This menu also displays the name of your current Space, shows a list of -any Rooms you've added to your Space, and lists any nodes you recently visited via their Agent dashboards. Click on -a Room's name to jump to the Netdata Cloud web interface. - -![A screenshot of the Cloud -menus](https://user-images.githubusercontent.com/1153921/80837210-3f8b8c80-8bab-11ea-9c75-128c2d823ef8.png) - -## Customizing the local dashboard - -Netdata stores information about individual charts in the `dashboard_info.js` -file. This file includes section and subsection headings, descriptions, colors, -titles, tooltips, and other information for Netdata to render on the dashboard. - -For example, here is how `dashboard_info.js` defines the **System Overview** -section. - -```javascript -netdataDashboard.menu = { - 'system': { - title: 'System Overview', - icon: '', - info: 'Overview of the key system metrics.' - }, -``` - -If you want to customize this information, you should avoid editing -`dashboard_info.js` directly. These changes are not persistent; Netdata will -overwrite the file when it's updated. Instead, you should create a new file with -your customizations. - -We created an example file at `dashboard_info_custom_example.js`. You can -copy this to a new file with a name of your choice in the `web/` directory. This -directory changes based on your operating system and installation method. If -you're on a Linux system, it should be at `/usr/share/netdata/web/`. - -```shell -cd /usr/share/netdata/web/ -sudo cp dashboard_info_custom_example.js your_dashboard_info_file.js -``` - -Edit the file with your customizations. For example: - -```javascript -customDashboard.menu = { - system: { - title: "Testing, testing, 1 2 3", - icon: '', - info: "This is overwritten info for the system overview section!" - } -}; -``` - -Finally, tell Netdata where you placed your customization file by replacing -`your_dashboard_info_file.js` below. - -```conf -[web] - custom dashboard_info.js = your_dashboard_info_file.js -``` - -Once you restart Netdata, refresh the dashboard to find your custom -configuration: - -![Screenshot of overwritten text from dashboard_info.js -file](https://user-images.githubusercontent.com/1153921/62798924-570e6c80-ba94-11e9-9578-869753bec39c.png) - -## Custom dashboards - -For information on creating custom dashboards from scratch, see the [custom dashboards](/src/web/gui/custom/README.md) guide. +> ⚠️ You're checking the documentation for the legacy Agent dashboard. +> For the current version, check [Accessing Netdata Dashboards](/docs/dashboards-and-charts/README.md).
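
Looking back at the v2 `data` endpoint near the top of this section: when the requested `format` is `DATASOURCE_DATATABLE_JSONP`, the response is wrapped in a Google Visualization envelope, and when the timestamp the client reports already covers the newest sample in the result, the whole payload is replaced by a `not_modified` error object. Below is a toy, self-contained re-creation of just that envelope logic, assuming a plain `char` buffer in place of netdata's `BUFFER` type; the real code streams the envelope prefix before executing the query and appends the closer afterwards, rather than formatting everything in one shot:

```c
#include <stdio.h>
#include <time.h>

/* Toy re-creation of the Google Visualization JSONP envelope emitted by the
 * v2 "data" endpoint. The payload and timestamps are invented for the example. */
static void emit_datatable_jsonp(char *out, size_t len,
                                 const char *handler,   /* NULL selects the default */
                                 const char *version, const char *req_id,
                                 long long client_ts,   /* newest timestamp the client has */
                                 long long last_ts,     /* newest timestamp in the result */
                                 const char *table_json) {
    if (!handler)
        handler = "google.visualization.Query.setResponse";

    if (client_ts < last_ts)
        /* fresh data: handler({..., table:<payload>}); */
        snprintf(out, len,
                 "%s({version:'%s',reqId:'%s',status:'ok',sig:'%lld',table:%s});",
                 handler, version, req_id, (long long)time(NULL), table_json);
    else
        /* the client is already up to date: report not_modified instead of a table */
        snprintf(out, len,
                 "%s({version:'%s',reqId:'%s',status:'error',"
                 "errors:[{reason:'not_modified',message:'Data not modified'}]});",
                 handler, version, req_id);
}

int main(void) {
    char buf[512];
    emit_datatable_jsonp(buf, sizeof buf, NULL, "0.6", "1",
                         0, 1700000000, "{\"cols\":[],\"rows\":[]}");
    puts(buf);
    return 0;
}
```

The plain `DATASOURCE_JSONP` path in the same function is the simpler variant of this idea: it only brackets the payload with `callback(` and `);`.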
diff --git a/src/web/gui/bundle_dashboard_v1.py b/src/web/gui/bundle_dashboard_v1.py index 3fbe66330..9c6f4d682 100755 --- a/src/web/gui/bundle_dashboard_v1.py +++ b/src/web/gui/bundle_dashboard_v1.py @@ -17,15 +17,15 @@ BASEPATH = Path('v1') URLTEMPLATE = 'https://github.com/netdata/dashboard/releases/download/{0}/dashboard.tar.gz' CMAKETEMPLATE = ''' - install(FILES {0} COMPONENT netdata DESTINATION ${{WEB_DEST}}) - install(FILES {1} COMPONENT netdata DESTINATION ${{WEB_DEST}}/css) - install(FILES {2} COMPONENT netdata DESTINATION ${{WEB_DEST}}/fonts) - install(FILES {3} COMPONENT netdata DESTINATION ${{WEB_DEST}}/images) - install(FILES {4} COMPONENT netdata DESTINATION ${{WEB_DEST}}/lib) - install(FILES {5} COMPONENT netdata DESTINATION ${{WEB_DEST}}/static/css) - install(FILES {6} COMPONENT netdata DESTINATION ${{WEB_DEST}}/static/js) - install(FILES {7} COMPONENT netdata DESTINATION ${{WEB_DEST}}/static/media) - install(FILES web/gui/v1/index.html COMPONENT netdata DESTINATION ${WEB_DEST}/v1) + install(FILES {0} COMPONENT dashboard DESTINATION ${{WEB_DEST}}) + install(FILES {1} COMPONENT dashboard DESTINATION ${{WEB_DEST}}/css) + install(FILES {2} COMPONENT dashboard DESTINATION ${{WEB_DEST}}/fonts) + install(FILES {3} COMPONENT dashboard DESTINATION ${{WEB_DEST}}/images) + install(FILES {4} COMPONENT dashboard DESTINATION ${{WEB_DEST}}/lib) + install(FILES {5} COMPONENT dashboard DESTINATION ${{WEB_DEST}}/static/css) + install(FILES {6} COMPONENT dashboard DESTINATION ${{WEB_DEST}}/static/js) + install(FILES {7} COMPONENT dashboard DESTINATION ${{WEB_DEST}}/static/media) + install(FILES web/gui/v1/index.html COMPONENT dashboard DESTINATION ${{WEB_DEST}}/v1) ''' def copy_dashboard(tag): diff --git a/src/web/gui/bundle_dashboard_v2.py b/src/web/gui/bundle_dashboard_v2.py deleted file mode 100755 index 09ac6f998..000000000 --- a/src/web/gui/bundle_dashboard_v2.py +++ /dev/null @@ -1,97 +0,0 @@ -#!/usr/bin/env python3 -# -# Copyright: © 2023 Netdata Inc. -# SPDX-License-Identifier: GPL-3.0-or-later -'''Bundle the v2 dashboard code into the agent repo.
- - This is designed to be run as part of a GHA workflow, but will work fine outside of one.''' - -import os -import shutil -import subprocess - -from pathlib import Path - -os.chdir(Path(__file__).parent.absolute()) - -BASEDIR = 'v2' - -BASEPATH = Path(BASEDIR) - -TMPPATH = Path('tmp') - -URLSRC = 'https://app.netdata.cloud/agent.tar.gz' - -CMAKETEMPLATE = ''' - install(FILES {0} COMPONENT netdata DESTINATION ${{WEB_DEST}}/v2) - install(FILES {1} COMPONENT netdata DESTINATION ${{WEB_DEST}}/v2/static) - install(FILES {2} COMPONENT netdata DESTINATION ${{WEB_DEST}}/v2/static/email/img) - install(FILES {3} COMPONENT netdata DESTINATION ${{WEB_DEST}}/v2/static/img) - install(FILES {4} COMPONENT netdata DESTINATION ${{WEB_DEST}}/v2/static/img/logos/os) - install(FILES {5} COMPONENT netdata DESTINATION ${{WEB_DEST}}/v2/static/img/logos/services) - install(FILES {6} COMPONENT netdata DESTINATION ${{WEB_DEST}}/v2/static/img/mail) - install(FILES {7} COMPONENT netdata DESTINATION ${{WEB_DEST}}/v2/static/site/pages/holding-page-503) -''' - -def copy_dashboard(): - '''Fetch and bundle the dashboard code.''' - print('Preparing target directory') - shutil.rmtree(BASEPATH) - TMPPATH.mkdir() - print('::group::Fetching dashboard release tarball') - subprocess.check_call(f'curl -L -o agent.tar { URLSRC }', shell=True) - print('::endgroup::') - print('::group::Extracting dashboard release tarball') - subprocess.check_call(f"tar -xvf agent.tar -C { TMPPATH } --strip-components=1 --exclude='*.br' --exclude='*.gz'", shell=True) - print('::endgroup::') - print('Copying files') - (TMPPATH / 'agent' / BASEDIR).rename(BASEPATH) - (TMPPATH / 'agent' / 'index.html').rename(Path('./index.html')) - (TMPPATH / 'agent' / 'registry-access.html').rename('./registry-access.html') - (TMPPATH / 'agent' / 'registry-alert-redirect.html').rename('./registry-alert-redirect.html') - (TMPPATH / 'agent' / 'registry-hello.html').rename('./registry-hello.html') - shutil.copytree(TMPPATH / 'agent' / 'static', Path('./static'), dirs_exist_ok=True) - shutil.rmtree(TMPPATH) - print('Copying README.md') - BASEPATH.joinpath('README.md').symlink_to('../.dashboard-v2-notice.md') - print('Removing dashboard release tarball') - BASEPATH.joinpath('..', 'agent.tar').unlink() - - -def genfilelist(path): - '''Generate a list of files for the Makefile.''' - files = [f for f in path.iterdir() if f.is_file() and f.name != 'README.md'] - files = [Path(*f.parts[1:]) for f in files] - files.sort() - return '\n'.join([("src/web/gui/v2/" + str(f)) for f in files]) - - -def write_cmakefile(): - '''Write out the makefile for the dashboard code.''' - print('Generating cmake file') - output = CMAKETEMPLATE.format( - genfilelist(BASEPATH), - genfilelist(BASEPATH.joinpath('static')), - genfilelist(BASEPATH.joinpath('static', 'email', 'img')), - genfilelist(BASEPATH.joinpath('static', 'img')), - genfilelist(BASEPATH.joinpath('static', 'img', 'logos', 'os')), - genfilelist(BASEPATH.joinpath('static', 'img', 'logos', 'services')), - genfilelist(BASEPATH.joinpath('static', 'img', 'mail')), - genfilelist(BASEPATH.joinpath('static', 'site', 'pages', 'holding-page-503')), - ) - - BASEPATH.joinpath('dashboard_v2.cmake').write_text(output) - - -def list_changed_files(): - '''Create a list of changed files, and set it in an environment variable.''' - if 'GITHUB_ENV' in os.environ: - print('Generating file list for commit.') - subprocess.check_call('echo "COMMIT_FILES<> $GITHUB_ENV', shell=True) - subprocess.check_call('git status --porcelain=v1 --no-renames 
--untracked-files=all | rev | cut -d \' \' -f 1 | rev >> $GITHUB_ENV', shell=True) - subprocess.check_call('echo "EOF" >> $GITHUB_ENV', shell=True) - - -copy_dashboard() -write_cmakefile() -list_changed_files() diff --git a/src/web/gui/index.html b/src/web/gui/index.html deleted file mode 100644 index bad821a9d..000000000 --- a/src/web/gui/index.html +++ /dev/null @@ -1,245 +0,0 @@ -Netdata Agent Console

    Welcome to Netdata

    Loading latest Netdata UI...

    We couldn't load the latest Netdata UI. You can try again
    Or you can load the old single node dashboard or a local copy of Netdata UI
    \ No newline at end of file diff --git a/src/web/gui/registry-access.html b/src/web/gui/registry-access.html deleted file mode 100644 index 75a403034..000000000 --- a/src/web/gui/registry-access.html +++ /dev/null @@ -1,73 +0,0 @@ -Netdata Registry \ No newline at end of file diff --git a/src/web/gui/registry-alert-redirect.html b/src/web/gui/registry-alert-redirect.html deleted file mode 100644 index 0bb20a9eb..000000000 --- a/src/web/gui/registry-alert-redirect.html +++ /dev/null @@ -1,152 +0,0 @@ -Netdata Agent Alert Redirect

    Netdata Alert Notifications

    Trying to find a Netdata Agent for this alert...
    \ No newline at end of file diff --git a/src/web/gui/registry-hello.html b/src/web/gui/registry-hello.html deleted file mode 100644 index 7fba5662c..000000000 --- a/src/web/gui/registry-hello.html +++ /dev/null @@ -1,94 +0,0 @@ -Netdata Registry \ No newline at end of file diff --git a/src/web/gui/static/splash.css b/src/web/gui/static/splash.css deleted file mode 100644 index f20b6ac41..000000000 --- a/src/web/gui/static/splash.css +++ /dev/null @@ -1,171 +0,0 @@ -:root { - --main-bg: #080a0a; - --font-color: #b7c2c2; - --primary-green: #00ab44; - --column-gap: 8px; - --logo-color: #f1fff7; - --button-text-green: #00cd51; - --border-color: #536775; -} - -body { - margin: 0; - padding: 0; - font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", "Roboto", "Oxygen", - "Ubuntu", "Cantarell", "Fira Sans", "Droid Sans", "Helvetica Neue", - sans-serif; - background: var(--main-bg); - color: var(--font-color); - font-weight: 200; - font-size: 14px; - line-height: 20px; -} -iframe.hidden { - display: none; -} -#agent-splash-screen a:link, -#agent-splash-screen a:visited, -#agent-splash-screen a:active { - font-size: 12px; - color: var(--primary-green); - font-weight: normal; -} -#agent-splash-screen .hero { - position: relative; - display: flex; - width: 500px; - margin: auto; - align-items: center; - justify-content: center; - flex-direction: column; - gap: calc(var(--column-gap) * 4); - text-align: center; - margin: 120px auto 32px; -} -#agent-splash-screen .logo-container { - position: relative; -} -#agent-splash-screen .logo-blur { - position: absolute; - width: 315px; - height: 315px; - left: -100%; - top: -100%; - filter: blur(30px); - pointer-events: none; -} -#agent-splash-screen .logo { - filter: drop-shadow(-6px -2px 20px rgba(255, 255, 255, 0.6)) blur(0.7px); -} - -#agent-splash-screen.loading .logo { - animation: glow 800ms linear infinite alternate; -} -#agent-splash-screen .headings { - display: flex; - flex-direction: column; - gap: calc(var(--column-gap) * 2); - height: 104px; -} -#agent-splash-screen .title { - font-size: 22px; - line-height: 26px; - font-weight: 200; - margin: 0; -} -#agent-splash-screen .subtitle { - font-size: 14px; - line-height: 20px; - margin: 0; -} -#agent-splash-screen .flex-center { - display: flex; - align-items: center; - justify-content: center; - gap: 2px; -} -#agent-splash-screen .flex-column { - flex-direction: column; -} -#agent-splash-screen a.button { - appearance: none; - border: none; - border-radius: 2px; - cursor: pointer; - text-decoration: none; - text-align: center; -} -#agent-splash-screen a.button svg { - width: 22px; - height: 16px; -} -#agent-splash-screen a.button.ghost { - background: transparent; - color: var(--button-text-green); - padding: 4px 6px; - font-size: 14px; - font-weight: 200; -} -#agent-splash-screen a.button.ghost:hover { - background: #00441b; -} - -#agent-splash-screen a.button.large { - background: transparent; - border: 1px solid var(--primary-green); - font-size: 18px; - padding: 12px; - color: var(--button-text-green); - width: 100%; - font-weight: 200; -} -#agent-splash-screen a.button.large:hover { - background: #00441b; -} -#agent-splash-screen .loading-message .subtitle { - display:none; -} -#agent-splash-screen.loading .loading-message .subtitle { - display:block; - height: 56px; -} -#agent-splash-screen .loading-message .flex-center { - display: none; -} -#agent-splash-screen.error .loading-message .flex-center { - display: flex; - margin: 8px auto; -} -#agent-splash-screen .loading-message table { 
- display: none; - border-collapse: collapse; - border: 1px solid var(--border-color); - color: var(--font-color); -} -#agent-splash-screen.table .loading-message table { - display: table; - margin: 8px auto; -} -#agent-splash-screen .dashboard-buttons { - width: 320px; - margin: auto; - gap: 24px; -} -#agent-splash-screen .terms { - position: absolute; - bottom: 40px; - left: 0; - right: 0; - margin: auto; -} -.green { - color: var(--primary-green); -} -@keyframes glow { - from { - filter: drop-shadow(-6px -2px 20px rgba(255, 255, 255, 0.9)) blur(0.9px); - } - to { - filter: drop-shadow(-6px -2px 20px rgba(255, 255, 255, 0.3)) blur(0.4px); - } -} diff --git a/src/web/gui/v1/dashboard_v1.cmake b/src/web/gui/v1/dashboard_v1.cmake index fd820d9e9..f1d40115e 100644 --- a/src/web/gui/v1/dashboard_v1.cmake +++ b/src/web/gui/v1/dashboard_v1.cmake @@ -22,19 +22,19 @@ src/web/gui/v1/robots.txt src/web/gui/v1/service-worker.js src/web/gui/v1/sitemap.xml src/web/gui/v1/tv-react.html -src/web/gui/v1/tv.html COMPONENT netdata DESTINATION ${WEB_DEST}) +src/web/gui/v1/tv.html COMPONENT dashboard DESTINATION ${WEB_DEST}) install(FILES src/web/gui/v1/css/bootstrap-3.3.7.css src/web/gui/v1/css/bootstrap-slate-flat-3.3.7.css src/web/gui/v1/css/bootstrap-slider-10.0.0.min.css src/web/gui/v1/css/bootstrap-theme-3.3.7.min.css src/web/gui/v1/css/bootstrap-toggle-2.2.2.min.css src/web/gui/v1/css/dashboard.css -src/web/gui/v1/css/dashboard.slate.css COMPONENT netdata DESTINATION ${WEB_DEST}/css) +src/web/gui/v1/css/dashboard.slate.css COMPONENT dashboard DESTINATION ${WEB_DEST}/css) install(FILES src/web/gui/v1/fonts/glyphicons-halflings-regular.eot src/web/gui/v1/fonts/glyphicons-halflings-regular.svg src/web/gui/v1/fonts/glyphicons-halflings-regular.ttf src/web/gui/v1/fonts/glyphicons-halflings-regular.woff -src/web/gui/v1/fonts/glyphicons-halflings-regular.woff2 COMPONENT netdata DESTINATION ${WEB_DEST}/fonts) +src/web/gui/v1/fonts/glyphicons-halflings-regular.woff2 COMPONENT dashboard DESTINATION ${WEB_DEST}/fonts) install(FILES src/web/gui/v1/images/alert-128-orange.png src/web/gui/v1/images/alert-128-red.png src/web/gui/v1/images/alert-multi-size-orange.ico @@ -84,7 +84,7 @@ src/web/gui/v1/images/overview.png src/web/gui/v1/images/packaging-beta-tag.svg src/web/gui/v1/images/post.png src/web/gui/v1/images/pricing.png -src/web/gui/v1/images/seo-performance-128.png COMPONENT netdata DESTINATION ${WEB_DEST}/images) +src/web/gui/v1/images/seo-performance-128.png COMPONENT dashboard DESTINATION ${WEB_DEST}/images) install(FILES src/web/gui/v1/lib/bootstrap-3.3.7.min.js src/web/gui/v1/lib/bootstrap-slider-10.0.0.min.js src/web/gui/v1/lib/bootstrap-table-1.11.0.min.js @@ -104,13 +104,13 @@ src/web/gui/v1/lib/jquery.sparkline-2.1.2.min.js src/web/gui/v1/lib/lz-string-1.4.4.min.js src/web/gui/v1/lib/pako-1.0.6.min.js src/web/gui/v1/lib/perfect-scrollbar-0.6.15.min.js -src/web/gui/v1/lib/tableExport-1.6.0.min.js COMPONENT netdata DESTINATION ${WEB_DEST}/lib) +src/web/gui/v1/lib/tableExport-1.6.0.min.js COMPONENT dashboard DESTINATION ${WEB_DEST}/lib) install(FILES src/web/gui/v1/static/css/2.c454aab8.chunk.css src/web/gui/v1/static/css/2.c454aab8.chunk.css.map src/web/gui/v1/static/css/4.a36e3b73.chunk.css src/web/gui/v1/static/css/4.a36e3b73.chunk.css.map src/web/gui/v1/static/css/main.53ba10f1.chunk.css -src/web/gui/v1/static/css/main.53ba10f1.chunk.css.map COMPONENT netdata DESTINATION ${WEB_DEST}/static/css) +src/web/gui/v1/static/css/main.53ba10f1.chunk.css.map COMPONENT dashboard DESTINATION 
${WEB_DEST}/static/css) install(FILES src/web/gui/v1/static/js/10.a5cd7d0e.chunk.js src/web/gui/v1/static/js/10.a5cd7d0e.chunk.js.map src/web/gui/v1/static/js/2.62d105c5.chunk.js @@ -135,7 +135,7 @@ src/web/gui/v1/static/js/main.e248095a.chunk.js src/web/gui/v1/static/js/main.e248095a.chunk.js.LICENSE src/web/gui/v1/static/js/main.e248095a.chunk.js.map src/web/gui/v1/static/js/runtime-main.08abed8f.js -src/web/gui/v1/static/js/runtime-main.08abed8f.js.map COMPONENT netdata DESTINATION ${WEB_DEST}/static/js) +src/web/gui/v1/static/js/runtime-main.08abed8f.js.map COMPONENT dashboard DESTINATION ${WEB_DEST}/static/js) install(FILES src/web/gui/v1/static/media/ibm-plex-sans-latin-100.245539db.woff2 src/web/gui/v1/static/media/ibm-plex-sans-latin-100.9a582f3a.woff src/web/gui/v1/static/media/ibm-plex-sans-latin-100italic.1ea7c5d2.woff @@ -164,5 +164,5 @@ src/web/gui/v1/static/media/ibm-plex-sans-latin-700.b8809d61.woff src/web/gui/v1/static/media/ibm-plex-sans-latin-700.c9983d3d.woff2 src/web/gui/v1/static/media/ibm-plex-sans-latin-700italic.02954bee.woff2 src/web/gui/v1/static/media/ibm-plex-sans-latin-700italic.72e9af40.woff -src/web/gui/v1/static/media/material-icons.0509ab09.woff2 COMPONENT netdata DESTINATION ${WEB_DEST}/static/media) - install(FILES src/web/gui/v1/index.html COMPONENT netdata DESTINATION ${WEB_DEST}/v1) +src/web/gui/v1/static/media/material-icons.0509ab09.woff2 COMPONENT dashboard DESTINATION ${WEB_DEST}/static/media) + install(FILES src/web/gui/v1/index.html COMPONENT dashboard DESTINATION ${WEB_DEST}/v1) diff --git a/src/web/gui/v2/.well-known/assetlinks.json b/src/web/gui/v2/.well-known/assetlinks.json deleted file mode 100644 index 270af6689..000000000 --- a/src/web/gui/v2/.well-known/assetlinks.json +++ /dev/null @@ -1,11 +0,0 @@ -[ - { - "relation": ["delegate_permission/common.handle_all_urls"], - "target": { - "namespace": "android_app", - "package_name": "cloud.netdata.android", - "sha256_cert_fingerprints": - ["67:F4:89:2B:8D:A9:2D:CB:91:ED:A1:BD:42:04:90:28:05:4F:3E:81:04:7B:76:B6:0A:20:30:6C:90:38:08:90","1F:B8:9A:45:AD:83:76:DD:7E:A5:9A:07:82:4A:2F:99:3E:0D:EB:64:FA:50:76:59:65:3F:CC:38:7F:32:28:AA"] - } - } -] diff --git a/src/web/gui/v2/1220.01d6bbaab869c74f4437.chunk.js b/src/web/gui/v2/1220.01d6bbaab869c74f4437.chunk.js deleted file mode 100644 index fdd0ef57e..000000000 --- a/src/web/gui/v2/1220.01d6bbaab869c74f4437.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="abaaead6-1221-4463-a772-6fda591382c6",e._sentryDebugIdIdentifier="sentry-dbid-abaaead6-1221-4463-a772-6fda591382c6")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[1220],{51220:(e,t,s)=>{s.d(t,{Te:()=>h});var n=s(96540),i=s(40961),o=s(57530);const l="undefined"!==typeof document?n.useLayoutEffect:n.useEffect;function r(e){const t=n.useReducer((()=>({})),{})[1],s={...e,onChange:(s,n)=>{var o;n?(0,i.flushSync)(t):t(),null==(o=e.onChange)||o.call(e,s,n)}},[r]=n.useState((()=>new o.YV(s)));return r.setOptions(s),n.useEffect((()=>r._didMount()),[]),l((()=>r._willUpdate())),r}function h(e){return 
[... remainder of the minified, machine-generated contents of the deleted webpack chunk files 1220.01d6bbaab869c74f4437.chunk.js and 1396.56f70d7c659ac0b694cd.chunk.js elided ...]
h=n.charAt(64);if(h)for(;o.length%4;)o.push(h);return o.join("")},parse:function(t,e){void 0===e&&(e=!0);var i=t.length,n=e?this._safe_map:this._map,o=this._reverseMap;if(!o){o=this._reverseMap=[];for(var s=0;s>>8&16711935}r.Utf16=r.Utf16BE={stringify:function(t){for(var e=t.words,r=t.sigBytes,i=[],n=0;n>>2]>>>16-n%4*8&65535;i.push(String.fromCharCode(o))}return i.join("")},parse:function(t){for(var r=t.length,i=[],n=0;n>>1]|=t.charCodeAt(n)<<16-n%2*16;return e.create(i,2*r)}},r.Utf16LE={stringify:function(t){for(var e=t.words,r=t.sigBytes,n=[],o=0;o>>2]>>>16-o%4*8&65535);n.push(String.fromCharCode(s))}return n.join("")},parse:function(t){for(var r=t.length,n=[],o=0;o>>1]|=i(t.charCodeAt(o)<<16-o%2*16);return e.create(n,2*r)}}}(),n.enc.Utf16)}()},39506:function(t,e,r){!function(e,i,n){var o;t.exports=(o=r(19021),r(45471),r(51025),function(){var t=o,e=t.lib,r=e.Base,i=e.WordArray,n=t.algo,s=n.MD5,c=n.EvpKDF=r.extend({cfg:r.extend({keySize:4,hasher:s,iterations:1}),init:function(t){this.cfg=this.cfg.extend(t)},compute:function(t,e){for(var r,n=this.cfg,o=n.hasher.create(),s=i.create(),c=s.words,a=n.keySize,h=n.iterations;c.lengthn&&(e=t.finalize(e)),e.clamp();for(var o=this._oKey=e.clone(),s=this._iKey=e.clone(),c=o.words,a=s.words,h=0;h>>2]|=t[n]<<24-n%4*8;e.call(this,i,r)}else e.apply(this,arguments)};r.prototype=t}}(),n.lib.WordArray)}()},84636:function(t,e,r){!function(e,i){var n;t.exports=(n=r(19021),function(t){var e=n,r=e.lib,i=r.WordArray,o=r.Hasher,s=e.algo,c=[];!function(){for(var e=0;e<64;e++)c[e]=4294967296*t.abs(t.sin(e+1))|0}();var a=s.MD5=o.extend({_doReset:function(){this._hash=new i.init([1732584193,4023233417,2562383102,271733878])},_doProcessBlock:function(t,e){for(var r=0;r<16;r++){var i=e+r,n=t[i];t[i]=16711935&(n<<8|n>>>24)|4278255360&(n<<24|n>>>8)}var 
o=this._hash.words,s=t[e+0],a=t[e+1],d=t[e+2],p=t[e+3],v=t[e+4],_=t[e+5],y=t[e+6],g=t[e+7],B=t[e+8],w=t[e+9],k=t[e+10],x=t[e+11],b=t[e+12],S=t[e+13],m=t[e+14],A=t[e+15],H=o[0],z=o[1],C=o[2],R=o[3];H=h(H,z,C,R,s,7,c[0]),R=h(R,H,z,C,a,12,c[1]),C=h(C,R,H,z,d,17,c[2]),z=h(z,C,R,H,p,22,c[3]),H=h(H,z,C,R,v,7,c[4]),R=h(R,H,z,C,_,12,c[5]),C=h(C,R,H,z,y,17,c[6]),z=h(z,C,R,H,g,22,c[7]),H=h(H,z,C,R,B,7,c[8]),R=h(R,H,z,C,w,12,c[9]),C=h(C,R,H,z,k,17,c[10]),z=h(z,C,R,H,x,22,c[11]),H=h(H,z,C,R,b,7,c[12]),R=h(R,H,z,C,S,12,c[13]),C=h(C,R,H,z,m,17,c[14]),H=f(H,z=h(z,C,R,H,A,22,c[15]),C,R,a,5,c[16]),R=f(R,H,z,C,y,9,c[17]),C=f(C,R,H,z,x,14,c[18]),z=f(z,C,R,H,s,20,c[19]),H=f(H,z,C,R,_,5,c[20]),R=f(R,H,z,C,k,9,c[21]),C=f(C,R,H,z,A,14,c[22]),z=f(z,C,R,H,v,20,c[23]),H=f(H,z,C,R,w,5,c[24]),R=f(R,H,z,C,m,9,c[25]),C=f(C,R,H,z,p,14,c[26]),z=f(z,C,R,H,B,20,c[27]),H=f(H,z,C,R,S,5,c[28]),R=f(R,H,z,C,d,9,c[29]),C=f(C,R,H,z,g,14,c[30]),H=l(H,z=f(z,C,R,H,b,20,c[31]),C,R,_,4,c[32]),R=l(R,H,z,C,B,11,c[33]),C=l(C,R,H,z,x,16,c[34]),z=l(z,C,R,H,m,23,c[35]),H=l(H,z,C,R,a,4,c[36]),R=l(R,H,z,C,v,11,c[37]),C=l(C,R,H,z,g,16,c[38]),z=l(z,C,R,H,k,23,c[39]),H=l(H,z,C,R,S,4,c[40]),R=l(R,H,z,C,s,11,c[41]),C=l(C,R,H,z,p,16,c[42]),z=l(z,C,R,H,y,23,c[43]),H=l(H,z,C,R,w,4,c[44]),R=l(R,H,z,C,b,11,c[45]),C=l(C,R,H,z,A,16,c[46]),H=u(H,z=l(z,C,R,H,d,23,c[47]),C,R,s,6,c[48]),R=u(R,H,z,C,g,10,c[49]),C=u(C,R,H,z,m,15,c[50]),z=u(z,C,R,H,_,21,c[51]),H=u(H,z,C,R,b,6,c[52]),R=u(R,H,z,C,p,10,c[53]),C=u(C,R,H,z,k,15,c[54]),z=u(z,C,R,H,a,21,c[55]),H=u(H,z,C,R,B,6,c[56]),R=u(R,H,z,C,A,10,c[57]),C=u(C,R,H,z,y,15,c[58]),z=u(z,C,R,H,S,21,c[59]),H=u(H,z,C,R,v,6,c[60]),R=u(R,H,z,C,x,10,c[61]),C=u(C,R,H,z,d,15,c[62]),z=u(z,C,R,H,w,21,c[63]),o[0]=o[0]+H|0,o[1]=o[1]+z|0,o[2]=o[2]+C|0,o[3]=o[3]+R|0},_doFinalize:function(){var e=this._data,r=e.words,i=8*this._nDataBytes,n=8*e.sigBytes;r[n>>>5]|=128<<24-n%32;var o=t.floor(i/4294967296),s=i;r[15+(n+64>>>9<<4)]=16711935&(o<<8|o>>>24)|4278255360&(o<<24|o>>>8),r[14+(n+64>>>9<<4)]=16711935&(s<<8|s>>>24)|4278255360&(s<<24|s>>>8),e.sigBytes=4*(r.length+1),this._process();for(var c=this._hash,a=c.words,h=0;h<4;h++){var f=a[h];a[h]=16711935&(f<<8|f>>>24)|4278255360&(f<<24|f>>>8)}return c},clone:function(){var t=o.clone.call(this);return t._hash=this._hash.clone(),t}});function h(t,e,r,i,n,o,s){var c=t+(e&r|~e&i)+n+s;return(c<>>32-o)+e}function f(t,e,r,i,n,o,s){var c=t+(e&i|r&~i)+n+s;return(c<>>32-o)+e}function l(t,e,r,i,n,o,s){var c=t+(e^r^i)+n+s;return(c<>>32-o)+e}function u(t,e,r,i,n,o,s){var c=t+(r^(e|~i))+n+s;return(c<>>32-o)+e}e.MD5=o._createHelper(a),e.HmacMD5=o._createHmacHelper(a)}(Math),n.MD5)}()},82169:function(t,e,r){!function(e,i,n){var o;t.exports=(o=r(19021),r(57165),o.mode.CFB=function(){var t=o.lib.BlockCipherMode.extend();function e(t,e,r,i){var n,o=this._iv;o?(n=o.slice(0),this._iv=void 0):n=this._prevBlock,i.encryptBlock(n,0);for(var s=0;s>24&255)){var e=t>>16&255,r=t>>8&255,i=255&t;255===e?(e=0,255===r?(r=0,255===i?i=0:++i):++r):++e,t=0,t+=e<<16,t+=r<<8,t+=i}else t+=1<<24;return t}function r(t){return 0===(t[0]=e(t[0]))&&(t[1]=e(t[1])),t}var i=t.Encryptor=t.extend({processBlock:function(t,e){var i=this._cipher,n=i.blockSize,o=this._iv,s=this._counter;o&&(s=this._counter=o.slice(0),this._iv=void 0),r(s);var c=s.slice(0);i.encryptBlock(c,0);for(var a=0;a>>2]|=n<<24-o%4*8,t.sigBytes+=n},unpad:function(t){var e=255&t.words[t.sigBytes-1>>>2];t.sigBytes-=e}},o.pad.Ansix923)}()},54905:function(t,e,r){!function(e,i,n){var o;t.exports=(o=r(19021),r(57165),o.pad.Iso10126={pad:function(t,e){var 
r=4*e,i=r-t.sigBytes%r;t.concat(o.lib.WordArray.random(i-1)).concat(o.lib.WordArray.create([i<<24],1))},unpad:function(t){var e=255&t.words[t.sigBytes-1>>>2];t.sigBytes-=e}},o.pad.Iso10126)}()},10482:function(t,e,r){!function(e,i,n){var o;t.exports=(o=r(19021),r(57165),o.pad.Iso97971={pad:function(t,e){t.concat(o.lib.WordArray.create([2147483648],1)),o.pad.ZeroPadding.pad(t,e)},unpad:function(t){o.pad.ZeroPadding.unpad(t),t.sigBytes--}},o.pad.Iso97971)}()},58124:function(t,e,r){!function(e,i,n){var o;t.exports=(o=r(19021),r(57165),o.pad.NoPadding={pad:function(){},unpad:function(){}},o.pad.NoPadding)}()},52155:function(t,e,r){!function(e,i,n){var o;t.exports=(o=r(19021),r(57165),o.pad.ZeroPadding={pad:function(t,e){var r=4*e;t.clamp(),t.sigBytes+=r-(t.sigBytes%r||r)},unpad:function(t){var e=t.words,r=t.sigBytes-1;for(r=t.sigBytes-1;r>=0;r--)if(e[r>>>2]>>>24-r%4*8&255){t.sigBytes=r+1;break}}},o.pad.ZeroPadding)}()},70019:function(t,e,r){!function(e,i,n){var o;t.exports=(o=r(19021),r(63009),r(51025),function(){var t=o,e=t.lib,r=e.Base,i=e.WordArray,n=t.algo,s=n.SHA256,c=n.HMAC,a=n.PBKDF2=r.extend({cfg:r.extend({keySize:4,hasher:s,iterations:25e4}),init:function(t){this.cfg=this.cfg.extend(t)},compute:function(t,e){for(var r=this.cfg,n=c.create(r.hasher,t),o=i.create(),s=i.create([1]),a=o.words,h=s.words,f=r.keySize,l=r.iterations;a.length>>16,t[1],t[0]<<16|t[3]>>>16,t[2],t[1]<<16|t[0]>>>16,t[3],t[2]<<16|t[1]>>>16],i=this._C=[t[2]<<16|t[2]>>>16,4294901760&t[0]|65535&t[1],t[3]<<16|t[3]>>>16,4294901760&t[1]|65535&t[2],t[0]<<16|t[0]>>>16,4294901760&t[2]|65535&t[3],t[1]<<16|t[1]>>>16,4294901760&t[3]|65535&t[0]];this._b=0;for(var n=0;n<4;n++)a.call(this);for(n=0;n<8;n++)i[n]^=r[n+4&7];if(e){var o=e.words,s=o[0],c=o[1],h=16711935&(s<<8|s>>>24)|4278255360&(s<<24|s>>>8),f=16711935&(c<<8|c>>>24)|4278255360&(c<<24|c>>>8),l=h>>>16|4294901760&f,u=f<<16|65535&h;for(i[0]^=h,i[1]^=l,i[2]^=f,i[3]^=u,i[4]^=h,i[5]^=l,i[6]^=f,i[7]^=u,n=0;n<4;n++)a.call(this)}},_doProcessBlock:function(t,e){var r=this._X;a.call(this),i[0]=r[0]^r[5]>>>16^r[3]<<16,i[1]=r[2]^r[7]>>>16^r[5]<<16,i[2]=r[4]^r[1]>>>16^r[7]<<16,i[3]=r[6]^r[3]>>>16^r[1]<<16;for(var n=0;n<4;n++)i[n]=16711935&(i[n]<<8|i[n]>>>24)|4278255360&(i[n]<<24|i[n]>>>8),t[e+n]^=i[n]},blockSize:4,ivSize:2});function a(){for(var t=this._X,e=this._C,r=0;r<8;r++)n[r]=e[r];for(e[0]=e[0]+1295307597+this._b|0,e[1]=e[1]+3545052371+(e[0]>>>0>>0?1:0)|0,e[2]=e[2]+886263092+(e[1]>>>0>>0?1:0)|0,e[3]=e[3]+1295307597+(e[2]>>>0>>0?1:0)|0,e[4]=e[4]+3545052371+(e[3]>>>0>>0?1:0)|0,e[5]=e[5]+886263092+(e[4]>>>0>>0?1:0)|0,e[6]=e[6]+1295307597+(e[5]>>>0>>0?1:0)|0,e[7]=e[7]+3545052371+(e[6]>>>0>>0?1:0)|0,this._b=e[7]>>>0>>0?1:0,r=0;r<8;r++){var i=t[r]+e[r],o=65535&i,c=i>>>16,a=((o*o>>>17)+o*c>>>15)+c*c,h=((4294901760&i)*i|0)+((65535&i)*i|0);s[r]=a^h}t[0]=s[0]+(s[7]<<16|s[7]>>>16)+(s[6]<<16|s[6]>>>16)|0,t[1]=s[1]+(s[0]<<8|s[0]>>>24)+s[7]|0,t[2]=s[2]+(s[1]<<16|s[1]>>>16)+(s[0]<<16|s[0]>>>16)|0,t[3]=s[3]+(s[2]<<8|s[2]>>>24)+s[1]|0,t[4]=s[4]+(s[3]<<16|s[3]>>>16)+(s[2]<<16|s[2]>>>16)|0,t[5]=s[5]+(s[4]<<8|s[4]>>>24)+s[3]|0,t[6]=s[6]+(s[5]<<16|s[5]>>>16)+(s[4]<<16|s[4]>>>16)|0,t[7]=s[7]+(s[6]<<8|s[6]>>>24)+s[5]|0}t.RabbitLegacy=e._createHelper(c)}(),o.RabbitLegacy)}()},96298:function(t,e,r){!function(e,i,n){var o;t.exports=(o=r(19021),r(80754),r(84636),r(39506),r(57165),function(){var t=o,e=t.lib.StreamCipher,r=t.algo,i=[],n=[],s=[],c=r.Rabbit=e.extend({_doReset:function(){for(var t=this._key.words,e=this.cfg.iv,r=0;r<4;r++)t[r]=16711935&(t[r]<<8|t[r]>>>24)|4278255360&(t[r]<<24|t[r]>>>8);var 
i=this._X=[t[0],t[3]<<16|t[2]>>>16,t[1],t[0]<<16|t[3]>>>16,t[2],t[1]<<16|t[0]>>>16,t[3],t[2]<<16|t[1]>>>16],n=this._C=[t[2]<<16|t[2]>>>16,4294901760&t[0]|65535&t[1],t[3]<<16|t[3]>>>16,4294901760&t[1]|65535&t[2],t[0]<<16|t[0]>>>16,4294901760&t[2]|65535&t[3],t[1]<<16|t[1]>>>16,4294901760&t[3]|65535&t[0]];for(this._b=0,r=0;r<4;r++)a.call(this);for(r=0;r<8;r++)n[r]^=i[r+4&7];if(e){var o=e.words,s=o[0],c=o[1],h=16711935&(s<<8|s>>>24)|4278255360&(s<<24|s>>>8),f=16711935&(c<<8|c>>>24)|4278255360&(c<<24|c>>>8),l=h>>>16|4294901760&f,u=f<<16|65535&h;for(n[0]^=h,n[1]^=l,n[2]^=f,n[3]^=u,n[4]^=h,n[5]^=l,n[6]^=f,n[7]^=u,r=0;r<4;r++)a.call(this)}},_doProcessBlock:function(t,e){var r=this._X;a.call(this),i[0]=r[0]^r[5]>>>16^r[3]<<16,i[1]=r[2]^r[7]>>>16^r[5]<<16,i[2]=r[4]^r[1]>>>16^r[7]<<16,i[3]=r[6]^r[3]>>>16^r[1]<<16;for(var n=0;n<4;n++)i[n]=16711935&(i[n]<<8|i[n]>>>24)|4278255360&(i[n]<<24|i[n]>>>8),t[e+n]^=i[n]},blockSize:4,ivSize:2});function a(){for(var t=this._X,e=this._C,r=0;r<8;r++)n[r]=e[r];for(e[0]=e[0]+1295307597+this._b|0,e[1]=e[1]+3545052371+(e[0]>>>0>>0?1:0)|0,e[2]=e[2]+886263092+(e[1]>>>0>>0?1:0)|0,e[3]=e[3]+1295307597+(e[2]>>>0>>0?1:0)|0,e[4]=e[4]+3545052371+(e[3]>>>0>>0?1:0)|0,e[5]=e[5]+886263092+(e[4]>>>0>>0?1:0)|0,e[6]=e[6]+1295307597+(e[5]>>>0>>0?1:0)|0,e[7]=e[7]+3545052371+(e[6]>>>0>>0?1:0)|0,this._b=e[7]>>>0>>0?1:0,r=0;r<8;r++){var i=t[r]+e[r],o=65535&i,c=i>>>16,a=((o*o>>>17)+o*c>>>15)+c*c,h=((4294901760&i)*i|0)+((65535&i)*i|0);s[r]=a^h}t[0]=s[0]+(s[7]<<16|s[7]>>>16)+(s[6]<<16|s[6]>>>16)|0,t[1]=s[1]+(s[0]<<8|s[0]>>>24)+s[7]|0,t[2]=s[2]+(s[1]<<16|s[1]>>>16)+(s[0]<<16|s[0]>>>16)|0,t[3]=s[3]+(s[2]<<8|s[2]>>>24)+s[1]|0,t[4]=s[4]+(s[3]<<16|s[3]>>>16)+(s[2]<<16|s[2]>>>16)|0,t[5]=s[5]+(s[4]<<8|s[4]>>>24)+s[3]|0,t[6]=s[6]+(s[5]<<16|s[5]>>>16)+(s[4]<<16|s[4]>>>16)|0,t[7]=s[7]+(s[6]<<8|s[6]>>>24)+s[5]|0}t.Rabbit=e._createHelper(c)}(),o.Rabbit)}()},77193:function(t,e,r){!function(e,i,n){var o;t.exports=(o=r(19021),r(80754),r(84636),r(39506),r(57165),function(){var t=o,e=t.lib.StreamCipher,r=t.algo,i=r.RC4=e.extend({_doReset:function(){for(var t=this._key,e=t.words,r=t.sigBytes,i=this._S=[],n=0;n<256;n++)i[n]=n;n=0;for(var o=0;n<256;n++){var s=n%r,c=e[s>>>2]>>>24-s%4*8&255;o=(o+i[n]+c)%256;var a=i[n];i[n]=i[o],i[o]=a}this._i=this._j=0},_doProcessBlock:function(t,e){t[e]^=n.call(this)},keySize:8,ivSize:0});function n(){for(var t=this._S,e=this._i,r=this._j,i=0,n=0;n<4;n++){r=(r+t[e=(e+1)%256])%256;var o=t[e];t[e]=t[r],t[r]=o,i|=t[(t[e]+t[r])%256]<<24-8*n}return this._i=e,this._j=r,i}t.RC4=e._createHelper(i);var s=r.RC4Drop=i.extend({cfg:i.cfg.extend({drop:192}),_doReset:function(){i._doReset.call(this);for(var t=this.cfg.drop;t>0;t--)n.call(this)}});t.RC4Drop=e._createHelper(s)}(),o.RC4)}()},78056:function(t,e,r){!function(e,i){var n;t.exports=(n=r(19021),function(t){var 
e=n,r=e.lib,i=r.WordArray,o=r.Hasher,s=e.algo,c=i.create([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,7,4,13,1,10,6,15,3,12,0,9,5,2,14,11,8,3,10,14,4,9,15,8,1,2,7,0,6,13,11,5,12,1,9,11,10,0,8,12,4,13,3,7,15,14,5,6,2,4,0,5,9,7,12,2,10,14,1,3,8,11,6,15,13]),a=i.create([5,14,7,0,9,2,11,4,13,6,15,8,1,10,3,12,6,11,3,7,0,13,5,10,14,15,8,12,4,9,1,2,15,5,1,3,7,14,6,9,11,8,12,2,10,0,4,13,8,6,4,1,3,11,15,0,5,12,2,13,9,7,10,14,12,15,10,4,1,5,8,7,6,2,13,14,0,3,9,11]),h=i.create([11,14,15,12,5,8,7,9,11,13,14,15,6,7,9,8,7,6,8,13,11,9,7,15,7,12,15,9,11,7,13,12,11,13,6,7,14,9,13,15,14,8,13,6,5,12,7,5,11,12,14,15,14,15,9,8,9,14,5,6,8,6,5,12,9,15,5,11,6,8,13,12,5,12,13,14,11,8,5,6]),f=i.create([8,9,9,11,13,15,15,5,7,7,8,11,14,14,12,6,9,13,15,7,12,8,9,11,7,7,12,7,6,15,13,11,9,7,15,11,8,6,6,14,12,13,5,14,13,13,7,5,15,5,8,11,14,14,6,14,6,9,12,9,12,5,15,8,8,5,12,9,12,5,14,6,8,13,6,5,15,13,11,11]),l=i.create([0,1518500249,1859775393,2400959708,2840853838]),u=i.create([1352829926,1548603684,1836072691,2053994217,0]),d=s.RIPEMD160=o.extend({_doReset:function(){this._hash=i.create([1732584193,4023233417,2562383102,271733878,3285377520])},_doProcessBlock:function(t,e){for(var r=0;r<16;r++){var i=e+r,n=t[i];t[i]=16711935&(n<<8|n>>>24)|4278255360&(n<<24|n>>>8)}var o,s,d,w,k,x,b,S,m,A,H,z=this._hash.words,C=l.words,R=u.words,E=c.words,D=a.words,M=h.words,P=f.words;for(x=o=z[0],b=s=z[1],S=d=z[2],m=w=z[3],A=k=z[4],r=0;r<80;r+=1)H=o+t[e+E[r]]|0,H+=r<16?p(s,d,w)+C[0]:r<32?v(s,d,w)+C[1]:r<48?_(s,d,w)+C[2]:r<64?y(s,d,w)+C[3]:g(s,d,w)+C[4],H=(H=B(H|=0,M[r]))+k|0,o=k,k=w,w=B(d,10),d=s,s=H,H=x+t[e+D[r]]|0,H+=r<16?g(b,S,m)+R[0]:r<32?y(b,S,m)+R[1]:r<48?_(b,S,m)+R[2]:r<64?v(b,S,m)+R[3]:p(b,S,m)+R[4],H=(H=B(H|=0,P[r]))+A|0,x=A,A=m,m=B(S,10),S=b,b=H;H=z[1]+d+m|0,z[1]=z[2]+w+A|0,z[2]=z[3]+k+x|0,z[3]=z[4]+o+b|0,z[4]=z[0]+s+S|0,z[0]=H},_doFinalize:function(){var t=this._data,e=t.words,r=8*this._nDataBytes,i=8*t.sigBytes;e[i>>>5]|=128<<24-i%32,e[14+(i+64>>>9<<4)]=16711935&(r<<8|r>>>24)|4278255360&(r<<24|r>>>8),t.sigBytes=4*(e.length+1),this._process();for(var n=this._hash,o=n.words,s=0;s<5;s++){var c=o[s];o[s]=16711935&(c<<8|c>>>24)|4278255360&(c<<24|c>>>8)}return n},clone:function(){var t=o.clone.call(this);return t._hash=this._hash.clone(),t}});function p(t,e,r){return t^e^r}function v(t,e,r){return t&e|~t&r}function _(t,e,r){return(t|~e)^r}function y(t,e,r){return t&r|e&~r}function g(t,e,r){return t^(e|~r)}function B(t,e){return t<>>32-e}e.RIPEMD160=o._createHelper(d),e.HmacRIPEMD160=o._createHmacHelper(d)}(Math),n.RIPEMD160)}()},45471:function(t,e,r){!function(e,i){var n;t.exports=(n=r(19021),function(){var t=n,e=t.lib,r=e.WordArray,i=e.Hasher,o=t.algo,s=[],c=o.SHA1=i.extend({_doReset:function(){this._hash=new r.init([1732584193,4023233417,2562383102,271733878,3285377520])},_doProcessBlock:function(t,e){for(var r=this._hash.words,i=r[0],n=r[1],o=r[2],c=r[3],a=r[4],h=0;h<80;h++){if(h<16)s[h]=0|t[e+h];else{var f=s[h-3]^s[h-8]^s[h-14]^s[h-16];s[h]=f<<1|f>>>31}var l=(i<<5|i>>>27)+a+s[h];l+=h<20?1518500249+(n&o|~n&c):h<40?1859775393+(n^o^c):h<60?(n&o|n&c|o&c)-1894007588:(n^o^c)-899497514,a=c,c=o,o=n<<30|n>>>2,n=i,i=l}r[0]=r[0]+i|0,r[1]=r[1]+n|0,r[2]=r[2]+o|0,r[3]=r[3]+c|0,r[4]=r[4]+a|0},_doFinalize:function(){var t=this._data,e=t.words,r=8*this._nDataBytes,i=8*t.sigBytes;return e[i>>>5]|=128<<24-i%32,e[14+(i+64>>>9<<4)]=Math.floor(r/4294967296),e[15+(i+64>>>9<<4)]=r,t.sigBytes=4*e.length,this._process(),this._hash},clone:function(){var t=i.clone.call(this);return 
t._hash=this._hash.clone(),t}});t.SHA1=i._createHelper(c),t.HmacSHA1=i._createHmacHelper(c)}(),n.SHA1)}()},36308:function(t,e,r){!function(e,i,n){var o;t.exports=(o=r(19021),r(63009),function(){var t=o,e=t.lib.WordArray,r=t.algo,i=r.SHA256,n=r.SHA224=i.extend({_doReset:function(){this._hash=new e.init([3238371032,914150663,812702999,4144912697,4290775857,1750603025,1694076839,3204075428])},_doFinalize:function(){var t=i._doFinalize.call(this);return t.sigBytes-=4,t}});t.SHA224=i._createHelper(n),t.HmacSHA224=i._createHmacHelper(n)}(),o.SHA224)}()},63009:function(t,e,r){!function(e,i){var n;t.exports=(n=r(19021),function(t){var e=n,r=e.lib,i=r.WordArray,o=r.Hasher,s=e.algo,c=[],a=[];!function(){function e(e){for(var r=t.sqrt(e),i=2;i<=r;i++)if(!(e%i))return!1;return!0}function r(t){return 4294967296*(t-(0|t))|0}for(var i=2,n=0;n<64;)e(i)&&(n<8&&(c[n]=r(t.pow(i,.5))),a[n]=r(t.pow(i,1/3)),n++),i++}();var h=[],f=s.SHA256=o.extend({_doReset:function(){this._hash=new i.init(c.slice(0))},_doProcessBlock:function(t,e){for(var r=this._hash.words,i=r[0],n=r[1],o=r[2],s=r[3],c=r[4],f=r[5],l=r[6],u=r[7],d=0;d<64;d++){if(d<16)h[d]=0|t[e+d];else{var p=h[d-15],v=(p<<25|p>>>7)^(p<<14|p>>>18)^p>>>3,_=h[d-2],y=(_<<15|_>>>17)^(_<<13|_>>>19)^_>>>10;h[d]=v+h[d-7]+y+h[d-16]}var g=i&n^i&o^n&o,B=(i<<30|i>>>2)^(i<<19|i>>>13)^(i<<10|i>>>22),w=u+((c<<26|c>>>6)^(c<<21|c>>>11)^(c<<7|c>>>25))+(c&f^~c&l)+a[d]+h[d];u=l,l=f,f=c,c=s+w|0,s=o,o=n,n=i,i=w+(B+g)|0}r[0]=r[0]+i|0,r[1]=r[1]+n|0,r[2]=r[2]+o|0,r[3]=r[3]+s|0,r[4]=r[4]+c|0,r[5]=r[5]+f|0,r[6]=r[6]+l|0,r[7]=r[7]+u|0},_doFinalize:function(){var e=this._data,r=e.words,i=8*this._nDataBytes,n=8*e.sigBytes;return r[n>>>5]|=128<<24-n%32,r[14+(n+64>>>9<<4)]=t.floor(i/4294967296),r[15+(n+64>>>9<<4)]=i,e.sigBytes=4*r.length,this._process(),this._hash},clone:function(){var t=o.clone.call(this);return t._hash=this._hash.clone(),t}});e.SHA256=o._createHelper(f),e.HmacSHA256=o._createHmacHelper(f)}(Math),n.SHA256)}()},45953:function(t,e,r){!function(e,i,n){var o;t.exports=(o=r(19021),r(43240),function(t){var e=o,r=e.lib,i=r.WordArray,n=r.Hasher,s=e.x64.Word,c=e.algo,a=[],h=[],f=[];!function(){for(var t=1,e=0,r=0;r<24;r++){a[t+5*e]=(r+1)*(r+2)/2%64;var i=(2*t+3*e)%5;t=e%5,e=i}for(t=0;t<5;t++)for(e=0;e<5;e++)h[t+5*e]=e+(2*t+3*e)%5*5;for(var n=1,o=0;o<24;o++){for(var c=0,l=0,u=0;u<7;u++){if(1&n){var d=(1<>>24)|4278255360&(o<<24|o>>>8),s=16711935&(s<<8|s>>>24)|4278255360&(s<<24|s>>>8),(z=r[n]).high^=s,z.low^=o}for(var c=0;c<24;c++){for(var u=0;u<5;u++){for(var d=0,p=0,v=0;v<5;v++)d^=(z=r[u+5*v]).high,p^=z.low;var _=l[u];_.high=d,_.low=p}for(u=0;u<5;u++){var y=l[(u+4)%5],g=l[(u+1)%5],B=g.high,w=g.low;for(d=y.high^(B<<1|w>>>31),p=y.low^(w<<1|B>>>31),v=0;v<5;v++)(z=r[u+5*v]).high^=d,z.low^=p}for(var k=1;k<25;k++){var x=(z=r[k]).high,b=z.low,S=a[k];S<32?(d=x<>>32-S,p=b<>>32-S):(d=b<>>64-S,p=x<>>64-S);var m=l[h[k]];m.high=d,m.low=p}var A=l[0],H=r[0];for(A.high=H.high,A.low=H.low,u=0;u<5;u++)for(v=0;v<5;v++){var z=r[k=u+5*v],C=l[k],R=l[(u+1)%5+5*v],E=l[(u+2)%5+5*v];z.high=C.high^~R.high&E.high,z.low=C.low^~R.low&E.low}z=r[0];var D=f[c];z.high^=D.high,z.low^=D.low}},_doFinalize:function(){var e=this._data,r=e.words,n=(this._nDataBytes,8*e.sigBytes),o=32*this.blockSize;r[n>>>5]|=1<<24-n%32,r[(t.ceil((n+1)/o)*o>>>5)-1]|=128,e.sigBytes=4*r.length,this._process();for(var s=this._state,c=this.cfg.outputLength/8,a=c/8,h=[],f=0;f>>24)|4278255360&(u<<24|u>>>8),d=16711935&(d<<8|d>>>24)|4278255360&(d<<24|d>>>8),h.push(d),h.push(u)}return new i.init(h,c)},clone:function(){for(var 
t=n.clone.call(this),e=t._state=this._state.slice(0),r=0;r<25;r++)e[r]=e[r].clone();return t}});e.SHA3=n._createHelper(u),e.HmacSHA3=n._createHmacHelper(u)}(Math),o.SHA3)}()},89557:function(t,e,r){!function(e,i,n){var o;t.exports=(o=r(19021),r(43240),r(81380),function(){var t=o,e=t.x64,r=e.Word,i=e.WordArray,n=t.algo,s=n.SHA512,c=n.SHA384=s.extend({_doReset:function(){this._hash=new i.init([new r.init(3418070365,3238371032),new r.init(1654270250,914150663),new r.init(2438529370,812702999),new r.init(355462360,4144912697),new r.init(1731405415,4290775857),new r.init(2394180231,1750603025),new r.init(3675008525,1694076839),new r.init(1203062813,3204075428)])},_doFinalize:function(){var t=s._doFinalize.call(this);return t.sigBytes-=16,t}});t.SHA384=s._createHelper(c),t.HmacSHA384=s._createHmacHelper(c)}(),o.SHA384)}()},81380:function(t,e,r){!function(e,i,n){var o;t.exports=(o=r(19021),r(43240),function(){var t=o,e=t.lib.Hasher,r=t.x64,i=r.Word,n=r.WordArray,s=t.algo;function c(){return i.create.apply(i,arguments)}var a=[c(1116352408,3609767458),c(1899447441,602891725),c(3049323471,3964484399),c(3921009573,2173295548),c(961987163,4081628472),c(1508970993,3053834265),c(2453635748,2937671579),c(2870763221,3664609560),c(3624381080,2734883394),c(310598401,1164996542),c(607225278,1323610764),c(1426881987,3590304994),c(1925078388,4068182383),c(2162078206,991336113),c(2614888103,633803317),c(3248222580,3479774868),c(3835390401,2666613458),c(4022224774,944711139),c(264347078,2341262773),c(604807628,2007800933),c(770255983,1495990901),c(1249150122,1856431235),c(1555081692,3175218132),c(1996064986,2198950837),c(2554220882,3999719339),c(2821834349,766784016),c(2952996808,2566594879),c(3210313671,3203337956),c(3336571891,1034457026),c(3584528711,2466948901),c(113926993,3758326383),c(338241895,168717936),c(666307205,1188179964),c(773529912,1546045734),c(1294757372,1522805485),c(1396182291,2643833823),c(1695183700,2343527390),c(1986661051,1014477480),c(2177026350,1206759142),c(2456956037,344077627),c(2730485921,1290863460),c(2820302411,3158454273),c(3259730800,3505952657),c(3345764771,106217008),c(3516065817,3606008344),c(3600352804,1432725776),c(4094571909,1467031594),c(275423344,851169720),c(430227734,3100823752),c(506948616,1363258195),c(659060556,3750685593),c(883997877,3785050280),c(958139571,3318307427),c(1322822218,3812723403),c(1537002063,2003034995),c(1747873779,3602036899),c(1955562222,1575990012),c(2024104815,1125592928),c(2227730452,2716904306),c(2361852424,442776044),c(2428436474,593698344),c(2756734187,3733110249),c(3204031479,2999351573),c(3329325298,3815920427),c(3391569614,3928383900),c(3515267271,566280711),c(3940187606,3454069534),c(4118630271,4000239992),c(116418474,1914138554),c(174292421,2731055270),c(289380356,3203993006),c(460393269,320620315),c(685471733,587496836),c(852142971,1086792851),c(1017036298,365543100),c(1126000580,2618297676),c(1288033470,3409855158),c(1501505948,4234509866),c(1607167915,987167468),c(1816402316,1246189591)],h=[];!function(){for(var t=0;t<80;t++)h[t]=c()}();var f=s.SHA512=e.extend({_doReset:function(){this._hash=new n.init([new i.init(1779033703,4089235720),new i.init(3144134277,2227873595),new i.init(1013904242,4271175723),new i.init(2773480762,1595750129),new i.init(1359893119,2917565137),new i.init(2600822924,725511199),new i.init(528734635,4215389547),new i.init(1541459225,327033209)])},_doProcessBlock:function(t,e){for(var 
r=this._hash.words,i=r[0],n=r[1],o=r[2],s=r[3],c=r[4],f=r[5],l=r[6],u=r[7],d=i.high,p=i.low,v=n.high,_=n.low,y=o.high,g=o.low,B=s.high,w=s.low,k=c.high,x=c.low,b=f.high,S=f.low,m=l.high,A=l.low,H=u.high,z=u.low,C=d,R=p,E=v,D=_,M=y,P=g,F=B,W=w,I=k,O=x,U=b,K=S,X=m,L=A,T=H,j=z,N=0;N<80;N++){var Z,q,G=h[N];if(N<16)q=G.high=0|t[e+2*N],Z=G.low=0|t[e+2*N+1];else{var V=h[N-15],Y=V.high,J=V.low,Q=(Y>>>1|J<<31)^(Y>>>8|J<<24)^Y>>>7,$=(J>>>1|Y<<31)^(J>>>8|Y<<24)^(J>>>7|Y<<25),tt=h[N-2],et=tt.high,rt=tt.low,it=(et>>>19|rt<<13)^(et<<3|rt>>>29)^et>>>6,nt=(rt>>>19|et<<13)^(rt<<3|et>>>29)^(rt>>>6|et<<26),ot=h[N-7],st=ot.high,ct=ot.low,at=h[N-16],ht=at.high,ft=at.low;q=(q=(q=Q+st+((Z=$+ct)>>>0<$>>>0?1:0))+it+((Z+=nt)>>>0>>0?1:0))+ht+((Z+=ft)>>>0>>0?1:0),G.high=q,G.low=Z}var lt,ut=I&U^~I&X,dt=O&K^~O&L,pt=C&E^C&M^E&M,vt=R&D^R&P^D&P,_t=(C>>>28|R<<4)^(C<<30|R>>>2)^(C<<25|R>>>7),yt=(R>>>28|C<<4)^(R<<30|C>>>2)^(R<<25|C>>>7),gt=(I>>>14|O<<18)^(I>>>18|O<<14)^(I<<23|O>>>9),Bt=(O>>>14|I<<18)^(O>>>18|I<<14)^(O<<23|I>>>9),wt=a[N],kt=wt.high,xt=wt.low,bt=T+gt+((lt=j+Bt)>>>0>>0?1:0),St=yt+vt;T=X,j=L,X=U,L=K,U=I,K=O,I=F+(bt=(bt=(bt=bt+ut+((lt+=dt)>>>0
>>0?1:0))+kt+((lt+=xt)>>>0>>0?1:0))+q+((lt+=Z)>>>0>>0?1:0))+((O=W+lt|0)>>>0>>0?1:0)|0,F=M,W=P,M=E,P=D,E=C,D=R,C=bt+(_t+pt+(St>>>0>>0?1:0))+((R=lt+St|0)>>>0>>0?1:0)|0}p=i.low=p+R,i.high=d+C+(p>>>0>>0?1:0),_=n.low=_+D,n.high=v+E+(_>>>0>>0?1:0),g=o.low=g+P,o.high=y+M+(g>>>0
    >>0?1:0),w=s.low=w+W,s.high=B+F+(w>>>0>>0?1:0),x=c.low=x+O,c.high=k+I+(x>>>0>>0?1:0),S=f.low=S+K,f.high=b+U+(S>>>0>>0?1:0),A=l.low=A+L,l.high=m+X+(A>>>0>>0?1:0),z=u.low=z+j,u.high=H+T+(z>>>0>>0?1:0)},_doFinalize:function(){var t=this._data,e=t.words,r=8*this._nDataBytes,i=8*t.sigBytes;return e[i>>>5]|=128<<24-i%32,e[30+(i+128>>>10<<5)]=Math.floor(r/4294967296),e[31+(i+128>>>10<<5)]=r,t.sigBytes=4*e.length,this._process(),this._hash.toX32()},clone:function(){var t=e.clone.call(this);return t._hash=this._hash.clone(),t},blockSize:32});t.SHA512=e._createHelper(f),t.HmacSHA512=e._createHmacHelper(f)}(),o.SHA512)}()},7628:function(t,e,r){!function(e,i,n){var o;t.exports=(o=r(19021),r(80754),r(84636),r(39506),r(57165),function(){var t=o,e=t.lib,r=e.WordArray,i=e.BlockCipher,n=t.algo,s=[57,49,41,33,25,17,9,1,58,50,42,34,26,18,10,2,59,51,43,35,27,19,11,3,60,52,44,36,63,55,47,39,31,23,15,7,62,54,46,38,30,22,14,6,61,53,45,37,29,21,13,5,28,20,12,4],c=[14,17,11,24,1,5,3,28,15,6,21,10,23,19,12,4,26,8,16,7,27,20,13,2,41,52,31,37,47,55,30,40,51,45,33,48,44,49,39,56,34,53,46,42,50,36,29,32],a=[1,2,4,6,8,10,12,14,15,17,19,21,23,25,27,28],h=[{0:8421888,268435456:32768,536870912:8421378,805306368:2,1073741824:512,1342177280:8421890,1610612736:8389122,1879048192:8388608,2147483648:514,2415919104:8389120,2684354560:33280,2952790016:8421376,3221225472:32770,3489660928:8388610,3758096384:0,4026531840:33282,134217728:0,402653184:8421890,671088640:33282,939524096:32768,1207959552:8421888,1476395008:512,1744830464:8421378,2013265920:2,2281701376:8389120,2550136832:33280,2818572288:8421376,3087007744:8389122,3355443200:8388610,3623878656:32770,3892314112:514,4160749568:8388608,1:32768,268435457:2,536870913:8421888,805306369:8388608,1073741825:8421378,1342177281:33280,1610612737:512,1879048193:8389122,2147483649:8421890,2415919105:8421376,2684354561:8388610,2952790017:33282,3221225473:514,3489660929:8389120,3758096385:32770,4026531841:0,134217729:8421890,402653185:8421376,671088641:8388608,939524097:512,1207959553:32768,1476395009:8388610,1744830465:2,2013265921:33282,2281701377:32770,2550136833:8389122,2818572289:514,3087007745:8421888,3355443201:8389120,3623878657:0,3892314113:33280,4160749569:8421378},{0:1074282512,16777216:16384,33554432:524288,50331648:1074266128,67108864:1073741840,83886080:1074282496,100663296:1073758208,117440512:16,134217728:540672,150994944:1073758224,167772160:1073741824,184549376:540688,201326592:524304,218103808:0,234881024:16400,251658240:1074266112,8388608:1073758208,25165824:540688,41943040:16,58720256:1073758224,75497472:1074282512,92274688:1073741824,109051904:524288,125829120:1074266128,142606336:524304,159383552:0,176160768:16384,192937984:1074266112,209715200:1073741840,226492416:540672,243269632:1074282496,260046848:16400,268435456:0,285212672:1074266128,301989888:1073758224,318767104:1074282496,335544320:1074266112,352321536:16,369098752:540688,385875968:16384,402653184:16400,419430400:524288,436207616:524304,452984832:1073741840,469762048:540672,486539264:1073758208,503316480:1073741824,520093696:1074282512,276824064:540688,293601280:524288,310378496:1074266112,327155712:16384,343932928:1073758208,360710144:1074282512,377487360:16,394264576:1073741824,411041792:1074282496,427819008:1073741840,444596224:1073758224,461373440:524304,478150656:0,494927872:16400,511705088:1074266128,528482304:540672},{0:260,1048576:0,2097152:67109120,3145728:65796,4194304:65540,5242880:67108868,6291456:67174660,7340032:67174400,8388608:67108864,9437184:67174656,10485760:65792,11534336:67174404,1
2582912:67109124,13631488:65536,14680064:4,15728640:256,524288:67174656,1572864:67174404,2621440:0,3670016:67109120,4718592:67108868,5767168:65536,6815744:65540,7864320:260,8912896:4,9961472:256,11010048:67174400,12058624:65796,13107200:65792,14155776:67109124,15204352:67174660,16252928:67108864,16777216:67174656,17825792:65540,18874368:65536,19922944:67109120,20971520:256,22020096:67174660,23068672:67108868,24117248:0,25165824:67109124,26214400:67108864,27262976:4,28311552:65792,29360128:67174400,30408704:260,31457280:65796,32505856:67174404,17301504:67108864,18350080:260,19398656:67174656,20447232:0,21495808:65540,22544384:67109120,23592960:256,24641536:67174404,25690112:65536,26738688:67174660,27787264:65796,28835840:67108868,29884416:67109124,30932992:67174400,31981568:4,33030144:65792},{0:2151682048,65536:2147487808,131072:4198464,196608:2151677952,262144:0,327680:4198400,393216:2147483712,458752:4194368,524288:2147483648,589824:4194304,655360:64,720896:2147487744,786432:2151678016,851968:4160,917504:4096,983040:2151682112,32768:2147487808,98304:64,163840:2151678016,229376:2147487744,294912:4198400,360448:2151682112,425984:0,491520:2151677952,557056:4096,622592:2151682048,688128:4194304,753664:4160,819200:2147483648,884736:4194368,950272:4198464,1015808:2147483712,1048576:4194368,1114112:4198400,1179648:2147483712,1245184:0,1310720:4160,1376256:2151678016,1441792:2151682048,1507328:2147487808,1572864:2151682112,1638400:2147483648,1703936:2151677952,1769472:4198464,1835008:2147487744,1900544:4194304,1966080:64,2031616:4096,1081344:2151677952,1146880:2151682112,1212416:0,1277952:4198400,1343488:4194368,1409024:2147483648,1474560:2147487808,1540096:64,1605632:2147483712,1671168:4096,1736704:2147487744,1802240:2151678016,1867776:4160,1933312:2151682048,1998848:4194304,2064384:4198464},{0:128,4096:17039360,8192:262144,12288:536870912,16384:537133184,20480:16777344,24576:553648256,28672:262272,32768:16777216,36864:537133056,40960:536871040,45056:553910400,49152:553910272,53248:0,57344:17039488,61440:553648128,2048:17039488,6144:553648256,10240:128,14336:17039360,18432:262144,22528:537133184,26624:553910272,30720:536870912,34816:537133056,38912:0,43008:553910400,47104:16777344,51200:536871040,55296:553648128,59392:16777216,63488:262272,65536:262144,69632:128,73728:536870912,77824:553648256,81920:16777344,86016:553910272,90112:537133184,94208:16777216,98304:553910400,102400:553648128,106496:17039360,110592:537133056,114688:262272,118784:536871040,122880:0,126976:17039488,67584:553648256,71680:16777216,75776:17039360,79872:537133184,83968:536870912,88064:17039488,92160:128,96256:553910272,100352:262272,104448:553910400,108544:0,112640:553648128,116736:16777344,120832:262144,124928:537133056,129024:536871040},{0:268435464,256:8192,512:270532608,768:270540808,1024:268443648,1280:2097152,1536:2097160,1792:268435456,2048:0,2304:268443656,2560:2105344,2816:8,3072:270532616,3328:2105352,3584:8200,3840:270540800,128:270532608,384:270540808,640:8,896:2097152,1152:2105352,1408:268435464,1664:268443648,1920:8200,2176:2097160,2432:8192,2688:268443656,2944:270532616,3200:0,3456:270540800,3712:2105344,3968:268435456,4096:268443648,4352:270532616,4608:270540808,4864:8200,5120:2097152,5376:268435456,5632:268435464,5888:2105344,6144:2105352,6400:0,6656:8,6912:270532608,7168:8192,7424:268443656,7680:270540800,7936:2097160,4224:8,4480:2105344,4736:2097152,4992:268435464,5248:268443648,5504:8200,5760:270540808,6016:270532608,6272:270540800,6528:270532616,6784:8192,7040:2105352,7296:2097160,7552:0,7808:268435456,8
064:268443656},{0:1048576,16:33555457,32:1024,48:1049601,64:34604033,80:0,96:1,112:34603009,128:33555456,144:1048577,160:33554433,176:34604032,192:34603008,208:1025,224:1049600,240:33554432,8:34603009,24:0,40:33555457,56:34604032,72:1048576,88:33554433,104:33554432,120:1025,136:1049601,152:33555456,168:34603008,184:1048577,200:1024,216:34604033,232:1,248:1049600,256:33554432,272:1048576,288:33555457,304:34603009,320:1048577,336:33555456,352:34604032,368:1049601,384:1025,400:34604033,416:1049600,432:1,448:0,464:34603008,480:33554433,496:1024,264:1049600,280:33555457,296:34603009,312:1,328:33554432,344:1048576,360:1025,376:34604032,392:33554433,408:34603008,424:0,440:34604033,456:1049601,472:1024,488:33555456,504:1048577},{0:134219808,1:131072,2:134217728,3:32,4:131104,5:134350880,6:134350848,7:2048,8:134348800,9:134219776,10:133120,11:134348832,12:2080,13:0,14:134217760,15:133152,2147483648:2048,2147483649:134350880,2147483650:134219808,2147483651:134217728,2147483652:134348800,2147483653:133120,2147483654:133152,2147483655:32,2147483656:134217760,2147483657:2080,2147483658:131104,2147483659:134350848,2147483660:0,2147483661:134348832,2147483662:134219776,2147483663:131072,16:133152,17:134350848,18:32,19:2048,20:134219776,21:134217760,22:134348832,23:131072,24:0,25:131104,26:134348800,27:134219808,28:134350880,29:133120,30:2080,31:134217728,2147483664:131072,2147483665:2048,2147483666:134348832,2147483667:133152,2147483668:32,2147483669:134348800,2147483670:134217728,2147483671:134219808,2147483672:134350880,2147483673:134217760,2147483674:134219776,2147483675:0,2147483676:133120,2147483677:2080,2147483678:131104,2147483679:134350848}],f=[4160749569,528482304,33030144,2064384,129024,8064,504,2147483679],l=n.DES=i.extend({_doReset:function(){for(var t=this._key.words,e=[],r=0;r<56;r++){var i=s[r]-1;e[r]=t[i>>>5]>>>31-i%32&1}for(var n=this._subKeys=[],o=0;o<16;o++){var h=n[o]=[],f=a[o];for(r=0;r<24;r++)h[r/6|0]|=e[(c[r]-1+f)%28]<<31-r%6,h[4+(r/6|0)]|=e[28+(c[r+24]-1+f)%28]<<31-r%6;for(h[0]=h[0]<<1|h[0]>>>31,r=1;r<7;r++)h[r]=h[r]>>>4*(r-1)+3;h[7]=h[7]<<5|h[7]>>>27}var l=this._invSubKeys=[];for(r=0;r<16;r++)l[r]=n[15-r]},encryptBlock:function(t,e){this._doCryptBlock(t,e,this._subKeys)},decryptBlock:function(t,e){this._doCryptBlock(t,e,this._invSubKeys)},_doCryptBlock:function(t,e,r){this._lBlock=t[e],this._rBlock=t[e+1],u.call(this,4,252645135),u.call(this,16,65535),d.call(this,2,858993459),d.call(this,8,16711935),u.call(this,1,1431655765);for(var i=0;i<16;i++){for(var n=r[i],o=this._lBlock,s=this._rBlock,c=0,a=0;a<8;a++)c|=h[a][((s^n[a])&f[a])>>>0];this._lBlock=s,this._rBlock=o^c}var l=this._lBlock;this._lBlock=this._rBlock,this._rBlock=l,u.call(this,1,1431655765),d.call(this,8,16711935),d.call(this,2,858993459),u.call(this,16,65535),u.call(this,4,252645135),t[e]=this._lBlock,t[e+1]=this._rBlock},keySize:2,ivSize:2,blockSize:2});function u(t,e){var r=(this._lBlock>>>t^this._rBlock)&e;this._rBlock^=r,this._lBlock^=r<>>t^this._lBlock)&e;this._lBlock^=r,this._rBlock^=r<192.");var 
e=t.slice(0,2),i=t.length<4?t.slice(0,2):t.slice(2,4),n=t.length<6?t.slice(0,2):t.slice(4,6);this._des1=l.createEncryptor(r.create(e)),this._des2=l.createEncryptor(r.create(i)),this._des3=l.createEncryptor(r.create(n))},encryptBlock:function(t,e){this._des1.encryptBlock(t,e),this._des2.decryptBlock(t,e),this._des3.encryptBlock(t,e)},decryptBlock:function(t,e){this._des3.decryptBlock(t,e),this._des2.encryptBlock(t,e),this._des1.decryptBlock(t,e)},keySize:6,ivSize:2,blockSize:2});t.TripleDES=i._createHelper(p)}(),o.TripleDES)}()},43240:function(t,e,r){!function(e,i){var n;t.exports=(n=r(19021),function(t){var e=n,r=e.lib,i=r.Base,o=r.WordArray,s=e.x64={};s.Word=i.extend({init:function(t,e){this.high=t,this.low=e}}),s.WordArray=i.extend({init:function(e,r){e=this.words=e||[],this.sigBytes=r!=t?r:8*e.length},toX32:function(){for(var t=this.words,e=t.length,r=[],i=0;i{n.d(t,{A:()=>l});var r=n(58168),a=n(96540),i=n(50876);const o=e=>(0,a.forwardRef)(((t,n)=>{let{callback:o,feature:c,isStart:l,isSuccess:s,isFailure:d,eventReason:g,payload:m={},...u}=t;const{sendLog:h,isReady:p}=(0,i.A)(),k=(0,a.useCallback)((()=>{const e=u[o],t={feature:c,isStart:l,isSuccess:s,isFailure:d,eventReason:g,...m,...u["data-ga"]?{dataGa:u["data-ga"]}:{},...u.dataGa?{dataGa:u.dataGa}:{},...u["data-track"]?{dataTrack:u["data-track"]}:{},...u.label?{label:u.label}:{}};"function"==typeof e&&e(),h(t,!0)}),[o,h,p,m,u]),x=(0,a.useMemo)((()=>({...u,[o]:k})),[u,o,k]);return a.createElement(e,(0,r.A)({ref:n},x))}));var c=n(67276);const l=e=>(0,a.forwardRef)(((t,n)=>{let{payload:i={},...l}=t;const s=o(e);return a.createElement(s,(0,r.A)({},l,{ref:n,callback:"onClick",payload:{...i,action:c.o1.buttonClicked}}))}))},21828:(e,t,n)=>{n.d(t,{A:()=>i});var r=n(96540),a=n(83199);const i=e=>{let{theme:t}=e;const n="default"==t?"grey185":"grey45";return t="dark"==t?a.DarkTheme:a.DefaultTheme,r.createElement(a.Flex,{height:"1px",width:"100%",background:(0,a.getColor)(["neutral",n])({theme:t})})}},84541:(e,t,n)=>{n.d(t,{A:()=>S});var r=n(58168),a=n(96540),i=n(45463),o=n(83199);const c=(0,n(8711).default)(o.Flex).attrs((e=>({flex:{grow:"1",shrink:"0"},...e}))).withConfig({displayName:"panel__Panel",componentId:"sc-4zlw7c-0"})(["",";",";"],(e=>{let{order:t}=e;return t&&"order: ".concat(t,";")}),(e=>{let{bgGradient:t}=e;return t&&"background: linear-gradient(34.14deg, #536775 -26.52%, #2F3A42 53.66%);\n"}));var l=n(17208);const s=()=>a.createElement(o.Flex,{alignItems:"center",column:!0,padding:[0,0,18,0]},a.createElement(l.oi,null));n(41393),n(81454);const d=[{icon:"rocket",title:"Rapid Deployment, Instant Insights",text:"One-command installation, automatic metric discovery, and intuitive out-of-the-box dashboards for every single metric."},{icon:"qualityOfServiceSolid",title:"Advanced Monitoring, Made Accessible",text:"Experience real-time monitoring with 1-second granularity and anomaly detection across 800+ integrations."},{icon:"firewallSolid",title:"On-Prem Data Storage, Transparent Pricing",text:"Unlimited metrics, Infinite scale, Zero hidden costs. 
Decentralized architecture ensures maximum data security."}];n(3064),n(98992),n(72577),n(62953);var g=n(18061),m=n(26655);const u=()=>m.A.get("https://us-east1-netdata-analytics-bi.cloudfunctions.net/netdata_public_metrics_website"),h=e=>{let{downScale:t,...n}=e;const o=(0,i.A)("(min-width: 992px)");return(c=o?t[0]:t[1],e=>{let{children:t,...n}=e;return a.createElement(c,(0,r.A)({color:"bright"},n),t)})(n);var c};var p=n(12897),k=n.n(p),x=n(55042),b=n.n(x),w=new(k())({id:"cloudStatistics",use:"cloudStatistics-usage",viewBox:"0 0 194 146",content:''});b().add(w);const f=w,E=e=>a.createElement(o.Flex,(0,r.A)({as:"svg",height:f.height,width:f.width,viewBox:f.viewBox},e),a.createElement("use",{xlinkHref:"#".concat(f.id)})),v=()=>{const[e]=(0,g.A)((()=>({fetch:u})),[]),{nodesOnline:t,githubStars:n,dockerHubPulls:r}=(0,a.useMemo)((()=>{var t,n;return Array.isArray(e)?{nodesOnline:"1,265,463",githubStars:null===(t=e.find((e=>{let{key:t}=e;return"GitHub Stars"===t})))||void 0===t?void 0:t.value,dockerHubPulls:null===(n=e.find((e=>{let{key:t}=e;return"DockerHub Pulls"===t})))||void 0===n?void 0:n.value}:{}}),[e]);return a.createElement(o.Flex,{column:!0,gap:6},a.createElement(E,{height:"160px",padding:[0,0,4,0]}),a.createElement(o.Flex,{column:!0,gap:3},a.createElement(h,{downScale:[o.TextBigger,o.TextBigger],color:"bright",textAlign:"center"},a.createElement(h,{downScale:[o.TextBigger,o.TextBigger],color:t?"primaryHighlight":"bright",textAlign:"center",strong:!0},t||"..."),a.createElement(o.Box,{as:"span",margin:[0,0,0,3]},"Nodes Online")),a.createElement(h,{downScale:[o.TextBigger,o.TextBigger],color:"bright",textAlign:"center"},a.createElement(h,{downScale:[o.TextBigger,o.TextBigger],color:n?"primaryHighlight":"bright",textAlign:"center",strong:!0},n||"..."),a.createElement(o.Box,{as:"span",margin:[0,0,0,3]},"GitHub Stars")),a.createElement(h,{downScale:[o.TextBigger,o.TextBigger],color:"bright",textAlign:"center"},a.createElement(h,{downScale:[o.TextBigger,o.TextBigger],color:r?"primaryHighlight":"bright",textAlign:"center",strong:!0},r||"..."),a.createElement(o.Box,{as:"span",margin:[0,0,0,3]},"DockerHub Pulls"))))},y=e=>{let{icon:t,title:n}=e;const r=(0,i.A)("(min-width: 475px)");return a.createElement(o.Flex,{gap:4,alignItems:"center"},r&&a.createElement(o.Flex,{background:"transparent",justifyContent:"center",alignItems:"center",height:"56px",width:"56px",round:64,border:{side:"all",color:"secondaryColor"}},a.createElement(o.Icon,{name:t,height:"24px",width:"24px",color:"secondaryColor"})),a.createElement(o.Flex,{column:!0,justifyContent:"start",alignItems:"start",gap:2},a.createElement(o.TextBig,{color:"bright",strong:!0},n)))};var A=n(21828);const M=()=>a.createElement(o.Flex,{column:!0,padding:[16,4,12],width:{max:"500px"},margin:[0,"auto"],gap:14},a.createElement(v,null),a.createElement(o.Flex,{column:!0,gap:8,padding:[0,10]},d.map(((e,t)=>a.createElement(y,(0,r.A)({key:t},e))))),a.createElement(o.Flex,{column:!0,gap:6,alignItems:"center"},a.createElement(A.A,null),a.createElement(o.Flex,{column:!0,gap:4,alignItems:"center"},a.createElement(l.go,{theme:"dark"}),a.createElement(o.TextSmall,{textAlign:"center",color:"bright"},"Netdata is a member of the Cloud Native Computing Foundation (CNCF), and is one of the most starred projects in the CNCF landscape.")))),S=e=>{let{children:t,...n}=e;const l=(0,i.A)("(min-width: 998px)");return 
a.createElement(o.Flex,(0,r.A)({height:{min:"100vh"},flexWrap:!0},n),!window.envSettings.onprem&&a.createElement(c,{background:(0,o.getColor)(["neutral","grey25"])({theme:o.DarkTheme}),order:l?0:1,width:l?{base:"34%"}:{max:"100%"}},a.createElement(M,null)),a.createElement(c,{background:"mainBackground",order:l?1:0,width:l?{base:"66%"}:{max:"100%"}},a.createElement(o.Flex,{background:"transparent",column:!0,gap:8,padding:[12,4],width:{max:"500px"},margin:[0,"auto"]},a.createElement(s,null),t)))}},11418:(e,t,n)=>{n.r(t),n.d(t,{MagicLinkSent:()=>p,default:()=>k});n(62953),n(3296),n(27208),n(48408);var r=n(96540),a=n(8711),i=n(47767),o=n(83199),c=n(55337),l=n(84541),s=n(17182),d=n(11604),g=n(92155),m=n(63314);const u=(0,a.default)(o.Text).attrs({role:"button"}).withConfig({displayName:"magicLinkSent__ButtonText",componentId:"sc-ua6kmo-0"})(["cursor:pointer;"]),h=(0,g.A)(u),p=()=>{const{searchParams:e}=new URL(window.location.href),t=null===e||void 0===e?void 0:e.get("email"),{search:n,state:a={}}=(0,i.zy)(),{email:c}=a||{},g=t||c,u=window.location.hash,p=(0,d.Js)(),k=(0,r.useCallback)((()=>{const e=encodeURIComponent((0,s.V)("/sign-in".concat(n),u)),t=encodeURIComponent((0,s.V)("/sign-up/verify".concat(n),u));p({email:g,redirectURI:e,registerURI:t,resend:!0})}),[g]);return r.createElement(m.Ay,{feature:"MagicLinkSent",email:g},r.createElement(l.A,{"data-testid":"magicLinkSent"},r.createElement(o.H1,{textAlign:"center"},"Check your email!"),r.createElement(o.Flex,{column:!0,padding:[0,0,8,0],gap:8,justifyContent:"between",alignItems:"center"},r.createElement(o.Flex,{column:!0,gap:1,alignItems:"center"},r.createElement(o.TextBig,{textAlign:"center"},"We have sent an email to ",r.createElement(o.TextBig,{strong:!0},!!g&&g),"."),r.createElement(o.TextBig,{textAlign:"center"},"Please find this email (check your spam folder too) and click the button there to continue.")),r.createElement(o.Text,{textAlign:"center"},"Didn't receive it?"," ",r.createElement(h,{onClick:k,color:"primary","data-ga":"magicLikSent::click-resent::check-email-view"},"Click here to resend it.")))))},k=(0,c.g)(p,"light")},17182:(e,t,n)=>{n.d(t,{R:()=>o,V:()=>i});var r=n(38819);const a=["expires_at","error_code","error_msg_key","error_msg"],i=(e,t)=>{const n=(0,r.yq)(a),i=t.includes("join-callback")?decodeURIComponent(n):n;return"".concat(window.location.origin).concat(e,"#").concat(i)},o=(e,t)=>{const{search:n,hash:r}=window.location,a=encodeURIComponent(i("/sign-in".concat(n).concat(n.length?"&":"?","oauth=").concat(e,"&"),r)),o=encodeURIComponent(i("/sign-up/verify".concat(n).concat(n.length?"&":"?","oauth=").concat(e,"&"),r));return"/api/v2/auth/account/".concat(e,"?redirect_uri=").concat(a,"&register_uri=").concat(o).concat(t?"&is_unverified_registration=true":"")}}}]);
\ No newline at end of file
diff --git a/src/web/gui/v2/1782.d82eb301aa81b380dd0c.chunk.js b/src/web/gui/v2/1782.d82eb301aa81b380dd0c.chunk.js
deleted file mode 100644
index 46438f433..000000000
--- a/src/web/gui/v2/1782.d82eb301aa81b380dd0c.chunk.js
+++ /dev/null
@@ -1 +0,0 @@
-!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},n=(new Error).stack;n&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[n]="915cff30-b3e0-4d43-9fb7-29cb723a3367",e._sentryDebugIdIdentifier="sentry-dbid-915cff30-b3e0-4d43-9fb7-29cb723a3367")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof
self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[1782],{1782:(e,n,t)=>{t.r(n),t.d(n,{default:()=>b});var a=t(96540),o=t(5396),l=t(65566),d=t(83199),r=t(4659),s=t(50876);const c=()=>{const{sendButtonClickedLog:e}=(0,s.A)(),n=(0,a.useCallback)((()=>{window.open("https://www.netdata.cloud/secure-foss-agent/","_blank","noopener,noreferrer"),e({feature:"AgentBannerContactUs"})}),[e]);return a.createElement(d.Flex,{width:"100%",justifyContent:"center",alignItems:"center"},a.createElement(d.Text,null,"Your Agents are open by default. Secure them easily with"," ",a.createElement(r.A,{onClick:n,testId:"agent-banner-contact-link"},"Netdata Enterprise Agent"),"."))};var f=t(75542),i=t(63314),u=t(37618);const b=()=>{const{id:e}=(0,f.A)(),n=(0,a.useCallback)((()=>"dismissed-agent-banner-".concat(e)),[e]),{dismissed:t,onClose:d}=(0,l.A)({getLocalStorageKey:n,logKey:"AgentBannerDismiss"});return u.Ay&&!t?a.createElement(i.Ay,{feature:"AgentBanner"},a.createElement(o.A,{testId:"agent-banner",width:"100%",background:"successSemi",onClose:d,tooltipProps:{align:"top"},zIndex:20},a.createElement(c,null))):null}}}]);
\ No newline at end of file
diff --git a/src/web/gui/v2/1839.a4196d2a87ac0fdd9f34.chunk.js b/src/web/gui/v2/1839.a4196d2a87ac0fdd9f34.chunk.js
deleted file mode 100644
index fed71e9fc..000000000
--- a/src/web/gui/v2/1839.a4196d2a87ac0fdd9f34.chunk.js
+++ /dev/null
@@ -1 +0,0 @@
-!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="ef034845-b41b-4f78-8c45-36f3bd8c8152",e._sentryDebugIdIdentifier="sentry-dbid-ef034845-b41b-4f78-8c45-36f3bd8c8152")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[1839],{41839:(e,t,n)=>{n.r(t),n.d(t,{default:()=>u});var l=n(96540),a=n(83199),o=n(99904),r=n(47731),d=n(92155),i=n(63314);const c=(0,n(8711).default)(a.ModalContent).attrs((e=>{let{isMobile:t}=e;return{width:t?{base:"95vw"}:{}}})).withConfig({displayName:"styled__StyledModalContent",componentId:"sc-1vfmaif-0"})([""]),s=(0,d.A)(a.Button),u=()=>{const e=(0,r.J)(),{isModalVisible:t,trialEndsAt:n,onClose:d}=(0,o.A)();return t?l.createElement(a.Modal,{backdropProps:{backdropBlur:!0}},l.createElement(i.Ay,{feature:"BumpedWarningModal"},l.createElement(c,{isMobile:e},l.createElement(a.ModalHeader,null,l.createElement(a.Flex,{gap:2,alignItems:"center"},l.createElement(a.Icon,{name:"netdataPress",color:"text"}),l.createElement(a.H4,null,"Welcome to Netdata!"))),l.createElement(a.ModalBody,null,l.createElement(a.Flex,{column:!0,gap:4,width:{max:120},padding:[0,0,4,0]},l.createElement(a.TextBigger,null,"Welcome to your Business trial!"),l.createElement(a.Flex,{column:!0,gap:2},l.createElement(a.TextBigger,{lineHeight:1.5},"We are happy to inform you that we upgraded your account to Netdata Business, for free, until ",l.createElement(a.TextBigger,{strong:!0},n),"."),l.createElement(a.TextBigger,{lineHeight:1.5},"Explore all the Business features and keep enjoying the best of
Netdata!")))),l.createElement(a.ModalFooter,null,l.createElement(a.Flex,{gap:4,justifyContent:"end",padding:[1,2]},l.createElement(s,{feature:"BumpedWarningModalClose",label:"OK",textTransform:"uppercase",flavour:"hollow",icon:"thumb_up",onClick:d})))))):null}}}]); \ No newline at end of file diff --git a/src/web/gui/v2/185.42bab351ba68de7ca4aa.chunk.js b/src/web/gui/v2/185.42bab351ba68de7ca4aa.chunk.js deleted file mode 100644 index 6f2926191..000000000 --- a/src/web/gui/v2/185.42bab351ba68de7ca4aa.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},n=(new Error).stack;n&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[n]="d40cbb04-6538-4d72-8288-fa96fe4b50e8",e._sentryDebugIdIdentifier="sentry-dbid-d40cbb04-6538-4d72-8288-fa96fe4b50e8")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[185],{51719:(e,n,l)=>{l.d(n,{A:()=>i});var t=l(58168),a=l(96540),r=l(83199),d=l(54856),o=l(3914);const i=e=>{const n=(0,o.dg)();return a.createElement(r.Flex,(0,t.A)({gap:2,alignItems:"center"},e),a.createElement(r.Icon,{size:"small",color:"warning",name:"warning_triangle"}),a.createElement(r.Text,null,"This feature is only available to paid plans"),n?null:a.createElement(d.A,null))}},70185:(e,n,l)=>{l.r(n),l.d(n,{default:()=>u});var t=l(96540),a=l(83199),r=l(8910),d=l(3914),o=l(87860),i=l(37618),f=l(70895),s=l(51719),c=l(63314);const u=()=>{const e=(0,d.vt)(),n=(0,i.ES)(e)?i.gB:"",{hasPermission:l}=(0,f.j)({edit:!0});return(0,o.A)({spaceId:e,id:n,pollingInterval:63e3}),t.createElement(c.Ay,{feature:"SettingsConfigurations"},t.createElement(a.Flex,{height:"100%",column:!0,gap:4},t.createElement(a.Flex,{gap:3,alignItems:"center"},t.createElement(a.H3,null,"Configurations"),l?null:t.createElement(s.A,null)),t.createElement(r.default,null)))}}}]); \ No newline at end of file diff --git a/src/web/gui/v2/1876.e610906417b961290730.chunk.js b/src/web/gui/v2/1876.e610906417b961290730.chunk.js deleted file mode 100644 index b545b042a..000000000 --- a/src/web/gui/v2/1876.e610906417b961290730.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},d=(new Error).stack;d&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[d]="58cd7e74-4be7-447d-9061-c9c4d9d78755",e._sentryDebugIdIdentifier="sentry-dbid-58cd7e74-4be7-447d-9061-c9c4d9d78755")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[1876],{41876:(e,d,n)=>{n.r(d),n.d(d,{default:()=>u});n(9391);var t=n(96540),l=n(11604),o=n(50876),f=n(28738);const u=()=>{const{sendLog:e,isReady:d}=(0,o.A)(),n=(0,l.So)(),u=(0,t.useCallback)((()=>{e({feature:"SignUpThankYou"}).finally((()=>{n()}))}),[n,d]);return(0,t.useEffect)((()=>{let e=!0,n=null;return d?u():n=setTimeout((()=>{e&&u()}),1e3),()=>{e=!1,n&&(clearTimeout(n),n=null)}}),[d]),t.createElement(f.A,{title:"Welcome to Netdata!"})}}}]); \ No newline at end of file diff --git a/src/web/gui/v2/195.4cdbea6af54d14a95949.chunk.js 
b/src/web/gui/v2/195.4cdbea6af54d14a95949.chunk.js deleted file mode 100644 index 478a5f7a7..000000000 --- a/src/web/gui/v2/195.4cdbea6af54d14a95949.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="e74218ad-2906-4523-9997-f0ed6afc09dc",e._sentryDebugIdIdentifier="sentry-dbid-e74218ad-2906-4523-9997-f0ed6afc09dc")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[195],{52919:(e,t,a)=>{a.d(t,{D7:()=>r,Jp:()=>d,NT:()=>u,N_:()=>h,iS:()=>i,vE:()=>c,v_:()=>o,xI:()=>s,z6:()=>m});var l=a(8711),n=a(83199);const o=l.default.img.withConfig({displayName:"styled__Illustration",componentId:"sc-1yhntgl-0"})(["margin:0 auto;"]),r=(0,l.default)(n.Text).withConfig({displayName:"styled__StyledText",componentId:"sc-1yhntgl-1"})(["display:block;"]),d=l.default.div.withConfig({displayName:"styled__TextHeader",componentId:"sc-1yhntgl-2"})(["margin-bottom:",";font-weight:700;"],(0,n.getSizeBy)(2)),s=l.default.div.withConfig({displayName:"styled__StaticCheckmarks",componentId:"sc-1yhntgl-3"})(["margin-top:",";margin-bottom:",";"],(0,n.getSizeBy)(2),(0,n.getSizeBy)(5)),i=l.default.div.withConfig({displayName:"styled__CheckmarkLine",componentId:"sc-1yhntgl-4"})(["display:flex;align-items:center;"]),c=(0,l.default)(n.Icon).withConfig({displayName:"styled__StyledIcon",componentId:"sc-1yhntgl-5"})(["margin-right:4px;height:18px;> use{fill:",";}"],(0,n.getColor)("primary")),u=(0,l.default)(c).withConfig({displayName:"styled__HelpIcon",componentId:"sc-1yhntgl-6"})(["vertical-align:middle;"]),m=l.default.div.withConfig({displayName:"styled__LearnMoreSection",componentId:"sc-1yhntgl-7"})(["margin-top:",";"],(0,n.getSizeBy)(4)),h=l.default.a.withConfig({displayName:"styled__Link",componentId:"sc-1yhntgl-8"})(["text-decoration:underline;color:",";&:visited{color:",";}"],(0,n.getColor)("success"),(0,n.getColor)("success"))},10195:(e,t,a)=>{a.r(t),a.d(t,{default:()=>B});a(62953);var l=a(96540),n=a(83199),o=a(87659),r=a(35304),d=a(21591),s=a(47767),i=a(15327),c=a(74618),u=a(45765),m=a(78969),h=a(69765),b=a(3914),g=a(67544),f=a(49667),y=a(8018),p=a(52919);const E=e=>{let{onClose:t}=e;const a=(0,s.Zp)(),[o,r]=(0,l.useState)(!1),[d,E]=(0,l.useState)(""),{id:w,slug:v}=(0,b.ap)(),A=(0,h.ID)(),C=(0,h.QW)(),D=(0,l.useCallback)((e=>{let{slug:l}=e;t(),a("/spaces/".concat(v,"/rooms/").concat(C,"/dashboards/").concat(l))}),[v,C]),x=(0,g.Dn)(w,A,{onSuccess:D}),T=(0,l.useCallback)((()=>x({name:d})),[d]),k=d.length>0;return l.createElement(i.GO,{onClose:t},l.createElement(c.z,{onClose:t,title:"New Dashboard"},l.createElement(n.Button,{label:"Add",icon:"plus",onClick:T,"data-testid":"addNewDashboardModal-add-button"})),l.createElement(u.U,null,"Create new Dashboard"),l.createElement(i.Yv,null,l.createElement(y.A,{value:d,label:"Name",onChange:e=>{E(e.target.value)},isValid:o,setIsValid:r,isDirty:k,instantFeedback:"all",onKeyDown:e=>e.keyCode===m.I7&&o&&T,"data-testid":"addNewDashboardModal-dashboardName-input",containerStyles:{flex:{grow:0,shrink:0},margin:[0,0,4,0]}}),l.createElement(p.D7,null,l.createElement(p.Jp,null,"What can you do with Dashboards?"),"Combine all the metrics that matter to you, from all the nodes that 
matter to you, into one clean interface that helps you visually detect anomalies as they\u2019re happening. Building a new dashboard will only take a few minutes, and the metrics are always real-time."),l.createElement(p.xI,null,l.createElement(p.iS,null,l.createElement(p.vE,{name:"check"}),l.createElement(n.Text,null,"Add charts from your infrastructure")),l.createElement(p.iS,null,l.createElement(p.vE,{name:"check"}),l.createElement(n.Text,null,"Group information in a meaningful way")),l.createElement(p.z6,null,l.createElement(p.NT,{name:"help"}),l.createElement(n.Text,null,"Learn more about Dashboards")," ",l.createElement(p.N_,{href:"https://learn.netdata.cloud/docs/cloud/visualize/dashboards",target:"_blank",rel:"noopener noreferrer"},"In our documentation"))),l.createElement(p.v_,{src:f.$})))};var w=a(28738),v=a(63950),A=a.n(v),C=(a(41393),a(8159),a(98992),a(81454),a(37550),a(73865));const D=e=>{let{handleOpenAddDashboardModal:t,hasPermissionToAddDashboard:a,hasPermissionToDeleteDashboard:n,refetch:o}=e;const r=(0,h.XA)("name"),s=(0,g.Ts)(),i=(0,d.q)(),{hasLimitations:c,maxDashboards:u}=(0,C.A)(),m=async(e,t)=>{const a=(Array.isArray(e)?e:[e]).map((e=>{let{id:t}=e;return t}));await s({ids:a},{onSuccess:()=>t.resetRowSelection()}),o()},b=(0,l.useMemo)((()=>({delete:{confirmLabel:"Yes, delete",confirmationMessage:e=>l.createElement(l.Fragment,null,"You are about to delete ",l.createElement("strong",null,e.name)," from ",l.createElement("strong",null,r),".",l.createElement("br",null),"Are you sure you want to continue?"),confirmationTitle:e=>"Delete ".concat(e.name),declineLabel:"Cancel",handleAction:m,tooltipText:"Delete dashboard",isVisible:n}})),[n]),f=(0,l.useMemo)((()=>({addEntry:{handleAction:t,isVisible:a,tooltipText:"Create dashboard",disabledTooltipText:"Your plan does not allow you to create more than ".concat(u," dashboards."),disabled:c&&i.length>=u},delete:{confirmLabel:"Yes, delete",confirmationMessage:(e,t)=>l.createElement(l.Fragment,null,"You are about to delete"," ",l.createElement("strong",null,t.length>1?"".concat(t.length," dashboards"):t[0].name)," ","from ",l.createElement("strong",null,r),".",l.createElement("br",null),"Are you sure you want to continue?"),confirmationTitle:()=>"Delete",disabledTooltipText:"Delete is disabled because you haven't selected dashboards",declineLabel:"Cancel",handleAction:m,tooltipText:"Delete dashboards",isVisible:n}})));return{rowActions:n?b:[],bulkActions:[a,n].some(Boolean)?f:[]}};var x=a(68980),T=a(29217);const k=e=>{const t=new Date(e),[,a,l,n]=t.toDateString().split(" ");return"".concat(a," ").concat(l,", ").concat(n)};var _=a(4659);const S=e=>{let{name:t}=e;return l.createElement(n.Flex,{width:"300px",column:!0,gap:1},l.createElement(n.Text,{strong:!0},"Locked!"),l.createElement(n.Text,null,"The dashboard ",l.createElement(n.Text,{strong:!0},t)," is locked."),l.createElement(n.Text,null,"Your plan is limited to 1 dashboard. 
You can delete some dashboards or upgrade your plan for no limitations."))},I=e=>{var t;let{getValue:a,row:o}=e;const r="notAvailable"==(null===o||void 0===o||null===(t=o.original)||void 0===t?void 0:t.state),d=a();return r?l.createElement(T.A,{content:l.createElement(S,{name:d}),align:"bottom",isBasic:!0},l.createElement(n.Flex,{alignItems:"center"},l.createElement(n.Text,null,d),l.createElement(n.Icon,{name:"padlock",height:"12px",color:"text"}))):l.createElement(_.A,{as:"Link",to:o.original.slug},d)};var N=a(46741);const L=e=>{let{refetch:t,handleOpenAddDashboardModal:a}=e;const[,o]=(0,l.useState)(),r=(0,d.q)(),s=(0,x.Sf)(r),i=(0,N.JT)("dashboard:Create"),c=(0,N.JT)("dashboard:Delete"),u=(0,l.useMemo)((()=>[{id:"name",accessorKey:"name",header:"Name",cell:I},{id:"updatedAt",accessorKey:"updatedAt",header:"Last modified",cell:e=>{let{getValue:t}=e;return l.createElement(n.TextSmall,null,k(t()))},sortingFn:"datetime"},{id:"createdAt",accessorKey:"createdAt",header:"Created",cell:e=>{let{getValue:t}=e;return l.createElement(n.TextSmall,null,k(t()))},sortingFn:"datetime"}]),[]),{rowActions:m,bulkActions:h}=D({dashboards:s,handleOpenAddDashboardModal:a,hasPermissionToAddDashboard:i,hasPermissionToDeleteDashboard:c,refetch:t});return{rowActions:m,bulkActions:h,data:s,enableSelection:c,columns:u,enableSorting:!0,globalFilterFn:(0,l.useCallback)(((e,t,a)=>{var l,n,o,r,d,s,i;const c=null===a||void 0===a||null===(l=a.toLowerCase)||void 0===l?void 0:l.call(a),u=null===(n=e.getValue("name"))||void 0===n||null===(o=n.toLowerCase)||void 0===o?void 0:o.call(n),m="".concat(null===(r=k(e.getValue("updatedAt")))||void 0===r||null===(d=r.toLowerCase)||void 0===d?void 0:d.call(r)),h="".concat(null===(s=k(e.getValue("createdAt")))||void 0===s||null===(i=s.toLowerCase)||void 0===i?void 0:i.call(s));return u.includes(c)||m.includes(c)||h.includes(c)}),[]),setGlobalFilter:o}},F=[{id:"updatedAt",desc:!0}],M=e=>{let{refetch:t=A(),handleOpenAddDashboardModal:a}=e;const{enableSelection:o,columns:r,data:d,rowActions:s,bulkActions:i,enableSorting:c,globalFilterFn:u,setGlobalFilter:m}=L({refetch:t,handleOpenAddDashboardModal:a});return l.createElement(n.Table,{rowActions:s,enableSelection:o,data:d,dataColumns:r,bulkActions:i,enableSorting:c,globalFilterFn:u,onSearch:m,testPrefixCallback:e=>e.name,sortBy:F,title:"Dashboards"})};var V=a(63314);const B=()=>{const e=(0,r.A)(),[t,,a,s]=(0,o.A)(!1);return(0,d.RQ)()?l.createElement(V.Ay,{feature:"DashboardOverview"},l.createElement(n.Flex,{position:"relative",width:"100%",height:"100%",flex:"1",column:!0,padding:[3,3,0],overflow:"hidden"},l.createElement(M,{refetch:e,handleOpenAddDashboardModal:a}),t&&l.createElement(E,{onClose:s}))):l.createElement(w.A,{title:"Loading dashboards..."})}}}]); \ No newline at end of file diff --git a/src/web/gui/v2/2007.b33ce2b4b736228fd681.chunk.js b/src/web/gui/v2/2007.b33ce2b4b736228fd681.chunk.js deleted file mode 100644 index c08dc41d1..000000000 --- a/src/web/gui/v2/2007.b33ce2b4b736228fd681.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},n=(new Error).stack;n&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[n]="5ee71555-9d61-4693-bbdd-297fb8bf64e8",e._sentryDebugIdIdentifier="sentry-dbid-5ee71555-9d61-4693-bbdd-297fb8bf64e8")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof 
self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[2007],{62007:(e,n,l)=>{l.r(n),l.d(n,{default:()=>u});var d=l(96540),t=l(22292),f=(l(62953),l(83199)),o=l(19673);const b=()=>{const{loaded:e,value:n}=(0,o.JN)(),l=[n.class,...n.trialEndsAt?["(Trial)"]:[]].join(" ");return e&&l.length?d.createElement(f.Flex,null,d.createElement(f.Pill,null,l)):null},u=()=>(0,t.uW)("isAnonymous")?null:d.createElement(b,null)}}]); \ No newline at end of file diff --git a/src/web/gui/v2/252.40edc9b0f6da1422f40b.chunk.js b/src/web/gui/v2/252.40edc9b0f6da1422f40b.chunk.js deleted file mode 100644 index 6883dbbe3..000000000 --- a/src/web/gui/v2/252.40edc9b0f6da1422f40b.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},n=(new Error).stack;n&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[n]="7da4450f-4e60-469b-a9f3-7fd9037f778a",e._sentryDebugIdIdentifier="sentry-dbid-7da4450f-4e60-469b-a9f3-7fd9037f778a")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[252],{11726:(e,n,t)=>{t.r(n),t.d(n,{default:()=>b});t(62953);var o=t(96540),r=t(39225),a=t(45467),l=t(83199),i=t(29217),c=t(8239),s=t(28738),u=t(57605),d=t(70895),f=t(51719);const p=(0,r.A)((()=>Promise.all([t.e(7144),t.e(7857),t.e(1220),t.e(749),t.e(9473),t.e(9292),t.e(8910),t.e(4140)]).then(t.bind(t,8910))),"Configuration"),b=()=>{const[e,n]=(0,c.OD)(),{node:t}=e||{},{openItems:r}=(0,c.IP)(),b=(0,u.A)(),{hasPermission:m}=(0,d.j)({edit:!0});(0,a.A)((()=>{b(!!e)}),[!!e]);const g=(0,o.useCallback)((()=>{n(null)}),[n]),v=(0,o.useCallback)((()=>{r.length||g()}),[r,g]);return e?o.createElement(l.Modal,{backdropProps:{backdropBlur:!0},onEsc:v},o.createElement(l.ModalContent,{width:"90vw",height:"90vh"},o.createElement(l.ModalHeader,{justifyContent:"between",padding:[4],round:!0},o.createElement(l.Flex,{gap:3,alignItems:"center"},o.createElement(l.H4,null,t.name),m?null:o.createElement(f.A,null)),o.createElement(i.A,{align:"bottom",content:"Close window",zIndex:7050},o.createElement(l.Flex,null,o.createElement(l.ModalCloseButton,{onClose:g,testId:"assistant-modal-close-button"})))),o.createElement(l.ModalBody,{height:"calc(100% - 55px)",overflow:{vertical:"auto"}},o.createElement(o.Suspense,{fallback:o.createElement(s.A,{title:"Loading configuration..."})},o.createElement(p,{node:t,inModal:!0}))))):null}},54856:(e,n,t)=>{t.d(n,{A:()=>u});var o=t(58168),r=t(96540),a=t(84976),l=t(83199),i=t(4659),c=t(46741),s=t(27994);const u=function(){let{containerProps:e={},...n}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const t=(0,c.JT)("billing:ReadAll"),{url:u}=(0,s.A)();return u?r.createElement(l.Flex,(0,o.A)({background:"sideBarMini",border:{side:"all",color:"border"},padding:[1,2],round:!0},e),r.createElement(i.A,(0,o.A)({align:"bottom",as:a.N_,boxProps:{as:l.Flex},color:"text",Component:l.TextMicro,content:t?"Upgrade your plan in order to use this feature":"You have no permissions to manage billing",disabled:!t,hoverColor:"textFocus",showToolTip:!0,strong:!0,to:u},n),"Upgrade now!")):null}},51719:(e,n,t)=>{t.d(n,{A:()=>c});var o=t(58168),r=t(96540),a=t(83199),l=t(54856),i=t(3914);const c=e=>{const 
n=(0,i.dg)();return r.createElement(a.Flex,(0,o.A)({gap:2,alignItems:"center"},e),r.createElement(a.Icon,{size:"small",color:"warning",name:"warning_triangle"}),r.createElement(a.Text,null,"This feature is only available to paid plans"),n?null:r.createElement(l.A,null))}},84428:(e,n,t)=>{var o=t(78227)("iterator"),r=!1;try{var a=0,l={next:function(){return{done:!!a++}},return:function(){r=!0}};l[o]=function(){return this},Array.from(l,(function(){throw 2}))}catch(i){}e.exports=function(e,n){try{if(!n&&!r)return!1}catch(i){return!1}var t=!1;try{var a={};a[o]=function(){return{next:function(){return{done:t=!0}}}},e(a)}catch(i){}return t}},87290:(e,n,t)=>{var o=t(50516),r=t(19088);e.exports=!o&&!r&&"object"==typeof window&&"object"==typeof document},50516:e=>{e.exports="object"==typeof Deno&&Deno&&"object"==typeof Deno.version},19088:(e,n,t)=>{var o=t(24475),r=t(44576);e.exports="process"===r(o.process)},10916:(e,n,t)=>{var o=t(24475),r=t(80550),a=t(94901),l=t(92796),i=t(33706),c=t(78227),s=t(87290),u=t(50516),d=t(96395),f=t(77388),p=r&&r.prototype,b=c("species"),m=!1,g=a(o.PromiseRejectionEvent),v=l("Promise",(function(){var e=i(r),n=e!==String(r);if(!n&&66===f)return!0;if(d&&(!p.catch||!p.finally))return!0;if(!f||f<51||!/native code/.test(e)){var t=new r((function(e){e(1)})),o=function(e){e((function(){}),(function(){}))};if((t.constructor={})[b]=o,!(m=t.then((function(){}))instanceof o))return!0}return!n&&(s||u)&&!g}));e.exports={CONSTRUCTOR:v,REJECTION_EVENT:g,SUBCLASSING:m}},90537:(e,n,t)=>{var o=t(80550),r=t(84428),a=t(10916).CONSTRUCTOR;e.exports=a||!r((function(e){o.all(e).then(void 0,(function(){}))}))},96167:(e,n,t)=>{var o=t(46518),r=t(69565),a=t(79306),l=t(36043),i=t(1103),c=t(72652);o({target:"Promise",stat:!0,forced:t(90537)},{allSettled:function(e){var n=this,t=l.f(n),o=t.resolve,s=t.reject,u=i((function(){var t=a(n.resolve),l=[],i=0,s=1;c(e,(function(e){var a=i++,c=!1;s++,r(t,n,e).then((function(e){c||(c=!0,l[a]={status:"fulfilled",value:e},--s||o(l))}),(function(e){c||(c=!0,l[a]={status:"rejected",reason:e},--s||o(l))}))})),--s||o(l)}));return u.error&&s(u.value),t.promise}})}}]); \ No newline at end of file diff --git a/src/web/gui/v2/3104.3b70865e21a81a616af3.chunk.js b/src/web/gui/v2/3104.3b70865e21a81a616af3.chunk.js deleted file mode 100644 index ca5e9f24d..000000000 --- a/src/web/gui/v2/3104.3b70865e21a81a616af3.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="1df98f68-5fbc-4150-8079-c9894daf86c8",e._sentryDebugIdIdentifier="sentry-dbid-1df98f68-5fbc-4150-8079-c9894daf86c8")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[3104],{3104:(e,t,a)=>{a.r(t),a.d(t,{default:()=>Ke});var n=a(96540),o=a(11128),r=(a(14905),a(98992),a(8872),a(62953),a(47762));const l=(e,t)=>t?e[t]:e;var i=a(99739),s=a(67990),d=a(83199),c=a(12897),m=a.n(c),h=a(55042),u=a.n(h),p=new(m())({id:"notCapableNodes",use:"notCapableNodes-usage",viewBox:"0 0 231 230",content:''});u().add(p);const 
L=p,f=()=>n.createElement(d.Flex,{width:"100%",justifyContent:"center",alignItems:"center",gap:3},n.createElement("svg",{viewBox:L.viewBox,height:"222px"},n.createElement("use",{xlinkHref:"#".concat(L.id)})),n.createElement(d.TextBig,{strong:!0},"Your nodes are either offline or not configured for ML"));var g=a(4659);const y=()=>n.createElement(d.Flex,{height:10.5,width:{min:10.5},background:"warningText",justifyContent:"center",alignItems:"center",round:"50%",sx:{borderRadius:"50%"}},n.createElement(d.Icon,{color:"warningBackground",name:"informationPress"})),v=e=>{let{to:t,children:a}=e;return n.createElement(g.A,{Component:d.Text,href:t,target:"_blank",rel:"noopener noreferrer"},a)},E=()=>n.createElement(d.Flex,{column:!0,gap:1},n.createElement(d.Text,{strong:!0,color:"textDescription"},"Not Configured Nodes"),n.createElement(d.Text,{color:"textDescription"},"Learn how to configure your nodes"," ",n.createElement(v,{to:"https://learn.netdata.cloud/docs/cloud/insights/anomaly-advisor#enable-ml-on-netdata-agent"},"to support anomaly advisor"),".")),M=()=>n.createElement(d.Flex,{column:!0,gap:1},n.createElement(d.Text,{strong:!0,color:"textDescription"},"Not Capable Nodes"),n.createElement(d.Text,{color:"textDescription"},"All nodes need to be updated to a version higher than"," ",n.createElement(d.Text,{strong:!0,color:"textDescription"},"1.32"),". Learn how to"," ",n.createElement(v,{to:"https://learn.netdata.cloud/docs/agent/packaging/installer/update"},"update to the latest Netdata version"),".")),b=()=>n.createElement(d.Box,{round:!0,background:"elementBackground",padding:[4]},n.createElement(d.Flex,{gap:4,alignItems:"start"},n.createElement(y,null),n.createElement(d.Flex,{gap:4,column:!0},n.createElement(E,null),n.createElement(M,null)))),A=()=>n.createElement(d.Flex,{column:!0,width:"100%",height:"100%",justifyContent:"start",padding:[8,4]},n.createElement(d.Flex,{column:!0,gap:8},n.createElement(f,null),n.createElement(b,null)));var w=a(58168),x=a(3705),C=a(83084),k=a(69765),I=a(23931),Z=a(3914),D=a(16579),_=a(81416),H=a(27467),T=a(99292),z=a(79304),B=(a(9920),a(41393),a(3949),a(81454),a(25509),a(65223),a(60321),a(41927),a(11632),a(64377),a(66771),a(12516),a(68931),a(52514),a(35694),a(52774),a(49536),a(21926),a(94483),a(16215),a(45387)),N=a(61427),S=a(21290),F=a(93658);const V=e=>Math.floor(e/1e3),R=()=>{const[e,t]=(0,_.yD)(),{localeTimeString:a}=(0,S.$j)(),[o,r,l]=(0,n.useMemo)((()=>{const n=V(e),o=V(t);return[(0,F.getDateDiff)(n,o),a(e,{secs:!0}),a(t,{secs:!0})]}),[e,t]);return e&&t?n.createElement(d.Flex,{gap:1,alignItems:"baseline"},n.createElement(d.TextMicro,{strong:!0,color:"textLite"},n.createElement(d.TextSmall,{strong:!0,color:"textLite"},r," \u2192 ",l," \u2022")," ","Duration: ",o)):null};var G=a(8711);const O=(0,G.keyframes)(["from{transform:translate(412px,0);}"]),U=G.default.g.withConfig({displayName:"skeleton__Animated",componentId:"sc-gwkfye-0"})(["animation:",";"],(e=>{let{animate:t}=e;return t?(0,G.css)([""," 1s linear infinite"],O):""})),j=e=>{let{animate:t=!1}=e;return n.createElement("svg",{width:"1140",height:"80",viewBox:"0 0 1140 80",fill:"none",xmlns:"http://www.w3.org/2000/svg"},n.createElement("mask",{id:"mask0_467_73096",style:{maskType:"alpha"},maskUnits:"userSpaceOnUse",x:"0",y:"0",width:"1140",height:"80"},n.createElement("rect",{width:"1140",height:"80",fill:"#C4C4C4"})),n.createElement("g",{mask:"url(#mask0_467_73096)"},n.createElement(U,{animate:t},n.createElement("path",{fillRule:"evenodd",clipRule:"evenodd",d:"M-374 70L-401 
78H-192.5H14H25.5H212L180 70L157 55L144 70L105 11L69.5 70H41L17.8524 76.8586L-8 73L-39 38L-62 71L-85.5 61.5L-100.5 73L-114.5 61.5L-146 41L-168 70L-192.5 78L-235 70L-258 55L-271 70L-310 11L-327.75 40.5L-345.5 70H-374Z",fill:"#8F9EAA"}),n.createElement("path",{d:"M-401 78L-401.142 77.5206L-401 78.5V78ZM-374 70V69.5H-374.073L-374.142 69.5206L-374 70ZM212 78V78.5L212.121 77.5149L212 78ZM180 70L179.727 70.4188L179.797 70.4647L179.879 70.4851L180 70ZM157 55L157.273 54.5812L156.908 54.3429L156.622 54.6725L157 55ZM144 70L143.583 70.2757L143.946 70.8254L144.378 70.3275L144 70ZM105 11L105.417 10.7243L104.98 10.0632L104.572 10.7422L105 11ZM69.5 70V70.5H69.7827L69.9284 70.2578L69.5 70ZM41 70V69.5H40.9275L40.8579 69.5206L41 70ZM17.8524 76.8586L17.7785 77.3531L17.8882 77.3694L17.9944 77.338L17.8524 76.8586ZM-8 73L-8.3743 73.3315L-8.25369 73.4677L-8.07382 73.4945L-8 73ZM-39 38L-38.6257 37.6685L-39.0469 37.1929L-39.4102 37.7141L-39 38ZM-62 71L-62.1874 71.4636L-61.8178 71.613L-61.5898 71.2859L-62 71ZM-85.5 61.5L-85.3126 61.0365L-85.5775 60.9294L-85.8042 61.1032L-85.5 61.5ZM-100.5 73L-100.817 73.3864L-100.511 73.6383L-100.196 73.3968L-100.5 73ZM-114.5 61.5L-114.183 61.1137L-114.204 61.0961L-114.227 61.0809L-114.5 61.5ZM-146 41L-145.727 40.5809L-146.117 40.3272L-146.398 40.6978L-146 41ZM-168 70L-167.845 70.4753L-167.696 70.4268L-167.602 70.3022L-168 70ZM-235 70L-235.273 70.4188L-235.19 70.473L-235.092 70.4914L-235 70ZM-258 55L-257.727 54.5812L-258.092 54.3429L-258.378 54.6725L-258 55ZM-271 70L-271.417 70.2757L-271.054 70.8254L-270.622 70.3275L-271 70ZM-310 11L-309.583 10.7243L-310.02 10.0632L-310.428 10.7422L-310 11ZM-345.5 70V70.5H-345.217L-345.072 70.2578L-345.5 70ZM-400.858 78.4794L-373.858 70.4794L-374.142 69.5206L-401.142 77.5206L-400.858 78.4794ZM-192.5 77.5H-401V78.5H-192.5V77.5ZM14 77.5H-192.5V78.5H14V77.5ZM14 78.5H25.5V77.5H14V78.5ZM25.5 78.5H212V77.5H25.5V78.5ZM212.121 77.5149L180.121 69.5149L179.879 70.4851L211.879 78.4851L212.121 77.5149ZM180.273 69.5812L157.273 54.5812L156.727 55.4188L179.727 70.4188L180.273 69.5812ZM156.622 54.6725L143.622 69.6725L144.378 70.3275L157.378 55.3275L156.622 54.6725ZM144.417 69.7243L105.417 10.7243L104.583 11.2757L143.583 70.2757L144.417 69.7243ZM104.572 10.7422L69.0716 69.7422L69.9284 70.2578L105.428 11.2578L104.572 10.7422ZM69.5 69.5H41V70.5H69.5V69.5ZM40.8579 69.5206L17.7103 76.3792L17.9944 77.338L41.1421 70.4794L40.8579 69.5206ZM-8.07382 73.4945L17.7785 77.3531L17.9262 76.364L-7.92618 72.5055L-8.07382 73.4945ZM-39.3743 38.3315L-8.3743 73.3315L-7.6257 72.6685L-38.6257 37.6685L-39.3743 38.3315ZM-61.5898 71.2859L-38.5898 38.2859L-39.4102 37.7141L-62.4102 70.7141L-61.5898 71.2859ZM-85.6874 61.9636L-62.1874 71.4636L-61.8126 70.5364L-85.3126 61.0365L-85.6874 61.9636ZM-100.196 73.3968L-85.1958 61.8968L-85.8042 61.1032L-100.804 72.6032L-100.196 73.3968ZM-114.817 61.8864L-100.817 73.3864L-100.183 72.6136L-114.183 61.1137L-114.817 61.8864ZM-146.273 41.4191L-114.773 61.9191L-114.227 61.0809L-145.727 40.5809L-146.273 41.4191ZM-167.602 70.3022L-145.602 41.3022L-146.398 40.6978L-168.398 69.6978L-167.602 70.3022ZM-192.345 78.4753L-167.845 70.4753L-168.155 69.5247L-192.655 77.5247L-192.345 78.4753ZM-235.092 70.4914L-192.592 78.4914L-192.408 77.5086L-234.908 69.5086L-235.092 70.4914ZM-258.273 55.4188L-235.273 70.4188L-234.727 69.5812L-257.727 54.5812L-258.273 55.4188ZM-270.622 70.3275L-257.622 55.3275L-258.378 54.6725L-271.378 69.6725L-270.622 70.3275ZM-310.417 11.2757L-271.417 70.2757L-270.583 69.7243L-309.583 10.7243L-310.417 11.2757ZM-327.322 40.7578L-309.572 11.2578L-310.428 
10.7422L-328.178 40.2422L-327.322 40.7578ZM-345.072 70.2578L-327.322 40.7578L-328.178 40.2422L-345.928 69.7422L-345.072 70.2578ZM-374 70.5H-345.5V69.5H-374V70.5Z",fill:"#CFD5DA"})),n.createElement("mask",{id:"mask1_467_73096",style:{maskType:"alpha"},maskUnits:"userSpaceOnUse",x:"52",y:"0",width:"1140",height:"80"},n.createElement("rect",{x:"52",width:"1140",height:"80",fill:"#C4C4C4"})),n.createElement("g",{mask:"url(#mask1_467_73096)"},n.createElement("rect",{x:"55.5",y:"-1.5",width:"1140",height:"80",fill:"#CFD5DA",fillOpacity:"0.25",stroke:"#8F9EAA",strokeDasharray:"2 5"}),n.createElement("rect",{x:"55",width:"86",height:"3",fill:"#CFD5DA"}))))};a(9391);var K=new(m())({id:"error-state",use:"error-state-usage",viewBox:"0 0 199 79",content:''});u().add(K);const Y=K;var P=a(50876),q=a(63314);const J=()=>n.createElement("svg",{viewBox:Y.viewBox,width:"197"},n.createElement("use",{xlinkHref:"#".concat(Y.id)})),W=(0,G.default)(d.Button).withConfig({displayName:"errorView__RetryButton",componentId:"sc-1n9qb8k-0"})(["white-space:normal !important;align-items:flex-start !important;width:230px !important;font-size:12px !important;font-weight:normal !important;& > *{align-items:start !important;margin:0 !important;padding:0 !important;}"]),X=e=>{let{onRetry:t}=e;const{sendLog:a,isReady:o}=(0,P.A)(),r=(0,n.useCallback)((()=>{a({feature:"AnomalyAdvisor",description:"Retry"}).finally((()=>{"function"==typeof t&&t()}))}),[o]);return n.createElement(q.DL,{feature:"AnomalyAdvisor"},n.createElement(d.Flex,{column:!0,alignItems:"center",width:"100%"},n.createElement(J,null),n.createElement(d.TextSmall,{margin:[1,0,0],color:"textDescription"},"Something went wrong."),n.createElement(W,{margin:[2,0,0],padding:[0],onClick:r,icon:"reload",flavour:"borderless",label:"Retry fetching anomalies for the same timeframe",height:"initial",width:"initial"})))},$=e=>{let{error:t,loading:a,hasData:o}=e;return t?n.createElement(d.TextSmall,null,"No data"===t?"No data for this period. 
Try highlighting an other area.":t.errorMessage||"Something went wrong"):a?n.createElement(d.TextSmall,null,"Searching for anomalies..."):o?n.createElement(d.TextSmall,{color:"textDescription"},"Anomalous metrics in the selected timeframe sorted from most anomalous to least."):n.createElement(d.TextSmall,null,"You haven't highlighted any timeframe yet.")},Q=(0,n.forwardRef)(((e,t)=>{let{error:a,loading:o,hasData:r,onRetry:l,totalDimensionsCount:i,...s}=e;return n.createElement(d.Flex,(0,w.A)({ref:t,column:!0,gap:2,padding:[4],background:"sideBar"},s),n.createElement(d.Flex,{gap:1,alignItems:"baseline"},n.createElement(d.H4,null,i?"".concat(i," "):"","Anomalous metrics"),n.createElement(R,null)),n.createElement($,{hasData:r,loading:o,error:a}),a?n.createElement(X,{onRetry:l}):n.createElement(j,{animate:o}))})),ee=(0,n.forwardRef)(((e,t)=>{const a=(0,o.w7)({extraKey:"anomalies",merge:!1,scoped:!0}),{loaded:r,loading:l,error:i,totalDimensionsCount:s}=(0,N.G9)({nodeIds:a,flavour:"anomaly"}),{sendLog:d,isReady:c}=(0,P.A)(),m=(0,n.useRef)();return(0,n.useEffect)((()=>{null!==m&&void 0!==m&&m.current||!c||!r||(d({feature:"AnomalyAdvisor",...!i&&r?{isSuccess:!0}:{},...i?{isFailue:!0}:{},...r?{totalDimensionsCount:s}:{}}),m.current=!0)}),[l,r,i,d,c]),n.createElement(Q,(0,w.A)({ref:t,loading:l,error:i,hasData:r,totalDimensionsCount:s},e))})),te={"anomaly_detection.anomaly_rate":{name:"Anomaly Rate",info:"This is the percentage of metrics that are anomalous."},"anomaly_detection.dimensions":{name:"Count of Anomalous Metrics",info:"Variance in the amount of anomalous metrics over time could indicate unexpected behavior that merits investigation."},"anomaly_detection.detector_events":{name:"Anomaly Events Detected ",info:"An anomaly event is a period of time when a node has persistently elevated anomaly rates across all metrics. This may indicate unexpected behavior that merits investigation."}},ae={"Anomaly advisor":{showAR:!1,name:"Anomaly advisor",info:n.createElement(d.Flex,{column:!0,gap:2},n.createElement(d.TextSmall,{color:"sectionDescription"},"Machine Learning powered automated anomaly detection running at the edge."),n.createElement(d.TextSmall,{color:"sectionDescription"},"Use this page as a starting point to explore potential anomalies. 
Learn more on"," ",n.createElement(g.A,{Component:d.TextSmall,href:"https://learn.netdata.cloud/docs/cloud/insights/anomaly-advisor",target:"_blank",rel:"noopener noreferrer"},"how to")," ","use Anomaly Advisor."),n.createElement(d.Flex,{gap:1,alignItems:"center"},n.createElement(d.Icon,{name:"highlightArea",color:"sectionDescription",size:"small"}),n.createElement(d.TextSmall,{color:"sectionDescription"},"Highlight a time-frame of interest to explore potential anomalies.")))},"Anomalous metrics":{info:n.createElement(ee,null)}},ne=Object.keys(te),oe=function(e,t){let{os:a,extraKey:n}=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{},o={};const{menuGroups:r,subMenus:l,menuGroupChartIds:i}=((e,t)=>{const a={},n={},o={};return ne.forEach(((e,a)=>{t[e]={chartId:e,id:"Anomaly advisor",subMenuId:e,priority:-(a+1)}})),e.forEach(((e,a)=>{t[e]={chartId:e,id:"Anomalous metrics",subMenuId:e,priority:a+1}})),[...ne,...e].forEach((e=>{const r=t[e],l="".concat(r.id,"|").concat(r.subMenuId);n[l]||(n[l]=[]),a[r.id]||(a[r.id]=new Set),o[r.id]||(o[r.id]=[]),r.chartId&&(n[l].push(r.chartId),o[r.id].push(r.chartId)),l&&a[r.id].add(l)})),{chartMenus:t,menuGroups:a,subMenus:n,menuGroupChartIds:o}})(e,o),s={},d=Object.keys(r).reduce(((e,t)=>{const a=i[t],l=[...r[t]],s=o[a[0]];return e[t]={...s,...ae[t],level:0,id:t,subMenuIds:l,subMenuChartIds:a,link:"".concat((0,z.A)("menu_".concat(t))),size:24,forceVisibility:!0,arFlavour:"anomaly",extraKey:n},e}),{}),c=Object.keys(l).reduce(((e,t)=>{const a=l[t];if(!a.length)return e;const r=o[a[0]],{id:i}=d[r.id],[s,c]=r.chartId.split("::"),m=te[r.chartId];return e[t]={...r,name:c?"".concat(s," > ").concat(c):s,...m,level:1,id:t,menuGroupId:i,chartIds:a,link:"".concat((0,z.A)("menu_".concat(r.id,"_submenu_").concat(r.subMenuId))),size:24,forceVisibility:!0,arFlavour:"anomaly",showAR:!m,extraKey:n},e}),{}),m=Object.keys(r),h={},u=m.reduce(((e,t)=>{const n=d[t];return h[n.id]=!0,[...e,{...n,sticky:!0},...d[t].subMenuIds.reduce(((e,n)=>[...e,{...c[n],sticky:!0},...c[n].chartIds.map((e=>{const r=o[e];if(r)return s[e]=(0,B.Ay)({...r,id:e,context:e},{menuId:t,subMenuId:n,sectionInfo:c[n].info},{os:a}),{...s[e],level:2,size:365,menuKey:d[t].id,forceVisibility:!0}}))]),[])]}),[]);return o=null,{stickyIds:h,allElements:u,menuGroupIds:m,menuGroupById:d,subMenuById:c,menuItemAttributesById:s}};var re=a(79566),le=a(72253),ie=a(56489),se=a(67602),de=a(44741),ce=a(10952),me=a(12412);const he=e=>{let{id:t,...a}=e;return n.createElement(de.k,(0,w.A)({id:t},a),n.createElement(ce._,{id:t}),n.createElement(me.G,{id:t}))},ue=(0,n.memo)(he);var pe=a(31438),Le=a(80925),fe=a(36196),ge=(a(3064),a(72577),a(28973)),ye=a(13752),ve=a(92815),Ee=a(72582),Me=a(540),be=new(m())({id:"dashboard_add_chart",use:"dashboard_add_chart-usage",viewBox:"0 0 24 24",content:''});u().add(be);const Ae=be;var we=a(6504),xe=a(22332),Ce=a(46741);const ke=e=>{const{handleOpenModal:t}=(0,Me.A)("addToDashboardModal"),a=(0,xe.useChart)(),o=()=>{const e=a.getAttribute("id");t("",{chartId:e})},r=(0,Ce.JT)("dashboard:Update");return(0,n.useLayoutEffect)((()=>a.onKeyChange(["Alt","Shift","KeyD"],r?o:()=>{})),[r]),n.createElement(we.Button,(0,w.A)({icon:n.createElement(we.default,{svg:Ae,size:"16px"}),onClick:r?o:()=>{},title:r?"Add to dashboard":"You need to be logged in to create your custom dashboards and use this chart in them","data-testid":"chartHeaderToolbox-addDashboard",disabled:!r},e))},Ie=(0,n.memo)(ke);var Ze=a(83465);const 
De=(e,t)=>e&&e.getRoot().getChildren().find((e=>e.match({id:"anomalies-".concat(t)}))),_e=e=>t=>"overview-page::".concat(e.getAttribute("id"),"::").concat(t),He=e=>{let{id:t,...a}=e;const o=(0,k.ID)(),{height:r=0,...l}=(0,pe.aA)(t),i=(0,Le.e)(),{sendLog:s,isReady:d}=(0,P.A)(),c=(0,n.useMemo)((()=>{const e=De(i,o),a=Object.keys(te);let n=e.getNode({id:t});if(n)return n;const[s,d]=t.split("::");return n=i.makeChart({attributes:{contextScope:[s],height:(0,Ze.Oq)(e,t,r+260),id:t,roomId:o,selectedDimensions:d?[d]:[],groupBy:a.includes(t)?["node"]:["dimension"],...l},makeTrack:_e}),e.appendChild(n),n}),[i,o,t]);return(0,n.useEffect)((()=>{c&&d&&"function"===typeof s&&c.updateAttribute("logOptions",{sendLog:s,payload:{feature:"AnomalyAdvisor"}})}),[c,s,d]),n.createElement(fe.A,(0,w.A)({margin:[0,0,2],chart:c,"data-chartid":t,"data-track":c.track("container"),height:"300px",width:"100%"},a))},Te=n.memo(He,((e,t)=>e.id===t.id)),ze=e=>{let{id:t,subMenuId:a,...o}=e;return n.createElement(Te,(0,w.A)({id:t,role:"graphics-object","aria-roledescription":"chart","data-submenuid":a},o))},Be=(0,n.memo)(ze);var Ne=a(74258),Se=a(37031),Fe=a(6551);const Ve=e=>{let{id:t}=e;return n.createElement(Ne.t,{gap:0,id:t},n.createElement(Se.t,{id:t,margin:[1,0,0]}),n.createElement(Fe.X,{id:t}))},Re=(0,n.memo)(Ve),Ge=e=>{switch(e.level){case 0:return ue;case 1:return Re;case 2:return Be;default:return null}},Oe=e=>{let{onChartNameChange:t,initialChartName:a,dashboardOptions:o,linkToGo:r,contextToGo:l,...i}=e;const[s,,,d]=(0,se.A)("addToDashboardModal"),{setActiveMenuGroupId:c,setActiveSubMenuId:m}=(0,re.A)({onChartNameChange:t,initialChartName:a,linkToGo:r,contextToGo:l});return n.createElement(D.H,i,n.createElement(le.A,{onActiveMenuGroupId:c,onActiveSubMenuId:m,getComponent:Ge,dashboardOptions:o,initialChartName:a,checkVisibility:()=>!0}),s&&n.createElement(ie.A,{onClose:d}))},Ue=["alerts","info","config"],je=e=>{let{nodeIds:t}=e;const a=(0,Z.vt)(),o=(0,k.ID)(),[r,l,i,,s,d]=(0,_.Ay)(a,o,t),c=!l.length||!!s,m=(0,I.yO)(o,i),[h,{width:u}]=(0,x.A)(),[p,{height:L}]=(0,x.A)(),f=(0,n.useMemo)((()=>Math.ceil((L+32)/4)),[L]);((e,t,a)=>{let{width:o,host:r}=a;if(!r)throw"No host provided";const l=(0,Le.e)();(0,n.useMemo)((()=>{if(!l||!e)return;let a=De(l,e);return a?void 0:(a=l.makeContainer({attributes:{id:"anomalies-".concat(e),roomId:e,host:r,overlays:{proceeded:{type:"proceeded"}},nodesScope:t,containerWidth:o,toolboxElements:[Ee.default,ye.default,ve.default,Ie]}}),l.appendChild(a),(0,ge.unregister)((0,Ze.ml)(l,a)))}),[l,r,e,t]),(0,n.useEffect)((()=>{if(!l||!e)return;let a=De(l,e);a&&a.getNodes().forEach((e=>{e.updateAttribute("nodesScope",t),e.getAttribute("active")&&"chart"===e.type&&e.trigger("fetch")}))}),[l,e,t]),(0,n.useEffect)((()=>()=>{if(!l||!e)return;let t=De(l,e);t&&t.destroy()}),[e])})(o,t,{host:(0,Z.dg)()?"".concat(window.envSettings.agentApiUrl,"/api/v2"):"".concat(window.envSettings.apiUrl,"/api/v3/spaces/").concat(a,"/rooms/").concat(o),width:u});const[g,y]=(0,H.N9)("chartName",{key:o,extraKey:"anomalies",flavour:"val"}),[v,E]=(0,n.useState)(!1);return(0,n.useEffect)((()=>{E(!!l.length)}),[l.length]),n.createElement(D.A,{getObject:m,ids:r,getMenu:oe,extraKey:"anomalies"},n.createElement(C.A,{ref:h,position:"relative",sidebar:n.createElement(T.Ay,{nodeIds:t,title:"Anomalies",initialChartName:g,hiddenTabs:Ue,flavour:"anomalies",loaded:!0,hasSearch:!1})},n.createElement(Oe,(0,w.A)({initialChartName:g,onChartNameChange:y,linkToGo:v&&(0,z.A)("menu_Anomalous 
metrics")},c&&{padding:[0,0,f,4]})),c&&n.createElement(ee,{ref:p,position:"absolute",bottom:0,right:0,left:0,zIndex:35,onRetry:d})))},Ke=()=>{const e=(0,o.w7)({extraKey:"anomalies",merge:!1,scoped:!0}),t=(0,s.CK)(),a=(0,s.nj)(),{predicting:d}=(e=>{const t=(0,r.BU)(),a=(0,r.Gt)(t);return(0,n.useMemo)((()=>a.reduce(((t,a)=>{const{capabilities:{ml:n}}=a;return null!==n&&void 0!==n&&n.enabled?t.predicting=[...t.predicting,l(a,e)]:t.incapable=[...t.incapable,l(a,e)],t}),{predicting:[],incapable:[]})),[t.length,e])})("id");if(a&&!t.length)return n.createElement(q.Ay,{feature:"Anomalies",mode:"NoNodesView"},n.createElement(i.A,null));const c=d.length>0;return a&&!c?n.createElement(q.Ay,{feature:"Anomalies",mode:"NoConfiguredNodesView"},n.createElement(A,null)):n.createElement(q.Ay,{feature:"Anomalies"},n.createElement(je,{nodeIds:e}))}}}]); \ No newline at end of file diff --git a/src/web/gui/v2/3350.ae7151980981854dc3d1.chunk.js b/src/web/gui/v2/3350.ae7151980981854dc3d1.chunk.js deleted file mode 100644 index 499e4b03c..000000000 --- a/src/web/gui/v2/3350.ae7151980981854dc3d1.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="cba6d6ae-245a-4321-928f-6028dbaba19b",e._sentryDebugIdIdentifier="sentry-dbid-cba6d6ae-245a-4321-928f-6028dbaba19b")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[3350],{92155:(e,t,a)=>{a.d(t,{A:()=>d});var n=a(58168),l=a(96540),r=a(50876);const o=e=>(0,l.forwardRef)(((t,a)=>{let{callback:o,feature:c,isStart:d,isSuccess:u,isFailure:i,eventReason:s,payload:f={},...b}=t;const{sendLog:g,isReady:m}=(0,r.A)(),y=(0,l.useCallback)((()=>{const e=b[o],t={feature:c,isStart:d,isSuccess:u,isFailure:i,eventReason:s,...f,...b["data-ga"]?{dataGa:b["data-ga"]}:{},...b.dataGa?{dataGa:b.dataGa}:{},...b["data-track"]?{dataTrack:b["data-track"]}:{},...b.label?{label:b.label}:{}};"function"==typeof e&&e(),g(t,!0)}),[o,g,m,f,b]),p=(0,l.useMemo)((()=>({...b,[o]:y})),[b,o,y]);return l.createElement(e,(0,n.A)({ref:a},p))}));var c=a(67276);const d=e=>(0,l.forwardRef)(((t,a)=>{let{payload:r={},...d}=t;const u=o(e);return l.createElement(u,(0,n.A)({},d,{ref:a,callback:"onClick",payload:{...r,action:c.o1.buttonClicked}}))}))},73350:(e,t,a)=>{a.r(t),a.d(t,{Trust:()=>f,default:()=>b});a(9391),a(25440),a(3064),a(98992),a(72577),a(62953);var n=a(96540),l=a(86663),r=a(83199),o=a(11604),c=a(28738),d=a(92155),u=a(50876),i=a(63314);const s=(0,d.A)(r.Button),f=()=>{const[e,t]=(0,n.useMemo)((()=>{const{search:e}=window.location,{redirect_uri:t,agent_uri:a}=l.parse(e);return[t,a]})),[a,d]=(0,n.useState)(),[f,b,g]=(0,o.LC)(),[m,y]=(0,o.Yr)({},[f]),{sendLog:p,isReady:w}=(0,u.A)();return(0,n.useEffect)((()=>{let a=!0,n=null;const l=m.find((e=>e.url===t));return l?p({feature:"TrustUrl",description:"Redirect due to trusted 
url"}).finally((()=>{location.replace("/api/v1/auth/account/origins/".concat(l.id,"/redirect?redirect_uri=").concat(encodeURIComponent(e),"&agent_uri=").concat(encodeURIComponent(t)))})):n=setTimeout((()=>{a&&d(!0)}),1e3),()=>{a=!1,n&&(clearTimeout(n),n=null)}}),[e,t,m,w]),a?n.createElement(i.Ay,{feature:"TrustUrl"},n.createElement(r.Layer,{full:!0},n.createElement(r.Flex,{"data-testid":"trust",alignItems:"center",justifyContent:"center",column:!0,width:"100%",height:"100%",background:"mainBackground",gap:4},n.createElement(c.m,{animate:g}),n.createElement(r.Text,{textAlign:"center"},"Do you trust ",n.createElement(r.Text,{strong:!0},t),"?"),n.createElement(r.Flex,{column:!0,gap:2,alignItems:"center"},n.createElement(r.TextBig,null,"Authorizing this URL will allow it to request your Netdata data."),n.createElement(r.Flex,{gap:2},n.createElement(s,{flavour:"borderless",onClick:()=>window.location.replace(decodeURIComponent(e)),disabled:y||g,"data-ga":"url-authorization::click-cancel-button",feature:"TrustUrl",payload:{label:"Cancel"}},"Cancel"),n.createElement(s,{onClick:()=>b(t),disabled:y||g,isLoading:y||g,"data-ga":"url-authorization::click-yes-button",feature:"TrustUrl",payload:{label:"Yes"}},"Yes")))))):n.createElement(c.A,null)},b=f}}]); \ No newline at end of file diff --git a/src/web/gui/v2/3455.f9ca876de57244386773.chunk.js b/src/web/gui/v2/3455.f9ca876de57244386773.chunk.js deleted file mode 100644 index 63ff93101..000000000 --- a/src/web/gui/v2/3455.f9ca876de57244386773.chunk.js +++ /dev/null @@ -1,2 +0,0 @@ -/*! For license information please see 3455.f9ca876de57244386773.chunk.js.LICENSE.txt */ -!function(){try{var t="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},e=(new Error).stack;e&&(t._sentryDebugIds=t._sentryDebugIds||{},t._sentryDebugIds[e]="d0f4bcf8-a615-49cc-81ac-0d7754d8311f",t._sentryDebugIdIdentifier="sentry-dbid-d0f4bcf8-a615-49cc-81ac-0d7754d8311f")}catch(t){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[3455],{18595:(t,e)=>{"use strict";var i="windows",s=[],n="f17a",o="M0 93.7l183.6-25.3v177.4H0V93.7zm0 324.6l183.6 25.3V268.4H0v149.9zm203.8 28L448 480V268.4H203.8v177.9zm0-380.6v180.1H448V32L203.8 65.7z";e.mw={prefix:"fab",iconName:i,icon:[448,512,s,n,o]},e.tQ=e.mw},62672:(t,e)=>{"use strict";var i=[128683,"cancel"],s="f05e",n="M367.2 412.5L99.5 144.8C77.1 176.1 64 214.5 64 256c0 106 86 192 192 192c41.5 0 79.9-13.1 111.2-35.5zm45.3-45.3C434.9 335.9 448 297.5 448 256c0-106-86-192-192-192c-41.5 0-79.9 13.1-111.2 35.5L412.5 367.2zM0 256a256 256 0 1 1 512 0A256 256 0 1 1 0 256z";e.mw={prefix:"fas",iconName:"ban",icon:[512,512,i,s,n]},e.Df=e.mw},485:(t,e)=>{"use strict";var i="battery-half",s=["battery-3"],n="f242",o="M464 160c8.8 0 16 7.2 16 16V336c0 8.8-7.2 16-16 16H80c-8.8 0-16-7.2-16-16V176c0-8.8 7.2-16 16-16H464zM80 96C35.8 96 0 131.8 0 176V336c0 44.2 35.8 80 80 80H464c44.2 0 80-35.8 80-80V320c17.7 0 32-14.3 32-32V224c0-17.7-14.3-32-32-32V176c0-44.2-35.8-80-80-80H80zm208 96H96V320H288V192z";e.mw={prefix:"fas",iconName:i,icon:[576,512,s,n,o]},e.W6=e.mw},78536:(t,e)=>{"use strict";var i="bell",s=[128276,61602],n="f0f3",o="M224 0c-17.7 0-32 14.3-32 32V51.2C119 66 64 130.6 64 208v18.8c0 47-17.3 92.4-48.5 127.6l-7.4 8.3c-8.4 9.4-10.4 22.9-5.3 34.4S19.4 416 32 416H416c12.6 0 24-7.4 
29.2-18.9s3.1-25-5.3-34.4l-7.4-8.3C401.3 319.2 384 273.9 384 226.8V208c0-77.4-55-142-128-156.8V32c0-17.7-14.3-32-32-32zm45.3 493.3c12-12 18.7-28.3 18.7-45.3H224 160c0 17 6.7 33.3 18.7 45.3s28.3 18.7 45.3 18.7s33.3-6.7 45.3-18.7z";e.mw={prefix:"fas",iconName:i,icon:[448,512,s,n,o]},e.z$=e.mw},84994:(t,e)=>{"use strict";var i="bolt",s=[9889,"zap"],n="f0e7",o="M349.4 44.6c5.9-13.7 1.5-29.7-10.6-38.5s-28.6-8-39.9 1.8l-256 224c-10 8.8-13.6 22.9-8.9 35.3S50.7 288 64 288H175.5L98.6 467.4c-5.9 13.7-1.5 29.7 10.6 38.5s28.6 8 39.9-1.8l256-224c10-8.8 13.6-22.9 8.9-35.3s-16.6-20.7-30-20.7H272.5L349.4 44.6z";e.mw={prefix:"fas",iconName:i,icon:[448,512,s,n,o]},e.zm=e.mw},7521:(t,e)=>{"use strict";var i="bookmark",s=[128278,61591],n="f02e",o="M0 48V487.7C0 501.1 10.9 512 24.3 512c5 0 9.9-1.5 14-4.4L192 400 345.7 507.6c4.1 2.9 9 4.4 14 4.4c13.4 0 24.3-10.9 24.3-24.3V48c0-26.5-21.5-48-48-48H48C21.5 0 0 21.5 0 48z";e.mw={prefix:"fas",iconName:i,icon:[384,512,s,n,o]},e.G0=e.mw},13765:(t,e)=>{"use strict";var i="brain",s=[129504],n="f5dc",o="M184 0c30.9 0 56 25.1 56 56V456c0 30.9-25.1 56-56 56c-28.9 0-52.7-21.9-55.7-50.1c-5.2 1.4-10.7 2.1-16.3 2.1c-35.3 0-64-28.7-64-64c0-7.4 1.3-14.6 3.6-21.2C21.4 367.4 0 338.2 0 304c0-31.9 18.7-59.5 45.8-72.3C37.1 220.8 32 207 32 192c0-30.7 21.6-56.3 50.4-62.6C80.8 123.9 80 118 80 112c0-29.9 20.6-55.1 48.3-62.1C131.3 21.9 155.1 0 184 0zM328 0c28.9 0 52.6 21.9 55.7 49.9c27.8 7 48.3 32.1 48.3 62.1c0 6-.8 11.9-2.4 17.4c28.8 6.2 50.4 31.9 50.4 62.6c0 15-5.1 28.8-13.8 39.7C493.3 244.5 512 272.1 512 304c0 34.2-21.4 63.4-51.6 74.8c2.3 6.6 3.6 13.8 3.6 21.2c0 35.3-28.7 64-64 64c-5.6 0-11.1-.7-16.3-2.1c-3 28.2-26.8 50.1-55.7 50.1c-30.9 0-56-25.1-56-56V56c0-30.9 25.1-56 56-56z";e.mw={prefix:"fas",iconName:i,icon:[512,512,s,n,o]},e.GQ=e.mw},66600:(t,e)=>{"use strict";var i="chart-bar",s=["bar-chart"],n="f080",o="M32 32c17.7 0 32 14.3 32 32V400c0 8.8 7.2 16 16 16H480c17.7 0 32 14.3 32 32s-14.3 32-32 32H80c-44.2 0-80-35.8-80-80V64C0 46.3 14.3 32 32 32zm96 96c0-17.7 14.3-32 32-32l192 0c17.7 0 32 14.3 32 32s-14.3 32-32 32l-192 0c-17.7 0-32-14.3-32-32zm32 64H288c17.7 0 32 14.3 32 32s-14.3 32-32 32H160c-17.7 0-32-14.3-32-32s14.3-32 32-32zm0 96H416c17.7 0 32 14.3 32 32s-14.3 32-32 32H160c-17.7 0-32-14.3-32-32s14.3-32 32-32z";e.mw={prefix:"fas",iconName:i,icon:[512,512,s,n,o]},e.$F=e.mw},54399:(t,e)=>{"use strict";var i="chart-line",s=["line-chart"],n="f201",o="M64 64c0-17.7-14.3-32-32-32S0 46.3 0 64V400c0 44.2 35.8 80 80 80H480c17.7 0 32-14.3 32-32s-14.3-32-32-32H80c-8.8 0-16-7.2-16-16V64zm406.6 86.6c12.5-12.5 12.5-32.8 0-45.3s-32.8-12.5-45.3 0L320 210.7l-57.4-57.4c-12.5-12.5-32.8-12.5-45.3 0l-112 112c-12.5 12.5-12.5 32.8 0 45.3s32.8 12.5 45.3 0L240 221.3l57.4 57.4c12.5 12.5 32.8 12.5 45.3 0l128-128z";e.mw={prefix:"fas",iconName:i,icon:[512,512,s,n,o]},e.gK=e.mw},46943:(t,e)=>{"use strict";var i="circle-notch",s=[],n="f1ce",o="M222.7 32.1c5 16.9-4.6 34.8-21.5 39.8C121.8 95.6 64 169.1 64 256c0 106 86 192 192 192s192-86 192-192c0-86.9-57.8-160.4-137.1-184.1c-16.9-5-26.6-22.9-21.5-39.8s22.9-26.6 39.8-21.5C434.9 42.1 512 140 512 256c0 141.4-114.6 256-256 256S0 397.4 0 256C0 140 77.1 42.1 182.9 10.6c16.9-5 34.8 4.6 39.8 21.5z";e.mw={prefix:"fas",iconName:i,icon:[512,512,s,n,o]},e.s4=e.mw},1871:(t,e)=>{"use strict";var i="clock",s=[128339,"clock-four"],n="f017",o="M256 0a256 256 0 1 1 0 512A256 256 0 1 1 256 0zM232 120V256c0 8 4 15.5 10.7 20l96 64c11 7.4 25.9 4.4 33.3-6.7s4.4-25.9-6.7-33.3L280 243.2V120c0-13.3-10.7-24-24-24s-24 10.7-24 
24z";e.mw={prefix:"fas",iconName:i,icon:[512,512,s,n,o]},e.a$=e.mw},75904:(t,e)=>{"use strict";var i="cloud",s=[9729],n="f0c2",o="M0 336c0 79.5 64.5 144 144 144H512c70.7 0 128-57.3 128-128c0-61.9-44-113.6-102.4-125.4c4.1-10.7 6.4-22.4 6.4-34.6c0-53-43-96-96-96c-19.7 0-38.1 6-53.3 16.2C367 64.2 315.3 32 256 32C167.6 32 96 103.6 96 192c0 2.7 .1 5.4 .2 8.1C40.2 219.8 0 273.2 0 336z";e.mw={prefix:"fas",iconName:i,icon:[640,512,s,n,o]},e.jB=e.mw},76229:(t,e,i)=>{"use strict";var s=i(8185);e.mw={prefix:s.prefix,iconName:s.iconName,icon:[s.width,s.height,s.aliases,s.unicode,s.svgPathData]},e.Ub=e.mw,s.prefix,s.iconName,s.width,s.height,s.aliases,s.unicode,s.svgPathData,s.aliases},93847:(t,e)=>{"use strict";var i="comments",s=[128490,61670],n="f086",o="M208 352c114.9 0 208-78.8 208-176S322.9 0 208 0S0 78.8 0 176c0 38.6 14.7 74.3 39.6 103.4c-3.5 9.4-8.7 17.7-14.2 24.7c-4.8 6.2-9.7 11-13.3 14.3c-1.8 1.6-3.3 2.9-4.3 3.7c-.5 .4-.9 .7-1.1 .8l-.2 .2 0 0 0 0C1 327.2-1.4 334.4 .8 340.9S9.1 352 16 352c21.8 0 43.8-5.6 62.1-12.5c9.2-3.5 17.8-7.4 25.3-11.4C134.1 343.3 169.8 352 208 352zM448 176c0 112.3-99.1 196.9-216.5 207C255.8 457.4 336.4 512 432 512c38.2 0 73.9-8.7 104.7-23.9c7.5 4 16 7.9 25.2 11.4c18.3 6.9 40.3 12.5 62.1 12.5c6.9 0 13.1-4.5 15.2-11.1c2.1-6.6-.2-13.8-5.8-17.9l0 0 0 0-.2-.2c-.2-.2-.6-.4-1.1-.8c-1-.8-2.5-2-4.3-3.7c-3.6-3.3-8.5-8.1-13.3-14.3c-5.5-7-10.7-15.4-14.2-24.7c24.9-29 39.6-64.7 39.6-103.4c0-92.8-84.9-168.9-192.6-175.5c.4 5.1 .6 10.3 .6 15.5z";e.mw={prefix:"fas",iconName:i,icon:[640,512,s,n,o]},e.q9=e.mw},37048:(t,e)=>{"use strict";var i="database",s=[],n="f1c0",o="M448 80v48c0 44.2-100.3 80-224 80S0 172.2 0 128V80C0 35.8 100.3 0 224 0S448 35.8 448 80zM393.2 214.7c20.8-7.4 39.9-16.9 54.8-28.6V288c0 44.2-100.3 80-224 80S0 332.2 0 288V186.1c14.9 11.8 34 21.2 54.8 28.6C99.7 230.7 159.5 240 224 240s124.3-9.3 169.2-25.3zM0 346.1c14.9 11.8 34 21.2 54.8 28.6C99.7 390.7 159.5 400 224 400s124.3-9.3 169.2-25.3c20.8-7.4 39.9-16.9 54.8-28.6V432c0 44.2-100.3 80-224 80S0 476.2 0 432V346.1z";e.mw={prefix:"fas",iconName:i,icon:[448,512,s,n,o]},e.he=e.mw},64850:(t,e)=>{"use strict";var i="dragon",s=[128009],n="f6d5",o="M352 124.5l-51.9-13c-6.5-1.6-11.3-7.1-12-13.8s2.8-13.1 8.7-16.1l40.8-20.4L294.4 28.8c-5.5-4.1-7.8-11.3-5.6-17.9S297.1 0 304 0H416h32 16c30.2 0 58.7 14.2 76.8 38.4l57.6 76.8c6.2 8.3 9.6 18.4 9.6 28.8c0 26.5-21.5 48-48 48H538.5c-17 0-33.3-6.7-45.3-18.7L480 160H448v21.5c0 24.8 12.8 47.9 33.8 61.1l106.6 66.6c32.1 20.1 51.6 55.2 51.6 93.1C640 462.9 590.9 512 530.2 512H496 432 32.3c-3.3 0-6.6-.4-9.6-1.4C13.5 507.8 6 501 2.4 492.1C1 488.7 .2 485.2 0 481.4c-.2-3.7 .3-7.3 1.3-10.7c2.8-9.2 9.6-16.7 18.6-20.4c3-1.2 6.2-2 9.5-2.2L433.3 412c8.3-.7 14.7-7.7 14.7-16.1c0-4.3-1.7-8.4-4.7-11.4l-44.4-44.4c-30-30-46.9-70.7-46.9-113.1V181.5v-57zM512 72.3c0-.1 0-.2 0-.3s0-.2 0-.3v.6zm-1.3 7.4L464.3 68.1c-.2 1.3-.3 2.6-.3 3.9c0 13.3 10.7 24 24 24c10.6 0 19.5-6.8 22.7-16.3zM130.9 116.5c16.3-14.5 40.4-16.2 58.5-4.1l130.6 87V227c0 32.8 8.4 64.8 24 93H112c-6.7 0-12.7-4.2-15-10.4s-.5-13.3 4.6-17.7L171 232.3 18.4 255.8c-7 1.1-13.9-2.6-16.9-9s-1.5-14.1 3.8-18.8L130.9 116.5z";e.mw={prefix:"fas",iconName:i,icon:[640,512,s,n,o]},e.dX=e.mw},55345:(t,e)=>{"use strict";var i="envelope",s=[128386,9993,61443],n="f0e0",o="M48 64C21.5 64 0 85.5 0 112c0 15.1 7.1 29.3 19.2 38.4L236.8 313.6c11.4 8.5 27 8.5 38.4 0L492.8 150.4c12.1-9.1 19.2-23.3 19.2-38.4c0-26.5-21.5-48-48-48H48zM0 176V384c0 35.3 28.7 64 64 64H448c35.3 0 64-28.7 64-64V176L294.4 339.2c-22.8 17.1-54 17.1-76.8 0L0 
176z";e.mw={prefix:"fas",iconName:i,icon:[512,512,s,n,o]},e.y_=e.mw},49205:(t,e,i)=>{"use strict";var s=i(46534);e.mw={prefix:s.prefix,iconName:s.iconName,icon:[s.width,s.height,s.aliases,s.unicode,s.svgPathData]},e._q=e.mw,s.prefix,s.iconName,s.width,s.height,s.aliases,s.unicode,s.svgPathData,s.aliases},35532:(t,e)=>{"use strict";var i="exclamation",s=[10069,10071,61738],n="M64 64c0-17.7-14.3-32-32-32S0 46.3 0 64V320c0 17.7 14.3 32 32 32s32-14.3 32-32V64zM32 480a40 40 0 1 0 0-80 40 40 0 1 0 0 80z";e.mw={prefix:"fas",iconName:i,icon:[64,512,s,"21",n]},e.bx=e.mw},900:(t,e)=>{"use strict";var i=[128065],s="f06e",n="M288 32c-80.8 0-145.5 36.8-192.6 80.6C48.6 156 17.3 208 2.5 243.7c-3.3 7.9-3.3 16.7 0 24.6C17.3 304 48.6 356 95.4 399.4C142.5 443.2 207.2 480 288 480s145.5-36.8 192.6-80.6c46.8-43.5 78.1-95.4 93-131.1c3.3-7.9 3.3-16.7 0-24.6c-14.9-35.7-46.2-87.7-93-131.1C433.5 68.8 368.8 32 288 32zM144 256a144 144 0 1 1 288 0 144 144 0 1 1 -288 0zm144-64c0 35.3-28.7 64-64 64c-7.1 0-13.9-1.2-20.3-3.3c-5.5-1.8-11.9 1.6-11.7 7.4c.3 6.9 1.3 13.8 3.2 20.7c13.7 51.2 66.4 81.6 117.6 67.9s81.6-66.4 67.9-117.6c-11.1-41.5-47.8-69.4-88.6-71.1c-5.8-.2-9.2 6.1-7.4 11.7c2.1 6.4 3.3 13.2 3.3 20.3z";e.mw={prefix:"fas",iconName:"eye",icon:[576,512,i,s,n]},e.pS=e.mw},93360:(t,e,i)=>{"use strict";var s=i(96488);e.mw={prefix:s.prefix,iconName:s.iconName,icon:[s.width,s.height,s.aliases,s.unicode,s.svgPathData]},e.h8=e.mw,s.prefix,s.iconName,s.width,s.height,s.aliases,s.unicode,s.svgPathData,s.aliases},96488:(t,e)=>{"use strict";Object.defineProperty(e,"__esModule",{value:!0});var i="file-lines",s=[128441,128462,61686,"file-alt","file-text"],n="f15c",o="M64 0C28.7 0 0 28.7 0 64V448c0 35.3 28.7 64 64 64H320c35.3 0 64-28.7 64-64V160H256c-17.7 0-32-14.3-32-32V0H64zM256 0V128H384L256 0zM112 256H272c8.8 0 16 7.2 16 16s-7.2 16-16 16H112c-8.8 0-16-7.2-16-16s7.2-16 16-16zm0 64H272c8.8 0 16 7.2 16 16s-7.2 16-16 16H112c-8.8 0-16-7.2-16-16s7.2-16 16-16zm0 64H272c8.8 0 16 7.2 16 16s-7.2 16-16 16H112c-8.8 0-16-7.2-16-16s7.2-16 16-16z";e.definition={prefix:"fas",iconName:i,icon:[384,512,s,n,o]},e.faFileLines=e.definition,e.prefix="fas",e.iconName=i,e.width=384,e.height=512,e.ligatures=s,e.unicode=n,e.svgPathData=o,e.aliases=s},22294:(t,e)=>{"use strict";var i="flask",s=[],n="f0c3",o="M288 0H160 128C110.3 0 96 14.3 96 32s14.3 32 32 32V196.8c0 11.8-3.3 23.5-9.5 33.5L10.3 406.2C3.6 417.2 0 429.7 0 442.6C0 480.9 31.1 512 69.4 512H378.6c38.3 0 69.4-31.1 69.4-69.4c0-12.8-3.6-25.4-10.3-36.4L329.5 230.4c-6.2-10.1-9.5-21.7-9.5-33.5V64c17.7 0 32-14.3 32-32s-14.3-32-32-32H288zM192 196.8V64h64V196.8c0 23.7 6.6 46.9 19 67.1L309.5 320h-171L173 263.9c12.4-20.2 19-43.4 19-67.1z";e.mw={prefix:"fas",iconName:i,icon:[448,512,s,n,o]},e.rI=e.mw},22017:(t,e)=>{"use strict";var i="folder-open",s=[128194,128449,61717],n="f07c",o="M88.7 223.8L0 375.8V96C0 60.7 28.7 32 64 32H181.5c17 0 33.3 6.7 45.3 18.7l26.5 26.5c12 12 28.3 18.7 45.3 18.7H416c35.3 0 64 28.7 64 64v32H144c-22.8 0-43.8 12.1-55.3 31.8zm27.6 16.1C122.1 230 132.6 224 144 224H544c11.5 0 22 6.1 27.7 16.1s5.7 22.2-.1 32.1l-112 192C453.9 474 443.4 480 432 480H32c-11.5 0-22-6.1-27.7-16.1s-5.7-22.2 .1-32.1l112-192z";e.mw={prefix:"fas",iconName:i,icon:[576,512,s,n,o]},e.Uj=e.mw},44822:(t,e)=>{"use strict";Object.defineProperty(e,"__esModule",{value:!0});var i="gauge-high",s=[62461,"tachometer-alt","tachometer-alt-fast"],n="f625",o="M0 256a256 256 0 1 1 512 0A256 256 0 1 1 0 256zM288 96a32 32 0 1 0 -64 0 32 32 0 1 0 64 0zM256 416c35.3 0 64-28.7 64-64c0-17.4-6.9-33.1-18.1-44.6L366 
161.7c5.3-12.1-.2-26.3-12.3-31.6s-26.3 .2-31.6 12.3L257.9 288c-.6 0-1.3 0-1.9 0c-35.3 0-64 28.7-64 64s28.7 64 64 64zM176 144a32 32 0 1 0 -64 0 32 32 0 1 0 64 0zM96 288a32 32 0 1 0 0-64 32 32 0 1 0 0 64zm352-32a32 32 0 1 0 -64 0 32 32 0 1 0 64 0z";e.definition={prefix:"fas",iconName:i,icon:[512,512,s,n,o]},e.faGaugeHigh=e.definition,e.prefix="fas",e.iconName=i,e.width=512,e.height=512,e.ligatures=s,e.unicode=n,e.svgPathData=o,e.aliases=s},8185:(t,e)=>{"use strict";Object.defineProperty(e,"__esModule",{value:!0});var i="gears",s=["cogs"],n="f085",o="M308.5 135.3c7.1-6.3 9.9-16.2 6.2-25c-2.3-5.3-4.8-10.5-7.6-15.5L304 89.4c-3-5-6.3-9.9-9.8-14.6c-5.7-7.6-15.7-10.1-24.7-7.1l-28.2 9.3c-10.7-8.8-23-16-36.2-20.9L199 27.1c-1.9-9.3-9.1-16.7-18.5-17.8C173.9 8.4 167.2 8 160.4 8h-.7c-6.8 0-13.5 .4-20.1 1.2c-9.4 1.1-16.6 8.6-18.5 17.8L115 56.1c-13.3 5-25.5 12.1-36.2 20.9L50.5 67.8c-9-3-19-.5-24.7 7.1c-3.5 4.7-6.8 9.6-9.9 14.6l-3 5.3c-2.8 5-5.3 10.2-7.6 15.6c-3.7 8.7-.9 18.6 6.2 25l22.2 19.8C32.6 161.9 32 168.9 32 176s.6 14.1 1.7 20.9L11.5 216.7c-7.1 6.3-9.9 16.2-6.2 25c2.3 5.3 4.8 10.5 7.6 15.6l3 5.2c3 5.1 6.3 9.9 9.9 14.6c5.7 7.6 15.7 10.1 24.7 7.1l28.2-9.3c10.7 8.8 23 16 36.2 20.9l6.1 29.1c1.9 9.3 9.1 16.7 18.5 17.8c6.7 .8 13.5 1.2 20.4 1.2s13.7-.4 20.4-1.2c9.4-1.1 16.6-8.6 18.5-17.8l6.1-29.1c13.3-5 25.5-12.1 36.2-20.9l28.2 9.3c9 3 19 .5 24.7-7.1c3.5-4.7 6.8-9.5 9.8-14.6l3.1-5.4c2.8-5 5.3-10.2 7.6-15.5c3.7-8.7 .9-18.6-6.2-25l-22.2-19.8c1.1-6.8 1.7-13.8 1.7-20.9s-.6-14.1-1.7-20.9l22.2-19.8zM112 176a48 48 0 1 1 96 0 48 48 0 1 1 -96 0zM504.7 500.5c6.3 7.1 16.2 9.9 25 6.2c5.3-2.3 10.5-4.8 15.5-7.6l5.4-3.1c5-3 9.9-6.3 14.6-9.8c7.6-5.7 10.1-15.7 7.1-24.7l-9.3-28.2c8.8-10.7 16-23 20.9-36.2l29.1-6.1c9.3-1.9 16.7-9.1 17.8-18.5c.8-6.7 1.2-13.5 1.2-20.4s-.4-13.7-1.2-20.4c-1.1-9.4-8.6-16.6-17.8-18.5L583.9 307c-5-13.3-12.1-25.5-20.9-36.2l9.3-28.2c3-9 .5-19-7.1-24.7c-4.7-3.5-9.6-6.8-14.6-9.9l-5.3-3c-5-2.8-10.2-5.3-15.6-7.6c-8.7-3.7-18.6-.9-25 6.2l-19.8 22.2c-6.8-1.1-13.8-1.7-20.9-1.7s-14.1 .6-20.9 1.7l-19.8-22.2c-6.3-7.1-16.2-9.9-25-6.2c-5.3 2.3-10.5 4.8-15.6 7.6l-5.2 3c-5.1 3-9.9 6.3-14.6 9.9c-7.6 5.7-10.1 15.7-7.1 24.7l9.3 28.2c-8.8 10.7-16 23-20.9 36.2L315.1 313c-9.3 1.9-16.7 9.1-17.8 18.5c-.8 6.7-1.2 13.5-1.2 20.4s.4 13.7 1.2 20.4c1.1 9.4 8.6 16.6 17.8 18.5l29.1 6.1c5 13.3 12.1 25.5 20.9 36.2l-9.3 28.2c-3 9-.5 19 7.1 24.7c4.7 3.5 9.5 6.8 14.6 9.8l5.4 3.1c5 2.8 10.2 5.3 15.5 7.6c8.7 3.7 18.6 .9 25-6.2l19.8-22.2c6.8 1.1 13.8 1.7 20.9 1.7s14.1-.6 20.9-1.7l19.8 22.2zM464 304a48 48 0 1 1 0 96 48 48 0 1 1 0-96z";e.definition={prefix:"fas",iconName:i,icon:[640,512,s,n,o]},e.faGears=e.definition,e.prefix="fas",e.iconName=i,e.width=640,e.height=512,e.ligatures=s,e.unicode=n,e.svgPathData=o,e.aliases=s},27064:(t,e)=>{"use strict";var i="globe",s=[127760],n="f0ac",o="M352 256c0 22.2-1.2 43.6-3.3 64H163.3c-2.2-20.4-3.3-41.8-3.3-64s1.2-43.6 3.3-64H348.7c2.2 20.4 3.3 41.8 3.3 64zm28.8-64H503.9c5.3 20.5 8.1 41.9 8.1 64s-2.8 43.5-8.1 64H380.8c2.1-20.6 3.2-42 3.2-64s-1.1-43.4-3.2-64zm112.6-32H376.7c-10-63.9-29.8-117.4-55.3-151.6c78.3 20.7 142 77.5 171.9 151.6zm-149.1 0H167.7c6.1-36.4 15.5-68.6 27-94.7c10.5-23.6 22.2-40.7 33.5-51.5C239.4 3.2 248.7 0 256 0s16.6 3.2 27.8 13.8c11.3 10.8 23 27.9 33.5 51.5c11.6 26 20.9 58.2 27 94.7zm-209 0H18.6C48.6 85.9 112.2 29.1 190.6 8.4C165.1 42.6 145.3 96.1 135.3 160zM8.1 192H131.2c-2.1 20.6-3.2 42-3.2 64s1.1 43.4 3.2 64H8.1C2.8 299.5 0 278.1 0 256s2.8-43.5 8.1-64zM194.7 446.6c-11.6-26-20.9-58.2-27-94.6H344.3c-6.1 36.4-15.5 68.6-27 94.6c-10.5 23.6-22.2 40.7-33.5 51.5C272.6 508.8 263.3 512 256 
512s-16.6-3.2-27.8-13.8c-11.3-10.8-23-27.9-33.5-51.5zM135.3 352c10 63.9 29.8 117.4 55.3 151.6C112.2 482.9 48.6 426.1 18.6 352H135.3zm358.1 0c-30 74.1-93.6 130.9-171.9 151.6c25.5-34.2 45.2-87.7 55.3-151.6H493.4z";e.mw={prefix:"fas",iconName:i,icon:[512,512,s,n,o]},e.Bw=e.mw},98726:(t,e)=>{"use strict";Object.defineProperty(e,"__esModule",{value:!0});var i="hard-drive",s=[128436,"hdd"],n="f0a0",o="M0 96C0 60.7 28.7 32 64 32H448c35.3 0 64 28.7 64 64V280.4c-17-15.2-39.4-24.4-64-24.4H64c-24.6 0-47 9.2-64 24.4V96zM64 288H448c35.3 0 64 28.7 64 64v64c0 35.3-28.7 64-64 64H64c-35.3 0-64-28.7-64-64V352c0-35.3 28.7-64 64-64zM320 416a32 32 0 1 0 0-64 32 32 0 1 0 0 64zm128-32a32 32 0 1 0 -64 0 32 32 0 1 0 64 0z";e.definition={prefix:"fas",iconName:i,icon:[512,512,s,n,o]},e.faHardDrive=e.definition,e.prefix="fas",e.iconName=i,e.width=512,e.height=512,e.ligatures=s,e.unicode=n,e.svgPathData=o,e.aliases=s},78613:(t,e,i)=>{"use strict";var s=i(98726);e.mw={prefix:s.prefix,iconName:s.iconName,icon:[s.width,s.height,s.aliases,s.unicode,s.svgPathData]},e.MB=e.mw,s.prefix,s.iconName,s.width,s.height,s.aliases,s.unicode,s.svgPathData,s.aliases},41776:(t,e)=>{"use strict";Object.defineProperty(e,"__esModule",{value:!0});var i="heart-pulse",s=["heartbeat"],n="f21e",o="M228.3 469.1L47.6 300.4c-4.2-3.9-8.2-8.1-11.9-12.4h87c22.6 0 43-13.6 51.7-34.5l10.5-25.2 49.3 109.5c3.8 8.5 12.1 14 21.4 14.1s17.8-5 22-13.3L320 253.7l1.7 3.4c9.5 19 28.9 31 50.1 31H476.3c-3.7 4.3-7.7 8.5-11.9 12.4L283.7 469.1c-7.5 7-17.4 10.9-27.7 10.9s-20.2-3.9-27.7-10.9zM503.7 240h-132c-3 0-5.8-1.7-7.2-4.4l-23.2-46.3c-4.1-8.1-12.4-13.3-21.5-13.3s-17.4 5.1-21.5 13.3l-41.4 82.8L205.9 158.2c-3.9-8.7-12.7-14.3-22.2-14.1s-18.1 5.9-21.8 14.8l-31.8 76.3c-1.2 3-4.2 4.9-7.4 4.9H16c-2.6 0-5 .4-7.3 1.1C3 225.2 0 208.2 0 190.9v-5.8c0-69.9 50.5-129.5 119.4-141C165 36.5 211.4 51.4 244 84l12 12 12-12c32.6-32.6 79-47.5 124.6-39.9C461.5 55.6 512 115.2 512 185.1v5.8c0 16.9-2.8 33.5-8.3 49.1z";e.definition={prefix:"fas",iconName:i,icon:[512,512,s,n,o]},e.faHeartPulse=e.definition,e.prefix="fas",e.iconName=i,e.width=512,e.height=512,e.ligatures=s,e.unicode=n,e.svgPathData=o,e.aliases=s},55573:(t,e,i)=>{"use strict";var s=i(41776);e.mw={prefix:s.prefix,iconName:s.iconName,icon:[s.width,s.height,s.aliases,s.unicode,s.svgPathData]},e._g=e.mw,s.prefix,s.iconName,s.width,s.height,s.aliases,s.unicode,s.svgPathData,s.aliases},13808:(t,e)=>{"use strict";var i="laptop-code",s=[],n="f5fc",o="M64 96c0-35.3 28.7-64 64-64H512c35.3 0 64 28.7 64 64V352H512V96H128V352H64V96zM0 403.2C0 392.6 8.6 384 19.2 384H620.8c10.6 0 19.2 8.6 19.2 19.2c0 42.4-34.4 76.8-76.8 76.8H76.8C34.4 480 0 445.6 0 403.2zM281 209l-31 31 31 31c9.4 9.4 9.4 24.6 0 33.9s-24.6 9.4-33.9 0l-48-48c-9.4-9.4-9.4-24.6 0-33.9l48-48c9.4-9.4 24.6-9.4 33.9 0s9.4 24.6 0 33.9zM393 175l48 48c9.4 9.4 9.4 24.6 0 33.9l-48 48c-9.4 9.4-24.6 9.4-33.9 0s-9.4-24.6 0-33.9l31-31-31-31c-9.4-9.4-9.4-24.6 0-33.9s24.6-9.4 33.9 0z";e.mw={prefix:"fas",iconName:i,icon:[640,512,s,n,o]},e.fP=e.mw},12333:(t,e)=>{"use strict";var i="leaf",s=[],n="f06c",o="M272 96c-78.6 0-145.1 51.5-167.7 122.5c33.6-17 71.5-26.5 111.7-26.5h88c8.8 0 16 7.2 16 16s-7.2 16-16 16H288 216s0 0 0 0c-16.6 0-32.7 1.9-48.3 5.4c-25.9 5.9-49.9 16.4-71.4 30.7c0 0 0 0 0 0C38.3 298.8 0 364.9 0 440v16c0 13.3 10.7 24 24 24s24-10.7 24-24V440c0-48.7 20.7-92.5 53.8-123.2C121.6 392.3 190.3 448 272 448l1 0c132.1-.7 239-130.9 239-291.4c0-42.6-7.5-83.1-21.1-119.6c-2.6-6.9-12.7-6.6-16.2-.1C455.9 72.1 418.7 96 376 96L272 
96z";e.mw={prefix:"fas",iconName:i,icon:[512,512,s,n,o]},e.YH=e.mw},90501:(t,e)=>{"use strict";Object.defineProperty(e,"__esModule",{value:!0});var i="list-check",s=["tasks"],n="f0ae",o="M152.1 38.2c9.9 8.9 10.7 24 1.8 33.9l-72 80c-4.4 4.9-10.6 7.8-17.2 7.9s-12.9-2.4-17.6-7L7 113C-2.3 103.6-2.3 88.4 7 79s24.6-9.4 33.9 0l22.1 22.1 55.1-61.2c8.9-9.9 24-10.7 33.9-1.8zm0 160c9.9 8.9 10.7 24 1.8 33.9l-72 80c-4.4 4.9-10.6 7.8-17.2 7.9s-12.9-2.4-17.6-7L7 273c-9.4-9.4-9.4-24.6 0-33.9s24.6-9.4 33.9 0l22.1 22.1 55.1-61.2c8.9-9.9 24-10.7 33.9-1.8zM224 96c0-17.7 14.3-32 32-32H480c17.7 0 32 14.3 32 32s-14.3 32-32 32H256c-17.7 0-32-14.3-32-32zm0 160c0-17.7 14.3-32 32-32H480c17.7 0 32 14.3 32 32s-14.3 32-32 32H256c-17.7 0-32-14.3-32-32zM160 416c0-17.7 14.3-32 32-32H480c17.7 0 32 14.3 32 32s-14.3 32-32 32H192c-17.7 0-32-14.3-32-32zM48 368a48 48 0 1 1 0 96 48 48 0 1 1 0-96z";e.definition={prefix:"fas",iconName:i,icon:[512,512,s,n,o]},e.faListCheck=e.definition,e.prefix="fas",e.iconName=i,e.width=512,e.height=512,e.ligatures=s,e.unicode=n,e.svgPathData=o,e.aliases=s},36697:(t,e)=>{"use strict";var i="mask",s=[],n="f6fa",o="M288 64C64 64 0 160 0 272S80 448 176 448h8.4c24.2 0 46.4-13.7 57.2-35.4l23.2-46.3c4.4-8.8 13.3-14.3 23.2-14.3s18.8 5.5 23.2 14.3l23.2 46.3c10.8 21.7 33 35.4 57.2 35.4H400c96 0 176-64 176-176s-64-208-288-208zM96 256a64 64 0 1 1 128 0A64 64 0 1 1 96 256zm320-64a64 64 0 1 1 0 128 64 64 0 1 1 0-128z";e.mw={prefix:"fas",iconName:i,icon:[576,512,s,n,o]},e.KI=e.mw},82367:(t,e)=>{"use strict";var i="microchip",s=[],n="f2db",o="M176 24c0-13.3-10.7-24-24-24s-24 10.7-24 24V64c-35.3 0-64 28.7-64 64H24c-13.3 0-24 10.7-24 24s10.7 24 24 24H64v56H24c-13.3 0-24 10.7-24 24s10.7 24 24 24H64v56H24c-13.3 0-24 10.7-24 24s10.7 24 24 24H64c0 35.3 28.7 64 64 64v40c0 13.3 10.7 24 24 24s24-10.7 24-24V448h56v40c0 13.3 10.7 24 24 24s24-10.7 24-24V448h56v40c0 13.3 10.7 24 24 24s24-10.7 24-24V448c35.3 0 64-28.7 64-64h40c13.3 0 24-10.7 24-24s-10.7-24-24-24H448V280h40c13.3 0 24-10.7 24-24s-10.7-24-24-24H448V176h40c13.3 0 24-10.7 24-24s-10.7-24-24-24H448c0-35.3-28.7-64-64-64V24c0-13.3-10.7-24-24-24s-24 10.7-24 24V64H280V24c0-13.3-10.7-24-24-24s-24 10.7-24 24V64H176V24zM160 128H352c17.7 0 32 14.3 32 32V352c0 17.7-14.3 32-32 32H160c-17.7 0-32-14.3-32-32V160c0-17.7 14.3-32 32-32zm192 32H160V352H352V160z";e.mw={prefix:"fas",iconName:i,icon:[512,512,s,n,o]},e.YS=e.mw},86443:(t,e)=>{"use strict";var i="puzzle-piece",s=[129513],n="f12e",o="M192 104.8c0-9.2-5.8-17.3-13.2-22.8C167.2 73.3 160 61.3 160 48c0-26.5 28.7-48 64-48s64 21.5 64 48c0 13.3-7.2 25.3-18.8 34c-7.4 5.5-13.2 13.6-13.2 22.8c0 12.8 10.4 23.2 23.2 23.2H336c26.5 0 48 21.5 48 48v56.8c0 12.8 10.4 23.2 23.2 23.2c9.2 0 17.3-5.8 22.8-13.2c8.7-11.6 20.7-18.8 34-18.8c26.5 0 48 28.7 48 64s-21.5 64-48 64c-13.3 0-25.3-7.2-34-18.8c-5.5-7.4-13.6-13.2-22.8-13.2c-12.8 0-23.2 10.4-23.2 23.2V464c0 26.5-21.5 48-48 48H279.2c-12.8 0-23.2-10.4-23.2-23.2c0-9.2 5.8-17.3 13.2-22.8c11.6-8.7 18.8-20.7 18.8-34c0-26.5-28.7-48-64-48s-64 21.5-64 48c0 13.3 7.2 25.3 18.8 34c7.4 5.5 13.2 13.6 13.2 22.8c0 12.8-10.4 23.2-23.2 23.2H48c-26.5 0-48-21.5-48-48V343.2C0 330.4 10.4 320 23.2 320c9.2 0 17.3 5.8 22.8 13.2C54.7 344.8 66.7 352 80 352c26.5 0 48-28.7 48-64s-21.5-64-48-64c-13.3 0-25.3 7.2-34 18.8C40.5 250.2 32.4 256 23.2 256C10.4 256 0 245.6 0 232.8V176c0-26.5 21.5-48 48-48H168.8c12.8 0 23.2-10.4 23.2-23.2z";e.mw={prefix:"fas",iconName:i,icon:[512,512,s,n,o]},e._X=e.mw},80058:(t,e,i)=>{"use strict";var 
s=i(61260);e.mw={prefix:s.prefix,iconName:s.iconName,icon:[s.width,s.height,s.aliases,s.unicode,s.svgPathData]},e.fK=e.mw,s.prefix,s.iconName,s.width,s.height,s.aliases,s.unicode,s.svgPathData,s.aliases},46534:(t,e)=>{"use strict";Object.defineProperty(e,"__esModule",{value:!0});var i="right-left",s=["exchange-alt"],n="f362",o="M32 96l320 0V32c0-12.9 7.8-24.6 19.8-29.6s25.7-2.2 34.9 6.9l96 96c6 6 9.4 14.1 9.4 22.6s-3.4 16.6-9.4 22.6l-96 96c-9.2 9.2-22.9 11.9-34.9 6.9s-19.8-16.6-19.8-29.6V160L32 160c-17.7 0-32-14.3-32-32s14.3-32 32-32zM480 352c17.7 0 32 14.3 32 32s-14.3 32-32 32H160v64c0 12.9-7.8 24.6-19.8 29.6s-25.7 2.2-34.9-6.9l-96-96c-6-6-9.4-14.1-9.4-22.6s3.4-16.6 9.4-22.6l96-96c9.2-9.2 22.9-11.9 34.9-6.9s19.8 16.6 19.8 29.6l0 64H480z";e.definition={prefix:"fas",iconName:i,icon:[512,512,s,n,o]},e.faRightLeft=e.definition,e.prefix="fas",e.iconName=i,e.width=512,e.height=512,e.ligatures=s,e.unicode=n,e.svgPathData=o,e.aliases=s},35074:(t,e)=>{"use strict";var i="server",s=[],n="f233",o="M64 32C28.7 32 0 60.7 0 96v64c0 35.3 28.7 64 64 64H448c35.3 0 64-28.7 64-64V96c0-35.3-28.7-64-64-64H64zm280 72a24 24 0 1 1 0 48 24 24 0 1 1 0-48zm48 24a24 24 0 1 1 48 0 24 24 0 1 1 -48 0zM64 288c-35.3 0-64 28.7-64 64v64c0 35.3 28.7 64 64 64H448c35.3 0 64-28.7 64-64V352c0-35.3-28.7-64-64-64H64zm280 72a24 24 0 1 1 0 48 24 24 0 1 1 0-48zm56 24a24 24 0 1 1 48 0 24 24 0 1 1 -48 0z";e.mw={prefix:"fas",iconName:i,icon:[512,512,s,n,o]},e.D6=e.mw},66248:(t,e)=>{"use strict";var i="shield",s=[128737,"shield-blank"],n="f132",o="M256 0c4.6 0 9.2 1 13.4 2.9L457.7 82.8c22 9.3 38.4 31 38.3 57.2c-.5 99.2-41.3 280.7-213.6 363.2c-16.7 8-36.1 8-52.8 0C57.3 420.7 16.5 239.2 16 140c-.1-26.2 16.3-47.9 38.3-57.2L242.7 2.9C246.8 1 251.4 0 256 0z";e.mw={prefix:"fas",iconName:i,icon:[512,512,s,n,o]},e.V2=e.mw},83697:(t,e,i)=>{"use strict";var s=i(7876);e.mw={prefix:s.prefix,iconName:s.iconName,icon:[s.width,s.height,s.aliases,s.unicode,s.svgPathData]},e.im=e.mw,s.prefix,s.iconName,s.width,s.height,s.aliases,s.unicode,s.svgPathData,s.aliases},7876:(t,e)=>{"use strict";Object.defineProperty(e,"__esModule",{value:!0});var i="shield-halved",s=["shield-alt"],n="f3ed",o="M256 0c4.6 0 9.2 1 13.4 2.9L457.7 82.8c22 9.3 38.4 31 38.3 57.2c-.5 99.2-41.3 280.7-213.6 363.2c-16.7 8-36.1 8-52.8 0C57.3 420.7 16.5 239.2 16 140c-.1-26.2 16.3-47.9 38.3-57.2L242.7 2.9C246.8 1 251.4 0 256 0zm0 66.8V444.8C394 378 431.1 230.1 432 141.4L256 66.8l0 0z";e.definition={prefix:"fas",iconName:i,icon:[512,512,s,n,o]},e.faShieldHalved=e.definition,e.prefix="fas",e.iconName=i,e.width=512,e.height=512,e.ligatures=s,e.unicode=n,e.svgPathData=o,e.aliases=s},61260:(t,e)=>{"use strict";Object.defineProperty(e,"__esModule",{value:!0});var i="shuffle",s=[128256,"random"],n="f074",o="M403.8 34.4c12-5 25.7-2.2 34.9 6.9l64 64c6 6 9.4 14.1 9.4 22.6s-3.4 16.6-9.4 22.6l-64 64c-9.2 9.2-22.9 11.9-34.9 6.9s-19.8-16.6-19.8-29.6V160H352c-10.1 0-19.6 4.7-25.6 12.8L284 229.3 244 176l31.2-41.6C293.3 110.2 321.8 96 352 96h32V64c0-12.9 7.8-24.6 19.8-29.6zM164 282.7L204 336l-31.2 41.6C154.7 401.8 126.2 416 96 416H32c-17.7 0-32-14.3-32-32s14.3-32 32-32H96c10.1 0 19.6-4.7 25.6-12.8L164 282.7zm274.6 188c-9.2 9.2-22.9 11.9-34.9 6.9s-19.8-16.6-19.8-29.6V416H352c-30.2 0-58.7-14.2-76.8-38.4L121.6 172.8c-6-8.1-15.5-12.8-25.6-12.8H32c-17.7 0-32-14.3-32-32s14.3-32 32-32H96c30.2 0 58.7 14.2 76.8 38.4L326.4 339.2c6 8.1 15.5 12.8 25.6 12.8h32V320c0-12.9 7.8-24.6 19.8-29.6s25.7-2.2 34.9 6.9l64 64c6 6 9.4 14.1 9.4 22.6s-3.4 16.6-9.4 22.6l-64 
64z";e.definition={prefix:"fas",iconName:i,icon:[512,512,s,n,o]},e.faShuffle=e.definition,e.prefix="fas",e.iconName=i,e.width=512,e.height=512,e.ligatures=s,e.unicode=n,e.svgPathData=o,e.aliases=s},45386:(t,e)=>{"use strict";var i="sitemap",s=[],n="f0e8",o="M208 80c0-26.5 21.5-48 48-48h64c26.5 0 48 21.5 48 48v64c0 26.5-21.5 48-48 48h-8v40H464c30.9 0 56 25.1 56 56v32h8c26.5 0 48 21.5 48 48v64c0 26.5-21.5 48-48 48H464c-26.5 0-48-21.5-48-48V368c0-26.5 21.5-48 48-48h8V288c0-4.4-3.6-8-8-8H312v40h8c26.5 0 48 21.5 48 48v64c0 26.5-21.5 48-48 48H256c-26.5 0-48-21.5-48-48V368c0-26.5 21.5-48 48-48h8V280H112c-4.4 0-8 3.6-8 8v32h8c26.5 0 48 21.5 48 48v64c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V368c0-26.5 21.5-48 48-48h8V288c0-30.9 25.1-56 56-56H264V192h-8c-26.5 0-48-21.5-48-48V80z";e.mw={prefix:"fas",iconName:i,icon:[576,512,s,n,o]},e.FF=e.mw},85713:(t,e)=>{"use strict";Object.defineProperty(e,"__esModule",{value:!0});var i="table-cells-large",s=["th-large"],n="f009",o="M448 96V224H288V96H448zm0 192V416H288V288H448zM224 224H64V96H224V224zM64 288H224V416H64V288zM64 32C28.7 32 0 60.7 0 96V416c0 35.3 28.7 64 64 64H448c35.3 0 64-28.7 64-64V96c0-35.3-28.7-64-64-64H64z";e.definition={prefix:"fas",iconName:i,icon:[512,512,s,n,o]},e.faTableCellsLarge=e.definition,e.prefix="fas",e.iconName=i,e.width=512,e.height=512,e.ligatures=s,e.unicode=n,e.svgPathData=o,e.aliases=s},54416:(t,e,i)=>{"use strict";var s=i(44822);e.mw={prefix:s.prefix,iconName:s.iconName,icon:[s.width,s.height,s.aliases,s.unicode,s.svgPathData]},e.xi=e.mw,s.prefix,s.iconName,s.width,s.height,s.aliases,s.unicode,s.svgPathData,s.aliases},1705:(t,e)=>{"use strict";var i=[127991],s="f02b",n="M0 80V229.5c0 17 6.7 33.3 18.7 45.3l176 176c25 25 65.5 25 90.5 0L418.7 317.3c25-25 25-65.5 0-90.5l-176-176c-12-12-28.3-18.7-45.3-18.7H48C21.5 32 0 53.5 0 80zm112 32a32 32 0 1 1 0 64 32 32 0 1 1 0-64z";e.mw={prefix:"fas",iconName:"tag",icon:[448,512,i,s,n]},e._2=e.mw},38055:(t,e,i)=>{"use strict";var s=i(90501);e.mw={prefix:s.prefix,iconName:s.iconName,icon:[s.width,s.height,s.aliases,s.unicode,s.svgPathData]},e.Ag=e.mw,s.prefix,s.iconName,s.width,s.height,s.aliases,s.unicode,s.svgPathData,s.aliases},72950:(t,e)=>{"use strict";Object.defineProperty(e,"__esModule",{value:!0});var i="temperature-half",s=[127777,"temperature-2","thermometer-2","thermometer-half"],n="f2c9",o="M160 64c-26.5 0-48 21.5-48 48V276.5c0 17.3-7.1 31.9-15.3 42.5C86.2 332.6 80 349.5 80 368c0 44.2 35.8 80 80 80s80-35.8 80-80c0-18.5-6.2-35.4-16.7-48.9c-8.2-10.6-15.3-25.2-15.3-42.5V112c0-26.5-21.5-48-48-48zM48 112C48 50.2 98.1 0 160 0s112 50.1 112 112V276.5c0 .1 .1 .3 .2 .6c.2 .6 .8 1.6 1.7 2.8c18.9 24.4 30.1 55 30.1 88.1c0 79.5-64.5 144-144 144S16 447.5 16 368c0-33.2 11.2-63.8 30.1-88.1c.9-1.2 1.5-2.2 1.7-2.8c.1-.3 .2-.5 .2-.6V112zM208 368c0 26.5-21.5 48-48 48s-48-21.5-48-48c0-20.9 13.4-38.7 32-45.3V208c0-8.8 7.2-16 16-16s16 7.2 16 16V322.7c18.6 6.6 32 24.4 32 45.3z";e.definition={prefix:"fas",iconName:i,icon:[320,512,s,n,o]},e.faTemperatureHalf=e.definition,e.prefix="fas",e.iconName=i,e.width=320,e.height=512,e.ligatures=s,e.unicode=n,e.svgPathData=o,e.aliases=s},59582:(t,e,i)=>{"use strict";var s=i(85713);e.mw={prefix:s.prefix,iconName:s.iconName,icon:[s.width,s.height,s.aliases,s.unicode,s.svgPathData]},e.l9=e.mw,s.prefix,s.iconName,s.width,s.height,s.aliases,s.unicode,s.svgPathData,s.aliases},25436:(t,e,i)=>{"use strict";var 
s=i(72950);e.mw={prefix:s.prefix,iconName:s.iconName,icon:[s.width,s.height,s.aliases,s.unicode,s.svgPathData]},e.Ro=e.mw,s.prefix,s.iconName,s.width,s.height,s.aliases,s.unicode,s.svgPathData,s.aliases},42469:(t,e)=>{"use strict";var i="user",s=[128100,62144],n="f007",o="M224 256A128 128 0 1 0 224 0a128 128 0 1 0 0 256zm-45.7 48C79.8 304 0 383.8 0 482.3C0 498.7 13.3 512 29.7 512H418.3c16.4 0 29.7-13.3 29.7-29.7C448 383.8 368.2 304 269.7 304H178.3z";e.mw={prefix:"fas",iconName:i,icon:[448,512,s,n,o]},e.X4=e.mw},39281:(t,e)=>{"use strict";var i="users",s=[],n="f0c0",o="M144 0a80 80 0 1 1 0 160A80 80 0 1 1 144 0zM512 0a80 80 0 1 1 0 160A80 80 0 1 1 512 0zM0 298.7C0 239.8 47.8 192 106.7 192h42.7c15.9 0 31 3.5 44.6 9.7c-1.3 7.2-1.9 14.7-1.9 22.3c0 38.2 16.8 72.5 43.3 96c-.2 0-.4 0-.7 0H21.3C9.6 320 0 310.4 0 298.7zM405.3 320c-.2 0-.4 0-.7 0c26.6-23.5 43.3-57.8 43.3-96c0-7.6-.7-15-1.9-22.3c13.6-6.3 28.7-9.7 44.6-9.7h42.7C592.2 192 640 239.8 640 298.7c0 11.8-9.6 21.3-21.3 21.3H405.3zM224 224a96 96 0 1 1 192 0 96 96 0 1 1 -192 0zM128 485.3C128 411.7 187.7 352 261.3 352H378.7C452.3 352 512 411.7 512 485.3c0 14.7-11.9 26.7-26.7 26.7H154.7c-14.7 0-26.7-11.9-26.7-26.7z";e.mw={prefix:"fas",iconName:i,icon:[640,512,s,n,o]},e.gd=e.mw},54264:(t,e)=>{"use strict";var i="wifi",s=["wifi-3","wifi-strong"],n="f1eb",o="M54.2 202.9C123.2 136.7 216.8 96 320 96s196.8 40.7 265.8 106.9c12.8 12.2 33 11.8 45.2-.9s11.8-33-.9-45.2C549.7 79.5 440.4 32 320 32S90.3 79.5 9.8 156.7C-2.9 169-3.3 189.2 8.9 202s32.5 13.2 45.2 .9zM320 256c56.8 0 108.6 21.1 148.2 56c13.3 11.7 33.5 10.4 45.2-2.8s10.4-33.5-2.8-45.2C459.8 219.2 393 192 320 192s-139.8 27.2-190.5 72c-13.3 11.7-14.5 31.9-2.8 45.2s31.9 14.5 45.2 2.8c39.5-34.9 91.3-56 148.2-56zm64 160a64 64 0 1 0 -128 0 64 64 0 1 0 128 0z";e.mw={prefix:"fas",iconName:i,icon:[640,512,s,n,o]},e.Bw=e.mw},87133:t=>{t.exports=function(t,e,i){return t===t&&(void 0!==i&&(t=t<=i?t:i),void 0!==e&&(t=t>=e?t:e)),t}},54128:(t,e,i)=>{var s=i(31800),n=/^\s+/;t.exports=function(t){return t?t.slice(0,s(t)+1).replace(n,""):t}},55765:(t,e,i)=>{var s=i(38859),n=i(15325),o=i(29905),a=i(19219),r=i(44517),c=i(84247);t.exports=function(t,e,i){var h=-1,l=n,d=t.length,u=!0,f=[],g=f;if(i)u=!1,l=o;else if(d>=200){var p=e?null:r(t);if(p)return c(p);u=!1,l=a,g=new s}else g=e?[]:f;t:for(;++h{var s=i(76545),n=i(63950),o=i(84247),a=s&&1/o(new s([,-0]))[1]==1/0?function(t){return new s(t)}:n;t.exports=a},31800:t=>{var e=/\s/;t.exports=function(t){for(var i=t.length;i--&&e.test(t.charAt(i)););return i}},78659:(t,e,i)=>{var s=i(87133),n=i(99374);t.exports=function(t,e,i){return void 0===i&&(i=e,e=void 0),void 0!==i&&(i=(i=n(i))===i?i:0),void 0!==e&&(e=(e=n(e))===e?e:0),s(n(t),e,i)}},62193:(t,e,i)=>{var s=i(88984),n=i(5861),o=i(72428),a=i(56449),r=i(64894),c=i(3656),h=i(55527),l=i(37167),d=Object.prototype.hasOwnProperty;t.exports=function(t){if(null==t)return!0;if(r(t)&&(a(t)||"string"==typeof t||"function"==typeof t.splice||c(t)||l(t)||o(t)))return!t.length;var e=n(t);if("[object Map]"==e||"[object Set]"==e)return!t.size;if(h(t))return!s(t).length;for(var i in t)if(d.call(t,i))return!1;return!0}},99374:(t,e,i)=>{var s=i(54128),n=i(23805),o=i(44394),a=/^[-+]0x[0-9a-f]+$/i,r=/^0b[01]+$/i,c=/^0o[0-7]+$/i,h=parseInt;t.exports=function(t){if("number"==typeof t)return t;if(o(t))return NaN;if(n(t)){var e="function"==typeof t.valueOf?t.valueOf():t;t=n(e)?e+"":e}if("string"!=typeof t)return 0===t?t:+t;t=s(t);var i=r.test(t);return i||c.test(t)?h(t.slice(2),i?2:8):a.test(t)?NaN:+t}},21283:(t,e,i)=>{var 
s=i(83120),n=i(69302),o=i(55765),a=i(83693),r=i(68090),c=n((function(t){var e=r(t);return e="function"==typeof e?e:void 0,o(s(t,1,a,!0),void 0,e)}));t.exports=c},92677:function(t,e,i){!function(t,e){"use strict";function i(t){return t*Math.PI/180}function s(t,e,i){return t>i?i:t=0||(n[i]=t[i]);return n}function f(t){var i=t.renderLabel,s=t.labelProps,n=i(s);if("string"===typeof n||"number"===typeof n){s.dataEntry,s.dataIndex;var o=u(s,["dataEntry","dataIndex"]);return e.createElement("text",Object.assign({dominantBaseline:"central"},o),n)}return e.isValidElement(n)?n:null}function g(t){var e=1e14;return Math.round((t+Number.EPSILON)*e)/e}function p(t){var e=t.labelPosition,i=t.lineWidth,s=g(t.labelHorizontalShift);return 0===s?"middle":e>100?s>0?"start":"end":e<100-i?s>0?"end":"start":"middle"}function m(t,e){return t.map((function(t,i){var s,r=null!=(s=c(e.segmentsShift,i))?s:0,h=n(e.radius,e.labelPosition)+r,l=a(o(t.startAngle,t.degrees),h),d=l.dx,u=l.dy;return{x:e.center[0],y:e.center[1],dx:d,dy:u,textAnchor:p({labelPosition:e.labelPosition,lineWidth:e.lineWidth,labelHorizontalShift:d}),dataEntry:t,dataIndex:i,style:c(e.labelStyle,i)}}))}function x(t,i){var s=i.label;if(s)return m(t,i).map((function(t,i){return e.createElement(f,{key:"label-"+(t.dataEntry.key||i),renderLabel:s,labelProps:t})}))}var b=function(t,e,i,s,n){var o=n-s;if(0===o)return[];var a=i*Math.cos(s)+t,r=i*Math.sin(s)+e,c=i*Math.cos(n)+t,h=i*Math.sin(n)+e;return[["M",a,r],["A",i,i,0,Math.abs(o)<=Math.PI?"0":"1",o<0?"0":"1",c,h]]};function v(t,e,n,o,a){var r=s(o,-359.999,359.999);return b(t,e,a,i(n),i(n+r)).map((function(t){return t.join(" ")})).join(" ")}function _(t){var s,c,h=t.cx,l=t.cy,d=t.lengthAngle,f=t.lineWidth,g=t.radius,p=t.shift,m=void 0===p?0:p,x=t.reveal,b=t.rounded,_=t.startAngle,y=t.title,w=u(t,["cx","cy","lengthAngle","lineWidth","radius","shift","reveal","rounded","startAngle","title"]),M=g-f/2,k=a(o(_,d),m),S=v(h+k.dx,l+k.dy,_,d,M);if(r(x)){var C=i(M)*d;c=(s=Math.abs(C))-n(s,x)}return e.createElement("path",Object.assign({d:S,fill:"none",strokeWidth:f,strokeDasharray:s,strokeDashoffset:c,strokeLinecap:b?"round":void 0},w),y&&e.createElement("title",null,y))}function y(t,e,i){var s="stroke-dashoffset "+t+"ms "+e;return i&&i.transition&&(s=s+","+i.transition),{transition:s}}function w(t){return t.animate&&!r(t.reveal)?100:t.reveal}function M(t,e){return t&&function(i){t(i,e)}}function k(t,i,s){var o=null!=s?s:w(i),a=i.radius,r=i.center,h=r[0],l=r[1],d=n(a,i.lineWidth),u=t.map((function(t,s){var n=c(i.segmentsStyle,s);return e.createElement(_,{cx:h,cy:l,key:t.key||s,lengthAngle:t.degrees,lineWidth:d,radius:a,rounded:i.rounded,reveal:o,shift:c(i.segmentsShift,s),startAngle:t.startAngle,title:t.title,style:Object.assign({},n,i.animate&&y(i.animationDuration,i.animationEasing,n)),stroke:t.color,tabIndex:i.segmentsTabIndex,onBlur:M(i.onBlur,s),onClick:M(i.onClick,s),onFocus:M(i.onFocus,s),onKeyDown:M(i.onKeyDown,s),onMouseOver:M(i.onMouseOver,s),onMouseOut:M(i.onMouseOut,s)})}));return i.background&&u.unshift(e.createElement(_,{cx:h,cy:l,key:"bg",lengthAngle:i.lengthAngle,lineWidth:d,radius:a,rounded:i.rounded,startAngle:i.startAngle,stroke:i.background})),u}var S={animationDuration:500,animationEasing:"ease-out",center:[50,50],data:[],labelPosition:50,lengthAngle:360,lineWidth:100,paddingAngle:0,radius:50,startAngle:0,viewBoxSize:[100,100]};function C(t){var i=h(t,S),s=e.useState(i.animate?0:null),n=s[0],o=s[1];e.useEffect((function(){i.animate&&o(null)}),[]);var a=d(i);return 
e.createElement("svg",{viewBox:"0 0 "+i.viewBoxSize[0]+" "+i.viewBoxSize[1],width:"100%",height:"100%",className:i.className,style:i.style},k(a,i,n),x(a,i),i.children)}t.PieChart=C,t.pieChartDefaultProps=S}(e,i(96540))},62709:(t,e,i)=>{"use strict";i.r(e),i.d(e,{default:()=>c});var s=i(12897),n=i.n(s),o=i(55042),a=i.n(o),r=new(n())({id:"alarm_bell",use:"alarm_bell-usage",viewBox:"0 0 12 14",content:''});a().add(r);const c=r},2386:(t,e,i)=>{"use strict";i.r(e),i.d(e,{default:()=>c});var s=i(12897),n=i.n(s),o=i(55042),a=i.n(o),r=new(n())({id:"settings_h",use:"settings_h-usage",viewBox:"0 0 14 14",content:''});a().add(r);const c=r},24074:(t,e,i)=>{"use strict";var s=i(69565),n=i(28551),o=i(2360),a=i(55966),r=i(56279),c=i(91181),h=i(97751),l=i(53982),d=i(62529),u=h("Promise"),f="AsyncFromSyncIterator",g=c.set,p=c.getterFor(f),m=function(t,e,i){var s=t.done;u.resolve(t.value).then((function(t){e(d(t,s))}),i)},x=function(t){t.type=f,g(this,t)};x.prototype=r(o(l),{next:function(){var t=p(this);return new u((function(e,i){var o=n(s(t.next,t.iterator));m(o,e,i)}))},return:function(){var t=p(this).iterator;return new u((function(e,i){var o=a(t,"return");if(void 0===o)return e(d(void 0,!0));var r=n(s(o,t));m(r,e,i)}))}}),t.exports=x},84428:(t,e,i)=>{"use strict";var s=i(78227)("iterator"),n=!1;try{var o=0,a={next:function(){return{done:!!o++}},return:function(){n=!0}};a[s]=function(){return this},Array.from(a,(function(){throw 2}))}catch(r){}t.exports=function(t,e){try{if(!e&&!n)return!1}catch(r){return!1}var i=!1;try{var o={};o[s]=function(){return{next:function(){return{done:i=!0}}}},t(o)}catch(r){}return i}},87290:(t,e,i)=>{"use strict";var s=i(50516),n=i(19088);t.exports=!s&&!n&&"object"==typeof window&&"object"==typeof document},50516:t=>{"use strict";t.exports="object"==typeof Deno&&Deno&&"object"==typeof Deno.version},19088:(t,e,i)=>{"use strict";var s=i(24475),n=i(44576);t.exports="process"===n(s.process)},50133:(t,e,i)=>{"use strict";var s=i(69565),n=i(94901),o=i(28551),a=i(1767),r=i(50851),c=i(55966),h=i(78227),l=i(24074),d=h("asyncIterator");t.exports=function(t){var e,i=o(t),h=!0,u=c(i,d);return n(u)||(u=r(i),h=!1),void 0!==u?e=s(u,i):(e=i,h=!0),o(e),a(h?e:new l(a(e)))}},48646:(t,e,i)=>{"use strict";var s=i(69565),n=i(28551),o=i(1767),a=i(50851);t.exports=function(t,e){e&&"string"===typeof t||n(t);var i=a(t);return o(n(void 0!==i?s(i,t):t))}},10916:(t,e,i)=>{"use strict";var s=i(24475),n=i(80550),o=i(94901),a=i(92796),r=i(33706),c=i(78227),h=i(87290),l=i(50516),d=i(96395),u=i(77388),f=n&&n.prototype,g=c("species"),p=!1,m=o(s.PromiseRejectionEvent),x=a("Promise",(function(){var t=r(n),e=t!==String(n);if(!e&&66===u)return!0;if(d&&(!f.catch||!f.finally))return!0;if(!u||u<51||!/native code/.test(t)){var i=new n((function(t){t(1)})),s=function(t){t((function(){}),(function(){}))};if((i.constructor={})[g]=s,!(p=i.then((function(){}))instanceof s))return!0}return!e&&(h||l)&&!m}));t.exports={CONSTRUCTOR:x,REJECTION_EVENT:m,SUBCLASSING:p}},90537:(t,e,i)=>{"use strict";var s=i(80550),n=i(84428),o=i(10916).CONSTRUCTOR;t.exports=o||!n((function(t){s.all(t).then(void 0,(function(){}))}))},30237:(t,e,i)=>{"use strict";i(6469)("flatMap")},96167:(t,e,i)=>{"use strict";var s=i(46518),n=i(69565),o=i(79306),a=i(36043),r=i(1103),c=i(72652);s({target:"Promise",stat:!0,forced:i(90537)},{allSettled:function(t){var e=this,i=a.f(e),s=i.resolve,h=i.reject,l=r((function(){var i=o(e.resolve),a=[],r=0,h=1;c(t,(function(t){var 
o=r++,c=!1;h++,n(i,e,t).then((function(t){c||(c=!0,a[o]={status:"fulfilled",value:t},--h||s(a))}),(function(t){c||(c=!0,a[o]={status:"rejected",reason:t},--h||s(a))}))})),--h||s(a)}));return l.error&&h(l.value),i.promise}})},32679:(t,e,i)=>{"use strict";var s=i(46518),n=i(69565),o=i(79306),a=i(28551),r=i(20034),c=i(1767),h=i(92059),l=i(62529),d=i(50133),u=i(20772),f=i(96395),g=h((function(t){var e=this,i=e.iterator,s=e.mapper;return new t((function(o,c){var h=function(t){e.done=!0,c(t)},f=function(t){u(i,h,t,h)},g=function(){try{t.resolve(a(n(e.next,i))).then((function(i){try{if(a(i).done)e.done=!0,o(l(void 0,!0));else{var n=i.value;try{var c=s(n,e.counter++),u=function(t){try{e.inner=d(t),p()}catch(i){f(i)}};r(c)?t.resolve(c).then(u,f):u(c)}catch(g){f(g)}}}catch(m){h(m)}}),h)}catch(c){h(c)}},p=function(){var i=e.inner;if(i)try{t.resolve(a(n(i.next,i.iterator))).then((function(t){try{a(t).done?(e.inner=null,g()):o(l(t.value,!1))}catch(i){f(i)}}),f)}catch(s){f(s)}else g()};p()}))}));s({target:"AsyncIterator",proto:!0,real:!0,forced:f},{flatMap:function(t){return a(this),o(t),new g(c(this),{mapper:t,inner:null})}})},30670:(t,e,i)=>{"use strict";var s=i(46518),n=i(69565),o=i(79306),a=i(28551),r=i(1767),c=i(48646),h=i(19462),l=i(9539),d=i(96395),u=h((function(){for(var t,e,i=this.iterator,s=this.mapper;;){if(e=this.inner)try{if(!(t=a(n(e.next,e.iterator))).done)return t.value;this.inner=null}catch(o){l(i,"throw",o)}if(t=a(n(this.next,i)),this.done=!!t.done)return;try{this.inner=c(s(t.value,this.counter++),!1)}catch(o){l(i,"throw",o)}}}));s({target:"Iterator",proto:!0,real:!0,forced:d},{flatMap:function(t){return a(this),o(t),new u(r(this),{mapper:t,inner:null})}})},66118:(t,e,i)=>{"use strict";i.d(e,{A6:()=>D,E8:()=>ke,FN:()=>xe,Hg:()=>bt,No:()=>pe,PP:()=>Ue,Qw:()=>h,UA:()=>si,ZT:()=>L,ij:()=>T,kc:()=>qe,m_:()=>We,s$:()=>ze,t1:()=>oe});var s=i(12020);class n{constructor(){this._request=null,this._charts=new Map,this._running=!1,this._lastDate=void 0}_notify(t,e,i,s){const n=e.listeners[s],o=e.duration;n.forEach((s=>s({chart:t,initial:e.initial,numSteps:o,currentStep:Math.min(i-e.start,o)})))}_refresh(){this._request||(this._running=!0,this._request=s.r.call(window,(()=>{this._update(),this._request=null,this._running&&this._refresh()})))}_update(t=Date.now()){let e=0;this._charts.forEach(((i,s)=>{if(!i.running||!i.items.length)return;const n=i.items;let o,a=n.length-1,r=!1;for(;a>=0;--a)o=n[a],o._active?(o._total>i.duration&&(i.duration=o._total),o.tick(t),r=!0):(n[a]=n[n.length-1],n.pop());r&&(s.draw(),this._notify(s,i,t,"progress")),n.length||(i.running=!1,this._notify(s,i,t,"complete"),i.initial=!1),e+=n.length})),this._lastDate=t,0===e&&(this._running=!1)}_getAnims(t){const e=this._charts;let i=e.get(t);return i||(i={running:!1,initial:!0,items:[],listeners:{complete:[],progress:[]}},e.set(t,i)),i}listen(t,e,i){this._getAnims(t).listeners[e].push(i)}add(t,e){e&&e.length&&this._getAnims(t).items.push(...e)}has(t){return this._getAnims(t).items.length>0}start(t){const e=this._charts.get(t);e&&(e.running=!0,e.start=Date.now(),e.duration=e.items.reduce(((t,e)=>Math.max(t,e._duration)),0),this._refresh())}running(t){if(!this._running)return!1;const e=this._charts.get(t);return!!(e&&e.running&&e.items.length)}stop(t){const e=this._charts.get(t);if(!e||!e.items.length)return;const i=e.items;let s=i.length-1;for(;s>=0;--s)i[s].cancel();e.items=[],this._notify(t,e,Date.now(),"complete")}remove(t){return this._charts.delete(t)}}var o=new n;const 
a="transparent",r={boolean:(t,e,i)=>i>.5?e:t,color(t,e,i){const n=(0,s.c)(t||a),o=n.valid&&(0,s.c)(e||a);return o&&o.valid?o.mix(n,i).hexString():e},number:(t,e,i)=>t+(e-t)*i};class c{constructor(t,e,i,n){const o=e[i];n=(0,s.a)([t.to,n,o,t.from]);const a=(0,s.a)([t.from,o,n]);this._active=!0,this._fn=t.fn||r[t.type||typeof a],this._easing=s.e[t.easing]||s.e.linear,this._start=Math.floor(Date.now()+(t.delay||0)),this._duration=this._total=Math.floor(t.duration),this._loop=!!t.loop,this._target=e,this._prop=i,this._from=a,this._to=n,this._promises=void 0}active(){return this._active}update(t,e,i){if(this._active){this._notify(!1);const n=this._target[this._prop],o=i-this._start,a=this._duration-o;this._start=i,this._duration=Math.floor(Math.max(a,t.duration)),this._total+=o,this._loop=!!t.loop,this._to=(0,s.a)([t.to,e,n,t.from]),this._from=(0,s.a)([t.from,n,e])}}cancel(){this._active&&(this.tick(Date.now()),this._active=!1,this._notify(!1))}tick(t){const e=t-this._start,i=this._duration,s=this._prop,n=this._from,o=this._loop,a=this._to;let r;if(this._active=n!==a&&(o||e1?2-r:r,r=this._easing(Math.min(1,Math.max(0,r))),this._target[s]=this._fn(n,a,r))}wait(){const t=this._promises||(this._promises=[]);return new Promise(((e,i)=>{t.push({res:e,rej:i})}))}_notify(t){const e=t?"res":"rej",i=this._promises||[];for(let s=0;s{const o=t[n];if(!(0,s.i)(o))return;const a={};for(const t of e)a[t]=o[t];((0,s.b)(o.properties)&&o.properties||[n]).forEach((t=>{t!==n&&i.has(t)||i.set(t,a)}))}))}_animateOptions(t,e){const i=e.options,s=function(t,e){if(!e)return;let i=t.options;if(!i)return void(t.options=e);i.$shared&&(t.options=i=Object.assign({},i,{$shared:!1,$animations:{}}));return i}(t,i);if(!s)return[];const n=this._createAnimations(s,i);return i.$shared&&function(t,e){const i=[],s=Object.keys(e);for(let n=0;n{t.options=i}),(()=>{})),n}_createAnimations(t,e){const i=this._properties,s=[],n=t.$animations||(t.$animations={}),o=Object.keys(e),a=Date.now();let r;for(r=o.length-1;r>=0;--r){const h=o[r];if("$"===h.charAt(0))continue;if("options"===h){s.push(...this._animateOptions(t,e));continue}const l=e[h];let d=n[h];const u=i.get(h);if(d){if(u&&d.active()){d.update(u,l,a);continue}d.cancel()}u&&u.duration?(n[h]=d=new c(u,t,h,l),s.push(d)):t[h]=l}return s}update(t,e){if(0===this._properties.size)return void Object.assign(t,e);const i=this._createAnimations(t,e);return i.length?(o.add(this._chart,i),!0):void 0}}function l(t,e){const i=t&&t.options||{},s=i.reverse,n=void 0===i.min?e:0,o=void 0===i.max?e:0;return{start:s?o:n,end:s?n:o}}function d(t,e){const i=[],s=t._getSortedDatasetMetas(e);let n,o;for(n=0,o=s.length;n0||!i&&e<0)return n.index}return null}function m(t,e){const{chart:i,_cachedMeta:s}=t,n=i._stacks||(i._stacks={}),{iScale:o,vScale:a,index:r}=s,c=o.axis,h=a.axis,l=function(t,e,i){return`${t.id}.${e.id}.${i.stack||i.type}`}(o,a,s),d=e.length;let u;for(let f=0;fi[t].axis===e)).shift()}function b(t,e){const i=t.controller.index,s=t.vScale&&t.vScale.axis;if(s){e=e||t._parsed;for(const t of e){const e=t._stacks;if(!e||void 0===e[s]||void 0===e[s][i])return;delete e[s][i],void 0!==e[s]._visualValues&&void 0!==e[s]._visualValues[i]&&delete e[s]._visualValues[i]}}}const v=t=>"reset"===t||"none"===t,_=(t,e)=>e?t:Object.assign({},t);class y{static defaults={};static datasetElementType=null;static dataElementType=null;constructor(t,e){this.chart=t,this._ctx=t.ctx,this.index=e,this._cachedDataOpts={},this._cachedMeta=this.getMeta(),this._type=this._cachedMeta.type,this.options=void 
0,this._parsing=!1,this._data=void 0,this._objectData=void 0,this._sharedOptions=void 0,this._drawStart=void 0,this._drawCount=void 0,this.enableOptionSharing=!1,this.supportsDecimation=!1,this.$context=void 0,this._syncList=[],this.datasetElementType=new.target.datasetElementType,this.dataElementType=new.target.dataElementType,this.initialize()}initialize(){const t=this._cachedMeta;this.configure(),this.linkScales(),t._stacked=f(t.vScale,t),this.addElements(),this.options.fill&&!this.chart.isPluginEnabled("filler")&&console.warn("Tried to use the 'fill' option without the 'Filler' plugin enabled. Please import and register the 'Filler' plugin and make sure it is not disabled in the options")}updateIndex(t){this.index!==t&&b(this._cachedMeta),this.index=t}linkScales(){const t=this.chart,e=this._cachedMeta,i=this.getDataset(),n=(t,e,i,s)=>"x"===t?e:"r"===t?s:i,o=e.xAxisID=(0,s.v)(i.xAxisID,x(t,"x")),a=e.yAxisID=(0,s.v)(i.yAxisID,x(t,"y")),r=e.rAxisID=(0,s.v)(i.rAxisID,x(t,"r")),c=e.indexAxis,h=e.iAxisID=n(c,o,a,r),l=e.vAxisID=n(c,a,o,r);e.xScale=this.getScaleForId(o),e.yScale=this.getScaleForId(a),e.rScale=this.getScaleForId(r),e.iScale=this.getScaleForId(h),e.vScale=this.getScaleForId(l)}getDataset(){return this.chart.data.datasets[this.index]}getMeta(){return this.chart.getDatasetMeta(this.index)}getScaleForId(t){return this.chart.scales[t]}_getOtherScale(t){const e=this._cachedMeta;return t===e.iScale?e.vScale:e.iScale}reset(){this._update("reset")}_destroy(){const t=this._cachedMeta;this._data&&(0,s.u)(this._data,this),t._stacked&&b(t)}_dataCheck(){const t=this.getDataset(),e=t.data||(t.data=[]),i=this._data;if((0,s.i)(e))this._data=function(t){const e=Object.keys(t),i=new Array(e.length);let s,n,o;for(s=0,n=e.length;s0&&i._parsed[t-1];if(!1===this._parsing)i._parsed=n,i._sorted=!0,l=n;else{l=(0,s.b)(n[t])?this.parseArrayData(i,n,t,e):(0,s.i)(n[t])?this.parseObjectData(i,n,t,e):this.parsePrimitiveData(i,n,t,e);const o=()=>null===h[r]||u&&h[r]t&&!e.hidden&&e._stacked&&{keys:d(i,!0),values:null})(e,i,this.chart),h={min:Number.POSITIVE_INFINITY,max:Number.NEGATIVE_INFINITY},{min:l,max:u}=function(t){const{min:e,max:i,minDefined:s,maxDefined:n}=t.getUserBounds();return{min:s?e:Number.NEGATIVE_INFINITY,max:n?i:Number.POSITIVE_INFINITY}}(r);let f,g;function p(){g=n[f];const e=g[r.axis];return!(0,s.g)(g[t.axis])||l>e||u=0;--f)if(!p()){this.updateRangeFromParsed(h,t,g,c);break}return h}getAllParsedValues(t){const e=this._cachedMeta._parsed,i=[];let n,o,a;for(n=0,o=e.length;n=0&&tthis.getContext(i,n,e)),d);return g.$shared&&(g.$shared=c,o[a]=Object.freeze(_(g,c))),g}_resolveAnimations(t,e,i){const s=this.chart,n=this._cachedDataOpts,o=`animation-${e}`,a=n[o];if(a)return a;let r;if(!1!==s.options.animation){const s=this.chart.config,n=s.datasetAnimationScopeKeys(this._type,e),o=s.getOptionScopes(this.getDataset(),n);r=s.createResolver(o,this.getContext(t,i,e))}const c=new h(s,r&&r.animations);return r&&r._cacheable&&(n[o]=Object.freeze(c)),c}getSharedOptions(t){if(t.$shared)return this._sharedOptions||(this._sharedOptions=Object.assign({},t))}includeOptions(t,e){return!e||v(t)||this.chart._animationsDisabled}_getSharedOptions(t,e){const i=this.resolveDataElementOptions(t,e),s=this._sharedOptions,n=this.getSharedOptions(i),o=this.includeOptions(e,n)||n!==s;return this.updateSharedOptions(n,e,i),{sharedOptions:n,includeOptions:o}}updateElement(t,e,i,s){v(s)?Object.assign(t,i):this._resolveAnimations(e,s).update(t,i)}updateSharedOptions(t,e,i){t&&!v(e)&&this._resolveAnimations(void 
0,e).update(t,i)}_setStyle(t,e,i,s){t.active=s;const n=this.getStyle(e,s);this._resolveAnimations(e,i,s).update(t,{options:!s&&this.getSharedOptions(n)||n})}removeHoverStyle(t,e,i){this._setStyle(t,i,"active",!1)}setHoverStyle(t,e,i){this._setStyle(t,i,"active",!0)}_removeDatasetHoverStyle(){const t=this._cachedMeta.dataset;t&&this._setStyle(t,void 0,"active",!1)}_setDatasetHoverStyle(){const t=this._cachedMeta.dataset;t&&this._setStyle(t,void 0,"active",!0)}_resyncElements(t){const e=this._data,i=this._cachedMeta.data;for(const[a,r,c]of this._syncList)this[a](r,c);this._syncList=[];const s=i.length,n=e.length,o=Math.min(n,s);o&&this.parse(0,o),n>s?this._insertElements(s,n-s,t):n{for(t.length+=e,a=t.length-1;a>=o;a--)t[a]=t[a-e]};for(r(n),a=t;at-e)))}return t._cache.$bar}(e,t.type);let n,o,a,r,c=e._length;const h=()=>{32767!==a&&-32768!==a&&((0,s.h)(r)&&(c=Math.min(c,Math.abs(a-r)||c)),r=a)};for(n=0,o=i.length;nMath.abs(r)&&(c=r,h=a),e[i.axis]=h,e._custom={barStart:c,barEnd:h,start:n,end:o,min:a,max:r}}(t,e,i,n):e[i.axis]=i.parse(t,n),e}function k(t,e,i,s){const n=t.iScale,o=t.vScale,a=n.getLabels(),r=n===o,c=[];let h,l,d,u;for(h=i,l=i+s;ht.x,i="left",s="right"):(e=t.baset.controller.options.grouped)),o=i.options.stacked,a=[],r=t=>{const i=t.controller.getParsed(e),n=i&&i[t.vScale.axis];if((0,s.k)(n)||isNaN(n))return!0};for(const s of n)if((void 0===e||!r(s))&&((!1===o||-1===a.indexOf(s.stack)||void 0===o&&void 0===s.stack)&&a.push(s.stack),s.index===t))break;return a.length||a.push(void 0),a}_getStackCount(t){return this._getStacks(void 0,t).length}_getStackIndex(t,e,i){const s=this._getStacks(t,i),n=void 0!==e?s.indexOf(e):-1;return-1===n?s.length-1:n}_getRuler(){const t=this.options,e=this._cachedMeta,i=e.iScale,s=[];let n,o;for(n=0,o=e.data.length;n=i?1:-1)}(u,e,r)*a,f===r&&(x-=u/2);const t=e.getPixelForDecimal(0),o=e.getPixelForDecimal(1),h=Math.min(t,o),g=Math.max(t,o);x=Math.max(Math.min(x,g),h),d=x+u,i&&!l&&(c._stacks[e.axis]._visualValues[n]=e.getValueForPixel(d)-e.getValueForPixel(x))}if(x===e.getPixelForValue(r)){const t=(0,s.s)(u)*e.getLineWidthForValue(r)/2;x+=t,u-=t}return{size:u,base:x,head:d,center:d+u/2}}_calculateBarIndexPixels(t,e){const i=e.scale,n=this.options,o=n.skipNull,a=(0,s.v)(n.maxBarThickness,1/0);let r,c;if(e.grouped){const i=o?this._getStackCount(t):e.stackCount,h="flex"===n.barThickness?function(t,e,i,s){const n=e.pixels,o=n[t];let a=t>0?n[t-1]:null,r=t0&&this.getParsed(e-1);for(let y=0;y=b){g.skip=!0;continue}const v=this.getParsed(y),w=(0,s.k)(v[f]),M=g[u]=a.getPixelForValue(v[u],y),k=g[f]=o||w?r.getBasePixel():r.getPixelForValue(c?this.applyStack(r,v,c):v[f],y);g.skip=isNaN(M)||isNaN(k)||w,g.stop=y>0&&Math.abs(v[u]-_[u])>m,p&&(g.parsed=v,g.raw=h.data[y]),d&&(g.options=l||this.resolveDataElementOptions(y,i.active?"active":n)),x||this.updateElement(i,y,g,n),_=v}}getMaxOverflow(){const t=this._cachedMeta,e=t.dataset,i=e.options&&e.options.borderWidth||0,s=t.data||[];if(!s.length)return i;const n=s[0].size(this.resolveDataElementOptions(0)),o=s[s.length-1].size(this.resolveDataElementOptions(s.length-1));return Math.max(i,n,o)/2}draw(){const t=this._cachedMeta;t.dataset.updateControlPoints(this.chart.chartArea,t.iScale.axis),super.draw()}}function A(){throw new Error("This method is not implemented: Check that a complete date adapter is provided.")}class H{static override(t){Object.assign(H.prototype,t)}options;constructor(t){this.options=t||{}}init(){}formats(){return A()}parse(){return A()}format(){return A()}add(){return A()}diff(){return 
A()}startOf(){return A()}endOf(){return A()}}var T={_date:H};function I(t,e,i,n){const{controller:o,data:a,_sorted:r}=t,c=o._cachedMeta.iScale;if(c&&e===c.axis&&"r"!==e&&r&&a.length){const t=c._reversePixels?s.A:s.B;if(!n)return t(a,e,i);if(o._sharedOptions){const s=a[0],n="function"===typeof s.getRange&&s.getRange(e);if(n){const s=t(a,e,i-n),o=t(a,e,i+n);return{lo:s.lo,hi:o.hi}}}}return{lo:0,hi:a.length-1}}function E(t,e,i,s,n){const o=t.getSortedVisibleDatasetMetas(),a=i[e];for(let r=0,c=o.length;r{t[a](e[i],n)&&(o.push({element:t,datasetIndex:s,index:c}),r=r||t.inRange(e.x,e.y,n))})),s&&!r?[]:o}var j={evaluateInteractionItems:E,modes:{index(t,e,i,n){const o=(0,s.z)(e,t),a=i.axis||"x",r=i.includeInvisible||!1,c=i.intersect?V(t,o,a,n,r):R(t,o,a,!1,n,r),h=[];return c.length?(t.getSortedVisibleDatasetMetas().forEach((t=>{const e=c[0].index,i=t.data[e];i&&!i.skip&&h.push({element:i,datasetIndex:t.index,index:e})})),h):[]},dataset(t,e,i,n){const o=(0,s.z)(e,t),a=i.axis||"xy",r=i.includeInvisible||!1;let c=i.intersect?V(t,o,a,n,r):R(t,o,a,!1,n,r);if(c.length>0){const e=c[0].datasetIndex,i=t.getDatasetMeta(e).data;c=[];for(let t=0;tV(t,(0,s.z)(e,t),i.axis||"xy",n,i.includeInvisible||!1),nearest(t,e,i,n){const o=(0,s.z)(e,t),a=i.axis||"xy",r=i.includeInvisible||!1;return R(t,o,a,i.intersect,n,r)},x:(t,e,i,n)=>N(t,(0,s.z)(e,t),"x",i.intersect,n),y:(t,e,i,n)=>N(t,(0,s.z)(e,t),"y",i.intersect,n)}};const B=["left","top","right","bottom"];function W(t,e){return t.filter((t=>t.pos===e))}function $(t,e){return t.filter((t=>-1===B.indexOf(t.pos)&&t.box.axis===e))}function Y(t,e){return t.sort(((t,i)=>{const s=e?i:t,n=e?t:i;return s.weight===n.weight?s.index-n.index:s.weight-n.weight}))}function U(t,e){const i=function(t){const e={};for(const i of t){const{stack:t,pos:s,stackWeight:n}=i;if(!t||!B.includes(s))continue;const o=e[t]||(e[t]={count:0,placed:0,weight:0,size:0});o.count++,o.weight+=n}return e}(t),{vBoxMaxWidth:s,hBoxMaxHeight:n}=e;let o,a,r;for(o=0,a=t.length;o{s[t]=Math.max(e[t],i[t])})),s}return s(t?["left","right"]:["top","bottom"])}function G(t,e,i,s){const n=[];let o,a,r,c,h,l;for(o=0,a=t.length,h=0;ot.box.fullSize)),!0),s=Y(W(e,"left"),!0),n=Y(W(e,"right")),o=Y(W(e,"top"),!0),a=Y(W(e,"bottom")),r=$(e,"x"),c=$(e,"y");return{fullSize:i,leftAndTop:s.concat(o),rightAndBottom:n.concat(c).concat(a).concat(r),chartArea:W(e,"chartArea"),vertical:s.concat(n).concat(c),horizontal:o.concat(a).concat(r)}}(t.boxes),h=c.vertical,l=c.horizontal;(0,s.F)(t.boxes,(t=>{"function"===typeof t.beforeLayout&&t.beforeLayout()}));const d=h.reduce(((t,e)=>e.box.options&&!1===e.box.options.display?t:t+1),0)||1,u=Object.freeze({outerWidth:e,outerHeight:i,padding:o,availableWidth:a,availableHeight:r,vBoxMaxWidth:a/2/d,hBoxMaxHeight:r/2}),f=Object.assign({},o);Q(f,(0,s.E)(n));const g=Object.assign({maxPadding:f,w:a,h:r,x:o.left,y:o.top},o),p=U(h.concat(l),u);G(c.fullSize,g,u,p),G(h,g,u,p),G(l,g,u,p)&&G(h,g,u,p),function(t){const e=t.maxPadding;function i(i){const s=Math.max(e[i]-t[i],0);return t[i]+=s,s}t.y+=i("top"),t.x+=i("left"),i("right"),i("bottom")}(g),J(c.leftAndTop,g,u,p),g.x+=g.w,g.y+=g.h,J(c.rightAndBottom,g,u,p),t.chartArea={left:g.left,top:g.top,right:g.left+g.w,bottom:g.top+g.h,height:g.h,width:g.w},(0,s.F)(c.chartArea,(e=>{const i=e.box;Object.assign(i,t.chartArea),i.update(g.w,g.h,{left:0,top:0,right:0,bottom:0})}))}};class et{acquireContext(t,e){}releaseContext(t){return!1}addEventListener(t,e,i){}removeEventListener(t,e,i){}getDevicePixelRatio(){return 1}getMaximumSize(t,e,i,s){return 
e=Math.max(0,e||t.width),i=i||t.height,{width:e,height:Math.max(0,s?Math.floor(e/s):i)}}isAttached(t){return!0}updateConfig(t){}}class it extends et{acquireContext(t){return t&&t.getContext&&t.getContext("2d")||null}updateConfig(t){t.options.animation=!1}}const st="$chartjs",nt={touchstart:"mousedown",touchmove:"mousemove",touchend:"mouseup",pointerenter:"mouseenter",pointerdown:"mousedown",pointermove:"mousemove",pointerup:"mouseup",pointerleave:"mouseout",pointerout:"mouseout"},ot=t=>null===t||""===t;const at=!!s.K&&{passive:!0};function rt(t,e,i){t&&t.canvas&&t.canvas.removeEventListener(e,i,at)}function ct(t,e){for(const i of t)if(i===e||i.contains(e))return!0}function ht(t,e,i){const s=t.canvas,n=new MutationObserver((t=>{let e=!1;for(const i of t)e=e||ct(i.addedNodes,s),e=e&&!ct(i.removedNodes,s);e&&i()}));return n.observe(document,{childList:!0,subtree:!0}),n}function lt(t,e,i){const s=t.canvas,n=new MutationObserver((t=>{let e=!1;for(const i of t)e=e||ct(i.removedNodes,s),e=e&&!ct(i.addedNodes,s);e&&i()}));return n.observe(document,{childList:!0,subtree:!0}),n}const dt=new Map;let ut=0;function ft(){const t=window.devicePixelRatio;t!==ut&&(ut=t,dt.forEach(((e,i)=>{i.currentDevicePixelRatio!==t&&e()})))}function gt(t,e,i){const n=t.canvas,o=n&&(0,s.I)(n);if(!o)return;const a=(0,s.L)(((t,e)=>{const s=o.clientWidth;i(t,e),s{const e=t[0],i=e.contentRect.width,s=e.contentRect.height;0===i&&0===s||a(i,s)}));return r.observe(o),function(t,e){dt.size||window.addEventListener("resize",ft),dt.set(t,e)}(t,a),r}function pt(t,e,i){i&&i.disconnect(),"resize"===e&&function(t){dt.delete(t),dt.size||window.removeEventListener("resize",ft)}(t)}function mt(t,e,i){const n=t.canvas,o=(0,s.L)((e=>{null!==t.ctx&&i(function(t,e){const i=nt[t.type]||t.type,{x:n,y:o}=(0,s.z)(t,e);return{type:i,chart:e,native:t,x:void 0!==n?n:null,y:void 0!==o?o:null}}(e,t))}),t);return function(t,e,i){t&&t.addEventListener(e,i,at)}(n,e,o),o}class xt extends et{acquireContext(t,e){const i=t&&t.getContext&&t.getContext("2d");return i&&i.canvas===t?(function(t,e){const i=t.style,n=t.getAttribute("height"),o=t.getAttribute("width");if(t[st]={initial:{height:n,width:o,style:{display:i.display,height:i.height,width:i.width}}},i.display=i.display||"block",i.boxSizing=i.boxSizing||"border-box",ot(o)){const e=(0,s.J)(t,"width");void 0!==e&&(t.width=e)}if(ot(n))if(""===t.style.height)t.height=t.width/(e||2);else{const e=(0,s.J)(t,"height");void 0!==e&&(t.height=e)}}(t,e),i):null}releaseContext(t){const e=t.canvas;if(!e[st])return!1;const i=e[st].initial;["height","width"].forEach((t=>{const n=i[t];(0,s.k)(n)?e.removeAttribute(t):e.setAttribute(t,n)}));const n=i.style||{};return Object.keys(n).forEach((t=>{e.style[t]=n[t]})),e.width=e.width,delete e[st],!0}addEventListener(t,e,i){this.removeEventListener(t,e);const s=t.$proxies||(t.$proxies={}),n={attach:ht,detach:lt,resize:gt}[e]||mt;s[e]=n(t,e,i)}removeEventListener(t,e){const i=t.$proxies||(t.$proxies={}),s=i[e];if(!s)return;({attach:pt,detach:pt,resize:pt}[e]||rt)(t,e,s),i[e]=void 0}getDevicePixelRatio(){return window.devicePixelRatio}getMaximumSize(t,e,i,n){return(0,s.G)(t,e,i,n)}isAttached(t){const e=(0,s.I)(t);return!(!e||!e.isConnected)}}class bt{static defaults={};static defaultRoutes=void 0;x;y;active=!1;options;$animations;tooltipPosition(t){const{x:e,y:i}=this.getProps(["x","y"],t);return{x:e,y:i}}hasValue(){return(0,s.x)(this.x)&&(0,s.x)(this.y)}getProps(t,e){const i=this.$animations;if(!e||!i)return this;const s={};return 
t.forEach((t=>{s[t]=i[t]&&i[t].active()?i[t]._to:this[t]})),s}}function vt(t,e){const i=t.options.ticks,n=function(t){const e=t.options.offset,i=t._tickSize(),s=t._length/i+(e?0:1),n=t._maxLength/i;return Math.floor(Math.min(s,n))}(t),o=Math.min(i.maxTicksLimit||n,n),a=i.major.enabled?function(t){const e=[];let i,s;for(i=0,s=t.length;io)return function(t,e,i,s){let n,o=0,a=i[0];for(s=Math.ceil(s),n=0;no)return t}return Math.max(o,1)}(a,e,o);if(r>0){let t,i;const n=r>1?Math.round((h-c)/(r-1)):null;for(_t(e,l,d,(0,s.k)(n)?0:c-n,c),t=0,i=r-1;t"top"===e||"left"===e?t[e]+i:t[e]-i,wt=(t,e)=>Math.min(e||t,t);function Mt(t,e){const i=[],s=t.length/e,n=t.length;let o=0;for(;oa+r)))return h}function St(t){return t.drawTicks?t.tickLength:0}function Ct(t,e){if(!t.display)return 0;const i=(0,s.a0)(t.font,e),n=(0,s.E)(t.padding);return((0,s.b)(t.text)?t.text.length:1)*i.lineHeight+n.height}function Pt(t,e,i){let n=(0,s.a1)(t);return(i&&"right"!==e||!i&&"right"===e)&&(n=(t=>"left"===t?"right":"right"===t?"left":t)(n)),n}class zt extends bt{constructor(t){super(),this.id=t.id,this.type=t.type,this.options=void 0,this.ctx=t.ctx,this.chart=t.chart,this.top=void 0,this.bottom=void 0,this.left=void 0,this.right=void 0,this.width=void 0,this.height=void 0,this._margins={left:0,right:0,top:0,bottom:0},this.maxWidth=void 0,this.maxHeight=void 0,this.paddingTop=void 0,this.paddingBottom=void 0,this.paddingLeft=void 0,this.paddingRight=void 0,this.axis=void 0,this.labelRotation=void 0,this.min=void 0,this.max=void 0,this._range=void 0,this.ticks=[],this._gridLineItems=null,this._labelItems=null,this._labelSizes=null,this._length=0,this._maxLength=0,this._longestTextCache={},this._startPixel=void 0,this._endPixel=void 0,this._reversePixels=!1,this._userMax=void 0,this._userMin=void 0,this._suggestedMax=void 0,this._suggestedMin=void 0,this._ticksLength=0,this._borderValue=0,this._cache={},this._dataLimitsCached=!1,this.$context=void 0}init(t){this.options=t.setContext(this.getContext()),this.axis=t.axis,this._userMin=this.parse(t.min),this._userMax=this.parse(t.max),this._suggestedMin=this.parse(t.suggestedMin),this._suggestedMax=this.parse(t.suggestedMax)}parse(t,e){return t}getUserBounds(){let{_userMin:t,_userMax:e,_suggestedMin:i,_suggestedMax:n}=this;return t=(0,s.O)(t,Number.POSITIVE_INFINITY),e=(0,s.O)(e,Number.NEGATIVE_INFINITY),i=(0,s.O)(i,Number.POSITIVE_INFINITY),n=(0,s.O)(n,Number.NEGATIVE_INFINITY),{min:(0,s.O)(t,i),max:(0,s.O)(e,n),minDefined:(0,s.g)(t),maxDefined:(0,s.g)(e)}}getMinMax(t){let e,{min:i,max:n,minDefined:o,maxDefined:a}=this.getUserBounds();if(o&&a)return{min:i,max:n};const r=this.getMatchingVisibleMetas();for(let s=0,c=r.length;sn?n:i,n=o&&i>n?i:n,{min:(0,s.O)(i,(0,s.O)(n,i)),max:(0,s.O)(n,(0,s.O)(i,n))}}getPadding(){return{left:this.paddingLeft||0,top:this.paddingTop||0,right:this.paddingRight||0,bottom:this.paddingBottom||0}}getTicks(){return this.ticks}getLabels(){const t=this.chart.data;return this.options.labels||(this.isHorizontal()?t.xLabels:t.yLabels)||t.labels||[]}getLabelItems(t=this.chart.chartArea){return 
this._labelItems||(this._labelItems=this._computeLabelItems(t))}beforeLayout(){this._cache={},this._dataLimitsCached=!1}beforeUpdate(){(0,s.Q)(this.options.beforeUpdate,[this])}update(t,e,i){const{beginAtZero:n,grace:o,ticks:a}=this.options,r=a.sampleSize;this.beforeUpdate(),this.maxWidth=t,this.maxHeight=e,this._margins=i=Object.assign({left:0,right:0,top:0,bottom:0},i),this.ticks=null,this._labelSizes=null,this._gridLineItems=null,this._labelItems=null,this.beforeSetDimensions(),this.setDimensions(),this.afterSetDimensions(),this._maxLength=this.isHorizontal()?this.width+i.left+i.right:this.height+i.top+i.bottom,this._dataLimitsCached||(this.beforeDataLimits(),this.determineDataLimits(),this.afterDataLimits(),this._range=(0,s.R)(this,o,n),this._dataLimitsCached=!0),this.beforeBuildTicks(),this.ticks=this.buildTicks()||[],this.afterBuildTicks();const c=r=o||i<=1||!this.isHorizontal())return void(this.labelRotation=n);const l=this._getLabelSizes(),d=l.widest.width,u=l.highest.height,f=(0,s.S)(this.chart.width-d,0,this.maxWidth);a=t.offset?this.maxWidth/i:f/(i-1),d+6>a&&(a=f/(i-(t.offset?.5:1)),r=this.maxHeight-St(t.grid)-e.padding-Ct(t.title,this.chart.options.font),c=Math.sqrt(d*d+u*u),h=(0,s.U)(Math.min(Math.asin((0,s.S)((l.highest.height+6)/a,-1,1)),Math.asin((0,s.S)(r/c,-1,1))-Math.asin((0,s.S)(u/c,-1,1)))),h=Math.max(n,Math.min(o,h))),this.labelRotation=h}afterCalculateLabelRotation(){(0,s.Q)(this.options.afterCalculateLabelRotation,[this])}afterAutoSkip(){}beforeFit(){(0,s.Q)(this.options.beforeFit,[this])}fit(){const t={width:0,height:0},{chart:e,options:{ticks:i,title:n,grid:o}}=this,a=this._isVisible(),r=this.isHorizontal();if(a){const a=Ct(n,e.options.font);if(r?(t.width=this.maxWidth,t.height=St(o)+a):(t.height=this.maxHeight,t.width=St(o)+a),i.display&&this.ticks.length){const{first:e,last:n,widest:o,highest:a}=this._getLabelSizes(),c=2*i.padding,h=(0,s.t)(this.labelRotation),l=Math.cos(h),d=Math.sin(h);if(r){const e=i.mirror?0:d*o.width+l*a.height;t.height=Math.min(this.maxHeight,t.height+e+c)}else{const e=i.mirror?0:l*o.width+d*a.height;t.width=Math.min(this.maxWidth,t.width+e+c)}this._calculatePadding(e,n,d,l)}}this._handleMargins(),r?(this.width=this._length=e.width-this._margins.left-this._margins.right,this.height=t.height):(this.width=t.width,this.height=this._length=e.height-this._margins.top-this._margins.bottom)}_calculatePadding(t,e,i,s){const{ticks:{align:n,padding:o},position:a}=this.options,r=0!==this.labelRotation,c="top"!==a&&"x"===this.axis;if(this.isHorizontal()){const a=this.getPixelForTick(0)-this.left,h=this.right-this.getPixelForTick(this.ticks.length-1);let l=0,d=0;r?c?(l=s*t.width,d=i*e.height):(l=i*t.height,d=s*e.width):"start"===n?d=e.width:"end"===n?l=t.width:"inner"!==n&&(l=t.width/2,d=e.width/2),this.paddingLeft=Math.max((l-a+o)*this.width/(this.width-a),0),this.paddingRight=Math.max((d-h+o)*this.width/(this.width-h),0)}else{let i=e.height/2,s=t.height/2;"start"===n?(i=0,s=t.height):"end"===n&&(i=e.height,s=0),this.paddingTop=i+o,this.paddingBottom=s+o}}_handleMargins(){this._margins&&(this._margins.left=Math.max(this.paddingLeft,this._margins.left),this._margins.top=Math.max(this.paddingTop,this._margins.top),this._margins.right=Math.max(this.paddingRight,this._margins.right),this._margins.bottom=Math.max(this.paddingBottom,this._margins.bottom))}afterFit(){(0,s.Q)(this.options.afterFit,[this])}isHorizontal(){const{axis:t,position:e}=this.options;return"top"===e||"bottom"===e||"x"===t}isFullSize(){return 
this.options.fullSize}_convertTicksToLabels(t){let e,i;for(this.beforeTickToLabelConversion(),this.generateTickLabels(t),e=0,i=t.length;e{const i=t.gc,s=i.length/2;let n;if(s>e){for(n=0;n({width:a[t]||0,height:r[t]||0});return{first:k(0),last:k(e-1),widest:k(w),highest:k(M),widths:a,heights:r}}getLabelForValue(t){return t}getPixelForValue(t,e){return NaN}getValueForPixel(t){}getPixelForTick(t){const e=this.ticks;return t<0||t>e.length-1?null:this.getPixelForValue(e[t].value)}getPixelForDecimal(t){this._reversePixels&&(t=1-t);const e=this._startPixel+t*this._length;return(0,s.W)(this._alignToPixels?(0,s.X)(this.chart,e,0):e)}getDecimalForPixel(t){const e=(t-this._startPixel)/this._length;return this._reversePixels?1-e:e}getBasePixel(){return this.getPixelForValue(this.getBaseValue())}getBaseValue(){const{min:t,max:e}=this;return t<0&&e<0?e:t>0&&e>0?t:0}getContext(t){const e=this.ticks||[];if(t>=0&&tr*n?r/i:c/n:c*n0}_computeGridLineItems(t){const e=this.axis,i=this.chart,n=this.options,{grid:o,position:a,border:r}=n,c=o.offset,h=this.isHorizontal(),l=this.ticks.length+(c?1:0),d=St(o),u=[],f=r.setContext(this.getContext()),g=f.display?f.width:0,p=g/2,m=function(t){return(0,s.X)(i,t,g)};let x,b,v,_,y,w,M,k,S,C,P,z;if("top"===a)x=m(this.bottom),w=this.bottom-d,k=x-p,C=m(t.top)+p,z=t.bottom;else if("bottom"===a)x=m(this.top),C=t.top,z=m(t.bottom)-p,w=x+p,k=this.top+d;else if("left"===a)x=m(this.right),y=this.right-d,M=x-p,S=m(t.left)+p,P=t.right;else if("right"===a)x=m(this.left),S=t.left,P=m(t.right)-p,y=x+p,M=this.left+d;else if("x"===e){if("center"===a)x=m((t.top+t.bottom)/2+.5);else if((0,s.i)(a)){const t=Object.keys(a)[0],e=a[t];x=m(this.chart.scales[t].getPixelForValue(e))}C=t.top,z=t.bottom,w=x+p,k=w+d}else if("y"===e){if("center"===a)x=m((t.left+t.right)/2);else if((0,s.i)(a)){const t=Object.keys(a)[0],e=a[t];x=m(this.chart.scales[t].getPixelForValue(e))}y=x-p,M=y-d,S=t.left,P=t.right}const O=(0,s.v)(n.ticks.maxTicksLimit,l),D=Math.max(1,Math.ceil(l/O));for(b=0;b0&&(a-=n/2)}u={left:a,top:o,width:n+e.width,height:i+e.height,color:t.backdropColor}}m.push({label:_,font:S,textOffset:z,options:{rotation:p,color:i,strokeColor:c,strokeWidth:l,textAlign:f,textBaseline:O,translation:[y,w],backdrop:u}})}return m}_getXAxisLabelAlignment(){const{position:t,ticks:e}=this.options;if(-(0,s.t)(this.labelRotation))return"top"===t?"left":"right";let i="center";return"start"===e.align?i="left":"end"===e.align?i="right":"inner"===e.align&&(i="inner"),i}_getYAxisLabelAlignment(t){const{position:e,ticks:{crossAlign:i,mirror:s,padding:n}}=this.options,o=t+n,a=this._getLabelSizes().widest.width;let r,c;return"left"===e?s?(c=this.right+n,"near"===i?r="left":"center"===i?(r="center",c+=a/2):(r="right",c+=a)):(c=this.right-o,"near"===i?r="right":"center"===i?(r="center",c-=a/2):(r="left",c=this.left)):"right"===e?s?(c=this.left+n,"near"===i?r="right":"center"===i?(r="center",c-=a/2):(r="left",c-=a)):(c=this.left+o,"near"===i?r="left":"center"===i?(r="center",c+=a/2):(r="right",c=this.right)):r="right",{textAlign:r,x:c}}_computeLabelArea(){if(this.options.ticks.mirror)return;const t=this.chart,e=this.options.position;return"left"===e||"right"===e?{top:0,left:this.left,bottom:t.height,right:this.right}:"top"===e||"bottom"===e?{top:this.top,left:0,bottom:this.bottom,right:t.width}:void 0}drawBackground(){const{ctx:t,options:{backgroundColor:e},left:i,top:s,width:n,height:o}=this;e&&(t.save(),t.fillStyle=e,t.fillRect(i,s,n,o),t.restore())}getLineWidthForValue(t){const 
/* minified Chart.js v4.4.2 bundle: scale and grid rendering, the component
   registry (controllers, elements, plugins, scales), plugin notifier,
   configuration/option-scope resolver, the core Chart class, the line
   element, the legend and tooltip plugins, the category/linear/time scales,
   and the shared helpers module (color parsing and conversion, easing
   functions, canvas text measurement, option proxies, DOM sizing) */
Ci=function(t,e){return{x:i=>t+t+e-i,setWidth(t){e=t},textAlign:t=>"center"===t?t:"right"===t?"left":"right",xPlus:(t,e)=>t-e,leftForLtr:(t,e)=>t-e}},Pi=function(){return{x:t=>t,setWidth(t){},textAlign:t=>t,xPlus:(t,e)=>t+e,leftForLtr:(t,e)=>t}};function zi(t,e,i){return t?Ci(e,i):Pi()}function Oi(t,e){let i,s;"ltr"!==e&&"rtl"!==e||(i=t.canvas.style,s=[i.getPropertyValue("direction"),i.getPropertyPriority("direction")],i.setProperty("direction",e,"important"),t.prevTextDirection=s)}function Di(t,e){void 0!==e&&(delete t.prevTextDirection,t.canvas.style.setProperty("direction",e[0],e[1]))}function Li(t){return"angle"===t?{between:Lt,compare:Ot,normalize:Dt}:{between:Tt,compare:(t,e)=>t-e,normalize:t=>t}}function Ai({start:t,end:e,count:i,loop:s,style:n}){return{start:t%i,end:e%i,loop:s&&(e-t+1)%i===0,style:n}}function Hi(t,e,i){if(!i)return[t];const{property:s,start:n,end:o}=i,a=e.length,{compare:r,between:c,normalize:h}=Li(s),{start:l,end:d,loop:u,style:f}=function(t,e,i){const{property:s,start:n,end:o}=i,{between:a,normalize:r}=Li(s),c=e.length;let h,l,{start:d,end:u,loop:f}=t;if(f){for(d+=c,u+=c,h=0,l=c;hb||c(n,x,p)&&0!==r(n,x),y=()=>!b||0===r(o,p)||c(o,x,p);for(let w=l,M=l;w<=d;++w)m=e[w%a],m.skip||(p=h(m[s]),p!==x&&(b=c(p,n,o),null===v&&_()&&(v=0===r(p,n)?w:M),null!==v&&y()&&(g.push(Ai({start:v,end:w,loop:u,count:a,style:f})),v=null),M=w,x=p));return null!==v&&g.push(Ai({start:v,end:d,loop:u,count:a,style:f})),g}function Ti(t,e){const i=[],s=t.segments;for(let n=0;nn&&t[o%e].skip;)o--;return o%=e,{start:n,end:o}}(i,n,o,s);if(!0===s)return Ei(t,[{start:a,end:r,loop:o}],i,e);return Ei(t,function(t,e,i,s){const n=t.length,o=[];let a,r=e,c=t[e];for(a=e+1;a<=i;++a){const i=t[a%n];i.skip||i.stop?c.skip||(s=!1,o.push({start:e%n,end:(a-1)%n,loop:s}),e=r=i.stop?a:null):(r=a,c.skip&&(e=a)),c=i}return null!==r&&o.push({start:e%n,end:r%n,loop:s}),o}(i,a,r{"use strict";i.d(e,{t1:()=>d,yP:()=>f});var s=i(96540),n=i(66118);const o="label";function a(t,e){"function"===typeof t?t(e):t&&(t.current=e)}function r(t,e){t.labels=e}function c(t,e){let i=arguments.length>2&&void 0!==arguments[2]?arguments[2]:o;const s=[];t.datasets=e.map((e=>{const n=t.datasets.find((t=>t[i]===e[i]));return n&&e.data&&!s.includes(n)?(s.push(n),Object.assign(n,e),n):{...e}}))}function h(t){let e=arguments.length>1&&void 0!==arguments[1]?arguments[1]:o;const i={labels:[],datasets:[]};return r(i,t.labels),c(i,t.datasets,e),i}function l(t,e){const{height:i=150,width:o=300,redraw:l=!1,datasetIdKey:d,type:u,data:f,options:g,plugins:p=[],fallbackContent:m,updateMode:x,...b}=t,v=(0,s.useRef)(null),_=(0,s.useRef)(),y=()=>{v.current&&(_.current=new n.t1(v.current,{type:u,data:h(f,d),options:g&&{...g},plugins:p}),a(e,_.current))},w=()=>{a(e,null),_.current&&(_.current.destroy(),_.current=null)};return(0,s.useEffect)((()=>{!l&&_.current&&g&&function(t,e){const i=t.options;i&&e&&Object.assign(i,e)}(_.current,g)}),[l,g]),(0,s.useEffect)((()=>{!l&&_.current&&r(_.current.config.data,f.labels)}),[l,f.labels]),(0,s.useEffect)((()=>{!l&&_.current&&f.datasets&&c(_.current.config.data,f.datasets,d)}),[l,f.datasets]),(0,s.useEffect)((()=>{_.current&&(l?(w(),setTimeout(y)):_.current.update(x))}),[l,g,f.labels,f.datasets,x]),(0,s.useEffect)((()=>{_.current&&(w(),setTimeout(y))}),[u]),(0,s.useEffect)((()=>(y(),()=>w())),[]),s.createElement("canvas",Object.assign({ref:v,role:"img",height:i,width:o},b),m)}const d=(0,s.forwardRef)(l);function u(t,e){return 
n.t1.register(e),(0,s.forwardRef)(((e,i)=>s.createElement(d,Object.assign({},e,{ref:i,type:t}))))}const f=u("bar",n.A6)}}]);
\ No newline at end of file
diff --git a/src/web/gui/v2/3455.f9ca876de57244386773.chunk.js.LICENSE.txt b/src/web/gui/v2/3455.f9ca876de57244386773.chunk.js.LICENSE.txt
deleted file mode 100644
index ebc73ef35..000000000
--- a/src/web/gui/v2/3455.f9ca876de57244386773.chunk.js.LICENSE.txt
+++ /dev/null
@@ -1,13 +0,0 @@
-/*!
- * @kurkle/color v0.3.2
- * https://github.com/kurkle/color#readme
- * (c) 2023 Jukka Kurkela
- * Released under the MIT License
- */
-
-/*!
- * Chart.js v4.4.2
- * https://www.chartjs.org
- * (c) 2024 Chart.js Contributors
- * Released under the MIT License
- */
diff --git a/src/web/gui/v2/3621.01ee70ee9c311ac163d9.chunk.js b/src/web/gui/v2/3621.01ee70ee9c311ac163d9.chunk.js
deleted file mode 100644
index 4779ee524..000000000
--- a/src/web/gui/v2/3621.01ee70ee9c311ac163d9.chunk.js
+++ /dev/null
@@ -1 +0,0 @@
-!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="faad78fd-e5f3-4fb9-b6d3-062efb04f31c",e._sentryDebugIdIdentifier="sentry-dbid-faad78fd-e5f3-4fb9-b6d3-062efb04f31c")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[3621],{63621:(e,t,n)=>{n.r(t),n.d(t,{default:()=>U});n(30067),n(93518),n(25440),n(3064),n(41393),n(98992),n(72577),n(81454),n(62953);var r=n(96540),o=n(47767),a=n(86663),s=n(26655),i=n(78969),c=n(22292),d=n(63129),u=n(71835),l=n(76201),f=n(3914),g=n(69418);const w=()=>{const e=(0,c.uW)("id"),t=(0,c.uW)("name"),n=(0,c.uW)("email"),o=(0,c.uW)("createdAt"),a=(0,c.uW)("verifiedEmail"),s=(0,c.uW)("avatarUrl"),i=(0,f.UV)("loaded"),d=(0,f.UV)("ids"),[,u,l]=(0,g.A)();(0,r.useEffect)((()=>{e&&i&&!u&&(e=>{let{avatar:t,createdAt:n,email:r,id:o,name:a,spacesCount:s,verifiedEmail:i,maxNodesCount:c}=e;if(!window.envSettings.tracking)return;if(r&&r.includes("@netdata.msdc.co"))return;if(o&&"00000000-0000-0000-0000-000000000000"===o)return;if(r&&r.includes("anonymous@netdata.cloud"))return;const d=new Date,u=d.toISOString(),l=new Date(n),f=Math.floor((d-l)/864e5);var g;window.dataLayer&&(window.dataLayer.push({event:"UserInfoAvailable",user_id:o,userIdentifier:o,userName:a,userEmail:r,userAccountCreatedAt:n,userAccountCreatedDaysAgo:f,userAvatarURL:t,userEmailVerified:i,spacesCount:s}),null!==(g=window.posthog)&&void 0!==g&&g.__loaded&&(window.posthog.identify(o),window.posthog.people.set({email:r||"unknown email",name:a,netdata_cloud_account_created_at:n,netdata_cloud_account_created_days_ago:f,spacesCount:s,maxNodesCount:c}),window.posthog.register({netdata_cloud_account_created_days_ago:f,...window.localNetdataRegistry?{...window.localNetdataRegistry.pg?{netdata_registry_person_guid:window.localNetdataRegistry.pg}:{},...window.localNetdataRegistry.mg?{netdata_registry_machine_guid:window.localNetdataRegistry.mg}:{}}:{}}),window.posthog.register_once({event_source:"cloud",netdata_cloud_account_created_at:n,netdata_cloud_account_email:r||"unknown email",netdata_cloud_account_id:o,netdata_cloud_signed_in_at:u})))})({avatar:s,createdAt:o,email:n,id:e,name:t,spacesCount:d.length,verifiedEmail:a,maxNodesCount:l})}),[e,i,u]),(0,r.useEffect)((()=>{var
e;if(null!==(e=window.posthog)&&void 0!==e&&e.__loaded)try{window.posthog.onFeatureFlags((function(){window.posthog.isFeatureEnabled&&window.posthog.isFeatureEnabled("user-age-less-than-7d")&&window.posthog.startSessionRecording()}))}catch(t){}}),[])},p=()=>{var e;if(null===(e=window.posthog)||void 0===e||!e.__loaded)return;const t=(new Date).toISOString();window.posthog.register_once({event_source:"cloud",posthog_first_seen_at:t,posthog_first_distinct_id:"get_distinct_id"in window.posthog&&window.posthog.get_distinct_id()})},m=()=>(0,r.useEffect)(p,[]);var h=n(87337),y=n(9224),v=n(91069),_=n(48388),E=n(37618),b=n(33829),A=n(67276),N=n(38819),C=n(80158),S=n(79731),k=n(50876);const R=()=>{const e=(0,r.useRef)(),[,t]=(0,u.A)(),{error_msg_key:n,error_msg:o}=(0,N.PP)(),{sendLog:a}=(0,k.A)();(0,r.useEffect)((()=>{if(n&&e.current!=n){e.current=n;const r=o?(0,C.Zr)(decodeURIComponent(o)):void 0,s=(0,S.o)(n,r);t({header:"Error",text:s}),a({feature:"url-hash-error",isFailure:!0,errorMsgKey:n,message:s})}}),[e.current,n,o,t,a])},I=e=>{let{errorRetry:t,token:n,redirectUri:o}=e;const s=a.parseUrl(decodeURIComponent(t)),{url:i,query:c}=s,{redirect_uri:d}=c,u=(0,r.useRef)(!1);return u.current||(u.current=!0,window.location="".concat(i,"?token=").concat(n,"&redirect_uri=").concat(d||o)),null},U=()=>{const e=(0,c.uW)("isLoaded"),t=(0,c.uW)("isAnonymous");(0,l.A)(),m(),w(),(0,r.useEffect)((()=>{if(!E.Ay){const e=window.localStorage.getItem(A.lO.visitor);if(e)window.envSettings.visitor=e;else{const e=(0,b.A)();window.localStorage.setItem(A.lO.visitor,e),window.envSettings.visitor=e}}}),[]),(0,r.useEffect)((()=>{sessionStorage.getItem(A.yq.session)||sessionStorage.setItem(A.yq.session,(0,b.A)())}),[]),R();const n=(0,o.RQ)("/sign-in/mobile-app/*"),f=(0,o.RQ)("/sign-in/*"),g=(0,o.RQ)("/sign-up/*"),p=!!f||!!g,N=(()=>{const[,e]=(0,u.A)(),t=(0,h.KF)();return(0,r.useCallback)((function(){let{origin:n,id:r,name:o}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};if(!n)return;const a=decodeURIComponent(r),c=decodeURIComponent(n);(0,y.ys)(r).then((n=>{let{data:r=[]}=n;return t(a,c,o),r.length?s.A.get(i._9).then((e=>{let{data:t}=e;return Promise.any(t.map((e=>s.A.get((0,v.t)({spaceId:e.id})).then((t=>{let{data:n}=t;return Promise.any(n.map((t=>(0,d.uQ)({roomId:t.id,spaceId:e.id}).then((n=>{let{nodes:o}=n;const a=o.find((e=>{let{id:t}=e;return r.includes(t)}));if(!a)throw new Error("can't find matching node");const{protocol:s,host:i}=window.location;return"".concat(s,"//").concat(i,"/spaces/").concat(e.slug,"/rooms/").concat(t.slug,"/nodes/").concat(a.id)})))))})))))})).catch((()=>{})):(0,y.iY)(a).then((t=>{let{data:n}=t;const{claimed:r}=n;throw e(r?{errorMsgKey:"ErrForbidden",errorMessage:"You tried to access this Node on Netdata and you don't have access to it. Please contact your Space admin to give you access to it."}:{errorMsgKey:"ErrForbidden",errorMessage:"This Node isn't connected to Netdata. 
Please connect it, if you have permission for it, or contact your Space admin."}),"no access"}))})).then((e=>{e&&setTimeout(location.assign(e))})).catch((()=>{}))}),[])})(),C=(0,_.A)();if(!e||C.isFetching||C.hasAccess&&!p||n)return null;const{pathname:S,search:k,hash:U}=window.location,{error_retry:D,token:x}=a.parse(U),{cloudRoute:T,redirect_uri:P,...F}=a.parse(k);if(!t&&D)return r.createElement(I,{errorRetry:D,token:x,redirectUri:P});if(!t&&p){if(T){const e=T.includes("join-callback")?decodeURI(T):T,t=Array.isArray(e)?e[0]:e;return r.createElement(o.C5,{replace:!0,to:t})}return P?(N(F),window.location.replace(decodeURIComponent(P)),null):r.createElement(o.C5,{replace:!0,to:"/spaces"})}if(E.Ay)return null;if(t&&!p){const e=U.includes("join-callback")?U:encodeURIComponent(U),t="".concat(k).concat(k?"&":"?","cloudRoute=").concat(S);return r.createElement(o.C5,{replace:!0,to:{pathname:"/sign-in",search:t,hash:e}})}return null}},87337:(e,t,n)=>{n.d(t,{xN:()=>p,vS:()=>f,YN:()=>m,KF:()=>w,iw:()=>l,rE:()=>g});n(17333),n(3064),n(41393),n(14905),n(98992),n(54520),n(72577),n(81454),n(8872),n(25509),n(65223),n(60321),n(41927),n(11632),n(64377),n(66771),n(12516),n(68931),n(52514),n(35694),n(52774),n(49536),n(21926),n(94483),n(16215),n(62953);var r=n(96540),o=n(47444),a=n(47767),s=n(22292);const i=(0,o.Iz)({key:"visitedNodes",default:()=>[]});var c=n(47762),d=n(9224);const u=(0,o.K0)({key:"visitedNodeIdsValue",get:e=>t=>{let{get:n}=t;return n(i(e)).map((e=>{let{id:t}=e;return t}))}}),l=()=>{const e=(0,s.NJ)(),t=(0,o.vc)(i(e)),n=(0,o.Zs)((e=>{let{set:t}=e;return e=>{t(c.gl,{values:e.reduce(((e,t)=>({...e,[t.id]:{...t,loaded:!0}})),{}),merge:!0})}}),[]);return(0,r.useEffect)((()=>{n(t)}),[t]),(0,o.vc)(u(e))},f=e=>{const t=(0,s.NJ)(),n=(0,o.vc)(i(t)),a=(0,r.useMemo)((()=>e?n.filter((t=>t.name.toUpperCase().includes(e.toUpperCase()))):n),[n,e]);return(0,r.useMemo)((()=>a.map((e=>e.id))),[a])},g=()=>(0,o.Zs)((e=>{let{snapshot:t,set:n}=e;return async(e,r)=>{const o=await t.getPromise((0,s.Dm)("id")),{urls:a,name:u}=await t.getPromise((0,c.GN)({id:e})),l=a.filter((e=>e!==r));n((0,c.GN)({id:e,key:"urls"}),l),l.length||n(i(o),(t=>t.filter((t=>t.id!==e))));try{await(l.length?(0,d.Bz)(o,e,u,l):(0,d.sm)(o,[e])),(0,d.UL)(o,e).catch((()=>{}))}catch(f){n((0,c.GN)({id:e,key:"urls"}),a)}}}),[]),w=()=>{const{pathname:e}=(0,a.zy)(),t=(0,s.NJ)(),n=p({autoFetch:!1});return(0,o.Zs)((e=>{let{snapshot:t,set:r}=e;return async(e,o,a)=>{if(await t.getPromise((0,s.Dm)("isAnonymous")))return;const u=await t.getPromise((0,s.Dm)("id")),{urls:l,name:f}=await t.getPromise((0,c.GN)({id:e}));let g=o?[o,...l]:l;g=[...new Set([window.location.href,...g])];const w=g.length!==l.length;try{r((0,c.GN)({id:e,key:"urls"}),g),r(i(u),(t=>{const n=t.find((t=>t.id===e)),r=t.filter((t=>t.id!==e));return n?[{...n,accessCount:n.accessCount+1,lastAccessTime:(new Date).toISOString()},...r]:[{accessCount:1,id:e,lastAccessTime:(new Date).toISOString(),urls:g,name:a},...r]})),w&&await(0,d.Bz)(u,e,a||f,g),n(),await(0,d.UL)(u,e)}catch(p){r((0,c.GN)({id:e,key:"urls"}),l)}}}),[n,e,t])},p=function(){let{autoFetch:e=!0}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const{id:t,isAnonymous:n}=(0,s.uW)(),[,a]=(0,o.L4)(i(t)),[c,u]=(0,r.useState)(0),l=(0,r.useCallback)((()=>{u((e=>e+1))}),[u]);return(0,r.useEffect)((()=>{if(t&&(e||c))if(n){const e=(window.visitedNodes||[]).sort(((e,t)=>new Date(t.lastAccessTime)-new Date(e.lastAccessTime)));a(e)}else(0,d.uQ)(t).then((e=>{if(!e)return;const{data:{results:t}}=e,n=t.sort(((e,t)=>new 
Date(t.lastAccessTime)-new Date(e.lastAccessTime)));a(n)}))}),[e,c,t,n]),l},m=()=>{const e=(0,s.NJ)(),t=i(e);return(0,o.Zs)((e=>{let{snapshot:n,set:r}=e;return async e=>{const o=(await n.getPromise((0,c.th)(e))).map((e=>e.machineGUID)),a=await n.getPromise(t),s=a.filter((e=>!o.includes(e.id)));s.length!==a.length&&r(t,s)}}),[e])}},84428:(e,t,n)=>{var r=n(78227)("iterator"),o=!1;try{var a=0,s={next:function(){return{done:!!a++}},return:function(){o=!0}};s[r]=function(){return this},Array.from(s,(function(){throw 2}))}catch(i){}e.exports=function(e,t){try{if(!t&&!o)return!1}catch(i){return!1}var n=!1;try{var a={};a[r]=function(){return{next:function(){return{done:n=!0}}}},e(a)}catch(i){}return n}},87290:(e,t,n)=>{var r=n(50516),o=n(19088);e.exports=!r&&!o&&"object"==typeof window&&"object"==typeof document},50516:e=>{e.exports="object"==typeof Deno&&Deno&&"object"==typeof Deno.version},19088:(e,t,n)=>{var r=n(24475),o=n(44576);e.exports="process"===o(r.process)},16193:(e,t,n)=>{var r=n(79504),o=Error,a=r("".replace),s=String(new o("zxcasd").stack),i=/\n\s*at [^:]*:[^\n]*/,c=i.test(s);e.exports=function(e,t){if(c&&"string"==typeof e&&!o.prepareStackTrace)for(;t--;)e=a(e,i,"");return e}},80747:(e,t,n)=>{var r=n(66699),o=n(16193),a=n(24659),s=Error.captureStackTrace;e.exports=function(e,t,n,i){a&&(s?s(e,t):r(e,"stack",o(n,i)))}},24659:(e,t,n)=>{var r=n(79039),o=n(6980);e.exports=!r((function(){var e=new Error("a");return!("stack"in e)||(Object.defineProperty(e,"stack",o(1,7)),7!==e.stack)}))},77584:(e,t,n)=>{var r=n(20034),o=n(66699);e.exports=function(e,t){r(t)&&"cause"in t&&o(e,"cause",t.cause)}},32603:(e,t,n)=>{var r=n(655);e.exports=function(e,t){return void 0===e?arguments.length<2?"":t:r(e)}},10916:(e,t,n)=>{var r=n(24475),o=n(80550),a=n(94901),s=n(92796),i=n(33706),c=n(78227),d=n(87290),u=n(50516),l=n(96395),f=n(77388),g=o&&o.prototype,w=c("species"),p=!1,m=a(r.PromiseRejectionEvent),h=s("Promise",(function(){var e=i(o),t=e!==String(o);if(!t&&66===f)return!0;if(l&&(!g.catch||!g.finally))return!0;if(!f||f<51||!/native code/.test(e)){var n=new o((function(e){e(1)})),r=function(e){e((function(){}),(function(){}))};if((n.constructor={})[w]=r,!(p=n.then((function(){}))instanceof r))return!0}return!t&&(d||u)&&!m}));e.exports={CONSTRUCTOR:h,REJECTION_EVENT:m,SUBCLASSING:p}},90537:(e,t,n)=>{var r=n(80550),o=n(84428),a=n(10916).CONSTRUCTOR;e.exports=a||!o((function(e){r.all(e).then(void 0,(function(){}))}))},17145:(e,t,n)=>{var r=n(46518),o=n(1625),a=n(42787),s=n(52967),i=n(77740),c=n(2360),d=n(66699),u=n(6980),l=n(77584),f=n(80747),g=n(72652),w=n(32603),p=n(78227)("toStringTag"),m=Error,h=[].push,y=function(e,t){var n,r=o(v,this);s?n=s(new m,r?a(this):v):(n=r?this:c(v),d(n,p,"Error")),void 0!==t&&d(n,"message",w(t)),f(n,y,n.stack,1),arguments.length>2&&l(n,arguments[2]);var i=[];return g(e,h,{that:i}),d(n,"errors",i),n};s?s(y,m):i(y,m,{name:!0});var v=y.prototype=c(m.prototype,{constructor:u(1,y),message:u(1,""),name:u(1,"AggregateError")});r({global:!0,constructor:!0,arity:2},{AggregateError:y})},30067:(e,t,n)=>{n(17145)},93518:(e,t,n)=>{var r=n(46518),o=n(69565),a=n(79306),s=n(97751),i=n(36043),c=n(1103),d=n(72652),u=n(90537),l="No one promise resolved";r({target:"Promise",stat:!0,forced:u},{any:function(e){var t=this,n=s("AggregateError"),r=i.f(t),u=r.resolve,f=r.reject,g=c((function(){var r=a(t.resolve),s=[],i=0,c=1,g=!1;d(e,(function(e){var a=i++,d=!1;c++,o(r,t,e).then((function(e){d||g||(g=!0,u(e))}),(function(e){d||g||(d=!0,s[a]=e,--c||f(new n(s,l)))}))})),--c||f(new n(s,l))}));return 
g.error&&f(g.value),r.promise}})}}]);
\ No newline at end of file
diff --git a/src/web/gui/v2/3624.bfeb1fdc3057ba82ddac.chunk.js b/src/web/gui/v2/3624.bfeb1fdc3057ba82ddac.chunk.js
deleted file mode 100644
index 8ac2ae235..000000000
--- a/src/web/gui/v2/3624.bfeb1fdc3057ba82ddac.chunk.js
+++ /dev/null
@@ -1 +0,0 @@
-!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="62ed62e8-38ee-4913-9bf3-d1a56caff2f2",e._sentryDebugIdIdentifier="sentry-dbid-62ed62e8-38ee-4913-9bf3-d1a56caff2f2")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[3624],{83624:(e,t,n)=>{function i(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);t&&(i=i.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,i)}return n}function r(e){for(var t=1;te.length)&&(t=e.length);for(var n=0,i=new Array(t);n=e.length?{done:!0}:{done:!1,value:e[i++]}},e:function(e){throw e},f:r}}throw new TypeError("Invalid attempt to iterate non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}var s,o=!0,a=!1;return{s:function(){n=n.call(e)},n:function(){var e=n.next();return o=e.done,e},e:function(e){a=!0,s=e},f:function(){try{o||null==n.return||n.return()}finally{if(a)throw s}}}}n.r(t),n.d(t,{Compression:()=>ie,PostHog:()=>$r,SurveyQuestionType:()=>Sr,SurveyType:()=>wr,default:()=>Or,posthog:()=>Or});var v={DEBUG:!1,LIB_VERSION:"1.130.1"},g=Array.isArray,_=Object.prototype,m=_.hasOwnProperty,y=_.toString,b=g||function(e){return"[object Array]"===y.call(e)},k=function(e){return"function"==typeof e},w=function(e){return e===Object(e)&&!b(e)},S=function(e){if(w(e)){for(var t in e)if(m.call(e,t))return!1;return!0}return!1},F=function(e){return void 0===e},E=function(e){return"[object String]"==y.call(e)},R=function(e){return null===e},x=function(e){return F(e)||R(e)},P=function(e){return"[object Number]"==y.call(e)},I=function(e){return"[object Boolean]"===y.call(e)},C=function(e){return e instanceof FormData},T="undefined"!=typeof window?window:void 0,$="undefined"!=typeof globalThis?globalThis:T,M=Array.prototype,O=M.forEach,q=M.indexOf,A=null==$?void 0:$.navigator,L=null==$?void 0:$.document,D=null==$?void 0:$.location,B=null==$?void 0:$.fetch,N=null!=$&&$.XMLHttpRequest&&"withCredentials"in new $.XMLHttpRequest?$.XMLHttpRequest:void 0,H=null==$?void 0:$.AbortController,j=null==A?void 0:A.userAgent,U=null!=T?T:{},W="[PostHog.js]",z={_log:function(e){if(T&&(v.DEBUG||U.POSTHOG_DEBUG)&&!F(T.console)&&T.console){for(var t=("__rrweb_original__"in T.console[e]?T.console[e].__rrweb_original__:T.console[e]),n=arguments.length,i=new Array(n>1?n-1:0),r=1;r1?t-1:0),i=1;i0&&(t[n]=e)})),t};var ie,re=function(){function e(t){return t&&(t.preventDefault=e.preventDefault,t.stopPropagation=e.stopPropagation),t}return e.preventDefault=function(){this.returnValue=!1},e.stopPropagation=function(){this.cancelBubble=!0},function(t,n,i,r,s){if(t)if(t.addEventListener&&!r)t.addEventListener(n,i,!!s);else{var o="on"+n,a=t[o];t[o]=function(t,n,i){return function(r){if(r=r||e(null==T?void 0:T.event)){var s,o=!0;k(i)&&(s=i(r));var
a=n.call(t,r);return!1!==s&&!1!==a||(o=!1),o}}}(t,i,a)}else z.error("No valid element provided to register_event")}}();function se(e,t){var n=function(){if(!L)return t("document not found");var n=L.createElement("script");n.type="text/javascript",n.src=e,n.onload=function(e){return t(void 0,e)},n.onerror=function(e){return t(e)};var i,r=L.querySelectorAll("body > script");r.length>0?null===(i=r[0].parentNode)||void 0===i||i.insertBefore(n,r[0]):L.body.appendChild(n)};null!=L&&L.body?n():null==L||L.addEventListener("DOMContentLoaded",n)}!function(e){e.GZipJS="gzip-js",e.Base64="base64"}(ie||(ie={}));var oe="$people_distinct_id",ae="__alias",ue="__timers",le="$autocapture_disabled_server_side",ce="$heatmaps_enabled_server_side",de="$session_recording_enabled_server_side",he="$console_log_recording_enabled_server_side",fe="$session_recording_network_payload_capture",pe="$session_recording_canvas_recording",ve="$replay_sample_rate",ge="$replay_minimum_duration",_e="$sesid",me="$session_is_sampled",ye="$enabled_feature_flags",be="$early_access_features",ke="$stored_person_properties",we="$stored_group_properties",Se="$surveys",Fe="$flag_call_reported",Ee="$user_state",Re="$client_session_props",xe="$capture_rate_limit",Pe="$initial_campaign_params",Ie="$initial_referrer_info",Ce="$epp",Te=[oe,ae,"__cmpns",ue,de,ce,_e,ye,Ee,be,we,ke,Se,Fe,Re,xe,Pe,Ie,Ce],$e="$active_feature_flags",Me="$override_feature_flags",Oe="$feature_flag_payloads",qe=function(e){var t,n={},i=p(K(e||{}));try{for(i.s();!(t=i.n()).done;){var r=c(t.value,2),s=r[0],o=r[1];o&&(n[s]=o)}}catch(e){i.e(e)}finally{i.f()}return n},Ae=function(){function e(t){o(this,e),this.instance=t,this._override_warning=!1,this.featureFlagEventHandlers=[],this.reloadFeatureFlagsQueued=!1,this.reloadFeatureFlagsInAction=!1}return u(e,[{key:"getFlags",value:function(){return Object.keys(this.getFlagVariants())}},{key:"getFlagVariants",value:function(){var e=this.instance.get_property(ye),t=this.instance.get_property(Me);if(!t)return e||{};for(var n=Y({},e),i=Object.keys(t),r=0;r1&&void 0!==arguments[1]?arguments[1]:{};if(this.instance.decideEndpointWasHit||this.getFlags()&&this.getFlags().length>0){var n,i=this.getFlagVariants()[e],r="".concat(i),s=this.instance.get_property(Fe)||{};return!t.send_event&&"send_event"in t||e in s&&s[e].includes(r)||(b(s[e])?s[e].push(r):s[e]=[r],null===(n=this.instance.persistence)||void 0===n||n.register(l({},Fe,s)),this.instance.capture("$feature_flag_called",{$feature_flag:e,$feature_flag_response:i})),i}z.warn('getFeatureFlag for key "'+e+"\" failed. Feature flags didn't load in time.")}},{key:"getFeatureFlagPayload",value:function(e){return this.getFlagPayloads()[e]}},{key:"isFeatureEnabled",value:function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};if(this.instance.decideEndpointWasHit||this.getFlags()&&this.getFlags().length>0)return!!this.getFeatureFlag(e,t);z.warn('isFeatureEnabled for key "'+e+"\" failed. 
Feature flags didn't load in time.")}},{key:"addFeatureFlagsHandler",value:function(e){this.featureFlagEventHandlers.push(e)}},{key:"removeFeatureFlagsHandler",value:function(e){this.featureFlagEventHandlers=this.featureFlagEventHandlers.filter((function(t){return t!==e}))}},{key:"receivedFeatureFlags",value:function(e,t){if(this.instance.persistence){this.instance.decideEndpointWasHit=!0;var n=this.getFlagVariants(),i=this.getFlagPayloads();!function(e,t){var n,i=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{},s=arguments.length>3&&void 0!==arguments[3]?arguments[3]:{},o=e.featureFlags,a=e.featureFlagPayloads;if(o)if(b(o)){var u,c={};if(o)for(var d=0;d1&&void 0!==arguments[1]&&arguments[1],i=this.instance.get_property(be);if(i&&!n)return e(i);this.instance._send_request({transport:"XHR",url:this.instance.requestRouter.endpointFor("api","/api/early_access_features/?token=".concat(this.instance.config.token)),method:"GET",callback:function(n){var i;if(n.json){var r=n.json.earlyAccessFeatures;return null===(i=t.instance.persistence)||void 0===i||i.register(l({},be,r)),e(r)}}})}},{key:"_prepareFeatureFlagsForCallbacks",value:function(){var e=this.getFlags(),t=this.getFlagVariants();return{flags:e.filter((function(e){return t[e]})),flagVariants:Object.keys(t).filter((function(e){return t[e]})).reduce((function(e,n){return e[n]=t[n],e}),{})}}},{key:"_fireFeatureFlagsCallbacks",value:function(e){var t=this._prepareFeatureFlagsForCallbacks(),n=t.flags,i=t.flagVariants;this.featureFlagEventHandlers.forEach((function(t){return t(n,i,{errorsLoading:e})}))}},{key:"setPersonPropertiesForFlags",value:function(e){var t=!(arguments.length>1&&void 0!==arguments[1])||arguments[1],n=this.instance.get_property(ke)||{};this.instance.register(l({},ke,r(r({},n),e))),t&&this.instance.reloadFeatureFlags()}},{key:"resetPersonPropertiesForFlags",value:function(){this.instance.unregister(ke)}},{key:"setGroupPropertiesForFlags",value:function(e){var t=!(arguments.length>1&&void 0!==arguments[1])||arguments[1],n=this.instance.get_property(we)||{};0!==Object.keys(n).length&&Object.keys(n).forEach((function(t){n[t]=r(r({},n[t]),e[t]),delete e[t]})),this.instance.register(l({},we,r(r({},n),e))),t&&this.instance.reloadFeatureFlags()}},{key:"resetGroupPropertiesForFlags",value:function(e){if(e){var t=this.instance.get_property(we)||{};this.instance.register(l({},we,r(r({},t),{},l({},e,{}))))}else this.instance.unregister(we)}}]),e}();Math.trunc||(Math.trunc=function(e){return e<0?Math.ceil(e):Math.floor(e)}),Number.isInteger||(Number.isInteger=function(e){return P(e)&&isFinite(e)&&Math.floor(e)===e});var Le="0123456789abcdef",De=function(){function e(t){if(o(this,e),this.bytes=t,16!==t.length)throw new TypeError("not 128-bit length")}return u(e,[{key:"toString",value:function(){for(var e="",t=0;t>>4)+Le.charAt(15&this.bytes[t]),3!==t&&5!==t&&7!==t&&9!==t||(e+="-");if(36!==e.length)throw new Error("Invalid UUIDv7 was generated");return e}},{key:"clone",value:function(){return new e(this.bytes.slice(0))}},{key:"equals",value:function(e){return 0===this.compareTo(e)}},{key:"compareTo",value:function(e){for(var t=0;t<16;t++){var n=this.bytes[t]-e.bytes[t];if(0!==n)return Math.sign(n)}return 0}}],[{key:"fromFieldsV7",value:function(t,n,i,r){if(!Number.isInteger(t)||!Number.isInteger(n)||!Number.isInteger(i)||!Number.isInteger(r)||t<0||n<0||i<0||r<0||t>0xffffffffffff||n>4095||i>1073741823||r>4294967295)throw new RangeError("invalid field value");var s=new Uint8Array(16);return 
s[0]=t/Math.pow(2,40),s[1]=t/Math.pow(2,32),s[2]=t/Math.pow(2,24),s[3]=t/Math.pow(2,16),s[4]=t/Math.pow(2,8),s[5]=t,s[6]=112|n>>>8,s[7]=n,s[8]=128|i>>>24,s[9]=i>>>16,s[10]=i>>>8,s[11]=i,s[12]=r>>>24,s[13]=r>>>16,s[14]=r>>>8,s[15]=r,new e(s)}}]),e}(),Be=function(){function e(){o(this,e),l(this,"timestamp",0),l(this,"counter",0),l(this,"random",new je)}return u(e,[{key:"generate",value:function(){var e=this.generateOrAbort();if(F(e)){this.timestamp=0;var t=this.generateOrAbort();if(F(t))throw new Error("Could not generate UUID after timestamp reset");return t}return e}},{key:"generateOrAbort",value:function(){var e=Date.now();if(e>this.timestamp)this.timestamp=e,this.resetCounter();else{if(!(e+1e4>this.timestamp))return;this.counter++,this.counter>4398046511103&&(this.timestamp++,this.resetCounter())}return De.fromFieldsV7(this.timestamp,Math.trunc(this.counter/Math.pow(2,30)),this.counter&Math.pow(2,30)-1,this.random.nextUint32())}},{key:"resetCounter",value:function(){this.counter=1024*this.random.nextUint32()+(1023&this.random.nextUint32())}}]),e}(),Ne=function(e){if("undefined"!=typeof UUIDV7_DENY_WEAK_RNG&&UUIDV7_DENY_WEAK_RNG)throw new Error("no cryptographically strong RNG available");for(var t=0;t=this.buffer.length&&(Ne(this.buffer),this.cursor=0),this.buffer[this.cursor++]}}]),e}(),Ue=function(){return We().toString()},We=function(){return(He||(He=new Be)).generate()},ze="",Ge=/[a-z0-9][a-z0-9-]+\.[a-z]{2,}$/i;var Ve,Qe={is_supported:function(){return!!L},error:function(e){z.error("cookieStore error: "+e)},get:function(e){if(L){try{for(var t=e+"=",n=L.cookie.split(";").filter((function(e){return e.length})),i=0;i1&&void 0!==arguments[1]?arguments[1]:L;if(ze)return ze;if(!t)return"";if(["localhost","127.0.0.1"].includes(e))return"";for(var n=e.split("."),i=Math.min(n.length,8),r="dmn_chk_"+Ue(),s=new RegExp("(^|;)\\s*"+r+"=1");!ze&&i--;){var o=n.slice(i).join("."),a=r+"=1;domain=."+o;t.cookie=a,s.test(t.cookie)&&(t.cookie=a+";expires=Thu, 01 Jan 1970 00:00:00 GMT",ze=o)}return ze}(e);if(!n){var i=function(e){var t=e.match(Ge);return t?t[0]:""}(e);i!==n&&z.info("Warning: cookie subdomain discovery mismatch",i,n),n=i}return n?"; domain=."+n:""}return""}(L.location.hostname,i);if(n){var u=new Date;u.setTime(u.getTime()+24*n*60*60*1e3),s="; expires="+u.toUTCString()}r&&(o="; secure");var l=e+"="+encodeURIComponent(JSON.stringify(t))+s+"; SameSite=Lax; path=/"+a+o;return l.length>3686.4&&z.warn("cookieStore warning: large cookie, len="+l.length),L.cookie=l,l}catch(e){return}},remove:function(e,t){try{Qe.set(e,"",-1,t)}catch(e){return}}},Je=null,Ye={is_supported:function(){if(!R(Je))return Je;var e=!0;if(F(T))e=!1;else try{var t="__mplssupport__";Ye.set(t,"xyz"),'"xyz"'!==Ye.get(t)&&(e=!1),Ye.remove(t)}catch(t){e=!1}return e||z.error("localStorage unsupported; falling back to cookie store"),Je=e,e},error:function(e){z.error("localStorage error: "+e)},get:function(e){try{return null==T?void 0:T.localStorage.getItem(e)}catch(e){Ye.error(e)}return null},parse:function(e){try{return JSON.parse(Ye.get(e))||{}}catch(e){}return null},set:function(e,t){try{null==T||T.localStorage.setItem(e,JSON.stringify(t))}catch(e){Ye.error(e)}},remove:function(e){try{null==T||T.localStorage.removeItem(e)}catch(e){Ye.error(e)}}},Xe=["distinct_id",_e,me,Ce],Ke=r(r({},Ye),{},{parse:function(e){try{var t={};try{t=Qe.parse(e)||{}}catch(e){}var n=Y(t,JSON.parse(Ye.get(e)||"{}"));return Ye.set(e,n),n}catch(e){}return null},set:function(e,t,n,i,r){try{Ye.set(e,t);var 
s={};Xe.forEach((function(e){t[e]&&(s[e]=t[e])})),Object.keys(s).length&&Qe.set(e,s,n,i,r)}catch(e){Ye.error(e)}},remove:function(e,t){try{null==T||T.localStorage.removeItem(e),Qe.remove(e,t)}catch(e){Ye.error(e)}}}),Ze={},et={is_supported:function(){return!0},error:function(e){z.error("memoryStorage error: "+e)},get:function(e){return Ze[e]||null},parse:function(e){return Ze[e]||null},set:function(e,t){Ze[e]=t},remove:function(e){delete Ze[e]}},tt=null,nt={is_supported:function(){if(!R(tt))return tt;if(tt=!0,F(T))tt=!1;else try{var e="__support__";nt.set(e,"xyz"),'"xyz"'!==nt.get(e)&&(tt=!1),nt.remove(e)}catch(e){tt=!1}return tt},error:function(e){z.error("sessionStorage error: ",e)},get:function(e){try{return null==T?void 0:T.sessionStorage.getItem(e)}catch(e){nt.error(e)}return null},parse:function(e){try{return JSON.parse(nt.get(e))||null}catch(e){}return null},set:function(e,t){try{null==T||T.sessionStorage.setItem(e,JSON.stringify(t))}catch(e){nt.error(e)}},remove:function(e){try{null==T||T.sessionStorage.removeItem(e)}catch(e){nt.error(e)}}},it=["localhost","127.0.0.1"],rt=function(e){var t=null==L?void 0:L.createElement("a");return F(t)?null:(t.href=e,t)},st=function(e,t){for(var n,i=((e.split("#")[0]||"").split("?")[1]||"").split("&"),r=0;r=3&&(t=F(e[2])?e[3]:e[2]),["watchOS",t]}],[new RegExp("("+lt+" (\\d+)\\.(\\d+)\\.?(\\d+)?|"+lt+")","i"),function(e){if(e&&e[2]){var t=[e[2],e[3],e[4]||"0"];return[lt,t.join(".")]}return[lt,""]}],[/Mac OS X (\d+)[_.](\d+)[_.]?(\d+)?/i,function(e){var t=["Mac OS X",""];if(e&&e[1]){var n=[e[1],e[2],e[3]||"0"];t[1]=n.join(".")}return t}],[/Mac/i,["Mac OS X",""]],[/CrOS/,[kt,""]],[/Linux|debian/i,["Linux",""]]],en=function(e){return Qt.test(e)?Tt:Vt.test(e)?$t:Gt.test(e)?Mt:new RegExp(Bt,"i").test(e)?Bt:new RegExp("("+Lt+"|WPDesktop)","i").test(e)?Lt:/iPad/.test(e)?ht:/iPod/.test(e)?"iPod Touch":/iPhone/.test(e)?"iPhone":/(watch)(?: ?os[,/]|\d,\d\/)[\d.]+/i.test(e)?pt:Jt.test(e)?gt:/(kobo)\s(ereader|touch)/i.test(e)?"Kobo":new RegExp(Dt,"i").test(e)?Dt:/(kf[a-z]{2}wi|aeo[c-r]{2})( bui|\))/i.test(e)||/(kf[a-z]+)( bui|\)).+silk\//i.test(e)?"Kindle Fire":/(Android|ZTE)/i.test(e)?!new RegExp(at).test(e)||/(9138B|TB782B|Nexus [97]|pixel c|HUAWEISHT|BTV|noble nook|smart ultra 6)/i.test(e)?/pixel[\daxl ]{1,6}/i.test(e)&&!/pixel c/i.test(e)||/(huaweimed-al00|tah-|APA|SM-G92|i980|zte|U304AA)/i.test(e)||/lmy47v/i.test(e)&&!/QTAQZ3/i.test(e)?lt:dt:lt:new RegExp("(pda|"+at+")","i").test(e)?Ht:new RegExp(ct,"i").test(e)&&!new RegExp(ct+" pc","i").test(e)?jt:""},tn="https?://(.*)",nn=["utm_source","utm_medium","utm_campaign","utm_content","utm_term","gclid","gad_source","gclsrc","dclid","gbraid","wbraid","fbclid","msclkid","twclid","li_fat_id","mc_cid","igshid","ttclid"],rn={campaignParams:function(e){var t=nn.concat(e||[]),n={};return J(t,(function(e){var t=L?st(L.URL,e):"";t.length&&(n[e]=t)})),n},searchEngine:function(){var e=null==L?void 0:L.referrer;return e?0===e.search(tn+"google.([^/?]*)")?"google":0===e.search(tn+"bing.com")?"bing":0===e.search(tn+"yahoo.com")?"yahoo":0===e.search(tn+"duckduckgo.com")?"duckduckgo":null:null},searchInfo:function(){var e=rn.searchEngine(),t="yahoo"!=e?"q":"p",n={};if(!R(e)){n.$search_engine=e;var i=L?st(L.referrer,t):"";i.length&&(n.ph_keyword=i)}return n},browser:Xt,browserVersion:function(e,t){var n=Xt(e,t),i=Kt[n];if(F(i))return null;for(var r=0;r1e3?j.substring(0,997)+"...":j,$browser_version:rn.browserVersion(j,navigator.vendor),$browser_language:rn.browserLanguage(),$screen_height:null==T?void 
0:T.screen.height,$screen_width:null==T?void 0:T.screen.width,$viewport_height:null==T?void 0:T.innerHeight,$viewport_width:null==T?void 0:T.innerWidth,$lib:"web",$lib_version:v.LIB_VERSION,$insert_id:Math.random().toString(36).substring(2,10)+Math.random().toString(36).substring(2,10),$time:Z()/1e3})},people_properties:function(){if(!j)return{};var e=c(rn.os(j),2),t=e[0],n=e[1];return Y(ne({$os:t,$os_version:n,$browser:rn.browser(j,navigator.vendor)}),{$browser_version:rn.browserVersion(j,navigator.vendor)})}},sn=["cookie","localstorage","localstorage+cookie","sessionstorage","memory"],on=function(){function e(t){o(this,e),this.config=t,this.props={},this.campaign_params_saved=!1,this.name=function(e){var t="";return e.token&&(t=e.token.replace(/\+/g,"PL").replace(/\//g,"SL").replace(/=/g,"EQ")),e.persistence_name?"ph_"+e.persistence_name:"ph_"+t+"_posthog"}(t),this.storage=this.buildStorage(t),this.load(),this.update_config(t,t),this.save()}return u(e,[{key:"buildStorage",value:function(e){-1===sn.indexOf(e.persistence.toLowerCase())&&(z.critical("Unknown persistence type "+e.persistence+"; falling back to localStorage+cookie"),e.persistence="localStorage+cookie");var t=e.persistence.toLowerCase();return"localstorage"===t&&Ye.is_supported()?Ye:"localstorage+cookie"===t&&Ke.is_supported()?Ke:"sessionstorage"===t&&nt.is_supported()?nt:"memory"===t?et:"cookie"===t?Qe:Ke.is_supported()?Ke:Qe}},{key:"properties",value:function(){var e={};return J(this.props,(function(t,n){if(n===ye&&w(t))for(var i=Object.keys(t),r=0;r1&&void 0!==arguments[1]?arguments[1]:{};o(this,e),l(this,"bucketSize",100),l(this,"refillRate",10),l(this,"mutationBuckets",{}),l(this,"loggedTracker",{}),l(this,"refillBuckets",(function(){Object.keys(r.mutationBuckets).forEach((function(e){r.mutationBuckets[e]=r.mutationBuckets[e]+r.refillRate,r.mutationBuckets[e]>=r.bucketSize&&delete r.mutationBuckets[e]}))})),l(this,"getNodeOrRelevantParent",(function(e){var t=r.rrweb.mirror.getNode(e);if("svg"!==(null==t?void 0:t.nodeName)&&t instanceof Element){var n=t.closest("svg");if(n)return[r.rrweb.mirror.getId(n),n]}return[e,t]})),l(this,"numberOfChanges",(function(e){var t,n,i,r,s,o,a,u;return(null!==(t=null===(n=e.removes)||void 0===n?void 0:n.length)&&void 0!==t?t:0)+(null!==(i=null===(r=e.attributes)||void 0===r?void 0:r.length)&&void 0!==i?i:0)+(null!==(s=null===(o=e.texts)||void 0===o?void 0:o.length)&&void 0!==s?s:0)+(null!==(a=null===(u=e.adds)||void 0===u?void 0:u.length)&&void 0!==a?a:0)})),l(this,"throttleMutations",(function(e){if(3!==e.type||0!==e.data.source)return e;var t=e.data,n=r.numberOfChanges(t);t.attributes&&(t.attributes=t.attributes.filter((function(e){var t,n,i,s=c(r.getNodeOrRelevantParent(e.id),2),o=s[0],a=s[1];return 0!==r.mutationBuckets[o]&&(r.mutationBuckets[o]=null!==(t=r.mutationBuckets[o])&&void 0!==t?t:r.bucketSize,r.mutationBuckets[o]=Math.max(r.mutationBuckets[o]-1,0),0===r.mutationBuckets[o]&&(r.loggedTracker[o]||(r.loggedTracker[o]=!0,null===(n=(i=r.options).onBlockedNode)||void 0===n||n.call(i,o,a))),e)})));var i=r.numberOfChanges(t);return 0!==i||n===i?e:void 0})),this.rrweb=t,this.options=s,this.refillRate=null!==(n=this.options.refillRate)&&void 0!==n?n:this.refillRate,this.bucketSize=null!==(i=this.options.bucketSize)&&void 0!==i?i:this.bucketSize,setInterval((function(){r.refillBuckets()}),1e3)})),cn=function(e){return 
e[e.DomContentLoaded=0]="DomContentLoaded",e[e.Load=1]="Load",e[e.FullSnapshot=2]="FullSnapshot",e[e.IncrementalSnapshot=3]="IncrementalSnapshot",e[e.Meta=4]="Meta",e[e.Custom=5]="Custom",e[e.Plugin=6]="Plugin",e}(cn||{});function dn(e){return e?V(e).split(/\s+/):[]}function hn(e){var t="";switch(s(e.className)){case"string":t=e.className;break;case"object":t=("baseVal"in e.className?e.className.baseVal:null)||e.getAttribute("class")||"";break;default:t=""}return dn(t)}function fn(e){return x(e)?null:V(e).split(/(\s+)/).filter((function(e){return Cn(e)})).join("").replace(/[\r\n]/g," ").replace(/[ ]+/g," ").substring(0,255)}function pn(e){var t="";return wn(e)&&!Sn(e)&&e.childNodes&&e.childNodes.length&&J(e.childNodes,(function(e){var n;_n(e)&&e.textContent&&(t+=null!==(n=fn(e.textContent))&&void 0!==n?n:"")})),V(t)}function vn(e){return!!e&&1===e.nodeType}function gn(e,t){return!!e&&!!e.tagName&&e.tagName.toLowerCase()===t.toLowerCase()}function _n(e){return!!e&&3===e.nodeType}function mn(e){return!!e&&11===e.nodeType}var yn=["a","button","form","input","select","textarea","label"];function bn(e){var t=e.parentNode;return!(!t||!vn(t))&&t}function kn(e,t){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:void 0,i=arguments.length>3?arguments[3]:void 0,r=arguments.length>4?arguments[4]:void 0;if(!T||!e||gn(e,"html")||!vn(e))return!1;if(null!=n&&n.url_allowlist){var o=T.location.href,a=n.url_allowlist;if(a&&!a.some((function(e){return o.match(e)})))return!1}if(null!=n&&n.dom_event_allowlist){var u=n.dom_event_allowlist;if(u&&!u.some((function(e){return t.type===e})))return!1}for(var l=!1,c=[e],d=!0,h=e;h.parentNode&&!gn(h,"body");)if(mn(h.parentNode))c.push(h.parentNode.host),h=h.parentNode.host;else{if(!(d=bn(h)))break;if(i||yn.indexOf(d.tagName.toLowerCase())>-1)l=!0;else{var f=T.getComputedStyle(d);f&&"pointer"===f.getPropertyValue("cursor")&&(l=!0)}c.push(d),h=d}if(!function(e,t){var n=null==t?void 0:t.element_allowlist;if(F(n))return!0;var i,r=p(e);try{var o=function(){var e=i.value;if(n.some((function(t){return e.tagName.toLowerCase()===t})))return{v:!0}};for(r.s();!(i=r.n()).done;){var a=o();if("object"===s(a))return a.v}}catch(e){r.e(e)}finally{r.f()}return!1}(c,n))return!1;if(!function(e,t){var n=null==t?void 0:t.css_selector_allowlist;if(F(n))return!0;var i,r=p(e);try{var o=function(){var e=i.value;if(n.some((function(t){return e.matches(t)})))return{v:!0}};for(r.s();!(i=r.n()).done;){var a=o();if("object"===s(a))return a.v}}catch(e){r.e(e)}finally{r.f()}return!1}(c,n))return!1;var v=T.getComputedStyle(e);if(v&&"pointer"===v.getPropertyValue("cursor")&&"click"===t.type)return!0;var g=e.tagName.toLowerCase();switch(g){case"html":return!1;case"form":return(r||["submit"]).indexOf(t.type)>=0;case"input":case"select":case"textarea":return(r||["change","click"]).indexOf(t.type)>=0;default:return l?(r||["click"]).indexOf(t.type)>=0:(r||["click"]).indexOf(t.type)>=0&&(yn.indexOf(g)>-1||"true"===e.getAttribute("contenteditable"))}}function wn(e){for(var t=e;t.parentNode&&!gn(t,"body");t=t.parentNode){var n=hn(t);if(X(n,"ph-sensitive")||X(n,"ph-no-capture"))return!1}if(X(hn(e),"ph-include"))return!0;var i=e.type||"";if(E(i))switch(i.toLowerCase()){case"hidden":case"password":return!1}var r=e.name||e.id||"";return!E(r)||!/^cc|cardnum|ccnum|creditcard|csc|cvc|cvv|exp|pass|pwd|routing|seccode|securitycode|securitynum|socialsec|socsec|ssn/i.test(r.replace(/[^a-zA-Z0-9]/g,""))}function 
Sn(e){return!!(gn(e,"input")&&!["button","checkbox","submit","reset"].includes(e.type)||gn(e,"select")||gn(e,"textarea")||"true"===e.getAttribute("contenteditable"))}var Fn="(4[0-9]{12}(?:[0-9]{3})?)|(5[1-5][0-9]{14})|(6(?:011|5[0-9]{2})[0-9]{12})|(3[47][0-9]{13})|(3(?:0[0-5]|[68][0-9])[0-9]{11})|((?:2131|1800|35[0-9]{3})[0-9]{11})",En=new RegExp("^(?:".concat(Fn,")$")),Rn=new RegExp(Fn),xn="\\d{3}-?\\d{2}-?\\d{4}",Pn=new RegExp("^(".concat(xn,")$")),In=new RegExp("(".concat(xn,")"));function Cn(e){var t=!(arguments.length>1&&void 0!==arguments[1])||arguments[1];if(x(e))return!1;if(E(e)){if(e=V(e),(t?En:Rn).test((e||"").replace(/[- ]/g,"")))return!1;if((t?Pn:In).test(e))return!1}return!0}function Tn(e){var t=pn(e);return Cn(t="".concat(t," ").concat($n(e)).trim())?t:""}function $n(e){var t="";return e&&e.childNodes&&e.childNodes.length&&J(e.childNodes,(function(e){var n;if(e&&"span"===(null===(n=e.tagName)||void 0===n?void 0:n.toLowerCase()))try{var i=pn(e);t="".concat(t," ").concat(i).trim(),e.childNodes&&e.childNodes.length&&(t="".concat(t," ").concat($n(e)).trim())}catch(e){z.error(e)}})),t}function Mn(e){return function(e){var t=e.map((function(e){var t,n,i="";if(e.tag_name&&(i+=e.tag_name),e.attr_class){e.attr_class.sort();var s,o=p(e.attr_class);try{for(o.s();!(s=o.n()).done;){var a=s.value;i+=".".concat(a.replace(/"/g,""))}}catch(e){o.e(e)}finally{o.f()}}var u=r(r(r(r({},e.text?{text:e.text}:{}),{},{"nth-child":null!==(t=e.nth_child)&&void 0!==t?t:0,"nth-of-type":null!==(n=e.nth_of_type)&&void 0!==n?n:0},e.href?{href:e.href}:{}),e.attr_id?{attr_id:e.attr_id}:{}),e.attributes),l={};return K(u).sort((function(e,t){var n=c(e,1)[0],i=c(t,1)[0];return n.localeCompare(i)})).forEach((function(e){var t=c(e,2),n=t[0],i=t[1];return l[On(n.toString())]=On(i.toString())})),(i+=":")+K(u).map((function(e){var t=c(e,2),n=t[0],i=t[1];return"".concat(n,'="').concat(i,'"')})).join("")}));return t.join(";")}(function(e){return e.map((function(e){var t,n,i={text:null===(t=e.$el_text)||void 0===t?void 0:t.slice(0,400),tag_name:e.tag_name,href:null===(n=e.attr__href)||void 0===n?void 0:n.slice(0,2048),attr_class:qn(e),attr_id:e.attr__id,nth_child:e.nth_child,nth_of_type:e.nth_of_type,attributes:{}};return K(e).filter((function(e){return 0===c(e,1)[0].indexOf("attr__")})).forEach((function(e){var t=c(e,2),n=t[0],r=t[1];return i.attributes[n]=r})),i}))}(e))}function On(e){return e.replace(/"|\\"/g,'\\"')}function qn(e){var t=e.attr__class;return t?b(t)?t:dn(t):void 0}var An="[SessionRecording]",Ln={initiatorTypes:["audio","beacon","body","css","early-hint","embed","fetch","frame","iframe","icon","image","img","input","link","navigation","object","ping","script","track","video","xmlhttprequest"],maskRequestFn:function(e){return e},recordHeaders:!1,recordBody:!1,recordInitialRequests:!1,recordPerformance:!1,performanceEntryTypeToObserve:["first-input","navigation","paint","resource"],payloadSizeLimitBytes:1e6},Dn=["authorization","x-forwarded-for","authorization","cookie","set-cookie","x-api-key","x-real-ip","remote-addr","forwarded","proxy-authorization","x-csrf-token","x-csrftoken","x-xsrf-token"],Bn=["password","secret","passwd","api_key","apikey","auth","credentials","mysql_pwd","privatekey","private_key","token"],Nn=["/s/","/e/","/i/"];function Hn(e,t,n,i){if(x(e))return e;var r=(null==t?void 0:t["content-length"])||function(e){return new Blob([e]).size}(e);return E(r)&&(r=parseInt(r)),r>n?An+" ".concat(i," body too large to record (").concat(r," bytes)"):e}function jn(e,t){if(x(e))return e;var 
n=e;return Cn(n,!1)||(n=An+" "+t+" body redacted"),J(Bn,(function(e){var i,r;null!==(i=n)&&void 0!==i&&i.length&&-1!==(null===(r=n)||void 0===r?void 0:r.indexOf(e))&&(n=An+" "+t+" body redacted as might contain: "+e)})),n}var Un="__ph_opt_in_out_";function Wn(e,t){Kn(!0,e,t)}function zn(e,t){Kn(!1,e,t)}function Gn(e,t){return"1"===Xn(e,t)}function Vn(e,t){return!!function(e){if(e&&e.respectDnt){var t=e&&e.window||T,n=null==t?void 0:t.navigator,i=!1;return J([null==n?void 0:n.doNotTrack,n.msDoNotTrack,t.doNotTrack],(function(e){X([!0,1,"1","yes"],e)&&(i=!0)})),i}return!1}(t)||"0"===Xn(e,t)}function Qn(e,t){Jn(t=t||{}).remove(Yn(e,t),!!t.crossSubdomainCookie)}function Jn(e){return"localStorage"===(e=e||{}).persistenceType?Ye:"localStorage+cookie"===e.persistenceType?Ke:Qe}function Yn(e,t){return((t=t||{}).persistencePrefix||Un)+e}function Xn(e,t){return Jn(t).get(Yn(e,t))}function Kn(e,t,n){E(t)&&t.length?(Jn(n=n||{}).set(Yn(t,n),e?1:0,P(n.cookieExpiration)?n.cookieExpiration:null,n.crossSubdomainCookie,n.secureCookie),n.capture&&e&&n.capture(n.captureEventName||"$opt_in",n.captureProperties||{},{send_instantly:!0})):z.error("gdpr."+(e?"optIn":"optOut")+" called with an invalid token")}function Zn(e){var t=!1;try{var n=e.config.token,i=e.config.respect_dnt,r=e.config.opt_out_capturing_persistence_type,s=e.config.opt_out_capturing_cookie_prefix||void 0,o=e.config.window;n&&(t=Vn(n,{respectDnt:i,persistenceType:r,persistencePrefix:s,window:o}))}catch(e){z.error("Unexpected error when checking capturing opt-out status: "+e)}return t}var ei,ti=3e5,ni=ti;!function(e){e[e.Mutation=0]="Mutation",e[e.MouseMove=1]="MouseMove",e[e.MouseInteraction=2]="MouseInteraction",e[e.Scroll=3]="Scroll",e[e.ViewportResize=4]="ViewportResize",e[e.Input=5]="Input",e[e.TouchMove=6]="TouchMove",e[e.MediaInteraction=7]="MediaInteraction",e[e.StyleSheetRule=8]="StyleSheetRule",e[e.CanvasMutation=9]="CanvasMutation",e[e.Font=10]="Font",e[e.Log=11]="Log",e[e.Drag=12]="Drag",e[e.StyleDeclaration=13]="StyleDeclaration",e[e.Selection=14]="Selection",e[e.AdoptedStyleSheet=15]="AdoptedStyleSheet"}(ei||(ei={}));var ii=[ei.MouseMove,ei.MouseInteraction,ei.Scroll,ei.ViewportResize,ei.Input,ei.TouchMove,ei.MediaInteraction,ei.Drag],ri=function(e){return{rrwebMethod:e,enqueuedAt:Date.now(),attempt:1}},si="[SessionRecording]",oi=function(){function e(t){var n=this;if(o(this,e),l(this,"queuedRRWebEvents",[]),l(this,"isIdle",!1),l(this,"_linkedFlagSeen",!1),l(this,"_lastActivityTimestamp",Date.now()),l(this,"windowId",null),l(this,"sessionId",null),l(this,"_linkedFlag",null),l(this,"_forceAllowLocalhostNetworkCapture",!1),l(this,"_samplingSessionListener",null),this.instance=t,this._captureStarted=!1,this._endpoint="/s/",this.stopRrweb=void 0,this.receivedDecide=!1,null==T||T.addEventListener("beforeunload",(function(){n._flushBuffer()})),null==T||T.addEventListener("offline",(function(){n._tryAddCustomEvent("browser offline",{})})),null==T||T.addEventListener("online",(function(){n._tryAddCustomEvent("browser online",{})})),null==T||T.addEventListener("visibilitychange",(function(){if(null!=L&&L.visibilityState){var e="window "+L.visibilityState;n._tryAddCustomEvent(e,{})}})),!this.instance.sessionManager)throw z.error(si+" started without valid sessionManager"),new Error(si+" started without valid sessionManager. 
This is a bug.");this.buffer=this.clearBuffer(),this._setupSampling()}return u(e,[{key:"rrwebRecord",get:function(){var e;return null==U||null===(e=U.rrweb)||void 0===e?void 0:e.record}},{key:"started",get:function(){return this._captureStarted}},{key:"sessionManager",get:function(){if(!this.instance.sessionManager)throw z.error(si+" started without valid sessionManager"),new Error(si+" started without valid sessionManager. This is a bug.");return this.instance.sessionManager}},{key:"isSampled",get:function(){var e=this.instance.get_property(me);return I(e)?e:null}},{key:"sessionDuration",get:function(){var e,t,n=null===(e=this.buffer)||void 0===e?void 0:e.data[(null===(t=this.buffer)||void 0===t?void 0:t.data.length)-1],i=this.sessionManager.checkAndGetSessionAndWindowId(!0).sessionStartTimestamp;return n?n.timestamp-i:null}},{key:"isRecordingEnabled",get:function(){var e=!!this.instance.get_property(de),t=!this.instance.config.disable_session_recording;return T&&e&&t}},{key:"isConsoleLogCaptureEnabled",get:function(){var e=!!this.instance.get_property(he),t=this.instance.config.enable_recording_console_log;return null!=t?t:e}},{key:"canvasRecording",get:function(){var e=this.instance.get_property(pe);return e&&e.fps&&e.quality?{enabled:e.enabled,fps:e.fps,quality:e.quality}:void 0}},{key:"networkPayloadCapture",get:function(){var e,t,n=this.instance.get_property(fe),i={recordHeaders:null===(e=this.instance.config.session_recording)||void 0===e?void 0:e.recordHeaders,recordBody:null===(t=this.instance.config.session_recording)||void 0===t?void 0:t.recordBody},r=(null==i?void 0:i.recordHeaders)||(null==n?void 0:n.recordHeaders),s=(null==i?void 0:i.recordBody)||(null==n?void 0:n.recordBody),o=this.instance.config.capture_performance||(null==n?void 0:n.capturePerformance);return r||s||o?{recordHeaders:r,recordBody:s,recordPerformance:o}:void 0}},{key:"sampleRate",get:function(){var e=this.instance.get_property(ve);return P(e)?e:null}},{key:"minimumDuration",get:function(){var e=this.instance.get_property(ge);return P(e)?e:null}},{key:"status",get:function(){return this.receivedDecide?this.isRecordingEnabled?x(this._linkedFlag)||this._linkedFlagSeen?I(this.isSampled)?this.isSampled?"sampled":"disabled":"active":"buffering":"disabled":"buffering"}},{key:"startIfEnabledOrStop",value:function(){this.isRecordingEnabled?(this._startCapture(),z.info(si+" started")):(this.stopRecording(),this.clearBuffer())}},{key:"stopRecording",value:function(){this._captureStarted&&this.stopRrweb&&(this.stopRrweb(),this.stopRrweb=void 0,this._captureStarted=!1,z.info(si+" stopped"))}},{key:"makeSamplingDecision",value:function(e){var t,n=this.sessionId!==e,i=this.sampleRate;if(P(i)){var r,s=this.isSampled,o=n||!I(s);!(r=o?Math.random()1&&void 0!==arguments[1]?arguments[1]:"log";null===(t=this.instance.sessionRecording)||void 0===t||t.onRRwebEmit({type:6,data:{plugin:"rrweb/console@1",payload:{level:n,trace:[],payload:[JSON.stringify(e)]}},timestamp:Z()})}},{key:"_startCapture",value:function(){var e=this;F(Object.assign)||this._captureStarted||this.instance.config.disable_session_recording||Zn(this.instance)||(this._captureStarted=!0,this.sessionManager.checkAndGetSessionAndWindowId(),this.rrwebRecord?this._onScriptLoaded():se(this.instance.requestRouter.endpointFor("assets","/static/recorder.js?v=".concat(v.LIB_VERSION)),(function(t){if(t)return z.error(si+" could not load recorder.js",t);e._onScriptLoaded()})))}},{key:"isInteractiveEvent",value:function(e){var t;return 
3===e.type&&-1!==ii.indexOf(null===(t=e.data)||void 0===t?void 0:t.source)}},{key:"_updateWindowAndSessionIds",value:function(e){var t=this.isInteractiveEvent(e);t||this.isIdle||e.timestamp-this._lastActivityTimestamp>ni&&(this.isIdle=!0,this._tryAddCustomEvent("sessionIdle",{reason:"user inactivity",timeSinceLastActive:e.timestamp-this._lastActivityTimestamp,threshold:ni}));var n=!1;if(t&&(this._lastActivityTimestamp=e.timestamp,this.isIdle&&(this.isIdle=!1,this._tryAddCustomEvent("sessionNoLongerIdle",{reason:"user activity",type:e.type}),n=!0)),!this.isIdle){var i=this.sessionManager.checkAndGetSessionAndWindowId(!t,e.timestamp),r=i.windowId,s=i.sessionId,o=this.sessionId!==s,a=this.windowId!==r;this.windowId=r,this.sessionId=s,(n||-1===[an,un].indexOf(e.type)&&(a||o))&&this._tryTakeFullSnapshot()}}},{key:"_tryRRWebMethod",value:function(e){try{return e.rrwebMethod(),!0}catch(r){return this.queuedRRWebEvents.length<10?this.queuedRRWebEvents.push({enqueuedAt:e.enqueuedAt||Date.now(),attempt:e.attempt++,rrwebMethod:e.rrwebMethod}):z.warn(si+" could not emit queued rrweb event.",r,e),!1}}},{key:"_tryAddCustomEvent",value:function(e,t){var n=this;return this._tryRRWebMethod(ri((function(){return n.rrwebRecord.addCustomEvent(e,t)})))}},{key:"_tryTakeFullSnapshot",value:function(){var e=this;return this._tryRRWebMethod(ri((function(){return e.rrwebRecord.takeFullSnapshot()})))}},{key:"_onScriptLoaded",value:function(){for(var e,t=this,n={blockClass:"ph-no-capture",blockSelector:void 0,ignoreClass:"ph-ignore-input",maskTextClass:"ph-mask",maskTextSelector:void 0,maskTextFn:void 0,maskAllInputs:!0,maskInputOptions:{},maskInputFn:void 0,slimDOMOptions:{},collectFonts:!1,inlineStylesheet:!0,recordCrossOriginIframes:!1},i=this.instance.config.session_recording,s=0,o=Object.entries(i||{});s10&&(t.data.payload.payload=t.data.payload.payload.slice(0,10),t.data.payload.payload.push("...[truncated]"));for(var n=[],i=0;i2e3?n.push(t.data.payload.payload[i].slice(0,2e3)+"...[truncated]"):n.push(t.data.payload.payload[i]);return t.data.payload.payload=n,e}return e}(n),r=JSON.stringify(i).length;if(this._updateWindowAndSessionIds(i),!this.isIdle||i.type===cn.Custom){var s={$snapshot_bytes:r,$snapshot_data:i,$session_id:this.sessionId,$window_id:this.windowId};"disabled"!==this.status?this._captureSnapshotBuffered(s):this.clearBuffer()}}}}},{key:"_pageViewFallBack",value:function(){if(!this.instance.config.capture_pageview&&T){var e=this._maskUrl(T.location.href);this._lastHref!==e&&(this._tryAddCustomEvent("$url_changed",{href:e}),this._lastHref=e)}}},{key:"_processQueuedEvents",value:function(){var e=this;if(this.queuedRRWebEvents.length){var t=d(this.queuedRRWebEvents);this.queuedRRWebEvents=[],t.forEach((function(n){Date.now()-n.enqueuedAt>2e3?e._tryAddCustomEvent("rrwebQueueTimeout",{enqueuedAt:n.enqueuedAt,attempt:n.attempt,queueLength:t.length}):e._tryRRWebMethod(n)&&e._tryAddCustomEvent("rrwebQueueSuccess",{enqueuedAt:n.enqueuedAt,attempt:n.attempt,queueLength:t.length})}))}}},{key:"_maskUrl",value:function(e){var t=this.instance.config.session_recording;if(t.maskNetworkRequestFn){var n,i={url:e};return null===(n=i=t.maskNetworkRequestFn(i))||void 0===n?void 0:n.url}return e}},{key:"clearBuffer",value:function(){return this.buffer=void 0,{size:0,data:[],sessionId:this.sessionId,windowId:this.windowId}}},{key:"_flushBuffer",value:function(){var e=this;this.flushBufferTimer&&(clearTimeout(this.flushBufferTimer),this.flushBufferTimer=void 0);var 
t=this.minimumDuration,n=this.sessionDuration,i=P(n)&&n>=0,r=P(t)&&i&&n943718.4||this.buffer.sessionId&&this.buffer.sessionId!==this.sessionId)&&(this.buffer=this._flushBuffer()),R(this.buffer.sessionId)&&!R(this.sessionId)&&(this.buffer.sessionId=this.sessionId,this.buffer.windowId=this.windowId),this.buffer.size+=e.$snapshot_bytes,this.buffer.data.push(e.$snapshot_data),this.flushBufferTimer||(this.flushBufferTimer=setTimeout((function(){n._flushBuffer()}),2e3))}},{key:"_captureSnapshot",value:function(e){this.instance.capture("$snapshot",e,{_url:this.instance.requestRouter.endpointFor("api",this._endpoint),_noTruncate:!0,_batchKey:"recordings",_noHeatmaps:!0})}}]),e}(),ai=function(){function e(t){o(this,e),this.instance=t,this.instance.decideEndpointWasHit=this.instance._hasBootstrappedFeatureFlags()}return u(e,[{key:"call",value:function(){var e=this,t={token:this.instance.config.token,distinct_id:this.instance.get_distinct_id(),groups:this.instance.getGroups(),person_properties:this.instance.get_property(ke),group_properties:this.instance.get_property(we),disable_flags:this.instance.config.advanced_disable_feature_flags||this.instance.config.advanced_disable_feature_flags_on_first_load||void 0};this.instance._send_request({method:"POST",url:this.instance.requestRouter.endpointFor("api","/decide/?v=3"),data:t,compression:this.instance.config.disable_compression?void 0:ie.Base64,timeout:this.instance.config.feature_flag_request_timeout_ms,callback:function(t){return e.parseDecideResponse(t.json)}})}},{key:"parseDecideResponse",value:function(e){var t=this;this.instance.featureFlags.setReloadingPaused(!1),this.instance.featureFlags._startReloadTimer();var n=!e;if(this.instance.config.advanced_disable_feature_flags_on_first_load||this.instance.config.advanced_disable_feature_flags||this.instance.featureFlags.receivedFeatureFlags(null!=e?e:{},n),n)z.error("Failed to fetch feature flags from PostHog.");else{if(!L||!L.body)return z.info("document not ready yet, trying again in 500 milliseconds..."),void setTimeout((function(){t.parseDecideResponse(e)}),500);this.instance._afterDecideResponse(e);var i=null==T?void 0:T.extendPostHogWithExceptionAutoCapture;if(e.autocaptureExceptions&&e.autocaptureExceptions&&F(i)&&se(this.instance.requestRouter.endpointFor("assets","/static/exception-autocapture.js"),(function(n){if(n)return z.error("Could not load exception autocapture script",n);T.extendPostHogWithExceptionAutocapture(t.instance,e)})),e.siteApps)if(this.instance.config.opt_in_site_apps){var r,s=p(e.siteApps);try{var o=function(){var e=r.value,n=e.id,i=e.url,s=t.instance.requestRouter.endpointFor("api",i);U["__$$ph_site_app_".concat(n)]=t.instance,se(s,(function(e){e&&z.error("Error while initializing PostHog app with config id ".concat(n),e)}))};for(s.s();!(r=s.n()).done;)o()}catch(e){s.e(e)}finally{s.f()}}else e.siteApps.length>0&&z.error('PostHog site apps are disabled. 
Enable the "opt_in_site_apps" config to proceed.')}}}]),e}(),ui=null!=T&&T.location?ot(T.location.hash,"__posthog")||ot(location.hash,"state"):null,li="_postHogToolbarParams",ci=function(){function e(t){o(this,e),l(this,"_toolbarScriptLoaded",!1),this.instance=t}return u(e,[{key:"maybeLoadToolbar",value:function(){var e,t,n=arguments.length>0&&void 0!==arguments[0]?arguments[0]:void 0,i=arguments.length>1&&void 0!==arguments[1]?arguments[1]:void 0,r=arguments.length>2&&void 0!==arguments[2]?arguments[2]:void 0;if(!T||!L)return!1;n=null!==(e=n)&&void 0!==e?e:T.location,r=null!==(t=r)&&void 0!==t?t:T.history;try{if(!i){try{T.localStorage.setItem("test","test"),T.localStorage.removeItem("test")}catch(e){return!1}i=null==T?void 0:T.localStorage}var s,o=ui||ot(n.hash,"__posthog")||ot(n.hash,"state"),a=o?ee((function(){return JSON.parse(atob(decodeURIComponent(o)))}))||ee((function(){return JSON.parse(decodeURIComponent(o))})):null;return a&&"ph_authorize"===a.action?((s=a).source="url",s&&Object.keys(s).length>0&&(a.desiredHash?n.hash=a.desiredHash:r?r.replaceState("",L.title,n.pathname+n.search):n.hash="")):((s=JSON.parse(i.getItem(li)||"{}")).source="localstorage",delete s.userIntent),!(!s.token||this.instance.config.token!==s.token)&&(this.loadToolbar(s),!0)}catch(e){return!1}}},{key:"_callLoadToolbar",value:function(e){(U.ph_load_toolbar||U.ph_load_editor)(e,this.instance)}},{key:"loadToolbar",value:function(e){var t=this;if(!T||T.localStorage.getItem(li)&&this._toolbarScriptLoaded)return!1;var n="custom"===this.instance.requestRouter.region&&this.instance.config.advanced_disable_toolbar_metrics,i=r(r({token:this.instance.config.token},e),{},{apiURL:this.instance.requestRouter.endpointFor("ui")},n?{instrument:!1}:{});if(T.localStorage.setItem(li,JSON.stringify(r(r({},i),{},{source:void 0}))),this._toolbarScriptLoaded)this._callLoadToolbar(i);else{this._toolbarScriptLoaded=!0;var s=3e5,o=Math.floor(Date.now()/s)*s;se(this.instance.requestRouter.endpointFor("assets","/static/toolbar.js?t=".concat(o)),(function(e){if(e)return z.error("Failed to load toolbar",e),void(t._toolbarScriptLoaded=!1);t._callLoadToolbar(i)})),re(T,"turbolinks:load",(function(){t._toolbarScriptLoaded=!1,t.loadToolbar(i)}))}return!0}},{key:"_loadEditor",value:function(e){return this.loadToolbar(e)}},{key:"maybeLoadEditor",value:function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:void 0,t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:void 0,n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:void 0;return this.maybeLoadToolbar(e,t,n)}}]),e}(),di=function(){function e(t){o(this,e),l(this,"isPaused",!0),l(this,"queue",[]),l(this,"flushTimeoutMs",3e3),this.sendRequest=t}return u(e,[{key:"enqueue",value:function(e){this.queue.push(e),this.flushTimeout||this.setFlushTimeout()}},{key:"unload",value:function(){var e=this;this.clearFlushTimeout();var t=this.queue.length>0?this.formatQueue():{},n=Object.values(t);[].concat(d(n.filter((function(e){return 0===e.url.indexOf("/e")}))),d(n.filter((function(e){return 0!==e.url.indexOf("/e")})))).map((function(t){e.sendRequest(r(r({},t),{},{transport:"sendBeacon"}))}))}},{key:"enable",value:function(){this.isPaused=!1,this.setFlushTimeout()}},{key:"setFlushTimeout",value:function(){var e=this;this.isPaused||(this.flushTimeout=setTimeout((function(){if(e.clearFlushTimeout(),e.queue.length>0){var t=e.formatQueue(),n=function(n){var i=t[n],r=(new Date).getTime();i.data&&b(i.data)&&J(i.data,(function(e){e.offset=Math.abs(e.timestamp-r),delete 
e.timestamp})),e.sendRequest(i)};for(var i in t)n(i)}}),this.flushTimeoutMs))}},{key:"clearFlushTimeout",value:function(){clearTimeout(this.flushTimeout),this.flushTimeout=void 0}},{key:"formatQueue",value:function(){var e={};return J(this.queue,(function(t){var n,i=t,s=(i?i.batchKey:null)||i.url;F(e[s])&&(e[s]=r(r({},i),{},{data:[]})),null===(n=e[s].data)||void 0===n||n.push(i.data)})),this.queue=[],e}}]),e}(),hi=Uint8Array,fi=Uint16Array,pi=Uint32Array,vi=new hi([0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0,0,0,0]),gi=new hi([0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13,0,0]),_i=new hi([16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15]),mi=function(e,t){for(var n=new fi(31),i=0;i<31;++i)n[i]=t+=1<>>1|(21845&Fi)<<1;Ei=(61680&(Ei=(52428&Ei)>>>2|(13107&Ei)<<2))>>>4|(3855&Ei)<<4,Si[Fi]=((65280&Ei)>>>8|(255&Ei)<<8)>>>1}var Ri=function(e,t,n){for(var i=e.length,r=0,s=new fi(t);r>>u]=l}else for(o=new fi(i),r=0;r>>15-e[r];return o},xi=new hi(288);for(Fi=0;Fi<144;++Fi)xi[Fi]=8;for(Fi=144;Fi<256;++Fi)xi[Fi]=9;for(Fi=256;Fi<280;++Fi)xi[Fi]=7;for(Fi=280;Fi<288;++Fi)xi[Fi]=8;var Pi=new hi(32);for(Fi=0;Fi<32;++Fi)Pi[Fi]=5;var Ii=Ri(xi,9,0),Ci=Ri(Pi,5,0),Ti=function(e){return(e/8|0)+(7&e&&1)},$i=function(e,t,n){(null==t||t<0)&&(t=0),(null==n||n>e.length)&&(n=e.length);var i=new(e instanceof fi?fi:e instanceof pi?pi:hi)(n-t);return i.set(e.subarray(t,n)),i},Mi=function(e,t,n){n<<=7&t;var i=t/8|0;e[i]|=n,e[i+1]|=n>>>8},Oi=function(e,t,n){n<<=7&t;var i=t/8|0;e[i]|=n,e[i+1]|=n>>>8,e[i+2]|=n>>>16},qi=function(e,t){for(var n=[],i=0;ih&&(h=s[i].s);var f=new fi(h+1),p=Ai(n[c-1],f,0);if(p>t){i=0;var v=0,g=p-t,_=1<t))break;v+=_-(1<>>=g;v>0;){var y=s[i].s;f[y]=0&&v;--i){var b=s[i].s;f[b]==t&&(--f[b],++v)}p=t}return[new hi(f),p]},Ai=function e(t,n,i){return-1==t.s?Math.max(e(t.l,n,i+1),e(t.r,n,i+1)):n[t.s]=i},Li=function(e){for(var t=e.length;t&&!e[--t];);for(var n=new fi(++t),i=0,r=e[0],s=1,o=function(e){n[i++]=e},a=1;a<=t;++a)if(e[a]==r&&a!=t)++s;else{if(!r&&s>2){for(;s>138;s-=138)o(32754);s>2&&(o(s>10?s-11<<5|28690:s-3<<5|12305),s=0)}else if(s>3){for(o(r),--s;s>6;s-=6)o(8304);s>2&&(o(s-3<<5|8208),s=0)}for(;s--;)o(r);s=1,r=e[a]}return[n.subarray(0,i),t]},Di=function(e,t){for(var n=0,i=0;i>>8,e[r+2]=255^e[r],e[r+3]=255^e[r+1];for(var s=0;s4&&!R[_i[P-1]];--P);var I,C,T,$,M=l+5<<3,O=Di(r,xi)+Di(s,Pi)+o,q=Di(r,h)+Di(s,v)+o+14+3*P+Di(S,R)+(2*S[16]+3*S[17]+7*S[18]);if(M<=O&&M<=q)return Bi(t,c,e.subarray(u,u+l));if(Mi(t,c,1+(q15&&(Mi(t,c,B[F]>>>5&127),c+=B[F]>>>12)}}}else I=Ii,C=xi,T=Ci,$=Pi;for(F=0;F255){N=i[F]>>>18&31,Oi(t,c,I[N+257]),c+=C[N+257],N>7&&(Mi(t,c,i[F]>>>23&31),c+=vi[N]);var H=31&i[F];Oi(t,c,T[H]),c+=$[H],H>3&&(Oi(t,c,i[F]>>>5&8191),c+=gi[H])}else Oi(t,c,I[i[F]]),c+=C[i[F]];return Oi(t,c,I[256]),c+C[256]},Hi=new pi([65540,131080,131088,131104,262176,1048704,1048832,2114560,2117632]),ji=new hi(0),Ui=function(){for(var e=new pi(256),t=0;t<256;++t){for(var n=t,i=9;--i;)n=(1&n&&3988292384)^n>>>1;e[t]=n}return e}(),Wi=function(e,t,n){for(;n;++t)e[t]=n,n>>>=8};function zi(e,t){void 0===t&&(t={});var n=function(){var e=4294967295;return{p:function(t){for(var n=e,i=0;i>>8;e=n},d:function(){return 4294967295^e}}}(),i=e.length;n.p(e);var r,s=function(e,t,n,i,r){return function(e,t,n,i,r,s){var o=e.length,a=new hi(i+o+5*(1+Math.floor(o/7e3))+r),u=a.subarray(i,a.length-r),l=0;if(!t||o<8)for(var c=0;c<=o;c+=65535){var d=c+65535;d>>13,p=8191&h,v=(1<7e3||R>24576)&&$>423){l=Ni(e,u,0,k,w,S,E,R,P,c-P,l),R=F=E=0,P=c;for(var M=0;M<286;++M)w[M]=0;for(M=0;M<30;++M)S[M]=0}var 
O=2,q=0,A=p,L=C-T&32767;if($>2&&I==b(c-L))for(var D=Math.min(f,$)-1,B=Math.min(32767,c),N=Math.min(258,$);L<=B&&--A&&C!=T;){if(e[c+O]==e[c+O-L]){for(var H=0;HO){if(O=H,q=L,H>D)break;var j=Math.min(L,H-2),U=0;for(M=0;MU&&(U=z,T=W)}}}L+=(C=T)-(T=g[C])+32768&32767}if(q){k[R++]=268435456|ki[O]<<18|wi[q];var G=31&ki[O],V=31&wi[q];E+=vi[G]+gi[V],++w[257+G],++S[V],x=c+O,++F}else k[R++]=e[c],++w[e[c]]}}l=Ni(e,u,s,k,w,S,E,R,P,c-P,l),s||(l=Bi(u,l,ji))}return $i(a,0,i+Ti(l)+r)}(e,null==t.level?6:t.level,null==t.mem?Math.ceil(1.5*Math.max(8,Math.min(13,Math.log(e.length)))):12+t.mem,n,i,!r)}(e,t,10+((r=t).filename&&r.filename.length+1||0),8),o=s.length;return function(e,t){var n=t.filename;if(e[0]=31,e[1]=139,e[2]=8,e[8]=t.level<2?4:9==t.level?2:0,e[9]=3,0!=t.mtime&&Wi(e,4,Math.floor(new Date(t.mtime||Date.now())/1e3)),n){e[3]=8;for(var i=0;i<=n.length;++i)e[i+10]=n.charCodeAt(i)}}(s,t),Wi(s,o-8,n.d()),Wi(s,o-4,i),s}var Gi,Vi=!!N||!!B,Qi="text/plain",Ji=function(e,t){var n=c(e.split("?"),2),i=n[0],s=n[1],o=r({},t);null==s||s.split("&").forEach((function(e){var t=c(e.split("="),1)[0];delete o[t]}));var a=function(e){var t,n,i=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"&",r=[];return J(e,(function(e,i){F(e)||F(i)||"undefined"===i||(t=encodeURIComponent(function(e){return e instanceof File}(e)?e.name:e.toString()),n=encodeURIComponent(i),r[r.length]=n+"="+t)})),r.join(i)}(o);return a=a?(s?s+"&":"")+a:s,"".concat(i,"?").concat(a)},Yi=function(e){return"data="+encodeURIComponent("string"==typeof e?e:JSON.stringify(e))},Xi=function(e){var t=e.data,n=e.compression;if(t){if(n===ie.GZipJS){var i=zi(function(e,t){var n=e.length;if("undefined"!=typeof TextEncoder)return(new TextEncoder).encode(e);for(var i=new hi(e.length+(e.length>>>1)),r=0,s=function(e){i[r++]=e},o=0;oi.length){var a=new hi(r+8+(n-o<<1));a.set(i),i=a}var u=e.charCodeAt(o);u<128?s(u):u<2048?(s(192|u>>>6),s(128|63&u)):u>55295&&u<57344?(s(240|(u=65536+(1047552&u)|1023&e.charCodeAt(++o))>>>18),s(128|u>>>12&63),s(128|u>>>6&63),s(128|63&u)):(s(224|u>>>12),s(128|u>>>6&63),s(128|63&u))}return $i(i,0,r)}(JSON.stringify(t)),{mtime:0});return{contentType:Qi,body:new Blob([i],{type:Qi})}}if(n===ie.Base64){var r=function(e){var t,n,i,r,s,o="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=",a=0,u=0,l="",c=[];if(!e)return e;e=function(e){var t,n,i,r,s="";for(t=n=0,i=(e=(e+"").replace(/\r\n/g,"\n").replace(/\r/g,"\n")).length,r=0;r127&&o<2048?String.fromCharCode(o>>6|192,63&o|128):String.fromCharCode(o>>12|224,o>>6&63|128,63&o|128),R(a)||(n>t&&(s+=e.substring(t,n)),s+=a,t=n=r+1)}return n>t&&(s+=e.substring(t,e.length)),s}(e);do{t=(s=e.charCodeAt(a++)<<16|e.charCodeAt(a++)<<8|e.charCodeAt(a++))>>18&63,n=s>>12&63,i=s>>6&63,r=63&s,c[u++]=o.charAt(t)+o.charAt(n)+o.charAt(i)+o.charAt(r)}while(a=0||(r[n]=e[n]);return r}(e,t);if(Object.getOwnPropertySymbols){var s=Object.getOwnPropertySymbols(e);for(i=0;i=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(r[n]=e[n])}return r}(e,Ki);P(n)&&n>0&&(i.url=Ji(i.url,{retry_count:n})),this.instance._send_request(r(r({},i),{},{callback:function(e){var s;200!==e.statusCode&&(e.statusCode<400||e.statusCode>=500)&&(null!=n?n:0)<10?t.enqueue(r({},i)):null===(s=i.callback)||void 0===s||s.call(i,e)}}))}},{key:"enqueue",value:function(e){var t=e.retriesPerformedSoFar||0;e.retriesPerformedSoFar=t+1;var n=function(e){var t=3e3*Math.pow(2,e),n=t/2,i=Math.min(18e5,t),r=(Math.random()-.5)*(i-n);return Math.ceil(i+r)}(t),i=Date.now()+n;this.queue.push({retryAt:i,requestOptions:e});var r="Enqueued failed 
request for retry in ".concat(n);navigator.onLine||(r+=" (Browser is offline)"),z.warn(r),this.isPolling||(this.isPolling=!0,this.poll())}},{key:"poll",value:function(){var e=this;this.poller&&clearTimeout(this.poller),this.poller=setTimeout((function(){e.areWeOnline&&e.queue.length>0&&e.flush(),e.poll()}),this.pollIntervalMs)}},{key:"flush",value:function(){var e=Date.now(),t=[],n=this.queue.filter((function(n){return n.retryAt0){var i,r=p(n);try{for(r.s();!(i=r.n()).done;){var s=i.value.requestOptions;this.retriableRequest(s)}}catch(e){r.e(e)}finally{r.f()}}}},{key:"unload",value:function(){this.poller&&(clearTimeout(this.poller),this.poller=void 0);var e,t=p(this.queue);try{for(t.s();!(e=t.n()).done;){var n=e.value.requestOptions;try{this.instance._send_request(r(r({},n),{},{transport:"sendBeacon"}))}catch(e){z.error(e)}}}catch(e){t.e(e)}finally{t.f()}this.queue=[]}}]),e}(),er=1800,tr=function(){function e(t,n,i,r){o(this,e),l(this,"_sessionIdChangedHandlers",[]),this.config=t,this.persistence=n,this._windowId=void 0,this._sessionId=void 0,this._sessionStartTimestamp=null,this._sessionActivityTimestamp=null,this._sessionIdGenerator=i||Ue,this._windowIdGenerator=r||Ue;var s=t.persistence_name||t.token,a=t.session_idle_timeout_seconds||er;if(P(a)?a>er?z.warn("session_idle_timeout_seconds cannot be greater than 30 minutes. Using 30 minutes instead."):a<60&&z.warn("session_idle_timeout_seconds cannot be less than 60 seconds. Using 60 seconds instead."):(z.warn("session_idle_timeout_seconds must be a number. Defaulting to 30 minutes."),a=er),this._sessionTimeoutMs=1e3*Math.min(Math.max(a,60),er),this._window_id_storage_key="ph_"+s+"_window_id",this._primary_window_exists_storage_key="ph_"+s+"_primary_window_exists",this._canUseSessionStorage()){var u=nt.parse(this._window_id_storage_key),c=nt.parse(this._primary_window_exists_storage_key);u&&!c?this._windowId=u:nt.remove(this._window_id_storage_key),nt.set(this._primary_window_exists_storage_key,!0)}this._listenToReloadWindow()}return u(e,[{key:"onSessionId",value:function(e){var t=this;return F(this._sessionIdChangedHandlers)&&(this._sessionIdChangedHandlers=[]),this._sessionIdChangedHandlers.push(e),this._sessionId&&e(this._sessionId,this._windowId),function(){t._sessionIdChangedHandlers=t._sessionIdChangedHandlers.filter((function(t){return t!==e}))}}},{key:"_canUseSessionStorage",value:function(){return"memory"!==this.config.persistence&&!this.persistence.disabled&&nt.is_supported()}},{key:"_setWindowId",value:function(e){e!==this._windowId&&(this._windowId=e,this._canUseSessionStorage()&&nt.set(this._window_id_storage_key,e))}},{key:"_getWindowId",value:function(){return this._windowId?this._windowId:this._canUseSessionStorage()?nt.parse(this._window_id_storage_key):null}},{key:"_setSessionId",value:function(e,t,n){e===this._sessionId&&t===this._sessionActivityTimestamp&&n===this._sessionStartTimestamp||(this._sessionStartTimestamp=n,this._sessionActivityTimestamp=t,this._sessionId=e,this.persistence.register(l({},_e,[t,e,n])))}},{key:"_getSessionId",value:function(){if(this._sessionId&&this._sessionActivityTimestamp&&this._sessionStartTimestamp)return[this._sessionActivityTimestamp,this._sessionId,this._sessionStartTimestamp];var e=this.persistence.props[_e];return b(e)&&2===e.length&&e.push(e[0]),e||[0,null,0]}},{key:"resetSessionId",value:function(){this._setSessionId(null,null,null)}},{key:"_listenToReloadWindow",value:function(){var 
e=this;null==T||T.addEventListener("beforeunload",(function(){e._canUseSessionStorage()&&nt.remove(e._primary_window_exists_storage_key)}))}},{key:"checkAndGetSessionAndWindowId",value:function(){var e=arguments.length>0&&void 0!==arguments[0]&&arguments[0],t=(arguments.length>1&&void 0!==arguments[1]?arguments[1]:null)||(new Date).getTime(),n=c(this._getSessionId(),3),i=n[0],r=n[1],s=n[2],o=this._getWindowId(),a=s&&s>0&&Math.abs(t-s)>864e5,u=!1,l=!r,d=!e&&Math.abs(t-i)>this._sessionTimeoutMs;l||d||a?(r=this._sessionIdGenerator(),o=this._windowIdGenerator(),z.info("[SessionId] new session ID generated",{sessionId:r,windowId:o,changeReason:{noSessionId:l,activityTimeout:d,sessionPastMaximumLength:a}}),s=t,u=!0):o||(o=this._windowIdGenerator(),u=!0);var h=0===i||!e||a?t:i,f=0===s?(new Date).getTime():s;return this._setWindowId(o),this._setSessionId(r,h,f),u&&this._sessionIdChangedHandlers.forEach((function(e){return e(r,o)})),{sessionId:r,windowId:o,sessionStartTimestamp:f}}}]),e}();!function(e){e.US="us",e.EU="eu",e.CUSTOM="custom"}(Gi||(Gi={}));var nr="i.posthog.com",ir=function(){function e(t){o(this,e),l(this,"_regionCache",{}),this.instance=t}return u(e,[{key:"apiHost",get:function(){return this.instance.config.api_host.trim().replace(/\/$/,"")}},{key:"uiHost",get:function(){var e;return null===(e=this.instance.config.ui_host)||void 0===e?void 0:e.replace(/\/$/,"")}},{key:"region",get:function(){return this._regionCache[this.apiHost]||(/https:\/\/(app|us|us-assets)(\.i)?\.posthog\.com/i.test(this.apiHost)?this._regionCache[this.apiHost]=Gi.US:/https:\/\/(eu|eu-assets)(\.i)?\.posthog\.com/i.test(this.apiHost)?this._regionCache[this.apiHost]=Gi.EU:this._regionCache[this.apiHost]=Gi.CUSTOM),this._regionCache[this.apiHost]}},{key:"endpointFor",value:function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"";if(t&&(t="/"===t[0]?t:"/".concat(t)),"ui"===e)return(this.uiHost||this.apiHost.replace(".".concat(nr),".posthog.com"))+t;if(this.region===Gi.CUSTOM)return this.apiHost+t;var n=nr+t;switch(e){case"assets":return"https://".concat(this.region,"-assets.").concat(n);case"api":return"https://".concat(this.region,".").concat(n)}}}]),e}(),rr=u((function e(t,n,i,r){var s=arguments.length>4&&void 0!==arguments[4]?arguments[4]:["error"];o(this,e),this.name="posthog-js",this.setupOnce=function(e){e((function(e){var o,a,u,l,c;if("*"!==s&&!s.includes(e.level)||!t.__loaded)return e;e.tags||(e.tags={});var d=t.requestRouter.endpointFor("ui","/project/".concat(t.config.token,"/person/").concat(t.get_distinct_id()));e.tags["PostHog Person URL"]=d,t.sessionRecordingStarted()&&(e.tags["PostHog Recording URL"]=t.get_session_replay_url({withTimestamp:!0}));var h=(null===(o=e.exception)||void 0===o?void 0:o.values)||[],f={$exception_message:(null===(a=h[0])||void 0===a?void 0:a.value)||e.message,$exception_type:null===(u=h[0])||void 0===u?void 0:u.type,$exception_personURL:d,$sentry_event_id:e.event_id,$sentry_exception:e.exception,$sentry_exception_message:(null===(l=h[0])||void 0===l?void 0:l.value)||e.message,$sentry_exception_type:null===(c=h[0])||void 0===c?void 0:c.type,$sentry_tags:e.tags,$level:e.level};return n&&i&&(f.$sentry_url=(r||"https://sentry.io/organizations/")+n+"/issues/?project="+i+"&query="+e.event_id),t.capture("$exception",f),e}))}}));var sr=function(){function e(t){o(this,e),this._instance=t}return u(e,[{key:"doPageView",value:function(){var e,t=this._previousScrollProperties();return this._currentPath=null!==(e=null==T?void 0:T.location.pathname)&&void 
0!==e?e:"",this._instance.scrollManager.resetContext(),t}},{key:"doPageLeave",value:function(){return this._previousScrollProperties()}},{key:"_previousScrollProperties",value:function(){var e=this._currentPath,t=this._instance.scrollManager.getContext();if(!e||!t)return{};var n=t.maxScrollHeight,i=t.lastScrollY,r=t.maxScrollY,s=t.maxContentHeight,o=t.lastContentY,a=t.maxContentY;return F(n)||F(i)||F(r)||F(s)||F(o)||F(a)?{}:(n=Math.ceil(n),i=Math.ceil(i),r=Math.ceil(r),s=Math.ceil(s),o=Math.ceil(o),a=Math.ceil(a),{$prev_pageview_pathname:e,$prev_pageview_last_scroll:i,$prev_pageview_last_scroll_percentage:n<=1?1:or(i/n,0,1),$prev_pageview_max_scroll:r,$prev_pageview_max_scroll_percentage:n<=1?1:or(r/n,0,1),$prev_pageview_last_content:o,$prev_pageview_last_content_percentage:s<=1?1:or(o/s,0,1),$prev_pageview_max_content:a,$prev_pageview_max_content_percentage:s<=1?1:or(a/s,0,1)})}}]),e}();function or(e,t,n){return Math.max(t,Math.min(e,n))}var ar={icontains:function(e){return!!T&&T.location.href.toLowerCase().indexOf(e.toLowerCase())>-1},regex:function(e){return!!T&&function(e,t){return!!function(e){try{new RegExp(e)}catch(e){return!1}return!0}(t)&&new RegExp(t).test(e)}(T.location.href,e)},exact:function(e){return(null==T?void 0:T.location.href)===e}},ur=function(){function e(t){o(this,e),this.instance=t}return u(e,[{key:"afterDecideResponse",value:function(e){this._decideServerResponse=!!e.surveys,this.loadIfEnabled()}},{key:"loadIfEnabled",value:function(){var e=this,t=null==U?void 0:U.extendPostHogWithSurveys;this.instance.config.disable_surveys||!this._decideServerResponse||t||se(this.instance.requestRouter.endpointFor("assets","/static/surveys.js"),(function(t){if(t)return z.error("Could not load surveys script",t);U.extendPostHogWithSurveys(e.instance)}))}},{key:"getSurveys",value:function(e){var t=this,n=arguments.length>1&&void 0!==arguments[1]&&arguments[1];if(this.instance.config.disable_surveys)return e([]);var i=this.instance.get_property(Se);if(i&&!n)return e(i);this.instance._send_request({url:this.instance.requestRouter.endpointFor("api","/api/surveys/?token=".concat(this.instance.config.token)),method:"GET",transport:"XHR",callback:function(n){var i;if(200!==n.statusCode||!n.json)return e([]);var r=n.json.surveys||[];return null===(i=t.instance.persistence)||void 0===i||i.register(l({},Se,r)),e(r)}})}},{key:"getActiveMatchingSurveys",value:function(e){var t=this,n=arguments.length>1&&void 0!==arguments[1]&&arguments[1];this.getSurveys((function(n){var i=n.filter((function(e){return!(!e.start_date||e.end_date)})).filter((function(e){var t,n,i,r;if(!e.conditions)return!0;var s=null===(t=e.conditions)||void 0===t||!t.url||ar[null!==(n=null===(i=e.conditions)||void 0===i?void 0:i.urlMatchType)&&void 0!==n?n:"icontains"](e.conditions.url),o=null===(r=e.conditions)||void 0===r||!r.selector||(null==L?void 0:L.querySelector(e.conditions.selector));return s&&o})).filter((function(e){if(!e.linked_flag_key&&!e.targeting_flag_key)return!0;var n=!e.linked_flag_key||t.instance.featureFlags.isFeatureEnabled(e.linked_flag_key),i=!e.targeting_flag_key||t.instance.featureFlags.isFeatureEnabled(e.targeting_flag_key);return n&&i}));return e(i)}),n)}}]),e}(),lr=function(){function e(t){var n,i,r=this;o(this,e),l(this,"serverLimits",{}),l(this,"lastEventRateLimited",!1),l(this,"checkForLimiting",(function(e){var t=e.text;if(t&&t.length)try{(JSON.parse(t).quota_limited||[]).forEach((function(e){z.info("[RateLimiter] ".concat(e||"events"," is quota limited.")),r.serverLimits[e]=(new 
Date).getTime()+6e4}))}catch(e){return void z.warn('[RateLimiter] could not rate limit - continuing. Error: "'.concat(null==e?void 0:e.message,'"'),{text:t})}})),this.instance=t,this.captureEventsPerSecond=(null===(n=t.config.rate_limiting)||void 0===n?void 0:n.events_per_second)||10,this.captureEventsBurstLimit=Math.max((null===(i=t.config.rate_limiting)||void 0===i?void 0:i.events_burst_limit)||10*this.captureEventsPerSecond,this.captureEventsPerSecond),this.lastEventRateLimited=this.isCaptureClientSideRateLimited(!0)}return u(e,[{key:"isCaptureClientSideRateLimited",value:function(){var e,t,n,i=arguments.length>0&&void 0!==arguments[0]&&arguments[0],r=(new Date).getTime(),s=null!==(e=null===(t=this.instance.persistence)||void 0===t?void 0:t.get_property(xe))&&void 0!==e?e:{tokens:this.captureEventsBurstLimit,last:r};s.tokens+=(r-s.last)/1e3*this.captureEventsPerSecond,s.last=r,s.tokens>this.captureEventsBurstLimit&&(s.tokens=this.captureEventsBurstLimit);var o=s.tokens<1;return o||i||(s.tokens=Math.max(0,s.tokens-1)),!o||this.lastEventRateLimited||i||this.instance.capture("$$client_ingestion_warning",{$$client_ingestion_warning_message:"posthog-js client rate limited. Config is set to ".concat(this.captureEventsPerSecond," events per second and ").concat(this.captureEventsBurstLimit," events burst limit.")},{skip_client_rate_limiting:!0}),this.lastEventRateLimited=o,null===(n=this.instance.persistence)||void 0===n||n.set_property(xe,s),o}},{key:"isServerRateLimited",value:function(e){var t=this.serverLimits[e||"events"]||!1;return!1!==t&&(new Date).getTime()e?t.slice(0,e)+"...":t}var wr,Sr,Fr=function(){function e(t){o(this,e),l(this,"_initialized",!1),l(this,"_isDisabledServerSide",null),l(this,"rageclicks",new pr),l(this,"_elementsChainAsString",!1),this.instance=t}return u(e,[{key:"config",get:function(){var e,t=w(this.instance.config.autocapture)?this.instance.config.autocapture:{};return t.url_allowlist=null===(e=t.url_allowlist)||void 0===e?void 0:e.map((function(e){return new RegExp(e)})),t}},{key:"_addDomEventHandlers",value:function(){var e=this;if(this.isBrowserSupported()){if(T&&L){var t=function(t){t=t||(null==T?void 0:T.event);try{e._captureEvent(t)}catch(e){z.error("Failed to capture event",e)}},n=function(t){t=t||(null==T?void 0:T.event),e._captureEvent(t,br)};re(L,"submit",t,!1,!0),re(L,"change",t,!1,!0),re(L,"click",t,!1,!0),this.config.capture_copied_text&&(re(L,"copy",n,!1,!0),re(L,"cut",n,!1,!0))}}else z.info("Disabling Automatic Event Collection because this browser is not supported")}},{key:"startIfEnabled",value:function(){this.isEnabled&&!this._initialized&&(this._addDomEventHandlers(),this._initialized=!0)}},{key:"afterDecideResponse",value:function(e){e.elementsChainAsString&&(this._elementsChainAsString=e.elementsChainAsString),this.instance.persistence&&this.instance.persistence.register(l({},le,!!e.autocapture_opt_out)),this._isDisabledServerSide=!!e.autocapture_opt_out,this.startIfEnabled()}},{key:"isEnabled",get:function(){var e,t,n=null===(e=this.instance.persistence)||void 0===e?void 0:e.props[le],i=this._isDisabledServerSide;if(R(i)&&!I(n)&&!this.instance.config.advanced_disable_decide)return!1;var r=null!==(t=this._isDisabledServerSide)&&void 0!==t?t:!!n;return!!this.instance.config.autocapture&&!r}},{key:"_previousElementSibling",value:function(e){if(e.previousElementSibling)return e.previousElementSibling;var t=e;do{t=t.previousSibling}while(t&&!vn(t));return t}},{key:"_getAugmentPropertiesFromElement",value:function(e){if(!wn(e))return{};var 
t={};return J(e.attributes,(function(e){if(e.name&&0===e.name.indexOf("data-ph-capture-attribute")){var n=e.name.replace("data-ph-capture-attribute-",""),i=e.value;n&&i&&Cn(i)&&(t[n]=i)}})),t}},{key:"_getPropertiesFromElement",value:function(e,t,n){var i,r=e.tagName.toLowerCase(),s={tag_name:r};yn.indexOf(r)>-1&&!n&&("a"===r.toLowerCase()||"button"===r.toLowerCase()?s.$el_text=kr(1024,Tn(e)):s.$el_text=kr(1024,pn(e)));var o=hn(e);o.length>0&&(s.classes=o.filter((function(e){return""!==e})));var a=null===(i=this.config)||void 0===i?void 0:i.element_attribute_ignorelist;J(e.attributes,(function(n){var i;if((!Sn(e)||-1!==["name","id","class","aria-label"].indexOf(n.name))&&(null==a||!a.includes(n.name))&&!t&&Cn(n.value)&&(i=n.name,!E(i)||"_ngcontent"!==i.substring(0,10)&&"_nghost"!==i.substring(0,7))){var r=n.value;"class"===n.name&&(r=dn(r).join(" ")),s["attr__"+n.name]=kr(1024,r)}}));for(var u=1,l=1,c=e;c=this._previousElementSibling(c);)u++,c.tagName===e.tagName&&l++;return s.nth_child=u,s.nth_of_type=l,s}},{key:"_getDefaultProperties",value:function(e){return{$event_type:e,$ce_version:1}}},{key:"_getEventTarget",value:function(e){return F(e.target)?e.srcElement||null:null!==(t=e.target)&&void 0!==t&&t.shadowRoot?e.composedPath()[0]||null:e.target||null;var t}},{key:"_captureEvent",value:function(e){var t=this,n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"$autocapture";if(this.isEnabled){var i,r=this._getEventTarget(e);_n(r)&&(r=r.parentNode||null),"$autocapture"===n&&"click"===e.type&&e instanceof MouseEvent&&this.instance.config.rageclick&&null!==(i=this.rageclicks)&&void 0!==i&&i.isRageClick(e.clientX,e.clientY,(new Date).getTime())&&this._captureEvent(e,"$rageclick");var s=n===br;if(r&&kn(r,e,this.config,s,s?["copy","cut"]:void 0)){for(var o,a,u=[r],l=r;l.parentNode&&!gn(l,"body");)mn(l.parentNode)?(u.push(l.parentNode.host),l=l.parentNode.host):(u.push(l.parentNode),l=l.parentNode);var c,d=[],h={},f=!1;if(J(u,(function(e){var n=wn(e);"a"===e.tagName.toLowerCase()&&(c=e.getAttribute("href"),c=n&&Cn(c)&&c),X(hn(e),"ph-no-capture")&&(f=!0),d.push(t._getPropertiesFromElement(e,t.instance.config.mask_all_element_attributes,t.instance.config.mask_all_text));var i=t._getAugmentPropertiesFromElement(e);Y(h,i)})),this.instance.config.mask_all_text||("a"===r.tagName.toLowerCase()||"button"===r.tagName.toLowerCase()?d[0].$el_text=Tn(r):d[0].$el_text=pn(r)),c&&(d[0].attr__href=c),f)return!1;var p=Y(this._getDefaultProperties(e.type),this._elementsChainAsString?{$elements_chain:Mn(d)}:{$elements:d},null!==(o=d[0])&&void 0!==o&&o.$el_text?{$el_text:null===(a=d[0])||void 0===a?void 0:a.$el_text}:{},h);if(n===br){var v,g=fn(null==T||null===(v=T.getSelection())||void 0===v?void 0:v.toString()),_=e.type||"clipboard";if(!g)return!1;p.$selected_content=g,p.$copy_type=_}return this.instance.capture(n,p),!0}}}},{key:"isBrowserSupported",value:function(){return k(null==L?void 0:L.querySelectorAll)}}]),e}(),Er={},Rr=function(){},xr="posthog",Pr=!Vi&&-1===(null==j?void 0:j.indexOf("MSIE"))&&-1===(null==j?void 0:j.indexOf("Mozilla")),Ir=function(){var e,t,n;return{api_host:"https://us.i.posthog.com",api_transport:"XHR",ui_host:null,token:"",autocapture:!0,rageclick:!0,cross_subdomain_cookie:(t=null==L?void 0:L.location,n=null==t?void 
0:t.hostname,!!E(n)&&"herokuapp.com"!==n.split(".").slice(-2).join(".")),persistence:"localStorage+cookie",persistence_name:"",loaded:Rr,store_google:!0,custom_campaign_params:[],custom_blocked_useragents:[],save_referrer:!0,capture_pageview:!0,capture_pageleave:!0,debug:D&&E(null==D?void 0:D.search)&&-1!==D.search.indexOf("__posthog_debug=true")||!1,verbose:!1,cookie_expiration:365,upgrade:!1,disable_session_recording:!1,disable_persistence:!1,disable_surveys:!1,enable_recording_console_log:void 0,secure_cookie:"https:"===(null==T||null===(e=T.location)||void 0===e?void 0:e.protocol),ip:!0,opt_out_capturing_by_default:!1,opt_out_persistence_by_default:!1,opt_out_useragent_filter:!1,opt_out_capturing_persistence_type:"localStorage",opt_out_capturing_cookie_prefix:null,opt_in_site_apps:!1,property_denylist:[],respect_dnt:!1,sanitize_properties:null,request_headers:{},inapp_protocol:"//",inapp_link_new_window:!1,request_batching:!0,properties_string_max_length:65535,session_recording:{},mask_all_element_attributes:!1,mask_all_text:!1,advanced_disable_decide:!1,advanced_disable_feature_flags:!1,advanced_disable_feature_flags_on_first_load:!1,advanced_disable_toolbar_metrics:!1,feature_flag_request_timeout_ms:3e3,on_request_error:function(e){var t="Bad HTTP status: "+e.statusCode+" "+e.text;z.error(t)},get_device_id:function(e){return e},_onCapture:Rr,capture_performance:void 0,name:"posthog",bootstrap:{},disable_compression:!1,session_idle_timeout_seconds:1800,person_profiles:"always"}},Cr=function(e){var t={};F(e.process_person)||(t.person_profiles=e.process_person),F(e.xhr_headers)||(t.request_headers=e.xhr_headers),F(e.cookie_name)||(t.persistence_name=e.cookie_name),F(e.disable_cookie)||(t.disable_persistence=e.disable_cookie);var n=Y({},t,e);return b(e.property_blacklist)&&(F(e.property_denylist)?n.property_denylist=e.property_blacklist:b(e.property_denylist)?n.property_denylist=[].concat(d(e.property_blacklist),d(e.property_denylist)):z.error("Invalid value for property_denylist config: "+e.property_denylist)),n},Tr=function(){function e(){o(this,e),l(this,"__forceAllowLocalhost",!1)}return u(e,[{key:"_forceAllowLocalhost",get:function(){return this.__forceAllowLocalhost},set:function(e){z.error("WebPerformanceObserver is deprecated and has no impact on network capture. 
Use `_forceAllowLocalhostNetworkCapture` on `posthog.sessionRecording`"),this.__forceAllowLocalhost=e}}]),e}(),$r=function(){function e(){var t=this;o(this,e),l(this,"webPerformance",new Tr),l(this,"_debugEventEmitter",new yr),this.config=Ir(),this.decideEndpointWasHit=!1,this.SentryIntegration=rr,this.__request_queue=[],this.__loaded=!1,this.analyticsDefaultEndpoint="/e/",this.featureFlags=new Ae(this),this.toolbar=new ci(this),this.scrollManager=new mr(this),this.pageViewManager=new sr(this),this.surveys=new ur(this),this.rateLimiter=new lr(this),this.requestRouter=new ir(this),this.people={set:function(e,n,i){var r=E(e)?l({},e,n):e;t.setPersonProperties(r),null==i||i({})},set_once:function(e,n,i){var r=E(e)?l({},e,n):e;t.setPersonProperties(void 0,r),null==i||i({})}},this.on("eventCaptured",(function(e){return z.info("send",e)}))}return u(e,[{key:"init",value:function(t,n,i){if(i&&i!==xr){var r,s=null!==(r=Er[i])&&void 0!==r?r:new e;return s._init(t,n,i),Er[i]=s,Er[xr][i]=s,s}return this._init(t,n,i)}},{key:"_init",value:function(e){var t,n,i=this,s=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},o=arguments.length>2?arguments[2]:void 0;if(F(e)||function(e){return E(e)&&0===e.trim().length}(e))return z.critical("PostHog was initialized without a token. This likely indicates a misconfiguration. Please check the first argument passed to posthog.init()"),this;if(this.__loaded)return z.warn("You have already initialized PostHog! Re-initializing is a no-op"),this;if(this.__loaded=!0,this.config={},this._triggered_notifs=[],this.set_config(Y({},Ir(),Cr(s),{name:o,token:e})),this.compression=s.disable_compression?void 0:ie.Base64,this.persistence=new on(this.config),this.sessionPersistence="sessionStorage"===this.config.persistence?this.persistence:new on(r(r({},this.config),{},{persistence:"sessionStorage"})),this._requestQueue=new di((function(e){return i._send_request(e)})),this._retryQueue=new Zi(this),this.__request_queue=[],this.sessionManager=new tr(this.config,this.persistence),this.sessionPropsManager=new dr(this.sessionManager,this.persistence),this.sessionRecording=new oi(this),this.sessionRecording.startIfEnabledOrStop(),this.config.disable_scroll_properties||this.scrollManager.startMeasuringScrollPosition(),this.autocapture=new Fr(this),this.autocapture.startIfEnabled(),this.surveys.loadIfEnabled(),this.heatmaps=new _r(this),this.heatmaps.startIfEnabled(),v.DEBUG=v.DEBUG||this.config.debug,this._gdpr_init(),void 0!==(null===(t=s.bootstrap)||void 0===t?void 0:t.distinctID)){var a,u,l=this.config.get_device_id(Ue()),c=null!==(a=s.bootstrap)&&void 0!==a&&a.isIdentifiedID?l:s.bootstrap.distinctID;this.persistence.set_property(Ee,null!==(u=s.bootstrap)&&void 0!==u&&u.isIdentifiedID?"identified":"anonymous"),this.register({distinct_id:s.bootstrap.distinctID,$device_id:c})}if(this._hasBootstrappedFeatureFlags()){var d,h,f=Object.keys((null===(d=s.bootstrap)||void 0===d?void 0:d.featureFlags)||{}).filter((function(e){var t,n;return!(null===(t=s.bootstrap)||void 0===t||null===(n=t.featureFlags)||void 0===n||!n[e])})).reduce((function(e,t){var n,i;return e[t]=(null===(n=s.bootstrap)||void 0===n||null===(i=n.featureFlags)||void 0===i?void 0:i[t])||!1,e}),{}),p=Object.keys((null===(h=s.bootstrap)||void 0===h?void 0:h.featureFlagPayloads)||{}).filter((function(e){return f[e]})).reduce((function(e,t){var n,i,r,o;return null!==(n=s.bootstrap)&&void 0!==n&&null!==(i=n.featureFlagPayloads)&&void 0!==i&&i[t]&&(e[t]=null===(r=s.bootstrap)||void 0===r||null===(o=r.featureFlagPayloads)||void 
0===o?void 0:o[t]),e}),{});this.featureFlags.receivedFeatureFlags({featureFlags:f,featureFlagPayloads:p})}if(!this.get_distinct_id()){var g=this.config.get_device_id(Ue());this.register_once({distinct_id:g,$device_id:g},""),this.persistence.set_property(Ee,"anonymous")}return null==T||null===(n=T.addEventListener)||void 0===n||n.call(T,"onpagehide"in self?"pagehide":"unload",this._handle_unload.bind(this)),this.toolbar.maybeLoadToolbar(),s.segment?function(e,t){var n=e.config.segment;if(!n)return t();!function(e,t){var n=e.config.segment;if(!n)return t();var i=function(n){var i=function(){return n.anonymousId()||Ue()};e.config.get_device_id=i,n.id()&&(e.register({distinct_id:n.id(),$device_id:i()}),e.persistence.set_property(Ee,"identified")),t()},r=n.user();"then"in r&&k(r.then)?r.then((function(e){return i(e)})):i(r)}(e,(function(){n.register(function(e){Promise&&Promise.resolve||z.warn("This browser does not have Promise support, and can not use the segment integration");var t=function(t,n){var i;if(!n)return t;t.event.userId||t.event.anonymousId===e.get_distinct_id()||e.reset(),t.event.userId&&t.event.userId!==e.get_distinct_id()&&(e.register({distinct_id:t.event.userId}),e.reloadFeatureFlags());var r=e._calculate_event_properties(n,null!==(i=t.event.properties)&&void 0!==i?i:{});return t.event.properties=Object.assign({},r,t.event.properties),t};return{name:"PostHog JS",type:"enrichment",version:"1.0.0",isLoaded:function(){return!0},load:function(){return Promise.resolve()},track:function(e){return t(e,e.event.event)},page:function(e){return t(e,"$pageview")},identify:function(e){return t(e,"$identify")},screen:function(e){return t(e,"$screen")}}}(e)).then((function(){t()}))}))}(this,(function(){return i._loaded()})):this._loaded(),k(this.config._onCapture)&&this.on("eventCaptured",(function(e){return i.config._onCapture(e.event,e)})),this}},{key:"_afterDecideResponse",value:function(e){var t,n,i,r,s;this.compression=void 0,e.supportedCompression&&!this.config.disable_compression&&(this.compression=X(e.supportedCompression,ie.GZipJS)?ie.GZipJS:X(e.supportedCompression,ie.Base64)?ie.Base64:void 0),null!==(t=e.analytics)&&void 0!==t&&t.endpoint&&(this.analyticsDefaultEndpoint=e.analytics.endpoint),null===(n=this.sessionRecording)||void 0===n||n.afterDecideResponse(e),null===(i=this.autocapture)||void 0===i||i.afterDecideResponse(e),null===(r=this.heatmaps)||void 0===r||r.afterDecideResponse(e),null===(s=this.surveys)||void 0===s||s.afterDecideResponse(e)}},{key:"_loaded",value:function(){var e=this,t=this.config.advanced_disable_decide;t||this.featureFlags.setReloadingPaused(!0);try{this.config.loaded(this)}catch(e){z.critical("`loaded` function failed",e)}this._start_queue_if_opted_in(),this.config.capture_pageview&&setTimeout((function(){L&&e.capture("$pageview",{title:L.title},{send_instantly:!0})}),1),t||(new ai(this).call(),this.featureFlags.resetRequestQueue())}},{key:"_start_queue_if_opted_in",value:function(){var e;this.has_opted_out_capturing()||this.config.request_batching&&(null===(e=this._requestQueue)||void 0===e||e.enable())}},{key:"_dom_loaded",value:function(){var e=this;this.has_opted_out_capturing()||Q(this.__request_queue,(function(t){return e._send_retriable_request(t)})),this.__request_queue=[],this._start_queue_if_opted_in()}},{key:"_handle_unload",value:function(){var e,t;this.config.request_batching?(this.config.capture_pageview&&this.config.capture_pageleave&&this.capture("$pageleave"),null===(e=this._requestQueue)||void 
0===e||e.unload(),null===(t=this._retryQueue)||void 0===t||t.unload()):this.config.capture_pageview&&this.config.capture_pageleave&&this.capture("$pageleave",null,{transport:"sendBeacon"})}},{key:"_send_request",value:function(e){var t=this;this.__loaded&&(Pr?this.__request_queue.push(e):this.rateLimiter.isServerRateLimited(e.batchKey)||(e.transport=e.transport||this.config.api_transport,e.url=Ji(e.url,{ip:this.config.ip?1:0}),e.headers=this.config.request_headers,e.compression="best-available"===e.compression?this.compression:e.compression,function(e){var t=r({},e);t.timeout=t.timeout||6e4,t.url=Ji(t.url,{_:(new Date).getTime().toString(),ver:v.LIB_VERSION,compression:t.compression}),"sendBeacon"===t.transport&&null!=A&&A.sendBeacon?function(e){var t=Ji(e.url,{beacon:"1"});try{var n,i=null!==(n=Xi(e))&&void 0!==n?n:{},r=i.contentType,s=i.body,o="string"==typeof s?new Blob([s],{type:r}):s;A.sendBeacon(t,o)}catch(e){}}(t):"fetch"===t.transport&&B?function(e){var t,n,i=null!==(t=Xi(e))&&void 0!==t?t:{},r=i.contentType,s=i.body,o=new Headers;J(o,(function(e,t){o.append(t,e)})),r&&o.append("Content-Type",r);var a=e.url,u=null;if(H){var l=new H;u={signal:l.signal,timeout:setTimeout((function(){return l.abort()}),e.timeout)}}B(a,{method:(null==e?void 0:e.method)||"GET",headers:o,keepalive:"POST"===e.method,body:s,signal:null===(n=u)||void 0===n?void 0:n.signal}).then((function(t){return t.text().then((function(n){var i,r={statusCode:t.status,text:n};if(200===t.status)try{r.json=JSON.parse(n)}catch(e){z.error(e)}null===(i=e.callback)||void 0===i||i.call(e,r)}))})).catch((function(t){var n;z.error(t),null===(n=e.callback)||void 0===n||n.call(e,{statusCode:0,text:t})})).finally((function(){return u?clearTimeout(u.timeout):null}))}(t):N||!L?function(e){var t,n=new N;n.open(e.method||"GET",e.url,!0);var i=null!==(t=Xi(e))&&void 0!==t?t:{},r=i.contentType,s=i.body;J(e.headers,(function(e,t){n.setRequestHeader(t,e)})),r&&n.setRequestHeader("Content-Type",r),e.timeout&&(n.timeout=e.timeout),n.withCredentials=!0,n.onreadystatechange=function(){if(4===n.readyState){var t,i={statusCode:n.status,text:n.responseText};if(200===n.status)try{i.json=JSON.parse(n.responseText)}catch(e){}null===(t=e.callback)||void 0===t||t.call(e,i)}},n.send(s)}(t):function(e){var t;if(L){var n=L.createElement("script");n.type="text/javascript",n.async=!0,n.defer=!0,n.src=e.url;var i=L.getElementsByTagName("script")[0];null===(t=i.parentNode)||void 0===t||t.insertBefore(n,i)}}(t)}(r(r({},e),{},{callback:function(n){var i,r,s;t.rateLimiter.checkForLimiting(n),n.statusCode>=400&&(null===(r=(s=t.config).on_request_error)||void 0===r||r.call(s,n)),null===(i=e.callback)||void 0===i||i.call(e,n)}}))))}},{key:"_send_retriable_request",value:function(e){this._retryQueue?this._retryQueue.retriableRequest(e):this._send_request(e)}},{key:"_execute_array",value:function(e){var t,n=this,i=[],r=[],s=[];Q(e,(function(e){e&&(t=e[0],b(t)?s.push(e):k(e)?e.call(n):b(e)&&"alias"===t?i.push(e):b(e)&&-1!==t.indexOf("capture")&&k(n[t])?s.push(e):r.push(e))}));var o=function(e,t){Q(e,(function(e){if(b(e[0])){var n=t;J(e,(function(e){n=n[e[0]].apply(n,e.slice(1))}))}else this[e[0]].apply(this,e.slice(1))}),t)};o(i,this),o(r,this),o(s,this)}},{key:"_hasBootstrappedFeatureFlags",value:function(){var e,t;return(null===(e=this.config.bootstrap)||void 0===e?void 0:e.featureFlags)&&Object.keys(null===(t=this.config.bootstrap)||void 0===t?void 
0:t.featureFlags).length>0||!1}},{key:"push",value:function(e){this._execute_array([e])}},{key:"capture",value:function(e,t,n){var i;if(!(this.__loaded&&this.persistence&&this.sessionPersistence&&this._requestQueue))return z.uninitializedWarning("posthog.capture");if(!Zn(this))if(null!=n&&n.skip_client_rate_limiting||!this.rateLimiter.isCaptureClientSideRateLimited())if(!F(e)&&E(e)){if(!j||this.config.opt_out_useragent_filter||!fr(j,this.config.custom_blocked_useragents)){this.sessionPersistence.update_search_keyword(),this.config.store_google&&(this.sessionPersistence.update_campaign_params(),this.persistence.set_initial_campaign_params()),this.config.save_referrer&&(this.sessionPersistence.update_referrer_info(),this.persistence.set_initial_referrer_info());var s={uuid:Ue(),event:e,properties:this._calculate_event_properties(e,t||{})};if(null==n||!n._noHeatmaps){var o,a=null===(o=this.heatmaps)||void 0===o?void 0:o.getAndClearBuffer();a&&(s.properties.$heatmap_data=a)}(null==n?void 0:n.$set)&&(s.$set=null==n?void 0:n.$set);var u=this._calculate_set_once_properties(null==n?void 0:n.$set_once);u&&(s.$set_once=u),(s=function(e,t){return n=e,i=function(e){return E(e)&&!R(t)?e.slice(0,t):e},r=new Set,function e(t,n){return t!==Object(t)?i?i(t):t:r.has(t)?void 0:(r.add(t),b(t)?(s=[],Q(t,(function(t){s.push(e(t))}))):(s={},J(t,(function(t,n){r.has(t)||(s[n]=e(t,n))}))),s);var s}(n);var n,i,r}(s,null!=n&&n._noTruncate?null:this.config.properties_string_max_length)).timestamp=(null==n?void 0:n.timestamp)||new Date,F(null==n?void 0:n.timestamp)||(s.properties.$event_time_override_provided=!0,s.properties.$event_time_override_system_time=new Date);var l=r(r({},s.properties.$set),s.$set);S(l)||this.setPersonPropertiesForFlags(l),this._debugEventEmitter.emit("eventCaptured",s);var c={method:"POST",url:null!==(i=null==n?void 0:n._url)&&void 0!==i?i:this.requestRouter.endpointFor("api",this.analyticsDefaultEndpoint),data:s,compression:"best-available",batchKey:null==n?void 0:n._batchKey};return!this.config.request_batching||n&&(null==n||!n._batchKey)||null!=n&&n.send_instantly?this._send_retriable_request(c):this._requestQueue.enqueue(c),s}}else z.error("No event name provided to posthog.capture");else z.critical("This capture call is ignored due to client rate limiting.")}},{key:"_addCaptureHook",value:function(e){this.on("eventCaptured",(function(t){return e(t.event)}))}},{key:"_calculate_event_properties",value:function(e,t){if(!this.persistence||!this.sessionPersistence)return t;var n=this.persistence.remove_event_timer(e),i=r({},t);if(i.token=this.config.token,"$snapshot"===e){var s=r(r({},this.persistence.properties()),this.sessionPersistence.properties());return i.distinct_id=s.distinct_id,i}var o=rn.properties();if(this.sessionManager){var a=this.sessionManager.checkAndGetSessionAndWindowId(),u=a.sessionId,l=a.windowId;i.$session_id=u,i.$window_id=l}if(this.requestRouter.region===Gi.CUSTOM&&(i.$lib_custom_api_host=this.config.api_host),this.sessionPropsManager&&this.config.__preview_send_client_session_params&&("$pageview"===e||"$pageleave"===e||"$autocapture"===e)){var c=this.sessionPropsManager.getSessionProps();i=Y(i,c)}if(!this.config.disable_scroll_properties){var d={};"$pageview"===e?d=this.pageViewManager.doPageView():"$pageleave"===e&&(d=this.pageViewManager.doPageLeave()),i=Y(i,d)}if("$pageview"===e&&L&&(i.title=L.title),!F(n)){var h=(new 
Date).getTime()-n;i.$duration=parseFloat((h/1e3).toFixed(3))}j&&this.config.opt_out_useragent_filter&&(i.$browser_type=fr(j,this.config.custom_blocked_useragents)?"bot":"browser"),(i=Y({},o,this.persistence.properties(),this.sessionPersistence.properties(),i)).$is_identified=this._isIdentified(),b(this.config.property_denylist)?J(this.config.property_denylist,(function(e){delete i[e]})):z.error("Invalid value for property_denylist config: "+this.config.property_denylist+" or property_blacklist config: "+this.config.property_blacklist);var f=this.config.sanitize_properties;return f&&(i=f(i,e)),i.$process_person_profile=this._hasPersonProcessing(),i}},{key:"_calculate_set_once_properties",value:function(e){if(!this.persistence||!this._hasPersonProcessing())return e;var t=Y({},this.persistence.get_initial_props(),e||{});return S(t)?void 0:t}},{key:"register",value:function(e,t){var n;null===(n=this.persistence)||void 0===n||n.register(e,t)}},{key:"register_once",value:function(e,t,n){var i;null===(i=this.persistence)||void 0===i||i.register_once(e,t,n)}},{key:"register_for_session",value:function(e){var t;null===(t=this.sessionPersistence)||void 0===t||t.register(e)}},{key:"unregister",value:function(e){var t;null===(t=this.persistence)||void 0===t||t.unregister(e)}},{key:"unregister_for_session",value:function(e){var t;null===(t=this.sessionPersistence)||void 0===t||t.unregister(e)}},{key:"_register_single",value:function(e,t){this.register(l({},e,t))}},{key:"getFeatureFlag",value:function(e,t){return this.featureFlags.getFeatureFlag(e,t)}},{key:"getFeatureFlagPayload",value:function(e){var t=this.featureFlags.getFeatureFlagPayload(e);try{return JSON.parse(t)}catch(e){return t}}},{key:"isFeatureEnabled",value:function(e,t){return this.featureFlags.isFeatureEnabled(e,t)}},{key:"reloadFeatureFlags",value:function(){this.featureFlags.reloadFeatureFlags()}},{key:"updateEarlyAccessFeatureEnrollment",value:function(e,t){this.featureFlags.updateEarlyAccessFeatureEnrollment(e,t)}},{key:"getEarlyAccessFeatures",value:function(e){var t=arguments.length>1&&void 0!==arguments[1]&&arguments[1];return this.featureFlags.getEarlyAccessFeatures(e,t)}},{key:"on",value:function(e,t){return this._debugEventEmitter.on(e,t)}},{key:"onFeatureFlags",value:function(e){return this.featureFlags.onFeatureFlags(e)}},{key:"onSessionId",value:function(e){var t,n;return null!==(t=null===(n=this.sessionManager)||void 0===n?void 0:n.onSessionId(e))&&void 0!==t?t:function(){}}},{key:"getSurveys",value:function(e){var t=arguments.length>1&&void 0!==arguments[1]&&arguments[1];this.surveys.getSurveys(e,t)}},{key:"getActiveMatchingSurveys",value:function(e){var t=arguments.length>1&&void 0!==arguments[1]&&arguments[1];this.surveys.getActiveMatchingSurveys(e,t)}},{key:"identify",value:function(e,t,n){if(!this.__loaded||!this.persistence)return z.uninitializedWarning("posthog.identify");if(P(e)&&(e=e.toString(),z.warn("The first argument to posthog.identify was a number, but it should be a string. It has been converted to a string.")),e){if(["distinct_id","distinctid"].includes(e.toLowerCase()))z.critical('The string "'.concat(e,'" was set in posthog.identify which indicates an error. 
This ID should be unique to the user and not a hardcoded string.'));else if(this._requirePersonProcessing("posthog.identify")){var i=this.get_distinct_id();if(this.register({$user_id:e}),!this.get_property("$device_id")){var r=i;this.register_once({$had_persisted_distinct_id:!0,$device_id:r},"")}e!==i&&e!==this.get_property(ae)&&(this.unregister(ae),this.register({distinct_id:e}));var s="anonymous"===(this.persistence.get_property(Ee)||"anonymous");e!==i&&s?(this.persistence.set_property(Ee,"identified"),this.setPersonPropertiesForFlags(t||{},!1),this.capture("$identify",{distinct_id:e,$anon_distinct_id:i},{$set:t||{},$set_once:n||{}}),this.featureFlags.setAnonymousDistinctId(i)):(t||n)&&this.setPersonProperties(t,n),e!==i&&(this.reloadFeatureFlags(),this.unregister(Fe))}}else z.error("Unique user id has not been set in posthog.identify")}},{key:"setPersonProperties",value:function(e,t){(e||t)&&this._requirePersonProcessing("posthog.setPersonProperties")&&(this.setPersonPropertiesForFlags(e||{}),this.capture("$set",{$set:e||{},$set_once:t||{}}))}},{key:"group",value:function(e,t,n){if(e&&t){if(this._requirePersonProcessing("posthog.group")){var i=this.getGroups();i[e]!==t&&this.resetGroupPropertiesForFlags(e),this.register({$groups:r(r({},i),{},l({},e,t))}),n&&(this.capture("$groupidentify",{$group_type:e,$group_key:t,$group_set:n}),this.setGroupPropertiesForFlags(l({},e,n))),i[e]===t||n||this.reloadFeatureFlags()}}else z.error("posthog.group requires a group type and group key")}},{key:"resetGroups",value:function(){this.register({$groups:{}}),this.resetGroupPropertiesForFlags(),this.reloadFeatureFlags()}},{key:"setPersonPropertiesForFlags",value:function(e){var t=!(arguments.length>1&&void 0!==arguments[1])||arguments[1];this._requirePersonProcessing("posthog.setPersonPropertiesForFlags")&&this.featureFlags.setPersonPropertiesForFlags(e,t)}},{key:"resetPersonPropertiesForFlags",value:function(){this.featureFlags.resetPersonPropertiesForFlags()}},{key:"setGroupPropertiesForFlags",value:function(e){var t=!(arguments.length>1&&void 0!==arguments[1])||arguments[1];this._requirePersonProcessing("posthog.setGroupPropertiesForFlags")&&this.featureFlags.setGroupPropertiesForFlags(e,t)}},{key:"resetGroupPropertiesForFlags",value:function(e){this.featureFlags.resetGroupPropertiesForFlags(e)}},{key:"reset",value:function(e){var t,n,i,r;if(!this.__loaded)return z.uninitializedWarning("posthog.reset");var s=this.get_property("$device_id");null===(t=this.persistence)||void 0===t||t.clear(),null===(n=this.sessionPersistence)||void 0===n||n.clear(),null===(i=this.persistence)||void 0===i||i.set_property(Ee,"anonymous"),null===(r=this.sessionManager)||void 0===r||r.resetSessionId();var o=this.config.get_device_id(Ue());this.register_once({distinct_id:o,$device_id:e?o:s},"")}},{key:"get_distinct_id",value:function(){return this.get_property("distinct_id")}},{key:"getGroups",value:function(){return this.get_property("$groups")||{}}},{key:"get_session_id",value:function(){var e,t;return null!==(e=null===(t=this.sessionManager)||void 0===t?void 0:t.checkAndGetSessionAndWindowId(!0).sessionId)&&void 0!==e?e:""}},{key:"get_session_replay_url",value:function(e){if(!this.sessionManager)return"";var t=this.sessionManager.checkAndGetSessionAndWindowId(!0),n=t.sessionId,i=t.sessionStartTimestamp,r=this.requestRouter.endpointFor("ui","/project/".concat(this.config.token,"/replay/").concat(n));if(null!=e&&e.withTimestamp&&i){var s,o=null!==(s=e.timestampLookBack)&&void 0!==s?s:10;if(!i)return r;var 
a=Math.max(Math.floor(((new Date).getTime()-i)/1e3)-o,0);r+="?t=".concat(a)}return r}},{key:"alias",value:function(e,t){return e===this.get_property(oe)?(z.critical("Attempting to create alias for existing People user - aborting."),-2):this._requirePersonProcessing("posthog.alias")?(F(t)&&(t=this.get_distinct_id()),e!==t?(this._register_single(ae,e),this.capture("$create_alias",{alias:e,distinct_id:t})):(z.warn("alias matches current distinct_id - skipping api call."),this.identify(e),-1)):void 0}},{key:"set_config",value:function(e){var t,n,i,s,o=r({},this.config);w(e)&&(Y(this.config,Cr(e)),null===(t=this.persistence)||void 0===t||t.update_config(this.config,o),this.sessionPersistence="sessionStorage"===this.config.persistence?this.persistence:new on(r(r({},this.config),{},{persistence:"sessionStorage"})),Ye.is_supported()&&"true"===Ye.get("ph_debug")&&(this.config.debug=!0),this.config.debug&&(v.DEBUG=!0),null===(n=this.sessionRecording)||void 0===n||n.startIfEnabledOrStop(),null===(i=this.autocapture)||void 0===i||i.startIfEnabled(),null===(s=this.heatmaps)||void 0===s||s.startIfEnabled(),this.surveys.loadIfEnabled())}},{key:"startSessionRecording",value:function(e){if(null!=e&&e.sampling){var t,n,i=null===(t=this.sessionManager)||void 0===t?void 0:t.checkAndGetSessionAndWindowId();null===(n=this.persistence)||void 0===n||n.register(l({},me,!0)),z.info("Session recording started with sampling override for session: ",null==i?void 0:i.sessionId)}this.set_config({disable_session_recording:!1})}},{key:"stopSessionRecording",value:function(){this.set_config({disable_session_recording:!0})}},{key:"sessionRecordingStarted",value:function(){var e;return!(null===(e=this.sessionRecording)||void 0===e||!e.started)}},{key:"loadToolbar",value:function(e){return this.toolbar.loadToolbar(e)}},{key:"get_property",value:function(e){var t;return null===(t=this.persistence)||void 0===t?void 0:t.props[e]}},{key:"getSessionProperty",value:function(e){var t;return null===(t=this.sessionPersistence)||void 0===t?void 0:t.props[e]}},{key:"toString",value:function(){var e,t=null!==(e=this.config.name)&&void 0!==e?e:xr;return t!==xr&&(t=xr+"."+t),t}},{key:"_isIdentified",value:function(){var e,t;return"identified"===(null===(e=this.persistence)||void 0===e?void 0:e.get_property(Ee))||"identified"===(null===(t=this.sessionPersistence)||void 0===t?void 0:t.get_property(Ee))}},{key:"_hasPersonProcessing",value:function(){var e,t,n,i;return!("never"===this.config.person_profiles||"identified_only"===this.config.person_profiles&&!this._isIdentified()&&S(this.getGroups())&&(null===(e=this.persistence)||void 0===e||null===(t=e.props)||void 0===t||!t[ae])&&(null===(n=this.persistence)||void 0===n||null===(i=n.props)||void 0===i||!i[Ce]))}},{key:"_requirePersonProcessing",value:function(e){return"never"===this.config.person_profiles?(z.error(e+' was called, but process_person is set to "never". 
This call will be ignored.'),!1):(this._register_single(Ce,!0),!0)}},{key:"_gdpr_init",value:function(){"localStorage"===this.config.opt_out_capturing_persistence_type&&Ye.is_supported()&&(!this.has_opted_in_capturing()&&this.has_opted_in_capturing({persistence_type:"cookie"})&&this.opt_in_capturing({enable_persistence:!1}),!this.has_opted_out_capturing()&&this.has_opted_out_capturing({persistence_type:"cookie"})&&this.opt_out_capturing({clear_persistence:!1}),this.clear_opt_in_out_capturing({persistence_type:"cookie",enable_persistence:!1})),this.has_opted_out_capturing()?this._gdpr_update_persistence({clear_persistence:!0}):this.has_opted_in_capturing()||!this.config.opt_out_capturing_by_default&&!Qe.get("ph_optout")||(Qe.remove("ph_optout"),this.opt_out_capturing({clear_persistence:this.config.opt_out_persistence_by_default}))}},{key:"_gdpr_update_persistence",value:function(e){var t,n,i,r,s;if(e&&e.clear_persistence)i=!0;else{if(!e||!e.enable_persistence)return;i=!1}this.config.disable_persistence||(null===(t=this.persistence)||void 0===t?void 0:t.disabled)===i||null===(r=this.persistence)||void 0===r||r.set_disabled(i),this.config.disable_persistence||(null===(n=this.sessionPersistence)||void 0===n?void 0:n.disabled)===i||null===(s=this.sessionPersistence)||void 0===s||s.set_disabled(i)}},{key:"_gdpr_call_func",value:function(e,t){return t=Y({capture:this.capture.bind(this),persistence_type:this.config.opt_out_capturing_persistence_type,cookie_prefix:this.config.opt_out_capturing_cookie_prefix,cookie_expiration:this.config.cookie_expiration,cross_subdomain_cookie:this.config.cross_subdomain_cookie,secure_cookie:this.config.secure_cookie},t||{}),Ye.is_supported()||"localStorage"!==t.persistence_type||(t.persistence_type="cookie"),e(this.config.token,{capture:t.capture,captureEventName:t.capture_event_name,captureProperties:t.capture_properties,persistenceType:t.persistence_type,persistencePrefix:t.cookie_prefix,cookieExpiration:t.cookie_expiration,crossSubdomainCookie:t.cross_subdomain_cookie,secureCookie:t.secure_cookie})}},{key:"opt_in_capturing",value:function(e){e=Y({enable_persistence:!0},e||{}),this._gdpr_call_func(Wn,e),this._gdpr_update_persistence(e)}},{key:"opt_out_capturing",value:function(e){var t=Y({clear_persistence:!0},e||{});this._gdpr_call_func(zn,t),this._gdpr_update_persistence(t)}},{key:"has_opted_in_capturing",value:function(e){return this._gdpr_call_func(Gn,e)}},{key:"has_opted_out_capturing",value:function(e){return this._gdpr_call_func(Vn,e)}},{key:"clear_opt_in_out_capturing",value:function(e){var t=Y({enable_persistence:!0},null!=e?e:{});this._gdpr_call_func(Qn,t),this._gdpr_update_persistence(t)}},{key:"debug",value:function(e){!1===e?(null==T||T.console.log("You've disabled debug mode."),localStorage&&localStorage.removeItem("ph_debug"),this.set_config({debug:!1})):(null==T||T.console.log("You're now in debug mode. 
All calls to PostHog will be logged in your console.\nYou can disable this with `posthog.debug(false)`."),localStorage&&localStorage.setItem("ph_debug","true"),this.set_config({debug:!0}))}}]),e}();!function(e,t){for(var n=0;n{o.r(t),o.d(t,{default:()=>l});var i=o(8711),n=o(83199);const r=(0,i.css)(['html,body,div,span,applet,object,iframe,h1,h2,h3,h4,h5,h6,p,blockquote,pre,a,abbr,acronym,address,big,cite,code,del,dfn,em,img,ins,kbd,q,s,samp,small,strike,strong,sub,sup,tt,var,b,u,i,center,dl,dt,dd,ol,ul,li,fieldset,form,label,legend,table,caption,tbody,tfoot,thead,tr,th,td,article,aside,canvas,details,embed,figure,figcaption,footer,header,hgroup,menu,nav,output,ruby,section,summary,time,mark,audio,video{margin:0;padding:0;border:0;font:inherit;font-size:100%;vertical-align:inherit;}body{font-size:14px;line-height:1.5;font-family:-apple-system,BlinkMacSystemFont,"Segoe UI",Ubuntu,"Helvetica Neue",sans-serif;min-height:100vh;scroll-behavior:smooth;text-rendering:optimizespeed;scrollbar-gutter:stable both-edges;}article,aside,details,figcaption,figure,footer,header,hgroup,menu,nav,section{display:block;}body{-webkit-tap-highlight-color:transparent;-webkit-overflow-scrolling:touch;}body > iframe:not([src$="statuspage.io/embed/frame"]){display:none;}ol,ul{list-style:none;}blockquote,q{quotes:none;}blockquote:before,blockquote:after,q:before,q:after{content:"";content:none;}table{border-collapse:collapse;border-spacing:0;th,tr{vertical-align:middle;}th{font-weight:bold;}}*{box-sizing:inherit;-webkit-font-smoothing:antialiased;font-weight:inherit;text-rendering:optimizelegibility;-webkit-appearance:none;border-width:0px;border-style:initial;border-color:initial;border-image:initial;margin:0px;outline:0px;padding:0px;text-decoration:none;}*,*::before,*::after{box-sizing:border-box;}input[type="button" i],input[type="submit" i],input[type="reset" i],input[type="file" i]::-webkit-file-upload-button,button{border-color:transparent;border-style:none;border-width:0;padding:0;}a{color:',";&:hover{text-decoration:underline;color:",";}&:visited{color:",";}}b{font-weight:bold;}i{font-style:italic;}code{font-family:monospace;}::-webkit-scrollbar{height:8px;width:8px;}",""],(e=>e.theme.colors.link),(e=>e.theme.colors.linkHover),(e=>e.theme.colors.link),(e=>e.isScreenSmall&&"\n ::-webkit-scrollbar {\n width: 0px !important;\n height: 0px !important;\n background: transparent !important; /* make scrollbar transparent */\n }\n ")),l=(0,i.createGlobalStyle)([""," ",""],r,n.webkitVisibleScrollbar)}}]); \ No newline at end of file diff --git a/src/web/gui/v2/3750.4ad02f036f2a7c520b1c.chunk.js b/src/web/gui/v2/3750.4ad02f036f2a7c520b1c.chunk.js deleted file mode 100644 index df5cf24f0..000000000 --- a/src/web/gui/v2/3750.4ad02f036f2a7c520b1c.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="7b7f67f5-e18c-474a-965d-53b795758a0e",e._sentryDebugIdIdentifier="sentry-dbid-7b7f67f5-e18c-474a-965d-53b795758a0e")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[3750],{63750:(e,t,l)=>{l.r(t),l.d(t,{default:()=>_});l(9391),l(41393),l(81454),l(62953);var 
n=l(96540),a=l(63950),o=l.n(a),r=l(83199),c=l(47731),d=l(71835),s=l(92155),i=l(63314),u=l(32089),m=(l(17333),l(3064),l(14905),l(98992),l(54520),l(72577),l(8872),l(8711));const g=(0,m.default)(r.MenuDropdown).attrs((e=>({background:"transparent",hideShadow:!0,height:{max:"300px"},width:{max:"600px"},overflow:"auto",padding:[2],...e}))).withConfig({displayName:"styled__CheckboxesContainer",componentId:"sc-8uj6na-0"})([""]),f=e=>{let{index:t,item:l,onItemClick:a}=e;const{group:o,label:c,value:d,node:s={}}=l,{selected:i,disabled:u}=s,m=o&&t>0?[3,0,1,0]:[1,0];return n.createElement(r.Flex,{padding:m},o?n.createElement(r.Flex,{gap:2,alignItems:"center"},n.createElement(r.Icon,{name:o.iconName,size:"small"}),n.createElement(r.Text,{color:o.textColor||"textFocus"},o.label)):n.createElement(r.Flex,null,n.createElement(r.Checkbox,{checked:i,disabled:u,onChange:e=>{u||a({value:d,label:c,checked:e})},label:c,value:d,labelPosition:"right"})))};var p=l(47193);const b={live:{iconName:"connectivityStatusLive",label:"Live",textColor:"primary"},offline:{iconName:"connectivityStatusOffline",label:"Offline",textColor:"textFocus"},stale:{iconName:"connectivityStatusStale",label:"Stale",textColor:"textFocus"}},E=(e,t)=>{var l;return t&&Object.keys(t)&&null!==(l=t[e])&&void 0!==l&&l.length?[{group:b[e]},...t[e]]:[]},v=e=>{let{selectedNodes:t,setSelectedNodes:l,activeNodesLimit:a=0}=e;const{nodes:o}=(0,p.A)(),[r,c]=(0,n.useState)([]),d=(0,n.useCallback)((e=>{let{value:n,label:a,checked:o}=e;o&&!t.find((e=>e.value==n))?l((e=>[...e,{value:n,label:a}])):!o&&t.find((e=>e.value==n))&&l((e=>e.filter((e=>e.value!=n))))}),[t]);return(0,n.useEffect)((()=>{o&&c(((e,t,l)=>{const n=e.reduce(((e,n)=>{const a={...n},o=n.isLive?"live":n.isOffline?"offline":"stale";t.find((e=>{let{value:t}=e;return t==n.id}))?a.selected=!0:l==t.length&&(a.disabled=!0);const r={label:n.name,value:n.id,node:a};return e[o]?e[o]=[...e[o],r]:e[o]=[r],e}),{});return[...E("live",n),...E("offline",n),...E("stale",n)]})(o,t,a))}),[o,t]),n.createElement(g,{items:r,Item:f,onItemClick:d,searchMargin:[0,0,4,0],hasSearch:!0})};var x=l(6586),h=l(28738),y=l(58168);const N=e=>{let{isTitle:t,value:l,selectedNodes:a,setSelectedNodes:c=o(),children:d}=e;const s=(0,n.useCallback)((()=>{t||c((e=>e.filter((e=>e.value!=l))))}),[t,l,a,c]),i={gap:1,height:"18px",alignItems:"center",padding:[0,2],margin:[0,0,2,0],round:!0,...t?{}:{background:"nodeBadgeBackground"}};return n.createElement(r.Flex,i,!t&&n.createElement(r.Icon,{name:"x",size:"small",onClick:s,color:"text",cursor:"pointer"}),d)},S=e=>{let{selectedNodes:t,setSelectedNodes:l}=e;return t.length?n.createElement(r.Flex,{flexWrap:!0,gap:2},n.createElement(N,{isTitle:!0},n.createElement(r.TextBig,null,"Selected Nodes:")),t.map((e=>{let{label:a,...o}=e;return n.createElement(N,(0,y.A)({key:o.value,selectedNodes:t,setSelectedNodes:l},o),n.createElement(r.TextSmall,{color:"text"},a))}))):null};var k=l(12602),w=l(73865);const C=e=>{let{selectedNodes:t,setSelectedNodes:l,loading:a,error:o}=e;const{loaded:c,currentPlan:d,maxNodes:s}=(0,w.A)();return n.createElement(r.Flex,{column:!0,gap:4},a&&c?n.createElement(h.A,{height:"280px",title:"Saving selection..."}):n.createElement(n.Fragment,null,n.createElement(r.Flex,{column:!0,margin:[3,0,0,0],gap:4},n.createElement(r.TextBig,null,"Your current"," ",n.createElement(r.TextBig,{strong:!0},d.class," (",d.version,")")," ","plan does not allow more than ",n.createElement(r.TextBig,{strong:!0},s," active nodes"),"."),n.createElement(r.TextBig,null,"To proceed, select the 
",n.createElement(r.TextBig,{strong:!0},s," Nodes")," that you wish to keep active. Any Nodes beyond this limit will be deactivated."),o&&n.createElement(r.Flex,{gap:2,alignItems:"center"},n.createElement(r.Icon,{name:"warning_triangle",size:"medium",color:"warning"}),n.createElement(r.TextBig,null,"For some reason, we coudn't save your selection. Please retry."))),n.createElement(u.A,null),n.createElement(v,{selectedNodes:t,setSelectedNodes:l,activeNodesLimit:s}),n.createElement(u.A,null),n.createElement(r.Flex,{column:!0,gap:4},n.createElement(S,{selectedNodes:t,setSelectedNodes:l}),n.createElement(r.TextBig,null,"Remember, you can"," ",n.createElement(k.A,null,n.createElement(r.TextBig,{color:"primary"},"upgrade your space back to the Business plan"))," ","for unlimited access at any time."),n.createElement(r.TextBig,null,"The node selection isn't a commitment, you can modify the active Nodes any time on the"," ",n.createElement(x.A,null,n.createElement(r.TextBig,{color:"primary"},"Space Settings page")),"."))))},I=(0,m.default)(r.ModalContent).attrs((e=>{let{isMobile:t}=e;return{width:t?{base:"95vw"}:{base:150}}})).withConfig({displayName:"styled__PreferredNodesModalContent",componentId:"sc-1v8zr4l-0"})([""]);var A=l(47762),T=l(29848),B=l(87659);const F=(0,s.A)(r.Button),_=e=>{let{onSuccessfulSave:t=o()}=e;const l=(0,T.ly)(),a=(0,c.J)(),[s,u]=(0,d.A)(),m=(0,A.je)(),[g,f]=(0,n.useState)([]),[p,b]=(0,n.useState)(),[E,,v,x]=(0,B.A)(),h=(0,n.useCallback)((()=>{v(),m(g.map((e=>{let{value:t}=e;return t}))).then((()=>{s({header:"Success",text:"You selection was successfully saved"}),t(),l()})).catch((()=>{b(!0),u({header:"Error",text:"Selection failed to be saved"})})).finally((()=>{x()}))}),[g,m]);return n.createElement(r.Modal,{backdropProps:{backdropBlur:!0}},n.createElement(i.Ay,{feature:"PreferredNodesModal"},n.createElement(I,{isMobile:a},n.createElement(r.ModalHeader,null,n.createElement(r.Flex,{gap:2,alignItems:"center"},n.createElement(r.Icon,{name:"netdataPress",color:"text"}),n.createElement(r.H4,null,"Required Action"))),n.createElement(r.ModalBody,null,n.createElement(C,{selectedNodes:g,setSelectedNodes:f,loading:E,error:p})),!E&&n.createElement(r.ModalFooter,null,n.createElement(r.Flex,{gap:4,justifyContent:"end",padding:[1,2]},n.createElement(F,{feature:"SavePreferredNodes",label:"Save",onClick:h,disabled:E||!g.length,payload:{selectedNodes:g.map((e=>{let{value:t}=e;return t})).join(",")}}))))))}},47193:(e,t,l)=>{l.d(t,{A:()=>s});l(62953);var n=l(69765),a=l(67990),o=l(87860),r=l(3914),c=l(47762),d=l(87659);const s=function(){let{polling:e=!0}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const{id:t=""}=(0,n.pr)(),l=(0,a.gr)(t,"ids"),s=(0,a.gr)(t,"loaded"),i=(0,c.Gt)(l),u=(0,r.vt)(),[m,,,g]=(0,d.A)();return(0,o.A)({id:t,spaceId:u,polling:e}),{areDefaultRoomNodesLoaded:s,nodes:i,isClaimNodeModalOpen:m,closeClaimNodeModal:g}}}}]); \ No newline at end of file diff --git a/src/web/gui/v2/3843.89070793921be1288bb5.css b/src/web/gui/v2/3843.89070793921be1288bb5.css deleted file mode 100644 index e5d02668f..000000000 --- a/src/web/gui/v2/3843.89070793921be1288bb5.css +++ /dev/null @@ -1,2 +0,0 @@ -.default .dygraph-axis-label{color:#35414a}.dark .dygraph-axis-label{color:#fff}.dygraph-label-rotate-right{text-align:center;transform:rotate(-90deg);-webkit-transform:rotate(-90deg);-moz-transform:rotate(-90deg);-o-transform:rotate(-90deg);-ms-transform:rotate(-90deg)}.dygraph-annotation{position:absolute;z-index:10;overflow:hidden;border:1px solid} - diff --git 
a/src/web/gui/v2/3843.ffbb6f614ba4f7b77570.chunk.js b/src/web/gui/v2/3843.ffbb6f614ba4f7b77570.chunk.js deleted file mode 100644 index b444666ff..000000000 --- a/src/web/gui/v2/3843.ffbb6f614ba4f7b77570.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="79eb8fed-e1a0-4523-ab23-55a2de3ad85e",e._sentryDebugIdIdentifier="sentry-dbid-79eb8fed-e1a0-4523-ab23-55a2de3ad85e")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[3843,7170],{79304:(e,t,n)=>{n.d(t,{A:()=>s});n(25440);const s=e=>(e||"").replace(/[\s:()./]/g,"_")},17170:(e,t,n)=>{n.d(t,{A:()=>a});n(62953),n(3296),n(27208),n(48408);var s=n(37618);const a=()=>{if(s.Ay)return null;const{origin:e,searchParams:t}=new URL(window.location.href),n=null===t||void 0===t?void 0:t.get("labra_subscription_id"),a=null===t||void 0===t?void 0:t.get("aws_customer_id"),o=null===t||void 0===t?void 0:t.get("aws_product_id");return n&&a&&o?"".concat(e,"/api/v2/billing/labra/spaces?customer_id=").concat(a,"&marketplace=aws&product_id=").concat(o,"&subscription_id=").concat(n):null}},61658:(e,t,n)=>{n.d(t,{O:()=>a});const s={ErrNoChartExist:"ErrNoChartExist"},a=function(e){return!!(arguments.length>1&&void 0!==arguments[1]?arguments[1]:s)[e]}},1239:(e,t,n)=>{var s;n.d(t,{ei:()=>a});const a=(null===(s=window.envSettings)||void 0===s?void 0:s.onprem)||!1},42629:(e,t,n)=>{n.d(t,{Oq:()=>c,sd:()=>o,tr:()=>d,zA:()=>l});var s=n(78659),a=n.n(s);const o=e=>Math.round(10*e)/10,r=e=>{const{gutter:t,containerWidth:n,gridTracks:s}=e;return(n-t*(s-1))/s};function i(e,t,n){return Number.isFinite(e)?o(t*e+Math.max(0,e-1)*n):e}function c(e,t,n,s,a,c){const{gutter:l,rowHeight:d}=e,h=r(e),u={};return c?(u.width=i(c.width,h,l),u.height=i(c.height,d,l),u.top=o((d+l)*c.top),u.left=o((h+l)*c.left)):(u.width=i(s,h,l),u.height=i(a,d,l),u.top=o((d+l)*n),u.left=o((h+l)*t)),u}function l(e,t,n,s){const{gutter:i,gridTracks:c,rowHeight:l,maxRows:d}=e,h=r(e);let u=o((n-i)/(h+i)),p=o((t-i)/(l+i));return u=a()(u,0,c-s.width),p=a()(p,0,d-s.height),{left:u,top:p}}const d=(e,t,n,s)=>{const{gutter:i,maxRows:c,gridTracks:l,rowHeight:d}=e,h=r(e);let u=o((t+i)/(h+i)),p=o((n+i)/(d+i));return u=a()(a()(u,s.minWidth||3,l-s.left),0,l),p=a()(a()(p,s.minHeight||2,c-s.top),0,c),{width:u,height:p}}},34604:(e,t,n)=>{n.d(t,{A:()=>c});var s=n(58168),a=n(96540),o=n(8711),r=n(83199);const i=o.default.div.withConfig({displayName:"container__Div",componentId:"sc-tikmd7-0"})(["position:relative;"]),c=(0,a.forwardRef)(((e,t)=>{let{onRemove:n,children:o,...c}=e;return a.createElement(i,(0,s.A)({},c,{ref:t}),a.createElement(r.Icon,{name:"x",size:"small",onClick:n}),o)}))},75233:(e,t,n)=>{n.d(t,{A:()=>p});var s=n(58168),a=(n(3064),n(41393),n(98992),n(72577),n(81454),n(62953),n(96540)),o=n(40961),r=n(43375),i=n(45467),c=n(34604),l=n(44554),d=n(42629),h=n(13692);const 
u={sideEffects(e){let{active:t}=e;t.node.animate([{opacity:0},{opacity:1}],{easing:"ease-in",duration:250})}},p=e=>{let{rootId:t,items:n,onRemove:p,Item:m,containerWidth:g=800,rowHeight:f=40,gridTracks:b=12,gutter:y=4,allowOverlap:v=!1,compactType:_="vertical",maxRows:w=3e5,Container:k=c.A,onDragEnd:T,containerId:S,itemProps:M,rearrangeable:x=!0}=e;const C=(0,a.useRef)(),[P,B]=(0,a.useState)((()=>(0,l.oE)((0,l.Su)(n),_,b)));(0,i.A)((()=>{B((0,l.oE)((0,l.Su)(n),_,b))}),[b]);const[q,I]=(0,a.useState)(null),[L,A]=(0,a.useState)(null),E=(0,a.useRef)(null),{active:D}=(0,r.fF)();(0,r.E5)((0,a.useMemo)((()=>({onDragStart:e=>{let{active:t}=e;const{itemContainerId:n}=t.data.current;n===S&&(I(P),A(P))},onDragMove:e=>{let{active:t,over:n}=e;A((e=>{const{isResizer:s,isContainer:a,itemId:o,itemContainerId:r}=t.data.current;if(a||r!==S)return e;const{initial:i,translated:c}=t.rect.current;let h=e;const u=(0,l.GN)(h,o);if(!u)return e;const p=null===n||void 0===n?void 0:n.id;if(s){if(!p)return e;const t=n.rect.width+(0,d.sd)(c.right-i.right),s=n.rect.height+(0,d.sd)(c.bottom-i.bottom);let{width:a,height:o}=(0,d.tr)({gutter:y,maxRows:w,gridTracks:b,rowHeight:f,containerWidth:g},t,s,u);return u.width===a&&u.height===o?e:(u.width=a,u.height=o,h=h.map((e=>e.id===u.id?{...u,width:a,height:o}:e)),h=(0,l.oE)(h,_,b),E.current=u,h)}if(!x)return e;const m=C.current.getBoundingClientRect();let{left:k,top:T}=(0,d.zA)({gutter:y,maxRows:w,gridTracks:b,rowHeight:f,containerWidth:g},c.top-m.top,c.left-m.left,u);return u.top===T&&u.left===k?e:(h=(0,l.Pe)(h,u,k,T,!0,!1,_,b,v),h=(0,l.oE)(h,_,b),E.current=u,h)}))},onDragEnd:e=>{let{active:t,over:n}=e;const{isResizer:s,isContainer:a,itemContainerId:o}=t.data.current;if(a||o!==S)return void A(null);const r=null===n||void 0===n?void 0:n.id;if(s)return L&&B(L),A(null),void T(L,E.current);null!==r?(L&&B(L),A(null),T(L,E.current)):A(null)},onDragCancel:()=>{q&&B(q),E.current=null,I(null),A(null)}})),[L]));const N=(0,l.Hp)(L||P),R=N*f+(N-1)*y+"px";return a.createElement(k,{ref:C,style:{width:g,height:R}},(L||P).map(((e,n)=>a.createElement(h.A,(0,s.A)({key:e.id},e,{index:n,containerId:S,Item:m,onRemove:p,draggable:!0,containerWidth:g,rowHeight:f,gridTracks:b,gutter:y,transformed:null!==L?L[n]:null,draggableProps:e,itemProps:M,rootId:t})))),(0,o.createPortal)(a.createElement(r.Hd,{adjustScale:!1,dropAnimation:u,zIndex:9999},D&&D.data.current.rootId===t&&D.data.current.isItem?(e=>{const n=L||P;if(!n)return null;const o=n.find((t=>t.id===e.itemId));return o?a.createElement(h.A,(0,s.A)({rootId:t},o,{containerId:e.itemContainerId,Item:m,onRemove:p,draggable:!0,containerWidth:g,rowHeight:f,gridTracks:b,gutter:y,dragOverlay:!0,itemProps:M})):null})(D.data.current):null),document.body))}},13692:(e,t,n)=>{n.d(t,{A:()=>d});var s=n(58168),a=n(96540),o=n(8711),r=n(83199),i=n(43375),c=n(42629);const 
l=(0,o.default)(r.IconButton).attrs({icon:"resize_handler",padding:[0],position:"absolute",bottom:0,right:0,hoverColor:"textDescription",width:"14px",height:"14px"}).withConfig({displayName:"item__ResizeButton",componentId:"sc-1g5beux-0"})(["&&{cursor:nwse-resize;}"]),d=e=>{let{draggable:t,id:n,containerId:o,index:d,onRemove:h,itemProps:u,Item:p,top:m,left:g,width:f,height:b,containerWidth:y,rowHeight:v,gridTracks:_,gutter:w,transformed:k,draggableProps:T,dragOverlay:S,containerDragOverlay:M,rootId:x,containerDndProps:C}=e;const{setNodeRef:P}=(0,i.zM)({id:S||M?"dragOverlay-".concat(x,"-").concat(n):"".concat(x,"-").concat(n),disabled:!t||S||M,data:{isItem:!0,itemId:n,itemContainerId:o,rootId:x,...C}}),{attributes:B,isDragging:q,listeners:I,setNodeRef:L,setActivatorNodeRef:A}=(0,i.PM)({id:S||M?"dragOverlay-".concat(x,"-").concat(n):"".concat(x,"-").concat(n),disabled:!t||S||M,data:{isItem:!0,itemId:n,itemContainerId:o,...T,itemProps:u,rootId:x}}),E=(0,i.PM)({id:S||M?"dragOverlay-".concat(x,"-").concat(n,"-resizeHandle"):"".concat(x,"-").concat(n,"-resizeHandle"),disabled:S||M,data:{isResizer:!0,itemId:n,itemContainerId:o,rootId:x}}),D=(0,c.Oq)({containerWidth:y,rowHeight:v,gridTracks:_,gutter:w},g,m,f,b,k),N=S?{width:"".concat(D.width,"px"),height:"".concat(D.height,"px"),boxShadow:"\n 0 0 0 1px rgba(63, 63, 68, 0.05),\n -1px 0 15px 0 rgba(34, 33, 81, 0.01),\n 0px 15px 15px 0 rgba(34, 33, 81, 0.25)\n "}:{transition:q||E.isDragging?"":"top 100ms ease, left 100ms ease, width 100ms ease, height 100ms ease",position:"absolute",width:"".concat(D.width/y*100,"%"),height:"".concat(D.height,"px"),left:"".concat(D.left/y*100,"%"),top:"".concat(D.top,"px")};return a.createElement(p,(0,s.A)({},u,{containerId:o,ref:(0,r.mergeRefs)(L,P),dragging:q,resizing:E.isDragging,handleProps:S||M||!t?void 0:{ref:A},index:d,style:N,onRemove:h,id:n,attributes:B,listeners:I,resizeHandle:a.createElement(l,(0,s.A)({ref:E.setNodeRef},E.attributes,E.listeners)),dragOverlay:S||M}))}},38257:(e,t,n)=>{n.d(t,{A:()=>r});n(62953);var s=n(96540),a=n(43375);const o={vertical:{start:"top",end:"bottom",mouse:"clientY",clientDimension:"clientHeight",scrollDimension:"scrollHeight",scrollStart:"scrollTop"},horizontal:{start:"left",end:"right",mouse:"clientX",clientDimension:"clientWidth",scrollDimension:"scrollWidth",scrollStart:"scrollLeft"}},r=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:"vertical";const{start:t,end:n,mouse:r,clientDimension:i,scrollDimension:c,scrollStart:l}=o[e]||o.vertical,{active:d}=(0,a.fF)(),h=(0,s.useRef)(null),[u,p]=(0,s.useState)(null);return(0,s.useEffect)((()=>{if(!u)return;const e=h.current;if(!e)return;const t=setInterval((()=>{e[l]+=5*u}),5);return()=>clearInterval(t)}),[u,h.current]),(0,s.useEffect)((()=>{const e=e=>{const s=h.current;if(!d||!s)return;if(!(s[c]>s[i]))return;const a=s.getBoundingClientRect(),o=e[r],l=oa[n]-100?1:null;p((e=>l!==e?l:e))};return d?window.addEventListener("mousemove",e):(window.removeEventListener("mousemove",e),p(null)),()=>{window.removeEventListener("mousemove",e),p(null)}}),[d,h.current]),h}},18202:(e,t,n)=>{n.d(t,{A:()=>s});const 
s=e=>{let{contextScope:t,nodesScope:n,aggregationMethod:s,groupBy:a,groupByLabel:o,postAggregationMethod:r,postGroupBy:i,postGroupByLabel:c,groupingMethod:l,groupingTime:d,chartType:h,chartLibrary:u,selectedDimensions:p,selectedLabels:m,selectedNodes:g,selectedInstances:f,sparkline:b,selectedLegendDimensions:y,showingInfo:v,dimensionsSortBy:_,instancesSortBy:w,nodesSortBy:k,groupBySortBy:T,labelsSortBy:S,dimensionsSort:M,nodesExpanded:x,groupByExpanded:C,labelsExpanded:P,expanded:B,staticZones:q,overlays:I,title:L,description:A,showPostAggregations:E,desiredUnits:D}=e;return{contextScope:t,nodesScope:n,aggregationMethod:s,groupBy:a,groupByLabel:o,postAggregationMethod:r,postGroupBy:i,postGroupByLabel:c,groupingMethod:l,groupingTime:d,chartType:h,chartLibrary:u,selectedDimensions:p,selectedLabels:m,selectedNodes:g,selectedInstances:f,sparkline:b,selectedLegendDimensions:y,showingInfo:v,dimensionsSortBy:_,instancesSortBy:w,nodesSortBy:k,groupBySortBy:T,labelsSortBy:S,dimensionsSort:M,nodesExpanded:x,groupByExpanded:C,labelsExpanded:P,expanded:B,staticZones:q,overlays:I,title:L,description:A,showPostAggregations:E,desiredUnits:D}}},64125:(e,t,n)=>{n.d(t,{A:()=>M});var s=n(58168),a=(n(41393),n(81454),n(62953),n(96540)),o=n(83199),r=n(18202),i=n(87659),c=n(2386),l=n(6504),d=n(22332),h=n(18925),u=n(40933),p=n(44644),m=(n(3064),n(98992),n(72577),n(92155));const g={room:"space_new",space:"spaces_v2",personal:"user"},f=[{icon:g.personal,value:"personal",label:"Personal"},{icon:g.room,value:"room",label:"Room"},{icon:g.space,value:"space",label:"Space"}],b=(0,m.A)(o.Button),y=e=>{let{onSubmit:t,onClose:n}=e;const[s,r]=(0,a.useState)(""),[i,c]=(0,a.useState)("personal");return a.createElement(o.Modal,{onClickOutside:n,onEsc:n},a.createElement(o.ModalContent,{background:"dropdown"},a.createElement(o.ModalHeader,null,"Create a new settings snapshot",a.createElement(o.ModalCloseButton,{testId:"close-button",onClose:n})),a.createElement(o.ModalBody,null,a.createElement(o.Flex,{column:!0,width:80,gap:3},a.createElement(o.TextInput,{"data-testid":"name",label:"Name",onChange:e=>{let{target:t}=e;return r(t.value)},value:s}),a.createElement(o.Flex,{column:!0,gap:1},a.createElement(o.TextSmall,{strong:!0},"Scope"),a.createElement(o.Select,{options:f,value:f.find((e=>e.value===i)),onChange:e=>{let{value:t}=e;return c(t)},styles:{minWidth:"80px"}})))),a.createElement(o.ModalFooter,null,a.createElement(b,{disabled:""===s.trim(),label:"Create",onClick:()=>t({name:s,scope:i}),"data-testid":"btn-create",payload:{description:"Modal - Create Settings"}}))))},v={color:"textLite",margin:[0,1,0,0],width:"14px",height:"14px"},_=e=>{let{openForm:t,close:n}=e;return a.createElement(o.Flex,{padding:[2,1],border:{side:"top"},justifyContent:"center"},a.createElement(o.Button,{small:!0,icon:"save",onClick:()=>{t(),n()},"data-ga":"user-settings::click-create",title:"Create a new setting with your changes"},"Add new setting"))},w=e=>{let{id:t,close:n}=e;const s=(0,d.useChart)(),i=(0,h.xS)(t,{onSuccess:n});return a.createElement(o.IconButton,{icon:"save",iconColor:"textLite",onClick:e=>{e.stopPropagation(),i({value:(0,r.A)(s.getAttributes())})},"data-ga":"user-settings::click-update",neutral:!0,padding:[0],title:"Update this setting with your changes"})},k=e=>{let{id:t,close:n}=e;const s=(0,h.z2)(t,{onSuccess:n});return a.createElement(o.IconButton,{icon:"trashcan",iconColor:"textLite",onClick:e=>{e.stopPropagation(),s()},"data-ga":"user-settings::click-delete",neutral:!0,padding:[0],title:"Delete this 
setting"})},T=e=>{let{item:{value:t,label:n,icon:r,disabled:i,onClick:c,...l},value:d,onItemClick:h,index:u,style:p,close:m,...g}=e;const f=d===t;return a.createElement(o.MenuItemContainer,(0,s.A)({"data-index":u,"aria-selected":f,disabled:i,selected:f,onClick:e=>{c&&c(e),h(t)}},l,g,{style:p,alignItems:"center",justifyContent:"between",padding:[1],overflow:"hidden"}),a.createElement(o.Flex,{alignItems:"center"},r,a.createElement(o.TextSmall,{whiteSpace:"normal",wordBreak:"break-word"},n)),a.createElement(o.Flex,{alignItems:"center",gap:.5},a.createElement(w,{id:t,close:m}),a.createElement(k,{id:t,close:m})))},S=e=>{let{disabled:t}=e;const n=(0,d.useChart)(),m=(0,u._)({params:{type:["chart"],entity:n.getAttribute("contextScope")}}),[f,b]=(0,p.WY)({type:"chart",entity:n.getAttribute("contextScope")[0]}),w=(0,a.useCallback)((e=>(f===e&&n.resetPristine(),b(e))),[n,f,b]),k=((e,t)=>(0,a.useMemo)((()=>t.map((t=>({value:t.id,label:t.name,icon:a.createElement(o.Icon,(0,s.A)({name:g[t.scope]||g.personal},v)),"data-track":e.track("setting-".concat(t.scope))})))),[e,t]))(n,m),[S,,M,x]=(0,i.A)(),C=(0,h.yK)({onSuccess:()=>{x(),close()}});return a.createElement(a.Fragment,null,a.createElement(o.Menu,{value:f,items:k,dropProps:{align:{top:"bottom",right:"right"},"data-toolbox":n.getId()},dropdownProps:{width:"200px"},onChange:w,"data-track":n.track("user-settings"),Item:T,Footer:e=>a.createElement(_,(0,s.A)({},e,{openForm:M}))},a.createElement(l.Button,{icon:a.createElement(l.default,{svg:c.default,size:"16px"}),title:"User settings",disabled:t,"data-testid":"chartHeaderToolbox-addSettings"})),S&&a.createElement(y,{onSubmit:function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return C({type:"chart",entity:n.getId(),path:"/",...e,value:(0,r.A)(n.getAttributes())})},onClose:x}))},M=(0,a.memo)(S)},83465:(e,t,n)=>{n.d(t,{Oq:()=>a,Q:()=>o,bP:()=>r,ml:()=>s});n(25440);const s=(e,t)=>e.on("sizeChanged",((e,n,s)=>{if(e.getParent()!==t)return;const a=e.getAttribute("id"),o=t.getAttribute("host"),r="chart_height.".concat(o,"/").concat(a);localStorage.setItem(r,n);const i="chart_width.".concat(o,"/").concat(a);localStorage.setItem(i,s)})),a=(e,t,n)=>{const s=e.getAttribute("host"),a="chart_height.".concat(s,"/").concat(t),o=localStorage.getItem(a);return o?/px/.test(o)?parseInt(o.replace("px",""),10):parseInt(o,10):n},o=e=>{null!==e&&void 0!==e&&e.id&&localStorage.setItem("chart_layout/".concat(e.id),JSON.stringify(e))},r=function(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{};const s=localStorage.getItem("chart_layout/".concat(e));try{let e=JSON.parse(s);return e?(e=(e=>{let{i:t,id:n=t,x:s,left:a=s,y:o,top:r=o,w:i,width:c=i,h:l,height:d=l}=e;return{id:n,width:c,height:d,left:a,top:r}})(e),{...n,...t,...e}):{...n,...t}}catch(a){return{...n,...t}}}},75793:(e,t,n)=>{n.d(t,{A:()=>c});var s=n(58168),a=(n(62953),n(96540)),o=n(83199),r=n(80158),i=n(29217);const c=e=>{let{text:t,TextComponent:n=o.Text,...c}=e;const[l,d]=(0,a.useState)(""),[h,u]=(0,a.useState)();return(0,a.useEffect)((()=>{if(!h)return;const e=h.offsetWidth;let n=0;for(;h.scrollWidth>e;)h.textContent=(0,r.P3)(h.textContent,n),n+=1;h.textContent!==t&&d(t)}),[t,h]),a.createElement(i.A,{content:l?t:"",align:"bottom",isBasic:!0},a.createElement(n,(0,s.A)({truncate:!0,ref:u},c),t))}},98496:(e,t,n)=>{n.d(t,{A:()=>u});var 
s=n(58168),a=n(96540),o=n(83199),r=n(12897),i=n.n(r),c=n(55042),l=n.n(c),d=new(i())({id:"partialFailureSvg",use:"partialFailureSvg-usage",viewBox:"0 0 348 348",content:''});l().add(d);const h=d,u=e=>{let{title:t,children:n,...r}=e;return a.createElement(o.Flex,{alignItems:"center",justifyContent:"center",flex:!0,gap:8},a.createElement("svg",{id:"partial_failure",width:"288px",height:"234px"},a.createElement("use",{xlinkHref:"#".concat(h.id)})),a.createElement(o.Flex,(0,s.A)({column:!0,width:{max:125},gap:2},r),a.createElement(o.H3,null,t),n))}},9683:(e,t,n)=>{n.d(t,{A:()=>o});var s=n(96540),a=n(83199);const o=e=>{let{width:t=443,height:n=249,videoId:o,...r}=e;return s.createElement(a.Flex,r,s.createElement(a.Box,{className:"video-responsive"},s.createElement("iframe",{width:t,height:n,src:"https://www.youtube.com/embed/".concat(o),frameBorder:"0",allow:"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture",allowFullScreen:!0,title:"Embedded youtube"})))}},41198:(e,t,n)=>{n.r(t),n.d(t,{SettingsContainer:()=>Tr,default:()=>Mr});n(62953),n(3296),n(27208),n(48408);var s=n(96540),a=n(39225),o=n(47767),r=n(97054),i=n(56820),c=n(46741),l=n(69765),d=n(47444),h=n(63129);var u=n(87860),p=(n(9920),n(41393),n(14905),n(98992),n(3949),n(81454),n(8872),n(99090)),m=n(64118),g=n(45860),f=n(11988),b=n(51074),y=n(94355);var v=n(29662),_=n(3914),w=n(87633),k=n(66294),T=n(56359),S=n(14994);var M=n(28738),x=n(68831),C=n(58168),P=n(83199),B=n(38257),q=n(92138),I=n(54621),L=n(11128);const A=e=>{let{error:t,testPrefix:n,onClick:s,flavour:a,value:o}=e;return{"data-testid":n?"".concat(n,"-").concat(a):a,flavour:a,...s&&{onClick:s},text:t?"-":"".concat(o)}},E=e=>{let{critical:t,warning:n,error:a,testPrefix:o,...r}=e;const i=A({error:a,testPrefix:o,flavour:t?"error":"disabledError",value:t}),c=A({error:a,testPrefix:o,flavour:n?"warning":"disabledWarning",value:n});return s.createElement(P.Flex,r,s.createElement(P.MasterCard,{"data-testid":"alertsMastercard",pillLeft:i,pillRight:c,size:"small"}))},D=e=>{let{id:t,testPrefix:n,...a}=e;const o=(0,f.Xt)(t),{critical:r,warning:i}=(0,m.AO)(o);return s.createElement(E,(0,C.A)({"data-testid":"alerts",testPrefix:n,critical:r,warning:i},a))},N=e=>{let{critical:t,warning:n,error:a}=e;return s.createElement(D,{critical:t,"data-testid":"tabAlerts",error:a,testPrefix:"tabAlerts-alert",warning:n,margin:[0,0,0,1]})},R=e=>{let{nodeId:t}=e;const n=(0,f.Xt)(t),{critical:a,warning:o}=(0,m.AO)(n);return s.createElement(N,{critical:a,warning:o})},F=()=>{const e=(0,L.w7)({emptyIfAll:!1}),t=(0,f.YS)(e),n=(0,b.s)("error"),{critical:a,warning:o}=(0,m.AO)(t);return s.createElement(N,{critical:a,warning:o,error:n})},U={warning:"warning",critical:"error"},z=e=>{let{alertId:t}=e;const n=(0,m.JL)(t,"status");return U[n]?s.createElement(P.Flex,{alignSelf:"center",margin:[0,0,0,2],round:1,background:U[n],width:2,height:2}):null};n(17333),n(54520);var O=n(23630);const H=()=>{const e=(0,b.s)("error"),t=(0,b.s)("updatedAt");return s.createElement(O.A,{error:e,updatedAt:t,text:"Alerts and Notifications"})};var V=n(37618);const Q=()=>{const e=(0,l.r9)(),t=(0,c.JT)("dashboard:ReadAll"),n=(e=>(0,s.useMemo)((()=>({home:{id:"home",title:"Home",icon:"room_home",path:"".concat(e,"/home"),dataGa:"view-picker::click-view-home::global-view",testId:"viewPicker-home",tooltip:"A dashboard metrics from all your 
nodes"},overview:{id:"overview",title:"Metrics",icon:"room_overview",path:"".concat(e,"/overview"),dataGa:"view-picker::click-view-overview::global-view",testId:"viewPicker-overview",tooltip:"System, containers, VMs and application metrics"},nodes:{id:"nodes",title:"Nodes",icon:"nodes_hollow",path:"".concat(e,"/nodes"),dataGa:"view-picker::click-view-nodes::global-view",testId:"viewPicker-nodes",tooltip:"An index of your nodes with alert status and key metrics"},...!window.envSettings.isAgent&&!window.envSettings.onprem&&{k8s:{id:"k8s",title:"K8s",icon:"serviceKubernetes",path:"".concat(e,"/kubernetes"),dataGa:"view-picker::click-view-kubernetes::global-view",testId:"viewPicker-kubernetes",tooltip:"Kubernetes clusters and container metrics"}},top:{id:"top",title:"Top",icon:"top",path:"".concat(e,"/top"),dataGa:"view-picker::click-view-fn::global-overview",testId:"viewPicker-fn",tooltip:"Top processes, containers, VMs, services, queries and system components"},logs:{id:"logs",title:"Logs",icon:"logs",path:"".concat(e,"/logs"),dataGa:"view-picker::click-view-logs::global-overview",testId:"viewPicker-logs",tooltip:"System and application logs"},dashboards:{id:"dashboards",title:"Dashboards",icon:"dashboard",path:"".concat(e,"/dashboards"),dataGa:"view-picker::click-view-dashboard::global-view",testId:"viewPicker-customDashboards",tooltip:"Your custom dashboards",droppable:!0,droppableProps:{dropArea:!0,dropinDashboards:!0}},dashboard:{id:"dashboard",title:"Dashboard",icon:"dashboard",path:"".concat(e,"/dashboard"),dataGa:"view-picker::click-view-dashboard::global-view",testId:"viewPicker-customDashboard",tooltip:"Your local custom dashboard",droppable:!0,droppableProps:{dropArea:!0,dashboardId:V.LA}},alerts:{id:"alerts",title:"Alerts",icon:"alarm",path:"".concat(e,"/alerts"),dataGa:"view-picker::click-view-alerts::global-view",testId:"viewPicker-alerts",tooltip:s.createElement(H,null),children:s.createElement(F,null)},ar:{id:"ar",title:"Anomalies",icon:"anomaliesLens",path:"".concat(e,"/anomalies"),dataGa:"view-picker::click-view-anomalies::global-view",testId:"viewPicker-anomalies",tooltip:"Anomaly Advisor - ML powered anomaly detection"},events:{id:"events",title:"Events",icon:"feed",path:"".concat(e,"/events"),dataGa:"view-picker::click-view-feed::mn-overview",testId:"viewPicker-feed",tooltip:"Activity feed"}})),[e]))(e),a=(0,_.dg)();var o,r;return[(0,s.useMemo)((()=>[!a&&n.home,n.nodes,n.overview,n.top,n.logs,!window.envSettings.isAgent&&!window.envSettings.onprem&&n.k8s,!a&&t&&n.dashboards,a&&t&&n.dashboard,n.alerts,n.ar,n.events].filter(Boolean)),[e,a]),(o=e,r=!a,(0,s.useMemo)((()=>({["".concat(o,"/overview")]:!0,["".concat(o,"/home")]:!0,["".concat(o,"/nodes")]:!0,["".concat(o,"/dashboards")]:!0,["".concat(o,"/dashboard")]:!0,["".concat(o,"/alerts")]:!0,["".concat(o,"/anomalies")]:!0,["".concat(o,"/top")]:!0,["".concat(o,"/logs")]:!0,["".concat(o,"/events")]:!0})),[o,r]))]};var j=n(8711),G=n(43375),W=n(47731),K=n(29217);const Z=(0,j.keyframes)(["0%{transform:translateY(-5px) scale(1);}25%{transform:translateY(-10px) scale(0.7);}50%{transform:translateY(-5px) scale(1);}75%{transform:translateY(0) scale(1.3);}100%{transform:translateY(-5px) scale(1);}"]),Y=(0,j.default)(P.Icon).attrs({name:"long_arrow_up",color:"successText"}).withConfig({displayName:"tabLink__DropHereIcon",componentId:"sc-1h4dha1-0"})(["animation:"," 1s ease-in 
infinite;"],Z),J=e=>e?"text":"textDescription",X=(0,s.forwardRef)(((e,t)=>{let{id:n,to:a,params:r,exact:i,icon:c,title:l,fixed:d,dataGa:h,testId:u,children:p,type:m,tooltip:g,showBorderLeft:f,droppable:b,droppableProps:y={},...v}=e;const{setNodeRef:_,active:w}=(0,G.zM)({id:"droppable-tab-".concat(n),disabled:!b,data:y}),k=(0,o.RQ)({end:i,path:a}),T=(0,o.Zp)(),S=(0,W.J)(),M=!!k,x="nodes"!==m||M,B=(0,s.useCallback)((()=>{if(M)return;const e="".concat(a).concat(r?"/".concat(r):"");T(e)}),[a,M,r]),q=b&&w&&w.data.current.dashboardable,I=(0,s.useRef)();return s.createElement(P.NavigationTab,(0,C.A)({ref:(0,P.mergeRefs)(_,t,I),fixed:d,active:M,showBorderLeft:f,icon:s.createElement(P.Icon,{name:c,size:"small"}),onActivate:B,"data-ga":h,"data-testid":u,"aria-selected":M,tooltip:g||l},q&&{rootProps:{background:"successSemi",cursor:"alias"}},v),!S&&!!l&&s.createElement(K.A,{content:g||l,align:g?"bottom":"top",isBasic:!0},s.createElement(P.TextSmall,{alignSelf:"center",color:J(M),whiteSpace:"nowrap"},l)),I.current&&q&&s.createElement(P.Drop,{target:I.current,align:{top:"bottom"},stretch:!1,hideShadow:!0,zIndex:1},s.createElement(Y,null)),x&&p)})),$=X,ee=()=>{const{active:e}=(0,G.fF)(),t=(0,c.JT)("dashboard:Create"),n=(0,_.dg)();return t&&e&&!n?s.createElement($,{to:"",id:"plus",icon:"plus",fixed:!0,droppable:!0,droppableProps:{dropArea:!0,dashboardId:"new"},testid:"roomDropdownMenu-roomOptions",disabled:!t||n}):null},te=(0,s.forwardRef)(((e,t)=>{let{title:n,path:a,children:o,index:r,...i}=e;return s.createElement($,(0,C.A)({ref:t,showBorderLeft:0===r,exact:!0,to:a,title:n,"data-testid":"navigation-dynamicTab-".concat(n),index:r},i),o)})),ne=e=>{let{staticPaths:t}=e;const[n,a,r]=(e=>{const t=(0,o.Zp)(),n=(0,o.RQ)("/spaces/:spaceSlug/rooms/:roomSlug/:type/*"),a=(0,I._F)(),r=(0,I.xK)(),i=(0,I.NU)(),c=n?n.pathnameBase:"/spaces",{pathname:l}=(0,o.zy)(),d=(0,q.A)(e[l]&&l,!0),h=(0,s.useCallback)(((e,n)=>{if(!n)return r(e);const s=a[e-1],o=a[e+1],i=(n,s)=>{const a="".concat(n).concat(s?"/".concat(s):"");t(a),r(e)};return d?i(d):s?i(s.path,s.params):o?i(o.path,o.params):i(c)}),[a,r,c,d]);return[(0,s.useMemo)((()=>a.map((e=>({...e,children:e.id&&"nodes"===e.type?s.createElement(R,{nodeId:e.id}):"alerts"===e.type?s.createElement(z,{alertId:e.id}):null})))),[a]),(e,t)=>{-1===e&&-1===t||i({sourceIndex:e,destinationIndex:t})},h]})(t),i=(0,B.A)("horizontal");return s.createElement(P.BaseDraggableTabs,{onDragEnd:a,onTabClose:r,items:n,Item:te,ref:i})},se=(0,s.memo)((()=>{const[e,t]=Q();return s.createElement(P.NavigationTabs,null,e.map(((e,t)=>{let{id:n,icon:a,title:o,path:r,...i}=e;return s.createElement($,(0,C.A)({exact:!0,fixed:!0,to:r,icon:a,title:o,id:n,key:"".concat(n,"-").concat(t)},i))})),s.createElement(P.TabSeparator,null),s.createElement(ne,{staticPaths:t}),s.createElement(ee,null))})),ae=se;var oe=n(67990),re=n(99739),ie=n(63314),ce=n(3705),le=n(83084),de=n(47762),he=n(16579),ue=n(27467),pe=n(99292),me=n(71847),ge=n(87659),fe=n(78062),be=n(38413),ye=n(82265),ve=n(1522);const _e=(0,d.K0)({key:"roomSettings",get:e=>{let{id:t,key:n}=e;return e=>{let{get:s}=e;const a=s((0,ye.A)(t));return n?a[n]:a}},set:e=>{let{id:t,key:n}=e;return(e,s)=>{let{set:a}=e;a((0,ye.A)(t),(e=>n?{...e,[n]:s}:s))}}}),we=(e,t)=>(0,d.vc)(_e({id:e,key:t})),ke=(e,t)=>{const n=we(e,t),s=function(e){let{key:t,shouldPersist:n=!0}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};const s=(0,d.lZ)(_e({id:e,key:t})),a=(0,ve.DH)(e);return(0,d.Zs)((o=>{let{snapshot:r}=o;return async o=>{if(s(o),!n)return;const i=await 
r.getPromise(_e({id:e}));try{await a({settings:{...i,...t?{[t]:o}:o}})}catch(c){s(t?i[t]:i)}}}),[e,t,a])}(e,{key:t});return[n,s]};var Te=n(15327),Se=n(74618),Me=n(45765),xe=n(18061),Ce=n(21204),Pe=n(69388),Be=n(23931);const qe=()=>{},Ie=e=>{let{roomId:t,id:n,onRemove:a=qe}=e;const o=(0,l.wz)(t,"name"),r=(0,Ce.e6)(t,n,"title"),i=(0,Ce.gV)(t),[c,,d,h]=(0,ge.A)();return s.createElement(s.Fragment,null,s.createElement(P.Button,{danger:!0,flavour:"hollow",onClick:d,label:"Delete metric","data-testid":"remove-metric"}),c&&s.createElement(P.ConfirmationDialog,{"data-ga":"remove-metric-dialog","data-testid":"removeMetricDialog",handleConfirm:async()=>{await i(n),a()},handleDecline:h,message:s.createElement(s.Fragment,null,"You are about to remove ",s.createElement("strong",null,r)," metric.",s.createElement("br",null),"Are you sure you want to continue?"),title:"Remove metric from ".concat(o)}))},Le=e=>{let{link:t,children:n}=e;return s.createElement(P.Flex,{as:"a",target:"_blank",href:t,gap:1,alignItems:"end"},s.createElement(P.TextNano,{textDecoration:"undeline"},n),s.createElement(P.Icon,{color:"text",name:"question",width:"16px",height:"16px"}))},Ae=e=>{let{title:t,help:n,link:a,disclaimer:o,children:r,...i}=e;return s.createElement(P.Flex,(0,C.A)({gap:1,column:!0},i),s.createElement(P.Flex,{justifyContent:"between",alignItems:"baseline"},s.createElement(P.Text,{as:"label",margin:[0]},t),n&&s.createElement(Le,{link:a},n)),r,o&&s.createElement(P.TextSmall,null,o))};var Ee=n(26655),De=n(78969);const Ne={value:"all",label:"All"},Re=e=>{let{dimensions:t,units:n}=e;return{dimensions:t?[Ne,...t.map((e=>{let{id:t,name:n}=e;return{value:t,label:n}}))]:[Ne],units:n}},Fe=e=>{let{isNew:t}=e;return s.createElement(P.Flex,{as:"span",gap:1},"Manage room",s.createElement(P.TextBig,{color:"textLite"},"/"),s.createElement(P.TextBig,{color:"textLite"},t?"Add":"Edit"," Metric"))},Ue=e=>{let{spaceId:t,roomId:n,id:a,onClose:o,...r}=e;const i="new"===a,c=(0,Ce.uB)(n,a),l=(0,Ce.XH)(n,{shouldPersist:!1}),d=(0,Ce.e6)(n,a),h=(0,s.useRef)(d),{title:u,context:p,dimensions:m=[]}=d,g=()=>{l(a,h.current),o()};(0,s.useEffect)((()=>{i&&c()}),[]);const f=(0,L.w7)({extraKey:"nodesView",merge:!1,scoped:!0}),b=(0,Pe.A)(n,f,{spaceId:t});if(b)throw b;const y=(0,Be.OL)(n),v=(0,s.useMemo)((()=>y.map((e=>({value:e,label:e})))),[y]),_=(0,Be._0)(n),w=(0,s.useMemo)((()=>p&&{value:p,label:p}),[p]),[{dimensions:k,units:T},S]=(0,xe.A)((()=>({enabled:!!p,fetch:()=>((e,t,n)=>Ee.A.get("".concat(De.P8,"/spaces/").concat(e,"/rooms/").concat(t,"/contexts/").concat(encodeURIComponent(n)),{transform:Re}))(t,n,p),initialValue:{dimensions:[],units:""}})),[t,n,p]),M=(0,s.useCallback)((e=>{let{value:t}=e;l(a,"context",t),l(a,"dimensions",[]),l(a,"title",t)}),[a]),x=(0,s.useMemo)((()=>k?1===k.length||m.length===k.length-1||0===m.length?[Ne]:m.map((e=>{let{id:t,name:n}=e;return{value:t,label:n}})):[Ne]),[m,k]),B=(0,s.useCallback)((e=>{const t=e.filter((e=>{let{value:t}=e;return t!==Ne.value})),n=(0===e.length||t.length!==e.length&&0!==m.length||t.length===k.length-1?[]:t).map((e=>{let{value:t,label:n}=e;return{id:t,name:n}}));l(a,"dimensions",n)}),[a,m,k]),q=(0,Ce.tQ)(n),I=(0,s.useCallback)((()=>q({...d,id:a,enableAllDimensions:0===m.length,unit:T}).then(o)),[d]),A=(0,s.useCallback)((e=>{let{target:{value:t}}=e;t.length<=30&&l(a,"title",t)}),[a]),E=!u||!p||S||!_;return 
s.createElement(Te.GO,(0,C.A)({onClose:g},r),s.createElement(Se.z,{onClose:g,title:s.createElement(Fe,{isNew:i})},s.createElement(P.Button,{label:"Save",onClick:I,disabled:E})),s.createElement(Me.U,null,i?"Add a new metric (column)":"Edit metric"),s.createElement(Te.Yv,null,s.createElement(P.Flex,{gap:6,column:!0},s.createElement(Ae,{title:"Context",help:"What is a context?",link:"https://learn.netdata.cloud/docs/data-collection/chart-dimensions-contexts-and-families#context","data-testid":"manageMetrics-context"},s.createElement(P.Select,{options:v,value:w,onChange:M,isLoading:!_,"data-testid":"manageMetrics-contextSelect"})),s.createElement(Ae,{title:"Metric Title","data-testid":"manageMetrics-title"},s.createElement(P.TextInput,{name:"title",placeholder:"Type name",value:u,onChange:A})),s.createElement(Ae,{title:"Dimensions",help:"What is a dimension?",link:"https://learn.netdata.cloud/docs/data-collection/chart-dimensions-contexts-and-families#dimension",disclaimer:x[0]===Ne&&"The returned value is the sum of all selected dimensions.","data-testid":"manageMetrics-dimensions"},s.createElement(P.Select,{isMulti:!0,options:k,value:x,onChange:B,isLoading:S,isDisabled:!w,"data-testid":"manageMetrics-dimensionSelect"})),!i&&s.createElement(P.Flex,{justifyContent:"end"},s.createElement(Ie,{roomId:n,id:a,onRemove:o})))))},ze=e=>{let{isOpen:t,onClose:n,onEdit:a,roomId:o,targetRef:r}=e;const[i,c]=(0,Ce.tY)(o);if(!r.current||!t)return null;const l=e=>{a(e),n()};return s.createElement(P.Drop,{align:{top:"bottom",right:"right"},animation:!0,background:"dropdown",column:!0,margin:[.5,0,0],overflow:{vertical:"auto"},padding:[3],round:!0,width:70,close:n,"data-testid":"metricsMenu",onClickOutside:n,onEsc:n,target:r.current},i.map((e=>s.createElement(P.Flex,{"data-testid":"metricsMenu-metric-".concat(e.title),key:e.id,justifyContent:"between",alignItems:"center",padding:[1,0]},s.createElement(P.Checkbox,{checked:!e.hidden,"data-testid":"metricsMenu-metricCheckbox",label:e.title,onChange:()=>c(e.id,"hidden",!e.hidden)}),s.createElement(P.IconButton,{"data-testid":"metricsMenu-editMetric",flavour:"borderless",icon:"pencilSolid",padding:[0],onClick:()=>l(e.id),width:"10px",height:"10px"})))),s.createElement(P.Button,{"data-testid":"metricsMenu-addMetricButton",label:"Add new metric",icon:"plus",onClick:()=>l("new"),small:!0,alignSelf:"end"}))},Oe=e=>{let{spaceId:t,roomId:n}=e;const[a,,o,r]=(0,ge.A)(),[i,c]=(0,s.useState)(null),l=(0,s.useRef)();return s.createElement(s.Fragment,null,s.createElement(P.Button,{"data-ga":"metric-setting::click-metric-setting::hm-nodes","data-testid":"metricSettings-button",flavour:"hollow",icon:"gear",neutral:!0,onClick:o,ref:l,label:"Metrics",small:!0}),s.createElement(ze,{isOpen:a,onClose:r,onEdit:c,roomId:n,targetRef:l}),i&&s.createElement(Ue,{"data-testid":"metricsManagement",spaceId:t,roomId:n,id:i,onClose:()=>c(null)}))};var He=n(667),Ve=n(92155);const Qe=()=>(0,me.H)("grouping","click-grouping","hm-nodes"),je=(0,Ve.A)(fe.A),Ge=[{icon:"node",label:"Node status",value:"nodeStatus","data-ga":"grouping::click-grouping-node-status::hm-nodes"},{icon:"alarm_bell",label:"Alert status",value:"alertStatus","data-ga":"grouping::click-grouping-status::hm-nodes"}],We=()=>{const e=(0,_.vt)(),t=(0,l.ID)(),[n,,a,o]=(0,ge.A)(),{onIntegrationsClick:r}=(0,He.A)(),[i,c]=ke(t,"groupMode"),d=(0,s.useMemo)((()=>Ge.find((e=>e.value===i))||Ge[0]),[i]),h=(0,s.useCallback)((e=>{let{value:t}=e;return c(t)}),[c]);return 
s.createElement(P.Flex,{alignItems:"center","data-testid":"agentsHead-actions",flexWrap:!1,gap:3},s.createElement(P.Select,{label:"Group by",onChange:h,onMenuOpen:Qe,options:Ge,styles:{size:"tiny"},value:d}),s.createElement(Oe,{roomId:t,spaceId:e}),s.createElement(P.Button,{icon:"integrations",flavour:"hollow",onClick:r,small:!0},"Integrations"),n&&s.createElement(be.A,{onClose:o}),s.createElement(je,{"data-ga":"add-nodes::click-add-nodes::hm-nodes","data-testid":"agentsHead-addNode",icon:"nodes_hollow",label:"Add Nodes",onClick:a,small:!0}))},Ke=()=>s.createElement(P.Flex,{width:"100%",justifyContent:"end",background:"mainBackground",padding:[2]},s.createElement(We,null));n(25509),n(65223),n(60321),n(41927),n(11632),n(64377),n(66771),n(12516),n(68931),n(52514),n(35694),n(52774),n(49536),n(21926),n(94483),n(16215);var Ze=n(79304),Ye=n(25950);const Je=function(e,t){let{getGrouping:n,extraKey:s}=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{},a={};const{menuGroups:o}=((e,t)=>{let{getNode:n,getGrouping:s,nodeMenus:a}=t;const o={};return e.forEach((e=>{const t=n(e);if(!t)return;const{name:o,priority:r,icon:i}=s(t);a[e]={...t,groupId:o,subMenuId:e,priority:r,groupIcon:i}})),[...e].sort(((e,t)=>{var n,s,o,r;return(null===(n=a[e])||void 0===n?void 0:n.priority)-(null===(s=a[t])||void 0===s?void 0:s.priority)||((null===(o=a[e])||void 0===o?void 0:o.name)||"").localeCompare((null===(r=a[t])||void 0===r?void 0:r.name)||"",void 0,{sensitivity:"accent",ignorePunctuation:!0})})).forEach((e=>{const t=a[e];if(!t)return;const n="".concat(t.groupId,"|").concat(t.subMenuId);o[t.groupId]||(o[t.groupId]=new Set),n&&o[t.groupId].add(t.id)})),{menuGroups:o}})(e,{getNode:t,getGrouping:n,nodeMenus:a}),r={},i=Object.keys(o).reduce(((e,t)=>{const n=[...o[t]],i=a[n[0]];return e[t]={level:0,name:i.groupId,id:t,subMenuIds:n,link:"".concat((0,Ze.A)("menu_".concat(t))),size:24,forceVisibility:!0,icon:i.groupIcon,extraKey:s},n.forEach((e=>{var n,o,i,c;const l=a[e];r[e]={...l,level:1,id:e,menuGroupId:t,link:"".concat((0,Ze.A)("menu_".concat(t,"_submenu_").concat(e))),size:24,forceVisibility:!0,icon:(null===(n=Ye.u[null===(o=l.os)||void 0===o?void 0:o.nm])||void 0===n?void 0:n.iconName)||(null===(i=Ye.U[null===(c=l.os)||void 0===c?void 0:c.id])||void 0===i?void 0:i.iconName)||"os",extraKey:s}})),e}),{}),c=Object.keys(o);let l=-1;const d=c.reduce(((e,t)=>{l+=1;return[...e,{...i[t],sticky:!0},...i[t].subMenuIds.reduce(((e,t)=>{l+=1;return[...e,r[t]]}),[])]}),[]);return a=null,{stickyIds:{},allElements:d,menuGroupIds:c,menuGroupById:i,subMenuById:r}},Xe="clear",$e="warning",et="critical",tt="unreachable";var nt=n(82432);const st={alertStatus:{[et]:1,[$e]:2,[Xe]:3,[tt]:4},nodeStatus:{Live:1,Stale:2,Offline:3}},at={nodeStatus:e=>(0,nt.GM)(e.state),alertStatus:e=>(0,nt.vt)(e)};var ot=n(79566),rt=n(72253),it=n(44741),ct=n(10952),lt=n(12412);const dt=e=>{let{id:t,...n}=e;return s.createElement(it.k,(0,C.A)({id:t},n),s.createElement(ct._,{id:t}),s.createElement(lt.G,{id:t}))},ht=(0,s.memo)(dt);var ut=n(50466),pt=n(28146),mt=n(42402),gt=n(74379),ft=n(4959);const bt=e=>{let{connectivity:t,id:n,name:a,...o}=e;const r=(0,c.JT)("node:Delete"),i=!(0,l.XA)().untouchable,[d,,h,u]=(0,ge.A)(),[p,,m,g]=(0,ge.A)();return 
s.createElement(ft.SS,(0,C.A)({alignSelf:"end",gap:1,"data-testid":"node-actions"},o),i&&s.createElement(s.Fragment,null,s.createElement(pt.A,{badge:"actionRemove","data-testid":"remove-node-action"},s.createElement(ft.d0,{"data-ga":"nodes-table-row::click-remove-node::nodes-view",onClick:m})),p&&s.createElement(gt.Ay,{ids:[n],name:a,onClose:g})),r&&"Offline"===t&&s.createElement(s.Fragment,null,s.createElement(pt.A,{badge:"actionObsolete","data-testid":"obsolete-node-action"},s.createElement(ft.AS,{"data-ga":"nodes-table-row::click-obsolete-node::nodes-view",onClick:h})),d&&s.createElement(mt.Ay,{ids:[n],name:a,onClose:u})))};var yt=n(37019);const vt=e=>{let{instanceType:t,name:n,providerType:a,...o}=e;return s.createElement(pt.A,(0,C.A)({badge:"cloudInfo","data-testid":"node-cloud-info",nodeName:n},o),s.createElement(P.Flex,{alignItems:"center",gap:1},s.createElement(P.Icon,{color:"textDescription","data-testid":"node-cloud-info-icon",name:"ipNetworking",width:"18px",height:"18px"}),s.createElement(P.TextSmall,{color:"textDescription","data-testid":"node-cloud-info-text"},(0,nt.ws)(a)," \u2022 ",(0,nt.ws)(t))))},_t=e=>{let{connectivity:t,name:n,...a}=e;return s.createElement(pt.A,(0,C.A)({badge:"connectivity",connectivityState:t,"data-testid":"node-connectivity",nodeName:n},a),s.createElement(P.Icon,{name:"connectivityStatus".concat(t),width:"18px",height:"18px"}))},wt=e=>{let{onClick:t,...n}=e;return s.createElement(pt.A,(0,C.A)({badge:"info","data-testid":"node-info-cta"},n),s.createElement(P.Icon,{color:"textDescription",cursor:"pointer",height:"18px",width:"18px",name:"information",onClick:t}))},kt=e=>{let{name:t,...n}=e;return s.createElement(pt.A,(0,C.A)({badge:"k8s","data-testid":"node-kubernetes",nodeName:t},n),s.createElement(P.Flex,{alignItems:"center",gap:1},s.createElement(P.Icon,{color:"textDescription","data-testid":"node-kubernetes-icon",name:"serviceKubernetes",width:"18px",height:"18px"}),s.createElement(P.TextSmall,{color:"textDescription","data-testid":"node-kubernetes-text"},"k8s")))},Tt=e=>{let{kernelName:t,kernelVersion:n,name:a,...o}=e;return s.createElement(pt.A,(0,C.A)({badge:"kernel","data-testid":"node-kernel",nodeName:a},o),s.createElement(P.TextSmall,{color:"textDescription"},(0,nt.Ud)(t,n)))};var St=n(88494),Mt=n(63119);const xt=e=>{var t,n;let{architecture:a,cpuFrequency:o,cpus:r,diskSpace:i,memory:c,name:l,os:d,osName:h,...u}=e;return s.createElement(pt.A,(0,C.A)({badge:"systemInfo","data-testid":"node-system-info",nodeName:l},u),s.createElement(P.Flex,{alignItems:"center",gap:1},s.createElement(P.Icon,{color:"textDescription","data-testid":"node-system-info-icon",name:(null===(t=Ye.u[h])||void 0===t?void 0:t.iconName)||(null===(n=Ye.U[d])||void 0===n?void 0:n.iconName)||"os",width:"16px",height:"16px"}),s.createElement(P.TextSmall,{color:"textDescription","data-testid":"node-system-info-text"},(0,nt.ws)(h)," \u2022 ",(0,nt.O)(o)," \u2022"," ",(0,nt.ws)(r,1===r?"Core":"Cores")," \u2022 ",(0,nt.ws)(a)," ","\u2022 ",(0,nt.ws)((0,nt.RI)(c),"RAM")," \u2022"," ",(0,nt.ws)((0,nt.RI)(i),"HD"))))},Ct=e=>{let{container:t,name:n,virtualization:a,...o}=e;const r=(0,nt.Pg)(t,a);return s.createElement(pt.A,(0,C.A)({badge:"type","data-testid":"node-type",nodeName:n,nodeType:r.label},o),s.createElement(P.Flex,{alignItems:"center",gap:1},s.createElement(P.Icon,{color:"textDescription","data-testid":"node-type-icon",name:r.icon,height:r.iconSize,width:r.iconSize}),s.createElement(P.TextSmall,{color:"textDescription","data-testid":"node-type-text"},r.label)))};var 
Pt=n(33195);const Bt=e=>{let{health:t={}}=e;const{silencingState:n}=t.alerts||{},{state:a}=n||{};return"NONE"!=a?s.createElement(s.Fragment,null,s.createElement(ft.K,{"data-testid":"nodeRow-separator"}),s.createElement(P.Flex,null,s.createElement(Pt.A,{flavour:"node",silencing:n}))):null};var qt=n(47130);const It=(0,qt.A)((0,Ve.A)(P.Button)),Lt=e=>{let{id:t}=e;const n=(0,o.Zp)(),a=(0,_.bq)(),r=(0,s.useCallback)((()=>{const e={nodeId:t};n("/spaces/".concat(a,"/settings/notifications#notificationsActiveTab=1&silencingRulePrefill=").concat(JSON.stringify(e)))}),[]);return s.createElement(It,{feature:"SilencingNode",payload:{nodeId:""},tooltip:"Create a new silencing rule for this node",flavour:"hollow",icon:"alarm_off",iconWidth:"12px",tiny:!0,onClick:r})};var At=n(55905),Et=n(80925);n(3064),n(72577);const Dt=(e,t)=>e&&e.getRoot().getChildren().find((e=>e.match({id:"nodes-".concat(t)})));var Nt=n(36196);const Rt=e=>{let{id:t,roomId:n,nodeId:a,context:o,dimensions:r}=e;const i=(0,Et.e)(),c=(0,s.useMemo)((()=>{const e=Dt(i,n),s=[t,a,o].join("-");if(!e)return null;let c=e.getNode({id:s});return c?(c.updateAttribute("selectedDimensions",r.map((e=>e.name))),c):(c=i.makeChart({attributes:{id:s,nodesScope:[a],contextScope:[o],pixelsPerPoint:20,selectedDimensions:r.map((e=>e.name))}}),e.appendChild(c),c)}),[o,t,a,n,i]);return c?s.createElement(Nt.A,{chart:c,hasHeader:!1,hasFooter:!1,hasFilters:!1,height:100}):null},Ft=(0,s.memo)(Rt);var Ut=n(75793);const zt=e=>{let{id:t,context:n,nodeId:a,isLive:o,hidden:r,title:i,dimensions:c}=e;const d=(0,l.ID)();return r||!o?null:s.createElement(P.Flex,{column:!0,"data-testid":"metrics-chart-".concat(n),flex:"grow",basis:"174px"},s.createElement(Ut.A,{text:i||n,TextComponent:P.TextSmall,color:"textDescription",margin:[1,0,1,2],as:"div"}),s.createElement(Ft,{id:t,context:n,nodeId:a,roomId:d,dimensions:c}))};var Ot=n(8239),Ht=n(50876);const Vt=e=>{var t;let{node:n}=e;const[,a]=(0,Ot.OD)(),{sendButtonClickedLog:o}=(0,Ht.A)(),r=null===(t=n.capabilities)||void 0===t||null===(t=t.dyncfg)||void 0===t?void 0:t.enabled,i=(0,s.useCallback)((()=>{a({node:n}),o({feature:"DyncnfNode",description:"Configure node",nodeId:null===n||void 0===n?void 0:n.id,nodeName:null===n||void 0===n?void 0:n.name})}),[a,o]),c=(0,s.useMemo)((()=>({name:"gear",size:"small",color:"text",cursor:"pointer",onClick:i})),[i]);return n.isLive&&r?s.createElement(s.Fragment,null,s.createElement(ft.K,{"data-testid":"nodeRow-separator"}),s.createElement(pt.A,{content:"Configure this node"},s.createElement(P.Flex,null,s.createElement(P.Icon,c)))):null},Qt=e=>{var t,n;let{id:a,name:o,hw:r,os:i,ni:c,capabilities:d,container:h,updateSeverity:u,labels:p,isLive:m,state:g,version:f,extraKey:b,isPreferred:y,health:v}=e;const w=(0,_.dg)(),k=(0,l.ID)(),T=null===(t=d.funcs)||void 0===t?void 0:t.enabled,S=null===(n=d.ml)||void 0===n?void 0:n.enabled,M=(0,nt.GM)(g),{_cloud_instance_type:x,_cloud_provider_type:B}=p||{},q="true"===(null===p||void 0===p?void 0:p._is_k8s_node),I=!(0,nt.Po)(x)||!(0,nt.Po)(B),L=(0,ue.Fw)("sidebarTab",{flavour:"val",extraKey:b}),A=(0,ue.Fw)("sidebarOpen",{flavour:"bool"}),E=(0,ue.Fw)("sidebarNodeId",{flavour:"val",extraKey:b}),D=(0,s.useCallback)((e=>{A(!0),L(e),E(a)}),[a]),N=(0,s.useCallback)((()=>D("alerts")),[D]),R=(0,s.useCallback)((()=>D("info")),[D]),F=(0,ut.Id)(),U=(0,Ce.Vw)(k);return 
s.createElement(P.Flex,{"data-testid":"nodeRow-".concat(o),"data-submenuid":a,padding:[1],column:!0,gap:1,round:!0},s.createElement(P.Flex,{alignItems:"center","data-testid":"nodeRow-basic-info",gap:2,height:{min:6}},s.createElement(_t,{connectivity:M,"data-testid":"nodeRow-connectivity-".concat(M.toLowerCase()),name:o}),s.createElement(ft.K,{"data-testid":"nodeRow-separator"}),s.createElement(Mt.A,{"data-testid":"nodeRow-name",id:a,name:o,isLive:m,state:g,isPreferred:y}),s.createElement(ft.K,{"data-testid":"nodeRow-separator"}),s.createElement(wt,{onClick:R,"data-testid":"nodeRow-more-info-cta"}),s.createElement(ft.K,{"data-testid":"nodeRow-separator"}),s.createElement(yt.A,{id:a,isLive:m,"data-testid":"nodeRow-alerts",name:o,onClick:N}),s.createElement(ft.K,{"data-testid":"nodeRow-separator"}),s.createElement(St.A,{badge:"ml","data-testid":"nodeRow-machine-learning-status",enabled:S,name:o},s.createElement(P.Icon,{name:"anomaliesLens",size:"small",color:"text"})),s.createElement(ft.K,{"data-testid":"nodeRow-separator"}),s.createElement(St.A,{badge:"fn","data-testid":"nodeRow-fn-status",enabled:T,name:o,onClick:()=>F(a)},s.createElement(P.Icon,{name:"functions",size:"small",color:"text"})),u&&s.createElement(s.Fragment,null,s.createElement(ft.K,{"data-testid":"nodeRow-separator"}),s.createElement(At.A,{name:o,os:i.id,container:h,warningLevel:u,labels:p,version:f,text:u,"data-testid":"nodeRow-needs-update"})),s.createElement(Vt,{node:{id:a,name:o,isLive:m,ni:c,capabilities:d}}),s.createElement(bt,{"data-testid":"nodeRow-node-actions",id:a,name:o,connectivity:M}),w?null:s.createElement(s.Fragment,null,s.createElement(Bt,{health:v}),s.createElement(Lt,{id:a}))),s.createElement(P.Flex,{alignItems:"center","data-testid":"nodeRow-detailed-info",gap:2},s.createElement(xt,{architecture:r.architecture,cpuFrequency:r.cpuFrequency,cpus:r.cpus,"data-testid":"nodeRow-system-info",diskSpace:r.diskSpace,memory:r.memory,name:o,osName:i.nm,os:i.id}),s.createElement(ft.K,{"data-testid":"nodeRow-separator"}),!(null===i||void 0===i||!i.kernel)&&s.createElement(Tt,{"data-testid":"nodeRow-kernel",kernelName:i.kernel.nm,kernelVersion:i.kernel.v,name:o}),s.createElement(ft.K,{"data-testid":"nodeRow-separator"}),q&&s.createElement(s.Fragment,null,s.createElement(kt,{"data-testid":"nodeRow-kubernetes",name:o}),s.createElement(ft.K,{"data-testid":"nodeRow-separator"})),s.createElement(Ct,{container:r.container,"data-testid":"nodeRow-type",name:o,virtualization:r.virtualization}),I&&s.createElement(s.Fragment,null,s.createElement(ft.K,{"data-testid":"nodeRow-separator"}),s.createElement(vt,{"data-testid":"nodeRow-cloud-info",instanceType:x,name:o,providerType:B}))),y&&!(null===U||void 0===U||!U.length)&&s.createElement(P.Flex,{"data-testid":"nodeRow-charts",gap:1},U.map((e=>s.createElement(zt,(0,C.A)({key:e.id},e,{nodeId:a,isLive:m}))))))},jt=e=>{switch(e.level){case 0:return ht;case 1:return Qt;default:return null}},Gt=e=>{let{onChartNameChange:t,initialChartName:n,dashboardOptions:a,linkToGo:o,contextToGo:r,...i}=e;const{setActiveMenuGroupId:c,setActiveSubMenuId:l}=(0,ot.A)({onChartNameChange:t,initialChartName:n,linkToGo:o,contextToGo:r});return s.createElement(he.H,i,s.createElement(rt.A,{onActiveMenuGroupId:c,onActiveSubMenuId:l,getComponent:jt,dashboardOptions:a,initialChartName:n,checkVisibility:()=>!0}))},Wt=["config"],Kt=()=>{const e=(0,_.vt)(),t=(0,l.ID)(),n=(()=>{const e=(0,l.ID)();let t=we(e,"groupMode")||"nodeStatus";const n=at[t]||at.nodeStatus;return(0,s.useCallback)((e=>{var s;const 
a=n(e);return{name:a,priority:at[t]?null===st||void 0===st||null===(s=st[t])||void 0===s?void 0:s[a]:st.nodeStatus,icon:"nodeStatus"===t?"connectivityStatus".concat(a):null}}),[t])})(),a=(0,L.w7)({extraKey:"nodesView",merge:!1,scoped:!0}),o=(0,de.Y7)(),[r,{width:i}]=(0,ce.A)();!function(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:[],{host:n,width:a}=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{};const o=(0,Et.e)(),r=(0,s.useRef)();(0,s.useMemo)((()=>{if(!o)return;let s=Dt(o,e);s&&s.destroy(),s=o.makeContainer({attributes:{host:n,enabledXAxis:!1,id:"nodes-".concat(e),hasToolbox:!1,height:"75px",groupingMethod:"average",groupBy:["dimension"],aggregationMethod:"avg",legend:!1,axisLabelFontSize:7,yAxisLabelWidth:20,hasYlabel:!1,nodesScope:t,containerWidth:a}}),r.current=s,o.appendChild(s)}),[e]),(0,s.useMemo)((()=>{r.current&&(r.current.setAttribute("containerWidth",a),r.current.getNodes().forEach((e=>e.updateAttribute("containerWidth",a))))}),[r.current,a]),(0,s.useLayoutEffect)((()=>()=>r.current&&r.current.destroy()),[])}(t,a,{host:(0,_.dg)()?"".concat(window.envSettings.agentApiUrl,"/api/v2"):"".concat(window.envSettings.apiUrl,"/api/v3/spaces/").concat(e,"/rooms/").concat(t),width:i});const[c,d]=(0,ue.N9)("nodeIdToGo",{key:t,extraKey:"nodesView",flavour:"val"});return s.createElement(he.A,{getObject:o,ids:a,getMenu:Je,getGrouping:n,deps:[n],extraKey:"nodesView"},s.createElement(le.A,{ref:r,position:"relative",sidebar:s.createElement(pe.Ay,{hiddenTabs:Wt,nodeIds:a,title:"Nodes",initialChartName:c,flavour:"nodesView",loaded:!0,hasSearch:!1})},s.createElement(Ke,null),s.createElement(Gt,{initialChartName:c,onChartNameChange:d})))},Zt=()=>{const e=(0,l.ID)(),t=(0,oe.CK)();return(0,oe.gr)(e,"loaded")&&!t.length?s.createElement(ie.Ay,{feature:"NodesView",mode:"NoNodesView"},s.createElement(re.A,null)):s.createElement(ie.Ay,{feature:"NodesView"},s.createElement(Kt,null))};var Yt=n(76777),Jt=n(4659),Xt=n(32089),$t=n(6586),en=n(12602);const tn=()=>s.createElement(P.Flex,{padding:[6],round:1,width:"600px",background:"modalInfoBackground"},s.createElement(P.Box,{margin:[0,4,0,0]},s.createElement(P.Box,{as:P.Icon,width:10,height:10,name:"nodes_update"})),s.createElement(P.Flex,{column:!0,gap:2},s.createElement(P.Text,{strong:!0},"Couldn't find the chart you were looking for? "),s.createElement(P.Text,{color:"textDescription"},"Netdata has zero-configuration auto-detection for most applications and systems, this is achieved using collectors. 
If you miss some specific chart please check our"," ",s.createElement(Jt.A,{href:"https://learn.netdata.cloud/docs/agent/collectors",target:"_blank","data-ga":"chart-area::click-link-collectors::charts-view"},"list of collectors")," ","to see if any additional step is needed."))),nn=()=>s.createElement(P.Flex,{width:"100%",column:!0,gap:6,margin:[8,0,0,0]},s.createElement(Xt.A,null),s.createElement(P.Flex,{gap:1,alignItems:"center",justifyContent:"center",padding:[2],background:"successSemi"},s.createElement(en.A,null,s.createElement(P.Text,{color:"primary"},"Upgrade to Business for unlimited access")),s.createElement(P.Text,null,"or"),s.createElement($t.A,null,s.createElement(P.Text,{color:"primary"},"review your Space active Nodes")))),sn=e=>{let{noPreferredNodesError:t}=e;return s.createElement(P.Flex,{column:!0,justifyContent:"center",alignItems:"center",alignSelf:"center",margin:[30,0,0]},s.createElement(P.Flex,{column:!0,justifyContent:"center",alignItems:"center",width:"300px",margin:[0,0,6]},s.createElement(P.H3,{margin:[0,0,4]},"No charts to display"),s.createElement(P.Text,{color:"textDescription",textAlign:"center"},"Double-check your search or filters and dates and try again with different conditions.")),s.createElement(tn,null),t&&s.createElement(nn,null))};var an=n(9683),on=n(77173),rn=n(58205);const cn=()=>{const e=(0,l.ID)(),t=(0,c.JT)("node:Create");return s.createElement(le.A,{column:!1,"data-testid":"noNodesViewPage",gap:12,padding:[6]},s.createElement(P.Flex,{column:!0,gap:5,flex:{shrink:0,grow:0},"data-testid":"noKubernetesView-videoSection"},s.createElement(an.A,{"data-testid":"noNodesView-onboardingVideo",height:350,width:850,videoId:De.BX[0]}),s.createElement(an.A,{"data-testid":"noNodesView-onboardingVideo",height:350,width:850,videoId:De.BX[1]})),s.createElement(P.Flex,{column:!0,gap:4},s.createElement(P.Box,{border:{side:"bottom",color:"borderSecondary"}},s.createElement(P.Box,{margin:[0,0,2,0]},s.createElement(Jt.A,{href:"https://learn.netdata.cloud/guides/monitor/kubernetes-k8s-netdata",target:"_blank",rel:"noopener noreferrer","data-ga":"k8s-tab::click-link-guides::kubernetes-overview-and-visualizations"},s.createElement(P.TextBig,{color:"primary"},"Kubernetes monitoring with Netdata: Overview and visualizations"))),s.createElement(P.Box,{margin:[0,0,2,0]},s.createElement(Jt.A,{href:"https://learn.netdata.cloud/docs/cloud/visualize/kubernetes",target:"_blank",rel:"noopener noreferrer","data-ga":"k8s-tab::click-link-guides::kubernetes-visualizations"},s.createElement(P.TextBig,{color:"primary"},"Kubernetes visualizations")))),s.createElement(P.Flex,{column:!0,gap:4},t?s.createElement(on.A,{integrationId:rn.v2,rooms:[e]}):s.createElement(re.S,null))))};var ln=n(45467),dn=n(61427);const hn=j.default.div.withConfig({displayName:"selections__Separator",componentId:"sc-1cc7y18-0"})(["width:1px;height:",";background:",";"],(0,P.getSizeBy)(2.5),(0,P.getColor)("borderSecondary")),un=[{label:"Volume",value:"volume"},{label:"KS2",value:"ks2"}],pn=[{label:"Average",value:"average"},{label:"Median",value:"median"},{label:"Min",value:"min"},{label:"Max",value:"max"},{label:"Stddev",value:"stddev"}],mn=[{label:"Metrics",value:""},{label:"Anomaly Rate",value:"anomaly-bit"}],gn=()=>s.createElement(P.Flex,{column:!0,gap:1,alignItems:"center",width:"220px"},s.createElement(P.TextMicro,null,"Choose the algorithm you want to be used to identify correlations across metrics:"),s.createElement(P.TextMicro,null,s.createElement(P.TextMicro,{strong:!0},"KS2:")," A statistical test 
comparing the distribution of the highlighted window to the baseline."),s.createElement(P.TextMicro,null,s.createElement(P.TextMicro,{strong:!0},"Volume:")," Percentage change in averages between highlighted window and baseline.")),fn=()=>s.createElement(P.Flex,{column:!0,gap:1,alignItems:"center",width:"220px"},s.createElement(P.TextMicro,null,"What aggregation function do you want to apply when aggregating multiple datapoints for metric correlations.")),bn=()=>s.createElement(P.Flex,{column:!0,gap:1,alignItems:"center",width:"220px"},s.createElement(P.TextMicro,null,"Do you want to find correlations over the metric values or the anomaly rates of each metric.")),yn=e=>{let{runGetWeights:t,nodeIds:n,flavour:a}=e;const{method:o,options:r,group:i}=(0,dn.G9)({nodeIds:n,flavour:a}),c=null!==r&&void 0!==r&&r.includes(mn[1].value)?mn[1].value:"",l=(0,s.useCallback)((e=>n=>{let{value:s}=n;s="options"!==e?s:s?[s]:[];t({method:o,options:[r],group:i,[e]:s})}),[o,r,i]),d=(0,s.useMemo)((()=>({method:un.find((e=>e.value===o)),group:pn.find((e=>e.value===i)),option:mn.find((e=>e.value===c))})),[o,i,c]);return i?s.createElement(P.Flex,{gap:2},s.createElement(P.Flex,{gap:1,alignItems:"center"},s.createElement(P.TextSmall,{color:"textLite"},"Method:"),s.createElement(P.Select,{options:un,value:d.method,onChange:l("method")}),s.createElement(K.A,{isBasic:!0,plain:!0,content:gn,align:"bottom"},s.createElement(P.Icon,{name:"information",color:"textDescription",size:"small"}))),s.createElement(hn,null),s.createElement(P.Flex,{gap:1,alignItems:"center"},s.createElement(P.TextSmall,{color:"textLite"},"Aggregation:"),s.createElement(P.Select,{options:pn,value:d.group,onChange:l("group")}),s.createElement(K.A,{isBasic:!0,plain:!0,content:fn,align:"bottom"},s.createElement(P.Icon,{name:"information",color:"textDescription",size:"small"}))),s.createElement(hn,null),s.createElement(P.Flex,{gap:1,alignItems:"center"},s.createElement(P.TextSmall,{color:"textLite"},"Data:"),s.createElement(P.Select,{options:mn,value:d.option,onChange:l("options")}),s.createElement(K.A,{isBasic:!0,plain:!0,content:bn,align:"bottom"},s.createElement(P.Icon,{name:"information",color:"textDescription",size:"small"}))),s.createElement(hn,null)):null};var vn=n(21290);const _n=e=>{let{highlightAfter:t,highlightBefore:n,baseline:a}=e;const{localeDateString:o,localeTimeString:r}=(0,vn.$j)(),i=new Date(t),c=new Date(n),l=new Date(a),d=new Date(t),h=Math.round((n-t)/1e3),u=Math.round((t-a)/1e3),p=(0,ue.rI)("highlight");return s.createElement(P.Flex,{gap:4,alignItems:"center"},s.createElement(P.Flex,{gap:2},s.createElement(P.Flex,{column:!0,gap:1},s.createElement(P.TextSmall,{color:"textLite"},"Selected area:"),s.createElement(P.TextSmall,{color:"textLite"},"Reference baseline:")),s.createElement(P.Flex,{column:!0,gap:1},s.createElement(P.TextSmall,{"data-testid":"metricCorrelation-selectedArea"},o(i,{long:!1}),","," ",r(i),"\xa0->\xa0",o(c,{long:!1}),","," ",r(c)),s.createElement(P.TextSmall,{"data-testid":"metricCorrelation-referenceBaseline"},o(l,{long:!1}),","," ",r(l),"\xa0->\xa0",o(d,{long:!1}),","," 
",r(d))),s.createElement(P.Flex,{column:!0,gap:1},s.createElement(P.TextSmall,{color:"textLite"},"Duration:"),s.createElement(P.TextSmall,{color:"textLite"},"Duration:")),s.createElement(P.Flex,{column:!0,gap:1},s.createElement(P.TextSmall,{"data-testid":"metricCorrelation-selecteArea-duration"},h,"\xa0secs"),s.createElement(P.TextSmall,{"data-testid":"metricCorrelation-referenceBaseline-duration"},u,"\xa0secs"))),s.createElement(K.A,{content:"Clear selection",align:"bottom",isBasic:!0},s.createElement(P.Button,{neutral:!0,flavour:"borderless",icon:"trashcan",onClick:()=>{p({after:null,before:null})},"data-ga":"metric-correlation::click-delete::charts-view","data-testid":"metricCorrelation-delete"})))},wn=(0,Ve.A)(P.Button),kn=(0,j.default)(P.Button).withConfig({displayName:"correlation__LogoButton",componentId:"sc-15d72m3-0"})(["&&{pointer-events:none;}"]),Tn=(0,j.default)(P.Button).withConfig({displayName:"correlation__CloseButton",componentId:"sc-15d72m3-1"})(["position:absolute !important;top:-3px;right:-3px;"]),Sn=(0,Ve.A)(Tn),Mn=(0,j.default)(P.Button).attrs({padding:[0],margin:[0,0,0,1.5],width:"auto"}).withConfig({displayName:"correlation__StyledButton",componentId:"sc-15d72m3-2"})(["height:16px !important;> span{margin:0 !important;}"]),xn=e=>{let{flavour:t}=e;const n=(0,_.vt)(),a=(0,l.ID)(),o=(0,de.nl)(),r=(0,s.useMemo)((()=>o?[o]:[]),[o]),i=(0,L.w7)({extraKey:"nodesView",merge:!1,scoped:!0}),c="singleNode"===t?r:i,[d,h]=(0,s.useState)(!1),[u,p]=(0,s.useState)(),{threshold:m,totalDimensionsCount:g,dimensionsCount:f,loaded:b,loading:y,getWeights:v,resetWeights:w}=(0,dn.Yy)({nodeIds:c,flavour:t},n,a),{method:k,options:T,group:S}=(0,dn.G9)({nodeIds:c,flavour:t}),{after:M,before:x}=(0,ue.rW)("highlight"),C=M-4*(x-M),B=!!M;(0,s.useEffect)((()=>{if(b){let e="default-result";if(S){const t=null!==T&&void 0!==T&&T.includes("anomaly-bit")?"anomaly-bit":"metrics";e="default-result-".concat(k,"-").concat(S,"-").concat(t)}return(0,me.H)("metrics-correlation",e,"charts-view",f.toString())}}),[b]),(0,ln.A)((()=>{M&&h(!0)}),[M]);const q=(0,ue.rI)("correlation"),I=(0,ue.rI)("threshold"),A=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};h(!1),p(),v({highlightAfter:M,highlightBefore:x,baselineAfter:C,baselineBefore:M,method:k,options:T,group:S,nodeIds:c,...e}).then((()=>I(dn.UG)))},E=!B||y;return(0,ln.A)((()=>{b&&d&&B&&A()}),[b,B,d]),s.createElement(P.Flex,{round:2,background:"elementBackground",padding:[2,6,2,2],alignItems:"center",justifyContent:"between",flex:!0,"data-testid":"correlation",position:"relative",height:{min:"64px"},gap:2},B?b?s.createElement(ie._0,{feature:"MetricCorrelations",totalDimensionsCount:g,dimensionsCount:f},s.createElement(P.Flex,{"data-testid":"metricCorrelation-resultsContainer",column:!0,width:"100%"},s.createElement(P.Flex,{justifyContent:"between",alignItems:"center",gap:1},s.createElement(P.Flex,{alignItems:"center",gap:1},s.createElement(P.Icon,{name:"correlation",color:"primary",width:"16px",height:"16px"}),s.createElement(P.TextSmall,null,"Analysed"," ",s.createElement(P.TextSmall,{color:"primary","data-testid":"metricCorrelation-resultsAnalyzed"},g)," ","and found"," ",s.createElement(P.TextSmall,{color:"primary","data-testid":"metricCorrelation-resultsFound"},f)," ","correlated metrics."),s.createElement(K.A,{content:"Clear 
results"},s.createElement(Mn,{flavour:"borderless",neutral:!0,disabled:y||!b&&!B,onClick:()=>{I(dn.UG),w(),(0,me.H)("metrics-correlation","click-delete","charts-view","".concat(b?"finish":"start"))},"data-testid":"metricCorrelation-clear",small:!0,icon:"reload"}))),s.createElement(yn,{runGetWeights:A,nodeIds:c,flavour:t})),s.createElement(P.Flex,{width:"100%"},s.createElement(P.Flex,{alignItems:"center",gap:3,margin:[0,2,0,5],width:"100%"},s.createElement(P.TextNano,{whiteSpace:"nowrap",color:"textDescription"},"Show less"),s.createElement(P.InputRange,{"data-testid":"metricCorrelation-resultsSlider",min:0,max:1,onChange:e=>I(e.target.value.toString()),onClick:()=>(0,me.H)("metric-correlation","click-slider","charts-view"),step:.01,value:m}),s.createElement(P.TextNano,{whiteSpace:"nowrap",color:"textDescription"},"Show more")),s.createElement(s.Fragment,null,s.createElement(K.A,{content:"Give us your feedback! Was it useful?",align:"bottom",enterDelay:200,showArrow:!0,activateOn:"hover"},s.createElement(P.Flex,null,s.createElement(P.Button,{flavour:"+"===u?"hollow":"borderless",disabled:u,icon:"thumb_up",onClick:()=>p("+"),"data-ga":"metric-correlation::click-thumbs-up::charts-view","data-testid":"metricCorrelation-thumbsUp",small:!0}),s.createElement(P.Button,{flavour:"-"===u?"hollow":"borderless",disabled:u,icon:"thumb_down",onClick:()=>p("-"),"data-ga":"metric-correlation::click-thumbs-down::charts-view","data-testid":"metricCorrelation-thumbsDown",small:!0}))))))):s.createElement(P.Flex,{alignItems:"center","data-testid":"correlationView-timePeriod-container",gap:4},s.createElement(kn,{icon:"logo_s",isLoading:y}),y?s.createElement(P.H5,{"data-testid":"metricCorrelation-loadingCalculation"},"Calculating metric correlations..."):s.createElement(_n,{highlightAfter:M,highlightBefore:x,baseline:C})):s.createElement(P.Flex,{alignItems:"center",gap:1},s.createElement(P.Icon,{name:"correlation",color:"primary"}),s.createElement(P.Text,null,"Select a timeframe on any chart and find correlated metrics. 
Visit documentation"),s.createElement("a",{href:"https://learn.netdata.cloud/docs/cloud/insights/metric-correlations",target:"_blank",rel:"noreferrer"},s.createElement(P.Icon,{name:"nav_arrow_goto",color:"primary",width:"12px",height:"12px"}))),s.createElement(P.Flex,{alignItems:"center"},(!b||y)&&s.createElement(K.A,{content:E?"Select an area of interest on any chart":"Click to find correlated metrics for the selected area",align:"bottom",enterDelay:200,showArrow:!0,activateOn:"hover"},s.createElement(wn,{label:y?"Loading...":"Find correlations",isLoading:!1,onClick:A,disabled:E,flavour:"hollow","data-ga":"metric-correlation::click-find-correlation::charts-view","data-testid":"metricCorrelation-find",small:!0,margin:[0,2,0,0],feature:"MetricCorrelations"})),s.createElement(K.A,{content:"Close",align:"bottom"},s.createElement(Sn,{neutral:!0,flavour:"borderless",onClick:()=>{q(!1),I(dn.UG),w(),(0,me.H)("metrics-correlation","click-close","charts-view","".concat(b?"finish":"start"))},"data-ga":"metric-correlation::click-close::charts-view","data-testid":"metricCorrelation-close",icon:"x",feature:"MetricCorrelations"}))))},Cn=(0,Ve.A)(P.Button),Pn=()=>{const e=(0,ue.rI)("correlation");return s.createElement(K.A,{content:()=>s.createElement(P.Box,null,s.createElement("strong",null,"Troubleshoot with Metric Correlations"),s.createElement("br",null),s.createElement("br",null),"Metric correlations will help you identify potential root causes for an observed issue.",s.createElement("br",null),"It will automatically analyse all available metrics for irregular behavior for the same timeframe."),align:"bottom",activateOn:"hover",isBasic:!0},s.createElement(Cn,{width:"118px",flavour:"hollow",label:"Metric Correlations",onClick:()=>{e(!0)},"data-ga":"metrics-correlation::click-metric-correlations::charts-view","data-testid":"run-correlation",small:!0,feature:"MetricCorrelations",isStart:!0}))};var Bn=n(45463);const qn=j.default.div.withConfig({displayName:"styled__Separator",componentId:"sc-1fhfk7c-0"})(["background:",";height:20px;width:1px;"],(0,P.getColor)("border")),In=e=>{var t,n;let{flavour:a}=e;const o=(0,de.nl)(),r=(0,f.Xt)(o),{critical:i,warning:c}=(0,m.AO)(r),l=(0,ue.Fw)("sidebarTab",{flavour:"val",extraKey:a}),d=(0,ue.Fw)("sidebarOpen",{flavour:"bool"}),h=(0,s.useCallback)((e=>{d(!0),l(e)}),[]),{hw:u={},cpus:p,isLive:g,name:b,os:y={},state:v,updateSeverity:_,labels:w,version:k,health:T}=(0,de.xY)(o),{silencingState:S}=(null===T||void 0===T?void 0:T.alerts)||{},M={dataTestId:"singleNode",isLive:g},x=(0,nt.t3)({count:i,type:"critical",...M}),C=(0,nt.t3)({count:c,type:"warning",...M}),B=(0,nt.GM)(v),q=(0,nt.Pg)(u.container,u.virtualization),I=(0,Bn.A)("(min-width: 1760px)");return 
s.createElement(P.Flex,{alignItems:"center",background:"panelBg",border:{side:"all",color:"border"},"data-testid":"singleNode-badges-container",flex:!1,gap:1,padding:[1,2],round:1},s.createElement(P.TextSmall,{"data-testid":"singleNode-".concat(b),strong:!0},b),s.createElement(pt.A,{badge:"info","data-testid":"singleNode-nodeInfo-cta"},s.createElement(P.IconButton,{flavour:"borderless",cursor:"pointer",onClick:()=>h("info"),icon:"information",iconColor:"nodeBadgeColor",tiny:!0,width:"18px",height:"18px"})),s.createElement(qn,{"data-testid":"singleNode-separator"}),s.createElement(pt.A,{badge:"alerts",nodeName:b},s.createElement(P.AlertMasterCard,{onClick:x.text||C.text?()=>h("alerts"):null,pillLeft:x,pillRight:C})),_&&s.createElement(At.A,{"data-testid":"singleNode-update-cta",name:b,os:y.id,container:u.container,warningLevel:_,labels:w,version:k,text:_}),s.createElement(qn,{"data-testid":"singleNode-separator"}),s.createElement(pt.A,{badge:"connectivity",connectivityState:B,"data-testid":"singleNode-connectivityStateInfo",nodeName:b},s.createElement(P.Pill,{flavour:"neutral",hollow:!0},B)),s.createElement(pt.A,{badge:"type","data-testid":"singleNode-nodeTypeInfo",nodeName:b,nodeType:q.label},s.createElement(P.Pill,{flavour:"neutral",hollow:!0,icon:q.icon},q.label)),I&&s.createElement(pt.A,{badge:"systemInfo","data-testid":"singleNode-systemInfo",nodeName:b},s.createElement(P.Pill,{flavour:"neutral",hollow:!0,icon:(null===(t=Ye.u[y.nm])||void 0===t?void 0:t.iconName)||(null===(n=Ye.U[y.id])||void 0===n?void 0:n.iconName)||"os"},(0,nt.O)(u.cpuFrequency)," (",p," ",1===u.cpus?"Core":"Cores",") -"," ",(0,nt.RI)(u.memory)," RAM - ",(0,nt.RI)(u.diskSpace))),s.createElement(Pt.A,{flavour:"node",silencing:S}))},Ln=()=>{const{onIntegrationsClick:e}=(0,He.A)();return s.createElement(s.Fragment,null,s.createElement(P.Button,{icon:"integrations",flavour:"hollow",onClick:e,small:!0,label:"Integrations","data-ga":"integrations::click-integrations::charts-view","data-testid":"btn-show-integrations"}))};const An={singleNode:In,default:n(90535).A},En=(0,s.memo)((e=>{let{flavour:t,...n}=e;const a=(0,ue.rW)("correlation"),o=An[t]||An.default;return s.createElement(P.Flex,{alignItems:!a&&"center",background:"mainBackground",column:a,"data-testid":"overview-header",gap:2,justifyContent:a?"start":"between",padding:[2,2,2,4],width:"100%"},a?s.createElement(xn,{flavour:t}):s.createElement(s.Fragment,null,!!o&&s.createElement(o,(0,C.A)({},n,{flavour:t})),s.createElement(P.Flex,{gap:2},s.createElement(Ln,null),s.createElement(Pn,null))))})),Dn=En;var Nn=n(56489),Rn=n(67602),Fn=n(83465),Un=n(31438),zn=n(55309),On=n(59090),Hn=n(13271),Vn=n(35243),Qn=n(75233);const jn=e=>t=>"overview-page::".concat(e.getAttribute("id"),"::").concat(t),Gn=(0,s.forwardRef)(((e,t)=>{let{style:n,id:a,menuChartAttributes:o,resizeHandle:r,handleProps:i,listeners:c,dragging:d,dragOverlay:h,...u}=e;const p=(0,l.ID)(),m=(0,Et.e)(),g=(0,Vn.N)(),f=(0,s.useMemo)((()=>{if(!g)return null;let e=g.getChildren().find((e=>e.match({id:h?"dragOverlay-".concat(a):a})));if(!e){const t=(e=>{let{chartId:t,...n}=e;return{contextScope:[t],...n}})(o);e=m.makeChart({attributes:{...t,roomId:p,id:h?"dragOverlay-".concat(a):a},makeTrack:jn,toolboxProps:{drag:{...i,...c}}}),g.appendChild(e)}return 
e}),[p,a,g]);(0,s.useLayoutEffect)((()=>{f&&f.updateAttributes({toolboxProps:{drag:{...i,...c,dragging:d}}})}),[f,d]);return(0,Be.GR)(o.chartId)&&f?s.createElement("div",{key:g.getId(),ref:t,style:n},s.createElement(Nt.A,{chart:f,"data-track":f.track("container"),"data-chartid":h?"dragOverlay-".concat(a):a,height:n.height,width:"100%"})):null})),Wn=(0,Un.BK)(Gn),Kn=(e,t)=>(e.layout||console.warn(t,"has no layout set, default values will be used."),(0,Fn.bP)(t,e.layout,{left:0,top:0,width:3,height:5,id:e.id,chartId:e.chartId,minWidth:1,minHeight:1,static:!1,dashboardable:!0})),Zn=(0,s.memo)((e=>{let{id:t,ids:n,containerWidth:a,...o}=e;const r=(0,Un.dd)(n,Kn),{filteredChartsCount:i}=(0,Hn.Ad)(),c=(0,Un.yO)(t),[l,d]=(0,s.useState)((()=>r)),h=(0,s.useMemo)((()=>l.filter((e=>{if(!e)return!1;const t=c(e.chartId);return t.visible&&!t.filteredOut&&!t.hidden}))),[n,l,i]),u=(0,Bn.A)("(max-width: 767px)");return n.length?s.createElement(Qn.A,(0,C.A)({compactType:"horizontal",rootId:t,containerId:t,onDragEnd:(e,t)=>{(0,Fn.Q)(t),d(e)},items:h,containerWidth:a-(u?16:26),gridTracks:u?1:12},o,{Item:Wn,rearrangeable:!1})):null}),((e,t)=>e.ids===t.ids&&e.containerWidth===t.containerWidth)),Yn=(0,zn.HB)(Zn,(e=>({ids:(null===e||void 0===e?void 0:e.headIds)||[]}))),Jn=(0,On.A)(Zn,(e=>({ids:(null===e||void 0===e?void 0:e.headIds)||[]}))),Xn=e=>{let{id:t,isActiveSticky:n,containerWidth:a,...o}=e;return s.createElement(it.k,(0,C.A)({id:t},o),s.createElement(ct._,{id:t,tiny:n,hasTooltip:n}),!n&&s.createElement(lt.G,{id:t,isActiveSticky:n}),!n&&s.createElement(Yn,{id:t,tiny:n,containerWidth:a}))},$n=(0,s.memo)(Xn);var es=n(44644);const ts=[],ns=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return{selectedDimensions:e.dimensions?Object.keys(e.dimensions):ts,selectedInstances:e.instances?Object.keys(e.instances):ts,selectedNodes:e.nodes?Object.keys(e.nodes):ts,weight:e.weight}},ss=(0,s.forwardRef)(((e,t)=>{let{style:n,handleProps:a,attributes:o,listeners:r,dragging:i,dragOverlay:c,id:d,resizeHandle:h,onRemove:u,...p}=e;const m=(0,l.ID)(),g=(0,Vn.N)(),f=(0,Un.aA)(d),b=(0,Et.e)(),{sendLog:y,isReady:v}=(0,Ht.A)(),_=(0,es.CS)({type:"chart",entity:d}),w=(0,s.useMemo)((()=>{if(!g)return null;let e=g.getNode({id:c?"dragOverlay-".concat(d):d});return e||(e=b.makeChart({attributes:{contextScope:[d],id:c?"dragOverlay-".concat(d):d,roomId:m,expandable:!1,...f,toolboxProps:{drag:{...a,...r,...o}}},makeTrack:jn}),g.appendChild(e),e)}),[g,m,d,c]);(0,s.useLayoutEffect)((()=>{w&&v&&"function"===typeof y&&w.updateAttribute("logOptions",{sendLog:y,payload:{feature:"Overview"}})}),[w,y,v]);const{weight:k,selectedDimensions:T,selectedInstances:S,selectedNodes:M}=(0,Un.uy)(d,ns);return(0,ln.A)((()=>{w&&k&&(w.updateAttributes({selectedDimensions:T,selectedInstances:S,selectedNodes:M}),w.trigger("fetch"))}),[w,m,d,k,T.length,S.length,M.length]),(0,ln.A)((()=>{w&&_&&(w.updateAttributes(_),w.trigger("fetch"))}),[w,m,d,_]),(0,s.useLayoutEffect)((()=>{w&&n&&(w.updateAttributes({height:n.height,width:n.width}),w.trigger("resize"))}),[w,n.height,n.width]),(0,ln.A)((()=>{w&&w.updateAttributes({toolboxProps:{drag:{...a,...r,...o,dragging:i}}})}),[w,i]),w?s.createElement(P.Box,{key:g&&g.getId(),ref:t,style:n},s.createElement(Nt.A,(0,C.A)({chart:w,"data-chartid":c?"dragOverlay-".concat(d):d,"data-track":w.track("container"),height:n.height,width:"100%"},p)),h):null})),as=s.memo(ss,((e,t)=>e.id===t.id&&e.style===t.style)),os=function(){let{id:e,layout:t={}}=arguments.length>0&&void 
0!==arguments[0]?arguments[0]:{};return(0,Fn.bP)(e,t,{left:0,top:0,width:12,height:9,id:e,chartId:e,...t,minWidth:12,maxWidth:12,minHeight:4,static:!1,dashboardable:!0})},rs=e=>{let{id:t,subMenuId:n,containerWidth:a,...o}=e;const r=(0,Un.aA)(t,os),[i,c]=(0,s.useState)((()=>[r])),l=(0,Bn.A)("(max-width: 767px)");return s.createElement(Qn.A,(0,C.A)({rootId:t,containerId:t,onDragEnd:(e,t)=>{(0,Fn.Q)(t),c(e)},items:i,containerWidth:a-(l?16:26)},o,{Item:(0,s.forwardRef)(((e,t)=>{let{id:a,...o}=e;return s.createElement(as,(0,C.A)({key:a,id:a,role:"graphics-object","aria-roledescription":"chart","data-submenuid":n,ref:t},o))})),rearrangeable:!1}))},is=(0,s.memo)(rs);var cs=n(74258),ls=n(37031);const ds=e=>{let{id:t,isActiveSticky:n}=e;return s.createElement(cs.t,{gap:0,id:t},s.createElement(ls.t,{id:t,margin:[1,0,0],hasTooltip:n}),!n&&s.createElement(Jn,{id:t}))},hs=(0,s.memo)(ds),us=e=>{let{id:t}=e;const n=(0,l.ID)(),a=(0,Et.e)(),o=(0,Un.yO)(),{subMenuChartIds:r}=(0,zn.Ee)(t),i=(0,Vn.N)(),c=(0,s.useMemo)((()=>{const[e]=r;let s=i.getChildren().find((e=>e.match({id:t})));if(s)return s;const c=r.map((e=>({value:e,label:o(e).context})));return s=a.makeChart({attributes:{id:"custom-k8s-map-".concat(t),contextScope:[e],roomId:n,composite:!0,chartLibrary:"groupBoxes",groupBy:["label"],groupByLabel:["k8s_namespace","k8s_pod_name"],aggregationMethod:"avg",eliminateZeroDimensions:!1,contextItems:c,toolboxElements:[]},makeTrack:jn}),i.appendChild(s),s}),[t,i]);return s.createElement(Nt.A,{key:i.getId(),margin:[0,0,2],chart:c,"data-chartid":t,"data-track":c.track("container")})},ps=e=>{let{id:t,...n}=e;return s.createElement(it.k,{id:t,padding:[6,0,0]},s.createElement(ct._,{id:t}),s.createElement(lt.G,{id:t,padding:[2,0,0]}),s.createElement(us,(0,C.A)({id:t},n)))},ms={k8s:(0,s.memo)(ps)},gs=e=>{if(ms[e.flavour])return ms[e.flavour];switch(e.level){case 0:return $n;case 1:return hs;case 2:return is;default:return null}},fs=e=>{let{onChartNameChange:t,initialChartName:n,dashboardOptions:a,linkToGo:o,contextToGo:r,containerWidth:i}=e;const[c,,,l]=(0,Rn.A)("addToDashboardModal"),{setActiveMenuGroupId:d,setActiveSubMenuId:h}=(0,ot.A)({onChartNameChange:t,initialChartName:n,linkToGo:o,contextToGo:r});return s.createElement(he.H,null,s.createElement(rt.A,{onActiveMenuGroupId:d,onActiveSubMenuId:h,getComponent:gs,dashboardOptions:a,initialChartName:n,containerWidth:i}),c&&s.createElement(Nn.A,{onClose:l}))},bs=(0,s.memo)((()=>{const[e,t]=(0,m.KB)(),n=(0,Et.e)(),a=!!e&&!!n&&n.getNode({id:e.context}),o=()=>{if(t(null),null===e||void 0===e||!e.context||!a)return;const n={...a.getAttribute("overlays")};delete n.alert,a.updateAttribute("overlays",n),history.replaceState({},"")};(0,s.useEffect)((()=>o),[]);const r=(0,l.ID)(),i=(0,Be.R7)((null===e||void 0===e?void 0:e.context)&&r,null===e||void 0===e?void 0:e.context);if(!e||null===i||void 0===i||!i.loaded)return null;const{firstEntry:c}=i;return s.createElement(P.Layer,{backdrop:!1,position:"top",margin:[26,0,0],padding:[26,0,0]},s.createElement(P.Flex,{background:"tooltip",padding:[1,2],gap:2},s.createElement(P.TextSmall,null,"Showing alert in ",s.createElement(P.TextSmall,{strong:!0},e.instance)," on"," ",s.createElement(P.TextSmall,{strong:!0},new Date(1e3*e.lastStatusChange).toLocaleString()),!c&&" doesn't exist on the node anymore",!!c&&c>e.lastStatusChange&&" exceeds agent data retention 
settings"),s.createElement(P.Flex,{flex:!1},s.createElement(P.Icon,{name:"x",color:"tooltipText",onClick:o,size:"small"}))))})),ys=bs,vs={NoChartsView:sn,NoNodesView:re.A,sidebar:{title:"Filters",hiddenTabs:["info"],Component:pe.Ay,props:{}},dashboardOptions:{},loadingMessage:"Loading charts...",Header:Dn},_s={overview:{...vs,sidebar:{...vs.sidebar,title:"Overview"},feature:"Overview"},k8s:{...vs,NoChartsView:cn,NoNodesView:cn,sidebar:{...vs.sidebar,title:"Kubernetes"},loadingMessage:"Loading k8s charts...",feature:"Kubernetes"},singleNode:{...vs,sidebar:{...vs.sidebar,title:"Single Node",hiddenTabs:["filters"],props:{showCollapsed:!0}},feature:"SingleNode"}},ws=()=>((0,Yt.A)(),null),ks=(0,s.memo)((e=>{let{flavour:t="overview",invalidationKey:n,nodeIds:a,loaded:r,getChart:i,chartIds:c,areChartsIdsEmpty:l,showNoChartsView:d,showNoNodesView:h,initializing:u,chartName:p,setChartName:m,chartsLoaded:g,widthRef:f,containerWidth:b,nodesKey:y,blurred:v,blurProps:_={},BlurredContent:w,noPreferredNodesError:k}=e;const{dashboardOptions:T,NoChartsView:S,NoNodesView:x,sidebar:P,loadingMessage:B,Header:q,feature:I="Overview"}=_s[t],{state:L}=(0,o.zy)();return h?s.createElement(ie.Ay,{feature:I,mode:"NoNodesView"},s.createElement(x,null)):u?s.createElement(M.A,{title:"Please wait while charts are being initialized.",body:"Thank you for your patience!"}):r?s.createElement(ie.Ay,{feature:I},s.createElement(he.A,{key:n,getObject:i,ids:c,linkToGo:null===L||void 0===L?void 0:L.chartName,extraKey:t,deps:[y]},s.createElement(ws,null),s.createElement(ys,null),s.createElement(le.A,{ref:f,sidebar:s.createElement(P.Component,(0,C.A)({nodeIds:a,title:P.title,initialChartName:p,hiddenTabs:P.hiddenTabs,flavour:t,loaded:g},P.props)),blurred:v,blurProps:_,BlurredContent:w},s.createElement(q,{flavour:t}),g||k?d||l?s.createElement(S,{noPreferredNodesError:k}):s.createElement(fs,{dashboardOptions:T,initialChartName:p,onChartNameChange:m,linkToGo:null===L||void 0===L?void 0:L.chartName,contextToGo:null===L||void 0===L?void 0:L.contextToGo,containerWidth:b}):s.createElement(M.A,{title:B})))):s.createElement(M.A,{title:B})})),Ts=ks;var Ss=n(87337),Ms=n(85686);const xs=e=>t=>{const n=(0,_.vt)(),a=(0,de.nl)(),o=(0,de.xY)(a,"isLive"),r=(0,de.xY)(a,"name"),i=(0,de.xY)(a,"isPreferred");(0,I.ZB)({title:r,id:a,destination:a});const c=(0,de.BQ)(a,n),l=(0,Ss.KF)();(0,s.useEffect)((()=>{if(!o)return;if("hasValue"!==c.state)return;const{contents:e}=c;null!==e&&void 0!==e&&e.uid&&r&&l(e.uid,null,r)}),[c,o,r]);const d=(0,s.useMemo)((()=>[a]),[a]);return s.createElement(e,(0,C.A)({flavour:"singleNode",nodeIds:d,blurred:!i,blurProps:{value:"12px"},BlurredContent:Ms.A},t))};var Cs=n(61658),Ps=n(52768);var Bs=n(28973),qs=n(13752),Is=n(92815),Ls=n(72582),As=n(60247),Es=n(64125),Ds=n(6504),Ns=n(22332),Rs=n(62709);const Fs=(0,j.default)(P.Icon).withConfig({displayName:"styled__StyledAlertDot",componentId:"sc-12w0ym-0"})(["position:absolute;top:0;right:-1px;width:6px;height:6px;"]);var Us=n(14125);const zs={events:{extraKey:"feedFilters",page:"events",alertNamesKey:"alert_names",period:{after:-n(79897).d_,before:0}},alerts:{extraKey:"alerts",page:"alerts",alertNamesKey:"alertNames"}},Os=function(){let{flavour:e="events"}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const 
t=(0,l.ID)(),n=(0,o.Zp)(),[a,i]=(0,r.Ay)(),[,c]=(0,m.kJ)(),[,d]=(0,m.DV)(),[,h]=(0,ue.N9)(),{extraKey:u,page:p,alertNamesKey:g,period:f}=(0,s.useMemo)((()=>zs[e]||zs.events),[e]),b=(0,s.useMemo)((()=>({defaultValue:[],key:t,extraKey:u,flavour:"arr",merge:!1})),[t,u]),[,y]=(0,ue.N9)("alertContexts",b),[,v]=(0,ue.N9)(g,b),[,_]=(0,ue.N9)("chart_names",b),w=(0,s.useCallback)((e=>{let{name:t}=e;c(1),d({alertName:t,nodeId:null}),n("/spaces/".concat(a,"/rooms/").concat(i,"/alerts"),{replace:!0})}),[n,a,i,c,d]),k=(0,s.useCallback)((e=>{let{context:t,name:s,instance:o}=e;f&&h(f),t&&y([t]),s&&v([s]),o&&_([o]),n("/spaces/".concat(a,"/rooms/").concat(i,"/").concat(p))}),[n,p,f,a,i,h,y,v,_]);return{goToAlerts:k,goToAlertConfigurations:w}};var Hs=n(90204);const Vs=(0,qt.A)((e=>{let{alertName:t,close:n,tooltip:a,...o}=e;const{setSelectedTab:r}=(0,Hs.A)(),{goToAlerts:i}=Os({flavour:"alerts"}),{sendLog:c}=(0,Ht.A)(),l=(0,s.useCallback)((e=>{e.stopPropagation(),c({feature:"MetricsChartManageAlerts",description:"Show alert",alertName:t}),r(0),n(),i({name:t})}),[r,i,n,c]);return s.createElement(P.IconButton,(0,C.A)({icon:"goToNode",iconColor:"textLite",onClick:l,neutral:!0,padding:[0],width:"16px",height:"16px"},o))})),Qs=(0,qt.A)((e=>{let{alertName:t,nodes:n,close:a,tooltip:o,...r}=e;const{goToEdit:i}=(0,Us.q)({alertName:t,nodes:n}),{sendLog:c}=(0,Ht.A)(),l=(0,s.useCallback)((e=>{e.stopPropagation(),c({feature:"MetricsChartManageAlerts",description:"Edit alert",alertName:t}),i(),a()}),[a,i,c]);return s.createElement(P.IconButton,(0,C.A)({icon:"pencilOutline",iconColor:"textLite",onClick:l,neutral:!0,padding:[0],width:"12px",height:"12px"},r))})),js=e=>{let{item:{value:t,label:n,alerts:a={},nodes:o,disabled:r,onClick:i,...c},index:l,style:d,close:h,onItemClick:u,...p}=e;const{cl:m=0,cr:g=0,wr:f=0}=a,b=!!f||!!g,y={text:g,flavour:g?"error":"disabledError"},v={text:f,flavour:f?"warning":"disabledWarning"},_={text:m,flavour:m?"clear":"disabledClear"};return s.createElement(P.MenuItemContainer,(0,C.A)({"data-index":l,disabled:r,onClick:e=>{i&&i(e)}},c,p,{style:d,alignItems:"center",justifyContent:"between",gap:2,padding:[1,2],overflow:"hidden",cursor:"default"}),s.createElement(P.Flex,{width:{max:"180px"},alignItems:"center"},s.createElement(Ut.A,{text:n,TextComponent:P.Text})),s.createElement(P.Flex,{alignItems:"center",gap:1},s.createElement(P.MasterCard,{pillLeft:y,pillRight:v,pillEnd:_}),s.createElement(Vs,{alertName:t,close:h,tooltip:b?"Show in active alerts":"",tooltipProps:{align:"bottom"},disabled:!b}),s.createElement(Qs,{alertName:t,nodes:o,close:h,tooltip:"Edit this alert configuration",tooltipProps:{align:"bottom"}})))};var Gs=n(26688),Ws=n(94390);const Ks=()=>{const e=(0,Gs.A)(),[,t]=(0,Ot.QH)();return(0,s.useCallback)((n=>{let{context:s}=n;const a={format_version:1,rules:[{config:{match:{on:s}}}]};t(e({template:Ws.Sd,formData:a}))}),[e,t])},Zs=(0,Ve.A)((0,qt.A)(P.Button)),Ys=e=>{let{contexts:t=[],close:n}=e;const a=(0,Ns.useChart)(),i=a.getId(),c=null===t||void 0===t?void 0:t[0],l=a.getAttribute("nodes"),{setSelectedTab:d}=(0,Hs.A)(),[h,u]=(0,r.Ay)(),{goToAlerts:p}=Os({flavour:"alerts"}),m=Ks(),g=(0,L.u7)({extraKey:Ws.q4}),f=(0,o.Zp)(),b=(0,s.useMemo)((()=>c?"View the active alerts for ".concat(c," context"):"View the active alerts for this context"),[c]),y=(0,s.useCallback)((()=>{c&&(d(0),n(),p({context:c}))}),[c,p,d,h,u,f,n]),v=(0,s.useCallback)((()=>{c&&(m({context:c}),f("/spaces/".concat(h,"/settings/configurations"),{replace:!0,state:{isAlert:!0}}),g(Object.values(l).map((e=>{let{nd:t}=e;return 
t}))),n())}),[m,g,c,l,f,h,n]);return s.createElement(P.Flex,{gap:2,padding:[2],border:{side:"top"},justifyContent:"center"},s.createElement(Zs,{feature:"MetricsChartManageAlerts",label:"Go to alerts",small:!0,flavour:"hollow",icon:"goToNode",onClick:y,tooltip:b,tooltipProps:{align:"bottom"},payload:{chartId:i},disabled:!c}),s.createElement(Zs,{feature:"MetricsChartManageAlerts",label:"Add alert",small:!0,icon:"plus",onClick:v,tooltip:"Start adding a new alert on ".concat(c," context"),tooltipProps:{align:"bottom"},payload:{chartId:i},disabled:!c}))},Js=()=>{const e=(0,Ns.useChart)(),{items:t,status:n}=(e=>{const[t,n]=(0,s.useState)([]),[a,o]=(0,s.useState)([]),[r,i]=(0,s.useState)();return(0,s.useEffect)((()=>{const t=()=>{const t=e.getAttribute("alerts"),s=Object.values(t||{}),a=Object.keys(e.getAttribute("nodes")||{});n(s),o(a);for(let e=0;e<=s.length;e++){const{wr:t=0,cr:n=0}=s[e]||{};if(n>0){i("error");break}if(t>0){i("warning");break}}};return e&&e.on("successFetch",t),()=>{e&&e.off("successFetch",t)}}),[e,n,i]),{items:(0,s.useMemo)((()=>[...t.map((e=>{let{nm:t,cl:n,wr:s,cr:o}=e;return{value:t,label:t,alerts:{cl:n,wr:s,cr:o},nodes:a}}))]),[t]),status:r}})(e),a=e.getAttribute("contextScope");return e?s.createElement(s.Fragment,null,s.createElement(P.Menu,{dropProps:{align:{top:"bottom",right:"right"},"data-toolbox":e.getId()},dropdownProps:{width:"360px"},Item:js,items:t,Footer:e=>{let{close:t}=e;return s.createElement(Ys,{contexts:a,close:t})}},s.createElement(P.Flex,{alignItems:"center",position:"relative"},n?s.createElement(Fs,{name:"dot",color:n}):null,s.createElement(Ds.Button,{icon:s.createElement(Ds.default,{svg:Rs.default,size:"16px"}),title:"Manage alerts","data-testid":"chartHeaderToolbox-manageAlerts"})))):null};var Xs=n(93155);var $s=n(98496);const ea={ErrAllNodesFailed:"You do not have permissions to request charts metadata.",default:"Something went wrong during the request of charts metadata."},ta=function(){let{error:e=""}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return s.createElement($s.A,{title:"Failed to request contexts"},s.createElement(P.TextBig,null,ea[e]||ea.default))},na={default:{shouldHide:()=>!1},k8s:{shouldHide:e=>{let{context:t}=e;return!t.includes("k8s")}}},sa=[],aa="ErrNoPreferredNodeRequested",oa=e=>(0,s.memo)((t=>{let{flavour:n="overview",nodeIds:a=sa,...r}=t;const i=(0,_.vt)(),c=(0,l.ID)();let d=(0,L.w7)({extraKey:n,merge:!1,scoped:!0});d=a.length?a:d;const h=(0,oe.nj)(),u=(0,Be._0)(c),{state:p}=(0,o.zy)(),g=(0,Be.R7)(c),f=(0,de.TG)(d),b=(0,Pe.A)(c,d,{spaceId:i}),{contexts:y,metadata:v,loaded:w,dimensionsCount:k}=(0,dn.Yy)({nodeIds:d,flavour:n},i,c),[,T]=(()=>{const{state:e={}}=(0,o.zy)(),{alertId:t=null}=e||{},[n,a]=(0,m.KB)(),r=t||(null===n||void 0===n?void 0:n.id),i=(0,m.JL)(r);(0,m.yk)(i.id),(0,s.useEffect)((()=>{(i.fullyLoaded||i.id)&&a({...i,formattedLastStatusChangeValue:(0,Ps.m3)(i.lastStatusChangeValue,i.units),linkToGo:"chart_".concat((0,Ze.A)(i.context))})}),[i.id,i.fullyLoaded]);const c=null!==n&&void 0!==n&&n.id?"chart_".concat((0,Ze.A)(n.context)):null;return[i.id,c]})(),S=a?a.join():n,M=(0,_.dg)(),[x,{width:P}]=(0,ce.A)();((e,t,n)=>{let{width:a,host:r}=n;if(!r)throw"No host provided";const i=(0,Et.e)(),c=(0,s.useRef)(),l=(0,Be.aZ)(e,"error"),[{correlation:d},h]=(0,ue.N9)(),{state:u,pathname:p}=(0,o.zy)(),m=(0,Vn.G)(),g=(0,Be.jI)(e),f=(0,Be.DQ)(e);(0,s.useLayoutEffect)((()=>{if(!e||!i||l)return;let n=i.getRoot().getChildren().find((e=>e.match({id:p})));if(!n)return 
n=i.makeContainer({attributes:{id:p,roomId:e,host:r,navigation:d?"highlight":"pan",overlays:{proceeded:{type:"proceeded"}},composite:!0,nodesScope:t,hasCorrelation:!0,versions:g,containerWidth:a,toolboxElements:[...Xs.Dm?[Js]:[],Ls.default,qs.default,Is.default,Es.A,As.A]}}),i.appendChild(n),m(p),c.current=n,(0,Bs.unregister)(i.on("correlation",((e,t)=>{t&&h({correlation:!0})})),n.onAttributeChange("versions",f));m(p)}),[e,l,p]),(0,s.useMemo)((()=>{c.current&&(c.current.setAttribute("containerWidth",a),c.current.getNodes().forEach((e=>e.updateAttribute("containerWidth",a))))}),[c.current,a]),(0,ln.A)((()=>{c.current&&c.current.getNodes().forEach((e=>{e.updateAttribute("nodesScope",t),e.getAttribute("active")&&"chart"===e.type&&e.fetch()}))}),[c.current,t]),(0,s.useEffect)((()=>{c.current&&c.current.getApplicableNodes({syncHighlight:!0}).forEach((e=>{e.updateAttribute("navigation",d?"highlight":"pan")}))}),[c.current,d]),(0,s.useEffect)((()=>()=>c.current&&c.current.destroy()),[]),(0,s.useEffect)((()=>{if(!c.current)return;if(!u||!u.context)return;const{context:e,chartId:t,nodeId:n}=u,s=c.current.getNode({id:e});s&&s.updateAttributes({selectedInstances:["".concat(t,"@").concat(n)],selectedNodes:[n]});const a=t&&n&&c.current.on("chartLoaded",(s=>{const o=s.getAttribute("id");e===o&&(s.updateAttributes({selectedInstances:["".concat(t,"@").concat(n)],selectedNodes:[n]}),a())}));return()=>u&&(0,Bs.unregister)(a)}),[c.current,u])})(c,d,{host:M?"".concat(window.envSettings.agentApiUrl,"/api/v2"):"".concat(window.envSettings.apiUrl,"/api/v3/spaces/").concat(i,"/rooms/").concat(c),width:P});const B=(0,Be.OL)(c),I=(0,Be.rx)(c),A=w?v:g,E=na[n]||na.default,D=(0,Be.bo)(c),N=(0,q.A)(D),R=(0,Hn.rq)(w?y:B,g,c,{...E,force:w||D!==N,cacheKey:"".concat(n,"-").concat(k,"-").concat(S)}),F=(0,Be.yO)(c,v),U=(0,q.A)(b,!0),z=R<1,[O,H]=(0,ue.N9)("chartName",{key:c,extraKey:S,flavour:"val"});(0,s.useEffect)((()=>{c&&H(T||(null===p||void 0===p?void 0:p.chartName)||O)}),[c,T]);const V=(0,Vn.N)(),Q=Object.keys(A).length<1&&u||d.length&&!f.length,j=(0,oe.CK)(),G=(null===I||void 0===I?void 0:I.errorMsgKey)==aa;if(!Q&&!1!==U&&b&&"No data for this period"!==b&&b!==aa&&!Object.keys(A).length)return s.createElement(ta,{error:b});const W=!!c&&h&&(!!V||G);return s.createElement(e,(0,C.A)({nodeIds:f,loaded:W,getChart:F,areChartsIdsEmpty:z,showNoNodesView:h&&!j.length,showNoChartsView:Q,initializing:b&&(0,Cs.O)(b),chartName:O,setChartName:H,chartsLoaded:u&&!!P,invalidationKey:c,flavour:n,chartIds:w?y:B,widthRef:x,containerWidth:P,nodesKey:S,noPreferredNodesError:G},r))}));var ra=n(45894);var ia=n(31604);const ca=(0,a.A)((()=>n.e(6760).then(n.bind(n,96760))),"Contents"),la=()=>{const[e,t]=(0,ia.A$)();return e?s.createElement(s.Suspense,{fallback:""},s.createElement(ca,{id:e,onClose:()=>t(null)})):null},da=e=>{let{title:t,icon:n,iconColor:a,...o}=e;return s.createElement(P.Flex,(0,C.A)({alignItems:"center",gap:2},o),n&&s.createElement(P.Icon,(0,C.A)({name:n},a?{color:a}:{})),s.createElement(P.TextBig,null,t))},ha=e=>{let{titleProps:t,children:n,...a}=e;const o={padding:[4],gap:4,background:"panelBg",...t?{column:!0}:{}};return s.createElement(P.Flex,(0,C.A)({},o,a),t?s.createElement(da,t):null,n)},ua=e=>function(){let{title:t="",headerInfo:n,containerProps:a={},TitleComponent:o=P.TextBigger,...r}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return 
s.createElement(P.Flex,(0,C.A)({column:!0,gap:2,alignItems:"center",justifyContent:"center"},a),t&&s.createElement(P.Flex,{gap:2,alignItems:"center"},s.createElement(o,null,t),n),s.createElement(e,r))},pa=ua((e=>{let{number:t=0,NumberComponent:n=P.TextHuge}=e;return s.createElement(n,{strong:!0},t)})),ma=e=>{const t=e.reduce(((e,t)=>{let{value:n}=t;return e+n}),0);return e.map((e=>({...e,width:"".concat(Math.floor(e.value/t*100),"%")})))},ga=function(){let{data:e=[],testId:t="",numberIndicatorProps:n={}}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return s.createElement(P.Flex,{width:"100%",column:!0,gap:2},s.createElement(P.Flex,{justifyContent:"around"},e.map((e=>{let{label:t,value:a}=e;return s.createElement(pa,(0,C.A)({key:"".concat(t,"-").concat(a),title:t,number:a},n))}))),s.createElement(P.ProgressBar,{background:"borderSecondary",border:"none",containerWidth:"100%","data-testid":t,height:2,value:ma(e.filter((e=>{let{omit:t}=e;return!t})))}))},fa=ua((e=>{let{total:t,count:n,live:a,offline:o,stale:r,unseen:i,...c}=e;const l=(0,s.useMemo)((()=>[...[{label:"Live",value:a,color:"success"},{label:"Offline",value:o,color:"offline"},{label:"Stale",value:r,color:"stale"}],...i?[{label:"Unseen",value:i,color:"unseen"}]:[]]),[a,o,r,i]);return s.createElement(P.Flex,{width:"100%",column:!0,alignItems:"center",gap:4},s.createElement(P.Flex,{gap:2,alignItems:"baseline"},s.createElement(P.TextHuge,(0,C.A)({strong:!0},c),n||"-"),"-"!=n&&t!=n&&s.createElement(P.Text,null,"of ",t)),s.createElement(ga,{data:l,numberIndicatorProps:{TitleComponent:P.TextBig,NumberComponent:P.TextBigger}}))})),ba=e=>{const t=(0,oe.CK)(),n=t.length,a=(0,L.eO)({keepAll:!1}),o=(0,s.useMemo)((()=>a.reduce(((e,t)=>{let{isLive:n,isOffline:s,isUnseen:a,state:o}=t;return{live:n?e.live+1:e.live,offline:s?e.offline+1:e.offline,stale:"stale"==o?e.stale+1:e.stale,unseen:a?e.unseen+1:e.unseen}}),{live:0,offline:0,stale:0,unseen:0})),[a]),r=(0,s.useMemo)((()=>{const e=(null===a||void 0===a?void 0:a.length)||"-";return{total:n,count:e,title:a&&n!=e?"Selected nodes":"Total nodes",...o}}),[t,a]);return s.createElement(fa,(0,C.A)({},r,e))};var ya=n(92677);const va=(0,j.default)(P.TextSmall).withConfig({displayName:"pieChartLabels__StyledText",componentId:"sc-3919ls-0"})(["display:flex;align-items:center;cursor:",";&:hover{opacity:",";}"],(e=>{let{onClick:t}=e;return t?"pointer":"auto"}),(e=>{let{onClick:t}=e;return t?"0.7":"1"})),_a=(0,s.memo)((e=>{let{title:t,value:n,color:a,onClick:o=null}=e;return s.createElement(P.Flex,{alignItems:"center",justifyContent:"between",width:"100%",gap:4,flex:{grow:1,shrink:0}},s.createElement(P.Flex,{gap:1},s.createElement(P.Flex,{width:"4px",height:"16px",background:a}),s.createElement(va,{whiteSpace:"nowrap",onClick:o},t,o&&s.createElement(P.Icon,{color:"textLite",margin:[0,0,0,1],name:"nav_arrow_goto",height:"10px",width:"10px"}))),s.createElement(P.Text,{strong:!0},null!==n&&void 0!==n?n:"-"))})),wa=e=>{let{data:t}=e;return s.createElement(P.Flex,{column:!0,alignItems:"start",justifyContent:"center",gap:5},t.map(((e,t)=>{let{title:n,value:a,color:o,onClick:r}=e;return s.createElement(_a,{key:"".concat(n,"-").concat(a,"-").concat(t),title:n,value:a,color:o,onClick:r})})))},ka=e=>{let{data:t,theme:n}=e;return t.map((e=>({...e,color:(0,P.getColor)(e.color)({theme:n})})))},Ta=e=>{let{chartData:t=[],lineWidth:n=15,animate:a=!0,label:o,hasData:r}=e;const i=(0,s.useContext)(j.ThemeContext);return 
s.createElement(P.Flex,{gap:4,margin:[0,4],height:{min:"70px",max:"130px"},alignItems:"center"},s.createElement(P.Flex,{height:"70px",flex:{grow:1,shrink:1}},r?s.createElement(ya.PieChart,{label:()=>o,labelStyle:{fontSize:"34px",fontWeight:700,lineHeight:"38px",fill:(0,P.getColor)("textDescription")({theme:i})},labelPosition:0,data:ka({data:t,theme:i}),lineWidth:n,animate:a}):s.createElement(P.Icon,{color:"border",name:"pie_chart_skeleton",width:"70px",height:"70px",alignSelf:"center"})),s.createElement(wa,{data:t}))};var Sa=n(23452);const Ma=ua((e=>s.createElement(P.Flex,{gap:2,justifyContent:"between",padding:[2,0,0,0]},s.createElement(Ta,e)))),xa=e=>{const t=(0,Sa.QD)({extraKey:"alerts"}),{critical:n,warning:a}=(t||[]).reduce(((e,t)=>{let{status:n}=t;return{...e,[n]:e[n]+1}}),{critical:0,warning:0}),o=(0,Ps.x7)(),r=(0,s.useMemo)((()=>({chartData:[{title:"Critical",value:n,color:"errorText",onClick:()=>o("critical")},{title:"Warning",value:a,color:"warningText",onClick:()=>o("warning")}],hasData:!!t,label:null===t||void 0===t?void 0:t.length})),[n,a,o]);return s.createElement(Ma,(0,C.A)({title:"Active alerts"},r,e))},Ca=()=>s.createElement(P.Flex,{column:!0,gap:4,basis:"205px"},s.createElement(ba,null),s.createElement(Xt.A,null),s.createElement(xa,null));var Pa=n(63950),Ba=n.n(Pa),qa=n(35119),Ia=n(80158);const La=e=>{let{label:t,color:n}=e;return s.createElement(P.Flex,{height:"32px",gap:1,alignItems:"center"},s.createElement(P.Flex,{width:"12px",height:"12px",background:n}),s.createElement(P.Text,null,(0,Ia.Zr)(t)))},Aa=e=>{let{colorBy:t}=e;const n=Object.entries(qa.Cc[null===t||void 0===t?void 0:t.value]||{}),a=qa.Ug[null===t||void 0===t?void 0:t.value]||{};return n.length?s.createElement(P.Flex,{gap:2},n.map((e=>{let[t,n]=e;return s.createElement(La,{key:t,label:a[t]||t,color:n})}))):null},Ea=e=>{let{groupBy:t,setGroupBy:n=Ba(),colorBy:a,setColorBy:o=Ba(),onAddNode:r}=e;return s.createElement(P.Flex,{alignItems:"end",justifyContent:"between"},s.createElement(P.Flex,{gap:3,alignItems:"end"},s.createElement(P.Flex,{column:!0,gap:1},s.createElement(P.Text,null,"Group by"),s.createElement(P.Select,{options:qa.XJ,value:t,onChange:n})),s.createElement(P.Flex,{column:!0,gap:1},s.createElement(P.Text,null,"Color by"),s.createElement(P.Select,{options:qa.Hn,value:a,onChange:o})),s.createElement(Aa,{colorBy:a})),s.createElement(fe.A,{flavour:"borderless",icon:"add_node",iconColor:"text",onClick:r}))},Da=(0,j.default)(P.Box).withConfig({displayName:"styled__PolygonContainer",componentId:"sc-9bx1cv-0"})(["width:22px;position:relative;aspect-ratio:1;background-color:",";cursor:",";opacity:",";clip-path:polygon( 93.56% 74.55%,50.52% 100%,6.96% 75.45%,6.44% 25.45%,49.48% 0%,93.04% 24.55%,93.56% 74.55% );"],(e=>{let{background:t}=e;return(0,P.getColor)(t||"offline")}),(e=>{let{onClick:t}=e;return t?"pointer":"default"}),(e=>{let{isDisabled:t}=e;return t?"0.5":"1"})),Na=(0,j.default)(P.Box).withConfig({displayName:"styled__PolygonInner",componentId:"sc-9bx1cv-1"})(["width:18px;position:absolute;top:2px;left:2px;aspect-ratio:1;background-color:",";opacity:",";clip-path:polygon( 93.56% 74.55%,50.52% 100%,6.96% 75.45%,6.44% 25.45%,49.48% 0%,93.04% 24.55%,93.56% 74.55% );"],(e=>{let{background:t}=e;return(0,P.getColor)(t||"offline")}),(e=>{let{isDisabled:t}=e;return 
t?"0.5":"1"})),Ra=(0,j.default)(P.Icon).withConfig({displayName:"styled__AddNodeIcon",componentId:"sc-9bx1cv-2"})(["position:absolute;left:50%;top:50%;transform:translate(-50%,-50%);"]),Fa=(0,j.default)(P.Box).withConfig({displayName:"styled__Groups",componentId:"sc-9bx1cv-3"})(["width:100%;display:grid;grid-template-columns:repeat(2,1fr);gap:16px;"]),Ua=(0,j.default)(P.Flex).withConfig({displayName:"styled__NodeContainer",componentId:"sc-9bx1cv-4"})(["opacity:",";"],(e=>{let{isActive:t}=e;return t?"1":".3"})),za=(0,qt.A)((e=>{let{children:t,innerProps:n={},containerProps:a={},...o}=e;return s.createElement(Ua,(0,C.A)({margin:[0,0,.5,0]},o),s.createElement(Da,a,s.createElement(Na,n,t)))})),Oa=e=>{let{type:t,onClick:n}=e;const a=(0,l.XA)(),o=(0,_.dg)(),r=(0,c.JT)("node:Create"),i=(0,s.useMemo)((()=>({background:"success",isDisabled:!r,...r?{onClick:()=>n({type:t})}:{}})),[r]),d=r?"Add node":o?"You are viewing your local node, connect to cloud and connect multiple nodes to view at once":"Only admin users can add nodes to ".concat(a.name);return s.createElement(s.Fragment,null,s.createElement(za,{containerProps:i,innerProps:{background:"panelBg"},tooltip:d,isActive:!0},s.createElement(Ra,{name:"plus",color:"success"})))},Ha=za;var Va=n(33931);const Qa=e=>{var t,n;let{architecture:a,cpuFrequency:o,cpus:r,diskSpace:i,memory:c,os:l,osName:d}=e;const h=[(0,nt.O)(o),(0,nt.ws)(r,1===r?"Core":"Cores"),(0,nt.ws)(a),(0,nt.ws)((0,nt.RI)(c),"RAM"),(0,nt.ws)((0,nt.RI)(i),"HD")];return s.createElement(P.Flex,{column:!0,gap:1},s.createElement(P.Flex,{gap:1},s.createElement(P.Icon,{"data-testid":"node-system-info-icon",name:(null===(t=Ye.u[d])||void 0===t?void 0:t.iconName)||(null===(n=Ye.U[l])||void 0===n?void 0:n.iconName)||"os",width:"16px",height:"16px",color:"text"}),s.createElement(P.Flex,null,(0,nt.ws)(d))),h.map(((e,t)=>s.createElement(P.Text,{key:t},"\u2022 ",e))))};var ja=n(89879);const Ga=e=>{let{isStable:t}=e;const n=t?{icon:"checkmark_s",color:"primary",borderColor:"primary"}:{icon:"warning_triangle",color:"stale",borderColor:"stale"};return s.createElement(P.Pill,(0,C.A)({hollow:!0},n),t?"Stable":"Unstable")},Wa=function(){let{id:e,name:t,state:n,hw:a,os:o,isStable:r,setActiveNodes:i}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const c=(0,ia.c8)(),{nodes:l}=c||{},d=null===l||void 0===l?void 0:l[e];return(0,s.useEffect)((()=>{if("function"==typeof i){const{children:t,parents:n}=d||{},s=d?[...[...n,...t].map((e=>{let{id:t}=e;return t}))||[],e]:[e];i({isHover:!0,nodes:s})}return()=>{"function"==typeof i&&i(qa.vd)}}),[e,d]),s.createElement(P.Flex,{column:!0,gap:3,width:{min:40},background:"mainBackground",padding:[2,4]},s.createElement(P.TextBig,{strong:!0},t),s.createElement(P.Flex,{column:!0,gap:1},s.createElement(P.Text,null,"Status"),s.createElement(P.Flex,null,s.createElement(Va.A,{state:n}))),s.createElement(P.Flex,{column:!0,gap:1},s.createElement(P.Text,null,"Connection"),s.createElement(P.Flex,null,s.createElement(Ga,{isStable:r}))),s.createElement(P.Flex,{column:!0,gap:1},s.createElement(P.Text,null,"Alerts"),s.createElement(D,{id:e})),s.createElement(Xt.A,{color:"textLite"}),s.createElement(Qa,{architecture:a.architecture,cpuFrequency:a.cpuFrequency,cpus:a.cpus,"data-testid":"nodeRow-system-info",diskSpace:a.diskSpace,memory:a.memory,name:t,osName:o.nm,os:o.id}),s.createElement(Xt.A,{color:"textLite"}),s.createElement(ja.A,{id:e}))};n(8159),n(37550);const Ka=function(){var e;let{colorBy:t={},node:n={},isStable:a}=arguments.length>0&&void 
0!==arguments[0]?arguments[0]:{};const{value:o}=t,r=qa.AZ[o],i=(0,f.Xt)(n.id),{critical:c,warning:l}=(0,m.AO)(i),d=qa.Cc[o];let h=d[null===r||void 0===r||null===(e=r(n))||void 0===e||null===(e=e.group)||void 0===e?void 0:e.toLowerCase()];if(o==qa.w9.stability.value){h=d[a?"stable":"unstable"]}return{borderColor:(0,s.useMemo)((()=>(e=>{let{critical:t,warning:n}=e;return t?"error":n?"warning":null})({critical:c,warning:l})||h),[h,c,l]),background:h}},Za=e=>{let{colorBy:t,stability:n={},setActiveNodes:a,...o}=e;const{isStable:r}=n[o.id]||{},{borderColor:i,background:c}=Ka({colorBy:t,node:o,isStable:r}),[,l]=(0,ia.A$)(),d=(0,s.useCallback)((()=>l(o.id)),[o.id,l]);return s.createElement(Ha,(0,C.A)({containerProps:{background:i,onClick:d},innerProps:{background:c},tooltip:s.createElement(Wa,(0,C.A)({isStable:r,setActiveNodes:a},o))},o))};var Ya=n(49389);const Ja=(0,s.memo)((e=>{let{nodes:t=[],type:n,stability:a={},groupBy:o,colorBy:r,onAddNode:i,...c}=e;const[l,d]=(0,s.useState)(qa.vd),{isHover:h,nodes:u}=l,p=null===o||void 0===o?void 0:o.canAddNodes;return s.createElement(P.Flex,(0,C.A)({flexWrap:!0},c),t.map((e=>s.createElement(Za,(0,C.A)({key:e.id,colorBy:r,stability:a,isActive:!h||u.includes(e.id),setActiveNodes:d},e)))),p?s.createElement(Oa,{type:n,onClick:i}):null)})),Xa=(e=>t=>{var n;let{title:a,...o}=t;return a?s.createElement(P.Flex,{column:!0,gap:2},s.createElement(P.Flex,{justifyContent:"between",border:{side:"bottom",color:"border"}},s.createElement(P.TextBig,{strong:!0},a),null!==(n=o.nodes)&&void 0!==n&&n.length?s.createElement(P.TextBig,null,"".concat(o.nodes.length," ").concat((0,Ya.su)(o.nodes.length))):null),s.createElement(e,o)):s.createElement(e,o)})(($a=Ja,e=>{const{stability:t}=(0,ia.SW)();return s.createElement($a,(0,C.A)({stability:t},e))}));var $a;const eo=e=>t=>{const n=qa.AZ[e],s=(0,qa.Hl)(e);return t.reduce(((e,t)=>{const{group:a="unknown",type:o}=n(t);return e[a]?e[a].nodes=[...e[a].nodes,t]:e[a]={title:s(a),type:o,nodes:[t]},e}),{})},to={os:eo("os"),version:eo("version"),status:eo("status"),technology:eo("technology"),replication:eo("replication"),cloudProvider:eo("cloudProvider"),cloudRegion:eo("cloudRegion"),instanceType:eo("instanceType"),none:e=>({none:{nodes:e}})},no=e=>{var t;let{value:n}=e;const s=(0,L.eO)({keepAll:!1});return null===(t=to[n])||void 0===t?void 0:t.call(to,s)},so=()=>s.createElement(P.Flex,null,s.createElement(P.TextBig,null,"No nodes found")),ao=e=>{let{groupBy:t,colorBy:n,onAddNode:a}=e;const o=no(t);if(!o)return s.createElement(so,null);const r=Object.values(o);if(!r.length)return s.createElement(so,null);if((null===t||void 0===t?void 0:t.value)==qa.nv.value){const{nodes:e}=r[0];return s.createElement(Xa,{nodes:e,groupBy:t,colorBy:n,onAddNode:a})}const i=1==r.length?P.Flex:Fa;return s.createElement(i,null,r.map((e=>{let{title:o,type:r,nodes:i}=e;return s.createElement(Xa,{key:o,title:o,type:r,nodes:i,groupBy:t,colorBy:n,onAddNode:a,margin:[0,0,2,0]})})))},oo=()=>{const[e,t]=(0,s.useState)(qa.nv),[n,a]=(0,s.useState)(qa.jZ),[o,r]=(0,s.useState)(),i=(0,s.useCallback)((e=>{r(e)}),[r]),c=(0,s.useCallback)((()=>r()),[r]);return s.createElement(P.Flex,{column:!0,gap:4,basis:"calc(100% - 410px)"},s.createElement(Ea,{groupBy:e,setGroupBy:t,colorBy:n,setColorBy:a,onAddNode:i}),s.createElement(P.Flex,{height:{max:150},overflow:{vertical:"auto"}},s.createElement(ao,{groupBy:e,colorBy:n,onAddNode:i})),o&&s.createElement(be.A,{nodeType:null===o||void 0===o?void 
0:o.type,onClose:c}))},ro=e=>s.createElement(ha,(0,C.A)({width:"100%",alignItems:"center",justifyContent:"center"},e)),io=()=>{const{loaded:e,totalChildren:t,totalParents:n,totalStandalone:a}=(0,ia.c8)(),o=(0,s.useMemo)((()=>[{label:"Parents",value:n,color:["blue","indigo"]},{label:"Children",value:t,color:["purple","lilac"]},{label:"Standalone",value:a,color:["yellow","yellow120"]}]),[t,n]);return e?s.createElement(ga,{numberIndicatorProps:{TitleComponent:P.Text},data:o}):s.createElement(ro,{height:"58px"})},co=()=>s.createElement(P.Flex,{column:!0,gap:1,alignItems:"center",width:"220px","data-testid":"nodesByDataReplication-information-text",overflow:"hidden"},s.createElement(P.TextMicro,null,s.createElement(P.TextMicro,{strong:!0},"None:")," The node's data are only on the node itself. Lose the node, lose the data!"),s.createElement(P.TextMicro,null,s.createElement(P.TextMicro,{strong:!0},"Single:")," The node's metrics are replicated to one parent. The parent can trigger alerts for the child."),s.createElement(P.TextMicro,null,s.createElement(P.TextMicro,{strong:!0},"Multi:")," Same as single replication, but with the metrics replicated to more than one parent. Each parent in the hierarchy can trigger alerts for its children."),s.createElement(P.TextMicro,{margin:[2,0]},"Aim to have most of your nodes replicated to at least one parent, and your critical ones to multiple parents.")),lo=()=>s.createElement(K.A,{isBasic:!0,plain:!0,content:co,align:"bottom"},s.createElement(P.Icon,{name:"information",color:"textDescription",size:"small"})),ho=ua((()=>{const e=(0,ia.c8)(),{loaded:t,replicationFactor:n}=e||{},a=(0,s.useMemo)((()=>[{label:"None",value:(null===n||void 0===n?void 0:n.r_1)||0,color:qa.q5.r_1},{label:"Single",value:(null===n||void 0===n?void 0:n.r_2)||0,color:qa.q5.r_2},{label:"Multi",value:(null===n||void 0===n?void 0:n["r_*"])||0,color:qa.q5["r_*"]}]),[n]);return t?s.createElement(ga,{data:a}):s.createElement(ro,{height:"74px"})})),uo=()=>s.createElement(ho,{title:"Data Replication",headerInfo:s.createElement(lo,null)}),po=()=>s.createElement(P.Flex,{column:!0,gap:4,basis:"205px"},s.createElement(io,null),s.createElement(Xt.A,null),s.createElement(uo,null)),mo=()=>s.createElement(ha,null,s.createElement(Ca,null),s.createElement(Xt.A,{vertical:!0}),s.createElement(oo,null),s.createElement(Xt.A,{vertical:!0}),s.createElement(po,null));var go=n(66118),fo=n(44731);go.t1.register(go.PP,go.kc,go.A6,go.E8,go.m_,go.s$);const bo={borderWidth:0,maxBarThickness:15,minBarLength:0},yo=function(){let{alertsStats:e}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const t=(0,s.useContext)(j.ThemeContext),{hosts:n}=(null===e||void 0===e?void 0:e.nodes)||{},a={animation:!1,maintainAspectRatio:!1,plugins:{legend:{display:!1}},responsive:!0,scales:{x:{stacked:!0,grid:{color:(0,P.getColor)("borderSecondary")({theme:t})},afterSetDimensions:e=>{e.maxHeight=50},ticks:{precision:0,callback:(e,t)=>{let n=o.labels[t];if(!n)return;let s=0;for(;(null===(a=n)||void 0===a?void 0:a.length)>20;){var a;n=(0,Ia.P3)(n,s),s+=1}return n}}},y:{stacked:!0,grid:{color:(0,P.getColor)("borderSecondary")({theme:t})},afterSetDimensions:e=>{e.maxWidth=50}}}},o=(0,s.useMemo)((()=>{if(!n)return{datasets:[],labels:[]};const[e,t]=Object.entries(n).sort(((e,t)=>t[1].total-e[1].total)).reduce(((e,t)=>{let[n,{critical:s,warning:a}]=t;return 
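/* A minimal sketch of the data shaping done by the surrounding sort/reduce for the stacked "alerts per node" bars; hosts is assumed to look like { [hostname]: { critical, warning, total } }, ordered busiest-first and split into two stacked series (colors taken from the code below):
   const toChartData = (hosts) => {
     const sorted = Object.entries(hosts).sort((a, b) => b[1].total - a[1].total);
     return {
       labels: sorted.map(([name]) => name),
       datasets: [
         { label: "critical", backgroundColor: "#DB162F", data: sorted.map(([, h]) => h.critical) },
         { label: "warning", backgroundColor: "#FF9700", data: sorted.map(([, h]) => h.warning) },
       ],
     };
   };
*/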
e[0].push(n),e[1].critical?e[1].critical=[...e[1].critical,s]:e[1].critical=[s],e[1].warning?e[1].warning=[...e[1].warning,a]:e[1].warning=[a],e}),[[],{}]);return{datasets:Object.entries(t).map((e=>{let[t,n]=e;return{...bo,backgroundColor:"critical"==t?"#DB162F":"#FF9700",data:n,label:t}})),labels:e}}),[n]);return o.datasets?s.createElement(P.Flex,{width:"100%",height:40},s.createElement(fo.yP,{data:o,options:a})):null},vo=e=>{let{alertsStats:t}=e;const{nodes:n}=t||{},{critical:a,warning:o,total:r}=n||{},i=(0,s.useMemo)((()=>[{label:"Warning",value:o,color:["yellow","yellow80"]},{label:"Critical",value:a,color:["red","red100"]},{label:"Total",value:r,color:["purple","lilacFocus"],omit:!0}]),[a,o,r]);return n?s.createElement(ga,{data:i}):null},_o=[{id:"name",accessorKey:"name",header:"Alert name",fullWidth:!0,cell:e=>{let{getValue:t,row:n}=e;const a=t(),{goToAlerts:o}=Os(),r=(0,s.useCallback)((e=>{var t;e.preventDefault(),o({name:a,instance:null===n||void 0===n||null===(t=n.original)||void 0===t?void 0:t.chart})}),[o]);return s.createElement(K.A,{content:"Go to the Events tab. Timeframe will be set to the last 24 hours.",isBasic:!0},s.createElement(P.Box,null,s.createElement(Jt.A,{onClick:r},a)))}},{id:"chart",name:"chart",fullWidth:!0,header:"Instance"},{id:"occurrences",name:"occurrences",header:"Occurrences"},{id:"duration",name:"duration",header:"Duration (seconds)"}],wo=function(){let{data:e=[]}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return e.length?s.createElement(P.Flex,{height:{max:50},overflow:{vertical:"auto"}},s.createElement(P.Table,{data:e,dataColumns:_o,sortBy:[{id:"occurrences",desc:!0}],enableResizing:!0})):null},ko={title:"Nodes with the most alerts in the last 24h"},To=e=>{const t=(0,ia.jg)();return t.loaded?s.createElement(ha,(0,C.A)({titleProps:ko},e),s.createElement(yo,{alertsStats:t}),s.createElement(vo,{alertsStats:t}),s.createElement(P.Flex,{column:!0,gap:4},s.createElement(P.TextBig,null,"Top alerts in the last 24h"),s.createElement(wo,{data:null===t||void 0===t?void 0:t.alerts}))):s.createElement(ro,null)};var So=n(51913),Mo=n(1239);const xo={title:"Netdata Assistant",icon:"netdataAssistant",iconColor:"success"},Co=e=>{const[,t]=(0,So.MY)();return Mo.ei?null:s.createElement(ha,(0,C.A)({titleProps:xo},e),s.createElement(P.Text,null,"Need help? 
Ask the Netdata Assistant!"),s.createElement(P.Button,{flavour:"hollow",onClick:t},"Start conversation"))},Po=["","k","m","B","T","P","E"],Bo=e=>{if(!e)return 0;const t=Math.log10(Math.abs(e))/3|0;if(!t)return e;const n=Po[t];return(e/Math.pow(10,3*t)).toFixed(1)+n},qo={title:"Metrics collected",justifyContent:"center"},Io=e=>{const{metricsCollected:t=0}=(0,ia.P9)();return s.createElement(ha,(0,C.A)({titleProps:qo},e),s.createElement(P.Flex,{width:"100%",justifyContent:"center"},s.createElement(P.TextHuge,{strong:!0},Bo(t))))},Lo={title:"Charts visualized",justifyContent:"center"},Ao=e=>{const t=(0,l.ID)(),n=(0,Be.R7)(t),a=Object.keys(n||{}).length;return s.createElement(ha,(0,C.A)({titleProps:Lo},e),s.createElement(P.Flex,{width:"100%",justifyContent:"center"},s.createElement(P.TextHuge,{strong:!0},Bo(a))))},Eo={title:"Alerts configured",justifyContent:"center"},Do=e=>{const t=(0,m.oU)();return s.createElement(ha,(0,C.A)({titleProps:Eo},e),s.createElement(P.Flex,{width:"100%",justifyContent:"center"},s.createElement(P.TextHuge,{strong:!0},Bo((null===t||void 0===t?void 0:t.length)||0))))};go.t1.register(go.PP,go.kc,go.A6,go.E8,go.m_,go.s$);const No=function(){let{dataRetention:e=[]}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const t=(0,s.useContext)(j.ThemeContext),n={animation:!1,maintainAspectRatio:!1,plugins:{legend:{display:!1}},responsive:!0,scales:{x:{stacked:!0,grid:{color:(0,P.getColor)("borderSecondary")({theme:t})},afterSetDimensions:e=>{e.maxHeight=50},ticks:{precision:0,callback:(e,t)=>{let n=a.labels[t];if(!n)return;let s=0;for(;(null===(o=n)||void 0===o?void 0:o.length)>20;){var o;n=(0,Ia.P3)(n,s),s+=1}return n}}},y:{stacked:!0,grid:{color:(0,P.getColor)("borderSecondary")({theme:t})},afterSetDimensions:e=>{e.maxWidth=50}}}},a=(0,s.useMemo)((()=>{if(null===e||void 0===e||!e.length)return{datasets:[],labels:[]};const{values:t,labels:n}=e.reduce(((e,t)=>{let{label:n,value:s}=t;return e.labels.push(n),e.values.push(s),e}),{values:[],labels:[]});return{datasets:[{borderWidth:0,maxBarThickness:10,backgroundColor:"#0A81AB",minBarLength:0,data:t,label:"Nodes"}],labels:n}}),[e]);return a.datasets?s.createElement(P.Flex,{width:"100%",height:"100%"},s.createElement(fo.yP,{data:a,options:n})):null},Ro={title:"Data Retention per Node"},Fo=e=>{const{dataRetention:t}=(0,ia.P9)();return s.createElement(ha,(0,C.A)({titleProps:Ro},e),s.createElement(No,{dataRetention:t}))},Uo=()=>{const e=(0,_.vt)(),t=(0,l.ID)(),n=(0,L.w7)({extraKey:"nodesView",merge:!1,scoped:!0});(0,Pe.A)(t,n,{spaceId:e})};var zo=n(60177);const Oo=()=>{const e=(0,_.vt)(),t=(0,l.ID)(),n=(0,ia.N3)();(0,p.A)((()=>({enabled:!(!e||!t),force:!0,fetch:()=>(0,h.hn)(e,t),onReceive:e=>{let{data:t}=e;return n({...zo.jf,loaded:!0,...t})},onFail:e=>n({...zo.jf,loaded:!0,error:e}),pollingOptions:{pollingInterval:588e3}})),[e,t])},Ho=()=>{const e=(0,_.vt)(),t=(0,l.ID)(),n=(0,ia.s6)();(0,p.A)((()=>({enabled:!(!e||!t),force:!0,fetch:()=>(0,h.uP)(e,t),onReceive:e=>{let{data:t}=e;return n({...zo.Ml,loaded:!0,...t})},onFail:e=>n({...zo.Ml,loaded:!0,error:e}),pollingOptions:{pollingInterval:588e3}})),[e,t])},Vo=()=>{const e=(0,_.vt)(),t=(0,l.ID)(),n=(0,L.w7)({extraKey:"home",merge:!1,scoped:!0}),s=(0,ia.kf)();(0,p.A)((()=>({enabled:!(!e||!t),force:!0,fetch:()=>(0,h.TM)(e,t,n),onReceive:e=>{let{data:t}=e;return s({...zo.ul,loaded:!0,...t})},onFail:e=>s({...zo.ul,loaded:!0,error:e}),pollingOptions:{pollingInterval:588e3}})),[e,t])},Qo=()=>{const 
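/* How the Bo() abbreviation defined above behaves, sketched as a standalone re-implementation for illustration (UNITS mirrors Po; abbreviate is a placeholder name):
   const UNITS = ["", "k", "m", "B", "T", "P", "E"];
   const abbreviate = (value) => {
     if (!value) return 0;
     const tier = Math.log10(Math.abs(value)) / 3 | 0; // which power of 1000
     if (!tier) return value;                          // below 1000: unchanged
     return (value / Math.pow(10, 3 * tier)).toFixed(1) + UNITS[tier];
   };
   abbreviate(950);     // 950
   abbreviate(1234567); // "1.2m"
   abbreviate(-4.2e9);  // "-4.2B"
*/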
e=(0,_.vt)(),t=(0,l.ID)(),n=(0,L.w7)({extraKey:"home",merge:!1,scoped:!0}),s=(0,ia.xs)();(0,p.A)((()=>({enabled:!(!e||!t),force:!0,fetch:()=>(0,h.jt)(e,t,n),onReceive:e=>{let{data:t}=e;return s({...zo.ve,loaded:!0,...t})},onFail:e=>s({...zo.ve,loaded:!0,error:e}),pollingOptions:{pollingInterval:588e3}})),[e,t])},jo=()=>(Uo(),Oo(),Ho(),Vo(),Qo(),null),Go=()=>{const e=(0,oe.CK)(),t=(0,oe.nj)();return t?t&&!e.length?s.createElement(ie.Ay,{feature:"Home",mode:"NoNodesView"},s.createElement(re.A,null)):s.createElement(ie.Ay,{feature:"Home"},s.createElement(jo,null),s.createElement(le.A,{height:"calc(100% - 32px)",sidebar:s.createElement(la,null),gap:4,padding:[4],overflow:"auto"},s.createElement(mo,null),s.createElement(P.Flex,{gap:4},s.createElement(To,{width:"60%"}),s.createElement(P.Flex,{column:!0,gap:4,height:"100%",width:"calc(40% - 16px)"},s.createElement(Co,{column:!0}),s.createElement(P.Flex,{gap:4},s.createElement(Io,{flex:{grow:"1",shrink:"0"}}),s.createElement(Ao,{flex:{grow:"1",shrink:"0"}}),s.createElement(Do,{flex:{grow:"1",shrink:"0"}})),s.createElement(Fo,{flex:"grow"}))))):s.createElement(M.A,{title:"Loading room stats..."})};var Wo=n(24266);const Ko=()=>{const e=(0,Et.e)(),t=(()=>{const e=(0,_.bq)(),t=(0,l.QW)();return"/spaces/".concat(e,"/rooms/").concat(t,"/overview")})(),n=(0,o.Zp)();(0,s.useEffect)((()=>{if(e)return(0,Bs.unregister)(e.on("goToLink",((e,s)=>{var a;let o=location.pathname;return e&&e.getParent().getAttribute("roomId")&&(o=e.getParent().getId(),o=/^\/spaces/.test(o)?o:t),n(o,s?{state:{chartName:s},replace:!0}:{state:{contextToGo:null===e||void 0===e||null===(a=e.getAttribute("contextScope"))||void 0===a?void 0:a[0]},replace:!0})})))}),[e,t])},Zo=()=>(Ko(),null);var Yo=n(53285),Jo=n(69418),Xo=n(29848),$o=n(17170);const er=(0,a.A)((()=>Promise.all([n.e(6121),n.e(6323),n.e(4958)]).then(n.bind(n,52577))),"ManageSpaceRoute"),tr=(0,a.A)((()=>n.e(7436).then(n.bind(n,57436))),"Trial"),nr=(0,a.A)((()=>n.e(9912).then(n.bind(n,9912))),"PreferredNodesModal"),sr=(0,a.A)((()=>Promise.resolve().then(n.bind(n,57419))),"JoinSpace"),ar=(0,a.A)((()=>n.e(7959).then(n.bind(n,7959))),"NoSpaceAccess"),or=(0,a.A)((()=>n.e(3968).then(n.bind(n,73968))),"AlertView"),rr=(0,a.A)((()=>n.e(3104).then(n.bind(n,3104))),"Anomalies"),ir=(0,a.A)((()=>n.e(8059).then(n.bind(n,88059))),"AlertsSmartboard"),cr=(0,a.A)((()=>n.e(195).then(n.bind(n,10195))),"DashboardsOverview"),lr=(0,a.A)((()=>Promise.all([n.e(6008),n.e(785)]).then(n.bind(n,40785))),"DashboardView"),dr=(0,a.A)((()=>Promise.all([n.e(6121),n.e(5709),n.e(7340)]).then(n.bind(n,97340))),"FunctionsView"),hr=(0,a.A)((()=>Promise.all([n.e(7208),n.e(6323),n.e(7304)]).then(n.bind(n,7304))),"Integrations"),ur=(0,a.A)((()=>Promise.all([n.e(6008),n.e(7332)]).then(n.bind(n,47332))),"DropInDashboard"),pr=(0,a.A)((()=>n.e(6944).then(n.bind(n,6944))),"Aws"),{demoSlug:mr,demoDefaultRoomViews:gr,defaultRoomView:fr,integrationsView:br}=x.A,yr=xs(oa(Ts)),vr=oa(Ts),_r=(0,s.memo)((()=>{const[e,t]=(0,r.Ay)(),n=mr===e?gr[t]||gr.default:V.Ay&&e===V.z0&&t===V.gB?V.kG:fr;return 
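/* The Uo/Oo/Ho/Vo/Qo hooks above all hand the same option shape to the polling helper (p.A). A hedged sketch of that contract; usePolling, fetchStats and setState are assumed placeholder names, not bundle APIs:
   usePolling(() => ({
     enabled: Boolean(spaceId && roomId), // wait until both ids are known
     force: true,                         // fire immediately on mount
     fetch: () => fetchStats(spaceId, roomId),
     onReceive: ({ data }) => setState({ loaded: true, ...data }),
     onFail: (error) => setState({ loaded: true, error }),
     pollingOptions: { pollingInterval: 588e3 }, // 588000 ms, i.e. 9.8 minutes
   }), [spaceId, roomId]);
*/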
s.createElement(s.Suspense,{fallback:s.createElement(M.A,{title:"Loading..."})},s.createElement(s.Suspense,{fallback:""},s.createElement(ur,null)),s.createElement(o.BV,null,s.createElement(o.qh,{path:"nodes/:nodeId",element:s.createElement(yr,null)}),s.createElement(o.qh,{path:"nodes",element:s.createElement(Zt,null)}),s.createElement(o.qh,{path:"home",element:s.createElement(Go,null)}),s.createElement(o.qh,{path:"overview",element:s.createElement(vr,null)}),s.createElement(o.qh,{path:"alerts/:alertId",element:s.createElement(or,null)}),s.createElement(o.qh,{path:"alerts",element:s.createElement(ir,null)}),s.createElement(o.qh,{path:"alarms/:alertId",element:s.createElement(or,null)}),s.createElement(o.qh,{path:"alarms",element:s.createElement(o.C5,{replace:!0,to:"/spaces/".concat(e,"/rooms/").concat(t,"/alerts")})}),s.createElement(o.qh,{path:"functions",element:s.createElement(o.C5,{replace:!0,to:"/spaces/".concat(e,"/rooms/").concat(t,"/top")})}),s.createElement(o.qh,{path:"dashboard",element:s.createElement(lr,{customDashboardId:V.LA})}),s.createElement(o.qh,{path:"dashboards/:dashboardSlug",element:s.createElement(lr,null)}),s.createElement(o.qh,{path:"dashboards",element:s.createElement(cr,null)}),s.createElement(o.qh,{path:"kubernetes",element:s.createElement(vr,{flavour:"k8s"})}),s.createElement(o.qh,{path:"anomalies",element:s.createElement(rr,null)}),s.createElement(o.qh,{path:"top",element:s.createElement(dr,{key:"fn",flavour:"fn"})}),s.createElement(o.qh,{path:"logs",element:s.createElement(dr,{key:"logs",flavour:"logs"})}),s.createElement(o.qh,{path:"events",element:s.createElement(dr,{key:"feed",flavour:"feed"})}),s.createElement(o.qh,{path:"/",element:s.createElement(o.C5,{replace:!0,to:"/spaces/".concat(e,"/rooms/").concat(t,"/").concat(n)})})))})),wr=()=>{const e=(0,o.Zp)(),[t,n]=(0,r.Ay)(),[a,i]=(0,Jo.A)(),c=(0,_.vt)(),w=(0,l.ID)(),[k,T]=(0,s.useState)(!0),[S,x]=(0,Xo.pp)(),C=(0,$o.A)();return((e,t)=>{const n=(0,d.Zs)((n=>{let{snapshot:s,set:a}=n;return async()=>{await s.getPromise((0,l.LS)({id:t,key:"fullyLoaded"}))||(0,h.K8)(e,t).then((t=>{let{data:n}=t;return a((0,l.If)(n.id),{...n,spaceId:e,fullyLoaded:!0,loaded:!0})}))}}),[e,t]);(0,s.useEffect)((()=>{e&&t&&n()}),[e,t])})(c,w),(0,u.A)({spaceId:c,id:w,pollingInterval:63e3}),function(e,t){let{polling:n=!0}=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{};const s=(0,d.Zs)((e=>{let{set:n}=e;return function(){let{data:e=[]}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};n((0,b.t)({id:t,key:"ids"}),e.map((e=>e.id))),n((0,b.t)({id:t,key:"loaded"}),!0),n((0,b.t)({id:t,key:"updatedAt"}),(new Date).toISOString()),n((0,b.t)({id:t,key:"error"}),null),e.forEach((e=>{n((0,m.SB)({id:e.id}),(t=>({...t,loaded:!0,...e})))}));const s=e.reduce(((e,t)=>{let{id:n,nodeId:s}=t;return e[s]=[...e[s]||[],n],e}),{});n(f.l3,s)}}),[e,t]),a=(0,d.Zs)((e=>{let{set:n}=e;return e=>{n((0,b.t)({id:t,key:"error"}),(0,y.A)(e))}}));(0,p.A)((()=>({enabled:!!e&&!!t,polling:n,fetch:()=>(0,g.l1)(e,t),onFail:a,onReceive:s,force:!0})),[e,t])}(c,w),(0,v.A)(c,w),function(e,t){let{types:n,entities:a,paths:o,names:r}=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{};const i=(0,d.Zs)((s=>{let{set:i}=s;return async()=>{(0,ra.Uc)(e,t,{types:n,entities:a,paths:o,names:r}).then((n=>{let{data:s}=n;return i((0,es.Mg)({spaceId:e,roomId:t}),s)}))}}));(0,s.useEffect)((()=>{e&&t&&i()}),[e,t,n,a,o,r])}(c,w),(0,s.useEffect)((()=>{let s=!0,o=null;return 
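/* Room URL scheme implied by the route definitions above; spaceSlug and roomSlug are router params, and the legacy paths redirect to their current equivalents (alarms to alerts, functions to top). A small helper sketch (roomPath is a placeholder name):
   const roomPath = (space, room, view) =>
     "/spaces/".concat(space, "/rooms/").concat(room, "/").concat(view);
   roomPath("my-space", "my-room", "alerts"); // "/spaces/my-space/rooms/my-room/alerts"
*/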
o=setTimeout((()=>{s&&(a&&!C&&e("/spaces/".concat(t,"/rooms/").concat(n,"/").concat(br),{replace:!0}),T(!1))}),1e3),()=>{s=!1,o&&(clearTimeout(o),o=null)}}),[a,C]),(0,s.useEffect)((()=>{x(k||!w||i)}),[k,w,i]),C?s.createElement(s.Suspense,{fallback:""},s.createElement(pr,{subCallback:C})):S?s.createElement(M.A,{title:"Loading room..."}):s.createElement(_r,null)},kr=(0,Wo.Xc)((0,s.memo)((()=>{const{isIntegrationsPath:e}=(0,Jo.Q)(),t=(0,Xo.Hs)(),n=(0,c.JT)("space:Read"),a=(0,c.JT)("room:Read");return n?s.createElement(s.Fragment,null,!t&&!e&&s.createElement(ae,null),s.createElement(s.Suspense,{fallback:s.createElement(M.A,{title:"Loading your space..."})},s.createElement(o.BV,null,s.createElement(o.qh,{path:"spaces/:spaceSlug/join-space",element:s.createElement(sr,null)}),s.createElement(o.qh,{path:"spaces/:spaceSlug/no-rooms",element:s.createElement(ar,{reason:"noRooms"})}),s.createElement(o.qh,{path:"spaces/:spaceSlug/settings/*",element:s.createElement(Tr,null)}),s.createElement(o.qh,{path:"spaces/:spaceSlug/rooms/:roomSlug/".concat(br),element:s.createElement(hr,null)}),s.createElement(o.qh,{path:"spaces/:spaceSlug/rooms/:roomSlug/*",element:a?s.createElement(wr,null):s.createElement(ar,{reason:"noRoomPermission"})})))):s.createElement(ar,{reason:"noSpacePermission"})}))),Tr=()=>((0,r.KI)(),s.createElement(er,null)),Sr=()=>{const e=(0,_.vt)();return(0,w.A)(e),(0,k.A)(e),(e=>{const t=(0,d.Zs)((t=>{let{set:n}=t;return function(){let{data:{results:t}}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};n((0,T.S)({id:e}),t)}})),n=(0,d.Zs)((t=>{let{set:n}=t;return t=>n((0,T.S)({id:e,key:"error"}),t)})),s=(0,S.sC)(e,"loaded"),a=(0,c.JT)("alert:ReadAll");(0,p.A)((()=>({enabled:a&&!!e&&s,fetch:()=>(0,g.C2)(e),onFail:n,onReceive:t,pollingOptions:{pollingInterval:15e4}})),[e,!!s])})(e),null},Mr=(0,Wo.Xc)((()=>{const e=(0,_.vt)();return(0,i.Ay)(),(0,s.useEffect)((()=>{const e=new URL(window.location.href).searchParams.get("join_callback");e&&(window.location.href=e)}),[]),e?s.createElement(s.Fragment,null,s.createElement(Sr,null),s.createElement(Yo.A,{permission:"space:Read"},(t=>t?s.createElement(s.Fragment,null,s.createElement(o.BV,null,s.createElement(o.qh,{path:"/*",element:s.createElement(Zo,null)})),s.createElement(s.Suspense,{fallback:s.createElement(M.A,{title:"Loading your space..."})},s.createElement(kr,null)),s.createElement(s.Suspense,{fallback:""},s.createElement(tr,null)),s.createElement(s.Suspense,{fallback:""},s.createElement(nr,null))):s.createElement(ar,{key:e,reason:"noSpacePermission"})))):null}))},90204:(e,t,n)=>{n.d(t,{A:()=>i});n(62953);var s=n(96540),a=n(47444),o=n(96935),r=n(64118);const i=()=>{const[e,t]=(0,a.L4)(o.J7),n=(0,r.ud)(),i=(0,s.useCallback)((s=>{s===e&&n(),t(s)}),[e,t]);return{selectedTab:e,setSelectedTab:t,handleTabChange:i}}},33195:(e,t,n)=>{n.d(t,{A:()=>u});n(41393),n(81454),n(62953);var s=n(96540),a=n(47767),o=n(83199),r=n(29217),i=n(97054),c=n(37171);const l={SCHEDULED:{icon:"scheduled",tooltip:"scheduled to be silenced"},SILENCED:{icon:"alarm_off",tooltip:"silenced"}},d=(h=o.Pill,e=>{let{tooltip:t,...n}=e;return s.createElement(r.A,{align:"bottom",content:t},s.createElement(o.Box,null,s.createElement(h,n)))});var h;const u=e=>{let{silencing:t,flavour:n="alert"}=e;const[o]=(0,i.Ay)(),r=(0,a.Zp)(),{state:h,rules:u=[]}=t||{},p=u.map((e=>{let{id:t}=e;return t})),m=(0,c.z)(),g=(0,s.useMemo)((()=>{var e;return null===(e=l[h])||void 0===e?void 
0:e.icon}),[h]),f=(0,s.useCallback)((()=>{m(p),r("/spaces/".concat(o,"/settings/notifications#notificationsActiveTab=1"))}),[r,m,p]),b=(0,s.useMemo)((()=>{var e,t,n;return{alert:{tooltip:"This alert is ".concat(null===(e=l[h])||void 0===e?void 0:e.tooltip)},node:{tooltip:"This node is ".concat(null===(t=l[h])||void 0===t?void 0:t.tooltip)},room:{tooltip:"This room is ".concat(null===(n=l[h])||void 0===n?void 0:n.tooltip)}}}),[g]),y=(0,s.useMemo)((()=>({icon:g,...u.length?{onClick:f}:{},flavour:"neutral",children:h,...b[n]||{}})),[g,h]);return h&&"NONE"!=h?s.createElement(d,y):"-"}},12412:(e,t,n)=>{n.d(t,{G:()=>r});var s=n(58168),a=n(96540),o=n(92230);const r=(0,n(55309).HB)((e=>e.children?a.createElement(o.A,(0,s.A)({as:"p",role:"document"},e)):null),(e=>{let{info:t}=e;return{children:t}}))},10952:(e,t,n)=>{n.d(t,{_:()=>d});var s=n(58168),a=n(96540),o=n(83199),r=n(29217),i=n(55309),c=n(12412);const l=(0,a.forwardRef)(((e,t)=>{let{name:n,tiny:r,...i}=e;const c=r?o.Text:o.H3;return"string"===typeof n?a.createElement(c,(0,s.A)({strong:!0},i,{ref:t}),n):n||null})),d=(0,i.HB)((e=>{let{hasTooltip:t,hasInfo:n,...s}=e;return t&&n?a.createElement(r.A,{content:a.createElement(c.G,{id:s.id}),isBasic:!0},a.createElement(l,s)):a.createElement(l,s)}),(e=>{let{name:t,info:n}=e;return{name:t,hasInfo:!!n}}))},44741:(e,t,n)=>{n.d(t,{k:()=>r});var s=n(58168),a=n(96540),o=n(83199);const r=(0,a.forwardRef)(((e,t)=>{let{id:n,...r}=e;return a.createElement(o.Flex,(0,s.A)({column:!0,"data-menuid":n,ref:t},r))}))},6551:(e,t,n)=>{n.d(t,{X:()=>i});var s=n(58168),a=n(96540),o=n(59090),r=n(92230);const i=(0,o.A)((e=>e.children?a.createElement(r.A,(0,s.A)({as:"p",role:"document"},e)):null),(e=>{let{info:t}=e;return{children:t}}))},37031:(e,t,n)=>{n.d(t,{t:()=>d});var s=n(58168),a=n(96540),o=n(83199),r=n(29217),i=n(59090),c=n(6551);const l=(0,a.forwardRef)(((e,t)=>{let{name:n,...r}=e;return"string"===typeof n?a.createElement(o.Text,(0,s.A)({strong:!0,ref:t},r),n):n||null})),d=(0,i.A)((e=>{let{hasTooltip:t,hasInfo:n,...s}=e;return t&&n?a.createElement(r.A,{content:a.createElement(c.X,{id:s.id}),isBasic:!0},a.createElement(l,s)):a.createElement(l,s)}),(e=>{let{name:t,info:n}=e;return{name:t,hasInfo:!!n}}))},74258:(e,t,n)=>{n.d(t,{t:()=>c});var s=n(58168),a=n(96540),o=n(83199),r=n(59090);const i=function(){let{link:e}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return e},c=(0,a.forwardRef)(((e,t)=>{let{id:n,...c}=e;const l=(0,r.jR)(n,i);return a.createElement(o.Flex,(0,s.A)({column:!0,gap:2,id:l,"data-submenuid":n,ref:t},c))}))},4967:(e,t,n)=>{n.d(t,{A:()=>Qe,L:()=>Ve});n(14905),n(98992),n(8872),n(62953);var s=n(81395);const a="The amount of traffic transferred by the network interface.",o='The number of packets transferred by the network interface. Received multicast counter is commonly calculated at the device level (unlike received) and therefore may include packets which did not reach the host.',r="

    The number of errors encountered by the network interface.

    Inbound - bad packets received on this interface. It includes dropped packets due to invalid length, CRC, frame alignment, and other errors. Outbound - transmit problems. It includes frame transmission errors due to loss of carrier, FIFO underrun/underflow, heartbeat, late collisions, and other problems.
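    As a quick cross-check on Linux, the same per-interface error counters are typically visible in the output of ip -s link.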

    ",i="

    The number of FIFO errors encountered by the network interface.

    Inbound - packets dropped because they did not fit into buffers provided by the host, e.g. packets larger than the MTU, or the next buffer in the ring was not available for a scatter transfer. Outbound - frame transmission errors due to device FIFO underrun/underflow. This condition occurs when the device begins transmission of a frame but is unable to deliver the entire frame to the transmitter in time for transmission.

    ",c='

    The number of packets that have been dropped at the network interface level.

    Inbound - packets received but not processed, e.g. due to softnet backlog overflow, bad/unintended VLAN tags, unknown or unregistered protocols, IPv6 frames when the server is not configured for IPv6. Outbound - packets dropped on their way to transmission, e.g. due to lack of resources.

    ',l="The number of correctly transferred compressed packets by the network interface. These counters are only meaningful for interfaces which support packet compression (e.g. CSLIP, PPP).",d='

    The number of errors encountered by the network interface.

    Frames - aggregated counter for dropped packets due to invalid length, FIFO overflow, CRC, and frame alignment errors. Collisions - collisions during packet transmissions. Carrier - aggregated counter for frame transmission errors due to excessive collisions, loss of carrier, device FIFO underrun/underflow, Heartbeat/SQE Test errors, and late collisions.

    ',h='

    The interface\'s latest or current duplex mode that the network adapter negotiated with the device it is connected to.

    Unknown - the duplex mode cannot be determined. Half duplex - communication happens in one direction at a time. Full duplex - the interface is able to send and receive data simultaneously.
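    On Linux, you can typically verify the negotiated duplex mode with ethtool.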

    ',u='

    The current operational state of the interface.

    Unknown - the state cannot be determined. NotPresent - the interface has missing (typically, hardware) components. Down - the interface is unable to transfer data on L1, e.g. the ethernet cable is not plugged in or the interface is administratively down. LowerLayerDown - the interface is down due to the state of lower-layer interface(s). Testing - the interface is in testing mode, e.g. a cable test. It can\u2019t be used for normal traffic until tests complete. Dormant - the interface is L1 up, but waiting for an external event, e.g. for a protocol to establish a connection. Up - the interface is ready to pass packets and can be used.
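    On Linux, the current value is typically exposed in /sys/class/net/IFACE/operstate.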

    ',p="The current physical link state of the interface.",m='The interface\'s latest or current speed that the network adapter negotiated with the device it is connected to. This does not give the max supported speed of the NIC.',g='The interface\'s currently configured Maximum transmission unit (MTU) value. MTU is the size of the largest protocol data unit that can be communicated in a single network layer transaction.',f=' This chart is provided by the eBPF plugin.',b='Number of times a function that starts a process is called. Netdata gives a summary for this chart in Process, and when the integration is enabled, Netdata shows process per application.'+f,y='Number of times a function that starts a thread is called. Netdata gives a summary for this chart in Process, and when the integration is enabled, Netdata shows process per application.'+f,v='Number of times a function that responsible for closing tasks is called. Netdata gives a summary for this chart in Process, and when the integration is enabled, Netdata shows process per application.'+f,_='Number of times a function that responsible for releasing tasks is called. Netdata gives a summary for this chart in Process, and when the integration is enabled, Netdata shows process per application.'+f,w='Number of errors to create a new task. Netdata gives a summary for this chart in Process, and when the integration is enabled, Netdata shows process per application.'+f,k='Number of calls for internal functions on the Linux kernel responsible to open files. Netdata gives a summary for this chart in file access, and when the integration is enabled, Netdata shows virtual file system per application.'+f,T='Number of failed calls for internal functions on the Linux kernel responsible to open files. Netdata gives a summary for this chart in file access, and when the integration is enabled, Netdata shows virtual file system per application.'+f,S='Number of calls for internal functions on the Linux kernel responsible to close files. Netdata gives a summary for this chart in file access, and when the integration is enabled, Netdata shows virtual file system per application.'+f,M='Number of failed calls for internal functions on the Linux kernel responsible to close files. Netdata gives a summary for this chart in file access, and when the integration is enabled, Netdata shows virtual file system per application.'+f,x='Percentage of file accesses that were present in the directory cache. Netdata gives a summary for this chart in directory cache, and when the integration is enabled, Netdata shows directory cache per application.'+f,C='Number of times a file is accessed inside directory cache. Netdata gives a summary for this chart in directory cache, and when the integration is enabled, Netdata shows directory cache per application.'+f,P='Number of times a file is accessed in the file system, because it is not present inside the directory cache. Netdata gives a summary for this chart in directory cache, and when the integration is enabled, Netdata shows directory cache per application.'+f,B='Number of times a file was not found on the file system. Netdata gives a summary for this chart in directory cache, and when the integration is enabled, Netdata shows directory cache per application.'+f,q='Number of successful calls to VFS writer function. Netdata gives a summary for this chart in Virtual File System, and when the integration is enabled, Netdata shows virtual file system per application.'+f,I='Number of successful calls to VFS reader function. 
Netdata gives a summary for this chart in Virtual File System, and when the integration is enabled, Netdata shows virtual file system per application.'+f,L='Number of failed calls to the VFS writer function. Netdata gives a summary for this chart in Virtual File System, and when the integration is enabled, Netdata shows virtual file system per application.'+f,A='Number of failed calls to the VFS reader function. Netdata gives a summary for this chart in Virtual File System, and when the integration is enabled, Netdata shows virtual file system per application.'+f,E='Total bytes successfully written using the VFS writer function. Netdata gives a summary for this chart in Virtual File System, and when the integration is enabled, Netdata shows virtual file system per application.'+f,D='Total bytes successfully read using the VFS reader function. Netdata gives a summary for this chart in Virtual File System, and when the integration is enabled, Netdata shows virtual file system per application.'+f,N='Number of calls to the VFS unlinker function. Netdata gives a summary for this chart in Virtual File System, and when the integration is enabled, Netdata shows virtual file system per application.'+f,R='Number of calls to the VFS syncer function. Netdata gives a summary for this chart in Virtual File System, and when the integration is enabled, Netdata shows virtual file system per application.'+f,F='Number of failed calls to the VFS syncer function. Netdata gives a summary for this chart in Virtual File System, and when the integration is enabled, Netdata shows virtual file system per application.'+f,U='Number of calls to the VFS opener function. Netdata gives a summary for this chart in Virtual File System, and when the integration is enabled, Netdata shows virtual file system per application.'+f,z='Number of failed calls to the VFS opener function. Netdata gives a summary for this chart in Virtual File System, and when the integration is enabled, Netdata shows virtual file system per application.'+f,O='Number of calls to the VFS creator function. Netdata gives a summary for this chart in Virtual File System, and when the integration is enabled, Netdata shows virtual file system per application.'+f,H='Number of failed calls to the VFS creator function. Netdata gives a summary for this chart in Virtual File System, and when the integration is enabled, Netdata shows virtual file system per application.'+f,V='Number of failed calls to the swap reader function. Netdata gives a summary for this chart in System Overview, and when the integration is enabled, Netdata shows swap per application.'+f,Q='Number of failed calls to the swap writer function. Netdata gives a summary for this chart in System Overview, and when the integration is enabled, Netdata shows swap per application.'+f,j='The ratio shows the percentage of data accessed directly in memory. Netdata gives a summary for this chart in Memory, and when the integration is enabled, Netdata shows page cache hit per application.'+f,G='Number of modified pages in the Linux page cache. Netdata gives a summary for this chart in Memory, and when the integration is enabled, Netdata shows page cache hit per application.'+f,W='Number of accesses to data in the Linux page cache. Netdata gives a summary for this chart in Memory, and when the integration is enabled, Netdata shows page cache hit per application.'+f,K='Number of accesses to data that was not present in the Linux page cache. 
Netdata gives a summary for this chart in Memory, and when the integration is enabled, Netdata shows page cache misses per application.'+f,Z='Number of calls to shmget. Netdata gives a summary for this chart in System Overview, and when the integration is enabled, Netdata shows shared memory metrics per application.'+f,Y='Number of calls to shmat. Netdata gives a summary for this chart in System Overview, and when the integration is enabled, Netdata shows shared memory metrics per application.'+f,J='Number of calls to shmctl. Netdata gives a summary for this chart in System Overview, and when the integration is enabled, Netdata shows shared memory metrics per application.'+f,X='Number of calls to shmdt. Netdata gives a summary for this chart in System Overview, and when the integration is enabled, Netdata shows shared memory metrics per application.'+f,$='Number of calls to the IPv4 TCP function responsible for starting connections. Netdata gives a summary for this chart in Network Stack. When the integration is enabled, Netdata shows outbound connections per application.'+f,ee='Number of calls to the IPv6 TCP function responsible for starting connections. Netdata gives a summary for this chart in Network Stack. When the integration is enabled, Netdata shows outbound connections per application.'+f,te='Total bytes sent with TCP or UDP internal functions. Netdata gives a summary for this chart in Network Stack. When the integration is enabled, Netdata shows bandwidth per application.'+f,ne='Total bytes received with TCP or UDP internal functions. Netdata gives a summary for this chart in Network Stack. When the integration is enabled, Netdata shows bandwidth per application.'+f,se='Number of calls to TCP functions responsible for sending data. Netdata gives a summary for this chart in Network Stack. When the integration is enabled, Netdata shows TCP calls per application.'+f,ae='Number of calls to TCP functions responsible for receiving data. Netdata gives a summary for this chart in Network Stack. When the integration is enabled, Netdata shows TCP calls per application.'+f,oe='Number of times a TCP packet was retransmitted. Netdata gives a summary for this chart in Network Stack. When the integration is enabled, Netdata shows TCP calls per application.'+f,re='Number of calls to UDP functions responsible for sending data. Netdata gives a summary for this chart in Network Stack. When the integration is enabled, Netdata shows UDP calls per application.'+f,ie='Number of calls to UDP functions responsible for receiving data. Netdata gives a summary for this chart in Network Stack. When the integration is enabled, Netdata shows UDP calls per application.'+f,ce="Total CPU utilization within the configured or system-wide (if not set) limits. When the CPU utilization of a cgroup exceeds the limit for the configured period, the tasks belonging to its hierarchy will be throttled and are not allowed to run again until the next period.",le='Total CPU utilization within the system-wide CPU resources (all cores). The amount of time spent by tasks of the cgroup in user and kernel modes.',de="The percentage of runnable periods when tasks in a cgroup have been throttled. The tasks have not been allowed to run because they have exhausted all of the available time as specified by their CPU quota.",he="The total time duration for which tasks in a cgroup have been throttled. When an application has used its allotted CPU quota for a given period, it gets throttled until the next period.",ue="

    The weight of each group living in the same hierarchy, which translates into the amount of CPU it is expected to get. The percentage of CPU assigned to the cgroup is the value of shares divided by the sum of all shares in all cgroups at the same level.

    For example, tasks in two cgroups that have cpu.shares set to 100 will receive equal CPU time, but tasks in a cgroup that has cpu.shares set to 200 receive twice the CPU time of tasks in a cgroup where cpu.shares is set to 100.
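    Put differently, the expected share is the cgroup's cpu.shares divided by the sum of cpu.shares across all sibling cgroups: with siblings set to 200, 100 and 100, the first one can expect 200/400 = 50% of the CPU time.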

    ",pe="Total CPU utilization per core within the system-wide CPU resources.",me='CPU Pressure Stall Information. Some indicates the share of time in which at least some tasks are stalled on CPU. The ratios are tracked as recent trends over 10-, 60-, and 300-second windows.',ge="The amount of time some processes have been waiting for CPU time.",fe='CPU Pressure Stall Information. Full indicates the share of time in which all non-idle tasks are stalled on CPU resource simultaneously. The ratios are tracked as recent trends over 10-, 60-, and 300-second windows.',be="The amount of time all non-idle processes have been stalled due to CPU congestion.",ye="RAM utilization within the configured or system-wide (if not set) limits. When the RAM utilization of a cgroup exceeds the limit, OOM killer will start killing the tasks belonging to the cgroup.",ve="RAM usage within the configured or system-wide (if not set) limits. When the RAM usage of a cgroup exceeds the limit, OOM killer will start killing the tasks belonging to the cgroup.",_e="The amount of used RAM and swap memory.",we='Memory usage statistics. The individual metrics are described in the memory.stat section for cgroup-v1 and cgroup-v2.',ke="The number of memory usage hits limits.",Te="Dirty is the amount of memory waiting to be written to disk. Writeback is how much memory is actively being written to disk.",Se="

    Memory accounting statistics.

    In - a page is accounted to the cgroup as either a mapped anon page (RSS) or a cache page (Page Cache). Out - a page is unaccounted from the cgroup.

    ",Me='

    Memory page fault statistics.

    Pgfault - all page faults. Swap - major page faults.

    ',xe='Memory Pressure Stall Information. Some indicates the share of time in which at least some tasks are stalled on memory. In this state the CPU is still doing productive work. The ratios are tracked as recent trends over 10-, 60-, and 300-second windows.',Ce="The amount of time some processes have been waiting due to memory congestion.",Pe='Memory Pressure Stall Information. Full indicates the share of time in which all non-idle tasks are stalled on memory resource simultaneously. In this state actual CPU cycles are going to waste, and a workload that spends extended time in this state is considered to be thrashing. This has severe impact on performance. The ratios are tracked as recent trends over 10-, 60-, and 300-second windows.',Be="The amount of time all non-idle processes have been stalled due to memory congestion.",qe="The amount of data transferred to and from specific devices as seen by the CFQ scheduler. It is not updated when the CFQ scheduler is operating on a request queue.",Ie="The number of I/O operations performed on specific devices as seen by the CFQ scheduler.",Le="The number of requests queued for I/O operations.",Ae="The number of BIOS requests merged into requests for I/O operations.",Ee="The amount of data transferred to and from specific devices as seen by the throttling policy.",De="The number of processes currently in the cgroup.",Ne="The number of I/O operations performed on specific devices as seen by the throttling policy.",Re='I/O Pressure Stall Information. Some indicates the share of time in which at least some tasks are stalled on I/O. In this state the CPU is still doing productive work. The ratios are tracked as recent trends over 10-, 60-, and 300-second windows.',Fe="The amount of time some processes have been waiting due to I/O congestion.",Ue='I/O Pressure Stall Information. Full line indicates the share of time in which all non-idle tasks are stalled on I/O resource simultaneously. In this state actual CPU cycles are going to waste, and a workload that spends extended time in this state is considered to be thrashing. This has severe impact on performance. The ratios are tracked as recent trends over 10-, 60-, and 300-second windows.',ze="The amount of time all non-idle processes have been stalled due to I/O congestion.",Oe={"system.cpu":{aggregationMethod:"avg",info:()=>'Total CPU utilization (all cores). 100% here means there is no CPU idle time at all. You can get per core usage at the CPUs section and per application usage at the Applications Monitoring section.',valueRange:[0,100],en:{instance:{one:"system",other:"systems"}},mainheads:[{groupBy:["instance"],showPostAggregations:!0,postGroupBy:["selected"],aggregationMethod:"avg",urlOptions:[],chartLibrary:"gauge",valueRange:[0,100],title:"Average CPU per Node",colors:s.default[0],layout:{left:3.4,top:0,width:2.6,height:5}},{groupBy:["node"],aggregationMethod:"sum",chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Top Nodes by CPU",layout:{left:3.4,top:5,width:2.6,height:5}}]},"system.load":{mainheads:[{aggregationMethod:"avg",chartLibrary:"dygraph",sparkline:!0,overlays:{latestValue:{type:"latestValue"}},hasToolbox:!1,selectedDimensions:["load1"],title:"Average System Load (1 min)",colors:s.default[0],layout:{left:10.3,top:5,width:1.7,height:5}}],aggregationMethod:"avg",info:'Current system load, i.e. the number of processes using CPU or waiting for system resources (usually CPU and disk). The 3 metrics refer to 1, 5 and 15 minute averages. 
The system calculates this once every 5 seconds. For more information check this wikipedia article.',en:{instance:{one:"system",other:"systems"}},dimensionsOnNonDimensionGrouping:["load1"]},"system.cpu_some_pressure":{aggregationMethod:"avg",mainheads:[{aggregationMethod:"avg",selectedDimensions:["some 10"],chartLibrary:"dygraph",sparkline:!0,overlays:{latestValue:{type:"latestValue"}},hasToolbox:!1,title:"Average CPU Pressure (10 sec, some)",colors:s.default[0],layout:{left:1.7,top:5,width:1.7,height:5}}],info:'CPU Pressure Stall Information. Some indicates the share of time in which at least some tasks are stalled on CPU. The ratios are tracked as recent trends over 10-, 60-, and 300-second windows.',en:{instance:{one:"system",other:"systems"}}},"system.cpu_some_pressure_stall_time":{info:"The amount of time some processes have been waiting for CPU time.",en:{instance:{one:"system",other:"systems"}}},"system.memory_some_pressure":{aggregationMethod:"avg",info:'Memory Pressure Stall Information. Some indicates the share of time in which at least some tasks are stalled on memory. In this state the CPU is still doing productive work. The ratios are tracked as recent trends over 10-, 60-, and 300-second windows.',en:{instance:{one:"system",other:"systems"}}},"system.memory_some_pressure_stall_time":{info:"The amount of time some processes have been waiting due to memory congestion.",en:{instance:{one:"system",other:"systems"}}},"system.memory_full_pressure":{aggregationMethod:"avg",mainheads:[{aggregationMethod:"avg",selectedDimensions:["full 10"],chartLibrary:"dygraph",sparkline:!0,overlays:{latestValue:{type:"latestValue"}},hasToolbox:!1,title:"Average Memory Pressure (10 sec, full)",colors:s.default[0],layout:{left:8.6,top:5,width:1.7,height:5}}],info:'Memory Pressure Stall Information. Full indicates the share of time in which all non-idle tasks are stalled on memory resource simultaneously. In this state actual CPU cycles are going to waste, and a workload that spends extended time in this state is considered to be thrashing. This has severe impact on performance. The ratios are tracked as recent trends over 10-, 60-, and 300-second windows.',en:{instance:{one:"system",other:"systems"}}},"system.memory_full_pressure_stall_time":{info:"The amount of time all non-idle processes have been stalled due to memory congestion.",en:{instance:{one:"system",other:"systems"}}},"system.io_some_pressure":{aggregationMethod:"avg",info:'I/O Pressure Stall Information. Some indicates the share of time in which at least some tasks are stalled on I/O. In this state the CPU is still doing productive work. The ratios are tracked as recent trends over 10-, 60-, and 300-second windows.',en:{instance:{one:"system",other:"systems"}}},"system.io_some_pressure_stall_time":{info:"The amount of time some processes have been waiting due to I/O congestion.",en:{instance:{one:"system",other:"systems"}}},"system.io_full_pressure":{aggregationMethod:"avg",mainheads:[{aggregationMethod:"avg",selectedDimensions:["full 10"],chartLibrary:"dygraph",sparkline:!0,overlays:{latestValue:{type:"latestValue"}},hasToolbox:!1,title:"Average Disk I/O Pressure (10 sec, full)",colors:s.default[0],layout:{left:0,top:5,width:1.7,height:5}}],info:'I/O Pressure Stall Information. Full line indicates the share of time in which all non-idle tasks are stalled on I/O resource simultaneously. In this state actual CPU cycles are going to waste, and a workload that spends extended time in this state is considered to be thrashing. 
This has severe impact on performance. The ratios are tracked as recent trends over 10-, 60-, and 300-second windows.',en:{instance:{one:"system",other:"systems"}}},"system.io_full_pressure_stall_time":{info:"The amount of time all non-idle processes have been stalled due to I/O congestion.",en:{instance:{one:"system",other:"systems"}}},"system.io":{info:e=>{let{os:t}=e;var n='Total Disk I/O, for all physical disks. You can get detailed information about each disk at the Disks section and per application Disk usage at the Applications Monitoring section.';return"linux"===t?n+" Physical are all the disks that are listed in /sys/block, but do not exist in /sys/devices/virtual/block.":n},en:{instance:{one:"system",other:"systems"}},mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",selectedDimensions:["in"],title:"Total Disk Reads",layout:{left:1.7,top:0,width:1.7,height:5}},{groupBy:["selected"],chartLibrary:"easypiechart",selectedDimensions:["out"],title:"Total Disk Writes",layout:{left:0,top:0,width:1.7,height:5}}]},"system.pgpgio":{info:"Memory paged from/to disk. This is usually the total disk I/O of the system.",en:{instance:{one:"system",other:"systems"}},mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",selectedDimensions:["in"],title:"Total Pages read from Disk",hiddenWhen:"system.io",layout:{left:1.6,top:0,width:1.6,height:5}},{groupBy:["selected"],chartLibrary:"easypiechart",selectedDimensions:["out"],title:"Total Pages written to Disk",hiddenWhen:"system.io",layout:{left:1.6,top:0,width:1.6,height:5}}]},"system.swapio":{info:"

    System swap I/O.

    In - pages the system has swapped in from disk to RAM. Out - pages the system has swapped out from RAM to disk.",en:{instance:{one:"system",other:"systems"}}},"system.pgfaults":{info:'Total page faults. Major page faults indicate that the system is using its swap. You can find which applications use the swap at the Applications Monitoring section.',en:{instance:{one:"system",other:"systems"}}},"system.entropy":{aggregationMethod:"min",colors:s.default[5],info:'Entropy is a pool of random numbers (/dev/random) that is mainly used in cryptography. If the pool of entropy gets empty, processes requiring random numbers may run a lot slower (it depends on the interface each program uses), waiting for the pool to be replenished. Ideally a system with high entropy demands should have a hardware device for that purpose (TPM is one such device). There are also several software-only options you may install, like haveged, although these are generally useful only in servers.',en:{instance:{one:"system",other:"systems"}},dimensionsOnNonDimensionGrouping:["entropy"]},"system.clock_sync_state":{info:'

    The system clock synchronization state as provided by the ntp_adjtime() system call. An unsynchronized clock may be the result of synchronization issues in the NTP daemon or a hardware clock fault. It can take several minutes (usually up to 17) before the NTP daemon selects a server to synchronize with.

    State map: 0 - not synchronized, 1 - synchronized.
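    On systemd-based systems, timedatectl reports the same condition on its "System clock synchronized" line.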

    ',en:{instance:{one:"system",other:"systems"}}},"system.clock_status":{info:'

    The kernel code can operate in various modes and with various features enabled or disabled, as selected by the ntp_adjtime() system call. The system clock status shows the value of the time_status variable in the kernel. The bits of the variable are used to control these functions and record error conditions as they exist.

    UNSYNC - set/cleared by the caller to indicate clock unsynchronized (e.g., when no peers are reachable). This flag is usually controlled by an application program, but the operating system may also set it. CLOCKERR - set/cleared by the external hardware clock driver to indicate hardware fault.

    Status map: 0 - bit unset, 1 - bit set.

    ',en:{instance:{one:"system",other:"systems"}}},"system.clock_sync_offset":{aggregationMethod:"avg",info:'A typical NTP client regularly polls one or more NTP servers. The client must compute its time offset and round-trip delay. Time offset is the difference in absolute time between the two clocks.',en:{instance:{one:"system",other:"systems"}}},"system.forks":{colors:s.default[6],info:"The number of new processes created.",en:{instance:{one:"system",other:"systems"}}},"system.intr":{colors:s.default[1],info:'Total number of CPU interrupts. Check system.interrupts that gives more detail about each interrupt and also the CPUs section where interrupts are analyzed per CPU core.',en:{instance:{one:"system",other:"systems"}}},"system.interrupts":{info:'CPU interrupts in detail. At the CPUs section, interrupts are analyzed per CPU core. The last column in /proc/interrupts provides an interrupt description or the device name that registered the handler for that interrupt.',en:{instance:{one:"system",other:"systems"}}},"system.hardirq_latency":{aggregationMethod:"avg",info:'Total time spent servicing hardware interrupts. Based on the eBPF hardirqs from BCC tools.'+f+'
    ',en:{instance:{one:"system",other:"systems"}}},"system.softirqs":{info:'

    Total number of software interrupts in the system. At the CPUs section, softirqs are analyzed per CPU core.

    HI - high priority tasklets. TIMER - tasklets related to timer interrupts. NET_TX, NET_RX - used for network transmit and receive processing. BLOCK - handles block I/O completion events. IRQ_POLL - used by the IO subsystem to increase performance (a NAPI like approach for block devices). TASKLET - handles regular tasklets. SCHED - used by the scheduler to perform load-balancing and other scheduling tasks. HRTIMER - used for high-resolution timers. RCU - performs read-copy-update (RCU) processing.

    ',en:{instance:{one:"system",other:"systems"}}},"system.softnet_stat":{en:{instance:{one:"system",other:"systems"}},dimensionsOnNonDimensionGrouping:["processed"]},"system.softirq_latency":{aggregationMethod:"avg",info:'Total time spent servicing software interrupts. Based on the eBPF softirqs from BCC tools.'+f+'
    ',en:{instance:{one:"system",other:"systems"}}},"system.processes":{info:"

    System processes.

    Running - running or ready to run (runnable). Blocked - currently blocked, waiting for I/O to complete.
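    These roughly correspond to the r (runnable) and b (blocked) columns reported by vmstat.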

    ",en:{instance:{one:"system",other:"systems"}},dimensionsOnNonDimensionGrouping:["blocked"]},"system.processes_state":{info:"

    The number of processes in different states.

    Running - Process using the CPU at a particular moment. Sleeping (uninterruptible) - Process will wake when a waited-upon resource becomes available or after a time-out occurs during that wait. Mostly used by device drivers waiting for disk or network I/O. Sleeping (interruptible) - Process is waiting either for a particular time slot or for a particular event to occur. Zombie - Process that has completed its execution, released the system resources, but its entry is not removed from the process table. Usually occurs in child processes when the parent process still needs to read its child\u2019s exit status. A process that stays a zombie for a long time is generally an error and causes a system PID space leak. Stopped - Process is suspended from proceeding further due to STOP or TSTP signals. In this state, a process will not do anything (not even terminate) until it receives a CONT signal.

    ",en:{instance:{one:"system",other:"systems"}}},"system.active_processes":{info:"The total number of processes in the system.",en:{instance:{one:"system",other:"systems"}},dimensionsOnNonDimensionGrouping:["active"]},"system.ctxt":{info:'Context Switches, is the switching of the CPU from one process, task or thread to another. If there are many processes or threads willing to execute and very few CPU cores available to handle them, the system is making more context switching to balance the CPU resources among them. The whole process is computationally intensive. The more the context switches, the slower the system gets.',en:{instance:{one:"system",other:"systems"}},dimensionsOnNonDimensionGrouping:["switches"]},"system.idlejitter":{aggregationMethod:"max",info:"Idle jitter is calculated by netdata. A thread is spawned that requests to sleep for a few microseconds. When the system wakes it up, it measures how many microseconds have passed. The difference between the requested and the actual duration of the sleep, is the idle jitter. This number is useful in real-time environments, where CPU jitter can affect the quality of the service (like VoIP media gateways).",en:{instance:{one:"system",other:"systems"}},heatmapType:"default"},"system.net":{info:e=>{let{os:t}=e;var n="Total bandwidth of all physical network interfaces. This does not include lo, VPNs, network bridges, IFB devices, bond interfaces, etc. Only the bandwidth of physical network interfaces is aggregated.";return"linux"===t?n+" Physical are all the network interfaces that are listed in /proc/net/dev, but do not exist in /sys/devices/virtual/net.":n},en:{instance:{one:"system",other:"systems"}},mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",selectedDimensions:["received"],title:"Total Network Inbound",layout:{left:8.6,top:0,width:1.7,height:5}},{groupBy:["selected"],chartLibrary:"easypiechart",selectedDimensions:["sent"],title:"Total Network Outbound",layout:{left:10.3,top:0,width:1.7,height:5}}]},"system.ip":{info:"Total IP traffic in the system.",en:{instance:{one:"system",other:"systems"}},mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",selectedDimensions:["received"],title:"Total IP Traffic In",hiddenWhen:"system.net",layout:{left:7.2,top:0,width:1.6,height:5}},{groupBy:["selected"],chartLibrary:"easypiechart",selectedDimensions:["sent"],title:"Total IP Traffic Out",hiddenWhen:"system.net",layout:{left:8.8,top:0,width:1.6,height:5}}]},"system.ipv4":{info:"Total IPv4 Traffic.",en:{instance:{one:"system",other:"systems"}},mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",selectedDimensions:["received"],title:"Total IPv4 Traffic In",hiddenWhen:["system.net","system.ip"],layout:{left:7.2,top:0,width:1.6,height:5}},{groupBy:["selected"],chartLibrary:"easypiechart",selectedDimensions:["sent"],title:"Total IPv4 Traffic Out",hiddenWhen:["system.net","system.ip"],layout:{left:8.8,top:0,width:1.6,height:5}}]},"system.ipv6":{info:"Total IPv6 Traffic.",en:{instance:{one:"system",other:"systems"}},mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",selectedDimensions:["received"],title:"Total IPv6 Traffic In",hiddenWhen:["system.net","system.ip","system.ipv4"],layout:{left:7.2,top:0,width:1.6,height:5}},{groupBy:["selected"],chartLibrary:"easypiechart",selectedDimensions:["sent"],title:"Total IPv6 Traffic Out",hiddenWhen:["system.net","system.ip","system.ipv4"],layout:{left:8.8,top:0,width:1.6,height:5}}]},"system.ram":{info:"System Random Access Memory (i.e. 
physical memory) usage.",en:{instance:{one:"system",other:"systems"}},mainheads:[{groupBy:["percentage-of-instance"],showPostAggregations:!0,postGroupBy:["selected"],aggregationMethod:"avg",chartLibrary:"gauge",selectedDimensions:["used","buffers","active","wired"],colors:s.default[10],title:"Average Used RAM per Node",valueRange:[0,100],layout:{left:6,top:0,width:2.6,height:5}},{groupBy:["percentage-of-instance"],showPostAggregations:!0,postGroupBy:["node"],urlOptions:["percentage"],selectedDimensions:["used","buffers","active","wired"],valueRange:[0,100],chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Top Nodes by Used RAM",layout:{left:6,top:5,width:2.6,height:5}}]},"system.swap":{info:"System swap memory usage. Swap space is used when the amount of physical memory (RAM) is full. When the system needs more memory resources and the RAM is full, inactive pages in memory are moved to the swap space (usually a disk, a disk partition or a file).",en:{instance:{one:"system",other:"systems"}}},"system.swapcalls":{info:'Number of calls to functions used to manipulate swap data. Netdata shows swap metrics per application and cgroup (systemd Services) if apps or cgroup (systemd Services) plugins are enabled.'+f+'
    ',en:{instance:{one:"system",other:"systems"}}},"system.ipc_semaphores":{info:"Number of allocated System V IPC semaphores. The system-wide limit on the number of semaphores in all semaphore sets is specified in /proc/sys/kernel/sem file (2nd field).",en:{instance:{one:"system",other:"systems"}},dimensionsOnNonDimensionGrouping:["semaphores"]},"system.ipc_semaphore_arrays":{info:"Number of used System V IPC semaphore arrays (sets). Semaphores support semaphore sets where each one is a counting semaphore. So when an application requests semaphores, the kernel releases them in sets. The system-wide limit on the maximum number of semaphore sets is specified in /proc/sys/kernel/sem file (4th field).",en:{instance:{one:"system",other:"systems"}}},"system.shared_memory_segments":{info:"Number of allocated System V IPC memory segments. The system-wide maximum number of shared memory segments that can be created is specified in /proc/sys/kernel/shmmni file.",en:{instance:{one:"system",other:"systems"}}},"system.shared_memory_bytes":{info:"Amount of memory currently used by System V IPC memory segments. The run-time limit on the maximum shared memory segment size that can be created is specified in /proc/sys/kernel/shmmax file.",en:{instance:{one:"system",other:"systems"}}},"system.shared_memory_calls":{info:'Number of calls to syscalls responsible to manipulate shared memories. Netdata shows shared memory metrics per application and cgroup (systemd Services) if apps or cgroup (systemd Services) plugins are enabled.'+f+'
    ',en:{instance:{one:"system",other:"systems"}}},"system.message_queue_messages":{info:"Number of messages that are currently present in System V IPC message queues.",en:{instance:{one:"system",other:"systems"}}},"system.message_queue_bytes":{info:"Amount of memory currently used by messages in System V IPC message queues.",en:{instance:{one:"system",other:"systems"}}},"system.uptime":{aggregationMethod:"min",info:"The amount of time the system has been running, including time spent in suspend.",en:{instance:{one:"system",other:"systems"}},dimensionsOnNonDimensionGrouping:["uptime"]},"system.process_thread":{title:"Task creation",info:'Number of times a function that starts a process or thread is called. Netdata shows process metrics per application and cgroup (systemd Services) if apps or cgroup (systemd Services) plugins are enabled.'+f+'
    ',en:{instance:{one:"system",other:"systems"}}},"system.exit":{title:"Exit monitoring",info:'Number of times a function responsible to close a process or thread is called. Netdata shows process metrics per application and cgroup (systemd Services) if apps or cgroup (systemd Services) plugins are enabled.'+f+'
    ',en:{instance:{one:"system",other:"systems"}}},"system.task_error":{title:"Task error",info:'Number of times a function that starts a process or thread failed. Netdata shows process metrics per application and cgroup (systemd Services) if apps or cgroup (systemd Services) plugins are enabled.'+f+'
    ',en:{instance:{one:"system",other:"systems"}}},"system.process_status":{title:"Task status",info:'Difference between the number of calls to functions that close a task and release a task.'+f,en:{instance:{one:"system",other:"systems"}}},"cpu.cpu":{aggregationMethod:"avg",valueRange:[0,100],en:{instance:{one:"cpu core",other:"cpu cores"}},mainheads:[{groupBy:["instance"],showPostAggregations:!0,postGroupBy:["selected"],aggregationMethod:"avg",chartLibrary:"easypiechart",title:"Average CPU Core Utilization",valueRange:[0,100],layout:{left:0,top:5,width:3,height:5}},{groupBy:["dimension"],aggregationMethod:"avg",chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Top CPU Core Utilisation Stats",layout:{left:0,top:0,width:3,height:5}},{groupBy:["instance"],chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Top CPU Cores by Utilisation",layout:{left:0,top:10,width:3,height:5}}]},"cpu.interrupts":{en:{instance:{one:"cpu core",other:"cpu cores"}},mainheads:[{groupBy:["instance"],showPostAggregations:!0,postGroupBy:["selected"],aggregationMethod:"avg",chartLibrary:"easypiechart",title:"Average CPU Core Interrupts",layout:{left:3,top:5,width:3,height:5}},{groupBy:["dimension"],aggregationMethod:"avg",chartLibrary:"bars",title:"Top CPU Core Interrupt Types",layout:{left:3,top:0,width:3,height:5}},{groupBy:["instance"],chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Top CPU Cores by Interrupts",layout:{left:3,top:10,width:3,height:5}}]},"cpu.softirqs":{en:{instance:{one:"cpu core",other:"cpu cores"}},mainheads:[{groupBy:["instance"],showPostAggregations:!0,postGroupBy:["selected"],aggregationMethod:"avg",chartLibrary:"easypiechart",title:"Average CPU Core Softirqs",layout:{left:6,top:5,width:3,height:5}},{groupBy:["dimension"],aggregationMethod:"avg",chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Top CPU Core Softirq Types",layout:{left:6,top:0,width:3,height:5}},{groupBy:["instance"],chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Top CPU Cores by Softirqs",layout:{left:6,top:10,width:3,height:5}}]},"cpu.softnet_stat":{en:{instance:{one:"cpu core",other:"cpu cores"}},dimensionsOnNonDimensionGrouping:["processed"],mainheads:[{groupBy:["instance"],showPostAggregations:!0,postGroupBy:["selected"],aggregationMethod:"avg",chartLibrary:"easypiechart",title:"Average CPU Core Softnets",layout:{left:9,top:5,width:3,height:5}},{groupBy:["dimension"],aggregationMethod:"avg",chartLibrary:"bars",title:"Top CPU Core Softnet Stats",layout:{left:9,top:0,width:3,height:5}},{groupBy:["instance"],chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Top CPU Cores by Softnets",layout:{left:9,top:10,width:3,height:5}}]},"cpu.core_throttling":{en:{instance:{one:"system",other:"systems"}},mainheads:[{groupBy:["selected"],aggregationMethod:"max",chartLibrary:"easypiechart",title:"Max CPU Core Throttling",layout:{left:0,top:5,width:3,height:5}}],info:"The number of adjustments made to the clock speed of the CPU based on it's core temperature."},"cpu.package_throttling":{info:"The number of adjustments made to the clock speed of the CPU based on it's package (chip) temperature.",en:{instance:{one:"system",other:"systems"}}},"cpufreq.cpufreq":{info:"The frequency measures the number of cycles your CPU executes per second.",en:{instance:{one:"cpu core",other:"cpu cores"}}},"cpuidle.cpuidle":{info:"The percentage of time spent in C-states.",en:{instance:{one:"cpu core",other:"cpu cores"}}},"cpuidle.cpu_cstate_residency_time":{aggregationMethod:"avg",en:{instance:{one:"cpu core",other:"cpu 
cores"}}},"mem.ksm":{info:"

    Memory pages merging statistics. A high ratio of Sharing to Shared indicates good sharing, but a high ratio of Unshared to Sharing indicates wasted effort.

    Shared - used shared pages. Unshared - memory no longer shared (pages are unique but repeatedly checked for merging). Sharing - memory currently shared (how many more sites are sharing the pages, i.e. how much saved). Volatile - volatile pages (changing too fast to be placed in a tree).

    ",en:{instance:{one:"system",other:"systems"}}},"mem.ksm_savings":{heads:[{groupBy:["selected"],chartLibrary:"gauge",title:"Saved",selectedDimensions:["savings"],colors:s.default[0],layout:{left:0,top:5,width:3,height:5}}],info:"

    The amount of memory saved by KSM.

    Savings - saved memory. Offered - memory marked as mergeable.

    ",en:{instance:{one:"system",other:"systems"}}},"mem.ksm_ratios":{aggregationMethod:"avg",heads:[{groupBy:["selected"],chartLibrary:"gauge",title:"Savings",colors:s.default[0],valueRange:[0,100],layout:{left:3,top:5,width:3,height:5}}],info:"The effectiveness of KSM. This is the percentage of the mergeable pages that are currently merged.",en:{instance:{one:"system",other:"systems"}}},"mem.zram_usage":{info:"ZRAM total RAM usage metrics. ZRAM uses some memory to store metadata about stored memory pages, thus introducing an overhead which is proportional to disk size. It excludes same-element-filled-pages since no memory is allocated for them.",en:{instance:{one:"system",other:"systems"}}},"mem.zram_savings":{info:"Displays original and compressed memory data sizes.",en:{instance:{one:"system",other:"systems"}}},"mem.zram_ratio":{heads:[{groupBy:["selected"],chartLibrary:"gauge",title:"Compression Ratio",selectedDimensions:["ratio"],colors:s.default[0],valueRange:[0,100],layout:{left:6,top:5,width:3,height:5}}],info:"Compression ratio, calculated as 100 * original_size / compressed_size. More means better compression and more RAM savings.",en:{instance:{one:"system",other:"systems"}}},"mem.zram_efficiency":{aggregationMethod:"avg",heads:[{groupBy:["selected"],chartLibrary:"gauge",title:"Efficiency",selectedDimensions:["percent"],colors:s.default[0],layout:{left:9,top:5,width:3,height:5}}],valueRange:[0,100],info:"Memory usage efficiency, calculated as 100 * compressed_size / total_mem_used.",en:{instance:{one:"system",other:"systems"}}},"mem.pgfaults":{family:"page faults",info:'

    A page fault is a type of interrupt, called a trap, raised by computer hardware when a running program accesses a memory page that is mapped into the virtual address space, but not actually loaded into main memory.

    Minor - the page is loaded in memory at the time the fault is generated, but is not marked in the memory management unit as being loaded in memory. Major - generated when the system needs to load the memory page from disk or swap memory.

    ',en:{instance:{one:"system",other:"systems"}},dimensionsOnNonDimensionGrouping:["major"]},"mem.directmaps":{family:"overview"},"mem.committed":{family:"overview",colors:s.default[3],info:"Committed Memory, is the sum of all memory which has been allocated by processes.",en:{instance:{one:"system",other:"systems"}},mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Committed Memory",layout:{left:3,top:0,width:3,height:5},colors:s.default[2]},{groupBy:["node"],chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Top Nodes by Committed Memory",layout:{left:3,top:5,width:3,height:5}}]},"mem.real":{colors:s.default[3],info:"Total amount of real (physical) memory used.",en:{instance:{one:"system",other:"systems"}}},"mem.oom_kill":{family:"OOM kills",info:'The number of processes killed by Out of Memory Killer. The kernel\'s OOM killer is summoned when the system runs short of free memory and is unable to proceed without killing one or more processes. It tries to pick the process whose demise will free the most memory while causing the least misery for users of the system. This counter also includes processes within containers that have exceeded the memory limit.',en:{instance:{one:"system",other:"systems"}},mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Out of Memory Kills",colors:s.default[1],layout:{left:9,top:5,width:3,height:5}}]},"mem.numa":{info:"

    NUMA balancing statistics.

    Local - pages successfully allocated on this node, by a process on this node. Foreign - pages initially intended for this node that were allocated to another node instead. Interleave - interleave policy pages successfully allocated to this node. Other - pages allocated on this node, by a process on another node. PteUpdates - base pages that were marked for NUMA hinting faults. HugePteUpdates - transparent huge pages that were marked for NUMA hinting faults. In combination with PteUpdates, the total address space that was marked can be calculated. HintFaults - NUMA hinting faults that were trapped. HintFaultsLocal - hinting faults that were to local nodes. In combination with HintFaults, the percentage of local versus remote faults can be calculated. A high percentage of local hinting faults indicates that the workload is closer to being converged. PagesMigrated - pages were migrated because they were misplaced. As migration is a copying operation, it contributes the largest part of the overhead created by NUMA balancing.

    ",en:{instance:{one:"system",other:"systems"}}},"mem.numa_nodes":{en:{instance:{one:"system",other:"systems"}}},"mem.available":{family:"overview",mainheads:[{groupBy:["selected"],aggregationMethod:"sum",chartLibrary:"easypiechart",title:"Total Available Memory",layout:{left:0,top:0,width:3,height:5}},{groupBy:["node"],dimensionsSort:"valueAsc",chartLibrary:"bars",title:"Top Nodes by Least Available Memory",layout:{left:0,top:5,width:3,height:5}}],info:e=>{let{os:t}=e;return"freebsd"===t?"The amount of memory that can be used by user-space processes without causing swapping. Calculated as the sum of free, cached, and inactive memory.":"Available Memory is estimated by the kernel, as the amount of RAM that can be used by userspace processes, without causing swapping."},en:{instance:{one:"system",other:"systems"}}},"mem.writeback":{family:"writeback",info:"Dirty is the amount of memory waiting to be written to disk. Writeback is how much memory is actively being written to disk.",en:{instance:{one:"system",other:"systems"}},dimensionsOnNonDimensionGrouping:["Dirty"]},"mem.kernel":{info:"

    The total amount of memory being used by the kernel.

    Slab - used by the kernel to cache data structures for its own use. KernelStack - allocated for each task done by the kernel. PageTables - dedicated to the lowest level of page tables (A page table is used to turn a virtual address into a physical memory address). VmallocUsed - being used as virtual address space. Percpu - allocated to the per-CPU allocator used to back per-CPU allocations (excludes the cost of metadata). When you create a per-CPU variable, each processor on the system gets its own copy of that variable.

    ",en:{instance:{one:"system",other:"systems"}}},"mem.slab":{info:'

    Slab memory statistics.

    Reclaimable - amount of memory which the kernel can reuse. Unreclaimable - can not be reused even when the kernel is lacking memory.

    ',en:{instance:{one:"system",other:"systems"}}},"mem.hugepages":{info:"Dedicated (or Direct) HugePages is memory reserved for applications configured to utilize huge pages. Hugepages are used memory, even if there are free hugepages available.",en:{instance:{one:"system",other:"systems"}}},"mem.transparent_hugepages":{info:"Transparent HugePages (THP) is backing virtual memory with huge pages, supporting automatic promotion and demotion of page sizes. It works for all applications for anonymous memory mappings and tmpfs/shmem.",en:{instance:{one:"system",other:"systems"}}},"mem.hwcorrupt":{info:'The amount of memory with physical corruption problems, identified by ECC and set aside by the kernel so it does not get used.',en:{instance:{one:"system",other:"systems"}}},"mem.edac_mc":{info:"The number of correctable (single-bit) ECC errors. These errors do not affect the normal operation of the system because they are still being corrected. Periodic correctable errors may indicate that one of the memory modules is slowly failing.",en:{instance:{one:"mem controller",other:"mem controllers"}}},"mem.edac_mc_errors":{info:"The number of correctable (single-bit) ECC errors. These errors do not affect the normal operation of the system because they are still being corrected. Periodic correctable errors may indicate that one of the memory modules is slowly failing.",en:{instance:{one:"mem controller",other:"mem controllers"}}},"mem.edac_mc_dimm":{info:"The number of uncorrectable (multi-bit) ECC errors. An uncorrectable error is a fatal issue that will typically lead to an OS crash.",en:{instance:{one:"mem module",other:"mem modules"}}},"mem.edac_mc_dimm_errors":{info:"The number of uncorrectable (multi-bit) ECC errors. An uncorrectable error is a fatal issue that will typically lead to an OS crash.",en:{instance:{one:"mem module",other:"mem modules"}}},"mem.pagetype_global":{info:"The amount of memory available in blocks of certain size.",en:{instance:{one:"system",other:"systems"}}},"mem.cachestat_ratio":{aggregationMethod:"avg",mainheads:[{groupBy:["selected"],aggregationMethod:"avg",chartLibrary:"gauge",title:"Average Page Cache Hit Ratio",colors:s.default[0],valueRange:[0,100],layout:{left:6,top:0,width:3,height:5}},{groupBy:["node"],chartLibrary:"bars",dimensionsSort:"valueAsc",title:"Top Nodes by Least Cache Hit Ratio",layout:{left:6,top:5,width:3,height:5}}],info:'The ratio shows the percentage of data accessed directly in memory. Netdata shows the ratio per application and cgroup (systemd Services) if apps or cgroup (systemd Services) plugins are enabled.'+f,en:{instance:{one:"system",other:"systems"}}},"mem.cachestat_dirties":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Dirty Pages",colors:s.default[12],layout:{left:9,top:0,width:3,height:5}}],info:'Number of modified pages in Linux page cache. Netdata shows the dity page application and cgroup (systemd Services) if apps or cgroup (systemd Services) plugins are enabled.'+f+'
    ',en:{instance:{one:"system",other:"systems"}}},"mem.cachestat_hits":{aggregationMethod:"avg",info:'Number of access to data in Linux page cache. Netdata shows the hits per application and cgroup (systemd Services) if apps or cgroup (systemd Services) plugins are enabled.'+f+'
    ',en:{instance:{one:"system",other:"systems"}}},"mem.cachestat_misses":{aggregationMethod:"avg",info:'Number of access to data that was not present in Linux page cache. Netdata shows the missed access per application and cgroup (systemd Services) if apps or cgroup (systemd Services) plugins are enabled.'+f+'
    ',en:{instance:{one:"system",other:"systems"}}},"mem.sync":{info:'Number of calls to syscalls that sync filesystem metadata or cached. This chart has a relationship with File systems and Linux Page Cache.'+f,en:{instance:{one:"system",other:"systems"}}},"mem.file_sync":{info:'Number of calls to syscalls responsible to transfer modified Linux page cache to disk. This chart has a relationship with File systems and Linux Page Cache.'+f,en:{instance:{one:"system",other:"systems"}}},"mem.memory_map":{info:'Number of calls to syscall responsible to the in-core copy of a file that was mapped. This chart has a relationship with File systems and Linux Page Cache.'+f,en:{instance:{one:"system",other:"systems"}}},"mem.file_segment":{info:'Number of calls to syscall responsible to sync file segments. This chart has a relationship with File systems and Linux Page Cache.'+f,en:{instance:{one:"system",other:"systems"}}},"filesystem.dc_hit_ratio":{aggregationMethod:"avg",info:'Percentage of file accesses that were present in the directory cache. Netdata shows directory cache metrics per application and cgroup (systemd Services) if apps or cgroup (systemd Services) plugins are enabled.'+f+'
    ',en:{instance:{one:"system",other:"systems"}}},"filesystem.dc_reference":{info:'Counters of file accesses. Reference is when there is a file access and the file is not present in the directory cache. Miss is when there is file access and the file is not found in the filesystem. Slow is when there is a file access and the file is present in the filesystem but not in the directory cache. Netdata shows directory cache metrics per application and cgroup (systemd Services) if apps or cgroup (systemd Services) plugins are enabled.'+f+'
    ',en:{instance:{one:"system",other:"systems"}}},"md.health":{family:"health",info:"Number of failed devices per MD array. Netdata retrieves this data from the [n/m] field of the md status line. It means that ideally the array would have n devices however, currently, m devices are in use. failed disks is n-m.",en:{instance:{one:"MD array",other:"MD arrays"}}},"md.disks":{family:"health",info:"Number of devices in use and in the down state. Netdata retrieves this data from the [n/m] field of the md status line. It means that ideally the array would have n devices however, currently, m devices are in use. inuse is m, down is n-m.",en:{instance:{one:"MD array",other:"MD arrays"}}},"md.status":{family:"activity",info:"Completion progress of the ongoing operation.",en:{instance:{one:"MD array",other:"MD arrays"}}},"md.expected_time_until_operation_finish":{family:"activity",info:"Estimated time to complete the ongoing operation. The time is only an approximation since the operation speed will vary according to other I/O demands.",en:{instance:{one:"MD array",other:"MD arrays"}}},"md.operation_speed":{family:"activity",info:"Speed of the ongoing operation. The system-wide rebuild speed limits are specified in /proc/sys/dev/raid/{speed_limit_min,speed_limit_max} files. These options are good for tweaking rebuilt process and may increase overall system load, cpu and memory usage.",en:{instance:{one:"MD array",other:"MD arrays"}}},"md.mismatch_cnt":{family:"errors",info:'When performing check and repair, and possibly when performing resync, md will count the number of errors that are found. A count of mismatches is recorded in the sysfs file md/mismatch_cnt. This value is the number of sectors that were re-written, or (for check) would have been re-written. It may be larger than the number of actual errors by a factor of the number of sectors in a page. Mismatches can not be interpreted very reliably on RAID1 or RAID10, especially when the device is used for swap. On a truly clean RAID5 or RAID6 array, any mismatches should indicate a hardware problem at some level - software issues should never cause such a mismatch. For details, see md(4).',en:{instance:{one:"MD array",other:"MD arrays"}}},"md.flush":{family:"flushes",info:'Number of flush counts per MD array. Based on the eBPF mdflush from BCC tools.',en:{instance:{one:"MD array",other:"MD arrays"}}},"ip.inerrors":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"IP Inbound Errors",colors:s.default[0],layout:{left:0,top:0,width:3,height:5}}],info:"

    The number of errors encountered during the reception of IP packets.

    NoRoutes - packets that were dropped because there was no route to send them. Truncated - packets that were discarded because the datagram frame didn't carry enough data. Checksum - packets that were dropped because they had a wrong checksum.

    ",en:{instance:{one:"system",other:"systems"}}},"ip.mcast":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",selectedDimensions:["received"],title:"Total Multicast Traffic Received",layout:{left:9,top:5,width:3,height:5},colors:s.default[2]},{groupBy:["selected"],chartLibrary:"easypiechart",selectedDimensions:["sent"],title:"Total Multicast Traffic Sent",layout:{left:6,top:5,width:3,height:5},colors:s.default[1]}],info:"Total multicast traffic in the system.",en:{instance:{one:"system",other:"systems"}}},"ip.mcastpkts":{info:"Total transferred multicast packets in the system.",en:{instance:{one:"system",other:"systems"}}},"ip.bcast":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",selectedDimensions:["received"],title:"Total Broadcast Traffic Received",layout:{left:3,top:5,width:3,height:5},colors:s.default[2]},{groupBy:["selected"],chartLibrary:"easypiechart",selectedDimensions:["sent"],title:"Total Broadcast Traffic Sent",layout:{left:0,top:5,width:3,height:5},colors:s.default[1]}],info:"Total broadcast traffic in the system.",en:{instance:{one:"system",other:"systems"}}},"ip.bcastpkts":{info:"Total transferred broadcast packets in the system.",en:{instance:{one:"system",other:"systems"}}},"ip.ecnpkts":{info:"

    Total number of received IP packets with ECN bits set in the system.

    CEP - congestion encountered. NoECTP - non ECN-capable transport. ECTP0 and ECTP1 - ECN capable transport.

    ",en:{instance:{one:"system",other:"systems"}},dimensionsOnNonDimensionGrouping:["NoECTP"]},"ip.inbound_conn":{info:'Number of calls to functions responsible for receiving connections.'+f,en:{instance:{one:"system",other:"systems"}}},"ip.tcp_outbound_conn":{info:'Number of calls to TCP functions responsible for starting connections. Netdata shows TCP outbound connections metrics per application and cgroup (systemd Services) if apps or cgroup (systemd Services) plugins are enabled.'+f+'
    ',en:{instance:{one:"system",other:"systems"}}},"ip.tcp_functions":{info:'Number of calls to TCP functions responsible for exchanging data. Netdata shows TCP outbound connections metrics per application and cgroup (systemd Services) if apps or cgroup (systemd Services) plugins are enabled.'+f+'
    ',en:{instance:{one:"system",other:"systems"}}},"ip.total_tcp_bandwidth":{heads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total TCP Bandwidth",colors:s.default[1],layout:{left:0,top:0,width:3,height:5}}],info:'Total bytes sent and received with TCP internal functions. Netdata shows TCP bandwidth metrics per application and cgroup (systemd Services) if apps or cgroup (systemd Services) plugins are enabled.'+f+'
    ',en:{instance:{one:"system",other:"systems"}}},"ip.tcp_error":{heads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"TCP Errors",colors:s.default[2],layout:{left:3,top:0,width:3,height:5}}],info:'Number of failed calls to TCP functions responsible for TCP bandwidth. Netdata shows TCP error per application and cgroup (systemd Services) if apps or cgroup (systemd Services) plugins are enabled.'+f+'
    ',en:{instance:{one:"system",other:"systems"}}},"ip.tcp_retransmit":{heads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"TCP Retransmits",colors:s.default[3],layout:{left:6,top:0,width:3,height:5}}],info:'Number of times a TCP packet was retransmitted. Netdata shows TCP retransmit per application and cgroup (systemd Services) if apps or cgroup (systemd Services) plugins are enabled.'+f+'
    ',en:{instance:{one:"system",other:"systems"}}},"ip.udp_functions":{info:'Number of calls to UDP functions responsible for exchanging data. Netdata shows TCP outbound connections metrics per application and cgroup (systemd Services) if apps or cgroup (systemd Services) plugins are enabled.'+f+'
    ',en:{instance:{one:"system",other:"systems"}}},"ip.total_udp_bandwidth":{heads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total UDP Bandwidth",colors:s.default[4],layout:{left:9,top:0,width:3,height:5}}],info:'Total bytes sent and received with UDP internal functions. Netdata shows UDP bandwidth metrics per application and cgroup (systemd Services) if apps or cgroup (systemd Services) plugins are enabled.'+f+'
    ',en:{instance:{one:"system",other:"systems"}}},"ip.udp_error":{info:'Number of failed calls to UDP functions responsible for UDP bandwidth. Netdata shows UDP error per application and cgroup (systemd Services) if apps or cgroup (systemd Services) plugins are enabled.'+f+'
    ',en:{instance:{one:"system",other:"systems"}}},"ip.tcpreorders":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"TCP Reorders",layout:{left:9,top:0,width:3,height:5}}],info:"

    TCP handles out-of-order packets by either re-sequencing them into the correct order or by requesting the retransmission of out-of-order packets.

    Timestamp - detected re-ordering using the timestamp option. SACK - detected re-ordering using Selective Acknowledgment algorithm. FACK - detected re-ordering using Forward Acknowledgment algorithm. Reno - detected re-ordering using Fast Retransmit algorithm.

    ",en:{instance:{one:"system",other:"systems"}}},"ip.tcpofo":{aggregationMethod:"avg",mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"TCP Out-Of-Order Queue",layout:{left:6,top:0,width:3,height:5}}],info:"

    TCP maintains an out-of-order queue to keep the out-of-order packets in the TCP communication.

    InQueue - the TCP layer receives an out-of-order packet and has enough memory to queue it. Dropped - the TCP layer receives an out-of-order packet but does not have enough memory, so drops it. Merged - the received out-of-order packet has an overlap with the previous packet. The overlapping part is dropped. All these packets are also counted in InQueue. Pruned - packets dropped from the out-of-order queue because of socket buffer overrun.

    ",en:{instance:{one:"system",other:"systems"}}},"ip.tcpsyncookies":{info:'

    SYN cookies are used to mitigate SYN flood attacks.

    Received - after sending a SYN cookie, it came back to us and passed the check. Sent - an application was not able to accept a connection fast enough, so the kernel could not store an entry in the queue for this connection. Instead of dropping it, it sent a SYN cookie to the client. Failed - the MSS decoded from the SYN cookie is invalid. When this counter is incremented, the received packet won\u2019t be treated as a SYN cookie.

    ',en:{instance:{one:"system",other:"systems"}}},"ip.tcpmemorypressures":{info:"The number of times a socket was put in memory pressure due to a non fatal memory allocation failure (the kernel attempts to work around this situation by reducing the send buffers, etc).",en:{instance:{one:"system",other:"systems"}}},"ip.tcpconnaborts":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"TCP Connection Aborts",layout:{left:3,top:0,width:3,height:5}}],info:"

    TCP connection aborts.

    BadData - happens while the connection is on FIN_WAIT1 and the kernel receives a packet with a sequence number beyond the last one for this connection - the kernel responds with RST (closes the connection). UserClosed - happens when the kernel receives data on an already closed connection and responds with RST. NoMemory - happens when there are too many orphaned sockets (not attached to an fd) and the kernel has to drop a connection - sometimes it will send an RST, sometimes it won't. Timeout - happens when a connection times out. Linger - happens when the kernel killed a socket that was already closed by the application and lingered around for long enough. Failed - happens when the kernel attempted to send an RST but failed because there was no memory available.

    ",en:{instance:{one:"system",other:"systems"}},dimensionsOnNonDimensionGrouping:["timeouts"]},"ip.tcp_syn_queue":{info:"

    The SYN queue of the kernel tracks TCP handshakes until connections get fully established. It overflows when too many incoming TCP connection requests hang in the half-open state and the server is not configured to fall back to SYN cookies. Overflows are usually caused by SYN flood DoS attacks.

    Drops - number of connections dropped because the SYN queue was full and SYN cookies were disabled. Cookies - number of SYN cookies sent because the SYN queue was full.

    ",en:{instance:{one:"system",other:"systems"}}},"ip.tcp_accept_queue":{info:"

    The accept queue of the kernel holds the fully established TCP connections, waiting to be handled by the listening application.

    Overflows - the number of established connections that could not be handled because the receive queue of the listening application was full. Drops - number of incoming connections that could not be handled, including SYN floods, overflows, out of memory, security issues, no route to destination, reception of related ICMP messages, or the socket being broadcast or multicast.

    ",en:{instance:{one:"system",other:"systems"}},dimensionsOnNonDimensionGrouping:["overflows"]},"ipv4.packets":{aggregationMethod:"sum",mainheads:[{groupBy:["selected"],aggregationMethod:"sum",chartLibrary:"easypiechart",title:"Total IPv4 Packets",colors:s.default[0],layout:{left:0,top:0,width:3,height:5}},{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"IPv4 Packets",layout:{left:0,top:5,width:3,height:5}}],info:'

    IPv4 packets statistics for this host.

    Received - packets received by the IP layer. This counter will be increased even if the packet is dropped later. Sent - packets sent via the IP layer, for both unicast and multicast packets. This counter does not include any packets counted in Forwarded. Forwarded - input packets for which this host was not their final IP destination, as a result of which an attempt was made to find a route to forward them to that final destination. In hosts which do not act as IP Gateways, this counter will include only those packets which were Source-Routed and the Source-Route option processing was successful. Delivered - packets delivered to the upper layer protocols, e.g. TCP, UDP, ICMP, and so on.

    ',en:{instance:{one:"system",other:"systems"}}},"ipv4.fragsout":{info:'

    IPv4 fragmentation statistics for this system.

    OK - packets that have been successfully fragmented. Failed - packets that have been discarded because they needed to be fragmented but could not be, e.g. because the Don\'t Fragment (DF) flag was set. Created - fragments that have been generated as a result of fragmentation.

    ',en:{instance:{one:"system",other:"systems"}}},"ipv4.fragsin":{info:'

    IPv4 reassembly statistics for this system.

    OK - packets that have been successfully reassembled. Failed - failures detected by the IP reassembly algorithm. This is not necessarily a count of discarded IP fragments since some algorithms can lose track of the number of fragments by combining them as they are received. All - received IP fragments which needed to be reassembled.

    ',en:{instance:{one:"system",other:"systems"}}},"ipv4.errors":{aggregationMethod:"sum",mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total IPv4 Errors",colors:s.default[1],layout:{left:3,top:0,width:3,height:5}},{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"IPv4 Errors",layout:{left:3,top:5,width:3,height:5}}],info:"

    The number of discarded IPv4 packets.

    InDiscards, OutDiscards - inbound and outbound packets which were chosen to be discarded even though no errors had been detected to prevent their being deliverable to a higher-layer protocol. InHdrErrors - input packets that have been discarded due to errors in their IP headers, including bad checksums, version number mismatch, other format errors, time-to-live exceeded, errors discovered in processing their IP options, etc. OutNoRoutes - packets that have been discarded because no route could be found to transmit them to their destination. This includes any packets which a host cannot route because all of its default gateways are down. InAddrErrors - input packets that have been discarded due to invalid IP address or the destination IP address is not a local address and IP forwarding is not enabled. InUnknownProtos - input packets which were discarded because of an unknown or unsupported protocol.

    ",en:{instance:{one:"system",other:"systems"}}},"ipv4.icmp":{aggregationMethod:"sum",info:"

    The number of transferred IPv4 ICMP messages.

    Received, Sent - ICMP messages which the host received and attempted to send. Both these counters include errors.

    ",en:{instance:{one:"system",other:"systems"}}},"ipv4.icmp_errors":{info:"

    The number of IPv4 ICMP errors.

    InErrors - received ICMP messages but determined as having ICMP-specific errors, e.g. bad ICMP checksums, bad length, etc. OutErrors - ICMP messages which this host did not send due to problems discovered within ICMP such as a lack of buffers. This counter does not include errors discovered outside the ICMP layer such as the inability of IP to route the resultant datagram. InCsumErrors - received ICMP messages with bad checksum.

    ",en:{instance:{one:"system",other:"systems"}}},"ipv4.icmpmsg":{info:'The number of transferred IPv4 ICMP control messages.',en:{instance:{one:"system",other:"systems"}}},"ipv4.udppackets":{aggregationMethod:"sum",mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total IPv4 UDP Packets",colors:s.default[4],layout:{left:9,top:0,width:3,height:5}},{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"IPv4 UDP Packets",layout:{left:9,top:5,width:3,height:5}}],info:"The number of transferred UDP packets.",en:{instance:{one:"system",other:"systems"}}},"ipv4.udperrors":{info:"

    The number of errors encountered during transferring UDP packets.

    RcvbufErrors - receive buffer is full. SndbufErrors - send buffer is full, no kernel memory available, or the IP layer reported an error when trying to send the packet and no error queue has been set up. InErrors - an aggregated counter for all errors, excluding NoPorts. NoPorts - no application is listening at the destination port. InCsumErrors - a UDP checksum failure is detected. IgnoredMulti - ignored multicast packets.",en:{instance:{one:"system",other:"systems"}}},"ipv4.udplite":{info:"The number of transferred UDP-Lite packets.",en:{instance:{one:"system",other:"systems"}}},"ipv4.udplite_errors":{info:"

    The number of errors encountered during transferring UDP-Lite packets.

    RcvbufErrors - receive buffer is full. SndbufErrors - send buffer is full, no kernel memory available, or the IP layer reported an error when trying to send the packet and no error queue has been set up. InErrors - an aggregated counter for all errors, excluding NoPorts. NoPorts - no application is listening at the destination port. InCsumErrors - a UDP checksum failure is detected. IgnoredMulti - ignored multicast packets.",en:{instance:{one:"system",other:"systems"}}},"ipv4.tcppackets":{aggregationMethod:"sum",mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total IPv4 TCP Packets",colors:s.default[3],layout:{left:6,top:0,width:3,height:5}},{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"IPv4 TCP Packets",layout:{left:6,top:5,width:3,height:5}}],info:"

    The number of packets transferred by the TCP layer.

    Received - received packets, including those received in error, such as checksum error, invalid TCP header, and so on. Sent - sent packets, excluding retransmitted packets; it does include SYN, ACK, and RST packets.

    ",en:{instance:{one:"system",other:"systems"}}},"ipv4.tcpsock":{info:"The number of TCP connections for which the current state is either ESTABLISHED or CLOSE-WAIT. This is a snapshot of the established connections at the time of measurement (i.e. a connection established and a connection disconnected within the same iteration will not affect this metric).",en:{instance:{one:"system",other:"systems"}},dimensionsOnNonDimensionGrouping:["connections"]},"ipv4.tcpopens":{info:"

    TCP connection statistics.

    Active - number of outgoing TCP connections attempted by this host. Passive - number of incoming TCP connections accepted by this host.

    ",en:{instance:{one:"system",other:"systems"}},dimensionsOnNonDimensionGrouping:["active"]},"ipv4.tcperrors":{info:"

    TCP errors.

    InErrs - TCP segments received in error (including header too small, checksum errors, sequence errors, bad packets - for both IPv4 and IPv6). InCsumErrors - TCP segments received with checksum errors (for both IPv4 and IPv6). RetransSegs - TCP segments retransmitted.

    ",en:{instance:{one:"system",other:"systems"}},dimensionsOnNonDimensionGrouping:["all"]},"ipv4.tcphandshake":{info:"

    TCP handshake statistics.

    EstabResets - established connection resets (i.e. connections that made a direct transition from ESTABLISHED or CLOSE_WAIT to CLOSED). OutRsts - TCP segments sent, with the RST flag set (for both IPv4 and IPv6). AttemptFails - number of times TCP connections made a direct transition from either SYN_SENT or SYN_RECV to CLOSED, plus the number of times TCP connections made a direct transition from SYN_RECV to LISTEN. SynRetrans - shows retries for new outbound TCP connections, which can indicate general connectivity issues or backlog on the remote host.

    ",en:{instance:{one:"system",other:"systems"}}},"ipv4.sockstat_sockets":{info:'The total number of used sockets for all address families in this system.',en:{instance:{one:"system",other:"systems"}},dimensionsOnNonDimensionGrouping:["used"]},"ipv4.sockstat_tcp_sockets":{info:'

    The number of TCP sockets in the system in certain states.

    Alloc - in any TCP state. Orphan - no longer attached to a socket descriptor in any user processes, but for which the kernel is still required to maintain state in order to complete the transport protocol. InUse - in any TCP state, excluding TIME-WAIT and CLOSED. TimeWait - in the TIME-WAIT state.

    ',en:{instance:{one:"system",other:"systems"}},dimensionsOnNonDimensionGrouping:["alloc"]},"ipv4.sockstat_tcp_mem":{info:"The amount of memory used by allocated TCP sockets.",en:{instance:{one:"system",other:"systems"}}},"ipv4.sockstat_udp_sockets":{info:"The number of used UDP sockets.",en:{instance:{one:"system",other:"systems"}},dimensionsOnNonDimensionGrouping:["inuse"]},"ipv4.sockstat_udp_mem":{info:"The amount of memory used by allocated UDP sockets.",en:{instance:{one:"system",other:"systems"}}},"ipv4.sockstat_udplite_sockets":{info:"The number of used UDP-Lite sockets.",en:{instance:{one:"system",other:"systems"}}},"ipv4.sockstat_raw_sockets":{info:'The number of used raw sockets.',en:{instance:{one:"system",other:"systems"}}},"ipv4.sockstat_frag_sockets":{info:"The number of entries in hash tables that are used for packet reassembly.",en:{instance:{one:"system",other:"systems"}}},"ipv4.sockstat_frag_mem":{info:"The amount of memory used for packet reassembly.",en:{instance:{one:"system",other:"systems"}}},"ipv6.packets":{aggregationMethod:"sum",mainheads:[{groupBy:["selected"],aggregationMethod:"sum",chartLibrary:"easypiechart",title:"Total IPv6 Packets",colors:s.default[3],layout:{left:0,top:0,width:3,height:5}},{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"IPv6 Packets",layout:{left:0,top:5,width:3,height:5}}],info:'

    IPv6 packet statistics for this host.

    Received - packets received by the IP layer. This counter will be increased even if the packet is dropped later. Sent - packets sent via the IP layer, for both unicast and multicast packets. This counter does not include any packets counted in Forwarded. Forwarded - input packets for which this host was not their final IP destination, as a result of which an attempt was made to find a route to forward them to that final destination. In hosts which do not act as IP Gateways, this counter will include only those packets which were Source-Routed and the Source-Route option processing was successful. Delivers - packets delivered to the upper layer protocols, e.g. TCP, UDP, ICMP, and so on.

    ',en:{instance:{one:"system",other:"systems"}}},"ipv6.fragsout":{info:'

    IPv6 fragmentation statistics for this system.

    OK - packets that have been successfully fragmented. Failed - packets that have been discarded because they needed to be fragmented but could not be, e.g. because the Don\'t Fragment (DF) flag was set. All - fragments that have been generated as a result of fragmentation.

    ',en:{instance:{one:"system",other:"systems"}}},"ipv6.fragsin":{info:'

    IPv6 reassembly statistics for this system.

    OK - packets that have been successfully reassembled. Failed - failures detected by the IP reassembly algorithm. This is not necessarily a count of discarded IP fragments since some algorithms can lose track of the number of fragments by combining them as they are received. Timeout - reassembly timeouts detected. All - received IP fragments which needed to be reassembled.

    ',en:{instance:{one:"system",other:"systems"}}},"ipv6.errors":{aggregationMethod:"sum",mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total IPv6 Errors",colors:s.default[3],layout:{left:3,top:0,width:3,height:5}},{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"IPv6 Errors",layout:{left:3,top:5,width:3,height:5}}],info:"

    The number of discarded IPv6 packets.

    InDiscards, OutDiscards - packets which were chosen to be discarded even though no errors had been detected to prevent their being deliverable to a higher-layer protocol. InHdrErrors - errors in IP headers, including bad checksums, version number mismatch, other format errors, time-to-live exceeded, etc. InAddrErrors - invalid IP address or the destination IP address is not a local address and IP forwarding is not enabled. InUnknownProtos - unknown or unsupported protocol. InTooBigErrors - the size exceeded the link MTU. InTruncatedPkts - packet frame did not carry enough data. InNoRoutes - no route could be found while forwarding. OutNoRoutes - no route could be found for packets generated by this host.

    ",en:{instance:{one:"system",other:"systems"}}},"ipv6.udppackets":{aggregationMethod:"sum",mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total IPv6 UDP Packets",colors:s.default[3],layout:{left:6,top:0,width:3,height:5}},{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"IPv6 UDP Packets",layout:{left:6,top:5,width:3,height:5}}],info:"The number of transferred UDP packets.",en:{instance:{one:"system",other:"systems"}}},"ipv6.udperrors":{aggregationMethod:"sum",mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total IPv6 UDP Errors",colors:s.default[3],layout:{left:9,top:0,width:3,height:5}},{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"IPv6 UDP Errors",layout:{left:9,top:5,width:3,height:5}}],info:"

    The number of errors encountered during transferring UDP packets.

    RcvbufErrors - receive buffer is full. SndbufErrors - send buffer is full, no kernel memory available, or the IP layer reported an error when trying to send the packet and no error queue has been set up. InErrors - an aggregated counter for all errors, excluding NoPorts. NoPorts - no application is listening at the destination port. InCsumErrors - a UDP checksum failure is detected. IgnoredMulti - ignored multicast packets.",en:{instance:{one:"system",other:"systems"}}},"ipv6.udplitepackets":{info:"The number of transferred UDP-Lite packets.",en:{instance:{one:"system",other:"systems"}}},"ipv6.udpliteerrors":{info:"

    The number of errors encountered during transferring UDP-Lite packets.

    RcvbufErrors - receive buffer is full. SndbufErrors - send buffer is full, no kernel memory available, or the IP layer reported an error when trying to send the packet and no error queue has been set up. InErrors - an aggregated counter for all errors, excluding NoPorts. NoPorts - no application is listening at the destination port. InCsumErrors - a UDP checksum failure is detected.

    ",en:{instance:{one:"system",other:"systems"}}},"ipv6.mcast":{info:"Total IPv6 multicast traffic.",en:{instance:{one:"system",other:"systems"}}},"ipv6.bcast":{info:"Total IPv6 broadcast traffic.",en:{instance:{one:"system",other:"systems"}}},"ipv6.mcastpkts":{info:"Total transferred IPv6 multicast packets.",en:{instance:{one:"system",other:"systems"}}},"ipv6.icmp":{aggregationMethod:"sum",info:"

    The number of transferred ICMPv6 messages.

    Received, Sent - ICMP messages which the host received and attempted to send. Both these counters include errors.

    ",en:{instance:{one:"system",other:"systems"}}},"ipv6.icmpredir":{info:"The number of transferred ICMPv6 Redirect messages. These messages inform a host to update its routing information (to send packets on an alternative route).",en:{instance:{one:"system",other:"systems"}}},"ipv6.icmpechos":{info:"The number of ICMPv6 Echo messages.",en:{instance:{one:"system",other:"systems"}}},"ipv6.icmperrors":{info:'

    The number of ICMPv6 errors and error messages.

    InErrors, OutErrors - bad ICMP messages (bad ICMP checksums, bad length, etc.). InCsumErrors - wrong checksum.

    ',en:{instance:{one:"system",other:"systems"}}},"ipv6.groupmemb":{info:"

    The number of transferred ICMPv6 Group Membership messages.

    Multicast routers send Group Membership Query messages to learn which groups have members on each of their attached physical networks. Host computers respond by sending a Group Membership Report for each multicast group joined by the host. A host computer can also send a Group Membership Report when it joins a new multicast group. Group Membership Reduction messages are sent when a host computer leaves a multicast group.

    ",en:{instance:{one:"system",other:"systems"}}},"ipv6.icmprouter":{info:'

    The number of transferred ICMPv6 Router Discovery messages.

    A Router Solicitation message is sent from a computer host to any routers on the local area network to request that they advertise their presence on the network. A Router Advertisement message is sent by a router on the local area network to announce its IP address as available for routing.

    ',en:{instance:{one:"system",other:"systems"}}},"ipv6.icmpneighbor":{info:'

    The number of transferred ICMPv6 Neighbour Discovery messages.

    Neighbor Solicitations are used by nodes to determine the link layer address of a neighbor, or to verify that a neighbor is still reachable via a cached link layer address. Neighbor Advertisements are used by nodes to respond to a Neighbor Solicitation message.

    ',en:{instance:{one:"system",other:"systems"}}},"ipv6.icmpmldv2":{info:'The number of transferred ICMPv6 Multicast Listener Discovery (MLD) messages.',en:{instance:{one:"system",other:"systems"}}},"ipv6.icmptypes":{info:'The number of transferred ICMPv6 messages of certain types.',en:{instance:{one:"system",other:"systems"}}},"ipv6.ect":{info:"

    Total number of received IPv6 packets with ECN bits set in the system.

    CEP - congestion encountered. NoECTP - non ECN-capable transport. ECTP0 and ECTP1 - ECN capable transport.

    ",en:{instance:{one:"system",other:"systems"}}},"ipv6.sockstat6_tcp_sockets":{info:'The number of TCP sockets in any state, excluding TIME-WAIT and CLOSED.',en:{instance:{one:"system",other:"systems"}},dimensionsOnNonDimensionGrouping:["inuse"]},"ipv6.sockstat6_udp_sockets":{info:"The number of used UDP sockets.",en:{instance:{one:"system",other:"systems"}}},"ipv6.sockstat6_udplite_sockets":{info:"The number of used UDP-Lite sockets.",en:{instance:{one:"system",other:"systems"}}},"ipv6.sockstat6_raw_sockets":{info:'The number of used raw sockets.',en:{instance:{one:"system",other:"systems"}},dimensionsOnNonDimensionGrouping:["inuse"]},"ipv6.sockstat6_frag_sockets":{info:"The number of entries in hash tables that are used for packet reassembly.",en:{instance:{one:"system",other:"systems"}}},"sctp.established":{aggregationMethod:"sum",mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"SCTP Associations States",colors:s.default[0],layout:{left:4,top:0,width:4,height:5}}],info:"The number of associations for which the current state is either ESTABLISHED, SHUTDOWN-RECEIVED or SHUTDOWN-PENDING.",en:{instance:{one:"system",other:"systems"}}},"sctp.transitions":{aggregationMethod:"sum",mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total SCTP Transitions",colors:s.default[1],layout:{left:0,top:0,width:2,height:5}}],info:"

    The number of times that associations have made a direct transition between states.

    Active - from COOKIE-ECHOED to ESTABLISHED. The upper layer initiated the association attempt. Passive - from CLOSED to ESTABLISHED. The remote endpoint initiated the association attempt. Aborted - from any state to CLOSED using the primitive ABORT. Ungraceful termination of the association. Shutdown - from SHUTDOWN-SENT or SHUTDOWN-ACK-SENT to CLOSED. Graceful termination of the association.

    ",en:{instance:{one:"system",other:"systems"}}},"sctp.packets":{aggregationMethod:"sum",mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total SCTP Packets",colors:s.default[2],layout:{left:2,top:0,width:2,height:5}}],info:"

    The number of transferred SCTP packets.

    Received - includes duplicate packets. Sent - includes retransmitted DATA chunks.

    ",en:{instance:{one:"system",other:"systems"}}},"sctp.packet_errors":{aggregationMethod:"sum",mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total SCTP Packet Errors",colors:s.default[3],layout:{left:8,top:0,width:2,height:5}}],info:"

    The number of errors encountered during receiving SCTP packets.

    Invalid - packets for which the receiver was unable to identify an appropriate association. Checksum - packets with an invalid checksum.

    ",en:{instance:{one:"system",other:"systems"}}},"sctp.fragmentation":{aggregationMethod:"sum",mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total SCTP Fragmentation",colors:s.default[4],layout:{left:10,top:0,width:2,height:5}}],info:"

    The number of fragmented and reassembled SCTP messages.

    Reassembled - reassembled user messages, after conversion into DATA chunks. Fragmented - user messages that have to be fragmented because of the MTU.

    ",en:{instance:{one:"system",other:"systems"}}},"sctp.chunks":{aggregationMethod:"sum",info:"The number of transferred control, ordered, and unordered DATA chunks. Retransmissions and duplicates are not included.",en:{instance:{one:"system",other:"systems"}}},"netfilter.conntrack_sockets":{info:"The number of entries in the conntrack table.",en:{instance:{one:"firewall",other:"firewalls"}},aggregationMethod:"sum"},"netfilter.conntrack_new":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Conntrack Connections Status",colors:s.default[0],layout:{left:0,top:0,width:3,height:5}}],info:"

    Packet tracking statistics. New (since v4.9) and Ignore (since v5.10) are hardcoded to zeros in the latest kernel.

    New - conntrack entries added which were not expected before. Ignore - packets seen which are already connected to a conntrack entry. Invalid - packets seen which can not be tracked.

    ",en:{instance:{one:"firewall",other:"firewalls"}},aggregationMethod:"sum"},"netfilter.conntrack_changes":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Total Conntrack Changes",colors:s.default[1],layout:{left:3,top:0,width:3,height:5}}],info:"

    The number of changes in conntrack tables.

    Inserted, Deleted - conntrack entries which were inserted or removed. Delete-list - conntrack entries which were put on the dying list.

    ",en:{instance:{one:"firewall",other:"firewalls"}},aggregationMethod:"sum"},"netfilter.conntrack_expect":{info:'

    The number of events in the "expect" table. Connection tracking expectations are the mechanism used to "expect" RELATED connections to existing ones. An expectation is a connection that is expected to happen in a period of time.

    Created, Deleted - conntrack entries which were inserted or removed. New - conntrack entries added after an expectation for them was already present.

    ',en:{instance:{one:"firewall",other:"firewalls"}},aggregationMethod:"sum"},"netfilter.conntrack_search":{info:"

    Conntrack table lookup statistics.

    Searched - conntrack table lookups performed. Restarted - conntrack table lookups which had to be restarted due to hashtable resizes. Found - conntrack table lookups which were successful.

    ",en:{instance:{one:"firewall",other:"firewalls"}},aggregationMethod:"sum"},"netfilter.conntrack_errors":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Total Conntrack Errors",colors:s.default[2],layout:{left:6,top:0,width:3,height:5}}],info:"

    Conntrack errors.

    IcmpError - packets which could not be tracked due to error situation. InsertFailed - entries for which list insertion was attempted but failed (happens if the same entry is already present). Drop - packets dropped due to conntrack failure. Either new conntrack entry allocation failed, or protocol helper dropped the packet. EarlyDrop - dropped conntrack entries to make room for new ones, if maximum table size was reached.

    ",en:{instance:{one:"firewall",other:"firewalls"}},aggregationMethod:"sum"},"netfilter.synproxy_syn_received":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total TCP SYN Received",colors:s.default[3],layout:{left:9,top:0,width:3,height:5}}],info:"The number of initial TCP SYN packets received from clients.",en:{instance:{one:"firewall",other:"firewalls"}},aggregationMethod:"sum"},"netfilter.synproxy_conn_reopened":{info:"The number of reopened connections by new TCP SYN packets directly from the TIME-WAIT state.",en:{instance:{one:"firewall",other:"firewalls"}},aggregationMethod:"sum"},"netfilter.synproxy_cookies":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"SYN Proxy Cookie Stats",colors:s.default[4],layout:{left:0,top:5,width:3,height:5}}],info:"

    SYNPROXY cookie statistics.

    Valid, Invalid - result of cookie validation in TCP ACK packets received from clients. Retransmits - TCP SYN packets retransmitted to the server. It happens when the client repeats TCP ACK and the connection to the server is not yet established.

    ",en:{instance:{one:"firewall",other:"firewalls"}},aggregationMethod:"sum"},"app.cpu_utilization":{info:'The amount of time the CPU was busy executing code in user and kernel modes (all cores).',aggregationMethod:"sum",groupBy:["label"],groupByLabel:["app_group"],mainheads:[{groupBy:["label"],groupByLabel:["app_group"],chartLibrary:"bars",title:"Top Apps by CPU",dimensionsSort:"valueDesc",colors:s.default[12],layout:{left:0,top:0,width:3,height:5}}],en:{instance:{one:"application",other:"applications"}}},"app.cpu_guest_utilization":{info:"The amount of time spent running a virtual CPU for a guest operating system (all cores).",aggregationMethod:"avg",groupBy:["label"],groupByLabel:["app_group"],chartType:"stacked",en:{instance:{one:"application",other:"applications"}}},"app.cpu_context_switches":{info:"The number of CPU context switches. Voluntary context switches occur when a process yields control of the CPU to another process. Nonvoluntary context switches occur when the kernel preempts a process and gives control of the CPU to another process.",aggregationMethod:"sum",groupBy:["label"],groupByLabel:["app_group"],en:{instance:{one:"application",other:"applications"}}},"app.mem_usage":{info:"Resident Set Size (RSS) is the amount of physical memory that is currently being used by the processes. This includes the process's code, data, stack, and shared libraries.",aggregationMethod:"sum",groupBy:["label"],groupByLabel:["app_group"],chartType:"stacked",mainheads:[{groupBy:["label"],groupByLabel:["app_group"],chartLibrary:"bars",title:"Top Apps by Memory",dimensionsSort:"valueDesc",colors:s.default[12],layout:{left:3,top:0,width:3,height:5}}],en:{instance:{one:"application",other:"applications"}}},"app.mem_private_usage":{info:"The amount of used memory, excluding shared memory.",aggregationMethod:"sum",groupBy:["label"],groupByLabel:["app_group"],chartType:"stacked",en:{instance:{one:"application",other:"applications"}}},"app.mem_page_faults":{info:'The number of minor and major page faults.',aggregationMethod:"sum",groupBy:["label"],groupByLabel:["app_group"],chartType:"stacked",en:{instance:{one:"application",other:"applications"}}},"app.vmem_usage":{info:'The amount of allocated virtual memory. Check this article for more details.',aggregationMethod:"sum",groupBy:["label"],groupByLabel:["app_group"],chartType:"stacked",en:{instance:{one:"application",other:"applications"}}},"app.swap_usage":{info:"The amount of swapped-out virtual memory by anonymous private pages. This does not include shared swap memory.",aggregationMethod:"sum",groupBy:["label"],groupByLabel:["app_group"],chartType:"stacked",en:{instance:{one:"application",other:"applications"}}},"app.disk_physical_io":{info:"The amount of data that has been transferred to/from the storage layer. Actual physical disk I/O was required.",aggregationMethod:"sum",groupBy:["label"],groupByLabel:["app_group"],chartType:"stacked",mainheads:[{groupBy:["label"],groupByLabel:["app_group"],selectedDimensions:["reads"],chartLibrary:"bars",title:"Top Apps by Disk Reads",dimensionsSort:"valueDesc",colors:s.default[12],layout:{left:6,top:0,width:3,height:5}},{groupBy:["label"],groupByLabel:["app_group"],selectedDimensions:["writes"],chartLibrary:"bars",title:"Top Apps by Disk Writes",dimensionsSort:"valueDesc",colors:s.default[12],layout:{left:9,top:0,width:3,height:5}}],en:{instance:{one:"application",other:"applications"}}},"app.disk_logical_io":{info:"The amount of data that has been transferred to/from the storage layer. 
It includes things such as terminal I/O and is unaffected by whether or not actual physical disk I/O was required (the read/write operation might have been satisfied from pagecache).",aggregationMethod:"sum",groupBy:["label"],groupByLabel:["app_group"],chartType:"stacked",en:{instance:{one:"application",other:"applications"}}},"app.processes":{info:'The number of processes.',aggregationMethod:"sum",groupBy:["label"],groupByLabel:["app_group"],chartType:"stacked",en:{instance:{one:"application",other:"applications"}}},"app.threads":{info:'The number of threads.',aggregationMethod:"sum",groupBy:["label"],groupByLabel:["app_group"],chartType:"stacked",en:{instance:{one:"application",other:"applications"}}},"app.fds_open_limit":{info:"Percentage of available file descriptors used.",aggregationMethod:"avg",groupBy:["label"],groupByLabel:["app_group"],en:{instance:{one:"application",other:"applications"}}},"app.fds_open":{info:"Number of file descriptors used.",aggregationMethod:"sum",groupBy:["label"],groupByLabel:["app_group"],en:{instance:{one:"application",other:"applications"}}},"app.uptime":{info:"The period of time within which at least one process in the group has been running.",aggregationMethod:"avg",groupBy:["label"],groupByLabel:["app_group"],en:{instance:{one:"application",other:"applications"}}},"app.uptime_summary":{info:"The shortest, average and longest uptime among processes in the group.",aggregationMethod:"avg",groupBy:["label"],groupByLabel:["app_group"],en:{instance:{one:"application",other:"applications"}}},"usergroup.cpu_utilization":{info:'The amount of time the CPU was busy executing code in user and kernel modes (all cores).',aggregationMethod:"sum",groupBy:["label"],groupByLabel:["user_group"],mainheads:[{groupBy:["label"],groupByLabel:["user_group"],chartLibrary:"bars",title:"Top Groups by CPU",dimensionsSort:"valueDesc",colors:s.default[12],layout:{left:0,top:0,width:3,height:5}}],en:{instance:{one:"group",other:"groups"}}},"usergroup.cpu_guest_utilization":{info:"The amount of time spent running a virtual CPU for a guest operating system (all cores).",aggregationMethod:"avg",groupBy:["label"],groupByLabel:["user_group"],chartType:"stacked",en:{instance:{one:"group",other:"groups"}}},"usergroup.cpu_context_switches":{info:"The number of CPU context switches. Voluntary context switches occur when a process yields control of the CPU to another process. Nonvoluntary context switches occur when the kernel preempts a process and gives control of the CPU to another process.",aggregationMethod:"sum",groupBy:["label"],groupByLabel:["user_group"],en:{instance:{one:"group",other:"groups"}}},"usergroup.mem_usage":{info:"Resident Set Size (RSS) is the amount of physical memory that is currently being used by the processes. 
This includes the process's code, data, stack, and shared libraries.",aggregationMethod:"sum",groupBy:["label"],groupByLabel:["user_group"],chartType:"stacked",mainheads:[{groupBy:["label"],groupByLabel:["user_group"],chartLibrary:"bars",title:"Top Groups by Memory",dimensionsSort:"valueDesc",colors:s.default[12],layout:{left:3,top:0,width:3,height:5}}],en:{instance:{one:"group",other:"groups"}}},"usergroup.mem_private_usage":{info:"The amount of used memory, excluding shared memory.",aggregationMethod:"sum",groupBy:["label"],groupByLabel:["user_group"],chartType:"stacked",en:{instance:{one:"group",other:"groups"}}},"usergroup.mem_page_faults":{info:'The number of minor and major page faults.',aggregationMethod:"sum",groupBy:["label"],groupByLabel:["user_group"],chartType:"stacked",en:{instance:{one:"group",other:"groups"}}},"usergroup.vmem_usage":{info:'The amount of allocated virtual memory. Check this article for more details.',aggregationMethod:"sum",groupBy:["label"],groupByLabel:["user_group"],chartType:"stacked",en:{instance:{one:"group",other:"groups"}}},"usergroup.swap_usage":{info:"The amount of swapped-out virtual memory by anonymous private pages. This does not include shared swap memory.",aggregationMethod:"sum",groupBy:["label"],groupByLabel:["user_group"],chartType:"stacked",en:{instance:{one:"group",other:"groups"}}},"usergroup.disk_physical_io":{info:"The amount of data that has been transferred to/from the storage layer. Actual physical disk I/O was required.",aggregationMethod:"sum",groupBy:["label"],groupByLabel:["user_group"],chartType:"stacked",mainheads:[{groupBy:["label"],groupByLabel:["user_group"],selectedDimensions:["reads"],chartLibrary:"bars",title:"Top Groups by Disk Reads",dimensionsSort:"valueDesc",colors:s.default[12],layout:{left:6,top:0,width:3,height:5}},{groupBy:["label"],groupByLabel:["user_group"],selectedDimensions:["writes"],chartLibrary:"bars",title:"Top Groups by Disk Writes",dimensionsSort:"valueDesc",colors:s.default[12],layout:{left:9,top:0,width:3,height:5}}],en:{instance:{one:"group",other:"groups"}}},"usergroup.disk_logical_io":{info:"The amount of data that has been transferred to/from the storage layer. 
It includes things such as terminal I/O and is unaffected by whether or not actual physical disk I/O was required (the read/write operation might have been satisfied from pagecache).",aggregationMethod:"sum",groupBy:["label"],groupByLabel:["user_group"],chartType:"stacked",en:{instance:{one:"group",other:"groups"}}},"usergroup.processes":{info:'The number of processes.',aggregationMethod:"sum",groupBy:["label"],groupByLabel:["user_group"],chartType:"stacked",en:{instance:{one:"group",other:"groups"}}},"usergroup.threads":{info:'The number of threads.',aggregationMethod:"sum",groupBy:["label"],groupByLabel:["user_group"],chartType:"stacked",en:{instance:{one:"group",other:"groups"}}},"usergroup.fds_open_limit":{info:"Percentage of available file descriptors used.",aggregationMethod:"avg",groupBy:["label"],groupByLabel:["user_group"],en:{instance:{one:"group",other:"groups"}}},"usergroup.fds_open":{info:"Number of file descriptors used.",aggregationMethod:"sum",groupBy:["label"],groupByLabel:["user_group"],en:{instance:{one:"group",other:"groups"}}},"usergroup.uptime":{info:"The period of time within which at least one process in the group has been running.",aggregationMethod:"avg",groupBy:["label"],groupByLabel:["user_group"],en:{instance:{one:"group",other:"groups"}}},"usergroup.uptime_summary":{info:"The shortest, average and longest uptime among processes in the group.",aggregationMethod:"avg",groupBy:["label"],groupByLabel:["user_group"],en:{instance:{one:"group",other:"groups"}}},"user.cpu_utilization":{info:'The amount of time the CPU was busy executing code in user and kernel modes (all cores).',aggregationMethod:"sum",groupBy:["label"],groupByLabel:["user"],mainheads:[{groupBy:["label"],groupByLabel:["user"],chartLibrary:"bars",title:"Top Users by CPU",dimensionsSort:"valueDesc",colors:s.default[12],layout:{left:0,top:0,width:3,height:5}}],en:{instance:{one:"user",other:"users"}}},"user.cpu_guest_utilization":{info:"The amount of time spent running a virtual CPU for a guest operating system (all cores).",aggregationMethod:"avg",groupBy:["label"],groupByLabel:["user"],chartType:"stacked",en:{instance:{one:"user",other:"users"}}},"user.cpu_context_switches":{info:"The number of CPU context switches. Voluntary context switches occur when a process yields control of the CPU to another process. Nonvoluntary context switches occur when the kernel preempts a process and gives control of the CPU to another process.",aggregationMethod:"sum",groupBy:["label"],groupByLabel:["user"],en:{instance:{one:"user",other:"users"}}},"user.mem_usage":{info:"Resident Set Size (RSS) is the amount of physical memory that is currently being used by the processes. This includes the process's code, data, stack, and shared libraries.",aggregationMethod:"sum",groupBy:["label"],groupByLabel:["user"],chartType:"stacked",mainheads:[{groupBy:["label"],groupByLabel:["user"],chartLibrary:"bars",title:"Top Users by Memory",dimensionsSort:"valueDesc",colors:s.default[12],layout:{left:3,top:0,width:3,height:5}}],en:{instance:{one:"user",other:"users"}}},"user.mem_private_usage":{info:"The amount of used memory, excluding shared memory.",aggregationMethod:"sum",groupBy:["label"],groupByLabel:["user"],chartType:"stacked",en:{instance:{one:"user",other:"users"}}},"user.mem_page_faults":{info:'The number of minor and major page faults.',aggregationMethod:"sum",groupBy:["label"],groupByLabel:["user"],en:{instance:{one:"user",other:"users"}}},"user.vmem_usage":{info:'The amount of allocated virtual memory. 
Check this article for more details.',aggregationMethod:"sum",groupBy:["label"],groupByLabel:["user"],chartType:"stacked",en:{instance:{one:"user",other:"users"}}},"user.swap_usage":{info:"The amount of swapped-out virtual memory by anonymous private pages. This does not include shared swap memory.",aggregationMethod:"sum",groupBy:["label"],groupByLabel:["user"],chartType:"stacked",en:{instance:{one:"user",other:"users"}}},"user.disk_physical_io":{info:"The amount of data that has been transferred to/from the storage layer. Actual physical disk I/O was required.",aggregationMethod:"sum",groupBy:["label"],groupByLabel:["user"],chartType:"stacked",mainheads:[{groupBy:["label"],groupByLabel:["user"],selectedDimensions:["reads"],chartLibrary:"bars",title:"Top Users by Disk Reads",dimensionsSort:"valueDesc",colors:s.default[12],layout:{left:6,top:0,width:3,height:5}},{groupBy:["label"],groupByLabel:["user"],selectedDimensions:["writes"],chartLibrary:"bars",title:"Top Users by Disk Writes",dimensionsSort:"valueDesc",colors:s.default[12],layout:{left:9,top:0,width:3,height:5}}],en:{instance:{one:"user",other:"users"}}},"user.disk_logical_io":{info:"The amount of data that has been transferred to/from the storage layer. It includes things such as terminal I/O and is unaffected by whether or not actual physical disk I/O was required (the read/write operation might have been satisfied from pagecache).",aggregationMethod:"sum",groupBy:["label"],groupByLabel:["user"],chartType:"stacked",en:{instance:{one:"user",other:"users"}}},"user.processes":{info:'The number of processes.',aggregationMethod:"sum",groupBy:["label"],groupByLabel:["user"],chartType:"stacked",en:{instance:{one:"user",other:"users"}}},"user.threads":{info:'The number of threads.',aggregationMethod:"sum",groupBy:["label"],groupByLabel:["user"],chartType:"stacked",en:{instance:{one:"user",other:"users"}}},"user.fds_open_limit":{info:"Percentage of available file descriptors used.",aggregationMethod:"avg",groupBy:["label"],groupByLabel:["user"],en:{instance:{one:"user",other:"users"}}},"user.fds_open":{info:"Number of file descriptors used.",aggregationMethod:"sum",groupBy:["label"],groupByLabel:["user"],en:{instance:{one:"user",other:"users"}}},"user.uptime":{info:"The period of time within which at least one process in the group has been running.",aggregationMethod:"avg",groupBy:["label"],groupByLabel:["user"],en:{instance:{one:"user",other:"users"}}},"user.uptime_summary":{info:"The shortest, average and longest uptime among processes in the group.",aggregationMethod:"avg",groupBy:["label"],groupByLabel:["user"],en:{instance:{one:"user",other:"users"}}},"apps.cpu":{aggregationMethod:"avg",mainheads:[{groupBy:["dimension"],aggregationMethod:"avg",chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Average CPU per Application",layout:{left:0,top:0,width:2.4,height:5}}],info:"Total CPU utilization per application.",en:{instance:{one:"system",other:"systems"}}},"groups.cpu":{mainheads:[{groupBy:["dimension"],aggregationMethod:"avg",chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Average CPU per Group",layout:{left:0,top:0,width:2.4,height:5}}],info:"Total CPU utilization (all cores). It includes user, system and guest time.",en:{instance:{one:"system",other:"systems"}}},"users.cpu":{mainheads:[{groupBy:["dimension"],aggregationMethod:"avg",chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Average CPU per User",layout:{left:0,top:0,width:2.4,height:5}}],info:"Total CPU utilization (all cores). 
It includes user, system and guest time.",en:{instance:{one:"system",other:"systems"}}},"apps.cpu_user":{aggregationMethod:"avg",info:'The amount of time the CPU was busy executing code in user mode (all cores).',en:{instance:{one:"system",other:"systems"}}},"groups.cpu_user":{aggregationMethod:"avg",info:'The amount of time the CPU was busy executing code in user mode (all cores).',en:{instance:{one:"system",other:"systems"}}},"users.cpu_user":{aggregationMethod:"avg",info:'The amount of time the CPU was busy executing code in user mode (all cores).',en:{instance:{one:"system",other:"systems"}}},"apps.cpu_system":{aggregationMethod:"avg",info:'The amount of time the CPU was busy executing code in kernel mode (all cores).',en:{instance:{one:"system",other:"systems"}}},"groups.cpu_system":{aggregationMethod:"avg",info:'The amount of time the CPU was busy executing code in kernel mode (all cores).',en:{instance:{one:"system",other:"systems"}}},"users.cpu_system":{aggregationMethod:"avg",info:'The amount of time the CPU was busy executing code in kernel mode (all cores).',en:{instance:{one:"system",other:"systems"}}},"apps.cpu_guest":{aggregationMethod:"avg",info:"The amount of time spent running a virtual CPU for a guest operating system (all cores).",en:{instance:{one:"system",other:"systems"}}},"groups.cpu_guest":{aggregationMethod:"avg",info:"The amount of time spent running a virtual CPU for a guest operating system (all cores).",en:{instance:{one:"system",other:"systems"}}},"users.cpu_guest":{aggregationMethod:"avg",info:"The amount of time spent running a virtual CPU for a guest operating system (all cores).",en:{instance:{one:"system",other:"systems"}}},"apps.preads":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Total Disk reads per Application",layout:{left:4.8,top:0,width:2.4,height:5}}],info:"The amount of data that has been read from the storage layer. Actual physical disk I/O was required.",en:{instance:{one:"system",other:"systems"}}},"groups.preads":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Total Disk Reads per Group",layout:{left:4.8,top:0,width:2.4,height:5}}],info:"The amount of data that has been read from the storage layer. Actual physical disk I/O was required.",en:{instance:{one:"system",other:"systems"}}},"users.preads":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Total Disk Reads per User",layout:{left:4.8,top:0,width:2.4,height:5}}],info:"The amount of data that has been read from the storage layer. Actual physical disk I/O was required.",en:{instance:{one:"system",other:"systems"}}},"apps.pwrites":{mainheads:[{chartLibrary:"bars",title:"Total Disk Writes per Application",layout:{left:7.2,top:0,width:2.4,height:5}}],info:"The amount of data that has been written to the storage layer. Actual physical disk I/O was required.",en:{instance:{one:"system",other:"systems"}}},"groups.pwrites":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Total Disk Writes per Group",layout:{left:7.2,top:0,width:2.4,height:5}}],info:"The amount of data that has been written to the storage layer. Actual physical disk I/O was required.",en:{instance:{one:"system",other:"systems"}}},"users.pwrites":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Total Disk Writes per User",layout:{left:7.2,top:0,width:2.4,height:5}}],info:"The amount of data that has been written to the storage layer. 
Actual physical disk I/O was required.",en:{instance:{one:"system",other:"systems"}}},"apps.lreads":{info:"The amount of data that has been read from the storage layer. It includes things such as terminal I/O and is unaffected by whether or not actual physical disk I/O was required (the read might have been satisfied from pagecache).",en:{instance:{one:"system",other:"systems"}}},"groups.lreads":{info:"The amount of data that has been read from the storage layer. It includes things such as terminal I/O and is unaffected by whether or not actual physical disk I/O was required (the read might have been satisfied from pagecache).",en:{instance:{one:"system",other:"systems"}}},"users.lreads":{info:"The amount of data that has been read from the storage layer. It includes things such as terminal I/O and is unaffected by whether or not actual physical disk I/O was required (the read might have been satisfied from pagecache).",en:{instance:{one:"system",other:"systems"}}},"apps.lwrites":{info:"The amount of data that has been written or shall be written to the storage layer. It includes things such as terminal I/O and is unaffected by whether or not actual physical disk I/O was required.",en:{instance:{one:"system",other:"systems"}}},"groups.lwrites":{info:"The amount of data that has been written or shall be written to the storage layer. It includes things such as terminal I/O and is unaffected by whether or not actual physical disk I/O was required.",en:{instance:{one:"system",other:"systems"}}},"users.lwrites":{info:"The amount of data that has been written or shall be written to the storage layer. It includes things such as terminal I/O and is unaffected by whether or not actual physical disk I/O was required.",en:{instance:{one:"system",other:"systems"}}},"apps.files":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Open Files per Application",layout:{left:9.6,top:0,width:2.4,height:5}}],info:"The number of open files and directories.",en:{instance:{one:"system",other:"systems"}}},"groups.files":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Open Files per Group",layout:{left:9.6,top:0,width:2.4,height:5}}],info:"The number of open files and directories.",en:{instance:{one:"system",other:"systems"}}},"users.files":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Open Files per User",layout:{left:9.6,top:0,width:2.4,height:5}}],info:"The number of open files and directories.",en:{instance:{one:"system",other:"systems"}}},"apps.mem":{info:"Real memory (RAM) used by applications. This does not include shared memory.",en:{instance:{one:"system",other:"systems"}}},"groups.mem":{info:"Real memory (RAM) used per user group. This does not include shared memory.",en:{instance:{one:"system",other:"systems"}}},"users.mem":{info:"Real memory (RAM) used per user. 
This does not include shared memory.",en:{instance:{one:"system",other:"systems"}}},"apps.rss":{info:"Applications Resident Set Size (RSS).",en:{instance:{one:"system",other:"systems"}},mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Total Memory per Application",layout:{left:2.4,top:0,width:2.4,height:5}}]},"groups.rss":{info:"Applications Resident Set Size (RSS) per user group.",en:{instance:{one:"system",other:"systems"}},mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Total Memory per Group",layout:{left:2.4,top:0,width:2.4,height:5}}]},"users.rss":{info:"Applications Resident Set Size (RSS) per user.",en:{instance:{one:"system",other:"systems"}},mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Total Memory per User",layout:{left:2.4,top:0,width:2.4,height:5}}]},"apps.vmem":{info:'Virtual memory allocated by applications. Check this article for more information.',en:{instance:{one:"system",other:"systems"}}},"groups.vmem":{info:'Virtual memory allocated per user group since the Netdata restart. Please check this article for more information.',en:{instance:{one:"system",other:"systems"}}},"users.vmem":{info:'Virtual memory allocated per user since the Netdata restart. Please check this article for more information.',en:{instance:{one:"system",other:"systems"}}},"apps.minor_faults":{info:'The number of minor faults which have not required loading a memory page from the disk. Minor page faults occur when a process needs data that is in memory and is assigned to another process. They share memory pages between multiple processes \u2013 no additional data needs to be read from disk to memory.',en:{instance:{one:"system",other:"systems"}}},"groups.minor_faults":{info:'The number of minor faults which have not required loading a memory page from the disk. Minor page faults occur when a process needs data that is in memory and is assigned to another process. They share memory pages between multiple processes \u2013 no additional data needs to be read from disk to memory.',en:{instance:{one:"system",other:"systems"}}},"users.minor_faults":{info:'The number of minor faults which have not required loading a memory page from the disk. Minor page faults occur when a process needs data that is in memory and is assigned to another process. 
They share memory pages between multiple processes \u2013 no additional data needs to be read from disk to memory.',en:{instance:{one:"system",other:"systems"}}},"apps.threads":{info:'The number of threads.',en:{instance:{one:"system",other:"systems"}}},"groups.threads":{info:'The number of threads.',en:{instance:{one:"system",other:"systems"}}},"users.threads":{info:'The number of threads.',en:{instance:{one:"system",other:"systems"}}},"apps.processes":{info:'The number of processes.',en:{instance:{one:"system",other:"systems"}}},"groups.processes":{info:'The number of processes.',en:{instance:{one:"system",other:"systems"}}},"users.processes":{info:'The number of processes.',en:{instance:{one:"system",other:"systems"}}},"apps.uptime":{aggregationMethod:"min",info:"The period of time within which at least one process in the group has been running.",en:{instance:{one:"system",other:"systems"}}},"groups.uptime":{aggregationMethod:"min",info:"The period of time within which at least one process in the group has been running.",en:{instance:{one:"system",other:"systems"}}},"users.uptime":{aggregationMethod:"min",info:"The period of time within which at least one process in the group has been running.",en:{instance:{one:"system",other:"systems"}}},"apps.uptime_min":{info:"The shortest uptime among processes in the group.",en:{instance:{one:"system",other:"systems"}}},"groups.uptime_min":{info:"The shortest uptime among processes in the group.",en:{instance:{one:"system",other:"systems"}}},"users.uptime_min":{info:"The shortest uptime among processes in the group.",en:{instance:{one:"system",other:"systems"}}},"apps.uptime_avg":{info:"The average uptime of processes in the group.",en:{instance:{one:"system",other:"systems"}}},"groups.uptime_avg":{info:"The average uptime of processes in the group.",en:{instance:{one:"system",other:"systems"}}},"users.uptime_avg":{info:"The average uptime of processes in the group.",en:{instance:{one:"system",other:"systems"}}},"apps.uptime_max":{info:"The longest uptime among processes in the group.",en:{instance:{one:"system",other:"systems"}}},"groups.uptime_max":{info:"The longest uptime among processes in the group.",en:{instance:{one:"system",other:"systems"}}},"users.uptime_max":{info:"The longest uptime among processes in the group.",en:{instance:{one:"system",other:"systems"}}},"apps.pipes":{info:'The number of open pipes. A pipe is a unidirectional data channel that can be used for interprocess communication.',en:{instance:{one:"system",other:"systems"}}},"groups.pipes":{info:'The number of open pipes. A pipe is a unidirectional data channel that can be used for interprocess communication.',en:{instance:{one:"system",other:"systems"}}},"users.pipes":{info:'The number of open pipes. A pipe is a unidirectional data channel that can be used for interprocess communication.',en:{instance:{one:"system",other:"systems"}}},"apps.swap":{info:"The amount of swapped-out virtual memory by anonymous private pages. This does not include shared swap memory.",en:{instance:{one:"system",other:"systems"}}},"groups.swap":{info:"The amount of swapped-out virtual memory by anonymous private pages. This does not include shared swap memory.",en:{instance:{one:"system",other:"systems"}}},"users.swap":{info:"The amount of swapped-out virtual memory by anonymous private pages. This does not include shared swap memory.",en:{instance:{one:"system",other:"systems"}}},"apps.major_faults":{info:'The number of major faults which have required loading a memory page from the disk. 
Major page faults occur because of the absence of the required page from the RAM. They are expected when a process starts or needs to read in additional data and in these cases do not indicate a problem condition. However, a major page fault can also be the result of reading memory pages that have been written out to the swap file, which could indicate a memory shortage.',en:{instance:{one:"system",other:"systems"}}},"groups.major_faults":{info:'The number of major faults which have required loading a memory page from the disk. Major page faults occur because of the absence of the required page from the RAM. They are expected when a process starts or needs to read in additional data and in these cases do not indicate a problem condition. However, a major page fault can also be the result of reading memory pages that have been written out to the swap file, which could indicate a memory shortage.',en:{instance:{one:"system",other:"systems"}}},"users.major_faults":{info:'The number of major faults which have required loading a memory page from the disk. Major page faults occur because of the absence of the required page from the RAM. They are expected when a process starts or needs to read in additional data and in these cases do not indicate a problem condition. However, a major page fault can also be the result of reading memory pages that have been written out to the swap file, which could indicate a memory shortage.',en:{instance:{one:"system",other:"systems"}}},"apps.sockets":{info:"The number of open sockets. Sockets are a way to enable inter-process communication between programs running on a server, or between programs running on separate servers. This includes both network and UNIX sockets.",en:{instance:{one:"system",other:"systems"}}},"groups.sockets":{info:"The number of open sockets. Sockets are a way to enable inter-process communication between programs running on a server, or between programs running on separate servers. This includes both network and UNIX sockets.",en:{instance:{one:"system",other:"systems"}}},"users.sockets":{info:"The number of open sockets. Sockets are a way to enable inter-process communication between programs running on a server, or between programs running on separate servers. This includes both network and UNIX sockets.",en:{instance:{one:"system",other:"systems"}}},"app.ebpf_file_open":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["app_group"],info:'Number of calls to internal functions of the Linux kernel responsible for opening files. Netdata gives a summary for this chart in file access, and when the integration is enabled, Netdata shows virtual file system per cgroup (systemd Services).'+f+'
    ',en:{instance:{one:"application",other:"applications"}}},"app.ebpf_file_open_error":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["app_group"],info:'Number of failed calls to internal functions of the Linux kernel responsible for opening files. Netdata gives a summary for this chart in file access, and when the integration is enabled, Netdata shows virtual file system per cgroup (systemd Services).'+f+'
    ',en:{instance:{one:"application",other:"applications"}}},"app.ebpf_file_closed":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["app_group"],info:'Number of calls to internal functions of the Linux kernel responsible for closing files. Netdata gives a summary for this chart in file access, and when the integration is enabled, Netdata shows virtual file system per cgroup (systemd Services).'+f+'
    ',en:{instance:{one:"application",other:"applications"}}},"app.ebpf_file_close_error":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["app_group"],info:'Number of failed calls to internal functions of the Linux kernel responsible for closing files. Netdata gives a summary for this chart in file access, and when the integration is enabled, Netdata shows virtual file system per cgroup (systemd Services).'+f+'
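    // Hedged annotation (not part of the original bundle): the four entries above pair each
    // open/close counter with a failed-call counter, so an error rate falls out directly.
    // This assumes the failure counter counts a subset of the calls counter.
    function ebpfFileErrorRate(calls, failedCalls) {
      return calls === 0 ? 0 : (failedCalls / calls) * 100; // percent of calls that failed
    }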
    ',en:{instance:{one:"application",other:"applications"}}},"app.ebpf_call_vfs_unlink":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["app_group"],info:'Number of calls to VFS unlinker function. Netdata gives a summary for this chart in Virtual File System, and when the integration is enabled, Netdata shows virtual file system per cgroup (systemd Services).'+f+'',en:{instance:{one:"application",other:"applications"}}},"app.ebpf_call_vfs_write":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["app_group"],info:'Number of successful calls to VFS writer function. Netdata gives a summary for this chart in Virtual File System, and when the integration is enabled, Netdata shows virtual file system per cgroup (systemd Services).'+f+'
    ',en:{instance:{one:"application",other:"applications"}}},"app.ebpf_call_vfs_write_error":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["app_group"],info:'Number of failed calls to VFS writer function. Netdata gives a summary for this chart in Virtual File System, and when the integration is enabled, Netdata shows virtual file system per cgroup (systemd Services).'+f+'
    ',en:{instance:{one:"application",other:"applications"}}},"app.ebpf_call_vfs_read":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["app_group"],info:'Number of successful calls to VFS reader function. Netdata gives a summary for this chart in Virtual File System, and when the integration is enabled, Netdata shows virtual file system per cgroup (systemd Services).'+f+'
    ',en:{instance:{one:"application",other:"applications"}}},"app.ebpf_call_vfs_read_error":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["app_group"],info:'Number of failed calls to VFS reader function. Netdata gives a summary for this chart in Virtual File System, and when the integration is enabled, Netdata shows virtual file system per cgroup (systemd Services).'+f+'
    ',en:{instance:{one:"application",other:"applications"}}},"app.ebpf_call_vfs_write_bytes":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["app_group"],info:'Total number of bytes successfully written using the VFS writer function. Netdata gives a summary for this chart in Virtual File System, and when the integration is enabled, Netdata shows virtual file system per cgroup (systemd Services).'+f+'
    ',en:{instance:{one:"application",other:"applications"}}},"app.ebpf_call_vfs_read_bytes":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["app_group"],info:'Total number of bytes successfully read using the VFS reader function. Netdata gives a summary for this chart in Virtual File System, and when the integration is enabled, Netdata shows virtual file system per cgroup (systemd Services).'+f+'
    ',en:{instance:{one:"application",other:"applications"}}},"app.ebpf_call_vfs_fsync":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["app_group"],info:'Number of calls to VFS syncer function. Netdata gives a summary for this chart in Virtual File System, and when the integration is enabled, Netdata shows virtual file system per cgroup (systemd Services).'+f+'
    ',en:{instance:{one:"application",other:"applications"}}},"app.ebpf_call_vfs_fsync_error":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["app_group"],info:'Number of failed calls to VFS syncer function. Netdata gives a summary for this chart in Virtual File System, and when the integration is enabled, Netdata shows virtual file system per cgroup (systemd Services).'+f+'
    ',en:{instance:{one:"application",other:"applications"}}},"app.ebpf_call_vfs_open":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["app_group"],info:'Number of calls to VFS opener function. Netdata gives a summary for this chart in Virtual File System, and when the integration is enabled, Netdata shows virtual file system per cgroup (systemd Services).'+f+'
    ',en:{instance:{one:"application",other:"applications"}}},"app.ebpf_call_vfs_open_error":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["app_group"],info:'Number of failed calls to VFS opener function. Netdata gives a summary for this chart in Virtual File System, and when the integration is enabled, Netdata shows virtual file system per cgroup (systemd Services).'+f+'
    ',en:{instance:{one:"application",other:"applications"}}},"app.ebpf_call_vfs_create":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["app_group"],info:'Number of calls to VFS creator function. Netdata gives a summary for this chart in Virtual File System, and when the integration is enabled, Netdata shows virtual file system per cgroup (systemd Services).'+f+'
    ',en:{instance:{one:"application",other:"applications"}}},"app.ebpf_call_vfs_create_error":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["app_group"],info:'Number of failed calls to VFS creator function. Netdata gives a summary for this chart in Virtual File System, and when the integration is enabled, Netdata shows virtual file system per cgroup (systemd Services).'+f+'
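    // Hedged annotation (not part of the original bundle): each per-application entry above
    // repeats the same metadata shape. The sample below mirrors that shape; the 12-unit row
    // width is an assumption inferred from the left/width values used throughout this file.
    const sampleChartMeta = {
      aggregationMethod: "sum",    // how values are aggregated across instances
      groupBy: ["label"],          // group instances by a label...
      groupByLabel: ["app_group"], // ...specifically the app_group label
      mainheads: [{ chartLibrary: "bars", title: "Top Apps", layout: { left: 0, top: 0, width: 3, height: 5 } }],
    };
    for (const head of sampleChartMeta.mainheads) {
      const { left, width } = head.layout;
      console.log(`head spans columns ${left}-${left + width} of an assumed 12-unit row`);
    }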
    ',en:{instance:{one:"application",other:"applications"}}},"app.ebpf_process_start":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["app_group"],info:'Number of times a function that starts a process is called. Netdata gives a summary for this chart in Process, and when the integration is enabled, Netdata shows process per cgroup (systemd Services).'+f+'
    ',en:{instance:{one:"application",other:"applications"}}},"app.ebpf_thread_start":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["app_group"],info:'Number of times a function that starts a thread is called. Netdata gives a summary for this chart in Process, and when the integration is enabled, Netdata shows process per cgroup (systemd Services).'+f+'
    ',en:{instance:{one:"application",other:"applications"}}},"app.ebpf_task_exit":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["app_group"],info:'Number of times a function responsible for closing tasks is called. Netdata gives a summary for this chart in Process, and when the integration is enabled, Netdata shows process per cgroup (systemd Services).'+f+'
    ',en:{instance:{one:"application",other:"applications"}}},"app.ebpf_task_released":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["app_group"],info:'Number of times a function responsible for releasing tasks is called. Netdata gives a summary for this chart in Process, and when the integration is enabled, Netdata shows process per cgroup (systemd Services).'+f+'
    ',en:{instance:{one:"application",other:"applications"}}},"app.ebpf_task_error":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["app_group"],info:'Number of errors encountered while creating a new task. Netdata gives a summary for this chart in Process, and when the integration is enabled, Netdata shows process per cgroup (systemd Services).'+f+'
    ',en:{instance:{one:"application",other:"applications"}}},"app.ebpf_call_tcp_v4_connection":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["app_group"],info:'Number of calls to the IPv4 TCP function responsible for starting connections. Netdata gives a summary for this chart in Network Stack. When the integration is enabled, Netdata shows outbound connections per cgroup (systemd Services).'+f+'
    ',en:{instance:{one:"application",other:"applications"}}},"app.ebpf_call_tcp_v6_connection":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["app_group"],info:'Number of calls to the IPv6 TCP function responsible for starting connections. Netdata gives a summary for this chart in Network Stack. When the integration is enabled, Netdata shows outbound connections per cgroup (systemd Services).'+f+'
    ',en:{instance:{one:"application",other:"applications"}}},"app.ebpf_sock_bytes_sent":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["app_group"],info:'Total bytes sent with TCP or UDP internal functions. Netdata gives a summary for this chart in Network Stack. When the integration is enabled, Netdata shows bandwidth per cgroup (systemd Services).'+f+'
    ',en:{instance:{one:"application",other:"applications"}}},"app.ebpf_sock_bytes_received":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["app_group"],info:'Total bytes received with TCP or UDP internal functions. Netdata gives a summary for this chart in Network Stack. When the integration is enabled, Netdata shows bandwidth per cgroup (systemd Services).'+f+'
    ',en:{instance:{one:"application",other:"applications"}}},"app.ebpf_call_tcp_sendmsg":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["app_group"],info:'Number of calls to TCP functions responsible for sending data. Netdata gives a summary for this chart in Network Stack. When the integration is enabled, Netdata shows TCP calls per cgroup (systemd Services).'+f+'
    ',en:{instance:{one:"application",other:"applications"}}},"app.ebpf_call_tcp_cleanup_rbuf":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["app_group"],info:'Number of calls to TCP functions responsible for receiving data. Netdata gives a summary for this chart in Network Stack. When the integration is enabled, Netdata shows TCP calls per cgroup (systemd Services).'+f+'
    ',en:{instance:{one:"application",other:"applications"}}},"app.ebpf_call_tcp_retransmit":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["app_group"],info:'Number of times a TCP packet was retransmitted. Netdata gives a summary for this chart in Network Stack. When the integration is enabled, Netdata shows TCP calls per cgroup (systemd Services).'+f+'
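    // Hedged annotation (not part of the original bundle): relating the retransmit counter
    // above to the send counter gives a rough retransmission rate. Plain-number inputs and
    // the function name are assumptions; no Netdata API is implied.
    function tcpRetransmitRate(retransmits, sendmsgCalls) {
      return sendmsgCalls === 0 ? 0 : (retransmits / sendmsgCalls) * 100;
    }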
    ',en:{instance:{one:"application",other:"applications"}}},"app.ebpf_call_udp_sendmsg":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["app_group"],info:'Number of calls to UDP functions responsible for sending data. Netdata gives a summary for this chart in Network Stack. When the integration is enabled, Netdata shows UDP calls per cgroup (systemd Services).'+f+'
    ',en:{instance:{one:"application",other:"applications"}}},"app.ebpf_call_udp_recvmsg":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["app_group"],info:'Number of calls to UDP functions responsible for receiving data. Netdata gives a summary for this chart in Network Stack. When the integration is enabled, Netdata shows UDP calls per cgroup (systemd Services).'+f+'
    ',en:{instance:{one:"application",other:"applications"}}},"app.ebpf_cachestat_hit_ratio":{groupBy:["label"],groupByLabel:["app_group"],aggregationMethod:"avg",info:'The ratio shows the percentage of data accessed directly in memory. Netdata gives a summary for this chart in Memory, and when the integration is enabled, Netdata shows page cache hit per cgroup (systemd Services).'+f+'
    ',en:{instance:{one:"application",other:"applications"}}},"app.ebpf_cachestat_dirty_pages":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["app_group"],info:'Number of modified pages in the Linux page cache. Netdata gives a summary for this chart in Memory, and when the integration is enabled, Netdata shows page cache hit per cgroup (systemd Services).'+f+'
    ',en:{instance:{one:"application",other:"applications"}}},"app.ebpf_cachestat_access":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["app_group"],info:'Number of accesses to data in the Linux page cache. Netdata gives a summary for this chart in Memory, and when the integration is enabled, Netdata shows page cache hit per cgroup (systemd Services).'+f+'
    ',en:{instance:{one:"application",other:"applications"}}},"app.ebpf_cachestat_misses":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["app_group"],info:'Number of accesses to data that was not present in the Linux page cache. Netdata gives a summary for this chart in Memory, and when the integration is enabled, Netdata shows page cache misses per cgroup (systemd Services).'+f+'
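    // Hedged annotation (not part of the original bundle): the hit-ratio chart above can be
    // reconstructed from the access and miss counters, assuming misses are a subset of accesses.
    function cachestatHitRatio(accesses, misses) {
      return accesses === 0 ? 100 : ((accesses - misses) / accesses) * 100; // percent served from the page cache
    }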
    ',en:{instance:{one:"application",other:"applications"}}},"app.ebpf_dc_hit":{aggregationMethod:"avg",groupBy:["label"],groupByLabel:["app_group"],info:'Percentage of file accesses that were present in the directory cache. Netdata gives a summary for this chart in directory cache, and when the integration is enabled, Netdata shows directory cache per cgroup (systemd Services).'+f+'
    ',en:{instance:{one:"application",other:"applications"}}},"app.ebpf_dc_reference":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["app_group"],info:'Number of times a file is accessed inside directory cache. Netdata gives a summary for this chart in directory cache, and when the integration is enabled, Netdata shows directory cache per cgroup (systemd Services).'+f+'
    ',en:{instance:{one:"application",other:"applications"}}},"app.ebpf_dc_not_cache":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["app_group"],info:'Number of times a file is accessed in the file system, because it is not present inside the directory cache. Netdata gives a summary for this chart in directory cache, and when the integration is enabled, Netdata shows directory cache per cgroup (systemd Services).'+f+'
    ',en:{instance:{one:"application",other:"applications"}}},"app.ebpf_dc_not_found":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["app_group"],info:'Number of times a file was not found on the file system. Netdata gives a summary for this chart in directory cache, and when the integration is enabled, Netdata shows directory cache per cgroup (systemd Services).'+f+'
    ',en:{instance:{one:"application",other:"applications"}}},"app.ebpf_call_swap_readpage":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["app_group"],info:'Number of calls to swap reader function. Netdata gives a summary for this chart in System Overview, and when the integration is enabled, Netdata shows swap metrics per cgroup (systemd Services).'+f+'
    ',en:{instance:{one:"application",other:"applications"}}},"app.ebpf_call_swap_writepage":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["app_group"],info:'Number of calls to swap writer function. Netdata gives a summary for this chart in System Overview, and when the integration is enabled, Netdata shows swap metrics per cgroup (systemd Services).'+f+'
    ',en:{instance:{one:"application",other:"applications"}}},"app.ebpf_shmget_call":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["app_group"],info:'Number of calls to shmget. Netdata gives a summary for this chart in System Overview, and when the integration is enabled, Netdata shows shared memory metrics per cgroup (systemd Services).'+f+'
    ',en:{instance:{one:"application",other:"applications"}}},"app.ebpf_shmat_call":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["app_group"],info:'Number of calls to shmat. Netdata gives a summary for this chart in System Overview, and when the integration is enabled, Netdata shows shared memory metrics per cgroup (systemd Services).'+f+'
    ',en:{instance:{one:"application",other:"applications"}}},"app.ebpf_shmdt_call":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["app_group"],info:'Number of calls to shmdt. Netdata gives a summary for this chart in System Overview, and when the integration is enabled, Netdata shows shared memory metrics per cgroup (systemd Services).'+f+'
    ',en:{instance:{one:"application",other:"applications"}}},"app.ebpf_shmctl_call":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["app_group"],info:'Number of calls to shmctl. Netdata gives a summary for this chart in System Overview, and when the integration is enabled, Netdata shows shared memory metrics per cgroup (systemd Services).'+f+'
    ',en:{instance:{one:"application",other:"applications"}}},"tc.qos":{info:"Network Interface traffic per QoS class",family:"traffic",en:{instance:{one:"interface",other:"interfaces"}},aggregationMethod:"sum",mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Traffic per QoS Class",layout:{left:0,top:0,width:6,height:5}}]},"tc.qos_packets":{family:"packets",info:"Network Interface packets per QoS class",en:{instance:{one:"interface",other:"interfaces"}},aggregationMethod:"sum"},"tc.qos_dropped":{family:"drops",mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Packets dropped per QoS Class",layout:{left:6,top:0,width:6,height:5}}],info:"Network Interface packets dropped per QoS class",en:{instance:{one:"interface",other:"interfaces"}},aggregationMethod:"sum"},"tc.qos_tokens":{family:"tokens",info:"Class Tokens.",en:{instance:{one:"interface",other:"interfaces"}}},"tc.qos_ctokens":{family:"tokens",info:"Class cTokens.",en:{instance:{one:"interface",other:"interfaces"}}},"net.net":{family:"traffic",aggregationMethod:"sum",mainheads:[{groupBy:["selected"],chartLibrary:"gauge",title:"Total Network Inbound",selectedDimensions:["received"],colors:s.default[0],valueRange:[0,null],layout:{left:0,top:0,width:3,height:5}},{groupBy:["selected"],chartLibrary:"gauge",title:"Total Network Outbound",selectedDimensions:["sent"],colors:s.default[1],valueRange:[0,null],layout:{left:3,top:0,width:3,height:5}}],info:a,en:{instance:{one:"interface",other:"interfaces"}}},"net.packets":{family:"packets",aggregationMethod:"sum",info:o,en:{instance:{one:"interface",other:"interfaces"}}},"net.errors":{family:"errors",aggregationMethod:"sum",mainheads:[{groupBy:["selected"],chartLibrary:"gauge",title:"Total Network Errors",colors:s.default[1],layout:{left:6,top:0,width:3,height:5}}],info:r,en:{instance:{one:"interface",other:"interfaces"}}},"net.fifo":{family:"errors",info:i,en:{instance:{one:"interface",other:"interfaces"}}},"net.drops":{family:"drops",aggregationMethod:"sum",mainheads:[{groupBy:["selected"],chartLibrary:"gauge",title:"Total Network Drops",colors:s.default[2],layout:{left:9,top:0,width:3,height:5}}],info:c,en:{instance:{one:"interface",other:"interfaces"}}},"net.compressed":{family:"compression",info:l,en:{instance:{one:"interface",other:"interfaces"}}},"net.events":{family:"errors",info:d,en:{instance:{one:"interface",other:"interfaces"}}},"net.duplex":{family:"duplex",info:h,en:{instance:{one:"interface",other:"interfaces"}}},"net.operstate":{family:"state",info:u,en:{instance:{one:"interface",other:"interfaces"}}},"net.carrier":{family:"state",info:p,en:{instance:{one:"interface",other:"interfaces"}}},"net.speed":{family:"speed",mainheads:[{groupBy:["selected"],aggregationMethod:"max",chartLibrary:"gauge",title:"Maximum Network Speed",colors:s.default[3],layout:{left:0,top:5,width:3,height:5}}],info:m,en:{instance:{one:"interface",other:"interfaces"}}},"net.mtu":{family:"mtu",aggregationMethod:"avg",mainheads:[{groupBy:["selected"],aggregationMethod:"min",chartLibrary:"easypiechart",title:"Minimum Network MTU",colors:s.default[4],layout:{left:3,top:5,width:3,height:5}}],info:g,en:{instance:{one:"interface",other:"interfaces"}}},"cgroup.net_net":{mainheads:[{groupBy:["selected"],valueRange:[0,null],chartLibrary:"easypiechart",title:"Total Network Inbound",selectedDimensions:["received"],colors:s.default[0],layout:{left:4,top:10,width:2,height:5}},{groupBy:["selected"],valueRange:[0,null],chartLibrary:"easypiechart",title:"Total Network 
Outbound",selectedDimensions:["sent"],colors:s.default[1],layout:{left:6,top:10,width:2,height:5}}],info:a,en:{instance:{one:"interface",other:"interfaces"}}},"cgroup.net_packets":{info:o,en:{instance:{one:"interface",other:"interfaces"}}},"cgroup.net_errors":{heads:[{groupBy:["selected"],chartLibrary:"gauge",title:"Total Network Errors",colors:s.default[1],layout:{left:15,top:0,width:3,height:5}}],info:r,en:{instance:{one:"interface",other:"interfaces"}}},"cgroup.net_fifo":{info:i,en:{instance:{one:"interface",other:"interfaces"}}},"cgroup.net_drops":{heads:[{groupBy:["selected"],chartLibrary:"gauge",title:"Total Network Drops",colors:s.default[2],layout:{left:9,top:0,width:3,height:5}}],info:c,en:{instance:{one:"interface",other:"interfaces"}}},"cgroup.net_compressed":{info:l,en:{instance:{one:"interface",other:"interfaces"}}},"cgroup.net_events":{info:d,en:{instance:{one:"interface",other:"interfaces"}}},"cgroup.net_duplex":{info:h,en:{instance:{one:"interface",other:"interfaces"}}},"cgroup.net_operstate":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Interface Operational States",layout:{left:10,top:10,width:2,height:5}}],info:u,en:{instance:{one:"interface",other:"interfaces"}}},"cgroup.net_carrier":{info:p,en:{instance:{one:"interface",other:"interfaces"}}},"cgroup.net_speed":{mainheads:[{groupBy:["selected"],aggregationMethod:"max",chartLibrary:"gauge",title:"Maximum Network Speed",colors:s.default[3],layout:{left:0,top:5,width:3,height:5}}],info:m,en:{instance:{one:"interface",other:"interfaces"}}},"cgroup.net_mtu":{mainheads:[{groupBy:["selected"],aggregationMethod:"min",groupingMethod:"min",chartLibrary:"easypiechart",title:"Minimum Network MTU",colors:s.default[4],layout:{left:8,top:10,width:2,height:5}}],info:g,en:{instance:{one:"interface",other:"interfaces"}}},"k8s.cgroup.net_net":{heads:[{groupBy:["selected"],chartLibrary:"gauge",title:"Total Network Inbound",selectedDimensions:["received"],colors:s.default[0],layout:{left:0,top:0,width:2,height:5}},{groupBy:["selected"],chartLibrary:"gauge",title:"Total Network Outbound",selectedDimensions:["sent"],colors:s.default[1],layout:{left:2,top:0,width:3,height:5}}],info:a,en:{instance:{one:"interface",other:"interfaces"}}},"k8s.cgroup.net_packets":{info:o,en:{instance:{one:"interface",other:"interfaces"}}},"k8s.cgroup.net_errors":{heads:[{groupBy:["selected"],chartLibrary:"gauge",title:"Total Network Errors",colors:s.default[1],layout:{left:6,top:0,width:2,height:5}}],info:r,en:{instance:{one:"interface",other:"interfaces"}}},"k8s.cgroup.net_fifo":{info:i,en:{instance:{one:"interface",other:"interfaces"}}},"k8s.cgroup.net_drops":{heads:[{groupBy:["selected"],chartLibrary:"gauge",title:"Total Network Drops",colors:s.default[2],layout:{left:8,top:0,width:2,height:5}}],info:c,en:{instance:{one:"interface",other:"interfaces"}}},"k8s.cgroup.net_compressed":{info:l,en:{instance:{one:"interface",other:"interfaces"}}},"k8s.cgroup.net_events":{info:d,en:{instance:{one:"interface",other:"interfaces"}}},"k8s.cgroup.net_operstate":{info:u,en:{instance:{one:"interface",other:"interfaces"}}},"k8s.cgroup.net_duplex":{info:h,en:{instance:{one:"interface",other:"interfaces"}}},"k8s.cgroup.net_carrier":{info:p,en:{instance:{one:"interface",other:"interfaces"}}},"k8s.cgroup.net_speed":{heads:[{groupBy:["selected"],aggregationMethod:"max",chartLibrary:"gauge",title:"Maximum Network 
Speed",colors:s.default[3],layout:{left:10,top:5,width:2,height:5}}],info:m,en:{instance:{one:"interface",other:"interfaces"}}},"k8s.cgroup.net_mtu":{heads:[{groupBy:["selected"],aggregationMethod:"min",chartLibrary:"easypiechart",title:"Minimum Network MTU",colors:s.default[4],layout:{left:4,top:0,width:2,height:5}}],info:g,en:{instance:{one:"interface",other:"interfaces"}}},"docker.containers_state":{en:{instance:{one:"system",other:"systems"}}},"docker.container_state":{en:{instance:{one:"container",other:"containers"}},mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Docker Container States",layout:{left:0,top:0,width:3,height:5}}]},"docker.container_health_status":{en:{instance:{one:"container",other:"containers"}},mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Docker Container Health Status",layout:{left:3,top:0,width:3,height:5}}]},"docker.healthy_containers":{en:{instance:{one:"system",other:"systems"}}},"docker.unhealthy_containers":{en:{instance:{one:"system",other:"systems"}}},"docker.images":{en:{instance:{one:"system",other:"systems"}},mainheads:[{groupBy:["selected"],chartLibrary:"dygraph",sparkline:!0,overlays:{latestValue:{type:"latestValue"}},hasToolbox:!1,colors:s.default[6],title:"Total Docker Images",layout:{left:6,top:0,width:3,height:5}}]},"docker.images_size":{en:{instance:{one:"system",other:"systems"}},mainheads:[{groupBy:["selected"],chartLibrary:"number",title:"Total Images Size",layout:{left:9,top:0,width:3,height:5}}]},"wireless.link_quality":{family:"quality",aggregationMethod:"avg",mainheads:[{groupBy:["selected"],aggregationMethod:"avg",chartLibrary:"gauge",title:"Wireless Link Quality"}],info:"Overall quality of the link. May be based on the level of contention or interference, the bit or frame error rate, how good the received signal is, some timing synchronisation, or other hardware metric.",en:{instance:{one:"interface",other:"interfaces"}}},"wireless.signal_level":{family:"signal",aggregationMethod:"avg",mainheads:[{groupBy:["selected"],aggregationMethod:"avg",chartLibrary:"gauge",title:"Average Signal Level"}],info:'Received signal strength (RSSI).',en:{instance:{one:"interface",other:"interfaces"}}},"wireless.noise_level":{family:"noise",aggregationMethod:"avg",mainheads:[{groupBy:["selected"],aggregationMethod:"avg",chartLibrary:"gauge",title:"Noise Level"}],info:"Background noise level (when no packet is transmitted).",en:{instance:{one:"interface",other:"interfaces"}}},"wireless.discarded_packets":{family:"errors",mainheads:[{groupBy:["selected"],chartLibrary:"gauge",title:"Discarded Packets"}],info:"

    The number of discarded packets.

    NWID - received packets with a different NWID or ESSID. Used to detect configuration problems or adjacent network existence (on the same frequency). Crypt - received packets that the hardware was unable to code/encode. This can be used to detect invalid encryption settings. Frag - received packets for which the hardware was not able to properly re-assemble the link layer fragments (most likely one was missing). Retry - packets that the hardware failed to deliver. Most MAC protocols will retry the packet a number of times before giving up. Misc - other packets lost in relation to specific wireless operations.

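    // Hedged annotation (not part of the original bundle): the discard reasons listed above,
    // restated as a lookup table a tooltip renderer might use. The key names are assumptions.
    const WIRELESS_DISCARD_REASONS = {
      nwid: "different NWID/ESSID (configuration problem or adjacent network on the same frequency)",
      crypt: "hardware could not code/encode the packet (check encryption settings)",
      frag: "link-layer fragments could not be reassembled (most likely one was missing)",
      retry: "delivery failed after the MAC protocol exhausted its retries",
      misc: "other losses tied to specific wireless operations",
    };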
    ",en:{instance:{one:"interface",other:"interfaces"}}},"wireless.missed_beacons":{family:"errors",info:'The number of periodic beacons from the Cell or the Access Point have been missed. Beacons are sent at regular intervals to maintain the cell coordination, failure to receive them usually indicates that the card is out of range.',en:{instance:{one:"interface",other:"interfaces"}}},"ib.bytes":{info:"The amount of traffic transferred by the port."},"ib.packets":{info:"The number of packets transferred by the port."},"ib.errors":{info:"The number of errors encountered by the port."},"ib.hwerrors":{info:"The number of hardware errors encountered by the port."},"ib.hwpackets":{info:"The number of hardware packets transferred by the port."},"netfilter.sockets":{en:{instance:{one:"firewall",other:"firewalls"}},colors:s.default[7],heads:[]},"netfilter.new":{en:{instance:{one:"firewall",other:"firewalls"}},heads:[{groupBy:["selected"],chartLibrary:"gauge",title:"New Connections",selectedDimensions:["new"],colors:s.default[19]}]},"ipvs.sockets":{mainheads:[{groupBy:["selected"],chartLibrary:"gauge",title:"Total Created Connections",layout:{left:6,top:0,width:4,height:5},colors:s.default[12]}],info:"Total created connections for all services and their servers. To see the IPVS connection table, run ipvsadm -Lnc.",en:{instance:{one:"IPVS server",other:"IPVS servers"}}},"ipvs.packets":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",selectedDimensions:["received"],title:"Total Received Packets",layout:{left:9,top:0,width:2,height:5},colors:s.default[2]},{groupBy:["selected"],chartLibrary:"easypiechart",selectedDimensions:["sent"],title:"Total Sent Packets",layout:{left:12,top:0,width:2,height:5},colors:s.default[1]}],info:"Total transferred packets for all services and their servers.",en:{instance:{one:"IPVS server",other:"IPVS servers"}}},"ipvs.net":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",selectedDimensions:["received"],title:"Total Received Bandwidth",layout:{left:0,top:0,width:2,height:5},colors:s.default[2]},{groupBy:["selected"],chartLibrary:"easypiechart",selectedDimensions:["sent"],title:"Total Sent Bandwidth",layout:{left:3,top:0,width:2,height:5},colors:s.default[1]}],info:"Total network traffic for all services and their servers.",en:{instance:{one:"IPVS server",other:"IPVS servers"}}},"disk.util":{family:"utilization",aggregationMethod:"avg",colors:s.default[5],mainheads:[{groupBy:["instance"],showPostAggregations:!0,postGroupBy:["selected"],aggregationMethod:"avg",chartLibrary:"gauge",title:"Utilization",colors:s.default[0],valueRange:[0,100],layout:{left:0,top:5,width:3,height:5}},{groupBy:["instance"],chartLibrary:"bars",title:"Top Disks by Utilization",layout:{left:3,top:5,width:3,height:5}}],info:"Disk Utilization measures the amount of time the disk was busy with something. This is not related to its performance. 100% means that the system always had an outstanding operation on the disk. 
Keep in mind that depending on the underlying technology of the disk, 100% here may or may not be an indication of congestion.",en:{instance:{one:"disk",other:"disks"}}},"disk.busy":{family:"utilization",aggregationMethod:"avg",mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Utilization",colors:s.default[0],valueRange:[0,100],layout:{left:6,top:10,width:3,height:5}},{groupBy:["instance"],chartLibrary:"bars",title:"Top Disks by Busy Time",layout:{left:9,top:10,width:3,height:5}}],colors:s.default[4],info:"Disk Busy Time measures the amount of time the disk was busy with something.",en:{instance:{one:"disk",other:"disks"}}},"disk.backlog":{family:"utilization",aggregationMethod:"avg",mainheads:[{groupBy:["selected"],chartLibrary:"dygraph",sparkline:!0,overlays:{latestValue:{type:"latestValue"}},hasToolbox:!1,title:"Average Disk Backlog",colors:s.default[12],layout:{left:6,top:5,width:3,height:5}},{groupBy:["instance"],chartLibrary:"bars",title:"Top Disks by Backlog",layout:{left:9,top:5,width:3,height:5}}],colors:s.default[8],info:"Backlog is an indication of the duration of pending disk operations. On every I/O event the system multiplies the time spent doing I/O since the last update of this field by the number of pending operations. While not accurate, this metric can provide an indication of the expected completion time of the operations in progress.",en:{instance:{one:"disk",other:"disks"}}},"disk.io":{family:"io",info:"The amount of data transferred to and from disk.",en:{instance:{one:"disk",other:"disks"}}},"disk_ext.io":{family:"io",info:"The amount of discarded data that is no longer in use by a mounted file system.",en:{instance:{one:"disk",other:"disks"}}},"disk.ops":{family:"iops",mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Read IOPS",selectedDimensions:["reads"],colors:s.default[2],layout:{left:0,top:0,width:3,height:5}},{groupBy:["instance"],chartLibrary:"bars",selectedDimensions:["reads"],title:"Top Disks by Read IOPS",layout:{left:3,top:0,width:3,height:5}},{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Write IOPS",selectedDimensions:["writes"],colors:s.default[1],layout:{left:6,top:0,width:3,height:5}},{groupBy:["instance"],chartLibrary:"bars",selectedDimensions:["writes"],title:"Top Disks by Write IOPS",layout:{left:9,top:0,width:3,height:5}}],info:"Completed disk I/O operations. Keep in mind the number of operations requested might be higher, since the system is able to merge operations that are adjacent to each other (see the merged operations chart).",en:{instance:{one:"disk",other:"disks"}},dimensionsOnNonDimensionGrouping:["reads"]},"disk_ext.ops":{family:"iops",info:"

    The number (after merges) of completed discard/flush requests.

    Discard commands inform disks which blocks of data are no longer considered to be in use and therefore can be erased internally. They are useful for solid-state drives (SSDs) and thinly-provisioned storage. Discarding/trimming enables the SSD to handle garbage collection more efficiently, which would otherwise slow down future write operations to the involved blocks.

    Flush operations transfer all modified in-core data (i.e., modified buffer cache pages) to the disk device so that all changed information can be retrieved even if the system crashes or is rebooted. Flush requests are executed by disks. Flush requests are not tracked for partitions. Before being merged, flush operations are counted as writes.

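    /* A minimal Node.js sketch of where these counters come from, assuming a
       Linux kernel new enough to expose them in /proc/diskstats (discard fields
       were added in 4.18 and flush fields in 5.5; the field positions follow the
       kernel's iostats documentation, and the device name is illustrative):

       const { readFileSync } = require("fs");

       function diskstats(device) {
         for (const line of readFileSync("/proc/diskstats", "utf8").split("\n")) {
           const f = line.trim().split(/\s+/);
           if (f[2] === device)
             return {
               ioTicksMs: +f[12],   // time spent doing I/O - the basis of disk.util
               weightedMs: +f[13],  // weighted I/O time - the basis of disk.backlog
               discards: +f[14],    // completed discard requests (after merges)
               flushes: +f[18],     // completed flush requests
             };
         }
         return null;
       }

       // The charts show rates, i.e. deltas between two samples divided by the interval.
       console.log(diskstats("sda"));
    */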
    ",en:{instance:{one:"disk",other:"disks"}}},"disk.qops":{family:"utilization",mainheads:[{groupBy:["selected"],chartLibrary:"dygraph",sparkline:!0,overlays:{latestValue:{type:"latestValue"}},hasToolbox:!1,title:"Average Disk Current I/O Operations",colors:s.default[12],layout:{left:0,top:10,width:3,height:5}},{groupBy:["instance"],chartLibrary:"bars",title:"Top Disks by Current I/O Operations",layout:{left:3,top:10,width:3,height:5}}],info:"I/O operations currently in progress. This metric is a snapshot - it is not an average over the last interval.",en:{instance:{one:"disk",other:"disks"}}},"disk.iotime":{family:"iotime",aggregationMethod:"avg",info:"The sum of the duration of all completed I/O operations. This number can exceed the interval if the disk is able to execute I/O operations in parallel.",en:{instance:{one:"disk",other:"disks"}},dimensionsOnNonDimensionGrouping:["reads"]},"disk_ext.iotime":{family:"iotime",aggregationMethod:"avg",info:"The sum of the duration of all completed discard/flush operations. This number can exceed the interval if the disk is able to execute discard/flush operations in parallel.",en:{instance:{one:"disk",other:"disks"}},dimensionsOnNonDimensionGrouping:["reads"]},"disk.mops":{family:"iops",info:"The number of merged disk operations. The system is able to merge adjacent I/O operations, for example two 4KB reads can become one 8KB read before given to disk.",en:{instance:{one:"disk",other:"disks"}},dimensionsOnNonDimensionGrouping:["reads"]},"disk_ext.mops":{family:"iops",info:"The number of merged discard disk operations. Discard operations which are adjacent to each other may be merged for efficiency.",en:{instance:{one:"disk",other:"disks"}},dimensionsOnNonDimensionGrouping:["reads"]},"disk.svctm":{family:"iotime",aggregationMethod:"avg",info:"The average service time for completed I/O operations. This metric is calculated using the total busy time of the disk and the number of completed operations. If the disk is able to execute multiple parallel operations the reporting average service time will be misleading.",en:{instance:{one:"disk",other:"disks"}},dimensionsOnNonDimensionGrouping:["svctm"]},"disk.latency_io":{family:"latency",aggregationMethod:"avg",info:'Disk I/O latency is the time it takes for an I/O request to be completed. Disk chart has a relationship with Filesystem charts. This chart is based on the bio_tracepoints tool of the ebpf_exporter.'+f,en:{instance:{one:"disk",other:"disks"}}},"disk.avgsz":{family:"size",info:"The average I/O operation size.",en:{instance:{one:"disk",other:"disks"}}},"disk_ext.avgsz":{family:"size",info:"The average discard operation size.",en:{instance:{one:"disk",other:"disks"}}},"disk.await":{family:"iotime",aggregationMethod:"avg",info:"The average time for I/O requests issued to the device to be served. This includes the time spent by the requests in queue and the time spent servicing them.",en:{instance:{one:"disk",other:"disks"}}},"disk_ext.await":{family:"iowait",aggregationMethod:"avg",info:"The average time for discard/flush requests issued to the device to be served. This includes the time spent by the requests in queue and the time spent servicing them.",en:{instance:{one:"disk",other:"disks"}}},"disk.space":{family:"utilization",mainheads:[{chartLibrary:"d3pie",title:"Disk Space Usage",layout:{left:0,top:0,width:3,height:5}}],info:"Disk space utilization. 
The reserved for root space is automatically set aside by the system to prevent the root user from running out of space.",en:{instance:{one:"mount",other:"mounts"}}},"disk.inodes":{family:"inodes",mainheads:[{chartLibrary:"d3pie",title:"Disk Files Usage",layout:{left:3,top:0,width:3,height:5}}],info:"Inodes (or index nodes) are filesystem objects (e.g. files and directories). On many types of file system implementations, the maximum number of inodes is fixed at filesystem creation, limiting the maximum number of files the filesystem can hold. It is possible for a device to run out of inodes. When this happens, new files cannot be created on the device, even though there may be free space available.",en:{instance:{one:"mount",other:"mounts"}}},"disk.bcache_hit_ratio":{family:"cache",aggregationMethod:"avg",info:"

    Bcache (block cache) is a cache in the block layer of the Linux kernel, which is used for accessing secondary storage devices. It allows one or more fast storage devices, such as flash-based solid-state drives (SSDs), to act as a cache for one or more slower storage devices, such as hard disk drives (HDDs).

    Percentage of data requests that were fulfilled right from the block cache. Hits and misses are counted per individual IO as bcache sees them. A partial hit is counted as a miss.

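    /* The ratio charted here is the plain hit percentage over the interval; a
       sketch of the arithmetic (partial hits count as misses, per the text above):

       const hitRatioPct = (hits, misses) =>
         hits + misses === 0 ? 0 : (hits / (hits + misses)) * 100;
    */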
    ",en:{instance:{one:"disk",other:"disks"}}},"disk.bcache_rates":{family:"cache",aggregationMethod:"avg",info:"Throttling rates. To avoid congestions bcache tracks latency to the cache device, and gradually throttles traffic if the latency exceeds a threshold. If the writeback percentage is nonzero, bcache tries to keep around this percentage of the cache dirty by throttling background writeback and using a PD controller to smoothly adjust the rate.",en:{instance:{one:"disk",other:"disks"}}},"disk.bcache_size":{family:"cache",info:"The amount of dirty data for this backing device in the cache.",en:{instance:{one:"disk",other:"disks"}}},"disk.bcache_usage":{family:"cache",aggregationMethod:"avg",info:"The percentage of cache device which does not contain dirty data, and could potentially be used for writeback.",en:{instance:{one:"disk",other:"disks"}}},"disk.bcache_cache_read_races":{family:"cache",info:"Read races happen when a bucket was reused and invalidated while data was being read from the cache. When this occurs the data is reread from the backing device. IO errors are decayed by the half life. If the decaying count reaches the limit, dirty data is written out and the cache is disabled.",en:{instance:{one:"disk",other:"disks"}}},"disk.bcache":{family:"cache",info:"Hits and misses are counted per individual IO as bcache sees them; a partial hit is counted as a miss. Collisions happen when data was going to be inserted into the cache from a cache miss, but raced with a write and data was already present. Cache miss reads are rounded up to the readahead size, but without overlapping existing cache entries.",en:{instance:{one:"disk",other:"disks"}}},"disk.bcache_bypass":{family:"cache",info:"Hits and misses for IO that is intended to skip the cache.",en:{instance:{one:"disk",other:"disks"}}},"disk.bcache_cache_alloc":{family:"cache",aggregationMethod:"avg",info:"

    Working set size.

    Unused is the percentage of the cache that does not contain any data. Dirty is the data that is modified in the cache but not yet written to the permanent storage. Clean data matches the data stored on the permanent storage. Metadata is bcache's metadata overhead.

    ",en:{instance:{one:"disk",other:"disks"}}},"nfs.net":{info:"The number of received UDP and TCP packets.",mainheads:[{groupBy:["selected"],chartLibrary:"gauge",title:"Total received packets"}],en:{instance:{one:"NFS client",other:"NFS clients"}}},"nfs.rpc":{mainheads:[{groupBy:["selected"],chartLibrary:"gauge",title:"RPC Statistics"}],info:"

    Remote Procedure Call (RPC) statistics.

    Calls - all RPC calls. Retransmits - retransmitted calls. AuthRefresh - authentication refresh calls (validating credentials with the server).

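    /* A sketch of deriving a retransmission ratio from the client-side counters
       in /proc/net/rpc/nfs (the "rpc" line is assumed to carry calls,
       retransmissions and auth refreshes, as reported by nfsstat -c):

       const { readFileSync } = require("fs");
       const rpcLine = readFileSync("/proc/net/rpc/nfs", "utf8")
         .split("\n").find((l) => l.startsWith("rpc "));
       if (rpcLine) {
         const [, calls, retrans] = rpcLine.split(/\s+/).map(Number);
         console.log("retransmit %:", calls ? (retrans / calls) * 100 : 0);
       }
    */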
    ",en:{instance:{one:"NFS client",other:"NFS clients"}}},"nfs.proc2":{info:'NFSv2 RPC calls. The individual metrics are described in RFC1094.',en:{instance:{one:"NFS client",other:"NFS clients"}}},"nfs.proc3":{info:'NFSv3 RPC calls. The individual metrics are described in RFC1813.',en:{instance:{one:"NFS client",other:"NFS clients"}}},"nfs.proc4":{info:'NFSv4 RPC calls. The individual metrics are described in RFC8881.',en:{instance:{one:"NFS client",other:"NFS clients"}}},"nfsd.readcache":{mainheads:[{groupBy:["selected"],chartLibrary:"gauge",title:"NFS Reply Cache Statistics"}],info:"

    Reply cache statistics. The reply cache keeps track of responses to recently performed non-idempotent transactions, and in case of a replay, the cached response is sent instead of attempting to perform the operation again.

    Hits - the client did not receive a reply and re-transmitted its request; the cached response was sent again. This event is undesirable. Misses - an operation that requires caching (non-idempotent). Nocache - an operation that does not require caching (idempotent).",en:{instance:{one:"NFS server",other:"NFS servers"}}},"nfsd.filehandles":{info:"

    File handle statistics. File handles are small pieces of memory that keep track of which file is open.

    Stale - happens when a file handle references a location that has been recycled. This also occurs when the server loses connection and applications are still using files that are no longer accessible.",en:{instance:{one:"NFS server",other:"NFS servers"}}},"nfsd.io":{mainheads:[{groupBy:["selected"],chartLibrary:"gauge",title:"NFS IO Statistics"}],info:"The amount of data transferred to and from disk.",en:{instance:{one:"NFS server",other:"NFS servers"}}},"nfsd.threads":{mainheads:[{groupBy:["selected"],chartLibrary:"number",title:"NFS Daemon Threads"}],info:"The number of threads used by the NFS daemon.",en:{instance:{one:"NFS server",other:"NFS servers"}}},"nfsd.readahead":{info:"

    Read-ahead cache statistics. NFS read-ahead predictively requests blocks from a file in advance of I/O requests by the application. It is designed to improve client sequential read throughput.

    10%-100% - a histogram of the depth at which the block was found, i.e., how far the cached block is from the block that was first requested. Misses - the block was not found in the read-ahead cache.

    ",en:{instance:{one:"NFS server",other:"NFS servers"}}},"nfsd.net":{mainheads:[{groupBy:["selected"],chartLibrary:"gauge",title:"Total Network Packets received"}],info:"The number of received UDP and TCP packets.",en:{instance:{one:"NFS server",other:"NFS servers"}}},"nfsd.rpc":{mainheads:[{groupBy:["selected"],chartLibrary:"gauge",title:"RPC Statistics"}],info:"

    Remote Procedure Call (RPC) statistics.

    Calls - all RPC calls. BadAuth - bad authentication. It does not count attempts to mount from a machine that is not in your exports file. BadFormat - other errors.

    ",en:{instance:{one:"NFS server",other:"NFS servers"}}},"nfsd.proc2":{info:'NFSv2 RPC calls. The individual metrics are described in RFC1094.',en:{instance:{one:"NFS server",other:"NFS servers"}}},"nfsd.proc3":{info:'NFSv3 RPC calls. The individual metrics are described in RFC1813.',en:{instance:{one:"NFS server",other:"NFS servers"}}},"nfsd.proc4":{info:'NFSv4 RPC calls. The individual metrics are described in RFC8881.',en:{instance:{one:"NFS server",other:"NFS servers"}}},"nfsd.proc4ops":{info:'NFSv4 RPC operations. The individual metrics are described in RFC8881.',en:{instance:{one:"NFS server",other:"NFS servers"}}},"zfs.arc_size":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Average ZFS ARC Size"}],info:"

    The size of the ARC.

    Arcsz - actual size. Target - target size that the ARC is attempting to maintain (adaptive). Min - minimum size limit. When the ARC is asked to shrink, it will stop shrinking at this value. Max - maximum size limit.

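    /* On Linux, OpenZFS exposes these values in /proc/spl/kstat/zfs/arcstats
       (path and header layout assumed from OpenZFS; size = Arcsz, c = Target,
       c_min = Min, c_max = Max). A minimal reader:

       const { readFileSync } = require("fs");
       const arc = Object.fromEntries(
         readFileSync("/proc/spl/kstat/zfs/arcstats", "utf8")
           .split("\n").slice(2)                  // skip the two kstat header lines
           .map((l) => l.trim().split(/\s+/))
           .filter((f) => f.length === 3)
           .map(([name, , value]) => [name, Number(value)]));
       console.log(arc.size, arc.c, arc.c_min, arc.c_max);
    */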
    ",en:{instance:{one:"ZFS system",other:"ZFS systems"}}},"zfs.l2_size":{info:"

    The size of the L2ARC.

    Actual - size of compressed data. Size - size of uncompressed data.

    ",en:{instance:{one:"ZFS system",other:"ZFS systems"}}},"zfs.reads":{mainheads:[{groupBy:["selected"],chartLibrary:"gauge",title:"Total Read Requests"}],info:"

    The number of read requests.

    ARC - all prefetch and demand requests. Demand - triggered by an application request. Prefetch - triggered by the prefetch mechanism, not directly from an application request. Metadata - metadata read requests. L2 - L2ARC read requests.

    ",en:{instance:{one:"ZFS system",other:"ZFS systems"}}},"zfs.bytes":{info:"The amount of data transferred to and from the L2ARC cache devices.",en:{instance:{one:"ZFS system",other:"ZFS systems"}}},"zfs.hits":{aggregationMethod:"avg",mainheads:[{groupBy:["selected"],chartLibrary:"gauge",title:"Hit Rate of ARC Read Requests"}],info:"

    Hit rate of the ARC read requests.

    Hits - a data block was in the ARC DRAM cache and returned. Misses - a data block was not in the ARC DRAM cache. It will be read from the L2ARC cache devices (if available and the data is cached on them) or the pool disks.

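    /* This chart and the demand/prefetch/metadata/L2 variants below all share the
       same arithmetic; a sketch:

       const hitRatePct = (hits, misses) =>
         hits + misses === 0 ? 0 : (hits / (hits + misses)) * 100;
    */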
    ",en:{instance:{one:"ZFS system",other:"ZFS systems"}}},"zfs.dhits":{aggregationMethod:"avg",info:"

    Hit rate of the ARC data and metadata demand read requests. Demand requests are triggered by an application request.

    Hits - a data block was in the ARC DRAM cache and returned. Misses - a data block was not in the ARC DRAM cache. It will be read from the L2ARC cache devices (if available and the data is cached on them) or the pool disks.

    ",en:{instance:{one:"ZFS system",other:"ZFS systems"}}},"zfs.phits":{aggregationMethod:"avg",info:"

    Hit rate of the ARC data and metadata prefetch read requests. Prefetch requests are triggered by the prefetch mechanism, not directly from an application request.

    Hits - a data block was in the ARC DRAM cache and returned. Misses - a data block was not in the ARC DRAM cache. It will be read from the L2ARC cache devices (if available and the data is cached on them) or the pool disks.

    ",en:{instance:{one:"ZFS system",other:"ZFS systems"}}},"zfs.mhits":{aggregationMethod:"avg",info:"

    Hit rate of the ARC metadata read requests.

    Hits - a data block was in the ARC DRAM cache and returned. Misses - a data block was not in the ARC DRAM cache. It will be read from the L2ARC cache devices (if available and the data is cached on them) or the pool disks.

    ",en:{instance:{one:"ZFS system",other:"ZFS systems"}}},"zfs.l2hits":{aggregationMethod:"avg",info:"

    Hit rate of the L2ARC lookups.

    Hits - a data block was in the L2ARC cache and returned. Misses - a data block was not in the L2ARC cache. It will be read from the pool disks.

    ",en:{instance:{one:"ZFS system",other:"ZFS systems"}}},"zfs.demand_data_hits":{aggregationMethod:"avg",info:"

    Hit rate of the ARC data demand read requests. Demand requests are triggered by an application request.

    Hits - a data block was in the ARC DRAM cache and returned. Misses - a data block was not in the ARC DRAM cache. It will be read from the L2ARC cache devices (if available and the data is cached on them) or the pool disks.

    ",en:{instance:{one:"ZFS system",other:"ZFS systems"}}},"zfs.prefetch_data_hits":{aggregationMethod:"avg",info:"

    Hit rate of the ARC data prefetch read requests. Prefetch requests are triggered by the prefetch mechanism, not directly from an application request.

    Hits - a data block was in the ARC DRAM cache and returned. Misses - a data block was not in the ARC DRAM cache. It will be read from the L2ARC cache devices (if available and the data is cached on them) or the pool disks.

    ",en:{instance:{one:"ZFS system",other:"ZFS systems"}}},"zfs.list_hits":{info:"MRU (most recently used) and MFU (most frequently used) cache list hits. MRU and MFU lists contain metadata for requested blocks which are cached. Ghost lists contain metadata of the evicted pages on disk.",en:{instance:{one:"ZFS system",other:"ZFS systems"}}},"zfs.arc_size_breakdown":{aggregationMethod:"avg",info:"The size of MRU (most recently used) and MFU (most frequently used) cache.",en:{instance:{one:"ZFS system",other:"ZFS systems"}}},"zfs.memory_ops":{mainheads:[{groupBy:["selected"],chartLibrary:"gauge",title:"Total Memory Operation Statistics"}],info:"

    Memory operation statistics.

    Direct - synchronous memory reclaim. Data is evicted from the ARC and free slabs reaped. Throttled - the number of times that ZFS had to limit ARC growth. A constantly increasing value can indicate excessive pressure to evict data from the ARC. Indirect - asynchronous memory reclaim. It reaps free slabs from the ARC cache.

    ",en:{instance:{one:"ZFS system",other:"ZFS systems"}}},"zfs.important_ops":{info:"

    Eviction and insertion operation statistics.

    EvictSkip - skipped data eviction operations. Deleted - old data is evicted (deleted) from the cache. MutexMiss - an attempt to get a hash or data block mutex while it was locked during eviction. HashCollisions - occur when two distinct data block numbers have the same hash value.

    ",en:{instance:{one:"ZFS system",other:"ZFS systems"}}},"zfs.actual_hits":{aggregationMethod:"avg",info:"

    MRU and MFU cache hit rate.

    Hits - a data block was in the ARC DRAM cache and returned. Misses - a data block was not in the ARC DRAM cache. It will be read from the L2ARC cache devices (if available and the data is cached on them) or the pool disks.

    ",en:{instance:{one:"ZFS system",other:"ZFS systems"}}},"zfs.hash_elements":{info:"

    Data Virtual Address (DVA) hash table element statistics.

    Current - current number of elements. Max - maximum number of elements seen.

    ",en:{instance:{one:"ZFS system",other:"ZFS systems"}}},"zfs.hash_chains":{info:"

    Data Virtual Address (DVA) hash table chain statistics. A chain is formed when two or more distinct data block numbers have the same hash value.

    Current - current number of chains. Max - longest length seen for a chain. If the value is high, performance may degrade as the hash locks are held longer while the chains are walked.

    ",en:{instance:{one:"ZFS system",other:"ZFS systems"}}},"zfspool.state":{info:'ZFS pool state. The overall health of a pool, as reported by zpool status, is determined by the aggregate state of all devices within the pool. For states description, see ZFS documentation.',en:{instance:{one:"ZFS pool",other:"ZFS pools"}}},"zfspool.pool_space_utilization":{info:"Percentage of pool space used.",aggregationMethod:"max",en:{instance:{one:"ZFS pool",other:"ZFS pools"}},mainheads:[{groupBy:["label"],groupByLabel:["pool"],aggregationMethod:"avg",chartLibrary:"bars",title:"Space Usage",dimensionsSort:"valueDesc",colors:s.default[12],layout:{left:0,top:0,width:3,height:5}}]},"zfspool.pool_space_usage":{info:"The amount of available and used space in the pool.",en:{instance:{one:"ZFS pool",other:"ZFS pools"}},mainheads:[{groupBy:["label"],groupByLabel:["pool"],selectedDimensions:["free"],chartLibrary:"bars",title:"Space Free",dimensionsSort:"valueDesc",colors:s.default[12],layout:{left:3,top:0,width:3,height:5}}]},"zfspool.pool_fragmentation":{info:"The amount of fragmentation in the pool. As the amount of space allocated increases, it becomes more difficult to locate free space. This may result in lower write performance compared to pools with more unfragmented free space.",aggregationMethod:"max",en:{instance:{one:"ZFS pool",other:"ZFS pools"}},mainheads:[{groupBy:["label"],groupByLabel:["pool"],aggregationMethod:"avg",chartLibrary:"bars",title:"Fragmentation",dimensionsSort:"valueDesc",colors:s.default[12],layout:{left:6,top:0,width:3,height:5}}]},"zfspool.pool_health_state":{info:'ZFS pool health state. The overall health of a pool, as reported by zpool status, is determined by the aggregate state of all devices within the pool. For states description, see ZFS documentation.',en:{instance:{one:"ZFS pool",other:"ZFS pools"}},mainheads:[{groupBy:["dimension"],chartLibrary:"bars",title:"Health Status",dimensionsSort:"valueDesc",colors:s.default[12],layout:{left:9,top:0,width:3,height:5}}]},"mysql.net":{info:"The amount of data sent to mysql clients (out) and received from mysql clients (in).",en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"mysql.queries":{mainheads:[{groupBy:["selected"],chartLibrary:"number",title:"No. of Slow Queries",selectedDimensions:["slow_queries"],colors:s.default[0],layout:{left:0,top:0,width:3,height:5}}],info:'The number of statements executed by the server.
    • queries counts the statements executed within stored SQL programs.
    • questions counts the statements sent to the mysql server by mysql clients.
    • slow queries counts the number of statements that took more than long_query_time seconds to be executed. For more information about slow queries check the mysql slow query log.
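    /* An illustrative way to enable slow-query accounting (the threshold is an
       example, not a recommendation):

       SET GLOBAL slow_query_log = 'ON';
       SET GLOBAL long_query_time = 1;  -- seconds; slower statements count as slow queries
    */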
    ',en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"mysql.handlers":{info:'Usage of the internal handlers of mysql. This chart provides very good insights into what the mysql server is actually doing. (If the chart is not showing all these dimensions, it is because they are zero - set Which dimensions to show? to All in the dashboard settings to render even the zero values.)
    • commit, the number of internal COMMIT statements.
    • delete, the number of times that rows have been deleted from tables.
    • prepare, a counter for the prepare phase of two-phase commit operations.
    • read first, the number of times the first entry in an index was read. A high value suggests that the server is doing a lot of full index scans; e.g. SELECT col1 FROM foo, with col1 indexed.
    • read key, the number of requests to read a row based on a key. If this value is high, it is a good indication that your tables are properly indexed for your queries.
    • read next, the number of requests to read the next row in key order. This value is incremented if you are querying an index column with a range constraint or if you are doing an index scan.
    • read prev, the number of requests to read the previous row in key order. This read method is mainly used to optimize ORDER BY ... DESC.
    • read rnd, the number of requests to read a row based on a fixed position. A high value indicates you are doing a lot of queries that require sorting of the result. You probably have a lot of queries that require MySQL to scan entire tables or you have joins that do not use keys properly.
    • read rnd next, the number of requests to read the next row in the data file. This value is high if you are doing a lot of table scans. Generally this suggests that your tables are not properly indexed or that your queries are not written to take advantage of the indexes you have.
    • rollback, the number of requests for a storage engine to perform a rollback operation.
    • savepoint, the number of requests for a storage engine to place a savepoint.
    • savepoint rollback, the number of requests for a storage engine to roll back to a savepoint.
    • update, the number of requests to update a row in a table.
    • write, the number of requests to insert a row in a table.
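    /* The raw counters behind these dimensions can be listed directly; Handler_*
       is the status-variable prefix in MySQL/MariaDB:

       SHOW GLOBAL STATUS LIKE 'Handler%';
    */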
    ',en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"mysql.table_locks":{info:"MySQL table locks counters:
    • immediate, the number of times that a request for a table lock could be granted immediately.
    • waited, the number of times that a request for a table lock could not be granted immediately and a wait was needed. If this is high and you have performance problems, you should first optimize your queries, and then either split your table or tables or use replication.
    ",en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"mysql.innodb_deadlocks":{mainheads:[{groupBy:["selected"],chartLibrary:"number",title:"Total No. of Deadlocks",colors:s.default[1],layout:{left:3,top:0,width:3,height:5}}],info:'A deadlock happens when two or more transactions mutually hold and request for locks, creating a cycle of dependencies. For more information about how to minimize and handle deadlocks.',en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"mysql.galera_cluster_status":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Galera Cluster Status",colors:s.default[2],layout:{left:6,top:0,width:3,height:5}}],info:"

    Status of this cluster component.

    Primary - primary group configuration, quorum present. Non-Primary - non-primary group configuration, quorum lost. Disconnected - not connected to group, retrying.

    ",en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"mysql.galera_cluster_state":{info:"

    Membership state of this cluster component.

    Undefined - undefined state. Joining - the node is attempting to join the cluster. Donor - the node has blocked itself while it sends a State Snapshot Transfer (SST) to bring a new node up to date with the cluster. Joined - the node has successfully joined the cluster. Synced - the node has established a connection with the cluster and synchronized its local databases with those of the cluster. Error - the node is not part of the cluster and does not replicate transactions. This state is provider-specific; check the wsrep_local_state_comment variable for a description.

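    /* The states above are reported by the wsrep provider; they can be inspected
       directly (illustrative):

       SHOW GLOBAL STATUS LIKE 'wsrep_local_state%';
    */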
    ",en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"mysql.galera_cluster_weight":{info:"The value is counted as a sum of pc.weight of the nodes in the current Primary Component.",en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"mysql.galera_connected":{info:"0 means that the node has not yet connected to any of the cluster components. This may be due to misconfiguration.",en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"mysql.open_transactions":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Open Transactions",colors:s.default[3],layout:{left:9,top:0,width:3,height:5}}],info:"The number of locally running transactions which have been registered inside the wsrep provider. This means transactions which have made operations which have caused write set population to happen. Transactions which are read only are not counted.",en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"mysql.table_open_cache_overflows":{info:"The number of overflows in the table open cache on the MySQL server. Monitoring this metric can help identify potential performance issues related to the table open cache.",en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"mysql.join_issues":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"MySQL Join Operation Issues",layout:{left:0,top:5,width:3,height:5}}],info:"The number of issues with joins on the MySQL server. Monitoring this metric can help identify potential performance issues related to joins.",en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"mysql.sort_issues":{info:"The number of issues with sorts on the MySQL server. Monitoring this metric can help identify potential performance issues related to sorting.",en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"mysql.tmp":{info:"The usage of temporary files on the MySQL server. Monitoring this metric can help identify potential performance issues related to temporary files.",en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"mysql.threads_created":{mainheads:[{groupBy:["selected"],chartLibrary:"number",title:"Number of MySQL threads",colors:s.default[4],layout:{left:0,top:5,width:3,height:5}}],info:"The number of threads that have been created on the MySQL server. Monitoring this metric can help identify potential performance issues related to thread creation.",en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"mysql.thread_cache_misses":{info:"The number of thread cache misses on the MySQL server. Monitoring this metric can help identify potential performance issues related to the thread cache.",en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"mysql.innodb_io":{info:"The I/O operations on the InnoDB storage engine on the MySQL server. Monitoring this metric can help identify potential performance issues related to the InnoDB storage engine.",en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"mysql.innodb_io_ops":{info:"The number of I/O operations on the InnoDB storage engine on the MySQL server. Monitoring this metric can help identify potential performance issues related to the InnoDB storage engine.",en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"mysql.innodb_io_pending_ops":{info:"The number of pending I/O operations on the InnoDB storage engine on the MySQL server. 
Monitoring this metric can help identify potential performance issues related to the InnoDB storage engine.",en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"mysql.innodb_log":{info:"The usage of the InnoDB log on the MySQL server. Monitoring this metric can help identify potential performance issues related to the InnoDB log.",en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"mysql.innodb_cur_row_lock":{info:"The number of current row locks on the InnoDB storage engine on the MySQL server. Monitoring this metric can help identify potential performance issues related to row locking on the InnoDB storage engine.",en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"mysql.innodb_rows":{info:"The number of rows on the InnoDB storage engine on the MySQL server. Monitoring this metric can help identify the usage patterns of the InnoDB storage engine and potential performance issues.",en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"mysql.innodb_buffer_pool_pages":{info:"The number of pages in the InnoDB buffer pool on the MySQL server. Monitoring this metric can help identify potential performance issues related to the InnoDB buffer pool.",en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"mysql.innodb_buffer_pool_pages_flushed":{info:"The number of pages flushed from the InnoDB buffer pool on the MySQL server. Monitoring this metric can help identify potential performance issues related to the InnoDB buffer pool.",en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"mysql.innodb_buffer_pool_bytes":{info:"The amount of memory used by the InnoDB buffer pool on the MySQL server. Monitoring this metric can help identify potential performance issues related to the InnoDB buffer pool.",en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"mysql.innodb_buffer_pool_read_ahead":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"InnoDB Buffer Pool Read Ahead",colors:s.default[6],layout:{left:6,top:5,width:3,height:5}}],info:"The amount of read ahead performed by the InnoDB buffer pool on the MySQL server. Monitoring this metric can help identify potential performance issues related to read ahead on the InnoDB buffer.",en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"mysql.innodb_buffer_pool_read_ahead_rnd":{info:"The amount of random read ahead performed by the InnoDB buffer pool on the MySQL server. Monitoring this metric can help identify potential performance issues related to random read ahead on the InnoDB buffer pool.",en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"mysql.innodb_buffer_pool_ops":{info:"The number of operations on the InnoDB buffer pool on the MySQL server. Monitoring this metric can help identify potential performance issues related to the InnoDB buffer pool.",en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"mysql.innodb_os_log":{info:"The usage of the InnoDB OS log on the MySQL server. Monitoring this metric can help identify potential performance issues related to the InnoDB OS log.",en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"mysql.files":{info:"The number of files opened by the MySQL server. Monitoring this metric can help identify potential performance issues related to file opening.",en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"mysql.files_rate":{info:"The rate of file opening by the MySQL server. 
Monitoring this metric can help identify potential performance issues related to file opening.",en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"mysql.opened_tables":{info:"The number of tables opened by the MySQL server. Monitoring this metric can help identify potential performance issues related to table opening.",en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"mysql.process_list_fetch_query_duration":{info:"The duration of queries in the process list on the MySQL server. Monitoring this metric can help identify potential performance issues related to queries in the process list.",en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"mysql.process_list_longest_query_duration":{mainheads:[{groupBy:["selected"],aggregationMethod:"max",chartLibrary:"number",title:"Longest Query Duration",colors:s.default[7],layout:{left:9,top:5,width:3,height:5}}],info:"The duration of the longest query in the process list on the MySQL server. Monitoring this metric can help identify potential performance issues related to long running queries in the process list.",en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"mysql.qcache_ops":{info:"The number of operations on the query cache on the MySQL server. Monitoring this metric can help identify potential performance issues related to the query cache.",en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"mysql.qcache":{info:"The usage of the query cache on the MySQL server. Monitoring this metric can help identify potential performance issues related to the query cache.",en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"mysql.qcache_freemem":{info:"The amount of free memory in the query cache on the MySQL server. Monitoring this metric can help identify potential performance issues related to the query cache memory usage.",en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"mysql.qcache_memblocks":{info:"The number of memory blocks in the query cache on the MySQL server. Monitoring this metric can help identify potential performance issues related to the query cache memory usage.",en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"mysql.galera_bytes":{info:"The number of bytes in the Galera cluster on the MySQL server. Monitoring this metric can help identify potential performance issues related to the Galera cluster.",en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"mysql.galera_queue":{info:"The length of the Galera queue on the MySQL server. Monitoring this metric can help identify potential performance issues related to the Galera cluster.",en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"mysql.galera_flow_control":{info:"The status of the flow control in the Galera cluster on the MySQL server. Monitoring this metric can help identify potential performance issues related to the Galera cluster.",en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"mysql.galera_cluster_size":{info:"The size of the Galera cluster on the MySQL server. Monitoring this metric can help identify potential performance issues related to the Galera cluster.",en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"mysql.galera_ready":{info:"The status of the Galera cluster on the MySQL server, indicating whether it is ready for write sets. 
Monitoring this metric can help identify potential performance issues related to the Galera cluster.",en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"mysql.galera_open_transactions":{info:"The number of open transactions in the Galera cluster on the MySQL server. Monitoring this metric can help identify potential performance issues related to the Galera cluster.",en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"mysql.key_blocks":{info:"The number of blocks in the key cache on the MySQL server. Monitoring this metric can help identify potential performance issues related to the key cache.",en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"mysql.binlog_cache":{info:"The usage of the binary log cache on the MySQL server. Monitoring this metric can help identify potential performance issues related to the binary log cache.",en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"mysql.binlog_stmt_cache":{info:"The usage of the binary log statement cache on the MySQL server. Monitoring this metric can help identify potential performance issues related to the binary log statement cache.",en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"mysql.slave_behind":{info:"The lag of the slave in the replication on the MySQL server. Monitoring this metric can help identify potential performance issues related to the replication process.",en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"mysql.userstats_cpu":{aggregationMethod:"avg",info:"The amount of CPU time used by each user on the MySQL server. Monitoring this metric can help identify potential performance issues related to CPU usage by users.",en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"mysql.userstats_rows":{info:"The number of rows accessed by each user on the MySQL server. Monitoring this metric can help identify potential performance issues related to user access to rows.",en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"mysql.userstats_commands":{info:"The number of commands executed by each user on the MySQL server. Monitoring this metric can help identify potential performance issues related to user commands.",en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"mysql.userstats_denied_commands":{info:"The number of denied commands for each user on the MySQL server. Monitoring this metric can help identify potential security issues related to user commands.",en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"mysql.userstats_binlog_written":{info:"The amount of data written to the binary log by each user on the MySQL server. Monitoring this metric can help identify potential performance issues related to the binary log.",en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"mysql.userstats_connections":{info:"The number of connections for each user on the MySQL server. Monitoring this metric can help identify potential performance issues related to user connections.",en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"mysql.userstats_lost_connections":{info:"The number of lost connections for each user on the MySQL server. Monitoring this metric can help identify potential performance issues related to user connections.",en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"mysql.userstats_denied_connections":{info:"The number of denied connections for each user on the MySQL server. 
Monitoring this metric can help identify potential security issues related to user connections.",en:{instance:{one:"MySQL server",other:"MySQL servers"}}},"ping.host_rtt":{aggregationMethod:"avg",mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Host Latency",layout:{left:6,top:0,width:3,height:5}}],info:"Round-trip time (RTT) is the time it takes for a data packet to reach its destination and return back to its original source.",en:{instance:{one:"ping host",other:"ping hosts"}}},"ping.host_std_dev_rtt":{aggregationMethod:"avg",info:"Round-trip time (RTT) standard deviation. The average value of how far each RTT of a ping differs from the average RTT.",en:{instance:{one:"ping host",other:"ping hosts"}}},"ping.host_packet_loss":{aggregationMethod:"avg",mainheads:[{groupBy:["selected"],aggregationMethod:"max",chartLibrary:"gauge",title:"Maximum Host Packet Loss",colors:s.default[1],valueRange:[0,100],layout:{left:9,top:0,width:3,height:5}}],info:"Packet loss occurs when one or more transmitted data packets do not reach their destination. Usually caused by data transfer errors, network congestion or firewall blocking. ICMP echo packets are often treated as lower priority by routers and target hosts, so ping test packet loss may not always translate to application packet loss.",en:{instance:{one:"ping host",other:"ping hosts"}}},"ping.host_packets":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Ping Packets Received",colors:s.default[2],valueRange:[0,null],layout:{left:0,top:0,width:3,height:5}},{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Ping Packets Sent",colors:s.default[1],valueRange:[0,null],layout:{left:3,top:0,width:3,height:5}}],info:"Number of ICMP messages sent and received. These counters should be equal if there is no packet loss.",en:{instance:{one:"ping host",other:"ping hosts"}}},"nvme.device_estimated_endurance_perc":{aggregationMethod:"avg",mainheads:[{groupBy:["selected"],aggregationMethod:"min",chartLibrary:"gauge",title:"Lowest Endurance on device",valueRange:[0,100],colors:s.default[7],layout:{left:0,top:0,width:3,height:5}}],info:"NVM subsystem lifetime used based on the actual usage and the manufacturer's prediction of NVM life. A value of 100 indicates that the estimated endurance of the device has been consumed, but may not indicate a device failure. The value can be greater than 100 if you use the storage beyond its planned lifetime.",en:{instance:{one:"NVMe disk",other:"NVMe disks"}},groupBy:["label"],groupByLabel:["device"]},"nvme.device_available_spare_perc":{aggregationMethod:"avg",mainheads:[{groupBy:["selected"],aggregationMethod:"avg",chartLibrary:"gauge",title:"Available Spare Capacity",valueRange:[0,100],colors:s.default[7],layout:{left:3,top:0,width:3,height:5}}],info:"Remaining spare capacity that is available. SSDs provide a set of internal spare capacity, called spare blocks, that can be used to replace blocks that have reached their write operation limit. 
After all of the spare blocks have been used, the next block that reaches its limit causes the disk to fail.",en:{instance:{one:"NVMe disk",other:"NVMe disks"}},groupBy:["label"],groupByLabel:["device"]},"nvme.device_composite_temperature":{aggregationMethod:"avg",mainheads:[{groupBy:["selected"],aggregationMethod:"max",chartLibrary:"number",title:"Maximum device Temperature",colors:s.default[7],layout:{left:6,top:0,width:3,height:5}}],info:"The current composite temperature of the controller and namespace(s) associated with that controller. The manner in which this value is computed is implementation specific and may not represent the actual temperature of any physical point in the NVM subsystem.",en:{instance:{one:"NVMe disk",other:"NVMe disks"}},groupBy:["label"],groupByLabel:["device"]},"nvme.device_io_transferred_count":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Total IO Transfer per device",colors:s.default[7],layout:{left:9,top:0,width:3,height:5}}],info:"The total amount of data read and written by the host.",en:{instance:{one:"NVMe disk",other:"NVMe disks"}},groupBy:["label"],groupByLabel:["device"]},"nvme.device_power_cycles_count":{info:"Power cycles reflect the number of times this host has been rebooted or the device has been woken up after sleep. A high number of power cycles does not affect the device's life expectancy.",en:{instance:{one:"NVMe disk",other:"NVMe disks"}},groupBy:["label"],groupByLabel:["device"]},"nvme.device_power_on_time":{mainheads:[{groupBy:["selected"],aggregationMethod:"max",chartLibrary:"number",title:"Maximum Power On Time",colors:s.default[7],layout:{left:0,top:5,width:3,height:5}}],info:"Power-on time is the length of time the device is supplied with power.",en:{instance:{one:"NVMe disk",other:"NVMe disks"}},groupBy:["label"],groupByLabel:["device"]},"nvme.device_unsafe_shutdowns_count":{info:"The number of times a power outage occurred without a shutdown notification being sent. Depending on the NVMe device you are using, an unsafe shutdown can corrupt user data.",en:{instance:{one:"NVMe disk",other:"NVMe disks"}},groupBy:["label"],groupByLabel:["device"]},"nvme.device_critical_warnings_state":{mainheads:[{groupBy:["selected"],aggregationMethod:"max",chartLibrary:"number",title:"Maximum No. of Critical / Warnings",colors:s.default[7],layout:{left:3,top:5,width:3,height:5}}],info:"

    Critical warnings for the status of the controller. A warning is active if its value is set to 1.

    AvailableSpare - the available spare capacity is below the threshold. TempThreshold - the composite temperature is greater than or equal to an over temperature threshold or less than or equal to an under temperature threshold. NvmSubsystemReliability - the NVM subsystem reliability is degraded due to excessive media or internal errors. ReadOnly - media is placed in read-only mode. VolatileMemBackupFailed - the volatile memory backup device has failed. PersistentMemoryReadOnly - the Persistent Memory Region has become read-only or unreliable.

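    /* These warnings mirror the NVMe SMART / Health Information log page; with
       nvme-cli installed they can be read directly (the device path is
       illustrative):

       nvme smart-log /dev/nvme0
    */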
    ",en:{instance:{one:"NVMe disk",other:"NVMe disks"}},groupBy:["label"],groupByLabel:["device"]},"nvme.device_media_errors_rate":{info:"The number of occurrences where the controller detected an unrecovered data integrity error. Errors such as uncorrectable ECC, CRC checksum failure, or LBA tag mismatch are included in this counter.",en:{instance:{one:"NVMe disk",other:"NVMe disks"}},groupBy:["label"],groupByLabel:["device"]},"nvme.device_error_log_entries_rate":{info:"The number of entries in the Error Information Log. By itself, an increase in the number of records is not an indicator of any failure condition.",en:{instance:{one:"NVMe disk",other:"NVMe disks"}},groupBy:["label"],groupByLabel:["device"]},"nvme.device_warning_composite_temperature_time":{info:"The time the device has been operating above the Warning Composite Temperature Threshold (WCTEMP) and below Critical Composite Temperature Threshold (CCTEMP).",en:{instance:{one:"NVMe disk",other:"NVMe disks"}},groupBy:["label"],groupByLabel:["device"]},"nvme.device_critical_composite_temperature_time":{info:"The time the device has been operating above the Critical Composite Temperature Threshold (CCTEMP).",en:{instance:{one:"NVMe disk",other:"NVMe disks"}},groupBy:["label"],groupByLabel:["device"]},"nvme.device_thermal_mgmt_temp1_transitions_rate":{info:"The number of times the controller has entered lower active power states or performed vendor-specific thermal management actions, minimizing performance impact, to attempt to lower the Composite Temperature due to the host-managed thermal management feature.",en:{instance:{one:"NVMe disk",other:"NVMe disks"}},groupBy:["label"],groupByLabel:["device"]},"nvme.device_thermal_mgmt_temp2_transitions_rate":{info:"The number of times the controller has entered lower active power states or performed vendor-specific thermal management actions, regardless of the impact on performance (e.g., heavy throttling), to attempt to lower the Combined Temperature due to the host-managed thermal management feature.",en:{instance:{one:"NVMe disk",other:"NVMe disks"}},groupBy:["label"],groupByLabel:["device"]},"nvme.device_thermal_mgmt_temp1_time":{info:"The amount of time the controller has entered lower active power states or performed vendor-specific thermal management actions, minimizing performance impact, to attempt to lower the Composite Temperature due to the host-managed thermal management feature.",en:{instance:{one:"NVMe disk",other:"NVMe disks"}},groupBy:["label"],groupByLabel:["device"]},"nvme.device_thermal_mgmt_temp2_time":{info:"The amount of time the controller has entered lower active power states or performed vendor-specific thermal management actions, regardless of the impact on performance (e.g., heavy throttling), to attempt to lower the Combined Temperature due to the host-managed thermal management feature.",en:{instance:{one:"NVMe disk",other:"NVMe disks"}},groupBy:["label"],groupByLabel:["device"]},"postfix.qemails":{info:"The qemails metric represents the number of emails currently in the queue in Postfix. This metric should be monitored to ensure that the queue is not growing too large, which can lead to delays in email delivery.",en:{instance:{one:"mail server",other:"mail servers"}}},"postfix.qsize":{info:"The qsize metric represents the total size of emails currently in the queue in Postfix. 
This metric should be monitored to ensure that the queue is not growing too large, which can lead to delays in email delivery.",en:{instance:{one:"mail server",other:"mail servers"}}},"postgres.connections_utilization":{aggregationMethod:"avg",mainheads:[{groupBy:["selected"],aggregationMethod:"avg",valueRange:[0,100],chartLibrary:"gauge",title:"Average Connections Utilization",colors:s.default[14],layout:{left:0,top:0,width:2.5,height:5}}],info:"

    A connection is an established line of communication between a client and the PostgreSQL server. Each connection adds to the load on the PostgreSQL server. To guard against running out of memory or overloading the database, the max_connections parameter (default = 100) defines the maximum number of concurrent connections to the database server. A separate parameter, superuser_reserved_connections (default = 3), defines the quota for superuser connections (so that superusers can connect even if all other connection slots are blocked).


    Total connection utilization across all databases. Utilization is measured as a percentage of (max_connections - superuser_reserved_connections). If utilization reaches 100%, no new connections will be accepted (superuser connections will still be accepted if superuser quota is available).

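    /* A sketch of the utilization arithmetic as a query (the output column name
       is illustrative):

       SELECT count(*) * 100.0
              / (current_setting('max_connections')::int
                 - current_setting('superuser_reserved_connections')::int)
              AS connections_utilization_pct
       FROM pg_stat_activity;
    */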
    ",en:{instance:{one:"postgres server",other:"postgres servers"}}},"postgres.connections_usage":{info:"

    Connections usage across all databases. The maximum number of concurrent connections to the database server is (max_connections - superuser_reserved_connections). As a general rule, if you need more than 200 connections it is advisable to use connection pooling.

    Available - new connections allowed. Used - connections currently in use.

    ",en:{instance:{one:"postgres server",other:"postgres servers"}}},"postgres.connections_state_count":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Connection State Count",layout:{left:9,top:0,width:2.5,height:5}}],en:{instance:{one:"postgres server",other:"postgres servers"}}},"postgres.transactions_duration":{aggregationMethod:"avg",en:{instance:{one:"postgres server",other:"postgres servers"}}},"postgres.queries_duration":{aggregationMethod:"avg",mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Top Queries by Duration",layout:{left:0,top:10,width:4.5,height:5}}],info:"Active queries duration histogram. The bins are specified as consecutive, non-overlapping intervals. The value is the number of observed active queries that fall into each interval.",en:{instance:{one:"postgres server",other:"postgres servers"}}},"postgres.checkpoints_rate":{info:'

    Number of checkpoints that have been performed. Checkpoints are periodic maintenance operations the database performs to make sure that everything it\'s been caching in memory has been synchronized with the disk. Ideally checkpoints should be time-driven (scheduled) as opposed to load-driven (requested).

    Scheduled - checkpoints triggered as per schedule when time elapsed from the previous checkpoint is greater than checkpoint_timeout. Requested - checkpoints triggered due to WAL updates reaching the max_wal_size before the checkpoint_timeout is reached.

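    /* The scheduled/requested balance is governed by two settings; the values
       below are illustrative, not recommendations (postgresql.conf):

       checkpoint_timeout = 15min   -- elapsed-time trigger (Scheduled)
       max_wal_size = 4GB           -- WAL-volume trigger (Requested)
    */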
    ',en:{instance:{one:"postgres server",other:"postgres servers"}}},"postgres.checkpoints_time":{info:"

    Checkpoint timing information. An important indicator of how well checkpoint I/O is performing is the amount of time taken to sync files to disk.

    Write - amount of time spent writing files to disk during checkpoint processing. Sync - amount of time spent synchronizing files to disk during checkpoint processing.

    ",en:{instance:{one:"postgres server",other:"postgres servers"}}},"postgres.buffers_allocated_rate":{info:"Allocated and re-allocated buffers. If a backend process requests data it is either found in a block in shared buffer cache or the block has to be allocated (read from disk). The latter is counted as Allocated.",en:{instance:{one:"postgres server",other:"postgres servers"}}},"postgres.buffers_io_rate":{info:"

    Amount of data flushed from memory to disk.

    Checkpoint - buffers written during checkpoints. Backend - buffers written directly by a backend. It may happen that a dirty page is requested by a backend process. In this case the page is synced to disk before the page is returned to the client. BgWriter - buffers written by the background writer. PostgreSQL may clear pages with a low usage count in advance. The process scans for dirty pages with a low usage count so that they could be cleared if necessary. Buffers written by this process increment the counter.

    ",en:{instance:{one:"postgres server",other:"postgres servers"}}},"postgres.bgwriter_halts_rate":{info:'Number of times the background writer stopped a cleaning scan because it had written too many buffers (exceeding the value of bgwriter_lru_maxpages).',en:{instance:{one:"postgres server",other:"postgres servers"}}},"postgres.buffers_backend_fsync_rate":{info:"Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write). Any values above zero can indicate problems with storage when fsync queue is completely filled.",en:{instance:{one:"postgres server",other:"postgres servers"}}},"postgres.wal_io_rate":{info:"Write-Ahead Logging (WAL) ensures data integrity by ensuring that changes to data files (where tables and indexes reside) are written only after log records describing the changes have been flushed to permanent storage.",en:{instance:{one:"postgres server",other:"postgres servers"}}},"postgres.wal_files_count":{info:"

    Number of WAL logs stored in the directory pg_wal under the data directory.

    Written - generated log segment files. Recycled - old log segment files that are no longer needed; they are renamed to become future segments in the numbered sequence, avoiding the need to create new ones.

    ",en:{instance:{one:"postgres server",other:"postgres servers"}}},"postgres.wal_archiving_files_count":{info:'

    WAL archiving.

    Ready - WAL files waiting to be archived. A non-zero value can indicate archive_command is in error, see Continuous Archiving and Point-in-Time Recovery. Done - WAL files successfully archived.',en:{instance:{one:"postgres server",other:"postgres servers"}}},"postgres.autovacuum_workers_count":{info:'PostgreSQL databases require periodic maintenance known as vacuuming. For many installations, it is sufficient to let vacuuming be performed by the autovacuum daemon. For more information see The Autovacuum Daemon.',en:{instance:{one:"postgres server",other:"postgres servers"}}},"postgres.txid_exhaustion_towards_autovacuum_perc":{aggregationMethod:"avg",info:'Percentage towards emergency autovacuum for one or more tables. A forced autovacuum will run once this value reaches 100%. For more information see Preventing Transaction ID Wraparound Failures.',en:{instance:{one:"postgres server",other:"postgres servers"}}},"postgres.txid_exhaustion_perc":{aggregationMethod:"avg",info:'Percentage towards transaction wraparound. A transaction wraparound may occur when this value reaches 100%. For more information see Preventing Transaction ID Wraparound Failures.',en:{instance:{one:"postgres server",other:"postgres servers"}}},"postgres.txid_exhaustion_oldest_txid_num":{info:'The oldest current transaction ID (XID). If for some reason autovacuum fails to clear old XIDs from a table, the system will begin to emit warning messages when the database\'s oldest XIDs reach eleven million transactions from the wraparound point. For more information see Preventing Transaction ID Wraparound Failures.',en:{instance:{one:"postgres server",other:"postgres servers"}}},"postgres.uptime":{aggregationMethod:"min",mainheads:[{groupBy:["selected"],aggregationMethod:"min",chartLibrary:"number",title:"Minimum Uptime",layout:{left:12,top:0,width:2.5,height:1.66}}],info:"The time elapsed since the Postgres process was started.",en:{instance:{one:"postgres server",other:"postgres servers"}}},"postgres.replication_app_wal_lag_size":{info:"
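    // Two quick cross-checks for the metrics above (illustrative sketches): archive
    // health via the pg_stat_archiver view, and wraparound distance via age(datfrozenxid):
    //   SELECT archived_count, failed_count, last_failed_wal FROM pg_stat_archiver;
    //   SELECT datname, age(datfrozenxid) AS xid_age FROM pg_database ORDER BY xid_age DESC;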

    Replication WAL lag size.

    SentLag - sent over the network. WriteLag - written to disk. FlushLag - flushed to disk. ReplayLag - replayed into the database.

    ",en:{instance:{one:"postgres server",other:"postgres servers"}}},"postgres.replication_app_wal_lag_time":{info:"

    Replication WAL lag time.

    WriteLag - time elapsed between flushing recent WAL locally and receiving notification that the standby server has written it, but not yet flushed it or applied it. FlushLag - time elapsed between flushing recent WAL locally and receiving notification that the standby server has written and flushed it, but not yet applied it. ReplayLag - time elapsed between flushing recent WAL locally and receiving notification that the standby server has written, flushed and applied it.
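    // The same lag figures can be read on the primary from the pg_stat_replication view
    // (an illustrative sketch):
    //   SELECT application_name, write_lag, flush_lag, replay_lag FROM pg_stat_replication;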

    ",en:{instance:{one:"postgres server",other:"postgres servers"}}},"postgres.replication_slot_files_count":{info:'

    Replication slot files. For more information see Replication Slots.

    WalKeep - WAL files retained by the replication slot. PgReplslotFiles - files present in pg_replslot.

    ',en:{instance:{one:"postgres server",other:"postgres servers"}}},"postgres.db_transactions_ratio":{aggregationMethod:"avg",info:"Percentage of committed/rollback transactions.",en:{instance:{one:"database",other:"databases"}}},"postgres.db_transactions_rate":{info:"

    Number of transactions that have been performed.

    Committed - transactions that have been committed. All changes made by a committed transaction become visible to others and are guaranteed to be durable if a crash occurs. Rollback - transactions that have been rolled back. A rollback aborts the current transaction and discards all updates made by it. Single queries that fail outside of a transaction are also counted as rollbacks.

    ",en:{instance:{one:"database",other:"databases"}},groupBy:["label"],groupByLabel:["database"]},"postgres.db_connections_utilization":{aggregationMethod:"avg",mainheads:[{groupBy:["label"],groupByLabel:["database"],chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Connections Utilization per Database",layout:{left:0,top:5,width:2.5,height:5}}],info:"Connection utilization per database. Utilization is measured as a percentage of CONNECTION LIMIT per database (if set) or max_connections (if CONNECTION LIMIT is not set).",en:{instance:{one:"database",other:"databases"}},groupBy:["label"],groupByLabel:["database"]},"postgres.db_connections_count":{info:"Number of current connections per database.",en:{instance:{one:"database",other:"databases"}},groupBy:["label"],groupByLabel:["database"]},"postgres.db_cache_io_ratio":{aggregationMethod:"avg",mainheads:[{groupBy:["selected"],aggregationMethod:"avg",valueRange:[0,100],chartLibrary:"gauge",title:"Average Cache Miss Ratio",colors:s.default[1],layout:{left:6,top:0,width:2.5,height:5}},{groupBy:["label"],groupByLabel:["database"],chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Cache Miss Ratio per DB",layout:{left:6,top:5,width:2.5,height:5}}],info:'PostgreSQL uses a shared buffer cache to store frequently accessed data in memory, and avoid slower disk reads. If you are seeing performance issues, consider increasing the shared_buffers size or tuning effective_cache_size.',en:{instance:{one:"database",other:"databases"}},groupBy:["label"],groupByLabel:["database"]},"postgres.db_io_rate":{info:"

    Amount of data read from shared buffer cache or from disk.

    Disk - data read from disk. Memory - data read from buffer cache (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache).
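    // The Disk/Memory split corresponds to blks_read/blks_hit in the pg_stat_database
    // view; an illustrative per-database cache hit ratio:
    //   SELECT datname,
    //          round(100.0 * blks_hit / NULLIF(blks_hit + blks_read, 0), 2) AS hit_pct
    //   FROM pg_stat_database;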

    ",en:{instance:{one:"database",other:"databases"}},groupBy:["label"],groupByLabel:["database"]},"postgres.db_ops_fetched_rows_ratio":{aggregationMethod:"avg",mainheads:[{groupBy:["selected"],aggregationMethod:"avg",chartLibrary:"dygraph",sparkline:!0,overlays:{latestValue:{type:"latestValue"}},hasToolbox:!1,title:"Avg Fetched Row Ratio",colors:s.default[10],layout:{left:3,top:0,width:2,height:5}},{groupBy:["label"],groupByLabel:["database"],chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Fetched Row Ratio per DB",layout:{left:3,top:5,width:2,height:5}}],info:"The percentage of rows that contain data needed to execute the query, out of the total number of rows scanned. A high value indicates that the database is executing queries efficiently, while a low value indicates that the database is performing extra work by scanning a large number of rows that aren't required to process the query. Low values may be caused by missing indexes or inefficient queries.",en:{instance:{one:"database",other:"databases"}},groupBy:["label"],groupByLabel:["database"]},"postgres.db_ops_read_rows_rate":{info:"

    Read queries throughput.

    Returned - Total number of rows scanned by queries. This value indicates rows returned by the storage layer to be scanned, not rows returned to the client. Fetched - Subset of scanned rows (Returned) that contained data needed to execute the query.

    ",en:{instance:{one:"database",other:"databases"}},groupBy:["label"],groupByLabel:["database"]},"postgres.db_ops_write_rows_rate":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Rows Written per Database",layout:{left:9,top:5,width:2.5,height:5}}],info:"

    Write queries throughput.

    Inserted - number of rows inserted by queries. Deleted - number of rows deleted by queries. Updated - number of rows updated by queries.

    ",en:{instance:{one:"database",other:"databases"}},groupBy:["label"],groupByLabel:["database"]},"postgres.db_conflicts_rate":{info:'Number of queries canceled due to conflict with recovery on standby servers. To minimize query cancels caused by cleanup records consider configuring hot_standby_feedback.',en:{instance:{one:"database",other:"databases"}},groupBy:["label"],groupByLabel:["database"]},"postgres.db_conflicts_reason_rate":{info:"

    Statistics about queries canceled due to various types of conflicts on standby servers.

    Tablespace - queries that have been canceled due to dropped tablespaces. Lock - queries that have been canceled due to lock timeouts. Snapshot - queries that have been canceled due to old snapshots. Bufferpin - queries that have been canceled due to pinned buffers. Deadlock - queries that have been canceled due to deadlocks.
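    // Recovery-conflict cancellations on standbys are commonly mitigated with these
    // postgresql.conf settings (a sketch; values are illustrative):
    //   hot_standby_feedback = on
    //   max_standby_streaming_delay = 30s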

    ",en:{instance:{one:"database",other:"databases"}},groupBy:["label"],groupByLabel:["database"]},"postgres.db_deadlocks_rate":{info:"Number of detected deadlocks. When a transaction cannot acquire the requested lock within a certain amount of time (configured by deadlock_timeout), it begins deadlock detection.",en:{instance:{one:"database",other:"databases"}},groupBy:["label"],groupByLabel:["database"]},"postgres.db_locks_held_count":{info:'Number of held locks. Some of these lock modes are acquired by PostgreSQL automatically before statement execution, while others are provided to be used by applications. All lock modes acquired in a transaction are held for the duration of the transaction. For lock modes details, see table-level locks.',en:{instance:{one:"database",other:"databases"}},groupBy:["label"],groupByLabel:["database"]},"postgres.db_locks_awaited_count":{info:'Number of awaited locks. It indicates that some transaction is currently waiting to acquire a lock, which implies that some other transaction is holding a conflicting lock mode on the same lockable object. For lock modes details, see table-level locks.',en:{instance:{one:"database",other:"databases"}},groupBy:["label"],groupByLabel:["database"]},"postgres.db_temp_files_created_rate":{info:"Number of temporary files created by queries. Complex queries may require more memory than is available (specified by work_mem). When this happens, Postgres reverts to using temporary files - they are actually stored on disk, but only exist for the duration of the request. After the request returns, the temporary files are deleted.",en:{instance:{one:"database",other:"databases"}},groupBy:["label"],groupByLabel:["database"]},"postgres.db_temp_files_io_rate":{info:"Amount of data written temporarily to disk to execute queries.",en:{instance:{one:"database",other:"databases"}},groupBy:["label"],groupByLabel:["database"]},"postgres.db_size":{mainheads:[{groupBy:["label"],groupByLabel:["database"],chartLibrary:"bars",title:"Database Sizes",dimensionsSort:"valueDesc",layout:{left:12,top:5,width:2.5,height:5}},{chartLibrary:"dygraph",sparkline:!0,overlays:{latestValue:{type:"latestValue"}},hasToolbox:!1,title:"Total Database Size",layout:{left:12,top:3.33,width:2.5,height:1.66}}],info:"Actual on-disk usage of the database's data directory and any associated tablespaces.",en:{instance:{one:"database",other:"databases"}},groupBy:["label"],groupByLabel:["database"]},"postgres.databases_count":{mainheads:[{groupBy:["selected"],chartLibrary:"number",title:"Total Database count",layout:{left:12,top:1.66,width:2.5,height:1.66}}],info:"Count of databases per PostgreSQL server.",en:{instance:{one:"postgres server",other:"postgres servers"}}},"pgbouncer.client_connections_utilization":{aggregationMethod:"avg",info:"Client connections in use as percentage of max_client_conn (default 100)."},"pgbouncer.db_client_connections":{info:"

    Client connections in different states.

    Active - linked to server connection and can process queries. Waiting - have sent queries but have not yet got a server connection. CancelReq - have not forwarded query cancellations to the server yet.

    "},"pgbouncer.db_server_connections":{info:"

    Server connections in different states.

    Active - linked to a client. Idle - unused and immediately usable for client queries. Used - have been idle for more than server_check_delay, so they need server_check_query to run on them before they can be used again. Tested - currently running either server_reset_query or server_check_query. Login - currently in the process of logging in.
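    // The states above are governed by pgbouncer.ini settings such as the following,
    // shown with their documented defaults (a sketch, not tuning advice):
    //   max_client_conn = 100
    //   default_pool_size = 20
    //   server_check_delay = 30
    //   server_check_query = select 1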

    "},"pgbouncer.db_server_connections_utilization":{aggregationMethod:"avg",info:"Server connections in use as percentage of max_db_connections (default 0 - unlimited). This considers the PgBouncer database that the client has connected to, not the PostgreSQL database of the outgoing connection."},"pgbouncer.db_clients_wait_time":{info:"Time spent by clients waiting for a server connection. This shows if the decrease in database performance from the client's point of view was due to exhaustion of the corresponding PgBouncer pool."},"pgbouncer.db_client_max_wait_time":{info:"Waiting time for the first (oldest) client in the queue. If this starts increasing, then the current pool of servers does not handle requests quickly enough."},"pgbouncer.db_transactions":{info:"SQL transactions pooled (proxied) by pgbouncer."},"pgbouncer.db_transactions_time":{info:"Time spent by pgbouncer when connected to PostgreSQL in a transaction, either idle in transaction or executing queries."},"pgbouncer.db_transaction_avg_time":{info:"Average transaction duration."},"pgbouncer.db_queries":{info:"SQL queries pooled (proxied) by pgbouncer."},"pgbouncer.db_queries_time":{info:"Time spent by pgbouncer when actively connected to PostgreSQL, executing queries."},"pgbouncer.db_query_avg_time":{info:"Average query duration."},"pgbouncer.db_network_io":{info:"

    Network traffic received and sent by pgbouncer.

    Received - received from clients. Sent - sent to servers.

    "},"postgres.table_rows_dead_ratio":{aggregationMethod:"avg",mainheads:[{groupBy:["selected"],aggregationMethod:"avg",valueRange:[0,100],chartLibrary:"gauge",title:"Average Dead Row Ratio",colors:s.default[10],layout:{left:12,top:10,width:2.5,height:5}}],info:"Percentage of dead rows. An increase in dead rows indicates a problem with VACUUM processes, which can slow down your queries.",en:{instance:{one:"table",other:"tables"}}},"postgres.table_rows_count":{info:"

    Number of rows. When you do an UPDATE or DELETE, the row is not actually physically deleted. For a DELETE, the database simply marks the row as unavailable for future transactions, and for UPDATE, under the hood it is a combined INSERT then DELETE, where the previous version of the row is marked unavailable.

    Live - rows that are currently in use and can be queried. Dead - deleted rows whose space will later be reused for new rows from INSERT or UPDATE.
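    // Per-table live/dead row counts come from the pg_stat_user_tables view; an
    // illustrative query for finding VACUUM candidates:
    //   SELECT relname, n_live_tup, n_dead_tup
    //   FROM pg_stat_user_tables ORDER BY n_dead_tup DESC LIMIT 10;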

    ",en:{instance:{one:"table",other:"tables"}}},"postgres.table_ops_rows_rate":{info:"Write queries throughput. If you see a large number of updated and deleted rows, keep an eye on the number of dead rows, as a high percentage of dead rows can slow down your queries.",en:{instance:{one:"table",other:"tables"}}},"postgres.table_ops_rows_hot_ratio":{aggregationMethod:"avg",info:"Percentage of HOT (Heap Only Tuple) updated rows. HOT updates are much more efficient than ordinary updates: less write operations, less WAL writes, vacuum operation has less work to do, increased read efficiency (help to limit table and index bloat).",en:{instance:{one:"table",other:"tables"}}},"postgres.table_ops_rows_hot_rate":{info:"Number of HOT (Heap Only Tuple) updated rows.",en:{instance:{one:"table",other:"tables"}}},"postgres.table_cache_io_ratio":{aggregationMethod:"avg",info:"Table cache inefficiency. Percentage of data read from disk. Lower is better.",en:{instance:{one:"table",other:"tables"}}},"postgres.table_io_rate":{info:"

    Amount of data read from shared buffer cache or from disk.

    Disk - data read from disk. Memory - data read from buffer cache (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache).

    ",en:{instance:{one:"table",other:"tables"}}},"postgres.table_index_cache_io_ratio":{aggregationMethod:"avg",info:"Table indexes cache inefficiency. Percentage of data read from disk. Lower is better.",en:{instance:{one:"table",other:"tables"}}},"postgres.table_index_io_rate":{info:"

    Amount of data read from all indexes from shared buffer cache or from disk.

    Disk - data read from disk. Memory - data read from buffer cache (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache).

    ",en:{instance:{one:"table",other:"tables"}}},"postgres.table_toast_cache_io_ratio":{aggregationMethod:"avg",info:"Table TOAST cache inefficiency. Percentage of data read from disk. Lower is better.",en:{instance:{one:"table",other:"tables"}}},"postgres.table_toast_io_rate":{info:"

    Amount of data read from TOAST table from shared buffer cache or from disk.

    Disk - data read from disk. Memory - data read from buffer cache (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache).

    ",en:{instance:{one:"table",other:"tables"}}},"postgres.table_toast_index_cache_io_ratio":{aggregationMethod:"avg",info:"Table TOAST indexes cache inefficiency. Percentage of data read from disk. Lower is better.",en:{instance:{one:"table",other:"tables"}}},"postgres.table_toast_index_io_rate":{info:"

    Amount of data read from this table's TOAST table indexes from shared buffer cache or from disk.

    Disk - data read from disk. Memory - data read from buffer cache (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache).

    ",en:{instance:{one:"table",other:"tables"}}},"postgres.table_scans_rate":{info:"

    Number of scans initiated on this table. If you see that your database regularly performs more sequential scans over time, you can improve its performance by creating an index on data that is frequently accessed.

    Index - scans that rely on an index to locate specific rows. Sequential - scans that read every row of the table in order; these typically take longer than index scans.
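    // Tables dominated by sequential scans can be listed from pg_stat_user_tables; an
    // illustrative sketch for spotting missing-index candidates:
    //   SELECT relname, seq_scan, idx_scan
    //   FROM pg_stat_user_tables ORDER BY seq_scan DESC LIMIT 10;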

    ",en:{instance:{one:"table",other:"tables"}}},"postgres.table_scans_rows_rate":{info:"Number of live rows fetched by scans.",en:{instance:{one:"table",other:"tables"}}},"postgres.table_autovacuum_since_time":{aggregationMethod:"min",info:"Time elapsed since this table was vacuumed by the autovacuum daemon.",en:{instance:{one:"table",other:"tables"}}},"postgres.table_vacuum_since_time":{aggregationMethod:"min",info:"Time elapsed since this table was manually vacuumed (not counting VACUUM FULL).",en:{instance:{one:"table",other:"tables"}}},"postgres.table_autoanalyze_since_time":{aggregationMethod:"min",info:"Time elapsed this table was analyzed by the autovacuum daemon.",en:{instance:{one:"table",other:"tables"}}},"postgres.table_analyze_since_time":{aggregationMethod:"min",info:"Time elapsed since this table was manually analyzed.",en:{instance:{one:"table",other:"tables"}}},"postgres.table_null_columns":{info:"Number of table columns that contain only NULLs.",en:{instance:{one:"table",other:"tables"}}},"postgres.table_total_size":{info:"Actual on-disk size of the table.",en:{instance:{one:"table",other:"tables"}}},"postgres.table_bloat_size_perc":{aggregationMethod:"avg",mainheads:[{groupBy:["selected"],aggregationMethod:"avg",valueRange:[0,100],chartLibrary:"gauge",title:"Average Table Bloat %",colors:s.default[1],layout:{left:9,top:10,width:2.5,height:5}}],info:"Estimated percentage of bloat in the table. It is normal for tables that are updated frequently to have a small to moderate amount of bloat.",en:{instance:{one:"table",other:"tables"}}},"postgres.table_bloat_size":{info:'Disk space that was used by the table and is available for reuse by the database but has not been reclaimed. Bloated tables require more disk storage and additional I/O that can slow down query execution. Running VACUUM regularly on a table that is updated frequently results in fast reuse of space occupied by expired rows, which prevents the table from growing too large.',en:{instance:{one:"table",other:"tables"}}},"postgres.index_size":{info:"Actual on-disk size of the index.",en:{instance:{one:"index",other:"indexes"}}},"postgres.index_bloat_size_perc":{aggregationMethod:"avg",mainheads:[{groupBy:["selected"],aggregationMethod:"avg",valueRange:[0,100],chartLibrary:"gauge",title:"Average Index Bloat %",colors:s.default[1],layout:{left:9,top:10,width:2.5,height:5}}],info:"Estimated percentage of bloat in the index.",en:{instance:{one:"index",other:"indexes"}}},"postgres.index_bloat_size":{info:'Disk space that was used by the index and is available for reuse by the database but has not been reclaimed. Bloat slows down your database and eats up more storage than needed. To recover the space from indexes, recreate them using the REINDEX command.',en:{instance:{one:"index",other:"indexes"}}},"postgres.index_usage_status":{info:"An index is considered unused if no scans have been initiated on that index.",en:{instance:{one:"index",other:"indexes"}}},"puppet.jvm_heap":{info:"The jvm_heap metric represents the size of the Java Virtual Machine's heap memory in Puppet. This metric should be monitored to ensure that sufficient memory is allocated for Puppet to operate correctly."},"puppet.jvm_nonheap":{info:"The jvm_nonheap metric represents the size of the Java Virtual Machine's non-heap memory in Puppet. 
This metric should be monitored to ensure that sufficient memory is allocated for Puppet to operate correctly."},"puppet.cpu":{aggregationMethod:"avg",info:"The cpu metric represents the amount of CPU resources being used by Puppet. This metric should be monitored to ensure that the CPU is not being overutilized and to detect any potential performance issues."},"puppet.fd_open":{info:"The fd_open metric represents the number of open file descriptors in Puppet. This metric should be monitored to ensure that the system is not running out of available file descriptors, which can lead to performance issues."},"redis.ping_latency":{aggregationMethod:"avg",mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Ping Latency",layout:{left:3,top:0,width:2,height:5}}],info:"The average time it takes from a client request to the server response. High latency could be caused by slow commands, over-utilized network links or a high backlog in the command queue.",en:{instance:{one:"redis server",other:"redis servers"}}},"redis.commands":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Commands",layout:{left:0,top:0,width:2,height:5}}],info:"The total number of commands processed per second. A significant or unexpected shift in this metric could indicate a problem worth investigating.",en:{instance:{one:"redis server",other:"redis servers"}}},"redis.keyspace_lookup_hit_rate":{aggregationMethod:"avg",mainheads:[{groupBy:["selected"],aggregationMethod:"avg",valueRange:[0,100],chartLibrary:"gauge",title:"Average Lookup Hit Rate",colors:s.default[0],layout:{left:6,top:0,width:2,height:5}}],info:"Lookup Hitrate = (Keyspace hits / (Keyspace hits + Keyspace misses))
    Lower hit rates lead to higher latency; under normal conditions this value should be greater than 80%.",en:{instance:{one:"redis server",other:"redis servers"}}},"redis.mem_fragmentation_ratio":{aggregationMethod:"avg",mainheads:[{groupBy:["selected"],aggregationMethod:"max",valueRange:[0,100],chartLibrary:"gauge",title:"Max Memory Fragmentation Ratio",colors:s.default[1],layout:{left:9,top:0,width:2,height:5}}],info:"The ratio of memory allocated by the operating system to memory requested by Redis (used_memory_rss/used_memory).",en:{instance:{one:"redis server",other:"redis servers"}}},"redis.uptime":{aggregationMethod:"min",mainheads:[{groupBy:["selected"],aggregationMethod:"min",chartLibrary:"number",title:"Minimum Uptime",layout:{left:12,top:0,width:2,height:2.5}}],en:{instance:{one:"redis server",other:"redis servers"}}},"redis.clients":{mainheads:[{chartLibrary:"dygraph",sparkline:!0,overlays:{latestValue:{type:"latestValue"}},hasToolbox:!1,title:"Total Connected Clients",layout:{left:12,top:2.5,width:2,height:2.5}}],info:"The current state of clients connecting to or attempting to connect to Redis.",en:{instance:{one:"redis server",other:"redis servers"}}},"redis.commands_calls":{info:"Total commands processed per second, by command type. Use this chart to identify the most common commands being processed.",en:{instance:{one:"redis server",other:"redis servers"}}},"redis.master_last_io_since_time":{aggregationMethod:"min",info:"Time in seconds since the last interaction between replica and primary. A long time interval without communication could indicate a problem on the primary Redis server or on the replica or in the link between them.",en:{instance:{one:"redis server",other:"redis servers"}}},"redis.master_link_down_since_time":{aggregationMethod:"min",info:"Time in seconds since the link between replica and primary went down. This metric is only available when the connection between a primary and its replica has been lost. Any non-zero value for this metric is cause for alert.",en:{instance:{one:"redis server",other:"redis servers"}}},"cassandra.client_requests_rate":{info:"Client requests received per second. Consider whether your workload is read-heavy or write-heavy while choosing a compaction strategy."},"cassandra.client_request_read_latency_histogram":{aggregationMethod:"avg",mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Read latency (99th percentile)",selectedDimensions:["p99"],colors:s.default[12],layout:{left:7.5,top:0,width:2.25,height:5}}],info:"Histogram for read latency, with bins for 50th, 75th, 90th, 95th, 98th, 99th and 99.9th percentile latency values.",en:{instance:{one:"cassandra server",other:"cassandra servers"}}},"cassandra.client_request_write_latency_histogram":{aggregationMethod:"avg",mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Write latency (99th percentile)",selectedDimensions:["p99"],layout:{left:9.75,top:0,width:2.25,height:5}}],info:"Histogram for write latency, with bins for 50th, 75th, 90th, 95th, 98th, 99th and 99.9th percentile latency values.",en:{instance:{one:"cassandra server",other:"cassandra servers"}}},"cassandra.client_requests_latency":{aggregationMethod:"avg",info:"Total response latency summed over all requests received per second. 
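    // A worked example of the hit-rate formula above: with keyspace_hits=950 and
    // keyspace_misses=50 (both reported by `redis-cli INFO stats`), the lookup hit rate
    // is 950 / (950 + 50) = 95%. The fragmentation inputs (used_memory_rss, used_memory)
    // come from `redis-cli INFO memory`.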
Latency could be impacted by disk access, network latency or replication configuration.",en:{instance:{one:"cassandra server",other:"cassandra servers"}}},"cassandra.key_cache_hit_ratio":{aggregationMethod:"avg",mainheads:[{groupBy:["selected"],aggregationMethod:"avg",chartLibrary:"gauge",title:"Average Key Cache Hit Ratio",valueRange:[0,100],colors:s.default[2],layout:{left:4.5,top:0,width:3,height:5}}],info:"Key cache hit ratio indicates the efficiency of the key cache. If ratio is consistently < 80% consider increasing cache size.",en:{instance:{one:"cassandra server",other:"cassandra servers"}}},"cassandra.key_cache_hit_rate":{aggregationMethod:"avg",info:"Key cache hit rate measures the cache hits and misses per second.",en:{instance:{one:"cassandra server",other:"cassandra servers"}}},"cassandra.storage_live_disk_space_used":{mainheads:[{groupBy:["selected"],chartLibrary:"number",title:"Total Live Disk Space Used",layout:{left:2.25,top:0,width:2.25,height:5}}],info:"Amount of live disk space used. This does not include obsolete data waiting to be garbage collected.",en:{instance:{one:"cassandra server",other:"cassandra servers"}}},"cassandra.compaction_completed_tasks_rate":{info:"Compaction tasks completed per second.",en:{instance:{one:"cassandra server",other:"cassandra servers"}}},"cassandra.compaction_pending_tasks_count":{info:"Total compaction tasks in queue.",en:{instance:{one:"cassandra server",other:"cassandra servers"}}},"cassandra.thread_pool_active_tasks_count":{info:"Total tasks currently being processed.",en:{instance:{one:"cassandra server",other:"cassandra servers"}}},"cassandra.thread_pool_pending_tasks_count":{info:"Total tasks in queue awaiting a thread for processing.",en:{instance:{one:"cassandra server",other:"cassandra servers"}}},"cassandra.thread_pool_blocked_tasks_rate":{info:"Tasks that cannot be queued for processing yet.",en:{instance:{one:"cassandra server",other:"cassandra servers"}}},"cassandra.thread_pool_blocked_tasks_count":{info:"Total tasks that cannot yet be queued for processing.",en:{instance:{one:"cassandra server",other:"cassandra servers"}}},"cassandra.jvm_gc_rate":{info:"Rate of garbage collections.

    ParNew - young-generation collections. CMS (ConcurrentMarkSweep) - old-generation collections.

    ",en:{instance:{one:"cassandra server",other:"cassandra servers"}}},"cassandra.jvm_gc_time":{aggregationMethod:"min",info:"Elapsed time of garbage collection.

    ParNew - young-generation collections. CMS (ConcurrentMarkSweep) - old-generation collections.
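    // Per-collector pause counts and times can be cross-checked on the node itself with
    // `nodetool gcstats`; frequent long CMS (old-generation) pauses usually point at heap
    // pressure rather than collector misconfiguration (a rule of thumb, not a rule).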

    ",en:{instance:{one:"cassandra server",other:"cassandra servers"}}},"cassandra.client_requests_timeouts_rate":{info:"Requests which were not acknowledged within the configurable timeout window.",en:{instance:{one:"cassandra server",other:"cassandra servers"}}},"cassandra.client_requests_unavailables_rate":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Unavailable exceptions",colors:s.default[1],layout:{left:0,top:0,width:2.25,height:5}}],info:"Requests for which the required number of nodes was unavailable.",en:{instance:{one:"cassandra server",other:"cassandra servers"}}},"cassandra.row_cache_hit_ratio":{aggregationMethod:"avg",info:"Row cache hit ratio indicates the efficiency of the row cache. If ratio is consistently < 80% consider increasing cache size.",en:{instance:{one:"cassandra server",other:"cassandra servers"}}},"cassandra.row_cache_hit_rate":{aggregationMethod:"avg",info:"Row cache hit rate measures the cache hits and misses per second.",en:{instance:{one:"cassandra server",other:"cassandra servers"}}},"cassandra.row_cache_utilization":{aggregationMethod:"avg",info:"The percentage of row cache memory currently in use. High utilization may indicate the need to increase cache size.",en:{instance:{one:"cassandra server",other:"cassandra servers"}}},"cassandra.row_cache_size":{info:"The size of the row cache in bytes. Adjusting this value can impact system performance.",en:{instance:{one:"cassandra server",other:"cassandra servers"}}},"cassandra.key_cache_utilization":{aggregationMethod:"avg",info:"The percentage of key cache memory currently in use. High utilization may indicate the need to increase cache size.",en:{instance:{one:"cassandra server",other:"cassandra servers"}}},"cassandra.key_cache_size":{info:"The size of the key cache in bytes. Adjusting this value can impact system performance.",en:{instance:{one:"cassandra server",other:"cassandra servers"}}},"cassandra.compaction_compacted_rate":{info:"The rate at which compactions are occurring in the system. If this rate is consistently high, it may indicate a need for more resources or a different compaction strategy.",en:{instance:{one:"cassandra server",other:"cassandra servers"}}},"cassandra.jvm_memory_used":{info:"The amount of memory used by the Cassandra JVM. High memory usage may indicate the need for additional resources or optimization.",en:{instance:{one:"cassandra server",other:"cassandra servers"}}},"cassandra.dropped_messages_rate":{info:"The rate at which messages are being dropped by the system. High rates may indicate a need for additional resources or a problem with the system.",en:{instance:{one:"cassandra server",other:"cassandra servers"}}},"cassandra.client_requests_failures_rate":{info:"The rate at which client requests are failing. High rates may indicate a problem with the system or the need for additional resources.",en:{instance:{one:"cassandra server",other:"cassandra servers"}}},"clickhouse.connections":{info:"Active connections. TCP - connections to TCP server (clients with native interface), also included server-server distributed query connections. HTTP - connections to HTTP server. MyQSL - client connections using MySQL protocol. PostgreSQL - client connections using PostgreSQL protocol. InterServer - connections from other replicas to fetch parts."},"clickhouse.slow_reads":{info:"Number of reads from a file that were slow. This indicate system overload. 
Thresholds are controlled by read_backoff_* settings.",mainheads:[{groupBy:["selected"],aggregationMethod:"sum",chartLibrary:"number",title:"Slow Reads",valueRange:[0,null],colors:s.default[3],layout:{left:9,top:0,width:3,height:4}}]},"clickhouse.read_backoff":{info:"Number of times the number of query processing threads was lowered due to slow reads."},"clickhouse.memory_usage":{info:"Total amount of memory allocated by the server.",mainheads:[{groupBy:["selected"],aggregationMethod:"sum",chartLibrary:"gauge",title:"Memory Usage",valueRange:[0,null],colors:s.default[10],layout:{left:0,top:4,width:3,height:4}}]},"clickhouse.running_queries":{info:"Number of executing queries.",mainheads:[{groupBy:["selected"],aggregationMethod:"sum",chartLibrary:"number",title:"Running Queries",valueRange:[0,null],colors:s.default[0],layout:{left:0,top:0,width:3,height:4}}]},"clickhouse.queries":{info:"Successful - queries to be interpreted and potentially executed. Does not include queries that failed to parse or were rejected due to AST size limits, quota limits or limits on the number of simultaneously running queries. May include internal queries initiated by ClickHouse itself. Does not count subqueries.",mainheads:[{groupBy:["dimension"],aggregationMethod:"sum",chartLibrary:"bars",title:"Queries/second",valueRange:[0,null],colors:s.default[0],layout:{left:3,top:0,width:3,height:4}}]},"clickhouse.select_queries":{info:"Successful - SELECT queries to be interpreted and potentially executed. May include internal queries initiated by ClickHouse itself. Does not count subqueries."},"clickhouse.insert_queries":{info:"Successful - INSERT queries to be interpreted and potentially executed. May include internal queries initiated by ClickHouse itself. Does not count subqueries."},"clickhouse.queries_preempted":{info:"Number of queries that are stopped and waiting due to the 'priority' setting."},"clickhouse.queries_memory_limit_exceeded":{info:"Number of times the memory limit was exceeded for a query."},"clickhouse.queries_latency":{info:"Total time of all queries.",mainheads:[{groupBy:["selected"],aggregationMethod:"sum",chartLibrary:"number",title:"Queries Latency",valueRange:[0,null],colors:s.default[4],layout:{left:6,top:0,width:3,height:4}}]},"clickhouse.select_queries_latency":{info:"Total time of all SELECT queries."},"clickhouse.insert_queries_latency":{info:"Total time of all INSERT queries."},"clickhouse.io":{info:"Reads - number of bytes read from file descriptors. If the file is compressed, this will show the compressed data size. Writes - number of bytes written (write/pwrite) to a file descriptor. Does not include sockets.",mainheads:[{groupBy:["selected"],selectedDimensions:["reads"],chartLibrary:"easypiechart",title:"Read IO",valueRange:[0,null],colors:s.default[2],layout:{left:3,top:4,width:3,height:4}},{groupBy:["selected"],selectedDimensions:["writes"],chartLibrary:"easypiechart",title:"Write IO",valueRange:[0,null],colors:s.default[1],layout:{left:6,top:4,width:3,height:4}}]},"clickhouse.iops":{info:"Reads - number of reads (read/pread) from a file descriptor. Does not include sockets. Writes - number of writes (write/pwrite) to a file descriptor. Does not include sockets."},"clickhouse.io_errors":{info:"Read - number of times the read (read/pread) from a file descriptor has failed. 
Write - number of times the write (write/pwrite) to a file descriptor has failed.",mainheads:[{groupBy:["selected"],aggregationMethod:"sum",chartLibrary:"gauge",title:"IO Errors",valueRange:[0,null],colors:s.default[1],layout:{left:9,top:4,width:3,height:4}}]},"clickhouse.io_seeks":{info:"Number of times the 'lseek' function was called."},"clickhouse.io_file_opens":{info:"Number of files opened."},"clickhouse.replicated_parts_current_activity":{info:"Fetch - number of data parts being fetched from replica. Send - number of data parts being sent to replicas. Check - number of data parts being checked for consistency."},"clickhouse.replicated_readonly_tables":{info:"Number of replicas skipped during INSERT into Distributed table due to replicas being read-only."},"clickhouse.replicated_data_loss":{info:"Number of times a data part that we wanted doesn't exist on any replica (even on replicas that are offline right now). Those data parts are definitely lost. This is normal due to asynchronous replication (if quorum inserts were not enabled): the replica on which the data part was written failed and, after coming back online, no longer contains that data part."},"clickhouse.replicated_part_fetches":{info:"Successful - number of times a data part was downloaded from replica of a ReplicatedMergeTree table. Failed - number of times a data part failed to download from replica of a ReplicatedMergeTree table."},"clickhouse.replicated_part_fetches_of_merged":{info:"Number of times a merged data part was fetched from a replica instead of being merged locally."},"clickhouse.replicated_part_merges":{info:"Number of times data parts of ReplicatedMergeTree tables were successfully merged."},"clickhouse.inserted_bytes":{info:"Number of bytes (uncompressed; for columns as they are stored in memory) INSERTed to all tables."},"clickhouse.inserted_rows":{info:"Number of rows INSERTed to all tables."},"clickhouse.rejected_inserts":{info:"Number of times the INSERT of a block to a MergeTree table was rejected with 'Too many parts' exception due to a high number of active data parts in the partition."},"clickhouse.delayed_inserts":{info:"Number of times the INSERT of a block to a MergeTree table was throttled due to a high number of active data parts in the partition."},"clickhouse.delayed_inserts_throttle_time":{info:"Total number of milliseconds spent while the INSERT of a block to a MergeTree table was throttled due to a high number of active data parts in the partition."},"clickhouse.selected_bytes":{info:"Number of bytes (uncompressed; for columns as they are stored in memory) SELECTed from all tables."},"clickhouse.selected_rows":{info:"Number of rows SELECTed from all tables."},"clickhouse.selected_parts":{info:"Number of data parts selected to read from a MergeTree table."},"clickhouse.selected_ranges":{info:"Number of (non-adjacent) ranges in all data parts selected to read from a MergeTree table."},"clickhouse.selected_marks":{info:"Number of marks (index granules) selected to read from a MergeTree table."},"clickhouse.merges":{info:"Number of launched background merges."},"clickhouse.merges_latency":{info:"Total time spent for background merges."},"clickhouse.merged_uncompressed_bytes":{info:"Uncompressed bytes (for columns as they are stored in memory) that were read for background merges. This is the number before merge."},"clickhouse.merged_rows":{info:"Rows read for background merges. 
This is the number of rows before merge."},"clickhouse.merge_tree_data_writer_inserted_rows":{info:"Number of rows INSERTed to MergeTree tables."},"clickhouse.merge_tree_data_writer_compressed_bytes":{info:"Bytes written to filesystem for data INSERTed to MergeTree tables."},"clickhouse.uncompressed_cache_requests":{info:"Hits - number of times a block of data has been found in the uncompressed cache (and decompression was avoided). Misses - number of times a block of data has not been found in the uncompressed cache (and required decompression)."},"clickhouse.mark_cache_requests":{info:"Hits - number of times an entry has been found in the mark cache, so we didn't have to load a mark file. Misses - number of times an entry has not been found in the mark cache, so we had to load a mark file in memory, which is a costly operation, adding to query latency."},"clickhouse.parts_count":{info:"Temporary - the part is being generated now; it is not in the data_parts list. PreActive - the part is in data_parts, but not used for SELECTs. Active - active data part, used by current and upcoming SELECTs. Deleting - an inactive data part with an identity refcounter; it is currently being deleted by a cleaner. DeleteOnDestroy - the part was moved to another disk and should be deleted in its own destructor. Outdated - an inactive data part that can be used only by current SELECTs and can be deleted after those SELECTs finish. Wide - wide parts. Compact - compact parts.",mainheads:[{groupBy:["dimension"],aggregationMethod:"sum",chartLibrary:"bars",title:"Parts",dimensionsSort:"valueDesc",valueRange:[0,null],layout:{left:6,top:8,width:3,height:4}}]},"clickhouse.distributed_connections":{info:"Number of connections to remote servers sending data that was INSERTed into Distributed tables. Both synchronous and asynchronous mode."},"clickhouse.distributed_connections_attempts":{info:"Total count of distributed connection attempts."},"clickhouse.distributed_connections_fail_retries":{info:"Total count when distributed connection fails with retry."},"clickhouse.distributed_connections_fail_exhausted_retries":{info:"Total count when distributed connection fails after all retries finished."},"clickhouse.distributed_files_to_insert":{info:"Number of pending files to process for asynchronous insertion into Distributed tables. Number of files for every shard is summed."},"clickhouse.distributed_rejected_inserts":{info:"Number of times the INSERT of a block to a Distributed table was rejected with 'Too many bytes' exception due to a high number of pending bytes."},"clickhouse.distributed_delayed_inserts":{info:"Number of times the INSERT of a block to a Distributed table was throttled due to a high number of pending bytes."},"clickhouse.distributed_delayed_inserts_latency":{info:"Total number of milliseconds spent while the INSERT of a block to a Distributed table was throttled due to a high number of pending bytes."},"clickhouse.distributed_sync_insertion_timeout_exceeded":{info:"A timeout was exceeded while waiting for shards during synchronous insertion into a Distributed table (with 'distributed_foreground_insert' = 1)."},"clickhouse.distributed_async_insertions_failures":{info:"Number of failures for asynchronous insertion into a Distributed table (with 'distributed_foreground_insert' = 0)."},"clickhouse.uptime":{info:"The server uptime. 
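// 'Too many parts' rejections and the Parts chart above can be investigated through the
// system.parts table; an illustrative sketch:
//   SELECT database, table, count() AS active_parts
//   FROM system.parts WHERE active GROUP BY database, table ORDER BY active_parts DESC;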
It includes the time spent for server initialization before accepting connections.",mainheads:[{groupBy:["selected"],aggregationMethod:"min",chartLibrary:"number",title:"Minimum Uptime",layout:{left:9,top:8,width:3,height:4}}]},"clickhouse.disk_space_usage":{mainheads:[{groupBy:["label"],groupByLabel:["disk_name"],aggregationMethod:"sum",chartLibrary:"bars",title:"Free Disk Space",dimensionsSort:"valueAsc",valueRange:[0,null],layout:{left:0,top:8,width:3,height:4}}],en:{instance:{one:"disk",other:"disks"}}},"clickhouse.database_table_size":{en:{instance:{one:"table",other:"tables"}},mainheads:[{groupBy:["label"],groupByLabel:["database"],aggregationMethod:"sum",chartLibrary:"bars",title:"Database Size",dimensionsSort:"valueDesc",valueRange:[0,null],layout:{left:3,top:8,width:3,height:4}}]},"clickhouse.database_table_parts":{en:{instance:{one:"table",other:"tables"}}},"clickhouse.database_table_rows":{en:{instance:{one:"table",other:"tables"}}},"coredns.dns_request_count_total":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total DNS Requests",colors:s.default[12],layout:{left:0,top:0,width:2.5,height:5}}],info:"The total number of DNS requests handled by CoreDNS. This can be useful for understanding overall system load and potential bottlenecks.",en:{instance:{one:"coredns server",other:"coredns servers"}}},"coredns.dns_responses_count_total":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total DNS Responses",colors:s.default[2],layout:{left:6,top:0,width:2.5,height:5}}],info:"The total number of DNS responses sent by CoreDNS. This can be useful for understanding overall system load and potential bottlenecks.",en:{instance:{one:"coredns server",other:"coredns servers"}}},"coredns.dns_no_matching_zone_dropped_total":{info:"The total number of DNS requests dropped by CoreDNS because no matching zone was found. This can be useful for identifying potential configuration issues.",en:{instance:{one:"coredns server",other:"coredns servers"}}},"coredns.dns_panic_count_total":{info:"The total number of panics that occurred in CoreDNS. This can be useful for identifying potential issues or bugs in the system.",en:{instance:{one:"coredns server",other:"coredns servers"}}},"activemq.messages":{info:"The total number of messages in the broker. This can be useful for understanding overall system throughput.",en:{instance:{one:"broker",other:"brokers"}}},"activemq.unprocessed_messages":{info:"The total number of messages that have not been processed. If this number consistently increases, it may indicate a problem with consumer performance or a bottleneck in the system.",en:{instance:{one:"broker",other:"brokers"}}},"activemq.consumers":{info:"The number of active consumers connected to the broker. This can be useful for understanding overall system load and potential bottlenecks.",en:{instance:{one:"broker",other:"brokers"}}},"apache.connections":{colors:s.default[4],mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",valueRange:[0,null],title:"Total Apache Connections",colors:s.default[4]}],info:"The total number of connections to the Apache web server. This can be useful for understanding overall system load and potential bottlenecks.",en:{instance:{one:"web server",other:"web servers"}}},"apache.requests":{colors:s.default[0],mainheads:[{showPostAggregations:!0,postGroupBy:["selected"],chartLibrary:"easypiechart",valueRange:[0,null],title:"Total Apache Requests",colors:s.default[0]}],info:"The number of requests processed by the Apache web server. 
This can be useful for understanding overall system throughput and potential performance issues.",en:{instance:{one:"web server",other:"web servers"}}},"apache.net":{colors:s.default[3],mainheads:[{groupBy:["selected"],chartLibrary:"gauge",title:"Total Apache Bandwidth",valueRange:[0,null],colors:s.default[3]}],info:"The number of bytes transferred over the network by the Apache web server. This can be useful for understanding overall network usage and potential performance issues.",en:{instance:{one:"web server",other:"web servers"}}},"apache.workers":{mainheads:[{groupBy:["percentage-of-instance"],showPostAggregations:!0,postGroupBy:["selected"],aggregationMethod:"max",selectedDimensions:["busy"],chartLibrary:"gauge",title:"Maximum Apache Workers Utilization",valueRange:[0,100],colors:s.default[5]}],info:"The number of worker processes used by the Apache web server. This can be useful for understanding overall system performance and potential bottlenecks.",en:{instance:{one:"web server",other:"web servers"}}},"apache.bytesperreq":{colors:s.default[3],info:"The number of bytes transferred per request by the Apache web server. This can be useful for understanding the efficiency of the server and identifying potential performance issues.",en:{instance:{one:"web server",other:"web servers"}}},"apache.reqpersec":{colors:s.default[4],info:"The number of requests processed per second by the Apache web server. This can be useful for understanding overall system performance and potential bottlenecks.",en:{instance:{one:"web server",other:"web servers"}}},"apache.bytespersec":{colors:s.default[6],info:"The number of bytes transferred per second over the network by the Apache web server. This can be useful for understanding overall network performance and potential bottlenecks.",en:{instance:{one:"web server",other:"web servers"}}},"apache.uptime":{aggregationMethod:"min",info:"The amount of time that the Apache web server has been running. This can be useful for understanding the overall health and stability of the server.",en:{instance:{one:"web server",other:"web servers"}}},"lighttpd.connections":{colors:s.default[4],mainheads:[{groupBy:["selected"],chartLibrary:"gauge",title:"Total Lighttpd Connections",colors:s.default[4]}],en:{instance:{one:"web server",other:"web servers"}}},"lighttpd.requests":{colors:s.default[0],mainheads:[{groupBy:["selected"],chartLibrary:"gauge",title:"Total Lighttpd Requests",colors:s.default[0]}],en:{instance:{one:"web server",other:"web servers"}}},"lighttpd.net":{colors:s.default[3],mainheads:[{groupBy:["selected"],chartLibrary:"gauge",title:"Total Lighttpd Bandwidth",colors:s.default[3]}],en:{instance:{one:"web server",other:"web servers"}}},"lighttpd.workers":{mainheads:[{groupBy:["percentage-of-instance"],showPostAggregations:!0,postGroupBy:["selected"],aggregationMethod:"max",selectedDimensions:["busy"],chartLibrary:"gauge",title:"Maximum Lighttpd Workers Utilization",colors:s.default[5]}],en:{instance:{one:"web server",other:"web servers"}}},"lighttpd.bytesperreq":{colors:s.default[3],en:{instance:{one:"web server",other:"web servers"}}},"lighttpd.reqpersec":{colors:s.default[4],en:{instance:{one:"web server",other:"web servers"}}},"lighttpd.bytespersec":{colors:s.default[6],en:{instance:{one:"web server",other:"web servers"}}},"lighttpd.uptime":{aggregationMethod:"min",info:"The uptime of the lighttpd server. 
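// Worker saturation (the Workers Utilization gauges above) is bounded by the MPM's
// MaxRequestWorkers directive; an illustrative mpm_event snippet (values are
// assumptions, not recommendations):
//   <IfModule mpm_event_module>
//       MaxRequestWorkers 400
//       ThreadsPerChild   25
//   </IfModule>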
Monitoring this metric can help identify if the server has been restarted or is experiencing issues with uptime.",en:{instance:{one:"web server",other:"web servers"}}},"lighttpd2.requests":{info:"The total number of requests received by the lighttpd server. Monitoring this metric can help identify the usage patterns of the server and potential performance issues.",en:{instance:{one:"web server",other:"web servers"}}},"lighttpd2.status_codes":{info:"The distribution of HTTP response codes returned by the lighttpd server. Monitoring this metric can help identify if there are issues with the server or the components interacting with it.",en:{instance:{one:"web server",other:"web servers"}}},"lighttpd2.traffic":{info:"The amount of traffic handled by the lighttpd server. Monitoring this metric can help identify the usage patterns of the server and potential performance issues.",en:{instance:{one:"web server",other:"web servers"}}},"lighttpd2.connections":{info:"The number of active connections to the lighttpd server. Monitoring this metric can help identify the usage patterns of the server and potential performance issues.",en:{instance:{one:"web server",other:"web servers"}}},"lighttpd2.connection_states":{info:"The distribution of connection states for the lighttpd server. Monitoring this metric can help identify if there are issues with the server or the components interacting with it.",en:{instance:{one:"web server",other:"web servers"}}},"lighttpd2.memory_usage":{info:"The memory usage of the lighttpd server. Monitoring this metric can help identify if the server is experiencing memory usage issues that may affect its performance.",en:{instance:{one:"web server",other:"web servers"}}},"lighttpd2.uptime":{info:"The uptime of the lighttpd server. Monitoring this metric can help identify if the server has been restarted or is experiencing issues with uptime.",en:{instance:{one:"web server",other:"web servers"}}},"logstash.jvm_threads":{info:"The number of threads currently being used by the Logstash JVM. Monitoring this metric can help identify if the JVM is experiencing threading issues that may affect its performance."},"logstash.jvm_mem_heap_used":{aggregationMethod:"avg",info:"The amount of memory currently being used by the Logstash JVM's heap. Monitoring this metric can help identify if the JVM is experiencing memory usage issues that may affect its performance."},"logstash.jvm_mem_pools_eden":{info:"The amount of memory currently being used by the Logstash JVM's Eden memory pool. Monitoring this metric can help identify if the JVM is experiencing memory usage issues that may affect its performance."},"logstash.jvm_mem_pools_survivor":{info:"The amount of memory currently being used by the Logstash JVM's Survivor memory pool. Monitoring this metric can help identify if the JVM is experiencing memory usage issues that may affect its performance."},"logstash.jvm_mem_pools_old":{info:"The amount of memory currently being used by the Logstash JVM's Old memory pool. Monitoring this metric can help identify if the JVM is experiencing memory usage issues that may affect its performance."},"logstash.jvm_gc_collector_count":{info:"The number of garbage collection operations performed by the Logstash JVM. Monitoring this metric can help identify if the JVM is experiencing performance issues related to garbage collection."},"logstash.jvm_gc_collector_time":{aggregationMethod:"min",info:"The total time spent on garbage collection operations by the Logstash JVM. 
Monitoring this metric can help identify if the JVM is experiencing performance issues related to garbage collection."},"logstash.open_file_descriptors":{info:"The number of open file descriptors used by Logstash. Monitoring this metric can help identify if Logstash is experiencing file descriptor usage issues that may affect its performance."},"logstash.event":{info:"The number of events processed by Logstash. Monitoring this metric can help identify the usage patterns of Logstash and potential performance issues."},"logstash.event_duration":{info:"The latencies of events processed by Logstash. High latencies may indicate performance issues with Logstash or the components interacting with it."},"logstash.uptime":{aggregationMethod:"min",info:"The uptime of the Logstash server. Monitoring this metric can help identify if the server has been restarted or is experiencing issues with uptime."},"logstash.pipeline_event":{info:"The number of events processed by the specified Logstash pipeline. Monitoring this metric can help identify the usage patterns of the pipeline and potential performance issues."},"mongodb.operations":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Operations",colors:s.default[0],layout:{left:0,top:0,width:3,height:5}}],info:"The total number of operations performed by the MongoDB server. Monitoring this metric can help identify the usage patterns of the server and potential performance issues.",en:{instance:{one:"mongodb server",other:"mongodb servers"}}},"mongodb.operations_latency":{aggregationMethod:"avg",info:"The latencies of operations performed by the MongoDB server. High latencies may indicate performance issues with the server or the components interacting with it.",en:{instance:{one:"mongodb server",other:"mongodb servers"}}},"mongodb.connections":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Connections",colors:s.default[0],layout:{left:3,top:0,width:3,height:5}}],info:"The total number of connections to the MongoDB server. Monitoring this metric can help identify the usage patterns of the server and potential performance issues.",en:{instance:{one:"mongodb server",other:"mongodb servers"}}},"mongodb.connections_rate":{info:"The rate of connections to the MongoDB server. Monitoring this metric can help identify the usage patterns of the server and potential performance issues.",en:{instance:{one:"mongodb server",other:"mongodb servers"}}},"mongodb.connections_state":{info:"The distribution of connection states for the MongoDB server. Monitoring this metric can help identify if there are issues with the server or the components interacting with it.",en:{instance:{one:"mongodb server",other:"mongodb servers"}}},"mongodb.network_io":{info:"The amount of network IO performed by the MongoDB server. Monitoring this metric can help identify the usage patterns of the server and potential performance issues.",en:{instance:{one:"mongodb server",other:"mongodb servers"}}},"mongodb.network_requests":{info:"The number of requests to the MongoDB server. Monitoring this metric can help identify the usage patterns of the server and potential performance issues.",en:{instance:{one:"mongodb server",other:"mongodb servers"}}},"mongodb.page_faults":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Page Faults",colors:s.default[0],layout:{left:9,top:0,width:3,height:5}}],info:"The number of page faults encountered by the MongoDB server. 
Monitoring this metric can help identify if the server is experiencing memory usage issues that may affect its performance.",en:{instance:{one:"mongodb server",other:"mongodb servers"}}},"mongodb.tcmalloc_generic":{info:"The usage of the TCMalloc generic allocator by the MongoDB server. Monitoring this metric can help identify if the server is experiencing memory usage issues that may affect its performance.",en:{instance:{one:"mongodb server",other:"mongodb servers"}}},"mongodb.tcmalloc":{info:"The usage of the TCMalloc allocator by the MongoDB server. Monitoring this metric can help identify if the server is experiencing memory usage issues that may affect its performance.",en:{instance:{one:"mongodb server",other:"mongodb servers"}}},"mongodb.asserts":{info:"The number of asserts encountered by the MongoDB server. Monitoring this metric can help identify if the server is encountering issues that may affect its performance.",en:{instance:{one:"mongodb server",other:"mongodb servers"}}},"mongodb.current_transactions":{info:"The number of current transactions on the MongoDB server. Monitoring this metric can help identify if the server is experiencing performance issues related to transactions.",en:{instance:{one:"mongodb server",other:"mongodb servers"}}},"mongodb.shard_commit_types":{info:"The distribution of commit types for sharded collections on the MongoDB server. Monitoring this metric can help identify if there are issues with sharding on the server.",en:{instance:{one:"mongodb server",other:"mongodb servers"}}},"mongodb.active_clients":{mainheads:[{groupBy:["selected"],chartLibrary:"number",title:"Total Active Clients",colors:s.default[0],layout:{left:0,top:5,width:3,height:5}}],info:"The number of active clients connected to the MongoDB server. Monitoring this metric can help identify the usage patterns of the server and potential performance issues.",en:{instance:{one:"mongodb server",other:"mongodb servers"}}},"mongodb.queued_operations":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Queued Operations",colors:s.default[0],layout:{left:3,top:5,width:3,height:5}}],info:"The number of operations that are currently queued on the MongoDB server. Monitoring this metric can help identify if the server is experiencing performance issues related to queued operations.",en:{instance:{one:"mongodb server",other:"mongodb servers"}}},"mongodb.locks":{info:"The distribution of locks held by the MongoDB server. Monitoring this metric can help identify if the server is experiencing performance issues related to locking.",en:{instance:{one:"mongodb server",other:"mongodb servers"}}},"mongodb.flow_control_timings":{info:"The timings of flow control events on the MongoDB server. Monitoring this metric can help identify if the server is experiencing performance issues related to flow control.",en:{instance:{one:"mongodb server",other:"mongodb servers"}}},"mongodb.wiredtiger_blocks":{info:"The number of blocks currently held in the WiredTiger cache on the MongoDB server. Monitoring this metric can help identify if the server is experiencing performance issues related to the WiredTiger cache.",en:{instance:{one:"mongodb server",other:"mongodb servers"}}},"mongodb.wiredtiger_cache":{info:"The usage of the WiredTiger cache on the MongoDB server. 
Monitoring this metric can help identify if the server is experiencing performance issues related to the WiredTiger cache.",en:{instance:{one:"mongodb server",other:"mongodb servers"}}},"mongodb.wiredtiger_capacity":{info:"The capacity of the WiredTiger cache on the MongoDB server. Monitoring this metric can help identify if the server is experiencing performance issues related to the WiredTiger cache.",en:{instance:{one:"mongodb server",other:"mongodb servers"}}},"mongodb.wiredtiger_connection":{info:"The number of connections currently open in the WiredTiger storage engine on the MongoDB server. Monitoring this metric can help identify if the server is experiencing performance issues related to connections.",en:{instance:{one:"mongodb server",other:"mongodb servers"}}},"mongodb.wiredtiger_cursor":{info:"The number of cursors currently open in the WiredTiger storage engine on the MongoDB server. Monitoring this metric can help identify if the server is experiencing performance issues related to cursors.",en:{instance:{one:"mongodb server",other:"mongodb servers"}}},"mongodb.wiredtiger_lock":{info:"The number of locks currently held in the WiredTiger storage engine on the MongoDB server. Monitoring this metric can help identify if the server is experiencing performance issues related to locking.",en:{instance:{one:"mongodb server",other:"mongodb servers"}}},"mongodb.wiredtiger_lock_duration":{info:"The duration of locks held in the WiredTiger storage engine on the MongoDB server. Monitoring this metric can help identify if the server is experiencing performance issues related to locking.",en:{instance:{one:"mongodb server",other:"mongodb servers"}}},"mongodb.wiredtiger_log_ops":{info:"The number of operations written to the WiredTiger log on the MongoDB server. Monitoring this metric can help identify if the server is experiencing performance issues related to logging.",en:{instance:{one:"mongodb server",other:"mongodb servers"}}},"mongodb.wiredtiger_transactions":{info:"The number of transactions currently open in the WiredTiger storage engine on the MongoDB server. Monitoring this metric can help identify if the server is experiencing performance issues related to transactions.",en:{instance:{one:"mongodb server",other:"mongodb servers"}}},"mongodb.database_collections":{info:"The number of collections in the specified database on the MongoDB server. Monitoring this metric can help identify the usage patterns of the database and potential performance issues.",en:{instance:{one:"mongodb server",other:"mongodb servers"}}},"mongodb.database_indexes":{info:"The number of indexes in the specified database on the MongoDB server. Monitoring this metric can help identify the usage patterns of the database and potential performance issues.",en:{instance:{one:"mongodb server",other:"mongodb servers"}}},"mongodb.database_views":{info:"The number of views in the specified database on the MongoDB server. Monitoring this metric can help identify the usage patterns of the database and potential performance issues.",en:{instance:{one:"mongodb server",other:"mongodb servers"}}},"mongodb.database_documents":{info:"The number of documents in the specified database on the MongoDB server. Monitoring this metric can help identify the usage patterns of the database and potential performance issues.",en:{instance:{one:"mongodb server",other:"mongodb servers"}}},"mongodb.database_storage_size":{info:"The storage size of the specified database on the MongoDB server. 
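/*
  aggregationMethod picks the reduction used when several points or instances collapse into one value. The entries here use "min" for uptimes (the most recently restarted node dominates) and "avg" for latencies. A sketch of those reductions (helper and sample values are illustrative):

  const aggregate = (values, method) =>
    method === "min" ? Math.min(...values)
    : method === "sum" ? values.reduce((a, b) => a + b, 0)
    : values.reduce((a, b) => a + b, 0) / values.length; // "avg"

  aggregate([120, 3600, 86400], "min"); // -> 120, e.g. a "Minimum Uptime" head
*/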
Monitoring this metric can help identify if the database is using an appropriate amount of storage.",en:{instance:{one:"mongodb server",other:"mongodb servers"}}},"mongodb.replication_lag":{info:"The amount of lag in replication on the MongoDB server. Monitoring this metric can help identify if the server is experiencing performance issues related to replication.",en:{instance:{one:"mongodb server",other:"mongodb servers"}}},"mongodb.replication_heartbeat_latency":{aggregationMethod:"avg",info:"The latencies of replication heartbeats on the MongoDB server. Monitoring this metric can help identify if the server is experiencing performance issues related to replication.",en:{instance:{one:"mongodb server",other:"mongodb servers"}}},"mongodb.replication_node_ping":{aggregationMethod:"avg",info:"The latencies of pings to replication nodes on the MongoDB server. Monitoring this metric can help identify if the server is experiencing performance issues related to replication.",en:{instance:{one:"mongodb server",other:"mongodb servers"}}},"mongodb.shard_nodes_count":{info:"The number of nodes in the specified shard on the MongoDB server. Monitoring this metric can help identify the usage patterns of the shard and potential performance issues.",en:{instance:{one:"mongodb server",other:"mongodb servers"}}},"mongodb.shard_databases_status":{info:"The status of the databases in the specified shard on the MongoDB server. Monitoring this metric can help identify if there are issues with the databases in the shard.",en:{instance:{one:"mongodb server",other:"mongodb servers"}}},"mongodb.chunks":{info:"The number of chunks in the specified shard on the MongoDB server. Monitoring this metric can help identify the usage patterns of the shard and potential performance issues.",en:{instance:{one:"mongodb server",other:"mongodb servers"}}},"nginx.connections":{colors:s.default[4],mainheads:[{groupBy:["selected"],valueRange:[0,null],chartLibrary:"gauge",title:"Total Connections",colors:s.default[4],layout:{left:0,top:0,width:3,height:5}}],en:{instance:{one:"web server",other:"web servers"}}},"nginx.requests":{colors:s.default[0],mainheads:[{groupBy:["selected"],valueRange:[0,null],chartLibrary:"gauge",title:"Total Requests",colors:s.default[0],layout:{left:3,top:0,width:3,height:5}}],en:{instance:{one:"web server",other:"web servers"}}},"nginx.connections_status":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Connection Status",layout:{left:6,top:0,width:3,height:5}}],info:"The current status of connections on the nginx server. Monitoring this metric can help identify potential performance issues related to connections.",en:{instance:{one:"web server",other:"web servers"}}},"nginx.connections_accepted_handled":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Connections Handled",layout:{left:9,top:0,width:3,height:5}}],info:"The number of accepted and handled connections on the nginx server. Monitoring this metric can help identify potential performance issues related to connections.",en:{instance:{one:"web server",other:"web servers"}}},"nginxplus.client_connections_rate":{mainheads:[{groupBy:["selected"],valueRange:[0,null],chartLibrary:"gauge",title:"Total Client Connections Rate",colors:s.default[4],layout:{left:0,top:0,width:3,height:5}}],info:"Accepted and dropped (not handled) connections. 
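/*
  The en.instance objects carry the singular/plural noun the UI can use when counting instances. A sketch of how such metadata might be consumed (hypothetical helper, not part of this file):

  const instanceLabel = (meta, count) =>
    `${count} ${count === 1 ? meta.en.instance.one : meta.en.instance.other}`;

  instanceLabel({ en: { instance: { one: "web server", other: "web servers" } } }, 3);
  // -> "3 web servers"
*/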
A connection is considered dropped if the worker process is unable to get a connection for the request by establishing a new connection or reusing an open one.",en:{instance:{one:"web server",other:"web servers"}}},"nginxplus.client_connections_count":{mainheads:[{valueRange:[0,null],chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Total Client Connections per State",layout:{left:3,top:0,width:3,height:2.5}}],info:"The current number of client connections. A connection is considered idle if there are currently no active requests.",en:{instance:{one:"web server",other:"web servers"}}},"nginxplus.ssl_handshakes_rate":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Total SSL Handshake Rate",colors:s.default[4],layout:{left:3,top:2.5,width:3,height:2.5}}],info:"Successful and failed SSL handshakes.",en:{instance:{one:"web server",other:"web servers"}}},"nginxplus.ssl_session_reuses_rate":{info:"The number of session reuses during SSL handshake.",en:{instance:{one:"web server",other:"web servers"}}},"nginxplus.ssl_handshakes_failures_rate":{mainheads:[{groupBy:["selected"],valueRange:[0,null],chartLibrary:"easypiechart",title:"SSL Handshake Failures",colors:s.default[4],layout:{left:6,top:0,width:3,height:5}}],info:"
SSL handshake failures. NoCommonProtocol - failed because of no common protocol. NoCommonCipher - failed because of no shared cipher. Timeout - failed because of a timeout. PeerRejectedCert - failed because a client rejected the certificate.
    ",en:{instance:{one:"web server",other:"web servers"}}},"nginxplus.ssl_verification_errors_rate":{info:'
SSL verification errors. NoCert - a client did not provide the required certificate. ExpiredCert - an expired or not yet valid certificate was presented by a client. RevokedCert - a revoked certificate was presented by a client. HostnameMismatch - server\'s certificate does not match the hostname. Other - other SSL certificate verification errors.
    ',en:{instance:{one:"web server",other:"web servers"}}},"nginxplus.http_requests_rate":{mainheads:[{groupBy:["selected"],valueRange:[0,null],chartLibrary:"gauge",title:"Total HTTP Requests Rate",colors:s.default[4],layout:{left:9,top:0,width:3,height:5}}],info:"The number of HTTP requests received from clients.",en:{instance:{one:"web server",other:"web servers"}}},"nginxplus.http_requests_count":{mainheads:[{chartLibrary:"dygraph",sparkline:!0,overlays:{latestValue:{type:"latestValue"}},hasToolbox:!1,title:"Total Requests",layout:{left:0,top:5,width:3,height:2.5}}],info:"The current number of client requests.",en:{instance:{one:"web server",other:"web servers"}}},"nginxplus.uptime":{aggregationMethod:"min",mainheads:[{groupBy:["selected"],aggregationMethod:"min",chartLibrary:"number",title:"Minimum Uptime",layout:{left:0,top:7.5,width:3,height:2.5}}],info:"The time elapsed since the NGINX process was started.",en:{instance:{one:"web server",other:"web servers"}}},"nginxplus.http_server_zone_requests_rate":{mainheads:[{groupBy:["selected"],chartLibrary:"gauge",title:"Total HTTP Server Zone Requests Rate",layout:{left:3,top:5,width:3,height:5}}],info:"The number of requests to the HTTP Server Zone.",en:{instance:{one:"web server",other:"web servers"}}},"nginxplus.http_server_zone_responses_per_code_class_rate":{info:"The number of responses from the HTTP Server Zone. Responses are grouped by HTTP status code class.",en:{instance:{one:"web server",other:"web servers"}}},"nginxplus.http_server_zone_traffic_rate":{mainheads:[{groupBy:["selected"],chartLibrary:"gauge",title:"Total HTTP Server Zone Traffic Rate",layout:{left:6,top:5,width:3,height:5}}],info:"The amount of data transferred to and from the HTTP Server Zone.",en:{instance:{one:"web server",other:"web servers"}}},"nginxplus.http_server_zone_requests_processing_count":{info:"The number of client requests that are currently being processed by the HTTP Server Zone.",en:{instance:{one:"web server",other:"web servers"}}},"nginxplus.http_server_zone_requests_discarded_rate":{mainheads:[{groupBy:["selected"],chartLibrary:"gauge",title:"Total HTTP Server Zone Discarded Requests Rate",layout:{left:9,top:5,width:3,height:5}}],info:"The number of requests to the HTTP Server Zone completed without sending a response.",en:{instance:{one:"web server",other:"web servers"}}},"nginxplus.http_location_zone_requests_rate":{info:"The number of requests to the HTTP Location Zone.",en:{instance:{one:"web server",other:"web servers"}}},"nginxplus.http_location_zone_responses_per_code_class_rate":{info:"The number of responses from the HTTP Location Zone. 
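/*
  The mainheads layout objects ({left, top, width, height}) appear to place the headline widgets on a 12-column grid: left values of 0/3/6/9 with width 3 give four widgets per row, and heights of 2.5 stack two half-height widgets inside a 5-unit row. A hypothetical conversion to CSS grid coordinates (the grid itself is an assumption, not taken from this file):

  const toGridArea = ({ left, top, width, height }) =>
    `${top * 2 + 1} / ${left + 1} / span ${height * 2} / span ${width}`;
  // rows are doubled so the fractional height 2.5 maps onto whole grid rows

  toGridArea({ left: 3, top: 0, width: 3, height: 2.5 }); // -> "1 / 4 / span 5 / span 3"
*/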
Responses are grouped by HTTP status code class.",en:{instance:{one:"web server",other:"web servers"}}},"nginxplus.http_location_zone_traffic_rate":{info:"The amount of data transferred to and from the HTTP Location Zone.",en:{instance:{one:"web server",other:"web servers"}}},"nginxplus.http_location_zone_requests_discarded_rate":{info:"The number of requests to the HTTP Location Zone completed without sending a response.",en:{instance:{one:"web server",other:"web servers"}}},"nginxplus.http_upstream_peers_count":{info:"The number of HTTP Upstream servers.",en:{instance:{one:"web server",other:"web servers"}}},"nginxplus.http_upstream_zombies_count":{info:"The current number of HTTP Upstream servers removed from the group but still processing active client requests.",en:{instance:{one:"web server",other:"web servers"}}},"nginxplus.http_upstream_keepalive_count":{info:"The current number of idle keepalive connections to the HTTP Upstream.",en:{instance:{one:"web server",other:"web servers"}}},"nginxplus.http_upstream_server_requests_rate":{mainheads:[{groupBy:["selected"],chartLibrary:"gauge",title:"Total HTTP Upstream Request Rate",layout:{left:0,top:10,width:3,height:5}}],info:"The number of client requests forwarded to the HTTP Upstream Server.",en:{instance:{one:"web server",other:"web servers"}}},"nginxplus.http_upstream_server_responses_per_code_class_rate":{info:"The number of responses received from the HTTP Upstream Server. Responses are grouped by HTTP status code class.",en:{instance:{one:"web server",other:"web servers"}}},"nginxplus.http_upstream_server_response_time":{aggregationMethod:"avg",mainheads:[{groupBy:["selected"],aggregationMethod:"avg",chartLibrary:"easypiechart",title:"Average HTTP Upstream Response Time",layout:{left:3,top:10,width:3,height:5}}],info:"The average time to get a complete response from the HTTP Upstream Server.",en:{instance:{one:"web server",other:"web servers"}}},"nginxplus.http_upstream_server_response_header_time":{aggregationMethod:"avg",info:"The average time to get a response header from the HTTP Upstream Server.",en:{instance:{one:"web server",other:"web servers"}}},"nginxplus.http_upstream_server_traffic_rate":{info:"The amount of traffic transferred to and from the HTTP Upstream Server.",en:{instance:{one:"web server",other:"web servers"}}},"nginxplus.http_upstream_server_state":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"HTTP Upstream Server State",layout:{left:6,top:10,width:3,height:5}}],info:"The current state of the HTTP Upstream Server. Status is active if set to 1.",en:{instance:{one:"web server",other:"web servers"}}},"nginxplus.http_upstream_server_connections_count":{info:"The current number of active connections to the HTTP Upstream Server.",en:{instance:{one:"web server",other:"web servers"}}},"nginxplus.http_upstream_server_downtime":{info:"The time the HTTP Upstream Server has spent in the unavail, checking, and unhealthy states.",en:{instance:{one:"web server",other:"web servers"}}},"nginxplus.http_cache_state":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Total HTTP Cache State",layout:{left:9,top:10,width:3,height:5}}],info:"HTTP cache current state. Cold means that the cache loader process is still loading data from disk into the cache.",en:{instance:{one:"web server",other:"web servers"}}},"nginxplus.http_cache_iops":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Total HTTP Cache IOPS",layout:{left:9,top:15,width:3,height:5}}],info:"
HTTP cache IOPS. Served - valid, expired, and revalidated responses read from the cache. Written - miss, expired, and bypassed responses written to the cache. Bypassed - miss, expired, and bypass responses.
    ",en:{instance:{one:"web server",other:"web servers"}}},"nginxplus.http_cache_io":{info:"
HTTP cache IO. Served - valid, expired, and revalidated responses read from the cache. Written - miss, expired, and bypassed responses written to the cache. Bypassed - miss, expired, and bypass responses.
    ",en:{instance:{one:"web server",other:"web servers"}}},"nginxplus.http_cache_size":{info:"The current size of the cache.",en:{instance:{one:"web server",other:"web servers"}}},"nginxplus.stream_server_zone_connections_rate":{mainheads:[{groupBy:["selected"],chartLibrary:"gauge",title:"Total Stream Server Zone Connections Rate",layout:{left:0,top:15,width:3,height:5}}],info:"The number of accepted connections to the Stream Server Zone.",en:{instance:{one:"web server",other:"web servers"}}},"nginxplus.stream_server_zone_sessions_per_code_class_rate":{info:"The number of completed sessions for the Stream Server Zone. Sessions grouped by status code class.",en:{instance:{one:"web server",other:"web servers"}}},"nginxplus.stream_server_zone_traffic_rate":{info:"The amount of data transferred to and from the Stream Server Zone.",en:{instance:{one:"web server",other:"web servers"}}},"nginxplus.stream_server_zone_connections_processing_count":{info:"The number of client connections to the Stream Server Zone that are currently being processed.",en:{instance:{one:"web server",other:"web servers"}}},"nginxplus.stream_server_zone_connections_discarded_rate":{info:"The number of connections to the Stream Server Zone completed without creating a session.",en:{instance:{one:"web server",other:"web servers"}}},"nginxplus.stream_upstream_peers_count":{info:"The number of Stream Upstream servers.",en:{instance:{one:"web server",other:"web servers"}}},"nginxplus.stream_upstream_zombies_count":{info:"The current number of HTTP Upstream servers removed from the group but still processing active client connections.",en:{instance:{one:"web server",other:"web servers"}}},"nginxplus.stream_upstream_server_connections_rate":{info:"The number of connections forwarded to the Stream Upstream Server.",en:{instance:{one:"web server",other:"web servers"}}},"nginxplus.stream_upstream_server_traffic_rate":{info:"The amount of traffic transferred to and from the Stream Upstream Server.",en:{instance:{one:"web server",other:"web servers"}}},"nginxplus.stream_upstream_server_state":{info:"The current state of the Stream Upstream Server. Status is active if set to 1.",en:{instance:{one:"web server",other:"web servers"}}},"nginxplus.stream_upstream_server_downtime":{info:"The time the Stream Upstream Server has spent in the unavail, checking, and unhealthy states.",en:{instance:{one:"web server",other:"web servers"}}},"nginxplus.stream_upstream_server_connections_count":{info:"The current number of connections to the Stream Upstream Server.",en:{instance:{one:"web server",other:"web servers"}}},"nginxplus.resolver_zone_requests_rate":{mainheads:[{groupBy:["selected"],chartLibrary:"gauge",title:"Total Resolver Zone Request Rate",colors:s.default[4],layout:{left:3,top:15,width:3,height:5}}],info:"
Resolver zone DNS requests. Name - requests to resolve names to addresses. Srv - requests to resolve SRV records. Addr - requests to resolve addresses to names.
    ",en:{instance:{one:"web server",other:"web servers"}}},"nginxplus.resolver_zone_responses_rate":{mainheads:[{groupBy:["selected"],chartLibrary:"gauge",title:"Total Resolver Zone Responses Rate",colors:s.default[4],layout:{left:6,top:15,width:3,height:5}}],info:"
Resolver zone DNS responses. NoError - successful responses. FormErr - format error responses. ServFail - server failure responses. NXDomain - host not found responses. NotImp - unimplemented responses. Refused - operation refused responses. TimedOut - timed out requests. Unknown - requests completed with an unknown error.
    ",en:{instance:{one:"web server",other:"web servers"}}},"nginxvts.requests_total":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Requests",layout:{left:0,top:0,width:3,height:5}}],info:"The total number of requests on the nginx server. Monitoring this metric can help identify potential performance issues related to requests.",en:{instance:{one:"web server",other:"web servers"}}},"nginxvts.active_connections":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Active Connections",layout:{left:6,top:0,width:3,height:5}}],info:"The number of active connections on the nginx server. Monitoring this metric can help identify potential performance issues related to connections.",en:{instance:{one:"web server",other:"web servers"}}},"nginxvts.uptime":{aggregationMethod:"min",mainheads:[{groupBy:["selected"],aggregationMethod:"min",chartLibrary:"number",title:"Minimum Uptime",layout:{left:9,top:0,width:3,height:5}}],info:"The uptime of the nginx server. Monitoring this metric can help identify potential performance issues related to server uptime.",en:{instance:{one:"web server",other:"web servers"}}},"nginxvts.shm_usage":{info:"The usage of the shared memory on the nginx server. Monitoring this metric can help identify potential performance issues related to shared memory usage.",en:{instance:{one:"web server",other:"web servers"}}},"nginxvts.server_requests_total":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Server Requests",layout:{left:3,top:0,width:3,height:5}}],info:"The total number of requests on the nginx server. Monitoring this metric can help identify potential performance issues related to server requests.",en:{instance:{one:"web server",other:"web servers"}}},"httpcheck.response_time":{aggregationMethod:"avg",mainheads:[{groupBy:["selected"],aggregationMethod:"avg",chartLibrary:"easypiechart",title:"Average Response Time",layout:{left:0,top:0,width:3,height:5}}],info:"The response time describes the time passed between request and response. Currently, the accuracy of the response time is low and should be used as reference only.",groupBy:["label"],groupByLabel:["_collect_job"]},"httpcheck.response_length":{aggregationMethod:"avg",mainheads:[{groupBy:["selected"],aggregationMethod:"avg",chartLibrary:"easypiechart",title:"Average Response Length",layout:{left:3,top:0,width:3,height:5}}],info:"The response length counts the number of characters in the response body. For static pages, this should be mostly constant.",groupBy:["label"],groupByLabel:["_collect_job"]},"httpcheck.status":{mainheads:[{groupBy:["dimension"],chartLibrary:"bars",dimensionsSort:"valueDesc",title:"HTTP Server Check Status",layout:{left:6,top:0,width:3,height:5}}],info:"This chart verifies the response of the webserver. Each status dimension will have a value of 1 if triggered. Dimension success is 1 only if all constraints are satisfied. This chart is most useful for alerts or third-party apps.",groupBy:["label"],groupByLabel:["_collect_job"],dimensionsOnNonDimensionGrouping:["success"]},"netdata.response_time":{aggregationMethod:"avg",mainheads:[{groupBy:["selected"],aggregationMethod:"avg",chartLibrary:"easypiechart",title:"Average API Response Time",layout:{left:6,top:0,width:3,height:5}}],info:"The netdata API response time measures the time netdata needed to serve requests. 
This time includes everything, from the reception of the first byte of a request, to the dispatch of the last byte of its reply; therefore, it includes all network latencies involved (i.e. a client over a slow network will influence these metrics).",en:{instance:{one:"agent",other:"agents"}}},"netdata.server_cpu":{mainheads:[{groupBy:["instance"],showPostAggregations:!0,postGroupBy:["selected"],aggregationMethod:"avg",chartLibrary:"easypiechart",title:"Average CPU usage per Agent",colors:s.default[12]}],info:"The CPU time consumed by the Netdata process in user and system space.",en:{instance:{one:"agent",other:"agents"}}},"netdata.memory":{mainheads:[{groupBy:["instance"],showPostAggregations:!0,postGroupBy:["selected"],aggregationMethod:"avg",chartLibrary:"easypiechart",title:"Average memory usage per Agent",colors:s.default[1]}],info:"The memory consumed by the Netdata agent.",en:{instance:{one:"agent",other:"agents"}}},"netdata.net":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total network traffic across all agents",layout:{left:9,top:0,width:3,height:5}}],info:"The network traffic generated by the Netdata agent.",en:{instance:{one:"agent",other:"agents"}}},"netdata.ebpf_threads":{info:'Shows the total number of threads and the number of active threads. For more details about the threads, see the official documentation.',en:{instance:{one:"agent",other:"agents"}}},"netdata.ebpf_load_methods":{info:"Shows the number of threads loaded using legacy code (independent binary) or CO-RE (Compile Once Run Everywhere).",en:{instance:{one:"agent",other:"agents"}}},"retroshare.bandwidth":{info:"RetroShare inbound and outbound traffic.",mainheads:[{groupBy:["selected"],chartLibrary:"gauge",title:"Total Inbound",selectedDimensions:["bandwidth_down_kb"],colors:s.default[0]},{groupBy:["selected"],chartLibrary:"gauge",title:"Total Outbound",selectedDimensions:["bandwidth_up_kb"],colors:s.default[1]}]},"retroshare.peers":{info:"Number of (connected) RetroShare friends.",mainheads:[{groupBy:["selected"],urlOptions:["friends"],selectedDimensions:["peers_connected"],chartLibrary:"easypiechart",title:"Total Connected Friends"}]},"retroshare.dht":{info:"Statistics about RetroShare's DHT. 
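/*
  Several netdata.* heads above combine groupBy:["instance"] with postGroupBy:["selected"] and showPostAggregations: first reduce per agent, then reduce the per-agent results into one headline number. A sketch of that two-stage reduction (data and helper are illustrative, inferred from the field names):

  const avg = (xs) => xs.reduce((a, b) => a + b, 0) / xs.length;
  const perAgent = { "agent-a": [1, 2, 3], "agent-b": [4, 5, 6] };
  const stage1 = Object.values(perAgent).map(avg); // [2, 5]  average per agent
  const headline = avg(stage1);                    // 3.5     "Average CPU usage per Agent"
*/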
These values are estimated!"},"fping.quality":{family:"quality",colors:s.default[10]},"fping.packets":{family:"packets"},"cgroup.cpu_limit":{aggregationMethod:"avg",valueRange:[0,null],mainheads:[{groupBy:["selected"],aggregationMethod:"max",groupingMethod:"max",valueRange:[0,100],selectedDimensions:["used"],chartLibrary:"gauge",title:"Maximum CPU Utilization (within limit)",layout:{left:0,top:0,width:3,height:5}},{groupBy:["label"],groupByLabel:["cgroup_name"],aggregationMethod:"max",groupingMethod:"max",valueRange:[0,null],selectedDimensions:["used"],chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Maximum CPU Utilization (within limit) per cgroup",layout:{left:0,top:5,width:4,height:5}}],info:ce,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.cpu":{aggregationMethod:"avg",mainheads:[{groupBy:["instance"],showPostAggregations:!0,postGroupBy:["selected"],aggregationMethod:"max",groupingMethod:"max",valueRange:[0,100],chartLibrary:"gauge",title:"Maximum CPU Utilization",layout:{left:3,top:0,width:3,height:5}},{groupBy:["label"],groupByLabel:["cgroup_name"],aggregationMethod:"max",groupingMethod:"max",valueRange:[0,null],chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Maximum CPU Utilization per cgroup",layout:{left:4,top:5,width:4,height:5}}],info:le,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.throttled":{aggregationMethod:"avg",info:de,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.throttled_duration":{aggregationMethod:"avg",info:he,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.cpu_shares":{info:ue,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.cpu_per_core":{aggregationMethod:"avg",info:pe,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.cpu_some_pressure":{aggregationMethod:"avg",info:me,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.cpu_some_pressure_stall_time":{info:ge,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.cpu_full_pressure":{aggregationMethod:"avg",info:fe,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.cpu_full_pressure_stall_time":{info:be,en:{instance:{one:"cgroup",other:"cgroups"}}},"k8s.cgroup.cpu_limit":{aggregationMethod:"avg",valueRange:[0,null],mainheads:[{groupBy:["selected"],aggregationMethod:"max",selectedDimensions:["used"],chartLibrary:"gauge",title:"Maximum CPU Utilization (within limit)"}],info:ce,en:{instance:{one:"container",other:"containers"}}},"k8s.cgroup.cpu":{aggregationMethod:"avg",mainheads:[{groupBy:["instance"],showPostAggregations:!0,postGroupBy:["selected"],aggregationMethod:"max",chartLibrary:"gauge",title:"Maximum CPU 
Utilization"}],info:le,en:{instance:{one:"container",other:"containers"}}},"k8s.cgroup.throttled":{aggregationMethod:"avg",info:de,en:{instance:{one:"container",other:"containers"}}},"k8s.cgroup.throttled_duration":{aggregationMethod:"avg",info:he,en:{instance:{one:"container",other:"containers"}}},"k8s.cgroup.cpu_shares":{info:ue,en:{instance:{one:"container",other:"containers"}}},"k8s.cgroup.cpu_per_core":{aggregationMethod:"avg",info:pe,en:{instance:{one:"container",other:"containers"}}},"k8s.cgroup.cpu_some_pressure":{aggregationMethod:"avg",info:me,en:{instance:{one:"container",other:"containers"}}},"k8s.cgroup.cpu_some_pressure_stall_time":{info:ge,en:{instance:{one:"container",other:"containers"}}},"k8s.cgroup.cpu_full_pressure":{aggregationMethod:"avg",info:fe,en:{instance:{one:"container",other:"containers"}}},"k8s.cgroup.cpu_full_pressure_stall_time":{info:be,en:{instance:{one:"container",other:"containers"}}},"cgroup.mem_utilization":{aggregationMethod:"avg",info:ye,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.mem_usage_limit":{aggregationMethod:"avg",mainheads:[{groupBy:["percentage-of-instance"],showPostAggregations:!0,postGroupBy:["selected"],aggregationMethod:"max",groupingMethod:"max",valueRange:[0,100],selectedDimensions:["used"],chartLibrary:"gauge",title:"Maximum Memory Utilization (within limit)",layout:{left:6,top:0,width:3,height:5}},{groupBy:["label"],groupByLabel:["cgroup_name"],aggregationMethod:"max",groupingMethod:"max",selectedDimensions:["used"],chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Maximum Memory Utilization (within limit) per cgroup",layout:{left:8,top:5,width:4,height:5}}],info:ve,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.mem_usage":{mainheads:[{groupBy:["dimension"],chartLibrary:"dygraph",sparkline:!0,overlays:{latestValue:{type:"latestValue"}},hasToolbox:!1,selectedDimensions:["ram"],title:"Total RAM Used",colors:s.default[11],layout:{left:9,top:0,width:3,height:5}}],info:_e,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.mem":{info:we,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.mem_failcnt":{info:ke,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.writeback":{info:Te,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.mem_activity":{info:Se,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.pgfaults":{info:Me,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.memory_some_pressure":{aggregationMethod:"avg",info:xe,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.memory_some_pressure_stall_time":{info:Ce,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.memory_full_pressure":{aggregationMethod:"avg",info:Pe,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.memory_full_pressure_stall_time":{info:Be,en:{instance:{one:"cgroup",other:"cgroups"}}},"k8s.cgroup.mem_utilization":{aggregationMethod:"avg",info:ye,en:{instance:{one:"container",other:"containers"}}},"k8s.cgroup.mem_usage_limit":{aggregationMethod:"avg",mainheads:[{groupBy:["percentage-of-instance"],showPostAggregations:!0,postGroupBy:["selected"],aggregationMethod:"max",valueRange:[0,null],selectedDimensions:["used"],chartLibrary:"gauge",title:"Maximum Memory Used (within limit)"}],info:ve,en:{instance:{one:"container",other:"containers"}}},"k8s.cgroup.mem_usage":{mainheads:[{groupBy:["selected"],selectedDimensions:["ram"],chartLibrary:"gauge",title:"Total RAM 
Used"}],info:_e,en:{instance:{one:"container",other:"containers"}}},"k8s.cgroup.mem":{info:we,en:{instance:{one:"container",other:"containers"}}},"k8s.cgroup.mem_failcnt":{info:ke,en:{instance:{one:"container",other:"containers"}}},"k8s.cgroup.writeback":{info:Te,en:{instance:{one:"container",other:"containers"}}},"k8s.cgroup.mem_activity":{info:Se,en:{instance:{one:"container",other:"containers"}}},"k8s.cgroup.pgfaults":{info:Me,en:{instance:{one:"container",other:"containers"}}},"k8s.cgroup.memory_some_pressure":{aggregationMethod:"avg",info:xe,en:{instance:{one:"container",other:"containers"}}},"k8s.cgroup.memory_some_pressure_stall_time":{info:Ce,en:{instance:{one:"container",other:"containers"}}},"k8s.cgroup.memory_full_pressure":{aggregationMethod:"avg",info:Pe,en:{instance:{one:"container",other:"containers"}}},"k8s.cgroup.memory_full_pressure_stall_time":{info:Be,en:{instance:{one:"container",other:"containers"}}},"cgroup.io":{mainheads:[{groupBy:["selected"],valueRange:[0,null],chartLibrary:"easypiechart",title:"Total Disk Read",selectedDimensions:["read"],colors:s.default[0],priority:5,layout:{left:0,top:10,width:2,height:5}},{groupBy:["selected"],valueRange:[0,null],chartLibrary:"easypiechart",title:"Total Disk Write",selectedDimensions:["write"],colors:s.default[1],priority:6,layout:{left:2,top:10,width:2,height:5}}],info:qe,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.serviced_ops":{info:Ie,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.queued_ops":{info:Le,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.merged_ops":{info:Ae,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.throttle_io":{info:Ee,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.throttle_serviced_ops":{info:Ne,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.pids_current":{info:De,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.io_some_pressure":{aggregationMethod:"avg",info:Re,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.io_some_pressure_stall_time":{info:Fe,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.io_full_pressure":{aggregationMethod:"avg",info:Ue,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.io_full_pressure_stall_time":{info:ze,en:{instance:{one:"cgroup",other:"cgroups"}}},"k8s.cgroup.io":{info:qe,en:{instance:{one:"container",other:"containers"}}},"k8s.cgroup.serviced_ops":{info:Ie,en:{instance:{one:"container",other:"containers"}}},"k8s.cgroup.queued_ops":{info:Le,en:{instance:{one:"container",other:"containers"}}},"k8s.cgroup.merged_ops":{info:Ae,en:{instance:{one:"container",other:"containers"}}},"k8s.cgroup.throttle_io":{mainheads:[{groupBy:["selected"],selectedDimensions:["read"],chartLibrary:"gauge",title:"Total Read Disk I/O"},{groupBy:["selected"],selectedDimensions:["write"],chartLibrary:"gauge",title:"Total Write Disk 
I/O"}],info:Ee,en:{instance:{one:"container",other:"containers"}}},"k8s.cgroup.throttle_serviced_ops":{info:Ne,en:{instance:{one:"container",other:"containers"}}},"k8s.cgroup.pids_current":{info:De,en:{instance:{one:"container",other:"containers"}}},"k8s.cgroup.io_some_pressure":{aggregationMethod:"avg",info:Re,en:{instance:{one:"container",other:"containers"}}},"k8s.cgroup.io_some_pressure_stall_time":{info:Fe,en:{instance:{one:"container",other:"containers"}}},"k8s.cgroup.io_full_pressure":{aggregationMethod:"avg",info:Ue,en:{instance:{one:"container",other:"containers"}}},"k8s.cgroup.io_full_pressure_stall_time":{info:ze,en:{instance:{one:"container",other:"containers"}}},"cgroup.swap_read":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["cgroup_name"],info:V,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.swap_write":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["cgroup_name"],info:Q,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.fd_open":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["cgroup_name"],info:k,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.fd_open_error":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["cgroup_name"],info:T,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.fd_close":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["cgroup_name"],info:S,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.fd_close_error":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["cgroup_name"],info:M,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.vfs_unlink":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["cgroup_name"],info:N,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.vfs_write":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["cgroup_name"],info:q,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.vfs_write_error":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["cgroup_name"],info:L,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.vfs_read":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["cgroup_name"],info:I,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.vfs_read_error":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["cgroup_name"],info:A,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.vfs_write_bytes":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["cgroup_name"],info:E,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.vfs_read_bytes":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["cgroup_name"],info:D,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.vfs_fsync":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["cgroup_name"],info:R,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.vfs_fsync_error":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["cgroup_name"],info:F,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.vfs_open":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["cgroup_name"],info:U,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.vfs_open_error":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["cgroup_name"],info:z,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.vfs_create":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["cgroup_name"],info:O,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.vfs_create_error":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["cgroup_name"],info:H,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.process_create":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["cgroup
_name"],info:b,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.thread_create":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["cgroup_name"],info:y,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.task_exit":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["cgroup_name"],info:v,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.task_close":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["cgroup_name"],info:_,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.task_error":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["cgroup_name"],info:w,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.dc_ratio":{aggregationMethod:"avg",groupBy:["label"],groupByLabel:["cgroup_name"],info:'Percentage of file accesses that were present in the directory cache. 100% means that every file that was accessed was present in the directory cache. If files are not present in the directory cache 1) they are not present in the file system, 2) the files were not accessed before. Read more about directory cache. Netdata also gives a summary for these charts in Filesystem submenu.',en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.dc_reference":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["cgroup_name"],info:C,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.dc_not_cache":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["cgroup_name"],info:P,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.dc_not_found":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["cgroup_name"],info:B,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.shmget":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["cgroup_name"],info:Z,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.shmat":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["cgroup_name"],info:Y,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.shmdt":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["cgroup_name"],info:X,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.shmctl":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["cgroup_name"],info:J,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.outbound_conn_v4":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["cgroup_name"],info:$,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.outbound_conn_v6":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["cgroup_name"],info:ee,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.net_bytes_send":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["cgroup_name"],info:te,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.net_bytes_recv":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["cgroup_name"],info:ne,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.net_tcp_send":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["cgroup_name"],info:se,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.net_tcp_recv":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["cgroup_name"],info:ae,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.net_retransmit":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["cgroup_name"],info:oe,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.net_udp_send":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["cgroup_name"],info:re,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.net_udp_recv":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["cgroup_name"],info:ie,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.cachestat_ratio":{aggregation
Method:"avg",groupBy:["label"],groupByLabel:["cgroup_name"],info:j,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.cachestat_dirties":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["cgroup_name"],info:G,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.cachestat_hits":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["cgroup_name"],info:W,en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.cachestat_misses":{info:K,en:{instance:{one:"cgroup",other:"cgroups"}}},"systemd.service.cpu.utilization":{info:'Total CPU utilization within the system-wide CPU resources (all cores). The amount of time spent by tasks of the cgroup in user and kernel modes.',aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],mainheads:[{groupBy:["label"],groupByLabel:["service_name"],chartLibrary:"bars",title:"Top by CPU",dimensionsSort:"valueDesc",colors:s.default[12],layout:{left:0,top:0,width:4,height:5}}],en:{instance:{one:"service",other:"services"}}},"systemd.service.memory.usage":{info:"The amount of used RAM and swap memory.",aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],mainheads:[{groupBy:["label"],groupByLabel:["service_name"],selectedDimensions:["ram"],chartLibrary:"bars",title:"Top by RAM",dimensionsSort:"valueDesc",colors:s.default[12],layout:{left:4,top:0,width:4,height:5}},{groupBy:["label"],groupByLabel:["service_name"],selectedDimensions:["swap"],chartLibrary:"bars",title:"Top by Swap",dimensionsSort:"valueDesc",colors:s.default[12],layout:{left:8,top:0,width:4,height:5}}],en:{instance:{one:"service",other:"services"}}},"systemd.service.memory.failcnt":{info:"The number of memory usage hits limits.",aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],chartType:"stacked",en:{instance:{one:"service",other:"services"}}},"systemd.service.memory.ram.usage":{info:'Memory usage statistics. The individual metrics are described in the memory.stat section for cgroup-v1 and cgroup-v2.',aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],en:{instance:{one:"service",other:"services"}}},"systemd.service.memory.writeback":{info:"Dirty is the amount of memory waiting to be written to disk. Writeback is how much memory is actively being written to disk.",aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],en:{instance:{one:"service",other:"services"}}},"systemd.service.memory.paging.faults":{info:'Memory page fault statistics.',aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],en:{instance:{one:"service",other:"services"}}},"systemd.service.memory.paging.io":{info:"Memory accounting statistics. In - a page is accounted as either mapped anon page (RSS) or cache page (Page Cache) to the cgroup. Out - a page is unaccounted from the cgroup.",aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],chartType:"stacked",en:{instance:{one:"service",other:"services"}}},"systemd.service.disk.io":{info:"The amount of data transferred to and from specific devices as seen by the CFQ scheduler. 
It is not updated when the CFQ scheduler is operating on a request queue.",aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],chartType:"stacked",en:{instance:{one:"service",other:"services"}}},"systemd.service.disk.iops":{info:"The number of I/O operations performed on specific devices as seen by the CFQ scheduler.",aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],chartType:"stacked",en:{instance:{one:"service",other:"services"}}},"systemd.service.disk.throttle.io":{info:"The amount of data transferred to and from specific devices as seen by the throttling policy.",aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],chartType:"stacked",en:{instance:{one:"service",other:"services"}}},"systemd.service.disk.throttle.iops":{info:"The number of I/O operations performed on specific devices as seen by the throttling policy.",aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],chartType:"stacked",en:{instance:{one:"service",other:"services"}}},"systemd.service.disk.queued_iops":{info:"The number of requests queued for I/O operations.",aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],chartType:"stacked",en:{instance:{one:"service",other:"services"}}},"systemd.service.disk.merged_iops":{info:"The number of BIO requests merged into requests for I/O operations.",aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],chartType:"stacked",en:{instance:{one:"service",other:"services"}}},"systemd.service.pids.current":{info:"The number of processes currently in the cgroup.",aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],chartType:"stacked",en:{instance:{one:"service",other:"services"}}},"services.cpu":{aggregationMethod:"avg",info:'Total CPU utilization within the system-wide CPU resources (all cores). The amount of time spent by tasks of the cgroup in user and kernel modes.',en:{instance:{one:"system",other:"systems"}}},"services.mem_usage":{aggregationMethod:"avg",info:"The amount of used RAM.",en:{instance:{one:"system",other:"systems"}}},"services.mem_rss":{info:'The amount of used RSS memory. It includes transparent hugepages.',en:{instance:{one:"system",other:"systems"}}},"services.mem_mapped":{info:'The size of memory-mapped files.',en:{instance:{one:"system",other:"systems"}}},"services.mem_cache":{info:'The amount of used page cache memory.',en:{instance:{one:"system",other:"systems"}}},"services.mem_writeback":{info:'The amount of file/anon cache that is queued for syncing to disk.',en:{instance:{one:"system",other:"systems"}}},"services.mem_pgfault":{info:'The number of page faults. It includes both minor and major page faults.',en:{instance:{one:"system",other:"systems"}}},"services.mem_pgmajfault":{info:'The number of major page faults.',en:{instance:{one:"system",other:"systems"}}},"services.mem_pgpgin":{info:"The amount of memory charged to the cgroup. The charging event happens each time a page is accounted as either mapped anon page(RSS) or cache page(Page Cache) to the cgroup.",en:{instance:{one:"system",other:"systems"}}},"services.mem_pgpgout":{info:"The amount of memory uncharged from the cgroup. 
The uncharging event happens each time a page is unaccounted from the cgroup.",en:{instance:{one:"system",other:"systems"}}},"services.mem_failcnt":{info:"The number of times memory usage hit its limit.",en:{instance:{one:"system",other:"systems"}}},"services.swap_usage":{info:'The amount of used swap memory.',en:{instance:{one:"system",other:"systems"}}},"services.io_read":{info:"The amount of data transferred from specific devices as seen by the CFQ scheduler. It is not updated when the CFQ scheduler is operating on a request queue.",en:{instance:{one:"system",other:"systems"}}},"services.io_write":{info:"The amount of data transferred to specific devices as seen by the CFQ scheduler. It is not updated when the CFQ scheduler is operating on a request queue.",en:{instance:{one:"system",other:"systems"}}},"services.io_ops_read":{info:"The number of read operations performed on specific devices as seen by the CFQ scheduler.",en:{instance:{one:"system",other:"systems"}}},"services.io_ops_write":{info:"The number of write operations performed on specific devices as seen by the CFQ scheduler.",en:{instance:{one:"system",other:"systems"}}},"services.throttle_io_read":{info:"The amount of data transferred from specific devices as seen by the throttling policy.",en:{instance:{one:"system",other:"systems"}}},"services.throttle_io_write":{info:"The amount of data transferred to specific devices as seen by the throttling policy.",en:{instance:{one:"system",other:"systems"}}},"services.throttle_io_ops_read":{info:"The number of read operations performed on specific devices as seen by the throttling policy.",en:{instance:{one:"system",other:"systems"}}},"services.throttle_io_ops_write":{info:"The number of write operations performed on specific devices as seen by the throttling policy.",en:{instance:{one:"system",other:"systems"}}},"services.queued_io_ops_read":{info:"The number of queued read requests.",en:{instance:{one:"system",other:"systems"}}},"services.queued_io_ops_write":{info:"The number of queued write requests.",en:{instance:{one:"system",other:"systems"}}},"services.merged_io_ops_read":{info:"The number of read requests merged.",en:{instance:{one:"system",other:"systems"}}},"services.merged_io_ops_write":{info:"The number of write requests merged.",en:{instance:{one:"system",other:"systems"}}},"systemd.services.swap_read":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],info:V
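/*
  The bare identifiers assigned to info in the entries below (V, Q, k, T, S, M, ...) are minifier constants holding description strings shared between the cgroup.* entries above and the systemd.services.* entries that follow, e.g. (the text is illustrative, not the real constant):

  const V = "The rate of swap reads ...";      // shared description string
  const charts = {
    "cgroup.swap_read": { info: V },           // same text reused by both chart families
    "systemd.services.swap_read": { info: V },
  };
*/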
,en:{instance:{one:"system",other:"systems"}}},"systemd.services.swap_write":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],info:Q
,en:{instance:{one:"system",other:"systems"}}},"systemd.services.fd_open":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],info:k
,en:{instance:{one:"system",other:"systems"}}},"systemd.services.fd_open_error":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],info:T
,en:{instance:{one:"system",other:"systems"}}},"systemd.services.fd_close":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],info:S
,en:{instance:{one:"system",other:"systems"}}},"systemd.services.fd_close_error":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],info:M
,en:{instance:{one:"system",other:"systems"}}},"systemd.services.vfs_unlink":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],info:N,en:{instance:{one:"system",other:"systems"}}},"systemd.services.vfs_write":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],info:q
,en:{instance:{one:"system",other:"systems"}}},"systemd.services.vfs_write_error":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],info:L
,en:{instance:{one:"system",other:"systems"}}},"systemd.services.vfs_read":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],info:I
,en:{instance:{one:"system",other:"systems"}}},"systemd.services.vfs_read_error":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],info:A
,en:{instance:{one:"system",other:"systems"}}},"systemd.services.vfs_write_bytes":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],info:E
,en:{instance:{one:"system",other:"systems"}}},"systemd.services.vfs_read_bytes":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],info:D
,en:{instance:{one:"system",other:"systems"}}},"systemd.services.vfs_fsync":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],info:R
,en:{instance:{one:"system",other:"systems"}}},"systemd.services.vfs_fsync_error":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],info:F
,en:{instance:{one:"system",other:"systems"}}},"systemd.services.vfs_open":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],info:U
,en:{instance:{one:"system",other:"systems"}}},"systemd.services.vfs_open_error":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],info:z
,en:{instance:{one:"system",other:"systems"}}},"systemd.services.vfs_create":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],info:O
,en:{instance:{one:"system",other:"systems"}}},"systemd.services.vfs_create_error":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],info:H
,en:{instance:{one:"system",other:"systems"}}},"systemd.services.process_create":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],info:b
,en:{instance:{one:"system",other:"systems"}}},"systemd.services.thread_create":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],info:y
,en:{instance:{one:"system",other:"systems"}}},"systemd.services.task_exit":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],info:v
,en:{instance:{one:"system",other:"systems"}}},"systemd.services.task_close":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],info:_
,en:{instance:{one:"system",other:"systems"}}},"systemd.services.task_error":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],info:w
,en:{instance:{one:"system",other:"systems"}}},"systemd.services.dc_ratio":{aggregationMethod:"avg",groupBy:["label"],groupByLabel:["service_name"],info:x
,en:{instance:{one:"system",other:"systems"}}},"systemd.services.dc_reference":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],info:C
,en:{instance:{one:"system",other:"systems"}}},"systemd.services.dc_not_cache":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],info:P
,en:{instance:{one:"system",other:"systems"}}},"systemd.services.dc_not_found":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],info:B
,en:{instance:{one:"system",other:"systems"}}},"systemd.services.cachestat_ratio":{aggregationMethod:"avg",groupBy:["label"],groupByLabel:["service_name"],info:j
,en:{instance:{one:"system",other:"systems"}}},"systemd.services.cachestat_dirties":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],info:G
,en:{instance:{one:"system",other:"systems"}}},"systemd.services.cachestat_hits":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],info:W
,en:{instance:{one:"system",other:"systems"}}},"systemd.services.cachestat_misses":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],info:K
,en:{instance:{one:"system",other:"systems"}}},"systemd.services.shmget":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],info:Z
,en:{instance:{one:"system",other:"systems"}}},"systemd.services.shmat":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],info:Y
,en:{instance:{one:"system",other:"systems"}}},"systemd.services.shmdt":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],info:X
,en:{instance:{one:"system",other:"systems"}}},"systemd.services.shmctl":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],info:J
,en:{instance:{one:"system",other:"systems"}}},"systemd.services.net_conn_ipv4":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],info:$
,en:{instance:{one:"system",other:"systems"}}},"systemd.services.net_conn_ipv6":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],info:ee
,en:{instance:{one:"system",other:"systems"}}},"systemd.services.net_bytes_send":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],info:te
,en:{instance:{one:"system",other:"systems"}}},"systemd.services.net_bytes_recv":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],info:ne
,en:{instance:{one:"system",other:"systems"}}},"systemd.services.net_tcp_send":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],info:se
,en:{instance:{one:"system",other:"systems"}}},"systemd.services.net_tcp_recv":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],info:ae
,en:{instance:{one:"system",other:"systems"}}},"systemd.services.net_retransmit":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],info:oe
,en:{instance:{one:"system",other:"systems"}}},"systemd.services.net_udp_send":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],info:re
,en:{instance:{one:"system",other:"systems"}}},"systemd.services.net_udp_recv":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["service_name"],info:ie
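/*
  Every systemd.services.* entry above uses groupBy:["label"] with groupByLabel:["service_name"], i.e. points sharing the same service_name label are folded into one series per service. A minimal sketch of that fold under aggregationMethod:"sum" (the sample points are illustrative):

  const points = [
    { labels: { service_name: "nginx.service" }, value: 2 },
    { labels: { service_name: "sshd.service" }, value: 1 },
    { labels: { service_name: "nginx.service" }, value: 3 },
  ];
  const byService = {};
  for (const p of points) {
    const key = p.labels.service_name;
    byService[key] = (byService[key] || 0) + p.value;
  }
  // byService -> { "nginx.service": 5, "sshd.service": 1 }
*/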
,en:{instance:{one:"system",other:"systems"}}},"beanstalk.cpu_usage":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total CPU time consumed",colors:s.default[4],layout:{left:0,top:0,width:3,height:5}}],info:"Amount of CPU time used by beanstalkd in user and system modes."},"beanstalk.jobs_rate":{info:"The rate of jobs processed by the beanstalkd server."},"beanstalk.connections_rate":{mainheads:[{groupBy:["selected"],aggregationMethod:"avg",chartLibrary:"gauge",title:"Average Connections Rate",colors:s.default[12],layout:{left:3,top:0,width:3,height:5}}],info:"The rate of connections opened to beanstalkd."},"beanstalk.commands_rate":{mainheads:[{groupBy:["selected"],aggregationMethod:"avg",chartLibrary:"gauge",title:"Average Commands Received Rate",colors:s.default[7],layout:{left:6,top:0,width:3,height:5}}],info:"The rate of commands received by beanstalkd."},"beanstalk.current_tubes":{info:"Total number of current tubes on the server including the default tube (which always exists)."},"beanstalk.current_jobs":{info:"Current number of jobs in all tubes grouped by status: urgent, ready, reserved, delayed and buried."},"beanstalk.current_connections":{info:"Current number of connections grouped by connection type: written, producers, workers, waiting."},"beanstalk.binlog":{info:"The rate of records written to the binlog and migrated as part of compaction."},"beanstalk.uptime":{aggregationMethod:"min",mainheads:[{groupBy:["selected"],aggregationMethod:"min",chartLibrary:"number",title:"Minimum Uptime",layout:{left:9,top:0,width:3,height:5}}],info:"Total time the beanstalkd server has been up."},"beanstalk.jobs":{info:"Number of jobs currently in the tube grouped by status: urgent, ready, reserved, delayed and buried."},"beanstalk.connections":{info:"The current number of connections to this tube grouped by connection type: using, waiting and watching."},"beanstalk.commands":{info:"The rate of delete and pause commands executed by beanstalkd."},"beanstalk.pause":{info:"Shows how long the tube has been paused, and how long remains on the pause."},"ceph.general_usage":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Usage",colors:s.default[4],layout:{left:0,top:0,width:3,height:5}}],info:"The used and available space across the ceph cluster."},"ceph.general_objects":{mainheads:[{groupBy:["selected"],chartLibrary:"number",title:"Total Objects",colors:s.default[12],layout:{left:3,top:0,width:3,height:5}}],info:"Total number of objects stored on the ceph cluster."},"ceph.general_bytes":{info:"Cluster read and write data per second."},"ceph.general_operations":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Operations",colors:s.default[4],layout:{left:0,top:0,width:3,height:5}}],info:"Number of read and write operations per second."},"ceph.general_latency":{aggregationMethod:"avg",mainheads:[{groupBy:["selected"],aggregationMethod:"avg",chartLibrary:"gauge",title:"Average Latency",colors:s.default[4],layout:{left:0,top:0,width:3,height:5}}],info:"Total of apply and commit latency in all OSDs. The apply latency is the total time taken to flush an update to disk. 
The commit latency is the total time taken to commit an operation to the journal."},"ceph.pool_usage":{info:"The used space in each pool."},"ceph.pool_objects":{info:"Number of objects present in each pool."},"ceph.pool_read_bytes":{info:"The rate of read data per second in each pool."},"ceph.pool_write_bytes":{info:"The rate of write data per second in each pool."},"ceph.pool_read_objects":{info:"Number of read objects per second in each pool."},"ceph.pool_write_objects":{info:"Number of write objects per second in each pool."},"ceph.osd_usage":{info:"The used space in each OSD."},"ceph.osd_size":{info:"Each OSD's size."},"ceph.apply_latency":{aggregationMethod:"avg",info:"Time taken to flush an update in each OSD."},"ceph.commit_latency":{aggregationMethod:"avg",info:"Time taken to commit an operation to the journal in each OSD."},"web_log.squid_response_statuses":{info:"Squid responses by type. success includes 1xx, 2xx, 000, 304, error includes 5xx and 6xx, redirect includes 3xx except 304, bad includes 4xx, other are all the other responses.",mainheads:[{groupBy:["selected"],selectedDimensions:["success"],chartLibrary:"gauge",title:"Total Successful Responses"},{groupBy:["selected"],selectedDimensions:["redirect"],chartLibrary:"gauge",title:"Total Redirects"},{groupBy:["selected"],selectedDimensions:["bad"],chartLibrary:"gauge",title:"Total Bad Requests"},{groupBy:["selected"],selectedDimensions:["error"],chartLibrary:"gauge",title:"Total Server Errors"}],en:{instance:{one:"web server",other:"web servers"}}},"web_log.squid_response_codes":{info:'Web server responses by code family. According to HTTP standards 1xx are informational responses, 2xx are successful responses, 3xx are redirects (although they include 304 which is used as "not modified"), 4xx are bad requests, 5xx are internal server errors. Squid also defines 000 mostly for UDP requests, and 6xx for broken upstream servers sending wrong headers. Finally, other are non-standard responses, and unmatched counts the lines in the log file that are not matched by the plugin (let us know if you have any unmatched).',en:{instance:{one:"web server",other:"web servers"}}},"web_log.squid_duration":{mainheads:[{groupBy:["selected"],aggregationMethod:"avg",selectedDimensions:["avg"],chartLibrary:"gauge",title:"Average Response Time"}],en:{instance:{one:"web server",other:"web servers"}}},"web_log.squid_detailed_response_codes":{info:"Number of responses for each response code individually.",en:{instance:{one:"web server",other:"web servers"}}},"web_log.squid_clients":{info:"Unique client IPs accessing squid, within each data collection iteration. If data collection is per second, this chart shows unique client IPs per second.",en:{instance:{one:"web server",other:"web servers"}}},"web_log.squid_clients_all":{info:'Unique client IPs accessing squid since the last restart of netdata. This plugin keeps in memory all the unique IPs that have accessed the server. On very busy squid servers (several millions of unique IPs) you may want to disable this chart (check /etc/netdata/go.d/web_log.conf).',en:{instance:{one:"web server",other:"web servers"}}},"web_log.squid_transport_methods":{info:"Break down per delivery method: TCP are requests on the HTTP port (usually 3128), UDP are requests on the ICP port (usually 3130), or HTCP port (usually 4128). If ICP logging was disabled using the log_icp_queries option, no ICP replies will be logged. NONE is used to state that squid delivered an unusual response or no response at all. 
Seen with cachemgr requests and errors, usually when the transaction fails before being classified into one of the above outcomes. Also seen with responses to CONNECT requests.",en:{instance:{one:"web server",other:"web servers"}}},"web_log.squid_code":{info:'These are combined squid result status codes. A break down per component is given in the following charts. Check the squid documentation about them.',en:{instance:{one:"web server",other:"web servers"}}},"web_log.squid_handling_opts":{info:"These tags are optional and describe why the particular handling was performed or where the request came from. CLIENT means that the client request placed limits affecting the response. Usually seen when the client issued a no-cache or analogous cache control command along with the request; thus, the cache has to validate the object. IMS states that the client sent a revalidation (conditional) request. ASYNC is used when the request was generated internally by Squid. Usually this is background fetches for cache information exchanges, background revalidation from stale-while-revalidate cache controls, or ESI sub-objects being loaded. SWAPFAIL is assigned when the object was believed to be in the cache, but could not be accessed. A new copy was requested from the server. REFRESH when a revalidation (conditional) request was sent to the server. SHARED when this request was combined with an existing transaction by collapsed forwarding. NOTE: the existing request is not marked as SHARED. REPLY when particular handling was requested in the HTTP reply from server or peer. Usually seen on DENIED due to http_reply_access ACLs preventing delivery of the server's response object to the client.",en:{instance:{one:"web server",other:"web servers"}}},"web_log.squid_object_types":{info:"These tags are optional and describe what type of object was produced. NEGATIVE is only seen on HIT responses, indicating the response was a cached error response. e.g. 404 not found. STALE means the object was cached and served stale. This is usually caused by stale-while-revalidate or stale-if-error cache controls. OFFLINE when the requested object was retrieved from the cache during offline_mode. The offline mode never validates any object. INVALID when an invalid request was received. An error response was delivered indicating what the problem was. FAIL is only seen on REFRESH to indicate the revalidation request failed. The response object may be the server provided network error or the stale object which was being revalidated depending on stale-if-error cache control. MODIFIED is only seen on REFRESH responses to indicate revalidation produced a new modified object. UNMODIFIED is only seen on REFRESH responses to indicate revalidation produced a 304 (Not Modified) status, which was relayed to the client. REDIRECT when squid generated an HTTP redirect response to this request.",en:{instance:{one:"web server",other:"web servers"}}},"web_log.squid_cache_events":{info:'These tags are optional and describe whether the response was loaded from cache, network, or otherwise. HIT when the response object delivered was the local cache object. MEM when the response object came from memory cache, avoiding disk accesses. Only seen on HIT responses. MISS when the response object delivered was the network response object. DENIED when the request was denied by access controls. 
NOFETCH an ICP specific type, indicating service is alive, but not to be used for this request (sent during "-Y" startup, or during frequent failures, a cache in hit only mode will return either UDP_HIT or UDP_MISS_NOFETCH. Neighbours will thus only fetch hits). TUNNEL when a binary tunnel was established for this transaction.',en:{instance:{one:"web server",other:"web servers"}}},"web_log.squid_transport_errors":{info:"These tags are optional and describe some error conditions which occurred during response delivery (if any). ABORTED when the response was not completed due to the connection being aborted (usually by the client). TIMEOUT when the response was not completed due to a connection timeout.",en:{instance:{one:"web server",other:"web servers"}}},"web_log.type_requests":{info:"Web server responses by type. success includes 1xx, 2xx, 304 and 401, error includes 5xx, redirect includes 3xx except 304, bad includes 4xx except 401, other are all the other responses.",mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Total Requests by Type",layout:{left:3,top:0,width:3,height:5}}],en:{instance:{one:"web server",other:"web servers"}}},"web_log.request_processing_time":{mainheads:[{groupBy:["selected"],aggregationMethod:"avg",selectedDimensions:["avg"],chartLibrary:"gauge",title:"Average Response Time"}],en:{instance:{one:"web server",other:"web servers"}}},"portcheck.latency":{aggregationMethod:"avg",info:"The latency describes the time spent connecting to a TCP port. No data is sent or received. Currently, the accuracy of the latency is low and should be used as reference only.",en:{instance:{one:"service",other:"services"}}},"portcheck.status":{info:"The status chart verifies the availability of the service. Each status dimension will have a value of 1 if triggered. Dimension success is 1 only if the connection could be established. This chart is most useful for alerts and third-party apps.",en:{instance:{one:"service",other:"services"}}},"chrony.stratum":{aggregationMethod:"avg",mainheads:[{groupBy:["selected"],aggregationMethod:"max",chartLibrary:"easypiechart",valueRange:[0,null],title:"Maximum Stratum",colors:s.default[1],layout:{left:0,top:0,width:3,height:5}}],info:"The stratum indicates the distance (hops) to the computer with the reference clock. The higher the stratum number, the more timing accuracy and stability degrade.",en:{instance:{one:"system",other:"systems"}}},"chrony.current_correction":{info:"Any error in the system clock is corrected by slightly speeding up or slowing down the system clock until the error has been removed, and then returning to the system clock\u2019s normal speed. A consequence of this is that there will be a period when the system clock (as read by other programs) will be different from chronyd's estimate of the current true time (which it reports to NTP clients when it is operating as a server). 
The reported value is the difference due to this effect.",en:{instance:{one:"system",other:"systems"}}},"chrony.root_delay":{aggregationMethod:"avg",mainheads:[{groupBy:["selected"],aggregationMethod:"avg",chartLibrary:"easypiechart",title:"Average Root Delay",valueRange:[0,null],colors:s.default[12],layout:{left:3,top:0,width:3,height:5}}],info:"The total of the network path delays to the stratum-1 computer from which the computer is ultimately synchronised.",en:{instance:{one:"system",other:"systems"}}},"chrony.root_dispersion":{aggregationMethod:"avg",info:"The total dispersion accumulated through all the computers back to the stratum-1 computer from which the computer is ultimately synchronised. Dispersion is due to system clock resolution, statistical measurement variations, etc.",en:{instance:{one:"system",other:"systems"}}},"chrony.last_offset":{aggregationMethod:"avg",info:"The estimated local offset on the last clock update. A positive value indicates the local time (as previously estimated true time) was ahead of the time sources.",en:{instance:{one:"system",other:"systems"}}},"chrony.frequency":{aggregationMethod:"avg",mainheads:[{groupBy:["selected"],aggregationMethod:"max",chartLibrary:"easypiechart",title:"Max Frequency",colors:s.default[7],layout:{left:6,top:0,width:3,height:5}}],info:"The frequency is the rate by which the system\u2019s clock would be wrong if chronyd was not correcting it. It is expressed in ppm (parts per million). For example, a value of 1 ppm would mean that when the system\u2019s clock thinks it has advanced 1 second, it has actually advanced by 1.000001 seconds relative to true time.",en:{instance:{one:"system",other:"systems"}}},"chrony.residual_frequency":{aggregationMethod:"avg",info:"The residual frequency for the currently selected reference source. This reflects any difference between what the measurements from the reference source indicate the frequency should be and the frequency currently being used. The reason this is not always zero is that a smoothing procedure is applied to the frequency.",en:{instance:{one:"system",other:"systems"}}},"chrony.skew":{aggregationMethod:"avg",mainheads:[{groupBy:["selected"],aggregationMethod:"avg",chartLibrary:"easypiechart",title:"Average Skew",layout:{left:9,top:0,width:3,height:5}}],info:"The estimated error bound on the frequency.",en:{instance:{one:"system",other:"systems"}}},"chrony.ref_measurement_time":{aggregationMethod:"min",info:"The time elapsed since the last measurement from the reference source was processed.",en:{instance:{one:"system",other:"systems"}}},"chrony.leap_status":{info:"

    The current leap status of the source.

    Normal - indicates the normal status (no leap second). InsertSecond - indicates that a leap second will be inserted at the end of the month. DeleteSecond - indicates that a leap second will be deleted at the end of the month. Unsynchronised - the server has not synchronised properly with the NTP server.

    ",en:{instance:{one:"system",other:"systems"}}},"chrony.activity":{info:"

    The number of servers and peers that are online and offline.

    Online - the server or peer is currently online (i.e. assumed by chronyd to be reachable). Offline - the server or peer is currently offline (i.e. assumed by chronyd to be unreachable, and no measurements from it will be attempted). BurstOnline - a burst command has been initiated for the server or peer and is being performed. After the burst is complete, the server or peer will be returned to the online state. BurstOffline - a burst command has been initiated for the server or peer and is being performed. After the burst is complete, the server or peer will be returned to the offline state. Unresolved - the name of the server or peer was not resolved to an address yet.

    ",en:{instance:{one:"system",other:"systems"}}},"chrony.rms_offset":{aggregationMethod:"avg",info:"The root mean square (RMS) offset of the system clock from true time. Large offsets may indicate a problem with the clock or network synchronization.",en:{instance:{one:"system",other:"systems"}}},"chrony.update_interval":{aggregationMethod:"avg",info:"The interval between clock updates. Shorter intervals may improve accuracy but may also increase network load.",en:{instance:{one:"system",other:"systems"}}},"couchdb.active_tasks":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Active Tasks",colors:s.default[1],layout:{left:0,top:0,width:3,height:5}}],info:"Active tasks running on this CouchDB cluster. Four types of tasks currently exist: indexer (view building), replication, database compaction and view compaction.",en:{instance:{one:"cluster",other:"clusters"}}},"couchdb.replicator_jobs":{info:'Detailed breakdown of any replication jobs in progress on this node. For more information, see the replicator documentation.',en:{instance:{one:"cluster",other:"clusters"}}},"couchdb.open_files":{info:'Count of all files held open by CouchDB. If this value seems pegged at 1024 or 4096, your server process is probably hitting the open file handle limit and needs to be increased.',en:{instance:{one:"cluster",other:"clusters"}}},"couchdb.activity":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Activity",colors:s.default[4],layout:{left:3,top:0,width:3,height:5}}],info:"The activity of CouchDB nodes in the cluster. The number of requests being handled by the nodes per second. High values may indicate a high workload on the nodes and potential performance issues.",en:{instance:{one:"cluster",other:"clusters"}}},"couchdb.request_methods":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Request Methods",layout:{left:6,top:0,width:3,height:5}}],info:"The distribution of request methods being used on CouchDB nodes in the cluster. This can be useful for identifying the most common operations being performed on the nodes and potential performance bottlenecks.",en:{instance:{one:"cluster",other:"clusters"}}},"couchdb.response_codes":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Response Codes",layout:{left:9,top:0,width:3,height:5}}],info:"The distribution of response codes returned by CouchDB nodes in the cluster. This can be useful for identifying common error codes and potential issues with the nodes.",en:{instance:{one:"cluster",other:"clusters"}}},"couchdb.response_code_classes":{info:"The distribution of response code classes returned by CouchDB nodes in the cluster. This can be useful for identifying the general class of responses being returned by the nodes (e.g. success, error) and potential issues with the nodes.",en:{instance:{one:"cluster",other:"clusters"}}},"couchdb.erlang_vm_memory":{info:"The amount of memory used by the Erlang virtual machine (VM) running on CouchDB nodes in the cluster. This can be useful for monitoring the overall memory usage of the nodes and potential capacity issues.",en:{instance:{one:"cluster",other:"clusters"}}},"couchdb.proccounts":{info:"The number of Erlang processes running on CouchDB nodes in the cluster. This can be useful for monitoring the overall workload of the nodes and potential performance issues.",en:{instance:{one:"cluster",other:"clusters"}}},"couchdb.peakmsgqueue":{info:"The maximum size of the message queue on CouchDB nodes in the cluster. 
This can be useful for monitoring the overall workload of the nodes and potential performance issues.",en:{instance:{one:"cluster",other:"clusters"}}},"couchdb.reductions":{info:"The number of reductions performed by the Erlang VM on CouchDB nodes in the cluster. Reductions are a measure of the computational work done by the VM and high values may indicate a high workload on the nodes and potential performance issues.",en:{instance:{one:"cluster",other:"clusters"}}},"couchdb.db_sizes_file":{info:"The size of CouchDB databases on disk, including data and metadata. This can be useful for monitoring the overall size of the databases and potential capacity issues.",en:{instance:{one:"cluster",other:"clusters"}}},"couchdb.db_sizes_external":{info:"The total size of CouchDB databases in external storage in bytes.",en:{instance:{one:"cluster",other:"clusters"}}},"couchdb.db_sizes_active":{info:"The total size of CouchDB databases in active memory in bytes.",en:{instance:{one:"cluster",other:"clusters"}}},"couchdb.db_doc_count":{info:"The total number of documents stored in CouchDB databases.",en:{instance:{one:"cluster",other:"clusters"}}},"couchdb.db_doc_del_count":{info:"The total number of deleted documents stored in CouchDB databases.",en:{instance:{one:"cluster",other:"clusters"}}},"freeradius.authentication":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Authentication Requests",colors:s.default[0],layout:{left:0,top:0,width:3,height:5}}],info:"The number of authentication requests that have been processed by the FreeRADIUS server. FreeRADIUS is a RADIUS server, and authentication requests are requests from clients to authenticate themselves with the server. Monitoring this metric can provide insight into the usage of the FreeRADIUS server and can help identify any issues with authentication requests.",en:{instance:{one:"server",other:"servers"}}},"freeradius.authentication_access_responses":{info:"The number of access responses that have been sent by the FreeRADIUS server in response to authentication requests. Access responses are messages sent by the server to either grant or deny access to a client based on the authentication request. Monitoring this metric can provide insight into the usage of the FreeRADIUS server and can help identify any issues with authentication requests.",en:{instance:{one:"server",other:"servers"}}},"freeradius.bad_authentication":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Bad Authentication Requests",colors:s.default[1],layout:{left:3,top:0,width:3,height:5}}],info:"The number of bad authentication requests that have been received by the FreeRADIUS server. Bad authentication requests are requests that are improperly formatted or contain invalid data. Monitoring this metric can provide insight into the usage of the FreeRADIUS server and can help identify any issues with authentication requests.",en:{instance:{one:"server",other:"servers"}}},"freeradius.proxy_authentication":{info:"The number of proxy authentication requests that have been processed by the FreeRADIUS server. Proxy authentication requests are requests from other RADIUS servers to authenticate a client on their behalf. 
Monitoring this metric can provide insight into the usage of the FreeRADIUS server as a proxy and can help identify any issues with proxy authentication requests.",en:{instance:{one:"server",other:"servers"}}},"freeradius.proxy_authentication_access_responses":{info:"The number of access responses that have been sent by the FreeRADIUS server in response to proxy authentication requests. Access responses are messages sent by the server to either grant or deny access to a client based on the authentication request. Monitoring this metric can provide insight into the usage of the FreeRADIUS server as a proxy and can help identify any issues with proxy authentication requests.",en:{instance:{one:"server",other:"servers"}}},"geth.goroutines":{info:"The number of goroutines in the Geth process.",en:{instance:{one:"process",other:"processes"}}},"geth.chaindata_db_size":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total chain data size",colors:s.default[4],layout:{left:0,top:0,width:3,height:5}}],info:"The size of the chain data database.",en:{instance:{one:"database",other:"databases"}}},"geth.chainhead":{info:"The current head of the blockchain."},"geth.p2p_bandwidth":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Bandwidth",colors:s.default[12],layout:{left:3,top:0,width:3,height:5}}],info:"The amount of bandwidth used by the P2P network.",en:{instance:{one:"network",other:"networks"}}},"geth.reorgs":{info:"The number of blockchain reorgs that have occurred."},"geth.reorgs_blocks":{info:"The number of blocks that have been reorganized in a blockchain reorg."},"geth.p2p_peers_calls":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total P2P peers calls",colors:s.default[7],layout:{left:6,top:0,width:3,height:5}}],info:"The number of P2P peer calls made by the node."},"haproxy.backend_current_sessions":{info:"The number of current sessions in the backend of the HAProxy server. A session is a connection between a client and the server. Monitoring this metric can provide insight into the usage of the HAProxy server and can help identify any issues with session management.",en:{instance:{one:"HAProxy server",other:"HAProxy servers"}}},"haproxy.backend_sessions":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"No. of Backend Sessions per proxy",layout:{left:0,top:0,width:3,height:5}}],info:"The total number of sessions in the backend of the HAProxy server. A session is a connection between a client and the server. Monitoring this metric can provide insight into the usage of the HAProxy server and can help identify any issues with session management.",en:{instance:{one:"HAProxy server",other:"HAProxy servers"}}},"haproxy.backend_response_time_average":{aggregationMethod:"avg",mainheads:[{groupBy:["dimension"],aggregationMethod:"avg",chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Average Response Time per proxy",layout:{left:3,top:0,width:3,height:5}}],info:"The average response time of the backend of the HAProxy server. The response time is the time it takes for the server to respond to a client's request. 
Monitoring this metric can provide insight into the performance of the HAProxy server and can help identify any issues with slow response times.",en:{instance:{one:"HAProxy server",other:"HAProxy servers"}}},"haproxy.backend_queue_time_average":{aggregationMethod:"avg",mainheads:[{chartLibrary:"bars",title:"Average Queue Time per proxy",layout:{left:9,top:0,width:3,height:5}}],info:"The average queue time of the backend of the HAProxy server. The queue time is the time a client's request spends in the server's queue before being processed. Monitoring this metric can provide insight into the performance of the HAProxy server and can help identify any issues with long queue times.",en:{instance:{one:"HAProxy server",other:"HAProxy servers"}}},"haproxy.backend_current_queue":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Queue Size per proxy",layout:{left:6,top:0,width:3,height:5}}],info:"The current number of requests in the queue of the backend of the HAProxy server. Monitoring this metric can provide insight into the performance of the HAProxy server and can help identify any issues with the server's ability to handle incoming requests.",en:{instance:{one:"HAProxy server",other:"HAProxy servers"}}},"haproxy.backend_http_responses":{info:"The number of HTTP responses sent by the backend of the HAProxy server. HTTP responses are messages sent by the server in response to client requests. Monitoring this metric can provide insight into the usage of the HAProxy server and can help identify any issues with HTTP responses.",en:{instance:{one:"HAProxy server",other:"HAProxy servers"}}},"haproxy.backend_network_io":{info:"The network IO of the backend of the HAProxy server. Network IO is the amount of data transferred over the network by the server. Monitoring this metric can provide insight into the performance of the HAProxy server and can help identify any issues with network performance.",en:{instance:{one:"HAProxy server",other:"HAProxy servers"}}},"hdfs.heap_memory":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Heap Memory",colors:s.default[4],layout:{left:0,top:0,width:3,height:5}}],info:"The amount of heap memory used by the HDFS service. Heap memory is the memory allocated to the Java virtual machine for storing objects. Monitoring this metric can provide insight into the memory usage of the HDFS service and can help identify any issues with memory allocation.",en:{instance:{one:"HDFS service",other:"HDFS services"}}},"hdfs.gc_count_total":{info:"The total number of garbage collection (GC) events that have occurred in the HDFS service. GC events are processes that reclaim memory by removing unused objects. Monitoring this metric can provide insight into the performance of the HDFS service and can help identify any issues with GC performance.",en:{instance:{one:"HDFS service",other:"HDFS services"}}},"hdfs.gc_time_total":{info:"The total amount of time spent on garbage collection (GC) in the HDFS service. GC is a process that reclaims memory by removing unused objects. Monitoring this metric can provide insight into the performance of the HDFS service and can help identify any issues with GC performance.",en:{instance:{one:"HDFS service",other:"HDFS services"}}},"hdfs.gc_threshold":{info:"The GC threshold of the HDFS service. The GC threshold is the point at which the HDFS service triggers a GC event. 
Monitoring this metric can provide insight into the performance of the HDFS service and can help identify any issues with GC performance.",en:{instance:{one:"HDFS service",other:"HDFS services"}}},"hdfs.rpc_bandwidth":{info:"The amount of bandwidth being used by HDFS Remote Procedure Calls (RPCs).",en:{instance:{one:"HDFS service",other:"HDFS services"}}},"hdfs.rpc_calls":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total RPC Calls",colors:s.default[7],layout:{left:3,top:0,width:3,height:5}}],info:"The number of HDFS Remote Procedure Calls (RPCs) being made.",en:{instance:{one:"HDFS service",other:"HDFS services"}}},"hdfs.avg_queue_time":{aggregationMethod:"avg",mainheads:[{groupBy:["selected"],aggregationMethod:"avg",chartLibrary:"gauge",title:"Average Queue Time",colors:s.default[12],layout:{left:6,top:0,width:3,height:5}}],info:"The average time spent in the queue for HDFS calls.",en:{instance:{one:"HDFS service",other:"HDFS services"}}},"hdfs.avg_processing_time":{aggregationMethod:"avg",mainheads:[{groupBy:["selected"],aggregationMethod:"avg",chartLibrary:"gauge",title:"Average Processing Time",colors:s.default[0],layout:{left:9,top:0,width:3,height:5}}],info:"The average time spent processing HDFS calls.",en:{instance:{one:"HDFS service",other:"HDFS services"}}},"hdfs.datanode_capacity":{info:"The capacity of the HDFS DataNode.",en:{instance:{one:"HDFS service",other:"HDFS services"}}},"hdfs.datanode_bandwidth":{info:"The bandwidth used by the HDFS DataNode.",en:{instance:{one:"HDFS service",other:"HDFS services"}}},"isc_dhcps.active_leases_total":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Active Leases",colors:s.default[0],layout:{left:0,top:0,width:3,height:5}}],info:"The total number of active leases in the ISC DHCP server. A lease is a contract that allows a client to use a particular IP address for a specified period of time. Monitoring this metric can provide insight into the usage of the ISC DHCP server and can help identify any issues with lease management.",en:{instance:{one:"DHCP server",other:"DHCP servers"}}},"isc_dhcps.pool_active_leases":{info:"The number of active leases in a DHCP pool of the ISC DHCP server. A DHCP pool is a range of IP addresses that are available for allocation to clients. Monitoring this metric can provide insight into the usage of the ISC DHCP server and can help identify any issues with DHCP pool management.",en:{instance:{one:"DHCP server",other:"DHCP servers"}}},"isc_dhcps.pool_utilization":{aggregationMethod:"avg",mainheads:[{groupBy:["selected"],aggregationMethod:"avg",chartLibrary:"gauge",title:"Average Pool Utilization",colors:s.default[12],layout:{left:6,top:0,width:3,height:5}}],info:"The utilization of a DHCP pool in the ISC DHCP server. The utilization is the percentage of IP addresses in the pool that are currently leased to clients. Monitoring this metric can provide insight into the usage of the ISC DHCP server and can help identify any issues with DHCP pool utilization.",en:{instance:{one:"DHCP server",other:"DHCP servers"}}},"btrfs.disk":{family:"utilization",mainheads:[{groupBy:["selected"],aggregationMethod:"max",chartLibrary:"easypiechart",selectedDimensions:["unallocated"],title:"Maximum Unallocated Physical Disk Space",colors:s.default[1],layout:{left:0,top:0,width:3,height:5}}],info:"Physical disk usage of BTRFS. The disk space reported here is the raw physical disk space assigned to the BTRFS volume (i.e. before any RAID levels). 
BTRFS uses a two-stage allocator, first allocating large regions of disk space for one type of block (data, metadata, or system), and then using a regular block allocator inside those regions. unallocated is the physical disk space that is not allocated yet and is available to become data, metadata or system on demand. When unallocated is zero, all available disk space has been allocated to a specific function. Healthy volumes should ideally have at least five percent of their total space unallocated. You can keep your volume healthy by running the btrfs balance command on it regularly (check man btrfs-balance for more info). Note that some of the space listed as unallocated may not actually be usable if the volume uses devices of different sizes.",en:{instance:{one:"device",other:"devices"}}},"btrfs.data":{family:"utilization",mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Logical Disk Usage (data)",colors:s.default[4],layout:{left:3,top:0,width:3,height:5}}],info:"Logical disk usage for BTRFS data. Data chunks are used to store the actual file data (file contents). The disk space reported here is the usable allocation (i.e. after any striping or replication). Healthy volumes should ideally have no more than a few GB of free space reported here persistently. Running btrfs balance can help here.",en:{instance:{one:"device",other:"devices"}}},"btrfs.metadata":{family:"utilization",mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Logical Disk Usage (metadata)",colors:s.default[7],layout:{left:6,top:0,width:3,height:5}}],info:"Logical disk usage for BTRFS metadata. Metadata chunks store most of the filesystem internal structures, as well as information like directory structure and file names. The disk space reported here is the usable allocation (i.e. after any striping or replication). Healthy volumes should ideally have no more than a few GB of free space reported here persistently. Running btrfs balance can help here.",en:{instance:{one:"device",other:"devices"}}},"btrfs.system":{family:"utilization",mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Logical Disk Usage (system)",colors:s.default[12],layout:{left:9,top:0,width:3,height:5}}],info:"Logical disk usage for BTRFS system. System chunks store information about the allocation of other chunks. The disk space reported here is the usable allocation (i.e. after any striping or replication). The values reported here should be relatively small compared to Data and Metadata, and will scale with the volume size and overall space usage.",en:{instance:{one:"device",other:"devices"}}},"rabbitmq.queued_messages":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Queued Messages",colors:s.default[1],layout:{left:0,top:0,width:3,height:5}}],info:"Overall total of ready and unacknowledged queued messages. Messages that are delivered immediately are not counted here."},"rabbitmq.message_rates":{info:"Overall messaging rates including acknowledgements, deliveries, redeliveries, and publishes."},"rabbitmq.global_counts":{info:"Overall totals for channels, consumers, connections, queues and exchanges."},"rabbitmq.file_descriptors":{info:'Total number of used file descriptors. See Open File Limits for further details.'},"rabbitmq.sockets":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Sockets Used",colors:s.default[7],layout:{left:9,top:0,width:3,height:5}}],info:'Total number of used socket descriptors. 
Each used socket also counts as a used file descriptor. See Open File Limits for further details.'},"rabbitmq.processes":{info:"Total number of processes running within the Erlang VM. This is not the same as the number of processes running on the host."},"rabbitmq.erlang_run_queue":{info:"Number of Erlang processes the Erlang schedulers have queued to run."},"rabbitmq.memory":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Memory Used",colors:s.default[4],layout:{left:3,top:0,width:3,height:5}}],info:'Total amount of memory used by RabbitMQ. This is a complex statistic that can be further analyzed in the management UI. See Memory for further details.'},"rabbitmq.disk_space":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Disk Space Consumed",colors:s.default[12],layout:{left:6,top:0,width:3,height:5}}],info:'Total amount of disk space consumed by the message store(s). See Disk Space Limits for further details.'},"rabbitmq.queue_messages":{info:"Total number of messages and their states in this queue."},"rabbitmq.queue_messages_stats":{info:"Overall messaging rates including acknowledgements, deliveries, redeliveries, and publishes."},"ntpd.sys_offset":{aggregationMethod:"avg",mainheads:[{groupBy:["selected"],aggregationMethod:"max",chartLibrary:"easypiechart",title:"Maximum System Offset",colors:s.default[1],layout:{left:0,top:0,width:3,height:5}}],info:"For hosts without any time-critical services an offset of < 100 ms should be acceptable even with high network latencies. For hosts with time-critical services an offset of about 0.01 ms or less can be achieved by using peers with low delays and configuring optimal poll exponent values.",en:{instance:{one:"system",other:"systems"}}},"ntpd.sys_jitter":{mainheads:[{groupBy:["selected"],aggregationMethod:"avg",chartLibrary:"easypiechart",title:"Average System Jitter",colors:s.default[4],layout:{left:3,top:0,width:3,height:5}}],info:"The jitter statistics are exponentially-weighted RMS averages. The system jitter is defined in the NTPv4 specification; the clock jitter statistic is computed by the clock discipline module.",en:{instance:{one:"system",other:"systems"}}},"ntpd.sys_frequency":{info:"The frequency offset is shown in ppm (parts per million) relative to the frequency of the system. The frequency correction needed for the clock can vary significantly between boots and also due to external influences like temperature or radiation.",en:{instance:{one:"system",other:"systems"}}},"ntpd.sys_wander":{info:"The wander statistics are exponentially-weighted RMS averages.",en:{instance:{one:"system",other:"systems"}}},"ntpd.sys_rootdelay":{aggregationMethod:"avg",mainheads:[{groupBy:["selected"],aggregationMethod:"avg",chartLibrary:"easypiechart",title:"Average System Root Delay",colors:s.default[12],layout:{left:6,top:0,width:3,height:5}}],info:"The rootdelay is the round-trip delay to the primary reference clock, similar to the delay shown by the ping command. A lower delay should result in a lower clock offset.",en:{instance:{one:"system",other:"systems"}}},"ntpd.sys_stratum":{mainheads:[{groupBy:["selected"],aggregationMethod:"max",chartLibrary:"easypiechart",title:"Maximum System Stratum",colors:s.default[7],layout:{left:9,top:0,width:3,height:5}}],info:'The distance in "hops" to the primary reference clock.',en:{instance:{one:"system",other:"systems"}}},"ntpd.sys_tc":{info:'Time constants and poll intervals are expressed as exponents of 2. 
The default poll exponent of 6 corresponds to a poll interval of 64 s. For typical Internet paths, the optimum poll interval is about 64 s. For fast LANs with modern computers, a poll exponent of 4 (16 s) is appropriate. The poll process sends NTP packets at intervals determined by the clock discipline algorithm.',en:{instance:{one:"system",other:"systems"}}},"ntpd.sys_precision":{colors:s.default[6],en:{instance:{one:"system",other:"systems"}}},"ntpd.peer_offset":{mainheads:[{groupBy:["selected"],aggregationMethod:"max",chartLibrary:"easypiechart",title:"Maximum Peer Offset",colors:s.default[1],layout:{left:0,top:5,width:2,height:5}}],info:"The offset of the peer clock relative to the system clock in milliseconds. Smaller values here weight peers more heavily for selection after the initial synchronization of the local clock. For a system providing time service to other systems, these should be as low as possible.",en:{instance:{one:"peer",other:"peers"}}},"ntpd.peer_delay":{aggregationMethod:"avg",mainheads:[{groupBy:["selected"],aggregationMethod:"avg",chartLibrary:"easypiechart",title:"Average Peer Delay",colors:s.default[12],layout:{left:0,top:5,width:2,height:5}}],info:"The round-trip time (RTT) for communication with the peer, similar to the delay shown by the ping command. Not as critical as either the offset or jitter, but still factored into the selection algorithm (because as a general rule, lower delay means more accurate time). In most cases, it should be below 100 ms.",en:{instance:{one:"peer",other:"peers"}}},"ntpd.peer_dispersion":{info:"This is a measure of the estimated error between the peer and the local system. Lower values here are better.",en:{instance:{one:"peer",other:"peers"}}},"ntpd.peer_jitter":{mainheads:[{groupBy:["selected"],aggregationMethod:"avg",chartLibrary:"easypiechart",title:"Average Peer Jitter",colors:s.default[4],layout:{left:0,top:5,width:2,height:5}}],info:"This is essentially a remote estimate of the peer's system_jitter value. Lower values here weight highly in favor of peer selection, and this is a good indicator of overall quality of a given time server (good servers will have values not exceeding single digit milliseconds here, with high quality stratum one servers regularly having sub-millisecond jitter).",en:{instance:{one:"peer",other:"peers"}}},"ntpd.peer_xleave":{info:'This variable is used in interleaved mode (used only in NTP symmetric and broadcast modes). See NTP Interleaved Modes.',en:{instance:{one:"peer",other:"peers"}}},"ntpd.peer_rootdelay":{aggregationMethod:"avg",mainheads:[{groupBy:["selected"],aggregationMethod:"avg",chartLibrary:"easypiechart",title:"Average Peer Root Delay",colors:s.default[7],layout:{left:0,top:5,width:2,height:5}}],info:"For a stratum 1 server, this is the access latency for the reference clock. For lower stratum servers, it is the sum of the peer_delay and peer_rootdelay for the system they are synchronized to. Similarly to peer_delay, lower values here are technically better, but have limited influence in peer selection.",en:{instance:{one:"peer",other:"peers"}}},"ntpd.peer_rootdisp":{info:"This is the same as peer_rootdelay, but measures accumulated peer_dispersion instead of accumulated peer_delay.",en:{instance:{one:"peer",other:"peers"}}},"ntpd.peer_hmode":{info:"The peer_hmode and peer_pmode variables describe the mode of the packets being sent to and received from a given peer. 
Mode 1 is symmetric active (both the local system and the remote peer have each other declared as peers in /etc/ntp.conf), Mode 2 is symmetric passive (only one side has the other declared as a peer), Mode 3 is client, Mode 4 is server, and Mode 5 is broadcast (also used for multicast and manycast operation).",en:{instance:{one:"peer",other:"peers"}}},"ntpd.peer_pmode":{en:{instance:{one:"peer",other:"peers"}}},"ntpd.peer_hpoll":{info:"The peer_hpoll and peer_ppoll variables are log2 representations of the polling interval in seconds.",en:{instance:{one:"peer",other:"peers"}}},"ntpd.peer_ppoll":{en:{instance:{one:"peer",other:"peers"}}},"ntpd.peer_precision":{en:{instance:{one:"peer",other:"peers"}}},"spigotmc.tps":{info:"The running 1, 5, and 15 minute average number of server ticks per second. An idealized server will show 20.0 for all values, but in practice this almost never happens. Typical servers should show approximately 19.98-20.0 here. Lower values indicate progressively more server-side lag (and thus that you need better hardware for your server or a lower user limit). For every 0.05 ticks below 20, redstone clocks will lag behind by approximately 0.25%. Values below approximately 19.50 may interfere with complex free-running redstone circuits and will noticeably slow down growth."},"spigotmc.users":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Users",colors:s.default[12],layout:{left:0,top:0,width:3,height:5}}],info:"The number of currently connected users on the monitored Spigot server."},"boinc.tasks":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Tasks",colors:s.default[12],layout:{left:0,top:0,width:3,height:5}}],info:"The total number of tasks and the number of active tasks. Active tasks are those which are either currently being processed, or are partially processed but suspended.",en:{instance:{one:"client",other:"clients"}}},"boinc.states":{mainheads:[{chartLibrary:"bars",title:"Task States",layout:{left:3,top:0,width:3,height:5}}],info:"Counts of tasks in each task state. The normal sequence of states is New, Downloading, Ready to Run, Uploading, Uploaded. Tasks which are marked Ready to Run may be actively running, or may be waiting to be scheduled. Compute Errors are tasks which failed for some reason during execution. Aborted tasks were manually cancelled, and will not be processed. Failed Uploads are otherwise finished tasks which failed to upload to the server, and usually indicate networking issues.",en:{instance:{one:"client",other:"clients"}}},"boinc.sched":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Task Scheduling States",layout:{left:6,top:0,width:3,height:5}}],info:"Counts of active tasks in each scheduling state. Scheduled tasks are the ones which will run if the system is permitted to process tasks. Preempted tasks are on standby, and will run if a Scheduled task stops running for some reason. Uninitialized tasks should never be present, and indicate that the scheduler has not tried to schedule them yet.",en:{instance:{one:"client",other:"clients"}}},"boinc.process":{info:"Counts of active tasks in each process state. Executing tasks are running right now. Suspended tasks have an associated process, but are not currently running (either because the system isn't processing any tasks right now, or because they have been preempted by higher priority tasks). Quit tasks are exiting gracefully. Aborted tasks exceeded some resource limit, and are being shut down. 
Copy Pending tasks are waiting on a background file transfer to finish. Uninitialized tasks do not have an associated process yet.",en:{instance:{one:"client",other:"clients"}}},"w1sensor.temp":{aggregationMethod:"avg",info:"Temperature derived from 1-Wire temperature sensors.",en:{instance:{one:"sensor",other:"sensors"}}},"logind.sessions":{info:"Local and remote sessions.",en:{instance:{one:"system",other:"systems"}}},"logind.sessions_type":{mainheads:[{chartLibrary:"bars",title:"Session Types",layout:{left:0,top:0,width:3,height:5}}],info:"

    Sessions of each session type.

    Graphical - sessions are running under one of X11, Mir, or Wayland. Console - sessions are usually regular text mode local logins, but depending on how the system is configured may have an associated GUI. Other - sessions are those that do not fall into the above categories (such as sessions for cron jobs or systemd timer units).

    ",en:{instance:{one:"system",other:"systems"}}},"logind.sessions_state":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Session States",layout:{left:3,top:0,width:3,height:5}}],info:"

    Sessions in each session state.

    Online - the session is logged in, but not active, i.e. not in the foreground. Active - the session is logged in and active, i.e. in the foreground. Closing - the session is nominally logged out, but some processes belonging to it are still around.

    ",en:{instance:{one:"system",other:"systems"}}},"logind.users_state":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"User States",layout:{left:6,top:0,width:3,height:5}}],info:"

    Users in each user state.

    Offline - users are not logged in. Closing - users are in the process of logging out without lingering. Online - users are logged in, but have no active sessions. Lingering - users are not logged in, but have one or more services still running. Active - users are logged in, and have at least one active session.

    ",en:{instance:{one:"system",other:"systems"}}},"proxysql.pool_status":{info:"The status of the backend servers. 1=ONLINE backend server is fully operational, 2=SHUNNED backend sever is temporarily taken out of use because of either too many connection errors in a time that was too short, or replication lag exceeded the allowed threshold, 3=OFFLINE_SOFT when a server is put into OFFLINE_SOFT mode, new incoming connections aren't accepted anymore, while the existing connections are kept until they became inactive. In other words, connections are kept in use until the current transaction is completed. This allows to gracefully detach a backend, 4=OFFLINE_HARD when a server is put into OFFLINE_HARD mode, the existing connections are dropped, while new incoming connections aren't accepted either. This is equivalent to deleting the server from a hostgroup, or temporarily taking it out of the hostgroup for maintenance work, -1 Unknown status.",en:{instance:{one:"server",other:"servers"}}},"proxysql.pool_net":{info:"The amount of data sent to/received from the backend (This does not include metadata (packets' headers, OK/ERR packets, fields' description, etc).",en:{instance:{one:"server",other:"servers"}}},"proxysql.pool_overall_net":{info:"The amount of data sent to/received from the all backends (This does not include metadata (packets' headers, OK/ERR packets, fields' description, etc).",en:{instance:{one:"server",other:"servers"}}},"proxysql.questions":{info:"questions total number of queries sent from frontends, slow_queries number of queries that ran for longer than the threshold in milliseconds defined in global variable mysql-long_query_time. ",en:{instance:{one:"server",other:"servers"}}},"proxysql.connections":{mainheads:[{groupBy:["selected"],selectedDimensions:["connected"],chartLibrary:"easypiechart",title:"Total Connected connections",colors:s.default[12],layout:{left:6,top:0,width:3,height:5}},{groupBy:["selected"],selectedDimensions:["aborted"],chartLibrary:"easypiechart",title:"Total Aborted connections",colors:s.default[7],layout:{left:9,top:0,width:3,height:5}}],info:"aborted number of frontend connections aborted due to invalid credential or max_connections reached, connected number of frontend connections currently connected, created number of frontend connections created, non_idle number of frontend connections that are not currently idle. ",en:{instance:{one:"server",other:"servers"}}},"proxysql.pool_latency":{aggregationMethod:"avg",mainheads:[{groupBy:["selected"],aggregationMethod:"max",chartLibrary:"easypiechart",title:"Max Pool Latency",colors:s.default[4],layout:{left:3,top:0,width:3,height:5}}],info:"The currently ping time in microseconds, as reported from Monitor.",en:{instance:{one:"server",other:"servers"}}},"proxysql.queries":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Queries",colors:s.default[1],layout:{left:0,top:0,width:3,height:5}}],info:"The number of queries routed towards this particular backend server.",en:{instance:{one:"server",other:"servers"}}},"proxysql.pool_used_connections":{info:"The number of connections are currently used by ProxySQL for sending queries to the backend server.",en:{instance:{one:"server",other:"servers"}}},"proxysql.pool_free_connections":{info:"The number of connections are currently free. 
They are kept open in order to minimize the time cost of sending a query to the backend server.",en:{instance:{one:"server",other:"servers"}}},"proxysql.pool_ok_connections":{info:"The number of connections that were established successfully.",en:{instance:{one:"server",other:"servers"}}},"proxysql.pool_error_connections":{info:"The number of connections that weren't established successfully.",en:{instance:{one:"server",other:"servers"}}},"proxysql.commands_count":{info:"The total number of commands of that type executed.",en:{instance:{one:"server",other:"servers"}}},"proxysql.commands_duration":{info:"The total time spent executing commands of that type, in ms.",en:{instance:{one:"server",other:"servers"}}},"sensors.sensor_temperature":{groupBy:["node"],aggregationMethod:"max",mainheads:[{groupBy:["selected"],aggregationMethod:"max",chartLibrary:"easypiechart",title:"Maximum Temperature",colors:s.default[1],layout:{left:0,top:0,width:3,height:5}}],info:"The temperature of the system as reported by the sensors. High values can indicate cooling issues.",en:{instance:{one:"sensor",other:"sensors"}}},"sensors.sensor_fan_speed":{groupBy:["node"],aggregationMethod:"avg",mainheads:[{groupBy:["selected"],aggregationMethod:"avg",chartLibrary:"easypiechart",title:"Average Fan Speed",colors:s.default[4],layout:{left:3,top:0,width:3,height:5}}],info:"The speed of the system fans as reported by the sensors. Low values can indicate cooling issues while high values can indicate excessive noise.",en:{instance:{one:"sensor",other:"sensors"}}},"sensors.sensor_voltage":{groupBy:["node"],aggregationMethod:"avg",mainheads:[{groupBy:["selected"],aggregationMethod:"min",chartLibrary:"easypiechart",title:"Minimum Voltage",colors:s.default[7],layout:{left:6,top:0,width:3,height:5}}],info:"The voltage of the system as reported by the sensors. Low values can indicate power issues.",en:{instance:{one:"sensor",other:"sensors"}}},"sensors.sensor_current":{groupBy:["node"],aggregationMethod:"avg",mainheads:[{groupBy:["selected"],aggregationMethod:"max",chartLibrary:"easypiechart",title:"Maximum Current",colors:s.default[12],layout:{left:9,top:0,width:3,height:5}}],info:"The current of the system as reported by the sensors. High values can indicate power issues.",en:{instance:{one:"sensor",other:"sensors"}}},"sensors.sensor_power":{groupBy:["node"],aggregationMethod:"avg",info:"The power of the system as reported by the sensors. High values can indicate power issues.",en:{instance:{one:"sensor",other:"sensors"}}},"sensors.sensor_energy":{groupBy:["node"],aggregationMethod:"avg",info:"The energy of the system as reported by the sensors. High values can indicate power issues.",en:{instance:{one:"sensor",other:"sensors"}}},"sensors.sensor_humidity":{groupBy:["node"],aggregationMethod:"max",info:"The humidity of the system as reported by the sensors. High values can indicate cooling issues.",en:{instance:{one:"sensor",other:"sensors"}}},"sensors.temperature":{groupBy:["node"],aggregationMethod:"max",mainheads:[{groupBy:["selected"],aggregationMethod:"max",chartLibrary:"easypiechart",title:"Maximum Temperature",colors:s.default[1],layout:{left:0,top:0,width:3,height:5}}],info:"The temperature of the system as reported by the sensors. 
High values can indicate cooling issues.",en:{instance:{one:"sensor",other:"sensors"}}},"sensors.fan":{groupBy:["node"],aggregationMethod:"avg",mainheads:[{groupBy:["selected"],aggregationMethod:"avg",chartLibrary:"easypiechart",title:"Average Fan Speed",colors:s.default[4],layout:{left:3,top:0,width:3,height:5}}],info:"The speed of the system fans as reported by the sensors. Low values can indicate cooling issues while high values can indicate excessive noise.",en:{instance:{one:"sensor",other:"sensors"}}},"sensors.voltage":{groupBy:["node"],aggregationMethod:"avg",mainheads:[{groupBy:["selected"],aggregationMethod:"min",chartLibrary:"easypiechart",title:"Minimum Voltage",colors:s.default[7],layout:{left:6,top:0,width:3,height:5}}],info:"The voltage of the system as reported by the sensors. Low values can indicate power issues.",en:{instance:{one:"sensor",other:"sensors"}}},"sensors.current":{groupBy:["node"],aggregationMethod:"avg",mainheads:[{groupBy:["selected"],aggregationMethod:"max",chartLibrary:"easypiechart",title:"Maximum Current",colors:s.default[12],layout:{left:9,top:0,width:3,height:5}}],info:"The current of the system as reported by the sensors. High values can indicate power issues.",en:{instance:{one:"sensor",other:"sensors"}}},"sensors.power":{groupBy:["node"],aggregationMethod:"avg",info:"The power of the system as reported by the sensors. High values can indicate power issues.",en:{instance:{one:"sensor",other:"sensors"}}},"sensors.energy":{groupBy:["node"],aggregationMethod:"avg",info:"The energy of the system as reported by the sensors. High values can indicate power issues.",en:{instance:{one:"sensor",other:"sensors"}}},"sensors.humidity":{groupBy:["node"],aggregationMethod:"max",info:"The humidity of the system as reported by the sensors. High values can indicate cooling issues.",en:{instance:{one:"sensor",other:"sensors"}}},"tomcat.accesses":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Requests",colors:s.default[1],layout:{left:0,top:0,width:3,height:5}}],info:"The number of requests made to the Tomcat server. It is important to monitor this metric to ensure that the server is not overloaded.",en:{instance:{one:"web server",other:"web servers"}}},"tomcat.bandwidth":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Bandwidth",colors:s.default[4],layout:{left:3,top:0,width:3,height:5}}],info:"The amount of network bandwidth used by the Tomcat server. It is important to monitor this metric to ensure that the server is not overloaded.",en:{instance:{one:"web server",other:"web servers"}}},"tomcat.threads":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Threads",colors:s.default[12],layout:{left:6,top:0,width:3,height:5}}],info:"The number of threads running in the Tomcat application. It is important to monitor this metric to ensure that the application is running efficiently.",en:{instance:{one:"web server",other:"web servers"}}},"tor.traffic":{info:"This metric tracks Tor traffic statistics collected from the Tor control port. It is important to monitor this metric to ensure that the Tor network is not overloaded.",en:{instance:{one:"server",other:"servers"}}},"powersupply.capacity":{family:"charge",aggregationMethod:"avg",info:"The current battery charge.",en:{instance:{one:"Power supply",other:"Power supplies"}}},"powersupply.charge":{family:"charge",info:'

    The battery charge in Amp-hours.

    now - the actual charge value. full, empty - the last remembered charge values when the battery became full/empty. They could also mean "the charge value when the battery is considered full/empty at given conditions (temperature, age)", i.e. these attributes represent real thresholds, not design values. full_design, empty_design - design charge values, when the battery is considered full/empty.

    ',en:{instance:{one:"Power supply",other:"Power supplies"}}},"powersupply.energy":{family:"charge",info:'

    The battery charge in Watt-hours.

    now - the actual charge value. full, empty - the last remembered charge value when the battery became full/empty. It could also mean "the value of charge when the battery is considered full/empty at given conditions (temperature, age)", i.e. these attributes represent real thresholds, not design values. full_design, empty_design - the design charge values at which the battery is considered full/empty.

    ',en:{instance:{one:"Power supply",other:"Power supplies"}}},"powersupply.voltage":{family:"voltage",info:'

    The power supply voltage.

    now - the current voltage. max, min - voltage thresholds that the hardware could only guess at (measure and retain) for a given power supply. max_design, min_design - design values for the maximal and minimal power supply voltages, i.e. the voltages at which the battery is considered "full"/"empty" under normal conditions.

    ',en:{instance:{one:"Power supply",other:"Power supplies"}}},"vsphere.host_cpu_utilization":{aggregationMethod:"avg",mainheads:[{groupBy:["label"],groupByLabel:["host"],chartLibrary:"bars",title:"Top Hosts by CPU Usage",dimensionsSort:"valueDesc",colors:s.default[12],layout:{left:0,top:0,width:3,height:5}}],info:"Summary CPU usage statistics across all CPUs/cores.",en:{instance:{one:"host",other:"hosts"}}},"vsphere.host_mem_utilization":{aggregationMethod:"avg",mainheads:[{groupBy:["label"],groupByLabel:["host"],chartLibrary:"bars",title:"Top Hosts by Memory Usage",dimensionsSort:"valueDesc",colors:s.default[1],layout:{left:3,top:0,width:3,height:5}}],info:"Percentage of used machine memory: consumed / machine-memory-size.",en:{instance:{one:"host",other:"hosts"}}},"vsphere.host_mem_usage":{aggregationMethod:"sum",info:'granted is amount of machine memory that is mapped for a host, it equals sum of all granted metrics for all powered-on virtual machines, plus machine memory for vSphere services on the host. consumed is amount of machine memory used on the host, it includes memory used by the Service Console, the VMkernel, vSphere services, plus the total consumed metrics for all running virtual machines. consumed = total host memory - free host memory.active is sum of all active metrics for all powered-on virtual machines plus vSphere services (such as COS, vpxa) on the host.shared is sum of all shared metrics for all powered-on virtual machines, plus amount for vSphere services on the host. sharedcommon is amount of machine memory that is shared by all powered-on virtual machines and vSphere services on the host. shared - sharedcommon = machine memory (host memory) savings (KB). For details see Measuring and Differentiating Types of Memory Usage and Memory Counters articles.',en:{instance:{one:"host",other:"hosts"}}},"vsphere.host_mem_swap_io":{aggregationMethod:"sum",info:"This statistic refers to VMkernel swapping and not to guest OS swapping. in is sum of swapinRate values for all powered-on virtual machines on the host.swapinRate is rate at which VMKernel reads data into machine memory from the swap file. 
out is sum of swapoutRate values for all powered-on virtual machines on the host.swapoutRate is rate at which VMkernel writes to the virtual machine\u2019s swap file from machine memory.",en:{instance:{one:"host",other:"hosts"}}},"vsphere.host_disk_io":{aggregationMethod:"sum",info:"Summary read/write statistics across all disks.",en:{instance:{one:"host",other:"hosts"}}},"vsphere.host_disk_max_latency":{aggregationMethod:"max",info:"latency is highest latency value across all disks.",en:{instance:{one:"host",other:"hosts"}}},"vsphere.host_net_traffic":{aggregationMethod:"sum",info:"Summary receive/transmit statistics across all network interfaces.",en:{instance:{one:"host",other:"hosts"}}},"vsphere.host_net_packets":{aggregationMethod:"sum",info:"Summary receive/transmit statistics across all network interfaces.",en:{instance:{one:"host",other:"hosts"}}},"vsphere.host_net_errors":{aggregationMethod:"sum",info:"Summary receive/transmit statistics across all network interfaces.",en:{instance:{one:"host",other:"hosts"}}},"vsphere.host_net_drops":{aggregationMethod:"sum",info:"Summary receive/transmit statistics across all network interfaces.",en:{instance:{one:"host",other:"hosts"}}},"vsphere.host_overall_status":{aggregationMethod:"sum",info:"green is OK, yellow is might have a problem, red is definitely has a problem, gray is unknown.",en:{instance:{one:"host",other:"hosts"}}},"vsphere.host_system_uptime":{aggregationMethod:"min",en:{instance:{one:"host",other:"hosts"}}},"vsphere.vm_cpu_utilization":{aggregationMethod:"avg",mainheads:[{groupBy:["label"],groupByLabel:["vm"],chartLibrary:"bars",title:"Top VMs by CPU Usage",dimensionsSort:"valueDesc",colors:s.default[12],layout:{left:6,top:0,width:3,height:5}}],info:"Summary CPU usage statistics across all CPUs/cores.",en:{instance:{one:"vm",other:"vms"}}},"vsphere.vm_mem_utilization":{aggregationMethod:"avg",mainheads:[{groupBy:["label"],groupByLabel:["vm"],chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Top VMs by Memory Usage",colors:s.default[4],layout:{left:9,top:0,width:3,height:5}}],info:"Percentage of used virtual machine \u201cphysical\u201d memory: active / virtual machine configured size.",en:{instance:{one:"vm",other:"vms"}}},"vsphere.vm_mem_usage":{aggregationMethod:"sum",info:'granted is amount of guest \u201cphysical\u201d memory that is mapped to machine memory, it includes shared memory amount. consumed is amount of guest \u201cphysical\u201d memory consumed by the virtual machine for guest memory, consumed = granted - memory saved due to memory sharing. active is amount of memory that is actively used, as estimated by VMkernel based on recently touched memory pages. shared is amount of guest \u201cphysical\u201d memory shared with other virtual machines (through the VMkernel\u2019s transparent page-sharing mechanism, a RAM de-duplication technique). For details see Measuring and Differentiating Types of Memory Usage and Memory Counters articles.',en:{instance:{one:"vm",other:"vms"}}},"vsphere.vm_mem_swap_io":{aggregationMethod:"sum",info:"This statistic refers to VMkernel swapping and not to guest OS swapping. in is rate at which VMKernel reads data into machine memory from the swap file. out is rate at which VMkernel writes to the virtual machine\u2019s swap file from machine memory.",en:{instance:{one:"vm",other:"vms"}}},"vsphere.vm_mem_swap_usage":{aggregationMethod:"sum",info:"This statistic refers to VMkernel swapping and not to guest OS swapping. 
swapped is amount of guest physical memory swapped out to the virtual machine's swap file by the VMkernel. Swapped memory stays on disk until the virtual machine needs it.",en:{instance:{one:"vm",other:"vms"}}},"vsphere.vm_disk_io":{aggregationMethod:"sum",info:"Summary read/write statistics across all disks.",en:{instance:{one:"vm",other:"vms"}}},"vsphere.vm_disk_max_latency":{aggregationMethod:"max",info:"latency is highest latency value across all disks.",en:{instance:{one:"vm",other:"vms"}}},"vsphere.vm_net_traffic":{aggregationMethod:"sum",info:"Summary receive/transmit statistics across all network interfaces.",en:{instance:{one:"vm",other:"vms"}}},"vsphere.vm_net_packets":{aggregationMethod:"sum",info:"Summary receive/transmit statistics across all network interfaces.",en:{instance:{one:"vm",other:"vms"}}},"vsphere.vm_net_drops":{aggregationMethod:"sum",info:"Summary receive/transmit statistics across all network interfaces.",en:{instance:{one:"vm",other:"vms"}}},"vsphere.vm_overall_status":{aggregationMethod:"sum",info:"green is OK, yellow is might have a problem, red is definitely has a problem, gray is unknown.",en:{instance:{one:"vm",other:"vms"}}},"vsphere.vm_system_uptime":{aggregationMethod:"min",en:{instance:{one:"vm",other:"vms"}}},"vcsa.system_health_status":{info:"green: all components are healthy; yellow: one or more components might become overloaded soon; orange: one or more components in the appliance might be degraded; red: one or more components might be in an unusable status and the appliance might become unresponsive soon; gray: no health data is available; unknown: collector failed to decode the status.",en:{instance:{one:"server appliance",other:"server appliances"}}},"vcsa.applmgmt_health_status":{info:"green: the component is healthy; yellow: the component is healthy, but may have some problems; orange: the component is degraded, and may have serious problems; red: the component is unavailable, or will stop functioning soon; gray: no health data is available; unknown: collector failed to decode the status.",en:{instance:{one:"server appliance",other:"server appliances"}}},"vcsa.load_health_status":{info:"green: the component is healthy; yellow: the component is healthy, but may have some problems; orange: the component is degraded, and may have serious problems; red: the component is unavailable, or will stop functioning soon; gray: no health data is available; unknown: collector failed to decode the status.",en:{instance:{one:"server appliance",other:"server appliances"}}},"vcsa.mem_health_status":{info:"green: the component is healthy; yellow: the component is healthy, but may have some problems; orange: the component is degraded, and may have serious problems; red: the component is unavailable, or will stop functioning soon; gray: no health data is available; unknown: collector failed to decode the status.",en:{instance:{one:"server appliance",other:"server appliances"}}},"vcsa.swap_health_status":{info:"green: the component is healthy; yellow: the component is healthy, but may have some problems; orange: the component is degraded, and may have serious problems; red: the component is unavailable, or will stop functioning soon; gray: no health data is available; unknown: collector failed to decode the status.",en:{instance:{one:"server appliance",other:"server appliances"}}},"vcsa.database_storage_health_status":{info:"green: the component is healthy; yellow: the component is healthy, but may have some problems; orange: the component is degraded, and may 
have serious problems; red: the component is unavailable, or will stop functioning soon; gray: no health data is available; unknown: collector failed to decode the status.",en:{instance:{one:"server appliance",other:"server appliances"}}},"vcsa.storage_health_status":{info:"green: the component is healthy; yellow: the component is healthy, but may have some problems; orange: the component is degraded, and may have serious problems; red: the component is unavailable, or will stop functioning soon; gray: no health data is available; unknown: collector failed to decode the status.",en:{instance:{one:"server appliance",other:"server appliances"}}},"vcsa.software_packages_health_status":{info:"softwarepackages represents information on available software updates available in the remote vSphere Update Manager repository.green: no updates available; orange: non-security updates are available; red: security updates are available; gray: an error retrieving information on software updates; unknown: collector failed to decode the status.",en:{instance:{one:"server appliance",other:"server appliances"}}},"zookeeper.server_state":{info:"0: unknown, 1: leader, 2: follower, 3: observer, 4: standalone.",en:{instance:{one:"server",other:"servers"}}},"squidlog.requests":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Requests",colors:s.default[1],layout:{left:0,top:0,width:3,height:5}}],info:"Total number of requests (log lines read). It includes unmatched."},"squidlog.excluded_requests":{info:'unmatched counts the lines in the log file that are not matched by the plugin parser (let us know if you have any unmatched).'},"squidlog.type_requests":{info:"Requests by response type:
    • success includes 1xx, 2xx, 0, 304, 401.
    • error includes 5xx and 6xx.
    • redirect includes 3xx except 304.
    • bad includes 4xx except 401.
    "},"squidlog.http_status_code_class_responses":{info:'The HTTP response status code classes. According to rfc7231:
  • 1xx is informational responses.
  • 2xx is successful responses.
  • 3xx is redirects.
  • 4xx is bad requests.
  • 5xx is internal server errors.
  • Squid also uses 0 for a result code being unavailable, and 6xx to signal an invalid header, a proxy error.'},"squidlog.http_status_code_responses":{info:"Number of responses for each http response status code individually."},"squidlog.uniq_clients":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Unique Clients",colors:s.default[4],layout:{left:3,top:0,width:3,height:5}}],info:"Unique clients (requesting instances), within each data collection iteration. If data collection is per second, this chart shows unique clients per second."},"squidlog.bandwidth":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Bandwidth",colors:s.default[7],layout:{left:6,top:0,width:3,height:5}}],info:"The size is the amount of data delivered to the clients. Mind that this does not constitute the net object size, as headers are also counted. Also, failed requests may deliver an error page, the size of which is also logged here."},"squidlog.response_time":{aggregationMethod:"avg",info:"The elapsed time considers how many milliseconds the transaction busied the cache. It differs in interpretation between TCP and UDP:
    • TCP this is basically the time from having received the request to when Squid finishes sending the last byte of the response.
    • UDP this is the time between scheduling a reply and actually sending it.
    Please note that the entries are logged after the reply finished being sent, not during the lifetime of the transaction."},"squidlog.cache_result_code_requests":{info:'The Squid result code is composed of several tags (separated by underscore characters) which describe the response sent to the client. Check the Squid documentation about them.'},"squidlog.cache_result_code_transport_tag_requests":{info:"These tags are always present and describe the delivery method.
    • TCP requests on the HTTP port (usually 3128).
    • UDP requests on the ICP port (usually 3130) or HTCP port (usually 4128).
    • NONE Squid delivered an unusual response or no response at all. Seen with cachemgr requests and errors, usually when the transaction fails before being classified into one of the above outcomes. Also seen with responses to CONNECT requests.
    "},"squidlog.cache_result_code_handling_tag_requests":{info:'These tags are optional and describe why the particular handling was performed or where the request came from.
    • CF at least one request in this transaction was collapsed. See collapsed_forwarding for more details about request collapsing.
    • CLIENT usually seen when the client issued a "no-cache" or analogous cache control command along with the request. Thus, the cache has to validate the object.
    • IMS the client sent a revalidation (conditional) request.
    • ASYNC the request was generated internally by Squid. Usually this is background fetches for cache information exchanges, background revalidation from stale-while-revalidate cache controls, or ESI sub-objects being loaded.
    • SWAPFAIL the object was believed to be in the cache, but could not be accessed. A new copy was requested from the server.
    • REFRESH a revalidation (conditional) request was sent to the server.
    • SHARED this request was combined with an existing transaction by collapsed forwarding.
    • REPLY the HTTP reply from server or peer. Usually seen on DENIED due to http_reply_access ACLs preventing delivery of servers response object to the client.
    '},"squidlog.cache_code_object_tag_requests":{info:'These tags are optional and describe what type of object was produced.
    • NEGATIVE only seen on HIT responses, indicating the response was a cached error response, e.g. 404 Not Found.
    • STALE the object was cached and served stale. This is usually caused by stale-while-revalidate or stale-if-error cache controls.
    • OFFLINE the requested object was retrieved from the cache during offline_mode. The offline mode never validates any object.
    • INVALID an invalid request was received. An error response was delivered indicating what the problem was.
    • FAILED only seen on REFRESH to indicate the revalidation request failed. The response object may be the server provided network error or the stale object which was being revalidated depending on stale-if-error cache control.
    • MODIFIED only seen on REFRESH responses to indicate revalidation produced a new modified object.
    • UNMODIFIED only seen on REFRESH responses to indicate revalidation produced a 304 (Not Modified) status. The client gets either a full 200 (OK), a 304 (Not Modified), or (in theory) another response, depending on the client request and other details.
    • REDIRECT Squid generated an HTTP redirect response to this request.
    '},"squidlog.cache_code_load_source_tag_requests":{info:"These tags are optional and describe whether the response was loaded from cache, network, or otherwise.
    • HIT the response object delivered was the local cache object.
    • MEM the response object came from memory cache, avoiding disk accesses. Only seen on HIT responses.
    • MISS the response object delivered was the network response object.
    • DENIED the request was denied by access controls.
    • NOFETCH an ICP specific type, indicating service is alive, but not to be used for this request.
    • TUNNEL a binary tunnel was established for this transaction.
    "},"squidlog.cache_code_error_tag_requests":{info:"These tags are optional and describe some error conditions which occurred during response delivery.
    • ABORTED the response was not completed due to the connection being aborted (usually by the client).
    • TIMEOUT the response was not completed due to a connection timeout.
    • IGNORED while refreshing a previously cached response A, Squid got a response B that was older than A (as determined by the Date header field). Squid ignored response B (and attempted to use A instead).
    "},"squidlog.http_method_requests":{info:'The request method to obtain an object. Please refer to section request-methods for available methods and their description.'},"squidlog.hier_code_requests":{info:'A code that explains how the request was handled, e.g. by forwarding it to a peer, or going straight to the source. Any hierarchy tag may be prefixed with TIMEOUT_, if the timeout occurs waiting for all ICP replies to return from the neighbours. The timeout is either dynamic, if the icp_query_timeout was not set, or the time configured there has run up. Refer to Hierarchy Codes for details on hierarchy codes.'},"squidlog.server_address_forwarded_requests":{info:"The IP address or hostname where the request (if a miss) was forwarded. For requests sent to origin servers, this is the origin server's IP address. For requests sent to a neighbor cache, this is the neighbor's hostname. NOTE: older versions of Squid would put the origin server hostname here."},"squidlog.mime_type_requests":{info:"The content type of the object as seen in the HTTP reply header. Please note that ICP exchanges usually don't have any content type."},"cockroachdb.process_cpu_time_combined_percentage":{aggregationMethod:"avg",mainheads:[{groupBy:["selected"],aggregationMethod:"avg",chartLibrary:"easypiechart",title:"Average CPU Usage",colors:s.default[0],layout:{left:0,top:0,width:3,height:5}}],info:"Current combined cpu utilization, calculated as (user+system)/num of logical cpus."},"cockroachdb.host_disk_bandwidth":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Disk Bandwidth",colors:s.default[1],layout:{left:3,top:0,width:3,height:5}}],info:"Summary disk bandwidth statistics across all system host disks."},"cockroachdb.host_disk_operations":{info:"Summary disk operations statistics across all system host disks."},"cockroachdb.host_disk_iops_in_progress":{info:"Summary disk iops in progress statistics across all system host disks."},"cockroachdb.host_network_bandwidth":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Network Bandwidth",colors:s.default[4],layout:{left:6,top:0,width:3,height:5}}],info:"Summary network bandwidth statistics across all system host network interfaces."},"cockroachdb.host_network_packets":{info:"Summary network packets statistics across all system host network interfaces."},"cockroachdb.live_nodes":{info:"Will be 0 if this node is not itself live."},"cockroachdb.total_storage_capacity":{info:"Entire disk capacity. It includes non-CR data, CR data, and empty space."},"cockroachdb.storage_capacity_usability":{info:"usable is sum of empty space and CR data, unusable is space used by non-CR data."},"cockroachdb.storage_usable_capacity":{info:"Breakdown of usable space."},"cockroachdb.storage_used_capacity_percentage":{aggregationMethod:"avg",info:"total is % of total space used, usable is % of usable space used."},"cockroachdb.sql_bandwidth":{info:"The total amount of SQL client network traffic."},"cockroachdb.sql_errors":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total SQL Errors",colors:s.default[7],layout:{left:9,top:0,width:3,height:5}}],info:"statement is statements resulting in a planning or runtime error, transaction is SQL transactions abort errors."},"cockroachdb.sql_started_ddl_statements":{info:"The amount of started DDL (Data Definition Language) statements. This type means database schema changes. 
It includes CREATE, ALTER, DROP, RENAME, TRUNCATE and COMMENT statements."},"cockroachdb.sql_executed_ddl_statements":{info:"The amount of executed DDL (Data Definition Language) statements. This type means database schema changes. It includes CREATE, ALTER, DROP, RENAME, TRUNCATE and COMMENT statements."},"cockroachdb.sql_started_dml_statements":{info:"The amount of started DML (Data Manipulation Language) statements."},"cockroachdb.sql_executed_dml_statements":{info:"The amount of executed DML (Data Manipulation Language) statements."},"cockroachdb.sql_started_tcl_statements":{info:"The amount of started TCL (Transaction Control Language) statements."},"cockroachdb.sql_executed_tcl_statements":{info:"The amount of executed TCL (Transaction Control Language) statements."},"cockroachdb.live_bytes":{info:"The amount of live data used by both applications and the CockroachDB system."},"cockroachdb.kv_transactions":{info:"KV transactions breakdown:
    • committed committed KV transactions (including 1PC).
    • fast-path_committed KV transaction one-phase commit attempts.
    • aborted aborted KV transactions.
    "},"cockroachdb.kv_transaction_restarts":{info:'KV transactions restarts breakdown:
    • write too old restarts due to a concurrent writer committing first.
    • write too old (multiple) restarts due to multiple concurrent writers committing first.
    • forwarded timestamp (iso=serializable) restarts due to a forwarded commit timestamp and isolation=SERIALIZABLE.
    • possible replay restarts due to possible replays of command batches at the storage layer.
    • async consensus failure restarts due to async consensus writes that failed to leave intents.
    • read within uncertainty interval restarts due to reading a new value within the uncertainty interval.
    • aborted restarts due to an abort by a concurrent transaction (usually due to deadlock).
    • push failure restarts due to a transaction push failure.
    • unknown restarts due to unknown reasons.
    '},"cockroachdb.ranges":{info:'CockroachDB stores all user data (tables, indexes, etc.) and almost all system data in a giant sorted map of key-value pairs. This keyspace is divided into "ranges", contiguous chunks of the keyspace, so that every key can always be found in a single range.'},"cockroachdb.ranges_replication_problem":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Replication Problems",colors:s.default[12],layout:{left:9,top:0,width:3,height:5}}],info:"Ranges with not optimal number of replicas:
    • unavailable ranges with fewer live replicas than needed for quorum.
    • under replicated ranges with fewer live replicas than the replication target.
    • over replicated ranges with more live replicas than the replication target.
    "},"cockroachdb.replicas":{info:"CockroachDB replicates each range (3 times by default) and stores each replica on a different node."},"cockroachdb.replicas_leaders":{info:"For each range, one of the replicas is the leader for write requests, not leaseholders is the number of Raft leaders whose range lease is held by another store."},"cockroachdb.replicas_leaseholders":{info:'For each range, one of the replicas holds the "range lease". This replica, referred to as the leaseholder, is the one that receives and coordinates all read and write requests for the range.'},"cockroachdb.queue_processing_failures":{info:"Failed replicas breakdown by queue:
    • gc replicas which failed processing in the GC queue.
    • replica gc replicas which failed processing in the replica GC queue.
    • replication replicas which failed processing in the replicate queue.
    • split replicas which failed processing in the split queue.
    • consistency replicas which failed processing in the consistency checker queue.
    • raft log replicas which failed processing in the Raft log queue.
    • raft snapshot replicas which failed processing in the Raft repair queue.
    • time series maintenance replicas which failed processing in the time series maintenance queue.
    "},"cockroachdb.rebalancing_queries":{info:"Number of kv-level requests received per second by the store, averaged over a large time period as used in rebalancing decisions."},"cockroachdb.rebalancing_writes":{info:"Number of keys written (i.e. applied by raft) per second to the store, averaged over a large time period as used in rebalancing decisions."},"cockroachdb.slow_requests":{info:"Requests that have been stuck for a long time."},"cockroachdb.timeseries_samples":{info:"The amount of metric samples written to disk."},"cockroachdb.timeseries_write_errors":{info:"The amount of errors encountered while attempting to write metrics to disk."},"cockroachdb.timeseries_write_bytes":{info:"Size of metric samples written to disk."},"cockroachdb.process_cpu_time_percentage":{aggregationMethod:"avg",info:"The percentage of CPU time used by the CockroachDB process. High usage may indicate the need for additional resources or optimization."},"cockroachdb.process_memory":{mainheads:[{groupBy:["selected"],aggregationMethod:"max",chartLibrary:"easypiechart",title:"Maximum Memory Usage",colors:s.default[1],layout:{left:0,top:5,width:3,height:5}}],info:"The amount of memory used by the CockroachDB process. High memory usage may indicate the need for additional resources or optimization."},"cockroachdb.process_file_descriptors":{info:"The number of file descriptors used by the CockroachDB process. High usage may indicate the need for additional resources or optimization."},"cockroachdb.process_uptime":{aggregationMethod:"min",info:"The amount of time that the CockroachDB process has been running. This can be useful for understanding the overall health and stability of the system."},"cockroachdb.node_liveness_heartbeats":{info:"The number of liveness heartbeats sent by the CockroachDB node. This can be useful for understanding the overall health and stability of the cluster."},"cockroachdb.sql_statements_total":{info:"The total number of SQL statements executed in the CockroachDB cluster. This can be useful for understanding overall system load and potential bottlenecks."},"cockroachdb.sql_active_distributed_queries":{info:"The number of active distributed SQL queries in the CockroachDB cluster. This can be useful for understanding overall system load and potential bottlenecks."},"cockroachdb.sql_distributed_flows":{info:"'The number of active distributed flows in the CockroachDB cluster. This can be useful for understanding overall system load and potential bottlenecks."},"cockroachdb.rocksdb_table_operations":{info:"The number of RocksDB table operations in the CockroachDB cluster. This can be useful for understanding overall system load and potential bottlenecks."},"cockroachdb.rocksdb_cache_hit_rate":{aggregationMethod:"avg",info:"The RocksDB cache hit rate in the CockroachDB cluster. This can be useful for understanding the efficiency of the cache and identifying potential performance issues."},"cockroachdb.code_heap_memory_usage":{info:"The amount of memory used by the code heap in the CockroachDB cluster. High usage may indicate the need for additional resources or optimization."},"cockroachdb.goroutines":{info:"The number of goroutines in the CockroachDB cluster. This can be useful for understanding overall system load and potential bottlenecks."},"cockroachdb.gc_count":{info:"The number of garbage collection cycles in the CockroachDB cluster. 
This can be useful for understanding overall system performance and potential bottlenecks."},"cockroachdb.gc_pause":{info:"The total time spent in garbage collection pauses in the CockroachDB cluster. High values may indicate the need for optimization or additional resources."},"cockroachdb.cgo_calls":{info:"The number of CGo calls in the CockroachDB cluster. This can be useful for understanding overall system load and potential bottlenecks."},"perf.instructions_per_cycle":{info:'An IPC < 1.0 likely means memory bound, and an IPC > 1.0 likely means instruction bound. For more details about the metric take a look at this blog post.'},"filesystem.vfs_deleted_objects":{title:"VFS remove",info:'Number of calls to VFS unlinker function. This chart may not show all file system events if it uses other functions to store data on disk. Netdata shows virtual file system metrics per application and cgroup (systemd Services) if apps or cgroup (systemd Services) plugins are enabled.'+f+' to monitor File Systems.',en:{instance:{one:"system",other:"systems"}}},"filesystem.vfs_io":{title:"VFS IO",info:'Number of calls to VFS I/O functions. This chart may not show all file system events if it uses other functions to store data on disk. Netdata shows virtual file system metrics per application and cgroup (systemd Services) if apps or cgroup (systemd Services) plugins are enabled.'+f+' to monitor File Systems.
    ',en:{instance:{one:"system",other:"systems"}}},"filesystem.vfs_io_bytes":{title:"VFS bytes written",info:'Total of bytes read or written with success using the VFS I/O functions. This chart may not show all file system events if it uses other functions to store data on disk. Netdata shows virtual file system metrics per application and cgroup (systemd Services) if apps or cgroup (systemd Services) plugins are enabled.'+f+' to monitor File Systems.
    ',en:{instance:{one:"system",other:"systems"}}},"filesystem.vfs_io_error":{title:"VFS IO error",info:'Number of failed calls to VFS I/O functions. This chart may not show all file system events if it uses other functions to store data on disk. Netdata shows virtual file system metrics per application and cgroup (systemd Services) if apps or cgroup (systemd Services) plugins are enabled.'+f+' to monitor File Systems.
    ',en:{instance:{one:"system",other:"systems"}}},"filesystem.vfs_fsync":{info:'Number of calls to VFS syncer function. This chart may not show all file system events if it uses other functions to sync data on disk. Netdata shows virtual file system metrics per application and cgroup (systemd Services) if apps or cgroup (systemd Services) plugins are enabled.'+f+' to monitor File Systems.
    ',en:{instance:{one:"system",other:"systems"}}},"filesystem.vfs_fsync_error":{info:'Number of failed calls to VFS syncer function.. This chart may not show all file system events if it uses other functions to sync data on disk. Netdata shows virtual file system metrics per application and cgroup (systemd Services) if apps or cgroup (systemd Services) plugins are enabled.'+f+' to monitor File Systems.
    ',en:{instance:{one:"system",other:"systems"}}},"filesystem.vfs_open":{info:'Number of calls to VFS opener function. This chart may not show all file system events if it uses other functions to open files. Netdata shows virtual file system metrics per application and cgroup (systemd Services) if apps or cgroup (systemd Services) plugins are enabled.'+f+' to monitor File Systems.
    ',en:{instance:{one:"system",other:"systems"}}},"filesystem.vfs_open_error":{info:'Number of failed calls to VFS opener function. This chart may not show all file system events if it uses other functions to open files. Netdata shows virtual file system metrics per application and cgroup (systemd Services) if apps or cgroup (systemd Services) plugins are enabled.'+f+' to monitor File Systems.
    ',en:{instance:{one:"system",other:"systems"}}},"filesystem.vfs_create":{info:'Number of calls to VFS creator function. This chart may not show all file system events if it uses other functions to create files. Netdata shows virtual file system metrics per application and cgroup (systemd Services) if apps or cgroup (systemd Services) plugins are enabled.'+f+' to monitor File Systems.
    ',en:{instance:{one:"system",other:"systems"}}},"filesystem.vfs_create_error":{info:'Number of failed calls to VFS creator function. This chart may not show all file system events if it uses other functions to create files. Netdata shows virtual file system metrics per application and cgroup (systemd Services) if apps or cgroup (systemd Services) plugins are enabled.'+f+' to monitor File Systems.
    ',en:{instance:{one:"system",other:"systems"}}},"filesystem.ext4_read_latency":{aggregationMethod:"avg",info:'Latency for each read request monitoring ext4 reader function.'+f+'to monitor File systems.',en:{instance:{one:"system",other:"systems"}}},"filesystem.ext4_write_latency":{aggregationMethod:"avg",info:'Latency for each write request monitoring ext4 writer function.'+f+' to monitor File systems.',en:{instance:{one:"system",other:"systems"}}},"filesystem.ext4_open_latency":{aggregationMethod:"avg",info:'Latency for each open request monitoring ext4 opener function.'+f+' to monitor File systems.',en:{instance:{one:"system",other:"systems"}}},"filesystem.ext4_sync_latency":{aggregationMethod:"avg",info:'Latency for each sync request monitoring ext4 syncer function.'+f+' to monitor File systems.',en:{instance:{one:"system",other:"systems"}}},"filesystem.xfs_read_latency":{aggregationMethod:"avg",info:'Latency for each read request monitoring xfs reader function.'+f+' to monitor File systems.',en:{instance:{one:"system",other:"systems"}}},"filesystem.xfs_write_latency":{aggregationMethod:"avg",info:'Latency for each write request monitoring xfs writer function.'+f+' to monitor File systems.',en:{instance:{one:"system",other:"systems"}}},"filesystem.xfs_open_latency":{aggregationMethod:"avg",info:'Latency for each open request monitoring xfs opener function.'+f+' to monitor File systems.',en:{instance:{one:"system",other:"systems"}}},"filesystem.xfs_sync_latency":{aggregationMethod:"avg",info:'Latency for each sync request monitoring xfs syncer function.'+f+' to monitor File systems.',en:{instance:{one:"system",other:"systems"}}},"filesystem.nfs_read_latency":{aggregationMethod:"avg",info:'Latency for each read request monitoring nfs reader function.'+f+' to monitor File systems.',en:{instance:{one:"system",other:"systems"}}},"filesystem.nfs_write_latency":{aggregationMethod:"avg",info:'Latency for each write request monitoring nfs writer function.'+f+' to monitor File systems.',en:{instance:{one:"system",other:"systems"}}},"filesystem.nfs_open_latency":{aggregationMethod:"avg",info:'Latency for each open request monitoring nfs opener function.'+f+' to monitor File systems.',en:{instance:{one:"system",other:"systems"}}},"filesystem.nfs_attribute_latency":{aggregationMethod:"avg",info:'Latency for each get attribute request monitoring nfs attribute function.'+f+' to monitor File systems.',en:{instance:{one:"system",other:"systems"}}},"filesystem.zfs_read_latency":{aggregationMethod:"avg",info:'Latency for each read request monitoring zfs reader function.'+f+' to monitor File systems.',en:{instance:{one:"system",other:"systems"}}},"filesystem.zfs_write_latency":{aggregationMethod:"avg",info:'Latency for each write request monitoring zfs writer function.'+f+' to monitor File systems.',en:{instance:{one:"system",other:"systems"}}},"filesystem.zfs_open_latency":{aggregationMethod:"avg",info:'Latency for each open request monitoring zfs opener function.'+f+' to monitor File systems.',en:{instance:{one:"system",other:"systems"}}},"filesystem.zfs_sync_latency":{aggregationMethod:"avg",info:'Latency for each sync request monitoring zfs syncer function.'+f+' to monitor File systems.',en:{instance:{one:"system",other:"systems"}}},"filesystem.btrfs_read_latency":{aggregationMethod:"avg",info:'Latency for each read request monitoring btrfs reader function.'+f+' to monitor File 
systems.',en:{instance:{one:"system",other:"systems"}}},"filesystem.btrfs_write_latency":{aggregationMethod:"avg",info:'Latency for each write request monitoring btrfs writer function.'+f+' to monitor File systems.',en:{instance:{one:"system",other:"systems"}}},"filesystem.btrfs_open_latency":{aggregationMethod:"avg",info:'Latency for each open request monitoring btrfs opener function.'+f+' to monitor File systems.',en:{instance:{one:"system",other:"systems"}}},"filesystem.btrfs_sync_latency":{aggregationMethod:"avg",info:'Latency for each sync request monitoring btrfs syncer function.'+f+' to monitor File systems.',en:{instance:{one:"system",other:"systems"}}},"mount_points.call":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Syscalls to mount and unmount",layout:{left:6,top:0,width:3,height:5}}],info:'Monitor calls to syscalls that are responsible for attaching (mount(2)) or removing filesystems (umount(2)). This chart has relationship with File systems.'+f,en:{instance:{one:"system",other:"systems"}}},"mount_points.error":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Errors to mount and unmount",colors:s.default[1],layout:{left:9,top:0,width:3,height:5}}],info:'Monitor errors in calls to syscalls that are responsible for attaching (mount(2)) or removing filesystems (umount(2)). This chart has relationship with File systems.'+f,en:{instance:{one:"system",other:"systems"}}},"filesystem.file_descriptor":{info:'Number of calls for internal functions on the Linux kernel responsible to open and closing files. Netdata shows file access per application and cgroup (systemd Services) if apps or cgroup (systemd Services) plugins are enabled.'+f+' to monitor File systems',en:{instance:{one:"system",other:"systems"}}},"filesystem.file_error":{info:'Number of failed calls to the kernel internal function responsible to open and closing files. 
Netdata shows file error per application and cgroup (systemd Services) if apps or cgroup (systemd Services) plugins are enabled.'+f+' to monitor File systems.',en:{instance:{one:"system",other:"systems"}}},"netdata.aclk_status":{valueRange:[0,1],info:"This chart shows if ACLK was online during entirety of the sample duration.",en:{instance:{one:"agent",other:"agents"}}},"netdata.aclk_query_per_second":{info:"This chart shows how many queries were added for ACLK_query thread to process and how many it was actually able to process.",en:{instance:{one:"agent",other:"agents"}}},"netdata.aclk_latency_mqtt":{aggregationMethod:"avg",info:"Measures latency between MQTT publish of the message and it's PUB_ACK being received",en:{instance:{one:"agent",other:"agents"}}},"vernemq.sockets":{mainheads:[{groupBy:["selected"],selectedDimensions:["open_sockets"],chartLibrary:"gauge",title:"Total Connected Clients"}],en:{instance:{one:"broker",other:"brokers"}}},"vernemq.queue_processes":{mainheads:[{groupBy:["selected"],selectedDimensions:["queue_processes"],chartLibrary:"gauge",title:"Total Queues Processes"}],en:{instance:{one:"broker",other:"brokers"}}},"vernemq.queue_messages":{mainheads:[{groupBy:["selected"],selectedDimensions:["queue_message_in"],chartLibrary:"easypiechart",title:"Total MQTT Receive Rate"},{groupBy:["selected"],selectedDimensions:["queue_message_out"],chartLibrary:"easypiechart",title:"Total MQTT Send Rate"}],en:{instance:{one:"broker",other:"brokers"}}},"vernemq.average_scheduler_utilization":{aggregationMethod:"avg",mainheads:[{groupBy:["selected"],aggregationMethod:"max",selectedDimensions:["system_utilization"],chartLibrary:"gauge",title:"Maximum Scheduler Utilization",colors:s.default[5]}],en:{instance:{one:"broker",other:"brokers"}}},"vernemq.mqtt_unsubscribe":{info:"This metric measures the number of MQTT unsubscribe attempts. Monitoring this metric can help identify any potential issues related to unsubscription.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.mqtt_publish":{info:"This metric measures the number of MQTT publish attempts. Monitoring this metric can help identify any potential issues related to publishing.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.mqtt_publish_errors":{info:"This metric measures the number of errors encountered during MQTT publish attempts. Monitoring this metric can help identify any potential issues related to publishing.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.mqtt_publish_auth_errors":{info:"This metric measures the number of authentication errors encountered during MQTT publish attempts. Monitoring this metric can help identify any potential authentication issues.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.mqtt_puback":{info:"This metric measures the number of MQTT puback requests. A PUBACK message is the response to a PUBLISH message with QoS level 1. A PUBACK message is sent by a server in response to a PUBLISH message from a publishing client, and by a subscriber in response to a PUBLISH message from the server. Monitoring this metric can help identify any potential issues related to acknowledgements.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.mqtt_ping":{info:"This metric measures the number of MQTT ping requests. 
Monitoring this metric can help identify any potential issues related to communication.",en:{instance:{one:"broker",other:"brokers"}}},"pulsar.messages_rate":{mainheads:[{groupBy:["selected"],selectedDimensions:["pulsar_rate_in"],chartLibrary:"easypiechart",title:"Total Published"},{groupBy:["selected"],selectedDimensions:["pulsar_rate_out"],chartLibrary:"easypiechart",title:"Total Dispatched"}],en:{instance:{one:"broker",other:"brokers"}}},"pulsar.subscription_msg_rate_redeliver":{mainheads:[{groupBy:["selected"],selectedDimensions:["pulsar_subscription_msg_rate_redeliver"],chartLibrary:"gauge",title:"Total Redelivered"}],en:{instance:{one:"broker",other:"brokers"}}},"pulsar.subscription_blocked_on_unacked_messages":{mainheads:[{groupBy:["selected"],selectedDimensions:["pulsar_subscription_blocked_on_unacked_messages"],chartLibrary:"gauge",title:"Total Blocked On Unacked"}],en:{instance:{one:"broker",other:"brokers"}}},"pulsar.msg_backlog":{mainheads:[{groupBy:["selected"],selectedDimensions:["pulsar_msg_backlog"],chartLibrary:"gauge",title:"Total Messages Backlog"}],en:{instance:{one:"broker",other:"brokers"}}},"pulsar.namespace_messages_rate":{heads:[{groupBy:["selected"],selectedDimensions:["publish"],chartLibrary:"easypiechart",title:"Total Published"},{groupBy:["selected"],selectedDimensions:["dispatch"],chartLibrary:"easypiechart",title:"Total Dispatched"}],en:{instance:{one:"broker",other:"brokers"}}},"pulsar.namespace_subscription_msg_rate_redeliver":{mainheads:[{groupBy:["selected"],selectedDimensions:["redelivered"],chartLibrary:"gauge",title:"Total Redelivered"}],en:{instance:{one:"broker",other:"brokers"}}},"pulsar.namespace_subscription_blocked_on_unacked_messages":{mainheads:[{groupBy:["selected"],selectedDimensions:["blocked"],chartLibrary:"gauge",title:"Total Blocked On Unacked"}],en:{instance:{one:"broker",other:"brokers"}}},"pulsar.namespace_msg_backlog":{mainheads:[{groupBy:["selected"],selectedDimensions:["backlog"],chartLibrary:"gauge",title:"Total Messages Backlog"}],en:{instance:{one:"broker",other:"brokers"}}},"amdgpu.gpu_utilization":{en:{instance:{one:"GPU",other:"GPUs"}}},"amdgpu.gpu_mem_utilization":{en:{instance:{one:"GPU",other:"GPUs"}}},"amdgpu.gpu_clk_frequency":{en:{instance:{one:"GPU",other:"GPUs"}}},"amdgpu.gpu_mem_clk_frequency":{en:{instance:{one:"GPU",other:"GPUs"}}},"amdgpu.gpu_mem_vram_usage_perc":{en:{instance:{one:"GPU",other:"GPUs"}}},"amdgpu.gpu_mem_vram_usage":{en:{instance:{one:"GPU",other:"GPUs"}}},"amdgpu.gpu_mem_vis_vram_usage_perc":{en:{instance:{one:"GPU",other:"GPUs"}}},"amdgpu.gpu_mem_vis_vram_usage":{en:{instance:{one:"GPU",other:"GPUs"}}},"amdgpu.gpu_mem_gtt_usage_perc":{en:{instance:{one:"GPU",other:"GPUs"}}},"amdgpu.gpu_mem_gtt_usage":{en:{instance:{one:"GPU",other:"GPUs"}}},"intelgpu.frequency":{mainheads:[{groupBy:["selected"],aggregationMethod:"avg",chartLibrary:"gauge",title:"Average Frequency",valueRange:[0,null],colors:s.default[0],layout:{left:0,top:0,width:3,height:5}}],aggregationMethod:"avg",en:{instance:{one:"GPU",other:"GPUs"}}},"intelgpu.power":{mainheads:[{groupBy:["selected"],selectedDimensions:["gpu"],chartLibrary:"gauge",title:"Total GPU Power",valueRange:[0,null],colors:s.default[3],layout:{left:3,top:0,width:3,height:5}},{groupBy:["selected"],selectedDimensions:["package"],chartLibrary:"gauge",title:"Total Package 
Power",valueRange:[0,null],colors:s.default[3],layout:{left:6,top:0,width:3,height:5}}],aggregationMethod:"sum",en:{instance:{one:"GPU",other:"GPUs"}}},"intelgpu.engine_busy_perc":{aggregationMethod:"avg",mainheads:[{groupBy:["label"],groupByLabel:["engine_class"],aggregationMethod:"avg",chartLibrary:"bars",title:"Average Utilization of Engines Classes",dimensionsSort:"valueDesc",colors:s.default[12],layout:{left:9,top:0,width:3,height:5}}],en:{instance:{one:"GPU engine",other:"GPU engines"}},groupBy:["label"],groupByLabel:["engine_class"]},"nvidia_smi.fan_speed":{heads:[{groupBy:["selected"],aggregationMethod:"max",selectedDimensions:["speed"],chartLibrary:"easypiechart",title:"Maximum Fan Speed"}],en:{instance:{one:"GPU",other:"GPUs"}}},"nvidia_smi.temperature":{heads:[{groupBy:["selected"],aggregationMethod:"max",selectedDimensions:["temp"],chartLibrary:"easypiechart",title:"Maximum Temperature"}],en:{instance:{one:"GPU",other:"GPUs"}}},"nvidia_smi.memory_allocated":{heads:[{groupBy:["selected"],selectedDimensions:["used"],chartLibrary:"easypiechart",title:"Total Used Memory"}],en:{instance:{one:"GPU",other:"GPUs"}}},"nvidia_smi.power":{aggregationMethod:"avg",heads:[{groupBy:["selected"],selectedDimensions:["power"],chartLibrary:"easypiechart",title:"Total Power Utilization",colors:s.default[5]}],en:{instance:{one:"GPU",other:"GPUs"}}},"nvidia_smi.gpu_pcie_bandwidth_usage":{info:"The amount of PCI Express (PCIe) bandwidth being used by the GPU. Monitoring this metric can help identify if the GPU is being bottlenecked by the PCIe bus and determine if a faster GPU is needed.",en:{instance:{one:"GPU",other:"GPUs"}}},"nvidia_smi.gpu_fan_speed_perc":{aggregationMethod:"avg",info:"The speed of the GPU's fan in percent. Monitoring this metric is important to ensure that the GPU is not overheating and that the fan is running at the optimal speed.",en:{instance:{one:"GPU",other:"GPUs"}}},"nvidia_smi.gpu_utilization":{aggregationMethod:"avg",info:"The amount of utilization of the GPU's compute and graphics units. Monitoring this metric can help identify potential bottlenecks in the GPU's performance and help tune the system for optimal performance.",en:{instance:{one:"GPU",other:"GPUs"}}},"nvidia_smi.gpu_memory_utilization":{aggregationMethod:"avg",info:"The amount of GPU memory being used. Monitoring this metric can help identify potential memory usage issues and help optimize memory usage for better performance.",en:{instance:{one:"GPU",other:"GPUs"}}},"nvidia_smi.gpu_decoder_utilization":{aggregationMethod:"avg",info:"The amount of utilization of the GPU's decoder units. Monitoring this metric can help identify potential bottlenecks in the GPU's performance and help tune the system for optimal performance.",en:{instance:{one:"GPU",other:"GPUs"}}},"nvidia_smi.gpu_encoder_utilization":{aggregationMethod:"avg",info:"The amount of utilization of the GPU's encoder units. Monitoring this metric can help identify potential bottlenecks in the GPU's performance and help tune the system for optimal performance.",en:{instance:{one:"GPU",other:"GPUs"}}},"nvidia_smi.gpu_temperature":{info:"The temperature of the GPU in degrees Celsius. Monitoring this metric is important to ensure that the GPU is not overheating and that the fan is running at the optimal speed.",en:{instance:{one:"GPU",other:"GPUs"}}},"nvidia_smi.gpu_clock_freq":{info:"The clock frequency of the GPU in MHz. 
Monitoring this metric can help identify potential bottlenecks in the GPU's performance and help tune the system for optimal performance.",en:{instance:{one:"GPU",other:"GPUs"}}},"nvidia_smi.gpu_performance_state":{info:"The performance state of the GPU. Monitoring this metric can help identify potential performance issues and help tune the system for optimal performance.",en:{instance:{one:"GPU",other:"GPUs"}}},"openvpn.active_clients":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Active Clients"}],info:"The active clients metric indicates the number of clients connected to the OpenVPN server. This metric should be monitored to keep track of the number of connected clients.",en:{instance:{one:"OpenVPN server",other:"OpenVPN servers"}}},"openvpn.total_traffic":{mainheads:[{groupBy:["selected"],selectedDimensions:["out"],chartLibrary:"easypiechart",title:"Total Traffic Sent"},{groupBy:["selected"],selectedDimensions:["in"],chartLibrary:"easypiechart",title:"Total Traffic received"}],info:"The total traffic metric indicates the total amount of data sent and received on the OpenVPN server. This metric should be monitored to ensure the server is running efficiently.",en:{instance:{one:"OpenVPN server",other:"OpenVPN servers"}}},"openvpn.user_traffic":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Total Traffic per User"}],info:"The user traffic metric indicates the amount of data sent and received by each user connected to the OpenVPN server. This metric should be monitored to ensure the server is running efficiently.",en:{instance:{one:"OpenVPN server",other:"OpenVPN servers"}}},"openvpn.user_connection_time":{aggregationMethod:"avg",info:"The user connection time metric indicates the time each user is connected to the OpenVPN server. This metric should be monitored to ensure the server is running efficiently.",en:{instance:{one:"OpenVPN server",other:"OpenVPN servers"}}},"phpdaemon.workers":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Workers",layout:{left:0,top:0,width:3,height:5}}],info:"The workers metric indicates the number of workers running in the phpdaemon. This metric should be monitored to ensure the proper number of workers is running for the best performance.",en:{instance:{one:"daemon",other:"daemons"}}},"phpdaemon.alive_workers":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Active Workers",layout:{left:3,top:0,width:3,height:5}}],info:"The alive workers metric indicates the number of workers currently active in the phpdaemon. This metric should be monitored to ensure the server is running efficiently.",en:{instance:{one:"daemon",other:"daemons"}}},"phpdaemon.idle_workers":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Idle Workers",layout:{left:6,top:0,width:3,height:5}}],info:"The idle workers metric indicates the number of workers that are currently idle in the phpdaemon. This metric should be monitored to ensure the server is running efficiently.",en:{instance:{one:"daemon",other:"daemons"}}},"phpdaemon.uptime":{aggregationMethod:"min",mainheads:[{groupBy:["selected"],aggregationMethod:"min",chartLibrary:"number",title:"Minimum Uptime",layout:{left:9,top:0,width:3,height:5}}],info:"The uptime metric indicates the time since the phpdaemon was started. 
This metric should be monitored to ensure the server is running efficiently.",en:{instance:{one:"daemon",other:"daemons"}}},"phpfpm.connections":{mainheads:[{groupBy:["selected"],selectedDimensions:["active"],chartLibrary:"easypiechart",title:"Active Connections",layout:{left:0,top:0,width:3,height:5}}],info:"This metric indicates the connections count across the following dimensions: active, max_active, idle.",en:{instance:{one:"FastCGI server",other:"FastCGI servers"}}},"phpfpm.requests":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Requests",layout:{left:3,top:0,width:3,height:5}}],info:"The requests metric indicates the number of requests processed by the phpfpm server. This metric should be monitored to ensure the server is running efficiently.",en:{instance:{one:"FastCGI server",other:"FastCGI servers"}}},"phpfpm.performance":{info:"The performance metric indicates the performance of the phpfpm server. This metric should be monitored to ensure the server is running efficiently.",en:{instance:{one:"FastCGI server",other:"FastCGI servers"}}},"phpfpm.request_duration":{mainheads:[{groupBy:["selected"],aggregationMethod:"max",chartLibrary:"easypiechart",title:"Maximum Request Duration",layout:{left:6,top:0,width:3,height:5}}],info:"The request duration metric indicates the time taken to process each request on the phpfpm server. This metric should be monitored to ensure the server is running efficiently.",en:{instance:{one:"FastCGI server",other:"FastCGI servers"}}},"phpfpm.request_cpu":{aggregationMethod:"avg",mainheads:[{groupBy:["selected"],aggregationMethod:"max",chartLibrary:"easypiechart",title:"Maximum Request CPU",layout:{left:9,top:0,width:3,height:5}}],info:"The request CPU metric indicates the amount of CPU used to process each request on the phpfpm server. This metric should be monitored to ensure the server is running efficiently.",en:{instance:{one:"FastCGI server",other:"FastCGI servers"}}},"phpfpm.request_mem":{mainheads:[{groupBy:["selected"],aggregationMethod:"max",chartLibrary:"easypiechart",title:"Maximum Request Memory",layout:{left:0,top:5,width:3,height:5}}],info:"The request memory metric indicates the amount of memory used to process each request on the phpfpm server. This metric should be monitored to ensure the server is running efficiently.",en:{instance:{one:"FastCGI server",other:"FastCGI servers"}}},"pihole.dns_queries_total":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Queries",layout:{left:0,top:0,width:3,height:5}}],info:"The DNS queries total metric indicates the total number of DNS queries that have been made to the Pi-hole server. This metric should be monitored to ensure the server is running efficiently.",en:{instance:{one:"Pi-hole server",other:"Pi-hole servers"}}},"pihole.dns_queries":{info:"The DNS queries metric indicates the number of DNS queries that have been made to the Pi-hole server in a given time period. This metric should be monitored to ensure the server is running efficiently.",en:{instance:{one:"Pi-hole server",other:"Pi-hole servers"}}},"pihole.dns_queries_percentage":{aggregationMethod:"avg",mainheads:[{groupBy:["selected"],selectedDimensions:["blocked"],chartLibrary:"gauge",title:"Blocked Queries %",layout:{left:3,top:0,width:3,height:5}}],info:"The DNS queries percentage metric indicates the percentage of DNS queries that have been made to the Pi-hole server in a given time period. 
This metric should be monitored to ensure the server is running efficiently.",en:{instance:{one:"Pi-hole server",other:"Pi-hole servers"}}},"pihole.unique_clients":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Clients",layout:{left:6,top:0,width:3,height:5}}],info:"The unique clients metric indicates the number of unique clients making DNS queries to the Pi-hole server. This metric should be monitored to ensure the server is running efficiently.",en:{instance:{one:"Pi-hole server",other:"Pi-hole servers"}}},"pihole.blocklist_last_update":{info:"The blocklist last update metric indicates the last time the Pi-hole's blocklist was updated. This metric should be monitored to ensure that the server is running efficiently.",en:{instance:{one:"Pi-hole server",other:"Pi-hole servers"}}},"pihole.dns_queries_types":{aggregationMethod:"avg",mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Query Types",layout:{left:9,top:0,width:3,height:5}}],info:"The DNS queries types metric indicates the types of DNS queries that have been made to the Pi-hole server. This metric should be monitored to ensure that the server is running efficiently.",en:{instance:{one:"Pi-hole server",other:"Pi-hole servers"}}},"pihole.domains_on_blocklist":{mainheads:[{groupBy:["selected"],aggregationMethod:"max",chartLibrary:"number",title:"Domains on Blocklist",layout:{left:0,top:5,width:3,height:5}}],info:"The domains on blocklist metric indicates the number of domains currently on the Pi-hole's blocklists. This metric should be monitored to ensure that the blocklists are kept up to date.",en:{instance:{one:"Pi-hole server",other:"Pi-hole servers"}}},"pihole.dns_queries_forwarded_destination":{aggregationMethod:"avg",info:"The DNS queries forwarded destination metric indicates the destination of the DNS queries that have been forwarded by the Pi-hole server. This metric should be monitored to ensure that the server is running efficiently.",en:{instance:{one:"Pi-hole server",other:"Pi-hole servers"}}},"supervisord.process_state_code":{info:'Process states map: 0 - stopped, 10 - starting, 20 - running, 30 - backoff, 40 - stopping, 100 - exited, 200 - fatal, 1000 - unknown.'},"systemd.unit_file_state":{info:'Systemd unit file state. For details, search for `is-enabled UNIT` in systemctl(1).',en:{instance:{one:"unit file",other:"unit files"}}},"systemd.service_unit_state":{info:'Service units start and control daemons and the processes they consist of. For details, see systemd.service(5).',en:{instance:{one:"unit",other:"units"}}},"systemd.socket_unit_state":{info:'Socket units encapsulate local IPC or network sockets in the system, useful for socket-based activation. For details about socket units, see systemd.socket(5), for details on socket-based activation and other forms of activation, see daemon(7).',en:{instance:{one:"unit",other:"units"}}},"systemd.target_unit_state":{info:'Target units are useful to group units, or provide well-known synchronization points during boot-up, see systemd.target(5).',en:{instance:{one:"unit",other:"units"}}},"systemd.path_unit_state":{info:'Path units may be used to activate other services when file system objects change or are modified. See systemd.path(5).',en:{instance:{one:"unit",other:"units"}}},"systemd.device_unit_state":{info:'Device units expose kernel devices in systemd and may be used to implement device-based activation. 
For details, see systemd.device(5).',en:{instance:{one:"unit",other:"units"}}},"systemd.mount_unit_state":{info:'Mount units control mount points in the file system. For details, see systemd.mount(5).',en:{instance:{one:"unit",other:"units"}}},"systemd.automount_unit_state":{info:'Automount units provide automount capabilities, for on-demand mounting of file systems as well as parallelized boot-up. See systemd.automount(5).',en:{instance:{one:"unit",other:"units"}}},"systemd.swap_unit_state":{info:'Swap units are very similar to mount units and encapsulate memory swap partitions or files of the operating system. They are described in systemd.swap(5).',en:{instance:{one:"unit",other:"units"}}},"systemd.timer_unit_state":{info:'Timer units are useful for triggering activation of other units based on timers. You may find details in systemd.timer(5).',en:{instance:{one:"unit",other:"units"}}},"systemd.scope_unit_state":{info:'Scope units are similar to service units, but manage foreign processes instead of starting them as well. See systemd.scope(5).',en:{instance:{one:"unit",other:"units"}}},"systemd.slice_unit_state":{info:'Slice units may be used to group units which manage system processes (such as service and scope units) in a hierarchical tree for resource management purposes. See systemd.slice(5).',en:{instance:{one:"unit",other:"units"}}},"anomaly_detection.dimensions":{mainheads:[{groupBy:["selected"],selectedDimensions:["anomalous"],chartLibrary:"number",title:"Total Anomalous Dimensions",layout:{left:3,top:0,width:3,height:5}},{groupBy:["selected"],selectedDimensions:["normal"],chartLibrary:"number",title:"Total Normal Dimensions",layout:{left:6,top:0,width:3,height:5}}],info:"Total count of dimensions considered anomalous or normal. ",en:{instance:{one:"system",other:"systems"}},dimensionsOnNonDimensionGrouping:["anomalous"]},"anomaly_detection.anomaly_rate":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",colors:s.default[12],title:"% of Anomalous Dimensions",layout:{left:0,top:0,width:3,height:5}}],info:"Percentage of anomalous dimensions. ",en:{instance:{one:"system",other:"systems"}},dimensionsOnNonDimensionGrouping:["anomaly_rate"]},"anomaly_detection.detector_window":{info:"The length of the active window used by the detector. ",en:{instance:{one:"system",other:"systems"}},dimensionsOnNonDimensionGrouping:["above_threshold"]},"anomaly_detection.detector_events":{info:"Flags (0 or 1) to show when an anomaly event has been triggered by the detector. ",en:{instance:{one:"system",other:"systems"}}},"anomaly_detection.prediction_stats":{info:"Diagnostic metrics relating to prediction time of anomaly detection. ",en:{instance:{one:"system",other:"systems"}}},"anomaly_detection.training_stats":{info:"Diagnostic metrics relating to training time of anomaly detection. 
",en:{instance:{one:"system",other:"systems"}}},"fail2ban.jail_banned_ips":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["jail"],mainheads:[{groupBy:["dimension"],chartLibrary:"easypiechart",title:"Currently Banned IPs",layout:{left:0,top:0,width:4,height:5}}],info:"This value shows the number of IP addresses currently banned due to exceeding the allowed number of failed login attempts within the defined time window.",en:{instance:{one:"jail",other:"jails"}}},"fail2ban.jail_active_failures":{aggregationMethod:"sum",groupBy:["label"],groupByLabel:["jail"],mainheads:[{groupBy:["dimension"],chartLibrary:"easypiechart",title:"Currently Active Failures",layout:{left:4,top:0,width:4,height:5}}],info:"The number of tickets (IPs) with failures that have not yet caused a ban or have become obsolete.",en:{instance:{one:"jail",other:"jails"}}},"fail2ban.failed_attempts":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Failed Attempts",layout:{left:0,top:0,width:3,height:5}}],info:"

    The number of failed attempts.

    This chart reflects the number of 'Found' lines. Found means a line in the service\u2019s log file matches the failregex in its filter.

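// Illustrative only: a 'Found' line is a log line that matches the jail's
// failregex. A minimal JS sketch of that matching step (the pattern below is
// hypothetical; real fail2ban filters use the <HOST> placeholder):
const failregex = /authentication failure .* rhost=\S+/;
const isFound = (logLine) => failregex.test(logLine);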
    ",en:{instance:{one:"service",other:"services"}}},"fail2ban.bans":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Bans",layout:{left:3,top:0,width:3,height:5}}],info:"

    The number of bans.

    This chart reflects the number of 'Ban' and 'Restore Ban' lines. A ban happens when the number of failed attempts (maxretry) is reached within the configured time window (findtime).

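// Illustrative only: a ban triggers once the failures seen within findtime
// reach maxretry. A minimal sketch (names are hypothetical):
const shouldBan = (failureTimes, now, findtime, maxretry) =>
  failureTimes.filter((t) => now - t <= findtime).length >= maxretry;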
    ",en:{instance:{one:"service",other:"services"}}},"fail2ban.banned_ips":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Banned IPs",layout:{left:6,top:0,width:3,height:5}}],info:"

    The number of banned IP addresses.

    ",en:{instance:{one:"service",other:"services"}}},"consul.node_health_check_status":{info:'The current status of the node health check. A node health check monitors the health of the entire node. If the node health check fails, Consul marks the node as unhealthy.',en:{instance:{one:"datacenter",other:"datacenters"}}},"consul.service_health_check_status":{info:'The current status of the service health check. A service check only affects the health of the service it is associated with. If the service health check fails, the DNS interface stops returning that service.',en:{instance:{one:"datacenter",other:"datacenters"}}},"consul.client_rpc_requests_rate":{mainheads:[{groupBy:["instance"],chartLibrary:"bars",dimensionsSort:"valueDesc",title:"RPC Requests Rate Per Client"}],info:"The number of RPC requests to a Consul server.",en:{instance:{one:"datacenter",other:"datacenters"}}},"consul.client_rpc_requests_exceeded_rate":{info:'The number of rate-limited RPC requests to a Consul server. An increase in this metric indicates either that the load is getting high enough to trigger rate limiting or that a Consul agent is incorrectly configured.',en:{instance:{one:"datacenter",other:"datacenters"}}},"consul.client_rpc_requests_failed_rate":{info:"The number of failed RPC requests to a Consul server.",en:{instance:{one:"datacenter",other:"datacenters"}}},"consul.memory_allocated":{mainheads:[{groupBy:["label"],groupByLabel:["datacenter"],chartLibrary:"number",title:"Memory Allocated per Consul DC"}],info:"The amount of memory allocated by the Consul process.",en:{instance:{one:"datacenter",other:"datacenters"}}},"consul.memory_sys":{info:"The amount of memory obtained from the OS.",en:{instance:{one:"datacenter",other:"datacenters"}}},"consul.gc_pause_time":{aggregationMethod:"avg",info:"The amount of time spent in stop-the-world garbage collection (GC) pauses.",en:{instance:{one:"datacenter",other:"datacenters"}}},"consul.kvs_apply_time":{aggregationMethod:"avg",info:"The time it takes to complete an update to the KV store.",en:{instance:{one:"datacenter",other:"datacenters"}}},"consul.kvs_apply_operations_rate":{mainheads:[{groupBy:["label"],groupByLabel:["datacenter"],chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Key Value Store Operations Rate per DC"}],info:"The number of KV store updates.",en:{instance:{one:"datacenter",other:"datacenters"}}},"consul.txn_apply_time":{aggregationMethod:"avg",info:"The time spent applying a transaction operation.",en:{instance:{one:"datacenter",other:"datacenters"}}},"consul.txn_apply_operations_rate":{info:"The number of applied transaction operations.",en:{instance:{one:"datacenter",other:"datacenters"}}},"consul.raft_commit_time":{aggregationMethod:"avg",info:"The time it takes to commit a new entry to the Raft log on the leader.",en:{instance:{one:"datacenter",other:"datacenters"}}},"consul.raft_commits_rate":{mainheads:[{groupBy:["label"],groupByLabel:["datacenter"],chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Raft Commits Rate per DC"}],info:"The number of applied Raft transactions.",en:{instance:{one:"datacenter",other:"datacenters"}}},"consul.autopilot_health_status":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Health Status Overview"}],info:"The overall health of the local server cluster. 
The status is healthy if all servers are considered healthy by Autopilot.",en:{instance:{one:"datacenter",other:"datacenters"}}},"consul.autopilot_failure_tolerance":{info:"The number of voting servers that the cluster can lose while continuing to function.",en:{instance:{one:"datacenter",other:"datacenters"}}},"consul.raft_leader_last_contact_time":{aggregationMethod:"avg",info:"The time since the leader was last able to contact the follower nodes when checking its leader lease.",en:{instance:{one:"datacenter",other:"datacenters"}}},"consul.raft_leader_elections_rate":{info:"The number of leadership elections. Increments whenever a Consul server starts an election.",en:{instance:{one:"datacenter",other:"datacenters"}}},"consul.raft_leadership_transitions_rate":{mainheads:[{groupBy:["label"],groupByLabel:["datacenter"],chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Leadership Transitions Rate per DC"}],info:"The number of leadership elections. Increments whenever a Consul server becomes a leader.",en:{instance:{one:"datacenter",other:"datacenters"}}},"consul.server_leadership_status":{info:"The Consul server leadership status.",en:{instance:{one:"datacenter",other:"datacenters"}}},"consul.raft_thread_main_saturation_perc":{aggregationMethod:"avg",info:"An approximate measurement of the proportion of time the main Raft goroutine is busy and unavailable to accept new work.",en:{instance:{one:"datacenter",other:"datacenters"}}},"consul.raft_thread_fsm_saturation_perc":{aggregationMethod:"avg",info:"An approximate measurement of the proportion of time the Raft FSM goroutine is busy and unavailable to accept new work.",en:{instance:{one:"datacenter",other:"datacenters"}}},"consul.raft_fsm_last_restore_duration":{info:"The time taken to restore the FSM from a snapshot on an agent restart or from the leader calling installSnapshot.",en:{instance:{one:"datacenter",other:"datacenters"}}},"consul.raft_leader_oldest_log_age":{info:"The time elapsed since the oldest journal was written to the leader's journal storage. This can be important for the health of replication when the write rate is high and the snapshot is large, because followers may not be able to recover from a restart if recovery takes longer than the minimum for the current leader.",en:{instance:{one:"datacenter",other:"datacenters"}}},"consul.raft_rpc_install_snapshot_time":{aggregationMethod:"avg",info:"The time it takes to process the installSnapshot RPC call.",en:{instance:{one:"datacenter",other:"datacenters"}}},"consul.raft_boltdb_freelist_bytes":{info:'The number of bytes necessary to encode the freelist metadata. When raft_boltdb.NoFreelistSync is set to false these metadata bytes must also be written to disk for each committed log.',en:{instance:{one:"datacenter",other:"datacenters"}}},"consul.raft_boltdb_logs_per_batch_rate":{info:"The number of logs written per batch to the database.",en:{instance:{one:"datacenter",other:"datacenters"}}},"consul.raft_boltdb_store_logs_time":{aggregationMethod:"avg",info:"The amount of time spent writing logs to the database.",en:{instance:{one:"datacenter",other:"datacenters"}}},"consul.license_expiration_time":{aggregationMethod:"min",info:"The amount of time remaining before Consul Enterprise license expires. 
When the license expires, some Consul Enterprise features will stop working.",en:{instance:{one:"datacenter",other:"datacenters"}}},"envoy.server_state":{aggregationMethod:"avg",mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Envoy Server States",layout:{left:0,top:0,width:3,height:5}}],info:"Server current state",en:{instance:{one:"server",other:"servers"}}},"envoy.server_connections_count":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Server Connections Count",colors:s.default[1],layout:{left:3,top:0,width:3,height:5}}],info:"Server current connections",en:{instance:{one:"server",other:"servers"}}},"envoy.server_parent_connections_count":{info:"Server current parent connections",en:{instance:{one:"server",other:"servers"}}},"envoy.server_memory_allocated_size":{mainheads:[{groupBy:["instance"],chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Top servers by Memory Allocated",layout:{left:6,top:0,width:3,height:5}}],info:"Server memory allocated size",en:{instance:{one:"server",other:"servers"}}},"envoy.server_memory_heap_size":{mainheads:[{groupBy:["instance"],chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Top servers by Heap Size",layout:{left:9,top:0,width:3,height:5}}],info:"Server memory heap size",en:{instance:{one:"server",other:"servers"}}},"envoy.server_memory_physical_size":{info:"Server memory physical size",en:{instance:{one:"server",other:"servers"}}},"envoy.server_uptime":{aggregationMethod:"min",mainheads:[{groupBy:["selected"],aggregationMethod:"min",chartLibrary:"number",title:"Minimum Server Uptime",colors:s.default[4],layout:{left:0,top:5,width:3,height:5}}],info:"Envoy Server uptime",en:{instance:{one:"server",other:"servers"}}},"envoy.cluster_manager_cluster_count":{mainheads:[{groupBy:["dimension"],chartLibrary:"d3pie",title:"Active and Inactive Envoy Clusters ",layout:{left:3,top:5,width:3,height:5}}],info:"Cluster manager current clusters",en:{instance:{one:"cluster",other:"clusters"}}},"envoy.cluster_manager_cluster_changes_rate":{info:"Cluster manager cluster changes",en:{instance:{one:"cluster",other:"clusters"}}},"envoy.cluster_manager_cluster_updates_rate":{info:"Cluster manager updates",en:{instance:{one:"cluster",other:"clusters"}}},"envoy.cluster_manager_cluster_updated_via_merge_rate":{info:"Cluster manager updates applied as merged updates",en:{instance:{one:"cluster",other:"clusters"}}},"envoy.cluster_manager_update_merge_cancelled_rate":{info:"Cluster manager cancelled merged updates",en:{instance:{one:"cluster",other:"clusters"}}},"envoy.cluster_manager_update_out_of_merge_window_rate":{info:"Cluster manager out of a merge window updates",en:{instance:{one:"cluster",other:"clusters"}}},"envoy.cluster_membership_endpoints_count":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Top Clusters by Membership Endpoints",layout:{left:6,top:5,width:3,height:5}}],info:"Cluster membership current endpoints",en:{instance:{one:"cluster",other:"clusters"}}},"envoy.cluster_membership_changes_rate":{info:"Cluster membership changes",en:{instance:{one:"cluster",other:"clusters"}}},"envoy.cluster_membership_updates_rate":{info:"Cluster membership update rate",en:{instance:{one:"cluster",other:"clusters"}}},"envoy.cluster_upstream_cx_active_count":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Upstream Active Connections",colors:s.default[7],layout:{left:9,top:5,width:3,height:5}}],info:"Cluster upstream current active 
connections",en:{instance:{one:"cluster",other:"clusters"}}},"envoy.cluster_upstream_cx_rate":{info:"Cluster upstream connections",en:{instance:{one:"cluster",other:"clusters"}}},"envoy.cluster_upstream_cx_http_rate":{info:"Cluster upstream connections by HTTP version",en:{instance:{one:"cluster",other:"clusters"}}},"envoy.cluster_upstream_cx_destroy_rate":{info:"Cluster upstream destroyed connections rate",en:{instance:{one:"cluster",other:"clusters"}}},"envoy.cluster_upstream_cx_connect_fail_rate":{info:"Cluster upstream failed connections",en:{instance:{one:"cluster",other:"clusters"}}},"envoy.cluster_upstream_cx_connect_timeout_rate":{info:"Cluster upstream timed out connections",en:{instance:{one:"cluster",other:"clusters"}}},"envoy.cluster_upstream_cx_bytes_rate":{info:"Cluster upstream connection traffic",en:{instance:{one:"cluster",other:"clusters"}}},"envoy.cluster_upstream_cx_bytes_buffered_size":{info:"Cluster upstream current connection buffered size",en:{instance:{one:"cluster",other:"clusters"}}},"envoy.cluster_upstream_rq_active_count":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Upstream Active Requests",colors:s.default[7],layout:{left:0,top:10,width:3,height:5}}],info:"Cluster upstream current active requests",en:{instance:{one:"cluster",other:"clusters"}}},"envoy.cluster_upstream_rq_rate":{info:"Cluster upstream requests",en:{instance:{one:"cluster",other:"clusters"}}},"envoy.cluster_upstream_rq_failed_rate":{info:"Cluster upstream failed requests",en:{instance:{one:"cluster",other:"clusters"}}},"envoy.cluster_upstream_rq_pending_active_count":{info:"Cluster upstream current active pending requests",en:{instance:{one:"cluster",other:"clusters"}}},"envoy.cluster_upstream_rq_pending_rate":{info:"Cluster upstream pending requests",en:{instance:{one:"cluster",other:"clusters"}}},"envoy.cluster_upstream_rq_pending_failed_rate":{info:"Cluster upstream failed pending requests",en:{instance:{one:"cluster",other:"clusters"}}},"envoy.cluster_upstream_rq_retry_rate":{info:"Cluster upstream request retries",en:{instance:{one:"cluster",other:"clusters"}}},"envoy.cluster_upstream_rq_retry_success_rate":{info:"Cluster upstream request successful retries",en:{instance:{one:"cluster",other:"clusters"}}},"envoy.cluster_upstream_rq_retry_backoff_rate":{info:"Cluster upstream request backoff retries",en:{instance:{one:"cluster",other:"clusters"}}},"envoy.listener_manager_listeners_count":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Listeners Count",colors:s.default[9],layout:{left:3,top:10,width:3,height:5}}],info:"Listener manager current listeners",en:{instance:{one:"listener",other:"listeners"}}},"envoy.listener_manager_listener_changes_rate":{info:"Listener manager listener changes",en:{instance:{one:"listener",other:"listeners"}}},"envoy.listener_manager_listener_object_events_rate":{info:"Listener manager listener object events",en:{instance:{one:"listener",other:"listeners"}}},"envoy.listener_admin_downstream_cx_active_count":{info:"Listener admin downstream current active connections",mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Downstream Active Connections",colors:s.default[9],layout:{left:6,top:10,width:3,height:5}}],en:{instance:{one:"listener",other:"listeners"}}},"envoy.listener_admin_downstream_cx_rate":{info:"Listener admin downstream connections",en:{instance:{one:"listener",other:"listeners"}}},"envoy.listener_admin_downstream_cx_destroy_rate":{info:"Listener admin downstream destroyed 
connections",en:{instance:{one:"listener",other:"listeners"}}},"envoy.listener_admin_downstream_cx_transport_socket_connect_timeout_rate":{info:"Listener admin downstream timed out connections",en:{instance:{one:"listener",other:"listeners"}}},"envoy.listener_admin_downstream_cx_rejected_rate":{info:"Listener admin downstream rejected connections",mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Downstream Rejected Connections",colors:s.default[9],layout:{left:9,top:10,width:3,height:5}}],en:{instance:{one:"listener",other:"listeners"}}},"envoy.listener_admin_downstream_listener_filter_remote_close_rate":{info:"Listener admin downstream connections closed by remote when peeking data for listener filters",en:{instance:{one:"listener",other:"listeners"}}},"envoy.listener_admin_downstream_listener_filter_error_rate":{info:"Listener admin downstream read errors when peeking data for listener filters",en:{instance:{one:"listener",other:"listeners"}}},"envoy.listener_admin_downstream_pre_cx_active_count":{info:"Listener admin downstream current active sockets",en:{instance:{one:"listener",other:"listeners"}}},"envoy.listener_admin_downstream_pre_cx_timeout_rate":{info:"Listener admin downstream timed out sockets",en:{instance:{one:"listener",other:"listeners"}}},"envoy.listener_downstream_cx_active_count":{info:"Listener downstream current active connections",en:{instance:{one:"listener",other:"listeners"}}},"envoy.listener_downstream_cx_rate":{info:"Listener downstream connections",en:{instance:{one:"listener",other:"listeners"}}},"envoy.listener_downstream_cx_destroy_rate":{info:"Listener downstream destroyed connections",en:{instance:{one:"listener",other:"listeners"}}},"envoy.listener_downstream_cx_transport_socket_connect_timeout_rate":{info:"Listener downstream timed out connections",en:{instance:{one:"listener",other:"listeners"}}},"envoy.listener_downstream_cx_rejected_rate":{info:"Listener downstream rejected connections",en:{instance:{one:"listener",other:"listeners"}}},"envoy.listener_downstream_listener_filter_remote_close_rate":{info:"Listener downstream connections closed by remote when peeking data for listener filters",en:{instance:{one:"listener",other:"listeners"}}},"envoy.listener_downstream_listener_filter_error_rate":{info:"Listener downstream read errors when peeking data for listener filters",en:{instance:{one:"listener",other:"listeners"}}},"envoy.listener_downstream_pre_cx_active_count":{info:"Listener downstream current active sockets",en:{instance:{one:"listener",other:"listeners"}}},"envoy.listener_downstream_pre_cx_timeout_rate":{info:"Listener downstream timed out sockets",en:{instance:{one:"listener",other:"listeners"}}},"k8s_state.node_allocatable_cpu_requests_utilization":{aggregationMethod:"avg",mainheads:[{groupBy:["selected"],aggregationMethod:"avg",chartLibrary:"gauge",title:"Average CPU Utilization",valueRange:[0,100],layout:{left:0,top:5,width:3,height:5}}],info:"The percentage of allocated CPU resources used by Pod requests. A Pod is scheduled to run on a Node only if the Node has enough CPU resources available to satisfy the Pod CPU request.",en:{instance:{one:"K8s node",other:"K8s nodes"}}},"k8s_state.node_allocatable_cpu_requests_used":{info:'The amount of allocated CPU resources used by Pod requests. 
1000 millicpu is equivalent to 1 physical or virtual CPU core.',en:{instance:{one:"K8s node",other:"K8s nodes"}}},"k8s_state.node_allocatable_cpu_limits_utilization":{aggregationMethod:"avg",info:"The percentage of allocated CPU resources used by Pod limits. Total limits may be over 100 percent (overcommitted).",en:{instance:{one:"K8s node",other:"K8s nodes"}}},"k8s_state.node_allocatable_cpu_limits_used":{info:'The amount of allocated CPU resources used by Pod limits. 1000 millicpu is equivalent to 1 physical or virtual CPU core.',en:{instance:{one:"K8s node",other:"K8s nodes"}}},"k8s_state.node_allocatable_mem_requests_utilization":{aggregationMethod:"avg",mainheads:[{groupBy:["selected"],aggregationMethod:"avg",chartLibrary:"gauge",title:"Average Memory Utilization",colors:s.default[4],valueRange:[0,100],layout:{left:9,top:5,width:3,height:5}}],info:"The percentage of allocated memory resources used by Pod requests. A Pod is scheduled to run on a Node only if the Node has enough memory resources available to satisfy the Pod memory request.",en:{instance:{one:"K8s node",other:"K8s nodes"}}},"k8s_state.node_allocatable_mem_requests_used":{info:"The amount of allocated memory resources used by Pod requests.",en:{instance:{one:"K8s node",other:"K8s nodes"}}},"k8s_state.node_allocatable_mem_limits_utilization":{aggregationMethod:"avg",info:"The percentage of allocated memory resources used by Pod limits. Total limits may be over 100 percent (overcommitted).",en:{instance:{one:"K8s node",other:"K8s nodes"}}},"k8s_state.node_allocatable_mem_limits_used":{info:"The amount of allocated memory resources used by Pod limits.",en:{instance:{one:"K8s node",other:"K8s nodes"}}},"k8s_state.node_allocatable_pods_utilization":{aggregationMethod:"avg",mainheads:[{groupBy:["selected"],aggregationMethod:"avg",chartLibrary:"gauge",title:"Average Pods Utilization",colors:s.default[12],valueRange:[0,100],layout:{left:3,top:5,width:3,height:5}}],info:"Pods limit utilization.",en:{instance:{one:"K8s node",other:"K8s nodes"}}},"k8s_state.node_allocatable_pods_usage":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Pods limit usage",layout:{left:8,top:0,width:4,height:2.5}}],info:"

    Pods limit usage.

    Available - the number of Pods available for scheduling. Allocated - the number of Pods that have been scheduled.

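// Illustrative only, assuming Available and Allocated partition the node's
// allocatable Pod capacity:
const podCapacity = ({ available, allocated }) => available + allocated;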
    ",en:{instance:{one:"K8s node",other:"K8s nodes"}}},"k8s_state.node_condition":{info:'Health status. If the status of the Ready condition remains False for longer than the pod-eviction-timeout (the default is 5 minutes), then the node controller triggers API-initiated eviction for all Pods assigned to that node. More info.',en:{instance:{one:"K8s node",other:"K8s nodes"}}},"k8s_state.node_pods_readiness":{mainheads:[{groupBy:["selected"],aggregationMethod:"avg",chartLibrary:"gauge",title:"Average Pod Readiness",colors:s.default[0],valueRange:[0,100],layout:{left:0,top:5,width:3,height:5}}],info:"The percentage of Pods that are ready to serve requests.",en:{instance:{one:"K8s node",other:"K8s nodes"}}},"k8s_state.node_pods_readiness_state":{info:"

    Pods readiness state.

    Ready - the Pod has passed its readiness probe and is ready to serve requests. Unready - the Pod has not passed its readiness probe yet.

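// Illustrative only: the readiness percentage shown by node_pods_readiness can
// be derived from these two states:
const podReadinessPercent = (ready, unready) => (100 * ready) / (ready + unready);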
    ",en:{instance:{one:"K8s node",other:"K8s nodes"}}},"k8s_state.node_pods_condition":{info:'

    Pods state. More info.

    PodReady - the Pod is able to serve requests and should be added to the load balancing pools of all matching Services. PodScheduled - the Pod has been scheduled to a node. PodInitialized - all init containers have completed successfully. ContainersReady - all containers in the Pod are ready.

    ',en:{instance:{one:"K8s node",other:"K8s nodes"}}},"k8s_state.node_pods_phase":{info:'

    Pods phase. The phase of a Pod is a high-level summary of where the Pod is in its lifecycle. More info.

    Running - the Pod has been bound to a node, and all of the containers have been created. At least one container is still running, or is in the process of starting or restarting. Failed - all containers in the Pod have terminated, and at least one container has terminated in failure. That is, the container either exited with non-zero status or was terminated by the system. Succeeded - all containers in the Pod have terminated in success, and will not be restarted. Pending - the Pod has been accepted by the Kubernetes cluster, but one or more of the containers has not been set up and made ready to run.

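// Illustrative only: a simplified classifier following the phase definitions
// above (the real kubelet logic handles more cases):
function podPhase(p) {
  // p is a hypothetical summary object of the Pod's container facts
  if (p.allContainersTerminated) return p.anyContainerFailed ? "Failed" : "Succeeded";
  if (p.boundToNode && p.allContainersCreated) return "Running";
  return "Pending"; // accepted, but one or more containers not yet set up
}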
    ',en:{instance:{one:"K8s node",other:"K8s nodes"}}},"k8s_state.node_containers":{info:"The total number of containers and init containers.",en:{instance:{one:"K8s node",other:"K8s nodes"}}},"k8s_state.node_containers_state":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Containers by State",layout:{left:0,top:20,width:4,height:2.5}}],info:'

    The number of containers in different lifecycle states. More info.

    Running - a container is executing without issues. Waiting - a container is still running the operations it requires in order to complete start up. Terminated - a container began execution and then either ran to completion or failed for some reason.

    ',en:{instance:{one:"K8s node",other:"K8s nodes"}}},"k8s_state.node_init_containers_state":{info:'

    The number of init containers in different lifecycle states. More info.

    Running - a container is executing without issues. Waiting - a container is still running the operations it requires in order to complete start up. Terminated - a container began execution and then either ran to completion or failed for some reason.

    ',en:{instance:{one:"K8s node",other:"K8s nodes"}}},"k8s_state.node_age":{mainheads:[{groupBy:["selected"],aggregationMethod:"min",chartLibrary:"number",title:"Minimum Node Uptime",layout:{left:8,top:2.5,width:4,height:2.5}}],info:"The lifetime of the Node.",en:{instance:{one:"K8s node",other:"K8s nodes"}}},"k8s_state.node_schedulability":{info:"The schedulability of nodes in the Kubernetes cluster. Monitoring this metric can help identify if there are issues with the cluster's ability to schedule pods on nodes.",en:{instance:{one:"K8s node",other:"K8s nodes"}}},"k8s_state.pod_cpu_requests_used":{mainheads:[{groupBy:["selected"],aggregationMethod:"sum",valueRange:[0,null],chartLibrary:"gauge",title:"Total Pod CPU Requests used",colors:s.default[0],layout:{left:0,top:10,width:3,height:5}},{groupBy:["label"],groupByLabel:["k8s_pod_name"],aggregationMethod:"max",groupingMethod:"max",valueRange:[0,null],chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Maximum CPU Requests per Pod",layout:{left:0,top:15,width:3,height:5}}],info:'The overall CPU resource requests for a Pod. This is the sum of the CPU requests for all the Containers in the Pod. Provided the system has CPU time free, a container is guaranteed to be allocated as much CPU as it requests. 1000 millicpu is equivalent to 1 physical or virtual CPU core.',en:{instance:{one:"pod",other:"pods"}}},"k8s_state.pod_cpu_limits_used":{mainheads:[{groupBy:["selected"],aggregationMethod:"sum",valueRange:[0,null],chartLibrary:"gauge",title:"Total Pod CPU limits used",layout:{left:3,top:10,width:3,height:5}},{groupBy:["label"],groupByLabel:["k8s_pod_name"],aggregationMethod:"max",groupingMethod:"max",valueRange:[0,null],chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Maximum CPU limits per Pod",layout:{left:3,top:15,width:3,height:5}}],info:'The overall CPU resource limits for a Pod. This is the sum of the CPU limits for all the Containers in the Pod. If set, containers cannot use more CPU than the configured limit. 1000 millicpu is equivalent to 1 physical or virtual CPU core.',en:{instance:{one:"pod",other:"pods"}}},"k8s_state.pod_mem_requests_used":{mainheads:[{groupBy:["selected"],aggregationMethod:"sum",valueRange:[0,null],chartLibrary:"gauge",title:"Total Pod Memory Requests used",colors:s.default[12],layout:{left:6,top:10,width:3,height:5}},{groupBy:["label"],groupByLabel:["k8s_pod_name"],aggregationMethod:"max",groupingMethod:"max",valueRange:[0,null],chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Total Pod Memory Requests per Pod",layout:{left:6,top:15,width:3,height:5}}],info:"The overall memory resource requests for a Pod. This is the sum of the memory requests for all the Containers in the Pod.",en:{instance:{one:"pod",other:"pods"}}},"k8s_state.pod_mem_limits_used":{mainheads:[{groupBy:["selected"],aggregationMethod:"sum",valueRange:[0,null],chartLibrary:"gauge",title:"Total Pod Memory limits used",colors:s.default[4],layout:{left:9,top:10,width:3,height:5}},{groupBy:["label"],groupByLabel:["k8s_pod_name"],aggregationMethod:"max",groupingMethod:"max",valueRange:[0,null],chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Maximum Memory limits per Pod",layout:{left:9,top:15,width:3,height:5}}],info:"The overall memory resource limits for a Pod. This is the sum of the memory limits for all the Containers in the Pod. If set, containers cannot use more RAM than the configured limit.",en:{instance:{one:"pod",other:"pods"}}},"k8s_state.pod_condition":{info:'The current state of the Pod. More info.

    PodReady - the Pod is able to serve requests and should be added to the load balancing pools of all matching Services. PodScheduled - the Pod has been scheduled to a node. PodInitialized - all init containers have completed successfully. ContainersReady - all containers in the Pod are ready. ',en:{instance:{one:"pod",other:"pods"}}},"k8s_state.pod_phase":{mainheads:[{groupBy:["selected"],selectedDimensions:["running"],chartLibrary:"easypiechart",title:"Total Running Pods",colors:s.default[0],layout:{left:0,top:0,width:2,height:5}},{groupBy:["selected"],selectedDimensions:["failed"],chartLibrary:"easypiechart",title:"Total Failed Pods",layout:{left:2,top:0,width:2,height:5}},{groupBy:["selected"],selectedDimensions:["succeeded"],chartLibrary:"easypiechart",title:"Total Succeeded Pods",colors:s.default[4],layout:{left:6,top:0,width:2,height:5}},{groupBy:["selected"],selectedDimensions:["pending"],chartLibrary:"easypiechart",title:"Total Pending Pods",colors:s.default[12],layout:{left:4,top:0,width:2,height:5}}],info:'High-level summary of where the Pod is in its lifecycle. More info.

    Running - the Pod has been bound to a node, and all of the containers have been created. At least one container is still running, or is in the process of starting or restarting. Failed - all containers in the Pod have terminated, and at least one container has terminated in failure. That is, the container either exited with non-zero status or was terminated by the system. Succeeded - all containers in the Pod have terminated in success, and will not be restarted. Pending - the Pod has been accepted by the Kubernetes cluster, but one or more of the containers has not been set up and made ready to run. This includes time a Pod spends waiting to be scheduled as well as the time spent downloading container images over the network. ',en:{instance:{one:"pod",other:"pods"}}},"k8s_state.pod_age":{info:'The lifetime of the Pod. ',en:{instance:{one:"pod",other:"pods"}}},"k8s_state.pod_containers":{info:"The number of containers and init containers belonging to the Pod.",en:{instance:{one:"pod",other:"pods"}}},"k8s_state.pod_containers_state":{info:'The state of each container inside this Pod. More info.

    Running - a container is executing without issues. Waiting - a container is still running the operations it requires in order to complete start up. Terminated - a container began execution and then either ran to completion or failed for some reason.

    ',en:{instance:{one:"pod",other:"pods"}}},"k8s_state.pod_init_containers_state":{info:'The state of each init container inside this Pod. More info.

    Running - a container is executing without issues. Waiting - a container is still running the operations it requires in order to complete start up. Terminated - a container began execution and then either ran to completion or failed for some reason.

    ',en:{instance:{one:"pod",other:"pods"}}},"k8s_state.pod_container_readiness_state":{info:"Specifies whether the container has passed its readiness probe. Kubelet uses readiness probes to know when a container is ready to start accepting traffic.",en:{instance:{one:"container",other:"containers"}}},"k8s_state.pod_container_restarts":{mainheads:[{groupBy:["selected"],chartLibrary:"dygraph",sparkline:!0,overlays:{latestValue:{type:"latestValue"}},hasToolbox:!1,title:"Container restarts",layout:{left:0,top:22.5,width:4,height:2.5}}],info:"The number of times the container has been restarted.",en:{instance:{one:"container",other:"containers"}}},"k8s_state.pod_container_state":{info:'Current state of the container. More info.

    Running - a container is executing without issues. Waiting - a container is still running the operations it requires in order to complete start up. Terminated - a container began execution and then either ran to completion or failed for some reason.

    ',en:{instance:{one:"container",other:"containers"}}},"k8s_state.pod_container_waiting_state_reason":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Container waiting state reason",layout:{left:4,top:20,width:4,height:5}}],info:'Reason the container is not yet running. More info. ',en:{instance:{one:"container",other:"containers"}}},"k8s_state.pod_container_terminated_state_reason":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Container terminated state reason",layout:{left:8,top:20,width:4,height:5}}],info:'Reason from the last termination of the container. More info.',en:{instance:{one:"container",other:"containers"}}},"k8s_kubelet.apiserver_audit_requests_rejected":{info:"The number of audit requests to the Kubernetes API server that were rejected. Monitoring this metric can help identify if there are issues with the audit configuration or access control policies on the API server.",en:{instance:{one:"K8s API server",other:"K8s API servers"}}},"k8s_kubelet.apiserver_storage_data_key_generation_failures":{info:"The number of failures when generating data keys for encrypting and decrypting secrets stored in the Kubernetes API server. If this metric is non-zero, it may indicate issues with the encryption configuration or key management.",en:{instance:{one:"K8s API server",other:"K8s API servers"}}},"k8s_kubelet.apiserver_storage_data_key_generation_latencies":{info:"The latencies of data key generation requests to the Kubernetes API server. High latencies may indicate issues with the encryption configuration or key management.",en:{instance:{one:"K8s API server",other:"K8s API servers"}}},"k8s_kubelet.apiserver_storage_data_key_generation_latencies_percent":{aggregationMethod:"avg",info:"The percentile latencies of data key generation requests to the Kubernetes API server. High latencies may indicate issues with the encryption configuration or key management.",en:{instance:{one:"K8s API server",other:"K8s API servers"}}},"k8s_kubelet.apiserver_storage_envelope_transformation_cache_misses":{info:"The number of cache misses when transforming encryption envelopes for secrets stored in the Kubernetes API server. If this metric is non-zero, it may indicate issues with the encryption configuration or key management.",en:{instance:{one:"K8s API server",other:"K8s API servers"}}},"k8s_kubelet.kubelet_containers_running":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Running Containers",colors:s.default[2],layout:{left:3,top:0,width:3,height:5}}],info:"The number of containers running on the node managed by the kubelet. Monitoring this metric can help identify if there are issues with container scheduling or resource allocation.",en:{instance:{one:"kubelet",other:"kubelets"}}},"k8s_kubelet.kubelet_pods_running":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Running Pods",colors:s.default[0],layout:{left:0,top:0,width:3,height:5}}],info:"The number of pods running on the node managed by the kubelet. Monitoring this metric can help identify if there are issues with pod scheduling or resource allocation.",en:{instance:{one:"kubelet",other:"kubelets"}}},"k8s_kubelet.kubelet_pods_log_filesystem_used_bytes":{mainheads:[{chartLibrary:"bars",title:"Pod Logs Disk Usage",layout:{left:9,top:5,width:4,height:5}}],info:"The amount of disk space used by pod logs on the node managed by the kubelet. 
Monitoring this metric can help identify if there are issues with disk usage or log rotation.",en:{instance:{one:"kubelet",other:"kubelets"}}},"k8s_kubelet.kubelet_runtime_operations":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Runtime Operations by Type",layout:{left:6,top:0,width:3,height:5}}],info:"The total number of runtime operations performed by the kubelet on the node, such as starting and stopping containers. Monitoring this metric can help identify if there are issues with the runtime or container management.",en:{instance:{one:"kubelet",other:"kubelets"}}},"k8s_kubelet.kubelet_runtime_operations_errors":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Runtime Operations Errors by Type",layout:{left:9,top:0,width:3,height:5}}],info:"The number of runtime operation errors on the node managed by the kubelet. If this metric is non-zero, it may indicate issues with the runtime or container management.",en:{instance:{one:"kubelet",other:"kubelets"}}},"k8s_kubelet.kubelet_docker_operations":{info:"The total number of Docker operations performed by the kubelet on the node, such as pulling images or creating containers. Monitoring this metric can help identify if there are issues with the Docker daemon or container management.",en:{instance:{one:"kubelet",other:"kubelets"}}},"k8s_kubelet.kubelet_docker_operations_errors":{info:"The number of Docker operation errors on the node managed by the kubelet. If this metric is non-zero, it may indicate issues with the Docker daemon or container management.",en:{instance:{one:"kubelet",other:"kubelets"}}},"k8s_kubelet.kubelet_node_config_error":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Node Configuration Errors",layout:{left:7,top:5,width:2,height:5}}],info:"The number of errors in the node configuration on the node managed by the kubelet. If this metric is non-zero, it may indicate issues with the node's configuration or the kubelet's ability to read and apply it.",en:{instance:{one:"kubelet",other:"kubelets"}}},"k8s_kubelet.kubelet_pleg_relist_interval_microseconds":{aggregationMethod:"avg",info:"The interval, in microseconds, between periodic relists performed by the kubelet's Pod Lifecycle Event Generator (PLEG). Monitoring this metric can help identify if the PLEG is not able to keep up with the rate of change in the pod lifecycle on the node.",en:{instance:{one:"kubelet",other:"kubelets"}}},"k8s_kubelet.kubelet_pleg_relist_latency_microseconds":{aggregationMethod:"avg",info:"The latency, in microseconds, of the periodic relists performed by the kubelet's Pod Lifecycle Event Generator (PLEG). High latencies may indicate issues with the PLEG or the pod lifecycle on the node.",en:{instance:{one:"kubelet",other:"kubelets"}}},"k8s_kubelet.kubelet_token_requests":{mainheads:[{groupBy:["dimension"],chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Total and Failed No. of Token Requests",colors:s.default[0],layout:{left:3,top:5,width:3,height:5}}],info:"The number of token requests to the kubelet. Monitoring this metric can help identify if there are issues with authentication or access control on the node.",en:{instance:{one:"kubelet",other:"kubelets"}}},"k8s_kubelet.rest_client_requests_by_code":{info:"The distribution of HTTP response codes for requests to the kubelet's REST API. 
Monitoring this metric can help identify if there are issues with the kubelet's API or the components interacting with it.",en:{instance:{one:"kubelet",other:"kubelets"}}},"k8s_kubelet.rest_client_requests_by_method":{mainheads:[{groupBy:["selected"],chartLibrary:"dygraph",sparkline:!0,overlays:{latestValue:{type:"latestValue"}},hasToolbox:!1,title:"HTTP Requests Rate to API",colors:s.default[0],layout:{left:0,top:5,width:3,height:2.5}}],info:"The distribution of HTTP request methods for requests to the kubelet's REST API. Monitoring this metric can help identify the usage patterns of the API and potential issues with specific request methods.",en:{instance:{one:"kubelet",other:"kubelets"}}},"k8s_kubelet.volume_manager_total_volumes":{mainheads:[{groupBy:["dimension"],chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Volume Manager State",layout:{left:0,top:7.5,width:3,height:2.5}}],info:"The total number of volumes managed by the kubelet's volume manager. Monitoring this metric can help identify if there are issues with volume management on the node.",en:{instance:{one:"kubelet",other:"kubelets"}}},"k8s_kubeproxy.kubeproxy_sync_proxy_rules":{mainheads:[{groupBy:["selected"],chartLibrary:"gauge",title:"Total Sync Proxy Rules",colors:s.default[4],layout:{left:0,top:0,width:3,height:5}}],info:"The total number of synced proxy rules in the Kubernetes proxy. Monitoring this metric can help identify if there are issues with the proxy or its configuration.",en:{instance:{one:"proxy",other:"proxies"}}},"k8s_kubeproxy.kubeproxy_sync_proxy_rules_latency_microsecond":{aggregationMethod:"avg",info:"The latencies of proxy rule sync operations in the Kubernetes proxy. High latencies may indicate issues with the proxy or its configuration.",en:{instance:{one:"proxy",other:"proxies"}}},"k8s_kubeproxy.kubeproxy_sync_proxy_rules_latency":{aggregationMethod:"avg",info:"The latencies of proxy rule sync operations in the Kubernetes proxy. High latencies may indicate issues with the proxy or its configuration.",en:{instance:{one:"proxy",other:"proxies"}}},"k8s_kubeproxy.rest_client_requests_by_code":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"HTTP Response Code Distribution",colors:s.default[4],layout:{left:3,top:0,width:3,height:5}}],info:"The distribution of HTTP response codes for requests to the Kubernetes proxy's REST API. Monitoring this metric can help identify if there are issues with the proxy's API or the components interacting with it.",en:{instance:{one:"proxy",other:"proxies"}}},"k8s_kubeproxy.rest_client_requests_by_method":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"HTTP Request Method Distribution",colors:s.default[4],layout:{left:6,top:0,width:3,height:5}}],info:"The distribution of HTTP request methods for requests to the Kubernetes proxy's REST API. Monitoring this metric can help identify the usage patterns of the API and potential issues with specific request methods.",en:{instance:{one:"proxy",other:"proxies"}}},"k8s_kubeproxy.http_request_duration":{info:"The latencies of HTTP requests handled by the Kubernetes proxy. 
High latencies may indicate performance issues with the proxy or the components interacting with it.",en:{instance:{one:"proxy",other:"proxies"}}},"windows.logical_disk_bandwidth":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",selectedDimensions:["read"],title:"Total Disk Read",layout:{left:0,top:0,width:2,height:5}},{groupBy:["selected"],chartLibrary:"easypiechart",selectedDimensions:["write"],title:"Total Disk Write",layout:{left:3,top:0,width:2,height:5}}],en:{instance:{one:"system",other:"systems"}}},"windows.cpu_utilization_total":{aggregationMethod:"avg",mainheads:[{groupBy:["instance"],showPostAggregations:!0,postGroupBy:["selected"],aggregationMethod:"avg",valueRange:[0,100],chartLibrary:"gauge",title:"Average CPU Utilization",colors:s.default[12],layout:{left:6,top:0,width:2,height:5}}],en:{instance:{one:"system",other:"systems"}}},"windows.net_nic_bandwidth":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",selectedDimensions:["received"],title:"Total Net Inbound",layout:{left:9,top:0,width:2,height:5}},{groupBy:["selected"],chartLibrary:"easypiechart",selectedDimensions:["sent"],title:"Total Net Outbound",layout:{left:12,top:0,width:2,height:5}}],en:{instance:{one:"system",other:"systems"}}},"windows.memory_utilization":{aggregationMethod:"avg",mainheads:[{groupBy:["percentage-of-instance"],showPostAggregations:!0,postGroupBy:["selected"],aggregationMethod:"max",chartLibrary:"gauge",selectedDimensions:["used"],colors:s.default[0],title:"Maximum RAM Used",valueRange:[0,100],layout:{left:15,top:0,width:2,height:5}}],en:{instance:{one:"system",other:"systems"}}},"windows.processes_cpu_time":{aggregationMethod:"avg",info:'Total CPU utilization. The amount of time spent by the process in user and privileged modes.',en:{instance:{one:"system",other:"systems"}}},"windows.processes_handles":{info:'Total number of handles the process has open. This number is the sum of the handles currently open by each thread in the process.',en:{instance:{one:"system",other:"systems"}}},"windows.processes_io_operations":{info:"I/O operations issued in different modes (read, write, other). This property counts all I/O activity generated by the process to include file, network, and device I/Os. Read and write mode includes data operations; other mode includes those that do not involve data, such as control operations.",en:{instance:{one:"system",other:"systems"}}},"windows.processes_pool_bytes":{info:"Pool Bytes is the last observed number of bytes in the paged or nonpaged pool. The nonpaged pool is an area of system memory (physical memory used by the operating system) for objects that cannot be written to disk, but must remain in physical memory as long as they are allocated. 
The paged pool is an area of system memory (physical memory used by the operating system) for objects that can be written to disk when they are not being used.",en:{instance:{one:"system",other:"systems"}}},"windows.tcp_conns_active":{info:"Number of times TCP connections have made a direct transition from the CLOSED state to the SYN-SENT state.",en:{instance:{one:"system",other:"systems"}}},"windows.tcp_conns_established":{info:"Number of TCP connections for which the current state is either ESTABLISHED or CLOSE-WAIT.",en:{instance:{one:"system",other:"systems"}}},"windows.tcp_conns_failures":{info:"Number of times TCP connections have made a direct transition to the CLOSED state from the SYN-SENT state or the SYN-RCVD state, plus the number of times TCP connections have made a direct transition from the SYN-RCVD state to the LISTEN state.",en:{instance:{one:"system",other:"systems"}}},"windows.tcp_conns_passive":{info:"Number of times TCP connections have made a direct transition from the LISTEN state to the SYN-RCVD state.",en:{instance:{one:"system",other:"systems"}}},"windows.tcp_conns_resets":{info:"Number of times TCP connections have made a direct transition to the CLOSED state from either the ESTABLISHED state or the CLOSE-WAIT state.",en:{instance:{one:"system",other:"systems"}}},"windows.tcp_segments_received":{info:"Rate at which segments are received, including those received in error. This count includes segments received on currently established connections.",en:{instance:{one:"system",other:"systems"}}},"windows.tcp_segments_retransmitted":{info:"Rate at which segments are retransmitted, that is, segments transmitted that contain one or more previously transmitted bytes.",en:{instance:{one:"system",other:"systems"}}},"windows.tcp_segments_sent":{info:"Rate at which segments are sent, including those on current connections, but excluding those containing only retransmitted bytes.",en:{instance:{one:"system",other:"systems"}}},"windows.processes_cpu_utilization":{aggregationMethod:"avg",mainheads:[{groupBy:["dimension"],aggregationMethod:"avg",chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Average CPU per Process",layout:{left:6,top:5,width:2,height:5}}],info:"Total CPU utilization per process.",dimensionsSort:"valueDesc",en:{instance:{one:"system",other:"systems"}}},"windows.processes_memory_usage":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Total Memory per Process",layout:{left:9,top:5,width:2,height:5}}],dimensionsSort:"valueDesc",info:"Total Memory usage per process.",en:{instance:{one:"system",other:"systems"}}},"windows.processes_io_bytes":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Total I/O per Process",layout:{left:0,top:5,width:4,height:5}}],info:"Bytes issued to I/O operations in different modes (read, write, other). This property counts all I/O activity generated by the process to include file, network, and device I/Os. Read and write mode includes data operations; other mode includes those that do not involve data, such as control operations.",dimensionsSort:"valueDesc",en:{instance:{one:"system",other:"systems"}}},"windows.processes_page_faults":{info:"Page faults by the threads executing in this process. A page fault occurs when a thread refers to a virtual memory page that is not in its working set in main memory. 
This can cause the page not to be fetched from disk if it is on the standby list and hence already in main memory, or if it is in use by another process with which the page is shared.",dimensionsSort:"valueDesc",en:{instance:{one:"system",other:"systems"}}},"windows.processes_file_bytes":{info:"Current number of bytes this process has used in the paging file(s). Paging files are used to store pages of memory used by the process that are not contained in other files. Paging files are shared by all processes, and lack of space in paging files can prevent other processes from allocating memory.",dimensionsSort:"valueDesc",en:{instance:{one:"system",other:"systems"}}},"windows.processes_threads":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Total Active Threads per Process",layout:{left:12,top:5,width:4,height:5}}],info:"Number of threads currently active in this process. An instruction is the basic unit of execution in a processor, and a thread is the object that executes instructions. Every running process has at least one thread.",dimensionsSort:"valueDesc",en:{instance:{one:"system",other:"systems"}}},"iis.website_traffic":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",selectedDimensions:["received"],title:"Total Traffic Received",layout:{left:3,top:0,width:3,height:5}},{groupBy:["selected"],chartLibrary:"easypiechart",selectedDimensions:["sent"],title:"Total Traffic Sent",layout:{left:6,top:0,width:3,height:5}}],en:{instance:{one:"web server",other:"web servers"}}},"iis.website_active_connections_count":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Active Connections"}],en:{instance:{one:"web server",other:"web servers"}}},"iis.website_requests_rate":{mainheads:[{groupBy:["selected"],chartLibrary:"gauge",title:"Total Request Rate",colors:s.default[0]}],en:{instance:{one:"web server",other:"web servers"}}},"iis.website_isapi_extension_requests_count":{info:'The number of ISAPI extension requests that are processed concurrently by the web service.',en:{instance:{one:"web server",other:"web servers"}}},"iis.website_errors_rate":{mainheads:[{groupBy:["selected"],chartLibrary:"gauge",title:"Total Errors Rate",colors:s.default[0]}],info:"

    The number of requests that cannot be satisfied by the server.

    DocumentLocked - the requested document was locked. Usually reported as HTTP error 423. DocumentNotFound - the requested document was not found. Usually reported as HTTP error 404.
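// Illustrative only: the HTTP status codes usually reported for these error
// dimensions, per the description above:
const iisErrorHttpStatus = { DocumentLocked: 423, DocumentNotFound: 404 };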

    ",en:{instance:{one:"web server",other:"web servers"}}},"iis.website_uptime":{aggregationMethod:"min",mainheads:[{groupBy:["selected"],aggregationMethod:"min",chartLibrary:"number",title:"Minimum Uptime"}],en:{instance:{one:"web server",other:"web servers"}}},"mssql.instance_user_connection":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Connections",layout:{left:3,top:0,width:3,height:5}}],en:{instance:{one:"SQL server",other:"SQL servers"}}},"mssql.database_transactions":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Transactions",layout:{left:3,top:0,width:3,height:5}}],en:{instance:{one:"SQL server",other:"SQL servers"}}},"mssql.database_data_files_size":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total DB Size",layout:{left:3,top:0,width:3,height:5}}],en:{instance:{one:"SQL server",other:"SQL servers"}}},"mssql.instance_accessmethods_page_splits":{info:"A page split happens when a page runs out of free space. This chart shows the number of page splits per second that occur as the result of overflowing index pages.",en:{instance:{one:"SQL server",other:"SQL servers"}}},"mssql.instance_cache_hit_ratio":{aggregationMethod:"avg",mainheads:[{groupBy:["selected"],aggregationMethod:"avg",chartLibrary:"gauge",title:"Average Cache Hit Ratio",colors:s.default[0]}],info:"Indicates the percentage of pages found in the buffer cache without having to read from disk. The ratio is the total number of cache hits divided by the total number of cache lookups over the last few thousand page accesses. After a long period of time, the ratio moves very little. Because reading from the cache is much less expensive than reading from disk, you want this ratio to be high.",en:{instance:{one:"SQL server",other:"SQL servers"}}},"mssql.instance_bufman_checkpoint_pages":{info:"Indicates the number of pages flushed to disk per second by a checkpoint or other operations that require all dirty pages to be flushed.",en:{instance:{one:"SQL server",other:"SQL servers"}}},"mssql.instance_bufman_page_life_expectancy":{info:"Indicates the number of seconds a page will stay in the buffer pool without references.",en:{instance:{one:"SQL server",other:"SQL servers"}}},"mssql.instance_memmgr_external_benefit_of_memory":{info:"Used by the engine to balance memory usage between cache and other consumers, and useful when troubleshooting cases of unexpected cache growth. The value is presented as an integer based on an internal calculation.",en:{instance:{one:"SQL server",other:"SQL servers"}}},"mssql.instance_sql_errors":{mainheads:[{groupBy:["selected"],chartLibrary:"gauge",title:"Total SQL Errors",colors:s.default[0]}],info:"Errors in Microsoft SQL Server.

    Db_offline - Tracks severe errors that cause SQL Server to take the current database offline. Info - informational messages that provide information to users but do not indicate errors. Kill_connection - Tracks severe errors that cause SQL Server to kill the current connection. User - User errors.

    ",en:{instance:{one:"SQL server",other:"SQL servers"}}},"mssql.instance_sqlstats_auto_parameterization_attempts":{info:"Auto-parameterization occurs when an instance of SQL Server tries to parameterize a Transact-SQL request by replacing some literals with parameters so that reuse of the resulting cached execution plan across multiple similar-looking requests is possible. Note that auto-parameterizations are also known as simple parameterizations in newer versions of SQL Server. This counter does not include forced parameterizations.",en:{instance:{one:"SQL server",other:"SQL servers"}}},"mssql.instance_sqlstats_batch_requests":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Batch Requests",layout:{left:3,top:0,width:3,height:5}}],info:"This statistic is affected by all constraints (such as I/O, number of users, cache size, complexity of requests, and so on). High batch requests mean good throughput.",en:{instance:{one:"SQL server",other:"SQL servers"}}},"mssql.instance_sqlstats_safe_auto_parameterization_attempts":{info:"Note that auto-parameterizations are also known as simple parameterizations in later versions of SQL Server.",en:{instance:{one:"SQL server",other:"SQL servers"}}},"mssql.instance_sqlstats_sql_compilations":{info:"Indicates the number of times the compile code path is entered. Includes compiles caused by statement-level recompilations in SQL Server. After SQL Server user activity is stable, this value reaches a steady state.",en:{instance:{one:"SQL server",other:"SQL servers"}}},"ad.binds":{mainheads:[{groupBy:["selected"],chartLibrary:"gauge",title:"Total Binds",colors:s.default[0]}],en:{instance:{one:"AD server",other:"AD servers"}}},"ad.ldap_searches":{mainheads:[{groupBy:["selected"],chartLibrary:"gauge",title:"Total LDAP Searches",colors:s.default[0]}],en:{instance:{one:"AD server",other:"AD servers"}}},"ad.dra_replication_intersite_compressed_traffic":{info:"The compressed size, in bytes, of inbound and outbound compressed replication data (size after compression, from DSAs in other sites).",en:{instance:{one:"AD server",other:"AD servers"}}},"ad.dra_replication_intrasite_compressed_traffic":{info:"The number of bytes replicated that were not compressed (that is, from DSAs in the same site).",en:{instance:{one:"AD server",other:"AD servers"}}},"ad.dra_replication_properties_updated":{info:"The number of properties updated when an incoming property wins the reconciliation logic that determines the final value to be replicated.",en:{instance:{one:"AD server",other:"AD servers"}}},"ad.dra_replication_objects_filtered":{info:"The number of objects received from inbound replication partners that contained no updates that needed to be applied.",en:{instance:{one:"AD server",other:"AD servers"}}},"ad.dra_replication_pending_syncs":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Pending Syncs",layout:{left:3,top:0,width:3,height:5}}],info:"The number of directory synchronizations that are queued for this server but not yet processed.",en:{instance:{one:"AD server",other:"AD servers"}}},"ad.dra_replication_sync_requests":{mainheads:[{groupBy:["selected"],chartLibrary:"gauge",title:"Total Sync Requests",colors:s.default[0]}],info:"The number of directory synchronization requests made to replication partners.",en:{instance:{one:"AD server",other:"AD servers"}}},"netframework.clrexception_thrown":{info:"The exceptions include both .NET exceptions and unmanaged 
exceptions that are converted into .NET exceptions.",en:{instance:{one:".NET server",other:".NET servers"}}},"netframework.clrexception_filters":{info:"An exception filter evaluates regardless of whether an exception is handled.",en:{instance:{one:".NET server",other:".NET servers"}}},"netframework.clrexception_finallys":{info:"The metric counts only the finally blocks executed for an exception; finally blocks on normal code paths are not counted by this counter.",en:{instance:{one:".NET server",other:".NET servers"}}},"netframework.clrinterop_com_callable_wrappers":{info:"A COM callable wrapper (CCW) is a proxy for a managed object being referenced from an unmanaged COM client.",en:{instance:{one:".NET server",other:".NET servers"}}},"netframework.clrinterop_interop_stubs_created":{info:"Stubs are responsible for marshaling arguments and return values from managed to unmanaged code, and vice versa, during a COM interop call or a platform invoke call.",en:{instance:{one:".NET server",other:".NET servers"}}},"netframework.clrjit_methods":{info:"The metric does not include pre-JIT-compiled methods.",en:{instance:{one:".NET server",other:".NET servers"}}},"netframework.clrjit_time":{aggregationMethod:"avg",info:"The metric is updated at the end of every JIT compilation phase. A JIT compilation phase occurs when a method and its dependencies are compiled.",en:{instance:{one:".NET server",other:".NET servers"}}},"netframework.clrjit_standard_failures":{info:"The failure can occur if the MSIL cannot be verified or if there is an internal error in the JIT compiler.",en:{instance:{one:".NET server",other:".NET servers"}}},"netframework.clrloading_loader_heap_size":{info:"The memory committed by the class loader across all application domains is the physical space reserved in the disk paging file.",en:{instance:{one:".NET server",other:".NET servers"}}},"netframework.clrloading_assemblies_loaded":{info:"If the assembly is loaded as domain-neutral from multiple application domains, the metric is incremented only once.",en:{instance:{one:".NET server",other:".NET servers"}}},"netframework.clrlocksandthreads_recognized_threads":{info:"Displays the total number of threads that have been recognized by the runtime since the application started. These threads are associated with a corresponding managed thread object. The runtime does not create these threads, but they have run inside the runtime at least once.",en:{instance:{one:".NET server",other:".NET servers"}}},"netframework.clrmemory_heap_size":{info:"The metric shows maximum bytes that can be allocated, but it does not indicate the current number of bytes allocated.",en:{instance:{one:".NET server",other:".NET servers"}}},"netframework.clrmemory_promoted":{info:"Memory is promoted when it survives a garbage collection.",en:{instance:{one:".NET server",other:".NET servers"}}},"netframework.clrmemory_number_gc_handles":{info:"Garbage collection handles are handles to resources external to the common language runtime and the managed environment.",en:{instance:{one:".NET server",other:".NET servers"}}},"netframework.clrmemory_induced_gc":{info:"The metric is updated when an explicit call to GC.Collect happens.",en:{instance:{one:".NET server",other:".NET servers"}}},"netframework.clrmemory_number_sink_blocks_in_use":{info:"Synchronization blocks are per-object data structures allocated for storing synchronization information. 
They hold weak references to managed objects and must be scanned by the garbage collector.",en:{instance:{one:".NET server",other:".NET servers"}}},"netframework.clrmemory_committed":{info:"Committed memory is the physical memory for which space has been reserved in the disk paging file.",en:{instance:{one:".NET server",other:".NET servers"}}},"netframework.clrmemory_reserved":{info:"Reserved memory is the virtual memory space reserved for the application when no disk or main memory pages have been used.",en:{instance:{one:".NET server",other:".NET servers"}}},"netframework.clrmemory_gc_time":{aggregationMethod:"avg",info:"Displays the percentage of time that was spent performing a garbage collection in the last sample.",en:{instance:{one:".NET server",other:".NET servers"}}},"adaptecraid.logical_device_status":{info:"Logical Device health status.",en:{instance:{one:"logical device",other:"logical devices"}}},"adaptecraid.physical_device_state":{info:"Physical Device health state.",en:{instance:{one:"physical device",other:"physical devices"}}},"adaptecraid.physical_device_smart_warnings":{info:"Physical Device SMART warnings.",en:{instance:{one:"physical device",other:"physical devices"}}},"adaptecraid.physical_device_temperature":{info:"Physical Device temperature.",aggregationMethod:"max",en:{instance:{one:"physical device",other:"physical devices"}}},"adaptec_raid.ld_status":{info:"Status of logical devices (1: Failed or Degraded).",en:{instance:{one:"device",other:"devices"}}},"adaptec_raid.pd_state":{info:"State of physical devices (1: not Online).",en:{instance:{one:"device",other:"devices"}}},"adaptec_raid.smart_warnings":{info:"S.M.A.R.T warnings.",en:{instance:{one:"device",other:"devices"}}},"adaptec_raid.temperature":{info:"Temperature.",en:{instance:{one:"device",other:"devices"}}},"alerts.status":{info:"Alert Values.",en:{instance:{one:"alert",other:"alerts"}}},"am2320.temperature":{aggregationMethod:"avg",info:"Temperature.",en:{instance:{one:"device",other:"devices"}}},"am2320.humidity":{aggregationMethod:"avg",info:"Relative Humidity.",en:{instance:{one:"device",other:"devices"}}},"anomalies.probability":{info:"Anomaly Probability.",en:{instance:{one:"device",other:"devices"}}},"anomalies.anomaly":{info:"Anomaly.",en:{instance:{one:"device",other:"devices"}}},"ap.clients":{info:"Connected clients to ${ssid} on ${dev}.",en:{instance:{one:"device",other:"devices"}}},"ap.net":{info:"Bandwidth for ${ssid} on ${dev}.",en:{instance:{one:"device",other:"devices"}}},"ap.packets":{info:"Packets for ${ssid} on ${dev}.",en:{instance:{one:"device",other:"devices"}}},"ap.issues":{info:"Transmit Issues for ${ssid} on ${dev}.",en:{instance:{one:"device",other:"devices"}}},"ap.signal":{info:"Average Signal for ${ssid} on ${dev}.",en:{instance:{one:"device",other:"devices"}}},"ap.bitrate":{info:"Bitrate for ${ssid} on ${dev}.",en:{instance:{one:"device",other:"devices"}}},"apcupsd.charge":{aggregationMethod:"avg",info:"UPS Charge.",en:{instance:{one:"device",other:"devices"}}},"apcupsd.battery.voltage":{aggregationMethod:"avg",info:"UPS Battery Voltage.",en:{instance:{one:"device",other:"devices"}}},"apcupsd.input.voltage":{aggregationMethod:"avg",info:"UPS Input Voltage.",en:{instance:{one:"device",other:"devices"}}},"apcupsd.output.voltage":{aggregationMethod:"avg",info:"UPS Output Voltage.",en:{instance:{one:"device",other:"devices"}}},"apcupsd.input.frequency":{aggregationMethod:"avg",info:"UPS Input 
Frequency.",en:{instance:{one:"device",other:"devices"}}},"apcupsd.load":{aggregationMethod:"avg",info:"UPS Load.",en:{instance:{one:"device",other:"devices"}}},"apcupsd.load_usage":{info:"UPS Load Usage.",en:{instance:{one:"device",other:"devices"}}},"apcupsd.temperature":{aggregationMethod:"avg",info:"UPS Temperature.",en:{instance:{one:"device",other:"devices"}}},"apcupsd.time":{aggregationMethod:"min",info:"UPS Time Remaining.",en:{instance:{one:"device",other:"devices"}}},"apcupsd.online":{info:"UPS ONLINE flag.",en:{instance:{one:"device",other:"devices"}}},"netdata.apps_cpu":{info:"Apps Plugin CPU.",en:{instance:{one:"agent",other:"agents"}}},"netdata.apps_sizes":{info:"Apps Plugin Files.",en:{instance:{one:"agent",other:"agents"}}},"netdata.apps_fix":{aggregationMethod:"avg",info:"Apps Plugin Normalization Ratios.",en:{instance:{one:"agent",other:"agents"}}},"netdata.apps_children_fix":{aggregationMethod:"avg",info:"Apps Plugin Exited Children Normalization Ratios.",en:{instance:{one:"agent",other:"agents"}}},"bind_rndc.name_server_statistics":{info:"Name Server Statistics.",en:{instance:{one:"server",other:"servers"}}},"bind_rndc.incoming_queries":{info:"Incoming queries.",en:{instance:{one:"server",other:"servers"}}},"bind_rndc.outgoing_queries":{info:"Outgoing queries.",en:{instance:{one:"server",other:"servers"}}},"bind_rndc.stats_size":{info:"Named Stats File Size.",en:{instance:{one:"server",other:"servers"}}},"cassandra.storage_exceptions_rate":{info:"Storage exceptions rate.",en:{instance:{one:"db server",other:"db servers"}}},"ceph.pool_read_operations":{info:"Ceph Read Pool Operations/s.",en:{instance:{one:"pool",other:"pools"}}},"ceph.pool_write_operations":{info:"Ceph Write Pool Operations/s.",en:{instance:{one:"pool",other:"pools"}}},"services.services.throttle_io_write":{info:"Systemd Services Throttle Disk Write Bandwidth.",en:{instance:{one:"system",other:"systems"}}},throttle_io_ops_write:{info:"Systemd Services Throttle Disk Write Operations.",en:{instance:{one:"system",other:"systems"}}},"changefinder.scores":{info:"ChangeFinder.",en:{instance:{one:"system",other:"systems"}}},"changefinder.flags":{info:"ChangeFinder.",en:{instance:{one:"system",other:"systems"}}},"cockroachdb.process_cpu_time":{aggregationMethod:"avg",info:"CPU Time.",en:{instance:{one:"db server",other:"db servers"}}},"cockroachdb.sql_connections":{info:"Active SQL Connections.",en:{instance:{one:"db server",other:"db servers"}}},"cockroachdb.logical_data":{info:"Logical Data.",en:{instance:{one:"db server",other:"db servers"}}},"cockroachdb.logical_data_count":{info:"Logical Data Count.",en:{instance:{one:"db server",other:"db servers"}}},"cockroachdb.range_events":{info:"Range Events.",en:{instance:{one:"db server",other:"db servers"}}},"cockroachdb.range_snapshot_events":{info:"Range Snapshot Events.",en:{instance:{one:"db server",other:"db servers"}}},"cockroachdb.rocksdb_read_amplification":{info:"RocksDB Read Amplification.",en:{instance:{one:"db server",other:"db servers"}}},"cockroachdb.rocksdb_cache_usage":{info:"RocksDB Block Cache Usage.",en:{instance:{one:"db server",other:"db servers"}}},"cockroachdb.rocksdb_cache_operations":{info:"RocksDB Block Cache Operations.",en:{instance:{one:"db server",other:"db servers"}}},"cockroachdb.rocksdb_sstables":{info:"RocksDB SSTables.",en:{instance:{one:"db server",other:"db servers"}}},"cockroachdb.replicas_quiescence":{info:"Replicas Quiescence.",en:{instance:{one:"db server",other:"db 
servers"}}},"consul.autopilot_server_health_status":{info:"Autopilot server health status.",en:{instance:{one:"datacenter",other:"datacenters"}}},"consul.autopilot_server_stable_time":{aggregationMethod:"avg",info:"Autopilot server stable time.",en:{instance:{one:"datacenter",other:"datacenters"}}},"consul.autopilot_server_serf_status":{info:"Autopilot server Serf status.",en:{instance:{one:"datacenter",other:"datacenters"}}},"consul.autopilot_server_voter_status":{info:"Autopilot server Raft voting membership.",en:{instance:{one:"datacenter",other:"datacenters"}}},"consul.network_lan_rtt":{info:"Network lan RTT.",en:{instance:{one:"datacenter",other:"datacenters"}}},"consul.raft_follower_last_contact_leader_time":{aggregationMethod:"avg",info:"Raft follower last contact with the leader time.",en:{instance:{one:"datacenter",other:"datacenters"}}},"coredns.dns_request_count_total_per_status":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Dropped DNS Requests",selectedDimensions:["dropped"],colors:s.default[1],layout:{left:12,top:0,width:2,height:5}}],info:"Number Of Processed And Dropped DNS Requests.",en:{instance:{one:"server",other:"servers"}}},"coredns.dns_requests_count_total_per_proto":{info:"Number Of DNS Requests Per Transport Protocol.",en:{instance:{one:"server",other:"servers"}}},"coredns.dns_requests_count_total_per_ip_family":{info:"Number Of DNS Requests Per IP Family.",en:{instance:{one:"server",other:"servers"}}},"coredns.dns_requests_count_total_per_per_type":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"DNS Requests per Type",layout:{left:3,top:0,width:2.5,height:5}}],info:"Number Of DNS Requests Per Type.",en:{instance:{one:"server",other:"servers"}}},"coredns.dns_responses_count_total_per_rcode":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"DNS Responses per Rcode",layout:{left:9,top:0,width:2.5,height:5}}],info:"Number Of DNS Responses Per Rcode.",en:{instance:{one:"server",other:"servers"}}},"coredns.server_dns_request_count_total":{info:"Number Of DNS Requests.",en:{instance:{one:"server",other:"servers"}}},"coredns.server_dns_responses_count_total":{info:"Number Of DNS Responses.",en:{instance:{one:"server",other:"servers"}}},"coredns.server_request_count_total_per_status":{info:"Number Of Processed And Dropped DNS Requests.",en:{instance:{one:"server",other:"servers"}}},"coredns.server_requests_count_total_per_proto":{info:"Number Of DNS Requests Per Transport Protocol.",en:{instance:{one:"server",other:"servers"}}},"coredns.server_requests_count_total_per_ip_family":{info:"Number Of DNS Requests Per IP Family.",en:{instance:{one:"server",other:"servers"}}},"coredns.server_requests_count_total_per_per_type":{info:"Number Of DNS Requests Per Type.",en:{instance:{one:"server",other:"servers"}}},"coredns.server_responses_count_total_per_rcode":{info:"Number Of DNS Responses Per Rcode.",en:{instance:{one:"server",other:"servers"}}},"coredns.zone_dns_request_count_total":{info:"Number Of DNS Requests.",en:{instance:{one:"zone",other:"zones"}}},"coredns.zone_dns_responses_count_total":{info:"Number Of DNS Responses.",en:{instance:{one:"zone",other:"zones"}}},"coredns.zone_requests_count_total_per_proto":{info:"Number Of DNS Requests Per Transport Protocol.",en:{instance:{one:"zone",other:"zones"}}},"coredns.zone_requests_count_total_per_ip_family":{info:"Number Of DNS Requests Per IP Family.",en:{instance:{one:"zone",other:"zones"}}},"coredns.zone_requests_count_total_per_per_type":{info:"Number Of DNS 
Requests Per Type.",en:{instance:{one:"zone",other:"zones"}}},"coredns.zone_responses_count_total_per_rcode":{info:"Number Of DNS Responses Per Rcode.",en:{instance:{one:"zone",other:"zones"}}},"couchbase.bucket_quota_percent_used":{aggregationMethod:"avg",info:"Quota Percent Used Per Bucket.",en:{instance:{one:"bucket",other:"buckets"}}},"couchbase.bucket_ops_per_sec":{info:"Operations Per Second Per Bucket.",en:{instance:{one:"bucket",other:"buckets"}}},"couchbase.bucket_disk_fetches":{info:"Disk Fetches Per Bucket.",en:{instance:{one:"bucket",other:"buckets"}}},"couchbase.bucket_item_count":{info:"Item Count Per Bucket.",en:{instance:{one:"bucket",other:"buckets"}}},"couchbase.bucket_disk_used_stats":{info:"Disk Used Per Bucket.",en:{instance:{one:"bucket",other:"buckets"}}},"couchbase.bucket_data_used":{info:"Data Used Per Bucket.",en:{instance:{one:"bucket",other:"buckets"}}},"couchbase.bucket_mem_used":{info:"Memory Used Per Bucket.",en:{instance:{one:"bucket",other:"buckets"}}},"couchbase.bucket_vb_active_num_non_resident":{info:"Number Of Non-Resident Items Per Bucket.",en:{instance:{one:"bucket",other:"buckets"}}},"cups.dests_state":{family:"destination",info:"Destinations by state.",en:{instance:{one:"system",other:"systems"}}},"cups.dests_option":{family:"destination",info:"Destinations by option.",en:{instance:{one:"system",other:"systems"}}},"cups.job_num":{family:"jobs",info:"Active jobs.",en:{instance:{one:"system",other:"systems"}}},"cups.job_size":{family:"jobs",info:"Active jobs size.",en:{instance:{one:"system",other:"systems"}}},"cups.destination_job_num":{family:"jobs",info:"Active jobs of {destination}.",en:{instance:{one:"destination",other:"destinations"}}},"cups.destination_job_size":{family:"jobs",info:"Active jobs size of {destination}.",en:{instance:{one:"destination",other:"destinations"}}},"dnsdist.queries":{info:"Client queries received.",en:{instance:{one:"server",other:"servers"}}},"dnsdist.queries_dropped":{info:"Client queries dropped.",en:{instance:{one:"server",other:"servers"}}},"dnsdist.packets_dropped":{info:"Packets dropped.",en:{instance:{one:"server",other:"servers"}}},"dnsdist.answers":{info:"Answers statistics.",en:{instance:{one:"server",other:"servers"}}},"dnsdist.backend_responses":{info:"Backend responses.",en:{instance:{one:"server",other:"servers"}}},"dnsdist.backend_commerrors":{info:"Backend communication errors.",en:{instance:{one:"server",other:"servers"}}},"dnsdist.backend_errors":{info:"Backend error responses.",en:{instance:{one:"server",other:"servers"}}},"dnsdist.cache":{info:"Cache performance.",en:{instance:{one:"server",other:"servers"}}},"dnsdist.servercpu":{info:"DNSdist server CPU utilization.",en:{instance:{one:"server",other:"servers"}}},"dnsdist.servermem":{info:"DNSdist server memory utilization.",en:{instance:{one:"server",other:"servers"}}},"dnsdist.query_latency":{aggregationMethod:"avg",info:"Query latency.",en:{instance:{one:"server",other:"servers"}}},"dnsdist.query_latency_avg":{info:"Average latency for the last N queries.",en:{instance:{one:"server",other:"servers"}}},"dnsmasq_dhcp.dhcp_ranges":{info:"Number of DHCP Ranges.",en:{instance:{one:"server",other:"servers"}}},"dnsmasq_dhcp.dhcp_hosts":{info:"Number of DHCP Hosts.",en:{instance:{one:"server",other:"servers"}}},"dnsmasq_dhcp.dhcp_range_utilization":{aggregationMethod:"avg",info:"DHCP Range utilization.",en:{instance:{one:"dhcp range",other:"dhcp ranges"}}},"dnsmasq_dhcp.dhcp_range_allocated_leases":{info:"DHCP Range Allocated 
Leases.",en:{instance:{one:"dhcp range",other:"dhcp ranges"}}},"dnsmasq.servers_queries":{info:"Queries forwarded to the upstream servers.",en:{instance:{one:"server",other:"servers"}}},"dnsmasq.cache_performance":{info:"Cache performance.",en:{instance:{one:"server",other:"servers"}}},"dnsmasq.cache_operations":{info:"Cache operations.",en:{instance:{one:"server",other:"servers"}}},"dnsmasq.cache_size":{aggregationMethod:"avg",info:"Cache size.",en:{instance:{one:"server",other:"servers"}}},"dns_query.query_status":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"DNS Query Status",layout:{left:0,top:0,width:4,height:5}}],info:"DNS Query Status.",en:{instance:{one:"server",other:"servers"}}},"dns_query.query_time":{aggregationMethod:"avg",mainheads:[{groupBy:["selected"],aggregationMethod:"max",chartLibrary:"easypiechart",title:"Maximum DNS Query Response Time",valueRange:[0,null],colors:s.default[1],layout:{left:3,top:0,width:4,height:5}},{groupBy:["selected"],aggregationMethod:"avg",chartLibrary:"easypiechart",title:"Average DNS Query Response Time",valueRange:[0,null],colors:s.default[0],layout:{left:9,top:0,width:4,height:5}}],info:"DNS Query Time.",en:{instance:{one:"server",other:"servers"}}},"docker_engine.engine_daemon_container_actions":{info:"Container Actions.",en:{instance:{one:"system",other:"systems"}}},"docker_engine.engine_daemon_container_states_containers":{info:"Containers In Various States.",en:{instance:{one:"system",other:"systems"}}},"docker_engine.builder_builds_failed_total":{info:"Builder Build Failures By Reason.",en:{instance:{one:"system",other:"systems"}}},"docker_engine.engine_daemon_health_checks_failed_total":{info:"Health Checks.",en:{instance:{one:"system",other:"systems"}}},"docker_engine.swarm_manager_leader":{info:"Swarm Manager Leader.",en:{instance:{one:"system",other:"systems"}}},"docker_engine.swarm_manager_object_store":{info:"Swarm Manager Object Store.",en:{instance:{one:"system",other:"systems"}}},"docker_engine.swarm_manager_nodes_per_state":{info:"Swarm Manager Nodes Per State.",en:{instance:{one:"system",other:"systems"}}},"docker_engine.swarm_manager_tasks_per_state":{info:"Swarm Manager Tasks Per State.",en:{instance:{one:"system",other:"systems"}}},"docker.containers_health_status":{info:"Total number of Docker containers in various health states.",en:{instance:{one:"container",other:"containers"}}},"docker.container_writeable_layer_size":{info:"Docker container writable layer size.",en:{instance:{one:"container",other:"containers"}}},"dockerhub.pulls_sum":{info:"Pulls Summary.",en:{instance:{one:"repository",other:"repositories"}}},"dockerhub.pulls":{info:"Pulls.",en:{instance:{one:"repository",other:"repositories"}}},"dockerhub.pulls_rate":{info:"Pulls Rate.",en:{instance:{one:"repository",other:"repositories"}}},"dockerhub.stars":{info:"Stars.",en:{instance:{one:"repository",other:"repositories"}}},"dockerhub.status":{info:"Current Status.",en:{instance:{one:"repository",other:"repositories"}}},"dockerhub.last_updated":{info:"Time Since Last Updated.",en:{instance:{one:"repository",other:"repositories"}}},"dovecot.sessions":{info:"Dovecot Active Sessions.",en:{instance:{one:"server",other:"servers"}}},"dovecot.logins":{info:"Dovecot Logins.",en:{instance:{one:"server",other:"servers"}}},"dovecot.commands":{info:"Dovecot Commands.",en:{instance:{one:"server",other:"servers"}}},"dovecot.faults":{info:"Dovecot Page Faults.",en:{instance:{one:"server",other:"servers"}}},"dovecot.context_switches":{info:"Dovecot Context 
Switches.",en:{instance:{one:"server",other:"servers"}}},"dovecot.io":{info:"Dovecot Disk I/O.",en:{instance:{one:"server",other:"servers"}}},"dovecot.net":{info:"Dovecot Network Bandwidth.",en:{instance:{one:"server",other:"servers"}}},"dovecot.syscalls":{info:"Dovecot Number of SysCalls.",en:{instance:{one:"server",other:"servers"}}},"dovecot.lookup":{info:"Dovecot Lookups.",en:{instance:{one:"server",other:"servers"}}},"dovecot.cache":{info:"Dovecot Cache Hits.",en:{instance:{one:"server",other:"servers"}}},"dovecot.auth":{info:"Dovecot Authentications.",en:{instance:{one:"server",other:"servers"}}},"dovecot.auth_cache":{info:"Dovecot Authentication Cache.",en:{instance:{one:"server",other:"servers"}}},"cgroup.fd_closed":{info:"Files closed.",en:{instance:{one:"cgroup",other:"cgroups"}}},"services.file_open":{info:"Number of open files.",en:{instance:{one:"service",other:"services"}}},"services.file_open_error":{info:"Failed attempts to open files.",en:{instance:{one:"service",other:"services"}}},"services.file_closed":{info:"Files closed.",en:{instance:{one:"service",other:"services"}}},"services.file_close_error":{info:"Failed attempts to close files.",en:{instance:{one:"service",other:"services"}}},"mem.meory_map":{info:"Monitors calls to msync(2).",en:{instance:{one:"system",other:"systems"}}},"hpssa.controller_status":{info:"Controller status",en:{instance:{one:"controller",other:"controllers"}}},"hpssa.controller_temperature":{info:"Controller temperature",aggregationMethod:"max",groupBy:["label"],groupByLabel:["slot","model"],en:{instance:{one:"controller",other:"controllers"}}},"hpssa.controller_cache_module_presence_status":{info:"Controller cache module presence",en:{instance:{one:"cache module",other:"cache modules"}}},"hpssa.controller_cache_module_status":{info:"Controller cache module status",en:{instance:{one:"cache module",other:"cache modules"}}},"hpssa.controller_cache_module_temperature":{info:"Controller cache module temperature",aggregationMethod:"max",groupBy:["label"],groupByLabel:["slot","model"],en:{instance:{one:"cache module",other:"cache modules"}}},"hpssa.controller_cache_module_battery_status":{info:"Controller cache module battery status",en:{instance:{one:"cache module",other:"cache modules"}}},"hpssa.array_status":{info:"Array status",en:{instance:{one:"array",other:"arrays"}}},"hpssa.logical_drive_status":{info:"Logical Drive status",en:{instance:{one:"logical drive",other:"logical drives"}}},"hpssa.physical_drive_status":{info:"Physical Drive status",en:{instance:{one:"physical drive",other:"physical drives"}}},"hpssa.physical_drive_temperature":{info:"Physical Drive temperature",aggregationMethod:"max",groupBy:["label"],groupByLabel:["slot","location"],en:{instance:{one:"physical drive",other:"physical drives"}}},"megacli.adapter_health_state":{info:"Adapter health state",en:{instance:{one:"adapter",other:"adapters"}}},"megacli.phys_drive_media_errors":{info:"Physical Drive media errors rate",en:{instance:{one:"drive",other:"drives"}}},"megacli.phys_drive_predictive_failures":{info:"Physical Drive predictive failures rate",en:{instance:{one:"drive",other:"drives"}}},"megacli.bbu_charge":{info:"Backup Battery Unit charge",aggregationMethod:"min",en:{instance:{one:"battery unit",other:"battery units"}}},"megacli.bbu_recharge_cycles":{info:"Backup Battery Unit recharge cycles",aggregationMethod:"max",en:{instance:{one:"battery unit",other:"battery units"}}},"megacli.bbu_temperature":{info:"Backup Battery Unit temperature",aggregationMethod:"max",en:{instance:{one:"battery 
unit",other:"battery units"}}},"storcli.controller_status":{info:"Controller status",en:{instance:{one:"controller",other:"controllers"}}},"storcli.controller_bbu_status":{info:"Controller BBU status",en:{instance:{one:"controller",other:"controllers"}}},"storcli.phys_drive_errors":{info:"Physical Drive media errors rate",en:{instance:{one:"drive",other:"drives"}}},"storcli.phys_drive_predictive_failures":{info:"Physical Drive predictive failures rate",en:{instance:{one:"drive",other:"drives"}}},"storcli.phys_drive_smart_alert_status":{info:"Physical Drive SMART alert status",en:{instance:{one:"drive",other:"drives"}}},"storcli.phys_drive_temperature":{info:"Physical Drive temperature",aggregationMethod:"max",en:{instance:{one:"drive",other:"drives"}}},"storcli.bbu_temperature":{info:"BBU temperature",aggregationMethod:"max",en:{instance:{one:"bbu",other:"bbus"}}},"mdstat.mdstat_flush":{info:"MD flushes.",en:{instance:{one:"system",other:"systems"}}},"cgroup.oomkills":{info:"OOM kills. This chart is provided by eBPF plugin.",en:{instance:{one:"cgroup",other:"cgroups"}}},"services.oomkills":{info:"OOM kills. This chart is provided by eBPF plugin.",en:{instance:{one:"service",other:"services"}}},"apps.oomkills":{info:"OOM kills.",en:{instance:{one:"app group",other:"app groups"}}},"cgroup.net_conn_ipv4":{info:"Calls to tcp_v4_connection.",en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.net_conn_ipv6":{info:"Calls to tcp_v6_connection.",en:{instance:{one:"cgroup",other:"cgroups"}}},"cgroup.net_bytes_sent":{info:"Bytes sent.",en:{instance:{one:"cgroup",other:"cgroups"}}},"services.net_conn_ipv4":{info:"Calls to tcp_v4_connection.",en:{instance:{one:"service",other:"services"}}},"services.net_conn_ipv6":{info:"Calls to tcp_v6_connection.",en:{instance:{one:"service",other:"services"}}},"services.net_bytes_sent":{info:"Bytes sent.",en:{instance:{one:"service",other:"services"}}},"services.net_tcp_retransmit":{info:"Calls to tcp_retransmit.",en:{instance:{one:"service",other:"services"}}},"apps.dc_ratio":{aggregationMethod:"avg",info:"Percentage of files inside directory cache.",en:{instance:{one:"app group",other:"app groups"}}},"services.dc_ratio":{aggregationMethod:"avg",info:"Percentage of files inside directory cache.",en:{instance:{one:"service",other:"services"}}},"filesystem.read_latency":{aggregationMethod:"avg",info:"ext4 latency for each read request.",en:{instance:{one:"filesystem",other:"filesystems"}}},"filesystem.write_latency":{aggregationMethod:"avg",info:"ext4 latency for each write request.",en:{instance:{one:"filesystem",other:"filesystems"}}},"filesystem.open_latency":{aggregationMethod:"avg",info:"ext4 latency for each open request.",en:{instance:{one:"filesystem",other:"filesystems"}}},"filesystem.sync_latency":{aggregationMethod:"avg",info:"ext4 latency for each sync request.",en:{instance:{one:"filesystem",other:"filesystems"}}},"filesystem.attributte_latency":{aggregationMethod:"avg",info:"NFS latency for each attribute request.",en:{instance:{one:"filesystem",other:"filesystems"}}},"netdata.ebpf_aral_stat_size":{info:"Bytes allocated for ARAL.",en:{instance:{one:"agent",other:"agents"}}},"netdata.ebpf_aral_stat_alloc":{info:"Calls to allocate memory.",en:{instance:{one:"agent",other:"agents"}}},"netdata.ebpf_kernel_memory":{info:"Memory allocated for hash tables.",en:{instance:{one:"agent",other:"agents"}}},"netdata.ebpf_hash_tables_count":{info:"Number of hash tables 
loaded.",en:{instance:{one:"agent",other:"agents"}}},"elasticsearch.node_indices_indexing":{info:"Indexing Operations.",en:{instance:{one:"elastic node",other:"elastic nodes"}}},"elasticsearch.node_indices_indexing_current":{info:"Indexing Operations Current.",en:{instance:{one:"elastic node",other:"elastic nodes"}}},"elasticsearch.node_indices_indexing_time":{aggregationMethod:"avg",info:"Time Spent On Indexing Operations.",en:{instance:{one:"elastic node",other:"elastic nodes"}}},"elasticsearch.node_indices_search":{info:"Search Operations.",en:{instance:{one:"elastic node",other:"elastic nodes"}}},"elasticsearch.node_indices_search_current":{info:"Search Operations Current.",en:{instance:{one:"elastic node",other:"elastic nodes"}}},"elasticsearch.node_indices_search_time":{aggregationMethod:"avg",info:"Time Spent On Search Operations.",en:{instance:{one:"elastic node",other:"elastic nodes"}}},"elasticsearch.node_indices_refresh":{info:"Refresh Operations.",en:{instance:{one:"elastic node",other:"elastic nodes"}}},"elasticsearch.node_indices_refresh_time":{aggregationMethod:"avg",info:"Time Spent On Refresh Operations.",en:{instance:{one:"elastic node",other:"elastic nodes"}}},"elasticsearch.node_indices_flush":{info:"Flush Operations.",en:{instance:{one:"elastic node",other:"elastic nodes"}}},"elasticsearch.node_indices_flush_time":{aggregationMethod:"avg",info:"Time Spent On Flush Operations.",en:{instance:{one:"elastic node",other:"elastic nodes"}}},"elasticsearch.node_indices_fielddata_memory_usage":{info:"Fielddata Cache Memory Usage.",en:{instance:{one:"elastic node",other:"elastic nodes"}}},"elasticsearch.node_indices_fielddata_evictions":{info:"Fielddata Evictions.",en:{instance:{one:"elastic node",other:"elastic nodes"}}},"elasticsearch.node_indices_segments_count":{info:"Segments Count.",en:{instance:{one:"elastic node",other:"elastic nodes"}}},"elasticsearch.node_indices_segments_memory_usage_total":{info:"Segments Memory Usage Total.",en:{instance:{one:"elastic node",other:"elastic nodes"}}},"elasticsearch.node_indices_segments_memory_usage":{info:"Segments Memory Usage.",en:{instance:{one:"elastic node",other:"elastic nodes"}}},"elasticsearch.node_indices_translog_operations":{info:"Translog Operations.",en:{instance:{one:"elastic node",other:"elastic nodes"}}},"elasticsearch.node_indices_translog_size":{info:"Translog Size.",en:{instance:{one:"elastic node",other:"elastic nodes"}}},"elasticsearch.node_file_descriptors":{info:"Process File Descriptors.",en:{instance:{one:"elastic node",other:"elastic nodes"}}},"elasticsearch.node_jvm_heap":{aggregationMethod:"avg",info:"JVM Heap Percentage Currently in Use.",en:{instance:{one:"elastic node",other:"elastic nodes"}}},"elasticsearch.node_jvm_heap_bytes":{info:"JVM Heap Commit And Usage.",en:{instance:{one:"elastic node",other:"elastic nodes"}}},"elasticsearch.node_jvm_buffer_pools_count":{info:"JVM Buffer Pools Count.",en:{instance:{one:"elastic node",other:"elastic nodes"}}},"elasticsearch.node_jvm_buffer_pool_direct_memory":{info:"JVM Buffer Pool Direct Memory.",en:{instance:{one:"elastic node",other:"elastic nodes"}}},"elasticsearch.node_jvm_buffer_pool_mapped_memory":{info:"JVM Buffer Pool Mapped Memory.",en:{instance:{one:"elastic node",other:"elastic nodes"}}},"elasticsearch.node_jvm_gc_count":{info:"JVM Garbage Collections.",en:{instance:{one:"elastic node",other:"elastic nodes"}}},"elasticsearch.node_jvm_gc_time":{aggregationMethod:"avg",info:"JVM Time Spent On Garbage Collections.",en:{instance:{one:"elastic 
node",other:"elastic nodes"}}},"elasticsearch.node_thread_pool_queued":{info:"Thread Pool Queued Threads Count.",en:{instance:{one:"elastic node",other:"elastic nodes"}}},"elasticsearch.node_thread_pool_rejected":{info:"Thread Pool Rejected Threads Count.",en:{instance:{one:"elastic node",other:"elastic nodes"}}},"elasticsearch.cluster_communication_packets":{info:"Cluster Communication.",en:{instance:{one:"cluster",other:"clusters"}}},"elasticsearch.cluster_communication":{info:"Cluster Communication Bandwidth.",en:{instance:{one:"cluster",other:"clusters"}}},"elasticsearch.http_connections":{info:"HTTP Connections.",en:{instance:{one:"server",other:"servers"}}},"elasticsearch.breakers_trips":{info:"Circuit Breaker Trips Count.",en:{instance:{one:"server",other:"servers"}}},"elasticsearch.cluster_health_status":{info:"Cluster Status.",en:{instance:{one:"cluster",other:"clusters"}}},"elasticsearch.cluster_number_of_nodes":{info:"Cluster Nodes Count.",en:{instance:{one:"cluster",other:"clusters"}}},"elasticsearch.cluster_shards_count":{info:"Cluster Shards Count.",en:{instance:{one:"cluster",other:"clusters"}}},"elasticsearch.cluster_pending_tasks":{info:"Cluster Pending Tasks.",en:{instance:{one:"cluster",other:"clusters"}}},"elasticsearch.cluster_number_of_in_flight_fetch":{info:"Cluster Unfinished Fetches.",en:{instance:{one:"cluster",other:"clusters"}}},"elasticsearch.cluster_indices_count":{info:"Cluster Indices Count.",en:{instance:{one:"cluster",other:"clusters"}}},"elasticsearch.cluster_indices_shards_count":{info:"Cluster Indices Shards Count.",en:{instance:{one:"cluster",other:"clusters"}}},"elasticsearch.cluster_indices_docs_count":{info:"Cluster Indices Docs Count.",en:{instance:{one:"cluster",other:"clusters"}}},"elasticsearch.cluster_indices_store_size":{info:"Cluster Indices Store Size.",en:{instance:{one:"cluster",other:"clusters"}}},"elasticsearch.cluster_indices_query_cache":{info:"Cluster Indices Query Cache.",en:{instance:{one:"cluster",other:"clusters"}}},"elasticsearch.cluster_nodes_by_role_count":{info:"Cluster Nodes By Role Count.",en:{instance:{one:"cluster",other:"clusters"}}},"elasticsearch.node_index_health":{info:"Index Health.",en:{instance:{one:"index",other:"indexes"}}},"elasticsearch.node_index_shards_count":{info:"Index Shards Count.",en:{instance:{one:"index",other:"indexes"}}},"elasticsearch.node_index_docs_count":{info:"Index Docs Count.",en:{instance:{one:"index",other:"indexes"}}},"elasticsearch.node_index_store_size":{info:"Index Store Size.",en:{instance:{one:"index",other:"indexes"}}},"energid.blockindex":{info:"Blockchain index.",en:{instance:{one:"server",other:"servers"}}},"energid.difficulty":{info:"Blockchain difficulty.",en:{instance:{one:"server",other:"servers"}}},"energid.mempool":{info:"Memory pool.",en:{instance:{one:"server",other:"servers"}}},"energid.secmem":{info:"Secure memory.",en:{instance:{one:"server",other:"servers"}}},"energid.network":{info:"Network.",en:{instance:{one:"server",other:"servers"}}},"energid.timeoffset":{info:"Network time offset.",en:{instance:{one:"server",other:"servers"}}},"energid.utxo_transactions":{info:"Transactions.",en:{instance:{one:"server",other:"servers"}}},"exim.qemails":{info:"Exim Queue Emails.",en:{instance:{one:"server",other:"servers"}}},"fail2ban.faile_attempts":{info:"Failed attempts.",en:{instance:{one:"system",other:"systems"}}},"filecheck.file_existence_status":{info:"File existence 
status.",en:{instance:{one:"file",other:"files"}}},"filecheck.file_modification_time_ago":{groupBy:["label"],groupByLabel:["file_path"],info:"File time since the last modification.",en:{instance:{one:"file",other:"files"}}},"filecheck.file_size_bytes":{groupBy:["label"],groupByLabel:["file_path"],info:"File size.",en:{instance:{one:"file",other:"files"}}},"filecheck.dir_existence_status":{info:"Directory existence status.",en:{instance:{one:"directory",other:"directories"}}},"filecheck.dir_modification_time_ago":{info:"Directory time since the last modification.",groupBy:["label"],groupByLabel:["dir_path"],en:{instance:{one:"directory",other:"directories"}}},"filecheck.dir_size_bytes":{info:"Directory size.",groupBy:["label"],groupByLabel:["dir_path"],en:{instance:{one:"directory",other:"directories"}}},"filecheck.dir_files_count":{info:"Directory files count.",groupBy:["label"],groupByLabel:["dir_path"],en:{instance:{one:"directory",other:"directories"}}},"filecheck.file_existence":{info:"File Existence (0: does not exist, 1: exists).",en:{instance:{one:"file",other:"files"}}},"filecheck.file_mtime_ago":{info:"File Time Since the Last Modification.",en:{instance:{one:"file",other:"files"}}},"filecheck.file_size":{info:"File Size.",en:{instance:{one:"file",other:"files"}}},"filecheck.dir_existence":{info:"Dir Existence (0: does not exist, 1: exists).",en:{instance:{one:"directory",other:"directories"}}},"filecheck.dir_mtime_ago":{info:"Dir Time Since the Last Modification.",en:{instance:{one:"directory",other:"directories"}}},"filecheck.dir_num_of_files":{info:"Dir Number of Files.",en:{instance:{one:"directory",other:"directories"}}},"filecheck.dir_size":{info:"Dir Size.",en:{instance:{one:"directory",other:"directories"}}},"fluentd.retry_count":{info:"Plugin Retry Count.",en:{instance:{one:"plugin",other:"plugins"}}},"fluentd.buffer_queue_length":{info:"Plugin Buffer Queue Length.",en:{instance:{one:"plugin",other:"plugins"}}},"fluentd.buffer_total_queued_size":{info:"Plugin Buffer Total Size.",en:{instance:{one:"plugin",other:"plugins"}}},"cpu.temperature":{info:"Core temperature.",en:{instance:{one:"core",other:"cores"}}},"cpu.scaling_cur_freq":{info:"Current CPU Scaling Frequency.",en:{instance:{one:"system",other:"systems"}}},"system.dev_intr":{info:"Device Interrupts.",en:{instance:{one:"system",other:"systems"}}},"system.soft_intr":{info:"Software Interrupts.",en:{instance:{one:"system",other:"systems"}}},"system.ipc_shared_mem_segs":{info:"IPC Shared Memory Segments.",en:{instance:{one:"system",other:"systems"}}},"system.ipc_shared_mem_size":{info:"IPC Shared Memory Segments Size.",en:{instance:{one:"system",other:"systems"}}},"system.ipc_msq_queues":{info:"Number of IPC Message Queues.",en:{instance:{one:"system",other:"systems"}}},"system.ipc_msq_messages":{info:"Number of Messages in IPC Message Queues.",en:{instance:{one:"system",other:"systems"}}},"system.ipc_msq_size":{info:"Size of IPC Message Queues.",en:{instance:{one:"system",other:"systems"}}},"ipv4.tcpconnaborts":{info:"TCP Connection Aborts.",en:{instance:{one:"system",other:"systems"}}},"ipv4.tcpofo":{info:"TCP Out-Of-Order Queue.",en:{instance:{one:"system",other:"systems"}}},"ipv4.tcpsyncookies":{info:"TCP SYN Cookies.",en:{instance:{one:"system",other:"systems"}}},"ipv4.tcplistenissues":{info:"TCP Listen Socket Issues.",en:{instance:{one:"system",other:"systems"}}},"ipv4.ecnpkts":{info:"IPv4 ECN Statistics.",en:{instance:{one:"system",other:"systems"}}},"ipfw.mem":{info:"Memory allocated by 
rules.",en:{instance:{one:"system",other:"systems"}}},"ipfw.packets":{info:"Packets.",en:{instance:{one:"system",other:"systems"}}},"ipfw.bytes":{info:"Bytes.",en:{instance:{one:"system",other:"systems"}}},"ipfw.active":{info:"Active rules.",en:{instance:{one:"system",other:"systems"}}},"ipfw.expired":{info:"Expired rules.",en:{instance:{one:"system",other:"systems"}}},"system.packets":{info:"Network Packets.",en:{instance:{one:"system",other:"systems"}}},"zfs.hits_rate":{info:"ZFS ARC Hits Rate.",en:{instance:{one:"filesystem",other:"filesystems"}}},"zfs.dhits_rate":{info:"ZFS Demand Hits Rate.",en:{instance:{one:"filesystem",other:"filesystems"}}},"zfs.phits_rate":{info:"ZFS Prefetch Hits Rate.",en:{instance:{one:"filesystem",other:"filesystems"}}},"zfs.mhits_rate":{info:"ZFS Metadata Hits Rate.",en:{instance:{one:"filesystem",other:"filesystems"}}},"zfs.l2hits_rate":{info:"ZFS L2 Hits Rate.",en:{instance:{one:"filesystem",other:"filesystems"}}},"zfs.actual_hits_rate":{info:"ZFS Actual Cache Hits Rate.",en:{instance:{one:"filesystem",other:"filesystems"}}},"zfs.demand_data_hits_rate":{info:"ZFS Data Demand Efficiency Rate.",en:{instance:{one:"filesystem",other:"filesystems"}}},"zfs.prefetch_data_hits_rate":{info:"ZFS Data Prefetch Efficiency Rate.",en:{instance:{one:"filesystem",other:"filesystems"}}},"zfs.trim_bytes":{info:"Successfully TRIMmed bytes.",en:{instance:{one:"system",other:"systems"}}},"zfs.trim_requests":{info:"TRIM requests.",en:{instance:{one:"system",other:"systems"}}},"ipmi.sel":{groupBy:["node"],info:"IPMI Events.",en:{instance:{one:"sensor",other:"sensors"}}},"ipmi.sensor_state":{info:"IPMI Sensors State.",en:{instance:{one:"sensor",other:"sensors"}}},"ipmi.sensor_temperature_c":{groupBy:["label"],groupByLabel:["component"],aggregationMethod:"max",mainheads:[{groupBy:["selected"],aggregationMethod:"max",chartLibrary:"easypiechart",title:"Maximum Temperature",colors:s.default[1],layout:{left:0,top:0,width:2,height:5}},{groupBy:["selected"],aggregationMethod:"min",chartLibrary:"number",title:"Minimum Temperature",layout:{left:2,top:0,width:2,height:2.5}},{groupBy:["selected"],aggregationMethod:"avg",chartLibrary:"number",title:"Average Temperature",layout:{left:2,top:2.5,width:2,height:2.5}}],info:"IPMI Sensor Temperature Celsius.",en:{instance:{one:"sensor",other:"sensors"}}},"ipmi.sensor_temperature_f":{groupBy:["label"],groupByLabel:["component"],aggregationMethod:"max",mainheads:[{groupBy:["selected"],aggregationMethod:"max",chartLibrary:"easypiechart",title:"Maximum Temperature",colors:s.default[1],layout:{left:0,top:0,width:2,height:5}},{groupBy:["selected"],aggregationMethod:"min",chartLibrary:"number",title:"Minimum Temperature",layout:{left:2,top:0,width:2,height:2.5}},{groupBy:["selected"],aggregationMethod:"avg",chartLibrary:"number",title:"Average Temperature",layout:{left:2,top:2.5,width:2,height:2.5}}],info:"IPMI Sensor Temperature Fahrenheit.",en:{instance:{one:"sensor",other:"sensors"}}},"ipmi.sensor_voltage":{mainheads:[{groupBy:["selected"],aggregationMethod:"max",chartLibrary:"easypiechart",title:"Maximum Voltage",colors:s.default[12],layout:{left:8,top:0,width:2,height:5}},{groupBy:["selected"],aggregationMethod:"min",chartLibrary:"number",title:"Minimum Voltage",layout:{left:10,top:0,width:2,height:2.5}},{groupBy:["selected"],aggregationMethod:"avg",chartLibrary:"number",title:"Average Voltage",layout:{left:10,top:2.5,width:2,height:2.5}}],groupBy:["label"],groupByLabel:["component"],aggregationMethod:"avg",info:"IPMI Sensor 
Voltage.",en:{instance:{one:"sensor",other:"sensors"}}},"ipmi.sensor_ampere":{groupBy:["label"],groupByLabel:["component"],aggregationMethod:"avg",info:"IPMI Sensor Current.",en:{instance:{one:"sensor",other:"sensors"}}},"ipmi.sensor_fan_speed":{mainheads:[{groupBy:["selected"],aggregationMethod:"max",chartLibrary:"easypiechart",title:"Maximum Fan Speed",layout:{left:4,top:0,width:2,height:5}},{groupBy:["selected"],aggregationMethod:"min",chartLibrary:"number",title:"Minimum Fan Speed",layout:{left:6,top:0,width:2,height:2.5}},{groupBy:["selected"],aggregationMethod:"avg",chartLibrary:"number",title:"Average Fan Speed",layout:{left:6,top:2.5,width:2,height:2.5}}],groupBy:["label"],groupByLabel:["component"],aggregationMethod:"avg",info:"IPMI Sensor Fan Speed.",en:{instance:{one:"sensor",other:"sensors"}}},"ipmi.sensor_power":{groupBy:["label"],groupByLabel:["component"],aggregationMethod:"avg",info:"IPMI Sensor Power.",en:{instance:{one:"sensor",other:"sensors"}}},"ipmi.sensor_reading_percent":{groupBy:["label"],groupByLabel:["component"],aggregationMethod:"avg",info:"IPMI Sensor Reading Percentage.",en:{instance:{one:"sensor",other:"sensors"}}},"ipmi.sensors_states":{info:"IPMI Sensors State.",en:{instance:{one:"sensor",other:"sensors"}}},"ipmi.temperatures_c":{groupBy:["node"],aggregationMethod:"max",mainheads:[{groupBy:["selected"],aggregationMethod:"max",chartLibrary:"easypiechart",title:"Maximum Temperature",colors:s.default[1],layout:{left:0,top:0,width:2,height:5}},{groupBy:["selected"],aggregationMethod:"min",chartLibrary:"number",title:"Minimum Temperature",layout:{left:2,top:0,width:2,height:2.5}},{groupBy:["selected"],aggregationMethod:"avg",chartLibrary:"number",title:"Average Temperature",layout:{left:2,top:2.5,width:2,height:2.5}}],info:"System Celsius Temperatures read by IPMI.",en:{instance:{one:"sensor",other:"sensors"}}},"ipmi.temperatures_f":{groupBy:["node"],aggregationMethod:"max",info:"System Fahrenheit Temperatures read by IPMI.",en:{instance:{one:"sensor",other:"sensors"}}},"ipmi.voltages":{mainheads:[{groupBy:["selected"],aggregationMethod:"max",chartLibrary:"easypiechart",title:"Maximum Voltage",colors:s.default[12],layout:{left:8,top:0,width:2,height:5}},{groupBy:["selected"],aggregationMethod:"min",chartLibrary:"number",title:"Minimum Voltage",layout:{left:10,top:0,width:2,height:2.5}},{groupBy:["selected"],aggregationMethod:"avg",chartLibrary:"number",title:"Average Voltage",layout:{left:10,top:2.5,width:2,height:2.5}}],groupBy:["node"],aggregationMethod:"avg",info:"System Voltages read by IPMI.",en:{instance:{one:"sensor",other:"sensors"}}},"ipmi.amps":{groupBy:["node"],aggregationMethod:"avg",info:"System Current read by IPMI.",en:{instance:{one:"sensor",other:"sensors"}}},"ipmi.rpm":{mainheads:[{groupBy:["selected"],aggregationMethod:"max",chartLibrary:"easypiechart",title:"Maximum Fan Speed",layout:{left:4,top:0,width:2,height:5}},{groupBy:["selected"],aggregationMethod:"min",chartLibrary:"number",title:"Minimum Fan Speed",layout:{left:6,top:0,width:2,height:2.5}},{groupBy:["selected"],aggregationMethod:"avg",chartLibrary:"number",title:"Average Fan Speed",layout:{left:6,top:2.5,width:2,height:2.5}}],groupBy:["node"],aggregationMethod:"avg",info:"System Fans read by IPMI.",en:{instance:{one:"sensor",other:"sensors"}}},"ipmi.watts":{groupBy:["node"],aggregationMethod:"avg",info:"System Power read by IPMI.",en:{instance:{one:"sensor",other:"sensors"}}},"ipmi.percent":{groupBy:["node"],aggregationMethod:"avg",info:"System Metrics read by 
IPMI.",en:{instance:{one:"sensor",other:"sensors"}}},"freeradius.proxy_bad_authentication":{info:"Bad Authentication Requests.",en:{instance:{one:"server",other:"servers"}}},"freeradius.accounting":{info:"Accounting.",en:{instance:{one:"server",other:"servers"}}},"freeradius.bad_accounting":{info:"Bad Accounting Requests.",en:{instance:{one:"server",other:"servers"}}},"freeradius.proxy_accounting":{info:"Accounting.",en:{instance:{one:"server",other:"servers"}}},"freeradius.proxy_bad_accounting":{info:"Bad Accounting Requests.",en:{instance:{one:"server",other:"servers"}}},"gearman.total_jobs":{info:"Total Jobs.",en:{instance:{one:"server",other:"servers"}}},"gearman.single_job":{info:"{job_name}.",en:{instance:{one:"server",other:"servers"}}},"geth.eth_db_chaindata_ancient_io_rate":{info:"Ancient Chaindata rate.",en:{instance:{one:"server",other:"servers"}}},"geth.eth_db_chaindata_ancient_io":{info:"Session ancient Chaindata.",en:{instance:{one:"server",other:"servers"}}},"geth.eth_db_chaindata_disk_io":{info:"Session chaindata on disk.",en:{instance:{one:"server",other:"servers"}}},"geth.eth_db_chaindata_disk_io_rate":{info:"On disk Chaindata rate.",en:{instance:{one:"server",other:"servers"}}},"geth.tx_pool_pending":{info:"Pending Transaction Pool.",en:{instance:{one:"server",other:"servers"}}},"geth.tx_pool_current":{info:"Transaction Pool.",en:{instance:{one:"server",other:"servers"}}},"geth.tx_pool_queued":{info:"Queued Transaction Pool.",en:{instance:{one:"server",other:"servers"}}},"geth.p2p_peers":{info:"Number of Peers.",en:{instance:{one:"server",other:"servers"}}},"geth.rpc_calls":{info:"rpc calls.",en:{instance:{one:"server",other:"servers"}}},"expvar.memstats.heap":{info:"memory: size of heap memory structures.",en:{instance:{one:"system",other:"systems"}}},"expvar.memstats.stack":{info:"memory: size of stack memory structures.",en:{instance:{one:"system",other:"systems"}}},"expvar.memstats.mspan":{info:"memory: size of mspan memory structures.",en:{instance:{one:"system",other:"systems"}}},"expvar.memstats.mcache":{info:"memory: size of mcache memory structures.",en:{instance:{one:"system",other:"systems"}}},"expvar.memstats.live_objects":{info:"memory: number of live objects.",en:{instance:{one:"system",other:"systems"}}},"expvar.memstats.sys":{info:"memory: size of reserved virtual address space.",en:{instance:{one:"system",other:"systems"}}},"expvar.memstats.gc_pauses":{info:"memory: average duration of GC pauses.",en:{instance:{one:"system",other:"systems"}}},"hddtemp.temperatures":{aggregationMethod:"avg",info:"Disk Temperatures.",en:{instance:{one:"device",other:"devices"}}},"hddtemp.disk_temperature":{aggregationMethod:"max",info:"Disk temperature.",en:{instance:{one:"disk",other:"disks"}},groupBy:["label"],groupByLabel:["disk_id"]},"hddtemp.disk_temperature_sensor_status":{info:"Disk temperature sensor status.",en:{instance:{one:"disk",other:"disks"}}},"hdfs.threads":{info:"Number of Threads.",en:{instance:{one:"device",other:"devices"}}},"hdfs.logs_total":{info:"Number of Logs.",en:{instance:{one:"device",other:"devices"}}},"hdfs.open_connections":{info:"RPC Open Connections.",en:{instance:{one:"device",other:"devices"}}},"hdfs.call_queue_length":{info:"RPC Call Queue Length.",en:{instance:{one:"device",other:"devices"}}},"hdfs.capacity":{info:"Capacity Across All Datanodes.",en:{instance:{one:"device",other:"devices"}}},"hdfs.used_capacity":{info:"Used Capacity Across All Datanodes.",en:{instance:{one:"device",other:"devices"}}},"hdfs.load":{info:"Number of 
Concurrent File Accesses (read/write) Across All DataNodes.",en:{instance:{one:"device",other:"devices"}}},"hdfs.volume_failures_total":{info:"Number of Volume Failures Across All Datanodes.",en:{instance:{one:"device",other:"devices"}}},"hdfs.files_total":{info:"Number of Tracked Files.",en:{instance:{one:"device",other:"devices"}}},"hdfs.blocks_total":{info:"Number of Allocated Blocks in the System.",en:{instance:{one:"device",other:"devices"}}},"hdfs.blocks":{info:"Number of Problem Blocks (can point to an unhealthy cluster).",en:{instance:{one:"device",other:"devices"}}},"hdfs.data_nodes":{info:"Number of Data Nodes By Status.",en:{instance:{one:"device",other:"devices"}}},"hdfs.datanode_used_capacity":{info:"Used Capacity.",en:{instance:{one:"device",other:"devices"}}},"hdfs.datanode_failed_volumes":{info:"Number of Failed Volumes.",en:{instance:{one:"device",other:"devices"}}},"hpssa.ctrl_status":{info:"Status 1 is OK, Status 0 is not OK.",en:{instance:{one:"device",other:"devices"}}},"hpssa.ctrl_temperature":{aggregationMethod:"avg",info:"Temperature.",en:{instance:{one:"device",other:"devices"}}},"hpssa.ld_status":{info:"Status 1 is OK, Status 0 is not OK.",en:{instance:{one:"device",other:"devices"}}},"hpssa.pd_status":{info:"Status 1 is OK, Status 0 is not OK.",en:{instance:{one:"device",other:"devices"}}},"hpssa.pd_temperature":{aggregationMethod:"avg",info:"Temperature.",en:{instance:{one:"device",other:"devices"}}},"httpcheck.in_state":{aggregationMethod:"avg",info:"HTTP Current State Duration.",en:{instance:{one:"server",other:"servers"}}},"icecast.listeners":{info:"Number Of Listeners.",en:{instance:{one:"server",other:"servers"}}},"ioping.latency":{info:"Read Latency.",en:{instance:{one:"disk",other:"disks"}}},"ipfs.bandwidth":{info:"IPFS Bandwidth.",en:{instance:{one:"server",other:"servers"}}},"ipfs.peers":{info:"IPFS Peers.",en:{instance:{one:"server",other:"servers"}}},"ipfs.repo_size":{info:"IPFS Repo Size.",en:{instance:{one:"server",other:"servers"}}},"ipfs.repo_objects":{info:"IPFS Repo Objects.",en:{instance:{one:"server",other:"servers"}}},"isc_dhcpd.active_leases_total":{info:"Active Leases Total.",en:{instance:{one:"server",other:"servers"}}},"isc_dhcpd.pool_active_leases":{info:"Pool Active Leases.",en:{instance:{one:"pool",other:"pools"}}},"isc_dhcpd.pool_utilization":{aggregationMethod:"avg",info:"Pool Utilization.",en:{instance:{one:"pool",other:"pools"}}},"libreswan.net":{info:"LibreSWAN Tunnel ${name} Traffic.",en:{instance:{one:"tunnel",other:"tunnels"}}},"libreswan.uptime":{aggregationMethod:"min",info:"LibreSWAN Tunnel ${name} Uptime.",en:{instance:{one:"tunnel",other:"tunnels"}}},"lighttpd.scoreboard":{info:"ScoreBoard.",en:{instance:{one:"server",other:"servers"}}},"litespeed.net_throughput":{info:"Network Throughput HTTPS.",en:{instance:{one:"server",other:"servers"}}},"litespeed.connections":{info:"Connections HTTPS.",en:{instance:{one:"server",other:"servers"}}},"litespeed.requests":{info:"Requests.",en:{instance:{one:"server",other:"servers"}}},"litespeed.requests_processing":{info:"Requests In Processing.",en:{instance:{one:"server",other:"servers"}}},"litespeed.cache":{info:"Private Cache Hits.",en:{instance:{one:"server",other:"servers"}}},"litespeed.static":{info:"Static Hits.",en:{instance:{one:"server",other:"servers"}}},"logstash.jvm_mem_heap":{info:"JVM Heap Memory.",en:{instance:{one:"system",other:"systems"}}},"megacli.adapter_degraded":{info:"Adapter 
State.",en:{instance:{one:"device",other:"devices"}}},"megacli.pd_media_error":{info:"Physical Drives Media Errors.",en:{instance:{one:"device",other:"devices"}}},"megacli.pd_predictive_failure":{info:"Physical Drives Predictive Failures.",en:{instance:{one:"device",other:"devices"}}},"megacli.bbu_relative_charge":{aggregationMethod:"avg",info:"Relative State of Charge.",en:{instance:{one:"battery",other:"batteries"}}},"megacli.bbu_cycle_count":{info:"Cycle Count.",en:{instance:{one:"battery",other:"batteries"}}},"memcached.cache":{info:"Cache Size.",en:{instance:{one:"system",other:"systems"}}},"memcached.net":{info:"Network.",en:{instance:{one:"system",other:"systems"}}},"memcached.connections":{info:"Connections.",en:{instance:{one:"system",other:"systems"}}},"memcached.items":{info:"Items.",en:{instance:{one:"system",other:"systems"}}},"memcached.evicted_reclaimed":{info:"Evicted and Reclaimed Items.",en:{instance:{one:"system",other:"systems"}}},"memcached.get":{info:"Get Requests.",en:{instance:{one:"system",other:"systems"}}},"memcached.get_rate":{info:"Get Request Rate.",en:{instance:{one:"system",other:"systems"}}},"memcached.set_rate":{info:"Set Request Rate.",en:{instance:{one:"system",other:"systems"}}},"memcached.delete":{info:"Delete Requests.",en:{instance:{one:"system",other:"systems"}}},"memcached.cas":{info:"Check and Set Requests.",en:{instance:{one:"system",other:"systems"}}},"memcached.increment":{info:"Increment Requests.",en:{instance:{one:"system",other:"systems"}}},"memcached.decrement":{info:"Decrement Requests.",en:{instance:{one:"system",other:"systems"}}},"memcached.touch":{info:"Touch Requests.",en:{instance:{one:"system",other:"systems"}}},"memcached.touch_rate":{info:"Touch Request Rate.",en:{instance:{one:"system",other:"systems"}}},"mongodb.operations_rate":{info:"Operations rate.",en:{instance:{one:"db server",other:"db servers"}}},"mongodb.operations_latency_time":{aggregationMethod:"avg",info:"Operations Latency.",en:{instance:{one:"db server",other:"db servers"}}},"mongodb.operations_by_type_rate":{info:"Operations by type.",en:{instance:{one:"db server",other:"db servers"}}},"mongodb.document_operations_rate":{info:"Document operations.",en:{instance:{one:"db server",other:"db servers"}}},"mongodb.scanned_indexes_rate":{info:"Scanned indexes.",en:{instance:{one:"db server",other:"db servers"}}},"mongodb.scanned_documents_rate":{info:"Scanned documents.",en:{instance:{one:"db server",other:"db servers"}}},"mongodb.active_clients_count":{info:"Connected clients.",en:{instance:{one:"db server",other:"db servers"}}},"mongodb.queued_operations_count":{info:"Queued operations because of a lock.",en:{instance:{one:"db server",other:"db servers"}}},"mongodb.lock_acquisitions_rate":{info:"Lock acquisitions.",en:{instance:{one:"db server",other:"db servers"}}},"mongodb.cursors_open_count":{info:"Open cursors.",en:{instance:{one:"db server",other:"db servers"}}},"mongodb.cursors_open_no_timeout_count":{info:"Open cursors with disabled timeout.",en:{instance:{one:"db server",other:"db servers"}}},"mongodb.cursors_opened_rate":{info:"Opened cursors rate.",en:{instance:{one:"db server",other:"db servers"}}},"mongodb.cursors_timed_out_rate":{info:"Timed-out cursors.",en:{instance:{one:"db server",other:"db servers"}}},"mongodb.cursors_by_lifespan_count":{info:"Cursors lifespan.",en:{instance:{one:"db server",other:"db servers"}}},"mongodb.transactions_count":{info:"Current transactions.",en:{instance:{one:"db server",other:"db 
servers"}}},"mongodb.transactions_rate":{info:"Transactions rate.",en:{instance:{one:"db server",other:"db servers"}}},"mongodb.transactions_commits_rate":{info:"Transactions commits.",en:{instance:{one:"db server",other:"db servers"}}},"mongodb.transactions_commits_duration_time":{aggregationMethod:"avg",info:"Transactions successful commits duration.",en:{instance:{one:"db server",other:"db servers"}}},"mongodb.connections_usage":{info:"Connections usage.",en:{instance:{one:"db server",other:"db servers"}}},"mongodb.connections_by_state_count":{info:"Connections By State.",en:{instance:{one:"db server",other:"db servers"}}},"mongodb.asserts_rate":{info:"Raised assertions.",en:{instance:{one:"db server",other:"db servers"}}},"mongodb.network_traffic_rate":{info:"Network traffic.",en:{instance:{one:"db server",other:"db servers"}}},"mongodb.network_requests_rate":{info:"Network Requests.",en:{instance:{one:"db server",other:"db servers"}}},"mongodb.network_slow_dns_resolutions_rate":{info:"Slow DNS resolution operations.",en:{instance:{one:"db server",other:"db servers"}}},"mongodb.network_slow_ssl_handshakes_rate":{info:"Slow SSL handshake operations.",en:{instance:{one:"db server",other:"db servers"}}},"mongodb.memory_resident_size":{info:"Used resident memory.",en:{instance:{one:"db server",other:"db servers"}}},"mongodb.memory_virtual_size":{info:"Used virtual memory.",en:{instance:{one:"db server",other:"db servers"}}},"mongodb.memory_page_faults_rate":{info:"Memory page faults.",en:{instance:{one:"db server",other:"db servers"}}},"mongodb.memory_tcmalloc_stats":{info:"TCMalloc statistics.",en:{instance:{one:"db server",other:"db servers"}}},"mongodb.wiredtiger_concurrent_read_transactions_usage":{info:"Wired Tiger concurrent read transactions usage.",en:{instance:{one:"db server",other:"db servers"}}},"mongodb.wiredtiger_concurrent_write_transactions_usage":{info:"Wired Tiger concurrent write transactions usage.",en:{instance:{one:"db server",other:"db servers"}}},"mongodb.wiredtiger_cache_usage":{info:"Wired Tiger cache usage.",en:{instance:{one:"db server",other:"db servers"}}},"mongodb.wiredtiger_cache_dirty_space_size":{info:"Wired Tiger cache dirty space size.",en:{instance:{one:"db server",other:"db servers"}}},"mongodb.wiredtiger_cache_io_rate":{info:"Wired Tiger IO activity.",en:{instance:{one:"db server",other:"db servers"}}},"mongodb.wiredtiger_cache_evictions_rate":{info:"Wired Tiger cache evictions.",en:{instance:{one:"db server",other:"db servers"}}},"mongodb.database_collection_count":{info:"Database collections.",en:{instance:{one:"database",other:"databases"}}},"mongodb.database_indexes_count":{info:"Database indexes.",en:{instance:{one:"database",other:"databases"}}},"mongodb.database_views_count":{info:"Database views.",en:{instance:{one:"database",other:"databases"}}},"mongodb.database_documents_count":{info:"Database documents.",en:{instance:{one:"database",other:"databases"}}},"mongodb.database_data_size":{info:"Database data size.",en:{instance:{one:"database",other:"databases"}}},"mongodb.database_index_size":{info:"Database index size.",en:{instance:{one:"database",other:"databases"}}},"mongodb.repl_set_member_state":{info:"Replica Set member state.",en:{instance:{one:"replica",other:"replicas"}}},"mongodb.repl_set_member_health_status":{info:"Replica Set member health status.",en:{instance:{one:"replica",other:"replicas"}}},"mongodb.repl_set_member_replication_lag_time":{aggregationMethod:"max",info:"Replica Set member replication 
lag.",en:{instance:{one:"replica",other:"replicas"}}},"mongodb.repl_set_member_heartbeat_latency_time":{aggregationMethod:"avg",info:"Replica Set member heartbeat latency.",en:{instance:{one:"replica",other:"replicas"}}},"mongodb.repl_set_member_ping_rtt_time":{aggregationMethod:"avg",info:"Replica Set member ping RTT.",en:{instance:{one:"replica",other:"replicas"}}},"mongodb.repl_set_member_uptime":{aggregationMethod:"min",info:"Replica Set member uptime.",en:{instance:{one:"replica",other:"replicas"}}},"mongodb.sharding_nodes_count":{info:"Sharding Nodes.",en:{instance:{one:"db server",other:"db servers"}}},"mongodb.sharding_sharded_databases_count":{info:"Sharded databases.",en:{instance:{one:"db server",other:"db servers"}}},"mongodb.sharding_sharded_collections_count":{info:"Sharded collections.",en:{instance:{one:"db server",other:"db servers"}}},"mongodb.sharding_shard_chunks_count":{info:"Shard chunks.",en:{instance:{one:"shard",other:"shards"}}},"monit.filesystems":{info:"Filesystems.",en:{instance:{one:"system",other:"systems"}}},"monit.directories":{info:"Directories.",en:{instance:{one:"system",other:"systems"}}},"monit.files":{info:"Files.",en:{instance:{one:"system",other:"systems"}}},"monit.fifos":{info:"Pipes (fifo).",en:{instance:{one:"system",other:"systems"}}},"monit.programs":{info:"Programs statuses.",en:{instance:{one:"system",other:"systems"}}},"monit.services":{info:"Processes statuses.",en:{instance:{one:"system",other:"systems"}}},"monit.process_uptime":{aggregationMethod:"min",info:"Processes uptime.",en:{instance:{one:"system",other:"systems"}}},"monit.process_threads":{info:"Processes threads.",en:{instance:{one:"system",other:"systems"}}},"monit.process_childrens":{info:"Child processes.",en:{instance:{one:"system",other:"systems"}}},"monit.hosts":{info:"Hosts.",en:{instance:{one:"system",other:"systems"}}},"monit.host_latency":{aggregationMethod:"avg",info:"Hosts latency.",en:{instance:{one:"system",other:"systems"}}},"monit.networks":{info:"Network interfaces and addresses.",en:{instance:{one:"system",other:"systems"}}},"mysql.queries_type":{info:"Queries By Type.",en:{instance:{one:"db server",other:"db servers"}}},"mysql.connections":{info:"Connections.",en:{instance:{one:"db server",other:"db servers"}}},"mysql.connections_active":{info:"Active Connections.",en:{instance:{one:"db server",other:"db servers"}}},"mysql.threads":{info:"Threads.",en:{instance:{one:"db server",other:"db servers"}}},"mysql.innodb_os_log_fsync_writes":{info:"InnoDB OS Log Operations.",en:{instance:{one:"db server",other:"db servers"}}},"mysql.innodb_os_log_io":{info:"InnoDB OS Log Bandwidth.",en:{instance:{one:"db server",other:"db servers"}}},"mysql.connection_errors":{info:"Connection Errors.",en:{instance:{one:"db server",other:"db servers"}}},"mysql.open_tables":{info:"Open Tables.",en:{instance:{one:"db server",other:"db servers"}}},"mysql.process_list_queries_count":{info:"Queries Count.",en:{instance:{one:"db server",other:"db servers"}}},"mysql.galera_writesets":{info:"Replicated Writesets.",en:{instance:{one:"db server",other:"db servers"}}},"mysql.galera_conflicts":{info:"Replication Conflicts.",en:{instance:{one:"db server",other:"db servers"}}},"mysql.galera_thread_count":{info:"Total Number of WSRep (applier/rollbacker) Threads.",en:{instance:{one:"db server",other:"db servers"}}},"mysql.key_requests":{info:"MyISAM Key Cache Requests.",en:{instance:{one:"db server",other:"db servers"}}},"mysql.key_disk_ops":{info:"MyISAM Key Cache Disk 
Operations.",en:{instance:{one:"db server",other:"db servers"}}},"mysql.slave_status":{info:"I/O / SQL Thread Running State.",en:{instance:{one:"connection",other:"connections"}}},"mysql.userstats_created_transactions":{info:"User Transactions.",en:{instance:{one:"user",other:"users"}}},"mysql.userstats_empty_queries":{info:"User Empty Queries.",en:{instance:{one:"user",other:"users"}}},"netfilter.netlink_new":{info:"Connection Tracker New Connections.",en:{instance:{one:"system",other:"systems"}}},"netfilter.netlink_changes":{info:"Connection Tracker Changes.",en:{instance:{one:"system",other:"systems"}}},"netfilter.netlink_search":{info:"Connection Tracker Searches.",en:{instance:{one:"system",other:"systems"}}},"netfilter.netlink_errors":{info:"Connection Tracker Errors.",en:{instance:{one:"system",other:"systems"}}},"netfilter.netlink_expect":{info:"Connection Tracker Expectations.",en:{instance:{one:"system",other:"systems"}}},"netfilter.nfacct_packets":{info:"Netfilter Accounting Packets.",en:{instance:{one:"system",other:"systems"}}},"netfilter.nfacct_bytes":{info:"Netfilter Accounting Bandwidth.",en:{instance:{one:"system",other:"systems"}}},"nginxvts.connections_total":{info:"Total connections.",en:{instance:{one:"server",other:"servers"}}},"nginxvts.shm_used_node":{info:"Number of node using shared memory.",en:{instance:{one:"server",other:"servers"}}},"nginxvts.server_responses_total":{info:"Total number of responses by code class.",en:{instance:{one:"server",other:"servers"}}},"nginxvts.server_traffic_total":{info:"Total amount of data transferred to and from the server.",en:{instance:{one:"server",other:"servers"}}},"nginxvts.server_cache_total":{info:"Total server cache.",en:{instance:{one:"server",other:"servers"}}},"nsd.queries":{info:"queries.",en:{instance:{one:"server",other:"servers"}}},"nsd.zones":{info:"zones.",en:{instance:{one:"server",other:"servers"}}},"nsd.protocols":{info:"protocol.",en:{instance:{one:"server",other:"servers"}}},"nsd.type":{info:"query type.",en:{instance:{one:"server",other:"servers"}}},"nsd.transfer":{info:"transfer.",en:{instance:{one:"server",other:"servers"}}},"nsd.rcode":{info:"return code.",en:{instance:{one:"server",other:"servers"}}},"ntpd.sys_rootdisp":{info:"Total root dispersion to the primary reference clock.",en:{instance:{one:"server",other:"servers"}}},"ntpd.peer_stratum":{info:"Peer stratum.",en:{instance:{one:"peer",other:"peers"}}},"nut.charge":{aggregationMethod:"avg",info:"UPS Charge.",en:{instance:{one:"device",other:"devices"}}},"nut.runtime":{info:"UPS Runtime.",en:{instance:{one:"device",other:"devices"}}},"nut.battery.voltage":{aggregationMethod:"avg",info:"UPS Battery Voltage.",en:{instance:{one:"device",other:"devices"}}},"nut.input.voltage":{aggregationMethod:"avg",info:"UPS Input Voltage.",en:{instance:{one:"device",other:"devices"}}},"nut.input.current":{aggregationMethod:"avg",info:"UPS Input Current.",en:{instance:{one:"device",other:"devices"}}},"nut.input.frequency":{aggregationMethod:"avg",info:"UPS Input Frequency.",en:{instance:{one:"device",other:"devices"}}},"nut.output.voltage":{aggregationMethod:"avg",info:"UPS Output Voltage.",en:{instance:{one:"device",other:"devices"}}},"nut.load":{aggregationMethod:"avg",info:"UPS Load.",en:{instance:{one:"device",other:"devices"}}},"nut.load_usage":{info:"UPS Load Usage.",en:{instance:{one:"device",other:"devices"}}},"nut.temperature":{aggregationMethod:"avg",info:"UPS Temperature.",en:{instance:{one:"device",other:"devices"}}},"nut.clients":{info:"UPS Connected 
Clients.",en:{instance:{one:"device",other:"devices"}}},"upsd.ups_load":{aggregationMethod:"avg",mainheads:[{groupBy:["label"],groupByLabel:["ups_name"],chartLibrary:"bars",title:"Top by Load",dimensionsSort:"valueDesc",colors:s.default[12],layout:{left:0,top:0,width:3,height:5}}],info:"The current load on the UPS, expressed as a percentage of its rated capacity.",en:{instance:{one:"device",other:"devices"}}},"upsd.ups_load_usage":{aggregationMethod:"sum",mainheads:[{groupBy:["label"],groupByLabel:["ups_name"],chartLibrary:"bars",title:"Top by Power Output",dimensionsSort:"valueDesc",colors:s.default[12],layout:{left:3,top:0,width:3,height:5}}],info:"The amount of power that the UPS delivers to the equipment connected to it.",en:{instance:{one:"device",other:"devices"}}},"upsd.ups_status":{aggregationMethod:"sum",info:'The overall status of the UPS. For details, see NUT status data.',en:{instance:{one:"device",other:"devices"}}},"upsd.ups_temperature":{aggregationMethod:"max",info:"The current temperature of the UPS.",en:{instance:{one:"device",other:"devices"}}},"upsd.ups_battery_charge":{aggregationMethod:"avg",mainheads:[{groupBy:["label"],groupByLabel:["ups_name"],chartLibrary:"bars",title:"Top by Battery Charge (ascending)",dimensionsSort:"valueAsc",colors:s.default[12],layout:{left:6,top:0,width:3,height:5}}],info:"The current charge level of the UPS battery, expressed as a percentage.",en:{instance:{one:"device",other:"devices"}}},"upsd.ups_battery_estimated_runtime":{aggregationMethod:"avg",mainheads:[{groupBy:["label"],groupByLabel:["ups_name"],chartLibrary:"bars",title:"Top by Estimated Runtime (ascending)",dimensionsSort:"valueAsc",colors:s.default[12],layout:{left:9,top:0,width:3,height:5}}],info:"The estimated amount of time that the UPS can power its connected equipment during a power outage.",en:{instance:{one:"device",other:"devices"}}},"upsd.ups_battery_voltage":{aggregationMethod:"avg",info:"The current voltage of the UPS battery.",en:{instance:{one:"device",other:"devices"}}},"upsd.ups_battery_voltage_nominal":{aggregationMethod:"avg",info:"The nominal voltage of the UPS battery. The nominal voltage is the voltage that the battery is designed to operate at.",en:{instance:{one:"device",other:"devices"}}},"upsd.ups_input_voltage":{aggregationMethod:"avg",info:"The voltage of the power that is coming into the UPS from the utility.",en:{instance:{one:"device",other:"devices"}}},"upsd.ups_input_voltage_nominal":{aggregationMethod:"avg",info:"The nominal input voltage for the UPS. It is the voltage that the UPS is designed to operate at.",en:{instance:{one:"device",other:"devices"}}},"upsd.ups_input_current":{aggregationMethod:"avg",info:"The current amount of current that the UPS is drawing from the utility power.",en:{instance:{one:"device",other:"devices"}}},"upsd.ups_input_current_nominal":{aggregationMethod:"avg",info:"The nominal input current for the UPS. It is the current that the UPS is expected to draw from the utility power when it is fully loaded.",en:{instance:{one:"device",other:"devices"}}},"upsd.ups_input_frequency":{aggregationMethod:"avg",info:"The frequency of the utility power that the UPS is receiving.",en:{instance:{one:"device",other:"devices"}}},"upsd.ups_input_frequency_nominal":{aggregationMethod:"avg",info:"The nominal input frequency for the UPS. 
It is the frequency of the utility power that the UPS is designed to operate at.",en:{instance:{one:"device",other:"devices"}}},"upsd.ups_output_voltage":{aggregationMethod:"avg",info:"The voltage of the power that the UPS is providing to the connected equipment.",en:{instance:{one:"device",other:"devices"}}},"upsd.ups_output_voltage_nominal":{aggregationMethod:"avg",info:"The nominal output voltage for the UPS. It is the voltage that the UPS is designed to provide to the connected equipment.",en:{instance:{one:"device",other:"devices"}}},"upsd.ups_output_current":{aggregationMethod:"avg",info:"The amount of current that the UPS is currently providing to the connected equipment.",en:{instance:{one:"device",other:"devices"}}},"upsd.ups_output_current_nominal":{aggregationMethod:"avg",info:"The nominal output current for the UPS. It is the amount of current that the UPS is designed to provide to the connected equipment under normal operating conditions.",en:{instance:{one:"device",other:"devices"}}},"upsd.ups_output_frequency":{aggregationMethod:"avg",info:"The frequency of the power that the UPS is providing to the connected equipment.",en:{instance:{one:"device",other:"devices"}}},"upsd.ups_output_frequency_nominal":{aggregationMethod:"avg",info:"The nominal output frequency for the UPS. It is the frequency of the power that the UPS is designed to provide to the connected equipment under normal operating conditions.",en:{instance:{one:"device",other:"devices"}}},"nvidia_smi.gpu_pcie_bandwidth_utilization":{aggregationMethod:"avg",info:"PCI Express Bandwidth Utilization.",en:{instance:{one:"gpu",other:"gpus"}}},"nvidia_smi.gpu_frame_buffer_memory_usage":{info:"Frame buffer memory usage.",en:{instance:{one:"gpu",other:"gpus"}}},"nvidia_smi.gpu_bar1_memory_usage":{info:"BAR1 memory usage.",en:{instance:{one:"gpu",other:"gpus"}}},"nvidia_smi.gpu_voltage":{aggregationMethod:"avg",info:"Voltage.",en:{instance:{one:"gpu",other:"gpus"}}},"nvidia_smi.gpu_power_draw":{aggregationMethod:"avg",info:"Power draw.",en:{instance:{one:"gpu",other:"gpus"}}},"nvidia_smi.gpu_mig_mode_current_status":{info:"MIG current mode.",en:{instance:{one:"gpu",other:"gpus"}}},"nvidia_smi.gpu_mig_devices_count":{info:"MIG devices.",en:{instance:{one:"gpu",other:"gpus"}}},"nvidia_smi.gpu_mig_frame_buffer_memory_usage":{info:"Frame buffer memory usage.",en:{instance:{one:"gpu",other:"gpus"}}},"nvidia_smi.gpu_mig_bar1_memory_usage":{info:"BAR1 memory usage.",en:{instance:{one:"gpu",other:"gpus"}}},"openldap.total_connections":{info:"Total Connections.",en:{instance:{one:"server",other:"servers"}}},"openldap.traffic_stats":{info:"Traffic.",en:{instance:{one:"server",other:"servers"}}},"openldap.operations_status":{info:"Operations Status.",en:{instance:{one:"server",other:"servers"}}},"openldap.referrals":{info:"Referrals.",en:{instance:{one:"server",other:"servers"}}},"openldap.entries":{info:"Entries.",en:{instance:{one:"server",other:"servers"}}},"openldap.ldap_operations":{info:"Operations.",en:{instance:{one:"server",other:"servers"}}},"openldap.waiters":{info:"Waiters.",en:{instance:{one:"server",other:"servers"}}},"opensips.dialogs_active":{info:"OpenSIPS Active Dialogs.",en:{instance:{one:"system",other:"systems"}}},"opensips.users":{info:"OpenSIPS Users.",en:{instance:{one:"system",other:"systems"}}},"opensips.registrar":{info:"OpenSIPS Registrar.",en:{instance:{one:"system",other:"systems"}}},"opensips.transactions":{info:"OpenSIPS 
Transactions.",en:{instance:{one:"system",other:"systems"}}},"opensips.core_rcv":{info:"OpenSIPS Core Receives.",en:{instance:{one:"system",other:"systems"}}},"opensips.core_fwd":{info:"OpenSIPS Core Forwards.",en:{instance:{one:"system",other:"systems"}}},"opensips.core_drop":{info:"OpenSIPS Core Drops.",en:{instance:{one:"system",other:"systems"}}},"opensips.core_err":{info:"OpenSIPS Core Errors.",en:{instance:{one:"system",other:"systems"}}},"opensips.core_bad":{info:"OpenSIPS Core Bad.",en:{instance:{one:"system",other:"systems"}}},"opensips.tm_replies":{info:"OpenSIPS TM Replies.",en:{instance:{one:"system",other:"systems"}}},"opensips.transactions_status":{info:"OpenSIPS Transactions Status.",en:{instance:{one:"system",other:"systems"}}},"opensips.transactions_inuse":{info:"OpenSIPS InUse Transactions.",en:{instance:{one:"system",other:"systems"}}},"opensips.sl_replies":{info:"OpenSIPS SL Replies.",en:{instance:{one:"system",other:"systems"}}},"opensips.dialogs":{info:"OpenSIPS Dialogs.",en:{instance:{one:"system",other:"systems"}}},"opensips.net_waiting":{info:"OpenSIPS Network Waiting.",en:{instance:{one:"system",other:"systems"}}},"opensips.uri_checks":{info:"OpenSIPS URI Checks.",en:{instance:{one:"system",other:"systems"}}},"opensips.traces":{info:"OpenSIPS Traces.",en:{instance:{one:"system",other:"systems"}}},"opensips.shmem":{info:"OpenSIPS Shared Memory.",en:{instance:{one:"system",other:"systems"}}},"opensips.shmem_fragment":{info:"OpenSIPS Shared Memory Fragmentation.",en:{instance:{one:"system",other:"systems"}}},"oracledb.session_count":{info:"Session Count.",en:{instance:{one:"db server",other:"db servers"}}},"oracledb.session_limit_usage":{info:"Session Limit Usage.",en:{instance:{one:"db server",other:"db servers"}}},"oracledb.logons":{info:"Logons.",en:{instance:{one:"db server",other:"db servers"}}},"oracledb.physical_disk_read_writes":{info:"Physical Disk Reads/Writes.",en:{instance:{one:"db server",other:"db servers"}}},"oracledb.sorts_on_disks":{info:"Sorts On Disk.",en:{instance:{one:"db server",other:"db servers"}}},"oracledb.full_table_scans":{info:"Full Table Scans.",en:{instance:{one:"db server",other:"db servers"}}},"oracledb.database_wait_time_ratio":{aggregationMethod:"avg",info:"Database Wait Time Ratio.",en:{instance:{one:"db server",other:"db servers"}}},"oracledb.shared_pool_free_memory":{info:"Shared Pool Free Memory.",en:{instance:{one:"db server",other:"db servers"}}},"oracledb.in_memory_sorts_ratio":{aggregationMethod:"avg",info:"In-Memory Sorts Ratio.",en:{instance:{one:"db server",other:"db servers"}}},"oracledb.sql_service_response_time":{aggregationMethod:"avg",info:"SQL Service Response Time.",en:{instance:{one:"db server",other:"db servers"}}},"oracledb.user_rollbacks":{info:"User Rollbacks.",en:{instance:{one:"db server",other:"db servers"}}},"oracledb.enqueue_timeouts":{info:"Enqueue Timeouts.",en:{instance:{one:"db server",other:"db servers"}}},"oracledb.cache_hit_ration":{aggregationMethod:"avg",info:"Cache Hit Ratio.",en:{instance:{one:"db server",other:"db servers"}}},"oracledb.global_cache_blocks":{info:"Global Cache Blocks Events.",en:{instance:{one:"db server",other:"db servers"}}},"oracledb.activity":{info:"Activities.",en:{instance:{one:"db server",other:"db servers"}}},"oracledb.wait_time":{aggregationMethod:"avg",info:"Wait Time.",en:{instance:{one:"db server",other:"db 
servers"}}},"oracledb.tablespace_size":{info:"Size.",en:{instance:{one:"tablespace",other:"tablespaces"}}},"oracledb.tablespace_usage":{info:"Usage.",en:{instance:{one:"tablespace",other:"tablespaces"}}},"oracledb.tablespace_usage_in_percent":{aggregationMethod:"avg",info:"Usage.",en:{instance:{one:"tablespace",other:"tablespaces"}}},"oracledb.allocated_size":{info:"Size.",en:{instance:{one:"tablespace",other:"tablespaces"}}},"oracledb.allocated_usage":{info:"Usage.",en:{instance:{one:"tablespace",other:"tablespaces"}}},"oracledb.allocated_usage_in_percent":{aggregationMethod:"avg",info:"Usage.",en:{instance:{one:"tablespace",other:"tablespaces"}}},"perf.cpu_cycles":{info:"CPU cycles.",en:{instance:{one:"system",other:"systems"}}},"perf.instructions":{info:"Instructions.",en:{instance:{one:"system",other:"systems"}}},"perf.branch_instructions":{info:"Branch instructions.",en:{instance:{one:"system",other:"systems"}}},"perf.cache":{info:"Cache operations.",en:{instance:{one:"system",other:"systems"}}},"perf.bus_cycles":{info:"Bus cycles.",en:{instance:{one:"system",other:"systems"}}},"perf.stalled_cycles":{info:"Stalled frontend and backend cycles.",en:{instance:{one:"system",other:"systems"}}},"perf.migrations":{info:"CPU migrations.",en:{instance:{one:"system",other:"systems"}}},"perf.alignment_faults":{info:"Alignment faults.",en:{instance:{one:"system",other:"systems"}}},"perf.emulation_faults":{info:"Emulation faults.",en:{instance:{one:"system",other:"systems"}}},"perf.l1d_cache":{info:"L1D cache operations.",en:{instance:{one:"system",other:"systems"}}},"perf.l1d_cache_prefetch":{info:"L1D prefetch cache operations.",en:{instance:{one:"system",other:"systems"}}},"perf.l1i_cache":{info:"L1I cache operations.",en:{instance:{one:"system",other:"systems"}}},"perf.ll_cache":{info:"LL cache operations.",en:{instance:{one:"system",other:"systems"}}},"perf.dtlb_cache":{info:"DTLB cache operations.",en:{instance:{one:"system",other:"systems"}}},"perf.itlb_cache":{info:"ITLB cache operations.",en:{instance:{one:"system",other:"systems"}}},"perf.pbu_cache":{info:"PBU cache operations.",en:{instance:{one:"system",other:"systems"}}},"pihole.unwanted_domains_blocking_status":{info:"Unwanted Domains Blocking Status.",en:{instance:{one:"server",other:"servers"}}},"pika.connections":{info:"Connections.",en:{instance:{one:"server",other:"servers"}}},"pika.clients":{info:"Clients.",en:{instance:{one:"server",other:"servers"}}},"pika.memory":{info:"Memory usage.",en:{instance:{one:"server",other:"servers"}}},"pika.connected_replicas":{info:"Connected replicas.",en:{instance:{one:"server",other:"servers"}}},"pika.commands":{info:"Processed commands.",en:{instance:{one:"server",other:"servers"}}},"pika.commands_calls":{info:"Calls per command.",en:{instance:{one:"server",other:"servers"}}},"pika.database_strings_keys":{info:"Strings type keys per database.",en:{instance:{one:"database",other:"databases"}}},"pika.database_strings_expires_keys":{info:"Strings type expires keys per database.",en:{instance:{one:"database",other:"databases"}}},"pika.database_strings_invalid_keys":{info:"Strings type invalid keys per database.",en:{instance:{one:"database",other:"databases"}}},"pika.database_hashes_keys":{info:"Hashes type keys per database.",en:{instance:{one:"database",other:"databases"}}},"pika.database_hashes_expires_keys":{info:"Hashes type expires keys per database.",en:{instance:{one:"database",other:"databases"}}},"pika.database_hashes_invalid_keys":{info:"Hashes type invalid keys per 
database.",en:{instance:{one:"database",other:"databases"}}},"pika.database_lists_keys":{info:"Lists type keys per database.",en:{instance:{one:"database",other:"databases"}}},"pika.database_lists_expires_keys":{info:"Lists type expires keys per database.",en:{instance:{one:"database",other:"databases"}}},"pika.database_lists_invalid_keys":{info:"Lists type invalid keys per database.",en:{instance:{one:"database",other:"databases"}}},"pika.database_zsets_keys":{info:"Zsets type keys per database.",en:{instance:{one:"database",other:"databases"}}},"pika.database_zsets_expires_keys":{info:"Zsets type expires keys per database.",en:{instance:{one:"database",other:"databases"}}},"pika.database_zsets_invalid_keys":{info:"Zsets type invalid keys per database.",en:{instance:{one:"database",other:"databases"}}},"pika.database_sets_keys":{info:"Sets type keys per database.",en:{instance:{one:"database",other:"databases"}}},"pika.database_sets_expires_keys":{info:"Sets type expires keys per database.",en:{instance:{one:"database",other:"databases"}}},"pika.database_sets_invalid_keys":{info:"Sets invalid keys per database.",en:{instance:{one:"database",other:"databases"}}},"pika.uptime":{aggregationMethod:"min",info:"Uptime.",en:{instance:{one:"server",other:"servers"}}},"portcheck.state_duration":{info:"Current State Duration.",en:{instance:{one:"server",other:"servers"}}},"postgres.locks_utilization":{aggregationMethod:"avg",info:"Acquired locks utilization.",en:{instance:{one:"pg server",other:"pg servers"}}},"postgres.catalog_relations_count":{info:"Relation count.",en:{instance:{one:"pg server",other:"pg servers"}}},"postgres.catalog_relations_size":{info:"Relation size.",en:{instance:{one:"pg server",other:"pg servers"}}},"postgres.table_size":{info:"Table total size.",en:{instance:{one:"table",other:"tables"}}},"powerdns.questions_in":{info:"Incoming questions.",en:{instance:{one:"server",other:"servers"}}},"powerdns.questions_out":{info:"Outgoing questions.",en:{instance:{one:"server",other:"servers"}}},"powerdns.cache_usage":{info:"Cache Usage.",en:{instance:{one:"server",other:"servers"}}},"powerdns.cache_size":{info:"Cache Size.",en:{instance:{one:"server",other:"servers"}}},"powerdns.latency":{info:"Answer latency.",en:{instance:{one:"server",other:"servers"}}},"powerdns_recursor.questions_in":{info:"Incoming questions.",en:{instance:{one:"server",other:"servers"}}},"powerdns_recursor.questions_out":{info:"Outgoing questions.",en:{instance:{one:"server",other:"servers"}}},"powerdns_recursor.answer_time":{info:"Queries answered within a time range.",en:{instance:{one:"server",other:"servers"}}},"powerdns_recursor.timeouts":{info:"Timeouts on outgoing UDP queries.",en:{instance:{one:"server",other:"servers"}}},"powerdns_recursor.drops":{info:"Drops.",en:{instance:{one:"server",other:"servers"}}},"powerdns_recursor.cache_usage":{info:"Cache Usage.",en:{instance:{one:"server",other:"servers"}}},"powerdns_recursor.cache_size":{info:"Cache Size.",en:{instance:{one:"server",other:"servers"}}},"cpu.cpufreq":{info:"Current CPU Frequency.",en:{instance:{one:"system",other:"systems"}}},"mem.hugepage":{info:"Dedicated HugePages Memory.",en:{instance:{one:"system",other:"systems"}}},"mem.pagetype":{info:"pagetype_Node{node}_{zone}_{type}.",en:{instance:{one:"system",other:"systems"}}},"system.message_queue_message":{info:"IPC Message Queue Number of Messages.",en:{instance:{one:"system",other:"systems"}}},"md.nonredundant":{family:"redundancy",info:"Nonredundant Array 
Availability.",en:{instance:{one:"device",other:"devices"}}},"wireless.status":{family:"status",info:"Internal status reported by interface..",en:{instance:{one:"device",other:"devices"}}},"btrfs.commits":{family:"commits",info:"BTRFS Commits.",en:{instance:{one:"filesystem",other:"filesystems"}}},"btrfs.commits_perc_time":{family:"commits",aggregationMethod:"avg",info:"BTRFS Commits Time Share.",en:{instance:{one:"filesystem",other:"filesystems"}}},"btrfs.commit_timings":{family:"commits",info:"BTRFS Commit Timings.",en:{instance:{one:"filesystem",other:"filesystems"}}},"btrfs.device_errors":{family:"errors",info:"BTRFS Device Errors.",en:{instance:{one:"device",other:"devices"}}},"proxysql.client_connections_count":{info:"Client connections.",en:{instance:{one:"server",other:"servers"}}},"proxysql.client_connections_rate":{info:"Client connections rate.",en:{instance:{one:"server",other:"servers"}}},"proxysql.server_connections_count":{info:"Server connections.",en:{instance:{one:"server",other:"servers"}}},"proxysql.server_connections_rate":{info:"Server connections rate.",en:{instance:{one:"server",other:"servers"}}},"proxysql.backends_traffic":{info:"Backends traffic.",en:{instance:{one:"server",other:"servers"}}},"proxysql.clients_traffic":{info:"Clients traffic.",en:{instance:{one:"server",other:"servers"}}},"proxysql.active_transactions_count":{info:"Client connections that are currently processing a transaction.",en:{instance:{one:"server",other:"servers"}}},"proxysql.questions_rate":{info:"Client requests / statements executed.",en:{instance:{one:"server",other:"servers"}}},"proxysql.slow_queries_rate":{info:"Slow queries.",en:{instance:{one:"server",other:"servers"}}},"proxysql.queries_rate":{info:"Queries rate.",en:{instance:{one:"server",other:"servers"}}},"proxysql.backend_statements_count":{info:"Statements available across all backend connections.",en:{instance:{one:"server",other:"servers"}}},"proxysql.backend_statements_rate":{info:"Statements executed against the backends.",en:{instance:{one:"server",other:"servers"}}},"proxysql.client_statements_count":{info:"Statements that are in use by clients.",en:{instance:{one:"server",other:"servers"}}},"proxysql.client_statements_rate":{info:"Statements executed by clients.",en:{instance:{one:"server",other:"servers"}}},"proxysql.cached_statements_count":{info:"Global prepared statements.",en:{instance:{one:"server",other:"servers"}}},"proxysql.query_cache_entries_count":{info:"Query Cache entries.",en:{instance:{one:"server",other:"servers"}}},"proxysql.query_cache_memory_used":{info:"Query Cache memory used.",en:{instance:{one:"server",other:"servers"}}},"proxysql.query_cache_io":{info:"Query Cache I/O.",en:{instance:{one:"server",other:"servers"}}},"proxysql.query_cache_requests_rate":{info:"Query Cache requests.",en:{instance:{one:"server",other:"servers"}}},"proxysql.mysql_monitor_workers_count":{info:"MySQL monitor workers.",en:{instance:{one:"server",other:"servers"}}},"proxysql.mysql_monitor_workers_rate":{info:"MySQL monitor workers rate.",en:{instance:{one:"server",other:"servers"}}},"proxysql.mysql_monitor_connect_checks_rate":{info:"MySQL monitor connect checks.",en:{instance:{one:"server",other:"servers"}}},"proxysql.mysql_monitor_ping_checks_rate":{info:"MySQL monitor ping checks.",en:{instance:{one:"server",other:"servers"}}},"proxysql.mysql_monitor_read_only_checks_rate":{info:"MySQL monitor read only 
checks.",en:{instance:{one:"server",other:"servers"}}},"proxysql.mysql_monitor_replication_lag_checks_rate":{info:"MySQL monitor replication lag checks.",en:{instance:{one:"server",other:"servers"}}},"proxysql.jemalloc_memory_used":{info:"Jemalloc used memory.",en:{instance:{one:"server",other:"servers"}}},"proxysql.memory_used":{info:"Memory used.",en:{instance:{one:"server",other:"servers"}}},"proxysql.uptime":{aggregationMethod:"min",info:"Uptime.",en:{instance:{one:"server",other:"servers"}}},"proxysql.mysql_command_execution_rate":{info:"MySQL command execution.",en:{instance:{one:"command",other:"commands"}}},"proxysql.mysql_command_execution_time":{aggregationMethod:"avg",info:"MySQL command execution time.",en:{instance:{one:"command",other:"commands"}}},"proxysql.mysql_command_execution_duration":{info:"MySQL command execution duration histogram.",en:{instance:{one:"command",other:"commands"}}},"proxysql.mysql_user_connections_utilization":{aggregationMethod:"avg",info:"MySQL user connections utilization.",en:{instance:{one:"user",other:"users"}}},"proxysql.mysql_user_connections_count":{info:"MySQL user connections used.",en:{instance:{one:"user",other:"users"}}},"proxysql.backend_status":{info:"Backend status.",en:{instance:{one:"sql backend",other:"sql backends"}}},"proxysql.backend_connections_usage":{info:"Backend connections usage.",en:{instance:{one:"sql backend",other:"sql backends"}}},"proxysql.backend_connections_rate":{info:"Backend connections established.",en:{instance:{one:"sql backend",other:"sql backends"}}},"proxysql.backend_queries_rate":{info:"Backend queries.",en:{instance:{one:"sql backend",other:"sql backends"}}},"proxysql.backend_traffic":{info:"Backend traffic.",en:{instance:{one:"sql backend",other:"sql backends"}}},"proxysql.backend_latency":{aggregationMethod:"avg",info:"Backend latency.",en:{instance:{one:"sql backend",other:"sql backends"}}},"pulsar.broker_components":{info:"Broker Components.",en:{instance:{one:"broker",other:"brokers"}}},"pulsar.throughput_rate":{info:"Throughput Rate.",en:{instance:{one:"broker",other:"brokers"}}},"pulsar.storage_size":{info:"Storage Size.",en:{instance:{one:"broker",other:"brokers"}}},"pulsar.storage_operations_rate":{info:"Storage Read/Write Operations Rate.",en:{instance:{one:"broker",other:"brokers"}}},"pulsar.storage_write_latency":{aggregationMethod:"avg",info:"Storage Write Latency.",en:{instance:{one:"broker",other:"brokers"}}},"pulsar.entry_size":{info:"Entry Size.",en:{instance:{one:"broker",other:"brokers"}}},"pulsar.subscription_delayed":{info:"Subscriptions Delayed for Dispatching.",en:{instance:{one:"broker",other:"brokers"}}},"pulsar.replication_rate":{info:"Replication Rate.",en:{instance:{one:"broker",other:"brokers"}}},"pulsar.replication_throughput_rate":{info:"Replication Throughput Rate.",en:{instance:{one:"broker",other:"brokers"}}},"pulsar.replication_backlog":{info:"Replication Backlog.",en:{instance:{one:"broker",other:"brokers"}}},"pulsar.namespace_broker_components":{info:"Broker Components.",en:{instance:{one:"namespace",other:"namespaces"}}},"pulsar.namespace_throughput_rate":{info:"Throughput Rate.",en:{instance:{one:"namespace",other:"namespaces"}}},"pulsar.namespace_storage_size":{info:"Storage Size.",en:{instance:{one:"namespace",other:"namespaces"}}},"pulsar.namespace_storage_operations_rate":{info:"Storage Read/Write Operations Rate.",en:{instance:{one:"namespace",other:"namespaces"}}},"pulsar.namespace_storage_write_latency":{aggregationMethod:"avg",info:"Storage Write 
Latency.",en:{instance:{one:"namespace",other:"namespaces"}}},"pulsar.namespace_entry_size":{info:"Entry Size.",en:{instance:{one:"namespace",other:"namespaces"}}},"pulsar.namespace_subscription_delayed":{info:"Subscriptions Delayed for Dispatching.",en:{instance:{one:"namespace",other:"namespaces"}}},"pulsar.namespace_replication_rate":{info:"Replication Rate.",en:{instance:{one:"namespace",other:"namespaces"}}},"pulsar.namespace_replication_throughput_rate":{info:"Replication Throughput Rate.",en:{instance:{one:"namespace",other:"namespaces"}}},"pulsar.namespace_replication_backlog":{info:"Replication Backlog.",en:{instance:{one:"namespace",other:"namespaces"}}},"pulsar.topic_producers":{info:"Topic Producers.",en:{instance:{one:"namespace",other:"namespaces"}}},"pulsar.topic_subscriptions":{info:"Topic Subscriptions.",en:{instance:{one:"namespace",other:"namespaces"}}},"pulsar.topic_consumers":{info:"Topic Consumers.",en:{instance:{one:"namespace",other:"namespaces"}}},"pulsar.topic_messages_rate_in":{info:"Topic Publish Messages Rate.",en:{instance:{one:"namespace",other:"namespaces"}}},"pulsar.topic_messages_rate_out":{info:"Topic Dispatch Messages Rate.",en:{instance:{one:"namespace",other:"namespaces"}}},"pulsar.topic_throughput_rate_in":{info:"Topic Publish Throughput Rate.",en:{instance:{one:"namespace",other:"namespaces"}}},"pulsar.topic_throughput_rate_out":{info:"Topic Dispatch Throughput Rate.",en:{instance:{one:"namespace",other:"namespaces"}}},"pulsar.topic_storage_size":{info:"Topic Storage Size.",en:{instance:{one:"namespace",other:"namespaces"}}},"pulsar.topic_storage_read_rate":{info:"Topic Storage Read Rate.",en:{instance:{one:"namespace",other:"namespaces"}}},"pulsar.topic_storage_write_rate":{info:"Topic Storage Write Rate.",en:{instance:{one:"namespace",other:"namespaces"}}},"pulsar.topic_msg_backlog":{info:"Topic Messages Backlog Size.",en:{instance:{one:"namespace",other:"namespaces"}}},"pulsar.topic_subscription_delayed":{info:"Topic Subscriptions Delayed for Dispatching.",en:{instance:{one:"namespace",other:"namespaces"}}},"pulsar.topic_subscription_msg_rate_redeliver":{info:"Topic Subscriptions Redelivered Message Rate.",en:{instance:{one:"namespace",other:"namespaces"}}},"pulsar.topic_subscription_blocked_on_unacked_messages":{info:"Topic Subscriptions Blocked On Unacked Messages.",en:{instance:{one:"namespace",other:"namespaces"}}},"pulsar.topic_replication_rate_in":{info:"Topic Replication Rate From Remote Cluster.",en:{instance:{one:"namespace",other:"namespaces"}}},"pulsar.topic_replication_rate_out":{info:"Topic Replication Rate To Remote Cluster.",en:{instance:{one:"namespace",other:"namespaces"}}},"pulsar.topic_replication_throughput_rate_in":{info:"Topic Replication Throughput Rate From Remote Cluster.",en:{instance:{one:"namespace",other:"namespaces"}}},"pulsar.topic_replication_throughput_rate_out":{info:"Topic Replication Throughput Rate To Remote Cluster.",en:{instance:{one:"namespace",other:"namespaces"}}},"pulsar.topic_replication_backlog":{info:"Topic Replication Backlog.",en:{instance:{one:"namespace",other:"namespaces"}}},"puppet.jvm":{info:"JVM Non-Heap.",en:{instance:{one:"server",other:"servers"}}},"puppet.fdopen":{info:"File 
Descriptors.",en:{instance:{one:"server",other:"servers"}}},"rabbitmq.messages_count":{info:"Messages.",en:{instance:{one:"broker",other:"brokers"}}},"rabbitmq.messages_rate":{info:"Messages.",en:{instance:{one:"broker",other:"brokers"}}},"rabbitmq.objects_count":{info:"Objects.",en:{instance:{one:"broker",other:"brokers"}}},"rabbitmq.connection_churn_rate":{info:"Connection churn.",en:{instance:{one:"broker",other:"brokers"}}},"rabbitmq.channel_churn_rate":{info:"Channel churn.",en:{instance:{one:"broker",other:"brokers"}}},"rabbitmq.queue_churn_rate":{info:"Queue churn.",en:{instance:{one:"broker",other:"brokers"}}},"rabbitmq.file_descriptors_count":{info:"File descriptors.",en:{instance:{one:"broker",other:"brokers"}}},"rabbitmq.sockets_count":{info:"Used sockets.",en:{instance:{one:"broker",other:"brokers"}}},"rabbitmq.erlang_processes_count":{info:"Erlang processes.",en:{instance:{one:"broker",other:"brokers"}}},"rabbitmq.erlang_run_queue_processes_count":{info:"Erlang run queue.",en:{instance:{one:"broker",other:"brokers"}}},"rabbitmq.memory_usage":{info:"Memory.",en:{instance:{one:"broker",other:"brokers"}}},"rabbitmq.disk_space_free_size":{info:"Free disk space.",en:{instance:{one:"broker",other:"brokers"}}},"rabbitmq.vhost_messages_count":{info:"Vhost messages.",en:{instance:{one:"vhost",other:"vhosts"}}},"rabbitmq.vhost_messages_rate":{info:"Vhost messages rate.",en:{instance:{one:"vhost",other:"vhosts"}}},"rabbitmq.queue_messages_count":{info:"Queue messages.",en:{instance:{one:"queue",other:"queues"}}},"rabbitmq.queue_messages_rate":{info:"Queue messages rate.",en:{instance:{one:"queue",other:"queues"}}},"redis.connections":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Total Connections",layout:{left:14,top:0,width:1.999999999,height:5}}],info:"Accepted and rejected (maxclients limit) connections.",en:{instance:{one:"server",other:"servers"}}},"redis.memory":{info:"Memory usage.",en:{instance:{one:"server",other:"servers"}}},"redis.key_eviction_events":{info:"Evicted keys due to maxmemory limit.",en:{instance:{one:"server",other:"servers"}}},"redis.net":{info:"Bandwidth.",en:{instance:{one:"server",other:"servers"}}},"redis.rdb_changes":{info:"TI,TLE.",en:{instance:{one:"server",other:"servers"}}},"redis.bgsave_now":{aggregationMethod:"max",info:"Duration of the on-going RDB save operation if any.",en:{instance:{one:"server",other:"servers"}}},"redis.bgsave_health":{info:"Status of the last RDB save operation (0: ok, 1: err).",en:{instance:{one:"server",other:"servers"}}},"redis.bgsave_last_rdb_save_since_time":{aggregationMethod:"min",info:"Time elapsed since the last successful RDB save.",en:{instance:{one:"server",other:"servers"}}},"redis.aof_file_size":{info:"AOF file size.",en:{instance:{one:"server",other:"servers"}}},"redis.commands_usec":{info:"Total CPU time consumed by the commands.",en:{instance:{one:"server",other:"servers"}}},"redis.commands_usec_per_sec":{info:"Average CPU consumed per command execution.",en:{instance:{one:"server",other:"servers"}}},"redis.key_expiration_events":{info:"Expired keys.",en:{instance:{one:"server",other:"servers"}}},"redis.database_keys":{info:"Keys per database.",en:{instance:{one:"server",other:"servers"}}},"redis.database_expires_keys":{info:"Keys with an expiration per database.",en:{instance:{one:"server",other:"servers"}}},"redis.connected_replicas":{info:"Connected replicas.",en:{instance:{one:"server",other:"servers"}}},"redis.master_link_status":{info:"Master link 
status.",en:{instance:{one:"server",other:"servers"}}},"rethinkdb.cluster_connected_servers":{info:"Connected Servers.",en:{instance:{one:"server",other:"servers"}}},"rethinkdb.cluster_clients_active":{info:"Active Clients.",en:{instance:{one:"server",other:"servers"}}},"rethinkdb.cluster_queries":{info:"Queries.",en:{instance:{one:"server",other:"servers"}}},"rethinkdb.cluster_documents":{info:"Documents.",en:{instance:{one:"server",other:"servers"}}},"rethinkdb.client_connections":{info:"Client Connections.",en:{instance:{one:"database",other:"databases"}}},"rethinkdb.clients_active":{info:"Active Clients.",en:{instance:{one:"database",other:"databases"}}},"rethinkdb.queries":{info:"Queries.",en:{instance:{one:"database",other:"databases"}}},"rethinkdb.documents":{info:"Documents.",en:{instance:{one:"database",other:"databases"}}},"riak.kv.throughput":{info:"Reads & writes coordinated by this node.",en:{instance:{one:"server",other:"servers"}}},"riak.dt.vnode_updates":{info:"Update operations coordinated by local vnodes by data type.",en:{instance:{one:"server",other:"servers"}}},"riak.search":{info:"Search queries on the node.",en:{instance:{one:"server",other:"servers"}}},"riak.search.documents":{info:"Documents indexed by search.",en:{instance:{one:"server",other:"servers"}}},"riak.consistent.operations":{info:"Consistent node operations.",en:{instance:{one:"server",other:"servers"}}},"riak.kv.latency.get":{info:"Time between reception of a client GET request and subsequent response to client.",en:{instance:{one:"server",other:"servers"}}},"riak.kv.latency.put":{info:"Time between reception of a client PUT request and subsequent response to client.",en:{instance:{one:"server",other:"servers"}}},"riak.dt.latency.counter_merge":{info:"Time it takes to perform an Update Counter operation.",en:{instance:{one:"server",other:"servers"}}},"riak.dt.latency.set_merge":{info:"Time it takes to perform an Update Set operation.",en:{instance:{one:"server",other:"servers"}}},"riak.dt.latency.map_merge":{info:"Time it takes to perform an Update Map operation.",en:{instance:{one:"server",other:"servers"}}},"riak.search.latency.query":{info:"Search query latency.",en:{instance:{one:"server",other:"servers"}}},"riak.search.latency.index":{info:"Time it takes Search to index a new document.",en:{instance:{one:"server",other:"servers"}}},"riak.consistent.latency.get":{info:"Strongly consistent read latency.",en:{instance:{one:"server",other:"servers"}}},"riak.consistent.latency.put":{info:"Strongly consistent write latency.",en:{instance:{one:"server",other:"servers"}}},"riak.vm":{info:"Total processes running in the Erlang VM.",en:{instance:{one:"server",other:"servers"}}},"riak.vm.memory.processes":{info:"Memory allocated & used by Erlang processes.",en:{instance:{one:"server",other:"servers"}}},"riak.kv.siblings_encountered.get":{info:"Number of siblings encountered during GET operations by this node during the past minute.",en:{instance:{one:"server",other:"servers"}}},"riak.kv.objsize.get":{info:"Object size encountered by this node during the past minute.",en:{instance:{one:"server",other:"servers"}}},"riak.search.vnodeq_size":{info:"Number of unprocessed messages in the vnode message queues of Search on this node in the past minute.",en:{instance:{one:"server",other:"servers"}}},"riak.search.index":{info:"Number of writes to Search failed due to bad data format by reason.",en:{instance:{one:"server",other:"servers"}}},"riak.core.protobuf_connections":{info:"Protocol buffer connections by 
status.",en:{instance:{one:"server",other:"servers"}}},"riak.core.repairs":{info:"Number of repair operations this node has coordinated.",en:{instance:{one:"server",other:"servers"}}},"riak.core.fsm_active":{info:"Active finite state machines by kind.",en:{instance:{one:"server",other:"servers"}}},"riak.core.fsm_rejected":{info:"Finite state machines being rejected by Sidejobs overload protection.",en:{instance:{one:"server",other:"servers"}}},"syscall.rw":{info:"R/Ws.",en:{instance:{one:"server",other:"servers"}}},"smb2.rw":{info:"R/Ws.",en:{instance:{one:"server",other:"servers"}}},"smb2.create_close":{info:"Create/Close.",en:{instance:{one:"server",other:"servers"}}},"smb2.get_set_info":{info:"Info.",en:{instance:{one:"server",other:"servers"}}},"smb2.find":{info:"Find.",en:{instance:{one:"server",other:"servers"}}},"smb2.notify":{info:"Notify.",en:{instance:{one:"server",other:"servers"}}},"smb2.sm_counters":{info:"Lesser Ops.",en:{instance:{one:"server",other:"servers"}}},"scaleio.system_capacity_total":{info:"Total Capacity.",en:{instance:{one:"storage server",other:"storage servers"}}},"scaleio.system_capacity_in_use":{info:"Capacity In Use.",en:{instance:{one:"storage server",other:"storage servers"}}},"scaleio.system_capacity_usage":{info:"Capacity Usage.",en:{instance:{one:"storage server",other:"storage servers"}}},"scaleio.system_capacity_available_volume_allocation":{info:"Available For Volume Allocation.",en:{instance:{one:"storage server",other:"storage servers"}}},"scaleio.system_capacity_health_state":{info:"Capacity Health State.",en:{instance:{one:"storage server",other:"storage servers"}}},"scaleio.system_workload_primary_bandwidth_total":{info:"Primary Backend Bandwidth Total (Read and Write).",en:{instance:{one:"storage server",other:"storage servers"}}},"scaleio.system_workload_primary_bandwidth":{info:"Primary Backend Bandwidth.",en:{instance:{one:"storage server",other:"storage servers"}}},"scaleio.system_workload_primary_iops_total":{info:"Primary Backend IOPS Total (Read and Write).",en:{instance:{one:"storage server",other:"storage servers"}}},"scaleio.system_workload_primary_iops":{info:"Primary Backend IOPS.",en:{instance:{one:"storage server",other:"storage servers"}}},"scaleio.system_workload_primary_io_size_total":{info:"Primary Backend I/O Size Total (Read and Write).",en:{instance:{one:"storage server",other:"storage servers"}}},"scaleio.system_rebalance":{info:"Rebalance.",en:{instance:{one:"storage server",other:"storage servers"}}},"scaleio.system_rebalance_left":{info:"Rebalance Pending Capacity.",en:{instance:{one:"storage server",other:"storage servers"}}},"scaleio.system_rebalance_time_until_finish":{aggregationMethod:"min",info:"Rebalance Approximate Time Until Finish.",en:{instance:{one:"storage server",other:"storage servers"}}},"scaleio.system_rebuild":{info:"Rebuild Bandwidth Total (Forward, Backward and Normal).",en:{instance:{one:"storage server",other:"storage servers"}}},"scaleio.system_rebuild_left":{info:"Rebuild Pending Capacity Total (Forward, Backward and Normal).",en:{instance:{one:"storage server",other:"storage servers"}}},"scaleio.system_defined_components":{info:"Components.",en:{instance:{one:"storage server",other:"storage servers"}}},"scaleio.system_components_volumes_by_type":{info:"Volumes By Type.",en:{instance:{one:"storage server",other:"storage servers"}}},"scaleio.system_components_volumes_by_mapping":{info:"Volumes By Mapping.",en:{instance:{one:"storage server",other:"storage 
servers"}}},"scaleio.storage_pool_capacity_total":{info:"Total Capacity.",en:{instance:{one:"storage server",other:"storage servers"}}},"scaleio.storage_pool_capacity_in_use":{info:"Capacity In Use.",en:{instance:{one:"storage server",other:"storage servers"}}},"scaleio.storage_pool_capacity_usage":{info:"Capacity Usage.",en:{instance:{one:"storage server",other:"storage servers"}}},"scaleio.storage_pool_capacity_utilization":{aggregationMethod:"avg",info:"Capacity Utilization.",en:{instance:{one:"storage server",other:"storage servers"}}},"scaleio.storage_pool_capacity_available_volume_allocation":{info:"Available For Volume Allocation.",en:{instance:{one:"storage server",other:"storage servers"}}},"scaleio.storage_pool_capacity_health_state":{info:"Capacity Health State.",en:{instance:{one:"storage server",other:"storage servers"}}},"scaleio.storage_pool_components":{info:"Components.",en:{instance:{one:"storage server",other:"storage servers"}}},"scaleio.sdc_mdm_connection_state":{info:"MDM Connection State.",en:{instance:{one:"storage server",other:"storage servers"}}},"scaleio.sdc_bandwidth":{info:"Bandwidth.",en:{instance:{one:"storage server",other:"storage servers"}}},"scaleio.sdc_iops":{info:"IOPS.",en:{instance:{one:"storage server",other:"storage servers"}}},"scaleio.sdc_io_size":{info:"IOPS Size.",en:{instance:{one:"storage server",other:"storage servers"}}},"scaleio.sdc_num_of_mapped_volumed":{info:"Mapped Volumes.",en:{instance:{one:"storage server",other:"storage servers"}}},"mem.slabmemory":{info:"Memory Usage.",en:{instance:{one:"system",other:"systems"}}},"mem.slabfilling":{info:"Object Filling.",en:{instance:{one:"system",other:"systems"}}},"mem.slabwaste":{info:"Memory waste.",en:{instance:{one:"system",other:"systems"}}},"smartd_log.read_error_rate":{info:"Read Error Rate.",en:{instance:{one:"system",other:"systems"}}},"smartd_log.seek_error_rate":{info:"Seek Error Rate.",en:{instance:{one:"system",other:"systems"}}},"smartd_log.soft_read_error_rate":{info:"Soft Read Error Rate.",en:{instance:{one:"system",other:"systems"}}},"smartd_log.write_error_rate":{info:"Write Error Rate.",en:{instance:{one:"system",other:"systems"}}},"smartd_log.read_total_err_corrected":{info:"Read Error Corrected.",en:{instance:{one:"system",other:"systems"}}},"smartd_log.read_total_unc_errors":{info:"Read Error Uncorrected.",en:{instance:{one:"system",other:"systems"}}},"smartd_log.write_total_err_corrected":{info:"Write Error Corrected.",en:{instance:{one:"system",other:"systems"}}},"smartd_log.write_total_unc_errors":{info:"Write Error Uncorrected.",en:{instance:{one:"system",other:"systems"}}},"smartd_log.verify_total_err_corrected":{info:"Verify Error Corrected.",en:{instance:{one:"system",other:"systems"}}},"smartd_log.verify_total_unc_errors":{info:"Verify Error Uncorrected.",en:{instance:{one:"system",other:"systems"}}},"smartd_log.sata_interface_downshift":{info:"SATA Interface Downshift.",en:{instance:{one:"system",other:"systems"}}},"smartd_log.udma_crc_error_count":{info:"UDMA CRC Error Count.",en:{instance:{one:"system",other:"systems"}}},"smartd_log.throughput_performance":{info:"Throughput Performance.",en:{instance:{one:"system",other:"systems"}}},"smartd_log.seek_time_performance":{info:"Seek Time Performance.",en:{instance:{one:"system",other:"systems"}}},"smartd_log.start_stop_count":{info:"Start/Stop Count.",en:{instance:{one:"system",other:"systems"}}},"smartd_log.power_on_hours_count":{info:"Power-On Hours 
Count.",en:{instance:{one:"system",other:"systems"}}},"smartd_log.power_cycle_count":{info:"Power Cycle Count.",en:{instance:{one:"system",other:"systems"}}},"smartd_log.unexpected_power_loss":{info:"Unexpected Power Loss.",en:{instance:{one:"system",other:"systems"}}},"smartd_log.spin_up_time":{aggregationMethod:"avg",info:"Spin-Up Time.",en:{instance:{one:"system",other:"systems"}}},"smartd_log.spin_up_retries":{info:"Spin-up Retries.",en:{instance:{one:"system",other:"systems"}}},"smartd_log.calibration_retries":{info:"Calibration Retries.",en:{instance:{one:"system",other:"systems"}}},"smartd_log.airflow_temperature_celsius":{aggregationMethod:"avg",info:"Airflow Temperature Celsius.",en:{instance:{one:"system",other:"systems"}}},"smartd_log.temperature_celsius":{aggregationMethod:"avg",info:"Temperature.",en:{instance:{one:"system",other:"systems"}}},"smartd_log.reallocated_sectors_count":{info:"Reallocated Sectors Count.",en:{instance:{one:"system",other:"systems"}}},"smartd_log.reserved_block_count":{aggregationMethod:"avg",info:"Reserved Block Count.",en:{instance:{one:"system",other:"systems"}}},"smartd_log.program_fail_count":{info:"Program Fail Count.",en:{instance:{one:"system",other:"systems"}}},"smartd_log.erase_fail_count":{info:"Erase Fail Count.",en:{instance:{one:"system",other:"systems"}}},"smartd_log.wear_leveller_worst_case_erase_count":{info:"Wear Leveller Worst Case Erase Count.",en:{instance:{one:"system",other:"systems"}}},"smartd_log.unused_reserved_nand_blocks":{info:"Unused Reserved NAND Blocks.",en:{instance:{one:"system",other:"systems"}}},"smartd_log.reallocation_event_count":{info:"Reallocation Event Count.",en:{instance:{one:"system",other:"systems"}}},"smartd_log.current_pending_sector_count":{info:"Current Pending Sector Count.",en:{instance:{one:"system",other:"systems"}}},"smartd_log.offline_uncorrectable_sector_count":{info:"Offline Uncorrectable Sector Count.",en:{instance:{one:"system",other:"systems"}}},"smartd_log.percent_lifetime_used":{aggregationMethod:"avg",info:"Percent Lifetime Used.",en:{instance:{one:"system",other:"systems"}}},"smartd_log.media_wearout_indicator":{aggregationMethod:"avg",info:"Media Wearout Indicator.",en:{instance:{one:"system",other:"systems"}}},"smartd_log.nand_writes_1gib":{info:"NAND Writes.",en:{instance:{one:"system",other:"systems"}}},"solr.search_requests":{info:"Search Requests.",en:{instance:{one:"solr server",other:"solr server"}}},"solr.search_errors":{info:"Search Errors.",en:{instance:{one:"solr server",other:"solr server"}}},"solr.search_errors_by_type":{info:"Search Errors By Type.",en:{instance:{one:"solr server",other:"solr server"}}},"solr.search_requests_processing_time":{aggregationMethod:"avg",info:"Search Requests Processing Time.",en:{instance:{one:"solr server",other:"solr server"}}},"solr.search_requests_timings":{info:"Search Requests Timings.",en:{instance:{one:"solr server",other:"solr server"}}},"solr.search_requests_processing_time_percentile":{aggregationMethod:"avg",info:"Search Requests Processing Time Percentile.",en:{instance:{one:"solr server",other:"solr server"}}},"solr.update_requests":{info:"Update Requests.",en:{instance:{one:"solr server",other:"solr server"}}},"solr.update_errors":{info:"Update Errors.",en:{instance:{one:"solr server",other:"solr server"}}},"solr.update_errors_by_type":{info:"Update Errors By Type.",en:{instance:{one:"solr server",other:"solr server"}}},"solr.update_requests_processing_time":{aggregationMethod:"avg",info:"Update Requests Processing 
Time.",en:{instance:{one:"solr server",other:"solr server"}}},"solr.update_requests_timings":{info:"Update Requests Timings.",en:{instance:{one:"solr server",other:"solr server"}}},"solr.update_requests_processing_time_percentile":{aggregationMethod:"avg",info:"Update Requests Processing Time Percentile.",en:{instance:{one:"solr server",other:"solr server"}}},"spigotmc.mem":{info:"Minecraft Memory Usage.",en:{instance:{one:"spigot server",other:"spigot servers"}}},"springboot2.response_codes":{info:"Response Codes.",en:{instance:{one:"sb2 server",other:"sb2 servers"}}},"springboot2.thread":{info:"Threads.",en:{instance:{one:"sb2 server",other:"sb2 servers"}}},"springboot2.heap":{info:"Overview.",en:{instance:{one:"sb2 server",other:"sb2 servers"}}},"springboot2.heap_eden":{info:"Eden Space.",en:{instance:{one:"sb2 server",other:"sb2 servers"}}},"springboot2.heap_survivor":{info:"Survivor Space.",en:{instance:{one:"sb2 server",other:"sb2 servers"}}},"springboot2.heap_old":{info:"Old Space.",en:{instance:{one:"sb2 server",other:"sb2 servers"}}},"springboot2.uptime":{aggregationMethod:"min",info:"The uptime of the Java virtual machine.",en:{instance:{one:"sb2 server",other:"sb2 servers"}}},"squid.clients_net":{info:"Squid Client Bandwidth.",en:{instance:{one:"squid instance",other:"squid instances"}}},"squid.clients_requests":{info:"Squid Client Requests.",en:{instance:{one:"squid instance",other:"squid instances"}}},"squid.servers_net":{info:"Squid Server Bandwidth.",en:{instance:{one:"squid instance",other:"squid instances"}}},"squid.servers_requests":{info:"Squid Server Requests.",en:{instance:{one:"squid instance",other:"squid instances"}}},"supervisord.summary_processes":{info:"Processes.",en:{instance:{one:"client / server",other:"clients / servers"}}},"supervisord.processes":{info:"Processes.",en:{instance:{one:"process group",other:"process groups"}}},"supervisord.process_exit_status":{info:"Exit status.",en:{instance:{one:"process group",other:"process groups"}}},"supervisord.process_uptime":{aggregationMethod:"min",info:"Uptime.",en:{instance:{one:"process group",other:"process groups"}}},"supervisord.process_downtime":{info:"Downtime.",en:{instance:{one:"process group",other:"process groups"}}},"tengine.bandwidth_total":{info:"Bandwidth.",en:{instance:{one:"web server",other:"web servers"}}},"tengine.connections_total":{info:"Connections.",en:{instance:{one:"web server",other:"web servers"}}},"tengine.requests_total":{info:"Requests.",en:{instance:{one:"web server",other:"web servers"}}},"tengine.requests_per_response_code_family_total":{info:"Requests Per Response Code Family.",en:{instance:{one:"web server",other:"web servers"}}},"tengine.requests_per_response_code_detailed_total":{info:"Requests Per Response Code Detailed.",en:{instance:{one:"web server",other:"web servers"}}},"tengine.requests_upstream_total":{info:"Number Of Requests Calling For Upstream.",en:{instance:{one:"web server",other:"web servers"}}},"tengine.tries_upstream_total":{info:"Number Of Times Calling For Upstream.",en:{instance:{one:"web server",other:"web servers"}}},"tengine.requests_upstream_per_response_code_family_total":{info:"Upstream Requests Per Response Code Family.",en:{instance:{one:"web server",other:"web servers"}}},"tomcat.processing_time":{aggregationMethod:"avg",info:"processing time.",en:{instance:{one:"web server",other:"web servers"}}},"tomcat.jvm":{info:"JVM Memory Pool Usage.",en:{instance:{one:"web server",other:"web servers"}}},"tomcat.jvm_eden":{info:"Eden Memory 
Usage.",en:{instance:{one:"web server",other:"web servers"}}},"tomcat.jvm_survivor":{info:"Survivor Memory Usage.",en:{instance:{one:"web server",other:"web servers"}}},"tomcat.jvm_tenured":{info:"Tenured Memory Usage.",en:{instance:{one:"web server",other:"web servers"}}},"unbound.queries":{info:"Received Queries.",en:{instance:{one:"server",other:"servers"}}},"unbound.queries_ip_ratelimited":{info:"Rate Limited Queries.",en:{instance:{one:"server",other:"servers"}}},"unbound.dnscrypt_queries":{info:"DNSCrypt Queries.",en:{instance:{one:"server",other:"servers"}}},"unbound.cache":{info:"Cache Statistics.",en:{instance:{one:"server",other:"servers"}}},"unbound.cache_percentage":{aggregationMethod:"avg",info:"Cache Statistics Percentage.",en:{instance:{one:"server",other:"servers"}}},"unbound.prefetch":{info:"Cache Prefetches.",en:{instance:{one:"server",other:"servers"}}},"unbound.expired":{info:"Replies Served From Expired Cache.",en:{instance:{one:"server",other:"servers"}}},"unbound.zero_ttl_replies":{info:"Replies Served From Expired Cache.",en:{instance:{one:"server",other:"servers"}}},"unbound.recursive_replies":{info:"Replies That Needed Recursive Processing.",en:{instance:{one:"server",other:"servers"}}},"unbound.recursion_time":{aggregationMethod:"avg",info:"Time Spent On Recursive Processing.",en:{instance:{one:"server",other:"servers"}}},"unbound.request_list_usage":{info:"Request List Usage.",en:{instance:{one:"server",other:"servers"}}},"unbound.current_request_list_usage":{info:"Current Request List Usage.",en:{instance:{one:"server",other:"servers"}}},"unbound.request_list_jostle_list":{info:"Request List Jostle List Events.",en:{instance:{one:"server",other:"servers"}}},"unbound.tcpusage":{info:"TCP Handler Buffers.",en:{instance:{one:"server",other:"servers"}}},"unbound.uptime":{aggregationMethod:"min",info:"Uptime.",en:{instance:{one:"server",other:"servers"}}},"unbound.cache_memory":{info:"Cache Memory.",en:{instance:{one:"server",other:"servers"}}},"unbound.mod_memory":{info:"Module Memory.",en:{instance:{one:"server",other:"servers"}}},"unbound.mem_streamwait":{info:"TCP and TLS Stream Waif Buffer Memory.",en:{instance:{one:"server",other:"servers"}}},"unbound.cache_count":{info:"Cache Items Count.",en:{instance:{one:"server",other:"servers"}}},"unbound.type_queries":{info:"Queries By Type.",en:{instance:{one:"server",other:"servers"}}},"unbound.class_queries":{info:"Queries By Class.",en:{instance:{one:"server",other:"servers"}}},"unbound.opcode_queries":{info:"Queries By OpCode.",en:{instance:{one:"server",other:"servers"}}},"unbound.flag_queries":{info:"Queries By Flag.",en:{instance:{one:"server",other:"servers"}}},"unbound.rcode_answers":{info:"Replies By RCode.",en:{instance:{one:"server",other:"servers"}}},"unbound.thread_queries":{info:"Thread Received Queries.",en:{instance:{one:"thread",other:"threads"}}},"unbound.thread_queries_ip_ratelimited":{info:"Thread Rate Limited Queries.",en:{instance:{one:"thread",other:"threads"}}},"unbound.thread_dnscrypt_queries":{info:"Thread DNSCrypt Queries.",en:{instance:{one:"thread",other:"threads"}}},"unbound.thread_cache":{info:"Cache Statistics.",en:{instance:{one:"thread",other:"threads"}}},"unbound.thread_cache_percentage":{aggregationMethod:"avg",info:"Cache Statistics Percentage.",en:{instance:{one:"thread",other:"threads"}}},"unbound.thread_prefetch":{info:"Cache Prefetches.",en:{instance:{one:"thread",other:"threads"}}},"unbound.thread_expired":{info:"Replies Served From Expired 
Cache.",en:{instance:{one:"thread",other:"threads"}}},"unbound.thread_zero_ttl_replies":{info:"Replies Served From Expired Cache.",en:{instance:{one:"thread",other:"threads"}}},"unbound.thread_recursive_replies":{info:"Replies That Needed Recursive Processing.",en:{instance:{one:"thread",other:"threads"}}},"unbound.thread_recursion_time":{aggregationMethod:"avg",info:"Time Spent On Recursive Processing.",en:{instance:{one:"thread",other:"threads"}}},"unbound.thread_request_list_usage":{info:"Time Spent On Recursive Processing.",en:{instance:{one:"thread",other:"threads"}}},"unbound.thread_current_request_list_usage":{info:"Current Request List Usage.",en:{instance:{one:"thread",other:"threads"}}},"unbound.thread_request_list_jostle_list":{info:"Request List Jostle List Events.",en:{instance:{one:"thread",other:"threads"}}},"unbound.thread_tcpusage":{info:"TCP Handler Buffers.",en:{instance:{one:"thread",other:"threads"}}},"uwsgi.requests":{info:"Requests.",en:{instance:{one:"server",other:"servers"}}},"uwsgi.tx":{info:"Transmitted data.",en:{instance:{one:"server",other:"servers"}}},"uwsgi.avg_rt":{info:"Average request time.",en:{instance:{one:"server",other:"servers"}}},"uwsgi.memory_rss":{info:"RSS (Resident Set Size).",en:{instance:{one:"server",other:"servers"}}},"uwsgi.memory_vsz":{info:"VSZ (Virtual Memory Size).",en:{instance:{one:"server",other:"servers"}}},"uwsgi.exceptions":{info:"Exceptions.",en:{instance:{one:"server",other:"servers"}}},"uwsgi.harakiris":{info:"Harakiris.",en:{instance:{one:"server",other:"servers"}}},"uwsgi.respawns":{info:"Respawns.",en:{instance:{one:"server",other:"servers"}}},"varnish.session_connection":{info:"Connections Statistics.",en:{instance:{one:"accelerator",other:"accelerators"}}},"varnish.client_requests":{info:"Client Requests.",en:{instance:{one:"accelerator",other:"accelerators"}}},"varnish.all_time_hit_rate":{aggregationMethod:"avg",info:"All History Hit Rate Ratio.",en:{instance:{one:"accelerator",other:"accelerators"}}},"varnish.current_poll_hit_rate":{aggregationMethod:"avg",info:"Current Poll Hit Rate Ratio.",en:{instance:{one:"accelerator",other:"accelerators"}}},"varnish.cached_objects_expired":{info:"Expired Objects.",en:{instance:{one:"accelerator",other:"accelerators"}}},"varnish.cached_objects_nuked":{info:"Least Recently Used Nuked Objects.",en:{instance:{one:"accelerator",other:"accelerators"}}},"varnish.threads_total":{info:"Number Of Threads In All Pools.",en:{instance:{one:"accelerator",other:"accelerators"}}},"varnish.threads_statistics":{info:"Threads Statistics.",en:{instance:{one:"accelerator",other:"accelerators"}}},"varnish.threads_queue_len":{info:"Current Queue Length.",en:{instance:{one:"accelerator",other:"accelerators"}}},"varnish.backend_connections":{info:"Backend Connections Statistics.",en:{instance:{one:"accelerator",other:"accelerators"}}},"varnish.backend_requests":{info:"Requests To The Backend.",en:{instance:{one:"accelerator",other:"accelerators"}}},"varnish.esi_statistics":{info:"ESI Statistics.",en:{instance:{one:"accelerator",other:"accelerators"}}},"varnish.memory_usage":{info:"Memory Usage.",en:{instance:{one:"accelerator",other:"accelerators"}}},"varnish.uptime":{aggregationMethod:"min",info:"Uptime.",en:{instance:{one:"accelerator",other:"accelerators"}}},"varnish.backend":{info:"Backend {backend_name}.",en:{instance:{one:"backend",other:"backends"}}},"varnish.storage_usage":{info:"Storage {storage_name} 
Usage.",en:{instance:{one:"storage",other:"storages"}}},"varnish.storage_alloc_objs":{info:"Storage {storage_name} Allocated Objects.",en:{instance:{one:"storage",other:"storages"}}},"vernemq.socket_operations":{info:"Socket Open and Close Events.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.client_keepalive_expired":{info:"Closed Sockets due to Keepalive Time Expired.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.socket_close_timeout":{info:"Closed Sockets due to no CONNECT Frame On Time.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.socket_errors":{info:"Socket Errors.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.queue_processes_operations":{info:"Queue Processes Setup and Teardown Events.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.queue_process_init_from_storage":{info:"Queue Processes Initialized from Offline Storage.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.queue_undelivered_messages":{info:"Undelivered PUBLISH Messages.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.router_subscriptions":{info:"Subscriptions in the Routing Table.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.router_matched_subscriptions":{info:"Matched Subscriptions.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.router_memory":{info:"Routing Table Memory Usage.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.system_utilization_scheduler":{aggregationMethod:"avg",info:"Scheduler Utilization.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.system_processes":{info:"Erlang Processes.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.system_reductions":{info:"Reductions.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.system_context_switches":{info:"Context Switches.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.system_io":{info:"Received and Sent Traffic through Ports.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.system_run_queue":{info:"Processes that are Ready to Run on All Run-Queues.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.system_gc_count":{info:"GC Count.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.system_gc_words_reclaimed":{info:"GC Words Reclaimed.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.system_allocated_memory":{info:"Memory Allocated by the Erlang Processes and by the Emulator.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.bandwidth":{info:"Bandwidth.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.retain_messages":{info:"Stored Retained Messages.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.retain_memory":{info:"Stored Retained Messages Memory Usage.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.cluster_bandwidth":{info:"Communication with Other Cluster Nodes.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.cluster_dropped":{info:"Traffic Dropped During Communication with Other Cluster Nodes.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.netsplit_unresolved":{info:"Unresolved Netsplits.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.netsplits":{info:"Netsplits.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.mqtt_auth":{info:"v5 AUTH.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.mqtt_auth_received_reason":{info:"v5 AUTH Received by Reason.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.mqtt_auth_sent_reason":{info:"v5 AUTH Sent by Reason.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.mqtt_connect":{info:"v3/v5 CONNECT 
and CONNACK.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.mqtt_connack_sent_reason":{info:"v3/v5 CONNACK Sent by Reason.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.mqtt_disconnect":{info:"v3/v5 DISCONNECT.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.mqtt_disconnect_received_reason":{info:"v5 DISCONNECT Received by Reason.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.mqtt_disconnect_sent_reason":{info:"v5 DISCONNECT Sent by Reason.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.mqtt_subscribe":{info:"v3/v5 SUBSCRIBE and SUBACK.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.mqtt_subscribe_error":{info:"v3/v5 Failed SUBSCRIBE Operations due to a Netsplit.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.mqtt_subscribe_auth_error":{info:"v3/v5 Unauthorized SUBSCRIBE Attempts.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.mqtt_puback_received_reason":{info:"v5 PUBACK QoS 1 Received by Reason.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.mqtt_puback_sent_reason":{info:"v5 PUBACK QoS 1 Sent by Reason.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.mqtt_puback_invalid_error":{info:"v3/v5 PUBACK QoS 1 Received Unexpected Messages.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.mqtt_pubrec":{info:"v3/v5 PUBREC QoS 2.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.mqtt_pubrec_received_reason":{info:"v5 PUBREC QoS 2 Received by Reason.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.mqtt_pubrec_sent_reason":{info:"v5 PUBREC QoS 2 Sent by Reason.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.mqtt_pubrec_invalid_error":{info:"v3 PUBREC QoS 2 Received Unexpected Messages.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.mqtt_pubrel":{info:"v3/v5 PUBREL QoS 2.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.mqtt_pubrel_received_reason":{info:"v5 PUBREL QoS 2 Received by Reason.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.mqtt_pubrel_sent_reason":{info:"v5 PUBREL QoS 2 Sent by Reason.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.mqtt_pubcom":{info:"v3/v5 PUBCOMP QoS 2.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.mqtt_pubcomp_received_reason":{info:"v5 PUBCOMP QoS 2 Received by Reason.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.mqtt_pubcomp_sent_reason":{info:"v5 PUBCOMP QoS 2 Sent by Reason.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.mqtt_pubcomp_invalid_error":{info:"v3/v5 PUBCOMP QoS 2 Received Unexpected Messages.",en:{instance:{one:"broker",other:"brokers"}}},"vernemq.node_uptime":{aggregationMethod:"min",info:"Node Uptime.",en:{instance:{one:"broker",other:"brokers"}}},"web_log.requests":{mainheads:[{groupBy:["selected"],chartLibrary:"easypiechart",title:"Total Requests",colors:s.default[12],layout:{left:0,top:0,width:3,height:5}}],info:"Total Requests.",en:{instance:{one:"web server",other:"web servers"}}},"web_log.excluded_requests":{info:"Excluded Requests.",en:{instance:{one:"web server",other:"web servers"}}},"web_log.status_code_class_responses":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Total Responses by Status Code Class",layout:{left:9,top:0,width:3,height:5}}],info:"Responses By Status Code Class.",en:{instance:{one:"web server",other:"web servers"}}},"web_log.status_code_class_1xx_responses":{info:"Informational Responses By Status Code.",en:{instance:{one:"web server",other:"web servers"}}},"web_log.status_code_class_2xx_responses":{info:"Successful 
Responses By Status Code.",en:{instance:{one:"web server",other:"web servers"}}},"web_log.status_code_class_3xx_responses":{info:"Redirects Responses By Status Code.",en:{instance:{one:"web server",other:"web servers"}}},"web_log.status_code_class_4xx_responses":{info:"Client Errors Responses By Status Code.",en:{instance:{one:"web server",other:"web servers"}}},"web_log.status_code_class_5xx_responses":{info:"Server Errors Responses By Status Code.",en:{instance:{one:"web server",other:"web servers"}}},"web_log.bandwidth":{info:"Bandwidth.",en:{instance:{one:"web server",other:"web servers"}}},"web_log.requests_processing_time_histogram":{info:"Requests Processing Time Histogram.",en:{instance:{one:"web server",other:"web servers"}}},"web_log.upstream_response_time":{aggregationMethod:"avg",info:"Upstream Response Time.",en:{instance:{one:"web server",other:"web servers"}}},"web_log.upstream_responses_time_histogram":{info:"Upstream Responses Time Histogram.",en:{instance:{one:"web server",other:"web servers"}}},"web_log.current_poll_uniq_clients":{info:"Current Poll Unique Clients.",en:{instance:{one:"web server",other:"web servers"}}},"web_log.vhost_requests":{info:"Requests By Vhost.",en:{instance:{one:"web server",other:"web servers"}}},"web_log.port_requests":{info:"Requests By Port.",en:{instance:{one:"web server",other:"web servers"}}},"web_log.scheme_requests":{info:"Requests By Scheme.",en:{instance:{one:"web server",other:"web servers"}}},"web_log.http_method_requests":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Total Requests by HTTP Method",layout:{left:6,top:0,width:3,height:5}}],info:"Requests By HTTP Method.",en:{instance:{one:"web server",other:"web servers"}}},"web_log.http_version_requests":{info:"Requests By HTTP Version.",en:{instance:{one:"web server",other:"web servers"}}},"web_log.ip_proto_requests":{info:"Requests By IP Protocol.",en:{instance:{one:"web server",other:"web servers"}}},"web_log.ssl_proto_requests":{info:"Requests By SSL Connection Protocol.",en:{instance:{one:"web server",other:"web servers"}}},"web_log.ssl_cipher_suite_requests":{info:"Requests By SSL Connection Cipher Suite.",en:{instance:{one:"web server",other:"web servers"}}},"web_log.url_pattern_requests":{info:"URL Field Requests By Pattern.",en:{instance:{one:"web server",other:"web servers"}}},"web_log.custom_field_pattern_requests":{info:"Custom Field Requests By Pattern.",en:{instance:{one:"web server",other:"web servers"}}},"web_log.custom_time_field_summary":{info:"Custom Time Field Summary.",en:{instance:{one:"web server",other:"web servers"}}},"web_log.custom_time_field_histogram":{info:"Custom Time Field Histogram.",en:{instance:{one:"web server",other:"web servers"}}},"web_log.url_pattern_status_code_responses":{info:"Responses By Status Code.",en:{instance:{one:"web server",other:"web servers"}}},"web_log.url_pattern_http_method_requests":{info:"Requests By HTTP Method.",en:{instance:{one:"web server",other:"web servers"}}},"web_log.url_pattern_bandwidth":{info:"Bandwidth.",en:{instance:{one:"web server",other:"web servers"}}},"web_log.url_pattern_request_processing_time":{aggregationMethod:"avg",info:"Request Processing Time.",en:{instance:{one:"web server",other:"web servers"}}},"whoisquery.time_until_expiration":{info:"Time Until Domain Expiration.",en:{instance:{one:"cpu core",other:"cpu cores"}}},"windows.cpu_core_utilization":{aggregationMethod:"avg",info:"Core CPU Utilization.",en:{instance:{one:"cpu core",other:"cpu 
cores"}}},"windows.cpu_core_interrupts":{info:"Received and Serviced Hardware Interrupts.",en:{instance:{one:"cpu core",other:"cpu cores"}}},"windows.cpu_core_dpcs":{info:"Received and Serviced Deferred Procedure Calls (DPC).",en:{instance:{one:"cpu core",other:"cpu cores"}}},"windows.cpu_core_cstate":{aggregationMethod:"avg",info:"Core Time Spent in Low-Power Idle State.",en:{instance:{one:"cpu core",other:"cpu cores"}}},"windows.memory_page_faults":{info:"Memory Page Faults.",en:{instance:{one:"system",other:"systems"}}},"windows.memory_swap_utilization":{aggregationMethod:"avg",info:"Swap Utilization.",en:{instance:{one:"system",other:"systems"}}},"windows.memory_swap_operations":{info:"Swap Operations.",en:{instance:{one:"system",other:"systems"}}},"windows.memory_swap_pages":{info:"Swap Pages.",en:{instance:{one:"system",other:"systems"}}},"windows.memory_cached":{info:"Cached.",en:{instance:{one:"system",other:"systems"}}},"windows.memory_cache_faults":{info:"Cache Faults.",en:{instance:{one:"system",other:"systems"}}},"windows.memory_system_pool":{info:"System Memory Pool.",en:{instance:{one:"system",other:"systems"}}},"windows.logical_disk_utilization":{aggregationMethod:"avg",info:"Space usage.",en:{instance:{one:"disk",other:"disks"}}},"windows.logical_disk_operations":{info:"Operations.",en:{instance:{one:"disk",other:"disks"}}},"windows.logical_disk_latency":{aggregationMethod:"avg",info:"Average Read/Write Latency.",en:{instance:{one:"disk",other:"disks"}}},"windows.net_nic_packets":{info:"Packets.",en:{instance:{one:"interface",other:"interfaces"}}},"windows.net_nic_errors":{info:"Errors.",en:{instance:{one:"interface",other:"interfaces"}}},"windows.net_nic_discarded":{info:"Discards.",en:{instance:{one:"interface",other:"interfaces"}}},"windows.os_processes":{info:"Processes.",en:{instance:{one:"system",other:"systems"}}},"windows.os_users":{info:"Number of Users.",en:{instance:{one:"system",other:"systems"}}},"windows.os_visible_memory_usage":{info:"Visible Memory Usage.",en:{instance:{one:"system",other:"systems"}}},"windows.os_paging_files_usage":{info:"Paging Files Usage.",en:{instance:{one:"system",other:"systems"}}},"windows.system_threads":{info:"Threads.",en:{instance:{one:"system",other:"systems"}}},"windows.system_uptime":{aggregationMethod:"min",info:"Uptime.",en:{instance:{one:"system",other:"systems"}}},"windows.logon_type_sessions":{info:"Active User Logon Sessions By Type.",en:{instance:{one:"system",other:"systems"}}},"windows.thermalzone_temperature":{aggregationMethod:"avg",info:"Thermal zone temperature.",en:{instance:{one:"zone",other:"zones"}}},"windows.processes_page_file_bytes":{info:"Bytes used in page file(s).",en:{instance:{one:"system",other:"systems"}}},"windows.service_state":{info:"Service state.",en:{instance:{one:"service",other:"services"}}},"windows.service_status":{info:"Service status.",en:{instance:{one:"service",other:"services"}}},"iis.website_users_count":{info:"Website users with pending requests.",en:{instance:{one:"website",other:"websites"}}},"iis.website_connection_attempts_rate":{info:"Website connections attempts.",en:{instance:{one:"website",other:"websites"}}},"iis.website_isapi_extension_requests_rate":{info:"Website extensions request.",en:{instance:{one:"website",other:"websites"}}},"iis.website_ftp_file_transfer_rate":{info:"Website FTP file transfer rate.",en:{instance:{one:"website",other:"websites"}}},"iis.website_logon_attempts_rate":{info:"Website logon 
attempts.",en:{instance:{one:"website",other:"websites"}}},"mssql.instance_bufman_iops":{info:"Number of pages input and output.",en:{instance:{one:"mssql server",other:"mssql servers"}}},"mssql.instance_blocked_processes":{info:"Blocked processes.",en:{instance:{one:"mssql server",other:"mssql servers"}}},"mssql.instance_locks_lock_wait":{info:"Lock requests that required the caller to wait.",en:{instance:{one:"mssql server",other:"mssql servers"}}},"mssql.instance_locks_deadlocks":{info:"Lock requests that resulted in deadlock.",en:{instance:{one:"mssql server",other:"mssql servers"}}},"mssql.instance_memmgr_connection_memory_bytes":{info:"Amount of dynamic memory to maintain connections.",en:{instance:{one:"mssql server",other:"mssql servers"}}},"mssql.instance_memmgr_pending_memory_grants":{info:"Process waiting for memory grant.",en:{instance:{one:"mssql server",other:"mssql servers"}}},"mssql.instance_memmgr_server_memory":{info:"Memory committed.",en:{instance:{one:"mssql server",other:"mssql servers"}}},"mssql.instance_sqlstats_sql_recompilations":{info:"SQL re-compilations.",en:{instance:{one:"mssql server",other:"mssql servers"}}},"mssql.database_active_transactions":{info:"Active transactions per database.",en:{instance:{one:"database",other:"databases"}}},"mssql.database_backup_restore_operations":{info:"Backup IO per database.",en:{instance:{one:"database",other:"databases"}}},"mssql.database_log_flushed":{info:"Log flushed.",en:{instance:{one:"database",other:"databases"}}},"mssql.database_log_flushes":{info:"Log flushes.",en:{instance:{one:"database",other:"databases"}}},"mssql.database_write_transactions":{info:"Write transactions.",en:{instance:{one:"database",other:"databases"}}},"ad.database_operations":{info:"AD database operations.",en:{instance:{one:"ad instance",other:"ad instances"}}},"ad.directory_operations":{info:"AD directory operations.",en:{instance:{one:"ad instance",other:"ad instances"}}},"ad.name_cache_lookups":{info:"Name cache lookups.",en:{instance:{one:"ad instance",other:"ad instances"}}},"ad.name_cache_hits":{info:"Name cache hits.",en:{instance:{one:"ad instance",other:"ad instances"}}},"ad.atq_average_request_latency":{aggregationMethod:"avg",info:"Average request processing time.",en:{instance:{one:"ad instance",other:"ad instances"}}},"ad.atq_outstanding_requests":{info:"Outstanding requests.",en:{instance:{one:"ad instance",other:"ad instances"}}},"ad.dra_replication_sync_objects_remaining":{info:"DRA replication full sync objects remaining.",en:{instance:{one:"ad instance",other:"ad instances"}}},"ad.dra_replication_properties_filtered":{info:"DRA replication properties filtered.",en:{instance:{one:"ad instance",other:"ad instances"}}},"ad.ds_threads":{info:"Directory Service threads.",en:{instance:{one:"ad instance",other:"ad instances"}}},"ad.ldap_last_bind_time":{aggregationMethod:"min",info:"LDAP last successful bind time.",en:{instance:{one:"ad instance",other:"ad instances"}}},"adcs.cert_template_requests":{info:"Certificate requests processed.",en:{instance:{one:"cert template",other:"cert templates"}}},"adcs.cert_template_failed_requests":{info:"Certificate failed requests processed.",en:{instance:{one:"cert template",other:"cert templates"}}},"adcs.cert_template_issued_requests":{info:"Certificate issued requests processed.",en:{instance:{one:"cert template",other:"cert templates"}}},"adcs.cert_template_pending_requests":{info:"Certificate pending requests processed.",en:{instance:{one:"cert template",other:"cert 
templates"}}},"adcs.cert_template_request_processing_time":{aggregationMethod:"avg",info:"Certificate last request processing time.",en:{instance:{one:"cert template",other:"cert templates"}}},"adcs.cert_template_retrievals":{info:"Total of certificate retrievals.",en:{instance:{one:"cert template",other:"cert templates"}}},"adcs.cert_template_retrieval_processing_time":{aggregationMethod:"avg",info:"Certificate last retrieval processing time.",en:{instance:{one:"cert template",other:"cert templates"}}},"adcs.cert_template_request_cryptographic_signing_time":{aggregationMethod:"avg",info:"Certificate last signing operation request time.",en:{instance:{one:"cert template",other:"cert templates"}}},"adcs.cert_template_request_policy_module_processing":{info:"Certificate last policy module processing request time.",en:{instance:{one:"cert template",other:"cert templates"}}},"adcs.cert_template_challenge_responses":{info:"Certificate challenge responses.",en:{instance:{one:"cert template",other:"cert templates"}}},"adcs.cert_template_challenge_response_processing_time":{aggregationMethod:"avg",info:"Certificate last challenge response time.",en:{instance:{one:"cert template",other:"cert templates"}}},"adcs.cert_template_signed_certificate_timestamp_lists":{info:"Certificate Signed Certificate Timestamp Lists processed.",en:{instance:{one:"cert template",other:"cert templates"}}},"adcs.cert_template_signed_certificate_timestamp_list_processing_time":{aggregationMethod:"avg",info:"Certificate last Signed Certificate Timestamp List process time.",en:{instance:{one:"cert template",other:"cert templates"}}},"adfs.ad_login_connection_failures":{info:"Connection failures.",en:{instance:{one:"adfs instance",other:"adfs instances"}}},"adfs.certificate_authentications":{info:"User Certificate authentications.",en:{instance:{one:"adfs instance",other:"adfs instances"}}},"adfs.db_artifact_failures":{info:"Connection failures to the artifact database.",en:{instance:{one:"adfs instance",other:"adfs instances"}}},"adfs.db_artifact_query_time_seconds":{aggregationMethod:"avg",info:"Time taken for an artifact database query.",en:{instance:{one:"adfs instance",other:"adfs instances"}}},"adfs.db_config_failures":{info:"Connection failures to the configuration database.",en:{instance:{one:"adfs instance",other:"adfs instances"}}},"adfs.db_config_query_time_seconds":{aggregationMethod:"avg",info:"Time taken for a configuration database query.",en:{instance:{one:"adfs instance",other:"adfs instances"}}},"adfs.device_authentications":{info:"Device authentications.",en:{instance:{one:"adfs instance",other:"adfs instances"}}},"adfs.external_authentications":{info:"Authentications from external MFA providers.",en:{instance:{one:"adfs instance",other:"adfs instances"}}},"adfs.federated_authentications":{info:"Authentications from Federated Sources.",en:{instance:{one:"adfs instance",other:"adfs instances"}}},"adfs.federation_metadata_requests":{info:"Federation Metadata requests.",en:{instance:{one:"adfs instance",other:"adfs instances"}}},"adfs.oauth_authorization_requests":{info:"Incoming requests to the OAuth Authorization endpoint.",en:{instance:{one:"adfs instance",other:"adfs instances"}}},"adfs.oauth_client_authentications":{info:"OAuth client authentications.",en:{instance:{one:"adfs instance",other:"adfs instances"}}},"adfs.oauth_client_credentials_requests":{info:"OAuth client credentials requests.",en:{instance:{one:"adfs instance",other:"adfs 
instances"}}},"adfs.oauth_client_privkey_jwt_authentications":{info:"OAuth client private key JWT authentications.",en:{instance:{one:"adfs instance",other:"adfs instances"}}},"adfs.oauth_client_secret_basic_authentications":{info:"OAuth client secret basic authentications.",en:{instance:{one:"adfs instance",other:"adfs instances"}}},"adfs.oauth_client_secret_post_authentications":{info:"OAuth client secret post authentications.",en:{instance:{one:"adfs instance",other:"adfs instances"}}},"adfs.oauth_client_windows_authentications":{info:"OAuth client windows integrated authentications.",en:{instance:{one:"adfs instance",other:"adfs instances"}}},"adfs.oauth_logon_certificate_requests":{info:"OAuth logon certificate requests.",en:{instance:{one:"adfs instance",other:"adfs instances"}}},"adfs.oauth_password_grant_requests":{info:"OAuth password grant requests.",en:{instance:{one:"adfs instance",other:"adfs instances"}}},"adfs.oauth_token_requests_success":{info:"Successful RP token requests over OAuth protocol.",en:{instance:{one:"adfs instance",other:"adfs instances"}}},"adfs.passive_requests":{info:"Passive requests.",en:{instance:{one:"adfs instance",other:"adfs instances"}}},"adfs.passport_authentications":{info:"Microsoft Passport SSO authentications.",en:{instance:{one:"adfs instance",other:"adfs instances"}}},"adfs.password_change_requests":{info:"Password change requests.",en:{instance:{one:"adfs instance",other:"adfs instances"}}},"adfs.samlp_token_requests_success":{info:"Successful RP token requests over SAML-P protocol.",en:{instance:{one:"adfs instance",other:"adfs instances"}}},"adfs.sso_authentications":{info:"SSO authentications.",en:{instance:{one:"adfs instance",other:"adfs instances"}}},"adfs.token_requests":{info:"Token access requests.",en:{instance:{one:"adfs instance",other:"adfs instances"}}},"adfs.userpassword_authentications":{info:"AD U/P authentications.",en:{instance:{one:"adfs instance",other:"adfs instances"}}},"adfs.windows_integrated_authentications":{info:"Windows integrated authentications using Kerberos or NTLM.",en:{instance:{one:"adfs instance",other:"adfs instances"}}},"adfs.wsfed_token_requests_success":{info:"Successful RP token requests over WS-Fed protocol.",en:{instance:{one:"adfs instance",other:"adfs instances"}}},"adfs.wstrust_token_requests_success":{info:"Successful RP token requests over WS-Trust protocol.",en:{instance:{one:"adfs instance",other:"adfs instances"}}},"netframework.clrexception_throw_to_catch_depth":{info:"Traversed stack frames.",en:{instance:{one:".net process",other:".net processes"}}},"netframework.clrinterop_interop_marshallings":{info:"Arguments and return values marshallings.",en:{instance:{one:".net process",other:".net processes"}}},"netframework.clrjit_il_bytes":{info:"Compiled Microsoft intermediate language (MSIL) bytes.",en:{instance:{one:".net process",other:".net processes"}}},"netframework.clrloading_appdomains_loaded":{info:"Loaded application domains.",en:{instance:{one:".net process",other:".net processes"}}},"netframework.clrloading_appdomains_unloaded":{info:"Unloaded application domains.",en:{instance:{one:".net process",other:".net processes"}}},"netframework.clrloading_classes_loaded":{info:"Loaded classes in all assemblies.",en:{instance:{one:".net process",other:".net processes"}}},"netframework.clrloading_class_load_failures":{info:"Class load failures.",en:{instance:{one:".net process",other:".net processes"}}},"netframework.clrlocksandthreads_queue_length":{info:"Threads waited to acquire a managed 
lock.",en:{instance:{one:".net process",other:".net processes"}}},"netframework.clrlocksandthreads_current_logical_threads":{info:"Logical threads.",en:{instance:{one:".net process",other:".net processes"}}},"netframework.clrlocksandthreads_current_physical_threads":{info:"Physical threads.",en:{instance:{one:".net process",other:".net processes"}}},"netframework.clrlocksandthreads_contentions":{info:"Fails to acquire a managed lock.",en:{instance:{one:".net process",other:".net processes"}}},"netframework.clrmemory_allocated_bytes":{info:"Memory allocated on the garbage collection heap.",en:{instance:{one:".net process",other:".net processes"}}},"netframework.clrmemory_finalization_survivors":{info:"Objects that survived garbage-collection.",en:{instance:{one:".net process",other:".net processes"}}},"netframework.clrmemory_collections":{info:"Garbage collections.",en:{instance:{one:".net process",other:".net processes"}}},"netframework.clrmemory_number_pinned_objects":{info:"Pinned objects encountered.",en:{instance:{one:".net process",other:".net processes"}}},"netframework.clrremoting_channels":{info:"Registered channels.",en:{instance:{one:".net process",other:".net processes"}}},"netframework.clrremoting_context_bound_classes_loaded":{info:"Loaded context-bound classes.",en:{instance:{one:".net process",other:".net processes"}}},"netframework.clrremoting_context_bound_objects":{info:"Allocated context-bound objects.",en:{instance:{one:".net process",other:".net processes"}}},"netframework.clrremoting_context_proxies":{info:"Remoting proxy objects.",en:{instance:{one:".net process",other:".net processes"}}},"netframework.clrremoting_contexts":{info:"Total of remoting contexts.",en:{instance:{one:".net process",other:".net processes"}}},"netframework.clrremoting_remote_calls":{info:"Remote Procedure Calls (RPC) invoked.",en:{instance:{one:".net process",other:".net processes"}}},"netframework.clrsecurity_link_time_checks":{info:"Link-time code access security checks.",en:{instance:{one:".net process",other:".net processes"}}},"netframework.clrsecurity_checks_time":{aggregationMethod:"avg",info:"Time spent performing runtime code access security checks.",en:{instance:{one:".net process",other:".net processes"}}},"netframework.clrsecurity_stack_walk_depth":{info:"Depth of the stack.",en:{instance:{one:".net process",other:".net processes"}}},"netframework.clrsecurity_runtime_checks":{info:"Runtime code access security checks performed.",en:{instance:{one:".net process",other:".net processes"}}},"exchange.activesync_ping_cmds_pending":{info:"Ping commands pending in queue.",en:{instance:{one:"host",other:"hosts"}}},"exchange.activesync_requests":{info:"HTTP requests received from ASP.NET.",en:{instance:{one:"host",other:"hosts"}}},"exchange.activesync_sync_cmds":{info:"Sync commands processed.",en:{instance:{one:"host",other:"hosts"}}},"exchange.autodiscover_requests":{info:"Autodiscover service requests processed.",en:{instance:{one:"host",other:"hosts"}}},"exchange.avail_service_requests":{info:"Requests serviced.",en:{instance:{one:"host",other:"hosts"}}},"exchange.owa_current_unique_users":{mainheads:[{groupBy:["instance"],chartLibrary:"easypiechart",title:"OWA Unique Users",colors:s.default[12],layout:{left:0,top:0,width:2,height:5}}],info:"Unique users currently logged on to Outlook Web App.",en:{instance:{one:"host",other:"hosts"}}},"exchange.owa_requests_total":{mainheads:[{groupBy:["instance"],chartLibrary:"easypiechart",title:"OWA Total 
Requests",layout:{left:3,top:0,width:2,height:5}}],info:"Requests handled by Outlook Web App.",en:{instance:{one:"host",other:"hosts"}}},"exchange.rpc_active_user_count":{info:"Active unique users in the last 2 minutes.",en:{instance:{one:"host",other:"hosts"}}},"exchange.rpc_avg_latency":{aggregationMethod:"avg",mainheads:[{groupBy:["instance"],aggregationMethod:"avg",chartLibrary:"easypiechart",title:"Average Latency",colors:s.default[1],layout:{left:12,top:0,width:2,height:5}}],info:"Average latency.",en:{instance:{one:"host",other:"hosts"}}},"exchange.rpc_connection_count":{mainheads:[{groupBy:["instance"],chartLibrary:"easypiechart",title:"Total Client Connections",colors:s.default[2],layout:{left:15,top:0,width:2,height:5}}],info:"Client connections.",en:{instance:{one:"host",other:"hosts"}}},"exchange.rpc_operations":{info:"RPC operations.",en:{instance:{one:"host",other:"hosts"}}},"exchange.rpc_requests":{info:"Clients requests currently being processed.",en:{instance:{one:"host",other:"hosts"}}},"exchange.rpc_user_count":{info:"RPC users.",en:{instance:{one:"host",other:"hosts"}}},"exchange.transport_queues_active_mail_box_delivery":{mainheads:[{chartLibrary:"bars",dimensionsSort:"valueDesc",title:"Active Mailbox Delivery Queue",layout:{left:9,top:0,width:4,height:5}}],info:"Active Mailbox Delivery Queue length.",en:{instance:{one:"host",other:"hosts"}}},"exchange.transport_queues_external_active_remote_delivery":{info:"External Active Remote Delivery Queue length.",en:{instance:{one:"host",other:"hosts"}}},"exchange.transport_queues_external_largest_delivery":{info:"External Largest Delivery Queue length.",en:{instance:{one:"host",other:"hosts"}}},"exchange.transport_queues_internal_active_remote_delivery":{info:"Internal Active Remote Delivery Queue length.",en:{instance:{one:"host",other:"hosts"}}},"exchange.transport_queues_internal_largest_delivery":{info:"Internal Largest Delivery Queue length.",en:{instance:{one:"host",other:"hosts"}}},"exchange.transport_queues_retry_mailbox_delivery":{info:"Internal Active Remote Delivery Queue length.",en:{instance:{one:"host",other:"hosts"}}},"exchange.transport_queues_poison":{info:"Poison Queue Length.",en:{instance:{one:"host",other:"hosts"}}},"exchange.workload_active_tasks":{info:"Workload active tasks.",en:{instance:{one:"workload",other:"workloads"}}},"exchange.workload_completed_tasks":{info:"Workload completed tasks.",en:{instance:{one:"workload",other:"workloads"}}},"exchange.workload_queued_tasks":{info:"Workload queued tasks.",en:{instance:{one:"workload",other:"workloads"}}},"exchange.workload_yielded_tasks":{info:"Workload yielded tasks.",en:{instance:{one:"workload",other:"workloads"}}},"exchange.workload_activity_status":{info:"Workload activity status.",en:{instance:{one:"workload",other:"workloads"}}},"exchange.ldap_long_running_ops_per_sec":{info:"Long Running LDAP operations.",en:{instance:{one:"ldap process",other:"ldap processes"}}},"exchange.ldap_read_time":{aggregationMethod:"avg",info:"Time to send an LDAP read request and receive a response.",en:{instance:{one:"ldap process",other:"ldap processes"}}},"exchange.ldap_search_time":{aggregationMethod:"avg",info:"Time to send an LDAP search request and receive a response.",en:{instance:{one:"ldap process",other:"ldap processes"}}},"exchange.ldap_write_time":{aggregationMethod:"avg",info:"Time to send an LDAP search request and receive a response.",en:{instance:{one:"ldap process",other:"ldap processes"}}},"exchange.ldap_timeout_errors":{info:"LDAP timeout 
errors.",en:{instance:{one:"ldap process",other:"ldap processes"}}},"exchange.http_proxy_avg_auth_latency":{aggregationMethod:"avg",info:"Average time spent authenticating CAS.",en:{instance:{one:"proxy",other:"proxies"}}},"exchange.http_proxy_avg_cas_processing_latency_sec":{aggregationMethod:"avg",info:"Average time spent authenticating CAS.",en:{instance:{one:"proxy",other:"proxies"}}},"exchange.http_proxy_mailbox_proxy_failure_rate":{aggregationMethod:"avg",info:"Percentage of failures between this CAS and MBX servers.",en:{instance:{one:"proxy",other:"proxies"}}},"exchange.http_proxy_mailbox_server_locator_avg_latency_sec":{aggregationMethod:"avg",info:"Average latency of MailboxServerLocator web service calls.",en:{instance:{one:"proxy",other:"proxies"}}},"exchange.http_proxy_outstanding_proxy_requests":{info:"Concurrent outstanding proxy requests.",en:{instance:{one:"proxy",other:"proxies"}}},"exchange.http_proxy_requests":{info:"Number of proxy requests processed each second.",en:{instance:{one:"proxy",other:"proxies"}}},"wireguard.device_network_io":{info:"Device traffic.",en:{instance:{one:"device",other:"devices"}}},"wireguard.device_peers":{info:"Device peers.",en:{instance:{one:"device",other:"devices"}}},"wireguard.peer_network_io":{info:"Peer traffic.",en:{instance:{one:"peer",other:"peers"}}},"wireguard.peer_latest_handshake_ago":{info:"Peer time elapsed since the latest handshake.",en:{instance:{one:"peer",other:"peers"}}},"x509check.time_until_expiration":{info:"Time Until Certificate Expiration.",en:{instance:{one:"certificate",other:"certificates"}},groupBy:["label"],groupByLabel:["source"]},"x509check.revocation_status":{info:"Revocation Status.",en:{instance:{one:"certificate",other:"certificates"}},groupBy:["label"],groupByLabel:["source"]},"xenstat.mem":{info:"Memory Usage.",en:{instance:{one:"xenstat instance",other:"xenstat instances"}}},"xenstat.domains":{info:"Number of Domains.",en:{instance:{one:"xenstat instance",other:"xenstat instances"}}},"xenstat.cpus":{info:"Number of CPUs.",en:{instance:{one:"xenstat instance",other:"xenstat instances"}}},"xenstat.cpu_freq":{info:"CPU Frequency.",en:{instance:{one:"xenstat instance",other:"xenstat instances"}}},"xendomain.states":{info:"Domain States.",en:{instance:{one:"domain",other:"domains"}}},"xendomain.cpu":{aggregationMethod:"avg",info:"CPU Usage (100% = 1 core).",en:{instance:{one:"domain",other:"domains"}}},"xendomain.mem":{info:"Memory Reservation.",en:{instance:{one:"domain",other:"domains"}}},"xendomain.vcpu":{aggregationMethod:"avg",info:"CPU Usage per VCPU.",en:{instance:{one:"domain",other:"domains"}}},"xendomain.oo_req_vbd":{info:"VBD{%u} Out Of Requests.",en:{instance:{one:"domain",other:"domains"}}},"xendomain.requests_vbd":{info:"VBD{%u} Requests.",en:{instance:{one:"domain",other:"domains"}}},"xendomain.sectors_vbd":{info:"VBD{%u} Read/Written Sectors.",en:{instance:{one:"domain",other:"domains"}}},"xendomain.bytes_network":{info:"Network{%u} Received/Sent Bytes.",en:{instance:{one:"domain",other:"domains"}}},"xendomain.packets_network":{info:"Network{%u} Received/Sent Packets.",en:{instance:{one:"domain",other:"domains"}}},"xendomain.errors_network":{info:"Network{%u} Receive/Transmit Errors.",en:{instance:{one:"domain",other:"domains"}}},"xendomain.drops_network":{info:"Network{%u} Receive/Transmit Drops.",en:{instance:{one:"domain",other:"domains"}}},"zookeeper.requests":{info:"Outstanding Requests.",en:{instance:{one:"zk instance",other:"zk 
instances"}}},"zookeeper.requests_latency":{aggregationMethod:"avg",info:"Requests Latency.",en:{instance:{one:"zk instance",other:"zk instances"}}},"zookeeper.connections":{info:"Alive Connections.",en:{instance:{one:"zk instance",other:"zk instances"}}},"zookeeper.packets":{info:"Packets.",en:{instance:{one:"zk instance",other:"zk instances"}}},"zookeeper.file_descriptor":{info:"Open File Descriptors.",en:{instance:{one:"zk instance",other:"zk instances"}}},"zookeeper.nodes":{info:"Number of Nodes.",en:{instance:{one:"zk instance",other:"zk instances"}}},"zookeeper.watches":{info:"Number of Watches.",en:{instance:{one:"zk instance",other:"zk instances"}}},"zookeeper.approximate_data_size":{info:"Approximate Data Tree Size.",en:{instance:{one:"zk instance",other:"zk instances"}}},"zscores.z":{info:"Z Score.",en:{instance:{one:"chart",other:"charts"}}},"zscores.3stddev":{info:"Z Score >3.",en:{instance:{one:"chart",other:"charts"}}},"smartctl.device_smart_status":{groupBy:["dimension"],groupByLabel:[]},"netdata.dbengine_tier_retention":{aggregationMethod:"max",groupBy:["dimension","label"],groupByLabel:["tier"],en:{instance:{one:"tier",other:"tiers"}}}},He=[[/smartctl.*/,{groupBy:["node","label"],groupByLabel:["device_name"],en:{instance:{one:"device",other:"devices"}}}],[/clickhouse.*/,{en:{instance:{one:"db server",other:"db servers"}}}]],Ve=e=>"string"===typeof e?He.reduce(((t,n)=>{let[s,a]=n;return s.test(e)?{...a,...t}:t}),{...Oe[e]}):Oe[e],Qe=Oe},92230:(e,t,n)=>{n.d(t,{A:()=>i});var s=n(58168),a=n(96540),o=n(83199),r=n(8320);const i=(0,a.forwardRef)(((e,t)=>{let{children:n,onClick:i,...c}=e;const l=(0,r.UH)(),d=(0,a.useCallback)((e=>{const{hash:t=""}=e.target;t.startsWith("#menu")&&(e.preventDefault(),l.goToLink(t.substr(1))),i&&i(e)}),[l]);return"string"===typeof n?a.createElement(o.TextSmall,(0,s.A)({color:"textDescription",dangerouslySetInnerHTML:{__html:n},onClick:d},c,{ref:t})):n||null}))},45387:(e,t,n)=>{n.d(t,{X_:()=>Y,Ay:()=>re,_3:()=>ae,ro:()=>$,yn:()=>ee,e4:()=>J,Wr:()=>ne,KG:()=>te});var s=n(86443),a=(n(25440),n(79304)),o=n(4967),r=n(18595),i=n(36697),c=n(485),l=n(78613),d=n(64850),h=n(66248),u=n(13765),p=n(35532),m=n(22294),g=n(76229),f=n(38055),b=n(54399),y=n(78536),v=n(93847),_=n(93360),w=n(25436),k=n(22017),T=n(35074),S=n(54416),M=n(55345),x=n(13808),C=n(37048),P=n(900),B=n(59582),q=n(42469),I=n(82367),L=n(1705),A=n(1871),E=n(46943),D=n(80058),N=n(55573),R=n(62672),F=n(45386),U=n(49205),z=n(66600),O=n(39281),H=n(12333),V=n(84994),Q=n(83697),j=n(75904),G=n(54264),W=n(27064);const K={system:{title:"System Overview",icon:n(7521).G0,info:"Overview of the key system metrics."},services:{title:"systemd Services",icon:g.Ub,info:"Resources utilization of systemd services. Netdata monitors all systemd services via cgroups (the resources accounting used by containers).
    Tip:
    • The default view of cgroup charts shows aggregate information from all your VMs and containers
    • For an instance-level view of cgroups, change the Group by to instance
    • Check out our docs for more details"},ap:{title:"Access Points",icon:G.Bw,info:"Performance metrics for the access points (i.e. wireless interfaces in AP mode) found on the system."},tc:{title:"Quality of Service",icon:W.Bw,info:'Netdata collects and visualizes tc class utilization using its tc-helper plugin. If you also use FireQOS for setting up QoS, netdata automatically collects interface and class names. If your QoS configuration includes overhead calculation, the values shown here will include these overheads (the total bandwidth for the same interface, as reported in the Network Interfaces section, will be lower than the total bandwidth reported here). QoS data collection may have a slight time difference compared to the interface (QoS data collection uses a BASH script, so a shift in data collection of a few milliseconds is expected).'},net:{title:"Network Interfaces",icon:F.FF,info:'Performance metrics for network interfaces. Netdata retrieves this data reading the /proc/net/dev file and /sys/class/net/ directory.'},Infiniband:{title:"Infiniband ports",icon:F.FF,info:'
Performance and exception statistics for Infiniband ports. The individual port and hardware counter descriptions can be found in the Mellanox knowledge base.'},wireless:{title:"Wireless Interfaces",icon:G.Bw,info:"Performance metrics for wireless interfaces."},ip:{title:"Networking Stack",icon:j.jB,info:e=>{let{os:t}=e;return"linux"===t?"Metrics for the networking stack of the system. These metrics are collected from /proc/net/netstat or by attaching kprobes to kernel functions; they apply to both IPv4 and IPv6 traffic and are related to the operation of the kernel networking stack.":"Metrics for the networking stack of the system."}},ipv4:{title:"IPv4 Networking",icon:j.jB,info:'Metrics for the IPv4 stack of the system. Internet Protocol version 4 (IPv4) is the fourth version of the Internet Protocol (IP). It is one of the core protocols of standards-based internetworking methods in the Internet. IPv4 is a connectionless protocol for use on packet-switched networks. It operates on a best effort delivery model, in that it does not guarantee delivery, nor does it assure proper sequencing or avoidance of duplicate delivery. These aspects, including data integrity, are addressed by an upper layer transport protocol, such as the Transmission Control Protocol (TCP).'},ipv6:{title:"IPv6 Networking",icon:j.jB,info:'Metrics for the IPv6 stack of the system. Internet Protocol version 6 (IPv6) is the most recent version of the Internet Protocol (IP), the communications protocol that provides an identification and location system for computers on networks and routes traffic across the Internet. IPv6 was developed by the Internet Engineering Task Force (IETF) to deal with the long-anticipated problem of IPv4 address exhaustion. IPv6 is intended to replace IPv4.'},sctp:{title:"SCTP Networking",icon:j.jB,info:'
Stream Control Transmission Protocol (SCTP) is a computer network protocol which operates at the transport layer and serves a role similar to the popular protocols TCP and UDP. SCTP provides some of the features of both UDP and TCP: it is message-oriented like UDP and ensures reliable, in-sequence transport of messages with congestion control like TCP. It differs from those protocols by providing multi-homing and redundant paths to increase resilience and reliability. Netdata collects SCTP metrics reading the /proc/net/sctp/snmp file.'},ipvs:{title:"IP Virtual Server",icon:"serviceIPVS",info:'
IPVS (IP Virtual Server) implements transport-layer load balancing inside the Linux kernel, so-called Layer-4 switching. IPVS running on a host acts as a load balancer at the front of a cluster of real servers: it can direct requests for TCP/UDP based services to the real servers, and makes the services of the real servers appear as a virtual service on a single IP address. Netdata collects summary statistics, reading /proc/net/ip_vs_stats. To display the statistics of services and their servers, run ipvsadm -Ln --stats or ipvsadm -Ln --rate for the rate statistics. For details, see ipvsadm(8).'},netfilter:{title:"Firewall (netfilter)",icon:Q.im,info:"Performance metrics of the netfilter components."},ipfw:{title:"Firewall (ipfw)",icon:Q.im,info:"Counters and memory usage for the ipfw rules."},cpu:{title:"CPUs",icon:V.zm,info:'Detailed information for each CPU of the system. A summary of the system for all CPUs can be found at the System Overview section.'},mem:{title:"Memory",icon:I.YS,info:"Detailed information about the memory management of the system."},disk:{title:"Disks",icon:l.MB,info:"Charts with performance information for all the system disks. Special care has been given to present disk performance metrics in a way compatible with iostat -x. netdata by default prevents rendering performance charts for individual partitions and unmounted virtual disks. Disabled charts can still be enabled by configuring the relative settings in the netdata configuration file."},mount:{title:"Mount Points",icon:l.MB,info:""},adaptecraid:{title:"Adaptec RAID",icon:l.MB,info:"Monitors the health of Adaptec Hardware RAID by tracking the status of logical and physical devices in your storage system."},storcli:{title:"StorCLI RAID",icon:l.MB,info:"Monitors the health of Hardware RAID by tracking the status of RAID controllers, physical drives, and backup batteries in your storage system."},megacli:{title:"MegaCLI MegaRAID",icon:l.MB,info:"Monitors the health of MegaCLI Hardware RAID by tracking the status of RAID adapters, physical drives, and backup batteries in your storage system."},hpssa:{title:"HPE Smart Array",icon:l.MB,info:"Monitors the health of HPE Smart Arrays by tracking the status of controllers, arrays, logical and physical drives in your storage system."},mdstat:{title:"MD arrays",icon:l.MB,info:'
RAID devices are virtual devices created from two or more real block devices. Linux Software RAID devices are implemented through the md (Multiple Devices) device driver. Netdata monitors the current status of MD arrays reading /proc/mdstat and /sys/block/%s/md/mismatch_cnt files.
    '},sensors:{title:"Sensors",icon:H.YH,info:"Readings of the configured system sensors."},ipmi:{title:"IPMI",icon:H.YH,info:"The Intelligent Platform Management Interface (IPMI) is a set of computer interface specifications for an autonomous computer subsystem that provides management and monitoring capabilities independently of the host system's CPU, firmware (BIOS or UEFI) and operating system."},amdgpu:{title:"AMD GPUs",icon:I.YS,info:"Performance and usage metrics for each AMD GPU in the system."},samba:{title:"Samba",icon:k.Uj,info:"Performance metrics of the Samba file share operations of this system. Samba is a implementation of Windows services, including Windows SMB protocol file shares."},nfsd:{title:"NFS Server",icon:k.Uj,info:'Performance metrics of the Network File Server. NFS is a distributed file system protocol, allowing a user on a client computer to access files over a network, much like local storage is accessed. NFS, like many other protocols, builds on the Open Network Computing Remote Procedure Call (ONC RPC) system.'},nfs:{title:"NFS Client",icon:k.Uj,info:'Performance metrics of the NFS operations of this system, acting as an NFS client.'},zfs:{title:"ZFS Cache",icon:k.Uj,info:'Performance metrics of the ZFS ARC and L2ARC. The following charts visualize all metrics reported by arcstat.py and arc_summary.py.'},zfspool:{title:"ZFS pools",icon:"serviceDatabase",info:"ZFS pools health and performance statistics."},lvm:{title:"LVM",icon:k.Uj,info:"Storage health and capacity of LVM Logical Volumes."},btrfs:{title:"BTRFS filesystem",icon:k.Uj,info:"Disk space metrics for the BTRFS filesystem."},app:{title:"Applications",icon:"applicationsSolid",info:'Per application statistics are collected using apps.plugin. This plugin walks through all processes and aggregates statistics for application groups. The plugin also counts the resources of exited children. So for processes like shell scripts, the reported values include the resources used by the commands these scripts run within each timeframe.',height:1.5},usergroup:{title:"User Groups",icon:q.X4,info:'Per user group statistics are collected using apps.plugin. This plugin walks through all processes and aggregates statistics per user group. The plugin also counts the resources of exited children. So for processes like shell scripts, the reported values include the resources used by the commands these scripts run within each timeframe.',height:1.5},user:{title:"Users",icon:O.gd,info:'Per user statistics are collected using apps.plugin. This plugin walks through all processes and aggregates statistics per user. The plugin also counts the resources of exited children. So for processes like shell scripts, the reported values include the resources used by the commands these scripts run within each timeframe.',height:1.5},apps:{title:"Applications (old)",icon:"applicationsSolid",info:'Per application statistics are collected using apps.plugin. This plugin walks through all processes and aggregates statistics for application groups. The plugin also counts the resources of exited children. So for processes like shell scripts, the reported values include the resources used by the commands these scripts run within each timeframe.',height:1.5},groups:{title:"User Groups (old)",icon:q.X4,info:'Per user group statistics are collected using apps.plugin. This plugin walks through all processes and aggregates statistics per user group. The plugin also counts the resources of exited children. 
So for processes like shell scripts, the reported values include the resources used by the commands these scripts run within each timeframe.',height:1.5},users:{title:"Users (old)",icon:O.gd,info:'Per user statistics are collected using apps.plugin. This plugin walks through all processes and aggregates statistics per user. The plugin also counts the resources of exited children. So for processes like shell scripts, the reported values include the resources used by the commands these scripts run within each timeframe.',height:1.5},netdata:{title:"Netdata Monitoring",icon:z.$F,info:"Performance metrics for the operation of netdata itself and its plugins."},aclk_test:{title:"ACLK Test Generator",info:"For internal use to perform integration testing."},example:{title:"Example Charts",info:"Example charts, demonstrating the external plugin architecture."},cgroup:{title:"",icon:"serviceContainer",info:"Netdata collects Container and VM resource utilization metrics from cgroups, a Linux kernel feature that enables the organization, management, and isolation of system resources among groups of processes. The charts show the aggregated view across all instances by default; to visualize the metrics per container or VM instance, change the Group by to instance"},cgqemu:{title:"",icon:B.l9,info:"QEMU virtual machine resource utilization metrics. QEMU (short for Quick Emulator) is a free and open-source hosted hypervisor that performs hardware virtualization."},docker:{title:"Docker",icon:"serviceDockerHubPress",info:"Docker container and image metrics. Charts show the aggregated view across all containers by default; to visualize the metrics per container, change the Group by to instance"},dockerhub:{icon:"serviceDockerHubPress"},docker_engine:{icon:"serviceDockerHubPress"},fping:{title:"fping",icon:U._q,info:"Network latency statistics, via fping. fping is a program to send ICMP echo probes to network hosts, similar to ping, but much better performing when pinging multiple hosts. fping versions after 3.15 can be directly used as netdata plugins."},gearman:{title:"Gearman",icon:f.Ag,info:"Gearman is a job server that allows you to do work in parallel, to load balance processing, and to call functions between languages."},intelgpu:{title:"Intel iGPU",icon:I.YS,info:"Performance and usage metrics for Intel integrated GPU."},ioping:{title:"ioping",icon:U._q,info:"Disk latency statistics, via ioping. ioping is a program to read/write data probes from/to a disk."},filecheck:{title:"File Check",icon:N._g,info:"Tracks file and directory changes. Monitors existence, size, modification time, and for directories, number of files."},x509check:{title:"Certificate Check",icon:N._g,info:"Monitors certificate expiration dates and checks revocation status with Certificate Authorities (CAs) to identify compromised or outdated certificates."},whoisquery:{title:"WHOIS Check",icon:N._g,info:"Tracks domain health. Performs WHOIS queries to monitor expiration times of registered domains, ensuring timely renewals and preventing website outages."},httpcheck:{title:"Http Check",icon:"serviceHTTPCheck",info:"Web Service availability and latency monitoring using HTTP checks. This plugin is a specialized version of the port check plugin."},cassandra:{title:"Cassandra",icon:"serviceDatabase",info:"Performance metrics for Cassandra, the open source distributed NoSQL database management system.
    For more information:
    • Netdata Cassandra collector docs
    • Cassandra monitoring guide - Part 1 and Part 2"},clickhouse:{title:"ClickHouse",icon:"serviceDatabase",info:"Performance and health metrics for ClickHouse. Netdata collects metrics by sending HTTP requests to the ClickHouse HTTP interface and executing SELECT queries to retrieve data from various system tables."},rspamd:{title:"Rspamd",icon:h.V2,info:"Activity and performance of Rspamd servers: emails, learned messages, spam/ham counts, and actions taken on emails (reject, rewrite, etc.) and more. Netdata retrieves statistics from Rspamd's built-in web server by making HTTP requests to the /stat endpoint."},memcached:{title:"memcached",icon:"serviceMemCached",info:"Performance metrics for memcached. Memcached is a general-purpose distributed memory caching system. It is often used to speed up dynamic database-driven websites by caching data and objects in RAM to reduce the number of times an external data source (such as a database or API) must be read."},monit:{title:"monit",icon:"serviceDatabase",info:"Statuses of checks in monit. Monit is a utility for managing and monitoring processes, programs, files, directories and filesystems on a Unix system. Monit conducts automatic maintenance and repair and can execute meaningful causal actions in error situations."},mysql:{title:"MySQL",icon:"serviceMySQLPress",info:"Performance metrics for mysql, the open-source relational database management system (RDBMS)."},mongodb:{title:"MongoDB",icon:"serviceMongoDB",info:"Performance and health metrics of MongoDB deployments."},nvme:{title:"NVMe",icon:l.MB,info:"NVMe devices SMART and health metrics. Additional information on metrics can be found in the NVM Express Base Specification."},pci:{title:"PCIe AER",icon:I.YS,info:"Advanced Error Reporting is a mechanism built into the PCI Express (PCIe) bus to provide detailed information about errors that occur during communication between devices."},ping:{title:"Ping",icon:F.FF,info:"Measures round-trip time and packet loss by sending ping messages to network hosts."},postgres:{title:"PostgreSQL",icon:"servicePostgreSQL",info:"Performance metrics for PostgreSQL, the open source object-relational database management system (ORDBMS).
    For more information:
    • Netdata PostgreSQL collector docs
    • How to monitor PostgreSQL with Netdata"},redis:{title:"Redis",icon:"serviceRedis",info:"Performance metrics for Redis, an in-memory data structure store, used as a distributed, in-memory key\u2013value database, cache and message broker, with optional durability.
    For more information:
    • Netdata Redis collector docs
    • How to monitor Redis with Netdata"},rethinkdbs:{title:"RethinkDB",icon:"serviceRethinkDB",info:"Performance metrics for rethinkdb. RethinkDB is the first open-source scalable database built for realtime applications."},retroshare:{title:"RetroShare",icon:"serviceRetroShare",info:"Performance metrics for RetroShare. RetroShare is open source software for encrypted filesharing, serverless email, instant messaging, online chat, and BBS, based on a friend-to-friend network built on GNU Privacy Guard (GPG)."},riakkv:{title:"Riak KV",icon:"serviceDatabase",info:"Metrics for Riak KV, the distributed key-value store."},ipfs:{title:"IPFS",icon:"serviceIPFS",info:"Performance metrics for the InterPlanetary File System (IPFS), a content-addressable, peer-to-peer hypermedia distribution protocol."},phpfpm:{title:"PHP-FPM",icon:"servicePhpFpm",info:"Performance metrics for PHP-FPM, an alternative FastCGI implementation for PHP."},pihole:{title:"Pi-hole",icon:R.Df,info:'Metrics for Pi-hole, a black hole for Internet advertisements. The metrics returned by the Pi-hole API are all from the last 24 hours.'},portcheck:{title:"Port Check",icon:N._g,info:"Service availability and latency monitoring using port checks."},postfix:{title:"postfix",icon:M.y_},dovecot:{title:"Dovecot",icon:M.y_},hddtemp:{title:"HDD Temp",icon:w.Ro},nginx:{title:"NGINX",icon:"serviceNginx"},nginxplus:{title:"NGINX Plus",icon:"serviceNginx"},apache:{title:"Apache",icon:"serviceApache"},lighttpd:{title:"Lighttpd",icon:"serviceLighthttpd"},web_log:{title:"Web Server Logs",icon:"serviceWebLog",info:"Key web server performance metrics extracted in real-time from web server log files. For web servers, an extended log file format may optionally be used, offering timing information and bandwidth for both requests and responses."},squid:{title:"squid",icon:"serviceSquid"},nut:{title:"UPS",icon:c.W6},upsd:{title:"UPS",icon:c.W6},apcupsd:{title:"UPS",icon:c.W6},snmp:{title:"SNMP",icon:D.fK},go_expvar:{title:"Go - expvars",icon:P.pS,info:'Statistics about running Go applications exposed by the expvar package.'},consul:{title:"Consul",icon:E.s4,info:'Consul performance and health metrics. For details, see Key Metrics.'},chrony:{title:"Chrony",icon:A.a$,info:"The system\u2019s clock performance and peers' activity status."},couchdb:{icon:"serviceCouchDB",info:'Performance metrics for CouchDB, the open-source, JSON document-based database with an HTTP API and multi-master replication.'},beanstalk:{title:"Beanstalkd",icon:"serviceBeanstalk",info:'Provides statistics on the beanstalkd server and any tubes available on that server, using data pulled from beanstalkc.'},rabbitmq:{title:"RabbitMQ",icon:"serviceRabbitMQ",info:'Performance data for the RabbitMQ open-source message broker.'},ceph:{title:"Ceph",icon:"serviceDatabase",info:'Provides statistics on the ceph cluster server, the open-source distributed storage system.'},ntpd:{title:"NTPd",icon:"serviceNtpdPress",info:'Provides statistics for the internal variables of the Network Time Protocol daemon ntpd and, optionally, the configured peers (if enabled in the module configuration). 
The module presents the performance metrics as shown by ntpq (the standard NTP query program) using NTP mode 6 UDP packets to communicate with the NTP server.'},spigotmc:{title:"Spigot MC",icon:P.pS,info:'Provides basic performance statistics for the Spigot Minecraft server.'},unbound:{title:"Unbound",icon:L._2},boinc:{title:"BOINC",icon:I.YS,info:'Provides task counts for BOINC distributed computing clients.'},w1sensor:{title:"1-Wire Sensors",icon:w.Ro,info:'Data derived from 1-Wire sensors. Currently temperature sensors are automatically detected.'},logind:{title:"Logind",icon:q.X4,info:'Keeps track of user logins and sessions by querying the systemd-logind API.'},powersupply:{title:"Power Supply",icon:c.W6,info:'Statistics for the various system power supplies. Data collected from the Linux power supply class.'},xenstat:{title:"Xen Node",icon:T.D6,info:"General statistics for the Xen node. Data collected using the xenstat library."},xendomain:{title:"",icon:B.l9,info:"Xen domain resource utilization metrics. Netdata reads this information using the xenstat library, which gives access to the resource usage information (CPU, memory, disk I/O, network) for a virtual machine."},windows:{title:"Windows",icon:r.tQ},iis:{title:"IIS",icon:P.pS},mssql:{title:"SQL Server",icon:C.he},ad:{title:"Active Directory",icon:r.tQ},adcs:{title:"AD Certification Service",icon:r.tQ},adfs:{title:"AD Federation Service",icon:r.tQ},netframework:{title:".NET Framework",icon:x.fP},exchange:{title:"Exchange",icon:M.y_},perf:{title:"Perf Counters",icon:S.xi,info:"Performance Monitoring Counters (PMC). Data collected using the perf_event_open() system call, which utilises Hardware Performance Monitoring Units (PMUs)."},vsphere:{title:"vSphere",icon:T.D6,info:'Performance statistics for ESXi hosts and virtual machines. Data collected from VMware vCenter Server using the govmomi library.'},vcsa:{title:"VCSA",icon:T.D6,info:'vCenter Server Appliance health statistics. Data collected from the Health API.'},zookeeper:{title:"Zookeeper",icon:"serviceDatabase",info:'Provides health statistics for the Zookeeper server. Data collected through the command port using the mntr command.'},hdfs:{title:"HDFS",icon:k.Uj,info:'Provides Hadoop Distributed File System performance statistics. The module collects metrics over Java Management Extensions through the web interface of an HDFS daemon.'},am2320:{title:"AM2320 Sensor",icon:w.Ro,info:"Readings from the external AM2320 Sensor."},scaleio:{title:"ScaleIO",icon:"serviceDatabase",info:"Performance and health statistics for various ScaleIO components. Data collected via the VxFlex OS Gateway REST API."},squidlog:{title:"Squid log",icon:_.h8},cockroachdb:{title:"CockroachDB",icon:"serviceDatabase",info:"Performance and health statistics for various CockroachDB components."},ebpf:{title:"eBPF",icon:"serviceEBPF",info:"Monitor system calls, internal functions, bytes read, bytes written and errors using eBPF."},filesystem:{title:"Filesystem",icon:l.MB,info:'Number of filesystem events for Virtual File System, File Access, Directory cache, and file system latency (BTRFS, EXT4, NFS, XFS, and ZFS) when your disk has the file system. 
Filesystem charts have a relationship with SWAP, Disk, Sync, and Mount Points.'},vernemq:{title:"VerneMQ",icon:v.q9,info:'Performance data for the VerneMQ open-source MQTT broker.'},pulsar:{title:"Pulsar",icon:v.q9,info:'Summary, namespaces and topics performance data for the Apache Pulsar pub-sub messaging system.'},anomalies:{title:"Anomalies",icon:m.rI,info:'Anomaly scores relating to key system metrics. A high anomaly probability indicates strange behaviour and may trigger an anomaly prediction from the trained models. Read the anomalies collector docs for more details.'},alerts:{title:"Alerts",icon:y.z$,info:'Charts showing alert status over time. More details here.'},statsd:{title:"StatsD",icon:b.gK,info:'StatsD is an industry-standard technology stack for monitoring applications and instrumenting any piece of software to deliver custom metrics. Netdata allows the user to organize the metrics in different charts and visualize any application metric easily. Read more on Netdata Learn.'},supervisord:{title:"Supervisord",icon:f.Ag,info:'Detailed statistics for each group of processes controlled by Supervisor. Netdata collects these metrics using the getAllProcessInfo method.'},systemdunits:{title:"systemd units",icon:g.Ub,info:'systemd provides a dependency system between various entities called "units" of 11 different types. Units encapsulate various objects that are relevant for system boot-up and maintenance. Units may be active (meaning started, bound, plugged in, depending on the unit type), or inactive (meaning stopped, unbound, unplugged), as well as in the process of being activated or deactivated, i.e. between the two states (these states are called activating, deactivating). A special failed state is available as well, which is very similar to inactive and is entered when the service failed in some way (process returned error code on exit, or crashed, an operation timed out, or after too many restarts). For details, see systemd(1).'},changefinder:{title:"ChangeFinder",icon:m.rI,info:'Online changepoint detection using machine learning. More details here.'},zscores:{title:"Z-Scores",icon:p.bx,info:"Z-scores relating to key system metrics."},anomaly_detection:{title:"Anomaly Detection",icon:u.GQ,info:'Charts relating to anomaly detection; increased anomalous dimensions or a higher than usual anomaly_rate could be signs of some abnormal behaviour. Read our anomaly detection guide for more details.'},fail2ban:{title:"Fail2ban",icon:h.V2,info:"Netdata keeps track of the current jail status by reading the Fail2ban log file."},wireguard:{title:"WireGuard",icon:d.dX,info:"VPN network interfaces and peer traffic."},prometheus:{icon:"servicePrometheus"},"Kubernetes State":{title:"Kubernetes State",icon:"serviceKubernetes"},"Kubernetes Containers":{title:"Kubernetes Containers",icon:"serviceKubernetes"},"Kubernetes kubelet":{title:"Kubernetes Kubelet",icon:"serviceKubernetes"},"Kubernetes kubeproxy":{title:"Kubernetes Kubeproxy",icon:"serviceKubernetes"},coredns:{title:"CoreDNS",icon:"serviceCoreDns"},dns:{title:"DNS Query",icon:"serviceDNS"},sendgrid:{title:"SendGrid",icon:"serviceSendgrid"},smartd_log:{icon:"servicesSmartdlog"},smartctl:{title:"S.M.A.R.T. Devices",icon:"servicesSmartdlog",info:"Tracks S.M.A.R.T. (Self-Monitoring, Analysis, and Reporting Technology) data for storage health monitoring."},bind:{icon:"serviceBind"},systemd:{icon:"serviceSystemd",info:"Resource utilization of systemd services. 
Netdata monitors all systemd services via cgroups (the resource accounting used by containers)."},md:{title:"Linux Software RAID",icon:l.MB,info:"Metrics for monitoring Linux Software RAID."},cups:{title:"Printers (cups)",icon:c.W6,info:"Metrics for Printer (cups) monitoring."},named:{title:"ISC Bind (named)",icon:"serviceCoreDns",info:"Metrics for named monitoring."},isc_dhcpd:{title:"ISC DHCPd",icon:"serviceCoreDns",info:"Metrics for DHCPd monitoring."},dnsmasq:{title:"Dnsmasq",icon:i.KI},dnsmasq_dhcp:{title:"Dnsmasq DHCP",icon:i.KI},elasticsearch:{title:"Elasticsearch",icon:"serviceElasticSearch"}},Z={"web_log.squid_bandwidth":{title:"bandwidth",info:'Bandwidth of responses (sent) by squid. This chart may present unusual spikes, since the bandwidth is accounted at the time the log line is saved by the server, even if the time needed to serve it spans across a longer duration. We suggest using QoS (e.g. FireQOS) for accurate accounting of the server bandwidth.'},"web_log.squid_responses":{title:"responses",info:"Information related to the responses sent by squid."},"web_log.squid_requests":{title:"requests",info:"Information related to the requests squid has received."},"web_log.squid_hierarchy":{title:"hierarchy",info:"Performance metrics for the squid hierarchy used to serve the requests."},"web_log.squid_squid_transport":{title:"transport"},"web_log.squid_squid_cache":{title:"cache",info:"Performance metrics for the squid cache."},"web_log.squid_timings":{title:"timings",info:"Duration of squid requests. Unrealistic spikes may be reported, since squid logs the total time of the requests when they complete. Especially for HTTPS, the clients get a tunnel from the proxy and exchange requests directly with the upstream servers, so squid cannot evaluate the individual requests and reports the total time the tunnel was open."},"web_log.squid_clients":{title:"clients"},"web_log.bandwidth":{info:'Bandwidth of requests (received) and responses (sent). received requires an extended log format (without it, the web server log does not have this information). This chart may present unusual spikes, since the bandwidth is accounted at the time the log line is saved by the web server, even if the time needed to serve it spans across a longer duration. We suggest using QoS (e.g. FireQOS) for accurate accounting of the web server bandwidth.'},"web_log.urls":{info:'Number of requests for each URL pattern defined in /etc/netdata/go.d/web_log.conf. This chart counts all requests matching the URL patterns defined, independently of the web server response codes (i.e. both successful and unsuccessful).'},"web_log.clients":{info:"Charts showing the number of unique client IPs accessing the web server."},"web_log.timings":{info:"Web server response timings - the time the web server needed to prepare and respond to requests. This requires an extended log format and its meaning is web server specific. For most web servers this accounts for the time from the reception of a complete request, to the dispatch of the last byte of the response. So, it includes the network delays of responses, but it does not include the network delays of requests."},"mem.ksm":{title:"deduper (ksm)",info:'Kernel Same-page Merging (KSM) performance monitoring, read from several files in /sys/kernel/mm/ksm/. KSM is a memory-saving de-duplication feature in the Linux kernel. 
The KSM daemon ksmd periodically scans those areas of user memory which have been registered with it, looking for pages of identical content which can be replaced by a single write-protected page.'},"mem.hugepages":{info:'Hugepages is a feature that allows the kernel to utilize the multiple page size capabilities of modern hardware architectures. The kernel creates multiple pages of virtual memory, mapped from both physical RAM and swap. There is a mechanism in the CPU architecture called "Translation Lookaside Buffers" (TLB) to manage the mapping of virtual memory pages to actual physical memory addresses. The TLB is a limited hardware resource, so utilizing a large amount of physical memory with the default page size consumes the TLB and adds processing overhead. By utilizing Huge Pages, the kernel is able to create pages of much larger sizes, each page consuming a single resource in the TLB. Huge Pages are pinned to physical RAM and cannot be swapped/paged out.'},"mem.numa":{info:'Non-Uniform Memory Access (NUMA) is a hierarchical memory design in which the memory access time depends on locality. Under NUMA, a processor can access its own local memory faster than non-local memory (memory local to another processor or memory shared between processors). The individual metrics are described in the Linux kernel documentation.'},"mem.ecc":{info:'

    ECC memory is a type of computer data storage that uses an error correction code (ECC) to detect and correct n-bit data corruption which occurs in memory. Typically, ECC memory maintains a memory system immune to single-bit errors: the data that is read from each word is always the same as the data that had been written to it, even if one of the bits actually stored has been flipped to the wrong state.

    Memory errors can be classified into two types: Soft errors, which randomly corrupt bits but do not leave physical damage. Soft errors are transient in nature and are not repeatable; they can be caused by electrical or magnetic interference. Hard errors, which corrupt bits in a repeatable manner because of a physical/hardware defect or an environmental problem.'},"mem.pagetype":{info:'Statistics of free memory available from the memory buddy allocator. The buddy allocator is the system memory allocator. The whole memory space is split in physical pages, which are grouped by NUMA node, zone, migrate type, and size of the block. By keeping pages grouped based on their ability to move, the kernel can reclaim pages within a page block to satisfy a high-order allocation. When the kernel or an application requests some memory, the buddy allocator provides the page that most closely matches the request.'},"ip.ecn":{info:'Explicit Congestion Notification (ECN) is an extension to IP and TCP that allows end-to-end notification of network congestion without dropping packets. ECN is an optional feature that may be used between two ECN-enabled endpoints when the underlying network infrastructure also supports it.'},"ip.multicast":{info:'IP multicast is a technique for one-to-many communication over an IP network. Multicast uses network infrastructure efficiently by requiring the source to send a packet only once, even if it needs to be delivered to a large number of receivers. The nodes in the network take care of replicating the packet to reach multiple receivers only when necessary.'},"ip.broadcast":{info:'In computer networking, broadcasting refers to transmitting a packet that will be received by every device on the network. In practice, the scope of the broadcast is limited to a broadcast domain.'},"netfilter.conntrack":{title:"connection tracker",info:"Netfilter Connection Tracker performance metrics. The connection tracker keeps track of all connections of the machine, inbound and outbound. It works by keeping a database with all open connections, tracking network and address translation and connection expectations."},"netfilter.nfacct":{title:"bandwidth accounting",info:"The following information is read using the nfacct.plugin."},"netfilter.synproxy":{title:"DDoS protection",info:'DDoS protection performance metrics. SYNPROXY is a TCP SYN packet proxy. It is used to protect any TCP server (like a web server) from SYN floods and similar DDoS attacks. SYNPROXY intercepts new TCP connections and handles the initial 3-way handshake using syncookies instead of conntrack to establish the connection. It is optimized to handle millions of packets per second utilizing all CPUs available without any concurrency locking between the connections. It can be used for any kind of TCP traffic (even encrypted), since it does not interfere with the content itself.'},"ipfw.dynamic_rules":{title:"dynamic rules",info:"Number of dynamic rules, created by the corresponding stateful firewall rules."},"system.softnet_stat":{title:"softnet",info:e=>{let{os:t}=e;return"linux"===t?'

    Statistics for CPU SoftIRQs related to network receive work. A breakdown per CPU core can be found at CPU / softnet statistics. More information about identifying and troubleshooting network driver related issues can be found in the Red Hat Enterprise Linux Network Performance Tuning Guide.

    Processed - packets processed. Dropped - packets dropped because the network device backlog was full. Squeezed - number of times the network device budget was consumed or the time limit was reached, but more work was available. ReceivedRPS - number of times this CPU has been woken up to process packets via an Inter-processor Interrupt. FlowLimitCount - number of times the flow limit has been reached (flow limiting is an optional Receive Packet Steering feature).

    ':"Statistics for CPU SoftIRQs related to network receive work."}},"system.clock synchronization":{info:'NTP lets you automatically sync your system time with a remote server. This keeps your machine\u2019s time accurate by syncing with servers that are known to have accurate times.'},"cpu.softnet_stat":{title:"softnet",info:e=>{let{os:t}=e;return"linux"===t?'

    Statistics for CPU SoftIRQs related to network receive work. Total for all CPU cores can be found at System / softnet statistics. More information about identifying and troubleshooting network driver related issues can be found in the Red Hat Enterprise Linux Network Performance Tuning Guide.

    Processed - packets processed. Dropped - packets dropped because the network device backlog was full. Squeezed - number of times the network device budget was consumed or the time limit was reached, but more work was available. ReceivedRPS - number of times this CPU has been woken up to process packets via an Inter-processor Interrupt. FlowLimitCount - number of times the flow limit has been reached (flow limiting is an optional Receive Packet Steering feature).

    ':'Per-CPU-core statistics for SoftIRQs related to network receive work. Total for all CPU cores can be found at System / softnet statistics.'}},"go_expvar.memstats":{title:"memory statistics",info:'Go runtime memory statistics. See runtime.MemStats documentation for more info about each chart and the values.'},"couchdb.dbactivity":{title:"db activity",info:"Overall database reads and writes for the entire server. This includes any external HTTP traffic, as well as internal replication traffic performed in a cluster to ensure node consistency."},"couchdb.httptraffic":{title:"http traffic breakdown",info:"All HTTP traffic, broken down by type of request (GET, PUT, POST, etc.) and response status code (200, 201, 4xx, etc.)

    Any 5xx errors here indicate a likely CouchDB bug; check the logfile for further information."},"couchdb.ops":{title:"server operations"},"couchdb.perdbstats":{title:"per db statistics",info:'Statistics per database. This includes 3 size graphs per database: active (the size of live data in the database), external (the uncompressed size of the database contents), and file (the size of the file on disk, exclusive of any views and indexes). It also includes the number of documents and number of deleted documents per database.'},"couchdb.erlang":{title:"erlang statistics",info:"Detailed information about the status of the Erlang VM that hosts CouchDB. These are intended for advanced users only. High values of the peak message queue (>10e6) generally indicate an overload condition."},"ntpd.system":{title:"system",info:'Statistics of the system variables as shown by the readlist billboard ntpq -c rl. System variables are assigned an association ID of zero and can also be shown in the readvar billboard ntpq -c "rv 0". These variables are used in the Clock Discipline Algorithm, to calculate the lowest and most stable offset.'},"ntpd.peers":{title:"peers",info:'Statistics of the peer variables for each peer configured in /etc/ntp.conf as shown by the readvar billboard ntpq -c "rv <association>", while each peer is assigned a nonzero association ID as shown by ntpq -c "apeers". The module periodically scans for new/changed peers (default: every 60s). ntpd selects the best possible peer from the available peers to synchronize the clock. At least 3 peers are required to properly identify the best possible peer.'},"mem.page_cache":{title:"page cache (eBPF)",info:'Number of calls to functions used to manipulate the Linux page cache. This chart has a relationship with File Systems, Sync, and Hard Disk.'},"apps.page_cache":{title:"page cache (eBPF)",info:'Netdata also gives a summary for these charts in the Memory submenu.'},"filesystem.vfs":{title:"vfs (eBPF)",info:'Number of calls to Virtual File System functions used to manipulate File Systems.'},"apps.vfs":{title:"vfs (eBPF)",info:'Netdata also gives a summary for these charts in the Filesystem submenu.'},"filesystem.ext4_latency":{title:"ext4 latency (eBPF)",info:'Latency is the time it takes for an event to be completed. Based on the eBPF ext4dist from BCC tools. This chart is provided by the eBPF plugin to monitor File systems.'},"filesystem.xfs_latency":{title:"xfs latency (eBPF)",info:'Latency is the time it takes for an event to be completed. Based on the xfsdist from BCC tools. This chart is provided by the eBPF plugin to monitor File systems.'},"filesystem.nfs_latency":{title:"nfs latency (eBPF)",info:'Latency is the time it takes for an event to be completed. Based on the nfsdist from BCC tools. This chart is provided by the eBPF plugin to monitor File systems.'},"filesystem.zfs_latency":{title:"zfs latency (eBPF)",info:'Latency is the time it takes for an event to be completed. Based on the zfsdist from BCC tools. This chart is provided by the eBPF plugin to monitor File systems.'},"filesystem.btrfs_latency":{title:"btrfs latency (eBPF)",info:'Latency is the time it takes for an event to be completed. Based on the btrfsdist from BCC tools. 
This chart is provided by the eBPF plugin to monitor File systems.'},"filesystem.file_access":{title:"file access (eBPF)"},"apps.file_access":{title:"file access (eBPF)",info:'Netdata also gives a summary for this chart in the Filesystem submenu (more details in the eBPF plugin file chart section).'},"ip.kernel":{title:"kernel functions (eBPF)"},"apps.net":{title:"network",info:'Netdata also gives a summary for eBPF charts in the Networking Stack submenu.'},"system.ipc semaphores":{info:'System V semaphores is an inter-process communication (IPC) mechanism. It allows processes or threads within a process to synchronize their actions. They are often used to monitor and control the availability of system resources such as shared memory segments. For details, see svipc(7). To see the host IPC semaphore information, run ipcs -us. For limits, run ipcs -ls.'},"system.ipc shared memory":{info:'System V shared memory is an inter-process communication (IPC) mechanism. It allows processes to communicate information by sharing a region of memory. It is the fastest form of inter-process communication available since no kernel involvement occurs when data is passed between the processes (no copying). Typically, processes must synchronize their access to a shared memory object, using, for example, POSIX semaphores. For details, see svipc(7). To see the host IPC shared memory information, run ipcs -um. For limits, run ipcs -lm.'},"system.ipc message queues":{info:'System V message queues is an inter-process communication (IPC) mechanism. It allows processes to exchange data in the form of messages. For details, see svipc(7). To see the host IPC messages information, run ipcs -uq. For limits, run ipcs -lq.'},"system.interrupts":{info:'Interrupts are signals sent to the CPU by external devices (normally I/O devices) or programs (running processes). They tell the CPU to stop its current activities and execute the appropriate part of the operating system. Interrupt types are hardware (generated by hardware devices to signal that they need some attention from the OS), software (generated by programs when they want to request a system call to be performed by the operating system), and traps (generated by the CPU itself to indicate that some error or condition occurred for which assistance from the operating system is needed).'},"system.softirqs":{info:'Software interrupts (or "softirqs") are one of the oldest deferred-execution mechanisms in the kernel. Several tasks among those executed by the kernel are not critical: they can be deferred for a long period of time, if necessary. The deferrable tasks can execute with all interrupts enabled (softirqs are patterned after hardware interrupts). Taking them out of the interrupt handler helps keep kernel response time small.'},"cpu.softirqs":{info:'Total number of software interrupts per CPU. To see the total number for the system, check the softirqs section.'},"cpu.interrupts":{info:'Total number of interrupts per CPU. To see the total number for the system, check the interrupts section. 
The last column in /proc/interrupts provides an interrupt description or the device name that registered the handler for that interrupt.'},"cpu.throttling":{info:"CPU throttling is commonly used to automatically slow down the computer when possible to use less energy and conserve battery."},"cpu.cpuidle":{info:'Idle States (C-states) are used to save power when the processor is idle.'},"services.net":{title:"network (eBPF)"},"services.page_cache":{title:"page cache (eBPF)"},"netdata.ebpf":{title:"eBPF.plugin",info:'eBPF (extended Berkeley Packet Filter) is used to collect metrics from inside the Linux kernel, giving a zoom inside your Process, Hard Disk, File systems (File Access, and Directory Cache), Memory (Swap I/O, Page Cache), IRQ (Hard IRQ and Soft IRQ), Shared Memory, Syscalls (Sync, Mount), and Network.'}},Y=function(e,t,n,s){let a=arguments.length>4&&void 0!==arguments[4]?arguments[4]:{};if("undefined"===typeof e[n])return s;const o=e[n][t];return"undefined"===typeof o?s:"function"===typeof o?o(a):o},J=(e,t)=>{if(e.sectionTitle)return e.sectionTitle;if("undefined"===typeof e.menuPattern)return Y(K,"title",e.id,e.id,t).toString().replace(/_/g," ");const n=e.type||e.id.split(".")[0],s=n===e.menuPattern?"":" ".concat(n.slice(-(n.length-e.menuPattern.length-1)));return"".concat(Y(K,"title",e.menuPattern,e.menuPattern,t).toString()," ").concat(s).replace(/_/g," ")},X=s._X,$=e=>Y(K,"icon",e.menuIcon||e.menuPattern||e.id,X),ee=(e,t)=>Y(K,"info",e.menuPattern||e.id,null,t),te=(e,t,n)=>{const s=t?"".concat(e,".").concat(t):e;return Y(Z,"title",s,t||e,n).toString().replace(/_/g," ")},ne=(e,t,n)=>Y(Z,"info",t?"".concat(e,".").concat(t):e,null,n),se=(e,t)=>{const n=Y(o.A,"info",e,null,t);return n?'
    '.concat(n,"
    "):""},ae=(e,t,n,s)=>Y(o.A,t,e,n,s),oe=e=>{var t;return(null===(t=o.A[e])||void 0===t?void 0:t.valueRange)||[null,null]},re=function(e){let{menuId:t,subMenuId:n,sectionInfo:s}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},r=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{};const{context:i,domain:c,id:l,visible:d,filteredOut:h,hidden:u}=e;return{...(0,o.L)(l),id:l,chartId:l,menuGroupId:t,subMenuId:n,link:"chart_".concat((0,a.A)(l)),chartLibrary:"dygraph",info:se(i,r),sectionInfo:s,valueRange:oe(i),colors:"".concat(Y(i||l,"colors",i,"",r)),domain:c,visible:d,filteredOut:h,hidden:u}}},72253:(e,t,n)=>{n.d(t,{A:()=>T});var s=n(58168),a=(n(17333),n(41393),n(98992),n(54520),n(81454),n(62953),n(96540)),o=n(83199),r=n(73700),i=n(31438),c=n(51220),l=n(23931),d=(n(3064),n(72577),n(55309)),h=n(59090),u=n(8320),p=n(86856);const m=e=>{const t=(0,a.useRef)();return(0,p.i7)(e,(e=>(t.current=e,!1))),t},g=(e,t)=>{const n=(0,i.NF)(),s=m(d.ox),o=m(h.xw),r=m(i.i8),c=(0,u.Gr)(),l=(0,a.useMemo)((()=>({goToElement:n=>!!t.length&&(function(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:1;new Promise((n=>{const s=t=>{if(0===t)return n();e(),setTimeout((()=>s(--t)))};s(t)}))}((()=>{const s=t.findIndex((e=>e.id===n));if(-1!==s)return e.scrollToIndex(s,{align:"start"})}),5),!0),goToLink:e=>{const t=Object.values(s.current).find((t=>t.link===e));if(t)return l.goToElement(t.id);const n=Object.values(o.current).find((t=>t.link===e));if(n)return l.goToElement(n.id);if(r.current){const t=Object.values(r.current).find((t=>t.link===e));if(t)return l.goToElement(t.chartId)}}})),[n,t]);return(0,a.useLayoutEffect)((()=>{c(l)}),[l]),l};var f=n(62302);let b=null;n(25509),n(65223),n(60321),n(41927),n(11632),n(64377),n(66771),n(12516),n(68931),n(52514),n(35694),n(52774),n(49536),n(21926),n(94483),n(16215);var y=n(57530),v=n(43375);n(14905),n(8872);const _=(0,a.memo)((e=>{let{getComponent:t,element:n,index:o,...r}=e;const i=(0,a.useMemo)((()=>n&&t(n)),[null===n||void 0===n?void 0:n.id,o]);return i?a.createElement(i,(0,s.A)({key:n.id},n,{index:o},r,{isVisible:!0})):null})),w=(0,a.memo)((0,a.forwardRef)(((e,t)=>{let{elements:n,onScroll:i,dashboardOptions:l,getComponent:d,stickyBg:h="mainBackground",stickyRef:u,nextStickyRef:p,stickyIndexes:m,tmpStickyRef:f,containerWidth:w}=e;const k=(e=>{const t=(0,a.useRef)(),n=(0,a.useRef)(),s=(0,a.useRef)(0),o=(0,a.useRef)(0);return(0,a.useCallback)((a=>{if(t.current!==e.length&&(o.current=0),n.current&&(o.current<5||t.current!==e.length)){o.current=o.current+1,t.current=e.length;const r=e.findIndex((e=>e.id===n.current.id));if(-1===r)return;const{start:i}=a.getMeasurements()[r];return a.scrollToOffset(i+s.current),clearTimeout(b),void(b=setTimeout((()=>o.current=5),200))}t.current=e.length;const r=a.scrollOffset;let i=0;const c=a.getMeasurements().find((e=>(i+=e.size,i>r)));c&&(n.current=e[c.index],s.current=r-c.start)}),[e])})(n),T=(0,a.useRef)(),[S,M]=(0,a.useState)(0),x=((e,t,n)=>{let{stickyRef:s,nextStickyRef:o,tmpStickyRef:r}=n;const i=(0,a.useRef)(),{active:c}=(0,v.fF)(),l=(0,a.useRef)();return l.current=c,(0,a.useCallback)((e=>{if(l.current&&l.current.data.current.isResizer)return i.current;if(!t)return i.current=(0,y.vp)(e),i.current;o.current=Number(Object.keys(t).find((t=>e.startIndexe.startIndex>=t))),o.current-e.startIndex===1&&(s.current=NaN);let n=(0,y.vp)(e);return null!==r.current&&(n=[...n,r.current]),isNaN(s.current)?i.current=[...new Set(n)].sort(((e,t)=>e-t)):i.current=[...new 
Set([s.current,...n])].sort(((e,t)=>e-t)),i.current}),[e,t])})(n,m,{stickyRef:u,nextStickyRef:p,tmpStickyRef:f}),C=(0,c.Te)({count:n.length,getScrollElement:()=>t.current,overscan:1,enableSmoothScroll:!1,estimateSize:e=>n[e].size,onChange:k,rangeExtractor:x,scrollPaddingStart:20});T.current=C,g(C,n);const P=(0,a.useCallback)((0,r.n)(100,(function(){i(...arguments),t.current&&M(t.current.scrollTop)})),[i]);return a.createElement("div",{ref:t,style:{minHeight:"100%",width:"100%",overflow:"auto"},onScroll:P},a.createElement("div",{style:{minHeight:"".concat(C.getTotalSize(),"px"),width:"100%",position:"relative"}},S>0&&!isNaN(u.current)&&a.createElement(o.Box,{key:u.current,sx:{top:0,left:0,width:"100%",padding:1,position:"sticky",zIndex:10,boxShadow:"2px 6px 6px -7px rgba(0, 0, 0, 0.4)"},background:h},a.createElement(_,(0,s.A)({},l,{element:n[u.current],index:u.current,getComponent:d,isActiveSticky:!0}))),C.getVirtualItems().map((e=>a.createElement(o.Box,{key:e.key,ref:C.measureElement,sx:{top:0,left:0,width:"100%",padding:1,position:"absolute",transform:"translateY(".concat(e.start,"px)")},"data-index":e.index},a.createElement(_,(0,s.A)({},l,{element:n[e.index],index:e.index,getComponent:d,containerWidth:w})))))))})),((e,t)=>e.elements.length===t.elements.length&&e.containerWidth===t.containerWidth)),k=e=>{let{onActiveMenuGroupId:t,onActiveSubMenuId:n,getComponent:o,dashboardOptions:r,initialChartName:c,checkVisibility:d,...h}=e;const u=(0,l.bD)(),p=(0,i.qR)((e=>e.filter((e=>d?d(e.subMenuChartIds||e.chartIds||[e.id]):u(e.subMenuChartIds||e.chartIds||[e.id]))))),[{stickyRef:m,nextStickyRef:g,tmpStickyRef:b},y]=(e=>{const t=(0,i.Tg)(),n=(0,a.useRef)(null),s=(0,a.useMemo)((()=>e.reduce(((e,n,s)=>{let{id:a}=n;return t[a]&&(e[s]=!0),e}),{})),[e]),o=(0,a.useRef)(),r=(0,a.useRef)(),c=(0,a.useCallback)((e=>!!s[e]),[s]),l=(0,a.useCallback)((e=>o.current===e),[]);return[{stickyRef:o,nextStickyRef:r,isSticky:c,isActiveSticky:l,tmpStickyRef:n},s,t]})(p),v=(0,a.useRef)(),_=(0,a.useRef)(),k=(0,a.useMemo)((()=>(_.current&&_.current.cancel(),_.current=(0,f.A)(m),()=>_.current(v.current,n,t))),[p.length,n,t]);return(0,a.useLayoutEffect)((()=>{p.length&&k()}),[p.length,c]),a.createElement(w,(0,s.A)({elements:p,onScroll:k,dashboardOptions:r,getComponent:o,ref:v,stickyRef:m,nextStickyRef:g,stickyIndexes:y,tmpStickyRef:b},h))},T=(0,a.memo)(k)},62302:(e,t,n)=>{n.d(t,{A:()=>a});n(3064),n(98992),n(72577);var s=n(73700);const a=e=>(0,s.s)(100,((t,n,s)=>{var a;if(!t)return;const{top:o}=t.getBoundingClientRect(),r=Array.from(t.querySelectorAll("[data-submenuid], [data-menuid], [data-chartid]")).find(((t,n)=>!(!isNaN(null===e||void 0===e?void 0:e.current)&&0===n)&&t.getBoundingClientRect().top-o>0));if(!r)return;const i=r.getAttribute("data-menuid");if(i)return n(""),void s(i);n(r.getAttribute("data-submenuid")||(null===(a=r.closest("[data-submenuid]"))||void 0===a?void 0:a.getAttribute("data-submenuid")))}))},16579:(e,t,n)=>{n.d(t,{H:()=>w,A:()=>k});var s=n(58168),a=(n(62953),n(96540)),o=n(83199),r=(n(3064),n(9920),n(41393),n(14905),n(98992),n(72577),n(3949),n(81454),n(8872),n(25509),n(65223),n(60321),n(41927),n(11632),n(64377),n(66771),n(12516),n(68931),n(52514),n(35694),n(52774),n(49536),n(21926),n(94483),n(16215),n(79304));n(25440);n(8159),n(37550);var i=n(62193),c=n.n(i),l=n(4967),d=n(45387);const h=function(e,t,n){let s=arguments.length>3&&void 0!==arguments[3]?arguments[3]:{};const a={},o=[];return t.forEach((t=>{const r=n(t);(0,d.X_)(l.A,e,r.context,[]).forEach(((i,l)=>{const d="function"===typeof 
i?i({...s,id:r.id}):i;if(!d)return;if(((e,t)=>!!e&&(Array.isArray(e)?e.some((e=>!c()(t(e)))):!c()(t(e))))(d.hiddenWhen,n))return;if(d.domain&&d.domain!==r.domain)return;const h="".concat(e,"|").concat(t,"|").concat(l),u={id:h,chartId:t,...s,...d};a[h]=u,o.push(h)}))})),[a,o.sort(((e,t)=>a[e].priority-a[t].priority))]},u=(e,t,n)=>{const s={},a={},o={},r={};e.forEach((e=>{const a=t(e);a.id&&(n[e]=((e,t)=>{let{id:n,name:s,family:a,context:o,priority:r,visible:i,filteredOut:c,hidden:l}=e;return function(e){var t;let{name:n="",context:s="",submenuNames:a={}}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};const[o,r,i]=(s||e.chartId).split("."),c=o.split("_"),[l,d]=c,h=t=>{let{menuPattern:n,menu:s=o,...a}=t;return{id:s,menuPattern:n,key:"".concat(s,"|").concat(e.subMenuId,"|").concat(e.chartId),...a,...e,part1:l}},u=e=>{const t=c.length>=2&&d===e?"".concat(l,"_").concat(d):l;return h({menuPattern:t})};switch(l){case"ap":case"net":case"powersupply":case"mount":return h({menu:l});case"cpufreq":case"cpuidle":return h({menu:"cpu"});case"smartd":case"web":return u("log");case"apache":return u("cache");case"bind":return u("rndc");case"go":return u("expvar");case"isc":return u("dhcpd");case"anomaly":return h({});case"disk":return/(inodes|space)/.test(r)||/(inodes|space)/.test(d)?h({menu:"mount"}):h({menu:l});case"k8s":return h("state"===d?{menu:"Kubernetes State"}:d&&"container"!==d?{menu:"Kubernetes ".concat(d)}:{menu:"Kubernetes Containers",flavour:"k8s"});case"cgroup":{const t=e.chartId.match(/.*[._/-:]qemu[._/-:]*/)||e.chartId.match(/.*[._/-:]kvm[._/-:]*/)?"cgqemu":"cgroup",n=1===c.length?"Containers & VMs":void 0;return h({menuPattern:t,sectionTitle:n})}case"ovpn":{const e=c.length>3&&"status"===c[1]&&"log"===c[2]?"".concat(l,"_").concat(d):l;return h({menuPattern:e})}case"prometheus":{if(i)return h({menu:"".concat(r.replace("-"," ")),menuIcon:"prometheus"});const t=e.subMenuId.split("_")[0];return h({menu:"".concat(t),menuIcon:"prometheus"})}case"tc":if("tc.qos"===s&&(!("family"in a)||(null===(t=a[l])||void 0===t?void 0:t[e.subMenuId])===e.subMenuId)){const[,t]=n.split(".");a[l]||(a[l]={}),a[l][e.subMenuId]=t.replace(/^(in_|out_)/,"").replace(/(_in|_out)$/,"")}return h({menu:l,priority:e.chartId.match(/.*-ifb$/)?e.priority-1:e.priority});case"dnsmasq":{if(2==c.length&&"dhcp"===c[1])return h({menu:"".concat(l,"_").concat(d)});if(c.length>=2&&"dhcp"===c[1])return h({menuPattern:"".concat(l,"_").concat(d)});const e=c.length>1?l:void 0;return h({menuPattern:e})}case"dns":return h({menu:"dns"});default:{const e=c.length>1?l:void 0;return h({menuPattern:e})}}}({chartId:n,subMenuId:a||"all",priority:r,visible:i,filteredOut:c,hidden:l},{name:s,context:o,submenuNames:t})})((e=>({...e,family:(0,d._3)(e.context,"family",e.family)}))(a),s))}));return[...e].sort(((e,t)=>{var s,a,o,r;return(null===(s=n[e])||void 0===s?void 0:s.priority)-(null===(a=n[t])||void 0===a?void 0:a.priority)||((null===(o=n[e])||void 0===o?void 0:o.key)||"").localeCompare((null===(r=n[t])||void 0===r?void 0:r.key)||"",void 0,{sensitivity:"accent",ignorePunctuation:!0})})).forEach((e=>{const t=n[e];if(!t)return;const s="".concat(t.id,"|").concat(t.subMenuId);r[s]||(r[s]=[]),a[t.id]||(a[t.id]=new Set),o[t.id]||(o[t.id]=[]),r[s].push(t.chartId),a[t.id].add(s),o[t.id].push(t.chartId)})),{chartMenus:n,menuGroups:a,menuGroupChartIds:o,subMenus:r,submenuNames:s}},p=function(e,t){let{os:n,extraKey:s}=arguments.length>2&&void 
0!==arguments[2]?arguments[2]:{},a={};const{menuGroups:o,menuGroupChartIds:i,subMenus:c,submenuNames:l}=u(e,t,a),p={},m=Object.keys(o).reduce(((e,c)=>{const l=i[c],u=a[l[0]],m=l.find((e=>a[e].menuPattern)),g=m?a[m].menuPattern:"",f=[...o[c]],[b,y]=h("mainheads",l,t,{os:n});return Object.assign(p,b),e[c]={level:0,id:c,menuPattern:g,priority:u.priority,headIds:y,subMenuIds:f,subMenuChartIds:l,name:(0,d.e4)(u),icon:(0,d.ro)(u),info:(0,d.yn)(u),link:"".concat((0,r.A)("menu_".concat(c))),size:null!==y&&void 0!==y&&y.length?500:24,flavour:u.flavour,showConfig:!0,extraKey:s},e}),{}),g=Object.keys(c).reduce(((e,o)=>{var i;const u=c[o],g=a[u[0]],{id:f,menuPattern:b}=m[g.id],[y,v]=h("heads",u,t,{os:n});Object.assign(p,y);const _=b||f,w=g.subMenuId in(l[g.part1]||{})?"".concat(g.subMenuId," (").concat(null===(i=l[g.part1])||void 0===i?void 0:i[g.subMenuId],")"):(0,d.KG)(_,g.subMenuId);return e[o]={level:1,id:o,menuGroupId:f,priority:g.priority,chartIds:u,headIds:v,name:w,info:(0,d.Wr)(_,g.subMenuId),link:"".concat((0,r.A)("menu_".concat(g.id,"_submenu_").concat(g.subMenuId))),size:28,showConfig:!0,extraKey:s},e}),{}),f=Object.keys(o),b={},y=f.reduce(((e,s)=>{const a=m[s];return b[a.id]=!0,[...e,{...a,sticky:!0},...m[s].subMenuIds.reduce(((e,a)=>{const o=g[a];return b[o.id]=!0,[...e,{...o,sticky:!0},...g[a].chartIds.map((e=>{const o=t(e);if(o)return p[e]=(0,d.Ay)(o,{menuId:s,subMenuId:a,sectionInfo:g[a].info},{os:n}),{...p[e],level:2,size:365,menuKey:m[s].menuPattern||m[s].id}}))]}),[])]}),[]);return a=null,{stickyIds:b,allElements:y,menuGroupIds:f,menuGroupById:m,subMenuById:g,menuItemAttributesById:p}};var m=n(31438),g=n(55309),f=n(59090),b=n(18713),y=n(8320);const v=[],_=e=>{let{container:t,ids:n,getObject:s,children:o,getMenu:r=p,deps:i=v,...c}=e;const{allElements:l,menuGroupIds:d,menuGroupById:h,subMenuById:u,menuItemAttributesById:_,stickyIds:w}=(0,a.useMemo)((()=>r(n,s,c)),[s,n,...i]);return a.createElement(m.vN,{container:t,menuItemAttributesById:_,getObject:s,allElements:l,stickyIds:w},a.createElement(g.Ss,{ids:d},a.createElement(g.sF,{menuGroupById:h},a.createElement(f.zK,{subMenuById:u},a.createElement(b.Ny,null,a.createElement(y.Cs,null,o))))))},w=e=>a.createElement(o.Flex,(0,s.A)({column:!0,height:"100%",padding:[0,1],overflow:"hidden","data-testid":"dashboard-list"},e)),k=e=>{const t=(0,a.useRef)(),[n,r]=(0,a.useState)();return(0,a.useLayoutEffect)((()=>{r(t.current)}),[]),a.createElement(o.Flex,{ref:t,height:"100%",width:"100%",overflow:{vertical:"auto"}},n&&a.createElement(_,(0,s.A)({},e,{container:n})))}},26688:(e,t,n)=>{n.d(t,{A:()=>a});var s=n(94390);const a=()=>e=>{let{template:t,formData:n={},entityProps:a}=e;return{[s.D9]:!0,template:t,name:"",sourceType:"user",type:"job",formData:n,entityProps:a}}},56489:(e,t,n)=>{n.d(t,{A:()=>C});n(62953);var s=n(96540),a=n(83199),o=n(540),r=n(15327),i=n(74618),c=n(45765),l=n(78969),d=n(3914),h=n(80925),u=n(35304),p=n(69765),m=n(21591),g=n(67544),f=n(68980),b=n(28738),y=n(4967),v=n(8018),_=n(87659),w=n(24198),k=n(91517),T=n(2025),S=n(18202);const M=(0,s.memo)((e=>{let{id:t,spaceId:n,roomId:o,chartId:r,name:i}=e;const[c,,l,u]=(0,_.A)(),{fullyLoaded:m,processing:b}=(0,f.fz)(t),y=(0,T.DH)(t,{onSuccess:()=>{(0,w.jE)("Chart added to ".concat(i)),u()}}),v=c&&m,M=(0,d.ns)(n,"slug"),x=(0,p.wz)(o,"slug"),C=(0,g.nM)(t),P=(0,k.A)(c&&t,{spaceId:n,spaceSlug:M,roomId:o,roomSlug:x}),B=(0,h.e)();(0,s.useEffect)((()=>{if(P||!v)return;const e=B.getNode({id:r}),t=(0,S.A)(e.getAttributes());C(t.contextScope,t,(()=>setTimeout(y,100)))}),[v,P]);const 
q=c&&(b||P);return s.createElement(a.Button,{icon:"plus",onClick:l,isLoading:q,disabled:q,alignSelf:"end"})})),x=e=>{let{spaceId:t,roomId:n,chartId:o}=e;return[{id:"name",accessorKey:"name",header:"Dashboards",cell:e=>{let{getValue:t}=e;return s.createElement(a.Flex,{alignItems:"center",gap:3},s.createElement(a.Icon,{name:"dashboards",size:"small",color:"text"}),s.createElement(a.Text,null,t()))}},{id:"plusButton",accessorKey:"id",header:"",cell:e=>{let{getValue:a,row:r}=e;return s.createElement(M,{id:a(),spaceId:t,roomId:n,chartId:o,name:r.original.name})}}]},C=e=>{let{onClose:t}=e;(0,u.A)();const n=(0,p.ID)(),_=(0,p.XA)("name"),w=(0,d.vt)(),{params:k={},params:{chartId:T}}=(0,o.A)("addToDashboardModal"),S=(0,m.q)(),M=(0,f.Sf)(S),[C,P]=(0,s.useState)(""),[B,q]=(0,s.useState)(!1),I=C.length>0,L=(0,g.W6)(w,n,{onSuccess:t}),A=(0,h.e)().getNode({id:T}),E=(0,s.useMemo)((()=>x({spaceId:w,roomId:n,chartId:T})),[w,n,T]),D=()=>{if(!A)return;const{aggregationMethod:e,selectedDimensions:t,groupBy:n,groupByLabel:s,groupingMethod:a,chartType:o,selectedLabels:r,nodesScope:i,selectedInstances:c,selectedNodes:l,contextScope:d}=A.getAttributes(),[h]=d;L(C,d,{...(0,y.L)(h),aggregationMethod:e,selectedDimensions:t,groupBy:n,groupByLabel:s,groupingMethod:a,chartType:o,selectedLabels:r,nodesScope:i,selectedInstances:c,selectedNodes:l})},N=(0,m.RQ)();return s.createElement(r.GO,{onClose:t},s.createElement(i.z,{onClose:t,title:"Add to Dashboard"}),s.createElement(c.U,null,"Select Dashboard"),s.createElement(r.Yv,{gap:3,overflow:"hidden",height:"100%"},s.createElement(a.H6,null,"Add chart to one or more dashboards from the ",_),N?s.createElement(a.Table,{dataColumns:E,data:M}):s.createElement(b.A,{title:"Loading dashboards..."}),s.createElement(a.Flex,{column:!0,padding:[3,0],gap:1},s.createElement(a.H6,null,"Create a new dashboard and add ",T," to it"),s.createElement(v.A,{value:C,label:"Name",onChange:e=>P(e.target.value),isValid:B,setIsValid:q,isDirty:I,instantFeedback:"all",onKeyDown:e=>e.keyCode===l.I7&&B&&D}),s.createElement(a.Button,{label:"Create & add",onClick:D,disabled:!A||!C}))))}},667:(e,t,n)=>{n.d(t,{A:()=>r});n(62953);var s=n(96540),a=n(27776),o=n(38819);const r=()=>{const e=(0,o.PP)(),[t,n]=(0,a.XL)(),{reset:r}=(0,a.b8)(),i=(0,s.useCallback)((()=>{r(),(0,o.Z8)({...e,integrationsModalOpen:"true"}),n("true")}),[r,e]);return{isIntegrationsVisible:t,hideIntegrations:(0,s.useCallback)((()=>{(0,o.Z8)({...e,integrationsModalOpen:""}),n("")}),[e]),onIntegrationsClick:i}}},99739:(e,t,n)=>{n.d(t,{S:()=>L,A:()=>A});var s=n(58168),a=(n(62953),n(96540)),o=n(83199),r=n(87659),i=n(69765),c=n(31348),l=n(62329),d=(n(41393),n(81454),n(8711)),h=n(55463),u=n(96083),p=n(18682);const m=(0,d.default)(o.Flex).withConfig({displayName:"adminsList__RowFlex",componentId:"sc-1pb9ob2-0"})(["&:hover{background:",";}"],(0,o.getColor)("elementBackground")),g=(0,d.default)(o.Icon).withConfig({displayName:"adminsList__StyledIcon",componentId:"sc-1pb9ob2-1"})(["cursor:pointer;"]),f=e=>{let{...t}=e;const n=(0,h.i3)();return 
a.createElement(o.Flex,(0,s.A)({column:!0,border:{side:"top",color:"borderSecondary"},flex:"grow",padding:[1,0],height:{max:50},overflow:"auto"},t),n.map((e=>a.createElement(m,{flex:{grow:0,shrink:0},key:e.id,padding:[0,4],justifyContent:"between",height:10,alignItems:"center"},a.createElement(o.Flex,{key:e.id,gap:2,alignItems:"center"},a.createElement(u.A,{src:e.avatarURL,title:e.name}),a.createElement(o.Text,null,e.name),a.createElement(o.Text,{color:"textLite"},e.email)),a.createElement(g,{onClick:(0,p.C)(e.email,{text:"Admin email address copied to your clipboard."}),size:"small",name:"copy",color:"primary"})))))},b=(0,d.default)(o.Icon).withConfig({displayName:"styled__StyledWarningIcon",componentId:"sc-q96c2w-0"})(["border-radius:50%;overflow:hidden;background:",";"],(0,o.getColor)(["neutral","white"])),y=(0,d.default)(o.Icon).withConfig({displayName:"styled__StyledCheckmarkIcon",componentId:"sc-q96c2w-1"})(["border-radius:50%;overflow:hidden;background:",";padding:6px;width:24px;height:24px;"],(0,o.getColor)("elementBackground")),v=(0,d.default)(o.Icon).withConfig({displayName:"styled__StyledIcon",componentId:"sc-q96c2w-2"})(["transform:",";"],(e=>{let{closed:t}=e;return t?"none":"rotate(180deg)"}));var _=n(4659);const w=e=>{let{gaPrefix:t="onboarding",...n}=e;return a.createElement(o.Flex,(0,s.A)({column:!0,gap:1,color:"textDescription"},n),a.createElement(o.Text,null,"Need help?"),a.createElement(o.Text,null,"Use our"," ",a.createElement(_.A,{href:"https://community.netdata.cloud/",target:"_blank","data-ga":"".concat(t,"::click-forums::allpages")},"forums")," ","or public"," ",a.createElement(_.A,{href:"https://discord.gg/mPZ6WZKKG2",target:"_blank","data-ga":"".concat(t,"::click-discord::allpages")},"discord channel")))};var k=n(9683),T=n(83084),S=n(46741),M=n(68831),x=n(47767),C=n(71847);const{demoSlug:P}=M.A,B=(0,d.default)(o.Button).withConfig({displayName:"headerButtons__StyledButton",componentId:"sc-11mwk9m-0"})(["&&{padding:2px 16px;font-size:12px;height:auto;width:auto;min-width:96px;}"]),q=()=>{const e=(0,x.Zp)();return a.createElement(o.Flex,{gap:4,alignItems:"center"},a.createElement(_.A,{href:"https://learn.netdata.cloud/docs/deployment-guides/deployment-strategies",rel:"noopener noreferrer",target:"_blank"},"Possible Deployment Strategies"),!window.envSettings.onprem&&a.createElement(B,{"data-ga":"no-nodes-view::click-demo::no-nodes-header",flavour:"default",onClick:t=>{e("/spaces/".concat(P)),(0,C.H)("no-nodes-view","click-demo","no-nodes-header"),t.preventDefault()},label:"Live Demo"}))};var I=n(77173);const L=()=>a.createElement(a.Fragment,null,a.createElement(o.Flex,{column:!0,"data-testid":"noNodesView-memberSection",gap:1},a.createElement(o.Flex,{alignItems:"center","data-testid":"noNodesView-memberHeader",gap:4,height:10,padding:[0,3]},a.createElement(y,{"data-testid":"noNodesView-memberHeaderIcon",name:"checkmark_s",color:"textLite"}),a.createElement(o.Flex,{justifyContent:"between",flex:!0},a.createElement(o.H3,{color:"textDescription","data-testid":"noNodesView-memberHeaderText"},"You are a member and only space admins can connect new nodes"),a.createElement(q,null))),a.createElement(o.Flex,{"data-testid":"noNodesView-memberDescription",padding:[0,0,2,13]},a.createElement(o.Text,{color:"textDescription","data-testid":"noNodesView-memberDescriptionText"},"Contact an admin from the space in order to do 
so"))),a.createElement(o.Flex,{"data-testid":"noNodesView-memberAdminsListSection",padding:[0,0,0,12]},a.createElement(f,{"data-testid":"noNodesView-memberAdminsList"}))),A=e=>{let{videoId:t}=e;const n=(0,i.ID)(),[d,{onAddNodes:h,selectedNodes:u}]=(0,c.A)(n),{claimedNodeIds:p}=d,m=p.length>0,[g,f]=(0,r.A)(!0),_=(0,S.JT)("node:Create");return a.createElement(T.A,{column:!1,"data-testid":"noNodesViewPage",gap:12,padding:[6],margin:[0,0,8],overflow:"auto"},a.createElement(o.Flex,{column:!0,"data-testid":"noNodesView-cmdSection",flex:{grow:0,shrink:1},width:"100%",gap:2},a.createElement(o.Flex,{"data-testid":"noNodesView-warningBanner",alignItems:"center",background:"warningBackground",gap:4,height:{min:10},padding:[0,3],round:!0},a.createElement(b,{"data-testid":"noNodesView-warningBannerIcon",name:"exclamation",color:"warning"}),a.createElement(o.Text,{"data-testid":"noNodesView-warningBannerText"},"You have no nodes.")),_&&a.createElement(a.Fragment,null,a.createElement(o.Flex,{alignItems:"center","data-testid":"noNodesView-adminSection",justifyContent:"between",onClick:m?f:null,cursor:m?"pointer":null},a.createElement(o.Flex,{alignItems:"center","data-testid":"noNodesView-adminHeader",gap:4,height:10,padding:[0,3]},a.createElement(y,{"data-testid":"noNodesView-adminHeaderIcon",name:"checkmark_s",color:"textLite"}),a.createElement(o.H3,{"data-testid":"noNodesView-adminHeaderText",color:"textDescription"},"Connect new nodes")),a.createElement(o.Flex,{gap:4,alignItems:"center"},a.createElement(q,null),m&&a.createElement(v,{closed:!g,"data-testid":"noNodesView-adminClaimedNodesIcon",name:"chevron_down",color:"textLite"}))),a.createElement(o.Collapsible,{"data-testid":"noNodesView-adminClaimNodesContainer",open:g},a.createElement(o.Flex,{"data-testid":"noNodesView-adminClaimNodes",padding:[0,0,0,12]},a.createElement(I.A,{rooms:[n]})))),!_&&a.createElement(L,null),m&&a.createElement(a.Fragment,null,a.createElement(o.Flex,{alignItems:"center","data-testid":"noNodesView-availableNodesHeader",gap:4,height:10,padding:[0,3]},a.createElement(y,{"data-testid":"noNodesView-availableNodesHeaderIcon",name:"checkmark_s",color:"textLite"}),a.createElement(o.H3,{color:"textDescription","data-testid":"noNodesView-availableNodesHeaderText"},"Add already available nodes")),a.createElement(o.Flex,{height:"100%","data-testid":"noNodesView-availableNodesContainer",padding:[0,0,0,12]},a.createElement(l.A,(0,s.A)({"data-testid":"noNodesView-availableNodesList"},d,{onAddNodes:h,canAddNodes:!!u.length,padding:[2,0,8]}))))),t&&a.createElement(o.Flex,{column:!0,"data-testid":"noNodesView-videoSection",flex:{shrink:0,grow:0}},a.createElement(k.A,{"data-testid":"noNodesView-onboardingVideo",height:296,width:520,videoId:t}),a.createElement(w,{"data-testid":"noNodesView-needHelp",padding:[8,0]})))}},78062:(e,t,n)=>{n.d(t,{A:()=>h});var s=n(58168),a=n(96540),o=n(83199),r=n(28146),i=n(69765),c=n(3914),l=n(53285);var d;const h=(d=o.Button,e=>{const t=(0,i.XA)(),n=(0,c.dg)();return a.createElement(l.A,{permission:"node:Create"},(i=>{const c=a.createElement(d,(0,s.A)({disabled:!i},e));return i?c:a.createElement(r.A,{content:n?"You are viewing your local node, connect to cloud and connect multiple nodes to view at once":"Only admin users can add nodes to ".concat(t.name),"data-testid":"addNodeTooltipContainer"},a.createElement(o.Box,null,c))}))})},78940:(e,t,n)=>{n.d(t,{DK:()=>d,Iv:()=>h,SB:()=>l,SX:()=>c,Wu:()=>a,Yo:()=>i,_V:()=>o,jH:()=>r,qt:()=>s,xc:()=>u});const s="silencingRules",a={system:{text:"All users",tooltip:"Rule 
affects all users"},personal:{text:"Myself",tooltip:"Limit the silencing effect to your account only"}},o="system",r={label:"All rooms",value:""},i={now:{text:"Silence for a specific duration",tooltip:"Rule starts immediately and lasts until the specified duration"},schedule:{text:"Schedule silence",tooltip:"Schedule the rule to start taking effect at some point at the future"}},c="now",l={untilTurnedOff:"Until turned off",oneHour:"1 hour",sixHours:"6 hours",twelveHours:"12 hours",oneDay:"1 day",custom:"Custom"},d={label:"Until turned off",value:"untilTurnedOff"},h={border:{side:"all",color:"inputBorder"},padding:[1,2],margin:[0,0,.5,0],round:!0,justifyContent:"start"},u={ErrInvalidName:"No rule name provided",ErrCodeInvalidStartsAt:"Invalid or no start date provided",ErrCodeInvalidSLastUntil:"Invalid or no end date provided",ErrCodeInvalidScheduleDuration:"Invalid duration selected",ErrCodeInvalidSeverity:"Invalid severity",ErrCodeInvalidIntegration:"Invalid integration",ErrCodeNotOwnRule:"Not permitted to create system rules"}},90535:(e,t,n)=>{n.d(t,{A:()=>f});n(41393),n(81454),n(62953);var s=n(96540),a=n(83199),o=n(47762),r=n(11128);const i=e=>{let{children:t}=e;return s.createElement(a.Flex,{"data-testid":"selected-nodes-container",alignItems:"center",width:"100%"},s.createElement(a.Flex,{alignItems:"center"},s.createElement(a.TextSmall,{whiteSpace:"nowrap","data-testid":"selected-nodes-container-message",color:"primary"},"Selected filters:")),s.createElement(a.Flex,{overflow:"hidden",padding:[1],gap:1,alignItems:"center",flexWrap:!0},t))},c=(0,s.memo)(i);var l=n(4659);const d=e=>{let{onRemove:t}=e;return s.createElement(a.Box,{"data-testid":"remove-button",as:a.Icon,name:"close_circle",onClick:t,cursor:"pointer",width:3,height:3,color:"textLite"})},h=(0,s.forwardRef)(((e,t)=>{let{removeFilter:n,group:o,id:r,value:i,isAnchorDisabled:c,onClick:h}=e;const[u,p]=i.split("|"),m=p?"".concat(u," > ").concat(p):u;return s.createElement(a.Flex,{width:"100%",ref:t,"data-testid":"selected-node-item-".concat(r)},s.createElement(a.Pill,{flavour:"neutral",hollow:!0},s.createElement(a.Flex,{position:"relative",gap:2,alignItems:"center"},h?s.createElement(l.A,{Component:a.TextSmall,disabled:c,onClick:h,cursor:"pointer",color:"text",hoverColor:"primary",visitedColor:"accent",disabledColor:"textLite"},m):s.createElement(a.TextSmall,null,m),s.createElement(d,{onRemove:()=>n({param:o,removedValue:r}),id:r}))))})),u=(0,s.memo)(h),p={selectedNodeIds:"Node",nodeStatuses:"Status",nodeLabels:"Host label",nodeCapabilities:"Capability",nodeVersions:"Version"},m=(0,s.forwardRef)(((e,t)=>{let{group:n,values:o,hasUnion:r,union:i="AND",removeFilter:c,Component:l=u}=e;const d=p[n];return null!==o&&void 0!==o&&o.length?s.createElement(s.Fragment,{key:"filter"},r&&s.createElement(a.TextSmall,{strong:!0},i),s.createElement(a.Pill,{flavour:"neutral",hollow:!0,gap:1,TextComponent:a.Flex,textProps:{alignItems:"center",gap:1},flexWrap:!0},s.createElement(a.TextNano,{strong:!0},d,":"),o.map(((e,t)=>s.createElement(s.Fragment,{key:e},t>0&&s.createElement(a.TextSmall,{strong:!0},"OR"),s.createElement(l,{removeFilter:c,id:e,value:e,group:n})))))):null})),g=(0,s.forwardRef)(((e,t)=>{let{removeFilter:n,value:a}=e;const r=(0,o.xY)(a,"name"),i=(0,o.xY)(a,"isOffline"),c=(0,o.d3)(a);return s.createElement(u,{ref:t,removeFilter:n,group:"selectedNodeIds",id:a,value:r,isAnchorDisabled:i,onClick:c})})),f=(0,s.memo)((e=>{let{flavour:t}=e;const[[n,o=[]],i]=(0,r._e)({extraKey:t,merge:!1});return 
n.length||o.length?s.createElement(a.Flex,{width:"100%",overflow:"hidden"},s.createElement(c,null,n.map(((e,t)=>{let[n,a]=e;return s.createElement(m,{key:n,group:n,values:a,hasUnion:t>0,removeFilter:i})})),s.createElement(m,{group:"selectedNodeIds",values:o,hasUnion:!!n.length&&!!o.length,union:"OR",removeFilter:i,Component:g}))):s.createElement("div",null)}))},35243:(e,t,n)=>{n.d(t,{G:()=>i,N:()=>r});n(3064),n(98992),n(72577);var s=n(47444),a=n(80925);const o=(0,s.eU)({key:"currentChartsContainerKey",default:null}),r=()=>{const e=(0,a.e)(),t=(0,s.vc)(o);return e&&e.getRoot().getChildren().find((e=>e.match({id:t})))},i=()=>(0,s.lZ)(o)},60177:(e,t,n)=>{n.d(t,{$n:()=>d,GK:()=>c,Ml:()=>o,R8:()=>r,_x:()=>u,jf:()=>i,nr:()=>a,ul:()=>l,ve:()=>h});var s=n(47444);const a=(0,s.Iz)({key:"homePageNodeDetailsId",default:null}),o={loaded:!1,error:!1,nodes:{},replicationFactor:{},totalChildren:0,totalParents:0},r=(0,s.Iz)({key:"replicationStats",default:o}),i={loaded:!1,error:!1,alerts:[],nodes:{critical:0,warning:0,total:0,hosts:{}}},c=(0,s.Iz)({key:"alertsStats",default:i}),l={loaded:!1,error:!1,stability:{}},d=(0,s.Iz)({key:"stability",default:l}),h={loaded:!1,error:!1,dataRetention:[],metricsCollected:0},u=(0,s.Iz)({key:"chartsStats",default:h})},89879:(e,t,n)=>{n.d(t,{A:()=>f});var s=n(58168),a=(n(3064),n(41393),n(98992),n(72577),n(81454),n(62953),n(96540)),o=n(47767),r=n(83199),i=n(31604),c=n(47762),l=n(81048),d=n(35119),h=n(50065),u=n(12352);const p=e=>{var t;const n=null===(t=Object.entries(l.j8).find((t=>{let[,n]=t;return n==e})))||void 0===t?void 0:t[0];return d.$S[n]||"offline"},m=e=>{let{id:t,hostname:n,status:s,flavour:i}=e;const l=(0,o.Zp)(),d=(0,c.Zl)(t),h="sidebar"==i,u=(0,a.useCallback)((()=>l(d)),[d]);return a.createElement(r.Flex,{key:t,gap:2,justifyContent:"between"},a.createElement(r.Flex,{gap:2},a.createElement(r.Icon,{name:"dot",width:"8px",color:p(s)}),a.createElement(r.Text,null,n)),h&&a.createElement(r.Icon,{name:"goToNode",color:"text",onClick:u,cursor:"pointer"}))},g=e=>{let{nodes:t,flavour:n,limit:o=10}=e;return t=t||[],t.length?"sidebar"==n||t.length<=o?t.map((e=>a.createElement(m,(0,s.A)({key:e.id,flavour:n},e)))):a.createElement(r.Flex,{column:!0,gap:2},t.slice(0,o).map((e=>a.createElement(m,(0,s.A)({key:e.id,flavour:n},e)))),a.createElement(r.Text,{color:"primary"},t.length-o," more items")):"-"},f=e=>{let{id:t,flavour:n="tooltip"}=e;const s=(0,i.c8)(),{loaded:o,nodes:c}=s||{},{children:l,parents:d}=(null===c||void 0===c?void 0:c[t])||{};return o?"sidebar"==n?a.createElement(h.A,{name:"Replication"},a.createElement(u.A,{size:"small",name:"Parents"},a.createElement(g,{nodes:d,flavour:n})),a.createElement(u.A,{size:"small",name:"Children"},a.createElement(g,{nodes:l,flavour:n}))):a.createElement(r.Flex,{column:!0,gap:2},a.createElement(r.Flex,{column:!0,gap:1},a.createElement(r.Text,null,"Parents"),a.createElement(g,{nodes:d,flavour:n})),a.createElement(r.Flex,{column:!0,gap:1},a.createElement(r.Text,null,"Children"),a.createElement(g,{nodes:l,flavour:n}))):null}},35119:(e,t,n)=>{n.d(t,{$S:()=>d,AZ:()=>p,Cc:()=>u,Hl:()=>g,Hn:()=>l,Ug:()=>o,XJ:()=>c,jZ:()=>i,nv:()=>r,q5:()=>h,vd:()=>f,w9:()=>s});const s={none:{label:"None",value:"none",canAddNodes:!0},status:{label:"Status",value:"status",canAddNodes:!1},os:{label:"OS",value:"os",canAddNodes:!0},version:{label:"Agent version",value:"version",canAddNodes:!1},technology:{label:"Technology",value:"technology",canAddNodes:!0},stability:{label:"Connection 
stability",value:"stability",canAddNodes:!1},replication:{label:"Replication factor",value:"replication",canAddNodes:!1},cloudProvider:{label:"Cloud provider",value:"cloudProvider",canAddNodes:!1},cloudRegion:{label:"Cloud region",value:"cloudRegion",canAddNodes:!1},instanceType:{label:"Instance type",value:"instanceType",canAddNodes:!1}},a={r_1:"None",r_2:"Single","r_*":"Multi"},o={replication:a},r=s.none,i=s.status,c=[r,s.status,s.os,s.technology,s.version,s.replication,s.cloudProvider,s.cloudRegion,s.instanceType],l=[i,s.stability,s.replication],d={live:"success",offline:"offline",stale:"stale",unseen:"unseen"},h={r_1:["blue","indigo"],r_2:["purple","lilac"],"r_*":["purple","lilacFocus"]},u={[s.status.value]:d,[s.stability.value]:{stable:"success",unstable:"error"},[s.replication.value]:h},p={[s.os.value]:e=>({group:e.os.nm,type:e.os.id}),[s.version.value]:e=>({group:e.version}),[s.status.value]:e=>({group:e.nodeStatus}),[s.technology.value]:e=>({group:e.technology,type:e.technology}),[s.replication.value]:e=>({group:e.replicationFactor}),[s.cloudProvider.value]:e=>{var t;return{group:null===(t=e.labels)||void 0===t?void 0:t.cloud_provider}},[s.cloudRegion.value]:e=>{var t;return{group:null===(t=e.labels)||void 0===t?void 0:t._cloud_instance_region}},[s.instanceType.value]:e=>{var t;return{group:null===(t=e.labels)||void 0===t?void 0:t._cloud_instance_type}}},m={[s.replication.value]:e=>a[e],default:e=>e},g=e=>{const t=m[e];return t||m.default},f={isHover:!1,nodes:[]}},31604:(e,t,n)=>{n.d(t,{A$:()=>i,N3:()=>p,P9:()=>y,SW:()=>g,c8:()=>l,jg:()=>u,kf:()=>f,s6:()=>d,xs:()=>v});var s=n(47444),a=n(60177),o=n(3914),r=n(69765);const i=()=>{const e=(0,o.vt)(),t=(0,r.ID)();return(0,s.L4)((0,a.nr)({spaceId:e,roomId:t}))},c=(0,s.K0)({key:"replicationStatsState",get:e=>{let{spaceId:t,roomId:n}=e;return()=>(0,a.R8)({spaceId:t,roomId:n})},set:e=>{let{spaceId:t,roomId:n}=e;return(e,s)=>{let{set:o}=e;o((0,a.R8)({spaceId:t,roomId:n}),s)}},cachePolicy_UNSTABLE:{eviction:"most-recent"}}),l=()=>{const e=(0,o.vt)(),t=(0,r.ID)();return(0,s.vc)((0,a.R8)({spaceId:e,roomId:t}))},d=()=>{const e=(0,o.vt)(),t=(0,r.ID)();return(0,s.lZ)(c({spaceId:e,roomId:t}))},h=(0,s.K0)({key:"alertsStatsState",get:e=>{let{spaceId:t,roomId:n}=e;return()=>(0,a.GK)({spaceId:t,roomId:n})},set:e=>{let{spaceId:t,roomId:n}=e;return(e,s)=>{let{set:o}=e;o((0,a.GK)({spaceId:t,roomId:n}),s)}},cachePolicy_UNSTABLE:{eviction:"most-recent"}}),u=()=>{const e=(0,o.vt)(),t=(0,r.ID)();return(0,s.vc)(h({spaceId:e,roomId:t}))},p=()=>{const e=(0,o.vt)(),t=(0,r.ID)();return(0,s.lZ)(h({spaceId:e,roomId:t}))},m=(0,s.K0)({key:"stabilityState",get:e=>{let{spaceId:t,roomId:n}=e;return()=>(0,a.$n)({spaceId:t,roomId:n})},set:e=>{let{spaceId:t,roomId:n}=e;return(e,s)=>{let{set:o}=e;o((0,a.$n)({spaceId:t,roomId:n}),s)}},cachePolicy_UNSTABLE:{eviction:"most-recent"}}),g=()=>{const e=(0,o.vt)(),t=(0,r.ID)();return(0,s.vc)(m({spaceId:e,roomId:t}))},f=()=>{const e=(0,o.vt)(),t=(0,r.ID)();return(0,s.lZ)(m({spaceId:e,roomId:t}))},b=(0,s.K0)({key:"chartsStatsState",get:e=>{let{spaceId:t,roomId:n}=e;return()=>(0,a._x)({spaceId:t,roomId:n})},set:e=>{let{spaceId:t,roomId:n}=e;return(e,s)=>{let{set:o}=e;o((0,a._x)({spaceId:t,roomId:n}),s)}},cachePolicy_UNSTABLE:{eviction:"most-recent"}}),y=()=>{const e=(0,o.vt)(),t=(0,r.ID)();return(0,s.vc)(b({spaceId:e,roomId:t}))},v=()=>{const e=(0,o.vt)(),t=(0,r.ID)();return(0,s.lZ)(b({spaceId:e,roomId:t}))}},69388:(e,t,n)=>{n.d(t,{A:()=>u});n(62953);var 
s=n(96540),a=n(92138),o=n(83957),r=n(45467),i=n(67742),c=n(23931),l=n(27467),d=n(63129),h=n(61658);const u=function(e,t){let{spaceId:n,autorun:u=!1}=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{};const[p,m]=(0,s.useState)(null),{after:g,before:f}=(0,l.rW)(),b=(0,s.useRef)();b.current={after:g<0?g:Math.floor(g/1e3),before:g<0?0:Math.ceil(f/1e3)};const y=(0,s.useRef)(!0),v=e=>{if(e&&null!==e&&void 0!==e&&e.response){const{data:t}=e.response,{errorMsgKey:n}=t;m(n)}else k((e=>({...e,contextsHardHash:"invalid"}))),m(e)},_=(0,s.useMemo)((()=>(0,h.O)(p)?5e3:65e3),[p]),w=(0,s.useRef)(),k=(0,c.DQ)(e,!0),T=e=>{let{results:t,versions:n}=e;k((e=>({...e,contextsHardHash:"invalid",...n}))),m((!t||!Object.keys(t).length)&&"No data for this period")},{contextsHardHash:S}=(0,c.jI)(e),M=(0,a.A)(S,!0);return(0,i.A)((()=>y.current=!0),5e3),(0,r.A)((()=>{k((e=>({...e,contextsHardHash:"invalidating"}))),y.current=!0}),[t]),(0,o.A)((()=>({cache:!1,key:"spaces.".concat(n,".rooms.").concat(e,".charts"),polling:!1,autorun:u||Boolean(n&&e),fetch:()=>(y.current=!1,(0,d.a9)(n,e,t,{...b.current})),association:{getIds:()=>(0,c.Uo)({roomId:e,merge:w.current===t}),getError:()=>(0,c.Zr)({id:e,key:"error"}),getLoaded:()=>(0,c.Zr)({id:e,key:"loaded"}),getUpdatedAt:()=>(0,c.Zr)({id:e,key:"updatedAt"})},getResource:t=>(0,c.YP)({id:e,key:t}),getResourcesInitializer:()=>{const n=(0,c.b0)({id:e,merge:w.current===t,nodeIds:t});return w.current=t,n},getResourceInitialState:e=>({fullyLoaded:!0,...e}),onFail:v,pollingOptions:{pollingInterval:_},onReceive:T,after:g,nodeIds:t,force:!0,skip:!y.current||!M&&!!S||"invalidating"===M&&!!S})),[n,e,_,S,g,f]),p}},35304:(e,t,n)=>{n.d(t,{A:()=>h});n(9920),n(41393),n(98992),n(3949),n(81454);var s=n(96540),a=n(47444),o=n(68980),r=n(99090),i=n(3914),c=n(69765),l=n(63129),d=n(21591);const h=()=>{const e=(0,i.vt)(),t=(0,c.ID)(),n=(0,a.Zs)((e=>{let{set:n}=e;return function(){let{data:{results:e}={}}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};if(n((0,d.z_)({id:t,key:"loaded"}),!0),null!==e&&void 0!==e&&e.length){const s=e.sort(((e,t)=>e.name.localeCompare(t.name,void 0,{sensitivity:"accent",ignorePunctuation:!0})));s.forEach((e=>n((0,o._d)(e.id),(t=>({...t,loaded:!0,...e})))));const a=s.map((e=>{let{id:t}=e;return t}));n((0,d.z_)({id:t,key:"ids"}),a)}}})),h=(0,s.useCallback)((()=>{(0,l.SJ)(e,t).then(n).catch((()=>{}))}),[e,t]);return(0,r.A)((()=>({polling:!1,enabled:!!e&&!!t,fetch:()=>(0,l.SJ)(e,t),onReceive:n})),[e,t]),h}},18925:(e,t,n)=>{n.d(t,{xS:()=>h,yK:()=>d,z2:()=>u});n(62953);var s=n(96540),a=n(3914),o=n(69765),r=n(71835),i=n(61649),c=n(45894),l=n(44644);const d=function(){let{spaceId:e,roomId:t,onSuccess:n}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const s=(0,a.vt)(),d=(0,o.ID)(),[,h]=(0,r.A)(),u=(0,l.A3)({spaceId:s,roomId:d});return(0,i.A)((function(){let{type:a,entity:o,scope:r="personal",name:i="default",path:l,value:p={}}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return(0,c.xt)(e||s,{type:a,entity:o,scope:r,name:i,path:l,value:p,roomID:t||d}).then((e=>{let{data:t}=e;u(t),null===n||void 0===n||n(t)})).catch((e=>{var t;h((null===e||void 0===e||null===(t=e.response)||void 0===t?void 0:t.data)||(null===e||void 0===e?void 0:e.message)||e)}))}),[e,t,s,d])},h=function(e){let{spaceId:t,roomId:n,onSuccess:s}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};const 
d=(0,l.ti)(e),h=(0,a.vt)(),u=(0,o.ID)(),[,p]=(0,r.A)(),m=(0,l.Vd)({spaceId:h,roomId:u});return(0,i.A)((function(){let{type:a,entity:o,scope:r,name:i,path:l,value:g}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return(0,c.cD)(t||h,e,{type:a||d.type,entity:o||d.entity,scope:r||d.scope,name:i||d.name,path:l||d.path,value:g||d.value,version:d.version,roomID:n||u}).then((e=>{let{data:t}=e;m({...t,prevSetting:d}),null===s||void 0===s||s(t)})).catch((e=>{var t;p((null===e||void 0===e||null===(t=e.response)||void 0===t?void 0:t.data)||(null===e||void 0===e?void 0:e.message)||e)}))}),[t,n,h,u,d])},u=function(e){let{spaceId:t,onSuccess:n}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};const o=(0,a.vt)(),[,i]=(0,r.A)(),d=(0,l.G4)(e,{spaceId:o});return(0,s.useCallback)((async()=>{try{const s=await(0,c.A8)(t||o,e);d(),null===n||void 0===n||n(s)}catch(a){var s;i((null===a||void 0===a||null===(s=a.response)||void 0===s?void 0:s.data)||(null===a||void 0===a?void 0:a.message)||a)}}),[])}},45894:(e,t,n)=>{n.d(t,{A8:()=>c,Uc:()=>o,cD:()=>i,xt:()=>r});var s=n(26655),a=n(49286);const o=function(e,t){let{types:n=[],entities:o=[],paths:r=[],names:i=[]}=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{};return s.A.post("/api/v3/spaces/".concat(e,"/rooms/").concat(t,"/settings/search"),{types:n,entities:o,paths:r,names:i},{transform:e=>(0,a.bn)(e,{depth:0})})},r=(e,t)=>s.A.post("/api/v3/spaces/".concat(e,"/settings"),t,{transform:a.bn}),i=(e,t,n)=>s.A.put("/api/v3/spaces/".concat(e,"/settings/").concat(t),n,{transform:a.bn}),c=(e,t)=>s.A.delete("/api/v3/spaces/".concat(e,"/settings/").concat(t),{transform:a.bn})},15041:(e,t,n)=>{n.d(t,{CB:()=>o,FU:()=>r,gc:()=>i});var s=n(47444),a=n(38819);const o=(0,s.Iz)({key:"settingAtom",default:{}}),r=(0,s.Iz)({key:"settingAtom",default:[]}),i=(0,s.Iz)({key:"settingSelectionAtom",default:e=>{let{spaceId:t,roomId:n,type:s,entity:o}=e;const r=(0,a.W6)([t,n,s,o,"setting"].join("-"));return"undefined"!==r&&"null"!==r&&r?r:null},effects:e=>{let{spaceId:t,roomId:n,type:s,entity:o}=e;return[e=>{let{onSet:r}=e;r(((e,r)=>{if(e!==r){const r=[t,n,s,o,"setting"].join("-");if(!e)return void(0,a.Pg)([r]);(0,a.Z8)({[r]:e})}}))}]}})},40933:(e,t,n)=>{n.d(t,{_:()=>d});n(17333),n(41393),n(14905),n(8159),n(98992),n(54520),n(81454),n(8872),n(37550),n(62953);var s=n(47444),a=n(3914),o=n(69765),r=n(15041);const i={name:(e,t)=>{const n="string"===typeof t?t.toLowerCase():"";return Object.entries(e).some((e=>{let[t,s]=e;return null!==s&&"undefined"!==typeof s&&(Array.isArray(s)?s.some((e=>String(e).toLowerCase().includes(n))):("object"===typeof s&&Object.keys(s).some((e=>String(s[e]).toLowerCase().includes(n))),String(s).toLowerCase().includes(n)))}))}},c=e=>t=>!e.some((e=>{let[n,s]=e;return i[n]?!i[n](t,s):!(e=>(t,n)=>{if(!Array.isArray(n)||!n.length)return!0;let s=t[e];return"number"===typeof s?(s=parseFloat(s),n.some((e=>parseFloat(e)===s))):n.includes(s)})(n)(t,s)})),l=(0,s.K0)({key:"settingsFiltered",get:e=>{let{spaceId:t,roomId:n,omit:s,keepAll:a,params:o}=e;return e=>{let{get:i}=e;const l=i((0,r.FU)({spaceId:t,roomId:n}));if(!l)return l||[];const d=s?s.split(":::"):[],h=Object.keys(o).reduce(((e,t)=>(d.includes(t)||e.push([t,o[t]]),e)),[]);if(!h.length)return l;const u=c(h);return a?l.map((e=>u(e)?e:{...e,hidden:!0})):l.filter((e=>u(e)))}},cachePolicy_UNSTABLE:{eviction:"most-recent"}}),d=function(){let{spaceId:e,roomId:t,omit:n,keepAll:r=!1,params:i={}}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const 
c=(0,a.vt)(),d=(0,o.ID)();return(0,s.vc)(l({spaceId:e||c,roomId:t||d,omit:n,keepAll:r,params:i}))}},44644:(e,t,n)=>{n.d(t,{A3:()=>g,CS:()=>w,G4:()=>v,Mg:()=>p,Vd:()=>b,WY:()=>T,ti:()=>u});n(17333),n(9920),n(14905),n(98992),n(54520),n(3949),n(8872);var s=n(96540),a=n(47444),o=n(21283),r=n.n(o),i=n(65570),c=n(3914),l=n(69765),d=n(15041);const h=(0,a.K0)({key:"settingSelector",get:e=>t=>{let{get:n}=t;return n((0,d.CB)(e))},set:e=>(t,n)=>{let{set:s}=t;s((0,d.CB)(e),(e=>({...e,...n})))}}),u=e=>(0,a.vc)(h(e)),p=(0,a.K0)({key:"settingsInitialize",get:()=>()=>null,set:e=>{let{spaceId:t,roomId:n}=e;return(e,s)=>{let{get:a,set:o}=e;o((0,d.FU)({spaceId:t,roomId:n}),(e=>r()(e,s,((e,t)=>(0,i.Ay)(e,t,{keep:["scope","type","entity","path","name"]}))))),s.forEach((e=>{o(h(e.id),e)}))}}}),m=(0,a.K0)({key:"settingCreate",get:()=>()=>null,set:e=>{let{spaceId:t,roomId:n}=e;return(e,s)=>{let{set:a}=e;a((0,d.FU)({spaceId:t,roomId:n}),(e=>r()(e,[s],((e,t)=>(0,i.Ay)(e,t,{keep:["scope","type","entity","path","name"]}))))),a(h(s.id),s),a((0,d.gc)({spaceId:t,roomId:n,type:s.type,entity:s.entity}),s.id)}}}),g=function(){let{spaceId:e,roomId:t}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const n=(0,c.vt)(),o=(0,l.ID)(),r=(0,a.lZ)(m({spaceId:e||n,roomId:t||o}));return(0,s.useCallback)((e=>r(e)))},f=(0,a.K0)({key:"settingUpdate",get:()=>()=>null,set:e=>{let{spaceId:t,roomId:n}=e;return(e,s)=>{let{set:a}=e;const{prevSetting:o,...r}=s;a((0,d.FU)({spaceId:t,roomId:n}),(e=>e.reduce(((e,t)=>(t.scope===o.scope&&t.type===o.type&&t.entity===o.entity&&t.path===o.path&&t.name===o.name?e.push({...t,...r}):e.push(t),e)),[]))),a(h(r.id),r),a((0,d.gc)({spaceId:t,roomId:n,type:r.type,entity:r.entity}),r.id)}}}),b=function(){let{spaceId:e,roomId:t}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const n=(0,c.vt)(),o=(0,l.ID)(),r=(0,a.lZ)(f({spaceId:e||n,roomId:t||o}));return(0,s.useCallback)((e=>r(e)))},y=(0,a.K0)({key:"settingRemove",get:()=>()=>null,set:e=>{let{spaceId:t,roomId:n}=e;return(e,s)=>{var a;let{get:o,set:r}=e;r((0,d.FU)({spaceId:t,roomId:n}),(e=>e.filter((e=>e.id!==s.id)))),o((0,d.gc)({spaceId:t,roomId:n,type:s.type,entity:s.entity}))===s.id&&r((0,d.gc)({spaceId:t,roomId:n,type:s.type,entity:s.entity}),(null===(a=o((0,d.FU)({spaceId:t,roomId:n}))[0])||void 0===a?void 0:a.id)||null)}}}),v=function(e){let{spaceId:t,roomId:n}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};const o=u(e),r=(0,c.vt)(),i=(0,l.ID)(),d=(0,a.lZ)(y({spaceId:t||r,roomId:n||i}));return(0,s.useCallback)((()=>d(o)))},_={},w=e=>{var t;let{spaceId:n,roomId:s,type:o,entity:r}=e;const i=(0,c.vt)(),d=(0,l.ID)(),h=(0,a.vc)(k({spaceId:n||i,roomId:s||d,type:o,entity:r}));return(null===(t=u(h))||void 0===t?void 0:t.value)||_},k=(0,a.K0)({key:"settingSelectionSelector",get:e=>{let{spaceId:t,roomId:n,type:s,entity:a}=e;return e=>{let{get:o}=e;return o((0,d.gc)({spaceId:t,roomId:n,type:s,entity:a}))}},set:e=>{let{spaceId:t,roomId:n,type:s,entity:a}=e;return(e,o)=>{let{set:r}=e;r((0,d.gc)({spaceId:t,roomId:n,type:s,entity:a}),(e=>e===o?null:o))}}}),T=e=>{let{spaceId:t,roomId:n,type:s,entity:o}=e;const r=(0,c.vt)(),i=(0,l.ID)();return(0,a.L4)(k({spaceId:t||r,roomId:n||i,type:s,entity:o}))}},9415:(e,t,n)=>{n.d(t,{Uc:()=>l,X3:()=>i,XF:()=>c,cH:()=>r});n(41393),n(81454);var s=n(26655),a=n(49286);const 
o=e=>e.map((e=>(0,a.bn)(e))),r=e=>s.A.get("/api/v2/spaces/".concat(e,"/notifications/silencing/rules"),{transform:o}),i=(e,t)=>s.A.post("/api/v2/spaces/".concat(e,"/notifications/silencing/rules/delete"),t),c=(e,t)=>s.A.post("/api/v2/spaces/".concat(e,"/notifications/silencing/rule"),(0,a.Jz)(t)),l=(e,t)=>s.A.put("/api/v2/spaces/".concat(e,"/notifications/silencing/rule/").concat(t.id),(0,a.Jz)(t))},37171:(e,t,n)=>{n.d(t,{q:()=>p,z:()=>m});n(17333),n(41393),n(14905),n(8159),n(98992),n(54520),n(81454),n(8872),n(37550),n(62953);var s=n(47444),a=n(3914),o=n(27467),r=n(36021),i=n(88982),c=n(78940);const l=e=>(t,n)=>{const s=t[e];return!Array.isArray(n)||!n.length||(Array.isArray(s)?n.some((e=>s.includes(e))):n.includes(s))},d={ids:l("id"),roomIds:l("roomIds"),nodeIds:l("nodeIds"),hostLabels:(h="hostLabels",(e,t)=>{const n=e[h];if(!n)return!t.length;const s=Object.entries(n).map((e=>{let[t,n]=e;return"".concat(t,": ").concat(n)}));return l(h)({...e,[h]:s},t)}),alertNames:l("alertNames"),alertRoles:l("alertRoles"),alertContexts:l("alertContexts")};var h;const u=(0,s.K0)({key:"silencingRulesFiltered",get:e=>{let{extraKey:t,spaceId:n,omit:s=[],keepAll:a,allNodesRoomId:i}=e;return e=>{let{get:c}=e;const l=c((0,o.GA)({key:n,extraKey:t})),h=Object.keys(l).reduce(((e,t)=>(s.includes(t)||e.push([t,l[t]]),e)),[]),u=c((0,r.Pu)({id:n,allNodesRoomId:i})),{rules:p,...m}=u;if(!h.length)return u;const g=(e=>t=>!e.some((e=>{let[n,s]=e;return!!d[n]&&!d[n](t,s)})))(h);return a?u:{...m,rules:p.filter(g)}}}}),p=function(){let{extraKey:e,omit:t=[],keepAll:n=!1}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const o=(0,a.vt)(),r=(0,i.n)("id");return(0,s.vc)(u({extraKey:e,spaceId:o,omit:t,keepAll:n,allNodesRoomId:r}))},m=()=>{const e=(0,a.vt)();return(0,o.rI)("ids",{key:e,extraKey:c.qt,flavour:"arr"})}},36021:(e,t,n)=>{n.d(t,{FU:()=>b,Lz:()=>g,Pu:()=>d,Qu:()=>m,UJ:()=>u,_B:()=>f,_S:()=>y,ys:()=>h});n(3064),n(41393),n(98992),n(72577),n(81454);var s=n(47444),a=n(3914),o=n(69765),r=n(9415),i=n(89821),c=n(67990),l=n(88982);const d=(0,s.K0)({key:"silencingRulesState",get:e=>{let{id:t,key:n,allNodesRoomId:s}=e;return e=>{let{get:a}=e;const o=a((0,i.A)(t)),{rules:r,...l}=o,d=a((0,c.$6)({id:s,key:"nodes"})),h={...l,rules:r.map((e=>({...e,...e.nodeIds?{nodes:e.nodeIds.map((e=>{const t=d.find((t=>t.value==e));return{id:e,name:(null===t||void 0===t?void 0:t.label)||"-",notAvailable:!t}}))}:{}})))};return n?h[n]:h}},set:e=>{let{id:t,key:n}=e;return(e,s)=>{let{get:a,set:r}=e;const{rules:c}=s;r((0,i.A)(t),(e=>{if(c){const e=c.map((e=>{let{roomIds:t,nodeIds:n,...s}=e;const r=t?t.map((e=>{const{id:t,name:n}=a((0,o.LS)({id:e}));return{id:t,name:n}})):void 0;return{...s,...t?{roomIds:t}:{},...r?{rooms:r}:{},...n?{nodeIds:n}:{}}}));s.rules=e}return n?{...e,[n]:s}:{...e,...s}}))}}}),h=e=>{let{id:t,key:n}=e;const a=(0,l.n)("id");return(0,s.vc)(d({id:t,key:n,allNodesRoomId:a}))},u=(e,t)=>(0,s.lZ)(d({id:e,key:t})),p=(0,s.eU)({key:"forceUpdateRulesSelector",default:0}),m=()=>(0,s.vc)(p),g=()=>{const e=(0,a.vt)(),t=u(e),n=(0,s.lZ)(p);return()=>{t((e=>({...e,initialLoad:!1}))),n((e=>e+1))}},f=()=>{const e=(0,a.vt)();return t=>(0,r.XF)(e,t)},b=()=>{const e=(0,a.vt)();return t=>(0,r.Uc)(e,t)},y=()=>{const e=(0,a.vt)();return t=>(0,r.X3)(e,t)}},89821:(e,t,n)=>{n.d(t,{$:()=>a,A:()=>o});var s=n(47444);const a={rules:[],error:"",id:null,loading:!0,loaded:!1,initialLoad:!1},o=(0,s.Iz)({key:"silencingRulesState",default:e=>(e=>({...a,id:e}))(e)})},66294:(e,t,n)=>{n.d(t,{$:()=>d,A:()=>h});var 
s=n(47444),a=n(83957),o=n(54702),r=n(70716),i=n(55463),c=n(46902),l=n(46741);const d=e=>"spaces.".concat(e,".members"),h=e=>{const t=(0,s.Zs)((t=>{let{set:n}=t;return t=>{let{results:s}=t;return n((0,i.jX)(e),s)}})),n=(0,l.JT)("user:ReadAll");(0,a.A)((()=>({key:d(e),autorun:!!e&&n,fetch:()=>(0,o.kE)(e),association:{getError:()=>(0,c.x2)({id:e,key:"error"}),getIds:()=>(0,i.kd)({id:e,key:"ids"}),getLoaded:()=>(0,i.kd)({id:e,key:"loaded"}),getUpdatedAt:()=>(0,i.kd)({id:e,key:"updatedAt"})},sort:(e,t)=>e.name.localeCompare(t.name,void 0,{sensitivity:"accent",ignorePunctuation:!0}),getResource:e=>(0,r.m)({id:e}),getResourcesInitializer:()=>r.WJ,onReceive:t,pollingOptions:{pollingInterval:315e3}})),[e])}},3705:(e,t,n)=>{n.d(t,{A:()=>o});n(62953);var s=n(96540);const a={x:0,y:0,width:0,height:0,top:0,left:0,bottom:0,right:0},o=()=>{const[e,t]=(0,s.useState)(null),[n,o]=(0,s.useState)(a),r=(0,s.useMemo)((()=>new window.ResizeObserver((e=>{if(e[0]){const{x:t,y:n,width:s,height:a,top:r,left:i,bottom:c,right:l}=e[0].contentRect;o({x:t,y:n,width:s,height:a,top:r,left:i,bottom:c,right:l})}}))),[]);return(0,s.useLayoutEffect)((()=>{if(e)return r.observe(e),()=>{r.disconnect()}}),[e]),[t,n]}},99090:(e,t,n)=>{n.d(t,{A:()=>d});n(9391),n(62953);var s=n(96540),a=n(63950),o=n.n(a),r=n(80925),i=n(68831),c=n(18300),l=n(22292);const d=function(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:[];const n=(0,r.e)(),a=(0,s.useMemo)((()=>n?n.getRoot():null),[n]),d=(0,s.useMemo)(e,t),h=(0,s.useRef)(!0),u=(0,s.useRef)({timeoutId:null,promise:null}),[,p]=(0,c.A)(),m=(0,s.useCallback)((e=>{h.current=!1;const{fetch:t,onStart:n=o(),onFail:s=o(),onPollingFail:r=o(),onReceive:c=o(),onSettle:l=o(),onBlurMode:m=o(),polling:g=!0,pollingOptions:f}=d;n(e),u.current={timeoutId:null,promise:null};const b=()=>(u.current.promise=t(e),u.current.promise.then(c).catch((e=>{if(!e.isCancel)throw e})),u.current.promise),y=()=>{u.current.timeoutId=setTimeout((()=>{if(null!==f&&void 0!==f&&f.shouldPause&&a&&(a.getAttribute("hovering")||a.getAttribute("paused"))||!p.current&&(!a||!a.getAttribute("autofetchOnWindowBlur")))return m(),y();b().then((()=>!h.current&&g&&y())).catch((e=>{e.isCancel||r(e),!h.current&&!e.isCancel&&g&&y()}))}),(null===f||void 0===f?void 0:f.pollingInterval)||i.A.pollingInterval)};return b().then((()=>!h.current&&g&&y())).catch((e=>{e.isCancel||s(e),h.current||!g||e.isCancel||y()})).finally(l),()=>{var e,t;h.current=!0,null===(e=u.current.promise)||void 0===e||null===(t=e.cancel)||void 0===t||t.call(e),clearTimeout(u.current.timeoutId)}}),t),g=(0,l.uW)("isAnonymous");return(0,s.useEffect)((()=>{const{enabled:e=!0,force:t=!1,skip:n=!1}=d;if((!g||t)&&!n)return e?m():void 0}),[g,m]),m.clearRef=u,m}},76777:(e,t,n)=>{n.d(t,{A:()=>c});n(62953);var s=n(96540),a=n(61427),o=n(82543),r=n(81416),i=n(27467);const c=()=>{const e=(0,o.g)(),t=(0,a.Ip)({nodeIds:e,flavour:"rhs"}),[n,c]=(0,r.yD)(),{after:l,before:d}=(0,i.rW)();(0,s.useEffect)((()=>{t()}),[n,c,l,d])}}}]); \ No newline at end of file diff --git a/src/web/gui/v2/3968.483ca2ad3b300293e655.chunk.js b/src/web/gui/v2/3968.483ca2ad3b300293e655.chunk.js deleted file mode 100644 index de1e75be9..000000000 --- a/src/web/gui/v2/3968.483ca2ad3b300293e655.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new 
Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="5d403afd-e5b2-4fbe-974e-d4995af6873e",e._sentryDebugIdIdentifier="sentry-dbid-5d403afd-e5b2-4fbe-974e-d4995af6873e")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[3968],{18686:(e,t,a)=>{a.d(t,{A:()=>r});var l=a(96540),n=a(83199),o=a(47731);const r=e=>{let{children:t}=e;return(0,o.J)()?l.createElement(n.Layer,{full:!0},l.createElement(n.Flex,{width:"100%",background:"mainBackground","data-testid":"alertView-mobileContainer"},t)):t}},41122:(e,t,a)=>{a.d(t,{A:()=>N});var l=a(96540),n=a(83199),o=a(64118),r=a(28738),d=a(47731),i=a(69765),c=a(11164),s=a(43407),m=a(5871),u=a(52768),g=a(47767),f=a(27467),p=a(47762),E=a(8711);const b=(0,E.default)(n.Flex).attrs({alignItems:"center"}).withConfig({displayName:"styled__StyledButtonContainer",componentId:"sc-1glv09p-0"})(["position:sticky;bottom:0;"]);var h=a(38966);const w=(0,a(92155).A)(n.Button),I=e=>{let{disabled:t,nodeId:a,alertId:o,context:r,lastStatusChange:d,onClose:i,isLoading:c,small:s=!1,testid:m="alertDetailsModal"}=e;const u=(0,g.Zp)(),E=(0,p.Zl)(a),I=(0,l.useCallback)((()=>{i&&i(),u(E,o?{state:{alertId:o}}:{state:{contextToGo:r}})}),[E,o]),y=(0,f.rI)(),x=(0,l.useCallback)((()=>{const e=1e3*d;y({highlight:{after:e-6e4,before:e},correlation:!0}),I()}),[d,r,I,o]);return l.createElement(b,{justifyContent:"end",gap:2},l.createElement(n.Flex,{gap:2,justifyContent:"end"},l.createElement(w,{small:s,label:"Run correlations",onClick:x,flavour:"hollow",isLoading:c,width:s?"112px":"170px","data-testid":"".concat(m,"-runCorrelations-button"),"data-ga":"alert-modal::click-run-correlations::alerts-view",payload:{action:"Run correlations",alertId:o,nodeId:a}}),l.createElement(w,{small:s,label:"Go to chart",onClick:I,isLoading:c,disabled:c||t,width:s?"112px":"150px","data-testid":"".concat(m,"-goToNode-button"),"data-ga":"alert-modal::click-goto-chart::alerts-view",payload:{action:"Go to chart",alertId:o,nodeId:a}}),l.createElement(h.A,{alertId:o,nodeId:a,isLoading:c,small:s,"data-testid":"".concat(m,"-edit-alert-button")})))},y=(0,l.memo)(I);var x=a(7660),C=a(40267);const v=e=>{let{alertId:t,context:a,name:o,nodeId:r,status:i,lastStatusChange:c,fullyLoaded:s,isWebview:m}=e;const u=(0,d.J)(),g=u?n.H4:n.H0;return l.createElement(n.Flex,{column:!0,gap:4},l.createElement(n.Flex,{justifyContent:"between"},l.createElement(n.Flex,{alignItems:"center",gap:2},l.createElement(C.A,{margin:u?null:[.5,0,0],flavour:i,"data-testid":"alertView-statusPill"},i),l.createElement(g,{"data-testid":"alertView-alertName"},o)),!1),l.createElement(n.Flex,{justifyContent:"between",alignItems:"center"},l.createElement(x.A,{alertId:t}),!m&&s&&!u&&l.createElement(y,{lastStatusChange:c,alertId:t,context:a,name:o,nodeId:r,small:!0,testid:"alertView"})))};var A=a(68831),k=a(63314);const L=E.default.img.withConfig({displayName:"sc-404__Illustration",componentId:"sc-4w81fg-0"})(["height:35%;width:35%;"]),S=E.default.div.withConfig({displayName:"sc-404__ButtonContainer",componentId:"sc-4w81fg-1"})(["margin:",";"],(0,n.getSizeBy)(4)),_=()=>{const e="".concat(A.A.assetsBaseURL,"/img/no-nodes-room.svg");return 
l.createElement(k.DL,null,l.createElement(n.Flex,{column:!0,alignItems:"center",justifyItems:"center",justifyContent:"center",height:"100%",width:"100%",padding:[0,0,"10%"]},l.createElement(L,{src:e,alt:"Unreachable alert",title:"Unreachable alert"}),l.createElement(n.H3,null,"We couldn't find the alert"),l.createElement(n.Text,null,"This can be a temporary problem of that specific alert."),l.createElement(S,null,l.createElement(n.Button,{label:"Retry",icon:"reload"}))))};var V=a(4974),F=a(73865),B=a(85686);const R=e=>{let{children:t}=e;return l.createElement(n.Flex,{background:"modalHeaderBackground",height:12,flex:!1,gap:4,padding:[0,2,0,4],alignItems:"center"},l.createElement(n.Icon,{name:"logo_s",color:"success",width:"23px"}),t)},N=e=>{let{alertId:t,spaceId:a,roomId:g,isWebview:f,nodeName:p}=e;const E=(0,i.XA)("name"),{isNodeRestricted:b}=(0,F.A)(),{fullyLoaded:h=!1,fullyLoading:w=!0,info:I,units:y,lastStatusChangeValue:x,lastStatusChange:C,context:A,instance:L,name:S,nodeId:N,status:T,lastUpdated:D,value:J}=(0,o.JL)(t);(0,o.yk)(t,{spaceId:a,roomId:g});const j=(0,u.J4)(J,y),H=(0,u.J4)(x,y),U=(0,d.J)();return N&&b(N)?l.createElement(k.Ay,{feature:"AlertDetailsViewRestricted"},l.createElement(B.A,null)):l.createElement(k.Ay,{feature:"AlertDetailsView"},l.createElement(n.Flex,{column:!0,width:U?"100%":{max:280},padding:U?null:[0,0,10],background:U?"modalBackground":null},!f&&U&&l.createElement(R,null,l.createElement(n.Flex,{column:!0},l.createElement(n.H6,{color:"textLite"},"ROOM"),l.createElement(n.Text,{"data-testid":"alertView-mobile-roomName"},E))),l.createElement(n.Flex,{column:!0,padding:U?[3]:[0],overflow:U?"auto":"visible",gap:3},l.createElement(v,{alertId:t,context:A,status:T,name:S,nodeId:N,lastStatusChange:C,fullyLoaded:h,isWebview:f}),h?null:w?l.createElement(r.A,{title:"Loading alert..."}):l.createElement(_,null),h&&I&&l.createElement(l.Fragment,null,l.createElement(s.A,{iconName:"documentation"},"Alert Description"),l.createElement(n.Text,{"data-testid":"alertView-info"},I),l.createElement(V.A,{alertId:t})),h&&l.createElement(c.A,{id:t,context:A,instance:L,formattedLastValue:j,formattedLastStatusChangeValue:H,lastStatusChange:C,lastUpdated:D,isFormattedValueLoaded:h,nodeId:N,status:T,testid:"alertView",spaceId:a,roomId:g}),h&&l.createElement(m.A,{id:t,nodeName:p,testid:"alertView"})),U&&l.createElement(n.Box,{position:"sticky",padding:[4],background:"modalBackground",bottom:0,border:{side:"top",color:"border"}},l.createElement(n.TextSmall,null,"In order to ",l.createElement(n.TextSmall,{strong:!0},"Run Correlations")," or"," ",l.createElement(n.TextSmall,{strong:!0},"View the Chart")," you will have to visit this alert from its' dedicated page on a desktop device."))))}},73968:(e,t,a)=>{a.r(t),a.d(t,{default:()=>g});var l=a(96540),n=a(54621),o=a(64118),r=a(83084),d=a(28738),i=a(47767),c=a(45588),s=a(86663);var m=a(41122),u=a(18686);const g=()=>{const e=(()=>{const{alertId:e,...t}=(0,i.g)(),{search:a}=(0,i.zy)(),{transition:n}=s.parse(a),o=(0,l.useRef)(n||e),r=(0,i.Zp)();return(0,l.useEffect)((()=>{n&&n!==e&&r((0,c.tW)("/spaces/:spaceSlug/rooms/:roomSlug/alerts/".concat(n),t),{replace:!0})}),[e,n]),o.current})(),{name:t="unknown alert",fullyLoaded:a=!1}=(0,o.JL)(e),g=(0,o.x)();return(0,n.ZB)({title:t,id:e,destination:e,params:location.hash,type:"alerts",isReady:a}),g?l.createElement(r.A,{overflow:{vertical:"auto"},margin:[0,0,8],padding:[3]},g&&l.createElement(u.A,null,l.createElement(m.A,{alertId:e}))):l.createElement(d.A,{title:"Loading alert..."})}}}]); \ No newline at 
end of file diff --git a/src/web/gui/v2/3D_PARTY_LICENSES.txt b/src/web/gui/v2/3D_PARTY_LICENSES.txt deleted file mode 100644 index 80da910a8..000000000 --- a/src/web/gui/v2/3D_PARTY_LICENSES.txt +++ /dev/null @@ -1,7457 +0,0 @@ -@babel/runtime -MIT -MIT License - -Copyright (c) 2014-present Sebastian McKenzie and other contributors - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - -@dnd-kit/accessibility -MIT -MIT License - -Copyright (c) 2021, Claudéric Demers - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -@dnd-kit/core -MIT -MIT License - -Copyright (c) 2021, Claudéric Demers - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -@dnd-kit/sortable -MIT -MIT License - -Copyright (c) 2021, Claudéric Demers - -(License text identical to the MIT License reproduced above.) - - -@dnd-kit/utilities -MIT -MIT License - -Copyright (c) 2021, Claudéric Demers - -(License text identical to the MIT License reproduced above.) - - -@elastic/react-search-ui -Apache-2.0 - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity.
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2019 Elasticsearch B.V. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - -@elastic/search-ui -Apache-2.0 - -(License terms identical to the Apache License, Version 2.0 reproduced above.) - - Copyright 2019 Elasticsearch B.V. - - -@elastic/search-ui-site-search-connector -Apache-2.0 - -(License terms identical to the Apache License, Version 2.0 reproduced above.) - - Copyright 2019 Elasticsearch B.V. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License.
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - -@emotion/cache -MIT -MIT License - -Copyright (c) Emotion team and other contributors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -@emotion/hash -MIT -MIT License - -Copyright (c) Emotion team and other contributors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -@emotion/is-prop-valid -MIT -MIT License - -Copyright (c) Emotion team and other contributors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -@emotion/memoize -MIT -MIT License - -Copyright (c) Emotion team and other contributors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -@emotion/react -MIT -MIT License - -Copyright (c) Emotion team and other contributors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -@emotion/serialize -MIT -MIT License - -Copyright (c) Emotion team and other contributors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -@emotion/sheet -MIT -MIT License - -Copyright (c) Emotion team and other contributors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -@emotion/stylis -MIT -MIT License - -Copyright (c) Emotion team and other contributors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -@emotion/unitless -MIT -MIT License - -Copyright (c) Emotion team and other contributors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -@emotion/use-insertion-effect-with-fallbacks -MIT -MIT License - -Copyright (c) Emotion team and other contributors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -@emotion/utils -MIT -MIT License - -Copyright (c) Emotion team and other contributors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -@fortawesome/free-brands-svg-icons -(CC-BY-4.0 AND MIT) -Fonticons, Inc. (https://fontawesome.com) - --------------------------------------------------------------------------------- - -Font Awesome Free License - -Font Awesome Free is free, open source, and GPL friendly. You can use it for -commercial projects, open source projects, or really almost whatever you want. -Full Font Awesome Free license: https://fontawesome.com/license/free. - --------------------------------------------------------------------------------- - -# Icons: CC BY 4.0 License (https://creativecommons.org/licenses/by/4.0/) - -The Font Awesome Free download is licensed under a Creative Commons -Attribution 4.0 International License and applies to all icons packaged -as SVG and JS file types. 
- --------------------------------------------------------------------------------- - -# Fonts: SIL OFL 1.1 License - -In the Font Awesome Free download, the SIL OFL license applies to all icons -packaged as web and desktop font files. - -Copyright (c) 2024 Fonticons, Inc. (https://fontawesome.com) -with Reserved Font Name: "Font Awesome". - -This Font Software is licensed under the SIL Open Font License, Version 1.1. -This license is copied below, and is also available with a FAQ at: -http://scripts.sil.org/OFL - -SIL OPEN FONT LICENSE -Version 1.1 - 26 February 2007 - -PREAMBLE -The goals of the Open Font License (OFL) are to stimulate worldwide -development of collaborative font projects, to support the font creation -efforts of academic and linguistic communities, and to provide a free and -open framework in which fonts may be shared and improved in partnership -with others. - -The OFL allows the licensed fonts to be used, studied, modified and -redistributed freely as long as they are not sold by themselves. The -fonts, including any derivative works, can be bundled, embedded, -redistributed and/or sold with any software provided that any reserved -names are not used by derivative works. The fonts and derivatives, -however, cannot be released under any other type of license. The -requirement for fonts to remain under this license does not apply -to any document created using the fonts or their derivatives. - -DEFINITIONS -"Font Software" refers to the set of files released by the Copyright -Holder(s) under this license and clearly marked as such. This may -include source files, build scripts and documentation. - -"Reserved Font Name" refers to any names specified as such after the -copyright statement(s). - -"Original Version" refers to the collection of Font Software components as -distributed by the Copyright Holder(s). - -"Modified Version" refers to any derivative made by adding to, deleting, -or substituting — in part or in whole — any of the components of the -Original Version, by changing formats or by porting the Font Software to a -new environment. - -"Author" refers to any designer, engineer, programmer, technical -writer or other person who contributed to the Font Software. - -PERMISSION & CONDITIONS -Permission is hereby granted, free of charge, to any person obtaining -a copy of the Font Software, to use, study, copy, merge, embed, modify, -redistribute, and sell modified and unmodified copies of the Font -Software, subject to the following conditions: - -1) Neither the Font Software nor any of its individual components, -in Original or Modified Versions, may be sold by itself. - -2) Original or Modified Versions of the Font Software may be bundled, -redistributed and/or sold with any software, provided that each copy -contains the above copyright notice and this license. These can be -included either as stand-alone text files, human-readable headers or -in the appropriate machine-readable metadata fields within text or -binary files as long as those fields can be easily viewed by the user. - -3) No Modified Version of the Font Software may use the Reserved Font -Name(s) unless explicit written permission is granted by the corresponding -Copyright Holder. This restriction only applies to the primary font name as -presented to the users. 
- -4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font -Software shall not be used to promote, endorse or advertise any -Modified Version, except to acknowledge the contribution(s) of the -Copyright Holder(s) and the Author(s) or with their explicit written -permission. - -5) The Font Software, modified or unmodified, in part or in whole, -must be distributed entirely under this license, and must not be -distributed under any other license. The requirement for fonts to -remain under this license does not apply to any document created -using the Font Software. - -TERMINATION -This license becomes null and void if any of the above conditions are -not met. - -DISCLAIMER -THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT -OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE -COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL -DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM -OTHER DEALINGS IN THE FONT SOFTWARE. - --------------------------------------------------------------------------------- - -# Code: MIT License (https://opensource.org/licenses/MIT) - -In the Font Awesome Free download, the MIT license applies to all non-font and -non-icon files. - -Copyright 2024 Fonticons, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in the -Software without restriction, including without limitation the rights to use, copy, -modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -and to permit persons to whom the Software is furnished to do so, subject to the -following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, -INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - --------------------------------------------------------------------------------- - -# Attribution - -Attribution is required by MIT, SIL OFL, and CC BY licenses. Downloaded Font -Awesome Free files already contain embedded comments with sufficient -attribution, so you shouldn't need to do anything additional when using these -files normally. - -We've kept attribution comments terse, so we ask that you do not actively work -to remove them from files, especially code. They're a great way for folks to -learn about Font Awesome. - --------------------------------------------------------------------------------- - -# Brand Icons - -All brand icons are trademarks of their respective owners. The use of these -trademarks does not indicate endorsement of the trademark holder by Font -Awesome, nor vice versa. 
**Please do not use brand logos for any purpose except -to represent the company, product, or service to which they refer.** - - -@fortawesome/free-solid-svg-icons -(CC-BY-4.0 AND MIT) -Fonticons, Inc. (https://fontawesome.com) - --------------------------------------------------------------------------------- - -Font Awesome Free License - -Font Awesome Free is free, open source, and GPL friendly. You can use it for -commercial projects, open source projects, or really almost whatever you want. -Full Font Awesome Free license: https://fontawesome.com/license/free. - --------------------------------------------------------------------------------- - -# Icons: CC BY 4.0 License (https://creativecommons.org/licenses/by/4.0/) - -The Font Awesome Free download is licensed under a Creative Commons -Attribution 4.0 International License and applies to all icons packaged -as SVG and JS file types. - --------------------------------------------------------------------------------- - -# Fonts: SIL OFL 1.1 License - -In the Font Awesome Free download, the SIL OFL license applies to all icons -packaged as web and desktop font files. - -Copyright (c) 2024 Fonticons, Inc. (https://fontawesome.com) -with Reserved Font Name: "Font Awesome". - -This Font Software is licensed under the SIL Open Font License, Version 1.1. -This license is copied below, and is also available with a FAQ at: -http://scripts.sil.org/OFL - -SIL OPEN FONT LICENSE -Version 1.1 - 26 February 2007 - -PREAMBLE -The goals of the Open Font License (OFL) are to stimulate worldwide -development of collaborative font projects, to support the font creation -efforts of academic and linguistic communities, and to provide a free and -open framework in which fonts may be shared and improved in partnership -with others. - -The OFL allows the licensed fonts to be used, studied, modified and -redistributed freely as long as they are not sold by themselves. The -fonts, including any derivative works, can be bundled, embedded, -redistributed and/or sold with any software provided that any reserved -names are not used by derivative works. The fonts and derivatives, -however, cannot be released under any other type of license. The -requirement for fonts to remain under this license does not apply -to any document created using the fonts or their derivatives. - -DEFINITIONS -"Font Software" refers to the set of files released by the Copyright -Holder(s) under this license and clearly marked as such. This may -include source files, build scripts and documentation. - -"Reserved Font Name" refers to any names specified as such after the -copyright statement(s). - -"Original Version" refers to the collection of Font Software components as -distributed by the Copyright Holder(s). - -"Modified Version" refers to any derivative made by adding to, deleting, -or substituting — in part or in whole — any of the components of the -Original Version, by changing formats or by porting the Font Software to a -new environment. - -"Author" refers to any designer, engineer, programmer, technical -writer or other person who contributed to the Font Software. - -PERMISSION & CONDITIONS -Permission is hereby granted, free of charge, to any person obtaining -a copy of the Font Software, to use, study, copy, merge, embed, modify, -redistribute, and sell modified and unmodified copies of the Font -Software, subject to the following conditions: - -1) Neither the Font Software nor any of its individual components, -in Original or Modified Versions, may be sold by itself. 
- -2) Original or Modified Versions of the Font Software may be bundled, -redistributed and/or sold with any software, provided that each copy -contains the above copyright notice and this license. These can be -included either as stand-alone text files, human-readable headers or -in the appropriate machine-readable metadata fields within text or -binary files as long as those fields can be easily viewed by the user. - -3) No Modified Version of the Font Software may use the Reserved Font -Name(s) unless explicit written permission is granted by the corresponding -Copyright Holder. This restriction only applies to the primary font name as -presented to the users. - -4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font -Software shall not be used to promote, endorse or advertise any -Modified Version, except to acknowledge the contribution(s) of the -Copyright Holder(s) and the Author(s) or with their explicit written -permission. - -5) The Font Software, modified or unmodified, in part or in whole, -must be distributed entirely under this license, and must not be -distributed under any other license. The requirement for fonts to -remain under this license does not apply to any document created -using the Font Software. - -TERMINATION -This license becomes null and void if any of the above conditions are -not met. - -DISCLAIMER -THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT -OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE -COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL -DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM -OTHER DEALINGS IN THE FONT SOFTWARE. - --------------------------------------------------------------------------------- - -# Code: MIT License (https://opensource.org/licenses/MIT) - -In the Font Awesome Free download, the MIT license applies to all non-font and -non-icon files. - -Copyright 2024 Fonticons, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in the -Software without restriction, including without limitation the rights to use, copy, -modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -and to permit persons to whom the Software is furnished to do so, subject to the -following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, -INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - --------------------------------------------------------------------------------- - -# Attribution - -Attribution is required by MIT, SIL OFL, and CC BY licenses. 
Downloaded Font -Awesome Free files already contain embedded comments with sufficient -attribution, so you shouldn't need to do anything additional when using these -files normally. - -We've kept attribution comments terse, so we ask that you do not actively work -to remove them from files, especially code. They're a great way for folks to -learn about Font Awesome. - --------------------------------------------------------------------------------- - -# Brand Icons - -All brand icons are trademarks of their respective owners. The use of these -trademarks does not indicate endorsement of the trademark holder by Font -Awesome, nor vice versa. **Please do not use brand logos for any purpose except -to represent the company, product, or service to which they refer.** - - -@fortawesome/react-fontawesome -MIT -Copyright 2018 Fonticons, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -@icons/material -MIT - -@markdoc/markdoc -MIT -The MIT License - -Copyright (c) 2021- Stripe, Inc. (https://stripe.com) - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - - -@netdata/charts -GPL-3.0 - GNU GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU General Public License is a free, copyleft license for -software and other kinds of works. 
- - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. By contrast, -the GNU General Public License is intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. We, the Free Software Foundation, use the -GNU General Public License for most of our software; it applies also to -any other work released this way by its authors. You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. - - To protect your rights, we need to prevent others from denying you -these rights or asking you to surrender the rights. Therefore, you have -certain responsibilities if you distribute copies of the software, or if -you modify it: responsibilities to respect the freedom of others. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must pass on to the recipients the same -freedoms that you received. You must make sure that they, too, receive -or can get the source code. And you must show them these terms so they -know their rights. - - Developers that use the GNU GPL protect your rights with two steps: -(1) assert copyright on the software, and (2) offer you this License -giving you legal permission to copy, distribute and/or modify it. - - For the developers' and authors' protection, the GPL clearly explains -that there is no warranty for this free software. For both users' and -authors' sake, the GPL requires that modified versions be marked as -changed, so that their problems will not be attributed erroneously to -authors of previous versions. - - Some devices are designed to deny users access to install or run -modified versions of the software inside them, although the manufacturer -can do so. This is fundamentally incompatible with the aim of -protecting users' freedom to change the software. The systematic -pattern of such abuse occurs in the area of products for individuals to -use, which is precisely where it is most unacceptable. Therefore, we -have designed this version of the GPL to prohibit the practice for those -products. If such problems arise substantially in other domains, we -stand ready to extend this provision to those domains in future versions -of the GPL, as needed to protect the freedom of users. - - Finally, every program is threatened constantly by software patents. -States should not allow patents to restrict development and use of -software on general-purpose computers, but in those that do, we wish to -avoid the special danger that patents applied to a free program could -make it effectively proprietary. To prevent this, the GPL assures that -patents cannot be used to render the program non-free. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. 
Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. - - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. - - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. - - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. - - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. - - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. 
For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. - - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. - - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. - - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. 
- - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. - - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. - - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. - - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. 
You need not require recipients to copy the - Corresponding Source along with the object code. If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. - - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. - - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. - - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. 
- - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. - - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. - - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. 
- - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). - - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. - - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. 
- - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". - - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. - - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. - - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. - - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. 
You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. - - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Use with the GNU Affero General Public License. - - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU Affero General Public License into a single -combined work, and to convey the resulting work. The terms of this -License will continue to apply to the part which is the covered work, -but the special requirements of the GNU Affero General Public License, -section 13, concerning interaction through a network will apply to the -combination as such. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. 
- - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. - - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. Interpretation of Sections 15 and 16. - - If the disclaimer of warranty and limitation of liability provided -above cannot be given local legal effect according to their terms, -reviewing courts shall apply local law that most closely approximates -an absolute waiver of all civil liability in connection with the -Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -state the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - - <one line to give the program's name and a brief idea of what it does.> - Copyright (C) <year> <name of author> - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see <https://www.gnu.org/licenses/>. - -Also add information on how to contact you by electronic and paper mail. - - If the program does terminal interaction, make it output a short -notice like this when it starts in an interactive mode: - - <program> Copyright (C) <year> <name of author> - This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, your program's commands -might be different; for a GUI interface, you would use an "about box". 
- - You should also get your employer (if you work as a programmer) or school, -if any, to sign a "copyright disclaimer" for the program, if necessary. -For more information on this, and how to apply and follow the GNU GPL, see -<https://www.gnu.org/licenses/>. - - The GNU General Public License does not permit incorporating your program -into proprietary programs. If your program is a subroutine library, you -may consider it more useful to permit linking proprietary applications with -the library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. But first, please read -<https://www.gnu.org/licenses/why-not-lgpl.html>. - - -@netdata/netdata-ui -GPL-3.0 - GNU GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/> - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU General Public License is a free, copyleft license for -software and other kinds of works. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. By contrast, -the GNU General Public License is intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. We, the Free Software Foundation, use the -GNU General Public License for most of our software; it applies also to -any other work released this way by its authors. You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. - - To protect your rights, we need to prevent others from denying you -these rights or asking you to surrender the rights. Therefore, you have -certain responsibilities if you distribute copies of the software, or if -you modify it: responsibilities to respect the freedom of others. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must pass on to the recipients the same -freedoms that you received. You must make sure that they, too, receive -or can get the source code. And you must show them these terms so they -know their rights. - - Developers that use the GNU GPL protect your rights with two steps: -(1) assert copyright on the software, and (2) offer you this License -giving you legal permission to copy, distribute and/or modify it. - - For the developers' and authors' protection, the GPL clearly explains -that there is no warranty for this free software. For both users' and -authors' sake, the GPL requires that modified versions be marked as -changed, so that their problems will not be attributed erroneously to -authors of previous versions. - - Some devices are designed to deny users access to install or run -modified versions of the software inside them, although the manufacturer -can do so. This is fundamentally incompatible with the aim of -protecting users' freedom to change the software. The systematic -pattern of such abuse occurs in the area of products for individuals to -use, which is precisely where it is most unacceptable. Therefore, we -have designed this version of the GPL to prohibit the practice for those -products. 
If such problems arise substantially in other domains, we -stand ready to extend this provision to those domains in future versions -of the GPL, as needed to protect the freedom of users. - - Finally, every program is threatened constantly by software patents. -States should not allow patents to restrict development and use of -software on general-purpose computers, but in those that do, we wish to -avoid the special danger that patents applied to a free program could -make it effectively proprietary. To prevent this, the GPL assures that -patents cannot be used to render the program non-free. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. - - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. - - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. - - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. - - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. 
A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. - - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. - - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. - - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. 
- - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. - - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. - - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. - - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. 
- - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. - - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. - - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. 
- - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. - - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. 
- - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). - - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. - - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. 
If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. - - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". - - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. - - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. - - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). 
To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. - - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. - - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Use with the GNU Affero General Public License. 
- - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU Affero General Public License into a single -combined work, and to convey the resulting work. The terms of this -License will continue to apply to the part which is the covered work, -but the special requirements of the GNU Affero General Public License, -section 13, concerning interaction through a network will apply to the -combination as such. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. - - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. Interpretation of Sections 15 and 16. - - If the disclaimer of warranty and limitation of liability provided -above cannot be given local legal effect according to their terms, -reviewing courts shall apply local law that most closely approximates -an absolute waiver of all civil liability in connection with the -Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. 
- - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -state the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - - <one line to give the program's name and a brief idea of what it does.> - Copyright (C) <year> <name of author> - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see <https://www.gnu.org/licenses/>. - -Also add information on how to contact you by electronic and paper mail. - - If the program does terminal interaction, make it output a short -notice like this when it starts in an interactive mode: - - <program> Copyright (C) <year> <name of author> - This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, your program's commands -might be different; for a GUI interface, you would use an "about box". - - You should also get your employer (if you work as a programmer) or school, -if any, to sign a "copyright disclaimer" for the program, if necessary. -For more information on this, and how to apply and follow the GNU GPL, see -<https://www.gnu.org/licenses/>. - - The GNU General Public License does not permit incorporating your program -into proprietary programs. If your program is a subroutine library, you -may consider it more useful to permit linking proprietary applications with -the library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. But first, please read -<https://www.gnu.org/licenses/why-not-lgpl.html>. - - -@popperjs/core -MIT -The MIT License (MIT) - -Copyright (c) 2019 Federico Zivolo - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - -@prismicio/client -Apache-2.0 - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. 
Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - -@remix-run/router -MIT -MIT License - -Copyright (c) React Training LLC 2015-2019 -Copyright (c) Remix Software Inc. 2020-2021 -Copyright (c) Shopify Inc. 2022-2023 - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -@rjsf/core -Apache-2.0 - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright 2015-2024 rjsf-team - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - - -@rjsf/utils -Apache-2.0 - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright 2015-2024 rjsf-team - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - - -@rjsf/validator-ajv8 -Apache-2.0 - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. 
- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright 2015-2024 rjsf-team - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - - -@sentry/browser -MIT -Copyright (c) 2019 Sentry (https://sentry.io) and individual contributors. All rights reserved. 
- -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the Software without restriction, including without limitation the -rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit -persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the -Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE -WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - -@sentry/core -MIT -Copyright (c) 2019 Sentry (https://sentry.io) and individual contributors. All rights reserved. - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the Software without restriction, including without limitation the -rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit -persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the -Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE -WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - -@sentry/react -MIT -Copyright (c) 2019 Sentry (https://sentry.io) and individual contributors. All rights reserved. - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the Software without restriction, including without limitation the -rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit -persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the -Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE -WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - -@sentry/utils -MIT -Copyright (c) 2019 Sentry (https://sentry.io) and individual contributors. All rights reserved. 
- -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the Software without restriction, including without limitation the -rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit -persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the -Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE -WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - -@styled-system/background -MIT - -# The MIT License (MIT) -Copyright (c) 2017-2018 Brent Jackson - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - - -@styled-system/border -MIT - -# The MIT License (MIT) -Copyright (c) 2017-2018 Brent Jackson - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- - - -@styled-system/color -MIT - -# The MIT License (MIT) -Copyright (c) 2017-2018 Brent Jackson - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - - -@styled-system/core -MIT - -# The MIT License (MIT) -Copyright (c) 2017-2018 Brent Jackson - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - - -@styled-system/css -MIT - -# The MIT License (MIT) -Copyright (c) 2017-2018 Brent Jackson - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- - - -@styled-system/flexbox -MIT - -# The MIT License (MIT) -Copyright (c) 2017-2018 Brent Jackson - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - - -@styled-system/grid -MIT - -# The MIT License (MIT) -Copyright (c) 2017-2018 Brent Jackson - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - - -@styled-system/layout -MIT - -# The MIT License (MIT) -Copyright (c) 2017-2018 Brent Jackson - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- - - -@styled-system/position -MIT - -# The MIT License (MIT) -Copyright (c) 2017-2018 Brent Jackson - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - - -@styled-system/shadow -MIT - -# The MIT License (MIT) -Copyright (c) 2017-2018 Brent Jackson - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - - -@styled-system/space -MIT - -# The MIT License (MIT) -Copyright (c) 2017-2018 Brent Jackson - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- - - -@styled-system/typography -MIT - -# The MIT License (MIT) -Copyright (c) 2017-2018 Brent Jackson - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - - -@styled-system/variant -MIT - -# The MIT License (MIT) -Copyright (c) 2017-2018 Brent Jackson - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - - -@tanstack/react-table -MIT -MIT License - -Copyright (c) 2016 Tanner Linsley - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
-
-
-@tanstack/react-virtual
-MIT
-MIT License
-
-Copyright (c) 2021-present Tanner Linsley
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-
-@tanstack/table-core
-MIT
-MIT License
-
-Copyright (c) 2016 Tanner Linsley
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-
-@tanstack/virtual-core
-MIT
-MIT License
-
-Copyright (c) 2021-present Tanner Linsley
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-
-@typeform/embed-react
-MIT
-
-ajv
-MIT
-The MIT License (MIT)
-
-Copyright (c) 2015-2021 Evgeny Poberezkin
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-
-ajv-formats
-MIT
-MIT License
-
-Copyright (c) 2020 Evgeny Poberezkin
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-
-axios
-MIT
-# Copyright (c) 2014-present Matt Zabriskie & Collaborators
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-
-call-bind
-MIT
-MIT License
-
-Copyright (c) 2020 Jordan Harband
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-
-charenc
-BSD-3-Clause
-Copyright © 2011, Paul Vorbach. All rights reserved.
-Copyright © 2009, Jeff Mott. All rights reserved.
-
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification,
-are permitted provided that the following conditions are met:
-
-* Redistributions of source code must retain the above copyright notice, this
-  list of conditions and the following disclaimer.
-* Redistributions in binary form must reproduce the above copyright notice, this
-  list of conditions and the following disclaimer in the documentation and/or
-  other materials provided with the distribution.
-* Neither the name Crypto-JS nor the names of its contributors may be used to
-  endorse or promote products derived from this software without specific prior
-  written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-chart.js
-MIT
-The MIT License (MIT)
-
-Copyright (c) 2014-2022 Chart.js Contributors
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-
-chartjs-adapter-date-fns
-MIT
-The MIT License (MIT)
-
-Copyright (c) 2019 Chart.js Contributors
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-
-chartjs-plugin-annotation
-MIT
-The MIT License (MIT)
-
-Copyright (c) 2016-2021 chartjs-plugin-annotation Contributors
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-
-chartjs-plugin-zoom
-MIT
-The MIT License (MIT)
-
-Copyright (c) 2013-2021 chartjs-plugin-zoom contributors
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-
-classnames
-MIT
-The MIT License (MIT)
-
-Copyright (c) 2018 Jed Watson
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-
-clsx
-MIT
-MIT License
-
-Copyright (c) Luke Edwards (lukeed.com)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-
-compute-gcd
-MIT
-The MIT License (MIT)
-
-Copyright (c) 2014-2015 Athan Reines.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-
-compute-lcm
-MIT
-The MIT License (MIT)
-
-Copyright (c) 2014-2015 Athan Reines.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-
-copy-text-to-clipboard
-MIT
-MIT License
-
-Copyright (c) Sindre Sorhus (https://sindresorhus.com)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-
-core-js
-MIT
-Copyright (c) 2014-2024 Denis Pushkarev
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
-
-crypt
-BSD-3-Clause
-Copyright © 2011, Paul Vorbach. All rights reserved.
-Copyright © 2009, Jeff Mott. All rights reserved.
-
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification,
-are permitted provided that the following conditions are met:
-
-* Redistributions of source code must retain the above copyright notice, this
-  list of conditions and the following disclaimer.
-* Redistributions in binary form must reproduce the above copyright notice, this
-  list of conditions and the following disclaimer in the documentation and/or
-  other materials provided with the distribution.
-* Neither the name Crypto-JS nor the names of its contributors may be used to
-  endorse or promote products derived from this software without specific prior
-  written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-crypto-js
-MIT
-# License
-
-[The MIT License (MIT)](http://opensource.org/licenses/MIT)
-
-Copyright (c) 2009-2013 Jeff Mott
-Copyright (c) 2013-2016 Evan Vosberg
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
-
-d3
-ISC
-Copyright 2010-2023 Mike Bostock
-
-Permission to use, copy, modify, and/or distribute this software for any purpose
-with or without fee is hereby granted, provided that the above copyright notice
-and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
-REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
-FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
-INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
-OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
-TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
-THIS SOFTWARE.
-
-
-d3-array
-ISC
-Copyright 2010-2023 Mike Bostock
-
-Permission to use, copy, modify, and/or distribute this software for any purpose
-with or without fee is hereby granted, provided that the above copyright notice
-and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
-REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
-FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
-INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
-OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
-TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
-THIS SOFTWARE.
-
-
-d3-color
-ISC
-Copyright 2010-2022 Mike Bostock
-
-Permission to use, copy, modify, and/or distribute this software for any purpose
-with or without fee is hereby granted, provided that the above copyright notice
-and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
-REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
-FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
-INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
-OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
-TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
-THIS SOFTWARE.
-
-
-d3-format
-ISC
-Copyright 2010-2021 Mike Bostock
-
-Permission to use, copy, modify, and/or distribute this software for any purpose
-with or without fee is hereby granted, provided that the above copyright notice
-and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
-REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
-FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
-INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
-OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
-TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
-THIS SOFTWARE.
-
-
-d3-interpolate
-ISC
-Copyright 2010-2021 Mike Bostock
-
-Permission to use, copy, modify, and/or distribute this software for any purpose
-with or without fee is hereby granted, provided that the above copyright notice
-and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
-REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
-FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
-INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
-OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
-TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
-THIS SOFTWARE.
-
-
-d3-scale
-ISC
-Copyright 2010-2021 Mike Bostock
-
-Permission to use, copy, modify, and/or distribute this software for any purpose
-with or without fee is hereby granted, provided that the above copyright notice
-and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
-REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
-FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
-INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
-OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
-TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
-THIS SOFTWARE.
-
-
-d3-time
-ISC
-Copyright 2010-2022 Mike Bostock
-
-Permission to use, copy, modify, and/or distribute this software for any purpose
-with or without fee is hereby granted, provided that the above copyright notice
-and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
-REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
-FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
-INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
-OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
-TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
-THIS SOFTWARE.
-
-
-d3-time-format
-ISC
-Copyright 2010-2021 Mike Bostock
-
-Permission to use, copy, modify, and/or distribute this software for any purpose
-with or without fee is hereby granted, provided that the above copyright notice
-and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
-REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
-FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
-INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
-OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
-TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
-THIS SOFTWARE.
-
-
-date-fns
-MIT
-MIT License
-
-Copyright (c) 2021 Sasha Koss and Lesha Koss https://kossnocorp.mit-license.org
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-
-decode-uri-component
-MIT
-The MIT License (MIT)
-
-Copyright (c) 2017, Sam Verschueren (github.com/SamVerschueren)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-
-deep-equal
-MIT
-MIT License
-
-Copyright (c) 2012, 2013, 2014 James Halliday , 2009 Thomas Robinson <280north.com>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-
-define-data-property
-MIT
-MIT License
-
-Copyright (c) 2023 Jordan Harband
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-
-define-properties
-MIT
-The MIT License (MIT)
-
-Copyright (C) 2015 Jordan Harband
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
-domelementtype
-BSD-2-Clause
-Copyright (c) Felix Böhm
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
-
-Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
-
-Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
-
-THIS IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS,
-EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-domhandler
-BSD-2-Clause
-Copyright (c) Felix Böhm
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
-
-Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
-
-Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
-
-THIS IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS,
-EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-dygraphs
-MIT
-dygraphs is MIT-licenced:
-
-Copyright (c) 2006, 2009, 2011, 2012, 2013, 2017 - Dan Vanderkam
-Copyright (c) 2011 Paul Felix
-Copyright (c) 2011, 2013 Robert Konigsberg
-Copyright (c) 2013 David Eberlein
-Copyright (c) 2013 Google, Inc.
-Copyright (c) 2014 mirabilos
-Copyright (c) 2015 Petr Shevtsov
-Copyright (c) 2022, 2023 mirabilos - Deutsche Telekom LLCTO
-and numerous contributors (see git log)
-
-Some tests additionally are:
-
-Copyright (c) 2011, 2012 Google, Inc.
-or contributed by:
-- Benoit Boivin
-- Paul Felix
-- Marek Janda
-- Robert Konigsberg
-- George Madrid
-- Anthony Robledo
-- Fr. Sauter AG
-- Fr. Sauter AG
-- Ümit Seren
-- Sergey Slepian
-- Dan Vanderkam
-
-Parts of the documentation are or make use of code that is:
-
-Copyright (c) 2012 Google, Inc.
-- Robert Konigsberg
-
-The automatically added browser-pack shim is:
-
-Copyright (c) 2013, 2014 James Halliday
-Copyright (c) 2013 Roman Shtylman
-Copyright (c) 2013 Esa-Matti Suuronen
-Copyright (c) 2018 Philipp Simon Schmidt
-
-Permission is hereby granted, free of charge, to any person
-obtaining a copy of this software and associated documentation
-files (the "Software"), to deal in the Software without
-restriction, including without limitation the rights to use,
-copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the
-Software is furnished to do so, subject to the following
-conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-OTHER DEALINGS IN THE SOFTWARE.
-
-The documentation and gallery uses Bootstrap and jQuery; see the
-relevant licence files of those external libraries for details.
-
-The icons under common/ are CC0-licenced and adapted by mirabilos.
-In Debian, /usr/share/common-licenses/CC0-1.0 has the full text.
-
-
-easy-pie-chart
-MIT
-The MIT License (MIT)
-
-Copyright (c) 2013 Robert Fleischmann
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
-
-es-define-property
-MIT
-MIT License
-
-Copyright (c) 2024 Jordan Harband
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-
-es-errors
-MIT
-MIT License
-
-Copyright (c) 2024 Jordan Harband
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-
-fast-deep-equal
-MIT
-MIT License
-
-Copyright (c) 2017 Evgeny Poberezkin
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-
-filter-obj
-MIT
-The MIT License (MIT)
-
-Copyright (c) Sindre Sorhus (sindresorhus.com)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
-
-function-bind
-MIT
-Copyright (c) 2013 Raynos.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
-
-
-functions-have-names
-MIT
-MIT License
-
-Copyright (c) 2019 Jordan Harband
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-
-get-intrinsic
-MIT
-MIT License
-
-Copyright (c) 2020 Jordan Harband
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-
-gopd
-MIT
-MIT License
-
-Copyright (c) 2022 Jordan Harband
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-
-hammerjs
-MIT
-The MIT License (MIT)
-
-Copyright (C) 2011-2014 by Jorik Tangelder (Eight Media)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
-
-has-property-descriptors
-MIT
-MIT License
-
-Copyright (c) 2022 Inspect JS
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-
-has-proto
-MIT
-MIT License
-
-Copyright (c) 2022 Inspect JS
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-
-has-symbols
-MIT
-MIT License
-
-Copyright (c) 2016 Jordan Harband
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-
-has-tostringtag
-MIT
-MIT License
-
-Copyright (c) 2021 Inspect JS
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-
-hasown
-MIT
-MIT License
-
-Copyright (c) Jordan Harband and contributors
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-
-history
-MIT
-MIT License
-
-Copyright (c) React Training 2016-2020
-Copyright (c) Remix Software 2020-2021
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-
-hoist-non-react-statics
-BSD-3-Clause
-Software License Agreement (BSD License)
-========================================
-
-Copyright (c) 2015, Yahoo! Inc. All rights reserved.
-----------------------------------------------------
-
-Redistribution and use of this software in source and binary forms, with or
-without modification, are permitted provided that the following conditions are
-met:
-
-  * Redistributions of source code must retain the above copyright notice, this
-    list of conditions and the following disclaimer.
-  * Redistributions in binary form must reproduce the above copyright notice,
-    this list of conditions and the following disclaimer in the documentation
-    and/or other materials provided with the distribution.
-  * Neither the name of Yahoo! Inc. nor the names of YUI's contributors may be
-    used to endorse or promote products derived from this software without
-    specific prior written permission of Yahoo! Inc.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-html-dom-parser
-MIT
-MIT License
-
-Copyright (c) 2016 Menglin "Mark" Xu
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- - -html-react-parser -MIT -The MIT License - -Copyright (c) 2016 Menglin "Mark" Xu - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - -idb-keyval -Apache-2.0 -Copyright 2016, Jake Archibald - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -inline-style-parser -MIT - -internmap -ISC -Copyright 2021 Mike Bostock - -Permission to use, copy, modify, and/or distribute this software for any purpose -with or without fee is hereby granted, provided that the above copyright notice -and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH -REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND -FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, -INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS -OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER -TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF -THIS SOFTWARE. - - -is-arguments -MIT -The MIT License (MIT) - -Copyright (c) 2014 Jordan Harband - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - -is-buffer -MIT -The MIT License (MIT) - -Copyright (c) Feross Aboukhadijeh - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - - -is-date-object -MIT -The MIT License (MIT) - -Copyright (c) 2015 Jordan Harband - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - - -is-regex -MIT -The MIT License (MIT) - -Copyright (c) 2014 Jordan Harband - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - -json-schema-compare -MIT -MIT License - -Copyright (c) 2017 Martin Hansen - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -json-schema-merge-allof -MIT - -json-schema-traverse -MIT -MIT License - -Copyright (c) 2017 Evgeny Poberezkin - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -jsonpointer -MIT -The MIT License (MIT) - -Copyright (c) 2011-2015 Jan Lehnardt & Marc Bachmann - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - - -lodash -MIT -Copyright OpenJS Foundation and other contributors - -Based on Underscore.js, copyright Jeremy Ashkenas, -DocumentCloud and Investigative Reporters & Editors - -This software consists of voluntary contributions made by many -individuals. For exact contribution history, see the revision history -available at https://github.com/lodash/lodash - -The following license applies to all parts of this software except as -documented below: - -==== - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -==== - -Copyright and related rights for sample code are waived via CC0. Sample -code is defined as all source code displayed within the prose of the -documentation. - -CC0: http://creativecommons.org/publicdomain/zero/1.0/ - -==== - -Files located in the node_modules and vendor directories are externally -maintained libraries used by this software which have their own -licenses; we recommend you read them, as their terms may differ from the -terms above. - - -material-colors -ISC -ISC License - -Copyright 2014 Shuhei Kagawa - -Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - - -md5 -BSD-3-Clause -Copyright © 2011-2012, Paul Vorbach. -Copyright © 2009, Jeff Mott. - -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. 
-* Redistributions in binary form must reproduce the above copyright notice, this - list of conditions and the following disclaimer in the documentation and/or - other materials provided with the distribution. -* Neither the name Crypto-JS nor the names of its contributors may be used to - endorse or promote products derived from this software without specific prior - written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -memoize-one -MIT -MIT License - -Copyright (c) 2019 Alexander Reardon - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - -object-assign -MIT -The MIT License (MIT) - -Copyright (c) Sindre Sorhus (sindresorhus.com) - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
- - -object-inspect -MIT -MIT License - -Copyright (c) 2013 James Halliday - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -object-is -MIT -The MIT License (MIT) - -Copyright (c) 2014 Jordan Harband - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - -object-keys -MIT -The MIT License (MIT) - -Copyright (C) 2013 Jordan Harband - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
- -polished -MIT -MIT License - -Copyright (c) 2016-Present Brian Hough and Maximilian Stoiber - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -posthog-js -MIT -Copyright 2020 Posthog / Hiberly, Inc. - -Copyright 2015 Mixpanel, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this work except in compliance with the License. -You may obtain a copy of the License below, or at: - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - -prop-types -MIT -MIT License - -Copyright (c) 2013-present, Facebook, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -qr.js -MIT -Copyright (c) 2013 Roman Shtylman - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - -qs -BSD-3-Clause -BSD 3-Clause License - -Copyright (c) 2014, Nathan LaFreniere and other [contributors](https://github.com/ljharb/qs/graphs/contributors) -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -3. Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- - -query-string -MIT -MIT License - -Copyright (c) Sindre Sorhus (http://sindresorhus.com) - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - -react -MIT -MIT License - -Copyright (c) Facebook, Inc. and its affiliates. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -react-chartjs-2 -MIT -Copyright 2020 Jeremy Ayerst - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- - -react-color -MIT -The MIT License (MIT) - -Copyright (c) 2015 Case Sandberg - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -react-datepicker -MIT -The MIT License (MIT) - -Copyright (c) 2014-2023 HackerOne Inc and individual contributors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -react-dom -MIT -MIT License - -Copyright (c) Facebook, Inc. and its affiliates. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -react-draggable -MIT -(MIT License) - -Copyright (c) 2014-2016 Matt Zabriskie. All rights reserved. 
- -Permission is hereby granted, free of charge, to any person obtaining a -copy of this software and associated documentation files (the "Software"), -to deal in the Software without restriction, including without limitation -the rights to use, copy, modify, merge, publish, distribute, sublicense, -and/or sell copies of the Software, and to permit persons to whom the -Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. - - -react-fast-compare -MIT -MIT License - -Copyright (c) 2018 Formidable Labs -Copyright (c) 2017 Evgeny Poberezkin - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -react-is -MIT -MIT License - -Copyright (c) Facebook, Inc. and its affiliates. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
- - -react-minimal-pie-chart -MIT -Copyright (c) Andrea Carraro - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -react-onclickoutside -MIT -Copyright 2015-2022 Mike "Pomax" Kamermans - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -react-popper -MIT -The MIT License (MIT) - -Copyright (c) 2018 React Popper authors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
- - -react-portal -MIT -The MIT License (MIT) - -Copyright (c) 2016-present, Vojtech Miksu - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -react-property -MIT - -react-qr-code -MIT -MIT License - -Copyright (c) 2017 Ross Khanas - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -react-router -MIT -MIT License - -Copyright (c) React Training LLC 2015-2019 -Copyright (c) Remix Software Inc. 2020-2021 -Copyright (c) Shopify Inc. 2022-2023 - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
- - -react-router-dom -MIT -MIT License - -Copyright (c) React Training LLC 2015-2019 -Copyright (c) Remix Software Inc. 2020-2021 -Copyright (c) Shopify Inc. 2022-2023 - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -react-select -MIT - -react-toastify -MIT -MIT License - -Copyright (c) 2023 Fadi Khadra - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -react-transition-group -BSD-3-Clause -BSD 3-Clause License - -Copyright (c) 2018, React Community -Forked from React (https://github.com/facebook/react) Copyright 2013-present, Facebook, Inc. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -* Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -reactcss -MIT -The MIT License (MIT) - -Copyright (c) 2015 Case Sandberg - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -recoil -MIT -MIT License - -Copyright (c) Meta Platforms, Inc. and affiliates. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -regexp.prototype.flags -MIT -The MIT License (MIT) - -Copyright (C) 2014 Jordan Harband - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - - - -resolve-pathname -MIT -MIT License - -Copyright (c) Michael Jackson 2016-2018 - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -scheduler -MIT -MIT License - -Copyright (c) Facebook, Inc. and its affiliates. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -set-function-length -MIT -MIT License - -Copyright (c) Jordan Harband and contributors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -set-function-name -MIT -MIT License - -Copyright (c) Jordan Harband and contributors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -shallowequal -MIT -MIT License - -Copyright (c) 2017 Alberto Leal (github.com/dashed) - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -side-channel -MIT -MIT License - -Copyright (c) 2019 Jordan Harband - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -split-on-first -MIT -MIT License - -Copyright (c) Sindre Sorhus (sindresorhus.com) - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - -strict-uri-encode -MIT -The MIT License (MIT) - -Copyright (c) Kevin Martensson (github.com/kevva) - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - - -style-to-js -MIT -The MIT License (MIT) - -Copyright (c) 2020 Menglin "Mark" Xu - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - -style-to-object -MIT -The MIT License (MIT) - -Copyright (c) 2017 Menglin "Mark" Xu - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - -styled-components -MIT - -styled-system -MIT - -# The MIT License (MIT) -Copyright (c) 2017-2018 Brent Jackson - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - - -svg-baker-runtime -MIT - -svg-sprite-loader -MIT -The MIT License (MIT) - -Copyright (c) 2017 Stas Kurilov (kisenka) - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -throttle-debounce -MIT -Copyright (c) Ivan Nikolić - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ---- - -Copyright (c) 2010 "Cowboy" Ben Alman - -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated documentation -files (the "Software"), to deal in the Software without -restriction, including without limitation the rights to use, -copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the -Software is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. - ---- - - GNU GENERAL PUBLIC LICENSE - Version 2, June 1991 - - Copyright (C) 1989, 1991 Free Software Foundation, Inc. - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The licenses for most software are designed to take away your -freedom to share and change it. By contrast, the GNU General Public -License is intended to guarantee your freedom to share and change free -software--to make sure the software is free for all its users. This -General Public License applies to most of the Free Software -Foundation's software and to any other program whose authors commit to -using it. (Some other Free Software Foundation software is covered by -the GNU Lesser General Public License instead.) You can apply it to -your programs, too. 
- - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -this service if you wish), that you receive source code or can get it -if you want it, that you can change the software or use pieces of it -in new free programs; and that you know you can do these things. - - To protect your rights, we need to make restrictions that forbid -anyone to deny you these rights or to ask you to surrender the rights. -These restrictions translate to certain responsibilities for you if you -distribute copies of the software, or if you modify it. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must give the recipients all the rights that -you have. You must make sure that they, too, receive or can get the -source code. And you must show them these terms so they know their -rights. - - We protect your rights with two steps: (1) copyright the software, and -(2) offer you this license which gives you legal permission to copy, -distribute and/or modify the software. - - Also, for each author's protection and ours, we want to make certain -that everyone understands that there is no warranty for this free -software. If the software is modified by someone else and passed on, we -want its recipients to know that what they have is not the original, so -that any problems introduced by others will not reflect on the original -authors' reputations. - - Finally, any free program is threatened constantly by software -patents. We wish to avoid the danger that redistributors of a free -program will individually obtain patent licenses, in effect making the -program proprietary. To prevent this, we have made it clear that any -patent must be licensed for everyone's free use or not licensed at all. - - The precise terms and conditions for copying, distribution and -modification follow. - - GNU GENERAL PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. This License applies to any program or other work which contains -a notice placed by the copyright holder saying it may be distributed -under the terms of this General Public License. The "Program", below, -refers to any such program or work, and a "work based on the Program" -means either the Program or any derivative work under copyright law: -that is to say, a work containing the Program or a portion of it, -either verbatim or with modifications and/or translated into another -language. (Hereinafter, translation is included without limitation in -the term "modification".) Each licensee is addressed as "you". - -Activities other than copying, distribution and modification are not -covered by this License; they are outside its scope. The act of -running the Program is not restricted, and the output from the Program -is covered only if its contents constitute a work based on the -Program (independent of having been made by running the Program). -Whether that is true depends on what the Program does. - - 1. You may copy and distribute verbatim copies of the Program's -source code as you receive it, in any medium, provided that you -conspicuously and appropriately publish on each copy an appropriate -copyright notice and disclaimer of warranty; keep intact all the -notices that refer to this License and to the absence of any warranty; -and give any other recipients of the Program a copy of this License -along with the Program. 
- -You may charge a fee for the physical act of transferring a copy, and -you may at your option offer warranty protection in exchange for a fee. - - 2. You may modify your copy or copies of the Program or any portion -of it, thus forming a work based on the Program, and copy and -distribute such modifications or work under the terms of Section 1 -above, provided that you also meet all of these conditions: - - a) You must cause the modified files to carry prominent notices - stating that you changed the files and the date of any change. - - b) You must cause any work that you distribute or publish, that in - whole or in part contains or is derived from the Program or any - part thereof, to be licensed as a whole at no charge to all third - parties under the terms of this License. - - c) If the modified program normally reads commands interactively - when run, you must cause it, when started running for such - interactive use in the most ordinary way, to print or display an - announcement including an appropriate copyright notice and a - notice that there is no warranty (or else, saying that you provide - a warranty) and that users may redistribute the program under - these conditions, and telling the user how to view a copy of this - License. (Exception: if the Program itself is interactive but - does not normally print such an announcement, your work based on - the Program is not required to print an announcement.) - -These requirements apply to the modified work as a whole. If -identifiable sections of that work are not derived from the Program, -and can be reasonably considered independent and separate works in -themselves, then this License, and its terms, do not apply to those -sections when you distribute them as separate works. But when you -distribute the same sections as part of a whole which is a work based -on the Program, the distribution of the whole must be on the terms of -this License, whose permissions for other licensees extend to the -entire whole, and thus to each and every part regardless of who wrote it. - -Thus, it is not the intent of this section to claim rights or contest -your rights to work written entirely by you; rather, the intent is to -exercise the right to control the distribution of derivative or -collective works based on the Program. - -In addition, mere aggregation of another work not based on the Program -with the Program (or with a work based on the Program) on a volume of -a storage or distribution medium does not bring the other work under -the scope of this License. - - 3. You may copy and distribute the Program (or a work based on it, -under Section 2) in object code or executable form under the terms of -Sections 1 and 2 above provided that you also do one of the following: - - a) Accompany it with the complete corresponding machine-readable - source code, which must be distributed under the terms of Sections - 1 and 2 above on a medium customarily used for software interchange; or, - - b) Accompany it with a written offer, valid for at least three - years, to give any third party, for a charge no more than your - cost of physically performing source distribution, a complete - machine-readable copy of the corresponding source code, to be - distributed under the terms of Sections 1 and 2 above on a medium - customarily used for software interchange; or, - - c) Accompany it with the information you received as to the offer - to distribute corresponding source code. 
(This alternative is - allowed only for noncommercial distribution and only if you - received the program in object code or executable form with such - an offer, in accord with Subsection b above.) - -The source code for a work means the preferred form of the work for -making modifications to it. For an executable work, complete source -code means all the source code for all modules it contains, plus any -associated interface definition files, plus the scripts used to -control compilation and installation of the executable. However, as a -special exception, the source code distributed need not include -anything that is normally distributed (in either source or binary -form) with the major components (compiler, kernel, and so on) of the -operating system on which the executable runs, unless that component -itself accompanies the executable. - -If distribution of executable or object code is made by offering -access to copy from a designated place, then offering equivalent -access to copy the source code from the same place counts as -distribution of the source code, even though third parties are not -compelled to copy the source along with the object code. - - 4. You may not copy, modify, sublicense, or distribute the Program -except as expressly provided under this License. Any attempt -otherwise to copy, modify, sublicense or distribute the Program is -void, and will automatically terminate your rights under this License. -However, parties who have received copies, or rights, from you under -this License will not have their licenses terminated so long as such -parties remain in full compliance. - - 5. You are not required to accept this License, since you have not -signed it. However, nothing else grants you permission to modify or -distribute the Program or its derivative works. These actions are -prohibited by law if you do not accept this License. Therefore, by -modifying or distributing the Program (or any work based on the -Program), you indicate your acceptance of this License to do so, and -all its terms and conditions for copying, distributing or modifying -the Program or works based on it. - - 6. Each time you redistribute the Program (or any work based on the -Program), the recipient automatically receives a license from the -original licensor to copy, distribute or modify the Program subject to -these terms and conditions. You may not impose any further -restrictions on the recipients' exercise of the rights granted herein. -You are not responsible for enforcing compliance by third parties to -this License. - - 7. If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), -conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot -distribute so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you -may not distribute the Program at all. For example, if a patent -license would not permit royalty-free redistribution of the Program by -all those who receive copies directly or indirectly through you, then -the only way you could satisfy both it and this License would be to -refrain entirely from distribution of the Program. 
- -If any portion of this section is held invalid or unenforceable under -any particular circumstance, the balance of the section is intended to -apply and the section as a whole is intended to apply in other -circumstances. - -It is not the purpose of this section to induce you to infringe any -patents or other property right claims or to contest validity of any -such claims; this section has the sole purpose of protecting the -integrity of the free software distribution system, which is -implemented by public license practices. Many people have made -generous contributions to the wide range of software distributed -through that system in reliance on consistent application of that -system; it is up to the author/donor to decide if he or she is willing -to distribute software through any other system and a licensee cannot -impose that choice. - -This section is intended to make thoroughly clear what is believed to -be a consequence of the rest of this License. - - 8. If the distribution and/or use of the Program is restricted in -certain countries either by patents or by copyrighted interfaces, the -original copyright holder who places the Program under this License -may add an explicit geographical distribution limitation excluding -those countries, so that distribution is permitted only in or among -countries not thus excluded. In such case, this License incorporates -the limitation as if written in the body of this License. - - 9. The Free Software Foundation may publish revised and/or new versions -of the General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - -Each version is given a distinguishing version number. If the Program -specifies a version number of this License which applies to it and "any -later version", you have the option of following the terms and conditions -either of that version or of any later version published by the Free -Software Foundation. If the Program does not specify a version number of -this License, you may choose any version ever published by the Free Software -Foundation. - - 10. If you wish to incorporate parts of the Program into other free -programs whose distribution conditions are different, write to the author -to ask for permission. For software which is copyrighted by the Free -Software Foundation, write to the Free Software Foundation; we sometimes -make exceptions for this. Our decision will be guided by the two goals -of preserving the free status of all derivatives of our free software and -of promoting the sharing and reuse of software generally. - - NO WARRANTY - - 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY -FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN -OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES -PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED -OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS -TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE -PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, -REPAIR OR CORRECTION. - - 12. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR -REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, -INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING -OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED -TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY -YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER -PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE -POSSIBILITY OF SUCH DAMAGES. - - -uri-js -BSD-2-Clause -Copyright 2011 Gary Court. All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY GARY COURT "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GARY COURT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -The views and conclusions contained in the software and documentation are those of the authors and should not be interpreted as representing official policies, either expressed or implied, of Gary Court. - - -use-context-selector -MIT -The MIT License (MIT) - -Copyright (c) 2019 Daishi Kato - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
- - -use-isomorphic-layout-effect -MIT -MIT License - -Copyright (c) Mateusz Burzyński - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -uuid -MIT -The MIT License (MIT) - -Copyright (c) 2010-2020 Robert Kieffer and other contributors - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - -validate.io-array -MIT -The MIT License (MIT) - -Copyright (c) 2014-2015 Athan Reines. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -validate.io-function -MIT -The MIT License (MIT) - -Copyright (c) 2014 Athan Reines. 
- -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - -validate.io-integer -MIT -The MIT License (MIT) - -Copyright (c) 2014 Athan Reines. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - -validate.io-integer-array -MIT -The MIT License (MIT) - -Copyright (c) 2015 Athan Reines. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - -validate.io-number -MIT -The MIT License (MIT) - -Copyright (c) 2014 Athan Reines. 
- -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - -warning -MIT -MIT License - -Copyright (c) 2013-present, Facebook, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -workbox-background-sync -MIT -Copyright 2018 Google LLC - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
- - -workbox-cacheable-response -MIT -Copyright 2018 Google LLC - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - - -workbox-core -MIT -Copyright 2018 Google LLC - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - - -workbox-expiration -MIT -Copyright 2018 Google LLC - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
- - -workbox-precaching -MIT -Copyright 2018 Google LLC - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - - -workbox-recipes -MIT -Copyright 2018 Google LLC - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - - -workbox-routing -MIT -Copyright 2018 Google LLC - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
- - -workbox-strategies -MIT -Copyright 2018 Google LLC - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/src/web/gui/v2/4034.35199d2809d318eed690.chunk.js b/src/web/gui/v2/4034.35199d2809d318eed690.chunk.js deleted file mode 100644 index 1ac67a000..000000000 --- a/src/web/gui/v2/4034.35199d2809d318eed690.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},r=(new Error).stack;r&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[r]="bd450e9d-64ed-4ee1-a486-4ee9d95323c9",e._sentryDebugIdIdentifier="sentry-dbid-bd450e9d-64ed-4ee1-a486-4ee9d95323c9")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[4034],{24034:(e,r,t)=>{t.r(r),t.d(r,{default:()=>i,dropInFlavour:()=>d});t(74648),t(17333),t(98992),t(23215),t(54520);var n=t(96540),a=t(43375);const o={activationConstraint:{delay:50,tolerance:10}},d={dropArea:"dropArea",dashboard:"dashboard",self:"self"},i=e=>{let{children:r}=e;const t=(0,n.useCallback)((e=>{const{active:r}=e,{isResizer:t,itemId:n,itemContainerId:o,rootId:d,navigationTab:i}=r.data.current;if(i)return(0,a.fp)({...e,droppableContainers:e.droppableContainers.filter((e=>e.data.current.sortable&&e.data.current.navigationTab))});if(t){const t=e.droppableRects.get(n)||e.droppableRects.get("".concat(d,"-").concat(n));return(0,a.Qo)({...e,collisionRect:{...t,...r.rect.current.translated&&{width:t.width+(r.rect.current.translated.right-r.rect.current.initial.right),height:t.height+(r.rect.current.translated.bottom-r.rect.current.initial.bottom)}},droppableContainers:e.droppableContainers.filter((e=>{var r;return!(null!==(r=e.data.current)&&void 0!==r&&r.isContainer)&&e.data.current.itemContainerId===o}))})}let l,s=e.droppableContainers.filter((e=>e.data.current.droppable));if(s.length){if(l=(0,a.TT)({...e,droppableContainers:s}),l.length>0||s.every((e=>e.data.current.onlyPointerWithin)))return l;if(l=(0,a.Qo)({...e,droppableContainers:s.filter((e=>!e.data.current.onlyPointerWithin))}),l.length>0)return l}if(s=e.droppableContainers.filter((e=>e.data.current.dropArea)),l=(0,a.TT)({...e,droppableContainers:s}),l.length>0)return l;const c=(0,a.TT)({...e,droppableContainers:e.droppableContainers.filter((e=>{var r;return!(null!==(r=e.data.current)&&void 
0!==r&&r.isResizer||e.id===n)}))});return c.length>0?c:(0,a.fp)({...e,droppableContainers:e.droppableContainers.filter((e=>{var r;return!(null!==(r=e.data.current)&&void 0!==r&&r.isResizer||e.id===n)}))})}),[]),d=(0,a.FR)((0,a.MS)(a.cA,o),(0,a.MS)(a.IG,o));return n.createElement(a.Mp,{sensors:d,collisionDetection:t,measuring:{droppable:{strategy:a.Pf.Always},draggable:{measure:a.Sj}},autoScroll:!1},r)}}}]); \ No newline at end of file diff --git a/src/web/gui/v2/4140.46221d08bcda08826c78.chunk.js b/src/web/gui/v2/4140.46221d08bcda08826c78.chunk.js deleted file mode 100644 index 085bf2f6c..000000000 --- a/src/web/gui/v2/4140.46221d08bcda08826c78.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var t="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},h=(new Error).stack;h&&(t._sentryDebugIds=t._sentryDebugIds||{},t._sentryDebugIds[h]="0bf59ea8-784e-464d-bf04-ac3d5192b910",t._sentryDebugIdIdentifier="sentry-dbid-0bf59ea8-784e-464d-bf04-ac3d5192b910")}catch(t){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[4140],{98496:(t,h,l)=>{"use strict";l.d(h,{A:()=>o});var c=l(58168),i=l(96540),a=l(83199),e=l(12897),v=l.n(e),r=l(55042),f=l.n(r),n=new(v())({id:"partialFailureSvg",use:"partialFailureSvg-usage",viewBox:"0 0 348 348",content:''});f().add(n);const p=n,o=t=>{let{title:h,children:l,...e}=t;return i.createElement(a.Flex,{alignItems:"center",justifyContent:"center",flex:!0,gap:8},i.createElement("svg",{id:"partial_failure",width:"288px",height:"234px"},i.createElement("use",{xlinkHref:"#".concat(p.id)})),i.createElement(a.Flex,(0,c.A)({column:!0,width:{max:125},gap:2},e),i.createElement(a.H3,null,h),l))}},26688:(t,h,l)=>{"use strict";l.d(h,{A:()=>i});var c=l(94390);const i=()=>t=>{let{template:h,formData:l={},entityProps:i}=t;return{[c.D9]:!0,template:h,name:"",sourceType:"user",type:"job",formData:l,entityProps:i}}},47193:(t,h,l)=>{"use strict";l.d(h,{A:()=>f});l(62953);var c=l(69765),i=l(67990),a=l(87860),e=l(3914),v=l(47762),r=l(87659);const f=function(){let{polling:t=!0}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const{id:h=""}=(0,c.pr)(),l=(0,i.gr)(h,"ids"),f=(0,i.gr)(h,"loaded"),n=(0,v.Gt)(l),p=(0,e.vt)(),[o,,,d]=(0,r.A)();return(0,a.A)({id:h,spaceId:p,polling:t}),{areDefaultRoomNodesLoaded:f,nodes:n,isClaimNodeModalOpen:o,closeClaimNodeModal:d}}},3705:(t,h,l)=>{"use strict";l.d(h,{A:()=>a});l(62953);var c=l(96540);const i={x:0,y:0,width:0,height:0,top:0,left:0,bottom:0,right:0},a=()=>{const[t,h]=(0,c.useState)(null),[l,a]=(0,c.useState)(i),e=(0,c.useMemo)((()=>new window.ResizeObserver((t=>{if(t[0]){const{x:h,y:l,width:c,height:i,top:e,left:v,bottom:r,right:f}=t[0].contentRect;a({x:h,y:l,width:c,height:i,top:e,left:v,bottom:r,right:f})}}))),[]);return(0,c.useLayoutEffect)((()=>{if(t)return e.observe(t),()=>{e.disconnect()}}),[t]),[h,l]}},54128:(t,h,l)=>{var c=l(31800),i=/^\s+/;t.exports=function(t){return t?t.slice(0,c(t)+1).replace(i,""):t}},55765:(t,h,l)=>{var c=l(38859),i=l(15325),a=l(29905),e=l(19219),v=l(44517),r=l(84247);t.exports=function(t,h,l){var f=-1,n=i,p=t.length,o=!0,d=[],M=d;if(l)o=!1,n=a;else if(p>=200){var s=h?null:v(t);if(s)return r(s);o=!1,n=e,M=new c}else M=h?[]:d;t:for(;++f{var c=l(76545),i=l(63950),a=l(84247),e=c&&1/a(new c([,-0]))[1]==1/0?function(t){return new 
c(t)}:i;t.exports=e},31800:t=>{var h=/\s/;t.exports=function(t){for(var l=t.length;l--&&h.test(t.charAt(l)););return l}},62193:(t,h,l)=>{var c=l(88984),i=l(5861),a=l(72428),e=l(56449),v=l(64894),r=l(3656),f=l(55527),n=l(37167),p=Object.prototype.hasOwnProperty;t.exports=function(t){if(null==t)return!0;if(v(t)&&(e(t)||"string"==typeof t||"function"==typeof t.splice||r(t)||n(t)||a(t)))return!t.length;var h=i(t);if("[object Map]"==h||"[object Set]"==h)return!t.size;if(f(t))return!c(t).length;for(var l in t)if(p.call(t,l))return!1;return!0}},99374:(t,h,l)=>{var c=l(54128),i=l(23805),a=l(44394),e=/^[-+]0x[0-9a-f]+$/i,v=/^0b[01]+$/i,r=/^0o[0-7]+$/i,f=parseInt;t.exports=function(t){if("number"==typeof t)return t;if(a(t))return NaN;if(i(t)){var h="function"==typeof t.valueOf?t.valueOf():t;t=i(h)?h+"":h}if("string"!=typeof t)return 0===t?t:+t;t=c(t);var l=v.test(t);return l||r.test(t)?f(t.slice(2),l?2:8):e.test(t)?NaN:+t}},24074:(t,h,l)=>{"use strict";var c=l(69565),i=l(28551),a=l(2360),e=l(55966),v=l(56279),r=l(91181),f=l(97751),n=l(53982),p=l(62529),o=f("Promise"),d="AsyncFromSyncIterator",M=r.set,s=r.getterFor(d),u=function(t,h,l){var c=t.done;o.resolve(t.value).then((function(t){h(p(t,c))}),l)},z=function(t){t.type=d,M(this,t)};z.prototype=v(a(n),{next:function(){var t=s(this);return new o((function(h,l){var a=i(c(t.next,t.iterator));u(a,h,l)}))},return:function(){var t=s(this).iterator;return new o((function(h,l){var a=e(t,"return");if(void 0===a)return h(p(void 0,!0));var v=i(c(a,t));u(v,h,l)}))}}),t.exports=z},50133:(t,h,l)=>{"use strict";var c=l(69565),i=l(94901),a=l(28551),e=l(1767),v=l(50851),r=l(55966),f=l(78227),n=l(24074),p=f("asyncIterator");t.exports=function(t){var h,l=a(t),f=!0,o=r(l,p);return i(o)||(o=v(l),f=!1),void 0!==o?h=c(o,l):(h=l,f=!0),a(h),e(f?h:new n(e(h)))}},48646:(t,h,l)=>{"use strict";var c=l(69565),i=l(28551),a=l(1767),e=l(50851);t.exports=function(t,h){h&&"string"===typeof t||i(t);var l=e(t);return a(i(void 0!==l?c(l,t):t))}},30237:(t,h,l)=>{"use strict";l(6469)("flatMap")},32679:(t,h,l)=>{"use strict";var c=l(46518),i=l(69565),a=l(79306),e=l(28551),v=l(20034),r=l(1767),f=l(92059),n=l(62529),p=l(50133),o=l(20772),d=l(96395),M=f((function(t){var h=this,l=h.iterator,c=h.mapper;return new t((function(a,r){var f=function(t){h.done=!0,r(t)},d=function(t){o(l,f,t,f)},M=function(){try{t.resolve(e(i(h.next,l))).then((function(l){try{if(e(l).done)h.done=!0,a(n(void 0,!0));else{var i=l.value;try{var r=c(i,h.counter++),o=function(t){try{h.inner=p(t),s()}catch(l){d(l)}};v(r)?t.resolve(r).then(o,d):o(r)}catch(M){d(M)}}}catch(u){f(u)}}),f)}catch(r){f(r)}},s=function(){var l=h.inner;if(l)try{t.resolve(e(i(l.next,l.iterator))).then((function(t){try{e(t).done?(h.inner=null,M()):a(n(t.value,!1))}catch(l){d(l)}}),d)}catch(c){d(c)}else M()};s()}))}));c({target:"AsyncIterator",proto:!0,real:!0,forced:d},{flatMap:function(t){return e(this),a(t),new M(r(this),{mapper:t,inner:null})}})},30670:(t,h,l)=>{"use strict";var c=l(46518),i=l(69565),a=l(79306),e=l(28551),v=l(1767),r=l(48646),f=l(19462),n=l(9539),p=l(96395),o=f((function(){for(var t,h,l=this.iterator,c=this.mapper;;){if(h=this.inner)try{if(!(t=e(i(h.next,h.iterator))).done)return t.value;this.inner=null}catch(a){n(l,"throw",a)}if(t=e(i(this.next,l)),this.done=!!t.done)return;try{this.inner=r(c(t.value,this.counter++),!1)}catch(a){n(l,"throw",a)}}}));c({target:"Iterator",proto:!0,real:!0,forced:p},{flatMap:function(t){return e(this),a(t),new o(v(this),{mapper:t,inner:null})}})}}]); \ No newline at end of file diff --git 
a/src/web/gui/v2/4140.89070793921be1288bb5.css b/src/web/gui/v2/4140.89070793921be1288bb5.css deleted file mode 100644 index e5d02668f..000000000 --- a/src/web/gui/v2/4140.89070793921be1288bb5.css +++ /dev/null @@ -1,2 +0,0 @@ -.default .dygraph-axis-label{color:#35414a}.dark .dygraph-axis-label{color:#fff}.dygraph-label-rotate-right{text-align:center;transform:rotate(-90deg);-webkit-transform:rotate(-90deg);-moz-transform:rotate(-90deg);-o-transform:rotate(-90deg);-ms-transform:rotate(-90deg)}.dygraph-annotation{position:absolute;z-index:10;overflow:hidden;border:1px solid} - diff --git a/src/web/gui/v2/4414.590ba07d470ba2ce7dd0.chunk.js b/src/web/gui/v2/4414.590ba07d470ba2ce7dd0.chunk.js deleted file mode 100644 index 571584662..000000000 --- a/src/web/gui/v2/4414.590ba07d470ba2ce7dd0.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},n=(new Error).stack;n&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[n]="3597fc37-c681-4308-9ec1-86674db668e7",e._sentryDebugIdIdentifier="sentry-dbid-3597fc37-c681-4308-9ec1-86674db668e7")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[4414],{74414:(e,n,c)=>{c.r(n),c.d(n,{default:()=>f});var d=c(8711);const o="> .dyncfg-field-content > .dyncfg-object-field-container > .dyncfg-object-field-collapsible > .dyncfg-object-field-content",t=(0,d.css)([".dyncfg-grid ",",.dyncfg-grid ","{display:grid;column-gap:16px;row-gap:6px;}",""],o,"> .dyncfg-object-field-content",(e=>{const n=[];for(let c=1;c<=e;c++){n.push(".dyncfg-grid-col-".concat(c," ").concat(o," {\n grid-template-columns: repeat(").concat(c,", 1fr) !important;\n }"));for(let d=1;d<=e-c+1;d++)n.push(".dyncfg-grid-col-span-".concat(c,"-").concat(d," { grid-column: ").concat(c," / span ").concat(d,"; }"))}return n.join("\n")})(12)),f=(0,d.createGlobalStyle)(["",""],t)}}]); \ No newline at end of file diff --git a/src/web/gui/v2/4631.158982e127e11bdc6a45.chunk.js b/src/web/gui/v2/4631.158982e127e11bdc6a45.chunk.js deleted file mode 100644 index d88a2cc27..000000000 --- a/src/web/gui/v2/4631.158982e127e11bdc6a45.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="f20fb62a-06c6-4a7c-b697-548babd14717",e._sentryDebugIdIdentifier="sentry-dbid-f20fb62a-06c6-4a7c-b697-548babd14717")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[4631],{97916:(e,t,r)=>{var n=r(76080),s=r(69565),a=r(48981),i=r(96319),o=r(44209),u=r(33517),h=r(26198),f=r(97040),l=r(70081),c=r(50851),p=Array;e.exports=function(e){var t=a(e),r=u(this),g=arguments.length,m=g>1?arguments[1]:void 0,d=void 0!==m;d&&(m=n(m,g>2?arguments[2]:void 0));var v,b,w,y,P,S,U=c(t),k=0;if(!U||this===p&&o(U))for(v=h(t),b=r?new this(v):p(v);v>k;k++)S=d?m(t[k],k):t[k],f(b,k,S);else for(b=r?new this:[],P=(y=l(t,U)).next;!(w=s(P,y)).done;k++)S=d?i(y,m,[w.value,k],!0):w.value,f(b,k,S);return 
b.length=k,b}},44213:(e,t,r)=>{var n=r(43724),s=r(79504),a=r(69565),i=r(79039),o=r(71072),u=r(33717),h=r(48773),f=r(48981),l=r(47055),c=Object.assign,p=Object.defineProperty,g=s([].concat);e.exports=!c||i((function(){if(n&&1!==c({b:1},c(p({},"a",{enumerable:!0,get:function(){p(this,"b",{value:3,enumerable:!1})}}),{b:2})).b)return!0;var e={},t={},r=Symbol("assign detection"),s="abcdefghijklmnopqrst";return e[r]=7,s.split("").forEach((function(e){t[e]=e})),7!==c({},e)[r]||o(c({},t)).join("")!==s}))?function(e,t){for(var r=f(e),s=arguments.length,i=1,c=u.f,p=h.f;s>i;)for(var m,d=l(arguments[i++]),v=c?g(o(d),c(d)):o(d),b=v.length,w=0;b>w;)m=v[w++],n&&!a(p,d,m)||(r[m]=d[m]);return r}:c},3717:(e,t,r)=>{var n=r(79504),s=2147483647,a=/[^\0-\u007E]/,i=/[.\u3002\uFF0E\uFF61]/g,o="Overflow: input needs wider integers to process",u=RangeError,h=n(i.exec),f=Math.floor,l=String.fromCharCode,c=n("".charCodeAt),p=n([].join),g=n([].push),m=n("".replace),d=n("".split),v=n("".toLowerCase),b=function(e){return e+22+75*(e<26)},w=function(e,t,r){var n=0;for(e=r?f(e/700):e>>1,e+=f(e/t);e>455;)e=f(e/35),n+=36;return f(n+36*e/(e+38))},y=function(e){var t=[];e=function(e){for(var t=[],r=0,n=e.length;r=55296&&s<=56319&&r=i&&nf((s-h)/P))throw new u(o);for(h+=(y-i)*P,i=y,r=0;rs)throw new u(o);if(n===i){for(var S=h,U=36;;){var k=U<=m?1:U>=m+26?26:U-m;if(S{var n=r(68183).charAt,s=r(655),a=r(91181),i=r(51088),o=r(62529),u="String Iterator",h=a.set,f=a.getterFor(u);i(String,"String",(function(e){h(this,{type:u,string:s(e),index:0})}),(function(){var e,t=f(this),r=t.string,s=t.index;return s>=r.length?o(void 0,!0):(e=n(r,s),t.index+=e.length,o(e,!1))}))},45806:(e,t,r)=>{r(47764);var n,s=r(46518),a=r(43724),i=r(67416),o=r(24475),u=r(76080),h=r(79504),f=r(36840),l=r(62106),c=r(90679),p=r(39297),g=r(44213),m=r(97916),d=r(67680),v=r(68183).codeAt,b=r(3717),w=r(655),y=r(10687),P=r(22812),S=r(98406),U=r(91181),k=U.set,R=U.getterFor("URL"),L=S.URLSearchParams,H=S.getState,q=o.URL,B=o.TypeError,A=o.parseInt,C=Math.floor,I=Math.pow,O=h("".charAt),j=h(/./.exec),x=h([].join),E=h(1..toString),_=h([].pop),z=h([].push),F=h("".replace),$=h([].shift),D=h("".split),M=h("".slice),N=h("".toLowerCase),J=h([].unshift),T="Invalid scheme",Y="Invalid host",G="Invalid port",K=/[a-z]/i,Q=/[\d+-.a-z]/i,V=/\d/,W=/^0x/i,X=/^[0-7]+$/,Z=/^\d+$/,ee=/^[\da-f]+$/i,te=/[\0\t\n\r #%/:<>?@[\\\]^|]/,re=/[\0\t\n\r #/:<>?@[\\\]^|]/,ne=/^[\u0000-\u0020]+/,se=/(^|[^\u0000-\u0020])[\u0000-\u0020]+$/,ae=/[\t\n\r]/g,ie=function(e){var t,r,n,s;if("number"==typeof e){for(t=[],r=0;r<4;r++)J(t,e%256),e=C(e/256);return x(t,".")}if("object"==typeof e){for(t="",n=function(e){for(var t=null,r=1,n=null,s=0,a=0;a<8;a++)0!==e[a]?(s>r&&(t=n,r=s),n=null,s=0):(null===n&&(n=a),++s);return s>r&&(t=n,r=s),t}(e),r=0;r<8;r++)s&&0===e[r]||(s&&(s=!1),n===r?(t+=r?":":"::",s=!0):(t+=E(e[r],16),r<7&&(t+=":")));return"["+t+"]"}return e},oe={},ue=g({},oe,{" ":1,'"':1,"<":1,">":1,"`":1}),he=g({},ue,{"#":1,"?":1,"{":1,"}":1}),fe=g({},he,{"/":1,":":1,";":1,"=":1,"@":1,"[":1,"\\":1,"]":1,"^":1,"|":1}),le=function(e,t){var r=v(e,0);return r>32&&r<127&&!p(t,e)?e:encodeURIComponent(e)},ce={ftp:21,file:null,http:80,https:443,ws:80,wss:443},pe=function(e,t){var r;return 2===e.length&&j(K,O(e,0))&&(":"===(r=O(e,1))||!t&&"|"===r)},ge=function(e){var t;return 
e.length>1&&pe(M(e,0,2))&&(2===e.length||"/"===(t=O(e,2))||"\\"===t||"?"===t||"#"===t)},me=function(e){return"."===e||"%2e"===N(e)},de={},ve={},be={},we={},ye={},Pe={},Se={},Ue={},ke={},Re={},Le={},He={},qe={},Be={},Ae={},Ce={},Ie={},Oe={},je={},xe={},Ee={},_e=function(e,t,r){var n,s,a,i=w(e);if(t){if(s=this.parse(i))throw new B(s);this.searchParams=null}else{if(void 0!==r&&(n=new _e(r,!0)),s=this.parse(i,null,n))throw new B(s);(a=H(new L)).bindURL(this),this.searchParams=a}};_e.prototype={type:"URL",parse:function(e,t,r){var s,a,i,o,u,h=this,f=t||de,l=0,c="",g=!1,v=!1,b=!1;for(e=w(e),t||(h.scheme="",h.username="",h.password="",h.host=null,h.port=null,h.path=[],h.query=null,h.fragment=null,h.cannotBeABaseURL=!1,e=F(e,ne,""),e=F(e,se,"$1")),e=F(e,ae,""),s=m(e);l<=s.length;){switch(a=s[l],f){case de:if(!a||!j(K,a)){if(t)return T;f=be;continue}c+=N(a),f=ve;break;case ve:if(a&&(j(Q,a)||"+"===a||"-"===a||"."===a))c+=N(a);else{if(":"!==a){if(t)return T;c="",f=be,l=0;continue}if(t&&(h.isSpecial()!==p(ce,c)||"file"===c&&(h.includesCredentials()||null!==h.port)||"file"===h.scheme&&!h.host))return;if(h.scheme=c,t)return void(h.isSpecial()&&ce[h.scheme]===h.port&&(h.port=null));c="","file"===h.scheme?f=Be:h.isSpecial()&&r&&r.scheme===h.scheme?f=we:h.isSpecial()?f=Ue:"/"===s[l+1]?(f=ye,l++):(h.cannotBeABaseURL=!0,z(h.path,""),f=je)}break;case be:if(!r||r.cannotBeABaseURL&&"#"!==a)return T;if(r.cannotBeABaseURL&&"#"===a){h.scheme=r.scheme,h.path=d(r.path),h.query=r.query,h.fragment="",h.cannotBeABaseURL=!0,f=Ee;break}f="file"===r.scheme?Be:Pe;continue;case we:if("/"!==a||"/"!==s[l+1]){f=Pe;continue}f=ke,l++;break;case ye:if("/"===a){f=Re;break}f=Oe;continue;case Pe:if(h.scheme=r.scheme,a===n)h.username=r.username,h.password=r.password,h.host=r.host,h.port=r.port,h.path=d(r.path),h.query=r.query;else if("/"===a||"\\"===a&&h.isSpecial())f=Se;else if("?"===a)h.username=r.username,h.password=r.password,h.host=r.host,h.port=r.port,h.path=d(r.path),h.query="",f=xe;else{if("#"!==a){h.username=r.username,h.password=r.password,h.host=r.host,h.port=r.port,h.path=d(r.path),h.path.length--,f=Oe;continue}h.username=r.username,h.password=r.password,h.host=r.host,h.port=r.port,h.path=d(r.path),h.query=r.query,h.fragment="",f=Ee}break;case Se:if(!h.isSpecial()||"/"!==a&&"\\"!==a){if("/"!==a){h.username=r.username,h.password=r.password,h.host=r.host,h.port=r.port,f=Oe;continue}f=Re}else f=ke;break;case Ue:if(f=ke,"/"!==a||"/"!==O(c,l+1))continue;l++;break;case ke:if("/"!==a&&"\\"!==a){f=Re;continue}break;case Re:if("@"===a){g&&(c="%40"+c),g=!0,i=m(c);for(var y=0;y65535)return G;h.port=h.isSpecial()&&U===ce[h.scheme]?null:U,c=""}if(t)return;f=Ie;continue}return G}c+=a;break;case Be:if(h.scheme="file","/"===a||"\\"===a)f=Ae;else{if(!r||"file"!==r.scheme){f=Oe;continue}switch(a){case n:h.host=r.host,h.path=d(r.path),h.query=r.query;break;case"?":h.host=r.host,h.path=d(r.path),h.query="",f=xe;break;case"#":h.host=r.host,h.path=d(r.path),h.query=r.query,h.fragment="",f=Ee;break;default:ge(x(d(s,l),""))||(h.host=r.host,h.path=d(r.path),h.shortenPath()),f=Oe;continue}}break;case Ae:if("/"===a||"\\"===a){f=Ce;break}r&&"file"===r.scheme&&!ge(x(d(s,l),""))&&(pe(r.path[0],!0)?z(h.path,r.path[0]):h.host=r.host),f=Oe;continue;case Ce:if(a===n||"/"===a||"\\"===a||"?"===a||"#"===a){if(!t&&pe(c))f=Oe;else if(""===c){if(h.host="",t)return;f=Ie}else{if(o=h.parseHost(c))return o;if("localhost"===h.host&&(h.host=""),t)return;c="",f=Ie}continue}c+=a;break;case Ie:if(h.isSpecial()){if(f=Oe,"/"!==a&&"\\"!==a)continue}else 
if(t||"?"!==a)if(t||"#"!==a){if(a!==n&&(f=Oe,"/"!==a))continue}else h.fragment="",f=Ee;else h.query="",f=xe;break;case Oe:if(a===n||"/"===a||"\\"===a&&h.isSpecial()||!t&&("?"===a||"#"===a)){if(".."===(u=N(u=c))||"%2e."===u||".%2e"===u||"%2e%2e"===u?(h.shortenPath(),"/"===a||"\\"===a&&h.isSpecial()||z(h.path,"")):me(c)?"/"===a||"\\"===a&&h.isSpecial()||z(h.path,""):("file"===h.scheme&&!h.path.length&&pe(c)&&(h.host&&(h.host=""),c=O(c,0)+":"),z(h.path,c)),c="","file"===h.scheme&&(a===n||"?"===a||"#"===a))for(;h.path.length>1&&""===h.path[0];)$(h.path);"?"===a?(h.query="",f=xe):"#"===a&&(h.fragment="",f=Ee)}else c+=le(a,he);break;case je:"?"===a?(h.query="",f=xe):"#"===a?(h.fragment="",f=Ee):a!==n&&(h.path[0]+=le(a,oe));break;case xe:t||"#"!==a?a!==n&&("'"===a&&h.isSpecial()?h.query+="%27":h.query+="#"===a?"%23":le(a,oe)):(h.fragment="",f=Ee);break;case Ee:a!==n&&(h.fragment+=le(a,ue))}l++}},parseHost:function(e){var t,r,n;if("["===O(e,0)){if("]"!==O(e,e.length-1))return Y;if(t=function(e){var t,r,n,s,a,i,o,u=[0,0,0,0,0,0,0,0],h=0,f=null,l=0,c=function(){return O(e,l)};if(":"===c()){if(":"!==O(e,1))return;l+=2,f=++h}for(;c();){if(8===h)return;if(":"!==c()){for(t=r=0;r<4&&j(ee,c());)t=16*t+A(c(),16),l++,r++;if("."===c()){if(0===r)return;if(l-=r,h>6)return;for(n=0;c();){if(s=null,n>0){if(!("."===c()&&n<4))return;l++}if(!j(V,c()))return;for(;j(V,c());){if(a=A(c(),10),null===s)s=a;else{if(0===s)return;s=10*s+a}if(s>255)return;l++}u[h]=256*u[h]+s,2!==++n&&4!==n||h++}if(4!==n)return;break}if(":"===c()){if(l++,!c())return}else if(c())return;u[h++]=t}else{if(null!==f)return;l++,f=++h}}if(null!==f)for(i=h-f,h=7;0!==h&&i>0;)o=u[h],u[h--]=u[f+i-1],u[f+--i]=o;else if(8!==h)return;return u}(M(e,1,-1)),!t)return Y;this.host=t}else if(this.isSpecial()){if(e=b(e),j(te,e))return Y;if(t=function(e){var t,r,n,s,a,i,o,u=D(e,".");if(u.length&&""===u[u.length-1]&&u.length--,(t=u.length)>4)return e;for(r=[],n=0;n1&&"0"===O(s,0)&&(a=j(W,s)?16:8,s=M(s,8===a?1:2)),""===s)i=0;else{if(!j(10===a?Z:8===a?X:ee,s))return e;i=A(s,a)}z(r,i)}for(n=0;n=I(256,5-t))return null}else if(i>255)return null;for(o=_(r),n=0;n1?arguments[1]:void 0,n=k(t,new _e(e,!1,r));a||(t.href=n.serialize(),t.origin=n.getOrigin(),t.protocol=n.getProtocol(),t.username=n.getUsername(),t.password=n.getPassword(),t.host=n.getHost(),t.hostname=n.getHostname(),t.port=n.getPort(),t.pathname=n.getPathname(),t.search=n.getSearch(),t.searchParams=n.getSearchParams(),t.hash=n.getHash())},Fe=ze.prototype,$e=function(e,t){return{get:function(){return R(this)[e]()},set:t&&function(e){return R(this)[t](e)},configurable:!0,enumerable:!0}};if(a&&(l(Fe,"href",$e("serialize","setHref")),l(Fe,"origin",$e("getOrigin")),l(Fe,"protocol",$e("getProtocol","setProtocol")),l(Fe,"username",$e("getUsername","setUsername")),l(Fe,"password",$e("getPassword","setPassword")),l(Fe,"host",$e("getHost","setHost")),l(Fe,"hostname",$e("getHostname","setHostname")),l(Fe,"port",$e("getPort","setPort")),l(Fe,"pathname",$e("getPathname","setPathname")),l(Fe,"search",$e("getSearch","setSearch")),l(Fe,"searchParams",$e("getSearchParams")),l(Fe,"hash",$e("getHash","setHash"))),f(Fe,"toJSON",(function(){return R(this).serialize()}),{enumerable:!0}),f(Fe,"toString",(function(){return R(this).serialize()}),{enumerable:!0}),q){var De=q.createObjectURL,Me=q.revokeObjectURL;De&&f(ze,"createObjectURL",u(De,q)),Me&&f(ze,"revokeObjectURL",u(Me,q))}y(ze,"URL"),s({global:!0,constructor:!0,forced:!i,sham:!a},{URL:ze})},3296:(e,t,r)=>{r(45806)},27208:(e,t,r)=>{var 
n=r(46518),s=r(69565);n({target:"URL",proto:!0,enumerable:!0},{toJSON:function(){return s(URL.prototype.toString,this)}})}}]); \ No newline at end of file diff --git a/src/web/gui/v2/4680.7d8122d91e9d4582836a.chunk.js b/src/web/gui/v2/4680.7d8122d91e9d4582836a.chunk.js deleted file mode 100644 index 32ed81f99..000000000 --- a/src/web/gui/v2/4680.7d8122d91e9d4582836a.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="749c2bdc-e55c-48f5-bb20-4d797232fb98",e._sentryDebugIdIdentifier="sentry-dbid-749c2bdc-e55c-48f5-bb20-4d797232fb98")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[4680],{667:(e,t,n)=>{n.d(t,{A:()=>l});n(62953);var a=n(96540),o=n(27776),r=n(38819);const l=()=>{const e=(0,r.PP)(),[t,n]=(0,o.XL)(),{reset:l}=(0,o.b8)(),c=(0,a.useCallback)((()=>{l(),(0,r.Z8)({...e,integrationsModalOpen:"true"}),n("true")}),[l,e]);return{isIntegrationsVisible:t,hideIntegrations:(0,a.useCallback)((()=>{(0,r.Z8)({...e,integrationsModalOpen:""}),n("")}),[e]),onIntegrationsClick:c}}},84680:(e,t,n)=>{n.r(t),n.d(t,{default:()=>Ee});n(62953);var a=n(96540),o=n(83199),r=n(5668),l=n(58168),c=(n(41393),n(81454),n(8711)),i=n(47767),s=n(68831),d=n(87659),u=n(43529),m=n(78862),p=n(29217),g=n(37944),h=n(3914);const f=e=>{let{...t}=e;const n=(0,g.A)(),r=(0,h.dg)();return a.createElement(p.A,{content:r?"Settings":"Space settings"},a.createElement(o.Flex,{padding:[0,1]},a.createElement(o.Button,(0,l.A)({flavour:"borderless",icon:"gear",neutral:!0,onClick:n,small:!0,"data-ga":"left-sidebar::click-space-settings::global-view","data-testid":"workspaceBar-manageSpace"},t))))};var E=n(50105),b=n(74293),v=n(71847),w=n(64754),x=n(76634),k=n(54972),C=n(96382);const y=()=>{const e=(0,C.ae)("email"),{handleOpenProfileModal:t}=(0,k.A)();return"loading"===e.state||e.state.hasError?null:a.createElement(o.IconButton,{flavour:"borderless","data-testid":"open-email-notification",cursor:"pointer",width:"20px",height:"20px",onClick:()=>t("notifications"),icon:e.contents.email?"notification_shortcut_enabled":"notification_shortcut_disabled",tooltip:"Email Notifications","data-ga":"left-sidebar::click-user-notification-settings::global-view",iconColor:"iconColor"})},I=(0,a.memo)(y);const A=(0,n(92155).A)(w.A),S=c.default.div.withConfig({displayName:"spaces__Separator",componentId:"sc-1gbju6k-0"})(["height:1px;width:",";background:",";"],(0,o.getSizeBy)(3),(0,o.getColor)("border")),F=(0,c.default)(o.Button).withConfig({displayName:"spaces__DocButton",componentId:"sc-1gbju6k-1"})(["&&{> .button-icon{width:20px;height:20px;}}"]),_=(0,c.default)(o.Button).withConfig({displayName:"spaces__ExpandButton",componentId:"sc-1gbju6k-2"})(["&&{> 
.button-icon{width:6px;height:9px;}}"]),N={onClickOut:()=>(0,v.H)("header-help","click-out-modal","global-view"),onCloseClick:()=>(0,v.H)("header-help","click-close","global-view"),onOpenIssueClick:()=>(0,v.H)("header-help","click-\u03bfpen-issue","global-view"),onContributeClick:()=>(0,v.H)("header-help","click-contribute","global-view"),onOpenBugClick:()=>(0,v.H)("header-help","click-bug","global-view"),onSupportClick:()=>(0,v.H)("header-help","click-support","global-view"),onVisitDocumentClick:()=>(0,v.H)("header-help","click-visit-doc","global-view"),onGoToDemoClick:()=>(0,v.H)("header-help","click-go-to-demo","global-view")},P=e=>{let{spacePanelCollapsed:t,toggleSpacePanel:n}=e;const r=(0,h.Pk)(),c=(0,h.vt)(),[g,,v,w]=(0,d.A)(),k=(0,i.Zp)(),C=(0,a.useCallback)((function(){let{slug:e}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return k("/spaces/".concat(e))}),[]);return a.createElement(o.Flex,{background:"sideBarMini","data-testid":"workspaceBar",padding:[3,0],column:!0,alignItems:"center",gap:6,height:"100vh",justifyContent:"between"},a.createElement(o.Flex,{column:!0,"data-testid":"workspaceBar-spaces-list",gap:4,alignItems:"center",overflow:"hidden"},a.createElement(u.A,{"data-testid":"workspaceBar-netdataLogo"}),t&&a.createElement(_,{"data-testid":"workspaceBar-expandButton",onClick:n,icon:"chevron_right_s",neutral:!0,flavour:"borderless",small:!0}),a.createElement(o.Flex,{column:!0,"data-testid":"workspaceBar-spacesList",gap:4,overflow:{vertical:"auto"}},r.map((e=>a.createElement(m.A,{testIdPrefix:"workspaceBar-space",key:e,spaceId:e,active:e===c})))),a.createElement(x.A,{permission:"user:CreateSpace"},a.createElement(S,{"data-testid":"workspaceBar-separator"})),a.createElement(p.A,{content:"Create a new Space",align:"right"},a.createElement(A,{permission:"user:CreateSpace",ifForbidden:"hide",icon:"plus",onClick:v,"data-testid":"workspaceBar-addSpace-button",feature:"CreateSpace",isStart:!0}))),a.createElement(o.Flex,{column:!0,"data-testid":"workspaceBar-actionsList",gap:4,alignItems:"center"},a.createElement(o.Flex,{column:!0,gap:1,alignItems:"center"},a.createElement(x.A,{permission:"user:ChangeSpaceRoomNotifications"},a.createElement(I,null)),a.createElement(o.Documentation,(0,l.A)({app:"cloud",demoUrl:s.A.demoUrl},N),(e=>a.createElement(p.A,{content:"Read documentation, engage with the community and let us know about any feature requests or bugs",align:"right"},a.createElement(F,{neutral:!0,flavour:"borderless",icon:"question",onClick:e,"data-ga":"header-help::click-help::global-view","data-testid":"documentation-button",title:"Need help?"})))),a.createElement(f,null)),a.createElement(b.A,null)),g&&a.createElement(E.A,{onClose:w,onDone:C}))};n(30067),n(93518),n(25440),n(17333),n(3064),n(98992),n(54520),n(72577);var T=n(22292),B=n(26655),R=n(9224),O=n(47762),j=n(87337),L=n(63129),D=n(45123),W=n(56915),H=n(24266),U=n(91069);const z=(0,c.default)(o.Icon).withConfig({displayName:"styled__TrashIcon",componentId:"sc-1hr9uxl-0"})(["&:hover{fill:",";opacity:0.6;}"],(0,o.getColor)("textFocus")),M=(0,c.default)(o.TextSmall).withConfig({displayName:"styled__UrlWrapper",componentId:"sc-1hr9uxl-1"})(["overflow-wrap:anywhere;"]),V=(0,c.default)(o.Icon).withConfig({displayName:"styled__StyledIcon",componentId:"sc-1hr9uxl-2"})(["transform:",";"],(e=>{let{right:t}=e;return t?"rotate(0)":"rotate(180deg)"})),G=e=>{/https?:\/\//.test(e)||(e="http://".concat(e)),location.replace(e)},J=function(e,t){let{openList:n,setSelectedId:o}=arguments.length>2&&void 
0!==arguments[2]?arguments[2]:{};const r=(0,T.uW)("isAnonymous"),l=(0,a.useRef)(),c=(0,h.UV)("ids"),i=(0,h.jw)(c);return(0,a.useCallback)((async()=>{if(o(e),l.current||(l.current=await(async(e,t)=>{let{spaces:n=[],id:a,isAnonymous:o=!1}=t;if(!o){const{data:e}=await(0,R.ys)(a);let t;if(e.length&&(t=await Promise.any(n.map((t=>B.A.get((0,U.t)({spaceId:t.id})).then((n=>{let{data:a}=n;return Promise.any(a.map((n=>(0,L.uQ)({roomId:n.id,spaceId:t.id}).then((a=>{let{data:o}=a;const r=o.find((t=>{let{id:n}=t;return e.includes(n)}));if(!r)throw new Error("can't find matching node");return"".concat(window.envSettings.cloudUrl,"/spaces/").concat(t.slug,"/rooms/").concat(n.slug,"/nodes/").concat(r.id)})))))}))))).then((e=>e)).catch((()=>{}))),t)return t}const r=e.filter((e=>!/netdata\.cloud\//.test(e)));if(r.length)return await Promise.any(r.map((e=>{let t=/^https?:\/\//.test(e)?e:"http://".concat(e);return t=t.replace(/\/+$/,""),B.A.get("".concat(t,"/api/v1/registry?action=hello")).then((()=>t))}))).then((e=>e)).catch((()=>{}))})(t,{spaces:i,id:e,isAnonymous:r})),o(),!l.current)return n();G(l.current)}),[t,i])},Y=e=>{let{id:t,selectedId:n,setSelectedId:r}=e;const l=(0,O.xY)(t,"name"),[c,,i,s]=(0,d.A)(),u=(0,j.rE)(),m=(0,O.xY)(t,"urls"),p=J(t,m,{openList:i,setSelectedId:r}),g=(0,a.useCallback)((()=>{c?s():p()}),[c,p]),h=n===t;return a.createElement(a.Fragment,null,a.createElement(D.A,{onClick:g,isSidebar:!0,padding:[1,2,1,4],testid:"visitedNodes-".concat(l),disabled:!!n&&!h,selected:h,loading:h,iconColor:"textNoFocus",textColor:"textNoFocus",Wrapper:o.TextSmall},l),c&&a.createElement(o.Flex,{column:!0,margin:[1,3],padding:[2],border:{side:"all",color:"placeholder"},round:2},a.createElement(o.Text,{textAlign:"center"},"We couldn't connect to any of your instances, here is the list:"),m.map((e=>a.createElement(D.A,{key:e,onClick:()=>G(e),isSidebar:!0,testid:"visitedNodes-node-".concat(e),actions:a.createElement(o.Flex,{flex:!1,margin:[0,0,0,1]},a.createElement(z,{name:"trashcan",size:"small",color:"textNoFocus",onClick:n=>{n.stopPropagation(),u(t,e)},width:"16px",height:"16px"})),padding:[2],Wrapper:M,iconColor:"textNoFocus",textColor:"textNoFocus"},e)))))},Z=()=>{const e=(0,j.iw)(),[t,n]=(0,d.A)(e.length>0);(0,a.useEffect)((()=>{t||n()}),[e.length]);const[r,l]=(0,a.useState)(""),c=(0,j.vS)(r),[i,s]=(0,a.useState)();return e.length?a.createElement(W.A,{isOpen:t,toggleOpen:n,label:a.createElement(o.Flex,{padding:[1,0],flex:!0,justifyContent:"between",alignItems:"center"},a.createElement(o.Flex,{alignItems:"center",gap:2},a.createElement(o.Icon,{name:"node_hollow",color:"textNoFocus",width:"14px",height:"14px"}),a.createElement(o.TextSmall,{color:"textNoFocus"},"Visited Nodes")),a.createElement(V,{right:!t,name:"chevron_down",size:"small",color:"textNoFocus"})),headerTestId:"visitedNodes"},e.length>3&&a.createElement(o.Flex,{padding:[1,2,2,4]},a.createElement(o.SearchInput,{"data-testid":"search-visited-nodes-input",onChange:l,value:r,size:"tiny"})),a.createElement(o.Flex,{padding:[0,0,2],column:!0},c.length?c.map((e=>a.createElement(Y,{key:e,id:e,selectedId:i,setSelectedId:s}))):a.createElement(o.Text,{padding:[3,5]},"No nodes match your search"))):null},q=(0,a.memo)((()=>a.createElement(H.Ay,{fallback:""},a.createElement(a.Suspense,{fallback:""},a.createElement(Z,null)))));var Q=n(79412),X=n(77279),$=n(46741);const K=e=>t=>{const n=(0,h.vt)();return a.createElement(e,(0,l.A)({},t,{key:n}))};var ee=n(12938);const 
te={isSidebar:!0,padding:[1,2,1,4],textColor:"menuItem",Wrapper:o.TextSmall},ne=K((()=>{const e=(0,$.JT)("room:Create"),t=(0,$.JT)("room:Read"),[n,r]=(0,d.A)(t),[l,,c,i]=(0,d.A)(!1),s=(0,a.useCallback)((t=>{t.stopPropagation(),e&&c()}),[e]);return a.createElement(a.Fragment,null,a.createElement(W.A,{"data-testid":"workspaceRooms-menuList",disabled:!t,headerTestId:"workspaceRooms-warRooms",isOpen:n,label:a.createElement(o.Flex,{padding:[1,0],margin:[0,0,1,0],flex:!0,justifyContent:"between",alignItems:"center",height:"24px"},a.createElement(o.Flex,{alignItems:"center",gap:2},a.createElement(o.Icon,{name:"space_new",color:"menuItem",width:"14px",height:"14px"}),a.createElement(o.TextSmall,{color:"menuItem"},"Rooms")),a.createElement(o.Flex,{alignItems:"center",gap:2},a.createElement(p.A,{content:"Create a new room",align:"right"},a.createElement(w.A,{permission:"room:Create",tiny:!0,icon:"plus",onClick:s,"data-testid":"workspaceRooms-addWarRoom-button"})),a.createElement(X.v,{right:!n,name:"chevron_down",size:"small",color:"textLite"}))),toggleOpen:r},a.createElement(o.Flex,{column:!0,"data-testid":"workspaceRooms-warRoomsList",padding:[0,0,1]},a.createElement(ee.A,te))),l&&a.createElement(Q.n,{onClose:i,isSubmodal:!1}))}));var ae=n(39225),oe=n(67602);const re=(0,c.default)(o.Flex).attrs({alignItems:"center",justifyContent:"center",width:10,height:4,round:.5,background:"sideBarMini"}).withConfig({displayName:"styled__AdminPill",componentId:"sc-1ps23b1-0"})(["opacity:0;",":hover &{opacity:1;}"],D.G),le=(0,ae.A)((()=>n.e(2007).then(n.bind(n,62007))),"CurrentPlan"),ce=e=>{let{children:t,onToggle:n}=e;return a.createElement(o.Flex,{column:!0,gap:1},a.createElement(o.Flex,{justifyContent:"between",alignItems:"center"},a.createElement(o.Flex,{alignItems:"center"},t),a.createElement(o.Button,{onClick:n,icon:"chevron_left",neutral:!0,flavour:"hollow",small:!0})),a.createElement(a.Suspense,{fallback:""},a.createElement(le,null)))};var ie=n(38413),se=n(53285),de=n(667);const ue=(0,ae.A)((()=>Promise.all([n.e(7436),n.e(7170)]).then(n.bind(n,57436)).then((e=>({default:e.TrialWarning})))),"TrialWarning"),me=(0,c.default)(W.N).attrs({padding:[2],color:"textLite"}).withConfig({displayName:"spacePanel__ListHeader",componentId:"sc-1275zgk-0"})([""]),pe=e=>a.createElement(o.Flex,(0,l.A)({alignItems:"center",gap:2},e),a.createElement(re,null,a.createElement(o.TextFemto,{strong:!0,color:"text"},"ADMIN")),a.createElement(o.Flex,{opacity:"medium"},a.createElement(o.Icon,{name:"padlock",size:"small",width:"18px",height:"18px",color:"text"}))),ge={iconWidth:"14px",iconHeight:"14px",padding:[1,2],gap:2,isSidebar:!0,textColor:"menuItem",iconColor:"menuItem",Wrapper:o.TextSmall},he=e=>{let{toggleSpacePanel:t}=e;const n=(0,h.ap)(),[r,,c,i]=(0,d.A)(),[,s]=(0,oe.A)("manageInvitations"),u=(0,h.dg)(),{onIntegrationsClick:m}=(0,de.A)();return 
a.createElement(a.Fragment,null,r&&a.createElement(ie.A,{title:"Integrations",onClose:i}),a.createElement(W.A,{"data-testid":"spacesPanel",isOpen:!0,label:a.createElement(ce,{onToggle:t},n.name),headerTestId:"spacesPanel-activeSpaceHeader",Header:me},a.createElement(o.Flex,{column:!0,padding:[2,0,1],"data-testid":"spacesPanel-menuList"},a.createElement(a.Suspense,{fallback:""},a.createElement(ue,{flavour:"sidebar"})),a.createElement(D.A,(0,l.A)({onClick:m,testid:"agentDashboard-addIntegrations",dataGa:"integrations::click-add-integrations::sideMenu",icon:"integrations"},ge),"Integrations"),u?null:a.createElement(a.Fragment,null,a.createElement(se.A,{permission:"node:Create"},(e=>a.createElement(D.A,(0,l.A)({actions:e?null:a.createElement(pe,{"data-testid":"adminWarning-notAdmin"}),disabled:!e,onClick:c,testid:"spacesPanel-claimNodes",icon:"nodes_hollow"},ge),"Connect Nodes"))),a.createElement(se.A,{permission:"space:InviteUser"},(e=>a.createElement(D.A,(0,l.A)({actions:e?null:a.createElement(pe,{"data-testid":"adminWarning-cannotInvite"}),disabled:!e,onClick:s,testid:"spacesPanel-inviteUsers",icon:"user"},ge),"Invite Users")))))))},fe=e=>{let{spacePanelCollapsed:t,toggleSpacePanel:n}=e;return a.createElement(o.Collapsible,{width:56,background:"sideBar",border:{side:"right",color:"borderSecondary"},open:!t,direction:"horizontal"},a.createElement(o.Flex,{flex:!0,width:56,column:!0,overflow:{vertical:"hidden"},padding:[2,0],position:"relative"},a.createElement(he,{toggleSpacePanel:n}),a.createElement(o.Flex,{overflow:{vertical:"auto"},flex:!0,column:!0},a.createElement(ne,null),a.createElement(q,null))))},Ee=(0,a.memo)((()=>{const[e,t]=(0,r.tF)("spacePanelCollapsed"),{isIntegrationsVisible:n,hideIntegrations:l}=(0,de.A)(),c=(0,a.useCallback)((()=>t(!e)),[e]);return(0,a.useLayoutEffect)((()=>{const e=setTimeout((()=>window.dispatchEvent(new Event("resize"))),400);return()=>clearTimeout(e)}),[e]),a.createElement(o.Flex,null,a.createElement(P,{spacePanelCollapsed:e,toggleSpacePanel:c}),a.createElement(fe,{spacePanelCollapsed:e,toggleSpacePanel:c}),n&&a.createElement(ie.A,{title:"Integrations",onClose:l,flavour:"integrations"}))}))},84428:(e,t,n)=>{var a=n(78227)("iterator"),o=!1;try{var r=0,l={next:function(){return{done:!!r++}},return:function(){o=!0}};l[a]=function(){return this},Array.from(l,(function(){throw 2}))}catch(c){}e.exports=function(e,t){try{if(!t&&!o)return!1}catch(c){return!1}var n=!1;try{var r={};r[a]=function(){return{next:function(){return{done:n=!0}}}},e(r)}catch(c){}return n}},87290:(e,t,n)=>{var a=n(50516),o=n(19088);e.exports=!a&&!o&&"object"==typeof window&&"object"==typeof document},50516:e=>{e.exports="object"==typeof Deno&&Deno&&"object"==typeof Deno.version},19088:(e,t,n)=>{var a=n(24475),o=n(44576);e.exports="process"===o(a.process)},16193:(e,t,n)=>{var a=n(79504),o=Error,r=a("".replace),l=String(new o("zxcasd").stack),c=/\n\s*at [^:]*:[^\n]*/,i=c.test(l);e.exports=function(e,t){if(i&&"string"==typeof e&&!o.prepareStackTrace)for(;t--;)e=r(e,c,"");return e}},80747:(e,t,n)=>{var a=n(66699),o=n(16193),r=n(24659),l=Error.captureStackTrace;e.exports=function(e,t,n,c){r&&(l?l(e,t):a(e,"stack",o(n,c)))}},24659:(e,t,n)=>{var a=n(79039),o=n(6980);e.exports=!a((function(){var e=new Error("a");return!("stack"in e)||(Object.defineProperty(e,"stack",o(1,7)),7!==e.stack)}))},77584:(e,t,n)=>{var a=n(20034),o=n(66699);e.exports=function(e,t){a(t)&&"cause"in t&&o(e,"cause",t.cause)}},32603:(e,t,n)=>{var a=n(655);e.exports=function(e,t){return void 
0===e?arguments.length<2?"":t:a(e)}},10916:(e,t,n)=>{var a=n(24475),o=n(80550),r=n(94901),l=n(92796),c=n(33706),i=n(78227),s=n(87290),d=n(50516),u=n(96395),m=n(77388),p=o&&o.prototype,g=i("species"),h=!1,f=r(a.PromiseRejectionEvent),E=l("Promise",(function(){var e=c(o),t=e!==String(o);if(!t&&66===m)return!0;if(u&&(!p.catch||!p.finally))return!0;if(!m||m<51||!/native code/.test(e)){var n=new o((function(e){e(1)})),a=function(e){e((function(){}),(function(){}))};if((n.constructor={})[g]=a,!(h=n.then((function(){}))instanceof a))return!0}return!t&&(s||d)&&!f}));e.exports={CONSTRUCTOR:E,REJECTION_EVENT:f,SUBCLASSING:h}},90537:(e,t,n)=>{var a=n(80550),o=n(84428),r=n(10916).CONSTRUCTOR;e.exports=r||!o((function(e){a.all(e).then(void 0,(function(){}))}))},17145:(e,t,n)=>{var a=n(46518),o=n(1625),r=n(42787),l=n(52967),c=n(77740),i=n(2360),s=n(66699),d=n(6980),u=n(77584),m=n(80747),p=n(72652),g=n(32603),h=n(78227)("toStringTag"),f=Error,E=[].push,b=function(e,t){var n,a=o(v,this);l?n=l(new f,a?r(this):v):(n=a?this:i(v),s(n,h,"Error")),void 0!==t&&s(n,"message",g(t)),m(n,b,n.stack,1),arguments.length>2&&u(n,arguments[2]);var c=[];return p(e,E,{that:c}),s(n,"errors",c),n};l?l(b,f):c(b,f,{name:!0});var v=b.prototype=i(f.prototype,{constructor:d(1,b),message:d(1,""),name:d(1,"AggregateError")});a({global:!0,constructor:!0,arity:2},{AggregateError:b})},30067:(e,t,n)=>{n(17145)},93518:(e,t,n)=>{var a=n(46518),o=n(69565),r=n(79306),l=n(97751),c=n(36043),i=n(1103),s=n(72652),d=n(90537),u="No one promise resolved";a({target:"Promise",stat:!0,forced:d},{any:function(e){var t=this,n=l("AggregateError"),a=c.f(t),d=a.resolve,m=a.reject,p=i((function(){var a=r(t.resolve),l=[],c=0,i=1,p=!1;s(e,(function(e){var r=c++,s=!1;i++,o(a,t,e).then((function(e){s||p||(p=!0,d(e))}),(function(e){s||p||(s=!0,l[r]=e,--i||m(new n(l,u)))}))})),--i||m(new n(l,u))}));return p.error&&m(p.value),a.promise}})}}]); \ No newline at end of file diff --git a/src/web/gui/v2/4958.5969fedc1ff7dc82775e.chunk.js b/src/web/gui/v2/4958.5969fedc1ff7dc82775e.chunk.js deleted file mode 100644 index 51443523a..000000000 --- a/src/web/gui/v2/4958.5969fedc1ff7dc82775e.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="349be8c4-9658-4a28-87e0-147f3b4fa910",e._sentryDebugIdIdentifier="sentry-dbid-349be8c4-9658-4a28-87e0-147f3b4fa910")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[4958],{62232:(e,t,n)=>{"use strict";n.d(t,{A:()=>g});var a=n(58168),l=(n(17333),n(41393),n(98992),n(54520),n(81454),n(8711)),o=n(68090),r=n.n(o),i=n(96540),c=n(83199);const s=(0,l.default)(c.Box).withConfig({displayName:"breadcrumbs__StyledItemContainer",componentId:"sc-3u39st-0"})([""]),m=(0,l.css)(["&:hover{color:",";}"],(e=>{let{withHover:t,theme:n}=e;return t&&(0,c.getColor)("success")({theme:n})})),d=(0,l.default)(c.Text).withConfig({displayName:"breadcrumbs__StyledText",componentId:"sc-3u39st-1"})(["",""],m),u=(0,l.default)(c.TextSmall).withConfig({displayName:"breadcrumbs__StyledTextSmall",componentId:"sc-3u39st-2"})(["",""],m),g=e=>{let{items:t,isBig:n,showBackButton:l=!0,testid:o="",...m}=e;const g=(0,i.useMemo)((()=>{if(null===t||void 
0===t||!t.length||!l)return null;return r()(t.filter((e=>{let{onClick:t}=e;return!!t}))).onClick}),[t,l]);if(null===t||void 0===t||!t.length)return null;const p=n?d:u;return i.createElement(c.Flex,(0,a.A)({gap:4},m),l&&i.createElement(c.Button,{onClick:g,icon:"chevron_left",label:"Back",neutral:!0,flavour:"hollow",small:!0,padding:[0,2,0,1],textTransform:"uppercase","data-testid":"".concat(o,"-breadcrumbs-backButton")}),i.createElement(c.Flex,{gap:2,alignItems:"center"},t.map(((e,t)=>{let{isDisabled:n,name:l,onClick:r}=e;return i.createElement(s,(0,a.A)({key:t,alignItems:"center"},r&&{cursor:"pointer",onClick:r},{"data-testid":"".concat(o,"-breadcrumbs-level-").concat(t)}),i.createElement(p,{color:n&&"textLite","data-testid":"".concat(o,"-breadcrumbs-level-").concat(t),withHover:!!r},0!==t&&" / ",l))}))))}},26751:(e,t,n)=>{"use strict";n.d(t,{A:()=>r});var a=n(58168),l=n(96540),o=n(83199);const r=e=>{let{message:t,title:n,footer:r,...i}=e;const c=(null===i||void 0===i?void 0:i["data-testid"])||"functionError";return l.createElement(o.Flex,(0,a.A)({alignItems:"center",column:!0,"data-testid":c,flex:!0,gap:3,justifyContent:"center",padding:[0,20]},i),l.createElement(o.H3,{"data-testid":"".concat(c,"-title")},n),l.createElement(o.TextBig,{color:"textDescription","data-testid":"".concat(c,"-message")},t),r)}},54856:(e,t,n)=>{"use strict";n.d(t,{A:()=>m});var a=n(58168),l=n(96540),o=n(84976),r=n(83199),i=n(4659),c=n(46741),s=n(27994);const m=function(){let{containerProps:e={},...t}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const n=(0,c.JT)("billing:ReadAll"),{url:m}=(0,s.A)();return m?l.createElement(r.Flex,(0,a.A)({background:"sideBarMini",border:{side:"all",color:"border"},padding:[1,2],round:!0},e),l.createElement(i.A,(0,a.A)({align:"bottom",as:o.N_,boxProps:{as:r.Flex},color:"text",Component:r.TextMicro,content:n?"Upgrade your plan in order to use this feature":"You have no permissions to manage billing",disabled:!n,hoverColor:"textFocus",showToolTip:!0,strong:!0,to:m},t),"Upgrade now!")):null}},54518:(e,t,n)=>{"use strict";n.d(t,{A:()=>f});var a=n(96540),l=n(58168),o=n(83199),r=n(29217);const i=e=>{let{description:t,isRequired:n,title:l,...i}=e;const c=i["data-testid"]?"".concat(i["data-testid"],"-label"):"fieldLabel";return a.createElement(o.Flex,{"data-testid":"".concat(c,"-container"),gap:1},a.createElement(o.TextSmall,{color:"textLite","data-testid":c},l,n&&" *"),t&&a.createElement(r.A,{align:"top",content:t,"data-testid":"".concat(c,"-info"),plain:!0},a.createElement(o.Icon,{color:"textLite",name:"information",size:"small"})))},c=e=>{let{"data-testid":t="input",description:n,isRequired:r,onChange:c,title:s,...m}=e;return a.createElement(o.Flex,{column:!0,"data-testid":t,flex:"grow",gap:1},a.createElement(i,{"data-testid":t,description:n,isRequired:r,title:s}),a.createElement(o.TextInput,(0,l.A)({"data-testid":"".concat(t,"-field"),size:"tiny",onChange:e=>{let{target:t}=e;return c(t.value)}},m)))};n(41393),n(81454);const s=e=>{var t;let{"data-testid":n="select",description:r,getDataGa:c,fields:s,id:m,isRequired:d,onChange:u,secrets:g,setSecrets:p,title:E,...h}=e;return a.createElement(o.Flex,{column:!0,"data-testid":n,flex:"grow",gap:1},a.createElement(i,{"data-testid":n,description:r,isRequired:d,title:E}),a.createElement(o.Select,(0,l.A)({"data-testid":"".concat(n,"-field"),menuPortalTarget:document.body,onChange:e=>u(e),styles:{size:"tiny"},menuPlacement:"auto"},h)),Object.values((null===s||void 0===s?void 0:s[null===(t=h.value)||void 0===t?void 
0:t.value])||{}).map((e=>{let{getValue:t,id:o,onChange:r,...i}=e;return a.createElement(b,(0,l.A)({"data-ga":c("".concat(m,"-").concat(o,"-").concat(i.component)),"data-testid":"".concat(n,"-").concat(o),key:o,id:o,onChange:r({id:m,setSecrets:p,subsetId:o}),value:t({id:m,secrets:g,subsetId:o})},i))})))};n(14905),n(98992),n(8872),n(62953);var m=n(90179),d=n.n(m),u=n(97200),g=n.n(u),p=n(30960),E=n(63450);const h={default:()=>null,pairs:e=>{let{componentPairs:t,"data-testid":n="pairs",description:l,getDataGa:r,id:c,isRequired:s,placeholder:m,setSecrets:u,title:h}=e;const[b,v]=(0,a.useState)({});(0,a.useEffect)((()=>{const e=Object.values(b).reduce(((e,t)=>{let{key:n,value:a}=t;return n?{...e,[n]:a||""}:e}),{});u((t=>Object.keys(e).length?{...t,[c]:e}:d()(t,c)))}),[b]);return a.createElement(o.Flex,{column:!0,"data-testid":n,gap:1},a.createElement(o.Flex,{alignItems:"center",justifyContent:"between"},a.createElement(i,{"data-testid":n,description:l,isRequired:s,title:h}),t.map((e=>{const l=(0,p.$Q)(e);return a.createElement(E.ro,{"data-ga":r("".concat(c,"-").concat(l,"-add")),"data-testid":"".concat(n,"-").concat(l,"Add"),key:l,label:1===t.length?"Add":"Add ".concat(l),onClick:()=>(e=>v((t=>({...t,[g()("pair_")]:{key:"",components:e,value:""}}))))(e)})}))),Object.entries(b).map((e=>{let[t,{key:l,components:i,value:s}]=e;const[u,g]=i,E=(0,p.$Q)(i);return a.createElement(o.Flex,{alignItems:"end",key:"".concat(E,"-").concat(t),gap:2},a.createElement(f,{component:u,"data-ga":r("".concat(c,"-").concat(E,"-key")),"data-testid":"".concat(n,"-").concat(E,"Key"),onChange:e=>v((n=>({...n,[t]:{...n[t],key:e}}))),value:l,placeholder:m,title:"Key"}),a.createElement(f,{component:g,"data-ga":r("".concat(c,"-").concat(E,"-value")),"data-testid":"".concat(n,"-").concat(E,"Value"),onChange:e=>v((n=>({...n,[t]:{...n[t],value:e}}))),value:s,placeholder:m,title:"Value"}),a.createElement(o.Button,{flavour:"borderless",icon:"trashcan",margin:[0,0,1,0],neutral:!0,onClick:()=>{v((e=>d()(e,t)))}}))})))},input:c,select:s},b=e=>{let{component:t,...n}=e;const l=h[t]||h.default;return a.createElement(l,n)},f=b},63450:(e,t,n)=>{"use strict";n.d(t,{$m:()=>s,He:()=>o,Jg:()=>g,MU:()=>r,Oj:()=>m,W6:()=>c,fn:()=>d,id:()=>u,ro:()=>i});var a=n(8711),l=n(83199);const o=(0,a.default)(l.Flex).attrs({alignItems:"center",color:"text",gap:1,hoverColor:"text"}).withConfig({displayName:"styled__AnchorComponent",componentId:"sc-wc4x41-0"})(["&,&:hover{text-decoration:none;}"]),r=(0,a.default)(l.Icon).attrs({height:"130px",width:"130px"}).withConfig({displayName:"styled__BlurredIcon",componentId:"sc-wc4x41-1"})(["filter:blur(70px);position:absolute;left:0;top:0;opacity:0.5;"]),i=(0,a.default)(l.Button).attrs((e=>({height:"22px",icon:"plus",padding:[.5,2,.5,1],small:!0,width:"auto",...e}))).withConfig({displayName:"styled__IntegrationAction",componentId:"sc-wc4x41-2"})(["&& > span{font-weight:bold;margin-left:4px;}"]),c=(0,a.default)(l.TextSmall).withConfig({displayName:"styled__LearnMoreTest",componentId:"sc-wc4x41-3"})(["position:relative;"]),s=(0,a.default)(l.ModalContent).attrs({height:{base:150,max:150,min:45},overflow:{vertical:"auto"},width:{base:121,max:140,min:70}}).withConfig({displayName:"styled__ModalContent",componentId:"sc-wc4x41-4"})(["box-shadow:0 18px 28px rgb(9 30 66 / 15%),0 0 1px rgb(9 30 66 / 
31%);"]),m=(0,a.default)(l.ModalCloseButton).attrs({color:"text",height:"16px",width:"16px"}).withConfig({displayName:"styled__ModalClose",componentId:"sc-wc4x41-5"})(["&:hover{fill:",";}"],(0,l.getColor)("selected")),d=(0,a.default)(l.Flex).attrs((e=>{let{hasBorder:t,...n}=e;return{...t?{border:{side:"bottom",color:"disabled"}}:{},column:!0,padding:[3,4],...n}})).withConfig({displayName:"styled__ModalSection",componentId:"sc-wc4x41-6"})([""]),u=(0,a.default)(l.Icon).attrs({name:"warning_triangle",height:"18px",width:"18px",color:["yellow","amber"]}).withConfig({displayName:"styled__WarningIcon",componentId:"sc-wc4x41-7"})(["position:absolute;top:0;right:0;z-index:1;"]),g=(0,a.default)(l.Icon).attrs({height:"12px",width:"12px",color:"white"}).withConfig({displayName:"styled__SystemIcon",componentId:"sc-wc4x41-8"})(["position:relative;top:2px;"])},99904:(e,t,n)=>{"use strict";n.d(t,{A:()=>c});n(62953);var a=n(96540),l=n(3914),o=n(25624);var r=n(87659);const i=e=>{const t=new Date(e||void 0).toDateString();return"Invalid Date"!==t?t:null},c=()=>{var e;const t=(0,l.ap)(),n=(0,a.useMemo)((()=>"".concat("dismissedBumpedWarningKey","_").concat(null===t||void 0===t?void 0:t.id)),[null===t||void 0===t?void 0:t.id]),[c,,,s]=(0,r.A)(!localStorage.getItem(n)),{trialEndsAtRaw:m}=(0,o.A)(),d=m&&"EarlybirdAndCommunitySunset"==(null===t||void 0===t||null===(e=t.metadata)||void 0===e?void 0:e.joinTrialCode),u=c&&m&&d,g=d?"EARLYB25":null,p=(0,a.useCallback)((()=>{localStorage.setItem(n,!0),s()}),[s,n]);return{isModalVisible:u,isEarlybirdAndCommunitySunset:d,coupon:g,trialEndsAt:i(m),onClose:p}}},34641:(e,t,n)=>{"use strict";n.d(t,{A:()=>E});n(9391),n(62953);var a=n(96540),l=n(63950),o=n.n(l),r=n(83199),i=n(19673),c=n(71835),s=n(92155),m=n(50876),d=n(63314),u=n(87659),g=n(97118);const p=(0,s.A)(r.Button),E=e=>{let{onConfirm:t,onDecline:n=o(),onCancellingEnd:l=o()}=e;const s=(0,i.M4)(),[E,h]=(0,c.A)(),{id:b}=(0,g.A)(),{sendLog:f,isReady:v}=(0,m.A)(),[x,,y,C]=(0,u.A)(),w=(0,a.useCallback)((()=>{y(),s({productId:b}).then((()=>{E({header:"Successfully canceled subscription",text:"You are now on Community plan"}),f({feature:"TrialOptOut",isSuccess:!0})})).catch((()=>{h({header:"Failed to cancel the subscription",text:"Remained on Business plan"}),f({feature:"TrialOptOut",isFailure:!0,error:"Failed to cancel the subscription"})})).finally((()=>{l(),C()}))}),[b,v]),A=(0,a.useCallback)((()=>{t?t():w()}),[t,w]);return a.createElement(r.Modal,{backdropProps:{backdropBlur:!0}},a.createElement(d.Ay,{feature:"TrialOptOut"},a.createElement(r.ModalContent,{width:{base:140}},a.createElement(r.ModalHeader,null,a.createElement(r.Flex,{gap:2,alignItems:"center"},a.createElement(r.H4,null,"Go to Community plan"))),a.createElement(r.ModalBody,null,x?a.createElement(r.Flex,{height:"100px"},a.createElement(r.TextBig,null,"Changing billing plan...")):a.createElement(r.Flex,{gap:2,column:!0},a.createElement(r.H3,null,"Are You Sure?"),a.createElement(r.TextBig,null,"It looks like you have chosen to opt-out of your free 30-day business trial. 
Are you sure you do not want to experience all the features Netdata has to offer?"),a.createElement(r.TextBig,null,"By opting out, you will switch to the community plan immediately."))),a.createElement(r.ModalFooter,null,a.createElement(r.Flex,{justifyContent:"end",gap:4,padding:[1,2]},a.createElement(p,{feature:"TrialOptOut",label:"Yes, I am sure!",flavour:"hollow",small:!0,onClick:A,disabled:!b||x,textTransform:""}),a.createElement(p,{feature:"TrialOptOut",label:"No, I want the trial!",small:!0,onClick:n,disabled:x,textTransform:""}))))))}},35454:(e,t,n)=>{"use strict";n.d(t,{$B:()=>r,TB:()=>i,W1:()=>l,ml:()=>o,ue:()=>a});const a={default:"successSemi",warning:"warningSemi",critical:"errorSemi"},l={default:{background:"successSemi",border:"success"},warning:{background:"warningSemi",border:"warning"},critical:{background:"errorSemi",border:"error"}},o=[30,15,3,2,1],r="dismissedTrialWelcome",i="dismissedTrialWarningDate"},93476:(e,t,n)=>{"use strict";n.d(t,{A:()=>g});var a=n(58168),l=n(96540),o=n(83199),r=n(63950),i=n.n(r),c=n(25624),s=n(99904),m=n(24864);const d={banner:{color:"main"},sidebar:{lineHeight:"1.6",color:"main"},freePlanUpgrade:{lineHeight:"1.6",color:"main"},billing:{color:"textLite"}},u=e=>{let{canUpgrade:t,onUpdateClick:n=i(),children:r,...c}=e;return t?l.createElement(o.Box,(0,a.A)({"data-testid":"upgrade-to-business-banner",onClick:n,as:o.Text,cursor:"pointer",textDecoration:"underline",color:"main"},c),r):null},g=e=>{let{flavour:t,couponRemainingDays:n,onUpdateClick:r=i()}=e;const{daysRemaining:g,canUpgrade:p,trialEndsAt:E}=(0,c.A)(),{isEarlybirdAndCommunitySunset:h}=(0,s.A)(),b=(0,l.useMemo)((()=>({isCoupon:n>0,isEarlybirdAndCommunitySunset:h,isBanner:"banner"==t,isSidebar:"sidebar"==t,isBilling:"billing"==t,isFreePlanUpgrade:"freePlanUpgrade"==t})),[t,n]);return l.createElement(o.Flex,{justifyContent:b.isBilling?"start":"center",alignItems:"center",width:"100%",gap:2},b.isCoupon?l.createElement(o.Flex,{column:!0},l.createElement(o.Text,(0,a.A)({},d[t],{fontSize:"10px",strong:!0}),"POST BLACK FRIDAY OFFER"),l.createElement(m.Te,(0,a.A)({},d[t],{fontSize:"38px",lineHeight:.8,strong:!0}),"50% off")):b.isEarlybirdAndCommunitySunset?l.createElement(o.Flex,{column:!0,gap:1,alignItems:"center"},l.createElement(o.Text,(0,a.A)({},d[t],{textAlign:"center",strong:!0},b.isBanner?{}:{fontSize:"10px"}),"Thank you for your support!"," ",b.isBanner?l.createElement(u,{canUpgrade:p,onUpdateClick:r},"Upgrade"):null),l.createElement(o.Text,(0,a.A)({},d[t],{color:"primary",fontSize:"22px",lineHeight:.8,strong:!0}),"25% Lifetime off")):b.isFreePlanUpgrade?l.createElement(o.Text,d[t],"Upgrade your plan for unlimited access and Business features."):l.createElement(o.Text,d[t],"You have ",l.createElement(o.Text,(0,a.A)({strong:!0},d[t]),"".concat(g," days"))," ","left to explore all the features of Netdata Business."," ",b.isBilling&&l.createElement(l.Fragment,null,"Trial ends at"," ",l.createElement(o.Text,(0,a.A)({strong:!0},d[t]),E),"."," "),b.isBanner?l.createElement(u,{canUpgrade:p,onUpdateClick:r},"Consider upgrading for unlimited access."):l.createElement(l.Fragment,null,"Consider upgrading for unlimited access.")))}},24864:(e,t,n)=>{"use strict";n.d(t,{PL:()=>i,Te:()=>c,bg:()=>r});var a=n(8711),l=n(83199),o=n(35454);const r=(0,a.default)(l.Flex).attrs({position:"relative"}).withConfig({displayName:"styled__TrialWarningSidebar",componentId:"sc-66x250-0"})(["background-color:",";border-width:1px;border-style:dashed;border-color:",";border-radius:2px;"],(e=>{var 
t;let{type:n}=e;return(0,l.getColor)(null===(t=o.W1[n])||void 0===t?void 0:t.background)}),(e=>{var t;let{type:n}=e;return(0,l.getColor)(null===(t=o.W1[n])||void 0===t?void 0:t.border)})),i=(0,a.default)(l.Button).withConfig({displayName:"styled__TrialUpgradeButton",componentId:"sc-66x250-1"})(["flex:auto;"]),c=(0,a.default)(l.Text).attrs({strong:!0,lineHeight:.8}).withConfig({displayName:"styled__PromoText",componentId:"sc-66x250-2"})(["background-color:",";background-image:linear-gradient( 43deg,"," 0%,"," 46%,"," 100% );-webkit-background-clip:text;-webkit-text-fill-color:transparent;"],(0,l.getColor)("primary"),(0,l.getColor)(["blue","aquamarine"]),(0,l.getColor)(["purple","mauve"]),(0,l.getColor)("primary"))},66732:(e,t,n)=>{"use strict";n.d(t,{A:()=>s});var a=n(96540),l=n(63950),o=n.n(l),r=n(83199),i=n(50876);const c={default:"Or you can opt to downgrade immediately",billing:"Or you can opt to downgrade immediately"},s=e=>{let{flavour:t="default",onOptOutClick:n=o(),...l}=e;const{sendLog:s,isReady:m}=(0,i.A)(),d=(0,a.useCallback)((()=>{n(),s({feature:"TrialOptOut",isStart:!0})}),[m]);return a.createElement(r.Text,l,"After the trial, you'll automatically switch to the free Community plan."," ",a.createElement(r.Box,{"data-testid":"upgrade-to-business-banner",onClick:d,as:r.Text,cursor:"pointer",textDecoration:"underline",color:"primary"},c[t]),".")}},25624:(e,t,n)=>{"use strict";n.d(t,{A:()=>h});n(62953);var a=n(96540),l=n(46741),o=n(5668),r=n(22292),i=(n(8159),n(98992),n(37550),n(16074)),c=n(6593);var s=n(19673),m=n(50503),d=n(35454),u=n(42728),g=n(17170),p=n(93155);const E=e=>{const t=new Date(e||void 0).toLocaleDateString();return"Invalid Date"!==t?t:null},h=()=>{const e=(0,g.A)(),{loaded:t,value:n,refresh:h}=(0,s.JN)(),{isFailure:b}=(0,m.A)(),{slug:f,trialEndsAt:v,paymentProvider:x}=n||{},y=(0,a.useMemo)((()=>(e=>{if(!e)return null;const t=new Date(e)-new Date;return Math.ceil(t/864e5)})(v)),[v]),C=t&&!!v&&!e,w=(0,l.JT)("billing:Manage"),[A]=(0,o.ng)("trialModalDismissed"),T=p.bO&&w&&(C||b)&&!localStorage.getItem(d.$B)&&!A,[S,I]=(0,a.useState)(localStorage.getItem(d.TB)),k=(0,a.useMemo)((()=>p.bO&&C),[C]),[F,P]=(0,a.useState)(),M=(D=y)>15?"default":D>5?"warning":"critical";var D;const L=!(0,r.uW)("isAnonymous")&&w;return(0,a.useEffect)((()=>{const e=((e,t,n,a)=>{if(t<0)return!1;const l=new Date(e||void 0);if(!(0,i.f)(l))return!1;const o=new Date(a||void 0);if(!(0,i.f)(o))return!0;const{days:r}=(0,c.F)({start:o,end:l}),s=r;return n.some((e=>e>=t&&e{const e=(new Date).toISOString();I(e),localStorage.setItem(d.TB,e)},daysRemaining:y,trialEndsAt:E(v),trialEndsAtRaw:v,type:M,canUpgrade:L,onTrial:C,refreshPlan:h,planIsFreeOrEarlyBird:(0,u.Kj)(f),paymentProvider:x}}},97118:(e,t,n)=>{"use strict";n.d(t,{A:()=>o});var a=n(19673),l=n(42728);const o=()=>{var e;const{value:t}=(0,a.lU)();if(null===t||void 0===t||!t.free)return{};const n=Object.keys(t.free).sort(l.M7)[0];return{id:t.free[n]?null===(e=t.free[n][0])||void 0===e?void 0:e.id:null,version:n}}},52577:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>Zl});var a=n(96540),l=n(47767),o=n(3914),r=n(58168),i=(n(62953),n(39225)),c=n(45588),s=n(8711),m=n(83199),d=n(57375),u=n(15327),g=n(74618),p=n(79412),E=(n(41393),n(81454),n(46741)),h=n(14994),b=n(87659),f=n(46440),v=n(33195);const x=[{id:"name",accessor:"name",header:"Name",cell:e=>{let{getValue:t,row:{original:{isMember:n}}}=e;const l=t(),o=(0,E.JT)("room:ReadAll");return 
a.createElement(m.Flex,{alignItems:"center"},o&&n&&a.createElement(f.A,{margin:[0,1,0,0]}),a.createElement(m.Text,{margin:o&&!n&&[0,0,0,4]},l))}},{id:"nodeCount",accessor:"nodeCount",header:"Nodes",cell:e=>{let{getValue:t}=e;return"".concat(t()||0)}},{id:"memberCount",accessor:"memberCount",header:()=>"Users",cell:e=>{let{getValue:t}=e;return"".concat(t()||0)}},{id:"silencing",accessorKey:"silencingState",header:"Silencing",cell:e=>{let{getValue:t}=e;return a.createElement(v.A,{flavour:"room",silencing:t()})}}];n(17333),n(9920),n(98992),n(54520),n(3949);var y=n(47444),C=n(24198),w=n(63129),A=n(69765),T=n(54308),S=n(56820);const I=e=>(0,y.Zs)((t=>{let{snapshot:n,set:a,reset:l}=t;return async function(t){let{onSuccess:o,onFail:r}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};const i=await n.getPromise((0,h.$e)({id:e,key:"ids"})),c=t.map((e=>{let{id:t}=e;return t})),s=i.filter((e=>!c.includes(e)));a((0,h.$e)({id:e,key:"ids"}),s);const m=await Promise.all(s.map((e=>n.getPromise((0,A.LS)({id:e})))));try{if(await(async(e,t)=>Promise.all(t.map((t=>(0,w.HN)(e,t)))))(e,c),a(T.yz,(0,S.Pb)(m)),c.forEach((t=>a((0,h.Oy)(e),t))),t.forEach((t=>{let{id:n,slug:a}=t;l((0,A.LS)({id:n})),l((0,T.x4)([e,a]))})),o&&o(),t.length>1)(0,C.r0)("Rooms were successfully deleted from Space!");else{const[e]=t;(0,C.r0)("Room ".concat(e.name," was successfully deleted!"))}}catch(d){a((0,h.$e)({id:e,key:"ids"}),i),r&&r(d)}}}),[e]);var k=n(74564);const F=e=>{let{spaceId:t,spaceName:n,startIsCreating:r}=e;const i=I(t),c=(0,E.JT)("space:Delete"),s=(0,E.JT)("room:Read"),m=(0,E.JT)("room:Create"),d=(0,E.JT)("space:CreatePersonalSilencingRule"),u=(e,t)=>{if(!e)return;const n=Array.isArray(e)?e.map((e=>{let{id:t,name:n}=e;return{id:t,name:n}})):[{id:e.id,name:e.name}];i(n,{onSuccess:t.resetRowSelection})},g=(e,t)=>{const n=t.length;if(!n)return"";return 1===n?(0,k.kI)(t[0].name):(0,k.kI)(n)},p=(e,t)=>{const l=t.length;if(!l)return"";const o={...1===l?{name:t[0].name}:{roomsLength:l},spaceName:n};return a.createElement(k.rj,o)},{pathname:h}=(0,l.zy)(),b=(0,o.bq)(),f=(0,l.Zp)(),v=(0,a.useCallback)((e=>f("".concat(h,"/").concat(e))),[h,f]),x=(0,a.useMemo)((()=>({goto:{handleAction:e=>{let{slug:t}=e;return v(t)},icon:"chevron_right",tooltipText:"Room settings",isDisabled:()=>!s},addRule:{handleAction:e=>{const t={roomId:e.id};f("/spaces/".concat(b,"/settings/notifications#notificationsActiveTab=1&silencingRulePrefill=").concat(JSON.stringify(t)))},icon:"alarm_off",flavour:"hollow",neutral:!1,isDisabled:()=>!d,tooltipText:"Add new silencing rule",confirmation:!1},delete:{confirmLabel:"Yes, delete",confirmationMessage:e=>a.createElement(k.rj,{name:e.name,spaceName:n}),confirmationTitle:e=>(0,k.TU)(e.name),dataGa:e=>{let{slug:t}=e;return"manage-rooms::click-delete::".concat(t)},declineLabel:"Cancel",handleAction:u,isDisabled:e=>{let{untouchable:t}=e;return t||!c},tooltipText:"Delete room"}})),[c,v,d,s]);return{bulkActions:(0,a.useMemo)((()=>({addEntry:{dataGa:"manage-rooms::click::add-room",handleAction:r,isDisabled:!m,tooltipText:"Create room"},delete:{confirmationMessage:p,confirmationTitle:g,confirmLabel:"Yes, delete",dataGa:"manage-rooms::delete-bulk",declineLabel:"Cancel",handleAction:u,isDisabled:!c,tooltipText:"Delete rooms"}})),[m,c,p,g]),rowActions:x,onClickRow:v}},P=()=>{const 
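/*
 * The hook starting here ("P" in the bundle; the names below are invented
 * for readability) assembles the Rooms table state. A room row is disabled
 * for deletion when it is untouchable or the user lacks the "room:Delete"
 * permission:
 *
 *   const canDeleteRooms = usePermission("room:Delete");   // (0, E.JT)("room:Delete") in the bundle
 *   const rows = rooms.map((room) => ({
 *     ...room,
 *     disabled: room.untouchable || !canDeleteRooms,
 *   }));
 */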
e=(0,h.DL)(),t=(0,E.JT)("room:Delete"),n=e.map((e=>({...e,disabled:e.untouchable||!t}))),[l,r]=(0,a.useState)(""),i=(0,o.ap)("name"),c=(0,o.ap)("id"),[s,,m,d]=(0,b.A)(),[,u]=(0,a.useState)([]),{bulkActions:g,rowActions:p}=F({startIsCreating:m,spaceId:c,spaceName:i});return{roomList:n,globalFilter:l,spaceName:i,spaceId:c,isCreating:s,rowActions:p,bulkActions:g,columns:x,startIsCreating:m,stopIsCreating:d,setGlobalFilter:r,onRowSelected:u,dataGa:"manage-rooms"}};var M=n(63314);const D=e=>{const{roomList:t,spaceName:n,isCreating:l,columns:o,setGlobalFilter:i,stopIsCreating:c,onRowSelected:s,rowActions:d,bulkActions:u,dataGa:g}=P();return a.createElement(M.Ay,{tab:"Rooms"},a.createElement(m.Flex,(0,r.A)({column:!0,height:"100%",overflow:"hidden",gap:3},e),a.createElement(m.H3,null,"Rooms of ",n),a.createElement(m.Table,{onSearch:i,onRowSelected:s,enableSorting:!0,dataColumns:o,enableSelection:!0,data:t,bulkActions:u,rowActions:d,dataGa:g,testPrefixCallback:e=>e.name})),l&&a.createElement(p.n,{onClose:c}))};var L=n(67031),N=n(87860),R=n(29662),B=n(84280),_=n(86663),U=n(67990),O=n(47762);const V=()=>{const e=(0,U.CK)();return{nodes:(0,O.Gt)(e).map((e=>({...e,disabled:e.hasAccessibleData})))||[],nodeIds:e}};var H=n(60383);const W=(0,a.memo)((e=>{let{roomUntouchable:t,...n}=e;const{nodes:l,nodeIds:o}=V({roomUntouchable:t});return a.createElement(M.Ay,{tab:"Room::Nodes"},a.createElement(m.Flex,(0,r.A)({column:!0,height:"100%",overflow:"hidden",gap:3},n),a.createElement(m.H3,null,"Nodes in this room (",o.length,") "),a.createElement(H.A,{showClaimNodeOnEmptySpace:!0,enableSelection:!0,customNodes:l,roomUntouchable:t})))}));var Y=n(45765),q=n(46902),j=n(83488),z=n.n(j),J=n(66245),G=n.n(J),$=n(55463),K=n(70716),Z=n(96083);const Q=[{id:"user",accessorKey:"user",header:"Name",cell:e=>{let{cell:t}=e;const{avatarURL:n,name:l}=t.row.original;return a.createElement(m.Flex,{alignItems:"center",gap:2},a.createElement(Z.A,{src:n,title:l}),a.createElement(m.TextSmall,null,l))}},{id:"email",accessorKey:"email",header:"Email",cell:e=>{let{getValue:t}=e;return a.createElement(m.TextSmall,null,t())}}],X=[{id:"user",desc:!1}],ee=e=>{let{setSelected:t}=e;const n=(0,$.Gi)(),l=(0,q.lb)(),o=(0,a.useMemo)((()=>G()(n,l)),[n,l]),r=(0,a.useCallback)((e=>{t(e.map((e=>e.id)))}),[]),i=(0,K.Uv)(o);return a.createElement(m.Flex,{alignItems:"start",padding:[1],overflow:{horizontal:"hidden",vertical:"auto"}},a.createElement(m.Table,{dataColumns:Q,data:i,autoResetSelectedRows:!0,sortableBy:X,onSearch:z(),onRowSelected:r,enableSelection:!0,enableSorting:!0}))};var te=n(92155);const ne=(0,te.A)(m.Button),ae=e=>{let{onAdd:t,onClose:n,selected:l,error:o,name:r}=e;return a.createElement(m.Flex,{alignItems:"center",justifyContent:"between"},o?a.createElement(m.Text,{color:"error"},"Select at least 1 user to add to ",r):a.createElement("div",null),a.createElement(m.Flex,{gap:2},a.createElement(m.Button,{neutral:!0,flavour:"hollow",onClick:n,label:"Cancel"}),a.createElement(ne,{disabled:!l.length,label:"Add ".concat(l.length," members"),onClick:t})))};var le=n(27287),oe=n(84707),re=n(49032),ie=n(77181),ce=n(13871),se=n(78217),me=n(71835),de=n(69756),ue=n(4659),ge=n(36850);const pe=e=>{let{email:t}=e;return(0,re.B9)(t)},Ee={header:"Invitations",text:"Invitations successfully 
sent!"},he=e=>{const{id:t,slug:n}=(0,o.ap)(),l=(0,A.ID)(),i=(0,A.wz)(l,"name"),[c,s]=(0,a.useState)([]),[,,d,u]=(0,ie.g)(t),[,g]=(0,me.A)(),[p,h]=(0,a.useState)(),[b,f]=(0,a.useState)(),v=e=>{const{header:t,text:n}=e||Ee,a=(0,ce.UI)({header:t,text:n,success:!0});se.A.success(a,{context:"manageInvitations"}),s([]),h(Math.random())},x=(0,a.useCallback)((()=>{const e=c.filter(pe).map((e=>({email:e.email,name:e.name,role:b,roomIDs:[l]}))),t="".concat(window.location.origin,"/spaces/").concat(n,"/join-space");d(e,t,{onSuccess:v,onError:g})}),[c,l,b]),y=(0,E._s)();return a.createElement(m.Flex,(0,r.A)({column:!0},e),a.createElement(m.H4,null,"Send invitations to ",i," room"),a.createElement(le.BZ,null,"TIP: You can send more invitations at once, separate each with a comma."),a.createElement(oe.y,{key:p,invitations:c,setInvitations:s}),a.createElement(m.H5,{margin:[4,0,0]},"Role"),a.createElement(le.BZ,null,"Choose a role for invited user."," ",a.createElement(ue.A,{href:ge.S,target:"_blank",rel:"noopener noreferrer",Component:m.TextSmall},"Learn more")),a.createElement(de.A,{availableRoles:y,dataGA:"invite-to-room",dataTestId:"invite-selectRole",onChange:e=>{f(e.target.value)},value:b}),a.createElement(m.Box,{alignSelf:"end",margin:[4,0,0]},a.createElement(m.Button,{label:"Send",onClick:x,disabled:0===c.length||!b,flavour:"hollow",isLoading:u})))},be=e=>{let{onClose:t,room:n}=e;const[l,o]=(0,a.useState)([]),[r,i]=(0,a.useState)(!1),c=(0,q.n)(n.spaceId,n.id),s=(0,a.useCallback)((()=>{if(!l.length)return i(!0);i(!1),c(l),t()}),[l,c]);return a.createElement(u.GO,{onClose:t},a.createElement(g.z,{onClose:t,isSubmodal:!0,title:a.createElement(a.Fragment,null,"Manage room",a.createElement(m.TextBig,{color:"textLite"},"\xa0/ Add Users"))}),a.createElement(Y.U,null,"Add users to room\xa0",n.name),a.createElement(u.Yv,null,a.createElement(he,{margin:[0,0,4]}),a.createElement(m.Flex,{column:!0,flex:!0,overflow:"hidden",padding:[0,0,2,0]},a.createElement(ee,{setSelected:o})),a.createElement(ae,{onClose:t,onAdd:s,selected:l,error:r,name:n.name})))};var fe=n(97245),ve=n(22292);const xe=e=>"Remove ".concat(e),ye=e=>{let{name:t,usersLength:n}=e;return n?1===n&&t?xe(t):"Remove ".concat(a=n," ").concat(1===a?"user":"users"):"";var a},Ce=e=>{let{name:t,roomName:n}=e;return a.createElement(a.Fragment,null,"You are about to remove ",a.createElement("strong",null,t)," from room ",a.createElement("strong",null,n),".",a.createElement("br",null),"Are you sure you want to continue?")},we=e=>{let{roomName:t,usersLength:n}=e;const l="".concat(n,1===n?" 
user":" users");return a.createElement(a.Fragment,null,"You are about to remove ",a.createElement("strong",null,l)," from room ",a.createElement("strong",null,t),".",a.createElement("br",null),"Are you sure you want to continue?")},Ae=e=>{let{name:t,roomName:n,usersLength:l}=e;return l?1===l&&t?a.createElement(Ce,{name:t,roomName:n}):a.createElement(we,{roomName:n,usersLength:l}):""};var Te=n(50876);const Se=e=>{let{spaceId:t,roomId:n,startIsInviting:l,canRemoveUser:o,untouchable:r}=e;const i=(0,q.zC)(t,n),c=(0,A.XA)("name"),{sendButtonClickedLog:s,isReady:m}=(0,Te.A)(),d=e=>{if(!e)return;const t=Array.isArray(e)?e.map((e=>{let{user:t}=e;return t.id})):[null===e||void 0===e?void 0:e.user.id];i(t)},u=(0,a.useCallback)((()=>{l(),m&&s({label:"Invite users"},!0)}),[m,s]);return{rowActions:(0,a.useMemo)((()=>({remove:{confirmLabel:"Yes, remove",confirmationMessage:e=>a.createElement(Ce,{name:e.name,roomName:c}),confirmationTitle:e=>xe(e.name),declineLabel:"Cancel",disabledTooltipText:e=>r?"It's not allowed to remove users from this room. Try removing them from the space.":e.isSelf?"You cannot remove yourself. Click the leave button on the room tab.":o?"Remove is disabled":"You don't have the required permissions to remove users from this room",handleAction:d,isDisabled:e=>e.disabled,tooltipText:"Remove user from room"}})),[xe,d,Ce,c]),bulkActions:(0,a.useMemo)((()=>({addEntry:{handleAction:u,tooltipText:"Invite users"},remove:{confirmLabel:"Yes, remove",confirmationMessage:(e,t)=>a.createElement(Ae,{name:t[0].name,roomName:c,usersLength:t.length}),confirmationTitle:(e,t)=>ye({name:t[0].name,usersLength:t.length}),declineLabel:"Cancel",disabledTooltipText:e=>r?"It's not allowed to remove users from this room. Try removing them from the space.":e.length?o?"Remove is disabled":"You don't have the required permissions to remove users from this room":"You haven't selected any users",handleAction:d,tooltipText:"Remove users from room"}})),[d,Ae,c,l,ye])}},Ie=e=>{let{untouchable:t}=e;const n=(0,a.useMemo)((()=>[{id:"name",accessor:"name",header:"Name",cell:e=>{let{getValue:t}=e;return"".concat(t())}},{id:"user",accessor:"user",header:"Users",cell:e=>{let{getValue:t}=e;const{name:n,avatarURL:l,email:o}=t();return a.createElement(m.Flex,{alignItems:"center",gap:2},a.createElement(Z.A,{src:l,title:"".concat(n," - ").concat(o||"email not set")}),a.createElement(m.TextSmall,null,n))}},{id:"email",accessor:"email",header:"Email",width:300,align:"center",cell:e=>{let{getValue:t}=e;return a.createElement(m.TextSmall,null,t())}}]),[]),l=(0,q.di)(),r=(0,ve.uW)("id"),i=(0,E.JT)("space:RemoveUser"),[c,s]=(0,a.useState)([]),[d,u]=(0,a.useState)(""),[g,,p,h]=(0,b.A)(),f=(0,o.ap)("id"),v=(0,A.ID)(),{rowActions:x,bulkActions:y}=Se({selectedRows:c,spaceId:f,roomId:v,startIsInviting:p,canRemoveUser:i,untouchable:t});return{columns:n,data:(0,a.useMemo)((()=>(0,fe.L)({data:l,currentUserId:r,canRemoveUser:i,untouchable:t})),[l]),columnVisibility:{name:!1},members:l,rowActions:x,bulkActions:y,isInviting:g,globalFilter:d,stopIsInviting:h,onRowSelected:s,setGlobalFilter:u,canRemoveUser:i}},ke=e=>{let{room:t,...n}=e;const{untouchable:l}=t,{columns:o,data:i,columnVisibility:c,members:s,rowActions:d,bulkActions:u,isInviting:g,stopIsInviting:p,onRowSelected:E,setGlobalFilter:h,canRemoveUser:b}=Ie({untouchable:l});return a.createElement(M.Ay,{tab:"Room::User"},a.createElement(m.Flex,(0,r.A)({column:!0,height:"100%",overflow:"hidden",gap:3},n),a.createElement(m.H3,null,"Users in this room 
(",s.length,")"),a.createElement(m.Table,{enableSelection:!0,enableSorting:!0,columnVisibility:c,data:i,dataColumns:o,rowActions:d,bulkActions:u,onRowSelected:E,onSearch:h,testPrefixCallback:e=>e.name}),g&&a.createElement(be,{onClose:p,room:t})))},Fe={room:0,nodes:1,users:2},Pe=(0,s.default)(m.Box).attrs({height:"100%",padding:[4,0],flex:"1",overflow:"hidden"}).withConfig({displayName:"manageRoomModal__TabContent",componentId:"sc-lrxs0y-0"})([""]),Me=()=>{const e=(0,l.Zp)(),t=(0,o.vt)(),{roomSlug:n,spaceSlug:r,settingsTab:i}=(0,l.g)(),s=(0,A.J_)(t,n);(0,N.A)({spaceId:t,id:s,polling:!1}),(0,R.A)(t,s);const d=(0,E.JT)("room:ReadUsers"),u=(0,A.wz)(s),[g,p]=(0,a.useState)(u.name),{search:h}=(0,l.zy)(),{tab:b="room"}=_.parse(h),f=Fe[b],[v,x]=(0,a.useState)(f),[y,C,w]=(0,m.useInputValue)({maxChars:255,value:u.description}),T=(0,a.useCallback)((()=>{e((0,c.tW)(B.bq,{spaceSlug:r,settingsTab:i}))}),[r,i]),S=(0,A.a8)(u.id,{shouldPersist:!0,onSuccess:T}),I=(0,a.useCallback)((()=>S({name:g,description:y})),[g,y,S]);return a.createElement(m.Flex,{column:!0,justifyContent:"between",overflow:"hidden","data-testid":"manageRoom",flex:"1",height:"100%"},a.createElement(m.Tabs,{"data-testid":"manageRoom-tabs",selected:v,onChange:x,TabContent:Pe,height:"100%",position:"relative",overflow:"hidden",width:"100%"},a.createElement(m.Tab,{"data-testid":"manageRoom-roomTab",label:a.createElement(m.H5,null,"Room")},a.createElement(L.U,{roomName:g,setRoomName:p,roomDescription:y,setRoomDescription:C,charsDescIndicator:w,"data-testid":"manageRoom-roomTabContent",id:u.id,navigateToParent:T,onSaveClick:I})),a.createElement(m.Tab,{"data-testid":"manageRoom-nodesTab",label:a.createElement(m.H5,null,"Nodes")},a.createElement(W,{roomUntouchable:u.untouchable,"data-testid":"manageRoom-nodesTabContent"})),d&&a.createElement(m.Tab,{"data-testid":"manageRoom-usersTab",label:a.createElement(m.H5,null,"Users")},a.createElement(ke,{"data-testid":"manageRoom-usersTabContent",room:u}))),v===Fe.room&&a.createElement(m.Flex,{justifyContent:"end"}))};var De=n(97054);const Le=e=>{let{children:t}=e;return(0,De.TP)(),t},Ne=e=>{let{children:t}=e;return(0,A.XA)().loaded?t:null},Re=()=>a.createElement(l.BV,null,a.createElement(l.qh,{path:"/",element:a.createElement(D,null)}),a.createElement(l.qh,{path:":roomSlug",element:a.createElement(Le,null,a.createElement(Ne,null,a.createElement(Me,null)))}));var Be=n(6323),_e=n(80158),Ue=n(29217);const Oe=[{id:"name",accessor:"name",header:"Name",cell:e=>{let{getValue:t}=e;return"".concat(t())}},{id:"user",accessor:"user",width:300,header:"Users",cell:e=>{let{getValue:t}=e;const{name:n,avatarURL:l,email:o}=t();return a.createElement(m.Flex,{alignItems:"center",gap:2},a.createElement(Z.A,{src:l,title:"".concat(n," - ").concat(o||"email not set")}),a.createElement(m.TextSmall,null,n))}},{id:"email",accessor:"email",header:"Email",width:300,align:"center",cell:e=>{let{getValue:t}=e;return a.createElement(m.TextSmall,null,t())}},{id:"type",accessor:"type",header:"Role",width:100,align:"center",cell:e=>{let{getValue:t,row:{original:{user:{deactivated:n}}}}=e;return a.createElement(m.Flex,{alignItems:"center",gap:1},a.createElement(m.TextSmall,{strong:!0},(0,_e.Zr)(t())),n&&a.createElement(Ue.A,{content:"This user's role doesn't have permission to access any information on the Space. 
Please review user's role or space's plan."},a.createElement(m.Icon,{name:"warning_triangle",height:"18px",width:"18px",color:["yellow","amber"]})))}}];n(14905),n(8872);var Ve=n(54702),He=n(12800),We=n(5169),Ye=n(66294);const qe=e=>{const t=(0,We.t)();return(0,y.Zs)((n=>{let{snapshot:a,set:l}=n;return async function(n){let{onSuccess:o,onError:r}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};const i=await a.getPromise((0,$.kd)({id:e,key:"ids"})),c=i.filter((e=>!e.includes(n)));l((0,$.kd)({id:e,key:"ids"}),c);try{await(0,Ve.XY)(e,n),(async e=>{let{cacheKeyPrefix:t,memberIds:n,spaceId:a}=e;const l="".concat(t).concat((0,Ye.$)(a));await(0,He.y)({key:l,handleResults:e=>e.results.filter((e=>!n.includes(e.id)))})})({cacheKeyPrefix:t,spaceId:e,memberIds:n}),o&&o()}catch(s){l((0,$.kd)({id:e,key:"ids"}),i),r&&r()}}}),[e])},je=e=>(0,y.Zs)((t=>{let{snapshot:n,set:a}=t;return async function(t,l){let{onSuccess:o,onError:r}=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{};const i=await n.getPromise((0,K.Z6)(t)),c=await Promise.all(t.map((async t=>({mId:t,role:await n.getPromise((0,$.K2)({id:t,spaceId:e}))})))),{role:s,...m}=l;t.forEach((e=>{a((0,K.m)({id:e}),(e=>({...e,...m})))})),s&&t.forEach((t=>{a((0,$.K2)({id:t,spaceId:e}),s)}));try{await(async(e,t,n)=>Promise.all(t.map((async t=>await(0,Ve.w5)(e,t,n)))))(e,t,l),o&&o()}catch(d){i.forEach((e=>{let{id:t,...n}=e;return a((0,K.m)({id:t}),{id:t,...n})})),s&&c.forEach((t=>{let{mId:n,role:l}=t;return a((0,$.K2)({id:n,spaceId:e}),l)})),r&&r()}}}),[e]),ze="change-user-role",Je="changeUserRole",Ge=e=>{let{handleAction:t,ids:n,onClose:l}=e;const o=(0,$.$D)(n),r=1===o.length?o[0]:null,i=(0,E._s)(),[c,s]=(0,a.useState)(r);return a.createElement(m.ConfirmationDialog,{confirmLabel:"Save","data-ga":ze,"data-testid":Je,handleConfirm:()=>{t(c),l()},handleDecline:l,isConfirmDisabled:!c,isConfirmPositive:!0,message:a.createElement(m.Flex,{gap:2,column:!0},a.createElement(m.TextSmall,null,"Learn more about Netdata role-based access model on"," ",a.createElement(ue.A,{href:ge.S,target:"_blank",rel:"noopener noreferrer",Component:m.TextSmall},"our documentation")),a.createElement(de.A,{availableRoles:i,dataGA:ze,dataTestId:Je,onChange:e=>{s(e.target.value)},value:c})),title:"Change roles"})};var $e=n(29848);const Ke=e=>{let{spaceId:t,startIsInviting:n}=e;const l=qe(t),o=je(t),i=(0,E.JT)("space:InviteUser"),c=(0,$e.ly)(),s=(0,a.useCallback)((e=>{let{role:t,members:n}=e;o(n,{role:t},{onSuccess:c})}),[]),m=e=>t=>{if(!e)return;const n=Array.isArray(e)?e.map((e=>{let{user:t}=e;return t.id})):[null===e||void 0===e?void 0:e.user.id];s({role:t,members:n})},d=(e,t)=>{if(!e)return;const n=Array.isArray(e)?e.map((e=>{let{user:t}=e;return t.id})):[null===e||void 0===e?void 0:e.user.id];l(n,{onSuccess:t.resetRowSelection})},u=(0,a.useCallback)((e=>a.createElement(a.Fragment,null,"You are about to delete ",a.createElement("strong",null,e.name),".",a.createElement("br",null),"Are you sure you want to continue?")),[]),g=(0,a.useCallback)(((e,t)=>{const n=t.length;return n?1===n?"Delete User":"Delete Users":""}),[]),p=(0,a.useCallback)(((e,t)=>{const n=t.length;return n?a.createElement(a.Fragment,null,"You are about to delete"," ",a.createElement("strong",null,1===n?t[0].name:"".concat(t.length," users")),".",a.createElement("br",null),"Are you sure you want to continue?"):""}),[]);return{rowActions:(0,a.useMemo)((()=>({userSettings:{CustomUIAction:e=>{let{data:t,...n}=e;return a.createElement(Ge,(0,r.A)({ids:[t.user.id]},n))},handleAction:m,tooltipText:"Change 
Role",isDisabled:e=>e.disabled,disabledTooltipText:e=>e.isSelf?"You cannot change your role":"You don't have the required permissions to change roles for users"},delete:{handleAction:d,confirmationTitle:"Delete User",confirmationMessage:u,isDisabled:e=>e.disabled,disabledTooltipText:e=>e.isSelf?"You cannot delete yourself. Try the space info tab, to leave space.":"You don't have the required permissions to remove users from space"}})),[]),bulkActions:(0,a.useMemo)((()=>({addEntry:{handleAction:n,tooltipText:"Invite user",isDisabled:()=>!i,disabledTooltipText:"You don't have the required permissions to invite new users"},userSettings:{CustomUIAction:e=>{let{data:t,...n}=e;return a.createElement(Ge,(0,r.A)({ids:t.map((e=>{let{user:t}=e;return t.id}))},n))},handleAction:m,tooltipText:"Change Roles",disabledTooltipText:e=>null!==e&&void 0!==e&&e.length?"You don't have the required permissions to change roles for users":"You haven't selected any users"},delete:{handleAction:d,confirmationTitle:g,confirmationMessage:p,disabledTooltipText:e=>null!==e&&void 0!==e&&e.length?"You don't have the required permissions to remove users from space":"You haven't selected any users"}})),[i,g,p])}},Ze=()=>{const e=(0,o.vt)(),t=(0,o.ns)(e,"name"),n=(0,$.bj)(),l=(0,ve.uW)("id"),[r,,i,c]=(0,b.A)(),[s,m]=(0,a.useState)(""),[d,u]=(0,a.useState)([]),{rowActions:g,bulkActions:p}=Ke({selectedRows:d,spaceId:e,startIsInviting:i}),h=(0,E.JT)("space:RemoveUser"),f=(0,E.JT)("user:ChangeRoles"),v=(0,E.Ge)(),x=h||f,y=(0,a.useMemo)((()=>(e=>{let{fromRolePermissions:t,userList:n,canModifyUser:a,currentUserId:l}=e;return n.reduce(((e,n)=>{const o=l===(null===n||void 0===n?void 0:n.id),r=t.includes(n.role);return e.push({name:n.name,email:n.email,user:{avatarURL:n.avatarURL,deactivated:n.deactivated,name:n.name,email:n.email,id:n.id},type:n.role,disabled:!r||!a||o,isSelf:o,canModifyUser:a,canSetRole:r}),e}),[])})({userList:n,currentUserId:l,canModifyUser:x,fromRolePermissions:v})),[n]);return{columns:Oe,spaceName:t,members:n,currentUserId:l,data:y,isInviting:r,globalFilter:s,rowActions:g,bulkActions:p,columnVisibility:{name:!1},onRowSelected:u,setGlobalFilter:m,startIsInviting:i,stopIsInviting:c,canModifyUser:x}},Qe=e=>{const{columns:t,spaceName:n,data:l,isInviting:o,rowActions:i,bulkActions:c,columnVisibility:s,stopIsInviting:d,setGlobalFilter:u,onRowSelected:g}=Ze();return a.createElement(M.Ay,{tab:"Users"},a.createElement(m.Flex,(0,r.A)({column:!0,height:"100%",overflow:"hidden"},e),a.createElement(m.H3,null,"Members of ",n),a.createElement(m.TextSmall,{margin:[1,0,3]},"Learn more about Netdata role-based access model on"," ",a.createElement(ue.A,{href:ge.S,target:"_blank",rel:"noopener noreferrer",Component:m.TextSmall},"our documentation")),a.createElement(m.Table,{onSearch:u,data:l,dataColumns:t,enableSorting:!0,enableSelection:!0,onRowSelected:g,bulkActions:c,rowActions:i,columnVisibility:s,testPrefixCallback:e=>e.name})),o&&a.createElement(Be.d,{onClose:d,isSubmodal:!0}))};var Xe=n(74530),et=n(47193);const tt=(0,a.memo)((e=>{const t=(0,o.vt)(),n=(0,o.ap)("name"),{nodes:l}=(0,et.A)();(0,Xe.A)(t);const i="Connect Nodes to ".concat(n);return a.createElement(M.Ay,{tab:"Nodes"},a.createElement(m.Flex,(0,r.A)({column:!0,"data-testid":"manageClaimedNodes",height:"100%",overflow:"hidden",gap:3},e),a.createElement(m.H3,{"data-testid":"manageClaimedNodes-header"},i),a.createElement(H.A,{showClaimNodeOnEmptySpace:!0,showClaimModalWithRoomSelection:!0,enableSelection:!0,customNodes:l,isSpace:!0})))})),nt=tt;var 
at=n(54961),lt=n(26770),ot=n(71856);const rt=e=>{let{name:t,spaceName:n}=e;return a.createElement(a.Fragment,null,"You are about to delete ",a.createElement("strong",null,t)," channel from ",a.createElement("strong",null,n)," space.",a.createElement("br",null),"This cannot be undone. Are you sure you want to continue?")};var it=n(30960),ct=(n(8159),n(37550),n(63450));const st=e=>{let{integration:t={},testId:n,...l}=e;const o=l.checked?"disable":"enable";return a.createElement(m.Toggle,(0,r.A)({colored:!0,"data-testid":"".concat(n,"-toggle"),"data-ga":"manage-channels::toggle-".concat(t.slug,"-").concat(o,"::notifications-tab")},l))},mt=e=>{let{testId:t,tooltipContent:n="plan",...l}=e;return l.disabled?a.createElement(Ue.A,{align:"top",content:ot.r7[n],"data-testid":"".concat(t,"-warning"),plain:!0},a.createElement(m.Flex,{padding:[2,2,0,0],position:"relative"},a.createElement(ct.id,{"data-testid":"".concat(t,"-warningIcon")}),a.createElement(st,(0,r.A)({testId:t},l)))):a.createElement(st,(0,r.A)({testId:t},l))},dt=e=>{let{"data-testid":t="cellName",enabled:n,id:l,integration:o,isAvailable:i,name:c,spaceId:s,tooltipContent:d,...u}=e;const g=(0,lt.t5)(s,"channels"),p=(0,lt.vq)(s,"channels");return a.createElement(m.Flex,(0,r.A)({alignItems:"center","data-testid":t,gap:4},u),a.createElement(mt,{checked:n,disabled:!i,integration:o,onChange:e=>{let{target:t}=e;const n=t.checked;p(g.map((e=>e.id===l?{...e,enabled:n}:e))),(0,at.Jq)(s,l,n)},testId:t,tooltipContent:d}),a.createElement(m.Text,{"data-testid":"".concat(t,"-label")},c))},ut=e=>{var t;let{"data-testid":n="cellService",integration:l={},kindLabel:o,...i}=e;const c=ot.a$[l.slug]||{};return a.createElement(m.Flex,(0,r.A)({alignItems:"center","data-testid":n,gap:2,justifyContent:"between",width:"100%"},i),a.createElement(m.Flex,{alignItems:"center","data-testid":"".concat(n,"-integration"),gap:2},a.createElement(m.Icon,(0,r.A)({"data-testid":"".concat(n,"-icon")},c)),a.createElement(m.Text,{"data-testid":"".concat(n,"-label")},l.title)),a.createElement(Ue.A,{content:o,"data-testid":"".concat(n,"-kind"),plain:!0},a.createElement(m.Icon,{color:"textLite",height:"16px",name:null===(t=ot.HA[l.kind])||void 0===t?void 0:t.icon,width:"16px"})))},gt=e=>{let{canManageChannels:t,roomOptions:n,spaceId:l}=e;return[{accessor:"name",cell:e=>{let{getValue:n,row:o}=e;return a.createElement(dt,{enabled:o.original.enabled,id:o.original.id,integration:o.original.integration,isAvailable:t&&o.original.available,name:n(),spaceId:l,tooltipContent:t?"plan":"role"})},header:"Name",id:"name"},{accessor:"integration",cell:e=>{let{getValue:t,row:n}=e;return a.createElement(ut,{integration:t(),kindLabel:n.original.kindLabel})},enableColumnFilter:!0,filterFn:(e,t,n)=>{const a=e.original.integration;return n.length<1||n.some((e=>{let{value:t}=e;return""===t||t===(null===a||void 0===a?void 0:a.slug)}))},header:"Service",id:"integration",meta:{filter:{component:"select",isMulti:!0,options:Object.keys(ot.a$).map((e=>({label:e,value:e}))),tiny:!0,"data-ga":"manage-integrations::select-service::notification-integrations-tab"},tooltip:a.createElement(m.Flex,{column:!0,width:{max:"200px"},gap:2},a.createElement(m.TextMicro,null,"Services are distinguished in two categories."),a.createElement(m.TextMicro,null,a.createElement(ct.Jg,{name:ot.HA.PERSONAL.icon})," ",a.createElement(m.TextMicro,{strong:!0},"Personal:")," ",ot.HA.PERSONAL.tooltip),a.createElement(m.TextMicro,null,a.createElement(ct.Jg,{name:ot.HA.SYSTEM.icon})," ",a.createElement(m.TextMicro,{strong:!0},"System:")," 
",ot.HA.SYSTEM.tooltip))}},{accessor:"notificationLabel",cell:e=>{let{getValue:t}=e;return a.createElement(m.Text,{"data-testid":"channelNotificationsCell"},t())},enableColumnFilter:!0,filterFn:(e,t,n)=>{const a=e.original.notificationLabel;return n.length<1||n.some((e=>{let{label:t}=e;return""===t||t===a}))},header:"Notifications",id:"notificationLabel",meta:{filter:{component:"select",isMulti:!0,options:Object.values(ot.w8),tiny:!0,"data-ga":"manage-integrations::select-notification::notification-integrations-tab"}}},{accessor:"rooms",cell:e=>{var t;let{getValue:l,row:o}=e;const r={"data-testid":"channelRoomsCell"};if(o.original.internal)return a.createElement(m.Text,r,ot.Oh.label);if(!(o.original.rooms||null!==(t=o.original.rooms)&&void 0!==t&&t.length))return a.createElement(m.Text,r,ot.PT.label);const i=(0,it.Pl)({roomIds:l(),roomOptions:n});return a.createElement(m.Text,{"data-testid":"channelRoomsCell"},i)},enableColumnFilter:!0,filterFn:(e,t,n)=>n.length<1||n.some((t=>{let{label:n,value:a}=t;if(e.original.internal)return n===ot.Oh.label;const l=e.original.rooms||[];return l.length?l.includes(a):n===ot.PT.label})),header:"Rooms",id:"rooms",meta:{filter:{component:"select",isMulti:!0,options:n,tiny:!0,"data-ga":"manage-integrations::select-room::notification-integrations-tab"}}}]};var pt=n(58159),Et=n(18061),ht=n(88982);const bt=(e,t)=>{const n=(0,lt.ef)(t),a=(0,ht.A)({all:!0});(0,Et.A)((()=>({enabled:!!t,fetch:()=>(0,at.t9)(e,t),onFail:e=>n({...pt.V,error:e.message}),onSettle:()=>n({loading:!1,loaded:!0}),onSuccess:e=>n({...pt.V,...e.rooms?{roomSelections:a.filter((t=>{let{value:n}=t;return e.rooms.includes(n)}))}:{},...e})})),[e,t])};var ft=n(79769);const vt=["MobileApp","Email"],xt=()=>{const e=(0,l.Zp)(),t=(0,E.JT)("channel:Manage"),n=(0,o.vt)(),r=(0,o.ap)("name"),i=(0,o.bq)(),[s,m]=(0,me.A)(),[d,u]=(0,a.useState)(""),g=(0,lt.bY)();(0,ft.A)(n);const p=(0,lt.t5)(n,"channels"),h=(0,lt.t5)(n,"currentChannelId"),b=(0,lt.vq)(n,"channels"),f=(0,lt.vq)(n,"currentChannelId"),v=(0,ht.A)({all:!0,internal:!0}),{alerts:x,name:y,rooms:C,secrets:w}=(0,lt.g4)(h);bt(n,h);const A=(0,a.useCallback)((e=>{f(e)}),[]),T=(0,a.useCallback)((async e=>{let{id:t}=e;try{await(0,at.Wb)(n,t),b(p.filter((e=>e.id!==t))),s({header:"Configuration deleted successfully!"})}catch(a){m(a)}}),[p,n]),S=(0,a.useCallback)((()=>{e((0,c.tW)(B.uX,{spaceSlug:i,settingsTab:B.A8,settingsSubTab:B.G0}))}),[i]),I=(0,a.useMemo)((()=>({addEntry:{dataGa:"manage-channels::click-add-channel::notifications-tab",flavour:"hollow",handleAction:S,iconColor:"success",label:t?"Add Configuration":"View configurations",small:!0,strong:!0,width:"auto",...t?{}:{icon:""}}})),[t,S]),k=(0,a.useCallback)((e=>{e&&g({id:e})}),[g]),F=(0,a.useMemo)((()=>({testNotification:{handleAction:e=>{let{id:t}=e;k(t)},icon:"notificationTrigger",confirmation:!1,tooltipText:"Test your notification settings",disabledTooltipText:"You cannot test this notification",isDisabled:e=>{let{slug:t}=e;return vt.includes(t)}},edit:{dataGa:e=>{let{integration:t={}}=e;return"manage-rooms::click-edit::".concat(t.slug)},disabledTooltipText:"Edit is disabled",handleAction:e=>{let{id:t}=e;A(t)},isDisabled:e=>{let{available:n,internal:a}=e;return!t||!n||a},TooltipComponent:Ue.A,tooltipText:"Edit setting"},delete:{confirmLabel:"Yes, delete",confirmationMessage:e=>{let{integration:t={}}=e;return 
a.createElement(rt,{name:t.name,spaceName:r})},confirmationTitle:e=>{let{name:t}=e;return(0,it.O5)(t)},dataGa:e=>{let{integration:t={}}=e;return"manage-rooms::click-delete::".concat(t.slug)},declineLabel:"Cancel",handleAction:T,isDisabled:e=>{let{integration:n={}}=e;return!t||n.internal},TooltipComponent:Ue.A,tooltipText:"Delete setting"}})),[t,T,S]);return{bulkActions:I,channelData:p,currentChannelId:h,columns:gt({canManageChannels:t,roomOptions:v,spaceId:n}),dataGa:"manage-channels",onFilter:(e,t,n)=>{const a=e.original,l=n.toLowerCase();if(a.name.toLowerCase().includes(l))return!0;if(a.integration.slug.toLowerCase().includes(l))return!0;if(a.notificationLabel.toLowerCase().includes(l))return!0;if(a.internal)return ot.Oh.label.toLowerCase().includes(l);const o=a.rooms||[];if(!o.length)return ot.PT.label.toLowerCase().includes(l);return(0,it.Pl)({roomIds:o,roomOptions:v}).toLowerCase().includes(l)},onModalClose:()=>{b(p.map((e=>e.id===h?{...e,alerts:x,name:y,rooms:C,secrets:w}:e))),f("")},rowActions:F,search:d,setSearch:u}};n(74648),n(23215);var yt=n(54518),Ct=n(47130);const wt=(0,te.A)((0,Ct.A)(m.Button)),At="modal",Tt=e=>{let{id:t="new",integrationId:n,onClose:r,isSubmitEnabled:i}=e;const{alarms:s,name:m,rooms:d,slug:u,integration:g,secrets:p}=(0,lt.g4)(t),E=(0,$e.ly)(),h=(0,l.Zp)(),[b,f]=(0,me.A)(),v=(0,o.vt)(),x=(0,o.bq)(),y=(0,lt.bY)(),C=(0,a.useCallback)((()=>{(u||g)&&y({slug:u||g,secrets:p})}),[u,p,y]),w=(0,c.tW)(B.bq,{spaceSlug:x,settingsTab:B.A8});return a.createElement(ct.fn,{column:!1,gap:2,"data-testid":"".concat(At,"-footer"),justifyContent:"end"},a.createElement(wt,{feature:"IntegrationNotification",integrationId:n,label:"Test",flavour:"hollow",onClick:C,disabled:!i,tooltipProps:{content:"Test your notification settings",align:"bottom"}}),a.createElement(wt,{feature:"IntegrationNotification",integrationId:n,"data-testid":"".concat(At,"-confirmButton"),disabled:!i,label:"OK",onClick:async()=>{try{const e={alarms:s,integrationID:n,...m?{name:m}:{},...null!==d&&void 0!==d&&d.length?{Rooms:d}:{},secrets:p};"new"===t?await(0,at.Qb)(v,e):await(0,at.eQ)(v,t,e),b({header:"new"===t?"Configuration created successfully!":"Configuration updated successfully!"}),E(),r(),h(w)}catch(e){f(e)}},textTransform:"uppercase",tooltipProps:{content:"Save your settings",align:"bottom"}}))},St="modal",It=e=>{let{id:t="new",integrationId:n,onClose:l,...o}=e;const{alarms:i,title:c,description:s,docsLink:d,fields:u,integration:g,loaded:p,name:E,required:b,roomSelections:f,secrets:v}=(0,lt.g4)(t),x=e=>"manage-".concat(g,"-channel::").concat(e,"::notification-").concat(n?"integrations":"channels","-tab"),y=(0,ht.A)({all:!0}),C=(0,h.DL)(),w=(0,lt.ef)(t,"alarms"),A=(0,lt.ef)(t,"name"),T=(0,lt.ef)(t,"roomSelections"),S=(0,lt.ef)(t,"secrets"),I=(0,lt.ef)(t,"rooms"),[k,F]=(0,a.useState)({});if(!p)return null;const P=Object.keys(k),M=b.every((e=>(0,it.ct)(e,v[e],u[e]))),D=!P.length||P.every((e=>{const t=v[e].selection;return k[e].every((n=>(0,it.ct)(n,v[e][n],u[e].fields[t][n])))})),L=i&&M&&D;return 
a.createElement(m.Modal,{onEsc:l},a.createElement(ct.$m,{"data-testid":o["data-testid"]||St},a.createElement(m.ModalHeader,{border:{side:"bottom",color:"disabled"},column:!0,"data-testid":"".concat(St,"-header"),gap:.5,padding:[6,4,3]},a.createElement(m.Flex,{alignItems:"center","data-testid":"".concat(St,"-headerMain")},a.createElement(m.Flex,{"data-testid":"".concat(St,"-titleContainer"),gap:1},a.createElement(m.Icon,(0,r.A)({"data-testid":"".concat(St,"-titleIcon")},ot.a$[g]||{})),a.createElement(m.H3,{"data-testid":"".concat(St,"-title")},c)),l&&a.createElement(ct.Oj,{"data-ga":x("close-modal"),"data-testid":"".concat(St,"-close"),onClose:l})),a.createElement(m.TextSmall,{as:m.Box,"data-testid":"".concat(St,"-description")},s,"\xa0",a.createElement(ue.A,{Component:m.TextSmall,"data-ga":x("click-docs"),"data-testid":"".concat(St,"-docsLink"),href:d,target:"_blank",whiteSpace:"nowrap"},"Learn how to configure it."))),a.createElement(m.ModalBody,{"data-testid":"".concat(St,"-body"),overflow:{vertical:"auto"},padding:[0]},a.createElement(ct.fn,{gap:2,"data-testid":"".concat(St,"-standardFields"),hasBorder:!0},a.createElement(m.Text,{"data-testid":"".concat(St,"-standardFields-header")},"Notification settings"),a.createElement(m.Flex,{column:!0,"data-testid":"".concat(St,"-standardFields-body"),gap:3},a.createElement(yt.A,{component:"input","data-ga":x("configuration-name-input"),"data-testid":"".concat(St,"-configurationName"),onChange:A,placeholder:"i.e All alerts from All nodes",title:"Configuration name",value:E}),a.createElement(yt.A,{component:"select","data-ga":x("rooms-select"),"data-testid":"".concat(St,"-rooms"),isMulti:!0,onChange:e=>{var t;if(1===C.length)return;if(0===e.length)return I([]),void T([]);if(e.length===C.length||null===(t=e[e.length-1])||void 0===t||!t.value)return I([]),void T([ot.PT]);const n=e.map((e=>{let{value:t}=e;return t})).filter(Boolean),a=e.length>1?e.filter((e=>e.value)):e;I(n),T(a)},options:y,placeholder:"Select rooms",title:"Rooms",value:f}),a.createElement(yt.A,{component:"select","data-ga":x("notification-select"),"data-testid":"".concat(St,"-notifications"),isRequired:!0,onChange:e=>{let{value:t}=e;w(t)},options:Object.values(ot.N4),placeholder:"Select notifications",title:"Notifications",value:ot.N4[i]}))),a.createElement(ct.fn,{gap:2,"data-testid":"".concat(St,"-dynamicFields"),hasBorder:!0},a.createElement(m.Text,{"data-testid":"".concat(St,"-dynamicFields-header")},"Integration configuration"),a.createElement(m.Flex,{column:!0,"data-testid":"".concat(St,"-dynamicFields-body"),gap:3},Object.values(u).map((e=>{let{id:t,getValue:n,onChange:l,...o}=e;return a.createElement(yt.A,(0,r.A)({"data-ga":x("".concat(t,"-").concat(o.component)),"data-testid":"".concat(St,"-").concat(t),getDataGa:x,key:t,id:t,onChange:null===l||void 0===l?void 0:l({id:t,setRequiredSubsets:F,setSecrets:S}),secrets:v,setSecrets:S,value:null===n||void 0===n?void 0:n({id:t,secrets:v})},o))}))))),a.createElement(Tt,{id:t,integrationId:n,onClose:l,isSubmitEnabled:L})))},kt=e=>{let{"data-testid":t="channelList",...n}=e;const{bulkActions:l,channelData:o,currentChannelId:i,columns:c,dataGa:s,onFilter:d,onModalClose:u,rowActions:g,search:p,setSearch:E}=xt();return 
a.createElement(M.Ay,{tab:"Notifications::Channels"},a.createElement(m.Flex,(0,r.A)({column:!0,"data-testid":t,gap:4,height:"100%",width:"100%",margin:[3,0]},n),a.createElement(m.Table,{bulkActions:l,data:o,dataColumns:c,dataGa:s,globalFilter:p,globalFilterFn:d,onSearch:E,rowActions:g,testPrefix:"channelList",testPrefixCallback:e=>e.name}),!!i&&a.createElement(It,{"data-testid":"editChannelModal",id:i,onClose:u})))},Ft=0;var Pt=n(38819);const Mt=(0,i.A)((()=>n.e(5304).then(n.bind(n,25304))),"SilencingRules"),Dt={side:"top",type:"solid",size:"1px",color:"border"},Lt=()=>{const{notificationsActiveTab:e=Ft}=(0,Pt.PP)(),t=(0,a.useCallback)((e=>{const t=(0,Pt.PP)();(0,Pt.Z8)({...t,notificationsActiveTab:e})}),[]);return a.createElement(a.Fragment,null,a.createElement(m.Flex,{padding:[0,0,0,4]},a.createElement(m.H3,null,"Alerts & Notifications")),a.createElement(m.Tabs,{selected:parseInt(e,10),onChange:t,height:"calc(100% - 44px)",margin:[4,0,0,0]},a.createElement(m.Tab,{"data-testid":"spaceSettings-notifications-methods-tab","data-ga":"manage-space::click-tab::notifications-methods-tab",label:a.createElement(m.Text,null,"Notification Methods")},a.createElement(m.Flex,{padding:[2,4],border:Dt,flex:"grow"},a.createElement(kt,null))),a.createElement(m.Tab,{"data-testid":"spaceSettings-notifications-silencing-rules-tab","data-ga":"manage-space::click-tab::notifications-silencing-rules-tab",label:a.createElement(m.Text,null,"Notification Silencing Rules")},a.createElement(m.Flex,{border:Dt,flex:"grow"},a.createElement(a.Suspense,null,a.createElement(Mt,null))))))};var Nt=n(84976),Rt=n(28738),Bt=n(26751),_t=n(54856);const Ut=e=>{let{available:t,"data-testid":n="card",description:l,docsLink:o,fields:i,id:c,internal:s,kind:d,kindLabel:u,slug:g,required:p,title:h,...f}=e;const v=ot.a$[g]||{},x=(0,lt.Mw)("new"),y=(0,lt.ef)("new"),C=(0,E.JT)("channel:Manage"),[w,,A,T]=(0,b.A)(!1),S=(0,a.useCallback)((()=>{y({...pt.V,title:h,description:l,docsLink:o,fields:i,integration:g,loading:!1,loaded:!0,required:p,secrets:(0,it.s7)(i,p)}),A()}),[l,o,i,g,p]);return a.createElement(a.Fragment,null,a.createElement(m.Flex,(0,r.A)({background:"elementBackground",column:!0,"data-testid":n,justifyContent:"between",flex:!1,height:37,padding:[3,2,2,3],margin:[0,0,4,0],position:"relative",overflow:"hidden",round:.5,width:75},f),a.createElement(ct.MU,(0,r.A)({"data-testid":"".concat(n,"-blurredIcon")},v)),a.createElement(m.Flex,{column:!0,"data-testid":"".concat(n,"-details"),gap:3,margin:[0,0,3,0]},a.createElement(m.Flex,{"data-testid":"".concat(n,"-header"),justifyContent:"between"},a.createElement(m.Flex,{alignItems:"center","data-testid":"".concat(n,"-titleContainer"),gap:1},a.createElement(m.Icon,(0,r.A)({"data-testid":"".concat(n,"-icon")},v)),a.createElement(m.Text,{"data-testid":"".concat(n,"-title")},h)),!s&&t&&a.createElement(Ue.A,{align:"bottom",content:C?"":ot.WB,plain:!0},a.createElement(m.Box,null,a.createElement(ct.ro,{"data-ga":"manage-integration-".concat(g,"::click-add::notification-integrations-tab"),"data-testid":"".concat(n,"-button"),disabled:!C,label:"Add",onClick:S}))),!s&&!t&&a.createElement(_t.A,{"data-ga":"manage-integration-".concat(g,"::click-plan-badge::notification-integrations-tab")})),a.createElement(m.TextSmall,{color:"textDescription","data-testid":"".concat(n,"-description")},l," 
",o&&a.createElement(ue.A,{Component:ct.W6,"data-ga":"manage-integration-".concat(g,"::click-docs::notification-integrations-tab"),"data-testid":"".concat(n,"-docsLink"),href:o,target:"_blank",whiteSpace:"nowrap"},"Learn more."))),a.createElement(Ue.A,{align:"top",content:ot.HA[d].tooltip,"data-testid":"".concat(n,"-kindContainer"),plain:!0},a.createElement(m.Flex,{alignItems:"end",alignSelf:"end","data-testid":"".concat(n,"-kindContainer"),gap:1},a.createElement(m.Icon,{color:"textLite","data-testid":"".concat(n,"-kindIcon"),height:"16px",name:ot.HA[d].icon,width:"16px"}),a.createElement(m.TextSmall,{color:"textLite","data-testid":"".concat(n,"-kind")},u)))),w&&a.createElement(It,{"data-testid":"createChannelModal",integrationId:c,onClose:()=>{T(),x()}}))},Ot=e=>{let{"data-testid":t="group",integrations:n,title:l,...o}=e;return n.length?a.createElement(m.Flex,(0,r.A)({column:!0,"data-testid":t,gap:2,position:"relative"},o),a.createElement(m.TextBig,{color:"textDescription","data-testid":"".concat(t,"-title")},l),a.createElement(m.Flex,{flexWrap:!0,"data-testid":"".concat(t,"-integrations"),gap:4},n.map((e=>a.createElement(Ut,(0,r.A)({"data-testid":"".concat(e.slug,"Card"),key:e.slug},e)))))):null};var Vt=n(73743);const Ht=e=>{const t=(0,lt.EE)(e);(0,Et.A)((()=>({enabled:!!e,fetch:()=>(0,at.b8)(e),onFail:e=>t({...Vt.u,error:e.message}),onSettle:()=>t({loading:!1,loaded:!0}),onSuccess:e=>{t({...Vt.u,...e})}})),[e])},Wt=e=>{let{"data-testid":t="integrations",...n}=e;const l=(0,o.vt)(),i=(0,o.bq)();Ht(l);const{available:s,error:d,loaded:u,unavailable:g}=(0,lt.m$)(l),[p,E]=(0,a.useState)([]),[h,b]=(0,a.useState)([]),[f,v]=(0,a.useState)("");if(!u)return a.createElement(Rt.A,{"data-testid":"".concat(t,"-loader"),title:"Loading services..."});if(d)return a.createElement(Bt.A,{"data-testid":"".concat(t,"-error"),message:d,title:"Services of ".concat(i," are currently unavailable")});const x=(0,c.tW)(B.bq,{spaceSlug:i,settingsTab:B.A8});return a.createElement(M.Ay,{tab:"Notifications::Integrations"},a.createElement(m.Flex,(0,r.A)({column:!0,"data-testid":t},n,{padding:[0,3,3],gap:3,overflow:"hidden"}),a.createElement(m.Flex,{border:{side:"bottom",color:"placeholder"},"data-testid":"".concat(t,"-header"),width:"100%"},a.createElement(ue.A,{as:Nt.N_,Component:ct.He,"data-ga":"manage-integrations::click-back::notification-integrations-tab","data-testid":"".concat(t,"-backLink"),to:x},a.createElement(m.Icon,{"data-testid":"".concat(t,"-backIcon"),name:"arrow_left"}),a.createElement(m.H3,{"data-testid":"".concat(t,"-title")},B.ys[B.G0]))),a.createElement(m.Flex,{column:!0,"data-testid":"".concat(t,"-content"),gap:2,height:"100%",overflow:"hidden"},a.createElement(m.Box,{as:m.SearchInput,"data-ga":"manage-integrations::search::notification-integrations-tab","data-testid":"".concat(t,"-search"),iconLeft:a.createElement(m.Icon,{name:"magnify",color:"textLite"}),onChange:e=>{v(e),E((0,it.Zv)(s,e)),b((0,it.Zv)(g,e))},placeholder:"Search service",size:"small",width:{max:49.5}}),a.createElement(m.Flex,{column:!0,"data-testid":"".concat(t,"-groups"),overflow:"auto",height:"100%"},a.createElement(Ot,{"data-testid":"integrationGroupAvailable",integrations:f?p:s,title:"Available"}),a.createElement(Ot,{"data-testid":"integrationGroupUnavailable",integrations:f?h:g,title:"Unavailable"})))))},Yt=()=>a.createElement(l.BV,null,a.createElement(l.qh,{path:"/",element:a.createElement(Lt,null)}),a.createElement(l.qh,{path:"/".concat(B.G0),element:a.createElement(Wt,null)}));var qt=n(55189),jt=n(37618);const 
zt=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return[(0,m.useInputValue)({maxChars:20,value:e.name||""})||{},(0,m.useInputValue)({maxChars:30,value:e.slug||""})||{},(0,m.useInputValue)({maxChars:50,value:e.description||""})||{}]};var Jt=n(39522),Gt=n(47373),$t=n(53285),Kt=n(7484),Zt=n(61360);const Qt={loading:!1,isAvailable:!1,isValid:!0,error:null},Xt=e=>{const t=(0,o.ap)(),[n,l]=(0,a.useState)(Qt);return(0,a.useEffect)((()=>{if(t.slug==e)l(Qt);else{const t=(0,Gt.ni)(e);l({...Qt,isValid:!1,error:t})}}),[t.slug,e]),(0,Zt.A)((()=>{(0,Gt.ni)(e)||t.slug==e||(l((e=>({...e,loading:!0}))),(0,Kt.Q9)(e).then((e=>{let{data:t}=e;l((e=>({...e,loading:!1,error:t.isAvailable?e.error:"slugNotAvailable"})))})).catch((e=>{l((t=>{var n;return{...t,loading:!1,error:(null===(n=e.response)||void 0===n||null===(n=n.data)||void 0===n?void 0:n.errorMessage)||"Error while validating slug"}}))})))}),500,[e]),n},en=e=>{let{nameInput:t,slugInput:n,descriptionInput:l,onStartSlugValidation:o,onStopSlugValidation:i,onSlugValidationError:c,...s}=e;const[d,u,g,p]=t,[E,h,b,f]=n,[v,x,y,C]=l,{loading:w,error:A}=Xt(E);(0,a.useEffect)((()=>{(w?o:i)()}),[w]),(0,a.useEffect)((()=>{c(A)}),[A]);const T=(0,a.useMemo)((()=>(0,Gt.fc)(d)),[d]),S=(0,a.useMemo)((()=>(0,Gt.e_)(v)),[v]);return a.createElement(m.Flex,(0,r.A)({column:!0,gap:4},s),a.createElement($t.A,{Component:m.TextInput,permission:"space:UpdateMeta",label:"Name",value:d,onChange:u,error:Gt.xc[T],isDirty:p,instantFeedback:"all",fieldIndicator:g,"data-testid":"textInputs-spaceName",containerStyles:{width:{base:150}}}),a.createElement($t.A,{Component:m.TextInput,permission:"space:UpdateMeta",label:"Slug",value:E,onChange:h,error:Gt.xc[A]||A,isDirty:f,instantFeedback:"all",fieldIndicator:b,"data-testid":"textInputs-spaceSlug",containerStyles:{width:{base:150}}}),a.createElement($t.A,{Component:m.TextInput,permission:"space:UpdateMeta",label:"Description",value:v,onChange:x,error:Gt.xc[S],isDirty:C,instantFeedback:"all",fieldIndicator:y,"data-testid":"textInputs-spaceDescription",containerStyles:{width:{base:150}}}))};var tn=n(87292);const nn=e=>{let{id:t,...n}=e;return a.createElement(m.Flex,(0,r.A)({column:!0,gap:1},n),a.createElement(m.Text,{strong:!0},"Space Id"),a.createElement(tn.Ay,{confirmationText:"Space ID copied to your clipboard."},t))};var an=n(83694);const ln=()=>a.createElement(m.Text,{lineHeight:1.5},"With the change of the Space Slug, previous ",a.createElement(m.Text,{strong:!0},"links will be broken"),". In case you have some bookmarks or previous references using the previous Space Slug you should update them where possible. 
Please confirm you want to proceed with the change."),on=e=>{let{handleConfirm:t,handleDecline:n}=e;return a.createElement(m.ConfirmationDialog,{confirmLabel:"Continue","data-testid":"changeSpaceSlugDialog",handleConfirm:t,handleDecline:n,message:a.createElement(ln,null),title:"Space slug change"})};var rn=n(98046);const cn=(0,te.A)(m.Button),sn=e=>{let{nameInput:t,slugInput:n,descriptionInput:l,onClose:i,isValidatingSlug:c,slugError:s,...m}=e;const d=(0,o.ap)(),u=(0,rn.A)(d.id),[g,p]=(0,a.useState)(!1),[E]=t,[h]=n,[f]=l,v=(0,a.useMemo)((()=>d.slug!==h),[d.slug,h]),[x,,y,C]=(0,b.A)(),w=()=>{i(v?h:null),p(!1)},A=()=>p(!1),T=(0,a.useCallback)((e=>{!e&&v?y():(p(!0),u({name:E,description:f,...v?{slug:h}:{}},{onSuccess:w,onFail:A}))}),[E,h,f,d.slug]),S=(0,a.useCallback)((()=>T(!0)),[T]),I=(0,a.useMemo)((()=>{const e=(0,Gt.fc)(E),t=(0,Gt.e_)(f);return!!(g||c||e||s||t)||d.name===E&&d.slug===h&&(d.description===f||!d.description&&!f)}),[d,E,h,f,g,c]);return a.createElement(a.Fragment,null,a.createElement(cn,(0,r.A)({label:"SAVE",onClick:T,isLoading:g||c,disabled:I,"data-testid":"saveSpace-button",feature:"SpaceSettings"},m)),x?a.createElement(on,{handleConfirm:S,handleDecline:C}):null)},mn=(0,a.memo)((e=>{let{onClose:t,...n}=e;const l=(0,o.ap)(),i=(0,o.UV)("ids"),[c,,s,d]=(0,b.A)(),[u,,g,p]=(0,b.A)(),[E,h]=(0,a.useState)(),[f,v,x]=zt({name:l.name,slug:l.slug,description:l.description});if(!l.id)return null;const y=1===i.filter((e=>!(0,jt.ES)(e))).length;return a.createElement(M.Ay,{tab:"Info"},a.createElement(m.Flex,(0,r.A)({column:!0,"data-testid":"manageSpace",flex:"grow",justifyContent:"between",padding:[0,0,6,0],width:{max:150}},n),a.createElement(m.Flex,{column:!0,"data-testid":"manageSpace-settings"},a.createElement(m.H3,{margin:[0,0,4,0]},"Info"),a.createElement(m.Flex,{column:!0,gap:4},a.createElement(en,{"data-testid":"manageSpace-nameInputs",nameInput:f,slugInput:v,descriptionInput:x,onStartSlugValidation:g,onStopSlugValidation:p,onSlugValidationError:h}),a.createElement(nn,{"data-testid":"manageSpace-spaceIdInput",id:l.id,width:{base:150}}),a.createElement(m.TextInput,{label:"Your role in space",value:(0,_e.Zr)(l.roleInSpace),disabled:!0,containerStyles:{width:{base:150}}}),a.createElement(m.TextInput,{label:"Plan",value:(0,_e.Vn)(l.planName),disabled:!0,containerStyles:{width:{base:150}}}))),a.createElement(m.Flex,{"data-testid":"manageSpace-actions",justifyContent:"between",alignItems:"center"},a.createElement(m.Flex,{"data-testid":"manageSpace-deleteLeaveActions",gap:4},a.createElement(Ue.A,{align:"top",content:y&&qt.sh.leave,isBasic:!0,stretch:"align"},a.createElement(m.Flex,{alignItems:"center"},a.createElement(m.Button,{"data-ga":"manage-space-tab::click-leave-space::manage-space-modal","data-testid":"manageSpace-leave",disabled:y,icon:"switch_off",flavour:"borderless",label:"Leave space",neutral:!0,padding:[0],width:"fit-content",onClick:s}))),a.createElement(an.A,{"data-testid":"manageSpace-delete",id:l.id,isLastSpace:y,name:l.name,onClose:t})),a.createElement(sn,{"data-testid":"manageSpace-save",nameInput:f,slugInput:v,descriptionInput:x,onClose:t,isValidatingSlug:u,slugError:E}))),c&&a.createElement(Jt.A,{id:l.id,name:l.name,onClose:d}))}),(()=>!0));var dn=n(62232);const un=()=>{const 
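/*
 * "un" below assembles the Manage Space breadcrumb trail: a disabled
 * "Manage Space" crumb, the (also disabled) space name, then the active
 * settings tab; when a sub-tab is open, a fourth crumb shows either the
 * room name (for the room settings sub-tab) or the sub-tab label, and the
 * tab crumb gains an onClick that navigates back up to the tab route.
 */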
e=(0,l.Zp)(),t=(0,o.vt)(),n=(0,l.RQ)(B.uX),r=(0,l.RQ)(B.bq),{params:i}=n||r||{},{spaceSlug:s="",settingsTab:m="",settingsSubTab:d=""}=i,u=(0,o.ap)("name"),g=(0,A.J_)(t,d),p=(0,A.wz)(g,"name"),E=B.ys[m],h=(0,a.useCallback)((()=>{e((0,c.tW)(B.bq,{spaceSlug:s,settingsTab:m}))}),[s,m]),b=(0,a.useMemo)((()=>{const e=[{name:"Manage Space",isDisabled:!0},{name:u,isDisabled:!0},{name:E,...!!d&&{onClick:h}}];return d&&e.push({name:E===B.ys[B.aj]?p:B.ys[d]||d}),e}),[h,p,u,E,d]);return a.createElement(dn.A,{isBig:!0,items:b,showBackButton:!1,testid:"manageSpace"})};var gn=n(47731),pn=n(19673),En=(n(9391),n(67742));const hn=e=>a.createElement(m.Box,(0,r.A)({as:"hr",height:"100%",sx:{borderWidth:"0px 0px 0px 1px",borderColor:"borderSecondary",borderStyle:"solid"}},e)),bn=()=>a.createElement(hn,{height:"1px",width:"100%",sx:{borderWidth:"1px 0px 0px 0px",borderColor:"borderSecondary",borderStyle:"solid"}});var fn=n(42728);const vn=e=>{let{currentPlan:t,billingEmail:n,paymentMethod:l,businessName:o,vatNo:r,billingAddress:i}=e;const[c,s]=(0,pn.Qh)(),d=!!t.billingEmail&&!s,u=n||"",g=u?"":"Email is required",p=null!==l&&void 0!==l&&l.id?"**** ".concat(null===l||void 0===l?void 0:l.id):"",E=p?"":"Payment method is required",h=i?"":"Billing address is required";return a.createElement(m.Flex,{column:!0,gap:3},a.createElement(m.Flex,{gap:4},a.createElement(m.TextInput,{value:u,label:"Billing email",placeholder:"Not specified",instantFeedback:"all",isDirty:!!g,error:g,disabled:!0}),a.createElement(m.TextInput,{value:p,label:"Default payment method",placeholder:"Not specified",instantFeedback:"all",isDirty:!!E,error:E,disabled:!0})),r&&a.createElement(m.Flex,{gap:4},a.createElement(m.TextInput,{value:o||"",label:"Business name",placeholder:"Not specified",disabled:!0}),a.createElement(m.TextInput,{value:r,label:"VAT number",placeholder:"Not specified",disabled:!0})),a.createElement(m.Flex,null,a.createElement(m.TextInput,{value:(0,fn.qN)(i)||"",label:"Billing address",placeholder:"Not specified",isDirty:!!h,error:h,disabled:!0})),a.createElement(ue.A,{Component:m.Flex,cursor:d?"pointer":"default",disabled:!d,onClick:c,alignItems:"center",gap:1,"data-ga":"billing-options::click::billing"},a.createElement("span",null,"Change billing information and payment method"),a.createElement(m.Icon,{name:"nav_arrow_goto",width:"12px",height:"12px"})))},xn=e=>{let{onReset:t}=e;return a.createElement(m.Box,{as:m.Icon,name:"close_circle",onClick:t,cursor:"pointer",width:3,height:3,color:"border",position:"absolute",left:"2px"})},yn=e=>{let{promotionCode:t,onReset:n}=e;return a.createElement(m.Flex,{width:"100%"},a.createElement(m.Pill,{padding:[0],flavour:"neutral",hollow:!0},a.createElement(m.Flex,{position:"relative",gap:2,alignItems:"center",padding:[1,2,1,5]},a.createElement(xn,{onReset:n}),a.createElement(m.Text,null,t))))},Cn=e=>{let{promotionCode:t,onApply:n,error:l}=e;const[o,r]=(0,a.useState)(t||""),i=()=>{r(""),n("")};return a.createElement(m.Flex,{column:!0,gap:1},a.createElement(m.Text,{strong:!0},"Promotion code"),t&&!l?a.createElement(yn,{promotionCode:t,onReset:i}):a.createElement(m.Flex,{column:!0,gap:1},a.createElement(m.Flex,{gap:2,alignItems:"baseline"},a.createElement(m.TextInput,{value:o,onChange:e=>r(e.target.value),placeholder:"Promotion 
code"}),!l&&a.createElement(m.Button,{label:"Apply",onClick:()=>n(o),padding:[3,4],disabled:!o}),!!o&&a.createElement(m.Button,{label:"Clear",flavour:"hollow",onClick:i,padding:[3,4],disabled:!o})),l&&a.createElement(m.Text,{color:"error"},l)))},wn=e=>{let{needsCommitment:t,commitment:n,handleCommitmentChange:l}=e;return t&&a.createElement(m.Flex,{column:!0,gap:2},a.createElement(m.Flex,{gap:4,alignItems:"baseline"},a.createElement(m.Text,{strong:!0,style:{whiteSpace:"nowrap"}},"Committed Nodes"),a.createElement(m.TextInput,{onChange:l,value:n,placeholder:"i.e. 14",type:"number",min:1})),a.createElement(m.TextSmall,null,"Nodes that you'll have a discount of 25% on the original cost per node of the plan. This amount will be part of your annual prepayment."))};var An=n(63950),Tn=n.n(An),Sn=n(27994);const In={earlyBird:()=>a.createElement(m.Text,null,"You are moving from ",a.createElement(m.Text,{strong:!0},"Early Bird")," plan to another plan, you won't be able to come back to this. The ",a.createElement(m.Text,{strong:!0},"Community")," free plan will be there if you wish to cancel but this doesn't allow you to invite or change users using the Member role."),communityV1:e=>{let{onCtaClick:t=Tn()}=e;const{url:n}=(0,Sn.A)();return a.createElement(m.Text,null,"You are moving from ",a.createElement(m.Text,{strong:!0},"Community (2023.02)")," plan to another plan, you won't be able to come back to this. The new ",a.createElement(m.Text,{strong:!0},"Community (2023.11)")," free plan will be there if you wish to cancel but this will have new limitation: max of 5 nodes active and max of 1 custom dashboard. See full details on the"," ",a.createElement(ue.A,{as:Nt.N_,onClick:t,to:n,disabled:!n},a.createElement(m.Text,{color:"primary"},"View plans page")),".")},pro:()=>a.createElement(m.Text,null,"You are moving from ",a.createElement(m.Text,{strong:!0},"Pro")," plan to another plan by yourself, if you proceed we won't be able to migrate you to the Business plan with your current pricing conditions as communicated.")},kn=e=>{let{currentPlan:t,...n}=e;const l=In[(e=>"earlyBird"==e.slug?"earlyBird":"free"==e.slug&&"2023.02"==e.version?"communityV1":"pro"==e.slug?"pro":null)(t)]||null;return l&&a.createElement(a.Fragment,null,a.createElement(hn,{height:"1px",width:"100%",sx:{borderWidth:"1px 0px 0px 0px",borderColor:"borderSecondary",borderStyle:"solid"}}),a.createElement(m.Flex,{gap:3},a.createElement(m.Icon,{size:"large",color:"warning",name:"warning_triangle"}),a.createElement(l,n)))},Fn=e=>"earlyBird"===(null===e||void 0===e?void 0:e.slug)?"earlyBird":e&&e.slug&&"free"!==e.slug?"".concat(e.slug,"-").concat(e.interval):"free",Pn=s.default.div.withConfig({displayName:"withTableWrapper__TableWrapper",componentId:"sc-drcfxi-0"})(["margin-top:30px;"]),Mn=new Intl.NumberFormat("en-US",{style:"currency",currency:"USD"}),Dn={size:50,minSize:50,maxSize:100},Ln=e=>()=>a.createElement(m.Flex,{width:"100%",justifyContent:"end"},e),Nn=e=>{let{getValue:t}=e;return a.createElement(m.Flex,{width:"100%",justifyContent:"end"},t())},Rn=[{id:"name",accessor:"name",header:"",...Dn,fullWidth:!0},{id:"price",accessor:"price",header:Ln("Price"),cell:Nn,...Dn},{id:"qty",accessor:"qty",header:Ln("Qty"),cell:Nn,...Dn},{id:"month",accessor:"month",header:Ln("Month"),cell:Nn,...Dn},{id:"total",accessor:"total",header:Ln("Total"),cell:Nn,...Dn}],Bn=e=>a.createElement(m.Flex,(0,r.A)({gap:3},e),a.createElement(m.Icon,{size:"large",color:"warning",name:"warning_triangle"}),a.createElement(m.Text,null,"No immediate charges are 
applicable to this plan subscription. On-demand usage charges will be applied based on your node period count.")),_n=(Un=m.Table,e=>a.createElement(Pn,null,a.createElement(Un,e)));var Un;const On=e=>{let{lineItems:t}=e;return a.createElement(a.Fragment,null,t.map((e=>{var t;const n=(l=e).description?l.description.startsWith("Discount")?{color:"primary"}:l.description.startsWith("VAT")?{color:"textLite"}:{}:{};var l;const o=e.isInfo?m.TextSmall:m.Text;return a.createElement(m.Flex,{key:e.description,justifyContent:"between"},a.createElement(o,n,e.description),a.createElement(o,n,Mn.format(null===(t=e.total)||void 0===t?void 0:t.amount)))})))},Vn=(e=>t=>{var n;return t.lineItems.totalPayable?null!==(n=t.lineItems.info)&&void 0!==n&&n.length?a.createElement(m.Flex,{column:!0,gap:1},a.createElement(e,t),a.createElement(On,{lineItems:t.lineItems.info})):a.createElement(e,t):null})((e=>{let{lineItems:t}=e;return a.createElement(m.Flex,{justifyContent:"between",margin:[4,0,0,0]},a.createElement(m.H0,{strong:!0},"Total payable"),a.createElement(m.H0,{strong:!0,"data-testid":"totalPayableAmountPreview"},Mn.format(t.totalPayable.total.amount)))})),Hn=e=>{let{previewData:t,lineItems:n,agree:l,toggleAgree:o,zeroPreviewSubscriptionTotal:r,isUpdate:i}=e;const c=!i||!(null===t||void 0===t||!t.paymentMethod)&&!(null===t||void 0===t||!t.billingAddress);return a.createElement(a.Fragment,null,a.createElement(bn,null),r&&a.createElement(Bn,{padding:[0,0,2,0]}),a.createElement(m.Flex,{gap:3},a.createElement(m.Checkbox,{checked:l,onChange:o,disabled:!c}),a.createElement(m.Text,null,"I agree to Netdata Inc's"," ",a.createElement(ue.A,{href:"https://www.netdata.cloud/service-terms/",rel:"noopener noreferrer",target:"_blank"},"Terms of Service")," ","and"," ",a.createElement(ue.A,{href:"https://netdata.cloud/privacy",rel:"noopener noreferer",target:"_blank"},"Privacy Policy"))),a.createElement(Vn,{lineItems:n}))},Wn=e=>{let{lineItems:t}=e;return a.createElement(a.Fragment,null,a.createElement(_n,{dataColumns:Rn,data:t.table}),a.createElement(bn,null),a.createElement(On,{lineItems:t.footer}))};var Yn=n(34641),qn=n(25624);const jn=e=>{let{title:t,onConfirm:n,onClose:l}=e;const{sendButtonClickedLog:o,isReady:r}=(0,Te.A)(),{onTrial:i}=(0,qn.A)(),c=(0,a.useCallback)((()=>{n(),l(),o({description:"confirm-billing-plan-change"},!0)}),[o,r]),s=(0,a.useCallback)((()=>{l(),o({description:"close-billing-plan-change-confirmation-dialog"},!0)}),[o,r]),d="Community"==t,u=d?"Cancellation":"Downgrading";return i?a.createElement(Yn.A,{onDecline:s}):a.createElement(m.ConfirmationDialog,{confirmLabel:"Yes","data-ga":"downgrade-dialog","data-testid":"downgradeDialog",handleConfirm:c,handleDecline:s,message:a.createElement(m.Flex,{column:!0,gap:2},a.createElement(m.Text,null,"Are you sure you want to ",a.createElement(m.Text,{strong:!0},"move to the ",t," plan")," and cancel your current plan?"),d&&a.createElement(m.Text,null,"Upon cancellation,"," ",a.createElement(m.Text,{strong:!0},"a credit of the value related to the unused period will be credited to your Netdata account.")," ","The credit will be available for you to use on future plan subscriptions with us."),a.createElement(m.Text,null,"For the next 24 hours, you will be able to use all your current notification method configurations."," ",a.createElement(m.Text,{strong:!0},"After 24 hours, any of the notification method configurations that aren't available on your space's plan will be automatically disabled.")),a.createElement(m.Text,null,u," might affect your Space users. 
Please check what roles are available on the"," ",a.createElement(ue.A,{href:"https://learn.netdata.cloud/docs/nightly/concepts/netdata-plans#areas-impacted-by-plans",rel:"noopener noreferrer",strong:!0,target:"_blank"},t," plan"),"."," ",a.createElement(m.Text,{strong:!0},"Users with unavailable roles on the ",t," plan will immediately have restricted access to the Space.")),a.createElement(m.Text,null,"Do you wish to proceed?")),title:"Go to ".concat(t," plan?")})};var zn=n(88116);const Jn=e=>{let{isUpdate:t,currentPlan:n={},billingEmail:l,paymentMethod:o,...i}=e;const[c,s]=(0,pn.Qh)(),d=!!n.billingEmail&&!s;return t&&l&&o?null:a.createElement(m.Flex,(0,r.A)({gap:3,alignItems:"center"},i),a.createElement(m.Icon,{size:"large",color:"warning",name:"warning_triangle"}),a.createElement(m.Flex,{column:!0,gap:1},a.createElement(m.Text,null,"You can't proceed to checkout without having provided a"," ",a.createElement(m.Text,{strong:!0},"payment method")," and a ",a.createElement(m.Text,{strong:!0},"billing address"),"."),a.createElement(ue.A,{cursor:d?"pointer":"default",disabled:!d,onClick:c,gap:1,"data-ga":"billing-options::click::billing"},"Please go to the billing portal and fill in a payment method")))};var Gn=n(79731);const $n=e=>e?"::commited-".concat(e):"",Kn=(0,s.default)(m.Flex).withConfig({displayName:"checkoutTotals__CheckoutButtonWrapper",componentId:"sc-1q0h6ca-0"})(["background:",";padding:16px 0;position:sticky;bottom:0;z-index:20;"],(0,m.getColor)("mainBackground")),Zn=(0,te.A)(m.Button),Qn=(e=>t=>{let{title:n,isDowngrade:l,isUpdate:o,checkoutOrUpdate:i,...c}=t;const[s,,m,d]=(0,b.A)();return o&&l?a.createElement(a.Fragment,null,s&&a.createElement(jn,{title:n,onConfirm:i,onClose:d}),a.createElement(e,(0,r.A)({onClick:m},c))):a.createElement(e,(0,r.A)({onClick:i},c))})((0,s.default)(Zn).withConfig({displayName:"checkoutTotals__CheckoutButton",componentId:"sc-1q0h6ca-1"})(["flex:auto;"])),Xn=e=>{let{title:t,slug:n,onClose:l,currentPlan:o,price:r,commitment:i,currentCommitment:c=0,email:s,needsCommitment:d,previewData:u,loadingPreview:g,isUpdate:p,promotionCode:E}=e;const h=((e,t)=>{var n,a;return((null===(n=zn.FJ[t])||void 0===n?void 0:n.level)||0)<((null===(a=zn.FJ[e])||void 0===a?void 0:a.level)||0)})(o.slug,n),[f,v]=(0,b.A)(!1),x=p&&!f||!p&&!s||d&&(c===i||!i||i<1),[y,C]=(0,b.A)(),w=(0,pn.L_)(),A=(0,pn.M4)(),T=p||o.billingEmail,S=T?A:w,{sendLog:I,isReady:k}=(0,Te.A)(),[F,P]=(0,me.A)(),M=(0,a.useCallback)((()=>{C(),S({productId:r.id,email:(null===u||void 0===u?void 0:u.billingEmail)||s,...d&&{commitment:i},...E&&{promotionCode:E}}).then((()=>{l(),I({isSuccess:!0,details:S==T?"update-billing-plan":"checkout-billing-plan"},!0),F({header:"Success",text:"You have successfully updated your plan"})})).catch((e=>{var t;I({isFailure:!0,details:S==T?"update-billing-plan":"checkout-billing-plan"},!0);const n=(null===e||void 0===e||null===(t=e.response)||void 0===t?void 0:t.data)||e;P({header:"Error",text:(0,Gn.o)(null===n||void 0===n?void 0:n.errorMsgKey)||(null===n||void 0===n?void 0:n.errorMessage)||"Something went wrong"})})).finally(C)}),[I,k]),D=(L=null===u||void 0===u?void 0:u.invoiceLineItems,(0,a.useMemo)((()=>{if(null===L||void 0===L||!L.length)return null;let e=!1;return L.reduce(((t,n)=>{return"Total payable"==n.description?(t.totalPayable=n,e=!0,t):(n.unitPrice?t.table=[...t.table,{id:n.description,name:n.description,price:Mn.format(n.unitPrice.amount),qty:n.quantity,month:n.month,total:Mn.format(null===(a=n.total)||void 0===a?void 
0:a.amount)}]:e?t.info=[...t.info,{...n,isInfo:!0}]:t.footer=[...t.footer,n],t);var a}),{table:[],footer:[],info:[]})}),[L]));var L;const N=((null===D||void 0===D?void 0:D.footer)||[]).some((e=>{let{description:t,total:n}=e;return"Subscription Total"==t&&0==(null===n||void 0===n?void 0:n.amount)}));return a.createElement(m.Flex,{column:!0,gap:3},g||!D||N?null:a.createElement(Wn,{lineItems:D}),a.createElement(Kn,{column:!0,gap:2},p&&!g&&D&&a.createElement(Hn,{isUpdate:p,previewData:u,lineItems:D,agree:f,toggleAgree:v,zeroPreviewSubscriptionTotal:N}),p&&!g&&a.createElement(Jn,{currentPlan:o,billingEmail:null===u||void 0===u?void 0:u.billingEmail,paymentMethod:null===u||void 0===u?void 0:u.paymentMethod,padding:[0,0,4,0],isUpdate:p}),a.createElement(Qn,{title:t,isDowngrade:h,label:p?"Checkout":"Proceed to checkout",disabled:!!x||y,checkoutOrUpdate:M,isLoading:y,"data-ga":"proceedTocheckoutButton::click-".concat(Fn(r)).concat($n(i),"::billing"),"data-testid":"billingCheckoutTotals-proceedToCheckoutButton",isUpdate:p})))},ea=e=>{let{prices:t,currentPlan:n}=e;const[l,o]=(0,a.useState)(0),r=t[l],i=null===r||void 0===r?void 0:r.commitment,c=(0,ht.n)("id"),s=(0,U.gr)(c,"ids"),m=(0,O.BU)(s),d=Math.max(5,m.length),[u,g]=(0,a.useState)(n.committedNodes||d);return{recurringIndex:l,setRecurringIndex:o,price:r,needsCommitment:i,commitment:u,handleCommitmentChange:e=>g(e.target.value?parseInt(e.target.value,10):"")}},ta=function(){let{title:e="Checkout"}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return t=>n=>a.createElement(u.GO,{onClose:n.onClose},a.createElement(g.z,{onClose:n.onClose,title:e}),a.createElement(m.Flex,{column:!0,padding:[4,4,0,4],justifyContent:"between",height:"calc(100vh - 60px)",overflow:"auto"},a.createElement(t,n)))},na=3e5,aa=ta({title:"Update plan"})((e=>{let{title:t,onClose:n,prices:o,currentPlan:i={},children:c}=e;const{recurringIndex:s,setRecurringIndex:d,price:u,needsCommitment:g,commitment:p,handleCommitmentChange:h}=ea({prices:o,currentPlan:i}),{state:b}=(0,l.zy)(),[f,v]=(0,a.useState)((null===b||void 0===b?void 0:b.coupon)||""),[x,y]=(0,a.useState)(0),[C,w]=(0,a.useState)(na),A=(0,E.JT)("billing:Manage"),T=(0,fn.di)({price:u,promotionCode:f,commitment:p}),{loading:S,value:I,promoCodeError:k}=(0,pn.qW)(T,x);return(0,En.A)((()=>{S||y((e=>e+1))}),C),(0,a.useEffect)((()=>{w(S?null:na)}),[S]),a.createElement(M.Ay,{feature:"Billing::UpdateModal"},A?a.createElement(a.Fragment,null,a.createElement(m.Flex,{column:!0,gap:4},a.Children.map(c,(e=>(0,a.cloneElement)(e,{recurringIndex:s,setRecurringIndex:d}))),a.createElement(wn,{needsCommitment:g,commitment:p,handleCommitmentChange:h}),a.createElement(bn,null),S?a.createElement(Rt.A,{title:"Loading billing info...",height:"auto"}):I?a.createElement(a.Fragment,null,a.createElement(vn,(0,r.A)({currentPlan:i},I)),a.createElement(bn,null),a.createElement(Cn,{promotionCode:f,onApply:v,error:k})):a.createElement("div",null,"No data available"),a.createElement(kn,{currentPlan:i,onCtaClick:n})),a.createElement(Xn,{title:t,slug:u.slug,currentPlan:i,onClose:n,price:u,balance:i.balance,commitment:p,needsCommitment:g,previewData:I,loadingPreview:S,isUpdate:!0,promotionCode:f})):a.createElement(m.Flex,null,a.createElement(m.TextBigger,null,"You don't have permissions to change the plan.")))}));var la=n(45467);const 
oa=ta()((e=>{let{onClose:t,prices:n,currentPlan:o={},children:r}=e;const{recurringIndex:i,setRecurringIndex:c,price:s,needsCommitment:d,commitment:u,handleCommitmentChange:g}=ea({prices:n,currentPlan:o}),p=(0,ve.uW)("email"),[h,b]=(0,a.useState)(o.billingEmail||p||""),f=(0,E.JT)("billing:Manage"),{state:v}=(0,l.zy)(),[x,y]=(0,a.useState)((null===v||void 0===v?void 0:v.coupon)||""),{discount:C,promoCodeError:w}=(0,pn.D)({productId:s.id,promotionCode:x,...d&&u&&{commitment:u}}),[A,T]=(0,a.useState)(0),S=(0,fn.Lf)({price:s,promotionCode:x,commitment:u}),{loading:I,value:k}=(0,pn.Tr)(S,A);return(0,la.A)((()=>()=>T((e=>e+1))),[C,!x]),a.createElement(M.Ay,{feature:"Billing::CheckoutModal"},f?a.createElement(a.Fragment,null,a.createElement(m.Flex,{column:!0,gap:4},a.Children.map(r,(e=>(0,a.cloneElement)(e,{recurringIndex:i,setRecurringIndex:c}))),a.createElement(wn,{needsCommitment:d,commitment:u,handleCommitmentChange:g}),a.createElement(bn,null),a.createElement(m.TextInput,{onChange:e=>b(e.target.value),value:h,label:"Billing email",placeholder:"jsmith@example.com"}),a.createElement(bn,null),a.createElement(Cn,{promotionCode:x,onApply:y,error:w}),a.createElement(kn,{currentPlan:o,onCtaClick:t})),a.createElement(Xn,{currentPlan:o,onClose:t,price:s,balance:o.balance,commitment:u,email:h,needsCommitment:d,previewData:k,loadingPreview:I,promotionCode:x})):a.createElement(m.Flex,null,a.createElement(m.TextBigger,null,"You don't have permissions to change the plan.")))}));var ra=n(21290),ia=n(12602),ca=n(49389);const sa=[{free:a.createElement(m.H0,null,"Free forever"),homelab:a.createElement(m.H0,null,"$90.00",a.createElement(m.TextSmall,{color:"textLite"},"/year")),"business_2023.02":a.createElement(a.Fragment,null,a.createElement(m.H0,null,"$3.00",a.createElement(m.TextSmall,{color:"textLite"},"/Node/month (billed yearly)")),a.createElement(m.Text,null,a.createElement(m.Text,{strong:!0},"25% discount")," is applied for annual billing.")),"business_2024.03":a.createElement(a.Fragment,null,a.createElement(m.H0,null,a.createElement(m.TextSmall,{color:"textLite"},"Up to")," $4.50",a.createElement(m.TextSmall,{color:"textLite"},"/Node/month (billed yearly)")),a.createElement(m.Flex,{column:!0},a.createElement(m.Text,null,a.createElement(m.Text,{strong:!0},"25% discount")," is applied for annual billing."),a.createElement(m.Text,null,a.createElement(m.Text,{strong:!0},"+ volume discounts")," based on number of committed nodes"))),business:a.createElement(a.Fragment,null,a.createElement(m.H0,null,a.createElement(m.TextSmall,{color:"textLite"},"Up to")," $4.50",a.createElement(m.TextSmall,{color:"textLite"},"/Node/month (billed yearly)")),a.createElement(m.Flex,{column:!0},a.createElement(m.Text,null,a.createElement(m.Text,{strong:!0},"25% discount")," is applied for annual billing."),a.createElement(m.Text,null,a.createElement(m.Text,{strong:!0},"+ volume discounts")," based on number of committed nodes")))},{free:a.createElement(m.H0,null,"Free 
forever"),homelab:a.createElement(m.H0,null,"$10.00",a.createElement(m.TextSmall,{color:"textLite"},"/month")),"business_2023.02":a.createElement(a.Fragment,null,a.createElement(m.H0,null,"$4.00",a.createElement(m.TextSmall,{color:"textLite"},"/Node/month"))),"business_2024.03":a.createElement(a.Fragment,null,a.createElement(m.H0,null,"$6.00",a.createElement(m.TextSmall,{color:"textLite"},"/Node/month"))),business:a.createElement(a.Fragment,null,a.createElement(m.H0,null,"$6.00",a.createElement(m.TextSmall,{color:"textLite"},"/Node/month")))}],ma=e=>{let{recurringIndex:t,slug:n,version:l}=e;return a.createElement(m.Flex,{column:!0,gap:2},sa[t]["".concat(n,"_").concat(l)]||sa[t][n])},da=s.default.div.withConfig({displayName:"pricingDetails__OneLine",componentId:"sc-hbslp4-0"})(["white-space:nowrap white-space:nowrap;"]),ua=e=>{let{nodesLastPeriod:t,committedNodes:n}=e;const l=t-n,o=l>0;return 0==l?a.createElement(m.TextSmall,null,"You are using all your committed nodes."):o?a.createElement(m.TextSmall,null,"You are using"," ",a.createElement(m.TextSmall,{strong:!0,color:"error"},l," ",(0,ca.su)(l).toLowerCase()," more")," ","than your committed nodes."):a.createElement(m.TextSmall,null,"You are using"," ",a.createElement(m.TextSmall,{strong:!0,color:"success"},t," out of ",n)," ","committed ",(0,ca.su)(n).toLowerCase(),".")},ga=e=>{let{interval:t,currentPeriodTo:n,committedNodes:l,nodesLastPeriod:o,showPromotion:r,cancelling:i,onCancelPlan:c,commitment:s}=e;const{localeDateString:d}=(0,ra.$j)();return a.createElement(m.Flex,{column:!0,gap:1},!!n&&a.createElement(m.TextSmall,null,a.createElement(m.TextSmall,{strong:!0,"data-testid":"billingPricingDetails-interval"},"Billing ".concat(zn.rY[t]))," ","(renews ",d(new Date(n),{long:!1}),")"),r&&"month"===t&&a.createElement(ue.A,{onClick:c,disabled:i},a.createElement(da,null,"Save 25% by changing your billing frequency to yearly")),s?a.createElement(a.Fragment,null,a.createElement(m.TextSmall,null,"Committed Nodes:"," ",a.createElement(m.Text,{strong:!0,"data-testid":"billingPricingDetails-committedNodesNumber"},l)),"number"===typeof o?a.createElement(ua,{nodesLastPeriod:o,committedNodes:l}):null):null)},pa=e=>{let{features:t,showViewDetails:n}=e;return a.createElement(m.Flex,{column:!0,gap:1},t.map((e=>a.createElement(m.Flex,{gap:2,key:e},a.createElement(m.Box,{width:5},a.createElement(m.Icon,{name:"check",width:"20px",height:"20px",color:"primary"})),a.createElement(m.TextSmall,null,e)))),n&&a.createElement(m.Flex,{gap:2},a.createElement(m.Box,{width:5}),a.createElement(ue.A,{Component:m.Flex,as:Nt.N_,cursor:"pointer",alignItems:"center",gap:1,color:"text",hoverColor:"textFocus",to:"all-plans"},a.createElement(m.Text,{strong:!0},"View full details"),a.createElement(m.Icon,{name:"chevron_right",width:"16px",height:"16px"}))))},Ea=(e=>t=>{let{inModal:n,recurringIndex:l,setRecurringIndex:o,...r}=t;return n?a.createElement(m.Flex,{width:"100%",justifyContent:"between",alignItems:"center"},a.createElement(e,r),a.createElement(m.Flex,{gap:3,padding:[0,10]},zn.HR.map(((e,t)=>a.createElement(m.RadioButton,{key:e,checked:l===t,onChange:()=>o(t),"data-testid":"billingPaidPlans-".concat(e,"-radioButton")},a.createElement(m.Text,{color:"textDescription"},(0,_e.Zr)(zn.rY[e]))))))):a.createElement(e,r)})((e=>{let{children:t}=e;return a.createElement(m.H3,{"data-testid":"billingPricingDetails-activePlanName"},t)})),ha=e=>{var 
t;let{slug:n,version:l,prices:o,recurringIndex:i=0,setRecurringIndex:c,currentPlan:s={},isActive:d,showAllDetails:u,showTeaserDetails:g,cancelling:p,onCancelPlan:E,title:h,features:b,nodesLastPeriod:f,inModal:v}=e;const{onTrial:x}=(0,qn.A)(),y=d?s.pricing:(null===(t=o[i])||void 0===t?void 0:t.pricing)||{},C=(0,fn.Kj)(n);return a.createElement(m.Flex,{column:!0,gap:2},a.createElement(m.Flex,{column:!0},a.createElement(m.Flex,{alignItems:"center",gap:2},a.createElement(Ea,{inModal:v,recurringIndex:i,setRecurringIndex:c},h),d&&a.createElement(a.Fragment,null,a.createElement(m.Pill,{flavour:"success","data-testid":"active-plan",icon:"checkmark_s"},x?"Trial":"Active"),x&&!u&&a.createElement(ia.A,null,a.createElement(m.TextBig,{color:"primary"},"Upgrade Now!")))),l&&a.createElement(m.TextBig,null,"(",l,")")),u||!d||"free"===n?a.createElement(ma,(0,r.A)({inModal:v,isActive:d,currentPlan:s,recurringIndex:i,slug:n,version:l},y)):null,g&&null!==b&&void 0!==b&&b[l]?a.createElement(pa,{features:b[l],showViewDetails:!C}):null,d&&!(0,fn.Kj)(n)&&!x&&a.createElement(ga,(0,r.A)({},s,{showPromotion:!0,cancelling:p,onCancelPlan:E,nodesLastPeriod:f})))},ba=e=>{let{onConfirm:t}=e;const{sendButtonClickedLog:n}=(0,Te.A)(),[l,o]=(0,b.A)(),r=(0,a.useCallback)((()=>{n({feature:"HomelabAcceptTerms"}),t()}),[n]);return a.createElement(m.ConfirmationDialog,{title:"Terms of usage",confirmLabel:"I agree",handleConfirm:r,hideDecline:!0,isConfirmPositive:!0,isConfirmDisabled:!l,message:a.createElement(m.Flex,{column:!0,gap:2},a.createElement(m.Text,null,"By continuing, you acknowledge that you will use Netdata ONLY for homelab purposes. If professional use is detected you will be downgraded, and will waive any rights to refunds."),a.createElement(m.Text,null,"For more details on conditions please check our"," ",a.createElement(ue.A,{href:"https://www.netdata.cloud/fair-usage-policy",rel:"noopener noreferrer",strong:!0,target:"_blank"},"Fair Usage Policy"),"."),a.createElement(m.Flex,null,a.createElement(m.Checkbox,{checked:l,onChange:o,label:"I will NOT use the Homelab plan for professional purposes (this includes testing and staging environments).",labelPosition:"right",labelProps:{strong:!0,padding:[0,0,0,1]}})),a.createElement(m.Text,null))})};var fa=n(67276),va=n(47431),xa=n(93476),ya=n(66732),Ca=n(97118);const wa=(0,s.default)(m.Flex).attrs((e=>({padding:[0,2,4,2],background:"mainBackground",width:{min:"280px"},...e}))).withConfig({displayName:"styled__StyledWrapper",componentId:"sc-1gqbztm-0"})(["width:","};height:",";place-self:",";"],(e=>{let{showAllPlans:t,numberOfPlans:n}=e;return t?"calc(100% / ".concat(n+1,")"):"auto"}),(e=>{let{showAllPlans:t}=e;return t?"240px":"auto"}),(e=>{let{showAllPlans:t}=e;return t?"center":"auto"})),Aa=e=>{let{slug:t,version:n,prices:l=[],isSmall:i,isActive:c,currentPlan:s,showAllPlans:d,showTeaserDetails:u,allPlansView:g=!1,nodesLastPeriod:p,numberOfPlans:h,...f}=e;const v=(0,ve.NJ)(),x=(0,o.vt)(),{title:y,features:C}=zn.FJ[t]||zn.FJ.free,w=(0,E.JT)("billing:Manage"),{sendLog:A,sendButtonClickedLog:T,isReady:S}=(0,Te.A)(),{onTrial:I}=(0,qn.A)(),{cancelling:k,startCancelling:F,stopCancelling:P}=(0,pn.og)(),M="AWS"==s.paymentProvider,D=(0,a.useCallback)((function(){let{label:e}=arguments.length>0&&void 
0!==arguments[0]?arguments[0]:{};T({label:e},!0)}),[T,S]),L=(0,fn.z_)({currentPlan:s,slug:t,version:n,onTrial:I}),N=k||c&&(0,fn.Kj)(t)||s.slug==zn.VH.earlyBird&&t==zn.VH.free||s.slug==zn.VH.free&&"2023.02"==s.version&&t==zn.VH.free&&"2023.11"==n,[R,B]=(0,a.useState)({}),[_,,U,O]=(0,b.A)(),[V,H]=(0,me.A)(),W=(0,pn.M4)(),Y="cancel"===R.type&&!(0,fn.Kj)(s.slug)&&!_,q="cancel"===R.type&&t==zn.VH.free&&"2023.11"==n,{id:j}=(0,Ca.A)(),z=(0,$e.ly)(),J=(0,a.useCallback)((()=>{U(),F(),W({productId:j}).then((()=>{V({header:"Successfully canceled subscription",text:"You are now on Community plan"}),A({isSuccess:!0,description:"cancel-subscription"})})).catch((()=>{H({header:"Failed to cancel the subscription",text:"Remained on ".concat(y," plan")}),A({isFailure:!0,description:"cancel-subscription",error:"Failed to cancel the subscription"})})).finally((()=>{P(),z()}))}),[A,S]),G=(0,a.useMemo)((()=>"user-".concat(v,"-space-").concat(x,"-accept-homelab-terms")),[v,x]),$=(0,a.useCallback)((()=>{localStorage.setItem(G,!0)}),[G]),K=(0,a.useCallback)((()=>"true"==localStorage.getItem(G)),[G]),Z=(0,a.useCallback)((e=>{if(!k&&!N)if(t!=zn.VH.homelab||s.slug==zn.VH.homelab||K()){if((0,fn.Kj)(t))return B({type:"cancel"}),void A(e,!0);if((0,fn.Kj)(s.slug))return le("checkout"),void A(e,!0);le("update"),A(e,!0)}else B({type:"homelab"})}),[A,S,R,B]),Q=(0,a.useCallback)((()=>{B({}),$(),setTimeout((()=>{Z({action:fa.o1.buttonClicked},!0)}),200)}),[B,$,Z]),X=!!s.billingEmail,ee=X?aa:oa,{billingModalType:te,billingModalSlug:ne}=(0,Pt.PP)(),ae=["checkout","update"].includes(te)&&t==ne,le=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:"";const n=(0,Pt.PP)();(0,Pt.Z8)({...n,billingModalType:e,billingModalSlug:""==e?e:t})},[oe,,re,ie]=(0,b.A)();return a.createElement(wa,(0,r.A)({column:!0,flex:i,gap:2,justifyContent:"between",showAllPlans:d,numberOfPlans:h,isSmall:i},f),a.createElement(ha,{slug:t,prices:l,isActive:c,currentPlan:s,cancelling:k,showAllDetails:d,showTeaserDetails:u,title:y,version:n,features:C,nodesLastPeriod:p,onCancelPlan:()=>Z({action:fa.o1.buttonClicked,label:y})}),!g&&a.createElement($t.A,{Component:m.Button,permission:"billing:Manage","data-ga":"upgrade-button::click-".concat(Fn({slug:t}),"::current-").concat(Fn(s),"::billing"),"data-testid":"billingPlan-".concat(t,"-").concat(c?"currentPlan":"upgradeButton"),label:L,flavour:(0,fn.Dy)(null===s||void 0===s?void 0:s.slug,t),disabled:N,width:"100%",onClick:()=>Z({action:fa.o1.buttonClicked,label:L,dataGa:"upgrade-button::click-".concat(Fn({slug:t}),"::current-").concat(Fn(s),"::billing")}),isLoading:k&&c,loadingLabel:"Cancelling"}),!d&&a.createElement(a.Fragment,null,I&&a.createElement(xa.A,{flavour:"billing"}),(I||!(0,fn.Kj)(s.slug))&&a.createElement(m.Flex,{column:!0,gap:4},a.createElement(m.Flex,{gap:6,alignItems:"baseline"},I&&a.createElement(va.A,null),!(0,fn.Kj)(s.slug)&&!M&&a.createElement(ue.A,{Component:m.Flex,as:Nt.N_,cursor:"pointer",alignItems:"center",gap:1,color:"text",hoverColor:"textFocus",to:"all-plans",padding:[4,0,0],disabled:k,onClick:()=>D({label:w?"Change plan":"View plans"}),"data-testid":"billingPlan-viewMorePlansLink","data-ga":"change-plan::click::billing"},a.createElement("span",null,w?"Change plan":"View 
plans"),a.createElement(m.Icon,{name:"chevron_right",width:"16px",height:"16px"}))),I&&a.createElement(a.Fragment,null,a.createElement(ya.A,{flavour:"billing",color:"textLite",onOptOutClick:re}),oe&&a.createElement(Yn.A,{onDecline:ie,onCancellingEnd:()=>{}})))),(Y||q)&&a.createElement(jn,{title:y,onConfirm:J,onClose:()=>{O(),B({})}}),"homelab"==R.type?a.createElement(ba,{onConfirm:Q}):null,ae&&a.createElement(ee,{title:y,onClose:()=>{O(),le()},prices:l,currentPlan:s,showProrations:X},a.createElement(ha,{slug:t,prices:l,currentPlan:s,title:y,version:n,inModal:!0})))},Ta=e=>{let{slug:t,isSmall:n,currentPlan:l,showAllPlans:o,allPlansView:i=!1,showTeaserDetails:c,numberOfPlans:s,...d}=e;const{sendButtonClickedLog:u}=(0,Te.A)(),{cancelling:g}=(0,pn.og)(),{features:p}=zn.FJ.enterprise,E=(0,a.useCallback)((()=>{window.open("https://www.netdata.cloud/request-enterprise/","_blank","noopener,noreferrer"),u({feature:"OnPremContact"})}),[u]);return a.createElement(wa,(0,r.A)({column:!0,flex:n,gap:2,justifyContent:"between",showAllPlans:o,numberOfPlans:s,isSmall:n},d),a.createElement(m.Flex,{column:!0,gap:2},a.createElement(m.Flex,{column:!0},a.createElement(m.Flex,{alignItems:"center",gap:2},a.createElement(m.H3,{"data-testid":"billingPricingDetails-activePlanName"},"Enterprise On-Premise"))),a.createElement(m.H0,null,a.createElement(m.TextSmall,{color:"textLite"},"Up to")," $3.52",a.createElement(m.TextSmall,{color:"textLite"},"/Node/month (billed yearly)")),a.createElement(m.Flex,{column:!0},a.createElement(m.Text,null,"Starts at 214 node licenses")),c&&a.createElement(pa,{features:p[2023.11]||[]})),!i&&a.createElement($t.A,{Component:m.Button,permission:"billing:Manage","data-ga":"upgrade-button::click-".concat(Fn({slug:t}),"::current-").concat(Fn(l),"::billing"),"data-testid":"billingPlan-".concat(t,"-upgradeButton"),label:"Contact us",flavour:"hollow",disabled:g,width:"100%",onClick:E}))},Sa=e=>{let{plans:t,isSmall:n,currentPlan:l,showAllPlans:o,showTeaserDetails:r,numberOfPlans:i}=e;return a.createElement(a.Fragment,null,zn.tD.map((e=>(Object.entries(t[e]||{})||[]).map((t=>{let[c,s]=t;return a.createElement(Aa,{key:"".concat(e,"-").concat(c),slug:e,version:c,prices:s,isActive:l.slug==e&&l.version==c,isSmall:n,currentPlan:l,showAllPlans:o,showTeaserDetails:r,numberOfPlans:i})})))),a.createElement(Ta,{slug:"enterprise",isSmall:n,currentPlan:l,showAllPlans:o,showTeaserDetails:r,numberOfPlans:i}))},Ia=s.default.div.withConfig({displayName:"styled__Container",componentId:"sc-1oa2kv1-0"})(["display:grid;background:",";"," ",""],(0,m.getColor)("mainBackground"),(e=>{let{sticky:t}=e;return t?"position:sticky;top:0;":""}),(e=>{let{numberOfPlans:t,showAllPlans:n}=e;const a=[n?"200px":"minmax(280px, 1fr)",...Array(t).fill("minmax(280px, 1.5fr)")];return"grid-template-columns: ".concat(a.join(" "),";")})),ka=s.default.div.withConfig({displayName:"styled__Header",componentId:"sc-1oa2kv1-1"})(["display:contents;> *{background:",";border-bottom:1px solid ",";}"],(0,m.getColor)("panelBg"),(0,m.getColor)("placeholder")),Fa=(0,s.default)(m.Collapsible).withConfig({displayName:"styled__Content",componentId:"sc-1oa2kv1-2"})(["display:contents;"]),Pa=()=>{const{loaded:e,value:t}=(0,pn.lU)(),n=(0,fn.LJ)(t);return e?n:0},Ma=e=>{var t,n,l,o;let{isSmall:r,plans:i,currentPlan:c,showAllPlans:s=!1,showTeaserDetails:d=!1,showPricingColumn:u}=e;const g="earlyBird"===c.slug,p=Pa(),{version:E}=(0,Ca.A)(),h=null===(t=(null===(n=i.free)||void 0===n?void 0:n[null===c||void 0===c?void 0:c.version])||(null===(l=i.free)||void 
0===l?void 0:l[E]))||void 0===t?void 0:t[0],b=i.earlyBird?null===(o=Object.values(i.earlyBird)[0])||void 0===o?void 0:o[0]:{};return a.createElement(Ia,{numberOfPlans:p,showAllPlans:s,sticky:!0},u?a.createElement(m.Flex,{column:!0,height:50,justifyContent:"center"},a.createElement(m.Text,{strong:!0},"Pricing")):null,g?a.createElement(Aa,{slug:"earlyBird",version:null===b||void 0===b?void 0:b.version,prices:null===b||void 0===b?void 0:b.prices,isSmall:r,isActive:"earlyBird"===c.slug,currentPlan:c,showAllPlans:s,numberOfPlans:p}):a.createElement(Aa,{slug:"free",version:null===h||void 0===h?void 0:h.version,prices:null===h||void 0===h?void 0:h.prices,isSmall:r,isActive:"free"===c.slug&&c.version==(null===h||void 0===h?void 0:h.version),currentPlan:c,showAllPlans:s,showTeaserDetails:d,numberOfPlans:p}),a.createElement(Sa,{plans:i,isSmall:r,currentPlan:c,showAllPlans:s,showTeaserDetails:d,numberOfPlans:p}))},Da=e=>{let{children:t,...n}=e;return a.createElement(m.Flex,(0,r.A)({alignItems:"center",gap:2},n),t)},La=()=>a.createElement(m.Flex,{column:!0,gap:2,width:65},a.createElement(m.TextMicro,null,a.createElement(m.TextMicro,{strong:!0},"Available credit")," for you to use on any plan subscriptions with us."),a.createElement(m.TextMicro,null,"It is OK to change your mind; we will give you full flexibility! You can change the plan level, billing frequency, or committed nodes; we won't hold you to any choice. When applicable, we'll credit you back on any unused amount.")),Na=(0,Ct.A)(ue.A),Ra=e=>{let{currentPlan:t}=e;const[n,l]=(0,pn.Qh)(),o=!!t.billingEmail&&!l,r=(0,E.JT)("billing:ReadAll"),i=null===t||void 0===t?void 0:t.marketplaceUrl,c="AWS"==(null===t||void 0===t?void 0:t.paymentProvider),s=(0,a.useCallback)((()=>{i&&(location.href=i)}),[i]);return a.createElement(m.Flex,{column:!0,gap:2},a.createElement(m.H3,null,"Plan & Billing"),r&&a.createElement(a.Fragment,null,a.createElement(Da,null,c?null:a.createElement(a.Fragment,null,a.createElement(m.Flex,{gap:1,alignItems:"center"},a.createElement(m.Text,null,"Credit:"),a.createElement(m.Text,{"data-testid":"billingHeader-credits",strong:!0},Mn.format(t.balance.amount||0)),a.createElement(Ue.A,{content:La,align:"bottom",isBasic:!0},a.createElement(m.Icon,{name:"information",width:"16px",height:"16px",color:"textLite"}))),a.createElement(hn,null)),a.createElement(m.Text,null,"Billing email:"," ",a.createElement(m.Text,{strong:!0,"data-testid":"billingHeader-email"},t.billingEmail||"-")),a.createElement(hn,null),i?a.createElement(Na,{Component:m.Flex,cursor:"pointer",onClick:s,alignItems:"center",gap:1,tooltip:"Manage your subscription on the AWS Marketplace","data-ga":"billing-options::click::billing","data-testid":"billingHeader-goToMarketplace"},a.createElement("span",null,"Manage your Subscription"),a.createElement(m.Icon,{name:"nav_arrow_goto",width:"12px",height:"12px"})):a.createElement(ue.A,{Component:m.Flex,cursor:o?"pointer":"default",disabled:!o,onClick:n,alignItems:"center",gap:1,"data-ga":"billing-options::click::billing","data-testid":"billingHeader-goToPortal"},a.createElement("span",null,"Billing options and Invoices"),a.createElement(m.Icon,{name:"nav_arrow_goto",width:"12px",height:"12px"}))),a.createElement(hn,{height:1,width:"100%",sx:{borderWidth:"1px 0px 0px 0px",borderColor:"borderSecondary",borderStyle:"solid"}})))};n(34504),n(78898);function Ba(){const e=new Date,t=e.getFullYear(),n=e.getMonth(),a=e.getDate(),l=new Date(0);return l.setFullYear(t,n,a-1),l.setHours(23,59,59,999),l}var _a=n(51730),Ua=n(13999);function 
Oa(e,t){return(0,Ua.f)(e,-t)}var Va=n(4883);function Ha(e,t){return(0,Va.P)(e,-t)}var Wa=n(35840);function Ya(e,t){const{years:n=0,months:a=0,weeks:l=0,days:o=0,hours:r=0,minutes:i=0,seconds:c=0}=t,s=Oa(Ha(e,a+12*n),o+7*l),m=1e3*(c+60*(i+60*r));return(0,Wa.w)(e,s.getTime()-m)}var qa=n(71600),ja=n(66118),za=n(51891),Ja=n(44731),Ga=n(99851),$a=n(24266),Ka=n(27467),Za=n(36712);const Qa={nodes:1,p90:2,committed_nodes:3},Xa={"Daily count":"The weighted 90th percentile of the live node count during the day, taking time as the weight. If you have 30 live nodes throughout the day, except for a two hour peak of 44 live nodes, the daily value is 31.","Committed nodes":"The number of nodes committed to in the yearly plan. In case the period count is higher than the number of committed nodes, the difference is billed as overage.","Period count":"The 90th percentile of the daily counts for this period up to the date. The last value for the period is used as the number of nodes for the bill for that period."};ja.t1.register(ja.kc,ja.PP,ja.E8,ja.FN,ja.No,ja.s$,ja.m_,ja.ZT,ja.A6,za.A),ja.m_.positioners.follow=function(e,t){return null===e||void 0===e||!e.length||t.y>360?(this._resolveAnimations().update(this,{opacity:0}),!1):(0===this.opacity&&this._resolveAnimations().update(this,{opacity:1}),{x:t.x,y:t.y})};const el=e=>({nodes:{label:"Daily count",type:"bar",color:(0,m.getColor)("text")({theme:e}),backgroundColor:(0,m.getColor)("primary")({theme:e}),borderColor:(0,m.getColor)("primary")({theme:e}),borderWidth:2,pointStyle:"rectangle",usePointStyle:!0},p90:{label:"Period count",type:"line",color:(0,m.getColor)("text")({theme:e}),borderColor:(0,m.getColor)(["purple","lilac"])({theme:e}),borderWidth:2,fill:!1,stepped:!0},committed_nodes:{label:"Committed nodes",type:"line",color:(0,m.getColor)("text")({theme:e}),borderColor:(0,m.getColor)(["blue","aquamarine"])({theme:e}),borderWidth:2,fill:!1,borderDash:[1,2],borderDashOffset:1,pointStyle:!1}}),tl={border:{side:"all",color:"inputBorder",padding:[3]},round:!0},nl=Ba(),al=(0,_a.o)(Ya(nl,{months:1})),ll=e=>{let{onNodesLastPeriodFetch:t}=e;const n=(0,Ka.rW)("offset"),l=(0,o.vt)(),[r,i]=(0,a.useState)((()=>({start:al,end:nl}))),[c,d,u]=(0,Et.A)((()=>({enabled:!!l&&!!r.start&&!!r.end,fetch:()=>(0,Za.U2)(l,{after:Math.floor((0,qa.W)((0,ra.ii)(r.start,n))/1e3),before:Math.floor((0,qa.W)((0,ra.ii)(r.end,n))/1e3)}),initialValue:{labels:[],data:[]},onSuccess:e=>{let{data:n,labels:a}=e;if(!n.length)return;const l=n.at(-1),o=l[a.indexOf("nodes")],r=l[a.indexOf("timestamp")];t((e=>e.timestamp>r?e:{timestamp:r,value:o}))}})),[l,r,t]),g=(0,a.useContext)(s.ThemeContext),p=(0,a.useMemo)((()=>{const e=c.data.map((e=>{let[t]=e;return t})),t=c.labels.reduce(((t,n,a)=>a?[{...el(g)[n],data:e.map(((e,t)=>c.data[t][a])),order:Qa[n]},...t]:t),[]);return{labels:e,datasets:t}}),[c]),[E,h]=function(e){let{data:t,annotations:n=[]}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};const{localeDateString:l}=(0,ra.$j)(),o=(0,a.useRef)(!1),r=(0,a.useRef)(),i=(0,a.useMemo)((()=>({interaction:{axis:"x"},plugins:{legend:{position:"bottom",align:"start",onClick:(e,t,n)=>{const a=n.legendItems.findIndex((e=>e.text===t.text));n.chart.isDatasetVisible(a)?n.chart.hide(a):n.chart.show(a)},labels:{generateLabels:e=>e.data.datasets.map(((t,n)=>({fontColor:t.color,text:t.label,fillStyle:t.backgroundColor,strokeStyle:t.borderColor,pointStyle:"Daily 
count"===t.label?"rect":"line",hidden:!e.isDatasetVisible(n),order:t.order}))).sort(((e,t)=>e.order-t.order)),usePointStyle:!0},onHover:function(e,t){!o.current&&r.current&&(o.current=!0,r.current.innerHTML=Xa[t.text],r.current.style.left=e.x+"px",r.current.style.top=e.y+"px",r.current.style.visibility="visible",r.current.style.transform="translateY(-100%)")},onLeave:function(){r.current&&(o.current=!1,r.current.innerHTML="",r.current.style.visibility="hidden")}},tooltip:{enabled:!0,mode:"nearest",intersect:!1,yAlign:"bottom",usePointStyle:!0,position:"follow",backgroundColor:(0,m.getColor)("tooltip")({theme:e}),color:(0,m.getColor)("tooltipText")({theme:e}),callbacks:{title:e=>{const[t]=e;return t?l(1e3*t.label,{month:"2-digit",day:"2-digit",year:"numeric",long:!1,dateStyle:void 0}):""},labelPointStyle:e=>({pointStyle:"Total Nodes"===e.dataset.label?"rect":"line"}),label:e=>" ".concat(e.formattedValue," ").concat(e.dataset.label||"")}},annotation:{annotations:n.reduce(((n,a,l)=>({...n,["annotation".concat(l)]:{type:"line",value:t.findIndex((e=>e[0]===a.timestamp)),borderColor:(0,m.getRgbColor)("attention",.3)({theme:e}),borderDashOffset:0,borderWidth:10,drawTime:"afterDatasetsDraw",label:{drawTime:"afterDatasetsDraw",display:!1,backgroundColor:(0,m.getRgbColor)("attention",.8)({theme:e}),borderWidth:0,color:"white",content:a.name,textAlign:"center"},scaleID:"x",enter(e,t){const n=e.chart,a=n.options.plugins.annotation.annotations["annotation".concat(l)];a.label.display=!0,a.label.position=t.y/e.chart.chartArea.height>.5?"start":"end",n.update()},leave(e){const t=e.chart;t.options.plugins.annotation.annotations["annotation".concat(l)].label.display=!1,t.update()}}})),{})}},responsive:!0,maintainAspectRatio:!1,scales:{x:{ticks:{callback:function(e){return l(1e3*this.getLabelForValue(e),{month:"2-digit",day:"2-digit",year:"numeric",long:!1,dateStyle:void 0})},color:(0,m.getColor)("textLite")({theme:e})}},y:{beginAtZero:!0,ticks:{color:(0,m.getColor)("textLite")({theme:e})}}}})),[e,t]);return[i,r]}(g,c);return a.createElement(m.Flex,{column:!0,gap:4},a.createElement(m.Flex,{alignItems:"center",justifyContent:"between"},a.createElement(m.H3,null,"Usage"),a.createElement(Ga.A,{values:r,utc:n,onChange:i,tagging:"billing-usage",isPlaying:!1,onlyDates:!0,accessorProps:tl,padding:[4,0],width:"auto",maxDate:nl})),a.createElement(m.Flex,{position:"relative",height:90},d?a.createElement(Rt.A,{title:"Loading billing data..."}):u?a.createElement($a.H4,{title:"Chart couldn't be loaded"}):a.createElement(Ja.t1,{type:"bar",data:p,options:E}),a.createElement(m.Flex,{ref:h,background:"main",color:"generic",position:"absolute",round:!0,padding:[3],width:{max:75},sx:{visibility:"hidden"},onMouseOver:()=>h.current.style.visibility="hidden"})))},ol=e=>{let{isSmall:t,currentPlan:n,loaded:l,currentLoaded:o,plans:r}=e;const[i,c]=(0,a.useState)({timestamp:null,value:null}),s=r[n.slug]?r[n.slug][n.version]:[];return a.createElement(a.Fragment,null,a.createElement(Ra,{currentPlan:n}),l&&o?a.createElement(a.Fragment,null,(0,fn.Kj)(n.slug)?a.createElement(Ma,{isSmall:t,plans:r,currentPlan:n,showTeaserDetails:!0}):a.createElement(Aa,{slug:n.slug,version:n.version,prices:s,width:t?"auto":120,padding:[0],height:"auto",isSmall:t,isActive:!0,currentPlan:n,allPlansView:!0,nodesLastPeriod:i.value}),a.createElement(hn,{height:1,width:"100%",sx:{borderWidth:"1px 0px 0px 0px",borderColor:"borderSecondary",borderStyle:"solid"}}),a.createElement(ll,{onNodesLastPeriodFetch:c})):a.createElement(Rt.A,{title:l?"Loading billing 
info...":"Loading Netdata plans..."}))};var rl=n(71847);const il={year:"Yearly",month:"Monthly"},cl=e=>{var t;let{currentPlan:n}=e;const l=(0,o.bq)(),[r,i]=(0,pn.Qh)(),{cancelling:c,startCancelling:s,stopCancelling:d}=(0,pn.og)(),u=!!n.billingEmail&&!i,[g,p]=(0,b.A)(),[h,f]=(0,me.A)(),v=(0,pn.M4)(),{title:x}=zn.FJ[n.slug]||zn.FJ.free,{id:y}=(0,Ca.A)(),C=(0,E.JT)("billing:Manage"),w=(0,E.JT)("billing:ReadAll"),{title:A}=zn.FJ[null===n||void 0===n?void 0:n.slug]||zn.FJ.free,T=il[null===n||void 0===n?void 0:n.interval];return a.createElement(m.Flex,{column:!0,gap:2},a.createElement(m.Flex,{gap:2,alignItems:"center"},a.createElement(ue.A,{as:Nt.N_,to:"/spaces/".concat(l,"/settings/billing"),disabled:c,color:"text",hoverColor:"textLite",showToolTip:!0,content:"Back to Plan & Billing",align:"bottom",isBasic:!0},a.createElement(m.Icon,{name:"arrow_left",width:"20px",height:"20px",margin:[1.5,0,0]})),a.createElement(m.H3,null,"All Plans")),a.createElement(Da,null,a.createElement(m.Flex,{gap:1,alignItems:"center"},a.createElement(m.Text,null,"Active plan:"),a.createElement(m.Text,{strong:!0},A)),a.createElement(hn,null),T&&a.createElement(a.Fragment,null,a.createElement(m.Flex,{gap:1,alignItems:"center"},a.createElement(m.Text,null,"Billing frequency:"),a.createElement(m.Text,{strong:!0},T)),a.createElement(hn,null)),n.commitment?a.createElement(m.Flex,{gap:1,alignItems:"center"},a.createElement(m.Text,null,"Committed Nodes:"),a.createElement(m.Text,{strong:!0},(null===n||void 0===n?void 0:n.committedNodes)||0)):null,a.createElement(hn,null),w&&a.createElement(a.Fragment,null,a.createElement(m.Flex,{gap:1,alignItems:"center"},a.createElement(m.Text,null,"Credit:"),a.createElement(m.Text,{strong:!0},Mn.format((null===n||void 0===n||null===(t=n.balance)||void 0===t?void 0:t.amount)||0)),a.createElement(Ue.A,{content:La,align:"bottom",isBasic:!0},a.createElement(m.Icon,{name:"information",width:"16px",height:"16px",color:"textLite"}))),a.createElement(hn,null),a.createElement(m.Flex,{gap:1,alignItems:"center"},a.createElement(m.Text,null,"Billing email:"),a.createElement(m.Text,{strong:!0},(null===n||void 0===n?void 0:n.billingEmail)||"-")),a.createElement(hn,null)),a.createElement(ue.A,{Component:m.Flex,cursor:u?"pointer":"default",disabled:!u,onClick:r,alignItems:"center",gap:1},a.createElement("span",null,"Billing options and Invoices"),a.createElement(m.Icon,{name:"nav_arrow_goto",width:"12px",height:"12px"})),!(0,fn.Kj)(null===n||void 0===n?void 0:n.slug)&&a.createElement(a.Fragment,null,a.createElement(hn,null),a.createElement(ue.A,{Component:m.Flex,cursor:"pointer",onClick:p,alignItems:"center",gap:1,disabled:c||!C,"data-ga":"cancel-plan::click::billing"},a.createElement(m.Text,{textDecoration:"underline"},c?"Canceling plan...":"Cancel plan")))),a.createElement(hn,{height:1,width:"100%",sx:{borderWidth:"1px 0px 0px 0px",borderColor:"borderSecondary",borderStyle:"solid"}}),g&&a.createElement(jn,{title:"Community",onConfirm:()=>{s(),v({productId:y}).then((()=>((0,rl.H)("billing","cancel-plan","global-view",{slug:null===n||void 0===n?void 0:n.slug,interval:null===n||void 0===n?void 0:n.interval,success:!0}),h({header:"Successfully canceled subscription",text:"You are now on Community plan"})))).catch((()=>((0,rl.H)("billing","cancel-plan","global-view",{slug:null===n||void 0===n?void 0:n.slug,interval:null===n||void 0===n?void 0:n.interval,success:!1}),f({header:"Failed to cancel the subscription",text:"Remained on ".concat(x," 
plan")})))).finally((()=>{d()}))},onClose:p}))},sl=()=>a.createElement(m.Icon,{name:"check",color:"primary"}),ml=()=>a.createElement(m.Icon,{name:"checkmark_partial_s",color:"textLite"}),dl=()=>a.createElement(m.Icon,{name:"chevron_down_thin",color:"text"}),ul=()=>a.createElement(m.Icon,{name:"chevron_up_thin",color:"text"}),gl=e=>{let{center:t,end:n,...l}=e;return a.createElement(m.Flex,{alignItems:"center",justifyContent:t?"center":n?"end":"start",padding:[2]},a.createElement(m.Text,(0,r.A)({},t?{textAlign:"center"}:{},l)))},pl=e=>{let{children:t,...n}=e;return a.createElement(gl,(0,r.A)({center:!0},n),t?a.createElement(m.Flex,{alignItems:"center",gap:2},t,a.createElement(sl,null)):a.createElement(sl,null))},El=e=>a.createElement(gl,(0,r.A)({center:!0},e),"UNLIMITED"),hl=e=>a.createElement(gl,(0,r.A)({center:!0},e),a.createElement(ml,null)),bl=e=>a.createElement(gl,(0,r.A)({center:!0},e),"SOON"),fl=e=>t=>{const{index:n,title:l="",showAllPlans:o,onToggle:r,collapsed:i}=t,c=Pa();return a.createElement(Ia,{numberOfPlans:c,showAllPlans:o},a.createElement(ka,{onClick:()=>r(n)},a.createElement(gl,{strong:!0},l),Array.from(Array(c-1).keys()).map((e=>a.createElement(gl,{key:e}))),a.createElement(gl,{end:!0},i[n]?a.createElement(ul,null):a.createElement(dl,null))),a.createElement(Fa,{open:!i[n]},a.createElement(e,t)))},vl=(0,a.memo)(fl((()=>a.createElement(a.Fragment,null,a.createElement(gl,null,"Scalability"),a.createElement(gl,{center:!0},"Vertical and Horizontal"),a.createElement(gl,{center:!0},"Vertical and Horizontal"),a.createElement(gl,{center:!0},"Vertical and Horizontal"),a.createElement(gl,{center:!0},"Vertical and Horizontal"),a.createElement(gl,null,"High Availability"),a.createElement(pl,null),a.createElement(pl,null),a.createElement(pl,null),a.createElement(pl,null),a.createElement(gl,null,"Data Retention"),a.createElement(El,null),a.createElement(El,null),a.createElement(El,null),a.createElement(El,null),a.createElement(gl,null,"Data Privacy"),a.createElement(gl,{center:!0},"Data stored On-Prem and visualized on Netdata Cloud"),a.createElement(gl,{center:!0},"Data stored On-Prem and visualized on Netdata Cloud"),a.createElement(gl,{center:!0},"Data stored On-Prem and visualized on Netdata Cloud"),a.createElement(gl,{center:!0},"Data stored and visualized On-Prem"),a.createElement(gl,null,"Configuration"),a.createElement(gl,{center:!0},"Manual, IaC or in App(UI)"),a.createElement(gl,{center:!0},"Manual, IaC or in App(UI)"),a.createElement(gl,{center:!0},"Manual, IaC or in App(UI)"),a.createElement(gl,{center:!0},"Manual, IaC or in App(UI)"))))),xl=(0,a.memo)(fl((e=>{let{freePlanLimitattions:t}=e;return a.createElement(a.Fragment,null,a.createElement(gl,null,"Customizable charts"),a.createElement(pl,null),a.createElement(pl,null),a.createElement(pl,null),a.createElement(pl,null),a.createElement(gl,null,"Infrastructure wide Dashboards"),a.createElement(pl,null),a.createElement(pl,null),a.createElement(pl,null),a.createElement(pl,null),a.createElement(gl,null,"Centralized Alerts Management"),a.createElement(pl,null),a.createElement(pl,null),a.createElement(pl,null),a.createElement(pl,null),a.createElement(gl,null,"Infrastructure Organization (Rooms)"),a.createElement(pl,null),a.createElement(pl,null),a.createElement(pl,null),a.createElement(pl,null),a.createElement(gl,null,"Custom Dashboards"),a.createElement(pl,null,null!==t&&void 0!==t&&t.maxDashboards?a.createElement(m.Text,null,"(Limited to ",t.maxDashboards," per 
Room)"):null),a.createElement(pl,null),a.createElement(pl,null),a.createElement(pl,null),a.createElement(gl,null,"Access dashboards from anywhere in the world"),a.createElement(pl,null),a.createElement(pl,null),a.createElement(pl,null),a.createElement(pl,null),a.createElement(gl,null,"Role Based Access Control (RBAC)"),a.createElement(pl,null),a.createElement(pl,null),a.createElement(pl,null),a.createElement(pl,null),a.createElement(gl,null,"Auditing"),a.createElement(pl,null),a.createElement(pl,null),a.createElement(pl,null),a.createElement(pl,null),a.createElement(gl,null,"Invite Team Members"),a.createElement(pl,null),a.createElement(pl,null),a.createElement(pl,null),a.createElement(pl,null),a.createElement(gl,null,"Functions"),a.createElement(pl,null),a.createElement(pl,null),a.createElement(pl,null),a.createElement(pl,null),a.createElement(gl,null,"Netdata Assistant"),a.createElement(pl,null),a.createElement(pl,null),a.createElement(pl,null),a.createElement(pl,null),a.createElement(gl,null,"Mobile App"),a.createElement(hl,null),a.createElement(pl,null),a.createElement(pl,null),a.createElement(hl,null),a.createElement(gl,null,"Centralized Management of Integrations"),a.createElement(bl,null),a.createElement(bl,null),a.createElement(bl,null),a.createElement(bl,null))}))),yl=(0,a.memo)(fl((e=>{let{freePlanLimitattions:t}=e;return a.createElement(a.Fragment,null,a.createElement(gl,null,"Active Connected Nodes"),null!==t&&void 0!==t&&t.maxNodes?a.createElement(gl,{center:!0},"Max of ",t.maxNodes):a.createElement(El,null),a.createElement(El,null),a.createElement(El,null),a.createElement(El,null),a.createElement(gl,null,"Active Custom Dashboards"),null!==t&&void 0!==t&&t.maxDashboards?a.createElement(gl,{center:!0},"Max of ",t.maxDashboards," per Room"):a.createElement(El,null),a.createElement(El,null),a.createElement(El,null),a.createElement(El,null),a.createElement(gl,null,"Infrastructure metrics"),a.createElement(El,null),a.createElement(El,null),a.createElement(El,null),a.createElement(El,null),a.createElement(gl,null,"APM metrics"),a.createElement(El,null),a.createElement(El,null),a.createElement(El,null),a.createElement(El,null),a.createElement(gl,null,"Custom metrics"),a.createElement(El,null),a.createElement(El,null),a.createElement(El,null),a.createElement(El,null),a.createElement(gl,null,"Synthetic checks"),a.createElement(El,null),a.createElement(El,null),a.createElement(El,null),a.createElement(El,null),a.createElement(gl,null,"Monitor system journal logs"),a.createElement(El,null),a.createElement(El,null),a.createElement(El,null),a.createElement(El,null),a.createElement(gl,null,"Auditing Events"),a.createElement(gl,{center:!0},"4 hours"),a.createElement(gl,{center:!0},"90 days"),a.createElement(gl,{center:!0},"90 days"),a.createElement(gl,{center:!0},"As required"),a.createElement(gl,null,"Topology Events"),a.createElement(gl,{center:!0},"4 hours"),a.createElement(gl,{center:!0},"14 days"),a.createElement(gl,{center:!0},"14 days"),a.createElement(gl,{center:!0},"As required"),a.createElement(gl,null,"Alert Events"),a.createElement(gl,{center:!0},"4 hours"),a.createElement(gl,{center:!0},"60 days"),a.createElement(gl,{center:!0},"60 days"),a.createElement(gl,{center:!0},"As required"),a.createElement(gl,null,"Alert Notification Integrations"),a.createElement(gl,{center:!0},"Email, Discord"),a.createElement(gl,{center:!0},"Email, Discord, Webhook, Mattermost, Opsgenie, PagerDuty, RocketChat, Slack, and more"),a.createElement(gl,{center:!0},"Email, Discord, Webhook, Mattermost, 
Opsgenie, PagerDuty, RocketChat, Slack, and more"),a.createElement(gl,{center:!0},a.createElement(m.Text,null,"Same as Business plan"),a.createElement("br",null),a.createElement(m.TextSmall,null,"(Custom requests can be handled)")),a.createElement(gl,null,"User Administration"),a.createElement(gl,{center:!0},"Basic"),a.createElement(gl,{center:!0},"Advanced"),a.createElement(gl,{center:!0},"Advanced"),a.createElement(gl,{center:!0},"Advanced"))}))),Cl=(0,a.memo)(fl((e=>{let{isEarlyBird:t}=e;return a.createElement(a.Fragment,null,a.createElement(gl,null,a.createElement(m.Text,{id:"administrators"},"Administrators"),a.createElement("br",null),a.createElement(m.TextSmall,{color:"textDescription",id:"same-as-managers-but-unable-to-manage-users-or-rooms"},"Users with this role can control Spaces, War Rooms, Nodes, Users and Billing. They can also access any Room in the Space.")),a.createElement(pl,null),a.createElement(pl,null),a.createElement(pl,null),a.createElement(pl,null),a.createElement(gl,null,a.createElement(m.Text,{id:"troubleshooters"},"Troubleshooters"),a.createElement("br",null),a.createElement(m.TextSmall,{color:"textDescription"},"Users with this role can use Netdata to troubleshoot, not manage entities. They can access any Room in the Space.")),a.createElement(hl,null),a.createElement(pl,null),a.createElement(pl,null),a.createElement(pl,null),a.createElement(gl,null,a.createElement(m.Text,{id:"managers"},"Managers"),a.createElement("br",null),a.createElement(m.TextSmall,{color:"textDescription"},"Users with this role can manage Rooms and Users. They can access any Room in the Space.")),a.createElement(hl,null),a.createElement(pl,null),a.createElement(pl,null),a.createElement(pl,null),a.createElement(gl,null,a.createElement(m.Text,{id:"observers"},"Observers"),a.createElement("br",null),a.createElement(m.TextSmall,{color:"textDescription"},"Users with this role can only view data in specific Rooms.")),a.createElement(hl,null),a.createElement(pl,null),a.createElement(pl,null),a.createElement(pl,null),a.createElement(gl,null,a.createElement(m.Text,{id:"billing"},"Billing"),a.createElement("br",null),a.createElement(m.TextSmall,{color:"textDescription"},"Users with this role can handle billing options and invoices.")),a.createElement(hl,null),a.createElement(pl,null),a.createElement(pl,null),a.createElement(pl,null),a.createElement(gl,null,a.createElement(m.Text,{id:"billing"},"Member"),a.createElement("br",null),a.createElement(m.TextSmall,{color:"textDescription"},"This role allows users to manage rooms and invite fellow Member teammates. These users cannot see all rooms in the Space but can see all Nodes since they are always on the All Nodes Room.")),t?a.createElement(pl,null):a.createElement(hl,null),a.createElement(hl,null),a.createElement(hl,null),a.createElement(hl,null))}))),wl=(0,a.memo)(fl((()=>a.createElement(a.Fragment,null,a.createElement(gl,null,"Service Availability"),a.createElement(gl,{center:!0},"Best Effort (99.5% in last 12 months)"),a.createElement(gl,{center:!0},"Best Effort (99.5% in last 12 months)"),a.createElement(gl,{center:!0},"99.9% annually (excl. 
scheduled maintenance)"),a.createElement(gl,{center:!0},"Same as Business plan"),a.createElement(gl,null,"Technical Support"),a.createElement(gl,{center:!0},"Public Forums, Tickets & Chat"),a.createElement(gl,{center:!0},"Public Forums, Tickets & Chat"),a.createElement(gl,{center:!0},a.createElement(m.Text,null,"Public Forums, Tickets & Chat"),a.createElement("br",null),a.createElement(m.Text,{color:"textDescription"},"Need Premium Support?"," ",a.createElement(ue.A,{Component:m.Text,href:"https://www.netdata.cloud/contact-us/?subject=custom-support-requirements",target:"_blank",rel:"noopener noreferrer"},"Reach out to us"))),a.createElement(gl,{center:!0},"Custom Design to Meet Requirements"),a.createElement(gl,null,"Initial Deployment Consulting Services"),a.createElement(gl,{center:!0},"Public Forums, Tickets & Chat"),a.createElement(gl,{center:!0},"Public Forums, Tickets & Chat"),a.createElement(gl,{center:!0},"Public Forums, Tickets & Chat"),a.createElement(gl,{center:!0},"Remote or On Site Consultation and Training During Deployment"))))),Al=e=>{var t;let{plans:n,currentPlan:l}=e;const o="free"==(null===l||void 0===l?void 0:l.slug)&&"2023.02"==(null===l||void 0===l?void 0:l.version),i="earlyBird"===(null===l||void 0===l?void 0:l.slug),c=null===(t=Object.values((null===n||void 0===n?void 0:n.free)||{}))||void 0===t||null===(t=t[0])||void 0===t||null===(t=t[0])||void 0===t?void 0:t.planLimitations,[s,d]=(0,a.useState)((()=>[!1,!0,!0,!0,!0])),u={isOldCommunity:o,isEarlyBird:i,freePlanLimitattions:c,showAllPlans:!0,onToggle:e=>d((t=>(t[e]=!t[e],[...t]))),collapsed:s};return a.createElement(m.Flex,{column:!0,gap:5,height:"100%"},a.createElement(vl,(0,r.A)({title:"General",index:0},u)),a.createElement(xl,(0,r.A)({title:"Features",index:1},u)),a.createElement(yl,(0,r.A)({title:"Usage Allowances",index:2},u)),a.createElement(Cl,(0,r.A)({title:"User Roles",index:3},u)),a.createElement(wl,(0,r.A)({title:"Support",index:4},u)))},Tl=e=>{let{plans:t,loaded:n,currentLoaded:l,currentPlan:o}=e;return a.createElement(m.Flex,{column:!0,height:"100%",gap:3},a.createElement(cl,{currentPlan:o}),n&&l?a.createElement(m.Flex,{overflow:"auto",column:!0,gap:5,height:"100%",padding:[0,0,20,0]},a.createElement(Ma,{plans:t,currentPlan:o,showAllPlans:!0,showPricingColumn:!0}),a.createElement(Al,{plans:t,currentPlan:o})):a.createElement(Rt.A,{title:n?"Loading billing info...":"Loading Netdata plans..."}))},Sl={Community:0,Pro:1,Business:2},Il={month:"Monthly",year:"Yearly"},kl=()=>{const[e,t]=(0,Nt.ok)(),[n,l]=(0,me.A)();(0,a.useEffect)((()=>{const a=e.get("from"),o=e.get("from_committed_nodes"),r=e.get("interval"),i=e.get("result"),c=e.get("to"),s=e.get("to_committed_nodes"),m=e.get("from_trial");if(i){const d="success"==i,u={from:a,fromNodes:o,interval:r,to:c,toNodes:s,fromTrial:m,success:d};(d?n:l)((e=>{let{from:t,to:n,fromNodes:a,toNodes:l,interval:o,fromTrial:r,success:i=!0}=e;if(r)return{header:i?"Successful upgrade":"Failed to upgrade",text:i?"You have successfully upgraded your plan":"Something went wrong"};const c=Il[o]||o;if(t===n&&a===l)return{header:i?"Successful billing cycle change":"Failed to update billing cycle",text:i?"Subscription billing cycle changed to ".concat(c):"Subscription is still on ".concat(c," billing cycle")};if(t===n)return{header:i?"Successful change of commitment":"Failed to change the commitment",text:i?"Commitment ".concat(aSl[n];return{header:i?"Successfully ".concat(s?"downgraded":"upgraded"," plan"):"Failed to ".concat(s?"downgrade":"upgrade"," plan"),text:i?"Subsrciprion 
".concat(s?"downgraded":"upgraded"," from ").concat(t," to ").concat(n," plan (").concat(c,")"):"Remained on ".concat(t," plan")}})(u)),(0,rl.H)("billing","callback","global-view",u),e&&(e.delete("from"),e.delete("from_committed_nodes"),e.delete("interval"),e.delete("result"),e.delete("to"),e.delete("to_committed_nodes"),e.delete("from_trial"),t(e))}}),[e,n,l])},Fl=()=>{const[e]=(0,De.Ay)();return a.createElement(l.C5,{replace:!0,to:"/spaces/".concat(e)})},Pl=()=>{kl();const{loaded:e,value:t}=(0,pn.lU)(),{loaded:n,value:o}=(0,pn.JN)(),r="AWS"==(null===o||void 0===o?void 0:o.paymentProvider),i=(0,gn.J)();return a.createElement(M.Ay,{feature:"Billing"},a.createElement(m.Flex,{column:!0,height:"calc(100% - 30px)",gap:3},a.createElement(l.BV,null,a.createElement(l.qh,{path:"/all-plans",element:r?a.createElement(Fl,null):a.createElement(Tl,{isSmall:i,currentPlan:o,loaded:e,currentLoaded:n,plans:t})}),a.createElement(l.qh,{path:"/",element:a.createElement(ol,{isSmall:i,currentPlan:o,loaded:e,currentLoaded:n,plans:t})}))))};var Ml=n(58205),Dl=n(1239),Ll=n(93155);const Nl=(0,i.A)((()=>Promise.all([n.e(7208),n.e(7304)]).then(n.bind(n,7304)))),Rl=(0,i.A)((()=>Promise.all([n.e(749),n.e(8910),n.e(185)]).then(n.bind(n,70185)))),Bl=(0,i.A)((()=>n.e(8842).then(n.bind(n,8842)))),_l=(0,s.default)(d.t).withConfig({displayName:"manage-workspace__ColumnHeader",componentId:"sc-j9n54n-0"})(["width:180px;padding:16px;"]),Ul=e=>t=>a.createElement(a.Suspense,{fallback:a.createElement(Rt.A,{title:"Loading settings tab..."})},a.createElement(e,t)),Ol=e=>t=>a.createElement(u.Yv,{flex:"1",overflow:"hidden"},a.createElement(e,t)),Vl=e=>t=>{let{containerProps:n={},...l}=t;return a.createElement(u.Yv,(0,r.A)({flex:"1",overflow:"hidden",height:"100%",padding:[4,0,0]},n),a.createElement(e,l))},Hl=Ol(mn),Wl=Ol(Re),Yl=Ol(nt),ql=Ol(Qe),jl=Vl(Yt),zl=Ol(Pl),Jl=Vl(Ul(Nl)),Gl=Ol(Ul(Rl)),$l=Vl(Ul(Bl)),Kl=(0,a.memo)((()=>{const e=(0,l.Zp)(),{state:t={},pathname:n}=(0,l.zy)(),{settingsTab:r,...i}=(0,l.g)(),[s]=(0,a.useState)((null===t||void 0===t?void 0:t.previousUrlPath)||null),d=(0,o.ap)("name"),p=(0,o.ap)("id"),h=(e=>{const t=B.IV.indexOf(e);return-1!==t?t:B.Wk})(r),b=(0,a.useCallback)((t=>{const a=((e,t)=>{const n=e.split("/"),a=n.indexOf("settings")+1-n.length;return"".concat(n.slice(0,a).join("/"),"/").concat(B.IV[t]||B.Wk)})(n,t);e(a)}),[r]);(0,a.useEffect)((()=>{B.ys[r]||e((0,c.tW)(n,{...i,settingsTab:B.Wk}))}),[]);const f=(0,E.JT)("billing:ReadBasic"),v=(0,E.JT)("room:ReadAll"),x=(0,E.JT)("user:ReadAll"),y=(0,E.JT)("channel:ReadAll"),C=(0,A.At)(),w=(0,jt.ES)(p);(0,a.useEffect)((()=>{f||r===B.bO&&e((0,c.tW)(n,{...i,settingsTab:B.Wk}))}),[f,r]);const T=(0,o.bq)(),S=(0,a.useCallback)((t=>{const n=t?"/spaces/".concat(t):s||"/spaces/".concat(T);e(n)}),[s,T]);return 
d?a.createElement($t.A,{permission:"space:ReadSettings"},(e=>a.createElement(M.Ay,{feature:"SpaceSettings"},a.createElement(u.GO,{full:!0,"data-testid":"manageWorkspaceModal",closeOnClickOutside:!1,onClose:S,width:"100%"},a.createElement(g.z,{"data-testid":"manageWorkspaceModal-header",onClose:S,title:a.createElement(un,null)}),a.createElement(m.Tabs,{row:!0,column:!1,"data-testid":"manageWorkspaceModal-tabs",selected:h,onChange:b,TabsHeader:_l,tabsProps:{column:!0},height:"100%",noDefaultBorder:!0,position:"relative",overflow:"hidden",width:"100%"},w?null:a.createElement(m.Tab,{basis:0,"data-testid":"manageWorkspaceModal-spaceTab",label:B.ys[B.mm],"data-ga":"manage-space::click-tab::space-tab",isMenuItem:!0},a.createElement(Hl,{"data-testid":"manageWorkspaceModal-spaceTabContent",onClose:S,small:!0})),!w&&v?a.createElement(m.Tab,{basis:0,"data-testid":"manageWorkspaceModal-warRoomsTab","data-ga":"manage-space::click-tab::rooms-tab",label:"Rooms",isMenuItem:!0},a.createElement(Wl,{"data-testid":"manageWorkspaceModal-warRoomsTabContent"})):null,w||!v&&!C?null:a.createElement(m.Tab,{basis:0,label:"Nodes","data-testid":"manageWorkspaceModal-nodesTab","data-ga":"manage-space::click-tab::nodes-tab",isMenuItem:!0},a.createElement(Yl,{"data-testid":"manageWorkspaceModal-nodesTabContent"})),!w&&x?a.createElement(m.Tab,{basis:0,"data-testid":"manageWorkspaceModal-usersTab","data-ga":"manage-space::click-tab::users-tab",label:"Users",isMenuItem:!0},a.createElement(ql,{"data-testid":"manageWorkspaceModal-usersTabContent"})):null,!w&&y?a.createElement(m.Tab,{basis:0,label:"Alerts & Notifications","data-testid":"manageWorkspaceModal-notificationsTab","data-ga":"manage-space::click-tab::notifications-tab",isMenuItem:!0},a.createElement(jl,null)):null,w||!f||Dl.ei?null:a.createElement(m.Tab,{basis:0,"data-testid":"manageWorkspaceModal-billingTab","data-ga":"manage-space::click-tab::billing-tab",label:"Plan & Billing",isMenuItem:!0},a.createElement(zl,{"data-testid":"manageWorkspaceModal-billingTabContent"})),!w&&v?a.createElement(m.Tab,{basis:0,"data-testid":"manageWorkspaceModal-integrationsTab","data-ga":"manage-space::click-tab::integrations-tab",label:"Integrations",isMenuItem:!0},a.createElement(Jl,{flavour:Ml.D_.settingsPage,containerProps:{padding:[0]},"data-testid":"manageWorkspaceModal-integrationsTabContent"})):null,a.createElement(m.Tab,{basis:0,"data-testid":"manageWorkspaceModal-configurationsTab","data-ga":"manage-space::click-tab::configurations-tab",label:B.ys[B.$d],isMenuItem:!0},a.createElement(Gl,null)),!w&&Ll.Df?a.createElement(m.Tab,{basis:0,"data-testid":"manageWorkspaceModal-authenticationTab","data-ga":"manage-space::click-tab::authentication-tab",label:B.ys[B.iy],isMenuItem:!0},a.createElement($l,null)):null))))):null})),Zl=()=>{const e=(0,o.ap)("id"),t=(0,jt.ES)(e)?"virtual":"default",n=B.DT[t],{pathname:r,state:i}=(0,l.zy)();return a.createElement(l.BV,null,a.createElement(l.qh,{path:"/",element:a.createElement(l.C5,{state:i,to:{pathname:"".concat(r,"/").concat(n)}})}),a.createElement(l.qh,{path:":settingsTab/*",element:a.createElement(Kl,null)}))}},39522:(e,t,n)=>{"use strict";n.d(t,{A:()=>x});var a=n(58168),l=n(96540),o=n(83199),r=n(47767),i=n(22292),c=n(55463),s=(n(17333),n(98992),n(54520),n(62953),n(47444)),m=n(71835),d=n(54702),u=n(55189),g=n(3914),p=n(48849),E=n(56639),h=n(14994);var b=n(57992);const f="leave-space-dialog",v="leaveSpaceDialog",x=e=>{let{id:t,name:n,onClose:x}=e;const 
y=(0,r.Zp)(),C=(0,i.uW)("id"),w=(0,c.Gi)(),A=(0,c.i3)(),T=(0,c.pB)(),S=(e=>{const[,t]=(0,m.A)();return(0,s.Zs)((n=>{let{snapshot:a,set:l,reset:o}=n;return async n=>{let{currentUserId:r,onSuccess:i,onError:s}=n;const m=await a.getPromise((0,g.nC)("ids")),b=m.filter((t=>e!==t)),[f]=b;if(!f)return void t({header:"Spaces",text:u.sh.leave});const{slug:v}=f&&await a.getPromise((0,E.Ay)(f));l((0,g.nC)("ids"),b),l(p.A,(t=>t.filter((t=>t!==e))));try{await(0,d.XY)(e,[r]),i&&i(v),(0,c.Z8)(o,e),(0,h.Is)(o,e),o((0,E.Ay)(e))}catch(x){l((0,g.nC)("ids"),m),s&&s()}}}),[e])})(t),I=(0,b.A)(t),k=(0,l.useCallback)((e=>y("/spaces/".concat(e))),[]),F=1===w.length,P=1===A.length&&T,M=F?{confirmLabel:"Yes, leave","data-ga":"".concat(f,"-last-member"),"data-testid":"".concat(v,"LastMember"),handleConfirm:()=>I({onSuccess:k}),message:l.createElement(l.Fragment,null,"If you leave, space ",l.createElement("strong",null,n)," will be deleted immediately.",l.createElement("br",null),"Are you sure you want to continue?"),title:"Leave and delete ".concat(n," space")}:P?{confirmLabel:"Give rights","data-ga":"".concat(f,"-last-admin"),"data-testid":"".concat(v,"LastAdmin"),handleConfirm:()=>y("users"),isConfirmPositive:!0,message:l.createElement(l.Fragment,null,"You are the last admin of ",l.createElement("strong",null,n)," space. Please give admin rights to another member so you can leave this space."),title:"Leave ".concat(n," space")}:{confirmLabel:"Yes, leave","data-ga":f,"data-testid":v,handleConfirm:()=>S({currentUserId:C,onSuccess:k}),message:l.createElement(l.Fragment,null,"You are about to leave ",l.createElement("strong",null,n)," space.",l.createElement("br",null),"Are you sure you want to continue?"),title:"Leave ".concat(n," space")};return l.createElement(o.ConfirmationDialog,(0,a.A)({handleDecline:x},M))}},47193:(e,t,n)=>{"use strict";n.d(t,{A:()=>s});n(62953);var a=n(69765),l=n(67990),o=n(87860),r=n(3914),i=n(47762),c=n(87659);const s=function(){let{polling:e=!0}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const{id:t=""}=(0,a.pr)(),n=(0,l.gr)(t,"ids"),s=(0,l.gr)(t,"loaded"),m=(0,i.Gt)(n),d=(0,r.vt)(),[u,,,g]=(0,c.A)();return(0,o.A)({id:t,spaceId:d,polling:e}),{areDefaultRoomNodesLoaded:s,nodes:m,isClaimNodeModalOpen:u,closeClaimNodeModal:g}}},97200:(e,t,n)=>{var a=n(13222),l=0;e.exports=function(e){var t=++l;return a(e)+t}}}]); \ No newline at end of file diff --git a/src/web/gui/v2/5246.07c5a1649f0805c140fe.chunk.js b/src/web/gui/v2/5246.07c5a1649f0805c140fe.chunk.js deleted file mode 100644 index 2f476f949..000000000 --- a/src/web/gui/v2/5246.07c5a1649f0805c140fe.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var C="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(C._sentryDebugIds=C._sentryDebugIds||{},C._sentryDebugIds[t]="b495f478-24ec-4d85-a71d-7b0fa9909eca",C._sentryDebugIdIdentifier="sentry-dbid-b495f478-24ec-4d85-a71d-7b0fa9909eca")}catch(C){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[5246],{75246:(C,t,e)=>{e.r(t),e.d(t,{default:()=>g});e(62953);var n=e(96540),l=e(83199),i=e(71847),a=e(12897),d=e.n(a),o=e(55042),r=e.n(o),c=new(d())({id:"terms.svg",use:"terms.svg-usage",viewBox:"0 0 240 
240",content:'\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n'});r().add(c);const p=c,s=()=>n.createElement("svg",{height:"240px",width:"240px",viewBox:p.viewBox},n.createElement("use",{xlinkHref:"#".concat(p.id)}));var H=e(74664),V=e(58187);const h=C=>{let{checked:t,error:e,onChange:i,tagging:a,children:d}=C;return n.createElement(l.Flex,{alignItems:"center","data-testid":"termsAndConditions"},n.createElement(H.ph,{checked:t,onChange:i,error:e,"data-ga":"signinup::click-checkbox::".concat(a),"data-testid":"termsAndConditions-checkbox"}),d||n.createElement(l.TextBig,null,"By signing up, you agree to the Netdata ",n.createElement(V.H,{tagging:a})," and"," ",n.createElement(V.X,{tagging:a})))};var f=e(22292),E=e(87659);const g=()=>{const[C,t]=(0,f.Ir)("termsAccepted",{shouldPersist:!0}),[e,a]=(0,E.A)(),d=(0,n.useCallback)((()=>{(0,i.H)("","","","","","terms-accept"),t(e)}),[e]);return C?null:n.createElement(l.ConfirmationDialog,{confirmLabel:"Accept","data-ga":"accept-terms-dialog","data-testid":"acceptTermsDialog",handleConfirm:d,hideIcon:!0,hideDecline:!0,isConfirmPositive:!0,isConfirmDisabled:!e,message:n.createElement(l.Flex,{gap:4,alignItems:"center",column:!0},n.createElement(s,null),n.createElement(h,{checked:e,onChange:a,tagging:"modal-view"})),title:"To use Netdata you need to read and accept our terms and conditions"})}},74664:(C,t,e)=>{e.d(t,{MG:()=>p,OV:()=>c,ah:()=>d,ch:()=>a,j5:()=>o,ph:()=>r});var n=e(8711),l=e(83199),i=e(96763);const a=n.default.div.withConfig({displayName:"styled__SvgContainer",componentId:"sc-16ytcl4-0"})(["width:42px;height:42px;flex-shrink:0;display:flex;justify-content:center;align-items:center;border-radius:2px;background:white;"]),d=n.default.a.withConfig({displayName:"styled__StyledLink",componentId:"sc-16ytcl4-1"})(["display:inline-flex;align-items:center;text-decoration:none;color:",";cursor:pointer;&:hover{text-decoration:underline;color:"," !important;}&:visited{color:",";}> svg{fill:",";padding-right:",";}"],(0,l.getColor)("success"),(0,l.getColor)("success"),(0,l.getColor)("success"),(0,l.getColor)("main"),(0,l.getSizeBy)(1)),o=(0,n.default)(i.A).withConfig({displayName:"styled__EmailInput",componentId:"sc-16ytcl4-2"})(["",""],(C=>{let{isLastSignInMethod:t}=C;return t?"border: 2px solid green;":"border: 1px solid #51966C;"})),r=(0,n.default)(l.Checkbox).withConfig({displayName:"styled__StyledCheckbox",componentId:"sc-16ytcl4-3"})(["margin:0 "," 0 0;& div:last-child{border-color:",";}"],(0,l.getSizeBy)(2),(C=>{let{error:t}=C;return t&&(0,l.getColor)("error")})),c=(0,n.default)(l.Button).withConfig({displayName:"styled__StyledButton",componentId:"sc-16ytcl4-4"})(["&&{height:44px;}"]),p=(0,n.default)(l.Flex).attrs((C=>{let{gap:t=8}=C;return{column:!0,gap:t,alignSelf:"center",padding:[0,0,8,0],border:{side:"bottom",color:"disabled"},width:{max:"320px"}}})).withConfig({displayName:"styled__FormContainer",componentId:"sc-16ytcl4-5"})(["width:100%;"])},58187:(C,t,e)=>{e.d(t,{H:()=>d,X:()=>o});var n=e(96540),l=e(74664);const i={link:"https://www.netdata.cloud/terms",title:"Terms And Conditions",dataGa:"signinup::click-terms::"},a=C=>{let{link:t,title:e,dataGa:i}=C;return C=>{let{tagging:a}=C;return n.createElement(l.ah,{href:t,target:"_blank",rel:"noopener noreferrer","data-ga":"".concat(i).concat(a)},e)}},d=a({link:"https://www.netdata.cloud/privacy",title:"Privacy Policy",dataGa:"signinup::click-privacy::"}),o=a(i)}}]); \ No newline at end of file diff --git 
a/src/web/gui/v2/5304.cc797fdd343c7e873b2f.chunk.js b/src/web/gui/v2/5304.cc797fdd343c7e873b2f.chunk.js deleted file mode 100644 index 1c3e79892..000000000 --- a/src/web/gui/v2/5304.cc797fdd343c7e873b2f.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="8c26f46f-78bd-438a-9cab-12329b4e3652",e._sentryDebugIdIdentifier="sentry-dbid-8c26f46f-78bd-438a-9cab-12329b4e3652")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[5304],{79394:(e,t,l)=>{l.d(t,{A:()=>u,p:()=>d});l(41393),l(81454);var a=l(96540),n=l(47444),r=l(3914),o=l(67990),s=l(63129),i=l(88982);const c=()=>{const e=(0,r.vt)();return(0,n.Zs)((t=>{let{set:l,snapshot:a}=t;return async t=>{const n=await a.getPromise((0,o.$6)({id:t,key:"nodes"}));let r=null;return null!==n&&void 0!==n&&n.length||(l((0,o.$6)({id:t,key:"loaded"}),!1),r=(0,s.uQ)({roomId:t,spaceId:e}),r.then((e=>{let{data:a}=e;l((0,o.$6)({id:t,key:"nodes"}),a.map((e=>{let{id:t,name:l}=e;return{label:l,value:t}}))),l((0,o.$6)({id:t,key:"loaded"}),!0)})).catch((e=>l((0,o.$6)({id:t,key:"error"}),e)))),r}}),[e])},d=()=>{const e=c(),t=(0,i.n)("id");(0,a.useEffect)((()=>{let l=null;return t&&(l=e(t)),()=>{var e;return!(null===(e=l)||void 0===e||!e.cancel)&&l.cancel()}}),[t,e])},u=c},25304:(e,t,l)=>{l.r(t),l.d(t,{default:()=>ae});var a=l(96540),n=l(83084),r=l(28738),o=(l(8159),l(98992),l(37550),l(62953),l(87659)),s=l(81638),i=l(30577),c=l(93615),d=l(58168),u=l(83199),m=(l(9920),l(14905),l(3949),l(8872),l(27467)),g=l(59846),p=l(82700),y=l(37171),b=l(10368);const h=(e,t,l)=>{let{id:a,name:n}=e;-1==l?t.push({id:a,name:n,count:1}):t[l]={...t[l],count:t[l].count+1}},f=e=>e.id,E=e=>e.name,x=e=>{let{title:t,baseKey:l,param:n,paramKey:r,extraKey:o,testIdPrefix:s}=e;const i=(e=>{let{baseKey:t,extraKey:l,param:n,paramKey:r}=e;const{rules:o}=(0,y.q)({extraKey:l,omit:n,keepAll:!0}),s=(0,m.rW)(n,{key:t,extraKey:l,flavour:"arr"});return(0,a.useMemo)((()=>o.reduce(((e,t)=>{const l=t[r];if(!l)return e;const a=e.findIndex((e=>e.id==l.id));return"id"==r?h({id:l,name:l},e,a):["rooms","nodes"].includes(r)?l.forEach((t=>{let{id:l,name:n,notAvailable:r}=t;const o=r?"Not available (".concat(l,")"):n;h({id:l,name:o},e,a)})):["alertNames","alertContexts","alertRoles"].includes(r)?l.forEach((t=>{h({id:t,name:t},e,a)})):"hostLabels"==r&&Object.entries(l).forEach((t=>{let[l,n]=t;const r=(0,b.M)({key:l,value:n});h({id:r,name:r},e,a)})),e}),[])),[o,s])})({baseKey:l,extraKey:o,param:n,paramKey:r});return i.length?a.createElement(g.A,{title:t,testIdPrefix:s,param:n,baseKey:l,extraKey:o},a.createElement(p.Ay,{param:n,baseKey:l,extraKey:o,testIdPrefix:s,collection:i,getValue:f,getLabel:E,capitalized:!1})):null};var A=l(3914),v=l(78940);const w=[{title:"Rules IDs",param:"ids",paramKey:"id"},{title:"Rooms",param:"roomIds",paramKey:"rooms"},{title:"Nodes",param:"nodeIds",paramKey:"nodes"},{title:"Host labels",param:"hostLabels",paramKey:"hostLabels"},{title:"Alert names",param:"alertNames",paramKey:"alertNames"},{title:"Alert contexts",param:"alertContexts",paramKey:"alertContexts"},{title:"Alert 
roles",param:"alertRoles",paramKey:"alertRoles"}],k={filters:{iconName:"filterList",color:{active:"success",notActive:"textLite"},width:"19px",Content:(0,a.memo)((e=>{const t=(0,A.vt)();return a.createElement(u.Flex,(0,d.A)({column:!0,height:"calc(100vh - 266px)",overflow:{vertical:"auto"},padding:[0,0,30]},e),w.map((e=>{let{title:l,param:n,paramKey:r}=e;return a.createElement(x,{key:l,baseKey:t,extraKey:v.qt,title:l,param:n,paramKey:r})})))})),dataTestId:"filterBar",label:"Filters"}},K=["filters"],C=e=>e.some((e=>{let{roomIds:t=[],nodeIds:l=[],hostLabels:a={},alertNames:n=[],alertContexts:r=[],alertRoles:o=[]}=e;return t.length||l.length||Object.keys(a).length||n.length||r.length||o.length})),S=e=>{let{rules:t}=e;const[l,n]=(0,o.A)((()=>C(t))),{Content:r}=k.filters;return(0,a.useEffect)((()=>{n(C(t))}),[t]),a.createElement(s.Ay,{collapsedComponent:a.createElement(i.A,{onClickTab:n,availableTabs:k,tabsToShow:K}),isOpen:l,header:a.createElement(c.A,{isOpen:l,onToggle:n,title:"Filters",icon:"filterList"})},a.createElement(r,{padding:[0,3,30]}))};var T=l(39225),I=l(4659),L=l(65570),F=(l(9391),l(41393),l(81454),l(21290)),R=l(36021),D=l(71835),N=l(29217),M=l(40267);const P=e=>{let{value:t,...l}=e;return a.createElement(N.A,{content:t},a.createElement(u.Pill,(0,d.A)({flavour:"neutral"},l),t))},_=()=>a.createElement(u.Text,null,"*"),j=e=>{let{getValue:t}=e;const l=t();return null!==l&&void 0!==l&&l.length?a.createElement(u.Flex,{flexWrap:!0,gap:1},l.map((e=>a.createElement(u.Flex,{key:e,padding:[.5,0]},a.createElement(P,{value:e}))))):a.createElement(_,null)},O=e=>{let{getValue:t}=e;const{localeDateString:l}=(0,F.$j)();if(!t())return a.createElement(u.Text,null,"-");const n=l(new Date(t()),{month:"2-digit",day:"2-digit",year:"numeric",hour:"numeric",minute:"numeric",long:!1,dateStyle:void 0});return a.createElement(u.Text,{whiteSpace:"nowrap"},n)},V=[{id:"disabled",name:"Disable rule",accessorKey:"disabled",header:"",cell:e=>{let{row:t,getValue:l}=e;const n=t.original,[r,s]=(0,a.useState)(!l()),[i,c]=(0,o.A)(),[,d]=(0,D.A)(),m=(0,R.FU)(),g=(0,R.Lz)();return a.createElement(u.Toggle,{colored:!0,checked:r,disabled:i,onChange:e=>{c();const t=e.target.checked;s(t),m({...n,disabled:!t}).then((()=>{g()})).catch((e=>{s(!t),d(e)})).finally(c)}})},size:80},{id:"name",name:"Name",accessorKey:"name",header:"Rule name",cell:e=>{let{row:t,getValue:l}=e;const{accountId:n}=t.original;return a.createElement(u.Flex,{gap:2,alignItems:"center"},a.createElement(u.Text,{wordBreak:"break-word"},l()),n&&a.createElement(N.A,{align:"bottom",content:"This rule affects only your account"},a.createElement(u.Box,null,a.createElement(u.Icon,{color:"textLite",height:"16px",name:"userPress",width:"16px"}))))}},{id:"rooms",name:"Rooms",accessorKey:"rooms",header:"Rooms",cell:e=>{let{getValue:t}=e;const l=t();return null!==l&&void 0!==l&&l.length?a.createElement(u.Text,null,l.map((e=>{let{name:t}=e;return t})).join(", ")):a.createElement(u.Text,null,"All rooms")}},{id:"nodes",name:"Nodes",accessorKey:"nodes",header:"Nodes",cell:e=>{let{getValue:t}=e;const l=t();return null!==l&&void 0!==l&&l.length?a.createElement(u.Flex,{flexWrap:!0,gap:1},l.map((e=>{let{id:t,name:l,notAvailable:n}=e;const r=n?a.createElement(u.Flex,{alignItems:"center",gap:1},a.createElement(u.Icon,{name:"warning_triangle",size:"small",color:"neutralPillColor"}),a.createElement(u.Text,null,"Node no longer available")):l;return a.createElement(P,{key:t,value:r})}))):a.createElement(_,null)}},{id:"hostLabels",name:"Host 
labels",accessorKey:"hostLabels",header:"Host labels",cell:e=>{let{getValue:t}=e;const l=t(),n=Object.entries(l||{});return n.length?a.createElement(u.Flex,{flexWrap:!0,gap:1},n.map((e=>{let[t,l]=e;const n=(0,b.M)({key:t,value:l});return a.createElement(P,{key:n,value:n})}))):a.createElement(_,null)}},{id:"alertNames",name:"Alert names",accessorKey:"alertNames",header:"Alert name",cell:j},{id:"alertContexts",name:"Alert contexts",accessorKey:"alertContexts",header:"Alert context",cell:j},{id:"alertInstances",name:"Alert instances",accessorKey:"alertInstances",header:"Alert instances",cell:j},{id:"severities",name:"Alert status",accessorKey:"severities",header:"Alert status",cell:e=>{let{getValue:t}=e;const l=t();return null!==l&&void 0!==l&&l.length?a.createElement(u.Flex,{flexWrap:!0,gap:1},l.map((e=>a.createElement(u.Flex,{key:e,padding:[.5,0]},a.createElement(M.A,{flavour:e.toLowerCase()},e))))):a.createElement(_,null)}},{id:"alertRoles",name:"Alert roles",accessorKey:"alertRoles",header:"Alert role",cell:j},{id:"startDate",name:"Start date",accessorKey:"startsAt",header:"Start date",cell:O},{id:"endDate",name:"End date",accessorKey:"lastsUntil",header:"End date",cell:O}];l(93514);var $=l(38819),U=l(29848),z=l(46741);const B={month:"2-digit",day:"2-digit",year:"numeric",hour:"numeric",minute:"numeric",long:!1,dateStyle:void 0},H=()=>{const[e,t]=(0,a.useState)(""),{silencingRulePrefill:l}=(0,$.PP)(),n=l?{type:"create"}:{},[r,o]=(0,a.useState)(n),[,s]=(0,D.A)(),{localeDateString:i}=(0,F.$j)(),c=(0,R._S)(),d=(0,R.Lz)(),m=(0,U.ly)(),g=()=>o({type:"create"}),p=e=>o({type:"edit",rule:e}),y=e=>{let{id:t}=e;c([t]).then((()=>{d(),m()})).catch(s)},b=(0,z.JT)("space:DeletePersonalSilencingRule"),h=(0,z.JT)("space:DeleteSystemSilencingRule"),f=(0,a.useMemo)((()=>({addEntry:{dataGa:"",flavour:"hollow",handleAction:g,iconColor:"success",label:"Add rule",small:!0,width:"auto"}})),[]),E=e=>{let{name:t}=e;return a.createElement(a.Fragment,null,"Delete ",t," rule")};return{search:e,setSearch:t,bulkActions:f,rowActions:(0,a.useMemo)((()=>({edit:{handleAction:p,disabledTooltipText:"Edit is disabled",isDisabled:e=>{let{readOnly:t}=e;return!!t},dataGa:"alert-silencing::click-edit-row::rules-table"},delete:{handleAction:y,confirmationTitle:E,confirmationMessage:e=>a.createElement(u.Text,{wordBreak:"break-word"},"You are about to delete ",a.createElement("strong",null,e.name)," silencing rule.",a.createElement("br",null),"Are you sure you want to continue?"),dataGa:"alert-silencing::click-delete-row::rules-table",disabledTooltipText:"Delete is disabled",confirmLabel:"Yes, delete",declineLabel:"Cancel",isDisabled:e=>{let{accountId:t}=e;return t?!b:!h}}})),[]),onFilter:(0,a.useCallback)(((e,t,l)=>{const a=e.original,n=l.toLowerCase(),r=(a.rooms||[]).map((e=>{let{name:t}=e;return t})),o=(a.nodes||[]).map((e=>{let{name:t}=e;return t})),s=a.alertNames||[],c=a.alertContexts||[],d=a.severities||[],u=a.alertRoles||[],m=a.hostLabels||{},g=a.startsAt?i(new Date(a.startsAt),B):"",p=a.lastsUntil?i(new Date(a.lastsUntil),B):"";return((e,t)=>e.some((e=>e.toLowerCase().includes(t))))([a.name.toLowerCase(),...r,...o,...s,...c,...d,...u,...Object.entries(m).flat(),g,p],n)}),[]),modalDetails:r,closeModal:()=>o({})}},W=(0,T.A)((()=>l.e(9400).then(l.bind(l,79400))),"CreateOrUpdateRuleModal"),q=(0,a.memo)(u.Table,((e,t)=>(0,L.Ay)(e,t))),J={right:["actions"]},G=()=>a.createElement(u.Flex,null,a.createElement(u.Text,null,"Create or Maintain Alert Notification Silencing Rules on this Space. 
These rules can be applicable to all users or just you, depending on their definition."," ",a.createElement(I.A,{"data-ga":"alert-silencing::click-link-docs::rules-table","data-testid":"silencing-rules-doc",href:"https://learn.netdata.cloud/docs/alerts-and-notifications/notifications/netdata-cloud-notifications/#silencing-alert-notifications",rel:"noopener noreferrer",target:"_blank"},"Read the Alert Notification Silencing Rules documentation to learn more."))),Y=e=>{let{data:t}=e;const{search:l,setSearch:n,bulkActions:r,rowActions:o,onFilter:s,modalDetails:i,closeModal:c}=H();return a.createElement(a.Fragment,null,a.createElement(u.Flex,{column:!0,gap:4,padding:[2,4],height:"calc(100vh - 241px)"},a.createElement(G,null),a.createElement(q,{key:JSON.stringify(t||{}),enableSorting:!0,enableColumnVisibility:!0,data:t,dataColumns:V,bulkActions:r,rowActions:o,globalFilter:l,globalFilterFn:s,onSearch:n,columnPinning:J,enableColumnPinning:!0})),"create"==i.type?a.createElement(a.Suspense,null,a.createElement(W,{onClose:c})):null,"edit"==i.type&&i.rule?a.createElement(a.Suspense,null,a.createElement(W,{rule:i.rule,onClose:c,isEdit:!0})):null)};var Q=l(9415),Z=l(89821),X=l(18061);const ee=e=>{let{errorMessage:t}=e;return a.createElement(u.Flex,{column:!0,gap:2,alignItems:"center",justifyContent:"center",flex:!0},a.createElement(u.TextBig,null,"Something went wrong"),t&&a.createElement(u.Text,null,"(",t,")"))};var te=l(79394),le=l(63314);const ae=()=>{(e=>{const t=(0,R.ys)({id:e,key:"initialLoad"}),l=(0,R.Qu)(),a=(0,R.UJ)(e);(0,X.A)((()=>({enabled:!!e&&!t,fetch:()=>(0,Q.cH)(e),onFail:e=>a({...Z.$,loading:!1,loaded:!0,error:e}),onSuccess:e=>a(((e,t)=>({...e,loading:!1,loaded:!0,initialLoad:!0,rules:t}))(Z.$,e))})),[e,l])})((0,A.vt)()),(0,te.p)();const{loaded:e,rules:t,error:l}=(0,y.q)({extraKey:v.qt});return e?l?a.createElement(le.DL,{tab:"Notifications::SilencingRules"},a.createElement(ee,{errorMessage:l.message})):a.createElement(le.Ay,{tab:"Notifications::SilencingRules"},a.createElement(n.A,{sidebar:a.createElement(S,{rules:t}),margin:[3,0]},a.createElement(Y,{data:t}))):a.createElement(r.A,{height:"calc(100% - 44px)",title:"Loading silencing rules..."})}},10368:(e,t,l)=>{l.d(t,{H:()=>n,M:()=>r});l(14905),l(98992),l(8872),l(62953);var a=l(78940);const n=e=>{var t;const{validationErrors:l,errorMessage:n}=(null===e||void 0===e||null===(t=e.response)||void 0===t?void 0:t.data)||{};return l&&Object.keys(l).length?Object.entries(l).reduce(((e,t)=>{let[,l]=t;const{Code:n}=l;return a.xc[n]&&e.push({errorMessage:a.xc[n]}),e}),[]):[{errorMessage:n}]},r=e=>{let{key:t,value:l}=e;return"".concat(t,": ").concat(l)}}}]); \ No newline at end of file diff --git a/src/web/gui/v2/5426.254557ad3e1f2d14ad29.chunk.js b/src/web/gui/v2/5426.254557ad3e1f2d14ad29.chunk.js deleted file mode 100644 index d06902c05..000000000 --- a/src/web/gui/v2/5426.254557ad3e1f2d14ad29.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="53f38cae-4eb5-4b70-b025-7a530152c695",e._sentryDebugIdIdentifier="sentry-dbid-53f38cae-4eb5-4b70-b025-7a530152c695")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof 
self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[5426],{45426:(e,t,n)=>{n.r(t),n.d(t,{default:()=>s});n(62953);var o=n(96540),a=n(83199),r=n(5396),l=n(4659),d=n(87659),c=n(51641);const s=()=>{const e=(0,c.e)(),[t,,n,s]=(0,d.A)();return(0,o.useEffect)((()=>{"Reachable"===e&&n()}),[e]),t?o.createElement(a.Layer,{full:"horizontal",backdropProps:{backdropBlur:"3px"},onClickOutside:s,onEsc:s,position:"top"},o.createElement(r.A,{testId:"agent-not-secure-banner",width:"100%",background:"warningBackground",position:"absolute",top:"0",tooltipProps:{align:"top"},zIndex:20},o.createElement(a.Flex,{justifyContent:"center",alignItems:"center",width:"100%",gap:2},o.createElement(a.Text,null,"IMPORTANT: This Netdata is openly accessible from the Internet!"," ",o.createElement(l.A,{"data-testid":"learn",href:"https://learn.netdata.cloud/docs/netdata-agent/securing-netdata-agents/",as:"a",cursor:"pointer",textDecoration:"underline",color:"text",hoverColor:"textFocus"},"Secure your Netdata now"),"!")),o.createElement(a.Box,{"data-testid":"agent-not-secure-banner-close-button",as:a.Icon,color:"text",cursor:"pointer",name:"x",position:"absolute",right:"8px",onClick:s}))):null}}}]); \ No newline at end of file diff --git a/src/web/gui/v2/5596.2036706750ff4028cff2.chunk.js b/src/web/gui/v2/5596.2036706750ff4028cff2.chunk.js deleted file mode 100644 index b8aefba99..000000000 --- a/src/web/gui/v2/5596.2036706750ff4028cff2.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="054b07e7-b492-4640-ad51-38c1362e436f",e._sentryDebugIdIdentifier="sentry-dbid-054b07e7-b492-4640-ad51-38c1362e436f")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[5596],{92155:(e,t,n)=>{n.d(t,{A:()=>c});var a=n(58168),r=n(96540),o=n(50876);const l=e=>(0,r.forwardRef)(((t,n)=>{let{callback:l,feature:i,isStart:c,isSuccess:s,isFailure:d,eventReason:g,payload:u={},...m}=t;const{sendLog:h,isReady:p}=(0,o.A)(),f=(0,r.useCallback)((()=>{const e=m[l],t={feature:i,isStart:c,isSuccess:s,isFailure:d,eventReason:g,...u,...m["data-ga"]?{dataGa:m["data-ga"]}:{},...m.dataGa?{dataGa:m.dataGa}:{},...m["data-track"]?{dataTrack:m["data-track"]}:{},...m.label?{label:m.label}:{}};"function"==typeof e&&e(),h(t,!0)}),[l,h,p,u,m]),x=(0,r.useMemo)((()=>({...m,[l]:f})),[m,l,f]);return r.createElement(e,(0,a.A)({ref:n},x))}));var i=n(67276);const c=e=>(0,r.forwardRef)(((t,n)=>{let{payload:o={},...c}=t;const s=l(e);return r.createElement(s,(0,a.A)({},c,{ref:n,callback:"onClick",payload:{...o,action:i.o1.buttonClicked}}))}))},93155:(e,t,n)=>{n.d(t,{Df:()=>p,Dm:()=>m,EM:()=>h,Mh:()=>g,bO:()=>u});const a="mobileAppNotifications",r="trialWarning",o="alertConfiguration",l="editAlertConfiguration",i="userCustomSettings",c="oktaSSO",s=[r,o,a,c];function d(e){if(s.includes(e))return()=>!0;const t="true"===localStorage.getItem(e);return e=>e||t}const g=d(a)(),u=d(r)(),m=d(o)(),h=(d(l)(),d(i)()),p=d(c)()},21828:(e,t,n)=>{n.d(t,{A:()=>o});var a=n(96540),r=n(83199);const o=e=>{let{theme:t}=e;const n="default"==t?"grey185":"grey45";return 
t="dark"==t?r.DarkTheme:r.DefaultTheme,a.createElement(r.Flex,{height:"1px",width:"100%",background:(0,r.getColor)(["neutral",n])({theme:t})})}},84541:(e,t,n)=>{n.d(t,{A:()=>I});var a=n(58168),r=n(96540),o=n(45463),l=n(83199);const i=(0,n(8711).default)(l.Flex).attrs((e=>({flex:{grow:"1",shrink:"0"},...e}))).withConfig({displayName:"panel__Panel",componentId:"sc-4zlw7c-0"})(["",";",";"],(e=>{let{order:t}=e;return t&&"order: ".concat(t,";")}),(e=>{let{bgGradient:t}=e;return t&&"background: linear-gradient(34.14deg, #536775 -26.52%, #2F3A42 53.66%);\n"}));var c=n(17208);const s=()=>r.createElement(l.Flex,{alignItems:"center",column:!0,padding:[0,0,18,0]},r.createElement(c.oi,null));n(41393),n(81454);const d=[{icon:"rocket",title:"Rapid Deployment, Instant Insights",text:"One-command installation, automatic metric discovery, and intuitive out-of-the-box dashboards for every single metric."},{icon:"qualityOfServiceSolid",title:"Advanced Monitoring, Made Accessible",text:"Experience real-time monitoring with 1-second granularity and anomaly detection across 800+ integrations."},{icon:"firewallSolid",title:"On-Prem Data Storage, Transparent Pricing",text:"Unlimited metrics, Infinite scale, Zero hidden costs. Decentralized architecture ensures maximum data security."}];n(3064),n(98992),n(72577),n(62953);var g=n(18061),u=n(26655);const m=()=>u.A.get("https://us-east1-netdata-analytics-bi.cloudfunctions.net/netdata_public_metrics_website"),h=e=>{let{downScale:t,...n}=e;const l=(0,o.A)("(min-width: 992px)");return(i=l?t[0]:t[1],e=>{let{children:t,...n}=e;return r.createElement(i,(0,a.A)({color:"bright"},n),t)})(n);var i};var p=n(12897),f=n.n(p),x=n(55042),b=n.n(x),w=new(f())({id:"cloudStatistics",use:"cloudStatistics-usage",viewBox:"0 0 194 146",content:''});b().add(w);const y=w,k=e=>r.createElement(l.Flex,(0,a.A)({as:"svg",height:y.height,width:y.width,viewBox:y.viewBox},e),r.createElement("use",{xlinkHref:"#".concat(y.id)})),E=()=>{const[e]=(0,g.A)((()=>({fetch:m})),[]),{nodesOnline:t,githubStars:n,dockerHubPulls:a}=(0,r.useMemo)((()=>{var t,n;return Array.isArray(e)?{nodesOnline:"1,265,463",githubStars:null===(t=e.find((e=>{let{key:t}=e;return"GitHub Stars"===t})))||void 0===t?void 0:t.value,dockerHubPulls:null===(n=e.find((e=>{let{key:t}=e;return"DockerHub Pulls"===t})))||void 0===n?void 0:n.value}:{}}),[e]);return r.createElement(l.Flex,{column:!0,gap:6},r.createElement(k,{height:"160px",padding:[0,0,4,0]}),r.createElement(l.Flex,{column:!0,gap:3},r.createElement(h,{downScale:[l.TextBigger,l.TextBigger],color:"bright",textAlign:"center"},r.createElement(h,{downScale:[l.TextBigger,l.TextBigger],color:t?"primaryHighlight":"bright",textAlign:"center",strong:!0},t||"..."),r.createElement(l.Box,{as:"span",margin:[0,0,0,3]},"Nodes Online")),r.createElement(h,{downScale:[l.TextBigger,l.TextBigger],color:"bright",textAlign:"center"},r.createElement(h,{downScale:[l.TextBigger,l.TextBigger],color:n?"primaryHighlight":"bright",textAlign:"center",strong:!0},n||"..."),r.createElement(l.Box,{as:"span",margin:[0,0,0,3]},"GitHub Stars")),r.createElement(h,{downScale:[l.TextBigger,l.TextBigger],color:"bright",textAlign:"center"},r.createElement(h,{downScale:[l.TextBigger,l.TextBigger],color:a?"primaryHighlight":"bright",textAlign:"center",strong:!0},a||"..."),r.createElement(l.Box,{as:"span",margin:[0,0,0,3]},"DockerHub Pulls"))))},v=e=>{let{icon:t,title:n}=e;const a=(0,o.A)("(min-width: 475px)");return 
r.createElement(l.Flex,{gap:4,alignItems:"center"},a&&r.createElement(l.Flex,{background:"transparent",justifyContent:"center",alignItems:"center",height:"56px",width:"56px",round:64,border:{side:"all",color:"secondaryColor"}},r.createElement(l.Icon,{name:t,height:"24px",width:"24px",color:"secondaryColor"})),r.createElement(l.Flex,{column:!0,justifyContent:"start",alignItems:"start",gap:2},r.createElement(l.TextBig,{color:"bright",strong:!0},n)))};var S=n(21828);const C=()=>r.createElement(l.Flex,{column:!0,padding:[16,4,12],width:{max:"500px"},margin:[0,"auto"],gap:14},r.createElement(E,null),r.createElement(l.Flex,{column:!0,gap:8,padding:[0,10]},d.map(((e,t)=>r.createElement(v,(0,a.A)({key:t},e))))),r.createElement(l.Flex,{column:!0,gap:6,alignItems:"center"},r.createElement(S.A,null),r.createElement(l.Flex,{column:!0,gap:4,alignItems:"center"},r.createElement(c.go,{theme:"dark"}),r.createElement(l.TextSmall,{textAlign:"center",color:"bright"},"Netdata is a member of the Cloud Native Computing Foundation (CNCF), and is one of the most starred projects in the CNCF landscape.")))),I=e=>{let{children:t,...n}=e;const c=(0,o.A)("(min-width: 998px)");return r.createElement(l.Flex,(0,a.A)({height:{min:"100vh"},flexWrap:!0},n),!window.envSettings.onprem&&r.createElement(i,{background:(0,l.getColor)(["neutral","grey25"])({theme:l.DarkTheme}),order:c?0:1,width:c?{base:"34%"}:{max:"100%"}},r.createElement(C,null)),r.createElement(i,{background:"mainBackground",order:c?1:0,width:c?{base:"66%"}:{max:"100%"}},r.createElement(l.Flex,{background:"transparent",column:!0,gap:8,padding:[12,4],width:{max:"500px"},margin:[0,"auto"]},r.createElement(s,null),t)))}},75596:(e,t,n)=>{n.r(t),n.d(t,{SignIn:()=>P,default:()=>U});n(25440),n(62953),n(3296),n(27208),n(48408);var a=n(96540),r=n(47767),o=n(86663),l=n(83199),i=n(55337),c=n(11604),s=n(17182),d=n(17208),g=n(58168),u=n(8711);const m=u.default.a.withConfig({displayName:"linkButton__LinkButton",componentId:"sc-9kv26a-0"})(["color:",";text-decoration:none;background-color:transparent;display:flex;flex-flow:row nowrap;justify-content:flex-start;align-items:center;height:",";background:",";border:1px solid ",";border-radius:2px;max-width:320px;width:100%;",";&:hover{text-decoration:none;color:unset;}"],(0,l.getColor)("background"),(0,l.getSizeBy)(5.5),(0,l.getColor)("mainBackground"),(0,l.getColor)("mainBackground"),(e=>{let{disabled:t}=e;return"\n opacity: ".concat(t?.4:1,";\n pointer-events: ").concat(t?"none":"auto",";\n ")})),h=e=>t=>{let{method:n,...r}=t;return n==localStorage.getItem("lastSignInMethod")?a.createElement(l.Box,{width:"100%"},a.createElement(l.TextMicro,null,"Last Sign-in method"),a.createElement(e,(0,g.A)({isLastSignInMethod:!0},r))):a.createElement(e,r)},p=e=>{let{backgroundColor:t,color:n}=e;return(0,u.default)(m).withConfig({displayName:"linkButton",componentId:"sc-9kv26a-1"})(["box-shadow:0px 0px 1px rgba(0,0,0,0.12);border-radius:2px;background:",";border-style:solid;border-color:",";border-width:",";cursor:pointer;span{flex:1;display:flex;align-items:center;justify-content:center;padding:0 ",";color:",";}"],t,(e=>{let{isLastSignInMethod:n}=e;return n?(0,l.getColor)("linkHover"):t}),(e=>{let{isLastSignInMethod:t}=e;return t?"2px":"1px"}),(0,l.getSizeBy)(1),n?(0,l.getColor)(n):(0,l.getColor)("bright"))},f=p({backgroundColor:"#445260"}),x=p({backgroundColor:"#4285f4"}),b=p({color:"primary"});var w=n(74664),y=n(50876);const 
k=h(w.j5),E=e=>{let{flavour:t="default",onSubmit:n,disabled:r,initialValue:o,tagging:i}=e;const[c,s]=(0,l.useInputValue)({value:o||""}),[d,u]=(0,a.useState)(),{sendButtonClickedLog:m,isReady:h}=(0,y.A)(),p="sso"==t,f=p?"sso":"email",x=p?"Continue":"Sign in by email",b=(0,a.useCallback)((e=>{e.preventDefault(),m({feature:"SignIn",dataGa:"signinup::click-".concat(f,"::").concat(i)}),n(c).catch(u),localStorage.setItem("lastSignInMethod",f)}),[n,c,h,f]);return a.createElement(l.Flex,{as:"form",column:!0,gap:4,width:"100%","data-testid":"emailField"},a.createElement(k,(0,g.A)({value:c,onChange:s,"data-testid":"emailField-input",method:"email",size:"large",error:d},p?{placeholder:"Enter your email"}:{})),a.createElement(w.OV,{type:"submit",disabled:r||!c,width:"100%",label:x,onClick:b,"data-ga":"signinup::click-email::".concat(i),"data-testid":"emailField-submitButton"}))};var v=n(84541),S=n(21828),C=n(58187),I=n(87659),A=n(92155),B=n(67276);const _=()=>{const e=(0,a.useMemo)((()=>{const{search:e}=window.location,{telemetry_session_id:t}=o.parse(e);return t}));(0,a.useEffect)((()=>{e&&sessionStorage.setItem(B.yq.session,e)}),[e])};var M=n(63314),F=n(3914),T=n(37618),H=n(93155);const R=h(x),N=(0,A.A)(R),O=h(f),D=(0,A.A)(O),G=h(b),j=(0,A.A)(G),L=(0,A.A)(b),V=e=>{let{to:t}=e;return t=(e=>{try{const t=document.createElement("a");return t.href=e,/https?/.test(t.protocol)?new URL(t.protocol+"//"+t.host+t.pathname+t.search+t.hash).toString():null}catch(t){return null}})(t),t?a.createElement(l.Box,{position:"fixed"},a.createElement(l.Button,{as:"a",href:t,flavour:"borderless",label:"Go back to ".concat(t.replace(/#.*/,"")),icon:"arrow_left",rel:"noopener noreferrer"})):null},P=()=>{const{search:e}=(0,r.zy)(),t=window.location.hash,n=(0,c.Js)(),{redirect_uri:i}=o.parse(e),[g,,u,m]=(0,I.A)(),h=(0,F.ap)("id"),p=(0,T.ES)(h),f=new URL(window.location.href).searchParams.get("join_callback");_();const x=(0,a.useCallback)((a=>{const r=encodeURIComponent((0,s.V)("/sign-in".concat(e),t)),o=encodeURIComponent((0,s.V)("/sign-up/verify".concat(e),t));return n({isSSO:g,email:a,redirectURI:r,registerURI:o})}),[n,g]);(0,a.useEffect)((()=>{if(!/app\.netdata\.cloud/.test(location.hostname))return;const e=document.createElement("script");return e.src="https://zsvdrp94yfxr.statuspage.io/embed/script.js",e.async=!0,document.body.appendChild(e),()=>{document.body.removeChild(e)}}),[]);const b=e=>{localStorage.setItem("lastSignInMethod",e)};return a.createElement(M.Ay,{feature:"SignIn"},!window.envSettings.onprem&&!!i&&a.createElement(V,{to:i}),a.createElement(v.A,{"data-testid":"signIn"},a.createElement(l.Flex,{column:!0,gap:4},g?a.createElement(a.Fragment,null,a.createElement(l.H1,{textAlign:"center"},"Single Sign-On"),a.createElement(l.TextBig,{textAlign:"center"},"Your organisation uses Single Sign-On (SSO).",a.createElement("br",null),"Please sign in using your SSO credentials.")):a.createElement(a.Fragment,null,a.createElement(l.H1,{textAlign:"center"},"Welcome!"),f?a.createElement(l.TextBig,{textAlign:"center"},"To join the space you were invited, please sign-up for a Netdata Cloud Account. 
If you already have one, just sign-in."):a.createElement(l.TextBig,{textAlign:"center"},"Sign in and let's get you started"))),a.createElement(w.MG,null,g?null:a.createElement(a.Fragment,null,a.createElement(l.Flex,{column:!0,gap:4},a.createElement(N,{href:(0,s.R)("google"),onClick:()=>b("google"),disabled:!1,"data-ga":"signinup::click-google::sni-view","data-testid":"signIn-googleButton",method:"google",feature:"SignIn",payload:{label:"Continue with Google"}},a.createElement(w.ch,null,a.createElement(d.J8,null)),a.createElement(l.Text,{textAlign:"center"},"Continue with Google")),a.createElement(D,{href:(0,s.R)("github"),onClick:()=>b("github"),disabled:!1,"data-ga":"signinup::click-github::sni-view","data-testid":"signIn-githubButton",method:"github",feature:"SignIn",payload:{label:"Continue with GitHub"}},a.createElement(w.ch,null,a.createElement(d.fx,null)),a.createElement(l.Text,null,"Continue with GitHub")),H.Df&&!p?a.createElement(j,{onClick:u,"data-testid":"signIn-ssoButton",feature:"SignIn",payload:{label:"Continue with SSO"},method:"okta"},a.createElement(l.Text,null,"Sign-in with an Enterprise Single Sign-On (SSO)")):null),a.createElement(l.Flex,{gap:2,alignItems:"center"},a.createElement(S.A,{theme:"default"}),a.createElement(l.H4,null,"or"),a.createElement(S.A,{theme:"default"}))),a.createElement(E,{flavour:g?"sso":"default",onSubmit:x,tagging:"sni-view"})),!g&&!window.envSettings.onprem&&a.createElement(l.Flex,{column:!0,gap:4,alignSelf:"center",width:{max:"480px"}},a.createElement(l.TextBig,{textAlign:"center"},"No Netdata account? Sign-in and we will create one for you!"),a.createElement(l.Text,{textAlign:"center"},"By creating an account, you agree with Netdata's"," ",a.createElement(C.X,{tagging:"signin-view"})," and our"," ",a.createElement(C.H,{tagging:"signin-view"}),".")),g?a.createElement(l.Flex,{alignSelf:"center"},a.createElement(L,{onClick:m},a.createElement(l.Text,null,"Sign in without Single Sign-On"))):null))},U=(0,i.g)(P,"light")},74664:(e,t,n)=>{n.d(t,{MG:()=>g,OV:()=>d,ah:()=>i,ch:()=>l,j5:()=>c,ph:()=>s});var a=n(8711),r=n(83199),o=n(96763);const l=a.default.div.withConfig({displayName:"styled__SvgContainer",componentId:"sc-16ytcl4-0"})(["width:42px;height:42px;flex-shrink:0;display:flex;justify-content:center;align-items:center;border-radius:2px;background:white;"]),i=a.default.a.withConfig({displayName:"styled__StyledLink",componentId:"sc-16ytcl4-1"})(["display:inline-flex;align-items:center;text-decoration:none;color:",";cursor:pointer;&:hover{text-decoration:underline;color:"," !important;}&:visited{color:",";}> svg{fill:",";padding-right:",";}"],(0,r.getColor)("success"),(0,r.getColor)("success"),(0,r.getColor)("success"),(0,r.getColor)("main"),(0,r.getSizeBy)(1)),c=(0,a.default)(o.A).withConfig({displayName:"styled__EmailInput",componentId:"sc-16ytcl4-2"})(["",""],(e=>{let{isLastSignInMethod:t}=e;return t?"border: 2px solid green;":"border: 1px solid #51966C;"})),s=(0,a.default)(r.Checkbox).withConfig({displayName:"styled__StyledCheckbox",componentId:"sc-16ytcl4-3"})(["margin:0 "," 0 0;& div:last-child{border-color:",";}"],(0,r.getSizeBy)(2),(e=>{let{error:t}=e;return 
t&&(0,r.getColor)("error")})),d=(0,a.default)(r.Button).withConfig({displayName:"styled__StyledButton",componentId:"sc-16ytcl4-4"})(["&&{height:44px;}"]),g=(0,a.default)(r.Flex).attrs((e=>{let{gap:t=8}=e;return{column:!0,gap:t,alignSelf:"center",padding:[0,0,8,0],border:{side:"bottom",color:"disabled"},width:{max:"320px"}}})).withConfig({displayName:"styled__FormContainer",componentId:"sc-16ytcl4-5"})(["width:100%;"])},58187:(e,t,n)=>{n.d(t,{H:()=>i,X:()=>c});var a=n(96540),r=n(74664);const o={link:"https://www.netdata.cloud/terms",title:"Terms And Conditions",dataGa:"signinup::click-terms::"},l=e=>{let{link:t,title:n,dataGa:o}=e;return e=>{let{tagging:l}=e;return a.createElement(r.ah,{href:t,target:"_blank",rel:"noopener noreferrer","data-ga":"".concat(o).concat(l)},n)}},i=l({link:"https://www.netdata.cloud/privacy",title:"Privacy Policy",dataGa:"signinup::click-privacy::"}),c=l(o)},17182:(e,t,n)=>{n.d(t,{R:()=>l,V:()=>o});var a=n(38819);const r=["expires_at","error_code","error_msg_key","error_msg"],o=(e,t)=>{const n=(0,a.yq)(r),o=t.includes("join-callback")?decodeURIComponent(n):n;return"".concat(window.location.origin).concat(e,"#").concat(o)},l=(e,t)=>{const{search:n,hash:a}=window.location,r=encodeURIComponent(o("/sign-in".concat(n).concat(n.length?"&":"?","oauth=").concat(e,"&"),a)),l=encodeURIComponent(o("/sign-up/verify".concat(n).concat(n.length?"&":"?","oauth=").concat(e,"&"),a));return"/api/v2/auth/account/".concat(e,"?redirect_uri=").concat(r,"®ister_uri=").concat(l).concat(t?"&is_unverified_registration=true":"")}},96763:(e,t,n)=>{n.d(t,{A:()=>l});var a=n(58168),r=n(96540),o=n(83199);const l=e=>{let{onChange:t,value:n,onKeyDown:l,label:i,...c}=e;return r.createElement(o.TextInput,(0,a.A)({label:i,name:"userEmail",placeholder:"Enter an email address",value:n,onChange:t,onKeyDown:l},c))}}}]); \ No newline at end of file diff --git a/src/web/gui/v2/5598.07ff43a6b96bd41e8637.chunk.js b/src/web/gui/v2/5598.07ff43a6b96bd41e8637.chunk.js deleted file mode 100644 index a350b6a43..000000000 --- a/src/web/gui/v2/5598.07ff43a6b96bd41e8637.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},n=(new Error).stack;n&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[n]="9dfd529e-5f2f-449c-a441-2bc7c0d71360",e._sentryDebugIdIdentifier="sentry-dbid-9dfd529e-5f2f-449c-a441-2bc7c0d71360")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[5598],{99851:(e,n,t)=>{t.d(n,{A:()=>_e});var o=t(58168),i=(t(62953),t(96540)),a=t(45467),s=t(71600),r=t(51730),l=t(37519),c=t(83199),d=t(71847),u=t(87659),m=t(86723),p=(t(41393),t(81454),t(8711));const h=(0,p.default)(c.Flex).attrs((e=>({position:"relative",width:{min:200},height:{min:45},column:!0,background:"dropdown",color:"text",zIndex:60,round:!0,alignItems:"end",...e}))).withConfig({displayName:"styled__PickerBox",componentId:"sc-gwjt9o-0"})([""]),g=p.default.span.withConfig({displayName:"styled__StyledTimePeriod",componentId:"sc-gwjt9o-1"})(["margin-bottom:",";cursor:pointer;width:187px;height:",";&:last-of-type{margin-bottom:0;}& > 
span:hover{color:",";}"],(0,c.getSizeBy)(1.5),(0,c.getSizeBy)(2),(0,c.getColor)("textLite")),f=(0,p.default)(c.Flex).attrs((e=>{let{isSelected:n}=e;return{color:n?"primary":"text",margin:[2,6,0]}})).withConfig({displayName:"styled__StyledCustomTimePeriod",componentId:"sc-gwjt9o-2"})(["cursor:pointer;&:first-of-type{margin-top:0;}&:hover{color:",";}"],(0,c.getColor)("textLite")),y=(0,p.default)(c.Flex).attrs({alignItems:"center",border:{side:"all",color:"border"},color:"text",flexWrap:!1,height:8,justifyContent:"center",padding:[2,0],round:1,width:22}).withConfig({displayName:"styled__TimePeriodDropdownButton",componentId:"sc-gwjt9o-3"})([""]),b=(0,p.default)(c.Drop).attrs({background:"mainBackground",column:!0,overflow:{vertical:"auto"},margin:[.5,0,0],padding:[2,0],round:1}).withConfig({displayName:"styled__TimePeriodDrop",componentId:"sc-gwjt9o-4"})([""]),_=(0,p.default)(c.Icon).withConfig({displayName:"styled__DropdownIcon",componentId:"sc-gwjt9o-5"})(["fill:",";width:12px;height:12px;"],(0,c.getColor)("text")),w=p.default.input.withConfig({displayName:"styled__CustomInput",componentId:"sc-gwjt9o-6"})(["border:1px solid ",";background:",";box-sizing:border-box;border-radius:4px;color:",";height:32px;margin-left:10px;margin-right:10px;outline:none;padding:4px;width:32px;&:focus{border:1px solid ",";}"],(0,c.getColor)("border"),(0,c.getColor)("mainBackground"),(0,c.getColor)("text"),(0,c.getColor)("primary")),v=(0,p.default)(c.Drop).attrs({background:"dropdown",round:2,margin:[.5,0,0],border:{side:"all",color:"borderSecondary"},animation:!0}).withConfig({displayName:"styled__StyledDrop",componentId:"sc-gwjt9o-7"})([""]),T=(0,p.default)(c.TextInput).withConfig({displayName:"styled__StyledDateInput",componentId:"sc-gwjt9o-8"})(["width:100%;"]),k=p.default.div.withConfig({displayName:"styled__StyledCalendar",componentId:"sc-gwjt9o-9"})(["background:",";border:0;&.react-datepicker{background:",";border:0;}.react-datepicker{&__navigation{top:8px;&-icon::before{border-color:",";}}&__month-container{height:260px;}&__header{background:",";border:0;.react-datepicker__current-month{color:",";font-weight:normal;}.react-datepicker__day-name{color:",";}}&__day{color:",";&:hover{background:",";}&--disabled{color:",";&:hover{background:inherit;}}&--keyboard-selected,&--keyboard-selected:hover{color:",";background:inherit;border-radius:inherit;}&--selected,&--selected:hover{color:",";background:",";border-radius:8px;}&--in-selecting-range,&--in-range{color:",";background:",";border-radius:0;}&--selecting-range-start,&--range-start{color:",";background:",";border-top-left-radius:8px;border-bottom-left-radius:8px;&:hover{color:",";background:",";border-radius:0;border-top-left-radius:8px;border-bottom-left-radius:8px;}}&--selecting-range-end,&--range-end{color:",";background:",";border-top-right-radius:8px;border-bottom-right-radius:8px;&:hover{color:",";background:",";border-top-right-radius:8px;border-bottom-right-radius:8px;}}}}"],(0,c.getColor)("dropdown"),(0,c.getColor)("dropdown"),(0,c.getColor)("text"),(0,c.getColor)("dropdown"),(0,c.getColor)("main"),(0,c.getColor)("textLite"),(0,c.getColor)("main"),(0,c.getColor)("elementBackground"),(0,c.getColor)("textLite"),(0,c.getColor)("main"),(0,c.getColor)("bright"),(0,c.getColor)("primary"),(0,c.getColor)("primary"),(0,c.getColor)("elementBackground"),(0,c.getColor)("bright"),(0,c.getColor)("primary"),(0,c.getColor)("bright"),(0,c.getRgbColor)(["green","netdata"],.8),(0,c.getColor)("bright"),(0,c.getColor)("primary"),(0,c.getColor)("bright"),(0,c.getRgbCol
or)(["green","netdata"],.8)),x=e=>{let{value:n,period:t,resolution:o,isSelected:a,setTimeRange:s,tagging:r}=e;const l=(0,i.useCallback)((()=>s(n,o)),[n,o]);return i.createElement(g,{key:n,onClick:l,"data-ga":"date-picker::click-quick-selector::".concat(r,"::").concat(-n),"data-testid":"timePeriod-value"},i.createElement(c.Text,{color:a?"primary":"text"},t))},P=(0,i.memo)(x);var C=t(79897);const q=e=>{let{handleTimePeriodChange:n,selectedDate:t,tagging:o}=e;return i.createElement(c.Flex,{column:!0,justifyContent:"start",alignItems:"start",height:{max:"260px"},overflow:{vertical:"auto"},"data-testid":"timePeriods"},C.AE.map((e=>{let{period:a,value:s,resolution:r}=e;return i.createElement(P,{key:s,value:s,period:a,resolution:r,setTimeRange:n,isSelected:t===s,tagging:o})})))};var D=t(82526),A=t(16074),S=t(26010);const M=e=>{let{customTimePeriodRef:n,handleTimePeriodChange:t,value:o,resolution:a,tagging:s}=e;const r=()=>o<=0?(0,C.Mb)(-o,a):0,[l,d]=(0,i.useState)(r),[m,p,,h]=(0,u.A)(),g=(0,i.useRef)();(0,i.useEffect)((()=>d(r())),[o]);const v=(0,i.useCallback)((e=>d(e.target.value)),[]),T=(0,i.useCallback)((e=>{const n=Number(e.currentTarget.value),i=!Number.isNaN(n)&&Number.isInteger(n)&&n>0,s=(0,D.W)(new Date(0),{[a]:n});return i&&(0,A.f)(s)&&(0,S._)(s)<=C.So?t((0,C.zp)(n,a),a):d(o<=0?(0,C.Mb)(-o,a):0)}),[a,l]),k=(0,i.useCallback)((e=>()=>{t((0,C.zp)(l,e),e),h()}),[l]);return i.createElement(c.Flex,{justifyContent:"start",alignItems:"center",height:8,"data-ga":"date-picker::click-last-integer::".concat(s),"data-testid":"customTimePeriod"},i.createElement(c.Text,{"data-testid":"customTimePeriod-label"},"Last"),i.createElement(w,{value:l,onChange:v,onBlur:T,"data-ga":"date-picker::click-last-integer::".concat(s,"::").concat(l),"data-testid":"timePeriod-timeInput"}),i.createElement(y,{"data-testid":"timePeriodDropdown-button",onClick:p,ref:g},i.createElement(c.Text,{"data-testid":"timePeriodDropdown-buttonLabel",padding:[0,4,0,0]},a),i.createElement(_,{"data-testid":"timePeriodDropdown-buttonIcon",name:"triangle_down"})),g.current&&m&&i.createElement(b,{align:{top:"bottom",left:"left"},animation:!0,close:h,"data-testid":"timePeriodDropdown",onClickOutside:h,onEsc:h,ref:n,target:g.current},C.gt.map((e=>i.createElement(f,{key:e,onClick:k(e),"data-ga":"date-picker::click-last-time-".concat(e,"::").concat(s),"data-testid":"timePeriod-option"},e)))))};var E=t(72880),N=t(87991),R=t(21290),H=t(59386),I=t.n(H);t(30857);const O=e=>{let{selected:n,selectsStart:t=!1,selectsEnd:o=!1,startDate:a,endDate:s,onChange:r,minDate:l,maxDate:c,dateFormat:d="MM/dd/yyyy",open:u=!1,startOpen:m=!1,inline:p=!1,selectsRange:h=!1,monthsShown:g=1,showPopperArrow:f=!0,calendarContainer:y=null}=e;return i.createElement(I(),{selected:n,onChange:r,selectsStart:t,selectsEnd:o,startDate:a,endDate:s,minDate:l,maxDate:c,dateFormat:d,open:u,startOpen:m,inline:p,selectsRange:h,monthsShown:g,showPopperArrow:f,calendarContainer:y})},L=e=>{let{name:n="",value:t="",onDatesChange:o,onFocus:a,placeholderText:r=""}=e;const{utcOffset:l}=(0,R.$j)(),[c,d]=(0,i.useState)(""),u=(0,i.useCallback)((e=>{const n=e.target.value;d(n)}),[]),m=(0,i.useCallback)((e=>{if((0,A.f)(e)){const n=(0,N.GP)(e,"MMMM d yyyy, H:mm");d(n)}}),[]),p=(0,i.useCallback)((e=>{const n=(0,C.ii)(e.target.value,l);if((0,A.f)(n)&&(0,s.W)(n)>0){const e=(0,s.W)(n);o(e,(()=>m(t)))}else m(t)}),[t,l]);return(0,i.useEffect)((()=>m(t)),[t]),i.createElement(T,{type:"text",name:n,value:t?c:r,onChange:u,onBlur:p,onFocus:a,placeholder:r,"data-testid":"datePicker-input"})};var 
U=t(2642);const B=()=>{const{localeTimeString:e,localeDateString:n}=(0,R.$j)();return(0,i.useCallback)((t=>"".concat(n(t,{locale:"en-us",long:!1})," ").concat(e(t,{locale:"en-us",secs:!1}))),[e,n])},j=(e,n)=>e>0?(0,U.a)(new Date(n(e))):e||0===e?(0,U.a)(new Date(n((new Date).valueOf()+1e3*e))):null,G=e=>{const n=B();return(0,i.useMemo)((()=>j(e,n)),[e])},F=(e,n)=>[G(e),G(n)],z=e=>{let{startDate:n,setStartDate:t,endDate:a,setEndDate:r,singleDate:l,onDatesChange:d,onInputFocus:u,onlyDates:m,maxDate:p=new Date,minDate:h=new Date("1/1/2018"),isSinglePicker:g}=e;const[f,y]=F(n,a),b=G(l),{utcOffset:_}=(0,R.$j)(),w=B(),v=(0,i.useCallback)(((e,n)=>(0,E.Y)(j(e,w),y)?(0===a&&r(y.getTime()),t(e)):n()),[a,w]),T=(0,i.useCallback)(((e,o)=>(0,E.Y)(f,j(e,w))?(n<0&&t(f.getTime()),r(e)):o()),[n,w]),x=(0,i.useCallback)((e=>{const n=Array.isArray(e)?e[0]:e,t=Array.isArray(e)?e[1]:null,o=n?(0,C.ii)((0,N.GP)(n,"MMMM d yyyy, H:mm"),_):n,i=t?(0,C.ii)((0,N.GP)(t,"MMMM d yyyy, H:mm"),_):t,a=(0,s.W)(o)||null,r=(0,s.W)(i)||null;d({...g?{singleDate:a}:{startDate:a,endDate:r}})}),[_]);return i.createElement(c.Flex,{column:!0,justifyContent:"center",alignItems:"center",flex:!0,gap:3,"data-testid":"datePicker-wrapper"},i.createElement(O,(0,o.A)({selected:g?b:f,onChange:x},g?{}:{startDate:f,endDate:y},{maxDate:p,minDate:h,inline:!0,selectsRange:!0,monthsShown:g?1:2,dateFormat:"MMMM d yyyy, H:mm",showPopperArrow:!1,calendarContainer:k})),!m&&(g?i.createElement(L,{name:"date",value:b,onDatesChange:x,placeholderText:"Select a date"}):i.createElement(c.Flex,{justifyContent:"around",alignItems:"center",width:"100%",gap:2,padding:[0,10]},i.createElement(L,{name:"startDate",value:f,onDatesChange:v,onFocus:u,placeholderText:"Select a start date"}),i.createElement(L,{name:"endDate",value:y,onDatesChange:T,onFocus:u,placeholderText:"Select an end date"}))))},Y=e=>{let{startDate:n,endDate:t,onlyDates:o}=e;const[a,s]=F(n,t),{formattedStartDate:r,formattedEndDate:l}=(0,i.useMemo)((()=>(0,C.HA)(a,s,{onlyDates:o})),[a,s]),d=(0,i.useMemo)((()=>(0,C.jo)(a,s)),[a,s]);return i.createElement(c.Flex,{alignItems:"center",gap:2},i.createElement(c.Flex,{alignItems:"center",justifyContent:"center",gap:1.5},i.createElement(c.TextSmall,{strong:!0,whiteSpace:"nowrap"},"From"),i.createElement(c.TextSmall,{whiteSpace:"nowrap","data-testid":"periodIndication-from"},r)),i.createElement(c.Icon,{name:"arrow_left",size:"small",color:"textLite",rotate:2}),i.createElement(c.Flex,{alignItems:"center",justifyContent:"center",gap:1.5},i.createElement(c.TextSmall,{strong:!0,whiteSpace:"nowrap"},"To"),i.createElement(c.TextSmall,{whiteSpace:"nowrap","data-testid":"periodIndication-to"},l)),i.createElement(c.Flex,{alignItems:"center",justifyContent:"center",gap:2},i.createElement(c.TextSmall,{whiteSpace:"nowrap"},"/"),i.createElement(c.TextSmall,{color:"textLite",whiteSpace:"nowrap","data-testid":"periodIndication-period"},d)))};var X=t(51112),W=t(85551),K=t(6593);const $=36e5,V=24*$,Z=30*V,J=[{key:"years",value:12*Z,unit:"y"},{key:"months",value:Z,unit:"m"},{key:"days",value:V,unit:"d"},{key:"hours",value:$,unit:"h"},{key:"minutes",value:6e4,unit:"min"},{key:"seconds",value:1e3,unit:"s"}],Q=["hours","minutes","seconds"],ee=e=>{let{placeholder:n="No date"}=e;return i.createElement(c.TextSmall,null,n)},ne=e=>{let{isPlaying:n,startDate:t,endDate:o,isSameDate:a,color:s,onlyDates:r,isSinglePicker:l}=e;const{localeTimeString:d,localeDateString:u}=(0,R.$j)(),m=s||(n?"accent":"textFocus");return 
i.createElement(c.Flex,{gap:1},i.createElement(c.TextSmall,{color:s,whiteSpace:"nowrap"},u(t,{long:!1}),!r&&i.createElement(i.Fragment,null," ","\u2022"," ",i.createElement(c.TextSmall,{color:m,whiteSpace:"nowrap"},d(t,{secs:!1})))),!l&&i.createElement(i.Fragment,null,(!r||!a)&&i.createElement(c.Icon,{name:"arrow_left",color:m,size:"small",rotate:2}),i.createElement(c.TextSmall,{color:s,whiteSpace:"nowrap"},!a&&i.createElement(i.Fragment,null,u(o,{long:!1})," \u2022 "),!r&&i.createElement(c.TextSmall,{color:m,whiteSpace:"nowrap"},d(o,{secs:!1})))))},te=e=>{let{isPlaying:n,duration:t,color:o,fluid:a=!1,isSmall:s=!1}=e;return i.createElement(c.Flex,{gap:s?.2:1,align:"center"},!s&&i.createElement(c.TextSmall,{color:o},"\u2022"),i.createElement(c.Flex,{width:a?"auto":5},n&&i.createElement(c.TextSmall,{color:o},"last")),i.createElement(c.TextSmall,{color:o},t))};var oe=t(29217);const ie=(0,p.default)(c.Flex).withConfig({displayName:"styled__Container",componentId:"sc-1s7311w-0"})(["cursor:pointer;&:hover *{color:",";fill:",";}"],(0,c.getColor)("textLite"),(0,c.getColor)("textLite")),ae=(0,p.default)(oe.A).withConfig({displayName:"styled__StyledTooltip",componentId:"sc-1s7311w-1"})(["pointer-events:",";"],(e=>{let{isDisabled:n}=e;return n?"none":"auto"})),se=(0,i.forwardRef)(((e,n)=>{let{onClick:t,start:a,end:s,isPlaying:r,isPickerOpen:l,tagging:c,color:d,fluid:u,onlyDates:m,tooltipContent:p,isSinglePicker:h,noDateSelected:g,isSmall:f,...y}=e;const b=(0,X.yD)(),[_,w,v]=(0,i.useMemo)((()=>{if(g)return[];const e=(e=>e<0?(0,D.W)(new Date,{seconds:e}):new Date(e))(a),n=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:0;return e?new Date(e):new Date}(s),t=((e,n)=>(0,W.r)(e,n))(e,n);return[e,n,t]}),[a,s,g]),T=(0,i.useMemo)((()=>g?0:function(e){let{withSeconds:n=!1,withTime:t=!0}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};const o=n||!!e.seconds;return J.reduce(((n,i)=>{let{key:a,unit:s}=i;return"seconds"!==a||o?!t&&Q.includes(a)?n:e[a]?n?"".concat(n," ").concat(e[a]).concat(s):"".concat(e[a]).concat(s):n:n}),"")}(((e,n)=>(0,K.F)({start:e,end:n}))(_,w),{withTime:!m})),[r,_,w,g]);return i.createElement(ae,{content:l?()=>{}:p||"Select a predefined or a custom timeframe",align:"bottom",isDisabled:!t||b,plain:!0},i.createElement(ie,(0,o.A)({alignItems:"center",justifyContent:"center",gap:1,height:"100%",onClick:t,padding:[0,1],ref:n,"data-ga":"date-picker::click-time::".concat(c),"data-testid":"datePicker-accessorElement"},y),f?null:h&&g?i.createElement(ee,null):i.createElement(ne,{isPlaying:r,endDate:w,startDate:_,isSameDate:v,color:d,onlyDates:m,isSinglePicker:h}),!m&&!h&&!g&&i.createElement(te,{isPlaying:r,duration:T,color:d,fluid:f||u,isSmall:f})))}));var re=t(27467),le=(t(17333),t(98992),t(54520),t(45123)),ce=t(80940);const de=(0,p.default)(c.Drop).attrs({align:{top:"bottom",left:"left"},animation:!0,background:"dropdown",column:!0,margin:[.5,0,0],overflow:{vertical:"auto"},padding:[2,0],round:1,width:80}).withConfig({displayName:"dropdown__Dropdown",componentId:"sc-pg99kg-0"})([""]),ue=(0,p.default)(c.Flex).attrs({column:!0,padding:[2,0,0],overflow:{vertical:"auto"},height:{max:"320px"}}).withConfig({displayName:"container__Container",componentId:"sc-1gtygg6-0"})([""]),me=(0,p.default)(c.Flex).attrs({justifyContent:"between",alignItems:"center",width:"100%",gap:2}).withConfig({displayName:"wrapper__Wrapper",componentId:"sc-1ehauu1-0"})([""]),pe=e=>{let{name:n,offset:t,utc:o,onSelect:a}=e;const s=(0,i.useCallback)((()=>a(o,t,n)),[o,n]);return 
i.createElement(le.A,{round:1,onClick:s,Wrapper:me,"data-ga":"timezone-picker::click-timezone::global-view::".concat(n)},i.createElement(c.Text,{color:"text"},n),i.createElement(c.Text,{color:"textLite",whiteSpace:"nowrap"},"UTC ",t))};var he=t(82838);const ge=e=>{let{value:n,onChange:t,timezoneRef:o,timezoneName:a}=e;const[s,r]=(0,i.useState)(""),[l,d]=(0,u.A)(),m=(0,i.useRef)(),p=(0,i.useRef)();(0,i.useEffect)((()=>{p.current&&l&&p.current.focus()}),[l]);const h=(0,i.useMemo)((()=>s?he.O.filter((e=>{let{text:n,offset:t}=e;return n.toUpperCase().includes(s.toUpperCase())||t.includes(s)})):he.O),[s]),g=()=>{d(!1),r("")},f=(0,i.useCallback)(((e,n,o)=>{t(e,n,o),g()}),[]);return i.createElement(ce.A,{hasBorder:!1,testId:"timezone-picker"},i.createElement(le.A,{round:1,onClick:d,ref:m,Wrapper:me,padding:[2],"data-ga":"timezone-picker::click-picker::global-view",selected:!0},i.createElement(c.Flex,{gap:1},i.createElement(c.Text,{color:"text",whiteSpace:"nowrap"},a," UTC ",n)),i.createElement(c.Icon,{name:"chevron_down",color:"text",width:"12px",height:"12px"})),m.current&&l&&i.createElement(de,{onClickOutside:g,onEsc:g,ref:o,target:m.current},i.createElement(c.Box,{padding:[0,2]},i.createElement(c.SearchInput,{value:s,onChange:r,ref:p,"data-ga":"timezone-picker::click-search::global-view"})),i.createElement(ue,null,h.map((e=>{let{text:n,offset:t,utc:o}=e;return i.createElement(pe,{key:n,name:n,offset:t,utc:o[0],onSelect:f})})))))},fe=e=>{let{timezoneRef:n}=e;const[{offset:t,timezoneName:o},a]=(0,re.N9)(),s=(0,i.useCallback)(((e,n,t)=>a({utc:e,offset:n,timezoneName:t})),[]);return i.createElement(ge,{timezoneRef:n,timezoneName:o,value:t,onChange:s})};t(74648),t(9920),t(23215),t(3949);const ye=["mousedown","touchstart"];var be=t(57605);const _e=e=>{let{onChange:n,isSinglePicker:t,values:{start:p,end:g,singleDate:f}={},defaultValue:y=-900,tagging:b="",isPlaying:_,onlyDates:w,accessorProps:T={},maxDate:k,minDate:x,accessorTooltipContent:P,isSmall:D,...A}=e;const[S,E]=(0,i.useState)(f),[N,R]=(0,i.useState)(p),[H,I]=(0,i.useState)(p),[O,L]=(0,m.A)("resolution","minutes"),[U,B]=(0,i.useState)("startDate"),[j,G,,F]=(0,u.A)(),X=(0,i.useRef)(),[W,K]=(0,i.useState)(null),[$,V]=(0,i.useState)(null),[Z,J]=(0,i.useState)(null),Q=(0,be.A)();!function(e){let n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:ye,t=arguments.length>2?arguments[2]:void 0;const o=(0,i.useRef)();(0,i.useEffect)((()=>{const i=n=>{Array.isArray(t)?t.every((e=>!e||!e.contains(n.target)))&&e():o.current&&!o.current.contains(n.target)&&e()};return n.forEach((e=>document.addEventListener(e,i))),()=>{n.forEach((e=>document.removeEventListener(e,i)))}}),[o,e,t])}(F,["mouseup","touchend"],[W,$,Z,X.current]),(0,a.A)((()=>{Q(!!j)}),[j]);const ee=(0,i.useCallback)((e=>{let{startDate:n,endDate:t}=e;R(n),I(t)}),[]);(0,i.useEffect)((()=>{ee({startDate:p,endDate:g})}),[p,g]);const ne=(0,i.useCallback)((()=>{t?(E(f),F()):ee({startDate:y,endDate:0})}),[t]),te=(0,i.useCallback)((e=>{e.target.name&&B(e.target.name)}),[]),oe=(0,i.useCallback)((e=>{e.stopPropagation(),G()}),[G]),ie=()=>{t?n(S):(n({start:N,end:H}),Q(!1)),F()},ae=(0,i.useMemo)((()=>(0,C.g$)(U)),[U]),re=!!t||null!==N&&null!==H&&N!==H,le=!t&&(N===p&&H===g),ce=(0,i.useMemo)((()=>y),[]),de=N===ce,ue=(0,i.useCallback)(((e,n)=>{L(n),ee({startDate:e,endDate:0})}),[]),me=e=>{let{startDate:o,endDate:i,singleDate:a}=e;if(t)E(a),n(a),(0,d.H)("date-picker","click-date-picker",b,String(a));else{ee(w?{startDate:o?(0,s.W)((0,r.o)(new Date(o))):o,endDate:i?(0,s.W)((0,l.D)(new 
Date(i))):i}:{startDate:o,endDate:i});const e="finish"===ae?i||o:o||i;(0,d.H)("date-picker","click-date-picker",b,String(e))}},pe=(0,i.useMemo)((()=>X.current&&j?i.createElement(v,{backdrop:!0,target:X.current,canHideTarget:!1,align:{top:"bottom",left:"left"},onEsc:F},i.createElement(h,(0,o.A)({ref:V,"data-testid":"datePicker"},A),i.createElement(c.Flex,{alignItems:"center",width:"100%",padding:w?[4]:[0,6]},!w&&!t&&i.createElement(c.Flex,{column:!0,gap:3,margin:[0,4,0,0],padding:[6,0],border:{side:"right",color:"borderSecondary"}},i.createElement(q,{handleTimePeriodChange:ue,selectedDate:N,tagging:b}),i.createElement(M,{handleTimePeriodChange:ue,customTimePeriodRef:K,resolution:O,tagging:b,value:N})),i.createElement(z,{startDate:N,endDate:H,singleDate:S,setStartDate:R,setEndDate:I,onDatesChange:me,onInputFocus:te,onlyDates:w,maxDate:k,minDate:x,isSinglePicker:t})),i.createElement(c.Flex,{column:!0,padding:[4,6,2],width:"100%",border:{side:"top",color:"borderSecondary"}},!w&&!t&&i.createElement(fe,{timezoneRef:J}),i.createElement(c.Flex,{gap:1,justifyContent:t?"end":w?"center":re?"between":"end",width:"100%"},!w&&!t&&re&&i.createElement(Y,{startDate:N,endDate:H,onlyDates:w}),i.createElement(c.Flex,{alignItems:"center",justifyContent:"center",gap:1},i.createElement(c.Button,{label:"Clear",flavour:"hollow",onClick:ne,disabled:de,"data-ga":"date-picker::click-clear::".concat(b,"-").concat(ae),"data-testid":"datePicker-clear",small:!0}),i.createElement(c.Button,{label:"Apply",onClick:ie,disabled:!re||le,"data-ga":"date-picker::click-apply::".concat(b,"-").concat(ae),"data-testid":"datePicker-apply",small:!0})))))):null),[N,b,O,H,re,de,ae,le,S,j]);return i.createElement(i.Fragment,null,i.createElement(se,(0,o.A)({onClick:oe,tagging:b,isPickerOpen:j,isPlaying:_,setRangeValues:n,start:t?S:p,end:t?S:g,ref:X,onlyDates:w,tooltipContent:P,isSinglePicker:t,noDateSelected:t&&!S,isSmall:D},T)),pe)}},51112:(e,n,t)=>{t.d(n,{A_:()=>l,gC:()=>d,mQ:()=>c,yD:()=>r});var o=t(47767),i=t(50466),a=t(22332);const s={},r=()=>{const e=(0,o.RQ)("/spaces/:spaceSlug/rooms/:roomSlug/alerts"),n=(0,o.RQ)("/spaces/:spaceSlug/rooms/:roomSlug/alerts/:alertId"),t=(0,o.RQ)("/spaces/:spaceSlug/rooms/:roomSlug/home");return e||n||t},l=()=>{const e=(0,o.RQ)("/spaces/:spaceSlug/rooms/:roomSlug/top"),{hasHistory:n}=(0,i.Ol)({extraKey:"fn"})||s;return e&&!n},c=()=>(0,o.RQ)("/spaces/:spaceSlug/rooms/:roomSlug/events"),d=()=>{const e=c(),n=(0,a.useAttributeValue)("autofetch");return!(0,a.useAttributeValue)("paused")&&n&&!e}},79897:(e,n,t)=>{t.d(n,{AE:()=>w,HA:()=>v,Mb:()=>f,So:()=>p,d_:()=>m,g$:()=>_,gt:()=>h,ii:()=>T,jo:()=>k,zp:()=>y});var o=t(82526),i=t(26010),a=t(87991),s=t(84128),r=t(58380),l=t(31826),c=t(71600);const d=60,u=3600,m=86400,p=94694400,h=["minutes","hours","days","months"],g={minutes:d,hours:u,days:m,months:30*m},f=(e,n)=>Math.round(e/g[n]),y=(e,n)=>{const t=(0,o.W)(new Date(0),{[n]:e});return-(0,i._)(t)},b={startDate:"start",endDate:"finish"},_=e=>b[e],w=[{period:"Last 5 minutes",value:-300,resolution:"minutes"},{period:"Last 10 minutes",value:-600,resolution:"minutes"},{period:"Last 15 minutes",value:-900,resolution:"minutes"},{period:"Last 30 minutes",value:-1800,resolution:"minutes"},{period:"Last hour",value:-3600,resolution:"hours"},{period:"Last 2 hours",value:-7200,resolution:"hours"},{period:"Last 6 hours",value:-21600,resolution:"hours"},{period:"Last 12 hours",value:-43200,resolution:"hours"},{period:"Last day",value:-m,resolution:"days"},{period:"Last 2 days",value:-2*m,resolution:"days"},{period:"Last 7 
days",value:-7*m,resolution:"days"}],v=function(e,n){let{onlyDates:t}=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{};const o=t?"MMMM d yyyy":"MMMM d yyyy, H:mm:ss";return{formattedStartDate:(0,a.GP)(e,o),formattedEndDate:(0,a.GP)(n,o)}},T=(e,n)=>{const t=(0,s.u)(e,"MMMM d yyyy, H:mm")?e:(0,r.qg)(e,"MMMM d yyyy, H:mm",Date.now());return(0,r.qg)("".concat(t," ").concat((e=>{if(!e)return"+00:00";const n=e.toString().split("."),t=n[0]>0?"+":"-",o=Math.abs(n[0]).toString(),i="".concat(t).concat(o.padStart(2,0));return n.length>1?"".concat(i,":").concat(String(.6*n[1]).padEnd(2,0)):"".concat(i,":00")})(n)),"MMMM d yyyy, H:mm xxx",Date.now())},k=(e,n)=>(0,l.k)((0,c.W)(e),(0,c.W)(n))},71847:(e,n,t)=>{t.d(n,{H:()=>o});const o=function(e,n,t,o,i){var a;let s=arguments.length>5&&void 0!==arguments[5]?arguments[5]:"gaCustomEvent";if(window.envSettings.tracking){if(window.dataLayer){const a={event:s,eventCategory:e,eventAction:n,eventLabel:t,eventValue:o,eventResults:i};window.dataLayer.push(a)}window.gtag&&window.gtag("event","gaCustomEvent",{eventCategory:e,eventAction:n,eventLabel:t,eventValue:o,eventResults:i}),null!==(a=window.posthog)&&void 0!==a&&a.__loaded&&window.posthog.capture(s,{eventCategory:e,eventAction:n,eventLabel:t,eventValue:o})}}},96083:(e,n,t)=>{t.d(n,{A:()=>l});var o=t(58168),i=t(68415),a=t(96540),s=t(20982),r=t(83199);const l=(0,a.forwardRef)(((e,n)=>{let{onClick:t,src:l,title:c="User avatar",width:d=8,height:u=8,...m}=e;return a.createElement(r.Flex,(0,o.A)({cursor:"pointer",round:"50%","data-testid":"userAvatar",title:c,alt:c,onClick:e=>t&&t(e),width:d,height:u},m,{ref:n,alignItems:"center",justifyContent:"center",background:"spaceIdle",color:"textLite",overflow:"hidden"}),l?a.createElement(r.Flex,{as:"img",src:l,alt:c,width:d,height:u}):a.createElement(s.g,{icon:i.yV,size:"lg",alt:c}))}))},5396:(e,n,t)=>{t.d(n,{A:()=>r});var o=t(58168),i=t(96540),a=t(83199),s=t(29217);const r=e=>{let{children:n,background:t,testId:r,onClose:l,tooltipProps:c,...d}=e;const u=r||"header-banner";return i.createElement(a.Flex,(0,o.A)({background:t,"data-testid":u,height:{min:10},padding:[2,10,2,2],position:"sticky",zIndex:5},d),n,i.createElement(s.A,(0,o.A)({align:"bottom",content:"Dismiss message",isBasic:!0,plain:!0,stretch:"align"},c),l&&i.createElement(a.Box,{"data-testid":"".concat(u,"-close-button"),as:a.Icon,color:"text",cursor:"pointer",name:"x",position:"absolute",right:"8px",onClick:e=>{l&&(e.stopPropagation(),l())}})))}},76051:(e,n,t)=>{t.d(n,{Ay:()=>r,I1:()=>s});var o=t(96540),i=t(8711),a=t(83199);const s=(0,i.default)(a.Box).withConfig({displayName:"blur__BlurredBox",componentId:"sc-12emg8l-0"})(["position:absolute;top:0;bottom:0;left:0;right:0;backdrop-filter:blur(",");z-index:80;"],(e=>{let{value:n}=e;return n||"7px"})),r=e=>{let{children:n,...t}=e;return o.createElement(a.Box,{position:"relative"},n,o.createElement(s,t))}},40982:(e,n,t)=>{t.d(n,{A:()=>z});var o=t(58168),i=(t(62953),t(96540)),a=t(83199),s=t(67602),r=t(29217),l=t(15255),c=t(8711),d=t(87292),u=t(28738),m=(t(17333),t(41393),t(98992),t(54520),t(81454),t(37618)),p=t(87659),h=t(50105),g=t(3914),f=t(78862),y=t(76634),b=t(64754);const _=c.default.div.withConfig({displayName:"spaces__Separator",componentId:"sc-19p9rsp-0"})(["height:1px;width:",";background:",";"],(0,a.getSizeBy)(3),(0,a.getColor)("border")),w=e=>{let{selectedSpace:n,setSelectedSpace:t,column:s,showFullname:l,...c}=e;const d=(0,g.Pk)(),u=(0,i.useMemo)((()=>d.filter((e=>!(0,m.ES)(e)))),[d]),[w,,v,T]=(0,p.A)();return 
i.createElement(a.Flex,(0,o.A)({"data-testid":"workspaceBar",padding:[3,2],column:s,alignItems:"center",justifyContent:"center"},c),i.createElement(a.Flex,{column:!0,"data-testid":"workspaceBar-spaces-list",gap:4,alignItems:"center",overflow:"hidden"},i.createElement(a.Flex,{column:s,"data-testid":"workspaceBar-spacesList",gap:s?4:2,overflow:{vertical:"auto"},flexWrap:!s},u.map((e=>i.createElement(f.A,{testIdPrefix:"workspaceBar-space",key:e,spaceId:e,onClick:t,active:e===(null===n||void 0===n?void 0:n.id),background:"selected",showFullname:l})))),i.createElement(y.A,{permission:"user:CreateSpace"},i.createElement(_,{"data-testid":"workspaceBar-separator"}),i.createElement(r.A,{content:"Create a new Space",align:"right"},i.createElement(b.A,{permission:"user:CreateSpace",ifForbidden:"hide",icon:"plus",onClick:v,"data-testid":"workspaceBar-addSpace-button"})))),w&&i.createElement(h.A,{onClose:T,onDone:t}))};var v=t(79412),T=t(56915),k=t(46741),x=t(87633),P=t(14994),C=t(76999);const q=e=>{let{selectedSpace:n,selectedRoom:t,setSelectedRoom:o}=e;const s=n.id;(0,x.A)(s);const l=(0,k.JT)("room:Create",s),c=(0,k.JT)("room:Read",s),[d,,u,m]=(0,p.A)(!1),h=(0,i.useCallback)((e=>{e.stopPropagation(),l&&u()}),[l]),g=(0,P.CB)(s),f=(0,i.useMemo)((()=>g.filter((e=>{let{isMember:n}=e;return n}))),[g]);return(0,i.useEffect)((()=>{null!==f&&void 0!==f&&f.length&&(t||o(f[0]))}),[f,t]),i.createElement(i.Fragment,null,i.createElement(T.A,{"data-testid":"workspaceRooms-menuList",disabled:!c,headerTestId:"workspaceRooms-warRooms",isOpen:!0,label:i.createElement(a.Flex,{padding:[1,0],margin:[0,0,1],flex:!0,justifyContent:"between",alignItems:"center",height:"24px"},i.createElement(a.Flex,{alignItems:"center",gap:2},i.createElement(a.Icon,{name:"space_new",color:"textNoFocus",width:"14px",height:"14px"}),i.createElement(a.TextSmall,{color:"textNoFocus"},"Select a room")),i.createElement(a.Flex,{alignItems:"center",gap:2},i.createElement(r.A,{content:"Create a new room",align:"right"},i.createElement(b.A,{permission:"room:Create",tiny:!0,icon:"plus",onClick:h,"data-testid":"workspaceRooms-addWarRoom-button",spaceId:s}))))},i.createElement(a.Flex,{column:!0,"data-testid":"workspaceRooms-warRoomsList",padding:[0,0,1]},f.map((e=>{const{id:n}=e;return i.createElement(C.A,{key:n,id:n,hideAlerts:!0,Wrapper:a.TextSmall,isSidebar:!0,selectedId:null===t||void 0===t?void 0:t.id,onClick:()=>o(e)})})))),d&&i.createElement(v.n,{onClose:m,isSubmodal:!1}))};var D=t(74530),A=t(22292),S=t(71835),M=t(79731),E=t(3714),N=t(20378),R=t(78459),H=t(50876);var I=t(92155),O=t(63314);const L=(0,c.default)(a.ModalContent).attrs({width:{base:"80vw"},height:{base:"80vh",min:"80vh"}}).withConfig({displayName:"modal__ModalContent",componentId:"sc-4dwymj-0"})(["box-shadow:0 18px 28px rgba(0,0,0,0.5);"]),U=(0,I.A)(a.Button),B=e=>{let{claim:n,loading:t,error:o,privateKey:s,setPrivateKey:r,selectedSpace:l,setSelectedSpace:c,selectedRoom:m,keyFilename:p}=e;return l?m?i.createElement(i.Fragment,null,i.createElement(u.m,null),i.createElement(a.H3,{textAlign:"center"},'You are ready to connect your agent in "',m.name,'" of "',l.name,'"'),i.createElement(a.Text,{textAlign:"center"},"Last step is to ensure you're the admin of this agent ;)"),i.createElement(a.TextSmall,{textAlign:"center"},"We've created a file with a random key. 
Can you read it?"),i.createElement(d.Ay,{"data-testid":"key-filename-command",commandText:"sudo cat ".concat(p),width:"60%"},"$ sudo cat ",p),i.createElement(a.Flex,{column:!0,round:!0,background:"successSemi",border:"primary",padding:[2],width:"60%"},i.createElement(a.TextSmall,null,i.createElement(a.TextSmall,{strong:!0},"Tip:")," Run the command and paste here the key it will give you. If the command doesn't work out of the box, locate the ",p," file, open it in your favorite text editor, and copy it to your clipboard.")),i.createElement(a.TextInput,{value:s,placeholder:"Paste private key here",onChange:e=>r(e.target.value),containerStyles:{width:"60%"}}),i.createElement(U,{label:"Claim your agent",onClick:n,disabled:!s||t,isLoading:t,"data-ga":"claiming::claim-agent::modal",payload:{space:null===l||void 0===l?void 0:l.id,room:null===m||void 0===m?void 0:m.id,privateKey:s}})):i.createElement(i.Fragment,null,i.createElement(u.m,null),i.createElement(a.H3,{textAlign:"center"},'Select a room in "',l.name,'" for this agent'),i.createElement(a.TextBig,{color:"textDescription",textAlign:"center"},"or create a new one by hitting the [+] button.")):i.createElement(i.Fragment,null,i.createElement(u.m,null),i.createElement(a.H3,{textAlign:"center"},"Let's connect your Agent"),i.createElement(a.Flex,{width:90,justifyContent:"center"},i.createElement(a.TextBigger,{color:"textDescription",textAlign:"center",lineHeight:1.5},"Select the Space you want this Agent to join or create a new one by hitting the [+] button.")),i.createElement(w,{column:!!l,selectedSpace:l,setSelectedSpace:c,showFullname:!0,width:"80%"}))},j=e=>{let{onClose:n,keyFilename:t}=e;const{claim:o,selectedSpace:s,setSelectedSpace:r,selectedRoom:c,setSelectedRoom:d,loading:u,error:m,privateKey:p,setPrivateKey:h}=(e=>{var n;const[t,o]=(0,i.useState)(),[a,s]=(0,i.useState)(),[r,c]=(0,i.useState)(""),d=(0,D.A)(null===t||void 0===t?void 0:t.id),u=(0,g.dg)(),m=null===(n=window.localNetdataRegistry)||void 0===n?void 0:n.mg,[{claiming:p,claimingError:h},f]=(0,l.RJ)(m),y=(0,A.uW)("isAnonymous"),[b,_]=(0,S.A)(),w=(0,l.OS)(),v=(0,N.OS)(),T=(0,R.OS)(),{sendLog:k,isReady:x}=(0,H.A)();return{claim:(0,i.useCallback)((()=>{var n;r&&!p&&d&&m&&u&&!y&&a&&(f({claiming:!0}),(0,E.j)({key:r,token:null===d||void 0===d||null===(n=d[0])||void 0===n?void 0:n.token,rooms:[a.id],url:window.envSettings.apiUrl}).then((n=>{let{data:o}=n;if(!o.success)return f({claiming:!1,claimingError:o.message,claimId:null}),_({message:o.message}),void k({feature:"claim-node",isFailure:!0,message:o.message});w(),v(),T(),f({claiming:!1,...o,claimingError:""}),e(),b({header:"Your agent got connected to Netdata",text:"You can see your agent in the room ".concat(a.name," of ").concat(t.name," space.")}),k({feature:"claim-node",isSuccess:!0})})).catch((e=>{var n;const t=null===e||void 0===e||null===(n=e.response)||void 0===n?void 0:n.data,o=(0,M.o)(null===t||void 0===t?void 0:t.errorMsgKey)||(null===t||void 0===t?void 0:t.errorMessage)||"Something went wrong";f({claiming:!1,claimingError:o,claimId:null}),_({message:o}),k({feature:"claim-node",isFailure:!0,message:o})})))}),[d,r,m,u,y,p,k,x]),selectedSpace:t,setSelectedSpace:o,selectedRoom:a,setSelectedRoom:s,loading:p,error:h,privateKey:r,setPrivateKey:c}})(n);return 
i.createElement(a.Modal,{zIndex:80,backdropProps:{backdropBlur:!0}},i.createElement(O.Ay,{feature:"ClaimModal"},i.createElement(a.Flex,{column:!0,alignItems:"end",gap:3},i.createElement(L,{tabIndex:0},i.createElement(a.ModalHeader,{justifyContent:"between"},i.createElement(a.Flex,{gap:2,alignItems:"center"},"Connect your Agent to Netdata Cloud"),i.createElement(a.ModalCloseButton,{onClose:n,testId:"claim-modal-close-button","data-ga":"claiming::close-claim-modal::local"})),i.createElement(a.ModalBody,{overflow:{vertical:"auto"},padding:[0],height:"100%",column:!1},i.createElement(a.Collapsible,{background:"sideBar",open:!!s,direction:"horizontal"},i.createElement(w,{column:!0,selectedSpace:s,setSelectedSpace:r,background:"sideBarMini",height:"100%"})),i.createElement(a.Collapsible,{background:"sideBar",open:!!s,direction:"horizontal"},i.createElement(a.Flex,{flex:!0,width:56,column:!0,overflow:{vertical:"auto"},padding:[2,0]},!!s&&i.createElement(q,{selectedSpace:s,selectedRoom:c,setSelectedRoom:d}))),i.createElement(a.Flex,{alignItems:"center",column:!0,"data-testid":"claim-modal-body",flex:!0,gap:3,justifyContent:"center",padding:[4]},i.createElement(B,{selectedSpace:s,selectedRoom:c,setSelectedSpace:r,keyFilename:t,claim:o,loading:u,error:m,privateKey:p,setPrivateKey:h})))))))},G={unavailable:"Netdata is not available for this agent.",available:"Netdata is available. Click to claim it and gain the full benefits of Netdata!",disabled:"Netdata is available, but it is disabled, you can change the agent configuration to enable it.",banned:"The agent has been banned from cloud.",offline:"The agent tries to connect to Netdata, but it fails to do so.",online:"The agent is already connected to Netdata :)"},F=(0,I.A)(a.Button),z=e=>{const[n,,t,a]=(0,s.A)("claimModal"),[{canBeClaimed:c,cloudStatus:d,keyFilename:u}]=(0,l.RJ)();return i.createElement(i.Fragment,null,i.createElement(r.A,{plain:!0,content:G[d],isBasic:!0},i.createElement("div",null,i.createElement(F,(0,o.A)({label:"Connect",onClick:t},e,{disabled:!c,"data-ga":"claiming::open-claim-modal::local"})))),n&&i.createElement(j,{onClose:a,keyFilename:u}))}},59778:(e,n,t)=>{t.d(n,{M:()=>o,d:()=>i});const o=function(){return/(Community|Early)/.test(arguments.length>0&&void 0!==arguments[0]?arguments[0]:"")?"nightly":"stable"},i={nightly:{description:"Released at most once every 24 hours with fully-tested code that fixes bugs or security flaws, or introduces new features to Netdata. Every nightly release is a candidate for then becoming a stable release.",title:"Nightly"},stable:{description:"Released when a major milestone is reached. 
Stable releases might be a better choice for those who run Netdata in mission-critical production systems, as updates will come more infrequently.",title:"Stable"}}},12740:(e,n,t)=>{t.d(n,{O1:()=>s,OV:()=>l,Yg:()=>r});var o=t(96540),i=t(8711),a=t(83199);const s=(0,i.default)(a.Box).attrs({border:{side:"all",color:"border"},padding:[1.75,7,1.75,3],round:!0}).withConfig({displayName:"styled__InfoBlock",componentId:"sc-1lice8m-0"})(["color:",";font-family:monospace;font-size:14px;letter-spacing:0.09px;line-height:18px;max-height:34px;overflow:hidden;text-overflow:ellipsis;white-space:nowrap;width:100%;"],(0,a.getColor)("textLite")),r=e=>{let{children:n}=e;return o.createElement(a.Flex,{column:!0,gap:4},n)},l=(0,i.default)(a.Button).withConfig({displayName:"styled__StyledButton",componentId:"sc-1lice8m-1"})(["&&{width:auto;min-width:auto;height:22px;background:",";border:1px solid ",";border-radius:4px;padding:0;&:hover{background:",";& > span{span{color:",";}svg{fill:",";}}}& > span{display:flex;align-items:center;gap:6px;margin:2px 8px 2px 4px;span{color:",";}svg{fill:",";}}}"],(e=>{let{active:n,theme:t}=e;return(0,a.getColor)(n?"primary":"modalBackground")({theme:t})}),(0,a.getColor)("primary"),(0,a.getColor)("primary"),(0,a.getColor)("modalBackground"),(0,a.getColor)("modalBackground"),(e=>{let{active:n,theme:t}=e;return(0,a.getColor)(n?"modalBackground":"primary")({theme:t})}),(e=>{let{active:n,theme:t}=e;return(0,a.getColor)(n?"modalBackground":"primary")({theme:t})}))},94944:(e,n,t)=>{t.d(n,{A:()=>s});t(9920),t(98992),t(3949),t(62953);var o=t(96540),i=t(22332),a=t(27467);const s=()=>{const e=(0,i.useChart)(),[{after:n,before:t,forcePlay:s},r]=(0,a.N9)(),l=function(){let o=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{autofetchOnWindowBlur:!1};return()=>{e.getNodes().forEach((e=>e.updateAttributes(o)));const i=n<0?n:-Math.floor((t-n)/1e3);r({after:i,before:0,forcePlay:!!o.autofetchOnWindowBlur})}};return{play:(0,o.useCallback)(l(),[t,n,s]),forcePlay:(0,o.useCallback)(l({autofetchOnWindowBlur:!0}),[t,n,s]),pause:(0,o.useCallback)((()=>{if(n>0)return;const e=Date.now();r({after:1e3*Math.floor(e/1e3+n),before:1e3*Math.floor(e/1e3),forcePlay:!1})}),[n,t,s])}}},80940:(e,n,t)=>{t.d(n,{A:()=>l});var o=t(58168),i=t(8711),a=t(96540),s=t(83199);const r=(0,i.default)(s.Flex).withConfig({displayName:"item__StyledFlex",componentId:"sc-1gby0f1-0"})(["&:hover{background:",";}"],(e=>{let{hasHoverBackground:n,theme:t,hoverBackground:o="borderSecondary"}=e;return n?(0,s.getColor)(o)({theme:t}):null})),l=(0,a.forwardRef)(((e,n)=>{let{background:t,children:i,cursor:l,hasBorder:c,hasHoverBackground:d,onClick:u,padding:m,testId:p,borderColor:h="selected",round:g,hoverBackground:f,...y}=e;return a.createElement(s.Flex,(0,o.A)({alignItems:"center",border:c&&{side:"right",color:h},cursor:l,"data-testid":p,onClick:u,ref:n},y),a.createElement(r,{round:g,alignItems:"center",background:t,hasHoverBackground:d,margin:c?[0,3,0,0]:void 0,padding:m,hoverBackground:f},i))}))},82230:(e,n,t)=>{t.d(n,{A:()=>s});var o=t(83199),i=t(96540);const a="node-status-indicator",s=e=>{let{statusText:n,selected:t,total:s,statusColor:r,isScreenSmall:l}=e;const{statusTextColor:c,indicatorWrapperColor:d,counterColor:u}=r;return 
i.createElement(o.Flex,{alignItems:"center",gap:2},!l&&i.createElement(o.TextSmall,{"data-testid":"".concat(a,"-text-").concat(n),color:c},n),i.createElement(o.Flex,{justifyContent:"center",alignItems:"center",padding:[0,1],width:{min:7.5},height:5,background:d,round:!0,gap:l?.2:1},t===s?i.createElement(o.TextSmall,{"data-testid":"".concat(a,"-total-").concat(n),color:u},s):i.createElement(i.Fragment,null,i.createElement(o.TextSmall,{strong:!0,"data-testid":"".concat(a,"-selected-").concat(n),color:u},t),i.createElement(o.TextSmall,{color:u},l?"/":"of"),i.createElement(o.TextSmall,{"data-testid":"".concat(a,"-total-").concat(n),color:u},s))))}},24398:(e,n,t)=>{t.d(n,{P:()=>o});const o={live:{statusTextColor:"live",indicatorWrapperColor:"live",counterColor:"bright"},stale:{statusTextColor:"stale",indicatorWrapperColor:"stale",counterColor:"bright"},offline:{statusTextColor:"textLite",indicatorWrapperColor:"offline",counterColor:"bright"},unseen:{statusTextColor:"unseen",indicatorWrapperColor:"unseen",counterColor:"bright"}}},11261:(e,n,t)=>{t.d(n,{A:()=>u,q:()=>d});t(41393),t(81454);var o=t(96540),i=t(8711),a=t(83199),s=t(82230),r=t(24398);const l=(0,i.default)(a.Flex).withConfig({displayName:"wrapper__NodesIndicatorWrapper",componentId:"sc-gy5ftg-0"})(["pointer-events:",";cursor:",";opacity:",";"],(e=>{let{disabled:n}=e;return n?"none":"auto"}),(e=>{let{disabled:n}=e;return n?"default":"pointer"}),(e=>{let{disabled:n}=e;return n?"0.7":"1"})),c={live:{counter:0,statusText:"Live"},stale:{counter:0,statusText:"Stale"},offline:{counter:0,statusText:"Offline"},unseen:{counter:0,statusText:"Unseen"}},d=e=>o.createElement(l,e,Object.keys(c).map((e=>o.createElement(s.A,{key:e,statusText:c[e].statusText,counter:c[e].counter,statusColor:r.P[e]})))),u=l},68741:(e,n,t)=>{t.d(n,{A:()=>c});var o=t(58168),i=t(96540),a=t(83199),s=t(22292),r=t(14550);const l=(0,t(92155).A)(a.Button),c=e=>{const n=(0,s.uW)("isAnonymous"),t=(0,r.A)();return n?i.createElement(l,(0,o.A)({label:"Sign In",isStart:!0,onClick:t,"data-ga":"click-sign-in::header",feature:"SignIn"},e)):null}},47431:(e,n,t)=>{t.d(n,{A:()=>h});var o=t(58168),i=(t(62953),t(96540)),a=t(83199),s=t(22292),r=t(46741),l=t(29217),c=t(92155),d=t(19673),u=t(42728),m=t(28061);const p=(0,c.A)(a.Button),h=e=>{let{tooltipProps:n={},...t}=e;const a=(0,s.uW)("isAnonymous"),{value:c}=(0,d.JN)(),{slug:h}=c||{},g=(0,u.Kj)(h),f=(0,r.JT)("billing:Manage"),y=(0,m.A)(),[b,_]=(0,i.useState)();return(0,i.useEffect)((()=>{_(!a&&g&&f)}),[a,g,f]),b?i.createElement(l.A,(0,o.A)({plain:!0,content:"Upgrade to business plan and experience the full power of Netdata!",isBasic:!0},n),i.createElement("div",null,i.createElement(p,(0,o.A)({label:"Upgrade Now!",isStart:!0,onClick:y,feature:"UpgradeToBusiness","data-testid":"upgradeToBusiness-header"},t)))):null}},95598:(e,n,t)=>{t.r(n),t.d(n,{default:()=>an});t(62953);var o=t(96540),i=t(39225),a=t(83199),s=t(22292),r=(t(9920),t(98992),t(3949),t(83957)),l=t(47444),c=t(7484),d=t(3914),u=t(46741);var m=t(90709),p=t(28738),h=t(87659),g=t(12938),f=t(45123),y=t(56915),b=t(69765),_=t(8711);const w=(0,_.default)(a.Drop).attrs({align:{top:"bottom",left:"left"},animation:!0,background:"dropdown",column:!0,margin:[2,0,0],overflow:{vertical:"auto"},padding:[2,0],round:1,width:64}).withConfig({displayName:"styled__Dropdown",componentId:"sc-1vwntsm-0"})([""]),v=(0,_.default)(a.H6).attrs({color:"text",padding:[2,4]}).withConfig({displayName:"styled__OtherRoomsHeader",componentId:"sc-1vwntsm-1"})([""]);var 
T=t(29217),k=t(80940),x=t(37944),P=t(64754),C=t(47731);const q=()=>{const e=(0,C.J)(),[n,t,,i]=(0,h.A)(),s=(0,o.useRef)(),r=(0,b.XA)(),{slug:l}=r,c=(0,d.ap)("name"),m=(0,x.A)({roomSlug:l}),p=(0,u.JT)("room:Read"),_=(0,d.dg)();return o.createElement(o.Fragment,null,o.createElement(a.Flex,{alignItems:"center",gap:1,"data-testid":"header-roomOptions"},o.createElement(f.A,{ref:s,testid:"roomDropdownMenu-roomOptions",icon:"space_new",onClick:t,padding:[2],round:1,width:"auto",disabled:!p||_,selected:n},!e&&r.name&&o.createElement(a.Flex,{alignItems:"center",gap:4},o.createElement(a.Text,null,r.name),o.createElement(a.Icon,{name:"chevron_down",size:"small",color:"text"}))),o.createElement(k.A,{borderColor:"borderSecondary",hasHoverBackground:!0},o.createElement(T.A,{align:"bottom",content:"Room settings"},o.createElement(P.A,{Component:a.IconButton,permission:"room:Leave","data-ga":"header::click-war-room-settings::global-view","data-testid":"header-manageWarRoom",onClick:m,icon:"gear"})))),s.current&&n&&o.createElement(w,{target:s.current,onEsc:i,onClickOutside:i},o.createElement(y.A,{isOpen:!0,label:"ROOMS IN ".concat(c.toUpperCase()),headerTestId:"roomDropdownMenu-otherRooms",Header:v},o.createElement(a.Flex,{onClick:i,column:!0,height:{max:100},overflow:{vertical:"auto"}},o.createElement(g.A,null)))))};var D=t(47762),A=t(49389);const S=e=>{let{description:n,title:t,testId:i,children:s}=e;return o.createElement(a.Flex,{"data-testid":i,column:!0,gap:2},o.createElement(a.H4,{color:"textDescription"},t),o.createElement(a.Flex,{column:!0,"data-testid":i,gap:2},o.createElement(a.Text,{color:"textDescription"},n),s))},M=e=>{let{children:n,testId:t,icon:i="nodes_update"}=e;return o.createElement(a.Flex,{"data-testid":t,padding:[6],round:1,width:"100%",background:"modalInfoBackground",gap:2},o.createElement(a.Box,{as:a.Icon,width:10,height:10,name:i}),n)},E=e=>{let{title:n,desciription:t}=e;return o.createElement(a.Flex,{column:!0,gap:3},o.createElement(a.Flex,{alignItems:"center",gap:2},o.createElement(a.Icon,{color:"main",name:"warning_triangle_hollow"}),o.createElement(a.H3,null,n)),o.createElement(a.Box,null,t))};var N=t(4659),R=(t(41393),t(14905),t(81454),t(8872),t(3296),t(27208),t(48408),t(47767)),H=(t(8159),t(37550),t(33931)),I=t(82432),O=t(81881),L=t(55905);const U=[{id:"name",accessor:"name",header:"Name",cell:e=>{let{getValue:n,row:t}=e;return o.createElement(N.A,{disabled:!t.original.isLive&&"stale"!==t.original.state,color:"text",hoverColor:"primary",visitedColor:"accent",Component:a.TextSmall},n())},minSize:60},{id:"version",accessor:"version",header:"Version",cell:e=>{let{getValue:n}=e;return o.createElement(a.Pill,{color:"neutralPillColor",flavour:"neutral"},n())}},{id:"state",accessor:"state",header:"Status",cell:e=>{let{getValue:n}=e;return o.createElement(H.A,{state:n()})},sortingFn:(e,n)=>{return t=(0,I.GM)(e.original.state),o=(0,I.GM)(n.original.state),t===o?0:t>o?1:-1;var t,o},enableColumnFilter:!0,filterFn:(e,n,t)=>{const o=e.original;return t.length<1||t.some((e=>{let{value:n}=e;return"all"===n||n===(0,I.GM)(o.state)}))},meta:{tooltip:o.createElement(O.A,null),filter:{component:"select",isMulti:!0,options:[{value:"Offline",label:"Offline"},{value:"Live",label:"Live"},{value:"Stale",label:"Stale"}]}}},{id:"updateSeverity",accessor:"updateSeverity",header:"Severity",cell:e=>{let{getValue:n,row:t}=e;const i=t.original;return 
o.createElement(L.A,{name:i.name,os:i.os.id,container:i.hw.container,warningLevel:i.updateSeverity,labels:i.labels||{},version:i.version,text:n()})}}],B=(0,l.eU)({key:"notificationModal",default:!1}),j=function(){let{resetOnUnmount:e=!1}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const n=(0,l.E0)(B),[t,i]=(0,l.L4)(B),a=(0,o.useCallback)((()=>i(!0)),[]),s=(0,o.useCallback)((()=>i(!1)),[]);return(0,o.useEffect)((()=>()=>{e&&n()}),[]),{isModalVisible:t,openModal:a,closeModal:s,resetState:n}},G=[{id:"updateSeverity",desc:!1}],F=()=>{const{closeModal:e}=j(),n=(0,o.useMemo)((()=>({goto:{handleAction:e=>{const{_install_type:n=null}=(null===e||void 0===e?void 0:e.labels)||{},{hw:{container:t},os:{id:o},version:i}=e,a=(0,A.fe)({container:t,os:o,_install_type:n,version:i});window.open(a,"_blank","noopener,noreferrer")},tooltipText:"Go to documentation"}})),[]),t=e=>e.reduce(((e,n)=>e+n.join(",")+"\n"),""),i=(0,o.useMemo)((()=>({download:{handleAction:(e,n)=>{let o=[n.getFlatHeaders().map((e=>e.id))];n.getRowModel().rows.forEach((e=>{o.push(n.getFlatHeaders().map((n=>e.renderValue(n.id))))}));const i=window.URL.createObjectURL(new Blob([t(o)],{type:"text/csv;charset=utf-8;"})),a=document.createElement("a");a.href=i;a.setAttribute("download","Outdated nodes.csv"),document.body.appendChild(a),a.click(),a.remove()},isDisabled:!1,tooltipText:"Download as CSV",icon:"download",confirmation:!1,alwaysEnabled:!0}})),[]),a=(0,R.Zp)(),s=(0,D.Ux)();return{onClickRow:(0,o.useCallback)((n=>{let{data:t}=n;if(!t.isLive&&"stale"!==t.state)return;const o=s(t.id);a(o),e()}),[]),sortBy:G,rowActions:n,columns:U,disableClickRow:e=>{let{data:n}=e;return!n.isLive&&"stale"!==n.state},bulkActions:i}};var z=t(60383);const Y={updateSeverity:!0,connectionToCloud:!1},X=e=>{let{data:n}=e;const{bulkActions:t,rowActions:i,onClickRow:a,disableClickRow:s,sortBy:r}=F();return o.createElement(z.A,{"data-testid":"nodesTable-layout",customSortBy:r,customNodes:n,showDefaultRowActions:!1,showDefaultBulkActions:!1,columnVisibility:Y,customRowActions:i,customBulkActions:t,enableSelection:!1,onClickRow:a,disableClickRow:s})},W=e=>{let{onClose:n}=e;const t=(0,D.Gn)(),i=(0,D.Gt)(t),s=t.length,r="Need update (".concat(t.length," ").concat((0,A.su)(t.length),")");return o.createElement(a.Modal,{backdropProps:{backdropBlur:!0}},o.createElement(a.ModalContent,{width:{min:200,base:270},height:200},o.createElement(a.ModalHeader,null,o.createElement(E,{title:"Nodes that need Attention",desciription:o.createElement(a.Text,null,"You have ",o.createElement(a.Text,{strong:!0},s)," ",(0,A.su)(s)," that should be upgraded to ensure the best experience using Netdata")}),o.createElement(a.ModalCloseButton,{onClose:n,testId:"close-button"})),o.createElement(a.ModalBody,{overflow:"hidden"},o.createElement(M,null,o.createElement(S,{title:r},o.createElement(a.Text,null,o.createElement(a.Flex,{column:!0},o.createElement(a.Text,null,"Please upgrade to ensure you get the latest security and bug fixes.")," ",o.createElement(a.Box,null,"To update your nodes to the latest version"," ",o.createElement(N.A,{href:A.sH.default,rel:"noopener noreferrer",strong:!0,target:"_blank"},"please read our documentation"," "),"and find direct links for each node depending on where it is running and/or how it was installed."))))),o.createElement(X,{data:i}))))};var K=t(45467),$=t(57605),V=t(51112),Z=t(22332),J=t(99851),Q=t(27467);const ee=e=>{let{tagging:n,isPlaying:t}=e;const 
i=(0,C.J)(),[{offset:a,after:s,before:r},l]=(0,Q.N9)(),c=(0,o.useCallback)((e=>{let{start:n=-900,end:t=0}=e;l({after:n,before:n<0?0:t})}),[]),d=(0,o.useMemo)((()=>({start:s<0?s:Math.floor(s),end:s<0?0:Math.ceil(r)})),[s,r]);return o.createElement(J.A,{values:d,utc:a,onChange:c,tagging:n,isPlaying:t,isSmall:i})},ne=(0,_.default)(a.Flex).withConfig({displayName:"container__Container",componentId:"sc-qc13l0-0"})(["background:",";"],(e=>{let{theme:n,isPlaying:t}=e;const{name:o}=n;return("Dark"===o?(0,a.getRgbColor)(t?["green","green40"]:["neutral","grey70"]):(0,a.getRgbColor)(t?["green","green190"]:["neutral","grey185"]))({theme:n})}));var te=t(94944);const oe=(0,_.default)(T.A).withConfig({displayName:"playPausePill__StyledTooltip",componentId:"sc-1umryvu-0"})(["pointer-events:",";"],(e=>{let{isDisabled:n}=e;return n?"none":"auto"})),ie=e=>{let{isPlaying:n,isForcePlaying:t,isDisabled:i}=e;const s=(0,C.J)(),{play:r,pause:l}=(0,te.A)(),c=(0,o.useMemo)((()=>((e,n)=>e?n?"forcePlay":"playSolid":"pauseSolid")(n,t)),[n,t]);return o.createElement(oe,{content:n?"Click to pause":"Click to play",align:"bottom",isDisabled:i},o.createElement(a.Box,{as:a.Pill,icon:c,onClick:n?l:r,isPlaying:n,"data-testid":"playPause-button",flavour:n?"success":"neutral",_hover:{background:n?"accent":"neutralPillColor"}},s?null:n?"Playing":"Paused"))},ae=(0,_.default)(a.Flex).attrs({padding:[1],role:"button"}).withConfig({displayName:"styled__MenuButton",componentId:"sc-3h0zgs-0"})(["cursor:pointer;"]),se=(0,_.default)(a.Drop).attrs({align:{top:"bottom",left:"left"},animation:!0,backdrop:!0,column:!0,padding:[2,0],background:"dropdown",round:1,overflow:{vertical:"auto"},margin:[.5,0,0],width:40}).withConfig({displayName:"styled__Dropdown",componentId:"sc-3h0zgs-1"})([""]),re=(0,_.default)(T.A).withConfig({displayName:"styled__StyledTooltip",componentId:"sc-3h0zgs-2"})(["pointer-events:",";"],(e=>{let{isDisabled:n}=e;return n?"none":"auto"})),le=e=>{let{target:n,isDisabled:t}=e;const[i,s,,r]=(0,h.A)(),{play:l,pause:c,forcePlay:d}=(0,te.A)();return o.createElement(o.Fragment,null,i?o.createElement(ae,{onClick:s,width:"auto","data-testid":"playOptions-picker"},o.createElement(a.Icon,{name:"chevron_down",color:"text",width:"12px",height:"12px"})):o.createElement(re,{content:"Play to refresh and have live content, pause to see historical, or force play to keep refreshing even when the tab loses focus at the expense of some system performance.",align:{bottom:"bottom",right:"right"},isDisabled:t,plain:!0},o.createElement(ae,{onClick:s,width:"auto","data-testid":"playOptions-picker"},o.createElement(a.Icon,{name:"chevron_down",color:"text",width:"12px",height:"12px"}))),n.current&&i&&!t&&o.createElement(se,{target:n.current,onEsc:r,onClickOutside:r},o.createElement(f.A,{icon:"playOutline",onClick:()=>{l(),r()},testid:"playOptions-play"},"Play"),o.createElement(f.A,{icon:"pauseOutline",onClick:()=>{c(),r()},testid:"playOptions-pause"},"Pause"),o.createElement(f.A,{icon:"forcePlayOutline",onClick:()=>{d(),r()},testid:"playOptions-forcePlay"},"Force Play")))},ce=(0,o.memo)(le),de=()=>{const 
e=(0,C.J)(),n=(0,V.yD)(),t=(0,V.mQ)(),i=(0,V.A_)(),a=(0,o.useRef)(),s=(0,V.gC)(),r=(0,Z.useAttributeValue)("autofetchOnWindowBlur"),l=(0,$.A)();return(0,K.A)((()=>{l(!!n||!!t)}),[n,t]),o.createElement(k.A,{hasBorder:!0,borderColor:"border",testId:"global-controls"},o.createElement(ne,{isPlaying:s,padding:e?[1]:[1,2],round:!0,height:"100%",alignItems:"center",gap:e?.2:1,isDisabled:n,ref:a},o.createElement(ie,{isPlaying:s,isForcePlaying:r,isDisabled:n||t}),o.createElement(ce,{target:a,isDisabled:n||t}),!i&&o.createElement(ee,{isPlaying:s,tagging:"global-view"})))};var ue=t(67990),me=t(37618);const pe=e=>{let{toggle:n,upToDate:t,hasBorder:i}=e;return o.createElement(k.A,{borderColor:"borderSecondary",hasHoverBackground:!0,hasBorder:i,"data-testid":"cloud-app-news"},o.createElement(T.A,{content:"News & Announcements",align:"bottom"},o.createElement(a.IconButton,{icon:"insights",iconColor:t?"text":"attention",onClick:n,"data-ga":"news::click-news::global-view",neutral:t})))},he=()=>{const e=(0,ue.CK)().length>0;return o.createElement(a.News,{app:me.Ay?(0,me.I)()?"agent":["cloud","agent"]:"cloud"},(n=>{let{toggle:t,upToDate:i}=n;return o.createElement(pe,{toggle:t,upToDate:i,hasBorder:!e})}))};var ge=t(11261),fe=(t(93514),t(5396)),ye=t(78969),be=t(65566),_e=t(75542);const we={warning:{background:"warningBackground",isDismissable:!0,icon:"warning_triangle_hollow",iconColor:"warning"},critical:{background:"errorBackground",isDismissable:!0,icon:"warning_triangle_hollow",iconColor:"error"}},ve=e=>{let{iconColor:n,icon:t,warningLevel:i,numberOfNodesWithCriticalSeverity:s,onClickUpdate:r}=e;return o.createElement(a.Flex,{justifyContent:"center",alignItems:"center",width:"100%",gap:2},o.createElement(a.Icon,{"data-testid":"icon-banner-agent-outdated-".concat(i),color:n,name:t})," ",o.createElement(a.Text,{"data-testid":"text-agent-outdated-".concat(i),color:"main"},s," ",(0,A.su)(s)," ",s>1?"are":"is"," below the recommended agent version"," ",ye.Qy,"."," ",o.createElement(a.Box,{"data-testid":"open-add-node",onClick:r,as:a.Text,cursor:"pointer",textDecoration:"underline",color:"main"},"Please update them")," ","to ensure you get the latest security and bug fixes."))},Te=e=>{let{warningLevel:n,numberOfNodesWithCriticalSeverity:t,onClose:i,onClickUpdate:a}=e;const{background:s,isDismissable:r,icon:l,iconColor:c}=we[n],u=(()=>{const{id:e}=(0,_e.A)(),n=(0,d.vt)(),t=(0,D.Gn)(),i=(0,D.Gt)(t),a=Object.entries(i.reduce(((e,n)=>{let{version:t}=n;return{...e,[t]:(e[t]||0)+1}}),{})).flat().join("_");return(0,o.useCallback)((()=>"dismissed-agent-version-manager-banner-".concat(e,"-").concat(n,"-").concat(a)),[e,n,a])})(),{dismissed:m,onClose:p}=(0,be.A)({getLocalStorageKey:u,logKey:"AgentVersionManagerDissmiss"}),h=(0,o.useCallback)((()=>{p(),i&&i()}),[p]);return m?null:o.createElement(fe.A,{background:s,onClose:r?h:null},o.createElement(ve,{iconColor:c,icon:l,warningLevel:n,numberOfNodesWithCriticalSeverity:t,onClickUpdate:a}))},ke=()=>{const{openModal:e}=j(),n=(0,D.Gn)({severity:"critical"}).length;return n?o.createElement(Te,{numberOfNodesWithCriticalSeverity:n,warningLevel:"critical",onClickUpdate:e}):o.createElement(o.Fragment,null)},xe=e=>{let{onOpenModalClick:n}=e;const t=(0,D.Gn)({severity:"critical"}).length,i=(0,D.Gn)().length;return 
i?o.createElement(k.A,{borderColor:"borderSecondary",cursor:"pointer",testId:"update-netdata-version-menu-item",hasBorder:!1,padding:[2,0]},o.createElement(a.Pill,{"data-testid":"update-netdata-version-badge",flavour:t?"error":"warning",icon:"warning_triangle_hollow",hollow:!0,onClick:n},i)):null};var Pe=t(15255),Ce=t(40982),qe=t(68741),De=t(47431);const Ae={offline:{icon:"switch_off",color:"attention",text:"Offline"},online:{icon:"checkmark_s",color:"primary",text:"Online"}},Se=()=>{const e=(0,d.dg)(),n=(0,s.uW)("isAnonymous"),[{canBeClaimed:t,cloudStatus:i}]=(0,Pe.RJ)();if(n)return o.createElement(qe.A,null);if(!e)return o.createElement(De.A,null);if(Ae[i]){const{icon:e,color:n,text:t}=Ae[i];return o.createElement(a.Flex,{alignItems:"center",gap:1},o.createElement(a.Icon,{name:e,color:n,width:"14px",height:"14px"}),o.createElement(a.TextSmall,{color:n},t))}return t?o.createElement(Ce.A,null):null},Me=(0,i.A)((()=>Promise.all([t.e(7144),t.e(7857),t.e(8239),t.e(9473),t.e(9292),t.e(6469)]).then(t.bind(t,62727))),"NodesIndicator"),Ee=()=>{const e=(0,C.J)(),{isModalVisible:n,closeModal:t,openModal:i}=j();return o.createElement(o.Fragment,null,o.createElement(ke,null),o.createElement(a.Flex,{as:"header","data-testid":"header",padding:e?[1]:[2],justifyContent:"between",alignItems:"center",height:12,position:"sticky",background:"topBarBg",zIndex:5,overflow:{horizontal:"auto"}},n&&o.createElement(W,{onClose:t}),o.createElement(q,null),o.createElement(a.Flex,{alignItems:"center","data-testid":"header-features",gap:e?1:3},!window.envSettings.onprem&&o.createElement(he,null),o.createElement(de,null),o.createElement(o.Suspense,{fallback:o.createElement(ge.q,{disabled:!0})},o.createElement(Me,null)),o.createElement(Se,{margin:[0,1,0,0],padding:[2,4]}),o.createElement(xe,{onOpenModalClick:i}))))};var Ne=t(74293);const Re=(0,_.default)(a.Flex).withConfig({displayName:"layout__UserControlContainer",componentId:"sc-18en99b-0"})(["bottom:0;left:0;"]),He=e=>{let{children:n}=e;return o.createElement(a.Flex,{width:"100vw",height:"100vh",column:!0,justifyContent:"center",alignItems:"center",background:"mainBackground",position:"relative"},n,o.createElement(Re,{position:"absolute",padding:[4]},o.createElement(Ne.A,null)))},Ie=(0,_.keyframes)(["from{opacity:0.4;}to{opacity:1;}"]),Oe=(0,_.default)(a.Icon).withConfig({displayName:"loading__StyledIcon",componentId:"sc-11p1wp-0"})(["width:208px;height:177px;animation:"," 1.6s ease-in infinite;"],Ie),Le=()=>o.createElement(o.Fragment,null,o.createElement(Oe,{name:"netdata",color:"primary",title:"Loading","data-testid":"spaceCreationLoading-logo"}),o.createElement(a.H3,{color:"text",margin:[1,0,0]},"We are attempting to create your space but the system is currently busy."),o.createElement(a.Text,{color:"text",margin:[4.5,0,0]},"Thank you for your patience!"));var Ue=t(72641);const Be=()=>{const e=(0,s.NJ)(),n=(0,s.uW)("email"),[t,i]=(0,o.useState)(!1),r=(0,o.useCallback)((()=>i(!1)),[]),l=(0,Ue.A)({onSuccess:r,onError:r,isDefault:!0}),c=(0,o.useCallback)((()=>{i(!0),l({userId:e,email:n})}),[e,n]);return o.createElement(a.Flex,{column:!0,gap:6,alignItems:"center"},o.createElement(a.Flex,{column:!0,gap:2,alignItems:"center"},o.createElement(a.H3,{color:"text"},"There was a problem with automatically creating your space"),o.createElement(a.Text,{color:"text"},"Please try again below")),o.createElement(a.Button,{label:"Continue",isLoading:t,onClick:c}))},je=()=>{const[e,n]=(0,o.useState)(!0);return(0,o.useEffect)((()=>{const 
e=setTimeout((()=>n(!1)),1e4);return()=>clearTimeout(e)}),[]),o.createElement(He,null,e?o.createElement(Le,null):o.createElement(Be,null))};var Ge=t(43529);const Fe=()=>o.createElement(a.Flex,{background:"sideBarMini","data-testid":"workspaceBar",padding:[3,2],column:!0,alignItems:"center",gap:6,height:"100vh",justifyContent:"between"},o.createElement(Ge.A,{"data-testid":"workspaceBar-netdataLogo"}));var ze=t(69418),Ye=t(29848);const Xe=(0,i.A)((()=>t.e(934).then(t.bind(t,60934))),"AlertConfigurationManagement"),We=(0,i.A)((()=>Promise.all([t.e(7436),t.e(7170)]).then(t.bind(t,57436)).then((e=>({default:e.TrialWarning})))),"TrialWarning"),Ke=(0,i.A)((()=>Promise.all([t.e(6384),t.e(8505)]).then(t.bind(t,8505))),"OnPremWarning"),$e=(0,i.A)((()=>t.e(5426).then(t.bind(t,45426))),"AgentNotSecure"),Ve=(0,i.A)((()=>t.e(4680).then(t.bind(t,84680))),"SpaceSidebar"),Ze=(0,i.A)((()=>Promise.all([t.e(6323),t.e(5700)]).then(t.bind(t,95700))),"Modals"),Je=(0,i.A)((()=>t.e(5246).then(t.bind(t,75246))),"AcceptTermsDialog"),Qe=(0,i.A)((()=>Promise.all([t.e(7144),t.e(7857),t.e(1220),t.e(3455),t.e(8239),t.e(9473),t.e(9292),t.e(3843)]).then(t.bind(t,41198))),"SpacePages"),en=(0,i.A)((()=>t.e(4034).then(t.bind(t,24034))),"DndContext"),nn=(0,i.A)((()=>t.e(1782).then(t.bind(t,1782))),"AgentBanner"),tn=(0,i.A)((()=>Promise.all([t.e(8239),t.e(252)]).then(t.bind(t,11726))),"DynamicConfigurationModal"),on=(0,o.memo)((()=>{const{isIntegrationsPath:e}=(0,ze.Q)(),n=(0,Ye.Hs)();return o.createElement(en,null,o.createElement(a.Flex,{overflow:"hidden",height:"100vh",width:"100vw"},o.createElement(o.Suspense,{fallback:o.createElement(Fe,null)},o.createElement(Ve,null)),o.createElement(a.Flex,{column:!0,position:"relative",overflow:"hidden",flex:!0},o.createElement(o.Suspense,{fallback:""},o.createElement(We,null),o.createElement(nn,null)),!n&&!e&&o.createElement(Ee,null),o.createElement(a.Flex,{column:!0,as:"main",background:"mainBackground",flex:!0,basis:"100%",height:"100%",overflow:"hidden"},o.createElement(o.Suspense,{fallback:o.createElement(p.A,{title:"Loading your space..."})},o.createElement(Qe,null))),o.createElement(o.Suspense,{fallback:""},o.createElement(Ke,null),o.createElement($e,null))),o.createElement(o.Suspense,{fallback:""},o.createElement(Ze,null),o.createElement(Je,null)),o.createElement(Xe,null),o.createElement(tn,null)))})),an=e=>{let{isUserLoaded:n}=e;!function(){let{isUserLoaded:e}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const n=(0,l.Zs)((e=>{let{set:n}=e;return e=>{let{results:t}=e;n((0,d.Jn)(),t),t.forEach((e=>{let{id:t,permissions:o}=e;n((0,u.Dk)(t),o)}))}}),[]);(0,r.A)((()=>({key:"spaces",cache:!1,autorun:!!e,fetch:c.EL,association:{getIds:()=>(0,d.nC)("ids"),getError:()=>(0,d.nC)("error"),getLoaded:()=>(0,d.nC)("loaded"),getUpdatedAt:()=>(0,d.nC)("updatedAt")},getResource:e=>(0,d.U2)({id:e}),onReceive:n,pollingOptions:{pollingInterval:412e3}})),[!!e])}({isUserLoaded:n}),(0,m.A)();const t=(0,d.Pk)(),i=(0,d.UV)("loaded"),a=(0,d.UV)("error"),h=(0,s.uW)("isAnonymous");if(!n||!a&&!h&&!i)return o.createElement(p.A,{title:"Loading your space..."});const g=t.length;if(a&&!g)throw a;return me.Ay||h||g?o.createElement(on,null):o.createElement(je,null)}},43529:(e,n,t)=>{t.d(n,{A:()=>s});var o=t(58168),i=t(96540),a=t(83199);const s=e=>i.createElement(a.Icon,(0,o.A)({name:"netdataPress",color:"success",height:"32px",width:"32px"},e))},36091:(e,n,t)=>{t.d(n,{A:()=>d});var o=t(58168),i=t(96540),a=t(13083),s=t(83199);const r=()=>i.createElement(s.Text,null,"Something went wrong during document 
parsing"),l=e=>n=>{let{transformConfiguration:t={},validationConfig:s={},validate:l,errorComponent:c,children:d,...u}=n;const{tree:m,errors:p}=(e=>{let{markdocContent:n,validate:t,validationConfig:o,transformConfiguration:s}=e;return(0,i.useMemo)((()=>{const e=a.Ay.parse("string"===typeof n?n:"");let i=[];return t&&(i=a.Ay.validate(e,o)),{tree:i.length?null:a.Ay.transform(e,s),errors:i}}),[n,t,o,s])})({markdocContent:d,validate:l,validationConfig:s,transformConfiguration:t}),h=c&&"function"==typeof c?c:r;return p.length?i.createElement(h,{errors:p}):i.createElement(e,(0,o.A)({tree:m},u))},c=(0,i.memo)(l((e=>{let{tree:n,renderConfiguration:t}=e;return a.Ay.renderers.react(n,i,t)}))),d=(l((e=>{let{tree:n}=e;return a.Ay.renderers.html(n)})),c)},45123:(e,n,t)=>{t.d(n,{A:()=>m,G:()=>c});var o=t(58168),i=t(96540),a=t(8711),s=t(83199),r=t(47767);const l=e=>e?"selected":"modalBackground",c=(0,a.default)(s.Flex).withConfig({displayName:"item__PanelRowContainer",componentId:"sc-lf007z-0"})(["cursor:pointer;&:hover{background:",";}",""],(e=>(0,s.getColor)(l(e.isSidebar))(e)),(e=>e.selected&&"background: ".concat((0,s.getColor)(l(e.isSidebar))(e),";"))),d=(0,a.default)(s.Icon).withConfig({displayName:"item__StyledIcon",componentId:"sc-lf007z-1"})(["flex:0 0 auto;"]),u=(0,a.default)(s.IconComponents.LoaderIcon).withConfig({displayName:"item__StyledLoaderIcon",componentId:"sc-lf007z-2"})(["flex:0 0 auto;height:16px;width:16px;"]),m=(0,i.forwardRef)(((e,n)=>{let{disabled:t,children:a,Wrapper:l=s.Text,textProps:m,to:p="",onClick:h,testid:g,icon:f,iconHeight:y="16px",iconWidth:b="16px",iconColor:_="menuItem",textColor:w="menuItem",padding:v=[2,4],margin:T=[0],round:k=0,gap:x=3,actions:P,selected:C,loading:q,width:D="100%",isSidebar:A=!1,isSecondary:S,...M}=e;const E=(0,r.Zp)(),N=(0,i.useCallback)((()=>{t||(h&&h(),p&&E(p))}),[h,t,p]),R=M["data-ga"]||"";return i.createElement(c,(0,o.A)({ref:n,flexWrap:!1,justifyContent:"between",alignItems:"center",padding:v,margin:T,round:k,onClick:N,"data-testid":g,width:D,selected:C,disabled:t,isSidebar:A},R&&{"data-ga":R}),i.createElement(s.Flex,{alignItems:"center",gap:x,flex:!0,basis:""},q?i.createElement(u,null):"string"===typeof f?i.createElement(d,{name:f,disabled:t,color:_,height:y,width:b}):f,!!a&&i.createElement(l,(0,o.A)({opacity:t?"medium":void 0,width:"150px",color:w,italic:S},m),a)),P)}))},56915:(e,n,t)=>{t.d(n,{A:()=>c,N:()=>r});var o=t(58168),i=t(96540),a=t(8711),s=t(83199);const r=(0,a.default)(s.H4).attrs({padding:[0,2],background:"error"}).withConfig({displayName:"list__DefaultListHeader",componentId:"sc-5df7lw-0"})(["cursor:pointer;pointer-events:",";"],(e=>{let{disabled:n}=e;return n?"none":"auto"})),l=e=>{let{disabled:n,toggleOpen:t,label:o,testid:a,Header:s=r}=e;return i.createElement(s,{"data-testid":a,onClick:t,opacity:n?"medium":void 0,disabled:n},o)},c=e=>{let{disabled:n,isOpen:t=!1,toggleOpen:a,label:r,children:c,headerTestId:d,Header:u,...m}=e;return i.createElement(s.Flex,(0,o.A)({column:!0},m),i.createElement(l,{disabled:n,Header:u,toggleOpen:a,label:r,testid:d}),i.createElement(s.Collapsible,{open:t},c))}},55905:(e,n,t)=>{t.d(n,{A:()=>u});var o=t(58168),i=t(96540),a=t(83199),s=t(78969),r=t(49389),l=t(80158);const c={warning:{icon:"warning_triangle_hollow",flavour:"warning",reccomendedVersion:s.fx,warningText:"is below the latest stable agent version"},critical:{icon:"warning_triangle_hollow",flavour:"error",reccomendedVersion:s.Qy,warningText:"is below the recommended agent 
version"}},d=e=>{let{icon:n,iconColor:t,warningLevel:o,handeleNavigateToDocs:s,message:r}=e;return i.createElement(a.Flex,{column:!0,width:{max:"200px"},gap:2},i.createElement(a.Flex,{alignItems:"center",gap:2},i.createElement(a.Icon,{name:n,color:t}),i.createElement(a.TextMicro,null,(0,l.Zr)(o))),i.createElement(a.Flex,{column:!0,gap:2},i.createElement(a.TextMicro,null,r),i.createElement(a.TextMicro,{"data-testid":"text-agent-outdated-critical"},i.createElement(a.Box,{"data-testid":"open-add-node",onClick:s,as:a.TextMicro,cursor:"pointer",textDecoration:"underline"}," ","Please update them"," "),"to ensure you get the latest security and bug fixes.")))},u=e=>{let{warningLevel:n,text:t="Needs Update",container:s,os:u,name:m,labels:p,version:h,...g}=e;const{icon:f,flavour:y,hollow:b,reccomendedVersion:_,iconColor:w,warningText:v}=c[n],T=(0,i.useCallback)((()=>{const e=(0,r.fe)({container:s,os:u,version:h,...p});window.open(e,"_blank","noopener,noreferrer")}),[s,u]),k="".concat(m," ").concat(v," ").concat(_);return i.createElement(a.Tooltip,{allowHoverOnTooltip:!0,content:i.createElement(d,{message:k,icon:f,iconColor:w,warningLevel:n,handeleNavigateToDocs:T})},i.createElement(a.Pill,(0,o.A)({icon:f,flavour:y,hollow:b},g),(0,l.Zr)(t)))}},33931:(e,n,t)=>{t.d(n,{A:()=>r});var o=t(96540),i=t(82432),a=t(78969),s=t(83199);const r=e=>{let{state:n,rawState:t}=e;const r=t||(0,i.GM)(n),{flavour:l}=a.J4[r.toLocaleLowerCase()];return o.createElement(s.Pill,{flavour:l},r)}},81881:(e,n,t)=>{t.d(n,{A:()=>a});var o=t(96540),i=t(83199);const a=()=>o.createElement(i.Flex,{column:!0,width:{max:"200px"},gap:2},o.createElement(i.TextMicro,null,o.createElement(i.TextMicro,{strong:!0},"Live:")," Node is actually collecting and streaming metrics to Netdata"),o.createElement(i.TextMicro,null,o.createElement(i.TextMicro,{strong:!0},"Stale:")," Node is currently offline and not streaming metrics to Netdata. 
It can show historical data from a parent node"),o.createElement(i.TextMicro,null,o.createElement(i.TextMicro,{strong:!0},"Offline:")," Node is currently offline, not streaming metrics to Netdata and not available in any parent node"),o.createElement(i.TextMicro,null,o.createElement(i.TextMicro,{strong:!0},"Unseen:")," Nodes have never been connected to Netdata, they are claimed but no successful connection was established"))},83084:(e,n,t)=>{t.d(n,{A:()=>r});var o=t(58168),i=t(96540),a=t(83199),s=t(76051);const r=(0,i.forwardRef)(((e,n)=>{let{children:t,containerProps:r,sidebar:l=null,blurred:c,blurProps:d,BlurredContent:u,widthAuto:m=!1,...p}=e;return i.createElement(a.Flex,(0,o.A)({id:"main",height:"100%",width:m?"auto":"100%",position:"relative",overflow:"hidden",flex:"1"},r),i.createElement(a.Flex,(0,o.A)({column:!0,overflow:"hidden",width:m?"auto":"100%"},p,{ref:n}),t),l,c&&i.createElement(s.I1,d,u?i.createElement(u,null):null))}))},76634:(e,n,t)=>{t.d(n,{A:()=>i});var o=t(46741);const i=e=>{let{permission:n,spaceId:t,children:i}=e;return(0,o.JT)(n,t)?i:null}},64754:(e,n,t)=>{t.d(n,{A:()=>r});var o=t(58168),i=t(96540),a=t(83199),s=t(46741);const r=(0,i.forwardRef)(((e,n)=>{let{ifForbidden:t="disabled",permission:r,spaceId:l,Component:c=a.Button,...d}=e;const u=(0,s.JT)(r,l);return"hide"!==t||u?i.createElement(c,(0,o.A)({ref:n,disabled:"disabled"===t&&!u||d.disabled},d)):null}))},53285:(e,n,t)=>{t.d(n,{A:()=>s});var o=t(58168),i=t(96540),a=t(46741);const s=e=>{let{Component:n,ifForbidden:t="disabled",permission:s,children:r,spaceId:l,...c}=e;const d=(0,a.JT)(s,l);return"hide"!==t||d?"function"===typeof r?r(d):n?i.createElement(n,(0,o.A)({},c,{disabled:"disabled"===t&&!d||c.disabled})):r:null}},32089:(e,n,t)=>{t.d(n,{A:()=>s});var o=t(58168),i=t(96540),a=t(83199);const s=e=>{let{vertical:n,color:t="borderSecondary",...s}=e;return i.createElement(a.Box,(0,o.A)({as:"hr",height:n?"100%":"1px"},n?{}:{width:"100%"},{sx:{borderWidth:n?"0px 0px 0px 1px":"1px 0px 0px 0px",borderColor:t,borderStyle:"solid"}},s))}},57375:(e,n,t)=>{t.d(n,{t:()=>a});var o=t(8711),i=t(83199);const a=(0,o.default)(i.Flex).attrs((e=>({padding:[3,3,0],width:"100%",background:"modalTabsBackground",...e}))).withConfig({displayName:"tabs__TabHeader",componentId:"sc-1ramyo4-0"})([""])},23630:(e,n,t)=>{t.d(n,{A:()=>r});t(62953);var o=t(96540),i=t(67742),a=t(83199),s=t(33222);const r=e=>{let{text:n,updatedAt:t,error:r}=e;const[,l]=(0,o.useState)(),c=(e=>{if(!e)return"";const n=new Date(e);return n.getTime()?(0,s.A)(n,new Date):""})(t);return(0,i.A)((()=>l(Math.random())),1e3),o.createElement(a.Flex,{column:!0,gap:3},o.createElement(a.TextSmall,{color:"tooltipText"},n),r&&o.createElement(a.Flex,{alignItems:"center",gap:2},o.createElement(a.Icon,{width:14,height:12,color:"text",name:"warning_triangle"}),o.createElement(a.TextSmall,null,"Error: ",c?"Data not updated":"No data")),c&&o.createElement(a.TextMicro,null,o.createElement(a.TextMicro,{color:"tooltipText"},"Last updated: "),c))}},83179:(e,n,t)=>{t.d(n,{A:()=>l});var o=t(58168),i=(t(17333),t(41393),t(98992),t(54520),t(81454),t(96540)),a=t(83199),s=t(14994);const r={closeMenuOnSelect:!1,backspaceRemovesValue:!1,isClearable:!1,blurInputOnSelect:!0,captureMenuScroll:!0,isMulti:!0},l=e=>{let{selectedValue:n,onChange:t,formatOptions:l,filterValues:c,placeholder:d="search..."}=e;const u=(0,s.DL)(),m=(0,i.useMemo)((()=>u.map((e=>{let{id:n,name:t}=e;return{label:t,value:n,...l&&l({id:n,name:t})}}))),[u,l]),p=(0,i.useMemo)((()=>m.filter((e=>{let{label:t,value:o}=e;return 
n.includes(o)||c&&c({label:t,value:o})}))),[m,n,c]),h=(0,i.useCallback)((e=>{const n=e.map((e=>{let{value:n}=e;return n}));t(n)}),[t]);return i.createElement(a.Select,(0,o.A)({placeholder:d},r,{options:m,value:p,onChange:h}))}},25950:(e,n,t)=>{t.d(n,{U:()=>i,u:()=>a});t(14905),t(98992),t(8872);const o=[{iconName:"osAlpineLinux",logoFilename:"alpine.svg",name:"alpine",osNames:["Alpine Linux"],title:"Alpine Linux"},{iconName:"osAmazonLinux",logoFilename:"linux.svg",name:"linux",osNames:["Amazon Linux","Amazon Linux AMI"],title:"Amazon Linux"},{iconName:"osArchLinux",logoFilename:"arch.svg",name:"archarm",osNames:["Archcraft","Arch Linux","Arch Linux 32","Arch Linux ARM","ArchLabs","ArchLinux"],title:"Arch Linux"},{iconName:"osCentos",logoFilename:"centos.svg",name:"centos",osNames:["CentOS","CentOS Linux","CentOS Stream"],title:"CentOS"},{iconName:"osCoreOS",logoFilename:"coreos.svg",name:"coreos",osNames:["Container Linux by CoreOS"],title:"CoreOS"},{iconName:"osDebian",logoFilename:"debian.svg",name:"debian",osNames:["Debian GNU/Linux"],title:"Debian"},{iconName:"osFedora",logoFilename:"fedora.svg",name:"fedora",osNames:["Fedora","Fedora Linux","Fedora Remix for WSL"],title:"Fedora"},{iconName:"osFreeBSD",logoFilename:"freebsd.svg",name:"freebsd",osNames:["FreeBSD"],title:"FreeBSD"},{iconName:"serviceFreeNAS",logoFilename:"freenas.svg",name:"freenas",osNames:[],title:"FreeNAS"},{iconName:"osGentoo",logoFilename:"gentoo.svg",name:"gentoo",osNames:["Gentoo"],title:"Gentoo"},{iconName:"serviceKubernetes",logoFilename:"kubernetes.svg",name:"kubernetes",osNames:[],title:"Kubernetes"},{iconName:"osLinux",logoFilename:"linux.svg",name:"linux",osNames:[],title:"Linux"},{iconName:"osMacOSX",logoFilename:"macos.svg",name:"macos",osNames:["mac"],title:"MacOS"},{iconName:"osMacOSX",logoFilename:"macos.svg",name:"mac",osNames:["mac"],title:"MacOS"},{iconName:"osLinuxManjaro",logoFilename:"manjaro.svg",name:"manjaro",osNames:["Manjaro Linux","Manjaro-ARM"],title:"Manjaro"},{iconName:"serviceOpenStack",logoFilename:"openstack.svg",name:"openstack",osNames:[],title:"OpenStack"},{iconName:"osSuseLinux",logoFilename:"opensuse.svg",name:"opensuse",osNames:["openSUSE","openSUSE Leap","openSUSE Leap Micro","openSUSE MicroOS","openSUSE Tumbleweed"],title:"Open Suse"},{iconName:"serviceOpenWrt",logoFilename:"openwrt.svg",name:"openwrt",osNames:["OpenWrt"],title:"OpenWRT"},{iconName:"osOracle",logoFilename:"oracle.svg",name:"oracle",osNames:["Oracle Linux Server"],title:"Oracle Linux"},{iconName:"servicePfSense",logoFilename:"pfsense.svg",name:"pfsense",osNames:[],title:"PFSense"},{logoFilename:"raspberry-pi.svg",name:"raspberry-pi",osNames:[],title:"Raspberry PI"},{iconName:"osRaspbian",name:"raspbian",osNames:["Raspbian","Raspbian GNU/Linux"],title:"Raspbian"},{iconName:"osRedHat",logoFilename:"redhat.svg",name:"redhat",osNames:["Red Hat Enterprise Linux","Red Hat Enterprise Linux Server","Red Hat Enterprise Linux Workstation","RedHatEnterpriseServer"],title:"Red Hat Enterprise Linux"},{iconName:"osSuseLinux",logoFilename:"suse.svg",name:"suse",osNames:[],title:"Suse"},{iconName:"osUbuntu",logoFilename:"ubuntu.svg",name:"ubuntu",osNames:["Ubuntu","Ubuntu Core"],title:"Ubuntu"}],i=o.reduce(((e,n)=>({...e,[n.name]:n})),{}),a=o.reduce(((e,n)=>({...e,...n.osNames.reduce(((e,t)=>({...e,[t]:n})),{})})),{})},74293:(e,n,t)=>{t.d(n,{A:()=>A});var o=t(58168),i=(t(17333),t(41393),t(98992),t(54520),t(81454),t(62953),t(8711)),a=t(96540),s=t(83199),r=t(55337),l=t(45123);const 
c=i.default.div.withConfig({displayName:"styled__Divider",componentId:"sc-14bbmjw-0"})(["background:",";height:1px;width:auto;margin:"," ",";"],(0,s.getColor)("disabled"),(0,s.getSizeBy)(1),(0,s.getSizeBy)(2));var d=t(96083),u=t(87659),m=t(22292),p=t(11604),h=t(14550),g=(t(9391),t(15327)),f=t(74618),y=t(57375),b=t(54972),_=t(63314);const w={flex:"grow",height:"100%",overflow:"auto"},v={flex:"1",height:"100%",overflow:"auto"},T=e=>{let{onClose:n}=e;const[t,i]=(0,a.useState)(!1),[r,l]=(0,a.useState)(!1),[c,d]=(0,a.useState)({}),{handleChangeTab:u,activeTabIndex:p,tabsByName:h,tabs:T}=(0,b.A)(),k=(0,m.qO)(void 0,{shouldPersist:!0,onSuccess:n}),x=(0,a.useCallback)((()=>{i(!0),k(c).finally((()=>i(!1)))}),[c]),P=(0,a.useMemo)((()=>T.map((e=>{const{label:t,testId:i,Component:r}=h[e];return a.createElement(s.Tab,{key:e,label:t,"data-testid":"userProfileModal-".concat(i)},a.createElement(_.Ay,{tab:e},a.createElement(r,(0,o.A)({},"profile"===e&&{setFormState:d,setFormValid:l},{onClose:n}))))}))),[T]);return a.createElement(g.GO,{onClose:n},a.createElement(_.Ay,{feature:"UserSettings"},a.createElement(f.z,{onClose:n,title:"Settings"},"profile"===T[p]&&a.createElement(s.Button,{disabled:!r,label:"Save",onClick:x,isLoading:t,loadingLabel:"saving..."})),a.createElement(s.Tabs,{flex:"1",height:"100%",overflow:"hidden",selected:p,onChange:u,TabContent:g.Yv,TabsHeader:y.t,tabsProps:w,tabContentProps:v},P)))};var k=t(29217);const x=(0,i.default)(s.Flex).attrs({column:!0,round:1}).withConfig({displayName:"dropdown__Dropdown",componentId:"sc-w0ej6i-0"})(["box-shadow:0 4px 4px rgba(0,0,0,0.25);"]);var P=t(37618);const C=(0,t(92155).A)(l.A),q={"Operational Status":()=>!0,"Sign In":e=>{let{isAnonymous:n}=e;return n},Settings:e=>{let{isAgent:n,isAnonymous:t}=e;return n||!t},"Sign Out":e=>{let{isAnonymous:n}=e;return!n}},D=(0,i.default)(s.Button).withConfig({displayName:"userControl__SignInButton",componentId:"sc-n4ebn8-0"})(["&&{> .button-icon{width:24px;height:24px;}}"]),A=e=>{let{dropdownBackground:n="dropdown"}=e;const t=(0,m.uW)("avatarURL"),i=(0,m.uW)("name"),l=(0,m.uW)("isAnonymous"),[g,,f,y]=(0,u.A)(),_=(0,a.useCallback)((()=>{window.open("https://status.netdata.cloud","_blank")}),[]),w=(0,p.tN)(),v=(0,h.A)(),{handleOpenProfileModal:A,handleCloseProfileModal:S,isProfileModalOpen:M}=(0,b.A)(),E=(0,a.useMemo)((()=>[{name:"Settings",onClick:()=>{y(),A()},hasSeparator:!1,testid:"userControl-settings"},{name:"Operational Status",onClick:()=>{y(),_()},hasSeparator:!1,testid:"userControl-operationalStatus"},{name:"Sign In",onClick:()=>{y(),v()},hasSeparator:!0,testid:"userControl-signOut"},{name:"Sign Out",onClick:()=>{y(),w()},hasSeparator:!0,testid:"userControl-signOut"}].filter((e=>q[e.name]({isAgent:P.Ay,isAnonymous:l})))),[l,P.Ay]);return a.createElement(r.A,null,a.createElement(k.A,!l&&{content:"Edit your account settings and manage your notifications",align:"right"},!P.Ay&&l?a.createElement(k.A,{content:"Sign In",align:"right"},a.createElement(D,{small:!0,icon:"sign_in",onClick:f,"data-ga":"sidebar-sign-in::click-dropdown::global-view","data-testid":"sign-in-dropdown",iconSize:"medium",margin:[0,0,2]})):a.createElement(d.A,{src:t||"","data-testid":"userControl-userAvatar",onClick:f,title:i})),g&&a.createElement(s.Layer,{margin:[5,17],position:"bottom-left",onClickOutside:y,onEsc:y},a.createElement(x,{background:n,padding:[2]},E.map((e=>{let{hasSeparator:n,name:t,...i}=e;return 
a.createElement(a.Fragment,{key:t},n&&a.createElement(c,{key:"".concat(t,"-itemSeparator")}),a.createElement(C,(0,o.A)({key:t,round:1,payload:{description:"User menu - Click ".concat(t)}},i),t))})))),M&&a.createElement(T,{onClose:S}))}},96763:(e,n,t)=>{t.d(n,{A:()=>s});var o=t(58168),i=t(96540),a=t(83199);const s=e=>{let{onChange:n,value:t,onKeyDown:s,label:r,...l}=e;return i.createElement(a.TextInput,(0,o.A)({label:r,name:"userEmail",placeholder:"Enter an email address",value:t,onChange:n,onKeyDown:s},l))}},54972:(e,n,t)=>{t.d(n,{A:()=>je});t(17333),t(98992),t(54520),t(62953);var o=t(96540),i=t(540),a=t(22292),s=t(83199),r=t(80542),l=t(96083),c=t(11604),d=t(87659);var u=t(58168),m=t(27229);const p="User name cannot exceed ".concat(40," characters"),h=e=>{const n=e.length<=40;return(0,r.H)(n,p)},g=e=>{let{value:n,isValid:t,setIsValid:i,onChange:a,label:l,validators:c=[],hint:d,...p}=e;const g=(0,r.k)([h,...c]),[f,y]=(0,o.useState)("");return(0,o.useEffect)((()=>{const e=g(n),o=e.isValid,a=(0,m.W)(e);!t&&o?i(!0):t&&!o&&i(!1),y(a||"")}),[t,n]),o.createElement(s.TextInput,(0,u.A)({label:l||" ",name:"userName",placeholder:"Enter the user's name",value:n,onChange:a,hint:d,error:!t&&f},p))};var f=t(96763),y=t(19673),b=t(3914),_=t(55463),w=t(83694);const v=[e=>(0,r.H)(!!e,"Name should not be empty")],T=()=>{},k=(0,w.o)((e=>{let{isLastMember:n,spaceName:t,children:i}=e;return o.createElement(s.Flex,{column:!0,gap:2},o.createElement(s.Text,null,"You are about to delete your account at ",o.createElement("strong",null,"Netdata"),". All data related to your account will be deleted."),n&&o.createElement(o.Fragment,null,o.createElement(s.Text,null,o.createElement(s.Text,{strong:!0},t)," space will be deleted since you are the last member."),i),o.createElement(s.Text,null,"This cannot be undone. 
Are you sure you want to continue?"))})),x=e=>{let{setFormValid:n,setFormState:t}=e;const i=(0,a.uW)("name"),[r,u,m,p]=(0,s.useInputValue)({value:i,maxChars:40}),[h,w]=(0,o.useState)(!1),[x,,P,C]=(0,d.A)(),[q,D]=(0,d.A)(),{value:A}=(0,y.JN)(),S=(0,b.ap)(),M=1===(0,_.Gi)().length,E=(0,a.uW)("email"),N=(0,a.uW)("avatarURL");(0,o.useEffect)((()=>{p&&n(h)}),[p,h]),(0,o.useEffect)((()=>{t({name:r})}),[r]);const R=(0,c.z2)();return o.createElement(s.Flex,{column:!0,justifyContent:"between",flex:"grow"},o.createElement(s.Flex,{column:!0,gap:3},o.createElement(s.Text,{strong:!0,"data-testid":"userProfile-label"},"Photo"),o.createElement(l.A,{src:N||"","data-testid":"userProfile-avatar",width:16,height:16}),o.createElement(g,{"data-testid":"userProfile-username",value:r,label:"Name",onChange:u,isValid:h,setIsValid:w,validators:v,fieldIndicator:m,instantFeedback:"positiveFirst",isDirty:p}),o.createElement(f.A,{"data-testid":"userProfile-email",label:"Email",disabled:!0,value:E,onChange:T})),o.createElement(s.Flex,{alignSelf:"end"},o.createElement(s.Button,{flavour:"borderless",danger:!0,onClick:P,label:"Delete account","data-ga":"user-profile-settings::click-delete::global-view","data-testid":"userProfile-deleteAccount-button"})),x&&o.createElement(s.ConfirmationDialog,{confirmLabel:q?"Deleting...":"Yes, delete","data-ga":"delete-account-dialog","data-testid":"deleteAccountDialog",handleConfirm:()=>{D(),R()},handleDecline:C,message:o.createElement(k,{isLastMember:M,spaceName:S.name,currentPlan:A}),title:"Delete Account",isConfirmDisabled:q,isConfirmLoading:q,isDeclineDisabled:q}))};t(65189);var P=t(5668);const C=()=>{const[e,n]=(0,P.tF)("theme"),t=(0,o.useCallback)((e=>n(e.target.value)),[]);return o.createElement(s.Flex,{column:!0,gap:3,justifyContent:"between"},o.createElement(s.Flex,{column:!0,gap:2},o.createElement(s.Text,{strong:!0},"Netdata Theme"),o.createElement(s.RadioButton,{label:"Light Theme",checked:"light"===e,onChange:t,value:"light"}),o.createElement(s.RadioButton,{label:"Dark Theme",checked:"dark"===e||!e||"unspecified"===e,onChange:t,value:"dark"})))};var q=t(71847),D=t(29217),A=t(71835);const S=(0,t(92155).A)(s.Button),M=["scope:all","scope:agent-ui","scope:grafana-plugin"],E=e=>{let{onCloseModal:n,onCreate:t}=e;const[i,a]=(0,o.useState)(""),[r,l]=(0,o.useState)("scope:all");return o.createElement(s.ModalContent,null,o.createElement(s.ModalHeader,null,"Create New Token",o.createElement(s.ModalCloseButton,{testId:"close-button",onClose:n})),o.createElement(s.ModalBody,{gap:2,width:80},o.createElement(s.TextInput,{"data-testid":"description",placeholder:"Enter Description",onChange:e=>{let{target:n}=e;return a(n.value)},value:i,size:"small"}),o.createElement(s.Flex,{column:!0,gap:2,"data-testid":"scopes"},M.map((e=>o.createElement(s.RadioButton,{key:e,checked:r===e,onChange:e=>l(e.target.value),value:e,alignItems:"start"},o.createElement(s.TextSmall,null,e)))))),o.createElement(s.ModalFooter,null,o.createElement(S,{disabled:""===i.trim(),label:"Create",onClick:()=>t({description:i,scope:r}),"data-testid":"btn-create",payload:{description:"Modal - Create Token"}})))};var N=t(8711);const R=N.default.div.withConfig({displayName:"styled__TokenContainer",componentId:"sc-s1axew-0"})(["display:flex;flex-direction:column;align-items:center;color:",";background:",";border:1px solid ",";border-radius:2px;overflow-wrap:anywhere;white-space:pre-wrap;padding:23px 27px 
14px;width:100%;font-family:monospace;letter-spacing:0.09px;line-height:18px;font-size:14px;word-break:break-word;"],(0,s.getColor)("textDescription"),(0,s.getColor)("modalTabsBackground"),(0,s.getColor)("borderSecondary")),H=e=>o.createElement(s.Box,(0,u.A)({},e,{as:s.Icon,sx:{borderRadius:"50%",overflow:"hidden",background:(0,s.getColor)(["neutral","white"])}})),I=e=>o.createElement(s.Text,(0,u.A)({},e,{color:["neutral","grey35"]})),O=e=>o.createElement(s.Flex,(0,u.A)({},e,{as:s.Icon,sx:{alignSelf:"flex-end",cursor:"pointer"}}));var L=t(33436),U=t(99571),B=t(13871);const j=function(e){let n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};return()=>{(0,L.A)(e);const t=(0,B.UI)({...{header:"Copied",text:"Token copied to your clipboard!",icon:"gear",...n},success:!0});U.oR.success(t,{context:"copy"})}};var G=t(63314);const F=e=>{let{onCloseModal:n,token:t}=e;return o.createElement(s.ModalContent,null,o.createElement(G._0,{feature:"TokenCreated"},o.createElement(s.ModalHeader,null,"Token Generated",o.createElement(s.ModalCloseButton,{testId:"close-button",onClose:n})),o.createElement(s.ModalBody,null,o.createElement(s.Flex,{column:!0,gap:4},o.createElement(R,null,o.createElement("span",{"data-testid":"token"},t),o.createElement(O,{name:"copy",size:"small",color:"primary",onClick:j(t),"data-ga":"profile::click-copytoken::all-pages::api-token"})),o.createElement(s.Flex,{alignItems:"center",background:"warningBackground",gap:4,padding:[3,3],round:!0},o.createElement(H,{"data-testid":"noNodesView-warningBannerIcon",name:"exclamation",color:"warning"}),o.createElement(I,{"data-testid":"warning-message"},"Make sure to copy or store this token and add it where you need it. Once you close this modal you will not be able to retrieve it again.")))),o.createElement(s.ModalFooter,null,o.createElement(s.Button,{label:"Close",onClick:n}))))};var z=t(26655);const Y=e=>{let{description:n,scope:t}=e;return z.A.post("/api/v1/auth/account/api-token",{description:n,scopes:[t]})},X=e=>z.A.delete("/api/v1/auth/account/api-token/".concat(e)),W=e=>{let{onCloseModal:n,onTokenCreated:t,view:i,token:a}=e;const[r,l]=(0,o.useState)(i),[c,d]=(0,o.useState)(a);return o.createElement(s.Modal,{onClickOutside:n,onEsc:n},"CreateView"===r&&o.createElement(E,{"data-testid":"create-view",onCloseModal:n,onCreate:async e=>{let{description:n,scope:o}=e;const i=await Y({description:n,scope:o});(0,q.H)("api-token","create-token","profile",{description:n,scope:o}),d(i.data.token),l("CopyTokenView"),t()}}),"CopyTokenView"===r&&c&&o.createElement(F,{onCloseModal:n,token:c}))};t(41393),t(81454);var K=t(87991);const $=()=>{const[e,n]=(0,o.useState)([]),[t,,i,a]=(0,d.A)(!0),s=async()=>{i();const e=await z.A.get("/api/v1/auth/account/api-token");a(),n(e.data.map((e=>({...e,created_at:e.created_at?(0,K.GP)(new Date(e.created_at),"dd/MM/yyyy"):null,last_used_at:e.last_used_at?(0,K.GP)(new Date(e.last_used_at),"dd/MM/yyyy"):null}))))};return(0,o.useEffect)((()=>{s()}),[]),{isLoading:t,data:e,setData:n,fetchData:s}};var V=t(50876);const Z={right:["actions"]},J=[{id:"description",header:"Description",fullWidth:!0,size:150,wrap:!0,cell:e=>{let{getValue:n}=e;return o.createElement(D.A,{plain:!0,content:n()||"",isBasic:!0},o.createElement(s.TextSmall,{truncate:!0},n()||""))}},{id:"scopes",header:"Scope",fullWidth:!0,size:150,wrap:!0,cell:e=>{let{getValue:n}=e;return o.createElement(s.TextSmall,null,(Array.isArray(n())?n():[]).join(", "))}},{id:"created_at",header:"Created at",cell:e=>{let{getValue:n}=e;return 
o.createElement(s.TextSmall,null,n()||"-")}},{id:"last_used_at",header:"Used at",cell:e=>{let{getValue:n}=e;return o.createElement(s.TextSmall,null,n()||"-")}},{id:"ends_with",header:"Token",cell:e=>{let{getValue:n}=e;return o.createElement(o.Fragment,null,o.createElement(s.TextSmall,{style:{verticalAlign:"sub"}},"******"),n())}}],Q=()=>{const[e,n]=(0,o.useState)(!1),[t,i]=(0,o.useState)(!1),[a,r]=(0,o.useState)(null),{data:l,setData:c,fetchData:d}=$(),{sendButtonClickedLog:u}=(0,V.A)(),m=(0,o.useMemo)((()=>({addEntry:{handleAction:()=>{n(!0),u({label:"Create new token"},!0)},tooltipText:"Create new token"}})),[u]),[,p]=(0,A.A)(),h=(0,o.useMemo)((()=>({delete:{handleAction:e=>{let{id:n}=e;X(n).then((()=>{c((e=>e.filter((e=>e.id!==n)))),(0,q.H)("api-token","delete-token","profile"),u({label:"Delete token"},!0)})).catch(p)},confirmationTitle:"Delete API Token",confirmationMessage:"You are about to delete API Token, are you sure you want to continue?"}})),[l]);return o.createElement(o.Fragment,null,o.createElement(s.Table,{dataColumns:J,data:l,bulkActions:m,rowActions:h,enableResizing:!0,enableSorting:!0,columnPinning:Z,enableColumnPinning:!0}),e&&o.createElement(W,{view:"CreateView",onCloseModal:()=>n(!1),onTokenCreated:d}),t&&o.createElement(W,{view:"CopyTokenView",onCloseModal:()=>{i(!1),r(null)},token:a}))};var ee=t(96382),ne=t(93155),te=t(63950),oe=t.n(te);const ie=e=>{let{title:n="",description:t="",label:i="",checked:a,onChange:r=oe(),...l}=e;const c=(d=s.Toggle,e=>{let{disabledInfo:n="This setting is disabled",...t}=e;const{disabled:i}=t||{};return i?o.createElement(D.A,{content:n},o.createElement(s.Box,null,o.createElement(d,t))):o.createElement(d,t)});var d;return o.createElement(s.Flex,{justifyContent:"between",alignItems:"center"},o.createElement(s.Box,null,o.createElement(s.H5,null,n),o.createElement(s.TextSmall,null,t)),o.createElement(c,(0,u.A)({colored:!0,onChange:r,checked:a,"data-testid":"profileNotifications-".concat(i,"NotificationsToggle"),"data-ga":"profile-ntab::click-toggle::global-view::".concat(a?"enabled":"disabled")},l)))};var ae=t(194);const se=()=>{const{isLoading:e,data:n}=$(),[t,i]=(0,o.useState)(),[a,s]=(0,o.useState)(),[,r]=(0,A.A)();return(0,o.useEffect)((()=>{e||(async()=>{(null===n||void 0===n?void 0:n.length)>0&&await Promise.all(n.map((e=>{let{id:n}=e;return X(n)}))).catch(r),Y({description:"Mobile App",scope:"scope:mobile-app"}).then((e=>{let{data:n}=e;null!==n&&void 0!==n&&n.token&&i(n.token)})).catch((e=>{r({header:"Error",text:"Something went wrong"}),s(!0)}))})()}),[e]),{token:t,error:a}},re=e=>{let{value:n}=e;return o.createElement(s.Box,{width:"232px",height:"232px",background:"white",padding:[4],round:3},o.createElement(ae.Ay,{size:200,value:n}))},le=e=>{let{onClose:n=oe()}=e;const{token:t,error:i}=se();return o.createElement(s.Modal,null,o.createElement(s.ModalContent,{width:{min:100,base:140}},o.createElement(s.ModalHeader,null,o.createElement(s.Text,null,"Scan QR Code"),o.createElement(s.ModalCloseButton,{onClose:n,testId:"close-button"})),o.createElement(s.ModalBody,null,o.createElement(s.Flex,{alignItems:"center",justifyContent:"center",height:100},t?o.createElement(re,{value:t}):o.createElement(s.Flex,{width:"100%",height:"100%",alignItems:"center",justifyContent:"center",padding:[4]},i?o.createElement(s.TextBigger,null,"An error occurred"):o.createElement(s.H3,null,"Generating 
token")))),o.createElement(s.ModalFooter,null,o.createElement(s.Flex,{justifyContent:"end",padding:[1,2]},o.createElement(s.Button,{label:"Done",onClick:n})))))},ce=()=>{const[e,n]=(0,ee.j$)("email"),t=(0,o.useCallback)((()=>n(!e)),[e,n]),[i,a]=(0,ee.j$)("mobileApp"),r=(0,o.useCallback)((()=>{a(!(null!==i&&void 0!==i&&i.enabled))}),[i,a]),[l,,c,u]=(0,d.A)();return o.createElement(s.Flex,{column:!0,gap:2},o.createElement(ie,{title:"E-mail",description:"Netdata will send you health notifications via e-mails",label:"email",checked:e,onChange:t}),ne.Mh&&o.createElement(s.Flex,{column:!0},o.createElement(ie,{title:"Mobile App Notifications",description:"Netdata will send you health notifications via mobile app notifications",label:"mobileApp",checked:null===i||void 0===i?void 0:i.enabled,onChange:r,disabled:!(null!==i&&void 0!==i&&i.linked),disabledInfo:"You have to link a device first and then enable notifications"}),o.createElement(s.Flex,{justifyContent:"end"},o.createElement(s.Button,{flavour:"borderless",icon:"qrCode",iconColor:"primary",iconSize:"small",onClick:c},o.createElement(s.Text,{color:"primary"},"Show QR code"))),l&&o.createElement(le,{onClose:u})))};t(14905),t(8872);var de=t(28738),ue=t(46741),me=(t(3064),t(72577),t(26770)),pe=t(87633),he=t(14994),ge=t(71856),fe=t(1522),ye=t(69765);const be=N.default.div.withConfig({displayName:"styled__Row",componentId:"sc-lpc291-0"})(["width:100%;height:",";display:flex;flex-flow:row nowrap;align-items:center;padding-left:",";margin-bottom:",";"],(0,s.getSizeBy)(5),(0,s.getSizeBy)(3),(0,s.getSizeBy)(2)),_e=(0,N.default)(s.Icon).withConfig({displayName:"styled__StyledIcon",componentId:"sc-lpc291-1"})(["width:20px;height:20px;margin-right:",";"],(0,s.getSizeBy)()),we=(0,N.css)(["&&{width:200px;}margin-left:auto;"]),ve=(0,N.default)(s.Select).withConfig({displayName:"styled__StyledSelect",componentId:"sc-lpc291-2"})(["",""],we),Te=(0,N.default)(s.Button).withConfig({displayName:"styled__SettingsLoader",componentId:"sc-lpc291-3"})([""," color:",";border:1px solid ",";.path{stroke:",";}"],we,(0,s.getColor)("text"),(0,s.getColor)("border"),(0,s.getColor)("text")),ke=e=>{let{roomId:n,spaceId:t}=e;const[i,a]=(0,ee.yP)({roomId:n,spaceId:t,key:"alarms"}),s=(0,o.useCallback)((e=>{a(e.value)}),[a]);return o.createElement(ve,{options:Object.values(ge.fF),value:ge.fF[i],onChange:s})},xe=e=>{let{roomId:n,spaceId:t}=e;const[i,a]=(0,o.useState)(),r=(0,fe.th)(t,n,{onFail:()=>{a(!1)},onSuccess:()=>{a(!1)}}),l=(0,o.useCallback)((()=>{a(!0),r()}),[r,n]);return o.createElement(s.Box,{margin:[0,2,0,"auto"]},o.createElement(D.A,{content:"Join this room to activate notifications for it",isBasic:!0},o.createElement(s.Button,{isLoading:i,label:"Join",onClick:l,flavour:"hollow"})))},Pe=e=>{let{isMember:n,roomId:t,spaceId:i}=e;const a=(0,ye.wz)(t,"name");return o.createElement(be,{"data-testid":"roomSettings-room-".concat(a),"data-ga":"profile-ntab::click-join-room-".concat(a,"::global-view")},o.createElement(_e,{name:"room",color:"text"}),o.createElement(s.Text,null,a),n?o.createElement(o.Suspense,{fallback:o.createElement(Te,{isLoading:!0,flavour:"hollow",label:"Loading settings.."})},o.createElement(ke,{roomId:t,spaceId:i})):o.createElement(xe,{roomId:t,spaceId:i}))},Ce=N.default.div.withConfig({displayName:"styled__CollapsibleRoot",componentId:"sc-1p8t8zr-0"})(["width:100%;height:",";display:flex;flex-flow:row 
nowrap;align-items:center;cursor:pointer;margin-bottom:",";"],(0,s.getSizeBy)(5),(0,s.getSizeBy)(2)),qe=(0,N.default)(s.Icon).withConfig({displayName:"styled__OpenerIcon",componentId:"sc-1p8t8zr-1"})(["height:5px;width:6px;margin-right:",";",";"],(0,s.getSizeBy)(2),(e=>{let{expanded:n}=e;return n&&"transform: rotate(90deg)"})),De=(0,N.default)(s.Icon).withConfig({displayName:"styled__SpaceIcon",componentId:"sc-1p8t8zr-2"})(["width:20px;height:20px;margin-right:",";"],(0,s.getSizeBy)()),Ae=(0,N.default)(s.Text).withConfig({displayName:"styled__SpaceLabel",componentId:"sc-1p8t8zr-3"})(["font-weight:bold;"]),Se=(0,N.default)(s.TextSmall).withConfig({displayName:"styled__SettingsInfo",componentId:"sc-1p8t8zr-4"})(["margin-left:auto;opacity:0.8;"]);var Me=t(79769);const Ee=e=>{let{expanded:n,onExpand:t,label:i,spaceId:a}=e;(0,Me.A)(a);const s=(0,me.t5)(a,"channels").find((e=>"Email"===e.integration.slug))||{};return o.createElement(Ce,{onClick:t,"data-testid":"spaceRoot-space-".concat(i)},o.createElement(qe,{name:"chevron_right_s",expanded:n,color:"text"}),o.createElement(De,{name:"space",color:"text"}),o.createElement(Ae,null,i),!s.enabled&&o.createElement(Se,null,"E-mail notifications for this space has been disabled by admin"))},Ne={width:"108px",height:"77px"},Re=e=>{let{spaceId:n,isCurrent:t,showAllRooms:i}=e;const[a,r]=(0,d.A)(t),l=(0,pe.A)(n,{autorun:!1,polling:!1}),c=(0,he.sC)(n,"loaded");(0,o.useEffect)((()=>{n&&a&&!c&&l()}),[a,c,n,l]);const u=(0,b.ns)(n,"name"),m=(0,he.CB)(n),p=(0,o.useMemo)((()=>i?m:m.filter((e=>{let{isMember:n}=e;return n}))),[m,i]),h=(0,o.useRef)();return(0,o.useEffect)((()=>{a&&h.current&&h.current.scrollIntoView({behavior:"smooth",block:"start"})}),[a]),o.createElement(s.Flex,{column:!0,ref:h},o.createElement(Ee,{expanded:a,onExpand:r,label:u,spaceId:n}),o.createElement(s.Collapsible,{open:a},(()=>c?o.createElement(o.Fragment,null,p.map((e=>{let{id:t,isMember:i}=e;return o.createElement(Pe,{isMember:i,key:t,roomId:t,spaceId:n})}))):o.createElement(de.A,{iconProps:Ne,title:"Loading..."}))))};var He=t(13617);const Ie={width:"108px",height:"77px"},Oe=()=>{const e=(0,b.vt)(),n=(e=>{const n=(0,b.UV)("ids");return(0,o.useMemo)((()=>n.reduce(((n,t)=>t===e?[t,...n]:[...n,t]),[])),[e,n])})(e),t=(0,ue.JT)("room:ReadAll"),[i,a]=(0,o.useState)(!1);return o.createElement(o.Fragment,null,o.createElement(s.H4,null,"Notifications for all your Netdata Spaces and all the Rooms you are in"),t&&o.createElement(He.A,{onFilterClick:e=>n=>{n.stopPropagation(),a(e)},showAllRooms:i,padding:[1,0]}),o.createElement(s.Flex,{overflow:{vertical:"auto"},column:!0,"data-testid":"spaceRoomNotifications-spacesContainer",padding:[3,0,3]},o.createElement(o.Suspense,{fallback:o.createElement(de.A,{iconProps:Ie,title:"Loading notification settings..."})},n.map((n=>o.createElement(Re,{key:n,isCurrent:n===e,showAllRooms:i,spaceId:n}))))))},Le={offline:["theme"],online:["profile","theme","notifications","apiTokens"]},Ue={profile:{Component:x,label:"Profile",order:0,testId:"profileTab"},theme:{Component:C,label:"Theme",order:1,testId:"themeTab"},notifications:{Component:(0,o.memo)((()=>o.createElement(s.Flex,{overflow:{vertical:"hidden"},column:!0,gap:2},o.createElement(s.H4,null,"Notification Methods"),o.createElement(o.Suspense,{fallback:"..."},o.createElement(ce,null)),o.createElement(s.Flex,{height:"1px",background:"border"}),o.createElement(Oe,null)))),label:"Notifications",order:2,testId:"notificationsTab"},apiTokens:{Component:Q,label:"API 
tokens",order:3,testId:"apiTokensTab"}},Be=e=>(Ue[e]||Ue.profile).order,je=function(){let{modalName:e="profile"}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const n=(0,a.uW)("isAnonymous"),t=(0,o.useMemo)((()=>Object.keys(Ue).filter((e=>Le[n?"offline":"online"].includes(e)))),[n]),{isModalOpen:s,currentModalTab:r,handleOpenModal:l,handleCloseModal:c,handleChangeModalTab:d}=(0,i.A)(e),[u,m]=(0,o.useState)(Be(r)),p=e=>{d(e)};return(0,o.useEffect)((()=>{m(Be(r))}),[r]),{handleOpenProfileModal:function(){l(arguments.length>0&&void 0!==arguments[0]?arguments[0]:"profile")},handleCloseProfileModal:()=>{c()},getPreselectedTab:Be,setCurrentTab:p,handleChangeTab:e=>{const n=t[e];m(e),p(n)},tabs:t,tabsByName:Ue,activeTabIndex:u,isProfileModalOpen:s}}},14550:(e,n,t)=>{t.d(n,{A:()=>l});t(25440);var o=t(96540),i=t(47767),a=t(80158),s=t(37618),r=t(35600);const l=()=>{const e=(0,i.Zp)();return(0,o.useCallback)((()=>{if(s.Ay){const e=window.location.origin+window.location.pathname.replace(s.y7,""),n=(0,r.u0)();window.location.href=(0,a.QU)("".concat(window.envSettings.cloudUrl,"/trust?redirect_uri=").concat(encodeURIComponent(window.location.href),"&agent_uri=").concat(encodeURIComponent(e||window.envSettings.agentApiUrl),"&telemetry_session_id=").concat(n))}else e("/sign-in")}),[])}},65624:(e,n,t)=>{t.d(n,{JJ:()=>h,L6:()=>T,LO:()=>d,d$:()=>v,ph:()=>p});t(41393),t(14905),t(98992),t(81454),t(8872),t(62953),t(48408);var o=t(26655),i=t(49286),a=t(52768),s=t(98525),r=t(37618);const l={logs:(e,n)=>n?/logs/.test(n):"systemd-journal"===e,default:(e,n)=>n?!/logs/.test(n):"systemd-journal"!==e},c=e=>{const n=l[e]||l.default;return function(){let{functions:e=[],nodes:t}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return e.reduce(((e,o)=>{let{name:i,ni:a,tags:s}=o;return n(i,s)?[...e,{name:i,nodes:a.reduce(((e,n)=>t[n]?(e[t[n].nd||t[n].mg]=!0,e):e),{})}]:e}),[])}},d=(e,n,t)=>(0,r.ES)(e)?o.A.get("/api/v2/functions",{baseURL:window.envSettings.agentApiUrl,transform:c(t)}):o.A.post("/api/v3/spaces/".concat(e,"/rooms/").concat(n,"/functions"),{scope:{nodes:[]},selectors:{nodes:["*"]}},{transform:c(t)}),u=(e,n)=>{let{merge:t,direction:o,dataOnly:a=!1,tail:s=!1,reset:r=!1}=n;return n=>((n=(0,i.Ds)(n,{omit:["histogram"]})).defaultCharts||"processes"!==e||(n.defaultCharts=[["CPU","Category"],["Memory","Category"]]),n.defaultCharts=n.defaultCharts||[],a||(n.aggregations=n.facets),n.merge=t,n.direction=o,n.dataOnly=a,n.tail=s,n.reset=r,n)},m=(e,n)=>"after"===e||"before"===e?Math.floor(n/1e3):n,p=e=>{let{spaceId:n,cloudNodeIds:t,nodeIds:i,fn:a,acceptedParams:s=[],merge:l=!1,reset:c=!1,filters:d={},...p}=e,h=s.reduce(((e,n)=>{let t=m(n,p[n]);return t="undefined"===typeof t||null===t?null:"".concat(t),t?e?"".concat(e," ").concat(n,":").concat(t):"".concat(n,":").concat(t):e}),"");null!==s&&void 0!==s&&s.length&&(h=Object.keys(d).reduce(((e,n)=>{let t=m(n,d[n]);return t=((e,n)=>"query"===e&&n?n.split(/\s+/).join("|"):n)(n,t),t="undefined"===typeof t||null===t?null:"".concat(t),t?e?"".concat(e," ").concat(n,":").concat(t):"".concat(n,":").concat(t):e}),h));const g=(0,r.ES)(n);let f;if(h=h?"".concat(g?"%20":" 
").concat(h):"",g){const[e]=i;f=o.A.get("/host/".concat(e,"/api/v1/function?function=").concat(a).concat(h),{baseURL:window.envSettings.agentApiUrl,transform:u(a,{merge:l,direction:p.direction,dataOnly:p.data_only&&s.includes("data_only"),tail:p.tail,reset:c})})}else{const[e]=t;f=o.A.get("/api/v2/nodes/".concat(e,"/function"),{params:{function:"".concat(a).concat(h),timeout:12e4},transform:u(a,{merge:l,direction:p.direction,dataOnly:p.data_only&&s.includes("data_only"),tail:p.tail,reset:c})})}return f.catch((e=>{throw e.merge=l,e.direction=p.direction,e.dataOnly=p.data_only&&s.includes("data_only"),e.tail=p.tail,e.reset=c,e})),f},h=e=>{let{spaceId:n,cloudNodeIds:t,nodeIds:a,fn:s,after:l,before:c}=e;const d=(0,r.ES)(n),u="".concat(d?"%20":" ","info after:").concat(m("after",l)," before:").concat(m("before",c));if(d){const[e]=a;return o.A.get("/host/".concat(e,"/api/v1/function?function=").concat(s).concat(u),{baseURL:window.envSettings.agentApiUrl,transform:i.Ds})}const[p]=t;return o.A.get("/api/v2/nodes/".concat(p,"/function"),{params:{function:"".concat(s).concat(u),timeout:12e4},transform:i.Ds})},g={filter:!1,index:1,name:"Unknown",sort:"ascending",sortable:!1,sticky:!1,summary:"",type:"string",uniqueKey:!1,visible:!0,dummy:!1},f=function(e){let n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};return{[e]:{...g,name:n.name||e,...n}}},y={...f("Timestamp",{type:"datetime",index:0}),...f("Tags",{type:"pill",index:1,wrap:!0}),...f("Description",{type:"feedTemplate",index:2}),...f("source",{type:"feedTemplate",index:3,dummy:!0}),...f("json",{type:"feedTemplate",index:4,dummy:!0})},b=function(){let[e]=arguments.length>0&&void 0!==arguments[0]?arguments[0]:[],{type:n}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};return"retention"===n?/exceeds the maximum data retention/.test(e)?"You've exceeded your plan's retention limit":"":e},_=(e,n)=>t=>{var o,i,a;let{errors:s=[],page_size:r,results:{hits:l,aggregations:c}}=t;const d=(null===l||void 0===l?void 0:l.hits)||[];return{columns:{...y},data:d.map((function(){var e;let{_source:t={},_source:{event:o,host:i=[],agent:a={},Netdata:s,tags:r}}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const l=Array.isArray(i)?i:[i];return s={...s,...s.alert?{alert:{...s.alert,id:null===(e=s.alert.transition)||void 0===e?void 0:e.id,...s.alert.name?{name:Array.isArray(s.alert.name)?s.alert.name:[s.alert.name]}:{},...s.alert.current?{current:{...s.alert.current,status:s.alert.current.status?Array.isArray(s.alert.current.status)?s.alert.current.status:[s.alert.current.status]:[]}}:{}}}:{}},[t["@timestamp"],r||[],o.id,{...t,...o,...s,hosts:l,agentId:a.id,roomId:n},t]})),default_sort_column:null,type:"table",update_every:5,pageSize:r,totalSize:(null===(o=l.total)||void 0===o?void 0:o.value)||0,anchorBefore:null===(i=d[d.length-1])||void 0===i||null===(i=i.sort)||void 0===i?void 0:i[0],anchorAfter:null===(a=d[0])||void 0===a||null===(a=a.sort)||void 0===a?void 0:a[0],aggregations:c,merge:e,retentionWarning:b(s,{type:"retention"}),warning:b(s)}},w=(e,n,t,o)=>t=>{var i;let{facets:s=[],items:r={},transitions:l=[]}=t;const c=l.map((t=>{let{machine_guid:o,transition_id:i,node_id:s,hostname:r,alert:l,new:{status:c,value:d},when:u,old:{status:m},instance:p,units:h}=t;const 
g={action:"alert-node-transition",space:{id:e},roomId:n,alert:{id:i,current:{status:[c],value_string:(0,a.m3)(d,h)},previous:{status:m},name:[l]},chart:{name:p},hosts:[{id:s||o,name:r}]};return[1e3*u,["alert","node_instance"],i,g,g,c,name,p]}));return{columns:{...y,...f("Status",{type:"value",index:5,dummy:!0,filter:"multiselect"}),...f("Alert Name",{type:"value",index:6,dummy:!0,filter:"multiselect"}),...f("Instance",{type:"value",index:7,dummy:!0,filter:"multiselect"})},data:c,default_sort_column:null,type:"table",update_every:5,pageSize:100,totalSize:(null===r||void 0===r?void 0:r.matched)||c.length,anchorBefore:null===(i=l[l.length-1])||void 0===i?void 0:i.gi,aggregations:s,merge:o,retentionWarning:null,warning:null}},v=e=>{var n;let{nodeIds:t=[],spaceId:i,roomId:a,roomSlug:l,anchor:c="",merge:d=!1,after:u,before:m,filters:p,anchorOffset:h,cloudNodeIds:g,...f}=e;if((0,r.ES)(i)){const e=new URLSearchParams({after:u?Math.floor(u/1e3):u,before:m?Math.ceil(m/1e3):m,last:200,anchor_gi:c,options:"minify",...p,...f,scope_nodes:t.join("|")||"*"}).toString();return o.A.get("/api/v2/alert_transitions?".concat(e),{baseURL:window.envSettings.agentApiUrl,transform:w(i,a,0,d)})}return o.A.post("/api/v1/feed/search",{...p,...f,histogram_seconds:(null===(n=(0,s.Bp)(u,m))||void 0===n?void 0:n.secs)||1,node_ids:g,page_size:200,space_id:i,room_ids:"all-nodes"!==l?[a]:[],search_before:c||Date.now(),from_timestamp:u,to_timestamp:m},{transform:_(d,a)})},T=()=>o.A.get("/api/v1/feed/static/facets")},61531:(e,n,t)=>{t.d(n,{Ay:()=>u,UW:()=>a,Y5:()=>l,a$:()=>s,zb:()=>d});var o=t(47444),i=t(65624);const a=(0,o.eU)({key:"fnIndexHoverAtom",default:null}),s=(0,o.eU)({key:"fnClickPointAtom",default:null}),r=(0,o.Iz)({key:"checkAgainFnsAtom",default:()=>1}),l=(0,o.K0)({key:"availableFunctionsSelector",get:e=>{let{spaceId:n,roomId:t,extraKey:o}=e;return e=>{let{get:a}=e;return a(r(n)),(e=>{let{spaceId:n,roomId:t,extraKey:o}=e;return"feed"===o?Promise.resolve({data:[{name:"feed"}]}):(0,i.LO)(n,t,o)})({spaceId:n,roomId:t,extraKey:o})}}}),c={id:null,loading:!0,loaded:!1,loadingMore:!1,infoLoaded:!1,data:null,error:"",type:"",updateEvery:1,availableFilters:{},missingRequired:!1,lastModified:null,anchorAfter:null,offset:0,defaultCharts:[],acceptedParams:[],requiredParams:[],filtersToRefresh:{}},d={table:{...c,sortColumn:"",sortDirection:"descending",columns:{},columnVisibility:{},sortedColumns:[],pinnedColumns:[],groupByColumns:null},default:c},u=(0,o.Iz)({key:"function",default:e=>({...c,id:e})})},31225:(e,n,t)=>{t.d(n,{Z$:()=>s,b4:()=>o,wD:()=>a});const o=["#19C89E","#14A6C7","#F3D63D","#E05296","#FF7448","#C082FF"],i=3600,a=86400,s=[{minLimit:350*a,secs:7*a,unit:"week"},{minLimit:50*a,secs:a,unit:"day"},{minLimit:13*a,secs:21600,unit:"hour"},{minLimit:5*a,secs:10800,unit:"hour"},{minLimit:136800,secs:i,unit:"hour"},{minLimit:61200,secs:1800,unit:"minute"},{minLimit:28800,secs:600,unit:"minute"},{minLimit:10800,secs:300,unit:"minute"},{minLimit:2280,secs:60,unit:"minute"},{minLimit:780,secs:30,unit:"second"},{minLimit:420,secs:10,unit:"second"},{minLimit:300,secs:5,unit:"second"},{minLimit:1,secs:1,unit:"second"}]},50466:(e,n,t)=>{t.d(n,{Ak:()=>f,Be:()=>v,E:()=>_,Id:()=>k,Ol:()=>b,Ph:()=>P,WB:()=>h,WH:()=>w,ZY:()=>T,_H:()=>y,dP:()=>q,nm:()=>C,vx:()=>m,x9:()=>D});t(41393),t(14905),t(98992),t(81454),t(8872),t(62953);var o=t(96540),i=t(47767),a=t(47444),s=t(27467),r=t(3914),l=t(69765),c=t(37618),d=t(61531),u=t(65624);const m=e=>{var n;const 
t=(0,r.vt)(),i=(0,l.ID)(),s=(0,a.xf)((0,d.Y5)({spaceId:t,roomId:i,extraKey:e})),c=(0,a.Zs)((n=>{let{refresh:o}=n;return()=>o((0,d.Y5)({spaceId:t,roomId:i,extraKey:e}))}),[t,i,e]),u=(0,o.useMemo)((()=>{var e;if(null===(e=s.contents)||void 0===e||!e.data)return[];const n=[...s.contents.data];return n.sort(((e,n)=>e.name.localeCompare(n.name,void 0,{sensitivity:"accent",ignorePunctuation:!0}))),n}),[s.contents]);return{loaded:"loading"!==s.state,value:u,hasError:"hasError"===s.state,error:null===(n=s.contents)||void 0===n||null===(n=n.response)||void 0===n?void 0:n.data,refresh:c}},p=[],h=(0,a.K0)({key:"fnState",get:e=>{let{id:n,key:t}=e;return e=>{let{get:o}=e;const i=o((0,d.Ay)(n));return t?i[t]:i}},set:e=>{let{id:n,key:t}=e;return(e,o)=>{let{set:i,reset:a}=e;o||t?i((0,d.Ay)(n),(e=>t?{...e,[t]:o}:{...e,...o})):a((0,d.Ay)(n))}},cachePolicy_UNSTABLE:{eviction:"most-recent"}}),g={extraKey:"fn"},f=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:g;const[n]=(0,s.l6)("selectedFn",{defaultValue:p,flavour:"arr",...e});return n},y=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:g;return(0,s.r$)("selectedFn",{defaultValue:p,flavour:"arr",...e})},b=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:g;return function(e){let{key:n}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:g;return(0,a.vc)(h({id:e,key:n}))}(f(e),e)},_=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:g;return function(e){let{key:n}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:g;return(0,a.lZ)(h({id:e,key:n}))}(f(e),e)},w=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:g;return[b(e),_(e)]},v=function(){let{key:e,flavour:n,...t}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:g;const o=f(t);return(0,s.r$)(e,{extraKey:"".concat(o||t.extraKey,"Settings"),flavour:n})},T=function(){let{key:e,flavour:n,...t}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:g;const o=f(t);return(0,s.r$)(e,{extraKey:"".concat(o||t.extraKey,"Filters"),flavour:n})},k=e=>{const n=(0,i.Zp)(),t=(()=>{const e=(0,l.r9)();return e&&"".concat(e,"/top")})();return(0,o.useCallback)((function(){n(t,{state:{nodeId:arguments.length>0&&void 0!==arguments[0]?arguments[0]:e}})}),[t,n])},x=(0,a.K0)({key:"feedFacetsSelector",get:e=>()=>e?(0,u.L6)():Promise.resolve({data:{}}),cachePolicy_UNSTABLE:{eviction:"most-recent"}}),P=e=>{const n=(0,r.vt)(),t=(0,c.ES)(n),{loading:i,loaded:s,aggregations:l}=b({extraKey:e}),d=(0,a.xf)(x(!t&&"feed"===e));return(0,o.useMemo)((()=>{var e;if("hasValue"!==d.state||!s)return[];if(!l)return[];if(Array.isArray(l))return l.reduce(((e,n)=>(e.push({...n,options:[...n.options].sort(((e,n)=>e.order-n.order||(e.name?e.name.localeCompare(n.name,void 0,{sensitivity:"accent",ignorePunctuation:!0}):n.count-e.count)))}),e)),[]).sort(((e,n)=>e.order-n.order));const n=(null===d||void 0===d||null===(e=d.contents)||void 0===e||null===(e=e.data)||void 0===e?void 0:e.facets)||{};return Object.keys(n).sort(((e,t)=>n[e].order-n[t].order)).reduce(((e,t)=>{var o;const{display_name:i,field:a}=n[t];return null!==(o=l[t])&&void 0!==o&&o.buckets?(e.push({id:t,name:i,options:l[t].buckets.map((e=>{var n,t;return{id:e.key,name:(null===(n=d.contents.data.fields)||void 0===n||null===(n=n[a])||void 0===n?void 0:n[e.key])||e.key,count:(null===(t=e.hits)||void 0===t?void 
0:t.doc_count)||0}}))}),e):e}),[])}),[e,t,d.state,s,i])},C=()=>(0,a.vc)(d.UW),q=()=>(0,a.lZ)(d.UW),D=()=>(0,a.L4)(d.a$)},98525:(e,n,t)=>{t.d(n,{Bp:()=>c,Sh:()=>r,iX:()=>s,py:()=>l});t(3064),t(98992),t(72577);var o=t(96540),i=t(21290),a=t(31225);const s=function(e){let{decimalPoints:n,defaultValue:t=""}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};return null===e?t:isNaN(n)?e:parseFloat(e).toFixed(n)},r=function(e){let{defaultValue:n="",usec:t=!1}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};if(t&&(e=e?Math.floor(e/1e3):e),null===e)return n;const o=Math.floor(e/86400);e-=24*o*3600;const i=Math.floor(e/3600);e-=3600*i;const a=Math.floor(e/60);return"".concat(o,"d ").concat(i,"h ").concat(a,"m")},l=()=>{const{localeTimeString:e,localeDateString:n}=(0,i.$j)();return(0,o.useCallback)((function(t){let{defaultValue:o="",usec:i=!1}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};i&&(t=t?Math.floor(t/1e3):t);const a=new Date(t);return isNaN(a.valueOf())?o:"".concat(n(a,{long:!1})," ").concat(e(a,{secs:!0}))}),[])},c=(e,n)=>a.Z$.find((t=>{let{minLimit:o}=t;return((e,n)=>e<0?Math.abs(n-e):(n-e)/1e3)(e,n)>=o}))},24695:(e,n,t)=>{t.d(n,{A:()=>G});var o=t(96540),i=t(36091),a=t(58168),s=t(8711),r=t(83199),l=t(4659),c=t(87292),d=t(88773),u=t(68831);const m=s.default.article.withConfig({displayName:"nodes__StyledArticle",componentId:"sc-z8xzec-0"})(["padding-right:8px;padding-bottom:16px;"]),p=s.default.p.withConfig({displayName:"nodes__StyledParagraph",componentId:"sc-z8xzec-1"})(["margin-top:14px;line-height:1.5;"]),h=(0,s.default)(c.Ay).withConfig({displayName:"nodes__StyledCommand",componentId:"sc-z8xzec-2"})(["padding:8px;padding-right:32px;margin-top:8px;"]),g=s.default.ol.withConfig({displayName:"nodes__OrderedList",componentId:"sc-z8xzec-3"})(["list-style:roman;padding-left:16px;line-height:2;"]),f=s.default.ul.withConfig({displayName:"nodes__UnorderedList",componentId:"sc-z8xzec-4"})(['list-style-image:url("','/img/list-style-image.svg");padding-left:16px;line-height:2;'],u.A.assetsBaseURL),y={document:{render:m},heading:{render:e=>{let{level:n=1,...t}=e;const i={1:r.H1,2:r.H2,3:r.H3,4:r.H4,5:r.H5,6:r.H6}[n]||r.H1,s=n<=3?6-n:2;return o.createElement(i,(0,a.A)({margin:[s,0,2,0]},t))},attributes:{id:{type:String},level:{type:Number}}},paragraph:{render:p},link:{render:e=>{let{children:n,href:t,...i}=e;return o.createElement(l.A,(0,a.A)({href:t,rel:"noopener noreferrer",target:"_blank"},i),n)},attributes:{href:{type:String}}},code:{render:e=>{let{content:n,...t}=e;return o.createElement(c.R0,(0,a.A)({as:"span"},t),n)},attributes:{content:{type:String}}},fence:{render:h,attributes:{content:{type:String}}},list:{render:e=>{let{ordered:n,children:t,...i}=e;const a=n?g:f;return o.createElement(a,i,t)},attributes:{ordered:{type:Boolean}}},table:{render:e=>{let{children:n,...t}=e;return o.createElement(d.gY,t,n)}}},b=e=>{let{summary:n="",open:t=!1,children:i}=e;return o.createElement(r.Flex,{margin:[2,0,0,0]},o.createElement("details",{open:t},o.createElement("summary",null,o.createElement(r.Text,{strong:!0},n)),i))};t(33811),t(41393),t(86994),t(98992),t(81454),t(41795),t(62953);const _=(0,s.default)(r.Flex).attrs({column:!0}).withConfig({displayName:"styled__StyledTerminalCommand",componentId:"sc-3zs5xg-0"})(["position:relative;top:-1px;color:",";background:",";border:1px solid ",";cursor:pointer;overflow-wrap:anywhere;white-space:pre-wrap;padding:8px 16px 16px 
8px;width:100%;font-family:monospace;font-weight:bold;letter-spacing:0.09px;line-height:16px;font-size:14px;word-break:break-word;overflow-y:auto;margin-top:",";"],(0,r.getColor)("primary"),(0,r.getColor)("terminalGreen"),(0,r.getColor)("terminalGreenBorder"),(e=>{let{noMargin:n}=e;return n?"0":"16px"})),w=(0,s.default)(r.Icon).withConfig({displayName:"styled__StyledIcon",componentId:"sc-3zs5xg-1"})(["display:flex;align-self:flex-end;cursor:pointer;position:absolute;bottom:8px;right:8px;"]);var v=t(76051);var T=t(25159),k=t(18682),x=t(50876);const P=(C=e=>{let{children:n,...t}=e;const i=(0,o.useMemo)((()=>(0,T.jU)(n)),[n]),{sendLog:s,isReady:r}=(0,x.A)(),l=(0,o.useCallback)((()=>{(0,k.C)(i,{text:"Config copied to your clipboard."})(),s({feature:"ConnectNode",isStart:!0,copyString:i})}),[r,i]);return o.createElement(_,(0,a.A)({onClick:l},t),n,o.createElement(w,{name:"copy",size:"small",color:"textLite"}))},e=>{let{blurred:n,blurProps:t,...i}=e;return n?o.createElement(v.Ay,t,o.createElement(C,i)):o.createElement(C,i)});var C;const q=e=>{let{children:n}=e;const[t,i]=(0,o.useState)(0),a=o.Children.toArray(n).map((e=>{let{props:n}=e;return n}));return o.createElement(r.Flex,{column:!0,width:"100%",margin:[4,0,0,0]},o.createElement(r.Flex,{border:{side:"bottom",size:"1px",color:"borderSecondary"},padding:[0,4]},o.createElement(r.Tabs,{selected:t,onChange:i},a.map((e=>{let{label:n}=e;return o.createElement(r.Tab,{key:n,label:o.createElement(r.Text,null,n),minWidth:"auto",maxWidth:"auto",padding:[1,4],background:"mainBackground",small:!0})})))),o.createElement(r.Flex,{flex:!0},o.createElement(P,{noMargin:!0},a[t].children)))};var D=t(27467),A=t(47767),S=t(3914),M=t(27776),E=t(9060);const N=e=>{let{children:n,onClick:t}=e;return o.createElement(r.Box,{as:"a",cursor:"pointer",onClick:t},n)},R=e=>{let{categoryId:n,navigateToSettings:t,children:i}=e;const a=(0,A.Zp)(),[,s]=(0,D.N9)("selectedIntegrationCategory"),r=(0,S.bq)(),{pushCategory:l}=(0,M.b8)(),c=(0,E.A)(),d=(0,o.useCallback)((()=>{t?(s("deploy.docker-kubernetes"),a({pathname:"/spaces/".concat(r,"/settings/integrations"),replace:!0})):l(c(n))}),[l,n,c,t]);return o.createElement(N,{onClick:d},i)},H=(0,o.memo)(R);t(25440),t(79978),t(3064),t(72577);var I=t(22292),O=t(46741);const L=e=>{let{showClaimingOptions:n,command:t="",claimToken:o="",claimUrl:i="",claimRooms:a=""}=e;if(n)return t.replaceAll(/{% if \$showClaimingOptions %}\n?/gi,"").replaceAll(/{% \/if %}\n?/gi,"").replaceAll(/{% claim_token %}/gi,o).replaceAll(/{% claim_url %}/gi,i).replaceAll(/{% \$claim_rooms %}/gi,a);return t.replaceAll(/{%\s*if\s*\$showClaimingOptions\s*%}[\s\S]*?{%\s*\/if\s*%}\n?/gs,"")},U=e=>{var n;let{methods:t,isNightly:i,claimToken:a="",claimUrl:s="",claimRooms:l=""}=e;const c=i?"nightly":"stable",{showClaimingOptions:d}=(()=>{const e=(0,I.uW)("isAnonymous"),n=(0,O.JT)("node:Create");return{showClaimingOptions:!e&&n}})();return t.length?d&&!a?o.createElement(r.Flex,{padding:[8,0]},o.createElement(r.Text,null,"Loading token...")):t.length>1?o.createElement(q,null,t.map((e=>{let{method:n,commands:t}=e;const{command:i}=t.find((e=>e.channel==c))||{};return o.createElement(r.Tab,{key:n,label:n},L({showClaimingOptions:d,command:i,claimToken:a,claimUrl:s,claimRooms:l}))}))):o.createElement(P,null,L({showClaimingOptions:d,command:((null===(n=t[0])||void 0===n?void 
0:n.commands.find((e=>e.channel==c)))||{}).command||"",claimToken:a,claimUrl:s,claimRooms:l})):null},B={nodes:y,tags:{details:{render:b,attributes:{summary:{type:String,default:""},open:{type:Boolean,default:!1}}},tabs:{render:q},tab:{render:r.Tab,attributes:{label:{type:String,default:""}}},terminal:{render:P},goToCategory:{render:H,attributes:{categoryId:{type:String,default:""},navigateToSettings:{type:Boolean,default:!1}}},command:{render:U,attributes:{methods:{type:Array,default:[]},isNightly:{type:Boolean,default:!1},claimToken:{type:String,default:""},claimUrl:{type:String,default:""},claimRooms:{type:String,default:""}}}}},j=e=>{let{config:n={},children:t}=e;return o.createElement(i.A,{transformConfiguration:{...B,...n}},t)},G=(0,o.memo)(j)},67962:(e,n,t)=>{t.d(n,{A:()=>y});var o=t(96540),i=t(83199),a=t(12602),s=t(6586),r=t(73865),l=t(63314);const c=()=>{const{loaded:e,hasLimitations:n,maxNodes:t,preferredNodes:c}=(0,r.A)();return n&&e&&c.length==t?o.createElement(l.Ay,{feature:"ActiveNodesLimitWarning"},o.createElement(i.Flex,{gap:2},o.createElement(i.Icon,{size:"medium",color:"warning",name:"warning_triangle"}),o.createElement(i.TextBig,null,"If you connect a new node you'll"," ",o.createElement(i.TextBig,{strong:!0},"exceed the limit of ",t," active Nodes"),". To unblock the new node, either"," ",o.createElement(a.A,null,o.createElement(i.TextBig,{color:"primary"},"upgrade to the Business plan"))," ","for unlimited access or"," ",o.createElement(s.A,null,o.createElement(i.TextBig,{color:"primary"},"review your Space active Nodes on the Setting page")),"."))):null};var d=t(3914),u=t(46741),m=t(37618);const p=()=>{const e=(0,d.vt)(),n=(0,m.ES)(e),t=(0,u.JT)("node:Create");return n||t?null:o.createElement(l.Ay,{feature:"IntegrationsNodeCreatePermissionWarning"},o.createElement(i.Flex,{gap:2,background:"warningSemi",padding:[2]},o.createElement(i.Icon,{size:"medium",color:"text",name:"warning_triangle"}),o.createElement(i.TextBig,null,"You don't have permissions to connect new nodes to the Space. 
Please contact an administrator to do it or just install Netdata on your node.")))};var h=t(24695),g=t(74530),f=t(69765);const y=e=>{let{integration:n={},isNightly:t,rooms:a=[],navigateToSettings:s}=e;const{deployContent:r,methods:l}=n,u=(0,d.vt)(),m=(0,f.pr)(),y=(0,f.XA)(),b=null!==y&&void 0!==y&&y.loaded?y:m,_=null===b||void 0===b?void 0:b.id,w=(0,g.A)(u),v=window.envSettings.apiUrl,T=a.length?a.join(","):_,k=(0,o.useMemo)((()=>{var e;return{variables:{methods:l,isNightly:t,claimToken:null===w||void 0===w||null===(e=w[0])||void 0===e?void 0:e.token,claimUrl:v,claimRooms:T,navigateToSettings:s}}}),[l,t,w,v,T,s]);return o.createElement(i.Flex,{width:"100%",column:!0,padding:[4,0,0,0]},o.createElement(c,null),o.createElement(p,null),o.createElement(h.A,{config:k},r))}},89009:(e,n,t)=>{t.d(n,{A:()=>k});var o=t(96540),i=t(63950),a=t.n(i),s=t(83199),r=t(4659),l=t(29217),c=t(47130),d=t(32089),u=t(53285),m=(t(9391),t(62953),t(47444)),p=t(3914),h=t(93740),g=t(89916),f=t(87659),y=t(71835);const b=()=>{const e=(0,p.vt)(),[n,,t,o]=(0,f.A)(),[i,a]=(0,y.A)();return{loading:n,recycleToken:(0,m.Zs)((n=>{let{set:s}=n;return()=>{t(),(0,h.X)(e).then((n=>{let{data:t}=n;s((0,g.g)(e),t),i({text:"Successfully expired last token and created a new one."})})).catch(a).finally((()=>{o()}))}}),[e,t,o,i,a])}};var _=t(59778);const w=(0,t(92155).A)((0,c.A)(s.Button)),v={left:"stable",right:"nightly"},T=e=>{let{right:n}=e;const{title:t,description:i}=_.d[v[n?"right":"left"]]||{};return o.createElement(l.A,{content:i,align:"bottom"},o.createElement(s.Text,{padding:n?[0,0,0,1]:[0,1,0,0]},t))},k=e=>{let{isNightly:n,toggleNightly:t=a()}=e;const{loading:i,recycleToken:l}=b();return o.createElement(s.Flex,{gap:4},o.createElement(s.Flex,{column:!0,gap:1},o.createElement(r.A,{href:"https://learn.netdata.cloud/docs/getting-started/install-netdata#nightly-vs-stable-releases",rel:"noopener noreferrer",target:"_blank"},o.createElement(s.Flex,{alignItems:"center",gap:1},o.createElement(s.Text,{color:"primary"},"Updates channel"),o.createElement(s.Icon,{color:"primary",name:"goToNode",width:"18px",height:"18px"}))),o.createElement(s.Toggle,{labelLeft:"Stable",labelRight:"Nightly",Label:T,colored:!1,checked:n,disabled:!1,onChange:t})),o.createElement(d.A,{vertical:!0}),o.createElement(u.A,{permission:"node:Create"},(e=>o.createElement(w,{feature:"ClaimingToken",label:"Regenerate Token",flavour:"hollow",onClick:l,tooltip:e?"Expire the current token and claim a new one.":"Only admins can regenerate the token.",disabled:i||!e}))))}},58205:(e,n,t)=>{t.d(n,{V0:()=>U,vD:()=>X,jI:()=>W,qC:()=>B,Jr:()=>Y,v2:()=>j,D_:()=>z,yI:()=>L,rP:()=>F,aw:()=>G});var o=t(58168),i=(t(62953),t(96540)),a=t(63950),s=t.n(a),r=t(83199),l=t(43529),c=t(8711),d=t(3914),u=t(98046),m=t(87659),p=t(47373);const h=e=>{let{name:n,onEdit:t=s()}=e;return i.createElement(r.Flex,{alignItems:"center",gap:2},i.createElement(r.TextBig,null,n),i.createElement(r.Button,{icon:"pencilOutline",flavour:"borderless",iconColor:"text",iconSize:"small",padding:[0],onClick:t}))},g=(0,c.default)(r.Flex).withConfig({displayName:"spaceInfo__StyledWrapper",componentId:"sc-166ce7m-0"})(["margin-top:-3px !important;margin-bottom:-3px !important;"]),f=e=>{let{name:n,setName:t=s(),onSave:o=s(),onCancel:a=s(),isLoading:l}=e;const[c,d]=(0,i.useState)(),u=(0,i.useMemo)((()=>(0,p.fc)(n)),[n]);return 
i.createElement(g,{gap:2},i.createElement(r.TextInput,{value:n,onChange:e=>{d(!0),t(e.target.value)},onKeyDown:e=>{let{code:n}=e;["Enter","NumpadEnter"].includes(n)&&o()},disabled:l,size:"tiny",error:p.xc[u],isDirty:c,instantFeedback:"all"}),i.createElement(r.Button,{label:"Save",flavour:"borderless",iconColor:"text",iconSize:"small",padding:[0],onClick:o,disabled:l||!!u}),i.createElement(r.Button,{label:"Cancel",flavour:"borderless",iconColor:"text",iconSize:"small",padding:[0],onClick:a,disabled:l}))},y=()=>{const{id:e,name:n}=(0,d.ap)(),t=(0,u.A)(e),[o,,a,s]=(0,m.A)(),[r,,l,c]=(0,m.A)(),[p,g]=(0,i.useState)(n),y=(0,i.useCallback)((()=>{s(),c()}),[]),b=(0,i.useCallback)((()=>{a(),t({name:p},{onSuccess:y,onFail:s})}),[p]);return r?i.createElement(f,{name:p,setName:g,onSave:b,onCancel:c,isLoading:o}):i.createElement(h,{name:n,onEdit:l})};var b=t(29217),_=t(38413),w=t(4659),v=t(69765),T=t(87860),k=t(81048),x=t(5668),P=t(46741);t(25440);const C=(0,t(92155).A)(r.Button),q={light:"primary",dark:"white",unspecified:"primary"},D=e=>{let{onClick:n}=e;const t={icon:"add_user",flavour:"borderless",onClick:n,label:"Invite your team","data-ga":"integrations::click-invite-team::header"};return(0,P.JT)("space:InviteUser")?i.createElement(C,t):i.createElement(b.A,{content:"You can't invite a user with your current permissions"},i.createElement(r.Flex,null,i.createElement(C,(0,o.A)({},t,{disabled:!0}))))},A=e=>{let{nodesCount:n,onClick:t=s()}=e;const o=(0,P.JT)("node:Create"),a=(0,i.useMemo)((()=>({onClick:t,disabled:!o})),[o]),l=n>0?"Connect more!":"Connect a node";return o?i.createElement(w.A,a,l):i.createElement(b.A,{content:"You don't have permission to connect new nodes. Please contact an administrator to do it"},i.createElement(r.Box,null,i.createElement(w.A,a,l)))},S=e=>{let{nodesCount:n,onConnectClick:t=s()}=e;return i.createElement(r.Flex,{gap:2},i.createElement(A,{nodesCount:n,onClick:t}),i.createElement(r.Text,null,"to get started"))},M=e=>{let{nodesCount:n,onConnectClick:t=s()}=e;return i.createElement(r.Flex,{gap:2},i.createElement(r.Text,null,"You have connected ",n," node",1===n?"":"s","."),i.createElement(A,{nodesCount:n,onClick:t}))},E=(0,i.memo)((e=>{let{nodesCount:n,onConnectClick:t=s()}=e;return i.createElement(r.Flex,{gap:2},i.createElement(r.Text,null,"This is your new space"),n>0?i.createElement(M,{nodesCount:n,onConnectClick:t}):i.createElement(S,{nodesCount:n,onConnectClick:t}))})),N=e=>{let{onInvite:n=s()}=e;const t=(0,x.xd)("theme"),o=(0,d.vt)(),a=(0,v.ID)(),c=(0,v.J_)(o,k.mL),[u,,p,h]=(0,m.A)(),g=(()=>{const e=(0,d.bq)(),n=(0,v.QW)();return(0,i.useCallback)((()=>{window.location.replace("/spaces/".concat(e,"/rooms/").concat(n,"/overview"))}),[])})(),[f,b]=(0,i.useState)([]);(0,T.A)({spaceId:o,id:a||c,pollingInterval:3e3,keepPolling:!0,onNodeIdsChange:e=>{let{nodeIds:n}=e;return b((null===n||void 0===n?void 0:n.length)||0)}});const w=f>0;return(0,i.useEffect)((()=>{w&&g()}),[w]),i.createElement(i.Fragment,null,i.createElement(l.A,{height:"66px",width:"66px",color:q[t]}),i.createElement(r.H1,{strong:!1},"Welcome to Netdata!"),i.createElement(E,{nodesCount:f,onConnectClick:e=>{e.preventDefault,p()}}),i.createElement(y,null),i.createElement(r.Flex,{gap:4},i.createElement(D,{onClick:n}),i.createElement(C,{icon:"rocket",flavour:w?"default":"borderless",onClick:g,label:w?"Launch space":"Node Required for Launch",disabled:!w,fetaure:"LaunchSpace"})),u&&i.createElement(_.A,{onClose:h,room:a}))},R=()=>i.createElement(i.Fragment,null,i.createElement(r.H1,{strong:!1},"Integrations Made 
Easy!"),i.createElement(r.Text,null,"Unleash the Power of Connected Systems"));var H=t(67962),I=t(24695);const O=e=>{let{integration:n={}}=e;return i.createElement(r.Flex,{width:"100%",column:!0,gap:2,padding:[4,0,0,0]},i.createElement(I.A,null,n.alerts))},L="deploy.operating-systems",U=34,B="data-collection",j="deploy-kubernetes",G={deploy:"rocket","data-collection":"collect",notify:"alarmFilled",export:"importExport",logs:void 0},F={deploy:H.A,"alert-notifications":O},z={homePage:"homePage",settingsPage:"settingsPage",addNodesModal:"addNodesModal"},Y=z.homePage,X={homePage:{headerContent:N},settingsPage:{headerContent:R},addNodesModal:{headerContent:null}},W={homePage:{header:{height:284,wrapperProps:{padding:[4,0]}},search:{wrapperProps:{width:"500px",margin:[0]},inputProps:{containerStyles:{width:"100%"}}},cncf:{height:80},categoryCharacterWidth:9},settingsPage:{header:{height:210,wrapperProps:{padding:[4,0]}},search:{wrapperProps:{width:"500px",margin:[0]},inputProps:{containerStyles:{width:"100%"}}},cncf:{height:0},categoryCharacterWidth:9},addNodesModal:{header:{height:126,wrapperProps:{width:"100%",padding:[4]}},search:{wrapperProps:{width:"100%",margin:[0],flex:!0},inputProps:{containerStyles:{width:"500px"},size:"small"}},cncf:{height:16},categoryCharacterWidth:9}}},9060:(e,n,t)=>{t.d(n,{A:()=>i});t(3064),t(98992),t(72577);var o=t(27776);const i=()=>{const e=(0,o.j6)();return n=>e.find((e=>e.id==n))}},27776:(e,n,t)=>{t.d(n,{j6:()=>x,x9:()=>T,hh:()=>v,XL:()=>N,b8:()=>k,FF:()=>D,oE:()=>S,GT:()=>A,AR:()=>q,yv:()=>E,q2:()=>M,WB:()=>C,Ss:()=>P});t(17333),t(3064),t(41393),t(98992),t(54520),t(72577),t(81454),t(62953);var o=t(96540),i=t(47444),a=t(27467);const s=[{id:"deploy",name:"Deploy",description:"",most_popular:!0,priority:1,children:[{id:"deploy.operating-systems",name:"Operating Systems",description:"",most_popular:!0,priority:1,children:[]},{id:"deploy.docker-kubernetes",name:"Docker & Kubernetes",description:"",most_popular:!0,priority:2,children:[]},{id:"deploy.provisioning-systems",parent:"deploy",name:"Provisioning Systems",description:"",most_popular:!1,priority:-1,children:[]}]},{id:"data-collection",name:"Data Collection",description:"",most_popular:!0,priority:2,children:[{id:"data-collection.other",name:"Other",description:"",most_popular:!1,priority:-1,collector_default:!0,children:[]},{id:"data-collection.ebpf",name:"eBPF",description:"",most_popular:!1,priority:-1,children:[]},{id:"data-collection.freebsd",name:"FreeBSD",description:"",most_popular:!1,priority:-1,children:[]},{id:"data-collection.containers-and-vms",name:"Containers and VMs",description:"",most_popular:!0,priority:6,children:[]},{id:"data-collection.database-servers",name:"Databases",description:"",most_popular:!0,priority:1,children:[]},{id:"data-collection.kubernetes",name:"Kubernetes",description:"",most_popular:!0,priority:7,children:[]},{id:"data-collection.notifications",name:"Incident Management",description:"",most_popular:!1,priority:-1,children:[]},{id:"data-collection.service-discovery-registry",name:"Service Discovery / Registry",description:"",most_popular:!1,priority:-1,children:[]},{id:"data-collection.web-servers-and-web-proxies",name:"Web Servers and Web Proxies",description:"",most_popular:!0,priority:2,children:[]},{id:"data-collection.cloud-provider-managed",name:"Cloud Provider Managed",description:"",most_popular:!1,priority:-1,children:[]},{id:"data-collection.windows-systems",name:"Windows 
Systems",description:"",most_popular:!1,priority:-1,children:[]},{id:"data-collection.apm",name:"APM",description:"",most_popular:!1,priority:-1,children:[]},{id:"data-collection.hardware-devices-and-sensors",name:"Hardware Devices and Sensors",description:"",most_popular:!0,priority:4,children:[]},{id:"data-collection.macos-systems",name:"macOS Systems",description:"",most_popular:!1,priority:-1,children:[]},{id:"data-collection.message-brokers",name:"Message Brokers",description:"",most_popular:!0,priority:3,children:[]},{id:"data-collection.provisioning-systems",name:"Provisioning Systems",description:"",most_popular:!1,priority:-1,children:[]},{id:"data-collection.search-engines",name:"Search Engines",description:"",most_popular:!1,priority:-1,children:[]},{id:"data-collection.linux-systems",name:"Linux Systems",description:"",most_popular:!0,priority:5,children:[{id:"data-collection.linux-systems.system-metrics",name:"System",description:"",most_popular:!0,priority:1,children:[]},{id:"data-collection.linux-systems.memory-metrics",name:"Memory",description:"",most_popular:!0,priority:3,children:[]},{id:"data-collection.linux-systems.cpu-metrics",name:"CPU",description:"",most_popular:!0,priority:2,children:[]},{id:"data-collection.linux-systems.pressure-metrics",name:"Pressure",description:"",most_popular:!1,priority:-1,children:[]},{id:"data-collection.linux-systems.network-metrics",name:"Network",description:"",most_popular:!0,priority:5,children:[]},{id:"data-collection.linux-systems.ipc-metrics",name:"IPC",description:"",most_popular:!1,priority:-1,children:[]},{id:"data-collection.linux-systems.disk-metrics",name:"Disk",description:"",most_popular:!0,priority:4,children:[]},{id:"data-collection.linux-systems.firewall-metrics",name:"Firewall",description:"",most_popular:!1,priority:-1,children:[]},{id:"data-collection.linux-systems.power-supply-metrics",name:"Power Supply",description:"",most_popular:!1,priority:-1,children:[]},{id:"data-collection.linux-systems.filesystem-metrics",name:"Filesystem",description:"",most_popular:!1,priority:-1,children:[{id:"data-collection.linux-systems.filesystem-metrics.zfs",name:"ZFS",description:"",most_popular:!1,priority:-1,children:[]},{id:"data-collection.linux-systems.filesystem-metrics.btrfs",name:"BTRFS",description:"",most_popular:!1,priority:-1,children:[]},{id:"data-collection.linux-systems.filesystem-metrics.nfs",name:"NFS",description:"",most_popular:!1,priority:-1,children:[]}]},{id:"data-collection.linux-systems.kernel-metrics",name:"Kernel",description:"",most_popular:!1,priority:-1,children:[]}]},{id:"data-collection.networking-stack-and-network-interfaces",name:"Networking Stack and Network Interfaces",description:"",most_popular:!1,priority:-1,children:[]},{id:"data-collection.synthetic-checks",name:"Synthetic Checks",description:"",most_popular:!1,priority:-1,children:[]},{id:"data-collection.ci-cd-systems",name:"CICD Platforms",description:"",most_popular:!1,priority:-1,children:[]},{id:"data-collection.ups",name:"UPS",description:"",most_popular:!1,priority:-1,children:[]},{id:"data-collection.freebsd-systems",name:"FreeBSD Systems",description:"",most_popular:!1,priority:-1,children:[]},{id:"data-collection.logs-servers",name:"Logs Servers",description:"",most_popular:!1,priority:-1,children:[]},{id:"data-collection.security-systems",name:"Security 
Systems",description:"",most_popular:!1,priority:-1,children:[]},{id:"data-collection.observability",name:"Observability",description:"",most_popular:!1,priority:-1,children:[]},{id:"data-collection.gaming",name:"Gaming",description:"",most_popular:!1,priority:-1,children:[]},{id:"data-collection.iot-devices",name:"IoT Devices",description:"",most_popular:!1,priority:-1,children:[]},{id:"data-collection.media-streaming-servers",name:"Media Services",description:"",most_popular:!1,priority:-1,children:[]},{id:"data-collection.authentication-and-authorization",name:"Authentication and Authorization",description:"",most_popular:!1,priority:-1,children:[]},{id:"data-collection.project-management",name:"Project Management",description:"",most_popular:!1,priority:-1,children:[]},{id:"data-collection.application-servers",name:"Application Servers",description:"",most_popular:!1,priority:-1,children:[]},{id:"data-collection.dns-and-dhcp-servers",name:"DNS and DHCP Servers",description:"",most_popular:!1,priority:-1,children:[]},{id:"data-collection.mail-servers",name:"Mail Servers",description:"",most_popular:!1,priority:-1,children:[]},{id:"data-collection.processes-and-system-services",name:"Processes and System Services",description:"",most_popular:!1,priority:-1,children:[]},{id:"data-collection.storage-mount-points-and-filesystems",name:"Storage, Mount Points and Filesystems",description:"",most_popular:!1,priority:-1,children:[]},{id:"data-collection.systemd",name:"Systemd",description:"",most_popular:!1,priority:-1,children:[]},{id:"data-collection.telephony-servers",name:"Telephony Servers",description:"",most_popular:!1,priority:-1,children:[]},{id:"data-collection.vpns",name:"VPNs",description:"",most_popular:!1,priority:-1,children:[]},{id:"data-collection.blockchain-servers",name:"Blockchain Servers",description:"",most_popular:!1,priority:-1,children:[]},{id:"data-collection.distributed-computing-systems",name:"Distributed Computing Systems",description:"",most_popular:!1,priority:-1,children:[]},{id:"data-collection.generic-data-collection",name:"Generic Data Collection",description:"",most_popular:!1,priority:-1,children:[]},{id:"data-collection.p2p",name:"P2P",description:"",most_popular:!1,priority:-1,children:[]},{id:"data-collection.snmp-and-networked-devices",name:"SNMP and Networked Devices",description:"",most_popular:!1,priority:-1,children:[]},{id:"data-collection.system-clock-and-ntp",name:"System Clock and NTP",description:"",most_popular:!1,priority:-1,children:[]},{id:"data-collection.nas",name:"NAS",description:"",most_popular:!1,priority:-1,children:[]},{id:"data-collection.api-gateways",name:"API Gateways",description:"",most_popular:!1,priority:-1,children:[]},{id:"data-collection.task-queues",name:"Task Queues",description:"",most_popular:!1,priority:-1,children:[]},{id:"data-collection.ftp-servers",name:"FTP Servers",description:"",most_popular:!1,priority:-1,children:[]}]},{id:"logs",name:"Logs",description:"Monitoring logs on your infrastructure",most_popular:!0,priority:3,children:[]},{id:"export",name:"exporters",description:"Exporter Integrations",most_popular:!0,priority:6,children:[]},{id:"notify",name:"notifications",description:"Notification Integrations",most_popular:!0,priority:4,children:[{id:"notify.agent",name:"Agent Dispatched Notifications",description:"",most_popular:!0,priority:2,children:[]},{id:"notify.cloud",name:"Centralized Cloud 
Notifications",description:"",most_popular:!0,priority:1,children:[]}]},{id:"auth",name:"authentication",description:"Authentication & Authorization",most_popular:!0,priority:5,children:[]}],r=[{meta:{plugin_name:"apps.plugin",module_name:"apps",monitored_instance:{name:"Applications",link:"",categories:["data-collection.processes-and-system-services"],icon_filename:"applications.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["applications","processes","os","host monitoring"],most_popular:!1},overview:"# Applications\n\nPlugin: apps.plugin\nModule: apps\n\n## Overview\n\nMonitor Applications for optimal software performance and resource usage.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per applications group\n\nThese metrics refer to the application group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.cpu_utilization | user, system | percentage |\n| app.cpu_guest_utilization | guest | percentage |\n| app.cpu_context_switches | voluntary, involuntary | switches/s |\n| app.mem_usage | rss | MiB |\n| app.mem_private_usage | mem | MiB |\n| app.vmem_usage | vmem | MiB |\n| app.mem_page_faults | minor, major | pgfaults/s |\n| app.swap_usage | swap | MiB |\n| app.disk_physical_io | reads, writes | KiB/s |\n| app.disk_logical_io | reads, writes | KiB/s |\n| app.processes | processes | processes |\n| app.threads | threads | threads |\n| app.fds_open_limit | limit | percentage |\n| app.fds_open | files, sockets, pipes, inotifies, event, timer, signal, eventpolls, other | fds |\n| app.uptime | uptime | seconds |\n| app.uptime_summary | min, avg, max | seconds |\n\n",integration_type:"collector",id:"apps.plugin-apps-Applications",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/apps.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"apps.plugin",module_name:"groups",monitored_instance:{name:"User Groups",link:"",categories:["data-collection.processes-and-system-services"],icon_filename:"user.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["groups","processes","user auditing","authorization","os","host monitoring"],most_popular:!1},overview:"# User Groups\n\nPlugin: apps.plugin\nModule: groups\n\n## Overview\n\nThis integration monitors resource utilization in a user-group context.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per user group\n\nThese metrics refer to the user group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| user_group | The name of the user group. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| usergroup.cpu_utilization | user, system | percentage |\n| usergroup.cpu_guest_utilization | guest | percentage |\n| usergroup.cpu_context_switches | voluntary, involuntary | switches/s |\n| usergroup.mem_usage | rss | MiB |\n| usergroup.mem_private_usage | mem | MiB |\n| usergroup.vmem_usage | vmem | MiB |\n| usergroup.mem_page_faults | minor, major | pgfaults/s |\n| usergroup.swap_usage | swap | MiB |\n| usergroup.disk_physical_io | reads, writes | KiB/s |\n| usergroup.disk_logical_io | reads, writes | KiB/s |\n| usergroup.processes | processes | processes |\n| usergroup.threads | threads | threads |\n| usergroup.fds_open_limit | limit | percentage |\n| usergroup.fds_open | files, sockets, pipes, inotifies, event, timer, signal, eventpolls, other | fds |\n| usergroup.uptime | uptime | seconds |\n| usergroup.uptime_summary | min, avg, max | seconds |\n\n",integration_type:"collector",id:"apps.plugin-groups-User_Groups",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/apps.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"apps.plugin",module_name:"users",monitored_instance:{name:"Users",link:"",categories:["data-collection.processes-and-system-services"],icon_filename:"users.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["users","processes","os","host monitoring"],most_popular:!1},overview:"# Users\n\nPlugin: apps.plugin\nModule: users\n\n## Overview\n\nThis integration monitors resource utilization in a user context.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per user\n\nThese metrics refer to the user.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| user | The name of the user. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| user.cpu_utilization | user, system | percentage |\n| user.cpu_guest_utilization | guest | percentage |\n| user.cpu_context_switches | voluntary, involuntary | switches/s |\n| user.mem_usage | rss | MiB |\n| user.mem_private_usage | mem | MiB |\n| user.vmem_usage | vmem | MiB |\n| user.mem_page_faults | minor, major | pgfaults/s |\n| user.swap_usage | swap | MiB |\n| user.disk_physical_io | reads, writes | KiB/s |\n| user.disk_logical_io | reads, writes | KiB/s |\n| user.processes | processes | processes |\n| user.threads | threads | threads |\n| user.fds_open_limit | limit | percentage |\n| user.fds_open | files, sockets, pipes, inotifies, event, timer, signal, eventpolls, other | fds |\n| user.uptime | uptime | seconds |\n| user.uptime_summary | min, avg, max | seconds |\n\n",integration_type:"collector",id:"apps.plugin-users-Users",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/apps.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"cgroups.plugin",module_name:"/sys/fs/cgroup",monitored_instance:{name:"Containers",link:"",categories:["data-collection.containers-and-vms"],icon_filename:"container.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["containers"],most_popular:!0},overview:"# Containers\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor Containers for performance, resource usage, and health status.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |\n| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization |\n| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n",metrics:'## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.cpu_limit | used | percentage |\n| cgroup.cpu | user, system | percentage |\n| cgroup.cpu_per_core | a dimension per core | percentage |\n| cgroup.throttled | throttled | percentage |\n| cgroup.throttled_duration | duration | ms |\n| cgroup.cpu_shares | shares | shares |\n| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |\n| cgroup.writeback | dirty, writeback | MiB |\n| cgroup.mem_activity | in, out | MiB/s |\n| cgroup.pgfaults | pgfault, swap | MiB/s |\n| cgroup.mem_usage | ram, swap | MiB |\n| cgroup.mem_usage_limit | available, used | MiB |\n| cgroup.mem_utilization | utilization | percentage |\n| cgroup.mem_failcnt | failures | count |\n| cgroup.io | read, write | KiB/s |\n| cgroup.serviced_ops | read, write | operations/s |\n| cgroup.throttle_io | read, write | KiB/s |\n| cgroup.throttle_serviced_ops | read, write | operations/s |\n| cgroup.queued_ops | read, write | operations |\n| cgroup.merged_ops | read, write | operations/s |\n| cgroup.cpu_some_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_some_pressure_stall_time | time | ms |\n| cgroup.cpu_full_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_full_pressure_stall_time | time | ms |\n| cgroup.memory_some_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_some_pressure_stall_time | time | ms |\n| cgroup.memory_full_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_full_pressure_stall_time | time | ms |\n| cgroup.io_some_pressure | some10, some60, some300 | percentage |\n| cgroup.io_some_pressure_stall_time | time | ms |\n| cgroup.io_full_pressure | some10, some60, some300 | percentage |\n| cgroup.io_full_pressure_stall_time | time | ms |\n| cgroup.pids_current | pids | pids |\n\n### Per cgroup network device\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n| device | The name of the host network interface linked to the container\'s network interface. |\n| container_device | Container network interface name. |\n| interface_type | Network interface type. Always "virtual" for the containers. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.net_net | received, sent | kilobits/s |\n| cgroup.net_packets | received, sent, multicast | pps |\n| cgroup.net_errors | inbound, outbound | errors/s |\n| cgroup.net_drops | inbound, outbound | errors/s |\n| cgroup.net_fifo | receive, transmit | errors/s |\n| cgroup.net_compressed | receive, sent | pps |\n| cgroup.net_events | frames, collisions, carrier | events/s |\n| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| cgroup.net_carrier | up, down | state |\n| cgroup.net_mtu | mtu | octets |\n\n',integration_type:"collector",id:"cgroups.plugin-/sys/fs/cgroup-Containers",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"cgroups.plugin",module_name:"/sys/fs/cgroup",monitored_instance:{name:"Kubernetes Containers",link:"https://kubernetes.io/",icon_filename:"kubernetes.svg",categories:["data-collection.kubernetes"]},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["k8s","kubernetes","pods","containers"],most_popular:!0},overview:"# Kubernetes Containers\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor Containers for performance, resource usage, and health status.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ k8s_cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | k8s.cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |\n| [ k8s_cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | k8s.cgroup.mem_usage | cgroup memory utilization |\n| [ k8s_cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | k8s.cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ k8s_cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | k8s.cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n",metrics:'## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per k8s cgroup\n\nThese metrics refer to the Pod container.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| k8s_node_name | Node name. The value of _pod.spec.nodeName_. |\n| k8s_namespace | Namespace name. The value of _pod.metadata.namespace_. |\n| k8s_controller_kind | Controller kind (ReplicaSet, DaemonSet, StatefulSet, Job, etc.). The value of _pod.OwnerReferences.Controller.Kind_. |\n| k8s_controller_name | Controller name. The value of _pod.OwnerReferences.Controller.Name_. |\n| k8s_pod_name | Pod name. The value of _pod.metadata.name_. |\n| k8s_container_name | Container name. The value of _pod.spec.containers.name_. |\n| k8s_kind | Instance kind: "pod" or "container". |\n| k8s_qos_class | QoS class (guaranteed, burstable, besteffort). |\n| k8s_cluster_id | Cluster ID. The value of kube-system namespace _namespace.metadata.uid_. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s.cgroup.cpu_limit | used | percentage |\n| k8s.cgroup.cpu | user, system | percentage |\n| k8s.cgroup.cpu_per_core | a dimension per core | percentage |\n| k8s.cgroup.throttled | throttled | percentage |\n| k8s.cgroup.throttled_duration | duration | ms |\n| k8s.cgroup.cpu_shares | shares | shares |\n| k8s.cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |\n| k8s.cgroup.writeback | dirty, writeback | MiB |\n| k8s.cgroup.mem_activity | in, out | MiB/s |\n| k8s.cgroup.pgfaults | pgfault, swap | MiB/s |\n| k8s.cgroup.mem_usage | ram, swap | MiB |\n| k8s.cgroup.mem_usage_limit | available, used | MiB |\n| k8s.cgroup.mem_utilization | utilization | percentage |\n| k8s.cgroup.mem_failcnt | failures | count |\n| k8s.cgroup.io | read, write | KiB/s |\n| k8s.cgroup.serviced_ops | read, write | operations/s |\n| k8s.cgroup.throttle_io | read, write | KiB/s |\n| k8s.cgroup.throttle_serviced_ops | read, write | operations/s |\n| k8s.cgroup.queued_ops | read, write | operations |\n| k8s.cgroup.merged_ops | read, write | operations/s |\n| k8s.cgroup.cpu_some_pressure | some10, some60, some300 | percentage |\n| k8s.cgroup.cpu_some_pressure_stall_time | time | ms |\n| k8s.cgroup.cpu_full_pressure | some10, some60, some300 | percentage |\n| k8s.cgroup.cpu_full_pressure_stall_time | time | ms |\n| k8s.cgroup.memory_some_pressure | some10, some60, some300 | percentage |\n| k8s.cgroup.memory_some_pressure_stall_time | time | ms |\n| k8s.cgroup.memory_full_pressure | some10, some60, some300 | percentage |\n| k8s.cgroup.memory_full_pressure_stall_time | time | ms |\n| k8s.cgroup.io_some_pressure | some10, some60, some300 | percentage |\n| k8s.cgroup.io_some_pressure_stall_time | time | ms |\n| k8s.cgroup.io_full_pressure | some10, some60, some300 | percentage |\n| k8s.cgroup.io_full_pressure_stall_time | time | ms |\n| k8s.cgroup.pids_current | pids | pids |\n\n### Per k8s cgroup network device\n\nThese metrics refer to the Pod container network interface.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | The name of the host network interface linked to the container\'s network interface. |\n| container_device | Container network interface name. |\n| interface_type | Network interface type. Always "virtual" for the containers. |\n| k8s_node_name | Node name. The value of _pod.spec.nodeName_. |\n| k8s_namespace | Namespace name. The value of _pod.metadata.namespace_. |\n| k8s_controller_kind | Controller kind (ReplicaSet, DaemonSet, StatefulSet, Job, etc.). 
The value of _pod.OwnerReferences.Controller.Kind_. |\n| k8s_controller_name | Controller name. The value of _pod.OwnerReferences.Controller.Name_. |\n| k8s_pod_name | Pod name. The value of _pod.metadata.name_. |\n| k8s_container_name | Container name. The value of _pod.spec.containers.name_. |\n| k8s_kind | Instance kind: "pod" or "container". |\n| k8s_qos_class | QoS class (guaranteed, burstable, besteffort). |\n| k8s_cluster_id | Cluster ID. The value of kube-system namespace _namespace.metadata.uid_. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s.cgroup.net_net | received, sent | kilobits/s |\n| k8s.cgroup.net_packets | received, sent, multicast | pps |\n| k8s.cgroup.net_errors | inbound, outbound | errors/s |\n| k8s.cgroup.net_drops | inbound, outbound | errors/s |\n| k8s.cgroup.net_fifo | receive, transmit | errors/s |\n| k8s.cgroup.net_compressed | receive, sent | pps |\n| k8s.cgroup.net_events | frames, collisions, carrier | events/s |\n| k8s.cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| k8s.cgroup.net_carrier | up, down | state |\n| k8s.cgroup.net_mtu | mtu | octets |\n\n',integration_type:"collector",id:"cgroups.plugin-/sys/fs/cgroup-Kubernetes_Containers",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"cgroups.plugin",module_name:"/sys/fs/cgroup",monitored_instance:{name:"LXC Containers",link:"",icon_filename:"lxc.png",categories:["data-collection.containers-and-vms"]},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["lxc","lxd","container"],most_popular:!0},overview:"# LXC Containers\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor LXC Containers for performance, resource usage, and health status.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |\n| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization |\n| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ cgroup_10s_received_packets_storm 
](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n",metrics:'## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.cpu_limit | used | percentage |\n| cgroup.cpu | user, system | percentage |\n| cgroup.cpu_per_core | a dimension per core | percentage |\n| cgroup.throttled | throttled | percentage |\n| cgroup.throttled_duration | duration | ms |\n| cgroup.cpu_shares | shares | shares |\n| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |\n| cgroup.writeback | dirty, writeback | MiB |\n| cgroup.mem_activity | in, out | MiB/s |\n| cgroup.pgfaults | pgfault, swap | MiB/s |\n| cgroup.mem_usage | ram, swap | MiB |\n| cgroup.mem_usage_limit | available, used | MiB |\n| cgroup.mem_utilization | utilization | percentage |\n| cgroup.mem_failcnt | failures | count |\n| cgroup.io | read, write | KiB/s |\n| cgroup.serviced_ops | read, write | operations/s |\n| cgroup.throttle_io | read, write | KiB/s |\n| cgroup.throttle_serviced_ops | read, write | operations/s |\n| cgroup.queued_ops | read, write | operations |\n| cgroup.merged_ops | read, write | operations/s |\n| cgroup.cpu_some_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_some_pressure_stall_time | time | ms |\n| cgroup.cpu_full_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_full_pressure_stall_time | time | ms |\n| cgroup.memory_some_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_some_pressure_stall_time | time | ms |\n| cgroup.memory_full_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_full_pressure_stall_time | time | ms |\n| cgroup.io_some_pressure | some10, some60, some300 | percentage |\n| cgroup.io_some_pressure_stall_time | time | ms |\n| cgroup.io_full_pressure | some10, some60, some300 | percentage |\n| cgroup.io_full_pressure_stall_time | time | ms |\n| cgroup.pids_current | pids | pids |\n\n### Per cgroup network device\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n| device | The name of the host network interface linked to the container\'s network interface. |\n| container_device | Container network interface name. |\n| interface_type | Network interface type. Always "virtual" for the containers. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.net_net | received, sent | kilobits/s |\n| cgroup.net_packets | received, sent, multicast | pps |\n| cgroup.net_errors | inbound, outbound | errors/s |\n| cgroup.net_drops | inbound, outbound | errors/s |\n| cgroup.net_fifo | receive, transmit | errors/s |\n| cgroup.net_compressed | receive, sent | pps |\n| cgroup.net_events | frames, collisions, carrier | events/s |\n| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| cgroup.net_carrier | up, down | state |\n| cgroup.net_mtu | mtu | octets |\n\n',integration_type:"collector",id:"cgroups.plugin-/sys/fs/cgroup-LXC_Containers",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"cgroups.plugin",module_name:"/sys/fs/cgroup",monitored_instance:{name:"Libvirt Containers",link:"",icon_filename:"libvirt.png",categories:["data-collection.containers-and-vms"]},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["libvirt","container"],most_popular:!0},overview:"# Libvirt Containers\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor Libvirt for performance, resource usage, and health status.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |\n| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization |\n| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n",metrics:'## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. 
|\n| image | Docker/Podman container image name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.cpu_limit | used | percentage |\n| cgroup.cpu | user, system | percentage |\n| cgroup.cpu_per_core | a dimension per core | percentage |\n| cgroup.throttled | throttled | percentage |\n| cgroup.throttled_duration | duration | ms |\n| cgroup.cpu_shares | shares | shares |\n| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |\n| cgroup.writeback | dirty, writeback | MiB |\n| cgroup.mem_activity | in, out | MiB/s |\n| cgroup.pgfaults | pgfault, swap | MiB/s |\n| cgroup.mem_usage | ram, swap | MiB |\n| cgroup.mem_usage_limit | available, used | MiB |\n| cgroup.mem_utilization | utilization | percentage |\n| cgroup.mem_failcnt | failures | count |\n| cgroup.io | read, write | KiB/s |\n| cgroup.serviced_ops | read, write | operations/s |\n| cgroup.throttle_io | read, write | KiB/s |\n| cgroup.throttle_serviced_ops | read, write | operations/s |\n| cgroup.queued_ops | read, write | operations |\n| cgroup.merged_ops | read, write | operations/s |\n| cgroup.cpu_some_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_some_pressure_stall_time | time | ms |\n| cgroup.cpu_full_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_full_pressure_stall_time | time | ms |\n| cgroup.memory_some_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_some_pressure_stall_time | time | ms |\n| cgroup.memory_full_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_full_pressure_stall_time | time | ms |\n| cgroup.io_some_pressure | some10, some60, some300 | percentage |\n| cgroup.io_some_pressure_stall_time | time | ms |\n| cgroup.io_full_pressure | some10, some60, some300 | percentage |\n| cgroup.io_full_pressure_stall_time | time | ms |\n| cgroup.pids_current | pids | pids |\n\n### Per cgroup network device\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n| device | The name of the host network interface linked to the container\'s network interface. |\n| container_device | Container network interface name. |\n| interface_type | Network interface type. Always "virtual" for the containers. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.net_net | received, sent | kilobits/s |\n| cgroup.net_packets | received, sent, multicast | pps |\n| cgroup.net_errors | inbound, outbound | errors/s |\n| cgroup.net_drops | inbound, outbound | errors/s |\n| cgroup.net_fifo | receive, transmit | errors/s |\n| cgroup.net_compressed | receive, sent | pps |\n| cgroup.net_events | frames, collisions, carrier | events/s |\n| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| cgroup.net_carrier | up, down | state |\n| cgroup.net_mtu | mtu | octets |\n\n',integration_type:"collector",id:"cgroups.plugin-/sys/fs/cgroup-Libvirt_Containers",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"cgroups.plugin",module_name:"/sys/fs/cgroup",monitored_instance:{name:"Proxmox Containers",link:"",icon_filename:"proxmox.png",categories:["data-collection.containers-and-vms"]},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["proxmox","container"],most_popular:!0},overview:"# Proxmox Containers\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor Proxmox for performance, resource usage, and health status.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |\n| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization |\n| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n",metrics:'## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. 
|\n| image | Docker/Podman container image name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.cpu_limit | used | percentage |\n| cgroup.cpu | user, system | percentage |\n| cgroup.cpu_per_core | a dimension per core | percentage |\n| cgroup.throttled | throttled | percentage |\n| cgroup.throttled_duration | duration | ms |\n| cgroup.cpu_shares | shares | shares |\n| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |\n| cgroup.writeback | dirty, writeback | MiB |\n| cgroup.mem_activity | in, out | MiB/s |\n| cgroup.pgfaults | pgfault, swap | MiB/s |\n| cgroup.mem_usage | ram, swap | MiB |\n| cgroup.mem_usage_limit | available, used | MiB |\n| cgroup.mem_utilization | utilization | percentage |\n| cgroup.mem_failcnt | failures | count |\n| cgroup.io | read, write | KiB/s |\n| cgroup.serviced_ops | read, write | operations/s |\n| cgroup.throttle_io | read, write | KiB/s |\n| cgroup.throttle_serviced_ops | read, write | operations/s |\n| cgroup.queued_ops | read, write | operations |\n| cgroup.merged_ops | read, write | operations/s |\n| cgroup.cpu_some_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_some_pressure_stall_time | time | ms |\n| cgroup.cpu_full_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_full_pressure_stall_time | time | ms |\n| cgroup.memory_some_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_some_pressure_stall_time | time | ms |\n| cgroup.memory_full_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_full_pressure_stall_time | time | ms |\n| cgroup.io_some_pressure | some10, some60, some300 | percentage |\n| cgroup.io_some_pressure_stall_time | time | ms |\n| cgroup.io_full_pressure | some10, some60, some300 | percentage |\n| cgroup.io_full_pressure_stall_time | time | ms |\n| cgroup.pids_current | pids | pids |\n\n### Per cgroup network device\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n| device | The name of the host network interface linked to the container\'s network interface. |\n| container_device | Container network interface name. |\n| interface_type | Network interface type. Always "virtual" for the containers. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.net_net | received, sent | kilobits/s |\n| cgroup.net_packets | received, sent, multicast | pps |\n| cgroup.net_errors | inbound, outbound | errors/s |\n| cgroup.net_drops | inbound, outbound | errors/s |\n| cgroup.net_fifo | receive, transmit | errors/s |\n| cgroup.net_compressed | receive, sent | pps |\n| cgroup.net_events | frames, collisions, carrier | events/s |\n| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| cgroup.net_carrier | up, down | state |\n| cgroup.net_mtu | mtu | octets |\n\n',integration_type:"collector",id:"cgroups.plugin-/sys/fs/cgroup-Proxmox_Containers",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"cgroups.plugin",module_name:"/sys/fs/cgroup",monitored_instance:{name:"Systemd Services",link:"",icon_filename:"systemd.svg",categories:["data-collection.systemd"],keywords:["systemd","services"]},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["containers"],most_popular:!0},overview:"# Systemd Services\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor Systemd Services for performance, resource usage, and health status.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per systemd service\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service_name | Service name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| systemd.service.cpu.utilization | user, system | percentage |\n| systemd.service.memory.usage | ram, swap | MiB |\n| systemd.service.memory.failcnt | fail | failures/s |\n| systemd.service.memory.ram.usage | rss, cache, mapped_file, rss_huge | MiB |\n| systemd.service.memory.writeback | writeback, dirty | MiB |\n| systemd.service.memory.paging.faults | minor, major | MiB/s |\n| systemd.service.memory.paging.io | in, out | MiB/s |\n| systemd.service.disk.io | read, write | KiB/s |\n| systemd.service.disk.iops | read, write | operations/s |\n| systemd.service.disk.throttle.io | read, write | KiB/s |\n| systemd.service.disk.throttle.iops | read, write | operations/s |\n| systemd.service.disk.queued_iops | read, write | operations/s |\n| systemd.service.disk.merged_iops | read, write | operations/s |\n| systemd.service.pids.current | pids | pids |\n\n",integration_type:"collector",id:"cgroups.plugin-/sys/fs/cgroup-Systemd_Services",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"cgroups.plugin",module_name:"/sys/fs/cgroup",monitored_instance:{name:"Virtual Machines",link:"",icon_filename:"container.svg",categories:["data-collection.containers-and-vms"]},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["vms","virtualization","container"],most_popular:!0},overview:"# Virtual Machines\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor Virtual Machines for performance, resource usage, and health status.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |\n| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization |\n| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets 
| ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n",metrics:'## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.cpu_limit | used | percentage |\n| cgroup.cpu | user, system | percentage |\n| cgroup.cpu_per_core | a dimension per core | percentage |\n| cgroup.throttled | throttled | percentage |\n| cgroup.throttled_duration | duration | ms |\n| cgroup.cpu_shares | shares | shares |\n| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |\n| cgroup.writeback | dirty, writeback | MiB |\n| cgroup.mem_activity | in, out | MiB/s |\n| cgroup.pgfaults | pgfault, swap | MiB/s |\n| cgroup.mem_usage | ram, swap | MiB |\n| cgroup.mem_usage_limit | available, used | MiB |\n| cgroup.mem_utilization | utilization | percentage |\n| cgroup.mem_failcnt | failures | count |\n| cgroup.io | read, write | KiB/s |\n| cgroup.serviced_ops | read, write | operations/s |\n| cgroup.throttle_io | read, write | KiB/s |\n| cgroup.throttle_serviced_ops | read, write | operations/s |\n| cgroup.queued_ops | read, write | operations |\n| cgroup.merged_ops | read, write | operations/s |\n| cgroup.cpu_some_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_some_pressure_stall_time | time | ms |\n| cgroup.cpu_full_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_full_pressure_stall_time | time | ms |\n| cgroup.memory_some_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_some_pressure_stall_time | time | ms |\n| cgroup.memory_full_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_full_pressure_stall_time | time | ms |\n| cgroup.io_some_pressure | some10, some60, some300 | percentage |\n| cgroup.io_some_pressure_stall_time | time | ms |\n| cgroup.io_full_pressure | some10, some60, some300 | percentage |\n| cgroup.io_full_pressure_stall_time | time | ms |\n| cgroup.pids_current | pids | pids |\n\n### Per cgroup network device\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n| device | The name of the host network interface linked to the container\'s network interface. |\n| container_device | Container network interface name. |\n| interface_type | Network interface type. Always "virtual" for the containers. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.net_net | received, sent | kilobits/s |\n| cgroup.net_packets | received, sent, multicast | pps |\n| cgroup.net_errors | inbound, outbound | errors/s |\n| cgroup.net_drops | inbound, outbound | errors/s |\n| cgroup.net_fifo | receive, transmit | errors/s |\n| cgroup.net_compressed | receive, sent | pps |\n| cgroup.net_events | frames, collisions, carrier | events/s |\n| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| cgroup.net_carrier | up, down | state |\n| cgroup.net_mtu | mtu | octets |\n\n',integration_type:"collector",id:"cgroups.plugin-/sys/fs/cgroup-Virtual_Machines",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"cgroups.plugin",module_name:"/sys/fs/cgroup",monitored_instance:{name:"oVirt Containers",link:"",icon_filename:"ovirt.svg",categories:["data-collection.containers-and-vms"]},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["ovirt","container"],most_popular:!0},overview:"# oVirt Containers\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor oVirt for performance, resource usage, and health status.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |\n| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization |\n| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n",metrics:'## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. 
|\n| image | Docker/Podman container image name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.cpu_limit | used | percentage |\n| cgroup.cpu | user, system | percentage |\n| cgroup.cpu_per_core | a dimension per core | percentage |\n| cgroup.throttled | throttled | percentage |\n| cgroup.throttled_duration | duration | ms |\n| cgroup.cpu_shares | shares | shares |\n| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |\n| cgroup.writeback | dirty, writeback | MiB |\n| cgroup.mem_activity | in, out | MiB/s |\n| cgroup.pgfaults | pgfault, swap | MiB/s |\n| cgroup.mem_usage | ram, swap | MiB |\n| cgroup.mem_usage_limit | available, used | MiB |\n| cgroup.mem_utilization | utilization | percentage |\n| cgroup.mem_failcnt | failures | count |\n| cgroup.io | read, write | KiB/s |\n| cgroup.serviced_ops | read, write | operations/s |\n| cgroup.throttle_io | read, write | KiB/s |\n| cgroup.throttle_serviced_ops | read, write | operations/s |\n| cgroup.queued_ops | read, write | operations |\n| cgroup.merged_ops | read, write | operations/s |\n| cgroup.cpu_some_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_some_pressure_stall_time | time | ms |\n| cgroup.cpu_full_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_full_pressure_stall_time | time | ms |\n| cgroup.memory_some_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_some_pressure_stall_time | time | ms |\n| cgroup.memory_full_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_full_pressure_stall_time | time | ms |\n| cgroup.io_some_pressure | some10, some60, some300 | percentage |\n| cgroup.io_some_pressure_stall_time | time | ms |\n| cgroup.io_full_pressure | some10, some60, some300 | percentage |\n| cgroup.io_full_pressure_stall_time | time | ms |\n| cgroup.pids_current | pids | pids |\n\n### Per cgroup network device\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n| device | The name of the host network interface linked to the container\'s network interface. |\n| container_device | Container network interface name. |\n| interface_type | Network interface type. Always "virtual" for the containers. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.net_net | received, sent | kilobits/s |\n| cgroup.net_packets | received, sent, multicast | pps |\n| cgroup.net_errors | inbound, outbound | errors/s |\n| cgroup.net_drops | inbound, outbound | errors/s |\n| cgroup.net_fifo | receive, transmit | errors/s |\n| cgroup.net_compressed | receive, sent | pps |\n| cgroup.net_events | frames, collisions, carrier | events/s |\n| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| cgroup.net_carrier | up, down | state |\n| cgroup.net_mtu | mtu | octets |\n\n',integration_type:"collector",id:"cgroups.plugin-/sys/fs/cgroup-oVirt_Containers",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"charts.d.plugin",module_name:"ap",monitored_instance:{name:"Access Points",link:"",categories:["data-collection.linux-systems.network-metrics"],icon_filename:"network-wired.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["ap","access","point","wireless","network"],most_popular:!1},overview:"# Access Points\n\nPlugin: charts.d.plugin\nModule: ap\n\n## Overview\n\nThe ap collector visualizes data related to wireless access points.\n\nIt uses the `iw` command line utility to detect access points. For each interface that is of `type AP`, it then runs `iw INTERFACE station dump` and collects statistics.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin is able to auto-detect if you are running access points on your Linux box.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install charts.d plugin\n\nIf [using our official native DEB/RPM packages](/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.\n\n\n#### `iw` utility\n\nMake sure the `iw` utility is installed.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `charts.d/ap.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config charts.d/ap.conf\n```\n#### Options\n\nThe config file is sourced by the charts.d plugin. It\'s a standard bash file.\n\nThe following collapsed table contains all the options that can be configured for the ap collector.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| ap_update_every | The data collection frequency. If unset, will inherit the netdata update frequency. | 1 | no |\n| ap_priority | Controls the order of charts at the netdata dashboard. | 6900 | no |\n| ap_retries | The number of retries to do in case of failure before disabling the collector. 
#### Examples\n\n##### Change the collection frequency\n\nSpecify a custom collection frequency (update_every) for this collector\n\n```yaml\n# the data collection frequency\n# if unset, will inherit the netdata update frequency\nap_update_every=10\n\n# the charts priority on the dashboard\n#ap_priority=6900\n\n# the number of retries to do in case of failure\n# before disabling the module\n#ap_retries=10\n\n```\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `ap` collector, run the `charts.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `charts.d.plugin` to debug the collector:\n\n ```bash\n ./charts.d.plugin debug 1 ap\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per wireless device\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ap.clients | clients | clients |\n| ap.net | received, sent | kilobits/s |\n| ap.packets | received, sent | packets/s |\n| ap.issues | retries, failures | issues/s |\n| ap.signal | average signal | dBm |\n| ap.bitrate | receive, transmit, expected | Mbps |\n\n",integration_type:"collector",id:"charts.d.plugin-ap-Access_Points",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/charts.d.plugin/ap/metadata.yaml",related_resources:""},{meta:{plugin_name:"charts.d.plugin",module_name:"apcupsd",monitored_instance:{name:"APC UPS",link:"https://www.apc.com",categories:["data-collection.ups"],icon_filename:"apc.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["ups","apc","power","supply","battery","apcupsd"],most_popular:!1},overview:"# APC UPS\n\nPlugin: charts.d.plugin\nModule: apcupsd\n\n## Overview\n\nMonitor APC UPS performance with Netdata for optimal uninterruptible power supply operations. Enhance your power supply reliability with real-time APC UPS metrics.\n\nThe collector uses the `apcaccess` tool to contact the `apcupsd` daemon and get the APC UPS statistics.\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, with no configuration provided, the collector will try to contact 127.0.0.1:3551 using the `apcaccess` utility.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install charts.d plugin\n\nIf [using our official native DEB/RPM packages](/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.\n\n\n#### Required software\n\nMake sure `apcaccess` and `apcupsd` are installed and running.\n\n
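You can verify that the daemon is reachable before configuring the collector. This is the same command the collector runs, shown here with the default address; adjust host and port to your setup:\n\n```bash\napcaccess status 127.0.0.1:3551\n```\n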
\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `charts.d/apcupsd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config charts.d/apcupsd.conf\n```\n#### Options\n\nThe config file is sourced by the charts.d plugin. It\'s a standard bash file.\n\nThe following collapsed table contains all the options that can be configured for the apcupsd collector.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| apcupsd_sources | This is an array of apcupsd sources. You can have multiple entries there. Please refer to the example below on how to set it. | 127.0.0.1:3551 | no |\n| apcupsd_timeout | How long to wait for apcupsd to respond. | 3 | no |\n| apcupsd_update_every | The data collection frequency. If unset, will inherit the netdata update frequency. | 1 | no |\n| apcupsd_priority | The charts priority on the dashboard. | 90000 | no |\n| apcupsd_retries | The number of retries to do in case of failure before disabling the collector. | 10 | no |\n\n{% /details %}\n#### Examples\n\n##### Multiple apcupsd sources\n\nSpecify multiple apcupsd sources along with a custom update interval\n\n```yaml\n# add all your APC UPSes in this array - uncomment it too\ndeclare -A apcupsd_sources=(\n ["local"]="127.0.0.1:3551"\n ["remote"]="1.2.3.4:3551"\n)\n\n# how long to wait for apcupsd to respond\n#apcupsd_timeout=3\n\n# the data collection frequency\n# if unset, will inherit the netdata update frequency\napcupsd_update_every=5\n\n# the charts priority on the dashboard\n#apcupsd_priority=90000\n\n# the number of retries to do in case of failure\n# before disabling the module\n#apcupsd_retries=10\n\n```\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `apcupsd` collector, run the `charts.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `charts.d.plugin` to debug the collector:\n\n ```bash\n ./charts.d.plugin debug 1 apcupsd\n ```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ apcupsd_ups_charge ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.charge | average UPS charge over the last minute |\n| [ apcupsd_10min_ups_load ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.load | average UPS load over the last 10 minutes |\n| [ apcupsd_last_collected_secs ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.load | number of seconds since the last successful data collection |\n| [ apcupsd_selftest_warning ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.selftest | self-test failed due to insufficient battery capacity or due to overload. |\n| [ apcupsd_status_onbatt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS has switched to battery power because the input power has failed |\n| [ apcupsd_status_overload ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS is overloaded and cannot supply enough power to the load |\n| [ apcupsd_status_lowbatt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS battery is low and needs to be recharged |\n| [ apcupsd_status_replacebatt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS battery has reached the end of its lifespan and needs to be replaced |\n| [ apcupsd_status_nobatt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS has no battery |\n| [ apcupsd_status_commlost ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS communication link is lost |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per ups\n\nMetrics related to UPS. 
Each UPS provides its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| apcupsd.charge | charge | percentage |\n| apcupsd.battery.voltage | voltage, nominal | Volts |\n| apcupsd.input.voltage | voltage, min, max | Volts |\n| apcupsd.output.voltage | absolute, nominal | Volts |\n| apcupsd.input.frequency | frequency | Hz |\n| apcupsd.load | load | percentage |\n| apcupsd.load_usage | load | Watts |\n| apcupsd.temperature | temp | Celsius |\n| apcupsd.time | time | Minutes |\n| apcupsd.online | online | boolean |\n| apcupsd.selftest | OK, NO, BT, NG | status |\n| apcupsd.status | ONLINE, ONBATT, OVERLOAD, LOWBATT, REPLACEBATT, NOBATT, SLAVE, SLAVEDOWN, COMMLOST, CAL, TRIM, BOOST, SHUTTING_DOWN | status |\n\n",integration_type:"collector",id:"charts.d.plugin-apcupsd-APC_UPS",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/charts.d.plugin/apcupsd/metadata.yaml",related_resources:""},{meta:{plugin_name:"charts.d.plugin",module_name:"libreswan",monitored_instance:{name:"Libreswan",link:"https://libreswan.org/",categories:["data-collection.vpns"],icon_filename:"libreswan.png"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["vpn","libreswan","network","ipsec"],most_popular:!1},overview:"# Libreswan\n\nPlugin: charts.d.plugin\nModule: libreswan\n\n## Overview\n\nMonitor Libreswan performance for optimal IPsec VPN operations. Improve your VPN operations with Netdata's real-time metrics and built-in alerts.\n\nThe collector uses the `ipsec` command to collect the information it needs.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install charts.d plugin\n\nIf [using our official native DEB/RPM packages](/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.\n\n\n#### Permissions to execute `ipsec`\n\nThe plugin executes two commands to collect all the information it needs:\n\n```sh\nipsec whack --status\nipsec whack --trafficstatus\n```\n\nThe first command is used to extract the currently established tunnels, their IDs and their names.\nThe second command is used to extract the current uptime and traffic.\n\nMost probably, the `netdata` user will not be able to query Libreswan, so the `ipsec` commands will be denied.\nThe plugin attempts to run `ipsec` as `sudo ipsec ...` to get access to Libreswan statistics.\n\nTo allow the `netdata` user to execute `sudo ipsec ...`, create the file `/etc/sudoers.d/netdata` with this content:\n\n```\nnetdata ALL = (root) NOPASSWD: /sbin/ipsec whack --status\nnetdata ALL = (root) NOPASSWD: /sbin/ipsec whack --trafficstatus\n```\n\nMake sure the path `/sbin/ipsec` matches your setup (execute `which ipsec` to find the right path).\n\n
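After creating the sudoers file, confirm that passwordless execution works for the `netdata` user. A quick check, run as root (assuming the `/sbin/ipsec` path):\n\n```bash\nsudo -u netdata sudo /sbin/ipsec whack --trafficstatus\n```\n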
\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `charts.d/libreswan.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config charts.d/libreswan.conf\n```\n#### Options\n\nThe config file is sourced by the charts.d plugin. It\'s a standard bash file.\n\nThe following collapsed table contains all the options that can be configured for the libreswan collector.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| libreswan_update_every | The data collection frequency. If unset, will inherit the netdata update frequency. | 1 | no |\n| libreswan_priority | The charts priority on the dashboard. | 90000 | no |\n| libreswan_retries | The number of retries to do in case of failure before disabling the collector. | 10 | no |\n| libreswan_sudo | Whether to run `ipsec` with `sudo` or not. | 1 | no |\n\n{% /details %}\n#### Examples\n\n##### Run `ipsec` without sudo\n\nRun the `ipsec` utility without sudo\n\n```yaml\n# the data collection frequency\n# if unset, will inherit the netdata update frequency\n#libreswan_update_every=1\n\n# the charts priority on the dashboard\n#libreswan_priority=90000\n\n# the number of retries to do in case of failure\n# before disabling the module\n#libreswan_retries=10\n\n# set to 1, to run ipsec with sudo (the default)\n# set to 0, to run ipsec without sudo\nlibreswan_sudo=0\n\n```\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `libreswan` collector, run the `charts.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `charts.d.plugin` to debug the collector:\n\n ```bash\n ./charts.d.plugin debug 1 libreswan\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per IPSEC tunnel\n\nMetrics related to IPSEC tunnels. Each tunnel provides its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| libreswan.net | in, out | kilobits/s |\n| libreswan.uptime | uptime | seconds |\n\n",integration_type:"collector",id:"charts.d.plugin-libreswan-Libreswan",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/charts.d.plugin/libreswan/metadata.yaml",related_resources:""},{meta:{plugin_name:"charts.d.plugin",module_name:"opensips",monitored_instance:{name:"OpenSIPS",link:"https://opensips.org/",categories:["data-collection.telephony-servers"],icon_filename:"opensips.png"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["opensips","sip","voice","video","stream"],most_popular:!1},overview:"# OpenSIPS\n\nPlugin: charts.d.plugin\nModule: opensips\n\n## Overview\n\nExamine OpenSIPS metrics for insights into SIP server operations. 
Study call rates, error rates, and response times for reliable voice over IP services.\n\nThe collector uses the `opensipsctl` command line utility to gather OpenSIPS metrics.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector will attempt to call `opensipsctl` with a default set of parameters, even without any configuration.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install charts.d plugin\n\nIf [using our official native DEB/RPM packages](/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.\n\n\n#### Required software\n\nThe collector requires `opensipsctl` to be installed.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `charts.d/opensips.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config charts.d/opensips.conf\n```\n#### Options\n\nThe config file is sourced by the charts.d plugin. It\'s a standard bash file.\n\nThe following collapsed table contains all the options that can be configured for the opensips collector.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| opensips_opts | Specify parameters to the `opensipsctl` command. If the default value fails to get global status, set here whatever options are needed to connect to the opensips server. | fifo get_statistics all | no |\n| opensips_cmd | If `opensipsctl` is not in $PATH, specify its full path here. | | no |\n| opensips_timeout | How long to wait for `opensipsctl` to respond. | 2 | no |\n| opensips_update_every | The data collection frequency. If unset, will inherit the netdata update frequency. | 5 | no |\n| opensips_priority | The charts priority on the dashboard. | 80000 | no |\n| opensips_retries | The number of retries to do in case of failure before disabling the collector. | 10 | no |\n\n{% /details %}\n#### Examples\n\n##### Custom `opensipsctl` command\n\nSet a custom path to the `opensipsctl` command\n\n```yaml\n#opensips_opts="fifo get_statistics all"\nopensips_cmd=/opt/opensips/bin/opensipsctl\n#opensips_timeout=2\n\n# the data collection frequency\n# if unset, will inherit the netdata update frequency\n#opensips_update_every=5\n\n# the charts priority on the dashboard\n#opensips_priority=80000\n\n# the number of retries to do in case of failure\n# before disabling the module\n#opensips_retries=10\n\n```\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `opensips` collector, run the `charts.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `charts.d.plugin` to debug the collector:\n\n ```bash\n ./charts.d.plugin debug 1 opensips\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per OpenSIPS instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| opensips.dialogs_active | active, early | dialogs |\n| opensips.users | registered, location, contacts, expires | users |\n| opensips.registrar | accepted, rejected | registrations/s |\n| opensips.transactions | UAS, UAC | transactions/s |\n| opensips.core_rcv | requests, replies | queries/s |\n| opensips.core_fwd | requests, replies | queries/s |\n| opensips.core_drop | requests, replies | queries/s |\n| opensips.core_err | requests, replies | queries/s |\n| opensips.core_bad | bad_URIs_rcvd, unsupported_methods, bad_msg_hdr | queries/s |\n| opensips.tm_replies | received, relayed, local | replies/s |\n| opensips.transactions_status | 2xx, 3xx, 4xx, 5xx, 6xx | transactions/s |\n| opensips.transactions_inuse | inuse | transactions |\n| opensips.sl_replies | 1xx, 2xx, 3xx, 4xx, 5xx, 6xx, sent, error, ACKed | replies/s |\n| opensips.dialogs | processed, expire, failed | dialogs/s |\n| opensips.net_waiting | UDP, TCP | kilobytes |\n| opensips.uri_checks | positive, negative | checks / sec |\n| opensips.traces | requests, replies | traces / sec |\n| opensips.shmem | total, used, real_used, max_used, free | kilobytes |\n| opensips.shmem_fragment | fragments | fragments |\n\n",integration_type:"collector",id:"charts.d.plugin-opensips-OpenSIPS",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/charts.d.plugin/opensips/metadata.yaml",related_resources:""},{meta:{plugin_name:"charts.d.plugin",module_name:"sensors",monitored_instance:{name:"Linux Sensors (sysfs)",link:"https://www.kernel.org/doc/Documentation/hwmon/sysfs-interface",categories:["data-collection.hardware-devices-and-sensors"],icon_filename:"microchip.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["sensors","sysfs","hwmon","rpi","raspberry pi"],most_popular:!1},overview:"# Linux Sensors (sysfs)\n\nPlugin: charts.d.plugin\nModule: sensors\n\n## Overview\n\nUse this collector when `lm-sensors` doesn't work on your device (e.g. 
for RPi temperatures).\nFor all other cases use the [Go collector](/src/go/collectors/go.d.plugin/modules/sensors/README.md), which supports multiple jobs, is more efficient and performs calculations on top of the kernel-provided values.\n\n\nIt will provide charts for all configured system sensors by reading them directly from the kernel.\nThe values graphed are the raw hardware values of the sensors.\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, the collector will try to read entries under `/sys/devices`.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install charts.d plugin\n\nIf [using our official native DEB/RPM packages](/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.\n\n\n#### Enable the sensors collector\n\nThe `sensors` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config directory](/docs/netdata-agent/configuration/README.md), which is typically at `/etc/netdata`, to edit the `charts.d.conf` file.\n\n```bash\ncd /etc/netdata # Replace this path with your Netdata config directory, if different\nsudo ./edit-config charts.d.conf\n```\n\nChange the value of the `sensors` setting to `force` and uncomment the line. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](/packaging/installer/README.md#maintaining-a-netdata-agent-installation) for your system.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `charts.d/sensors.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config charts.d/sensors.conf\n```\n#### Options\n\nThe config file is sourced by the charts.d plugin. It\'s a standard bash file.\n\nThe following collapsed table contains all the options that can be configured for the sensors collector.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| sensors_sys_dir | The directory where the kernel exposes sensor data. | /sys/devices | no |\n| sensors_sys_depth | How deep in the tree to check for sensor data. | 10 | no |\n| sensors_source_update | If set to 1, the script will overwrite internal script functions with code-generated ones. | 1 | no |\n| sensors_update_every | The data collection frequency. If unset, will inherit the netdata update frequency. | 1 | no |\n| sensors_priority | The charts priority on the dashboard. | 90000 | no |\n| sensors_retries | The number of retries to do in case of failure before disabling the collector. 
| 10 | no |\n\n{% /details %}\n#### Examples\n\n##### Set sensors path depth\n\nSet a different sensors path depth\n\n```yaml\n# the directory the kernel keeps sensor data\n#sensors_sys_dir="/sys/devices"\n\n# how deep in the tree to check for sensor data\nsensors_sys_depth=5\n\n# if set to 1, the script will overwrite internal\n# script functions with code generated ones\n# leave to 1, is faster\n#sensors_source_update=1\n\n# the data collection frequency\n# if unset, will inherit the netdata update frequency\n#sensors_update_every=\n\n# the charts priority on the dashboard\n#sensors_priority=90000\n\n# the number of retries to do in case of failure\n# before disabling the module\n#sensors_retries=10\n\n```\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `sensors` collector, run the `charts.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `charts.d.plugin` to debug the collector:\n\n ```bash\n ./charts.d.plugin debug 1 sensors\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per sensor chip\n\nMetrics related to sensor chips. Each chip provides its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| sensors.temp | {filename} | Celsius |\n| sensors.volt | {filename} | Volts |\n| sensors.curr | {filename} | Ampere |\n| sensors.power | {filename} | Watt |\n| sensors.fans | {filename} | Rotations / Minute |\n| sensors.energy | {filename} | Joule |\n| sensors.humidity | {filename} | Percent |\n\n",integration_type:"collector",id:"charts.d.plugin-sensors-Linux_Sensors_(sysfs)",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/charts.d.plugin/sensors/metadata.yaml",related_resources:""},{meta:{plugin_name:"cups.plugin",module_name:"cups.plugin",monitored_instance:{name:"CUPS",link:"https://www.cups.org/",categories:["data-collection.hardware-devices-and-sensors"],icon_filename:"cups.png"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[],most_popular:!1},overview:"# CUPS\n\nPlugin: cups.plugin\nModule: cups.plugin\n\n## Overview\n\nMonitor CUPS performance to achieve optimal printing system operations. Monitor job statuses, queue lengths, and error rates to ensure smooth printing tasks.\n\nThe plugin uses the CUPS shared library to connect to and monitor the server.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs to access the server. 
Netdata sets the required permissions during installation so the plugin can reach the server through its library.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin detects when the CUPS server is running and tries to connect to it.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Minimum setup\n\nThe CUPS server must be installed and running. If you installed `netdata` using a package manager, it is also necessary to install the package `netdata-plugin-cups`.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:cups]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Additional parameters for the collector | | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per CUPS instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cups.dests_state | idle, printing, stopped | dests |\n| cups.dests_option | total, acceptingjobs, shared | dests |\n| cups.job_num | pending, held, processing | jobs |\n| cups.job_size | pending, held, processing | KB |\n\n### Per destination\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cups.destination_job_num | pending, held, processing | jobs |\n| cups.destination_job_size | pending, held, processing | KB |\n\n",integration_type:"collector",id:"cups.plugin-cups.plugin-CUPS",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/cups.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"debugfs.plugin",module_name:"/sys/kernel/debug/extfrag",monitored_instance:{name:"System Memory Fragmentation",link:"https://www.kernel.org/doc/html/next/admin-guide/sysctl/vm.html",categories:["data-collection.linux-systems.memory-metrics"],icon_filename:"microchip.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["extfrag","extfrag_threshold","memory fragmentation"],most_popular:!1},overview:"# System Memory Fragmentation\n\nPlugin: debugfs.plugin\nModule: /sys/kernel/debug/extfrag\n\n## Overview\n\nCollects memory fragmentation statistics from the Linux kernel\n\nParses data from a `debugfs` file.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\nThis integration requires read access to files under `/sys/kernel/debug/extfrag`, which are accessible only to the root user by default. Netdata uses Linux Capabilities to give the plugin access to debugfs. `CAP_DAC_READ_SEARCH` is added automatically during installation. This capability allows bypassing file read permission checks and directory read and execute permission checks. If file capabilities are not usable, then the plugin is instead installed with the SUID bit set in permissions so that it runs as root.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAssuming that debugfs is mounted and the required permissions are available, this integration will automatically run by default.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### filesystem\n\nThe debugfs filesystem must be mounted on your host for the plugin to collect data. You can run `sudo mount -t debugfs none /sys/kernel/debug/` to mount it manually. It is also recommended to add an entry to your fstab(5) so the filesystem is mounted automatically before Netdata starts.\n\n
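A typical fstab(5) entry for this is sketched below; adapt it to your system:\n\n```\ndebugfs  /sys/kernel/debug  debugfs  defaults  0  0\n```\n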
\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:debugfs]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Additional parameters for the collector | | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nMonitor the overall memory fragmentation of the system.\n\n### Per node\n\nMemory fragmentation statistics for each NUMA node in the system.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| numa_node | The NUMA node the metrics are associated with. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.fragmentation_index_dma | order0, order1, order2, order3, order4, order5, order6, order7, order8, order9, order10 | index |\n| mem.fragmentation_index_dma32 | order0, order1, order2, order3, order4, order5, order6, order7, order8, order9, order10 | index |\n| mem.fragmentation_index_normal | order0, order1, order2, order3, order4, order5, order6, order7, order8, order9, order10 | index |\n\n",integration_type:"collector",id:"debugfs.plugin-/sys/kernel/debug/extfrag-System_Memory_Fragmentation",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/debugfs.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"debugfs.plugin",module_name:"/sys/kernel/debug/zswap",monitored_instance:{name:"Linux ZSwap",link:"https://www.kernel.org/doc/html/latest/admin-guide/mm/zswap.html",categories:["data-collection.linux-systems.memory-metrics"],icon_filename:"microchip.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["swap","zswap","frontswap","swap cache"],most_popular:!1},overview:"# Linux ZSwap\n\nPlugin: debugfs.plugin\nModule: /sys/kernel/debug/zswap\n\n## Overview\n\nCollects zswap performance metrics on Linux systems.\n\n\nParses data from a `debugfs` file.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\nThis integration requires read access to files under `/sys/kernel/debug/zswap`, which are accessible only to the root user by default. Netdata uses Linux Capabilities to give the plugin access to debugfs. `CAP_DAC_READ_SEARCH` is added automatically during installation. This capability allows bypassing file read permission checks and directory read and execute permission checks. If file capabilities are not usable, then the plugin is instead installed with the SUID bit set in permissions so that it runs as root.\n\n
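You can check which mechanism is in effect by inspecting the plugin binary (the path below is the usual one and may differ on your installation):\n\n```bash\ngetcap /usr/libexec/netdata/plugins.d/debugfs.plugin\n```\n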
\n### Default Behavior\n\n#### Auto-Detection\n\nAssuming that debugfs is mounted and the required permissions are available, this integration will automatically detect whether or not the system is using zswap.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### filesystem\n\nThe debugfs filesystem must be mounted on your host for the plugin to collect data. You can run `sudo mount -t debugfs none /sys/kernel/debug/` to mount it manually. It is also recommended to add an entry to your fstab(5) so the filesystem is mounted automatically before Netdata starts.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:debugfs]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Additional parameters for the collector | | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nMonitor the performance statistics of zswap.\n\n### Per Linux ZSwap instance\n\nGlobal zswap performance metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.zswap_pool_compression_ratio | compression_ratio | ratio |\n| system.zswap_pool_compressed_size | compressed_size | bytes |\n| system.zswap_pool_raw_size | uncompressed_size | bytes |\n| system.zswap_rejections | compress_poor, kmemcache_fail, alloc_fail, reclaim_fail | rejections/s |\n| system.zswap_pool_limit_hit | limit | events/s |\n| system.zswap_written_back_raw_bytes | written_back | bytes/s |\n| system.zswap_same_filled_raw_size | same_filled | bytes |\n| system.zswap_duplicate_entry | duplicate | entries/s |\n\n",integration_type:"collector",id:"debugfs.plugin-/sys/kernel/debug/zswap-Linux_ZSwap",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/debugfs.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"debugfs.plugin",module_name:"intel_rapl",monitored_instance:{name:"Power Capping",link:"https://www.kernel.org/doc/html/next/power/powercap/powercap.html",categories:["data-collection.linux-systems.kernel-metrics"],icon_filename:"powersupply.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["power capping","energy"],most_popular:!1},overview:"# Power Capping\n\nPlugin: debugfs.plugin\nModule: intel_rapl\n\n## Overview\n\nCollects power capping performance metrics on Linux systems.\n\n\nParses data from a `debugfs` file.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\nThis integration requires read access to files under `/sys/devices/virtual/powercap`, which are accessible only to the root user by default. Netdata uses Linux Capabilities to give the plugin access to debugfs. `CAP_DAC_READ_SEARCH` is added automatically during installation. This capability allows bypassing file read permission checks and directory read and execute permission checks. If file capabilities are not usable, then the plugin is instead installed with the SUID bit set in permissions so that it runs as root.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAssuming that debugfs is mounted and the required permissions are available, this integration will automatically detect whether the system exposes Intel RAPL power capping information.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### filesystem\n\nThe debugfs filesystem must be mounted on your host for the plugin to collect data. You can run `sudo mount -t debugfs none /sys/kernel/debug/` to mount it manually. It is also recommended to add an entry to your fstab(5) so the filesystem is mounted automatically before Netdata starts.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:debugfs]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Additional parameters for the collector | | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nMonitor the power consumption of the Intel RAPL zones.\n\n### Per Power Capping instance\n\nGlobal Intel RAPL zones.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.powercap_intel_rapl_zone | Power | Watts |\n| cpu.powercap_intel_rapl_subzones | dram, core, uncore | Watts |\n\n",integration_type:"collector",id:"debugfs.plugin-intel_rapl-Power_Capping",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/debugfs.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"diskspace.plugin",module_name:"diskspace.plugin",monitored_instance:{name:"Disk space",link:"",categories:["data-collection.linux-systems"],icon_filename:"hard-drive.svg"},related_resources:{integrations:{list:[{plugin_name:"ebpf.plugin",module_name:"disk"}]}},info_provided_to_referring_integrations:{description:""},keywords:["disk","I/O","space","inode"],most_popular:!1},overview:"# Disk space\n\nPlugin: diskspace.plugin\nModule: diskspace.plugin\n\n## Overview\n\nMonitor Disk space metrics for proficient storage management. Keep track of usage, free space, and error rates to prevent disk space issues.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin reads data from `/proc/self/mountinfo` and `/proc/diskstats`.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:proc:diskspace]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nYou can also set options per mount point, using `[plugin:proc:diskspace:mountpoint]` sections.\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| remove charts of unmounted disks | Remove the chart when a device is unmounted on the host. | yes | no |\n| check for new mount points every | How often to parse the proc files for new mount points. | 15 | no |\n| exclude space metrics on paths | Do not show metrics (charts) for listed paths. This option accepts netdata simple pattern. | /proc/* /sys/* /var/run/user/* /run/user/* /snap/* /var/lib/docker/* | no |\n| exclude space metrics on filesystems | Do not show metrics (charts) for listed filesystems. This option accepts netdata simple pattern. | *gvfs *gluster* *s3fs *ipfs *davfs2 *httpfs *sshfs *gdfs *moosefs fusectl autofs | no |\n| exclude inode metrics on filesystems | Do not show metrics (charts) for listed filesystems. This option accepts netdata simple pattern. | msdosfs msdos vfat overlayfs aufs* *unionfs | no |\n| space usage for all disks | Define whether the plugin will show metrics for space usage. When set to `auto`, the plugin will show the metrics if the filesystem or path was not excluded by the previous options. | auto | no |\n| inodes usage for all disks | Define whether the plugin will show metrics for inode usage. When set to `auto`, the plugin will show the metrics if the filesystem or path was not excluded by the previous options. | auto | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ disk_space_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.space | disk ${label:mount_point} space utilization |\n| [ disk_inode_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.inodes | disk ${label:mount_point} inode utilization |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per mount point\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mount_point | Path used to mount a filesystem. |\n| filesystem | The filesystem used to format a partition. |\n| mount_root | Root directory where mount points are present. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.space | avail, used, reserved_for_root | GiB |\n| disk.inodes | avail, used, reserved_for_root | inodes |\n\n",integration_type:"collector",id:"diskspace.plugin-diskspace.plugin-Disk_space",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/diskspace.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"ebpf.plugin",module_name:"cachestat",monitored_instance:{name:"eBPF Cachestat",link:"https://kernel.org/",categories:["data-collection.ebpf"],icon_filename:"ebpf.jpg"},related_resources:{integrations:{list:[{plugin_name:"apps.plugin",module_name:"apps"},{plugin_name:"cgroups.plugin",module_name:"cgroups"}]}},info_provided_to_referring_integrations:{description:""},keywords:["Page cache","Hit ratio","eBPF"],most_popular:!1},overview:"# eBPF Cachestat\n\nPlugin: ebpf.plugin\nModule: cachestat\n\n## Overview\n\nMonitor Linux page cache events, giving users a general view of how the kernel manages files.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions, according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file; a quick check is sketched after the steps below. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen you do not have the options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution (the latter is preferred). The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if it is necessary.\n8. Update your boot loader\n
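\nBefore recompiling, you can quickly check whether the required options are already enabled. A sketch (the config file location varies across distributions):\n\n```bash\n# kernels exposing their config through procfs\nzcat /proc/config.gz | grep -E "CONFIG_(KPROBES|BPF|BPF_SYSCALL|BPF_JIT)="\n\n# distributions shipping the config under /boot\ngrep -E "CONFIG_(KPROBES|BPF|BPF_SYSCALL|BPF_JIT)=" /boot/config-$(uname -r)\n```\n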
\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/cachestat.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/cachestat.conf\n```\n#### Options\n\nAll options are defined inside section `[global]`.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only function calls (`entry`) or also their returns (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead) and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`), the plugin will load one hash table per core instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by the cloud. | 300 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF Cachestat instance\n\nThese metrics show the total number of calls to functions inside the kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.cachestat_ratio | ratio | % |\n| mem.cachestat_dirties | dirty | page/s |\n| mem.cachestat_hits | hit | hits/s |\n| mem.cachestat_misses | miss | misses/s |\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_cachestat_hit_ratio | ratio | % |\n| app.ebpf_cachestat_dirty_pages | pages | page/s |\n| app.ebpf_cachestat_access | hits | hits/s |\n| app.ebpf_cachestat_misses | misses | misses/s |\n\n### Per cgroup\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.cachestat_ratio | ratio | % |\n| cgroup.cachestat_dirties | dirty | page/s |\n| cgroup.cachestat_hits | hit | hits/s |\n| cgroup.cachestat_misses | miss | misses/s |\n| services.cachestat_ratio | a dimension per systemd service | % |\n| services.cachestat_dirties | a dimension per systemd service | page/s |\n| services.cachestat_hits | a dimension per systemd service | hits/s |\n| services.cachestat_misses | a dimension per systemd service | misses/s |\n\n",integration_type:"collector",id:"ebpf.plugin-cachestat-eBPF_Cachestat",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"ebpf.plugin",module_name:"dcstat",monitored_instance:{name:"eBPF DCstat",link:"https://kernel.org/",categories:["data-collection.ebpf"],icon_filename:"ebpf.jpg"},related_resources:{integrations:{list:[{plugin_name:"apps.plugin",module_name:"apps"},{plugin_name:"cgroups.plugin",module_name:"cgroups"}]}},info_provided_to_referring_integrations:{description:""},keywords:["Directory Cache","File system","eBPF"],most_popular:!1},overview:"# eBPF DCstat\n\nPlugin: ebpf.plugin\nModule: dcstat\n\n## Overview\n\nMonitor directory cache events per application, giving an overall view of whether files are served from memory or from the storage device.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions, according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen you do not have the options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution (the latter is preferred). The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. 
Now follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/dcstat.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/dcstat.conf\n```\n#### Options\n\nAll options are defined inside the `[global]` section.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin monitors only the function call (`entry`) or also monitors the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin. | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin. | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead) and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin loads its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by the cloud. | 300 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_dc_ratio | ratio | % |\n| app.ebpf_dc_reference | files | files |\n| app.ebpf_dc_not_cache | files | files |\n| app.ebpf_dc_not_found | files | files |\n\n### Per filesystem\n\nThese metrics show the total number of calls to functions inside the kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filesystem.dc_reference | reference, slow, miss | files |\n| filesystem.dc_hit_ratio | ratio | % |\n\n### Per cgroup\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.dc_ratio | ratio | % |\n| cgroup.dc_reference | reference | files |\n| cgroup.dc_not_cache | slow | files |\n| cgroup.dc_not_found | miss | files |\n| services.dc_ratio | a dimension per systemd service | % |\n| services.dc_reference | a dimension per systemd service | files |\n| services.dc_not_cache | a dimension per systemd service | files |\n| services.dc_not_found | a dimension per systemd service | files |\n\n",integration_type:"collector",id:"ebpf.plugin-dcstat-eBPF_DCstat",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"ebpf.plugin",module_name:"disk",monitored_instance:{name:"eBPF Disk",link:"https://kernel.org/",categories:["data-collection.ebpf"],icon_filename:"ebpf.jpg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["hard Disk","eBPF","latency","partition"],most_popular:!1},overview:"# eBPF Disk\n\nPlugin: ebpf.plugin\nModule: disk\n\n## Overview\n\nMeasure latency for I/O events on disk.\n\nAttach tracepoints to internal kernel functions.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time an internal kernel function monitored by this thread is called.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or, preferably, a kernel package from your distribution. The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. 
Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/disk.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/disk.conf\n```\n#### Options\n\nAll options are defined inside the `[global]` section.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin monitors only the function call (`entry`) or also monitors the return (`return`). | entry | no |\n| lifetime | Set the default lifetime for the thread when enabled by the cloud. | 300 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per disk\n\nThese metrics measure latency for I/O events on every hard disk present on the host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.latency_io | latency | calls/s |\n\n",integration_type:"collector",id:"ebpf.plugin-disk-eBPF_Disk",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"ebpf.plugin",module_name:"filedescriptor",monitored_instance:{name:"eBPF Filedescriptor",link:"https://kernel.org/",categories:["data-collection.ebpf"],icon_filename:"ebpf.jpg"},related_resources:{integrations:{list:[{plugin_name:"apps.plugin",module_name:"apps"},{plugin_name:"cgroups.plugin",module_name:"cgroups"}]}},info_provided_to_referring_integrations:{description:""},keywords:["file","eBPF","fd","open","close"],most_popular:!1},overview:"# eBPF Filedescriptor\n\nPlugin: ebpf.plugin\nModule: filedescriptor\n\n## Overview\n\nMonitor calls to the functions responsible for opening or closing a file descriptor, and possible errors.\n\nAttach tracing (kprobe and trampoline) to internal kernel functions according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. 
Netdata sets the necessary permissions during installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nDepending on the kernel version and how frequently files are opened and closed, this thread will add overhead every time an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or, preferably, a kernel package from your distribution. The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/fd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/fd.conf\n```\n#### Options\n\nAll options are defined inside the `[global]` section.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin monitors only the function call (`entry`) or also monitors the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin. | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin. | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead) and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin loads its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by the cloud. | 300 | no |\n\n{% /details %}\n
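For orientation only, the defaults in the table above correspond to a `[global]` section like the following (a rough sketch, not a file shipped with Netdata):\n\n```text\n[global]\n    update every = 5\n    ebpf load mode = entry\n    apps = no\n    cgroups = no\n    pid table size = 32768\n    ebpf type format = auto\n    ebpf co-re tracing = trampoline\n    maps per core = yes\n    lifetime = 300\n```\n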
#### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\nThese metrics show grouped information per cgroup/service.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.fd_open | open | calls/s |\n| cgroup.fd_open_error | open | calls/s |\n| cgroup.fd_closed | close | calls/s |\n| cgroup.fd_close_error | close | calls/s |\n| services.file_open | a dimension per systemd service | calls/s |\n| services.file_open_error | a dimension per systemd service | calls/s |\n| services.file_closed | a dimension per systemd service | calls/s |\n| services.file_close_error | a dimension per systemd service | calls/s |\n\n### Per eBPF Filedescriptor instance\n\nThese metrics show the total number of calls to functions inside the kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filesystem.file_descriptor | open, close | calls/s |\n| filesystem.file_error | open, close | calls/s |\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_file_open | calls | calls/s |\n| app.ebpf_file_open_error | calls | calls/s |\n| app.ebpf_file_closed | calls | calls/s |\n| app.ebpf_file_close_error | calls | calls/s |\n\n",integration_type:"collector",id:"ebpf.plugin-filedescriptor-eBPF_Filedescriptor",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"ebpf.plugin",module_name:"filesystem",monitored_instance:{name:"eBPF Filesystem",link:"https://kernel.org/",categories:["data-collection.ebpf"],icon_filename:"ebpf.jpg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["Filesystem","ext4","btrfs","nfs","xfs","zfs","eBPF","latency","I/O"],most_popular:!1},overview:"# eBPF Filesystem\n\nPlugin: ebpf.plugin\nModule: filesystem\n\n## Overview\n\nMonitor latency for the main filesystem actions, like I/O events.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. 
Netdata sets the necessary permissions during installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or, preferably, a kernel package from your distribution. The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/filesystem.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/filesystem.conf\n```\n#### Options\n\nThis configuration file has two different sections. The `[global]` section overwrites the default options, while `[filesystem]` allows the user to select the filesystems to monitor.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin monitors only the function call (`entry`) or also monitors the return (`return`). | entry | no |\n| lifetime | Set the default lifetime for the thread when enabled by the cloud. | 300 | no |\n| btrfsdist | Enable or disable latency monitoring for functions associated with the btrfs filesystem. | yes | no |\n| ext4dist | Enable or disable latency monitoring for functions associated with the ext4 filesystem. | yes | no |\n| nfsdist | Enable or disable latency monitoring for functions associated with the nfs filesystem. | yes | no |\n| xfsdist | Enable or disable latency monitoring for functions associated with the xfs filesystem. | yes | no |\n| zfsdist | Enable or disable latency monitoring for functions associated with the zfs filesystem. | yes | no |\n\n{% /details %}\n
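For orientation only, the defaults in the table above map to the two sections like this (a rough sketch, not a file shipped with Netdata):\n\n```text\n[global]\n    update every = 5\n    ebpf load mode = entry\n    lifetime = 300\n\n[filesystem]\n    btrfsdist = yes\n    ext4dist = yes\n    nfsdist = yes\n    xfsdist = yes\n    zfsdist = yes\n```\n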
#### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per filesystem\n\nLatency charts associated with filesystem actions.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filesystem.read_latency | latency period | calls/s |\n| filesystem.open_latency | latency period | calls/s |\n| filesystem.sync_latency | latency period | calls/s |\n\n### Per filesystem\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filesystem.write_latency | latency period | calls/s |\n\n### Per eBPF Filesystem instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filesystem.attributte_latency | latency period | calls/s |\n\n",integration_type:"collector",id:"ebpf.plugin-filesystem-eBPF_Filesystem",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"ebpf.plugin",module_name:"hardirq",monitored_instance:{name:"eBPF Hardirq",link:"https://kernel.org/",categories:["data-collection.ebpf"],icon_filename:"ebpf.jpg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["HardIRQ","eBPF"],most_popular:!1},overview:"# eBPF Hardirq\n\nPlugin: ebpf.plugin\nModule: hardirq\n\n## Overview\n\nMonitor latency for each HardIRQ available.\n\nAttach tracepoints to internal kernel functions.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time an internal kernel function monitored by this thread is called.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or, preferably, a kernel package from your distribution. The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n
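To mount it only when it is not already mounted, a small sketch using the command cited above (`mountpoint` is part of util-linux):\n\n```bash\n# Mount debugfs only when it is not already mounted\nmountpoint -q /sys/kernel/debug || sudo mount -t debugfs none /sys/kernel/debug/\n```\n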
\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/hardirq.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/hardirq.conf\n```\n#### Options\n\nAll options are defined inside the `[global]` section.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin monitors only the function call (`entry`) or also monitors the return (`return`). | entry | no |\n| lifetime | Set the default lifetime for the thread when enabled by the cloud. | 300 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF Hardirq instance\n\nThese metrics show the latest timestamp for each hardIRQ available on the host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.hardirq_latency | hardirq names | milliseconds |\n\n",integration_type:"collector",id:"ebpf.plugin-hardirq-eBPF_Hardirq",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"ebpf.plugin",module_name:"mdflush",monitored_instance:{name:"eBPF MDflush",link:"https://kernel.org/",categories:["data-collection.ebpf"],icon_filename:"ebpf.jpg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["MD","RAID","eBPF"],most_popular:!1},overview:"# eBPF MDflush\n\nPlugin: ebpf.plugin\nModule: mdflush\n\n## Overview\n\nMonitor when flush events happen between disks.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time `md_flush_request` is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. 
Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or, preferably, a kernel package from your distribution. The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/mdflush.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/mdflush.conf\n```\n#### Options\n\nAll options are defined inside the `[global]` section.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin monitors only the function call (`entry`) or also monitors the return (`return`). | entry | no |\n| lifetime | Set the default lifetime for the thread when enabled by the cloud. | 300 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF MDflush instance\n\nNumber of times md_flush_request was called since the previous collection.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mdstat.mdstat_flush | disk | flushes |\n\n",integration_type:"collector",id:"ebpf.plugin-mdflush-eBPF_MDflush",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"ebpf.plugin",module_name:"mount",monitored_instance:{name:"eBPF Mount",link:"https://kernel.org/",categories:["data-collection.ebpf"],icon_filename:"ebpf.jpg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["mount","umount","device","eBPF"],most_popular:!1},overview:"# eBPF Mount\n\nPlugin: ebpf.plugin\nModule: mount\n\n## Overview\n\nMonitor calls to the mount and umount syscalls.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. 
Netdata sets the necessary permissions during installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT, CONFIG_HAVE_SYSCALL_TRACEPOINTS), files inside debugfs, and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or, preferably, a kernel package from your distribution. The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/mount.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/mount.conf\n```\n#### Options\n\nAll options are defined inside the `[global]` section.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin monitors only the function call (`entry`) or also monitors the return (`return`). | entry | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead) and `probe` (the same as the legacy code). | trampoline | no |\n| lifetime | Set the default lifetime for the thread when enabled by the cloud. | 300 | no |\n\n{% /details %}\n
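For orientation only, the defaults in the table above correspond to a `[global]` section like the following (a rough sketch, not a file shipped with Netdata):\n\n```text\n[global]\n    update every = 5\n    ebpf load mode = entry\n    ebpf type format = auto\n    ebpf co-re tracing = trampoline\n    lifetime = 300\n```\n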
#### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF Mount instance\n\nCalls to the mount and umount syscalls.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mount_points.call | mount, umount | calls/s |\n| mount_points.error | mount, umount | calls/s |\n\n",integration_type:"collector",id:"ebpf.plugin-mount-eBPF_Mount",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"ebpf.plugin",module_name:"oomkill",monitored_instance:{name:"eBPF OOMkill",link:"https://kernel.org/",categories:["data-collection.ebpf"],icon_filename:"ebpf.jpg"},related_resources:{integrations:{list:[{plugin_name:"apps.plugin",module_name:"apps"},{plugin_name:"cgroups.plugin",module_name:"cgroups"}]}},info_provided_to_referring_integrations:{description:""},keywords:["application","memory"],most_popular:!1},overview:"# eBPF OOMkill\n\nPlugin: ebpf.plugin\nModule: oomkill\n\n## Overview\n\nMonitor applications that run out of memory.\n\nAttach a tracepoint to internal kernel functions.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time an internal kernel function monitored by this thread is called.\n",setup:"## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or, preferably, a kernel package from your distribution. The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. 
To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/oomkill.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/oomkill.conf\n```\n#### Options\n\nOverwrite the default configuration to reduce the number of I/O events.\n\n\n#### Examples\nThere are no configuration examples.\n\n",troubleshooting:"## Troubleshooting\n\n### update every\n\n\n\n### ebpf load mode\n\n\n\n### lifetime\n\n\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\nThese metrics show cgroups/services that reached OOM.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.oomkills | cgroup name | kills |\n| services.oomkills | a dimension per systemd service | kills |\n\n### Per apps\n\nThese metrics show applications that reached OOM.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.oomkill | kills | kills |\n\n",integration_type:"collector",id:"ebpf.plugin-oomkill-eBPF_OOMkill",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"ebpf.plugin",module_name:"process",monitored_instance:{name:"eBPF Process",link:"https://github.com/netdata/netdata/",categories:["data-collection.ebpf"],icon_filename:"ebpf.jpg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["Memory","plugin","eBPF"],most_popular:!1},overview:"# eBPF Process\n\nPlugin: ebpf.plugin\nModule: process\n\n## Overview\n\nMonitor internal memory usage.\n\nUses Netdata internal statistics to monitor memory management by the plugin.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\n#### Netdata flags\n\nTo have these charts, you need to compile Netdata with the flag `NETDATA_DEV_MODE`.\n\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF Process instance\n\nHow the plugin is allocating memory.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netdata.ebpf_aral_stat_size | memory | bytes |\n| netdata.ebpf_aral_stat_alloc | aral | calls |\n| netdata.ebpf_threads | total, running | threads |\n| netdata.ebpf_load_methods | legacy, co-re | methods |\n| netdata.ebpf_kernel_memory | memory_locked | bytes |\n| netdata.ebpf_hash_tables_count | hash_table | hash tables |\n| netdata.ebpf_hash_tables_insert_pid_elements | thread | rows |\n| netdata.ebpf_hash_tables_remove_pid_elements | thread | rows |\n\n",integration_type:"collector",id:"ebpf.plugin-process-eBPF_Process",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"ebpf.plugin",module_name:"processes",monitored_instance:{name:"eBPF Processes",link:"https://kernel.org/",categories:["data-collection.ebpf"],icon_filename:"ebpf.jpg"},related_resources:{integrations:{list:[{plugin_name:"apps.plugin",module_name:"apps"},{plugin_name:"cgroups.plugin",module_name:"cgroups"}]}},info_provided_to_referring_integrations:{description:""},keywords:["thread","fork","process","eBPF"],most_popular:!1},overview:"# eBPF Processes\n\nPlugin: ebpf.plugin\nModule: processes\n\n## Overview\n\nMonitor calls to the functions that create tasks (threads and processes) inside the Linux kernel.\n\nAttach tracing (kprobe or tracepoint, and trampoline) to internal kernel functions.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time an internal kernel function monitored by this thread is called.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or, preferably, a kernel package from your distribution. The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n
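The steps above can be sketched as shell commands. This is only a sketch: it assumes an x86 machine, GRUB, and Debian-style initramfs tooling, so adjust paths and commands for your system (dracut or mkinitcpio on other distributions):\n\n```bash\n# Sketch of the kernel build steps listed above\ncp /boot/config-$(uname -r) /usr/src/linux/.config   # step 1: start from a known config\ncd /usr/src/linux\nmake oldconfig                                       # step 2: select the necessary options\nmake bzImage                                         # step 3: compile the kernel image\nmake modules                                         # step 4: compile the modules\nsudo cp arch/x86/boot/bzImage /boot/vmlinuz-custom   # step 5: copy the image for the boot loader\nsudo make modules_install                            # step 6: install the new modules\nsudo update-initramfs -c -k custom                   # step 7: generate an initrd, if needed\nsudo update-grub                                     # step 8: update the boot loader (GRUB example)\n```\n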
\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/process.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/process.conf\n```\n#### Options\n\nAll options are defined inside the `[global]` section.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin monitors only the function call (`entry`) or also monitors the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin. | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin. | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead) and `probe` (the same as the legacy code). This plugin always tries to attach a tracepoint, so the option here only impacts the function used to monitor task (thread and process) creation. | trampoline | no |\n| maps per core | Define how the plugin loads its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by the cloud. | 300 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF Processes instance\n\nThese metrics show the total number of calls to functions inside the kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.process_thread | process | calls/s |\n| system.process_status | process, zombie | difference |\n| system.exit | process | calls/s |\n| system.task_error | task | calls/s |\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.process_create | calls | calls/s |\n| app.thread_create | call | calls/s |\n| app.task_exit | call | calls/s |\n| app.task_close | call | calls/s |\n| app.task_error | app | calls/s |\n\n### Per cgroup\n\nThese metrics show grouped information per cgroup/service.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.process_create | process | calls/s |\n| cgroup.thread_create | thread | calls/s |\n| cgroup.task_exit | exit | calls/s |\n| cgroup.task_close | process | calls/s |\n| cgroup.task_error | process | calls/s |\n| services.process_create | a dimension per systemd service | calls/s |\n| services.thread_create | a dimension per systemd service | calls/s |\n| services.task_close | a dimension per systemd service | calls/s |\n| services.task_exit | a dimension per systemd service | calls/s |\n| services.task_error | a dimension per systemd service | calls/s |\n\n",integration_type:"collector",id:"ebpf.plugin-processes-eBPF_Processes",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"ebpf.plugin",module_name:"shm",monitored_instance:{name:"eBPF SHM",link:"https://kernel.org/",categories:["data-collection.ebpf"],icon_filename:"ebpf.jpg"},related_resources:{integrations:{list:[{plugin_name:"apps.plugin",module_name:"apps"},{plugin_name:"cgroups.plugin",module_name:"cgroups"}]}},info_provided_to_referring_integrations:{description:""},keywords:["syscall","shared memory","eBPF"],most_popular:!1},overview:"# eBPF SHM\n\nPlugin: ebpf.plugin\nModule: shm\n\n## Overview\n\nMonitor the syscalls responsible for manipulating shared memory.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or, preferably, a kernel package from your distribution. The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. 
Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/shm.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/shm.conf\n```\n#### Options\n\nThis configuration file has two different sections. The `[global]` section overwrites all default options, while `[syscalls]` allows the user to select the syscalls to monitor.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin monitors only the function call (`entry`) or also monitors the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin. | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin. | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead) and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin loads its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by the cloud. | 300 | no |\n| shmget | Enable or disable monitoring for syscall `shmget`. | yes | no |\n| shmat | Enable or disable monitoring for syscall `shmat`. | yes | no |\n| shmdt | Enable or disable monitoring for syscall `shmdt`. | yes | no |\n| shmctl | Enable or disable monitoring for syscall `shmctl`. | yes | no |\n\n{% /details %}\n
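For orientation only, the defaults in the table above map to the two sections like this (a rough sketch, not a file shipped with Netdata):\n\n```text\n[global]\n    update every = 5\n    ebpf load mode = entry\n    apps = no\n    cgroups = no\n    pid table size = 32768\n    ebpf type format = auto\n    ebpf co-re tracing = trampoline\n    maps per core = yes\n    lifetime = 300\n\n[syscalls]\n    shmget = yes\n    shmat = yes\n    shmdt = yes\n    shmctl = yes\n```\n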
#### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\nThese metrics show grouped information per cgroup/service.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.shmget | get | calls/s |\n| cgroup.shmat | at | calls/s |\n| cgroup.shmdt | dt | calls/s |\n| cgroup.shmctl | ctl | calls/s |\n| services.shmget | a dimension per systemd service | calls/s |\n| services.shmat | a dimension per systemd service | calls/s |\n| services.shmdt | a dimension per systemd service | calls/s |\n| services.shmctl | a dimension per systemd service | calls/s |\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_shmget_call | calls | calls/s |\n| app.ebpf_shmat_call | calls | calls/s |\n| app.ebpf_shmdt_call | calls | calls/s |\n| app.ebpf_shmctl_call | calls | calls/s |\n\n### Per eBPF SHM instance\n\nThese metrics show the number of calls for the specified syscalls.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.shared_memory_calls | get, at, dt, ctl | calls/s |\n\n",integration_type:"collector",id:"ebpf.plugin-shm-eBPF_SHM",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"ebpf.plugin",module_name:"socket",monitored_instance:{name:"eBPF Socket",link:"https://kernel.org/",categories:["data-collection.ebpf"],icon_filename:"ebpf.jpg"},related_resources:{integrations:{list:[{plugin_name:"apps.plugin",module_name:"apps"},{plugin_name:"cgroups.plugin",module_name:"cgroups"}]}},info_provided_to_referring_integrations:{description:""},keywords:["TCP","UDP","bandwidth","server","connection","socket"],most_popular:!1},overview:"# eBPF Socket\n\nPlugin: ebpf.plugin\nModule: socket\n\n## Overview\n\nMonitor bandwidth consumption per application for the TCP and UDP protocols.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. 
Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or, preferably, a kernel package from your distribution. The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/network.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/network.conf\n```\n#### Options\n\nAll options are defined inside the `[global]` section. Options inside `network connections` are ignored for now.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin monitors only the function call (`entry`) or also monitors the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin. | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin. | no | no |\n| bandwidth table size | Number of elements stored inside hash tables used to monitor calls per PID. | 16384 | no |\n| ipv4 connection table size | Number of elements stored inside hash tables used to monitor calls per IPv4 connection. | 16384 | no |\n| ipv6 connection table size | Number of elements stored inside hash tables used to monitor calls per IPv6 connection. | 16384 | no |\n| udp connection table size | Number of temporary elements stored inside hash tables used to monitor UDP connections. | 4096 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead) and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin loads its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by the cloud. | 300 | no |\n\n{% /details %}\n
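For orientation only, the defaults in the table above correspond to a `[global]` section like the following (a rough sketch, not a file shipped with Netdata):\n\n```text\n[global]\n    update every = 5\n    ebpf load mode = entry\n    apps = no\n    cgroups = no\n    bandwidth table size = 16384\n    ipv4 connection table size = 16384\n    ipv6 connection table size = 16384\n    udp connection table size = 4096\n    ebpf type format = auto\n    ebpf co-re tracing = trampoline\n    maps per core = yes\n    lifetime = 300\n```\n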
An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF Socket instance\n\nThese metrics show the total number of calls to functions inside the kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ip.inbound_conn | connection_tcp | connections/s |\n| ip.tcp_outbound_conn | received | connections/s |\n| ip.tcp_functions | received, send, closed | calls/s |\n| ip.total_tcp_bandwidth | received, send | kilobits/s |\n| ip.tcp_error | received, send | calls/s |\n| ip.tcp_retransmit | retransmited | calls/s |\n| ip.udp_functions | received, send | calls/s |\n| ip.total_udp_bandwidth | received, send | kilobits/s |\n| ip.udp_error | received, send | calls/s |\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_call_tcp_v4_connection | connections | connections/s |\n| app.ebpf_call_tcp_v6_connection | connections | connections/s |\n| app.ebpf_sock_bytes_sent | bandwidth | kilobits/s |\n| app.ebpf_sock_bytes_received | bandwidth | kilobits/s |\n| app.ebpf_call_tcp_sendmsg | calls | calls/s |\n| app.ebpf_call_tcp_cleanup_rbuf | calls | calls/s |\n| app.ebpf_call_tcp_retransmit | calls | calls/s |\n| app.ebpf_call_udp_sendmsg | calls | calls/s |\n| app.ebpf_call_udp_recvmsg | calls | calls/s |\n\n### Per cgroup\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.net_conn_ipv4 | connected_v4 | connections/s |\n| cgroup.net_conn_ipv6 | connected_v6 | connections/s |\n| cgroup.net_bytes_recv | received | calls/s |\n| cgroup.net_bytes_sent | sent | calls/s |\n| cgroup.net_tcp_recv | received | calls/s |\n| cgroup.net_tcp_send | sent | calls/s |\n| cgroup.net_retransmit | retransmitted | calls/s |\n| cgroup.net_udp_send | sent | calls/s |\n| cgroup.net_udp_recv | received | calls/s |\n| services.net_conn_ipv6 | a dimension per systemd service | connections/s |\n| services.net_bytes_recv | a dimension per systemd service | kilobits/s |\n| services.net_bytes_sent | a dimension per systemd service | kilobits/s |\n| services.net_tcp_recv | a dimension per systemd service | calls/s |\n| services.net_tcp_send | a dimension per systemd service | calls/s |\n| services.net_tcp_retransmit | a dimension per systemd service | calls/s |\n| services.net_udp_send | a dimension per systemd service | calls/s |\n| services.net_udp_recv | a dimension per systemd service | calls/s |\n\n",integration_type:"collector",id:"ebpf.plugin-socket-eBPF_Socket",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"ebpf.plugin",module_name:"softirq",monitored_instance:{name:"eBPF SoftIRQ",link:"https://kernel.org/",categories:["data-collection.ebpf"],icon_filename:"ebpf.jpg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["SoftIRQ","eBPF"],most_popular:!1},overview:"# eBPF SoftIRQ\n\nPlugin: ebpf.plugin\nModule: softirq\n\n## Overview\n\nMonitor latency for each SoftIRQ available.\n\nAttach kprobe to internal kernel functions.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote 
instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names may differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if it is necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n
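If you are not sure whether `debugfs` is already mounted, a check along these lines may help (a sketch; `mountpoint` is part of util-linux):\n\n```bash\n# Mount debugfs only when it is not mounted yet.\nmountpoint -q /sys/kernel/debug || mount -t debugfs none /sys/kernel/debug\n```\n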
\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/softirq.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/softirq.conf\n```\n#### Options\n\nAll options are defined inside section `[global]`.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor the call (`entry`) for the functions or it will also monitor the return (`return`). | entry | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF SoftIRQ instance\n\nThese metrics show the latency of each softIRQ available on the host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.softirq_latency | soft IRQs | milliseconds |\n\n",integration_type:"collector",id:"ebpf.plugin-softirq-eBPF_SoftIRQ",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"ebpf.plugin",module_name:"swap",monitored_instance:{name:"eBPF SWAP",link:"https://kernel.org/",categories:["data-collection.ebpf"],icon_filename:"ebpf.jpg"},related_resources:{integrations:{list:[{plugin_name:"apps.plugin",module_name:"apps"},{plugin_name:"cgroups.plugin",module_name:"cgroups"}]}},info_provided_to_referring_integrations:{description:""},keywords:["SWAP","memory","eBPF","Hard Disk"],most_popular:!1},overview:"# eBPF SWAP\n\nPlugin: ebpf.plugin\nModule: swap\n\n## Overview\n\nMonitors swap I/O events and the applications executing them.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional time is between 90 and 200ms per call on kernels that do not have BTF technology.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names may differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if it is necessary.\n8. Update your boot loader\n\n
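The numbered steps above map to shell commands roughly as follows (a sketch; it assumes an x86 kernel tree unpacked in /usr/src/linux, and the copied paths are only examples):\n\n```bash\ncd /usr/src/linux\ncp /path/to/distribution-config .config   # step 1 (example source path)\nmake oldconfig                            # step 2: select the necessary options\nmake bzImage                              # step 3: compile the kernel image\nmake modules                              # step 4: compile the modules\ncp arch/x86/boot/bzImage /boot/           # step 5 (example target directory)\nmake modules_install                      # step 6: install the new modules\n# Steps 7 and 8: generate an initrd and update the boot loader with the tools of your distribution.\n```\n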
\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/swap.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/swap.conf\n```\n#### Options\n\nAll options are defined inside section `[global]`.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor the call (`entry`) for the functions or it will also monitor the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (Attach only `kprobe`), `co-re` (Plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (Option with lowest overhead), and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`) the plugin will load one hash table per core, instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\nThese metrics show grouped information per cgroup/service.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.swap_read | read | calls/s |\n| cgroup.swap_write | write | calls/s |\n| services.swap_read | a dimension per systemd service | calls/s |\n| services.swap_write | a dimension per systemd service | calls/s |\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_call_swap_readpage | a dimension per app group | calls/s |\n| app.ebpf_call_swap_writepage | a dimension per app group | calls/s |\n\n### Per eBPF SWAP instance\n\nThese metrics show the total number of calls to functions inside the kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.swapcalls | write, read | calls/s |\n\n",integration_type:"collector",id:"ebpf.plugin-swap-eBPF_SWAP",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"ebpf.plugin",module_name:"sync",monitored_instance:{name:"eBPF Sync",link:"https://kernel.org/",categories:["data-collection.ebpf"],icon_filename:"ebpf.jpg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["syscall","eBPF","hard disk","memory"],most_popular:!1},overview:"# eBPF Sync\n\nPlugin: ebpf.plugin\nModule: sync\n\n## Overview\n\nMonitor the syscalls responsible for moving data from memory to the storage device.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT, CONFIG_HAVE_SYSCALL_TRACEPOINTS), files inside debugfs, and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional time is between 90 and 200ms per call on kernels that do not have BTF technology.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names may differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if it is necessary.\n8. Update your boot loader\n\n
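Since the plugin prefers CO-RE programs when BTF is available, you can check whether the kernel exposes BTF with a command like the one below (an assumption based on the common BTF location; the collector does not require this check):\n\n```bash\n# If this file exists, the kernel exposes BTF type information.\nls -l /sys/kernel/btf/vmlinux\n```\n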
\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/sync.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/sync.conf\n```\n#### Options\n\nThis configuration file has two different sections. The `[global]` section overwrites all default options, while `[syscalls]` allows the user to select the syscalls to monitor.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor the call (`entry`) for the functions or it will also monitor the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (Attach only `kprobe`), `co-re` (Plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (Option with lowest overhead), and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`) the plugin will load one hash table per core, instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n| sync | Enable or disable monitoring for syscall `sync` | yes | no |\n| msync | Enable or disable monitoring for syscall `msync` | yes | no |\n| fsync | Enable or disable monitoring for syscall `fsync` | yes | no |\n| fdatasync | Enable or disable monitoring for syscall `fdatasync` | yes | no |\n| syncfs | Enable or disable monitoring for syscall `syncfs` | yes | no |\n| sync_file_range | Enable or disable monitoring for syscall `sync_file_range` | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ sync_freq ](https://github.com/netdata/netdata/blob/master/src/health/health.d/synchronization.conf) | mem.sync | number of sync() system calls. Every call causes all pending modifications to filesystem metadata and cached file data to be written to the underlying filesystems. |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF Sync instance\n\nThese metrics show the total number of calls to functions inside the kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.file_sync | fsync, fdatasync | calls/s |\n| mem.memory_map | msync | calls/s |\n| mem.sync | sync, syncfs | calls/s |\n| mem.file_segment | sync_file_range | calls/s |\n\n",integration_type:"collector",id:"ebpf.plugin-sync-eBPF_Sync",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"ebpf.plugin",module_name:"vfs",monitored_instance:{name:"eBPF VFS",link:"https://kernel.org/",categories:["data-collection.ebpf"],icon_filename:"ebpf.jpg"},related_resources:{integrations:{list:[{plugin_name:"apps.plugin",module_name:"apps"},{plugin_name:"cgroups.plugin",module_name:"cgroups"}]}},info_provided_to_referring_integrations:{description:""},keywords:["virtual","filesystem","eBPF","I/O","files"],most_popular:!1},overview:"# eBPF VFS\n\nPlugin: ebpf.plugin\nModule: vfs\n\n## Overview\n\nMonitor I/O events on the Linux Virtual Filesystem.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional time is between 90 and 200ms per call on kernels that do not have BTF technology.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names may differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if it is necessary.\n8. Update your boot loader\n\n
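Judging from the metric names in this integration, the module traces VFS functions such as vfs_read, vfs_write, vfs_unlink, vfs_fsync, vfs_open and vfs_create (an inference, not a statement from the collector itself); a quick way to confirm that these symbols exist on your kernel:\n\n```bash\n# Count the traced VFS symbols exported by the kernel (expect 6 on most kernels).\ngrep -c -E " vfs_(read|write|unlink|fsync|open|create)$" /proc/kallsyms\n```\n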
\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/vfs.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/vfs.conf\n```\n#### Options\n\nAll options are defined inside section `[global]`.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor the call (`entry`) for the functions or it will also monitor the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (Attach only `kprobe`), `co-re` (Plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (Option with lowest overhead), and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`) the plugin will load one hash table per core, instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\nThese metrics show grouped information per cgroup/service.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.vfs_unlink | delete | calls/s |\n| cgroup.vfs_write | write | calls/s |\n| cgroup.vfs_write_error | write | calls/s |\n| cgroup.vfs_read | read | calls/s |\n| cgroup.vfs_read_error | read | calls/s |\n| cgroup.vfs_write_bytes | write | bytes/s |\n| cgroup.vfs_read_bytes | read | bytes/s |\n| cgroup.vfs_fsync | fsync | calls/s |\n| cgroup.vfs_fsync_error | fsync | calls/s |\n| cgroup.vfs_open | open | calls/s |\n| cgroup.vfs_open_error | open | calls/s |\n| cgroup.vfs_create | create | calls/s |\n| cgroup.vfs_create_error | create | calls/s |\n| services.vfs_unlink | a dimension per systemd service | calls/s |\n| services.vfs_write | a dimension per systemd service | calls/s |\n| services.vfs_write_error | a dimension per systemd service | calls/s |\n| services.vfs_read | a dimension per systemd service | calls/s |\n| services.vfs_read_error | a dimension per systemd service | calls/s |\n| services.vfs_write_bytes | a dimension per systemd service | bytes/s |\n| services.vfs_read_bytes | a dimension per systemd service | bytes/s |\n| services.vfs_fsync | a dimension per systemd service | calls/s |\n| services.vfs_fsync_error | a dimension per systemd service | calls/s |\n| services.vfs_open | a dimension per systemd service | calls/s |\n| services.vfs_open_error | a dimension per systemd service | calls/s |\n| services.vfs_create | a dimension per systemd service | calls/s |\n| services.vfs_create_error | a dimension per systemd service | calls/s |\n\n### Per eBPF VFS instance\n\nThese metrics show I/O events on the Linux Virtual Filesystem, aggregated for the whole host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filesystem.vfs_deleted_objects | delete | calls/s |\n| filesystem.vfs_io | read, write | calls/s |\n| filesystem.vfs_io_bytes | read, write | bytes/s |\n| filesystem.vfs_io_error | read, write | calls/s |\n| filesystem.vfs_fsync | fsync | calls/s |\n| filesystem.vfs_fsync_error | fsync | calls/s |\n| filesystem.vfs_open | open | calls/s |\n| filesystem.vfs_open_error | open | calls/s |\n| filesystem.vfs_create | create | calls/s |\n| filesystem.vfs_create_error | create | calls/s |\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_call_vfs_unlink | calls | calls/s |\n| app.ebpf_call_vfs_write | calls | calls/s |\n| app.ebpf_call_vfs_write_error | calls | calls/s |\n| app.ebpf_call_vfs_read | calls | calls/s |\n| app.ebpf_call_vfs_read_error | calls | calls/s |\n| app.ebpf_call_vfs_write_bytes | writes | bytes/s |\n| app.ebpf_call_vfs_read_bytes | reads | bytes/s |\n| app.ebpf_call_vfs_fsync | calls | calls/s |\n| app.ebpf_call_vfs_fsync_error | calls | calls/s |\n| app.ebpf_call_vfs_open | calls | calls/s |\n| app.ebpf_call_vfs_open_error | calls | calls/s |\n| app.ebpf_call_vfs_create | calls | calls/s |\n| app.ebpf_call_vfs_create_error | calls | calls/s |\n\n",integration_type:"collector",id:"ebpf.plugin-vfs-eBPF_VFS",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"dev.cpu.0.freq",monitored_instance:{name:"dev.cpu.0.freq",link:"https://www.freebsd.org/",categories:["data-collection.freebsd"],icon_filename:"freebsd.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[],most_popular:!1},overview:"# dev.cpu.0.freq\n\nPlugin: freebsd.plugin\nModule: dev.cpu.0.freq\n\n## Overview\n\nRead the current CPU scaling frequency.\n\nCurrent CPU Scaling Frequency\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| dev.cpu.0.freq | Enable or disable CPU Scaling frequency metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per dev.cpu.0.freq instance\n\nThe metric shows the status of the CPU frequency; it is directly affected by the system load.\n\n
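To cross-check the chart against the raw value, you can query the same sysctl OID directly (a sketch; the OID matches the module name):\n\n```bash\n# Prints the current frequency of CPU 0 in MHz.\nsysctl dev.cpu.0.freq\n```\n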
This scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.scaling_cur_freq | frequency | MHz |\n\n",integration_type:"collector",id:"freebsd.plugin-dev.cpu.0.freq-dev.cpu.0.freq",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"dev.cpu.temperature",monitored_instance:{name:"dev.cpu.temperature",link:"https://www.freebsd.org/",categories:["data-collection.freebsd"],icon_filename:"freebsd.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[],most_popular:!1},overview:"# dev.cpu.temperature\n\nPlugin: freebsd.plugin\nModule: dev.cpu.temperature\n\n## Overview\n\nGet the current CPU temperature\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| dev.cpu.temperature | Enable or disable CPU temperature metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per dev.cpu.temperature instance\n\nThis metric shows the latest CPU temperature.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.temperature | a dimension per core | Celsius |\n\n",integration_type:"collector",id:"freebsd.plugin-dev.cpu.temperature-dev.cpu.temperature",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"devstat",monitored_instance:{name:"devstat",link:"https://www.freebsd.org/",categories:["data-collection.freebsd"],icon_filename:"hard-drive.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[],most_popular:!1},overview:"# devstat\n\nPlugin: freebsd.plugin\nModule: devstat\n\n## Overview\n\nCollect information per hard disk available on the host.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:kern.devstat]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enable new disks detected at runtime | Enable or disable the possibility to detect new disks. | auto | no |\n| performance metrics for pass devices | Enable or disable metrics for disks with type `PASS`. | auto | no |\n| total bandwidth for all disks | Enable or disable total bandwidth metric for all disks. | yes | no |\n| bandwidth for all disks | Enable or disable bandwidth for all disks metric. | auto | no |\n| operations for all disks | Enable or disable operations for all disks metric. | auto | no |\n| queued operations for all disks | Enable or disable queued operations for all disks metric. | auto | no |\n| utilization percentage for all disks | Enable or disable utilization percentage for all disks metric. | auto | no |\n| i/o time for all disks | Enable or disable I/O time for all disks metric. | auto | no |\n| average completed i/o time for all disks | Enable or disable average completed I/O time for all disks metric. 
| auto | no |\n| average completed i/o bandwidth for all disks | Enable or disable average completed I/O bandwidth for all disks metric. | auto | no |\n| average service time for all disks | Enable or disable average service time for all disks metric. | auto | no |\n| disable by default disks matching | Do not create charts for disks listed. | | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 10min_disk_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.util | average percentage of time ${label:device} disk was busy over the last 10 minutes |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per devstat instance\n\nThese metrics give a general overview of I/O events on disks.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.io | io, out | KiB/s |\n\n### Per disk\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.io | reads, writes, frees | KiB/s |\n| disk.ops | reads, writes, other, frees | operations/s |\n| disk.qops | operations | operations |\n| disk.util | utilization | % of time working |\n| disk.iotime | reads, writes, other, frees | milliseconds/s |\n| disk.await | reads, writes, other, frees | milliseconds/operation |\n| disk.avgsz | reads, writes, frees | KiB/operation |\n| disk.svctm | svctm | milliseconds/operation |\n\n",integration_type:"collector",id:"freebsd.plugin-devstat-devstat",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"getifaddrs",monitored_instance:{name:"getifaddrs",link:"https://www.freebsd.org/",categories:["data-collection.freebsd"],icon_filename:"network.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[],most_popular:!1},overview:"# getifaddrs\n\nPlugin: freebsd.plugin\nModule: getifaddrs\n\n## Overview\n\nCollect traffic per network interface.\n\nThe plugin calls `getifaddrs` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:getifaddrs]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enable new interfaces detected at runtime | Enable or disable the possibility to discover new interfaces after the plugin starts. | auto | no |\n| total bandwidth for physical interfaces | Enable or disable total bandwidth for physical interfaces metric. | auto | no |\n| total packets for physical interfaces | Enable or disable total packets for physical interfaces metric. | auto | no |\n| total bandwidth for ipv4 interface | Enable or disable total bandwidth for IPv4 interface metric. | auto | no |\n| total bandwidth for ipv6 interfaces | Enable or disable total bandwidth for ipv6 interfaces metric. | auto | no |\n| bandwidth for all interfaces | Enable or disable bandwidth for all interfaces metric. | auto | no |\n| packets for all interfaces | Enable or disable packets for all interfaces metric. | auto | no |\n| errors for all interfaces | Enable or disable errors for all interfaces metric. | auto | no |\n| drops for all interfaces | Enable or disable drops for all interfaces metric. | auto | no |\n| collisions for all interface | Enable or disable collisions for all interfaces metric. | auto | no |\n| disable by default interfaces matching | Do not display data for interfaces listed. | lo* | no |\n| set physical interfaces for system.net | Do not show network traffic for listed interfaces. 
| igb* ix* cxl* em* ixl* ixlv* bge* ixgbe* vtnet* vmx* re* igc* dwc* | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ interface_speed ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.net | network interface ${label:device} current speed |\n| [ inbound_packets_dropped_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.drops | ratio of inbound dropped packets for the network interface ${label:device} over the last 10 minutes |\n| [ outbound_packets_dropped_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.drops | ratio of outbound dropped packets for the network interface ${label:device} over the last 10 minutes |\n| [ 1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ 10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n| [ interface_inbound_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.errors | number of inbound errors for the network interface ${label:device} in the last 10 minutes |\n| [ interface_outbound_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.errors | number of outbound errors for the network interface ${label:device} in the last 10 minutes |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per getifaddrs instance\n\nGeneral overview of network traffic.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.net | received, sent | kilobits/s |\n| system.packets | received, sent, multicast_received, multicast_sent | packets/s |\n| system.ipv4 | received, sent | kilobits/s |\n| system.ipv6 | received, sent | kilobits/s |\n\n### Per network device\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| net.net | received, sent | kilobits/s |\n| net.packets | received, sent, multicast_received, multicast_sent | packets/s |\n| net.errors | inbound, outbound | errors/s |\n| net.drops | inbound, outbound | drops/s |\n| net.events | collisions | events/s |\n\n",integration_type:"collector",id:"freebsd.plugin-getifaddrs-getifaddrs",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"getmntinfo",monitored_instance:{name:"getmntinfo",link:"https://www.freebsd.org/",categories:["data-collection.freebsd"],icon_filename:"hard-drive.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[],most_popular:!1},overview:"# getmntinfo\n\nPlugin: freebsd.plugin\nModule: getmntinfo\n\n## Overview\n\nCollect information per mount point.\n\nThe plugin calls `getmntinfo` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:getmntinfo]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enable new mount points detected at runtime | Check for new mount points during runtime. | auto | no |\n| space usage for all disks | Enable or disable space usage for all disks metric. | auto | no |\n| inodes usage for all disks | Enable or disable inodes usage for all disks metric. | auto | no |\n| exclude space metrics on paths | Do not show metrics for listed paths. | /proc/* | no |\n| exclude space metrics on filesystems | Do not monitor listed filesystems. 
| autofs procfs subfs devfs none | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ disk_space_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.space | disk ${label:mount_point} space utilization |\n| [ disk_inode_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.inodes | disk ${label:mount_point} inode utilization |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per mount point\n\nThese metrics show details about mount point usage.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.space | avail, used, reserved_for_root | GiB |\n| disk.inodes | avail, used, reserved_for_root | inodes |\n\n",integration_type:"collector",id:"freebsd.plugin-getmntinfo-getmntinfo",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"hw.intrcnt",monitored_instance:{name:"hw.intrcnt",link:"https://www.freebsd.org/",categories:["data-collection.freebsd"],icon_filename:"freebsd.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[],most_popular:!1},overview:"# hw.intrcnt\n\nPlugin: freebsd.plugin\nModule: hw.intrcnt\n\n## Overview\n\nGet the total number of interrupts\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary="Config option" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| hw.intrcnt | Enable or disable Interrupts metric. 
| yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per hw.intrcnt instance\n\nThese metrics show the system interrupt frequency.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.intr | interrupts | interrupts/s |\n| system.interrupts | a dimension per interrupt | interrupts/s |\n\n",integration_type:"collector",id:"freebsd.plugin-hw.intrcnt-hw.intrcnt",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"ipfw",monitored_instance:{name:"ipfw",link:"https://www.freebsd.org/",categories:["data-collection.freebsd"],icon_filename:"firewall.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[],most_popular:!1},overview:"# ipfw\n\nPlugin: freebsd.plugin\nModule: ipfw\n\n## Overview\n\nCollect information about the FreeBSD firewall.\n\nThe plugin uses a RAW socket to communicate with the kernel and collect data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:ipfw]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| counters for static rules | Enable or disable counters for static rules metric. | yes | no |\n| number of dynamic rules | Enable or disable number of dynamic rules metric. | yes | no |\n| allocated memory | Enable or disable allocated memory metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per ipfw instance\n\nThese metrics show FreeBSD firewall statistics.\n\n
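If you want to inspect the rules behind these charts, the `ipfw` userland tool can list them (a sketch from general FreeBSD usage, not part of this collector): `show` prints static rules with their counters, while `-d` includes dynamic rules.\n\n```bash\nipfw show\nipfw -d list\n```\n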
This scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipfw.mem | dynamic, static | bytes |\n| ipfw.packets | a dimension per static rule | packets/s |\n| ipfw.bytes | a dimension per static rule | bytes/s |\n| ipfw.active | a dimension per dynamic rule | rules |\n| ipfw.expired | a dimension per dynamic rule | rules |\n\n",integration_type:"collector",id:"freebsd.plugin-ipfw-ipfw",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"kern.cp_time",monitored_instance:{name:"kern.cp_time",link:"https://www.freebsd.org/",categories:["data-collection.freebsd"],icon_filename:"freebsd.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[],most_popular:!1},overview:"# kern.cp_time\n\nPlugin: freebsd.plugin\nModule: kern.cp_time\n\n## Overview\n\nTotal CPU utilization\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nThe netdata main configuration file.\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| kern.cp_time | Enable or disable Total CPU usage. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU utilization over the last 10 minutes (excluding iowait, nice and steal) |\n| [ 10min_cpu_iowait ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU iowait time over the last 10 minutes |\n| [ 20min_steal_cpu ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU steal time over the last 20 minutes |\n| [ 10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU utilization over the last 10 minutes (excluding nice) |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per kern.cp_time instance\n\nThese metrics show CPU usage statistics.\n\n
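You can print the raw counters this chart is derived from (a sketch): the OID returns cumulative ticks for the user, nice, system, interrupt and idle states.\n\n```bash\nsysctl kern.cp_time\n```\n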
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| kern.ipc.msq | Enable or disable IPC message queue metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per kern.ipc.msq instance\n\nThese metrics show IPC messages statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ipc_msq_queues | queues | queues |\n| system.ipc_msq_messages | messages | messages |\n| system.ipc_msq_size | allocated, used | bytes |\n\n",integration_type:"collector",id:"freebsd.plugin-kern.ipc.msq-kern.ipc.msq",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"kern.ipc.sem",monitored_instance:{name:"kern.ipc.sem",link:"https://www.freebsd.org/",categories:["data-collection.freebsd"],icon_filename:"freebsd.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[],most_popular:!1},overview:"# kern.ipc.sem\n\nPlugin: freebsd.plugin\nModule: kern.ipc.sem\n\n## Overview\n\nCollect information about semaphores.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| kern.ipc.sem | Enable or disable semaphore metrics. 
| yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ semaphores_used ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ipc.conf) | system.ipc_semaphores | IPC semaphore utilization |\n| [ semaphore_arrays_used ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ipc.conf) | system.ipc_semaphore_arrays | IPC semaphore arrays utilization |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per kern.ipc.sem instance\n\nThese metrics show counters for semaphores on the host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ipc_semaphores | semaphores | semaphores |\n| system.ipc_semaphore_arrays | arrays | arrays |\n\n",integration_type:"collector",id:"freebsd.plugin-kern.ipc.sem-kern.ipc.sem",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"kern.ipc.shm",monitored_instance:{name:"kern.ipc.shm",link:"https://www.freebsd.org/",categories:["data-collection.freebsd"],icon_filename:"memory.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[],most_popular:!1},overview:"# kern.ipc.shm\n\nPlugin: freebsd.plugin\nModule: kern.ipc.shm\n\n## Overview\n\nCollect shared memory information.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| kern.ipc.shm | Enable or disable shared memory metric. 
| yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per kern.ipc.shm instance\n\nThese metrics give status about current shared memory segments.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ipc_shared_mem_segs | segments | segments |\n| system.ipc_shared_mem_size | allocated | KiB |\n\n",integration_type:"collector",id:"freebsd.plugin-kern.ipc.shm-kern.ipc.shm",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"net.inet.icmp.stats",monitored_instance:{name:"net.inet.icmp.stats",link:"https://www.freebsd.org/",categories:["data-collection.freebsd"],icon_filename:"network.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[],most_popular:!1},overview:"# net.inet.icmp.stats\n\nPlugin: freebsd.plugin\nModule: net.inet.icmp.stats\n\n## Overview\n\nCollect information about ICMP traffic.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet.icmp.stats]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| IPv4 ICMP packets | Enable or disable IPv4 ICMP packets metric. | yes | no |\n| IPv4 ICMP error | Enable or disable IPv4 ICMP error metric. | yes | no |\n| IPv4 ICMP messages | Enable or disable IPv4 ICMP messages metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet.icmp.stats instance\n\nThese metrics show ICMP connections statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv4.icmp | received, sent | packets/s |\n| ipv4.icmp_errors | InErrors, OutErrors, InCsumErrors | packets/s |\n| ipv4.icmpmsg | InEchoReps, OutEchoReps, InEchos, OutEchos | packets/s |\n\n",integration_type:"collector",id:"freebsd.plugin-net.inet.icmp.stats-net.inet.icmp.stats",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"net.inet.ip.stats",monitored_instance:{name:"net.inet.ip.stats",link:"https://www.freebsd.org/",categories:["data-collection.freebsd"],icon_filename:"network.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[],most_popular:!1},overview:"# net.inet.ip.stats\n\nPlugin: freebsd.plugin\nModule: net.inet.ip.stats\n\n## Overview\n\nCollect IP stats\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet.ip.stats]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| ipv4 packets | Enable or disable IPv4 packets metric. | yes | no |\n| ipv4 fragments sent | Enable or disable IPv4 fragments sent metric. | yes | no |\n| ipv4 fragments assembly | Enable or disable IPv4 fragments assembly metric. | yes | no |\n| ipv4 errors | Enable or disable IPv4 errors metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet.ip.stats instance\n\nThese metrics show IPv4 connections statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv4.packets | received, sent, forwarded, delivered | packets/s |\n| ipv4.fragsout | ok, failed, created | packets/s |\n| ipv4.fragsin | ok, failed, all | packets/s |\n| ipv4.errors | InDiscards, OutDiscards, InHdrErrors, OutNoRoutes, InAddrErrors, InUnknownProtos | packets/s |\n\n",integration_type:"collector",id:"freebsd.plugin-net.inet.ip.stats-net.inet.ip.stats",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"net.inet.tcp.states",monitored_instance:{name:"net.inet.tcp.states",link:"https://www.freebsd.org/",categories:["data-collection.freebsd"],icon_filename:"network.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[],most_popular:!1},overview:"# net.inet.tcp.states\n\nPlugin: freebsd.plugin\nModule: net.inet.tcp.states\n\n## Overview\n\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| net.inet.tcp.states | Enable or disable TCP state metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ tcp_connections ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_conn.conf) | ipv4.tcpsock | IPv4 TCP connections utilization |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet.tcp.states instance\n\nA counter for TCP connections.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv4.tcpsock | connections | active connections |\n\n",integration_type:"collector",id:"freebsd.plugin-net.inet.tcp.states-net.inet.tcp.states",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"net.inet.tcp.stats",monitored_instance:{name:"net.inet.tcp.stats",link:"https://www.freebsd.org/",categories:["data-collection.freebsd"],icon_filename:"network.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[],most_popular:!1},overview:"# net.inet.tcp.stats\n\nPlugin: freebsd.plugin\nModule: net.inet.tcp.stats\n\n## Overview\n\nCollect overall information about TCP connections.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet.tcp.stats]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| ipv4 TCP packets | Enable or disable ipv4 TCP packets metric. | yes | no |\n| ipv4 TCP errors | Enable or disable ipv4 TCP errors metric. | yes | no |\n| ipv4 TCP handshake issues | Enable or disable ipv4 TCP handshake issue metric. | yes | no |\n| TCP connection aborts | Enable or disable TCP connection aborts metric. | auto | no |\n| TCP out-of-order queue | Enable or disable TCP out-of-order queue metric. | auto | no |\n| TCP SYN cookies | Enable or disable TCP SYN cookies metric. | auto | no |\n| TCP listen issues | Enable or disable TCP listen issues metric. | auto | no |\n| ECN packets | Enable or disable ECN packets metric. 
| auto | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 1m_ipv4_tcp_resets_sent ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ipv4.tcphandshake | average number of sent TCP RESETS over the last minute |\n| [ 10s_ipv4_tcp_resets_sent ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ipv4.tcphandshake | average number of sent TCP RESETS over the last 10 seconds. This can indicate a port scan, or that a service running on this host has crashed. Netdata will not send a clear notification for this alarm. |\n| [ 1m_ipv4_tcp_resets_received ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ipv4.tcphandshake | average number of received TCP RESETS over the last minute |\n| [ 10s_ipv4_tcp_resets_received ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ipv4.tcphandshake | average number of received TCP RESETS over the last 10 seconds. This can be an indication that a service this host needs has crashed. Netdata will not send a clear notification for this alarm. |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet.tcp.stats instance\n\nThese metrics show TCP connections statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv4.tcppackets | received, sent | packets/s |\n| ipv4.tcperrors | InErrs, InCsumErrors, RetransSegs | packets/s |\n| ipv4.tcphandshake | EstabResets, ActiveOpens, PassiveOpens, AttemptFails | events/s |\n| ipv4.tcpconnaborts | baddata, userclosed, nomemory, timeout, linger | connections/s |\n| ipv4.tcpofo | inqueue | packets/s |\n| ipv4.tcpsyncookies | received, sent, failed | packets/s |\n| ipv4.tcplistenissues | overflows | packets/s |\n| ipv4.ecnpkts | InCEPkts, InECT0Pkts, InECT1Pkts, OutECT0Pkts, OutECT1Pkts | packets/s |\n\n",integration_type:"collector",id:"freebsd.plugin-net.inet.tcp.stats-net.inet.tcp.stats",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"net.inet.udp.stats",monitored_instance:{name:"net.inet.udp.stats",link:"https://www.freebsd.org/",categories:["data-collection.freebsd"],icon_filename:"network.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[],most_popular:!1},overview:"# net.inet.udp.stats\n\nPlugin: freebsd.plugin\nModule: net.inet.udp.stats\n\n## Overview\n\nCollect information about UDP connections.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## 
Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet.udp.stats]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| ipv4 UDP packets | Enable or disable ipv4 UDP packets metric. | yes | no |\n| ipv4 UDP errors | Enable or disable ipv4 UDP errors metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 1m_ipv4_udp_receive_buffer_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/udp_errors.conf) | ipv4.udperrors | average number of UDP receive buffer errors over the last minute |\n| [ 1m_ipv4_udp_send_buffer_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/udp_errors.conf) | ipv4.udperrors | average number of UDP send buffer errors over the last minute |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet.udp.stats instance\n\nThese metrics show UDP connections statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv4.udppackets | received, sent | packets/s |\n| ipv4.udperrors | InErrors, NoPorts, RcvbufErrors, InCsumErrors, IgnoredMulti | events/s |\n\n",integration_type:"collector",id:"freebsd.plugin-net.inet.udp.stats-net.inet.udp.stats",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"net.inet6.icmp6.stats",monitored_instance:{name:"net.inet6.icmp6.stats",link:"https://www.freebsd.org/",categories:["data-collection.freebsd"],icon_filename:"network.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[],most_popular:!1},overview:"# net.inet6.icmp6.stats\n\nPlugin: freebsd.plugin\nModule: net.inet6.icmp6.stats\n\n## Overview\n\nCollect information about IPv6 ICMP\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet6.icmp6.stats]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| icmp | Enable or disable ICMP metric. | auto | no |\n| icmp redirects | Enable or disable ICMP redirects metric. | auto | no |\n| icmp errors | Enable or disable ICMP errors metric. | auto | no |\n| icmp echos | Enable or disable ICMP echos metric. | auto | no |\n| icmp router | Enable or disable ICMP router metric. | auto | no |\n| icmp neighbor | Enable or disable ICMP neighbor metric. | auto | no |\n| icmp types | Enable or disable ICMP types metric. | auto | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet6.icmp6.stats instance\n\nCollect IPv6 ICMP traffic statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv6.icmp | received, sent | messages/s |\n| ipv6.icmpredir | received, sent | redirects/s |\n| ipv6.icmperrors | InErrors, OutErrors, InCsumErrors, InDestUnreachs, InPktTooBigs, InTimeExcds, InParmProblems, OutDestUnreachs, OutTimeExcds, OutParmProblems | errors/s |\n| ipv6.icmpechos | InEchos, OutEchos, InEchoReplies, OutEchoReplies | messages/s |\n| ipv6.icmprouter | InSolicits, OutSolicits, InAdvertisements, OutAdvertisements | messages/s |\n| ipv6.icmpneighbor | InSolicits, OutSolicits, InAdvertisements, OutAdvertisements | messages/s |\n| ipv6.icmptypes | InType1, InType128, InType129, InType136, OutType1, OutType128, OutType129, OutType133, OutType135, OutType143 | messages/s |\n\n",integration_type:"collector",id:"freebsd.plugin-net.inet6.icmp6.stats-net.inet6.icmp6.stats",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"net.inet6.ip6.stats",monitored_instance:{name:"net.inet6.ip6.stats",link:"https://www.freebsd.org/",categories:["data-collection.freebsd"],icon_filename:"network.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[],most_popular:!1},overview:"# net.inet6.ip6.stats\n\nPlugin: freebsd.plugin\nModule: net.inet6.ip6.stats\n\n## Overview\n\nCollect information about IPv6 stats.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet6.ip6.stats]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| ipv6 packets | Enable or disable ipv6 packet metric. | auto | no |\n| ipv6 fragments sent | Enable or disable ipv6 fragments sent metric. | auto | no |\n| ipv6 fragments assembly | Enable or disable ipv6 fragments assembly metric. | auto | no |\n| ipv6 errors | Enable or disable ipv6 errors metric. 
| auto | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet6.ip6.stats instance\n\nThese metrics show general information about IPv6 connections.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv6.packets | received, sent, forwarded, delivers | packets/s |\n| ipv6.fragsout | ok, failed, all | packets/s |\n| ipv6.fragsin | ok, failed, timeout, all | packets/s |\n| ipv6.errors | InDiscards, OutDiscards, InHdrErrors, InAddrErrors, InTruncatedPkts, InNoRoutes, OutNoRoutes | packets/s |\n\n",integration_type:"collector",id:"freebsd.plugin-net.inet6.ip6.stats-net.inet6.ip6.stats",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"net.isr",monitored_instance:{name:"net.isr",link:"https://www.freebsd.org/",categories:["data-collection.freebsd"],icon_filename:"freebsd.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[],most_popular:!1},overview:"# net.isr\n\nPlugin: freebsd.plugin\nModule: net.isr\n\n## Overview\n\nCollect information about system softnet stat.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.isr]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| netisr | Enable or disable general vision about softnet stat metrics. | yes | no |\n| netisr per core | Enable or disable softnet stat metric per core. 
| yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 1min_netdev_backlog_exceeded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/softnet.conf) | system.softnet_stat | average number of dropped packets in the last minute due to exceeded net.core.netdev_max_backlog |\n| [ 1min_netdev_budget_ran_outs ](https://github.com/netdata/netdata/blob/master/src/health/health.d/softnet.conf) | system.softnet_stat | average number of times ksoftirq ran out of sysctl net.core.netdev_budget or net.core.netdev_budget_usecs with work remaining over the last minute (this can be a cause for dropped packets) |\n| [ 10min_netisr_backlog_exceeded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/softnet.conf) | system.softnet_stat | average number of drops in the last minute due to exceeded sysctl net.route.netisr_maxqlen (this can be a cause for dropped packets) |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.isr instance\n\nThese metrics show statistics about softnet stats.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.softnet_stat | dispatched, hybrid_dispatched, qdrops, queued | events/s |\n\n### Per core\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.softnet_stat | dispatched, hybrid_dispatched, qdrops, queued | events/s |\n\n",integration_type:"collector",id:"freebsd.plugin-net.isr-net.isr",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"system.ram",monitored_instance:{name:"system.ram",link:"https://www.freebsd.org/",categories:["data-collection.freebsd"],icon_filename:"memory.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[],most_popular:!1},overview:"# system.ram\n\nPlugin: freebsd.plugin\nModule: system.ram\n\n## Overview\n\nShow information about system memory usage.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| system.ram | Enable or disable system RAM metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ram.conf) | system.ram | system memory utilization |\n| [ ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ram.conf) | system.ram | system memory utilization |\n| [ ram_available ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ram.conf) | mem.available | percentage of estimated amount of RAM available for userspace processes, without causing swapping |\n| [ ram_available ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ram.conf) | mem.available | percentage of estimated amount of RAM available for userspace processes, without causing swapping |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per system.ram instance\n\nThis metric shows RAM usage statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ram | free, active, inactive, wired, cache, laundry, buffers | MiB |\n| mem.available | avail | MiB |\n\n",integration_type:"collector",id:"freebsd.plugin-system.ram-system.ram",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"uptime",monitored_instance:{name:"uptime",link:"https://www.freebsd.org/",categories:["data-collection.freebsd"],icon_filename:"freebsd.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[],most_popular:!1},overview:"# uptime\n\nPlugin: freebsd.plugin\nModule: uptime\n\n## Overview\n\nShow the period of time the server has been up.\n\nThe plugin calls `clock_gettime` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the 
`[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.loadavg | Enable or disable load average metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per uptime instance\n\nHow long the system is running.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.uptime | uptime | seconds |\n\n",integration_type:"collector",id:"freebsd.plugin-uptime-uptime",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"vm.loadavg",monitored_instance:{name:"vm.loadavg",link:"https://www.freebsd.org/",categories:["data-collection.freebsd"],icon_filename:"freebsd.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[],most_popular:!1},overview:"# vm.loadavg\n\nPlugin: freebsd.plugin\nModule: vm.loadavg\n\n## Overview\n\nSystem Load Average\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.loadavg | Enable or disable load average metric. 
| yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ load_cpu_number ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | number of active CPU cores in the system |\n| [ load_average_15 ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | system fifteen-minute load average |\n| [ load_average_5 ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | system five-minute load average |\n| [ load_average_1 ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | system one-minute load average |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.loadavg instance\n\nMonitoring for number of threads running or waiting.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.load | load1, load5, load15 | load |\n\n",integration_type:"collector",id:"freebsd.plugin-vm.loadavg-vm.loadavg",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"vm.stats.sys.v_intr",monitored_instance:{name:"vm.stats.sys.v_intr",link:"https://www.freebsd.org/",categories:["data-collection.freebsd"],icon_filename:"freebsd.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[],most_popular:!1},overview:"# vm.stats.sys.v_intr\n\nPlugin: freebsd.plugin\nModule: vm.stats.sys.v_intr\n\n## Overview\n\nDevice interrupts\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary="Config option" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.stats.sys.v_intr | Enable or disable device interrupts metric. 
| yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.stats.sys.v_intr instance\n\nThis metric shows device interrupt frequency.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.dev_intr | interrupts | interrupts/s |\n\n",integration_type:"collector",id:"freebsd.plugin-vm.stats.sys.v_intr-vm.stats.sys.v_intr",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"vm.stats.sys.v_soft",monitored_instance:{name:"vm.stats.sys.v_soft",link:"https://www.freebsd.org/",categories:["data-collection.freebsd"],icon_filename:"freebsd.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[],most_popular:!1},overview:"# vm.stats.sys.v_soft\n\nPlugin: freebsd.plugin\nModule: vm.stats.sys.v_soft\n\n## Overview\n\nSoftware Interrupt\n\nvm.stats.sys.v_soft\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary="Config option" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.stats.sys.v_soft | Enable or disable software interrupts metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.stats.sys.v_soft instance\n\nThis metric shows software interrupt frequency.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.soft_intr | interrupts | interrupts/s |\n\n",integration_type:"collector",id:"freebsd.plugin-vm.stats.sys.v_soft-vm.stats.sys.v_soft",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"vm.stats.sys.v_swtch",monitored_instance:{name:"vm.stats.sys.v_swtch",link:"https://www.freebsd.org/",categories:["data-collection.freebsd"],icon_filename:"freebsd.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[],most_popular:!1},overview:"# vm.stats.sys.v_swtch\n\nPlugin: freebsd.plugin\nModule: vm.stats.sys.v_swtch\n\n## Overview\n\nCPU context switch\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.stats.sys.v_swtch | Enable or disable CPU context switch metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.stats.sys.v_swtch instance\n\nThis metric counts the number of context switches happening on the host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ctxt | switches | context switches/s |\n| system.forks | started | processes/s |\n\n",integration_type:"collector",id:"freebsd.plugin-vm.stats.sys.v_swtch-vm.stats.sys.v_swtch",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"vm.stats.vm.v_pgfaults",monitored_instance:{name:"vm.stats.vm.v_pgfaults",link:"https://www.freebsd.org/",categories:["data-collection.freebsd"],icon_filename:"memory.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[],most_popular:!1},overview:"# vm.stats.vm.v_pgfaults\n\nPlugin: freebsd.plugin\nModule: vm.stats.vm.v_pgfaults\n\n## Overview\n\nCollect memory page fault events.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.stats.vm.v_pgfaults | Enable or disable Memory page fault metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.stats.vm.v_pgfaults instance\n\nThe number of page faults that happened on the host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.pgfaults | memory, io_requiring, cow, cow_optimized, in_transit | page faults/s |\n\n",integration_type:"collector",id:"freebsd.plugin-vm.stats.vm.v_pgfaults-vm.stats.vm.v_pgfaults",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"vm.stats.vm.v_swappgs",monitored_instance:{name:"vm.stats.vm.v_swappgs",link:"https://www.freebsd.org/",categories:["data-collection.freebsd"],icon_filename:"memory.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[],most_popular:!1},overview:"# vm.stats.vm.v_swappgs\n\nPlugin: freebsd.plugin\nModule: vm.stats.vm.v_swappgs\n\n## Overview\n\nThis metric shows the amount of data read from and written to SWAP.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.stats.vm.v_swappgs | Enable or disable the SWAP I/O metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 30min_ram_swapped_out ](https://github.com/netdata/netdata/blob/master/src/health/health.d/swap.conf) | mem.swapio | percentage of the system RAM swapped in the last 30 minutes |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.stats.vm.v_swappgs instance\n\nThis metric shows events happening on SWAP.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.swapio | io, out | KiB/s |\n\n",integration_type:"collector",id:"freebsd.plugin-vm.stats.vm.v_swappgs-vm.stats.vm.v_swappgs",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"vm.swap_info",monitored_instance:{name:"vm.swap_info",link:"",categories:["data-collection.freebsd"],icon_filename:"freebsd.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[],most_popular:!1},overview:"# vm.swap_info\n\nPlugin: freebsd.plugin\nModule: vm.swap_info\n\n## Overview\n\nCollect information about SWAP memory.\n\nThe plugin calls `sysctlnametomib` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.swap_info | Enable or disable SWAP metrics. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ used_swap ](https://github.com/netdata/netdata/blob/master/src/health/health.d/swap.conf) | mem.swap | swap memory utilization |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.swap_info instance\n\nThis metric shows the SWAP usage.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.swap | free, used | MiB |\n\n",integration_type:"collector",id:"freebsd.plugin-vm.swap_info-vm.swap_info",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"vm.vmtotal",monitored_instance:{name:"vm.vmtotal",link:"https://www.freebsd.org/",categories:["data-collection.freebsd"],icon_filename:"memory.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[],most_popular:!1},overview:"# vm.vmtotal\n\nPlugin: freebsd.plugin\nModule: vm.vmtotal\n\n## Overview\n\nCollect Virtual Memory information from host.\n\nThe plugin calls function `sysctl` to collect data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:vm.vmtotal]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enable total processes | Number of active processes. | yes | no |\n| processes running | Show the number of processes running or blocked. | yes | no |\n| real memory | Memory used on host. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ active_processes ](https://github.com/netdata/netdata/blob/master/src/health/health.d/processes.conf) | system.active_processes | system process IDs (PID) space utilization |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.vmtotal instance\n\nThese metrics provide an overview of the processes running on the host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.active_processes | active | processes |\n| system.processes | running, blocked | processes |\n| mem.real | used | MiB |\n\n",integration_type:"collector",id:"freebsd.plugin-vm.vmtotal-vm.vmtotal",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"zfs",monitored_instance:{name:"zfs",link:"https://www.freebsd.org/",categories:["data-collection.freebsd"],icon_filename:"filesystem.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[],most_popular:!1},overview:"# zfs\n\nPlugin: freebsd.plugin\nModule: zfs\n\n## Overview\n\nCollect metrics for ZFS filesystem.\n\nThe plugin uses `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:zfs_arcstats]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| show zero charts | Do not show charts with zero metrics. | no | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ zfs_memory_throttle ](https://github.com/netdata/netdata/blob/master/src/health/health.d/zfs.conf) | zfs.memory_ops | number of times ZFS had to limit the ARC growth in the last 10 minutes |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per zfs instance\n\nThese metrics show detailed information about ZFS filesystem.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| zfs.arc_size | arcsz, target, min, max | MiB |\n| zfs.l2_size | actual, size | MiB |\n| zfs.reads | arc, demand, prefetch, metadata, l2 | reads/s |\n| zfs.bytes | read, write | KiB/s |\n| zfs.hits | hits, misses | percentage |\n| zfs.hits_rate | hits, misses | events/s |\n| zfs.dhits | hits, misses | percentage |\n| zfs.dhits_rate | hits, misses | events/s |\n| zfs.phits | hits, misses | percentage |\n| zfs.phits_rate | hits, misses | events/s |\n| zfs.mhits | hits, misses | percentage |\n| zfs.mhits_rate | hits, misses | events/s |\n| zfs.l2hits | hits, misses | percentage |\n| zfs.l2hits_rate | hits, misses | events/s |\n| zfs.list_hits | mfu, mfu_ghost, mru, mru_ghost | hits/s |\n| zfs.arc_size_breakdown | recent, frequent | percentage |\n| zfs.memory_ops | throttled | operations/s |\n| zfs.important_ops | evict_skip, deleted, mutex_miss, hash_collisions | operations/s |\n| zfs.actual_hits | hits, misses | percentage |\n| zfs.actual_hits_rate | hits, misses | events/s |\n| zfs.demand_data_hits | hits, misses | percentage |\n| zfs.demand_data_hits_rate | hits, misses | events/s |\n| zfs.prefetch_data_hits | hits, misses | percentage |\n| zfs.prefetch_data_hits_rate | hits, misses | events/s |\n| zfs.hash_elements | current, max | elements |\n| zfs.hash_chains | current, max | chains |\n| zfs.trim_bytes | TRIMmed | bytes |\n| zfs.trim_requests | successful, failed, unsupported | requests |\n\n",integration_type:"collector",id:"freebsd.plugin-zfs-zfs",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"freeipmi.plugin",module_name:"freeipmi",monitored_instance:{name:"Intelligent Platform Management Interface (IPMI)",link:"https://en.wikipedia.org/wiki/Intelligent_Platform_Management_Interface",categories:["data-collection.hardware-devices-and-sensors"],icon_filename:"netdata.png"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["sensors","ipmi","freeipmi","ipmimonitoring"],most_popular:!0},overview:'# Intelligent Platform Management Interface (IPMI)\n\nPlugin: freeipmi.plugin\nModule: freeipmi\n\n## Overview\n\n"Monitor enterprise server sensor readings, event log entries, and hardware statuses to ensure reliable server operations."\n\n\nThe plugin uses open source library IPMImonitoring to communicate with sensors.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn\'t support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nLinux kernel module for IPMI can create big overhead.\n',setup:"## Setup\n\n### Prerequisites\n\n#### Install freeipmi.plugin\n\nWhen using our official DEB/RPM packages, the FreeIPMI plugin is included in a separate package named `netdata-plugin-freeipmi` which needs to be manually installed using your system package manager. 
It is not installed automatically due to the large number of dependencies it requires.\n\nWhen using a static build of Netdata, the FreeIPMI plugin will be included and installed automatically, though you will still need to have FreeIPMI installed on your system to be able to use the plugin.\n\nWhen using a local build of Netdata, you need to ensure that the FreeIPMI development packages (typically called `libipmimonitoring-dev`, `libipmimonitoring-devel`, or `freeipmi-devel`) are installed when building Netdata.\n\n\n#### Preliminary actions\n\nIf you have not previously used IPMI on your system, you will probably need to run the `ipmimonitoring` command as root\nto initialize IPMI settings so that the Netdata plugin works correctly. It should return information about available sensors on the system.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freeipmi]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nThe configuration is set using command line options:\n\n```\n# netdata.conf\n[plugin:freeipmi]\n command options = opt1 opt2 ... optN\n```\n\nTo display a help message listing the available command line options:\n\n```bash\n./usr/libexec/netdata/plugins.d/freeipmi.plugin --help\n```\n\n\n{% details open=true summary=\"Command options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SECONDS | Data collection frequency. | | no |\n| debug | Enable verbose output. | disabled | no |\n| no-sel | Disable System Event Log (SEL) collection. | disabled | no |\n| reread-sdr-cache | Re-read SDR cache on every iteration. | disabled | no |\n| interpret-oem-data | Attempt to parse OEM data. | disabled | no |\n| assume-system-event-record | treat illegal SEL events records as normal. | disabled | no |\n| ignore-non-interpretable-sensors | Do not read sensors that cannot be interpreted. | disabled | no |\n| bridge-sensors | Bridge sensors not owned by the BMC. | disabled | no |\n| shared-sensors | Enable shared sensors if found. | disabled | no |\n| no-discrete-reading | Do not read sensors if their event/reading type code is invalid. | enabled | no |\n| ignore-scanning-disabled | Ignore the scanning bit and read sensors no matter what. | disabled | no |\n| assume-bmc-owner | Assume the BMC is the sensor owner no matter what (usually bridging is required too). | disabled | no |\n| hostname HOST | Remote IPMI hostname or IP address. | local | no |\n| username USER | Username that will be used when connecting to the remote host. | | no |\n| password PASS | Password that will be used when connecting to the remote host. | | no |\n| noauthcodecheck / no-auth-code-check | Don't check the authentication codes returned. | | no |\n| driver-type IPMIDRIVER | Specify the driver type to use instead of doing an auto selection. The currently available outofband drivers are LAN and LAN_2_0, which perform IPMI 1.5 and IPMI 2.0 respectively. 
The currently available inband drivers are KCS, SSIF, OPENIPMI and SUNBMC. | | no |\n| sdr-cache-dir PATH | SDR cache files directory. | /tmp | no |\n| sensor-config-file FILE | Sensors configuration filename. | system default | no |\n| sel-config-file FILE | SEL configuration filename. | system default | no |\n| ignore N1,N2,N3,... | Sensor IDs to ignore. | | no |\n| ignore-status N1,N2,N3,... | Sensor IDs whose status (nominal/warning/critical) should be ignored. | | no |\n| -v | Print version and exit. | | no |\n| --help | Print usage message and exit. | | no |\n\n{% /details %}\n#### Examples\n\n##### Decrease data collection frequency\n\nA basic example that decreases the data collection frequency. The minimum `update every` is 5 (enforced internally by the plugin): IPMI is slow and CPU hungry, so collecting once every 5 seconds is usually acceptable.\n\n```yaml\n[plugin:freeipmi]\n update every = 10\n\n```\n##### Disable SEL collection\n\nAppend to `command options =` the options you need.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n[plugin:freeipmi]\n command options = no-sel\n\n```\n{% /details %}\n##### Ignore specific sensors\n\nSpecific sensor IDs can be excluded from freeipmi tools by editing `/etc/freeipmi/freeipmi.conf` and setting the IDs to be ignored at `ipmi-sensors-exclude-record-ids`.\n\n**However, this file is not used by `libipmimonitoring`** (the library used by Netdata's `freeipmi.plugin`).\n\nTo find the IDs to ignore, run the command `ipmimonitoring`. The first column contains the sensor ID:\n\n```\nID | Name | Type | State | Reading | Units | Event\n1 | Ambient Temp | Temperature | Nominal | 26.00 | C | 'OK'\n2 | Altitude | Other Units Based Sensor | Nominal | 480.00 | ft | 'OK'\n3 | Avg Power | Current | Nominal | 100.00 | W | 'OK'\n4 | Planar 3.3V | Voltage | Nominal | 3.29 | V | 'OK'\n5 | Planar 5V | Voltage | Nominal | 4.90 | V | 'OK'\n6 | Planar 12V | Voltage | Nominal | 11.99 | V | 'OK'\n7 | Planar VBAT | Voltage | Nominal | 2.95 | V | 'OK'\n8 | Fan 1A Tach | Fan | Nominal | 3132.00 | RPM | 'OK'\n9 | Fan 1B Tach | Fan | Nominal | 2150.00 | RPM | 'OK'\n10 | Fan 2A Tach | Fan | Nominal | 2494.00 | RPM | 'OK'\n11 | Fan 2B Tach | Fan | Nominal | 1825.00 | RPM | 'OK'\n12 | Fan 3A Tach | Fan | Nominal | 3538.00 | RPM | 'OK'\n13 | Fan 3B Tach | Fan | Nominal | 2625.00 | RPM | 'OK'\n14 | Fan 1 | Entity Presence | Nominal | N/A | N/A | 'Entity Present'\n15 | Fan 2 | Entity Presence | Nominal | N/A | N/A | 'Entity Present'\n...\n```\n\n`freeipmi.plugin` supports the `ignore` option, which accepts a comma-separated list of sensor IDs to ignore. To configure it, set the following in `netdata.conf`:\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\n[plugin:freeipmi]\n command options = ignore 1,2,3,4,...\n\n```\n{% /details %}\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n\n\n### kipmi0 CPU usage\n\n\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ ipmi_sensor_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ipmi.conf) | ipmi.sensor_state | IPMI sensor ${label:sensor} (${label:component}) state |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nThe plugin does a speed test when it starts, to find out the duration needed by the IPMI processor to respond. 
Depending on the speed of your IPMI processor, charts may need several seconds to show up on the dashboard.\n\n\n### Per Intelligent Platform Management Interface (IPMI) instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipmi.sel | events | events |\n\n### Per sensor\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| sensor | The sensor name |\n| type | One of 45 recognized sensor types (Battery, Voltage...) |\n| component | One of 25 recognized components (Processor, Peripheral). |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipmi.sensor_state | nominal, critical, warning, unknown | state |\n| ipmi.sensor_temperature_c | temperature | Celsius |\n| ipmi.sensor_temperature_f | temperature | Fahrenheit |\n| ipmi.sensor_voltage | voltage | Volts |\n| ipmi.sensor_ampere | ampere | Amps |\n| ipmi.sensor_fan_speed | rotations | RPM |\n| ipmi.sensor_power | power | Watts |\n| ipmi.sensor_reading_percent | percentage | % |\n\n",integration_type:"collector",id:"freeipmi.plugin-freeipmi-Intelligent_Platform_Management_Interface_(IPMI)",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freeipmi.plugin/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-activemq",module_name:"activemq",plugin_name:"go.d.plugin",monitored_instance:{categories:["data-collection.message-brokers"],icon_filename:"activemq.png",name:"ActiveMQ",link:"https://activemq.apache.org/"},alternative_monitored_instances:[],keywords:["message broker"],most_popular:!1,info_provided_to_referring_integrations:{description:""},related_resources:{integrations:{list:[{plugin_name:"go.d.plugin",module_name:"httpcheck"},{plugin_name:"apps.plugin",module_name:"apps"}]}}},overview:"# ActiveMQ\n\nPlugin: go.d.plugin\nModule: activemq\n\n## Overview\n\nThis collector monitors ActiveMQ queues and topics.\n\nIt collects metrics by sending HTTP requests to the Web Console API.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis collector discovers instances running on the local host that provide metrics on port 8161.\nOn startup, it tries to collect metrics from:\n\n- http://localhost:8161\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/activemq.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/activemq.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://localhost:8161 | yes |\n| webadmin | Webadmin root path. | admin | yes |\n| max_queues | Maximum number of concurrently collected queues. | 50 | no |\n| max_topics | Maximum number of concurrently collected topics. | 50 | no |\n| queues_filter | Queues filter. Syntax is [simple patterns](/src/libnetdata/simple_pattern/README.md#simple-patterns). | | no |\n| topics_filter | Topics filter. Syntax is [simple patterns](/src/libnetdata/simple_pattern/README.md#simple-patterns). | | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| timeout | HTTP request timeout. | 1 | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8161\n webadmin: admin\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8161\n webadmin: admin\n username: foo\n password: bar\n\n```\n{% /details %}\n##### Filters and limits\n\nUsing filters and limits for queues and topics.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8161\n webadmin: admin\n max_queues: 100\n max_topics: 100\n queues_filter: \'!sandr* *\'\n topics_filter: \'!sandr* *\'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8161\n webadmin: admin\n\n - name: remote\n url: http://192.0.2.1:8161\n webadmin: admin\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `activemq` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m activemq\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per ActiveMQ instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| activemq.messages | enqueued, dequeued | messages/s |\n| activemq.unprocessed_messages | unprocessed | messages |\n| activemq.consumers | consumers | consumers |\n\n",integration_type:"collector",id:"go.d.plugin-activemq-ActiveMQ",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/activemq/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-adaptecraid",plugin_name:"go.d.plugin",module_name:"adaptec_raid",monitored_instance:{name:"Adaptec RAID",link:"https://www.microchip.com/en-us/products/storage",icon_filename:"adaptec.svg",categories:["data-collection.storage-mount-points-and-filesystems"]},keywords:["storage","raid-controller","manage-disks"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1},overview:"# Adaptec RAID\n\nPlugin: go.d.plugin\nModule: adaptec_raid\n\n## Overview\n\nMonitors the health of Adaptec Hardware RAID by tracking the status of logical and physical devices in your storage system.\nIt relies on the `arcconf` CLI tool but avoids directly executing the binary.\nInstead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment.\nThis approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\n\nExecuted commands:\n- `arcconf GETCONFIG 1 LD`\n- `arcconf GETCONFIG 1 PD`\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/adaptec_raid.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/adaptec_raid.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | arcconf binary execution timeout. | 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: adaptec_raid\n update_every: 5 # Collect Adaptec Hardware RAID statistics every 5 seconds\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `adaptec_raid` collector, run the `go.d.plugin` with the debug option enabled. 
The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m adaptec_raid\n ```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ adaptec_raid_ld_health_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/adaptec_raid.conf) | adaptecraid.logical_device_status | Adaptec RAID logical device (number ${label:ld_number} name ${label:ld_name}) health status is critical |\n| [ adaptec_raid_pd_health_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/adaptec_raid.conf) | adaptecraid.physical_device_state | Adaptec RAID physical device (number ${label:pd_number} location ${label:location}) health state is critical |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per logical device\n\nThese metrics refer to the Logical Device (LD).\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| ld_number | Logical device index number |\n| ld_name | Logical device name |\n| raid_level | RAID level |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adaptecraid.logical_device_status | ok, critical | status |\n\n### Per physical device\n\nThese metrics refer to the Physical Device (PD).\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| pd_number | Physical device index number |\n| location | Physical device location (e.g. 
Connector 0, Device 1) |\n| vendor | Physical device vendor |\n| model | Physical device model |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adaptecraid.physical_device_state | ok, critical | status |\n| adaptecraid.physical_device_smart_warnings | smart | warnings |\n| adaptecraid.physical_device_temperature | temperature | Celsius |\n\n",integration_type:"collector",id:"go.d.plugin-adaptec_raid-Adaptec_RAID",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/adaptecraid/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-apache",plugin_name:"go.d.plugin",module_name:"apache",monitored_instance:{name:"Apache",link:"https://httpd.apache.org/",icon_filename:"apache.svg",categories:["data-collection.web-servers-and-web-proxies"]},keywords:["webserver"],related_resources:{integrations:{list:[{plugin_name:"go.d.plugin",module_name:"weblog"},{plugin_name:"go.d.plugin",module_name:"httpcheck"},{plugin_name:"apps.plugin",module_name:"apps"}]}},info_provided_to_referring_integrations:{description:""},most_popular:!0},overview:"# Apache\n\nPlugin: go.d.plugin\nModule: apache\n\n## Overview\n\nThis collector monitors the activity and performance of Apache servers, and collects metrics such as the number of connections, workers, requests and more.\n\n\nIt sends HTTP requests to the Apache location [server-status](https://httpd.apache.org/docs/2.4/mod/mod_status.html), \nwhich is a built-in location that provides metrics about the Apache server.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Apache instances running on localhost that are listening on port 80.\nOn startup, it tries to collect metrics from:\n\n- http://localhost/server-status?auto\n- http://127.0.0.1/server-status?auto\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Enable Apache status support\n\n- Enable and configure [status_module](https://httpd.apache.org/docs/2.4/mod/mod_status.html).\n- Ensure that you have [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/mod_status.html#troubleshoot) set on (enabled by default since Apache v2.3.6).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/apache.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/apache.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/server-status?auto | yes |\n| timeout | HTTP request timeout. 
| 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nApache with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1/server-status?auto\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n\n - name: remote\n url: http://192.0.2.1/server-status?auto\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `apache` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m apache\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nAll metrics available only if [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/core.html#extendedstatus) is on.\n\n\n### Per Apache instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | Basic | Extended |\n|:------|:----------|:----|:---:|:---:|\n| apache.connections | connections | connections | \u2022 | \u2022 |\n| apache.conns_async | keepalive, closing, writing | connections | \u2022 | \u2022 |\n| apache.workers | idle, busy | workers | \u2022 | \u2022 |\n| apache.scoreboard | waiting, starting, reading, sending, keepalive, dns_lookup, closing, logging, finishing, idle_cleanup, open | connections | \u2022 | \u2022 |\n| apache.requests | requests | requests/s | | \u2022 |\n| apache.net | sent | kilobit/s | | \u2022 |\n| apache.reqpersec | requests | requests/s | | \u2022 |\n| apache.bytespersec | served | KiB/s | | \u2022 |\n| apache.bytesperreq | size | KiB | | \u2022 |\n| apache.uptime | uptime | seconds | | \u2022 |\n\n",integration_type:"collector",id:"go.d.plugin-apache-Apache",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/apache/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-httpd",plugin_name:"go.d.plugin",module_name:"apache",monitored_instance:{name:"HTTPD",link:"https://httpd.apache.org/",icon_filename:"apache.svg",categories:["data-collection.web-servers-and-web-proxies"]},keywords:["webserver"],related_resources:{integrations:{list:[{plugin_name:"go.d.plugin",module_name:"weblog"},{plugin_name:"go.d.plugin",module_name:"httpcheck"},{plugin_name:"apps.plugin",module_name:"apps"}]}},info_provided_to_referring_integrations:{description:""},most_popular:!0},overview:"# HTTPD\n\nPlugin: go.d.plugin\nModule: apache\n\n## Overview\n\nThis collector monitors the activity and performance of Apache servers, and collects metrics such as the number of connections, workers, requests and more.\n\n\nIt sends HTTP requests to the Apache location [server-status](https://httpd.apache.org/docs/2.4/mod/mod_status.html), \nwhich is a built-in location that provides metrics about the Apache server.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Apache instances running on localhost that are listening on port 80.\nOn startup, it tries to collect metrics from:\n\n- http://localhost/server-status?auto\n- http://127.0.0.1/server-status?auto\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Enable Apache status support\n\n- Enable and configure [status_module](https://httpd.apache.org/docs/2.4/mod/mod_status.html).\n- Ensure that you have [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/mod_status.html#troubleshoot) set on (enabled by default since Apache v2.3.6).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/apache.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config 
directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/apache.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/server-status?auto | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nApache with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1/server-status?auto\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n\n - name: remote\n url: http://192.0.2.1/server-status?auto\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `apache` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m apache\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nAll metrics available only if [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/core.html#extendedstatus) is on.\n\n\n### Per Apache instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | Basic | Extended |\n|:------|:----------|:----|:---:|:---:|\n| apache.connections | connections | connections | \u2022 | \u2022 |\n| apache.conns_async | keepalive, closing, writing | connections | \u2022 | \u2022 |\n| apache.workers | idle, busy | workers | \u2022 | \u2022 |\n| apache.scoreboard | waiting, starting, reading, sending, keepalive, dns_lookup, closing, logging, finishing, idle_cleanup, open | connections | \u2022 | \u2022 |\n| apache.requests | requests | requests/s | | \u2022 |\n| apache.net | sent | kilobit/s | | \u2022 |\n| apache.reqpersec | requests | requests/s | | \u2022 |\n| apache.bytespersec | served | KiB/s | | \u2022 |\n| apache.bytesperreq | size | KiB | | \u2022 |\n| apache.uptime | uptime | seconds | | \u2022 |\n\n",integration_type:"collector",id:"go.d.plugin-apache-HTTPD",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/apache/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-cassandra",module_name:"cassandra",plugin_name:"go.d.plugin",monitored_instance:{categories:["data-collection.database-servers"],icon_filename:"cassandra.svg",name:"Cassandra",link:"https://cassandra.apache.org/_/index.html"},alternative_monitored_instances:[],keywords:["nosql","dbms","db","database"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1},overview:"# Cassandra\n\nPlugin: go.d.plugin\nModule: cassandra\n\n## Overview\n\nThis collector gathers metrics about client requests, cache hits, and more, and also provides metrics for each thread pool.\n\n\nThe [JMX Exporter](https://github.com/prometheus/jmx_exporter) is used to fetch metrics from a Cassandra instance and make them available at an endpoint like `http://127.0.0.1:7072/metrics`.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis collector discovers instances running on the local host that provide metrics on port 7072.\n\nOn startup, it tries to collect metrics from:\n\n- http://127.0.0.1:7072/metrics\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Configure Cassandra with Prometheus JMX Exporter\n\nTo configure Cassandra with the [JMX Exporter](https://github.com/prometheus/jmx_exporter):\n\n> **Note**: paths may differ depending on your setup.\n\n- Download the latest [jmx_exporter](https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/) jar file\n and install it in a directory where Cassandra can access it.\n- Add\n the [jmx_exporter.yaml](https://raw.githubusercontent.com/netdata/go.d.plugin/master/modules/cassandra/jmx_exporter.yaml)\n file to `/etc/cassandra`.\n- Add the following line to `/etc/cassandra/cassandra-env.sh`\n ```\n JVM_OPTS="$JVM_OPTS $JVM_EXTRA_OPTS -javaagent:/opt/jmx_exporter/jmx_exporter.jar=7072:/etc/cassandra/jmx_exporter.yaml"\n ```\n
- Restart the Cassandra service.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/cassandra.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/cassandra.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:7072/metrics | yes |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 2 | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:7072/metrics\n\n```\n##### HTTP authentication\n\nLocal server with basic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:7072/metrics\n username: foo\n password: bar\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nLocal server with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:7072/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:7072/metrics\n\n - name: remote\n url: http://192.0.2.1:7072/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `cassandra` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m cassandra\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Cassandra instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cassandra.client_requests_rate | read, write | requests/s |\n| cassandra.client_request_read_latency_histogram | p50, p75, p95, p98, p99, p999 | seconds |\n| cassandra.client_request_write_latency_histogram | p50, p75, p95, p98, p99, p999 | seconds |\n| cassandra.client_requests_latency | read, write | seconds |\n| cassandra.row_cache_hit_ratio | hit_ratio | percentage |\n| cassandra.row_cache_hit_rate | hits, misses | events/s |\n| cassandra.row_cache_utilization | used | percentage |\n| cassandra.row_cache_size | size | bytes |\n| cassandra.key_cache_hit_ratio | hit_ratio | percentage |\n| cassandra.key_cache_hit_rate | hits, misses | events/s |\n| cassandra.key_cache_utilization | used | percentage |\n| cassandra.key_cache_size | size | bytes |\n| cassandra.storage_live_disk_space_used | used | bytes |\n| cassandra.compaction_completed_tasks_rate | completed | tasks/s |\n| cassandra.compaction_pending_tasks_count | pending | tasks |\n| cassandra.compaction_compacted_rate | compacted | bytes/s |\n| cassandra.jvm_memory_used | heap, nonheap | bytes |\n| cassandra.jvm_gc_rate | parnew, cms | gc/s |\n| cassandra.jvm_gc_time | parnew, cms | seconds |\n| cassandra.dropped_messages_rate | dropped | messages/s |\n| cassandra.client_requests_timeouts_rate | read, write | timeout/s |\n| cassandra.client_requests_unavailables_rate | read, write | exceptions/s |\n| cassandra.client_requests_failures_rate | read, write | failures/s |\n| cassandra.storage_exceptions_rate | storage | exceptions/s |\n\n### Per thread pool\n\nMetrics related to Cassandra's thread pools. 
Each thread pool provides its own set of the following metrics.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thread_pool | thread pool name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cassandra.thread_pool_active_tasks_count | active | tasks |\n| cassandra.thread_pool_pending_tasks_count | pending | tasks |\n| cassandra.thread_pool_blocked_tasks_count | blocked | tasks |\n| cassandra.thread_pool_blocked_tasks_rate | blocked | tasks/s |\n\n",integration_type:"collector",id:"go.d.plugin-cassandra-Cassandra",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/cassandra/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-chrony",module_name:"chrony",plugin_name:"go.d.plugin",monitored_instance:{categories:["data-collection.system-clock-and-ntp"],icon_filename:"chrony.jpg",name:"Chrony",link:"https://chrony.tuxfamily.org/"},alternative_monitored_instances:[],keywords:[],info_provided_to_referring_integrations:{description:""},related_resources:{integrations:{list:[]}},most_popular:!1},overview:"# Chrony\n\nPlugin: go.d.plugin\nModule: chrony\n\n## Overview\n\nThis collector monitors the system's clock performance and peer activity status.\n\nIt collects metrics by sending UDP packets to chronyd using the Chrony communication protocol v6.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis collector discovers Chrony instances running on the local host and listening on port 323.\nOn startup, it tries to collect metrics from:\n\n- 127.0.0.1:323\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/chrony.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/chrony.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address. The format is IP:PORT. | 127.0.0.1:323 | yes |\n| timeout | Connection timeout. Zero means no timeout. 
| 1 | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:323\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:323\n\n - name: remote\n address: 192.0.2.1:323\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `chrony` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m chrony\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Chrony instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| chrony.stratum | stratum | level |\n| chrony.current_correction | current_correction | seconds |\n| chrony.root_delay | root_delay | seconds |\n| chrony.root_dispersion | root_delay | seconds |\n| chrony.last_offset | offset | seconds |\n| chrony.rms_offset | offset | seconds |\n| chrony.frequency | frequency | ppm |\n| chrony.residual_frequency | residual_frequency | ppm |\n| chrony.skew | skew | ppm |\n| chrony.update_interval | update_interval | seconds |\n| chrony.ref_measurement_time | ref_measurement_time | seconds |\n| chrony.leap_status | normal, insert_second, delete_second, unsynchronised | status |\n| chrony.activity | online, offline, burst_online, burst_offline, unresolved | sources |\n\n",integration_type:"collector",id:"go.d.plugin-chrony-Chrony",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/chrony/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-clickhouse",plugin_name:"go.d.plugin",module_name:"clickhouse",monitored_instance:{name:"ClickHouse",link:"https://clickhouse.com/",icon_filename:"clickhouse.svg",categories:["data-collection.database-servers"]},keywords:["database"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1},overview:"# ClickHouse\n\nPlugin: go.d.plugin\nModule: clickhouse\n\n## Overview\n\nThis collector retrieves performance data from ClickHouse for connections, queries, resources, replication, IO, and data operations (inserts, selects, merges) using HTTP requests and ClickHouse system tables. 
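\n\nFor instance, the same kind of lookup can be tried by hand against the HTTP interface (an illustration only; it assumes the default HTTP port 8123 with no authentication):\n\n```bash\n# run a SELECT over the HTTP interface, the same transport the collector uses\necho 'SELECT metric, value FROM system.metrics LIMIT 5' | curl 'http://127.0.0.1:8123/' --data-binary @-\n```\n\n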
It monitors your ClickHouse server's health and activity.\n\n\nIt sends HTTP requests to the ClickHouse [HTTP interface](https://clickhouse.com/docs/en/interfaces/http), executing SELECT queries to retrieve data from various system tables.\nSpecifically, it collects metrics from the following tables:\n\n- system.metrics\n- system.async_metrics\n- system.events\n- system.disks\n- system.parts\n- system.processes\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects ClickHouse instances running on localhost that are listening on port 8123.\nOn startup, it tries to collect metrics from:\n\n- http://127.0.0.1:8123\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/clickhouse.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/clickhouse.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8123 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8123\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8123\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nClickHouse with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8123\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8123\n\n - name: remote\n url: http://192.0.2.1:8123\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `clickhouse` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m clickhouse\n ```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ clickhouse_restarted ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.uptime | ClickHouse has recently been restarted |\n| [ clickhouse_queries_preempted ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.queries_preempted | ClickHouse has queries that are stopped and waiting due to priority setting |\n| [ clickhouse_long_running_query ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.longest_running_query_time | ClickHouse has a long-running query exceeding the threshold |\n| [ clickhouse_rejected_inserts ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.rejected_inserts | ClickHouse has INSERT queries that are rejected due to high number of active data parts for partition in a MergeTree |\n| [ clickhouse_delayed_inserts ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.delayed_inserts | ClickHouse has INSERT queries that are throttled due to high number of active data parts for partition in a MergeTree |\n| [ clickhouse_replication_lag ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.replicas_max_absolute_delay | ClickHouse is experiencing replication lag greater than 5 minutes |\n| [ clickhouse_replicated_readonly_tables ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.replicated_readonly_tables | ClickHouse has replicated tables in readonly state due to ZooKeeper session loss/startup without ZooKeeper configured |\n| [ clickhouse_max_part_count_for_partition 
](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.max_part_count_for_partition | ClickHouse has a high number of parts per partition |\n| [ clickhouse_distributed_connections_failures ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.distributed_connections_fail_exhausted_retries | ClickHouse has failed distributed connections after exhausting all retry attempts |\n| [ clickhouse_distributed_files_to_insert ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.distributed_files_to_insert | ClickHouse has a high number of pending files to process for asynchronous insertion into Distributed tables |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per ClickHouse instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| clickhouse.connections | tcp, http, mysql, postgresql, interserver | connections |\n| clickhouse.slow_reads | slow | reads/s |\n| clickhouse.read_backoff | read_backoff | events/s |\n| clickhouse.memory_usage | used | bytes |\n| clickhouse.running_queries | running | queries |\n| clickhouse.queries_preempted | preempted | queries |\n| clickhouse.queries | successful, failed | queries/s |\n| clickhouse.select_queries | successful, failed | selects/s |\n| clickhouse.insert_queries | successful, failed | inserts/s |\n| clickhouse.queries_memory_limit_exceeded | mem_limit_exceeded | queries/s |\n| clickhouse.longest_running_query_time | longest_query_time | seconds |\n| clickhouse.queries_latency | queries_time | microseconds |\n| clickhouse.select_queries_latency | selects_time | microseconds |\n| clickhouse.insert_queries_latency | inserts_time | microseconds |\n| clickhouse.io | reads, writes | bytes/s |\n| clickhouse.iops | reads, writes | ops/s |\n| clickhouse.io_errors | read, write | errors/s |\n| clickhouse.io_seeks | lseek | ops/s |\n| clickhouse.io_file_opens | file_open | ops/s |\n| clickhouse.replicated_parts_current_activity | fetch, send, check | parts |\n| clickhouse.replicas_max_absolute_delay | replication_delay | seconds |\n| clickhouse.replicated_readonly_tables | read_only | tables |\n| clickhouse.replicated_data_loss | data_loss | events |\n| clickhouse.replicated_part_fetches | successful, failed | fetches/s |\n| clickhouse.inserted_rows | inserted | rows/s |\n| clickhouse.inserted_bytes | inserted | bytes/s |\n| clickhouse.rejected_inserts | rejected | inserts/s |\n| clickhouse.delayed_inserts | delayed | inserts/s |\n| clickhouse.delayed_inserts_throttle_time | delayed_inserts_throttle_time | milliseconds |\n| clickhouse.selected_bytes | selected | bytes/s |\n| clickhouse.selected_rows | selected | rows/s |\n| clickhouse.selected_parts | selected | parts/s |\n| clickhouse.selected_ranges | selected | ranges/s |\n| clickhouse.selected_marks | selected | marks/s |\n| clickhouse.merges | merge | ops/s |\n| clickhouse.merges_latency | merges_time | milliseconds |\n| clickhouse.merged_uncompressed_bytes | merged_uncompressed | bytes/s |\n| clickhouse.merged_rows | merged | rows/s |\n| clickhouse.merge_tree_data_writer_inserted_rows | inserted | rows/s |\n| clickhouse.merge_tree_data_writer_uncompressed_bytes | inserted | bytes/s |\n| clickhouse.merge_tree_data_writer_compressed_bytes | written | bytes/s |\n| clickhouse.uncompressed_cache_requests | hits, misses | requests/s |\n| clickhouse.mark_cache_requests | hits, misses | requests/s |\n| clickhouse.max_part_count_for_partition | max_parts_partition | parts |\n| clickhouse.parts_count | temporary, pre_active, active, deleting, delete_on_destroy, outdated, wide, compact | parts |\n| clickhouse.distributed_connections | active | connections |\n| clickhouse.distributed_connections_attempts | connection | attempts/s |\n| clickhouse.distributed_connections_fail_retries | connection_retry | fails/s |\n| clickhouse.distributed_connections_fail_exhausted_retries | connection_retry_exhausted | fails/s |\n| clickhouse.distributed_files_to_insert | pending_insertions | files |\n| clickhouse.distributed_rejected_inserts | rejected | inserts/s |\n| clickhouse.distributed_delayed_inserts | delayed | inserts/s |\n| clickhouse.distributed_delayed_inserts_latency | delayed_time | milliseconds |\n| clickhouse.distributed_sync_insertion_timeout_exceeded | sync_insertion | timeouts/s |\n| clickhouse.distributed_async_insertions_failures | async_insertions | failures/s |\n| clickhouse.uptime | uptime | seconds |\n
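\nAs a quick end-to-end check, any of these charts can be read back from the Netdata API (an illustration; it assumes a local Agent on the default port 19999, and the exact chart id depends on the job name):\n\n```bash\n# the chart id is an assumption (module_jobname.chart); list the real ids via /api/v1/charts\ncurl -s 'http://127.0.0.1:19999/api/v1/data?chart=clickhouse_local.uptime&points=1'\n```\n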
\n### Per disk\n\nThese metrics refer to the Disk.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk_name | Name of the disk as defined in the [server configuration](https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/mergetree#table_engine-mergetree-multiple-volumes_configure). |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| clickhouse.disk_space_usage | free, used | bytes |\n\n### Per table\n\nThese metrics refer to the Database Table.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| database | Name of the database. |\n| table | Name of the table. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| clickhouse.database_table_size | size | bytes |\n| clickhouse.database_table_parts | parts | parts |\n| clickhouse.database_table_rows | rows | rows |\n\n",integration_type:"collector",id:"go.d.plugin-clickhouse-ClickHouse",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/clickhouse/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-cockroachdb",plugin_name:"go.d.plugin",module_name:"cockroachdb",monitored_instance:{name:"CockroachDB",link:"https://www.cockroachlabs.com/",icon_filename:"cockroachdb.svg",categories:["data-collection.database-servers"]},keywords:["cockroachdb","databases"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1},overview:"# CockroachDB\n\nPlugin: go.d.plugin\nModule: cockroachdb\n\n## Overview\n\nThis collector monitors CockroachDB servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/cockroachdb.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config 
directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/cockroachdb.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8080/_status/vars | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/_status/vars\n\n```\n{% /details %}\n##### HTTP authentication\n\nLocal server with basic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/_status/vars\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nCockroachDB with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8080/_status/vars\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/_status/vars\n\n - name: remote\n url: http://203.0.113.10:8080/_status/vars\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `cockroachdb` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m cockroachdb\n ```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ cockroachdb_used_storage_capacity ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf) | cockroachdb.storage_used_capacity_percentage | storage capacity utilization |\n| [ cockroachdb_used_usable_storage_capacity ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf) | cockroachdb.storage_used_capacity_percentage | storage usable space utilization |\n| [ cockroachdb_unavailable_ranges ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf) | cockroachdb.ranges_replication_problem | number of ranges with fewer live replicas than needed for quorum |\n| [ cockroachdb_underreplicated_ranges ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf) | cockroachdb.ranges_replication_problem | number of ranges with fewer live replicas than the replication target |\n| [ cockroachdb_open_file_descriptors_limit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf) | cockroachdb.process_file_descriptors | open file descriptors utilization (against softlimit) |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per CockroachDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cockroachdb.process_cpu_time_combined_percentage | used | percentage |\n| cockroachdb.process_cpu_time_percentage | user, sys | percentage |\n| cockroachdb.process_cpu_time | user, sys | ms |\n| cockroachdb.process_memory | rss | KiB |\n| cockroachdb.process_file_descriptors | open | fd |\n| cockroachdb.process_uptime | uptime | seconds |\n| cockroachdb.host_disk_bandwidth | read, write | KiB |\n| cockroachdb.host_disk_operations | reads, writes | operations |\n| cockroachdb.host_disk_iops_in_progress | in_progress | iops |\n| cockroachdb.host_network_bandwidth | received, sent | kilobits |\n| cockroachdb.host_network_packets | received, sent | packets |\n| cockroachdb.live_nodes | live_nodes | nodes |\n| cockroachdb.node_liveness_heartbeats | successful, failed | heartbeats |\n| cockroachdb.total_storage_capacity | total | KiB |\n| cockroachdb.storage_capacity_usability | usable, unusable | KiB |\n| cockroachdb.storage_usable_capacity | available, used | KiB |\n| cockroachdb.storage_used_capacity_percentage | total, usable | percentage |\n| cockroachdb.sql_connections | active | connections |\n| cockroachdb.sql_bandwidth | received, sent | KiB |\n| cockroachdb.sql_statements_total | started, executed | statements |\n| cockroachdb.sql_errors | statement, transaction | errors |\n| cockroachdb.sql_started_ddl_statements | ddl | statements |\n| cockroachdb.sql_executed_ddl_statements | ddl | statements |\n| cockroachdb.sql_started_dml_statements | select, update, delete, insert | statements |\n| 
cockroachdb.sql_executed_dml_statements | select, update, delete, insert | statements |\n| cockroachdb.sql_started_tcl_statements | begin, commit, rollback, savepoint, savepoint_cockroach_restart, release_savepoint_cockroach_restart, rollback_to_savepoint_cockroach_restart | statements |\n| cockroachdb.sql_executed_tcl_statements | begin, commit, rollback, savepoint, savepoint_cockroach_restart, release_savepoint_cockroach_restart, rollback_to_savepoint_cockroach_restart | statements |\n| cockroachdb.sql_active_distributed_queries | active | queries |\n| cockroachdb.sql_distributed_flows | active, queued | flows |\n| cockroachdb.live_bytes | applications, system | KiB |\n| cockroachdb.logical_data | keys, values | KiB |\n| cockroachdb.logical_data_count | keys, values | num |\n| cockroachdb.kv_transactions | committed, fast-path_committed, aborted | transactions |\n| cockroachdb.kv_transaction_restarts | write_too_old, write_too_old_multiple, forwarded_timestamp, possible_reply, async_consensus_failure, read_within_uncertainty_interval, aborted, push_failure, unknown | restarts |\n| cockroachdb.ranges | ranges | ranges |\n| cockroachdb.ranges_replication_problem | unavailable, under_replicated, over_replicated | ranges |\n| cockroachdb.range_events | split, add, remove, merge | events |\n| cockroachdb.range_snapshot_events | generated, applied_raft_initiated, applied_learner, applied_preemptive | events |\n| cockroachdb.rocksdb_read_amplification | reads | reads/query |\n| cockroachdb.rocksdb_table_operations | compactions, flushes | operations |\n| cockroachdb.rocksdb_cache_usage | used | KiB |\n| cockroachdb.rocksdb_cache_operations | hits, misses | operations |\n| cockroachdb.rocksdb_cache_hit_rate | hit_rate | percentage |\n| cockroachdb.rocksdb_sstables | sstables | sstables |\n| cockroachdb.replicas | replicas | replicas |\n| cockroachdb.replicas_quiescence | quiescent, active | replicas |\n| cockroachdb.replicas_leaders | leaders, not_leaseholders | replicas |\n| cockroachdb.replicas_leaseholders | leaseholders | leaseholders |\n| cockroachdb.queue_processing_failures | gc, replica_gc, replication, split, consistency, raft_log, raft_snapshot, time_series_maintenance | failures |\n| cockroachdb.rebalancing_queries | avg | queries/s |\n| cockroachdb.rebalancing_writes | avg | writes/s |\n| cockroachdb.timeseries_samples | written | samples |\n| cockroachdb.timeseries_write_errors | write | errors |\n| cockroachdb.timeseries_write_bytes | written | KiB |\n| cockroachdb.slow_requests | acquiring_latches, acquiring_lease, in_raft | requests |\n| cockroachdb.code_heap_memory_usage | go, cgo | KiB |\n| cockroachdb.goroutines | goroutines | goroutines |\n| cockroachdb.gc_count | gc | invokes |\n| cockroachdb.gc_pause | pause | us |\n| cockroachdb.cgo_calls | cgo | calls |\n\n",integration_type:"collector",id:"go.d.plugin-cockroachdb-CockroachDB",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/cockroachdb/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-consul",plugin_name:"go.d.plugin",module_name:"consul",monitored_instance:{name:"Consul",link:"https://www.consul.io/",categories:["data-collection.service-discovery-registry"],icon_filename:"consul.svg"},alternative_monitored_instances:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["service networking platform","hashicorp"],most_popular:!0},overview:"# Consul\n\nPlugin: go.d.plugin\nModule: consul\n\n## 
Overview\n\nThis collector monitors [key metrics](https://developer.hashicorp.com/consul/docs/agent/telemetry#key-metrics) of Consul Agents: transaction timings, leadership changes, memory usage and more.\n\n\nIt periodically sends HTTP requests to the [Consul REST API](https://developer.hashicorp.com/consul/api-docs).\n\nUsed endpoints:\n\n- [/operator/autopilot/health](https://developer.hashicorp.com/consul/api-docs/operator/autopilot#read-health)\n- [/agent/checks](https://developer.hashicorp.com/consul/api-docs/agent/check#list-checks)\n- [/agent/self](https://developer.hashicorp.com/consul/api-docs/agent#read-configuration)\n- [/agent/metrics](https://developer.hashicorp.com/consul/api-docs/agent#view-metrics)\n- [/coordinate/nodes](https://developer.hashicorp.com/consul/api-docs/coordinate#read-lan-coordinates-for-all-nodes)\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis collector discovers instances running on the local host that provide metrics on port 8500.\n\nOn startup, it tries to collect metrics from:\n\n- http://localhost:8500\n- http://127.0.0.1:8500\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Enable Prometheus telemetry\n\n[Enable](https://developer.hashicorp.com/consul/docs/agent/config/config-files#telemetry-prometheus_retention_time) telemetry on your Consul agent by increasing the value of `prometheus_retention_time` from `0`.\n
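\nA minimal sketch of how that might look on the agent side (both the file path and the `360h` value are assumptions; any non-zero duration enables the endpoint):\n\n```bash\n# append a telemetry stanza to the agent configuration, then restart the agent\ncat >> /etc/consul.d/telemetry.hcl <<EOF\ntelemetry {\n  prometheus_retention_time = "360h"\n}\nEOF\n```\n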
\n#### Add required ACLs to Token\n\nRequired **only if authentication is enabled**.\n\n| ACL | Endpoint |\n|:---------------:|------------|\n| `operator:read` | [autopilot health status](https://developer.hashicorp.com/consul/api-docs/operator/autopilot#read-health) |\n| `node:read` | [checks](https://developer.hashicorp.com/consul/api-docs/agent/check#list-checks) |\n| `agent:read` | [configuration](https://developer.hashicorp.com/consul/api-docs/agent#read-configuration), [metrics](https://developer.hashicorp.com/consul/api-docs/agent#view-metrics), and [lan coordinates](https://developer.hashicorp.com/consul/api-docs/coordinate#read-lan-coordinates-for-all-nodes) |\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/consul.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/consul.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="All options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://localhost:8500 | yes |\n| acl_token | ACL token used in every request. | | no |\n| max_checks | Checks processing/charting limit. | | no |\n| max_filter | Checks processing/charting filter. Uses [simple patterns](/src/libnetdata/simple_pattern/README.md). | | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 1 | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8500\n acl_token: "ec15675e-2999-d789-832e-8c4794daa8d7"\n\n```\n##### Basic HTTP auth\n\nLocal server with basic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8500\n acl_token: "ec15675e-2999-d789-832e-8c4794daa8d7"\n username: foo\n password: bar\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8500\n acl_token: "ec15675e-2999-d789-832e-8c4794daa8d7"\n\n - name: remote\n url: http://203.0.113.10:8500\n acl_token: "ada7f751-f654-8872-7f93-498e799158b6"\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `consul` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m consul\n ```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ consul_node_health_check_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.node_health_check_status | node health check ${label:check_name} has failed on server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_service_health_check_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.service_health_check_status | service health check ${label:check_name} for service ${label:service_name} has failed on server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_client_rpc_requests_exceeded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.client_rpc_requests_exceeded_rate | number of rate-limited RPC requests made by server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_client_rpc_requests_failed ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.client_rpc_requests_failed_rate | number of failed RPC requests made by server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_gc_pause_time ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.gc_pause_time | time spent in stop-the-world garbage collection pauses on server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_autopilot_health_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.autopilot_health_status | datacenter ${label:datacenter} cluster is unhealthy as reported by server ${label:node_name} |\n| [ consul_autopilot_server_health_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.autopilot_server_health_status | server ${label:node_name} from datacenter ${label:datacenter} is unhealthy |\n| [ consul_raft_leader_last_contact_time ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.raft_leader_last_contact_time | median time elapsed since leader server ${label:node_name} datacenter ${label:datacenter} was last able to contact the follower nodes |\n| [ consul_raft_leadership_transitions ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.raft_leadership_transitions_rate | there has been a leadership change and server ${label:node_name} datacenter ${label:datacenter} has become the leader |\n| [ consul_raft_thread_main_saturation ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.raft_thread_main_saturation_perc | average saturation of the main Raft goroutine on server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_raft_thread_fsm_saturation ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.raft_thread_fsm_saturation_perc | average saturation of the FSM Raft goroutine on server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_license_expiration_time 
](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.license_expiration_time | Consul Enterprise licence expiration time on node ${label:node_name} datacenter ${label:datacenter} |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nThe set of metrics depends on the [Consul Agent mode](https://developer.hashicorp.com/consul/docs/install/glossary#agent).\n\n\n### Per Consul instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | Leader | Follower | Client |\n|:------|:----------|:----|:---:|:---:|:---:|\n| consul.client_rpc_requests_rate | rpc | requests/s | \u2022 | \u2022 | \u2022 |\n| consul.client_rpc_requests_exceeded_rate | exceeded | requests/s | \u2022 | \u2022 | \u2022 |\n| consul.client_rpc_requests_failed_rate | failed | requests/s | \u2022 | \u2022 | \u2022 |\n| consul.memory_allocated | allocated | bytes | \u2022 | \u2022 | \u2022 |\n| consul.memory_sys | sys | bytes | \u2022 | \u2022 | \u2022 |\n| consul.gc_pause_time | gc_pause | seconds | \u2022 | \u2022 | \u2022 |\n| consul.kvs_apply_time | quantile_0.5, quantile_0.9, quantile_0.99 | ms | \u2022 | \u2022 | |\n| consul.kvs_apply_operations_rate | kvs_apply | ops/s | \u2022 | \u2022 | |\n| consul.txn_apply_time | quantile_0.5, quantile_0.9, quantile_0.99 | ms | \u2022 | \u2022 | |\n| consul.txn_apply_operations_rate | txn_apply | ops/s | \u2022 | \u2022 | |\n| consul.autopilot_health_status | healthy, unhealthy | status | \u2022 | \u2022 | |\n| consul.autopilot_failure_tolerance | failure_tolerance | servers | \u2022 | \u2022 | |\n| consul.autopilot_server_health_status | healthy, unhealthy | status | \u2022 | \u2022 | |\n| consul.autopilot_server_stable_time | stable | seconds | \u2022 | \u2022 | |\n| consul.autopilot_server_serf_status | active, failed, left, none | status | \u2022 | \u2022 | |\n| consul.autopilot_server_voter_status | voter, not_voter | status | \u2022 | \u2022 | |\n| consul.network_lan_rtt | min, max, avg | ms | \u2022 | \u2022 | |\n| consul.raft_commit_time | quantile_0.5, quantile_0.9, quantile_0.99 | ms | \u2022 | | |\n| consul.raft_commits_rate | commits | commits/s | \u2022 | | |\n| consul.raft_leader_last_contact_time | quantile_0.5, quantile_0.9, quantile_0.99 | ms | \u2022 | | |\n| consul.raft_leader_oldest_log_age | oldest_log_age | seconds | \u2022 | | |\n| consul.raft_follower_last_contact_leader_time | leader_last_contact | ms | | \u2022 | |\n| consul.raft_rpc_install_snapshot_time | quantile_0.5, quantile_0.9, quantile_0.99 | ms | | \u2022 | |\n| consul.raft_leader_elections_rate | leader | elections/s | \u2022 | \u2022 | |\n| consul.raft_leadership_transitions_rate | leadership | transitions/s | \u2022 | \u2022 | |\n| consul.server_leadership_status | leader, not_leader | status | \u2022 | \u2022 | |\n| consul.raft_thread_main_saturation_perc | quantile_0.5, quantile_0.9, quantile_0.99 | percentage | \u2022 | \u2022 | |\n| consul.raft_thread_fsm_saturation_perc | quantile_0.5, quantile_0.9, quantile_0.99 | percentage | \u2022 | \u2022 | |\n| consul.raft_fsm_last_restore_duration | last_restore_duration | ms | \u2022 | \u2022 | |\n| consul.raft_boltdb_freelist_bytes | freelist | bytes | \u2022 | \u2022 | |\n| consul.raft_boltdb_logs_per_batch_rate | written | logs/s | \u2022 | \u2022 | |\n| consul.raft_boltdb_store_logs_time | quantile_0.5, 
quantile_0.9, quantile_0.99 | ms | \u2022 | \u2022 | |\n| consul.license_expiration_time | license_expiration | seconds | \u2022 | \u2022 | \u2022 |\n\n### Per node check\n\nMetrics about checks on Node level.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| datacenter | Datacenter Identifier |\n| node_name | The node's name |\n| check_name | The check's name |\n\nMetrics:\n\n| Metric | Dimensions | Unit | Leader | Follower | Client |\n|:------|:----------|:----|:---:|:---:|:---:|\n| consul.node_health_check_status | passing, maintenance, warning, critical | status | \u2022 | \u2022 | \u2022 |\n\n### Per service check\n\nMetrics about checks at a Service level.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| datacenter | Datacenter Identifier |\n| node_name | The node's name |\n| check_name | The check's name |\n| service_name | The service's name |\n\nMetrics:\n\n| Metric | Dimensions | Unit | Leader | Follower | Client |\n|:------|:----------|:----|:---:|:---:|:---:|\n| consul.service_health_check_status | passing, maintenance, warning, critical | status | \u2022 | \u2022 | \u2022 |\n\n",integration_type:"collector",id:"go.d.plugin-consul-Consul",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/consul/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-coredns",plugin_name:"go.d.plugin",module_name:"coredns",monitored_instance:{name:"CoreDNS",link:"https://coredns.io/",icon_filename:"coredns.svg",categories:["data-collection.dns-and-dhcp-servers"]},keywords:["coredns","dns","kubernetes"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1},overview:"# CoreDNS\n\nPlugin: go.d.plugin\nModule: coredns\n\n## Overview\n\nThis collector monitors CoreDNS instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/coredns.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/coredns.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="All options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9153/metrics | yes |\n| per_server_stats | Server filter. | | no |\n| per_zone_stats | Zone filter. | | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. 
| | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 2 | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client tls certificate. | | no |\n| tls_key | Client tls key. | | no |\n\n##### per_server_stats\n\nMetrics of servers matching the selector will be collected.\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [matcher](https://github.com/netdata/netdata/tree/master/src/go/collectors/go.d.plugin/pkg/matcher#supported-format).\n- Syntax:\n\n```yaml\nper_server_stats:\n includes:\n - pattern1\n - pattern2\n excludes:\n - pattern3\n - pattern4\n```\n\n\n##### per_zone_stats\n\nMetrics of zones matching the selector will be collected.\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [matcher](https://github.com/netdata/netdata/tree/master/src/go/collectors/go.d.plugin/pkg/matcher#supported-format).\n- Syntax:\n\n```yaml\nper_zone_stats:\n includes:\n - pattern1\n - pattern2\n excludes:\n - pattern3\n - pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9153/metrics\n\n```\n{% /details %}\n##### Basic HTTP auth\n\nLocal server with basic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9153/metrics\n username: foo\n password: bar\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9153/metrics\n\n - name: remote\n url: http://203.0.113.10:9153/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `coredns` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m coredns\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per CoreDNS instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| coredns.dns_request_count_total | requests | requests/s |\n| coredns.dns_responses_count_total | responses | responses/s |\n| coredns.dns_request_count_total_per_status | processed, dropped | requests/s |\n| coredns.dns_no_matching_zone_dropped_total | dropped | requests/s |\n| coredns.dns_panic_count_total | panics | panics/s |\n| coredns.dns_requests_count_total_per_proto | udp, tcp | requests/s |\n| coredns.dns_requests_count_total_per_ip_family | v4, v6 | requests/s |\n| coredns.dns_requests_count_total_per_per_type | a, aaaa, mx, soa, cname, ptr, txt, ns, ds, dnskey, rrsig, nsec, nsec3, ixfr, any, other | requests/s |\n| coredns.dns_responses_count_total_per_rcode | noerror, formerr, servfail, nxdomain, notimp, refused, yxdomain, yxrrset, nxrrset, notauth, notzone, badsig, badkey, badtime, badmode, badname, badalg, badtrunc, badcookie, other | responses/s |\n\n### Per server\n\nThese metrics refer to the DNS server.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| server_name | Server name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| coredns.server_dns_request_count_total | requests | requests/s |\n| coredns.server_dns_responses_count_total | responses | responses/s |\n| coredns.server_request_count_total_per_status | processed, dropped | requests/s |\n| coredns.server_requests_count_total_per_proto | udp, tcp | requests/s |\n| coredns.server_requests_count_total_per_ip_family | v4, v6 | requests/s |\n| coredns.server_requests_count_total_per_per_type | a, aaaa, mx, soa, cname, ptr, txt, ns, ds, dnskey, rrsig, nsec, nsec3, ixfr, any, other | requests/s |\n| coredns.server_responses_count_total_per_rcode | noerror, formerr, servfail, nxdomain, notimp, refused, yxdomain, yxrrset, nxrrset, notauth, notzone, badsig, badkey, badtime, badmode, badname, badalg, badtrunc, badcookie, other | responses/s |\n\n### Per zone\n\nThese metrics refer to the DNS zone.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| zone_name | Zone name. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| coredns.zone_dns_request_count_total | requests | requests/s |\n| coredns.zone_dns_responses_count_total | responses | responses/s |\n| coredns.zone_requests_count_total_per_proto | udp, tcp | requests/s |\n| coredns.zone_requests_count_total_per_ip_family | v4, v6 | requests/s |\n| coredns.zone_requests_count_total_per_per_type | a, aaaa, mx, soa, cname, ptr, txt, ns, ds, dnskey, rrsig, nsec, nsec3, ixfr, any, other | requests/s |\n| coredns.zone_responses_count_total_per_rcode | noerror, formerr, servfail, nxdomain, notimp, refused, yxdomain, yxrrset, nxrrset, notauth, notzone, badsig, badkey, badtime, badmode, badname, badalg, badtrunc, badcookie, other | responses/s |\n\n",integration_type:"collector",id:"go.d.plugin-coredns-CoreDNS",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/coredns/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-couchbase",plugin_name:"go.d.plugin",module_name:"couchbase",monitored_instance:{name:"Couchbase",link:"https://www.couchbase.com/",icon_filename:"couchbase.svg",categories:["data-collection.database-servers"]},keywords:["couchbase","databases"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1},overview:"# Couchbase\n\nPlugin: go.d.plugin\nModule: couchbase\n\n## Overview\n\nThis collector monitors Couchbase servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/couchbase.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/couchbase.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="All options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8091 | yes |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 2 | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. 
| no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client tls certificate. | | no |\n| tls_key | Client tls key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8091\n\n```\n{% /details %}\n##### Basic HTTP auth\n\nLocal server with basic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8091\n username: foo\n password: bar\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8091\n\n - name: remote\n url: http://203.0.113.0:8091\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `couchbase` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m couchbase\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Couchbase instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| couchbase.bucket_quota_percent_used | a dimension per bucket | percentage |\n| couchbase.bucket_ops_per_sec | a dimension per bucket | ops/s |\n| couchbase.bucket_disk_fetches | a dimension per bucket | fetches |\n| couchbase.bucket_item_count | a dimension per bucket | items |\n| couchbase.bucket_disk_used_stats | a dimension per bucket | bytes |\n| couchbase.bucket_data_used | a dimension per bucket | bytes |\n| couchbase.bucket_mem_used | a dimension per bucket | bytes |\n| couchbase.bucket_vb_active_num_non_resident | a dimension per bucket | items |\n\n",integration_type:"collector",id:"go.d.plugin-couchbase-Couchbase",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/couchbase/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-couchdb",plugin_name:"go.d.plugin",module_name:"couchdb",monitored_instance:{name:"CouchDB",link:"https://couchdb.apache.org/",icon_filename:"couchdb.svg",categories:["data-collection.database-servers"]},keywords:["couchdb","databases"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1},overview:"# CouchDB\n\nPlugin: go.d.plugin\nModule: couchdb\n\n## Overview\n\nThis collector monitors CouchDB servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/couchdb.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/couchdb.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:5984 | yes |\n| node | CouchDB node name. Same as -name vm.args argument. | _local | no |\n| databases | List of database names for which db-specific stats should be displayed, space separated. | | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. 
| 2 | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client tls certificate. | | no |\n| tls_key | Client tls key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:5984\n\n```\n{% /details %}\n##### Basic HTTP auth\n\nLocal server with basic HTTP authentication, node name and multiple databases defined. Make sure to match the node name with the `NODENAME` value in your CouchDB\'s `etc/vm.args` file. Typically, this is of the form `couchdb@fully.qualified.domain.name` in a cluster, or `couchdb@127.0.0.1` for a single-node server.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:5984\n node: couchdb@127.0.0.1\n databases: my-db other-db\n username: foo\n password: bar\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:5984\n\n - name: remote\n url: http://203.0.113.0:5984\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `couchdb` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m couchdb\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per CouchDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| couchdb.activity | db_reads, db_writes, view_reads | requests/s |\n| couchdb.request_methods | copy, delete, get, head, options, post, put | requests/s |\n| couchdb.response_codes | 200, 201, 202, 204, 206, 301, 302, 304, 400, 401, 403, 404, 406, 409, 412, 413, 414, 415, 416, 417, 500, 501, 503 | responses/s |\n| couchdb.response_code_classes | 2xx, 3xx, 4xx, 5xx | responses/s |\n| couchdb.active_tasks | indexer, db_compaction, replication, view_compaction | tasks |\n| couchdb.replicator_jobs | running, pending, crashed, internal_replication_jobs | jobs |\n| couchdb.open_files | files | files |\n| couchdb.erlang_vm_memory | atom, binaries, code, ets, procs, other | B |\n| couchdb.proccounts | os_procs, erl_procs | processes |\n| couchdb.peakmsgqueue | peak_size | messages |\n| couchdb.reductions | reductions | reductions |\n| couchdb.db_sizes_file | a dimension per database | KiB |\n| couchdb.db_sizes_external | a dimension per database | KiB |\n| couchdb.db_sizes_active | a dimension per database | KiB |\n| couchdb.db_doc_count | a dimension per database | docs |\n| couchdb.db_doc_del_count | a dimension per database | docs |\n\n",integration_type:"collector",id:"go.d.plugin-couchdb-CouchDB",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/couchdb/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-dns_query",plugin_name:"go.d.plugin",module_name:"dns_query",monitored_instance:{name:"DNS query",link:"",icon_filename:"network-wired.svg",categories:["data-collection.dns-and-dhcp-servers"]},keywords:["dns"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1},overview:"# DNS query\n\nPlugin: go.d.plugin\nModule: dns_query\n\n## Overview\n\nThis module monitors DNS query round-trip time (RTT).\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/dns_query.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dns_query.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="All options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| domains | Domain or subdomains to query. The collector will choose a random domain from the list on every iteration. | | yes |\n| servers | Servers to query. | | yes |\n| port | DNS server port. | 53 | no |\n| network | Network protocol name. Available options: udp, tcp, tcp-tls. | udp | no |\n| record_types | Query record type. Available options: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV. | A | no |\n| timeout | Query read timeout. | 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: job1\n record_types:\n - A\n - AAAA\n domains:\n - google.com\n - github.com\n - reddit.com\n servers:\n - 8.8.8.8\n - 8.8.4.4\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `dns_query` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m dns_query\n ```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ dns_query_query_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/dns_query.conf) | dns_query.query_status | DNS request type ${label:record_type} to server ${label:server} is unsuccessful |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per server\n\nThese metrics refer to the DNS server.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| server | DNS server address. |\n| network | Network protocol name (tcp, udp, tcp-tls). |\n| record_type | DNS record type (e.g. A, AAAA, CNAME). 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dns_query.query_status | success, network_error, dns_error | status |\n| dns_query.query_time | query_time | seconds |\n\n",integration_type:"collector",id:"go.d.plugin-dns_query-DNS_query",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/dnsquery/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-dnsdist",plugin_name:"go.d.plugin",module_name:"dnsdist",monitored_instance:{name:"DNSdist",link:"https://dnsdist.org/",icon_filename:"network-wired.svg",categories:["data-collection.dns-and-dhcp-servers"]},keywords:["dnsdist","dns"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1},overview:"# DNSdist\n\nPlugin: go.d.plugin\nModule: dnsdist\n\n## Overview\n\nThis collector monitors DNSDist servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Enable DNSdist built-in Webserver\n\nFor collecting metrics via HTTP, you need to [enable the built-in webserver](https://dnsdist.org/guides/webserver.html).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/dnsdist.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dnsdist.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8083 | yes |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 1 | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client tls certificate. | | no |\n| tls_key | Client tls key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8083\n headers:\n X-API-Key: your-api-key # static pre-shared authentication key for access to the REST API (api-key).\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8083\n headers:\n X-API-Key: \'your-api-key\' # static pre-shared authentication key for access to the REST API (api-key).\n\n - name: remote\n url: http://203.0.113.0:8083\n headers:\n X-API-Key: \'your-api-key\'\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `dnsdist` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m dnsdist\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per DNSdist instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dnsdist.queries | all, recursive, empty | queries/s |\n| dnsdist.queries_dropped | rule_drop, dynamic_blocked, no_policy, non_queries | queries/s |\n| dnsdist.packets_dropped | acl | packets/s |\n| dnsdist.answers | self_answered, nxdomain, refused, trunc_failures | answers/s |\n| dnsdist.backend_responses | responses | responses/s |\n| dnsdist.backend_commerrors | send_errors | errors/s |\n| dnsdist.backend_errors | timeouts, servfail, non_compliant | responses/s |\n| dnsdist.cache | hits, misses | answers/s |\n| dnsdist.servercpu | system_state, user_state | ms/s |\n| dnsdist.servermem | memory_usage | MiB |\n| dnsdist.query_latency | 1ms, 10ms, 50ms, 100ms, 1sec, slow | queries/s |\n| dnsdist.query_latency_avg | 100, 1k, 10k, 1000k | microseconds |\n\n",integration_type:"collector",id:"go.d.plugin-dnsdist-DNSdist",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/dnsdist/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-dnsmasq",plugin_name:"go.d.plugin",module_name:"dnsmasq",monitored_instance:{name:"Dnsmasq",link:"https://thekelleys.org.uk/dnsmasq/doc.html",icon_filename:"dnsmasq.svg",categories:["data-collection.dns-and-dhcp-servers"]},keywords:["dnsmasq","dns"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1},overview:"# Dnsmasq\n\nPlugin: go.d.plugin\nModule: dnsmasq\n\n## Overview\n\nThis collector monitors Dnsmasq servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from 
multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/dnsmasq.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dnsmasq.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address in `ip:port` format. | 127.0.0.1:53 | yes |\n| protocol | DNS query transport protocol. Supported protocols: udp, tcp, tcp-tls. | udp | no |\n| timeout | DNS query timeout (dial, write and read) in seconds. | 1 | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:53\n\n```\n{% /details %}\n##### Using TCP protocol\n\nLocal server with specific DNS query transport protocol.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:53\n protocol: tcp\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:53\n\n - name: remote\n address: 203.0.113.0:53\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `dnsmasq` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m dnsmasq\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Dnsmasq instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dnsmasq.servers_queries | success, failed | queries/s |\n| dnsmasq.cache_performance | hits, misses | events/s |\n| dnsmasq.cache_operations | insertions, evictions | operations/s |\n| dnsmasq.cache_size | size | entries |\n\n",integration_type:"collector",id:"go.d.plugin-dnsmasq-Dnsmasq",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/dnsmasq/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-dnsmasq_dhcp",plugin_name:"go.d.plugin",module_name:"dnsmasq_dhcp",monitored_instance:{name:"Dnsmasq DHCP",link:"https://www.thekelleys.org.uk/dnsmasq/doc.html",icon_filename:"dnsmasq.svg",categories:["data-collection.dns-and-dhcp-servers"]},keywords:["dnsmasq","dhcp"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1},overview:"# Dnsmasq DHCP\n\nPlugin: go.d.plugin\nModule: dnsmasq_dhcp\n\n## Overview\n\nThis collector monitors Dnsmasq DHCP lease databases, depending on your configuration.\n\nBy default, it uses:\n\n- `/var/lib/misc/dnsmasq.leases` to read leases.\n- `/etc/dnsmasq.conf` to detect dhcp-ranges.\n- `/etc/dnsmasq.d` to find additional configurations.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAll configured dhcp-ranges are detected automatically.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/dnsmasq_dhcp.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dnsmasq_dhcp.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| leases_path | Path to dnsmasq DHCP leases file. | /var/lib/misc/dnsmasq.leases | no |\n| conf_path | Path to dnsmasq configuration file. | /etc/dnsmasq.conf | no |\n| conf_dir | Path to dnsmasq configuration directory. 
| /etc/dnsmasq.d,.dpkg-dist,.dpkg-old,.dpkg-new | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: dnsmasq_dhcp\n leases_path: /var/lib/misc/dnsmasq.leases\n conf_path: /etc/dnsmasq.conf\n conf_dir: /etc/dnsmasq.d\n\n```\n{% /details %}\n##### Pi-hole\n\nDnsmasq DHCP on Pi-hole.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: dnsmasq_dhcp\n leases_path: /etc/pihole/dhcp.leases\n conf_path: /etc/dnsmasq.conf\n conf_dir: /etc/dnsmasq.d\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `dnsmasq_dhcp` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m dnsmasq_dhcp\n ```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ dnsmasq_dhcp_dhcp_range_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/dnsmasq_dhcp.conf) | dnsmasq_dhcp.dhcp_range_utilization | DHCP range utilization |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Dnsmasq DHCP instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dnsmasq_dhcp.dhcp_ranges | ipv4, ipv6 | ranges |\n| dnsmasq_dhcp.dhcp_hosts | ipv4, ipv6 | hosts |\n\n### Per dhcp range\n\nThese metrics refer to the DHCP range.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| dhcp_range | DHCP range in `START_IP:END_IP` format. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dnsmasq_dhcp.dhcp_range_utilization | used | percentage |\n| dnsmasq_dhcp.dhcp_range_allocated_leases | allocated | leases |\n\n",integration_type:"collector",id:"go.d.plugin-dnsmasq_dhcp-Dnsmasq_DHCP",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-docker",plugin_name:"go.d.plugin",module_name:"docker",alternative_monitored_instances:[],monitored_instance:{name:"Docker",link:"https://www.docker.com/",categories:["data-collection.containers-and-vms"],icon_filename:"docker.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["container"],most_popular:!0},overview:"# Docker\n\nPlugin: go.d.plugin\nModule: docker\n\n## Overview\n\nThis collector monitors Docker containers' state, health status, and more.\n\n\nIt connects to the Docker instance via a TCP or UNIX socket and executes the following commands:\n\n- [System info](https://docs.docker.com/engine/api/v1.43/#tag/System/operation/SystemInfo).\n- [List 
images](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageList).\n- [List containers](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerList).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nRequires the netdata user to be in the docker group.\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt discovers instances running on localhost by attempting to connect to a known Docker UNIX socket: `/var/run/docker.sock`.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nEnabling `collect_container_size` may result in high CPU usage depending on the version of Docker Engine.\n\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/docker.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/docker.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Docker daemon's listening address. When using a TCP socket, the format is: tcp://[ip]:[port] | unix:///var/run/docker.sock | yes |\n| timeout | Request timeout in seconds. | 2 | no |\n| collect_container_size | Whether to collect container writable layer size. | no | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n address: 'unix:///var/run/docker.sock'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 'unix:///var/run/docker.sock'\n\n - name: remote\n address: 'tcp://203.0.113.10:2375'\n\n```\n{% /details %}\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `docker` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m docker\n ```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ docker_container_unhealthy ](https://github.com/netdata/netdata/blob/master/src/health/health.d/docker.conf) | docker.container_health_status | ${label:container_name} docker container health status is unhealthy |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Docker instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| docker.containers_state | running, paused, stopped | containers |\n| docker.containers_health_status | healthy, unhealthy, not_running_unhealthy, starting, no_healthcheck | containers |\n| docker.images | active, dangling | images |\n| docker.images_size | size | bytes |\n\n### Per container\n\nMetrics related to containers. Each container provides its own set of the following metrics.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container's name |\n| image | The image name the container uses |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| docker.container_state | running, paused, exited, created, restarting, removing, dead | state |\n| docker.container_health_status | healthy, unhealthy, not_running_unhealthy, starting, no_healthcheck | status |\n| docker.container_writeable_layer_size | writeable_layer | size |\n\n",integration_type:"collector",id:"go.d.plugin-docker-Docker",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/docker/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-docker_engine",plugin_name:"go.d.plugin",module_name:"docker_engine",alternative_monitored_instances:[],monitored_instance:{name:"Docker Engine",link:"https://docs.docker.com/engine/",categories:["data-collection.containers-and-vms"],icon_filename:"docker.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["docker","container"],most_popular:!1},overview:"# Docker Engine\n\nPlugin: go.d.plugin\nModule: docker_engine\n\n## Overview\n\nThis collector monitors the activity and health of Docker Engine and Docker Swarm.\n\n\nThe [built-in](https://docs.docker.com/config/daemon/prometheus/) Prometheus exporter is used to get the metrics.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt discovers instances running on localhost by attempting to connect to a known Docker TCP socket: `http://127.0.0.1:9323/metrics`.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the 
system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Enable built-in Prometheus exporter\n\nTo enable built-in Prometheus exporter, follow the [official documentation](https://docs.docker.com/config/daemon/prometheus/#configure-docker).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/docker_engine.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/docker_engine.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9323/metrics | yes |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 1 | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9323/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9323/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nConfiguration with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9323/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9323/metrics\n\n - name: remote\n url: http://192.0.2.1:9323/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `docker_engine` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m docker_engine\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Docker Engine instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| docker_engine.engine_daemon_container_actions | changes, commit, create, delete, start | actions/s |\n| docker_engine.engine_daemon_container_states_containers | running, paused, stopped | containers |\n| docker_engine.builder_builds_failed_total | build_canceled, build_target_not_reachable_error, command_not_supported_error, dockerfile_empty_error, dockerfile_syntax_error, error_processing_commands_error, missing_onbuild_arguments_error, unknown_instruction_error | fails/s |\n| docker_engine.engine_daemon_health_checks_failed_total | fails | events/s |\n| docker_engine.swarm_manager_leader | is_leader | bool |\n| docker_engine.swarm_manager_object_store | nodes, services, tasks, networks, secrets, configs | objects |\n| docker_engine.swarm_manager_nodes_per_state | ready, down, unknown, disconnected | nodes |\n| docker_engine.swarm_manager_tasks_per_state | running, failed, ready, rejected, starting, shutdown, new, orphaned, preparing, pending, complete, remove, accepted, assigned | tasks |\n\n",integration_type:"collector",id:"go.d.plugin-docker_engine-Docker_Engine",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/docker_engine/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-dockerhub",plugin_name:"go.d.plugin",module_name:"dockerhub",monitored_instance:{name:"Docker Hub repository",link:"https://hub.docker.com/",icon_filename:"docker.svg",categories:["data-collection.containers-and-vms"]},keywords:["dockerhub"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1},overview:"# Docker Hub repository\n\nPlugin: go.d.plugin\nModule: dockerhub\n\n## Overview\n\nThis collector keeps track of Docker Hub repository statistics such as the number of stars, pulls, current status, and more.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/dockerhub.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config 
directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dockerhub.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | DockerHub URL. | https://hub.docker.com/v2/repositories | yes |\n| repositories | List of repositories to monitor. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: dockerhub\n repositories:\n - 'user1/name1'\n - 'user2/name2'\n - 'user3/name3'\n\n```\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `dockerhub` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m dockerhub\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Docker Hub repository instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dockerhub.pulls_sum | sum | pulls |\n| dockerhub.pulls | a dimension per repository | pulls |\n| dockerhub.pulls_rate | a dimension per repository | pulls/s |\n| dockerhub.stars | a dimension per repository | stars |\n| dockerhub.status | a dimension per repository | status |\n| dockerhub.last_updated | a dimension per repository | seconds |\n\n",integration_type:"collector",id:"go.d.plugin-dockerhub-Docker_Hub_repository",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/dockerhub/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-elasticsearch",module_name:"elasticsearch",plugin_name:"go.d.plugin",monitored_instance:{name:"Elasticsearch",link:"https://www.elastic.co/elasticsearch/",icon_filename:"elasticsearch.svg",categories:["data-collection.search-engines"]},keywords:["elastic","elasticsearch","opensearch","search engine"],related_resources:{integrations:{list:[{plugin_name:"apps.plugin",module_name:"apps"},{plugin_name:"cgroups.plugin",module_name:"cgroups"}]}},info_provided_to_referring_integrations:{description:""},most_popular:!0},overview:"# Elasticsearch\n\nPlugin: go.d.plugin\nModule: elasticsearch\n\n## Overview\n\nThis collector monitors the performance and health of the Elasticsearch cluster.\n\n\nIt uses [Cluster APIs](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html) to collect metrics.\n\nUsed endpoints:\n\n| Endpoint | Description | API |\n|------------------------|----------------------|-------------------------------------------------------------------------------------------------------------|\n| `/` | Node info | |\n| `/_nodes/stats` | Nodes metrics | [Nodes stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html) |\n| `/_nodes/_local/stats` | Local node metrics | [Nodes stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html) |\n| `/_cluster/health` | Cluster health stats | [Cluster health API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html) |\n| `/_cluster/stats` | Cluster metrics | [Cluster stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html) |\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by attempting to connect to port 9200:\n\n- http://127.0.0.1:9200\n- https://127.0.0.1:9200\n\n\n#### Limits\n\nBy default, this collector monitors only the node it is connected to. 
To monitor all cluster nodes, set the `cluster_mode` configuration option to `yes`.\n\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/elasticsearch.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/elasticsearch.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9200 | yes |\n| cluster_mode | Controls whether to collect metrics for all nodes in the cluster or only for the local node. | false | no |\n| collect_node_stats | Controls whether to collect nodes metrics. | true | no |\n| collect_cluster_health | Controls whether to collect cluster health metrics. | true | no |\n| collect_cluster_stats | Controls whether to collect cluster stats metrics. | true | no |\n| collect_indices_stats | Controls whether to collect indices metrics. | false | no |\n| timeout | HTTP request timeout. | 2 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic single node mode\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n\n```\n##### Cluster mode\n\nCluster mode example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n cluster_mode: yes\n\n```\n{% /details %}\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nElasticsearch with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9200\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n\n - name: remote\n url: http://192.0.2.1:9200\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `elasticsearch` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m elasticsearch\n ```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ elasticsearch_node_indices_search_time_query ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_indices_search_time | search performance is degraded, queries run slowly. |\n| [ elasticsearch_node_indices_search_time_fetch ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_indices_search_time | search performance is degraded, fetches run slowly. |\n| [ elasticsearch_cluster_health_status_red ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.cluster_health_status | cluster health status is red. |\n| [ elasticsearch_cluster_health_status_yellow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.cluster_health_status | cluster health status is yellow. |\n| [ elasticsearch_node_index_health_red ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_index_health | node index ${label:index} health status is red. |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per node\n\nThese metrics refer to the cluster node.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cluster_name | Name of the cluster. 
Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). |\n| node_name | Human-readable identifier for the node. Based on the [Node name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#node-name). |\n| host | Network host for the node, based on the [Network host setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#network.host). |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| elasticsearch.node_indices_indexing | index | operations/s |\n| elasticsearch.node_indices_indexing_current | index | operations |\n| elasticsearch.node_indices_indexing_time | index | milliseconds |\n| elasticsearch.node_indices_search | queries, fetches | operations/s |\n| elasticsearch.node_indices_search_current | queries, fetches | operations |\n| elasticsearch.node_indices_search_time | queries, fetches | milliseconds |\n| elasticsearch.node_indices_refresh | refresh | operations/s |\n| elasticsearch.node_indices_refresh_time | refresh | milliseconds |\n| elasticsearch.node_indices_flush | flush | operations/s |\n| elasticsearch.node_indices_flush_time | flush | milliseconds |\n| elasticsearch.node_indices_fielddata_memory_usage | used | bytes |\n| elasticsearch.node_indices_fielddata_evictions | evictions | operations/s |\n| elasticsearch.node_indices_segments_count | segments | segments |\n| elasticsearch.node_indices_segments_memory_usage_total | used | bytes |\n| elasticsearch.node_indices_segments_memory_usage | terms, stored_fields, term_vectors, norms, points, doc_values, index_writer, version_map, fixed_bit_set | bytes |\n| elasticsearch.node_indices_translog_operations | total, uncommitted | operations |\n| elasticsearch.node_indices_translog_size | total, uncommitted | bytes |\n| elasticsearch.node_file_descriptors | open | fd |\n| elasticsearch.node_jvm_heap | inuse | percentage |\n| elasticsearch.node_jvm_heap_bytes | committed, used | bytes |\n| elasticsearch.node_jvm_buffer_pools_count | direct, mapped | pools |\n| elasticsearch.node_jvm_buffer_pool_direct_memory | total, used | bytes |\n| elasticsearch.node_jvm_buffer_pool_mapped_memory | total, used | bytes |\n| elasticsearch.node_jvm_gc_count | young, old | gc/s |\n| elasticsearch.node_jvm_gc_time | young, old | milliseconds |\n| elasticsearch.node_thread_pool_queued | generic, search, search_throttled, get, analyze, write, snapshot, warmer, refresh, listener, fetch_shard_started, fetch_shard_store, flush, force_merge, management | threads |\n| elasticsearch.node_thread_pool_rejected | generic, search, search_throttled, get, analyze, write, snapshot, warmer, refresh, listener, fetch_shard_started, fetch_shard_store, flush, force_merge, management | threads |\n| elasticsearch.node_cluster_communication_packets | received, sent | pps |\n| elasticsearch.node_cluster_communication_traffic | received, sent | bytes/s |\n| elasticsearch.node_http_connections | open | connections |\n| elasticsearch.node_breakers_trips | requests, fielddata, in_flight_requests, model_inference, accounting, parent | trips/s |\n\n### Per cluster\n\nThese metrics refer to the cluster.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cluster_name | Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| elasticsearch.cluster_health_status | green, yellow, red | status |\n| elasticsearch.cluster_number_of_nodes | nodes, data_nodes | nodes |\n| elasticsearch.cluster_shards_count | active_primary, active, relocating, initializing, unassigned, delayed_unaasigned | shards |\n| elasticsearch.cluster_pending_tasks | pending | tasks |\n| elasticsearch.cluster_number_of_in_flight_fetch | in_flight_fetch | fetches |\n| elasticsearch.cluster_indices_count | indices | indices |\n| elasticsearch.cluster_indices_shards_count | total, primaries, replication | shards |\n| elasticsearch.cluster_indices_docs_count | docs | docs |\n| elasticsearch.cluster_indices_store_size | size | bytes |\n| elasticsearch.cluster_indices_query_cache | hit, miss | events/s |\n| elasticsearch.cluster_nodes_by_role_count | coordinating_only, data, data_cold, data_content, data_frozen, data_hot, data_warm, ingest, master, ml, remote_cluster_client, voting_only | nodes |\n\n### Per index\n\nThese metrics refer to the index.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cluster_name | Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). |\n| index | Name of the index. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| elasticsearch.node_index_health | green, yellow, red | status |\n| elasticsearch.node_index_shards_count | shards | shards |\n| elasticsearch.node_index_docs_count | docs | docs |\n| elasticsearch.node_index_store_size | store_size | bytes |\n\n",integration_type:"collector",id:"go.d.plugin-elasticsearch-Elasticsearch",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/elasticsearch/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-opensearch",module_name:"elasticsearch",plugin_name:"go.d.plugin",monitored_instance:{name:"OpenSearch",link:"https://opensearch.org/",icon_filename:"opensearch.svg",categories:["data-collection.search-engines"]},keywords:["elastic","elasticsearch","opensearch","search engine"],related_resources:{integrations:{list:[{plugin_name:"apps.plugin",module_name:"apps"},{plugin_name:"cgroups.plugin",module_name:"cgroups"}]}},info_provided_to_referring_integrations:{description:""},most_popular:!0},overview:"# OpenSearch\n\nPlugin: go.d.plugin\nModule: elasticsearch\n\n## Overview\n\nThis collector monitors the performance and health of the OpenSearch cluster.\n\n\nIt uses the Elasticsearch-compatible [Cluster APIs](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html) to collect metrics.\n\nUsed endpoints:\n\n| Endpoint | Description | API |\n|------------------------|----------------------|-------------------------------------------------------------------------------------------------------------|\n| `/` | Node info | |\n| `/_nodes/stats` | Nodes metrics | [Nodes stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html) |\n| `/_nodes/_local/stats` | Local node metrics | [Nodes stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html) |\n| `/_cluster/health` | Cluster health stats | [Cluster health API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html) |\n| `/_cluster/stats` | Cluster metrics | [Cluster stats 
API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html) |\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by attempting to connect to port 9200:\n\n- http://127.0.0.1:9200\n- https://127.0.0.1:9200\n\n\n#### Limits\n\nBy default, this collector monitors only the node it is connected to. To monitor all cluster nodes, set the `cluster_mode` configuration option to `yes`.\n\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/elasticsearch.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/elasticsearch.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9200 | yes |\n| cluster_mode | Controls whether to collect metrics for all nodes in the cluster or only for the local node. | false | no |\n| collect_node_stats | Controls whether to collect nodes metrics. | true | no |\n| collect_cluster_health | Controls whether to collect cluster health metrics. | true | no |\n| collect_cluster_stats | Controls whether to collect cluster stats metrics. | true | no |\n| collect_indices_stats | Controls whether to collect indices metrics. | false | no |\n| timeout | HTTP request timeout. | 2 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic single node mode\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n\n```\n##### Cluster mode\n\nCluster mode example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n cluster_mode: yes\n\n```\n{% /details %}\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nElasticsearch with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9200\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n\n - name: remote\n url: http://192.0.2.1:9200\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `elasticsearch` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m elasticsearch\n ```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ elasticsearch_node_indices_search_time_query ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_indices_search_time | search performance is degraded, queries run slowly. |\n| [ elasticsearch_node_indices_search_time_fetch ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_indices_search_time | search performance is degraded, fetches run slowly. |\n| [ elasticsearch_cluster_health_status_red ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.cluster_health_status | cluster health status is red. |\n| [ elasticsearch_cluster_health_status_yellow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.cluster_health_status | cluster health status is yellow. |\n| [ elasticsearch_node_index_health_red ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_index_health | node index $label:index health status is red. |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per node\n\nThese metrics refer to the cluster node.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cluster_name | Name of the cluster. 
Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). |\n| node_name | Human-readable identifier for the node. Based on the [Node name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#node-name). |\n| host | Network host for the node, based on the [Network host setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#network.host). |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| elasticsearch.node_indices_indexing | index | operations/s |\n| elasticsearch.node_indices_indexing_current | index | operations |\n| elasticsearch.node_indices_indexing_time | index | milliseconds |\n| elasticsearch.node_indices_search | queries, fetches | operations/s |\n| elasticsearch.node_indices_search_current | queries, fetches | operations |\n| elasticsearch.node_indices_search_time | queries, fetches | milliseconds |\n| elasticsearch.node_indices_refresh | refresh | operations/s |\n| elasticsearch.node_indices_refresh_time | refresh | milliseconds |\n| elasticsearch.node_indices_flush | flush | operations/s |\n| elasticsearch.node_indices_flush_time | flush | milliseconds |\n| elasticsearch.node_indices_fielddata_memory_usage | used | bytes |\n| elasticsearch.node_indices_fielddata_evictions | evictions | operations/s |\n| elasticsearch.node_indices_segments_count | segments | segments |\n| elasticsearch.node_indices_segments_memory_usage_total | used | bytes |\n| elasticsearch.node_indices_segments_memory_usage | terms, stored_fields, term_vectors, norms, points, doc_values, index_writer, version_map, fixed_bit_set | bytes |\n| elasticsearch.node_indices_translog_operations | total, uncommitted | operations |\n| elasticsearch.node_indices_translog_size | total, uncommitted | bytes |\n| elasticsearch.node_file_descriptors | open | fd |\n| elasticsearch.node_jvm_heap | inuse | percentage |\n| elasticsearch.node_jvm_heap_bytes | committed, used | bytes |\n| elasticsearch.node_jvm_buffer_pools_count | direct, mapped | pools |\n| elasticsearch.node_jvm_buffer_pool_direct_memory | total, used | bytes |\n| elasticsearch.node_jvm_buffer_pool_mapped_memory | total, used | bytes |\n| elasticsearch.node_jvm_gc_count | young, old | gc/s |\n| elasticsearch.node_jvm_gc_time | young, old | milliseconds |\n| elasticsearch.node_thread_pool_queued | generic, search, search_throttled, get, analyze, write, snapshot, warmer, refresh, listener, fetch_shard_started, fetch_shard_store, flush, force_merge, management | threads |\n| elasticsearch.node_thread_pool_rejected | generic, search, search_throttled, get, analyze, write, snapshot, warmer, refresh, listener, fetch_shard_started, fetch_shard_store, flush, force_merge, management | threads |\n| elasticsearch.node_cluster_communication_packets | received, sent | pps |\n| elasticsearch.node_cluster_communication_traffic | received, sent | bytes/s |\n| elasticsearch.node_http_connections | open | connections |\n| elasticsearch.node_breakers_trips | requests, fielddata, in_flight_requests, model_inference, accounting, parent | trips/s |\n\n### Per cluster\n\nThese metrics refer to the cluster.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cluster_name | Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| elasticsearch.cluster_health_status | green, yellow, red | status |\n| elasticsearch.cluster_number_of_nodes | nodes, data_nodes | nodes |\n| elasticsearch.cluster_shards_count | active_primary, active, relocating, initializing, unassigned, delayed_unaasigned | shards |\n| elasticsearch.cluster_pending_tasks | pending | tasks |\n| elasticsearch.cluster_number_of_in_flight_fetch | in_flight_fetch | fetches |\n| elasticsearch.cluster_indices_count | indices | indices |\n| elasticsearch.cluster_indices_shards_count | total, primaries, replication | shards |\n| elasticsearch.cluster_indices_docs_count | docs | docs |\n| elasticsearch.cluster_indices_store_size | size | bytes |\n| elasticsearch.cluster_indices_query_cache | hit, miss | events/s |\n| elasticsearch.cluster_nodes_by_role_count | coordinating_only, data, data_cold, data_content, data_frozen, data_hot, data_warm, ingest, master, ml, remote_cluster_client, voting_only | nodes |\n\n### Per index\n\nThese metrics refer to the index.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cluster_name | Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). |\n| index | Name of the index. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| elasticsearch.node_index_health | green, yellow, red | status |\n| elasticsearch.node_index_shards_count | shards | shards |\n| elasticsearch.node_index_docs_count | docs | docs |\n| elasticsearch.node_index_store_size | store_size | bytes |\n\n",integration_type:"collector",id:"go.d.plugin-elasticsearch-OpenSearch",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/elasticsearch/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-envoy",plugin_name:"go.d.plugin",module_name:"envoy",monitored_instance:{name:"Envoy",link:"https://www.envoyproxy.io/",icon_filename:"envoy.svg",categories:["data-collection.web-servers-and-web-proxies"]},keywords:["envoy","proxy"],related_resources:{integrations:{list:[{plugin_name:"apps.plugin",module_name:"apps"}]}},info_provided_to_referring_integrations:{description:""},most_popular:!0},overview:"# Envoy\n\nPlugin: go.d.plugin\nModule: envoy\n\n## Overview\n\nThis collector monitors Envoy proxies. 
It collects server, cluster, and listener metrics.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Envoy instances running on localhost.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/envoy.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/envoy.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9901/stats/prometheus | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9901/stats/prometheus\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9901/stats/prometheus\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9901/stats/prometheus\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9901/stats/prometheus\n\n - name: remote\n url: http://192.0.2.1:9901/stats/prometheus\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `envoy` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m envoy\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Envoy instance\n\nEnvoy exposes metrics in Prometheus format. 
All metric labels are added to charts.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| envoy.server_state | live, draining, pre_initializing, initializing | state |\n| envoy.server_connections_count | connections | connections |\n| envoy.server_parent_connections_count | connections | connections |\n| envoy.server_memory_allocated_size | allocated | bytes |\n| envoy.server_memory_heap_size | heap | bytes |\n| envoy.server_memory_physical_size | physical | bytes |\n| envoy.server_uptime | uptime | seconds |\n| envoy.cluster_manager_cluster_count | active, not_active | clusters |\n| envoy.cluster_manager_cluster_changes_rate | added, modified, removed | clusters/s |\n| envoy.cluster_manager_cluster_updates_rate | cluster | updates/s |\n| envoy.cluster_manager_cluster_updated_via_merge_rate | via_merge | updates/s |\n| envoy.cluster_manager_update_merge_cancelled_rate | merge_cancelled | updates/s |\n| envoy.cluster_manager_update_out_of_merge_window_rate | out_of_merge_window | updates/s |\n| envoy.cluster_membership_endpoints_count | healthy, degraded, excluded | endpoints |\n| envoy.cluster_membership_changes_rate | membership | changes/s |\n| envoy.cluster_membership_updates_rate | success, failure, empty, no_rebuild | updates/s |\n| envoy.cluster_upstream_cx_active_count | active | connections |\n| envoy.cluster_upstream_cx_rate | created | connections/s |\n| envoy.cluster_upstream_cx_http_rate | http1, http2, http3 | connections/s |\n| envoy.cluster_upstream_cx_destroy_rate | local, remote | connections/s |\n| envoy.cluster_upstream_cx_connect_fail_rate | failed | connections/s |\n| envoy.cluster_upstream_cx_connect_timeout_rate | timeout | connections/s |\n| envoy.cluster_upstream_cx_bytes_rate | received, sent | bytes/s |\n| envoy.cluster_upstream_cx_bytes_buffered_size | received, send | bytes |\n| envoy.cluster_upstream_rq_active_count | active | requests |\n| envoy.cluster_upstream_rq_rate | requests | requests/s |\n| envoy.cluster_upstream_rq_failed_rate | cancelled, maintenance_mode, timeout, max_duration_reached, per_try_timeout, reset_local, reset_remote | requests/s |\n| envoy.cluster_upstream_rq_pending_active_count | active_pending | requests |\n| envoy.cluster_upstream_rq_pending_rate | pending | requests/s |\n| envoy.cluster_upstream_rq_pending_failed_rate | overflow, failure_eject | requests/s |\n| envoy.cluster_upstream_rq_retry_rate | request | retries/s |\n| envoy.cluster_upstream_rq_retry_success_rate | success | retries/s |\n| envoy.cluster_upstream_rq_retry_backoff_rate | exponential, ratelimited | retries/s |\n| envoy.listener_manager_listeners_count | active, warming, draining | listeners |\n| envoy.listener_manager_listener_changes_rate | added, modified, removed, stopped | listeners/s |\n| envoy.listener_manager_listener_object_events_rate | create_success, create_failure, in_place_updated | objects/s |\n| envoy.listener_admin_downstream_cx_active_count | active | connections |\n| envoy.listener_admin_downstream_cx_rate | created | connections/s |\n| envoy.listener_admin_downstream_cx_destroy_rate | destroyed | connections/s |\n| envoy.listener_admin_downstream_cx_transport_socket_connect_timeout_rate | timeout | connections/s |\n| envoy.listener_admin_downstream_cx_rejected_rate | overflow, overload, global_overflow | connections/s |\n| envoy.listener_admin_downstream_listener_filter_remote_close_rate | closed | connections/s |\n| envoy.listener_admin_downstream_listener_filter_error_rate | read | 
errors/s |\n| envoy.listener_admin_downstream_pre_cx_active_count | active | sockets |\n| envoy.listener_admin_downstream_pre_cx_timeout_rate | timeout | sockets/s |\n| envoy.listener_downstream_cx_active_count | active | connections |\n| envoy.listener_downstream_cx_rate | created | connections/s |\n| envoy.listener_downstream_cx_destroy_rate | destroyed | connections/s |\n| envoy.listener_downstream_cx_transport_socket_connect_timeout_rate | timeout | connections/s |\n| envoy.listener_downstream_cx_rejected_rate | overflow, overload, global_overflow | connections/s |\n| envoy.listener_downstream_listener_filter_remote_close_rate | closed | connections/s |\n| envoy.listener_downstream_listener_filter_error_rate | read | errors/s |\n| envoy.listener_downstream_pre_cx_active_count | active | sockets |\n| envoy.listener_downstream_pre_cx_timeout_rate | timeout | sockets/s |\n\n",integration_type:"collector",id:"go.d.plugin-envoy-Envoy",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/envoy/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-fail2ban",plugin_name:"go.d.plugin",module_name:"fail2ban",monitored_instance:{name:"Fail2ban",link:"https://github.com/fail2ban/fail2ban#readme",icon_filename:"fail2ban.png",categories:["data-collection.authentication-and-authorization"]},keywords:["fail2ban","security","authentication","authorization"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1},overview:"# Fail2ban\n\nPlugin: go.d.plugin\nModule: fail2ban\n\n## Overview\n\nThis collector tracks two main metrics for each jail: currently banned IPs and active failure incidents. It relies on the [`fail2ban-client`](https://linux.die.net/man/1/fail2ban-client) CLI tool but avoids directly executing the binary. Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment. This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/fail2ban.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/fail2ban.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | fail2ban-client binary execution timeout. 
| 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: fail2ban\n update_every: 5 # Collect Fail2Ban jails statistics every 5 seconds\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `fail2ban` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m fail2ban\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per jail\n\nThese metrics refer to the Jail.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| jail | Jail's name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| fail2ban.jail_banned_ips | banned | addresses |\n| fail2ban.jail_active_failures | active_failures | failures |\n\n",integration_type:"collector",id:"go.d.plugin-fail2ban-Fail2ban",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/fail2ban/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-filecheck",plugin_name:"go.d.plugin",module_name:"filecheck",monitored_instance:{name:"Files and directories",link:"",icon_filename:"filesystem.svg",categories:["data-collection.other"]},keywords:["files","directories"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1},overview:"# Files and directories\n\nPlugin: go.d.plugin\nModule: filecheck\n\n## Overview\n\nThis collector monitors the existence, last modification time, and size of arbitrary files and directories on the system.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThis collector requires the DAC_READ_SEARCH capability when monitoring files not normally accessible to the Netdata user, but it is set automatically during installation, so no manual configuration is needed.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/filecheck.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd 
/opt/netdata/etc/netdata\nsudo ./edit-config go.d/filecheck.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| files | List of files to monitor. | | yes |\n| dirs | List of directories to monitor. | | yes |\n| discovery_every | Files and directories discovery interval. | 60 | no |\n\n##### files\n\nFiles matching the selector will be monitored.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match)\n- Syntax:\n\n```yaml\nfiles:\n include:\n - pattern1\n - pattern2\n exclude:\n - pattern3\n - pattern4\n```\n\n\n##### dirs\n\nDirectories matching the selector will be monitored.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match)\n- Syntax:\n\n```yaml\ndirs:\n include:\n - pattern1\n - pattern2\n exclude:\n - pattern3\n - pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Files\n\nFiles monitoring example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: files_example\n files:\n include:\n - '/path/to/file1'\n - '/path/to/file2'\n - '/path/to/*.log'\n\n```\n{% /details %}\n##### Directories\n\nDirectories monitoring example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: dirs_example\n dirs:\n collect_dir_size: no\n include:\n - '/path/to/dir1'\n - '/path/to/dir2'\n - '/path/to/dir3*'\n\n```\n{% /details %}\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `filecheck` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m filecheck\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per file\n\nThese metrics refer to the File.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| file_path | File absolute path |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filecheck.file_existence_status | exist, not_exist | status |\n| filecheck.file_modification_time_ago | mtime_ago | seconds |\n| filecheck.file_size_bytes | size | bytes |\n\n### Per directory\n\nThese metrics refer to the Directory.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| dir_path | Directory absolute path |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filecheck.dir_existence_status | exist, not_exist | status |\n| filecheck.dir_modification_time_ago | mtime_ago | seconds |\n| filecheck.dir_size_bytes | size | bytes |\n| filecheck.dir_files_count | files | files |\n\n",integration_type:"collector",id:"go.d.plugin-filecheck-Files_and_directories",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/filecheck/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-fluentd",plugin_name:"go.d.plugin",module_name:"fluentd",monitored_instance:{name:"Fluentd",link:"https://www.fluentd.org/",icon_filename:"fluentd.svg",categories:["data-collection.logs-servers"]},keywords:["fluentd","logging"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1},overview:"# Fluentd\n\nPlugin: go.d.plugin\nModule: fluentd\n\n## Overview\n\nThis collector monitors Fluentd servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Enable monitor agent\n\nTo enable the monitor agent, follow the [official documentation](https://docs.fluentd.org/v1.0/articles/monitoring-rest-api).\n
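\nFor reference, a minimal sketch of a `monitor_agent` source in the Fluentd configuration, based on the official documentation (the port `24220` is the default that this collector queries; adjust it if your setup differs):\n\n```text\n<source>\n @type monitor_agent\n bind 0.0.0.0\n port 24220\n</source>\n```\n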
\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/fluentd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/fluentd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:24220 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:24220\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:24220\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nFluentd with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:24220\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:24220\n\n - name: remote\n url: http://192.0.2.1:24220\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `fluentd` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m fluentd\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Fluentd instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| fluentd.retry_count | a dimension per plugin | count |\n| fluentd.buffer_queue_length | a dimension per plugin | queue_length |\n| fluentd.buffer_total_queued_size | a dimension per plugin | queued_size |\n\n",integration_type:"collector",id:"go.d.plugin-fluentd-Fluentd",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/fluentd/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-freeradius",plugin_name:"go.d.plugin",module_name:"freeradius",monitored_instance:{name:"FreeRADIUS",link:"https://freeradius.org/",categories:["data-collection.authentication-and-authorization"],icon_filename:"freeradius.svg"},keywords:["freeradius","radius"],most_popular:!1,info_provided_to_referring_integrations:{description:""},related_resources:{integrations:{list:[]}}},overview:"# FreeRADIUS\n\nPlugin: go.d.plugin\nModule: freeradius\n\n## Overview\n\nThis collector monitors FreeRADIUS servers.\n\nIt collects metrics by sending [status-server](https://wiki.freeradius.org/config/Status) messages to the server.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt automatically detects FreeRADIUS instances running on localhost.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Enable status server\n\nTo enable the status server, follow the [official documentation](https://wiki.freeradius.org/config/Status).\n
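\nFor reference, the stock `status` virtual server that ships with FreeRADIUS (usually `raddb/sites-available/status`) looks roughly like the sketch below once enabled; the port `18121` and secret `adminsecret` used in the examples further down come from this file and may differ in your installation:\n\n```text\nserver status {\n listen {\n type = status\n ipaddr = 127.0.0.1\n port = 18121\n }\n client admin {\n ipaddr = 127.0.0.1\n secret = adminsecret\n }\n}\n```\n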
\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/freeradius.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/freeradius.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address. | 127.0.0.1 | yes |\n| port | Server port. | 18121 | no |\n| secret | FreeRADIUS secret. | adminsecret | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1\n port: 18121\n secret: adminsecret\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1\n port: 18121\n secret: adminsecret\n\n - name: remote\n address: 192.0.2.1\n port: 18121\n secret: adminsecret\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `freeradius` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m freeradius\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per FreeRADIUS instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| freeradius.authentication | requests, responses | packets/s |\n| freeradius.authentication_access_responses | accepts, rejects, challenges | packets/s |\n| freeradius.bad_authentication | dropped, duplicate, invalid, malformed, unknown-types | packets/s |\n| freeradius.proxy_authentication | requests, responses | packets/s |\n| freeradius.proxy_authentication_access_responses | accepts, rejects, challenges | packets/s |\n| freeradius.proxy_bad_authentication | dropped, duplicate, invalid, malformed, unknown-types | packets/s |\n| freeradius.accounting | requests, responses | packets/s |\n| freeradius.bad_accounting | dropped, duplicate, invalid, malformed, unknown-types | packets/s |\n| freeradius.proxy_accounting | requests, responses | packets/s |\n| freeradius.proxy_bad_accounting | dropped, duplicate, invalid, malformed, unknown-types | packets/s |\n\n",integration_type:"collector",id:"go.d.plugin-freeradius-FreeRADIUS",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/freeradius/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-geth",plugin_name:"go.d.plugin",module_name:"geth",monitored_instance:{name:"Go-ethereum",link:"https://github.com/ethereum/go-ethereum",icon_filename:"geth.png",categories:["data-collection.blockchain-servers"]},keywords:["geth","ethereum","blockchain"],related_resources:{integrations:{list:[{plugin_name:"apps.plugin",module_name:"apps"}]}},info_provided_to_referring_integrations:{description:""},most_popular:!0},overview:"# Go-ethereum\n\nPlugin: go.d.plugin\nModule: geth\n\n## Overview\n\nThis collector monitors Go-ethereum instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from 
multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Go-ethereum instances running on localhost.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/geth.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/geth.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:6060/debug/metrics/prometheus | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:6060/debug/metrics/prometheus\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:6060/debug/metrics/prometheus\n username: username\n password: password\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:6060/debug/metrics/prometheus\n\n - name: remote\n url: http://192.0.2.1:6060/debug/metrics/prometheus\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `geth` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m geth\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Go-ethereum instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| geth.eth_db_chaindata_ancient_io_rate | reads, writes | bytes/s |\n| geth.eth_db_chaindata_ancient_io | reads, writes | bytes |\n| geth.eth_db_chaindata_disk_io | reads, writes | bytes |\n| geth.goroutines | goroutines | goroutines |\n| geth.eth_db_chaindata_disk_io_rate | reads, writes | bytes/s |\n| geth.chaindata_db_size | level_db, ancient_db | bytes |\n| geth.chainhead | block, receipt, header | block |\n| geth.tx_pool_pending | invalid, pending, local, discard, no_funds, ratelimit, replace | transactions |\n| geth.tx_pool_current | invalid, pending, local, pool | transactions |\n| geth.tx_pool_queued | discard, eviction, no_funds, ratelimit | transactions |\n| geth.p2p_bandwidth | ingress, egress | bytes/s |\n| geth.reorgs | executed | reorgs |\n| geth.reorgs_blocks | added, dropped | blocks |\n| geth.p2p_peers | peers | peers |\n| geth.p2p_peers_calls | dials, serves | calls/s |\n| geth.rpc_calls | failed, successful | calls/s |\n\n",integration_type:"collector",id:"go.d.plugin-geth-Go-ethereum",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/geth/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-haproxy",plugin_name:"go.d.plugin",module_name:"haproxy",monitored_instance:{name:"HAProxy",link:"https://www.haproxy.org/",icon_filename:"haproxy.svg",categories:["data-collection.web-servers-and-web-proxies"]},keywords:["haproxy","web","webserver","http","proxy"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1},overview:"# HAProxy\n\nPlugin: go.d.plugin\nModule: haproxy\n\n## Overview\n\nThis collector monitors HAProxy servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Enable the PROMEX addon\n\nTo enable the PROMEX addon, follow the [official documentation](https://github.com/haproxy/haproxy/tree/master/addons/promex).\n
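\nFor reference, a minimal sketch of exposing the PROMEX endpoint in `haproxy.cfg` (this assumes HAProxy was built with the addon; the port `8404` only mirrors the examples below and is not mandatory):\n\n```text\nfrontend prometheus\n bind *:8404\n mode http\n http-request use-service prometheus-exporter if { path /metrics }\n no log\n```\n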
\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/haproxy.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/haproxy.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8404/metrics\n\n```\n{% /details %}\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8404/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nHAProxy with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8404/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8404/metrics\n\n - name: remote\n url: http://192.0.2.1:8404/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `haproxy` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m haproxy\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per HAProxy instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| haproxy.backend_current_sessions | a dimension per proxy | sessions |\n| haproxy.backend_sessions | a dimension per proxy | sessions/s |\n| haproxy.backend_response_time_average | a dimension per proxy | milliseconds |\n| haproxy.backend_queue_time_average | a dimension per proxy | milliseconds |\n| haproxy.backend_current_queue | a dimension per proxy | requests |\n\n### Per proxy\n\nThese metrics refer to the Proxy.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| haproxy.backend_http_responses | 1xx, 2xx, 3xx, 4xx, 5xx, other | responses/s |\n| haproxy.backend_network_io | in, out | bytes/s |\n\n",integration_type:"collector",id:"go.d.plugin-haproxy-HAProxy",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/haproxy/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-hddtemp",plugin_name:"go.d.plugin",module_name:"hddtemp",monitored_instance:{name:"HDD temperature",link:"https://linux.die.net/man/8/hddtemp",categories:["data-collection.hardware-devices-and-sensors"],icon_filename:"hard-drive.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["hardware","hdd temperature","disk temperature","temperature"],most_popular:!1},overview:"# HDD temperature\n\nPlugin: go.d.plugin\nModule: hddtemp\n\n## Overview\n\nThis collector monitors disk temperatures.\n\n\nIt retrieves temperature data for attached disks by querying the hddtemp daemon at regular intervals.\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, this collector will attempt to connect to the `hddtemp` daemon on `127.0.0.1:7634`\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install hddtemp\n\nInstall `hddtemp` using your distribution\'s package manager.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/hddtemp.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/hddtemp.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | The IP address and port where the hddtemp daemon listens for connections. | 127.0.0.1:7634 | yes |\n| timeout | Connection, read, and write timeout duration in seconds. 
The timeout includes name resolution. | 1 | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:7634\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:7634\n\n - name: remote\n address: 203.0.113.0:7634\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `hddtemp` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m hddtemp\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per disk\n\nThese metrics refer to the Disk.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk_id | Disk identifier. It is derived from the device path (e.g. sda or ata-HUP722020APA330_BFJ0WS3F) |\n| model | Disk model |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hddtemp.disk_temperature | temperature | Celsius |\n| hddtemp.disk_temperature_sensor_status | ok, err, na, unk, nos, slp | status |\n\n",integration_type:"collector",id:"go.d.plugin-hddtemp-HDD_temperature",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/hddtemp/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-hfs",plugin_name:"go.d.plugin",module_name:"hfs",monitored_instance:{name:"Hadoop Distributed File System (HDFS)",link:"https://hadoop.apache.org/docs/r1.2.1/hdfs_design.html",icon_filename:"hadoop.svg",categories:["data-collection.storage-mount-points-and-filesystems"]},keywords:["hdfs","hadoop"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!0},overview:"# Hadoop Distributed File System (HDFS)\n\nPlugin: go.d.plugin\nModule: hdfs\n\n## Overview\n\nThis collector monitors HDFS nodes.\n\nNetdata accesses HDFS metrics over `Java Management Extensions` (JMX) through the web interface of an HDFS daemon.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### 
File\n\nThe configuration file name for this integration is `go.d/hdfs.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/hdfs.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9870/jmx | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9870/jmx\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9870/jmx\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9870/jmx\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9870/jmx\n\n - name: remote\n url: http://192.0.2.1:9870/jmx\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `hdfs` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m hdfs\n ```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ hdfs_capacity_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf) | hdfs.capacity | summary datanodes space capacity utilization |\n| [ hdfs_missing_blocks ](https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf) | hdfs.blocks | number of missing blocks |\n| [ hdfs_stale_nodes ](https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf) | hdfs.data_nodes | number of datanodes marked stale due to delayed heartbeat |\n| [ hdfs_dead_nodes ](https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf) | hdfs.data_nodes | number of datanodes which are currently dead |\n| [ hdfs_num_failed_volumes ](https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf) | hdfs.num_failed_volumes | number of failed volumes |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Hadoop Distributed File System (HDFS) instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | DataNode | NameNode |\n|:------|:----------|:----|:---:|:---:|\n| hdfs.heap_memory | committed, used | MiB | \u2022 | \u2022 |\n| hdfs.gc_count_total | gc | events/s | \u2022 | \u2022 |\n| hdfs.gc_time_total | ms | ms | \u2022 | \u2022 |\n| hdfs.gc_threshold | info, warn | events/s | \u2022 | \u2022 |\n| hdfs.threads | new, runnable, blocked, waiting, timed_waiting, terminated | num | \u2022 | \u2022 |\n| hdfs.logs_total | info, error, warn, fatal | logs/s | \u2022 | \u2022 |\n| hdfs.rpc_bandwidth | received, sent | kilobits/s | \u2022 | \u2022 |\n| hdfs.rpc_calls | calls | calls/s | \u2022 | \u2022 |\n| hdfs.open_connections | open | connections | \u2022 | \u2022 |\n| hdfs.call_queue_length | length | num | \u2022 | \u2022 |\n| hdfs.avg_queue_time | time | ms | \u2022 | \u2022 |\n| hdfs.avg_processing_time | time | ms | \u2022 | \u2022 |\n| hdfs.capacity | remaining, used | KiB | | \u2022 |\n| hdfs.used_capacity | dfs, non_dfs | KiB | | \u2022 |\n| hdfs.load | load | load | | \u2022 |\n| hdfs.volume_failures_total | failures | events/s | | \u2022 |\n| hdfs.files_total | files | num | | \u2022 |\n| hdfs.blocks_total | blocks | num | | \u2022 |\n| hdfs.blocks | corrupt, missing, under_replicated | num | | \u2022 |\n| hdfs.data_nodes | live, dead, stale | num | | \u2022 |\n| hdfs.datanode_capacity | remaining, used | KiB | \u2022 | |\n| hdfs.datanode_used_capacity | dfs, non_dfs | KiB | \u2022 | |\n| hdfs.datanode_failed_volumes | failed volumes | num | \u2022 | |\n| hdfs.datanode_bandwidth | reads, writes | KiB/s | \u2022 | 
|\n\n",integration_type:"collector",id:"go.d.plugin-hfs-Hadoop_Distributed_File_System_(HDFS)",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/hdfs/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-hpssa",plugin_name:"go.d.plugin",module_name:"hpssa",monitored_instance:{name:"HPE Smart Arrays",link:"https://buy.hpe.com/us/en/options/controller-controller-options/smart-array-controllers-smart-host-bus-adapters/c/7109730",icon_filename:"hp.svg",categories:["data-collection.storage-mount-points-and-filesystems"]},keywords:["storage","raid-controller","hp","hpssa","array"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1},overview:"# HPE Smart Arrays\n\nPlugin: go.d.plugin\nModule: hpssa\n\n## Overview\n\nMonitors the health of HPE Smart Arrays by tracking the status of controllers, arrays, logical and physical drives in your storage system.\nIt relies on the `ssacli` CLI tool but avoids directly executing the binary.\nInstead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment.\nThis approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\n\nExecuted commands:\n- `ssacli ctrl all show config detail`\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install ssacli\n\nSee [official installation instructions](https://support.hpe.com/connect/s/softwaredetails?language=en_US&collectionId=MTX-0cb3f808e2514d3d).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/ssacli.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/ssacli.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | ssacli binary execution timeout. | 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: hpssa\n update_every: 5 # Collect HPE Smart Array statistics every 5 seconds\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `hpssa` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m hpssa\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:'## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per controller\n\nThese metrics refer to the Controller.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| slot | Slot number |\n| model | Controller model |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hpssa.controller_status | ok, nok | status |\n| hpssa.controller_temperature | temperature | Celsius |\n| hpssa.controller_cache_module_presence_status | present, not_present | status |\n| hpssa.controller_cache_module_status | ok, nok | status |\n| hpssa.controller_cache_module_temperature | temperature | Celsius |\n| hpssa.controller_cache_module_battery_status | ok, nok | status |\n\n### Per array\n\nThese metrics refer to the Array.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| slot | Slot number |\n| array_id | Array id |\n| interface_type | Array interface type (e.g. SATA) |\n| array_type | Array type (e.g. Data) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hpssa.array_status | ok, nok | status |\n\n### Per logical drive\n\nThese metrics refer to the Logical Drive.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| slot | Slot number |\n| array_id | Array id |\n| logical_drive_id | Logical Drive id (number) |\n| disk_name | Disk name (e.g. /dev/sda) |\n| drive_type | Drive type (e.g. Data) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hpssa.logical_drive_status | ok, nok | status |\n\n### Per physical drive\n\nThese metrics refer to the Physical Drive.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| slot | Slot number |\n| array_id | Array id or "na" if unassigned |\n| logical_drive_id | Logical Drive id or "na" if unassigned |\n| location | Drive location in port:box:bay format (e.g. 1I:1:1) |\n| interface_type | Drive interface type (e.g. SATA) |\n| drive_type | Drive type (e.g. 
Data Drive, Unassigned Drive) |\n| model | Drive model |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hpssa.physical_drive_status | ok, nok | status |\n| hpssa.physical_drive_temperature | temperature | Celsius |\n\n',integration_type:"collector",id:"go.d.plugin-hpssa-HPE_Smart_Arrays",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/hpssa/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-httpcheck",plugin_name:"go.d.plugin",module_name:"httpcheck",monitored_instance:{name:"HTTP Endpoints",link:"",icon_filename:"globe.svg",categories:["data-collection.synthetic-checks"]},keywords:["webserver"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!0},overview:"# HTTP Endpoints\n\nPlugin: go.d.plugin\nModule: httpcheck\n\n## Overview\n\nThis collector monitors the availability and response time of HTTP servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/httpcheck.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/httpcheck.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| status_accepted | HTTP accepted response statuses. Anything else will result in \'bad status\' in the status chart. | [200] | no |\n| response_match | If the status code is accepted, the content of the response will be matched against this regular expression. | | no |\n| header_match | This option defines a set of rules that check for specific key-value pairs in the HTTP headers of the response. | [] | no |\n| header_match.exclude | This option determines whether the rule should check for the presence of the specified key-value pair or the absence of it. | no | no |\n| header_match.key | The exact name of the HTTP header to check for. | | yes |\n| header_match.value | The [pattern](https://github.com/netdata/netdata/tree/master/src/go/collectors/go.d.plugin/pkg/matcher#supported-format) to match against the value of the specified header. | | no |\n| cookie_file | Path to cookie file. See [cookie file format](https://everything.curl.dev/http/cookies/fileformat). | | no |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. 
| | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080\n\n```\n{% /details %}\n##### With HTTP request headers\n\nConfiguration with HTTP request headers that will be sent by the client.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080\n headers:\n Host: localhost:8080\n User-Agent: netdata/go.d.plugin\n Accept: */*\n\n```\n{% /details %}\n##### With `status_accepted`\n\nA basic example configuration with non-default status_accepted.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080\n status_accepted:\n - 200\n - 204\n\n```\n{% /details %}\n##### With `header_match`\n\nExample configurations with `header_match`. See the value [pattern](https://github.com/netdata/netdata/tree/master/src/go/collectors/go.d.plugin/pkg/matcher#supported-format) syntax.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n # The "X-Robots-Tag" header must be present in the HTTP response header,\n # but the value of the header does not matter.\n # This config checks for the presence of the header regardless of its value.\n - name: local\n url: http://127.0.0.1:8080\n header_match:\n - key: X-Robots-Tag\n\n # The "X-Robots-Tag" header must be present in the HTTP response header\n # and its value must be equal to "noindex, nofollow".\n # This config checks both the presence of the header and its value.\n - name: local\n url: http://127.0.0.1:8080\n header_match:\n - key: X-Robots-Tag\n value: \'= noindex,nofollow\'\n\n # The "X-Robots-Tag" header must not be present in the HTTP response header,\n # but the value of the header does not matter.\n # This config checks for the absence of the header regardless of its value.\n - name: local\n url: http://127.0.0.1:8080\n header_match:\n - key: X-Robots-Tag\n exclude: yes\n\n # The "X-Robots-Tag" header must not be present in the HTTP response header\n # with a value equal to "noindex, nofollow".\n # This config checks both the presence of the header and its value.\n - name: local\n url: http://127.0.0.1:8080\n header_match:\n - key: X-Robots-Tag\n exclude: yes\n value: \'= noindex,nofollow\'\n\n```\n{% /details %}\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - 
name: local\n url: https://127.0.0.1:8080\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080\n\n - name: remote\n url: http://192.0.2.1:8080\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `httpcheck` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m httpcheck\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per target\n\nThe metrics refer to the monitored target.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| url | url value that is set in the configuration file. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| httpcheck.response_time | time | ms |\n| httpcheck.response_length | length | characters |\n| httpcheck.status | success, timeout, redirect, no_connection, bad_content, bad_header, bad_status | boolean |\n| httpcheck.in_state | time | boolean |\n\n",integration_type:"collector",id:"go.d.plugin-httpcheck-HTTP_Endpoints",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/httpcheck/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-intelgpu",plugin_name:"go.d.plugin",module_name:"intelgpu",monitored_instance:{name:"Intel GPU",link:"https://www.intel.com/",icon_filename:"microchip.svg",categories:["data-collection.hardware-devices-and-sensors"]},keywords:["intel","gpu","hardware"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1},overview:"# Intel GPU\n\nPlugin: go.d.plugin\nModule: intelgpu\n\n## Overview\n\nThis collector gathers performance metrics for Intel integrated GPUs.\nIt relies on the [`intel_gpu_top`](https://manpages.debian.org/testing/intel-gpu-tools/intel_gpu_top.1.en.html) CLI tool but avoids directly executing the binary.\nInstead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment.\nThis approach eliminates the need to grant the CAP_PERFMON capability to `intel_gpu_top`, improving security and potentially simplifying permission management.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default 
configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install intel-gpu-tools\n\nInstall `intel-gpu-tools` using your distribution\'s package manager.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/intelgpu.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/intelgpu.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| device | Select a specific GPU using a [supported filter](https://manpages.debian.org/testing/intel-gpu-tools/intel_gpu_top.1.en.html#DESCRIPTION). | | no |\n\n{% /details %}\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: intelgpu\n update_every: 5 # Collect Intel iGPU metrics every 5 seconds\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `intelgpu` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m intelgpu\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Intel GPU instance\n\nThese metrics refer to the Intel GPU.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| intelgpu.frequency | frequency | MHz |\n| intelgpu.power | gpu, package | Watts |\n\n### Per engine\n\nThese metrics refer to the GPU hardware engine.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| engine_class | Engine class (Render/3D, Blitter, VideoEnhance, Video, Compute). |\n| engine_instance | Engine instance (e.g. Render/3D/0, Video/0, Video/1). 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| intelgpu.engine_busy_perc | busy | percentage |\n\n",integration_type:"collector",id:"go.d.plugin-intelgpu-Intel_GPU",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/intelgpu/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-isc_dhcpd",plugin_name:"go.d.plugin",module_name:"isc_dhcpd",monitored_instance:{name:"ISC DHCP",link:"https://www.isc.org/dhcp/",categories:["data-collection.dns-and-dhcp-servers"],icon_filename:"isc.png"},keywords:["dhcpd","dhcp"],most_popular:!1,info_provided_to_referring_integrations:{description:""},related_resources:{integrations:{list:[]}}},overview:"# ISC DHCP\n\nPlugin: go.d.plugin\nModule: isc_dhcpd\n\n## Overview\n\nThis collector monitors ISC DHCP lease usage by reading the DHCP server's lease database (dhcpd.leases), which records client leases.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/isc_dhcpd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/isc_dhcpd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| leases_path | Path to the DHCP server\'s lease database. | /var/lib/dhcp/dhcpd.leases | no |\n| pools | List of IP pools to monitor. | | yes |\n\n##### pools\n\nList of IP pools to monitor.\n\n- IP range syntax: see [supported formats](https://github.com/netdata/netdata/tree/master/src/go/collectors/go.d.plugin/pkg/iprange#supported-formats).\n- Syntax:\n\n```yaml\npools:\n - name: "POOL_NAME1"\n networks: "SPACE SEPARATED LIST OF IP RANGES"\n - name: "POOL_NAME2"\n networks: "SPACE SEPARATED LIST OF IP RANGES"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n pools:\n - name: lan\n networks: "192.168.0.0/24 192.168.1.0/24 192.168.2.0/24"\n - name: wifi\n networks: "10.0.0.0/24"\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `isc_dhcpd` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m isc_dhcpd\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per ISC DHCP instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| isc_dhcpd.active_leases_total | active | leases |\n\n### Per pool\n\nThese metrics refer to the DHCP pool.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| dhcp_pool_name | The DHCP pool name defined in the collector configuration. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| isc_dhcpd.dhcp_pool_utilization | utilization | percent |\n| isc_dhcpd.dhcp_pool_active_leases | active | leases |\n\n",integration_type:"collector",id:"go.d.plugin-isc_dhcpd-ISC_DHCP",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/isc_dhcpd/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-k8s_kubelet",plugin_name:"go.d.plugin",module_name:"k8s_kubelet",monitored_instance:{name:"Kubelet",link:"https://kubernetes.io/docs/concepts/overview/components/#kubelet",icon_filename:"kubernetes.svg",categories:["data-collection.kubernetes"]},keywords:["kubelet","kubernetes","k8s"],related_resources:{integrations:{list:[{plugin_name:"apps.plugin",module_name:"apps"}]}},info_provided_to_referring_integrations:{description:""},most_popular:!0},overview:"# Kubelet\n\nPlugin: go.d.plugin\nModule: k8s_kubelet\n\n## Overview\n\nThis collector monitors Kubelet instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/k8s_kubelet.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/k8s_kubelet.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. 
| http://127.0.0.1:10255/metrics | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:10255/metrics\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:10250/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `k8s_kubelet` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m k8s_kubelet\n ```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ kubelet_node_config_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/kubelet.conf) | k8s_kubelet.kubelet_node_config_error | the node is experiencing a configuration-related error (0: false, 1: true) |\n| [ kubelet_token_requests ](https://github.com/netdata/netdata/blob/master/src/health/health.d/kubelet.conf) | k8s_kubelet.kubelet_token_requests | number of failed Token() requests to the alternate token source |\n| [ kubelet_operations_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/kubelet.conf) | k8s_kubelet.kubelet_operations_errors | number of Docker or runtime operation errors |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Kubelet instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_kubelet.apiserver_audit_requests_rejected | rejected | requests/s |\n| k8s_kubelet.apiserver_storage_data_key_generation_failures | failures | events/s |\n| k8s_kubelet.apiserver_storage_data_key_generation_latencies | 5_\xb5s, 10_\xb5s, 20_\xb5s, 40_\xb5s, 80_\xb5s, 160_\xb5s, 320_\xb5s, 640_\xb5s, 1280_\xb5s, 2560_\xb5s, 5120_\xb5s, 10240_\xb5s, 20480_\xb5s, 40960_\xb5s, +Inf | observes/s |\n| k8s_kubelet.apiserver_storage_data_key_generation_latencies_percent | 5_\xb5s, 10_\xb5s, 20_\xb5s, 40_\xb5s, 80_\xb5s, 160_\xb5s, 320_\xb5s, 640_\xb5s, 1280_\xb5s, 2560_\xb5s, 5120_\xb5s, 10240_\xb5s, 20480_\xb5s, 40960_\xb5s, +Inf | percentage |\n| k8s_kubelet.apiserver_storage_envelope_transformation_cache_misses | cache misses | events/s |\n| k8s_kubelet.kubelet_containers_running | total | running_containers |\n| k8s_kubelet.kubelet_pods_running | total | running_pods |\n| k8s_kubelet.kubelet_pods_log_filesystem_used_bytes | a dimension per namespace and pod | B |\n| k8s_kubelet.kubelet_runtime_operations | a dimension per operation type | operations/s |\n| k8s_kubelet.kubelet_runtime_operations_errors | a dimension per operation type | errors/s |\n| k8s_kubelet.kubelet_docker_operations | a dimension per operation type | operations/s |\n| k8s_kubelet.kubelet_docker_operations_errors | a dimension per operation type | errors/s |\n| k8s_kubelet.kubelet_node_config_error | experiencing_error | bool |\n| k8s_kubelet.kubelet_pleg_relist_interval_microseconds | 0.5, 0.9, 0.99 | microseconds |\n| k8s_kubelet.kubelet_pleg_relist_latency_microseconds | 0.5, 0.9, 0.99 | microseconds |\n| k8s_kubelet.kubelet_token_requests | total, failed | token_requests/s |\n| k8s_kubelet.rest_client_requests_by_code | a dimension per HTTP status code | requests/s |\n| k8s_kubelet.rest_client_requests_by_method | a dimension per HTTP method | requests/s |\n\n### Per volume manager\n\nThese metrics refer to the Volume Manager.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_kubelet.volume_manager_total_volumes | actual, desired | state |\n\n",integration_type:"collector",id:"go.d.plugin-k8s_kubelet-Kubelet",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/k8s_kubelet/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-k8s_kubeproxy",plugin_name:"go.d.plugin",module_name:"k8s_kubeproxy",monitored_instance:{name:"Kubeproxy",link:"https://kubernetes.io/docs/concepts/overview/components/#kube-proxy",icon_filename:"kubernetes.svg",categories:["data-collection.kubernetes"]},keywords:["kubeproxy","kubernetes","k8s"],related_resources:{integrations:{list:[{plugin_name:"apps.plugin",module_name:"apps"}]}},info_provided_to_referring_integrations:{description:""},most_popular:!0},overview:"# Kubeproxy\n\nPlugin: go.d.plugin\nModule: k8s_kubeproxy\n\n## Overview\n\nThis collector monitors Kubeproxy instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not 
impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/k8s_kubeproxy.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/k8s_kubeproxy.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:10249/metrics | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:10249/metrics\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:10249/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `k8s_kubeproxy` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m k8s_kubeproxy\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Kubeproxy instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_kubeproxy.kubeproxy_sync_proxy_rules | sync_proxy_rules | events/s |\n| k8s_kubeproxy.kubeproxy_sync_proxy_rules_latency_microsecond | 0.001, 0.002, 0.004, 0.008, 0.016, 0.032, 0.064, 0.128, 0.256, 0.512, 1.024, 2.048, 4.096, 8.192, 16.384, +Inf | observes/s |\n| k8s_kubeproxy.kubeproxy_sync_proxy_rules_latency | 0.001, 0.002, 0.004, 0.008, 0.016, 0.032, 0.064, 0.128, 0.256, 0.512, 1.024, 2.048, 4.096, 8.192, 16.384, +Inf | percentage |\n| k8s_kubeproxy.rest_client_requests_by_code | a dimension per HTTP status code | requests/s |\n| k8s_kubeproxy.rest_client_requests_by_method | a dimension per HTTP method | requests/s |\n| k8s_kubeproxy.http_request_duration | 0.5, 0.9, 0.99 | microseconds |\n\n",integration_type:"collector",id:"go.d.plugin-k8s_kubeproxy-Kubeproxy",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/k8s_kubeproxy/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-k8s_state",plugin_name:"go.d.plugin",module_name:"k8s_state",monitored_instance:{name:"Kubernetes Cluster State",link:"https://kubernetes.io/",icon_filename:"kubernetes.svg",categories:["data-collection.kubernetes"]},keywords:["kubernetes","k8s"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!0},overview:"# Kubernetes Cluster State\n\nPlugin: go.d.plugin\nModule: k8s_state\n\n## Overview\n\nThis collector monitors Kubernetes Nodes, Pods and Containers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/k8s_state.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/k8s_state.conf\n```\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `k8s_state` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m k8s_state\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per node\n\nThese metrics refer to the Node.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| k8s_cluster_id | Cluster ID. This is equal to the kube-system namespace UID. |\n| k8s_cluster_name | Cluster name. Cluster name discovery only works in GKE. |\n| k8s_node_name | Node name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_state.node_allocatable_cpu_requests_utilization | requests | % |\n| k8s_state.node_allocatable_cpu_requests_used | requests | millicpu |\n| k8s_state.node_allocatable_cpu_limits_utilization | limits | % |\n| k8s_state.node_allocatable_cpu_limits_used | limits | millicpu |\n| k8s_state.node_allocatable_mem_requests_utilization | requests | % |\n| k8s_state.node_allocatable_mem_requests_used | requests | bytes |\n| k8s_state.node_allocatable_mem_limits_utilization | limits | % |\n| k8s_state.node_allocatable_mem_limits_used | limits | bytes |\n| k8s_state.node_allocatable_pods_utilization | allocated | % |\n| k8s_state.node_allocatable_pods_usage | available, allocated | pods |\n| k8s_state.node_condition | a dimension per condition | status |\n| k8s_state.node_schedulability | schedulable, unschedulable | state |\n| k8s_state.node_pods_readiness | ready | % |\n| k8s_state.node_pods_readiness_state | ready, unready | pods |\n| k8s_state.node_pods_condition | pod_ready, pod_scheduled, pod_initialized, containers_ready | pods |\n| k8s_state.node_pods_phase | running, failed, succeeded, pending | pods |\n| k8s_state.node_containers | containers, init_containers | containers |\n| k8s_state.node_containers_state | running, waiting, terminated | containers |\n| k8s_state.node_init_containers_state | running, waiting, terminated | containers |\n| k8s_state.node_age | age | seconds |\n\n### Per pod\n\nThese metrics refer to the Pod.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| k8s_cluster_id | Cluster ID. This is equal to the kube-system namespace UID. |\n| k8s_cluster_name | Cluster name. Cluster name discovery only works in GKE. |\n| k8s_node_name | Node name. |\n| k8s_namespace | Namespace. |\n| k8s_controller_kind | Controller kind (ReplicaSet, DaemonSet, StatefulSet, Job, etc.). |\n| k8s_controller_name | Controller name. |\n| k8s_pod_name | Pod name. |\n| k8s_qos_class | Pod QOS class (burstable, guaranteed, besteffort). 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_state.pod_cpu_requests_used | requests | millicpu |\n| k8s_state.pod_cpu_limits_used | limits | millicpu |\n| k8s_state.pod_mem_requests_used | requests | bytes |\n| k8s_state.pod_mem_limits_used | limits | bytes |\n| k8s_state.pod_condition | pod_ready, pod_scheduled, pod_initialized, containers_ready | state |\n| k8s_state.pod_phase | running, failed, succeeded, pending | state |\n| k8s_state.pod_age | age | seconds |\n| k8s_state.pod_containers | containers, init_containers | containers |\n| k8s_state.pod_containers_state | running, waiting, terminated | containers |\n| k8s_state.pod_init_containers_state | running, waiting, terminated | containers |\n\n### Per container\n\nThese metrics refer to the Pod container.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| k8s_cluster_id | Cluster ID. This is equal to the kube-system namespace UID. |\n| k8s_cluster_name | Cluster name. Cluster name discovery only works in GKE. |\n| k8s_node_name | Node name. |\n| k8s_namespace | Namespace. |\n| k8s_controller_kind | Controller kind (ReplicaSet, DaemonSet, StatefulSet, Job, etc.). |\n| k8s_controller_name | Controller name. |\n| k8s_pod_name | Pod name. |\n| k8s_qos_class | Pod QOS class (burstable, guaranteed, besteffort). |\n| k8s_container_name | Container name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_state.pod_container_readiness_state | ready | state |\n| k8s_state.pod_container_restarts | restarts | restarts |\n| k8s_state.pod_container_state | running, waiting, terminated | state |\n| k8s_state.pod_container_waiting_state_reason | a dimension per reason | state |\n| k8s_state.pod_container_terminated_state_reason | a dimension per reason | state |\n\n",integration_type:"collector",id:"go.d.plugin-k8s_state-Kubernetes_Cluster_State",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/k8s_state/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-lighttpd",plugin_name:"go.d.plugin",module_name:"lighttpd",monitored_instance:{name:"Lighttpd",link:"https://www.lighttpd.net/",icon_filename:"lighttpd.svg",categories:["data-collection.web-servers-and-web-proxies"]},keywords:["webserver"],related_resources:{integrations:{list:[{plugin_name:"go.d.plugin",module_name:"weblog"},{plugin_name:"go.d.plugin",module_name:"httpcheck"},{plugin_name:"apps.plugin",module_name:"apps"}]}},info_provided_to_referring_integrations:{description:""},most_popular:!0},overview:"# Lighttpd\n\nPlugin: go.d.plugin\nModule: lighttpd\n\n## Overview\n\nThis collector monitors the activity and performance of Lighttpd servers, and collects metrics such as the number of connections, workers, requests and more.\n\n\nIt sends HTTP requests to the Lighttpd location [server-status](https://redmine.lighttpd.net/projects/lighttpd/wiki/Mod_status), \nwhich is a built-in location that provides metrics about the Lighttpd server.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Lighttpd instances running on localhost that are listening on port 80.\nOn startup, it tries to collect metrics from:\n\n- http://localhost/server-status?auto\n- http://127.0.0.1/server-status?auto\n\n\n#### Limits\n\nThe default configuration for this 
integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Enable Lighttpd status support\n\nTo enable status support, see the [official documentation](https://redmine.lighttpd.net/projects/lighttpd/wiki/Mod_status).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/lighttpd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/lighttpd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/server-status?auto | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nLighttpd with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1/server-status?auto\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n\n - name: remote\n url: http://192.0.2.1/server-status?auto\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `lighttpd` collector, run the `go.d.plugin` with the debug option enabled. 
The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m lighttpd\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Lighttpd instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| lighttpd.requests | requests | requests/s |\n| lighttpd.net | sent | kilobits/s |\n| lighttpd.workers | idle, busy | servers |\n| lighttpd.scoreboard | waiting, open, close, hard_error, keepalive, read, read_post, write, handle_request, request_start, request_end | connections |\n| lighttpd.uptime | uptime | seconds |\n\n",integration_type:"collector",id:"go.d.plugin-lighttpd-Lighttpd",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/lighttpd/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-litespeed",plugin_name:"go.d.plugin",module_name:"litespeed",monitored_instance:{name:"Litespeed",link:"https://www.litespeedtech.com/products/litespeed-web-server",categories:["data-collection.web-servers-and-web-proxies"],icon_filename:"litespeed.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["litespeed","web","server"],most_popular:!1},overview:"# Litespeed\n\nPlugin: go.d.plugin\nModule: litespeed\n\n## Overview\n\nExamine Litespeed metrics for insights into web server operations. 
Analyze request rates, response times, and error rates for efficient web service delivery.\n\nThe collector uses the statistics under /tmp/lshttpd to gather the metrics.\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf no configuration is present, the collector will attempt to read files under /tmp/lshttpd/.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/litespeed.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/litespeed.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| reports_dir | Directory containing Litespeed's real-time statistics files. | /tmp/lshttpd/ | no |\n\n{% /details %}\n#### Examples\n\n##### Set the path to statistics\n\nChange the directory containing the Litespeed statistics files using the `reports_dir` option.\n\n```yaml\njobs:\n - name: local\n reports_dir: '/tmp/lshttpd'\n\n```\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `litespeed` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m litespeed\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Litespeed instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| litespeed.requests | requests | requests/s |\n| litespeed.requests_processing | processing | requests |\n| litespeed.net_throughput | in, out | kilobits/s |\n| litespeed.net_ssl_throughput | in, out | kilobits/s |\n| litespeed.connections | free, used | conns |\n| litespeed.ssl_connections | free, used | conns |\n| litespeed.public_cache | hits | hits/s |\n| litespeed.private_cache | hits | hits/s |\n| litespeed.static | hits | hits/s |\n\n",integration_type:"collector",id:"go.d.plugin-litespeed-Litespeed",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/litespeed/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-logind",plugin_name:"go.d.plugin",module_name:"logind",monitored_instance:{name:"systemd-logind users",link:"https://www.freedesktop.org/software/systemd/man/systemd-logind.service.html",icon_filename:"users.svg",categories:["data-collection.systemd"]},keywords:["logind","systemd"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1},overview:"# systemd-logind users\n\nPlugin: go.d.plugin\nModule: logind\n\n## Overview\n\nThis collector monitors the number of sessions and users as reported by the `org.freedesktop.login1` DBus API.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/logind.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/logind.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n#### Examples\nThere are no configuration examples.\n\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `logind` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m logind\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per systemd-logind users instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| logind.sessions | remote, local | sessions |\n| logind.sessions_type | console, graphical, other | sessions |\n| logind.sessions_state | online, closing, active | sessions |\n| logind.users_state | offline, closing, online, lingering, active | users |\n\n",integration_type:"collector",id:"go.d.plugin-logind-systemd-logind_users",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/logind/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-logstash",plugin_name:"go.d.plugin",module_name:"logstash",monitored_instance:{name:"Logstash",link:"https://www.elastic.co/products/logstash",icon_filename:"elastic-logstash.svg",categories:["data-collection.logs-servers"]},keywords:["logstash"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1},overview:"# Logstash\n\nPlugin: go.d.plugin\nModule: logstash\n\n## Overview\n\nThis collector monitors Logstash instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/logstash.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/logstash.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://localhost:9600 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. 
| | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://localhost:9600\n\n```\n{% /details %}\n##### HTTP authentication\n\nHTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://localhost:9600\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nHTTPS and self-signed certificate.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://localhost:9600\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://localhost:9600\n\n - name: remote\n url: http://192.0.2.1:9600\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `logstash` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m logstash\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Logstash instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| logstash.jvm_threads | threads | count |\n| logstash.jvm_mem_heap_used | in_use | percentage |\n| logstash.jvm_mem_heap | committed, used | KiB |\n| logstash.jvm_mem_pools_eden | committed, used | KiB |\n| logstash.jvm_mem_pools_survivor | committed, used | KiB |\n| logstash.jvm_mem_pools_old | committed, used | KiB |\n| logstash.jvm_gc_collector_count | eden, old | counts/s |\n| logstash.jvm_gc_collector_time | eden, old | ms |\n| logstash.open_file_descriptors | open | fd |\n| logstash.event | in, filtered, out | events/s |\n| logstash.event_duration | event, queue | seconds |\n| logstash.uptime | uptime | seconds |\n\n### Per pipeline\n\nThese metrics refer to the pipeline.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| pipeline | pipeline name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| logstash.pipeline_event | in, filtered, out | events/s |\n| logstash.pipeline_event_duration | event, queue | seconds |\n\n",integration_type:"collector",id:"go.d.plugin-logstash-Logstash",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/logstash/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-lvm",plugin_name:"go.d.plugin",module_name:"lvm",monitored_instance:{name:"LVM logical volumes",link:"",icon_filename:"filesystem.svg",categories:["data-collection.storage-mount-points-and-filesystems"]},keywords:["lvm","lvs"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1},overview:"# LVM logical volumes\n\nPlugin: go.d.plugin\nModule: lvm\n\n## Overview\n\nThis collector monitors the health of LVM logical volumes. It relies on the [`lvs`](https://man7.org/linux/man-pages/man8/lvs.8.html) CLI tool but avoids directly executing the binary. Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment. 
This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/lvm.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/lvm.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | lvs binary execution timeout. | 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: lvm\n update_every: 5 # Collect logical volume statistics every 5 seconds\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `lvm` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m lvm\n ```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ lvm_lv_data_space_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/lvm.conf) | lvm.lv_data_space_utilization | LVM logical volume high data space usage (LV ${label:lv_name} VG ${label:vg_name} Type ${label:volume_type}) |\n| [ lvm_lv_metadata_space_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/lvm.conf) | lvm.lv_metadata_space_utilization | LVM logical volume high metadata space usage (LV ${label:lv_name} VG ${label:vg_name} Type ${label:volume_type}) |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per logical volume\n\nThese metrics refer to the LVM logical volume.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| lv_name | Logical volume name |\n| vg_name | Volume group name |\n| volume_type | Type of the volume |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| lvm.lv_data_space_utilization | utilization | % |\n| lvm.lv_metadata_space_utilization | utilization | % |\n\n",integration_type:"collector",id:"go.d.plugin-lvm-LVM_logical_volumes",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/lvm/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-megacli",plugin_name:"go.d.plugin",module_name:"megacli",monitored_instance:{name:"MegaCLI MegaRAID",link:"https://wikitech.wikimedia.org/wiki/MegaCli",icon_filename:"hard-drive.svg",categories:["data-collection.storage-mount-points-and-filesystems"]},keywords:["storage","raid-controller","manage-disks"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1},overview:"# MegaCLI MegaRAID\n\nPlugin: go.d.plugin\nModule: megacli\n\n## Overview\n\nMonitors the health of MegaCLI Hardware RAID by tracking the status of RAID adapters, physical drives, and backup batteries in your storage system.\nIt relies on the `megacli` CLI tool but avoids directly executing the binary.\nInstead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment.\nThis approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\n\nExecuted commands:\n- `megacli -LDPDInfo -aAll -NoLog`\n- `megacli -AdpBbuCmd -aAll -NoLog`\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/megacli.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/megacli.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | megacli binary execution timeout. 
| 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: megacli\n update_every: 5 # Collect MegaCLI Hardware RAID statistics every 5 seconds\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `megacli` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m megacli\n ```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ megacli_adapter_health_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/megacli.conf) | megacli.adapter_health_state | MegaCLI adapter ${label:adapter_number} is in the degraded state |\n| [ megacli_phys_drive_media_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/megacli.conf) | megacli.phys_drive_media_errors | MegaCLI physical drive adapter ${label:adapter_number} slot ${label:slot_number} media errors |\n| [ megacli_phys_drive_predictive_failures ](https://github.com/netdata/netdata/blob/master/src/health/health.d/megacli.conf) | megacli.phys_drive_predictive_failures | MegaCLI physical drive (adapter ${label:adapter_number} slot ${label:slot_number}) predictive failures |\n| [ megacli_bbu_charge ](https://github.com/netdata/netdata/blob/master/src/health/health.d/megacli.conf) | megacli.bbu_charge | MegaCLI Backup Battery Unit (adapter ${label:adapter_number}) average charge over the last minute |\n| [ megacli_bbu_recharge_cycles ](https://github.com/netdata/netdata/blob/master/src/health/health.d/megacli.conf) | megacli.bbu_recharge_cycles | MegaCLI Backup Battery Unit (adapter ${label:adapter_number}) average number of recharge cycles over the last minute |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per adapter\n\nThese metrics refer to the MegaCLI Adapter.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| adapter_number | Adapter number |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| megacli.adapter_health_state | optimal, degraded, partially_degraded, failed | state |\n\n### Per physical drive\n\nThese metrics refer to the MegaCLI Physical Drive.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| adapter_number | Adapter number |\n| wwn | World Wide Name |\n| slot_number | Slot number |\n| drive_position | Position (e.g. DiskGroup: 0, Span: 0, Arm: 2) |\n| drive_type | Type (e.g. 
SATA) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| megacli.phys_drive_media_errors_rate | media_errors | errors/s |\n| megacli.phys_drive_predictive_failures_rate | predictive_failures | failures/s |\n\n### Per backup battery unit\n\nThese metrics refer to the MegaCLI Backup Battery Unit.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| adapter_number | Adapter number |\n| battery_type | Battery type (e.g. BBU) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| megacli.bbu_relative_charge | charge | percentage |\n| megacli.bbu_recharge_cycles | recharge | cycles |\n| megacli.bbu_temperature | temperature | Celsius |\n\n",integration_type:"collector",id:"go.d.plugin-megacli-MegaCLI_MegaRAID",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/megacli/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-mongodb",plugin_name:"go.d.plugin",module_name:"mongodb",monitored_instance:{name:"MongoDB",link:"https://www.mongodb.com/",icon_filename:"mongodb.svg",categories:["data-collection.database-servers"]},keywords:["mongodb","databases"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1},overview:"# MongoDB\n\nPlugin: go.d.plugin\nModule: mongodb\n\n## Overview\n\nThis collector monitors MongoDB servers.\n\nExecuted queries:\n\n- [serverStatus](https://docs.mongodb.com/manual/reference/command/serverStatus/)\n- [dbStats](https://docs.mongodb.com/manual/reference/command/dbStats/)\n- [replSetGetStatus](https://www.mongodb.com/docs/manual/reference/command/replSetGetStatus/)\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Create a read-only user\n\nCreate a read-only user for Netdata in the admin database.\n\n- Authenticate as the admin user:\n\n ```javascript\n use admin\n db.auth("admin", "")\n ```\n\n- Create a user:\n\n ```javascript\n db.createUser({\n "user":"netdata",\n "pwd": "",\n "roles" : [\n {role: \'read\', db: \'admin\' },\n {role: \'clusterMonitor\', db: \'admin\'},\n {role: \'read\', db: \'local\' }\n ]\n })\n ```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/mongodb.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/mongodb.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| uri | MongoDB connection string. 
See [URI syntax](https://www.mongodb.com/docs/manual/reference/connection-string/). | mongodb://localhost:27017 | yes |\n| timeout | Query timeout in seconds. | 1 | no |\n| databases | Databases selector. Determines which database metrics will be collected. | | no |\n\n{% /details %}\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n uri: mongodb://netdata:password@localhost:27017\n\n```\n{% /details %}\n##### With databases metrics\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n uri: mongodb://netdata:password@localhost:27017\n databases:\n includes:\n - "* *"\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n uri: mongodb://netdata:password@localhost:27017\n\n - name: remote\n uri: mongodb://netdata:password@203.0.113.0:27017\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `mongodb` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m mongodb\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n- WiredTiger metrics are available only if [WiredTiger](https://docs.mongodb.com/v6.0/core/wiredtiger/) is used as the\n storage engine.\n- Sharding metrics are available on shards only\n for [mongos](https://www.mongodb.com/docs/manual/reference/program/mongos/).\n\n\n### Per MongoDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mongodb.operations_rate | reads, writes, commands | operations/s |\n| mongodb.operations_latency_time | reads, writes, commands | milliseconds |\n| mongodb.operations_by_type_rate | insert, query, update, delete, getmore, command | operations/s |\n| mongodb.document_operations_rate | inserted, deleted, returned, updated | operations/s |\n| mongodb.scanned_indexes_rate | scanned | indexes/s |\n| mongodb.scanned_documents_rate | scanned | documents/s |\n| mongodb.active_clients_count | readers, writers | clients |\n| mongodb.queued_operations_count | reads, writes | operations |\n| mongodb.cursors_open_count | open | cursors |\n| mongodb.cursors_open_no_timeout_count | open_no_timeout | cursors |\n| mongodb.cursors_opened_rate | opened | cursors/s |\n| mongodb.cursors_timed_out_rate | timed_out | cursors/s |\n| mongodb.cursors_by_lifespan_count | le_1s, 1s_5s, 5s_15s, 15s_30s, 30s_1m, 1m_10m, ge_10m | cursors |\n| mongodb.transactions_count | active, inactive, open, prepared | transactions |\n| mongodb.transactions_rate | started, aborted, committed, prepared | transactions/s |\n| mongodb.connections_usage | available, used | connections |\n| mongodb.connections_by_state_count | active, threaded, exhaust_is_master, exhaust_hello, awaiting_topology_changes | connections |\n| mongodb.connections_rate | created | connections/s |\n| mongodb.asserts_rate | regular, warning, msg, user, tripwire, rollovers | asserts/s |\n| mongodb.network_traffic_rate | in, out | bytes/s |\n| mongodb.network_requests_rate | requests | requests/s |\n| mongodb.network_slow_dns_resolutions_rate | slow_dns | resolutions/s |\n| mongodb.network_slow_ssl_handshakes_rate | slow_ssl | handshakes/s |\n| mongodb.memory_resident_size | used | bytes |\n| mongodb.memory_virtual_size | used | bytes |\n| mongodb.memory_page_faults_rate | pgfaults | pgfaults/s |\n| mongodb.memory_tcmalloc_stats | allocated, central_cache_freelist, transfer_cache_freelist, thread_cache_freelists, pageheap_freelist, pageheap_unmapped | bytes |\n| mongodb.wiredtiger_concurrent_read_transactions_usage | available, used | transactions |\n| mongodb.wiredtiger_concurrent_write_transactions_usage | available, used | transactions |\n| mongodb.wiredtiger_cache_usage | used | bytes |\n| mongodb.wiredtiger_cache_dirty_space_size | dirty | bytes |\n| mongodb.wiredtiger_cache_io_rate | read, written | pages/s |\n| mongodb.wiredtiger_cache_evictions_rate | unmodified, modified | pages/s |\n| mongodb.sharding_nodes_count | shard_aware, shard_unaware | nodes |\n| mongodb.sharding_sharded_databases_count | partitioned, unpartitioned | databases |\n| mongodb.sharding_sharded_collections_count | partitioned, unpartitioned | collections |\n\n### Per lock type\n\nThese metrics refer to the lock type.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| lock_type | lock type (e.g. 
global, database, collection, mutex) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mongodb.lock_acquisitions_rate | shared, exclusive, intent_shared, intent_exclusive | acquisitions/s |\n\n### Per commit type\n\nThese metrics refer to the commit type.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| commit_type | commit type (e.g. noShards, singleShard, singleWriteShard) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mongodb.transactions_commits_rate | success, fail | commits/s |\n| mongodb.transactions_commits_duration_time | commits | milliseconds |\n\n### Per database\n\nThese metrics refer to the database.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| database | database name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mongodb.database_collection_count | collections | collections |\n| mongodb.database_indexes_count | indexes | indexes |\n| mongodb.database_views_count | views | views |\n| mongodb.database_documents_count | documents | documents |\n| mongodb.database_data_size | data_size | bytes |\n| mongodb.database_storage_size | storage_size | bytes |\n| mongodb.database_index_size | index_size | bytes |\n\n### Per replica set member\n\nThese metrics refer to the replica set member.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| repl_set_member | replica set member name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mongodb.repl_set_member_state | primary, startup, secondary, recovering, startup2, unknown, arbiter, down, rollback, removed | state |\n| mongodb.repl_set_member_health_status | up, down | status |\n| mongodb.repl_set_member_replication_lag_time | replication_lag | milliseconds |\n| mongodb.repl_set_member_heartbeat_latency_time | heartbeat_latency | milliseconds |\n| mongodb.repl_set_member_ping_rtt_time | ping_rtt | milliseconds |\n| mongodb.repl_set_member_uptime | uptime | seconds |\n\n### Per shard\n\nThese metrics refer to the shard.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| shard_id | shard id |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mongodb.sharding_shard_chunks_count | chunks | chunks |\n\n",integration_type:"collector",id:"go.d.plugin-mongodb-MongoDB",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/mongodb/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-mariadb",plugin_name:"go.d.plugin",module_name:"mysql",monitored_instance:{name:"MariaDB",link:"https://mariadb.org/",icon_filename:"mariadb.svg",categories:["data-collection.database-servers"]},related_resources:{integrations:{list:[{plugin_name:"apps.plugin",module_name:"apps"},{plugin_name:"cgroups.plugin",module_name:"cgroups"}]}},info_provided_to_referring_integrations:{description:""},keywords:["db","database","mysql","maria","mariadb","sql"],most_popular:!0},overview:'# MariaDB\n\nPlugin: go.d.plugin\nModule: mysql\n\n## Overview\n\nThis collector monitors the health and performance of MySQL servers and collects general statistics, replication and user metrics.\n\n\nIt connects to the MySQL instance via a TCP or UNIX socket and executes the following commands:\n\nExecuted queries:\n\n- `SELECT VERSION();`\n- `SHOW GLOBAL STATUS;`\n- `SHOW GLOBAL VARIABLES;`\n- `SHOW SLAVE STATUS;` or `SHOW ALL SLAVES STATUS;` (MariaDB v10.2+) or `SHOW REPLICA STATUS;` 
(MySQL 8.0.22+)\n- `SHOW USER_STATISTICS;` (MariaDB v10.1.1+)\n- `SELECT TIME,USER FROM INFORMATION_SCHEMA.PROCESSLIST;`\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by trying to connect as root and netdata using known MySQL TCP and UNIX sockets:\n\n- /var/run/mysqld/mysqld.sock\n- /var/run/mysqld/mysql.sock\n- /var/lib/mysql/mysql.sock\n- /tmp/mysql.sock\n- 127.0.0.1:3306\n- "[::1]:3306"\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n',setup:'## Setup\n\n### Prerequisites\n\n#### Create netdata user\n\nA user account should have the\nfollowing [permissions](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html):\n\n- [`USAGE`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_usage)\n- [`REPLICATION CLIENT`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_replication-client)\n- [`PROCESS`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_process)\n\nTo create the `netdata` user with these permissions, execute the following in the MySQL shell:\n\n```mysql\nCREATE USER \'netdata\'@\'localhost\';\nGRANT USAGE, REPLICATION CLIENT, PROCESS ON *.* TO \'netdata\'@\'localhost\';\nFLUSH PRIVILEGES;\n```\n\nThe `netdata` user will have the ability to connect to the MySQL server on localhost without a password. It will only\nbe able to gather statistics without being able to alter or affect operations in any way.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/mysql.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/mysql.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | MySQL server DSN (Data Source Name). See [DSN syntax](https://github.com/go-sql-driver/mysql#dsn-data-source-name). | root@tcp(localhost:3306)/ | yes |\n| my.cnf | Specifies the my.cnf file to read the connection settings from the [client] section. | | no |\n| timeout | Query timeout in seconds. 
| 1 | no |\n\n{% /details %}\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n dsn: netdata@tcp(127.0.0.1:3306)/\n\n```\n{% /details %}\n##### Unix socket\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n dsn: netdata@unix(/var/lib/mysql/mysql.sock)/\n\n```\n{% /details %}\n##### Connection with password\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n dsn: netdata:password@tcp(127.0.0.1:3306)/\n\n```\n{% /details %}\n##### my.cnf\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n my.cnf: \'/etc/my.cnf\'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n dsn: netdata@tcp(127.0.0.1:3306)/\n\n - name: remote\n dsn: netdata:password@tcp(203.0.113.0:3306)/\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `mysql` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m mysql\n ```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ mysql_10s_slow_queries ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.queries | number of slow queries in the last 10 seconds |\n| [ mysql_10s_table_locks_immediate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table immediate locks in the last 10 seconds |\n| [ mysql_10s_table_locks_waited ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table waited locks in the last 10 seconds |\n| [ mysql_10s_waited_locks_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | ratio of waited table locks over the last 10 seconds |\n| [ mysql_connections ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.connections_active | client connections utilization |\n| [ mysql_replication ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_status | replication status (0: stopped, 1: working) |\n| [ mysql_replication_lag ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_behind | difference between the timestamp of the latest transaction processed by the SQL thread and the timestamp of the same transaction when it was processed on the master |\n| [ mysql_galera_cluster_size_max_2m ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | maximum galera cluster size in the last 2 minutes starting 
one minute ago |\n| [ mysql_galera_cluster_size ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | current galera cluster size, compared to the maximum size in the last 2 minutes |\n| [ mysql_galera_cluster_state_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Donor/Desynced or Joined |\n| [ mysql_galera_cluster_state_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Undefined or Joining or Error |\n| [ mysql_galera_cluster_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_status | galera node is part of a nonoperational component. This occurs in cases of multiple membership changes that result in a loss of Quorum or in cases of split-brain situations. |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per MariaDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.net | in, out | kilobits/s | \u2022 | \u2022 | \u2022 |\n| mysql.queries | queries, questions, slow_queries | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.queries_type | select, delete, update, insert, replace | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.handlers | commit, delete, prepare, read_first, read_key, read_next, read_prev, read_rnd, read_rnd_next, rollback, savepoint, savepointrollback, update, write | handlers/s | \u2022 | \u2022 | \u2022 |\n| mysql.table_open_cache_overflows | open_cache | overflows/s | \u2022 | \u2022 | \u2022 |\n| mysql.table_locks | immediate, waited | locks/s | \u2022 | \u2022 | \u2022 |\n| mysql.join_issues | full_join, full_range_join, range, range_check, scan | joins/s | \u2022 | \u2022 | \u2022 |\n| mysql.sort_issues | merge_passes, range, scan | issues/s | \u2022 | \u2022 | \u2022 |\n| mysql.tmp | disk_tables, files, tables | events/s | \u2022 | \u2022 | \u2022 |\n| mysql.connections | all, aborted | connections/s | \u2022 | \u2022 | \u2022 |\n| mysql.connections_active | active, limit, max_active | connections | \u2022 | \u2022 | \u2022 |\n| mysql.threads | connected, cached, running | threads | \u2022 | \u2022 | \u2022 |\n| mysql.threads_created | created | threads/s | \u2022 | \u2022 | \u2022 |\n| mysql.thread_cache_misses | misses | misses | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io | read, write | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io_ops | reads, writes, fsyncs | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io_pending_ops | reads, writes, fsyncs | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_log | waits, write_requests, writes | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_cur_row_lock | current waits | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_rows | inserted, read, updated, deleted | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_pages | data, dirty, free, misc, total | pages | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_pages_flushed | flush_pages | requests/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_bytes | data, dirty | MiB | \u2022 | \u2022 | 
\u2022 |\n| mysql.innodb_buffer_pool_read_ahead | all, evicted | pages/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_read_ahead_rnd | read-ahead | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_ops | disk_reads, wait_free | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log | fsyncs, writes | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log_fsync_writes | fsyncs | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log_io | write | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_deadlocks | deadlocks | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.files | files | files | \u2022 | \u2022 | \u2022 |\n| mysql.files_rate | files | files/s | \u2022 | \u2022 | \u2022 |\n| mysql.connection_errors | accept, internal, max, peer_addr, select, tcpwrap | errors/s | \u2022 | \u2022 | \u2022 |\n| mysql.opened_tables | tables | tables/s | \u2022 | \u2022 | \u2022 |\n| mysql.open_tables | cache, tables | tables | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_fetch_query_duration | duration | milliseconds | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_queries_count | system, user | queries | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_longest_query_duration | duration | seconds | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_ops | hits, lowmem_prunes, inserts, not_cached | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.qcache | queries | queries | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_freemem | free | MiB | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_memblocks | free, total | blocks | \u2022 | \u2022 | \u2022 |\n| mysql.galera_writesets | rx, tx | writesets/s | \u2022 | \u2022 | \u2022 |\n| mysql.galera_bytes | rx, tx | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.galera_queue | rx, tx | writesets | \u2022 | \u2022 | \u2022 |\n| mysql.galera_conflicts | bf_aborts, cert_fails | transactions | \u2022 | \u2022 | \u2022 |\n| mysql.galera_flow_control | paused | ms | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_status | primary, non_primary, disconnected | status | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_state | undefined, joining, donor, joined, synced, error | state | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_size | nodes | nodes | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_weight | weight | weight | \u2022 | \u2022 | \u2022 |\n| mysql.galera_connected | connected | boolean | \u2022 | \u2022 | \u2022 |\n| mysql.galera_ready | ready | boolean | \u2022 | \u2022 | \u2022 |\n| mysql.galera_open_transactions | open | transactions | \u2022 | \u2022 | \u2022 |\n| mysql.galera_thread_count | threads | threads | \u2022 | \u2022 | \u2022 |\n| mysql.key_blocks | unused, used, not_flushed | blocks | \u2022 | \u2022 | \u2022 |\n| mysql.key_requests | reads, writes | requests/s | \u2022 | \u2022 | \u2022 |\n| mysql.key_disk_ops | reads, writes | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.binlog_cache | disk, all | transactions/s | \u2022 | \u2022 | \u2022 |\n| mysql.binlog_stmt_cache | disk, all | statements/s | \u2022 | \u2022 | \u2022 |\n\n### Per connection\n\nThese metrics refer to the replication connection.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.slave_behind | seconds | seconds | \u2022 | \u2022 | \u2022 |\n| mysql.slave_status | sql_running, io_running | boolean | \u2022 | \u2022 | \u2022 |\n\n### Per user\n\nThese metrics refer to the MySQL user.\n\nLabels:\n\n| 
Label | Description |\n|:-----------|:----------------|\n| user | username |\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.userstats_cpu | used | percentage | | \u2022 | \u2022 |\n| mysql.userstats_rows | read, sent, updated, inserted, deleted | operations/s | | \u2022 | \u2022 |\n| mysql.userstats_commands | select, update, other | commands/s | | \u2022 | \u2022 |\n| mysql.userstats_denied_commands | denied | commands/s | | \u2022 | \u2022 |\n| mysql.userstats_created_transactions | commit, rollback | transactions/s | | \u2022 | \u2022 |\n| mysql.userstats_binlog_written | written | B/s | | \u2022 | \u2022 |\n| mysql.userstats_empty_queries | empty | queries/s | | \u2022 | \u2022 |\n| mysql.userstats_connections | created | connections/s | | \u2022 | \u2022 |\n| mysql.userstats_lost_connections | lost | connections/s | | \u2022 | \u2022 |\n| mysql.userstats_denied_connections | denied | connections/s | | \u2022 | \u2022 |\n\n",integration_type:"collector",id:"go.d.plugin-mysql-MariaDB",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/mysql/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-mysql",plugin_name:"go.d.plugin",module_name:"mysql",monitored_instance:{name:"MySQL",link:"https://www.mysql.com/",categories:["data-collection.database-servers"],icon_filename:"mysql.svg"},related_resources:{integrations:{list:[{plugin_name:"apps.plugin",module_name:"apps"},{plugin_name:"cgroups.plugin",module_name:"cgroups"}]}},info_provided_to_referring_integrations:{description:""},keywords:["db","database","mysql","maria","mariadb","sql"],most_popular:!0},overview:'# MySQL\n\nPlugin: go.d.plugin\nModule: mysql\n\n## Overview\n\nThis collector monitors the health and performance of MySQL servers and collects general statistics, replication and user metrics.\n\n\nIt connects to the MySQL instance via a TCP or UNIX socket and executes the following commands:\n\nExecuted queries:\n\n- `SELECT VERSION();`\n- `SHOW GLOBAL STATUS;`\n- `SHOW GLOBAL VARIABLES;`\n- `SHOW SLAVE STATUS;` or `SHOW ALL SLAVES STATUS;` (MariaDB v10.2+) or `SHOW REPLICA STATUS;` (MySQL 8.0.22+)\n- `SHOW USER_STATISTICS;` (MariaDB v10.1.1+)\n- `SELECT TIME,USER FROM INFORMATION_SCHEMA.PROCESSLIST;`\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by trying to connect as root and netdata using known MySQL TCP and UNIX sockets:\n\n- /var/run/mysqld/mysqld.sock\n- /var/run/mysqld/mysql.sock\n- /var/lib/mysql/mysql.sock\n- /tmp/mysql.sock\n- 127.0.0.1:3306\n- "[::1]:3306"\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n',setup:'## Setup\n\n### Prerequisites\n\n#### Create netdata user\n\nA user account should have the\nfollowing [permissions](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html):\n\n- [`USAGE`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_usage)\n- [`REPLICATION CLIENT`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_replication-client)\n- 
[`PROCESS`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_process)\n\nTo create the `netdata` user with these permissions, execute the following in the MySQL shell:\n\n```mysql\nCREATE USER \'netdata\'@\'localhost\';\nGRANT USAGE, REPLICATION CLIENT, PROCESS ON *.* TO \'netdata\'@\'localhost\';\nFLUSH PRIVILEGES;\n```\n\nThe `netdata` user will have the ability to connect to the MySQL server on localhost without a password. It will only\nbe able to gather statistics without being able to alter or affect operations in any way.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/mysql.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/mysql.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | MySQL server DSN (Data Source Name). See [DSN syntax](https://github.com/go-sql-driver/mysql#dsn-data-source-name). | root@tcp(localhost:3306)/ | yes |\n| my.cnf | Specifies the my.cnf file to read the connection settings from the [client] section. | | no |\n| timeout | Query timeout in seconds. | 1 | no |\n\n{% /details %}\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n dsn: netdata@tcp(127.0.0.1:3306)/\n\n```\n{% /details %}\n##### Unix socket\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n dsn: netdata@unix(/var/lib/mysql/mysql.sock)/\n\n```\n{% /details %}\n##### Connection with password\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n dsn: netdata:password@tcp(127.0.0.1:3306)/\n\n```\n{% /details %}\n##### my.cnf\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n my.cnf: \'/etc/my.cnf\'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n dsn: netdata@tcp(127.0.0.1:3306)/\n\n - name: remote\n dsn: netdata:password@tcp(203.0.113.0:3306)/\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `mysql` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m mysql\n ```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ mysql_10s_slow_queries ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.queries | number of slow queries in the last 10 seconds |\n| [ mysql_10s_table_locks_immediate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table immediate locks in the last 10 seconds |\n| [ mysql_10s_table_locks_waited ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table waited locks in the last 10 seconds |\n| [ mysql_10s_waited_locks_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | ratio of waited table locks over the last 10 seconds |\n| [ mysql_connections ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.connections_active | client connections utilization |\n| [ mysql_replication ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_status | replication status (0: stopped, 1: working) |\n| [ mysql_replication_lag ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_behind | difference between the timestamp of the latest transaction processed by the SQL thread and the timestamp of the same transaction when it was processed on the master |\n| [ mysql_galera_cluster_size_max_2m ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | maximum galera cluster size in the last 2 minutes starting one minute ago |\n| [ mysql_galera_cluster_size ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | current galera cluster size, compared to the maximum size in the last 2 minutes |\n| [ mysql_galera_cluster_state_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Donor/Desynced or Joined |\n| [ mysql_galera_cluster_state_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Undefined or Joining or Error |\n| [ mysql_galera_cluster_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_status | galera node is part of a nonoperational component. This occurs in cases of multiple membership changes that result in a loss of Quorum or in cases of split-brain situations. |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per MariaDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.net | in, out | kilobits/s | \u2022 | \u2022 | \u2022 |\n| mysql.queries | queries, questions, slow_queries | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.queries_type | select, delete, update, insert, replace | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.handlers | commit, delete, prepare, read_first, read_key, read_next, read_prev, read_rnd, read_rnd_next, rollback, savepoint, savepointrollback, update, write | handlers/s | \u2022 | \u2022 | \u2022 |\n| mysql.table_open_cache_overflows | open_cache | overflows/s | \u2022 | \u2022 | \u2022 |\n| mysql.table_locks | immediate, waited | locks/s | \u2022 | \u2022 | \u2022 |\n| mysql.join_issues | full_join, full_range_join, range, range_check, scan | joins/s | \u2022 | \u2022 | \u2022 |\n| mysql.sort_issues | merge_passes, range, scan | issues/s | \u2022 | \u2022 | \u2022 |\n| mysql.tmp | disk_tables, files, tables | events/s | \u2022 | \u2022 | \u2022 |\n| mysql.connections | all, aborted | connections/s | \u2022 | \u2022 | \u2022 |\n| mysql.connections_active | active, limit, max_active | connections | \u2022 | \u2022 | \u2022 |\n| mysql.threads | connected, cached, running | threads | \u2022 | \u2022 | \u2022 |\n| mysql.threads_created | created | threads/s | \u2022 | \u2022 | \u2022 |\n| mysql.thread_cache_misses | misses | misses | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io | read, write | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io_ops | reads, writes, fsyncs | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io_pending_ops | reads, writes, fsyncs | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_log | waits, write_requests, writes | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_cur_row_lock | current waits | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_rows | inserted, read, updated, deleted | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_pages | data, dirty, free, misc, total | pages | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_pages_flushed | flush_pages | requests/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_bytes | data, dirty | MiB | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_read_ahead | all, evicted | pages/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_read_ahead_rnd | read-ahead | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_ops | disk_reads, wait_free | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log | fsyncs, writes | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log_fsync_writes | fsyncs | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log_io | write | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_deadlocks | deadlocks | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.files | files | files | \u2022 | \u2022 | \u2022 |\n| mysql.files_rate | files | files/s | \u2022 | \u2022 | \u2022 |\n| mysql.connection_errors | accept, internal, max, peer_addr, select, tcpwrap | errors/s | \u2022 | \u2022 | \u2022 |\n| mysql.opened_tables | tables | tables/s | \u2022 | \u2022 | \u2022 |\n| mysql.open_tables | cache, tables | tables | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_fetch_query_duration | duration | milliseconds | 
\u2022 | \u2022 | \u2022 |\n| mysql.process_list_queries_count | system, user | queries | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_longest_query_duration | duration | seconds | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_ops | hits, lowmem_prunes, inserts, not_cached | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.qcache | queries | queries | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_freemem | free | MiB | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_memblocks | free, total | blocks | \u2022 | \u2022 | \u2022 |\n| mysql.galera_writesets | rx, tx | writesets/s | \u2022 | \u2022 | \u2022 |\n| mysql.galera_bytes | rx, tx | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.galera_queue | rx, tx | writesets | \u2022 | \u2022 | \u2022 |\n| mysql.galera_conflicts | bf_aborts, cert_fails | transactions | \u2022 | \u2022 | \u2022 |\n| mysql.galera_flow_control | paused | ms | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_status | primary, non_primary, disconnected | status | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_state | undefined, joining, donor, joined, synced, error | state | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_size | nodes | nodes | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_weight | weight | weight | \u2022 | \u2022 | \u2022 |\n| mysql.galera_connected | connected | boolean | \u2022 | \u2022 | \u2022 |\n| mysql.galera_ready | ready | boolean | \u2022 | \u2022 | \u2022 |\n| mysql.galera_open_transactions | open | transactions | \u2022 | \u2022 | \u2022 |\n| mysql.galera_thread_count | threads | threads | \u2022 | \u2022 | \u2022 |\n| mysql.key_blocks | unused, used, not_flushed | blocks | \u2022 | \u2022 | \u2022 |\n| mysql.key_requests | reads, writes | requests/s | \u2022 | \u2022 | \u2022 |\n| mysql.key_disk_ops | reads, writes | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.binlog_cache | disk, all | transactions/s | \u2022 | \u2022 | \u2022 |\n| mysql.binlog_stmt_cache | disk, all | statements/s | \u2022 | \u2022 | \u2022 |\n\n### Per connection\n\nThese metrics refer to the replication connection.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.slave_behind | seconds | seconds | \u2022 | \u2022 | \u2022 |\n| mysql.slave_status | sql_running, io_running | boolean | \u2022 | \u2022 | \u2022 |\n\n### Per user\n\nThese metrics refer to the MySQL user.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| user | username |\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.userstats_cpu | used | percentage | | \u2022 | \u2022 |\n| mysql.userstats_rows | read, sent, updated, inserted, deleted | operations/s | | \u2022 | \u2022 |\n| mysql.userstats_commands | select, update, other | commands/s | | \u2022 | \u2022 |\n| mysql.userstats_denied_commands | denied | commands/s | | \u2022 | \u2022 |\n| mysql.userstats_created_transactions | commit, rollback | transactions/s | | \u2022 | \u2022 |\n| mysql.userstats_binlog_written | written | B/s | | \u2022 | \u2022 |\n| mysql.userstats_empty_queries | empty | queries/s | | \u2022 | \u2022 |\n| mysql.userstats_connections | created | connections/s | | \u2022 | \u2022 |\n| mysql.userstats_lost_connections | lost | connections/s | | \u2022 | \u2022 |\n| mysql.userstats_denied_connections | denied | connections/s | | \u2022 | \u2022 
|\n\n",integration_type:"collector",id:"go.d.plugin-mysql-MySQL",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/mysql/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-percona_mysql",plugin_name:"go.d.plugin",module_name:"mysql",monitored_instance:{name:"Percona MySQL",link:"https://www.percona.com/software/mysql-database/percona-server",icon_filename:"percona.svg",categories:["data-collection.database-servers"]},related_resources:{integrations:{list:[{plugin_name:"apps.plugin",module_name:"apps"},{plugin_name:"cgroups.plugin",module_name:"cgroups"}]}},info_provided_to_referring_integrations:{description:""},keywords:["db","database","mysql","maria","mariadb","sql"],most_popular:!1},overview:'# Percona MySQL\n\nPlugin: go.d.plugin\nModule: mysql\n\n## Overview\n\nThis collector monitors the health and performance of MySQL servers and collects general statistics, replication and user metrics.\n\n\nIt connects to the MySQL instance via a TCP or UNIX socket and executes the following commands:\n\nExecuted queries:\n\n- `SELECT VERSION();`\n- `SHOW GLOBAL STATUS;`\n- `SHOW GLOBAL VARIABLES;`\n- `SHOW SLAVE STATUS;` or `SHOW ALL SLAVES STATUS;` (MariaDBv10.2+) or `SHOW REPLICA STATUS;` (MySQL 8.0.22+)\n- `SHOW USER_STATISTICS;` (MariaDBv10.1.1+)\n- `SELECT TIME,USER FROM INFORMATION_SCHEMA.PROCESSLIST;`\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by trying to connect as root and netdata using known MySQL TCP and UNIX sockets:\n\n- /var/run/mysqld/mysqld.sock\n- /var/run/mysqld/mysql.sock\n- /var/lib/mysql/mysql.sock\n- /tmp/mysql.sock\n- 127.0.0.1:3306\n- "[::1]:3306"\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n',setup:'## Setup\n\n### Prerequisites\n\n#### Create netdata user\n\nA user account should have the\nfollowing [permissions](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html):\n\n- [`USAGE`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_usage)\n- [`REPLICATION CLIENT`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_replication-client)\n- [`PROCESS`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_process)\n\nTo create the `netdata` user with these permissions, execute the following in the MySQL shell:\n\n```mysql\nCREATE USER \'netdata\'@\'localhost\';\nGRANT USAGE, REPLICATION CLIENT, PROCESS ON *.* TO \'netdata\'@\'localhost\';\nFLUSH PRIVILEGES;\n```\n\nThe `netdata` user will have the ability to connect to the MySQL server on localhost without a password. 
It will only\nbe able to gather statistics without being able to alter or affect operations in any way.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/mysql.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/mysql.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | MySQL server DSN (Data Source Name). See [DSN syntax](https://github.com/go-sql-driver/mysql#dsn-data-source-name). | root@tcp(localhost:3306)/ | yes |\n| my.cnf | Specifies the my.cnf file to read the connection settings from the [client] section. | | no |\n| timeout | Query timeout in seconds. | 1 | no |\n\n{% /details %}\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n dsn: netdata@tcp(127.0.0.1:3306)/\n\n```\n{% /details %}\n##### Unix socket\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n dsn: netdata@unix(/var/lib/mysql/mysql.sock)/\n\n```\n{% /details %}\n##### Connection with password\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n dsn: netconfig:password@tcp(127.0.0.1:3306)/\n\n```\n{% /details %}\n##### my.cnf\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n my.cnf: \'/etc/my.cnf\'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n dsn: netdata@tcp(127.0.0.1:3306)/\n\n - name: remote\n dsn: netconfig:password@tcp(203.0.113.0:3306)/\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `mysql` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m mysql\n ```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ mysql_10s_slow_queries ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.queries | number of slow queries in the last 10 seconds |\n| [ mysql_10s_table_locks_immediate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table immediate locks in the last 10 seconds |\n| [ mysql_10s_table_locks_waited ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table waited locks in the last 10 seconds |\n| [ mysql_10s_waited_locks_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | ratio of waited table locks over the last 10 seconds |\n| [ mysql_connections ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.connections_active | client connections utilization |\n| [ mysql_replication ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_status | replication status (0: stopped, 1: working) |\n| [ mysql_replication_lag ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_behind | difference between the timestamp of the latest transaction processed by the SQL thread and the timestamp of the same transaction when it was processed on the master |\n| [ mysql_galera_cluster_size_max_2m ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | maximum galera cluster size in the last 2 minutes starting one minute ago |\n| [ mysql_galera_cluster_size ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | current galera cluster size, compared to the maximum size in the last 2 minutes |\n| [ mysql_galera_cluster_state_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Donor/Desynced or Joined |\n| [ mysql_galera_cluster_state_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Undefined or Joining or Error |\n| [ mysql_galera_cluster_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_status | galera node is part of a nonoperational component. This occurs in cases of multiple membership changes that result in a loss of Quorum or in cases of split-brain situations. |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per MariaDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.net | in, out | kilobits/s | \u2022 | \u2022 | \u2022 |\n| mysql.queries | queries, questions, slow_queries | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.queries_type | select, delete, update, insert, replace | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.handlers | commit, delete, prepare, read_first, read_key, read_next, read_prev, read_rnd, read_rnd_next, rollback, savepoint, savepointrollback, update, write | handlers/s | \u2022 | \u2022 | \u2022 |\n| mysql.table_open_cache_overflows | open_cache | overflows/s | \u2022 | \u2022 | \u2022 |\n| mysql.table_locks | immediate, waited | locks/s | \u2022 | \u2022 | \u2022 |\n| mysql.join_issues | full_join, full_range_join, range, range_check, scan | joins/s | \u2022 | \u2022 | \u2022 |\n| mysql.sort_issues | merge_passes, range, scan | issues/s | \u2022 | \u2022 | \u2022 |\n| mysql.tmp | disk_tables, files, tables | events/s | \u2022 | \u2022 | \u2022 |\n| mysql.connections | all, aborted | connections/s | \u2022 | \u2022 | \u2022 |\n| mysql.connections_active | active, limit, max_active | connections | \u2022 | \u2022 | \u2022 |\n| mysql.threads | connected, cached, running | threads | \u2022 | \u2022 | \u2022 |\n| mysql.threads_created | created | threads/s | \u2022 | \u2022 | \u2022 |\n| mysql.thread_cache_misses | misses | misses | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io | read, write | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io_ops | reads, writes, fsyncs | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io_pending_ops | reads, writes, fsyncs | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_log | waits, write_requests, writes | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_cur_row_lock | current waits | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_rows | inserted, read, updated, deleted | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_pages | data, dirty, free, misc, total | pages | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_pages_flushed | flush_pages | requests/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_bytes | data, dirty | MiB | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_read_ahead | all, evicted | pages/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_read_ahead_rnd | read-ahead | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_ops | disk_reads, wait_free | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log | fsyncs, writes | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log_fsync_writes | fsyncs | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log_io | write | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_deadlocks | deadlocks | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.files | files | files | \u2022 | \u2022 | \u2022 |\n| mysql.files_rate | files | files/s | \u2022 | \u2022 | \u2022 |\n| mysql.connection_errors | accept, internal, max, peer_addr, select, tcpwrap | errors/s | \u2022 | \u2022 | \u2022 |\n| mysql.opened_tables | tables | tables/s | \u2022 | \u2022 | \u2022 |\n| mysql.open_tables | cache, tables | tables | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_fetch_query_duration | duration | milliseconds | 
\u2022 | \u2022 | \u2022 |\n| mysql.process_list_queries_count | system, user | queries | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_longest_query_duration | duration | seconds | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_ops | hits, lowmem_prunes, inserts, not_cached | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.qcache | queries | queries | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_freemem | free | MiB | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_memblocks | free, total | blocks | \u2022 | \u2022 | \u2022 |\n| mysql.galera_writesets | rx, tx | writesets/s | \u2022 | \u2022 | \u2022 |\n| mysql.galera_bytes | rx, tx | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.galera_queue | rx, tx | writesets | \u2022 | \u2022 | \u2022 |\n| mysql.galera_conflicts | bf_aborts, cert_fails | transactions | \u2022 | \u2022 | \u2022 |\n| mysql.galera_flow_control | paused | ms | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_status | primary, non_primary, disconnected | status | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_state | undefined, joining, donor, joined, synced, error | state | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_size | nodes | nodes | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_weight | weight | weight | \u2022 | \u2022 | \u2022 |\n| mysql.galera_connected | connected | boolean | \u2022 | \u2022 | \u2022 |\n| mysql.galera_ready | ready | boolean | \u2022 | \u2022 | \u2022 |\n| mysql.galera_open_transactions | open | transactions | \u2022 | \u2022 | \u2022 |\n| mysql.galera_thread_count | threads | threads | \u2022 | \u2022 | \u2022 |\n| mysql.key_blocks | unused, used, not_flushed | blocks | \u2022 | \u2022 | \u2022 |\n| mysql.key_requests | reads, writes | requests/s | \u2022 | \u2022 | \u2022 |\n| mysql.key_disk_ops | reads, writes | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.binlog_cache | disk, all | transactions/s | \u2022 | \u2022 | \u2022 |\n| mysql.binlog_stmt_cache | disk, all | statements/s | \u2022 | \u2022 | \u2022 |\n\n### Per connection\n\nThese metrics refer to the replication connection.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.slave_behind | seconds | seconds | \u2022 | \u2022 | \u2022 |\n| mysql.slave_status | sql_running, io_running | boolean | \u2022 | \u2022 | \u2022 |\n\n### Per user\n\nThese metrics refer to the MySQL user.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| user | username |\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.userstats_cpu | used | percentage | | \u2022 | \u2022 |\n| mysql.userstats_rows | read, sent, updated, inserted, deleted | operations/s | | \u2022 | \u2022 |\n| mysql.userstats_commands | select, update, other | commands/s | | \u2022 | \u2022 |\n| mysql.userstats_denied_commands | denied | commands/s | | \u2022 | \u2022 |\n| mysql.userstats_created_transactions | commit, rollback | transactions/s | | \u2022 | \u2022 |\n| mysql.userstats_binlog_written | written | B/s | | \u2022 | \u2022 |\n| mysql.userstats_empty_queries | empty | queries/s | | \u2022 | \u2022 |\n| mysql.userstats_connections | created | connections/s | | \u2022 | \u2022 |\n| mysql.userstats_lost_connections | lost | connections/s | | \u2022 | \u2022 |\n| mysql.userstats_denied_connections | denied | connections/s | | \u2022 | \u2022 
|\n\n",integration_type:"collector",id:"go.d.plugin-mysql-Percona_MySQL",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/mysql/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-nginx",plugin_name:"go.d.plugin",module_name:"nginx",monitored_instance:{name:"NGINX",link:"https://www.nginx.com/",categories:["data-collection.web-servers-and-web-proxies"],icon_filename:"nginx.svg"},related_resources:{integrations:{list:[{plugin_name:"go.d.plugin",module_name:"httpcheck"},{plugin_name:"go.d.plugin",module_name:"web_log"},{plugin_name:"apps.plugin",module_name:"apps"},{plugin_name:"cgroups.plugin",module_name:"cgroups"}]}},alternative_monitored_instances:[],info_provided_to_referring_integrations:{description:""},keywords:["nginx","web","webserver","http","proxy"],most_popular:!0},overview:"# NGINX\n\nPlugin: go.d.plugin\nModule: nginx\n\n## Overview\n\nThis collector monitors the activity and performance of NGINX servers, and collects metrics such as the number of connections, their status, and client requests.\n\n\nIt sends HTTP requests to the NGINX location [stub-status](https://nginx.org/en/docs/http/ngx_http_stub_status_module.html), which is a built-in location that provides metrics about the NGINX server.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects NGINX instances running on localhost that are listening on port 80.\nOn startup, it tries to collect metrics from:\n\n- http://127.0.0.1/basic_status\n- http://localhost/stub_status\n- http://127.0.0.1/stub_status\n- http://127.0.0.1/nginx_status\n- http://127.0.0.1/status\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Enable status support\n\nConfigure [ngx_http_stub_status_module](https://nginx.org/en/docs/http/ngx_http_stub_status_module.html).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/nginx.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nginx.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/stub_status | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. 
| GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/stub_status\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/stub_status\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nNGINX with HTTPS enabled and a self-signed certificate.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1/stub_status\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/stub_status\n\n - name: remote\n url: http://192.0.2.1/stub_status\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `nginx` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m nginx\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per NGINX instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginx.connections | active | connections |\n| nginx.connections_status | reading, writing, idle | connections |\n| nginx.connections_accepted_handled | accepted, handled | connections/s |\n| nginx.requests | requests | requests/s |\n\n",integration_type:"collector",id:"go.d.plugin-nginx-NGINX",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/nginx/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-nginxplus",plugin_name:"go.d.plugin",module_name:"nginxplus",monitored_instance:{name:"NGINX Plus",link:"https://www.nginx.com/products/nginx/",icon_filename:"nginxplus.svg",categories:["data-collection.web-servers-and-web-proxies"]},keywords:["nginxplus","nginx","web","webserver","http","proxy"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1},overview:"# NGINX Plus\n\nPlugin: go.d.plugin\nModule: nginxplus\n\n## Overview\n\nThis collector monitors NGINX Plus servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Config API\n\nTo configure API, see the [official documentation](https://docs.nginx.com/nginx/admin-guide/monitoring/live-activity-monitoring/#configuring-the-api).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/nginxplus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nginxplus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. 
Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1\n\n```\n{% /details %}\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nNGINX Plus with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1\n\n - name: remote\n url: http://192.0.2.1\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `nginxplus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m nginxplus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per NGINX Plus instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.client_connections_rate | accepted, dropped | connections/s |\n| nginxplus.client_connections_count | active, idle | connections |\n| nginxplus.ssl_handshakes_rate | successful, failed | handshakes/s |\n| nginxplus.ssl_handshakes_failures_rate | no_common_protocol, no_common_cipher, timeout, peer_rejected_cert | failures/s |\n| nginxplus.ssl_verification_errors_rate | no_cert, expired_cert, revoked_cert, hostname_mismatch, other | errors/s |\n| nginxplus.ssl_session_reuses_rate | ssl_session | reuses/s |\n| nginxplus.http_requests_rate | requests | requests/s |\n| nginxplus.http_requests_count | requests | requests |\n| nginxplus.uptime | uptime | seconds |\n\n### Per http server zone\n\nThese metrics refer to the HTTP server zone.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| http_server_zone | HTTP server zone name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.http_server_zone_requests_rate | requests | requests/s |\n| nginxplus.http_server_zone_responses_per_code_class_rate | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s |\n| nginxplus.http_server_zone_traffic_rate | received, sent | bytes/s |\n| nginxplus.http_server_zone_requests_processing_count | processing | requests |\n| nginxplus.http_server_zone_requests_discarded_rate | discarded | requests/s |\n\n### Per http location zone\n\nThese metrics refer to the HTTP location zone.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| http_location_zone | HTTP location zone name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.http_location_zone_requests_rate | requests | requests/s |\n| nginxplus.http_location_zone_responses_per_code_class_rate | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s |\n| nginxplus.http_location_zone_traffic_rate | received, sent | bytes/s |\n| nginxplus.http_location_zone_requests_discarded_rate | discarded | requests/s |\n\n### Per http upstream\n\nThese metrics refer to the HTTP upstream.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| http_upstream_name | HTTP upstream name |\n| http_upstream_zone | HTTP upstream zone name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.http_upstream_peers_count | peers | peers |\n| nginxplus.http_upstream_zombies_count | zombie | servers |\n| nginxplus.http_upstream_keepalive_count | keepalive | connections |\n\n### Per http upstream server\n\nThese metrics refer to the HTTP upstream server.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| http_upstream_name | HTTP upstream name |\n| http_upstream_zone | HTTP upstream zone name |\n| http_upstream_server_address | HTTP upstream server address (e.g. 
127.0.0.1:81) |\n| http_upstream_server_name | HTTP upstream server name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.http_upstream_server_requests_rate | requests | requests/s |\n| nginxplus.http_upstream_server_responses_per_code_class_rate | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s |\n| nginxplus.http_upstream_server_response_time | response | milliseconds |\n| nginxplus.http_upstream_server_response_header_time | header | milliseconds |\n| nginxplus.http_upstream_server_traffic_rate | received, sent | bytes/s |\n| nginxplus.http_upstream_server_state | up, down, draining, unavail, checking, unhealthy | state |\n| nginxplus.http_upstream_server_connections_count | active | connections |\n| nginxplus.http_upstream_server_downtime | downtime | seconds |\n\n### Per http cache\n\nThese metrics refer to the HTTP cache.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| http_cache | HTTP cache name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.http_cache_state | warm, cold | state |\n| nginxplus.http_cache_iops | served, written, bypass | responses/s |\n| nginxplus.http_cache_io | served, written, bypass | bytes/s |\n| nginxplus.http_cache_size | size | bytes |\n\n### Per stream server zone\n\nThese metrics refer to the Stream server zone.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| stream_server_zone | Stream server zone name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.stream_server_zone_connections_rate | accepted | connections/s |\n| nginxplus.stream_server_zone_sessions_per_code_class_rate | 2xx, 4xx, 5xx | sessions/s |\n| nginxplus.stream_server_zone_traffic_rate | received, sent | bytes/s |\n| nginxplus.stream_server_zone_connections_processing_count | processing | connections |\n| nginxplus.stream_server_zone_connections_discarded_rate | discarded | connections/s |\n\n### Per stream upstream\n\nThese metrics refer to the Stream upstream.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| stream_upstream_name | Stream upstream name |\n| stream_upstream_zone | Stream upstream zone name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.stream_upstream_peers_count | peers | peers |\n| nginxplus.stream_upstream_zombies_count | zombie | servers |\n\n### Per stream upstream server\n\nThese metrics refer to the Stream upstream server.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| stream_upstream_name | Stream upstream name |\n| stream_upstream_zone | Stream upstream zone name |\n| stream_upstream_server_address | Stream upstream server address (e.g. 
127.0.0.1:12346) |\n| stream_upstream_server_name | Stream upstream server name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.stream_upstream_server_connections_rate | forwarded | connections/s |\n| nginxplus.stream_upstream_server_traffic_rate | received, sent | bytes/s |\n| nginxplus.stream_upstream_server_state | up, down, unavail, checking, unhealthy | state |\n| nginxplus.stream_upstream_server_downtime | downtime | seconds |\n| nginxplus.stream_upstream_server_connections_count | active | connections |\n\n### Per resolver zone\n\nThese metrics refer to the resolver zone.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| resolver_zone | resolver zone name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.resolver_zone_requests_rate | name, srv, addr | requests/s |\n| nginxplus.resolver_zone_responses_rate | noerror, formerr, servfail, nxdomain, notimp, refused, timedout, unknown | responses/s |\n\n",integration_type:"collector",id:"go.d.plugin-nginxplus-NGINX_Plus",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/nginxplus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-nginxvts",plugin_name:"go.d.plugin",module_name:"nginxvts",monitored_instance:{name:"NGINX VTS",link:"https://www.nginx.com/",icon_filename:"nginx.svg",categories:["data-collection.web-servers-and-web-proxies"]},keywords:["webserver"],related_resources:{integrations:{list:[{plugin_name:"go.d.plugin",module_name:"weblog"},{plugin_name:"go.d.plugin",module_name:"httpcheck"},{plugin_name:"apps.plugin",module_name:"apps"}]}},info_provided_to_referring_integrations:{description:""},most_popular:!0},overview:"# NGINX VTS\n\nPlugin: go.d.plugin\nModule: nginxvts\n\n## Overview\n\nThis collector monitors NGINX servers with [virtual host traffic status module](https://github.com/vozlt/nginx-module-vts).\n\n\nIt sends HTTP requests to the NGINX VTS location [status](https://github.com/vozlt/nginx-module-vts#synopsis), \nwhich is a built-in location that provides metrics about the NGINX VTS server.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects NGINX instances running on localhost.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Configure nginx-vts module\n\nTo configure the nginx-vts module, see the [installation instructions](https://github.com/vozlt/nginx-module-vts#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/nginxvts.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nginxvts.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| 
update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/status/format/json | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/status/format/json\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/status/format/json\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1/status/format/json\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/status/format/json\n\n - name: remote\n url: http://192.0.2.1/status/format/json\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `nginxvts` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m nginxvts\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per NGINX VTS instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxvts.requests_total | requests | requests/s |\n| nginxvts.active_connections | active | connections |\n| nginxvts.connections_total | reading, writing, waiting, accepted, handled | connections/s |\n| nginxvts.uptime | uptime | seconds |\n| nginxvts.shm_usage | max, used | bytes |\n| nginxvts.shm_used_node | used | nodes |\n| nginxvts.server_requests_total | requests | requests/s |\n| nginxvts.server_responses_total | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s |\n| nginxvts.server_traffic_total | in, out | bytes/s |\n| nginxvts.server_cache_total | miss, bypass, expired, stale, updating, revalidated, hit, scarce | events/s |\n\n",integration_type:"collector",id:"go.d.plugin-nginxvts-NGINX_VTS",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/nginxvts/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-ntpd",plugin_name:"go.d.plugin",module_name:"ntpd",monitored_instance:{name:"NTPd",link:"https://www.ntp.org/documentation/4.2.8-series/ntpd",icon_filename:"ntp.png",categories:["data-collection.system-clock-and-ntp"]},keywords:["ntpd","ntp","time"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1},overview:"# NTPd\n\nPlugin: go.d.plugin\nModule: ntpd\n\n## Overview\n\nThis collector monitors the system variables of the local `ntpd` daemon (optionally including variables of the polled peers) using the NTP Control Message Protocol via UDP socket, similar to `ntpq`, the [standard NTP query program](https://doc.ntp.org/current-stable/ntpq.html).\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/ntpd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/ntpd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address in IP:PORT format. | 127.0.0.1:123 | yes |\n| timeout | Connection/read/write timeout. | 1 | no |\n| collect_peers | Determines whether peer metrics will be collected. 
| no | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:123\n\n```\n{% /details %}\n##### With peers metrics\n\nCollect peers metrics.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:123\n collect_peers: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:123\n\n - name: remote\n address: 203.0.113.0:123\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `ntpd` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m ntpd\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per NTPd instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ntpd.sys_offset | offset | milliseconds |\n| ntpd.sys_jitter | system, clock | milliseconds |\n| ntpd.sys_frequency | frequency | ppm |\n| ntpd.sys_wander | clock | ppm |\n| ntpd.sys_rootdelay | delay | milliseconds |\n| ntpd.sys_rootdisp | dispersion | milliseconds |\n| ntpd.sys_stratum | stratum | stratum |\n| ntpd.sys_tc | current, minimum | log2 |\n| ntpd.sys_precision | precision | log2 |\n\n### Per peer\n\nThese metrics refer to the NTPd peer.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| peer_address | peer's source IP address |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ntpd.peer_offset | offset | milliseconds |\n| ntpd.peer_delay | delay | milliseconds |\n| ntpd.peer_dispersion | dispersion | milliseconds |\n| ntpd.peer_jitter | jitter | milliseconds |\n| ntpd.peer_xleave | xleave | milliseconds |\n| ntpd.peer_rootdelay | rootdelay | milliseconds |\n| ntpd.peer_rootdisp | dispersion | milliseconds |\n| ntpd.peer_stratum | stratum | stratum |\n| ntpd.peer_hmode | hmode | hmode |\n| ntpd.peer_pmode | pmode | pmode |\n| ntpd.peer_hpoll | hpoll | log2 |\n| ntpd.peer_ppoll | ppoll | log2 |\n| ntpd.peer_precision | precision | log2 |\n\n",integration_type:"collector",id:"go.d.plugin-ntpd-NTPd",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/ntpd/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-nvidia_smi",plugin_name:"go.d.plugin",module_name:"nvidia_smi",monitored_instance:{name:"Nvidia 
GPU",link:"https://www.nvidia.com/en-us/",icon_filename:"nvidia.svg",categories:["data-collection.hardware-devices-and-sensors"]},keywords:["nvidia","gpu","hardware"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1},overview:"# Nvidia GPU\n\nPlugin: go.d.plugin\nModule: nvidia_smi\n\n## Overview\n\nThis collector monitors GPUs performance metrics using\nthe [nvidia-smi](https://developer.nvidia.com/nvidia-system-management-interface) CLI tool.\n\n> **Warning**: under development, [loop mode](https://github.com/netdata/netdata/issues/14522) not implemented yet.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Enable in go.d.conf.\n\nThis collector is disabled by default. You need to explicitly enable it in the `go.d.conf` file.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/nvidia_smi.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nvidia_smi.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| binary_path | Path to nvidia_smi binary. The default is "nvidia_smi" and the executable is looked for in the directories specified in the PATH environment variable. | nvidia_smi | no |\n| timeout | nvidia_smi binary execution timeout. | 2 | no |\n| use_csv_format | Used format when requesting GPU information. XML is used if set to \'no\'. | no | no |\n\n{% /details %}\n#### Examples\n\n##### CSV format\n\nUse CSV format when requesting GPU information.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: nvidia_smi\n use_csv_format: yes\n\n```\n{% /details %}\n##### Custom binary path\n\nThe executable is not in the directories specified in the PATH environment variable.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: nvidia_smi\n binary_path: /usr/local/sbin/nvidia_smi\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `nvidia_smi` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m nvidia_smi\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per gpu\n\nThese metrics refer to the GPU.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| uuid | GPU id (e.g. 00000000:00:04.0) |\n| product_name | GPU product name (e.g. NVIDIA A100-SXM4-40GB) |\n\nMetrics:\n\n| Metric | Dimensions | Unit | XML | CSV |\n|:------|:----------|:----|:---:|:---:|\n| nvidia_smi.gpu_pcie_bandwidth_usage | rx, tx | B/s | \u2022 | |\n| nvidia_smi.gpu_pcie_bandwidth_utilization | rx, tx | % | \u2022 | |\n| nvidia_smi.gpu_fan_speed_perc | fan_speed | % | \u2022 | \u2022 |\n| nvidia_smi.gpu_utilization | gpu | % | \u2022 | \u2022 |\n| nvidia_smi.gpu_memory_utilization | memory | % | \u2022 | \u2022 |\n| nvidia_smi.gpu_decoder_utilization | decoder | % | \u2022 | |\n| nvidia_smi.gpu_encoder_utilization | encoder | % | \u2022 | |\n| nvidia_smi.gpu_frame_buffer_memory_usage | free, used, reserved | B | \u2022 | \u2022 |\n| nvidia_smi.gpu_bar1_memory_usage | free, used | B | \u2022 | |\n| nvidia_smi.gpu_temperature | temperature | Celsius | \u2022 | \u2022 |\n| nvidia_smi.gpu_voltage | voltage | V | \u2022 | |\n| nvidia_smi.gpu_clock_freq | graphics, video, sm, mem | MHz | \u2022 | \u2022 |\n| nvidia_smi.gpu_power_draw | power_draw | Watts | \u2022 | \u2022 |\n| nvidia_smi.gpu_performance_state | P0-P15 | state | \u2022 | \u2022 |\n| nvidia_smi.gpu_mig_mode_current_status | enabled, disabled | status | \u2022 | |\n| nvidia_smi.gpu_mig_devices_count | mig | devices | \u2022 | |\n\n### Per mig\n\nThese metrics refer to the Multi-Instance GPU (MIG).\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| uuid | GPU id (e.g. 00000000:00:04.0) |\n| product_name | GPU product name (e.g. NVIDIA A100-SXM4-40GB) |\n| gpu_instance_id | GPU instance id (e.g. 1) |\n\nMetrics:\n\n| Metric | Dimensions | Unit | XML | CSV |\n|:------|:----------|:----|:---:|:---:|\n| nvidia_smi.gpu_mig_frame_buffer_memory_usage | free, used, reserved | B | \u2022 | |\n| nvidia_smi.gpu_mig_bar1_memory_usage | free, used | B | \u2022 | |\n\n",integration_type:"collector",id:"go.d.plugin-nvidia_smi-Nvidia_GPU",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/nvidia_smi/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-nvme",plugin_name:"go.d.plugin",module_name:"nvme",monitored_instance:{name:"NVMe devices",link:"",icon_filename:"nvme.svg",categories:["data-collection.storage-mount-points-and-filesystems"]},keywords:["nvme"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1},overview:"# NVMe devices\n\nPlugin: go.d.plugin\nModule: nvme\n\n## Overview\n\nThis collector monitors the health of NVMe devices. It relies on the [`nvme`](https://github.com/linux-nvme/nvme-cli#nvme-cli) CLI tool but avoids directly executing the binary. 
Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment. This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install nvme-cli\n\nSee [Distro Support](https://github.com/linux-nvme/nvme-cli#distro-support). Install `nvme-cli` using your distribution\'s package manager.\n\n\n#### For Netdata running in a Docker container: grant NVMe device access\n\nYour NVMe devices need to be accessible within the Docker container for Netdata to monitor them.\n\nInclude the following option in your `docker run` command or add the device mapping in your `docker-compose.yml` file:\n\n- `docker run`\n\n ```bash\n --device \'/dev/nvme0n1:/dev/nvme0n1\'\n ```\n\n- `docker-compose.yml`\n\n ```yaml\n services:\n netdata:\n devices:\n - "/dev/nvme0n1:/dev/nvme0n1"\n ```\n\n**Note**: Replace `/dev/nvme0n1` with your actual NVMe device name.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/nvme.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nvme.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| timeout | nvme binary execution timeout. | 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: nvme\n update_every: 5 # Collect NVMe metrics every 5 seconds\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `nvme` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m nvme\n ```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ nvme_device_critical_warnings_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/nvme.conf) | nvme.device_critical_warnings_state | NVMe device ${label:device} has critical warnings |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per device\n\nThese metrics refer to the NVME device.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | NVMe device name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nvme.device_estimated_endurance_perc | used | % |\n| nvme.device_available_spare_perc | spare | % |\n| nvme.device_composite_temperature | temperature | celsius |\n| nvme.device_io_transferred_count | read, written | bytes |\n| nvme.device_power_cycles_count | power | cycles |\n| nvme.device_power_on_time | power-on | seconds |\n| nvme.device_critical_warnings_state | available_spare, temp_threshold, nvm_subsystem_reliability, read_only, volatile_mem_backup_failed, persistent_memory_read_only | state |\n| nvme.device_unsafe_shutdowns_count | unsafe | shutdowns |\n| nvme.device_media_errors_rate | media | errors/s |\n| nvme.device_error_log_entries_rate | error_log | entries/s |\n| nvme.device_warning_composite_temperature_time | wctemp | seconds |\n| nvme.device_critical_composite_temperature_time | cctemp | seconds |\n| nvme.device_thermal_mgmt_temp1_transitions_rate | temp1 | transitions/s |\n| nvme.device_thermal_mgmt_temp2_transitions_rate | temp2 | transitions/s |\n| nvme.device_thermal_mgmt_temp1_time | temp1 | seconds |\n| nvme.device_thermal_mgmt_temp2_time | temp2 | seconds |\n\n",integration_type:"collector",id:"go.d.plugin-nvme-NVMe_devices",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/nvme/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-openvpn",plugin_name:"go.d.plugin",module_name:"openvpn",monitored_instance:{name:"OpenVPN",link:"https://openvpn.net/",icon_filename:"openvpn.svg",categories:["data-collection.vpns"]},keywords:["openvpn","vpn"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1},overview:"# OpenVPN\n\nPlugin: go.d.plugin\nModule: openvpn\n\n## Overview\n\nThis collector monitors OpenVPN servers.\n\nIt uses OpenVPN [Management Interface](https://openvpn.net/community-resources/management-interface/) to collect metrics.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant 
performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Enable in go.d.conf.\n\nThis collector is disabled by default. You need to explicitly enable it in [go.d.conf](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d.conf).\n\nFrom the documentation for the OpenVPN Management Interface:\n> Currently, the OpenVPN daemon can at most support a single management client any one time.\n\nThe collector is disabled by default to avoid breaking other tools that use the `Management Interface`.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/openvpn.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/openvpn.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address in IP:PORT format. | 127.0.0.1:7505 | yes |\n| timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. | 1 | no |\n| per_user_stats | User selector. Determines which user metrics will be collected. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:7505\n\n```\n{% /details %}\n##### With user metrics\n\nCollect metrics of all users.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:7505\n per_user_stats:\n includes:\n - "* *"\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:7505\n\n - name: remote\n address: 203.0.113.0:7505\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `openvpn` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m openvpn\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per OpenVPN instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| openvpn.active_clients | clients | clients |\n| openvpn.total_traffic | in, out | kilobits/s |\n\n### Per user\n\nThese metrics refer to the VPN user.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| username | VPN username |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| openvpn.user_traffic | in, out | kilobits/s |\n| openvpn.user_connection_time | time | seconds |\n\n",integration_type:"collector",id:"go.d.plugin-openvpn-OpenVPN",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/openvpn/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-openvpn_status_log",plugin_name:"go.d.plugin",module_name:"openvpn_status_log",monitored_instance:{name:"OpenVPN status log",link:"https://openvpn.net/",icon_filename:"openvpn.svg",categories:["data-collection.vpns"]},keywords:["openvpn","vpn"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1},overview:"# OpenVPN status log\n\nPlugin: go.d.plugin\nModule: openvpn_status_log\n\n## Overview\n\nThis collector monitors an OpenVPN server.\n\nIt parses server log files and provides summary and per-user metrics.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/openvpn_status_log.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/openvpn_status_log.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| log_path | Path to status log. | /var/log/openvpn/status.log | yes |\n| per_user_stats | User selector. Determines which user metrics will be collected. | | no |\n\n{% /details %}\n#### Examples\n\n##### With user metrics\n\nCollect metrics of all users.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n per_user_stats:\n includes:\n - "* *"\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `openvpn_status_log` collector, run the `go.d.plugin` with the debug option enabled. 
The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m openvpn_status_log\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per OpenVPN status log instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| openvpn.active_clients | clients | clients |\n| openvpn.total_traffic | in, out | kilobits/s |\n\n### Per user\n\nThese metrics refer to the VPN user.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| username | VPN username |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| openvpn.user_traffic | in, out | kilobits/s |\n| openvpn.user_connection_time | time | seconds |\n\n",integration_type:"collector",id:"go.d.plugin-openvpn_status_log-OpenVPN_status_log",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/openvpn_status_log/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-pgbouncer",plugin_name:"go.d.plugin",module_name:"pgbouncer",monitored_instance:{name:"PgBouncer",link:"https://www.pgbouncer.org/",icon_filename:"postgres.svg",categories:["data-collection.database-servers"]},keywords:["pgbouncer"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1},overview:"# PgBouncer\n\nPlugin: go.d.plugin\nModule: pgbouncer\n\n## Overview\n\nThis collector monitors PgBouncer servers.\n\nExecuted queries:\n\n- `SHOW VERSION;`\n- `SHOW CONFIG;`\n- `SHOW DATABASES;`\n- `SHOW STATS;`\n- `SHOW POOLS;`\n\nInformation about the queries can be found in the [PgBouncer Documentation](https://www.pgbouncer.org/usage.html).\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Create netdata user\n\nCreate a user with `stats_users` permissions to query your PgBouncer instance.\n\nTo create the `netdata` user:\n\n- Add `netdata` user to the `pgbouncer.ini` file:\n\n ```text\n stats_users = netdata\n ```\n\n- Add a password for the `netdata` user to the `userlist.txt` file:\n\n ```text\n "netdata" "<PASSWORD>"\n ```\n\n- To verify the credentials, run the following command:\n\n ```bash\n psql -h localhost -U netdata -p 6432 pgbouncer -c "SHOW VERSION;" >/dev/null 2>&1 && echo OK || echo FAIL\n ```\n\n When it prompts for a password, enter the 
password you added to `userlist.txt`.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/pgbouncer.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/pgbouncer.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | PgBouncer server DSN (Data Source Name). See [DSN syntax](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING). | postgres://postgres:postgres@127.0.0.1:6432/pgbouncer | yes |\n| timeout | Query timeout in seconds. | 1 | no |\n\n{% /details %}\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n dsn: \'postgres://postgres:postgres@127.0.0.1:6432/pgbouncer\'\n\n```\n{% /details %}\n##### Unix socket\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n dsn: \'host=/tmp dbname=pgbouncer user=postgres port=6432\'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n dsn: \'postgres://postgres:postgres@127.0.0.1:6432/pgbouncer\'\n\n - name: remote\n dsn: \'postgres://postgres:postgres@203.0.113.10:6432/pgbouncer\'\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `pgbouncer` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m pgbouncer\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per PgBouncer instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| pgbouncer.client_connections_utilization | used | percentage |\n\n### Per database\n\nThese metrics refer to the database.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| database | database name |\n| postgres_database | Postgres database name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| pgbouncer.db_client_connections | active, waiting, cancel_req | connections |\n| pgbouncer.db_server_connections | active, idle, used, tested, login | connections |\n| pgbouncer.db_server_connections_utilization | used | percentage |\n| pgbouncer.db_clients_wait_time | time | seconds |\n| pgbouncer.db_client_max_wait_time | time | seconds |\n| pgbouncer.db_transactions | transactions | transactions/s |\n| pgbouncer.db_transactions_time | time | seconds |\n| pgbouncer.db_transaction_avg_time | time | seconds |\n| pgbouncer.db_queries | queries | queries/s |\n| pgbouncer.db_queries_time | time | seconds |\n| pgbouncer.db_query_avg_time | time | seconds |\n| pgbouncer.db_network_io | received, sent | B/s |\n\n",integration_type:"collector",id:"go.d.plugin-pgbouncer-PgBouncer",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/pgbouncer/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-phpdaemon",plugin_name:"go.d.plugin",module_name:"phpdaemon",monitored_instance:{name:"phpDaemon",link:"https://github.com/kakserpom/phpdaemon",icon_filename:"php.svg",categories:["data-collection.apm"]},keywords:["phpdaemon","php"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1},overview:"# phpDaemon\n\nPlugin: go.d.plugin\nModule: phpdaemon\n\n## Overview\n\nThis collector monitors phpDaemon instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\n#### Enable phpDaemon's HTTP server\n\nStatistics are expected to be in JSON format.\n\n
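For illustration only, the collector expects a flat JSON object of worker-state counters; the field names below mirror the dimensions this collector charts in the Metrics section, and the values are made up:\n\n```text\n{\"alive\": 3, \"shutdown\": 0, \"idle\": 2, \"busy\": 1, \"reloading\": 0, \"preinit\": 0, \"init\": 0, \"initialized\": 2, \"uptime\": 3600}\n```\n\n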
\n**phpDaemon configuration**\n\nInstructions from [@METAJIJI](https://github.com/METAJIJI).\n\nTo expose `phpd` statistics over HTTP, you must enable the HTTP server and write an application for it.\nThe application matters because the standalone [ServerStatus.php](https://github.com/kakserpom/phpdaemon/blob/master/PHPDaemon/Applications/ServerStatus.php) application serves statistics as HTML, which is unusable for `netdata`.\n\n```php\n// /opt/phpdaemon/conf/phpd.conf\n\npath /opt/phpdaemon/conf/AppResolver.php;\nPool:HTTPServer {\n privileged;\n listen '127.0.0.1';\n port 8509;\n}\n```\n\n```php\n// /opt/phpdaemon/conf/AppResolver.php\n\n<?php\n\nclass MyAppResolver extends \\PHPDaemon\\Core\\AppResolver {\n public function getRequestRoute($req, $upstream) {\n // Route requests such as /FullStatus to the application of the same name.\n if (preg_match('~^/(ServerStatus|FullStatus)/?$~', $req->attrs->server['DOCUMENT_URI'], $m)) {\n return $m[1];\n }\n }\n}\n\nreturn new MyAppResolver;\n```\n\n```php\n// /opt/phpdaemon/conf/PHPDaemon/Applications/FullStatus.php\n\n<?php\nnamespace PHPDaemon\\Applications;\n\nuse PHPDaemon\\Core\\Daemon;\nuse PHPDaemon\\HTTPRequest\\Generic;\n\nclass FullStatus extends \\PHPDaemon\\Core\\AppInstance {\n public function beginRequest($req, $upstream) {\n return new FullStatusRequest($req, $upstream, $this);\n }\n}\n\nclass FullStatusRequest extends Generic {\n public function run() {\n $this->header('Content-Type: application/javascript; charset=utf-8');\n\n // Emit the daemon's worker-state counters as JSON.\n $stat = Daemon::getStateOfWorkers();\n $stat['uptime'] = time() - Daemon::$startTime;\n echo json_encode($stat);\n }\n}\n```\n\n
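Once phpDaemon is restarted with this configuration, you can sanity-check the endpoint before enabling the collector (a minimal check, assuming the listen address and port from the example above):\n\n```bash\n# Should print a single JSON object with the worker-state counters.\ncurl -s http://127.0.0.1:8509/FullStatus\n```\n\n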
    \n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/phpdaemon.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/phpdaemon.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8509/FullStatus | yes |\n| timeout | HTTP request timeout. | 2 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8509/FullStatus\n\n```\n{% /details %}\n##### HTTP authentication\n\nHTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8509/FullStatus\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nHTTPS with self-signed certificate.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8509/FullStatus\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8509/FullStatus\n\n - name: remote\n url: http://192.0.2.1:8509/FullStatus\n\n```\n{% /details %}\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `phpdaemon` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m phpdaemon\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per phpDaemon instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| phpdaemon.workers | alive, shutdown | workers |\n| phpdaemon.alive_workers | idle, busy, reloading | workers |\n| phpdaemon.idle_workers | preinit, init, initialized | workers |\n| phpdaemon.uptime | time | seconds |\n\n",integration_type:"collector",id:"go.d.plugin-phpdaemon-phpDaemon",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/phpdaemon/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-phpfpm",plugin_name:"go.d.plugin",module_name:"phpfpm",monitored_instance:{name:"PHP-FPM",link:"https://php-fpm.org/",icon_filename:"php.svg",categories:["data-collection.web-servers-and-web-proxies"]},keywords:["phpfpm","php"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1},overview:"# PHP-FPM\n\nPlugin: go.d.plugin\nModule: phpfpm\n\n## Overview\n\nThis collector monitors PHP-FPM instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Enable status page\n\nUncomment the `pm.status_path = /status` variable in the `php-fpm` config file.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/phpfpm.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/phpfpm.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/status?full&json | yes |\n| socket | Server Unix socket. | | no |\n| address | Server address in IP:PORT format. | | no |\n| fcgi_path | Status path. | /status | no |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. 
| | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### HTTP\n\nCollecting data from a local instance over HTTP.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://localhost/status?full&json\n\n```\n{% /details %}\n##### Unix socket\n\nCollecting data from a local instance over Unix socket.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n socket: \'/tmp/php-fpm.sock\'\n\n```\n{% /details %}\n##### TCP socket\n\nCollecting data from a local instance over TCP socket.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:9000\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://localhost/status?full&json\n\n - name: remote\n url: http://203.0.113.10/status?full&json\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `phpfpm` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m phpfpm\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per PHP-FPM instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| phpfpm.connections | active, max_active, idle | connections |\n| phpfpm.requests | requests | requests/s |\n| phpfpm.performance | max_children_reached, slow_requests | status |\n| phpfpm.request_duration | min, max, avg | milliseconds |\n| phpfpm.request_cpu | min, max, avg | percentage |\n| phpfpm.request_mem | min, max, avg | KB |\n\n",integration_type:"collector",id:"go.d.plugin-phpfpm-PHP-FPM",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/phpfpm/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-pihole",plugin_name:"go.d.plugin",module_name:"pihole",monitored_instance:{name:"Pi-hole",link:"https://pi-hole.net",icon_filename:"pihole.png",categories:["data-collection.dns-and-dhcp-servers"]},keywords:["pihole"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1},overview:"# Pi-hole\n\nPlugin: go.d.plugin\nModule: pihole\n\n## Overview\n\nThis collector monitors Pi-hole instances using [PHP API](https://github.com/pi-hole/AdminLTE).\n\nThe data provided by the API is for the last 24 hours. All collected values refer to this time period and not to the\nmodule's collection interval.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/pihole.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/pihole.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1 | yes |\n| setup_vars_path | Path to setupVars.conf. This file is used to get the web password. | /etc/pihole/setupVars.conf | no |\n| timeout | HTTP request timeout. | 5 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. 
| | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nRemote instance with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://203.0.113.11\n tls_skip_verify: yes\n password: 1ebd33f882f9aa5fac26a7cb74704742f91100228eb322e41b7bd6e6aeb8f74b\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1\n\n - name: remote\n url: http://203.0.113.10\n password: 1ebd33f882f9aa5fac26a7cb74704742f91100228eb322e41b7bd6e6aeb8f74b\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `pihole` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m pihole\n ```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ pihole_blocklist_last_update ](https://github.com/netdata/netdata/blob/master/src/health/health.d/pihole.conf) | pihole.blocklist_last_update | gravity.list (blocklist) file last update time |\n| [ pihole_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/pihole.conf) | pihole.unwanted_domains_blocking_status | unwanted domains blocking is disabled |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Pi-hole instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| pihole.dns_queries_total | queries | queries |\n| pihole.dns_queries | cached, blocked, forwarded | queries |\n| pihole.dns_queries_percentage | cached, blocked, forwarded | percentage |\n| pihole.unique_clients | unique | clients |\n| pihole.domains_on_blocklist | blocklist | domains |\n| pihole.blocklist_last_update | ago | seconds |\n| pihole.unwanted_domains_blocking_status | enabled, disabled | status |\n| pihole.dns_queries_types | a, aaaa, any, ptr, soa, srv, txt | percentage |\n| pihole.dns_queries_forwarded_destination | cached, blocked, other | percentage |\n\n",integration_type:"collector",id:"go.d.plugin-pihole-Pi-hole",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/pihole/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-pika",plugin_name:"go.d.plugin",module_name:"pika",monitored_instance:{name:"Pika",link:"https://github.com/OpenAtomFoundation/pika",icon_filename:"pika.svg",categories:["data-collection.database-servers"]},keywords:["pika","databases"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1},overview:"# Pika\n\nPlugin: go.d.plugin\nModule: pika\n\n## Overview\n\nThis collector monitors Pika servers.\n\nIt collects information and statistics about the server executing the following commands:\n\n- [`INFO ALL`](https://github.com/OpenAtomFoundation/pika/wiki/pika-info%E4%BF%A1%E6%81%AF%E8%AF%B4%E6%98%8E)\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/pika.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/pika.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Pika server address. | redis://@localhost:9221 | yes |\n| timeout | Dial (establishing new connections), read (socket reads) and write (socket writes) timeout in seconds. | 1 | no |\n| username | Username used for authentication. | | no |\n| password | Password used for authentication. | | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. 
Controls whether the client performs this check. | no | no |\n| tls_ca | Certificate authority that the client uses when verifying server certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 'redis://@localhost:9221'\n\n```\n{% /details %}\n##### TCP socket with password\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 'redis://:password@127.0.0.1:9221'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 'redis://:password@127.0.0.1:9221'\n\n - name: remote\n address: 'redis://user:password@203.0.113.0:9221'\n\n```\n{% /details %}\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `pika` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m pika\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Pika instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| pika.connections | accepted | connections |\n| pika.clients | connected | clients |\n| pika.memory | used | bytes |\n| pika.connected_replicas | connected | replicas |\n| pika.commands | processed | commands/s |\n| pika.commands_calls | a dimension per command | calls/s |\n| pika.database_strings_keys | a dimension per database | keys |\n| pika.database_strings_expires_keys | a dimension per database | keys |\n| pika.database_strings_invalid_keys | a dimension per database | keys |\n| pika.database_hashes_keys | a dimension per database | keys |\n| pika.database_hashes_expires_keys | a dimension per database | keys |\n| pika.database_hashes_invalid_keys | a dimension per database | keys |\n| pika.database_lists_keys | a dimension per database | keys |\n| pika.database_lists_expires_keys | a dimension per database | keys |\n| pika.database_lists_invalid_keys | a dimension per database | keys |\n| pika.database_zsets_keys | a dimension per database | keys |\n| pika.database_zsets_expires_keys | a dimension per database | keys |\n| pika.database_zsets_invalid_keys | a dimension per database | keys |\n| pika.database_sets_keys | a dimension per database | keys |\n| pika.database_sets_expires_keys | a dimension per database | keys |\n| pika.database_sets_invalid_keys | a dimension per database | keys |\n| pika.uptime | uptime | seconds |\n\n",integration_type:"collector",id:"go.d.plugin-pika-Pika",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/pika/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-ping",plugin_name:"go.d.plugin",module_name:"ping",monitored_instance:{name:"Ping",link:"",icon_filename:"globe.svg",categories:["data-collection.synthetic-checks"]},keywords:["ping"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1},overview:'# Ping\n\nPlugin: go.d.plugin\nModule: ping\n\n## Overview\n\nThis module measures round-trip time and packet loss by sending ping messages to network hosts.\n\nThere are two operational modes:\n\n- privileged (send raw ICMP ping, default). 
Requires\n CAP_NET_RAW [capability](https://man7.org/linux/man-pages/man7/capabilities.7.html) or root privileges:\n > **Note**: set automatically during Netdata installation.\n\n ```bash\n sudo setcap CAP_NET_RAW=eip /usr/libexec/netdata/plugins.d/go.d.plugin\n ```\n\n- unprivileged (send UDP ping, Linux only).\n Requires configuring [ping_group_range](https://www.man7.org/linux/man-pages/man7/icmp.7.html):\n\n ```bash\n sudo sysctl -w net.ipv4.ping_group_range="0 2147483647"\n ```\n To persist the change add `net.ipv4.ping_group_range="0 2147483647"` to `/etc/sysctl.conf` and\n execute `sudo sysctl -p`.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn\'t support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n',setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/ping.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/ping.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| hosts | Network hosts. | | yes |\n| network | Allows configuration of DNS resolution. Supported options: ip (select IPv4 or IPv6), ip4 (select IPv4), ip6 (select IPv6). | ip | no |\n| privileged | Ping packets type. "no" means send an "unprivileged" UDP ping, "yes" - raw ICMP ping. | yes | no |\n| packets | Number of ping packets to send. | 5 | no |\n| interval | Timeout between sending ping packets. | 100ms | no |\n\n{% /details %}\n#### Examples\n\n##### IPv4 hosts\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: example\n hosts:\n - 192.0.2.0\n - 192.0.2.1\n\n```\n{% /details %}\n##### Unprivileged mode\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: example\n privileged: no\n hosts:\n - 192.0.2.0\n - 192.0.2.1\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nMultiple instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: example1\n hosts:\n - 192.0.2.0\n - 192.0.2.1\n\n - name: example2\n packets: 10\n hosts:\n - 192.0.2.3\n - 192.0.2.4\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `ping` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m ping\n ```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ ping_host_reachable ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ping.conf) | ping.host_packet_loss | network host ${label:host} reachability status |\n| [ ping_packet_loss ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ping.conf) | ping.host_packet_loss | packet loss percentage to the network host ${label:host} over the last 10 minutes |\n| [ ping_host_latency ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ping.conf) | ping.host_rtt | average latency to the network host ${label:host} over the last 10 seconds |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per host\n\nThese metrics refer to the remote host.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| host | remote host |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ping.host_rtt | min, max, avg | milliseconds |\n| ping.host_std_dev_rtt | std_dev | milliseconds |\n| ping.host_packet_loss | loss | percentage |\n| ping.host_packets | received, sent | packets |\n\n",integration_type:"collector",id:"go.d.plugin-ping-Ping",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/ping/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-portcheck",plugin_name:"go.d.plugin",module_name:"portcheck",monitored_instance:{name:"TCP Endpoints",link:"",icon_filename:"globe.svg",categories:["data-collection.synthetic-checks"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1},overview:"# TCP Endpoints\n\nPlugin: go.d.plugin\nModule: portcheck\n\n## Overview\n\nThis collector monitors the availability and response time of TCP services.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/portcheck.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/portcheck.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" 
%}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| host | Remote host address in IPv4 or IPv6 format, or a DNS name. | | yes |\n| ports | Remote host ports. Must be specified in numeric format. | | yes |\n| timeout | TCP connection timeout. | 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Check SSH and telnet\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: server1\n host: 127.0.0.1\n ports:\n - 22\n - 23\n\n```\n{% /details %}\n##### Check webserver with IPv6 address\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: server2\n host: "[2001:DB8::1]"\n ports:\n - 80\n - 8080\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nMultiple instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: server1\n host: 127.0.0.1\n ports:\n - 22\n - 23\n\n - name: server2\n host: 203.0.113.10\n ports:\n - 22\n - 23\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `portcheck` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m portcheck\n ```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ portcheck_service_reachable ](https://github.com/netdata/netdata/blob/master/src/health/health.d/portcheck.conf) | portcheck.status | TCP host ${label:host} port ${label:port} liveness status |\n| [ portcheck_connection_timeouts ](https://github.com/netdata/netdata/blob/master/src/health/health.d/portcheck.conf) | portcheck.status | percentage of timed-out TCP connections to host ${label:host} port ${label:port} in the last 5 minutes |\n| [ portcheck_connection_fails ](https://github.com/netdata/netdata/blob/master/src/health/health.d/portcheck.conf) | portcheck.status | percentage of failed TCP connections to host ${label:host} port ${label:port} in the last 5 minutes |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per tcp endpoint\n\nThese metrics refer to the TCP endpoint.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| host | host |\n| port | port |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| portcheck.status | success, failed, timeout | boolean |\n| portcheck.state_duration | time | seconds |\n| portcheck.latency | time | ms |\n\n",integration_type:"collector",id:"go.d.plugin-portcheck-TCP_Endpoints",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/portcheck/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-postgres",plugin_name:"go.d.plugin",module_name:"postgres",monitored_instance:{name:"PostgreSQL",link:"https://www.postgresql.org/",categories:["data-collection.database-servers"],icon_filename:"postgres.svg"},related_resources:{integrations:{list:[{plugin_name:"apps.plugin",module_name:"apps"},{plugin_name:"cgroups.plugin",module_name:"cgroups"}]}},alternative_monitored_instances:[],info_provided_to_referring_integrations:{description:""},keywords:["db","database","postgres","postgresql","sql"],most_popular:!0},overview:"# PostgreSQL\n\nPlugin: go.d.plugin\nModule: postgres\n\n## Overview\n\nThis collector monitors the activity and performance of Postgres servers, collects replication statistics, metrics for each database, table and index, and more.\n\n\nIt establishes a connection to the Postgres instance via a TCP or UNIX socket.\nTo collect metrics for database tables and indexes, it establishes an additional connection for each discovered database.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by trying to connect as root and netdata using known PostgreSQL TCP and UNIX sockets:\n\n- 127.0.0.1:5432\n- /var/run/postgresql/\n\n\n#### Limits\n\nTable and index metrics are not collected for databases with more than 50 tables or 250 indexes.\nThese limits can be changed in the configuration file.\n\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\n#### Create netdata user\n\nCreate a user with granted `pg_monitor`\nor `pg_read_all_stats` [built-in role](https://www.postgresql.org/docs/current/predefined-roles.html).\n\nTo create the `netdata` user with these permissions, execute the following in the psql session, as a user with CREATEROLE privileges:\n\n```postgresql\nCREATE USER netdata;\nGRANT pg_monitor TO netdata;\n```\n\nAfter creating the new user, restart the Netdata agent with `sudo systemctl restart netdata`, or\nthe [appropriate method](/docs/netdata-agent/start-stop-restart.md) for your\nsystem.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/postgres.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/postgres.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true 
summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | Postgres server DSN (Data Source Name). See [DSN syntax](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING). | postgres://postgres:postgres@127.0.0.1:5432/postgres | yes |\n| timeout | Query timeout in seconds. | 2 | no |\n| collect_databases_matching | Databases selector. Determines which database metrics will be collected. Syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/go/collectors/go.d.plugin/pkg/matcher#simple-patterns-matcher). | | no |\n| max_db_tables | Maximum number of tables in the database. Table metrics will not be collected for databases that have more tables than max_db_tables. 0 means no limit. | 50 | no |\n| max_db_indexes | Maximum number of indexes in the database. Index metrics will not be collected for databases that have more indexes than max_db_indexes. 0 means no limit. | 250 | no |\n\n{% /details %}\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n dsn: 'postgresql://netdata@127.0.0.1:5432/postgres'\n\n```\n##### Unix socket\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: 'host=/var/run/postgresql dbname=postgres user=netdata'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: 'postgresql://netdata@127.0.0.1:5432/postgres'\n\n - name: remote\n dsn: 'postgresql://netdata@203.0.113.0:5432/postgres'\n\n```\n{% /details %}\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `postgres` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m postgres\n ```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ postgres_total_connection_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.connections_utilization | average total connection utilization over the last minute |\n| [ postgres_acquired_locks_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.locks_utilization | average acquired locks utilization over the last minute |\n| [ postgres_txid_exhaustion_perc ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.txid_exhaustion_perc | percent towards TXID wraparound |\n| [ postgres_db_cache_io_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.db_cache_io_ratio | average cache hit ratio in db ${label:database} over the last minute |\n| [ postgres_db_transactions_rollback_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.db_cache_io_ratio | average aborted transactions percentage in db ${label:database} over the last five minutes |\n| [ postgres_db_deadlocks_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.db_deadlocks_rate | number of deadlocks detected in db ${label:database} in the last minute |\n| [ postgres_table_cache_io_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_cache_io_ratio | average cache hit ratio in db ${label:database} table ${label:table} over the last minute |\n| [ postgres_table_index_cache_io_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_index_cache_io_ratio | average index cache hit ratio in db ${label:database} table ${label:table} over the last minute |\n| [ postgres_table_toast_cache_io_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_toast_cache_io_ratio | average TOAST hit ratio in db ${label:database} table ${label:table} over the last minute |\n| [ postgres_table_toast_index_cache_io_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_toast_index_cache_io_ratio | average index TOAST hit ratio in db ${label:database} table ${label:table} over the last minute |\n| [ postgres_table_bloat_size_perc ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_bloat_size_perc | bloat size percentage in db ${label:database} table ${label:table} |\n| [ postgres_table_last_autovacuum_time ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_autovacuum_since_time | time elapsed since db ${label:database} table ${label:table} was vacuumed by the autovacuum daemon |\n| [ postgres_table_last_autoanalyze_time ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_autoanalyze_since_time | time elapsed since db ${label:database} 
table ${label:table} was analyzed by the autovacuum daemon |\n| [ postgres_index_bloat_size_perc ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.index_bloat_size_perc | bloat size percentage in db ${label:database} table ${label:table} index ${label:index} |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per PostgreSQL instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postgres.connections_utilization | used | percentage |\n| postgres.connections_usage | available, used | connections |\n| postgres.connections_state_count | active, idle, idle_in_transaction, idle_in_transaction_aborted, disabled | connections |\n| postgres.transactions_duration | a dimension per bucket | transactions/s |\n| postgres.queries_duration | a dimension per bucket | queries/s |\n| postgres.locks_utilization | used | percentage |\n| postgres.checkpoints_rate | scheduled, requested | checkpoints/s |\n| postgres.checkpoints_time | write, sync | milliseconds |\n| postgres.bgwriter_halts_rate | maxwritten | events/s |\n| postgres.buffers_io_rate | checkpoint, backend, bgwriter | B/s |\n| postgres.buffers_backend_fsync_rate | fsync | calls/s |\n| postgres.buffers_allocated_rate | allocated | B/s |\n| postgres.wal_io_rate | write | B/s |\n| postgres.wal_files_count | written, recycled | files |\n| postgres.wal_archiving_files_count | ready, done | files/s |\n| postgres.autovacuum_workers_count | analyze, vacuum_analyze, vacuum, vacuum_freeze, brin_summarize | workers |\n| postgres.txid_exhaustion_towards_autovacuum_perc | emergency_autovacuum | percentage |\n| postgres.txid_exhaustion_perc | txid_exhaustion | percentage |\n| postgres.txid_exhaustion_oldest_txid_num | xid | xid |\n| postgres.catalog_relations_count | ordinary_table, index, sequence, toast_table, view, materialized_view, composite_type, foreign_table, partitioned_table, partitioned_index | relations |\n| postgres.catalog_relations_size | ordinary_table, index, sequence, toast_table, view, materialized_view, composite_type, foreign_table, partitioned_table, partitioned_index | B |\n| postgres.uptime | uptime | seconds |\n| postgres.databases_count | databases | databases |\n\n### Per repl application\n\nThese metrics refer to the replication application.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| application | application name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postgres.replication_app_wal_lag_size | sent_lag, write_lag, flush_lag, replay_lag | B |\n| postgres.replication_app_wal_lag_time | write_lag, flush_lag, replay_lag | seconds |\n\n### Per repl slot\n\nThese metrics refer to the replication slot.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| slot | replication slot name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postgres.replication_slot_files_count | wal_keep, pg_replslot_files | files |\n\n### Per database\n\nThese metrics refer to the database.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| database | database name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postgres.db_transactions_ratio | committed, rollback | percentage |\n| 
postgres.db_transactions_rate | committed, rollback | transactions/s |\n| postgres.db_connections_utilization | used | percentage |\n| postgres.db_connections_count | connections | connections |\n| postgres.db_cache_io_ratio | miss | percentage |\n| postgres.db_io_rate | memory, disk | B/s |\n| postgres.db_ops_fetched_rows_ratio | fetched | percentage |\n| postgres.db_ops_read_rows_rate | returned, fetched | rows/s |\n| postgres.db_ops_write_rows_rate | inserted, deleted, updated | rows/s |\n| postgres.db_conflicts_rate | conflicts | queries/s |\n| postgres.db_conflicts_reason_rate | tablespace, lock, snapshot, bufferpin, deadlock | queries/s |\n| postgres.db_deadlocks_rate | deadlocks | deadlocks/s |\n| postgres.db_locks_held_count | access_share, row_share, row_exclusive, share_update, share, share_row_exclusive, exclusive, access_exclusive | locks |\n| postgres.db_locks_awaited_count | access_share, row_share, row_exclusive, share_update, share, share_row_exclusive, exclusive, access_exclusive | locks |\n| postgres.db_temp_files_created_rate | created | files/s |\n| postgres.db_temp_files_io_rate | written | B/s |\n| postgres.db_size | size | B |\n\n### Per table\n\nThese metrics refer to the database table.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| database | database name |\n| schema | schema name |\n| table | table name |\n| parent_table | parent table name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postgres.table_rows_dead_ratio | dead | percentage |\n| postgres.table_rows_count | live, dead | rows |\n| postgres.table_ops_rows_rate | inserted, deleted, updated | rows/s |\n| postgres.table_ops_rows_hot_ratio | hot | percentage |\n| postgres.table_ops_rows_hot_rate | hot | rows/s |\n| postgres.table_cache_io_ratio | miss | percentage |\n| postgres.table_io_rate | memory, disk | B/s |\n| postgres.table_index_cache_io_ratio | miss | percentage |\n| postgres.table_index_io_rate | memory, disk | B/s |\n| postgres.table_toast_cache_io_ratio | miss | percentage |\n| postgres.table_toast_io_rate | memory, disk | B/s |\n| postgres.table_toast_index_cache_io_ratio | miss | percentage |\n| postgres.table_toast_index_io_rate | memory, disk | B/s |\n| postgres.table_scans_rate | index, sequential | scans/s |\n| postgres.table_scans_rows_rate | index, sequential | rows/s |\n| postgres.table_autovacuum_since_time | time | seconds |\n| postgres.table_vacuum_since_time | time | seconds |\n| postgres.table_autoanalyze_since_time | time | seconds |\n| postgres.table_analyze_since_time | time | seconds |\n| postgres.table_null_columns | null | columns |\n| postgres.table_size | size | B |\n| postgres.table_bloat_size_perc | bloat | percentage |\n| postgres.table_bloat_size | bloat | B |\n\n### Per index\n\nThese metrics refer to the table index.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| database | database name |\n| schema | schema name |\n| table | table name |\n| parent_table | parent table name |\n| index | index name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postgres.index_size | size | B |\n| postgres.index_bloat_size_perc | bloat | percentage |\n| postgres.index_bloat_size | bloat | B |\n| postgres.index_usage_status | used, unused | status 
|\n\n",integration_type:"collector",id:"go.d.plugin-postgres-PostgreSQL",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/postgres/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-powerdns",plugin_name:"go.d.plugin",module_name:"powerdns",monitored_instance:{name:"PowerDNS Authoritative Server",link:"https://doc.powerdns.com/authoritative/",icon_filename:"powerdns.svg",categories:["data-collection.dns-and-dhcp-servers"]},keywords:["powerdns","dns"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1},overview:"# PowerDNS Authoritative Server\n\nPlugin: go.d.plugin\nModule: powerdns\n\n## Overview\n\nThis collector monitors PowerDNS Authoritative Server instances.\nIt collects metrics from [the internal webserver](https://doc.powerdns.com/authoritative/http-api/index.html#webserver).\n\nUsed endpoints:\n\n- [`/api/v1/servers/localhost/statistics`](https://doc.powerdns.com/authoritative/http-api/statistics.html)\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Enable webserver\n\nFollow [webserver](https://doc.powerdns.com/authoritative/http-api/index.html#webserver) documentation.\n\n\n#### Enable HTTP API\n\nFollow [HTTP API](https://doc.powerdns.com/authoritative/http-api/index.html#enabling-the-api) documentation.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/powerdns.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/powerdns.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8081 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. 
| no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8081\n\n```\n{% /details %}\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8081\n username: admin\n password: password\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8081\n\n - name: remote\n url: http://203.0.113.0:8081\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `powerdns` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m powerdns\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per PowerDNS Authoritative Server instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| powerdns.questions_in | udp, tcp | questions/s |\n| powerdns.questions_out | udp, tcp | questions/s |\n| powerdns.cache_usage | query-cache-hit, query-cache-miss, packetcache-hit, packetcache-miss | events/s |\n| powerdns.cache_size | query-cache, packet-cache, key-cache, meta-cache | entries |\n| powerdns.latency | latency | microseconds |\n\n",integration_type:"collector",id:"go.d.plugin-powerdns-PowerDNS_Authoritative_Server",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/powerdns/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-powerdns_recursor",plugin_name:"go.d.plugin",module_name:"powerdns_recursor",monitored_instance:{name:"PowerDNS Recursor",link:"https://doc.powerdns.com/recursor/",icon_filename:"powerdns.svg",categories:["data-collection.dns-and-dhcp-servers"]},keywords:["powerdns","dns"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1},overview:"# PowerDNS Recursor\n\nPlugin: go.d.plugin\nModule: powerdns_recursor\n\n## Overview\n\nThis collector monitors PowerDNS Recursor instances.\n\nIt collects metrics from [the internal webserver](https://doc.powerdns.com/recursor/http-api/index.html#built-in-webserver-and-http-api).\n\nUsed endpoints:\n\n- [`/api/v1/servers/localhost/statistics`](https://doc.powerdns.com/recursor/common/api/endpoint-statistics.html)\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Enable webserver\n\nFollow [webserver](https://doc.powerdns.com/recursor/http-api/index.html#webserver) documentation.\n\n\n#### Enable HTTP API\n\nFollow [HTTP API](https://doc.powerdns.com/recursor/http-api/index.html#enabling-the-api) documentation.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/powerdns_recursor.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/powerdns_recursor.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8081 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. 
| | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8081\n\n```\n{% /details %}\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8081\n username: admin\n password: password\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8081\n\n - name: remote\n url: http://203.0.113.0:8081\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `powerdns_recursor` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m powerdns_recursor\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per PowerDNS Recursor instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| powerdns_recursor.questions_in | total, tcp, ipv6 | questions/s |\n| powerdns_recursor.questions_out | udp, tcp, ipv6, throttled | questions/s |\n| powerdns_recursor.answer_time | 0-1ms, 1-10ms, 10-100ms, 100-1000ms, slow | queries/s |\n| powerdns_recursor.timeouts | total, ipv4, ipv6 | timeouts/s |\n| powerdns_recursor.drops | over-capacity-drops, query-pipe-full-drops, too-old-drops, truncated-drops, empty-queries | drops/s |\n| powerdns_recursor.cache_usage | cache-hits, cache-misses, packet-cache-hits, packet-cache-misses | events/s |\n| powerdns_recursor.cache_size | cache, packet-cache, negative-cache | entries |\n\n",integration_type:"collector",id:"go.d.plugin-powerdns_recursor-PowerDNS_Recursor",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/powerdns_recursor/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-4d_server",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"4D Server",link:"https://github.com/ThomasMaul/Prometheus_4D_Exporter",icon_filename:"4d_server.png",categories:["data-collection.database-servers"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# 4D Server\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor 4D Server performance metrics for efficient application management and optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [4D Server exporter](https://github.com/ThomasMaul/Prometheus_4D_Exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [4D Server exporter](https://github.com/ThomasMaul/Prometheus_4D_Exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | 
Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n 
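# skips server certificate chain and hostname validation (the endpoint serves a self-signed certificate)\n 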
tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-4D_Server",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-8430ft-modem",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"8430FT modem",link:"https://github.com/dernasherbrezon/8430ft_exporter",icon_filename:"mtc.svg",categories:["data-collection.networking-stack-and-network-interfaces"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# 8430FT modem\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep track of vital metrics from the MTS 8430FT modem for streamlined network performance and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [8430FT Exporter](https://github.com/dernasherbrezon/8430ft_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### 
Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [8430FT Exporter](https://github.com/dernasherbrezon/8430ft_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-8430FT_modem",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-a10-acos",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"A10 ACOS network devices",link:"https://github.com/a10networks/PrometheusExporter",icon_filename:"a10-networks.png",categories:["data-collection.networking-stack-and-network-interfaces"]},keywords:["network monitoring","network performance","traffic analysis"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# A10 ACOS network devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor A10 Networks device metrics for comprehensive management and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [A10-Networks Prometheus Exporter](https://github.com/a10networks/PrometheusExporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [A10-Networks Prometheus Exporter](https://github.com/a10networks/PrometheusExporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-A10_ACOS_network_devices",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-amd_smi",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"AMD CPU & GPU",link:"https://github.com/amd/amd_smi_exporter",icon_filename:"amd.svg",categories:["data-collection.hardware-devices-and-sensors"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# AMD CPU & GPU\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor AMD System Management Interface performance for optimized hardware management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AMD SMI Exporter](https://github.com/amd/amd_smi_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AMD SMI Exporter](https://github.com/amd/amd_smi_exporter) by following the instructions mentioned in the exporter 
README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-AMD_CPU_&_GPU",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-apicast",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"APIcast",link:"https://github.com/3scale/apicast",icon_filename:"apicast.png",categories:["data-collection.web-servers-and-web-proxies"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# APIcast\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor APIcast performance metrics to optimize API gateway operations and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [APIcast](https://github.com/3scale/apicast).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [APIcast](https://github.com/3scale/apicast) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe 
configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-APIcast",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-arm_hwcpipe",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"ARM HWCPipe",link:"https://github.com/ylz-at/arm-hwcpipe-exporter",icon_filename:"arm.svg",categories:["data-collection.hardware-devices-and-sensors"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# ARM HWCPipe\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep track of ARM-based Android devices and get metrics for efficient performance optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ARM HWCPipe Exporter](https://github.com/ylz-at/arm-hwcpipe-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ARM HWCPipe Exporter](https://github.com/ylz-at/arm-hwcpipe-exporter) by following the instructions 
mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-ARM_HWCPipe",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-aws_ec2",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"AWS EC2 Compute instances",link:"https://github.com/O1ahmad/aws_ec2_exporter",icon_filename:"aws-ec2.png",categories:["data-collection.cloud-provider-managed"]},keywords:["cloud services","cloud computing","aws services"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# AWS EC2 Compute instances\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack AWS EC2 instances' key metrics for optimized performance and cost management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AWS EC2 Exporter](https://github.com/O1ahmad/aws_ec2_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AWS EC2 
Exporter](https://github.com/O1ahmad/aws_ec2_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-AWS_EC2_Compute_instances",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-aws_ec2_spot",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"AWS EC2 Spot Instance",link:"https://github.com/patcadelina/ec2-spot-exporter",icon_filename:"aws-ec2.png",categories:["data-collection.cloud-provider-managed"]},keywords:["cloud services","cloud computing","aws services"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# AWS EC2 Spot Instance\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor AWS EC2 Spot instances' performance metrics for efficient resource allocation and cost optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AWS EC2 Spot Exporter](https://github.com/patcadelina/ec2-spot-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[AWS EC2 Spot Exporter](https://github.com/patcadelina/ec2-spot-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-AWS_EC2_Spot_Instance",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-aws_ecs",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"AWS ECS",link:"https://github.com/bevers222/ecs-exporter",icon_filename:"amazon-ecs.png",categories:["data-collection.cloud-provider-managed"]},keywords:["cloud services","cloud computing","aws services"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# AWS ECS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on AWS ECS services and resources for optimized container management and orchestration.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AWS ECS exporter](https://github.com/bevers222/ecs-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AWS ECS exporter](https://github.com/bevers222/ecs-exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-AWS_ECS",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-aws_health",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"AWS Health events",link:"https://github.com/vladvasiliu/aws-health-exporter-rs",icon_filename:"aws.svg",categories:["data-collection.cloud-provider-managed"]},keywords:["cloud services","cloud computing","aws services"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# AWS Health events\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack AWS service health metrics for proactive incident management and resolution.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AWS Health Exporter](https://github.com/vladvasiliu/aws-health-exporter-rs).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AWS Health 
Exporter](https://github.com/vladvasiliu/aws-health-exporter-rs) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
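- If the debug run produces no samples, it can help to first confirm that the exporter endpoint itself responds (a general sketch using plain curl, not a Netdata-specific tool; adjust the URL to match your configured job):\n\n  ```bash\n  # expect Prometheus text exposition format, e.g. lines starting with '# TYPE'\n  curl -s http://127.0.0.1:9090/metrics | head\n  ```\n\n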
",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**All other untyped metrics are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-AWS_Health_events",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},
{meta:{id:"collector-go.d.plugin-prometheus-aws_quota",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"AWS Quota",link:"https://github.com/emylincon/aws_quota_exporter",icon_filename:"aws.svg",categories:["data-collection.cloud-provider-managed"]},keywords:["cloud services","cloud computing","aws services"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# AWS Quota\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor AWS service quotas for effective resource usage and cost management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [aws_quota_exporter](https://github.com/emylincon/aws_quota_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [aws_quota_exporter](https://github.com/emylincon/aws_quota_exporter) by following the instructions in the exporter\'s README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n
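- If your Agent runs under systemd, you can also follow its journal while the collector retries (a general sketch; the service unit name may differ on your system):\n\n  ```bash\n  sudo journalctl -u netdata -f | grep go.d\n  ```\n\n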
",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**All other untyped metrics are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-AWS_Quota",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},
{meta:{id:"collector-go.d.plugin-prometheus-aws_rds",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"AWS RDS",link:"https://github.com/percona/rds_exporter",icon_filename:"aws-rds.svg",categories:["data-collection.database-servers"]},keywords:["cloud services","cloud computing","aws services"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# AWS RDS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Amazon RDS (Relational Database Service) metrics for efficient cloud database management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [rds_exporter](https://github.com/percona/rds_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [rds_exporter](https://github.com/percona/rds_exporter) by following the instructions in the exporter\'s README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n
",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**All other untyped metrics are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-AWS_RDS",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},
{meta:{id:"collector-go.d.plugin-prometheus-aws_s3",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"AWS S3 buckets",link:"https://github.com/ribbybibby/s3_exporter",icon_filename:"aws-s3.png",categories:["data-collection.cloud-provider-managed"]},keywords:["cloud services","cloud computing","aws services"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# AWS S3 buckets\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor AWS S3 storage metrics for optimized performance, data management, and cost efficiency.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AWS S3 Exporter](https://github.com/ribbybibby/s3_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AWS S3 Exporter](https://github.com/ribbybibby/s3_exporter) by following the instructions in the exporter\'s README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n
",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**All other untyped metrics are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-AWS_S3_buckets",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},
{meta:{id:"collector-go.d.plugin-prometheus-aws_sqs",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"AWS SQS",link:"https://github.com/jmal98/sqs-exporter",icon_filename:"aws-sqs.png",categories:["data-collection.cloud-provider-managed"]},keywords:["cloud services","cloud computing","aws services"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# AWS SQS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack AWS SQS messaging metrics for efficient message processing and queue management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AWS SQS Exporter](https://github.com/jmal98/sqs-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AWS SQS Exporter](https://github.com/jmal98/sqs-exporter) by following the instructions in the exporter\'s README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n
",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**All other untyped metrics are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-AWS_SQS",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},
{meta:{id:"collector-go.d.plugin-prometheus-aws_instance_health",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"AWS instance health",link:"https://github.com/bobtfish/aws-instance-health-exporter",icon_filename:"aws.svg",categories:["data-collection.cloud-provider-managed"]},keywords:["cloud services","cloud computing","aws services"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# AWS instance health\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor the health of AWS instances for improved performance and availability.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AWS instance health exporter](https://github.com/bobtfish/aws-instance-health-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AWS instance health exporter](https://github.com/bobtfish/aws-instance-health-exporter) by following the instructions in the exporter\'s README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n
",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**All other untyped metrics are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-AWS_instance_health",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},
{meta:{id:"collector-go.d.plugin-prometheus-airthings_waveplus",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Airthings Waveplus air sensor",link:"https://github.com/jeremybz/waveplus_exporter",icon_filename:"airthings.svg",categories:["data-collection.iot-devices"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Airthings Waveplus air sensor\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Waveplus radon sensor metrics for efficient indoor air quality monitoring and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Waveplus Radon Sensor Exporter](https://github.com/jeremybz/waveplus_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Waveplus Radon Sensor Exporter](https://github.com/jeremybz/waveplus_exporter) by following the instructions in the exporter\'s README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Airthings_Waveplus_air_sensor",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-akami_edgedns",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Akamai Edge DNS Traffic",link:"https://github.com/akamai/akamai-edgedns-traffic-exporter",icon_filename:"akamai.svg",categories:["data-collection.dns-and-dhcp-servers"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Akamai Edge DNS Traffic\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack and analyze Akamai Edge DNS traffic for enhanced performance and security.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Akamai Edge DNS Traffic Exporter](https://github.com/akamai/akamai-edgedns-traffic-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Akamai Edge DNS Traffic 
Exporter](https://github.com/akamai/akamai-edgedns-traffic-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency, in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout, in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**All other untyped metrics are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Akamai_Edge_DNS_Traffic",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-akami_gtm",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Akamai Global Traffic Management",link:"https://github.com/akamai/akamai-gtm-metrics-exporter",icon_filename:"akamai.svg",categories:["data-collection.cloud-provider-managed"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Akamai Global Traffic Management\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor vital metrics of Akamai Global Traffic Management (GTM) for optimized load balancing and failover.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Akamai Global Traffic Management Metrics Exporter](https://github.com/akamai/akamai-gtm-metrics-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[Akamai Global Traffic Management Metrics Exporter](https://github.com/akamai/akamai-gtm-metrics-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency, in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout, in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**All other untyped metrics are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Akamai_Global_Traffic_Management",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-akami_cloudmonitor",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Akamai Cloudmonitor",link:"https://github.com/ExpressenAB/cloudmonitor_exporter",icon_filename:"akamai.svg",categories:["data-collection.cloud-provider-managed"]},keywords:["cloud services","cloud computing","scalability"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Akamai Cloudmonitor\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Akamai Cloudmonitor provider metrics for comprehensive cloud performance management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cloudmonitor exporter](https://github.com/ExpressenAB/cloudmonitor_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[Cloudmonitor exporter](https://github.com/ExpressenAB/cloudmonitor_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency, in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout, in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**All other untyped metrics are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Akami_Cloudmonitor",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-alamos_fe2",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Alamos FE2 server",link:"https://github.com/codemonauts/prometheus-fe2-exporter",icon_filename:"alamos_fe2.png",categories:["data-collection.apm"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Alamos FE2 server\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Alamos FE2 systems for improved performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Alamos FE2 Exporter](https://github.com/codemonauts/prometheus-fe2-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Alamos FE2 Exporter](https://github.com/codemonauts/prometheus-fe2-exporter) by following the instructions 
in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency, in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout, in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**All other untyped metrics are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Alamos_FE2_server",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-alibaba-cloud",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Alibaba Cloud",link:"https://github.com/aylei/aliyun-exporter",icon_filename:"alibaba-cloud.svg",categories:["data-collection.cloud-provider-managed"]},keywords:["cloud services","cloud computing","scalability"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Alibaba Cloud\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Alibaba Cloud services and resources for efficient management and cost optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Alibaba Cloud Exporter](https://github.com/aylei/aliyun-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Alibaba Cloud 
Exporter](https://github.com/aylei/aliyun-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency, in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout, in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**All other untyped metrics are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Alibaba_Cloud",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-altaro_backup",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Altaro Backup",link:"https://github.com/raph2i/altaro_backup_exporter",icon_filename:"altaro.svg",categories:["data-collection.storage-mount-points-and-filesystems"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Altaro Backup\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Altaro Backup performance metrics to ensure smooth data protection and recovery operations.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Altaro Backup Exporter](https://github.com/raph2i/altaro_backup_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Altaro Backup Exporter](https://github.com/raph2i/altaro_backup_exporter) 
by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency, in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout, in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**All other untyped metrics are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Altaro_Backup",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-aaisp",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Andrews & Arnold line status",link:"https://github.com/daveio/aaisp-exporter",icon_filename:"andrewsarnold.jpg",categories:["data-collection.networking-stack-and-network-interfaces"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Andrews & Arnold line status\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Andrews & Arnold Ltd (AAISP) metrics for improved network performance and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Andrews & Arnold line status exporter](https://github.com/daveio/aaisp-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Andrews & Arnold line status 
exporter](https://github.com/daveio/aaisp-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency, in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout, in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**All other untyped metrics are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Andrews_&_Arnold_line_status",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-airflow",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Apache Airflow",link:"https://github.com/shalb/airflow-exporter",icon_filename:"airflow.png",categories:["data-collection.apm"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Apache Airflow\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Apache Airflow metrics to optimize task scheduling and workflow management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Airflow exporter](https://github.com/shalb/airflow-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Airflow exporter](https://github.com/shalb/airflow-exporter) by following the instructions in the exporter 
README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Apache_Airflow",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-flink",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Apache Flink",link:"https://github.com/matsumana/flink_exporter",icon_filename:"apache_flink.png",categories:["data-collection.apm"]},keywords:["web server","http","https"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Apache Flink\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Apache Flink metrics for efficient stream processing and application management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Apache Flink Metrics Reporter](https://github.com/matsumana/flink_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Apache Flink Metrics Reporter](https://github.com/matsumana/flink_exporter) by following 
the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Apache_Flink",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-apple_timemachine",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Apple Time Machine",link:"https://github.com/znerol/prometheus-timemachine-exporter",icon_filename:"apple.svg",categories:["data-collection.macos-systems"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Apple Time Machine\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Apple Time Machine backup metrics for efficient data protection and recovery.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Apple Time Machine Exporter](https://github.com/znerol/prometheus-timemachine-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Apple Time Machine 
Exporter](https://github.com/znerol/prometheus-timemachine-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Apple_Time_Machine",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-aruba",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Aruba devices",link:"https://github.com/slashdoom/aruba_exporter",icon_filename:"aruba.svg",categories:["data-collection.networking-stack-and-network-interfaces"]},keywords:["network monitoring","network performance","aruba devices"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Aruba devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Aruba Networks devices performance metrics for comprehensive network management and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Aruba Exporter](https://github.com/slashdoom/aruba_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Aruba 
Exporter](https://github.com/slashdoom/aruba_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Aruba_devices",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-arvancloud_cdn",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"ArvanCloud CDN",link:"https://github.com/arvancloud/ar-prometheus-exporter",icon_filename:"arvancloud.png",categories:["data-collection.cloud-provider-managed"]},keywords:["cloud services","cloud computing","scalability"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# ArvanCloud CDN\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack and analyze ArvanCloud CDN and cloud services performance metrics for optimized delivery and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ArvanCloud exporter](https://github.com/arvancloud/ar-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ArvanCloud 
exporter](https://github.com/arvancloud/ar-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-ArvanCloud_CDN",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-audisto",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Audisto",link:"https://github.com/ZeitOnline/audisto_exporter",icon_filename:"audisto.svg",categories:["data-collection.apm"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Audisto\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Audisto SEO and website metrics for improved search performance and optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Audisto exporter](https://github.com/ZeitOnline/audisto_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Audisto exporter](https://github.com/ZeitOnline/audisto_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### 
Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Audisto",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-authlog",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"AuthLog",link:"https://github.com/woblerr/authlog_exporter",icon_filename:"linux.png",categories:["data-collection.logs-servers"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# AuthLog\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor authentication logs for security insights and efficient access management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AuthLog Exporter](https://github.com/woblerr/authlog_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AuthLog Exporter](https://github.com/woblerr/authlog_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### 
Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-AuthLog",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-azure_ad_app_passwords",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Azure AD App passwords",link:"https://github.com/vladvasiliu/azure-app-secrets-monitor",icon_filename:"azure.png",categories:["data-collection.cloud-provider-managed"]},keywords:["cloud services","cloud computing","azure services"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Azure AD App passwords\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nSafeguard and track Azure App secrets for enhanced security and access management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Azure App Secrets monitor](https://github.com/vladvasiliu/azure-app-secrets-monitor).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Azure App 
Secrets monitor](https://github.com/vladvasiliu/azure-app-secrets-monitor) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
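For example, a minimal sketch that keeps only request counters and drops Go runtime series (the metric name patterns below are hypothetical placeholders, not names this exporter is known to expose):\n\n```yaml\nselector:\n allow:\n - http_requests_total\n deny:\n - go_*\n```\n\n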
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Azure_AD_App_passwords",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-azure_elastic_sql",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Azure Elastic Pool SQL",link:"https://github.com/benclapp/azure_elastic_sql_exporter",icon_filename:"azure-elastic-sql.png",categories:["data-collection.cloud-provider-managed"]},keywords:["database","relational db","data querying"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Azure Elastic Pool SQL\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Azure Elastic SQL performance metrics for efficient database management and query optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Azure Elastic SQL Exporter](https://github.com/benclapp/azure_elastic_sql_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Azure Elastic SQL Exporter](https://github.com/benclapp/azure_elastic_sql_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
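For example, a minimal sketch that keeps only request counters and drops Go runtime series (the metric name patterns below are hypothetical placeholders, not names this exporter is known to expose):\n\n```yaml\nselector:\n allow:\n - http_requests_total\n deny:\n - go_*\n```\n\n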
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Azure_Elastic_Pool_SQL",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-azure_res",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Azure Resources",link:"https://github.com/FXinnovation/azure_metrics_exporter",icon_filename:"azure.png",categories:["data-collection.cloud-provider-managed"]},keywords:["cloud services","cloud computing","azure services"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Azure Resources\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Azure resources' vital metrics for efficient cloud management and cost optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Azure Resources Exporter](https://github.com/FXinnovation/azure_metrics_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Azure Resources 
Exporter](https://github.com/FXinnovation/azure_metrics_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
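For example, a minimal sketch that keeps only request counters and drops Go runtime series (the metric name patterns below are hypothetical placeholders, not names this exporter is known to expose):\n\n```yaml\nselector:\n allow:\n - http_requests_total\n deny:\n - go_*\n```\n\n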
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Azure_Resources",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-azure_sql",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Azure SQL",link:"https://github.com/iamseth/azure_sql_exporter",icon_filename:"azure-sql.png",categories:["data-collection.cloud-provider-managed"]},keywords:["database","relational db","data querying"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Azure SQL\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Azure SQL performance metrics for efficient database management and query optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Azure SQL exporter](https://github.com/iamseth/azure_sql_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Azure SQL exporter](https://github.com/iamseth/azure_sql_exporter) 
by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
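For example, a minimal sketch that keeps only request counters and drops Go runtime series (the metric name patterns below are hypothetical placeholders, not names this exporter is known to expose):\n\n```yaml\nselector:\n allow:\n - http_requests_total\n deny:\n - go_*\n```\n\n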
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Azure_SQL",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-azure_service_bus",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Azure Service Bus",link:"https://github.com/marcinbudny/servicebus_exporter",icon_filename:"azure-service-bus.png",categories:["data-collection.cloud-provider-managed"]},keywords:["cloud services","cloud computing","azure services"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Azure Service Bus\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Azure Service Bus messaging metrics for optimized communication and integration.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Azure Service Bus Exporter](https://github.com/marcinbudny/servicebus_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Azure Service Bus 
Exporter](https://github.com/marcinbudny/servicebus_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
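For example, a minimal sketch that keeps only request counters and drops Go runtime series (the metric name patterns below are hypothetical placeholders, not names this exporter is known to expose):\n\n```yaml\nselector:\n allow:\n - http_requests_total\n deny:\n - go_*\n```\n\n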
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Azure_Service_Bus",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-azure_app",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Azure application",link:"https://github.com/RobustPerception/azure_metrics_exporter",icon_filename:"azure.png",categories:["data-collection.cloud-provider-managed"]},keywords:["cloud services","cloud computing","azure services"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Azure application\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Azure Monitor metrics for comprehensive resource management and performance optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Azure Monitor exporter](https://github.com/RobustPerception/azure_metrics_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Azure 
Monitor exporter](https://github.com/RobustPerception/azure_metrics_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
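For example, a minimal sketch that keeps only request counters and drops Go runtime series (the metric name patterns below are hypothetical placeholders, not names this exporter is known to expose):\n\n```yaml\nselector:\n allow:\n - http_requests_total\n deny:\n - go_*\n```\n\n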
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Azure_application",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-bosh",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"BOSH",link:"https://github.com/bosh-prometheus/bosh_exporter",icon_filename:"bosh.png",categories:["data-collection.provisioning-systems"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# BOSH\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on BOSH deployment metrics for improved cloud orchestration and resource management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [BOSH exporter](https://github.com/bosh-prometheus/bosh_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [BOSH exporter](https://github.com/bosh-prometheus/bosh_exporter) by following the instructions mentioned in the exporter 
README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
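For example, a minimal sketch that keeps only request counters and drops Go runtime series (the metric name patterns below are hypothetical placeholders, not names this exporter is known to expose):\n\n```yaml\nselector:\n allow:\n - http_requests_total\n deny:\n - go_*\n```\n\n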
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-BOSH",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-bigquery",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"BigQuery",link:"https://github.com/m-lab/prometheus-bigquery-exporter",icon_filename:"bigquery.png",categories:["data-collection.cloud-provider-managed"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# BigQuery\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Google BigQuery metrics for optimized data processing and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [BigQuery Exporter](https://github.com/m-lab/prometheus-bigquery-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [BigQuery Exporter](https://github.com/m-lab/prometheus-bigquery-exporter) by following the instructions mentioned in the 
exporter README.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to match the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to match the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to match the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to match the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-BigQuery",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-bird",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Bird Routing Daemon",link:"https://github.com/czerwonk/bird_exporter",icon_filename:"bird.png",categories:["data-collection.networking-stack-and-network-interfaces"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Bird Routing Daemon\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Bird Routing Daemon metrics for optimized network routing and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Bird Routing Daemon Exporter](https://github.com/czerwonk/bird_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Bird Routing Daemon Exporter](https://github.com/czerwonk/bird_exporter) by following the 
instructions mentioned in the exporter README.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to match the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to match the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to match the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to match the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Bird_Routing_Daemon",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-blackbox",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Blackbox",link:"https://github.com/prometheus/blackbox_exporter",icon_filename:"prometheus.svg",categories:["data-collection.synthetic-checks"]},keywords:["blackbox"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Blackbox\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack external service availability and response times with Blackbox monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Blackbox exporter](https://github.com/prometheus/blackbox_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Blackbox exporter](https://github.com/prometheus/blackbox_exporter) by following the instructions mentioned in 
the exporter README.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to match the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to match the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to match the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to match the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Blackbox",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-bobcat",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Bobcat Miner 300",link:"https://github.com/pperzyna/bobcat_exporter",icon_filename:"bobcat.jpg",categories:["data-collection.iot-devices"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Bobcat Miner 300\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Bobcat equipment metrics for optimized performance and maintenance management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Bobcat Exporter](https://github.com/pperzyna/bobcat_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Bobcat Exporter](https://github.com/pperzyna/bobcat_exporter) by following the instructions mentioned in the exporter 
README.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to match the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to match the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to match the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to match the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Bobcat_Miner_300",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-borg",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Borg backup",link:"https://github.com/k0ral/borg-exporter",icon_filename:"borg.svg",categories:["data-collection.storage-mount-points-and-filesystems"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Borg backup\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Borg backup performance metrics for efficient data protection and recovery.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Borg backup exporter](https://github.com/k0ral/borg-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Borg backup exporter](https://github.com/k0ral/borg-exporter) by following the instructions mentioned in the exporter 
README.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to match the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to match the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to match the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to match the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Borg_backup",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-bungeecord",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"BungeeCord",link:"https://github.com/weihao/bungeecord-prometheus-exporter",icon_filename:"bungee.png",categories:["data-collection.gaming"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# BungeeCord\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack BungeeCord proxy server metrics for efficient load balancing and performance management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [BungeeCord Prometheus Exporter](https://github.com/weihao/bungeecord-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [BungeeCord Prometheus Exporter](https://github.com/weihao/bungeecord-prometheus-exporter) by 
following the instructions mentioned in the exporter README.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to match the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to match the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to match the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to match the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-BungeeCord",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-cvmfs",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"CVMFS clients",link:"https://github.com/guilbaults/cvmfs-exporter",icon_filename:"cvmfs.png",categories:["data-collection.storage-mount-points-and-filesystems"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# CVMFS clients\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack CernVM File System metrics for optimized distributed file system performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [CVMFS exporter](https://github.com/guilbaults/cvmfs-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [CVMFS exporter](https://github.com/guilbaults/cvmfs-exporter) by following the instructions mentioned 
in the exporter README.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to match the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to match the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to match the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to match the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-CVMFS_clients",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-celery",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Celery",link:"https://github.com/ZeitOnline/celery_redis_prometheus",icon_filename:"celery.png",categories:["data-collection.task-queues"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Celery\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Celery task queue metrics for optimized task processing and resource management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Celery Exporter](https://github.com/ZeitOnline/celery_redis_prometheus).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Celery Exporter](https://github.com/ZeitOnline/celery_redis_prometheus) by following the instructions mentioned in the 
exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Celery",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-certificate_transparency",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Certificate Transparency",link:"https://github.com/Hsn723/ct-exporter",icon_filename:"ct.png",categories:["data-collection.security-systems"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Certificate Transparency\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack certificate transparency log metrics for enhanced\nSSL/TLS certificate management and security.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ct-exporter](https://github.com/Hsn723/ct-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ct-exporter](https://github.com/Hsn723/ct-exporter) by following the instructions mentioned in the 
exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Certificate_Transparency",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-checkpoint",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Checkpoint device",link:"https://github.com/RespiroConsulting/CheckPointExporter",icon_filename:"checkpoint.png",categories:["data-collection.networking-stack-and-network-interfaces"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Checkpoint device\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Check Point firewall and security metrics for enhanced network protection and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Checkpoint exporter](https://github.com/RespiroConsulting/CheckPointExporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Checkpoint 
exporter](https://github.com/RespiroConsulting/CheckPointExporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Checkpoint_device",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-chia",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Chia",link:"https://github.com/chia-network/chia-exporter",icon_filename:"chia.png",categories:["data-collection.blockchain-servers"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Chia\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Chia blockchain metrics for optimized farming and resource allocation.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Chia Exporter](https://github.com/chia-network/chia-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Chia Exporter](https://github.com/chia-network/chia-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### 
Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Chia",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-clm5ip",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Christ Elektronik CLM5IP power panel",link:"https://github.com/christmann/clm5ip_exporter/",icon_filename:"christelec.png",categories:["data-collection.iot-devices"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Christ Elektronik CLM5IP power panel\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Christ Elektronik CLM5IP device metrics for efficient performance and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Christ Elektronik CLM5IP Exporter](https://github.com/christmann/clm5ip_exporter/).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Christ Elektronik CLM5IP 
Exporter](https://github.com/christmann/clm5ip_exporter/) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Christ_Elektronik_CLM5IP_power_panel",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-cilium_agent",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Cilium Agent",link:"https://github.com/cilium/cilium",icon_filename:"cilium.png",categories:["data-collection.kubernetes"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Cilium Agent\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Cilium Agent metrics for optimized network security and connectivity.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cilium Agent](https://github.com/cilium/cilium).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cilium Agent](https://github.com/cilium/cilium) by following the instructions mentioned in the exporter README.\n\n\n\n### 
Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Cilium_Agent",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-cilium_operator",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Cilium Operator",link:"https://github.com/cilium/cilium",icon_filename:"cilium.png",categories:["data-collection.kubernetes"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Cilium Operator\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Cilium Operator metrics for efficient Kubernetes network security management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cilium Operator](https://github.com/cilium/cilium).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cilium Operator](https://github.com/cilium/cilium) by following the instructions mentioned in the exporter README.\n\n\n\n### 
Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Cilium_Operator",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-cilium_proxy",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Cilium Proxy",link:"https://github.com/cilium/proxy",icon_filename:"cilium.png",categories:["data-collection.kubernetes"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Cilium Proxy\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Cilium Proxy metrics for enhanced network security and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cilium Proxy](https://github.com/cilium/proxy).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cilium Proxy](https://github.com/cilium/proxy) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration 
file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on the pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Cilium_Proxy",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-cisco_aci",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Cisco ACI",link:"https://github.com/RavuAlHemio/prometheus_aci_exporter",icon_filename:"cisco.svg",categories:["data-collection.networking-stack-and-network-interfaces"]},keywords:["network monitoring","network performance","cisco devices"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Cisco ACI\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Cisco ACI infrastructure metrics for optimized network performance and resource management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cisco ACI Exporter](https://github.com/RavuAlHemio/prometheus_aci_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cisco ACI 
Exporter](https://github.com/RavuAlHemio/prometheus_aci_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on the pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Cisco_ACI",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-citrix_netscaler",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Citrix NetScaler",link:"https://github.com/rokett/Citrix-NetScaler-Exporter",icon_filename:"citrix.svg",categories:["data-collection.networking-stack-and-network-interfaces"]},keywords:["network monitoring","network performance","traffic analysis"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Citrix NetScaler\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on NetScaler performance metrics for efficient application delivery and load balancing.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Citrix NetScaler Exporter](https://github.com/rokett/Citrix-NetScaler-Exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Citrix NetScaler Exporter](https://github.com/rokett/Citrix-NetScaler-Exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on the pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Citrix_NetScaler",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-clamd",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"ClamAV daemon",link:"https://github.com/sergeymakinen/clamav_exporter",icon_filename:"clamav.png",categories:["data-collection.security-systems"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# ClamAV daemon\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack ClamAV antivirus metrics for enhanced threat detection and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ClamAV daemon stats exporter](https://github.com/sergeymakinen/clamav_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ClamAV daemon stats exporter](https://github.com/sergeymakinen/clamav_exporter) by following the instructions 
mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on the pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-ClamAV_daemon",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-clamscan",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Clamscan results",link:"https://github.com/FortnoxAB/clamscan-exporter",icon_filename:"clamav.png",categories:["data-collection.security-systems"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Clamscan results\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor ClamAV scanning performance metrics for efficient malware detection and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [clamscan-exporter](https://github.com/FortnoxAB/clamscan-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [clamscan-exporter](https://github.com/FortnoxAB/clamscan-exporter) by following the instructions mentioned in 
the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on the pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Clamscan_results",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-clash",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Clash",link:"https://github.com/elonzh/clash_exporter",icon_filename:"clash.png",categories:["data-collection.web-servers-and-web-proxies"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Clash\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Clash proxy server metrics for optimized network performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Clash exporter](https://github.com/elonzh/clash_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Clash exporter](https://github.com/elonzh/clash_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### 
Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on the pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Clash",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-aws_cloudwatch",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"CloudWatch",link:"https://github.com/prometheus/cloudwatch_exporter",icon_filename:"aws-cloudwatch.png",categories:["data-collection.cloud-provider-managed"]},keywords:["cloud services","cloud computing","scalability"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# CloudWatch\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor AWS CloudWatch metrics for comprehensive AWS resource management and performance optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [CloudWatch exporter](https://github.com/prometheus/cloudwatch_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [CloudWatch 
exporter](https://github.com/prometheus/cloudwatch_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on the pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-CloudWatch",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-cloud_foundry",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Cloud Foundry",link:"https://github.com/bosh-prometheus/cf_exporter",icon_filename:"cloud-foundry.svg",categories:["data-collection.provisioning-systems"]},keywords:["cloud services","cloud computing","scalability"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Cloud Foundry\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Cloud Foundry platform metrics for optimized application deployment and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cloud Foundry exporter](https://github.com/bosh-prometheus/cf_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cloud Foundry 
exporter](https://github.com/bosh-prometheus/cf_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
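For example, a minimal sketch (the `cf_*` pattern is an illustrative assumption, not part of the stock configuration) that keeps only series whose names start with `cf_`:\n\n```yaml\n# hypothetical allow-list: collect only cf_-prefixed series\nselector:\n allow:\n - cf_*\n```\n\n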
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Cloud_Foundry",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-cloud_foundry_firehose",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Cloud Foundry Firehose",link:"https://github.com/bosh-prometheus/firehose_exporter",icon_filename:"cloud-foundry.svg",categories:["data-collection.provisioning-systems"]},keywords:["cloud services","cloud computing","scalability"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Cloud Foundry Firehose\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Cloud Foundry Firehose metrics for comprehensive platform diagnostics and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cloud Foundry Firehose exporter](https://github.com/bosh-prometheus/firehose_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Cloud Foundry Firehose exporter](https://github.com/bosh-prometheus/firehose_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Cloud_Foundry_Firehose",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-cloudflare_pcap",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Cloudflare PCAP",link:"https://github.com/wehkamp/docker-prometheus-cloudflare-exporter",icon_filename:"cloudflare.svg",categories:["data-collection.web-servers-and-web-proxies"]},keywords:["cloud services","cloud computing","scalability"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Cloudflare PCAP\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Cloudflare CDN and security metrics for optimized content delivery and protection.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cloudflare exporter](https://github.com/wehkamp/docker-prometheus-cloudflare-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Cloudflare exporter](https://github.com/wehkamp/docker-prometheus-cloudflare-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Cloudflare_PCAP",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-cmon",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"ClusterControl CMON",link:"https://github.com/severalnines/cmon_exporter",icon_filename:"cluster-control.svg",categories:["data-collection.database-servers"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# ClusterControl CMON\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack CMON metrics for Severalnines Cluster Control for efficient monitoring and management of database operations.\n\n\nMetrics are gathered by periodically sending HTTP requests to [CMON Exporter](https://github.com/severalnines/cmon_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [CMON Exporter](https://github.com/severalnines/cmon_exporter) by following 
the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-ClusterControl_CMON",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-collectd",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Collectd",link:"https://github.com/prometheus/collectd_exporter",icon_filename:"collectd.png",categories:["data-collection.observability"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Collectd\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor system and application metrics with Collectd for comprehensive performance analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Collectd exporter](https://github.com/prometheus/collectd_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Collectd exporter](https://github.com/prometheus/collectd_exporter) by following the instructions mentioned in the 
exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
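For example, a hedged sketch (these patterns are assumptions for illustration) that admits every series except the Go runtime internals many exporters also expose:\n\n```yaml\n# assumed patterns: keep everything, drop Go runtime series\nselector:\n allow:\n - "*"\n deny:\n - go_gc_*\n - go_memstats_*\n```\n\n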
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Collectd",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-concourse",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Concourse",link:"https://concourse-ci.org",icon_filename:"concourse.png",categories:["data-collection.ci-cd-systems"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Concourse\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Concourse CI/CD pipeline metrics for optimized workflow management and deployment.\n\n\nMetrics are gathered by periodically sending HTTP requests to the Concourse built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Configure built-in Prometheus exporter\n\nTo configure the built-in Prometheus exporter, follow the [official documentation](https://concourse-ci.org/metrics.html#configuring-metrics).\n\n\n\n### 
Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Concourse",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-ftbeerpi",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"CraftBeerPi",link:"https://github.com/jo-hannes/craftbeerpi_exporter",icon_filename:"craftbeer.png",categories:["data-collection.iot-devices"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# CraftBeerPi\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on CraftBeerPi homebrewing metrics for optimized brewing process management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [CraftBeerPi exporter](https://github.com/jo-hannes/craftbeerpi_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [CraftBeerPi exporter](https://github.com/jo-hannes/craftbeerpi_exporter) by following the instructions mentioned in 
the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-CraftBeerPi",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-crowdsec",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Crowdsec",link:"https://docs.crowdsec.net/docs/observability/prometheus",icon_filename:"crowdsec.png",categories:["data-collection.security-systems"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Crowdsec\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Crowdsec security metrics for efficient threat detection and response.\n\n\nMetrics are gathered by periodically sending HTTP requests to the Crowdsec built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Configure built-in Prometheus exporter\n\nTo configure the built-in Prometheus exporter, follow the [official 
documentation](https://docs.crowdsec.net/docs/observability/prometheus/).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Crowdsec",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-crypto",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Crypto exchanges",link:"https://github.com/ix-ai/crypto-exporter",icon_filename:"crypto.png",categories:["data-collection.blockchain-servers"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Crypto exchanges\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack cryptocurrency market metrics for informed investment and trading decisions.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Crypto exporter](https://github.com/ix-ai/crypto-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Crypto exporter](https://github.com/ix-ai/crypto-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### 
Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Crypto_exchanges",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-cryptowatch",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Cryptowatch",link:"https://github.com/nbarrientos/cryptowat_exporter",icon_filename:"cryptowatch.png",categories:["data-collection.blockchain-servers"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Cryptowatch\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Cryptowatch market data metrics for comprehensive cryptocurrency market analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cryptowat Exporter](https://github.com/nbarrientos/cryptowat_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cryptowat Exporter](https://github.com/nbarrientos/cryptowat_exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Cryptowatch",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-custom",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Custom Exporter",link:"https://github.com/orange-cloudfoundry/custom_exporter",icon_filename:"customdata.png",categories:["data-collection.generic-data-collection"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Custom Exporter\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nCreate and monitor custom metrics tailored to your specific use case and requirements.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Custom Exporter](https://github.com/orange-cloudfoundry/custom_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Custom Exporter](https://github.com/orange-cloudfoundry/custom_exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Custom_Exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-ddwrt",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"DDWRT Routers",link:"https://github.com/camelusferus/ddwrt_collector",icon_filename:"ddwrt.png",categories:["data-collection.networking-stack-and-network-interfaces"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# DDWRT Routers\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on DD-WRT router metrics for efficient network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ddwrt-collector](https://github.com/camelusferus/ddwrt_collector).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ddwrt-collector](https://github.com/camelusferus/ddwrt_collector) by following the instructions 
mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-DDWRT_Routers",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-dmarc",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"DMARC",link:"https://github.com/jgosmann/dmarc-metrics-exporter",icon_filename:"dmarc.png",categories:["data-collection.mail-servers"]},keywords:["email authentication","policy","reporting"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# DMARC\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack DMARC email authentication metrics for improved email security and deliverability.\n\n\nMetrics are gathered by periodically sending HTTP requests to [dmarc-metrics-exporter](https://github.com/jgosmann/dmarc-metrics-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [dmarc-metrics-exporter](https://github.com/jgosmann/dmarc-metrics-exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-DMARC",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-dnsbl",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"DNSBL",link:"https://github.com/Luzilla/dnsbl_exporter/",icon_filename:"dnsbl.png",categories:["data-collection.dns-and-dhcp-servers"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# DNSBL\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor DNSBL metrics for efficient domain reputation and security management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [dnsbl-exporter](https://github.com/Luzilla/dnsbl_exporter/).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [dnsbl-exporter](https://github.com/Luzilla/dnsbl_exporter/) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### 
File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-DNSBL",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-dell_emc_ecs",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Dell EMC ECS cluster",link:"https://github.com/paychex/prometheus-emcecs-exporter",icon_filename:"dell.svg",categories:["data-collection.cloud-provider-managed"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Dell EMC ECS cluster\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Dell EMC ECS object storage metrics for optimized storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Dell EMC ECS Exporter](https://github.com/paychex/prometheus-emcecs-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Dell EMC ECS Exporter](https://github.com/paychex/prometheus-emcecs-exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type. Process Untyped metrics as Counter or Gauge, matching them by metric name pattern. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Dell_EMC_ECS_cluster",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-dell_emc_isilon",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Dell EMC Isilon cluster",link:"https://github.com/paychex/prometheus-isilon-exporter",icon_filename:"dell.svg",categories:["data-collection.storage-mount-points-and-filesystems"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Dell EMC Isilon cluster\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Dell EMC Isilon scale-out NAS metrics for efficient storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Dell EMC Isilon Exporter](https://github.com/paychex/prometheus-isilon-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Dell EMC Isilon 
Exporter](https://github.com/paychex/prometheus-isilon-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type. Process Untyped metrics as Counter or Gauge, matching them by metric name pattern. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Dell_EMC_Isilon_cluster",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-dell_emc_xtremio",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Dell EMC XtremIO cluster",link:"https://github.com/cthiel42/prometheus-xtremio-exporter",icon_filename:"dell.svg",categories:["data-collection.storage-mount-points-and-filesystems"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Dell EMC XtremIO cluster\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Dell/EMC XtremIO storage metrics for optimized data management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Dell/EMC XtremIO Exporter](https://github.com/cthiel42/prometheus-xtremio-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Dell/EMC XtremIO 
Exporter](https://github.com/cthiel42/prometheus-xtremio-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type. Process Untyped metrics as Counter or Gauge, matching them by metric name pattern. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Dell_EMC_XtremIO_cluster",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-dell_powermax",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Dell PowerMax",link:"https://github.com/kckecheng/powermax_exporter",icon_filename:"powermax.png",categories:["data-collection.storage-mount-points-and-filesystems"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Dell PowerMax\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Dell EMC PowerMax storage array metrics for efficient storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [PowerMax Exporter](https://github.com/kckecheng/powermax_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [PowerMax Exporter](https://github.com/kckecheng/powermax_exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type. Process Untyped metrics as Counter or Gauge, matching them by metric name pattern. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Dell_PowerMax",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-dependency_track",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Dependency-Track",link:"https://github.com/jetstack/dependency-track-exporter",icon_filename:"dependency-track.png",categories:["data-collection.apm"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Dependency-Track\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Dependency-Track metrics for efficient vulnerability management and software supply chain analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Dependency-Track Exporter](https://github.com/jetstack/dependency-track-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Dependency-Track 
Exporter](https://github.com/jetstack/dependency-track-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type. Process Untyped metrics as Counter or Gauge, matching them by metric name pattern. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Dependency-Track",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-digitalocean",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"DigitalOcean",link:"https://github.com/metalmatze/digitalocean_exporter",icon_filename:"digitalocean.svg",categories:["data-collection.cloud-provider-managed"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# DigitalOcean\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack DigitalOcean cloud provider metrics for optimized resource management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [DigitalOcean Exporter](https://github.com/metalmatze/digitalocean_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [DigitalOcean Exporter](https://github.com/metalmatze/digitalocean_exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type. Process Untyped metrics as Counter or Gauge, matching them by metric name pattern. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-DigitalOcean",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-discourse",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Discourse",link:"https://github.com/discourse/discourse-prometheus",icon_filename:"discourse.svg",categories:["data-collection.media-streaming-servers"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Discourse\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Discourse forum metrics for efficient community management and engagement.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Discourse Exporter](https://github.com/discourse/discourse-prometheus).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Discourse Exporter](https://github.com/discourse/discourse-prometheus) by following the instructions mentioned in 
the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type. Process Untyped metrics as Counter or Gauge, matching them by metric name pattern. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Discourse",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-dutch_electricity_smart_meter",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Dutch Electricity Smart Meter",link:"https://github.com/TobiasDeBruijn/prometheus-p1-exporter",icon_filename:"dutch-electricity.png",categories:["data-collection.iot-devices"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Dutch Electricity Smart Meter\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Dutch smart meter P1 port metrics for efficient energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [P1Exporter - Dutch Electricity Smart Meter Exporter](https://github.com/TobiasDeBruijn/prometheus-p1-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[P1Exporter - Dutch Electricity Smart Meter Exporter](https://github.com/TobiasDeBruijn/prometheus-p1-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Dutch_Electricity_Smart_Meter",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-dynatrace",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Dynatrace",link:"https://github.com/Apside-TOP/dynatrace_exporter",icon_filename:"dynatrace.svg",categories:["data-collection.observability"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Dynatrace\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Dynatrace APM metrics for comprehensive application performance management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Dynatrace Exporter](https://github.com/Apside-TOP/dynatrace_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Dynatrace Exporter](https://github.com/Apside-TOP/dynatrace_exporter) by following the instructions 
mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Dynatrace",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-eos_web",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"EOS",link:"https://eos-web.web.cern.ch/eos-web/",icon_filename:"eos.png",categories:["data-collection.storage-mount-points-and-filesystems"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# EOS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor CERN EOS metrics for efficient storage management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [EOS exporter](https://github.com/cern-eos/eos_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [EOS exporter](https://github.com/cern-eos/eos_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe 
configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-EOS",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-eaton_ups",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Eaton UPS",link:"https://github.com/psyinfra/prometheus-eaton-ups-exporter",icon_filename:"eaton.svg",categories:["data-collection.ups"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Eaton UPS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Eaton uninterruptible power supply (UPS) metrics for efficient power management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Prometheus Eaton UPS Exporter](https://github.com/psyinfra/prometheus-eaton-ups-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Prometheus Eaton UPS Exporter](https://github.com/psyinfra/prometheus-eaton-ups-exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Eaton_UPS",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-elgato_keylight",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Elgato Key Light devices.",link:"https://github.com/mdlayher/keylight_exporter",icon_filename:"elgato.svg",categories:["data-collection.iot-devices"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Elgato Key Light devices.\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Elgato Key Light metrics for optimized lighting control and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Elgato Key Light exporter](https://github.com/mdlayher/keylight_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Elgato Key Light exporter](https://github.com/mdlayher/keylight_exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Elgato_Key_Light_devices.",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-energomera",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Energomera smart power meters",link:"https://github.com/peak-load/energomera_exporter",icon_filename:"energomera.png",categories:["data-collection.iot-devices"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Energomera smart power meters\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Energomera electricity meter metrics for efficient energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Energomera electricity meter exporter](https://github.com/peak-load/energomera_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Energomera electricity 
meter exporter](https://github.com/peak-load/energomera_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Energomera_smart_power_meters",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-excel",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Excel spreadsheet",link:"https://github.com/MarcusCalidus/excel-exporter",icon_filename:"excel.png",categories:["data-collection.generic-data-collection"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Excel spreadsheet\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nExport Prometheus metrics to Excel for versatile data analysis and reporting.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Excel Exporter](https://github.com/MarcusCalidus/excel-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Excel Exporter](https://github.com/MarcusCalidus/excel-exporter) by following the instructions 
mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Excel_spreadsheet",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-frrouting",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"FRRouting",link:"https://github.com/tynany/frr_exporter",icon_filename:"frrouting.png",categories:["data-collection.networking-stack-and-network-interfaces"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# FRRouting\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Free Range Routing (FRR) metrics for optimized network routing and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [FRRouting Exporter](https://github.com/tynany/frr_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [FRRouting Exporter](https://github.com/tynany/frr_exporter) by following the instructions mentioned in the 
exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-FRRouting",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-fastd",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Fastd",link:"https://github.com/freifunk-darmstadt/fastd-exporter",icon_filename:"fastd.png",categories:["data-collection.vpns"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Fastd\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Fastd VPN metrics for efficient virtual private network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Fastd Exporter](https://github.com/freifunk-darmstadt/fastd-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Fastd Exporter](https://github.com/freifunk-darmstadt/fastd-exporter) by following the instructions mentioned in the exporter 
README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Fastd",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-fortigate",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Fortigate firewall",link:"https://github.com/bluecmd/fortigate_exporter",icon_filename:"fortinet.svg",categories:["data-collection.networking-stack-and-network-interfaces"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Fortigate firewall\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Fortigate firewall metrics for enhanced network protection and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [fortigate_exporter](https://github.com/bluecmd/fortigate_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [fortigate_exporter](https://github.com/bluecmd/fortigate_exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Fortigate_firewall",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-freebsd_nfs",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"FreeBSD NFS",link:"https://github.com/Axcient/freebsd-nfs-exporter",icon_filename:"freebsd.svg",categories:["data-collection.freebsd"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# FreeBSD NFS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor FreeBSD Network File System metrics for efficient file sharing management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [FreeBSD NFS Exporter](https://github.com/Axcient/freebsd-nfs-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [FreeBSD NFS Exporter](https://github.com/Axcient/freebsd-nfs-exporter) by following the instructions 
mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-FreeBSD_NFS",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-freebsd_rctl",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"FreeBSD RCTL-RACCT",link:"https://github.com/yo000/rctl_exporter",icon_filename:"freebsd.svg",categories:["data-collection.freebsd"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# FreeBSD RCTL-RACCT\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on FreeBSD Resource Container metrics for optimized resource management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [FreeBSD RCTL Exporter](https://github.com/yo000/rctl_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [FreeBSD RCTL Exporter](https://github.com/yo000/rctl_exporter) by following the instructions mentioned in the 
exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-FreeBSD_RCTL-RACCT",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-freifunk",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Freifunk network",link:"https://github.com/xperimental/freifunk-exporter",icon_filename:"freifunk.png",categories:["data-collection.networking-stack-and-network-interfaces"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Freifunk network\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Freifunk community network metrics for optimized network performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Freifunk Exporter](https://github.com/xperimental/freifunk-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Freifunk Exporter](https://github.com/xperimental/freifunk-exporter) 
by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Freifunk_network",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-fritzbox",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Fritzbox network devices",link:"https://github.com/pdreker/fritz_exporter",icon_filename:"avm.png",categories:["data-collection.networking-stack-and-network-interfaces"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Fritzbox network devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack AVM Fritzbox router metrics for efficient home network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Fritzbox exporter](https://github.com/pdreker/fritz_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Fritzbox exporter](https://github.com/pdreker/fritz_exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Fritzbox_network_devices",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-gcp_gce",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"GCP GCE",link:"https://github.com/O1ahmad/gcp-gce-exporter",icon_filename:"gcp-gce.svg",categories:["data-collection.cloud-provider-managed"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# GCP GCE\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Google Cloud Platform Compute Engine metrics for efficient cloud resource management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [GCP GCE Exporter](https://github.com/O1ahmad/gcp-gce-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [GCP GCE Exporter](https://github.com/O1ahmad/gcp-gce-exporter) by following the 
instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency, in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout, in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-GCP_GCE",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-gcp_quota",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"GCP Quota",link:"https://github.com/mintel/gcp-quota-exporter",icon_filename:"gcp.png",categories:["data-collection.cloud-provider-managed"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# GCP Quota\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Google Cloud Platform quota metrics for optimized resource usage and cost management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [GCP Quota Exporter](https://github.com/mintel/gcp-quota-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [GCP Quota Exporter](https://github.com/mintel/gcp-quota-exporter) by following the instructions in the exporter 
README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency, in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout, in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-GCP_Quota",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-gtp",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"GTP",link:"https://github.com/wmnsk/gtp_exporter",icon_filename:"gtpu.png",categories:["data-collection.telephony-servers"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# GTP\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on GTP (GPRS Tunneling Protocol) metrics for optimized mobile data communication and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [GTP Exporter](https://github.com/wmnsk/gtp_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [GTP Exporter](https://github.com/wmnsk/gtp_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### 
File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency, in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout, in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-GTP",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-generic_cli",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Generic Command Line Output",link:"https://github.com/MarioMartReq/generic-exporter",icon_filename:"cli.svg",categories:["data-collection.generic-data-collection"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Generic Command Line Output\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack custom command line output metrics for tailored monitoring and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Generic Command Line Output Exporter](https://github.com/MarioMartReq/generic-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Generic Command Line Output 
Exporter](https://github.com/MarioMartReq/generic-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency, in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout, in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Generic_Command_Line_Output",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-enclosure",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Generic storage enclosure tool",link:"https://github.com/Gandi/jbod-rs",icon_filename:"storage-enclosure.svg",categories:["data-collection.storage-mount-points-and-filesystems"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Generic storage enclosure tool\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor storage enclosure metrics for efficient storage device management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [jbod - Generic storage enclosure tool](https://github.com/Gandi/jbod-rs).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [jbod - Generic storage enclosure 
tool](https://github.com/Gandi/jbod-rs) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency, in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout, in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Generic_storage_enclosure_tool",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-github_ratelimit",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"GitHub API rate limit",link:"https://github.com/lunarway/github-ratelimit-exporter",icon_filename:"github.svg",categories:["data-collection.other"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# GitHub API rate limit\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor GitHub API rate limit metrics for efficient\nAPI usage and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [GitHub API rate limit Exporter](https://github.com/lunarway/github-ratelimit-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [GitHub API rate limit 
Exporter](https://github.com/lunarway/github-ratelimit-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency, in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout, in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-GitHub_API_rate_limit",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-github_repo",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"GitHub repository",link:"https://github.com/githubexporter/github-exporter",icon_filename:"github.svg",categories:["data-collection.other"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# GitHub repository\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack GitHub repository metrics for optimized project and user analytics monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [GitHub Exporter](https://github.com/githubexporter/github-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [GitHub Exporter](https://github.com/githubexporter/github-exporter) by following the instructions 
in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency, in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout, in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-GitHub_repository",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-gitlab_runner",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"GitLab Runner",link:"https://gitlab.com/gitlab-org/gitlab-runner",icon_filename:"gitlab.png",categories:["data-collection.ci-cd-systems"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# GitLab Runner\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on GitLab CI/CD job metrics for efficient development and deployment management.\n\n\nMetrics are gathered by periodically sending HTTP requests to the GitLab built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Configure built-in Prometheus exporter\n\nTo configure the built-in Prometheus exporter, follow the [official 
documentation](https://docs.gitlab.com/runner/monitoring/#configuration-of-the-metrics-http-server).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-GitLab_Runner",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-gobetween",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Gobetween",link:"https://github.com/yyyar/gobetween",icon_filename:"gobetween.svg",categories:["data-collection.web-servers-and-web-proxies"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Gobetween\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Gobetween load balancer metrics for optimized network traffic management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to Gobetween built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file 
using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Gobetween",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-gcp",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Google Cloud Platform",link:"https://github.com/DazWilkin/gcp-exporter",icon_filename:"gcp.png",categories:["data-collection.cloud-provider-managed"]},keywords:["cloud services","cloud computing","scalability"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Google Cloud Platform\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Google Cloud Platform metrics for comprehensive cloud resource management and performance optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Google Cloud Platform Exporter](https://github.com/DazWilkin/gcp-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Google Cloud Platform 
Exporter](https://github.com/DazWilkin/gcp-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Google_Cloud_Platform",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-google_pagespeed",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Google Pagespeed",link:"https://github.com/foomo/pagespeed_exporter",icon_filename:"google.svg",categories:["data-collection.apm"]},keywords:["cloud services","cloud computing","google cloud services"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Google Pagespeed\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Google PageSpeed Insights performance metrics for efficient web page optimization and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Pagespeed exporter](https://github.com/foomo/pagespeed_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Pagespeed 
exporter](https://github.com/foomo/pagespeed_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Google_Pagespeed",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-gcp_stackdriver",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Google Stackdriver",link:"https://github.com/prometheus-community/stackdriver_exporter",icon_filename:"gcp-stackdriver.svg",categories:["data-collection.cloud-provider-managed"]},keywords:["cloud services","cloud computing","google cloud services"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Google Stackdriver\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Google Stackdriver monitoring metrics for optimized cloud performance and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Google Stackdriver exporter](https://github.com/prometheus-community/stackdriver_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Google Stackdriver exporter](https://github.com/prometheus-community/stackdriver_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Google_Stackdriver",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-grafana",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Grafana",link:"https://grafana.com/",icon_filename:"grafana.png",categories:["data-collection.observability"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Grafana\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Grafana dashboard and visualization metrics for optimized monitoring and data analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to Grafana built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script 
from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Grafana",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-graylog",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Graylog Server",link:"https://github.com/Graylog2/graylog2-server/",icon_filename:"graylog.svg",categories:["data-collection.logs-servers"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Graylog Server\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Graylog server metrics for efficient log management and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to Graylog built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Configure built-in Prometheus exporter\n\nTo configure the built-in Prometheus exporter, follow the [official 
documentation](https://go2docs.graylog.org/5-0/interacting_with_your_log_data/metrics.html#PrometheusMetricExporting).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Graylog_Server",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-hana",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"HANA",link:"https://github.com/jenningsloy318/hana_exporter",icon_filename:"sap.svg",categories:["data-collection.database-servers"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# HANA\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack SAP HANA database metrics for efficient data storage and query performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [HANA Exporter](https://github.com/jenningsloy318/hana_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [HANA Exporter](https://github.com/jenningsloy318/hana_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### 
Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n
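\nFor instance, a minimal sketch (the metric name patterns are hypothetical) that keeps all CPU time series except guest time:\n\n```yaml\n# hypothetical patterns - adjust to the metrics your exporter exposes\nselector:\n allow:\n - node_cpu_*\n deny:\n - node_cpu_guest_*\n```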
\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-HANA",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-hdsentinel",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"HDSentinel",link:"https://github.com/qusielle/hdsentinel-exporter",icon_filename:"harddisk.svg",categories:["data-collection.storage-mount-points-and-filesystems"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# HDSentinel\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Hard Disk Sentinel metrics for efficient storage device health management and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [HDSentinel Exporter](https://github.com/qusielle/hdsentinel-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [HDSentinel Exporter](https://github.com/qusielle/hdsentinel-exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n
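\nFor example, a sketch (the metric names are hypothetical) that processes untyped request totals as Counters and untyped temperature readings as Gauges:\n\n```yaml\n# hypothetical metric name patterns - adjust to your exporter\nfallback_type:\n counter:\n - myapp_requests_*\n gauge:\n - myapp_temperature_*\n```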
\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-HDSentinel",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-hhvm",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"HHVM",link:"https://github.com/wikimedia/operations-software-hhvm_exporter",icon_filename:"hhvm.svg",categories:["data-collection.web-servers-and-web-proxies"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# HHVM\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor HipHop Virtual Machine metrics for efficient\nPHP execution and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [HHVM Exporter](https://github.com/wikimedia/operations-software-hhvm_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [HHVM Exporter](https://github.com/wikimedia/operations-software-hhvm_exporter) by following the instructions 
mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-HHVM",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-hp_ilo",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"HP iLO",link:"https://github.com/infinityworks/hpilo-exporter",icon_filename:"hp.svg",categories:["data-collection.hardware-devices-and-sensors"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# HP iLO\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor HP Integrated Lights Out (iLO) metrics for efficient server management and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [HP iLO Metrics Exporter](https://github.com/infinityworks/hpilo-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [HP iLO Metrics Exporter](https://github.com/infinityworks/hpilo-exporter) by following the instructions mentioned in 
the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n
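\nBecause `selector` is a per-job option, it can also be attached to a specific job next to its `url`. A sketch with a hypothetical deny pattern:\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n selector:\n deny:\n - go_gc_* # hypothetical pattern - drop Go runtime GC series\n```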
\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-HP_iLO",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-halon",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Halon",link:"https://github.com/tobiasbp/halon_exporter",icon_filename:"halon.svg",categories:["data-collection.mail-servers"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Halon\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Halon email security and delivery metrics for optimized email management and protection.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Halon exporter](https://github.com/tobiasbp/halon_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Halon exporter](https://github.com/tobiasbp/halon_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### 
Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Halon",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-hashicorp_vault",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"HashiCorp Vault secrets",link:"https://github.com/tomtom-international/vault-assessment-prometheus-exporter",icon_filename:"vault.svg",categories:["data-collection.authentication-and-authorization"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# HashiCorp Vault secrets\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack HashiCorp Vault security assessment metrics for efficient secrets management and security.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Vault Assessment Prometheus Exporter](https://github.com/tomtom-international/vault-assessment-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Vault Assessment Prometheus Exporter](https://github.com/tomtom-international/vault-assessment-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-HashiCorp_Vault_secrets",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-hasura_graphql",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Hasura GraphQL Server",link:"https://github.com/zolamk/hasura-exporter",icon_filename:"hasura.svg",categories:["data-collection.database-servers"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Hasura GraphQL Server\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Hasura GraphQL engine metrics for optimized\nAPI performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Hasura Exporter](https://github.com/zolamk/hasura-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Hasura Exporter](https://github.com/zolamk/hasura-exporter) by following the instructions 
mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Hasura_GraphQL_Server",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-helium_hotspot",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Helium hotspot",link:"https://github.com/tedder/helium_hotspot_exporter",icon_filename:"helium.svg",categories:["data-collection.iot-devices"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Helium hotspot\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Helium hotspot metrics for optimized LoRaWAN network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Helium hotspot exporter](https://github.com/tedder/helium_hotspot_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Helium hotspot exporter](https://github.com/tedder/helium_hotspot_exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Helium_hotspot",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-helium_miner",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Helium miner (validator)",link:"https://github.com/tedder/miner_exporter",icon_filename:"helium.svg",categories:["data-collection.blockchain-servers"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Helium miner (validator)\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Helium miner and validator metrics for efficient blockchain performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Helium miner (validator) exporter](https://github.com/tedder/miner_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Helium miner (validator) exporter](https://github.com/tedder/miner_exporter) 
by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Helium_miner_(validator)",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-hitron_cgm",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Hitron CGN series CPE",link:"https://github.com/yrro/hitron-exporter",icon_filename:"hitron.svg",categories:["data-collection.networking-stack-and-network-interfaces"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Hitron CGN series CPE\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Hitron CGNV4 gateway metrics for efficient network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Hitron CGNV4 exporter](https://github.com/yrro/hitron-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Hitron CGNV4 exporter](https://github.com/yrro/hitron-exporter) by following 
the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Hitron_CGN_series_CPE",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-hitron_coda",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Hitron CODA Cable Modem",link:"https://github.com/hairyhenderson/hitron_coda_exporter",icon_filename:"hitron.svg",categories:["data-collection.networking-stack-and-network-interfaces"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Hitron CODA Cable Modem\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Hitron CODA cable modem metrics for optimized internet connectivity and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Hitron CODA Cable Modem Exporter](https://github.com/hairyhenderson/hitron_coda_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Hitron CODA Cable Modem 
Exporter](https://github.com/hairyhenderson/hitron_coda_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Hitron_CODA_Cable_Modem",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-homebridge",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Homebridge",link:"https://github.com/lstrojny/homebridge-prometheus-exporter",icon_filename:"homebridge.svg",categories:["data-collection.iot-devices"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Homebridge\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Homebridge smart home metrics for efficient home automation management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Homebridge Prometheus Exporter](https://github.com/lstrojny/homebridge-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Homebridge Prometheus 
Exporter](https://github.com/lstrojny/homebridge-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Homebridge",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-homey",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Homey",link:"https://github.com/rickardp/homey-prometheus-exporter",icon_filename:"homey.svg",categories:["data-collection.iot-devices"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Homey\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Homey smart home controller metrics for efficient home automation and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Homey Exporter](https://github.com/rickardp/homey-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Homey Exporter](https://github.com/rickardp/homey-prometheus-exporter) by following the instructions mentioned in the exporter 
README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Homey",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-honeypot",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Honeypot",link:"https://github.com/Intrinsec/honeypot_exporter",icon_filename:"intrinsec.svg",categories:["data-collection.security-systems"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Honeypot\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor honeypot metrics for efficient threat detection and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Intrinsec honeypot_exporter](https://github.com/Intrinsec/honeypot_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Intrinsec honeypot_exporter](https://github.com/Intrinsec/honeypot_exporter) by following the instructions mentioned in the exporter 
README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Honeypot",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-hilink",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Huawei devices",link:"https://github.com/eliecharra/hilink-exporter",icon_filename:"huawei.svg",categories:["data-collection.networking-stack-and-network-interfaces"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Huawei devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Huawei HiLink device metrics for optimized connectivity and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Huawei Hilink exporter](https://github.com/eliecharra/hilink-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Huawei Hilink exporter](https://github.com/eliecharra/hilink-exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Huawei_devices",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-hubble",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Hubble",link:"https://github.com/cilium/hubble",icon_filename:"hubble.png",categories:["data-collection.observability"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Hubble\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Hubble network observability metrics for efficient network visibility and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to Hubble built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Configure built-in Prometheus exporter\n\nTo configure the built-in Prometheus exporter, follow the [official 
documentation](https://docs.cilium.io/en/stable/observability/metrics/#hubble-metrics).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Hubble",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-ibm_aix_njmon",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"IBM AIX systems Njmon",link:"https://github.com/crooks/njmon_exporter",icon_filename:"ibm.svg",categories:["data-collection.apm"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# IBM AIX systems Njmon\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on NJmon system performance monitoring metrics for efficient IT infrastructure management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [NJmon](https://github.com/crooks/njmon_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [NJmon](https://github.com/crooks/njmon_exporter) by following the instructions mentioned in the exporter 
README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-IBM_AIX_systems_Njmon",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-ibm_cex",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"IBM CryptoExpress (CEX) cards",link:"https://github.com/ibm-s390-cloud/k8s-cex-dev-plugin",icon_filename:"ibm.svg",categories:["data-collection.hardware-devices-and-sensors"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# IBM CryptoExpress (CEX) cards\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack IBM Z Crypto Express device metrics for optimized cryptographic performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [IBM Z CEX Device Plugin Prometheus Exporter](https://github.com/ibm-s390-cloud/k8s-cex-dev-plugin).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [IBM Z CEX Device 
Plugin Prometheus Exporter](https://github.com/ibm-s390-cloud/k8s-cex-dev-plugin) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-IBM_CryptoExpress_(CEX)_cards",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-ibm_mq",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"IBM MQ",link:"https://github.com/agebhar1/mq_exporter",icon_filename:"ibm.svg",categories:["data-collection.message-brokers"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# IBM MQ\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on IBM MQ message queue metrics for efficient message transport and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [MQ Exporter](https://github.com/agebhar1/mq_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [MQ Exporter](https://github.com/agebhar1/mq_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### 
Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-IBM_MQ",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-ibm_spectrum",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"IBM Spectrum",link:"https://github.com/topine/ibm-spectrum-exporter",icon_filename:"ibm.svg",categories:["data-collection.storage-mount-points-and-filesystems"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# IBM Spectrum\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor IBM Spectrum storage metrics for efficient data management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [IBM Spectrum Exporter](https://github.com/topine/ibm-spectrum-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [IBM Spectrum Exporter](https://github.com/topine/ibm-spectrum-exporter) by following the instructions 
mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-IBM_Spectrum",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-ibm_spectrum_virtualize",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"IBM Spectrum Virtualize",link:"https://github.com/bluecmd/spectrum_virtualize_exporter",icon_filename:"ibm.svg",categories:["data-collection.storage-mount-points-and-filesystems"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# IBM Spectrum Virtualize\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor IBM Spectrum Virtualize metrics for efficient storage virtualization and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [spectrum_virtualize_exporter](https://github.com/bluecmd/spectrum_virtualize_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[spectrum_virtualize_exporter](https://github.com/bluecmd/spectrum_virtualize_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-IBM_Spectrum_Virtualize",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-ibm_zhmc",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"IBM Z Hardware Management Console",link:"https://github.com/zhmcclient/zhmc-prometheus-exporter",icon_filename:"ibm.svg",categories:["data-collection.hardware-devices-and-sensors"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# IBM Z Hardware Management Console\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor IBM Z Hardware Management Console metrics for efficient mainframe management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [IBM Z HMC Exporter](https://github.com/zhmcclient/zhmc-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [IBM Z HMC 
Exporter](https://github.com/zhmcclient/zhmc-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
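The allow/deny evaluation described below can be sketched in a few lines of Go. This is only an illustration: it uses `filepath.Match` shell-style globs as a simplified stand-in for the selector pattern syntax linked below, and it assumes that an empty `allow` list admits every series.

```go
package main

import (
	"fmt"
	"path/filepath"
)

// matchAny reports whether name matches at least one glob pattern.
// filepath.Match globs are a simplified stand-in for the selector syntax.
func matchAny(patterns []string, name string) bool {
	for _, p := range patterns {
		if ok, _ := filepath.Match(p, name); ok {
			return true
		}
	}
	return false
}

// selected mirrors the documented logic:
// (pattern1 OR pattern2) AND !(pattern3 OR pattern4).
// Assumption: an empty allow list means "allow everything".
func selected(allow, deny []string, name string) bool {
	allowed := len(allow) == 0 || matchAny(allow, name)
	return allowed && !matchAny(deny, name)
}

func main() {
	allow := []string{"node_cpu_*", "node_memory_*"}
	deny := []string{"node_cpu_guest_*"}
	for _, m := range []string{"node_cpu_seconds_total", "node_cpu_guest_seconds_total", "up"} {
		fmt.Printf("%-30s selected=%v\n", m, selected(allow, deny, m))
	}
}
```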
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-IBM_Z_Hardware_Management_Console",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-iota",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"IOTA full node",link:"https://github.com/crholliday/iota-prom-exporter",icon_filename:"iota.svg",categories:["data-collection.blockchain-servers"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# IOTA full node\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on IOTA cryptocurrency network metrics for efficient blockchain performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [IOTA Exporter](https://github.com/crholliday/iota-prom-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [IOTA Exporter](https://github.com/crholliday/iota-prom-exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
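For a concrete picture of how Untyped metrics end up typed, here is a hedged Go sketch combining the `fallback_type` patterns described below with the suffix and label heuristics listed under Metrics; the exact precedence among these rules is an assumption of this sketch, not documented behavior.

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// classifyUntyped sketches the handling of metrics without a '# TYPE' hint.
// counterPatterns and gaugePatterns correspond to the fallback_type option;
// labels holds the metric's label set. Rule order here is an assumption.
func classifyUntyped(name string, labels map[string]string, counterPatterns, gaugePatterns []string) string {
	match := func(patterns []string) bool {
		for _, p := range patterns {
			if ok, _ := filepath.Match(p, name); ok {
				return true
			}
		}
		return false
	}
	switch {
	case match(counterPatterns):
		return "counter"
	case match(gaugePatterns):
		return "gauge"
	case strings.HasSuffix(name, "_total"):
		return "counter"
	case labels["quantile"] != "":
		return "summary"
	case labels["le"] != "":
		return "histogram"
	default:
		return "ignored"
	}
}

func main() {
	fmt.Println(classifyUntyped("http_requests_total", nil, nil, nil))                           // counter (suffix)
	fmt.Println(classifyUntyped("queue_depth", nil, nil, []string{"queue_*"}))                   // gauge (fallback_type)
	fmt.Println(classifyUntyped("rpc_duration", map[string]string{"quantile": "0.9"}, nil, nil)) // summary
	fmt.Println(classifyUntyped("mystery_metric", nil, nil, nil))                                // ignored
}
```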
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-IOTA_full_node",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-ipmi",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"IPMI (By SoundCloud)",link:"https://github.com/prometheus-community/ipmi_exporter",icon_filename:"soundcloud.svg",categories:["data-collection.hardware-devices-and-sensors"]},keywords:["cloud services","cloud computing","scalability"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# IPMI (By SoundCloud)\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor IPMI metrics externally for efficient server hardware management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SoundCloud IPMI Exporter (querying IPMI externally, blackbox-exporter style)](https://github.com/prometheus-community/ipmi_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [SoundCloud IPMI Exporter (querying IPMI externally, blackbox-exporter style)](https://github.com/prometheus-community/ipmi_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
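As an aside on the `max_time_series` and `max_time_series_per_metric` rows in the table above, the two guards can be pictured with this simplified Go sketch; it illustrates the documented behavior and is not the collector's actual code.

```go
package main

import "fmt"

// Series is one scraped time series: a metric name plus its label-set key.
type Series struct {
	Metric string
	Labels string
}

// applyLimits sketches the two documented guards: if the endpoint returns
// more than maxTotal series the whole scrape is dropped; otherwise any
// single metric with more than maxPerMetric series is skipped.
func applyLimits(series []Series, maxTotal, maxPerMetric int) []Series {
	if len(series) > maxTotal {
		return nil // whole payload not processed
	}
	perMetric := map[string]int{}
	for _, s := range series {
		perMetric[s.Metric]++
	}
	var kept []Series
	for _, s := range series {
		if perMetric[s.Metric] <= maxPerMetric {
			kept = append(kept, s)
		}
	}
	return kept
}

func main() {
	in := []Series{
		{"up", `{job="a"}`},
		{"http_requests_total", `{code="200"}`},
		{"http_requests_total", `{code="404"}`},
	}
	// With maxPerMetric=1, http_requests_total is skipped; only "up" remains.
	fmt.Println(len(applyLimits(in, 2000, 1))) // 1
}
```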
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-IPMI_(By_SoundCloud)",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-influxdb",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"InfluxDB",link:"https://github.com/prometheus/influxdb_exporter",icon_filename:"influxdb.svg",categories:["data-collection.database-servers"]},keywords:["database","dbms","data storage"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# InfluxDB\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor InfluxDB time-series database metrics for efficient data storage and query performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [InfluxDB exporter](https://github.com/prometheus/influxdb_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [InfluxDB exporter](https://github.com/prometheus/influxdb_exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
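The Metrics section further down labels each chart's algorithm as either absolute or incremental. Roughly, absolute charts the sample as-is (Gauges), while incremental charts the per-second delta between consecutive collections (Counters). A minimal sketch, ignoring counter resets and collection-interval jitter:

```go
package main

import "fmt"

// rate sketches the "incremental" algorithm: chart the per-second delta
// between two consecutive collections. "absolute" simply charts the sample.
// Counter resets and variable intervals are deliberately ignored here.
func rate(prev, cur, intervalSeconds float64) float64 {
	return (cur - prev) / intervalSeconds
}

func main() {
	// A counter that moved from 1200 to 1260 over a 10s update_every
	// is charted as 6 events per second.
	fmt.Println(rate(1200, 1260, 10)) // 6
}
```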
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-InfluxDB",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-jmx",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"JMX",link:"https://github.com/prometheus/jmx_exporter",icon_filename:"java.svg",categories:["data-collection.apm"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# JMX\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Java Management Extensions (JMX) metrics for efficient Java application management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [JMX Exporter](https://github.com/prometheus/jmx_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [JMX Exporter](https://github.com/prometheus/jmx_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### 
File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
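To make the `username`, `password`, and `tls_skip_verify` rows in the table above concrete, here is roughly what such a job does on the wire, sketched with Go's standard HTTP client; the URL and credentials are the placeholders used in the examples below, not real values.

```go
package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
)

// A sketch of one authenticated scrape against an HTTPS metrics endpoint
// with a self-signed certificate, mirroring the username/password and
// tls_skip_verify job options. Do not skip verification outside testing.
func main() {
	client := &http.Client{
		Transport: &http.Transport{
			// tls_skip_verify: yes
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	req, err := http.NewRequest(http.MethodGet, "https://127.0.0.1:9090/metrics", nil)
	if err != nil {
		panic(err)
	}
	req.SetBasicAuth("username", "password") // username/password options
	resp, err := client.Do(req)
	if err != nil {
		fmt.Println("scrape failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```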
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-JMX",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-jarvis",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Jarvis Standing Desk",link:"https://github.com/hairyhenderson/jarvis_exporter/",icon_filename:"jarvis.jpg",categories:["data-collection.iot-devices"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Jarvis Standing Desk\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Jarvis standing desk usage metrics for efficient workspace ergonomics and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Jarvis Standing Desk Exporter](https://github.com/hairyhenderson/jarvis_exporter/).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Jarvis Standing Desk Exporter](https://github.com/hairyhenderson/jarvis_exporter/) by following 
the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
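The Metrics section below notes that Histogram buckets are charted per label set excluding the 'le' label (and Summaries excluding 'quantile'). A small Go sketch of that grouping idea, assuming a simplified series model:

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// chartKey sketches how histogram bucket series can be grouped into one
// chart per label set: every label except 'le' goes into the key, and the
// distinct 'le' values then become the chart's dimensions.
func chartKey(metric string, labels map[string]string) string {
	keys := make([]string, 0, len(labels))
	for k := range labels {
		if k == "le" { // excluded from grouping; becomes a dimension
			continue
		}
		keys = append(keys, k)
	}
	sort.Strings(keys)
	parts := make([]string, 0, len(keys))
	for _, k := range keys {
		parts = append(parts, k+"="+labels[k])
	}
	return metric + "{" + strings.Join(parts, ",") + "}"
}

func main() {
	a := chartKey("req_duration_bucket", map[string]string{"handler": "/", "le": "0.5"})
	b := chartKey("req_duration_bucket", map[string]string{"handler": "/", "le": "1"})
	fmt.Println(a == b, a) // true: both buckets land on the same chart
}
```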
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Jarvis_Standing_Desk",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-jenkins",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Jenkins",link:"https://www.jenkins.io/",icon_filename:"jenkins.svg",categories:["data-collection.ci-cd-systems"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Jenkins\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Jenkins continuous integration server metrics for efficient development and build management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Jenkins exporter](https://github.com/simplesurance/jenkins-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Jenkins exporter](https://github.com/simplesurance/jenkins-exporter) by following the instructions mentioned in the exporter 
README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
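One of the examples below reads metrics from a file via the `file://` scheme. Conceptually the collector then parses Prometheus text format from disk instead of scraping HTTP, roughly like this sketch; the path is the placeholder from that example.

```go
package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

// A sketch of the file:// job type: read Prometheus text-format lines from
// a local file, skipping '# HELP' and '# TYPE' comment lines.
func main() {
	f, err := os.Open("/opt/metrics/myapp/metrics.txt")
	if err != nil {
		fmt.Println("open failed:", err)
		return
	}
	defer f.Close()

	sc := bufio.NewScanner(f)
	for sc.Scan() {
		line := strings.TrimSpace(sc.Text())
		if line == "" || strings.HasPrefix(line, "#") {
			continue // comments and blank lines carry no samples
		}
		fmt.Println("series:", line)
	}
	if err := sc.Err(); err != nil {
		fmt.Println("read failed:", err)
	}
}
```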
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Jenkins",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-jetbrains_fls",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"JetBrains Floating License Server",link:"https://github.com/mkreu/jetbrains-fls-exporter",icon_filename:"jetbrains.png",categories:["data-collection.generic-data-collection"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# JetBrains Floating License Server\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor JetBrains floating license server metrics for efficient software licensing management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [JetBrains Floating License Server Export](https://github.com/mkreu/jetbrains-fls-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [JetBrains Floating License Server 
Exporter](https://github.com/mkreu/jetbrains-fls-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
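For instance, a selector with concrete glob-style patterns (an illustrative sketch; the `go_*` metric names are assumptions, not defaults shipped with this collector) might look like:\n\n```yaml\n# keep all Go runtime series except garbage-collection ones\nselector:\n allow:\n - go_*\n deny:\n - go_gc_*\n```\n\n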
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-JetBrains_Floating_License_Server",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-kafka",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Kafka",link:"https://github.com/danielqsj/kafka_exporter/",icon_filename:"kafka.svg",categories:["data-collection.message-brokers"]},keywords:["big data","stream processing","message broker"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Kafka\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Kafka message queue metrics for optimized data streaming and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Kafka Exporter](https://github.com/danielqsj/kafka_exporter/).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Kafka Exporter](https://github.com/danielqsj/kafka_exporter/) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
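For instance, a selector with concrete glob-style patterns (an illustrative sketch; the `go_*` metric names are assumptions, not defaults shipped with this collector) might look like:\n\n```yaml\n# keep all Go runtime series except garbage-collection ones\nselector:\n allow:\n - go_*\n deny:\n - go_gc_*\n```\n\n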
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Kafka",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-kafka_connect",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Kafka Connect",link:"https://github.com/findelabs/kafka-connect-exporter-rs",icon_filename:"kafka.svg",categories:["data-collection.message-brokers"]},keywords:["big data","stream processing","message broker"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Kafka Connect\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Kafka Connect metrics for efficient data streaming and integration.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Kafka Connect exporter](https://github.com/findelabs/kafka-connect-exporter-rs).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Kafka Connect 
exporter](https://github.com/findelabs/kafka-connect-exporter-rs) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
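For instance, a selector with concrete glob-style patterns (an illustrative sketch; the `go_*` metric names are assumptions, not defaults shipped with this collector) might look like:\n\n```yaml\n# keep all Go runtime series except garbage-collection ones\nselector:\n allow:\n - go_*\n deny:\n - go_gc_*\n```\n\n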
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Kafka_Connect",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-kafka_consumer_lag",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Kafka Consumer Lag",link:"https://github.com/omarsmak/kafka-consumer-lag-monitoring",icon_filename:"kafka.svg",categories:["data-collection.service-discovery-registry"]},keywords:["big data","stream processing","message broker"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Kafka Consumer Lag\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Kafka consumer lag metrics for efficient message queue management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Kafka Consumer Lag Monitoring](https://github.com/omarsmak/kafka-consumer-lag-monitoring).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Kafka 
Consumer Lag Monitoring](https://github.com/omarsmak/kafka-consumer-lag-monitoring) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
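For instance, a selector with concrete glob-style patterns (an illustrative sketch; the `go_*` metric names are assumptions, not defaults shipped with this collector) might look like:\n\n```yaml\n# keep all Go runtime series except garbage-collection ones\nselector:\n allow:\n - go_*\n deny:\n - go_gc_*\n```\n\n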
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Kafka_Consumer_Lag",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-kafka_zookeeper",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Kafka ZooKeeper",link:"https://github.com/cloudflare/kafka_zookeeper_exporter",icon_filename:"kafka.svg",categories:["data-collection.message-brokers"]},keywords:["big data","stream processing","message broker"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Kafka ZooKeeper\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Kafka ZooKeeper metrics for optimized distributed coordination and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Kafka ZooKeeper Exporter](https://github.com/cloudflare/kafka_zookeeper_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Kafka ZooKeeper 
Exporter](https://github.com/cloudflare/kafka_zookeeper_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
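For instance, a selector with concrete glob-style patterns (an illustrative sketch; the `go_*` metric names are assumptions, not defaults shipped with this collector) might look like:\n\n```yaml\n# keep all Go runtime series except garbage-collection ones\nselector:\n allow:\n - go_*\n deny:\n - go_gc_*\n```\n\n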
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Kafka_ZooKeeper",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-kannel",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Kannel",link:"https://github.com/apostvav/kannel_exporter",icon_filename:"kannel.png",categories:["data-collection.telephony-servers"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Kannel\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Kannel SMS gateway and WAP gateway metrics for efficient mobile communication and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Kannel Exporter](https://github.com/apostvav/kannel_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Kannel Exporter](https://github.com/apostvav/kannel_exporter) by following the instructions mentioned in the exporter 
README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
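For instance, a selector with concrete glob-style patterns (an illustrative sketch; the `go_*` metric names are assumptions, not defaults shipped with this collector) might look like:\n\n```yaml\n# keep all Go runtime series except garbage-collection ones\nselector:\n allow:\n - go_*\n deny:\n - go_gc_*\n```\n\n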
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Kannel",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-keepalived",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Keepalived",link:"https://github.com/gen2brain/keepalived_exporter",icon_filename:"keepalived.png",categories:["data-collection.networking-stack-and-network-interfaces"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Keepalived\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Keepalived metrics for efficient high-availability and load balancing management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Keepalived Exporter](https://github.com/gen2brain/keepalived_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Keepalived Exporter](https://github.com/gen2brain/keepalived_exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
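For instance, a selector with concrete glob-style patterns (an illustrative sketch; the `go_*` metric names are assumptions, not defaults shipped with this collector) might look like:\n\n```yaml\n# keep all Go runtime series except garbage-collection ones\nselector:\n allow:\n - go_*\n deny:\n - go_gc_*\n```\n\n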
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Keepalived",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-korral",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Kubernetes Cluster Cloud Cost",link:"https://github.com/agilestacks/korral",icon_filename:"kubernetes.svg",categories:["data-collection.kubernetes"]},keywords:["cloud services","cloud computing","scalability"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Kubernetes Cluster Cloud Cost\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Kubernetes cloud cost metrics for efficient cloud resource management and budgeting.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Kubernetes Cloud Cost Exporter](https://github.com/agilestacks/korral).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Kubernetes Cloud Cost 
Exporter](https://github.com/agilestacks/korral) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Kubernetes_Cluster_Cloud_Cost",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-ldap",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"LDAP",link:"https://github.com/titisan/ldap_exporter",icon_filename:"ldap.png",categories:["data-collection.authentication-and-authorization"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# LDAP\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Lightweight Directory Access Protocol (LDAP) metrics for efficient directory service management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [LDAP Exporter](https://github.com/titisan/ldap_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [LDAP Exporter](https://github.com/titisan/ldap_exporter) by following the instructions 
mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-LDAP",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-lagerist",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Lagerist Disk latency",link:"https://github.com/Svedrin/lagerist",icon_filename:"linux.png",categories:["data-collection.storage-mount-points-and-filesystems"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Lagerist Disk latency\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack disk latency metrics for efficient storage performance and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Lagerist Disk latency exporter](https://github.com/Svedrin/lagerist).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Lagerist Disk latency exporter](https://github.com/Svedrin/lagerist) by following the instructions mentioned in 
the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Lagerist_Disk_latency",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-linode",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Linode",link:"https://github.com/DazWilkin/linode-exporter",icon_filename:"linode.svg",categories:["data-collection.cloud-provider-managed"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Linode\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Linode cloud hosting metrics for efficient virtual server management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Linode Exporter](https://github.com/DazWilkin/linode-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Linode Exporter](https://github.com/DazWilkin/linode-exporter) by following the instructions mentioned in the exporter 
README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Linode",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-lustre",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Lustre metadata",link:"https://github.com/GSI-HPC/prometheus-cluster-exporter",icon_filename:"lustre.png",categories:["data-collection.cloud-provider-managed"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Lustre metadata\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on the Lustre clustered file system for efficient management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cluster Exporter](https://github.com/GSI-HPC/prometheus-cluster-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cluster Exporter](https://github.com/GSI-HPC/prometheus-cluster-exporter) by following the instructions 
mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Lustre_metadata",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-lynis",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Lynis audit reports",link:"https://github.com/MauveSoftware/lynis_exporter",icon_filename:"lynis.png",categories:["data-collection.security-systems"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Lynis audit reports\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Lynis security auditing tool metrics for efficient system security and compliance management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [lynis_exporter](https://github.com/MauveSoftware/lynis_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [lynis_exporter](https://github.com/MauveSoftware/lynis_exporter) by following the instructions 
mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Lynis_audit_reports",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-mp707",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"MP707 USB thermometer",link:"https://github.com/nradchenko/mp707_exporter",icon_filename:"thermometer.png",categories:["data-collection.iot-devices"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# MP707 USB thermometer\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack MP707 USB thermometer readings for efficient temperature monitoring and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [MP707 exporter](https://github.com/nradchenko/mp707_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [MP707 exporter](https://github.com/nradchenko/mp707_exporter) by following the instructions mentioned in the 
exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-MP707_USB_thermometer",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-mqtt_blackbox",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"MQTT Blackbox",link:"https://github.com/inovex/mqtt_blackbox_exporter",icon_filename:"mqtt.svg",categories:["data-collection.message-brokers"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# MQTT Blackbox\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack MQTT message transport performance using blackbox testing methods.\n\n\nMetrics are gathered by periodically sending HTTP requests to [MQTT Blackbox Exporter](https://github.com/inovex/mqtt_blackbox_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [MQTT Blackbox Exporter](https://github.com/inovex/mqtt_blackbox_exporter) by following the instructions mentioned 
in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type (Counter or Gauge) for Untyped metrics. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-MQTT_Blackbox",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-machbase",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Machbase",link:"https://github.com/MACHBASE/prometheus-machbase-exporter",icon_filename:"machbase.png",categories:["data-collection.database-servers"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Machbase\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Machbase time-series database metrics for efficient data storage and query performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Machbase Exporter](https://github.com/MACHBASE/prometheus-machbase-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Machbase Exporter](https://github.com/MACHBASE/prometheus-machbase-exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type (Counter or Gauge) for Untyped metrics. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Machbase",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-maildir",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Maildir",link:"https://github.com/cherti/mailexporter",icon_filename:"mailserver.svg",categories:["data-collection.mail-servers"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Maildir\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack mail server metrics for optimized email management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [mailexporter](https://github.com/cherti/mailexporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [mailexporter](https://github.com/cherti/mailexporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe 
configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type (Counter or Gauge) for Untyped metrics. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Maildir",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-meilisearch",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Meilisearch",link:"https://github.com/scottaglia/meilisearch_exporter",icon_filename:"meilisearch.svg",categories:["data-collection.search-engines"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Meilisearch\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Meilisearch search engine metrics for efficient search performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Meilisearch Exporter](https://github.com/scottaglia/meilisearch_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Meilisearch Exporter](https://github.com/scottaglia/meilisearch_exporter) by following the instructions 
mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type (Counter or Gauge) for Untyped metrics. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Meilisearch",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-memcached",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Memcached (community)",link:"https://github.com/prometheus/memcached_exporter",icon_filename:"memcached.svg",categories:["data-collection.database-servers"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Memcached (community)\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Memcached in-memory key-value store metrics for efficient caching performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Memcached exporter](https://github.com/prometheus/memcached_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Memcached exporter](https://github.com/prometheus/memcached_exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type (Counter or Gauge) for Untyped metrics. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Memcached_(community)",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-meraki",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Meraki dashboard",link:"https://github.com/TheHolm/meraki-dashboard-promethus-exporter",icon_filename:"meraki.png",categories:["data-collection.networking-stack-and-network-interfaces"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Meraki dashboard\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Cisco Meraki cloud-managed networking device metrics for efficient network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Meraki dashboard data exporter using API](https://github.com/TheHolm/meraki-dashboard-promethus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[Meraki dashboard data exporter using API](https://github.com/TheHolm/meraki-dashboard-promethus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type (Counter or Gauge) for Untyped metrics. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Meraki_dashboard",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-mesos",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Mesos",link:"http://github.com/mesosphere/mesos_exporter",icon_filename:"mesos.svg",categories:["data-collection.task-queues"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Mesos\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Apache Mesos cluster manager metrics for efficient resource management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Mesos exporter](http://github.com/mesosphere/mesos_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Mesos exporter](http://github.com/mesosphere/mesos_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### 
Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type (Counter or Gauge) for Untyped metrics. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Mesos",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-mikrotik",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"MikroTik devices",link:"https://github.com/swoga/mikrotik-exporter",icon_filename:"mikrotik.png",categories:["data-collection.networking-stack-and-network-interfaces"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# MikroTik devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on MikroTik RouterOS metrics for efficient network device management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [mikrotik-exporter](https://github.com/swoga/mikrotik-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [nshttpd/mikrotik-exporter or swoga/mikrotik-exporter](https://github.com/swoga/mikrotik-exporter) by following 
the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-MikroTik_devices",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-routeros",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Mikrotik RouterOS devices",link:"https://github.com/welbymcroberts/routeros_exporter",icon_filename:"routeros.png",categories:["data-collection.networking-stack-and-network-interfaces"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Mikrotik RouterOS devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack MikroTik RouterOS metrics for efficient network device management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [RouterOS exporter](https://github.com/welbymcroberts/routeros_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [RouterOS 
exporter](https://github.com/welbymcroberts/routeros_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Mikrotik_RouterOS_devices",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-minecraft",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Minecraft",link:"https://github.com/sladkoff/minecraft-prometheus-exporter",icon_filename:"minecraft.png",categories:["data-collection.gaming"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Minecraft\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Minecraft server metrics for efficient game server management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Minecraft Exporter](https://github.com/sladkoff/minecraft-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Minecraft Exporter](https://github.com/sladkoff/minecraft-prometheus-exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Minecraft",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-modbus_rtu",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Modbus protocol",link:"https://github.com/dernasherbrezon/modbusrtu_exporter",icon_filename:"modbus.svg",categories:["data-collection.iot-devices"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Modbus protocol\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Modbus RTU protocol metrics for efficient industrial automation and control performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [modbusrtu_exporter](https://github.com/dernasherbrezon/modbusrtu_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[modbusrtu_exporter](https://github.com/dernasherbrezon/modbusrtu_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Modbus_protocol",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-mogilefs",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"MogileFS",link:"https://github.com/KKBOX/mogilefs-exporter",icon_filename:"filesystem.svg",categories:["data-collection.storage-mount-points-and-filesystems"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# MogileFS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor MogileFS distributed file system metrics for efficient storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [MogileFS Exporter](https://github.com/KKBOX/mogilefs-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [MogileFS Exporter](https://github.com/KKBOX/mogilefs-exporter) by following the instructions 
mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-MogileFS",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-monnit_mqtt",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Monnit Sensors MQTT",link:"https://github.com/braxton9460/monnit-mqtt-exporter",icon_filename:"monnit.svg",categories:["data-collection.iot-devices"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Monnit Sensors MQTT\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Monnit sensor data via MQTT for efficient IoT device monitoring and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Monnit Sensors MQTT Exporter WIP](https://github.com/braxton9460/monnit-mqtt-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Monnit Sensors MQTT Exporter WIP](https://github.com/braxton9460/monnit-mqtt-exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Monnit_Sensors_MQTT",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-nrpe",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"NRPE daemon",link:"https://github.com/canonical/nrpe_exporter",icon_filename:"nrpelinux.png",categories:["data-collection.apm"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# NRPE daemon\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Nagios Remote Plugin Executor (NRPE) metrics for efficient system and network monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [NRPE exporter](https://github.com/canonical/nrpe_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [NRPE exporter](https://github.com/canonical/nrpe_exporter) by following the instructions mentioned in the exporter 
README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-NRPE_daemon",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-nsxt",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"NSX-T",link:"https://github.com/jk8s/nsxt_exporter",icon_filename:"vmware-nsx.svg",categories:["data-collection.containers-and-vms"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# NSX-T\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack VMware NSX-T software-defined networking metrics for efficient network virtualization and security management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [NSX-T Exporter](https://github.com/jk8s/nsxt_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [NSX-T Exporter](https://github.com/jk8s/nsxt_exporter) by following the instructions mentioned in the exporter 
README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
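For instance, a minimal hypothetical sketch (the patterns are invented for this example and use the glob syntax from the selector README linked below) that keeps CPU series but drops guest-CPU ones:\n\n```yaml\nselector:\n allow:\n - node_cpu*\n deny:\n - node_cpu_guest*\n```\n\n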
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-NSX-T",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-nvml",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"NVML",link:"https://github.com/oko/nvml-exporter-rs",icon_filename:"nvidia.svg",categories:["data-collection.hardware-devices-and-sensors"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# NVML\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on NVIDIA Management Library (NVML) GPU metrics for efficient GPU performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [NVML exporter](https://github.com/oko/nvml-exporter-rs).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [NVML exporter](https://github.com/oko/nvml-exporter-rs) by following the instructions mentioned in the exporter README.\n\n\n\n### 
Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-NVML",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-naemon",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Naemon",link:"https://github.com/Griesbacher/Iapetos",icon_filename:"naemon.svg",categories:["data-collection.observability"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Naemon\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Naemon or Nagios network monitoring metrics for efficient IT infrastructure management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Naemon / Nagios Exporter](https://github.com/Griesbacher/Iapetos).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Naemon / Nagios Exporter](https://github.com/Griesbacher/Iapetos) by following the instructions mentioned in the exporter 
README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Naemon",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-nagios",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Nagios",link:"https://github.com/wbollock/nagios_exporter",icon_filename:"nagios.png",categories:["data-collection.observability"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Nagios\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Nagios network monitoring metrics for efficient\nIT infrastructure management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Nagios exporter](https://github.com/wbollock/nagios_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Nagios exporter](https://github.com/wbollock/nagios_exporter) by following the instructions mentioned in the exporter 
README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
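As a concrete, hypothetical illustration (assuming that an omitted allow list means allow-all, and that the patterns follow the selector README linked below), a deny-only sketch that skips Go runtime series:\n\n```yaml\nselector:\n deny:\n - go_gc_*\n - go_memstats_*\n```\n\n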
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Nagios",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-nature_remo",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Nature Remo E lite devices",link:"https://github.com/kenfdev/remo-exporter",icon_filename:"nature-remo.png",categories:["data-collection.iot-devices"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Nature Remo E lite devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Nature Remo E series smart home device metrics for efficient home automation and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Nature Remo E series Exporter](https://github.com/kenfdev/remo-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Nature Remo E series Exporter](https://github.com/kenfdev/remo-exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Nature_Remo_E_lite_devices",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-netapp_solidfire",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"NetApp Solidfire",link:"https://github.com/mjavier2k/solidfire-exporter",icon_filename:"netapp.svg",categories:["data-collection.storage-mount-points-and-filesystems"]},keywords:["network monitoring","network performance","traffic analysis"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# NetApp Solidfire\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack NetApp Solidfire storage system metrics for efficient data storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [NetApp Solidfire Exporter](https://github.com/mjavier2k/solidfire-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [NetApp Solidfire Exporter](https://github.com/mjavier2k/solidfire-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
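To make the Logic line below concrete, a hypothetical sketch (names invented for the example): a series is kept only if it matches the allow pattern AND does not match the deny pattern:\n\n```yaml\nselector:\n allow:\n - solidfire_volume_*\n deny:\n - solidfire_volume_utilization\n```\n\n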
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-NetApp_Solidfire",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-netflow",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"NetFlow",link:"https://github.com/paihu/netflow_exporter",icon_filename:"netflow.png",categories:["data-collection.networking-stack-and-network-interfaces"]},keywords:["network monitoring","network performance","traffic analysis"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# NetFlow\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack NetFlow network traffic metrics for efficient network monitoring and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [netflow exporter](https://github.com/paihu/netflow_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [netflow 
exporter](https://github.com/paihu/netflow_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-NetFlow",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-netmeter",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"NetMeter",link:"https://github.com/ssbostan/netmeter-exporter",icon_filename:"netmeter.png",categories:["data-collection.networking-stack-and-network-interfaces"]},keywords:["network monitoring","network performance","traffic analysis"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# NetMeter\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor NetMeter network traffic metrics for efficient network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [NetMeter Exporter](https://github.com/ssbostan/netmeter-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [NetMeter 
Exporter](https://github.com/ssbostan/netmeter-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-NetMeter",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-netapp_ontap",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Netapp ONTAP API",link:"https://github.com/sapcc/netapp-api-exporter",icon_filename:"netapp.svg",categories:["data-collection.storage-mount-points-and-filesystems"]},keywords:["network monitoring","network performance","traffic analysis"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Netapp ONTAP API\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on NetApp ONTAP storage system metrics for efficient data storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Netapp ONTAP API Exporter](https://github.com/sapcc/netapp-api-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Netapp ONTAP 
API Exporter](https://github.com/sapcc/netapp-api-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Netapp_ONTAP_API",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-netatmo",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Netatmo sensors",link:"https://github.com/xperimental/netatmo-exporter",icon_filename:"netatmo.svg",categories:["data-collection.iot-devices"]},keywords:["network monitoring","network performance","traffic analysis"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Netatmo sensors\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Netatmo smart home device metrics for efficient home automation and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Netatmo exporter](https://github.com/xperimental/netatmo-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Netatmo 
exporter](https://github.com/xperimental/netatmo-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Netatmo_sensors",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-newrelic",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"New Relic",link:"https://github.com/jfindley/newrelic_exporter",icon_filename:"newrelic.svg",categories:["data-collection.observability"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# New Relic\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor New Relic application performance management metrics for efficient application monitoring and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [New Relic exporter](https://github.com/jfindley/newrelic_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [New Relic exporter](https://github.com/jfindley/newrelic_exporter) by following the instructions 
mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-New_Relic",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-nextdns",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"NextDNS",link:"https://github.com/raylas/nextdns-exporter",icon_filename:"nextdns.png",categories:["data-collection.dns-and-dhcp-servers"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# NextDNS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack NextDNS DNS resolver and security platform metrics for efficient DNS management and security.\n\n\nMetrics are gathered by periodically sending HTTP requests to [nextdns-exporter](https://github.com/raylas/nextdns-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [nextdns-exporter](https://github.com/raylas/nextdns-exporter) by following the instructions mentioned in the exporter 
README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-NextDNS",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-nextcloud",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Nextcloud servers",link:"https://github.com/xperimental/nextcloud-exporter",icon_filename:"nextcloud.png",categories:["data-collection.cloud-provider-managed"]},keywords:["cloud services","cloud computing","scalability"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Nextcloud servers\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Nextcloud cloud storage metrics for efficient file hosting and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Nextcloud exporter](https://github.com/xperimental/nextcloud-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Nextcloud 
exporter](https://github.com/xperimental/nextcloud-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Nextcloud_servers",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-obs_studio",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"OBS Studio",link:"https://github.com/lukegb/obs_studio_exporter",icon_filename:"obs-studio.png",categories:["data-collection.media-streaming-servers"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# OBS Studio\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack OBS Studio live streaming and recording software metrics for efficient video production and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OBS Studio Exporter](https://github.com/lukegb/obs_studio_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OBS Studio Exporter](https://github.com/lukegb/obs_studio_exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-OBS_Studio",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-odbc",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"ODBC",link:"https://github.com/MACHBASE/prometheus-odbc-exporter",icon_filename:"odbc.svg",categories:["data-collection.database-servers"]},keywords:["database","dbms","data storage"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# ODBC\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Open Database Connectivity (ODBC) metrics for efficient database connection and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ODBC Exporter](https://github.com/MACHBASE/prometheus-odbc-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ODBC Exporter](https://github.com/MACHBASE/prometheus-odbc-exporter) by following the 
",integration_type:"collector",id:"go.d.plugin-prometheus-OBS_Studio",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-odbc",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"ODBC",link:"https://github.com/MACHBASE/prometheus-odbc-exporter",icon_filename:"odbc.svg",categories:["data-collection.database-servers"]},keywords:["database","dbms","data storage"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# ODBC\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Open Database Connectivity (ODBC) metrics for efficient database connection and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ODBC Exporter](https://github.com/MACHBASE/prometheus-odbc-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ODBC Exporter](https://github.com/MACHBASE/prometheus-odbc-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n
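As a minimal sketch (hypothetical metric names), the selector below collects every "node_*" time series except those matching "node_scrape_*":\n\n```yaml\n# hypothetical patterns; see the selector README linked above for the full syntax\nselector:\n allow:\n - node_*\n deny:\n - node_scrape_*\n```\n\n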
\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if its name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-ODBC",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-otrs",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"OTRS",link:"https://github.com/JulianDroste/otrs_exporter",icon_filename:"otrs.png",categories:["data-collection.notifications"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# OTRS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor OTRS (Open-Source Ticket Request System) metrics for efficient helpdesk management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OTRS Exporter](https://github.com/JulianDroste/otrs_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OTRS Exporter](https://github.com/JulianDroste/otrs_exporter) by following the instructions in the exporter README.\n\n\n\n
### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if its name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-OTRS",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-openhab",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"OpenHAB",link:"https://github.com/pdreker/openhab_exporter",icon_filename:"openhab.svg",categories:["data-collection.iot-devices"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# OpenHAB\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack openHAB smart home automation system metrics for efficient home automation and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OpenHAB exporter](https://github.com/pdreker/openhab_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenHAB exporter](https://github.com/pdreker/openhab_exporter) by following the instructions in the exporter README.\n\n\n\n
### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if its name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-OpenHAB",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-openldap",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"OpenLDAP (community)",link:"https://github.com/tomcz/openldap_exporter",icon_filename:"openldap.svg",categories:["data-collection.authentication-and-authorization"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# OpenLDAP (community)\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor OpenLDAP directory service metrics for efficient directory management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OpenLDAP Metrics Exporter](https://github.com/tomcz/openldap_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenLDAP Metrics Exporter](https://github.com/tomcz/openldap_exporter) by following the instructions in the exporter README.\n\n\n\n
### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if its name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-OpenLDAP_(community)",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-openrc",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"OpenRC",link:"https://git.sr.ht/~tomleb/openrc-exporter",icon_filename:"linux.png",categories:["data-collection.linux-systems"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# OpenRC\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on OpenRC init system metrics for efficient system startup and service management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [openrc-exporter](https://git.sr.ht/~tomleb/openrc-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [openrc-exporter](https://git.sr.ht/~tomleb/openrc-exporter) by following the instructions in the exporter README.\n\n\n\n
### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if its name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-OpenRC",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-openrct2",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"OpenRCT2",link:"https://github.com/terinjokes/openrct2-prometheus-exporter",icon_filename:"openRCT2.png",categories:["data-collection.gaming"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# OpenRCT2\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack OpenRCT2 game metrics for efficient game server management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OpenRCT2 Prometheus Exporter](https://github.com/terinjokes/openrct2-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenRCT2 Prometheus Exporter](https://github.com/terinjokes/openrct2-prometheus-exporter) by following the instructions in the exporter README.\n\n\n\n
### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if its name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-OpenRCT2",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-openroadm",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"OpenROADM devices",link:"https://github.com/utdal/openroadm_exporter",icon_filename:"openroadm.png",categories:["data-collection.networking-stack-and-network-interfaces"]},keywords:["network monitoring","network performance","traffic analysis"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# OpenROADM devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor OpenROADM optical transport network metrics using the NETCONF protocol for efficient network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OpenROADM NETCONF Exporter WIP](https://github.com/utdal/openroadm_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenROADM NETCONF Exporter WIP](https://github.com/utdal/openroadm_exporter) by following the instructions in the exporter README.\n\n\n\n
### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if its name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-OpenROADM_devices",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-openstack",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"OpenStack",link:"https://github.com/CanonicalLtd/prometheus-openstack-exporter",icon_filename:"openstack.svg",categories:["data-collection.cloud-provider-managed"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# OpenStack\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack OpenStack cloud computing platform metrics for efficient infrastructure management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Openstack exporter](https://github.com/CanonicalLtd/prometheus-openstack-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Openstack exporter](https://github.com/CanonicalLtd/prometheus-openstack-exporter) by following the instructions in the exporter README.\n\n\n\n
exporter](https://github.com/CanonicalLtd/prometheus-openstack-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency, in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout, in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-OpenStack",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-openvas",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"OpenVAS",link:"https://github.com/ModeClearCode/openvas_exporter",icon_filename:"openVAS.png",categories:["data-collection.security-systems"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# OpenVAS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor OpenVAS vulnerability scanner metrics for efficient security assessment and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OpenVAS exporter](https://github.com/ModeClearCode/openvas_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenVAS exporter](https://github.com/ModeClearCode/openvas_exporter) by following the instructions mentioned in the 
exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency, in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout, in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-OpenVAS",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-openweathermap",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"OpenWeatherMap",link:"https://github.com/Tenzer/openweathermap-exporter",icon_filename:"openweather.png",categories:["data-collection.generic-data-collection"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# OpenWeatherMap\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack OpenWeatherMap weather data and air pollution metrics for efficient environmental monitoring and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OpenWeatherMap Exporter](https://github.com/Tenzer/openweathermap-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenWeatherMap 
Exporter](https://github.com/Tenzer/openweathermap-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency, in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout, in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-OpenWeatherMap",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-openvswitch",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Open vSwitch",link:"https://github.com/digitalocean/openvswitch_exporter",icon_filename:"ovs.png",categories:["data-collection.networking-stack-and-network-interfaces"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Open vSwitch\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Open vSwitch software-defined networking metrics for efficient network virtualization and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Open vSwitch Exporter](https://github.com/digitalocean/openvswitch_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Open vSwitch 
Exporter](https://github.com/digitalocean/openvswitch_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency, in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout, in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Open_vSwitch",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-oracledb",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Oracle DB (community)",link:"https://github.com/iamseth/oracledb_exporter",icon_filename:"oracle.svg",categories:["data-collection.database-servers"]},keywords:["oracle","database","dbms","data storage"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Oracle DB (community)\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Oracle Database metrics for efficient database management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Oracle DB Exporter](https://github.com/iamseth/oracledb_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Oracle DB Exporter](https://github.com/iamseth/oracledb_exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency, in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout, in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Oracle_DB_(community)",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-patroni",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Patroni",link:"https://github.com/gopaytech/patroni_exporter",icon_filename:"patroni.png",categories:["data-collection.database-servers"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Patroni\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Patroni PostgreSQL high-availability metrics for efficient database management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Patroni Exporter](https://github.com/gopaytech/patroni_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Patroni Exporter](https://github.com/gopaytech/patroni_exporter) by following the instructions mentioned 
in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency, in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout, in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Patroni",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-pws",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Personal Weather Station",link:"https://github.com/JohnOrthoefer/pws-exporter",icon_filename:"wunderground.png",categories:["data-collection.iot-devices"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Personal Weather Station\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack personal weather station metrics for efficient weather monitoring and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Personal Weather Station Exporter](https://github.com/JohnOrthoefer/pws-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Personal Weather Station Exporter](https://github.com/JohnOrthoefer/pws-exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency, in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout, in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Personal_Weather_Station",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-pgpool2",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Pgpool-II",link:"https://github.com/pgpool/pgpool2_exporter",icon_filename:"pgpool2.png",categories:["data-collection.database-servers"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Pgpool-II\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Pgpool-II PostgreSQL middleware metrics for efficient database connection management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Pgpool-II Exporter](https://github.com/pgpool/pgpool2_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Pgpool-II Exporter](https://github.com/pgpool/pgpool2_exporter) by following the instructions 
mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
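As a concrete illustration, here is a minimal sketch of a selector that keeps only series whose names start with `pgpool2_` and drops Go runtime internals; both patterns are assumptions made for this sketch, not values taken from the exporter:\n\n```yaml\n# illustrative patterns - adjust to the series your exporter actually exposes\nselector:\n allow:\n - pgpool2_*\n deny:\n - go_*\n```\n\n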
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Pgpool-II",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-philips_hue",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Philips Hue",link:"https://github.com/aexel90/hue_exporter",icon_filename:"hue.svg",categories:["data-collection.iot-devices"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Philips Hue\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Philips Hue smart lighting metrics for efficient home automation and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Philips Hue Exporter](https://github.com/aexel90/hue_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Philips Hue Exporter](https://github.com/aexel90/hue_exporter) by following the instructions mentioned in the exporter 
README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
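As a concrete illustration, here is a minimal sketch of a selector that keeps only series whose names start with `hue_` and drops Go runtime internals; both patterns are assumptions made for this sketch, not values taken from the exporter:\n\n```yaml\n# illustrative patterns - adjust to the series your exporter actually exposes\nselector:\n allow:\n - hue_*\n deny:\n - go_*\n```\n\n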
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Philips_Hue",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-pimoroni_enviro_plus",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Pimoroni Enviro+",link:"https://github.com/terradolor/prometheus-enviro-exporter",icon_filename:"pimorino.png",categories:["data-collection.iot-devices"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Pimoroni Enviro+\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Pimoroni Enviro+ air quality and environmental metrics for efficient environmental monitoring and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Pimoroni Enviro+ Exporter](https://github.com/terradolor/prometheus-enviro-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Pimoroni Enviro+ 
Exporter](https://github.com/terradolor/prometheus-enviro-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
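As a concrete illustration, here is a minimal sketch of a selector that keeps only series whose names start with `enviro_` and drops Go runtime internals; both patterns are assumptions made for this sketch, not values taken from the exporter:\n\n```yaml\n# illustrative patterns - adjust to the series your exporter actually exposes\nselector:\n allow:\n - enviro_*\n deny:\n - go_*\n```\n\n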
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Pimoroni_Enviro+",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-pingdom",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Pingdom",link:"https://github.com/veepee-oss/pingdom_exporter",icon_filename:"solarwinds.svg",categories:["data-collection.synthetic-checks"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Pingdom\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Pingdom website monitoring service metrics for efficient website performance management and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Pingdom Exporter](https://github.com/veepee-oss/pingdom_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Pingdom Exporter](https://github.com/veepee-oss/pingdom_exporter) by following the instructions 
mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
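As a concrete illustration, here is a minimal sketch of a selector that keeps only series whose names start with `pingdom_` and drops Go runtime internals; both patterns are assumptions made for this sketch, not values taken from the exporter:\n\n```yaml\n# illustrative patterns - adjust to the series your exporter actually exposes\nselector:\n allow:\n - pingdom_*\n deny:\n - go_*\n```\n\n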
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Pingdom",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-podman",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Podman",link:"https://github.com/containers/prometheus-podman-exporter",icon_filename:"podman.png",categories:["data-collection.containers-and-vms"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Podman\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Podman container runtime metrics for efficient container management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [PODMAN exporter](https://github.com/containers/prometheus-podman-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [PODMAN exporter](https://github.com/containers/prometheus-podman-exporter) by following the instructions 
mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
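As a concrete illustration, here is a minimal sketch of a selector that keeps only series whose names start with `podman_` and drops Go runtime internals; both patterns are assumptions made for this sketch, not values taken from the exporter:\n\n```yaml\n# illustrative patterns - adjust to the series your exporter actually exposes\nselector:\n allow:\n - podman_*\n deny:\n - go_*\n```\n\n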
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Podman",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-powerpal",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Powerpal devices",link:"https://github.com/aashley/powerpal_exporter",icon_filename:"powerpal.png",categories:["data-collection.iot-devices"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Powerpal devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Powerpal smart meter metrics for efficient energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Powerpal Exporter](https://github.com/aashley/powerpal_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Powerpal Exporter](https://github.com/aashley/powerpal_exporter) by following the instructions mentioned in the exporter 
README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
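As a concrete illustration, here is a minimal sketch of a selector that keeps only series whose names start with `powerpal_` and drops Go runtime internals; both patterns are assumptions made for this sketch, not values taken from the exporter:\n\n```yaml\n# illustrative patterns - adjust to the series your exporter actually exposes\nselector:\n allow:\n - powerpal_*\n deny:\n - go_*\n```\n\n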
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Powerpal_devices",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-proftpd",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"ProFTPD",link:"https://github.com/transnano/proftpd_exporter",icon_filename:"proftpd.png",categories:["data-collection.ftp-servers"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# ProFTPD\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor ProFTPD FTP server metrics for efficient file transfer and server performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ProFTPD Exporter](https://github.com/transnano/proftpd_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ProFTPD Exporter](https://github.com/transnano/proftpd_exporter) by following the instructions mentioned in the exporter 
README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
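As a concrete illustration, here is a minimal sketch of a selector that keeps only series whose names start with `proftpd_` and drops Go runtime internals; both patterns are assumptions made for this sketch, not values taken from the exporter:\n\n```yaml\n# illustrative patterns - adjust to the series your exporter actually exposes\nselector:\n allow:\n - proftpd_*\n deny:\n - go_*\n```\n\n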
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-ProFTPD",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-generic",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Prometheus endpoint",link:"https://prometheus.io/",icon_filename:"prometheus.svg",categories:["data-collection.generic-data-collection"]},keywords:["prometheus","openmetrics"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!0},overview:"# Prometheus endpoint\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nThis generic Prometheus collector gathers metrics from any [`Prometheus`](https://prometheus.io/) endpoints.\n\n\nIt collects metrics by periodically sending HTTP requests to the target instance.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file 
using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
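As a minimal illustration (the `example_*` metric names below are hypothetical placeholders, not metrics this endpoint is known to expose), the following keeps request and connection series and drops their debug counterparts:\n\n```yaml\nselector:\n allow:\n - example_requests_*\n - example_connections_*\n deny:\n - example_*_debug\n```\n\n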
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Prometheus_endpoint",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-proxmox",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Proxmox VE",link:"https://github.com/prometheus-pve/prometheus-pve-exporter",icon_filename:"proxmox.png",categories:["data-collection.containers-and-vms"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Proxmox VE\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Proxmox Virtual Environment metrics for efficient virtualization and container management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Proxmox VE Exporter](https://github.com/prometheus-pve/prometheus-pve-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Proxmox VE 
Exporter](https://github.com/prometheus-pve/prometheus-pve-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
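As a minimal illustration (the `example_*` metric names below are hypothetical placeholders, not metrics this endpoint is known to expose), the following keeps request and connection series and drops their debug counterparts:\n\n```yaml\nselector:\n allow:\n - example_requests_*\n - example_connections_*\n deny:\n - example_*_debug\n```\n\n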
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Proxmox_VE",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-radius",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"RADIUS",link:"https://github.com/devon-mar/radius-exporter",icon_filename:"radius.png",categories:["data-collection.authentication-and-authorization"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# RADIUS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on RADIUS (Remote Authentication Dial-In User Service) protocol metrics for efficient authentication and access management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [RADIUS exporter](https://github.com/devon-mar/radius-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [RADIUS exporter](https://github.com/devon-mar/radius-exporter) by following the 
instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
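As a minimal illustration (the `example_*` metric names below are hypothetical placeholders, not metrics this endpoint is known to expose), the following keeps request and connection series and drops their debug counterparts:\n\n```yaml\nselector:\n allow:\n - example_requests_*\n - example_connections_*\n deny:\n - example_*_debug\n```\n\n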
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-RADIUS",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-ripe_atlas",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"RIPE Atlas",link:"https://github.com/czerwonk/atlas_exporter",icon_filename:"ripe.png",categories:["data-collection.networking-stack-and-network-interfaces"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# RIPE Atlas\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on RIPE Atlas Internet measurement platform metrics for efficient network monitoring and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [RIPE Atlas Exporter](https://github.com/czerwonk/atlas_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [RIPE Atlas Exporter](https://github.com/czerwonk/atlas_exporter) by following the 
instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
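As a minimal illustration (the `example_*` metric names below are hypothetical placeholders, not metrics this endpoint is known to expose), the following keeps request and connection series and drops their debug counterparts:\n\n```yaml\nselector:\n allow:\n - example_requests_*\n - example_connections_*\n deny:\n - example_*_debug\n```\n\n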
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-RIPE_Atlas",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-radio_thermostat",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Radio Thermostat",link:"https://github.com/andrewlow/radio-thermostat-exporter",icon_filename:"radiots.png",categories:["data-collection.iot-devices"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Radio Thermostat\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Radio Thermostat smart thermostat metrics for efficient home automation and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Radio Thermostat Exporter](https://github.com/andrewlow/radio-thermostat-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Radio Thermostat 
Exporter](https://github.com/andrewlow/radio-thermostat-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
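As a minimal illustration (the `example_*` metric names below are hypothetical placeholders, not metrics this endpoint is known to expose), the following keeps request and connection series and drops their debug counterparts:\n\n```yaml\nselector:\n allow:\n - example_requests_*\n - example_connections_*\n deny:\n - example_*_debug\n```\n\n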
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Radio_Thermostat",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-rancher",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Rancher",link:"https://github.com/infinityworksltd/prometheus-rancher-exporter",icon_filename:"rancher.svg",categories:["data-collection.kubernetes"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Rancher\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Rancher container orchestration platform metrics for efficient container management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Rancher Exporter](https://github.com/infinityworksltd/prometheus-rancher-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Rancher 
Exporter](https://github.com/infinityworksltd/prometheus-rancher-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
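As a minimal illustration (the `example_*` metric names below are hypothetical placeholders, not metrics this endpoint is known to expose), the following keeps request and connection series and drops their debug counterparts:\n\n```yaml\nselector:\n allow:\n - example_requests_*\n - example_connections_*\n deny:\n - example_*_debug\n```\n\n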
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Rancher",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-raritan_pdu",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Raritan PDU",link:"https://github.com/psyinfra/prometheus-raritan-pdu-exporter",icon_filename:"raritan.svg",categories:["data-collection.hardware-devices-and-sensors"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Raritan PDU\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Raritan Power Distribution Unit (PDU) metrics for efficient power management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Raritan PDU Exporter](https://github.com/psyinfra/prometheus-raritan-pdu-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Raritan PDU 
Exporter](https://github.com/psyinfra/prometheus-raritan-pdu-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
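As a minimal illustration (the `example_*` metric names below are hypothetical placeholders, not metrics this endpoint is known to expose), the following keeps request and connection series and drops their debug counterparts:\n\n```yaml\nselector:\n allow:\n - example_requests_*\n - example_connections_*\n deny:\n - example_*_debug\n```\n\n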
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Raritan_PDU",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-redis_queue",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Redis Queue",link:"https://github.com/mdawar/rq-exporter",icon_filename:"rq.png",categories:["data-collection.message-brokers"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Redis Queue\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Python RQ (Redis Queue) job queue metrics for efficient task management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Python RQ Exporter](https://github.com/mdawar/rq-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Python RQ Exporter](https://github.com/mdawar/rq-exporter) by following the instructions in the exporter README.\n\n\n\n### 
Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics (process them as Counter or Gauge). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Redis_Queue",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-sabnzbd",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"SABnzbd",link:"https://github.com/msroest/sabnzbd_exporter",icon_filename:"sabnzbd.png",categories:["data-collection.media-streaming-servers"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# SABnzbd\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor SABnzbd Usenet client metrics for efficient file downloads and resource management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SABnzbd Exporter](https://github.com/msroest/sabnzbd_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SABnzbd Exporter](https://github.com/msroest/sabnzbd_exporter) by following the instructions in the exporter 
README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics (process them as Counter or Gauge). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-SABnzbd",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-sma_inverter",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"SMA Inverters",link:"https://github.com/dr0ps/sma_inverter_exporter",icon_filename:"sma.png",categories:["data-collection.iot-devices"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# SMA Inverters\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor SMA solar inverter metrics for efficient solar energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [sma-exporter](https://github.com/dr0ps/sma_inverter_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [sma-exporter](https://github.com/dr0ps/sma_inverter_exporter) by following the instructions in the exporter 
README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics (process them as Counter or Gauge). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-SMA_Inverters",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-sonic",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"SONiC NOS",link:"https://github.com/kamelnetworks/sonic_exporter",icon_filename:"sonic.png",categories:["data-collection.networking-stack-and-network-interfaces"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# SONiC NOS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Software for Open Networking in the Cloud (SONiC) metrics for efficient network switch management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SONiC Exporter](https://github.com/kamelnetworks/sonic_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SONiC Exporter](https://github.com/kamelnetworks/sonic_exporter) by 
following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics (process them as Counter or Gauge). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-SONiC_NOS",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-sql",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"SQL Database agnostic",link:"https://github.com/free/sql_exporter",icon_filename:"sql.svg",categories:["data-collection.database-servers"]},keywords:["database","relational db","data querying"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# SQL Database agnostic\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nQuery SQL databases for efficient database performance monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SQL Exporter](https://github.com/free/sql_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SQL Exporter](https://github.com/free/sql_exporter) by following the instructions in the exporter 
README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics (process them as Counter or Gauge). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-SQL_Database_agnostic",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-ssh",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"SSH",link:"https://github.com/Nordstrom/ssh_exporter",icon_filename:"ssh.png",categories:["data-collection.authentication-and-authorization"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# SSH\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor SSH server metrics for efficient secure shell server management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SSH Exporter](https://github.com/Nordstrom/ssh_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SSH Exporter](https://github.com/Nordstrom/ssh_exporter) by following the instructions in the exporter README.\n\n\n\n### 
Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics (process them as Counter or Gauge). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-SSH",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-ssl",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"SSL Certificate",link:"https://github.com/ribbybibby/ssl_exporter",icon_filename:"ssl.svg",categories:["data-collection.security-systems"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# SSL Certificate\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack SSL/TLS certificate metrics for efficient web security and certificate management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SSL Certificate exporter](https://github.com/ribbybibby/ssl_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SSL Certificate exporter](https://github.com/ribbybibby/ssl_exporter) by following the instructions in the exporter 
README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics (process them as Counter or Gauge). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-SSL_Certificate",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-salicru_eqx",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Salicru EQX inverter",link:"https://github.com/alejandroscf/prometheus_salicru_exporter",icon_filename:"salicru.png",categories:["data-collection.iot-devices"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Salicru EQX inverter\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Salicru EQX solar inverter metrics for efficient solar energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Salicru EQX inverter](https://github.com/alejandroscf/prometheus_salicru_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Salicru EQX 
inverter](https://github.com/alejandroscf/prometheus_salicru_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n
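\nFor instance, the following sketch keeps every `node_` series except scrape metadata (the `node_*` names are hypothetical; glob-style patterns are assumed here, per the selector README linked above):\n\n```yaml\nselector:\n allow:\n - node_*\n deny:\n - node_scrape_*\n```\n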
\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Salicru_EQX_inverter",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-sense_energy",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Sense Energy",link:"https://github.com/ejsuncy/sense_energy_prometheus_exporter",icon_filename:"sense.png",categories:["data-collection.iot-devices"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Sense Energy\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Sense Energy smart meter metrics for efficient energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Sense Energy exporter](https://github.com/ejsuncy/sense_energy_prometheus_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sense Energy 
exporter](https://github.com/ejsuncy/sense_energy_prometheus_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n
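\nFor example, an exposition line such as `app_jobs_running 3` (a hypothetical metric) carries no `# TYPE` comment and matches none of the built-in heuristics, so it would normally be ignored; the following makes the collector treat it as a Gauge:\n\n```yaml\nfallback_type:\n gauge:\n - app_jobs_running\n```\n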
\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Sense_Energy",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-sentry",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Sentry",link:"https://github.com/snakecharmer/sentry_exporter",icon_filename:"sentry.png",categories:["data-collection.apm"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Sentry\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Sentry error tracking and monitoring platform metrics for efficient application performance and error management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Sentry Exporter](https://github.com/snakecharmer/sentry_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sentry Exporter](https://github.com/snakecharmer/sentry_exporter) by following the instructions mentioned in the 
exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Sentry",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-servertech",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"ServerTech",link:"https://github.com/tynany/servertech_exporter",icon_filename:"servertech.png",categories:["data-collection.hardware-devices-and-sensors"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# ServerTech\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Server Technology power distribution unit (PDU) metrics for efficient power management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ServerTech Exporter](https://github.com/tynany/servertech_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ServerTech Exporter](https://github.com/tynany/servertech_exporter) by following 
the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n
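\nBefore pointing a job at an endpoint, you can verify that it actually serves Prometheus text format (the URL below is the placeholder from the example above; adjust it to your exporter):\n\n```bash\ncurl http://127.0.0.1:9090/metrics\n```\n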
##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-ServerTech",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-shell_cmd",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Shell command",link:"https://github.com/tomwilkie/prom-run",icon_filename:"crunner.svg",categories:["data-collection.generic-data-collection"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Shell command\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack custom command output metrics for tailored monitoring and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Command runner exporter](https://github.com/tomwilkie/prom-run).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Command runner exporter](https://github.com/tomwilkie/prom-run) by following the instructions mentioned in the exporter 
README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n
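##### Behind a proxy\n\nA sketch of scraping through an HTTP proxy; the proxy address below is a placeholder, and `proxy_username`/`proxy_password` (see the options table) can be added in the same way if the proxy requires authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n proxy_url: http://127.0.0.1:3128\n\n```\n{% /details %}\n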
##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Shell_command",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-shelly",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Shelly humidity sensor",link:"https://github.com/aexel90/shelly_exporter",icon_filename:"shelly.jpg",categories:["data-collection.iot-devices"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Shelly humidity sensor\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Shelly smart home device metrics for efficient home automation and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Shelly Exporter](https://github.com/aexel90/shelly_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Shelly Exporter](https://github.com/aexel90/shelly_exporter) by following the instructions mentioned in the 
exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n
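\nAs a sketch of how the global options compose with a job (the values here are illustrative, not recommendations): a top-level `update_every` applies to every job, and a per-job `update_every` overrides it.\n\n```yaml\nupdate_every: 10\nautodetection_retry: 60\n\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n update_every: 5\n```\n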
\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Shelly_humidity_sensor",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-sia",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Sia",link:"https://github.com/tbenz9/sia_exporter",icon_filename:"sia.png",categories:["data-collection.blockchain-servers"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Sia\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Sia decentralized storage platform metrics for efficient storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Sia Exporter](https://github.com/tbenz9/sia_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sia Exporter](https://github.com/tbenz9/sia_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### 
Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n
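\nIf the server certificate is signed by your own CA, a safer alternative to `tls_skip_verify` is pointing `tls_ca` at the CA bundle (the path below is a placeholder):\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_ca: /etc/ssl/certs/my_ca.crt\n\n```\n{% /details %}\n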
##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Sia",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-s7_plc",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Siemens S7 PLC",link:"https://github.com/MarcusCalidus/s7-plc-exporter",icon_filename:"siemens.svg",categories:["data-collection.hardware-devices-and-sensors"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Siemens S7 PLC\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Siemens S7 Programmable Logic Controller (PLC) metrics for efficient industrial automation and control.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Siemens S7 PLC exporter](https://github.com/MarcusCalidus/s7-plc-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Siemens S7 PLC exporter](https://github.com/MarcusCalidus/s7-plc-exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics exceeding this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Siemens_S7_PLC",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-site24x7",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Site 24x7",link:"https://github.com/svenstaro/site24x7_exporter",icon_filename:"site24x7.svg",categories:["data-collection.synthetic-checks"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Site 24x7\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Site24x7 website and infrastructure monitoring metrics for efficient performance tracking and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [site24x7 Exporter](https://github.com/svenstaro/site24x7_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [site24x7 Exporter](https://github.com/svenstaro/site24x7_exporter) by following the instructions 
mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics exceeding this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Site_24x7",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-slurm",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Slurm",link:"https://github.com/vpenso/prometheus-slurm-exporter",icon_filename:"slurm.png",categories:["data-collection.task-queues"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Slurm\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Slurm workload manager metrics for efficient high-performance computing (HPC) and cluster management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [slurm exporter](https://github.com/vpenso/prometheus-slurm-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [slurm exporter](https://github.com/vpenso/prometheus-slurm-exporter) by following the instructions mentioned in the 
exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics exceeding this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Slurm",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-smartrg808ac",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"SmartRG 808AC Cable Modem",link:"https://github.com/AdamIsrael/smartrg808ac_exporter",icon_filename:"smartr.jpeg",categories:["data-collection.networking-stack-and-network-interfaces"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# SmartRG 808AC Cable Modem\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor SmartRG SR808ac router metrics for efficient network device management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [smartrg808ac_exporter](https://github.com/AdamIsrael/smartrg808ac_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[smartrg808ac_exporter](https://github.com/AdamIsrael/smartrg808ac_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics exceeding this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-SmartRG_808AC_Cable_Modem",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-sml",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Smart meters SML",link:"https://github.com/mweinelt/sml-exporter",icon_filename:"sml.png",categories:["data-collection.iot-devices"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Smart meters SML\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Smart Message Language (SML) metrics for efficient smart metering and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SML Exporter](https://github.com/mweinelt/sml-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SML Exporter](https://github.com/mweinelt/sml-exporter) by following the instructions mentioned in the exporter 
README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics exceeding this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Smart_meters_SML",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-softether",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"SoftEther VPN Server",link:"https://github.com/dalance/softether_exporter",icon_filename:"softether.svg",categories:["data-collection.vpns"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# SoftEther VPN Server\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor SoftEther VPN Server metrics for efficient virtual private network (VPN) management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SoftEther Exporter](https://github.com/dalance/softether_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SoftEther Exporter](https://github.com/dalance/softether_exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics exceeding this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-SoftEther_VPN_Server",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-solaredge",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"SolarEdge inverters",link:"https://github.com/dave92082/SolarEdge-Exporter",icon_filename:"solaredge.png",categories:["data-collection.iot-devices"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# SolarEdge inverters\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack SolarEdge solar inverter metrics for efficient solar energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SolarEdge Exporter](https://github.com/dave92082/SolarEdge-Exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SolarEdge Exporter](https://github.com/dave92082/SolarEdge-Exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics exceeding this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-SolarEdge_inverters",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-lsx",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Solar logging stick",link:"https://gitlab.com/bhavin192/lsx-exporter",icon_filename:"solar.png",categories:["data-collection.iot-devices"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Solar logging stick\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor solar energy metrics using a solar logging stick for efficient solar energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Solar logging stick exporter](https://gitlab.com/bhavin192/lsx-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Solar logging stick exporter](https://gitlab.com/bhavin192/lsx-exporter) by following 
the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency, in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout, in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n
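As a combined sketch of the two options above (the metric name patterns here are illustrative, not specific to this exporter), a single job might use both:\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n # keep only CPU series, except the guest CPU counter\n selector:\n allow:\n - node_cpu_*\n deny:\n - node_cpu_guest_seconds_total\n # treat untyped temperature readings as gauges\n fallback_type:\n gauge:\n - temperature_*\n```\n\n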
{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nAn example configuration with basic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nAn example configuration that does not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nAn example configuration for collecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**All other untyped metrics are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Solar_logging_stick",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-solis",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Solis Ginlong 5G inverters",link:"https://github.com/candlerb/solis_exporter",icon_filename:"solis.jpg",categories:["data-collection.iot-devices"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Solis Ginlong 5G inverters\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Solis solar inverter metrics for efficient solar energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Solis Exporter](https://github.com/candlerb/solis_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Solis Exporter](https://github.com/candlerb/solis_exporter) by following the instructions mentioned in 
the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency, in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout, in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n
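As a combined sketch of the two options above (the metric name patterns here are illustrative, not specific to this exporter), a single job might use both:\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n # keep only CPU series, except the guest CPU counter\n selector:\n allow:\n - node_cpu_*\n deny:\n - node_cpu_guest_seconds_total\n # treat untyped temperature readings as gauges\n fallback_type:\n gauge:\n - temperature_*\n```\n\n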
{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nAn example configuration with basic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nAn example configuration that does not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nAn example configuration for collecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**All other untyped metrics are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Solis_Ginlong_5G_inverters",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-spacelift",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Spacelift",link:"https://github.com/spacelift-io/prometheus-exporter",icon_filename:"spacelift.png",categories:["data-collection.provisioning-systems"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Spacelift\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Spacelift infrastructure-as-code (IaC) platform metrics for efficient infrastructure automation and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Spacelift Exporter](https://github.com/spacelift-io/prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Spacelift 
Exporter](https://github.com/spacelift-io/prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency, in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout, in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n
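As a combined sketch of the two options above (the metric name patterns here are illustrative, not specific to this exporter), a single job might use both:\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n # keep only CPU series, except the guest CPU counter\n selector:\n allow:\n - node_cpu_*\n deny:\n - node_cpu_guest_seconds_total\n # treat untyped temperature readings as gauges\n fallback_type:\n gauge:\n - temperature_*\n```\n\n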
{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nAn example configuration with basic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nAn example configuration that does not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nAn example configuration for collecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**All other untyped metrics are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Spacelift",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-speedify",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Speedify CLI",link:"https://github.com/willshen/speedify_exporter",icon_filename:"speedify.png",categories:["data-collection.vpns"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Speedify CLI\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Speedify VPN metrics for efficient virtual private network (VPN) management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Speedify Exporter](https://github.com/willshen/speedify_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Speedify Exporter](https://github.com/willshen/speedify_exporter) by following the instructions mentioned in the exporter 
README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency, in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout, in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n
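As a combined sketch of the two options above (the metric name patterns here are illustrative, not specific to this exporter), a single job might use both:\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n # keep only CPU series, except the guest CPU counter\n selector:\n allow:\n - node_cpu_*\n deny:\n - node_cpu_guest_seconds_total\n # treat untyped temperature readings as gauges\n fallback_type:\n gauge:\n - temperature_*\n```\n\n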
{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nAn example configuration with basic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nAn example configuration that does not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nAn example configuration for collecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**All other untyped metrics are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Speedify_CLI",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-sphinx",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Sphinx",link:"https://github.com/foxdalas/sphinx_exporter",icon_filename:"sphinx.png",categories:["data-collection.search-engines"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Sphinx\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Sphinx search engine metrics for efficient search and indexing performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Sphinx Exporter](https://github.com/foxdalas/sphinx_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sphinx Exporter](https://github.com/foxdalas/sphinx_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### 
Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency, in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout, in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n
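As a combined sketch of the two options above (the metric name patterns here are illustrative, not specific to this exporter), a single job might use both:\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n # keep only CPU series, except the guest CPU counter\n selector:\n allow:\n - node_cpu_*\n deny:\n - node_cpu_guest_seconds_total\n # treat untyped temperature readings as gauges\n fallback_type:\n gauge:\n - temperature_*\n```\n\n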
{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nAn example configuration with basic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nAn example configuration that does not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nAn example configuration for collecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**All other untyped metrics are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Sphinx",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-starlink",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Starlink (SpaceX)",link:"https://github.com/danopstech/starlink_exporter",icon_filename:"starlink.svg",categories:["data-collection.networking-stack-and-network-interfaces"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Starlink (SpaceX)\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor SpaceX Starlink satellite internet metrics for efficient internet service management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Starlink Exporter (SpaceX)](https://github.com/danopstech/starlink_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Starlink Exporter 
(SpaceX)](https://github.com/danopstech/starlink_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency, in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout, in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n
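As a combined sketch of the two options above (the metric name patterns here are illustrative, not specific to this exporter), a single job might use both:\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n # keep only CPU series, except the guest CPU counter\n selector:\n allow:\n - node_cpu_*\n deny:\n - node_cpu_guest_seconds_total\n # treat untyped temperature readings as gauges\n fallback_type:\n gauge:\n - temperature_*\n```\n\n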
{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nAn example configuration with basic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nAn example configuration that does not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nAn example configuration for collecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**All other untyped metrics are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Starlink_(SpaceX)",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-starwind_vsan",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Starwind VSAN VSphere Edition",link:"https://github.com/evoicefire/starwind-vsan-exporter",icon_filename:"starwind.svg",categories:["data-collection.storage-mount-points-and-filesystems"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Starwind VSAN VSphere Edition\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on StarWind Virtual SAN metrics for efficient storage virtualization and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Starwind vSAN Exporter](https://github.com/evoicefire/starwind-vsan-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Starwind vSAN 
Exporter](https://github.com/evoicefire/starwind-vsan-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency, in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout, in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n
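As a combined sketch of the two options above (the metric name patterns here are illustrative, not specific to this exporter), a single job might use both:\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n # keep only CPU series, except the guest CPU counter\n selector:\n allow:\n - node_cpu_*\n deny:\n - node_cpu_guest_seconds_total\n # treat untyped temperature readings as gauges\n fallback_type:\n gauge:\n - temperature_*\n```\n\n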
{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nAn example configuration with basic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nAn example configuration that does not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nAn example configuration for collecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**All other untyped metrics are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Starwind_VSAN_VSphere_Edition",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-statuspage",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"StatusPage",link:"https://github.com/vladvasiliu/statuspage-exporter",icon_filename:"statuspage.png",categories:["data-collection.notifications"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# StatusPage\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor StatusPage.io incident and status metrics for efficient incident management and communication.\n\n\nMetrics are gathered by periodically sending HTTP requests to [StatusPage Exporter](https://github.com/vladvasiliu/statuspage-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [StatusPage Exporter](https://github.com/vladvasiliu/statuspage-exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
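For instance, a minimal sketch that keeps only Go runtime series (the go_* pattern is a hypothetical placeholder, not a shipped default):\n\n```yaml\n# hypothetical pattern, not a shipped default\nselector:\n allow:\n - go_*\n```\n\n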
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`.
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-StatusPage",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-steam_a2s",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Steam",link:"https://github.com/armsnyder/a2s-exporter",icon_filename:"a2s.png",categories:["data-collection.gaming"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Steam\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nGain insights into Steam A2S-supported game servers for performance and availability through real-time metric monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [A2S Exporter](https://github.com/armsnyder/a2s-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [A2S Exporter](https://github.com/armsnyder/a2s-exporter) by following the instructions mentioned in the exporter 
README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`.
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Steam",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-storidge",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Storidge",link:"https://github.com/Storidge/cio-user-docs/blob/master/integrations/prometheus.md",icon_filename:"storidge.png",categories:["data-collection.storage-mount-points-and-filesystems"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Storidge\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Storidge storage metrics for efficient storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Storidge exporter](https://github.com/Storidge/cio-user-docs/blob/master/integrations/prometheus.md).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Storidge 
exporter](https://github.com/Storidge/cio-user-docs/blob/master/integrations/prometheus.md) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
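A deny-only form is also possible; a sketch (the pattern is hypothetical, and this assumes the usual allow-all default when allow is omitted):\n\n```yaml\n# hypothetical pattern; assumes allow defaults to everything when omitted\nselector:\n deny:\n - storidge_debug_*\n```\n\n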
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`.
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Storidge",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-stream_generic",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Stream",link:"https://github.com/carlpett/stream_exporter",icon_filename:"stream.png",categories:["data-collection.media-streaming-servers"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Stream\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor streaming metrics for efficient media streaming and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Stream exporter](https://github.com/carlpett/stream_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Stream exporter](https://github.com/carlpett/stream_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### 
Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`.
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Stream",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-sunspec",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Sunspec Solar Energy",link:"https://github.com/inosion/prometheus-sunspec-exporter",icon_filename:"sunspec.png",categories:["data-collection.iot-devices"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Sunspec Solar Energy\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor SunSpec Alliance solar energy metrics for efficient solar energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Sunspec Solar Energy Exporter](https://github.com/inosion/prometheus-sunspec-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sunspec Solar Energy 
Exporter](https://github.com/inosion/prometheus-sunspec-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
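In a job definition the option nests under the job entry, since it is a per-job setting from the table above; a sketch (job name, port, and pattern are hypothetical placeholders):\n\n```yaml\n# job name, port, and pattern are placeholders\njobs:\n - name: sunspec\n url: http://127.0.0.1:9090/metrics\n selector:\n allow:\n - sunspec_*\n```\n\n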
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`.
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Sunspec_Solar_Energy",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-suricata",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Suricata",link:"https://github.com/corelight/suricata_exporter",icon_filename:"suricata.png",categories:["data-collection.security-systems"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Suricata\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Suricata network intrusion detection and prevention system (IDS/IPS) metrics for efficient network security and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Suricata Exporter](https://github.com/corelight/suricata_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Suricata Exporter](https://github.com/corelight/suricata_exporter) 
by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`.
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Suricata",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-synology_activebackup",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Synology ActiveBackup",link:"https://github.com/codemonauts/activebackup-prometheus-exporter",icon_filename:"synology.png",categories:["data-collection.storage-mount-points-and-filesystems"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Synology ActiveBackup\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Synology Active Backup metrics for efficient backup and data protection management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Synology ActiveBackup Exporter](https://github.com/codemonauts/activebackup-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Synology ActiveBackup 
Exporter](https://github.com/codemonauts/activebackup-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
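Allow and deny can be combined, in which case a series must match the allow side and not match the deny side, as the logic line below spells out; a sketch with hypothetical patterns:\n\n```yaml\n# hypothetical patterns\nselector:\n allow:\n - synology_*\n deny:\n - synology_debug_*\n```\n\n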
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`.
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Synology_ActiveBackup",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-sysload",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Sysload",link:"https://github.com/egmc/sysload_exporter",icon_filename:"sysload.png",categories:["data-collection.apm"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Sysload\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor system load metrics for efficient system performance and resource management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Sysload Exporter](https://github.com/egmc/sysload_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sysload Exporter](https://github.com/egmc/sysload_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### 
Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series.
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Replace the port with the one on which your monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Replace the port with the one on which your monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Replace the port with the one on which your monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Replace the port with the one on which your monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`.
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Sysload",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-trex",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"T-Rex NVIDIA GPU Miner",link:"https://github.com/dennisstritzke/trex_exporter",icon_filename:"trex.png",categories:["data-collection.hardware-devices-and-sensors"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# T-Rex NVIDIA GPU Miner\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor T-Rex NVIDIA GPU miner metrics for efficient cryptocurrency mining and GPU performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [T-Rex NVIDIA GPU Miner Exporter](https://github.com/dennisstritzke/trex_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [T-Rex NVIDIA GPU Miner 
Exporter](https://github.com/dennisstritzke/trex_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series.
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Replace the port with the one on which your monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Replace the port with the one on which your monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Replace the port with the one on which your monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Replace the port with the one on which your monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`.
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-T-Rex_NVIDIA_GPU_Miner",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-tacas",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"TACACS",link:"https://github.com/devon-mar/tacacs-exporter",icon_filename:"tacacs.png",categories:["data-collection.authentication-and-authorization"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# TACACS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Terminal Access Controller Access-Control System (TACACS) protocol metrics for efficient network authentication and authorization management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [TACACS Exporter](https://github.com/devon-mar/tacacs-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [TACACS 
Exporter](https://github.com/devon-mar/tacacs-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series.
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Replace the port with the one on which your monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Replace the port with the one on which your monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Replace the port with the one on which your monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Replace the port with the one on which your monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`.
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-TACACS",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-tplink_p110",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"TP-Link P110",link:"https://github.com/ijohanne/prometheus-tplink-p110-exporter",icon_filename:"tplink.png",categories:["data-collection.iot-devices"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# TP-Link P110\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack TP-Link P110 smart plug metrics for efficient energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [TP-Link P110 Exporter](https://github.com/ijohanne/prometheus-tplink-p110-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [TP-Link P110 Exporter](https://github.com/ijohanne/prometheus-tplink-p110-exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series.
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Replace the port with the one on which your monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Replace the port with the one on which your monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Replace the port with the one on which your monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Replace the port with the one on which your monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`.
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-TP-Link_P110",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-tado",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Tado smart heating solution",link:"https://github.com/eko/tado-exporter",icon_filename:"tado.png",categories:["data-collection.iot-devices"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Tado smart heating solution\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Tado smart thermostat metrics for efficient home heating and cooling management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Tado\\xB0 Exporter](https://github.com/eko/tado-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Tado Exporter](https://github.com/eko/tado-exporter) by following the instructions mentioned in the exporter 
README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series.
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Replace the port with the one on which your monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Replace the port with the one on which your monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Replace the port with the one on which your monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Replace the port with the one on which your monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`.
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Tado_smart_heating_solution",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-tankerkoenig",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Tankerkoenig API",link:"https://github.com/lukasmalkmus/tankerkoenig_exporter",icon_filename:"tanker.png",categories:["data-collection.generic-data-collection"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Tankerkoenig API\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Tankerkoenig API fuel price metrics for efficient fuel price monitoring and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Tankerkoenig API Exporter](https://github.com/lukasmalkmus/tankerkoenig_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Tankerkoenig API
Exporter](https://github.com/lukasmalkmus/tankerkoenig_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series.
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Replace the port with the one on which your monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Replace the port with the one on which your monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Replace the port with the one on which your monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Replace the port with the one on which your monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`.
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Tankerkoenig_API",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-tesla_powerwall",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Tesla Powerwall",link:"https://github.com/foogod/powerwall_exporter",icon_filename:"tesla.png",categories:["data-collection.iot-devices"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Tesla Powerwall\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Tesla Powerwall metrics for efficient home energy storage and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Tesla Powerwall Exporter](https://github.com/foogod/powerwall_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Tesla Powerwall Exporter](https://github.com/foogod/powerwall_exporter) by following the instructions mentioned 
in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series.
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Replace the port with the one on which your monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Replace the port with the one on which your monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Replace the port with the one on which your monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Replace the port with the one on which your monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`.
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Tesla_Powerwall",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-tesla_wall_connector",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Tesla Wall Connector",link:"https://github.com/benclapp/tesla_wall_connector_exporter",icon_filename:"tesla.png",categories:["data-collection.iot-devices"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Tesla Wall Connector\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Tesla Wall Connector charging station metrics for efficient electric vehicle charging management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Tesla Wall Connector Exporter](https://github.com/benclapp/tesla_wall_connector_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Tesla Wall Connector 
",integration_type:"collector",id:"go.d.plugin-prometheus-Tesla_Powerwall",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-tesla_wall_connector",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Tesla Wall Connector",link:"https://github.com/benclapp/tesla_wall_connector_exporter",icon_filename:"tesla.png",categories:["data-collection.iot-devices"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Tesla Wall Connector\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Tesla Wall Connector charging station metrics for efficient electric vehicle charging management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Tesla Wall Connector Exporter](https://github.com/benclapp/tesla_wall_connector_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Tesla Wall Connector Exporter](https://github.com/benclapp/tesla_wall_connector_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n
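For instance, a hypothetical filter that keeps every series whose name starts with `app_` except debug series (the `app_*` patterns are placeholders, assuming the glob-style matching described in the selector README):\n\n```yaml\n# placeholder patterns - adjust to the series your exporter exposes\nselector:\n allow:\n - app_*\n deny:\n - app_*_debug\n```\n\n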
\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n
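For example (an illustrative exposition snippet, not taken from any specific exporter), given two series that lack a '# TYPE' line:\n\n```\n# both series below are hypothetical\napp_requests_total 42\napp_temperature_celsius 21.5\n```\n\nthe first is processed as a Counter because of its '_total' suffix, while the second matches no rule and is ignored unless a 'fallback_type' pattern assigns it Counter or Gauge.\n\n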
",integration_type:"collector",id:"go.d.plugin-prometheus-Tesla_Wall_Connector",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-tesla_vehicle",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Tesla vehicle",link:"https://github.com/wywywywy/tesla-prometheus-exporter",icon_filename:"tesla.png",categories:["data-collection.iot-devices"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Tesla vehicle\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Tesla vehicle metrics for efficient electric vehicle management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Tesla exporter](https://github.com/wywywywy/tesla-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Tesla exporter](https://github.com/wywywywy/tesla-prometheus-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n
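For instance, a hypothetical filter that keeps every series whose name starts with `app_` except debug series (the `app_*` patterns are placeholders, assuming the glob-style matching described in the selector README):\n\n```yaml\n# placeholder patterns - adjust to the series your exporter exposes\nselector:\n allow:\n - app_*\n deny:\n - app_*_debug\n```\n\n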
\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n
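For example (an illustrative exposition snippet, not taken from any specific exporter), given two series that lack a '# TYPE' line:\n\n```\n# both series below are hypothetical\napp_requests_total 42\napp_temperature_celsius 21.5\n```\n\nthe first is processed as a Counter because of its '_total' suffix, while the second matches no rule and is ignored unless a 'fallback_type' pattern assigns it Counter or Gauge.\n\n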
",integration_type:"collector",id:"go.d.plugin-prometheus-Tesla_vehicle",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-traceroute",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Traceroute",link:"https://github.com/jeanfabrice/prometheus-tcptraceroute-exporter",icon_filename:"traceroute.png",categories:["data-collection.networking-stack-and-network-interfaces"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Traceroute\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nExport traceroute metrics for efficient network path analysis and performance monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [traceroute exporter](https://github.com/jeanfabrice/prometheus-tcptraceroute-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [traceroute exporter](https://github.com/jeanfabrice/prometheus-tcptraceroute-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n
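For instance, a hypothetical filter that keeps every series whose name starts with `app_` except debug series (the `app_*` patterns are placeholders, assuming the glob-style matching described in the selector README):\n\n```yaml\n# placeholder patterns - adjust to the series your exporter exposes\nselector:\n allow:\n - app_*\n deny:\n - app_*_debug\n```\n\n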
\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n
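For example (an illustrative exposition snippet, not taken from any specific exporter), given two series that lack a '# TYPE' line:\n\n```\n# both series below are hypothetical\napp_requests_total 42\napp_temperature_celsius 21.5\n```\n\nthe first is processed as a Counter because of its '_total' suffix, while the second matches no rule and is ignored unless a 'fallback_type' pattern assigns it Counter or Gauge.\n\n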
",integration_type:"collector",id:"go.d.plugin-prometheus-Traceroute",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-twincat_ads_webservice",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"TwinCAT ADS Web Service",link:"https://github.com/MarcusCalidus/twincat-ads-webservice-exporter",icon_filename:"twincat.png",categories:["data-collection.generic-data-collection"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# TwinCAT ADS Web Service\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor TwinCAT ADS (Automation Device Specification) Web Service metrics for efficient industrial automation and control.\n\n\nMetrics are gathered by periodically sending HTTP requests to [TwinCAT ADS Web Service exporter](https://github.com/MarcusCalidus/twincat-ads-webservice-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [TwinCAT ADS Web Service exporter](https://github.com/MarcusCalidus/twincat-ads-webservice-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n
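For instance, a hypothetical filter that keeps every series whose name starts with `app_` except debug series (the `app_*` patterns are placeholders, assuming the glob-style matching described in the selector README):\n\n```yaml\n# placeholder patterns - adjust to the series your exporter exposes\nselector:\n allow:\n - app_*\n deny:\n - app_*_debug\n```\n\n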
\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n
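For example (an illustrative exposition snippet, not taken from any specific exporter), given two series that lack a '# TYPE' line:\n\n```\n# both series below are hypothetical\napp_requests_total 42\napp_temperature_celsius 21.5\n```\n\nthe first is processed as a Counter because of its '_total' suffix, while the second matches no rule and is ignored unless a 'fallback_type' pattern assigns it Counter or Gauge.\n\n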
",integration_type:"collector",id:"go.d.plugin-prometheus-TwinCAT_ADS_Web_Service",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-twitch",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Twitch",link:"https://github.com/damoun/twitch_exporter",icon_filename:"twitch.svg",categories:["data-collection.media-streaming-servers"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Twitch\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Twitch streaming platform metrics for efficient live streaming management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Twitch exporter](https://github.com/damoun/twitch_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Twitch exporter](https://github.com/damoun/twitch_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n
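For instance, a hypothetical filter that keeps every series whose name starts with `app_` except debug series (the `app_*` patterns are placeholders, assuming the glob-style matching described in the selector README):\n\n```yaml\n# placeholder patterns - adjust to the series your exporter exposes\nselector:\n allow:\n - app_*\n deny:\n - app_*_debug\n```\n\n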
\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n
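For example (an illustrative exposition snippet, not taken from any specific exporter), given two series that lack a '# TYPE' line:\n\n```\n# both series below are hypothetical\napp_requests_total 42\napp_temperature_celsius 21.5\n```\n\nthe first is processed as a Counter because of its '_total' suffix, while the second matches no rule and is ignored unless a 'fallback_type' pattern assigns it Counter or Gauge.\n\n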
",integration_type:"collector",id:"go.d.plugin-prometheus-Twitch",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-ubiquity_ufiber",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Ubiquiti UFiber OLT",link:"https://github.com/swoga/ufiber-exporter",icon_filename:"ubiquiti.png",categories:["data-collection.networking-stack-and-network-interfaces"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Ubiquiti UFiber OLT\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Ubiquiti UFiber GPON (Gigabit Passive Optical Network) device metrics for efficient fiber-optic network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ufiber-exporter](https://github.com/swoga/ufiber-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ufiber-exporter](https://github.com/swoga/ufiber-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n
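For instance, a hypothetical filter that keeps every series whose name starts with `app_` except debug series (the `app_*` patterns are placeholders, assuming the glob-style matching described in the selector README):\n\n```yaml\n# placeholder patterns - adjust to the series your exporter exposes\nselector:\n allow:\n - app_*\n deny:\n - app_*_debug\n```\n\n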
\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n
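For example (an illustrative exposition snippet, not taken from any specific exporter), given two series that lack a '# TYPE' line:\n\n```\n# both series below are hypothetical\napp_requests_total 42\napp_temperature_celsius 21.5\n```\n\nthe first is processed as a Counter because of its '_total' suffix, while the second matches no rule and is ignored unless a 'fallback_type' pattern assigns it Counter or Gauge.\n\n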
",integration_type:"collector",id:"go.d.plugin-prometheus-Ubiquiti_UFiber_OLT",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-uptimerobot",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Uptimerobot",link:"https://github.com/wosc/prometheus-uptimerobot",icon_filename:"uptimerobot.svg",categories:["data-collection.synthetic-checks"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Uptimerobot\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor UptimeRobot website uptime monitoring metrics for efficient website availability tracking and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Uptimerobot Exporter](https://github.com/wosc/prometheus-uptimerobot).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Uptimerobot Exporter](https://github.com/wosc/prometheus-uptimerobot) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n
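For instance, a hypothetical filter that keeps every series whose name starts with `app_` except debug series (the `app_*` patterns are placeholders, assuming the glob-style matching described in the selector README):\n\n```yaml\n# placeholder patterns - adjust to the series your exporter exposes\nselector:\n allow:\n - app_*\n deny:\n - app_*_debug\n```\n\n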
\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n
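For example (an illustrative exposition snippet, not taken from any specific exporter), given two series that lack a '# TYPE' line:\n\n```\n# both series below are hypothetical\napp_requests_total 42\napp_temperature_celsius 21.5\n```\n\nthe first is processed as a Counter because of its '_total' suffix, while the second matches no rule and is ignored unless a 'fallback_type' pattern assigns it Counter or Gauge.\n\n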
README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-VSCode",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-vault_pki",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Vault PKI",link:"https://github.com/aarnaud/vault-pki-exporter",icon_filename:"vault.svg",categories:["data-collection.security-systems"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Vault PKI\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor HashiCorp Vault Public Key Infrastructure (PKI) metrics for efficient certificate management and security.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Vault PKI Exporter](https://github.com/aarnaud/vault-pki-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Vault PKI Exporter](https://github.com/aarnaud/vault-pki-exporter) by following the instructions mentioned 
in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Vault_PKI",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-vertica",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Vertica",link:"https://github.com/vertica/vertica-prometheus-exporter",icon_filename:"vertica.svg",categories:["data-collection.database-servers"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Vertica\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Vertica analytics database platform metrics for efficient database performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [vertica-prometheus-exporter](https://github.com/vertica/vertica-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [vertica-prometheus-exporter](https://github.com/vertica/vertica-prometheus-exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Vertica",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-warp10",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Warp10",link:"https://github.com/centreon/warp10-sensision-exporter",icon_filename:"warp10.svg",categories:["data-collection.database-servers"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Warp10\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Warp 10 time-series database metrics for efficient time-series data management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Warp10 Exporter](https://github.com/centreon/warp10-sensision-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Warp10 Exporter](https://github.com/centreon/warp10-sensision-exporter) by following the instructions mentioned 
in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Warp10",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-xmpp_blackbox",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"XMPP Server",link:"https://github.com/horazont/xmpp-blackbox-exporter",icon_filename:"xmpp.svg",categories:["data-collection.message-brokers"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# XMPP Server\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor XMPP (Extensible Messaging and Presence Protocol) server metrics for efficient messaging and communication management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [XMPP Server Exporter](https://github.com/horazont/xmpp-blackbox-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [XMPP Server Exporter](https://github.com/horazont/xmpp-blackbox-exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-XMPP_Server",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-xiaomi_mi_flora",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Xiaomi Mi Flora",link:"https://github.com/xperimental/flowercare-exporter",icon_filename:"xiaomi.svg",categories:["data-collection.iot-devices"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Xiaomi Mi Flora\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on MiFlora plant monitor metrics for efficient plant care and growth management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [MiFlora / Flower Care Exporter](https://github.com/xperimental/flowercare-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [MiFlora / Flower Care Exporter](https://github.com/xperimental/flowercare-exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Xiaomi_Mi_Flora",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-yourls",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"YOURLS URL Shortener",link:"https://github.com/just1not2/prometheus-exporter-yourls",icon_filename:"yourls.png",categories:["data-collection.apm"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# YOURLS URL Shortener\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor YOURLS (Your Own URL Shortener) metrics for efficient URL shortening service management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [YOURLS exporter](https://github.com/just1not2/prometheus-exporter-yourls).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [YOURLS exporter](https://github.com/just1not2/prometheus-exporter-yourls) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-YOURLS_URL_Shortener",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-zerto",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Zerto",link:"https://github.com/claranet/zerto-exporter",icon_filename:"zerto.png",categories:["data-collection.cloud-provider-managed"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Zerto\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Zerto disaster recovery and data protection metrics for efficient backup and recovery management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Zerto Exporter](https://github.com/claranet/zerto-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Zerto Exporter](https://github.com/claranet/zerto-exporter) by following the instructions mentioned in the exporter 
README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics (counter or gauge). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Zerto",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-zulip",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Zulip",link:"https://github.com/brokenpip3/zulip-exporter",icon_filename:"zulip.png",categories:["data-collection.media-streaming-servers"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Zulip\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Zulip open-source group chat application metrics for efficient team communication management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Zulip Exporter](https://github.com/brokenpip3/zulip-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Zulip Exporter](https://github.com/brokenpip3/zulip-exporter) by following the instructions mentioned in the 
exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics (counter or gauge). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Zulip",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-zyxel_gs1200",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Zyxel GS1200-8",link:"https://github.com/robinelfrink/gs1200-exporter",icon_filename:"zyxel.png",categories:["data-collection.networking-stack-and-network-interfaces"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# Zyxel GS1200-8\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Zyxel GS1200 network switch metrics for efficient network device management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Zyxel GS1200 Exporter](https://github.com/robinelfrink/gs1200-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Zyxel GS1200 Exporter](https://github.com/robinelfrink/gs1200-exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics (counter or gauge). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Zyxel_GS1200-8",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-bpftrace",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"bpftrace variables",link:"https://github.com/andreasgerstmayr/bpftrace_exporter",icon_filename:"bpftrace.png",categories:["data-collection.apm"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# bpftrace variables\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack bpftrace metrics for advanced performance analysis and troubleshooting.\n\n\nMetrics are gathered by periodically sending HTTP requests to [bpftrace exporter](https://github.com/andreasgerstmayr/bpftrace_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [bpftrace exporter](https://github.com/andreasgerstmayr/bpftrace_exporter) by following the instructions mentioned 
in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics (counter or gauge). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-bpftrace_variables",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-cadvisor",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"cAdvisor",link:"https://github.com/google/cadvisor",icon_filename:"cadvisor.png",categories:["data-collection.containers-and-vms"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# cAdvisor\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor container resource usage and performance metrics with cAdvisor for efficient container management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [cAdvisor](https://github.com/google/cadvisor).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [cAdvisor](https://github.com/google/cadvisor) by following the instructions mentioned in the exporter README.\n\n\n\n### 
Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics (counter or gauge). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-cAdvisor",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-etcd",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"etcd",link:"https://etcd.io/",icon_filename:"etcd.svg",categories:["data-collection.service-discovery-registry"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# etcd\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack etcd database metrics for optimized distributed key-value store management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to etcd's built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata 
[config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics (counter or gauge). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-etcd",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-gpsd",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"gpsd",link:"https://github.com/natesales/gpsd-exporter",icon_filename:"gpsd.png",categories:["data-collection.apm"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# gpsd\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor GPSD (GPS daemon) metrics for efficient GPS data management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [gpsd exporter](https://github.com/natesales/gpsd-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [gpsd exporter](https://github.com/natesales/gpsd-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe 
configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics (counter or gauge). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-gpsd",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-iqair",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"iqAir AirVisual air quality monitors",link:"https://github.com/Packetslave/iqair_exporter",icon_filename:"iqair.svg",categories:["data-collection.iot-devices"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# iqAir AirVisual air quality monitors\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor air quality data from IQAir devices for efficient environmental monitoring and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [IQair Exporter](https://github.com/Packetslave/iqair_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [IQair Exporter](https://github.com/Packetslave/iqair_exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-iqAir_AirVisual_air_quality_monitors",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-jolokia",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"jolokia",link:"https://github.com/aklinkert/jolokia_exporter",icon_filename:"jolokia.png",categories:["data-collection.apm"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# jolokia\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Jolokia JVM metrics for optimized Java application performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [jolokia_exporter](https://github.com/aklinkert/jolokia_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [jolokia_exporter](https://github.com/aklinkert/jolokia_exporter) by following the instructions mentioned in the exporter 
README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-jolokia",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-journald",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"journald",link:"https://github.com/dead-claudia/journald-exporter",icon_filename:"linux.png",categories:["data-collection.logs-servers"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# journald\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on systemd-journald metrics for efficient log management and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [journald-exporter](https://github.com/dead-claudia/journald-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [journald-exporter](https://github.com/dead-claudia/journald-exporter) by following the instructions mentioned in the exporter 
README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-journald",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-loki",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"loki",link:"https://github.com/grafana/loki",icon_filename:"loki.png",categories:["data-collection.logs-servers"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# loki\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Loki metrics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [loki](https://github.com/grafana/loki).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Loki\n\nInstall [loki](https://github.com/grafana/loki) according to its documentation.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from 
the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-loki",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-mosquitto",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"mosquitto",link:"https://github.com/sapcc/mosquitto-exporter",icon_filename:"mosquitto.svg",categories:["data-collection.message-brokers"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# mosquitto\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Mosquitto MQTT broker metrics for efficient IoT message transport and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [mosquitto exporter](https://github.com/sapcc/mosquitto-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [mosquitto exporter](https://github.com/sapcc/mosquitto-exporter) by following the instructions mentioned in the exporter 
README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-mosquitto",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-mtail",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"mtail",link:"https://github.com/google/mtail",icon_filename:"mtail.png",categories:["data-collection.logs-servers"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# mtail\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor log data metrics using mtail log data extractor and parser.\n\n\nMetrics are gathered by periodically sending HTTP requests to [mtail](https://github.com/google/mtail).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [mtail](https://github.com/google/mtail) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is 
`go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-mtail",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-nftables",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"nftables",link:"https://github.com/Sheridan/nftables_exporter",icon_filename:"nftables.png",categories:["data-collection.linux-systems.firewall-metrics"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# nftables\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor nftables firewall metrics for efficient network security and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [nftables_exporter](https://github.com/Sheridan/nftables_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [nftables_exporter](https://github.com/Sheridan/nftables_exporter) by following the instructions mentioned in the exporter 
README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-nftables",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-pgbackrest",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"pgBackRest",link:"https://github.com/woblerr/pgbackrest_exporter",icon_filename:"pgbackrest.png",categories:["data-collection.database-servers"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# pgBackRest\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor pgBackRest PostgreSQL backup metrics for efficient database backup and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [pgBackRest Exporter](https://github.com/woblerr/pgbackrest_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [pgBackRest Exporter](https://github.com/woblerr/pgbackrest_exporter) by following the instructions mentioned in the 
exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
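As a minimal sketch only (the `pgbackrest_*` pattern names here are hypothetical, check the exporter output for the real metric names), a selector that keeps all pgBackRest series except per-repository ones could look like:\n\n```yaml\nselector:\n allow:\n - pgbackrest_*\n deny:\n - pgbackrest_repo_*\n```\n\n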
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-pgBackRest",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-strongswan",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"strongSwan",link:"https://github.com/jlti-dev/ipsec_exporter",icon_filename:"strongswan.svg",categories:["data-collection.vpns"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1,community:!0},overview:"# strongSwan\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack strongSwan VPN and IPSec metrics using the vici interface for efficient virtual private network (VPN) management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [strongSwan/IPSec/vici Exporter](https://github.com/jlti-dev/ipsec_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [strongSwan/IPSec/vici Exporter](https://github.com/jlti-dev/ipsec_exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
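As an illustrative sketch, a deny-only selector that drops the Go runtime and process series many exporters expose alongside their own metrics (`go_*` and `process_*` are assumptions, not names confirmed for this exporter):\n\n```yaml\nselector:\n deny:\n - go_*\n - process_*\n```\n\n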
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-strongSwan",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-proxysql",plugin_name:"go.d.plugin",module_name:"proxysql",monitored_instance:{name:"ProxySQL",link:"https://www.proxysql.com/",icon_filename:"proxysql.png",categories:["data-collection.database-servers"]},keywords:["proxysql","databases","sql"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1},overview:"# ProxySQL\n\nPlugin: go.d.plugin\nModule: proxysql\n\n## Overview\n\nThis collector monitors ProxySQL servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/proxysql.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/proxysql.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection 
frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | Data Source Name. See [DSN syntax](https://github.com/go-sql-driver/mysql#dsn-data-source-name). | stats:stats@tcp(127.0.0.1:6032)/ | yes |\n| timeout | Query timeout in seconds. | 1 | no |\n\n{% /details %}\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n dsn: stats:stats@tcp(127.0.0.1:6032)/\n\n```\n{% /details %}\n##### my.cnf\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n my.cnf: \'/etc/my.cnf\'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n dsn: stats:stats@tcp(127.0.0.1:6032)/\n\n - name: remote\n dsn: stats:stats@tcp(203.0.113.0:6032)/\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `proxysql` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m proxysql\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per ProxySQL instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| proxysql.client_connections_count | connected, non_idle, hostgroup_locked | connections |\n| proxysql.client_connections_rate | created, aborted | connections/s |\n| proxysql.server_connections_count | connected | connections |\n| proxysql.server_connections_rate | created, aborted, delayed | connections/s |\n| proxysql.backends_traffic | recv, sent | B/s |\n| proxysql.clients_traffic | recv, sent | B/s |\n| proxysql.active_transactions_count | client | connections |\n| proxysql.questions_rate | questions | questions/s |\n| proxysql.slow_queries_rate | slow | queries/s |\n| proxysql.queries_rate | autocommit, autocommit_filtered, commit_filtered, rollback, rollback_filtered, backend_change_user, backend_init_db, backend_set_names, frontend_init_db, frontend_set_names, frontend_use_db | queries/s |\n| proxysql.backend_statements_count | total, unique | statements |\n| proxysql.backend_statements_rate | prepare, execute, close | statements/s |\n| proxysql.client_statements_count | total, unique | statements |\n| proxysql.client_statements_rate | prepare, execute, close | statements/s |\n| proxysql.cached_statements_count | cached | statements |\n| proxysql.query_cache_entries_count | entries | entries |\n| proxysql.query_cache_memory_used | used | B |\n| proxysql.query_cache_io | in, out | B/s |\n| proxysql.query_cache_requests_rate | read, write, read_success | requests/s |\n| proxysql.mysql_monitor_workers_count | workers, auxiliary | threads |\n| proxysql.mysql_monitor_workers_rate | started | workers/s |\n| proxysql.mysql_monitor_connect_checks_rate | succeed, failed | checks/s |\n| proxysql.mysql_monitor_ping_checks_rate | succeed, failed | checks/s |\n| proxysql.mysql_monitor_read_only_checks_rate | succeed, failed | checks/s |\n| proxysql.mysql_monitor_replication_lag_checks_rate | succeed, failed | checks/s |\n| proxysql.jemalloc_memory_used | active, allocated, mapped, metadata, resident, retained | B |\n| proxysql.memory_used | auth, sqlite3, query_digest, query_rules, firewall_users_table, firewall_users_config, firewall_rules_table, firewall_rules_config, mysql_threads, admin_threads, cluster_threads | B |\n| proxysql.uptime | uptime | seconds |\n\n### Per command\n\nThese metrics refer to the SQL command.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| command | SQL command. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| proxysql.mysql_command_execution_rate | uptime | seconds |\n| proxysql.mysql_command_execution_time | time | microseconds |\n| proxysql.mysql_command_execution_duration | 100us, 500us, 1ms, 5ms, 10ms, 50ms, 100ms, 500ms, 1s, 5s, 10s, +Inf | microseconds |\n\n### Per user\n\nThese metrics refer to the user.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| user | username from the mysql_users table |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| proxysql.mysql_user_connections_utilization | used | percentage |\n| proxysql.mysql_user_connections_count | used | connections |\n\n### Per backend\n\nThese metrics refer to the backend server.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| host | backend server host |\n| port | backend server port |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| proxysql.backend_status | online, shunned, offline_soft, offline_hard | status |\n| proxysql.backend_connections_usage | free, used | connections |\n| proxysql.backend_connections_rate | succeed, failed | connections/s |\n| proxysql.backend_queries_rate | queries | queries/s |\n| proxysql.backend_traffic | recv, send | B/s |\n| proxysql.backend_latency | latency | microseconds |\n\n",integration_type:"collector",id:"go.d.plugin-proxysql-ProxySQL",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/proxysql/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-pulsar",plugin_name:"go.d.plugin",module_name:"pulsar",monitored_instance:{name:"Apache Pulsar",link:"https://pulsar.apache.org/",icon_filename:"pulsar.svg",categories:["data-collection.message-brokers"]},keywords:["pulsar"],related_resources:{integrations:{list:[{plugin_name:"apps.plugin",module_name:"apps"}]}},info_provided_to_referring_integrations:{description:""},most_popular:!0},overview:"# Apache Pulsar\n\nPlugin: go.d.plugin\nModule: pulsar\n\n## Overview\n\nThis collector monitors Pulsar servers.\n\n\nIt collects broker statistics using Pulsar's [Prometheus endpoint](https://pulsar.apache.org/docs/en/deploy-monitoring/#broker-stats).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Pulsar instances running on localhost.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/pulsar.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/pulsar.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data 
collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8080/metrics | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8080/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/metrics\n\n - name: remote\n url: http://192.0.2.1:8080/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `pulsar` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m pulsar\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n- topic_* metrics are available when `exposeTopicLevelMetricsInPrometheus` is set to true.\n- subscription_* and namespace_subscription_* metrics are available when `exposeTopicLevelMetricsInPrometheus` is set to true.\n- replication_* and namespace_replication_* metrics are available when replication is configured and `replicationMetricsEnabled` is set to true.\n\n\n### Per Apache Pulsar instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| pulsar.broker_components | namespaces, topics, subscriptions, producers, consumers | components |\n| pulsar.messages_rate | publish, dispatch | messages/s |\n| pulsar.throughput_rate | publish, dispatch | KiB/s |\n| pulsar.storage_size | used | KiB |\n| pulsar.storage_operations_rate | read, write | message batches/s |\n| pulsar.msg_backlog | backlog | messages |\n| pulsar.storage_write_latency | <=0.5ms, <=1ms, <=5ms, <=10ms, <=20ms, <=50ms, <=100ms, <=200ms, <=1s, >1s | entries/s |\n| pulsar.entry_size | <=128B, <=512B, <=1KB, <=2KB, <=4KB, <=16KB, <=100KB, <=1MB, >1MB | entries/s |\n| pulsar.subscription_delayed | delayed | message batches |\n| pulsar.subscription_msg_rate_redeliver | redelivered | messages/s |\n| pulsar.subscription_blocked_on_unacked_messages | blocked | subscriptions |\n| pulsar.replication_rate | in, out | messages/s |\n| pulsar.replication_throughput_rate | in, out | KiB/s |\n| pulsar.replication_backlog | backlog | messages |\n\n### Per namespace\n\nThese metrics refer to the namespace.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| pulsar.namespace_broker_components | topics, subscriptions, producers, consumers | components |\n| pulsar.namespace_messages_rate | publish, dispatch | messages/s |\n| pulsar.namespace_throughput_rate | publish, dispatch | KiB/s |\n| pulsar.namespace_storage_size | used | KiB |\n| pulsar.namespace_storage_operations_rate | read, write | message batches/s |\n| pulsar.namespace_msg_backlog | backlog | messages |\n| pulsar.namespace_storage_write_latency | <=0.5ms, <=1ms, <=5ms, <=10ms, <=20ms, <=50ms, <=100ms, <=200ms, <=1s, >1s | entries/s |\n| pulsar.namespace_entry_size | <=128B, <=512B, <=1KB, <=2KB, <=4KB, <=16KB, <=100KB, <=1MB, >1MB | entries/s |\n| pulsar.namespace_subscription_delayed | delayed | message batches |\n| pulsar.namespace_subscription_msg_rate_redeliver | redelivered | messages/s |\n| pulsar.namespace_subscription_blocked_on_unacked_messages | blocked | subscriptions |\n| pulsar.namespace_replication_rate | in, out | messages/s |\n| pulsar.namespace_replication_throughput_rate | in, out | KiB/s |\n| pulsar.namespace_replication_backlog | backlog | messages |\n| pulsar.topic_producers | a dimension per topic | producers |\n| pulsar.topic_subscriptions | a dimension per topic | subscriptions |\n| pulsar.topic_consumers | a dimension per topic | consumers |\n| pulsar.topic_messages_rate_in | a dimension per topic | publishes/s |\n| pulsar.topic_messages_rate_out | a dimension per topic | dispatches/s |\n| pulsar.topic_throughput_rate_in | a dimension per topic | KiB/s |\n| pulsar.topic_throughput_rate_out | a dimension per topic | KiB/s |\n| pulsar.topic_storage_size | a dimension per topic | KiB |\n| pulsar.topic_storage_read_rate | a dimension per topic | message batches/s |\n| pulsar.topic_storage_write_rate | a dimension per topic | message batches/s |\n| pulsar.topic_msg_backlog 
| a dimension per topic | messages |\n| pulsar.topic_subscription_delayed | a dimension per topic | message batches |\n| pulsar.topic_subscription_msg_rate_redeliver | a dimension per topic | messages/s |\n| pulsar.topic_subscription_blocked_on_unacked_messages | a dimension per topic | blocked subscriptions |\n| pulsar.topic_replication_rate_in | a dimension per topic | messages/s |\n| pulsar.topic_replication_rate_out | a dimension per topic | messages/s |\n| pulsar.topic_replication_throughput_rate_in | a dimension per topic | KiB/s |\n| pulsar.topic_replication_throughput_rate_out | a dimension per topic | KiB/s |\n| pulsar.topic_replication_backlog | a dimension per topic | messages |\n\n",integration_type:"collector",id:"go.d.plugin-pulsar-Apache_Pulsar",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/pulsar/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-rabbitmq",plugin_name:"go.d.plugin",module_name:"rabbitmq",monitored_instance:{name:"RabbitMQ",link:"https://www.rabbitmq.com/",icon_filename:"rabbitmq.svg",categories:["data-collection.message-brokers"]},keywords:["rabbitmq","message brokers"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1},overview:"# RabbitMQ\n\nPlugin: go.d.plugin\nModule: rabbitmq\n\n## Overview\n\nThis collector monitors RabbitMQ instances.\n\nIt collects data using an HTTP-based API provided by the [management plugin](https://www.rabbitmq.com/management.html).\nThe following endpoints are used:\n\n- `/api/overview`\n- `/api/node/{node_name}`\n- `/api/vhosts`\n- `/api/queues` (disabled by default)\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Enable the management plugin\n\nThe management plugin is included in the RabbitMQ distribution, but it is disabled by default.\nTo enable it, run `rabbitmq-plugins enable rabbitmq_management`, or see the [Management Plugin](https://www.rabbitmq.com/management.html#getting-started) documentation.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/rabbitmq.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/rabbitmq.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://localhost:15672 | yes |\n| collect_queues_metrics | Collect stats per vhost per queue. Enabling this can introduce serious overhead on both Netdata and RabbitMQ if many queues are configured and used. 
| no | no |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:15672\n\n```\n{% /details %}\n##### Basic HTTP auth\n\nLocal server with basic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:15672\n username: admin\n password: password\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:15672\n\n - name: remote\n url: http://192.0.2.0:15672\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `rabbitmq` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m rabbitmq\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per RabbitMQ instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| rabbitmq.messages_count | ready, unacknowledged | messages |\n| rabbitmq.messages_rate | ack, publish, publish_in, publish_out, confirm, deliver, deliver_no_ack, get, get_no_ack, deliver_get, redeliver, return_unroutable | messages/s |\n| rabbitmq.objects_count | channels, consumers, connections, queues, exchanges | objects |\n| rabbitmq.connection_churn_rate | created, closed | operations/s |\n| rabbitmq.channel_churn_rate | created, closed | operations/s |\n| rabbitmq.queue_churn_rate | created, deleted, declared | operations/s |\n| rabbitmq.file_descriptors_count | available, used | fd |\n| rabbitmq.sockets_count | available, used | sockets |\n| rabbitmq.erlang_processes_count | available, used | processes |\n| rabbitmq.erlang_run_queue_processes_count | length | processes |\n| rabbitmq.memory_usage | used | bytes |\n| rabbitmq.disk_space_free_size | free | bytes |\n\n### Per vhost\n\nThese metrics refer to the virtual host.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vhost | virtual host name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| rabbitmq.vhost_messages_count | ready, unacknowledged | messages |\n| rabbitmq.vhost_messages_rate | ack, publish, publish_in, publish_out, confirm, deliver, deliver_no_ack, get, get_no_ack, deliver_get, redeliver, return_unroutable | messages/s |\n\n### Per queue\n\nThese metrics refer to the virtual host queue.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vhost | virtual host name |\n| queue | queue name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| rabbitmq.queue_messages_count | ready, unacknowledged, paged_out, persistent | messages |\n| rabbitmq.queue_messages_rate | ack, publish, publish_in, publish_out, confirm, deliver, deliver_no_ack, get, get_no_ack, deliver_get, redeliver, return_unroutable | messages/s |\n\n",integration_type:"collector",id:"go.d.plugin-rabbitmq-RabbitMQ",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/rabbitmq/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-redis",plugin_name:"go.d.plugin",module_name:"redis",monitored_instance:{name:"Redis",link:"https://redis.com/",categories:["data-collection.database-servers"],icon_filename:"redis.svg"},related_resources:{integrations:{list:[{plugin_name:"apps.plugin",module_name:"apps"},{plugin_name:"cgroups.plugin",module_name:"cgroups"}]}},alternative_monitored_instances:[],info_provided_to_referring_integrations:{description:""},keywords:["redis","databases"],most_popular:!0},overview:"# Redis\n\nPlugin: go.d.plugin\nModule: redis\n\n## Overview\n\nThis collector monitors the health and performance of Redis servers and collects general statistics, CPU and memory consumption, replication information, command statistics, and more.\n\n\nIt connects to the Redis instance via a TCP or UNIX socket and executes the following commands:\n\n- [INFO ALL](https://redis.io/commands/info)\n- [PING](https://redis.io/commands/ping/)\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### 
Auto-Detection\n\nBy default, it detects instances running on localhost by attempting to connect using known Redis TCP and UNIX sockets:\n\n- 127.0.0.1:6379\n- /tmp/redis.sock\n- /var/run/redis/redis.sock\n- /var/lib/redis/redis.sock\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/redis.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/redis.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Redis server address. | redis://@localhost:6379 | yes |\n| timeout | Dial (establishing new connections), read (socket reads) and write (socket writes) timeout in seconds. | 1 | no |\n| username | Username used for authentication. | | no |\n| password | Password used for authentication. | | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certificate authority that the client uses when verifying server certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 'redis://@127.0.0.1:6379'\n\n```\n{% /details %}\n##### Unix socket\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 'unix://@/tmp/redis.sock'\n\n```\n{% /details %}\n##### TCP socket with password\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 'redis://:password@127.0.0.1:6379'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 'redis://:password@127.0.0.1:6379'\n\n - name: remote\n address: 'redis://user:password@203.0.113.0:6379'\n\n```\n{% /details %}\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `redis` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m redis\n ```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ redis_connections_rejected ](https://github.com/netdata/netdata/blob/master/src/health/health.d/redis.conf) | redis.connections | connections rejected because of maxclients limit in the last minute |\n| [ redis_bgsave_slow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/redis.conf) | redis.bgsave_now | duration of the on-going RDB save operation |\n| [ redis_bgsave_broken ](https://github.com/netdata/netdata/blob/master/src/health/health.d/redis.conf) | redis.bgsave_health | status of the last RDB save operation (0: ok, 1: error) |\n| [ redis_master_link_down ](https://github.com/netdata/netdata/blob/master/src/health/health.d/redis.conf) | redis.master_link_down_since_time | time elapsed since the link between master and slave is down |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Redis instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| redis.connections | accepted, rejected | connections/s |\n| redis.clients | connected, blocked, tracking, in_timeout_table | clients |\n| redis.ping_latency | min, max, avg | seconds |\n| redis.commands | processes | commands/s |\n| redis.keyspace_lookup_hit_rate | lookup_hit_rate | percentage |\n| redis.memory | max, used, rss, peak, dataset, lua, scripts | bytes |\n| redis.mem_fragmentation_ratio | mem_fragmentation | ratio |\n| redis.key_eviction_events | evicted | keys/s |\n| redis.net | received, sent | kilobits/s |\n| redis.rdb_changes | changes | operations |\n| redis.bgsave_now | current_bgsave_time | seconds |\n| redis.bgsave_health | last_bgsave | status |\n| redis.bgsave_last_rdb_save_since_time | last_bgsave_time | seconds |\n| redis.aof_file_size | current, base | bytes |\n| redis.commands_calls | a dimension per command | calls |\n| redis.commands_usec | a dimension per command | microseconds |\n| redis.commands_usec_per_sec | a dimension per command | microseconds/s |\n| redis.key_expiration_events | expired | keys/s |\n| redis.database_keys | a dimension per database | keys |\n| redis.database_expires_keys | a dimension per database | keys |\n| redis.connected_replicas | connected | replicas |\n| redis.master_link_status | up, down | status |\n| redis.master_last_io_since_time | time | seconds |\n| redis.master_link_down_since_time | time | seconds |\n| redis.uptime | uptime | seconds 
|\n\n",integration_type:"collector",id:"go.d.plugin-redis-Redis",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/redis/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-rspamd",plugin_name:"go.d.plugin",module_name:"rspamd",monitored_instance:{name:"Rspamd",link:"https://rspamd.com/",categories:["data-collection.security-systems"],icon_filename:"globe.svg"},related_resources:{integrations:{list:[{plugin_name:"go.d.plugin",module_name:"httpcheck"},{plugin_name:"apps.plugin",module_name:"apps"}]}},alternative_monitored_instances:[],info_provided_to_referring_integrations:{description:""},keywords:["spam","rspamd","email"],most_popular:!1},overview:"# Rspamd\n\nPlugin: go.d.plugin\nModule: rspamd\n\n## Overview\n\nThis collector monitors the activity and performance of Rspamd servers. It gathers various metrics including scanned emails, learned messages, spam/ham counts, and actions taken on emails (reject, rewrite, etc.).\n\n\nIt retrieves statistics from Rspamd's [built-in web server](https://rspamd.com/doc/workers/controller.html) by making HTTP requests to the `/stat` endpoint.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Rspamd instances running on localhost that are listening on port 11334.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/rspamd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/rspamd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:11334 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. 
| | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:11334\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:11334\n username: username\n password: password\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:11334\n\n - name: remote\n url: http://192.0.2.1:11334\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `rspamd` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m rspamd\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Rspamd instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| rspamd.classifications | ham, spam | messages/s |\n| rspamd.actions | reject, soft_reject, rewrite_subject, add_header, greylist, custom, discard, quarantine, no_action | messages/s |\n| rspamd.scans | scanned | messages/s |\n| rspamd.learns | learned | messages/s |\n| rspamd.connections | connections | connections/s |\n| rspamd.control_connections | control_connections | connections/s |\n\n",integration_type:"collector",id:"go.d.plugin-rspamd-Rspamd",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/rspamd/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-scaleio",plugin_name:"go.d.plugin",module_name:"scaleio",monitored_instance:{name:"Dell EMC ScaleIO",link:"https://www.dell.com/en-ca/dt/storage/scaleio/scaleioreadynode.htm",icon_filename:"dell.svg",categories:["data-collection.storage-mount-points-and-filesystems"]},keywords:["scaleio"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1},overview:"# Dell EMC ScaleIO\n\nPlugin: go.d.plugin\nModule: scaleio\n\n## Overview\n\nThis collector monitors ScaleIO (VxFlex OS) instances via VxFlex OS Gateway API.\n\nIt collects metrics for the following ScaleIO components:\n\n- System\n- Storage Pool\n- Sdc\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any 
limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/scaleio.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/scaleio.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | https://127.0.0.1:80 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | yes |\n| password | Password for basic HTTP authentication. | | yes |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1\n username: admin\n password: password\n tls_skip_verify: yes # self-signed certificate\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instance.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1\n username: admin\n password: password\n tls_skip_verify: yes # self-signed certificate\n\n - name: remote\n url: https://203.0.113.10\n username: admin\n password: password\n tls_skip_verify: yes\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `scaleio` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m scaleio\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Dell EMC ScaleIO instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| scaleio.system_capacity_total | total | KiB |\n| scaleio.system_capacity_in_use | in_use | KiB |\n| scaleio.system_capacity_usage | thick, decreased, thin, snapshot, spare, unused | KiB |\n| scaleio.system_capacity_available_volume_allocation | available | KiB |\n| scaleio.system_capacity_health_state | protected, degraded, in_maintenance, failed, unavailable | KiB |\n| scaleio.system_workload_primary_bandwidth_total | total | KiB/s |\n| scaleio.system_workload_primary_bandwidth | read, write | KiB/s |\n| scaleio.system_workload_primary_iops_total | total | iops/s |\n| scaleio.system_workload_primary_iops | read, write | iops/s |\n| scaleio.system_workload_primary_io_size_total | io_size | KiB |\n| scaleio.system_rebalance | read, write | KiB/s |\n| scaleio.system_rebalance_left | left | KiB |\n| scaleio.system_rebalance_time_until_finish | time | seconds |\n| scaleio.system_rebuild | read, write | KiB/s |\n| scaleio.system_rebuild_left | left | KiB |\n| scaleio.system_defined_components | devices, fault_sets, protection_domains, rfcache_devices, sdc, sds, snapshots, storage_pools, volumes, vtrees | components |\n| scaleio.system_components_volumes_by_type | thick, thin | volumes |\n| scaleio.system_components_volumes_by_mapping | mapped, unmapped | volumes |\n\n### Per storage pool\n\nThese metrics refer to the storage pool.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| scaleio.storage_pool_capacity_total | total | KiB |\n| scaleio.storage_pool_capacity_in_use | in_use | KiB |\n| scaleio.storage_pool_capacity_usage | thick, decreased, thin, snapshot, spare, unused | KiB |\n| scaleio.storage_pool_capacity_utilization | used | percentage |\n| scaleio.storage_pool_capacity_available_volume_allocation | available | KiB |\n| scaleio.storage_pool_capacity_health_state | protected, degraded, in_maintenance, failed, unavailable | KiB |\n| scaleio.storage_pool_components | devices, snapshots, volumes, vtrees | components |\n\n### Per sdc\n\nThese metrics refer to the SDC (ScaleIO Data Client).\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| scaleio.sdc_mdm_connection_state | connected | boolean |\n| scaleio.sdc_bandwidth | read, write | KiB/s |\n| scaleio.sdc_iops | read, write | iops/s |\n| scaleio.sdc_io_size | read, write | KiB |\n| scaleio.sdc_num_of_mapped_volumed | mapped | volumes 
|\n\n",integration_type:"collector",id:"go.d.plugin-scaleio-Dell_EMC_ScaleIO",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/scaleio/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-sensors",plugin_name:"go.d.plugin",module_name:"sensors",monitored_instance:{name:"Linux Sensors (lm-sensors)",link:"https://hwmon.wiki.kernel.org/lm_sensors",icon_filename:"microchip.svg",categories:["data-collection.hardware-devices-and-sensors"]},keywords:["sensors","temperature","voltage","current","power","fan","energy","humidity"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1},overview:"# Linux Sensors (lm-sensors)\n\nPlugin: go.d.plugin\nModule: sensors\n\n## Overview\n\nThis collector gathers real-time system sensor statistics, including temperature, voltage, current, power, fan speed, energy consumption, and humidity, utilizing the [sensors](https://linux.die.net/man/1/sensors) binary.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe following type of sensors are auto-detected:\n\n- temperature\n- fan\n- voltage\n- current\n- power\n- energy\n- humidity\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install lm-sensors\n\n- Install `lm-sensors` using your distribution\'s package manager.\n- Run `sensors-detect` to detect hardware monitoring chips.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/sensors.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/sensors.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| binary_path | Path to the `sensors` binary. If an absolute path is provided, the collector will use it directly; otherwise, it will search for the binary in directories specified in the PATH environment variable. | /usr/bin/sensors | yes |\n| timeout | Timeout for executing the binary, specified in seconds. | 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Custom binary path\n\nThe executable is not in the directories specified in the PATH environment variable.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: sensors\n binary_path: /usr/local/sbin/sensors\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `sensors` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m sensors\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per sensor\n\nThese metrics refer to the sensor.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| chip | The hardware component responsible for the sensor monitoring. |\n| feature | The specific sensor or monitoring point provided by the chip. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| sensors.sensor_temperature | temperature | Celsius |\n| sensors.sensor_voltage | voltage | Volts |\n| sensors.sensor_current | current | Amperes |\n| sensors.sensor_power | power | Watts |\n| sensors.sensor_fan_speed | fan | RPM |\n| sensors.sensor_energy | energy | Joules |\n| sensors.sensor_humidity | humidity | percent |\n\n",integration_type:"collector",id:"go.d.plugin-sensors-Linux_Sensors_(lm-sensors)",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/sensors/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-smartctl",plugin_name:"go.d.plugin",module_name:"smartctl",monitored_instance:{name:"S.M.A.R.T.",link:"https://linux.die.net/man/8/smartd",icon_filename:"smart.png",categories:["data-collection.hardware-devices-and-sensors"]},keywords:["smart","S.M.A.R.T.","SCSI devices","ATA devices"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1},overview:"# S.M.A.R.T.\n\nPlugin: go.d.plugin\nModule: smartctl\n\n## Overview\n\nThis collector monitors the health status of storage devices by analyzing S.M.A.R.T. (Self-Monitoring, Analysis, and Reporting Technology) counters.\nIt relies on the [`smartctl`](https://linux.die.net/man/8/smartctl) CLI tool but avoids directly executing the binary.\nInstead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment.\nThis approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\n\nExecuted commands:\n- `smartctl --json --scan`\n- `smartctl --json --all {deviceName} --device {deviceType} --nocheck {powerMode}`\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install smartmontools (v7.0+)\n\nInstall `smartmontools` version 7.0 or later using your distribution\'s package manager. 
Version 7.0 introduced the `--json` output mode, which is required for this collector to function properly.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/smartctl.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/smartctl.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Interval for updating Netdata charts, measured in seconds. Collector might use cached data if less than **Devices poll interval**. | 10 | no |\n| timeout | smartctl binary execution timeout. | 5 | no |\n| scan_every | Interval for discovering new devices using `smartctl --scan`, measured in seconds. | 900 | no |\n| poll_devices_every | Interval for gathering data for every device, measured in seconds. Data is cached for this interval. | 300 | no |\n| device_selector | Specifies a pattern to match the \'info name\' of devices as reported by `smartctl --scan --json`. | * | no |\n\n{% /details %}\n#### Examples\n\n##### Custom devices poll interval\n\nAllows you to override the default devices poll interval (data collection).\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: smartctl\n poll_devices_every: 60 # Collect S.M.A.R.T. statistics every 60 seconds\n\n```\n{% /details %}\n
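##### Filter devices\n\nA sketch that limits collection to matching devices via `device_selector`; the pattern below is purely illustrative and is matched against the device info name reported by `smartctl --scan --json`.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: smartctl\n device_selector: /dev/sd[a-c] # illustrative pattern, adjust to your devices\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `smartctl` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m smartctl\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 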
An instance is uniquely identified by a set of labels.\n\n\n\n### Per controller\n\nThese metrics refer to the Storage Device.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device_name | Device name |\n| device_type | Device type |\n| model_name | Model name |\n| serial_number | Serial number |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| smartctl.device_smart_status | passed, failed | status |\n| smartctl.device_ata_smart_error_log_count | error_log | logs |\n| smartctl.device_power_on_time | power_on_time | seconds |\n| smartctl.device_temperature | temperature | Celsius |\n| smartctl.device_power_cycles_count | power | cycles |\n| smartctl.device_smart_attr_{attribute_name} | {attribute_name} | {attribute_unit} |\n| smartctl.device_smart_attr_{attribute_name}_normalized | {attribute_name} | value |\n\n",integration_type:"collector",id:"go.d.plugin-smartctl-S.M.A.R.T.",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/smartctl/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-snmp",plugin_name:"go.d.plugin",module_name:"snmp",monitored_instance:{name:"SNMP devices",link:"",icon_filename:"snmp.png",categories:["data-collection.generic-data-collection"]},keywords:["snmp"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!0},overview:"# SNMP devices\n\nPlugin: go.d.plugin\nModule: snmp\n\n## Overview\n\nThis collector monitors any SNMP devices and uses the [gosnmp](https://github.com/gosnmp/gosnmp) package.\n\nIt supports:\n\n- all SNMP versions: SNMPv1, SNMPv2c and SNMPv3.\n- any number of SNMP devices.\n- each SNMP device can be used to collect data for any number of charts.\n- each chart may have any number of dimensions.\n- each SNMP device may have a different update frequency.\n- each SNMP device will accept one or more batches to report values (you can set `max_request_size` per SNMP server, to control the size of batches).\n\nKeep in mind that many SNMP switches and routers are very slow. They may not be able to report values per second.\n`go.d.plugin` reports the time it took for the SNMP device to respond when executed in the debug mode.\n\nAlso, if many SNMP clients are used on the same SNMP device at the same time, values may be skipped.\nThis is a problem of the SNMP device, not this collector. 
In this case, consider reducing the frequency of data collection (increasing `update_every`).\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Find OIDs\n\nUse `snmpwalk`, like this:\n\n```sh\nsnmpwalk -t 20 -O fn -v 2c -c public 192.0.2.1\n```\n\n- `-t 20` is the timeout in seconds.\n- `-O fn` will display full OIDs in numeric format.\n- `-v 2c` is the SNMP version.\n- `-c public` is the SNMP community.\n- `192.0.2.1` is the SNMP device.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/snmp.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/snmp.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| hostname | Target IPv4 address. | 127.0.0.1 | yes |\n| community | SNMPv1/2 community string. | public | no |\n| options.version | SNMP version. Available versions: 1, 2, 3. | 2 | no |\n| options.port | Target port. | 161 | no |\n| options.retries | Retries to attempt. | 1 | no |\n| options.timeout | SNMP request/response timeout. | 10 | no |\n| options.max_request_size | Maximum number of OIDs allowed in one SNMP request. | 60 | no |\n| user.name | SNMPv3 user name. | | no |\n| user.level | Security level of SNMPv3 messages. | | no |\n| user.auth_proto | Authentication protocol for SNMPv3 messages. | | no |\n| user.auth_key | Authentication protocol pass phrase. | | no |\n| user.priv_proto | Privacy protocol for SNMPv3 messages. | | no |\n| user.priv_key | Privacy protocol pass phrase. | | no |\n| charts | List of charts. | [] | yes |\n| charts.id | Chart ID. Used to uniquely identify the chart. | | yes |\n| charts.title | Chart title. | Untitled chart | no |\n| charts.units | Chart units. | num | no |\n| charts.family | Chart family. | charts.id | no |\n| charts.type | Chart type (line, area, stacked). | line | no |\n| charts.priority | Chart priority. | 70000 | no |\n| charts.multiply_range | Used when you need to define many charts using incremental OIDs. | [] | no |\n| charts.dimensions | List of chart dimensions. | [] | yes |\n| charts.dimensions.oid | Collected metric OID. | | yes |\n| charts.dimensions.name | Dimension name. | | yes |\n| charts.dimensions.algorithm | Dimension algorithm (absolute, incremental). | absolute | no |\n| charts.dimensions.multiplier | Collected value multiplier, applied to convert it properly to units. 
| 1 | no |\n| charts.dimensions.divisor | Collected value divisor, applied to convert it properly to units. | 1 | no |\n\n##### user.level\n\nThe security level of an SNMPv3 message as per RFC 3414 (`user.level`):\n\n| String value | Int value | Description |\n|:------------:|:---------:|------------------------------------------|\n| none | 1 | no message authentication or encryption |\n| authNoPriv | 2 | message authentication and no encryption |\n| authPriv | 3 | message authentication and encryption |\n\n\n##### user.auth_proto\n\nThe digest algorithm for SNMPv3 messages that require authentication (`user.auth_proto`):\n\n| String value | Int value | Description |\n|:------------:|:---------:|-------------------------------------------|\n| none | 1 | no message authentication |\n| md5 | 2 | MD5 message authentication (HMAC-MD5-96) |\n| sha | 3 | SHA message authentication (HMAC-SHA-96) |\n| sha224 | 4 | SHA message authentication (HMAC-SHA-224) |\n| sha256 | 5 | SHA message authentication (HMAC-SHA-256) |\n| sha384 | 6 | SHA message authentication (HMAC-SHA-384) |\n| sha512 | 7 | SHA message authentication (HMAC-SHA-512) |\n\n\n##### user.priv_proto\n\nThe encryption algorithm for SNMPv3 messages that require privacy (`user.priv_proto`):\n\n| String value | Int value | Description |\n|:------------:|:---------:|-------------------------------------------------------------------------|\n| none | 1 | no message encryption |\n| des | 2 | DES encryption (CBC-DES) |\n| aes | 3 | 128-bit AES encryption (CFB-AES-128) |\n| aes192 | 4 | 192-bit AES encryption (CFB-AES-192) with "Blumenthal" key localization |\n| aes256 | 5 | 256-bit AES encryption (CFB-AES-256) with "Blumenthal" key localization |\n| aes192c | 6 | 192-bit AES encryption (CFB-AES-192) with "Reeder" key localization |\n| aes256c | 7 | 256-bit AES encryption (CFB-AES-256) with "Reeder" key localization |\n\n\n{% /details %}\n#### Examples\n\n##### SNMPv1/2\n\nIn this example:\n\n- the SNMP device is `192.0.2.1`.\n- the SNMP version is `2`.\n- the SNMP community is `public`.\n- we will update the values every 10 seconds.\n- we define 2 charts `bandwidth_port1` and `bandwidth_port2`, each having 2 dimensions: `in` and `out`.\n\n> **SNMPv1**: just set `options.version` to 1.\n> **Note**: the algorithm chosen is `incremental`, because the collected values show the total number of bytes transferred, which we need to transform into kbps. To chart gauges (e.g. 
temperature), use `absolute` instead.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: switch\n update_every: 10\n hostname: 192.0.2.1\n community: public\n options:\n version: 2\n charts:\n - id: "bandwidth_port1"\n title: "Switch Bandwidth for port 1"\n units: "kilobits/s"\n type: "area"\n family: "ports"\n dimensions:\n - name: "in"\n oid: "1.3.6.1.2.1.2.2.1.10.1"\n algorithm: "incremental"\n multiplier: 8\n divisor: 1000\n - name: "out"\n oid: "1.3.6.1.2.1.2.2.1.16.1"\n multiplier: -8\n divisor: 1000\n - id: "bandwidth_port2"\n title: "Switch Bandwidth for port 2"\n units: "kilobits/s"\n type: "area"\n family: "ports"\n dimensions:\n - name: "in"\n oid: "1.3.6.1.2.1.2.2.1.10.2"\n algorithm: "incremental"\n multiplier: 8\n divisor: 1000\n - name: "out"\n oid: "1.3.6.1.2.1.2.2.1.16.2"\n multiplier: -8\n divisor: 1000\n\n```\n{% /details %}\n##### SNMPv3\n\nTo use SNMPv3:\n\n- use `user` instead of `community`.\n- set `options.version` to 3.\n\nThe rest of the configuration is the same as in the SNMPv1/2 example.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: switch\n update_every: 10\n hostname: 192.0.2.1\n options:\n version: 3\n user:\n name: username\n level: authPriv\n auth_proto: sha256\n auth_key: auth_protocol_passphrase\n priv_proto: aes256\n priv_key: priv_protocol_passphrase\n\n```\n{% /details %}\n##### Multiply range\n\nIf you need to define many charts using incremental OIDs, you can use the `charts.multiply_range` option.\n\nThis is like the SNMPv1/2 example, but the option will multiply the current chart from 1 to 24 inclusive, producing 24 charts in total for the 24 ports of the switch `192.0.2.1`.\n\nEach of the 24 new charts will have its number (1-24) appended to:\n\n- its chart unique `id`, i.e. `bandwidth_port_1` to `bandwidth_port_24`.\n- its title, i.e. `Switch Bandwidth for port 1` to `Switch Bandwidth for port 24`.\n- its `oid` (for all dimensions), i.e. dimension `in` will be `1.3.6.1.2.1.2.2.1.10.1` to `1.3.6.1.2.1.2.2.1.10.24`.\n- its `priority` will be incremented for each chart so that the charts will appear on the dashboard in this order.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: switch\n update_every: 10\n hostname: "192.0.2.1"\n community: public\n options:\n version: 2\n charts:\n - id: "bandwidth_port"\n title: "Switch Bandwidth for port"\n units: "kilobits/s"\n type: "area"\n family: "ports"\n multiply_range: [1, 24]\n dimensions:\n - name: "in"\n oid: "1.3.6.1.2.1.2.2.1.10"\n algorithm: "incremental"\n multiplier: 8\n divisor: 1000\n - name: "out"\n oid: "1.3.6.1.2.1.2.2.1.16"\n multiplier: -8\n divisor: 1000\n\n```\n{% /details %}\n##### Multiple devices with a common configuration\n\nYAML supports [anchors](https://yaml.org/spec/1.2.2/#3222-anchors-and-aliases). \nThe `&` defines and names an anchor, and the `*` uses it. `<<: *anchor` means: inject the anchor, then extend. 
We can use anchors to share the common configuration for multiple devices.\n\nThe following example:\n\n- adds an `anchor` to the first job.\n- injects (copies) the first job configuration to the second and updates `name` and `hostname` parameters.\n- injects (copies) the first job configuration to the third and updates `name` and `hostname` parameters.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - &anchor\n name: switch\n update_every: 10\n hostname: "192.0.2.1"\n community: public\n options:\n version: 2\n charts:\n - id: "bandwidth_port1"\n title: "Switch Bandwidth for port 1"\n units: "kilobits/s"\n type: "area"\n family: "ports"\n dimensions:\n - name: "in"\n oid: "1.3.6.1.2.1.2.2.1.10.1"\n algorithm: "incremental"\n multiplier: 8\n divisor: 1000\n - name: "out"\n oid: "1.3.6.1.2.1.2.2.1.16.1"\n multiplier: -8\n divisor: 1000\n - <<: *anchor\n name: switch2\n hostname: "192.0.2.2"\n - <<: *anchor\n name: switch3\n hostname: "192.0.2.3"\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `snmp` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m snmp\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThe metrics that will be collected are defined in the configuration file.\n",integration_type:"collector",id:"go.d.plugin-snmp-SNMP_devices",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/snmp/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-squidlog",plugin_name:"go.d.plugin",module_name:"squidlog",monitored_instance:{name:"Squid log files",link:"https://www.squid-cache.org/",icon_filename:"squid.png",categories:["data-collection.web-servers-and-web-proxies"]},keywords:["squid","logs"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!0},overview:"# Squid log files\n\nPlugin: go.d.plugin\nModule: squidlog\n\n## Overview\n\nThis collector monitors Squid servers by parsing their access log files.\n\n\nIt automatically detects log files of Squid servers running on localhost.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/squidlog.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config 
directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/squidlog.conf\n```\n#### Options\n\nSquid [log format codes](http://www.squid-cache.org/Doc/config/logformat/).\n\nSquidlog is aware how to parse and interpret the following codes:\n\n| field | squid format code | description |\n|----------------|-------------------|---------------------------------------------------------------|\n| resp_time | %tr | Response time (milliseconds). |\n| client_address | %>a | Client source IP address. |\n| client_address | %>A | Client FQDN. |\n| cache_code | %Ss | Squid request status (TCP_MISS etc). |\n| http_code | %>Hs | The HTTP response status code from Content Gateway to client. |\n| resp_size | %<st | Total size of reply sent to client (after adaptation). |\n| req_method | %rm | Request method (GET/POST etc). |\n| hier_code | %Sh | Squid hierarchy status (DEFAULT_PARENT etc). |\n| server_address | %<a | Server IP address of the last server or peer connection. |\n| mime_type | %mt | MIME content type. |\n\nIn addition, to make the Squid native log format csv parsable, squidlog understands these groups of codes:\n\n| field | squid format code | description |\n|-------------|-------------------|------------------------------------|\n| result_code | %Ss/%>Hs | Cache code and http code. |\n| hierarchy | %Sh/%<a | Hierarchy code and server address. |\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| path | Path to the Squid access log file. | /var/log/squid/access.log | yes |\n| exclude_path | Path to exclude. | *.gz | no |\n| parser.log_type | Log parser type. | auto | no |\n| parser.ltsv_config.mapping | LTSV fields mapping to **known fields**. | | yes |\n| parser.regexp_config.pattern | Regular expression pattern with named capture groups. | | yes |\n\n##### parser.ltsv_config.mapping\n\nThe mapping is a dictionary where the key is a field, as in logs, and the value is the corresponding **known field**.\n\n> **Note**: don't use `$` and `%` prefixes for mapped field names.\n\n```yaml\nparser:\n log_type: ltsv\n ltsv_config:\n mapping:\n label1: field1\n label2: field2\n```\n\n\n##### parser.regexp_config.pattern\n\nUse a pattern with named subexpressions. These names should be **known fields**.\n\n> **Note**: don't use `$` and `%` prefixes for mapped field names.\n\nSyntax:\n\n```yaml\nparser:\n log_type: regexp\n regexp_config:\n pattern: PATTERN\n```\n\n\n{% /details %}\n#### Examples\n
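##### Basic\n\nThe upstream metadata ships no configuration examples, so this is only a sketch: it assumes Squid writes its access log to the default `/var/log/squid/access.log`; adjust `path` to match your setup.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: squid\n path: /var/log/squid/access.log\n\n```\n{% /details %}\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `squidlog` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m squidlog\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 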
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Squid log files instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| squidlog.requests | requests | requests/s |\n| squidlog.excluded_requests | unmatched | requests/s |\n| squidlog.type_requests | success, bad, redirect, error | requests/s |\n| squidlog.http_status_code_class_responses | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s |\n| squidlog.http_status_code_responses | a dimension per HTTP response code | responses/s |\n| squidlog.bandwidth | sent | kilobits/s |\n| squidlog.response_time | min, max, avg | milliseconds |\n| squidlog.uniq_clients | clients | clients |\n| squidlog.cache_result_code_requests | a dimension per cache result code | requests/s |\n| squidlog.cache_result_code_transport_tag_requests | a dimension per cache result delivery transport tag | requests/s |\n| squidlog.cache_result_code_handling_tag_requests | a dimension per cache result handling tag | requests/s |\n| squidlog.cache_code_object_tag_requests | a dimension per cache result produced object tag | requests/s |\n| squidlog.cache_code_load_source_tag_requests | a dimension per cache result load source tag | requests/s |\n| squidlog.cache_code_error_tag_requests | a dimension per cache result error tag | requests/s |\n| squidlog.http_method_requests | a dimension per HTTP method | requests/s |\n| squidlog.mime_type_requests | a dimension per MIME type | requests/s |\n| squidlog.hier_code_requests | a dimension per hierarchy code | requests/s |\n| squidlog.server_address_forwarded_requests | a dimension per server address | requests/s |\n\n",integration_type:"collector",id:"go.d.plugin-squidlog-Squid_log_files",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/squidlog/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-storcli",plugin_name:"go.d.plugin",module_name:"storcli",monitored_instance:{name:"StoreCLI RAID",link:"https://docs.broadcom.com/doc/12352476",icon_filename:"hard-drive.svg",categories:["data-collection.storage-mount-points-and-filesystems"]},keywords:["storage","raid-controller","manage-disks"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1},overview:"# StoreCLI RAID\n\nPlugin: go.d.plugin\nModule: storcli\n\n## Overview\n\nMonitors the health of StoreCLI Hardware RAID by tracking the status of RAID adapters, physical drives, and backup batteries in your storage system.\nIt relies on the [`storcli`](https://docs.broadcom.com/doc/12352476) CLI tool but avoids directly executing the binary.\nInstead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment.\nThis approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\n\nExecuted commands:\n- `storcli /cALL show all J nolog`\n- `storcli /cALL/eALL/sALL show all J nolog`\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration 
is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/storcli.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/storcli.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | storcli binary execution timeout. | 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Custom update_every\n\nAllows you to override the default data collection interval.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: storcli\n update_every: 5 # Collect StorCLI RAID statistics every 5 seconds\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `storcli` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m storcli\n ```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ storcli_controller_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/storcli.conf) | storcli.controller_status | RAID controller ${label:controller_number} health status is not optimal |\n| [ storcli_controller_bbu_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/storcli.conf) | storcli.controller_bbu_status | RAID controller ${label:controller_number} BBU is unhealthy |\n| [ storcli_phys_drive_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/storcli.conf) | storcli.phys_drive_errors | RAID physical drive c${label:controller_number}/e${label:enclosure_number}/s${label:slot_number} errors |\n| [ storcli_phys_drive_predictive_failures ](https://github.com/netdata/netdata/blob/master/src/health/health.d/storcli.conf) | storcli.phys_drive_predictive_failures | RAID physical drive c${label:controller_number}/e${label:enclosure_number}/s${label:slot_number} predictive failures |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per controller\n\nThese metrics refer to the Controller.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| controller_number | Controller number (index) |\n| model | Controller model |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| storcli.controller_status | optimal, degraded, partially_degraded, failed | status |\n| storcli.controller_bbu_status | healthy, unhealthy, na | status |\n\n### Per physical drive\n\nThese metrics refer to the Physical Drive.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| controller_number | Controller number (index) |\n| enclosure_number | Enclosure number (index) |\n| slot_number | Slot number (index) |\n| media type | Media type (e.g. HDD) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| storcli.phys_drive_errors | media, other | errors/s |\n| storcli.phys_drive_predictive_failures | predictive_failures | failures/s |\n| storcli.phys_drive_smart_alert_status | active, inactive | status |\n| storcli.phys_drive_temperature | temperature | Celsius |\n\n### Per bbu\n\nThese metrics refer to the Backup Battery Unit.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| controller_number | Controller number (index) |\n| bbu_number | BBU number (index) |\n| model | BBU model |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| storcli.bbu_temperature | temperature | Celsius |\n\n",integration_type:"collector",id:"go.d.plugin-storcli-StoreCLI_RAID",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/storcli/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-supervisord",plugin_name:"go.d.plugin",module_name:"supervisord",monitored_instance:{name:"Supervisor",link:"http://supervisord.org/",icon_filename:"supervisord.png",categories:["data-collection.processes-and-system-services"]},keywords:["supervisor"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1},overview:"# Supervisor\n\nPlugin: go.d.plugin\nModule: supervisord\n\n## Overview\n\nThis collector monitors Supervisor instances.\n\nIt can collect metrics from:\n\n- [unix socket](http://supervisord.org/configuration.html?highlight=unix_http_server#unix-http-server-section-values)\n- [internal http server](http://supervisord.org/configuration.html?highlight=unix_http_server#inet-http-server-section-settings)\n\nUsed methods:\n\n- [`supervisor.getAllProcessInfo`](http://supervisord.org/api.html#supervisor.rpcinterface.SupervisorNamespaceRPCInterface.getAllProcessInfo)\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/supervisord.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config 
directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/supervisord.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9001/RPC2 | yes |\n| timeout | System bus requests timeout. | 1 | no |\n\n{% /details %}\n#### Examples\n\n##### HTTP\n\nCollect metrics via HTTP.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: 'http://127.0.0.1:9001/RPC2'\n\n```\n{% /details %}\n##### Socket\n\nCollect metrics via Unix socket.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: 'unix:///run/supervisor.sock'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollect metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: 'http://127.0.0.1:9001/RPC2'\n\n - name: remote\n url: 'http://192.0.2.1:9001/RPC2'\n\n```\n{% /details %}\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `supervisord` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m supervisord\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Supervisor instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| supervisord.summary_processes | running, non-running | processes |\n\n### Per process group\n\nThese metrics refer to the process group.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| supervisord.processes | running, non-running | processes |\n| supervisord.process_state_code | a dimension per process | code |\n| supervisord.process_exit_status | a dimension per process | exit status |\n| supervisord.process_uptime | a dimension per process | seconds |\n| supervisord.process_downtime | a dimension per process | seconds |\n\n",integration_type:"collector",id:"go.d.plugin-supervisord-Supervisor",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/supervisord/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-systemdunits",plugin_name:"go.d.plugin",module_name:"systemdunits",monitored_instance:{name:"Systemd Units",link:"https://www.freedesktop.org/wiki/Software/systemd/",icon_filename:"systemd.svg",categories:["data-collection.systemd"]},keywords:["systemd"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1},overview:"# Systemd Units\n\nPlugin: go.d.plugin\nModule: systemdunits\n\n## Overview\n\nThis collector monitors the state of Systemd units and unit files.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/systemdunits.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/systemdunits.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| timeout | System bus requests timeout. | 1 | no |\n| include | Systemd units selector. | *.service | no |\n| skip_transient | If set, skip data collection for systemd transient units. | false | no |\n| collect_unit_files | If set to true, collect the state of installed unit files. Enabling this may increase system overhead. | false | no |\n| collect_unit_files_every | Interval for querying systemd about unit files and their enablement state, measured in seconds. 
Data is cached for this interval to reduce system overhead. | 300 | no |\n| include_unit_files | Systemd unit files selector. | *.service | no |\n\n##### include\n\nSystemd units matching the selector will be monitored.\n\n- Logic: (pattern1 OR pattern2)\n- Pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match)\n- Syntax:\n\n```yaml\ninclude:\n - pattern1\n - pattern2\n```\n\n\n##### include_unit_files\n\nSystemd unit files matching the selector will be monitored.\n\n- Logic: (pattern1 OR pattern2)\n- Pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match)\n- Syntax:\n\n```yaml\ninclude_unit_files:\n - pattern1\n - pattern2\n```\n\n\n{% /details %}\n#### Examples\n\n##### Service units\n\nCollect state of all service type units.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: service\n include:\n - '*.service'\n\n```\n{% /details %}\n##### One specific unit\n\nCollect state of one specific unit.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: my-specific-service\n include:\n - 'my-specific.service'\n\n```\n{% /details %}\n##### All unit types\n\nCollect state of all units.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: my-specific-service-unit\n include:\n - '*'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollect state of all service and socket type units.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: service\n include:\n - '*.service'\n\n - name: socket\n include:\n - '*.socket'\n\n```\n{% /details %}\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `systemdunits` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m systemdunits\n ```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ systemd_service_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.service_unit_state | systemd service unit in the failed state |\n| [ systemd_socket_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.socket_unit_state | systemd socket unit in the failed state |\n| [ systemd_target_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.target_unit_state | systemd target unit in the failed state |\n| [ systemd_path_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.path_unit_state | systemd path unit in the failed state |\n| [ systemd_device_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.device_unit_state | systemd device unit in the failed state |\n| [ systemd_mount_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.mount_unit_state | systemd mount unit in the failed state |\n| [ systemd_automount_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.automount_unit_state | systemd automount unit in the failed state |\n| [ systemd_swap_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.swap_unit_state | systemd swap unit in the failed state |\n| [ systemd_scope_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.scope_unit_state | systemd scope unit in the failed state |\n| [ systemd_slice_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.slice_unit_state | systemd slice unit in the failed state |\n| [ systemd_timer_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.timer_unit_state | systemd timer unit in the failed state |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per unit\n\nThese metrics refer to the systemd unit.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| unit_name | systemd unit name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| systemd.service_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.socket_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.target_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.path_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.device_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.mount_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.automount_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.swap_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.timer_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.scope_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.slice_unit_state | active, inactive, activating, deactivating, failed | state |\n\n### Per unit file\n\nThese metrics refer to the systemd unit file.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| unit_file_name | systemd unit file name |\n| unit_file_type | systemd unit file type |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| systemd.unit_file_state | enabled, enabled-runtime, linked, linked-runtime, alias, masked, masked-runtime, static, disabled, indirect, generated, transient, bad | state |\n\n",integration_type:"collector",id:"go.d.plugin-systemdunits-Systemd_Units",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/systemdunits/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-tengine",plugin_name:"go.d.plugin",module_name:"tengine",monitored_instance:{name:"Tengine",link:"https://tengine.taobao.org/",icon_filename:"tengine.jpeg",categories:["data-collection.web-servers-and-web-proxies"]},keywords:["tengine","web","webserver"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1},overview:"# Tengine\n\nPlugin: go.d.plugin\nModule: tengine\n\n## Overview\n\nThis collector monitors Tengine servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Enable the ngx_http_reqstat_module\n\nTo enable the module, see the [ngx_http_reqstat_module documentation](https://tengine.taobao.org/document/http_reqstat.html).\nThe default line format is the only supported format.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/tengine.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from 
the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/tengine.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/us | yes |\n| timeout | HTTP request timeout. | 2 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/us\n\n```\n{% /details %}\n##### HTTP authentication\n\nLocal server with basic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/us\n username: foo\n password: bar\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nTengine with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1/us\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/us\n\n - name: remote\n url: http://203.0.113.10/us\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `tengine` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m tengine\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Tengine instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| tengine.bandwidth_total | in, out | B/s |\n| tengine.connections_total | accepted | connections/s |\n| tengine.requests_total | processed | requests/s |\n| tengine.requests_per_response_code_family_total | 2xx, 3xx, 4xx, 5xx, other | requests/s |\n| tengine.requests_per_response_code_detailed_total | 200, 206, 302, 304, 403, 404, 419, 499, 500, 502, 503, 504, 508, other | requests/s |\n| tengine.requests_upstream_total | requests | requests/s |\n| tengine.tries_upstream_total | calls | calls/s |\n| tengine.requests_upstream_per_response_code_family_total | 4xx, 5xx | requests/s |\n\n",integration_type:"collector",id:"go.d.plugin-tengine-Tengine",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/tengine/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-traefik",plugin_name:"go.d.plugin",module_name:"traefik",monitored_instance:{name:"Traefik",link:"https://traefik.io/",icon_filename:"traefik.svg",categories:["data-collection.web-servers-and-web-proxies"]},keywords:["traefik","proxy","webproxy"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1},overview:"# Traefik\n\nPlugin: go.d.plugin\nModule: traefik\n\n## Overview\n\nThis collector monitors Traefik servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Enable built-in Prometheus exporter\n\nTo enable it, see the [Prometheus exporter](https://doc.traefik.io/traefik/observability/metrics/prometheus/) documentation.\n\n
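As a rough sketch (assuming a Traefik v2+ static configuration in YAML, with a dedicated \`metrics\` entry point on port 8082 to match this collector\'s default \`url\`):\n\n```yaml\nentryPoints:\n  metrics:\n    address: ":8082"\n\nmetrics:\n  prometheus:\n    entryPoint: metrics\n```\n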
\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is \`go.d/traefik.conf\`.\n\n\nYou can edit the configuration file using the \`edit-config\` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/traefik.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8082/metrics | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8082/metrics\n\n```\n{% /details %}\n##### Basic HTTP auth\n\nLocal server with basic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8082/metrics\n username: foo\n password: bar\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8082/metrics\n\n - name: remote\n url: http://192.0.2.0:8082/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `traefik` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m traefik\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per entrypoint, protocol\n\nThese metrics refer to the entrypoint.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| traefik.entrypoint_requests | 1xx, 2xx, 3xx, 4xx, 5xx | requests/s |\n| traefik.entrypoint_request_duration_average | 1xx, 2xx, 3xx, 4xx, 5xx | milliseconds |\n| traefik.entrypoint_open_connections | a dimension per HTTP method | connections |\n\n",integration_type:"collector",id:"go.d.plugin-traefik-Traefik",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/traefik/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-unbound",plugin_name:"go.d.plugin",module_name:"unbound",monitored_instance:{name:"Unbound",link:"https://nlnetlabs.nl/projects/unbound/about/",icon_filename:"unbound.png",categories:["data-collection.dns-and-dhcp-servers"]},keywords:["unbound","dns"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1},overview:"# Unbound\n\nPlugin: go.d.plugin\nModule: unbound\n\n## Overview\n\nThis collector monitors Unbound servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Enable remote control interface\n\nSet \`control-enable\` to \`yes\` in [unbound.conf](https://nlnetlabs.nl/documentation/unbound/unbound.conf).\n\n
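For example, a minimal \`remote-control\` section could look like this (a sketch; the \`control-interface\` and \`control-port\` values shown match the defaults this collector expects):\n\n```\nremote-control:\n    control-enable: yes\n    control-interface: 127.0.0.1\n    control-port: 8953\n```\n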
\n#### Check permissions and adjust if necessary\n\nIf using a Unix socket:\n\n- socket should be readable and writable by \`netdata\` user\n\nIf using an IP socket and TLS is disabled:\n\n- socket should be accessible via network\n\nIf TLS is enabled, in addition:\n\n- \`control-key-file\` should be readable by \`netdata\` user\n- \`control-cert-file\` should be readable by \`netdata\` user\n\nFor auto-detection of parameters from \`unbound.conf\`:\n\n- \`unbound.conf\` should be readable by \`netdata\` user\n- if you have several configuration files (include feature), all of them should be readable by \`netdata\` user\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is \`go.d/unbound.conf\`.\n\n\nYou can edit the configuration file using the \`edit-config\` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/unbound.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address in IP:PORT format. | 127.0.0.1:8953 | yes |\n| timeout | Connection/read/write/ssl handshake timeout. | 1 | no |\n| conf_path | Absolute path to the unbound configuration file. | /etc/unbound/unbound.conf | no |\n| cumulative_stats | Statistics collection mode. Should have the same value as the \`statistics-cumulative\` parameter in the unbound configuration file. | no | no |\n| use_tls | Whether to use TLS or not. | yes | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | yes | no |\n| tls_ca | Certificate authority that the client uses when verifying server certificates. | | no |\n| tls_cert | Client TLS certificate. | /etc/unbound/unbound_control.pem | no |\n| tls_key | Client TLS key. | /etc/unbound/unbound_control.key | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:8953\n\n```\n{% /details %}\n##### Unix socket\n\nConnecting through a Unix socket.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: socket\n address: /var/run/unbound.sock\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:8953\n\n - name: remote\n address: 203.0.113.11:8953\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `unbound` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m unbound\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Unbound instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| unbound.queries | queries | queries |\n| unbound.queries_ip_ratelimited | ratelimited | queries |\n| unbound.dnscrypt_queries | crypted, cert, cleartext, malformed | queries |\n| unbound.cache | hits, miss | events |\n| unbound.cache_percentage | hits, miss | percentage |\n| unbound.prefetch | prefetches | prefetches |\n| unbound.expired | expired | replies |\n| unbound.zero_ttl_replies | zero_ttl | replies |\n| unbound.recursive_replies | recursive | replies |\n| unbound.recursion_time | avg, median | milliseconds |\n| unbound.request_list_usage | avg, max | queries |\n| unbound.current_request_list_usage | all, users | queries |\n| unbound.request_list_jostle_list | overwritten, dropped | queries |\n| unbound.tcpusage | usage | buffers |\n| unbound.uptime | time | seconds |\n| unbound.cache_memory | message, rrset, dnscrypt_nonce, dnscrypt_shared_secret | KB |\n| unbound.mod_memory | iterator, respip, validator, subnet, ipsec | KB |\n| unbound.mem_streamwait | streamwait | KB |\n| unbound.cache_count | infra, key, msg, rrset, dnscrypt_nonce, shared_secret | items |\n| unbound.type_queries | a dimension per query type | queries |\n| unbound.class_queries | a dimension per query class | queries |\n| unbound.opcode_queries | a dimension per query opcode | queries |\n| unbound.flag_queries | qr, aa, tc, rd, ra, z, ad, cd | queries |\n| unbound.rcode_answers | a dimension per reply rcode | replies |\n\n### Per thread\n\nThese metrics refer to threads.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| unbound.thread_queries | queries | queries |\n| unbound.thread_queries_ip_ratelimited | ratelimited | queries |\n| unbound.thread_dnscrypt_queries | crypted, cert, cleartext, malformed | queries |\n| unbound.thread_cache | hits, miss | events |\n| unbound.thread_cache_percentage | hits, miss | percentage |\n| unbound.thread_prefetch | prefetches | prefetches |\n| unbound.thread_expired | expired | replies |\n| unbound.thread_zero_ttl_replies | zero_ttl | replies |\n| unbound.thread_recursive_replies | recursive | replies |\n| unbound.thread_recursion_time | avg, median | milliseconds |\n| unbound.thread_request_list_usage | avg, max | queries |\n| unbound.thread_current_request_list_usage | all, users | queries |\n| unbound.thread_request_list_jostle_list | overwritten, dropped | queries |\n| unbound.thread_tcpusage | usage | buffers |\n\n",integration_type:"collector",id:"go.d.plugin-unbound-Unbound",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/unbound/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-upsd",plugin_name:"go.d.plugin",module_name:"upsd",monitored_instance:{name:"UPS (NUT)",link:"",icon_filename:"plug-circle-bolt.svg",categories:["data-collection.ups"]},keywords:["ups","nut"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1},overview:"# UPS (NUT)\n\nPlugin: go.d.plugin\nModule: upsd\n\n## Overview\n\nThis collector monitors Uninterruptible Power Supplies by polling the UPS daemon using the NUT network protocol.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this 
integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/upsd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/upsd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | UPS daemon address in IP:PORT format. | 127.0.0.1:3493 | yes |\n| timeout | Connection/read/write timeout in seconds. The timeout includes name resolution, if required. | 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:3493\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:3493\n\n - name: remote\n address: 203.0.113.0:3493\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `upsd` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m upsd\n ```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ upsd_10min_ups_load ](https://github.com/netdata/netdata/blob/master/src/health/health.d/upsd.conf) | upsd.ups_load | UPS ${label:ups_name} average load over the last 10 minutes |\n| [ upsd_ups_battery_charge ](https://github.com/netdata/netdata/blob/master/src/health/health.d/upsd.conf) | upsd.ups_battery_charge | UPS ${label:ups_name} average battery charge over the last minute |\n| [ upsd_ups_last_collected_secs ](https://github.com/netdata/netdata/blob/master/src/health/health.d/upsd.conf) | upsd.ups_load | UPS ${label:ups_name} number of seconds since the last successful data collection |\n",metrics:'## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per ups\n\nThese metrics refer to the UPS unit.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| ups_name | UPS name. |\n| battery_type | Battery type (chemistry). "battery.type" variable value. |\n| device_model | Device model. "device.model" variable value. |\n| device_serial | Device serial number. "device.serial" variable value. |\n| device_manufacturer | Device manufacturer. "device.mfr" variable value. |\n| device_type | Device type (ups, pdu, scd, psu, ats). "device.type" variable value. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| upsd.ups_load | load | percentage |\n| upsd.ups_load_usage | load_usage | Watts |\n| upsd.ups_status | on_line, on_battery, low_battery, high_battery, replace_battery, charging, discharging, bypass, calibration, offline, overloaded, trim_input_voltage, boost_input_voltage, forced_shutdown, other | status |\n| upsd.ups_temperature | temperature | Celsius |\n| upsd.ups_battery_charge | charge | percentage |\n| upsd.ups_battery_estimated_runtime | runtime | seconds |\n| upsd.ups_battery_voltage | voltage | Volts |\n| upsd.ups_battery_voltage_nominal | nominal_voltage | Volts |\n| upsd.ups_input_voltage | voltage | Volts |\n| upsd.ups_input_voltage_nominal | nominal_voltage | Volts |\n| upsd.ups_input_current | current | Ampere |\n| upsd.ups_input_current_nominal | nominal_current | Ampere |\n| upsd.ups_input_frequency | frequency | Hz |\n| upsd.ups_input_frequency_nominal | nominal_frequency | Hz |\n| upsd.ups_output_voltage | voltage | Volts |\n| upsd.ups_output_voltage_nominal | nominal_voltage | Volts |\n| upsd.ups_output_current | current | Ampere |\n| upsd.ups_output_current_nominal | nominal_current | Ampere |\n| upsd.ups_output_frequency | frequency | Hz |\n| upsd.ups_output_frequency_nominal | nominal_frequency | Hz |\n\n',integration_type:"collector",id:"go.d.plugin-upsd-UPS_(NUT)",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/upsd/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-vcsa",plugin_name:"go.d.plugin",module_name:"vcsa",monitored_instance:{name:"vCenter Server Appliance",link:"https://docs.vmware.com/en/VMware-vSphere/6.5/com.vmware.vsphere.vcsa.doc/GUID-223C2821-BD98-4C7A-936B-7DBE96291BA4.html",icon_filename:"vmware.svg",categories:["data-collection.containers-and-vms"]},keywords:["vmware"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1},overview:"# vCenter Server Appliance\n\nPlugin: go.d.plugin\nModule: vcsa\n\n## Overview\n\nThis collector monitors [health statistics](https://developer.vmware.com/apis/vsphere-automation/latest/appliance/health/) of vCenter Server Appliance servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is 
`go.d/vcsa.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/vcsa.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | yes |\n| password | Password for basic HTTP authentication. | | yes |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | false | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | false | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: vcsa1\n url: https://203.0.113.1\n username: admin@vsphere.local\n password: password\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nTwo instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: vcsa1\n url: https://203.0.113.1\n username: admin@vsphere.local\n password: password\n\n - name: vcsa2\n url: https://203.0.113.10\n username: admin@vsphere.local\n password: password\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `vcsa` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m vcsa\n ```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ vcsa_system_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.system_health_status | VCSA overall system status is orange. One or more components are degraded. |\n| [ vcsa_system_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.system_health_status | VCSA overall system status is red. 
One or more components are unavailable or will stop functioning soon. |\n| [ vcsa_applmgmt_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.applmgmt_health_status | VCSA ApplMgmt component status is orange. It is degraded, and may have serious problems. |\n| [ vcsa_applmgmt_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.applmgmt_health_status | VCSA ApplMgmt component status is red. It is unavailable, or will stop functioning soon. |\n| [ vcsa_load_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.load_health_status | VCSA Load component status is orange. It is degraded, and may have serious problems. |\n| [ vcsa_load_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.load_health_status | VCSA Load component status is red. It is unavailable, or will stop functioning soon. |\n| [ vcsa_mem_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.mem_health_status | VCSA Memory component status is orange. It is degraded, and may have serious problems. |\n| [ vcsa_mem_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.mem_health_status | VCSA Memory component status is red. It is unavailable, or will stop functioning soon. |\n| [ vcsa_swap_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.swap_health_status | VCSA Swap component status is orange. It is degraded, and may have serious problems. |\n| [ vcsa_swap_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.swap_health_status | VCSA Swap component status is red. It is unavailable, or will stop functioning soon. |\n| [ vcsa_database_storage_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.database_storage_health_status | VCSA Database Storage component status is orange. It is degraded, and may have serious problems. |\n| [ vcsa_database_storage_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.database_storage_health_status | VCSA Database Storage component status is red. It is unavailable, or will stop functioning soon. |\n| [ vcsa_storage_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.storage_health_status | VCSA Storage component status is orange. It is degraded, and may have serious problems. |\n| [ vcsa_storage_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.storage_health_status | VCSA Storage component status is red. It is unavailable, or will stop functioning soon. |\n| [ vcsa_software_packages_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.software_packages_health_status | VCSA software packages security updates are available. |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per vCenter Server Appliance instance\n\nThese metrics refer to the entire monitored application.\n
\nHealth statuses:\n\nOverall System Health:\n\n| Status | Description |\n|:-------:|:-------------------------------------------------------------------------------------------------------------------------|\n| green | All components in the appliance are healthy. |\n| yellow | One or more components in the appliance might become overloaded soon. |\n| orange | One or more components in the appliance might be degraded. |\n| red | One or more components in the appliance might be in an unusable status and the appliance might become unresponsive soon. |\n| gray | No health data is available. |\n| unknown | Collector failed to decode status. |\n\nComponents Health:\n\n| Status | Description |\n|:-------:|:-------------------------------------------------------------|\n| green | The component is healthy. |\n| yellow | The component is healthy, but may have some problems. |\n| orange | The component is degraded, and may have serious problems. |\n| red | The component is unavailable, or will stop functioning soon. |\n| gray | No health data is available. |\n| unknown | Collector failed to decode status. |\n\nSoftware Updates Health:\n\n| Status | Description |\n|:-------:|:-----------------------------------------------------|\n| green | No updates available. |\n| orange | Non-security patches might be available. |\n| red | Security patches might be available. |\n| gray | An error retrieving information on software updates. |\n| unknown | Collector failed to decode status. |\n\n
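These statuses are exposed as the dimensions of the status charts listed below, so you can sanity-check the mapping by querying the Agent's standard data API (a sketch assuming a default local Agent listening on port 19999):\n\n```bash\n# fetch the latest vCSA overall system health status point\ncurl -s 'http://127.0.0.1:19999/api/v1/data?chart=vcsa.system_health_status&points=1'\n```\n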
    \n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| vcsa.system_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.applmgmt_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.load_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.mem_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.swap_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.database_storage_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.storage_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.software_packages_health_status | green, red, orange, gray, unknown | status |\n\n",integration_type:"collector",id:"go.d.plugin-vcsa-vCenter_Server_Appliance",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/vcsa/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-vernemq",plugin_name:"go.d.plugin",module_name:"vernemq",monitored_instance:{name:"VerneMQ",link:"https://vernemq.com",icon_filename:"vernemq.svg",categories:["data-collection.message-brokers"]},keywords:["vernemq","message brokers"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1},overview:"# VerneMQ\n\nPlugin: go.d.plugin\nModule: vernemq\n\n## Overview\n\nThis collector monitors VerneMQ instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/vernemq.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/vernemq.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8888/metrics | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. 
Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8888/metrics\n\n```\n{% /details %}\n##### HTTP authentication\n\nLocal instance with basic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8888/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8888/metrics\n\n - name: remote\n url: http://203.0.113.10:8888/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `vernemq` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m vernemq\n ```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ vernemq_socket_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.socket_errors | number of socket errors in the last minute |\n| [ vernemq_queue_message_drop ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.queue_undelivered_messages | number of dropped messages due to full queues in the last minute |\n| [ vernemq_queue_message_expired ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.queue_undelivered_messages | number of messages which expired before delivery in the last minute |\n| [ vernemq_queue_message_unhandled ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.queue_undelivered_messages | number of unhandled messages (connections with clean session=true) in the last minute |\n| [ vernemq_average_scheduler_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.average_scheduler_utilization | average scheduler utilization over the last 10 minutes |\n| [ vernemq_cluster_dropped ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.cluster_dropped | amount of traffic dropped during communication with the cluster nodes in the last minute |\n| [ vernemq_netsplits ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.netsplits | number of detected netsplits (split-brain situation) in the last minute |\n| [ 
vernemq_mqtt_connack_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_connack_sent_reason | number of sent unsuccessful v3/v5 CONNACK packets in the last minute |\n| [ vernemq_mqtt_disconnect_received_reason_not_normal ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_disconnect_received_reason | number of received not normal v5 DISCONNECT packets in the last minute |\n| [ vernemq_mqtt_disconnect_sent_reason_not_normal ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_disconnect_sent_reason | number of sent not normal v5 DISCONNECT packets in the last minute |\n| [ vernemq_mqtt_subscribe_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_subscribe_error | number of failed v3/v5 SUBSCRIBE operations in the last minute |\n| [ vernemq_mqtt_subscribe_auth_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_subscribe_auth_error | number of unauthorized v3/v5 SUBSCRIBE attempts in the last minute |\n| [ vernemq_mqtt_unsubscribe_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_unsubscribe_error | number of failed v3/v5 UNSUBSCRIBE operations in the last minute |\n| [ vernemq_mqtt_publish_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_publish_errors | number of failed v3/v5 PUBLISH operations in the last minute |\n| [ vernemq_mqtt_publish_auth_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_publish_auth_errors | number of unauthorized v3/v5 PUBLISH attempts in the last minute |\n| [ vernemq_mqtt_puback_received_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_puback_received_reason | number of received unsuccessful v5 PUBACK packets in the last minute |\n| [ vernemq_mqtt_puback_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_puback_sent_reason | number of sent unsuccessful v5 PUBACK packets in the last minute |\n| [ vernemq_mqtt_puback_unexpected ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_puback_invalid_error | number of received unexpected v3/v5 PUBACK packets in the last minute |\n| [ vernemq_mqtt_pubrec_received_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubrec_received_reason | number of received unsuccessful v5 PUBREC packets in the last minute |\n| [ vernemq_mqtt_pubrec_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubrec_sent_reason | number of sent unsuccessful v5 PUBREC packets in the last minute |\n| [ vernemq_mqtt_pubrec_invalid_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubrec_invalid_error | number of received unexpected v3 PUBREC packets in the last minute |\n| [ vernemq_mqtt_pubrel_received_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubrel_received_reason | number of received unsuccessful v5 PUBREL packets in the last minute |\n| [ 
vernemq_mqtt_pubrel_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubrel_sent_reason | number of sent unsuccessful v5 PUBREL packets in the last minute |\n| [ vernemq_mqtt_pubcomp_received_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubcomp_received_reason | number of received unsuccessful v5 PUBCOMP packets in the last minute |\n| [ vernemq_mqtt_pubcomp_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubcomp_sent_reason | number of sent unsuccessful v5 PUBCOMP packets in the last minute |\n| [ vernemq_mqtt_pubcomp_unexpected ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubcomp_invalid_error | number of received unexpected v3/v5 PUBCOMP packets in the last minute |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per VerneMQ instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| vernemq.sockets | open | sockets |\n| vernemq.socket_operations | open, close | sockets/s |\n| vernemq.client_keepalive_expired | closed | sockets/s |\n| vernemq.socket_close_timeout | closed | sockets/s |\n| vernemq.socket_errors | errors | errors/s |\n| vernemq.queue_processes | queue_processes | queue processes |\n| vernemq.queue_processes_operations | setup, teardown | events/s |\n| vernemq.queue_process_init_from_storage | queue_processes | queue processes/s |\n| vernemq.queue_messages | received, sent | messages/s |\n| vernemq.queue_undelivered_messages | dropped, expired, unhandled | messages/s |\n| vernemq.router_subscriptions | subscriptions | subscriptions |\n| vernemq.router_matched_subscriptions | local, remote | subscriptions/s |\n| vernemq.router_memory | used | KiB |\n| vernemq.average_scheduler_utilization | utilization | percentage |\n| vernemq.system_utilization_scheduler | a dimension per scheduler | percentage |\n| vernemq.system_processes | processes | processes |\n| vernemq.system_reductions | reductions | ops/s |\n| vernemq.system_context_switches | context_switches | ops/s |\n| vernemq.system_io | received, sent | kilobits/s |\n| vernemq.system_run_queue | ready | processes |\n| vernemq.system_gc_count | gc | ops/s |\n| vernemq.system_gc_words_reclaimed | words_reclaimed | ops/s |\n| vernemq.system_allocated_memory | processes, system | KiB |\n| vernemq.bandwidth | received, sent | kilobits/s |\n| vernemq.retain_messages | messages | messages |\n| vernemq.retain_memory | used | KiB |\n| vernemq.cluster_bandwidth | received, sent | kilobits/s |\n| vernemq.cluster_dropped | dropped | kilobits/s |\n| vernemq.netsplit_unresolved | unresolved | netsplits |\n| vernemq.netsplits | resolved, detected | netsplits/s |\n| vernemq.mqtt_auth | received, sent | packets/s |\n| vernemq.mqtt_auth_received_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_auth_sent_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_connect | connect, connack | packets/s |\n| vernemq.mqtt_connack_sent_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_disconnect | received, sent | packets/s |\n| vernemq.mqtt_disconnect_received_reason | a dimension per reason | 
packets/s |\n| vernemq.mqtt_disconnect_sent_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_subscribe | subscribe, suback | packets/s |\n| vernemq.mqtt_subscribe_error | failed | ops/s |\n| vernemq.mqtt_subscribe_auth_error | unauth | attempts/s |\n| vernemq.mqtt_unsubscribe | unsubscribe, unsuback | packets/s |\n| vernemq.mqtt_unsubscribe_error | mqtt_unsubscribe_error | ops/s |\n| vernemq.mqtt_publish | received, sent | packets/s |\n| vernemq.mqtt_publish_errors | failed | ops/s |\n| vernemq.mqtt_publish_auth_errors | unauth | attempts/s |\n| vernemq.mqtt_puback | received, sent | packets/s |\n| vernemq.mqtt_puback_received_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_puback_sent_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_puback_invalid_error | unexpected | messages/s |\n| vernemq.mqtt_pubrec | received, sent | packets/s |\n| vernemq.mqtt_pubrec_received_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_pubrec_sent_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_pubrec_invalid_error | unexpected | messages/s |\n| vernemq.mqtt_pubrel | received, sent | packets/s |\n| vernemq.mqtt_pubrel_received_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_pubrel_sent_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_pubcomp | received, sent | packets/s |\n| vernemq.mqtt_pubcomp_received_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_pubcomp_sent_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_pubcomp_invalid_error | unexpected | messages/s |\n| vernemq.mqtt_ping | pingreq, pingresp | packets/s |\n| vernemq.node_uptime | time | seconds |\n\n",integration_type:"collector",id:"go.d.plugin-vernemq-VerneMQ",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/vernemq/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-vsphere",plugin_name:"go.d.plugin",module_name:"vsphere",monitored_instance:{name:"VMware vCenter Server",link:"https://www.vmware.com/products/vcenter-server.html",icon_filename:"vmware.svg",categories:["data-collection.containers-and-vms"]},keywords:["vmware","esxi","vcenter"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!0},overview:"# VMware vCenter Server\n\nPlugin: go.d.plugin\nModule: vsphere\n\n## Overview\n\nThis collector monitors host and VM performance statistics from `vCenter` servers.\n\n> **Warning**: The `vsphere` collector cannot re-login and continue collecting metrics after a vCenter reboot.\n> go.d.plugin needs to be restarted.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default `update_every` is 20 seconds, and it doesn't make sense to decrease the value.\n**VMware real-time statistics are generated at a 20-second granularity**.\n\nIt is likely that 20 seconds is not enough for big installations, and the value should be tuned.\n\nTo get a better view, we recommend running the collector in debug mode and checking how much time it takes to collect metrics.\n
\nExample (all unrelated debug lines were removed):\n\n```\n[ilyam@pc]$ ./go.d.plugin -d -m vsphere\n[ DEBUG ] vsphere[vsphere] discover.go:94 discovering : starting resource discovering process\n[ DEBUG ] vsphere[vsphere] discover.go:102 discovering : found 3 dcs, process took 49.329656ms\n[ DEBUG ] vsphere[vsphere] discover.go:109 discovering : found 12 folders, process took 49.538688ms\n[ DEBUG ] vsphere[vsphere] discover.go:116 discovering : found 3 clusters, process took 47.722692ms\n[ DEBUG ] vsphere[vsphere] discover.go:123 discovering : found 2 hosts, process took 52.966995ms\n[ DEBUG ] vsphere[vsphere] discover.go:130 discovering : found 2 vms, process took 49.832979ms\n[ INFO ] vsphere[vsphere] discover.go:140 discovering : found 3 dcs, 12 folders, 3 clusters (2 dummy), 2 hosts, 3 vms, process took 249.655993ms\n[ DEBUG ] vsphere[vsphere] build.go:12 discovering : building : starting building resources process\n[ INFO ] vsphere[vsphere] build.go:23 discovering : building : built 3/3 dcs, 12/12 folders, 3/3 clusters, 2/2 hosts, 3/3 vms, process took 63.3\xb5s\n[ DEBUG ] vsphere[vsphere] hierarchy.go:10 discovering : hierarchy : start setting resources hierarchy process\n[ INFO ] vsphere[vsphere] hierarchy.go:18 discovering : hierarchy : set 3/3 clusters, 2/2 hosts, 3/3 vms, process took 6.522\xb5s\n[ DEBUG ] vsphere[vsphere] filter.go:24 discovering : filtering : starting filtering resources process\n[ DEBUG ] vsphere[vsphere] filter.go:45 discovering : filtering : removed 0 unmatched hosts\n[ DEBUG ] vsphere[vsphere] filter.go:56 discovering : filtering : removed 0 unmatched vms\n[ INFO ] vsphere[vsphere] filter.go:29 discovering : filtering : filtered 0/2 hosts, 0/3 vms, process took 42.973\xb5s\n[ DEBUG ] vsphere[vsphere] metric_lists.go:14 discovering : metric lists : starting resources metric lists collection process\n[ INFO ] vsphere[vsphere] metric_lists.go:30 discovering : metric lists : collected metric lists for 2/2 hosts, 3/3 vms, process took 275.60764ms\n[ INFO ] vsphere[vsphere] discover.go:74 discovering : discovered 2/2 hosts, 3/3 vms, the whole process took 525.614041ms\n[ INFO ] vsphere[vsphere] discover.go:11 starting discovery process, will do discovery every 5m0s\n[ DEBUG ] vsphere[vsphere] collect.go:11 starting collection process\n[ DEBUG ] vsphere[vsphere] scrape.go:48 scraping : scraped metrics for 2/2 hosts, process took 96.257374ms\n[ DEBUG ] vsphere[vsphere] scrape.go:60 scraping : scraped metrics for 3/3 vms, process took 57.879697ms\n[ DEBUG ] vsphere[vsphere] collect.go:23 metrics collected, process took 154.77997ms\n```\n\n
\nThere you can see that discovery took `525.614041ms` and collecting metrics took `154.77997ms`. Discovery runs in a separate thread, so it doesn't affect metrics collection.\nAdjust the `update_every` and `timeout` parameters based on these numbers.\n\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/vsphere.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/vsphere.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 20 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | vCenter server URL. | | yes |\n| host_include | Hosts selector (filter). | | no |\n| vm_include | Virtual machines selector (filter). | | no |\n| discovery_interval | Hosts and VMs discovery interval. | 300 | no |\n| timeout | HTTP request timeout. | 20 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n##### host_include\n\nMetrics of hosts matching the selector will be collected.\n\n- Include pattern syntax: \"/Datacenter pattern/Cluster pattern/Host pattern\".\n- Match pattern syntax: [simple patterns](/src/libnetdata/simple_pattern/README.md#simple-patterns).\n- Syntax:\n\n ```yaml\n host_include:\n - '/DC1/*' # select all hosts from datacenter DC1\n - '/DC2/*/!Host2 *' # select all hosts from datacenter DC2 except Host2\n - '/DC3/Cluster3/*' # select all hosts from datacenter DC3 cluster Cluster3\n ```\n\n\n##### vm_include\n\nMetrics of VMs matching the selector will be collected.\n\n- Include pattern syntax: \"/Datacenter pattern/Cluster pattern/Host pattern/VM pattern\".\n- Match pattern syntax: [simple patterns](/src/libnetdata/simple_pattern/README.md#simple-patterns).\n- Syntax:\n\n ```yaml\n vm_include:\n - '/DC1/*' # select all VMs from datacenter DC1\n - '/DC2/*/*/!VM2 *' # select all VMs from datacenter DC2 except VM2\n - '/DC3/Cluster3/*' # select all VMs from datacenter DC3 cluster Cluster3\n ```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name : vcenter1\n url : https://203.0.113.1\n username : admin@vsphere.local\n password : somepassword\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name : vcenter1\n url : https://203.0.113.1\n username : admin@vsphere.local\n password : somepassword\n\n - name : vcenter2\n url : https://203.0.113.10\n username : admin@vsphere.local\n password : somepassword\n\n```\n{% /details %}\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `vsphere` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m vsphere\n ```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ vsphere_vm_cpu_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vsphere.conf) | vsphere.vm_cpu_utilization | Virtual Machine CPU utilization |\n| [ vsphere_vm_mem_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vsphere.conf) | vsphere.vm_mem_utilization | Virtual Machine memory utilization |\n| [ vsphere_host_cpu_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vsphere.conf) | vsphere.host_cpu_utilization | ESXi Host CPU utilization |\n| [ vsphere_host_mem_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vsphere.conf) | vsphere.host_mem_utilization | ESXi Host memory utilization |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per virtual machine\n\nThese metrics refer to the Virtual Machine.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| datacenter | Datacenter name |\n| cluster | Cluster name |\n| host | Host name |\n| vm | Virtual Machine name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| vsphere.vm_cpu_utilization | used | percentage |\n| vsphere.vm_mem_utilization | used | percentage |\n| vsphere.vm_mem_usage | granted, consumed, active, shared | KiB |\n| vsphere.vm_mem_swap_usage | swapped | KiB |\n| vsphere.vm_mem_swap_io | in, out | KiB/s |\n| vsphere.vm_disk_io | read, write | KiB/s |\n| vsphere.vm_disk_max_latency | latency | milliseconds |\n| vsphere.vm_net_traffic | received, sent | KiB/s |\n| vsphere.vm_net_packets | received, sent | packets |\n| vsphere.vm_net_drops | received, sent | packets |\n| vsphere.vm_overall_status | green, red, yellow, gray | status |\n| vsphere.vm_system_uptime | uptime | seconds |\n\n### Per host\n\nThese metrics refer to the ESXi host.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| datacenter | Datacenter name |\n| cluster | Cluster name |\n| host | Host name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| vsphere.host_cpu_utilization | used | percentage |\n| vsphere.host_mem_utilization | used | percentage |\n| vsphere.host_mem_usage | granted, consumed, active, shared, sharedcommon | KiB |\n| vsphere.host_mem_swap_io | in, out | KiB/s |\n| vsphere.host_disk_io | read, write | KiB/s |\n| vsphere.host_disk_max_latency | latency | milliseconds |\n| vsphere.host_net_traffic | received, sent | KiB/s |\n| vsphere.host_net_packets | received, sent | packets |\n| vsphere.host_net_drops | received, sent | packets |\n| vsphere.host_net_errors | received, sent | errors |\n| vsphere.host_overall_status | green, red, yellow, gray | status |\n| vsphere.host_system_uptime | uptime | seconds |\n\n",integration_type:"collector",id:"go.d.plugin-vsphere-VMware_vCenter_Server",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/vsphere/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-web_log",plugin_name:"go.d.plugin",module_name:"web_log",monitored_instance:{name:"Web server log files",link:"",categories:["data-collection.web-servers-and-web-proxies"],icon_filename:"webservers.svg"},keywords:["webserver","apache","httpd","nginx","lighttpd","logs"],most_popular:!1,info_provided_to_referring_integrations:{description:""},related_resources:{integrations:{list:[]}}},overview:"# Web server log files\n\nPlugin: go.d.plugin\nModule: web_log\n\n## Overview\n\nThis collector monitors web servers by parsing their log files.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt automatically detects log files of web servers running on localhost.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is 
`go.d/web_log.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/web_log.conf\n```\n#### Options\n\nWeblog is aware of how to parse and interpret the following fields (**known fields**):\n\n> [nginx](https://nginx.org/en/docs/varindex.html)\n>\n> [apache](https://httpd.apache.org/docs/current/mod/mod_log_config.html)\n\n| nginx | apache | description |\n|-------------------------|----------|------------------------------------------------------------------------------------------|\n| $host ($http_host) | %v | Name of the server which accepted a request. |\n| $server_port | %p | Port of the server which accepted a request. |\n| $scheme | - | Request scheme. "http" or "https". |\n| $remote_addr | %a (%h) | Client address. |\n| $request | %r | Full original request line. The line is "$request_method $request_uri $server_protocol". |\n| $request_method | %m | Request method. Usually "GET" or "POST". |\n| $request_uri | %U | Full original request URI. |\n| $server_protocol | %H | Request protocol. Usually "HTTP/1.0", "HTTP/1.1", or "HTTP/2.0". |\n| $status | %s (%>s) | Response status code. |\n| $request_length | %I | Bytes received from a client, including request and headers. |\n| $bytes_sent | %O | Bytes sent to a client, including request and headers. |\n| $body_bytes_sent | %B (%b) | Bytes sent to a client, not counting the response header. |\n| $request_time | %D | Request processing time. |\n| $upstream_response_time | - | Time spent on receiving the response from the upstream server. |\n| $ssl_protocol | - | Protocol of an established SSL connection. |\n| $ssl_cipher | - | String of ciphers used for an established SSL connection. |\n\nNotes:\n\n- Apache `%h` logs the IP address if [HostnameLookups](https://httpd.apache.org/docs/2.4/mod/core.html#hostnamelookups) is Off. The web log collector counts hostnames as IPv4 addresses. We recommend either disabling HostnameLookups or using `%a` instead of `%h`.\n- Since httpd 2.0, unlike 1.3, the `%b` and `%B` format strings do not represent the number of bytes sent to the client, but simply the size in bytes of the HTTP response. It will differ, for instance, if the connection is aborted, or if SSL is used. The `%O` format provided by [`mod_logio`](https://httpd.apache.org/docs/2.4/mod/mod_logio.html) will log the actual number of bytes sent over the network.\n- To get `%I` and `%O` working, you need to enable `mod_logio` on Apache.\n- NGINX logs the URI with query parameters; Apache doesn\'t.\n- `$request` is parsed into `$request_method`, `$request_uri` and `$server_protocol`. If you have `$request` in your log format, there is no need to include the others.\n- Don\'t use both `$bytes_sent` and `$body_bytes_sent` (`%O` and `%B` or `%b`). The module does not distinguish between these parameters.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| path | Path to the web server log file. | | yes |\n| exclude_path | Path to exclude. | *.gz | no |\n| url_patterns | List of URL patterns. | [] | no |\n| url_patterns.name | Used as a dimension name. 
| | yes |\n| url_patterns.pattern | Used to match against full original request URI. Pattern syntax in [matcher](https://github.com/netdata/netdata/tree/master/src/go/collectors/go.d.plugin/pkg/matcher#supported-format). | | yes |\n| parser | Log parser configuration. | | no |\n| parser.log_type | Log parser type. | auto | no |\n| parser.csv_config | CSV log parser config. | | no |\n| parser.csv_config.delimiter | CSV field delimiter. | , | no |\n| parser.csv_config.format | CSV log format. | | no |\n| parser.ltsv_config | LTSV log parser config. | | no |\n| parser.ltsv_config.field_delimiter | LTSV field delimiter. | \\t | no |\n| parser.ltsv_config.value_delimiter | LTSV value delimiter. | : | no |\n| parser.ltsv_config.mapping | LTSV fields mapping to **known fields**. | | yes |\n| parser.json_config | JSON log parser config. | | no |\n| parser.json_config.mapping | JSON fields mapping to **known fields**. | | yes |\n| parser.regexp_config | RegExp log parser config. | | no |\n| parser.regexp_config.pattern | RegExp pattern with named groups. | | yes |\n\n##### url_patterns\n\n"URL pattern" scope metrics will be collected for each URL pattern. \n\nOption syntax:\n\n```yaml\nurl_patterns:\n - name: name1\n pattern: pattern1\n - name: name2\n pattern: pattern2\n```\n\n\n##### parser.log_type\n\nWeblog supports 5 different log parsers:\n\n| Parser type | Description |\n|-------------|-------------------------------------------|\n| auto | Use CSV and auto-detect format |\n| csv | Comma-separated values |\n| json | [JSON](https://www.json.org/json-en.html) |\n| ltsv | [LTSV](http://ltsv.org/) |\n| regexp | Regular expression with named groups |\n\nSyntax:\n\n```yaml\nparser:\n log_type: auto\n```\n\nIf the `log_type` parameter is set to `auto` (the default), weblog tries to auto-detect the appropriate log parser and log format using the last line of the log file. It:\n\n- checks if the format is `CSV` (using a regexp).\n- checks if the format is `JSON` (using a regexp).\n- assumes the format is `CSV` and tries to find an appropriate `CSV` log format in a predefined list. It tries to parse the line using each of them in the following order (the first one that matches is used):\n\n ```sh\n $host:$server_port $remote_addr - - [$time_local] "$request" $status $body_bytes_sent - - $request_length $request_time $upstream_response_time\n $host:$server_port $remote_addr - - [$time_local] "$request" $status $body_bytes_sent - - $request_length $request_time\n $host:$server_port $remote_addr - - [$time_local] "$request" $status $body_bytes_sent $request_length $request_time $upstream_response_time\n $host:$server_port $remote_addr - - [$time_local] "$request" $status $body_bytes_sent $request_length $request_time\n $host:$server_port $remote_addr - - [$time_local] "$request" $status $body_bytes_sent\n $remote_addr - - [$time_local] "$request" $status $body_bytes_sent - - $request_length $request_time $upstream_response_time\n $remote_addr - - [$time_local] "$request" $status $body_bytes_sent - - $request_length $request_time\n $remote_addr - - [$time_local] "$request" $status $body_bytes_sent $request_length $request_time $upstream_response_time\n $remote_addr - - [$time_local] "$request" $status $body_bytes_sent $request_length $request_time\n $remote_addr - - [$time_local] "$request" $status $body_bytes_sent\n ```\n\n If you\'re using the default Apache/NGINX log format, auto-detect will work for you. 
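If it doesn\'t, you need to set the format manually. As a minimal sketch (this format string is one of the predefined ones listed above; compose yours from the **known fields** your server actually logs):\n\n```yaml\nparser:\n log_type: csv\n csv_config:\n format: \'$remote_addr - - [$time_local] "$request" $status $body_bytes_sent\'\n```\n\n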
##### parser.csv_config.format\n\nThe CSV log format string, composed of **known fields** in the order they appear in a log line (see the sketch above).\n\n\n##### parser.ltsv_config.mapping\n\nThe mapping is a dictionary where the key is a field name as it appears in the logs and the value is the corresponding **known field**.\n\n> **Note**: don\'t use `$` and `%` prefixes for mapped field names.\n\n```yaml\nparser:\n log_type: ltsv\n ltsv_config:\n mapping:\n label1: field1\n label2: field2\n```\n\n\n##### parser.json_config.mapping\n\nThe mapping is a dictionary where the key is a field name as it appears in the logs and the value is the corresponding **known field**.\n\n> **Note**: don\'t use `$` and `%` prefixes for mapped field names.\n\n```yaml\nparser:\n log_type: json\n json_config:\n mapping:\n label1: field1\n label2: field2\n```\n\n\n##### parser.regexp_config.pattern\n\nUse a pattern with named subexpressions; the subexpression names should be **known fields**.\n\n> **Note**: don\'t use `$` and `%` prefixes for mapped field names.\n\nSyntax:\n\n```yaml\nparser:\n log_type: regexp\n regexp_config:\n pattern: PATTERN\n```\n\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `web_log` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m web_log\n ```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ web_log_1m_unmatched ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.excluded_requests | percentage of unparsed log lines over the last minute |\n| [ web_log_1m_requests ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.type_requests | ratio of successful HTTP requests over the last minute (1xx, 2xx, 304, 401) |\n| [ web_log_1m_redirects ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.type_requests | ratio of redirection HTTP requests over the last minute (3xx except 304) |\n| [ web_log_1m_bad_requests ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.type_requests | ratio of client error HTTP requests over the last minute (4xx except 401) |\n| [ web_log_1m_internal_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.type_requests | ratio of server error HTTP requests over the last minute (5xx) |\n| [ web_log_web_slow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.request_processing_time | average HTTP response time over the last 1 minute |\n| [ web_log_5m_requests_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.type_requests | ratio of successful HTTP requests over the last 5 minutes, compared with the previous 5 minutes |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Web server log files instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| web_log.requests | requests | requests/s |\n| web_log.excluded_requests | unmatched | requests/s |\n| web_log.type_requests | success, bad, redirect, error | requests/s |\n| web_log.status_code_class_responses | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s |\n| web_log.status_code_class_1xx_responses | a dimension per 1xx code | responses/s |\n| web_log.status_code_class_2xx_responses | a dimension per 2xx code | responses/s |\n| web_log.status_code_class_3xx_responses | a dimension per 3xx code | responses/s |\n| web_log.status_code_class_4xx_responses | a dimension per 4xx code | responses/s |\n| web_log.status_code_class_5xx_responses | a dimension per 5xx code | responses/s |\n| web_log.bandwidth | received, sent | kilobits/s |\n| web_log.request_processing_time | min, max, avg | milliseconds |\n| web_log.requests_processing_time_histogram | a dimension per bucket | requests/s |\n| web_log.upstream_response_time | min, max, avg | milliseconds |\n| web_log.upstream_responses_time_histogram | a dimension per bucket | requests/s |\n| web_log.current_poll_uniq_clients | ipv4, ipv6 | clients |\n| web_log.vhost_requests | a dimension per vhost | requests/s |\n| web_log.port_requests | a dimension per port | requests/s |\n| web_log.scheme_requests | http, https | requests/s |\n| web_log.http_method_requests | a dimension per HTTP method | requests/s |\n| web_log.http_version_requests | a dimension per HTTP version | requests/s |\n| web_log.ip_proto_requests | ipv4, ipv6 | requests/s |\n| web_log.ssl_proto_requests | a dimension per SSL protocol | requests/s |\n| web_log.ssl_cipher_suite_requests | a dimension per SSL cipher suite | requests/s |\n| web_log.url_pattern_requests | a dimension per URL pattern | requests/s |\n| web_log.custom_field_pattern_requests | a dimension per custom field pattern | requests/s |\n\n### Per custom time field\n\nTBD\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| web_log.custom_time_field_summary | min, max, avg | milliseconds |\n| web_log.custom_time_field_histogram | a dimension per bucket | observations |\n\n### Per custom numeric field\n\nTBD\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| web_log.custom_numeric_field_{{field_name}}_summary | min, max, avg | {{units}} |\n\n### Per URL pattern\n\nTBD\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| web_log.url_pattern_status_code_responses | a dimension per pattern | responses/s |\n| web_log.url_pattern_http_method_requests | a dimension per HTTP method | requests/s |\n| web_log.url_pattern_bandwidth | received, sent | kilobits/s |\n| web_log.url_pattern_request_processing_time | min, max, avg | milliseconds |\n\n",integration_type:"collector",id:"go.d.plugin-web_log-Web_server_log_files",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/weblog/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-whoisquery",plugin_name:"go.d.plugin",module_name:"whoisquery",monitored_instance:{name:"Domain expiration 
date",link:"",icon_filename:"globe.svg",categories:["data-collection.synthetic-checks"]},keywords:["whois"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1},overview:"# Domain expiration date\n\nPlugin: go.d.plugin\nModule: whoisquery\n\n## Overview\n\nThis collector monitors the remaining time before the domain expires.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/whoisquery.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/whoisquery.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| source | Domain address. | | yes |\n| days_until_expiration_warning | Number of days before the alarm status is warning. | 30 | no |\n| days_until_expiration_critical | Number of days before the alarm status is critical. | 15 | no |\n| timeout | The query timeout in seconds. | 5 | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic configuration example.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: my_site\n source: my_site.com\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define more than one job, their names must be unique.\n\nCheck the expiration status of multiple domains.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: my_site1\n source: my_site1.com\n\n - name: my_site2\n source: my_site2.com\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `whoisquery` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
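If that's not the case on your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`; as a sketch, a default install typically contains:\n\n ```text\n [directories]\n plugins = /usr/libexec/netdata/plugins.d\n ```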
\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m whoisquery\n ```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ whoisquery_days_until_expiration ](https://github.com/netdata/netdata/blob/master/src/health/health.d/whoisquery.conf) | whoisquery.time_until_expiration | time until the domain name registration expires |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per domain\n\nThese metrics refer to the configured source.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| domain | Configured source |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| whoisquery.time_until_expiration | expiry | seconds |\n\n",integration_type:"collector",id:"go.d.plugin-whoisquery-Domain_expiration_date",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/whoisquery/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-windows-ad",plugin_name:"go.d.plugin",module_name:"windows",monitored_instance:{name:"Active Directory",link:"https://learn.microsoft.com/en-us/windows-server/identity/ad-ds/get-started/virtual-dc/active-directory-domain-services-overview",icon_filename:"windows.svg",categories:["data-collection.windows-systems"]},keywords:["windows","microsoft","active directory","ad","adcs","adfs"],most_popular:!1,info_provided_to_referring_integrations:{description:""},related_resources:{integrations:{list:[]}}},overview:"# Active Directory\n\nPlugin: go.d.plugin\nModule: windows\n\n## Overview\n\nThis collector monitors the performance of Windows machines and collects both host metrics and metrics from various Windows applications (e.g. Active Directory, MSSQL).\n\n\nIt collects metrics by periodically sending HTTP requests to [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt detects Windows exporter instances running on localhost (requires using the [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)).\n\nUsing the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nData collection affects the CPU usage of the Windows host. 
CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Windows exporter\n\nTo install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/windows.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/windows.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: win_server\n url: https://192.0.2.1:9182/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Virtual Node\n\nThe Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.\nYou can create a virtual node for all your Windows machines and control them as separate entities.\n\nTo make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:\n\n> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.\n\n```yaml\n# /etc/netdata/vnodes/vnodes.conf\n- hostname: win_server\n guid: \n```\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: win_server\n vnode: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from multiple remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: win_server1\n url: http://192.0.2.1:9182/metrics\n\n - name: win_server2\n url: http://192.0.2.2:9182/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `windows` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
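If that's not the case on your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`; as a sketch, a default install typically contains:\n\n ```text\n [directories]\n plugins = /usr/libexec/netdata/plugins.d\n ```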
\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m windows\n ```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ windows_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.cpu_utilization_total | average CPU utilization over the last 10 minutes |\n| [ windows_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.memory_utilization | memory utilization |\n| [ windows_inbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of inbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of outbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_inbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of inbound errors for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of outbound errors for the network interface in the last 10 minutes |\n| [ windows_disk_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.logical_disk_space_usage | disk space utilization |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThe collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\nSupported collectors:\n\n- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)\n- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)\n- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)\n- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)\n- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)\n- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)\n- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)\n- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)\n- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)\n- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)\n- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)\n- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)\n- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)\n- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)\n- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)\n- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)\n- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md)\n- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md)\n- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md)\n- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md)\n- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md)\n- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md)\n- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md)\n- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)\n- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)\n\n\n### Per Active Directory instance\n\nThese metrics refer to the entire monitored host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |\n| windows.memory_utilization | available, used | bytes |\n| windows.memory_page_faults | page_faults | events/s |\n| windows.memory_swap_utilization 
| available, used | bytes |\n| windows.memory_swap_operations | read, write | operations/s |\n| windows.memory_swap_pages | read, written | pages/s |\n| windows.memory_cached | cached | KiB |\n| windows.memory_cache_faults | cache_faults | events/s |\n| windows.memory_system_pool | paged, non-paged | bytes |\n| windows.tcp_conns_established | ipv4, ipv6 | connections |\n| windows.tcp_conns_active | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |\n| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |\n| windows.tcp_segments_received | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |\n| windows.os_processes | processes | number |\n| windows.os_users | users | users |\n| windows.os_visible_memory_usage | free, used | bytes |\n| windows.os_paging_files_usage | free, used | bytes |\n| windows.system_threads | threads | number |\n| windows.system_uptime | time | seconds |\n| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |\n| windows.processes_cpu_utilization | a dimension per process | percentage |\n| windows.processes_handles | a dimension per process | handles |\n| windows.processes_io_bytes | a dimension per process | bytes/s |\n| windows.processes_io_operations | a dimension per process | operations/s |\n| windows.processes_page_faults | a dimension per process | pgfaults/s |\n| windows.processes_page_file_bytes | a dimension per process | bytes |\n| windows.processes_pool_bytes | a dimension per process | bytes |\n| windows.processes_threads | a dimension per process | threads |\n| ad.database_operations | add, delete, modify, recycle | operations/s |\n| ad.directory_operations | read, write, search | operations/s |\n| ad.name_cache_lookups | lookups | lookups/s |\n| ad.name_cache_hits | hits | hits/s |\n| ad.atq_average_request_latency | time | seconds |\n| ad.atq_outstanding_requests | outstanding | requests |\n| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |\n| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |\n| ad.dra_replication_properties_updated | inbound, outbound | properties/s |\n| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |\n| ad.dra_replication_pending_syncs | pending | syncs |\n| ad.dra_replication_sync_requests | requests | requests/s |\n| ad.ds_threads | in_use | threads |\n| ad.ldap_last_bind_time | last_bind | seconds |\n| ad.binds | binds | binds/s |\n| ad.ldap_searches | searches | searches/s |\n| adfs.ad_login_connection_failures | connection | failures/s |\n| adfs.certificate_authentications | authentications | authentications/s |\n| adfs.db_artifact_failures | connection | failures/s |\n| adfs.db_artifact_query_time_seconds | query_time | seconds/s |\n| adfs.db_config_failures | connection | failures/s |\n| adfs.db_config_query_time_seconds | query_time | seconds/s |\n| adfs.device_authentications | authentications | authentications/s |\n| adfs.external_authentications | success, failure | authentications/s |\n| adfs.federated_authentications | authentications | 
authentications/s |\n| adfs.federation_metadata_requests | requests | requests/s |\n| adfs.oauth_authorization_requests | requests | requests/s |\n| adfs.oauth_client_authentications | success, failure | authentications/s |\n| adfs.oauth_client_credentials_requests | success, failure | requests/s |\n| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |\n| adfs.oauth_client_windows_authentications | success, failure | authentications/s |\n| adfs.oauth_logon_certificate_requests | success, failure | requests/s |\n| adfs.oauth_password_grant_requests | success, failure | requests/s |\n| adfs.oauth_token_requests_success | success | requests/s |\n| adfs.passive_requests | passive | requests/s |\n| adfs.passport_authentications | passport | authentications/s |\n| adfs.password_change_requests | success, failure | requests/s |\n| adfs.samlp_token_requests_success | success | requests/s |\n| adfs.sso_authentications | success, failure | authentications/s |\n| adfs.token_requests | requests | requests/s |\n| adfs.userpassword_authentications | success, failure | authentications/s |\n| adfs.windows_integrated_authentications | authentications | authentications/s |\n| adfs.wsfed_token_requests_success | success | requests/s |\n| adfs.wstrust_token_requests_success | success | requests/s |\n| exchange.activesync_ping_cmds_pending | pending | commands |\n| exchange.activesync_requests | received | requests/s |\n| exchange.activesync_sync_cmds | processed | commands/s |\n| exchange.autodiscover_requests | processed | requests/s |\n| exchange.avail_service_requests | serviced | requests/s |\n| exchange.owa_current_unique_users | logged-in | users |\n| exchange.owa_requests_total | handled | requests/s |\n| exchange.rpc_active_user_count | active | users |\n| exchange.rpc_avg_latency | latency | seconds |\n| exchange.rpc_connection_count | connections | connections |\n| exchange.rpc_operations | operations | operations/s |\n| exchange.rpc_requests | processed | requests |\n| exchange.rpc_user_count | users | users |\n| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_poison | low, high, none, normal | messages/s |\n| hyperv.vms_health | ok, critical | vms |\n| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |\n| hyperv.root_partition_attached_devices | attached | devices |\n| hyperv.root_partition_deposited_pages | deposited | pages |\n| hyperv.root_partition_skipped_interrupts | skipped | interrupts |\n| hyperv.root_partition_device_dma_errors | illegal_dma | requests |\n| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | requests |\n| 
hyperv.root_partition_device_interrupt_throttle_events | throttling | events |\n| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |\n| hyperv.root_partition_address_space | address_spaces | address spaces |\n| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |\n| hyperv.root_partition_virtual_tlb_pages | used | pages |\n\n### Per cpu core\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| core | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |\n| windows.cpu_core_interrupts | interrupts | interrupts/s |\n| windows.cpu_core_dpcs | dpcs | dpcs/s |\n| windows.cpu_core_cstate | c1, c2, c3 | percentage |\n\n### Per logical disk\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.logical_disk_utilization | free, used | bytes |\n| windows.logical_disk_bandwidth | read, write | bytes/s |\n| windows.logical_disk_operations | reads, writes | operations/s |\n| windows.logical_disk_latency | read, write | seconds |\n\n### Per network device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| nic | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.net_nic_bandwidth | received, sent | kilobits/s |\n| windows.net_nic_packets | received, sent | packets/s |\n| windows.net_nic_errors | inbound, outbound | errors/s |\n| windows.net_nic_discarded | inbound, outbound | discards/s |\n\n### Per thermalzone\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thermalzone | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.thermalzone_temperature | temperature | celsius |\n\n### Per service\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |\n| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |\n\n### Per website\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| website | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.website_traffic | received, sent | bytes/s |\n| iis.website_requests_rate | requests | requests/s |\n| iis.website_active_connections_count | active | connections |\n| iis.website_users_count | anonymous, non_anonymous | users |\n| iis.website_connection_attempts_rate | connection | attempts/s |\n| iis.website_isapi_extension_requests_count | isapi | requests |\n| iis.website_isapi_extension_requests_rate | isapi | requests/s |\n| iis.website_ftp_file_transfer_rate | received, sent | files/s |\n| iis.website_logon_attempts_rate | logon | attempts/s |\n| iis.website_errors_rate | document_locked, document_not_found | errors/s |\n| iis.website_uptime | document_locked, document_not_found | seconds |\n\n### Per mssql instance\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.instance_accessmethods_page_splits | 
page | splits/s |\n| mssql.instance_cache_hit_ratio | hit_ratio | percentage |\n| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |\n| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |\n| mssql.instance_bufman_iops | read, written | iops |\n| mssql.instance_blocked_processes | blocked | processes |\n| mssql.instance_user_connection | user | connections |\n| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |\n| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |\n| mssql.instance_memmgr_pending_memory_grants | pending | processes |\n| mssql.instance_memmgr_server_memory | memory | bytes |\n| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |\n| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |\n| mssql.instance_sqlstats_batch_requests | batch | requests/s |\n| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |\n| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |\n| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |\n\n### Per database\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n| database | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.database_active_transactions | active | transactions |\n| mssql.database_backup_restore_operations | backup | operations/s |\n| mssql.database_data_files_size | size | bytes |\n| mssql.database_log_flushed | flushed | bytes/s |\n| mssql.database_log_flushes | log | flushes/s |\n| mssql.database_transactions | transactions | transactions/s |\n| mssql.database_write_transactions | write | transactions/s |\n\n### Per certificate template\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cert_template | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adcs.cert_template_requests | requests | requests/s |\n| adcs.cert_template_failed_requests | failed | requests/s |\n| adcs.cert_template_issued_requests | issued | requests/s |\n| adcs.cert_template_pending_requests | pending | requests/s |\n| adcs.cert_template_request_processing_time | processing_time | seconds |\n| adcs.cert_template_retrievals | retrievals | retrievals/s |\n| adcs.cert_template_retrieval_processing_time | processing_time | seconds |\n| adcs.cert_template_request_cryptographic_signing_time | singing_time | seconds |\n| adcs.cert_template_request_policy_module_processing | processing_time | seconds |\n| adcs.cert_template_challenge_responses | challenge | responses/s |\n| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |\n| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |\n| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |\n\n### Per process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| process | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netframework.clrexception_thrown | exceptions | exceptions/s |\n| 
netframework.clrexception_filters | filters | filters/s |\n| netframework.clrexception_finallys | finallys | finallys/s |\n| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |\n| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |\n| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |\n| netframework.clrinterop_interop_stubs_created | created | stubs/s |\n| netframework.clrjit_methods | jit-compiled | methods/s |\n| netframework.clrjit_time | time | percentage |\n| netframework.clrjit_standard_failures | failures | failures/s |\n| netframework.clrjit_il_bytes | compiled_msil | bytes/s |\n| netframework.clrloading_loader_heap_size | committed | bytes |\n| netframework.clrloading_appdomains_loaded | loaded | domain/s |\n| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |\n| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |\n| netframework.clrloading_classes_loaded | loaded | classes/s |\n| netframework.clrloading_class_load_failures | class_load | failures/s |\n| netframework.clrlocksandthreads_queue_length | threads | threads/s |\n| netframework.clrlocksandthreads_current_logical_threads | logical | threads |\n| netframework.clrlocksandthreads_current_physical_threads | physical | threads |\n| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |\n| netframework.clrlocksandthreads_contentions | contentions | contentions/s |\n| netframework.clrmemory_allocated_bytes | allocated | bytes/s |\n| netframework.clrmemory_finalization_survivors | survived | objects |\n| netframework.clrmemory_heap_size | heap | bytes |\n| netframework.clrmemory_promoted | promoted | bytes |\n| netframework.clrmemory_number_gc_handles | used | handles |\n| netframework.clrmemory_collections | gc | gc/s |\n| netframework.clrmemory_induced_gc | gc | gc/s |\n| netframework.clrmemory_number_pinned_objects | pinned | objects |\n| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |\n| netframework.clrmemory_committed | committed | bytes |\n| netframework.clrmemory_reserved | reserved | bytes |\n| netframework.clrmemory_gc_time | time | percentage |\n| netframework.clrremoting_channels | registered | channels/s |\n| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |\n| netframework.clrremoting_context_bound_objects | allocated | objects/s |\n| netframework.clrremoting_context_proxies | objects | objects/s |\n| netframework.clrremoting_contexts | contexts | contexts |\n| netframework.clrremoting_remote_calls | rpc | calls/s |\n| netframework.clrsecurity_link_time_checks | linktime | checks/s |\n| netframework.clrsecurity_checks_time | time | percentage |\n| netframework.clrsecurity_stack_walk_depth | stack | depth |\n| netframework.clrsecurity_runtime_checks | runtime | checks/s |\n\n### Per exchange workload\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.workload_active_tasks | active | tasks |\n| exchange.workload_completed_tasks | completed | tasks/s |\n| exchange.workload_queued_tasks | queued | tasks/s |\n| exchange.workload_yielded_tasks | yielded | tasks/s |\n| exchange.workload_activity_status | active, paused | status |\n\n### Per ldap process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit 
|\n|:------|:----------|:----|\n| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |\n| exchange.ldap_read_time | read | seconds |\n| exchange.ldap_search_time | search | seconds |\n| exchange.ldap_write_time | write | seconds |\n| exchange.ldap_timeout_errors | timeout | errors/s |\n\n### Per http proxy\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.http_proxy_avg_auth_latency | latency | seconds |\n| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |\n| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |\n| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |\n| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |\n| exchange.http_proxy_requests | processed | requests/s |\n\n### Per vm\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_name | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_cpu_usage | gues, hypervisor, remote | percentage |\n| hyperv.vm_memory_physical | assigned_memory | MiB |\n| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |\n| hyperv.vm_memory_pressure_current | pressure | percentage |\n| hyperv.vm_vid_physical_pages_allocated | allocated | pages |\n| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |\n\n### Per vm device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_device_bytes | read, written | bytes/s |\n| hyperv.vm_device_operations | read, write | operations/s |\n| hyperv.vm_device_errors | errors | errors/s |\n\n### Per vm interface\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_interface | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_interface_bytes | received, sent | bytes/s |\n| hyperv.vm_interface_packets | received, sent | packets/s |\n| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |\n\n### Per vswitch\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vswitch | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vswitch_bytes | received, sent | bytes/s |\n| hyperv.vswitch_packets | received, sent | packets/s |\n| hyperv.vswitch_directed_packets | received, sent | packets/s |\n| hyperv.vswitch_broadcast_packets | received, sent | packets/s |\n| hyperv.vswitch_multicast_packets | received, sent | packets/s |\n| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_packets_flooded | flooded | packets/s |\n| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |\n| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s 
|\n\n",integration_type:"collector",id:"go.d.plugin-windows-Active_Directory",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/windows/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-windows-hyperv",plugin_name:"go.d.plugin",module_name:"windows",monitored_instance:{name:"HyperV",link:"https://learn.microsoft.com/en-us/windows-server/virtualization/hyper-v/hyper-v-technology-overview",icon_filename:"windows.svg",categories:["data-collection.windows-systems"]},keywords:["windows","microsoft","hyperv","virtualization","vm"],most_popular:!1,info_provided_to_referring_integrations:{description:""},related_resources:{integrations:{list:[]}}},overview:"# HyperV\n\nPlugin: go.d.plugin\nModule: windows\n\n## Overview\n\nThis collector monitors the performance of Windows machines and collects both host metrics and metrics from various Windows applications (e.g. Active Directory, MSSQL).\n\n\nIt collects metrics by periodically sending HTTP requests to [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt detects Windows exporter instances running on localhost (requires using the [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)).\n\nUsing the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nData collection affects the CPU usage of the Windows host. CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Windows exporter\n\nTo install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/windows.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/windows.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. 
| GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: win_server\n url: https://192.0.2.1:9182/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Virtual Node\n\nThe Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.\nYou can create a virtual node for all your Windows machines and control them as separate entities.\n\nTo make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:\n\n> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.\n\n```yaml\n# /etc/netdata/vnodes/vnodes.conf\n- hostname: win_server\n guid: \n```\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: win_server\n vnode: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from multiple remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: win_server1\n url: http://192.0.2.1:9182/metrics\n\n - name: win_server2\n url: http://192.0.2.2:9182/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `windows` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
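If that's not the case on your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`; as a sketch, a default install typically contains:\n\n ```text\n [directories]\n plugins = /usr/libexec/netdata/plugins.d\n ```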
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m windows\n ```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ windows_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.cpu_utilization_total | average CPU utilization over the last 10 minutes |\n| [ windows_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.memory_utilization | memory utilization |\n| [ windows_inbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of inbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of outbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_inbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of inbound errors for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of outbound errors for the network interface in the last 10 minutes |\n| [ windows_disk_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.logical_disk_space_usage | disk space utilization |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThe collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\nSupported collectors:\n\n- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)\n- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)\n- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)\n- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)\n- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)\n- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)\n- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)\n- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)\n- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)\n- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)\n- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)\n- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)\n- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)\n- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)\n- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)\n- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)\n- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md)\n- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md)\n- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md)\n- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md)\n- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md)\n- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md)\n- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md)\n- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)\n- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)\n\n\n### Per Active Directory instance\n\nThese metrics refer to the entire monitored host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |\n| windows.memory_utilization | available, used | bytes |\n| windows.memory_page_faults | page_faults | events/s |\n| windows.memory_swap_utilization 
| available, used | bytes |\n| windows.memory_swap_operations | read, write | operations/s |\n| windows.memory_swap_pages | read, written | pages/s |\n| windows.memory_cached | cached | KiB |\n| windows.memory_cache_faults | cache_faults | events/s |\n| windows.memory_system_pool | paged, non-paged | bytes |\n| windows.tcp_conns_established | ipv4, ipv6 | connections |\n| windows.tcp_conns_active | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |\n| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |\n| windows.tcp_segments_received | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |\n| windows.os_processes | processes | number |\n| windows.os_users | users | users |\n| windows.os_visible_memory_usage | free, used | bytes |\n| windows.os_paging_files_usage | free, used | bytes |\n| windows.system_threads | threads | number |\n| windows.system_uptime | time | seconds |\n| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |\n| windows.processes_cpu_utilization | a dimension per process | percentage |\n| windows.processes_handles | a dimension per process | handles |\n| windows.processes_io_bytes | a dimension per process | bytes/s |\n| windows.processes_io_operations | a dimension per process | operations/s |\n| windows.processes_page_faults | a dimension per process | pgfaults/s |\n| windows.processes_page_file_bytes | a dimension per process | bytes |\n| windows.processes_pool_bytes | a dimension per process | bytes |\n| windows.processes_threads | a dimension per process | threads |\n| ad.database_operations | add, delete, modify, recycle | operations/s |\n| ad.directory_operations | read, write, search | operations/s |\n| ad.name_cache_lookups | lookups | lookups/s |\n| ad.name_cache_hits | hits | hits/s |\n| ad.atq_average_request_latency | time | seconds |\n| ad.atq_outstanding_requests | outstanding | requests |\n| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |\n| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |\n| ad.dra_replication_properties_updated | inbound, outbound | properties/s |\n| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |\n| ad.dra_replication_pending_syncs | pending | syncs |\n| ad.dra_replication_sync_requests | requests | requests/s |\n| ad.ds_threads | in_use | threads |\n| ad.ldap_last_bind_time | last_bind | seconds |\n| ad.binds | binds | binds/s |\n| ad.ldap_searches | searches | searches/s |\n| adfs.ad_login_connection_failures | connection | failures/s |\n| adfs.certificate_authentications | authentications | authentications/s |\n| adfs.db_artifact_failures | connection | failures/s |\n| adfs.db_artifact_query_time_seconds | query_time | seconds/s |\n| adfs.db_config_failures | connection | failures/s |\n| adfs.db_config_query_time_seconds | query_time | seconds/s |\n| adfs.device_authentications | authentications | authentications/s |\n| adfs.external_authentications | success, failure | authentications/s |\n| adfs.federated_authentications | authentications | 
authentications/s |\n| adfs.federation_metadata_requests | requests | requests/s |\n| adfs.oauth_authorization_requests | requests | requests/s |\n| adfs.oauth_client_authentications | success, failure | authentications/s |\n| adfs.oauth_client_credentials_requests | success, failure | requests/s |\n| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |\n| adfs.oauth_client_windows_authentications | success, failure | authentications/s |\n| adfs.oauth_logon_certificate_requests | success, failure | requests/s |\n| adfs.oauth_password_grant_requests | success, failure | requests/s |\n| adfs.oauth_token_requests_success | success | requests/s |\n| adfs.passive_requests | passive | requests/s |\n| adfs.passport_authentications | passport | authentications/s |\n| adfs.password_change_requests | success, failure | requests/s |\n| adfs.samlp_token_requests_success | success | requests/s |\n| adfs.sso_authentications | success, failure | authentications/s |\n| adfs.token_requests | requests | requests/s |\n| adfs.userpassword_authentications | success, failure | authentications/s |\n| adfs.windows_integrated_authentications | authentications | authentications/s |\n| adfs.wsfed_token_requests_success | success | requests/s |\n| adfs.wstrust_token_requests_success | success | requests/s |\n| exchange.activesync_ping_cmds_pending | pending | commands |\n| exchange.activesync_requests | received | requests/s |\n| exchange.activesync_sync_cmds | processed | commands/s |\n| exchange.autodiscover_requests | processed | requests/s |\n| exchange.avail_service_requests | serviced | requests/s |\n| exchange.owa_current_unique_users | logged-in | users |\n| exchange.owa_requests_total | handled | requests/s |\n| exchange.rpc_active_user_count | active | users |\n| exchange.rpc_avg_latency | latency | seconds |\n| exchange.rpc_connection_count | connections | connections |\n| exchange.rpc_operations | operations | operations/s |\n| exchange.rpc_requests | processed | requests |\n| exchange.rpc_user_count | users | users |\n| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_poison | low, high, none, normal | messages/s |\n| hyperv.vms_health | ok, critical | vms |\n| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |\n| hyperv.root_partition_attached_devices | attached | devices |\n| hyperv.root_partition_deposited_pages | deposited | pages |\n| hyperv.root_partition_skipped_interrupts | skipped | interrupts |\n| hyperv.root_partition_device_dma_errors | illegal_dma | requests |\n| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | requests |\n| 
hyperv.root_partition_device_interrupt_throttle_events | throttling | events |\n| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |\n| hyperv.root_partition_address_space | address_spaces | address spaces |\n| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |\n| hyperv.root_partition_virtual_tlb_pages | used | pages |\n\n### Per cpu core\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| core | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |\n| windows.cpu_core_interrupts | interrupts | interrupts/s |\n| windows.cpu_core_dpcs | dpcs | dpcs/s |\n| windows.cpu_core_cstate | c1, c2, c3 | percentage |\n\n### Per logical disk\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.logical_disk_utilization | free, used | bytes |\n| windows.logical_disk_bandwidth | read, write | bytes/s |\n| windows.logical_disk_operations | reads, writes | operations/s |\n| windows.logical_disk_latency | read, write | seconds |\n\n### Per network device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| nic | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.net_nic_bandwidth | received, sent | kilobits/s |\n| windows.net_nic_packets | received, sent | packets/s |\n| windows.net_nic_errors | inbound, outbound | errors/s |\n| windows.net_nic_discarded | inbound, outbound | discards/s |\n\n### Per thermalzone\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thermalzone | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.thermalzone_temperature | temperature | celsius |\n\n### Per service\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |\n| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |\n\n### Per website\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| website | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.website_traffic | received, sent | bytes/s |\n| iis.website_requests_rate | requests | requests/s |\n| iis.website_active_connections_count | active | connections |\n| iis.website_users_count | anonymous, non_anonymous | users |\n| iis.website_connection_attempts_rate | connection | attempts/s |\n| iis.website_isapi_extension_requests_count | isapi | requests |\n| iis.website_isapi_extension_requests_rate | isapi | requests/s |\n| iis.website_ftp_file_transfer_rate | received, sent | files/s |\n| iis.website_logon_attempts_rate | logon | attempts/s |\n| iis.website_errors_rate | document_locked, document_not_found | errors/s |\n| iis.website_uptime | document_locked, document_not_found | seconds |\n\n### Per mssql instance\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.instance_accessmethods_page_splits | 
page | splits/s |\n| mssql.instance_cache_hit_ratio | hit_ratio | percentage |\n| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |\n| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |\n| mssql.instance_bufman_iops | read, written | iops |\n| mssql.instance_blocked_processes | blocked | processes |\n| mssql.instance_user_connection | user | connections |\n| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |\n| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |\n| mssql.instance_memmgr_pending_memory_grants | pending | processes |\n| mssql.instance_memmgr_server_memory | memory | bytes |\n| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |\n| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |\n| mssql.instance_sqlstats_batch_requests | batch | requests/s |\n| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |\n| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |\n| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |\n\n### Per database\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n| database | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.database_active_transactions | active | transactions |\n| mssql.database_backup_restore_operations | backup | operations/s |\n| mssql.database_data_files_size | size | bytes |\n| mssql.database_log_flushed | flushed | bytes/s |\n| mssql.database_log_flushes | log | flushes/s |\n| mssql.database_transactions | transactions | transactions/s |\n| mssql.database_write_transactions | write | transactions/s |\n\n### Per certificate template\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cert_template | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adcs.cert_template_requests | requests | requests/s |\n| adcs.cert_template_failed_requests | failed | requests/s |\n| adcs.cert_template_issued_requests | issued | requests/s |\n| adcs.cert_template_pending_requests | pending | requests/s |\n| adcs.cert_template_request_processing_time | processing_time | seconds |\n| adcs.cert_template_retrievals | retrievals | retrievals/s |\n| adcs.cert_template_retrieval_processing_time | processing_time | seconds |\n| adcs.cert_template_request_cryptographic_signing_time | singing_time | seconds |\n| adcs.cert_template_request_policy_module_processing | processing_time | seconds |\n| adcs.cert_template_challenge_responses | challenge | responses/s |\n| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |\n| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |\n| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |\n\n### Per process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| process | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netframework.clrexception_thrown | exceptions | exceptions/s |\n| 
netframework.clrexception_filters | filters | filters/s |\n| netframework.clrexception_finallys | finallys | finallys/s |\n| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |\n| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |\n| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |\n| netframework.clrinterop_interop_stubs_created | created | stubs/s |\n| netframework.clrjit_methods | jit-compiled | methods/s |\n| netframework.clrjit_time | time | percentage |\n| netframework.clrjit_standard_failures | failures | failures/s |\n| netframework.clrjit_il_bytes | compiled_msil | bytes/s |\n| netframework.clrloading_loader_heap_size | committed | bytes |\n| netframework.clrloading_appdomains_loaded | loaded | domain/s |\n| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |\n| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |\n| netframework.clrloading_classes_loaded | loaded | classes/s |\n| netframework.clrloading_class_load_failures | class_load | failures/s |\n| netframework.clrlocksandthreads_queue_length | threads | threads/s |\n| netframework.clrlocksandthreads_current_logical_threads | logical | threads |\n| netframework.clrlocksandthreads_current_physical_threads | physical | threads |\n| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |\n| netframework.clrlocksandthreads_contentions | contentions | contentions/s |\n| netframework.clrmemory_allocated_bytes | allocated | bytes/s |\n| netframework.clrmemory_finalization_survivors | survived | objects |\n| netframework.clrmemory_heap_size | heap | bytes |\n| netframework.clrmemory_promoted | promoted | bytes |\n| netframework.clrmemory_number_gc_handles | used | handles |\n| netframework.clrmemory_collections | gc | gc/s |\n| netframework.clrmemory_induced_gc | gc | gc/s |\n| netframework.clrmemory_number_pinned_objects | pinned | objects |\n| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |\n| netframework.clrmemory_committed | committed | bytes |\n| netframework.clrmemory_reserved | reserved | bytes |\n| netframework.clrmemory_gc_time | time | percentage |\n| netframework.clrremoting_channels | registered | channels/s |\n| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |\n| netframework.clrremoting_context_bound_objects | allocated | objects/s |\n| netframework.clrremoting_context_proxies | objects | objects/s |\n| netframework.clrremoting_contexts | contexts | contexts |\n| netframework.clrremoting_remote_calls | rpc | calls/s |\n| netframework.clrsecurity_link_time_checks | linktime | checks/s |\n| netframework.clrsecurity_checks_time | time | percentage |\n| netframework.clrsecurity_stack_walk_depth | stack | depth |\n| netframework.clrsecurity_runtime_checks | runtime | checks/s |\n\n### Per exchange workload\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.workload_active_tasks | active | tasks |\n| exchange.workload_completed_tasks | completed | tasks/s |\n| exchange.workload_queued_tasks | queued | tasks/s |\n| exchange.workload_yielded_tasks | yielded | tasks/s |\n| exchange.workload_activity_status | active, paused | status |\n\n### Per ldap process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit 
|\n|:------|:----------|:----|\n| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |\n| exchange.ldap_read_time | read | seconds |\n| exchange.ldap_search_time | search | seconds |\n| exchange.ldap_write_time | write | seconds |\n| exchange.ldap_timeout_errors | timeout | errors/s |\n\n### Per http proxy\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.http_proxy_avg_auth_latency | latency | seconds |\n| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |\n| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |\n| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |\n| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |\n| exchange.http_proxy_requests | processed | requests/s |\n\n### Per vm\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_name | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_cpu_usage | gues, hypervisor, remote | percentage |\n| hyperv.vm_memory_physical | assigned_memory | MiB |\n| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |\n| hyperv.vm_memory_pressure_current | pressure | percentage |\n| hyperv.vm_vid_physical_pages_allocated | allocated | pages |\n| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |\n\n### Per vm device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_device_bytes | read, written | bytes/s |\n| hyperv.vm_device_operations | read, write | operations/s |\n| hyperv.vm_device_errors | errors | errors/s |\n\n### Per vm interface\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_interface | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_interface_bytes | received, sent | bytes/s |\n| hyperv.vm_interface_packets | received, sent | packets/s |\n| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |\n\n### Per vswitch\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vswitch | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vswitch_bytes | received, sent | bytes/s |\n| hyperv.vswitch_packets | received, sent | packets/s |\n| hyperv.vswitch_directed_packets | received, sent | packets/s |\n| hyperv.vswitch_broadcast_packets | received, sent | packets/s |\n| hyperv.vswitch_multicast_packets | received, sent | packets/s |\n| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_packets_flooded | flooded | packets/s |\n| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |\n| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |\n\n",integration_type:"collector",id:"go.d.plugin-windows-HyperV",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/windows/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-windows-msexchange",plugin_name:"go.d.plugin",module_name:"windows",monitored_instance:{name:"MS 
Exchange",link:"https://www.microsoft.com/en-us/microsoft-365/exchange/email",icon_filename:"exchange.svg",categories:["data-collection.windows-systems"]},keywords:["windows","microsoft","mail"],most_popular:!1,info_provided_to_referring_integrations:{description:""},related_resources:{integrations:{list:[]}}},overview:"# MS Exchange\n\nPlugin: go.d.plugin\nModule: windows\n\n## Overview\n\nThis collector monitors the performance of Windows machines, collects both host metrics and metrics from various Windows applications (e.g. Active Directory, MSSQL).\n\n\nIt collect metrics by periodically sending HTTP requests to [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt detects Windows exporter instances running on localhost (requires using [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)).\n\nUsing the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nData collection affects the CPU usage of the Windows host. CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Windows exporter\n\nTo install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/windows.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/windows.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. 
| no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: win_server\n url: https://192.0.2.1:9182/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Virtual Node\n\nThe Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.\nYou can create a virtual node for all your Windows machines and control them as separate entities.\n\nTo make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:\n\n> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.\n\n```yaml\n# /etc/netdata/vnodes/vnodes.conf\n- hostname: win_server\n guid: \n```\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: win_server\n vnode: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from multiple remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: win_server1\n url: http://192.0.2.1:9182/metrics\n\n - name: win_server2\n url: http://192.0.2.2:9182/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `windows` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m windows\n ```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ windows_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.cpu_utilization_total | average CPU utilization over the last 10 minutes |\n| [ windows_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.memory_utilization | memory utilization |\n| [ windows_inbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of inbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of outbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_inbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of inbound errors for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of outbound errors for the network interface in the last 10 minutes |\n| [ windows_disk_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.logical_disk_space_usage | disk space utilization |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThe collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\nSupported collectors:\n\n- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)\n- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)\n- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)\n- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)\n- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)\n- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)\n- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)\n- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)\n- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)\n- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)\n- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)\n- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)\n- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)\n- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)\n- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)\n- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)\n- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md)\n- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md)\n- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md)\n- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md)\n- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md)\n- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md)\n- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md)\n- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)\n- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)\n\n\n### Per Active Directory instance\n\nThese metrics refer to the entire monitored host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |\n| windows.memory_utilization | available, used | bytes |\n| windows.memory_page_faults | page_faults | events/s |\n| windows.memory_swap_utilization 
| available, used | bytes |\n| windows.memory_swap_operations | read, write | operations/s |\n| windows.memory_swap_pages | read, written | pages/s |\n| windows.memory_cached | cached | KiB |\n| windows.memory_cache_faults | cache_faults | events/s |\n| windows.memory_system_pool | paged, non-paged | bytes |\n| windows.tcp_conns_established | ipv4, ipv6 | connections |\n| windows.tcp_conns_active | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |\n| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |\n| windows.tcp_segments_received | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |\n| windows.os_processes | processes | number |\n| windows.os_users | users | users |\n| windows.os_visible_memory_usage | free, used | bytes |\n| windows.os_paging_files_usage | free, used | bytes |\n| windows.system_threads | threads | number |\n| windows.system_uptime | time | seconds |\n| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |\n| windows.processes_cpu_utilization | a dimension per process | percentage |\n| windows.processes_handles | a dimension per process | handles |\n| windows.processes_io_bytes | a dimension per process | bytes/s |\n| windows.processes_io_operations | a dimension per process | operations/s |\n| windows.processes_page_faults | a dimension per process | pgfaults/s |\n| windows.processes_page_file_bytes | a dimension per process | bytes |\n| windows.processes_pool_bytes | a dimension per process | bytes |\n| windows.processes_threads | a dimension per process | threads |\n| ad.database_operations | add, delete, modify, recycle | operations/s |\n| ad.directory_operations | read, write, search | operations/s |\n| ad.name_cache_lookups | lookups | lookups/s |\n| ad.name_cache_hits | hits | hits/s |\n| ad.atq_average_request_latency | time | seconds |\n| ad.atq_outstanding_requests | outstanding | requests |\n| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |\n| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |\n| ad.dra_replication_properties_updated | inbound, outbound | properties/s |\n| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |\n| ad.dra_replication_pending_syncs | pending | syncs |\n| ad.dra_replication_sync_requests | requests | requests/s |\n| ad.ds_threads | in_use | threads |\n| ad.ldap_last_bind_time | last_bind | seconds |\n| ad.binds | binds | binds/s |\n| ad.ldap_searches | searches | searches/s |\n| adfs.ad_login_connection_failures | connection | failures/s |\n| adfs.certificate_authentications | authentications | authentications/s |\n| adfs.db_artifact_failures | connection | failures/s |\n| adfs.db_artifact_query_time_seconds | query_time | seconds/s |\n| adfs.db_config_failures | connection | failures/s |\n| adfs.db_config_query_time_seconds | query_time | seconds/s |\n| adfs.device_authentications | authentications | authentications/s |\n| adfs.external_authentications | success, failure | authentications/s |\n| adfs.federated_authentications | authentications | 
authentications/s |\n| adfs.federation_metadata_requests | requests | requests/s |\n| adfs.oauth_authorization_requests | requests | requests/s |\n| adfs.oauth_client_authentications | success, failure | authentications/s |\n| adfs.oauth_client_credentials_requests | success, failure | requests/s |\n| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |\n| adfs.oauth_client_windows_authentications | success, failure | authentications/s |\n| adfs.oauth_logon_certificate_requests | success, failure | requests/s |\n| adfs.oauth_password_grant_requests | success, failure | requests/s |\n| adfs.oauth_token_requests_success | success | requests/s |\n| adfs.passive_requests | passive | requests/s |\n| adfs.passport_authentications | passport | authentications/s |\n| adfs.password_change_requests | success, failure | requests/s |\n| adfs.samlp_token_requests_success | success | requests/s |\n| adfs.sso_authentications | success, failure | authentications/s |\n| adfs.token_requests | requests | requests/s |\n| adfs.userpassword_authentications | success, failure | authentications/s |\n| adfs.windows_integrated_authentications | authentications | authentications/s |\n| adfs.wsfed_token_requests_success | success | requests/s |\n| adfs.wstrust_token_requests_success | success | requests/s |\n| exchange.activesync_ping_cmds_pending | pending | commands |\n| exchange.activesync_requests | received | requests/s |\n| exchange.activesync_sync_cmds | processed | commands/s |\n| exchange.autodiscover_requests | processed | requests/s |\n| exchange.avail_service_requests | serviced | requests/s |\n| exchange.owa_current_unique_users | logged-in | users |\n| exchange.owa_requests_total | handled | requests/s |\n| exchange.rpc_active_user_count | active | users |\n| exchange.rpc_avg_latency | latency | seconds |\n| exchange.rpc_connection_count | connections | connections |\n| exchange.rpc_operations | operations | operations/s |\n| exchange.rpc_requests | processed | requests |\n| exchange.rpc_user_count | users | users |\n| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_poison | low, high, none, normal | messages/s |\n| hyperv.vms_health | ok, critical | vms |\n| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |\n| hyperv.root_partition_attached_devices | attached | devices |\n| hyperv.root_partition_deposited_pages | deposited | pages |\n| hyperv.root_partition_skipped_interrupts | skipped | interrupts |\n| hyperv.root_partition_device_dma_errors | illegal_dma | requests |\n| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | requests |\n| 
hyperv.root_partition_device_interrupt_throttle_events | throttling | events |\n| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |\n| hyperv.root_partition_address_space | address_spaces | address spaces |\n| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |\n| hyperv.root_partition_virtual_tlb_pages | used | pages |\n\n### Per cpu core\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| core | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |\n| windows.cpu_core_interrupts | interrupts | interrupts/s |\n| windows.cpu_core_dpcs | dpcs | dpcs/s |\n| windows.cpu_core_cstate | c1, c2, c3 | percentage |\n\n### Per logical disk\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.logical_disk_utilization | free, used | bytes |\n| windows.logical_disk_bandwidth | read, write | bytes/s |\n| windows.logical_disk_operations | reads, writes | operations/s |\n| windows.logical_disk_latency | read, write | seconds |\n\n### Per network device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| nic | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.net_nic_bandwidth | received, sent | kilobits/s |\n| windows.net_nic_packets | received, sent | packets/s |\n| windows.net_nic_errors | inbound, outbound | errors/s |\n| windows.net_nic_discarded | inbound, outbound | discards/s |\n\n### Per thermalzone\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thermalzone | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.thermalzone_temperature | temperature | celsius |\n\n### Per service\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |\n| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |\n\n### Per website\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| website | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.website_traffic | received, sent | bytes/s |\n| iis.website_requests_rate | requests | requests/s |\n| iis.website_active_connections_count | active | connections |\n| iis.website_users_count | anonymous, non_anonymous | users |\n| iis.website_connection_attempts_rate | connection | attempts/s |\n| iis.website_isapi_extension_requests_count | isapi | requests |\n| iis.website_isapi_extension_requests_rate | isapi | requests/s |\n| iis.website_ftp_file_transfer_rate | received, sent | files/s |\n| iis.website_logon_attempts_rate | logon | attempts/s |\n| iis.website_errors_rate | document_locked, document_not_found | errors/s |\n| iis.website_uptime | document_locked, document_not_found | seconds |\n\n### Per mssql instance\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.instance_accessmethods_page_splits | 
page | splits/s |\n| mssql.instance_cache_hit_ratio | hit_ratio | percentage |\n| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |\n| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |\n| mssql.instance_bufman_iops | read, written | iops |\n| mssql.instance_blocked_processes | blocked | processes |\n| mssql.instance_user_connection | user | connections |\n| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |\n| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |\n| mssql.instance_memmgr_pending_memory_grants | pending | processes |\n| mssql.instance_memmgr_server_memory | memory | bytes |\n| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |\n| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |\n| mssql.instance_sqlstats_batch_requests | batch | requests/s |\n| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |\n| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |\n| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |\n\n### Per database\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n| database | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.database_active_transactions | active | transactions |\n| mssql.database_backup_restore_operations | backup | operations/s |\n| mssql.database_data_files_size | size | bytes |\n| mssql.database_log_flushed | flushed | bytes/s |\n| mssql.database_log_flushes | log | flushes/s |\n| mssql.database_transactions | transactions | transactions/s |\n| mssql.database_write_transactions | write | transactions/s |\n\n### Per certificate template\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cert_template | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adcs.cert_template_requests | requests | requests/s |\n| adcs.cert_template_failed_requests | failed | requests/s |\n| adcs.cert_template_issued_requests | issued | requests/s |\n| adcs.cert_template_pending_requests | pending | requests/s |\n| adcs.cert_template_request_processing_time | processing_time | seconds |\n| adcs.cert_template_retrievals | retrievals | retrievals/s |\n| adcs.cert_template_retrieval_processing_time | processing_time | seconds |\n| adcs.cert_template_request_cryptographic_signing_time | singing_time | seconds |\n| adcs.cert_template_request_policy_module_processing | processing_time | seconds |\n| adcs.cert_template_challenge_responses | challenge | responses/s |\n| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |\n| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |\n| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |\n\n### Per process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| process | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netframework.clrexception_thrown | exceptions | exceptions/s |\n| 
netframework.clrexception_filters | filters | filters/s |\n| netframework.clrexception_finallys | finallys | finallys/s |\n| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |\n| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |\n| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |\n| netframework.clrinterop_interop_stubs_created | created | stubs/s |\n| netframework.clrjit_methods | jit-compiled | methods/s |\n| netframework.clrjit_time | time | percentage |\n| netframework.clrjit_standard_failures | failures | failures/s |\n| netframework.clrjit_il_bytes | compiled_msil | bytes/s |\n| netframework.clrloading_loader_heap_size | committed | bytes |\n| netframework.clrloading_appdomains_loaded | loaded | domain/s |\n| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |\n| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |\n| netframework.clrloading_classes_loaded | loaded | classes/s |\n| netframework.clrloading_class_load_failures | class_load | failures/s |\n| netframework.clrlocksandthreads_queue_length | threads | threads/s |\n| netframework.clrlocksandthreads_current_logical_threads | logical | threads |\n| netframework.clrlocksandthreads_current_physical_threads | physical | threads |\n| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |\n| netframework.clrlocksandthreads_contentions | contentions | contentions/s |\n| netframework.clrmemory_allocated_bytes | allocated | bytes/s |\n| netframework.clrmemory_finalization_survivors | survived | objects |\n| netframework.clrmemory_heap_size | heap | bytes |\n| netframework.clrmemory_promoted | promoted | bytes |\n| netframework.clrmemory_number_gc_handles | used | handles |\n| netframework.clrmemory_collections | gc | gc/s |\n| netframework.clrmemory_induced_gc | gc | gc/s |\n| netframework.clrmemory_number_pinned_objects | pinned | objects |\n| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |\n| netframework.clrmemory_committed | committed | bytes |\n| netframework.clrmemory_reserved | reserved | bytes |\n| netframework.clrmemory_gc_time | time | percentage |\n| netframework.clrremoting_channels | registered | channels/s |\n| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |\n| netframework.clrremoting_context_bound_objects | allocated | objects/s |\n| netframework.clrremoting_context_proxies | objects | objects/s |\n| netframework.clrremoting_contexts | contexts | contexts |\n| netframework.clrremoting_remote_calls | rpc | calls/s |\n| netframework.clrsecurity_link_time_checks | linktime | checks/s |\n| netframework.clrsecurity_checks_time | time | percentage |\n| netframework.clrsecurity_stack_walk_depth | stack | depth |\n| netframework.clrsecurity_runtime_checks | runtime | checks/s |\n\n### Per exchange workload\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.workload_active_tasks | active | tasks |\n| exchange.workload_completed_tasks | completed | tasks/s |\n| exchange.workload_queued_tasks | queued | tasks/s |\n| exchange.workload_yielded_tasks | yielded | tasks/s |\n| exchange.workload_activity_status | active, paused | status |\n\n### Per ldap process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit 
|\n|:------|:----------|:----|\n| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |\n| exchange.ldap_read_time | read | seconds |\n| exchange.ldap_search_time | search | seconds |\n| exchange.ldap_write_time | write | seconds |\n| exchange.ldap_timeout_errors | timeout | errors/s |\n\n### Per http proxy\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.http_proxy_avg_auth_latency | latency | seconds |\n| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |\n| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |\n| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |\n| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |\n| exchange.http_proxy_requests | processed | requests/s |\n\n### Per vm\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_name | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_cpu_usage | gues, hypervisor, remote | percentage |\n| hyperv.vm_memory_physical | assigned_memory | MiB |\n| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |\n| hyperv.vm_memory_pressure_current | pressure | percentage |\n| hyperv.vm_vid_physical_pages_allocated | allocated | pages |\n| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |\n\n### Per vm device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_device_bytes | read, written | bytes/s |\n| hyperv.vm_device_operations | read, write | operations/s |\n| hyperv.vm_device_errors | errors | errors/s |\n\n### Per vm interface\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_interface | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_interface_bytes | received, sent | bytes/s |\n| hyperv.vm_interface_packets | received, sent | packets/s |\n| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |\n\n### Per vswitch\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vswitch | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vswitch_bytes | received, sent | bytes/s |\n| hyperv.vswitch_packets | received, sent | packets/s |\n| hyperv.vswitch_directed_packets | received, sent | packets/s |\n| hyperv.vswitch_broadcast_packets | received, sent | packets/s |\n| hyperv.vswitch_multicast_packets | received, sent | packets/s |\n| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_packets_flooded | flooded | packets/s |\n| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |\n| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |\n\n",integration_type:"collector",id:"go.d.plugin-windows-MS_Exchange",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/windows/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-windows-mssql",plugin_name:"go.d.plugin",module_name:"windows",monitored_instance:{name:"MS SQL 
Server",link:"https://www.microsoft.com/en-us/sql-server/",icon_filename:"mssql.svg",categories:["data-collection.windows-systems"]},keywords:["windows","microsoft","mssql","database","db"],most_popular:!1,info_provided_to_referring_integrations:{description:""},related_resources:{integrations:{list:[]}}},overview:"# MS SQL Server\n\nPlugin: go.d.plugin\nModule: windows\n\n## Overview\n\nThis collector monitors the performance of Windows machines, collects both host metrics and metrics from various Windows applications (e.g. Active Directory, MSSQL).\n\n\nIt collect metrics by periodically sending HTTP requests to [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt detects Windows exporter instances running on localhost (requires using [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)).\n\nUsing the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nData collection affects the CPU usage of the Windows host. CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Windows exporter\n\nTo install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/windows.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/windows.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. 
| no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: win_server\n url: https://192.0.2.1:9182/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Virtual Node\n\nThe Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.\nYou can create a virtual node for all your Windows machines and control them as separate entities.\n\nTo make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:\n\n> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.\n\n```yaml\n# /etc/netdata/vnodes/vnodes.conf\n- hostname: win_server\n guid: \n```\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: win_server\n vnode: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from multiple remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: win_server1\n url: http://192.0.2.1:9182/metrics\n\n - name: win_server2\n url: http://192.0.2.2:9182/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `windows` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m windows\n ```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ windows_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.cpu_utilization_total | average CPU utilization over the last 10 minutes |\n| [ windows_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.memory_utilization | memory utilization |\n| [ windows_inbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of inbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of outbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_inbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of inbound errors for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of outbound errors for the network interface in the last 10 minutes |\n| [ windows_disk_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.logical_disk_space_usage | disk space utilization |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThe collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\nSupported collectors:\n\n- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)\n- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)\n- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)\n- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)\n- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)\n- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)\n- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)\n- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)\n- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)\n- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)\n- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)\n- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)\n- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)\n- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)\n- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)\n- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)\n- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md)\n- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md)\n- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md)\n- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md)\n- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md)\n- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md)\n- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md)\n- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)\n- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)\n\n\n### Per Active Directory instance\n\nThese metrics refer to the entire monitored host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |\n| windows.memory_utilization | available, used | bytes |\n| windows.memory_page_faults | page_faults | events/s |\n| windows.memory_swap_utilization 
| available, used | bytes |\n| windows.memory_swap_operations | read, write | operations/s |\n| windows.memory_swap_pages | read, written | pages/s |\n| windows.memory_cached | cached | KiB |\n| windows.memory_cache_faults | cache_faults | events/s |\n| windows.memory_system_pool | paged, non-paged | bytes |\n| windows.tcp_conns_established | ipv4, ipv6 | connections |\n| windows.tcp_conns_active | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |\n| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |\n| windows.tcp_segments_received | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |\n| windows.os_processes | processes | number |\n| windows.os_users | users | users |\n| windows.os_visible_memory_usage | free, used | bytes |\n| windows.os_paging_files_usage | free, used | bytes |\n| windows.system_threads | threads | number |\n| windows.system_uptime | time | seconds |\n| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |\n| windows.processes_cpu_utilization | a dimension per process | percentage |\n| windows.processes_handles | a dimension per process | handles |\n| windows.processes_io_bytes | a dimension per process | bytes/s |\n| windows.processes_io_operations | a dimension per process | operations/s |\n| windows.processes_page_faults | a dimension per process | pgfaults/s |\n| windows.processes_page_file_bytes | a dimension per process | bytes |\n| windows.processes_pool_bytes | a dimension per process | bytes |\n| windows.processes_threads | a dimension per process | threads |\n| ad.database_operations | add, delete, modify, recycle | operations/s |\n| ad.directory_operations | read, write, search | operations/s |\n| ad.name_cache_lookups | lookups | lookups/s |\n| ad.name_cache_hits | hits | hits/s |\n| ad.atq_average_request_latency | time | seconds |\n| ad.atq_outstanding_requests | outstanding | requests |\n| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |\n| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |\n| ad.dra_replication_properties_updated | inbound, outbound | properties/s |\n| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |\n| ad.dra_replication_pending_syncs | pending | syncs |\n| ad.dra_replication_sync_requests | requests | requests/s |\n| ad.ds_threads | in_use | threads |\n| ad.ldap_last_bind_time | last_bind | seconds |\n| ad.binds | binds | binds/s |\n| ad.ldap_searches | searches | searches/s |\n| adfs.ad_login_connection_failures | connection | failures/s |\n| adfs.certificate_authentications | authentications | authentications/s |\n| adfs.db_artifact_failures | connection | failures/s |\n| adfs.db_artifact_query_time_seconds | query_time | seconds/s |\n| adfs.db_config_failures | connection | failures/s |\n| adfs.db_config_query_time_seconds | query_time | seconds/s |\n| adfs.device_authentications | authentications | authentications/s |\n| adfs.external_authentications | success, failure | authentications/s |\n| adfs.federated_authentications | authentications | 
authentications/s |\n| adfs.federation_metadata_requests | requests | requests/s |\n| adfs.oauth_authorization_requests | requests | requests/s |\n| adfs.oauth_client_authentications | success, failure | authentications/s |\n| adfs.oauth_client_credentials_requests | success, failure | requests/s |\n| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |\n| adfs.oauth_client_windows_authentications | success, failure | authentications/s |\n| adfs.oauth_logon_certificate_requests | success, failure | requests/s |\n| adfs.oauth_password_grant_requests | success, failure | requests/s |\n| adfs.oauth_token_requests_success | success | requests/s |\n| adfs.passive_requests | passive | requests/s |\n| adfs.passport_authentications | passport | authentications/s |\n| adfs.password_change_requests | success, failure | requests/s |\n| adfs.samlp_token_requests_success | success | requests/s |\n| adfs.sso_authentications | success, failure | authentications/s |\n| adfs.token_requests | requests | requests/s |\n| adfs.userpassword_authentications | success, failure | authentications/s |\n| adfs.windows_integrated_authentications | authentications | authentications/s |\n| adfs.wsfed_token_requests_success | success | requests/s |\n| adfs.wstrust_token_requests_success | success | requests/s |\n| exchange.activesync_ping_cmds_pending | pending | commands |\n| exchange.activesync_requests | received | requests/s |\n| exchange.activesync_sync_cmds | processed | commands/s |\n| exchange.autodiscover_requests | processed | requests/s |\n| exchange.avail_service_requests | serviced | requests/s |\n| exchange.owa_current_unique_users | logged-in | users |\n| exchange.owa_requests_total | handled | requests/s |\n| exchange.rpc_active_user_count | active | users |\n| exchange.rpc_avg_latency | latency | seconds |\n| exchange.rpc_connection_count | connections | connections |\n| exchange.rpc_operations | operations | operations/s |\n| exchange.rpc_requests | processed | requests |\n| exchange.rpc_user_count | users | users |\n| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_poison | low, high, none, normal | messages/s |\n| hyperv.vms_health | ok, critical | vms |\n| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |\n| hyperv.root_partition_attached_devices | attached | devices |\n| hyperv.root_partition_deposited_pages | deposited | pages |\n| hyperv.root_partition_skipped_interrupts | skipped | interrupts |\n| hyperv.root_partition_device_dma_errors | illegal_dma | requests |\n| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | requests |\n| 
hyperv.root_partition_device_interrupt_throttle_events | throttling | events |\n| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |\n| hyperv.root_partition_address_space | address_spaces | address spaces |\n| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |\n| hyperv.root_partition_virtual_tlb_pages | used | pages |\n\n### Per cpu core\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| core | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |\n| windows.cpu_core_interrupts | interrupts | interrupts/s |\n| windows.cpu_core_dpcs | dpcs | dpcs/s |\n| windows.cpu_core_cstate | c1, c2, c3 | percentage |\n\n### Per logical disk\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.logical_disk_utilization | free, used | bytes |\n| windows.logical_disk_bandwidth | read, write | bytes/s |\n| windows.logical_disk_operations | reads, writes | operations/s |\n| windows.logical_disk_latency | read, write | seconds |\n\n### Per network device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| nic | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.net_nic_bandwidth | received, sent | kilobits/s |\n| windows.net_nic_packets | received, sent | packets/s |\n| windows.net_nic_errors | inbound, outbound | errors/s |\n| windows.net_nic_discarded | inbound, outbound | discards/s |\n\n### Per thermalzone\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thermalzone | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.thermalzone_temperature | temperature | celsius |\n\n### Per service\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |\n| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |\n\n### Per website\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| website | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.website_traffic | received, sent | bytes/s |\n| iis.website_requests_rate | requests | requests/s |\n| iis.website_active_connections_count | active | connections |\n| iis.website_users_count | anonymous, non_anonymous | users |\n| iis.website_connection_attempts_rate | connection | attempts/s |\n| iis.website_isapi_extension_requests_count | isapi | requests |\n| iis.website_isapi_extension_requests_rate | isapi | requests/s |\n| iis.website_ftp_file_transfer_rate | received, sent | files/s |\n| iis.website_logon_attempts_rate | logon | attempts/s |\n| iis.website_errors_rate | document_locked, document_not_found | errors/s |\n| iis.website_uptime | document_locked, document_not_found | seconds |\n\n### Per mssql instance\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.instance_accessmethods_page_splits | 
page | splits/s |\n| mssql.instance_cache_hit_ratio | hit_ratio | percentage |\n| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |\n| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |\n| mssql.instance_bufman_iops | read, written | iops |\n| mssql.instance_blocked_processes | blocked | processes |\n| mssql.instance_user_connection | user | connections |\n| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |\n| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |\n| mssql.instance_memmgr_pending_memory_grants | pending | processes |\n| mssql.instance_memmgr_server_memory | memory | bytes |\n| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |\n| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |\n| mssql.instance_sqlstats_batch_requests | batch | requests/s |\n| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |\n| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |\n| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |\n\n### Per database\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n| database | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.database_active_transactions | active | transactions |\n| mssql.database_backup_restore_operations | backup | operations/s |\n| mssql.database_data_files_size | size | bytes |\n| mssql.database_log_flushed | flushed | bytes/s |\n| mssql.database_log_flushes | log | flushes/s |\n| mssql.database_transactions | transactions | transactions/s |\n| mssql.database_write_transactions | write | transactions/s |\n\n### Per certificate template\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cert_template | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adcs.cert_template_requests | requests | requests/s |\n| adcs.cert_template_failed_requests | failed | requests/s |\n| adcs.cert_template_issued_requests | issued | requests/s |\n| adcs.cert_template_pending_requests | pending | requests/s |\n| adcs.cert_template_request_processing_time | processing_time | seconds |\n| adcs.cert_template_retrievals | retrievals | retrievals/s |\n| adcs.cert_template_retrieval_processing_time | processing_time | seconds |\n| adcs.cert_template_request_cryptographic_signing_time | singing_time | seconds |\n| adcs.cert_template_request_policy_module_processing | processing_time | seconds |\n| adcs.cert_template_challenge_responses | challenge | responses/s |\n| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |\n| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |\n| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |\n\n### Per process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| process | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netframework.clrexception_thrown | exceptions | exceptions/s |\n| 
netframework.clrexception_filters | filters | filters/s |\n| netframework.clrexception_finallys | finallys | finallys/s |\n| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |\n| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |\n| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |\n| netframework.clrinterop_interop_stubs_created | created | stubs/s |\n| netframework.clrjit_methods | jit-compiled | methods/s |\n| netframework.clrjit_time | time | percentage |\n| netframework.clrjit_standard_failures | failures | failures/s |\n| netframework.clrjit_il_bytes | compiled_msil | bytes/s |\n| netframework.clrloading_loader_heap_size | committed | bytes |\n| netframework.clrloading_appdomains_loaded | loaded | domain/s |\n| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |\n| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |\n| netframework.clrloading_classes_loaded | loaded | classes/s |\n| netframework.clrloading_class_load_failures | class_load | failures/s |\n| netframework.clrlocksandthreads_queue_length | threads | threads/s |\n| netframework.clrlocksandthreads_current_logical_threads | logical | threads |\n| netframework.clrlocksandthreads_current_physical_threads | physical | threads |\n| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |\n| netframework.clrlocksandthreads_contentions | contentions | contentions/s |\n| netframework.clrmemory_allocated_bytes | allocated | bytes/s |\n| netframework.clrmemory_finalization_survivors | survived | objects |\n| netframework.clrmemory_heap_size | heap | bytes |\n| netframework.clrmemory_promoted | promoted | bytes |\n| netframework.clrmemory_number_gc_handles | used | handles |\n| netframework.clrmemory_collections | gc | gc/s |\n| netframework.clrmemory_induced_gc | gc | gc/s |\n| netframework.clrmemory_number_pinned_objects | pinned | objects |\n| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |\n| netframework.clrmemory_committed | committed | bytes |\n| netframework.clrmemory_reserved | reserved | bytes |\n| netframework.clrmemory_gc_time | time | percentage |\n| netframework.clrremoting_channels | registered | channels/s |\n| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |\n| netframework.clrremoting_context_bound_objects | allocated | objects/s |\n| netframework.clrremoting_context_proxies | objects | objects/s |\n| netframework.clrremoting_contexts | contexts | contexts |\n| netframework.clrremoting_remote_calls | rpc | calls/s |\n| netframework.clrsecurity_link_time_checks | linktime | checks/s |\n| netframework.clrsecurity_checks_time | time | percentage |\n| netframework.clrsecurity_stack_walk_depth | stack | depth |\n| netframework.clrsecurity_runtime_checks | runtime | checks/s |\n\n### Per exchange workload\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.workload_active_tasks | active | tasks |\n| exchange.workload_completed_tasks | completed | tasks/s |\n| exchange.workload_queued_tasks | queued | tasks/s |\n| exchange.workload_yielded_tasks | yielded | tasks/s |\n| exchange.workload_activity_status | active, paused | status |\n\n### Per ldap process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit 
|\n|:------|:----------|:----|\n| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |\n| exchange.ldap_read_time | read | seconds |\n| exchange.ldap_search_time | search | seconds |\n| exchange.ldap_write_time | write | seconds |\n| exchange.ldap_timeout_errors | timeout | errors/s |\n\n### Per http proxy\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.http_proxy_avg_auth_latency | latency | seconds |\n| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |\n| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |\n| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |\n| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |\n| exchange.http_proxy_requests | processed | requests/s |\n\n### Per vm\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_name | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_cpu_usage | gues, hypervisor, remote | percentage |\n| hyperv.vm_memory_physical | assigned_memory | MiB |\n| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |\n| hyperv.vm_memory_pressure_current | pressure | percentage |\n| hyperv.vm_vid_physical_pages_allocated | allocated | pages |\n| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |\n\n### Per vm device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_device_bytes | read, written | bytes/s |\n| hyperv.vm_device_operations | read, write | operations/s |\n| hyperv.vm_device_errors | errors | errors/s |\n\n### Per vm interface\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_interface | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_interface_bytes | received, sent | bytes/s |\n| hyperv.vm_interface_packets | received, sent | packets/s |\n| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |\n\n### Per vswitch\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vswitch | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vswitch_bytes | received, sent | bytes/s |\n| hyperv.vswitch_packets | received, sent | packets/s |\n| hyperv.vswitch_directed_packets | received, sent | packets/s |\n| hyperv.vswitch_broadcast_packets | received, sent | packets/s |\n| hyperv.vswitch_multicast_packets | received, sent | packets/s |\n| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_packets_flooded | flooded | packets/s |\n| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |\n| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |\n\n",integration_type:"collector",id:"go.d.plugin-windows-MS_SQL_Server",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/windows/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-windows-dotnet",plugin_name:"go.d.plugin",module_name:"windows",monitored_instance:{name:"NET 
Framework",link:"https://dotnet.microsoft.com/en-us/download/dotnet-framework",icon_filename:"dotnet.svg",categories:["data-collection.windows-systems"]},keywords:["windows","microsoft","dotnet"],most_popular:!1,info_provided_to_referring_integrations:{description:""},related_resources:{integrations:{list:[]}}},overview:"# NET Framework\n\nPlugin: go.d.plugin\nModule: windows\n\n## Overview\n\nThis collector monitors the performance of Windows machines, collects both host metrics and metrics from various Windows applications (e.g. Active Directory, MSSQL).\n\n\nIt collect metrics by periodically sending HTTP requests to [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt detects Windows exporter instances running on localhost (requires using [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)).\n\nUsing the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nData collection affects the CPU usage of the Windows host. CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Windows exporter\n\nTo install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/windows.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/windows.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. 
| no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: win_server\n url: https://192.0.2.1:9182/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Virtual Node\n\nThe Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.\nYou can create a virtual node for all your Windows machines and control them as separate entities.\n\nTo make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:\n\n> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.\n\n```yaml\n# /etc/netdata/vnodes/vnodes.conf\n- hostname: win_server\n guid: \n```\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: win_server\n vnode: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from multiple remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: win_server1\n url: http://192.0.2.1:9182/metrics\n\n - name: win_server2\n url: http://192.0.2.2:9182/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `windows` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m windows\n ```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ windows_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.cpu_utilization_total | average CPU utilization over the last 10 minutes |\n| [ windows_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.memory_utilization | memory utilization |\n| [ windows_inbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of inbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of outbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_inbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of inbound errors for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of outbound errors for the network interface in the last 10 minutes |\n| [ windows_disk_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.logical_disk_space_usage | disk space utilization |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThe collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\nSupported collectors:\n\n- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)\n- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)\n- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)\n- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)\n- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)\n- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)\n- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)\n- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)\n- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)\n- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)\n- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)\n- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)\n- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)\n- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)\n- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)\n- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)\n- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md)\n- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md)\n- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md)\n- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md)\n- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md)\n- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md)\n- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md)\n- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)\n- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)\n\n\n### Per Active Directory instance\n\nThese metrics refer to the entire monitored host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |\n| windows.memory_utilization | available, used | bytes |\n| windows.memory_page_faults | page_faults | events/s |\n| windows.memory_swap_utilization 
| available, used | bytes |\n| windows.memory_swap_operations | read, write | operations/s |\n| windows.memory_swap_pages | read, written | pages/s |\n| windows.memory_cached | cached | KiB |\n| windows.memory_cache_faults | cache_faults | events/s |\n| windows.memory_system_pool | paged, non-paged | bytes |\n| windows.tcp_conns_established | ipv4, ipv6 | connections |\n| windows.tcp_conns_active | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |\n| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |\n| windows.tcp_segments_received | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |\n| windows.os_processes | processes | number |\n| windows.os_users | users | users |\n| windows.os_visible_memory_usage | free, used | bytes |\n| windows.os_paging_files_usage | free, used | bytes |\n| windows.system_threads | threads | number |\n| windows.system_uptime | time | seconds |\n| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |\n| windows.processes_cpu_utilization | a dimension per process | percentage |\n| windows.processes_handles | a dimension per process | handles |\n| windows.processes_io_bytes | a dimension per process | bytes/s |\n| windows.processes_io_operations | a dimension per process | operations/s |\n| windows.processes_page_faults | a dimension per process | pgfaults/s |\n| windows.processes_page_file_bytes | a dimension per process | bytes |\n| windows.processes_pool_bytes | a dimension per process | bytes |\n| windows.processes_threads | a dimension per process | threads |\n| ad.database_operations | add, delete, modify, recycle | operations/s |\n| ad.directory_operations | read, write, search | operations/s |\n| ad.name_cache_lookups | lookups | lookups/s |\n| ad.name_cache_hits | hits | hits/s |\n| ad.atq_average_request_latency | time | seconds |\n| ad.atq_outstanding_requests | outstanding | requests |\n| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |\n| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |\n| ad.dra_replication_properties_updated | inbound, outbound | properties/s |\n| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |\n| ad.dra_replication_pending_syncs | pending | syncs |\n| ad.dra_replication_sync_requests | requests | requests/s |\n| ad.ds_threads | in_use | threads |\n| ad.ldap_last_bind_time | last_bind | seconds |\n| ad.binds | binds | binds/s |\n| ad.ldap_searches | searches | searches/s |\n| adfs.ad_login_connection_failures | connection | failures/s |\n| adfs.certificate_authentications | authentications | authentications/s |\n| adfs.db_artifact_failures | connection | failures/s |\n| adfs.db_artifact_query_time_seconds | query_time | seconds/s |\n| adfs.db_config_failures | connection | failures/s |\n| adfs.db_config_query_time_seconds | query_time | seconds/s |\n| adfs.device_authentications | authentications | authentications/s |\n| adfs.external_authentications | success, failure | authentications/s |\n| adfs.federated_authentications | authentications | 
authentications/s |\n| adfs.federation_metadata_requests | requests | requests/s |\n| adfs.oauth_authorization_requests | requests | requests/s |\n| adfs.oauth_client_authentications | success, failure | authentications/s |\n| adfs.oauth_client_credentials_requests | success, failure | requests/s |\n| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |\n| adfs.oauth_client_windows_authentications | success, failure | authentications/s |\n| adfs.oauth_logon_certificate_requests | success, failure | requests/s |\n| adfs.oauth_password_grant_requests | success, failure | requests/s |\n| adfs.oauth_token_requests_success | success | requests/s |\n| adfs.passive_requests | passive | requests/s |\n| adfs.passport_authentications | passport | authentications/s |\n| adfs.password_change_requests | success, failure | requests/s |\n| adfs.samlp_token_requests_success | success | requests/s |\n| adfs.sso_authentications | success, failure | authentications/s |\n| adfs.token_requests | requests | requests/s |\n| adfs.userpassword_authentications | success, failure | authentications/s |\n| adfs.windows_integrated_authentications | authentications | authentications/s |\n| adfs.wsfed_token_requests_success | success | requests/s |\n| adfs.wstrust_token_requests_success | success | requests/s |\n| exchange.activesync_ping_cmds_pending | pending | commands |\n| exchange.activesync_requests | received | requests/s |\n| exchange.activesync_sync_cmds | processed | commands/s |\n| exchange.autodiscover_requests | processed | requests/s |\n| exchange.avail_service_requests | serviced | requests/s |\n| exchange.owa_current_unique_users | logged-in | users |\n| exchange.owa_requests_total | handled | requests/s |\n| exchange.rpc_active_user_count | active | users |\n| exchange.rpc_avg_latency | latency | seconds |\n| exchange.rpc_connection_count | connections | connections |\n| exchange.rpc_operations | operations | operations/s |\n| exchange.rpc_requests | processed | requests |\n| exchange.rpc_user_count | users | users |\n| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_poison | low, high, none, normal | messages/s |\n| hyperv.vms_health | ok, critical | vms |\n| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |\n| hyperv.root_partition_attached_devices | attached | devices |\n| hyperv.root_partition_deposited_pages | deposited | pages |\n| hyperv.root_partition_skipped_interrupts | skipped | interrupts |\n| hyperv.root_partition_device_dma_errors | illegal_dma | requests |\n| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | requests |\n| 
hyperv.root_partition_device_interrupt_throttle_events | throttling | events |\n| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |\n| hyperv.root_partition_address_space | address_spaces | address spaces |\n| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |\n| hyperv.root_partition_virtual_tlb_pages | used | pages |\n\n### Per cpu core\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| core | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |\n| windows.cpu_core_interrupts | interrupts | interrupts/s |\n| windows.cpu_core_dpcs | dpcs | dpcs/s |\n| windows.cpu_core_cstate | c1, c2, c3 | percentage |\n\n### Per logical disk\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.logical_disk_utilization | free, used | bytes |\n| windows.logical_disk_bandwidth | read, write | bytes/s |\n| windows.logical_disk_operations | reads, writes | operations/s |\n| windows.logical_disk_latency | read, write | seconds |\n\n### Per network device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| nic | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.net_nic_bandwidth | received, sent | kilobits/s |\n| windows.net_nic_packets | received, sent | packets/s |\n| windows.net_nic_errors | inbound, outbound | errors/s |\n| windows.net_nic_discarded | inbound, outbound | discards/s |\n\n### Per thermalzone\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thermalzone | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.thermalzone_temperature | temperature | celsius |\n\n### Per service\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |\n| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |\n\n### Per website\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| website | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.website_traffic | received, sent | bytes/s |\n| iis.website_requests_rate | requests | requests/s |\n| iis.website_active_connections_count | active | connections |\n| iis.website_users_count | anonymous, non_anonymous | users |\n| iis.website_connection_attempts_rate | connection | attempts/s |\n| iis.website_isapi_extension_requests_count | isapi | requests |\n| iis.website_isapi_extension_requests_rate | isapi | requests/s |\n| iis.website_ftp_file_transfer_rate | received, sent | files/s |\n| iis.website_logon_attempts_rate | logon | attempts/s |\n| iis.website_errors_rate | document_locked, document_not_found | errors/s |\n| iis.website_uptime | document_locked, document_not_found | seconds |\n\n### Per mssql instance\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.instance_accessmethods_page_splits | 
page | splits/s |\n| mssql.instance_cache_hit_ratio | hit_ratio | percentage |\n| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |\n| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |\n| mssql.instance_bufman_iops | read, written | iops |\n| mssql.instance_blocked_processes | blocked | processes |\n| mssql.instance_user_connection | user | connections |\n| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |\n| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |\n| mssql.instance_memmgr_pending_memory_grants | pending | processes |\n| mssql.instance_memmgr_server_memory | memory | bytes |\n| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |\n| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |\n| mssql.instance_sqlstats_batch_requests | batch | requests/s |\n| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |\n| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |\n| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |\n\n### Per database\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n| database | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.database_active_transactions | active | transactions |\n| mssql.database_backup_restore_operations | backup | operations/s |\n| mssql.database_data_files_size | size | bytes |\n| mssql.database_log_flushed | flushed | bytes/s |\n| mssql.database_log_flushes | log | flushes/s |\n| mssql.database_transactions | transactions | transactions/s |\n| mssql.database_write_transactions | write | transactions/s |\n\n### Per certificate template\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cert_template | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adcs.cert_template_requests | requests | requests/s |\n| adcs.cert_template_failed_requests | failed | requests/s |\n| adcs.cert_template_issued_requests | issued | requests/s |\n| adcs.cert_template_pending_requests | pending | requests/s |\n| adcs.cert_template_request_processing_time | processing_time | seconds |\n| adcs.cert_template_retrievals | retrievals | retrievals/s |\n| adcs.cert_template_retrieval_processing_time | processing_time | seconds |\n| adcs.cert_template_request_cryptographic_signing_time | singing_time | seconds |\n| adcs.cert_template_request_policy_module_processing | processing_time | seconds |\n| adcs.cert_template_challenge_responses | challenge | responses/s |\n| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |\n| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |\n| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |\n\n### Per process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| process | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netframework.clrexception_thrown | exceptions | exceptions/s |\n| 
netframework.clrexception_filters | filters | filters/s |\n| netframework.clrexception_finallys | finallys | finallys/s |\n| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |\n| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |\n| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |\n| netframework.clrinterop_interop_stubs_created | created | stubs/s |\n| netframework.clrjit_methods | jit-compiled | methods/s |\n| netframework.clrjit_time | time | percentage |\n| netframework.clrjit_standard_failures | failures | failures/s |\n| netframework.clrjit_il_bytes | compiled_msil | bytes/s |\n| netframework.clrloading_loader_heap_size | committed | bytes |\n| netframework.clrloading_appdomains_loaded | loaded | domain/s |\n| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |\n| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |\n| netframework.clrloading_classes_loaded | loaded | classes/s |\n| netframework.clrloading_class_load_failures | class_load | failures/s |\n| netframework.clrlocksandthreads_queue_length | threads | threads/s |\n| netframework.clrlocksandthreads_current_logical_threads | logical | threads |\n| netframework.clrlocksandthreads_current_physical_threads | physical | threads |\n| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |\n| netframework.clrlocksandthreads_contentions | contentions | contentions/s |\n| netframework.clrmemory_allocated_bytes | allocated | bytes/s |\n| netframework.clrmemory_finalization_survivors | survived | objects |\n| netframework.clrmemory_heap_size | heap | bytes |\n| netframework.clrmemory_promoted | promoted | bytes |\n| netframework.clrmemory_number_gc_handles | used | handles |\n| netframework.clrmemory_collections | gc | gc/s |\n| netframework.clrmemory_induced_gc | gc | gc/s |\n| netframework.clrmemory_number_pinned_objects | pinned | objects |\n| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |\n| netframework.clrmemory_committed | committed | bytes |\n| netframework.clrmemory_reserved | reserved | bytes |\n| netframework.clrmemory_gc_time | time | percentage |\n| netframework.clrremoting_channels | registered | channels/s |\n| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |\n| netframework.clrremoting_context_bound_objects | allocated | objects/s |\n| netframework.clrremoting_context_proxies | objects | objects/s |\n| netframework.clrremoting_contexts | contexts | contexts |\n| netframework.clrremoting_remote_calls | rpc | calls/s |\n| netframework.clrsecurity_link_time_checks | linktime | checks/s |\n| netframework.clrsecurity_checks_time | time | percentage |\n| netframework.clrsecurity_stack_walk_depth | stack | depth |\n| netframework.clrsecurity_runtime_checks | runtime | checks/s |\n\n### Per exchange workload\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.workload_active_tasks | active | tasks |\n| exchange.workload_completed_tasks | completed | tasks/s |\n| exchange.workload_queued_tasks | queued | tasks/s |\n| exchange.workload_yielded_tasks | yielded | tasks/s |\n| exchange.workload_activity_status | active, paused | status |\n\n### Per ldap process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit 
|\n|:------|:----------|:----|\n| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |\n| exchange.ldap_read_time | read | seconds |\n| exchange.ldap_search_time | search | seconds |\n| exchange.ldap_write_time | write | seconds |\n| exchange.ldap_timeout_errors | timeout | errors/s |\n\n### Per http proxy\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.http_proxy_avg_auth_latency | latency | seconds |\n| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |\n| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |\n| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |\n| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |\n| exchange.http_proxy_requests | processed | requests/s |\n\n### Per vm\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_name | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_cpu_usage | gues, hypervisor, remote | percentage |\n| hyperv.vm_memory_physical | assigned_memory | MiB |\n| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |\n| hyperv.vm_memory_pressure_current | pressure | percentage |\n| hyperv.vm_vid_physical_pages_allocated | allocated | pages |\n| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |\n\n### Per vm device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_device_bytes | read, written | bytes/s |\n| hyperv.vm_device_operations | read, write | operations/s |\n| hyperv.vm_device_errors | errors | errors/s |\n\n### Per vm interface\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_interface | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_interface_bytes | received, sent | bytes/s |\n| hyperv.vm_interface_packets | received, sent | packets/s |\n| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |\n\n### Per vswitch\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vswitch | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vswitch_bytes | received, sent | bytes/s |\n| hyperv.vswitch_packets | received, sent | packets/s |\n| hyperv.vswitch_directed_packets | received, sent | packets/s |\n| hyperv.vswitch_broadcast_packets | received, sent | packets/s |\n| hyperv.vswitch_multicast_packets | received, sent | packets/s |\n| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_packets_flooded | flooded | packets/s |\n| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |\n| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s 
|\n\n",integration_type:"collector",id:"go.d.plugin-windows-NET_Framework",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/windows/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-windows",plugin_name:"go.d.plugin",module_name:"windows",monitored_instance:{name:"Windows",link:"https://www.microsoft.com/en-us/windows",categories:["data-collection.windows-systems"],icon_filename:"windows.svg"},keywords:["windows","microsoft"],most_popular:!0,info_provided_to_referring_integrations:{description:""},related_resources:{integrations:{list:[]}}},overview:"# Windows\n\nPlugin: go.d.plugin\nModule: windows\n\n## Overview\n\nThis collector monitors the performance of Windows machines, collects both host metrics and metrics from various Windows applications (e.g. Active Directory, MSSQL).\n\n\nIt collect metrics by periodically sending HTTP requests to [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt detects Windows exporter instances running on localhost (requires using [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)).\n\nUsing the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nData collection affects the CPU usage of the Windows host. CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install Windows exporter\n\nTo install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/windows.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/windows.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. 
| | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: win_server\n url: https://192.0.2.1:9182/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Virtual Node\n\nThe Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.\nYou can create a virtual node for all your Windows machines and control them as separate entities.\n\nTo make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:\n\n> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.\n\n```yaml\n# /etc/netdata/vnodes/vnodes.conf\n- hostname: win_server\n guid: \n```\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: win_server\n vnode: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from multiple remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: win_server1\n url: http://192.0.2.1:9182/metrics\n\n - name: win_server2\n url: http://192.0.2.2:9182/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `windows` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m windows\n ```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ windows_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.cpu_utilization_total | average CPU utilization over the last 10 minutes |\n| [ windows_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.memory_utilization | memory utilization |\n| [ windows_inbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of inbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of outbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_inbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of inbound errors for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of outbound errors for the network interface in the last 10 minutes |\n| [ windows_disk_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.logical_disk_space_usage | disk space utilization |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThe collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\nSupported collectors:\n\n- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)\n- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)\n- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)\n- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)\n- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)\n- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)\n- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)\n- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)\n- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)\n- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)\n- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)\n- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)\n- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)\n- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)\n- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)\n- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)\n- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md)\n- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md)\n- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md)\n- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md)\n- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md)\n- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md)\n- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md)\n- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)\n- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)\n\n\n### Per Active Directory instance\n\nThese metrics refer to the entire monitored host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |\n| windows.memory_utilization | available, used | bytes |\n| windows.memory_page_faults | page_faults | events/s |\n| windows.memory_swap_utilization 
| available, used | bytes |\n| windows.memory_swap_operations | read, write | operations/s |\n| windows.memory_swap_pages | read, written | pages/s |\n| windows.memory_cached | cached | KiB |\n| windows.memory_cache_faults | cache_faults | events/s |\n| windows.memory_system_pool | paged, non-paged | bytes |\n| windows.tcp_conns_established | ipv4, ipv6 | connections |\n| windows.tcp_conns_active | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |\n| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |\n| windows.tcp_segments_received | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |\n| windows.os_processes | processes | number |\n| windows.os_users | users | users |\n| windows.os_visible_memory_usage | free, used | bytes |\n| windows.os_paging_files_usage | free, used | bytes |\n| windows.system_threads | threads | number |\n| windows.system_uptime | time | seconds |\n| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |\n| windows.processes_cpu_utilization | a dimension per process | percentage |\n| windows.processes_handles | a dimension per process | handles |\n| windows.processes_io_bytes | a dimension per process | bytes/s |\n| windows.processes_io_operations | a dimension per process | operations/s |\n| windows.processes_page_faults | a dimension per process | pgfaults/s |\n| windows.processes_page_file_bytes | a dimension per process | bytes |\n| windows.processes_pool_bytes | a dimension per process | bytes |\n| windows.processes_threads | a dimension per process | threads |\n| ad.database_operations | add, delete, modify, recycle | operations/s |\n| ad.directory_operations | read, write, search | operations/s |\n| ad.name_cache_lookups | lookups | lookups/s |\n| ad.name_cache_hits | hits | hits/s |\n| ad.atq_average_request_latency | time | seconds |\n| ad.atq_outstanding_requests | outstanding | requests |\n| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |\n| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |\n| ad.dra_replication_properties_updated | inbound, outbound | properties/s |\n| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |\n| ad.dra_replication_pending_syncs | pending | syncs |\n| ad.dra_replication_sync_requests | requests | requests/s |\n| ad.ds_threads | in_use | threads |\n| ad.ldap_last_bind_time | last_bind | seconds |\n| ad.binds | binds | binds/s |\n| ad.ldap_searches | searches | searches/s |\n| adfs.ad_login_connection_failures | connection | failures/s |\n| adfs.certificate_authentications | authentications | authentications/s |\n| adfs.db_artifact_failures | connection | failures/s |\n| adfs.db_artifact_query_time_seconds | query_time | seconds/s |\n| adfs.db_config_failures | connection | failures/s |\n| adfs.db_config_query_time_seconds | query_time | seconds/s |\n| adfs.device_authentications | authentications | authentications/s |\n| adfs.external_authentications | success, failure | authentications/s |\n| adfs.federated_authentications | authentications | 
authentications/s |\n| adfs.federation_metadata_requests | requests | requests/s |\n| adfs.oauth_authorization_requests | requests | requests/s |\n| adfs.oauth_client_authentications | success, failure | authentications/s |\n| adfs.oauth_client_credentials_requests | success, failure | requests/s |\n| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |\n| adfs.oauth_client_windows_authentications | success, failure | authentications/s |\n| adfs.oauth_logon_certificate_requests | success, failure | requests/s |\n| adfs.oauth_password_grant_requests | success, failure | requests/s |\n| adfs.oauth_token_requests_success | success | requests/s |\n| adfs.passive_requests | passive | requests/s |\n| adfs.passport_authentications | passport | authentications/s |\n| adfs.password_change_requests | success, failure | requests/s |\n| adfs.samlp_token_requests_success | success | requests/s |\n| adfs.sso_authentications | success, failure | authentications/s |\n| adfs.token_requests | requests | requests/s |\n| adfs.userpassword_authentications | success, failure | authentications/s |\n| adfs.windows_integrated_authentications | authentications | authentications/s |\n| adfs.wsfed_token_requests_success | success | requests/s |\n| adfs.wstrust_token_requests_success | success | requests/s |\n| exchange.activesync_ping_cmds_pending | pending | commands |\n| exchange.activesync_requests | received | requests/s |\n| exchange.activesync_sync_cmds | processed | commands/s |\n| exchange.autodiscover_requests | processed | requests/s |\n| exchange.avail_service_requests | serviced | requests/s |\n| exchange.owa_current_unique_users | logged-in | users |\n| exchange.owa_requests_total | handled | requests/s |\n| exchange.rpc_active_user_count | active | users |\n| exchange.rpc_avg_latency | latency | seconds |\n| exchange.rpc_connection_count | connections | connections |\n| exchange.rpc_operations | operations | operations/s |\n| exchange.rpc_requests | processed | requests |\n| exchange.rpc_user_count | users | users |\n| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_poison | low, high, none, normal | messages/s |\n| hyperv.vms_health | ok, critical | vms |\n| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |\n| hyperv.root_partition_attached_devices | attached | devices |\n| hyperv.root_partition_deposited_pages | deposited | pages |\n| hyperv.root_partition_skipped_interrupts | skipped | interrupts |\n| hyperv.root_partition_device_dma_errors | illegal_dma | requests |\n| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | requests |\n| 
hyperv.root_partition_device_interrupt_throttle_events | throttling | events |\n| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |\n| hyperv.root_partition_address_space | address_spaces | address spaces |\n| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |\n| hyperv.root_partition_virtual_tlb_pages | used | pages |\n\n### Per cpu core\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| core | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |\n| windows.cpu_core_interrupts | interrupts | interrupts/s |\n| windows.cpu_core_dpcs | dpcs | dpcs/s |\n| windows.cpu_core_cstate | c1, c2, c3 | percentage |\n\n### Per logical disk\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.logical_disk_utilization | free, used | bytes |\n| windows.logical_disk_bandwidth | read, write | bytes/s |\n| windows.logical_disk_operations | reads, writes | operations/s |\n| windows.logical_disk_latency | read, write | seconds |\n\n### Per network device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| nic | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.net_nic_bandwidth | received, sent | kilobits/s |\n| windows.net_nic_packets | received, sent | packets/s |\n| windows.net_nic_errors | inbound, outbound | errors/s |\n| windows.net_nic_discarded | inbound, outbound | discards/s |\n\n### Per thermalzone\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thermalzone | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.thermalzone_temperature | temperature | celsius |\n\n### Per service\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |\n| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |\n\n### Per website\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| website | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.website_traffic | received, sent | bytes/s |\n| iis.website_requests_rate | requests | requests/s |\n| iis.website_active_connections_count | active | connections |\n| iis.website_users_count | anonymous, non_anonymous | users |\n| iis.website_connection_attempts_rate | connection | attempts/s |\n| iis.website_isapi_extension_requests_count | isapi | requests |\n| iis.website_isapi_extension_requests_rate | isapi | requests/s |\n| iis.website_ftp_file_transfer_rate | received, sent | files/s |\n| iis.website_logon_attempts_rate | logon | attempts/s |\n| iis.website_errors_rate | document_locked, document_not_found | errors/s |\n| iis.website_uptime | document_locked, document_not_found | seconds |\n\n### Per mssql instance\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.instance_accessmethods_page_splits | 
page | splits/s |\n| mssql.instance_cache_hit_ratio | hit_ratio | percentage |\n| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |\n| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |\n| mssql.instance_bufman_iops | read, written | iops |\n| mssql.instance_blocked_processes | blocked | processes |\n| mssql.instance_user_connection | user | connections |\n| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |\n| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |\n| mssql.instance_memmgr_pending_memory_grants | pending | processes |\n| mssql.instance_memmgr_server_memory | memory | bytes |\n| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |\n| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |\n| mssql.instance_sqlstats_batch_requests | batch | requests/s |\n| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |\n| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |\n| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |\n\n### Per database\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n| database | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.database_active_transactions | active | transactions |\n| mssql.database_backup_restore_operations | backup | operations/s |\n| mssql.database_data_files_size | size | bytes |\n| mssql.database_log_flushed | flushed | bytes/s |\n| mssql.database_log_flushes | log | flushes/s |\n| mssql.database_transactions | transactions | transactions/s |\n| mssql.database_write_transactions | write | transactions/s |\n\n### Per certificate template\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cert_template | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adcs.cert_template_requests | requests | requests/s |\n| adcs.cert_template_failed_requests | failed | requests/s |\n| adcs.cert_template_issued_requests | issued | requests/s |\n| adcs.cert_template_pending_requests | pending | requests/s |\n| adcs.cert_template_request_processing_time | processing_time | seconds |\n| adcs.cert_template_retrievals | retrievals | retrievals/s |\n| adcs.cert_template_retrieval_processing_time | processing_time | seconds |\n| adcs.cert_template_request_cryptographic_signing_time | singing_time | seconds |\n| adcs.cert_template_request_policy_module_processing | processing_time | seconds |\n| adcs.cert_template_challenge_responses | challenge | responses/s |\n| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |\n| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |\n| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |\n\n### Per process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| process | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netframework.clrexception_thrown | exceptions | exceptions/s |\n| 
netframework.clrexception_filters | filters | filters/s |\n| netframework.clrexception_finallys | finallys | finallys/s |\n| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |\n| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |\n| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |\n| netframework.clrinterop_interop_stubs_created | created | stubs/s |\n| netframework.clrjit_methods | jit-compiled | methods/s |\n| netframework.clrjit_time | time | percentage |\n| netframework.clrjit_standard_failures | failures | failures/s |\n| netframework.clrjit_il_bytes | compiled_msil | bytes/s |\n| netframework.clrloading_loader_heap_size | committed | bytes |\n| netframework.clrloading_appdomains_loaded | loaded | domain/s |\n| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |\n| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |\n| netframework.clrloading_classes_loaded | loaded | classes/s |\n| netframework.clrloading_class_load_failures | class_load | failures/s |\n| netframework.clrlocksandthreads_queue_length | threads | threads/s |\n| netframework.clrlocksandthreads_current_logical_threads | logical | threads |\n| netframework.clrlocksandthreads_current_physical_threads | physical | threads |\n| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |\n| netframework.clrlocksandthreads_contentions | contentions | contentions/s |\n| netframework.clrmemory_allocated_bytes | allocated | bytes/s |\n| netframework.clrmemory_finalization_survivors | survived | objects |\n| netframework.clrmemory_heap_size | heap | bytes |\n| netframework.clrmemory_promoted | promoted | bytes |\n| netframework.clrmemory_number_gc_handles | used | handles |\n| netframework.clrmemory_collections | gc | gc/s |\n| netframework.clrmemory_induced_gc | gc | gc/s |\n| netframework.clrmemory_number_pinned_objects | pinned | objects |\n| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |\n| netframework.clrmemory_committed | committed | bytes |\n| netframework.clrmemory_reserved | reserved | bytes |\n| netframework.clrmemory_gc_time | time | percentage |\n| netframework.clrremoting_channels | registered | channels/s |\n| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |\n| netframework.clrremoting_context_bound_objects | allocated | objects/s |\n| netframework.clrremoting_context_proxies | objects | objects/s |\n| netframework.clrremoting_contexts | contexts | contexts |\n| netframework.clrremoting_remote_calls | rpc | calls/s |\n| netframework.clrsecurity_link_time_checks | linktime | checks/s |\n| netframework.clrsecurity_checks_time | time | percentage |\n| netframework.clrsecurity_stack_walk_depth | stack | depth |\n| netframework.clrsecurity_runtime_checks | runtime | checks/s |\n\n### Per exchange workload\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.workload_active_tasks | active | tasks |\n| exchange.workload_completed_tasks | completed | tasks/s |\n| exchange.workload_queued_tasks | queued | tasks/s |\n| exchange.workload_yielded_tasks | yielded | tasks/s |\n| exchange.workload_activity_status | active, paused | status |\n\n### Per ldap process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit 
|\n|:------|:----------|:----|\n| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |\n| exchange.ldap_read_time | read | seconds |\n| exchange.ldap_search_time | search | seconds |\n| exchange.ldap_write_time | write | seconds |\n| exchange.ldap_timeout_errors | timeout | errors/s |\n\n### Per http proxy\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.http_proxy_avg_auth_latency | latency | seconds |\n| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |\n| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |\n| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |\n| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |\n| exchange.http_proxy_requests | processed | requests/s |\n\n### Per vm\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_name | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_cpu_usage | gues, hypervisor, remote | percentage |\n| hyperv.vm_memory_physical | assigned_memory | MiB |\n| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |\n| hyperv.vm_memory_pressure_current | pressure | percentage |\n| hyperv.vm_vid_physical_pages_allocated | allocated | pages |\n| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |\n\n### Per vm device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_device_bytes | read, written | bytes/s |\n| hyperv.vm_device_operations | read, write | operations/s |\n| hyperv.vm_device_errors | errors | errors/s |\n\n### Per vm interface\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_interface | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_interface_bytes | received, sent | bytes/s |\n| hyperv.vm_interface_packets | received, sent | packets/s |\n| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |\n\n### Per vswitch\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vswitch | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vswitch_bytes | received, sent | bytes/s |\n| hyperv.vswitch_packets | received, sent | packets/s |\n| hyperv.vswitch_directed_packets | received, sent | packets/s |\n| hyperv.vswitch_broadcast_packets | received, sent | packets/s |\n| hyperv.vswitch_multicast_packets | received, sent | packets/s |\n| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_packets_flooded | flooded | packets/s |\n| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |\n| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s 
|\n\n",integration_type:"collector",id:"go.d.plugin-windows-Windows",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/windows/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-wireguard",plugin_name:"go.d.plugin",module_name:"wireguard",monitored_instance:{name:"WireGuard",link:"https://www.wireguard.com/",categories:["data-collection.vpns"],icon_filename:"wireguard.svg"},keywords:["wireguard","vpn","security"],most_popular:!1,info_provided_to_referring_integrations:{description:""},related_resources:{integrations:{list:[]}}},overview:"# WireGuard\n\nPlugin: go.d.plugin\nModule: wireguard\n\n## Overview\n\nThis collector monitors WireGuard VPN devices and peers traffic.\n\n\nIt connects to the local WireGuard instance using [wireguard-go client](https://github.com/WireGuard/wireguard-go).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThis collector requires the CAP_NET_ADMIN capability, but it is set automatically during installation, so no manual configuration is needed.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt automatically detects instances running on localhost.\n\n\n#### Limits\n\nDoesn't work if Netdata or WireGuard is installed in the container.\n\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/wireguard.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/wireguard.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `wireguard` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m wireguard\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per device\n\nThese metrics refer to the VPN network interface.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | VPN network interface |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| wireguard.device_network_io | receive, transmit | B/s |\n| wireguard.device_peers | peers | peers |\n\n### Per peer\n\nThese metrics refer to the VPN peer.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | VPN network interface |\n| public_key | Public key of a peer |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| wireguard.peer_network_io | receive, transmit | B/s |\n| wireguard.peer_latest_handshake_ago | time | seconds |\n\n",integration_type:"collector",id:"go.d.plugin-wireguard-WireGuard",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/wireguard/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-x509check",plugin_name:"go.d.plugin",module_name:"x509check",monitored_instance:{name:"X.509 certificate",link:"",categories:["data-collection.synthetic-checks"],icon_filename:"lock.svg"},keywords:["x509","certificate"],most_popular:!1,info_provided_to_referring_integrations:{description:""},related_resources:{integrations:{list:[]}}},overview:"# X.509 certificate\n\nPlugin: go.d.plugin\nModule: x509check\n\n## Overview\n\n\n\nThis collector monitors X.509 certificates' expiration time and revocation status.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/x509check.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/x509check.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| source | Certificate source. Allowed schemes: https, tcp, tcp4, tcp6, udp, udp4, udp6, file, smtp. | | no |\n| days_until_expiration_warning | Number of days before the alarm status is warning. | 30 | no |\n| days_until_expiration_critical | Number of days before the alarm status is critical. | 15 | no |\n| check_revocation_status | Whether to check the revocation status of the certificate. | no | no |\n| timeout | SSL connection timeout. | 2 | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. 
Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Website certificate\n\nWebsite certificate.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: my_site_cert\n source: https://my_site.org:443\n\n```\n{% /details %}\n##### Local file certificate\n\nLocal file certificate.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: my_file_cert\n source: file:///home/me/cert.pem\n\n```\n{% /details %}\n##### SMTP certificate\n\nSMTP certificate.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: my_smtp_cert\n source: smtp://smtp.my_mail.org:587\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define more than one job, their names must be unique.\n\nCheck the expiration status of multiple websites\' certificates.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: my_site_cert1\n source: https://my_site1.org:443\n\n - name: my_site_cert2\n source: https://my_site2.org:443\n\n - name: my_site_cert3\n source: https://my_site3.org:443\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `x509check` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m x509check\n ```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ x509check_days_until_expiration ](https://github.com/netdata/netdata/blob/master/src/health/health.d/x509check.conf) | x509check.time_until_expiration | time until x509 certificate expires |\n| [ x509check_revocation_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/x509check.conf) | x509check.revocation_status | x509 certificate revocation status (0: revoked, 1: valid) |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per source\n\nThese metrics refer to the configured source.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| source | Configured source. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| x509check.time_until_expiration | expiry | seconds |\n| x509check.revocation_status | revoked | boolean |\n\n",integration_type:"collector",id:"go.d.plugin-x509check-X.509_certificate",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/x509check/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-zfspool",plugin_name:"go.d.plugin",module_name:"zfspool",monitored_instance:{name:"ZFS Pools",link:"",icon_filename:"filesystem.svg",categories:["data-collection.storage-mount-points-and-filesystems"]},keywords:["zfs pools","pools","zfs","filesystem"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},most_popular:!1},overview:"# ZFS Pools\n\nPlugin: go.d.plugin\nModule: zfspool\n\n## Overview\n\nThis collector monitors the health and space usage of ZFS pools using the command line tool [zpool](https://openzfs.github.io/openzfs-docs/man/master/8/zpool-list.8.html).\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/zfspool.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/zfspool.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| binary_path | Path to the `zpool` binary. If an absolute path is provided, the collector will use it directly; otherwise, it will search for the binary in directories specified in the PATH environment variable. | /usr/bin/zpool | yes |\n| timeout | Timeout for executing the binary, specified in seconds. | 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Custom binary path\n\nThe executable is not in the directories specified in the PATH environment variable.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: zfspool\n binary_path: /usr/local/sbin/zpool\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `zfspool` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m zfspool\n ```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ zfs_pool_space_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/zfs.conf) | zfspool.pool_space_utilization | ZFS pool ${label:pool} is nearing capacity. Current space usage is above the threshold. |\n| [ zfs_pool_health_state_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/zfs.conf) | zfspool.pool_health_state | ZFS pool ${label:pool} state is degraded |\n| [ zfs_pool_health_state_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/zfs.conf) | zfspool.pool_health_state | ZFS pool ${label:pool} state is faulted or unavail |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per zfs pool\n\nThese metrics refer to the ZFS pool.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| pool | Zpool name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| zfspool.pool_space_utilization | utilization | % |\n| zfspool.pool_space_usage | free, used | bytes |\n| zfspool.pool_fragmentation | fragmentation | % |\n| zfspool.pool_health_state | online, degraded, faulted, offline, unavail, removed, suspended | state |\n\n",integration_type:"collector",id:"go.d.plugin-zfspool-ZFS_Pools",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/zfspool/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-zookeeper",plugin_name:"go.d.plugin",module_name:"zookeeper",monitored_instance:{name:"ZooKeeper",link:"https://zookeeper.apache.org/",categories:["data-collection.service-discovery-registry"],icon_filename:"zookeeper.svg"},keywords:["zookeeper"],most_popular:!1,info_provided_to_referring_integrations:{description:""},related_resources:{integrations:{list:[{plugin_name:"apps.plugin",module_name:"apps"}]}}},overview:"# ZooKeeper\n\nPlugin: go.d.plugin\nModule: zookeeper\n\n## Overview\n\n\n\nIt connects to the Zookeeper instance over TCP and executes the following commands:\n\n- [mntr](https://zookeeper.apache.org/doc/r3.4.8/zookeeperAdmin.html#sc_zkCommands).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by attempting to connect using known ZooKeeper TCP sockets:\n\n- 127.0.0.1:2181\n- 127.0.0.1:2182\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Whitelist `mntr` command\n\nAdd `mntr` to Zookeeper\'s [4lw.commands.whitelist](https://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_4lw).\n\n\n\n### 
Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/zookeeper.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/zookeeper.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address. The format is IP:PORT. | 127.0.0.1:2181 | yes |\n| timeout | Connection/read/write/ssl handshake timeout. | 1 | no |\n| use_tls | Whether to use TLS or not. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nLocal server.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:2181\n\n```\n{% /details %}\n##### TLS with self-signed certificate\n\nZookeeper with TLS and self-signed certificate.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:2181\n use_tls: yes\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:2181\n\n - name: remote\n address: 192.0.2.1:2181\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `zookeeper` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m zookeeper\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per ZooKeeper instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| zookeeper.requests | outstanding | requests |\n| zookeeper.requests_latency | min, avg, max | ms |\n| zookeeper.connections | alive | connections |\n| zookeeper.packets | received, sent | pps |\n| zookeeper.file_descriptor | open | file descriptors |\n| zookeeper.nodes | znode, ephemerals | nodes |\n| zookeeper.watches | watches | watches |\n| zookeeper.approximate_data_size | size | KiB |\n| zookeeper.server_state | state | state |\n\n",integration_type:"collector",id:"go.d.plugin-zookeeper-ZooKeeper",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/collectors/go.d.plugin/modules/zookeeper/metadata.yaml",related_resources:""},{meta:{plugin_name:"idlejitter.plugin",module_name:"idlejitter.plugin",monitored_instance:{name:"Idle OS Jitter",link:"",categories:["data-collection.synthetic-checks"],icon_filename:"syslog.png"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["latency","jitter"],most_popular:!1},overview:"# Idle OS Jitter\n\nPlugin: idlejitter.plugin\nModule: idlejitter.plugin\n\n## Overview\n\nMonitor delays in timing for user processes caused by scheduling limitations to optimize the system to run latency-sensitive applications with minimal jitter, improving consistency and quality of service.\n\n\nA thread is spawned that requests to sleep for a fixed amount of time. When the system wakes it up, it measures how many microseconds have passed. The difference between the requested and the actual duration of the sleep is the idle jitter. This is done dozens of times per second to ensure we have a representative sample.\n
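\nThe idea can be sketched in a few lines of shell (a rough illustration only; it also counts the startup overhead of `date` and `sleep`, which the plugin avoids):\n\n```bash\n# request a 20 ms sleep, then measure how long the sleep really took\nt0=$(date +%s%N)\nsleep 0.020\nt1=$(date +%s%N)\n# difference between actual and requested duration, in microseconds\necho $(( (t1 - t0) / 1000 - 20000 ))\n```\n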
\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration will run by default on all supported systems.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nThis integration only supports a single configuration option, and most users will not need to change it.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| loop time in ms | Specifies the target time for the data collection thread to sleep, measured in milliseconds. | 20 | no |\n\n#### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Idle OS Jitter instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.idlejitter | min, max, average | microseconds lost/s |\n\n",integration_type:"collector",id:"idlejitter.plugin-idlejitter.plugin-Idle_OS_Jitter",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/idlejitter.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"ioping.plugin",module_name:"ioping.plugin",monitored_instance:{name:"IOPing",link:"https://github.com/koct9i/ioping",categories:["data-collection.synthetic-checks"],icon_filename:"syslog.png"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[],most_popular:!1},overview:"# IOPing\n\nPlugin: ioping.plugin\nModule: ioping.plugin\n\n## Overview\n\nMonitor IOPing metrics for efficient disk I/O latency tracking. Keep track of read/write speeds, latency, and error rates for optimized disk operations.\n\nThe plugin uses the `ioping` command.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install ioping\n\nYou can install the command by passing the argument `install` to the plugin (`/usr/libexec/netdata/plugins.d/ioping.plugin install`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ioping.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ioping.conf\n```\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1s | no |\n| destination | The directory/file/device to ioping. | | yes |\n| request_size | The request size in bytes to ioping the destination (symbolic modifiers are supported). | 4k | no |\n| ioping_opts | Options passed to `ioping` commands. 
| -T 1000000 | no |\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\nThis example has the minimum configuration necessary to have the plugin running.\n\n{% details open=true summary="Config" %}\n```yaml\ndestination="/dev/sda"\n\n```\n{% /details %}\n
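To sanity-check a destination before configuring it, you can run the tool by hand (assuming `ioping` is already installed):\n\n```bash\n# three timed I/O requests against the current directory\nioping -c 3 .\n```\n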
',troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ ioping_disk_latency ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ioping.conf) | ioping.latency | average I/O latency over the last 10 seconds |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per disk\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ioping.latency | latency | microseconds |\n\n",integration_type:"collector",id:"ioping.plugin-ioping.plugin-IOPing",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/ioping.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"macos.plugin",module_name:"mach_smi",monitored_instance:{name:"macOS",link:"https://www.apple.com/macos",categories:["data-collection.macos-systems"],icon_filename:"macos.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["macos","apple","darwin"],most_popular:!1},overview:"# macOS\n\nPlugin: macos.plugin\nModule: mach_smi\n\n## Overview\n\nMonitor macOS metrics for efficient operating system performance.\n\nThe plugin uses three different methods to collect data:\n - The function `sysctlbyname` is called to collect network, swap, loadavg, and boot time.\n - The function `host_statistic` is called to collect CPU and Virtual memory data.\n - The function `IOServiceGetMatchingServices` is called to collect storage information.\n\n\nThis collector is only supported on the following platforms:\n\n- macOS\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nThere are three sections in the file which you can configure:\n\n- `[plugin:macos:sysctl]` - Enable or disable monitoring for network, swap, loadavg, and boot time.\n- `[plugin:macos:mach_smi]` - Enable or disable monitoring for CPU and Virtual memory.\n- `[plugin:macos:iokit]` - Enable or disable monitoring for storage devices.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enable load average | Enable or disable monitoring of load average metrics (load1, load5, load15). | yes | no |\n| system swap | Enable or disable monitoring of system swap metrics (free, used). | yes | no |\n| bandwidth | Enable or disable monitoring of network bandwidth metrics (received, sent). | yes | no |\n| ipv4 TCP packets | Enable or disable monitoring of IPv4 TCP total packets metrics (received, sent). | yes | no |\n| ipv4 TCP errors | Enable or disable monitoring of IPv4 TCP packets metrics (Input Errors, Checksum, Retransmission segments). | yes | no |\n| ipv4 TCP handshake issues | Enable or disable monitoring of IPv4 TCP handshake metrics (Established Resets, Active Opens, Passive Opens, Attempt Fails). | yes | no |\n| ECN packets | Enable or disable monitoring of ECN statistics metrics (InCEPkts, InNoECTPkts). | auto | no |\n| TCP SYN cookies | Enable or disable monitoring of TCP SYN cookies metrics (received, sent, failed). | auto | no |\n| TCP out-of-order queue | Enable or disable monitoring of TCP out-of-order queue metrics (inqueue). | auto | no |\n| TCP connection aborts | Enable or disable monitoring of TCP connection aborts metrics (Bad Data, User closed, No memory, Timeout). | auto | no |\n| ipv4 UDP packets | Enable or disable monitoring of ipv4 UDP packets metrics (sent, received). | yes | no |\n| ipv4 UDP errors | Enable or disable monitoring of ipv4 UDP errors metrics (Received Buffer error, Input Errors, No Ports, IN Checksum Errors, Ignore Multi). | yes | no |\n| ipv4 icmp packets | Enable or disable monitoring of IPv4 ICMP packets metrics (sent, received, in error, OUT error, IN Checksum error). | yes | no |\n| ipv4 icmp messages | Enable or disable monitoring of ipv4 ICMP messages metrics (I/O messages, I/O Errors, In Checksum). | yes | no |\n| ipv4 packets | Enable or disable monitoring of ipv4 packets metrics (received, sent, forwarded, delivered). | yes | no |\n| ipv4 fragments sent | Enable or disable monitoring of IPv4 fragments sent metrics (ok, fails, creates). | yes | no |\n| ipv4 fragments assembly | Enable or disable monitoring of IPv4 fragments assembly metrics (ok, failed, all). | yes | no |\n| ipv4 errors | Enable or disable monitoring of IPv4 errors metrics (I/O discard, I/O HDR errors, In Addr errors, In Unknown protos, OUT No Routes). | yes | no |\n| ipv6 packets | Enable or disable monitoring of IPv6 packets metrics (received, sent, forwarded, delivered). | auto | no |\n| ipv6 fragments sent | Enable or disable monitoring of IPv6 fragments sent metrics (ok, failed, all). 
| auto | no |\n| ipv6 fragments assembly | Enable or disable monitoring of IPv6 fragments assembly metrics (ok, failed, timeout, all). | auto | no |\n| ipv6 errors | Enable or disable monitoring of IPv6 errors metrics (I/O Discards, In Hdr Errors, In Addr Errors, In Truncated Packets, I/O No Routes). | auto | no |\n| icmp | Enable or disable monitoring of ICMP metrics (sent, received). | auto | no |\n| icmp redirects | Enable or disable monitoring of ICMP redirects metrics (received, sent). | auto | no |\n| icmp errors | Enable or disable monitoring of ICMP metrics (I/O Errors, In Checksums, In Destination Unreachable, In Packet too big, In Time Exceeds, In Parm Problem, Out Dest Unreachable, Out Time Exceeds, Out Parm Problems). | auto | no |\n| icmp echos | Enable or disable monitoring of ICMP echos metrics (I/O Echos, I/O Echo Reply). | auto | no |\n| icmp router | Enable or disable monitoring of ICMP router metrics (I/O Solicits, I/O Advertisements). | auto | no |\n| icmp neighbor | Enable or disable monitoring of ICMP neighbor metrics (I/O Solicits, I/O Advertisements). | auto | no |\n| icmp types | Enable or disable monitoring of ICMP types metrics (I/O Type1, I/O Type128, I/O Type129, Out Type133, Out Type135, In Type136, Out Type145). | auto | no |\n| space usage for all disks | Enable or disable monitoring of space usage for all disks metrics (available, used, reserved for root). | yes | no |\n| inodes usage for all disks | Enable or disable monitoring of inodes usage for all disks metrics (available, used, reserved for root). | yes | no |\n| bandwidth | Enable or disable monitoring of bandwidth metrics (received, sent). | yes | no |\n| system uptime | Enable or disable monitoring of system uptime metrics (uptime). | yes | no |\n| cpu utilization | Enable or disable monitoring of CPU utilization metrics (user, nice, system, idle). | yes | no |\n| system ram | Enable or disable monitoring of system RAM metrics (Active, Wired, throttled, compressor, inactive, purgeable, speculative, free). | yes | no |\n| swap i/o | Enable or disable monitoring of SWAP I/O metrics (I/O Swap). | yes | no |\n| memory page faults | Enable or disable monitoring of memory page faults metrics (memory, cow, I/O page, compress, decompress, zero fill, reactivate, purge). | yes | no |\n| disk i/o | Enable or disable monitoring of disk I/O metrics (In, Out). | yes | no |\n\n{% /details %}\n#### Examples\n\n##### Disable swap monitoring.\n\nA basic example that disables swap monitoring.\n\n{% details open=true summary="Config" %}\n```yaml\n[plugin:macos:sysctl]\n system swap = no\n[plugin:macos:mach_smi]\n swap i/o = no\n\n```\n{% /details %}\n##### Disable the complete mach_smi section.\n\nA basic example that disables the entire `mach_smi` section.\n\n{% details open=true summary="Config" %}\n```yaml\n[plugin:macos:mach_smi]\n cpu utilization = no\n system ram = no\n swap i/o = no\n memory page faults = no\n disk i/o = no\n\n```\n{% /details %}\n',troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ interface_speed ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.net | network interface ${label:device} current speed |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per macOS instance\n\nThese metrics refer to hardware and network monitoring.\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.cpu | user, nice, system, idle | percentage |\n| system.ram | active, wired, throttled, compressor, inactive, purgeable, speculative, free | MiB |\n| mem.swapio | io, out | KiB/s |\n| mem.pgfaults | memory, cow, pagein, pageout, compress, decompress, zero_fill, reactivate, purge | faults/s |\n| system.load | load1, load5, load15 | load |\n| mem.swap | free, used | MiB |\n| system.ipv4 | received, sent | kilobits/s |\n| ipv4.tcppackets | received, sent | packets/s |\n| ipv4.tcperrors | InErrs, InCsumErrors, RetransSegs | packets/s |\n| ipv4.tcphandshake | EstabResets, ActiveOpens, PassiveOpens, AttemptFails | events/s |\n| ipv4.tcpconnaborts | baddata, userclosed, nomemory, timeout | connections/s |\n| ipv4.tcpofo | inqueue | packets/s |\n| ipv4.tcpsyncookies | received, sent, failed | packets/s |\n| ipv4.ecnpkts | CEP, NoECTP | packets/s |\n| ipv4.udppackets | received, sent | packets/s |\n| ipv4.udperrors | RcvbufErrors, InErrors, NoPorts, InCsumErrors, IgnoredMulti | events/s |\n| ipv4.icmp | received, sent | packets/s |\n| ipv4.icmp_errors | InErrors, OutErrors, InCsumErrors | packets/s |\n| ipv4.icmpmsg | InEchoReps, OutEchoReps, InEchos, OutEchos | packets/s |\n| ipv4.packets | received, sent, forwarded, delivered | packets/s |\n| ipv4.fragsout | ok, failed, created | packets/s |\n| ipv4.fragsin | ok, failed, all | packets/s |\n| ipv4.errors | InDiscards, OutDiscards, InHdrErrors, OutNoRoutes, InAddrErrors, InUnknownProtos | packets/s |\n| ipv6.packets | received, sent, forwarded, delivers | packets/s |\n| ipv6.fragsout | ok, failed, all | packets/s |\n| ipv6.fragsin | ok, failed, timeout, all | packets/s |\n| ipv6.errors | InDiscards, OutDiscards, InHdrErrors, InAddrErrors, InTruncatedPkts, InNoRoutes, OutNoRoutes | packets/s |\n| ipv6.icmp | received, sent | messages/s |\n| ipv6.icmpredir | received, sent | redirects/s |\n| ipv6.icmperrors | InErrors, OutErrors, InCsumErrors, InDestUnreachs, InPktTooBigs, InTimeExcds, InParmProblems, OutDestUnreachs, OutTimeExcds, OutParmProblems | errors/s |\n| ipv6.icmpechos | InEchos, OutEchos, InEchoReplies, OutEchoReplies | messages/s |\n| ipv6.icmprouter | InSolicits, OutSolicits, InAdvertisements, OutAdvertisements | messages/s |\n| ipv6.icmpneighbor | InSolicits, OutSolicits, InAdvertisements, OutAdvertisements | messages/s |\n| ipv6.icmptypes | InType1, InType128, InType129, InType136, OutType1, OutType128, OutType129, OutType133, OutType135, OutType143 | messages/s |\n| system.uptime | uptime | seconds |\n| system.io | in, out | KiB/s |\n\n### Per disk\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.io | read, writes | KiB/s |\n| disk.ops | read, writes | operations/s |\n| disk.util | utilization | % of time working |\n| disk.iotime | reads, writes | milliseconds/s |\n| disk.await | reads, writes | milliseconds/operation |\n| disk.avgsz | reads, writes | KiB/operation |\n| disk.svctm | svctm | milliseconds/operation |\n\n### Per mount point\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.space | avail, used, reserved_for_root | GiB |\n| disk.inodes | avail, used, reserved_for_root | inodes |\n\n### Per network device\n\n\n\nThis scope has no 
labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| net.net | received, sent | kilobits/s |\n| net.packets | received, sent, multicast_received, multicast_sent | packets/s |\n| net.errors | inbound, outbound | errors/s |\n| net.drops | inbound | drops/s |\n| net.events | frames, collisions, carrier | events/s |\n\n",integration_type:"collector",id:"macos.plugin-mach_smi-macOS",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/macos.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"nfacct.plugin",module_name:"nfacct.plugin",monitored_instance:{name:"Netfilter",link:"https://www.netfilter.org/",categories:["data-collection.linux-systems.firewall-metrics"],icon_filename:"netfilter.png"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[],most_popular:!1},overview:"# Netfilter\n\nPlugin: nfacct.plugin\nModule: nfacct.plugin\n\n## Overview\n\nMonitor Netfilter metrics for optimal packet filtering and manipulation. Keep tabs on packet counts, dropped packets, and error rates to secure network operations.\n\nNetdata uses libmnl (https://www.netfilter.org/projects/libmnl/index.html) to collect information.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThis plugin needs setuid.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis plugin uses a socket to connect with netfilter to collect data.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install required packages\n\nInstall `libmnl-dev` and `libnetfilter-acct-dev` using the package manager of your system.\n\n
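For example, on Debian or Ubuntu (package names can differ on other distributions):\n\n```bash\nsudo apt-get install libmnl-dev libnetfilter-acct-dev\n```\n\n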
### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:nfacct]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Additional parameters for the collector. | | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Netfilter instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netfilter.netlink_new | new, ignore, invalid | connections/s |\n| netfilter.netlink_changes | insert, delete, delete_list | changes/s |\n| netfilter.netlink_search | searched, search_restart, found | searches/s |\n| netfilter.netlink_errors | icmp_error, insert_failed, drop, early_drop | events/s |\n| netfilter.netlink_expect | created, deleted, new | expectations/s |\n| netfilter.nfacct_packets | a dimension per nfacct object | packets/s |\n| netfilter.nfacct_bytes | a dimension per nfacct object | kilobytes/s |\n\n",integration_type:"collector",id:"nfacct.plugin-nfacct.plugin-Netfilter",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/nfacct.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"perf.plugin",module_name:"perf.plugin",monitored_instance:{name:"CPU performance",link:"https://kernel.org/",categories:["data-collection.linux-systems"],icon_filename:"bolt.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["linux","cpu performance","cpu cache","perf.plugin"],most_popular:!1},overview:"# CPU performance\n\nPlugin: perf.plugin\nModule: perf.plugin\n\n## Overview\n\nThis collector monitors CPU performance metrics about cycles, instructions, migrations, cache operations and more.\n\nIt uses `syscall(2)` to open a file descriptor to monitor the perf events.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nIt needs setuid to use the necessary syscall to collect perf events. Netdata sets the permission during installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Install perf plugin\n\nIf you are [using our official native DEB/RPM packages](/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure the `netdata-plugin-perf` package is installed.\n\n\n#### Enable the perf plugin\n\nThe plugin is disabled by default because the number of PMUs is usually quite limited, and it is not desirable to let Netdata silently compete for PMUs with other performance monitoring software.\n\nTo enable it, use `edit-config` from the Netdata [config directory](/docs/netdata-agent/configuration/README.md), which is typically at `/etc/netdata`, to edit the `netdata.conf` file.\n\n```bash\ncd /etc/netdata # Replace this path with your Netdata config directory, if different\nsudo ./edit-config netdata.conf\n```\n\nChange the value of the `perf` setting to `yes` in the `[plugins]` section. 
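For example, you can confirm the setting before restarting (a quick check, assuming the default config path):\n\n```bash\ngrep -w perf /etc/netdata/netdata.conf\n```\n\n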
Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](/packaging/installer/README.md#maintaining-a-netdata-agent-installation) for your system.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:perf]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nYou can get the available options by running:\n\n```bash\n/usr/libexec/netdata/plugins.d/perf.plugin --help\n```\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Command options that specify charts shown by the plugin. `cycles`, `instructions`, `branch`, `cache`, `bus`, `stalled`, `migrations`, `alignment`, `emulation`, `L1D`, `L1D-prefetch`, `L1I`, `LL`, `DTLB`, `ITLB`, `PBU`. | 1 | yes |\n\n{% /details %}\n#### Examples\n\n##### All metrics\n\nMonitor all metrics available.\n\n```yaml\n[plugin:perf]\n command options = all\n\n```\n##### CPU cycles\n\nMonitor CPU cycles.\n\n{% details open=true summary="Config" %}\n```yaml\n[plugin:perf]\n command options = cycles\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per CPU performance instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| perf.cpu_cycles | cpu, ref_cpu | cycles/s |\n| perf.instructions | instructions | instructions/s |\n| perf.instructions_per_cycle | ipc | instructions/cycle |\n| perf.branch_instructions | instructions, misses | instructions/s |\n| perf.cache | references, misses | operations/s |\n| perf.bus_cycles | bus | cycles/s |\n| perf.stalled_cycles | frontend, backend | cycles/s |\n| perf.migrations | migrations | migrations |\n| perf.alignment_faults | faults | faults |\n| perf.emulation_faults | faults | faults |\n| perf.l1d_cache | read_access, read_misses, write_access, write_misses | events/s |\n| perf.l1d_cache_prefetch | prefetches | prefetches/s |\n| perf.l1i_cache | read_access, read_misses | events/s |\n| perf.ll_cache | read_access, read_misses, write_access, write_misses | events/s |\n| perf.dtlb_cache | read_access, read_misses, write_access, write_misses | events/s |\n| perf.itlb_cache | read_access, read_misses | events/s |\n| perf.pbu_cache | read_access | events/s |\n\n",integration_type:"collector",id:"perf.plugin-perf.plugin-CPU_performance",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/perf.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/proc/diskstats",monitored_instance:{name:"Disk Statistics",link:"",categories:["data-collection.linux-systems.disk-metrics"],icon_filename:"hard-drive.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["disk","disks","io","bcache","block devices"],most_popular:!1},overview:"# Disk Statistics\n\nPlugin: proc.plugin\nModule: /proc/diskstats\n\n## Overview\n\nDetailed statistics for each of your system's disk devices and partitions.\nThe data is reported by the kernel and can be used to monitor disk activity on a Linux system.\n\nGet valuable insight into how your disks are performing and where potential bottlenecks might be.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 10min_disk_backlog ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.backlog | average backlog size of the ${label:device} disk over the last 10 minutes |\n| [ 10min_disk_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.util | average percentage of time ${label:device} disk was busy over the last 10 minutes |\n| 
[ bcache_cache_dirty ](https://github.com/netdata/netdata/blob/master/src/health/health.d/bcache.conf) | disk.bcache_cache_alloc | percentage of cache space used for dirty data and metadata (this usually means your SSD cache is too small) |\n| [ bcache_cache_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/bcache.conf) | disk.bcache_cache_read_races | number of times data was read from the cache, the bucket was reused and invalidated in the last 10 minutes (when this occurs the data is reread from the backing device) |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Disk Statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.io | in, out | KiB/s |\n\n### Per disk\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | TBD |\n| mount_point | TBD |\n| device_type | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.io | reads, writes | KiB/s |\n| disk_ext.io | discards | KiB/s |\n| disk.ops | reads, writes | operations/s |\n| disk_ext.ops | discards, flushes | operations/s |\n| disk.qops | operations | operations |\n| disk.backlog | backlog | milliseconds |\n| disk.busy | busy | milliseconds |\n| disk.util | utilization | % of time working |\n| disk.mops | reads, writes | merged operations/s |\n| disk_ext.mops | discards | merged operations/s |\n| disk.iotime | reads, writes | milliseconds/s |\n| disk_ext.iotime | discards, flushes | milliseconds/s |\n| disk.await | reads, writes | milliseconds/operation |\n| disk_ext.await | discards, flushes | milliseconds/operation |\n| disk.avgsz | reads, writes | KiB/operation |\n| disk_ext.avgsz | discards | KiB/operation |\n| disk.svctm | svctm | milliseconds/operation |\n| disk.bcache_cache_alloc | ununsed, dirty, clean, metadata, undefined | percentage |\n| disk.bcache_hit_ratio | 5min, 1hour, 1day, ever | percentage |\n| disk.bcache_rates | congested, writeback | KiB/s |\n| disk.bcache_size | dirty | MiB |\n| disk.bcache_usage | avail | percentage |\n| disk.bcache_cache_read_races | races, errors | operations/s |\n| disk.bcache | hits, misses, collisions, readaheads | operations/s |\n| disk.bcache_bypass | hits, misses | operations/s |\n\n",integration_type:"collector",id:"proc.plugin-/proc/diskstats-Disk_Statistics",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/proc/interrupts",monitored_instance:{name:"Interrupts",link:"",categories:["data-collection.linux-systems.cpu-metrics"],icon_filename:"linuxserver.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["interrupts"],most_popular:!1},overview:"# Interrupts\n\nPlugin: proc.plugin\nModule: /proc/interrupts\n\n## Overview\n\nMonitors `/proc/interrupts`, a file organized by CPU and then by the type of interrupt.\nThe numbers reported are the counts of the interrupts that have occurred of each type.\n\nAn interrupt is a signal to the processor emitted by hardware or software indicating an event that needs\nimmediate attention. The processor then interrupts its current activities and executes the interrupt handler\nto deal with the event. 
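As a quick look at the raw data this collector parses (read-only, assuming a Linux shell):\n\n```bash\n# one row per interrupt source, one counter column per CPU\nhead -5 /proc/interrupts\n```\n\n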
This is part of the way a computer multitasks and handles concurrent processing.\n\nThe types of interrupts include:\n\n- **I/O interrupts**: These are caused by I/O devices like the keyboard, mouse, printer, etc. For example, when\n you type something on the keyboard, an interrupt is triggered so the processor can handle the new input.\n\n- **Timer interrupts**: These are generated at regular intervals by the system's timer circuit. It's primarily\n used to switch the CPU among different tasks.\n\n- **Software interrupts**: These are generated by a program requiring disk I/O operations, or other system resources.\n\n- **Hardware interrupts**: These are caused by hardware conditions such as power failure, overheating, etc.\n\nMonitoring `/proc/interrupts` can be used for:\n\n- **Performance tuning**: If an interrupt is happening very frequently, it could be a sign that a device is not\n configured correctly, or there is a software bug causing unnecessary interrupts. This could lead to system\n performance degradation.\n\n- **System troubleshooting**: If you're seeing a lot of unexpected interrupts, it could be a sign of a hardware problem.\n\n- **Understanding system behavior**: More generally, keeping an eye on what interrupts are occurring can help you\n understand what your system is doing. It can provide insights into the system's interaction with hardware,\n drivers, and other parts of the kernel.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Interrupts instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.interrupts | a dimension per device | interrupts/s |\n\n### Per cpu core\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cpu | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.interrupts | a dimension per device | interrupts/s |\n\n",integration_type:"collector",id:"proc.plugin-/proc/interrupts-Interrupts",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/proc/loadavg",monitored_instance:{name:"System Load Average",link:"",categories:["data-collection.linux-systems.system-metrics"],icon_filename:"linuxserver.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["load","load average"],most_popular:!1},overview:"# System Load Average\n\nPlugin: proc.plugin\nModule: /proc/loadavg\n\n## Overview\n\nThe `/proc/loadavg` file provides information about the system load average.\n\nThe load average is a measure of the amount of computational work that a system performs. It is a\nrepresentation of the average system load over a period of time.\n\nThis file contains three numbers representing the system load averages for the last 1, 5, and 15 minutes,\nrespectively. It also includes the currently running processes and the total number of processes.\n
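\nFor reference, the raw file is a single line (a read-only peek, assuming a Linux shell):\n\n```bash\n# three load averages (1, 5, 15 min), runnable/total tasks, last PID\ncat /proc/loadavg\n```\n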
\nMonitoring the load average can be used for:\n\n- **System performance**: If the load average is too high, it may indicate that your system is overloaded.\n On a system with a single CPU, if the load average is 1, it means the single CPU is fully utilized. If the\n load averages are consistently higher than the number of CPUs/cores, it may indicate that your system is\n overloaded and tasks are waiting for CPU time.\n\n- **Troubleshooting**: If the load average is unexpectedly high, it can be a sign of a problem. This could be\n due to a runaway process, a software bug, or a hardware issue.\n\n- **Capacity planning**: By monitoring the load average over time, you can understand the trends in your\n system's workload. This can help with capacity planning and scaling decisions.\n\nRemember that load average not only considers CPU usage, but also includes processes waiting for disk I/O.\nTherefore, high load averages could be due to I/O contention as well as CPU contention.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ load_cpu_number ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | number of active CPU cores in the system |\n| [ load_average_15 ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | system fifteen-minute load average |\n| [ load_average_5 ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | system five-minute load average |\n| [ load_average_1 ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | system one-minute load average |\n| [ active_processes ](https://github.com/netdata/netdata/blob/master/src/health/health.d/processes.conf) | system.active_processes | system process IDs (PID) space utilization |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per System Load Average instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.load | load1, load5, load15 | load |\n| system.active_processes | active | processes |\n\n",integration_type:"collector",id:"proc.plugin-/proc/loadavg-System_Load_Average",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/proc/mdstat",monitored_instance:{name:"MD RAID",link:"",categories:["data-collection.linux-systems.disk-metrics"],icon_filename:"hard-drive.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["raid","mdadm","mdstat","raid"],most_popular:!1},overview:"# MD RAID\n\nPlugin: proc.plugin\nModule: /proc/mdstat\n\n## Overview\n\nThis integration monitors the status of MD RAID devices.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ mdstat_last_collected ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mdstat.conf) | md.disks | number of seconds since the last successful data collection |\n| [ mdstat_disks ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mdstat.conf) | md.disks | number of devices in the down state for the ${label:device} ${label:raid_level} array. Any number > 0 indicates that the array is degraded. |\n| [ mdstat_mismatch_cnt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mdstat.conf) | md.mismatch_cnt | number of unsynchronized blocks for the ${label:device} ${label:raid_level} array |\n| [ mdstat_nonredundant_last_collected ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mdstat.conf) | md.nonredundant | number of seconds since the last successful data collection |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per MD RAID instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| md.health | a dimension per md array | failed disks |\n\n### Per md array\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | TBD |\n| raid_level | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| md.disks | inuse, down | disks |\n| md.mismatch_cnt | count | unsynchronized blocks |\n| md.status | check, resync, recovery, reshape | percent |\n| md.expected_time_until_operation_finish | finish_in | seconds |\n| md.operation_speed | speed | KiB/s |\n| md.nonredundant | available | boolean |\n\n",integration_type:"collector",id:"proc.plugin-/proc/mdstat-MD_RAID",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/proc/meminfo",monitored_instance:{name:"Memory Usage",link:"",categories:["data-collection.linux-systems.memory-metrics"],icon_filename:"linuxserver.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["memory","ram","available","committed"],most_popular:!1},overview:"# Memory Usage\n\nPlugin: proc.plugin\nModule: /proc/meminfo\n\n## Overview\n\n`/proc/meminfo` provides detailed information about the system's current memory usage. It includes information\nabout different types of memory, RAM, Swap, ZSwap, HugePages, Transparent HugePages (THP), Kernel memory,\nSLAB memory, memory mappings, and more.\n\nMonitoring /proc/meminfo can be useful for:\n\n- **Performance Tuning**: Understanding your system's memory usage can help you make decisions about system\n tuning and optimization. For example, if your system is frequently low on free memory, it might benefit\n from more RAM.\n\n- **Troubleshooting**: If your system is experiencing problems, `/proc/meminfo` can provide clues about\n whether memory usage is a factor. 
For example, if your system is slow and cached swap is high, it could\n mean that your system is swapping out a lot of memory to disk, which can degrade performance.\n\n- **Capacity Planning**: By monitoring memory usage over time, you can understand trends and make informed\n decisions about future capacity needs.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ram.conf) | system.ram | system memory utilization |\n| [ ram_available ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ram.conf) | mem.available | percentage of estimated amount of RAM available for userspace processes, without causing swapping |\n| [ used_swap ](https://github.com/netdata/netdata/blob/master/src/health/health.d/swap.conf) | mem.swap | swap memory utilization |\n| [ 1hour_memory_hw_corrupted ](https://github.com/netdata/netdata/blob/master/src/health/health.d/memory.conf) | mem.hwcorrupt | amount of memory corrupted due to a hardware failure |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Memory Usage instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ram | free, used, cached, buffers | MiB |\n| mem.available | avail | MiB |\n| mem.swap | free, used | MiB |\n| mem.swap_cached | cached | MiB |\n| mem.zswap | in-ram, on-disk | MiB |\n| mem.hwcorrupt | HardwareCorrupted | MiB |\n| mem.commited | Commited_AS | MiB |\n| mem.writeback | Dirty, Writeback, FuseWriteback, NfsWriteback, Bounce | MiB |\n| mem.kernel | Slab, KernelStack, PageTables, VmallocUsed, Percpu | MiB |\n| mem.slab | reclaimable, unreclaimable | MiB |\n| mem.hugepages | free, used, surplus, reserved | MiB |\n| mem.thp | anonymous, shmem | MiB |\n| mem.thp_details | ShmemPmdMapped, FileHugePages, FilePmdMapped | MiB |\n| mem.reclaiming | Active, Inactive, Active(anon), Inactive(anon), Active(file), Inactive(file), Unevictable, Mlocked | MiB |\n| mem.high_low | high_used, low_used, high_free, low_free | MiB |\n| mem.cma | used, free | MiB |\n| mem.directmaps | 4k, 2m, 4m, 1g | MiB |\n\n",integration_type:"collector",id:"proc.plugin-/proc/meminfo-Memory_Usage",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/proc/net/dev",monitored_instance:{name:"Network interfaces",link:"",categories:["data-collection.linux-systems.network-metrics"],icon_filename:"network-wired.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["network interfaces"],most_popular:!1},overview:"# Network interfaces\n\nPlugin: proc.plugin\nModule: /proc/net/dev\n\n## Overview\n\nMonitor network interface metrics about bandwidth, state, errors and more.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ interface_speed ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.net | network interface ${label:device} current speed |\n| [ 1m_received_traffic_overflow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.net | average inbound utilization for the network interface ${label:device} over the last minute |\n| [ 1m_sent_traffic_overflow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.net | average outbound utilization for the network interface ${label:device} over the last minute |\n| [ inbound_packets_dropped_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.drops | ratio of inbound dropped packets 
for the network interface ${label:device} over the last 10 minutes |\n| [ outbound_packets_dropped_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.drops | ratio of outbound dropped packets for the network interface ${label:device} over the last 10 minutes |\n| [ wifi_inbound_packets_dropped_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.drops | ratio of inbound dropped packets for the network interface ${label:device} over the last 10 minutes |\n| [ wifi_outbound_packets_dropped_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.drops | ratio of outbound dropped packets for the network interface ${label:device} over the last 10 minutes |\n| [ 1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ 10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n| [ 10min_fifo_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.fifo | number of FIFO errors for the network interface ${label:device} in the last 10 minutes |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Network interfaces instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.net | received, sent | kilobits/s |\n\n### Per network device\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| interface_type | TBD |\n| device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| net.net | received, sent | kilobits/s |\n| net.speed | speed | kilobits/s |\n| net.duplex | full, half, unknown | state |\n| net.operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| net.carrier | up, down | state |\n| net.mtu | mtu | octets |\n| net.packets | received, sent, multicast | packets/s |\n| net.errors | inbound, outbound | errors/s |\n| net.drops | inbound, outbound | drops/s |\n| net.fifo | receive, transmit | errors |\n| net.compressed | received, sent | packets/s |\n| net.events | frames, collisions, carrier | events/s |\n\n",integration_type:"collector",id:"proc.plugin-/proc/net/dev-Network_interfaces",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/proc/net/ip_vs_stats",monitored_instance:{name:"IP Virtual Server",link:"",categories:["data-collection.linux-systems.network-metrics"],icon_filename:"network-wired.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["ip virtual server"],most_popular:!1},overview:"# IP Virtual Server\n\nPlugin: proc.plugin\nModule: /proc/net/ip_vs_stats\n\n## Overview\n\nThis integration monitors IP Virtual Server statistics\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote 
instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per IP Virtual Server instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipvs.sockets | connections | connections/s |\n| ipvs.packets | received, sent | packets/s |\n| ipvs.net | received, sent | kilobits/s |\n\n",integration_type:"collector",id:"proc.plugin-/proc/net/ip_vs_stats-IP_Virtual_Server",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/proc/net/netstat",monitored_instance:{name:"Network statistics",link:"",categories:["data-collection.linux-systems.network-metrics"],icon_filename:"network-wired.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["ip","udp","udplite","icmp","netstat","snmp"],most_popular:!1},overview:"# Network statistics\n\nPlugin: proc.plugin\nModule: /proc/net/netstat\n\n## Overview\n\nThis integration provides metrics from the `netstat`, `snmp` and `snmp6` modules.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 1m_tcp_syn_queue_drops ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_listen.conf) | ip.tcp_syn_queue | average number of SYN requests was dropped due to the full TCP SYN queue over the last minute (SYN cookies were not enabled) |\n| [ 1m_tcp_syn_queue_cookies ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_listen.conf) | ip.tcp_syn_queue | average number of sent SYN cookies due to the full TCP SYN queue over the last minute |\n| [ 1m_tcp_accept_queue_overflows ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_listen.conf) | ip.tcp_accept_queue | 
average number of overflows in the TCP accept queue over the last minute |\n| [ 1m_tcp_accept_queue_drops ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_listen.conf) | ip.tcp_accept_queue | average number of dropped packets in the TCP accept queue over the last minute |\n| [ tcp_connections ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_conn.conf) | ip.tcpsock | TCP connections utilization |\n| [ 1m_ip_tcp_resets_sent ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ip.tcphandshake | average number of sent TCP RESETS over the last minute |\n| [ 10s_ip_tcp_resets_sent ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ip.tcphandshake | average number of sent TCP RESETS over the last 10 seconds. This can indicate a port scan, or that a service running on this host has crashed. Netdata will not send a clear notification for this alarm. |\n| [ 1m_ip_tcp_resets_received ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ip.tcphandshake | average number of received TCP RESETS over the last minute |\n| [ 10s_ip_tcp_resets_received ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ip.tcphandshake | average number of received TCP RESETS over the last 10 seconds. This can be an indication that a service this host needs has crashed. Netdata will not send a clear notification for this alarm. |\n| [ 1m_ipv4_udp_receive_buffer_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/udp_errors.conf) | ipv4.udperrors | average number of UDP receive buffer errors over the last minute |\n| [ 1m_ipv4_udp_send_buffer_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/udp_errors.conf) | ipv4.udperrors | average number of UDP send buffer errors over the last minute |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Network statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ip | received, sent | kilobits/s |\n| ip.tcpmemorypressures | pressures | events/s |\n| ip.tcpconnaborts | baddata, userclosed, nomemory, timeout, linger, failed | connections/s |\n| ip.tcpreorders | timestamp, sack, fack, reno | packets/s |\n| ip.tcpofo | inqueue, dropped, merged, pruned | packets/s |\n| ip.tcpsyncookies | received, sent, failed | packets/s |\n| ip.tcp_syn_queue | drops, cookies | packets/s |\n| ip.tcp_accept_queue | overflows, drops | packets/s |\n| ip.tcpsock | connections | active connections |\n| ip.tcppackets | received, sent | packets/s |\n| ip.tcperrors | InErrs, InCsumErrors, RetransSegs | packets/s |\n| ip.tcpopens | active, passive | connections/s |\n| ip.tcphandshake | EstabResets, OutRsts, AttemptFails, SynRetrans | events/s |\n| ipv4.packets | received, sent, forwarded, delivered | packets/s |\n| ipv4.errors | InDiscards, OutDiscards, InNoRoutes, OutNoRoutes, InHdrErrors, InAddrErrors, InTruncatedPkts, InCsumErrors | packets/s |\n| ipv4.bcast | received, sent | kilobits/s |\n| ipv4.bcastpkts | received, sent | packets/s |\n| ipv4.mcast | received, sent | kilobits/s |\n| ipv4.mcastpkts | received, sent | packets/s |\n| ipv4.icmp | received, sent | packets/s |\n| ipv4.icmpmsg | InEchoReps, OutEchoReps, InDestUnreachs, OutDestUnreachs, InRedirects, OutRedirects, InEchos, OutEchos, InRouterAdvert, OutRouterAdvert, InRouterSelect, OutRouterSelect, InTimeExcds, OutTimeExcds, InParmProbs, OutParmProbs, InTimestamps, OutTimestamps, InTimestampReps, OutTimestampReps | packets/s |\n| ipv4.icmp_errors | InErrors, OutErrors, InCsumErrors | packets/s |\n| ipv4.udppackets | received, sent | packets/s |\n| ipv4.udperrors | RcvbufErrors, SndbufErrors, InErrors, NoPorts, InCsumErrors, IgnoredMulti | events/s |\n| ipv4.udplite | received, sent | packets/s |\n| ipv4.udplite_errors | RcvbufErrors, SndbufErrors, InErrors, NoPorts, InCsumErrors, IgnoredMulti | packets/s |\n| ipv4.ecnpkts | CEP, NoECTP, ECTP0, ECTP1 | packets/s |\n| ipv4.fragsin | ok, failed, all | packets/s |\n| ipv4.fragsout | ok, failed, created | packets/s |\n| system.ipv6 | received, sent | kilobits/s |\n| ipv6.packets | received, sent, forwarded, delivers | packets/s |\n| ipv6.errors | InDiscards, OutDiscards, InHdrErrors, InAddrErrors, InUnknownProtos, InTooBigErrors, InTruncatedPkts, InNoRoutes, OutNoRoutes | packets/s |\n| ipv6.bcast | received, sent | kilobits/s |\n| ipv6.mcast | received, sent | kilobits/s |\n| ipv6.mcastpkts | received, sent | packets/s |\n| ipv6.udppackets | received, sent | packets/s |\n| ipv6.udperrors | RcvbufErrors, SndbufErrors, InErrors, NoPorts, InCsumErrors, IgnoredMulti | events/s |\n| ipv6.udplitepackets | received, sent | packets/s |\n| ipv6.udpliteerrors | RcvbufErrors, SndbufErrors, InErrors, NoPorts, InCsumErrors | events/s |\n| ipv6.icmp | received, sent | messages/s |\n| ipv6.icmpredir | received, sent | redirects/s |\n| ipv6.icmperrors | InErrors, OutErrors, InCsumErrors, InDestUnreachs, InPktTooBigs, InTimeExcds, InParmProblems, OutDestUnreachs, OutPktTooBigs, OutTimeExcds, OutParmProblems | errors/s |\n| ipv6.icmpechos | InEchos, OutEchos, InEchoReplies, OutEchoReplies | messages/s |\n| ipv6.groupmemb | InQueries, OutQueries, InResponses, OutResponses, InReductions, OutReductions | messages/s |\n| ipv6.icmprouter | InSolicits, OutSolicits, 
InAdvertisements, OutAdvertisements | messages/s |\n| ipv6.icmpneighbor | InSolicits, OutSolicits, InAdvertisements, OutAdvertisements | messages/s |\n| ipv6.icmpmldv2 | received, sent | reports/s |\n| ipv6.icmptypes | InType1, InType128, InType129, InType136, OutType1, OutType128, OutType129, OutType133, OutType135, OutType143 | messages/s |\n| ipv6.ect | InNoECTPkts, InECT1Pkts, InECT0Pkts, InCEPkts | packets/s |\n| ipv6.fragsin | ok, failed, timeout, all | packets/s |\n| ipv6.fragsout | ok, failed, all | packets/s |\n\n",integration_type:"collector",id:"proc.plugin-/proc/net/netstat-Network_statistics",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/proc/net/rpc/nfs",monitored_instance:{name:"NFS Client",link:"",categories:["data-collection.linux-systems.filesystem-metrics.nfs"],icon_filename:"nfs.png"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["nfs client","filesystem"],most_popular:!1},overview:"# NFS Client\n\nPlugin: proc.plugin\nModule: /proc/net/rpc/nfs\n\n## Overview\n\nThis integration provides statistics from the Linux kernel's NFS Client.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per NFS Client instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nfs.net | udp, tcp | operations/s |\n| nfs.rpc | calls, retransmits, auth_refresh | calls/s |\n| nfs.proc2 | a dimension per proc2 call | calls/s |\n| nfs.proc3 | a dimension per proc3 call | calls/s |\n| nfs.proc4 | a dimension per proc4 call | calls/s |\n\n",integration_type:"collector",id:"proc.plugin-/proc/net/rpc/nfs-NFS_Client",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/proc/net/rpc/nfsd",monitored_instance:{name:"NFS Server",link:"",categories:["data-collection.linux-systems.filesystem-metrics.nfs"],icon_filename:"nfs.png"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["nfs server","filesystem"],most_popular:!1},overview:"# NFS Server\n\nPlugin: proc.plugin\nModule: /proc/net/rpc/nfsd\n\n## Overview\n\nThis integration provides statistics from the Linux kernel's NFS Server.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per NFS Server instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nfsd.readcache | hits, misses, nocache | reads/s |\n| nfsd.filehandles | stale | handles/s |\n| nfsd.io | read, write | kilobytes/s |\n| nfsd.threads | threads | threads |\n| nfsd.net | udp, tcp | packets/s |\n| nfsd.rpc | calls, bad_format, bad_auth | calls/s |\n| nfsd.proc2 | a dimension per proc2 call | calls/s |\n| nfsd.proc3 | a dimension per proc3 call | calls/s |\n| nfsd.proc4 | a dimension per proc4 call | calls/s |\n| nfsd.proc4ops | a dimension per proc4 operation | operations/s |\n\n",integration_type:"collector",id:"proc.plugin-/proc/net/rpc/nfsd-NFS_Server",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/proc/net/sctp/snmp",monitored_instance:{name:"SCTP Statistics",link:"",categories:["data-collection.linux-systems.network-metrics"],icon_filename:"network-wired.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["sctp","stream control transmission protocol"],most_popular:!1},overview:"# SCTP Statistics\n\nPlugin: proc.plugin\nModule: /proc/net/sctp/snmp\n\n## Overview\n\nThis integration provides statistics about the Stream Control Transmission Protocol (SCTP).\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per SCTP Statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| sctp.established | established | associations |\n| sctp.transitions | active, passive, aborted, shutdown | transitions/s |\n| sctp.packets | received, sent | packets/s |\n| sctp.packet_errors | invalid, checksum | packets/s |\n| sctp.fragmentation | reassembled, fragmented | packets/s |\n\n",integration_type:"collector",id:"proc.plugin-/proc/net/sctp/snmp-SCTP_Statistics",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/proc/net/sockstat",monitored_instance:{name:"Socket statistics",link:"",categories:["data-collection.linux-systems.network-metrics"],icon_filename:"network-wired.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["sockets"],most_popular:!1},overview:"# Socket statistics\n\nPlugin: proc.plugin\nModule: /proc/net/sockstat\n\n## Overview\n\nThis integration provides socket statistics.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ tcp_orphans ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_orphans.conf) | ipv4.sockstat_tcp_sockets | orphan IPv4 TCP sockets utilization |\n| [ tcp_memory ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_mem.conf) | ipv4.sockstat_tcp_mem | TCP memory utilization |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Socket statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ip.sockstat_sockets | used | sockets |\n| ipv4.sockstat_tcp_sockets | alloc, orphan, inuse, timewait | sockets |\n| ipv4.sockstat_tcp_mem | mem | KiB |\n| ipv4.sockstat_udp_sockets | inuse | sockets |\n| ipv4.sockstat_udp_mem | mem | sockets |\n| ipv4.sockstat_udplite_sockets | inuse | sockets |\n| ipv4.sockstat_raw_sockets | inuse | sockets |\n| ipv4.sockstat_frag_sockets | inuse | fragments |\n| ipv4.sockstat_frag_mem | mem | KiB |\n\n",integration_type:"collector",id:"proc.plugin-/proc/net/sockstat-Socket_statistics",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/proc/net/sockstat6",monitored_instance:{name:"IPv6 Socket Statistics",link:"",categories:["data-collection.linux-systems.network-metrics"],icon_filename:"network-wired.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["ipv6 sockets"],most_popular:!1},overview:"# IPv6 Socket Statistics\n\nPlugin: proc.plugin\nModule: /proc/net/sockstat6\n\n## Overview\n\nThis integration provides IPv6 socket statistics.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per IPv6 Socket Statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv6.sockstat6_tcp_sockets | inuse | sockets |\n| ipv6.sockstat6_udp_sockets | inuse | sockets |\n| ipv6.sockstat6_udplite_sockets | inuse | sockets |\n| ipv6.sockstat6_raw_sockets | inuse | sockets |\n| ipv6.sockstat6_frag_sockets | inuse | fragments |\n\n",integration_type:"collector",id:"proc.plugin-/proc/net/sockstat6-IPv6_Socket_Statistics",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/proc/net/softnet_stat",monitored_instance:{name:"Softnet Statistics",link:"",categories:["data-collection.linux-systems.network-metrics"],icon_filename:"linuxserver.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["softnet"],most_popular:!1},overview:"# Softnet Statistics\n\nPlugin: proc.plugin\nModule: /proc/net/softnet_stat\n\n## Overview\n\n`/proc/net/softnet_stat` provides statistics that relate to the handling of network packets by softirq.\n\nIt provides information about:\n\n- Total number of processed packets (`processed`).\n- Times ksoftirq ran out of quota (`dropped`).\n- Times net_rx_action was rescheduled.\n- Number of times processed all lists before quota.\n- Number of times did not process all lists due to quota.\n- Number of times net_rx_action was rescheduled for GRO (Generic Receive Offload) cells.\n- Number of times GRO cells were processed.\n\nMonitoring the /proc/net/softnet_stat file can be useful for:\n\n- **Network performance monitoring**: By tracking the total number of processed packets and how many packets\n were dropped, you can gain insights into your system's network performance.\n\n- **Troubleshooting**: If you're experiencing network-related issues, this collector can provide valuable clues.\n For instance, a high number of dropped packets may indicate a network problem.\n\n- **Capacity planning**: If your system is consistently processing near its maximum capacity of network\n packets, it might be time to consider upgrading your network infrastructure.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 1min_netdev_backlog_exceeded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/softnet.conf) | system.softnet_stat | average number of dropped packets in the last minute due to exceeded net.core.netdev_max_backlog |\n| [ 1min_netdev_budget_ran_outs 
](https://github.com/netdata/netdata/blob/master/src/health/health.d/softnet.conf) | system.softnet_stat | average number of times ksoftirq ran out of sysctl net.core.netdev_budget or net.core.netdev_budget_usecs with work remaining over the last minute (this can be a cause for dropped packets) |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Softnet Statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.softnet_stat | processed, dropped, squeezed, received_rps, flow_limit_count | events/s |\n\n### Per cpu core\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.softnet_stat | processed, dropped, squeezed, received_rps, flow_limit_count | events/s |\n\n",integration_type:"collector",id:"proc.plugin-/proc/net/softnet_stat-Softnet_Statistics",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/proc/net/stat/nf_conntrack",monitored_instance:{name:"Conntrack",link:"",categories:["data-collection.linux-systems.firewall-metrics"],icon_filename:"firewall.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["connection tracking mechanism","netfilter","conntrack"],most_popular:!1},overview:"# Conntrack\n\nPlugin: proc.plugin\nModule: /proc/net/stat/nf_conntrack\n\n## Overview\n\nThis integration monitors the connection tracking mechanism of Netfilter in the Linux Kernel.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ netfilter_conntrack_full ](https://github.com/netdata/netdata/blob/master/src/health/health.d/netfilter.conf) | netfilter.conntrack_sockets | netfilter connection tracker table size utilization |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Conntrack instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netfilter.conntrack_sockets | connections | active connections |\n| netfilter.conntrack_new | new, ignore, invalid | connections/s |\n| netfilter.conntrack_changes | inserted, deleted, delete_list | changes/s |\n| netfilter.conntrack_expect | created, deleted, new | expectations/s |\n| netfilter.conntrack_search | searched, restarted, found | searches/s |\n| netfilter.conntrack_errors | icmp_error, error_failed, drop, early_drop | events/s |\n\n",integration_type:"collector",id:"proc.plugin-/proc/net/stat/nf_conntrack-Conntrack",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/proc/net/stat/synproxy",monitored_instance:{name:"Synproxy",link:"",categories:["data-collection.linux-systems.firewall-metrics"],icon_filename:"firewall.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["synproxy"],most_popular:!1},overview:"# Synproxy\n\nPlugin: proc.plugin\nModule: /proc/net/stat/synproxy\n\n## Overview\n\nThis integration provides statistics about the Synproxy netfilter module.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Synproxy instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netfilter.synproxy_syn_received | received | packets/s |\n| netfilter.synproxy_conn_reopened | reopened | connections/s |\n| netfilter.synproxy_cookies | valid, invalid, retransmits | cookies/s |\n\n",integration_type:"collector",id:"proc.plugin-/proc/net/stat/synproxy-Synproxy",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/proc/net/wireless",monitored_instance:{name:"Wireless network interfaces",link:"",categories:["data-collection.linux-systems.network-metrics"],icon_filename:"network-wired.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["wireless devices"],most_popular:!1},overview:"# Wireless network interfaces\n\nPlugin: proc.plugin\nModule: /proc/net/wireless\n\n## Overview\n\nMonitor wireless devices with metrics about status, link quality, signal level, noise level and more.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per wireless device\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| wireless.status | status | status |\n| wireless.link_quality | link_quality | value |\n| wireless.signal_level | signal_level | dBm |\n| wireless.noise_level | noise_level | dBm |\n| wireless.discarded_packets | nwid, crypt, frag, retry, misc | packets/s |\n| wireless.missed_beacons | missed_beacons | frames/s |\n\n",integration_type:"collector",id:"proc.plugin-/proc/net/wireless-Wireless_network_interfaces",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/proc/pagetypeinfo",monitored_instance:{name:"Page types",link:"",categories:["data-collection.linux-systems.memory-metrics"],icon_filename:"microchip.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["memory page types"],most_popular:!1},overview:"# Page types\n\nPlugin: proc.plugin\nModule: /proc/pagetypeinfo\n\n## Overview\n\nThis integration provides metrics about the system's memory page types.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Page types instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.pagetype_global | a dimension per pagesize | B |\n\n### Per node, zone, type\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| node_id | TBD |\n| node_zone | TBD |\n| node_type | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.pagetype | a dimension per pagesize | B |\n\n",integration_type:"collector",id:"proc.plugin-/proc/pagetypeinfo-Page_types",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/proc/pressure",monitored_instance:{name:"Pressure Stall Information",link:"",categories:["data-collection.linux-systems.pressure-metrics"],icon_filename:"linuxserver.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["pressure"],most_popular:!1},overview:"# Pressure Stall Information\n\nPlugin: proc.plugin\nModule: /proc/pressure\n\n## Overview\n\nIntroduced in Linux kernel 4.20, `/proc/pressure` provides pressure stall information\n(PSI). 
PSI is a feature that allows the system to track the amount of time tasks are stalled due to\nresource contention, such as CPU, memory, or I/O.\n\nThe collector monitors 4 separate files for CPU, memory, I/O, and IRQ:\n\n- **cpu**: Tracks the amount of time tasks are stalled due to CPU contention.\n- **memory**: Tracks the amount of time tasks are stalled due to memory contention.\n- **io**: Tracks the amount of time tasks are stalled due to I/O contention.\n- **irq**: Tracks the amount of time tasks are stalled due to IRQ contention.\n\nEach of them provides metrics for stall time over the last 10 seconds, 1 minute, and 5 minutes.\n\nMonitoring the `/proc/pressure` files can provide important insights into system performance and capacity planning:\n\n- **Identifying resource contention**: If these metrics are consistently high, it indicates that tasks are\n frequently being stalled due to lack of resources, which can significantly degrade system performance.\n\n- **Troubleshooting performance issues**: If a system is experiencing performance issues, these metrics can\n help identify whether resource contention is the cause.\n\n- **Capacity planning**: By monitoring these metrics over time, you can understand trends in resource\n utilization and make informed decisions about when to add more resources to your system.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Pressure Stall Information instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.cpu_some_pressure | some10, some60, some300 | percentage |\n| system.cpu_some_pressure_stall_time | time | ms |\n| system.cpu_full_pressure | some10, some60, some300 | percentage |\n| system.cpu_full_pressure_stall_time | time | ms |\n| system.memory_some_pressure | some10, some60, some300 | percentage |\n| system.memory_some_pressure_stall_time | time | ms |\n| system.memory_full_pressure | some10, some60, some300 | percentage |\n| system.memory_full_pressure_stall_time | time | ms |\n| system.io_some_pressure | some10, some60, some300 | percentage |\n| system.io_some_pressure_stall_time | time | ms |\n| system.io_full_pressure | some10, some60, some300 | percentage |\n| system.io_full_pressure_stall_time | time | ms |\n\n",integration_type:"collector",id:"proc.plugin-/proc/pressure-Pressure_Stall_Information",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/proc/softirqs",monitored_instance:{name:"SoftIRQ statistics",link:"",categories:["data-collection.linux-systems.cpu-metrics"],icon_filename:"linuxserver.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["softirqs","interrupts"],most_popular:!1},overview:"# SoftIRQ statistics\n\nPlugin: proc.plugin\nModule: /proc/softirqs\n\n## Overview\n\nIn the Linux kernel, handling of hardware interrupts is split into two halves: the top half and the bottom half.\nThe top half is the routine that responds immediately to an interrupt, while the bottom half is deferred to be processed later.\n\nSoftirqs are a mechanism in the Linux kernel used to handle the bottom halves of interrupts, which can be\ndeferred and processed later in a context where it's safe to enable interrupts.\n\nThe actual work of handling the interrupt is offloaded to a softirq and executed later when the system\ndecides it's a good time to process them. This helps to keep the system responsive by not blocking the top\nhalf for too long, which could lead to missed interrupts.\n\nMonitoring `/proc/softirqs` is useful for:\n\n- **Performance tuning**: A high rate of softirqs could indicate a performance issue. For instance, a high\n rate of network softirqs (`NET_RX` and `NET_TX`) could indicate a network performance issue.\n\n- **Troubleshooting**: If a system is behaving unexpectedly, checking the softirqs could provide clues about\n what is going on. 
For example, a sudden increase in block device softirqs (BLOCK) might indicate a problem\n with a disk.\n\n- **Understanding system behavior**: Knowing what types of softirqs are happening can help you understand what\n your system is doing, particularly in terms of how it's interacting with hardware and how it's handling\n interrupts.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per SoftIRQ statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.softirqs | a dimension per softirq | softirqs/s |\n\n### Per cpu core\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cpu | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.softirqs | a dimension per softirq | softirqs/s |\n\n",integration_type:"collector",id:"proc.plugin-/proc/softirqs-SoftIRQ_statistics",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/proc/spl/kstat/zfs",monitored_instance:{name:"ZFS Pools",link:"",categories:["data-collection.linux-systems.filesystem-metrics.zfs"],icon_filename:"filesystem.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["zfs pools","pools","zfs","filesystem"],most_popular:!1},overview:"# ZFS Pools\n\nPlugin: proc.plugin\nModule: /proc/spl/kstat/zfs\n\n## Overview\n\nThis integration provides metrics about the state of ZFS pools.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 
zfs_pool_state_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/zfs.conf) | zfspool.state | ZFS pool ${label:pool} state is degraded |\n| [ zfs_pool_state_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/zfs.conf) | zfspool.state | ZFS pool ${label:pool} state is faulted or unavail |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per zfs pool\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| pool | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| zfspool.state | online, degraded, faulted, offline, removed, unavail, suspended | boolean |\n\n",integration_type:"collector",id:"proc.plugin-/proc/spl/kstat/zfs-ZFS_Pools",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/proc/spl/kstat/zfs/arcstats",monitored_instance:{name:"ZFS Adaptive Replacement Cache",link:"",categories:["data-collection.linux-systems.filesystem-metrics.zfs"],icon_filename:"filesystem.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["zfs arc","arc","zfs","filesystem"],most_popular:!1},overview:"# ZFS Adaptive Replacement Cache\n\nPlugin: proc.plugin\nModule: /proc/spl/kstat/zfs/arcstats\n\n## Overview\n\nThis integration monitors ZFS Adaptive Replacement Cache (ARC) statistics.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ zfs_memory_throttle ](https://github.com/netdata/netdata/blob/master/src/health/health.d/zfs.conf) | zfs.memory_ops | number of times ZFS had to limit the ARC growth in the last 10 minutes |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per ZFS Adaptive Replacement Cache instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| zfs.arc_size | arcsz, target, min, max | MiB |\n| zfs.l2_size | actual, size | MiB |\n| zfs.reads | arc, demand, prefetch, metadata, l2 | reads/s |\n| zfs.bytes | read, write | KiB/s |\n| zfs.hits | hits, misses | percentage |\n| zfs.hits_rate | hits, misses | events/s |\n| zfs.dhits | hits, misses | percentage |\n| zfs.dhits_rate | hits, misses | events/s |\n| zfs.phits | hits, misses | percentage |\n| zfs.phits_rate | hits, misses | events/s |\n| zfs.mhits | hits, misses | percentage |\n| zfs.mhits_rate | hits, misses | events/s |\n| zfs.l2hits | hits, misses | percentage |\n| zfs.l2hits_rate | hits, misses | events/s |\n| zfs.list_hits | mfu, mfu_ghost, mru, mru_ghost | hits/s |\n| zfs.arc_size_breakdown | recent, frequent | percentage |\n| zfs.memory_ops | direct, throttled, indirect | operations/s |\n| zfs.important_ops | evict_skip, deleted, mutex_miss, hash_collisions | operations/s |\n| zfs.actual_hits | hits, misses | percentage |\n| zfs.actual_hits_rate | hits, misses | events/s |\n| zfs.demand_data_hits | hits, misses | percentage |\n| zfs.demand_data_hits_rate | hits, misses | events/s |\n| zfs.prefetch_data_hits | hits, misses | percentage |\n| zfs.prefetch_data_hits_rate | hits, misses | events/s |\n| zfs.hash_elements | current, max | elements |\n| zfs.hash_chains | current, max | chains |\n\n",integration_type:"collector",id:"proc.plugin-/proc/spl/kstat/zfs/arcstats-ZFS_Adaptive_Replacement_Cache",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/proc/stat",monitored_instance:{name:"System statistics",link:"",categories:["data-collection.linux-systems.system-metrics"],icon_filename:"linuxserver.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["cpu utilization","process counts"],most_popular:!1},overview:"# System statistics\n\nPlugin: proc.plugin\nModule: /proc/stat\n\n## Overview\n\nCPU utilization, states, frequencies, and other key Linux system performance metrics.\n\nThe `/proc/stat` file provides various types of system statistics:\n\n- The overall system CPU usage statistics\n- Per CPU core statistics\n- The total context switching of the system\n- The total number of processes running\n- The total CPU interrupts\n- The total CPU softirqs\n\nThe collector also reads:\n\n- `/proc/schedstat` for statistics about the process scheduler in the Linux kernel.\n- `/sys/devices/system/cpu/[X]/thermal_throttle/core_throttle_count` to get the count of thermal throttling events for a specific CPU core on Linux systems.\n- `/sys/devices/system/cpu/[X]/thermal_throttle/package_throttle_count` to get the count of thermal throttling events for a specific CPU package on a Linux system.\n- `/sys/devices/system/cpu/[X]/cpufreq/scaling_cur_freq` to get the current operating frequency of a specific CPU core.\n- `/sys/devices/system/cpu/[X]/cpufreq/stats/time_in_state` to get the amount of time the CPU has spent in each of its available frequency states.\n- `/sys/devices/system/cpu/[X]/cpuidle/state[X]/name` to get the names of the idle states for each CPU core in a Linux system.\n- `/sys/devices/system/cpu/[X]/cpuidle/state[X]/time` to get the total time each specific CPU core has spent in 
each idle state since the system was started.\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector auto-detects all metrics. No configuration is needed.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe collector disables cpu frequency and idle state monitoring when there are more than 128 CPU cores available.\n\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `plugin:proc:/proc/stat` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU utilization over the last 10 minutes (excluding iowait, nice and steal) |\n| [ 10min_cpu_iowait ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU iowait time over the last 10 minutes |\n| [ 20min_steal_cpu ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU steal time over the last 20 minutes |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per System statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.cpu | guest_nice, guest, steal, softirq, irq, user, system, nice, iowait, idle | percentage |\n| system.intr | interrupts | interrupts/s |\n| system.ctxt | switches | context switches/s |\n| system.forks | started | processes/s |\n| system.processes | running, blocked | processes |\n| cpu.core_throttling | a dimension per cpu core | events/s |\n| cpu.package_throttling | a dimension per package | events/s |\n| cpu.cpufreq | a dimension per cpu core | MHz |\n\n### Per cpu core\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cpu | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.cpu | guest_nice, guest, steal, softirq, irq, user, system, nice, iowait, idle | percentage |\n| cpuidle.cpu_cstate_residency_time | a dimension per c-state | percentage |\n\n",integration_type:"collector",id:"proc.plugin-/proc/stat-System_statistics",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/proc/sys/kernel/random/entropy_avail",monitored_instance:{name:"Entropy",link:"",categories:["data-collection.linux-systems.system-metrics"],icon_filename:"syslog.png"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["entropy"],most_popular:!1},overview:"# Entropy\n\nPlugin: proc.plugin\nModule: /proc/sys/kernel/random/entropy_avail\n\n## Overview\n\nEntropy, a measure of the randomness or unpredictability of data.\n\nIn the context of cryptography, entropy is used to generate random numbers or keys that are essential for\nsecure communication and encryption. Without a good source of entropy, cryptographic protocols can become\nvulnerable to attacks that exploit the predictability of the generated keys.\n\nIn most operating systems, entropy is generated by collecting random events from various sources, such as\nhardware interrupts, mouse movements, keyboard presses, and disk activity. These events are fed into a pool\nof entropy, which is then used to generate random numbers when needed.\n\nThe `/dev/random` device in Linux is one such source of entropy, and it provides an interface for programs\nto access the pool of entropy. When a program requests random numbers, it reads from the `/dev/random` device,\nwhich blocks until enough entropy is available to generate the requested numbers. This ensures that the\ngenerated numbers are truly random and not predictable. \n\nHowever, if the pool of entropy gets depleted, the `/dev/random` device may block indefinitely, causing\nprograms that rely on random numbers to slow down or even freeze. This is especially problematic for\ncryptographic protocols that require a continuous stream of random numbers, such as SSL/TLS and SSH.\n\nTo avoid this issue, some systems use a hardware random number generator (RNG) to generate high-quality\nentropy. A hardware RNG generates random numbers by measuring physical phenomena, such as thermal noise or\nradioactive decay. These sources of randomness are considered to be more reliable and unpredictable than\nsoftware-based sources.\n\nOne such hardware RNG is the Trusted Platform Module (TPM), which is a dedicated hardware chip that is used\nfor cryptographic operations and secure boot. 
The TPM contains a built-in hardware RNG that generates\nhigh-quality entropy, which can be used to seed the pool of entropy in the operating system.\n\nAlternatively, software-based solutions such as `Haveged` can be used to generate additional entropy by\nexploiting sources of randomness in the system, such as CPU utilization and network traffic. These solutions\ncan help to mitigate the risk of entropy depletion, but they may not be as reliable as hardware-based solutions.\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ lowest_entropy ](https://github.com/netdata/netdata/blob/master/src/health/health.d/entropy.conf) | system.entropy | minimum number of bits of entropy available for the kernel\u2019s random number generator |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Entropy instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.entropy | entropy | entropy |\n\n",integration_type:"collector",id:"proc.plugin-/proc/sys/kernel/random/entropy_avail-Entropy",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/proc/uptime",monitored_instance:{name:"System Uptime",link:"",categories:["data-collection.linux-systems.system-metrics"],icon_filename:"linuxserver.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["uptime"],most_popular:!1},overview:"# System Uptime\n\nPlugin: proc.plugin\nModule: /proc/uptime\n\n## Overview\n\nThe amount of time the system has been up (running).\n\nUptime is a critical aspect of overall system performance:\n\n- **Availability**: Uptime monitoring can show whether a server is consistently available or experiences frequent downtimes.\n- **Performance Monitoring**: While server uptime alone doesn't provide detailed performance data, analyzing the duration and frequency of downtimes can help identify patterns or trends.\n- **Proactive problem detection**: If server uptime monitoring reveals unexpected downtimes or a decreasing uptime trend, it can serve as an early warning sign of potential problems.\n- **Root cause analysis**: When investigating server downtime, the uptime metric alone may not provide enough information to pinpoint the exact cause.\n- **Load balancing**: Uptime data can indirectly indicate load balancing issues if certain servers have significantly lower uptimes than 
others.\n- **Optimize maintenance efforts**: Servers with consistently low uptimes or frequent downtimes may require more attention.\n- **Compliance requirements**: Server uptime data can be used to demonstrate compliance with regulatory requirements or SLAs that mandate a minimum level of server availability.\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per System Uptime instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.uptime | uptime | seconds |\n\n",integration_type:"collector",id:"proc.plugin-/proc/uptime-System_Uptime",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/proc/vmstat",monitored_instance:{name:"Memory Statistics",link:"",categories:["data-collection.linux-systems.memory-metrics"],icon_filename:"linuxserver.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["swap","page faults","oom","numa"],most_popular:!1},overview:"# Memory Statistics\n\nPlugin: proc.plugin\nModule: /proc/vmstat\n\n## Overview\n\nLinux Virtual memory subsystem.\n\nInformation about memory management, indicating how effectively the kernel allocates and frees\nmemory resources in response to system demands.\n\nMonitors page faults, which occur when a process requests a portion of its memory that isn't\nimmediately available. Monitoring these events can help diagnose inefficiencies in memory management and\nprovide insights into application behavior.\n\nTracks swapping activity \u2014 a vital aspect of memory management where the kernel moves data from RAM to\nswap space, and vice versa, based on memory demand and usage. 
It also monitors the utilization of zswap,\na compressed cache for swap pages, and provides insights into its usage and performance implications.\n\nIn the context of virtualized environments, it tracks the ballooning mechanism which is used to balance\nmemory resources between host and guest systems.\n\nFor systems using NUMA architecture, it provides insights into the local and remote memory accesses, which\ncan impact the performance based on the memory access times.\n\nThe collector also watches for 'Out of Memory' kills, a drastic measure taken by the system when it runs out\nof memory resources.\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 30min_ram_swapped_out ](https://github.com/netdata/netdata/blob/master/src/health/health.d/swap.conf) | mem.swapio | percentage of the system RAM swapped in the last 30 minutes |\n| [ oom_kill ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ram.conf) | mem.oom_kill | number of out of memory kills in the last 30 minutes |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
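\n\nAs a quick sanity check, you can read the raw counters this collector parses straight from `/proc/vmstat` (a minimal look at a few of the fields behind these charts; exact field availability depends on your kernel version):\n\n```bash\ngrep -E 'pgpgin|pgpgout|pswpin|pswpout|oom_kill' /proc/vmstat\n```\n\n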
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Memory Statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.swapio | in, out | KiB/s |\n| system.pgpgio | in, out | KiB/s |\n| system.pgfaults | minor, major | faults/s |\n| mem.balloon | inflate, deflate, migrate | KiB/s |\n| mem.zswapio | in, out | KiB/s |\n| mem.ksm_cow | swapin, write | KiB/s |\n| mem.thp_faults | alloc, fallback, fallback_charge | events/s |\n| mem.thp_file | alloc, fallback, mapped, fallback_charge | events/s |\n| mem.thp_zero | alloc, failed | events/s |\n| mem.thp_collapse | alloc, failed | events/s |\n| mem.thp_split | split, failed, split_pmd, split_deferred | events/s |\n| mem.thp_swapout | swapout, fallback | events/s |\n| mem.thp_compact | success, fail, stall | events/s |\n| mem.oom_kill | kills | kills/s |\n| mem.numa | local, foreign, interleave, other, pte_updates, huge_pte_updates, hint_faults, hint_faults_local, pages_migrated | events/s |\n\n",integration_type:"collector",id:"proc.plugin-/proc/vmstat-Memory_Statistics",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/sys/block/zram",monitored_instance:{name:"ZRAM",link:"",categories:["data-collection.linux-systems.memory-metrics"],icon_filename:"microchip.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["zram"],most_popular:!1},overview:"# ZRAM\n\nPlugin: proc.plugin\nModule: /sys/block/zram\n\n## Overview\n\nzRAM, or compressed RAM, is a block device that uses a portion of your system's RAM as storage.\nThe data written to this block device is compressed and stored in memory.\n\nThe collector provides information about the operation and the effectiveness of zRAM on your system.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
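\n\nIf you want to cross-check these charts against the kernel's raw accounting, each zram device exposes its counters in sysfs (a quick manual check; `zram0` is simply the customary name of the first device, adjust to yours):\n\n```bash\ncat /sys/block/zram0/mm_stat\n```\n\n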
An instance is uniquely identified by a set of labels.\n\n\n\n### Per zram device\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.zram_usage | compressed, metadata | MiB |\n| mem.zram_savings | savings, original | MiB |\n| mem.zram_ratio | ratio | ratio |\n| mem.zram_efficiency | percent | percentage |\n\n",integration_type:"collector",id:"proc.plugin-/sys/block/zram-ZRAM",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/sys/class/drm",monitored_instance:{name:"AMD GPU",link:"https://www.amd.com",categories:["data-collection.hardware-devices-and-sensors"],icon_filename:"amd.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["amd","gpu","hardware"],most_popular:!1},overview:"# AMD GPU\n\nPlugin: proc.plugin\nModule: /sys/class/drm\n\n## Overview\n\nThis integration monitors AMD GPU metrics, such as utilization, clock frequency and memory usage.\n\nIt reads `/sys/class/drm` to collect metrics for every AMD GPU card instance it encounters.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per gpu\n\nThese metrics refer to the GPU.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| product_name | GPU product name (e.g. 
AMD RX 6600) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| amdgpu.gpu_utilization | utilization | percentage |\n| amdgpu.gpu_mem_utilization | utilization | percentage |\n| amdgpu.gpu_clk_frequency | frequency | MHz |\n| amdgpu.gpu_mem_clk_frequency | frequency | MHz |\n| amdgpu.gpu_mem_vram_usage_perc | usage | percentage |\n| amdgpu.gpu_mem_vram_usage | free, used | bytes |\n| amdgpu.gpu_mem_vis_vram_usage_perc | usage | percentage |\n| amdgpu.gpu_mem_vis_vram_usage | free, used | bytes |\n| amdgpu.gpu_mem_gtt_usage_perc | usage | percentage |\n| amdgpu.gpu_mem_gtt_usage | free, used | bytes |\n\n",integration_type:"collector",id:"proc.plugin-/sys/class/drm-AMD_GPU",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/sys/class/infiniband",monitored_instance:{name:"InfiniBand",link:"",categories:["data-collection.linux-systems.network-metrics"],icon_filename:"network-wired.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["infiniband","rdma"],most_popular:!1},overview:"# InfiniBand\n\nPlugin: proc.plugin\nModule: /sys/class/infiniband\n\n## Overview\n\nThis integration monitors InfiniBand network interface statistics.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
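\n\nThe counters behind these charts live in sysfs, so you can inspect them directly (a quick manual check; device and port directory names depend on your hardware):\n\n```bash\nls /sys/class/infiniband/*/ports/*/counters/\n```\n\n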
An instance is uniquely identified by a set of labels.\n\n\n\n### Per infiniband port\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ib.bytes | Received, Sent | kilobits/s |\n| ib.packets | Received, Sent, Mcast_rcvd, Mcast_sent, Ucast_rcvd, Ucast_sent | packets/s |\n| ib.errors | Pkts_malformated, Pkts_rcvd_discarded, Pkts_sent_discarded, Tick_Wait_to_send, Pkts_missed_resource, Buffer_overrun, Link_Downed, Link_recovered, Link_integrity_err, Link_minor_errors, Pkts_rcvd_with_EBP, Pkts_rcvd_discarded_by_switch, Pkts_sent_discarded_by_switch | errors/s |\n| ib.hwerrors | Duplicated_packets, Pkt_Seq_Num_gap, Ack_timer_expired, Drop_missing_buffer, Drop_out_of_sequence, NAK_sequence_rcvd, CQE_err_Req, CQE_err_Resp, CQE_Flushed_err_Req, CQE_Flushed_err_Resp, Remote_access_err_Req, Remote_access_err_Resp, Remote_invalid_req, Local_length_err_Resp, RNR_NAK_Packets, CNP_Pkts_ignored, RoCE_ICRC_Errors | errors/s |\n| ib.hwpackets | RoCEv2_Congestion_sent, RoCEv2_Congestion_rcvd, IB_Congestion_handled, ATOMIC_req_rcvd, Connection_req_rcvd, Read_req_rcvd, Write_req_rcvd, RoCE_retrans_adaptive, RoCE_retrans_timeout, RoCE_slow_restart, RoCE_slow_restart_congestion, RoCE_slow_restart_count | packets/s |\n\n",integration_type:"collector",id:"proc.plugin-/sys/class/infiniband-InfiniBand",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/sys/class/power_supply",monitored_instance:{name:"Power Supply",link:"",categories:["data-collection.linux-systems.power-supply-metrics"],icon_filename:"powersupply.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["psu","power supply"],most_popular:!1},overview:"# Power Supply\n\nPlugin: proc.plugin\nModule: /sys/class/power_supply\n\n## Overview\n\nThis integration monitors Power supply metrics, such as battery status, AC power status and more.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ linux_power_supply_capacity ](https://github.com/netdata/netdata/blob/master/src/health/health.d/linux_power_supply.conf) | powersupply.capacity | percentage of remaining power supply capacity |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
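\n\nYou can peek at the same sysfs attributes the collector reads (a quick manual check; `BAT0` is a hypothetical battery name here, list `/sys/class/power_supply/` to find yours):\n\n```bash\ncat /sys/class/power_supply/BAT0/capacity\n```\n\n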
An instance is uniquely identified by a set of labels.\n\n\n\n### Per power device\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| powersupply.capacity | capacity | percentage |\n| powersupply.power | power | W |\n| powersupply.charge | empty_design, empty, now, full, full_design | Ah |\n| powersupply.energy | empty_design, empty, now, full, full_design | Wh |\n| powersupply.voltage | min_design, min, now, max, max_design | V |\n\n",integration_type:"collector",id:"proc.plugin-/sys/class/power_supply-Power_Supply",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/sys/devices/system/edac/mc",monitored_instance:{name:"Memory modules (DIMMs)",link:"",categories:["data-collection.linux-systems.memory-metrics"],icon_filename:"microchip.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["edac","ecc","dimm","ram","hardware"],most_popular:!1},overview:"# Memory modules (DIMMs)\n\nPlugin: proc.plugin\nModule: /sys/devices/system/edac/mc\n\n## Overview\n\nThe Error Detection and Correction (EDAC) subsystem detects and reports errors in the system's memory,\nprimarily ECC (Error-Correcting Code) memory errors.\n\nThe collector provides data for:\n\n- Per memory controller (MC): correctable and uncorrectable errors. These can be of two kinds:\n - errors related to a DIMM\n - errors that cannot be associated with a DIMM\n\n- Per memory DIMM: correctable and uncorrectable errors. There are two kinds:\n - memory controllers that can identify the physical DIMMs and report errors directly for them,\n - memory controllers that report errors for memory address ranges that can be linked to DIMMs.\n In this case, the number of DIMMs reported may exceed the number of physical DIMMs installed.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ ecc_memory_mc_noinfo_correctable ](https://github.com/netdata/netdata/blob/master/src/health/health.d/memory.conf) | mem.edac_mc_errors | memory controller ${label:controller} ECC correctable errors (unknown DIMM slot) |\n| [ ecc_memory_mc_noinfo_uncorrectable ](https://github.com/netdata/netdata/blob/master/src/health/health.d/memory.conf) | mem.edac_mc_errors | memory controller ${label:controller} ECC uncorrectable errors (unknown DIMM slot) |\n| [ ecc_memory_dimm_correctable ](https://github.com/netdata/netdata/blob/master/src/health/health.d/memory.conf) | mem.edac_mc_dimm_errors | DIMM 
${label:dimm} controller ${label:controller} (location ${label:dimm_location}) ECC correctable errors |\n| [ ecc_memory_dimm_uncorrectable ](https://github.com/netdata/netdata/blob/master/src/health/health.d/memory.conf) | mem.edac_mc_dimm_errors | DIMM ${label:dimm} controller ${label:controller} (location ${label:dimm_location}) ECC uncorrectable errors |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per memory controller\n\nThese metrics refer to the memory controller.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| controller | [mcX](https://www.kernel.org/doc/html/v5.0/admin-guide/ras.html#mcx-directories) directory name of this memory controller. |\n| mc_name | Memory controller type. |\n| size_mb | The amount of memory in megabytes that this memory controller manages. |\n| max_location | Last available memory slot in this memory controller. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.edac_mc_errors | correctable, uncorrectable, correctable_noinfo, uncorrectable_noinfo | errors |\n\n### Per memory module\n\nThese metrics refer to the memory module (or rank, [depends on the memory controller](https://www.kernel.org/doc/html/v5.0/admin-guide/ras.html#f5)).\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| controller | [mcX](https://www.kernel.org/doc/html/v5.0/admin-guide/ras.html#mcx-directories) directory name of this memory controller. |\n| dimm | [dimmX or rankX](https://www.kernel.org/doc/html/v5.0/admin-guide/ras.html#dimmx-or-rankx-directories) directory name of this memory module. |\n| dimm_dev_type | Type of DRAM device used in this memory module. For example, x1, x2, x4, x8. |\n| dimm_edac_mode | Type of error detection and correction used. For example, S4ECD4ED would mean a Chipkill with x4 DRAM. |\n| dimm_label | Label assigned to this memory module. |\n| dimm_location | Location of the memory module. |\n| dimm_mem_type | Type of the memory module. |\n| size | The amount of memory in megabytes that this memory module manages. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.edac_mc_errors | correctable, uncorrectable | errors |\n\n",integration_type:"collector",id:"proc.plugin-/sys/devices/system/edac/mc-Memory_modules_(DIMMs)",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/sys/devices/system/node",monitored_instance:{name:"Non-Uniform Memory Access",link:"",categories:["data-collection.linux-systems.memory-metrics"],icon_filename:"linuxserver.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["numa"],most_popular:!1},overview:"# Non-Uniform Memory Access\n\nPlugin: proc.plugin\nModule: /sys/devices/system/node\n\n## Overview\n\nInformation about NUMA (Non-Uniform Memory Access) nodes on the system.\n\nNUMA is a method of configuring a cluster of microprocessors in a multiprocessing system so that they can\nshare memory locally, improving performance and allowing the system to be expanded. 
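\n\nTo see the raw per-node counters this collector reads, you can inspect sysfs directly (a quick manual check; `node0` is the first node, your system may have several):\n\n```bash\ncat /sys/devices/system/node/node0/numastat\n```\n\n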
NUMA is used in a\nsymmetric multiprocessing (SMP) system.\n\nIn a NUMA system, processors, memory, and I/O devices are grouped together into cells, also known as nodes.\nEach node has its own memory and set of I/O devices, and one or more processors. While a processor can access\nmemory in any of the nodes, it does so faster when accessing memory within its own node.\n\nThe collector provides statistics on memory allocations for processes running on the NUMA nodes, revealing the\nefficiency of memory allocations in multi-node systems.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per numa node\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| numa_node | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.numa_nodes | hit, miss, local, foreign, interleave, other | events/s |\n\n",integration_type:"collector",id:"proc.plugin-/sys/devices/system/node-Non-Uniform_Memory_Access",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/sys/fs/btrfs",monitored_instance:{name:"BTRFS",link:"",categories:["data-collection.linux-systems.filesystem-metrics.btrfs"],icon_filename:"filesystem.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["btrfs","filesystem"],most_popular:!1},overview:"# BTRFS\n\nPlugin: proc.plugin\nModule: /sys/fs/btrfs\n\n## Overview\n\nThis integration provides usage and error statistics from the BTRFS filesystem.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description 
|\n|:------------|:----------|:------------|\n| [ btrfs_allocated ](https://github.com/netdata/netdata/blob/master/src/health/health.d/btrfs.conf) | btrfs.disk | percentage of allocated BTRFS physical disk space |\n| [ btrfs_data ](https://github.com/netdata/netdata/blob/master/src/health/health.d/btrfs.conf) | btrfs.data | utilization of BTRFS data space |\n| [ btrfs_metadata ](https://github.com/netdata/netdata/blob/master/src/health/health.d/btrfs.conf) | btrfs.metadata | utilization of BTRFS metadata space |\n| [ btrfs_system ](https://github.com/netdata/netdata/blob/master/src/health/health.d/btrfs.conf) | btrfs.system | utilization of BTRFS system space |\n| [ btrfs_device_read_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/btrfs.conf) | btrfs.device_errors | number of encountered BTRFS read errors |\n| [ btrfs_device_write_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/btrfs.conf) | btrfs.device_errors | number of encountered BTRFS write errors |\n| [ btrfs_device_flush_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/btrfs.conf) | btrfs.device_errors | number of encountered BTRFS flush errors |\n| [ btrfs_device_corruption_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/btrfs.conf) | btrfs.device_errors | number of encountered BTRFS corruption errors |\n| [ btrfs_device_generation_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/btrfs.conf) | btrfs.device_errors | number of encountered BTRFS generation errors |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per btrfs filesystem\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| filesystem_uuid | TBD |\n| filesystem_label | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| btrfs.disk | unallocated, data_free, data_used, meta_free, meta_used, sys_free, sys_used | MiB |\n| btrfs.data | free, used | MiB |\n| btrfs.metadata | free, used, reserved | MiB |\n| btrfs.system | free, used | MiB |\n| btrfs.commits | commits | commits |\n| btrfs.commits_perc_time | commits | percentage |\n| btrfs.commit_timings | last, max | ms |\n\n### Per btrfs device\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device_id | TBD |\n| filesystem_uuid | TBD |\n| filesystem_label | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| btrfs.device_errors | write_errs, read_errs, flush_errs, corruption_errs, generation_errs | errors |\n\n",integration_type:"collector",id:"proc.plugin-/sys/fs/btrfs-BTRFS",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/sys/kernel/mm/ksm",monitored_instance:{name:"Kernel Same-Page Merging",link:"",categories:["data-collection.linux-systems.memory-metrics"],icon_filename:"microchip.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["ksm","samepage","merging"],most_popular:!1},overview:"# Kernel Same-Page Merging\n\nPlugin: proc.plugin\nModule: /sys/kernel/mm/ksm\n\n## Overview\n\nKernel Samepage Merging (KSM) is a memory-saving feature in Linux that enables the kernel to examine the\nmemory of different processes and identify identical pages. 
It then merges these identical pages into a\nsingle page that the processes share. This is particularly useful for virtualization, where multiple virtual\nmachines might be running the same operating system or applications and have many identical pages.\n\nThe collector provides information about the operation and effectiveness of KSM on your system.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Kernel Same-Page Merging instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.ksm | shared, unshared, sharing, volatile | MiB |\n| mem.ksm_savings | savings, offered | MiB |\n| mem.ksm_ratios | savings | percentage |\n\n",integration_type:"collector",id:"proc.plugin-/sys/kernel/mm/ksm-Kernel_Same-Page_Merging",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"ipc",monitored_instance:{name:"Inter Process Communication",link:"",categories:["data-collection.linux-systems.ipc-metrics"],icon_filename:"network-wired.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["ipc","semaphores","shared memory"],most_popular:!1},overview:"# Inter Process Communication\n\nPlugin: proc.plugin\nModule: ipc\n\n## Overview\n\nIPC stands for Inter-Process Communication. It is a mechanism which allows processes to communicate with each\nother and synchronize their actions.\n\nThis collector exposes information about:\n\n- Message Queues: This allows messages to be exchanged between processes. It's a more flexible method that\n allows messages to be placed onto a queue and read at a later time.\n\n- Shared Memory: This method allows for the fastest form of IPC because processes can exchange data by\n reading/writing into shared memory segments.\n\n- Semaphores: They are used to synchronize the operations performed by independent processes. 
So, if multiple\n processes are trying to access a single shared resource, semaphores can ensure that only one process\n accesses the resource at a given time.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ semaphores_used ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ipc.conf) | system.ipc_semaphores | IPC semaphore utilization |\n| [ semaphore_arrays_used ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ipc.conf) | system.ipc_semaphore_arrays | IPC semaphore arrays utilization |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Inter Process Communication instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ipc_semaphores | semaphores | semaphores |\n| system.ipc_semaphore_arrays | arrays | arrays |\n| system.message_queue_message | a dimension per queue | messages |\n| system.message_queue_bytes | a dimension per queue | bytes |\n| system.shared_memory_segments | segments | segments |\n| system.shared_memory_bytes | bytes | bytes |\n\n",integration_type:"collector",id:"proc.plugin-ipc-Inter_Process_Communication",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"python.d.plugin",module_name:"alarms",monitored_instance:{name:"Netdata Agent alarms",link:"/src/collectors/python.d.plugin/alarms/README.md",categories:["data-collection.other"],icon_filename:""},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["alarms","netdata"],most_popular:!1},overview:"# Netdata Agent alarms\n\nPlugin: python.d.plugin\nModule: alarms\n\n## Overview\n\nThis collector creates an 'Alarms' menu with one line plot of `alarms.status`.\n\n\nAlarm status is read from the Netdata agent rest api [`/api/v1/alarms?all`](https://learn.netdata.cloud/api#/alerts/alerts1).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt discovers instances of Netdata running on localhost, and gathers metrics from `http://127.0.0.1:19999/api/v1/alarms?all`. `CLEAR` status is mapped to `0`, `WARNING` to `1` and `CRITICAL` to `2`. 
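\n\nYou can verify that this endpoint is reachable before enabling the collector (a quick manual check, assuming a local Agent listening on the default port):\n\n```bash\ncurl -s 'http://127.0.0.1:19999/api/v1/alarms?all'\n```\n\n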
Also, by default all alarms produced will be monitored.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/alarms.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/alarms.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| url | Netdata agent alarms endpoint to collect from. Can be local or remote so long as reachable by agent. | http://127.0.0.1:19999/api/v1/alarms?all | yes |\n| status_map | Mapping of alarm status to integer number that will be the metric value collected. | {"CLEAR": 0, "WARNING": 1, "CRITICAL": 2} | yes |\n| collect_alarm_values | set to true to include a chart with calculated alarm values over time. | no | yes |\n| alarm_status_chart_type | define the type of chart for plotting status over time e.g. \'line\' or \'stacked\'. | line | yes |\n| alarm_contains_words | A "," separated list of words you want to filter alarm names for. For example \'cpu,load\' would filter for only alarms with "cpu" or "load" in alarm name. Default includes all. | | yes |\n| alarm_excludes_words | A "," separated list of words you want to exclude based on alarm name. For example \'cpu,load\' would exclude all alarms with "cpu" or "load" in alarm name. Default excludes None. | | yes |\n| update_every | Sets the default data collection frequency. | 10 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n url: \'http://127.0.0.1:19999/api/v1/alarms?all\'\n\n```\n##### Advanced\n\nAn advanced example configuration with multiple jobs collecting different subsets of alarms for plotting on different charts.\n"ML" job will collect status and values for all alarms with "ml_" in the name. 
Default job will collect status for all other alarms.\n\n\n{% details open=true summary="Config" %}\n```yaml\nML:\n update_every: 5\n url: \'http://127.0.0.1:19999/api/v1/alarms?all\'\n status_map:\n CLEAR: 0\n WARNING: 1\n CRITICAL: 2\n collect_alarm_values: true\n alarm_status_chart_type: \'stacked\'\n alarm_contains_words: \'ml_\'\n\nDefault:\n update_every: 5\n url: \'http://127.0.0.1:19999/api/v1/alarms?all\'\n status_map:\n CLEAR: 0\n WARNING: 1\n CRITICAL: 2\n collect_alarm_values: false\n alarm_status_chart_type: \'stacked\'\n alarm_excludes_words: \'ml_\'\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `alarms` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin alarms debug trace\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Netdata Agent alarms instance\n\nThese metrics refer to the entire monitored application.\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| alarms.status | a dimension per alarm representing the latest status of the alarm. | status |\n| alarms.values | a dimension per alarm representing the latest collected value of the alarm. 
| value |\n\n",integration_type:"collector",id:"python.d.plugin-alarms-Netdata_Agent_alarms",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/alarms/metadata.yaml",related_resources:""},{meta:{plugin_name:"python.d.plugin",module_name:"am2320",monitored_instance:{name:"AM2320",link:"https://learn.adafruit.com/adafruit-am2320-temperature-humidity-i2c-sensor/overview",categories:["data-collection.hardware-devices-and-sensors"],icon_filename:"microchip.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["temperature","am2320","sensor","humidity"],most_popular:!1},overview:"# AM2320\n\nPlugin: python.d.plugin\nModule: am2320\n\n## Overview\n\nThis collector monitors AM2320 sensor metrics about temperature and humidity.\n\nIt retrieves temperature and humidity values by contacting an AM2320 sensor over i2c.\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAssuming prerequisites are met, the collector will try to connect to the sensor via i2c\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\n#### Sensor connection to a Raspberry Pi\n\nConnect the am2320 to the Raspberry Pi I2C pins\n\nRaspberry Pi 3B/4 Pins:\n\n- Board 3.3V (pin 1) to sensor VIN (pin 1)\n- Board SDA (pin 3) to sensor SDA (pin 2)\n- Board GND (pin 6) to sensor GND (pin 3)\n- Board SCL (pin 5) to sensor SCL (pin 4)\n\nYou may also need to add two I2C pullup resistors if your board does not already have them. The Raspberry Pi does have internal pullup resistors but it doesn't hurt to add them anyway. You can use 2.2K - 10K but we will just use 10K. The resistors go from VDD to SCL and SDA each.\n\n\n#### Software requirements\n\nInstall the Adafruit Circuit Python AM2320 library:\n\n`sudo pip3 install adafruit-circuitpython-am2320`\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/am2320.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/am2320.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. 
| 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n\n{% /details %}\n#### Examples\n\n##### Local sensor\n\nA basic JOB configuration\n\n```yaml\nlocal_sensor:\n name: 'Local AM2320'\n\n```\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `am2320` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin am2320 debug trace\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per AM2320 instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| am2320.temperature | temperature | celsius |\n| am2320.humidity | humidity | percentage |\n\n",integration_type:"collector",id:"python.d.plugin-am2320-AM2320",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/am2320/metadata.yaml",related_resources:""},{meta:{plugin_name:"python.d.plugin",module_name:"beanstalk",monitored_instance:{name:"Beanstalk",link:"https://beanstalkd.github.io/",categories:["data-collection.message-brokers"],icon_filename:"beanstalk.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["beanstalk","beanstalkd","message"],most_popular:!1},overview:"# Beanstalk\n\nPlugin: python.d.plugin\nModule: beanstalk\n\n## Overview\n\nMonitor Beanstalk metrics to enhance job queueing and processing efficiency. 
Track job rates, processing times, and queue lengths for better task management.\n\nThe collector uses the `beanstalkc` python module to connect to a `beanstalkd` service and gather metrics.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf no configuration is given, the module will attempt to connect to beanstalkd at 127.0.0.1:11300.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\n#### beanstalkc python module\n\nThe collector requires the `beanstalkc` python module to be installed.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/beanstalk.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/beanstalk.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| host | IP or URL to a beanstalk service. | 127.0.0.1 | no |\n| port | Port of the beanstalk service. | 11300 | no |\n\n{% /details %}\n#### Examples\n\n##### Remote beanstalk server\n\nA basic remote beanstalk server\n\n```yaml\nremote:\n name: 'beanstalk'\n host: '1.2.3.4'\n port: 11300\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\nlocalhost:\n name: 'local_beanstalk'\n host: '127.0.0.1'\n port: 11300\n\nremote_job:\n name: 'remote_beanstalk'\n host: '192.0.2.1'\n port: 11300\n\n```\n{% /details %}\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `beanstalk` collector, run the `python.d.plugin` with the debug option enabled. 
The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin beanstalk debug trace\n ```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ beanstalk_server_buried_jobs ](https://github.com/netdata/netdata/blob/master/src/health/health.d/beanstalkd.conf) | beanstalk.current_jobs | number of buried jobs across all tubes. You need to manually kick them so they can be processed. Presence of buried jobs in a tube does not affect new jobs. |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Beanstalk instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| beanstalk.cpu_usage | user, system | cpu time |\n| beanstalk.jobs_rate | total, timeouts | jobs/s |\n| beanstalk.connections_rate | connections | connections/s |\n| beanstalk.commands_rate | put, peek, peek-ready, peek-delayed, peek-buried, reserve, use, watch, ignore, delete, bury, kick, stats, stats-job, stats-tube, list-tubes, list-tube-used, list-tubes-watched, pause-tube | commands/s |\n| beanstalk.current_tubes | tubes | tubes |\n| beanstalk.current_jobs | urgent, ready, reserved, delayed, buried | jobs |\n| beanstalk.current_connections | written, producers, workers, waiting | connections |\n| beanstalk.binlog | written, migrated | records/s |\n| beanstalk.uptime | uptime | seconds |\n\n### Per tube\n\nMetrics related to Beanstalk tubes. 
Each tube produces its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| beanstalk.jobs_rate | jobs | jobs/s |\n| beanstalk.jobs | urgent, ready, reserved, delayed, buried | jobs |\n| beanstalk.connections | using, waiting, watching | connections |\n| beanstalk.commands | deletes, pauses | commands/s |\n| beanstalk.pause | since, left | seconds |\n\n",integration_type:"collector",id:"python.d.plugin-beanstalk-Beanstalk",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/beanstalk/metadata.yaml",related_resources:""},{meta:{plugin_name:"python.d.plugin",module_name:"boinc",monitored_instance:{name:"BOINC",link:"https://boinc.berkeley.edu/",categories:["data-collection.distributed-computing-systems"],icon_filename:"bolt.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["boinc","distributed"],most_popular:!1},overview:"# BOINC\n\nPlugin: python.d.plugin\nModule: boinc\n\n## Overview\n\nThis collector monitors task counts for the Berkeley Open Infrastructure for Network Computing (BOINC) distributed computing client.\n\nIt uses the same RPC interface that the BOINC monitoring GUI does.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, the module will try to auto-detect the password to the RPC interface by looking in `/var/lib/boinc` for the `gui_rpc_auth.cfg` password file (this is the location most Linux distributions use for a system-wide BOINC installation), so things may just work without needing configuration for a local system.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\n#### BOINC RPC interface\n\nBOINC requires the use of a password to access its RPC interface. You can find this password in the `gui_rpc_auth.cfg` file in your BOINC directory.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/boinc.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/boinc.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. 
| 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| hostname | Define a hostname where boinc is running. | localhost | no |\n| port | The port of boinc RPC interface. | | no |\n| password | Provide a password to connect to a boinc RPC interface. | | no |\n\n{% /details %}\n#### Examples\n\n##### Configuration of a remote boinc instance\n\nA basic JOB configuration for a remote boinc instance\n\n```yaml\nremote:\n hostname: '1.2.3.4'\n port: 1234\n password: 'some-password'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\nlocalhost:\n name: 'local'\n host: '127.0.0.1'\n port: 1234\n password: 'some-password'\n\nremote_job:\n name: 'remote'\n host: '192.0.2.1'\n port: 1234\n password: some-other-password\n\n```\n{% /details %}\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `boinc` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin boinc debug trace\n ```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ boinc_total_tasks ](https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf) | boinc.tasks | average number of total tasks over the last 10 minutes |\n| [ boinc_active_tasks ](https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf) | boinc.tasks | average number of active tasks over the last 10 minutes |\n| [ boinc_compute_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf) | boinc.states | average number of compute errors over the last 10 minutes |\n| [ boinc_upload_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf) | boinc.states | average number of failed uploads over the last 10 minutes |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per BOINC instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| boinc.tasks | Total, Active | tasks |\n| boinc.states | New, Downloading, Ready to Run, Compute Errors, Uploading, Uploaded, Aborted, Failed Uploads | tasks |\n| boinc.sched | Uninitialized, Preempted, Scheduled | tasks |\n| boinc.process | Uninitialized, Executing, Suspended, Aborted, Quit, Copy Pending | tasks |\n\n",integration_type:"collector",id:"python.d.plugin-boinc-BOINC",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/boinc/metadata.yaml",related_resources:""},{meta:{plugin_name:"python.d.plugin",module_name:"ceph",monitored_instance:{name:"Ceph",link:"https://ceph.io/",categories:["data-collection.storage-mount-points-and-filesystems"],icon_filename:"ceph.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["ceph","storage"],most_popular:!1},overview:"# Ceph\n\nPlugin: python.d.plugin\nModule: ceph\n\n## Overview\n\nThis collector monitors Ceph metrics about cluster statistics, OSD usage, latency and pool statistics.\n\nIt uses the `rados` python module to connect to a Ceph cluster.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\n#### `rados` python module\n\nMake sure the `rados` python module is installed.\n\n#### Granting the ceph group read permission on the keyring file\n\nExecute: `chmod 640 /etc/ceph/ceph.client.admin.keyring`\n\n#### Create a specific rados_id\n\nYou can optionally create a `rados_id` to use instead of `admin`.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/ceph.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/ceph.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. 
| 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| config_file | Ceph config file | | yes |\n| keyring_file | Ceph keyring file. The netdata user must be added to the ceph group, and the keyring file must be readable by that group. | | yes |\n| rados_id | A rados user id to use for connecting to the Ceph cluster. | admin | no |\n\n{% /details %}\n#### Examples\n\n##### Basic local Ceph cluster\n\nA basic configuration to connect to a local Ceph cluster.\n\n```yaml\nlocal:\n config_file: '/etc/ceph/ceph.conf'\n keyring_file: '/etc/ceph/ceph.client.admin.keyring'\n\n```\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `ceph` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin ceph debug trace\n ```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ ceph_cluster_space_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ceph.conf) | ceph.general_usage | cluster disk space utilization |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Ceph instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ceph.general_usage | avail, used | KiB |\n| ceph.general_objects | cluster | objects |\n| ceph.general_bytes | read, write | KiB/s |\n| ceph.general_operations | read, write | operations |\n| ceph.general_latency | apply, commit | milliseconds |\n| ceph.pool_usage | a dimension per Ceph Pool | KiB |\n| ceph.pool_objects | a dimension per Ceph Pool | objects |\n| ceph.pool_read_bytes | a dimension per Ceph Pool | KiB/s |\n| ceph.pool_write_bytes | a dimension per Ceph Pool | KiB/s |\n| ceph.pool_read_operations | a dimension per Ceph Pool | operations |\n| ceph.pool_write_operations | a dimension per Ceph Pool | operations |\n| ceph.osd_usage | a dimension per Ceph OSD | KiB |\n| ceph.osd_size | a dimension per Ceph OSD | KiB |\n| ceph.apply_latency | a dimension per Ceph OSD | milliseconds |\n| ceph.commit_latency | a dimension per Ceph OSD | milliseconds |\n\n",integration_type:"collector",id:"python.d.plugin-ceph-Ceph",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/ceph/metadata.yaml",related_resources:""},{meta:{plugin_name:"python.d.plugin",module_name:"changefinder",monitored_instance:{name:"python.d changefinder",link:"",categories:["data-collection.other"],icon_filename:""},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["change detection","anomaly detection","machine learning","ml"],most_popular:!1},overview:"# python.d changefinder\n\nPlugin: python.d.plugin\nModule: changefinder\n\n## Overview\n\nThis collector uses the Python [changefinder](https://github.com/shunsukeaihara/changefinder) library to\nperform [online](https://en.wikipedia.org/wiki/Online_machine_learning) [changepoint detection](https://en.wikipedia.org/wiki/Change_detection)\non your Netdata charts and/or dimensions.\n\n\nInstead of this collector just _collecting_ data, it also does some computation on the data it collects to return a changepoint score for each chart or dimension you configure it to work on. This is an [online](https://en.wikipedia.org/wiki/Online_machine_learning) machine learning algorithm so there is no batch step to train the model; instead, it evolves over time as more data arrives. That makes this particular algorithm quite cheap to compute at each step of data collection (see the notes section below for more details) and it should scale fairly well to work on lots of charts or hosts (if running on a parent node for example).\n\n### Notes\n\n- It may take an hour or two (depending on your choice of `n_score_samples`) for the collector to 'settle' into its\n typical behaviour in terms of the trained models and scores you will see in the normal running of your node. Mainly\n this is because it can take a while to build up a proper distribution of previous scores in order to convert the raw\n score returned by the ChangeFinder algorithm into a percentile based on the most recent `n_score_samples` that have\n already been produced. So when you first turn the collector on, it will have a lot of flags in the beginning and then\n should 'settle down' once it has built up enough history. 
This is a typical characteristic of online machine learning\n approaches which need some initial window of time before they can be useful.\n- As this collector does most of the work in Python itself, you may want to try it out first on a test or development\n system to get a sense of its performance characteristics on a node similar to where you would like to use it.\n- On a development n1-standard-2 (2 vCPUs, 7.5 GB memory) VM running Ubuntu 18.04 LTS and not doing any work, some of the\n typical performance characteristics we saw from running this collector (with defaults) were:\n - A runtime (`netdata.runtime_changefinder`) of ~30ms.\n - Typically ~1% additional CPU usage.\n - About ~85 MB of RAM (`apps.mem`) being continually used by the `python.d.plugin` under default configuration.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, this collector will work over all `system.*` charts.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\n#### Python Requirements\n\nThis collector will only work with Python 3 and requires the packages below to be installed.\n\n```bash\n# become netdata user\nsudo su -s /bin/bash netdata\n# install required packages for the netdata user\npip3 install --user numpy==1.19.5 changefinder==0.03 scipy==1.5.4\n```\n\n**Note**: if you need to tell Netdata to use Python 3 then you can pass the below command in the python plugin section\nof your `netdata.conf` file.\n\n```yaml\n[ plugin:python.d ]\n # update every = 1\n command options = -ppython3\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/changefinder.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/changefinder.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| charts_regex | what charts to pull data for - A regex like `system\\..*/` or `system\\..*/apps.cpu/apps.mem` etc. | system\\..* | yes |\n| charts_to_exclude | charts to exclude, useful if you would like to exclude some specific charts. note: should be a ',' separated string like 'chart.name,chart.name'. | | no |\n| mode | get ChangeFinder scores 'per_dim' or 'per_chart'. | per_chart | yes |\n| cf_r | default parameters that can be passed to the changefinder library. 
| 0.5 | no |\n| cf_order | default parameters that can be passed to the changefinder library. | 1 | no |\n| cf_smooth | default parameters that can be passed to the changefinder library. | 15 | no |\n| cf_threshold | the percentile above which scores will be flagged. | 99 | no |\n| n_score_samples | the number of recent scores to use when calculating the percentile of the changefinder score. | 14400 | no |\n| show_scores | set to true if you also want to chart the percentile scores in addition to the flags. (mainly useful for debugging or if you want to dive deeper on how the scores are evolving over time) | no | no |\n\n{% /details %}\n#### Examples\n\n##### Default\n\nDefault configuration.\n\n```yaml\nlocal:\n name: 'local'\n host: '127.0.0.1:19999'\n charts_regex: 'system\\..*'\n charts_to_exclude: ''\n mode: 'per_chart'\n cf_r: 0.5\n cf_order: 1\n cf_smooth: 15\n cf_threshold: 99\n n_score_samples: 14400\n show_scores: false\n\n```\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `changefinder` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin changefinder debug trace\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per python.d changefinder instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| changefinder.scores | a dimension per chart | score |\n| changefinder.flags | a dimension per chart | flag |\n\n",integration_type:"collector",id:"python.d.plugin-changefinder-python.d_changefinder",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/changefinder/metadata.yaml",related_resources:""},{meta:{plugin_name:"python.d.plugin",module_name:"dovecot",monitored_instance:{name:"Dovecot",link:"https://www.dovecot.org/",categories:["data-collection.mail-servers"],icon_filename:"dovecot.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["dovecot","imap","mail"],most_popular:!1},overview:"# Dovecot\n\nPlugin: python.d.plugin\nModule: dovecot\n\n## Overview\n\nThis collector monitors Dovecot metrics about sessions, logins, commands, page faults and more.\n\nIt uses the Dovecot socket and executes the `EXPORT global` command to get the statistics.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf no configuration is given, the collector will attempt to connect to Dovecot using the UNIX socket located at `/var/run/dovecot/stats`.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\n#### Dovecot configuration\n\nThe Dovecot UNIX socket should have R/W permissions for user netdata, or Dovecot should be configured with a TCP/IP socket.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/dovecot.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/dovecot.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. 
JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| socket | Use this socket to communicate with Dovecot. | /var/run/dovecot/stats | no |\n| host | Instead of using a socket, you can point the collector to an IP address for Dovecot statistics. | | no |\n| port | Used in combination with host, configures the port Dovecot listens on. | | no |\n\n{% /details %}\n#### Examples\n\n##### Local TCP\n\nA basic TCP configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\nlocaltcpip:\n name: 'local'\n host: '127.0.0.1'\n port: 24242\n\n```\n{% /details %}\n##### Local socket\n\nA basic local socket configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\nlocalsocket:\n name: 'local'\n socket: '/var/run/dovecot/stats'\n\n```\n{% /details %}\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `dovecot` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin dovecot debug trace\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Dovecot instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dovecot.sessions | active sessions | number |\n| dovecot.logins | logins | number |\n| dovecot.commands | commands | commands |\n| dovecot.faults | minor, major | faults |\n| dovecot.context_switches | voluntary, involuntary | switches |\n| dovecot.io | read, write | KiB/s |\n| dovecot.net | read, write | kilobits/s |\n| dovecot.syscalls | read, write | syscalls/s |\n| dovecot.lookup | path, attr | number/s |\n| dovecot.cache | hits | hits/s |\n| dovecot.auth | ok, failed | attempts |\n| dovecot.auth_cache | hit, miss | number |\n\n",integration_type:"collector",id:"python.d.plugin-dovecot-Dovecot",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/dovecot/metadata.yaml",related_resources:""},{meta:{plugin_name:"python.d.plugin",module_name:"example",monitored_instance:{name:"Example collector",link:"/src/collectors/python.d.plugin/example/README.md",categories:["data-collection.other"],icon_filename:""},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["example","netdata","python"],most_popular:!1},overview:"# Example collector\n\nPlugin: python.d.plugin\nModule: example\n\n## Overview\n\nExample collector that generates some random numbers as metrics.\n\nIf you want to write your own collector, read our [writing a new Python module](/src/collectors/python.d.plugin/README.md#how-to-write-a-new-module) tutorial.\n\n\nThe `get_data()` function uses `random.randint()` to generate a random number which will be collected as a metric.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/example.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/example.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| num_lines | The number of lines 
to create. | 4 | no |\n| lower | The lower bound of numbers to randomly sample from. | 0 | no |\n| upper | The upper bound of numbers to randomly sample from. | 100 | no |\n| update_every | Sets the default data collection frequency. | 1 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\nfour_lines:\n name: "Four Lines"\n update_every: 1\n priority: 60000\n penalty: yes\n autodetection_retry: 0\n num_lines: 4\n lower: 0\n upper: 100\n\n```\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `example` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin example debug trace\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Example collector instance\n\nThese metrics refer to the entire monitored application.\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| example.random | random | number |\n\n",integration_type:"collector",id:"python.d.plugin-example-Example_collector",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/example/metadata.yaml",related_resources:""},{meta:{plugin_name:"python.d.plugin",module_name:"exim",monitored_instance:{name:"Exim",link:"https://www.exim.org/",categories:["data-collection.mail-servers"],icon_filename:"exim.jpg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["exim","mail","server"],most_popular:!1},overview:"# Exim\n\nPlugin: python.d.plugin\nModule: exim\n\n## Overview\n\nThis collector monitors the Exim mail queue.\n\nIt uses the `exim` command line binary to get the statistics.\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAssuming setup prerequisites are met, the collector will try to gather statistics using the method described above, even without any configuration.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\n#### Exim configuration - local installation\n\nThe module uses the `exim` binary, which can only be executed as root by default. We need to allow other users to run the `exim` binary. We solve that by adding the `queue_list_requires_admin` statement to the Exim configuration and setting it to `false` (it is `true` by default). On many Linux distributions, the default location of the `exim` configuration is `/etc/exim.conf`.\n\n1. Edit the `exim` configuration with your preferred editor and add:\n`queue_list_requires_admin = false`\n2. Restart `exim` and Netdata\n\n\n#### Exim configuration - WHM (CPanel) server\n\nOn a WHM server, you can reconfigure `exim` over the WHM interface with the following steps.\n\n1. Log in to WHM\n2. Navigate to Service Configuration --\x3e Exim Configuration Manager --\x3e the Advanced Editor tab\n3. Scroll down to the button **Add additional configuration setting** and click on it.\n4. In the new dropdown which will appear above, find and choose:\n`queue_list_requires_admin` and set it to `false`\n5. 
Scroll to the end and click the **Save** button.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/exim.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/exim.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| command | Path and command to the `exim` binary | exim -bpc | no |\n\n{% /details %}\n#### Examples\n\n##### Local exim install\n\nA basic local exim install\n\n```yaml\nlocal:\n command: 'exim -bpc'\n\n```\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `exim` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin exim debug trace\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Exim instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exim.qemails | emails | emails |\n\n",integration_type:"collector",id:"python.d.plugin-exim-Exim",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/exim/metadata.yaml",related_resources:""},{meta:{plugin_name:"python.d.plugin",module_name:"gearman",monitored_instance:{name:"Gearman",link:"http://gearman.org/",categories:["data-collection.distributed-computing-systems"],icon_filename:"gearman.png"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["gearman","gearman job server"],most_popular:!1},overview:"# Gearman\n\nPlugin: python.d.plugin\nModule: gearman\n\n## Overview\n\nMonitor Gearman metrics for proficient system task distribution. Track job counts, worker statuses, and queue lengths for effective distributed task management.\n\nThis collector connects to a Gearman instance via either TCP or unix socket.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nWhen no configuration file is found, the collector tries to connect to TCP/IP socket: localhost:4730.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\n#### Socket permissions\n\nThe gearman UNIX socket should have read permission for user netdata.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/gearman.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/gearman.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. 
This allows autodetection to try several alternatives and pick the one that works. | | no |\n| host | URL or IP where gearman is running. | localhost | no |\n| port | Port of URL or IP where gearman is running. | 4730 | no |\n| tls | Use TLS to connect to gearman. | false | no |\n| cert | Provide a certificate file if needed to connect to a TLS gearman instance. | | no |\n| key | Provide a key file if needed to connect to a TLS gearman instance. | | no |\n\n{% /details %}\n#### Examples\n\n##### Local gearman service\n\nA basic host and port gearman configuration for localhost.\n\n```yaml\nlocalhost:\n name: 'local'\n host: 'localhost'\n port: 4730\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\nlocalhost:\n name: 'local'\n host: 'localhost'\n port: 4730\n\nremote:\n name: 'remote'\n host: '192.0.2.1'\n port: 4730\n\n```\n{% /details %}\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `gearman` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin gearman debug trace\n ```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ gearman_workers_queued ](https://github.com/netdata/netdata/blob/master/src/health/health.d/gearman.conf) | gearman.single_job | average number of queued jobs over the last 10 minutes |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Gearman instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| gearman.total_jobs | Pending, Running | Jobs |\n\n### Per gearman job\n\nMetrics related to Gearman jobs. Each job produces its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| gearman.single_job | Pending, Idle, Running | Jobs |\n\n",integration_type:"collector",id:"python.d.plugin-gearman-Gearman",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/gearman/metadata.yaml",related_resources:""},{meta:{plugin_name:"python.d.plugin",module_name:"go_expvar",monitored_instance:{name:"Go applications (EXPVAR)",link:"https://pkg.go.dev/expvar",categories:["data-collection.apm"],icon_filename:"go.png"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["go","expvar","application"],most_popular:!1},overview:"# Go applications (EXPVAR)\n\nPlugin: python.d.plugin\nModule: go_expvar\n\n## Overview\n\nThis collector monitors Go applications that expose their metrics with the use of the `expvar` package from the Go standard library. 
It produces charts for Go runtime memory statistics and optionally any number of custom charts.\n\nIt connects via HTTP to gather the metrics exposed via the `expvar` package.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Enable the go_expvar collector\n\nThe `go_expvar` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config directory](/docs/netdata-agent/configuration/README.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file.\n\n```bash\ncd /etc/netdata # Replace this path with your Netdata config directory, if different\nsudo ./edit-config python.d.conf\n```\n\nChange the value of the `go_expvar` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](/packaging/installer/README.md#maintaining-a-netdata-agent-installation) for your system.\n\n\n#### Sample `expvar` usage in a Go application\n\nThe `expvar` package exposes metrics over HTTP and is very easy to use.\nConsider the minimal sample below:\n\n```go\npackage main\n\nimport (\n _ "expvar"\n "net/http"\n)\n\nfunc main() {\n http.ListenAndServe("127.0.0.1:8080", nil)\n}\n```\n\nWhen imported this way, the `expvar` package registers an HTTP handler at `/debug/vars` that\nexposes Go runtime\'s memory statistics in JSON format. 
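For example, with the minimal application above running, a quick way to check the endpoint from a shell is:\n\n```bash\n# fetch the expvar JSON from the sample application above\n# (it listens on 127.0.0.1:8080, as set in the ListenAndServe call)\ncurl -s http://127.0.0.1:8080/debug/vars\n```\n\n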
You can inspect the output by opening\nthe URL in your browser (or by using `wget` or `curl`).\n\nSample output:\n\n```json\n{\n"cmdline": ["./expvar-demo-binary"],\n"memstats": {"Alloc":630856,"TotalAlloc":630856,"Sys":3346432,"Lookups":27, }\n}\n```\n\nYou can of course expose and monitor your own variables as well.\nHere is a sample Go application that exposes a few custom variables:\n\n```go\npackage main\n\nimport (\n "expvar"\n "net/http"\n "runtime"\n "time"\n)\n\nfunc main() {\n\n tick := time.NewTicker(1 * time.Second)\n num_go := expvar.NewInt("runtime.goroutines")\n counters := expvar.NewMap("counters")\n counters.Set("cnt1", new(expvar.Int))\n counters.Set("cnt2", new(expvar.Float))\n\n go http.ListenAndServe(":8080", nil)\n\n for {\n select {\n case <- tick.C:\n num_go.Set(int64(runtime.NumGoroutine()))\n counters.Add("cnt1", 1)\n counters.AddFloat("cnt2", 1.452)\n }\n }\n}\n```\n\nApart from the runtime memory stats, this application publishes two counters and the\nnumber of currently running Goroutines and updates these stats every second.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/go_expvar.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/go_expvar.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. Each JOB can be used to monitor a different Go application.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| url | the URL and port of the expvar endpoint. Please include the whole path of the endpoint, as the expvar handler can be installed in a non-standard location. | | yes |\n| user | If the URL is password protected, this is the username to use. | | no |\n| pass | If the URL is password protected, this is the password to use. | | no |\n| collect_memstats | Enables charts for Go runtime\'s memory statistics. | | no |\n| extra_charts | Defines extra data/charts to monitor, please see the example below. | | no |\n\n{% /details %}\n#### Examples\n\n##### Monitor a Go app1 application\n\nThe example below sets a configuration for a Go application, called `app1`. 
Besides the `memstats`, the application also exposes two counters and the number of currently running Goroutines and updates these stats every second.\n\nThe `go_expvar` collector can monitor these as well with the use of the `extra_charts` configuration variable.\n\nThe `extra_charts` variable is a YAML list of Netdata chart definitions.\nEach chart definition has the following keys:\n\n```\nid: Netdata chart ID\noptions: a key-value mapping of chart options\nlines: a list of line definitions\n```\n\n**Note: please do not use dots in the chart or line ID field.\nSee [this issue](https://github.com/netdata/netdata/pull/1902#issuecomment-284494195) for an explanation.**\n\nPlease see these two links to the official Netdata documentation for more information about the values:\n\n- [External plugins - charts](/src/collectors/plugins.d/README.md#chart)\n- [Chart variables](/src/collectors/python.d.plugin/README.md#global-variables-order-and-chart)\n\n**Line definitions**\n\nEach chart can define multiple lines (dimensions).\nA line definition is a key-value mapping of line options.\nEach line can have the following options:\n\n```\n# mandatory\nexpvar_key: the name of the expvar as present in the JSON output of /debug/vars endpoint\nexpvar_type: value type; supported are "float" or "int"\nid: the id of this line/dimension in Netdata\n\n# optional - Netdata defaults are used if these options are not defined\nname: \'\'\nalgorithm: absolute\nmultiplier: 1\ndivisor: 100 if expvar_type == float, 1 if expvar_type == int\nhidden: False\n```\n\nPlease see the following link for more information about the options and their default values:\n[External plugins - dimensions](/src/collectors/plugins.d/README.md#dimension)\n\nApart from top-level expvars, this plugin can also parse expvars stored in a multi-level map.\nAll dicts in the resulting JSON document are then flattened to one level.\nExpvar names are joined together with \'.\' when flattening.\n\nExample:\n\n```\n{\n "counters": {"cnt1": 1042, "cnt2": 1512.9839999999983},\n "runtime.goroutines": 5\n}\n```\n\nIn the above case, the exported variables will be available under `runtime.goroutines`,\n`counters.cnt1` and `counters.cnt2` expvar_keys. If the flattening results in a key collision,\nthe first defined key wins and all subsequent keys with the same name are ignored.\n\n\n```yaml\napp1:\n name : \'app1\'\n url : \'http://127.0.0.1:8080/debug/vars\'\n collect_memstats: true\n extra_charts:\n - id: "runtime_goroutines"\n options:\n name: num_goroutines\n title: "runtime: number of goroutines"\n units: goroutines\n family: runtime\n context: expvar.runtime.goroutines\n chart_type: line\n lines:\n - {expvar_key: \'runtime.goroutines\', expvar_type: int, id: runtime_goroutines}\n - id: "foo_counters"\n options:\n name: counters\n title: "some random counters"\n units: awesomeness\n family: counters\n context: expvar.foo.counters\n chart_type: line\n lines:\n - {expvar_key: \'counters.cnt1\', expvar_type: int, id: counters_cnt1}\n - {expvar_key: \'counters.cnt2\', expvar_type: float, id: counters_cnt2}\n\n```\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `go_expvar` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin go_expvar debug trace\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Go applications (EXPVAR) instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| expvar.memstats.heap | alloc, inuse | KiB |\n| expvar.memstats.stack | inuse | KiB |\n| expvar.memstats.mspan | inuse | KiB |\n| expvar.memstats.mcache | inuse | KiB |\n| expvar.memstats.live_objects | live | objects |\n| expvar.memstats.sys | sys | KiB |\n| expvar.memstats.gc_pauses | avg | ns |\n\n",integration_type:"collector",id:"python.d.plugin-go_expvar-Go_applications_(EXPVAR)",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/go_expvar/metadata.yaml",related_resources:""},{meta:{plugin_name:"python.d.plugin",module_name:"icecast",monitored_instance:{name:"Icecast",link:"https://icecast.org/",categories:["data-collection.media-streaming-servers"],icon_filename:"icecast.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["icecast","streaming","media"],most_popular:!1},overview:"# Icecast\n\nPlugin: python.d.plugin\nModule: icecast\n\n## Overview\n\nThis collector monitors Icecast listener counts.\n\nIt connects to an icecast URL and uses the `status-json.xsl` endpoint to retrieve statistics.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nWithout configuration, the collector attempts to connect to http://localhost:8443/status-json.xsl\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\n#### Icecast minimum version\n\nNeeds at least icecast version >= 2.4.0\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/icecast.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/icecast.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value 
which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| url | The URL (and port) to the icecast server. Needs to also include `/status-json.xsl` | http://localhost:8443/status-json.xsl | no |\n| user | Username to use to connect to `url` if it's password protected. | | no |\n| pass | Password to use to connect to `url` if it's password protected. | | no |\n\n{% /details %}\n#### Examples\n\n##### Remote Icecast server\n\nConfigure a remote icecast server\n\n```yaml\nremote:\n url: 'http://1.2.3.4:8443/status-json.xsl'\n\n```\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `icecast` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin icecast debug trace\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Icecast instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| icecast.listeners | a dimension for each active source | listeners |\n\n",integration_type:"collector",id:"python.d.plugin-icecast-Icecast",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/icecast/metadata.yaml",related_resources:""},{meta:{plugin_name:"python.d.plugin",module_name:"ipfs",monitored_instance:{name:"IPFS",link:"https://ipfs.tech/",categories:["data-collection.storage-mount-points-and-filesystems"],icon_filename:"ipfs.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[],most_popular:!1},overview:"# IPFS\n\nPlugin: python.d.plugin\nModule: ipfs\n\n## Overview\n\nThis collector monitors IPFS server metrics about its quality and performance.\n\nIt connects to an HTTP endpoint of the IPFS server to collect the metrics.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf the endpoint is accessible by the Agent, Netdata will autodetect it.\n\n#### Limits\n\nCalls to the following endpoints are disabled due to IPFS bugs:\n\n/api/v0/stats/repo (https://github.com/ipfs/go-ipfs/issues/3874)\n/api/v0/pin/ls (https://github.com/ipfs/go-ipfs/issues/7528)\n\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/ipfs.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/ipfs.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details open=true summary=\"\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | The JOB's name as it will appear at the dashboard (by default it is the job_name) | job_name | no |\n| url | URL to the IPFS API | no | yes |\n| repoapi | Collect repo metrics. | no | no |\n| pinapi | Set status of IPFS pinned object polling. 
| no | no |\n\n{% /details %}\n#### Examples\n\n##### Basic (default out-of-the-box)\n\nA basic example configuration, one job will run at a time. Autodetect mechanism uses it by default.\n\n```yaml\nlocalhost:\n name: 'local'\n url: 'http://localhost:5001'\n repoapi: no\n pinapi: no\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\nlocalhost:\n name: 'local'\n url: 'http://localhost:5001'\n repoapi: no\n pinapi: no\n\nremote_host:\n name: 'remote'\n url: 'http://192.0.2.1:5001'\n repoapi: no\n pinapi: no\n\n```\n{% /details %}\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `ipfs` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin ipfs debug trace\n ```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ ipfs_datastore_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ipfs.conf) | ipfs.repo_size | IPFS datastore utilization |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per IPFS instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipfs.bandwidth | in, out | kilobits/s |\n| ipfs.peers | peers | peers |\n| ipfs.repo_size | avail, size | GiB |\n| ipfs.repo_objects | objects, pinned, recursive_pins | objects |\n\n",integration_type:"collector",id:"python.d.plugin-ipfs-IPFS",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/ipfs/metadata.yaml",related_resources:""},{meta:{plugin_name:"python.d.plugin",module_name:"memcached",monitored_instance:{name:"Memcached",link:"https://memcached.org/",categories:["data-collection.database-servers"],icon_filename:"memcached.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["memcached","memcache","cache","database"],most_popular:!1},overview:"# Memcached\n\nPlugin: python.d.plugin\nModule: memcached\n\n## Overview\n\nMonitor Memcached metrics for proficient in-memory key-value store operations. 
This collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf no configuration is given, the collector will attempt to connect to a memcached instance at the `127.0.0.1:11211` address.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/memcached.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/memcached.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| host | The host to connect to. | 127.0.0.1 | no |\n| port | The port to connect to. | 11211 | no |\n| update_every | Sets the default data collection frequency. | 10 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n\n{% /details %}\n#### Examples\n\n##### localhost\n\nAn example configuration for localhost.\n\n```yaml\nlocalhost:\n name: 'local'\n host: 'localhost'\n port: 11211\n\n```\n##### localipv4\n\nAn example configuration for localipv4.\n\n{% details open=true summary=\"Config\" %}\n```yaml\nlocalhost:\n name: 'local'\n host: '127.0.0.1'\n port: 11211\n\n```\n{% /details %}\n##### localipv6\n\nAn example configuration for localipv6.\n\n{% details open=true summary=\"Config\" %}\n```yaml\nlocalhost:\n name: 'local'\n host: '::1'\n port: 11211\n\n```\n{% /details %}\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `memcached` collector, run the `python.d.plugin` with the debug option enabled. 
The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin memcached debug trace\n ```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ memcached_cache_memory_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/memcached.conf) | memcached.cache | cache memory utilization |\n| [ memcached_cache_fill_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/memcached.conf) | memcached.cache | average rate the cache fills up (positive), or frees up (negative) space over the last hour |\n| [ memcached_out_of_cache_space_time ](https://github.com/netdata/netdata/blob/master/src/health/health.d/memcached.conf) | memcached.cache | estimated time the cache will run out of space if the system continues to add data at the same rate as the past hour |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Memcached instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| memcached.cache | available, used | MiB |\n| memcached.net | in, out | kilobits/s |\n| memcached.connections | current, rejected, total | connections/s |\n| memcached.items | current, total | items |\n| memcached.evicted_reclaimed | reclaimed, evicted | items |\n| memcached.get | hits, misses | requests |\n| memcached.get_rate | rate | requests/s |\n| memcached.set_rate | rate | requests/s |\n| memcached.delete | hits, misses | requests |\n| memcached.cas | hits, misses, bad value | requests |\n| memcached.increment | hits, misses | requests |\n| memcached.decrement | hits, misses | requests |\n| memcached.touch | hits, misses | requests |\n| memcached.touch_rate | rate | requests/s |\n\n",integration_type:"collector",id:"python.d.plugin-memcached-Memcached",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/memcached/metadata.yaml",related_resources:""},{meta:{plugin_name:"python.d.plugin",module_name:"monit",monitored_instance:{name:"Monit",link:"https://mmonit.com/monit/",categories:["data-collection.synthetic-checks"],icon_filename:"monit.png"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["monit","mmonit","supervision tool","monitrc"],most_popular:!1},overview:"# Monit\n\nPlugin: python.d.plugin\nModule: monit\n\n## Overview\n\nThis collector monitors Monit targets such as filesystems, directories, files, FIFO pipes and more.\n\n\nIt gathers data from Monit's XML interface.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, this collector will attempt to connect to Monit at `http://localhost:2812`.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n
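\nYou can quickly verify that the XML interface is reachable from the Netdata host. This is only a sanity check, assuming Monit's embedded web server is enabled at the default `localhost:2812` address (depending on your `allow` settings, you may also need to pass credentials with `-u user:pass`):\n\n```bash\n# fetch the first lines of Monit's XML status page\ncurl -s http://localhost:2812/_status?format=xml | head\n```\n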
",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/monit.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/monit.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 1 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | local | no |\n| url | The URL to fetch Monit's metrics. | http://localhost:2812 | yes |\n| user | Username in case the URL is password protected. | | no |\n| pass | Password in case the URL is password protected. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic configuration example.\n\n```yaml\nlocalhost:\n name : 'local'\n url : 'http://localhost:2812'\n\n```\n##### Basic Authentication\n\nExample using basic username and password in order to authenticate.\n\n{% details open=true summary=\"Config\" %}\n```yaml\nlocalhost:\n name : 'local'\n url : 'http://localhost:2812'\n user: 'foo'\n pass: 'bar'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\nlocalhost:\n name: 'local'\n url: 'http://localhost:2812'\n\nremote_job:\n name: 'remote'\n url: 'http://192.0.2.1:2812'\n\n```\n{% /details %}\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `monit` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin monit debug trace\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Monit instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| monit.filesystems | a dimension per target | filesystems |\n| monit.directories | a dimension per target | directories |\n| monit.files | a dimension per target | files |\n| monit.fifos | a dimension per target | pipes |\n| monit.programs | a dimension per target | programs |\n| monit.services | a dimension per target | processes |\n| monit.process_uptime | a dimension per target | seconds |\n| monit.process_threads | a dimension per target | threads |\n| monit.process_childrens | a dimension per target | children |\n| monit.hosts | a dimension per target | hosts |\n| monit.host_latency | a dimension per target | milliseconds |\n| monit.networks | a dimension per target | interfaces |\n\n",integration_type:"collector",id:"python.d.plugin-monit-Monit",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/monit/metadata.yaml",related_resources:""},{meta:{plugin_name:"python.d.plugin",module_name:"nsd",monitored_instance:{name:"Name Server Daemon",link:"https://nsd.docs.nlnetlabs.nl/en/latest/#",categories:["data-collection.dns-and-dhcp-servers"],icon_filename:"nsd.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["nsd","name server daemon"],most_popular:!1},overview:"# Name Server Daemon\n\nPlugin: python.d.plugin\nModule: nsd\n\n## Overview\n\nThis collector monitors NSD statistics like queries, zones, protocols, query types and more.\n\n\nIt uses the `nsd-control stats_noreset` command to gather metrics.\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf permissions are satisfied, the collector will be able to run `nsd-control stats_noreset`, thus collecting metrics.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### NSD version\n\nThe version of `nsd` must be 4.0+.\n\n\n#### Provide Netdata the permissions to run the command\n\nNetdata must have permissions to run the `nsd-control stats_noreset` command.\n\nYou can:\n\n- Add "netdata" user to "nsd" group:\n ```\n usermod -aG nsd netdata\n ```\n- Add Netdata to sudoers\n 1. Edit the sudoers file:\n ```\n visudo -f /etc/sudoers.d/netdata\n ```\n 2. 
Add the entry:\n ```\n Defaults:netdata !requiretty\n netdata ALL=(ALL) NOPASSWD: /usr/sbin/nsd-control stats_noreset\n ```\n\n > Note that you will need to set the `command` option to `sudo /usr/sbin/nsd-control stats_noreset` if you use this method.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/nsd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/nsd.conf\n```\n#### Options\n\nThis particular collector does not need further configuration to work if permissions are satisfied, but you can always customize its data collection behavior.\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 30 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| command | The command to run. | nsd-control stats_noreset | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic configuration example.\n\n```yaml\nlocal:\n name: \'nsd_local\'\n command: \'nsd-control stats_noreset\'\n\n```\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `nsd` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin nsd debug trace\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Name Server Daemon instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nsd.queries | queries | queries/s |\n| nsd.zones | master, slave | zones |\n| nsd.protocols | udp, udp6, tcp, tcp6 | queries/s |\n| nsd.type | A, NS, CNAME, SOA, PTR, HINFO, MX, NAPTR, TXT, AAAA, SRV, ANY | queries/s |\n| nsd.transfer | NOTIFY, AXFR | queries/s |\n| nsd.rcode | NOERROR, FORMERR, SERVFAIL, NXDOMAIN, NOTIMP, REFUSED, YXDOMAIN | queries/s |\n\n",integration_type:"collector",id:"python.d.plugin-nsd-Name_Server_Daemon",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/nsd/metadata.yaml",related_resources:""},{meta:{plugin_name:"python.d.plugin",module_name:"openldap",monitored_instance:{name:"OpenLDAP",link:"https://www.openldap.org/",categories:["data-collection.authentication-and-authorization"],icon_filename:"statsd.png"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["openldap","RBAC","Directory access"],most_popular:!1},overview:"# OpenLDAP\n\nPlugin: python.d.plugin\nModule: openldap\n\n## Overview\n\nThis collector monitors OpenLDAP metrics about connections, operations, referrals and more.\n\nStatistics are taken from the monitoring interface of an openLDAP (slapd) server.\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis collector does not work until all of the prerequisites below are met.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Configure the openLDAP server to expose metrics to monitor it.\n\nFollow the instructions at https://www.openldap.org/doc/admin24/monitoringslapd.html to activate the monitoring interface.\n\n\n#### Install the python-ldap module\n\nInstall the python-ldap module:\n\n1. With the pip package manager:\n\n```bash\npip install python-ldap\n```\n\n2. With the apt package manager (on most deb-based distros):\n\n\n```bash\napt-get install python-ldap\n```\n\n\n3. 
With the yum package manager (on most rpm-based distros):\n\n\n```bash\nyum install python-ldap\n```\n\n\n#### Insert credentials for Netdata to access the openLDAP server\n\nUse the `ldappasswd` utility to set a password for the username you will use.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/openldap.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/openldap.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| username | The bind user with the right to access the monitor statistics | | yes |\n| password | The password for the bind user | | yes |\n| server | The listening address of the LDAP server. In case of TLS, use the hostname the certificate is published for. | | yes |\n| port | The listening port of the LDAP server. Change to port 636 in case of a TLS connection. | 389 | yes |\n| use_tls | Set to True if a TLS connection is used over ldaps:// | no | no |\n| use_start_tls | Set to True if a STARTTLS connection is used over ldap:// | no | no |\n| cert_check | Set to False to skip the certificate check | True | yes |\n| timeout | Seconds to wait before timing out when no connection exists | | yes |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\nusername: "cn=admin"\npassword: "pass"\nserver: "localhost"\nport: "389"\ncert_check: True\ntimeout: 1\n\n```\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `openldap` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin openldap debug trace\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per OpenLDAP instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| openldap.total_connections | connections | connections/s |\n| openldap.traffic_stats | sent | KiB/s |\n| openldap.operations_status | completed, initiated | ops/s |\n| openldap.referrals | sent | referrals/s |\n| openldap.entries | sent | entries/s |\n| openldap.ldap_operations | bind, search, unbind, add, delete, modify, compare | ops/s |\n| openldap.waiters | write, read | waiters/s |\n\n",integration_type:"collector",id:"python.d.plugin-openldap-OpenLDAP",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/openldap/metadata.yaml",related_resources:""},{meta:{plugin_name:"python.d.plugin",module_name:"oracledb",monitored_instance:{name:"Oracle DB",link:"https://docs.oracle.com/en/database/oracle/oracle-database/",categories:["data-collection.database-servers"],icon_filename:"oracle.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["database","oracle","data warehouse","SQL"],most_popular:!1},overview:"# Oracle DB\n\nPlugin: python.d.plugin\nModule: oracledb\n\n## Overview\n\nThis collector monitors OracleDB database metrics about sessions, tables, memory and more.\n\nIt collects the metrics via the supported database client library.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nIn order for this collector to work, it needs a read-only user `netdata` in the RDBMS.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nWhen the requirements are met, databases on the local host on port 1521 will be auto-detected.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\n#### Install the python-oracledb package\n\nYou can follow the official guide below to install the required package:\n\nSource: https://python-oracledb.readthedocs.io/en/latest/user_guide/installation.html\n\n\n#### Create a read-only user for netdata\n\nFollow the official instructions for your Oracle RDBMS to create a read-only user for netdata. A typical approach:\n\nConnect to your Oracle database with an administrative user and execute:\n\n```sql\nCREATE USER netdata IDENTIFIED BY <password>;\n\nGRANT CONNECT TO netdata;\nGRANT SELECT_CATALOG_ROLE TO netdata;\n```\n\n\n#### Edit the configuration\n\nEdit the configuration to:\n\n1. Provide a valid user for the netdata collector to access the database.\n2. Specify the network target this database is listening on.\n\n\n\n
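Before editing the configuration, you can optionally verify the user and connect string. This is only a sketch: it assumes the Oracle client (`sqlplus`) is installed on the Netdata host and reuses the example values (service `XE`, port `1521`) from the configuration below:\n\n```bash\n# EZConnect syntax: user/password@//host:port/service_name\nsqlplus netdata/<password>@//localhost:1521/XE\n```\n\nIf the login succeeds, the collector can connect with the same credentials.\n\n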
### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/oracledb.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/oracledb.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| user | The username for the user account. | no | yes |\n| password | The password for the user account. | no | yes |\n| server | The IP address or hostname (and port) of the Oracle Database Server. | no | yes |\n| service | The Oracle Database service name. To view the services available on your server run this query, `select SERVICE_NAME from gv$session where sid in (select sid from V$MYSTAT)`. | no | yes |\n| protocol | One of the strings \"tcp\" or \"tcps\", indicating whether to use unencrypted or encrypted network traffic. | no | yes |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration, with two jobs described for two databases.\n\n```yaml\nlocal:\n user: 'netdata'\n password: 'secret'\n server: 'localhost:1521'\n service: 'XE'\n protocol: 'tcps'\n\nremote:\n user: 'netdata'\n password: 'secret'\n server: '10.0.0.1:1521'\n service: 'XE'\n protocol: 'tcps'\n\n```\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `oracledb` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin oracledb debug trace\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Oracle DB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| oracledb.session_count | total, active | sessions |\n| oracledb.session_limit_usage | usage | % |\n| oracledb.logons | logons | events/s |\n| oracledb.physical_disk_read_writes | reads, writes | events/s |\n| oracledb.sorts_on_disks | sorts | events/s |\n| oracledb.full_table_scans | full table scans | events/s |\n| oracledb.database_wait_time_ratio | wait time ratio | % |\n| oracledb.shared_pool_free_memory | free memory | % |\n| oracledb.in_memory_sorts_ratio | in-memory sorts | % |\n| oracledb.sql_service_response_time | time | seconds |\n| oracledb.user_rollbacks | rollbacks | events/s |\n| oracledb.enqueue_timeouts | enqueue timeouts | events/s |\n| oracledb.cache_hit_ration | buffer, cursor, library, row | % |\n| oracledb.global_cache_blocks | corrupted, lost | events/s |\n| oracledb.activity | parse count, execute count, user commits, user rollbacks | events/s |\n| oracledb.wait_time | application, configuration, administrative, concurrency, commit, network, user I/O, system I/O, scheduler, other | ms |\n| oracledb.tablespace_size | a dimension per active tablespace | KiB |\n| oracledb.tablespace_usage | a dimension per active tablespace | KiB |\n| oracledb.tablespace_usage_in_percent | a dimension per active tablespace | % |\n| oracledb.allocated_size | a dimension per active tablespace | B |\n| oracledb.allocated_usage | a dimension per active tablespace | B |\n| oracledb.allocated_usage_in_percent | a dimension per active tablespace | % |\n\n",integration_type:"collector",id:"python.d.plugin-oracledb-Oracle_DB",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/oracledb/metadata.yaml",related_resources:""},{meta:{plugin_name:"python.d.plugin",module_name:"pandas",monitored_instance:{name:"Pandas",link:"https://pandas.pydata.org/",categories:["data-collection.generic-data-collection"],icon_filename:"pandas.png"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["pandas","python"],most_popular:!1},overview:"# Pandas\n\nPlugin: python.d.plugin\nModule: pandas\n\n## Overview\n\n[Pandas](https://pandas.pydata.org/) is a de-facto standard in reading and processing most types of structured data in Python.\nIf you have metrics appearing in a CSV, JSON, XML, HTML, or [other supported format](https://pandas.pydata.org/docs/user_guide/io.html),\neither locally or via some HTTP endpoint, you can easily ingest and present those metrics in Netdata, by leveraging the Pandas collector.\n\nThis collector can be used to collect pretty much anything that can be read and then processed by Pandas.\n\n\nThe collector uses [pandas](https://pandas.pydata.org/) to pull data and do pandas-based preprocessing, before feeding to Netdata.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on 
the system.\n",setup:"## Setup\n\n### Prerequisites\n\n#### Python Requirements\n\nThis collector depends on some Python (Python 3 only) packages that can usually be installed via `pip` or `pip3`.\n\n```bash\nsudo pip install pandas requests\n```\n\nNote: If you would like to use [`pandas.read_sql`](https://pandas.pydata.org/docs/reference/api/pandas.read_sql.html) to query a database, you will need to install the below packages as well.\n\n```bash\nsudo pip install 'sqlalchemy<2.0' psycopg2-binary\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/pandas.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/pandas.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| chart_configs | an array of chart configuration dictionaries | [] | yes |\n| chart_configs.name | name of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.title | title of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.family | [family](/docs/dashboards-and-charts/netdata-charts.md#families) of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.context | [context](/docs/dashboards-and-charts/netdata-charts.md#contexts) of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.type | the type of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.units | the units of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.df_steps | a series of pandas operations (one per line) that each returns a dataframe. | None | yes |\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Temperature API Example\n\nAn example pulling some hourly temperature data, with one chart for today's forecast (mean, min, max) and another chart for the current temperature.\n\n{% details open=true summary=\"Config\" %}\n```yaml\ntemperature:\n name: \"temperature\"\n update_every: 5\n chart_configs:\n - name: \"temperature_forecast_by_city\"\n title: \"Temperature By City - Today Forecast\"\n family: \"temperature.today\"\n context: \"pandas.temperature\"\n type: \"line\"\n units: \"Celsius\"\n df_steps: >\n pd.DataFrame.from_dict(\n {city: requests.get(f'https://api.open-meteo.com/v1/forecast?latitude={lat}&longitude={lng}&hourly=temperature_2m').json()['hourly']['temperature_2m']\n for (city,lat,lng)\n in [\n ('dublin', 53.3441, -6.2675),\n ('athens', 37.9792, 23.7166),\n ('london', 51.5002, -0.1262),\n ('berlin', 52.5235, 13.4115),\n ('paris', 48.8567, 2.3510),\n ('madrid', 40.4167, -3.7033),\n ('new_york', 40.71, -74.01),\n ('los_angeles', 34.05, -118.24),\n ]\n }\n );\n df.describe(); # get aggregate stats for each city;\n df.transpose()[['mean', 'max', 'min']].reset_index(); # just take mean, min, max;\n df.rename(columns={'index':'city'}); # some column renaming;\n df.pivot(columns='city').mean().to_frame().reset_index(); # force to be one row per city;\n df.rename(columns={0:'degrees'}); # some column renaming;\n pd.concat([df, df['city']+'_'+df['level_0']], axis=1); # add new column combining city and summary measurement label;\n df.rename(columns={0:'measurement'}); # some column renaming;\n df[['measurement', 'degrees']].set_index('measurement'); # just take two columns we want;\n df.sort_index(); # sort by city name;\n df.transpose(); # transpose so it's just one wide row;\n - name: \"temperature_current_by_city\"\n title: \"Temperature By City - Current\"\n family: \"temperature.current\"\n context: \"pandas.temperature\"\n type: \"line\"\n units: \"Celsius\"\n df_steps: >\n pd.DataFrame.from_dict(\n {city: requests.get(f'https://api.open-meteo.com/v1/forecast?latitude={lat}&longitude={lng}&current_weather=true').json()['current_weather']\n for (city,lat,lng)\n in [\n ('dublin', 53.3441, -6.2675),\n ('athens', 37.9792, 23.7166),\n ('london', 51.5002, -0.1262),\n ('berlin', 52.5235, 13.4115),\n ('paris', 48.8567, 2.3510),\n ('madrid', 40.4167, -3.7033),\n ('new_york', 40.71, -74.01),\n ('los_angeles', 34.05, -118.24),\n ]\n }\n );\n df.transpose();\n df[['temperature']];\n df.transpose();\n\n```\n{% /details %}\n##### API CSV Example\n\nAn example showing a `read_csv` from a URL and some light pandas data wrangling.\n\n{% details open=true summary=\"Config\" %}\n```yaml\nexample_csv:\n name: \"example_csv\"\n update_every: 2\n chart_configs:\n - name: \"london_system_cpu\"\n title: \"London System CPU - Ratios\"\n family: \"london_system_cpu\"\n context: \"pandas\"\n type: \"line\"\n units: \"n\"\n df_steps: >\n pd.read_csv('https://london.my-netdata.io/api/v1/data?chart=system.cpu&format=csv&after=-60', storage_options={'User-Agent': 'netdata'});\n df.drop('time', axis=1);\n df.mean().to_frame().transpose();\n df.apply(lambda row: (row.user / row.system), axis = 1).to_frame();\n df.rename(columns={0:'average_user_system_ratio'});\n df*100;\n\n```\n{% /details %}\n##### API JSON Example\n\nAn example showing a `read_json` from a URL and some light pandas data wrangling.\n\n{% details open=true summary=\"Config\" %}\n```yaml\nexample_json:\n name: \"example_json\"\n update_every: 2\n chart_configs:\n - name: \"london_system_net\"\n title: \"London System Net - Total 
Bandwidth\"\n family: \"london_system_net\"\n context: \"pandas\"\n type: \"area\"\n units: \"kilobits/s\"\n df_steps: >\n pd.DataFrame(requests.get('https://london.my-netdata.io/api/v1/data?chart=system.net&format=json&after=-1').json()['data'], columns=requests.get('https://london.my-netdata.io/api/v1/data?chart=system.net&format=json&after=-1').json()['labels']);\n df.drop('time', axis=1);\n abs(df);\n df.sum(axis=1).to_frame();\n df.rename(columns={0:'total_bandwidth'});\n\n```\n{% /details %}\n##### XML Example\n\nAn example showing a `read_xml` from a URL and some light pandas data wrangling.\n\n{% details open=true summary=\"Config\" %}\n```yaml\nexample_xml:\n name: \"example_xml\"\n update_every: 2\n line_sep: \"|\"\n chart_configs:\n - name: \"temperature_forecast\"\n title: \"Temperature Forecast\"\n family: \"temp\"\n context: \"pandas.temp\"\n type: \"line\"\n units: \"celsius\"\n df_steps: >\n pd.read_xml('http://metwdb-openaccess.ichec.ie/metno-wdb2ts/locationforecast?lat=54.7210798611;long=-8.7237392806', xpath='./product/time[1]/location/temperature', parser='etree')|\n df.rename(columns={'value': 'dublin'})|\n df[['dublin']]|\n\n```\n{% /details %}\n##### SQL Example\n\nAn example showing a `read_sql` from a Postgres database using SQLAlchemy.\n\n{% details open=true summary=\"Config\" %}\n```yaml\nsql:\n name: \"sql\"\n update_every: 5\n chart_configs:\n - name: \"sql\"\n title: \"SQL Example\"\n family: \"sql.example\"\n context: \"example\"\n type: \"line\"\n units: \"percent\"\n df_steps: >\n pd.read_sql_query(\n sql='\\\n select \\\n random()*100 as metric_1, \\\n random()*100 as metric_2 \\\n ',\n con=create_engine('postgresql://localhost/postgres?user=netdata&password=netdata')\n );\n\n```\n{% /details %}\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `pandas` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin pandas debug trace\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nThis collector expects one row in the final pandas DataFrame. 
It is that first row that will be taken\nas the most recent values for each dimension on each chart, using `df.to_dict(orient='records')[0]`.\nSee [pd.to_dict()](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_dict.html).\n\n\n### Per Pandas instance\n\nThese metrics refer to the entire monitored application.\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n\n",integration_type:"collector",id:"python.d.plugin-pandas-Pandas",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/pandas/metadata.yaml",related_resources:""},{meta:{plugin_name:"python.d.plugin",module_name:"postfix",monitored_instance:{name:"Postfix",link:"https://www.postfix.org/",categories:["data-collection.mail-servers"],icon_filename:"postfix.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["postfix","mail","mail server"],most_popular:!1},overview:"# Postfix\n\nPlugin: python.d.plugin\nModule: postfix\n\n## Overview\n\nKeep an eye on Postfix metrics for efficient mail server operations. \nImprove your mail server performance with Netdata's real-time metrics and built-in alerts.\n\n\nMonitors MTA email queue statistics using the [postqueue](http://www.postfix.org/postqueue.1.html) tool.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nPostfix has internal access controls that limit activities on the mail queue. By default, all users are allowed to view the queue. If your system is configured with stricter access controls, you need to grant the `netdata` user access to view the mail queue. In order to do it, add `netdata` to `authorized_mailq_users` in the `/etc/postfix/main.cf` file.\nSee the `authorized_mailq_users` setting in the [Postfix documentation](https://www.postfix.org/postconf.5.html) for more details.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector executes `postqueue -p` to get Postfix queue statistics.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 1 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n
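Although no configuration is needed, you can confirm that the `netdata` user is allowed to view the mail queue. A quick sanity check, mirroring the command the collector runs:\n\n```bash\nsudo -u netdata postqueue -p\n```\n\nIf the queue listing is printed (or you see "Mail queue is empty"), the collector has the access it needs.\n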
',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `postfix` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin postfix debug trace\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Postfix instance\n\nThese metrics refer to the entire monitored application.\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postfix.qemails | emails | emails |\n| postfix.qsize | size | KiB |\n\n",integration_type:"collector",id:"python.d.plugin-postfix-Postfix",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/postfix/metadata.yaml",related_resources:""},{meta:{plugin_name:"python.d.plugin",module_name:"puppet",monitored_instance:{name:"Puppet",link:"https://www.puppet.com/",categories:["data-collection.ci-cd-systems"],icon_filename:"puppet.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["puppet","jvm heap"],most_popular:!1},overview:"# Puppet\n\nPlugin: python.d.plugin\nModule: puppet\n\n## Overview\n\nThis collector monitors Puppet metrics about JVM Heap, Non-Heap, CPU usage and file descriptors.\n\n\nIt uses Puppet's metrics API endpoint to gather the metrics.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, this collector will use `https://fqdn.example.com:8140` as the URL to look for metrics.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/puppet.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/puppet.conf\n```\n#### Options\n\nThis particular collector does not need further configuration to work if permissions are satisfied, but you 
can always customize its data collection behavior.\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n> Notes:\n> - The exact Fully Qualified Domain Name of the node should be used.\n> - Puppet Server/DB startup is usually VERY slow, so the retry count should be reasonably high.\n> - A secured PuppetDB config may require a client certificate. This does not apply to the default PuppetDB configuration though.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| url | HTTP or HTTPS URL, exact Fully Qualified Domain Name of the node should be used. | https://fqdn.example.com:8081 | yes |\n| tls_verify | Control HTTPS server certificate verification. | False | no |\n| tls_ca_file | Optional CA (bundle) file to use | | no |\n| tls_cert_file | Optional client certificate file | | no |\n| tls_key_file | Optional client key file | | no |\n| update_every | Sets the default data collection frequency. | 30 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\npuppetserver:\n url: 'https://fqdn.example.com:8140'\n autodetection_retry: 1\n\n```\n##### TLS Certificate\n\nAn example using a TLS certificate.\n\n{% details open=true summary=\"Config\" %}\n```yaml\npuppetdb:\n url: 'https://fqdn.example.com:8081'\n tls_cert_file: /path/to/client.crt\n tls_key_file: /path/to/client.key\n autodetection_retry: 1\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\npuppetserver1:\n url: 'https://fqdn.example.com:8140'\n autodetection_retry: 1\n\npuppetserver2:\n url: 'https://fqdn.example2.com:8140'\n autodetection_retry: 1\n\n```\n{% /details %}\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `puppet` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin puppet debug trace\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Puppet instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| puppet.jvm_heap | committed, used | MiB |\n| puppet.jvm_nonheap | committed, used | MiB |\n| puppet.cpu | execution, GC | percentage |\n| puppet.fdopen | used | descriptors |\n\n",integration_type:"collector",id:"python.d.plugin-puppet-Puppet",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/puppet/metadata.yaml",related_resources:""},{meta:{plugin_name:"python.d.plugin",module_name:"rethinkdbs",monitored_instance:{name:"RethinkDB",link:"https://rethinkdb.com/",categories:["data-collection.database-servers"],icon_filename:"rethinkdb.png"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["rethinkdb","database","db"],most_popular:!1},overview:"# RethinkDB\n\nPlugin: python.d.plugin\nModule: rethinkdbs\n\n## Overview\n\nThis collector monitors metrics about RethinkDB clusters and database servers.\n\nIt uses the `rethinkdb` python module to connect to a RethinkDB server instance and gather statistics.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nWhen no configuration file is found, the collector tries to connect to 127.0.0.1:28015.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Required python module\n\nThe collector requires the `rethinkdb` python module to be installed.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/rethinkdbs.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/rethinkdbs.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details 
open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| host | Hostname or IP address of the RethinkDB server. | localhost | no |\n| port | Port to connect to the RethinkDB server. | 28015 | no |\n| user | The username to use to connect to the RethinkDB server. | admin | no |\n| password | The password to use to connect to the RethinkDB server. | | no |\n| timeout | Set a connect timeout to the RethinkDB server. | 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Local RethinkDB server\n\nAn example of a configuration for a local RethinkDB server.\n\n```yaml\nlocalhost:\n name: \'local\'\n host: \'127.0.0.1\'\n port: 28015\n user: "user"\n password: "pass"\n\n```\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `rethinkdbs` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin rethinkdbs debug trace\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per RethinkDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| rethinkdb.cluster_connected_servers | connected, missing | servers |\n| rethinkdb.cluster_clients_active | active | clients |\n| rethinkdb.cluster_queries | queries | queries/s |\n| rethinkdb.cluster_documents | reads, writes | documents/s |\n\n### Per database server\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| rethinkdb.client_connections | connections | connections |\n| rethinkdb.clients_active | active | clients |\n| rethinkdb.queries | queries | queries/s |\n| rethinkdb.documents | reads, writes | documents/s |\n\n",integration_type:"collector",id:"python.d.plugin-rethinkdbs-RethinkDB",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/rethinkdbs/metadata.yaml",related_resources:""},{meta:{plugin_name:"python.d.plugin",module_name:"retroshare",monitored_instance:{name:"RetroShare",link:"https://retroshare.cc/",categories:["data-collection.media-streaming-servers"],icon_filename:"retroshare.png"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["retroshare","p2p"],most_popular:!1},overview:"# RetroShare\n\nPlugin: python.d.plugin\nModule: retroshare\n\n## Overview\n\nThis collector monitors RetroShare statistics such as application bandwidth, peers, and DHT metrics.\n\nIt connects to the RetroShare web interface to gather metrics.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector will attempt to connect and detect a RetroShare web interface through http://localhost:9090, even without any configuration.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\n#### RetroShare web interface\n\nRetroShare needs to be configured to enable the RetroShare WEB Interface and allow access from the Netdata host.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/retroshare.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/retroshare.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | 
Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| url | The URL to the RetroShare Web UI. | http://localhost:9090 | no |\n\n{% /details %}\n#### Examples\n\n##### Local RetroShare Web UI\n\nA basic configuration for a RetroShare server running on localhost.\n\n{% details open=true summary=\"Config\" %}\n```yaml\nlocalhost:\n name: 'local retroshare'\n url: 'http://localhost:9090'\n\n```\n{% /details %}\n##### Remote RetroShare Web UI\n\nA basic configuration for a remote RetroShare server.\n\n{% details open=true summary=\"Config\" %}\n```yaml\nremote:\n name: 'remote retroshare'\n url: 'http://1.2.3.4:9090'\n\n```\n{% /details %}\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `retroshare` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin retroshare debug trace\n ```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ retroshare_dht_working ](https://github.com/netdata/netdata/blob/master/src/health/health.d/retroshare.conf) | retroshare.dht | number of DHT peers |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per RetroShare instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| retroshare.bandwidth | Upload, Download | kilobits/s |\n| retroshare.peers | All friends, Connected friends | peers |\n| retroshare.dht | DHT nodes estimated, RS nodes estimated | peers |\n\n",integration_type:"collector",id:"python.d.plugin-retroshare-RetroShare",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/retroshare/metadata.yaml",related_resources:""},{meta:{plugin_name:"python.d.plugin",module_name:"riakkv",monitored_instance:{name:"RiakKV",link:"https://riak.com/products/riak-kv/index.html",categories:["data-collection.database-servers"],icon_filename:"riak.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["database","nosql","big data"],most_popular:!1},overview:"# RiakKV\n\nPlugin: python.d.plugin\nModule: riakkv\n\n## Overview\n\nThis collector monitors RiakKV metrics about throughput, latency, resources and more.\n\n\nThis collector reads the database stats from the `/stats` endpoint.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf the /stats endpoint is accessible, RiakKV instances on the local host running on port 8098 will be autodetected.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\n#### Configure RiakKV to enable /stats endpoint\n\nYou can follow the RiakKV configuration reference documentation for how to enable this.\n\nSource: https://docs.riak.com/riak/kv/2.2.3/configuring/reference/#client-interfaces\n\n\n\n
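To quickly verify that the endpoint is reachable, you can fetch it the same way the collector does (an illustrative sketch only, not the module's actual code; the URL and the stat keys shown are assumptions based on the defaults above):\n\n```python\n# Fetch Riak stats with a plain HTTP GET of /stats, which returns one JSON document.\nimport json\nimport urllib.request\n\nwith urllib.request.urlopen('http://localhost:8098/stats') as resp:\n    stats = json.load(resp)\n\n# node_gets / node_puts are standard Riak stat keys behind the throughput chart.\nprint(stats.get('node_gets'), stats.get('node_puts'))\n```\n\n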
### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/riakkv.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/riakkv.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| url | The URL of the server. | no | yes |\n\n{% /details %}\n#### Examples\n\n##### Basic (default)\n\nA basic example configuration per job.\n\n```yaml\nlocal:\n url: 'http://localhost:8098/stats'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\nlocal:\n url: 'http://localhost:8098/stats'\n\nremote:\n url: 'http://192.0.2.1:8098/stats'\n\n```\n{% /details %}\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `riakkv` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin riakkv debug trace\n ```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ riakkv_1h_kv_get_mean_latency ](https://github.com/netdata/netdata/blob/master/src/health/health.d/riakkv.conf) | riak.kv.latency.get | average time between reception of client GET request and subsequent response to client over the last hour |\n| [ riakkv_kv_get_slow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/riakkv.conf) | riak.kv.latency.get | average time between reception of client GET request and subsequent response to the client over the last 3 minutes, compared to the average over the last hour |\n| [ riakkv_1h_kv_put_mean_latency ](https://github.com/netdata/netdata/blob/master/src/health/health.d/riakkv.conf) | riak.kv.latency.put | average time between reception of client PUT request and subsequent response to the client over the last hour |\n| [ riakkv_kv_put_slow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/riakkv.conf) | riak.kv.latency.put | average time between reception of client PUT request and subsequent response to the client over the last 3 minutes, compared to the average over the last hour |\n| [ riakkv_vm_high_process_count ](https://github.com/netdata/netdata/blob/master/src/health/health.d/riakkv.conf) | riak.vm | number of processes running in the Erlang VM |\n| [ riakkv_list_keys_active ](https://github.com/netdata/netdata/blob/master/src/health/health.d/riakkv.conf) | riak.core.fsm_active | number of currently running list keys finite state machines |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per RiakKV instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| riak.kv.throughput | gets, puts | operations/s |\n| riak.dt.vnode_updates | counters, sets, maps | operations/s |\n| riak.search | queries | queries/s |\n| riak.search.documents | indexed | documents/s |\n| riak.consistent.operations | gets, puts | operations/s |\n| riak.kv.latency.get | mean, median, 95, 99, 100 | ms |\n| riak.kv.latency.put | mean, median, 95, 99, 100 | ms |\n| riak.dt.latency.counter_merge | mean, median, 95, 99, 100 | ms |\n| riak.dt.latency.set_merge | mean, median, 95, 99, 100 | ms |\n| riak.dt.latency.map_merge | mean, median, 95, 99, 100 | ms |\n| riak.search.latency.query | median, min, 95, 99, 999, max | ms |\n| riak.search.latency.index | median, min, 95, 99, 999, max | ms |\n| riak.consistent.latency.get | mean, median, 95, 99, 100 | ms |\n| riak.consistent.latency.put | mean, median, 95, 99, 100 | ms |\n| riak.vm | processes | total |\n| riak.vm.memory.processes | allocated, used | MB |\n| riak.kv.siblings_encountered.get | mean, median, 95, 99, 100 | siblings |\n| riak.kv.objsize.get | mean, median, 95, 99, 100 | KB |\n| riak.search.vnodeq_size | mean, median, 95, 99, 100 | messages |\n| riak.search.index | errors | errors |\n| riak.core.protobuf_connections | active | connections |\n| riak.core.repairs | read | repairs |\n| riak.core.fsm_active | get, put, secondary index, list keys | fsms |\n| riak.core.fsm_rejected | get, put | fsms |\n| riak.search.index | bad_entry, extract_fail | writes |\n\n",integration_type:"collector",id:"python.d.plugin-riakkv-RiakKV",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/riakkv/metadata.yaml",related_resources:""},{meta:{plugin_name:"python.d.plugin",module_name:"samba",monitored_instance:{name:"Samba",link:"https://www.samba.org/samba/",categories:["data-collection.storage-mount-points-and-filesystems"],icon_filename:"samba.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["samba","file sharing"],most_popular:!1},overview:"# Samba\n\nPlugin: python.d.plugin\nModule: samba\n\n## Overview\n\nThis collector monitors the performance metrics of Samba file sharing.\n\nIt uses the `smbstatus` command-line tool.\n\nExecuted commands:\n\n- `sudo -n smbstatus -P`\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n`smbstatus` is used, which can only be executed by `root`. It uses `sudo` and assumes that it is configured such that the `netdata` user can execute `smbstatus` as root without a password.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAfter all the permissions are satisfied, the `smbstatus -P` binary is executed.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\n#### Enable the samba collector\n\nThe `samba` collector is disabled by default. 
To enable it, use `edit-config` from the Netdata [config directory](/docs/netdata-agent/configuration/README.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file.\n\n```bash\ncd /etc/netdata # Replace this path with your Netdata config directory, if different\nsudo ./edit-config python.d.conf\n```\nChange the value of the `samba` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](/packaging/installer/README.md#maintaining-a-netdata-agent-installation) for your system.\n\n\n#### Permissions and programs\n\nTo run the collector you need:\n\n- `smbstatus` program\n- `sudo` program\n- `smbd` must be compiled with profiling enabled\n- `smbd` must be started either with the `-P 1` option or inside `smb.conf` using `smbd profiling level`\n\nThe module uses `smbstatus`, which can only be executed by `root`. It uses `sudo` and assumes that it is configured such that the `netdata` user can execute `smbstatus` as root without a password.\n\n- Add to your `/etc/sudoers` file:\n\n `which smbstatus` shows the full path to the binary.\n\n ```bash\n netdata ALL=(root) NOPASSWD: /path/to/smbstatus\n ```\n\n- Reset Netdata's systemd unit [CapabilityBoundingSet](https://www.freedesktop.org/software/systemd/man/systemd.exec.html#Capabilities) (Linux distributions with systemd)\n\n The default CapabilityBoundingSet doesn't allow using `sudo`, and is quite strict in general. Resetting is not optimal, but it is the next-best solution, given that the default set prevents executing `smbstatus` via `sudo`.\n\n\n As the `root` user, do the following:\n\n ```bash\n mkdir /etc/systemd/system/netdata.service.d\n echo -e '[Service]\\nCapabilityBoundingSet=~' | tee /etc/systemd/system/netdata.service.d/unset-capability-bounding-set.conf\n systemctl daemon-reload\n systemctl restart netdata.service\n ```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/samba.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/samba.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. 
| yes | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\nmy_job_name:\n name: my_name\n update_every: 1\n\n```\n{% /details %}\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `samba` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin samba debug trace\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Samba instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| syscall.rw | sendfile, recvfile | KiB/s |\n| smb2.rw | readout, writein, readin, writeout | KiB/s |\n| smb2.create_close | create, close | operations/s |\n| smb2.get_set_info | getinfo, setinfo | operations/s |\n| smb2.find | find | operations/s |\n| smb2.notify | notify | operations/s |\n| smb2.sm_counters | tcon, negprot, tdis, cancel, logoff, flush, lock, keepalive, break, sessetup | count |\n\n",integration_type:"collector",id:"python.d.plugin-samba-Samba",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/samba/metadata.yaml",related_resources:""},{meta:{plugin_name:"python.d.plugin",module_name:"spigotmc",monitored_instance:{name:"SpigotMC",link:"",categories:["data-collection.gaming"],icon_filename:"spigot.jfif"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["minecraft server","spigotmc server","spigot"],most_popular:!1},overview:"# SpigotMC\n\nPlugin: python.d.plugin\nModule: spigotmc\n\n## Overview\n\nThis collector monitors SpigotMC server performance, in the form of ticks per second average, memory utilization, and active users.\n\n\nIt sends the `tps`, `list` and `online` commands to the server, and gathers the metrics from the responses.\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, this collector will attempt to connect to a Spigot server running on the local host on port `25575`.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Enable the Remote Console Protocol\n\nIn your SpigotMC server\'s `server.properties` configuration file, set `enable-rcon` to `true`.\n\nThis will allow the server to listen and respond to queries over the rcon 
protocol.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/spigotmc.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/spigotmc.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 1 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| host | The host\'s IP to connect to. | localhost | yes |\n| port | The port the remote console is listening on. | 25575 | yes |\n| password | Remote console password if any. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic configuration example.\n\n```yaml\nlocal:\n name: local_server\n host: 127.0.0.1\n port: 25575\n\n```\n##### Basic Authentication\n\nAn example using basic password for authentication with the remote console.\n\n{% details open=true summary="Config" %}\n```yaml\nlocal:\n name: local_server_pass\n host: 127.0.0.1\n port: 25575\n password: \'foobar\'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\nlocal_server:\n name : my_local_server\n host : 127.0.0.1\n port: 25575\n\nremote_server:\n name : another_remote_server\n host : 192.0.2.1\n port: 25575\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `spigotmc` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin spigotmc debug trace\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per SpigotMC instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| spigotmc.tps | 1 Minute Average, 5 Minute Average, 15 Minute Average | ticks |\n| spigotmc.users | Users | users |\n| spigotmc.mem | used, allocated, max | MiB |\n\n",integration_type:"collector",id:"python.d.plugin-spigotmc-SpigotMC",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/spigotmc/metadata.yaml",related_resources:""},{meta:{plugin_name:"python.d.plugin",module_name:"squid",monitored_instance:{name:"Squid",link:"http://www.squid-cache.org/",categories:["data-collection.web-servers-and-web-proxies"],icon_filename:"squid.png"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["squid","web delivery","squid caching proxy"],most_popular:!1},overview:"# Squid\n\nPlugin: python.d.plugin\nModule: squid\n\n## Overview\n\nThis collector monitors statistics about the Squid Clients and Servers, like bandwidth and requests.\n\n\nIt collects metrics from the endpoint where Squid exposes its `counters` data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, this collector will try to autodetect where Squid presents its `counters` data, by trying various configurations.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\n#### Configure Squid's Cache Manager\n\nTake a look at [Squid's official documentation](https://wiki.squid-cache.org/Features/CacheManager/Index#controlling-access-to-the-cache-manager) on how to configure access to the Cache Manager.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/squid.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/squid.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a 
JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 1 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | local | no |\n| host | The host to connect to. | | yes |\n| port | The port to connect to. | | yes |\n| request | The URL to request from Squid. | | yes |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic configuration example.\n\n```yaml\nexample_job_name:\n name: 'local'\n host: 'localhost'\n port: 3128\n request: 'cache_object://localhost:3128/counters'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\nlocal_job:\n name: 'local'\n host: '127.0.0.1'\n port: 3128\n request: 'cache_object://127.0.0.1:3128/counters'\n\nremote_job:\n name: 'remote'\n host: '192.0.2.1'\n port: 3128\n request: 'cache_object://192.0.2.1:3128/counters'\n\n```\n{% /details %}\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `squid` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin squid debug trace\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Squid instance\n\nThese metrics refer to each monitored Squid instance.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| squid.clients_net | in, out, hits | kilobits/s |\n| squid.clients_requests | requests, hits, errors | requests/s |\n| squid.servers_net | in, out | kilobits/s |\n| squid.servers_requests | requests, errors | requests/s |\n\n",integration_type:"collector",id:"python.d.plugin-squid-Squid",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/squid/metadata.yaml",related_resources:""},{meta:{plugin_name:"python.d.plugin",module_name:"tomcat",monitored_instance:{name:"Tomcat",link:"https://tomcat.apache.org/",categories:["data-collection.web-servers-and-web-proxies"],icon_filename:"tomcat.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["apache","tomcat","webserver","websocket","jakarta","javaEE"],most_popular:!1},overview:"# Tomcat\n\nPlugin: python.d.plugin\nModule: tomcat\n\n## Overview\n\nThis collector monitors Tomcat metrics about bandwidth, processing time, threads and more.\n\n\nIt parses the information provided by the HTTP endpoint `/manager/status` in XML format.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nYou need to provide a username and password to access the webserver's status page. Create a separate user with read-only rights for this particular endpoint.\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf the Netdata Agent and the Tomcat webserver are on the same host, the module attempts to connect to http://localhost:8080/manager/status?XML=true without any configuration or credentials. So it will probably fail.\n\n#### Limits\n\nThis module does not support SSL communication. If you want a Netdata Agent to monitor a Tomcat deployment, you shouldn't try to monitor it over a public network (the public internet). 
Credentials are passed by Netdata over an unencrypted connection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\n#### Create a read-only `netdata` user to monitor the `/status` endpoint.\n\nThis is necessary for configuring the collector.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/tomcat.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/tomcat.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details open=true summary=\"Config options per job\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| url | The URL of the Tomcat server's status endpoint. Always add the suffix ?XML=true. | no | yes |\n| user | A valid user with read permission to access the /manager/status endpoint of the server. Required if the endpoint is password protected. | no | no |\n| pass | A valid password for the user in question. Required if the endpoint is password protected. | no | no |\n| connector_name | The connector component that communicates with a web connector via the AJP protocol, e.g. ajp-bio-8009 | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\nlocalhost:\n name : 'local'\n url : 'http://localhost:8080/manager/status?XML=true'\n\n```\n##### Using an IPv4 endpoint\n\nA typical configuration using an IPv4 endpoint.\n\n{% details open=true summary=\"Config\" %}\n```yaml\nlocal_ipv4:\n name : 'local'\n url : 'http://127.0.0.1:8080/manager/status?XML=true'\n\n```\n{% /details %}\n##### Using an IPv6 endpoint\n\nA typical configuration using an IPv6 endpoint.\n\n{% details open=true summary=\"Config\" %}\n```yaml\nlocal_ipv6:\n name : 'local'\n url : 'http://[::1]:8080/manager/status?XML=true'\n\n```\n{% /details %}\n
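\nAs an illustrative sketch of what the collector does with this endpoint (not the module's actual code; the URL follows the defaults above, and the element names follow the Tomcat status XML layout):\n\n```python\n# Fetch and parse Tomcat's status XML, as exposed at /manager/status?XML=true.\nimport urllib.request\nimport xml.etree.ElementTree as ET\n\nurl = 'http://localhost:8080/manager/status?XML=true'\n# If the endpoint is password protected, add a Basic Auth header here.\nwith urllib.request.urlopen(url) as resp:\n    root = ET.fromstring(resp.read())\n\n# The <jvm><memory> element carries free/total/max heap sizes in bytes.\nmemory = root.find('jvm/memory')\nprint(memory.attrib)\n```\n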
",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `tomcat` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin tomcat debug trace\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Tomcat instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| tomcat.accesses | accesses, errors | requests/s |\n| tomcat.bandwidth | sent, received | KiB/s |\n| tomcat.processing_time | processing time | seconds |\n| tomcat.threads | current, busy | current threads |\n| tomcat.jvm | free, eden, survivor, tenured, code cache, compressed, metaspace | MiB |\n| tomcat.jvm_eden | used, committed, max | MiB |\n| tomcat.jvm_survivor | used, committed, max | MiB |\n| tomcat.jvm_tenured | used, committed, max | MiB |\n\n",integration_type:"collector",id:"python.d.plugin-tomcat-Tomcat",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/tomcat/metadata.yaml",related_resources:""},{meta:{plugin_name:"python.d.plugin",module_name:"tor",monitored_instance:{name:"Tor",link:"https://www.torproject.org/",categories:["data-collection.vpns"],icon_filename:"tor.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["tor","traffic","vpn"],most_popular:!1},overview:"# Tor\n\nPlugin: python.d.plugin\nModule: tor\n\n## Overview\n\nThis collector monitors Tor bandwidth traffic.\n\nIt connects to the Tor control port to collect traffic statistics.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf no configuration is provided, the collector will try to connect to 127.0.0.1:9051 to detect a running Tor instance.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\n#### Required python module\n\nThe `stem` python library needs to be installed.\n\n\n#### Required Tor configuration\n\nAdd to `/etc/tor/torrc`:\n\n`ControlPort 9051`\n\nFor more options, please read the manual.\n\n\n\n
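As a minimal sketch of what the collector does over this control port (illustrative only, assuming the `stem` library and the `ControlPort 9051` setting above):\n\n```python\n# Query Tor's total read/written byte counters via the control protocol.\nfrom stem.control import Controller\n\nwith Controller.from_port(address='127.0.0.1', port=9051) as controller:\n    controller.authenticate()  # pass password='...' if your torrc requires one\n    read = controller.get_info('traffic/read')        # total bytes read\n    written = controller.get_info('traffic/written')  # total bytes written\n    print(read, written)\n```\n\n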
### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/tor.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/tor.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| control_addr | Tor control IP address | 127.0.0.1 | no |\n| control_port | Tor control port. Can be either a TCP port, or a path to a socket file. | 9051 | no |\n| password | Tor control password | | no |\n\n{% /details %}\n#### Examples\n\n##### Local TCP\n\nA basic TCP configuration. `control_addr` is omitted and will default to `127.0.0.1`.\n\n{% details open=true summary=\"Config\" %}\n```yaml\nlocal_tcp:\n name: 'local'\n control_port: 9051\n password: # if required\n\n```\n{% /details %}\n##### Local socket\n\nA basic local socket configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\nlocal_socket:\n name: 'local'\n control_port: '/var/run/tor/control'\n password: # if required\n\n```\n{% /details %}\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `tor` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin tor debug trace\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Tor instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| tor.traffic | read, write | KiB/s |\n\n",integration_type:"collector",id:"python.d.plugin-tor-Tor",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/tor/metadata.yaml",related_resources:""},{meta:{plugin_name:"python.d.plugin",module_name:"uwsgi",monitored_instance:{name:"uWSGI",link:"https://github.com/unbit/uwsgi/tree/2.0.21",categories:["data-collection.web-servers-and-web-proxies"],icon_filename:"uwsgi.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["application server","python","web applications"],most_popular:!1},overview:"# uWSGI\n\nPlugin: python.d.plugin\nModule: uwsgi\n\n## Overview\n\nThis collector monitors uWSGI metrics about requests, workers, memory and more.\n\nIt collects every metric exposed from the stats server of uWSGI, either from the `stats.socket` or from the web server's TCP/IP socket.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis collector will auto-detect uWSGI instances deployed on the local host, running on port 1717, or exposing stats on socket `/tmp/stats.socket`.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\n#### Enable the uWSGI Stats server\n\nMake sure that your uWSGI instance exposes its metrics via a Stats server.\n\nSource: https://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html\n\n\n\n
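Once the Stats server is enabled, you can sanity-check it by reading the JSON it emits on connect (an illustrative sketch only, assuming `stats = 127.0.0.1:1717` in your uWSGI configuration):\n\n```python\n# The uWSGI stats server dumps one JSON document per connection, then closes it.\nimport json\nimport socket\n\nwith socket.create_connection(('127.0.0.1', 1717)) as sock:\n    data = b''.join(iter(lambda: sock.recv(4096), b''))\n\nstats = json.loads(data)\nprint(len(stats['workers']), 'workers')\n```\n\n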
### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/uwsgi.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/uwsgi.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | The JOB's name as it will appear on the dashboard (by default, the job_name). | job_name | no |\n| socket | The path to the uWSGI stats socket. | no | no |\n| host | The host to connect to. | no | no |\n| port | The port to connect to. | no | no |\n\n{% /details %}\n#### Examples\n\n##### Basic (default out-of-the-box)\n\nA basic example configuration; the auto-detection mechanism uses it by default. As all JOBs have the same name, only one of them can run at a time.\n\n{% details open=true summary=\"Config\" %}\n```yaml\nsocket:\n name : 'local'\n socket : '/tmp/stats.socket'\n\nlocalhost:\n name : 'local'\n host : 'localhost'\n port : 1717\n\nlocalipv4:\n name : 'local'\n host : '127.0.0.1'\n port : 1717\n\nlocalipv6:\n name : 'local'\n host : '::1'\n port : 1717\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\nlocal:\n name : 'local'\n host : 'localhost'\n port : 1717\n\nremote:\n name : 'remote'\n host : '192.0.2.1'\n port : 1717\n\n```\n{% /details %}\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `uwsgi` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin uwsgi debug trace\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per uWSGI instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| uwsgi.requests | a dimension per worker | requests/s |\n| uwsgi.tx | a dimension per worker | KiB/s |\n| uwsgi.avg_rt | a dimension per worker | milliseconds |\n| uwsgi.memory_rss | a dimension per worker | MiB |\n| uwsgi.memory_vsz | a dimension per worker | MiB |\n| uwsgi.exceptions | exceptions | exceptions |\n| uwsgi.harakiris | harakiris | harakiris |\n| uwsgi.respawns | respawns | respawns |\n\n",integration_type:"collector",id:"python.d.plugin-uwsgi-uWSGI",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/uwsgi/metadata.yaml",related_resources:""},{meta:{plugin_name:"python.d.plugin",module_name:"varnish",monitored_instance:{name:"Varnish",link:"https://varnish-cache.org/",categories:["data-collection.web-servers-and-web-proxies"],icon_filename:"varnish.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["varnish","varnishstat","varnishd","cache","web server","web cache"],most_popular:!1},overview:"# Varnish\n\nPlugin: python.d.plugin\nModule: varnish\n\n## Overview\n\nThis collector monitors Varnish metrics about HTTP accelerator global, Backends (VBE) and Storages (SMF, SMA, MSE) statistics.\n\nNote that both Varnish-Cache (free and open source) and Varnish-Plus (Commercial/Enterprise version) are supported.\n\n\nIt uses the `varnishstat` tool in order to collect the metrics.\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\nThe `netdata` user must be a member of the `varnish` group.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, if the permissions are satisfied, the `varnishstat` tool will be executed on the host.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\n#### Provide the necessary permissions\n\nIn order for the collector to work, you need to add the `netdata` user to the `varnish` user group, so that it can execute the `varnishstat` tool:\n\n```bash\nusermod -aG varnish netdata\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/varnish.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/varnish.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details 
open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| instance_name | the name of the varnishd instance to get logs from. If not specified, the local host name is used. | | yes |\n| update_every | Sets the default data collection frequency. | 10 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njob_name:\n instance_name: ''\n\n```\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `varnish` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin varnish debug trace\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Varnish instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| varnish.session_connection | accepted, dropped | connections/s |\n| varnish.client_requests | received | requests/s |\n| varnish.all_time_hit_rate | hit, miss, hitpass | percentage |\n| varnish.current_poll_hit_rate | hit, miss, hitpass | percentage |\n| varnish.cached_objects_expired | objects | expired/s |\n| varnish.cached_objects_nuked | objects | nuked/s |\n| varnish.threads_total | None | number |\n| varnish.threads_statistics | created, failed, limited | threads/s |\n| varnish.threads_queue_len | in queue | requests |\n| varnish.backend_connections | successful, unhealthy, reused, closed, recycled, failed | connections/s |\n| varnish.backend_requests | sent | requests/s |\n| varnish.esi_statistics | errors, warnings | problems/s |\n| varnish.memory_usage | free, allocated | MiB |\n| varnish.uptime | uptime | seconds |\n\n### Per Backend\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| varnish.backend | header, body | kilobits/s |\n\n### Per Storage\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| varnish.storage_usage | free, allocated | KiB |\n| varnish.storage_alloc_objs | allocated | objects |\n\n",integration_type:"collector",id:"python.d.plugin-varnish-Varnish",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/varnish/metadata.yaml",related_resources:""},{meta:{plugin_name:"python.d.plugin",module_name:"w1sensor",monitored_instance:{name:"1-Wire Sensors",link:"https://www.analog.com/en/product-category/1wire-temperature-sensors.html",categories:["data-collection.hardware-devices-and-sensors"],icon_filename:"1-wire.png"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["temperature","sensor","1-wire"],most_popular:!1},overview:"# 1-Wire Sensors\n\nPlugin: python.d.plugin\nModule: w1sensor\n\n## Overview\n\nMonitor 1-Wire Sensors metrics with Netdata for optimal environmental conditions monitoring. Enhance your environmental monitoring with real-time insights and alerts.\n\nThe collector uses the wire, w1_gpio, and w1_therm kernel modules. 
Currently temperature sensors are supported and automatically detected.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector will try to auto detect available 1-Wire devices.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\n#### Required Linux kernel modules\n\nMake sure `wire`, `w1_gpio`, and `w1_therm` kernel modules are loaded.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/w1sensor.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/w1sensor.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| name_<1-Wire id> | This allows associating a human readable name with a sensor's 1-Wire identifier. | | no |\n\n{% /details %}\n#### Examples\n\n##### Provide human readable names\n\nAssociate two 1-Wire identifiers with human readable names.\n\n```yaml\nsensors:\n name_00000022276e: 'Machine room'\n name_00000022298f: 'Rack 12'\n\n```\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `w1sensor` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin w1sensor debug trace\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per 1-Wire Sensors instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| w1sensor.temp | a dimension per sensor | Celsius |\n\n",integration_type:"collector",id:"python.d.plugin-w1sensor-1-Wire_Sensors",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/w1sensor/metadata.yaml",related_resources:""},{meta:{plugin_name:"python.d.plugin",module_name:"zscores",monitored_instance:{name:"python.d zscores",link:"https://en.wikipedia.org/wiki/Standard_score",categories:["data-collection.other"],icon_filename:""},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["zscore","z-score","standard score","standard deviation","anomaly detection","statistical anomaly detection"],most_popular:!1},overview:"# python.d zscores\n\nPlugin: python.d.plugin\nModule: zscores\n\n## Overview\n\nBy using smoothed, rolling [Z-Scores](https://en.wikipedia.org/wiki/Standard_score) for selected metrics or charts, you can narrow down your focus and shorten root cause analysis.\n\n\nThis collector uses the [Netdata REST API](/src/web/api/README.md) to get the `mean` and `stddev`\nfor each dimension on specified charts over a time range (defined by `train_secs` and `offset_secs`).\n\nFor each dimension it will calculate a Z-Score as `z = (x - mean) / stddev` (clipped at `z_clip`). 
Scores are then smoothed over\ntime (`z_smooth_n`) and, if `mode: 'per_chart'`, aggregated across dimensions to a smoothed, rolling chart level Z-Score at each time step.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\n#### Python Requirements\n\nThis collector will only work with Python 3 and requires the below packages be installed.\n\n```bash\n# become netdata user\nsudo su -s /bin/bash netdata\n# install required packages\npip3 install numpy pandas requests netdata-pandas==0.0.38\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/zscores.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/zscores.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| charts_regex | what charts to pull data for - A regex like `system\\..*/` or `system\\..*/apps.cpu/apps.mem` etc. | system\\..* | yes |\n| train_secs | length of time (in seconds) to base calculations off for mean and stddev. | 14400 | yes |\n| offset_secs | offset (in seconds) preceding latest data to ignore when calculating mean and stddev. | 300 | yes |\n| train_every_n | recalculate the mean and stddev every n steps of the collector. | 900 | yes |\n| z_smooth_n | smooth the z score (to reduce sensitivity to spikes) by averaging it over last n values. | 15 | yes |\n| z_clip | cap absolute value of zscore (before smoothing) for better stability. | 10 | yes |\n| z_abs | set z_abs: 'true' to make all zscores be absolute values only. | true | yes |\n| burn_in | burn in period in which to initially calculate mean and stddev on every step. | 2 | yes |\n| mode | mode can be to get a zscore 'per_dim' or 'per_chart'. | per_chart | yes |\n| per_chart_agg | per_chart_agg is how you aggregate from dimension to chart when mode='per_chart'. | mean | yes |\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. 
| yes | no |\n\n{% /details %}\n#### Examples\n\n##### Default\n\nDefault configuration.\n\n```yaml\nlocal:\n name: 'local'\n host: '127.0.0.1:19999'\n charts_regex: 'system\\..*'\n charts_to_exclude: 'system.uptime'\n train_secs: 14400\n offset_secs: 300\n train_every_n: 900\n z_smooth_n: 15\n z_clip: 10\n z_abs: 'true'\n burn_in: 2\n mode: 'per_chart'\n per_chart_agg: 'mean'\n\n```\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `zscores` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin zscores debug trace\n ```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per python.d zscores instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| zscores.z | a dimension per chart or dimension | z |\n| zscores.3stddev | a dimension per chart or dimension | count |\n\n",integration_type:"collector",id:"python.d.plugin-zscores-python.d_zscores",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/zscores/metadata.yaml",related_resources:""},{meta:{plugin_name:"slabinfo.plugin",module_name:"slabinfo.plugin",monitored_instance:{name:"Linux kernel SLAB allocator statistics",link:"https://kernel.org/",categories:["data-collection.linux-systems.kernel-metrics"],icon_filename:"linuxserver.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["linux kernel","slab","slub","slob","slabinfo"],most_popular:!1},overview:"# Linux kernel SLAB allocator statistics\n\nPlugin: slabinfo.plugin\nModule: slabinfo.plugin\n\n## Overview\n\nCollects metrics on kernel SLAB cache utilization to monitor the low-level performance impact of workloads in the kernel.\n\n\nThe plugin parses `/proc/slabinfo`\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\nThis integration requires read access to `/proc/slabinfo`, which is accessible only to the root user by default. Netdata uses Linux Capabilities to give the plugin access to this file. `CAP_DAC_READ_SEARCH` is added automatically during installation. This capability allows bypassing file read permission checks and directory read and execute permission checks. 
If file capabilities are not usable, then the plugin is instead installed with the SUID bit set in permissions so that it runs as root.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nDue to the large number of metrics generated by this integration, it is disabled by default and must be manually enabled inside `/etc/netdata/netdata.conf`.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Minimum setup\n\nIf you installed `netdata` using a package manager, it is also necessary to install the package `netdata-plugin-slabinfo`.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugins]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary="The main configuration file." %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| Enable plugin | As described above, the plugin is disabled by default; this option is used to enable it. | no | yes |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nSLAB cache utilization metrics for the whole system.\n\n### Per Linux kernel SLAB allocator statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.slabmemory | a dimension per cache | B |\n| mem.slabfilling | a dimension per cache | % |\n| mem.slabwaste | a dimension per cache | B |\n\n",integration_type:"collector",id:"slabinfo.plugin-slabinfo.plugin-Linux_kernel_SLAB_allocator_statistics",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/slabinfo.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"tc.plugin",module_name:"tc.plugin",monitored_instance:{name:"tc QoS classes",link:"https://wiki.linuxfoundation.org/networking/iproute2",categories:["data-collection.linux-systems.network-metrics"],icon_filename:"netdata.png"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[],most_popular:!1},overview:"# tc QoS classes\n\nPlugin: tc.plugin\nModule: tc.plugin\n\n## Overview\n\nExamine tc metrics to gain insights into Linux traffic control operations. 
Study packet flow rates, queue lengths, and drop rates to optimize network traffic flow.\n\nThe plugin uses the `tc` command to collect information about traffic control.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs to access the `tc` command to get the necessary metrics. To achieve this, Netdata modifies the permissions of the file `/usr/libexec/netdata/plugins.d/tc-qos-helper.sh`.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Create `tc-qos-helper.conf`\n\nIn order to view tc classes, you need to create the file `/etc/netdata/tc-qos-helper.conf` with content:\n\n```conf\ntc_show="class"\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:tc]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary="Config option" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| script to run to get tc values | Path to script `tc-qos-helper.sh` | /usr/libexec/netdata/plugins.d/tc-qos-helper.sh | no |\n| enable show all classes and qdiscs for all interfaces | yes/no flag to control what data is presented. | yes | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration using classes defined in `/etc/iproute2/tc_cls`.\n\nAn example of class IDs mapped to names in that file can be:\n\n```conf\n2:1 Standard\n2:8 LowPriorityData\n2:10 HighThroughputData\n2:16 OAM\n2:18 LowLatencyData\n2:24 BroadcastVideo\n2:26 MultimediaStreaming\n2:32 RealTimeInteractive\n2:34 MultimediaConferencing\n2:40 Signalling\n2:46 Telephony\n2:48 NetworkControl\n```\n\nYou can read more about setting up the tc rules in rc.local in this [GitHub issue](https://github.com/netdata/netdata/issues/4563#issuecomment-455711973).\n\n\n```yaml\n[plugin:tc]\n script to run to get tc values = /usr/libexec/netdata/plugins.d/tc-qos-helper.sh\n enable show all classes and qdiscs for all interfaces = yes\n\n```\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per network device direction\n\nMetrics related to QoS network device directions. Each direction (in/out) produces its own set of the following metrics.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | The network interface. 
|\n| device_name | The network interface name |\n| group | The device family |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| tc.qos | a dimension per class | kilobits/s |\n| tc.qos_packets | a dimension per class | packets/s |\n| tc.qos_dropped | a dimension per class | packets/s |\n| tc.qos_tokens | a dimension per class | tokens |\n| tc.qos_ctokens | a dimension per class | ctokens |\n\n",integration_type:"collector",id:"tc.plugin-tc.plugin-tc_QoS_classes",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/tc.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"timex.plugin",module_name:"timex.plugin",monitored_instance:{name:"Timex",link:"",categories:["data-collection.system-clock-and-ntp"],icon_filename:"syslog.png"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[],most_popular:!1},overview:"# Timex\n\nPlugin: timex.plugin\nModule: timex.plugin\n\n## Overview\n\nExamine Timex metrics to gain insights into system clock operations. Study time sync status, clock drift, and adjustments to ensure accurate system timekeeping.\n\nIt uses system call adjtimex on Linux and ntp_adjtime on FreeBSD or Mac to monitor the system kernel clock synchronization state.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:timex]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nAt least one option ('clock synchronization state', 'time offset') needs to be enabled for this collector to run.\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| clock synchronization state | Make chart showing system clock synchronization state. 
| yes | yes |\n| time offset | Make chart showing computed time offset between local system and reference clock. | yes | yes |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic configuration example.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n[plugin:timex]\n update every = 1\n clock synchronization state = yes\n time offset = yes\n\n```\n{% /details %}\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ system_clock_sync_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/timex.conf) | system.clock_sync_state | when set to 0, the system kernel believes the system clock is not properly synchronized to a reliable server |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Timex instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.clock_sync_state | state | state |\n| system.clock_status | unsync, clockerr | status |\n| system.clock_sync_offset | offset | milliseconds |\n\n",integration_type:"collector",id:"timex.plugin-timex.plugin-Timex",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/timex.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"xenstat.plugin",module_name:"xenstat.plugin",monitored_instance:{name:"Xen XCP-ng",link:"https://xenproject.org/",categories:["data-collection.containers-and-vms"],icon_filename:"xen.png"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[],most_popular:!1},overview:"# Xen XCP-ng\n\nPlugin: xenstat.plugin\nModule: xenstat.plugin\n\n## Overview\n\nThis collector monitors XenServer and XCP-ng host and domain statistics.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis plugin requires the `xen-dom0-libs-devel` and `yajl-devel` libraries to be installed.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n### Prerequisites\n\n#### Libraries\n\n1. Install `xen-dom0-libs-devel` and `yajl-devel` using the package manager of your system.\n\n Note: On CentOS systems you will need the `centos-release-xen` repository, and the required package for Xen is `xen-devel`.\n\n2. Re-install Netdata from source. The installer will detect that the required libraries are now available and will also build xenstat.plugin.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:xenstat]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Xen XCP-ng instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| xenstat.mem | free, used | MiB |\n| xenstat.domains | domains | domains |\n| xenstat.cpus | cpus | cpus |\n| xenstat.cpu_freq | frequency | MHz |\n\n### Per xendomain\n\nMetrics related to Xen domains. Each domain provides its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| xendomain.states | running, blocked, paused, shutdown, crashed, dying | boolean |\n| xendomain.cpu | used | percentage |\n| xendomain.mem | maximum, current | MiB |\n| xendomain.vcpu | a dimension per vcpu | percentage |\n\n### Per xendomain vbd\n\nMetrics related to Xen domain Virtual Block Device. Each VBD provides its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| xendomain.oo_req_vbd | requests | requests/s |\n| xendomain.requests_vbd | read, write | requests/s |\n| xendomain.sectors_vbd | read, write | sectors/s |\n\n### Per xendomain network\n\nMetrics related to Xen domain network interfaces. 
Each network interface provides its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| xendomain.bytes_network | received, sent | kilobits/s |\n| xendomain.packets_network | received, sent | packets/s |\n| xendomain.errors_network | received, sent | errors/s |\n| xendomain.drops_network | received, sent | drops/s |\n\n",integration_type:"collector",id:"xenstat.plugin-xenstat.plugin-Xen_XCP-ng",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/xenstat.plugin/metadata.yaml",related_resources:""},{id:"deploy-alpinelinux",meta:{name:"Alpine Linux",link:"https://www.alpinelinux.org/",categories:["deploy.operating-systems"],icon_filename:"alpine.svg"},keywords:["linux"],most_popular:!1,install_description:"Run the following command on your node to install and claim Netdata:",methods:[{method:"wget",commands:[{channel:"nightly",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]},{method:"curl",commands:[{channel:"nightly",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}],additional_info:'Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId="deploy.docker-kubernetes" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId="deploy.docker-kubernetes" %}Docker{% /goToCategory %}?\n',related_resources:{},platform_info:"\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.",quick_start:-1,integration_type:"deploy",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"},{id:"deploy-amazonlinux",meta:{name:"Amazon Linux",link:"https://aws.amazon.com/amazon-linux-2/",categories:["deploy.operating-systems"],icon_filename:"amazonlinux.png"},keywords:["linux"],most_popular:!1,install_description:"Run the following command on your node to install and claim Netdata:",methods:[{method:"wget",commands:[{channel:"nightly",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if 
%}\n"}]},{method:"curl",commands:[{channel:"nightly",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}],additional_info:'Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId="deploy.docker-kubernetes" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId="deploy.docker-kubernetes" %}Docker{% /goToCategory %}?\n',related_resources:{},platform_info:"We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 2 | Core | x86_64, aarch64 | |\n| 2023 | Core | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.",quick_start:-1,integration_type:"deploy",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"},{id:"deploy-archlinux",meta:{name:"Arch Linux",link:"https://archlinux.org/",categories:["deploy.operating-systems"],icon_filename:"archlinux.png"},keywords:["linux"],most_popular:!1,install_description:"Run the following command on your node to install and claim Netdata:",methods:[{method:"wget",commands:[{channel:"nightly",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]},{method:"curl",commands:[{channel:"nightly",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}],additional_info:'Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId="deploy.docker-kubernetes" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId="deploy.docker-kubernetes" %}Docker{% /goToCategory %}?\n',related_resources:{},platform_info:"We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| latest | Intermediate | | |\n\nOn other releases of this distribution, a static binary will be installed in 
`/opt/netdata`.",quick_start:-1,integration_type:"deploy",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"},{id:"deploy-centos",meta:{name:"CentOS",link:"https://www.centos.org/",categories:["deploy.operating-systems"],icon_filename:"centos.svg"},keywords:["linux"],most_popular:!1,install_description:"Run the following command on your node to install and claim Netdata:",methods:[{method:"wget",commands:[{channel:"nightly",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]},{method:"curl",commands:[{channel:"nightly",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}],additional_info:'Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId="deploy.docker-kubernetes" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId="deploy.docker-kubernetes" %}Docker{% /goToCategory %}?\n',related_resources:{},platform_info:"We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 7 | Core | x86_64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.",quick_start:-1,integration_type:"deploy",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"},{id:"deploy-centos-stream",meta:{name:"CentOS Stream",link:"https://www.centos.org/centos-stream",categories:["deploy.operating-systems"],icon_filename:"centos.svg"},keywords:["linux"],most_popular:!1,install_description:"Run the following command on your node to install and claim Netdata:",methods:[{method:"wget",commands:[{channel:"nightly",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]},{method:"curl",commands:[{channel:"nightly",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% 
claim_url %}{% /if %}\n"},{channel:"stable",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}],additional_info:'Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId="deploy.docker-kubernetes" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId="deploy.docker-kubernetes" %}Docker{% /goToCategory %}?\n',related_resources:{},platform_info:"We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 9 | Community | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.",quick_start:-1,integration_type:"deploy",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"},{id:"deploy-debian",meta:{name:"Debian",link:"https://www.debian.org/",categories:["deploy.operating-systems"],icon_filename:"debian.svg"},keywords:["linux"],most_popular:!1,install_description:"Run the following command on your node to install and claim Netdata:",methods:[{method:"wget",commands:[{channel:"nightly",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]},{method:"curl",commands:[{channel:"nightly",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}],additional_info:'Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId="deploy.docker-kubernetes" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId="deploy.docker-kubernetes" %}Docker{% /goToCategory %}?\n',related_resources:{},platform_info:"We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 12 | Core | i386, amd64, armhf, arm64 | |\n| 11 | Core | i386, amd64, armhf, arm64 | |\n| 10 | Core | i386, amd64, armhf, arm64 | |\n\nOn other releases of this distribution, a static binary will be installed in 
`/opt/netdata`.",quick_start:-1,integration_type:"deploy",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"},{id:"deploy-docker",meta:{name:"Docker",link:"https://www.docker.com/",categories:["deploy.docker-kubernetes"],icon_filename:"docker.svg"},most_popular:!0,keywords:["docker","container","containers"],install_description:"Install and connect new Docker containers\nFind the commands for `docker run`, `docker compose` or `Docker Swarm`. On the last two you can copy the configs, then run `docker-compose up -d` in the same directory as the `docker-compose.yml`\n\n> Netdata container requires different privileges and mounts to provide functionality similar to that provided by Netdata installed on the host. More info [here](https://learn.netdata.cloud/docs/installing/docker?_gl=1*f2xcnf*_ga*MTI1MTUwMzU0OS4xNjg2NjM1MDA1*_ga_J69Z2JCTFB*MTY5MDMxMDIyMS40MS4xLjE2OTAzMTAzNjkuNTguMC4w#create-a-new-netdata-agent-container)\n> Netdata will use the hostname from the container in which it is run instead of that of the host system. To change the default hostname check [here](https://learn.netdata.cloud/docs/agent/packaging/docker?_gl=1*i5weve*_ga*MTI1MTUwMzU0OS4xNjg2NjM1MDA1*_ga_J69Z2JCTFB*MTY5MDMxMjM4Ny40Mi4xLjE2OTAzMTIzOTAuNTcuMC4w#change-the-default-hostname)\n",methods:[{method:"Docker CLI",commands:[{channel:"nightly",command:"docker run -d --name=netdata \\\n--pid=host \\\n--network=host \\\n-v netdataconfig:/etc/netdata \\\n-v netdatalib:/var/lib/netdata \\\n-v netdatacache:/var/cache/netdata \\\n-v /etc/passwd:/host/etc/passwd:ro \\\n-v /etc/group:/host/etc/group:ro \\\n-v /etc/localtime:/etc/localtime:ro \\\n-v /proc:/host/proc:ro \\\n-v /sys:/host/sys:ro \\\n-v /etc/os-release:/host/etc/os-release:ro \\\n-v /var/log:/host/var/log:ro \\\n-v /var/run/docker.sock:/var/run/docker.sock:ro \\\n--restart unless-stopped \\\n--cap-add SYS_PTRACE \\\n--cap-add SYS_ADMIN \\\n--security-opt apparmor=unconfined \\\n{% if $showClaimingOptions %}\n-e NETDATA_CLAIM_TOKEN={% claim_token %} \\\n-e NETDATA_CLAIM_URL={% claim_url %} \\\n-e NETDATA_CLAIM_ROOMS={% $claim_rooms %} \\\n{% /if %}\nnetdata/netdata:edge\n"},{channel:"stable",command:"docker run -d --name=netdata \\\n--pid=host \\\n--network=host \\\n-v netdataconfig:/etc/netdata \\\n-v netdatalib:/var/lib/netdata \\\n-v netdatacache:/var/cache/netdata \\\n-v /etc/passwd:/host/etc/passwd:ro \\\n-v /etc/group:/host/etc/group:ro \\\n-v /etc/localtime:/etc/localtime:ro \\\n-v /proc:/host/proc:ro \\\n-v /sys:/host/sys:ro \\\n-v /etc/os-release:/host/etc/os-release:ro \\\n-v /var/log:/host/var/log:ro \\\n-v /var/run/docker.sock:/var/run/docker.sock:ro \\\n--restart unless-stopped \\\n--cap-add SYS_PTRACE \\\n--cap-add SYS_ADMIN \\\n--security-opt apparmor=unconfined \\\n{% if $showClaimingOptions %}\n-e NETDATA_CLAIM_TOKEN={% claim_token %} \\\n-e NETDATA_CLAIM_URL={% claim_url %} \\\n-e NETDATA_CLAIM_ROOMS={% $claim_rooms %} \\\n{% /if %}\nnetdata/netdata:stable\n"}]},{method:"Docker Compose",commands:[{channel:"nightly",command:"version: '3'\nservices:\n netdata:\n image: netdata/netdata:edge\n container_name: netdata\n pid: host\n network_mode: host\n restart: unless-stopped\n cap_add:\n - SYS_PTRACE\n - SYS_ADMIN\n security_opt:\n - apparmor:unconfined\n volumes:\n - netdataconfig:/etc/netdata\n - netdatalib:/var/lib/netdata\n - netdatacache:/var/cache/netdata\n - /etc/passwd:/host/etc/passwd:ro\n - /etc/group:/host/etc/group:ro\n - /etc/localtime:/etc/localtime:ro\n - /proc:/host/proc:ro\n - /sys:/host/sys:ro\n - 
/etc/os-release:/host/etc/os-release:ro\n - /var/log:/host/var/log:ro\n - /var/run/docker.sock:/var/run/docker.sock:ro\n{% if $showClaimingOptions %}\n environment:\n - NETDATA_CLAIM_TOKEN={% claim_token %}\n - NETDATA_CLAIM_URL={% claim_url %}\n - NETDATA_CLAIM_ROOMS={% $claim_rooms %}\n{% /if %}\nvolumes:\n netdataconfig:\n netdatalib:\n netdatacache:\n"},{channel:"stable",command:"version: '3'\nservices:\n netdata:\n image: netdata/netdata:stable\n container_name: netdata\n pid: host\n network_mode: host\n restart: unless-stopped\n cap_add:\n - SYS_PTRACE\n - SYS_ADMIN\n security_opt:\n - apparmor:unconfined\n volumes:\n - netdataconfig:/etc/netdata\n - netdatalib:/var/lib/netdata\n - netdatacache:/var/cache/netdata\n - /etc/passwd:/host/etc/passwd:ro\n - /etc/group:/host/etc/group:ro\n - /etc/localtime:/etc/localtime:ro\n - /proc:/host/proc:ro\n - /sys:/host/sys:ro\n - /etc/os-release:/host/etc/os-release:ro\n - /var/log:/host/var/log:ro\n - /var/run/docker.sock:/var/run/docker.sock:ro\n{% if $showClaimingOptions %}\n environment:\n - NETDATA_CLAIM_TOKEN={% claim_token %}\n - NETDATA_CLAIM_URL={% claim_url %}\n - NETDATA_CLAIM_ROOMS={% $claim_rooms %}\n{% /if %}\nvolumes:\n netdataconfig:\n netdatalib:\n netdatacache:\n"}]},{method:"Docker Swarm",commands:[{channel:"nightly",command:"version: '3'\nservices:\n netdata:\n image: netdata/netdata:edge\n pid: host\n network_mode: host\n cap_add:\n - SYS_PTRACE\n - SYS_ADMIN\n security_opt:\n - apparmor:unconfined\n volumes:\n - netdataconfig:/etc/netdata\n - netdatalib:/var/lib/netdata\n - netdatacache:/var/cache/netdata\n - /etc/passwd:/host/etc/passwd:ro\n - /etc/group:/host/etc/group:ro\n - /etc/localtime:/etc/localtime:ro\n - /proc:/host/proc:ro\n - /sys:/host/sys:ro\n - /etc/os-release:/host/etc/os-release:ro\n - /etc/hostname:/etc/hostname:ro\n - /var/log:/host/var/log:ro\n - /var/run/docker.sock:/var/run/docker.sock:ro\n{% if $showClaimingOptions %}\n environment:\n - NETDATA_CLAIM_TOKEN={% claim_token %}\n - NETDATA_CLAIM_URL={% claim_url %}\n - NETDATA_CLAIM_ROOMS={% $claim_rooms %}\n{% /if %}\n deploy:\n mode: global\n restart_policy:\n condition: on-failure\nvolumes:\n netdataconfig:\n netdatalib:\n netdatacache:\n"},{channel:"stable",command:"version: '3'\nservices:\n netdata:\n image: netdata/netdata:stable\n pid: host\n network_mode: host\n cap_add:\n - SYS_PTRACE\n - SYS_ADMIN\n security_opt:\n - apparmor:unconfined\n volumes:\n - netdataconfig:/etc/netdata\n - netdatalib:/var/lib/netdata\n - netdatacache:/var/cache/netdata\n - /etc/passwd:/host/etc/passwd:ro\n - /etc/group:/host/etc/group:ro\n - /etc/localtime:/etc/localtime:ro\n - /proc:/host/proc:ro\n - /sys:/host/sys:ro\n - /etc/os-release:/host/etc/os-release:ro\n - /etc/hostname:/etc/hostname:ro\n - /var/log:/host/var/log:ro\n - /var/run/docker.sock:/var/run/docker.sock:ro\n{% if $showClaimingOptions %}\n environment:\n - NETDATA_CLAIM_TOKEN={% claim_token %}\n - NETDATA_CLAIM_URL={% claim_url %}\n - NETDATA_CLAIM_ROOMS={% $claim_rooms %}\n{% /if %}\n deploy:\n mode: global\n restart_policy:\n condition: on-failure\nvolumes:\n netdataconfig:\n netdatalib:\n netdatacache:\n"}]}],additional_info:"",related_resources:{},platform_info:"We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 19.03 or newer | Core | linux/i386, linux/amd64, linux/arm/v7, linux/arm64, linux/ppc64le | |\n\nOn other releases of this distribution, a static binary 
will be installed in `/opt/netdata`.",quick_start:3,integration_type:"deploy",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"},{id:"deploy-fedora",meta:{name:"Fedora",link:"https://www.fedoraproject.org/",categories:["deploy.operating-systems"],icon_filename:"fedora.png"},keywords:["linux"],most_popular:!1,install_description:"Run the following command on your node to install and claim Netdata:",methods:[{method:"wget",commands:[{channel:"nightly",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]},{method:"curl",commands:[{channel:"nightly",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}],additional_info:'Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId="deploy.docker-kubernetes" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId="deploy.docker-kubernetes" %}Docker{% /goToCategory %}?\n',related_resources:{},platform_info:"We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 40 | Core | x86_64, aarch64 | |\n| 39 | Core | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.",quick_start:-1,integration_type:"deploy",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"},{id:"deploy-freebsd",meta:{name:"FreeBSD",link:"https://www.freebsd.org/",categories:["deploy.operating-systems"],icon_filename:"freebsd.svg"},most_popular:!0,keywords:["freebsd"],install_description:"## Install dependencies\nPlease install the following packages using the command below:\n\n```bash\npkg install bash e2fsprogs-libuuid git curl autoconf automake pkgconf pidof liblz4 libuv json-c cmake gmake\n```\n\nThis step needs root privileges. 
Please respond in the affirmative for any relevant prompts during the installation process.\n\nRun the following command on your node to install and claim Netdata:\n",methods:[{method:"curl",commands:[{channel:"nightly",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]},{method:"fetch",commands:[{channel:"nightly",command:"fetch -o /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"fetch -o /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}],additional_info:"Netdata can also be installed via [FreeBSD ports](https://www.freshports.org/net-mgmt/netdata).\n",related_resources:{},platform_info:"We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 13-STABLE | Community | | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.",quick_start:6,integration_type:"deploy",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"},{id:"deploy-kubernetes",meta:{name:"Kubernetes (Helm)",link:"",categories:["deploy.docker-kubernetes"],icon_filename:"kubernetes.svg"},keywords:["kubernetes","container","Orchestrator"],install_description:"**Use helm install to install Netdata on your Kubernetes cluster**\nFor a new installation use `helm install`, or for existing clusters add the content below to your `override.yaml` and then run `helm upgrade -f override.yaml netdata netdata/netdata`\n",methods:[{method:"Helm",commands:[{channel:"nightly",command:'helm install netdata netdata/netdata \\\n--set image.tag=edge{% if $showClaimingOptions %} \\\n--set parent.claiming.enabled="true" \\\n--set parent.claiming.token={% claim_token %} \\\n--set parent.claiming.rooms={% $claim_rooms %} \\\n--set child.claiming.enabled="true" \\\n--set child.claiming.token={% claim_token %} \\\n--set child.claiming.rooms={% $claim_rooms %}{% /if %}\n'},{channel:"stable",command:'helm install netdata netdata/netdata \\\n--set image.tag=stable{% if $showClaimingOptions %} \\\n--set parent.claiming.enabled="true" \\\n--set parent.claiming.token={% claim_token %} \\\n--set parent.claiming.rooms={% $claim_rooms %} \\\n--set child.claiming.enabled="true" \\\n--set child.claiming.token={% claim_token %} \\\n--set child.claiming.rooms={% $claim_rooms %}{% /if %}\n'}]},{method:"Existing Cluster",commands:[{channel:"nightly",command:"image:\n tag: edge\n\nrestarter:\n enabled: true\n{% if $showClaimingOptions %}\n\nparent:\n claiming:\n enabled: true\n token: {% claim_token %}\n rooms: {% $claim_rooms %}\n\nchild:\n claiming:\n 
enabled: true\n token: {% claim_token %}\n rooms: {% $claim_rooms %}\n{% /if %}\n"},{channel:"stable",command:"image:\n tag: stable\n\nrestarter:\n enabled: true\n{% if $showClaimingOptions %}\n\nparent:\n claiming:\n enabled: true\n token: {% claim_token %}\n rooms: {% $claim_rooms %}\n\nchild:\n claiming:\n enabled: true\n token: {% claim_token %}\n rooms: {% $claim_rooms %}\n{% /if %}\n"}]}],additional_info:"",related_resources:{},most_popular:!0,platform_info:"\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.",quick_start:4,integration_type:"deploy",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"},{id:"deploy-linux-generic",meta:{name:"Linux",link:"",categories:["deploy.operating-systems"],icon_filename:"linux.svg"},keywords:["linux"],most_popular:!0,install_description:"Run the following command on your node to install and claim Netdata:",methods:[{method:"wget",commands:[{channel:"nightly",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]},{method:"curl",commands:[{channel:"nightly",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}],additional_info:'Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId="deploy.docker-kubernetes" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId="deploy.docker-kubernetes" %}Docker{% /goToCategory %}?\n',related_resources:{},platform_info:"\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.",quick_start:1,integration_type:"deploy",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"},{id:"deploy-macos",meta:{name:"macOS",link:"",categories:["deploy.operating-systems"],icon_filename:"macos.svg"},most_popular:!0,keywords:["macOS","mac","apple"],install_description:"Run the following command on your Intel based OSX, macOS servers to install and claim Netdata:",methods:[{method:"curl",commands:[{channel:"nightly",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% 
/if %}\n"}]}],additional_info:'Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId="deploy.docker-kubernetes" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId="deploy.docker-kubernetes" %}Docker{% /goToCategory %}?\n',related_resources:{},platform_info:"We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 13 | Community | | |\n| 12 | Community | | |\n| 11 | Community | | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.",quick_start:5,integration_type:"deploy",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"},{id:"deploy-manjarolinux",meta:{name:"Manjaro Linux",link:"https://manjaro.org/",categories:["deploy.operating-systems"],icon_filename:"manjaro.svg"},keywords:["linux"],most_popular:!1,install_description:"Run the following command on your node to install and claim Netdata:",methods:[{method:"wget",commands:[{channel:"nightly",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]},{method:"curl",commands:[{channel:"nightly",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}],additional_info:'Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId="deploy.docker-kubernetes" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId="deploy.docker-kubernetes" %}Docker{% /goToCategory %}?\n',related_resources:{},platform_info:"We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| latest | Intermediate | | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.",quick_start:-1,integration_type:"deploy",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"},{id:"deploy-opensuse",meta:{name:"SUSE Linux",link:"https://www.suse.com/",categories:["deploy.operating-systems"],icon_filename:"openSUSE.svg"},keywords:["linux"],most_popular:!1,install_description:"Run the following command on your node to install and claim Netdata:",methods:[{method:"wget",commands:[{channel:"nightly",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} 
--claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]},{method:"curl",commands:[{channel:"nightly",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}],additional_info:'Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId="deploy.docker-kubernetes" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId="deploy.docker-kubernetes" %}Docker{% /goToCategory %}?\n',related_resources:{},platform_info:"We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 15.6 | Core | x86_64, aarch64 | |\n| 15.5 | Core | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.",quick_start:-1,integration_type:"deploy",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"},{id:"deploy-oraclelinux",meta:{name:"Oracle Linux",link:"https://www.oracle.com/linux/",categories:["deploy.operating-systems"],icon_filename:"oraclelinux.svg"},keywords:["linux"],most_popular:!1,install_description:"Run the following command on your node to install and claim Netdata:",methods:[{method:"wget",commands:[{channel:"nightly",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]},{method:"curl",commands:[{channel:"nightly",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}],additional_info:'Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId="deploy.docker-kubernetes" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId="deploy.docker-kubernetes" 
%}Docker{% /goToCategory %}?\n',related_resources:{},platform_info:"We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 8 | Core | x86_64, aarch64 | |\n| 9 | Core | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.",quick_start:-1,integration_type:"deploy",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"},{id:"deploy-rhel",meta:{name:"Red Hat Enterprise Linux",link:"https://www.redhat.com/en/technologies/linux-platforms/enterprise-linux",categories:["deploy.operating-systems"],icon_filename:"rhel.png"},keywords:["linux"],most_popular:!1,install_description:"Run the following command on your node to install and claim Netdata:",methods:[{method:"wget",commands:[{channel:"nightly",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]},{method:"curl",commands:[{channel:"nightly",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}],additional_info:'Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId="deploy.docker-kubernetes" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId="deploy.docker-kubernetes" %}Docker{% /goToCategory %}?\n',related_resources:{},platform_info:"We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 9.x | Core | x86_64, aarch64 | |\n| 8.x | Core | x86_64, aarch64 | |\n| 7.x | Core | x86_64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.",quick_start:-1,integration_type:"deploy",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"},{id:"deploy-rockylinux",meta:{name:"Rocky Linux",link:"https://rockylinux.org/",categories:["deploy.operating-systems"],icon_filename:"rocky.svg"},keywords:["linux"],most_popular:!1,install_description:"Run the following command on your node to install and claim Netdata:",methods:[{method:"wget",commands:[{channel:"nightly",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"wget -O 
/tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]},{method:"curl",commands:[{channel:"nightly",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}],additional_info:'Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId="deploy.docker-kubernetes" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId="deploy.docker-kubernetes" %}Docker{% /goToCategory %}?\n',related_resources:{},platform_info:"We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 9 | Core | x86_64, aarch64 | |\n| 8 | Core | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.",quick_start:-1,integration_type:"deploy",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"},{id:"deploy-ubuntu",meta:{name:"Ubuntu",link:"https://ubuntu.com/",categories:["deploy.operating-systems"],icon_filename:"ubuntu.svg"},keywords:["linux"],most_popular:!1,install_description:"Run the following command on your node to install and claim Netdata:",methods:[{method:"wget",commands:[{channel:"nightly",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]},{method:"curl",commands:[{channel:"nightly",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}],additional_info:'Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId="deploy.docker-kubernetes" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId="deploy.docker-kubernetes" %}Docker{% /goToCategory %}?\n',related_resources:{},platform_info:"We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | 
Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 24.04 | Core | amd64, armhf, arm64 | |\n| 23.10 | Core | amd64, armhf, arm64 | |\n| 22.04 | Core | amd64, armhf, arm64 | |\n| 20.04 | Core | amd64, armhf, arm64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.",quick_start:-1,integration_type:"deploy",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"},{id:"deploy-windows",meta:{name:"Windows",link:"https://www.microsoft.com/en-us/windows",categories:["deploy.operating-systems"],icon_filename:"windows.svg"},keywords:["windows"],install_description:"1. Install [Windows Exporter](https://github.com/prometheus-community/windows_exporter) on every Windows host you want to monitor.\n2. Install the Netdata Agent on Linux, FreeBSD, or macOS.\n3. Configure Netdata to collect data remotely from your Windows hosts by adding one job per host to the windows.conf file. See the [configuration section](https://learn.netdata.cloud/docs/data-collection/monitor-anything/System%20Metrics/Windows-machines#configuration) for details.\n4. Enable the [virtual nodes](https://learn.netdata.cloud/docs/data-collection/windows-systems#virtual-nodes) configuration so the Windows nodes are displayed as separate nodes.\n",methods:[{method:"wget",commands:[{channel:"nightly",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]},{method:"curl",commands:[{channel:"nightly",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}],additional_info:"",related_resources:{},most_popular:!0,platform_info:"\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.",quick_start:2,integration_type:"deploy",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"},{id:"export-appoptics",meta:{name:"AppOptics",link:"https://www.solarwinds.com/appoptics",categories:["export"],icon_filename:"solarwinds.svg",keywords:["app optics","AppOptics","Solarwinds"]},keywords:["exporter","Prometheus","remote write","time series"],overview:"# AppOptics\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries 
installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. 
This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",troubleshooting:""},{id:"export-aws-kinesis",meta:{name:"AWS Kinesis",link:"https://aws.amazon.com/kinesis/",categories:["export"],icon_filename:"aws-kinesis.svg"},keywords:["exporter","AWS","Kinesis"],overview:"# AWS Kinesis\n\nExport metrics to AWS Kinesis Data Streams\n\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- First, [install](https://docs.aws.amazon.com/en_us/sdk-for-cpp/v1/developer-guide/setup.html) the AWS SDK for C++.\n- Here are the instructions for building from source, to ensure third-party dependencies are installed:\n ```bash\n git clone --recursive https://github.com/aws/aws-sdk-cpp.git\n cd aws-sdk-cpp/\n git submodule update --init --recursive\n mkdir BUILT\n cd BUILT\n cmake -DCMAKE_INSTALL_PREFIX=/usr -DBUILD_ONLY=kinesis ..\n make\n make install\n ```\n- `libcrypto`, `libssl`, and `libcurl` are also required to compile Netdata with Kinesis support enabled.\n- Next, Netdata should be reinstalled from source. 
The installer will detect that the required libraries are now available.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nNetdata automatically computes a partition key for every record to distribute records evenly across the available shards.\nThe following options can be defined for this exporter.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | Netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. 
If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:4242 10.11.14.3:4242 10.11.14.4:4242\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic configuration\n\n```yaml\n[kinesis:my_instance]\n enabled = yes\n destination = us-east-1\n\n```\n##### Configuration with AWS credentials\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `kinesis:https:my_instance`.\n\n```yaml\n[kinesis:my_instance]\n enabled = yes\n destination = us-east-1\n # AWS credentials\n aws_access_key_id = your_access_key_id\n aws_secret_access_key = your_secret_access_key\n # destination stream\n stream name = your_stream_name\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/aws_kinesis/metadata.yaml",troubleshooting:""},{id:"export-azure-data",meta:{name:"Azure Data Explorer",link:"https://azure.microsoft.com/en-us/pricing/details/data-explorer/",categories:["export"],icon_filename:"azuredataex.jpg",keywords:["Azure Data Explorer","Azure"]},keywords:["exporter","Prometheus","remote write","time series"],overview:"# Azure Data Explorer\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). 
| | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",troubleshooting:""},{id:"export-azure-event",meta:{name:"Azure Event Hub",link:"https://learn.microsoft.com/en-us/azure/event-hubs/event-hubs-about",categories:["export"],icon_filename:"azureeventhub.png",keywords:["Azure Event Hub","Azure"]},keywords:["exporter","Prometheus","remote write","time series"],overview:"# Azure Event Hub\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). 
| | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",troubleshooting:""},{id:"export-bigquery",meta:{name:"Google BigQuery",link:"https://cloud.google.com/bigquery/",categories:["export"],icon_filename:"bigquery.png",keywords:["export","Google BigQuery","prometheus","remote write"]},keywords:["exporter","Prometheus","remote write","time series"],overview:"# Google BigQuery\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). 
| | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",troubleshooting:""},{id:"export-blueflood",meta:{name:"Blueflood",link:"http://blueflood.io/",categories:["export"],icon_filename:"blueflood.png",keywords:["export","Blueflood","graphite"]},keywords:["exporter","graphite","remote write","time series"],overview:"# Blueflood\n\nUse the Graphite connector for the exporting engine to archive your Netdata metrics to Graphite providers for long-term storage,\nfurther analysis, or correlation with data from other sources.\n\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- You have already installed Netdata and Graphite.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). 
| | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic configuration\n\n```yaml\n[graphite:netdata]\n enabled = yes\n destination = localhost:2003\n\n```\n##### Configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `graphite:https:netdata`.\n\n```yaml\n[graphite:https:netdata]\n enabled = yes\n destination = localhost:2003\n username = my_username\n password = my_password\n\n```\n##### Detailed Configuration for a remote, secure host\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `graphite:https:netdata`.\n\n```yaml\n[graphite:https:netdata]\n enabled = yes\n username = my_username\n password = my_password\n destination = 10.10.1.114:2003\n # data source = average\n # prefix = netdata\n # hostname = my_hostname\n # update every = 10\n # buffer on failures = 10\n # timeout ms = 20000\n # send names instead of ids = yes\n # send charts matching = *\n # send hosts matching = localhost *\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/graphite/metadata.yaml",troubleshooting:""},{id:"export-chronix",meta:{name:"Chronix",link:"https://dbdb.io/db/chronix",categories:["export"],icon_filename:"chronix.png",keywords:["export","chronix","prometheus","remote write"]},keywords:["exporter","Prometheus","remote write","time series"],overview:"# Chronix\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. 
| * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",troubleshooting:""},{id:"export-cortex",meta:{name:"Cortex",link:"https://cortexmetrics.io/",categories:["export"],icon_filename:"cortex.png",keywords:["export","cortex","prometheus","remote write"]},keywords:["exporter","Prometheus","remote write","time series"],overview:"# Cortex\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). 
| | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",troubleshooting:""},{id:"export-crate",meta:{name:"CrateDB",link:"https://crate.io/",categories:["export"],icon_filename:"crate.svg",keywords:["export","CrateDB","prometheus","remote write"]},keywords:["exporter","Prometheus","remote write","time series"],overview:"# CrateDB\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). 
| | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",troubleshooting:""},{id:"export-elastic",meta:{name:"ElasticSearch",link:"https://www.elastic.co/",categories:["export"],icon_filename:"elasticsearch.svg",keywords:["export","ElasticSearch","prometheus","remote write"]},keywords:["exporter","Prometheus","remote write","time series"],overview:"# ElasticSearch\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). 
| | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",troubleshooting:""},{id:"export-gnocchi",meta:{name:"Gnocchi",link:"https://wiki.openstack.org/wiki/Gnocchi",categories:["export"],icon_filename:"gnocchi.svg",keywords:["export","Gnocchi","prometheus","remote write"]},keywords:["exporter","Prometheus","remote write","time series"],overview:"# Gnocchi\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). 
| | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",troubleshooting:""},{id:"export-google-pubsub",meta:{name:"Google Cloud Pub Sub",link:"https://cloud.google.com/pubsub",categories:["export"],icon_filename:"pubsub.png"},keywords:["exporter","Google Cloud","Pub Sub"],overview:"# Google Cloud Pub Sub\n\nExport metrics to Google Cloud Pub/Sub Service.\n\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- First, [install](https://github.com/googleapis/google-cloud-cpp/) the Google Cloud Platform C++ Client Libraries\n- Pub/Sub support also depends on the dependencies of those libraries, such as `protobuf`, `protoc`, and `grpc`\n- Next, Netdata should be re-installed from source. The installer will detect that the required libraries are now available.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | pubsub.googleapis.com | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | Netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). 
| | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = pubsub.googleapis.com\n ```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Basic configuration\n\n- Set the destination option to a Pub/Sub service endpoint. 
pubsub.googleapis.com is the default one.\n- Create the credentials JSON file by following Google Cloud\'s authentication guide.\n- The user running the Agent (typically netdata) needs read access to google_cloud_credentials.json, which you can set with\n `chmod 400 google_cloud_credentials.json; chown netdata google_cloud_credentials.json`\n- Set the credentials file option to the full path of the file.\n\n\n```yaml\n[pubsub:my_instance]\n enabled = yes\n destination = pubsub.googleapis.com\n credentials file = /etc/netdata/google_cloud_credentials.json\n project id = my_project\n topic id = my_topic\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/pubsub/metadata.yaml",troubleshooting:""},{id:"export-graphite",meta:{name:"Graphite",link:"https://graphite.readthedocs.io/en/latest/",categories:["export"],icon_filename:"graphite.png"},keywords:["exporter","graphite","remote write","time series"],overview:"# Graphite\n\nUse the Graphite connector for the exporting engine to archive your Netdata metrics to Graphite providers for long-term storage,\nfurther analysis, or correlation with data from other sources.\n\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- You have already installed Netdata and Graphite.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). 
| | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic configuration\n\n```yaml\n[graphite:netdata]\n enabled = yes\n destination = localhost:2003\n\n```\n##### Configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `graphite:https:netdata`.\n\n```yaml\n[graphite:https:netdata]\n enabled = yes\n destination = localhost:2003\n username = my_username\n password = my_password\n\n```\n##### Detailed Configuration for a remote, secure host\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `graphite:https:netdata`.\n\n```yaml\n[graphite:https:netdata]\n enabled = yes\n username = my_username\n password = my_password\n destination = 10.10.1.114:2003\n # data source = average\n # prefix = netdata\n # hostname = my_hostname\n # update every = 10\n # buffer on failures = 10\n # timeout ms = 20000\n # send names instead of ids = yes\n # send charts matching = *\n # send hosts matching = localhost *\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/graphite/metadata.yaml",troubleshooting:""},{id:"export-influxdb",meta:{name:"InfluxDB",link:"https://www.influxdata.com/",categories:["export"],icon_filename:"influxdb.svg",keywords:["InfluxDB","Influx","export","graphite"]},keywords:["exporter","graphite","remote write","time series"],overview:"# InfluxDB\n\nUse the Graphite connector for the exporting engine to archive your Netdata metrics to Graphite providers for long-term storage,\nfurther analysis, or correlation with data from other sources.\n\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- You have already installed Netdata and Graphite.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. 
| * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic configuration\n\n```yaml\n[graphite:netdata]\n enabled = yes\n destination = localhost:2003\n\n```\n##### Configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `graphite:https:netdata`.\n\n```yaml\n[graphite:https:netdata]\n enabled = yes\n destination = localhost:2003\n username = my_username\n password = my_password\n\n```\n##### Detailed Configuration for a remote, secure host\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `graphite:https:netdata`.\n\n```yaml\n[graphite:https:netdata]\n enabled = yes\n username = my_username\n password = my_password\n destination = 10.10.1.114:2003\n # data source = average\n # prefix = netdata\n # hostname = my_hostname\n # update every = 10\n # buffer on failures = 10\n # timeout ms = 20000\n # send names instead of ids = yes\n # send charts matching = *\n # send hosts matching = localhost *\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/graphite/metadata.yaml",troubleshooting:""},{id:"export-irondb",meta:{name:"IRONdb",link:"https://docs.circonus.com/irondb/",categories:["export"],icon_filename:"irondb.png",keywords:["export","IRONdb","prometheus","remote write"]},keywords:["exporter","Prometheus","remote write","time series"],overview:"# IRONdb\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. 
The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n
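\nFor instance, a minimal sketch of this filter on a hypothetical central Netdata, reusing this entry's own example instance and the pattern discussed above:\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n send hosts matching = !*child* *db*\n```\n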
\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `prometheus_remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",troubleshooting:""},{id:"export-json",meta:{name:"JSON",link:"",categories:["export"],icon_filename:"json.svg"},keywords:["exporter","json"],overview:"# JSON\n\nUse the JSON connector for the exporting engine to archive your agent's metrics to JSON document databases for long-term storage,\nfurther analysis, or correlation with data from other sources.\n\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | localhost:5448 | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | Netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. 
| * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = localhost:5448\n ```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Basic configuration\n\n\n\n```yaml\n[json:my_json_instance]\n enabled = yes\n destination = localhost:5448\n\n```\n##### Configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `json:https:my_json_instance`.\n\n```yaml\n[json:https:my_json_instance]\n enabled = yes\n destination = localhost:5448\n username = my_username\n password = my_password\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/json/metadata.yaml",troubleshooting:""},{id:"export-kafka",meta:{name:"Kafka",link:"https://kafka.apache.org/",categories:["export"],icon_filename:"kafka.svg",keywords:["export","Kafka","prometheus","remote write"]},keywords:["exporter","Prometheus","remote write","time series"],overview:"# Kafka\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata re-installed after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). 
| | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `prometheus_remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",troubleshooting:""},{id:"export-kairosdb",meta:{name:"KairosDB",link:"https://kairosdb.github.io/",categories:["export"],icon_filename:"kairos.png",keywords:["KairosDB","kairos","export","graphite"]},keywords:["exporter","graphite","remote write","time series"],overview:"# KairosDB\n\nUse the Graphite connector for the exporting engine to archive your Netdata metrics to Graphite providers for long-term storage,\nfurther analysis, or correlation with data from other sources.\n\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- You have already installed Netdata and Graphite.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). 
| | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic configuration\n\n```yaml\n[graphite:netdata]\n enabled = yes\n destination = localhost:2003\n\n```\n##### Configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `graphite:https:netdata`.\n\n```yaml\n[graphite:https:netdata]\n enabled = yes\n destination = localhost:2003\n username = my_username\n password = my_password\n\n```\n##### Detailed Configuration for a remote, secure host\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `graphite:https:netdata`.\n\n```yaml\n[graphite:https:netdata]\n enabled = yes\n username = my_username\n password = my_password\n destination = 10.10.1.114:2003\n # data source = average\n # prefix = netdata\n # hostname = my_hostname\n # update every = 10\n # buffer on failures = 10\n # timeout ms = 20000\n # send names instead of ids = yes\n # send charts matching = *\n # send hosts matching = localhost *\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/graphite/metadata.yaml",troubleshooting:""},{id:"export-m3db",meta:{name:"M3DB",link:"https://m3db.io/",categories:["export"],icon_filename:"m3db.png",keywords:["export","M3DB","prometheus","remote write"]},keywords:["exporter","Prometheus","remote write","time series"],overview:"# M3DB\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata re-installed after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). 
| | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `prometheus_remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",troubleshooting:""},{id:"export-metricfire",meta:{name:"MetricFire",link:"https://www.metricfire.com/",categories:["export"],icon_filename:"metricfire.png",keywords:["export","MetricFire","prometheus","remote write"]},keywords:["exporter","Prometheus","remote write","time series"],overview:"# MetricFire\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata re-installed after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). 
| | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `prometheus_remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",troubleshooting:""},{id:"export-mongodb",meta:{name:"MongoDB",link:"https://www.mongodb.com/",categories:["export"],icon_filename:"mongodb.svg"},keywords:["exporter","MongoDB"],overview:"# MongoDB\n\nUse the MongoDB connector for the exporting engine to archive your agent's metrics to a MongoDB database\nfor long-term storage, further analysis, or correlation with data from other sources.\n\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- To use MongoDB as an external storage for long-term archiving, you should first [install](http://mongoc.org/libmongoc/current/installing.html) libmongoc 1.7.0 or higher.\n- Next, re-install Netdata from the source, which detects that the required library is now available.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | localhost | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | Netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). 
| | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:27017 10.11.14.3:4242 10.11.14.4:27017\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Basic configuration\n\nThe default socket timeout depends on the exporting connector update interval.\nThe timeout is 500 ms shorter than the interval (but not less than 1000 ms). You can alter the timeout using the `sockettimeoutms` MongoDB URI option.\n
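\nFor instance, the option can be appended directly to the connection string; a hypothetical sketch (host, port and timeout value here are illustrative, not defaults):\n\n```yaml\ndestination = mongodb://localhost:27017/?sockettimeoutms=5000\n```\n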
\n\n```yaml\n[mongodb:my_instance]\n enabled = yes\n destination = mongodb://\n database = your_database_name\n collection = your_collection_name\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/mongodb/metadata.yaml",troubleshooting:""},{id:"export-newrelic",meta:{name:"New Relic",link:"https://newrelic.com/",categories:["export"],icon_filename:"newrelic.svg",keywords:["export","NewRelic","prometheus","remote write"]},keywords:["exporter","Prometheus","remote write","time series"],overview:"# New Relic\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata re-installed after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). 
| | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `prometheus_remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",troubleshooting:""},{id:"export-opentsdb",meta:{name:"OpenTSDB",link:"https://github.com/OpenTSDB/opentsdb",categories:["export"],icon_filename:"opentsdb.png"},keywords:["exporter","OpenTSDB","scalable time series"],overview:"# OpenTSDB\n\nUse the OpenTSDB connector for the exporting engine to archive your Netdata metrics to OpenTSDB databases for long-term storage,\nfurther analysis, or correlation with data from other sources.\n\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- OpenTSDB and Netdata, installed, configured and operational.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | Netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to OpenTSDB. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). 
| | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used (opentsdb = 4242).\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:4242 10.11.14.3:4242 10.11.14.4:4242\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n
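\nFor instance, a hypothetical query against a local agent that fetches only the `apps.*` charts (host, port and format here are illustrative):\n\n```bash\ncurl 'http://localhost:19999/api/v1/allmetrics?format=shell&filter=apps.*'\n```\n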
\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Minimal configuration\n\nAdd `:http` or `:https` modifiers to the connector type if you need to use other than a plaintext protocol.\nFor example: `opentsdb:http:my_opentsdb_instance`, `opentsdb:https:my_opentsdb_instance`.\n\n\n```yaml\n[opentsdb:my_opentsdb_instance]\n enabled = yes\n destination = localhost:4242\n\n```\n##### HTTP authentication\n\n\n\n```yaml\n[opentsdb:my_opentsdb_instance]\n enabled = yes\n destination = localhost:4242\n username = my_username\n password = my_password\n\n```\n##### Using `send hosts matching`\n\n\n\n```yaml\n[opentsdb:my_opentsdb_instance]\n enabled = yes\n destination = localhost:4242\n send hosts matching = localhost *\n\n```\n##### Using `send charts matching`\n\n\n\n```yaml\n[opentsdb:my_opentsdb_instance]\n enabled = yes\n destination = localhost:4242\n send charts matching = *\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/opentsdb/metadata.yaml",troubleshooting:""},{id:"export-pgsql",meta:{name:"PostgreSQL",link:"https://www.postgresql.org/",categories:["export"],icon_filename:"postgres.svg",keywords:["export","PostgreSQL","prometheus","remote write"]},keywords:["exporter","Prometheus","remote write","time series"],overview:"# PostgreSQL\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata re-installed after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. 
| 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space-separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space-separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. 
Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `prometheus_remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",troubleshooting:""},{id:"export-prometheus-remote",meta:{name:"Prometheus Remote Write",link:"https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage",categories:["export"],icon_filename:"prometheus.svg"},keywords:["exporter","Prometheus","remote write","time series"],overview:"# Prometheus Remote Write\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space-separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. 
| 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space-separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space-separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `prometheus_remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",troubleshooting:""},{id:"export-quasar",meta:{name:"QuasarDB",link:"https://doc.quasar.ai/master/",categories:["export"],icon_filename:"quasar.jpeg",keywords:["export","quasar","quasarDB","prometheus","remote write"]},keywords:["exporter","Prometheus","remote write","time series"],overview:"# QuasarDB\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space-separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. 
Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space-separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space-separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `prometheus_remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",troubleshooting:""},{id:"export-splunk",meta:{name:"Splunk SignalFx",link:"https://www.splunk.com/en_us/products/observability.html",categories:["export"],icon_filename:"splunk.svg",keywords:["export","splunk","signalfx","prometheus","remote write"]},keywords:["exporter","Prometheus","remote write","time series"],overview:"# Splunk SignalFx\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space-separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. 
| 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space-separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space-separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `prometheus_remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",troubleshooting:""},{id:"export-thanos",meta:{name:"Thanos",link:"https://thanos.io/",categories:["export"],icon_filename:"thanos.png",keywords:["export","thanos","prometheus","remote write"]},keywords:["exporter","Prometheus","remote write","time series"],overview:"# Thanos\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space-separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. 
Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space-separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space-separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `prometheus_remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",troubleshooting:""},{id:"export-tikv",meta:{name:"TiKV",link:"https://tikv.org/",categories:["export"],icon_filename:"tikv.png",keywords:["export","TiKV","prometheus","remote write"]},keywords:["exporter","Prometheus","remote write","time series"],overview:"# TiKV\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space-separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. 
The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space-separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space-separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `prometheus_remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",troubleshooting:""},{id:"export-timescaledb",meta:{name:"TimescaleDB",link:"https://www.timescale.com/",categories:["export"],icon_filename:"timescale.png",keywords:["export","TimescaleDB","prometheus","remote write"]},keywords:["exporter","Prometheus","remote write","time series"],overview:"# TimescaleDB\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space-separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. 
Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space-separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space-separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `prometheus_remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",troubleshooting:""},{id:"export-victoria",meta:{name:"VictoriaMetrics",link:"https://victoriametrics.com/products/open-source/",categories:["export"],icon_filename:"victoriametrics.png",keywords:["export","victoriametrics","prometheus","remote write"]},keywords:["exporter","Prometheus","remote write","time series"],overview:"# VictoriaMetrics\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space-separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. 
| 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space-separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space-separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `prometheus_remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",troubleshooting:""},{id:"export-vmware",meta:{name:"VMware Aria",link:"https://www.vmware.com/products/aria-operations-for-applications.html",categories:["export"],icon_filename:"aria.png",keywords:["export","VMware","Aria","Tanzu","prometheus","remote write"]},keywords:["exporter","Prometheus","remote write","time series"],overview:"# VMware Aria\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space-separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. 
| 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space-separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space-separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). 
\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",troubleshooting:""},{id:"export-wavefront",meta:{name:"Wavefront",link:"https://docs.wavefront.com/wavefront_data_ingestion.html",categories:["export"],icon_filename:"wavefront.png",keywords:["export","Wavefront","prometheus","remote write"]},keywords:["exporter","Prometheus","remote write","time series"],overview:"# Wavefront\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries are installed.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. 
Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",troubleshooting:""},{id:"notify-alerta",meta:{name:"Alerta",link:"https://alerta.io/",categories:["notify.agent"],icon_filename:"alerta.png"},keywords:["Alerta"],overview:"# Alerta\n\nThe [Alerta](https://alerta.io/) monitoring system is a tool used to consolidate and de-duplicate alerts from multiple sources for quick \u2018at-a-glance\u2019 visualization. With just one system you can monitor alerts from many other monitoring tools on a single screen.\nYou can send Netdata alerts to Alerta to see alerts coming from many Netdata hosts, or from a multi-host Netdata configuration.\n\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- A working Alerta instance\n- An Alerta API key (if authentication in Alerta is enabled)\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary="Config Options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_ALERTA | Set `SEND_ALERTA` to YES | | yes |\n| ALERTA_WEBHOOK_URL | Set `ALERTA_WEBHOOK_URL` to the API URL you defined when you installed the Alerta server. | | yes |\n| ALERTA_API_KEY | Set `ALERTA_API_KEY` to your API key. | | yes |\n| DEFAULT_RECIPIENT_ALERTA | Set `DEFAULT_RECIPIENT_ALERTA` to the default recipient environment you want the alert notifications to be sent to. All roles will default to this variable if left unconfigured. | | yes |\n| DEFAULT_RECIPIENT_CUSTOM | Set different recipient environments per role, by editing `DEFAULT_RECIPIENT_CUSTOM` with the environment name of your choice. | | no |\n\n##### ALERTA_API_KEY\n\nYou will need an API key to send messages from any source, if Alerta is configured to use authentication (recommended). To create a new API key:\n1. Go to Configuration > API Keys.\n2. 
Create a new API key called "netdata" with `write:alerts` permission.\n\n\n##### DEFAULT_RECIPIENT_CUSTOM\n\nThe `DEFAULT_RECIPIENT_CUSTOM` can be edited in the following entries at the bottom of the same file:\n\n```conf\nrole_recipients_alerta[sysadmin]="Systems"\nrole_recipients_alerta[domainadmin]="Domains"\nrole_recipients_alerta[dba]="Databases Systems"\nrole_recipients_alerta[webmaster]="Marketing Development"\nrole_recipients_alerta[proxyadmin]="Proxy"\nrole_recipients_alerta[sitemgr]="Sites"\n```\n\nThe values you provide should be defined as environments in `/etc/alertad.conf` with the `ALLOWED_ENVIRONMENTS` option.\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# alerta (alerta.io) global notification options\n\nSEND_ALERTA="YES"\nALERTA_WEBHOOK_URL="http://yourserver/alerta/api"\nALERTA_API_KEY="INSERT_YOUR_API_KEY_HERE"\nDEFAULT_RECIPIENT_ALERTA="Production"\n\n```\n',troubleshooting:'## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n',integration_type:"notification",edit_link:"https://github.com/netdata/netdata/blob/master/src/health/notifications/alerta/metadata.yaml"},{id:"notify-awssns",meta:{name:"AWS SNS",link:"https://aws.amazon.com/sns/",categories:["notify.agent"],icon_filename:"aws.svg"},keywords:["AWS SNS"],overview:"# AWS SNS\n\nAs part of its AWS suite, Amazon provides a notification broker service called 'Simple Notification Service' (SNS). Amazon SNS works similarly to Netdata's own notification system, allowing you to dispatch a single notification to multiple subscribers of different types. Among other things, SNS supports sending notifications to:\n- Email addresses\n- Mobile Phones via SMS\n- HTTP or HTTPS web hooks\n- AWS Lambda functions\n- AWS SQS queues\n- Mobile applications via push notifications\nYou can send notifications through Amazon SNS using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n\n## Limitations\n\n- While Amazon SNS supports sending differently formatted messages for different delivery methods, Netdata does not currently support this functionality.\n- For email notification support, we recommend using Netdata's email notifications, as it has the following benefits:\n - In most cases, it requires less configuration.\n - Netdata's emails are nicely pre-formatted and support features like threading, which requires a lot of manual effort in SNS.\n - It is less resource intensive and more cost-efficient than SNS.\n\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- The [Amazon Web Services CLI tools](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html) (awscli).\n- An actual home directory for the user you run Netdata as, instead of just using `/` as a home directory. The setup depends on the distribution, but `/var/lib/netdata` is the recommended directory. 
If you are using Netdata as a dedicated user, the permissions will already be correct.\n- An Amazon SNS topic to send notifications to, with one or more subscribers. The Getting Started section of the Amazon SNS documentation covers the basics of how to set this up. Make note of the Topic ARN when you create the topic.\n- While not mandatory, it is highly recommended to create a dedicated IAM user on your account for Netdata to send notifications. This user needs to have programmatic access, and should only be allowed access to SNS. For an additional layer of security, you can create one for each system or group of systems.\n- Terminal access to the Agent you wish to configure.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary="Config Options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| aws path | The full path of the aws command. If empty, the system `$PATH` will be searched for it. If not found, Amazon SNS notifications will be silently disabled. | | yes |\n| SEND_AWSSNS | Set `SEND_AWSSNS` to YES | YES | yes |\n| AWSSNS_MESSAGE_FORMAT | Set `AWSSNS_MESSAGE_FORMAT` to the format string that you want the alert to be sent in. | ${status} on ${host} at ${date}: ${chart} ${value_string} | yes |\n| DEFAULT_RECIPIENT_AWSSNS | Set `DEFAULT_RECIPIENT_AWSSNS` to the Topic ARN you noted down upon creating the Topic. 
| | yes |\n\n##### AWSSNS_MESSAGE_FORMAT\n\nThe supported variables are:\n\n| Variable name | Description |\n|:---------------------------:|:---------------------------------------------------------------------------------|\n| `${alarm}` | Like "name = value units" |\n| `${status_message}` | Like "needs attention", "recovered", "is critical" |\n| `${severity}` | Like "Escalated to CRITICAL", "Recovered from WARNING" |\n| `${raised_for}` | Like "(alarm was raised for 10 minutes)" |\n| `${host}` | The host that generated this event |\n| `${url_host}` | Same as ${host} but URL encoded |\n| `${unique_id}` | The unique id of this event |\n| `${alarm_id}` | The unique id of the alarm that generated this event |\n| `${event_id}` | The incremental id of the event, for this alarm id |\n| `${when}` | The timestamp when this event occurred |\n| `${name}` | The name of the alarm, as given in netdata health.d entries |\n| `${url_name}` | Same as ${name} but URL encoded |\n| `${chart}` | The name of the chart (type.id) |\n| `${url_chart}` | Same as ${chart} but URL encoded |\n| `${status}` | The current status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL |\n| `${old_status}` | The previous status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL |\n| `${value}` | The current value of the alarm |\n| `${old_value}` | The previous value of the alarm |\n| `${src}` | The line number and file where the alarm has been configured |\n| `${duration}` | The duration in seconds of the previous alarm state |\n| `${duration_txt}` | Same as ${duration} for humans |\n| `${non_clear_duration}` | The total duration in seconds this is/was non-clear |\n| `${non_clear_duration_txt}` | Same as ${non_clear_duration} for humans |\n| `${units}` | The units of the value |\n| `${info}` | A short description of the alarm |\n| `${value_string}` | Friendly value (with units) |\n| `${old_value_string}` | Friendly old value (with units) |\n| `${image}` | The URL of an image to represent the status of the alarm |\n| `${color}` | A color in AABBCC format for the alarm |\n| `${goto_url}` | The URL the user can click to see the netdata dashboard |\n| `${calc_expression}` | The expression evaluated to provide the value for the alarm |\n| `${calc_param_values}` | The value of the variables in the evaluated expression |\n| `${total_warnings}` | The total number of alarms in WARNING state on the host |\n| `${total_critical}` | The total number of alarms in CRITICAL state on the host |\n\n\n##### DEFAULT_RECIPIENT_AWSSNS\n\nAll roles will default to this variable if left unconfigured.\n\nYou can have different recipient Topics per **role**, by editing `DEFAULT_RECIPIENT_AWSSNS` with the Topic ARN you want, in the following entries at the bottom of the same file:\n\n```conf\nrole_recipients_awssns[sysadmin]="arn:aws:sns:us-east-2:123456789012:Systems"\nrole_recipients_awssns[domainadmin]="arn:aws:sns:us-east-2:123456789012:Domains"\nrole_recipients_awssns[dba]="arn:aws:sns:us-east-2:123456789012:Databases"\nrole_recipients_awssns[webmaster]="arn:aws:sns:us-east-2:123456789012:Development"\nrole_recipients_awssns[proxyadmin]="arn:aws:sns:us-east-2:123456789012:Proxy"\nrole_recipients_awssns[sitemgr]="arn:aws:sns:us-east-2:123456789012:Sites"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\nAn example working configuration would be:\n\n```conf\n#------------------------------------------------------------------------------\n# Amazon SNS 
notifications\n\nSEND_AWSSNS="YES"\nAWSSNS_MESSAGE_FORMAT="${status} on ${host} at ${date}: ${chart} ${value_string}"\nDEFAULT_RECIPIENT_AWSSNS="arn:aws:sns:us-east-2:123456789012:MyTopic"\n```\n
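\nIf you want to sanity-check the Topic ARN and the credentials of the dedicated IAM user before wiring them into Netdata, you can publish a test message to the topic. The sketch below uses boto3 rather than the aws CLI used by the Agent itself; it is an optional verification step, assumes boto3 is installed, and assumes AWS credentials are available to the user running it:\n\n```python\nimport boto3\n\n# Assumes credentials are available via the environment, ~/.aws, or an instance role.\nsns = boto3.client("sns", region_name="us-east-2")\n\nresponse = sns.publish(\n    TopicArn="arn:aws:sns:us-east-2:123456789012:MyTopic",  # use your own Topic ARN\n    Message="Test message from the Netdata host",\n)\nprint(response["MessageId"])  # a MessageId means the publish succeeded\n```\n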
\n',troubleshooting:'## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n',integration_type:"notification",edit_link:"https://github.com/netdata/netdata/blob/master/src/health/notifications/awssns/metadata.yaml"},{id:"notify-cloud-awssns",meta:{name:"Amazon SNS",link:"https://aws.amazon.com/sns/",categories:["notify.cloud"],icon_filename:"awssns.png"},keywords:["awssns"],overview:"# Amazon SNS\n\nFrom the Cloud interface, you can manage your space's notification settings and from there you can add a specific configuration to get notifications delivered on AWS SNS.\n",setup:"## Setup\n\n### Prerequisites\n\nTo add AWS SNS notifications you need:\n\n- A Netdata Cloud account\n- Access to the space as an **Admin**\n- The Space needs to be on a paid plan\n- An AWS account with AWS SNS access; for more details check [how to configure this on AWS SNS](#settings-on-aws-sns)\n\n### Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **AwsSns** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n * **Notification settings** are Netdata specific settings\n - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it\n - Rooms - by specifying a list of Rooms you select which nodes or areas of your infrastructure should be notified using this configuration\n - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only\n * **Integration configuration** are the specific notification integration required settings, which vary by notification method. For AWS SNS:\n - Topic ARN - topic provided on AWS SNS (with region) for where to publish your notifications. For more details check [how to configure this on AWS SNS](#settings-on-aws-sns)\n\n### Settings on AWS SNS\n\nTo enable the integration on AWS SNS you need:\n1. [Setting up access for Amazon SNS](https://docs.aws.amazon.com/sns/latest/dg/sns-setting-up.html)\n2. Create a topic\n - On the AWS SNS management console click on **Create topic**\n - In the **Details** section, select the standard type and provide the topic name\n - In the **Access policy** section, change the **Publishers** option to **Only the specified AWS accounts** and provide the Netdata AWS account **(123269920060)** that will be used to publish notifications to the topic being created\n - Finally, click on **Create topic** at the bottom of the page\n3. Now, use the new **Topic ARN** while adding AWS SNS integration on your space.\n\n",integration_type:"notification",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml",troubleshooting:""},{id:"notify-cloud-discord",meta:{name:"Discord",link:"https://discord.com/",categories:["notify.cloud"],icon_filename:"discord.png"},keywords:["discord","community"],overview:"# Discord\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on Discord.\n",setup:"## Setup\n\n### Prerequisites\n- A Netdata Cloud account\n- Access to the Netdata Space as an **Admin**\n- You need to have a Discord server able to receive webhook integrations.\n\n### Discord Server Configuration\nSteps to configure your Discord server to receive [webhook notifications](https://support.discord.com/hc/en-us/articles/228383668) from Netdata:\n1. Go to `Server Settings` --\x3e `Integrations`\n2. **Create Webhook** or **View Webhooks** if you already have some defined\n3. Specify the **Name** and **Channel** on your new webhook\n4. Use the Webhook URL to add your notification configuration in the Netdata UI\n\n### Netdata Configuration Steps\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **Discord** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n * **Notification settings** are Netdata specific settings\n - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it\n - Rooms - by specifying a list of Rooms you select which nodes or areas of your infrastructure should be notified using this configuration\n - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only\n * **Integration configuration** are the specific notification integration required settings, which vary by notification method. For Discord:\n - Define the type of channel you want to send notifications to: **Text channel** or **Forum channel**\n - Webhook URL - URL provided on Discord for the channel you want to receive your notifications.\n - Thread name - if the Discord channel is a **Forum channel** you will need to provide the thread name as well\n\n",integration_type:"notification",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml",troubleshooting:""},{id:"notify-cloud-mattermost",meta:{name:"Mattermost",link:"https://mattermost.com/",categories:["notify.cloud"],icon_filename:"mattermost.png"},keywords:["mattermost"],overview:"# Mattermost\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on Mattermost.\n",setup:"## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Netdata Space as an **Admin**\n- The Netdata Space needs to be on a paid plan\n- You need to have permissions on Mattermost to add new integrations.\n- You need to have a Mattermost app on your workspace to receive the webhooks.\n\n### Mattermost Server Configuration\n\nSteps to configure your Mattermost to receive notifications from Netdata:\n\n1. 
In Mattermost, go to Product menu > Integrations > Incoming Webhook\n - If you don\u2019t have the Integrations option, incoming webhooks may not be enabled on your Mattermost server or may be disabled for non-admins. They can be enabled by a System Admin from System Console > Integrations > Integration Management. Once incoming webhooks are enabled, continue with the steps below.\n2. Select Add Incoming Webhook and add a name and description for the webhook. The description can be up to 500 characters.\n3. Select the channel to receive webhook payloads, then select Add to create the webhook\n4. You will end up with a webhook endpoint that looks like the one below:\n `https://your-mattermost-server.com/hooks/xxx-generatedkey-xxx`\n\n - Treat this endpoint as a secret. Anyone who has it will be able to post messages to your Mattermost instance.\n\nFor more details please check Mattermost's article [Incoming webhooks for Mattermost](https://developers.mattermost.com/integrate/webhooks/incoming/).\n\n### Netdata Configuration Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **Mattermost** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n * **Notification settings** are Netdata specific settings\n - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it\n - Rooms - by specifying a list of Rooms you select which nodes or areas of your infrastructure should be notified using this configuration\n - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only\n * **Integration configuration** are the specific notification integration required settings, which vary by notification method. For Mattermost:\n - Webhook URL - URL provided on Mattermost for the channel you want to receive your notifications\n\n",integration_type:"notification",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml",troubleshooting:""},{id:"notify-cloud-microsoftteams",meta:{name:"Microsoft Teams",link:"https://www.microsoft.com/en-us/microsoft-teams",categories:["notify.cloud"],icon_filename:"teams.svg"},keywords:["microsoft","teams"],overview:"# Microsoft Teams\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications to a Microsoft Teams channel.\n",setup:"## Setup\n\n### Prerequisites\n\nTo add the Microsoft Teams notifications integration to your Netdata Cloud space you will need the following:\n\n- A Netdata Cloud account.\n- Access to the Netdata Cloud space as an **Admin**.\n- The Space to be on a paid plan.\n- A [Microsoft 365 for Business Account](https://www.microsoft.com/en-us/microsoft-365/business). Note that this is a **paid** account.\n\n### Settings on Microsoft Teams\n\n- The integration gets enabled at a team's channel level.\n- Click on the `...` (three dots) icon next to the channel name; it appears when you hover over it.\n- Click on `Connectors`.\n- Look for the `Incoming Webhook` connector and click `Configure`.\n- Provide a name for your Incoming Webhook Connector, for example _Netdata Alerts_. 
You can also customize it with a proper icon instead of using the default image.\n- Click `Create`.\n- The _Incoming Webhook URL_ is created.\n- That is the URL to be provided to the Netdata Cloud configuration.\n\n### Settings on Netdata Cloud\n\n1. Click on the **Space settings** cog (located above your profile icon).\n2. Click on the **Notification** tab.\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen).\n4. On the **Microsoft Teams** card click on **+ Add**.\n5. A modal will be presented to you to enter the required details to enable the configuration:\n * **Notification settings** are Netdata specific settings:\n - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it.\n - Rooms - by specifying a list of Rooms you select which nodes or areas of your infrastructure should be notified using this configuration.\n - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only.\n * **Integration configuration** are the specific notification integration required settings, which vary by notification method. For Microsoft Teams:\n - Microsoft Teams Incoming Webhook URL - the _Incoming Webhook URL_ that was generated earlier.\n\n",integration_type:"notification",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml",troubleshooting:""},{id:"notify-cloud-mobile-app",meta:{name:"Netdata Mobile App",link:"https://netdata.cloud",categories:["notify.cloud"],icon_filename:"netdata.png"},keywords:["mobile-app","phone","personal-notifications"],overview:"# Netdata Mobile App\n\nFrom the Netdata Cloud UI, you can manage your user notification settings and enable the configuration to deliver notifications on the Netdata Mobile Application.\n",setup:"## Setup\n\n### Prerequisites\n- A Netdata Cloud account\n- You need to have the Netdata Mobile Application installed on your [Android](https://play.google.com/store/apps/details?id=cloud.netdata.android&pli=1) or [iOS](https://apps.apple.com/in/app/netdata-mobile/id6474659622) phone.\n\n### Netdata Mobile App Configuration\nSteps to log in to the Netdata Mobile Application to receive alert and reachability notifications:\n1. Download the Netdata Mobile Application from [Google Play Store](https://play.google.com/store/apps/details?id=cloud.netdata.android&pli=1) or the [iOS App Store](https://apps.apple.com/in/app/netdata-mobile/id6474659622)\n2. Open the app and choose a sign-in option\n - Sign In with Email Address: Enter the email address of your registered Netdata Cloud account and click on the verification link received by email on your mobile device.\n - Sign In with QR Code: Scan the QR Code from your `Netdata Cloud` UI under **User Settings** --\x3e **Notifications** --\x3e **Mobile App Notifications** --\x3e **Show QR Code**\n3. Start receiving alert and reachability notifications for your **Space(s)** on a **Paid Subscription plan**\n\n### Netdata Configuration Steps\n1. Click on **User settings** at the bottom left of your screen (your profile icon)\n2. Click on the **Notifications** tab\n3. Enable **Mobile App Notifications** if disabled (Enabled by default)\n4. 
Use the **Show QR Code** option to log in on your mobile device by scanning the **QR Code**\n\n",integration_type:"notification",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml",troubleshooting:""},{id:"notify-cloud-opsgenie",meta:{name:"Opsgenie",link:"https://www.atlassian.com/software/opsgenie",categories:["notify.cloud"],icon_filename:"opsgenie.png"},keywords:["opsgenie","atlassian"],overview:"# Opsgenie\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on Opsgenie.\n",setup:"## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Netdata Space as an **Admin**\n- The Netdata Space needs to be on a paid plan\n- You need to have permissions on Opsgenie to add new integrations.\n\n### Opsgenie Server Configuration\n\nSteps to configure your Opsgenie to receive notifications from Netdata:\n\n1. Go to the integrations tab of your team and click **Add integration**\n2. Pick **API** from the available integrations. Copy your API Key and press **Save Integration**.\n3. Paste the copied API key into the corresponding field in the **Integration configuration** section of the Opsgenie modal window in Netdata.\n\n### Netdata Configuration Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **Opsgenie** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n * **Notification settings** are Netdata specific settings\n - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it\n - Rooms - by specifying a list of Rooms you select which nodes or areas of your infrastructure should be notified using this configuration\n - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only\n * **Integration configuration** are the specific notification integration required settings, which vary by notification method. For Opsgenie:\n - API Key - a key provided on Opsgenie for the channel you want to receive your notifications.\n\n",integration_type:"notification",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml",troubleshooting:""},{id:"notify-cloud-pagerduty",meta:{name:"PagerDuty",link:"https://www.pagerduty.com/",categories:["notify.cloud"],icon_filename:"pagerduty.png"},keywords:["pagerduty"],overview:"# PagerDuty\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on PagerDuty.\n",setup:"## Setup\n\n### Prerequisites\n- A Netdata Cloud account\n- Access to the Netdata Space as an **Admin**\n- The Netdata Space needs to be on a paid plan\n- You need to have a PagerDuty service to receive events using webhooks.\n\n\n### PagerDuty Server Configuration\nSteps to configure your PagerDuty to receive notifications from Netdata:\n\n1. Create a service to receive events from your services directory page on PagerDuty\n2. At step 3, select `Events API V2` Integration or **View Webhooks** if you already have some defined\n3. 
Once the service is created, you will be redirected to its configuration page, where you can copy the **Integration Key** and **Integration URL (Alert Events)** fields to add them to your notification configuration in the Netdata UI.\n\n### Netdata Configuration Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **PagerDuty** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n * **Notification settings** are Netdata specific settings\n - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it\n - Rooms - by specifying a list of Rooms you select which nodes or areas of your infrastructure should be notified using this configuration\n - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only\n * **Integration configuration** are the specific notification integration required settings, which vary by notification method. For PagerDuty:\n - Integration Key - a 32-character key provided by PagerDuty to receive events on your service.\n - Integration URL (Alert Events) - the URL provided by PagerDuty to which we will send notifications.\n\n",integration_type:"notification",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml",troubleshooting:""},{id:"notify-cloud-rocketchat",meta:{name:"RocketChat",link:"https://www.rocket.chat/",categories:["notify.cloud"],icon_filename:"rocketchat.png"},keywords:["rocketchat"],overview:"# RocketChat\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on RocketChat.\n",setup:"## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Netdata Space as an **Admin**\n- The Netdata Space needs to be on a paid plan\n- You need to have permissions on RocketChat to add new integrations.\n- You need to have a RocketChat app on your workspace to receive the webhooks.\n\n### RocketChat Server Configuration\n\nSteps to configure your RocketChat to receive notifications from Netdata:\n\n1. In RocketChat, navigate to Administration > Workspace > Integrations.\n2. Click **+New** at the top right corner.\n3. For more details about each parameter, check [create-a-new-incoming-webhook](https://docs.rocket.chat/use-rocket.chat/workspace-administration/integrations#create-a-new-incoming-webhook).\n4. After configuring the integration, click Save.\n5. You will end up with a webhook endpoint that looks like the one below:\n `https://your-server.rocket.chat/hooks/YYYYYYYYYYYYYYYYYYYYYYYY/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX`\n - Treat this endpoint as a secret. Anyone who has it will be able to post messages to your RocketChat instance.\n\n\nFor more details please check RocketChat's article [Incoming webhooks for RocketChat](https://docs.rocket.chat/use-rocket.chat/workspace-administration/integrations/).\n\n### Netdata Configuration Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **RocketChat** card click on **+ Add**\n5. 
A modal will be presented to you to enter the required details to enable the configuration:\n * **Notification settings** are Netdata specific settings\n - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it\n - Rooms - by specifying a list of Rooms you select which nodes or areas of your infrastructure should be notified using this configuration\n - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only\n * **Integration configuration** are the specific notification integration required settings, which vary by notification method. For RocketChat:\n - Webhook URL - URL provided on RocketChat for the channel you want to receive your notifications.\n\n",integration_type:"notification",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml",troubleshooting:""},{id:"notify-cloud-slack",meta:{name:"Slack",link:"https://slack.com/",categories:["notify.cloud"],icon_filename:"slack.png"},keywords:["slack"],overview:"# Slack\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on Slack.\n",setup:"## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Netdata Space as an **Admin**\n- The Netdata Space needs to be on a paid plan\n- You need to have a Slack app on your workspace to receive the webhooks.\n\n### Slack Server Configuration\n\nSteps to configure your Slack to receive notifications from Netdata:\n\n1. Create an app to receive webhook integrations. Check [Create an app](https://api.slack.com/apps?new_app=1) from the Slack documentation for further details\n2. Install the app on your workspace\n3. Configure Webhook URLs for your workspace\n - On your app go to **Incoming Webhooks** and click on **Activate Incoming Webhooks**\n - At the bottom of the **Webhook URLs for Your Workspace** section, click **Add New Webhook to Workspace**\n - Then specify the channel where you want your notifications to be delivered\n - Once completed, copy the Webhook URL; you will need to add it to your notification configuration in the Netdata UI\n\nFor more details please check Slack's article [Incoming webhooks for Slack](https://slack.com/help/articles/115005265063-Incoming-webhooks-for-Slack).\n\n### Netdata Configuration Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **Slack** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n * **Notification settings** are Netdata specific settings\n - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it\n - Rooms - by specifying a list of Rooms you select which nodes or areas of your infrastructure should be notified using this configuration\n - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only\n * **Integration configuration** are the specific notification integration required settings, which vary by notification method. 
For Slack:\n - Webhook URL - URL provided on Slack for the channel you want to receive your notifications.\n\n",integration_type:"notification",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml",troubleshooting:""},{id:"notify-cloud-splunk",meta:{name:"Splunk",link:"https://splunk.com/",categories:["notify.cloud"],icon_filename:"splunk-black.svg"},keywords:["Splunk"],overview:"# Splunk\n\nFrom the Cloud interface, you can manage your space's notification settings and from there you can add a specific configuration to get notifications delivered on Splunk.\n",setup:"## Setup\n\n### Prerequisites\n\nTo add Splunk notifications you need:\n\n- A Netdata Cloud account\n- Access to the space as an **Admin**\n- The Space needs to be on a paid plan\n- URI and token for your Splunk HTTP Event Collector. Refer to the [Splunk documentation](https://docs.splunk.com/Documentation/Splunk/latest/Data/UsetheHTTPEventCollector) for detailed instructions.\n\n### Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **Splunk** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n - **Notification settings** are Netdata specific settings\n - Configuration name - provide a descriptive name for your configuration to easily identify it.\n - Rooms - select the nodes or areas of your infrastructure you want to receive notifications about.\n - Notification - choose the type of notifications you want to receive: All Alerts and unreachable, All Alerts, Critical only.\n - **Integration configuration** are the specific notification integration required settings, which vary by notification method. For Splunk:\n - HTTP Event Collector URI - The URI of your HTTP event collector in Splunk\n - HTTP Event Collector Token - The token that Splunk provided to you when you created the HTTP Event Collector\n
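\nIf you want to confirm that the URI and token are valid before saving the configuration, you can send a test event to the collector yourself. This is a small sketch using only the Python standard library; it assumes the default HEC endpoint path (`/services/collector/event`, typically on port 8088) and uses placeholder values throughout:\n\n```python\nimport json\nimport urllib.request\n\nURI = 'https://splunk.example.com:8088/services/collector/event'  # placeholder\nTOKEN = 'YOUR_HEC_TOKEN'  # placeholder\n\nreq = urllib.request.Request(\n    URI,\n    data=json.dumps({'event': 'test event from Netdata setup'}).encode(),\n    headers={'Authorization': f'Splunk {TOKEN}'},\n)\nwith urllib.request.urlopen(req) as resp:\n    print(resp.status, resp.read().decode())  # expect 200 and a success body\n```\n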
\n",integration_type:"notification",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml",troubleshooting:""},{id:"notify-cloud-telegram",meta:{name:"Telegram",link:"https://telegram.org/",categories:["notify.cloud"],icon_filename:"telegram.svg"},keywords:["Telegram"],overview:"# Telegram\n\nFrom the Cloud interface, you can manage your space's notification settings and from there you can add a specific configuration to get notifications delivered on Telegram.\n",setup:"## Setup\n\n### Prerequisites\n\nTo add Telegram notifications you need:\n\n- A Netdata Cloud account\n- Access to the space as an **Admin**\n- The Space needs to be on a paid plan\n- The Telegram bot token, chat ID and _optionally_ the topic ID\n\n### Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **Telegram** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n - **Notification settings** are Netdata specific settings\n - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it\n - Rooms - by specifying a list of Rooms you select which nodes or areas of your infrastructure should be notified using this configuration\n - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only\n - **Integration configuration** are the specific notification integration required settings, which vary by notification method. For Telegram:\n - Bot Token - the token of your bot\n - Chat ID - the chat ID to which your bot will deliver messages\n - Topic ID - the identifier of the chat topic to which your bot will send messages. If omitted or 0, messages will be sent to the General topic. If topics are not supported, messages will be sent to the chat.\n\n### Getting the Telegram bot token, chat ID and topic ID\n\n- Bot token: To create a bot, contact the [@BotFather](https://t.me/BotFather) bot, send the command `/newbot`, and follow the instructions. **Start a conversation with your bot or invite it into the group where you want it to send notifications**.\n- To get the chat ID you have two options:\n - Contact the [@myidbot](https://t.me/myidbot) bot and send the `/getid` command to get your personal chat ID, or invite it into a group and use the `/getgroupid` command to get the group chat ID.\n - Alternatively, you can get the chat ID directly from the bot API. Send your bot a command in the chat you want to use, then check `https://api.telegram.org/bot{YourBotToken}/getUpdates`, e.g. `https://api.telegram.org/bot111122223:7OpFlFFRzRBbrUUmIjj5HF9Ox2pYJZy5/getUpdates`\n- To get the topic ID, the easiest way is this: Post a message to that topic, then right-click on it and select `Copy Message Link`. Paste it in a scratchpad and notice that it has the following structure `https://t.me/c/XXXXXXXXXX/YY/ZZ`. The topic ID is `YY` (integer).\n
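\nIf you prefer to script the chat ID lookup, the same `getUpdates` call can be made from Python. A small sketch using only the standard library (send your bot a message first so the update list is not empty; the token below is a placeholder):\n\n```python\nimport json\nimport urllib.request\n\nTOKEN = 'YOUR_BOT_TOKEN'  # placeholder - use your own bot token\n\nwith urllib.request.urlopen(f'https://api.telegram.org/bot{TOKEN}/getUpdates') as resp:\n    updates = json.load(resp)\n\n# Print the chat id (and title, if any) for every update received.\nfor update in updates.get('result', []):\n    chat = update.get('message', {}).get('chat', {})\n    print(chat.get('id'), chat.get('title', ''))\n```\n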
\n",integration_type:"notification",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml",troubleshooting:""},{id:"notify-cloud-victorops",meta:{name:"Splunk VictorOps",link:"https://www.splunk.com/en_us/about-splunk/acquisitions/splunk-on-call.html",categories:["notify.cloud"],icon_filename:"victorops.svg"},keywords:["VictorOps","Splunk","On-Call"],overview:"# Splunk VictorOps\n\nFrom the Cloud interface, you can manage your space's notification settings and from there you can add a specific configuration to get notifications delivered on Splunk On-Call/VictorOps.\n",setup:"## Setup\n\n### Prerequisites\n\nTo add Splunk VictorOps notifications (also known as Splunk On-Call) you need:\n\n- A Netdata Cloud account\n- Access to the space as an **Admin**\n- The Space needs to be on a paid plan\n- Destination URL for your Splunk VictorOps REST Endpoint Integration. Refer to the [VictorOps documentation](https://help.victorops.com/knowledge-base/rest-endpoint-integration-guide) for detailed instructions.\n\n### Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **Splunk VictorOps** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n - **Notification settings** are Netdata specific settings\n - Configuration name - provide a descriptive name for your configuration to easily identify it.\n - Rooms - select the nodes or areas of your infrastructure you want to receive notifications about.\n - Notification - choose the type of notifications you want to receive: All Alerts and unreachable, All Alerts, Critical only.\n - **Integration configuration** are the specific notification integration required settings, which vary by notification method. For Splunk VictorOps:\n - Destination URL - The URL of your REST endpoint, provided by VictorOps.\n\n",integration_type:"notification",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml",troubleshooting:""},{id:"notify-cloud-webhook",meta:{name:"Webhook",link:"https://en.wikipedia.org/wiki/Webhook",categories:["notify.cloud"],icon_filename:"webhook.svg"},keywords:["generic webhooks","webhooks"],overview:"# Webhook\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on a webhook using a predefined schema.\n",setup:"## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Netdata Space as an **Admin**\n- The Netdata Space needs to be on a paid plan\n- You need to have an app that allows you to receive webhooks following a predefined schema.\n\n### Netdata Configuration Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **Webhook** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n * **Notification settings** are Netdata specific settings\n - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it\n - Rooms - by specifying a list of Rooms you select which nodes or areas of your infrastructure should be notified using this configuration\n - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only\n * **Integration configuration** are the specific notification integration required settings, which vary by notification method. For Webhook:\n - Webhook URL - the URL of the service that Netdata will send notifications to. In order to keep the communication secured, we only accept HTTPS URLs.\n - Extra headers - these are optional key-value pairs that you can set to be included in the HTTP requests sent to the webhook URL.\n - Authentication Mechanism - Netdata webhook integration supports 3 different authentication mechanisms.\n * Mutual TLS (recommended) - default authentication mechanism used if no other method is selected.\n * Basic - the client sends a request with an Authorization header that includes a base64-encoded string in the format **username:password**. These settings will be required inputs.\n * Bearer - the client sends a request with an Authorization header that includes a **bearer token**. This setting will be a required input.\n\n\n ### Webhook service\n\n A webhook integration allows your application to receive real-time alerts from Netdata by sending HTTP requests to a specified URL. 
In this document, we'll go over the steps to set up a generic webhook integration, including adding headers, and implementing different types of authentication mechanisms.\n\n #### Netdata webhook integration\n\n A webhook integration is a way for one service to notify another service about events that occur within it. This is done by sending an HTTP POST request to a specified URL (known as the \"webhook URL\") when an event occurs.\n\n The Netdata webhook integration service will send alert and reachability notifications to the destination service as soon as they are detected.\n\n For alert notifications, the content sent to the destination service contains a JSON object with the following properties:\n\n | field | type | description |\n | :-- | :-- | :-- |\n | message | string | A summary message of the alert. |\n | alarm | string | The alarm the notification is about. |\n | info | string | Additional info related to the alert. |\n | chart | string | The chart associated with the alert. |\n | context | string | The chart context. |\n | space | string | The space where the node that raised the alert is assigned. |\n | Rooms | object[object(string,string)] | Object with the list of Room names and URLs the node belongs to. |\n | family | string | Context family. |\n | class | string | Classification of the alert, e.g. \"Error\". |\n | severity | string | Alert severity, can be one of \"warning\", \"critical\" or \"clear\". |\n | date | string | Date of the alert in ISO8601 format. |\n | duration | string | Duration the alert has been raised. |\n | additional_active_critical_alerts | integer | Number of additional critical alerts currently existing on the same node. |\n | additional_active_warning_alerts | integer | Number of additional warning alerts currently existing on the same node. |\n | alarm_url | string | Netdata Cloud URL for this alarm. |\n\n For reachability notifications, the JSON object will contain the following properties:\n\n | field | type | description |\n | :-- | :-- | :-- |\n | message | string | A summary message of the reachability alert. |\n | url | string | Netdata Cloud URL for the host experiencing the reachability alert. |\n | host | string | The host experiencing the reachability alert. |\n | severity | string | Severity for this notification: if the host is reachable, severity will be 'info'; if the host is unreachable, it will be 'critical'. |\n | status | object | An object with the status information. |\n | status.reachable | boolean | true if the host is reachable, false otherwise. |\n | status.text | string | Can be 'reachable' or 'unreachable'. |\n
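\n To make the schema above concrete, here is a minimal sketch of a receiving endpoint written with Flask (the same library used in the challenge handler example further down). The route and port are illustrative assumptions; the field names come from the alert payload table above:\n\n ```python\n from flask import Flask, request\n\n app = Flask(__name__)\n\n @app.route('/webhooks/netdata', methods=['POST'])\n def receive_alert():\n     payload = request.get_json()  # the JSON object described above\n     # Pick out a few fields; a real handler would route or store these.\n     print(payload.get('severity'), payload.get('alarm'), payload.get('chart'))\n     return '', 200\n\n if __name__ == '__main__':\n     app.run(port=8080)  # assumption: TLS is terminated by a proxy in front\n ```\n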
\n #### Extra headers\n\n When setting up a webhook integration, the user can specify a set of headers to be included in the HTTP requests sent to the webhook URL.\n\n By default, the following headers will be sent in the HTTP request:\n\n | **Header** | **Value** |\n |:-------------------------------:|-----------------------------|\n | Content-Type | application/json |\n\n #### Authentication mechanisms\n\n Netdata webhook integration supports 3 different authentication mechanisms:\n\n ##### Mutual TLS authentication (recommended)\n\n In mutual Transport Layer Security (mTLS) authentication, the client and the server authenticate each other using X.509 certificates. This ensures that the client is connecting to the intended server, and that the server is only accepting connections from authorized clients.\n\n This is the default authentication mechanism used if no other method is selected.\n\n To take advantage of mutual TLS, configure your server to verify Netdata's client certificate. To make this possible, the Netdata client sending the notification identifies itself with a client certificate that your server can validate.\n\n The steps to perform this validation are as follows:\n\n - Store the Netdata CA certificate in a file on your disk. The content of this file should be:\n\n
    \n Netdata CA certificate\n\n ```\n -----BEGIN CERTIFICATE-----\n MIIF0jCCA7qgAwIBAgIUDV0rS5jXsyNX33evHEQOwn9fPo0wDQYJKoZIhvcNAQEN\n BQAwgYAxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH\n Ew1TYW4gRnJhbmNpc2NvMRYwFAYDVQQKEw1OZXRkYXRhLCBJbmMuMRIwEAYDVQQL\n EwlDbG91ZCBTUkUxGDAWBgNVBAMTD05ldGRhdGEgUm9vdCBDQTAeFw0yMzAyMjIx\n MjQzMDBaFw0zMzAyMTkxMjQzMDBaMIGAMQswCQYDVQQGEwJVUzETMBEGA1UECBMK\n Q2FsaWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEWMBQGA1UEChMNTmV0\n ZGF0YSwgSW5jLjESMBAGA1UECxMJQ2xvdWQgU1JFMRgwFgYDVQQDEw9OZXRkYXRh\n IFJvb3QgQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCwIg7z3R++\n ppQYYVVoMIDlhWO3qVTMsAQoJYEvVa6fqaImUBLW/k19LUaXgUJPohB7gBp1pkjs\n QfY5dBo8iFr7MDHtyiAFjcQV181sITTMBEJwp77R4slOXCvrreizhTt1gvf4S1zL\n qeHBYWEgH0RLrOAqD0jkOHwewVouO0k3Wf2lEbCq3qRk2HeDvkv0LR7sFC+dDms8\n fDHqb/htqhk+FAJELGRqLeaFq1Z5Eq1/9dk4SIeHgK5pdYqsjpBzOTmocgriw6he\n s7F3dOec1ZZdcBEAxOjbYt4e58JwuR81cWAVMmyot5JNCzYVL9e5Vc5n22qt2dmc\n Tzw2rLOPt9pT5bzbmyhcDuNg2Qj/5DySAQ+VQysx91BJRXyUimqE7DwQyLhpQU72\n jw29lf2RHdCPNmk8J1TNropmpz/aI7rkperPugdOmxzP55i48ECbvDF4Wtazi+l+\n 4kx7ieeLfEQgixy4lRUUkrgJlIDOGbw+d2Ag6LtOgwBiBYnDgYpvLucnx5cFupPY\n Cy3VlJ4EKUeQQSsz5kVmvotk9MED4sLx1As8V4e5ViwI5dCsRfKny7BeJ6XNPLnw\n PtMh1hbiqCcDmB1urCqXcMle4sRhKccReYOwkLjLLZ80A+MuJuIEAUUuEPCwywzU\n R7pagYsmvNgmwIIuJtB6mIJBShC7TpJG+wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMC\n AQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU9IbvOsPSUrpr8H2zSafYVQ9e\n Ft8wDQYJKoZIhvcNAQENBQADggIBABQ08aI31VKZs8jzg+y/QM5cvzXlVhcpkZsY\n 1VVBr0roSBw9Pld9SERrEHto8PVXbadRxeEs4sKivJBKubWAooQ6NTvEB9MHuGnZ\n VCU+N035Gq/mhBZgtIs/Zz33jTB2ju3G4Gm9VTZbVqd0OUxFs41Iqvi0HStC3/Io\n rKi7crubmp5f2cNW1HrS++ScbTM+VaKVgQ2Tg5jOjou8wtA+204iYXlFpw9Q0qnP\n qq6ix7TfLLeRVp6mauwPsAJUgHZluz7yuv3r7TBdukU4ZKUmfAGIPSebtB3EzXfH\n 7Y326xzv0hEpjvDHLy6+yFfTdBSrKPsMHgc9bsf88dnypNYL8TUiEHlcTgCGU8ts\n ud8sWN2M5FEWbHPNYRVfH3xgY2iOYZzn0i+PVyGryOPuzkRHTxDLPIGEWE5susM4\n X4bnNJyKH1AMkBCErR34CLXtAe2ngJlV/V3D4I8CQFJdQkn9tuznohUU/j80xvPH\n FOcDGQYmh4m2aIJtlNVP6+/92Siugb5y7HfslyRK94+bZBg2D86TcCJWaaZOFUrR\n Y3WniYXsqM5/JI4OOzu7dpjtkJUYvwtg7Qb5jmm8Ilf5rQZJhuvsygzX6+WM079y\n nsjoQAm6OwpTN5362vE9SYu1twz7KdzBlUkDhePEOgQkWfLHBJWwB+PvB1j/cUA3\n 5zrbwvQf\n -----END CERTIFICATE-----\n ```\n
    \n\n - Enable client certificate validation on the web server that is doing the TLS termination. Below we show you how to perform this configuration in `NGINX` and `Apache`.\n\n **NGINX**\n\n ```conf\n server {\n listen 443 ssl default_server;\n\n # ... existing SSL configuration for server authentication ...\n ssl_verify_client on;\n ssl_client_certificate /path/to/Netdata_CA.pem;\n\n location / {\n if ($ssl_client_s_dn !~ \"CN=app.netdata.cloud\") {\n return 403;\n }\n # ... existing location configuration ...\n }\n }\n ```\n\n **Apache**\n\n ```conf\n Listen 443\n \n # ... existing SSL configuration for server authentication ...\n SSLVerifyClient require\n SSLCACertificateFile \"/path/to/Netdata_CA.pem\"\n \n <Directory /var/www/>\n Require expr \"%{SSL_CLIENT_S_DN_CN} == 'app.netdata.cloud'\"\n # ... existing directory configuration ...\n </Directory>\n ```\n\n ##### Basic authentication\n\n In basic authentication, the client sends a request with an Authorization header that includes a base64-encoded string in the format `username:password`. The server then uses this information to authenticate the client. If this authentication method is selected, the user can set the user and password that will be used when connecting to the destination service.\n\n ##### Bearer token authentication\n\n In bearer token authentication, the client sends a request with an Authorization header that includes a bearer token. The server then uses this token to authenticate the client. Bearer tokens are typically generated by an authentication service, and are passed to the client after a successful authentication. If this method is selected, the user can set the token to be used for connecting to the destination service.\n\n ##### Challenge secret\n\n To validate that you have ownership of the web application that will receive the webhook events, we use a challenge-response check mechanism.\n\n This mechanism works as follows:\n\n - The challenge secret parameter that you provide is a shared secret between you and Netdata only.\n - On your request for creating a new webhook integration, we will make a GET request to the URL of the webhook, adding a query parameter `crc_token`, consisting of a random string.\n - Your application will receive this request and must construct a response, consisting of a base64-encoded HMAC SHA-256 hash created from the `crc_token` and the shared secret. 
The response will be in the format:\n\n ```json\n {\n \"response_token\": \"sha256=9GKoHJYmcHIkhD+C182QWN79YBd+D+Vkj4snmZrfNi4=\"\n }\n ```\n\n - We will compare your application's response with the hash that we will generate using the challenge secret, and if they are the same, the integration creation will succeed.\n\n We will do this validation every time you update your integration configuration.\n\n - Response requirements:\n - A base64-encoded HMAC SHA-256 hash created from the `crc_token` and the shared secret.\n - A valid `response_token` and JSON format.\n - Latency less than 5 seconds.\n - 200 HTTP response code.\n\n **Example response token generation in Python:**\n\n Here you can see how to define a handler for a Flask application in Python 3:\n\n ```python\n import base64\n import hashlib\n import hmac\n import json\n\n from flask import Flask, request\n\n # create the Flask application that serves the webhook endpoint\n app = Flask(__name__)\n\n key = 'YOUR_CHALLENGE_SECRET'\n\n @app.route('/webhooks/netdata')\n def webhook_challenge():\n token = request.args.get('crc_token').encode('ascii')\n\n # create an HMAC SHA-256 hash from the incoming token and your challenge secret\n sha256_hash_digest = hmac.new(key.encode(),\n msg=token,\n digestmod=hashlib.sha256).digest()\n\n # construct the response data with the base64-encoded hash\n response = {\n 'response_token': 'sha256=' + base64.b64encode(sha256_hash_digest).decode('ascii')\n }\n\n # return a properly formatted JSON response\n return json.dumps(response)\n ```\n\n",integration_type:"notification",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml",troubleshooting:""},{id:"notify-custom",meta:{name:"Custom",link:"",categories:["notify.agent"],icon_filename:"custom.png"},keywords:["custom"],overview:"# Custom\n\nNetdata Agent's alert notification feature allows you to send custom notifications to any endpoint you choose.\n\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary="Config Options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_CUSTOM | Set `SEND_CUSTOM` to YES | YES | yes |\n| DEFAULT_RECIPIENT_CUSTOM | This value is dependent on how you handle the `${to}` variable inside the `custom_sender()` function. | | yes |\n| custom_sender() | You can look at the other senders in `/usr/libexec/netdata/plugins.d/alarm-notify.sh` for examples of how to modify the function in this configuration file. | | no |\n\n##### DEFAULT_RECIPIENT_CUSTOM\n\nAll roles will default to this variable if left unconfigured. 
You can edit `DEFAULT_RECIPIENT_CUSTOM` with the variable you want, in the following entries at the bottom of the same file:\n```conf\nrole_recipients_custom[sysadmin]="systems"\nrole_recipients_custom[domainadmin]="domains"\nrole_recipients_custom[dba]="databases systems"\nrole_recipients_custom[webmaster]="marketing development"\nrole_recipients_custom[proxyadmin]="proxy-admin"\nrole_recipients_custom[sitemgr]="sites"\n```\n\n\n##### custom_sender()\n\nThe following is a sample `custom_sender()` function in `health_alarm_notify.conf` that sends an SMS via an imaginary HTTPS endpoint to an SMS gateway:\n```sh\ncustom_sender() {\n # example human readable SMS\n local msg="${host} ${status_message}: ${alarm} ${raised_for}"\n\n # limit it to 160 characters and encode it for use in a URL\n urlencode "${msg:0:160}" >/dev/null; msg="${REPLY}"\n\n # a space separated list of the recipients to send alarms to\n to="${1}"\n\n for phone in ${to}; do\n httpcode=$(docurl -X POST \\\n --data-urlencode "From=XXX" \\\n --data-urlencode "To=${phone}" \\\n --data-urlencode "Body=${msg}" \\\n -u "${accountsid}:${accounttoken}" \\\n https://domain.website.com/)\n\n if [ "${httpcode}" = "200" ]; then\n info "sent custom notification ${msg} to ${phone}"\n sent=$((sent + 1))\n else\n error "failed to send custom notification ${msg} to ${phone} with HTTP error code ${httpcode}."\n fi\n done\n}\n```\n\nThe supported variables that you can use for the function\'s `msg` variable are:\n\n| Variable name | Description |\n|:---------------------------:|:---------------------------------------------------------------------------------|\n| `${alarm}` | Like "name = value units" |\n| `${status_message}` | Like "needs attention", "recovered", "is critical" |\n| `${severity}` | Like "Escalated to CRITICAL", "Recovered from WARNING" |\n| `${raised_for}` | Like "(alarm was raised for 10 minutes)" |\n| `${host}` | The host that generated this event |\n| `${url_host}` | Same as ${host} but URL encoded |\n| `${unique_id}` | The unique id of this event |\n| `${alarm_id}` | The unique id of the alarm that generated this event |\n| `${event_id}` | The incremental id of the event, for this alarm id |\n| `${when}` | The timestamp this event occurred |\n| `${name}` | The name of the alarm, as given in netdata health.d entries |\n| `${url_name}` | Same as ${name} but URL encoded |\n| `${chart}` | The name of the chart (type.id) |\n| `${url_chart}` | Same as ${chart} but URL encoded |\n| `${status}` | The current status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL |\n| `${old_status}` | The previous status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL |\n| `${value}` | The current value of the alarm |\n| `${old_value}` | The previous value of the alarm |\n| `${src}` | The line number and file where the alarm has been configured |\n| `${duration}` | The duration in seconds of the previous alarm state |\n| `${duration_txt}` | Same as ${duration} for humans |\n| `${non_clear_duration}` | The total duration in seconds this is/was non-clear |\n| `${non_clear_duration_txt}` | Same as ${non_clear_duration} for humans |\n| `${units}` | The units of the value |\n| `${info}` | A short description of the alarm |\n| `${value_string}` | Friendly value (with units) |\n| `${old_value_string}` | Friendly old value (with units) |\n| `${image}` | The URL of an image to represent the status of the alarm |\n| `${color}` | A color in AABBCC format for the alarm |\n| `${goto_url}` | The URL the user can click to see the netdata dashboard |\n| 
`${calc_expression}` | The expression evaluated to provide the value for the alarm |\n| `${calc_param_values}` | The value of the variables in the evaluated expression |\n| `${total_warnings}` | The total number of alarms in WARNING state on the host |\n| `${total_critical}` | The total number of alarms in CRITICAL state on the host |\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# custom notifications\n\nSEND_CUSTOM="YES"\nDEFAULT_RECIPIENT_CUSTOM=""\n\n# The custom_sender() is a custom function to do whatever you need to do\ncustom_sender() {\n # example human readable SMS\n local msg="${host} ${status_message}: ${alarm} ${raised_for}"\n\n # limit it to 160 characters and encode it for use in a URL\n urlencode "${msg:0:160}" >/dev/null; msg="${REPLY}"\n\n # a space separated list of the recipients to send alarms to\n to="${1}"\n\n for phone in ${to}; do\n httpcode=$(docurl -X POST \\\n --data-urlencode "From=XXX" \\\n --data-urlencode "To=${phone}" \\\n --data-urlencode "Body=${msg}" \\\n -u "${accountsid}:${accounttoken}" \\\n https://domain.website.com/)\n\n if [ "${httpcode}" = "200" ]; then\n info "sent custom notification ${msg} to ${phone}"\n sent=$((sent + 1))\n else\n error "failed to send custom notification ${msg} to ${phone} with HTTP error code ${httpcode}."\n fi\n done\n}\n\n```\n',troubleshooting:'## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n',integration_type:"notification",edit_link:"https://github.com/netdata/netdata/blob/master/src/health/notifications/custom/metadata.yaml"},{id:"notify-discord",meta:{name:"Discord",link:"https://discord.com/",categories:["notify.agent"],icon_filename:"discord.png"},keywords:["Discord"],overview:"# Discord\n\nSend notifications to Discord using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- The incoming webhook URL as given by Discord. Create a webhook by following the official [Discord documentation](https://support.discord.com/hc/en-us/articles/228383668-Intro-to-Webhooks). 
You can use the same on all your Netdata servers (or you can have multiple if you like - your decision).\n- One or more Discord channels to post the messages to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary="Config Options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_DISCORD | Set `SEND_DISCORD` to YES | YES | yes |\n| DISCORD_WEBHOOK_URL | set `DISCORD_WEBHOOK_URL` to your webhook URL. | | yes |\n| DEFAULT_RECIPIENT_DISCORD | Set `DEFAULT_RECIPIENT_DISCORD` to the channel you want the alert notifications to be sent to. You can define multiple channels like this: `alerts` `systems`. | | yes |\n\n##### DEFAULT_RECIPIENT_DISCORD\n\nAll roles will default to this variable if left unconfigured.\nYou can then have different channels per role, by editing `DEFAULT_RECIPIENT_DISCORD` with the channel you want, in the following entries at the bottom of the same file:\n```conf\nrole_recipients_discord[sysadmin]="systems"\nrole_recipients_discord[domainadmin]="domains"\nrole_recipients_discord[dba]="databases systems"\nrole_recipients_discord[webmaster]="marketing development"\nrole_recipients_discord[proxyadmin]="proxy-admin"\nrole_recipients_discord[sitemgr]="sites"\n```\n\nThe values you provide should already exist as Discord channels in your server.\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# discord (discordapp.com) global notification options\n\nSEND_DISCORD="YES"\nDISCORD_WEBHOOK_URL="https://discord.com/api/webhooks/XXXXXXXXXXXXX/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"\nDEFAULT_RECIPIENT_DISCORD="alerts"\n\n```\n',troubleshooting:'## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n',integration_type:"notification",edit_link:"https://github.com/netdata/netdata/blob/master/src/health/notifications/discord/metadata.yaml"},{id:"notify-dynatrace",meta:{name:"Dynatrace",link:"https://dynatrace.com",categories:["notify.agent"],icon_filename:"dynatrace.svg"},keywords:["Dynatrace"],overview:"# Dynatrace\n\nDynatrace allows you to receive notifications using their Events REST API. 
See the [Dynatrace documentation](https://www.dynatrace.com/support/help/dynatrace-api/environment-api/events-v2/post-event) about POSTing an event in the Events API for more details.\nYou can send notifications to Dynatrace using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- A Dynatrace Server. You can use the same on all your Netdata servers, but make sure the server is network visible from your Netdata hosts. The Dynatrace server address should include the protocol prefix (http:// or https://), for example: https://monitor.example.com.\n- An API Token. Generate a secure access API token that enables access to your Dynatrace monitoring data via the REST-based API. See [Dynatrace API - Authentication](https://www.dynatrace.com/support/help/extend-dynatrace/dynatrace-api/basics/dynatrace-api-authentication/) for more details.\n- An API Space. This is the URL part of the page you have access to in order to generate the API Token. For example, the URL for a generated API token might look like: `https://monitor.illumineit.com/e/2a93fe0e-4cd5-469a-9d0d-1a064235cfce/#settings/integration/apikeys;gf=all`. In that case, the Space is `2a93fe0e-4cd5-469a-9d0d-1a064235cfce`.\n- A Server Tag. To generate one on your Dynatrace Server, go to Settings --\x3e Tags --\x3e Manually applied tags and create the Tag. The Netdata alarm is sent as a Dynatrace Event to be correlated with all hosts tagged with the Tag you have created.\n- Terminal access to the Agent you wish to configure\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary="Config Options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_DYNATRACE | Set `SEND_DYNATRACE` to YES | YES | yes |\n| DYNATRACE_SERVER | Set `DYNATRACE_SERVER` to the Dynatrace server with the protocol prefix, for example `https://monitor.example.com`. | | yes |\n| DYNATRACE_TOKEN | Set `DYNATRACE_TOKEN` to your Dynatrace API authentication token. | | yes |\n| DYNATRACE_SPACE | Set `DYNATRACE_SPACE` to the API Space; it is the URL part of the page you have access to in order to generate the API Token. | | yes |\n| DYNATRACE_TAG_VALUE | Set `DYNATRACE_TAG_VALUE` to your Dynatrace Server Tag. | | yes |\n| DYNATRACE_ANNOTATION_TYPE | `DYNATRACE_ANNOTATION_TYPE` can be left at its default value, Netdata Alarm, but you can change it to better fit your needs. | Netdata Alarm | no |\n| DYNATRACE_EVENT | Set `DYNATRACE_EVENT` to the Dynatrace eventType you want. 
| Netdata Alarm | no |\n\n##### DYNATRACE_SPACE\n\nFor example, the URL for a generated API token might look like: `https://monitor.illumineit.com/e/2a93fe0e-4cd5-469a-9d0d-1a064235cfce/#settings/integration/apikeys;gf=all`. In that case, the Space is `2a93fe0e-4cd5-469a-9d0d-1a064235cfce`.\n\n\n##### DYNATRACE_EVENT\n\n`AVAILABILITY_EVENT`, `CUSTOM_ALERT`, `CUSTOM_ANNOTATION`, `CUSTOM_CONFIGURATION`, `CUSTOM_DEPLOYMENT`, `CUSTOM_INFO`, `ERROR_EVENT`,\n`MARKED_FOR_TERMINATION`, `PERFORMANCE_EVENT`, `RESOURCE_CONTENTION_EVENT`.\nYou can read more [here](https://www.dynatrace.com/support/help/dynatrace-api/environment-api/events-v2/post-event#request-body-objects).\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Dynatrace global notification options\n\nSEND_DYNATRACE="YES"\nDYNATRACE_SERVER="https://monitor.example.com"\nDYNATRACE_TOKEN="XXXXXXX"\nDYNATRACE_SPACE="2a93fe0e-4cd5-469a-9d0d-1a064235cfce"\nDYNATRACE_TAG_VALUE="SERVERTAG"\nDYNATRACE_ANNOTATION_TYPE="Netdata Alert"\nDYNATRACE_EVENT="AVAILABILITY_EVENT"\n\n```\n',troubleshooting:'## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n',integration_type:"notification",edit_link:"https://github.com/netdata/netdata/blob/master/src/health/notifications/dynatrace/metadata.yaml"},{id:"notify-email",meta:{name:"Email",link:"",categories:["notify.agent"],icon_filename:"email.png"},keywords:["email"],overview:"# Email\n\nSend notifications via Email using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- A working `sendmail` command is required for email alerts to work. Almost all MTAs provide a `sendmail` interface. Netdata sends all emails as the user `netdata`, so make sure your `sendmail` works for local users.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary="Config Options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| EMAIL_SENDER | You can change `EMAIL_SENDER` to the email address sending the notifications. | netdata | no |\n| SEND_EMAIL | Set `SEND_EMAIL` to YES | YES | yes |\n| DEFAULT_RECIPIENT_EMAIL | Set `DEFAULT_RECIPIENT_EMAIL` to the email address you want the email to be sent to by default. You can define multiple email addresses like this: `alarms@example.com` `systems@example.com`. 
| root | yes |\n\n##### DEFAULT_RECIPIENT_EMAIL\n\nAll roles will default to this variable if left unconfigured.\nThe `DEFAULT_RECIPIENT_EMAIL` can be edited in the following entries at the bottom of the same file:\n```conf\nrole_recipients_email[sysadmin]="systems@example.com"\nrole_recipients_email[domainadmin]="domains@example.com"\nrole_recipients_email[dba]="databases@example.com systems@example.com"\nrole_recipients_email[webmaster]="marketing@example.com development@example.com"\nrole_recipients_email[proxyadmin]="proxy-admin@example.com"\nrole_recipients_email[sitemgr]="sites@example.com"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# email global notification options\n\nEMAIL_SENDER="example@domain.com"\nSEND_EMAIL="YES"\nDEFAULT_RECIPIENT_EMAIL="recipient@example.com"\n\n```\n',troubleshooting:'## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n',integration_type:"notification",edit_link:"https://github.com/netdata/netdata/blob/master/src/health/notifications/email/metadata.yaml"},{id:"notify-flock",meta:{name:"Flock",link:"https://support.flock.com/",categories:["notify.agent"],icon_filename:"flock.png"},keywords:["Flock"],overview:"# Flock\n\nSend notifications to Flock using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- The incoming webhook URL as given by flock.com. You can use the same on all your Netdata servers (or you can have multiple if you like). Read more about Flock webhooks and how to get one [here](https://admin.flock.com/webhooks).\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary="Config Options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_FLOCK | Set `SEND_FLOCK` to YES | YES | yes |\n| FLOCK_WEBHOOK_URL | set `FLOCK_WEBHOOK_URL` to your webhook URL. | | yes |\n| DEFAULT_RECIPIENT_FLOCK | Set `DEFAULT_RECIPIENT_FLOCK` to the Flock channel you want the alert notifications to be sent to. All roles will default to this variable if left unconfigured. 
| | yes |\n\n##### DEFAULT_RECIPIENT_FLOCK\n\nYou can have different channels per role, by editing DEFAULT_RECIPIENT_FLOCK with the channel you want, in the following entries at the bottom of the same file:\n```conf\nrole_recipients_flock[sysadmin]="systems"\nrole_recipients_flock[domainadmin]="domains"\nrole_recipients_flock[dba]="databases systems"\nrole_recipients_flock[webmaster]="marketing development"\nrole_recipients_flock[proxyadmin]="proxy-admin"\nrole_recipients_flock[sitemgr]="sites"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# flock (flock.com) global notification options\n\nSEND_FLOCK="YES"\nFLOCK_WEBHOOK_URL="https://api.flock.com/hooks/sendMessage/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"\nDEFAULT_RECIPIENT_FLOCK="alarms"\n\n```\n',troubleshooting:'## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n',integration_type:"notification",edit_link:"https://github.com/netdata/netdata/blob/master/src/health/notifications/flock/metadata.yaml"},{id:"notify-gotify",meta:{name:"Gotify",link:"https://gotify.net/",categories:["notify.agent"],icon_filename:"gotify.png"},keywords:["gotify"],overview:"# Gotify\n\n[Gotify](https://gotify.net/) is a self-hosted push notification service created for sending and receiving messages in real time.\nYou can send alerts to your Gotify instance using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- An application token. You can generate a new token in the Gotify Web UI.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary="Config Options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_GOTIFY | Set `SEND_GOTIFY` to YES | YES | yes |\n| GOTIFY_APP_TOKEN | set `GOTIFY_APP_TOKEN` to the app token you generated. 
| | yes |\n| GOTIFY_APP_URL | Set `GOTIFY_APP_URL` to point to your Gotify instance, for example `https://push.example.domain/` | | yes |\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\nSEND_GOTIFY="YES"\nGOTIFY_APP_TOKEN="XXXXXXXXXXXXXXX"\nGOTIFY_APP_URL="https://push.example.domain/"\n\n```\n',troubleshooting:'## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n',integration_type:"notification",edit_link:"https://github.com/netdata/netdata/blob/master/src/health/notifications/gotify/metadata.yaml"},{id:"notify-irc",meta:{name:"IRC",link:"",categories:["notify.agent"],icon_filename:"irc.png"},keywords:["IRC"],overview:"# IRC\n\nSend notifications to IRC using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- The `nc` utility. You can set the path to it, or Netdata will search for it in your system `$PATH`.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary="Config Options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| nc path | Set the path for nc, otherwise Netdata will search for it in your system $PATH | | yes |\n| SEND_IRC | Set `SEND_IRC` YES. | YES | yes |\n| IRC_NETWORK | Set `IRC_NETWORK` to the IRC network which your preferred channels belong to. | | yes |\n| IRC_PORT | Set `IRC_PORT` to the IRC port to which a connection will occur. | | no |\n| IRC_NICKNAME | Set `IRC_NICKNAME` to the IRC nickname which is required to send the notification. It must not be an already registered name as the connection\'s MODE is defined as a guest. | | yes |\n| IRC_REALNAME | Set `IRC_REALNAME` to the IRC realname which is required in order to make the connection. 
| | yes |\n| DEFAULT_RECIPIENT_IRC | You can have different channels per role, by editing `DEFAULT_RECIPIENT_IRC` with the channel you want | | yes |\n\n##### nc path\n\n```sh\n#------------------------------------------------------------------------------\n# external commands\n#\n# The full path of the nc command.\n# If empty, the system $PATH will be searched for it.\n# If not found, irc notifications will be silently disabled.\nnc="/usr/bin/nc"\n```\n\n\n##### DEFAULT_RECIPIENT_IRC\n\nThe `DEFAULT_RECIPIENT_IRC` can be edited in the following entries at the bottom of the same file:\n```conf\nrole_recipients_irc[sysadmin]="#systems"\nrole_recipients_irc[domainadmin]="#domains"\nrole_recipients_irc[dba]="#databases #systems"\nrole_recipients_irc[webmaster]="#marketing #development"\nrole_recipients_irc[proxyadmin]="#proxy-admin"\nrole_recipients_irc[sitemgr]="#sites"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# irc notification options\n#\nSEND_IRC="YES"\nDEFAULT_RECIPIENT_IRC="#system-alarms"\nIRC_NETWORK="irc.freenode.net"\nIRC_NICKNAME="netdata-alarm-user"\nIRC_REALNAME="netdata-user"\n\n```\n',troubleshooting:'## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n',integration_type:"notification",edit_link:"https://github.com/netdata/netdata/blob/master/src/health/notifications/irc/metadata.yaml"},{id:"notify-kavenegar",meta:{name:"Kavenegar",link:"https://kavenegar.com/",categories:["notify.agent"],icon_filename:"kavenegar.png"},keywords:["Kavenegar"],overview:"# Kavenegar\n\n[Kavenegar](https://kavenegar.com/), a service for software developers based in Iran, provides SMS sending and receiving as well as voice calls through its APIs.\nYou can send notifications to Kavenegar using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- The APIKEY and Sender from http://panel.kavenegar.com/client/setting/account\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary="Config Options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_KAVENEGAR | Set `SEND_KAVENEGAR` to YES | YES | yes |\n| KAVENEGAR_API_KEY | Set `KAVENEGAR_API_KEY` to your API key. | | yes |\n| KAVENEGAR_SENDER | Set `KAVENEGAR_SENDER` to the value of your Sender. 
| | yes |\n| DEFAULT_RECIPIENT_KAVENEGAR | Set `DEFAULT_RECIPIENT_KAVENEGAR` to the SMS recipient you want the alert notifications to be sent to. You can define multiple recipients like this: 09155555555 09177777777. | | yes |\n\n##### DEFAULT_RECIPIENT_KAVENEGAR\n\nAll roles will default to this variable if left unconfigured.\n\nYou can then have different SMS recipients per role, by editing `DEFAULT_RECIPIENT_KAVENEGAR` with the SMS recipients you want, in the following entries at the bottom of the same file:\n```conf\nrole_recipients_kavenegar[sysadmin]="09100000000"\nrole_recipients_kavenegar[domainadmin]="09111111111"\nrole_recipients_kavenegar[dba]="0922222222"\nrole_recipients_kavenegar[webmaster]="0933333333"\nrole_recipients_kavenegar[proxyadmin]="0944444444"\nrole_recipients_kavenegar[sitemgr]="0955555555"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Kavenegar (Kavenegar.com) SMS options\n\nSEND_KAVENEGAR="YES"\nKAVENEGAR_API_KEY="XXXXXXXXXXXX"\nKAVENEGAR_SENDER="YYYYYYYY"\nDEFAULT_RECIPIENT_KAVENEGAR="0912345678"\n\n```\n',troubleshooting:'## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n',integration_type:"notification",edit_link:"https://github.com/netdata/netdata/blob/master/src/health/notifications/kavenegar/metadata.yaml"},{id:"notify-matrix",meta:{name:"Matrix",link:"https://spec.matrix.org/unstable/push-gateway-api/",categories:["notify.agent"],icon_filename:"matrix.svg"},keywords:["Matrix"],overview:"# Matrix\n\nSend notifications to Matrix network rooms using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- The URL of the homeserver (`https://homeserver:port`).\n- Credentials for connecting to the homeserver, in the form of a valid access token for your account (or for a dedicated notification account). These tokens usually don\'t expire.\n- The Room IDs that you want to send the notifications to.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary="Config Options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_MATRIX | Set `SEND_MATRIX` to YES | YES | yes |\n| MATRIX_HOMESERVER | set `MATRIX_HOMESERVER` to the URL of the Matrix homeserver. 
| | yes |\n| MATRIX_ACCESSTOKEN | Set `MATRIX_ACCESSTOKEN` to the access token from your Matrix account. | | yes |\n| DEFAULT_RECIPIENT_MATRIX | Set `DEFAULT_RECIPIENT_MATRIX` to the Rooms you want the alert notifications to be sent to. The format is `!roomid:homeservername`. | | yes |\n\n##### MATRIX_ACCESSTOKEN\n\nTo obtain the access token, you can use the following curl command:\n```bash\ncurl -XPOST -d \'{"type":"m.login.password", "user":"example", "password":"wordpass"}\' "https://homeserver:8448/_matrix/client/r0/login"\n```\n\n\n##### DEFAULT_RECIPIENT_MATRIX\n\nThe Room IDs are unique identifiers and can be obtained from the Room settings in a Matrix client (e.g. Riot).\n\nYou can define multiple Rooms like this: `!roomid1:homeservername` `!roomid2:homeservername`.\n\nAll roles will default to this variable if left unconfigured.\n\nYou can have different Rooms per role, by editing `DEFAULT_RECIPIENT_MATRIX` with the `!roomid:homeservername` you want, in the following entries at the bottom of the same file:\n\n```conf\nrole_recipients_matrix[sysadmin]="!roomid1:homeservername"\nrole_recipients_matrix[domainadmin]="!roomid2:homeservername"\nrole_recipients_matrix[dba]="!roomid3:homeservername"\nrole_recipients_matrix[webmaster]="!roomid4:homeservername"\nrole_recipients_matrix[proxyadmin]="!roomid5:homeservername"\nrole_recipients_matrix[sitemgr]="!roomid6:homeservername"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Matrix notifications\n\nSEND_MATRIX="YES"\nMATRIX_HOMESERVER="https://matrix.org:8448"\nMATRIX_ACCESSTOKEN="XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"\nDEFAULT_RECIPIENT_MATRIX="!XXXXXXXXXXXX:matrix.org"\n\n```\n',troubleshooting:'## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n',integration_type:"notification",edit_link:"https://github.com/netdata/netdata/blob/master/src/health/notifications/matrix/metadata.yaml"},{id:"notify-messagebird",meta:{name:"MessageBird",link:"https://messagebird.com/",categories:["notify.agent"],icon_filename:"messagebird.svg"},keywords:["MessageBird"],overview:"# MessageBird\n\nSend notifications to MessageBird using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- An access key under \'API ACCESS (REST)\' (you will want a live key). You can read more [here](https://developers.messagebird.com/quickstarts/sms/test-credits-api-keys/).\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following 
options can be defined for this notification\n\n{% details open=true summary="Config Options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_MESSAGEBIRD | Set `SEND_MESSAGEBIRD` to YES | YES | yes |\n| MESSAGEBIRD_ACCESS_KEY | Set `MESSAGEBIRD_ACCESS_KEY` to your API key. | | yes |\n| MESSAGEBIRD_NUMBER | Set `MESSAGEBIRD_NUMBER` to the MessageBird number you want to use for the alert. | | yes |\n| DEFAULT_RECIPIENT_MESSAGEBIRD | Set `DEFAULT_RECIPIENT_MESSAGEBIRD` to the number you want the alert notification to be sent to as an SMS. You can define multiple recipients like this: +15555555555 +17777777777. | | yes |\n\n##### DEFAULT_RECIPIENT_MESSAGEBIRD\n\nAll roles will default to this variable if left unconfigured.\n\nYou can then have different recipients per role, by editing `DEFAULT_RECIPIENT_MESSAGEBIRD` with the number you want, in the following entries at the bottom of the same file:\n```conf\nrole_recipients_messagebird[sysadmin]="+15555555555"\nrole_recipients_messagebird[domainadmin]="+15555555556"\nrole_recipients_messagebird[dba]="+15555555557"\nrole_recipients_messagebird[webmaster]="+15555555558"\nrole_recipients_messagebird[proxyadmin]="+15555555559"\nrole_recipients_messagebird[sitemgr]="+15555555550"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Messagebird (messagebird.com) SMS options\n\nSEND_MESSAGEBIRD="YES"\nMESSAGEBIRD_ACCESS_KEY="XXXXXXXX"\nMESSAGEBIRD_NUMBER="XXXXXXX"\nDEFAULT_RECIPIENT_MESSAGEBIRD="+15555555555"\n\n```\n',troubleshooting:'## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n',integration_type:"notification",edit_link:"https://github.com/netdata/netdata/blob/master/src/health/notifications/messagebird/metadata.yaml"},{id:"notify-ntfy",meta:{name:"ntfy",link:"https://ntfy.sh/",categories:["notify.agent"],icon_filename:"ntfy.svg"},keywords:["ntfy"],overview:"# ntfy\n\n[ntfy](https://ntfy.sh/) (pronounce: notify) is a simple HTTP-based [pub-sub](https://en.wikipedia.org/wiki/Publish%E2%80%93subscribe_pattern) notification service. It allows you to send notifications to your phone or desktop via scripts from any computer, entirely without signup, cost or setup. 
It's also [open source](https://github.com/binwiederhier/ntfy) if you want to run your own server.\nYou can send alerts to an ntfy server using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- (Optional) A [self-hosted ntfy server](https://docs.ntfy.sh/faq/#can-i-self-host-it), in case you don\'t want to use https://ntfy.sh\n- A new [topic](https://ntfy.sh/#subscribe) for the notifications to be published to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary="Config Options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_NTFY | Set `SEND_NTFY` to YES | YES | yes |\n| DEFAULT_RECIPIENT_NTFY | URL formed by the server-topic combination you want the alert notifications to be sent to. Unless hosting your own server, the server should always be set to https://ntfy.sh. | | yes |\n| NTFY_USERNAME | The username for netdata to use to authenticate with an ntfy server. | | no |\n| NTFY_PASSWORD | The password for netdata to use to authenticate with an ntfy server. | | no |\n| NTFY_ACCESS_TOKEN | The access token for netdata to use to authenticate with an ntfy server. | | no |\n\n##### DEFAULT_RECIPIENT_NTFY\n\nYou can define multiple recipient URLs like this: `https://SERVER1/TOPIC1` `https://SERVER2/TOPIC2`\n\nAll roles will default to this variable if left unconfigured.\n\nYou can then have different servers and/or topics per role, by editing DEFAULT_RECIPIENT_NTFY with the server-topic combination you want, in the following entries at the bottom of the same file:\n```conf\nrole_recipients_ntfy[sysadmin]="https://SERVER1/TOPIC1"\nrole_recipients_ntfy[domainadmin]="https://SERVER2/TOPIC2"\nrole_recipients_ntfy[dba]="https://SERVER3/TOPIC3"\nrole_recipients_ntfy[webmaster]="https://SERVER4/TOPIC4"\nrole_recipients_ntfy[proxyadmin]="https://SERVER5/TOPIC5"\nrole_recipients_ntfy[sitemgr]="https://SERVER6/TOPIC6"\n```\n\n\n##### NTFY_USERNAME\n\nOnly useful on self-hosted ntfy instances. See [users and roles](https://docs.ntfy.sh/config/#users-and-roles) for details.\nEnsure that your user has proper read/write access to the provided topic in `DEFAULT_RECIPIENT_NTFY`\n\n\n##### NTFY_PASSWORD\n\nOnly useful on self-hosted ntfy instances. See [users and roles](https://docs.ntfy.sh/config/#users-and-roles) for details.\nEnsure that your user has proper read/write access to the provided topic in `DEFAULT_RECIPIENT_NTFY`\n\n\n##### NTFY_ACCESS_TOKEN\n\nThis can be used in place of `NTFY_USERNAME` and `NTFY_PASSWORD` to authenticate with a self-hosted ntfy instance. 
See [access tokens](https://docs.ntfy.sh/config/?h=access+to#access-tokens) for details.\nEnsure that the token user has proper read/write access to the provided topic in `DEFAULT_RECIPIENT_NTFY`.\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\nSEND_NTFY="YES"\nDEFAULT_RECIPIENT_NTFY="https://ntfy.sh/netdata-X7seHg7d3Tw9zGOk https://ntfy.sh/netdata-oIPm4IK1IlUtlA30"\n\n```\n',troubleshooting:'## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n',integration_type:"notification",edit_link:"https://github.com/netdata/netdata/blob/master/src/health/notifications/ntfy/metadata.yaml"},{id:"notify-opsgenie",meta:{name:"OpsGenie",link:"https://www.atlassian.com/software/opsgenie",categories:["notify.agent"],icon_filename:"opsgenie.png"},keywords:["OpsGenie"],overview:"# OpsGenie\n\nOpsgenie is an alerting and incident response tool. It is designed to group and filter alarms, build custom routing rules for on-call teams, and correlate deployments and commits to incidents.\nYou can send notifications to Opsgenie using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- An Opsgenie integration. You can create an [integration](https://docs.opsgenie.com/docs/api-integration) in the [Opsgenie](https://www.atlassian.com/software/opsgenie) dashboard.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary="Config Options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_OPSGENIE | Set `SEND_OPSGENIE` to YES | YES | yes |\n| OPSGENIE_API_KEY | Set `OPSGENIE_API_KEY` to your API key. | | yes |\n| OPSGENIE_API_URL | Set `OPSGENIE_API_URL` to the corresponding URL if required; for example, there are region-specific API URLs such as `https://eu.api.opsgenie.com`. 
| https://api.opsgenie.com | no |\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\nSEND_OPSGENIE="YES"\nOPSGENIE_API_KEY="11111111-2222-3333-4444-555555555555"\nOPSGENIE_API_URL=""\n\n```\n',troubleshooting:'## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n',integration_type:"notification",edit_link:"https://github.com/netdata/netdata/blob/master/src/health/notifications/opsgenie/metadata.yaml"},{id:"notify-pagerduty",meta:{name:"PagerDuty",link:"https://www.pagerduty.com/",categories:["notify.agent"],icon_filename:"pagerduty.png"},keywords:["PagerDuty"],overview:"# PagerDuty\n\nPagerDuty is an enterprise incident resolution service that integrates with ITOps and DevOps monitoring stacks to improve operational reliability and agility. From enriching and aggregating events to correlating them into incidents, PagerDuty streamlines the incident management process by reducing alert noise and resolution times.\nYou can send notifications to PagerDuty using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- An installation of the [PagerDuty](https://www.pagerduty.com/docs/guides/agent-install-guide/) agent on the node running the Netdata Agent\n- A PagerDuty Generic API service using either the `Events API v2` or `Events API v1`\n- [Add a new service](https://support.pagerduty.com/docs/services-and-integrations#section-configuring-services-and-integrations) to PagerDuty. Click Use our API directly and select either `Events API v2` or `Events API v1`. Once you finish creating the service, click on the Integrations tab to find your Integration Key.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary="Config Options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_PD | Set `SEND_PD` to YES | YES | yes |\n| DEFAULT_RECIPIENT_PD | Set `DEFAULT_RECIPIENT_PD` to the PagerDuty service key you want the alert notifications to be sent to. You can define multiple service keys like this: `pd_service_key_1` `pd_service_key_2`. 
| | yes |\n\n##### DEFAULT_RECIPIENT_PD\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_PD` can be edited in the following entries at the bottom of the same file:\n```conf\nrole_recipients_pd[sysadmin]="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxa"\nrole_recipients_pd[domainadmin]="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxb"\nrole_recipients_pd[dba]="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxc"\nrole_recipients_pd[webmaster]="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxd"\nrole_recipients_pd[proxyadmin]="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxe"\nrole_recipients_pd[sitemgr]="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxf"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# pagerduty.com notification options\n\nSEND_PD="YES"\nDEFAULT_RECIPIENT_PD="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"\nUSE_PD_VERSION="2"\n\n```\n',troubleshooting:'## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n',integration_type:"notification",edit_link:"https://github.com/netdata/netdata/blob/master/src/health/notifications/pagerduty/metadata.yaml"},{id:"notify-prowl",meta:{name:"Prowl",link:"https://www.prowlapp.com/",categories:["notify.agent"],icon_filename:"prowl.png"},keywords:["Prowl"],overview:"# Prowl\n\nSend notifications to Prowl using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n\n## Limitations\n\n- Because of how Netdata integrates with Prowl, there is a hard limit of at most 1000 notifications per hour (starting from the first notification sent). Any alerts beyond the first thousand in an hour will be dropped.\n- Warning messages will be sent with the 'High' priority, critical messages will be sent with the 'Emergency' priority, and all other messages will be sent with the normal priority. Opening the notification's associated URL will take you to the Netdata dashboard of the system that issued the alert, directly to the chart that it triggered on.\n\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- A Prowl API key, which can be requested through the Prowl website after registering\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary="Config Options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_PROWL | Set `SEND_PROWL` to YES | YES | yes |\n| DEFAULT_RECIPIENT_PROWL | Set `DEFAULT_RECIPIENT_PROWL` to the Prowl API key you want the alert notifications to be sent to. 
You can define multiple API keys like this: `APIKEY1`, `APIKEY2`. | | yes |\n\n##### DEFAULT_RECIPIENT_PROWL\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_PROWL` can be edited in the following entries at the bottom of the same file:\n```conf\nrole_recipients_prowl[sysadmin]="AAAAAAAA"\nrole_recipients_prowl[domainadmin]="BBBBBBBBB"\nrole_recipients_prowl[dba]="CCCCCCCCC"\nrole_recipients_prowl[webmaster]="DDDDDDDDDD"\nrole_recipients_prowl[proxyadmin]="EEEEEEEEEE"\nrole_recipients_prowl[sitemgr]="FFFFFFFFFF"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# iOS Push Notifications\n\nSEND_PROWL="YES"\nDEFAULT_RECIPIENT_PROWL="XXXXXXXXXX"\n\n```\n',troubleshooting:'## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n',integration_type:"notification",edit_link:"https://github.com/netdata/netdata/blob/master/src/health/notifications/prowl/metadata.yaml"},{id:"notify-pushbullet",meta:{name:"Pushbullet",link:"https://www.pushbullet.com/",categories:["notify.agent"],icon_filename:"pushbullet.png"},keywords:["Pushbullet"],overview:"# Pushbullet\n\nSend notifications to Pushbullet using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- A Pushbullet access token that can be created in your [account settings](https://www.pushbullet.com/#settings/account).\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary="Config Options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_PUSHBULLET | Set `SEND_PUSHBULLET` to YES | YES | yes |\n| PUSHBULLET_ACCESS_TOKEN | set `PUSHBULLET_ACCESS_TOKEN` to the access token you generated. | | yes |\n| DEFAULT_RECIPIENT_PUSHBULLET | Set `DEFAULT_RECIPIENT_PUSHBULLET` to the email (e.g. `example@domain.com`) or the channel tag (e.g. `#channel`) you want the alert notifications to be sent to. 
| | yes |\n\n##### DEFAULT_RECIPIENT_PUSHBULLET\n\nYou can define multiple entries like this: `user1@email.com` `user2@email.com`.\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_PUSHBULLET` can be edited in the following entries at the bottom of the same file:\n```conf\nrole_recipients_pushbullet[sysadmin]="user1@email.com"\nrole_recipients_pushbullet[domainadmin]="user2@mail.com"\nrole_recipients_pushbullet[dba]="#channel1"\nrole_recipients_pushbullet[webmaster]="#channel2"\nrole_recipients_pushbullet[proxyadmin]="user3@mail.com"\nrole_recipients_pushbullet[sitemgr]="user4@mail.com"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# pushbullet (pushbullet.com) push notification options\n\nSEND_PUSHBULLET="YES"\nPUSHBULLET_ACCESS_TOKEN="XXXXXXXXX"\nDEFAULT_RECIPIENT_PUSHBULLET="admin1@example.com admin3@somemail.com #examplechanneltag #anotherchanneltag"\n\n```\n',troubleshooting:'## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n',integration_type:"notification",edit_link:"https://github.com/netdata/netdata/blob/master/src/health/notifications/pushbullet/metadata.yaml"},{id:"notify-pushover",meta:{name:"PushOver",link:"https://pushover.net/",categories:["notify.agent"],icon_filename:"pushover.png"},keywords:["PushOver"],overview:"# PushOver\n\nSend notifications to Pushover using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n- Netdata will send warning messages with priority 0 and critical messages with priority 1.\n- Pushover allows you to select do-not-disturb hours. The way this is configured, critical notifications will ring and vibrate your phone, even during the do-not-disturb hours.\n- All other notifications will be delivered silently.\n\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- An Application token. You can use the same on all your Netdata servers.\n- A User token for each user you are going to send notifications to. This is the actual recipient of the notification.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary="Config Options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_PUSHOVER | Set `SEND_PUSHOVER` to YES | YES | yes |\n| PUSHOVER_APP_TOKEN | set `PUSHOVER_APP_TOKEN` to your Pushover Application token. 
| | yes |\n| DEFAULT_RECIPIENT_PUSHOVER | Set `DEFAULT_RECIPIENT_PUSHOVER` to the Pushover User token you want the alert notifications to be sent to. You can define multiple User tokens like this: `USERTOKEN1` `USERTOKEN2`. | | yes |\n\n##### DEFAULT_RECIPIENT_PUSHOVER\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_PUSHOVER` can be edited in the following entries at the bottom of the same file:\n```conf\nrole_recipients_pushover[sysadmin]="USERTOKEN1"\nrole_recipients_pushover[domainadmin]="USERTOKEN2"\nrole_recipients_pushover[dba]="USERTOKEN3 USERTOKEN4"\nrole_recipients_pushover[webmaster]="USERTOKEN5"\nrole_recipients_pushover[proxyadmin]="USERTOKEN6"\nrole_recipients_pushover[sitemgr]="USERTOKEN7"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# pushover (pushover.net) global notification options\n\nSEND_PUSHOVER="YES"\nPUSHOVER_APP_TOKEN="XXXXXXXXX"\nDEFAULT_RECIPIENT_PUSHOVER="USERTOKEN"\n\n```\n',troubleshooting:'## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n',integration_type:"notification",edit_link:"https://github.com/netdata/netdata/blob/master/src/health/notifications/pushover/metadata.yaml"},{id:"notify-rocketchat",meta:{name:"RocketChat",link:"https://rocket.chat/",categories:["notify.agent"],icon_filename:"rocketchat.png"},keywords:["RocketChat"],overview:"# RocketChat\n\nSend notifications to Rocket.Chat using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- The incoming webhook URL as given by RocketChat. You can use the same on all your Netdata servers (or you can have multiple if you like).\n- One or more channels to post the messages to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary="Config Options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_ROCKETCHAT | Set `SEND_ROCKETCHAT` to `YES` | YES | yes |\n| ROCKETCHAT_WEBHOOK_URL | set `ROCKETCHAT_WEBHOOK_URL` to your webhook URL. | | yes |\n| DEFAULT_RECIPIENT_ROCKETCHAT | Set `DEFAULT_RECIPIENT_ROCKETCHAT` to the channel you want the alert notifications to be sent to. You can define multiple channels like this: `alerts` `systems`. 
| | yes |\n\n##### DEFAULT_RECIPIENT_ROCKETCHAT\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_ROCKETCHAT` can be edited in the following entries at the bottom of the same file:\n```conf\nrole_recipients_rocketchat[sysadmin]="systems"\nrole_recipients_rocketchat[domainadmin]="domains"\nrole_recipients_rocketchat[dba]="databases systems"\nrole_recipients_rocketchat[webmaster]="marketing development"\nrole_recipients_rocketchat[proxyadmin]="proxy_admin"\nrole_recipients_rocketchat[sitemgr]="sites"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# rocketchat (rocket.chat) global notification options\n\nSEND_ROCKETCHAT="YES"\nROCKETCHAT_WEBHOOK_URL=""\nDEFAULT_RECIPIENT_ROCKETCHAT="monitoring_alarms"\n\n```\n',troubleshooting:'## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n',integration_type:"notification",edit_link:"https://github.com/netdata/netdata/blob/master/src/health/notifications/rocketchat/metadata.yaml"},{id:"notify-slack",meta:{name:"Slack",link:"https://slack.com/",categories:["notify.agent"],icon_filename:"slack.png"},keywords:["Slack"],overview:"# Slack\n\nSend notifications to a Slack workspace using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- A Slack app along with an incoming webhook; read Slack\'s guide on the topic [here](https://api.slack.com/messaging/webhooks).\n- One or more channels to post the messages to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary="Config Options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_SLACK | Set `SEND_SLACK` to YES | YES | yes |\n| SLACK_WEBHOOK_URL | set `SLACK_WEBHOOK_URL` to your Slack app\'s webhook URL. | | yes |\n| DEFAULT_RECIPIENT_SLACK | Set `DEFAULT_RECIPIENT_SLACK` to the Slack channel your Slack app is set to send messages to. The syntax for channels is `#channel` or `channel`. 
| | yes |\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# slack (slack.com) global notification options\n\nSEND_SLACK="YES"\nSLACK_WEBHOOK_URL="https://hooks.slack.com/services/XXXXXXXX/XXXXXXXX/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"\nDEFAULT_RECIPIENT_SLACK="#alarms"\n\n```\n',troubleshooting:'## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n',integration_type:"notification",edit_link:"https://github.com/netdata/netdata/blob/master/src/health/notifications/slack/metadata.yaml"},{id:"notify-sms",meta:{name:"SMS",link:"http://smstools3.kekekasvi.com/",categories:["notify.agent"],icon_filename:"sms.svg"},keywords:["SMS tools 3","SMS","Messaging"],overview:"# SMS\n\nSend notifications to `smstools3` using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\nThe SMS Server Tools 3 is an SMS gateway which can send and receive short messages through GSM modems and mobile phones.\n\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- [Install](http://smstools3.kekekasvi.com/index.php?p=compiling) and [configure](http://smstools3.kekekasvi.com/index.php?p=configure) `smsd`\n- Ensure that the user `netdata` can execute `sendsms`. Any user executing `sendsms` needs to:\n - Have write permissions to `/tmp` and `/var/spool/sms/outgoing`\n - Be a member of the `smsd` group\n - To verify that the steps above were successful, just `su netdata` and execute `sendsms phone message`.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary="Config Options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| sendsms | Set the path for `sendsms`, otherwise Netdata will search for it in your system `$PATH`. | | yes |\n| SEND_SMS | Set `SEND_SMS` to `YES`. | YES | yes |\n| DEFAULT_RECIPIENT_SMS | Set `DEFAULT_RECIPIENT_SMS` to the phone number you want the alert notifications to be sent to. You can define multiple phone numbers like this: `PHONE1` `PHONE2`. 
| | yes |\n\n##### sendsms\n\n```conf\n# The full path of the sendsms command (smstools3).\n# If empty, the system $PATH will be searched for it.\n# If not found, SMS notifications will be silently disabled.\nsendsms="/usr/bin/sendsms"\n```\n\n\n##### DEFAULT_RECIPIENT_SMS\n\nAll roles will default to this variable if left unconfigured.\n\nYou can then have different phone numbers per role, by editing `DEFAULT_RECIPIENT_SMS` with the phone number you want, in the following entries at the bottom of the same file:\n```conf\nrole_recipients_sms[sysadmin]="PHONE1"\nrole_recipients_sms[domainadmin]="PHONE2"\nrole_recipients_sms[dba]="PHONE3"\nrole_recipients_sms[webmaster]="PHONE4"\nrole_recipients_sms[proxyadmin]="PHONE5"\nrole_recipients_sms[sitemgr]="PHONE6"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# SMS Server Tools 3 (smstools3) global notification options\nSEND_SMS="YES"\nDEFAULT_RECIPIENT_SMS="1234567890"\n\n```\n',troubleshooting:'## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n',integration_type:"notification",edit_link:"https://github.com/netdata/netdata/blob/master/src/health/notifications/smstools3/metadata.yaml"},{id:"notify-syslog",meta:{name:"syslog",link:"",categories:["notify.agent"],icon_filename:"syslog.png"},keywords:["syslog"],overview:"# syslog\n\nSend notifications to Syslog using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- A working `logger` command. This is the case on pretty much every Linux system in existence, and most BSD systems.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary="Config Options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SYSLOG_FACILITY | Set `SYSLOG_FACILITY` to the facility used for logging. By default this value is set to `local6`. | | yes |\n| DEFAULT_RECIPIENT_SYSLOG | Set `DEFAULT_RECIPIENT_SYSLOG` to the recipient you want the alert notifications to be sent to. | | yes |\n| SEND_SYSLOG | Set `SEND_SYSLOG` to `YES`; make sure you have everything else configured before turning this on. | | yes |\n\n##### DEFAULT_RECIPIENT_SYSLOG\n\nTargets are defined as follows:\n\n```\n[[facility.level][@host[:port]]/]prefix\n```\n\n`prefix` defines what the log messages are prefixed with. 
By default, all lines are prefixed with \'netdata\'.\n\nThe facility and level are the standard syslog facility and level options; for more info on them, see your local logger and syslog documentation. By default, Netdata will log to the `local6` facility, with a log level dependent on the type of message (`crit` for CRITICAL, `warning` for WARNING, and `info` for everything else).\n\nYou can configure sending directly to remote log servers by specifying a host (and optionally a port). However, this has a somewhat high overhead, so it is much preferred to use your local syslog daemon to handle the forwarding of messages to remote systems (pretty much all of them allow at least simple forwarding, and most of the really popular ones support complex queueing and routing of messages to remote log servers).\n\nYou can define multiple recipients like this: `daemon.notice@loghost:514/netdata daemon.notice@loghost2:514/netdata`.\nAll roles will default to this variable if left unconfigured.\n\n\n##### SEND_SYSLOG\n\nYou can then have different recipients per role, by editing `DEFAULT_RECIPIENT_SYSLOG` with the recipient you want, in the following entries at the bottom of the same file:\n\n```conf\nrole_recipients_syslog[sysadmin]="daemon.notice@loghost1:514/netdata"\nrole_recipients_syslog[domainadmin]="daemon.notice@loghost2:514/netdata"\nrole_recipients_syslog[dba]="daemon.notice@loghost3:514/netdata"\nrole_recipients_syslog[webmaster]="daemon.notice@loghost4:514/netdata"\nrole_recipients_syslog[proxyadmin]="daemon.notice@loghost5:514/netdata"\nrole_recipients_syslog[sitemgr]="daemon.notice@loghost6:514/netdata"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# syslog notifications\n\nSEND_SYSLOG="YES"\nSYSLOG_FACILITY=\'local6\'\nDEFAULT_RECIPIENT_SYSLOG="daemon.notice@loghost6:514/netdata"\n\n```\n',troubleshooting:'## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n',integration_type:"notification",edit_link:"https://github.com/netdata/netdata/blob/master/src/health/notifications/syslog/metadata.yaml"},{id:"notify-teams",meta:{name:"Microsoft Teams",link:"https://www.microsoft.com/en-us/microsoft-teams/log-in",categories:["notify.agent"],icon_filename:"msteams.svg"},keywords:["Microsoft","Teams","MS teams"],overview:"# Microsoft Teams\n\nYou can send Netdata alerts to Microsoft Teams using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- The incoming webhook URL as given by Microsoft Teams. 
You can use the same on all your Netdata servers (or you can have multiple if you like).\n- One or more channels to post the messages to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary="Config Options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_MSTEAMS | Set `SEND_MSTEAMS` to YES | YES | yes |\n| MSTEAMS_WEBHOOK_URL | set `MSTEAMS_WEBHOOK_URL` to the incoming webhook URL as given by Microsoft Teams. | | yes |\n| DEFAULT_RECIPIENT_MSTEAMS | Set `DEFAULT_RECIPIENT_MSTEAMS` to the encoded Microsoft Teams channel name you want the alert notifications to be sent to. | | yes |\n\n##### DEFAULT_RECIPIENT_MSTEAMS\n\nIn Microsoft Teams the channel name is encoded in the URI after `/IncomingWebhook/`. You can define multiple channels like this: `CHANNEL1` `CHANNEL2`.\n\nAll roles will default to this variable if left unconfigured.\n\nYou can have different channels per role, by editing `DEFAULT_RECIPIENT_MSTEAMS` with the channel you want, in the following entries at the bottom of the same file:\n```conf\nrole_recipients_msteams[sysadmin]="CHANNEL1"\nrole_recipients_msteams[domainadmin]="CHANNEL2"\nrole_recipients_msteams[dba]="CHANNEL3"\nrole_recipients_msteams[webmaster]="CHANNEL4"\nrole_recipients_msteams[proxyadmin]="CHANNEL5"\nrole_recipients_msteams[sitemgr]="CHANNEL6"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Microsoft Teams (office.com) global notification options\n\nSEND_MSTEAMS="YES"\nMSTEAMS_WEBHOOK_URL="https://outlook.office.com/webhook/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX@XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/IncomingWebhook/CHANNEL/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"\nDEFAULT_RECIPIENT_MSTEAMS="XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"\n\n```\n',troubleshooting:'## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n',integration_type:"notification",edit_link:"https://github.com/netdata/netdata/blob/master/src/health/notifications/msteams/metadata.yaml"},{id:"notify-telegram",meta:{name:"Telegram",link:"https://telegram.org/",categories:["notify.agent"],icon_filename:"telegram.svg"},keywords:["Telegram"],overview:"# Telegram\n\nSend notifications to Telegram using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- A bot token. 
To get one, contact the [@BotFather](https://t.me/BotFather) bot, send the command `/newbot`, and follow the instructions. Invite your bot to a group where you want it to send messages.\n- The chat ID for every chat you want to send messages to. Invite the [@myidbot](https://t.me/myidbot) bot to the group that will receive notifications, and write the command `/getgroupid@myidbot` to get the group chat ID. Group IDs start with a hyphen; supergroup IDs start with `-100`.\n- Terminal access to the Agent you wish to configure.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary="Config Options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_TELEGRAM | Set `SEND_TELEGRAM` to YES | YES | yes |\n| TELEGRAM_BOT_TOKEN | set `TELEGRAM_BOT_TOKEN` to your bot token. | | yes |\n| DEFAULT_RECIPIENT_TELEGRAM | Set `DEFAULT_RECIPIENT_TELEGRAM` to the chat ID you want the alert notifications to be sent to. You can define multiple chat IDs like this: `-49999333322 -1009999222255`. | | yes |\n\n##### DEFAULT_RECIPIENT_TELEGRAM\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_TELEGRAM` can be edited in the following entries at the bottom of the same file:\n\n```conf\nrole_recipients_telegram[sysadmin]="-49999333324"\nrole_recipients_telegram[domainadmin]="-49999333389"\nrole_recipients_telegram[dba]="-10099992222"\nrole_recipients_telegram[webmaster]="-10099992222 -49999333389"\nrole_recipients_telegram[proxyadmin]="-49999333344"\nrole_recipients_telegram[sitemgr]="-49999333876"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# telegram (telegram.org) global notification options\n\nSEND_TELEGRAM="YES"\nTELEGRAM_BOT_TOKEN="111122223:7OpFlFFRzRBbrUUmIjj5HF9Ox2pYJZy5"\nDEFAULT_RECIPIENT_TELEGRAM="-49999333876"\n\n```\n',troubleshooting:'## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n',integration_type:"notification",edit_link:"https://github.com/netdata/netdata/blob/master/src/health/notifications/telegram/metadata.yaml"},{id:"notify-twilio",meta:{name:"Twilio",link:"https://www.twilio.com/",categories:["notify.agent"],icon_filename:"twilio.png"},keywords:["Twilio"],overview:"# Twilio\n\nSend notifications to Twilio using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n",setup:'## Setup\n\n### Prerequisites\n\n#### \n\n- Get your SID and Token from https://www.twilio.com/console\n- Terminal access to the Agent 
you wish to configure\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary="Config Options" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_TWILIO | Set `SEND_TWILIO` to YES | YES | yes |\n| TWILIO_ACCOUNT_SID | set `TWILIO_ACCOUNT_SID` to your account SID. | | yes |\n| TWILIO_ACCOUNT_TOKEN | Set `TWILIO_ACCOUNT_TOKEN` to your account token. | | yes |\n| TWILIO_NUMBER | Set `TWILIO_NUMBER` to your account\'s number. | | yes |\n| DEFAULT_RECIPIENT_TWILIO | Set `DEFAULT_RECIPIENT_TWILIO` to the number you want the alert notifications to be sent to. You can define multiple numbers like this: `+15555555555 +17777777777`. | | yes |\n\n##### DEFAULT_RECIPIENT_TWILIO\n\nYou can then have different recipients per role, by editing `DEFAULT_RECIPIENT_TWILIO` with the recipient\'s number you want, in the following entries at the bottom of the same file:\n\n```conf\nrole_recipients_twilio[sysadmin]="+15555555555"\nrole_recipients_twilio[domainadmin]="+15555555556"\nrole_recipients_twilio[dba]="+15555555557"\nrole_recipients_twilio[webmaster]="+15555555558"\nrole_recipients_twilio[proxyadmin]="+15555555559"\nrole_recipients_twilio[sitemgr]="+15555555550"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Twilio (twilio.com) SMS options\n\nSEND_TWILIO="YES"\nTWILIO_ACCOUNT_SID="xxxxxxxxx"\nTWILIO_ACCOUNT_TOKEN="xxxxxxxxxx"\nTWILIO_NUMBER="xxxxxxxxxxx"\nDEFAULT_RECIPIENT_TWILIO="+15555555555"\n\n```\n',troubleshooting:'## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n',integration_type:"notification",edit_link:"https://github.com/netdata/netdata/blob/master/src/health/notifications/twilio/metadata.yaml"},{id:"oidc-authentication",meta:{name:"OIDC",link:"https://netdata.cloud",categories:["auth"],icon_filename:"openid.svg"},keywords:["sso","oidc"],overview:"# OIDC\n\nIntegrate your organization's Authorization Servers with Netdata to better manage your team's access controls to Netdata Cloud.\n",setup:"## Setup\n\n### Prerequisites\n- An Authorization Server that supports the OIDC protocol\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- Space needs to be on a paid plan\n\n### Setting up Authorization Server\nYour server should follow the [full specification for OIDC](https://openid.net/specs/openid-connect-core-1_0.html).\nTo integrate your Authorization Server with Netdata, you need to create a client. 
Clients are applications and services that can request authentication of a user.\nThe access settings for your client are the following:\n\n| field | value |\n| :-- | :-- |\n| Root URL | `https://app.netdata.cloud/` |\n| Home/Initiate login URL | `https://app.netdata.cloud/api/v2/auth/account/auth-server?iss={your-server-issuer-url}&redirect_uri=https://app.netdata.cloud/sign-in&register_uri=https://app.netdata.cloud/sign-up/verify` |\n| Redirect URL | `https://app.netdata.cloud/api/v2/auth/account/auth-server/callback` |\n\n### Netdata Configuration Steps\n1. Click on the Space settings cog (located above your profile icon)\n2. Click on the **Authentication** tab\n3. On the OIDC card, click on **Configure**\n4. Fill in the required credentials:\n - **Issuer URL** the Authorization Server Issuer URL, e.g. `https://my-auth-server.com/`\n - **Client ID** the Client ID from the created client\n - **Client Secret** the Client Secret from the created client\n - **Authorization URL** the Authorization Server authorization URL, e.g. `https://my-auth-server.com/openid-connect/auth`\n - **Token URL** the Authorization Server token URL, e.g. `https://my-auth-server.com/openid-connect/token`\n - **User URL** the Authorization Server user info URL, e.g. `https://my-auth-server.com/openid-connect/userinfo`\n\n### Supported features\n* SP-initiated SSO (Single Sign-On)\n* IdP-initiated SSO\n\n### SP-initiated SSO\n\nIf you start your authentication flow from the Netdata sign-in page, please check [these steps](/docs/netdata-cloud/authentication-and-authorization/enterprise-sso-authentication.md#from-netdata-sign-up-page).\n\n\n### Reference\nhttps://openid.net/developers/how-connect-works/\n\n",integration_type:"authentication",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/cloud-authentication/metadata.yaml",troubleshooting:""},{id:"okta-authentication",meta:{name:"Okta SSO",link:"https://netdata.cloud",categories:["auth"],icon_filename:"okta.png"},keywords:["sso","okta","okta-sso"],overview:"# Okta SSO\n\nIntegrate your organization's Okta account with Netdata to better manage your team's access controls to Netdata Cloud.\n",setup:"## Setup\n\n### Prerequisites\n- An Okta account\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- Space needs to be on a paid plan\n\n### Setting up Okta\nSteps to be done on the Okta Admin Portal:\n1. Click on the **Applications** tab and choose **Browse App Catalogue**\n2. Find Netdata's preconfigured app for easy setup and click **Add Integration**\n3. Give the app (which will appear in your apps dashboard) your preferred **Application label** and click **Next** to move to the Sign-On options tab\n4. In the **Sign-On Options** tab, all the values we expect are already filled in and no additional data is required\n5. Click **Done**. You are able to go back and edit any fields later if need be\n6. Go to the **Assignments** tab and enter the People or Group assignments as per your organization\u2019s policies\n\n### Netdata Configuration Steps\n1. Click on the Space settings cog (located above your profile icon)\n2. Click on the **Authentication** tab\n3. On the Okta SSO card, click on **Configure**\n4. Fill in the [required credentials](https://developer.okta.com/docs/guides/find-your-app-credentials/main/); you get them from the **Okta Admin Portal**:\n - **Issuer URL** you can get it from your profile icon on top, e.g. 
`https://company-name.okta.com`\n - **Client ID** you can get it from **General** tab on application you configured on Okta\n - **Client Secret** you can get it from **General** tab on application you configured on Okta\n\n### Supported features\n* SP-initiated SSO (Single Sign-On)\n* IdP-initiated SSO\n\n### SP-initiated SSO\n\nIf you start your authentication flow from Netdata sign-in page please check [these steps](/docs/netdata-cloud/authentication-and-authorization/enterprise-sso-authentication.md#from-netdata-sign-up-page).\n\n",integration_type:"authentication",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/cloud-authentication/metadata.yaml",troubleshooting:""}];var l=t(74112),c=t(86083),d=t(38819),u=t(58205);const m=(0,i.eU)({key:"integrationsCategoriesAtom",default:function(){let{hideEmptyCategories:e=!0}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const n=(0,l.Fc)(s),t=e=>n.find((n=>n.id==e)),o=(0,c.RF)(r);let i=[];if(e){const e=(0,l.rS)(o,t,n);i=n.filter((n=>{let{id:t}=n;return e.includes(t)}))}else i=n;return i}()}),p=(0,i.eU)({key:"topLevelIntegrationsCategoriesAtom",default:(0,i.gD)({key:"topLevelIntegrationsCategoriesState",get:e=>{let{get:n}=e;return(0,l.a8)(n(m))}})}),h=(0,i.eU)({key:"integrationsNavigationHistoryAtom",default:(0,i.gD)({key:"integrationsNavigationHistoryState",get:e=>{let{get:n}=e;return(e=>{var n;let{categories:t,integrations:o}=e;const{selectedIntegrationCategory:i,selectedIntegration:a}=(0,d.PP)(),s=t.find((e=>{let{id:n}=e;return n==(i||u.yI)})),{tree:r}=(0,l.fk)({category:s,getCategoryById:e=>t.find((n=>n.id==e))}),c=null===(n=o.filter((e=>{let{id:n}=e;return n==a})))||void 0===n?void 0:n[0],m=r.filter(Boolean).map(((e,n)=>({...e,type:"category",level:n+1})));return c&&m.push({...c,type:"integration"}),m})({categories:n(m),integrations:n(g)})}})}),g=(0,i.eU)({key:"integrationsNormalized",default:(0,i.gD)({key:"integrationsNormalizedState",get:e=>{let{get:n}=e;return function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:[];return(0,c.RF)(r,e)}(n(m))}})}),f=(0,i.eU)({key:"integrationsSearchTerm",default:""}),y=(0,i.eU)({key:"integrationsHistoryBeforeSearch",default:null}),b=(0,i.eU)({key:"integrationsModalOpenAtom",default:"true"==(0,d.PP)().integrationsModalOpen});var _=t(9060);const w=(0,i.gD)({key:"integrationsQuickStart",get:e=>{let{get:n}=e;const t=n(g).filter((e=>e.quickStart>=0));return(0,c.xM)(t)}}),v=()=>(0,i.vc)(y),T=()=>(0,i.L4)(y),k=()=>{const e=x(),n=(0,_.A)(),[t,s]=(0,i.L4)(h),r=(0,i.E0)(y),[,c]=(0,a.N9)("selectedIntegrationCategory"),[,d]=(0,a.N9)("selectedIntegration"),[,m]=(0,a.N9)("selectedIntegrationTab"),p=(0,o.useCallback)((e=>{const{parents:t}=(0,l.fk)({category:e,getCategoryById:n}),o=[...t,e].map(((e,n)=>({...e,type:"category",level:n+1})));s(o),d(""),m(""),r()}),[e,s]),g=(0,o.useCallback)((function(){let{flattenedCategory:e,...t}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const o=e||(t.categories||[])[0],i=o.categoryId||o.id,a=n(i),{parents:c}=(0,l.fk)({category:a,getCategoryById:n}),d=[...c,a].map(((e,n)=>({...e,type:"category",level:n+1})));s([...d,{...t,type:"integration"}]),m(""),r()}),[]),f=(0,o.useCallback)((()=>{t.length>0&&(s(t.slice(0,t.length-1)),r())}),[t,s]),b=(0,o.useMemo)((()=>{const e=t[t.length-1];return"integration"==(null===e||void 0===e?void 0:e.type)?e:null}),[t]),w=(0,o.useMemo)((()=>{const e=t.filter((e=>"category"==e.type))||[];return e.length?e[e.length-1]:null}),[t]),v=(0,o.useCallback)((()=>{const n=e.find((e=>{let{id:n}=e;return 
n==u.yI})),t=[e.find((e=>{let{id:t}=e;return t==n.parentId})),n].filter(Boolean).map(((e,n)=>({...e,type:"category",level:n+1})));s(t),r()}),[e,s]),T=(0,o.useMemo)((()=>null===w||void 0===w?void 0:w.level),[w]);return(0,o.useEffect)((()=>{null!==w&&void 0!==w&&w.id&&c(w.id)}),[w]),(0,o.useEffect)((()=>{null!==b&&void 0!==b&&b.id&&d(b.id)}),[b]),{history:t,level:T,selectedCategory:w,selectedIntegration:b,pushCategory:p,pushIntegration:g,pop:f,reset:v,setHistory:s}},x=()=>(0,i.vc)(m),P=()=>(0,i.vc)(p),C=()=>{const{selectedCategory:e}=k(),n=x(),t=q();return(0,l.IH)({category:e,integrations:t,categories:n})},q=()=>(0,i.vc)(g),D=()=>(0,i.vc)(w),A=()=>(0,i.vc)(f),S=()=>(0,i.L4)(f),M=()=>(0,i.E0)(f),E=()=>{const e=M(),{reset:n}=k();return()=>{e(),n()}},N=()=>(0,i.L4)(b)},88773:(e,n,t)=>{t.d(n,{$c:()=>r,AG:()=>d,Av:()=>l,BT:()=>u,D6:()=>s,GO:()=>p,Zp:()=>c,gY:()=>m,oO:()=>h});var o=t(8711),i=t(83199),a=t(25159);const s=o.default.div.withConfig({displayName:"styled__LayoutGrid",componentId:"sc-1kju9s3-0"})(["display:grid;grid-template-columns:"," auto;column-gap:32px;padding:0 24px;height:calc( 100vh - ","px - ","px );@media screen and ","{grid-template-columns:"," auto;}@media screen and ","{grid-template-columns:"," auto;}"],(e=>{let{isSidebarCollapsed:n}=e;return n?"".concat(60,"px"):"35%"}),(e=>(0,a.vF)(e.flavour)),(e=>(0,a.u8)(e.flavour)),i.breakpoints.laptop,(e=>{let{isSidebarCollapsed:n}=e;return n?"".concat(60,"px"):"30%"}),i.breakpoints.desktop,(e=>{let{isSidebarCollapsed:n}=e;return n?"".concat(60,"px"):"25%"})),r=o.default.div.withConfig({displayName:"styled__StyledVirtualRow",componentId:"sc-1kju9s3-1"})(["display:grid;grid-template-columns:repeat(",",1fr);grid-auto-rows:","px;column-gap:16px;row-gap:16px;padding-bottom:16px;"],(e=>{let{itemsPerRow:n}=e;return n}),(e=>{let{rowHeight:n}=e;return n>16?n-16:n})),l=(0,o.default)(i.Flex).withConfig({displayName:"styled__CategoryWrapper",componentId:"sc-1kju9s3-2"})(["cursor:pointer;&:hover{background-color:",";}"],(0,i.getColor)("integrationMenuItemHover")),c=(0,o.default)(i.Flex).withConfig({displayName:"styled__Card",componentId:"sc-1kju9s3-3"})(["background:",";img{transition:all 200ms ease-in-out;}&:hover{img{transform:scale(1.2);}}"],(0,i.getColor)("panelBg")),d=(o.default.div.withConfig({displayName:"styled__CardOverlay",componentId:"sc-1kju9s3-4"})(["position:absolute;width:100%;top:0;bottom:0;filter:blur(70px);opacity:0.2;&& img{width:100%;height:100%;}"]),(0,o.default)(i.Icon).withConfig({displayName:"styled__CardIcon",componentId:"sc-1kju9s3-5"})(["position:absolute;top:-20px;left:-35px;transform:rotate(40deg);opacity:0.1;"])),u=(0,o.default)(i.Flex).withConfig({displayName:"styled__CardDescription",componentId:"sc-1kju9s3-6"})(["position:absolute;bottom:0;left:0;transform:translateY(100%);transition:all 200ms ease-in-out;&&.hover{transform:translateY(0);}"]),m=o.default.table.withConfig({displayName:"styled__ContentTable",componentId:"sc-1kju9s3-7"})(["width:100%;border:1px solid ",";margin-top:16px;tr:nth-child(even){background:",";}th{padding:8px 16px;background:",";}td{padding:6px;}"],(0,i.getColor)("borderSecondary"),(0,i.getColor)("mainBackground"),(0,i.getColor)("mainBackground")),p=(0,o.default)(i.PortalSidebar).withConfig({displayName:"styled__SidebarModal",componentId:"sc-1kju9s3-8"})(["width:calc(100vw - 54px);z-index:35;background:",";overflow:",";"],(0,i.getColor)("mainBackground"),(e=>{let{overflow:n="hidden"}=e;return 
n})),h=(0,o.default)(i.Icon).withConfig({displayName:"styled__CategoryArrow",componentId:"sc-1kju9s3-9"})(["opacity:",";"],(e=>{let{disabled:n}=e;return n?"0":"1"}))},74112:(e,n,t)=>{t.d(n,{Fc:()=>l,IH:()=>p,PQ:()=>u,YK:()=>c,a8:()=>d,fk:()=>g,q1:()=>h,rS:()=>y,vF:()=>b});t(17333),t(3064),t(9920),t(41393),t(14905),t(98992),t(54520),t(72577),t(3949),t(81454),t(8872),t(25509),t(65223),t(60321),t(41927),t(11632),t(64377),t(66771),t(12516),t(68931),t(52514),t(35694),t(52774),t(49536),t(21926),t(94483),t(16215),t(62953);var o=t(63950),i=t.n(o),a=t(58205),s=t(80158),r=t(25159);const l=function(){let e=arguments.length>1&&void 0!==arguments[1]?arguments[1]:null;return(arguments.length>0&&void 0!==arguments[0]?arguments[0]:[]).reduce(((n,t)=>{n.push(function(e){let{children:n,most_popular:t,...o}=e,i=arguments.length>1&&void 0!==arguments[1]?arguments[1]:null;return{...o,name:(0,s.Zr)(o.name),mostPopular:t,parentId:i}}(t,e));const o=t.children||[];return n=[...n,...l(o,t.id)]}),[])},c=function(){const e=[],n=[];(arguments.length>0&&void 0!==arguments[0]?arguments[0]:[]).forEach((t=>{void 0!==t.priority&&t.priority>=0?e.push(t):n.push(t)}));const t=[],o=[];n.forEach((e=>{e.mostPopular?t.push(e):o.push(e)}));const i=(0,r.Wl)("priority"),a=(0,r.Kr)("name"),s=[...e.sort(i),...t.sort(a)];return{categories:[...s,...o.sort(a)],popular:s,rest:o}},d=function(){return((arguments.length>0&&void 0!==arguments[0]?arguments[0]:[])||[]).filter((e=>{let{parentId:n}=e;return null==n}))},u=e=>{let{category:n,categories:t}=e;return t.filter((e=>{let{parentId:t}=e;return n.id==t}))},m=e=>{let{category:n,categories:t,allSubcategories:o=[]}=e;return(u({category:n,categories:t})||[]).forEach((e=>{o.push(e),m({category:e,categories:t,allSubcategories:o})})),o},p=e=>{let{category:n={},integrations:t=[],categories:o=[]}=e;if(null==n)return[];const i=m({category:n,categories:o}),a=[n.id,...i.map((e=>{let{id:n}=e;return n}))];return t.filter((e=>{const n=e.categories.map((e=>{let{categoryId:n}=e;return n}));for(let t=0;t1&&void 0!==arguments[1]?arguments[1]:[];const t=n.find((n=>{let{id:t}=n;return t==e}));return t?null===t.parentId?a.aw[t.id]:h(t.parentId,n):null},g=e=>{let{category:n,getCategoryById:t=i()}=e;const o=(e=>((null===e||void 0===e?void 0:e.id)||"").split(".").reduce(((e,n)=>(e.length?e.push([e[e.length-1],n].join(".")):e.push(n),e)),[]))(n).map((e=>t(e)));return{tree:o,parents:o.length>1?o.slice(0,o.length-1):[],topLevelCategory:o[0]}},f=function(e){let n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:i(),t=arguments.length>2&&void 0!==arguments[2]?arguments[2]:i(),o=arguments.length>3?arguments[3]:void 0;if(!e)return;const a=t(e);if(!a)return;n(a);const s=o.find((e=>{let{id:n}=e;return n==(null===a||void 0===a?void 0:a.parentId)}));s&&f(s.id,n,t,o)},y=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:[],n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:i(),t=arguments.length>2&&void 0!==arguments[2]?arguments[2]:[];const o=new Set,a=e=>o.add(e.id);return e.forEach((e=>{e.categories.forEach((e=>{let{categoryId:o}=e;f(o,a,n,t)}))})),Array.from(o)},b=e=>{let{height:n,topLevelCategories:t=[],categories:o=[],setState:s=i()}=e;const r={};if(!n||!t.length||!o.length)return r;const l=Math.floor((n/t.length-a.V0)/a.V0);t.forEach((e=>{const n=u({category:e,categories:o});r[e.id]={subcategories:n.length,limit:n.length>l?l:0}})),Object.entries(r).forEach((e=>{let[n,{subcategories:t,limit:o}]=e;n!=a.qC&&o{t.d(n,{Kr:()=>i,Wl:()=>a,jU:()=>l,u8:()=>r,vF:()=>s});t(41393),t(81454);var 
o=t(58205);const i=e=>(n,t)=>n[e]t[e]?1:0,a=e=>(n,t)=>n[e]-t[e],s=function(){var e;let n=arguments.length>0&&void 0!==arguments[0]?arguments[0]:o.Jr;return(null===(e=o.jI[n])||void 0===e||null===(e=e.header)||void 0===e?void 0:e.height)||0},r=function(){var e;let n=arguments.length>0&&void 0!==arguments[0]?arguments[0]:o.Jr;return(null===(e=o.jI[n])||void 0===e||null===(e=e.cncf)||void 0===e?void 0:e.height)||0},l=e=>{var n;return"string"===typeof e?e:Array.isArray(e)?e.map(l).join(""):null!==e&&void 0!==e&&null!==(n=e.props)&&void 0!==n&&n.children?l(e.props.children):""}},86083:(e,n,t)=>{t.d(n,{Cj:()=>m,Fd:()=>p,M6:()=>h,RF:()=>d,xM:()=>u});t(17333),t(3064),t(9920),t(41393),t(14905),t(98992),t(54520),t(72577),t(3949),t(81454),t(8872),t(62953);var o=t(63950),i=t.n(o),a=t(49286),s=t(25159);const r=function(){let{installDescription:e,additionalInfo:n,platformInfo:t}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return[e,"{% command methods=$methods isNightly=$isNightly claimToken=$claimToken claimUrl=$claimUrl claimRooms=$claimRooms /%}",n,t].join("\n\n")},l={"go.d.plugin":"go.d:collector"},c=e=>{if(!e)return null;const{module_name:n,plugin_name:t}=e,o=l[t];return o&&n?"".concat(o,":").concat(n):null},d=function(){let e=arguments.length>1&&void 0!==arguments[1]?arguments[1]:[];return(arguments.length>0&&void 0!==arguments[0]?arguments[0]:[]).map((n=>function(){let{meta:e={},...n}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:[];const{monitored_instance:o,keywords:i=[],most_popular:s,community:l}=e,{name:d,categories:u=[],icon_filename:m}=o||e,p="deploy"==n.integration_type;return(0,a.bn)({name:d,categories:u.map((e=>{const n=t.find((n=>n.id==e));return{categoryId:e,name:null===n||void 0===n?void 0:n.name}})),icon:"https://www.netdata.cloud/img/"+m,keywords:i,mostPopular:s,community:l,dyncfgId:c(e),...n,...p?{deployContent:r({installDescription:n.install_description,methods:n.methods,additionalInfo:n.additional_info,platformInfo:n.platform_info})}:{}})}(n,e)))},u=function(){const e=[],n=[];(arguments.length>0&&void 0!==arguments[0]?arguments[0]:[]).forEach((t=>{void 0!==t.quickStart&&t.quickStart>=0?e.push(t):n.push(t)}));const t=[],o=[];n.forEach((e=>{e.mostPopular?t.push(e):o.push(e)}));const i=[],a=[];o.forEach((e=>{e.community?a.push(e):i.push(e)}));const r=(0,s.Wl)("quickStart"),l=(0,s.Kr)("name");return[...e.sort(r),...t.sort(l),...i.sort(l),...a.sort(l)]},m=e=>{let{searchTerm:n="",integrations:t=[]}=e;if(!n)return t;const o=n.toLocaleLowerCase();return t.filter((e=>{const{name:n,keywords:t,categories:i=[]}=e;if(n.toLocaleLowerCase().includes(o))return!0;const a=i.map((e=>{let{name:n}=e;return n})).filter(Boolean);return[...t,...a].join(",").toLocaleLowerCase().includes(o)}))},p=function(){let e=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"id";const n=[];return(arguments.length>0&&void 0!==arguments[0]?arguments[0]:[]).forEach((t=>{n.find((n=>n[e]==t[e]))||n.push(t)})),n},h=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:[],n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:i();return e.reduce(((e,t)=>(t.categories.forEach((o=>{e.push(((e,n,t)=>({...e,flattenedKey:"".concat(e.id,"-").concat(n.categoryId),flattenedCategory:t(n.categoryId)}))(t,o,n))})),e)),[])}},42402:(e,n,t)=>{t.d(n,{Ay:()=>g,FB:()=>u,TZ:()=>m,mK:()=>h,uL:()=>d});var o=t(58168),i=t(96540),a=t(83199),s=t(35261);const r="delete-node-dialog",l="deleteNodeDialog",c=e=>{const n=1===e;return"Delete 
".concat(e," ").concat(n?"node":"nodes")},d=e=>"Delete ".concat(e," node"),u=e=>{let{name:n,nodesLength:t}=e;return t?1===t&&n?d(n):c(t):""},m=e=>{let{name:n}=e;return i.createElement(i.Fragment,null,"You are about to delete offline node ",i.createElement("strong",null,n),".",i.createElement("br",null),"Node will be removed from all rooms and will no longer be accessible. Plus all metadata will be removed.",i.createElement("br",null),"Are you sure you want to continue?")},p=e=>{let{nodesLength:n}=e;const t=1===n;return i.createElement(i.Fragment,null,"You are about to delete ",t?"this":"these"," ",i.createElement("strong",null,n," offline ",t?"node":"nodes"),".",i.createElement("br",null),t?"Node":"Nodes"," will be removed from all rooms and will no longer be accessible. Plus all metadata will be removed.",i.createElement("br",null),"Are you sure you want to continue?")},h=e=>{let{name:n,nodesLength:t}=e;return t?1===t&&n?i.createElement(m,{name:n}):i.createElement(p,{nodesLength:t}):""},g=e=>{let{ids:n=[],name:t,onClose:u}=e;const h=(0,s.A)(),g=t?{"data-ga":"".concat(r,"-with-name"),"data-testid":"".concat(l,"WithName"),message:i.createElement(m,{name:t}),title:d(t)}:{"data-ga":"".concat(r,"Bulk"),"data-testid":"".concat(l,"Bulk"),message:i.createElement(p,{nodesLength:n.length}),title:c(n.length)};return i.createElement(a.ConfirmationDialog,(0,o.A)({confirmLabel:"Yes, delete",handleConfirm:async()=>{await h(n)},handleDecline:u},g))}},63119:(e,n,t)=>{t.d(n,{A:()=>m,u:()=>u});var o=t(58168),i=t(96540),a=t(84976),s=t(83199),r=t(47762),l=t(4959),c=t(29217);const d=e=>{let{isLive:n,name:t}=e;return i.createElement(i.Fragment,null,i.createElement(l.cK,{"data-testid":"node-name-text",isLive:n},t),i.createElement(s.Icon,{color:"placeholder","data-testid":"node-name-icon",name:"goToNode",width:"18px",height:"18px"}))},u=e=>{let{name:n}=e;return i.createElement(s.Flex,{width:"300px",column:!0,gap:1},i.createElement(s.Text,{strong:!0},"Locked!"),i.createElement(s.Text,null,"The node ",i.createElement(s.Text,{strong:!0},n)," is locked."),i.createElement(s.Text,null,"Your plan is limited to 5 nodes. 
Upgrade your plan for no limitations."))},m=(p=e=>{let{id:n,state:t,isLive:s,name:c,isPreferred:u,...m}=e;const p=(0,r.Zl)(n);return("stale"===t||s)&&u?i.createElement(a.N_,(0,o.A)({"data-testid":"node-name-link",to:p},m),i.createElement(l.Zw,{"data-testid":"node-name-container"},i.createElement(d,{isLive:s,name:c}))):i.createElement(d,{isLive:s,name:c})},e=>e.isPreferred?i.createElement(p,e):i.createElement(c.A,{plain:!0,content:i.createElement(u,{name:e.name}),isBasic:!0},i.createElement(s.Flex,{gap:1},i.createElement(s.Icon,{name:"padlock",width:"18px",height:"18px",color:"placeholder"}),i.createElement(p,e))));var p},74379:(e,n,t)=>{t.d(n,{Ay:()=>f,G:()=>p,VN:()=>g,aq:()=>m,fS:()=>d});var o=t(58168),i=t(96540),a=t(83199),s=t(17632),r=t(69765);const l="remove-node-dialog",c="removeNodeDialog",d=e=>"Remove ".concat(e," node"),u=e=>{const n=1===e;return"Remove ".concat(e," ").concat(n?"node":"nodes")},m=e=>{let{name:n,nodesLength:t}=e;return t?1===t&&n?d(n):u(t):""},p=e=>{let{name:n,roomName:t}=e;return i.createElement(i.Fragment,null,"You are about to remove ",i.createElement("strong",null,n)," from room ",i.createElement("strong",null,t),".",i.createElement("br",null),"Are you sure you want to continue?")},h=e=>{let{nodesLength:n,roomName:t}=e;const o=1===n;return i.createElement(i.Fragment,null,"You are about to remove"," ",i.createElement("strong",null,n," ",o?"node":"nodes")," ","from room ",i.createElement("strong",null,t),".",i.createElement("br",null),"Are you sure you want to continue?")},g=e=>{let{name:n,nodesLength:t,roomName:o}=e;return t?1===t&&n?i.createElement(p,{name:n,roomName:o}):i.createElement(h,{nodesLength:t,roomName:o}):""},f=e=>{let{ids:n=[],name:t,onClose:m}=e;const g=(0,s.A)(),f=n.length,y=(0,r.XA)("name"),b=t?{"data-ga":"".concat(l,"WithName"),"data-testid":"".concat(c,"WithName"),message:i.createElement(p,{name:t,roomName:y}),title:d(t)}:{"data-ga":"".concat(l,"Bulk"),"data-testid":"".concat(c,"Bulk"),message:i.createElement(h,{nodesLength:f,roomName:y}),title:u(f)};return i.createElement(a.ConfirmationDialog,(0,o.A)({handleConfirm:async()=>{await g(n)},handleDecline:m},b))}},4959:(e,n,t)=>{t.d(n,{AS:()=>s,K:()=>l,SS:()=>c,Zw:()=>d,cK:()=>u,d0:()=>r});var o=t(8711),i=t(83199);const a=(0,o.default)(i.Button).attrs({flavour:"hollow",neutral:!0,tiny:!0}).withConfig({displayName:"styled__ActionButton",componentId:"sc-31e37b-0"})(["&&&{border-color:",";&:hover{background-color:",";}.button-icon{"," fill:",";}}"],(0,i.getColor)("nodeBadgeBorder"),(0,i.getColor)("nodeBadgeBackground"),(e=>{let{iconSize:n}=e;return n&&"\n height: ".concat(n,";\n width: ").concat(n,";\n 
")}),(0,i.getColor)("textDescription")),s=(0,o.default)(a).attrs({icon:"trashcan",iconSize:"15px"}).withConfig({displayName:"styled__ObsoleteButton",componentId:"sc-31e37b-1"})([""]),r=(0,o.default)(a).attrs({icon:"removeNode"}).withConfig({displayName:"styled__RemoveButton",componentId:"sc-31e37b-2"})([""]),l=(0,o.default)(i.Box).attrs({background:"placeholder",height:4,width:.25}).withConfig({displayName:"styled__InfoSeparator",componentId:"sc-31e37b-3"})([""]),c=(0,o.default)(i.Flex).withConfig({displayName:"styled__NodeActions",componentId:"sc-31e37b-4"})(["display:none;"]),d=(0,o.default)(i.Flex).attrs({alignItems:"center",gap:1}).withConfig({displayName:"styled__NodeNameContainer",componentId:"sc-31e37b-5"})(["&:hover{h5{color:#00ab44;text-decoration:underline;text-decoration-color:#00ab44;}svg{fill:#00ab44;}}"]),u=(0,o.default)(i.H5).attrs((e=>{let{isLive:n}=e;return{cursor:n?"pointer":"auto",color:n?"text":"textDescription"}})).withConfig({displayName:"styled__NodeName",componentId:"sc-31e37b-6"})([""])},60383:(e,n,t)=>{t.d(n,{A:()=>Z});var o=t(96540),i=t(83199),a=t(67990),s=t(38413),r=t(46741),l=(t(41393),t(81454),t(62953),t(47767)),c=t(69765),d=t(47762),u=t(97245),m=(t(8159),t(98992),t(37550),t(33931)),p=t(78969),h=t(80158),g=t(82432);const f=e=>{let{icon:n,status:t,handleNavigateToDocs:a,message:s}=e;return o.createElement(i.Flex,{column:!0,width:{max:"200px"},gap:2},o.createElement(i.Flex,{alignItems:"center",gap:2},o.createElement(i.Icon,{name:n,size:"small"}),o.createElement(i.TextMicro,null,(0,h.Zr)(t))),o.createElement(i.Flex,{column:!0,gap:2},o.createElement(i.TextMicro,null,s),a&&o.createElement(i.TextMicro,{"data-testid":"text-agent-outdated-critical"},o.createElement(i.Box,{"data-testid":"open-add-node",onClick:a,as:i.TextMicro,cursor:"pointer",textDecoration:"underline"}," ","Check here"," "),"for troubleshooting")))},y=e=>{let{state:n,name:t}=e;const a=(0,g.jZ)(n),{icon:s,text:r,textColor:l,indicatorWrapperColor:c,tooltip:d}=p.Q$[a.toLocaleLowerCase()],u=(0,o.useCallback)((()=>{window.open("https://learn.netdata.cloud/guides/troubleshoot/troubleshooting-agent-with-cloud-connection","_blank","noopener,noreferrer")}),[]),m="".concat(t," ").concat(d);return o.createElement(i.Tooltip,{allowHoverOnTooltip:!0,content:o.createElement(f,{message:m,icon:s,status:r,handleNavigateToDocs:"Pending"===a?u:void 0})},o.createElement(i.Flex,{margin:[0,"auto",0,"auto"],height:"20px",padding:[.5,0],border:{side:"all",color:c},background:"nodeBadgeBackground",justifyContent:"center",width:30,round:4,gap:2,alignItems:"center"},o.createElement(i.Icon,{name:s,size:"small",color:l}),o.createElement(i.TextMicro,{strong:!0,color:l},r)))};var b=t(81881),_=t(55905),w=(t(9391),t(17333),t(54520),t(29217)),v=t(87659),T=t(73865),k=t(71835),x=t(50876);const P=()=>{const{loaded:e,maxNodes:n}=(0,T.A)();return o.createElement(w.A,{align:"bottom",content:e?"Your current plan allows you to enable up to ".concat(n," nodes"):""},o.createElement(i.Flex,{gap:2},o.createElement(i.Text,null,"Enabled"),e?o.createElement(i.Icon,{name:"informationPress",size:"small",color:"text"}):null))},C=()=>o.createElement(w.A,{content:"For some reason we couldn't load preferred nodes",align:"bottom"},o.createElement(i.Icon,{name:"warning_triangle",color:"warning",size:"small"})),q=e=>{var n;let{row:t}=e;const a=null===(n=t.original)||void 0===n?void 
0:n.id,{loaded:s,maxNodes:r,preferredNodes:l,refreshPreferredNodes:c,hasError:u}=(0,T.A)(),m=(0,d.je)(),p=l.includes(a),[h,g]=(0,o.useState)(!1),[f,,y,b]=(0,v.A)(),_=f||!p&&s&&l.length>=r,{sendLog:w}=(0,x.A)(),[,P]=(0,k.A)();(0,o.useEffect)((()=>{s&&!u&&g(p)}),[s,l,u]);const q=(0,o.useCallback)((e=>{y(),g((e=>!e));const n=e.target.checked?[...l,a]:l.filter((e=>e!=a)),t=l.length==n.length?void 0:n.length>l.length?"add":"remove";m(n).then((()=>{w({feature:"ChangePreferredNodes",previousNodes:(l||[]).join(","),newNodes:(n||[]).join(","),nodesAction:t,isSuccess:!0}),c()})).catch((e=>{g((e=>!e)),P(e),w({feature:"ChangePreferredNodes",previousNodes:(l||[]).join(","),newNodes:(n||[]).join(","),nodesAction:t,isFailure:!0})})).finally((()=>{b()}))}),[y,g,m,l,a,c,P,b,w]);return u?o.createElement(C,null):o.createElement(i.Toggle,{colored:!0,onChange:q,checked:h,disabled:_})};var D=t(54830),A=t(69418);function S(e,n){return e===n?0:e>n?1:-1}const M=e=>{let{isSpace:n}=e;const[,,t]=(0,A.A)(),{hasLimitations:a,maxNodes:s,preferredNodes:r}=(0,T.A)(),l=a&&t>s;return(0,o.useMemo)((()=>[...n&&l?[{id:"enabled",header:P,cell:q,sortingFn:(e,n)=>{var t,o;return(null!==(t=e.original)&&void 0!==t&&t.isPreferred?1:0)-(null!==(o=n.original)&&void 0!==o&&o.isPreferred?1:0)}}]:[],{id:"agent",accessorKey:"name",enableColumnFilter:!1,filterFn:(e,n,t)=>{var o;const i=(null===(o=e.original)||void 0===o?void 0:o.name)||"";return null===i||void 0===i?void 0:i.toLowerCase().includes(t)},header:"Name",cell:e=>{let{getValue:n,row:t}=e;return o.createElement(D.A,{nodeId:t.original.id,name:n(),showLockedNodes:l,preferredNodes:r})}},{id:"version",accessorKey:"version",header:"Version",cell:e=>{var n,t;let{getValue:a,row:s}=e;return o.createElement(i.Flex,{gap:1,flexWrap:!0},o.createElement(i.Pill,{flavour:"neutral",hollow:!0},a()),s.original.updateSeverity&&o.createElement(_.A,{name:s.original.name,os:null===(n=s.original.os)||void 0===n?void 0:n.id,container:null===(t=s.original.hw)||void 0===t?void 0:t.container,warningLevel:"critical"===s.original.updateSeverity?"critical":"warning",labels:s.original.labels,version:a(),margin:[1,0]}))}},{id:"state",accessorKey:"state",header:"Status",cell:e=>{let{getValue:n}=e;return o.createElement(m.A,{state:n()})},sortingFn:(e,n)=>S((0,g.GM)(e.original.state),(0,g.GM)(n.original.state)),enableColumnFilter:!0,filterFn:(e,n,t)=>{const o=e.original.state;return t.length<1||t.some((e=>{let{value:n}=e;return"all"===n||n===(0,g.GM)(o)}))},meta:{filter:{component:"select",isMulti:!0,options:[{value:"Offline",label:"Offline"},{value:"Live",label:"Live"},{value:"Stale",label:"Stale"},{value:"Unseen",label:"Unseen"}]},tooltip:o.createElement(b.A,null)}},{id:"connectionToCloud",accessorKey:"state",header:"Connection To Cloud",cell:e=>{let{getValue:n,row:t}=e;const i=t.original;return o.createElement(y,{state:n(),name:i.name})},sortingFn:(e,n)=>S((0,g.jZ)(e.original.state),(0,g.jZ)(n.original.state)),enableColumnFilter:!0,filterFn:(e,n,t)=>{const o=e.original.state;return t.length<1||t.some((e=>{let{value:n}=e;return"all"===n||("created"===o&&"created"===n||("created"!==o&&"completed"===n||void 0))}))},meta:{filter:{component:"select",isMulti:!0,options:[{value:"created",label:"Pending"},{value:"completed",label:"Completed"}]}}},{id:"updateSeverity",accessorKey:"updateSeverity",header:"Severity",cell:e=>{var n,t;let{getValue:i,row:a}=e;const s=a.original;return o.createElement(_.A,{name:s.name,os:null===(n=s.os)||void 0===n?void 0:n.id,container:null===(t=s.hw)||void 0===t?void 
0:t.container,warningLevel:s.updateSeverity,labels:s.labels||{},version:s.version,text:i()})}}]),[a,r])};var E=t(42402),N=t(74379),R=t(35261),H=t(17632);const I=e=>{let{openClaimNodeModal:n,roomUntouchable:t,isSpace:i}=e;const a=(0,R.A)(),s=(0,H.A)(),l=(0,c.XA)("name"),d=(0,r.JT)("node:Delete"),u=(0,r.JT)("room:RemoveNode"),m=(0,r.JT)("node:Create"),p=(0,r.JT)("room:AddNode"),h=async(e,n)=>{if(!e)return;const t=(e=>Array.isArray(e)?e.filter((e=>{let{hasAccessibleData:n}=e;return!n})):e.hasAccessibleData?[]:[{id:e.id}])(e),o=t.map((e=>{let{id:n}=e;return n}));await a(o,{onSuccess:n.resetRowSelection})},g=async(e,n)=>{if(!e)return;const t=(e=>Array.isArray(e)?e:[{id:e.id}])(e),o=t.map((e=>{let{id:n}=e;return n}));await s(o,{onSuccess:n.resetRowSelection})};return{rowActions:(0,o.useMemo)((()=>({delete:{confirmLabel:"Yes, delete",confirmationMessage:e=>o.createElement(E.TZ,{name:e.name}),confirmationTitle:e=>(0,E.uL)(e.name),declineLabel:"Cancel",disabledTooltipText:d?"Delete is disabled":"Only admins can delete",handleAction:h,isVisible:!!i,isDisabled:e=>e.disabled||!d,tooltipText:"Delete node from space"},remove:{confirmLabel:"Yes, remove",confirmationMessage:e=>o.createElement(N.G,{name:e.name,roomName:l}),confirmationTitle:e=>(0,N.fS)(e.name),declineLabel:"Cancel",handleAction:g,tooltipText:"Remove node from room",isVisible:!i,isDisabled:e=>e.disabled||t||!u,disabledTooltipText:t?"Remove is disabled":"Only admins can remove"}})),[E.TZ,E.uL,N.fS,h,g,N.G,t,i,d,u]),bulkActions:(0,o.useMemo)((()=>({addEntry:{disabledTooltipText:i?"Only admins can connect new nodes":"Only admins can add nodes to the room",handleAction:n,isDisabled:i?!m:!(m||p&&!t),tooltipText:i?"Connect new nodes to space":"Add nodes to room"},...i?{delete:{confirmLabel:"Yes, delete",confirmationMessage:(e,n)=>o.createElement(E.mK,{name:n[0].name,nodesLength:n.length}),confirmationTitle:(e,n)=>(0,E.FB)({name:n[0].name,nodesLength:n.length}),disabledTooltipText:d?"Delete is disabled":"Only admins can delete",declineLabel:"Cancel",handleAction:h,isDisabled:!d,tooltipText:"Delete nodes from space"}}:{remove:{confirmLabel:"Yes, remove",confirmationMessage:(e,n)=>o.createElement(N.VN,{name:n[0].name,nodesLength:n.length,roomName:l}),confirmationTitle:(e,n)=>(0,N.aq)({name:n[0].name,nodesLength:n.length}),declineLabel:"Cancel",disabledTooltipText:t?"Remove is disabled":"Only admins can remove",handleAction:g,isDisabled:t||!u,tooltipText:"Remove nodes from room"}}})),[E.mK,E.FB,N.aq,h,g,m,d,u,N.VN,t,i]),hasPermissionToDelete:d,hasPermissionToRemove:u}},O={name:!1,updateSeverity:!1,connectionToCloud:!1},L=[{id:"state",desc:!1}],U=e=>{let{flavour:n,roomUntouchable:t,customNodes:i,alwaysEnableNodeSelection:s,isSpace:r}=e;const{roomSlug:m}=(0,l.g)(),p=(0,c.ID)(m),h=(0,a.gr)(p,"ids"),f=(0,d.Gt)(h),y=M({isSpace:r}),b=(0,o.useMemo)((()=>(0,u.P)(i||f)),[i,h]),[,_]=(0,o.useState)(""),[w,,T,k]=(0,v.A)(),{rowActions:x,bulkActions:P,hasPermissionToDelete:C,hasPermissionToRemove:q}=I({openClaimNodeModal:T,roomUntouchable:t,isSpace:r}),D=(0,o.useCallback)(((e,n,t)=>{const o=t.toLowerCase(),i=e.getValue("agent").toLowerCase(),a=(0,g.GM)(e.getValue("state")).toLocaleLowerCase(),s=(0,g.jZ)(e.getValue("connectionToCloud")).toLocaleLowerCase(),r=(e.getValue("updateSeverity")||"").toLocaleLowerCase(),l=e.getValue("version").toLowerCase();return i.includes(o)||a.includes(o)||l.includes(o)||s.includes(o)||r.includes(o)}),[]);return{nodes:b.map((e=>{const 
t=r?!C||!s&&e.hasAccessibleData:!q;return{...e,disabled:t||"availableNodes"==n&&!e.isPreferred}}))||[],nodeIds:h,columns:y,rowActions:x,bulkActions:P,isClaimNodeModalOpen:w,sortBy:L,columnVisibility:O,setGlobalFilter:_,openClaimNodeModal:T,closeClaimNodeModal:k,globalFilterFn:D}};var B=t(83179),j=t(3914),G=t(74530),F=t(15327),z=t(74618),Y=t(77173);const X=e=>{let{onClose:n}=e;const t=(0,j.vt)(),a=(0,c.ID)(),[s,r]=(0,o.useState)((()=>a?[a]:[]));return(0,G.A)(t),o.createElement(F.GO,{onClose:n,"data-testid":"addNodes"},o.createElement(z.z,{"data-testid":"addNodes-header",title:"Add Nodes",onClose:n}),o.createElement(F.Yv,{hasModalTitle:!1},o.createElement(i.Flex,{column:!0,"data-testid":"manageClaimedNodes-controls",gap:3},o.createElement(B.A,{placeholder:"Select Rooms",selectedValue:s,onChange:r}),o.createElement(Y.A,{rooms:s}))))};var W=t(8711),K=t(93155);const $=(0,W.default)(i.Box).withConfig({displayName:"activeNodesIndicatior__ProgressBar",componentId:"sc-sq1772-0"})(["position:absolute;left:0;height:4px;"]),V=e=>{let{isSpace:n}=e;const[,,t]=(0,A.A)(),{hasLimitations:a,maxNodes:s,preferredNodes:r}=(0,T.A)(),l=K.bO&&n&&a&&t>s,c=(0,o.useMemo)((()=>s>0?"".concat(r.length/s*100,"%"):"0%"),[r,s]);return l?o.createElement(i.Flex,{width:"140px",column:!0,gap:1,margin:[0,0,0,2]},o.createElement(i.Text,null,"Active nodes: ",r.length,o.createElement(i.Text,{color:"primary"},"/",s)),o.createElement(i.Box,{width:"100%",height:"4px",round:!0,overflow:"hidden",position:"relative",background:"border"},o.createElement($,{width:c,background:"primary"}))):null},Z=e=>{let{flavour:n,roomUntouchable:t,showClaimNodeOnEmptySpace:l=!1,customNodes:c=[],customBulkActions:d,customRowActions:u,enableSelection:m=!0,showDefaultRowActions:p=!0,showDefaultBulkActions:h=!0,onRowSelected:g,onClickRow:f,disableClickRow:y,columnVisibility:b,customSortBy:_,showClaimModalWithRoomSelection:w=!1,alwaysEnableNodeSelection:v=!1,isSpace:T}=e;const{nodes:k,columns:x,rowActions:P,bulkActions:C,sortBy:q,isClaimNodeModalOpen:D,openClaimNodeModal:A,closeClaimNodeModal:S,columnVisibility:M,setGlobalFilter:E,globalFilterFn:N}=U({flavour:n,roomUntouchable:t,customNodes:c,alwaysEnableNodeSelection:v,isSpace:T,showClaimModalWithRoomSelection:w}),R=(0,r.JT)("node:Create"),H=(0,a.nj)();(0,o.useEffect)((()=>{H&&l&&R&&0===k.length&&A()}),[H]);const I=(0,o.useMemo)((()=>({...M,...b})),[M,b]);return x.length?o.createElement(o.Fragment,null,o.createElement(i.Table,{headerChildren:o.createElement(V,{isSpace:T}),onRowSelected:g,globalFilterFn:N,columnVisibility:I,enableSelection:m,enableSorting:!0,dataColumns:x,data:k,rowActions:p&&P||u,bulkActions:h&&C||d,sortBy:_||q,onSearch:E,testPrefixCallback:e=>e.hostname||e.name,onClickRow:f,disableClickRow:y}),D&&!w&&o.createElement(s.A,{onClose:S,isSubmodal:!0}),D&&w&&o.createElement(X,{onClose:S})):null}},54830:(e,n,t)=>{t.d(n,{A:()=>r});var o=t(96540),i=t(83199),a=t(29217),s=t(63119);const r=(l=e=>{let{name:n}=e;return o.createElement(i.TextSmall,null,n)},e=>{let{nodeId:n,showLockedNodes:t,preferredNodes:r=[],...c}=e;return t&&!r.includes(n)?o.createElement(a.A,{plain:!0,content:o.createElement(s.u,{name:c.name}),isBasic:!0},o.createElement(i.Flex,{gap:1},o.createElement(i.Icon,{name:"padlock",width:"18px",height:"18px",color:"placeholder"}),o.createElement(l,c))):o.createElement(l,c)});var 
l},82432:(e,n,t)=>{t.d(n,{Bb:()=>v,GM:()=>u,O:()=>b,Pg:()=>C,Po:()=>h,RI:()=>w,Ud:()=>g,gm:()=>q,jZ:()=>m,lw:()=>p,t3:()=>c,tv:()=>D,vt:()=>r,ws:()=>f});t(17333),t(3064),t(41393),t(98992),t(54520),t(72577),t(81454);var o=t(96540),i=t(87860),a=t(12800);const s="N/A",r=e=>{var n,t;let{isLive:o,health:i={}}=e;return o?null!==(n=i.alerts)&&void 0!==n&&n.critical?"critical":null!==(t=i.alerts)&&void 0!==t&&t.warning?"warning":"clear":"unreachable"},l=e=>e>9?"9+":"".concat(e),c=e=>{let{count:n,dataTestId:t,isLive:o,onAlertClick:i,type:a}=e;const s=t?"".concat(t,"-alertIndicator"):"alertIndicator",r={"data-testid":"".concat(s,"-").concat(a)};return o?{...r,...n&&{flavour:"critical"===a?"error":"warning"},...n&&i&&{onClick:()=>i(a)},text:l(n)}:r},d={created:"Unseen",reachable:"Live",stale:"Stale",unreachable:"Offline"},u=e=>d[e]||d.stale,m=e=>"created"===e?"Pending":"Completed",p=e=>e?{flavour:"success",label:"ON",status:"ON"}:{flavour:"warning",label:"OFF",status:"OFF"},h=e=>!e||"unknown"===e,g=function(e,n){let t=arguments.length>2&&void 0!==arguments[2]?arguments[2]:" \u2022 ";return h(e)?s:"".concat(e).concat(t).concat(n)},f=(e,n)=>h(e)?s:n?"".concat(e," ").concat(n):e,y={HZ:1,KHz:1e3,MHz:1e6,GHz:1e9,THz:1e12,PHz:1e15,EHz:1e18},b=e=>{if(h(e))return s;const n=Object.keys(y).find((n=>1e3>e/y[n]));return"".concat((e/=y[n]).toFixed(2)," ").concat(n||"")},_={B:1,KiB:1024,MiB:1048576,GiB:1073741824,TiB:1099511627776,PiB:0x4000000000000,EiB:0x1000000000000000},w=e=>{if(h(e))return s;const n=Object.keys(_).find((n=>1024>e/_[n]));return"".concat((e/=_[n]).toFixed(2)," ").concat(n||"")},v=(e,n,t,i,a)=>{const s={Live:o.createElement(o.Fragment,null,"is currently connected"),Offline:o.createElement(o.Fragment,null,"is currently not connected"),Stale:o.createElement(o.Fragment,null,"is currently not connected and has only historical data")},r={actionObsolete:o.createElement(o.Fragment,null,"Delete Node"),actionRemove:o.createElement(o.Fragment,null,"Remove Node from room"),alerts:o.createElement(o.Fragment,null,"The current Critical and Warning alerts for ",i),cloudInfo:o.createElement(o.Fragment,null,i," cloud information:",o.createElement("br",null),"Cloud Provider \u2022 Instance Type"),connectivity:n in s?o.createElement(o.Fragment,null,i," ",s[n]):null,info:o.createElement(o.Fragment,null,"View node information"),k8s:o.createElement(o.Fragment,null,i," runs on Kubernetes cluster"),kernel:o.createElement(o.Fragment,null,"Kernel information of ",i),ml:o.createElement(o.Fragment,null,"Machine Learning status of ",i," is: ",t),fn:o.createElement(o.Fragment,null,"Functions capability of ",i," is: ",t),functions:o.createElement(o.Fragment,null,i," functions"),systemInfo:o.createElement(o.Fragment,null,i," hardware information:",o.createElement("br",null),"O/S \u2022 CPU Frequency \u2022 CPU Cores \u2022 Architecture \u2022 Memory \u2022 Hard Disk Space"),type:o.createElement(o.Fragment,null,"The type of ",i," is: ",a)};return e in r?r[e]:""},T={none:!0,null:!0,unknown:!0},k=e=>!!e&&!T[e],x={container:"container","container-other":"container",other:"container"},P={"$(systemd-detect-virt -v)":"vm","$(systemd-detect-virt -v) docker-compose up":"vm","${VIRTUALIZATION}":"vm","(systemd-detect-virt -v)":"vm","detect-virt -v":"vm",lxc:"vm","none #vmware":"vmware",other:"vm","systemd-detect-virt -v":"vm",unknown:"vm","vm-other":"vm","VMware Virtual Platform":"vmware"},C=(e,n)=>k(e)?{icon:"container",label:x[e]||e}:k(n)?{icon:"cluster",label:P[n]||n}:{icon:"ram",label:"Bare Metal"},q=async 
e=>{let{cacheKeyPrefix:n,nodeIds:t,roomId:o,spaceId:s}=e;const r="".concat(n).concat((0,i.u)({id:o,spaceId:s}));await(0,a.y)({key:r,handleResults:e=>e.results.filter((e=>!t.includes(e.id)))})},D=async e=>{let{cacheKeyPrefix:n,nodeIds:t,roomIds:o,spaceId:s}=e;const r=e=>e.results.filter((e=>!t.includes(e.id)));o.map((async e=>{const t="".concat(n).concat((0,i.u)({id:e,spaceId:s}));await(0,a.y)({key:t,handleResults:r})}))}},54961:(e,n,t)=>{t.d(n,{A8:()=>b,An:()=>f,Jq:()=>y,Qb:()=>d,Wb:()=>u,b8:()=>c,eQ:()=>m,t9:()=>h});t(41393),t(14905),t(98992),t(81454),t(8872);var o=t(26655),i=t(80158),a=t(49286),s=t(71856),r=t(30960);const l=e=>{let{integrations:n}=e;return n.reduce(((e,n)=>{var t;const o=(0,a.bn)(n),s=o.available?"available":"unavailable";var l;(o.kindLabel=(0,i.Zr)(o.kind,!0),o.docsLink=null===(t=o.schema)||void 0===t||null===(t=t.annotations)||void 0===t?void 0:t.docsUrl,o.internal)||(o.fields=(0,r.Rm)(o.schema),o.required=null===(l=o.schema)||void 0===l?void 0:l.required);return e[s].push(o),e}),{available:[],unavailable:[],original:n})},c=e=>o.A.get("/api/v2/spaces/".concat(e,"/integrations"),{transform:l}),d=(e,n)=>o.A.post("/api/v2/spaces/".concat(e,"/channel"),n),u=(e,n)=>o.A.delete("/api/v2/spaces/".concat(e,"/channel/").concat(n)),m=(e,n,t)=>o.A.put("/api/v2/spaces/".concat(e,"/channel/").concat(n),t),p=e=>{var n;const{id:t,...o}=e.integration||{},i={...(0,a.bn)(e),...(0,a.bn)(o)};return i.docsLink=null===(n=i.schema)||void 0===n?void 0:n.annotations.docsUrl,i.internal||(i.fields=(0,r.Rm)(i.schema),i.required=i.schema.required),i},h=(e,n)=>o.A.get("/api/v2/spaces/".concat(e,"/channel/").concat(n),{transform:p}),g=e=>({channels:e.map((e=>{var n;const{id:t,...o}=e.integration||{};return{...(0,a.bn)(e),...o||{},notificationLabel:s.w8[e.alarms].label,kindLabel:(0,i.Zr)(null===(n=e.integration)||void 0===n?void 0:n.kind,!0)}})),original:e}),f=e=>o.A.get("/api/v2/spaces/".concat(e,"/channel"),{transform:g}),y=(e,n,t)=>o.A.patch("/api/v2/spaces/".concat(e,"/channel/").concat(n),{enabled:t}),b=function(e){let n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};return o.A.post("/api/v2/spaces/".concat(e,"/notifications/test"),n)}},58159:(e,n,t)=>{t.d(n,{A:()=>a,V:()=>i});var o=t(47444);const i={additionalProperties:!1,alerts:"ALARMS_SETTING_ALL",description:"",enabled:!0,error:"",fields:[],icon:"",internal:!0,id:"",integration:"",integrationId:"",kind:"",loading:!0,loaded:!1,name:"",rooms:null,roomSelections:[t(71856).PT],secrets:{}},a=(0,o.Iz)({key:"spaceChannel",default:e=>(e=>({...i,id:e}))(e)})},63756:(e,n,t)=>{t.d(n,{A:()=>a,y:()=>i});var o=t(47444);const i={currentChannelId:"",channels:[],error:"",id:null,loading:!0,loaded:!1,original:[]},a=(0,o.Iz)({key:"spaceChannels",default:e=>(e=>({...i,id:e}))(e)})},79769:(e,n,t)=>{t.d(n,{A:()=>r});var o=t(54961),i=t(26770),a=t(63756),s=t(18061);const r=e=>{const n=(0,i.vq)(e);(0,s.A)((()=>({enabled:!!e,fetch:()=>(0,o.An)(e),onFail:e=>n({...a.y,error:e.message}),onSettle:()=>n({loading:!1,loaded:!0}),onSuccess:e=>n({...a.y,...e})})),[e])}},73743:(e,n,t)=>{t.d(n,{A:()=>a,u:()=>i});var o=t(47444);const i={available:[],error:"",id:null,loading:!0,loaded:!1,original:[],unavailable:[]},a=(0,o.Iz)({key:"spaceIntegrations",default:e=>(e=>({...i,id:e}))(e)})},96382:(e,n,t)=>{t.d(n,{yP:()=>y,ae:()=>g,j$:()=>f});t(62953);var o=t(47444),i=t(71835),a=t(26655);const 
s={email:null,browser:null,mobile:null},r={alerts:null},l={me:()=>"/api/v2/accounts/me/notifications/settings",room:e=>{let{spaceId:n,roomId:t}=e;return"/api/v2/spaces/".concat(n,"/rooms/").concat(t,"/notifications/settings")}},c=e=>{let{domain:n,...t}=e;return(0,l[n])(t)},d=(0,o.Iz)({key:"notificationsSettings",default:e=>{const n=(e=>{let{domain:n}=e;return{me:s,room:r}[n]})(e),t=c(e);return a.A.get(t).then((e=>({...n,...e.data,isLoaded:!0}))).catch((()=>({...n,isLoaded:!0})))}});var u=t(61649);const m=(0,o.K0)({key:"notificationsSettings",get:e=>n=>{let{get:t}=n;return t(d(e))},set:e=>(n,t)=>{let{set:o}=n;const{prop:i,...a}=e;o(d(a),(e=>{const n="object"==typeof e[i]?{...e[i],enabled:t}:t;return i?{...e,[i]:n}:n}))}}),p=function(e){let{shouldPersist:n=!0}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};const t=(0,o.lZ)(m(e)),s=(e=>(0,u.A)((function(){let n=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return a.A.patch(c(e),n)}),[e.domain,e.spaceId,e.roomId]))(e),[,r]=(0,i.A)();return(0,o.Zs)((o=>{let{snapshot:i}=o;return async o=>{if(t(o),!n)return;const a=await i.getPromise(m(e)),{prop:l}=e;try{await s({...l?{[l]:o}:o})}catch(c){r(c),t(l?a[l]:a)}}}),[e])},h=e=>(0,o.xf)(m(e)),g=()=>(0,o.xf)(d({domain:"me"})),f=e=>{const n={domain:"me"},t=h(n),o="hasValue"!==t.state?s:t.contents,i=p({...n,prop:e});return[e?o[e]:o,i]},y=e=>{let{roomId:n,spaceId:t,key:o}=e;const i={domain:"room",roomId:n,spaceId:t},a=h(i),s="hasValue"!==a.state?r:a.contents,l=p({...i,prop:o});return[o?s[o]:s,l]}},26770:(e,n,t)=>{t.d(n,{EE:()=>v,Mw:()=>h,bY:()=>T,ef:()=>y,g4:()=>p,m$:()=>w,t5:()=>f,vq:()=>b});t(62953);var o=t(96540),i=t(47444),a=t(58159),s=t(63756),r=t(73743),l=t(30960),c=t(3914),d=t(54961),u=t(71835);const m=(0,i.K0)({key:"spaceChannelState",get:e=>{let{id:n,key:t}=e;return e=>{let{get:o}=e;const i=o((0,a.A)(n));return t?i[t]:i}},set:e=>{let{id:n,key:t}=e;return(e,o)=>{let{set:i}=e;i((0,a.A)(n),(e=>t?{...e,[t]:o}:{...e,...o}))}}}),p=(e,n)=>(0,i.vc)(m({id:e,key:n})),h=e=>(0,i.E0)(m(e)),g=(0,i.K0)({key:"spaceChannelsState",get:e=>{let{id:n,key:t}=e;return e=>{let{get:o}=e,i=o((0,s.A)(n));return i={...i,channels:(0,l.kz)(i.channels)},t?i[t]:i}},set:e=>{let{id:n,key:t}=e;return(e,o)=>{let{set:i}=e;i((0,s.A)(n),(e=>t?{...e,[t]:o}:{...e,...o}))}}}),f=(e,n)=>(0,i.vc)(g({id:e,key:n})),y=(e,n)=>(0,i.lZ)(m({id:e,key:n})),b=(e,n)=>(0,i.lZ)(g({id:e,key:n})),_=(0,i.K0)({key:"spaceIntegrationsState",get:e=>{let{id:n,key:t}=e;return e=>{let{get:o}=e,i=o((0,r.A)(n));return i={...i,available:(0,l.kz)(i.available),unavailable:(0,l.kz)(i.unavailable)},t?i[t]:i}},set:e=>{let{id:n,key:t}=e;return(e,o)=>{let{set:i}=e;i((0,r.A)(n),(e=>t?{...e,[t]:o}:{...e,...o}))}}}),w=(e,n)=>(0,i.vc)(_({id:e,key:n})),v=(e,n)=>(0,i.lZ)(_({id:e,key:n})),T=()=>{const e=(0,c.vt)(),[n,t]=(0,u.A)();return(0,o.useCallback)((function(){let{id:o,slug:i,secrets:a={}}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return(0,d.A8)(e,{channelID:o,integrationSlug:i,secrets:a}).then((()=>{n({header:"Success",text:"Successfully sent test notification"})})).catch((()=>{t({header:"Error",text:"Something went wrong"})}))}),[e])}},30960:(e,n,t)=>{t.d(n,{kz:()=>g,Zv:()=>l,ct:()=>c,O5:()=>d,Rm:()=>u,$Q:()=>m,Pl:()=>p,s7:()=>h});t(17333),t(3064),t(14905),t(98992),t(54520),t(72577),t(8872),t(62953),t(41393),t(81454);var o=t(90179),i=t.n(o),a=t(71856);const 
s=(e,n,t)=>{const{description:o,placeholder:s,title:r,type:l,...c}=n,d=t.includes(e),m="integer"==l,p={description:o,id:e,isRequired:d,placeholder:s||"",title:r||e};if(("string"===l||m)&&"selection"!==e){const{format:e,maxLength:n,minLength:t}=c;return{component:"input",getValue:e=>{let{id:n,secrets:t,subsetId:o}=e;return(o?t[n][o]:t[n])||""},isValid:o=>!!("uri"!==e||null!==o&&void 0!==o&&o.match(a.rx))&&(!(t&&o.lengthn)),...n?{maxlength:n}:{},...t?{minlength:t}:{},onChange:e=>{let{id:n,setSecrets:t,subsetId:o}=e;return e=>t((t=>{if(m){const n=parseInt(e,10);e=isNaN(n)?0:n}return o?{...t,[n]:{...t[n],[o]:e}}:{...t,[n]:e}}))},type:m?"number":"uri"===e?"url":e||"text",...p}}if("object"===l){const{oneOf:e,patternProperties:n}=c;if(e)return{component:"select",getValue:n=>{var t,o;let{id:i,secrets:a}=n;return{label:(null===(t=e.find((e=>{var n;return e.properties.selection.const===(null===(n=a[i])||void 0===n?void 0:n.selection)})))||void 0===t?void 0:t.title)||(d?e[0].title:"None"),value:(null===(o=a[i])||void 0===o?void 0:o.selection)||(d?e[0].properties.selection.const:"")}},onChange:n=>{let{id:t,setRequiredSubsets:o,setSecrets:a}=n;return n=>{let{label:s,value:r}=n;if(!r)return o((e=>i()(e,t))),void a((e=>i()(e,t)));const l=e.find((e=>e.title===s));null!==l&&void 0!==l&&l.required&&o((n=>({...n,[t]:e.find((e=>e.title===s)).required}))),a((e=>({...e,[t]:{selection:r}})))}},...e.reduce(((e,n)=>({fields:{...e.fields,[n.properties.selection.const]:u(n)},options:[...e.options,{label:n.title,value:n.properties.selection.const}],required:{...e.required,[n.properties.selection.const]:n.required}})),{fields:{},options:d?[]:[{label:"None",value:""}],required:{}}),...p};if(n)return{component:"pairs",componentPairs:Object.entries(n).map((e=>{let[n,{type:t}]=e;return[a.C4[n],a.dZ[t]]})),...p}}return null};var r=t(93155);const l=(e,n)=>{const t=e=>e.toLowerCase().includes(n.toLowerCase()||"");return e.filter((e=>{let{description:n,kind:o,title:i}=e;return t(n)||t(o)||t(i)}))},c=(e,n,t)=>!!n&&("selection"===e||("select"===t.component||t.isValid(n))),d=e=>"Delete ".concat(e," channel"),u=e=>{let{properties:n,required:t}=e;return Object.keys(n).reduce(((e,o)=>{const i=s(o,n[o],t);return{...e,...i?{[o]:i}:{}}}),{})},m=e=>{const[n,t]=e;return n===t?"".concat(n,"s"):"".concat(n,"/").concat(t)},p=e=>{let{roomIds:n,roomOptions:t}=e;return n.reduce(((e,n)=>{const o=t.find((e=>e.value===n));return o&&e.push(o.label),e}),[]).join(", ")},h=(e,n)=>Object.values(e).reduce(((e,t)=>"select"===t.component&&n.includes(t.id)?{...e,[t.id]:{selection:t.options[0].value}}:e),{}),g=e=>r.Mh?e:e.filter((e=>{let{id:n}=e;return n!==a.D_}))},62329:(e,n,t)=>{t.d(n,{A:()=>y});var o=t(58168),i=t(96540),a=t(83199),s=t(83084),r=t(3914),l=t(69765),c=t(60383),d=t(68831),u=t(53285);const m=e=>{let{spaceName:n}=e;return i.createElement(a.Text,{textAlign:"center"},"Please ask your administrator to claim more nodes to\xa0",i.createElement(a.Text,{strong:!0},n)," and you will be able to add them to this room")},p=()=>i.createElement(a.Text,{textAlign:"center"},"To add nodes to this room, you first need to claim them to its space."),h=e=>{let{spaceName:n}=e;const t="".concat(d.A.assetsBaseURL,"/img/rack.png");return i.createElement(a.Flex,{column:!0,padding:[4,8,0],alignItems:"center",gap:4},i.createElement("img",{src:t,alt:"server-rack",width:"188px",height:"188px"}),i.createElement(a.H4,{textAlign:"center"},"No claimed nodes available in this Space: 
",n),i.createElement(u.A,{permission:"node:Create"},(e=>e?i.createElement(p,null):i.createElement(m,{spaceName:n}))))};t(17333),t(41393),t(14905),t(98992),t(54520),t(81454),t(8872),t(62953);const g=e=>{let{setNodes:n}=e;const[t,o]=(0,i.useState)(""),[a,s]=(0,i.useState)(!0),[r,l]=(0,i.useState)([]);return(0,i.useEffect)((()=>{const e=r.map((e=>e.id));e.length||!t||a||n((e=>t.split(",").reduce(((e,n)=>e.filter((e=>e.id!==n))),e))),e.length&&e.join()!==t&&(n((n=>{const t=n.map((e=>e.id));return e.reduce(((e,n,o)=>t.includes(n)?e:[...e,r[o]]),n)})),s(!1)),o(e.join())}),[r]),{onRowSelected:l}},f={connectionToCloud:!1},y=e=>{let{claimedNodeIds:n,data:t,roomId:d,setNodes:u,showHeader:m,onAddNodes:p,canAddNodes:y,addNodesGA:b,..._}=e;const{onRowSelected:w}=g({setNodes:u}),v=(0,r.ap)(),T=(0,l.wz)(d),k=(0,i.useMemo)((()=>({addEntry:{handleAction:p,tooltipText:"Add the selected nodes to the room",isDisabled:!y,disabledTooltipText:"Select some nodes to add to the room","data-ga":b}})),[p,y]);return i.createElement(a.Flex,(0,o.A)({column:!0,gap:3,padding:[2,0,0],flex:"grow",width:"100%",height:"100%"},_),m&&i.createElement(a.Flex,{margin:[0,0,5]},i.createElement(a.H3,null,"Available Nodes (",n.length,")")),m&&i.createElement(a.Flex,null,i.createElement(a.Text,{color:"textDescription"},"Nodes in ",v.name," that can be added to ",T.name)),n.length>0?i.createElement(s.A,{"data-testid":"nodesTable-layout",overflow:"hidden",height:"100%"},i.createElement(c.A,{flavour:"availableNodes",enableSelection:!0,customNodes:t,showDefaultRowActions:!1,showDefaultBulkActions:!1,customBulkActions:k,onRowSelected:w,columnVisibility:f,alwaysEnableNodeSelection:!0})):i.createElement(h,{spaceName:v.name}))}},31348:(e,n,t)=>{t.d(n,{A:()=>p});t(17333),t(41393),t(98992),t(54520),t(81454),t(62953);var o=t(96540),i=t(61360);var a=t(3914),s=t(69765),r=t(81048),l=t(63129),c=t(67990),d=t(97245),u=t(18061),m=t(71835);const p=function(e){let{addNodesCallback:n}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};const[t,p,h]=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:"";const[n,t]=(0,o.useState)(e),[a,s]=(0,o.useState)([]);(0,i.A)((()=>{s([n])}),300,[n]);const r=(0,o.useCallback)((e=>t(e.currentTarget.value)),[t]);return[n,r,a]}(),[g,f]=(0,o.useState)([]),y=(0,a.vt)(),b=(0,s.J_)(y,r.mL),[,_]=(0,m.A)(),[w]=(0,u.A)((()=>({enabled:!!b,fetch:()=>(0,l.uQ)({roomId:b,spaceId:y}),initialValue:[],onFail:e=>{_(e)}})),[b,y]),v=(0,c.gr)(e,"ids"),T=(0,o.useMemo)((()=>w.map((e=>e.id)).filter((e=>!v.includes(e)))),[v,w]),k=(0,o.useMemo)((()=>(0,d.P)(w)),[w]),x=(0,c.Hx)(y,e,{onSuccess:n});return[{claimedNodeIds:T,data:k,filter:t,setFilter:p,debouncedFilter:h,setNodes:f},{onAddNodes:(0,o.useCallback)((()=>x(g.map((e=>e.id)))),[x,g]),onAddNode:(0,o.useCallback)((e=>{x([e],{makeCallback:!1})}),[x]),selectedNodes:g}]}},38413:(e,n,t)=>{t.d(n,{A:()=>I});var o=t(58168),i=(t(62953),t(96540)),a=t(39225),s=t(83199),r=t(69765),l=t(3914),c=t(15327),d=t(74618),u=t(45765),m=t(88773),p=t(76634),h=t(77173),g=t(62329),f=t(31348),y=t(28738),b=t(74530),_=t(63950),w=t.n(_),v=t(8711),T=t(12740),k=t(18682);const x=(0,v.default)(s.Icon).withConfig({displayName:"copyInfo__StyledIcon",componentId:"sc-fgr5qt-0"})(["display:flex;align-self:center;cursor:pointer;position:absolute;right:16px;"]),P=e=>{let{gaPrefix:n="claim-nodes",info:t,title:o="Claim token",type:a="token",copyDisabled:r=!1}=e;return i.createElement(s.Flex,{cursor:"pointer","data-testid":"".concat(a,"Copy"),gap:2,onClick:(0,k.C)(t,{text:"".concat(o," copied to 
clipboard")}),position:"relative"},i.createElement(T.O1,{"data-testid":"".concat(a,"TextBlock")},t),!r&&i.createElement(x,{"data-testid":"".concat(a,"CopyButton"),"data-ga":"".concat(n,"::click-copy-").concat(a,"::allpages"),name:"copy",size:"small",color:"primary",onClick:(0,k.C)(t,{text:"".concat(o," copied to clipboard")})}))},C=e=>{let{title:n="",info:t="Loading...",copyDisabled:o=!1,onClose:a=w()}=e;return i.createElement(s.Modal,null,i.createElement(s.ModalContent,{width:{min:80,base:150},height:30},i.createElement(s.ModalHeader,null,i.createElement(s.H3,null,n),i.createElement(s.ModalCloseButton,{onClose:a})),i.createElement(s.ModalBody,null,i.createElement(P,{info:t,copyDisabled:o}))))},q=e=>{var n,t;let{spaceId:a,...s}=e;const r=(0,b.A)(a);return i.createElement(C,(0,o.A)({title:"Claim Token",info:null===(n=r[0])||void 0===n?void 0:n.token,copyDisabled:!(null!==(t=r[0])&&void 0!==t&&t.token)},s))};var D=t(58205),A=t(87659),S=t(47130),M=t(92155),E=t(63314);const N=(0,M.A)((0,S.A)(s.Button)),R=(0,a.A)((()=>Promise.all([t.e(1220),t.e(7208),t.e(8239),t.e(6323),t.e(7304),t.e(979)]).then(t.bind(t,7304)))),H={default:{modal:{"data-testid":"addWarRoomModal"},sidebarModalHeader:{"data-testid":"addWarRoomModal-header"},sidebarContent:{"data-testid":"addWarRoomModal-content",overflow:"auto",hasFooter:!1}},integrations:{modal:{"data-testid":"addIntegrationModal"},sidebarModalHeader:{"data-testid":"addIntegration-header"},sidebarContent:{"data-testid":"addIntegration-content",overflow:"hidden",hasFooter:!1,padding:[0],hasModalTitle:!1}}},I=e=>{let{title:n="Add Nodes",onClose:t,isSubmodal:a,nodeType:b,flavour:_="default"}=e;const w=(0,l.vt)(),v=(0,r.XA)(),T=(0,r.pr)(),k=null!==v&&void 0!==v&&v.loaded?v:T,x=null===k||void 0===k?void 0:k.id,P=(0,i.useMemo)((()=>H[_]),[_]),S="integrations"==_,M=S?m.GO:c.GO,[I,{onAddNodes:O,selectedNodes:L}]=(0,f.A)(x,{addNodesCallback:t}),[U,,B,j]=(0,A.A)(),[G,,F,z]=(0,A.A)();return i.createElement(E.Ay,{feature:"AddNodes"},i.createElement(M,(0,o.A)({right:!0,closeOnEsc:!0,closeOnOverlayClick:!0,onClose:t},P.modal),i.createElement(d.z,(0,o.A)({title:S?i.createElement(s.TextBig,{color:"textLite"},n):i.createElement(i.Fragment,null,"Manage room",i.createElement(s.TextBig,{color:"textLite"},"\xa0/ Add Nodes")),isSubmodal:a,onClose:t},P.sidebarModalHeader||{}),!S&&i.createElement(p.A,{permission:"node:Create"},i.createElement(s.Flex,{gap:2},i.createElement(N,{feature:"AddNodes",label:"Claim Token",flavour:"borderless",onClick:B}),i.createElement(N,{feature:"AddNodes",label:"Room ID",flavour:"borderless",onClick:F})))),!S&&i.createElement(u.U,{"data-testid":"addWarRoomModal-title"},"Add nodes to\xa0",null===k||void 0===k?void 0:k.name),i.createElement(c.Yv,P.sidebarContent||{},S?i.createElement(i.Suspense,{fallback:i.createElement(y.A,{title:"Loading integrations..."})},i.createElement(R,{flavour:D.D_.addNodesModal})):i.createElement(p.A,{permission:"node:Create"},i.createElement(i.Fragment,null,i.createElement(h.A,{nodeType:b,rooms:[x]}),!k.untouchable&&i.createElement(g.A,(0,o.A)({"data-testid":"addWarRoomModal-availableNodes"},I,{onAddNodes:O,canAddNodes:!!L.length,addNodesGA:"add-war-room-modal::click-add-node::global-view"})))))),!S&&U&&i.createElement(E.Ay,{subModal:"ClaimTokenModal"},i.createElement(q,{spaceId:w,onClose:j})),!S&&G&&i.createElement(E.Ay,{subModal:"RoomIdModal"},i.createElement(C,{title:"Room ID",info:x,copyDisabled:!x,onClose:z})))}},77173:(e,n,t)=>{t.d(n,{A:()=>h});t(41393),t(81454),t(62953);var 
o=t(96540),i=t(83199),a=t(3914),s=t(27776),r=t(89009),l=t(67962),c=t(59778);const d=e=>{let{label:n}=e;return n?o.createElement(o.Fragment,null,o.createElement(i.Icon,{name:"code",size:"small"}),o.createElement(i.TextNano,{strong:!0,textTransform:"uppercase"},n)):null},u=(0,o.memo)(d);var m=t(12740);t(3064),t(98992),t(72577);const p={linux:"deploy-linux-generic",centos:"deploy-linux-generic",macos:"deploy-macos",freebsd:"deploy-freebsd",ubuntu:"deploy-linux-generic",debian:"deploy-linux-generic",cloudlinux:"deploy-linux-generic",Container:"deploy-docker"},h=e=>{let{integrationId:n,nodeType:t,rooms:d=[]}=e;const h=(0,s.FF)(),g=(0,a.ap)("plan"),f=(()=>{const e=(0,s.AR)();return n=>e.find((e=>e.id==n))})(),y=f(n),[b,_]=(0,o.useState)(function(){let e=arguments.length>1?arguments[1]:void 0;const n=(arguments.length>0&&void 0!==arguments[0]?arguments[0]:[]).findIndex((n=>{let{id:t}=n;return t==p[e]}));return n>=0?n:0}(h,t)),[w,v]=(0,o.useState)((0,c.M)(g));return o.createElement(i.Flex,{column:!0,gap:4,width:"100%"},o.createElement(i.Flex,{justifyContent:"end"},o.createElement(r.A,{isNightly:"nightly"==w,toggleNightly:e=>{const n=e.target.checked?"nightly":"stable";v(n)}})),y?o.createElement(l.A,{integration:y,isNightly:"nightly"==w,rooms:d,navigateToSettings:!0}):o.createElement(i.Tabs,{selected:b,onChange:_,TabContent:m.Yg,tabsProps:{gap:1,margin:[0,0,4,0]}},h.map((e=>o.createElement(i.Tab,{as:m.OV,key:e.name,label:o.createElement(u,{label:e.name})},o.createElement(l.A,{integration:e,isNightly:"nightly"==w,rooms:d,navigateToSettings:!0}))))))}},79412:(e,n,t)=>{t.d(n,{n:()=>m});t(62953);var o=t(96540),i=t(47767),a=t(45588),s=t(83199),r=t(67031),l=t(3914),c=t(69765),d=t(15327),u=t(74618);const m=e=>{let{onClose:n,onDone:t,isSubmodal:m=!0}=e;const p=(0,l.vt)(),[h,g]=(0,o.useState)(""),[f,y,b]=(0,s.useInputValue)({maxChars:255,value:""}),_=(e=>{const n=(0,i.Zp)(),t=(0,l.bq)();return(0,o.useMemo)((()=>o=>{let{slug:i}=o;const s=(0,a.tW)("/spaces/:spaceSlug/rooms/:roomSlug",{spaceSlug:t,roomSlug:i});e(),n(s)}),[t])})(n),w=(0,c.NG)(p,{onSuccess:t||_}),v=(0,o.useCallback)((()=>{w({name:h,description:f})}),[h,f]);return o.createElement(d.GO,{onClose:n},o.createElement(u.z,{onClose:n,isSubmodal:m,title:"Create a new room"},o.createElement(s.Button,{label:"Add",onClick:v,disabled:!h})),o.createElement(d.Yv,null,o.createElement(r.U,{roomName:h,setRoomName:g,roomDescription:f,charsDescIndicator:b,setRoomDescription:y,isCreateForm:!0})))}},74564:(e,n,t)=>{t.d(n,{Ay:()=>m,TU:()=>d,kI:()=>c,rj:()=>u});t(62953);var o=t(96540),i=t(83199),a=t(3914),s=t(87659),r=t(69765),l=t(46741);const c=e=>{const n=1===e;return"Delete ".concat(e," ").concat(n?"room":"rooms")},d=e=>"Delete ".concat(e," room"),u=e=>{let{name:n,roomsLength:t,spaceName:i}=e;const a=n||"".concat(t,1===t?" room":" rooms");return o.createElement(o.Fragment,null,"You are about to delete ",o.createElement("strong",null,a)," from ",o.createElement("strong",null,i)," space.",o.createElement("br",null),"This cannot be undone. 
Are you sure you want to continue?")},m=e=>{let{id:n,name:t,navigateToParent:c}=e;const m=(0,a.ap)("name"),p=(0,r.wz)(n,"untouchable"),h=(0,l.JT)("room:Delete")&&!p,g=(0,r.HX)(n,{onSuccess:c}),[f,,y,b]=(0,s.A)();return h?o.createElement(o.Fragment,null,o.createElement(i.Button,{"data-testid":"manageWarRoom-delete",flavour:"hollow",onClick:y,label:"Delete room",danger:!0}),f&&o.createElement(i.ConfirmationDialog,{confirmLabel:"Yes, delete","data-ga":"delete-war-room-dialog","data-testid":"deleteWarRoomDialog",handleConfirm:g,handleDecline:b,message:o.createElement(u,{name:t,spaceName:m}),title:d(t)})):null}},67031:(e,n,t)=>{t.d(n,{U:()=>v});var o=t(58168),i=(t(62953),t(96540)),a=t(83199),s=t(78969),r=t(14994),l=t(69765),c=t(46741),d=t(46902),u=t(87659);const m="leave-war-room-dialog",p="leaveWarRoomDialog",h=e=>{let{id:n,name:t,navigateToParent:s}=e;const r=(0,d.ES)(n,"ids"),c=(0,l.NQ)(n,{onSuccess:s}),h=(0,l.wz)(n,"untouchable"),[g,,f,y]=(0,u.A)(),b=1===r.length&&!h?{"data-ga":"".concat(m,"-last-member"),"data-testid":"".concat(p,"LastMember"),message:i.createElement(i.Fragment,null,"If you leave, ",i.createElement("strong",null,t)," room will be deleted immediately.",i.createElement("br",null),"Are you sure you want to continue?"),title:"Leave and delete ".concat(t," room")}:{"data-ga":m,"data-testid":p,message:i.createElement(i.Fragment,null,"You are about to leave ",i.createElement("strong",null,t)," room.",i.createElement("br",null),"Are you sure you want to continue?"),title:"Leave ".concat(t," room")};return i.createElement(i.Fragment,null,i.createElement(a.Button,{"data-ga":"manage-war-room-tab::click-leave-war-room::manage-war-room-modal","data-testid":"manageWarRoom-leave",icon:"switch_off",flavour:"borderless",label:"Leave room",neutral:!0,padding:[0],width:"fit-content",onClick:f}),g&&i.createElement(a.ConfirmationDialog,(0,o.A)({confirmLabel:"Yes, leave",handleConfirm:c,handleDecline:y},b)))};var g=t(74564),f=t(80542);const y=(0,f.k)([e=>{const n=e.length>=1;return(0,f.H)(n,"Give your room a name that's at least one character.")},e=>{const n=e.length<=20;return(0,f.H)(n,"A room's name can't exceed 20 characters.")}]),b=e=>{let{charsIndicator:n,isDisabled:t,isValid:s,handleChange:r,setIsValid:l,setValidationMessage:c,validationMessage:d,value:u,...m}=e;const[p,h]=(0,a.useTouchedState)({});return(0,i.useEffect)((()=>{const e=y(u),n=e.isValid,t=e.messages&&e.messages.length?e.messages[0]:void 0;!s&&n?l(!0):s&&!n&&l(!1),t&&c(t)}),[s,l,u,p,c]),i.createElement(a.TextInput,(0,o.A)({"data-testid":"warRoomOptions-warRoomNameInput",label:"Room name",name:"createRoom",placeholder:"Enter your room's name",hint:"Tip: Use rooms to group your Nodes by their service, purpose, or location.",fieldIndicator:n,value:u,touched:p,onBlur:h,onChange:r,success:s,error:!s&&d,instantFeedback:"all",disabled:t,autoFocus:!0},m))};var _=t(63314);const w=new RegExp(/^[\w\s\d]*?$/),v=e=>{let{id:n,navigateToParent:t,isCreateForm:d,roomName:u,setRoomName:m,roomDescription:p,setRoomDescription:f,charsDescIndicator:y,onSaveClick:v,...T}=e;const k=(0,l.wz)(n),x=(0,r.DL)(),[P,C]=(0,i.useState)(!1),[q,D]=(0,i.useState)(""),[A,S]=(0,i.useState)(""),[M]=(0,a.useInputValue)({maxChars:s.ux}),E=(0,i.useCallback)((e=>m(e.target.value)),[m]),N=!!n,R=((0,c.JT)("room:LeaveAllNodes")||!k.untouchable)&&x.length>1&&k.isMember,[H,I]=(0,a.useTouchedState)({defaultState:!0}),O=!k.name||u===k.name&&p===k.description;return(0,i.useEffect)((()=>{S(w.test(p)&&H?"":"The description can only contain digits, letters, and 
spaces.")}),[H,p]),i.createElement(_.Ay,{tab:"Room::RoomForm"},i.createElement(a.Flex,(0,o.A)({column:!0,"data-testid":"manageWarRoom",flex:"grow",justifyContent:"between",height:"100%",gap:3},T),i.createElement(a.Flex,{column:!0,"data-testid":"manageWarRoom-settings",gap:2},i.createElement(b,{"data-testid":"manageWarRoom-createRoomInput",value:u,handleChange:E,charsIndicator:M,validationMessage:q,setValidationMessage:D,isValid:P,setIsValid:C,isDisabled:N&&k.untouchable}),i.createElement(a.TextInput,{"data-testid":"manageWarRoom-descriptionInput",label:"Description",name:"roomDescription",placeholder:"A room description...",instantFeedback:"positiveFirst",touched:H,onBlur:I,fieldIndicator:y,error:""!==A&&A,success:""===A,value:p,onChange:f})),i.createElement(a.Flex,{"data-testid":"manageWarRoom-actions"},i.createElement(a.Flex,{"data-testid":"manageWarRoom-deleteLeaveActions",gap:4},N&&R&&i.createElement(h,{id:n,name:k.name,navigateToParent:t}),N&&i.createElement(g.Ay,{id:n,name:u,navigateToParent:t})),!d&&i.createElement(a.Button,{"data-ga":"manage-war-room-tab::click-save::manage-war-room-modal","data-testid":"manageRoom-saveButton",label:"Save",onClick:v,disabled:O,margin:[0,0,0,"auto"]}))))}},97245:(e,n,t)=>{t.d(n,{L:()=>a,P:()=>s});t(14905),t(98992),t(8872);var o=t(68831),i=t(25950);const a=e=>{let{data:n,untouchable:t,currentUserId:o,canRemoveUser:i}=e;return n.reduce(((e,n)=>{const a=o===(null===n||void 0===n?void 0:n.id);return e.push({name:n.name,email:n.email,user:{avatarURL:n.avatarURL,name:n.name,email:n.email,id:n.id},type:n.role,disabled:!i||a||t,isSelf:a}),e}),[])},s=e=>e.reduce(((e,n)=>{var t;return e.push({name:n.name||"",os:n.os.id||"",node:{id:n.id,name:n.name||"",os:n.os?"".concat(o.A.assetsBaseURL,"/img/logos/os/").concat((null===(t=i.U[n.os.id])||void 0===t?void 0:t.logoFilename)||"placeholder.svg"):"".concat(o.A.assetsBaseURL,"/img/logos/os/placeholder.svg")},...n}),e}),[])},46440:(e,n,t)=>{t.d(n,{A:()=>r});var o=t(58168),i=t(96540),a=t(83199),s=t(29217);const r=e=>i.createElement(s.A,{content:"Room you're a member of. 
You're able to receive notifications related to nodes in this room"},i.createElement(a.Icon,(0,o.A)({name:"checkmark",width:"12px",height:"12px","data-testid":"svg"},e)))},87860:(e,n,t)=>{t.d(n,{A:()=>g,u:()=>h});t(62953);var o=t(96540),i=t(63950),a=t.n(i),s=t(71847),r=t(37618),l=t(83957),c=t(47762),d=t(41739),u=t(46741),m=t(63129),p=t(67990);const h=r.Ay?e=>{let{id:n}=e;return"rooms.".concat(n,".nodes")}:e=>{let{id:n,spaceId:t}=e;return"spaces.".concat(t,".rooms.").concat(n,".nodes")},g=e=>{let{id:n,spaceId:t,pollingInterval:i=63e3,polling:r=!0,keepPolling:g,onNodeIdsChange:f=a()}=e;const y=(0,p.gr)(n,"ids"),[b,_]=(0,o.useState)((()=>(null===y||void 0===y?void 0:y.length)||0)),w=(0,p.gr)(n,"loaded"),v=(0,u.JT)("room:Read");(0,o.useEffect)((()=>{w&&f({nodeIds:y})}),[y,w]),(0,o.useEffect)((()=>{w&&b!==y.length&&((0,s.H)("node-count","node-count-changed-from-".concat(b,"-to-").concat(y.length),"","","","node-count-change"),_(y.length))}),[y.length,b,w]),(0,l.A)((()=>({key:h({id:n,spaceId:t}),autorun:!!v&&!!t&&!!n,fetch:()=>(0,m.uQ)({roomId:n,spaceId:t}),polling:r,association:{getError:()=>(0,p.dT)({id:n,key:"error"}),getIds:()=>(0,p.dT)({id:n,key:"ids"}),getLoaded:()=>(0,p.dT)({id:n,key:"loaded"}),getUpdatedAt:()=>(0,p.dT)({id:n,key:"updatedAt"})},sort:(e,n)=>e.name.localeCompare(n.name,void 0,{sensitivity:"accent",ignorePunctuation:!0}),getResource:e=>(0,c.GN)({id:e}),getResourcesInitializer:()=>c.gl,getResourceInitialState:d.q,pollingOptions:{pollingInterval:i},maxCacheAge:900,force:!0,keepPolling:g})),[t,n,i])}},56359:(e,n,t)=>{t.d(n,{S:()=>c,z:()=>d});t(62953);var o=t(47444),i=t(2404),a=t.n(i),s=t(3914);const r={loaded:!1,entries:[],error:null,updatedAt:""},l=(0,o.Iz)({key:"spaceAlerts",default:r}),c=(0,o.K0)({key:"spaceAlertState",get:e=>{let{id:n,key:t}=e;return e=>{let{get:o}=e;return o(l(n))[t]}},set:e=>{let{id:n,key:t}=e;return(e,o)=>{let{set:i}=e;i(l(n),"error"!==t?e=>({loaded:!0,entries:a()(o,e.entries)?e.entries:o,updatedAt:(new Date).toISOString()}):{...r,error:o})}}}),d=e=>((e,n)=>(0,o.vc)(c({id:e,key:n})))((0,s.vt)(),e)},93740:(e,n,t)=>{t.d(n,{X:()=>a,_:()=>i});var o=t(26655);const i=e=>o.A.get("/api/v1/spaces/".concat(e,"/token"),{allow401:!0}),a=e=>o.A.post("/api/v1/spaces/".concat(e,"/token/rotate"),void 0,{allow401:!0})},89916:(e,n,t)=>{t.d(n,{g:()=>a});var o=t(47444);const i=(0,o.Iz)({key:"spaceClaimingTokens",default:[]}),a=(0,o.K0)({key:"claimingTokensState",get:e=>n=>{let{get:t}=n;return t(i(e))},set:e=>(n,t)=>{let{set:o}=n;o(i(e),[t])}})},74530:(e,n,t)=>{t.d(n,{A:()=>l});var o=t(96540),i=t(47444),a=t(46741),s=t(93740),r=t(89916);const l=e=>{const n=(0,i.vc)((0,r.g)(e)),t=(0,a.JT)("node:Create",e),l=(0,i.Zs)((e=>{let{snapshot:n,set:t}=e;return async e=>{if(!(await n.getPromise((0,r.g)(e))).length){const{data:n}=await(0,s._)(e);t((0,r.g)(e),n)}}}),[]);return(0,o.useEffect)((()=>{t&&e&&l(e)}),[t,e]),n}},28061:(e,n,t)=>{t.d(n,{A:()=>s});var o=t(96540),i=t(47767),a=t(27994);const s=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:"";const n=(0,i.Zp)(),{url:t}=(0,a.A)("business");return(0,o.useCallback)((()=>{n(t,{replace:!0,state:{coupon:e}})}),[t])}},42728:(e,n,t)=>{t.d(n,{Dy:()=>r,Kj:()=>i,LJ:()=>l,Lf:()=>u,M7:()=>c,di:()=>d,qN:()=>a,z_:()=>s});t(17333),t(14905),t(98992),t(54520),t(8872),t(62953);var o=t(88116);const i=e=>[o.VH.free,o.VH.earlyBird].includes(e),a=e=>{if(!e)return null;const{city:n,country:t,line_1:o,line_2:i,postalCode:a,state:s}=e;return[[o,i].filter(Boolean).join(" "),n,a,s,t].filter(Boolean).join(", 
")},s=e=>{let{currentPlan:n,slug:t,version:o,onTrial:a}=e;const{slug:s,version:r,interval:l}=n;return s===t&&r===o?i(t)?"Current plan":a?"Upgrade":"year"===l?"Update plan options":"Change billing frequency":i(s)&&!i(t)||"pro"===s&&"business"===t?"Upgrade":"Select"},r=(e,n)=>i(n)?"hollow":"business"===n||"pro"===n&&i(e)?"default":"hollow",l=e=>e?Object.entries(e).reduce(((e,n)=>{let[t,i]=n;return o.tD.includes(t)?e+Object.keys(i).length:e}),0)+2:2,c=(e,n)=>{const t=e.split("."),o=new Date(t[0],t[1]-1),i=n.split(".");return new Date(i[0],i[1]-1)-o},d=function(){let{price:e={},promotionCode:n,commitment:t}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const{id:o,commitment:i}=e,a={productId:o,promotionCode:n};return!o||i&&!t?null:i&&t?{...a,commitment:t}:a},u=function(){let{price:e={},promotionCode:n,commitment:t}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const{id:o,commitment:i}=e,a={id:o,promoCode:n};return!o||i&&!t?null:i&&t?{...a,commitment:t}:a}},50105:(e,n,t)=>{t.d(n,{A:()=>y});t(62953);var o=t(96540),i=t(83199),a=t(72641),s=t(78969),r=t(15327),l=t(74618),c=t(45765),d=t(55189),u=t(80542);const m=new RegExp(/(\u00a9|\u00ae|[\u2000-\u3300]|\ud83c[\ud000-\udfff]|\ud83d[\ud000-\udfff]|\ud83e[\ud000-\udfff]|[!@#$%^&*()_+=\-])/),p=(0,u.k)([e=>{const n=e.length>=d.pz;return(0,u.H)(n,"Please enter a name for the Space that is at least 5 characters.")},e=>{const n=e.length<=d.dy;return(0,u.H)(n,"A Space's name can't exceed 20 characters.")},e=>m.test(e)?{isValid:!1,message:"There's an unexpected character in the Space's name. Use only alphanumeric characters (A-Z, a-z, 0-9) and spaces."}:{isValid:!0}]),h=e=>{let{value:n,isValid:t,setIsValid:a,charsIndicator:s,isDirty:r,handleChange:l,validationMessage:c,setValidationMessage:d,onKeyDown:u}=e;const[m,h]=(0,i.useTouchedState)({});return(0,o.useEffect)((()=>{const e=p(n),o=e.isValid,i=e.messages&&e.messages.length?e.messages[0]:void 0;!t&&o?a(!0):t&&!o&&a(!1),i&&d(i)}),[t,n,m,a,d]),o.createElement(i.TextInput,{label:"Space name",name:"createWorkspace",placeholder:"Enter your Space's name",hint:"Give your Space a name that's between 5-20 characters. 
This cannot change.",fieldIndicator:s,value:n,touched:m,onBlur:h,onChange:l,success:t,error:!t&&c,instantFeedback:"positiveFirst",isDirty:r,onKeyDown:u})};var g=t(50876),f=t(63314);const y=e=>{let{onClose:n,onDone:t}=e;const[u,m]=(0,o.useState)(!1),[p,y]=(0,o.useState)(!1),[b,_,w,v]=(0,i.useInputValue)({maxChars:d.dy}),[T,k]=(0,o.useState)(""),{sendLog:x,isReady:P}=(0,g.A)(),C=(0,o.useCallback)((e=>{y(!1),n(),null===t||void 0===t||t(e),x({feature:"CreateSpace",isSuccess:!0})}),[n,x,P]),q=(0,o.useCallback)((()=>{y(!1),x({feature:"CreateSpace",isFailure:!0})}),[x,P]),D=(0,a.A)({onSuccess:C,onError:q}),A=(0,o.useCallback)((()=>{u&&(y(!0),D({name:b}))}),[b,u]);return o.createElement(r.GO,{onClose:n},o.createElement(f.Ay,{feature:"CreateSpace"},o.createElement(l.z,{onClose:n,title:"Create Space"},o.createElement(i.Button,{disabled:!u,isLoading:p,loadingLabel:"Creating",label:"Save",onClick:A})),o.createElement(c.U,null,"Create a new Space"),o.createElement(r.Yv,null,o.createElement(h,{isDirty:v,isValid:u,setIsValid:m,value:b,handleChange:_,charsIndicator:w,validationMessage:T,setValidationMessage:k,onKeyDown:e=>{e.keyCode===s.I7&&u&&A()}}))))}},84280:(e,n,t)=>{t.d(n,{$d:()=>u,A8:()=>r,DT:()=>f,G0:()=>l,IV:()=>p,Wk:()=>g,aj:()=>i,bO:()=>c,bq:()=>y,iy:()=>m,mm:()=>o,uX:()=>b,ys:()=>h});const o="info",i="rooms",a="nodes",s="users",r="notifications",l="integrations",c="billing",d="integrations",u="configurations",m="authenticationTab",p=[o,i,a,s,r,c,d,u,m],h={[o]:"Info",[i]:"Rooms",[a]:"Nodes",[s]:"Users",[r]:"Alerts & Notifications",[l]:"Services",[c]:"Plan & Billing",[d]:"Integrations",[u]:"Configurations",[m]:"Authentication"},g=p[0],f={virtual:p[7],default:p[0]},y="/spaces/:spaceSlug/settings/:settingsTab/*",b="/spaces/:spaceSlug/settings/:settingsTab/:settingsSubTab/*"},83694:(e,n,t)=>{t.d(n,{A:()=>w,o:()=>b});var o=t(58168),i=(t(9391),t(62953),t(96540)),a=t(47767),s=t(83199),r=t(76634),l=t(4659),c=t(29217),d=t(57992),u=t(55189),m=t(22292),p=t(87659),h=t(19673),g=t(92155),f=t(50876);const y=(0,g.A)(s.Button),b=e=>n=>{let{currentPlan:t,...o}=n;const{class:a,interval:r,billingEmail:c}=t,d="year"===r?"yearly":"monthy",u="".concat(a," ").concat(d),m=!["Community","EarlyBird"].includes(a),p=!!c&&!m;return i.createElement(e,o,m&&i.createElement(i.Fragment,null,i.createElement(s.Text,null,"You are currently on ",i.createElement(s.Text,{strong:!0},u)," subscription, which will be cancelled automatically and any due credit from unused period will be given to your credit balance."),i.createElement(s.Text,null,"Any available credit with us won't be automatically lost. If you want to use it in the future, within the defined period on our"," ",i.createElement(l.A,{href:"https://www.netdata.cloud/service-terms/",rel:"noopener noreferrer",target:"_blank"},"Terms of Service"),", or have any requests about previous invoices you can reach out to"," ",i.createElement(s.Text,{strong:!0},"support@netdata.cloud"))),p&&i.createElement(s.Text,null,"You will lose direct access to you invoices and billing information. 
If you want to retrieve this information in the future, you'll have to contact"," ",i.createElement(s.Text,{strong:!0},"support@netdata.cloud")))},_=b((e=>{let{spaceName:n,children:t}=e;return i.createElement(s.Flex,{column:!0,gap:2},i.createElement(s.Text,null,"You are about to delete ",i.createElement("strong",null,n)," space."),t,i.createElement(s.Text,null,"Are you sure you want to continue?"))})),w=e=>{let{id:n,isLastSpace:t,name:l,onClose:g,...b}=e;const w=(0,a.Zp)(),v=(0,d.A)(n),T=(0,m.NJ)(),{value:k}=(0,h.JN)(),[x,,P,C]=(0,p.A)(),[q,D]=(0,p.A)(),{sendLog:A,sendButtonClickedLog:S,isReady:M}=(0,f.A)(),E=(0,i.useCallback)((e=>{g(),A({feature:"DeleteSpace",isSuccess:!0}).finally((()=>w("/spaces/".concat(e))))}),[A,M]),N=(0,i.useCallback)((()=>{D(),v({onSuccess:E}),S({feature:"DeleteSpace",label:"Confirm Delete"})}),[S,M]),R=(0,i.useCallback)((()=>{C(),A({feature:"DeleteSpace",isFailure:!0,eventReason:"User canceled"})}),[A,M]);return T&&i.createElement(r.A,{permission:"space:Delete"},i.createElement(c.A,{align:"top",content:t&&u.sh.delete,isBasic:!0,stretch:"align"},i.createElement(s.Box,null,i.createElement(y,(0,o.A)({danger:!0,"data-ga":"manage-space-tab::click-delete-space::manage-space-modal","data-testid":"deleteSpace-button",disabled:t,flavour:"hollow",label:"DELETE SPACE",onClick:P,isStart:!0,feature:"DeleteSpace"},b)))),x&&i.createElement(s.ConfirmationDialog,{confirmLabel:q?"Deleting...":"Yes, delete","data-ga":"delete-space","data-testid":"deleteSpaceDialog",handleConfirm:N,handleDecline:R,message:i.createElement(_,{spaceName:l,currentPlan:k}),title:"Delete ".concat(l," space"),isConfirmDisabled:q,isConfirmLoading:q,isDeclineDisabled:q}))}},47373:(e,n,t)=>{t.d(n,{e_:()=>l,fc:()=>s,ni:()=>r,xc:()=>a});const o=/^[a-zA-Z0-9@_.-\s!]*$/,i=/^[a-z0-9]+(?:-[a-z0-9]+)*$/,a={nameMinLength:"Space name should be more than 4 characters",slugMinLength:"Space slug should be more than 2 characters",slugMaxLength:"Space slug should not be more than 30 characters",slugNotAvailable:"Space slug is not available. Please try another one.",nameAllowedChars:"Please use alphanumeric characters (A-Z, a-z, 0-9), spaces, periods and supported special characters @, -, _,.",slugAllowedChars:"Please use lowercase alphanumeric characters (a-z, 0-9) or hyphens. 
The slug cannot start or end with a hyphen."},s=e=>e.length<5?"nameMinLength":o.test(e)?null:"nameAllowedChars",r=e=>e.length<3?"slugMinLength":i.test(e)?null:"slugAllowedChars",l=e=>!o.test(e)&&"allowedChars"},76999:(e,n,t)=>{t.d(n,{A:()=>w});var o=t(58168),i=t(96540),a=t(83199),s=t(56359),r=(t(3064),t(98992),t(72577),t(47444)),l=t(3914);const c={alertCounter:{warning:0,critical:0},unreachableCount:0},d=(0,r.K0)({key:"roomAlertSummaryState",get:e=>{let{id:n,key:t}=e;return e=>{let{get:o}=e;const i=o(l.EG),a=o((0,s.S)({id:i,key:"entries"})).find((e=>{let{id:t}=e;return t===n}))||c;return t?a[t]:a}}});var u=t(69765),m=t(45123),p=t(29217),h=t(8711);const g=(0,h.default)(a.Flex).attrs({flex:{grow:0,shrink:0},width:2,height:2,margin:[0,1,0,0],round:1,background:"error",justifyContent:"center",alignItems:"center"}).withConfig({displayName:"indicators__ErrorIndicator",componentId:"sc-19hg3ay-0"})([""]),f=(0,h.default)(g).attrs({background:"warning"}).withConfig({displayName:"indicators__WarningIndicator",componentId:"sc-19hg3ay-1"})([""]),y=(0,h.default)(g).attrs({background:"textLite"}).withConfig({displayName:"indicators__UnreachableIndicator",componentId:"sc-19hg3ay-2"})([""]);var b=t(23630),_=t(46440);const w=e=>{let{id:n,selectedId:t,spaceSlug:l,isSidebar:c,hideAlerts:h,differentiateIsMember:w,...v}=e;const{alertCounter:{critical:T,warning:k},unreachableCount:x}=((e,n)=>(0,r.vc)(d({id:e,key:n})))(n),P=(0,u.wz)(n,"slug"),C=(0,u.wz)(n,"name"),q=(0,u.wz)(n,"isMember"),D=(0,s.z)("error"),A=(0,s.z)("updatedAt");return i.createElement(m.A,(0,o.A)({},!!l&&{to:"/spaces/".concat(l,"/rooms/").concat(P)},{testid:"roomLabel-warRoom-".concat(C),actions:h?null:i.createElement(p.A,{content:i.createElement(b.A,{error:D,text:"Room alerts",updatedAt:A}),isBasic:!0,align:"right"},i.createElement(a.Flex,{flex:!1,flexWrap:!1,justifyContent:"end",width:{min:6},height:{min:2}},T>0&&i.createElement(g,null),k>0&&i.createElement(f,null),x>0&&i.createElement(y,null))),icon:w&&q&&i.createElement(_.A,null),iconColor:"successLite",iconHeight:"12px",iconWidth:"12px",gap:1,textProps:w&&!q&&{padding:[0,0,0,4]},selected:n===t,isSidebar:c,isSecondary:!q},v),C)}},13617:(e,n,t)=>{t.d(n,{A:()=>r});var o=t(58168),i=t(96540),a=t(83199),s=t(77279);const r=e=>{let{onFilterClick:n,showAllRooms:t,...r}=e;return i.createElement(a.Flex,(0,o.A)({gap:2,padding:[1,2],"data-testid":"roomFilterPills"},r),i.createElement(s.O,{flavour:t?"hollow":"default",onClick:n(!1),label:"My Rooms","data-ga":"roomFilterPills::click-my::global-view","data-testid":"roomFilterPills-showMy"}),i.createElement(s.O,{flavour:t?"default":"hollow",onClick:n(!0),label:"All Rooms","data-ga":"roomFilterPills::click-show-all::global-view","data-testid":"roomFilterPills-showAll"}))}},12938:(e,n,t)=>{t.d(n,{A:()=>p});var o=t(58168),i=(t(17333),t(41393),t(98992),t(54520),t(81454),t(62953),t(96540)),a=t(45467),s=t(83199),r=t(14994),l=t(3914),c=t(69765),d=t(46741),u=t(76999),m=t(13617);const p=e=>{let{isSidebar:n,...t}=e;const p=(0,r.DL)(),[h,g]=(0,i.useState)(!1),f=(0,l.vt)(),y=(0,l.bq)();(0,a.A)((()=>{g(!1)}),[f]);const b=(0,d.JT)("room:ReadAll"),_=(0,i.useMemo)((()=>p.filter((e=>{let{isMember:n}=e;return n}))),[p]),w=b&&_.length>0,v=w&&!h?_:p,T=(0,c.ID)();return i.createElement(i.Fragment,null,w&&i.createElement(m.A,{onFilterClick:e=>n=>{n.stopPropagation(),g(e)},showAllRooms:h}),v.map(((e,a)=>{let{id:r,untouchable:l}=e;return 
i.createElement(i.Fragment,{key:r},i.createElement(u.A,(0,o.A)({id:r,hideAlerts:!n,Wrapper:s.Text,isSidebar:n,differentiateIsMember:h,spaceSlug:y,selectedId:T},t)),l&&a!==v.length-1&&i.createElement(s.Flex,{border:{side:"top",color:"border"},margin:[1.5,0],"data-testid":"roomLabel-warRoomSeparator"}))})))}},77279:(e,n,t)=>{t.d(n,{O:()=>s,v:()=>a});var o=t(8711),i=t(83199);const a=(0,o.default)(i.Icon).withConfig({displayName:"styled__StyledIcon",componentId:"sc-i0gfkp-0"})(["transform:",";"],(e=>{let{right:n}=e;return n?"rotate(0)":"rotate(180deg)"})),s=(0,o.default)(i.Button).withConfig({displayName:"styled__StyledButton",componentId:"sc-i0gfkp-1"})(["&&{padding:2px 16px;font-size:12px;height:auto;width:auto;min-width:96px;}"])},78862:(e,n,t)=>{t.d(n,{A:()=>m});var o=t(58168),i=(t(62953),t(96540)),a=t(8711),s=t(47767),r=t(83199),l=t(29217),c=t(3914);const d=[],u=(0,a.default)(r.Flex).attrs((e=>{let{active:n,background:t,showFullname:o,...i}=e;return{width:o?"auto":7,height:7,background:n?"spaceSelected":"spaceIdle",justifyContent:"center",alignItems:"center",round:.5,padding:o?[0,1]:[0],margin:o?[0,0,1]:[0],...i}})).withConfig({displayName:"spaceLabel__SpaceBox",componentId:"sc-1e67mnq-0"})(["cursor:pointer;&:hover{background:",";}"],(e=>{let{active:n}=e;return n?(0,r.getColor)("spaceSelected"):(0,r.getColor)("spaceHovered")})),m=(p=e=>{let{active:n,spaceId:t,testIdPrefix:a,local:l=!1,onClick:m,showFullname:p,color:h,...g}=e;const f=(0,s.Zp)(),y=(0,c.ns)(t),[b,_]=(e=>{if(!e)return d;const n=e.split(" "),[t,o]=n;return[t[0],o?o[0]:""]})(y.name),w=(0,i.useCallback)((()=>m?m(y):f(l?"/overview":"/spaces/".concat(y.slug))),[y.slug,l,m]);return i.createElement(u,(0,o.A)({active:n,"data-testid":"".concat(a||"spaceLabel-space","-").concat(y.slug),onClick:w},g,{showFullname:p}),l?i.createElement(r.Icon,{name:"node",color:n?"textFocus":"textLite"}):p?i.createElement(r.TextSmall,{strong:!0,color:n?"textFocus":h||"textNoFocus"},y.name):i.createElement(i.Fragment,null,i.createElement(r.TextSmall,{strong:!0,color:n?"key":h||"text"},b),i.createElement(r.TextSmall,{strong:!0,color:n?"textLite":h||"textNoFocus"},_)))},e=>{const n=(0,c.ns)(e.spaceId);return e.showFullname?i.createElement(p,e):i.createElement(l.A,{content:null===n||void 0===n?void 0:n.name,align:"right",isBasic:!0},i.createElement(r.Box,null,i.createElement(p,e)))});var p},55463:(e,n,t)=>{t.d(n,{Z8:()=>d,K2:()=>u,kd:()=>c,jX:()=>m,pB:()=>g,i3:()=>_,Gi:()=>p,$D:()=>h,bj:()=>y});t(17333),t(9920),t(41393),t(98992),t(54520),t(3949),t(81454);var o=t(47444),i=t(70716),a=t(22292),s=t(3914);const r={ids:(0,o.Iz)({key:"spaceMemberIds",default:[]}),updatedAt:(0,o.Iz)({key:"spaceMembersUpdatedAt",default:""}),loaded:(0,o.Iz)({key:"spaceMembersLoaded",default:!1})},l=(0,o.Iz)({key:"spaceMemberRole",default:""}),c=(0,o.K0)({key:"spaceMemberState",get:e=>{let{id:n,key:t}=e;return e=>{let{get:o}=e;return o(r[t](n))}},set:e=>{let{id:n,key:t}=e;return(e,o)=>{let{set:i}=e;i(r[t](n),o)}}}),d=(e,n)=>{Object.values(r).forEach((t=>e(t(n))))},u=(0,o.K0)({key:"spaceMemberRoleState",get:e=>{let{id:n,spaceId:t}=e;return e=>{let{get:o}=e;return o(l({id:n,spaceId:t}))}},set:e=>{let{id:n,spaceId:t}=e;return(e,o)=>{let{set:i}=e;i(l({id:n,spaceId:t}),o)}}}),m=(0,o.K0)({key:"spaceMembersRoleState",set:e=>(n,t)=>{let{set:o}=n;t.forEach((n=>{let{id:t,role:i}=n;o(u({id:t,spaceId:e}),i)}))},get:e=>{let{ids:n,spaceId:t}=e;return e=>{let{get:o}=e;return n.map((e=>o(u({id:e,spaceId:t}))))}},cachePolicy_UNSTABLE:{eviction:"most-recent"}}),p=()=>{const e=(0,s.vt)();return 
n=e,t="ids",(0,o.vc)(c({id:n,key:t}));var n,t},h=e=>{const n=(0,s.vt)();return(0,o.vc)(m({ids:e,spaceId:n}))},g=()=>"admin"===(e=>{const n=(0,s.vt)();return(0,o.vc)(u({id:e,spaceId:n}))})((0,a.uW)("id")),f=(0,o.K0)({key:"currentSpaceMembersState",get:e=>{let{ids:n,spaceId:t}=e;return e=>{let{get:o}=e;return n.map((e=>({...o((0,i.m)({id:e})),role:o(u({id:e,spaceId:t}))})))}},cachePolicy_UNSTABLE:{eviction:"most-recent"}}),y=()=>{const e=(0,s.vt)(),n=p();return(0,o.vc)(f({ids:n,spaceId:e}))},b=(0,o.K0)({key:"spaceAdminsState",get:e=>{let{memberIds:n,spaceId:t}=e;return e=>{let{get:o}=e;return o(f({ids:n,spaceId:t})).filter((e=>{let{role:n}=e;return"admin"===n}))}}}),_=()=>{const e=(0,s.vt)(),n=p();return(0,o.vc)(b({memberIds:n,spaceId:e}))}},72641:(e,n,t)=>{t.d(n,{A:()=>r});t(25509),t(65223),t(60321),t(41927),t(11632),t(64377),t(66771),t(12516),t(68931),t(52514),t(35694),t(52774),t(49536),t(21926),t(94483),t(16215),t(62953);var o=t(47444),i=t(7484),a=t(3914),s=t(56639);const r=e=>{let{onSuccess:n,onError:t,isDefault:r=!1}=e;return(0,o.Zs)((e=>{let{set:o}=e;return async e=>{let{name:l,userId:c,email:d}=e;try{const{data:e}=r?await(0,i.qi)(c,d):await(0,i.bz)(l),{id:t,slug:u,name:m}=e;o((0,s.Ay)(t),{id:t,slug:u,name:r?m:l,loaded:!0}),o((0,s.aR)(u),t),o((0,a.nC)("ids"),(e=>e=[...new Set([...e,t])])),o((0,a.nC)("loaded"),!0),n&&n(e)}catch(u){t&&t()}}}),[n,t])}},57992:(e,n,t)=>{t.d(n,{A:()=>m});t(17333),t(98992),t(54520),t(62953);var o=t(47444),i=t(71835),a=t(7484),s=t(55189),r=t(3914),l=t(56639),c=t(48849),d=t(55463),u=t(14994);const m=e=>{const[,n]=(0,i.A)();return(0,o.Zs)((t=>{let{snapshot:o,set:i,reset:m}=t;return async t=>{let{onSuccess:p,onError:h}=t;const g=await o.getPromise((0,r.nC)("ids")),f=g.filter((n=>e!==n)),[y]=f;if(!y)return void n({header:"Spaces",text:s.sh.delete});const{slug:b}=y&&await o.getPromise((0,l.Ay)(y));i((0,r.nC)("ids"),f),i(c.A,(n=>n.filter((n=>n!==e))));try{await(0,a.cN)(e),p&&p(b),(0,d.Z8)(m,e),(0,u.Is)(m,e),m((0,l.Ay)(e))}catch(_){i((0,r.nC)("ids"),g),n(_),h&&h()}}}),[e])}},98046:(e,n,t)=>{t.d(n,{A:()=>r});t(62953);var o=t(47444),i=t(3914),a=t(7484),s=t(71835);const r=e=>{const[n,t]=(0,s.A)();return(0,o.Zs)((o=>{let{snapshot:s,set:r}=o;return async(o,l)=>{let{onSuccess:c,onFail:d}=l;const u=await s.getPromise((0,i.U2)({id:e}));r((0,i.U2)({id:e}),(e=>({...e,...o})));try{await(0,a.Yk)(e,o),n({header:"Space successfully updated!"}),c&&c()}catch(m){r((0,i.U2)({id:e}),u),t(m),d&&d()}}}),[e])}},65566:(e,n,t)=>{t.d(n,{A:()=>s});t(62953);var o=t(96540),i=t(50876);const a=()=>"dissmissed-banner",s=e=>{let{getLocalStorageKey:n=a,logKey:t}=e;const[s,r]=(0,o.useState)(!0),{sendButtonClickedLog:l}=(0,i.A)();(0,o.useEffect)((()=>{const e="true"==localStorage.getItem(n());r(e)}),[n]);return{dismissed:s,onClose:(0,o.useCallback)((()=>{localStorage.setItem(n(),!0),r(!0),t&&l({feature:t})}),[r,l,n])}}},61360:(e,n,t)=>{t.d(n,{A:()=>i});t(62953);var o=t(96540);const i=function(e,n){let t=arguments.length>2&&void 0!==arguments[2]?arguments[2]:[];const i=(0,o.useRef)(e);(0,o.useLayoutEffect)((()=>{i.current=e}),t),(0,o.useEffect)((()=>{const e=setTimeout((()=>i.current()),n);return()=>clearTimeout(e)}),[n,...t])}},67742:(e,n,t)=>{t.d(n,{A:()=>i});var o=t(96540);const i=(e,n)=>{const t=(0,o.useRef)(e);(0,o.useLayoutEffect)((()=>{t.current=e}),[e]),(0,o.useEffect)((()=>{if(!n&&0!==n)return;const e=setInterval((()=>t.current()),n);return()=>clearInterval(e)}),[n])}},540:(e,n,t)=>{t.d(n,{A:()=>s});t(62953);var o=t(96540),i=t(27467);const 
a={},s=(e,n,t,s)=>{const[r,l]=(0,i.N9)("modal"),[c,d]=(0,i.N9)("modalTab"),[u,m]=(0,i.N9)("modalParams"),p=function(){let n=arguments.length>0&&void 0!==arguments[0]?arguments[0]:"",t=arguments.length>1?arguments[1]:void 0;"string"===typeof n&&d(n),l(e),t&&m(t)};return(0,o.useEffect)((()=>{n&&p(t,s)}),[]),{isModalOpen:e===r,currentModalTab:c,handleChangeModalTab:d,handleOpenModal:p,handleCloseModal:()=>{d(""),l(""),m()},params:u||a}}},35261:(e,n,t)=>{t.d(n,{A:()=>d});var o=t(96540),i=t(87337),a=t(82432),s=t(67990),r=t(3914),l=t(14994),c=t(5169);const d=()=>{const e=(0,c.t)(),n=(0,l.WW)(),t=(0,r.vt)(),d=(0,i.YN)(),u=(0,s.nl)(t,n);return(0,o.useCallback)((async function(o){let{onSuccess:i,onFail:s}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};await u(o,{onSuccess:o=>{(0,a.tv)({cacheKeyPrefix:e,nodeIds:o,roomIds:n,spaceId:t}),null===i||void 0===i||i()},onFail:s}),await d(o)}),[u,d])}},37944:(e,n,t)=>{t.d(n,{A:()=>l});var o=t(47767),i=t(96540),a=t(45588),s=t(3914),r=t(84280);const l=function(){let{roomSlug:e}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const n=(0,s.bq)(),t=(0,o.Zp)(),{pathname:l}=(0,o.zy)(),c="".concat(l);return(0,i.useCallback)((()=>{const o=e?(0,a.tW)(r.uX,{spaceSlug:n,settingsTab:r.aj,settingsSubTab:e}):"/spaces/".concat(n,"/settings");t(o,{state:{previousUrlPath:c}})}),[c,t,e,n])}},17632:(e,n,t)=>{t.d(n,{A:()=>r});var o=t(69765),i=t(67990),a=t(82432),s=t(5169);const r=e=>{const n=(0,s.t)(),t=(0,o.ID)(),r=(0,o.wz)(e||t,"spaceId");return(0,i.vV)(r,e||t,{onSuccess:o=>(0,a.gm)({cacheKeyPrefix:n,nodeIds:o,roomId:e||t,spaceId:r})})}},67602:(e,n,t)=>{t.d(n,{A:()=>i});var o=t(540);const i=(e,n)=>{const{isModalOpen:t,handleOpenModal:i,handleCloseModal:a,params:s}=(0,o.A)(e,n);return[t,t?a:i,i,a,s]}}}]); \ No newline at end of file diff --git a/src/web/gui/v2/5700.b7c9908dc7f30a5a57e7.chunk.js b/src/web/gui/v2/5700.b7c9908dc7f30a5a57e7.chunk.js deleted file mode 100644 index ec6879124..000000000 --- a/src/web/gui/v2/5700.b7c9908dc7f30a5a57e7.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},n=(new Error).stack;n&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[n]="4efe7547-b8b3-471e-92e3-d577a967e407",e._sentryDebugIdIdentifier="sentry-dbid-4efe7547-b8b3-471e-92e3-d577a967e407")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[5700],{95700:(e,n,d)=>{d.r(n),d.d(n,{default:()=>a});d(62953);var t=d(96540),f=d(67602),o=d(55337),l=d(6323);const a=()=>{const[e,,,n]=(0,f.A)("manageInvitations");return t.createElement(o.A,null,e&&t.createElement(l.d,{onClose:n}))}}}]); \ No newline at end of file diff --git a/src/web/gui/v2/5709.c494eb62187917e2f2f6.chunk.js b/src/web/gui/v2/5709.c494eb62187917e2f2f6.chunk.js deleted file mode 100644 index 97e6a8053..000000000 --- a/src/web/gui/v2/5709.c494eb62187917e2f2f6.chunk.js +++ /dev/null @@ -1,2 +0,0 @@ -/*! 
For license information please see 5709.c494eb62187917e2f2f6.chunk.js.LICENSE.txt */ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="e99abdbe-8fc8-4842-9419-fbb26858bbf3",e._sentryDebugIdIdentifier="sentry-dbid-e99abdbe-8fc8-4842-9419-fbb26858bbf3")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[5709],{86272:(e,t,n)=>{"use strict";t.__esModule=!0,t.Button=void 0;var o,s=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var n=d(t);if(n&&n.has(e))return n.get(e);var o={__proto__:null},s=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var a in e)if("default"!==a&&{}.hasOwnProperty.call(e,a)){var l=s?Object.getOwnPropertyDescriptor(e,a):null;l&&(l.get||l.set)?Object.defineProperty(o,a,l):o[a]=e[a]}return o.default=e,n&&n.set(e,o),o}(n(96540)),a=n(75879),l=n(34247),c=n(9736),r=(o=n(15877))&&o.__esModule?o:{default:o},i=["label","icon","flavour","isLoading","loadingLabel","onClick","textTransform","iconColor","iconSize","iconWidth","iconHeight","children"];function d(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,n=new WeakMap;return(d=function(e){return e?n:t})(e)}function u(){return u=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(s[n]=e[n]);return s}(e,i);return s.default.createElement(a.StyledButton,u({flavour:h,textTransform:g,hasIcon:!!d||v,onClick:v?void 0:w,ref:t,iconColor:y,iconWidth:x,iconHeight:_},B),v&&s.default.createElement(c.LoaderIcon,{className:"button-icon"}),d&&!v&&s.default.createElement(r.default,{justifyContent:"center",alignItems:"center",width:"auto",height:"100%"},s.default.createElement(l.Icon,{size:b,className:"button-icon",title:d,name:d,width:x,height:_})),!!z&&s.default.createElement("span",null,v&&m||z))}))},27029:(e,t,n)=>{"use strict";t.__esModule=!0,t.ButtonGroup=void 0;var o,s=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var n=r(t);if(n&&n.has(e))return n.get(e);var o={__proto__:null},s=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var a in e)if("default"!==a&&{}.hasOwnProperty.call(e,a)){var l=s?Object.getOwnPropertyDescriptor(e,a):null;l&&(l.get||l.set)?Object.defineProperty(o,a,l):o[a]=e[a]}return o.default=e,n&&n.set(e,o),o}(n(96540)),a=(o=n(15877))&&o.__esModule?o:{default:o},l=n(86272),c=["items","checked","onChange","children","buttonProps"];function r(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,n=new WeakMap;return(r=function(e){return e?n:t})(e)}function i(){return i=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(s[n]=e[n]);return s}(e,c);return s.default.createElement(a.default,i({alignItems:"center"},d),null!=t&&t.length?s.default.createElement(h,{items:t,checked:n,onChange:o,buttonProps:r}):s.default.createElement(u,null,l))}},25547:(e,t)=>{"use strict";t.__esModule=!0,t.HOLLOW=t.DEFAULT=t.BORDER_LESS=void 0;t.DEFAULT="default",t.HOLLOW="hollow",t.BORDER_LESS="borderless"},7517:(e,t,n)=>{"use strict";t.__esModule=!0,t.default=void 0;var o=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof 
e&&"function"!=typeof e)return{default:e};var n=u(t);if(n&&n.has(e))return n.get(e);var o={__proto__:null},s=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var a in e)if("default"!==a&&{}.hasOwnProperty.call(e,a)){var l=s?Object.getOwnPropertyDescriptor(e,a):null;l&&(l.get||l.set)?Object.defineProperty(o,a,l):o[a]=e[a]}return o.default=e,n&&n.set(e,o),o}(n(96540)),s=d(n(8043)),a=d(n(15877)),l=d(n(81816)),c=n(92019),r=n(86272),i=["width","height","tooltip"];function d(e){return e&&e.__esModule?e:{default:e}}function u(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,n=new WeakMap;return(u=function(e){return e?n:t})(e)}function h(){return h=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(s[n]=e[n]);return s}(e,i);return o.default.createElement(l.default,{plain:!0,animation:!0,content:m&&o.default.createElement(v,{content:m})},o.default.createElement(s.default,h({as:r.Button,iconWidth:a,iconHeight:d,ref:t,flavour:"borderless",neutral:!0},f)))}));t.default=m},37466:(e,t,n)=>{"use strict";t.__esModule=!0,t.IconButton=t.ButtonGroup=t.Button=void 0;var o=n(86272);t.Button=o.Button;var s,a=(s=n(7517))&&s.__esModule?s:{default:s};t.IconButton=a.default;var l=n(27029);t.ButtonGroup=l.ButtonGroup},75879:(e,t,n)=>{"use strict";t.__esModule=!0,t.StyledButton=void 0;var o=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var n=w(t);if(n&&n.has(e))return n.get(e);var o={__proto__:null},s=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var a in e)if("default"!==a&&{}.hasOwnProperty.call(e,a)){var l=s?Object.getOwnPropertyDescriptor(e,a):null;l&&(l.get||l.set)?Object.defineProperty(o,a,l):o[a]=e[a]}return o.default=e,n&&n.set(e,o),o}(n(8711)),s=n(13759),a=n(41234),l=f(n(32305)),c=f(n(36094)),r=f(n(62703)),i=f(n(27988)),d=f(n(60090)),u=f(n(25320)),h=n(50677),v=n(25547),m=["groupFirst","groupLast","groupMiddle"];function f(e){return e&&e.__esModule?e:{default:e}}function w(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,n=new WeakMap;return(w=function(e){return e?n:t})(e)}function p(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);t&&(o=o.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,o)}return n}function g(e){for(var t=1;t=0||(s[n]=e[n]);return s}(e,m);return g({padding:s.padding||s.tiny?[.5,1]:s.small?[1,3]:[2],colors:V(s),round:t?{side:"left"}:n?{side:"right"}:!o},function(e){return e.themeType?g(g({},e),{},{theme:b[e.themeType]}):g(g({},e),{},{theme:e.theme})}(s))})).withConfig({displayName:"styled__StyledButton",componentId:"sc-14wh25s-0"})(["&&{display:flex;justify-content:center;align-items:center;position:relative;",";font-weight:",";font-size:",";white-space:nowrap;word-break:keep-all;cursor:pointer;"," pointer-events:",";"," "," transition:all 150ms;background-color:",";color:",";border-width:1px;border-style:solid;border-color:",";"," box-sizing:border-box;"," "," text-decoration:none;& > span{",";margin-left:",";}&:hover{border-color:",";background-color:",";color:",";text-decoration:none;.button-icon{fill:",";}}&:active{","}"," &:focus{outline:none;}.button-icon{height:",";width:",";fill:",";}.ntd-spinner{fill:none;stroke-width:17px;stroke-dasharray:100;stroke-dashoffset:100;animation:ntd-draw 1s linear infinite;stroke:",";width:24px;}.path{stroke:",";}@keyframes ntd-draw{to{stroke-dashoffset:0;}}}"],i.default,(function(e){return e.strong?700:500}),(function(e){var 
t=e.small;return e.tiny?"10px":t?"12px":"14px"}),(function(e){return e.disabled&&"opacity: 0.4;"}),(function(e){return e.disabled?"none":"auto"}),l.default,c.default,(function(e){return e.colors.bg(e)}),(function(e){return e.colors.color(e)}),(function(e){return e.colors.border(e)}),r.default,u.default,h.position,d.default,(function(e){return e.hasIcon?"4px":"0px"}),(function(e){return e.colors.borderHover(e)}),(function(e){return e.colors.bgHover(e)}),(function(e){return e.colors.colorHover(e)}),(function(e){return e.colors.colorHover(e)}),x,(function(e){return e.active&&"\n "+x+"\n "}),(function(e){return e.iconWidth?"string"===typeof e.iconWidth?e.iconWidth:e.iconWidth+"px":(0,a.getSizeBy)(2)(e)}),(function(e){return e.iconHeight?"string"===typeof e.iconHeight?e.iconHeight:e.iconHeight+"px":(0,a.getSizeBy)(2)(e)}),(function(e){return e.colors.iconColor(e)}),(function(e){return e.colors.color(e)}),(function(e){return e.colors.color(e)}))},87159:(e,t,n)=>{"use strict";t.__esModule=!0,t.default=void 0;var o,s=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var n=i(t);if(n&&n.has(e))return n.get(e);var o={__proto__:null},s=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var a in e)if("default"!==a&&{}.hasOwnProperty.call(e,a)){var l=s?Object.getOwnPropertyDescriptor(e,a):null;l&&(l.get||l.set)?Object.defineProperty(o,a,l):o[a]=e[a]}return o.default=e,n&&n.set(e,o),o}(n(96540)),a=(o=n(15877))&&o.__esModule?o:{default:o},l=n(92019),c=n(34247),r=["children","align","margin","background"];function i(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,n=new WeakMap;return(i=function(e){return e?n:t})(e)}function d(){return d=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(s[n]=e[n]);return s}(e,r);return s.default.createElement(a.default,{ref:t,column:"top"===o,columnReverse:"bottom"===o,rowReverse:"right"===o,margin:h},s.default.createElement(a.default,d({background:m,padding:[1,2],round:!0,column:!0},f),"string"===typeof n?s.default.createElement(l.Text,{color:"tooltipText"},n):n),o&&s.default.createElement(c.Icon,{name:"triangle",alignSelf:"center",color:m,rotate:u[o],height:"8px",width:"8px","data-testid":"drop-arrow"}))}));t.default=h},99436:(e,t,n)=>{"use strict";t.__esModule=!0,t.default=void 0;var o=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var n=w(t);if(n&&n.has(e))return n.get(e);var o={__proto__:null},s=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var a in e)if("default"!==a&&{}.hasOwnProperty.call(e,a)){var l=s?Object.getOwnPropertyDescriptor(e,a):null;l&&(l.get||l.set)?Object.defineProperty(o,a,l):o[a]=e[a]}return o.default=e,n&&n.set(e,o),o}(n(96540)),s=f(n(40961)),a=f(n(44862)),l=f(n(43119)),c=f(n(35586)),r=f(n(51365)),i=f(n(34587)),d=f(n(22007)),u=f(n(93331)),h=f(n(89075)),v=f(n(8711)),m=["backdrop","target","align","stretch","onClickOutside","onEsc","children","canHideTarget","keepHorizontal"];function f(e){return e&&e.__esModule?e:{default:e}}function w(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,n=new WeakMap;return(w=function(e){return e?n:t})(e)}function p(){return p=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(s[n]=e[n]);return s}(e,m),O=(0,r.default)(t),L=O[0],k=O[1],S=(0,d.default)(v,L,w,x,H,C);(0,o.useLayoutEffect)((function(){S()}),[S]),(0,i.default)(v,S),(0,c.default)(L,_,v),(0,l.default)(M);var A=(0,a.default)();return 
s.default.createPortal(h?o.default.createElement(o.default.Fragment,null,o.default.createElement(u.default,p({ref:k,width:{max:"100%"},column:!0,"data-testid":"drop"},V),z),o.default.createElement(g,null)):o.default.createElement(u.default,p({ref:k,width:{max:"100%"},column:!0,"data-testid":"drop"},V),z),A)}));t.default=b},34587:(e,t,n)=>{"use strict";t.__esModule=!0,t.default=void 0;var o,s=n(96540),a=(o=n(42561))&&o.__esModule?o:{default:o};t.default=function(e,t){return(0,s.useEffect)((function(){var n,o=function(){var n=(0,a.default)(e).filter((function(e){return e.scrollHeight>e.clientHeight}));return n.forEach((function(e){return e.addEventListener("scroll",t,{capture:!1,passive:!0})})),function(){return n.forEach((function(e){return e.removeEventListener("scroll",t)}))}};n=o();var s=function(){n(),n=o(),t()};return window.addEventListener("resize",s),function(){n(),window.removeEventListener("resize",s)}}),[e,t])}},86633:(e,t,n)=>{"use strict";t.__esModule=!0,t.default=void 0;var o=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var n=h(t);if(n&&n.has(e))return n.get(e);var o={__proto__:null},s=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var a in e)if("default"!==a&&{}.hasOwnProperty.call(e,a)){var l=s?Object.getOwnPropertyDescriptor(e,a):null;l&&(l.get||l.set)?Object.defineProperty(o,a,l):o[a]=e[a]}return o.default=e,n&&n.set(e,o),o}(n(96540)),s=u(n(8711)),a=n(28892),l=u(n(15877)),c=u(n(94474)),r=u(n(8043)),i=n(51462),d=["hideShadow","itemProps","items","onItemClick","dropTitle","dropTitlePadding","Item","Footer","value","hasSearch","searchMargin","gap","estimateSize","close"];function u(e){return e&&e.__esModule?e:{default:e}}function h(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,n=new WeakMap;return(h=function(e){return e?n:t})(e)}function v(){return v=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(s[n]=e[n]);return s}(e,d),k=(0,o.useState)(""),S=k[0],A=k[1],E=(0,o.useMemo)((function(){if(!S)return u;var e=S.toLowerCase();return u.filter((function(t){var n=t.label,o=t.value;return!("string"!==typeof n||!n.toLowerCase().includes(e))||!("string"!==typeof o||!o.toLowerCase().includes(e))}))}),[u,S]),P=(0,o.useRef)(),T=(0,a.useVirtualizer)({count:E.length,getScrollElement:function(){return P.current},scrollOffsetFn:function(e){return e?e.target.scrollTop-P.current.offsetTop:0},overscan:3,enableSmoothScroll:!1,estimateSize:V});return o.default.createElement(m,v({as:"ul",role:"listbox",background:"dropdown",hideShadow:n,padding:[0],margin:[1,0],column:!0,tabindex:"-1",width:"auto"},L),w&&o.default.createElement(l.default,{padding:g},w),_&&o.default.createElement(r.default,{margin:z},o.default.createElement(c.default,{"data-testid":"dropdown-search",placeholder:"Search",onChange:A})),o.default.createElement("div",{ref:(0,i.mergeRefs)(P,t),style:{height:"100%",overflow:"auto"}},o.default.createElement("div",{style:{minHeight:T.getTotalSize()+"px",width:"100%",position:"relative"}},T.getVirtualItems().map((function(e){return o.default.createElement("div",{key:e.key,style:{position:"absolute",top:0,left:0,width:"100%",transform:"translateY("+e.start+"px)",padding:2*H,overflow:"hidden"},"data-index":e.index,ref:T.measureElement},o.default.createElement(y,{item:E[e.index],index:e.index,itemProps:s,value:x,onItemClick:h,close:O}))})))),b&&o.default.createElement(b,{close:O}))}));t.default=w},21046:(e,t,n)=>{"use strict";t.__esModule=!0,t.default=t.ItemContainer=void 0;var 
o=d(n(96540)),s=d(n(8711)),a=n(41234),l=d(n(15877)),c=n(92019),r=["value","label","icon","reverse","disabled","onClick"],i=["item","value","onItemClick","index","style"];function d(e){return e&&e.__esModule?e:{default:e}}function u(){return u=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(s[n]=e[n]);return s}function v(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);t&&(o=o.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,o)}return n}function m(e,t,n){return(t=function(e){var t=function(e,t){if("object"!=typeof e||!e)return e;var n=e[Symbol.toPrimitive];if(void 0!==n){var o=n.call(e,t||"default");if("object"!=typeof o)return o;throw new TypeError("@@toPrimitive must return a primitive value.")}return("string"===t?String:Number)(e)}(e,"string");return"symbol"==typeof t?t:t+""}(t))in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}var f=t.ItemContainer=(0,s.default)(l.default).attrs((function(e){return function(e){for(var t=1;t * {\n color: "+(0,a.getColor)("textLite")({theme:n})+"\n }\n "}),(function(e){var t=e.selected,n=e.theme;return t&&"\n background-color: "+("Dark"===n.name?(0,a.getRgbColor)(["green","netdata"],.3)({theme:n}):(0,a.getRgbColor)(["green","frostee"])({theme:n}))+"\n "}));t.default=function(e){var t=e.item,n=t.value,s=t.label,a=t.icon,l=t.reverse,d=t.disabled,v=t.onClick,m=h(t,r),w=e.value,p=e.onItemClick,g=e.index,y=e.style,b=h(e,i),x=w===n;return o.default.createElement(f,u({"data-index":g,"aria-selected":x,disabled:d,selected:x,onClick:function(e){v&&v(e),p(n)}},m,b,{style:y}),l&&o.default.createElement(c.TextSmall,null,s),a,!l&&o.default.createElement(c.TextSmall,null,s))}},72982:(e,t,n)=>{"use strict";t.A=void 0;var o=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var n=m(t);if(n&&n.has(e))return n.get(e);var o={__proto__:null},s=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var a in e)if("default"!==a&&{}.hasOwnProperty.call(e,a)){var l=s?Object.getOwnPropertyDescriptor(e,a):null;l&&(l.get||l.set)?Object.defineProperty(o,a,l):o[a]=e[a]}return o.default=e,n&&n.set(e,o),o}(n(96540)),s=v(n(99436)),a=v(n(48004)),l=v(n(42561)),c=v(n(51365)),r=v(n(23627)),i=v(n(9763)),d=v(n(86633)),u=v(n(21046)),h=["value","onChange","onOpen","onClose","closeOnClick","open","icon","label","caret","children","dropProps","dropdownProps","itemProps","items","Item","Footer","Dropdown","animation","dropTitle","dropTitlePadding","hasSearch"];function v(e){return e&&e.__esModule?e:{default:e}}function m(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,n=new WeakMap;return(m=function(e){return e?n:t})(e)}function f(){return f=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(s[n]=e[n]);return s}(e,h),W=(0,r.default)(_,{on:m,off:g}),U=W[0],Y=W[1],Q=W[3],X=(0,c.default)(t),Z=X[0],G=X[1],K=(0,o.useCallback)((function(e){v&&v(e),b&&Q()}),[v]),$=(0,o.useCallback)((function(e){Z.current===e.target||(0,l.default)(e.target).some((function(e){return e===Z.current}))||Q()}),[Q]),J=(0,a.default)(V,G,function(e){for(var t=1;t{"use strict";t.__esModule=!0,t.default=void 0;var o=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var n=d(t);if(n&&n.has(e))return n.get(e);var o={__proto__:null},s=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var a in 
e)if("default"!==a&&{}.hasOwnProperty.call(e,a)){var l=s?Object.getOwnPropertyDescriptor(e,a):null;l&&(l.get||l.set)?Object.defineProperty(o,a,l):o[a]=e[a]}return o.default=e,n&&n.set(e,o),o}(n(96540)),s=i(n(8711)),a=i(n(15877)),l=n(34247),c=n(92019),r=["open","icon","label","caret"];function i(e){return e&&e.__esModule?e:{default:e}}function d(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,n=new WeakMap;return(d=function(e){return e?n:t})(e)}function u(){return u=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(s[n]=e[n]);return s}(e,r);return o.default.createElement(h,u({gap:2,padding:[2,4],justifyContent:"between",alignItems:"center",role:"button",tabindex:"0","aria-haspopup":"listbox","aria-expanded":n,ref:t},v),o.default.createElement(a.default,{alignItems:"center",gap:2},s,"string"===typeof i?o.default.createElement(c.Text,null,i):i),!0===d?o.default.createElement(l.Icon,{name:"chevron_down",color:"text",width:"12px",height:"12px",rotate:n?2:null}):d)}));t.default=v},22232:(e,t)=>{"use strict";t.__esModule=!0,t.default=void 0;t.default={top:{bottom:"top"},left:{right:"left"},right:{left:"right"},bottom:{top:"bottom"}}},42561:(e,t)=>{"use strict";t.__esModule=!0,t.default=void 0;t.default=function(e){var t=[];for(e=e.parentNode;e;)t.push(e),e=e.parentNode;return t}},2310:(e,t,n)=>{"use strict";t.__esModule=!0,t.default=void 0;var o,s=(o=n(42561))&&o.__esModule?o:{default:o};t.default=function(e,t){return(0,s.default)(t).some((function(t){return t===e}))}},48004:(e,t,n)=>{"use strict";t.__esModule=!0,t.default=void 0;var o,s=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var n=l(t);if(n&&n.has(e))return n.get(e);var o={__proto__:null},s=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var a in e)if("default"!==a&&{}.hasOwnProperty.call(e,a)){var c=s?Object.getOwnPropertyDescriptor(e,a):null;c&&(c.get||c.set)?Object.defineProperty(o,a,c):o[a]=e[a]}return o.default=e,n&&n.set(e,o),o}(n(96540)),a=(o=n(68016))&&o.__esModule?o:{default:o};function l(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,n=new WeakMap;return(l=function(e){return e?n:t})(e)}function c(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);t&&(o=o.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,o)}return n}function r(e){for(var t=1;t{"use strict";t.__esModule=!0,t.default=void 0;var o,s=n(96540),a=(o=n(3639))&&o.__esModule?o:{default:o};t.default=function(e){return(0,s.useMemo)((function(){return e||(0,a.default)()}),[])}},81816:(e,t,n)=>{"use strict";t.__esModule=!0,t.default=void 0;var o=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var n=v(t);if(n&&n.has(e))return n.get(e);var o={__proto__:null},s=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var a in e)if("default"!==a&&{}.hasOwnProperty.call(e,a)){var l=s?Object.getOwnPropertyDescriptor(e,a):null;l&&(l.get||l.set)?Object.defineProperty(o,a,l):o[a]=e[a]}return o.default=e,n&&n.set(e,o),o}(n(96540)),s=h(n(99436)),a=h(n(51365)),l=h(n(23627)),c=h(n(48004)),r=h(n(88484)),i=h(n(22232)),d=h(n(87159)),u=["plain","open","align","dropProps","content","animation","disabled","zIndex","children","allowHoverOnTooltip"];function h(e){return e&&e.__esModule?e:{default:e}}function v(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,n=new 
WeakMap;return(v=function(e){return e?n:t})(e)}function m(){return m=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(s[n]=e[n]);return s}(e,u),V=(0,r.default)(C["aria-describedby"]),O=(0,l.default)(!1),L=O[0],k=O[2],S=O[3],A=(0,a.default)(t),E=A[0],P=A[1],T=(0,c.default)(B,P,w(w({onMouseEnter:k,onMouseLeave:H?function(){return setTimeout((function(){j.current||S()}),300)}:S,onFocus:k,onBlur:S},L&&{"aria-describedby":V}),C)),j=(0,o.useRef)(!1);return(0,o.useLayoutEffect)((function(){E.current&&v&&k()}),[]),b?o.default.createElement(o.Fragment,null,T,L&&E.current&&!_&&o.default.createElement(s.default,m({noEvents:!H,align:(null==y?void 0:y.align)||i.default[p],hideShadow:!0,id:V,onClickOutside:S,onMouseEnter:function(){return j.current=!0},onMouseLeave:function(){j.current=!1,S()},target:E.current},y,{animation:x,onEsc:S,zIndex:z}),n?g(b):o.default.createElement(d.default,{align:p},g(b)))):B}));t.default=y},9736:(e,t,n)=>{"use strict";t.__esModule=!0,t.LoaderIcon=void 0;var o=n(27597);t.LoaderIcon=o.LoaderIcon},27597:(e,t,n)=>{"use strict";t.__esModule=!0,t.LoaderIcon=void 0;var o=l(n(96540)),s=l(n(8711)),a=n(41234);function l(e){return e&&e.__esModule?e:{default:e}}var c=s.default.svg.withConfig({displayName:"loader__StyledSvg",componentId:"sc-fxhmqg-0"})(["fill:none;stroke-width:17px;stroke-dasharray:100;stroke-dashoffset:100;animation:ntd-draw 1s linear infinite;stroke:",";width:24px;.path{stroke:",";}@keyframes ntd-draw{to{stroke-dashoffset:0;}}"],(0,a.getColor)("bright"),(0,a.getColor)("bright"));t.LoaderIcon=function(e){var t=e.className;return o.default.createElement(c,{className:t,viewBox:"0 0 21 17",version:"1.1",xmlns:"http://www.w3.org/2000/svg"},o.default.createElement("g",{className:"path",stroke:"none",strokeWidth:"1",fill:"none",fillRule:"evenodd"},o.default.createElement("path",{d:"M2,1 C8.25086152,1 11.9367136,1 13.0575562,1 C14.73882,1 19.6834591,2 19.9614325,7.72050108 C20.239406,13.4410022 15.7459591,15.1224845 13.6463763,15.1224845 C12.2466545,15.1224845 10.0279195,15.1224845 6.9901715,15.1224845 L2,1 Z",id:"Path-2",strokeWidth:"2"})))}},63534:(e,t,n)=>{"use strict";t.__esModule=!0,t.Icon=void 0;var o=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var n=c(t);if(n&&n.has(e))return n.get(e);var o={__proto__:null},s=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var a in e)if("default"!==a&&{}.hasOwnProperty.call(e,a)){var l=s?Object.getOwnPropertyDescriptor(e,a):null;l&&(l.get||l.set)?Object.defineProperty(o,a,l):o[a]=e[a]}return o.default=e,n&&n.set(e,o),o}(n(96540)),s=n(38857),a=n(81900),l=["name","size"];function c(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,n=new WeakMap;return(c=function(e){return e?n:t})(e)}function r(){return r=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(s[n]=e[n]);return s}(e,l),d=s.iconsList[n];if(!d)return null;var u,h=c||((u=n).endsWith("_s")?"small":u.endsWith("_l")?"large":"medium");return o.default.createElement(a.StyledIcon,r({viewBox:d.viewBox},i,{size:h,ref:t}),o.default.createElement("use",{xlinkHref:"#"+d.id}))}))},38857:(e,t,n)=>{"use strict";t.__esModule=!0,t.iconsList=void 0;var 
o=hl(n(47060)),s=hl(n(18217)),a=hl(n(34975)),l=hl(n(52441)),c=hl(n(72041)),r=hl(n(60863)),i=hl(n(30882)),d=hl(n(17423)),u=hl(n(38475)),h=hl(n(71049)),v=hl(n(29044)),m=hl(n(26391)),f=hl(n(58709)),w=hl(n(62709)),p=hl(n(34390)),g=hl(n(20949)),y=hl(n(24810)),b=hl(n(97322)),x=hl(n(58655)),_=hl(n(51317)),M=hl(n(86480)),z=hl(n(98666)),B=hl(n(81886)),H=hl(n(38813)),C=hl(n(78286)),V=hl(n(52390)),O=hl(n(70157)),L=hl(n(87723)),k=hl(n(59460)),S=hl(n(25817)),A=hl(n(78957)),E=hl(n(48783)),P=hl(n(36878)),T=hl(n(62682)),j=hl(n(45715)),q=hl(n(74389)),D=hl(n(43071)),I=hl(n(15588)),F=hl(n(22203)),N=hl(n(14767)),R=hl(n(91956)),W=hl(n(87875)),U=hl(n(97517)),Y=hl(n(3559)),Q=hl(n(79919)),X=hl(n(63668)),Z=hl(n(26762)),G=hl(n(30464)),K=hl(n(66643)),$=hl(n(39078)),J=hl(n(8827)),ee=hl(n(49686)),te=hl(n(49764)),ne=hl(n(49484)),oe=hl(n(89482)),se=hl(n(20327)),ae=hl(n(93855)),le=hl(n(53079)),ce=hl(n(65377)),re=hl(n(77568)),ie=hl(n(51580)),de=hl(n(34623)),ue=hl(n(63319)),he=hl(n(85419)),ve=hl(n(81426)),me=hl(n(68124)),fe=hl(n(86547)),we=hl(n(85809)),pe=hl(n(42202)),ge=hl(n(48083)),ye=hl(n(58746)),be=hl(n(4643)),xe=hl(n(21774)),_e=hl(n(43380)),Me=hl(n(38001)),ze=hl(n(26181)),Be=hl(n(6494)),He=hl(n(66427)),Ce=hl(n(16888)),Ve=hl(n(74552)),Oe=hl(n(66194)),Le=hl(n(14946)),ke=hl(n(85095)),Se=hl(n(47307)),Ae=hl(n(68864)),Ee=hl(n(84652)),Pe=hl(n(8035)),Te=hl(n(98411)),je=hl(n(20599)),qe=hl(n(79570)),De=hl(n(8001)),Ie=hl(n(79684)),Fe=hl(n(4228)),Ne=hl(n(3113)),Re=hl(n(39088)),We=hl(n(99464)),Ue=hl(n(37414)),Ye=hl(n(91423)),Qe=hl(n(79644)),Xe=hl(n(89875)),Ze=hl(n(47720)),Ge=hl(n(72570)),Ke=hl(n(60774)),$e=hl(n(3886)),Je=hl(n(24241)),et=hl(n(41519)),tt=hl(n(22370)),nt=hl(n(40813)),ot=hl(n(58609)),st=hl(n(10237)),at=hl(n(78057)),lt=hl(n(7351)),ct=hl(n(24229)),rt=hl(n(25094)),it=hl(n(89513)),dt=hl(n(41517)),ut=hl(n(27612)),ht=hl(n(5048)),vt=hl(n(11862)),mt=hl(n(949)),ft=hl(n(61898)),wt=hl(n(73862)),pt=hl(n(92949)),gt=hl(n(78131)),yt=hl(n(11336)),bt=hl(n(27555)),xt=hl(n(9080)),_t=hl(n(56851)),Mt=hl(n(49232)),zt=hl(n(32824)),Bt=hl(n(86294)),Ht=hl(n(92605)),Ct=hl(n(35813)),Vt=hl(n(16912)),Ot=hl(n(76107)),Lt=hl(n(88909)),kt=hl(n(45266)),St=hl(n(70762)),At=hl(n(68081)),Et=hl(n(64197)),Pt=hl(n(6232)),Tt=hl(n(90728)),jt=hl(n(52547)),qt=hl(n(50199)),Dt=hl(n(20002)),It=hl(n(67977)),Ft=hl(n(70009)),Nt=hl(n(65550)),Rt=hl(n(70647)),Wt=hl(n(83424)),Ut=hl(n(40483)),Yt=hl(n(47676)),Qt=hl(n(21489)),Xt=hl(n(82118)),Zt=hl(n(64739)),Gt=hl(n(27703)),Kt=hl(n(16933)),$t=hl(n(76415)),Jt=hl(n(96104)),en=hl(n(5820)),tn=hl(n(72085)),nn=hl(n(40235)),on=hl(n(81199)),sn=hl(n(95546)),an=hl(n(40852)),ln=hl(n(43902)),cn=hl(n(21674)),rn=hl(n(1279)),dn=hl(n(53965)),un=hl(n(14608)),hn=hl(n(5057)),vn=hl(n(1498)),mn=hl(n(96317)),fn=hl(n(51991)),wn=hl(n(34111)),pn=hl(n(4344)),gn=hl(n(18647)),yn=hl(n(30983)),bn=hl(n(16410)),xn=hl(n(57013)),_n=hl(n(34031)),Mn=hl(n(55703)),zn=hl(n(12501)),Bn=hl(n(43947)),Hn=hl(n(52059)),Cn=hl(n(9150)),Vn=hl(n(74480)),On=hl(n(45753)),Ln=hl(n(89336)),kn=hl(n(59631)),Sn=hl(n(47627)),An=hl(n(22463)),En=hl(n(3599)),Pn=hl(n(61386)),Tn=hl(n(17506)),jn=hl(n(91008)),qn=hl(n(95173)),Dn=hl(n(39850)),In=hl(n(79662)),Fn=hl(n(34106)),Nn=hl(n(7297)),Rn=hl(n(80370)),Wn=hl(n(38194)),Un=hl(n(54349)),Yn=hl(n(75873)),Qn=hl(n(39862)),Xn=hl(n(29474)),Zn=hl(n(53328)),Gn=hl(n(94403)),Kn=hl(n(60188)),$n=hl(n(64612)),Jn=hl(n(56025)),eo=hl(n(94064)),to=hl(n(24934)),no=hl(n(61248)),oo=hl(n(48635)),so=hl(n(29921)),ao=hl(n(52874)),lo=hl(n(70922)),co=hl(n(38141)),ro=hl(n(26946)),io=hl(n(98108)),uo=hl(n(87007)),ho=hl(n(14167)),vo=hl(n(64628)),mo=hl(n(32365)),fo=hl(n(27506
)),wo=hl(n(75116)),po=hl(n(25376)),go=hl(n(53085)),yo=hl(n(46550)),bo=hl(n(27129)),xo=hl(n(49164)),_o=hl(n(47217)),Mo=hl(n(15471)),zo=hl(n(65024)),Bo=hl(n(84024)),Ho=hl(n(69460)),Co=hl(n(62061)),Vo=hl(n(17026)),Oo=hl(n(72693)),Lo=hl(n(19227)),ko=hl(n(5323)),So=hl(n(72293)),Ao=hl(n(40484)),Eo=hl(n(71061)),Po=hl(n(43671)),To=hl(n(834)),jo=hl(n(70249)),qo=hl(n(88112)),Do=hl(n(27891)),Io=hl(n(16171)),Fo=hl(n(68205)),No=hl(n(67360)),Ro=hl(n(15958)),Wo=hl(n(47532)),Uo=hl(n(6133)),Yo=hl(n(19943)),Qo=hl(n(21832)),Xo=hl(n(47306)),Zo=hl(n(78902)),Go=hl(n(28676)),Ko=hl(n(89399)),$o=hl(n(99229)),Jo=hl(n(97116)),es=hl(n(77840)),ts=hl(n(49762)),ns=hl(n(31411)),os=hl(n(44555)),ss=hl(n(5157)),as=hl(n(31896)),ls=hl(n(62520)),cs=hl(n(14191)),rs=hl(n(50861)),is=hl(n(13974)),ds=hl(n(51665)),us=hl(n(66689)),hs=hl(n(74161)),vs=hl(n(26249)),ms=hl(n(51961)),fs=hl(n(6819)),ws=hl(n(70391)),ps=hl(n(55767)),gs=hl(n(72232)),ys=hl(n(65494)),bs=hl(n(55925)),xs=hl(n(10484)),_s=hl(n(35743)),Ms=hl(n(51771)),zs=hl(n(95715)),Bs=hl(n(12393)),Hs=hl(n(76533)),Cs=hl(n(24972)),Vs=hl(n(61357)),Os=hl(n(88222)),Ls=hl(n(43077)),ks=hl(n(8277)),Ss=hl(n(51072)),As=hl(n(78776)),Es=hl(n(44212)),Ps=hl(n(8269)),Ts=hl(n(4729)),js=hl(n(66359)),qs=hl(n(46021)),Ds=hl(n(63487)),Is=hl(n(79723)),Fs=hl(n(81925)),Ns=hl(n(22202)),Rs=hl(n(17576)),Ws=hl(n(60194)),Us=hl(n(85694)),Ys=hl(n(25628)),Qs=hl(n(2965)),Xs=hl(n(13203)),Zs=hl(n(34124)),Gs=hl(n(13151)),Ks=hl(n(52039)),$s=hl(n(15670)),Js=hl(n(20972)),ea=hl(n(99947)),ta=hl(n(86788)),na=hl(n(73931)),oa=hl(n(75194)),sa=hl(n(84458)),aa=hl(n(26140)),la=hl(n(83835)),ca=hl(n(2386)),ra=hl(n(13347)),ia=hl(n(5601)),da=hl(n(84222)),ua=hl(n(87083)),ha=hl(n(35624)),va=hl(n(51081)),ma=hl(n(13322)),fa=hl(n(44894)),wa=hl(n(79349)),pa=hl(n(20508)),ga=hl(n(73215)),ya=hl(n(37110)),ba=hl(n(67287)),xa=hl(n(99679)),_a=hl(n(97285)),Ma=hl(n(24387)),za=hl(n(54204)),Ba=hl(n(46020)),Ha=hl(n(134)),Ca=hl(n(38860)),Va=hl(n(16168)),Oa=hl(n(95436)),La=hl(n(87501)),ka=hl(n(68008)),Sa=hl(n(97701)),Aa=hl(n(1806)),Ea=hl(n(85994)),Pa=hl(n(39191)),Ta=hl(n(36419)),ja=hl(n(30807)),qa=hl(n(55370)),Da=hl(n(2381)),Ia=hl(n(42149)),Fa=hl(n(5060)),Na=hl(n(54991)),Ra=hl(n(55766)),Wa=hl(n(75444)),Ua=hl(n(18672)),Ya=hl(n(74271)),Qa=hl(n(43728)),Xa=hl(n(35655)),Za=hl(n(88335)),Ga=hl(n(62791)),Ka=hl(n(73324)),$a=hl(n(9932)),Ja=hl(n(9232)),el=hl(n(68905)),tl=hl(n(79529)),nl=hl(n(85903)),ol=hl(n(68973)),sl=hl(n(68705)),al=hl(n(29085)),ll=hl(n(99444)),cl=hl(n(83357)),rl=hl(n(34386)),il=hl(n(68317)),dl=hl(n(10120)),ul=hl(n(31952));function hl(e){return 
e&&e.__esModule?e:{default:e}}t.iconsList={add_node:o.default,add_user:s.default,aggregation_avg:a.default,aggregation_max:l.default,aggregation_med:c.default,aggregation_min:r.default,aggregation_sum:i.default,aggregation_sum_abs:d.default,alarm:u.default,alarmCritical:h.default,alarmCriticalWarning:v.default,alarmFilled:m.default,alarmWarning:f.default,alarm_bell:w.default,alarms_new:p.default,alarm_off:g.default,anomaliesBrain:y.default,anomaliesLens:b.default,anomalyBadge:x.default,applications_hollow:_.default,applicationsSolid:tl.default,around_clock:M.default,arrow_down:z.default,arrow_w_line_left:B.default,arrow_w_line_right:H.default,arrow_left:C.default,arrow_s_down:V.default,arrow_s_left:O.default,arrows_vertical:L.default,bookmark:k.default,bullet_one:S.default,bullet_three:A.default,bullet_two:E.default,calendarFull:P.default,calendarFullPress:T.default,chart_added:j.default,chart_bars:q.default,chart_circle:D.default,chart_gauge:I.default,chart_pie:F.default,charts:N.default,charts_view:ol.default,check:R.default,checkmark_partial_s:W.default,checkmark_s:U.default,checkmark:Y.default,chevron_double:Q.default,chevron_down:X.default,chevron_down_thin:Z.default,chevron_expand:G.default,chevron_left:K.default,chevron_left_small:J.default,chevron_left_start:$.default,chevron_right:ee.default,chevron_right_end:ne.default,chevron_right_s:te.default,chevron_right_small:oe.default,chevron_up_thin:se.default,classError:ae.default,classLatency:le.default,classUtilization:ce.default,classWorkload:re.default,clock_hollow:ie.default,clock5Min:de.default,clock5MinPress:ue.default,close_circle:he.default,cluster:ve.default,cluster_spaces:me.default,code:fe.default,collapse:we.default,collect:pe.default,community:ge.default,connection_to_cloud:ye.default,connectivityStatusLive:be.default,connectivityStatusOffline:xe.default,connectivityStatusStale:_e.default,container:Me.default,controller_kind:ze.default,controller_name:Be.default,copy:He.default,correlation:Ce.default,correlation_inv:Ve.default,cpu:Oe.default,cross_s:Le.default,data_retention:ke.default,database:Se.default,dashboard:Ae.default,dashboard_add:Ee.default,dashboards:Pe.default,disk:Te.default,discoveredConfig:je.default,documentation:qe.default,dot:De.default,dots_2x3:Ie.default,download:Fe.default,dragHorizontal:Ne.default,dynamicConfig:Re.default,edit:We.default,error:Ue.default,exclamation:Ye.default,expand:Qe.default,favorites:Xe.default,feed:Ze.default,filter:Ge.default,filterList:Ke.default,firewallSolid:Ja.default,forcePlay:$e.default,forcePlayOutline:Je.default,functions:et.default,fullScreen:tt.default,gear:nt.default,github:ot.default,google:at.default,goToNode:st.default,group_by:lt.default,h1:ct.default,h2:rt.default,hamburger:it.default,help:dt.default,hide:ut.default,highlightArea:ht.default,holder:vt.default,importExport:mt.default,incident_manager:ft.default,information:wt.default,informationPress:pt.default,insights:gt.default,integrationAWSSNS:yt.default,integrationAWSSNSColored:bt.default,integrationDiscord:xt.default,integrationDiscordColored:_t.default,integrationEmail:Mt.default,integrationEmailColored:zt.default,integrationMattermost:Bt.default,integrationMattermostColored:Ht.default,integrationMobileAppColored:Ct.default,integrationOpsgenie:Vt.default,integrationOpsgenieColored:Ot.default,integrationPagerduty:Lt.default,integrationPagerdutyColored:kt.default,integrationRocketChat:St.default,integrationRocketChatColored:At.default,integrations:Et.default,internalConfig:Pt.default,integrationSlack:Tt.defaul
t,integrationSlackColored:jt.default,integrationSplunk:qt.default,integrationTeams:Dt.default,integrationTeamsColored:It.default,integrationTelegram:Ft.default,integrationTelegramColored:Nt.default,integrationVictorOps:Rt.default,integrationVictorOpsColored:Wt.default,integrationWebhook:Ut.default,integrationWebhookColored:Yt.default,ipNetworking:Qt.default,ipNetworkingPress:Xt.default,last_week:Zt.default,line_chart:Gt.default,logo_s:$t.default,logs:Kt.default,loading:Jt.default,long_arrow_up:en.default,magnify:tn.default,metrics:nn.default,metrics_explorer:on.default,minimize_s:sn.default,mobilePushNotifications:an.default,mobilePushNotificationsHollow:ln.default,monitoring:cn.default,more:rn.default,navLeft:dn.default,navRight:un.default,nav_arrow_goto:hn.default,nav_dots:vn.default,networkingStack:nl.default,netdata:mn.default,netdataAssistant:fn.default,netdataPress:wn.default,node:pn.default,node_child:gn.default,node_default_l:yn.default,node_hollow:bn.default,node_import_export:xn.default,node_notification_l:_n.default,node_parent:Mn.default,node_selected_l:zn.default,nodes:Bn.default,nodes_hollow:Hn.default,none_selected:Cn.default,nodes_update:sl.default,notification:so.default,notification_shortcut_enabled:Vn.default,notification_shortcut_disabled:On.default,notificationTrigger:Ln.default,okta:kn.default,openid:Sn.default,os:An.default,osAlpineLinux:En.default,osAmazonLinux:Pn.default,osArchLinux:Tn.default,osCelarOS:jn.default,osCentos:qn.default,osCentosColored:Dn.default,osCoreOS:In.default,osDebian:Fn.default,osDebianColored:Nn.default,osFedora:Rn.default,osFreeBSD:Wn.default,osGentoo:Un.default,osLinux:Yn.default,osLinuxColored:Qn.default,osLinuxManjaro:Xn.default,osMacOSX:Zn.default,osOracle:Gn.default,osOracleColored:Kn.default,osPress:$n.default,osRaspbian:Jn.default,osRedHat:eo.default,osSuseLinux:to.default,osUbuntu:no.default,osUbuntuColored:oo.default,padlock:ao.default,panTool:lo.default,pauseOutline:co.default,pauseSolid:ro.default,pencilSolid:uo.default,pencilOutline:io.default,pie_chart_skeleton:ho.default,pin_element:vo.default,playOutline:mo.default,playSolid:fo.default,plus:wo.default,plus_mini_s:po.default,pod:go.default,pricing:yo.default,print:bo.default,privacy:xo.default,pushNotifications:_o.default,qrCode:Mo.default,ram:Ho.default,qualityOfServiceSolid:el.default,question:zo.default,questionFilled:Bo.default,rearrange:Co.default,reduceSize:Vo.default,refresh:Oo.default,reload:Lo.default,removeNode:ko.default,resize_handler:So.default,rocket:Ao.default,room:Eo.default,room_home:Po.default,room_new:To.default,room_overview:jo.default,sad:qo.default,save:Do.default,save2:Io.default,scheduled:Fo.default,search:No.default,search_s:Ro.default,searchPress:Wo.default,serviceAlarm:u.default,serviceApache:Uo.default,serviceAsterisk:Yo.default,serviceApacheTomcat:Qo.default,serviceBeanstalk:Xo.default,serviceBind:Zo.default,serviceContainer:Go.default,serviceCoreDns:Ko.default,serviceCouchDB:$o.default,serviceDatabase:Jo.default,serviceDNS:es.default,serviceDNSmasq:ts.default,serviceDockerHubPress:ns.default,serviceDockerHub:os.default,serviceDotNet:ss.default,serviceEBPF:as.default,serviceElasticSearch:ls.default,serviceExample:cs.default,serviceFreeNAS:rs.default,serviceHAProxy:is.default,serviceHTTPCheck:ds.default,serviceIceCast:us.default,serviceInfluxDB:hs.default,serviceIPFS:vs.default,serviceIPVS:ms.default,serviceKubernetes:fs.default,serviceLighthttpd:ws.default,serviceLighthttpd2:ps.default,serviceLiteSpeed:gs.default,serviceLxc:ys.default,serviceMariaDB:
bs.default,serviceMemCached:xs.default,serviceMongoDB:_s.default,serviceMySQL:Ms.default,serviceMySQLPress:zs.default,serviceNginx:Bs.default,serviceNginxLocal:Hs.default,serviceNginxPlus:Cs.default,serviceNtpd:Vs.default,serviceNvidia:Os.default,serviceNtpdPress:Ls.default,serviceOpenStack:ks.default,serviceOpenWrt:Ss.default,servicePan:As.default,servicePandas:Es.default,servicePercona:Ps.default,servicePfSense:Ts.default,servicePhpFpm:js.default,servicePostgreSQL:qs.default,servicePrometheus:Ds.default,serviceProxySQL:Is.default,serviceRabbitMQ:Fs.default,serviceRandom:Ns.default,serviceRedis:Rs.default,serviceRethinkDB:Ws.default,serviceRetroShare:Us.default,serviceSelectedArea:Ys.default,serviceSendgrid:Qs.default,services:Xs.default,servicesSmartdlog:Zs.default,serviceSolr:Gs.default,serviceSquid:Ks.default,serviceSummaryStatistic:$s.default,serviceSystemd:Js.default,serviceTraefik:ea.default,serviceVarnish:ta.default,serviceWebLog:na.default,serviceWebLogNginx:oa.default,serviceX509Check:sa.default,serviceXen:aa.default,settings:la.default,settings_h:ca.default,sign_in:ra.default,sorting_vertical:ia.default,sort_ascending:ua.default,sorting_asc:da.default,sort_descending:va.default,sorting_desc:ha.default,sort_indicator:ma.default,space:fa.default,space_new:wa.default,spaces_v2:pa.default,stockConfig:ga.default,switch_off:ya.default,system_overview:ba.default,systemOverviewPress:xa.default,text_add:_a.default,thumb_down:Ma.default,thumb_up:za.default,tiny_buttons:Ba.default,training:Ha.default,trashcan:Ca.default,triangle:Va.default,triangle_down:Oa.default,top:La.default,unknownError:ka.default,universe:Sa.default,unreachable:Aa.default,unreachableNode:Ea.default,update:Pa.default,update_pending:Ta.default,upload:ja.default,userConfig:qa.default,user:Da.default,userPress:Ia.default,users:Fa.default,value:Na.default,view_list:Ra.default,viewSingleNode:Wa.default,viewSingleNodePress:Ua.default,virtualization:Ya.default,warning:Qa.default,warning_triangle:Xa.default,warning_triangle_hollow:Za.default,weights_compare:Ga.default,weights_drill_down:Ka.default,x:$a.default,zoomIn:al.default,zoomOut:ll.default,zoomReset:cl.default,N:rl.default,I:il.default,D:dl.default,L:ul.default}},34247:(e,t,n)=>{"use strict";t.__esModule=!0;var o={iconsList:!0,IconComponents:!0};t.iconsList=t.IconComponents=void 0;var s=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var n=c(t);if(n&&n.has(e))return n.get(e);var o={__proto__:null},s=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var a in e)if("default"!==a&&{}.hasOwnProperty.call(e,a)){var l=s?Object.getOwnPropertyDescriptor(e,a):null;l&&(l.get||l.set)?Object.defineProperty(o,a,l):o[a]=e[a]}return o.default=e,n&&n.set(e,o),o}(n(9736));t.IconComponents=s;var a=n(38857);t.iconsList=a.iconsList;var l=n(63534);function c(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,n=new WeakMap;return(c=function(e){return e?n:t})(e)}Object.keys(l).forEach((function(e){"default"!==e&&"__esModule"!==e&&(Object.prototype.hasOwnProperty.call(o,e)||e in t&&t[e]===l[e]||(t[e]=l[e]))}))},318:(e,t,n)=>{"use strict";t.__esModule=!0;var o={MultiRangeInput:!0,useTouchedState:!0,useFocusedState:!0,useInputValue:!0};t.useTouchedState=t.useInputValue=t.useFocusedState=t.MultiRangeInput=void 0;var s=n(65478);Object.keys(s).forEach((function(e){"default"!==e&&"__esModule"!==e&&(Object.prototype.hasOwnProperty.call(o,e)||e in t&&t[e]===s[e]||(t[e]=s[e]))}));var 
a,l=(a=n(11421))&&a.__esModule?a:{default:a};t.MultiRangeInput=l.default;var c=n(58926);t.useTouchedState=c.useTouchedState;var r=n(87773);t.useFocusedState=r.useFocusedState;var i=n(85956);t.useInputValue=i.useInputValue},65478:(e,t,n)=>{"use strict";t.__esModule=!0,t.TextInput=void 0;var o=r(n(96540)),s=r(n(15877)),a=n(92019),l=n(9963),c=["error","disabled","iconLeft","iconRight","name","onFocus","onBlur","className","hint","fieldIndicator","placeholder","label","value","inputRef","size","containerStyles","inputContainerStyles","hideErrorMessage"];function r(e){return e&&e.__esModule?e:{default:e}}function i(){return i=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(s[n]=e[n]);return s}(e,c);return o.default.createElement(s.default,i({gap:.5,column:!0,className:f},B,{as:"label"}),"string"===typeof b?o.default.createElement(l.LabelText,{size:z},b):b,o.default.createElement(s.default,i({position:"relative"},H),r&&o.default.createElement(s.default,{position:"absolute",left:1,top:0,bottom:0,alignItems:"center"},r),o.default.createElement(l.Input,i({disabled:n,placeholder:y,onBlur:m,onFocus:v,name:h,"aria-label":h,hasIconLeft:!!r,hasIconRight:!!u,hasIndicator:!!p,type:"text",value:x,size:z,ref:_,error:t,hasValue:!!x},V)),(!!u||!!p)&&o.default.createElement(s.default,{position:"absolute",right:1,top:0,bottom:0,alignItems:"center",gap:1},!!p&&o.default.createElement(a.TextMicro,{color:"textLite"},p),!!u&&u)),"string"===typeof w?o.default.createElement(a.TextMicro,{color:"textLite"},w):!!w&&w,C?null:o.default.createElement(d,{error:t}))}},11421:(e,t,n)=>{"use strict";t.__esModule=!0,t.default=void 0;var o,s=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var n=i(t);if(n&&n.has(e))return n.get(e);var o={__proto__:null},s=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var a in e)if("default"!==a&&{}.hasOwnProperty.call(e,a)){var l=s?Object.getOwnPropertyDescriptor(e,a):null;l&&(l.get||l.set)?Object.defineProperty(o,a,l):o[a]=e[a]}return o.default=e,n&&n.set(e,o),o}(n(96540)),a=(o=n(15877))&&o.__esModule?o:{default:o},l=n(92019),c=n(42318),r=["initMax","initMin","max","min","onChange","onInput","step","TextComponent"];function i(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,n=new WeakMap;return(i=function(e){return e?n:t})(e)}function d(){return d=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(s[n]=e[n]);return s}(e,r),b=(0,s.useState)(t||i),x=b[0],_=b[1],M=(0,s.useState)(n||h),z=M[0],B=M[1],H=(0,s.useState)(0),C=H[0],V=H[1],O=(0,s.useRef)(null),L=(0,s.useRef)(null);(0,s.useEffect)((function(){O.current&&V(O.current.getBoundingClientRect().width)}),[z]),(0,s.useEffect)((function(){_(t||i),B(n||h)}),[i,h]);return s.default.createElement(a.default,{column:!0,gap:1,flex:!0},s.default.createElement(a.default,{alignItems:"center","data-testid":"multiRangeInput",justifyContent:"center",position:"relative",width:"100%"},s.default.createElement(c.Range,d({"data-testid":"minRangeInput",max:i,min:h,onChange:function(e){v&&v({max:x,min:e.target.value})},onInput:function(e){var t=Math.min(+e.target.value,x-w);B(t),e.target.value=t.toString(),m&&m({max:x,min:e.target.value})},position:"relative",ref:L,step:w,value:z,zIndex:3},y)),s.default.createElement(c.Range,d({"data-testid":"maxRangeInput",max:i,min:h,onChange:function(e){v&&v({max:e.target.value,min:z})},onInput:function(e){var 
t=Math.max(+e.target.value,z+w);_(t),e.target.value=t.toString(),m&&m({max:e.target.value,min:z})},ref:O,step:w,value:x,zIndex:5},y)),s.default.createElement(c.Slider,{"data-testid":"multiRange-slider"},s.default.createElement(c.SliderTrack,{"data-testid":"multiRange-sliderTrack",max:i,maxValue:x,min:h,minValue:z,width:C}))),s.default.createElement(a.default,{"data-testid":"multiRange-values",justifyContent:"between"},s.default.createElement(g,{"data-testid":"multiRange-minValue"},z),s.default.createElement(g,{"data-testid":"multiRange-maxValue"},x)))}},42318:(e,t,n)=>{"use strict";t.__esModule=!0,t.SliderTrack=t.Slider=t.Range=void 0;var o=l(n(8711)),s=l(n(8043)),a=l(n(93980));function l(e){return e&&e.__esModule?e:{default:e}}t.Slider=(0,o.default)(s.default).withConfig({displayName:"styled__Slider",componentId:"sc-y2g216-0"})([""]),t.SliderTrack=(0,o.default)(s.default).withConfig({displayName:"styled__SliderTrack",componentId:"sc-y2g216-1"})(["background-position:",";background-color:","40;background-image:linear-gradient( ",","," );background-repeat:no-repeat;background-size:",";height:2px;width:",";"],(function(e){var t=e.max,n=e.min,o=e.minValue;return e.width*((o-n)/(t-n))*100/100+"px 100%"}),(function(e){return e.theme.colors.primary}),(function(e){return e.theme.colors.primary}),(function(e){return e.theme.colors.primary}),(function(e){var t=e.max,n=e.maxValue,o=e.min;return 100*(n-e.minValue)/(t-o)+"% 100%"}),(function(e){return e.width+"px"||0})),t.Range=(0,o.default)(a.default).withConfig({displayName:"styled__Range",componentId:"sc-y2g216-2"})(["pointer-events:none;position:absolute;height:0;outline:none;width:100%;&::-webkit-slider-thumb{pointer-events:all;}"])},93980:(e,t,n)=>{"use strict";t.__esModule=!0,t.default=void 0;var o=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var n=l(t);if(n&&n.has(e))return n.get(e);var o={__proto__:null},s=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var a in e)if("default"!==a&&{}.hasOwnProperty.call(e,a)){var c=s?Object.getOwnPropertyDescriptor(e,a):null;c&&(c.get||c.set)?Object.defineProperty(o,a,c):o[a]=e[a]}return o.default=e,n&&n.set(e,o),o}(n(96540)),s=n(43773),a=["max","min","step","value"];function l(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,n=new WeakMap;return(l=function(e){return e?n:t})(e)}function c(){return c=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(s[n]=e[n]);return s}(e,a);return o.default.createElement(s.InputRange,c({"data-testid":"rangeInput",max:l,min:i,step:u,type:"range",value:v,ref:t},m))}));t.default=r},43773:(e,t,n)=>{"use strict";t.__esModule=!0,t.InputRange=void 0;var o,s=(o=n(8711))&&o.__esModule?o:{default:o},a=n(41234);t.InputRange=s.default.input.attrs({type:"range"}).withConfig({displayName:"styled__InputRange",componentId:"sc-17kts71-0"})(["-webkit-appearance:none;height:2px;background-color:","40;background-image:linear-gradient(",",",");background-repeat:no-repeat;background-size:",";cursor:pointer;width:100%;&::-webkit-slider-thumb{-webkit-appearance:none;height:10px;width:10px;border-radius:50%;background:",";transition:all 0.3s ease-in-out;&:active{height:16px;width:16px;}}"],(0,a.getColor)("primary"),(0,a.getColor)("primary"),(0,a.getColor)("primary"),(function(e){var t=e.max;return 100*e.value/t+"% 100%"}),(0,a.getColor)("primary"))},9963:(e,t,n)=>{"use 
strict";t.__esModule=!0,t.SuccessIcon=t.StyledLabel=t.StyledIcon=t.MetaInfo=t.MetaContainer=t.LabelText=t.Input=t.IconContainer=t.FieldInfo=t.ErrorIcon=void 0;var o,s=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var n=i(t);if(n&&n.has(e))return n.get(e);var o={__proto__:null},s=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var a in e)if("default"!==a&&{}.hasOwnProperty.call(e,a)){var l=s?Object.getOwnPropertyDescriptor(e,a):null;l&&(l.get||l.set)?Object.defineProperty(o,a,l):o[a]=e[a]}return o.default=e,n&&n.set(e,o),o}(n(8711)),a=n(34247),l=(o=n(15877))&&o.__esModule?o:{default:o},c=n(92019),r=n(34867);function i(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,n=new WeakMap;return(i=function(e){return e?n:t})(e)}function d(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);t&&(o=o.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,o)}return n}function u(e){for(var t=1;t{"use strict";t.__esModule=!0,t.useFocusedState=void 0;var o=n(96540);t.useFocusedState=function(e){var t=e.defaultState,n=void 0!==t&&t,s=e.onBlur,a=e.onFocus,l=(0,o.useState)(n),c=l[0],r=l[1],i=(0,o.useCallback)((function(e){c||r(!0),a&&a(e)}),[a,c]),d=(0,o.useCallback)((function(e){r(!1),s&&s(e)}),[s]);return[c,i,d]}},85956:(e,t,n)=>{"use strict";t.__esModule=!0,t.useInputValue=void 0;var o=n(96540);t.useInputValue=function(e){var t=e.value,n=void 0===t?"":t,s=e.onChange,a=e.maxChars,l=(0,o.useState)(n),c=l[0],r=l[1],i=(0,o.useState)(!1),d=i[0],u=i[1],h=(0,o.useCallback)((function(e){var t=e.target.value;if(a&&t.length>a)return e.preventDefault(),void e.stopPropagation();r(t),d||u(!0),s&&s(e)}),[d,a,s]),v=a?c.length+"/"+a:"",m=(0,o.useCallback)((function(e){void 0===e&&(e=""),r(e),u(!1)}),[]);return[c,h,v,d,{setIsDirty:u,setValue:r,resetValue:m}]}},58926:(e,t,n)=>{"use strict";t.__esModule=!0,t.useTouchedState=void 0;var o=n(96540);t.useTouchedState=function(e){var t=e.onBlur,n=e.defaultState,s=void 0!==n&&n,a=(0,o.useState)(s),l=a[0],c=a[1],r=(0,o.useCallback)((function(e){l||c(!0),t&&t(e)}),[t,l]);return[l,r,c]}},94474:(e,t,n)=>{"use strict";t.__esModule=!0,t.default=void 0;var o,s=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var n=d(t);if(n&&n.has(e))return n.get(e);var o={__proto__:null},s=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var a in e)if("default"!==a&&{}.hasOwnProperty.call(e,a)){var l=s?Object.getOwnPropertyDescriptor(e,a):null;l&&(l.get||l.set)?Object.defineProperty(o,a,l):o[a]=e[a]}return o.default=e,n&&n.set(e,o),o}(n(96540)),a=(o=n(44048))&&o.__esModule?o:{default:o},l=n(34247),c=n(318),r=n(37466),i=["value","onChange","onReset","placeholder"];function d(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,n=new WeakMap;return(d=function(e){return e?n:t})(e)}function u(){return u=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(s[n]=e[n]);return s}(e,i),f=(0,s.useState)(o),w=f[0],p=f[1];return(0,a.default)((function(){return d(w)}),300,[w]),s.default.createElement(c.TextInput,u({iconLeft:s.default.createElement(l.Icon,{name:"search",color:w?"textFocus":"textLite",width:"14px",height:"14px"}),iconRight:(!!w||!!o)&&s.default.createElement(r.IconButton,{icon:"x",iconColor:w?"textFocus":"textLite",width:"14px",height:"14px",onClick:function(){return 
h?h():p("")},padding:[0],neutral:!0}),inputRef:t,value:w,onChange:function(e){return p(e.target.value)},placeholder:v,size:"small"},m))}));t.default=h},53162:(e,t,n)=>{"use strict";t.__esModule=!0,t.sx=t.default=void 0;var o=_(n(8711)),s=_(n(27988)),a=_(n(32305)),l=_(n(36094)),c=_(n(62703)),r=_(n(31886)),i=_(n(25320)),d=_(n(96029)),u=_(n(88725)),h=_(n(86397)),v=_(n(11564)),m=_(n(48488)),f=_(n(26278)),w=_(n(90836)),p=_(n(85103)),g=_(n(92108)),y=_(n(48299)),b=n(50677),x=_(n(50402));function _(e){return e&&e.__esModule?e:{default:e}}var M=t.sx=function(e){return(0,x.default)(e.sx)(e)};t.default=function(e){return(0,o.default)(e).withConfig({displayName:"box",componentId:"sc-12jmtj1-0"})(["box-sizing:border-box;"," "," "," "," "," "," "," "," "," "," "," "," "," "," "," "," "," ",""],w.default,s.default,i.default,a.default,l.default,p.default,v.default,h.default,f.default,r.default,g.default,c.default,m.default,d.default,u.default,y.default,b.position,M)}},8043:(e,t,n)=>{"use strict";var o;t.__esModule=!0,t.default=void 0;var s=(0,((o=n(53162))&&o.__esModule?o:{default:o}).default)("div");t.default=s},89075:(e,t)=>{"use strict";t.__esModule=!0,t.default=void 0;t.default=function(e){var t=e.backdropBlur;return t?"boolean"===typeof t?"backdrop-filter: blur(10px);":"number"===typeof t?"backdrop-filter: blur("+t+"px);":"backdrop-filter: blur("+t+");":""}},92019:(e,t,n)=>{"use strict";t.__esModule=!0,t.TextSmall=t.TextNano=t.TextMicro=t.TextHuge=t.TextFemto=t.TextBigger=t.TextBig=t.Text=t.ListItem=t.List=t.H6=t.H5=t.H4=t.H3=t.H2=t.H1=t.H0=void 0;var o=n(36350);t.makeH0=o.makeH0,t.makeH1=o.makeH1,t.makeH2=o.makeH2,t.makeH3=o.makeH3,t.makeH4=o.makeH4,t.makeH5=o.makeH5,t.makeH6=o.makeH6,t.makeTypography=o.makeTypography,t.makeFemto=o.makeFemto,t.makeNano=o.makeNano,t.makeMicro=o.makeMicro,t.makeSmall=o.makeSmall,t.makeText=o.makeText,t.makeBig=o.makeBig,t.makeBigger=o.makeBigger,t.makeHuge=o.makeHuge;var s=n(37079);t.List=s.List,t.ListItem=s.ListItem;t.H0=(0,o.makeH0)("h1"),t.H1=(0,o.makeH1)("h1"),t.H2=(0,o.makeH2)("h2"),t.H3=(0,o.makeH3)("h3"),t.H4=(0,o.makeH4)("h4"),t.H5=(0,o.makeH5)("h5"),t.H6=(0,o.makeH6)("h6"),t.TextFemto=(0,o.makeFemto)("span"),t.TextNano=(0,o.makeNano)("span"),t.TextMicro=(0,o.makeMicro)("span"),t.TextSmall=(0,o.makeSmall)("span"),t.Text=(0,o.makeText)("span"),t.TextBig=(0,o.makeBig)("span"),t.TextBigger=(0,o.makeBigger)("span"),t.TextHuge=(0,o.makeHuge)("span")},37079:(e,t,n)=>{"use strict";t.__esModule=!0,t.ListItem=t.List=void 0;var o=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var n=r(t);if(n&&n.has(e))return n.get(e);var o={__proto__:null},s=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var a in e)if("default"!==a&&{}.hasOwnProperty.call(e,a)){var l=s?Object.getOwnPropertyDescriptor(e,a):null;l&&(l.get||l.set)?Object.defineProperty(o,a,l):o[a]=e[a]}return o.default=e,n&&n.set(e,o),o}(n(8711)),s=c(n(27988)),a=c(n(32305)),l=c(n(36094));function c(e){return e&&e.__esModule?e:{default:e}}function r(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,n=new WeakMap;return(r=function(e){return e?n:t})(e)}var i=(0,o.css)([""," "," ",""],s.default,a.default,l.default);t.List=o.default.ul.withConfig({displayName:"list__List",componentId:"sc-ln4euz-0"})(["list-style-type:disc;list-style-position:outside;padding-left:28px;",""],i),t.ListItem=o.default.li.withConfig({displayName:"list__ListItem",componentId:"sc-ln4euz-1"})(["line-height:22px;padding-left:9px;",""],i)},43119:(e,t,n)=>{"use 
strict";t.__esModule=!0,t.default=void 0;var o=n(96540);t.default=function(e){return(0,o.useEffect)((function(){if(e){var t=function(t){27===t.keyCode&&e(t)};return document.addEventListener("keydown",t),function(){return document.removeEventListener("keydown",t)}}}),[e])}},35586:(e,t,n)=>{"use strict";t.__esModule=!0,t.default=void 0;var o,s=n(96540),a=(o=n(2310))&&o.__esModule?o:{default:o};t.default=function(e,t,n,o){(0,s.useEffect)((function(){if(t&&!o){var s=function(o){o.target===e.current||(0,a.default)(e.current,o.target)||(0,a.default)(n,o.target)||t(o)};return document.addEventListener("mousedown",s),function(){return document.removeEventListener("mousedown",s)}}}),[t])}},23627:(e,t,n)=>{"use strict";t.__esModule=!0,t.default=void 0;var o=n(96540);t.default=function(e,t){var n=void 0===t?{}:t,s=n.on,a=n.off,l=n.toggle,c=(0,o.useState)(!!e),r=c[0],i=c[1];return[r,(0,o.useCallback)((function(e){return i((function(t){var n="boolean"===typeof e?e:!t;return l&&l(n),s&&n&&s(),a&&!n&&a(),n}))}),[l,s,a]),(0,o.useCallback)((function(){i(!0),s&&s()}),[s]),(0,o.useCallback)((function(){i(!1),a&&a()}),[a])]}},3639:(e,t)=>{"use strict";t.__esModule=!0,t.default=void 0;var n=0;t.default=function(){return--n}},54481:(e,t)=>{"use strict";function n(e){return Array.isArray(e)}function o(e){var t=typeof e;return null!=e&&("object"===t||"function"===t)&&!n(e)}t.__esModule=!0,t.isArray=n,t.isEmptyObject=function(e){return o(e)&&0===Object.keys(e).length},t.isFunction=function(e){return"function"===typeof e},t.isObject=o},69114:(e,t)=>{"use strict";t.__esModule=!0,t.default=void 0;t.default=function(e,t){var n=t?e.toLowerCase():e;return n.charAt(0).toUpperCase()+n.slice(1)}},51462:(e,t,n)=>{"use strict";t.__esModule=!0;var o={mergeRefs:!0,capitalizeFirstLetter:!0};t.mergeRefs=t.capitalizeFirstLetter=void 0;var s=n(27184);t.mergeRefs=s.mergeRefs;var a=n(54481);Object.keys(a).forEach((function(e){"default"!==e&&"__esModule"!==e&&(Object.prototype.hasOwnProperty.call(o,e)||e in t&&t[e]===a[e]||(t[e]=a[e]))}));var l,c=(l=n(69114))&&l.__esModule?l:{default:l};t.capitalizeFirstLetter=c.default},27184:(e,t,n)=>{"use strict";t.__esModule=!0,t.assignRef=s,t.mergeRefs=function(){for(var e=arguments.length,t=new Array(e),n=0;n{"use strict";n.d(t,{Ay:()=>K});var o=n(47168),s=n.n(o),a=n(12020);const l=e=>e&&e.enabled&&e.modifierKey,c=(e,t)=>e&&t[e+"Key"],r=(e,t)=>e&&!t[e+"Key"];function i(e,t,n){return void 0===e||("string"===typeof e?-1!==e.indexOf(t):"function"===typeof e&&-1!==e({chart:n}).indexOf(t))}function d(e,t){return"function"===typeof e&&(e=e({chart:t})),"string"===typeof e?{x:-1!==e.indexOf("x"),y:-1!==e.indexOf("y")}:{x:!1,y:!1}}function u(e,t,n){const{mode:o="xy",scaleMode:s,overScaleMode:l}=e||{},c=function({x:e,y:t},n){const o=n.scales,s=Object.keys(o);for(let a=0;a=n.top&&t<=n.bottom&&e>=n.left&&e<=n.right)return n}return null}(t,n),r=d(o,n),i=d(s,n);if(l){const e=d(l,n);for(const t of["x","y"])e[t]&&(i[t]=r[t],r[t]=!1)}if(c&&i[c.axis])return[c];const u=[];return(0,a.F)(n.scales,(function(e){r[e.axis]&&u.push(e)})),u}const h=new WeakMap;function v(e){let t=h.get(e);return t||(t={originalScaleLimits:{},updatedScaleLimits:{},handlers:{},panDelta:{}},h.set(e,t)),t}function m(e,t,n){const o=e.max-e.min,s=o*(t-1),a=e.isHorizontal()?n.x:n.y,l=Math.max(0,Math.min(1,(e.getValueForPixel(a)-e.min)/o||0));return{min:s*l,max:s*(1-l)}}function f(e,t,n,o,s){let l=n[o];if("original"===l){const n=e.originalScaleLimits[t.id][o];l=(0,a.v)(n.options,n.scale)}return(0,a.v)(l,s)}function 
w(e,{min:t,max:n},o,s=!1){const a=v(e.chart),{id:l,axis:c,options:r}=e,i=o&&(o[l]||o[c])||{},{minRange:d=0}=i,u=f(a,e,i,"min",-1/0),h=f(a,e,i,"max",1/0),m=s?Math.max(n-t,d):e.max-e.min,w=(m-n+t)/2;return n+=w,(t-=w)h&&(n=h,t=Math.max(h-m,u)),r.min=t,r.max=n,a.updatedScaleLimits[e.id]={min:t,max:n},e.parse(t)!==e.min||e.parse(n)!==e.max}const p=e=>0===e||isNaN(e)?0:e<0?Math.min(Math.round(e),-1):Math.max(Math.round(e),1);const g={second:500,minute:3e4,hour:18e5,day:432e5,week:3024e5,month:1296e6,quarter:5184e6,year:157248e5};function y(e,t,n,o=!1){const{min:s,max:a,options:l}=e,c=l.time&&l.time.round,r=g[c]||0,i=e.getValueForPixel(e.getPixelForValue(s+r)-t),d=e.getValueForPixel(e.getPixelForValue(a+r)-t),{min:u=-1/0,max:h=1/0}=o&&n&&n[e.axis]||{};return!!(isNaN(i)||isNaN(d)||ih)||w(e,{min:i,max:d},n,o)}function b(e,t,n){return y(e,t,n,!0)}const x={category:function(e,t,n,o){const s=m(e,t,n);return e.min===e.max&&t<1&&function(e){const t=e.getLabels().length-1;e.min>0&&(e.min-=1),e.maxc&&(s=Math.max(0,s-r),a=1===l?s:s+l,i=0===s),w(e,{min:s,max:a},n)||i},default:y,logarithmic:b,timeseries:b};function z(e,t){(0,a.F)(e,((n,o)=>{t[o]||delete e[o]}))}function B(e,t){const{scales:n}=e,{originalScaleLimits:o,updatedScaleLimits:s}=t;return(0,a.F)(n,(function(e){(function(e,t,n){const{id:o,options:{min:s,max:a}}=e;if(!t[o]||!n[o])return!0;const l=n[o];return l.min!==s||l.max!==a})(e,o,s)&&(o[e.id]={min:{scale:e.min,options:e.options.min},max:{scale:e.max,options:e.options.max}})})),z(o,n),z(s,n),o}function H(e,t,n,o){const s=x[e.type]||x.default;(0,a.Q)(s,[e,t,n,o])}function C(e,t,n,o,s){const l=_[e.type]||_.default;(0,a.Q)(l,[e,t,n,o,s])}function V(e){const t=e.chartArea;return{x:(t.left+t.right)/2,y:(t.top+t.bottom)/2}}function O(e,t,n="none"){const{x:o=1,y:s=1,focalPoint:l=V(e)}="number"===typeof t?{x:t,y:t}:t,c=v(e),{options:{limits:r,zoom:i}}=c;B(e,c);const d=1!==o,h=1!==s,m=u(i,l,e);(0,a.F)(m||e.scales,(function(e){e.isHorizontal()&&d?H(e,o,l,r):!e.isHorizontal()&&h&&H(e,s,l,r)})),e.update(n),(0,a.Q)(i.onZoom,[{chart:e}])}function L(e,t,n,o="none"){const s=v(e),{options:{limits:l,zoom:c}}=s,{mode:r="xy"}=c;B(e,s);const d=i(r,"x",e),u=i(r,"y",e);(0,a.F)(e.scales,(function(e){e.isHorizontal()&&d?C(e,t.x,n.x,l):!e.isHorizontal()&&u&&C(e,t.y,n.y,l)})),e.update(o),(0,a.Q)(c.onZoom,[{chart:e}])}function k(e){const t=v(e);let n=1,o=1;return(0,a.F)(e.scales,(function(e){const s=function(e,t){const n=e.originalScaleLimits[t];if(!n)return;const{min:o,max:s}=n;return(0,a.v)(s.options,s.scale)-(0,a.v)(o.options,o.scale)}(t,e.id);if(s){const t=Math.round(s/(e.max-e.min)*100)/100;n=Math.min(n,t),o=Math.max(o,t)}})),n<1?n:o}function S(e,t,n,o){const{panDelta:s}=o,l=s[e.id]||0;(0,a.s)(l)===(0,a.s)(t)&&(t+=l);const c=M[e.type]||M.default;(0,a.Q)(c,[e,t,n])?s[e.id]=0:s[e.id]=t}function A(e,t,n,o="none"){const{x:s=0,y:l=0}="number"===typeof t?{x:t,y:t}:t,c=v(e),{options:{pan:r,limits:i}}=c,{onPan:d}=r||{};B(e,c);const u=0!==s,h=0!==l;(0,a.F)(n||e.scales,(function(e){e.isHorizontal()&&u?S(e,s,i,c):!e.isHorizontal()&&h&&S(e,l,i,c)})),e.update(o),(0,a.Q)(d,[{chart:e}])}function E(e){const t=v(e);B(e,t);const n={};for(const o of Object.keys(e.scales)){const{min:e,max:s}=t.originalScaleLimits[o]||{min:{},max:{}};n[o]={min:e.scale,max:s.scale}}return n}function P(e,t){const{handlers:n}=v(e),o=n[t];o&&o.target&&(o.target.removeEventListener(t,o),delete n[t])}function T(e,t,n,o){const{handlers:s,options:a}=v(e),l=s[n];l&&l.target===t||(P(e,n),s[n]=t=>o(e,t,a),s[n].target=t,t.addEventListener(n,s[n]))}function 
j(e,t){const n=v(e);n.dragStart&&(n.dragging=!0,n.dragEnd=t,e.update("none"))}function q(e,t){const n=v(e);n.dragStart&&"Escape"===t.key&&(P(e,"keydown"),n.dragging=!1,n.dragStart=n.dragEnd=null,e.update("none"))}function D(e,t,n){const{onZoomStart:o,onZoomRejected:s}=n;if(o){const n=(0,a.z)(t,e);if(!1===(0,a.Q)(o,[{chart:e,event:t,point:n}]))return(0,a.Q)(s,[{chart:e,event:t}]),!1}}function I(e,t){const n=v(e),{pan:o,zoom:s={}}=n.options;if(0!==t.button||c(l(o),t)||r(l(s.drag),t))return(0,a.Q)(s.onZoomRejected,[{chart:e,event:t}]);!1!==D(e,t,s)&&(n.dragStart=t,T(e,e.canvas,"mousemove",j),T(e,window.document,"keydown",q))}function F(e,t,n,o){const s=i(t,"x",e),l=i(t,"y",e);let{top:c,left:r,right:d,bottom:u,width:h,height:v}=e.chartArea;const m=(0,a.z)(n,e),f=(0,a.z)(o,e);s&&(r=Math.min(m.x,f.x),d=Math.max(m.x,f.x)),l&&(c=Math.min(m.y,f.y),u=Math.max(m.y,f.y));const w=d-r,p=u-c;return{left:r,top:c,right:d,bottom:u,width:w,height:p,zoomX:s&&w?1+(h-w)/h:1,zoomY:l&&p?1+(v-p)/v:1}}function N(e,t){const n=v(e);if(!n.dragStart)return;P(e,"mousemove");const{mode:o,onZoomComplete:s,drag:{threshold:l=0}}=n.options.zoom,c=F(e,o,n.dragStart,t),r=i(o,"x",e)?c.width:0,d=i(o,"y",e)?c.height:0,u=Math.sqrt(r*r+d*d);if(n.dragStart=n.dragEnd=null,u<=l)return n.dragging=!1,void e.update("none");L(e,{x:c.left,y:c.top},{x:c.right,y:c.bottom},"zoom"),setTimeout((()=>n.dragging=!1),500),(0,a.Q)(s,[{chart:e}])}function R(e,t){const{handlers:{onZoomComplete:n},options:{zoom:o}}=v(e);if(!function(e,t,n){if(r(l(n.wheel),t))(0,a.Q)(n.onZoomRejected,[{chart:e,event:t}]);else if(!1!==D(e,t,n)&&(t.cancelable&&t.preventDefault(),void 0!==t.deltaY))return!0}(e,t,o))return;const s=t.target.getBoundingClientRect(),c=1+(t.deltaY>=0?-o.wheel.speed:o.wheel.speed);O(e,{x:c,y:c,focalPoint:{x:t.clientX-s.left,y:t.clientY-s.top}}),n&&n()}function W(e,t,n,o){n&&(v(e).handlers[t]=function(e,t){let n;return function(){return clearTimeout(n),n=setTimeout(e,t),t}}((()=>(0,a.Q)(n,[{chart:e}])),o))}function U(e,t){return function(n,o){const{pan:s,zoom:i={}}=t.options;if(!s||!s.enabled)return!1;const d=o&&o.srcEvent;return!d||(!(!t.panning&&"mouse"===o.pointerType&&(r(l(s),d)||c(l(i.drag),d)))||((0,a.Q)(s.onPanRejected,[{chart:e,event:o}]),!1))}}function Y(e,t,n){if(t.scale){const{center:o,pointers:s}=n,a=1/t.scale*n.scale,l=n.target.getBoundingClientRect(),c=function(e,t){const n=Math.abs(e.clientX-t.clientX),o=Math.abs(e.clientY-t.clientY),s=n/o;let a,l;return s>.3&&s<1.7?a=l=!0:n>o?a=!0:l=!0,{x:a,y:l}}(s[0],s[1]),r=t.options.zoom.mode;O(e,{x:c.x&&i(r,"x",e)?a:1,y:c.y&&i(r,"y",e)?a:1,focalPoint:{x:o.x-l.left,y:o.y-l.top}}),t.scale=n.scale}}function Q(e,t,n){const o=t.delta;o&&(t.panning=!0,A(e,{x:n.deltaX-o.x,y:n.deltaY-o.y},t.panScales),t.delta={x:n.deltaX,y:n.deltaY})}const X=new WeakMap;function Z(e,t){const n=v(e),o=e.canvas,{pan:l,zoom:c}=t,r=new(s().Manager)(o);c&&c.pinch.enabled&&(r.add(new(s().Pinch)),r.on("pinchstart",(()=>function(e,t){t.options.zoom.pinch.enabled&&(t.scale=1)}(0,n))),r.on("pinch",(t=>Y(e,n,t))),r.on("pinchend",(t=>function(e,t,n){t.scale&&(Y(e,t,n),t.scale=null,(0,a.Q)(t.options.zoom.onZoomComplete,[{chart:e}]))}(e,n,t)))),l&&l.enabled&&(r.add(new(s().Pan)({threshold:l.threshold,enable:U(e,n)})),r.on("panstart",(t=>function(e,t,n){const{enabled:o,onPanStart:s,onPanRejected:l}=t.options.pan;if(!o)return;const 
c=n.target.getBoundingClientRect(),r={x:n.center.x-c.left,y:n.center.y-c.top};if(!1===(0,a.Q)(s,[{chart:e,event:n,point:r}]))return(0,a.Q)(l,[{chart:e,event:n}]);t.panScales=u(t.options.pan,r,e),t.delta={x:0,y:0},clearTimeout(t.panEndTimeout),Q(e,t,n)}(e,n,t))),r.on("panmove",(t=>Q(e,n,t))),r.on("panend",(()=>function(e,t){t.delta=null,t.panning&&(t.panEndTimeout=setTimeout((()=>t.panning=!1),500),(0,a.Q)(t.options.pan.onPanComplete,[{chart:e}]))}(e,n)))),X.set(e,r)}function G(e,t,n){const o=n.zoom.drag,{dragStart:s,dragEnd:a}=v(e);if(o.drawTime!==t||!a)return;const{left:l,top:c,width:r,height:i}=F(e,n.zoom.mode,s,a),d=e.ctx;d.save(),d.beginPath(),d.fillStyle=o.backgroundColor||"rgba(225,225,225,0.3)",d.fillRect(l,c,r,i),o.borderWidth>0&&(d.lineWidth=o.borderWidth,d.strokeStyle=o.borderColor||"rgba(225,225,225)",d.strokeRect(l,c,r,i)),d.restore()}var K={id:"zoom",version:"2.0.1",defaults:{pan:{enabled:!1,mode:"xy",threshold:10,modifierKey:null},zoom:{wheel:{enabled:!1,speed:.1,modifierKey:null},drag:{enabled:!1,drawTime:"beforeDatasetsDraw",modifierKey:null},pinch:{enabled:!1},mode:"xy"}},start:function(e,t,n){v(e).options=n,Object.prototype.hasOwnProperty.call(n.zoom,"enabled")&&console.warn("The option `zoom.enabled` is no longer supported. Please use `zoom.wheel.enabled`, `zoom.drag.enabled`, or `zoom.pinch.enabled`."),(Object.prototype.hasOwnProperty.call(n.zoom,"overScaleMode")||Object.prototype.hasOwnProperty.call(n.pan,"overScaleMode"))&&console.warn("The option `overScaleMode` is deprecated. Please use `scaleMode` instead (and update `mode` as desired)."),s()&&Z(e,n),e.pan=(t,n,o)=>A(e,t,n,o),e.zoom=(t,n)=>O(e,t,n),e.zoomRect=(t,n,o)=>L(e,t,n,o),e.zoomScale=(t,n,o)=>function(e,t,n,o="none"){B(e,v(e)),w(e.scales[t],n,void 0,!0),e.update(o)}(e,t,n,o),e.resetZoom=t=>function(e,t="default"){const n=v(e),o=B(e,n);(0,a.F)(e.scales,(function(e){const t=e.options;o[e.id]?(t.min=o[e.id].min.options,t.max=o[e.id].max.options):(delete t.min,delete t.max)})),e.update(t),(0,a.Q)(n.options.zoom.onZoomComplete,[{chart:e}])}(e,t),e.getZoomLevel=()=>k(e),e.getInitialScaleBounds=()=>E(e),e.isZoomedOrPanned=()=>function(e){const t=E(e);for(const n of Object.keys(e.scales)){const{min:o,max:s}=t[n];if(void 0!==o&&e.scales[n].min!==o)return!0;if(void 0!==s&&e.scales[n].max!==s)return!0}return!1}(e)},beforeEvent(e){const t=v(e);if(t.panning||t.dragging)return!1},beforeUpdate:function(e,t,n){v(e).options=n,function(e,t){const n=e.canvas,{wheel:o,drag:s,onZoomComplete:a}=t.zoom;o.enabled?(T(e,n,"wheel",R),W(e,"onZoomComplete",a,250)):P(e,"wheel"),s.enabled?(T(e,n,"mousedown",I),T(e,n.ownerDocument,"mouseup",N)):(P(e,"mousedown"),P(e,"mousemove"),P(e,"mouseup"),P(e,"keydown"))}(e,n)},beforeDatasetsDraw(e,t,n){G(e,"beforeDatasetsDraw",n)},afterDatasetsDraw(e,t,n){G(e,"afterDatasetsDraw",n)},beforeDraw(e,t,n){G(e,"beforeDraw",n)},afterDraw(e,t,n){G(e,"afterDraw",n)},stop:function(e){!function(e){P(e,"mousedown"),P(e,"mousemove"),P(e,"mouseup"),P(e,"wheel"),P(e,"click"),P(e,"keydown")}(e),s()&&function(e){const t=X.get(e);t&&(t.remove("pinchstart"),t.remove("pinch"),t.remove("pinchend"),t.remove("panstart"),t.remove("pan"),t.remove("panend"),t.destroy(),X.delete(e))}(e),function(e){h.delete(e)}(e)},panFunctions:M,zoomFunctions:x,zoomRectFunctions:_}},47168:(e,t,n)=>{var o;!function(s,a,l,c){"use strict";var r,i=["","webkit","Moz","MS","ms","o"],d=a.createElement("div"),u="function",h=Math.round,v=Math.abs,m=Date.now;function f(e,t,n){return setTimeout(_(e,n),t)}function 
w(e,t,n){return!!Array.isArray(e)&&(p(e,n[t],n),!0)}function p(e,t,n){var o;if(e)if(e.forEach)e.forEach(t,n);else if(e.length!==c)for(o=0;o\s*\(/gm,"{anonymous}()@"):"Unknown Stack Trace",a=s.console&&(s.console.warn||s.console.log);return a&&a.call(s.console,o,n),e.apply(this,arguments)}}r="function"!==typeof Object.assign?function(e){if(e===c||null===e)throw new TypeError("Cannot convert undefined or null to object");for(var t=Object(e),n=1;n-1}function O(e){return e.trim().split(/\s+/g)}function L(e,t,n){if(e.indexOf&&!n)return e.indexOf(t);for(var o=0;on[t]})):o.sort()),o}function A(e,t){for(var n,o,s=t[0].toUpperCase()+t.slice(1),a=0;a1&&!n.firstMultiple?n.firstMultiple=oe(t):1===s&&(n.firstMultiple=!1);var a=n.firstInput,l=n.firstMultiple,r=l?l.center:a.center,i=t.center=se(o);t.timeStamp=m(),t.deltaTime=t.timeStamp-a.timeStamp,t.angle=re(r,i),t.distance=ce(r,i),function(e,t){var n=t.center,o=e.offsetDelta||{},s=e.prevDelta||{},a=e.prevInput||{};t.eventType!==N&&a.eventType!==R||(s=e.prevDelta={x:a.deltaX||0,y:a.deltaY||0},o=e.offsetDelta={x:n.x,y:n.y});t.deltaX=s.x+(n.x-o.x),t.deltaY=s.y+(n.y-o.y)}(n,t),t.offsetDirection=le(t.deltaX,t.deltaY);var d=ae(t.deltaTime,t.deltaX,t.deltaY);t.overallVelocityX=d.x,t.overallVelocityY=d.y,t.overallVelocity=v(d.x)>v(d.y)?d.x:d.y,t.scale=l?(u=l.pointers,h=o,ce(h[0],h[1],ee)/ce(u[0],u[1],ee)):1,t.rotation=l?function(e,t){return re(t[1],t[0],ee)+re(e[1],e[0],ee)}(l.pointers,o):0,t.maxPointers=n.prevInput?t.pointers.length>n.prevInput.maxPointers?t.pointers.length:n.prevInput.maxPointers:t.pointers.length,function(e,t){var n,o,s,a,l=e.lastInterval||t,r=t.timeStamp-l.timeStamp;if(t.eventType!=W&&(r>F||l.velocity===c)){var i=t.deltaX-l.deltaX,d=t.deltaY-l.deltaY,u=ae(r,i,d);o=u.x,s=u.y,n=v(u.x)>v(u.y)?u.x:u.y,a=le(i,d),e.lastInterval=t}else n=l.velocity,o=l.velocityX,s=l.velocityY,a=l.direction;t.velocity=n,t.velocityX=o,t.velocityY=s,t.direction=a}(n,t);var u,h;var f=e.element;C(t.srcEvent.target,f)&&(f=t.srcEvent.target);t.target=f}(e,n),e.emit("hammer.input",n),e.recognize(n),e.session.prevInput=n}function oe(e){for(var t=[],n=0;n=v(t)?e<0?Y:Q:t<0?X:Z}function ce(e,t,n){n||(n=J);var o=t[n[0]]-e[n[0]],s=t[n[1]]-e[n[1]];return Math.sqrt(o*o+s*s)}function re(e,t,n){n||(n=J);var o=t[n[0]]-e[n[0]],s=t[n[1]]-e[n[1]];return 180*Math.atan2(s,o)/Math.PI}te.prototype={handler:function(){},init:function(){this.evEl&&B(this.element,this.evEl,this.domHandler),this.evTarget&&B(this.target,this.evTarget,this.domHandler),this.evWin&&B(P(this.element),this.evWin,this.domHandler)},destroy:function(){this.evEl&&H(this.element,this.evEl,this.domHandler),this.evTarget&&H(this.target,this.evTarget,this.domHandler),this.evWin&&H(P(this.element),this.evWin,this.domHandler)}};var ie={mousedown:N,mousemove:2,mouseup:R},de="mousedown",ue="mousemove mouseup";function he(){this.evEl=de,this.evWin=ue,this.pressed=!1,te.apply(this,arguments)}x(he,te,{handler:function(e){var t=ie[e.type];t&N&&0===e.button&&(this.pressed=!0),2&t&&1!==e.which&&(t=R),this.pressed&&(t&R&&(this.pressed=!1),this.callback(this.manager,t,{pointers:[e],changedPointers:[e],pointerType:I,srcEvent:e}))}});var ve={pointerdown:N,pointermove:2,pointerup:R,pointercancel:W,pointerout:W},me={2:D,3:"pen",4:I,5:"kinect"},fe="pointerdown",we="pointermove pointerup pointercancel";function pe(){this.evEl=fe,this.evWin=we,te.apply(this,arguments),this.store=this.manager.session.pointerEvents=[]}s.MSPointerEvent&&!s.PointerEvent&&(fe="MSPointerDown",we="MSPointerMove MSPointerUp 
MSPointerCancel"),x(pe,te,{handler:function(e){var t=this.store,n=!1,o=e.type.toLowerCase().replace("ms",""),s=ve[o],a=me[e.pointerType]||e.pointerType,l=a==D,c=L(t,e.pointerId,"pointerId");s&N&&(0===e.button||l)?c<0&&(t.push(e),c=t.length-1):s&(R|W)&&(n=!0),c<0||(t[c]=e,this.callback(this.manager,s,{pointers:t,changedPointers:[e],pointerType:a,srcEvent:e}),n&&t.splice(c,1))}});var ge={touchstart:N,touchmove:2,touchend:R,touchcancel:W};function ye(){this.evTarget="touchstart",this.evWin="touchstart touchmove touchend touchcancel",this.started=!1,te.apply(this,arguments)}function be(e,t){var n=k(e.touches),o=k(e.changedTouches);return t&(R|W)&&(n=S(n.concat(o),"identifier",!0)),[n,o]}x(ye,te,{handler:function(e){var t=ge[e.type];if(t===N&&(this.started=!0),this.started){var n=be.call(this,e,t);t&(R|W)&&n[0].length-n[1].length===0&&(this.started=!1),this.callback(this.manager,t,{pointers:n[0],changedPointers:n[1],pointerType:D,srcEvent:e})}}});var xe={touchstart:N,touchmove:2,touchend:R,touchcancel:W},_e="touchstart touchmove touchend touchcancel";function Me(){this.evTarget=_e,this.targetIds={},te.apply(this,arguments)}function ze(e,t){var n=k(e.touches),o=this.targetIds;if(t&(2|N)&&1===n.length)return o[n[0].identifier]=!0,[n,n];var s,a,l=k(e.changedTouches),c=[],r=this.target;if(a=n.filter((function(e){return C(e.target,r)})),t===N)for(s=0;s-1&&o.splice(e,1)}),Be)}}function Oe(e){for(var t=e.srcEvent.clientX,n=e.srcEvent.clientY,o=0;o-1&&this.requireFail.splice(t,1),this},hasRequireFailures:function(){return this.requireFail.length>0},canRecognizeWith:function(e){return!!this.simultaneous[e.id]},emit:function(e){var t=this,n=this.state;function o(n){t.manager.emit(n,e)}n<8&&o(t.options.event+Re(n)),o(t.options.event),e.additionalEvent&&o(e.additionalEvent),n>=8&&o(t.options.event+Re(n))},tryEmit:function(e){if(this.canEmit())return this.emit(e);this.state=Fe},canEmit:function(){for(var e=0;et.threshold&&s&t.direction},attrTest:function(e){return Ye.prototype.attrTest.call(this,e)&&(2&this.state||!(2&this.state)&&this.directionTest(e))},emit:function(e){this.pX=e.deltaX,this.pY=e.deltaY;var t=We(e.direction);t&&(e.additionalEvent=this.options.event+t),this._super.emit.call(this,e)}}),x(Xe,Ye,{defaults:{event:"pinch",threshold:0,pointers:2},getTouchAction:function(){return[Pe]},attrTest:function(e){return this._super.attrTest.call(this,e)&&(Math.abs(e.scale-1)>this.options.threshold||2&this.state)},emit:function(e){if(1!==e.scale){var t=e.scale<1?"in":"out";e.additionalEvent=this.options.event+t}this._super.emit.call(this,e)}}),x(Ze,Ne,{defaults:{event:"press",pointers:1,time:251,threshold:9},getTouchAction:function(){return[Ae]},process:function(e){var t=this.options,n=e.pointers.length===t.pointers,o=e.distancet.time;if(this._input=e,!o||!n||e.eventType&(R|W)&&!s)this.reset();else if(e.eventType&N)this.reset(),this._timer=f((function(){this.state=8,this.tryEmit()}),t.time,this);else if(e.eventType&R)return 8;return Fe},reset:function(){clearTimeout(this._timer)},emit:function(e){8===this.state&&(e&&e.eventType&R?this.manager.emit(this.options.event+"up",e):(this._input.timeStamp=m(),this.manager.emit(this.options.event,this._input)))}}),x(Ge,Ye,{defaults:{event:"rotate",threshold:0,pointers:2},getTouchAction:function(){return[Pe]},attrTest:function(e){return this._super.attrTest.call(this,e)&&(Math.abs(e.rotation)>this.options.threshold||2&this.state)}}),x(Ke,Ye,{defaults:{event:"swipe",threshold:10,velocity:.3,direction:G|K,pointers:1},getTouchAction:function(){return 
Qe.prototype.getTouchAction.call(this)},attrTest:function(e){var t,n=this.options.direction;return n&(G|K)?t=e.overallVelocity:n&G?t=e.overallVelocityX:n&K&&(t=e.overallVelocityY),this._super.attrTest.call(this,e)&&n&e.offsetDirection&&e.distance>this.options.threshold&&e.maxPointers==this.options.pointers&&v(t)>this.options.velocity&&e.eventType&R},emit:function(e){var t=We(e.offsetDirection);t&&this.manager.emit(this.options.event+t,e),this.manager.emit(this.options.event,e)}}),x($e,Ne,{defaults:{event:"tap",pointers:1,taps:1,interval:300,time:250,threshold:9,posThreshold:10},getTouchAction:function(){return[Ee]},process:function(e){var t=this.options,n=e.pointers.length===t.pointers,o=e.distance{var o=n(38859),s=n(15325),a=n(29905),l=n(34932),c=n(27301),r=n(19219),i=Math.min;e.exports=function(e,t,n){for(var d=n?a:s,u=e[0].length,h=e.length,v=h,m=Array(h),f=1/0,w=[];v--;){var p=e[v];v&&t&&(p=l(p,c(t))),f=i(p.length,f),m[v]=!n&&(t||u>=120&&p.length>=120)?new o(v&&p):void 0}p=e[0];var g=-1,y=m[0];e:for(;++g{var o=n(83693);e.exports=function(e){return o(e)?e:[]}},5287:(e,t,n)=>{var o=n(34932),s=n(27185),a=n(69302),l=n(3122),c=a((function(e){var t=o(e,l);return t.length&&t[0]===e[0]?s(t):[]}));e.exports=c},47060:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"add_node",use:"add_node-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},18217:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"add_user",use:"add_user-usage",viewBox:"0 0 15 16",content:''});l().add(c);const r=c},34975:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"aggregation_avg",use:"aggregation_avg-usage",viewBox:"0 0 16 12",content:''});l().add(c);const r=c},52441:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"aggregation_max",use:"aggregation_max-usage",viewBox:"0 0 15 16",content:''});l().add(c);const r=c},72041:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"aggregation_med",use:"aggregation_med-usage",viewBox:"0 0 14 14",content:''});l().add(c);const r=c},60863:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"aggregation_min",use:"aggregation_min-usage",viewBox:"0 0 15 16",content:''});l().add(c);const r=c},30882:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"aggregation_sum",use:"aggregation_sum-usage",viewBox:"0 0 12 14",content:''});l().add(c);const r=c},17423:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"aggregation_sum_abs",use:"aggregation_sum_abs-usage",viewBox:"0 0 14 14",content:''});l().add(c);const r=c},38475:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"alarm",use:"alarm-usage",viewBox:"0 0 18 21",content:''});l().add(c);const r=c},26391:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"alarmFilled",use:"alarmFilled-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},71049:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"alarm_c",use:"alarm_c-usage",viewBox:"0 0 24 
24",content:''});l().add(c);const r=c},29044:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"alarm_cw",use:"alarm_cw-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},20949:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"alarm_off",use:"alarm_off-usage",viewBox:"0 0 15 16",content:''});l().add(c);const r=c},58709:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"alarm_w",use:"alarm_w-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},34390:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"alarms_new",use:"alarms_new-usage",viewBox:"0 0 22 20",content:''});l().add(c);const r=c},24810:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"anomalies_brain",use:"anomalies_brain-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},97322:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"anomalies_lens",use:"anomalies_lens-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},51317:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"applications_hollow",use:"applications_hollow-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},79529:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"applications_solid",use:"applications_solid-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},86480:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"around_clock",use:"around_clock-usage",viewBox:"0 0 16 16",content:''});l().add(c);const r=c},52390:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"arrow-s_down",use:"arrow-s_down-usage",viewBox:"0 0 8 9",content:''});l().add(c);const r=c},70157:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"arrow-s_left",use:"arrow-s_left-usage",viewBox:"0 0 8 9",content:''});l().add(c);const r=c},98666:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"arrow_down",use:"arrow_down-usage",viewBox:"0 0 16 16",content:''});l().add(c);const r=c},78286:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"arrow_left",use:"arrow_left-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},81886:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"arrow_w_line_left",use:"arrow_w_line_left-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},38813:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"arrow_w_line_right",use:"arrow_w_line_right-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},87723:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"arrows_vertical",use:"arrows_vertical-usage",viewBox:"0 0 6 10",content:''});l().add(c);const r=c},59460:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var 
o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"bookmark",use:"bookmark-usage",viewBox:"0 0 12 14",content:''});l().add(c);const r=c},25817:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"bullet_one",use:"bullet_one-usage",viewBox:"0 0 10 10",content:''});l().add(c);const r=c},78957:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"bullet_three",use:"bullet_three-usage",viewBox:"0 0 10 10",content:''});l().add(c);const r=c},48783:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"bullet_two",use:"bullet_two-usage",viewBox:"0 0 10 10",content:''});l().add(c);const r=c},36878:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"calendar_full",use:"calendar_full-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},62682:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"calendar_full_press",use:"calendar_full_press-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},45715:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"chart_added",use:"chart_added-usage",viewBox:"0 0 17 17",content:''});l().add(c);const r=c},14767:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"charts",use:"charts-usage",viewBox:"0 0 20 20",content:''});l().add(c);const r=c},68973:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"charts_view",use:"charts_view-usage",viewBox:"0 0 16 15",content:''});l().add(c);const r=c},91956:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"check",use:"check-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},3559:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"checkmark",use:"checkmark-usage",viewBox:"0 0 16 16",content:''});l().add(c);const r=c},87875:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"checkmark_partial_s",use:"checkmark_partial_s-usage",viewBox:"0 0 16 16",content:''});l().add(c);const r=c},79919:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"chevron_double",use:"chevron_double-usage",viewBox:"0 0 6 10",content:''});l().add(c);const r=c},66643:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"chevron_left",use:"chevron_left-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},8827:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"chevron_left_small",use:"chevron_left_small-usage",viewBox:"0 0 5 6",content:''});l().add(c);const r=c},39078:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"chevron_left_start",use:"chevron_left_start-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},49686:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"chevron_right",use:"chevron_right-usage",viewBox:"0 0 18 
18",content:''});l().add(c);const r=c},49764:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"chevron_right_s",use:"chevron_right_s-usage",viewBox:"0 0 5 6",content:''});l().add(c);const r=c},89482:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"chevron_right_small",use:"chevron_right_small-usage",viewBox:"0 0 5 6",content:''});l().add(c);const r=c},93855:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"class_error",use:"class_error-usage",viewBox:"0 0 21 22",content:''});l().add(c);const r=c},53079:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"class_latency",use:"class_latency-usage",viewBox:"0 0 21 20",content:''});l().add(c);const r=c},65377:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"class_utilization",use:"class_utilization-usage",viewBox:"0 0 25 19",content:''});l().add(c);const r=c},77568:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"class_workload",use:"class_workload-usage",viewBox:"0 0 22 21",content:''});l().add(c);const r=c},34623:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"clock_5_min",use:"clock_5_min-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},63319:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"clock_5_min_press",use:"clock_5_min_press-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},51580:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"clock_hollow",use:"clock_hollow-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},85419:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"close_circle",use:"close_circle-usage",viewBox:"0 0 10 10",content:''});l().add(c);const r=c},81426:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"cluster",use:"cluster-usage",viewBox:"0 0 22 22",content:''});l().add(c);const r=c},68124:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"cluster_spaces",use:"cluster_spaces-usage",viewBox:"0 0 22 22",content:''});l().add(c);const r=c},86547:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"code",use:"code-usage",viewBox:"0 0 16 16",content:''});l().add(c);const r=c},42202:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"collect",use:"collect-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},48083:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"community",use:"community-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},58746:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"connection_to_cloud",use:"connection_to_cloud-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},4643:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var 
o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"connectivity_status_live",use:"connectivity_status_live-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},21774:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"connectivity_status_offline",use:"connectivity_status_offline-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},43380:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"connectivity_status_stale",use:"connectivity_status_stale-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},26181:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"controller_kind",use:"controller_kind-usage",viewBox:"0 0 22 22",content:''});l().add(c);const r=c},6494:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"controller_name",use:"controller_name-usage",viewBox:"0 0 22 22",content:''});l().add(c);const r=c},66427:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"copy",use:"copy-usage",viewBox:"0 0 14 14",content:''});l().add(c);const r=c},16888:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"correlation",use:"correlation-usage",viewBox:"0 0 28 28",content:''});l().add(c);const r=c},74552:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"correlation_inv",use:"correlation_inv-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},66194:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"cpu",use:"cpu-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},14946:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"cross_s",use:"cross_s-usage",viewBox:"0 0 16 16",content:''});l().add(c);const r=c},68864:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"dashboard",use:"dashboard-usage",viewBox:"0 0 22 18",content:''});l().add(c);const r=c},84652:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"dashboard_add",use:"dashboard_add-usage",viewBox:"0 0 16 16",content:''});l().add(c);const r=c},8035:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"dashboards",use:"dashboards-usage",viewBox:"0 0 16 10",content:''});l().add(c);const r=c},85095:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"data_retention",use:"data_retention-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},47307:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"database",use:"database-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},20599:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"discovered_config",use:"discovered_config-usage",viewBox:"0 0 16 16",content:''});l().add(c);const r=c},98411:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"disk",use:"disk-usage",viewBox:"0 0 18 
18",content:''});l().add(c);const r=c},79570:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"documentation",use:"documentation-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},79684:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"dots_2x3",use:"dots_2x3-usage",viewBox:"0 0 6 10",content:''});l().add(c);const r=c},4228:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"download",use:"download-usage",viewBox:"0 0 20 20",content:''});l().add(c);const r=c},39088:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"dynamic_config",use:"dynamic_config-usage",viewBox:"0 0 16 16",content:''});l().add(c);const r=c},99464:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"edit",use:"edit-usage",viewBox:"0 0 16 16",content:''});l().add(c);const r=c},37414:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"error",use:"error-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},91423:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"exclamation",use:"exclamation-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},89875:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"favorites",use:"favorites-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},47720:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"feed",use:"feed-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},72570:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"filter",use:"filter-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},60774:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"filterList",use:"filterList-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},9232:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"firewall_solid",use:"firewall_solid-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},3886:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"force_play",use:"force_play-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},24241:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"force_play_outline",use:"force_play_outline-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},22370:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"full_screen",use:"full_screen-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},41519:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"functions",use:"functions-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},58609:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"github",use:"github-usage",viewBox:"0 0 24 
24",content:''});l().add(c);const r=c},10237:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"go_to_node",use:"go_to_node-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},78057:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"google",use:"google-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},7351:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"group_by",use:"group_by-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},24229:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"h1",use:"h1-usage",viewBox:"0 0 14 12",content:''});l().add(c);const r=c},25094:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"h2",use:"h2-usage",viewBox:"0 0 14 12",content:''});l().add(c);const r=c},89513:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"hamburger",use:"hamburger-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},41517:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"help",use:"help-usage",viewBox:"0 0 20 21",content:''});l().add(c);const r=c},27612:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"hide",use:"hide-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},5048:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"highlight_area",use:"highlight_area-usage",viewBox:"0 0 16 16",content:''});l().add(c);const r=c},11862:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"holder",use:"holder-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},949:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"importExport",use:"importExport-usage",viewBox:"0 0 16 16",content:''});l().add(c);const r=c},61898:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"incident_manager",use:"incident_manager-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},92949:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"information_press",use:"information_press-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},78131:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"insights",use:"insights-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},64197:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"integrations",use:"integrations-usage",viewBox:"0 0 16 16",content:''});l().add(c);const r=c},11336:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"aws_sns",use:"aws_sns-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},27555:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"aws_sns_colored",use:"aws_sns_colored-usage",viewBox:"0 0 24 24",content:''});l().add(c);const 
r=c},9080:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"discord",use:"discord-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},56851:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"discord_colored",use:"discord_colored-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},49232:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"email",use:"email-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},32824:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"email_colored",use:"email_colored-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},86294:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"mattermost",use:"mattermost-usage",viewBox:"0 0 700 700",content:''});l().add(c);const r=c},92605:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"mattermost_colored",use:"mattermost_colored-usage",viewBox:"0 0 700 700",content:''});l().add(c);const r=c},35813:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"mobile_app_colored",use:"mobile_app_colored-usage",viewBox:"0 0 25 24",content:''});l().add(c);const r=c},16912:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"opsgenie",use:"opsgenie-usage",viewBox:"0 0 16 16",content:''});l().add(c);const r=c},76107:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"opsgenie_colored",use:"opsgenie_colored-usage",viewBox:"0 0 16 16",content:''});l().add(c);const r=c},88909:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"pagerduty",use:"pagerduty-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},45266:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"pagerduty_colored",use:"pagerduty_colored-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},47217:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"push_notifications",use:"push_notifications-usage",viewBox:"0 0 22 22",content:''});l().add(c);const r=c},70762:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"rocketChat",use:"rocketChat-usage",viewBox:"0 0 16 16",content:''});l().add(c);const r=c},68081:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"rocketChat_colored",use:"rocketChat_colored-usage",viewBox:"0 0 16 16",content:''});l().add(c);const r=c},90728:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"slack",use:"slack-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},52547:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"slack_colored",use:"slack_colored-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},50199:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var 
o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"splunk-black",use:"splunk-black-usage",viewBox:"0 0 122 36",content:'An information technology company based in California, United States'});l().add(c);const r=c},20002:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"teams",use:"teams-usage",viewBox:"0 0 2228.833 2073.333",content:''});l().add(c);const r=c},67977:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"teams_colored",use:"teams_colored-usage",viewBox:"0 0 2228.833 2073.333",content:''});l().add(c);const r=c},70009:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"telegram",use:"telegram-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},65550:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"telegram_colored",use:"telegram_colored-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},70647:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"victorOps",use:"victorOps-usage",viewBox:"0 0 16 16",content:''});l().add(c);const r=c},83424:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"victorOps_colored",use:"victorOps_colored-usage",viewBox:"0 0 16 16",content:''});l().add(c);const r=c},40483:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"webhook",use:"webhook-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},47676:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"webhook_colored",use:"webhook_colored-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},6232:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"internal_config",use:"internal_config-usage",viewBox:"0 0 16 16",content:''});l().add(c);const r=c},21489:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"ipNetworking",use:"ipNetworking-usage",viewBox:"0 0 16 16",content:''});l().add(c);const r=c},82118:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"ipNetworkingPress",use:"ipNetworkingPress-usage",viewBox:"0 0 16 16",content:''});l().add(c);const r=c},64739:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"last_week",use:"last_week-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},27703:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"line_chart",use:"line_chart-usage",viewBox:"0 0 15 15",content:''});l().add(c);const r=c},76415:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"logo_s",use:"logo_s-usage",viewBox:"0 0 14 13",content:''});l().add(c);const r=c},16933:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"logs",use:"logs-usage",viewBox:"0 0 16 16",content:''});l().add(c);const r=c},5820:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var 
o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"long_arrow_up",use:"long_arrow_up-usage",viewBox:"0 0 10 14",content:''});l().add(c);const r=c},72085:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"magnify",use:"magnify-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},40235:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"metrics",use:"metrics-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},81199:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"metrics_explorer",use:"metrics_explorer-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},95546:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"minimize_s",use:"minimize_s-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},40852:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"mobile_push_notifications",use:"mobile_push_notifications-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},43902:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"mobile_push_notifications_hollow",use:"mobile_push_notifications_hollow-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},21674:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"monitoring",use:"monitoring-usage",viewBox:"0 0 20 20",content:''});l().add(c);const r=c},1279:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"more",use:"more-usage",viewBox:"0 0 18 4",content:''});l().add(c);const r=c},5057:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"nav_arrow_goto",use:"nav_arrow_goto-usage",viewBox:"0 0 10 10",content:''});l().add(c);const r=c},1498:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"nav_dots",use:"nav_dots-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},34111:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"netdata-press",use:"netdata-press-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},96317:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"netdata",use:"netdata-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},51991:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"netdataAssistant",use:"netdataAssistant-usage",viewBox:"0 0 16 16",content:''});l().add(c);const r=c},85903:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"networking_stack",use:"networking_stack-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},4344:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"node",use:"node-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},18647:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"node_child",use:"node_child-usage",viewBox:"0 0 18 
18",content:''});l().add(c);const r=c},30983:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"node_default_l",use:"node_default_l-usage",viewBox:"0 0 40 40",content:''});l().add(c);const r=c},16410:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"node_hollow",use:"node_hollow-usage",viewBox:"0 0 22 12",content:''});l().add(c);const r=c},57013:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"node_import_export",use:"node_import_export-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},34031:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"node_notification_l",use:"node_notification_l-usage",viewBox:"0 0 40 40",content:''});l().add(c);const r=c},55703:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"node_parent",use:"node_parent-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},12501:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"node_selected_l",use:"node_selected_l-usage",viewBox:"0 0 40 40",content:''});l().add(c);const r=c},43947:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"nodes",use:"nodes-usage",viewBox:"0 0 16 16",content:''});l().add(c);const r=c},52059:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"nodes_hollow",use:"nodes_hollow-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},68705:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"nodes_update",use:"nodes_update-usage",viewBox:"0 0 40 40",content:''});l().add(c);const r=c},9150:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"none_selected",use:"none_selected-usage",viewBox:"0 0 16 16",content:''});l().add(c);const r=c},29921:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"notification",use:"notification-usage",viewBox:"0 0 40 24",content:''});l().add(c);const r=c},45753:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"notification_shortcut_disabled",use:"notification_shortcut_disabled-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},74480:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"notification_shortcut_enabled",use:"notification_shortcut_enabled-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},89336:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"notification_trigger",use:"notification_trigger-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},59631:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"okta",use:"okta-usage",viewBox:"0 0 16 16",content:''});l().add(c);const r=c},47627:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"openid",use:"openid-usage",viewBox:"0 0 16 16",content:''});l().add(c);const r=c},3599:(e,t,n)=>{"use 
strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"alpine_linux",use:"alpine_linux-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},61386:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"amazon_linux",use:"amazon_linux-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},17506:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"arch_linux",use:"arch_linux-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},91008:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"celarOS",use:"celarOS-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},95173:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"centos",use:"centos-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},39850:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"centos_colored",use:"centos_colored-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},79662:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"coreOS",use:"coreOS-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},34106:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"debian",use:"debian-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},7297:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"debian_colored",use:"debian_colored-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},80370:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"fedora",use:"fedora-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},38194:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"freeBSD",use:"freeBSD-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},54349:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"gentoo",use:"gentoo-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},75873:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"linux",use:"linux-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},39862:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"linux_colored",use:"linux_colored-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},29474:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"linux_manjaro",use:"linux_manjaro-usage",viewBox:"0 0 14 14",content:''});l().add(c);const r=c},53328:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"macOSX",use:"macOSX-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},94403:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"oracle",use:"oracle-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},60188:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var 
o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"oracle_colored",use:"oracle_colored-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},22463:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"os",use:"os-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},64612:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"os_press",use:"os_press-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},56025:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"raspbian",use:"raspbian-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},94064:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"red_hat",use:"red_hat-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},24934:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"suse_linux",use:"suse_linux-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},61248:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"ubuntu",use:"ubuntu-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},48635:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"ubuntu_colored",use:"ubuntu_colored-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},52874:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"padlock",use:"padlock-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},38141:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"pause_outline",use:"pause_outline-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},26946:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"pause_solid",use:"pause_solid-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},98108:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"pencil_outline",use:"pencil_outline-usage",viewBox:"0 0 14 14",content:''});l().add(c);const r=c},87007:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"pencil_solid",use:"pencil_solid-usage",viewBox:"0 0 19 19",content:''});l().add(c);const r=c},14167:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"pie_chart_skeleton",use:"pie_chart_skeleton-usage",viewBox:"0 0 100 100",content:''});l().add(c);const r=c},64628:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"pin_element",use:"pin_element-usage",viewBox:"0 0 14 14",content:''});l().add(c);const r=c},32365:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"play_outline",use:"play_outline-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},27506:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"play_solid",use:"play_solid-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},75116:(e,t,n)=>{"use 
strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"plus",use:"plus-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},25376:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"plus_mini_s",use:"plus_mini_s-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},53085:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"pod",use:"pod-usage",viewBox:"0 0 22 22",content:''});l().add(c);const r=c},46550:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"pricing",use:"pricing-usage",viewBox:"0 0 16 16",content:''});l().add(c);const r=c},27129:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"print",use:"print-usage",viewBox:"0 0 21 20",content:''});l().add(c);const r=c},49164:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"privacy",use:"privacy-usage",viewBox:"0 0 16 16",content:''});l().add(c);const r=c},15471:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"qr_code",use:"qr_code-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},68905:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"qualityOfService_solid",use:"qualityOfService_solid-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},65024:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"question",use:"question-usage",viewBox:"0 0 20 20",content:''});l().add(c);const r=c},84024:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"questionFilled",use:"questionFilled-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},69460:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"ram",use:"ram-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},17026:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"reduce_size",use:"reduce_size-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},72693:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"refresh",use:"refresh-usage",viewBox:"0 0 18 19",content:''});l().add(c);const r=c},19227:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"reload",use:"reload-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},5323:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"remove_node",use:"remove_node-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},72293:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"resize_handler",use:"resize_handler-usage",viewBox:"0 0 16 16",content:''});l().add(c);const r=c},40484:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"rocket",use:"rocket-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},71061:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var 
o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"room",use:"room-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},43671:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"room_home",use:"room_home-usage",viewBox:"0 0 14 12",content:''});l().add(c);const r=c},834:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"room_new",use:"room_new-usage",viewBox:"0 0 20 20",content:''});l().add(c);const r=c},70249:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"room_overview",use:"room_overview-usage",viewBox:"0 0 24 25",content:''});l().add(c);const r=c},88112:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"sad",use:"sad-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},27891:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"save",use:"save-usage",viewBox:"0 0 12 12",content:''});l().add(c);const r=c},16171:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"save2",use:"save2-usage",viewBox:"0 0 14 14",content:''});l().add(c);const r=c},68205:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"scheduled",use:"scheduled-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},67360:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"search",use:"search-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},47532:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"search_press",use:"search_press-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},15958:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"search_s",use:"search_s-usage",viewBox:"0 0 14 14",content:''});l().add(c);const r=c},6133:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"apache",use:"apache-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},21832:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"apache_tomcat",use:"apache_tomcat-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},19943:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"asterisk",use:"asterisk-usage",viewBox:"0 0 16 16",content:''});l().add(c);const r=c},47306:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"beanstalk",use:"beanstalk-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},78902:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"bind",use:"bind-usage",viewBox:"0 0 16 16",content:''});l().add(c);const r=c},28676:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"containerTech",use:"containerTech-usage",viewBox:"0 0 16 16",content:''});l().add(c);const r=c},89399:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var 
o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"coreDNS",use:"coreDNS-usage",viewBox:"0 0 16 16",content:''});l().add(c);const r=c},99229:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"couchDB",use:"couchDB-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},97116:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"database",use:"database-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},77840:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"dns",use:"dns-usage",viewBox:"0 0 16 16",content:''});l().add(c);const r=c},49762:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"dnsmasq",use:"dnsmasq-usage",viewBox:"0 0 16 16",content:''});l().add(c);const r=c},44555:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"docker_hub",use:"docker_hub-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},31411:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"docker_hub_press",use:"docker_hub_press-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},5157:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"dotnet",use:"dotnet-usage",viewBox:"0 0 16 16",content:''});l().add(c);const r=c},31896:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"eBPF",use:"eBPF-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},62520:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"elasticSearch",use:"elasticSearch-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},14191:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"example",use:"example-usage",viewBox:"0 0 16 16",content:''});l().add(c);const r=c},50861:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"freeNAS",use:"freeNAS-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},13974:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"haProxy",use:"haProxy-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},51665:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"httpCheck",use:"httpCheck-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},66689:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"iceCast",use:"iceCast-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},74161:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"influxDB",use:"influxDB-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},26249:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"ipfs",use:"ipfs-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},51961:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var 
o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"ipvs",use:"ipvs-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},6819:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"kubernetes",use:"kubernetes-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},70391:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"lighthttpd",use:"lighthttpd-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},55767:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"lighthttpd2",use:"lighthttpd2-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},72232:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"liteSpeed",use:"liteSpeed-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},65494:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"lxc",use:"lxc-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},55925:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"mariaDB",use:"mariaDB-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},10484:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"memCached",use:"memCached-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},35743:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"mongoDB",use:"mongoDB-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},51771:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"mySQL",use:"mySQL-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},95715:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"mySQL_press",use:"mySQL_press-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},12393:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"nginx",use:"nginx-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},76533:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"nginx_local",use:"nginx_local-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},24972:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"nginx_plus",use:"nginx_plus-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},61357:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"ntpd",use:"ntpd-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},43077:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"ntpd_press",use:"ntpd_press-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},88222:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"nvidia",use:"nvidia-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},8277:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var 
o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"openStack",use:"openStack-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},51072:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"openWrt",use:"openWrt-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},78776:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"pan",use:"pan-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},44212:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"pandas",use:"pandas-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},8269:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"percona",use:"percona-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},4729:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"pfSense",use:"pfSense-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},66359:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"php_fpm",use:"php_fpm-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},46021:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"postgreSQL",use:"postgreSQL-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},63487:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"prometheus",use:"prometheus-usage",viewBox:"0 0 256 257",content:''});l().add(c);const r=c},79723:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"proxySQL",use:"proxySQL-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},81925:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"rabbitMQ",use:"rabbitMQ-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},22202:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"random",use:"random-usage",viewBox:"0 0 16 16",content:''});l().add(c);const r=c},17576:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"redis",use:"redis-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},60194:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"rethinkDB",use:"rethinkDB-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},85694:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"retroShare",use:"retroShare-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},25628:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"selected_area",use:"selected_area-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},2965:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"sendgrid",use:"sendgrid-usage",viewBox:"0 0 16 16",content:''});l().add(c);const r=c},13203:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var 
o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"services",use:"services-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},34124:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"smartdlog",use:"smartdlog-usage",viewBox:"0 0 16 16",content:''});l().add(c);const r=c},13151:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"solr",use:"solr-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},52039:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"squid",use:"squid-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},15670:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"summary_statistic",use:"summary_statistic-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},20972:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"systemd",use:"systemd-usage",viewBox:"0 0 16 16",content:''});l().add(c);const r=c},99947:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"traefik",use:"traefik-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},86788:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"varnish",use:"varnish-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},73931:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"webLog",use:"webLog-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},75194:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"webLog_nginx",use:"webLog_nginx-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},84458:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"x509_check",use:"x509_check-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},26140:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"xen",use:"xen-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},83835:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"settings",use:"settings-usage",viewBox:"0 0 17 15",content:''});l().add(c);const r=c},13347:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"sign_in",use:"sign_in-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},13322:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"sort_indicator",use:"sort_indicator-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},84222:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"sorting_asc",use:"sorting_asc-usage",viewBox:"0 0 8 9",content:''});l().add(c);const r=c},35624:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"sorting_desc",use:"sorting_desc-usage",viewBox:"0 0 8 9",content:''});l().add(c);const r=c},5601:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var 
o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"sorting_vertical",use:"sorting_vertical-usage",viewBox:"0 0 19 18",content:''});l().add(c);const r=c},44894:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"space",use:"space-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},79349:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"space_new",use:"space_new-usage",viewBox:"0 0 20 20",content:''});l().add(c);const r=c},20508:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"spaces_v2",use:"spaces_v2-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},73215:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"stock_config",use:"stock_config-usage",viewBox:"0 0 16 16",content:''});l().add(c);const r=c},37110:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"switch_off",use:"switch_off-usage",viewBox:"0 0 14 15",content:''});l().add(c);const r=c},67287:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"system_overview",use:"system_overview-usage",viewBox:"0 0 32 32",content:''});l().add(c);const r=c},99679:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"system_overview_press",use:"system_overview_press-usage",viewBox:"0 0 16 16",content:''});l().add(c);const r=c},97285:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"text_add",use:"text_add-usage",viewBox:"0 0 16 16",content:''});l().add(c);const r=c},24387:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"thumb_down",use:"thumb_down-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},54204:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"thumb_up",use:"thumb_up-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},46020:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"tiny_buttons",use:"tiny_buttons-usage",viewBox:"0 0 22 22",content:''});l().add(c);const r=c},87501:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"top",use:"top-usage",viewBox:"0 0 16 16",content:''});l().add(c);const r=c},134:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"training",use:"training-usage",viewBox:"0 0 16 16",content:''});l().add(c);const r=c},38860:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"trashcan",use:"trashcan-usage",viewBox:"0 0 14 15",content:''});l().add(c);const r=c},16168:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"triangle",use:"triangle-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},95436:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"triangle_down",use:"triangle_down-usage",viewBox:"0 0 10 5",content:''});l().add(c);const r=c},97701:(e,t,n)=>{"use 
strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"universe",use:"universe-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},68008:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"unknownError",use:"unknownError-usage",viewBox:"0 0 16 16",content:''});l().add(c);const r=c},1806:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"unreachable",use:"unreachable-usage",viewBox:"0 0 12 14",content:''});l().add(c);const r=c},85994:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"unreachableNode",use:"unreachableNode-usage",viewBox:"0 0 231 230",content:''});l().add(c);const r=c},39191:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"update",use:"update-usage",viewBox:"0 0 20 20",content:''});l().add(c);const r=c},36419:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"update_pending",use:"update_pending-usage",viewBox:"0 0 20 20",content:''});l().add(c);const r=c},30807:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"upload",use:"upload-usage",viewBox:"0 0 20 21",content:''});l().add(c);const r=c},2381:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"user",use:"user-usage",viewBox:"0 0 16 18",content:''});l().add(c);const r=c},55370:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"user_config",use:"user_config-usage",viewBox:"0 0 16 16",content:''});l().add(c);const r=c},42149:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"user_press",use:"user_press-usage",viewBox:"0 0 16 16",content:''});l().add(c);const r=c},5060:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"users",use:"users-usage",viewBox:"0 0 14 14",content:''});l().add(c);const r=c},55766:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"view_list",use:"view_list-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},75444:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"single_node_view",use:"single_node_view-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},18672:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"single_node_view_press",use:"single_node_view_press-usage",viewBox:"0 0 18 18",content:''});l().add(c);const r=c},74271:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"virtualization",use:"virtualization-usage",viewBox:"0 0 16 16",content:''});l().add(c);const r=c},43728:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"warning",use:"warning-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},35655:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"warning_triangle",use:"warning_triangle-usage",viewBox:"0 0 12 
10",content:''});l().add(c);const r=c},62791:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"weights_compare",use:"weights_compare-usage",viewBox:"0 0 14 12",content:''});l().add(c);const r=c},73324:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"weights_drill_down",use:"weights_drill_down-usage",viewBox:"0 0 14 14",content:''});l().add(c);const r=c},9932:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r});var o=n(12897),s=n.n(o),a=n(55042),l=n.n(a),c=new(s())({id:"x",use:"x-usage",viewBox:"0 0 24 24",content:''});l().add(c);const r=c},28892:(e,t,n)=>{"use strict";Object.defineProperty(t,Symbol.toStringTag,{value:"Module"});const o=n(96540),s=n(40961),a=n(13172);function l(e){const t=Object.create(null,{[Symbol.toStringTag]:{value:"Module"}});if(e)for(const n in e)if("default"!==n){const o=Object.getOwnPropertyDescriptor(e,n);Object.defineProperty(t,n,o.get?o:{enumerable:!0,get:()=>e[n]})}return t.default=e,Object.freeze(t)}const c=l(o),r="undefined"!==typeof document?c.useLayoutEffect:c.useEffect;function i(e){const t=c.useReducer((()=>({})),{})[1],n={...e,onChange:(n,o)=>{var a;o?s.flushSync(t):t(),null==(a=e.onChange)||a.call(e,n,o)}},[o]=c.useState((()=>new a.Virtualizer(n)));return o.setOptions(n),c.useEffect((()=>o._didMount()),[]),r((()=>o._willUpdate())),o}t.useVirtualizer=function(e){return i({observeElementRect:a.observeElementRect,observeElementOffset:a.observeElementOffset,scrollToFn:a.elementScroll,...e})},t.useWindowVirtualizer=function(e){return i({getScrollElement:()=>"undefined"!==typeof document?window:null,observeElementRect:a.observeWindowRect,observeElementOffset:a.observeWindowOffset,scrollToFn:a.windowScroll,initialOffset:"undefined"!==typeof document?window.scrollY:void 0,...e})},Object.keys(a).forEach((e=>{"default"===e||Object.prototype.hasOwnProperty.call(t,e)||Object.defineProperty(t,e,{enumerable:!0,get:()=>a[e]})}))},13172:(e,t,n)=>{"use strict";Object.defineProperty(t,Symbol.toStringTag,{value:"Module"});const o=n(49023),s=e=>e,a=e=>{const t=Math.max(e.startIndex-e.overscan,0),n=Math.min(e.endIndex+e.overscan,e.count-1),o=[];for(let s=t;s<=n;s++)o.push(s);return o},l=(e,t,n)=>{if(null==t?void 0:t.borderBoxSize){const e=t.borderBoxSize[0];if(e){return Math.round(e[n.options.horizontal?"inlineSize":"blockSize"])}}return Math.round(e.getBoundingClientRect()[n.options.horizontal?"width":"height"])};const c=(e,t,n,o)=>{for(;e<=t;){const s=(e+t)/2|0,a=n(s);if(ao))return s;t=s-1}}return e>0?e-1:0};t.approxEqual=o.approxEqual,t.memo=o.memo,t.notUndefined=o.notUndefined,t.Virtualizer=class{constructor(e){this.unsubs=[],this.scrollElement=null,this.isScrolling=!1,this.isScrollingTimeoutId=null,this.scrollToIndexTimeoutId=null,this.measurementsCache=[],this.itemSizeCache=new Map,this.pendingMeasuredCacheIndexes=[],this.scrollDirection=null,this.scrollAdjustments=0,this.measureElementCache=new Map,this.observer=(()=>{let e=null;const t=()=>e||("undefined"!==typeof ResizeObserver?e=new ResizeObserver((e=>{e.forEach((e=>{this._measureElement(e.target,e)}))})):null);return{disconnect:()=>{var e;return null==(e=t())?void 0:e.disconnect()},observe:e=>{var n;return null==(n=t())?void 0:n.observe(e,{box:"border-box"})},unobserve:e=>{var n;return null==(n=t())?void 0:n.unobserve(e)}}})(),this.range=null,this.setOptions=e=>{Object.entries(e).forEach((([t,n])=>{"undefined"===typeof n&&delete 
e[t]})),this.options={debug:!1,initialOffset:0,overscan:1,paddingStart:0,paddingEnd:0,scrollPaddingStart:0,scrollPaddingEnd:0,horizontal:!1,getItemKey:s,rangeExtractor:a,onChange:()=>{},measureElement:l,initialRect:{width:0,height:0},scrollMargin:0,gap:0,scrollingDelay:150,indexAttribute:"data-index",initialMeasurementsCache:[],lanes:1,...e}},this.notify=e=>{var t,n;null==(n=(t=this.options).onChange)||n.call(t,this,e)},this.maybeNotify=o.memo((()=>(this.calculateRange(),[this.isScrolling,this.range?this.range.startIndex:null,this.range?this.range.endIndex:null])),(e=>{this.notify(e)}),{key:!1,debug:()=>this.options.debug,initialDeps:[this.isScrolling,this.range?this.range.startIndex:null,this.range?this.range.endIndex:null]}),this.cleanup=()=>{this.unsubs.filter(Boolean).forEach((e=>e())),this.unsubs=[],this.scrollElement=null},this._didMount=()=>(this.measureElementCache.forEach(this.observer.observe),()=>{this.observer.disconnect(),this.cleanup()}),this._willUpdate=()=>{const e=this.options.getScrollElement();this.scrollElement!==e&&(this.cleanup(),this.scrollElement=e,this._scrollToOffset(this.scrollOffset,{adjustments:void 0,behavior:void 0}),this.unsubs.push(this.options.observeElementRect(this,(e=>{this.scrollRect=e,this.maybeNotify()}))),this.unsubs.push(this.options.observeElementOffset(this,(e=>{this.scrollAdjustments=0,this.scrollOffset!==e&&(null!==this.isScrollingTimeoutId&&(clearTimeout(this.isScrollingTimeoutId),this.isScrollingTimeoutId=null),this.isScrolling=!0,this.scrollDirection=this.scrollOffset{this.isScrollingTimeoutId=null,this.isScrolling=!1,this.scrollDirection=null,this.maybeNotify()}),this.options.scrollingDelay))}))))},this.getSize=()=>this.scrollRect[this.options.horizontal?"width":"height"],this.memoOptions=o.memo((()=>[this.options.count,this.options.paddingStart,this.options.scrollMargin,this.options.getItemKey]),((e,t,n,o)=>(this.pendingMeasuredCacheIndexes=[],{count:e,paddingStart:t,scrollMargin:n,getItemKey:o})),{key:!1}),this.getFurthestMeasurement=(e,t)=>{const n=new Map,o=new Map;for(let s=t-1;s>=0;s--){const t=e[s];if(n.has(t.lane))continue;const a=o.get(t.lane);if(null==a||t.end>a.end?o.set(t.lane,t):t.ende.end===t.end?e.index-t.index:e.end-t.end))[0]:void 0},this.getMeasurements=o.memo((()=>[this.memoOptions(),this.itemSizeCache]),(({count:e,paddingStart:t,scrollMargin:n,getItemKey:o},s)=>{const a=this.pendingMeasuredCacheIndexes.length>0?Math.min(...this.pendingMeasuredCacheIndexes):0;this.pendingMeasuredCacheIndexes=[];const l=this.measurementsCache.slice(0,a);for(let c=a;cthis.options.debug}),this.calculateRange=o.memo((()=>[this.getMeasurements(),this.getSize(),this.scrollOffset]),((e,t,n)=>this.range=e.length>0&&t>0?function({measurements:e,outerSize:t,scrollOffset:n}){const o=e.length-1,s=t=>e[t].start,a=c(0,o,s,n);let l=a;for(;lthis.options.debug}),this.getIndexes=o.memo((()=>[this.options.rangeExtractor,this.calculateRange(),this.options.overscan,this.options.count]),((e,t,n,o)=>null===t?[]:e({...t,overscan:n,count:o})),{key:!1,debug:()=>this.options.debug}),this.indexFromElement=e=>{const t=this.options.indexAttribute,n=e.getAttribute(t);return n?parseInt(n,10):(console.warn(`Missing attribute name '${t}={index}' on measured element.`),-1)},this._measureElement=(e,t)=>{const n=this.measurementsCache[this.indexFromElement(e)];if(!n||!e.isConnected)return void this.measureElementCache.forEach(((t,n)=>{t===e&&(this.observer.unobserve(e),this.measureElementCache.delete(n))}));const 
o=this.measureElementCache.get(n.key);o!==e&&(o&&this.observer.unobserve(o),this.observer.observe(e),this.measureElementCache.set(n.key,e));const s=this.options.measureElement(e,t,this);this.resizeItem(n,s)},this.resizeItem=(e,t)=>{const n=t-(this.itemSizeCache.get(e.key)??e.size);0!==n&&(e.start{e&&this._measureElement(e,void 0)},this.getVirtualItems=o.memo((()=>[this.getIndexes(),this.getMeasurements()]),((e,t)=>{const n=[];for(let o=0,s=e.length;othis.options.debug}),this.getVirtualItemForOffset=e=>{const t=this.getMeasurements();return o.notUndefined(t[c(0,t.length-1,(e=>o.notUndefined(t[e]).start),e)])},this.getOffsetForAlignment=(e,t)=>{const n=this.getSize();"auto"===t&&(t=e<=this.scrollOffset?"start":e>=this.scrollOffset+n?"end":"start"),"start"===t||("end"===t?e-=n:"center"===t&&(e-=n/2));const o=this.options.horizontal?"scrollWidth":"scrollHeight",s=(this.scrollElement?"document"in this.scrollElement?this.scrollElement.document.documentElement[o]:this.scrollElement[o]:0)-this.getSize();return Math.max(Math.min(s,e),0)},this.getOffsetForIndex=(e,t="auto")=>{e=Math.max(0,Math.min(e,this.options.count-1));const n=o.notUndefined(this.getMeasurements()[e]);if("auto"===t)if(n.end>=this.scrollOffset+this.getSize()-this.options.scrollPaddingEnd)t="end";else{if(!(n.start<=this.scrollOffset+this.options.scrollPaddingStart))return[this.scrollOffset,t];t="start"}const s="end"===t?n.end+this.options.scrollPaddingEnd:n.start-this.options.scrollPaddingStart;return[this.getOffsetForAlignment(s,t),t]},this.isDynamicMode=()=>this.measureElementCache.size>0,this.cancelScrollToIndex=()=>{null!==this.scrollToIndexTimeoutId&&(clearTimeout(this.scrollToIndexTimeoutId),this.scrollToIndexTimeoutId=null)},this.scrollToOffset=(e,{align:t="start",behavior:n}={})=>{this.cancelScrollToIndex(),"smooth"===n&&this.isDynamicMode()&&console.warn("The `smooth` scroll behavior is not fully supported with dynamic size."),this._scrollToOffset(this.getOffsetForAlignment(e,t),{adjustments:void 0,behavior:n})},this.scrollToIndex=(e,{align:t="auto",behavior:n}={})=>{e=Math.max(0,Math.min(e,this.options.count-1)),this.cancelScrollToIndex(),"smooth"===n&&this.isDynamicMode()&&console.warn("The `smooth` scroll behavior is not fully supported with dynamic size.");const[s,a]=this.getOffsetForIndex(e,t);this._scrollToOffset(s,{adjustments:void 0,behavior:n}),"smooth"!==n&&this.isDynamicMode()&&(this.scrollToIndexTimeoutId=setTimeout((()=>{this.scrollToIndexTimeoutId=null;if(this.measureElementCache.has(this.options.getItemKey(e))){const[t]=this.getOffsetForIndex(e,a);o.approxEqual(t,this.scrollOffset)||this.scrollToIndex(e,{align:a,behavior:n})}else this.scrollToIndex(e,{align:a,behavior:n})})))},this.scrollBy=(e,{behavior:t}={})=>{this.cancelScrollToIndex(),"smooth"===t&&this.isDynamicMode()&&console.warn("The `smooth` scroll behavior is not fully supported with dynamic size."),this._scrollToOffset(this.scrollOffset+e,{adjustments:void 0,behavior:t})},this.getTotalSize=()=>{var e;const t=this.getMeasurements();let n;return n=0===t.length?this.options.paddingStart:1===this.options.lanes?(null==(e=t[t.length-1])?void 0:e.end)??0:Math.max(...t.slice(-this.options.lanes).map((e=>e.end))),n-this.options.scrollMargin+this.options.paddingEnd},this._scrollToOffset=(e,{adjustments:t,behavior:n})=>{this.options.scrollToFn(e,{behavior:n,adjustments:t},this)},this.measure=()=>{this.itemSizeCache=new 
Map,this.notify(!1)},this.setOptions(e),this.scrollRect=this.options.initialRect,this.scrollOffset=this.options.initialOffset,this.measurementsCache=this.options.initialMeasurementsCache,this.measurementsCache.forEach((e=>{this.itemSizeCache.set(e.key,e.size)})),this.maybeNotify()}},t.defaultKeyExtractor=s,t.defaultRangeExtractor=a,t.elementScroll=(e,{adjustments:t=0,behavior:n},o)=>{var s,a;const l=e+t;null==(a=null==(s=o.scrollElement)?void 0:s.scrollTo)||a.call(s,{[o.options.horizontal?"left":"top"]:l,behavior:n})},t.measureElement=l,t.observeElementOffset=(e,t)=>{const n=e.scrollElement;if(!n)return;const o=()=>{t(n[e.options.horizontal?"scrollLeft":"scrollTop"])};return o(),n.addEventListener("scroll",o,{passive:!0}),()=>{n.removeEventListener("scroll",o)}},t.observeElementRect=(e,t)=>{const n=e.scrollElement;if(!n)return;const o=e=>{const{width:n,height:o}=e;t({width:Math.round(n),height:Math.round(o)})};if(o(n.getBoundingClientRect()),"undefined"===typeof ResizeObserver)return()=>{};const s=new ResizeObserver((e=>{const t=e[0];if(null==t?void 0:t.borderBoxSize){const e=t.borderBoxSize[0];if(e)return void o({width:e.inlineSize,height:e.blockSize})}o(n.getBoundingClientRect())}));return s.observe(n,{box:"border-box"}),()=>{s.unobserve(n)}},t.observeWindowOffset=(e,t)=>{const n=e.scrollElement;if(!n)return;const o=()=>{t(n[e.options.horizontal?"scrollX":"scrollY"])};return o(),n.addEventListener("scroll",o,{passive:!0}),()=>{n.removeEventListener("scroll",o)}},t.observeWindowRect=(e,t)=>{const n=e.scrollElement;if(!n)return;const o=()=>{t({width:n.innerWidth,height:n.innerHeight})};return o(),n.addEventListener("resize",o,{passive:!0}),()=>{n.removeEventListener("resize",o)}},t.windowScroll=(e,{adjustments:t=0,behavior:n},o)=>{var s,a;const l=e+t;null==(a=null==(s=o.scrollElement)?void 0:s.scrollTo)||a.call(s,{[o.options.horizontal?"left":"top"]:l,behavior:n})}},49023:(e,t)=>{"use strict";Object.defineProperty(t,Symbol.toStringTag,{value:"Module"});t.approxEqual=(e,t)=>Math.abs(e-t)<1,t.memo=function(e,t,n){let o,s=n.initialDeps??[];return()=>{var a,l,c,r;let i;n.key&&(null==(a=n.debug)?void 0:a.call(n))&&(i=Date.now());const d=e();if(!(d.length!==s.length||d.some(((e,t)=>s[t]!==e))))return o;let u;if(s=d,n.key&&(null==(l=n.debug)?void 0:l.call(n))&&(u=Date.now()),o=t(...d),n.key&&(null==(c=n.debug)?void 0:c.call(n))){const e=Math.round(100*(Date.now()-i))/100,t=Math.round(100*(Date.now()-u))/100,o=t/16,s=(e,t)=>{for(e=String(e);e.length{"use strict";var o=n(66118),s=n(2642),a=n(58380),l=n(84929),c=n(16074),r=n(87991),i=n(87836),d=n(93001),u=n(2940);function h(e,t){return(0,i.A)(e,t*u.Cg)}function v(e,t){return(0,i.A)(e,t*u.s0)}var m=n(13999);function f(e,t){const n=7*t;return(0,m.f)(e,n)}var w=n(4883);function p(e,t){const n=3*t;return(0,w.P)(e,n)}function g(e,t){return(0,w.P)(e,12*t)}var y=n(77275),b=n(43924),x=n(10648),_=n(50502),M=n(17764),z=n(14123);function B(e,t,n){const o=(0,M.c)(e,t)/7;return(0,z.u)(n?.roundingMethod)(o)}var H=n(49858);function C(e,t,n){const o=(0,H.W)(e,t)/3;return(0,z.u)(n?.roundingMethod)(o)}var V=n(33607);function O(e){const t=(0,s.a)(e);return t.setMilliseconds(0),t}function L(e){const t=(0,s.a)(e);return t.setSeconds(0,0),t}function k(e){const t=(0,s.a)(e);return t.setMinutes(0,0,0),t}var S=n(51730),A=n(92528);function E(e){const t=(0,s.a)(e);return t.setDate(1),t.setHours(0,0,0,0),t}function P(e){const t=(0,s.a)(e),n=t.getMonth(),o=n-n%3;return t.setMonth(o,1),t.setHours(0,0,0,0),t}var T=n(39204);function j(e){const t=(0,s.a)(e);return 
t.setMilliseconds(999),t}function q(e){const t=(0,s.a)(e);return t.setSeconds(59,999),t}function D(e){const t=(0,s.a)(e);return t.setMinutes(59,59,999),t}var I=n(37519),F=n(82695);function N(e,t){const n=(0,F.q)(),o=t?.weekStartsOn??t?.locale?.options?.weekStartsOn??n.weekStartsOn??n.locale?.options?.weekStartsOn??0,a=(0,s.a)(e),l=a.getDay(),c=6+(l{"use strict";n.d(t,{H:()=>s});var o=n(2940);function s(e,t){const n=t?.additionalDigits??2,s=function(e){const t={},n=e.split(a.dateTimeDelimiter);let o;if(n.length>2)return t;/:/.test(n[0])?o=n[0]:(t.date=n[0],o=n[1],a.timeZoneDelimiter.test(t.date)&&(t.date=e.split(a.timeZoneDelimiter)[0],o=e.substr(t.date.length,e.length)));if(o){const e=a.timezone.exec(o);e?(t.time=o.replace(e[1],""),t.timezone=e[1]):t.time=o}return t}(e);let v;if(s.date){const e=function(e,t){const n=new RegExp("^(?:(\\d{4}|[+-]\\d{"+(4+t)+"})|(\\d{2}|[+-]\\d{"+(2+t)+"})$)"),o=e.match(n);if(!o)return{year:NaN,restDateString:""};const s=o[1]?parseInt(o[1]):null,a=o[2]?parseInt(o[2]):null;return{year:null===a?s:100*a,restDateString:e.slice((o[1]||o[2]).length)}}(s.date,n);v=function(e,t){if(null===t)return new Date(NaN);const n=e.match(l);if(!n)return new Date(NaN);const o=!!n[4],s=i(n[1]),a=i(n[2])-1,c=i(n[3]),r=i(n[4]),d=i(n[5])-1;if(o)return function(e,t,n){return t>=1&&t<=53&&n>=0&&n<=6}(0,r,d)?function(e,t,n){const o=new Date(0);o.setUTCFullYear(e,0,4);const s=o.getUTCDay()||7,a=7*(t-1)+n+1-s;return o.setUTCDate(o.getUTCDate()+a),o}(t,r,d):new Date(NaN);{const e=new Date(0);return function(e,t,n){return t>=0&&t<=11&&n>=1&&n<=(u[t]||(h(e)?29:28))}(t,a,c)&&function(e,t){return t>=1&&t<=(h(e)?366:365)}(t,s)?(e.setUTCFullYear(t,a,Math.max(s,c)),e):new Date(NaN)}}(e.restDateString,e.year)}if(!v||isNaN(v.getTime()))return new Date(NaN);const m=v.getTime();let f,w=0;if(s.time&&(w=function(e){const t=e.match(c);if(!t)return NaN;const n=d(t[1]),s=d(t[2]),a=d(t[3]);if(!function(e,t,n){if(24===e)return 0===t&&0===n;return n>=0&&n<60&&t>=0&&t<60&&e>=0&&e<25}(n,s,a))return NaN;return n*o.s0+s*o.Cg+1e3*a}(s.time),isNaN(w)))return new Date(NaN);if(!s.timezone){const e=new Date(m+w),t=new Date(0);return t.setFullYear(e.getUTCFullYear(),e.getUTCMonth(),e.getUTCDate()),t.setHours(e.getUTCHours(),e.getUTCMinutes(),e.getUTCSeconds(),e.getUTCMilliseconds()),t}return f=function(e){if("Z"===e)return 0;const t=e.match(r);if(!t)return 0;const n="+"===t[1]?-1:1,s=parseInt(t[2]),a=t[3]&&parseInt(t[3])||0;if(!function(e,t){return t>=0&&t<=59}(0,a))return NaN;return n*(s*o.s0+a*o.Cg)}(s.timezone),isNaN(f)?new Date(NaN):new Date(m+w+f)}const a={dateTimeDelimiter:/[T ]/,timeZoneDelimiter:/[Z ]/i,timezone:/([Z+-].*)$/},l=/^-?(?:(\d{3})|(\d{2})(?:-?(\d{2}))?|W(\d{2})(?:-?(\d{1}))?|)$/,c=/^(\d{2}(?:[.,]\d*)?)(?::?(\d{2}(?:[.,]\d*)?))?(?::?(\d{2}(?:[.,]\d*)?))?$/,r=/^([+-])(\d{2})(?::?(\d{2}))?$/;function i(e){return e?parseInt(e):1}function d(e){return e&&parseFloat(e.replace(",","."))||0}const u=[31,null,31,30,31,30,31,31,30,31,30,31];function h(e){return e%400===0||e%4===0&&e%100!==0}}}]); \ No newline at end of file diff --git a/src/web/gui/v2/5709.c494eb62187917e2f2f6.chunk.js.LICENSE.txt b/src/web/gui/v2/5709.c494eb62187917e2f2f6.chunk.js.LICENSE.txt deleted file mode 100644 index 131763fa3..000000000 --- a/src/web/gui/v2/5709.c494eb62187917e2f2f6.chunk.js.LICENSE.txt +++ /dev/null @@ -1,12 +0,0 @@ -/*! -* chartjs-plugin-zoom v2.0.1 -* undefined - * (c) 2016-2023 chartjs-plugin-zoom Contributors - * Released under the MIT License - */ - -/*! 
Hammer.JS - v2.0.7 - 2016-04-22 - * http://hammerjs.github.io/ - * - * Copyright (c) 2016 Jorik Tangelder; - * Licensed under the MIT license */ diff --git a/src/web/gui/v2/5794.252ff787d58d64eb4988.chunk.js b/src/web/gui/v2/5794.252ff787d58d64eb4988.chunk.js deleted file mode 100644 index 1a31525ac..000000000 --- a/src/web/gui/v2/5794.252ff787d58d64eb4988.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var t="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},e=(new Error).stack;e&&(t._sentryDebugIds=t._sentryDebugIds||{},t._sentryDebugIds[e]="e1a60162-ea92-401a-9f7e-593012b55769",t._sentryDebugIdIdentifier="sentry-dbid-e1a60162-ea92-401a-9f7e-593012b55769")}catch(t){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[5794],{86027:(t,e,n)=>{Object.defineProperty(e,"__esModule",{value:!0}),Object.defineProperty(e,"DraggableCore",{enumerable:!0,get:function(){return d.default}}),e.default=void 0;var o=function(t,e){if(!e&&t&&t.__esModule)return t;if(null===t||"object"!==typeof t&&"function"!==typeof t)return{default:t};var n=p(e);if(n&&n.has(t))return n.get(t);var o={},r=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var a in t)if("default"!==a&&Object.prototype.hasOwnProperty.call(t,a)){var i=r?Object.getOwnPropertyDescriptor(t,a):null;i&&(i.get||i.set)?Object.defineProperty(o,a,i):o[a]=t[a]}o.default=t,n&&n.set(t,o);return o}(n(96540)),r=f(n(5556)),a=f(n(40961)),i=f(n(20053)),s=n(71089),l=n(81726),u=n(77056),d=f(n(6888)),c=f(n(18696));function f(t){return t&&t.__esModule?t:{default:t}}function p(t){if("function"!==typeof WeakMap)return null;var e=new WeakMap,n=new WeakMap;return(p=function(t){return t?n:e})(t)}function g(){return g=Object.assign?Object.assign.bind():function(t){for(var e=1;e{(0,c.default)("Draggable: onDragStart: %j",e);if(!1===this.props.onStart(t,(0,l.createDraggableData)(this,e)))return!1;this.setState({dragging:!0,dragged:!0})})),h(this,"onDrag",((t,e)=>{if(!this.state.dragging)return!1;(0,c.default)("Draggable: onDrag: %j",e);const n=(0,l.createDraggableData)(this,e),o={x:n.x,y:n.y,slackX:0,slackY:0};if(this.props.bounds){const{x:t,y:e}=o;o.x+=this.state.slackX,o.y+=this.state.slackY;const[r,a]=(0,l.getBoundPosition)(this,o.x,o.y);o.x=r,o.y=a,o.slackX=this.state.slackX+(t-o.x),o.slackY=this.state.slackY+(e-o.y),n.x=o.x,n.y=o.y,n.deltaX=o.x-this.state.x,n.deltaY=o.y-this.state.y}if(!1===this.props.onDrag(t,n))return!1;this.setState(o)})),h(this,"onDragStop",((t,e)=>{if(!this.state.dragging)return!1;if(!1===this.props.onStop(t,(0,l.createDraggableData)(this,e)))return!1;(0,c.default)("Draggable: onDragStop: %j",e);const n={dragging:!1,slackX:0,slackY:0};if(Boolean(this.props.position)){const{x:t,y:e}=this.props.position;n.x=t,n.y=e}this.setState(n)})),this.state={dragging:!1,dragged:!1,x:t.position?t.position.x:t.defaultPosition.x,y:t.position?t.position.y:t.defaultPosition.y,prevPropsPosition:{...t.position},slackX:0,slackY:0,isElementSVG:!1},!t.position||t.onDrag||t.onStop||console.warn("A `position` was applied to this , without drag handlers. This will make this component effectively undraggable. 
Please attach `onDrag` or `onStop` handlers so you can adjust the `position` of this element.")}componentDidMount(){"undefined"!==typeof window.SVGElement&&this.findDOMNode()instanceof window.SVGElement&&this.setState({isElementSVG:!0})}componentWillUnmount(){this.setState({dragging:!1})}findDOMNode(){var t,e;return null!==(t=null===(e=this.props)||void 0===e||null===(e=e.nodeRef)||void 0===e?void 0:e.current)&&void 0!==t?t:a.default.findDOMNode(this)}render(){const{axis:t,bounds:e,children:n,defaultPosition:r,defaultClassName:a,defaultClassNameDragging:u,defaultClassNameDragged:c,position:f,positionOffset:p,scale:h,...m}=this.props;let y={},b=null;const v=!Boolean(f)||this.state.dragging,D=f||r,S={x:(0,l.canDragX)(this)&&v?this.state.x:D.x,y:(0,l.canDragY)(this)&&v?this.state.y:D.y};this.state.isElementSVG?b=(0,s.createSVGTransform)(S,p):y=(0,s.createCSSTransform)(S,p);const w=(0,i.default)(n.props.className||"",a,{[u]:this.state.dragging,[c]:this.state.dragged});return o.createElement(d.default,g({},m,{onStart:this.onDragStart,onDrag:this.onDrag,onStop:this.onDragStop}),o.cloneElement(o.Children.only(n),{className:w,style:{...n.props.style,...y},transform:b}))}}e.default=m,h(m,"displayName","Draggable"),h(m,"propTypes",{...d.default.propTypes,axis:r.default.oneOf(["both","x","y","none"]),bounds:r.default.oneOfType([r.default.shape({left:r.default.number,right:r.default.number,top:r.default.number,bottom:r.default.number}),r.default.string,r.default.oneOf([!1])]),defaultClassName:r.default.string,defaultClassNameDragging:r.default.string,defaultClassNameDragged:r.default.string,defaultPosition:r.default.shape({x:r.default.number,y:r.default.number}),positionOffset:r.default.shape({x:r.default.oneOfType([r.default.number,r.default.string]),y:r.default.oneOfType([r.default.number,r.default.string])}),position:r.default.shape({x:r.default.number,y:r.default.number}),className:u.dontSetMe,style:u.dontSetMe,transform:u.dontSetMe}),h(m,"defaultProps",{...d.default.defaultProps,axis:"both",bounds:!1,defaultClassName:"react-draggable",defaultClassNameDragging:"react-draggable-dragging",defaultClassNameDragged:"react-draggable-dragged",defaultPosition:{x:0,y:0},scale:1})},6888:(t,e,n)=>{Object.defineProperty(e,"__esModule",{value:!0}),e.default=void 0;var o=function(t,e){if(!e&&t&&t.__esModule)return t;if(null===t||"object"!==typeof t&&"function"!==typeof t)return{default:t};var n=c(e);if(n&&n.has(t))return n.get(t);var o={},r=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var a in t)if("default"!==a&&Object.prototype.hasOwnProperty.call(t,a)){var i=r?Object.getOwnPropertyDescriptor(t,a):null;i&&(i.get||i.set)?Object.defineProperty(o,a,i):o[a]=t[a]}o.default=t,n&&n.set(t,o);return o}(n(96540)),r=d(n(5556)),a=d(n(40961)),i=n(71089),s=n(81726),l=n(77056),u=d(n(18696));function d(t){return t&&t.__esModule?t:{default:t}}function c(t){if("function"!==typeof WeakMap)return null;var e=new WeakMap,n=new WeakMap;return(c=function(t){return t?n:e})(t)}function f(t,e,n){return(e=function(t){var e=function(t,e){if("object"!==typeof t||null===t)return t;var n=t[Symbol.toPrimitive];if(void 0!==n){var o=n.call(t,e||"default");if("object"!==typeof o)return o;throw new TypeError("@@toPrimitive must return a primitive value.")}return("string"===e?String:Number)(t)}(t,"string");return"symbol"===typeof e?e:String(e)}(e))in t?Object.defineProperty(t,e,{value:n,enumerable:!0,configurable:!0,writable:!0}):t[e]=n,t}const 
p={start:"touchstart",move:"touchmove",stop:"touchend"},g={start:"mousedown",move:"mousemove",stop:"mouseup"};let h=g;class m extends o.Component{constructor(){super(...arguments),f(this,"dragging",!1),f(this,"lastX",NaN),f(this,"lastY",NaN),f(this,"touchIdentifier",null),f(this,"mounted",!1),f(this,"handleDragStart",(t=>{if(this.props.onMouseDown(t),!this.props.allowAnyClick&&"number"===typeof t.button&&0!==t.button)return!1;const e=this.findDOMNode();if(!e||!e.ownerDocument||!e.ownerDocument.body)throw new Error(" not mounted on DragStart!");const{ownerDocument:n}=e;if(this.props.disabled||!(t.target instanceof n.defaultView.Node)||this.props.handle&&!(0,i.matchesSelectorAndParentsTo)(t.target,this.props.handle,e)||this.props.cancel&&(0,i.matchesSelectorAndParentsTo)(t.target,this.props.cancel,e))return;"touchstart"===t.type&&t.preventDefault();const o=(0,i.getTouchIdentifier)(t);this.touchIdentifier=o;const r=(0,s.getControlPosition)(t,o,this);if(null==r)return;const{x:a,y:l}=r,d=(0,s.createCoreData)(this,a,l);(0,u.default)("DraggableCore: handleDragStart: %j",d),(0,u.default)("calling",this.props.onStart);!1!==this.props.onStart(t,d)&&!1!==this.mounted&&(this.props.enableUserSelectHack&&(0,i.addUserSelectStyles)(n),this.dragging=!0,this.lastX=a,this.lastY=l,(0,i.addEvent)(n,h.move,this.handleDrag),(0,i.addEvent)(n,h.stop,this.handleDragStop))})),f(this,"handleDrag",(t=>{const e=(0,s.getControlPosition)(t,this.touchIdentifier,this);if(null==e)return;let{x:n,y:o}=e;if(Array.isArray(this.props.grid)){let t=n-this.lastX,e=o-this.lastY;if([t,e]=(0,s.snapToGrid)(this.props.grid,t,e),!t&&!e)return;n=this.lastX+t,o=this.lastY+e}const r=(0,s.createCoreData)(this,n,o);(0,u.default)("DraggableCore: handleDrag: %j",r);if(!1!==this.props.onDrag(t,r)&&!1!==this.mounted)this.lastX=n,this.lastY=o;else try{this.handleDragStop(new MouseEvent("mouseup"))}catch(a){const t=document.createEvent("MouseEvents");t.initMouseEvent("mouseup",!0,!0,window,0,0,0,0,0,!1,!1,!1,!1,0,null),this.handleDragStop(t)}})),f(this,"handleDragStop",(t=>{if(!this.dragging)return;const e=(0,s.getControlPosition)(t,this.touchIdentifier,this);if(null==e)return;let{x:n,y:o}=e;if(Array.isArray(this.props.grid)){let t=n-this.lastX||0,e=o-this.lastY||0;[t,e]=(0,s.snapToGrid)(this.props.grid,t,e),n=this.lastX+t,o=this.lastY+e}const r=(0,s.createCoreData)(this,n,o);if(!1===this.props.onStop(t,r)||!1===this.mounted)return!1;const a=this.findDOMNode();a&&this.props.enableUserSelectHack&&(0,i.removeUserSelectStyles)(a.ownerDocument),(0,u.default)("DraggableCore: handleDragStop: %j",r),this.dragging=!1,this.lastX=NaN,this.lastY=NaN,a&&((0,u.default)("DraggableCore: Removing handlers"),(0,i.removeEvent)(a.ownerDocument,h.move,this.handleDrag),(0,i.removeEvent)(a.ownerDocument,h.stop,this.handleDragStop))})),f(this,"onMouseDown",(t=>(h=g,this.handleDragStart(t)))),f(this,"onMouseUp",(t=>(h=g,this.handleDragStop(t)))),f(this,"onTouchStart",(t=>(h=p,this.handleDragStart(t)))),f(this,"onTouchEnd",(t=>(h=p,this.handleDragStop(t))))}componentDidMount(){this.mounted=!0;const t=this.findDOMNode();t&&(0,i.addEvent)(t,p.start,this.onTouchStart,{passive:!1})}componentWillUnmount(){this.mounted=!1;const 
t=this.findDOMNode();if(t){const{ownerDocument:e}=t;(0,i.removeEvent)(e,g.move,this.handleDrag),(0,i.removeEvent)(e,p.move,this.handleDrag),(0,i.removeEvent)(e,g.stop,this.handleDragStop),(0,i.removeEvent)(e,p.stop,this.handleDragStop),(0,i.removeEvent)(t,p.start,this.onTouchStart,{passive:!1}),this.props.enableUserSelectHack&&(0,i.removeUserSelectStyles)(e)}}findDOMNode(){var t,e;return null!==(t=this.props)&&void 0!==t&&t.nodeRef?null===(e=this.props)||void 0===e||null===(e=e.nodeRef)||void 0===e?void 0:e.current:a.default.findDOMNode(this)}render(){return o.cloneElement(o.Children.only(this.props.children),{onMouseDown:this.onMouseDown,onMouseUp:this.onMouseUp,onTouchEnd:this.onTouchEnd})}}e.default=m,f(m,"displayName","DraggableCore"),f(m,"propTypes",{allowAnyClick:r.default.bool,children:r.default.node.isRequired,disabled:r.default.bool,enableUserSelectHack:r.default.bool,offsetParent:function(t,e){if(t[e]&&1!==t[e].nodeType)throw new Error("Draggable's offsetParent must be a DOM Node.")},grid:r.default.arrayOf(r.default.number),handle:r.default.string,cancel:r.default.string,nodeRef:r.default.object,onStart:r.default.func,onDrag:r.default.func,onStop:r.default.func,onMouseDown:r.default.func,scale:r.default.number,className:l.dontSetMe,style:l.dontSetMe,transform:l.dontSetMe}),f(m,"defaultProps",{allowAnyClick:!1,disabled:!1,enableUserSelectHack:!0,onStart:function(){},onDrag:function(){},onStop:function(){},onMouseDown:function(){},scale:1})},55794:(t,e,n)=>{const{default:o,DraggableCore:r}=n(86027);t.exports=o,t.exports.default=o,t.exports.DraggableCore=r},71089:(t,e,n)=>{Object.defineProperty(e,"__esModule",{value:!0}),e.addClassName=u,e.addEvent=function(t,e,n,o){if(!t)return;const r={capture:!0,...o};t.addEventListener?t.addEventListener(e,n,r):t.attachEvent?t.attachEvent("on"+e,n):t["on"+e]=n},e.addUserSelectStyles=function(t){if(!t)return;let e=t.getElementById("react-draggable-style-el");e||(e=t.createElement("style"),e.type="text/css",e.id="react-draggable-style-el",e.innerHTML=".react-draggable-transparent-selection *::-moz-selection {all: inherit;}\n",e.innerHTML+=".react-draggable-transparent-selection *::selection {all: inherit;}\n",t.getElementsByTagName("head")[0].appendChild(e));t.body&&u(t.body,"react-draggable-transparent-selection")},e.createCSSTransform=function(t,e){const n=l(t,e,"px");return{[(0,r.browserPrefixToKey)("transform",r.default)]:n}},e.createSVGTransform=function(t,e){return l(t,e,"")},e.getTouch=function(t,e){return t.targetTouches&&(0,o.findInArray)(t.targetTouches,(t=>e===t.identifier))||t.changedTouches&&(0,o.findInArray)(t.changedTouches,(t=>e===t.identifier))},e.getTouchIdentifier=function(t){if(t.targetTouches&&t.targetTouches[0])return t.targetTouches[0].identifier;if(t.changedTouches&&t.changedTouches[0])return t.changedTouches[0].identifier},e.getTranslation=l,e.innerHeight=function(t){let e=t.clientHeight;const n=t.ownerDocument.defaultView.getComputedStyle(t);return e-=(0,o.int)(n.paddingTop),e-=(0,o.int)(n.paddingBottom),e},e.innerWidth=function(t){let e=t.clientWidth;const n=t.ownerDocument.defaultView.getComputedStyle(t);return e-=(0,o.int)(n.paddingLeft),e-=(0,o.int)(n.paddingRight),e},e.matchesSelector=s,e.matchesSelectorAndParentsTo=function(t,e,n){let o=t;do{if(s(o,e))return!0;if(o===n)return!1;o=o.parentNode}while(o);return!1},e.offsetXYFromParent=function(t,e,n){const 
o=e===e.ownerDocument.body?{left:0,top:0}:e.getBoundingClientRect(),r=(t.clientX+e.scrollLeft-o.left)/n,a=(t.clientY+e.scrollTop-o.top)/n;return{x:r,y:a}},e.outerHeight=function(t){let e=t.clientHeight;const n=t.ownerDocument.defaultView.getComputedStyle(t);return e+=(0,o.int)(n.borderTopWidth),e+=(0,o.int)(n.borderBottomWidth),e},e.outerWidth=function(t){let e=t.clientWidth;const n=t.ownerDocument.defaultView.getComputedStyle(t);return e+=(0,o.int)(n.borderLeftWidth),e+=(0,o.int)(n.borderRightWidth),e},e.removeClassName=d,e.removeEvent=function(t,e,n,o){if(!t)return;const r={capture:!0,...o};t.removeEventListener?t.removeEventListener(e,n,r):t.detachEvent?t.detachEvent("on"+e,n):t["on"+e]=null},e.removeUserSelectStyles=function(t){if(!t)return;try{if(t.body&&d(t.body,"react-draggable-transparent-selection"),t.selection)t.selection.empty();else{const e=(t.defaultView||window).getSelection();e&&"Caret"!==e.type&&e.removeAllRanges()}}catch(e){}};var o=n(77056),r=function(t,e){if(!e&&t&&t.__esModule)return t;if(null===t||"object"!==typeof t&&"function"!==typeof t)return{default:t};var n=a(e);if(n&&n.has(t))return n.get(t);var o={},r=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var i in t)if("default"!==i&&Object.prototype.hasOwnProperty.call(t,i)){var s=r?Object.getOwnPropertyDescriptor(t,i):null;s&&(s.get||s.set)?Object.defineProperty(o,i,s):o[i]=t[i]}o.default=t,n&&n.set(t,o);return o}(n(33514));function a(t){if("function"!==typeof WeakMap)return null;var e=new WeakMap,n=new WeakMap;return(a=function(t){return t?n:e})(t)}let i="";function s(t,e){return i||(i=(0,o.findInArray)(["matches","webkitMatchesSelector","mozMatchesSelector","msMatchesSelector","oMatchesSelector"],(function(e){return(0,o.isFunction)(t[e])}))),!!(0,o.isFunction)(t[i])&&t[i](e)}function l(t,e,n){let{x:o,y:r}=t,a="translate(".concat(o).concat(n,",").concat(r).concat(n,")");if(e){const t="".concat("string"===typeof e.x?e.x:e.x+n),o="".concat("string"===typeof e.y?e.y:e.y+n);a="translate(".concat(t,", ").concat(o,")")+a}return a}function u(t,e){t.classList?t.classList.add(e):t.className.match(new RegExp("(?:^|\\s)".concat(e,"(?!\\S)")))||(t.className+=" ".concat(e))}function d(t,e){t.classList?t.classList.remove(e):t.className=t.className.replace(new RegExp("(?:^|\\s)".concat(e,"(?!\\S)"),"g"),"")}},33514:(t,e)=>{Object.defineProperty(e,"__esModule",{value:!0}),e.browserPrefixToKey=r,e.browserPrefixToStyle=function(t,e){return e?"-".concat(e.toLowerCase(),"-").concat(t):t},e.default=void 0,e.getPrefix=o;const n=["Moz","Webkit","O","ms"];function o(){var t;let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:"transform";if("undefined"===typeof window)return"";const o=null===(t=window.document)||void 0===t||null===(t=t.documentElement)||void 0===t?void 0:t.style;if(!o)return"";if(e in o)return"";for(let a=0;a{Object.defineProperty(e,"__esModule",{value:!0}),e.default=function(){0}},81726:(t,e,n)=>{Object.defineProperty(e,"__esModule",{value:!0}),e.canDragX=function(t){return"both"===t.props.axis||"x"===t.props.axis},e.canDragY=function(t){return"both"===t.props.axis||"y"===t.props.axis},e.createCoreData=function(t,e,n){const r=!(0,o.isNum)(t.lastX),i=a(t);return r?{node:i,deltaX:0,deltaY:0,lastX:e,lastY:n,x:e,y:n}:{node:i,deltaX:e-t.lastX,deltaY:n-t.lastY,lastX:t.lastX,lastY:t.lastY,x:e,y:n}},e.createDraggableData=function(t,e){const 
n=t.props.scale;return{node:e.node,x:t.state.x+e.deltaX/n,y:t.state.y+e.deltaY/n,deltaX:e.deltaX/n,deltaY:e.deltaY/n,lastX:t.state.x,lastY:t.state.y}},e.getBoundPosition=function(t,e,n){if(!t.props.bounds)return[e,n];let{bounds:i}=t.props;i="string"===typeof i?i:function(t){return{left:t.left,top:t.top,right:t.right,bottom:t.bottom}}(i);const s=a(t);if("string"===typeof i){const{ownerDocument:t}=s,e=t.defaultView;let n;if(n="parent"===i?s.parentNode:t.querySelector(i),!(n instanceof e.HTMLElement))throw new Error('Bounds selector "'+i+'" could not find an element.');const a=n,l=e.getComputedStyle(s),u=e.getComputedStyle(a);i={left:-s.offsetLeft+(0,o.int)(u.paddingLeft)+(0,o.int)(l.marginLeft),top:-s.offsetTop+(0,o.int)(u.paddingTop)+(0,o.int)(l.marginTop),right:(0,r.innerWidth)(a)-(0,r.outerWidth)(s)-s.offsetLeft+(0,o.int)(u.paddingRight)-(0,o.int)(l.marginRight),bottom:(0,r.innerHeight)(a)-(0,r.outerHeight)(s)-s.offsetTop+(0,o.int)(u.paddingBottom)-(0,o.int)(l.marginBottom)}}(0,o.isNum)(i.right)&&(e=Math.min(e,i.right));(0,o.isNum)(i.bottom)&&(n=Math.min(n,i.bottom));(0,o.isNum)(i.left)&&(e=Math.max(e,i.left));(0,o.isNum)(i.top)&&(n=Math.max(n,i.top));return[e,n]},e.getControlPosition=function(t,e,n){const o="number"===typeof e?(0,r.getTouch)(t,e):null;if("number"===typeof e&&!o)return null;const i=a(n),s=n.props.offsetParent||i.offsetParent||i.ownerDocument.body;return(0,r.offsetXYFromParent)(o||t,s,n.props.scale)},e.snapToGrid=function(t,e,n){const o=Math.round(e/t[0])*t[0],r=Math.round(n/t[1])*t[1];return[o,r]};var o=n(77056),r=n(71089);function a(t){const e=t.findDOMNode();if(!e)throw new Error(": Unmounted during event!");return e}},77056:(t,e)=>{Object.defineProperty(e,"__esModule",{value:!0}),e.dontSetMe=function(t,e,n){if(t[e])return new Error("Invalid prop ".concat(e," passed to ").concat(n," - do not set this, set it on the child."))},e.findInArray=function(t,e){for(let n=0,o=t.length;n{r.d(t,{A:()=>ce});var n=r(58168),o=r(96540),a=(r(62953),r(8711)),i=r(83199),d=r(86652),c=r(38257),s=r(45463),l=r(62302),u=(r(17333),r(3064),r(41393),r(14905),r(98992),r(54520),r(72577),r(81454),r(8872),r(45467)),g=r(65570),m=r(40961),h=r(43375),p=r(43627),f=r(34604),b=r(44554),I=r(42629),v=r(74979),y=r(13692);const x=e=>(0,p.uU)({...e,wasDragging:!0}),E=e=>{var t;let{id:r,items:a,containerWidth:i,rowHeight:d,gutter:c,Component:s,isSortingContainer:l,onRemove:u,onItemRemove:g,Item:m,gridTracks:h,transforming:f,containerDndProps:I,itemProps:E,dragOverlay:C,rootId:w,...k}=e;const{active:A,attributes:S,isDragging:D,listeners:P,over:R,setNodeRef:_,transition:T,transform:O}=(0,p.gl)({id:C?"dragOverlay-".concat(r):r,disabled:C,data:{rootId:w,isContainer:!0,items:a,containerId:r,dropArea:!0,...I},animateLayoutChanges:x}),B=!!R&&(r===R.id&&"container"!==(null===A||void 0===A||null===(t=A.data.current)||void 0===t?void 0:t.type)||a.includes(R.id)),H=(0,b.Hp)(a),N=H*d+(H-1)*c+"px";return o.createElement(s,(0,n.A)({ref:_,style:C?{width:i,height:N}:{transition:T,transform:v.Ks.Translate.toString(O),opacity:D?.5:void 
0,width:i,height:N},hover:B,onRemove:u,attributes:S,listeners:P},k,{itemProps:E,id:r,dragOverlay:C}),a.map(((e,t)=>o.createElement(y.A,(0,n.A)({disabled:l,key:e.id},e,{index:t,containerId:r,Item:m,onRemove:g,draggable:!0,containerWidth:i,rowHeight:d,gridTracks:h,gutter:c,transformed:f?a[t]:null,itemProps:E,containerDragOverlay:C,rootId:w,containerDndProps:I,draggableProps:e})))))},C=e=>{let{containers:t,Container:r,containerWidth:n,isSortingContainer:a,items:i,Item:d,transforming:c,onContainerRemove:s,onRemove:l,rowHeight:u,gridTracks:g,gutter:m,containerDndProps:h,itemProps:p,rootId:f}=e;return t.map((e=>o.createElement(E,{key:e,id:e,items:i[e],onRemove:()=>s(e),containerWidth:n,rowHeight:u,gridTracks:g,gutter:m,Component:r,Item:d,isSortingContainer:a,transforming:c,onItemRemove:l,containerDndProps:h,itemProps:p,rootId:f})))},w={sideEffects(e){let{active:t}=e;t.node.animate([{opacity:0},{opacity:1}],{easing:"ease-in",duration:250})}},k="TRASH_ID",A="PLACEHOLDER_ID",S=e=>{let{id:t}=e;const{setNodeRef:r,isOver:n}=(0,h.zM)({id:t,data:{dropArea:!0,isTrash:!0}});return o.createElement("div",{ref:r,style:{display:"flex",alignItems:"center",justifyContent:"center",position:"fixed",left:"50%",marginLeft:-150,bottom:20,width:300,height:60,borderRadius:5,border:"1px solid",borderColor:n?"red":"#DDD"}},"Drop here to delete")},D=e=>{let{rootId:t,items:r,containers:a,trashable:i=!0,onRemove:d,Item:c,containerWidth:s=800,rowHeight:l=40,gridTracks:v=12,gutter:x=4,allowOverlap:D=!1,compactType:P="vertical",maxRows:R=3e5,Container:_=f.A,onDragEnd:T,containerDndProps:O,itemProps:B,updateOnNewProps:H=!1,rearrangeable:N=!0}=e;const{active:L}=(0,h.fF)(),F=(0,o.useRef)(null),[W,M]=(0,o.useState)((()=>(a||Object.keys(r)).reduce(((e,t)=>(e[t]=(0,b.oE)((0,b.Su)(r[t]),P,v),e)),{})),{}),[j,z]=(0,o.useState)((()=>a||Object.keys(W)));(0,u.A)((()=>{M((a||Object.keys(r)).reduce(((e,t)=>(e[t]=(0,b.oE)((0,b.Su)(r[t]),P,v),e)),{}))}),[v]),(0,u.A)((()=>{H&&((0,g.Ay)(r,W)||(M((a||Object.keys(r)).reduce(((e,t)=>(e[t]=(0,b.oE)((0,b.Su)(r[t]),P,v),e)),{})),z(a)))}),[a,r]);const G=null===L||void 0===L?void 0:L.isContainer,[U,q]=(0,o.useState)(null),[J,Z]=(0,o.useState)(null),$=(0,o.useRef)(null),K=e=>e in W?e:null,Q=()=>{U&&M(U),q(null),Z(null),$.current=null},Y=()=>{const e=Object.keys(W),t=e[e.length-1];return String.fromCharCode(t.charCodeAt(0)+1)},V=e=>{let{active:t}=e;const{isContainer:r}=t.data.current;q(W),r||Z(W)};return(0,o.useLayoutEffect)((()=>{if(L)return V({active:L}),Q}),[]),(0,h.E5)({onDragStart:V,onDragMove:e=>{let{active:t,over:r,collisions:n}=e;const{isResizer:o,isContainer:a,itemId:i,itemContainerId:d}=t.data.current;if(a||!J)return;const{initial:c,translated:u}=t.rect.current;let g=(0,b.Su)(J[d]);const m=!J[d],h=(0,b.GN)(m?J:g,i,m)||(0,b.lc)(t.data.current);if(!h||!h.width)return;const p=null===r||void 0===r?void 0:r.id;if(o){if(!p)return;const e=r.rect.width+(u.right-c.right),t=r.rect.height+(u.bottom-c.bottom);let{width:o,height:a}=(0,I.tr)({gutter:x,maxRows:R,gridTracks:v,rowHeight:l,containerWidth:s},e,t,h);if(h.width===o&&h.height===a)return;if(!n)return;return g=g.map((e=>e.id===h.id?{...h,width:o,height:a}:e)),n=n.filter((e=>e.id!==h.id)),g=(0,b.oE)(g,P,v),$.current={...h,itemContainerId:d},void Z((e=>({...e,[d]:[...g]})))}if(p===k)return void Z(U);if(!N)return;const f=K(d),y=K(p)||r&&K(r.data.current.itemContainerId)||f;if(!y)return;if(y&&y===f||(g=[...J[y],h]),!n)return;const 
E=n.find((e=>e.id===y));if(!E)return;let{left:C,top:w}=(0,I.zA)({gutter:x,maxRows:R,gridTracks:v,rowHeight:l,containerWidth:s},u.top-E.data.droppableContainer.rect.current.top,u.left-E.data.droppableContainer.rect.current.left,h);if(h.top===w&&h.left===C&&f===y)return;g=(0,b.Pe)(g,h,C,w,!0,!1,P,v,D),g=(0,b.oE)(g,P,v);const A=y!==f;A&&(F.current=t.itemContainerId),$.current={...h,itemContainerId:y},Z((e=>({...e,[y]:g.filter((e=>!!e)),...A&&!!e[d]&&{[d]:(0,b.oE)(e[d].filter((e=>e.id!==h.id)),P,v)}})))},onDragEnd:e=>{let{active:t,over:r}=e;if(!J)return;const{isResizer:n,isItem:o}=t.data.current,a=null===r||void 0===r?void 0:r.id;if(null===a)return void Z(null);if(a===k)return M((e=>({...e,[t.containerId]:(0,b.oE)(e[t.containerId].filter((e=>{let{id:r}=e;return r!==t.itemId})),P,v)}))),void Z(null);if(o||n)return J&&M(J),Z(null),void T(J,$.current,j);t.id in W&&null!==r&&void 0!==r&&r.id&&z((e=>{const n=e.indexOf(t.id),o=e.indexOf(r.id),a=(0,p.be)(e,n,o);return T(J,$.current,a),a}));const i=K(t.id);if(i)if(a!==A)J&&M(J),Z(null),T(J,$.current,j);else{const e=Y();(0,m.unstable_batchedUpdates)((()=>{z((t=>[...t,e])),M((r=>({...r,[i]:r[i].filter((e=>e!==t.itemId)),[e]:[t.id]})))}))}},onDragCancel:Q}),J||W?o.createElement(o.Fragment,null,o.createElement(p.gB,{items:[...j,A],strategy:p._G},o.createElement(C,{rootId:t,containers:j,Container:_,containerWidth:s,isSortingContainer:G,items:J||W,transforming:null!==J,Item:c,onContainerRemove:e=>{z((t=>t.filter((t=>t!==e))))},onRemove:d,rowHeight:l,gridTracks:v,gutter:x,itemProps:B,containerDndProps:O})),(0,m.createPortal)(o.createElement(h.Hd,{adjustScale:!1,dropAnimation:w},L&&L.data.current.rootId===t?L.data.current.isContainer?(X=L.data.current,o.createElement(E,{rootId:t,id:X,items:J||W,containerWidth:s,rowHeight:l,gutter:x,Component:_,Item:c,gridTracks:v,containerDndProps:O,itemProps:B,dragOverlay:!0})):L.data.current.isItem?(e=>{const r=(J||W)[e.itemContainerId];if(!r)return null;const a=r.find((t=>t.id===e.itemId));return a?o.createElement(y.A,(0,n.A)({rootId:t},a,{containerId:e.itemContainerId,Item:c,onRemove:d,draggable:!0,containerWidth:s,rowHeight:l,gridTracks:v,gutter:x,dragOverlay:!0,itemProps:B})):null})(L.data.current):null:null),document.body),i&&L&&(L.isContainer||L.isItem)?o.createElement(S,{id:k}):null):null;var X},P=a.default.div.withConfig({displayName:"gridLayout__Div",componentId:"sc-jna1fu-0"})(["position:relative;"]),R=(0,o.forwardRef)(((e,t)=>{let{id:r,onRemove:a,children:c,attributes:s,listeners:l,itemProps:u={},itemProps:{dashboardId:g},style:m,dragOverlay:h,...p}=e;(0,d.QZ)(g,r);return o.createElement(i.Flex,(0,n.A)({},p,{ref:t,column:!0,flex:!0,round:!0,background:"mainBackground",padding:[3],gap:2}),o.createElement(P,{style:m},c))})),_=e=>{let{id:t,Component:r,containerWidth:a,onActiveSubMenuId:u,onActiveMenuGroupId:g,...m}=e;const[h,p]=(0,d.B1)(t),[f,b]=(0,d.Ix)(t),I=(0,s.A)("(max-width: 767px)"),v=(0,c.A)(),y=(0,o.useRef)(),x=(0,o.useMemo)((()=>(y.current&&y.current.cancel(),y.current=(0,l.A)(),()=>{})),[h,u,g]);return o.createElement(i.Flex,{ref:v,column:!0,overflow:{vertical:"auto"},flex:"1","data-testid":"dashboardGrid",height:"100%",onScroll:x},o.createElement(D,(0,n.A)({rootId:t,key:t,Container:R,onDragEnd:(e,t,r)=>{p(e),b(r)},containers:f,items:h,containerWidth:a-(I?16:26)},m,{Item:r,gridTracks:12,itemProps:{dashboardId:t},containerDndProps:{droppable:!0},updateOnNewProps:!0})))};var T=r(27078),O=r(58388),B=r(28973),H=r(80925);const 
N=(0,a.default)(i.Flex).attrs((e=>({height:"100%",background:"mainChartBg",round:!0,...e}))).withConfig({displayName:"container__Card",componentId:"sc-esi601-0"})(["",""],(e=>{let{dragging:t,theme:r}=e;return t&&"\n &:before {\n content: '';\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background-color: ".concat((0,i.getRgbColor)("primary",.2)({theme:r}),";\n z-index: 10;\n backdrop-filter: blur(3px);\n }\n\n ")}));var L=r(4967),F=r(36196);const W={},M=(0,o.memo)((e=>{let{dashboardId:t,id:r,handleProps:a,listeners:i,attributes:d,style:c,dragging:s,dragOverlay:l,...g}=e;const m=(0,H.e)(),{nodesScope:h,contextScope:p,...f}=(0,O.bE)(t,r)||W,b=(0,O.v_)(t,r),I=(0,o.useMemo)((()=>{const e=(0,T.My)(m,t);let n=e.getNode({id:l?"dragOverlay-".concat(r):r});if(n)return n;const[o]=p;return n=m.makeChart({attributes:{...(0,L.L)(o),id:l?"dragOverlay-".concat(r):r,cardId:r,dashboardId:t,contextScope:p,...f,...!!f.nodeId&&!h&&{nodesScope:[f.nodeId]},toolboxProps:{drag:{...a,...i,...d}}},makeTrack:T.qh}),e.appendChild(n),n}),[t,r]);return(0,u.A)((()=>{I.updateAttributes({height:c.height,width:c.width}),I.trigger("resize")}),[I,c.height,c.width]),(0,u.A)((()=>{I.updateAttributes({toolboxProps:{drag:{...a,...i,...d,dragging:s}}})}),[s]),(0,o.useLayoutEffect)((()=>(0,B.unregister)(I.onAttributesChange(["aggregationMethod","groupBy","groupByLabel","postAggregationMethod","postGroupBy","postGroupByLabel","groupingMethod","groupingTime","chartType","chartLibrary","selectedDimensions","selectedLabels","selectedNodes","selectedInstances","sparkline","selectedLegendDimensions","showingInfo","dimensionsSortBy","instancesSortBy","nodesSortBy","groupBySortBy","labelsSortBy","dimensionsSort","nodesExpanded","groupByExpanded","labelsExpanded","expanded","staticZones","title","description","showPostAggregations"],((e,t,r)=>b((t=>({...t,[r]:e}))))))),[I,b]),(0,u.A)((()=>{I.updateAttributes(f)}),[f]),o.createElement(N,(0,n.A)({"data-testid":"chartCard-".concat((f.contextScope||[]).join(",")),height:c.height,width:"100%"},g,{dragging:s}),o.createElement(F.A,{chart:I,"data-chartid":l?"dragOverlay-".concat(r):r,"data-track":I.track("container"),height:c.height,width:"100%"}))})),j=M;var z=r(46741),G=r(58384),U=r(67544);const q="text",J="customDashboard",Z=e=>{let{dashboardId:t,id:r,onClick:n}=e;const a=(0,O.bE)(t,r,"editing"),i=(0,O.v_)(t,r,"editing");return o.createElement(G.t,{icon:"pencilSolid",onClick:()=>{n(),i(!0)},"data-ga":"".concat(q,"-reset::").concat(J),"data-testid":"textCard-edit",disabled:a},"Edit")},$=e=>{let{dashboardId:t,id:r}=e;const n=(0,U.c6)(t,r);return o.createElement(G.t,{icon:"trashcan",onClick:n,"data-ga":"".concat(q,"-reset::").concat(J),"data-testid":"textCard-remove"},"Remove")},K=e=>{let{dashboardId:t,id:r,children:a,...i}=e;return o.createElement(o.Fragment,null,o.createElement(G.A,(0,n.A)({category:q,context:J,testId:"chartTextDropdown"},i),(e=>{let{close:n}=e;return o.createElement(o.Fragment,null,o.createElement(Z,{dashboardId:t,id:r,onClick:n}),o.createElement($,{dashboardId:t,id:r}))})),a)};r(25440);const 
Q=(0,a.default)(i.Flex).attrs({as:"form",column:!0,flex:!0,gap:2,height:"100%",padding:[0,0,2],onMouseDown:e=>e.stopPropagation()}).withConfig({displayName:"form__CardContent",componentId:"sc-3tcqxu-0"})(["cursor:auto;"]),Y=(0,a.default)(i.Box).attrs((e=>({as:"textarea",background:"inputBg",height:"100%",round:!0,border:!0,padding:[2],...e}))).withConfig({displayName:"form__Textarea",componentId:"sc-3tcqxu-1"})(["resize:none;font-size:12px;color:",";"],(0,i.getColor)("text")),V=(0,o.memo)((e=>{let{dashboardId:t,id:r}=e;const n=(0,O.bE)(t,r,"text"),a=(0,O.v_)(t,r,"editing"),d=(0,O.v_)(t,r,"text"),[c,s]=(0,o.useState)(n);return o.createElement(Q,{"data-testid":"textCardForm"},o.createElement(Y,{placeholder:"Add some text",value:c.replace(/<\/?[^>]+(>|$)/g,""),onChange:e=>s(e.target.value)}),o.createElement(i.Flex,{position:"absolute",right:"24px",bottom:0,gap:1},o.createElement(i.IconButton,{icon:"check",small:!0,padding:[0],onClick:()=>{d(c),a(!1)},"data-testid":"textCardForm-cancel",neutral:!1,flavour:"default"}),o.createElement(i.IconButton,{icon:"x",small:!0,padding:[0],onClick:()=>a(!1),"data-testid":"textCardForm-done",flavour:"default"})))})),X={h1:i.H2,h2:i.H4,text:i.Text,micro:i.TextNano},ee=e=>{let{dashboardId:t,id:r}=e;const n=(0,O.bE)(t,r,"text")||"",a=(0,O.bE)(t,r,"textType"),d=X[a]||X.text;return o.createElement(i.Box,{"data-testid":"textCardContent"},o.createElement(d,{whiteSpace:"pre-wrap"},n.replace(/<\/?[^>]+(>|$)/g,"")))},te=(0,a.default)(i.Flex).attrs({column:!0}).withConfig({displayName:"text__StyledOptions",componentId:"sc-1f67m9f-0"})([""]),re=e=>{let{dragging:t,...r}=e;const a=(0,z.JT)("dashboard:Update");return o.createElement(i.IconButton,(0,n.A)({position:"absolute",top:0,right:0,padding:[0],width:"12px",height:"12px",icon:"rearrange",title:"Drag & drop","data-testid":"text-drag",disabled:!a,cursor:t?"grabbing":"grab"},r))},ne=["h1","h2","text","micro"],oe=e=>{let{id:t,dashboardId:r}=e;const n=(0,z.JT)("dashboard:Update"),a=(0,O.bE)(r,t,"textType")||"text",d=(0,O.v_)(r,t,"textType");return o.createElement(i.IconButton,{padding:[0],width:"12px",height:"12px",icon:"text_add",title:"Change text component","data-testid":"text-component",disabled:!n,onClick:()=>{const e=ne.findIndex((e=>e===a)),t=ne[e>ne.length-2?0:e+1];d(t)}})},ae=(0,a.default)(N).withConfig({displayName:"text__StyledCard",componentId:"sc-1f67m9f-1"})(["","{opacity:0;}&:hover{border:1px solid ",";}&:hover ","{opacity:1;}"],te,(0,i.getColor)("border"),te),ie={chart:j,text:(0,o.memo)((e=>{let{dashboardId:t,id:r,listeners:a,handleProps:i,dragging:d}=e;const c=(0,O.bE)(t,r,"editing");return o.createElement(ae,{"data-testid":"textCard",alignItems:"start",overflow:"hidden",dragging:d},o.createElement(te,null,o.createElement(K,{dashboardId:t,id:r}),o.createElement(oe,{dashboardId:t,id:r})),c&&o.createElement(V,{dashboardId:t,id:r}),!c&&o.createElement(ee,{dashboardId:t,id:r}),o.createElement(re,(0,n.A)({},a,i,{dragging:d})))})),placeholder:(0,o.memo)((()=>o.createElement(N,{column:!0,"data-testid":"placeholderCard",background:"secondaryHighlight",width:"100%"})))},de=(0,o.forwardRef)(((e,t)=>{let{dashboardId:r,id:a,attributes:i,style:d,resizeHandle:c,...s}=e;const l=(0,O.bE)(r,a,"type")||"placeholder",u=ie[l]||ie.chart;return o.createElement("div",(0,n.A)({ref:t},i,{style:d}),o.createElement(u,(0,n.A)({dashboardId:r,id:a,style:d},s)),c)})),ce=(0,o.memo)((e=>{let{id:t,...r}=e;return(0,T.Ay)(t),o.createElement(_,(0,n.A)({id:t,Component:de},r))}))}}]); \ No newline at end of file diff --git 
a/src/web/gui/v2/6121.f7286809e53e1c6d655a.chunk.js b/src/web/gui/v2/6121.f7286809e53e1c6d655a.chunk.js deleted file mode 100644 index b4d1cea53..000000000 --- a/src/web/gui/v2/6121.f7286809e53e1c6d655a.chunk.js +++ /dev/null @@ -1,2 +0,0 @@ -/*! For license information please see 6121.f7286809e53e1c6d655a.chunk.js.LICENSE.txt */ -!function(){try{var t="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},e=(new Error).stack;e&&(t._sentryDebugIds=t._sentryDebugIds||{},t._sentryDebugIds[e]="44385583-8059-4723-827a-84811c072356",t._sentryDebugIdIdentifier="sentry-dbid-44385583-8059-4723-827a-84811c072356")}catch(t){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[6121],{51891:(t,e,o)=>{o.d(e,{A:()=>Jt});var n=o(66118),r=o(12020);const i={modes:{point:(t,e)=>a(t,e,{intersect:!0}),nearest:(t,e,o)=>function(t,e,o){let n=Number.POSITIVE_INFINITY;return a(t,e,o).reduce(((t,i)=>{const s=i.getCenterPoint(),a=function(t,e,o){if("x"===o)return{x:t.x,y:e.y};if("y"===o)return{x:e.x,y:t.y};return e}(e,s,o.axis),d=(0,r.aE)(e,a);return dt._index-e._index)).slice(0,1)}(t,e,o),x:(t,e,o)=>a(t,e,{intersect:o.intersect,axis:"x"}),y:(t,e,o)=>a(t,e,{intersect:o.intersect,axis:"y"})}};function s(t,e,o){return(i.modes[o.mode]||i.modes.nearest)(t,e,o)}function a(t,e,o){return t.visibleElements.filter((t=>o.intersect?t.inRange(e.x,e.y):function(t,e,o){return"x"!==o&&"y"!==o?t.inRange(e.x,e.y,"x",!0)||t.inRange(e.x,e.y,"y",!0):t.inRange(e.x,e.y,o,!0)}(t,e,o.axis)))}const d=(t,e)=>e>t||t.length>e.length&&t.slice(0,e.length)===e,l=.001,c=(t,e,o)=>Math.min(o,Math.max(e,t));function h(t,e,o){for(const n of Object.keys(t))t[n]=c(t[n],e,o);return t}function u(t,{x:e,y:o,x2:n,y2:r},i,s){const a=s/2,d=t.x>=e-a-l&&t.x<=n+a+l,c=t.y>=o-a-l&&t.y<=r+a+l;return"x"===i?d:("y"===i||d)&&c}function f(t,e){const{centerX:o,centerY:n}=t.getProps(["centerX","centerY"],e);return{x:o,y:n}}const x=t=>"string"===typeof t&&t.endsWith("%"),y=t=>parseFloat(t)/100,p=t=>c(y(t),0,1);function b(t,e){return"start"===e?0:"end"===e?t:x(e)?p(e)*t:t/2}function g(t,e,o=!0){return"number"===typeof e?e:x(e)?(o?p(e):y(e))*t:t}function m(t,e="center"){return(0,r.i)(t)?{x:(0,r.v)(t.x,e),y:(0,r.v)(t.y,e)}:{x:t=(0,r.v)(t,e),y:t}}function v(t){return t&&((0,r.h)(t.xValue)||(0,r.h)(t.yValue))}function w(t,e,o,n=!1){const i=o.init;if(i)return!0===i?S(e,n):function(t,e,o){if(!0===o)return S(t,e);if((0,r.i)(o))return o}(e,n,(0,r.Q)(i,[{chart:t,properties:e,options:o}]))}function M(t,e,o){let n=!1;return e.forEach((e=>{(0,r.a7)(t[e])?(n=!0,o[e]=t[e]):(0,r.h)(o[e])&&delete o[e]})),n}function S({centerX:t,centerY:e},o){return o?{centerX:t,centerY:e,radius:0,width:0,height:0}:{x:t,y:e,x2:t,y2:e,width:0,height:0}}const k=new Map,C=t=>t.reduce((function(t,e){return t+=e.string}),"");function P(t){if(t&&"object"===typeof t){const e=t.toString();return"[object HTMLImageElement]"===e||"[object HTMLCanvasElement]"===e}}function D(t,{x:e,y:o},n){n&&(t.translate(e,o),t.rotate((0,r.t)(n)),t.translate(-e,-o))}function j(t,e){if(e&&e.borderWidth)return t.lineCap=e.borderCapStyle,t.setLineDash(e.borderDash),t.lineDashOffset=e.borderDashOffset,t.lineJoin=e.borderJoinStyle,t.lineWidth=e.borderWidth,t.strokeStyle=e.borderColor,!0}function 
T(t,e){t.shadowColor=e.backgroundShadowColor,t.shadowBlur=e.shadowBlur,t.shadowOffsetX=e.shadowOffsetX,t.shadowOffsetY=e.shadowOffsetY}function O(t,e){const o=e.content;if(P(o))return{width:g(o.width,e.width),height:g(o.height,e.height)};const n=e.font,i=(0,r.b)(n)?n.map((t=>(0,r.a0)(t))):[(0,r.a0)(n)],s=e.textStrokeWidth,a=(0,r.b)(o)?o:[o],d=a.join()+C(i)+s+(t._measureText?"-spriting":"");return k.has(d)||k.set(d,function(t,e,o,n){t.save();const r=e.length;let i=0,s=n;for(let a=0;a(0,r.a0)(t))):[(0,r.a0)(s)],d=o.color,l=(0,r.b)(d)?d:[d],h=function(t,e){const{x:o,width:n}=t,r=e.textAlign;return"center"===r?o+n/2:"end"===r||"right"===r?o+n:o}(e,o),u=e.y+o.textStrokeWidth/2;t.save(),t.textBaseline="middle",t.textAlign=o.textAlign,function(t,e){if(e.textStrokeWidth>0)return t.lineJoin="round",t.miterLimit=2,t.lineWidth=e.textStrokeWidth,t.strokeStyle=e.textStrokeColor,!0}(t,o)&&function(t,{x:e,y:o},n,r){t.beginPath();let i=0;n.forEach((function(n,s){const a=r[Math.min(s,r.length-1)],d=a.lineHeight;t.font=a.string,t.strokeText(n,e,o+d/2+i),i+=d})),t.stroke()}(t,{x:h,y:u},i,a),function(t,{x:e,y:o},n,{fonts:r,colors:i}){let s=0;n.forEach((function(n,a){const d=i[Math.min(a,i.length-1)],l=r[Math.min(a,r.length-1)],c=l.lineHeight;t.beginPath(),t.font=l.string,t.fillStyle=d,t.fillText(n,e,o+c/2+s),s+=c,t.fill()}))}(t,{x:h,y:u},i,{fonts:a,colors:l}),t.restore()}function I(t,e,o,n){const{radius:i,options:s}=e,a=s.pointStyle,d=s.rotation;let l=(d||0)*r.b3;if(P(a))return t.save(),t.translate(o,n),t.rotate(l),t.drawImage(a,-a.width/2,-a.height/2,a.width,a.height),void t.restore();(t=>isNaN(t)||t<=0)(i)||function(t,{x:e,y:o,radius:n,rotation:i,style:s,rad:a}){let d,l,c,h;switch(t.beginPath(),s){default:t.arc(e,o,n,0,r.T),t.closePath();break;case"triangle":t.moveTo(e+Math.sin(a)*n,o-Math.cos(a)*n),a+=r.b5,t.lineTo(e+Math.sin(a)*n,o-Math.cos(a)*n),a+=r.b5,t.lineTo(e+Math.sin(a)*n,o-Math.cos(a)*n),t.closePath();break;case"rectRounded":h=.516*n,c=n-h,d=Math.cos(a+r.b4)*c,l=Math.sin(a+r.b4)*c,t.arc(e-d,o-l,h,a-r.P,a-r.H),t.arc(e+l,o-d,h,a-r.H,a),t.arc(e+d,o+l,h,a,a+r.H),t.arc(e-l,o+d,h,a+r.H,a+r.P),t.closePath();break;case"rect":if(!i){c=Math.SQRT1_2*n,t.rect(e-c,o-c,2*c,2*c);break}a+=r.b4;case"rectRot":d=Math.cos(a)*n,l=Math.sin(a)*n,t.moveTo(e-d,o-l),t.lineTo(e+l,o-d),t.lineTo(e+d,o+l),t.lineTo(e-l,o+d),t.closePath();break;case"crossRot":a+=r.b4;case"cross":d=Math.cos(a)*n,l=Math.sin(a)*n,t.moveTo(e-d,o-l),t.lineTo(e+d,o+l),t.moveTo(e+l,o-d),t.lineTo(e-l,o+d);break;case"star":d=Math.cos(a)*n,l=Math.sin(a)*n,t.moveTo(e-d,o-l),t.lineTo(e+d,o+l),t.moveTo(e+l,o-d),t.lineTo(e-l,o+d),a+=r.b4,d=Math.cos(a)*n,l=Math.sin(a)*n,t.moveTo(e-d,o-l),t.lineTo(e+d,o+l),t.moveTo(e+l,o-d),t.lineTo(e-l,o+d);break;case"line":d=Math.cos(a)*n,l=Math.sin(a)*n,t.moveTo(e-d,o-l),t.lineTo(e+d,o+l);break;case"dash":t.moveTo(e,o),t.lineTo(e+Math.cos(a)*n,o+Math.sin(a)*n)}t.fill()}(t,{x:o,y:n,radius:i,rotation:d,style:a,rad:l})}const X={xScaleID:{min:"xMin",max:"xMax",start:"left",end:"right",startProp:"x",endProp:"x2"},yScaleID:{min:"yMin",max:"yMax",start:"bottom",end:"top",startProp:"y",endProp:"y2"}};function E(t,e,o){return e="number"===typeof e?e:t.parse(e),(0,r.g)(e)?t.getPixelForValue(e):o}function W(t,e,o){const n=e[o];if(n||"scaleID"===o)return n;const r=o.charAt(0),i=Object.values(t).filter((t=>t.axis&&t.axis===r));return i.length?i[0].id:r}function R(t,e){if(t){const o=t.options.reverse;return{start:E(t,e.min,o?e.end:e.start),end:E(t,e.max,o?e.start:e.end)}}}function 
_(t,e){const{chartArea:o,scales:n}=t,r=n[W(n,e,"xScaleID")],i=n[W(n,e,"yScaleID")];let s=o.width/2,a=o.height/2;return r&&(s=E(r,e.xValue,r.left+r.width/2)),i&&(a=E(i,e.yValue,i.top+i.height/2)),{x:s,y:a}}function z(t,e){const o=t.scales,n=o[W(o,e,"xScaleID")],r=o[W(o,e,"yScaleID")];if(!n&&!r)return{};let{left:i,right:s}=n||t.chartArea,{top:a,bottom:d}=r||t.chartArea;const l=$(n,{min:e.xMin,max:e.xMax,start:i,end:s});i=l.start,s=l.end;const c=$(r,{min:e.yMin,max:e.yMax,start:d,end:a});return a=c.start,d=c.end,{x:i,y:a,x2:s,y2:d,width:s-i,height:d-a,centerX:i+(s-i)/2,centerY:a+(d-a)/2}}function H(t,e){if(!v(e)){const o=z(t,e);let n=e.radius;n&&!isNaN(n)||(n=Math.min(o.width,o.height)/2,e.radius=n);const r=2*n,i=o.centerX+e.xAdjust,s=o.centerY+e.yAdjust;return{x:i-n,y:s-n,x2:i+n,y2:s+n,centerX:i,centerY:s,width:r,height:r,radius:n}}return function(t,e){const o=_(t,e),n=2*e.radius;return{x:o.x-e.radius+e.xAdjust,y:o.y-e.radius+e.yAdjust,x2:o.x+e.radius+e.xAdjust,y2:o.y+e.radius+e.yAdjust,centerX:o.x+e.xAdjust,centerY:o.y+e.yAdjust,radius:e.radius,width:n,height:n}}(t,e)}function N(t,e){const{scales:o,chartArea:n}=t,r=o[e.scaleID],i={x:n.left,y:n.top,x2:n.right,y2:n.bottom};return r?function(t,e,o){const n=E(t,o.value,NaN),r=E(t,o.endValue,n);t.isHorizontal()?(e.x=n,e.x2=r):(e.y=n,e.y2=r)}(r,i,e):function(t,e,o){for(const n of Object.keys(X)){const r=t[W(t,o,n)];if(r){const{min:t,max:i,start:s,end:a,startProp:d,endProp:l}=X[n],c=R(r,{min:o[t],max:o[i],start:r[s],end:r[a]});e[d]=c.start,e[l]=c.end}}}(o,i,e),i}function V(t,e,o){const n=z(t,e);return n.initProperties=w(t,n,e,o),n.elements=[{type:"label",optionScope:"label",properties:L(t,n,e),initProperties:n.initProperties}],n}function $(t,e){const o=R(t,e)||e;return{start:Math.min(o.start,o.end),end:Math.max(o.start,o.end)}}function B(t,e){const{start:o,end:n,borderWidth:r}=t,{position:i,padding:{start:s,end:a},adjust:d}=e;return o+r/2+d+b(n-r-o-s-a-e.size,i)}function L(t,e,o){const n=o.label;n.backgroundColor="transparent",n.callout.display=!1;const i=m(n.position),s=(0,r.E)(n.padding),a=O(t.ctx,n),d=function({properties:t,options:e},o,n,r){const{x:i,x2:s,width:a}=t;return B({start:i,end:s,size:a,borderWidth:e.borderWidth},{position:n.x,padding:{start:r.left,end:r.right},adjust:e.label.xAdjust,size:o.width})}({properties:e,options:o},a,i,s),l=function({properties:t,options:e},o,n,r){const{y:i,y2:s,height:a}=t;return B({start:i,end:s,size:a,borderWidth:e.borderWidth},{position:n.y,padding:{start:r.top,end:r.bottom},adjust:e.label.yAdjust,size:o.height})}({properties:e,options:o},a,i,s),c=a.width+s.width,h=a.height+s.height;return{x:d,y:l,x2:d+c,y2:l+h,width:c,height:h,centerX:d+c/2,centerY:l+h/2,rotation:n.rotation}}function J(t,e,o){const n=Math.cos(o),r=Math.sin(o),i=e.x,s=e.y;return{x:i+n*(t.x-i)-r*(t.y-s),y:s+r*(t.x-i)+n*(t.y-s)}}const F=["enter","leave"],Q=F.concat("click");function U(t,e,o){if(t.listened)switch(e.type){case"mousemove":case"mouseout":return function(t,e,o){if(!t.moveListened)return;let n;n="mousemove"===e.type?s(t,e,o.interaction):[];const r=t.hovered;t.hovered=n;const i={state:t,event:e};let a=q(i,"leave",r,n);return q(i,"enter",n,r)||a}(t,e,o);case"click":return function(t,e,o){const n=t.listeners,r=s(t,e,o.interaction);let i;for(const s of r)i=G(s.options.click||n.click,s,e)||i;return i}(t,e,o)}}function q({state:t,event:e},o,n,r){let i;for(const s of n)r.indexOf(s)<0&&(i=G(s.options[o]||t.listeners[o],s,e)||i);return i}function G(t,e,o){return!0===(0,r.Q)(t,[e.$context,o])}const K=["afterDraw","beforeDraw"];function 
Z(t,e,o){if(t.hooked){const n=e.options[o]||t.hooks[o];return(0,r.Q)(n,[e.$context])}}function tt(t,e,o){const n=function(t,e,o){const n=e.axis,i=e.id,s=n+"ScaleID",a={min:(0,r.v)(e.min,Number.NEGATIVE_INFINITY),max:(0,r.v)(e.max,Number.POSITIVE_INFINITY)};for(const r of o)r.scaleID===i?rt(r,e,["value","endValue"],a):W(t,r,s)===i&&rt(r,e,[n+"Min",n+"Max",n+"Value"],a);return a}(t.scales,e,o);let i=et(e,n,"min","suggestedMin");i=et(e,n,"max","suggestedMax")||i,i&&(0,r.a7)(e.handleTickRangeOptions)&&e.handleTickRangeOptions()}function et(t,e,o,n){if((0,r.g)(e[o])&&!function(t,e,o){return(0,r.h)(t[e])||(0,r.h)(t[o])}(t.options,o,n)){const n=t[o]!==e[o];return t[o]=e[o],n}}function ot(t,e){for(const o of["scaleID","xScaleID","yScaleID"]){const n=W(e,t,o);n&&!e[n]&&nt(t,o)&&console.warn(`No scale found with id '${n}' for annotation '${t.id}'`)}}function nt(t,e){if("scaleID"===e)return!0;const o=e.charAt(0);for(const n of["Min","Max","Value"])if((0,r.h)(t[o+n]))return!0;return!1}function rt(t,e,o,n){for(const i of o){const o=t[i];if((0,r.h)(o)){const t=e.parse(o);n.min=Math.min(n.min,t),n.max=Math.max(n.max,t)}}}class it extends n.Hg{inRange(t,e,o,n){const{x:i,y:s}=J({x:t,y:e},this.getCenterPoint(n),(0,r.t)(-this.options.rotation));return u({x:i,y:s},this.getProps(["x","y","x2","y2"],n),o,this.options.borderWidth)}getCenterPoint(t){return f(this,t)}draw(t){t.save(),D(t,this.getCenterPoint(),this.options.rotation),A(t,this,this.options),t.restore()}get label(){return this.elements&&this.elements[0]}resolveElementProperties(t,e){return V(t,e)}}it.id="boxAnnotation",it.defaults={adjustScaleRange:!0,backgroundShadowColor:"transparent",borderCapStyle:"butt",borderDash:[],borderDashOffset:0,borderJoinStyle:"miter",borderRadius:0,borderShadowColor:"transparent",borderWidth:1,display:!0,init:void 0,label:{backgroundColor:"transparent",borderWidth:0,callout:{display:!1},color:"black",content:null,display:!1,drawTime:void 0,font:{family:void 0,lineHeight:void 0,size:void 0,style:void 0,weight:"bold"},height:void 0,opacity:void 0,padding:6,position:"center",rotation:void 0,textAlign:"start",textStrokeColor:void 0,textStrokeWidth:0,width:void 0,xAdjust:0,yAdjust:0,z:void 0},rotation:0,shadowBlur:0,shadowOffsetX:0,shadowOffsetY:0,xMax:void 0,xMin:void 0,xScaleID:void 0,yMax:void 0,yMin:void 0,yScaleID:void 0,z:0},it.defaultRoutes={borderColor:"color",backgroundColor:"color"},it.descriptors={label:{_fallback:!0}};const st=["left","bottom","top","right"];class at extends n.Hg{inRange(t,e,o,n){const{x:i,y:s}=J({x:t,y:e},this.getCenterPoint(n),(0,r.t)(-this.rotation));return u({x:i,y:s},this.getProps(["x","y","x2","y2"],n),o,this.options.borderWidth)}getCenterPoint(t){return f(this,t)}draw(t){const e=this.options,o=!(0,r.h)(this._visible)||this._visible;e.display&&e.content&&o&&(t.save(),D(t,this.getCenterPoint(),this.rotation),function(t,e){const{pointX:o,pointY:n,options:i}=e,s=i.callout,a=s&&s.display&&function(t,e){const o=e.position;if(st.includes(o))return o;return function(t,e){const{x:o,y:n,x2:i,y2:s,width:a,height:d,pointX:l,pointY:c,centerX:h,centerY:u,rotation:f}=t,x={x:h,y:u},y=e.start,p=g(a,y),b=g(d,y),m=[o,o+p,o+p,i],v=[n+b,s,n,s],w=[];for(let g=0;g<4;g++){const t=J({x:m[g],y:v[g]},x,(0,r.t)(f));w.push({position:st[g],distance:(0,r.aE)(t,{x:l,y:c})})}return w.sort(((t,e)=>t.distance-e.distance))[0].position}(t,e)}(e,s);if(!a||function(t,e,o){const{pointX:n,pointY:r}=t,i=e.margin;let s=n,a=r;"left"===o?s+=i:"right"===o?s-=i:"top"===o?a+=i:"bottom"===o&&(a-=i);return 
t.inRange(s,a)}(e,s,a))return;t.save(),t.beginPath();const d=j(t,s);if(!d)return t.restore();const{separatorStart:l,separatorEnd:c}=function(t,e){const{x:o,y:n,x2:r,y2:i}=t,s=function(t,e){const{width:o,height:n,options:r}=t,i=r.callout.margin+r.borderWidth/2;if("right"===e)return o+i;if("bottom"===e)return n+i;return-i}(t,e);let a,d;"left"===e||"right"===e?(a={x:o+s,y:n},d={x:a.x,y:i}):(a={x:o,y:n+s},d={x:r,y:a.y});return{separatorStart:a,separatorEnd:d}}(e,a),{sideStart:h,sideEnd:u}=function(t,e,o){const{y:n,width:r,height:i,options:s}=t,a=s.callout.start,d=function(t,e){const o=e.side;if("left"===t||"top"===t)return-o;return o}(e,s.callout);let l,c;"left"===e||"right"===e?(l={x:o.x,y:n+g(i,a)},c={x:l.x+d,y:l.y}):(l={x:o.x+g(r,a),y:o.y},c={x:l.x,y:l.y+d});return{sideStart:l,sideEnd:c}}(e,a,l);(s.margin>0||0===i.borderWidth)&&(t.moveTo(l.x,l.y),t.lineTo(c.x,c.y));t.moveTo(h.x,h.y),t.lineTo(u.x,u.y);const f=J({x:o,y:n},e.getCenterPoint(),(0,r.t)(-e.rotation));t.lineTo(f.x,f.y),t.stroke(),t.restore()}(t,this),A(t,this,e),Y(t,function({x:t,y:e,width:o,height:n,options:i}){const s=i.borderWidth/2,a=(0,r.E)(i.padding);return{x:t+a.left+s,y:e+a.top+s,width:o-a.left-a.right-i.borderWidth,height:n-a.top-a.bottom-i.borderWidth}}(this),e),t.restore())}resolveElementProperties(t,e){let o;if(v(e))o=_(t,e);else{const{centerX:n,centerY:r}=z(t,e);o={x:n,y:r}}const n=(0,r.E)(e.padding),i=function(t,e,o,n){const r=e.width+n.width+o.borderWidth,i=e.height+n.height+o.borderWidth,s=m(o.position,"center"),a=dt(t.x,r,o.xAdjust,s.x),d=dt(t.y,i,o.yAdjust,s.y);return{x:a,y:d,x2:a+r,y2:d+i,width:r,height:i,centerX:a+r/2,centerY:d+i/2}}(o,O(t.ctx,e),e,n);return{initProperties:w(t,i,e),pointX:o.x,pointY:o.y,...i,rotation:e.rotation}}}function dt(t,e,o=0,n){return t-b(e,n)+o}at.id="labelAnnotation",at.defaults={adjustScaleRange:!0,backgroundColor:"transparent",backgroundShadowColor:"transparent",borderCapStyle:"butt",borderDash:[],borderDashOffset:0,borderJoinStyle:"miter",borderRadius:0,borderShadowColor:"transparent",borderWidth:0,callout:{borderCapStyle:"butt",borderColor:void 0,borderDash:[],borderDashOffset:0,borderJoinStyle:"miter",borderWidth:1,display:!1,margin:5,position:"auto",side:5,start:"50%"},color:"black",content:null,display:!0,font:{family:void 0,lineHeight:void 0,size:void 0,style:void 0,weight:void 0},height:void 0,init:void 0,opacity:void 0,padding:6,position:"center",rotation:0,shadowBlur:0,shadowOffsetX:0,shadowOffsetY:0,textAlign:"center",textStrokeColor:void 0,textStrokeWidth:0,width:void 0,xAdjust:0,xMax:void 0,xMin:void 0,xScaleID:void 0,xValue:void 0,yAdjust:0,yMax:void 0,yMin:void 0,yScaleID:void 0,yValue:void 0,z:0},at.defaultRoutes={borderColor:"color"};const lt=(t,e,o)=>({x:t.x+o*(e.x-t.x),y:t.y+o*(e.y-t.y)}),ct=(t,e,o)=>lt(e,o,Math.abs((t-e.y)/(o.y-e.y))).x,ht=(t,e,o)=>lt(e,o,Math.abs((t-e.x)/(o.x-e.x))).y,ut=t=>t*t,ft=(t,e,o,n)=>(1-n)*(1-n)*t+2*(1-n)*n*e+n*n*o,xt=(t,e,o,n)=>({x:ft(t.x,e.x,o.x,n),y:ft(t.y,e.y,o.y,n)}),yt=(t,e,o,n)=>2*(1-n)*(e-t)+2*n*(o-e),pt=(t,e,o,n)=>-Math.atan2(yt(t.x,e.x,o.x,n),yt(t.y,e.y,o.y,n))+.5*r.P;class bt extends n.Hg{inRange(t,e,o,n){const r=this.options.borderWidth/2;if("x"!==o&&"y"!==o){const o={mouseX:t,mouseY:e},{path:i,ctx:s}=this;if(i){j(s,this.options);const{chart:r}=this.$context,a=t*r.currentDevicePixelRatio,d=e*r.currentDevicePixelRatio,l=s.isPointInStroke(i,a,d)||vt(this,o,n);return s.restore(),l}return 
function(t,{mouseX:e,mouseY:o},n=.001,r){const{x:i,y:s,x2:a,y2:d}=t.getProps(["x","y","x2","y2"],r),l=a-i,c=d-s,h=ut(l)+ut(c),u=0===h?-1:((e-i)*l+(o-s)*c)/h;let f,x;u<0?(f=i,x=s):u>1?(f=a,x=d):(f=i+u*l,x=s+u*c);return ut(e-f)+ut(o-x)<=n}(this,o,ut(r),n)||vt(this,o,n)}return function(t,{mouseX:e,mouseY:o},n,{hBorderWidth:r,useFinalPosition:i}){const s=((t,e,{x:o,y:n,x2:r,y2:i},s)=>"y"===s?{start:Math.min(n,i),end:Math.max(n,i),value:e}:{start:Math.min(o,r),end:Math.max(o,r),value:t})(e,o,t.getProps(["x","y","x2","y2"],i),n);return s.value>=s.start-r&&s.value<=s.end+r||vt(t,{mouseX:e,mouseY:o},i,n)}(this,{mouseX:t,mouseY:e},o,{hBorderWidth:r,useFinalPosition:n})}getCenterPoint(t){return f(this,t)}draw(t){const{x:e,y:o,x2:n,y2:i,cp:s,options:a}=this;if(t.save(),!j(t,a))return t.restore();T(t,a);const d=Math.sqrt(Math.pow(n-e,2)+Math.pow(i-o,2));if(a.curve&&s)return function(t,e,o,n){const{x:i,y:s,x2:a,y2:d,options:l}=e,{startOpts:c,endOpts:h,startAdjust:u,endAdjust:f}=St(e),x={x:i,y:s},y={x:a,y:d},p=pt(x,o,y,0),b=pt(x,o,y,1)-r.P,g=xt(x,o,y,u/n),m=xt(x,o,y,1-f/n),v=new Path2D;t.beginPath(),v.moveTo(g.x,g.y),v.quadraticCurveTo(o.x,o.y,m.x,m.y),t.shadowColor=l.borderShadowColor,t.stroke(v),e.path=v,e.ctx=t,Pt(t,g,{angle:p,adjust:u},c),Pt(t,m,{angle:b,adjust:f},h)}(t,this,s,d),t.restore();const{startOpts:l,endOpts:c,startAdjust:h,endAdjust:u}=St(this),f=Math.atan2(i-o,n-e);t.translate(e,o),t.rotate(f),t.beginPath(),t.moveTo(0+h,0),t.lineTo(d-u,0),t.shadowColor=a.borderShadowColor,t.stroke(),Ct(t,0,h,l),Ct(t,d,-u,c),t.restore()}get label(){return this.elements&&this.elements[0]}resolveElementProperties(t,e){const o=N(t,e),{x:n,y:i,x2:s,y2:a}=o,d=function({x:t,y:e,x2:o,y2:n},{top:r,right:i,bottom:s,left:a}){return!(ti&&o>i||es&&n>s)}(o,t.chartArea),l=d?function(t,e,o){const{x:n,y:r}=mt(t,e,o),{x:i,y:s}=mt(e,t,o);return{x:n,y:r,x2:i,y2:s,width:Math.abs(i-n),height:Math.abs(s-r)}}({x:n,y:i},{x:s,y:a},t.chartArea):{x:n,y:i,x2:s,y2:a,width:Math.abs(s-n),height:Math.abs(a-i)};if(l.centerX=(s+n)/2,l.centerY=(a+i)/2,l.initProperties=w(t,l,e),e.curve){const t={x:l.x,y:l.y},o={x:l.x2,y:l.y2};l.cp=function(t,e,o){const{x:n,y:r,x2:i,y2:s,centerX:a,centerY:d}=t,l=Math.atan2(s-r,i-n),c=m(e.controlPoint,0);return J({x:a+g(o,c.x,!1),y:d+g(o,c.y,!1)},{x:a,y:d},l)}(l,e,(0,r.aE)(t,o))}const c=function(t,e,o){const n=o.borderWidth,i=(0,r.E)(o.padding),s=O(t.ctx,o),a=s.width+i.width+n,d=s.height+i.height+n;return function(t,e,o,n){const{width:i,height:s,padding:a}=o,{xAdjust:d,yAdjust:l}=e,c={x:t.x,y:t.y},h={x:t.x2,y:t.y2},u="auto"===e.rotation?function(t){const{x:e,y:o,x2:n,y2:i}=t,s=Math.atan2(i-o,n-e);return s>r.P/2?s-r.P:sr&&(e=ht(r,{x:t,y:e},o),t=r),ei&&(t=ct(i,{x:t,y:e},o),e=i),{x:t,y:e}}function vt(t,{mouseX:e,mouseY:o},n,r){const i=t.label;return i.options.display&&i.inRange(e,o,r,n)}function wt(t,e,o,n){const{labelSize:r,padding:i}=e,s=t.w*n.dx,a=t.h*n.dy,d=s>0&&(r.w/2+i.left-n.x)/s,l=a>0&&(r.h/2+i.top-n.y)/a;return c(Math.max(d,l),0,.25)}function Mt(t,e){const{size:o,min:n,max:r,padding:i}=e,s=o/2;return o>r-n?(r+n)/2:(n>=t-i-s&&(t=n+i+s),r<=t+i+s&&(t=r-i-s),t)}function St(t){const e=t.options,o=e.arrowHeads&&e.arrowHeads.start,n=e.arrowHeads&&e.arrowHeads.end;return{startOpts:o,endOpts:n,startAdjust:kt(t,o),endAdjust:kt(t,n)}}function kt(t,e){if(!e||!e.display)return 0;const{length:o,width:n}=e,r=t.options.borderWidth/2,i={x:o,y:n+r},s={x:0,y:r};return Math.abs(ct(0,i,s))}function 
Ct(t,e,o,n){if(!n||!n.display)return;const{length:r,width:i,fill:s,backgroundColor:a,borderColor:d}=n,l=Math.abs(e-r)+o;t.beginPath(),T(t,n),j(t,n),t.moveTo(l,-i),t.lineTo(e+o,0),t.lineTo(l,i),!0===s?(t.fillStyle=a||d,t.closePath(),t.fill(),t.shadowColor="transparent"):t.shadowColor=n.borderShadowColor,t.stroke()}function Pt(t,{x:e,y:o},{angle:n,adjust:r},i){i&&i.display&&(t.save(),t.translate(e,o),t.rotate(n),Ct(t,0,-r,i),t.restore())}bt.defaults={adjustScaleRange:!0,arrowHeads:{display:!1,end:Object.assign({},gt),fill:!1,length:12,start:Object.assign({},gt),width:6},borderDash:[],borderDashOffset:0,borderShadowColor:"transparent",borderWidth:2,curve:!1,controlPoint:{y:"-50%"},display:!0,endValue:void 0,init:void 0,label:{backgroundColor:"rgba(0,0,0,0.8)",backgroundShadowColor:"transparent",borderCapStyle:"butt",borderColor:"black",borderDash:[],borderDashOffset:0,borderJoinStyle:"miter",borderRadius:6,borderShadowColor:"transparent",borderWidth:0,callout:Object.assign({},at.defaults.callout),color:"#fff",content:null,display:!1,drawTime:void 0,font:{family:void 0,lineHeight:void 0,size:void 0,style:void 0,weight:"bold"},height:void 0,opacity:void 0,padding:6,position:"center",rotation:0,shadowBlur:0,shadowOffsetX:0,shadowOffsetY:0,textAlign:"center",textStrokeColor:void 0,textStrokeWidth:0,width:void 0,xAdjust:0,yAdjust:0,z:void 0},scaleID:void 0,shadowBlur:0,shadowOffsetX:0,shadowOffsetY:0,value:void 0,xMax:void 0,xMin:void 0,xScaleID:void 0,yMax:void 0,yMin:void 0,yScaleID:void 0,z:0},bt.descriptors={arrowHeads:{start:{_fallback:!0},end:{_fallback:!0},_fallback:!0}},bt.defaultRoutes={borderColor:"color"};class Dt extends n.Hg{inRange(t,e,o,n){const i=this.options.rotation,s=this.options.borderWidth;if("x"!==o&&"y"!==o)return function(t,e,o,n){const{width:i,height:s,centerX:a,centerY:d}=e,l=i/2,c=s/2;if(l<=0||c<=0)return!1;const h=(0,r.t)(o||0),u=n/2||0,f=Math.cos(h),x=Math.sin(h),y=Math.pow(f*(t.x-a)+x*(t.y-d),2),p=Math.pow(x*(t.x-a)-f*(t.y-d),2);return y/Math.pow(l+u,2)+p/Math.pow(c+u,2)<=1.0001}({x:t,y:e},this.getProps(["width","height","centerX","centerY"],n),i,s);const{x:a,y:d,x2:c,y2:h}=this.getProps(["x","y","x2","y2"],n),u=s/2,f="y"===o?{start:d,end:h}:{start:a,end:c},x=J({x:t,y:e},this.getCenterPoint(n),(0,r.t)(-i));return x[o]>=f.start-u-l&&x[o]<=f.end+u+l}getCenterPoint(t){return f(this,t)}draw(t){const{width:e,height:o,centerX:n,centerY:i,options:s}=this;t.save(),D(t,this.getCenterPoint(),s.rotation),T(t,this.options),t.beginPath(),t.fillStyle=s.backgroundColor;const a=j(t,s);t.ellipse(n,i,o/2,e/2,r.P/2,0,2*r.P),t.fill(),a&&(t.shadowColor=s.borderShadowColor,t.stroke()),t.restore()}get label(){return this.elements&&this.elements[0]}resolveElementProperties(t,e){return V(t,e,!0)}}Dt.id="ellipseAnnotation",Dt.defaults={adjustScaleRange:!0,backgroundShadowColor:"transparent",borderDash:[],borderDashOffset:0,borderShadowColor:"transparent",borderWidth:1,display:!0,init:void 0,label:Object.assign({},it.defaults.label),rotation:0,shadowBlur:0,shadowOffsetX:0,shadowOffsetY:0,xMax:void 0,xMin:void 0,xScaleID:void 0,yMax:void 0,yMin:void 0,yScaleID:void 0,z:0},Dt.defaultRoutes={borderColor:"color",backgroundColor:"color"},Dt.descriptors={label:{_fallback:!0}};class jt extends n.Hg{inRange(t,e,o,n){const{x:r,y:i,x2:s,y2:a,width:d}=this.getProps(["x","y","x2","y2","width"],n),l=this.options.borderWidth;if("x"!==o&&"y"!==o)return function(t,e,o,n){if(!t||!e||o<=0)return!1;const r=n/2;return 
Math.pow(t.x-e.x,2)+Math.pow(t.y-e.y,2)<=Math.pow(o+r,2)}({x:t,y:e},this.getCenterPoint(n),d/2,l);const c=l/2,h="y"===o?{start:i,end:a,value:e}:{start:r,end:s,value:t};return h.value>=h.start-c&&h.value<=h.end+c}getCenterPoint(t){return f(this,t)}draw(t){const e=this.options,o=e.borderWidth;if(e.radius<.1)return;t.save(),t.fillStyle=e.backgroundColor,T(t,e);const n=j(t,e);I(t,this,this.centerX,this.centerY),n&&!P(e.pointStyle)&&(t.shadowColor=e.borderShadowColor,t.stroke()),t.restore(),e.borderWidth=o}resolveElementProperties(t,e){const o=H(t,e);return o.initProperties=w(t,o,e,!0),o}}jt.id="pointAnnotation",jt.defaults={adjustScaleRange:!0,backgroundShadowColor:"transparent",borderDash:[],borderDashOffset:0,borderShadowColor:"transparent",borderWidth:1,display:!0,init:void 0,pointStyle:"circle",radius:10,rotation:0,shadowBlur:0,shadowOffsetX:0,shadowOffsetY:0,xAdjust:0,xMax:void 0,xMin:void 0,xScaleID:void 0,xValue:void 0,yAdjust:0,yMax:void 0,yMin:void 0,yScaleID:void 0,yValue:void 0,z:0},jt.defaultRoutes={borderColor:"color",backgroundColor:"color"};class Tt extends n.Hg{inRange(t,e,o,n){if("x"!==o&&"y"!==o)return this.options.radius>=.1&&this.elements.length>1&&function(t,e,o,n){let r=!1,i=t[t.length-1].getProps(["bX","bY"],n);for(const s of t){const t=s.getProps(["bX","bY"],n);t.bY>o!==i.bY>o&&e<(i.bX-t.bX)*(o-t.bY)/(i.bY-t.bY)+t.bX&&(r=!r),i=t}return r}(this.elements,t,e,n);const i=J({x:t,y:e},this.getCenterPoint(n),(0,r.t)(-this.options.rotation)),s=this.elements.map((t=>"y"===o?t.bY:t.bX)),a=Math.min(...s),d=Math.max(...s);return i[o]>=a&&i[o]<=d}getCenterPoint(t){return f(this,t)}draw(t){const{elements:e,options:o}=this;t.save(),t.beginPath(),t.fillStyle=o.backgroundColor,T(t,o);const n=j(t,o);let r=!0;for(const i of e)r?(t.moveTo(i.x,i.y),r=!1):t.lineTo(i.x,i.y);t.closePath(),t.fill(),n&&(t.shadowColor=o.borderShadowColor,t.stroke()),t.restore()}resolveElementProperties(t,e){const o=H(t,e),{sides:n,rotation:i}=e,s=[],a=2*r.P/n;let d=i*r.b3;for(let r=0;r{r.d.describe(`elements.${At[t].id}`,{_fallback:"plugins.annotation.common"})}));const Yt={update:Object.assign},It=Q.concat(K),Xt=(t,e)=>(0,r.i)(e)?Vt(t,e):t,Et=t=>"color"===t||"font"===t;function Wt(t="line"){return At[t]?t:(console.warn(`Unknown annotation type: '${t}', defaulting to 'line'`),"line")}function Rt(t,e,o,i){const s=function(t,e,o){if("reset"===o||"none"===o||"resize"===o)return Yt;return new n.Qw(t,e)}(t,o.animations,i),a=e.annotations,d=function(t,e){const o=e.length,n=t.length;if(no&&t.splice(o,n-o);return t}(e.elements,a);for(let n=0;nXt(t,i))):o[n]=Xt(s,i)}return o}function $t(t,e,o){return e.$context||(e.$context=Object.assign(Object.create(t.getContext()),{element:e,id:o.id,type:"annotation"}))}const Bt=new Map,Lt=Q.concat(K);var Jt={id:"annotation",version:"2.2.1",beforeRegister(){!function(t,e,o,n=!0){const r=o.split(".");let i=0;for(const s of e.split(".")){const a=r[i++];if(parseInt(s,10){const e=i[t];(0,r.i)(e)&&(e.id=t,n.push(e))})):(0,r.b)(i)&&n.push(...i),function(t,e){for(const o of t)ot(o,e)}(n,t.scales)},afterDataLimits(t,e){const o=Bt.get(t);tt(t,e.scale,o.annotations.filter((t=>t.display&&t.adjustScaleRange)))},afterUpdate(t,e,o){const 
n=Bt.get(t);!function(t,e,o){e.listened=M(o,Q,e.listeners),e.moveListened=!1,e._getElements=s,F.forEach((t=>{(0,r.a7)(o[t])&&(e.moveListened=!0)})),e.listened&&e.moveListened||e.annotations.forEach((t=>{!e.listened&&(0,r.a7)(t.click)&&(e.listened=!0),e.moveListened||F.forEach((o=>{(0,r.a7)(t[o])&&(e.listened=!0,e.moveListened=!0)}))}))}(0,n,o),Rt(t,n,o,e.mode),n.visibleElements=n.elements.filter((t=>!t.skip&&t.options.display)),function(t,e,o){const n=e.visibleElements;e.hooked=M(o,K,e.hooks),e.hooked||n.forEach((t=>{e.hooked||K.forEach((o=>{(0,r.a7)(t.options[o])&&(e.hooked=!0)}))}))}(0,n,o)},beforeDatasetsDraw(t,e,o){Ft(t,"beforeDatasetsDraw",o.clip)},afterDatasetsDraw(t,e,o){Ft(t,"afterDatasetsDraw",o.clip)},beforeDraw(t,e,o){Ft(t,"beforeDraw",o.clip)},afterDraw(t,e,o){Ft(t,"afterDraw",o.clip)},beforeEvent(t,e,o){U(Bt.get(t),e.event,o)&&(e.changed=!0)},afterDestroy(t){Bt.delete(t)},_getState:t=>Bt.get(t),defaults:{animations:{numbers:{properties:["x","y","x2","y2","width","height","centerX","centerY","pointX","pointY","radius"],type:"number"}},clip:!0,interaction:{mode:void 0,axis:void 0,intersect:void 0},common:{drawTime:"afterDatasetsDraw",init:!1,label:{}}},descriptors:{_indexable:!1,_scriptable:t=>!Lt.includes(t)&&"init"!==t,annotations:{_allKeys:!1,_fallback:(t,e)=>`elements.${At[Wt(e.type)].id}`},interaction:{_fallback:!0},common:{label:{_indexable:Et,_fallback:!0},_indexable:Et}},additionalOptionScopes:[""]};function Ft(t,e,o){const{ctx:n,chartArea:i}=t,s=Bt.get(t);o&&(0,r.Y)(n,i);const a=function(t,e){const o=[];for(const n of t)if(n.options.drawTime===e&&o.push({element:n,main:!0}),n.elements&&n.elements.length)for(const t of n.elements)t.options.display&&t.options.drawTime===e&&o.push({element:t});return o}(s.visibleElements,e).sort(((t,e)=>t.element.options.z-e.element.options.z));for(const r of a)Qt(n,i,s,r);o&&(0,r.$)(n)}function Qt(t,e,o,n){const r=n.element;n.main?(Z(o,r,"beforeDraw"),r.draw(t,e),Z(o,r,"afterDraw")):r.draw(t,e)}},18107:(t,e,o)=>{var n=o(46518),r=o(48981),i=o(26198),s=o(91291),a=o(6469);n({target:"Array",proto:!0},{at:function(t){var e=r(this),o=i(e),n=s(t),a=n>=0?n:o+n;return a<0||a>=o?void 0:e[a]}}),a("at")},34504:(t,e,o)=>{o(18107)},78898:(t,e,o)=>{var n=o(46518),r=o(68183).charAt,i=o(67750),s=o(91291),a=o(655);n({target:"String",proto:!0,forced:!0},{at:function(t){var e=a(i(this)),o=e.length,n=s(t),d=n>=0?n:o+n;return d<0||d>=o?void 0:r(e,d)}})}}]); \ No newline at end of file diff --git a/src/web/gui/v2/6121.f7286809e53e1c6d655a.chunk.js.LICENSE.txt b/src/web/gui/v2/6121.f7286809e53e1c6d655a.chunk.js.LICENSE.txt deleted file mode 100644 index 1b47ed127..000000000 --- a/src/web/gui/v2/6121.f7286809e53e1c6d655a.chunk.js.LICENSE.txt +++ /dev/null @@ -1,6 +0,0 @@ -/*! 
-* chartjs-plugin-annotation v2.2.1 -* https://www.chartjs.org/chartjs-plugin-annotation/index - * (c) 2023 chartjs-plugin-annotation Contributors - * Released under the MIT License - */ diff --git a/src/web/gui/v2/6323.26d4d949c9b6f8674c2e.chunk.js b/src/web/gui/v2/6323.26d4d949c9b6f8674c2e.chunk.js deleted file mode 100644 index 46260dde9..000000000 --- a/src/web/gui/v2/6323.26d4d949c9b6f8674c2e.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="ac1f673a-1d4a-4dd6-a45d-067f0aeb0a37",e._sentryDebugIdIdentifier="sentry-dbid-ac1f673a-1d4a-4dd6-a45d-067f0aeb0a37")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[6323],{49032:(e,t,a)=>{a.d(t,{B9:()=>l});const n=/^(([^<>()\[\]\\.,;:\s@"]+(\.[^<>()\[\]\\.,;:\s@"]+)*)|(".+"))@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\])|(([a-zA-Z\-0-9]+\.)+[a-zA-Z]{2,}))$/,l=e=>!!e&&n.test(e)},84707:(e,t,a)=>{a.d(t,{y:()=>c});a(25440),a(17333),a(41393),a(98992),a(54520),a(81454),a(62953);var n=a(96540),l=a(83199),o=a(49032);const i=(0,a(8711).default)(l.Select).withConfig({displayName:"styled__StyledSelect",componentId:"sc-bkkrx-0"})(["width:100%;"]);a(71517),a(11379),a(93777),a(14190),a(12359),a(86097),a(17273),a(27415),a(19929),a(37583),a(55122),a(20230),a(57268),a(79733),a(25509),a(65223),a(60321),a(41927),a(11632),a(64377),a(66771),a(12516),a(68931),a(52514),a(35694),a(52774),a(49536),a(21926),a(94483),a(16215);const s=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:[],t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"name";return e.length?"string"===typeof e[0]?[...new Set(e)]:[...new Map(e.map((e=>[e[t],e]))).values()]:[]},r=[],c=e=>{let{invitations:t,setInvitations:a}=e;const[c,d]=(0,n.useState)(r),[m,u]=(0,n.useState)(""),[p,g]=(0,n.useState)(""),v=()=>g(""),h=(0,n.useCallback)((e=>{u(e.toLowerCase())}),[u]),f=n.useCallback((e=>{let{emails:t=r,invitations:n=r,isEmailValid:l}=e;if(l){const e=s(t),l=s(n);return d(e),a(l),u(""),void v()}g("Invalid Email")}),[a]),b=(0,n.useCallback)((e=>(0,o.B9)(e)&&!c.includes(e)),[c]),y=(0,n.useCallback)((e=>{f({isEmailValid:!0,emails:e.map((e=>e.value)),invitations:e.map((e=>({email:e.value,name:e.value.split("@")[0]})))})}),[d]),E=(0,n.useCallback)((e=>{const a=e.clipboardData.getData("Text").toLowerCase().replace(/ /g,",").replace(/,,/g,",").split(",").filter((e=>b(e)))||r;f({emails:[...c,...a],invitations:[...t,...a.map((e=>({email:e,name:e.split("@")[0]})))],isEmailValid:a.length>0}),e.preventDefault()}),[c,t,b,f]),w=(0,n.useCallback)((e=>{if(m)switch(v(),e.key){case"Enter":case"Tab":case",":case" ":f({emails:[...c,m],invitations:[...t,{email:m,name:m.split("@")[0]}],isEmailValid:b(m)}),e.preventDefault()}}),[c,m,t,b,f]);return n.createElement(l.Flex,{justifyContent:"space-between",column:!0,onPaste:E},n.createElement(i,{components:{DropdownIndicator:null},inputValue:m,isClearable:!0,isMulti:!0,menuIsOpen:!1,onBlur:()=>{m&&f({emails:[...c,m],invitations:[...t,{email:m,name:m.split("@")[0]}],isEmailValid:b(m)})},onChange:y,onInputChange:h,onKeyDown:w,onClear:()=>{d(r),a(r)},placeholder:"Enter an email and hit enter",value:c.map((e=>{return{label:t=e,value:t};var 
t}))}),p&&n.createElement(l.Text,{color:"error"},p))}},6323:(e,t,a)=>{a.d(t,{d:()=>U});var n=a(58168),l=(a(17333),a(41393),a(98992),a(54520),a(81454),a(62953),a(96540)),o=a(83199),i=a(71847),s=a(13871),r=a(78217),c=a(83179),d=a(4659),m=a(84707),u=a(3914),p=a(14994),g=a(77181),v=a(81048),h=a(15327),f=a(74618),b=a(45765),y=a(27287);const E=e=>{let{id:t,handleDelete:a}=e;const n=(0,g.c)(t,"email");return l.createElement(o.Flex,{justifyContent:"between",alignItems:"center"},l.createElement(o.Flex,{gap:4},l.createElement(o.Icon,{color:"text",name:"check"}),l.createElement(o.Text,null,n)),l.createElement(o.Button,{flavour:"borderless",icon:"trashcan",onClick:()=>a({email:n})}))};var w=a(71835),C=a(49032),I=a(46741),k=a(69756),x=a(36850),S=a(92155),_=a(50876),A=a(63314);const R=e=>{let{email:t}=e;return(0,C.B9)(t)},T=(0,S.A)(o.Button),B={header:"Invitations",text:"Invitations successfully sent!"},U=e=>{let{onClose:t,isSubmodal:a=!1}=e;const{id:C,name:S,slug:U}=(0,u.ap)(),D=(0,p.WW)(),[N,j]=(0,l.useState)(D),[F,L]=(0,l.useState)([]),[V,M]=(0,l.useState)([]),[Z,O]=(0,l.useState)(),{sendLog:z,isReady:P}=(0,_.A)(),[K,H,Y,G]=(0,g.g)(C),[,q]=(0,w.A)(),Q=(0,l.useCallback)((e=>{const{header:a,text:n}=e||B,l=(0,s.UI)({header:a,text:n,success:!0}),o=V.filter(R).map((e=>{let{email:t}=e;return t})).join(",");(0,i.H)("invite","invite-sent","".concat(Z,"::").concat(o,"::").concat(N.join(",")),"","","invite-sent"),r.A.success(l,{context:"manageInvitations"}),t(),P&&z({isSuccess:!0},!0)}),[z,P]),W=(0,I._s)(),J=e=>t=>{let{email:a}=t;e&&H(e),M(V.filter((e=>e.email!==a))),L(F.filter((e=>e.email!==a)))},$=(0,l.useCallback)((()=>{j([])}),[j]),X="member"===Z;return l.createElement(h.GO,{onClose:t,closeOnClickOutside:!1},l.createElement(A.Ay,{feature:"ManageInvitationsModal"},l.createElement(f.z,{onClose:t,isSubmodal:a,title:"Invite Users"}),l.createElement(b.U,null,"Invite users to\xa0",S),l.createElement(h.Yv,null,l.createElement(y.dE,null,"Send invitations to your team"),l.createElement(y.BZ,null,"TIP: You can send multiple invitations at once; separate each with a comma."),l.createElement(m.y,{invitations:V,setInvitations:M}),l.createElement("br",null),l.createElement(y.dE,null,"Rooms"),l.createElement(o.Flex,{alignItems:"center",justifyContent:"between",margin:[1,0,2]},l.createElement(o.TextSmall,null,"Choose one or more rooms you'd like to invite users to."),!!N.length&&l.createElement(o.Button,{onClick:$,padding:[0],flavour:"borderless","data-ga":"rooms-clear",label:"Clear",small:!0},"Clear")),l.createElement(o.Box,{"data-testid":"invite-selectRoom"},l.createElement(c.A,(0,n.A)({selectedValue:N,onChange:j},X?{formatOptions:e=>{let{name:t}=e;return{isDisabled:t===v.Q8}},filterValues:e=>{let{label:t}=e;return t===v.Q8}}:{}))),l.createElement("br",null),l.createElement(y.dE,null,"Role"),l.createElement(y.BZ,null,"Choose a role for the invited user."," ",l.createElement(d.A,{href:x.S,target:"_blank",rel:"noopener noreferrer",Component:o.TextSmall},"Learn more")),l.createElement(o.Box,{"data-testid":"invite-selectRole"},l.createElement(k.A,{availableRoles:W,dataGA:"invite-to-space",dataTestId:"invite-selectRole",onChange:e=>{O(e.target.value)},value:Z})),l.createElement(y.fh,null,l.createElement(T,{label:"Send",onClick:async()=>{const 
e=V.filter(R).map((e=>({email:e.email,name:e.name,role:Z,roomIDs:N}))),t="".concat(window.location.origin,"/spaces/").concat(U,"/join-space");Y(e,t,{onSuccess:Q,onError:e=>{q(e),z({isFailure:!0,error:e.message},!0)}})},disabled:0===V.length||!Z,flavour:"hollow",isLoading:G,"data-ga":"manage-invitations-modal::click-send::modal-footer"})),l.createElement(o.H5,{margin:[2,0,0]},"Invitations awaiting response"),l.createElement(o.Flex,{column:!0},K.length>0?K.map((e=>l.createElement(E,{key:e,handleDelete:J(e),id:e}))):l.createElement(y.au,null,l.createElement("br",null),l.createElement(y.dE,null,"You haven't invited any users yet."))))))}},27287:(e,t,a)=>{a.d(t,{BZ:()=>r,au:()=>s,dE:()=>o,fh:()=>i});var n=a(8711),l=a(83199);const o=(0,n.default)(l.H5).withConfig({displayName:"styled__StyledH5",componentId:"sc-1kusjmi-0"})(["display:flex;align-items:center;"]),i=n.default.div.withConfig({displayName:"styled__FormRow",componentId:"sc-1kusjmi-1"})(["width:100%;display:flex;flex-flow:row nowrap;justify-content:flex-end;margin-top:",";"],(0,l.getSizeBy)(2)),s=n.default.div.withConfig({displayName:"styled__StyledUserInvitationEmptyListItem",componentId:"sc-1kusjmi-2"})(["display:flex;flex-flow:column nowrap;align-items:center;"]),r=(0,n.default)(l.TextSmall).withConfig({displayName:"styled__StyledSecondaryText",componentId:"sc-1kusjmi-3"})(["margin:2px 0 8px;"])},69756:(e,t,a)=>{a.d(t,{A:()=>v});a(41393),a(81454);var n=a(96540),l=a(83199),o=a(8711),i=a(80158),s=a(97674),r=a(3914),c=a(4659),d=a(84976),m=a(46741),u=a(27994);const p={admin:"Users with this role can control Spaces, Rooms, Nodes, Users and Billing. They can also access any Room in the Space.",member:"Users with this role can create Rooms and invite other Members. They can only see the Rooms they belong to and all Nodes in the All Nodes room.",manager:"Users with this role can manage Rooms and Users. They can access any Room in the Space.",troubleshooter:"Users with this role can use Netdata to troubleshoot, not manage entities. 
They can access any Room in the Space.",observer:"Users with this role can only view data in specific Rooms.",billing:"Users with this role can handle billing options and invoices."},g=(0,o.default)(l.Flex).withConfig({displayName:"rolePicker__PlanBadge",componentId:"sc-ypuqww-0"})(["pointer-events:auto;"]),v=e=>{let{availableRoles:t,dataGA:a,dataTestId:o,onChange:v,value:h}=e;const f=(0,r.ap)("plan"),b=(0,n.useMemo)((()=>(0,s.L_)(f).map((e=>({isChecked:e===h,isEnabled:t.includes(e),role:e}))).sort(((e,t)=>Number(t.isEnabled)-Number(e.isEnabled)))),[t,s.L_,f,h]),y=(0,m.JT)("billing:ReadAll"),{url:E}=(0,u.A)();return n.createElement(l.Flex,{column:!0,gap:2,"data-testid":"".concat(o,"-roleOptions")},b.map((e=>{let{isChecked:t,isEnabled:s,role:r}=e;const m=s?void 0:"medium",u="troubleshooter"===r?"pro":"business";return n.createElement(l.RadioButton,{key:r,checked:t,"data-ga":"".concat(a,"::select-role-").concat(r,"::global-view"),"data-testid":"".concat(o,"-").concat(r,"Option"),disabled:!s,onChange:v,value:r,alignItems:"start"},n.createElement(l.Flex,{column:!0},n.createElement(l.Flex,{gap:2,alignItems:"center"},n.createElement(l.Text,{opacity:m},(0,i.Zr)(r)),!s&&n.createElement(g,{background:"sideBarMini",border:{side:"all",color:"border"},cursor:"initial",padding:[1],round:!0},n.createElement(c.A,{align:"bottom",as:d.N_,boxProps:{as:l.Flex},color:"text",Component:l.TextMicro,content:"Upgrade your plan in order to use this role","data-ga":"".concat(a,"::click-plan-badge-").concat(u,"::global-view"),disabled:!y,hoverColor:"textFocus",showToolTip:!0,strong:!0,to:E},"Upgrade now!"))),n.createElement(l.TextSmall,{color:"textLite",opacity:m},p[r])))})))}},77181:(e,t,a)=>{a.d(t,{c:()=>f,g:()=>h});a(17333),a(9920),a(41393),a(98992),a(54520),a(3949),a(81454),a(25509),a(65223),a(60321),a(41927),a(11632),a(64377),a(66771),a(12516),a(68931),a(52514),a(35694),a(52774),a(49536),a(21926),a(94483),a(16215),a(62953);var n=a(96540),l=a(47444);const o=(0,l.Iz)({key:"invitation",default:{id:"",email:""}}),i=(0,l.Iz)({key:"invitationIds",default:[]});var s=a(26655);const r=e=>e.map((e=>{let{id:t,email:a}=e;return{id:t,email:a}})),c={member:1,admin:2,manager:3,troubleshooter:4,observer:5,billing:6},d=e=>e.map((e=>{let{role:t,...a}=e;if(void 0===c[t])throw new Error("role not found");return{role:c[t],...a}}));a(14905),a(8872);var m=a(78969);const u=e=>{let{data:t,invitations:a}=e;return t.reduce(((e,t,n)=>{var l,o;(o=t.errorMsgKey)&&o===m.vK&&(null!==(l=a[n])&&void 0!==l&&l.email&&e.push(a[n].email));return e}),[])},p=e=>e.length>1,g=(0,l.K0)({key:"spaceInvitationValue",get:e=>{let{id:t,key:a}=e;return e=>{let{get:n}=e;const l=n(o(t));return a?l[a]:l}}}),v=(0,l.K0)({key:"spaceInvitationsIdsValue",get:e=>t=>{let{get:a}=t;return a(i(e))},set:e=>(t,a)=>{let{set:n}=t,{invitations:l,merge:s}=a;n(i(e),(e=>[...new Set([...e,...l.map((e=>{let{id:t}=e;return t}))])])),l.forEach((e=>{n(o(e.id),(t=>({...s&&t,...e})))}))}}),h=e=>{const t=(0,l.vc)(v(e)),[a,c]=(0,n.useState)(!1),m=(0,l.Zs)((t=>{let{snapshot:a,set:n}=t;return async()=>{if(!(await a.getLoadable(v(e))).length){const{data:t}=await(e=>s.A.get("/api/v2/spaces/".concat(e,"/invitations"),{transform:r}))(e);n(v(e),{invitations:t,merge:!0})}}}),[e]),g=(0,n.useCallback)((async(t,a,n)=>{let{onSuccess:l,onError:o}=n;try{c(!0);const{data:n}=await((e,t,a)=>s.A.post("/api/v1/spaces/".concat(e,"/invitations"),{redirectURI:a,requests:d(t)}))(e,t,a),i=u({data:n,invitations:t});if(i.length&&!p(t))return void(o&&o({message:"User has already joined that 
space!"}));if(i.length&&p(t)){if(i.length===t.length)return void(o&&o({message:"All of the selected users are already meembers of this space"}));if(l)return void l({header:"Invitations partially send",text:"Some of the selected users are already members of this space"})}l&&l()}catch(i){o&&o(i)}finally{c(!1)}}),[e]),h=(0,l.Zs)((t=>{let{snapshot:a,set:n,reset:l}=t;return async t=>{const r=await a.getPromise(v(e)),c=r.filter((e=>e===t));n(i(e),(e=>{const t=new Set(e);return c.forEach((e=>t.delete(e))),[...t]}));try{await((e,t)=>s.A.delete("/api/v1/spaces/".concat(e,"/invitations"),{params:{invitation_ids:t.join(",")}}))(e,[t]),c.forEach((e=>{l(o(e))}))}catch(d){n(v(e),{invitations:r,merge:!1})}}}),[e]);return(0,n.useEffect)((()=>{m()}),[e]),[t,h,g,a]},f=(e,t)=>(0,l.vc)(g({id:e,key:t}))}}]); \ No newline at end of file diff --git a/src/web/gui/v2/6331.89070793921be1288bb5.css b/src/web/gui/v2/6331.89070793921be1288bb5.css deleted file mode 100644 index e5d02668f..000000000 --- a/src/web/gui/v2/6331.89070793921be1288bb5.css +++ /dev/null @@ -1,2 +0,0 @@ -.default .dygraph-axis-label{color:#35414a}.dark .dygraph-axis-label{color:#fff}.dygraph-label-rotate-right{text-align:center;transform:rotate(-90deg);-webkit-transform:rotate(-90deg);-moz-transform:rotate(-90deg);-o-transform:rotate(-90deg);-ms-transform:rotate(-90deg)}.dygraph-annotation{position:absolute;z-index:10;overflow:hidden;border:1px solid} - diff --git a/src/web/gui/v2/6331.c91b5d104cdff1be3b80.chunk.js b/src/web/gui/v2/6331.c91b5d104cdff1be3b80.chunk.js deleted file mode 100644 index 8aa97382b..000000000 --- a/src/web/gui/v2/6331.c91b5d104cdff1be3b80.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="7b52f161-70b6-4332-8d7e-92ff54db7064",e._sentryDebugIdIdentifier="sentry-dbid-7b52f161-70b6-4332-8d7e-92ff54db7064")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[6331],{36196:(e,t,n)=>{n.d(t,{A:()=>b});var l=n(58168),a=n(96540),r=n(22332),o=n(10534),i=n(89380),c=n(25369),s=n(39360),u=n(95662),d=n(99891),m=n(49096),p=n(74487),g=n(64131);const h=(0,a.forwardRef)(((e,t)=>{let{width:n,height:r,...o}=e;return a.createElement(i.ChartWrapper,{width:n,height:r},a.createElement(g.N1,(0,l.A)({hasHeader:!1,hasFilters:!1,hasFooter:!1,width:n,height:r},o,{ref:t})))})),E=(0,o.default)(h,{tile:!0}),v={dygraph:g.Ay,easypiechart:d.Ay,gauge:s.Ay,number:m.Ay,groupBoxes:c.Ay,d3pie:u.Ay,bars:p.Ay},x=e=>{const t=(0,r.useChart)(),n=(0,r.useAttributeValue)("sparkline"),o=(0,r.useAttributeValue)("chartLibrary"),i=(0,a.useMemo)((()=>t?n?E:v[o]:null),[t,o,n]);return i?a.createElement(i,(0,l.A)({},e,{chart:t})):null},b=(0,r.withChartProvider)((0,a.memo)(x))},40267:(e,t,n)=>{n.d(t,{A:()=>r});var l=n(96540),a=n(83199);const r=e=>{let{flavour:t,icon:n,children:r}=e;return l.createElement(a.Pill,{icon:n,textProps:{textTransform:"capitalize"},flavour:t},r)}},11164:(e,t,n)=>{n.d(t,{A:()=>v});var l=n(96540),a=n(36196),r=n(63950),o=n.n(r),i=n(83199),c=n(57605),s=n(28738),u=n(3914),d=n(69765),m=n(80925),p=n(47731),g=n(52768);const 
h=e=>t=>"alert-modal::".concat(e.getAttribute("id"),"::").concat(t),E={width:"108px",height:"77px"},v=e=>{let{instance:t,context:n,isFormattedValueLoaded:r,nodeId:v,status:x,lastStatusChange:b,formattedLastStatusChangeValue:y,lastUpdated:f,spaceId:C,roomId:A,setChartSelected:w=o()}=e;const T=(0,u.vt)();C=C||T;const I=(0,d.ID)();A=A||I;const S=(0,p.J)(),F=(0,m.e)(),k=(0,c.A)();(0,l.useEffect)((()=>(k(),k)),[]);const D=(0,u.dg)(),O=(0,l.useMemo)((()=>{if(!r)return;const e=F.makeChart({attributes:{nodeId:v,contextScope:[n],selectedInstances:t&&v?["".concat(t,"@").concat(v)]:[],id:t,roomId:A,enabledResetRange:!1,overlays:{proceeded:{type:"proceeded"},alarm:{type:"alarm",status:x,value:y,when:b}},host:D?"".concat(window.envSettings.agentApiUrl,"/api/v2"):"".concat(window.envSettings.apiUrl,"/api/v3/spaces/").concat(C,"/rooms/").concat(A),nodesScope:[v],toolboxElements:[],sparkline:S,hasToolbox:!S},makeTrack:h});return F.getRoot().appendChild(e),e}),[t,r]);return(0,g.Vt)({lastUpdated:f,lastStatusChange:b},r),(0,l.useEffect)((()=>(w((e=>({...e,...O}))),()=>{O&&O.destroy(),w(null)})),[O]),l.createElement(i.Flex,{flex:!1,width:"100%",height:75},r&&O?l.createElement(a.A,{"data-chartid":t,chart:O,hasHeader:!S,hasFooter:!S}):l.createElement(s.A,{iconProps:E,title:"Loading chart..."}))}},33590:(e,t,n)=>{n.r(t),n.d(t,{default:()=>Se});var l=n(96540),a=n(83199),r=(n(62953),n(21875)),o=n(87659),i=n(69765),c=n(36196),s=n(80925),u=n(35243);const d=()=>{const e=(0,l.useRef)(),t=(0,r.vX)().getAttribute("id"),n=(0,i.ID)(),o=(0,u.N)(),d=(0,s.e)(),m=(e=>{const[t,n]=(0,l.useState)(!1),a=(0,l.useMemo)((()=>new IntersectionObserver((e=>{let[t]=e;return n(t.isIntersecting)}))),[e]);return(0,l.useEffect)((()=>(a.observe(e.current),()=>a.disconnect())),[]),t})(e),p=(0,l.useMemo)((()=>{const e=d.makeChart({attributes:{contextScope:[t],height:80,id:t,roomId:n,toolboxElements:[]}});return o.appendChild(e),e}),[o,n,t]);return l.createElement(a.Flex,{ref:e,width:"100%",height:"350px"},m?l.createElement(c.A,{margin:[0,0,2],chart:p,"data-chartid":t,"data-track":p.track("container")}):null)},m=(0,l.memo)(d);var p=n(64118),g=n(11164),h=n(52768),E=n(3914);const v=e=>{let{alertId:t}=e;const n=(0,E.vt)(),o=(0,i.ID)(),[,c]=(0,r.v7)(),{fullyLoaded:s=!1,units:u,lastStatusChangeValue:d,lastStatusChange:m,context:v,instance:x,nodeId:b,status:y,lastUpdated:f,value:C}=(0,p.JL)(t);(0,p.yk)(t,{spaceId:n,roomId:o});const A=(0,h.J4)(C,u),w=(0,h.J4)(d,u);return s?l.createElement(g.A,{id:t,context:v,instance:x,formattedLastValue:A,formattedLastStatusChangeValue:w,lastStatusChange:m,lastUpdated:f,isFormattedValueLoaded:s,nodeId:b,status:y,testid:"alertView",spaceId:n,roomId:o,setChartSelected:c}):l.createElement(a.Flex,{height:"200px"},"Loading chart...")},x=e=>{let{title:t="Chart preview"}=e;const[n,i]=(0,o.A)(!0),c=(0,r.vX)();return l.createElement(a.Flex,{column:!0,gap:2,padding:[2,0]},l.createElement(a.Flex,{gap:1,alignItems:"center",cursor:"pointer",onClick:i},l.createElement(a.Box,null,t),l.createElement(a.Icon,{name:"chevron_down",size:"small",color:"textLite",rotate:n?2:0})),l.createElement(a.Flex,{height:n?"auto":"0px",overflow:"hidden"},null!==c&&void 0!==c&&c.alertId?l.createElement(v,{alertId:c.alertId}):l.createElement(m,null)))};var b=n(58168);n(41393),n(81454);const y=e=>{let{title:t="",showAdvanced:n,children:r,...i}=e;const[c,s]=(0,o.A)(!1);return 
l.createElement(a.Flex,(0,b.A)({column:!0,gap:2,padding:[2,0]},i),l.createElement(a.Flex,{alignItems:"center",justifyContent:"between"},l.createElement(a.Flex,{alignItems:"center",gap:2},l.createElement(a.Icon,{name:"dot",width:"8px",color:"primary"}),l.createElement(a.Text,{strong:!0},t)),n&&l.createElement(a.Checkbox,{label:"Show advanced",checked:c,onChange:s})),l.createElement(a.Flex,{padding:[0,0,0,4]},l.Children.map(r,(e=>l.isValidElement(e)?l.cloneElement(e,{advanced:c}):e))))},f=e=>{let{isVertical:t,...n}=e;return l.createElement(a.Box,(0,b.A)({as:"hr",height:t?"100%":"1px",width:t?"1px":"100%",sx:{borderWidth:t?"0px 0px 0px 1px":"1px 0px 0px 0px",borderColor:"borderSecondary",borderStyle:"solid"}},n))};var C=n(83863);const A=()=>{const{detectionMethod:e,setDetectionMethod:t}=(0,r.aR)();return l.createElement(a.Flex,{column:!0,gap:2},l.createElement(a.Flex,null,l.createElement(a.ButtonGroup,{items:C.Hv,checked:e,onChange:t})),l.createElement(a.TextSmall,{color:"textLite"},"An alert is triggered whenever a metric crosses a threshold"))};n(17333),n(98992),n(54520);var w=n(63950),T=n.n(w),I=n(16093);const S=e=>l.createElement(I.c.Option,e,l.createElement(a.Flex,{gap:2},l.createElement(a.Checkbox,{checked:e.isSelected}),l.createElement(a.TextSmall,null,e.label))),F=e=>l.createElement(a.Box,{position:"relative"},l.createElement(a.Select,(0,b.A)({components:{Option:S}},e))),k=function(){let{label:e,value:t={},options:n=[],updateState:r=T(),placeholder:o,defaultOption:i}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const{items:c,expression:s}=t,[u,d]=(0,l.useState)(t);(0,l.useEffect)((()=>{r(u)}),[u]);const m=(0,l.useCallback)((e=>{d((t=>({...t,items:e.filter((e=>{let{value:t}=e;return!!t}))})))}),[u,d]),p=(0,l.useCallback)((e=>{let{target:{value:t}}=e;d((e=>({...e,expression:t})))}),[u,d]);return l.createElement(a.Flex,{column:!0,gap:2},l.createElement(a.Flex,{column:!0,gap:1},e&&l.createElement(a.TextSmall,{strong:!0},e),l.createElement(F,{options:n,value:null!==c&&void 0!==c&&c.length?c.filter((e=>{let{value:t}=e;return t})):i,onChange:m,styles:{size:"tiny"},isDisabled:!1,isMulti:!0,closeMenuOnSelect:!1})),l.createElement(a.TextInput,{value:s||"",onChange:p,placeholder:o||"Type an expression",size:"tiny"}))},D=()=>{const[e]=(0,r.v7)(),t=e.getAttribute&&e.getAttribute("nodes")||[],n=Object.entries(t).map((e=>{let[t,{nm:n}]=e;return{label:n,value:t}})),a={label:"".concat(n.length," ").concat(n.length>1?"Nodes":"Node"),value:""},o=[a,...n],{metrics:{hosts:i},setHosts:c}=(0,r.BO)();return l.createElement(k,{value:i,options:o,updateState:c,defaultOption:a})},O=()=>{const[e]=(0,r.v7)(),t=e.getAttribute&&e.getAttribute("instances")||[],n=Object.entries(t).map((e=>{let[t,{nm:n}]=e;return{label:n,value:t}})),a={label:"".concat(n.length," ").concat(n.length>1?"Instances":"Instance"),value:""},o=[a,...n],{metrics:{charts:i},setCharts:c}=(0,r.BO)();return l.createElement(k,{value:i,options:o,updateState:c,defaultOption:a})},B=()=>{const[e]=(0,r.v7)(),t=e.getAttribute&&e.getAttribute("labels")||[],n=Object.values(t).map((e=>{let{id:t,vl:n=[]}=e;return{label:t,options:n.map((e=>({label:e.id,value:"".concat(t,":").concat(e.id)})))}})),a={label:"".concat(n.length," ").concat(n.length>1?"Labels":"Label"),value:""},o=[a,...n],{metrics:{chartLabels:i},setChartLabels:c}=(0,r.BO)();return l.createElement(k,{value:i,options:o,updateState:c,defaultOption:a})},L=()=>{const{metrics:{os:e},setOs:t}=(0,r.BO)(),n=(0,l.useMemo)((()=>null!==e&&void 0!==e&&e.length?e.filter((e=>{let{value:t}=e;return 
t})):C.Lf),[e]);return l.createElement(F,{options:C.R8,value:n,onChange:t,styles:{size:"tiny"},isDisabled:!1,isMulti:!0,closeMenuOnSelect:!1})};var V=n(32089);const z=e=>{let{children:t,...n}=e;return l.createElement(a.Flex,(0,b.A)({width:{max:70}},n),t)},N=()=>l.createElement(a.Flex,{gap:2},l.createElement(z,null,l.createElement(D,null)),l.createElement(V.A,{vertical:!0}),l.createElement(z,null,l.createElement(O,null)),l.createElement(V.A,{vertical:!0}),l.createElement(z,null,l.createElement(B,null)),l.createElement(V.A,{vertical:!0}),l.createElement(z,null,l.createElement(L,null))),U=e=>{let{advanced:t}=e;const[n]=(0,r.v7)(),o=n.getAttribute?n.getAttribute("id"):"",{setOn:i}=(0,r.BO)();return(0,l.useEffect)((()=>{i(o)}),[]),l.createElement(a.Flex,{column:!0,gap:2},l.createElement(a.Flex,{alignItems:"center",gap:2},l.createElement(a.Box,{width:"200px"},l.createElement(a.TextInput,{value:o,onChange:e=>i(e.target.value),disabled:!0,size:"tiny"})),l.createElement(a.Text,null,t?"from":"on"),!t&&l.createElement(a.TextInput,{value:"each system everywhere",onChange:()=>{},disabled:!0,size:"tiny"})),t&&l.createElement(N,null))},M=()=>{const{metrics:{lookup:e},setLookup:t}=(0,r.BO)(),{aggregation:n}=e||{};return l.createElement(a.Select,{options:C.Bo,value:n,onChange:e=>t({aggregation:e}),styles:{minWidth:"80px",size:"tiny"},isDisabled:!1})},R=()=>{const{metrics:{lookup:e},setLookup:t}=(0,r.BO)(),{denoter:n}=e||{};return l.createElement(a.Select,{options:C.Lm,value:n,onChange:e=>t({denoter:e}),styles:{minWidth:"80px",size:"tiny"},isDisabled:!1})},_=()=>{const[e]=(0,r.v7)(),t=(e.getAttribute&&e.getAttribute("dimensionIds")||[]).map((e=>({label:e,value:e}))),n={label:"All Dimensions",value:""},a=[n,...t],{metrics:{lookup:o},setLookup:i}=(0,r.BO)(),{dimensions:c}=o||{},s=(0,l.useMemo)((()=>null!==c&&void 0!==c&&c.length?c.filter((e=>{let{value:t}=e;return t})):n),[c,n.label]);return l.createElement(F,{options:a,value:s,onChange:e=>i({dimensions:e}),styles:{size:"tiny"},isDisabled:!1,isMulti:!0,closeMenuOnSelect:!1})},W=()=>{const{metrics:{lookup:e},setLookup:t}=(0,r.BO)(),{timePeriod:n}=e||{};return l.createElement(a.Select,{options:C.ZE,value:n,onChange:e=>t({timePeriod:e}),styles:{minWidth:"80px",size:"tiny"},isDisabled:!1})},j=()=>{const{metrics:{lookup:e},setLookup:t}=(0,r.BO)(),{options:n}=e||{};return l.createElement(F,{options:C.Ws,value:n,placeholder:"Options",onChange:e=>t({options:e}),styles:{size:"tiny"},isDisabled:!1,isMulti:!0,closeMenuOnSelect:!1})},P=()=>l.createElement(a.Flex,{flexWrap:!0,alignItems:"center",gap:2},l.createElement(a.Text,null,"Evaluate the"),l.createElement(M,null),l.createElement(a.Text,null,"of"),l.createElement(R,null),l.createElement(a.Text,null,"of"),l.createElement(_,null),l.createElement(a.Text,null,"over the last"),l.createElement(W,null),l.createElement(j,null)),J=()=>{const{metrics:{calc:e,units:t},setCalcActive:n,setCalcValue:o,setUnitsValue:i}=(0,r.BO)(),{active:c,value:s}=e||{};return l.createElement(a.Flex,{column:!0,gap:2},l.createElement(a.Toggle,{labelRight:"Add 
formula",colored:!0,checked:c,onChange:e=>n(e.target.checked)}),l.createElement(a.Flex,{gap:2},l.createElement(a.Flex,{column:!0,gap:1},l.createElement(a.Text,null,"Formula"),l.createElement(a.Box,{width:"500px"},l.createElement(a.TextInput,{value:s,placeholder:"$this",onChange:e=>o(e.target.value),size:"tiny",disabled:!c}))),l.createElement(a.Flex,{column:!0,gap:1},l.createElement(a.Text,null,"Units"),l.createElement(a.Box,{width:"80px"},l.createElement(a.TextInput,{value:t,onChange:e=>i(e.target.value),size:"tiny",disabled:!c})))))},G=e=>{let{advanced:t}=e;return l.createElement(a.Flex,{column:!0,gap:3},l.createElement(U,{advanced:t}),l.createElement(P,null),t&&l.createElement(J,null))},H=e=>{let{value:t,onChange:n}=e;return l.createElement(a.Select,{options:C.j8,value:t,onChange:n,styles:{size:"tiny"}})};var $=n(40267),Z=n(29217);const K={denoter:C.jV.thresholds.below,value:75},X=e=>{let{recovery:t,onRecoveryThresholdAdd:n,onValueChange:r}=e;const{value:o}=t||{};return t?l.createElement(a.Flex,{alignItems:"center",gap:2},l.createElement(a.Pill,{flavour:"success",hollow:!0,icon:"reload"},"Recovery Threshold"),l.createElement(a.TextInput,{onChange:e=>r(e.target.value),value:o,type:"number",min:0,size:"tiny",metaShrinked:!0,containerStyles:{width:"80px"}}),l.createElement(Z.A,{content:"Remove recovery threshold"},l.createElement(a.Button,{icon:"trashcan",flavour:"borderless",onClick:()=>n(null),iconColor:"textLite",iconSize:"small"}))):l.createElement(a.Button,{label:"Recovery Threshold",icon:"plus",flavour:"borderless",onClick:()=>n(K)})};var Y=n(80158);const q=()=>()=>{},Q=(ee=e=>{let{isEdit:t,alertingConditions:n,setThreshold:r,setRecoveryThreshold:o,type:i,advanced:c,isDisabled:s}=e;const{denoter:u,value:d,recovery:m}=n[i]||{},p=r(i,"denoter"),g=r(i,"value"),h=r(i,"recovery"),E=o(i,"denoter"),v=o(i,"value");return l.createElement(a.Flex,{alignItems:"center",gap:2,height:"34px"},l.createElement(a.Box,null,l.createElement($.A,{width:"80px",flavour:i,textSize:"small"},(0,Y.Zr)(i,!0))),t?l.createElement(a.TextInput,{onChange:e=>g(e.target.value),value:d,size:"tiny",containerStyles:{width:"360px"},metaShrinked:!0}):l.createElement(l.Fragment,null,c&&!s?l.createElement(H,{value:u,onChange:p,isDisabled:s}):l.createElement(a.TextInput,{value:null===u||void 0===u?void 0:u.label,size:"tiny",containerStyles:{width:"160px"},disabled:!0,metaShrinked:!0}),l.createElement(a.TextInput,{onChange:e=>g(e.target.value),value:d,type:"number",min:0,size:"tiny",metaShrinked:!0,containerStyles:{width:"80px"},disabled:s}),c&&!s&&l.createElement(X,{recovery:m,onRecoveryThresholdAdd:h,onDenoterChange:E,onValueChange:v,disabled:s})))},e=>{var t;const n=(0,r.op)(),{alertingConditions:o,setThreshold:i=q}=n||{},c=i(e.type,"isFormula"),s=i(e.type,"value"),u=!(null!==o&&void 0!==o&&null!==(t=o[e.type])&&void 0!==t&&t.isFormula),d=!u,m=(0,l.useCallback)((e=>{let{target:{value:t}}=e;return s(t)}),[s]);return!n.isEdit&&e.advanced?l.createElement(a.Flex,{column:!0,gap:2},l.createElement(a.Flex,{alignItems:"center",gap:2},l.createElement(a.RadioButton,{checked:u,onChange:()=>c(!1)}),l.createElement(ee,(0,b.A)({},n,e,{isDisabled:!u}))),l.createElement(a.Flex,{alignItems:"center",gap:2},l.createElement(a.RadioButton,{checked:d,onChange:()=>c(!0)}),l.createElement(a.Flex,{column:!0,gap:1},l.createElement(a.Text,null,"Formula"),l.createElement(a.Box,{width:"500px"},l.createElement(a.TextInput,{value:e.value,placeholder:"$this",onChange:m,size:"tiny",disabled:!d}))))):l.createElement(ee,(0,b.A)({},n,e))});var ee;const 
te=e=>{let{advanced:t}=e;return l.createElement(a.Flex,{column:!0,gap:2},l.createElement(Q,{type:"critical",advanced:t}),l.createElement(V.A,null),l.createElement(Q,{type:"warning",advanced:t}))},ne=e=>{let{label:t="",value:n,onValueChange:r=T(),unit:o,onUnitChange:i=T(),unitsOptions:c=C.WB,isDisabled:s}=e;return l.createElement(a.Flex,{column:!0,gap:2},l.createElement(a.Text,{color:s?"textLite":"text"},t),l.createElement(a.Flex,{gap:2},l.createElement(a.TextInput,{value:n,onChange:e=>r(e.target.value),type:"number",min:0,size:"tiny",containerStyles:{width:"60px"},disabled:s}),l.createElement(a.Select,{options:c,value:o,onChange:e=>i(e),styles:{size:"tiny"},isDisabled:s})))},le=e=>{let{advanced:t}=e;const{alertingConditions:{interval:{value:n,unit:a}},setIntervalValue:o,setIntervalUnit:i}=(0,r.op)();return l.createElement(ne,{label:"Check interval",value:n,onValueChange:o,unit:a,onUnitChange:i,isDisabled:!t})};var ae=n(8711),re=n(59303);const oe=(0,ae.default)(a.Box).withConfig({displayName:"delayNotification__Grid",componentId:"sc-4dhott-0"})(["display:grid;grid-template-columns:repeat(2,1fr);gap:8px 16px;"]),ie=()=>{const{alertingConditions:{delayNotification:e},setDelayNotificationActive:t,setDelayNotificationUpValue:n,setDelayNotificationUpUnit:o,setDelayNotificationDownValue:i,setDelayNotificationDownUnit:c,setDelayNotificationMaxDelayValue:s,setDelayNotificationMaxDelayUnit:u,setDelayNotificationMultiplier:d}=(0,r.op)(),{active:m,up:p,down:g,multiplier:h,max:E}=e,v=!m,x=(0,re.m8)(t),b=(0,re.L7)(d);return l.createElement(a.Flex,{column:!0,gap:2},l.createElement(a.Toggle,{labelRight:"Delay Notification",colored:!0,checked:m,onChange:x}),l.createElement(oe,null,l.createElement(ne,{label:"Initial Delay on Alert Severity Escalation",value:null===p||void 0===p?void 0:p.value,onValueChange:n,unit:null===p||void 0===p?void 0:p.unit,onUnitChange:o,isDisabled:v}),l.createElement(a.Flex,{column:!0,gap:2},l.createElement(a.Text,{color:v?"textLite":"text"},"Delay Multiplier on successive delays"),l.createElement(a.TextInput,{value:h,onChange:b,type:"number",min:0,step:.1,size:"tiny",containerStyles:{width:"60px"},disabled:v})),l.createElement(ne,{label:"Initial Delay on Alert Severity De-escalation",value:null===g||void 0===g?void 0:g.value,onValueChange:i,unit:null===g||void 0===g?void 0:g.unit,onUnitChange:c,isDisabled:v}),l.createElement(ne,{label:"Limit Maximum Delay",value:null===E||void 0===E?void 0:E.value,onValueChange:s,unit:null===E||void 0===E?void 0:E.unit,onUnitChange:u,isDisabled:v})))},ce=()=>{const{alertingConditions:{agentOptions:e},setAgentOptionsActive:t,setAgentOptionsRepeatNotification:n,setAgentOptionsWarningAlertsDurationValue:o,setAgentOptionsWarningAlertsDurationUnit:i,setAgentOptionsCriticalAlertsDurationValue:c,setAgentOptionsCriticalAlertsDurationUnit:s,setAgentOptionsSendToValue:u,setAgentOptionsSendToClearNotifications:d,setAgentOptionsExecScriptActive:m,setAgentOptionsExecScriptPath:p}=(0,r.op)(),{active:g,off:h,warningAlertsDuration:E,criticalAlertsDuration:v,sendTo:x,execScript:b}=e,y=!g,f=y||h,C=(0,re.m8)(t),A=(0,re.pU)(n,!0),w=(0,re.pU)(d,!0),T=(0,re.L7)(u),I=(0,re.pU)(m),S=(0,re.L7)(p);return l.createElement(a.Flex,{column:!0,gap:2},l.createElement(a.Toggle,{labelRight:"Agent Specific Options",colored:!0,checked:g,onChange:C}),l.createElement(a.Flex,{column:!0,gap:2,width:"100%"},l.createElement(a.Checkbox,{label:"Repeat Alert Notifications",checked:!h,onChange:A,disabled:y}),l.createElement(a.Flex,{gap:4},l.createElement(ne,{label:"Warning Alerts Duration 
isActive",value:null===E||void 0===E?void 0:E.value,onValueChange:o,unit:null===E||void 0===E?void 0:E.unit,onUnitChange:i,isDisabled:f}),l.createElement(ne,{label:"Critical Alerts Duration isActive",value:null===v||void 0===v?void 0:v.value,onValueChange:c,unit:null===v||void 0===v?void 0:v.unit,onUnitChange:s,isDisabled:f})),l.createElement(a.Flex,{column:!0,gap:1},l.createElement(a.Text,{color:y?"textLite":"text"},"Send to"),l.createElement(Z.A,{content:"Insert space separated roles (already defined on the agent)",align:"top"},l.createElement(a.Flex,{alignItems:"center",gap:2},l.createElement(a.TextInput,{value:null===x||void 0===x?void 0:x.value,placeholder:"Roles",onChange:T,disabled:y,size:"tiny"}),l.createElement(a.Checkbox,{label:"Don't send Clear notifications",checked:!(null!==x&&void 0!==x&&x.clearNotifications),onChange:w,disabled:y})))),l.createElement(a.Flex,{alignItems:"center",gap:4},l.createElement(a.Checkbox,{label:"Custom Exec Script",checked:null===b||void 0===b?void 0:b.active,onChange:I,disabled:y}),l.createElement(a.Box,{width:"500px"},l.createElement(a.TextInput,{value:null===b||void 0===b?void 0:b.path,placeholder:"/path/to/script",onChange:S,disabled:y,size:"tiny"})))))},se=e=>{let{advanced:t}=e;const{alertingConditions:{denoter:n},setMainDenoter:o}=(0,r.op)();return l.createElement(a.Flex,{column:!0,gap:4},l.createElement(a.Flex,{alignItems:"center",gap:2},l.createElement(a.Text,null,"Trigger when the evaluated value is"),l.createElement(H,{value:n,onChange:o}),l.createElement(a.Text,null,"the threshold")),l.createElement(te,{advanced:t}),l.createElement(le,{advanced:t}),t&&l.createElement(l.Fragment,null,l.createElement(ie,null),l.createElement(ce,null)))},ue=()=>{const{templateName:e,templateDescription:t,summary:n,setTemplateName:o,setTemplateDescription:i,setSummary:c}=(0,r.I8)();return l.createElement(a.Flex,{column:!0,gap:2,width:"100%"},l.createElement(a.Flex,{column:!0,gap:1,width:"100%"},l.createElement(a.Text,null,"Alert template Name"),l.createElement(a.TextInput,{value:e,onChange:e=>o(e.target.value),size:"tiny"})),l.createElement(a.Flex,{column:!0,gap:1,width:"100%"},l.createElement(a.Text,null,"Alert template Description"),l.createElement(a.TextInput,{value:t,onChange:e=>i(e.target.value),size:"tiny"}),l.createElement(a.TextSmall,{color:"textLite"},"Select an appropriate Template Description")),l.createElement(a.Flex,{column:!0,gap:1,width:"100%"},l.createElement(a.Text,null,"Alert summary"),l.createElement(a.TextInput,{value:n,onChange:e=>c(e.target.value),size:"tiny",placeholder:"Alert summary ${label:label_name} can be added"}),l.createElement(a.TextSmall,{color:"textLite"},"Add a custom title to receive Alert notifications")))},de=ae.default.div.withConfig({displayName:"styled__StyledTerminalCommand",componentId:"sc-jb3u29-0"})(["display:flex;position:relative;flex-direction:column;color:",";background:",";border:1px solid ",";border-radius:4px;cursor:pointer;overflow-wrap:anywhere;white-space:pre-wrap;padding:16px 16px 24px;width:100%;height:100%;font-family:monospace;font-weight:bold;letter-spacing:0.09px;line-height:18px;font-size:14px;word-break:break-word;overflow-y:auto;"],(0,a.getColor)("primary"),(0,a.getColor)("terminalGreen"),(0,a.getColor)("primary")),me=(0,ae.default)(a.Icon).withConfig({displayName:"styled__StyledIcon",componentId:"sc-jb3u29-1"})(["display:flex;align-self:flex-end;cursor:pointer;position:absolute;bottom:16px;right:16px;"]);var pe=n(18682);const 
ge=()=>{const[e]=(0,r.$h)(),t=(0,l.useMemo)((()=>(0,re.sS)(e)),[e]);return l.createElement(de,{onClick:(0,pe.C)(t,{text:"Config copied to your clipboard."})},t,l.createElement(me,{name:"copy",size:"small",color:"textLite",onClick:(0,pe.C)(t,{text:"Config copied to your clipboard."})}))};var he=n(54856),Ee=n(4659),ve=n(22292),xe=n(19673),be=n(42728),ye=n(37618),fe=n(68741);const Ce=()=>l.createElement(a.Flex,{column:!0,gap:2,width:"100%",height:"100%",alignItems:"center",justifyContent:"center"},l.createElement(a.Flex,{flexWrap:!0,gap:2,alignItems:"center",justifyContent:"center"},l.createElement(a.Icon,{color:"warning",name:"warning_triangle",height:"16px"}),l.createElement(a.TextBig,null,"This feature is only available to paid plans"),l.createElement(he.A,{Component:a.TextBig})),l.createElement(a.Text,null,"Take a quick look at"," ",l.createElement(Ee.A,{href:"https://learn.netdata.cloud/docs/live-demo",target:"_blank",rel:"noopener noreferrer",Component:a.Text},"one of the demo spaces"),"."),l.createElement(a.Text,null,l.createElement(Ee.A,{href:"https://learn.netdata.cloud/docs/alerting/creating-alerts-with-the-alerts-configuration-manager",target:"_blank",rel:"noopener noreferrer",Component:a.Text},"Check out the docs")," ","for more information.")),Ae=e=>{let{isAnonymous:t}=e;return l.createElement(a.Flex,{column:!0,gap:2,width:"100%",height:"100%",alignItems:"center",justifyContent:"center",padding:[0,8]},t?l.createElement(l.Fragment,null,l.createElement(a.TextBig,{textAlign:"center"},"Please sign in to unlock this feature"),l.createElement(fe.A,null)):l.createElement(a.TextBig,{textAlign:"center"},"Please go to a space that is on a Business plan to unlock this feature."))},we=e=>{let{canViewConfiguration:t}=e;return t?l.createElement(a.Flex,{width:"100%",column:!0,gap:3},l.createElement(a.Text,null,"Configuration"),l.createElement(ge,null)):l.createElement(Ce,null)},Te=(e=>t=>{const{value:n}=(0,xe.JN)(),{slug:a}=n||{},r=!(0,be.Kj)(a);return l.createElement(e,(0,b.A)({canViewConfiguration:r},t))})(we),Ie=e=>{const t=(0,ve.uW)("isAnonymous"),n=(0,E.vt)();return(0,ye.ES)(n)?l.createElement(Ae,{isAnonymous:t}):t?l.createElement(we,(0,b.A)({canViewConfiguration:!0},e)):l.createElement(Te,e)},Se=()=>l.createElement(a.Flex,{gap:3,height:"100%"},l.createElement(a.Flex,{width:"70%",height:"100%",column:!0,gap:2,padding:[2,2,2,0],overflow:{vertical:"auto"}},l.createElement(x,null),l.createElement(f,null),l.createElement(y,{title:"Select the Detection Method"},l.createElement(A,null)),l.createElement(f,null),l.createElement(y,{title:"Select and define the metric to alert on",showAdvanced:!0},l.createElement(G,null)),l.createElement(f,null),l.createElement(y,{title:"Define alerting conditions",showAdvanced:!0},l.createElement(se,null)),l.createElement(f,null),l.createElement(y,{title:"Description"},l.createElement(ue,null))),l.createElement(f,{isVertical:!0}),l.createElement(a.Flex,{width:"30%",padding:[2,0]},l.createElement(Ie,null)))},54856:(e,t,n)=>{n.d(t,{A:()=>u});var l=n(58168),a=n(96540),r=n(84976),o=n(83199),i=n(4659),c=n(46741),s=n(27994);const u=function(){let{containerProps:e={},...t}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const n=(0,c.JT)("billing:ReadAll"),{url:u}=(0,s.A)();return u?a.createElement(o.Flex,(0,l.A)({background:"sideBarMini",border:{side:"all",color:"border"},padding:[1,2],round:!0},e),a.createElement(i.A,(0,l.A)({align:"bottom",as:r.N_,boxProps:{as:o.Flex},color:"text",Component:o.TextMicro,content:n?"Upgrade your plan in order to use this 
feature":"You have no permissions to manage billing",disabled:!n,hoverColor:"textFocus",showToolTip:!0,strong:!0,to:u},t),"Upgrade now!")):null}},35243:(e,t,n)=>{n.d(t,{G:()=>i,N:()=>o});n(3064),n(98992),n(72577);var l=n(47444),a=n(80925);const r=(0,l.eU)({key:"currentChartsContainerKey",default:null}),o=()=>{const e=(0,a.e)(),t=(0,l.vc)(r);return e&&e.getRoot().getChildren().find((e=>e.match({id:t})))},i=()=>(0,l.lZ)(r)}}]); \ No newline at end of file diff --git a/src/web/gui/v2/6384.0fad56b0bc902f186c98.chunk.js b/src/web/gui/v2/6384.0fad56b0bc902f186c98.chunk.js deleted file mode 100644 index 1d34f7927..000000000 --- a/src/web/gui/v2/6384.0fad56b0bc902f186c98.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="598e5ba4-f52e-4f8d-8c4b-05b81a830989",e._sentryDebugIdIdentifier="sentry-dbid-598e5ba4-f52e-4f8d-8c4b-05b81a830989")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[6384],{29176:(e,t,n)=>{n.d(t,{m:()=>m});var r=n(35840);function a(e){return(0,r.w)(e,Date.now())}var i=n(25733),o=n(2940),s=n(49858),u=n(43924),c=n(2642),f=n(96519),d=n(82695),l=n(40215);function D(e,t,n){const r=(0,d.q)(),a=n?.locale??r.locale??f.c,D=(0,i.z)(e,t);if(isNaN(D))throw new RangeError("Invalid time value");const m=Object.assign({},n,{addSuffix:n?.addSuffix,comparison:D});let N,g;D>0?(N=(0,c.a)(t),g=(0,c.a)(e)):(N=(0,c.a)(e),g=(0,c.a)(t));const w=(0,u.O)(g,N),b=((0,l.G)(g)-(0,l.G)(N))/1e3,h=Math.round((w-b)/60);let T;if(h<2)return n?.includeSeconds?w<5?a.formatDistance("lessThanXSeconds",5,m):w<10?a.formatDistance("lessThanXSeconds",10,m):w<20?a.formatDistance("lessThanXSeconds",20,m):w<40?a.formatDistance("halfAMinute",0,m):w<60?a.formatDistance("lessThanXMinutes",1,m):a.formatDistance("xMinutes",1,m):0===h?a.formatDistance("lessThanXMinutes",1,m):a.formatDistance("xMinutes",h,m);if(h<45)return a.formatDistance("xMinutes",h,m);if(h<90)return a.formatDistance("aboutXHours",1,m);if(h{n.d(t,{R:()=>a});var r=n(2642);function a(e){return+(0,r.a)(e){n.d(t,{H:()=>a});var r=n(2940);function a(e,t){const n=t?.additionalDigits??2,a=function(e){const t={},n=e.split(i.dateTimeDelimiter);let r;if(n.length>2)return t;/:/.test(n[0])?r=n[0]:(t.date=n[0],r=n[1],i.timeZoneDelimiter.test(t.date)&&(t.date=e.split(i.timeZoneDelimiter)[0],r=e.substr(t.date.length,e.length)));if(r){const e=i.timezone.exec(r);e?(t.time=r.replace(e[1],""),t.timezone=e[1]):t.time=r}return t}(e);let D;if(a.date){const e=function(e,t){const n=new RegExp("^(?:(\\d{4}|[+-]\\d{"+(4+t)+"})|(\\d{2}|[+-]\\d{"+(2+t)+"})$)"),r=e.match(n);if(!r)return{year:NaN,restDateString:""};const a=r[1]?parseInt(r[1]):null,i=r[2]?parseInt(r[2]):null;return{year:null===i?a:100*i,restDateString:e.slice((r[1]||r[2]).length)}}(a.date,n);D=function(e,t){if(null===t)return new Date(NaN);const n=e.match(o);if(!n)return new Date(NaN);const r=!!n[4],a=c(n[1]),i=c(n[2])-1,s=c(n[3]),u=c(n[4]),f=c(n[5])-1;if(r)return function(e,t,n){return t>=1&&t<=53&&n>=0&&n<=6}(0,u,f)?function(e,t,n){const r=new Date(0);r.setUTCFullYear(e,0,4);const a=r.getUTCDay()||7,i=7*(t-1)+n+1-a;return r.setUTCDate(r.getUTCDate()+i),r}(t,u,f):new Date(NaN);{const e=new Date(0);return 
function(e,t,n){return t>=0&&t<=11&&n>=1&&n<=(d[t]||(l(e)?29:28))}(t,i,s)&&function(e,t){return t>=1&&t<=(l(e)?366:365)}(t,a)?(e.setUTCFullYear(t,i,Math.max(a,s)),e):new Date(NaN)}}(e.restDateString,e.year)}if(!D||isNaN(D.getTime()))return new Date(NaN);const m=D.getTime();let N,g=0;if(a.time&&(g=function(e){const t=e.match(s);if(!t)return NaN;const n=f(t[1]),a=f(t[2]),i=f(t[3]);if(!function(e,t,n){if(24===e)return 0===t&&0===n;return n>=0&&n<60&&t>=0&&t<60&&e>=0&&e<25}(n,a,i))return NaN;return n*r.s0+a*r.Cg+1e3*i}(a.time),isNaN(g)))return new Date(NaN);if(!a.timezone){const e=new Date(m+g),t=new Date(0);return t.setFullYear(e.getUTCFullYear(),e.getUTCMonth(),e.getUTCDate()),t.setHours(e.getUTCHours(),e.getUTCMinutes(),e.getUTCSeconds(),e.getUTCMilliseconds()),t}return N=function(e){if("Z"===e)return 0;const t=e.match(u);if(!t)return 0;const n="+"===t[1]?-1:1,a=parseInt(t[2]),i=t[3]&&parseInt(t[3])||0;if(!function(e,t){return t>=0&&t<=59}(0,i))return NaN;return n*(a*r.s0+i*r.Cg)}(a.timezone),isNaN(N)?new Date(NaN):new Date(m+g+N)}const i={dateTimeDelimiter:/[T ]/,timeZoneDelimiter:/[Z ]/i,timezone:/([Z+-].*)$/},o=/^-?(?:(\d{3})|(\d{2})(?:-?(\d{2}))?|W(\d{2})(?:-?(\d{1}))?|)$/,s=/^(\d{2}(?:[.,]\d*)?)(?::?(\d{2}(?:[.,]\d*)?))?(?::?(\d{2}(?:[.,]\d*)?))?$/,u=/^([+-])(\d{2})(?::?(\d{2}))?$/;function c(e){return e?parseInt(e):1}function f(e){return e&&parseFloat(e.replace(",","."))||0}const d=[31,null,31,30,31,30,31,31,30,31,30,31];function l(e){return e%400===0||e%4===0&&e%100!==0}}}]); \ No newline at end of file diff --git a/src/web/gui/v2/6469.47926fa38028dc7d0d41.chunk.js b/src/web/gui/v2/6469.47926fa38028dc7d0d41.chunk.js deleted file mode 100644 index f9ef10cf9..000000000 --- a/src/web/gui/v2/6469.47926fa38028dc7d0d41.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="ab16de72-dc9f-47f6-ac2a-d39c4e215ee3",e._sentryDebugIdIdentifier="sentry-dbid-ab16de72-dc9f-47f6-ac2a-d39c4e215ee3")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[6469],{62727:(e,t,n)=>{n.r(t),n.d(t,{default:()=>P});n(17333),n(41393),n(98992),n(54520),n(81454),n(62953);var r=n(96540),o=n(45467),l=n(11128),a=n(47762),i=n(45123),c=n(87659),s=n(47731),d=n(82230),u=n(24398),m=n(57605),f=n(11261),p=n(38413),h=n(8711),g=n(83199),v=n(49389),E=n(63950),x=n.n(E),y=n(78062),b=n(50876);const w=e=>{let{startAddingNodes:t=x(),close:n=x(),children:o}=e;const{sendLog:l}=(0,b.A)(),a=(0,r.useCallback)((()=>{n(),t(),l({feature:"NodesFilterAddNodes"})}),[n,t,l]);return r.createElement(g.Flex,{column:!0,gap:1,padding:[2,2,1],zIndex:1,border:{side:"top",color:"border"}},r.createElement(g.Flex,{gap:2,justifyContent:"end"},r.createElement(y.A,{icon:"nodes_hollow",flavour:"hollow",onClick:a,small:!0,label:"Add Nodes"})),o)};var A=n(99292),C=n(94177),k=n(90535),I=n(22292),F=n(73865),T=n(6586),N=n(12602);const S=e=>{let{onAnchorClick:t}=e;const{loaded:n,hasLimitations:o,maxNodes:l}=(0,F.A)();return n&&o?r.createElement(g.Flex,{gap:2,padding:[3]},r.createElement(g.Icon,{name:"warning_triangle",size:"small",color:"warning"}),r.createElement(g.Text,null,"Your plan is limited to ",l," nodes."," 
",r.createElement(N.A,{onClick:t},r.createElement(g.Text,{color:"primary"},"Upgrade for no limitations"))," ","or"," ",r.createElement(T.A,{onClick:t},r.createElement(g.Text,{color:"primary"},"review your Space active Nodes")),".")):null},R=e=>{let{onAnchorClick:t=x()}=e;return(0,I.uW)("isAnonymous")?null:r.createElement(S,{onAnchorClick:t})},_=["filters"],D=(0,h.default)(g.Drop).attrs({backdrop:!0,align:{top:"bottom",right:"right"},animation:!0,background:"modalBackground",column:!0,margin:[2,0,0],padding:[2,0],round:4,width:200}).withConfig({displayName:"dropdown__Dropdown",componentId:"sc-1birv08-0"})([""]),O=(0,r.memo)((e=>{let{target:t,onClose:n,onAddNodes:o}=e;const a=(0,l.w7)(),i=(0,m.A)();return(0,r.useEffect)((()=>{i()}),[]),r.createElement(D,{target:t,onEsc:n,onClickOutside:n},r.createElement(g.Flex,{column:!0,height:{max:"60vh"}},r.createElement(g.Flex,{flex:"1",overflow:"hidden"},r.createElement(g.Flex,{column:!0,flex:!0},r.createElement(g.Flex,{alignItems:"center",padding:[3],border:{side:"bottom",color:"border"},height:12,flex:!1},r.createElement(g.Text,{strong:!0},"Visualizing data in space from ",a.length||"all"," ",(0,v.su)(a.length))),r.createElement(R,{onAnchorClick:n}),r.createElement(C.A,{testIdPrefix:"node-ids",param:"selectedNodeIds",groupProps:{collapsible:!1,background:"modalBackground",padding:[3],flex:"1",overflow:"hidden"},height:"auto",width:"100%"})),r.createElement(A.Ay,{basis:60,baseWidth:60,flex:!1,title:"Dynamic filters",help:"Filter your nodes for this page. Count next to each item refer to the number of nodes that will be selected and used across the page.",includedTabs:_,loaded:!0,onClose:n,groupProps:{background:"modalBackground"},background:"modalBackground"})),r.createElement(w,{startAddingNodes:o,close:n},r.createElement(k.A,null))))})),P=(0,r.memo)((()=>{const e=(0,s.J)(),[t,n,,h]=(0,c.A)(!1),g=(0,r.useRef)(),v=(0,l.w7)(),E=(0,a.BU)().length,x=(0,a.Ig)().length,y=(0,a.GE)().length,b=(0,a.no)().length,w=(0,a.BU)(v.length?v:void 0).length,A=(0,a.Ig)(v.length?v:void 0).length,C=(0,a.GE)(v.length?v:void 0).length,k=(0,a.no)(v.length?v:void 0).length,I=(0,r.useMemo)((()=>({live:{selected:w,total:E,statusText:"Live"},stale:{selected:A,total:x,statusText:"Stale"},offline:{selected:C,total:y,statusText:"Offline"},unseen:{selected:k,total:b,statusText:"Unseen"}})),[E,x,y,b,w,A,C]),F=(0,m.A)();(0,o.A)((()=>{F(t)}),[t]);const[T,,N,S]=(0,c.A)();return r.createElement(r.Fragment,null,r.createElement(i.A,{ref:g,testid:"globalFilter-nodes",icon:"nodes",onClick:n,round:1,padding:e?[1]:[2],width:"auto",cursor:"pointer",selected:t},r.createElement(f.A,{"data-testid":"nodesIndicator",alignItems:"center",justifyContent:"end",gap:2},Object.keys(I).filter((e=>I[e].total)).map((t=>r.createElement(d.A,{key:t,statusText:I[t].statusText,total:I[t].total,selected:I[t].selected,statusColor:u.P[t],isScreenSmall:e}))))),g.current&&t&&r.createElement(O,{target:g.current,onClose:h,onAddNodes:N}),T&&r.createElement(p.A,{onClose:S}))}))},78062:(e,t,n)=>{n.d(t,{A:()=>u});var r=n(58168),o=n(96540),l=n(83199),a=n(28146),i=n(69765),c=n(3914),s=n(53285);var d;const u=(d=l.Button,e=>{const t=(0,i.XA)(),n=(0,c.dg)();return o.createElement(s.A,{permission:"node:Create"},(i=>{const c=o.createElement(d,(0,r.A)({disabled:!i},e));return i?c:o.createElement(a.A,{content:n?"You are viewing your local node, connect to cloud and connect multiple nodes to view at once":"Only admin users can add nodes to 
".concat(t.name),"data-testid":"addNodeTooltipContainer"},o.createElement(l.Box,null,c))}))})},90535:(e,t,n)=>{n.d(t,{A:()=>g});n(41393),n(81454),n(62953);var r=n(96540),o=n(83199),l=n(47762),a=n(11128);const i=e=>{let{children:t}=e;return r.createElement(o.Flex,{"data-testid":"selected-nodes-container",alignItems:"center",width:"100%"},r.createElement(o.Flex,{alignItems:"center"},r.createElement(o.TextSmall,{whiteSpace:"nowrap","data-testid":"selected-nodes-container-message",color:"primary"},"Selected filters:")),r.createElement(o.Flex,{overflow:"hidden",padding:[1],gap:1,alignItems:"center",flexWrap:!0},t))},c=(0,r.memo)(i);var s=n(4659);const d=e=>{let{onRemove:t}=e;return r.createElement(o.Box,{"data-testid":"remove-button",as:o.Icon,name:"close_circle",onClick:t,cursor:"pointer",width:3,height:3,color:"textLite"})},u=(0,r.forwardRef)(((e,t)=>{let{removeFilter:n,group:l,id:a,value:i,isAnchorDisabled:c,onClick:u}=e;const[m,f]=i.split("|"),p=f?"".concat(m," > ").concat(f):m;return r.createElement(o.Flex,{width:"100%",ref:t,"data-testid":"selected-node-item-".concat(a)},r.createElement(o.Pill,{flavour:"neutral",hollow:!0},r.createElement(o.Flex,{position:"relative",gap:2,alignItems:"center"},u?r.createElement(s.A,{Component:o.TextSmall,disabled:c,onClick:u,cursor:"pointer",color:"text",hoverColor:"primary",visitedColor:"accent",disabledColor:"textLite"},p):r.createElement(o.TextSmall,null,p),r.createElement(d,{onRemove:()=>n({param:l,removedValue:a}),id:a}))))})),m=(0,r.memo)(u),f={selectedNodeIds:"Node",nodeStatuses:"Status",nodeLabels:"Host label",nodeCapabilities:"Capability",nodeVersions:"Version"},p=(0,r.forwardRef)(((e,t)=>{let{group:n,values:l,hasUnion:a,union:i="AND",removeFilter:c,Component:s=m}=e;const d=f[n];return null!==l&&void 0!==l&&l.length?r.createElement(r.Fragment,{key:"filter"},a&&r.createElement(o.TextSmall,{strong:!0},i),r.createElement(o.Pill,{flavour:"neutral",hollow:!0,gap:1,TextComponent:o.Flex,textProps:{alignItems:"center",gap:1},flexWrap:!0},r.createElement(o.TextNano,{strong:!0},d,":"),l.map(((e,t)=>r.createElement(r.Fragment,{key:e},t>0&&r.createElement(o.TextSmall,{strong:!0},"OR"),r.createElement(s,{removeFilter:c,id:e,value:e,group:n})))))):null})),h=(0,r.forwardRef)(((e,t)=>{let{removeFilter:n,value:o}=e;const a=(0,l.xY)(o,"name"),i=(0,l.xY)(o,"isOffline"),c=(0,l.d3)(o);return r.createElement(m,{ref:t,removeFilter:n,group:"selectedNodeIds",id:o,value:a,isAnchorDisabled:i,onClick:c})})),g=(0,r.memo)((e=>{let{flavour:t}=e;const[[n,l=[]],i]=(0,a._e)({extraKey:t,merge:!1});return n.length||l.length?r.createElement(o.Flex,{width:"100%",overflow:"hidden"},r.createElement(c,null,n.map(((e,t)=>{let[n,o]=e;return r.createElement(p,{key:n,group:n,values:o,hasUnion:t>0,removeFilter:i})})),r.createElement(p,{group:"selectedNodeIds",values:l,hasUnion:!!n.length&&!!l.length,union:"OR",removeFilter:i,Component:h}))):r.createElement("div",null)}))},24074:(e,t,n)=>{var r=n(69565),o=n(28551),l=n(2360),a=n(55966),i=n(56279),c=n(91181),s=n(97751),d=n(53982),u=n(62529),m=s("Promise"),f="AsyncFromSyncIterator",p=c.set,h=c.getterFor(f),g=function(e,t,n){var r=e.done;m.resolve(e.value).then((function(e){t(u(e,r))}),n)},v=function(e){e.type=f,p(this,e)};v.prototype=i(l(d),{next:function(){var e=h(this);return new m((function(t,n){var l=o(r(e.next,e.iterator));g(l,t,n)}))},return:function(){var e=h(this).iterator;return new m((function(t,n){var l=a(e,"return");if(void 0===l)return t(u(void 0,!0));var i=o(r(l,e));g(i,t,n)}))}}),e.exports=v},84428:(e,t,n)=>{var 
r=n(78227)("iterator"),o=!1;try{var l=0,a={next:function(){return{done:!!l++}},return:function(){o=!0}};a[r]=function(){return this},Array.from(a,(function(){throw 2}))}catch(i){}e.exports=function(e,t){try{if(!t&&!o)return!1}catch(i){return!1}var n=!1;try{var l={};l[r]=function(){return{next:function(){return{done:n=!0}}}},e(l)}catch(i){}return n}},87290:(e,t,n)=>{var r=n(50516),o=n(19088);e.exports=!r&&!o&&"object"==typeof window&&"object"==typeof document},50516:e=>{e.exports="object"==typeof Deno&&Deno&&"object"==typeof Deno.version},19088:(e,t,n)=>{var r=n(24475),o=n(44576);e.exports="process"===o(r.process)},50133:(e,t,n)=>{var r=n(69565),o=n(94901),l=n(28551),a=n(1767),i=n(50851),c=n(55966),s=n(78227),d=n(24074),u=s("asyncIterator");e.exports=function(e){var t,n=l(e),s=!0,m=c(n,u);return o(m)||(m=i(n),s=!1),void 0!==m?t=r(m,n):(t=n,s=!0),l(t),a(s?t:new d(a(t)))}},48646:(e,t,n)=>{var r=n(69565),o=n(28551),l=n(1767),a=n(50851);e.exports=function(e,t){t&&"string"===typeof e||o(e);var n=a(e);return l(o(void 0!==n?r(n,e):e))}},10916:(e,t,n)=>{var r=n(24475),o=n(80550),l=n(94901),a=n(92796),i=n(33706),c=n(78227),s=n(87290),d=n(50516),u=n(96395),m=n(77388),f=o&&o.prototype,p=c("species"),h=!1,g=l(r.PromiseRejectionEvent),v=a("Promise",(function(){var e=i(o),t=e!==String(o);if(!t&&66===m)return!0;if(u&&(!f.catch||!f.finally))return!0;if(!m||m<51||!/native code/.test(e)){var n=new o((function(e){e(1)})),r=function(e){e((function(){}),(function(){}))};if((n.constructor={})[p]=r,!(h=n.then((function(){}))instanceof r))return!0}return!t&&(s||d)&&!g}));e.exports={CONSTRUCTOR:v,REJECTION_EVENT:g,SUBCLASSING:h}},90537:(e,t,n)=>{var r=n(80550),o=n(84428),l=n(10916).CONSTRUCTOR;e.exports=l||!o((function(e){r.all(e).then(void 0,(function(){}))}))},30237:(e,t,n)=>{n(6469)("flatMap")},96167:(e,t,n)=>{var r=n(46518),o=n(69565),l=n(79306),a=n(36043),i=n(1103),c=n(72652);r({target:"Promise",stat:!0,forced:n(90537)},{allSettled:function(e){var t=this,n=a.f(t),r=n.resolve,s=n.reject,d=i((function(){var n=l(t.resolve),a=[],i=0,s=1;c(e,(function(e){var l=i++,c=!1;s++,o(n,t,e).then((function(e){c||(c=!0,a[l]={status:"fulfilled",value:e},--s||r(a))}),(function(e){c||(c=!0,a[l]={status:"rejected",reason:e},--s||r(a))}))})),--s||r(a)}));return d.error&&s(d.value),n.promise}})},32679:(e,t,n)=>{var r=n(46518),o=n(69565),l=n(79306),a=n(28551),i=n(20034),c=n(1767),s=n(92059),d=n(62529),u=n(50133),m=n(20772),f=n(96395),p=s((function(e){var t=this,n=t.iterator,r=t.mapper;return new e((function(l,c){var s=function(e){t.done=!0,c(e)},f=function(e){m(n,s,e,s)},p=function(){try{e.resolve(a(o(t.next,n))).then((function(n){try{if(a(n).done)t.done=!0,l(d(void 0,!0));else{var o=n.value;try{var c=r(o,t.counter++),m=function(e){try{t.inner=u(e),h()}catch(n){f(n)}};i(c)?e.resolve(c).then(m,f):m(c)}catch(p){f(p)}}}catch(g){s(g)}}),s)}catch(c){s(c)}},h=function(){var n=t.inner;if(n)try{e.resolve(a(o(n.next,n.iterator))).then((function(e){try{a(e).done?(t.inner=null,p()):l(d(e.value,!1))}catch(n){f(n)}}),f)}catch(r){f(r)}else p()};h()}))}));r({target:"AsyncIterator",proto:!0,real:!0,forced:f},{flatMap:function(e){return a(this),l(e),new p(c(this),{mapper:e,inner:null})}})},30670:(e,t,n)=>{var r=n(46518),o=n(69565),l=n(79306),a=n(28551),i=n(1767),c=n(48646),s=n(19462),d=n(9539),u=n(96395),m=s((function(){for(var e,t,n=this.iterator,r=this.mapper;;){if(t=this.inner)try{if(!(e=a(o(t.next,t.iterator))).done)return 
e.value;this.inner=null}catch(l){d(n,"throw",l)}if(e=a(o(this.next,n)),this.done=!!e.done)return;try{this.inner=c(r(e.value,this.counter++),!1)}catch(l){d(n,"throw",l)}}}));r({target:"Iterator",proto:!0,real:!0,forced:u},{flatMap:function(e){return a(this),l(e),new m(i(this),{mapper:e,inner:null})}})}}]); \ No newline at end of file diff --git a/src/web/gui/v2/6469.89070793921be1288bb5.css b/src/web/gui/v2/6469.89070793921be1288bb5.css deleted file mode 100644 index e5d02668f..000000000 --- a/src/web/gui/v2/6469.89070793921be1288bb5.css +++ /dev/null @@ -1,2 +0,0 @@ -.default .dygraph-axis-label{color:#35414a}.dark .dygraph-axis-label{color:#fff}.dygraph-label-rotate-right{text-align:center;transform:rotate(-90deg);-webkit-transform:rotate(-90deg);-moz-transform:rotate(-90deg);-o-transform:rotate(-90deg);-ms-transform:rotate(-90deg)}.dygraph-annotation{position:absolute;z-index:10;overflow:hidden;border:1px solid} - diff --git a/src/web/gui/v2/6661.72f782bd78fea8c2d836.chunk.js b/src/web/gui/v2/6661.72f782bd78fea8c2d836.chunk.js deleted file mode 100644 index 38b38de3b..000000000 --- a/src/web/gui/v2/6661.72f782bd78fea8c2d836.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},d=(new Error).stack;d&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[d]="bea2cd4d-f017-4db5-b9e3-546372542647",e._sentryDebugIdIdentifier="sentry-dbid-bea2cd4d-f017-4db5-b9e3-546372542647")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[6661],{46661:(e,d,t)=>{t.r(d),t.d(d,{default:()=>f});var n=t(96540),o=t(47767),a=t(83199),l=t(68938);const r={width:600,height:600},f=e=>{let{email:d}=e;const t=(0,o.Zp)(),f=(0,n.useCallback)((()=>t(window.location.pathname,{replace:!0})),[]);return n.createElement(a.Layer,{backdrop:!0,backdropProps:{backdropBlur:"3px"},"data-track":"typeform-signup-form"},n.createElement(l.Widget,{id:localStorage.getItem("typeformId")||"jC4PHA0q",style:r,medium:"demo-test",onSubmit:f,hidden:{email:d}}))}}}]); \ No newline at end of file diff --git a/src/web/gui/v2/6760.370b9780120c145da28f.chunk.js b/src/web/gui/v2/6760.370b9780120c145da28f.chunk.js deleted file mode 100644 index 78f07daf6..000000000 --- a/src/web/gui/v2/6760.370b9780120c145da28f.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="849965ac-d76d-4654-9eee-fc709635d008",e._sentryDebugIdIdentifier="sentry-dbid-849965ac-d76d-4654-9eee-fc709635d008")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[6760],{96760:(e,t,n)=>{n.r(t),n.d(t,{default:()=>I});var l=n(96540),a=n(8711),d=n(63950),o=n.n(d),r=n(81638),c=n(64473),i=n(83199),m=n(84976),s=n(47762);const f=e=>{let{id:t}=e;const n=(0,s.Zl)(t),a=(0,s.xY)(t,"name");return 
l.createElement(m.N_,{to:n},l.createElement(i.Flex,{alignItems:"center",gap:1},l.createElement(i.Icon,{name:"node_hollow",color:"primary",size:"small"}),l.createElement(i.Flex,{width:{max:40},overflow:"hidden"},l.createElement(i.Text,{color:"primary",whiteSpace:"nowrap"},a)),l.createElement(i.Icon,{name:"goToNode",color:"primary",size:"small"})))};var E=n(25825),u=n(89879),b=n(60072),h=n(80618),p=n(58010),w=n(17240);const g=e=>{let{id:t}=e;const n=(0,s.xY)(t);return n.loaded?l.createElement(i.Flex,{column:!0,gap:1,overflow:{horizontal:"hidden",vertical:"auto"},padding:[2],width:"100%"},l.createElement(E.A,{node:n}),l.createElement(u.A,{id:t,flavour:"sidebar"}),l.createElement(b.A,{node:n}),l.createElement(h.A,{node:n}),l.createElement(p.A,{node:n}),l.createElement(w.A,{node:n})):null};var y=n(20687);const v=(0,a.default)(i.Tabs).withConfig({displayName:"contents__StyledTabs",componentId:"sc-11zjb7h-0"})(["overflow:hidden;height:100%;"]),_=e=>t=>l.createElement(i.Flex,{column:!0,overflow:"hidden",height:"calc(100% - 80px)",position:"relative",gap:3,padding:[1,0]},l.createElement(e,t)),x=_(g),A=_(y.A),I=e=>{let{id:t,onClose:n=o()}=e;return l.createElement(r.Ay,{width:90,isOpen:!0,header:l.createElement(c.A,{title:l.createElement(f,{id:t}),onClick:n})},l.createElement(i.Flex,{"data-testid":"home-sidebar-tabs",column:!0,overflow:"hidden",height:"100%"},l.createElement(v,{selected:0},l.createElement(i.Tab,{label:l.createElement(i.Text,null,"Info"),small:!0},l.createElement(x,{id:t})),l.createElement(i.Tab,{label:l.createElement(i.Text,null,"Alerts"),small:!0},l.createElement(A,{nodeIds:[t]})))))}}}]); \ No newline at end of file diff --git a/src/web/gui/v2/683.02c173493ef257c210fa.chunk.js b/src/web/gui/v2/683.02c173493ef257c210fa.chunk.js deleted file mode 100644 index 1f9470c2e..000000000 --- a/src/web/gui/v2/683.02c173493ef257c210fa.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var o="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},e=(new Error).stack;e&&(o._sentryDebugIds=o._sentryDebugIds||{},o._sentryDebugIds[e]="ee64041b-8fce-4c14-8c7a-93b3842283aa",o._sentryDebugIdIdentifier="sentry-dbid-ee64041b-8fce-4c14-8c7a-93b3842283aa")}catch(o){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[683],{683:(o,e,t)=>{t.r(e),t.d(e,{Notification:()=>l.Eg,Warning:()=>f,createErrorNotification:()=>l.gi,createNotification:()=>l.UI,default:()=>g,showDashboardCreatedNotification:()=>c.jE,showInvalidDashboardSlugNotification:()=>c.$j,showJoiningSpaceNotification:()=>c.Fw,showNodesNotification:()=>c.AM,showRoomCreationNotification:()=>c.ZM,showRoomsInSpaceNotification:()=>c.mw,showRoomsNotification:()=>c.r0,showSpaceNotification:()=>c.X7,showUsersInRoomNotification:()=>c.R9,showUsersInSpaceNotification:()=>c.sb,toast:()=>d.A});var a=t(58168),n=t(96540),r=t(8711),i=t(83199),s=t(99571),d=t(78217),c=t(24198),l=t(13871);const f=o=>{let{title:e,children:t,...r}=o;return n.createElement(i.Flex,(0,a.A)({column:!0,gap:2,background:"warningBackground",backgroundOpacity:.7,round:!0,padding:[4]},r),e&&n.createElement(i.H3,null,e),t)},g=(0,r.default)((o=>{let{className:e,...t}=o;return 
n.createElement(i.Box,{className:e},n.createElement(s.N9,(0,a.A)({},t,{closeButton:!1})))})).withConfig({displayName:"notifications__Container",componentId:"sc-8t8ne-0"})([".Toastify__toast-container{position:fixed;width:unset;min-width:400px;max-width:500px;z-index:50;color:",";}.Toastify__toast{padding:0;padding-top:5px;background:",";border:1px solid ",";}.Toastify__toast--error{background:",";border:1px solid ",";}.Toastify__toast--warning{background:",";border:1px solid ",";}.Toastify__toast--success{background:",";border:1px solid ",";}.Toastify__toast-icon{display:none;}.Toastify__toast-body{}.Toastify__progress-bar{bottom:unset;top:0;}.Toastify__progress-bar--success{background-color:",";}.Toastify__progress-bar--error{background-color:",";}"],(0,i.getColor)(["neutral","limedSpruce"]),(0,i.getColor)("elementBackground"),(0,i.getColor)("text"),(0,i.getColor)(["red","lavender"]),(0,i.getColor)("error"),(0,i.getColor)("bright"),(0,i.getColor)("warning"),(0,i.getColor)(["green","frostee"]),(0,i.getColor)("success"),(0,i.getColor)("success"),(0,i.getColor)("error"))}}]); \ No newline at end of file diff --git a/src/web/gui/v2/683.cc9fa5f3bdc0bf3ab2fc.css b/src/web/gui/v2/683.cc9fa5f3bdc0bf3ab2fc.css deleted file mode 100644 index c936073e2..000000000 --- a/src/web/gui/v2/683.cc9fa5f3bdc0bf3ab2fc.css +++ /dev/null @@ -1,10 +0,0 @@ -:root{--toastify-color-light: #fff;--toastify-color-dark: #121212;--toastify-color-info: #3498db;--toastify-color-success: #07bc0c;--toastify-color-warning: #f1c40f;--toastify-color-error: #e74c3c;--toastify-color-transparent: rgba(255, 255, 255, 0.7);--toastify-icon-color-info: var(--toastify-color-info);--toastify-icon-color-success: var(--toastify-color-success);--toastify-icon-color-warning: var(--toastify-color-warning);--toastify-icon-color-error: var(--toastify-color-error);--toastify-toast-width: 320px;--toastify-toast-background: #fff;--toastify-toast-min-height: 64px;--toastify-toast-max-height: 800px;--toastify-font-family: sans-serif;--toastify-z-index: 9999;--toastify-text-color-light: #757575;--toastify-text-color-dark: #fff;--toastify-text-color-info: #fff;--toastify-text-color-success: #fff;--toastify-text-color-warning: #fff;--toastify-text-color-error: #fff;--toastify-spinner-color: #616161;--toastify-spinner-color-empty-area: #e0e0e0;--toastify-color-progress-light: linear-gradient( - to right, - #4cd964, - #5ac8fa, - #007aff, - #34aadc, - #5856d6, - #ff2d55 - );--toastify-color-progress-dark: #bb86fc;--toastify-color-progress-info: var(--toastify-color-info);--toastify-color-progress-success: var(--toastify-color-success);--toastify-color-progress-warning: var(--toastify-color-warning);--toastify-color-progress-error: var(--toastify-color-error)}.Toastify__toast-container{z-index:var(--toastify-z-index);-webkit-transform:translate3d(0, 0, var(--toastify-z-index));position:fixed;padding:4px;width:var(--toastify-toast-width);box-sizing:border-box;color:#fff}.Toastify__toast-container--top-left{top:1em;left:1em}.Toastify__toast-container--top-center{top:1em;left:50%;transform:translateX(-50%)}.Toastify__toast-container--top-right{top:1em;right:1em}.Toastify__toast-container--bottom-left{bottom:1em;left:1em}.Toastify__toast-container--bottom-center{bottom:1em;left:50%;transform:translateX(-50%)}.Toastify__toast-container--bottom-right{bottom:1em;right:1em}@media only screen and (max-width: 
480px){.Toastify__toast-container{width:100vw;padding:0;left:0;margin:0}.Toastify__toast-container--top-left,.Toastify__toast-container--top-center,.Toastify__toast-container--top-right{top:0;transform:translateX(0)}.Toastify__toast-container--bottom-left,.Toastify__toast-container--bottom-center,.Toastify__toast-container--bottom-right{bottom:0;transform:translateX(0)}.Toastify__toast-container--rtl{right:0;left:initial}}.Toastify__toast{position:relative;min-height:var(--toastify-toast-min-height);box-sizing:border-box;margin-bottom:1rem;padding:8px;border-radius:4px;box-shadow:0 1px 10px 0 rgba(0,0,0,0.1),0 2px 15px 0 rgba(0,0,0,0.05);display:-ms-flexbox;display:flex;-ms-flex-pack:justify;justify-content:space-between;max-height:var(--toastify-toast-max-height);overflow:hidden;font-family:var(--toastify-font-family);cursor:default;direction:ltr;z-index:0}.Toastify__toast--rtl{direction:rtl}.Toastify__toast--close-on-click{cursor:pointer}.Toastify__toast-body{margin:auto 0;-ms-flex:1 1 auto;flex:1 1 auto;padding:6px;display:-ms-flexbox;display:flex;-ms-flex-align:center;align-items:center}.Toastify__toast-body>div:last-child{word-break:break-word;-ms-flex:1;flex:1}.Toastify__toast-icon{-webkit-margin-end:10px;margin-inline-end:10px;width:20px;-ms-flex-negative:0;flex-shrink:0;display:-ms-flexbox;display:flex}.Toastify--animate{animation-fill-mode:both;animation-duration:0.7s}.Toastify--animate-icon{animation-fill-mode:both;animation-duration:0.3s}@media only screen and (max-width: 480px){.Toastify__toast{margin-bottom:0;border-radius:0}}.Toastify__toast-theme--dark{background:var(--toastify-color-dark);color:var(--toastify-text-color-dark)}.Toastify__toast-theme--light{background:var(--toastify-color-light);color:var(--toastify-text-color-light)}.Toastify__toast-theme--colored.Toastify__toast--default{background:var(--toastify-color-light);color:var(--toastify-text-color-light)}.Toastify__toast-theme--colored.Toastify__toast--info{color:var(--toastify-text-color-info);background:var(--toastify-color-info)}.Toastify__toast-theme--colored.Toastify__toast--success{color:var(--toastify-text-color-success);background:var(--toastify-color-success)}.Toastify__toast-theme--colored.Toastify__toast--warning{color:var(--toastify-text-color-warning);background:var(--toastify-color-warning)}.Toastify__toast-theme--colored.Toastify__toast--error{color:var(--toastify-text-color-error);background:var(--toastify-color-error)}.Toastify__progress-bar-theme--light{background:var(--toastify-color-progress-light)}.Toastify__progress-bar-theme--dark{background:var(--toastify-color-progress-dark)}.Toastify__progress-bar--info{background:var(--toastify-color-progress-info)}.Toastify__progress-bar--success{background:var(--toastify-color-progress-success)}.Toastify__progress-bar--warning{background:var(--toastify-color-progress-warning)}.Toastify__progress-bar--error{background:var(--toastify-color-progress-error)}.Toastify__progress-bar-theme--colored.Toastify__progress-bar--info,.Toastify__progress-bar-theme--colored.Toastify__progress-bar--success,.Toastify__progress-bar-theme--colored.Toastify__progress-bar--warning,.Toastify__progress-bar-theme--colored.Toastify__progress-bar--error{background:var(--toastify-color-transparent)}.Toastify__close-button{color:#fff;background:transparent;outline:none;border:none;padding:0;cursor:pointer;opacity:0.7;transition:0.3s 
ease;-ms-flex-item-align:start;align-self:flex-start}.Toastify__close-button--light{color:#000;opacity:0.3}.Toastify__close-button>svg{fill:currentColor;height:16px;width:14px}.Toastify__close-button:hover,.Toastify__close-button:focus{opacity:1}@keyframes Toastify__trackProgress{0%{transform:scaleX(1)}100%{transform:scaleX(0)}}.Toastify__progress-bar{position:absolute;bottom:0;left:0;width:100%;height:5px;z-index:var(--toastify-z-index);opacity:0.7;transform-origin:left}.Toastify__progress-bar--animated{animation:Toastify__trackProgress linear 1 forwards}.Toastify__progress-bar--controlled{transition:transform 0.2s}.Toastify__progress-bar--rtl{right:0;left:initial;transform-origin:right}.Toastify__spinner{width:20px;height:20px;box-sizing:border-box;border:2px solid;border-radius:100%;border-color:var(--toastify-spinner-color-empty-area);border-right-color:var(--toastify-spinner-color);animation:Toastify__spin 0.65s linear infinite}@keyframes Toastify__bounceInRight{from,60%,75%,90%,to{animation-timing-function:cubic-bezier(0.215, 0.61, 0.355, 1)}from{opacity:0;transform:translate3d(3000px, 0, 0)}60%{opacity:1;transform:translate3d(-25px, 0, 0)}75%{transform:translate3d(10px, 0, 0)}90%{transform:translate3d(-5px, 0, 0)}to{transform:none}}@keyframes Toastify__bounceOutRight{20%{opacity:1;transform:translate3d(-20px, 0, 0)}to{opacity:0;transform:translate3d(2000px, 0, 0)}}@keyframes Toastify__bounceInLeft{from,60%,75%,90%,to{animation-timing-function:cubic-bezier(0.215, 0.61, 0.355, 1)}0%{opacity:0;transform:translate3d(-3000px, 0, 0)}60%{opacity:1;transform:translate3d(25px, 0, 0)}75%{transform:translate3d(-10px, 0, 0)}90%{transform:translate3d(5px, 0, 0)}to{transform:none}}@keyframes Toastify__bounceOutLeft{20%{opacity:1;transform:translate3d(20px, 0, 0)}to{opacity:0;transform:translate3d(-2000px, 0, 0)}}@keyframes Toastify__bounceInUp{from,60%,75%,90%,to{animation-timing-function:cubic-bezier(0.215, 0.61, 0.355, 1)}from{opacity:0;transform:translate3d(0, 3000px, 0)}60%{opacity:1;transform:translate3d(0, -20px, 0)}75%{transform:translate3d(0, 10px, 0)}90%{transform:translate3d(0, -5px, 0)}to{transform:translate3d(0, 0, 0)}}@keyframes Toastify__bounceOutUp{20%{transform:translate3d(0, -10px, 0)}40%,45%{opacity:1;transform:translate3d(0, 20px, 0)}to{opacity:0;transform:translate3d(0, -2000px, 0)}}@keyframes Toastify__bounceInDown{from,60%,75%,90%,to{animation-timing-function:cubic-bezier(0.215, 0.61, 0.355, 1)}0%{opacity:0;transform:translate3d(0, -3000px, 0)}60%{opacity:1;transform:translate3d(0, 25px, 0)}75%{transform:translate3d(0, -10px, 0)}90%{transform:translate3d(0, 5px, 0)}to{transform:none}}@keyframes Toastify__bounceOutDown{20%{transform:translate3d(0, 10px, 0)}40%,45%{opacity:1;transform:translate3d(0, -20px, 0)}to{opacity:0;transform:translate3d(0, 2000px, 
0)}}.Toastify__bounce-enter--top-left,.Toastify__bounce-enter--bottom-left{animation-name:Toastify__bounceInLeft}.Toastify__bounce-enter--top-right,.Toastify__bounce-enter--bottom-right{animation-name:Toastify__bounceInRight}.Toastify__bounce-enter--top-center{animation-name:Toastify__bounceInDown}.Toastify__bounce-enter--bottom-center{animation-name:Toastify__bounceInUp}.Toastify__bounce-exit--top-left,.Toastify__bounce-exit--bottom-left{animation-name:Toastify__bounceOutLeft}.Toastify__bounce-exit--top-right,.Toastify__bounce-exit--bottom-right{animation-name:Toastify__bounceOutRight}.Toastify__bounce-exit--top-center{animation-name:Toastify__bounceOutUp}.Toastify__bounce-exit--bottom-center{animation-name:Toastify__bounceOutDown}@keyframes Toastify__zoomIn{from{opacity:0;transform:scale3d(0.3, 0.3, 0.3)}50%{opacity:1}}@keyframes Toastify__zoomOut{from{opacity:1}50%{opacity:0;transform:scale3d(0.3, 0.3, 0.3)}to{opacity:0}}.Toastify__zoom-enter{animation-name:Toastify__zoomIn}.Toastify__zoom-exit{animation-name:Toastify__zoomOut}@keyframes Toastify__flipIn{from{transform:perspective(400px) rotate3d(1, 0, 0, 90deg);animation-timing-function:ease-in;opacity:0}40%{transform:perspective(400px) rotate3d(1, 0, 0, -20deg);animation-timing-function:ease-in}60%{transform:perspective(400px) rotate3d(1, 0, 0, 10deg);opacity:1}80%{transform:perspective(400px) rotate3d(1, 0, 0, -5deg)}to{transform:perspective(400px)}}@keyframes Toastify__flipOut{from{transform:perspective(400px)}30%{transform:perspective(400px) rotate3d(1, 0, 0, -20deg);opacity:1}to{transform:perspective(400px) rotate3d(1, 0, 0, 90deg);opacity:0}}.Toastify__flip-enter{animation-name:Toastify__flipIn}.Toastify__flip-exit{animation-name:Toastify__flipOut}@keyframes Toastify__slideInRight{from{transform:translate3d(110%, 0, 0);visibility:visible}to{transform:translate3d(0, 0, 0)}}@keyframes Toastify__slideInLeft{from{transform:translate3d(-110%, 0, 0);visibility:visible}to{transform:translate3d(0, 0, 0)}}@keyframes Toastify__slideInUp{from{transform:translate3d(0, 110%, 0);visibility:visible}to{transform:translate3d(0, 0, 0)}}@keyframes Toastify__slideInDown{from{transform:translate3d(0, -110%, 0);visibility:visible}to{transform:translate3d(0, 0, 0)}}@keyframes Toastify__slideOutRight{from{transform:translate3d(0, 0, 0)}to{visibility:hidden;transform:translate3d(110%, 0, 0)}}@keyframes Toastify__slideOutLeft{from{transform:translate3d(0, 0, 0)}to{visibility:hidden;transform:translate3d(-110%, 0, 0)}}@keyframes Toastify__slideOutDown{from{transform:translate3d(0, 0, 0)}to{visibility:hidden;transform:translate3d(0, 500px, 0)}}@keyframes Toastify__slideOutUp{from{transform:translate3d(0, 0, 0)}to{visibility:hidden;transform:translate3d(0, -500px, 0)}}.Toastify__slide-enter--top-left,.Toastify__slide-enter--bottom-left{animation-name:Toastify__slideInLeft}.Toastify__slide-enter--top-right,.Toastify__slide-enter--bottom-right{animation-name:Toastify__slideInRight}.Toastify__slide-enter--top-center{animation-name:Toastify__slideInDown}.Toastify__slide-enter--bottom-center{animation-name:Toastify__slideInUp}.Toastify__slide-exit--top-left,.Toastify__slide-exit--bottom-left{animation-name:Toastify__slideOutLeft}.Toastify__slide-exit--top-right,.Toastify__slide-exit--bottom-right{animation-name:Toastify__slideOutRight}.Toastify__slide-exit--top-center{animation-name:Toastify__slideOutUp}.Toastify__slide-exit--bottom-center{animation-name:Toastify__slideOutDown}@keyframes Toastify__spin{from{transform:rotate(0deg)}to{transform:rotate(360deg)}} - 
diff --git a/src/web/gui/v2/6944.ab3e70c9ac0f05013b5f.chunk.js b/src/web/gui/v2/6944.ab3e70c9ac0f05013b5f.chunk.js deleted file mode 100644 index d4fefb2ea..000000000 --- a/src/web/gui/v2/6944.ab3e70c9ac0f05013b5f.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="efcc7859-d165-4f9b-8c26-b09700c78f65",e._sentryDebugIdIdentifier="sentry-dbid-efcc7859-d165-4f9b-8c26-b09700c78f65")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[6944],{6944:(e,t,a)=>{a.r(t),a.d(t,{default:()=>O});a(62953);var n=a(96540),l=a(83199),o=a(47731),r=a(8711);const s=(0,r.default)(l.ModalContent).attrs((e=>{let{isMobile:t}=e;return{width:t?{base:"95vw"}:{min:120,max:160}}})).withConfig({displayName:"styled__AwsModalContent",componentId:"sc-cgxyx4-0"})([""]),c=(0,r.default)(l.Flex).attrs((e=>({border:{side:"all",color:"primary"},color:"primary",padding:[2],justifyContent:"center",round:!0,cursor:"pointer",opacity:e.disabled?"0.4":"1",...e}))).withConfig({displayName:"styled__StyledSpaceSelector",componentId:"sc-cgxyx4-1"})(["pointer-events:",";&:hover{background-color:",";color:#00cd51;text-decoration:none;}"],(e=>{let{disabled:t}=e;return t?"none":"auto"}),(0,l.getColor)("secondaryHighlight"));var i=a(58168);a(17333),a(98992),a(54520);const d=e=>{let{state:t,setState:a}=e;const{firstName:o,lastName:r,company:s}=t||{},[c,i]=(0,l.useTouchedState)({}),[d,u]=(0,l.useTouchedState)({}),[m,p]=(0,l.useTouchedState)({}),b=(0,n.useCallback)((e=>t=>{a((a=>({...a,[e]:t.target.value})))}),[a]);return n.createElement(l.Flex,{column:!0,gap:2},n.createElement(l.TextInput,{label:"First name",value:o||"",placeholder:"Enter your first name",onChange:b("firstName"),size:"small",touched:c,onBlur:i,error:c&&!o?"Provide your first name":""}),n.createElement(l.TextInput,{label:"Last name",value:r||"",placeholder:"Enter your last name",onChange:b("lastName"),size:"small",touched:d,onBlur:u,error:d&&!r?"Provide your last name":""}),n.createElement(l.TextInput,{label:"Company",value:s||"",placeholder:"Enter your company name",onChange:b("company"),size:"small",touched:m,onBlur:p,error:m&&!s?"Provide your company name":""}))};a(41393),a(81454);var u=a(63950),m=a.n(u),p=a(92155),b=a(80158),f=a(47130);const g=(0,p.A)((0,f.A)(c)),y=e=>{let{id:t,slug:a,name:l,isEligible:o,reason:r,isDisabled:s,onSpaceSelection:c=m()}=e;const i=(0,n.useCallback)((()=>{o&&c({spaceId:t,spaceSlug:a})}),[t,a,o,c]),d=s?"Please fill in the required fields":!o&&r?(0,b.Zr)(r):null;return n.createElement(g,{feature:"LabraSpaceSelect",payload:{spaceId:t},disabled:!o||s,tooltip:d,tooltipProps:{align:"bottom"},onClick:i},l)},E=function(){let{state:e,setState:t,eligible:a=[],nonEligible:o=[]}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const{firstName:r,lastName:s,company:c}=e||{},d=!r||!s||!c,u=(0,n.useCallback)((e=>{let{spaceId:a}=e;t((e=>({...e,spaceId:a})))}),[t]);return n.createElement(l.Flex,{column:!0,gap:4},n.createElement(l.Text,null,"Select a space to connect to your AWS 
subscription."),n.createElement(l.Flex,{column:!0,gap:2,justifyContent:"center"},a.map((e=>n.createElement(y,(0,i.A)({key:e.id,onSpaceSelection:u,isDisabled:d},e)))),o.map((e=>n.createElement(y,(0,i.A)({key:e.id},e))))))};var h=a(4659);const v={ErrSubscriptionAlreadyAttached:()=>n.createElement(l.Text,{lineHeight:1.5},"This subscription is already attached to a Netdata space / account."),FETimeout:()=>n.createElement(l.Text,{lineHeight:1.5},"The subscription could not be validated. Please try again in sometime by refreshing the page. If it does not work, please reach out to"," ",n.createElement(h.A,{href:"mailto:support@netdata.cloud",as:"a",cursor:"pointer",textDecoration:"underline",color:"main"},"support@netdata.cloud")," ","and retry setting up the account from the AWS Marketplace"),default:()=>n.createElement(l.Text,null,"Something went wrong.")},S=e=>{let{errorKey:t,error:a}=e;const o=a?()=>n.createElement(l.Text,null,a):v[t]||v.default;return n.createElement(l.Flex,{alignItems:"center",justifyContent:"center",height:30,padding:[0,8]},n.createElement(o,null))};var C=a(28738);const I=function(){let{state:e,setState:t,isLoading:a,timeLeft:o,spaces:r=[]}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const s=r.filter((e=>{let{isEligible:t}=e;return t})),c=r.filter((e=>{let{isEligible:t}=e;return!t}));return s.length?a?n.createElement(C.A,{height:50,padding:[4],title:"Validating your Subscription. Please Wait.",body:"This may take upto 5 minutes."}):n.createElement(l.Flex,{column:!0,gap:8},n.createElement(d,{state:e,setState:t}),n.createElement(E,{state:e,setState:t,eligible:s,nonEligible:c})):n.createElement(S,{error:"No eligible spaces found."})},x=e=>{let{state:t,setState:a,isLoading:l,timeLeft:o,spacesLoadable:r={},error:s}=e;const{loaded:c,value:d={},hasError:u,error:m}=r;return c?u?n.createElement(S,{errorKey:m}):s?n.createElement(S,{errorKey:s.errorKey}):n.createElement(I,(0,i.A)({state:t,setState:a,isLoading:l,timeLeft:o},d)):n.createElement(C.A,{height:50,padding:[4],title:"Loading spaces..."})};var k=a(3914);const w=e=>{let{state:t,forceHiddenConfirmation:a,onConfirm:o,onDecline:r}=e;const{spaceId:s}=t||{},c=(0,k.ns)(s,"name");return s&&!a?n.createElement(l.ConfirmationDialog,{confirmLabel:"Yes, select space",declineLabel:"No","data-testid":"subscribeSpaceToAWSDialog",handleConfirm:o,handleDecline:r,hideIcon:!0,isConfirmPositive:!0,message:n.createElement(l.Flex,{gap:2,column:!0},n.createElement(l.TextBig,null,"You are about to tie your AWS subscription to ",n.createElement(l.TextBig,{strong:!0},c)," space."),n.createElement(l.TextBig,null,"Are you sure you want to select this space?")),title:"Select space"}):null};a(9391);var A=a(47767),L=a(84976),T=a(47444),_=a(26655),N=a(49286);var P=a(71835);const F=(0,T.K0)({key:"labraSpacesSelector",get:e=>{let{url:t}=e;return()=>(e=>e?_.A.get(e,{transform:e=>(0,N.bn)(e)}):Promise.resolve({}))(t)}}),D=["labra_subscription_id","aws_customer_id","aws_product_id"],M=()=>{const e=(0,A.Zp)(),t=(0,k.ap)("slug"),[a,l]=(0,L.ok)();return(0,n.useCallback)((n=>{a&&(D.forEach((e=>a.delete(e))),l(a)),n?window.location.href="/spaces/".concat(t):e("/spaces/".concat(t))}),[a,l,e,t])},B=e=>{let{subscriptionId:t}=e;const[a,l]=(0,P.A)(),o=M();return(0,n.useCallback)((e=>{let{firstName:n,lastName:r,company:s,spaceId:c}=e;return(e=>{let{firstName:t,lastName:a,company:n,spaceId:l,subscriptionId:o}=e;return 
_.A.patch("/api/v2/spaces/".concat(l,"/billing/labra/subscription"),{first_name:t,last_name:a,company:n,subscription_id:o})})({firstName:n,lastName:r,company:s,spaceId:c,subscriptionId:t}).then((()=>{a({header:"Success",text:"Successfully subscribed space"}),o(!0)})).catch(l).finally((()=>Promise.resolve()))}),[t,o])},H=(0,p.A)(l.Button),K=e=>{let{isLoading:t}=e;const a=M();return n.createElement(l.ModalFooter,null,n.createElement(H,{feature:"LabraSpacesSelectorClose",label:"Close",flavour:"hollow",onClick:a,disabled:t}))};var R=a(19673),W=a(99090),j=a(36712);const z=function(){let{enabled:e,onReceive:t}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const a=(0,k.vt)();(0,W.A)((()=>({enabled:e,polling:!0,fetch:()=>(0,j.PE)(a),onReceive:t,force:!0,pollingOptions:{pollingInterval:1e3}})),[a,t])},Y=function(){let{state:e,hasConfirmed:t,onReceive:a,onProgress:l,onStopPolling:o,waitFor:r=6e4}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const{spaceId:s}=e||{};z({enabled:t,onReceive:a}),(0,n.useEffect)((()=>{let e=!0,a=null,n=r;return s&&t&&(a=setInterval((()=>{n-=1e3,null===l||void 0===l||l({timeLeft:n}),0==n&&(null===o||void 0===o||o(),clearInterval(a),a=null)}),1e3)),()=>{e=!1,a&&(clearInterval(a),a=null)}}),[s,t])};var J=a(87659),Z=a(63314);const q=3e5,O=e=>{let{subCallback:t}=e;const a=(0,o.J)(),[r,c]=(0,n.useState)(),[i,d]=(0,n.useState)({}),[u,m]=(0,n.useState)(),[p,,b]=(0,J.A)(),[f,,g]=(0,J.A)(),[y,E]=(0,n.useState)(q),h=(e=>{var t,a;let{url:n}=e;const l=(0,T.xf)(F({url:n}));return{loaded:"loading"!==l.state,value:null===(t=l.contents)||void 0===t?void 0:t.data,hasError:"hasError"===l.state,error:null===(a=l.contents)||void 0===a||null===(a=a.response)||void 0===a||null===(a=a.data)||void 0===a?void 0:a.errorMsgKey}})({url:t}),{value:v={}}=h||{},{subscriptionId:S}=v,C=B({subscriptionId:S}),I=M(),{refresh:k}=(0,R.JN)(),A=(0,n.useCallback)((function(){let{data:e}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const{trialEndsAt:t}=e||{};e&&!t&&(k(),setTimeout((()=>{I()}),1e3))}),[I,k]),L=(0,n.useCallback)((e=>{let{timeLeft:t}=e;E(t)}),[E]),_=(0,n.useCallback)((()=>{m({errorKey:"FETimeout"}),c(!1)}),[m,c]);Y({state:i,hasConfirmed:f,onReceive:A,onProgress:L,onStopPolling:_,waitFor:q});const N=(0,n.useCallback)((()=>{c(!0),g(),C(i),b()}),[c,C,b,i]),P=(0,n.useCallback)((()=>{d((e=>({...e,spaceId:void 0})))}),[d]);return n.createElement(n.Fragment,null,n.createElement(l.Modal,{"data-testid":"aws-modal",backdropProps:{backdropBlur:!0}},n.createElement(Z.Ay,{feature:"LabraSpacesSelector"},n.createElement(s,{isMobile:a},n.createElement(l.ModalHeader,null,n.createElement(l.Flex,{gap:2,alignItems:"center"},n.createElement(l.Icon,{name:"netdataPress",color:"text"}),n.createElement(l.H4,null,"Welcome to Netdata!"))),n.createElement(l.ModalBody,{padding:[4,4,8,4]},n.createElement(x,{state:i,setState:d,spacesLoadable:h,isLoading:r,timeLeft:y,error:u})),n.createElement(K,{isLoading:r})))),n.createElement(w,{state:i,forceHiddenConfirmation:p,onConfirm:N,onDecline:P}))}}}]); \ No newline at end of file diff --git a/src/web/gui/v2/7144.382c341e09540fdebaa6.chunk.js b/src/web/gui/v2/7144.382c341e09540fdebaa6.chunk.js deleted file mode 100644 index 36d16c78e..000000000 --- a/src/web/gui/v2/7144.382c341e09540fdebaa6.chunk.js +++ /dev/null @@ -1,2 +0,0 @@ -/*! 
For license information please see 7144.382c341e09540fdebaa6.chunk.js.LICENSE.txt */ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="ba656e97-6c3b-4a66-ac99-de0d672e8878",e._sentryDebugIdIdentifier="sentry-dbid-ba656e97-6c3b-4a66-ac99-de0d672e8878")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[7144],{62055:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var n=h(r(96540)),a=h(r(8711)),o=r(83199),i=v(r(99010)),l=h(r(1249)),u=v(r(24652)),c=r(22332),f=r(96380),d=h(r(33640)),s=r(99506),p=["children","fractionDigits"],m=["children"];function g(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(g=function(e){return e?r:t})(e)}function v(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=g(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}function h(e){return e&&e.__esModule?e:{default:e}}function b(){return b=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return a}var w=(0,a.default)(o.Flex).attrs({position:"relative","data-testid":"chartPopover-dimension",padding:[1,0]}).withConfig({displayName:"dimension__GridRow",componentId:"sc-wluvip-0"})(["display:contents;"]),O=(0,a.default)(i.ColorBar).attrs({position:"absolute",top:0,left:0,backgroundOpacity:.4,round:.5}).withConfig({displayName:"dimension__ColorBackground",componentId:"sc-wluvip-1"})([""]),x={ANOMALY_RATE:"arp",default:"value"},_=function(e){var t=e.children,r=e.fractionDigits,a=void 0===r?0:r,i=y(e,p),l=t.toString().split("."),c=l[0],f=l[1];return n.default.createElement(o.Flex,{alignItems:"center",justifyContent:"end",padding:[0,.5]},n.default.createElement(u.Value,b({},i,{textAlign:"right"}),c),"undefined"!==typeof f&&n.default.createElement(u.Value,i,"."),n.default.createElement(u.Value,b({as:o.Flex,flex:!1,width:1.8*a},i,{textAlign:"left"}),f))},j=function(e){var t=e.children,r=y(e,m);return n.default.createElement(o.Flex,{gap:1,justifyContent:"end"},Object.keys(t).map((function(e){return n.default.createElement(o.Flex,{key:e,border:{size:"1px",side:"all",color:t[e]},round:!0,flex:!1,padding:[0,.5]},n.default.createElement(d.default,{content:f.labels[e]||e},n.default.createElement(u.Value,b({},r,{color:t[e]}),e)))})))};t.default=function(e){var t=e.id,r=e.strong,a=e.rowFlavour,f=e.fullCols,d=(0,c.useVisibleDimensionId)(t),p=(0,c.useChart)().getAttribute("unitsConversionFractionDigits");return 
n.default.createElement(w,{opacity:d?null:"weak"},n.default.createElement(o.Flex,{alignItems:"center",gap:1,position:"relative",overflow:"hidden"},n.default.createElement(O,{id:t,valueKey:x[a]||x.default,height:"100%"},n.default.createElement(i.default,{id:t})),n.default.createElement(l.default,{padding:[.5,1.5],flex:!0,id:t,strong:r,fontSize:"1.1em"})),n.default.createElement(u.default,{id:t,strong:r,visible:d,Component:_,fractionDigits:p,color:a===s.rowFlavours.default?"text":"textLite",fontSize:"1.1em"}),f&&n.default.createElement(n.default.Fragment,null,n.default.createElement(u.default,{id:t,strong:r,visible:d,valueKey:"arp",Component:_,fractionDigits:2,color:a===s.rowFlavours.ANOMALY_RATE?"anomalyTextFocus":"anomalyText",fontSize:"1.1em"}),n.default.createElement(u.default,{textAlign:"right",id:t,strong:r,visible:d,valueKey:"pa",Component:j,color:a===s.rowFlavours.ANNOTATIONS?"text":"textLite",fontSize:"1.1em"})))}},99506:(e,t,r)=>{t.__esModule=!0,t.rowFlavours=t.default=void 0;var n=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=f(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(96540)),a=c(r(8711)),o=r(22332),i=r(83199),l=c(r(75010)),u=c(r(62055));function c(e){return e&&e.__esModule?e:{default:e}}function f(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(f=function(e){return e?r:t})(e)}var d=a.default.div.withConfig({displayName:"dimensions__Grid",componentId:"sc-x7v8hd-0"})(["display:grid;width:100%;grid-template-columns:",";gap:",";align-items:center;"],(function(e){return"full"===e.cols?"3fr 1fr 1fr 1fr":"5fr 2fr"}),(function(e){return e.gap||0})),s=[null,null],p=function(e){var t=Math.round((e-70)/18);return t<2?2:t},m=function(e){return p(e)/2},g=t.rowFlavours={ANOMALY_RATE:"ANOMALY_RATE",ANNOTATIONS:"ANNOTATIONS",default:"VALUE"},v={ANOMALY_RATE:"anomalyDesc",ANNOTATIONS:"annotationsDesc",default:"valueDesc"},h=function(e){var t=e.height,r=(e.width,(0,o.useChart)()),a=(0,o.useAttributeValue)("hoverX")||s,c=a[0],f=a[1],h=(0,o.usePayload)().data,b=(0,n.useMemo)((function(){var e=r.getClosestRow(c)||h.length-1,n=r.onHoverSortDimensions(e,v[f]||r.getAttribute("dimensionsSort")||v.default)||[];r.getAttribute("selectedDimensions").length>0&&(n=n.filter((function(e){return r.isDimensionVisible(e)})));var a=n.findIndex((function(e){return e===f})),o=n.length,i=Math.floor(function(e,t,r){return ee-m(r)?t-(m(r)+(e-t)):t-m(r)}(o,a,t)),l=Math.ceil(function(e,t,r){return ee-m(r)?e:t+m(r)}(o,a,t));return[i,l,o,n.slice(i,l)]}),[r,f,c,h,t]),y=b[0],w=b[1],O=b[2],x=b[3],_=g[f]||g.default,j=(0,o.useAttributeValue)("cols");return n.default.createElement(n.default.Fragment,null,n.default.createElement(i.TextNano,{fontSize:"1em",color:"textLite"},y>0?"\u2191"+y+" more values":n.default.createElement(n.default.Fragment,null,"\xa0")),n.default.createElement(d,{gap:"2px",column:!0,cols:j},n.default.createElement(i.TextMicro,{fontSize:"1em",strong:!0},"Dimension"),n.default.createElement(i.TextMicro,{fontSize:"1em",color:_===g.default?"text":"textLite",textAlign:"right"},"Value"," 
",n.default.createElement(l.default,{visible:!0,strong:_===g.default,color:_===g.default?"text":"textLite",fontSize:"1em"})),"full"===j&&n.default.createElement(n.default.Fragment,null,n.default.createElement(i.TextMicro,{fontSize:"1em",strong:_===g.ANOMALY_RATE,color:_===g.ANOMALY_RATE?"text":"textLite",textAlign:"right"},"Anomaly%"),n.default.createElement(i.TextMicro,{fontSize:"1em",strong:_===g.ANNOTATIONS,color:_===g.ANNOTATIONS?"text":"textLite",textAlign:"right"},"Info")),x.map((function(e){return n.default.createElement(u.default,{key:e,id:e,strong:f===e,rowFlavour:_,fullCols:"full"===j})}))),n.default.createElement(i.TextNano,{color:"textLite",fontSize:"1em"},w{t.Ay=void 0;var n=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=d(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(96540)),a=f(r(74113)),o=r(22332),i=f(r(10534)),l=r(89380),u=f(r(99506)),c=["uiName"];function f(e){return e&&e.__esModule?e:{default:e}}function d(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(d=function(e){return e?r:t})(e)}function s(){return s=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,c),f=(0,o.useOnResize)(r),d=f.width,p=f.height;return n.default.createElement(l.ChartWrapper,{ref:t},n.default.createElement(a.default,s({uiName:r,column:!0,gap:.5,position:"relative"},i),n.default.createElement(u.default,{height:p,width:d})))}));t.Ay=(0,i.default)(p,{tile:!0})},74113:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var n=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=l(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(96540)),a=r(83199),o=r(22332),i=["uiName"];function l(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(l=function(e){return e?r:t})(e)}function u(){return u=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,i),l=(0,o.useChart)(),c=(0,n.useRef)();return(0,n.useLayoutEffect)((function(){return l.getUI(t).mount(c.current),function(){return l.getUI(t)&&l.getUI(t).unmount()}}),[]),n.default.createElement(a.Flex,u({"data-testid":"chartContent",ref:c,height:"100%",width:"100%",overflow:"hidden"},r))}},89031:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var n,a=(n=r(8711))&&n.__esModule?n:{default:n},o=r(83199),i=["height","width"];function l(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function u(e,t,r){return(t=function(e){var t=function(e,t){if("object"!=typeof e||!e)return e;var r=e[Symbol.toPrimitive];if(void 0!==r){var n=r.call(e,t||"default");if("object"!=typeof n)return n;throw new TypeError("@@toPrimitive must return a primitive 
value.")}return("string"===t?String:Number)(e)}(e,"string");return"symbol"==typeof t?t:t+""}(t))in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}var c=(0,a.default)(o.Flex).attrs((function(e){var t=e.height,r=void 0===t?"100%":t,n=e.width,a=void 0===n?"100%":n;return function(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,i))})).withConfig({displayName:"container__Container",componentId:"sc-sbp2x3-0"})(["::selection{background:transparent;}::-moz-selection{background:transparent;}"]);t.default=c},95662:(e,t,r)=>{t.Ay=void 0;var n=p(r(96540)),a=p(r(8711)),o=r(83199),i=d(r(74113)),l=r(22332),u=d(r(10534)),c=r(89380),f=["uiName"];function d(e){return e&&e.__esModule?e:{default:e}}function s(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(s=function(e){return e?r:t})(e)}function p(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=s(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}function m(){return m=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,f),o=(0,l.useAttributeValue)("loaded"),u=(0,l.useOnResize)(r),d=u.width,s=u.height,p=d{t.__esModule=!0,t.default=void 0;var n=l(r(96540)),a=r(83199),o=r(22332),i=l(r(44597));function l(e){return e&&e.__esModule?e:{default:e}}var u=function(e){var t=e.title,r=e.children;return n.default.createElement(a.Flex,{gap:2},n.default.createElement(a.TextSmall,{color:"textDescription"},t),n.default.createElement(a.Flex,{as:a.TextSmall,background:"elementBackground"},r))};t.default=function(){var e=(0,o.useAttributeValue)("contextScope");return n.default.createElement(i.default,{title:"Plugin and chart context",color:"key","data-testid":"chartDetails-context"},n.default.createElement(u,{title:"Context"},e.join(", ")))}},90276:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var n=l(r(96540)),a=r(83199),o=r(22332),i=l(r(44597));function l(e){return e&&e.__esModule?e:{default:e}}t.default=function(){var e=(0,o.useTitle)(),t=(0,o.useAttributeValue)("info"),r=(0,o.useAttributeValue)("sectionInfo"),l=(0,o.useChart)(),u=function(e){var t=e.target.hash,r=void 0===t?"":t;r.startsWith("#menu")&&(e.preventDefault(),l.sdk.trigger("goToLink",l,r.substr(1)))};return n.default.createElement(i.default,{title:e,color:"key","data-testid":"cartDetails-description"},n.default.createElement(a.TextSmall,{color:"textDescription",dangerouslySetInnerHTML:{__html:r},onClick:u}),n.default.createElement(a.TextSmall,{color:"textDescription",dangerouslySetInnerHTML:{__html:t},onClick:u}))}},93693:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var n=f(r(96540)),a=f(r(8711)),o=r(83199),i=r(22332),l=f(r(85546)),u=f(r(760)),c=f(r(90276));function f(e){return e&&e.__esModule?e:{default:e}}var d=(0,a.default)(o.Flex).attrs({column:!0,padding:[3,2],gap:3,overflow:{vertical:"auto"},width:"100%",height:"100%"}).withConfig({displayName:"details__Container",componentId:"sc-etnkd0-0"})([""]);t.default=function(){var e=(0,i.useAttributeValue)("nodeName");return 
n.default.createElement(d,{"data-testid":"chartDetails"},n.default.createElement(c.default,null),e&&n.default.createElement(u.default,null),n.default.createElement(l.default,null))}},44597:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var n,a=(n=r(96540))&&n.__esModule?n:{default:n},o=r(83199),i=["icon","title","children"];function l(){return l=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,i);return a.default.createElement(o.Flex,l({gap:4},u),t,a.default.createElement(o.Flex,{column:!0,gap:1,flex:"grow",basis:0},a.default.createElement(o.Text,{strong:!0,color:"key"},r),n&&a.default.createElement(o.Flex,{column:!0,gap:1},n)))}},760:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var n=l(r(96540)),a=r(83199),o=r(22332),i=l(r(44597));function l(e){return e&&e.__esModule?e:{default:e}}t.default=function(){var e=(0,o.useAttributeValue)("nodeName");return n.default.createElement(i.default,{title:"Source",color:"key","data-testid":"chartDetails-source"},n.default.createElement(a.TextSmall,{color:"textDescription"},e))}},99891:(e,t,r)=>{t.Ay=void 0;var n=m(r(96540)),a=m(r(8711)),o=r(83199),i=s(r(74113)),l=r(22332),u=s(r(10534)),c=r(89380),f=s(r(62841)),d=["uiName"];function s(e){return e&&e.__esModule?e:{default:e}}function p(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(p=function(e){return e?r:t})(e)}function m(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=p(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}function g(){return g=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,d),o=(0,l.useAttributeValue)("loaded"),u=(0,l.useOnResize)(r),f=u.width,s=u.height,p=f{t.__esModule=!0,t.default=void 0;var n,a=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=c(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(96540)),o=r(83199),i=r(22332),l=(n=r(94069))&&n.__esModule?n:{default:n},u=["labelProps"];function c(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(c=function(e){return e?r:t})(e)}function f(){return f=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,u),n=(0,i.useChart)(),o=(0,i.useAttributeValue)("aggregationMethod"),c=function(e){return(0,a.useMemo)((function(){return[{value:"avg",label:"Average",description:"For each point presented, calculate the average of the metrics contributing to it.",short:"AVG()","data-track":e.track("avg")},{value:"sum",label:"Sum",description:"For each point presented, calculate the sum of the metrics contributing to it.",short:"SUM()","data-track":e.track("sum")},{value:"min",label:"Minimum",description:"For each point presented, present the minimum of the metrics contributing to it.",short:"MIN()","data-track":e.track("min")},{value:"max",label:"Maximum",description:"For each point presented, present the maximum 
of the metrics contributing to it.",short:"MAX()","data-track":e.track("max")}]}),[e])}(n),d=(c.find((function(e){return e.value===o}))||c[0]).short;return a.default.createElement(l.default,f({value:o,onChange:n.updateAggregationMethodAttribute,items:c,"data-track":n.track("aggregate"),dropTitle:m},r,{labelProps:s({secondaryLabel:"the",label:d,title:g.heading,tooltipProps:g},t)}))};t.default=(0,a.memo)(v)},35369:(e,t,r)=>{t.__esModule=!0,t.uniqueColumn=t.minColumn=t.metricsColumn=t.maxColumn=t.labelColumn=t.instancesColumn=t.contributionColumn=t.avgColumn=t.anomalyRateColumn=t.alertsColumn=void 0;var n=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=f(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(96540)),a=r(83199),o=c(r(99010)),i=c(r(75010)),l=r(22332),u=c(r(36112));function c(e){return e&&e.__esModule?e:{default:e}}function f(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(f=function(e){return e?r:t})(e)}t.labelColumn=function(e){return{id:"label",header:function(){return n.default.createElement(a.TextSmall,{strong:!0},"Name")},size:200,minSize:60,maxSize:800,cell:function(t){var r=t.getValue,i=t.row,c=function(e){return(0,n.useMemo)((function(){return{dimension:"dimensions",node:"nodes",instance:e.intl("instance",{count:2}),label:"labels",value:"values",default:"values"}}),[])}((0,l.useChart)());return n.default.createElement(a.Flex,{justifyContent:"between",alignItems:"center",padding:[0,0,0,3*i.depth],width:"100%"},n.default.createElement(a.Flex,{gap:1},n.default.createElement(o.default,{id:i.original.value}),n.default.createElement(a.TextSmall,{strong:!0,onClick:i.original.disabled?void 0:i.getToggleSelectedHandler(),cursor:i.original.disabled?"default":"pointer",whiteSpace:"normal",wordBreak:"break-word"},r())),i.getCanExpand()&&n.default.createElement(u.default,{label:c[i.original.value]||c[e]||c.default,onClick:function(e){i.getToggleExpandedHandler()(e),setTimeout((function(){return e.target.scrollIntoView({behavior:"smooth",block:"nearest"})}))},iconRotate:i.getIsExpanded()?2:null,textProps:{fontSize:"10px",color:"textLite"},alignItems:"center"}))}}},t.uniqueColumn=function(){return{id:"unique",header:n.default.createElement(a.TextMicro,{strong:!0},"Unique"),size:45,minSize:30,maxSize:90,cell:function(e){var t=e.getValue;return n.default.createElement(a.TextSmall,{color:"textLite"},t())},sortingFn:"basic"}},t.minColumn=function(){return{id:"min",header:n.default.createElement(a.TextMicro,{strong:!0},"Min ",n.default.createElement(i.default,{visible:!0})),size:60,minSize:30,maxSize:300,fullWidth:!0,cell:function(e){var t=e.getValue,r=(0,l.useConverted)(t());return n.default.createElement(a.TextSmall,{color:"textLite"},r)},sortingFn:"basic"}},t.avgColumn=function(){return{id:"avg",header:n.default.createElement(a.TextMicro,{strong:!0},"Avg ",n.default.createElement(i.default,{visible:!0})),size:60,minSize:30,maxSize:300,fullWidth:!0,cell:function(e){var t=e.getValue,r=(0,l.useConverted)(t());return n.default.createElement(a.TextSmall,{color:"textLite"},r)},sortingFn:"basic"}},t.maxColumn=function(){return{id:"max",header:n.default.createElement(a.TextMicro,{strong:!0},"Max 
",n.default.createElement(i.default,{visible:!0})),size:60,minSize:30,maxSize:300,fullWidth:!0,cell:function(e){var t=e.getValue,r=(0,l.useConverted)(t());return n.default.createElement(a.TextSmall,{color:"textLite"},r)},sortingFn:"basic"}},t.instancesColumn=function(){return{id:"instances",header:n.default.createElement(a.TextMicro,{strong:!0},"Instances"),size:60,minSize:30,maxSize:300,fullWidth:!0,cell:function(e){var t,r=e.getValue,o=e.row;if(null==(t=o.original.info)||!t.is)return n.default.createElement(a.TextSmall,{color:"textLite"},r());var i=o.original.info.is,l=i.qr,u=void 0===l?0:l,c=i.sl,f=void 0===c?0:c,d=i.ex,s=void 0===d?0:d;return n.default.createElement(a.Flex,{flex:!0,column:!0,gap:.5},n.default.createElement(a.TextSmall,{color:"textLite"},n.default.createElement(a.TextSmall,{color:"primary"},u)," of ",f+s),n.default.createElement(a.ProgressBar,{background:"progressBg",color:["green","deyork"],height:2,width:u/(f+s)*100+"%",containerWidth:"100%",border:"none"}))},sortingFn:"basic"}},t.metricsColumn=function(){return{id:"metrics",header:n.default.createElement(a.TextMicro,{strong:!0},"Metrics"),size:60,minSize:30,maxSize:300,fullWidth:!0,cell:function(e){var t,r=e.row,o=e.getValue;if(null==(t=r.original.info)||!t.ds)return n.default.createElement(a.TextSmall,{color:"textLite"},o());var i=r.original.info.ds,l=i.qr,u=void 0===l?0:l,c=i.sl,f=void 0===c?0:c,d=i.ex,s=void 0===d?0:d;return n.default.createElement(a.Flex,{flex:!0,column:!0,gap:.5},n.default.createElement(a.TextSmall,{color:"textLite"},n.default.createElement(a.TextSmall,{color:"primary"},u)," of ",f+s),n.default.createElement(a.ProgressBar,{background:"progressBg",color:["green","deyork"],height:2,width:u/(f+s)*100+"%",containerWidth:"100%",border:"none"}))},sortingFn:"basic"}},t.contributionColumn=function(){return{id:"contribution",header:n.default.createElement(a.TextMicro,{strong:!0},"Vol %"),size:60,minSize:30,maxSize:300,fullWidth:!0,cell:function(e){var t,r=e.row,o=e.getValue;return null!=(t=r.original.info)&&t.sts?n.default.createElement(a.Flex,{flex:!0,column:!0,gap:.5},n.default.createElement(a.TextSmall,{color:"primary"},Math.round(100*(o()+Number.EPSILON))/100,"%"),n.default.createElement(a.ProgressBar,{background:"progressBg",color:["green","deyork"],height:2,width:o()+"%",containerWidth:"100%",border:"none"})):n.default.createElement(a.TextSmall,{color:"textLite"},o())},sortingFn:"basic"}},t.anomalyRateColumn=function(){return{id:"anomalyRate",header:n.default.createElement(a.TextMicro,{strong:!0},"Anomaly%"),size:60,minSize:30,maxSize:300,fullWidth:!0,cell:function(e){var t,r=e.row,o=e.getValue;return null!=(t=r.original.info)&&t.sts?n.default.createElement(a.Flex,{flex:!0,column:!0,gap:.5},n.default.createElement(a.TextSmall,{color:"textLite"},Math.round(100*(o()+Number.EPSILON))/100,"%"),n.default.createElement(a.ProgressBar,{background:"progressBg",color:"anomalyText",height:2,width:o()+"%",containerWidth:"100%",border:"none"})):n.default.createElement(a.TextSmall,{color:"textLite"},o())},sortingFn:"basic"}},t.alertsColumn=function(){return{id:"alerts",header:n.default.createElement(a.TextMicro,{strong:!0},"Alerts"),size:60,minSize:30,maxSize:300,fullWidth:!0,cell:function(e){var t,r=e.row,o=e.getValue;if(null==(t=r.original.info)||!t.al)return n.default.createElement(a.TextSmall,{color:"textLite"},o());var i=r.original.info.al,l=i.cl,u=void 0===l?0:l,c=i.cr,f=void 0===c?0:c,d=i.wr,s=void 
0===d?0:d,p={text:f,flavour:f?"error":"disabledError"},m={text:s,flavour:s?"warning":"disabledWarning"},g={text:u,flavour:u?"clear":"disabledClear"};return n.default.createElement(a.Flex,null,n.default.createElement(a.MasterCard,{pillLeft:p,pillRight:m,pillEnd:g}))},sortingFn:"basic"}}},31866:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var n=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=c(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(96540)),a=r(83199),o=r(65562),i=u(r(36112)),l=u(r(34578));function u(e){return e&&e.__esModule?e:{default:e}}function c(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(c=function(e){return e?r:t})(e)}function f(){return f=Object.assign?Object.assign.bind():function(e){for(var t=1;t{t.__esModule=!0,t.default=void 0;var n,a=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=u(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(96540)),o=r(22332),i=(n=r(94069))&&n.__esModule?n:{default:n},l=["labelProps"];function u(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(u=function(e){return e?r:t})(e)}function c(){return c=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,l),n=(0,o.useChart)(),u=(0,o.useAttributeValue)("contextScope"),f=(0,o.useAttributeValue)("contextItems");if(!f.length)return null;var s=(f.find((function(e){return e.value===u[0]}))||f[0]).label;return a.default.createElement(i.default,c({value:u,onChange:n.updateContextScopeAttribute,items:f,"data-track":n.track("contextScope")},r,{labelProps:d({secondaryLabel:"On",label:s,title:p.heading,tooltipProps:p},t)}))};t.default=(0,a.memo)(m)},65877:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var n,a=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=f(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(96540)),o=r(22332),i=(n=r(34737))&&n.__esModule?n:{default:n},l=r(96659),u=r(35369),c=["labelProps"];function f(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(f=function(e){return e?r:t})(e)}function d(){return d=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,c),n=(0,o.useChart)(),u=(0,o.useAttributeValue)("selectedDimensions"),f=(0,o.useAttributeValue)("dimensions"),m=(0,o.useAttributeValue)("dimensionsTotals"),g=(0,a.useCallback)((function(){return Object.keys(f).map((function(e){var 
t=u.includes(e);return(0,l.getStats)(n,f[e],{key:"dimensions",props:{selected:t}})}))}),[f,u]),v=(0,o.useAttribute)("nodesSortBy"),h=v[0],b=v[1];return a.default.createElement(i.default,d({title:"Dimensions",resourceName:"dimension","data-track":n.track("dimensions"),labelProps:t,onChange:n.updateDimensionsAttribute,getOptions:g,tooltipProps:s,value:u,columns:p,sortBy:h,onSortByChange:b,totals:m},r))};t.default=(0,a.memo)(m)},94069:(e,t,r)=>{t.__esModule=!0,t.default=t.ItemContainer=t.Item=void 0;var n=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=m(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(96540)),a=p(r(8711)),o=r(22332),i=r(83199),l=p(r(97517)),u=p(r(73862)),c=p(r(6504)),f=p(r(33640)),d=p(r(36112)),s=["labelProps"];function p(e){return e&&e.__esModule?e:{default:e}}function m(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(m=function(e){return e?r:t})(e)}function g(){return g=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,s),a=(0,o.useAttributeValue)("id");return n.default.createElement(i.Menu,g({},r,{Item:y,dropProps:{align:{top:"bottom",left:"left"},"data-toolbox":a,width:"460px"},dropdownProps:{padding:[0,0,2,0],height:{max:"80vh"}}},r),n.default.createElement(d.default,g({},t,{"data-value":""+(r.value||"No selection")})))};t.default=(0,n.memo)(w)},34737:(e,t,r)=>{t.__esModule=!0,t.meta=t.default=void 0;var n=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=p(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(96540)),a=s(r(8711)),o=r(83199),i=r(22332),l=s(r(25772)),u=s(r(36112)),c=s(r(4975)),f=["getOptions","onItemClick","close","columns","sortBy","onSortByChange","expanded","onExpandedChange","tableMeta","enableSubRowSelection","value","newValues","totals","emptyMessage","title","filterSelectedCount"],d=["label","labelProps","onChange","getOptions","secondaryLabel","tooltipProps","value","columns","sortBy","onSortByChange","expanded","onExpandedChange","tableMeta","enableSubRowSelection","totals","emptyMessage","resourceName","title","filterSelectedCount"];function s(e){return e&&e.__esModule?e:{default:e}}function p(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(p=function(e){return e?r:t})(e)}function m(){return m=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return a}function v(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function h(e){for(var t=1;t0&&{backgroundOpacity:.4}),e.depth>0&&0===r&&{border:{side:"left",size:"4px"}})}},O=function(){},x=[{id:"contribution",desc:!0}],_={},j=function(e){return e},P=function(e){var 
t=e.getOptions,r=e.onItemClick,a=(e.close,e.columns),i=e.sortBy,u=e.onSortByChange,d=e.expanded,s=e.onExpandedChange,p=e.tableMeta,v=void 0===p?w:p,b=e.enableSubRowSelection,x=e.value,_=e.newValues,P=e.totals,k=e.emptyMessage,M=e.title,C=e.filterSelectedCount,S=void 0===C?j:C,A=g(e,f),D=(0,n.useMemo)(t,[]),T=(0,n.useState)((function(){return E(D,{})})),L=T[0],I=T[1];(0,n.useEffect)((function(){var e=E(D,{});I((function(t){return(0,l.default)(t,e)?t:e}))}),[]);var N=(0,n.useMemo)((function(){return!!_&&(0,l.default)(x,_)}),[_]),z=(0,n.useMemo)((function(){return null!=_&&_.length?S(_).length:0}),[_]);return n.default.createElement(y,m({role:"listbox",background:"dropdown",padding:[0],margin:[1,0],column:!0,tabindex:"-1",flex:!0},A),n.default.createElement(o.Table,{title:M,background:"dropdownTable",enableResizing:!0,enableSorting:!0,enableSelection:!0,dataColumns:a,data:D,onRowSelected:r,onSearch:O,meta:v,sortBy:i,rowSelection:L,onSortingChange:u,expanded:d,onExpandedChange:s,enableSubRowSelection:b,width:{base:250,max:"80vw"}}),n.default.createElement(o.Flex,{padding:[2],justifyContent:"between",alignItems:"center",border:{side:"top",color:"borderSecondary"}},n.default.createElement(o.Flex,{gap:1,alignItems:"center"},n.default.createElement(o.TextSmall,{color:"textLite"},"Selected ",n.default.createElement(o.TextSmall,{strong:!0},z)," of"," ",n.default.createElement(o.TextSmall,{strong:!0},((null==P?void 0:P.sl)||0)+((null==P?void 0:P.ex)||0)||D.length)),n.default.createElement(o.Button,{padding:[0],flavour:"borderless",width:"auto",height:"auto",cursor:"pointer",color:"primary",onClick:function(){I({}),r([])},disabled:!(null!=_&&_.length)&&!x.length,label:"clear",small:!0}),n.default.createElement(o.Button,{padding:[0],flavour:"borderless",width:"auto",height:"auto",cursor:"pointer",color:"primary",onClick:function(){I(h({},L)),r(x)},disabled:!N,label:"reset",small:!0}),!(null!=_&&_.length)&&!!k&&n.default.createElement(o.TextSmall,{color:"warningText"},k)),P&&n.default.createElement(c.default,m({selected:x},P))))},E=function e(t,r,n){return t.reduce((function(t,r,a){return"undefined"!==typeof n&&(a=n+"."+a),r.selected&&(t[a]=!0),r.children&&e(r.children,t,a),t}),r)};t.default=function(e){var t=e.label,r=e.labelProps,a=e.onChange,l=e.getOptions,f=e.secondaryLabel,s=e.tooltipProps,p=e.value,v=e.columns,h=e.sortBy,b=void 0===h?x:h,y=e.onSortByChange,w=e.expanded,O=void 0===w?_:w,j=e.onExpandedChange,E=e.tableMeta,k=e.enableSubRowSelection,M=e.totals,C=e.emptyMessage,S=e.resourceName,A=e.title,D=e.filterSelectedCount,T=g(e,d),L=(0,n.useState)(!1),I=L[0],N=L[1],z=(0,n.useState)(),W=z[0],F=z[1],V=(0,n.useRef)();(0,n.useEffect)((function(){!I&&W&&(V.current=null,a(W))}),[I]),(0,n.useEffect)((function(){return function(){return V.current&&a(V.current)}}),[]);var B=(0,i.useAttributeValue)("id");return n.default.createElement(o.Menu,m({onChange:function(e){V.current=e,F(e)},hasSearch:!1,closeOnClick:!1,Dropdown:P,dropProps:{align:{top:"bottom",left:"left"},"data-toolbox":B,keepHorizontal:!0,stretch:null},dropdownProps:{height:{max:"60vh"},width:"100%",overflow:"auto",columns:v,getOptions:l,sortBy:b,onSortByChange:y,expanded:O,onExpandedChange:j,tableMeta:E,enableSubRowSelection:k,value:p,totals:M,newValues:W,emptyMessage:C,title:A,filterSelectedCount:D},value:p,onOpen:function(){return N(!0)},onClose:function(){return N(!1)}},T),n.default.createElement(u.default,m({"data-value":p.join("|")||S+" 
all-selected",secondaryLabel:f,label:t||n.default.createElement(c.default,m({selected:p},M,{resourceName:S,teaser:!0})),title:s.heading,tooltipProps:s},r)))}},75157:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var n=E(r(96540)),a=r(83199),o=r(22753),i=r(22332),l=E(r(6533)),u=E(r(36495)),c=E(r(65877)),f=E(r(55146)),d=E(r(28119)),s=E(r(21105)),p=E(r(90648)),m=E(r(26482)),g=E(r(67231)),v=E(r(88623)),h=E(r(6885)),b=E(r(64776)),y=(E(r(31866)),E(r(34386))),w=E(r(68317)),O=E(r(10120)),x=E(r(31952)),_=E(r(40813)),j=E(r(49484)),P=E(r(6504));function E(e){return e&&e.__esModule?e:{default:e}}function k(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function M(e){for(var t=1;t{t.__esModule=!0,t.default=void 0;var n,a=(n=r(8711))&&n.__esModule?n:{default:n},o=r(83199);function i(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function l(e,t,r){return(t=function(e){var t=function(e,t){if("object"!=typeof e||!e)return e;var r=e[Symbol.toPrimitive];if(void 0!==r){var n=r.call(e,t||"default");if("object"!=typeof n)return n;throw new TypeError("@@toPrimitive must return a primitive value.")}return("string"===t?String:Number)(e)}(e,"string");return"symbol"==typeof t?t:t+""}(t))in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}var u=(0,a.default)(o.Flex).attrs((function(e){return function(e){for(var t=1;t{t.__esModule=!0,t.default=void 0;var n,a=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=d(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(96540)),o=r(22332),i=r(83199),l=(n=r(34737))&&n.__esModule?n:{default:n},u=r(96659),c=r(35369),f=["labelProps"];function d(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(d=function(e){return e?r:t})(e)}function s(){return s=Object.assign?Object.assign.bind():function(e){for(var t=1;te.length)&&(t=e.length);for(var r=0,n=new Array(t);r=0||(a[r]=e[r]);return a}(e,f),c=(0,o.useChart)(),d=(0,o.useAttributeValue)("groupBy"),m=(0,o.useAttributeValue)("groupByLabel"),y=function(e){return(0,a.useMemo)((function(){return[{nm:"node",id:"node",key:"nodes"},{nm:e.intl("instance")+" "+("instance"===e.intl("instance")?"":"(instance)"),id:"instance",key:"instances"},{nm:"dimension",id:"dimension",key:"dimensions"},{nm:"percentage of "+e.intl("instance"),id:"percentage-of-instance",key:"instances"}]}),[])}(c),w=(0,a.useCallback)((function(){var e=c.getAttributes(),t=y.map((function(t){var r=d.includes(t.id);return(0,u.getStats)(c,t,function(e){for(var t=1;t1?m.length+" labels":m[0]:e}));return e.length{t.__esModule=!0,t.default=void 0;var n=u(r(96540)),a=u(r(46874)),o=u(r(75157)),i=u(r(19751)),l=["plain"];function u(e){return e&&e.__esModule?e:{default:e}}t.default=(0,i.default)((function(e){var t=e.plain,r=function(e,t){if(null==e)return{};var r,n,a={},o=Object.keys(e);for(n=0;n=0||(a[r]=e[r]);return a}(e,l);return 
n.default.createElement(a.default,r,n.default.createElement(o.default,{plain:t}))}))},55146:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var n,a=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=d(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(96540)),o=r(22332),i=r(20207),l=(n=r(34737))&&n.__esModule?n:{default:n},u=r(96659),c=r(35369),f=["labelProps"];function d(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(d=function(e){return e?r:t})(e)}function s(){return s=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,f),n=(0,o.useChart)(),c=(0,o.useAttributeValue)("selectedInstances"),d=(0,o.useAttributeValue)("instances"),m=(0,o.useAttributeValue)("instancesTotals"),g=(0,a.useCallback)((function(){return Object.keys(d).map((function(e){return(0,u.getStats)(n,d[e],{id:e,key:"instances",props:{selected:c.includes(e)}})}))}),[d,c]),v=(0,o.useAttribute)("instancesSortBy"),h=v[0],b=v[1],y=function(e){return(0,a.useMemo)((function(){return{heading:(0,i.uppercase)(e.intl("instance",{count:2})),body:"View or filter the "+e.intl("instance",{count:2})+" contributing time-series metrics to this chart. This menu also provides the contribution of each "+e.intl("instance")+" to the volume of the chart, and a break down of the anomaly rate of the queried data per "+e.intl("instance")+"."}}),[])}(n);return a.default.createElement(l.default,s({title:(0,i.uppercase)(n.intl("instance",{count:2})),resourceName:"instance","data-track":n.track("instances"),labelProps:t,onChange:n.updateInstancesAttribute,getOptions:g,tooltipProps:y,value:c,columns:p,sortBy:h,onSortByChange:b,totals:m},r))};t.default=(0,a.memo)(m)},36112:(e,t,r)=>{t.__esModule=!0,t.default=t.Container=void 0;var n=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=s(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(96540)),a=d(r(8711)),o=r(83199),i=d(r(63668)),l=d(r(6504)),u=r(33640),c=["width","open"],f=["icon","secondaryLabel","tertiaryLabel","label","chevron","iconRotate","textProps"];function d(e){return e&&e.__esModule?e:{default:e}}function s(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(s=function(e){return e?r:t})(e)}function p(){return p=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return a}var b=t.Container=(0,a.default)(o.Flex).attrs((function(e){var t=e.width,r=void 0===t?{max:100}:t,n=e.open,a=h(e,c);return 
g(g({cursor:"pointer",role:"button",padding:[.5],gap:.5,width:r,alignItems:"center"},n&&{background:"selected"}),a)})).withConfig({displayName:"label__Container",componentId:"sc-1lmmfid-0"})(["&:hover{background:",";}"],(0,o.getColor)("selected")),y=(0,a.default)(o.TextSmall).attrs({whiteSpace:"nowrap",truncate:!0}).withConfig({displayName:"label__StyledLabel",componentId:"sc-1lmmfid-1"})(["flex:1;"]),w=(0,n.forwardRef)((function(e,t){var r=e.icon,a=e.secondaryLabel,u=e.tertiaryLabel,c=e.label,d=e.chevron,s=void 0===d||d,m=e.iconRotate,g=e.textProps,v=h(e,f);return r?n.default.createElement(b,p({ref:t},v),r):n.default.createElement(b,p({ref:t},v),a&&n.default.createElement(o.TextSmall,{color:"textLite",whiteSpace:"nowrap",truncate:!0},a),n.default.createElement(y,g,c),u&&n.default.createElement(o.TextSmall,{color:"textLite",whiteSpace:"nowrap",truncate:!0},u),s&&n.default.createElement(l.default,{svg:i.default,size:"12px",color:"textNoFocus",rotate:m}))}));t.default=(0,u.withTooltip)(w,{Content:function(e){var t=e.heading,r=e.body;return n.default.createElement(o.Flex,p({column:!0,gap:1},u.tooltipStyleProps),t&&n.default.createElement(o.TextSmall,{strong:!0},t),r&&n.default.createElement(o.TextSmall,null,r))},align:"top"})},88623:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var n,a=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=f(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(96540)),o=r(22332),i=(n=r(34737))&&n.__esModule?n:{default:n},l=r(96659),u=r(35369),c=["labelProps"];function f(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(f=function(e){return e?r:t})(e)}function d(){return d=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,c),n=(0,o.useChart)(),u=(0,o.useAttributeValue)("selectedLabels"),f=(0,o.useAttributeValue)("labels"),m=(0,o.useAttributeValue)("labelsTotals"),g=(0,a.useCallback)((function(){return Object.keys(f).map((function(e){return(0,l.getStats)(n,f[e],{key:"labels",childrenKey:"values",props:{selected:u.includes(e)},childProps:{unique:"-",parentId:e,getIsSelected:function(t){return u.includes(e+":"+t.id)},getValue:function(t){return e+":"+t.id}},children:f[e].vl})}))}),[f,u]),v=(0,o.useAttribute)("labelsSortBy"),h=v[0],b=v[1],y=(0,o.useAttribute)("labelsExpanded"),w=y[0],O=y[1],x=(0,a.useCallback)((function(e){return e.filter((function(e){return!!e.parentId}))}),[]);return a.default.createElement(i.default,d({title:"Labels",resourceName:"label","data-track":n.track("labels"),labelProps:t,onChange:n.updateLabelsAttribute,getOptions:g,tooltipProps:s,value:u,columns:p,enableSubRowSelection:!0,sortBy:h,onSortByChange:b,expanded:w,onExpandedChange:O,totals:m,filterSelectedCount:x},r))};t.default=(0,a.memo)(m)},28119:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var n,a=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=f(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var 
i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(96540)),o=r(22332),i=(n=r(34737))&&n.__esModule?n:{default:n},l=r(96659),u=r(35369),c=["labelProps"];function f(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(f=function(e){return e?r:t})(e)}function d(){return d=Object.assign?Object.assign.bind():function(e){for(var t=1;te.length)&&(t=e.length);for(var r=0,n=new Array(t);r=0||(a[r]=e[r]);return a}(e,c),n=(0,o.useChart)(),u=(0,o.useAttributeValue)("selectedNodes"),f=(0,o.useAttributeValue)("nodes"),p=(0,o.useAttributeValue)("instances"),v=(0,o.useAttributeValue)("nodesTotals"),h=(0,o.useAttributeValue)("selectedInstances"),b=(0,a.useCallback)((function(){return Object.keys(f).map((function(e){var t=u.includes(e);return(0,l.getStats)(n,f[e],{id:e,key:"nodes",childrenKey:"instances",props:{selected:t},childProps:{isInstance:!0,getValue:function(t){return t.id+"@"+e},getIsSelected:function(t){return h.includes(t.id+"@"+e)}},children:Object.keys(p).reduce((function(t,r){return p[r].ni===f[e].ni?[].concat(s(t),[p[r]]):t}),[])})}))}),[f,u,h]),y=(0,o.useAttribute)("nodesSortBy"),w=y[0],O=y[1],x=(0,o.useAttribute)("nodesExpanded"),_=x[0],j=x[1],P=(0,a.useCallback)((function(e){return e.filter((function(e){return!e.isInstance}))}),[]);return a.default.createElement(i.default,d({title:"Nodes",resourceName:"node","data-track":n.track("nodes"),labelProps:t,onChange:n.updateNodesAttribute,getOptions:b,tooltipProps:m,value:u,columns:g,sortBy:w,onSortByChange:O,expanded:_,onExpandedChange:j,enableSubRowSelection:!1,totals:v,filterSelectedCount:P},r))};t.default=(0,a.memo)(v)},36495:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var n,a=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=u(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(96540)),o=r(22332),i=(n=r(94069))&&n.__esModule?n:{default:n},l=["labelProps"];function u(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(u=function(e){return e?r:t})(e)}function c(){return c=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,l),n=(0,o.useChart)(),u=(0,o.useAttributeValue)("postAggregationMethod"),f=function(e){return(0,a.useMemo)((function(){return[{value:"avg",label:"Average",description:"For each aggregated point, calculate the average of the metrics.",short:"AVG()","data-track":e.track("avg")},{value:"sum",label:"Sum",description:"For each aggregated point, calculate the sum of the metrics.",short:"SUM()","data-track":e.track("sum")},{value:"min",label:"Minimum",description:"For each aggregated point, present the minimum of the metrics.",short:"MIN()","data-track":e.track("min")},{value:"max",label:"Maximum",description:"For each aggregated point, present the maximum of the metrics.",short:"MAX()","data-track":e.track("max")}]}),[e])}(n),s=(f.find((function(e){return e.value===u}))||f[0]).short;return 
a.default.createElement(i.default,c({value:u,onChange:n.updatePostAggregationMethodAttribute,items:f,"data-track":n.track("post-aggregate")},r,{labelProps:d({secondaryLabel:"the",label:s,title:p.heading,tooltipProps:p},t)}))};t.default=(0,a.memo)(m)},26482:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var n,a=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=d(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(96540)),o=r(22332),i=r(83199),l=(n=r(34737))&&n.__esModule?n:{default:n},u=r(96659),c=r(35369),f=["labelProps"];function d(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(d=function(e){return e?r:t})(e)}function s(){return s=Object.assign?Object.assign.bind():function(e){for(var t=1;te.length)&&(t=e.length);for(var r=0,n=new Array(t);r=0||(a[r]=e[r]);return a}(e,f),c=(0,o.useChart)(),d=(0,o.useAttributeValue)("postGroupBy"),m=(0,o.useAttributeValue)("postGroupByLabel"),h=function(e){return(0,a.useMemo)((function(){return[{nm:"node",id:"node",key:"nodes"},{nm:e.intl("instance")+" "+("instance"===e.intl("instance")?"":"(instance)"),id:"instance",key:"instances"},{nm:"dimension",id:"dimension",key:"dimensions"},{nm:"percentage of "+e.intl("instance"),id:"percentage-of-instance",key:"instances"},{nm:"as single value",id:"selected"}]}),[])}(c),b=(0,a.useCallback)((function(){var e=c.getAttributes(),t=h.map((function(t){var r=d.includes(t.id);return(0,u.getStats)(c,t,{key:"group-by",childrenKey:"label",props:{contribution:"-",anomalyRate:"-",alerts:"-",min:"-",avg:"-",max:"-",selected:r},childProps:{unique:"-",disabled:"hidden"},children:e[t.key]?Object.values(e[t.key]):[]})}));return[].concat(p(t),p(Object.keys(e.labels).map((function(t){return(0,u.getStats)(c,e.labels[t],{key:"group-by",childrenKey:"label",props:{getLabel:function(e){return"label: "+(e.nm||t||e.id)},isLabel:!0,selected:m.includes(t)},childProps:{unique:"-",disabled:"hidden"},children:e.labels[t].vl})}))))}),[d,m]);t=(0,a.useMemo)((function(){var e=d.filter((function(e){return"node"!==e})),t=e.map((function(e){return"label"===e?m.length>1?m.length+" labels":m[0]:e}));return e.length{t.__esModule=!0,t.default=void 0;var n=l(r(96540)),a=l(r(8711)),o=r(83199),i=r(65562);function l(e){return e&&e.__esModule?e:{default:e}}var u=(0,a.default)(o.Button).attrs({flavour:"borderless",label:"Reset",width:"initial",height:"initial",padding:[0,1],title:"Reset Filters",small:!0,neutral:!0}).withConfig({displayName:"reset__StyledButton",componentId:"sc-kilpc3-0"})(["&&{height:initial;font-weight:normal;}"]);t.default=function(){var e=(0,i.useChart)(),t=(0,i.useAttributeValue)("pristine"),r=!Object.keys(t).length;return n.default.createElement(u,{disabled:r,onClick:e.resetPristine,"data-track":e.track("reset")})}},64776:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var n=i(r(96540)),a=r(65562),o=i(r(36112));function i(e){return e&&e.__esModule?e:{default:e}}function l(){return l=Object.assign?Object.assign.bind():function(e){for(var t=1;t{t.__esModule=!0,t.default=void 0;var n=l(r(96540)),a=r(83199),o=l(r(46874)),i=l(r(36112));function l(e){return e&&e.__esModule?e:{default:e}}t.default=function(){return 
n.default.createElement(o.default,null,n.default.createElement(a.Flex,{gap:1},n.default.createElement(i.default,{width:"90px",background:"borderSecondary",secondaryLabel:"",label:""}),n.default.createElement(i.default,{width:"120px",background:"borderSecondary"}),n.default.createElement(i.default,{width:"100px",background:"borderSecondary"})))}},67231:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var n,a=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=c(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(96540)),o=r(83199),i=r(22332),l=(n=r(94069))&&n.__esModule?n:{default:n},u=["labelProps"];function c(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(c=function(e){return e?r:t})(e)}function f(){return f=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,u),n=(0,i.useChart)(),c=(0,i.useAttributeValue)("groupingMethod").match(/[\d.]+|\D+/g)||[],d=c[0],p=void 0===d?"":d,b=c[1],y=void 0===b?"":b,w=(0,i.useAttributeValue)("viewUpdateEvery"),O=(0,i.useAttributeValue)("perTier"),x=function(e,t){void 0===t&&(t=[]);var r=t,n=r[0],o=r.slice(1);return(0,a.useMemo)((function(){return[{value:"min",label:"Minimum",description:"Reveal short dives that would otherwise be smoothed out.",short:"MIN()","data-track":e.track("time-aggregation-min")},{value:"max",label:"Maximum",description:"Reveal short spikes that would otherwise be smoothed out.",short:"MAX()","data-track":e.track("time-aggregation-max")},{value:"average",label:"Mean or Average",description:"Calculate the longer term average, as if data were collected at screen resolution.",short:"AVG()","data-track":e.track("time-aggregation-average")},{value:"sum",label:"Sum",description:"Provide the sum of the points that are aggregated over time. Use it when a sense of volume is needed over the aggregation period. It may not be sensible to use this function on all data types.",short:"SUM()","data-track":e.track("time-aggregation-sum")},Array.isArray(o)&&"undefined"!==typeof(null==n?void 0:n.points)&&{justDesc:!0,description:"The functions below lose accuracy when applied on tiered data, compared to high resolution data. Your current query is "+100*n.points/t.reduce((function(e,t){return e+t.points}),0)+"% high resolution and "+100*o.reduce((function(e,t){return e+t.points}),0)/t.reduce((function(e,t){return e+t.points}),0).toFixed(2)+"% tiered data of lower resolution."},{value:"percentile",label:"Percentile",description:"Provide the maximum value of a percentage of the aggregated points, having the smaller values. The default is p95, which provides the maximum value of the aggregated points after ignoring the top 5% of them.",short:"PERCENTILE()","data-track":e.track("time-aggregation-percentile95")},{value:"trimmed-mean",label:"Trimmed Average or Trimmed Mean",description:"Like average, but first remove a percentage of the extreme high and low values.",short:"TRIMMEAN()","data-track":e.track("time-aggregation-trimmed-mean5")},{value:"median",label:"Median",description:"The middle value of all points that would otherwise be smoothed out. 
This function works like average, but short extreme dives and spikes influence it significantly less than average.",short:"MEDIAN()","data-track":e.track("time-aggregation-median")},{value:"trimmed-median",label:"Trimmed Median",description:"Like median, but first remove a percentage of the extreme high and low values.",short:"TRIMMEDIAN()","data-track":e.track("time-aggregation-trimmed-median5")},{value:"stddev",label:"Standard deviation",description:"Reveal how far each point lies from the average. A high standard deviation means that values are generally far from the average, while a low standard deviation indicates that values are clustered close to the mean. The result is again in the original units of the data source metric.",short:"STDDEV()","data-track":e.track("time-aggregation-stddev")},{value:"cv",label:"Coefficient of variation or Relative standard deviation",description:"The ratio of the standard deviation to the average. Its use is the same as standard deviation, but expressed as a percentage related to the average. The units change to %.",short:"CV()","data-track":e.track("time-aggregation-cv")},{value:"incremental-sum",label:"Incremental Sum or Delta",description:"Provide the difference between the newest and the oldest values of the aggregated points. Each point will be positive if the trend grows and negative if the trend shrinks.",short:"DELTA()","data-track":e.track("time-aggregation-incremental-sum")},{value:"ses",label:"Single exponential smoothing",description:"Use the aggregated points to produce a forecast of the next value, and reveal the forecasted value. Use it when there are indications that the trend is more predictable using the more recent points than the older ones.",short:"SES()","data-track":e.track("time-aggregation-ses")},{value:"des",label:"Double exponential smoothing",description:"Like single exponential smoothing, but better suited when the aggregated points may have a strong trend.",short:"DES()","data-track":e.track("time-aggregation-des")}].filter(Boolean)}),[e,null==n?void 0:n.points])}(n,O),_=function(e){var 
t=e.chart,r=e.method;return(0,a.useMemo)((function(){return"percentile"===r?[{value:"25",label:"25th",short:"25th","data-track":t.track("time-aggregation-percentile25")},{value:"50",label:"50th",short:"50th","data-track":t.track("time-aggregation-percentile50")},{value:"75",label:"75th",short:"75th","data-track":t.track("time-aggregation-percentile75")},{value:"80",label:"80th",short:"80th","data-track":t.track("time-aggregation-percentile80")},{value:"90",label:"90th",short:"90th","data-track":t.track("time-aggregation-percentile90")},{value:"95",label:"95th",short:"95th","data-track":t.track("time-aggregation-percentile95")},{value:"97",label:"97th",short:"97th","data-track":t.track("time-aggregation-percentile97")},{value:"98",label:"98th",short:"98th","data-track":t.track("time-aggregation-percentile98")},{value:"99",label:"99th",short:"99th","data-track":t.track("time-aggregation-percentile99")}]:r.includes("trimmed")?[{value:"1",label:"1%",short:"1%","data-track":t.track("time-aggregation-"+r+"1")},{value:"2",label:"2%",short:"2%","data-track":t.track("time-aggregation-"+r+"2")},{value:"3",label:"3%",short:"3%","data-track":t.track("time-aggregation-"+r+"3")},{value:"5",label:"5%",short:"5%","data-track":t.track("time-aggregation-"+r+"5")},{value:"10",label:"10%",short:"10%","data-track":t.track("time-aggregation-"+r+"10")},{value:"15",label:"15%",short:"15%","data-track":t.track("time-aggregation-"+r+"15")},{value:"20",label:"20%",short:"20%","data-track":t.track("time-aggregation-"+r+"20")},{value:"25",label:"25%",short:"25%","data-track":t.track("time-aggregation-"+r+"25")}]:[]}),[t,r])}({chart:n,method:p}),j=(x.find((function(e){return e.value===p}))||x[0]).short,P=_.find((function(e){return e.value===y}))||_[0];return a.default.createElement(o.Flex,null,y&&a.default.createElement(l.default,f({value:y,onChange:function(e){return n.updateTimeAggregationMethodAttribute({alias:e,method:p})},items:_,"data-track":n.track("groupingMethodAlias")},r,{labelProps:s({secondaryLabel:"each as",label:P.short,title:g.heading,tooltipProps:g},t)})),a.default.createElement(l.default,f({value:p,onChange:function(e){return n.updateTimeAggregationMethodAttribute({alias:m[e],method:e})},items:x,"data-track":n.track("groupingMethod"),dropTitle:h},r,{labelProps:s({secondaryLabel:!y&&"each as",tertiaryLabel:"every "+w+"s",label:j,title:v.heading,tooltipProps:v},t)})))};t.default=(0,a.memo)(b)},4975:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var n=c(r(96540)),a=r(83199),o=c(r(97517)),i=c(r(88335)),l=c(r(6504)),u=r(22332);function c(e){return e&&e.__esModule?e:{default:e}}t.default=function(e){var t=e.selected,r=void 0===t?[]:t,c=e.qr,f=void 0===c?0:c,d=e.fl,s=void 0===d?0:d,p=e.sl,m=void 0===p?0:p,g=e.ex,v=void 0===g?0:g,h=e.teaser,b=void 0!==h&&h,y=e.resourceName,w=m+v,O=r.length&&r.length0||b&&f<(r.length||w),_=(b?r.length||w:r.length)||m,j=(0,u.useChart)();return n.default.createElement(a.TextMicro,{color:"textLite"},n.default.createElement(a.TextMicro,{color:b?"text":"primary"},O),b?" ":" queried",!b&&n.default.createElement(l.default,{margin:[-.5,1,-.5,0],width:"14px",height:"14px",color:"primary",svg:o.default}),!!s&&n.default.createElement(n.default.Fragment,null,b?n.default.createElement(a.TextMicro,{color:"errorLite"}," +"):"+ ",n.default.createElement(a.TextMicro,{color:"errorLite"},s),b?" 
":"failed ",n.default.createElement(l.default,{margin:[-.5,1,-.5,0],width:"14px",height:"14px",color:"errorLite",svg:i.default})),x&&n.default.createElement(n.default.Fragment,null,"of ",n.default.createElement(a.TextMicro,{color:b?"textLite":"text"},_),b?" ":" selected"),!b&&f!==w&&n.default.createElement(n.default.Fragment,null,"of ",n.default.createElement(a.TextMicro,null,w)," available"),y?j.intl(y,{count:x?_:O}):"")}},96659:(e,t)=>{t.__esModule=!0,t.getStats=void 0;var r=["getValue","getLabel","getIsSelected"];function n(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function a(e){for(var t=1;t=0||(a[r]=e[r]);return a}(v,r);return a(a({label:(null==b?void 0:b(n))||n.nm||f||n.id,value:(null==h?void 0:h(n))||f||n.id,"data-track":t.track(d+"-"+(f||n.id||n.nm)),unique:m.length,instances:l(n.is),metrics:l(n.ds),contribution:i(n.sts,"con"),anomalyRate:i(n.sts,"arp"),min:i(n.sts,"min"),avg:i(n.sts,"avg"),max:i(n.sts,"max"),alerts:u(n.al),info:n,selected:(null==y?void 0:y(n))||!1},w),{},{children:m.map((function(r){return e(t,r,{key:d+"-"+s,props:g})}))})}},19751:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var n=i(r(96540)),a=r(22332),o=i(r(70419));function i(e){return e&&e.__esModule?e:{default:e}}t.default=function(e){return function(t){return(0,a.useInitialLoading)()?n.default.createElement(o.default,t):n.default.createElement(e,t)}}},39360:(e,t,r)=>{t.Ay=void 0;var n=g(r(96540)),a=g(r(8711)),o=r(83199),i=p(r(74113)),l=r(22332),u=p(r(10534)),c=r(89380),f=p(r(62841)),d=["empty","index","uiName"],s=["uiName"];function p(e){return e&&e.__esModule?e:{default:e}}function m(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(m=function(e){return e?r:t})(e)}function g(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=m(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}function v(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function h(e,t,r){return(t=function(e){var t=function(e,t){if("object"!=typeof e||!e)return e;var r=e[Symbol.toPrimitive];if(void 0!==r){var n=r.call(e,t||"default");if("object"!=typeof n)return n;throw new TypeError("@@toPrimitive must return a primitive value.")}return("string"===t?String:Number)(e)}(e,"string");return"symbol"==typeof t?t:t+""}(t))in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function b(){return b=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return a}var w=(0,a.default)(o.Text).withConfig({displayName:"gauge__Label",componentId:"sc-1o49axm-0"})(["line-height:1;font-size:",";flex:",";",";"],(function(e){return e.fontSize}),(function(e){var t=e.flex;return void 0===t?0:t}),(function(e){return e.isFetching&&f.default})),O=(0,a.default)(w).withConfig({displayName:"gauge__StrokeLabel",componentId:"sc-1o49axm-1"})(["text-shadow:0.02em 0 ",",0 0.02em ",",-0.02em 0 ",",0 -0.02em 
",";"],(0,o.getColor)("border"),(0,o.getColor)("border"),(0,o.getColor)("border"),(0,o.getColor)("border")),x=function(){var e=(0,l.useLatestConvertedValue)("selected");return n.default.createElement(O,{flex:"2",color:"text",fontSize:"2em",strong:!0},e)},_=function(){var e=(0,l.useDimensionIds)()[0],t=(0,l.useUnitSign)({dimensionId:e});return n.default.createElement(w,{color:"textLite",fontSize:"1em"},t)},j=function(e){var t=e.empty,r=e.index,a=e.uiName,o=y(e,d),i=(0,l.useChart)(),u=i.getUI(a).getMinMax();return n.default.createElement(w,b({color:"textLite",fontSize:"1.3em"},o),t?"-":i.getConvertedValue(u[r]))},P=(0,a.default)(o.Flex).attrs({alignItems:"center",justifyContent:"between",flex:!0}).withConfig({displayName:"gauge__BoundsContainer",componentId:"sc-1o49axm-2"})([""]),E=function(e){var t=e.uiName;return n.default.createElement(P,null,n.default.createElement(j,{index:0,uiName:t}),n.default.createElement(j,{index:1,uiName:t}))},k=(0,a.default)(o.Flex).attrs({position:"absolute",column:!0,alignContent:"center",justifyContent:"center"}).withConfig({displayName:"gauge__StatsContainer",componentId:"sc-1o49axm-3"})(["inset:",";text-align:center;font-size:",";"],(function(e){return e.inset}),(function(e){return e.fontSize})),M=function(e){var t=e.uiName,r=(0,l.useOnResize)(t),a=r.width,o=r.height,i=a{t.__esModule=!0,t.makeGetColor=t.getWidth=t.default=void 0;var n,a=r(16199),o=r(69982),i=r(33862),l=r(41377),u=(n=r(92084))&&n.__esModule?n:{default:n};function c(){"use strict";c=function(){return t};var e,t={},r=Object.prototype,n=r.hasOwnProperty,a=Object.defineProperty||function(e,t,r){e[t]=r.value},o="function"==typeof Symbol?Symbol:{},i=o.iterator||"@@iterator",l=o.asyncIterator||"@@asyncIterator",u=o.toStringTag||"@@toStringTag";function f(e,t,r){return Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}),e[t]}try{f({},"")}catch(e){f=function(e,t,r){return e[t]=r}}function d(e,t,r,n){var o=t&&t.prototype instanceof b?t:b,i=Object.create(o.prototype),l=new A(n||[]);return a(i,"_invoke",{value:k(e,r,l)}),i}function s(e,t,r){try{return{type:"normal",arg:e.call(t,r)}}catch(e){return{type:"throw",arg:e}}}t.wrap=d;var p="suspendedStart",m="suspendedYield",g="executing",v="completed",h={};function b(){}function y(){}function w(){}var O={};f(O,i,(function(){return this}));var x=Object.getPrototypeOf,_=x&&x(x(D([])));_&&_!==r&&n.call(_,i)&&(O=_);var j=w.prototype=b.prototype=Object.create(O);function P(e){["next","throw","return"].forEach((function(t){f(e,t,(function(e){return this._invoke(t,e)}))}))}function E(e,t){function r(a,o,i,l){var u=s(e[a],e,o);if("throw"!==u.type){var c=u.arg,f=c.value;return f&&"object"==typeof f&&n.call(f,"__await")?t.resolve(f.__await).then((function(e){r("next",e,i,l)}),(function(e){r("throw",e,i,l)})):t.resolve(f).then((function(e){c.value=e,i(c)}),(function(e){return r("throw",e,i,l)}))}l(u.arg)}var o;a(this,"_invoke",{value:function(e,n){function a(){return new t((function(t,a){r(e,n,t,a)}))}return o=o?o.then(a,a):a()}})}function k(t,r,n){var a=p;return function(o,i){if(a===g)throw Error("Generator is already running");if(a===v){if("throw"===o)throw i;return{value:e,done:!0}}for(n.method=o,n.arg=i;;){var l=n.delegate;if(l){var u=M(l,n);if(u){if(u===h)continue;return u}}if("next"===n.method)n.sent=n._sent=n.arg;else if("throw"===n.method){if(a===p)throw a=v,n.arg;n.dispatchException(n.arg)}else"return"===n.method&&n.abrupt("return",n.arg);a=g;var 
c=s(t,r,n);if("normal"===c.type){if(a=n.done?v:m,c.arg===h)continue;return{value:c.arg,done:n.done}}"throw"===c.type&&(a=v,n.method="throw",n.arg=c.arg)}}}function M(t,r){var n=r.method,a=t.iterator[n];if(a===e)return r.delegate=null,"throw"===n&&t.iterator.return&&(r.method="return",r.arg=e,M(t,r),"throw"===r.method)||"return"!==n&&(r.method="throw",r.arg=new TypeError("The iterator does not provide a '"+n+"' method")),h;var o=s(a,t.iterator,r.arg);if("throw"===o.type)return r.method="throw",r.arg=o.arg,r.delegate=null,h;var i=o.arg;return i?i.done?(r[t.resultName]=i.value,r.next=t.nextLoc,"return"!==r.method&&(r.method="next",r.arg=e),r.delegate=null,h):i:(r.method="throw",r.arg=new TypeError("iterator result is not an object"),r.delegate=null,h)}function C(e){var t={tryLoc:e[0]};1 in e&&(t.catchLoc=e[1]),2 in e&&(t.finallyLoc=e[2],t.afterLoc=e[3]),this.tryEntries.push(t)}function S(e){var t=e.completion||{};t.type="normal",delete t.arg,e.completion=t}function A(e){this.tryEntries=[{tryLoc:"root"}],e.forEach(C,this),this.reset(!0)}function D(t){if(t||""===t){var r=t[i];if(r)return r.call(t);if("function"==typeof t.next)return t;if(!isNaN(t.length)){var a=-1,o=function r(){for(;++a=0;--o){var i=this.tryEntries[o],l=i.completion;if("root"===i.tryLoc)return a("end");if(i.tryLoc<=this.prev){var u=n.call(i,"catchLoc"),c=n.call(i,"finallyLoc");if(u&&c){if(this.prev=0;--r){var a=this.tryEntries[r];if(a.tryLoc<=this.prev&&n.call(a,"finallyLoc")&&this.prev=0;--t){var r=this.tryEntries[t];if(r.finallyLoc===e)return this.complete(r.completion,r.afterLoc),S(r),h}},catch:function(e){for(var t=this.tryEntries.length-1;t>=0;--t){var r=this.tryEntries[t];if(r.tryLoc===e){var n=r.completion;if("throw"===n.type){var a=n.arg;S(r)}return a}}throw Error("illegal catch attempt")},delegateYield:function(t,r,n){return this.delegate={iterator:D(t),resultName:r,nextLoc:n},"next"===this.method&&(this.arg=e),h}},t}t.getWidth=function(e,t){var r=void 0===t?{}:t,n=r.aspectRatio,a=r.cellSize,o=(0,l.getRows)(e,n),i=(0,l.getColumns)(o,n);return(0,l.getFullWidth)(i,a)};var f=function(e,t){var r=void 0===t?{}:t,n=r.aspectRatio,a=r.cellSize,o=r.padding,i=(0,l.getRows)(e,n),u=(0,l.getColumns)(i,n);return{width:(0,l.getFullWidth)(u,a),height:(0,l.getFullHeight)(i,a,o),columns:Math.ceil(u)}},d=t.makeGetColor=function(e,t,r){return(0,a.scaleLinear)().domain([e,t]).range(r)};t.default=function(e,t,r,n){var a=c().mark(S),s=r.onMouseenter,p=r.onMouseout;void 0===n&&(n={});var m=n,g=m.cellSize,v=m.cellPadding,h=m.cellStroke,b=void 0===h?2:h,y=m.lineWidth,w=void 0===y?1:y,O=m.colorRange,x=void 0===O?[e.getThemeAttribute("themeGroupBoxesMin"),e.getThemeAttribute("themeGroupBoxesMax")]:O,_=t.getContext("2d"),j=(0,i.createCanvas)(_.width,_.height),P=j.getContext("2d"),E=-1,k=function(){},M=function(){},C=function(){};function S(r,m){var h,y,O,S,A,D,T,L,I;return c().wrap((function(a){for(;;)switch(a.prev=a.next){case 0:if(h=f(r,n),y=h.width,O=h.height,S=h.columns,y&&O){a.next=6;break}if(!(0,o.unstable_shouldYield)()){a.next=5;break}return void(a.next=5);case 5:return a.abrupt("return");case 6:j.width=parseInt(y),j.height=parseInt(O),P.clearRect(0,0,j.width,j.height),A=e.getAttribute("min"),D=e.getAttribute("max"),T=d(A,D,x),L=function(t,r,n){t.beginPath(),t.fillStyle=T(e.getRowDimensionValue(r,m));var a=(0,l.getXPosition)(S,n,g),o=(0,l.getYPosition)(S,n,g);w&&b&&t.clearRect(a-w,o-w,(0,l.getCellBoxSize)(g,v)+b,(0,l.getCellBoxSize)(g,v)+b),t.fillRect(a,o,(0,l.getCellBoxSize)(g,v),(0,l.getCellBoxSize)(g,v))},I=0;case 
14:if(!(I{t.__esModule=!0,t.default=void 0;var n=r(41377);t.default=function(e,t,r,a,o){var i=a.onMouseenter,l=a.onMouseout,u=void 0===o?{}:o,c=u.cellSize,f=u.cellPadding,d=-1,s=function(r){var a=e.getBoundingClientRect(),o=(0,n.getXPosition)(t,r,c),i=(0,n.getYPosition)(t,r,c),l=a.left+o,u=a.top+i,d=(0,n.getCellBoxSize)(c,f);return{index:r,left:l,top:u,right:l+d,bottom:u+d,width:d,height:d,offsetX:o,offsetY:i}},p=function(){l(s(d)),d=-1},m=function(e){var a=e.offsetX,o=e.offsetY,l=(0,n.getOffsetPosition)(a,c),u=(0,n.getOffsetPosition)(o,c)*t+l;u!==d&&(-1!==d&&p(),u>=r||(i(s(u)),d=u))};return e.addEventListener("mousemove",m),e.addEventListener("mouseout",p),function(){e.removeEventListener("mousemove",m),e.removeEventListener("mouseout",p)}}},7365:(e,t,r)=>{t.__esModule=!0,t.default=t.Container=void 0;var n=l(r(96540)),a=r(83199),o=l(r(93658)),i=l(r(66788));function l(e){return e&&e.__esModule?e:{default:e}}function u(){return u=Object.assign?Object.assign.bind():function(e){for(var t=1;t{t.__esModule=!0,t.default=void 0;var n=l(r(96540)),a=l(r(8711)),o=r(83199),i=r(22332);function l(e){return e&&e.__esModule?e:{default:e}}var u=(0,a.default)(o.Flex).attrs({width:{max:"320px",base:"100%"},height:"12px",round:!0}).withConfig({displayName:"legend__LinearColorScaleBar",componentId:"sc-1iit0cq-0"})(["background:linear-gradient( to right,",","," );"],(function(e){return e.minColor}),(function(e){return e.maxColor}));t.default=function(){var e=(0,i.useChart)(),t=(0,i.useAttributeValue)("min"),r=(0,i.useAttributeValue)("max"),a=(0,i.useUnitSign)(),l=(0,i.useAttributeValue)("selectedContexts").join(", "),c=(0,i.useAttributeValue)("contextScope").join(", ");(0,i.useAttributeValue)("theme");var f=e.getThemeAttribute("themeGroupBoxesMin"),d=e.getThemeAttribute("themeGroupBoxesMax");return n.default.createElement(o.Flex,{"data-testid":"groupBox-legend",gap:4,alignItems:"center"},n.default.createElement(o.TextNano,{strong:!0},l&&"*"!==l?l:c),n.default.createElement(o.Flex,{gap:2,alignItems:"center"},n.default.createElement(o.TextNano,null,e.getConvertedValue(t)," ",a),n.default.createElement(u,{minColor:f,maxColor:d}),n.default.createElement(o.TextNano,null,e.getConvertedValue(r)," ",a)))}},25069:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var n=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=p(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(96540)),a=r(22332),o=s(r(39912)),i=s(r(65010)),l=s(r(39600)),u=s(r(8367)),c=["uiName","dimensions","groupLabel"],f=["index"],d=["index"];function s(e){return e&&e.__esModule?e:{default:e}}function p(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(p=function(e){return e?r:t})(e)}function m(){"use strict";m=function(){return t};var e,t={},r=Object.prototype,n=r.hasOwnProperty,a=Object.defineProperty||function(e,t,r){e[t]=r.value},o="function"==typeof Symbol?Symbol:{},i=o.iterator||"@@iterator",l=o.asyncIterator||"@@asyncIterator",u=o.toStringTag||"@@toStringTag";function c(e,t,r){return Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}),e[t]}try{c({},"")}catch(e){c=function(e,t,r){return e[t]=r}}function f(e,t,r,n){var o=t&&t.prototype instanceof 
b?t:b,i=Object.create(o.prototype),l=new A(n||[]);return a(i,"_invoke",{value:k(e,r,l)}),i}function d(e,t,r){try{return{type:"normal",arg:e.call(t,r)}}catch(e){return{type:"throw",arg:e}}}t.wrap=f;var s="suspendedStart",p="suspendedYield",g="executing",v="completed",h={};function b(){}function y(){}function w(){}var O={};c(O,i,(function(){return this}));var x=Object.getPrototypeOf,_=x&&x(x(D([])));_&&_!==r&&n.call(_,i)&&(O=_);var j=w.prototype=b.prototype=Object.create(O);function P(e){["next","throw","return"].forEach((function(t){c(e,t,(function(e){return this._invoke(t,e)}))}))}function E(e,t){function r(a,o,i,l){var u=d(e[a],e,o);if("throw"!==u.type){var c=u.arg,f=c.value;return f&&"object"==typeof f&&n.call(f,"__await")?t.resolve(f.__await).then((function(e){r("next",e,i,l)}),(function(e){r("throw",e,i,l)})):t.resolve(f).then((function(e){c.value=e,i(c)}),(function(e){return r("throw",e,i,l)}))}l(u.arg)}var o;a(this,"_invoke",{value:function(e,n){function a(){return new t((function(t,a){r(e,n,t,a)}))}return o=o?o.then(a,a):a()}})}function k(t,r,n){var a=s;return function(o,i){if(a===g)throw Error("Generator is already running");if(a===v){if("throw"===o)throw i;return{value:e,done:!0}}for(n.method=o,n.arg=i;;){var l=n.delegate;if(l){var u=M(l,n);if(u){if(u===h)continue;return u}}if("next"===n.method)n.sent=n._sent=n.arg;else if("throw"===n.method){if(a===s)throw a=v,n.arg;n.dispatchException(n.arg)}else"return"===n.method&&n.abrupt("return",n.arg);a=g;var c=d(t,r,n);if("normal"===c.type){if(a=n.done?v:p,c.arg===h)continue;return{value:c.arg,done:n.done}}"throw"===c.type&&(a=v,n.method="throw",n.arg=c.arg)}}}function M(t,r){var n=r.method,a=t.iterator[n];if(a===e)return r.delegate=null,"throw"===n&&t.iterator.return&&(r.method="return",r.arg=e,M(t,r),"throw"===r.method)||"return"!==n&&(r.method="throw",r.arg=new TypeError("The iterator does not provide a '"+n+"' method")),h;var o=d(a,t.iterator,r.arg);if("throw"===o.type)return r.method="throw",r.arg=o.arg,r.delegate=null,h;var i=o.arg;return i?i.done?(r[t.resultName]=i.value,r.next=t.nextLoc,"return"!==r.method&&(r.method="next",r.arg=e),r.delegate=null,h):i:(r.method="throw",r.arg=new TypeError("iterator result is not an object"),r.delegate=null,h)}function C(e){var t={tryLoc:e[0]};1 in e&&(t.catchLoc=e[1]),2 in e&&(t.finallyLoc=e[2],t.afterLoc=e[3]),this.tryEntries.push(t)}function S(e){var t=e.completion||{};t.type="normal",delete t.arg,e.completion=t}function A(e){this.tryEntries=[{tryLoc:"root"}],e.forEach(C,this),this.reset(!0)}function D(t){if(t||""===t){var r=t[i];if(r)return r.call(t);if("function"==typeof t.next)return t;if(!isNaN(t.length)){var a=-1,o=function r(){for(;++a=0;--o){var i=this.tryEntries[o],l=i.completion;if("root"===i.tryLoc)return a("end");if(i.tryLoc<=this.prev){var u=n.call(i,"catchLoc"),c=n.call(i,"finallyLoc");if(u&&c){if(this.prev=0;--r){var a=this.tryEntries[r];if(a.tryLoc<=this.prev&&n.call(a,"finallyLoc")&&this.prev=0;--t){var r=this.tryEntries[t];if(r.finallyLoc===e)return this.complete(r.completion,r.afterLoc),S(r),h}},catch:function(e){for(var t=this.tryEntries.length-1;t>=0;--t){var r=this.tryEntries[t];if(r.tryLoc===e){var n=r.completion;if("throw"===n.type){var a=n.arg;S(r)}return a}}throw Error("illegal catch attempt")},delegateYield:function(t,r,n){return this.delegate={iterator:D(t),resultName:r,nextLoc:n},"next"===this.method&&(this.arg=e),h}},t}function g(e,t){if(null==e)return{};var r,n,a={},o=Object.keys(e);for(n=0;n=0||(a[r]=e[r]);return a}t.default=function(e){var 
t=e.uiName,r=e.dimensions,s=e.groupLabel,p=g(e,c),v=(0,a.useChart)(),h=(0,n.useRef)(),b=(0,n.useRef)(),y=(0,n.useRef)(),w=(0,n.useState)(null),O=w[0],x=w[1],_=(0,n.useRef)(-1),j=(0,n.useRef)();(0,n.useLayoutEffect)((function(){return y.current=(0,i.default)(v,b.current,{onMouseenter:function(e){var t=e.index,r=g(e,f);_.current=t,y.current.activateBox(t),j.current=setTimeout((function(){x({target:{getBoundingClientRect:function(){return r}},index:t})}),100)},onMouseout:function(){_.current=-1,clearTimeout(j.current),requestAnimationFrame((function(){x((function(e){return-1===_.current||_.current!==(null==e?void 0:e.index)?(y.current.deactivateBox(),_.current=-1,null):e}))}))},onClick:function(e){void 0===e&&(e={});var t=e,r=t.index,n=g(t,d);_.current=r,y.current.activateBox(r),j.current=setTimeout((function(){x({target:{getBoundingClientRect:function(){return n}},index:r})}),100)}},p),function(){return y.current.clear()}}),[]);var P=(0,l.default)(t),E=(0,o.default)(),k=E[1],M=E[2],C=(0,a.useAttributeValue)("theme");(0,n.useLayoutEffect)((function(){return k(m().mark((function e(){return m().wrap((function(e){for(;;)switch(e.prev=e.next){case 0:return O&&h.current&&h.current[O.index]!==r[O.index]&&(y.current.deactivateBox(),x(null),_.current=-1),h.current=r,e.delegateYield(y.current.update(r,P),"t0",3);case 3:case"end":return e.stop()}}),e)}))),function(){return M()}}),[P,k,M,C]);var S=(0,n.useMemo)((function(){if(O){var e=r[O.index].split(",");return e[e.length-1]}}),[r[null==O?void 0:O.index]]);return n.default.createElement(n.Fragment,null,n.default.createElement("canvas",{"data-testid":"groupBox",ref:b}),O&&n.default.createElement(u.default,{target:O.target,label:S,groupLabel:s,data:P,id:r[O.index]}))}},28805:(e,t,r)=>{t.__esModule=!0,t.default=t.SkeletonIcon=void 0;var n=s(r(96540)),a=s(r(8711)),o=r(83199),i=r(22332),l=f(r(93693)),u=f(r(25069)),c=f(r(5702));function f(e){return e&&e.__esModule?e:{default:e}}function d(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(d=function(e){return e?r:t})(e)}function s(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=d(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}function p(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function m(e,t,r){return(t=function(e){var t=function(e,t){if("object"!=typeof e||!e)return e;var r=e[Symbol.toPrimitive];if(void 0!==r){var n=r.call(e,t||"default");if("object"!=typeof n)return n;throw new TypeError("@@toPrimitive must return a primitive value.")}return("string"===t?String:Number)(e)}(e,"string");return"symbol"==typeof t?t:t+""}(t))in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}var g=(0,a.keyframes)(["from{opacity:0.2;}to{opacity:0.6;}"]),v=(0,a.default)(o.Flex).attrs((function(e){return function(e){for(var t=1;t3&&n.default.createElement("span",null,"(",s.length,")"))),m.length?Object.keys(a).map((function(t){return 
n.default.createElement(e,{key:t,label:t,subTree:a[t],data:l,uiName:r,groupedBy:m,hasMore:m.length>1})})):n.default.createElement(u.default,{dimensions:s,groupLabel:c,uiName:r,groupKey:p}))},y=function(e){var t=e.uiName,r=(0,c.default)(t),a=r.data,u=r.tree,f=(0,i.useAttributeValue)("loaded"),d=(0,i.useAttributeValue)("showingInfo"),s=(0,i.useAttributeValue)("viewDimensions").grouped||[],p=s[0],m=s.slice(1);return f?n.default.createElement(o.Flex,{"data-testid":"groupBoxes",flexWrap:!0,flex:!0,position:"relative",height:{min:"150px"}},d?n.default.createElement(l.default,null):m.length?Object.keys(u).map((function(e){return n.default.createElement(b,{key:e,label:e,subTree:u[e],data:a,uiName:t,groupedBy:m,hasMore:m.length>1})})):n.default.createElement(b,{key:p,label:p,subTree:u,data:a,uiName:t,groupedBy:m})):n.default.createElement(h,null)};t.default=(0,n.memo)(y)},25369:(e,t,r)=>{t.Ay=void 0;var n=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=v(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(96540)),a=g(r(51365)),o=g(r(18121)),i=g(r(10534)),l=r(22332),u=g(r(74113)),c=g(r(75200)),f=g(r(34578)),d=g(r(89031)),s=g(r(28805)),p=g(r(7365)),m=["uiName"];function g(e){return e&&e.__esModule?e:{default:e}}function v(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(v=function(e){return e?r:t})(e)}function h(){return h=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,m),g=(0,l.useChart)(),v=(0,o.default)({onHover:g.focus,onBlur:g.blur,isOut:function(e){return!e||!e.closest('[data-toolbox="'+g.getId()+'"]')&&!e.closest('[data-chartid="'+g.getId()+'"]')}},[g]),b=(0,a.default)((function(e){v.current=e,t&&(t.current=e)}))[1],y=(0,l.useAttributeValue)("showingInfo"),w=(0,l.useAttributeValue)("focused");return n.default.createElement(d.default,h({ref:b},i),n.default.createElement(c.default,null),n.default.createElement(f.default,{opacity:w?1:.7}),n.default.createElement(u.default,{uiName:r,column:!0,gap:4,padding:[4,2]},n.default.createElement(s.default,{uiName:r})),!y&&n.default.createElement(p.default,null))}));t.Ay=(0,i.default)(b)},8367:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var n=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=d(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(96540)),a=f(r(40961)),o=r(22332),i=f(r(93331)),l=f(r(22007)),u=f(r(44862)),c=f(r(9408));function f(e){return e&&e.__esModule?e:{default:e}}function d(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(d=function(e){return e?r:t})(e)}var s={right:"left",bottom:"top"},p={right:"left",top:"bottom"},m={left:"right",bottom:"top"},g={left:"right",top:"bottom"};t.default=function(e){var 
t=e.target,r=e.label,f=e.groupLabel,d=e.data,v=e.id,h=(0,n.useRef)(),b=(0,n.useRef)(),y=(0,n.useState)(g),w=y[0],O=y[1];b.current=(0,l.default)(t,h,w,"width"),(0,n.useEffect)((function(){if(null!=t&&t.getBoundingClientRect&&h.current){var e=t.getBoundingClientRect(),r=e.right,n=e.bottom,a=window.innerHeight,o=window.innerWidth,i=h.current.getBoundingClientRect(),l=i.width,u=i.height;O(function(e,t){return e&&t?s:e?p:t?m:g}(r+l>o,n+u>a))}}),[t]),(0,n.useEffect)((function(){b.current()}),[w]);var x=(0,u.default)(),_=(0,o.useAttributeValue)("id");return a.default.createPortal(n.default.createElement(i.default,{"data-toolbox":_,ref:h,width:{max:"100%"},column:!0,"data-testid":"drop",sx:{pointerEvents:"none"}},n.default.createElement(c.default,{"data-testid":"chartPopover",label:r,groupLabel:f,data:d,id:v})),x)}},70741:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var n=i(r(96540)),a=i(r(8711)),o=r(83199);function i(e){return e&&e.__esModule?e:{default:e}}var l=(0,a.default)(o.Flex).attrs({"data-testid":"chartPopover-label"}).withConfig({displayName:"label__GridRow",componentId:"sc-1j7ox7-0"})(["display:contents;"]);t.default=function(e){var t=e.label,r=e.value;return n.default.createElement(l,null,n.default.createElement(o.TextMicro,{padding:[1,0]},t),n.default.createElement(o.TextSmall,{strong:!0},(null==r?void 0:r.join(", "))||"-"))}},9408:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var n=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=f(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(96540)),a=c(r(8711)),o=r(83199),i=r(22332),l=r(99010),u=c(r(70741));function c(e){return e&&e.__esModule?e:{default:e}}function f(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(f=function(e){return e?r:t})(e)}var d=(0,a.default)(o.Flex).attrs((function(e){return{round:!0,border:{side:"all",color:"elementBackground"},width:{min:"196px",max:e.maxWidth?e.maxWidth+"px":"80vw"},background:"dropdown",column:!0,padding:[4],gap:1}})).withConfig({displayName:"labels__Container",componentId:"sc-5eymlk-0"})(["box-shadow:0px 8px 12px rgba(9,30,66,0.15),0px 0px 1px rgba(9,30,66,0.31);"]),s=(0,a.default)(l.BaseColorBar).attrs({position:"absolute",top:1,left:0,backgroundOpacity:.4,round:.5}).withConfig({displayName:"labels__ColorBackground",componentId:"sc-5eymlk-1"})([""]),p=a.default.div.withConfig({displayName:"labels__Grid",componentId:"sc-5eymlk-2"})(["display:grid;width:100%;grid-template-columns:auto 2fr;column-gap:8px;align-items:center;"]),m=(0,n.forwardRef)((function(e,t){var r=e.label,a=e.groupLabel,l=e.data,c=e.id,f=(0,i.useChart)(),m=f.getAttribute("viewDimensions"),g=f.getDimensionIndex(c),v=(0,i.useAttributeValue)("min"),h=(0,i.useAttributeValue)("max"),b=.9*f.getUI().getChartWidth(),y=f.getRowDimensionValue(c,l),w=(0,i.useConverted)(y,{valueKey:"percent"});return 
n.default.createElement(d,{"data-testid":"chartPopover-labels",maxWidth:b,gap:2,ref:t},n.default.createElement(o.Flex,{column:!0,gap:1},n.default.createElement(o.TextMicro,null,a),n.default.createElement(o.TextMicro,{strong:!0},r),n.default.createElement(o.Flex,{alignItems:"center",position:"relative"},n.default.createElement(s,{value:y,min:v,max:h,bg:f.getThemeAttribute("themeGroupBoxesMax"),height:"18px"}),n.default.createElement(o.TextMicro,{padding:[1.5,2],strong:!0},w,"-"!==w&&"%"))),!(null==m||!m.labels)&&n.default.createElement(p,{gap:1,column:!0},Object.keys(m.labels).map((function(e){var t;return n.default.createElement(u.default,{key:e,label:e,value:null==(t=m.labels[e])?void 0:t[g]})}))))}));t.default=(0,n.memo)(m)},5702:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var n=r(22332),a=r(42872);t.default=function(e){var t,r,o=(0,n.useChart)(),i=(0,n.useForceUpdate)();return(0,n.useImmediateListener)((function(){return o.getUI(e).on("groupBoxChanged",i)}),[o]),(null==(t=(r=o.getUI(e)).getGroupBox)?void 0:t.call(r))||a.initialValue}},39600:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var n=r(22332);t.default=function(e){var t=(0,n.useChart)(),r=(0,n.useForceUpdate)();return(0,n.useImmediateListener)((function(){return t.getUI(e).on("groupBoxRowDataChanged",r)}),[t]),t.getUI(e).getGroupBoxRowData()}},41377:(e,t)=>{t.__esModule=!0,t.getYPosition=t.getXPosition=t.getRows=t.getOffsetPosition=t.getFullWidth=t.getFullHeight=t.getColumns=t.getCellBoxSize=t.defaultPadding=t.defaultCellSize=t.defaultAspectRatio=void 0;var r=t.defaultCellSize=17,n=t.defaultPadding=1,a=t.defaultAspectRatio=Math.round(16/9);t.getCellBoxSize=function(e,t){return void 0===e&&(e=r),void 0===t&&(t=n),e-t},t.getRows=function(e,t){return void 0===t&&(t=a),Math.sqrt(e.length/t)||1},t.getColumns=function(e,t){return void 0===t&&(t=a),e*t||1},t.getXPosition=function(e,t,n){return void 0===n&&(n=r),Math.floor(t%e)*n},t.getYPosition=function(e,t,n){return void 0===n&&(n=r),Math.floor(t/e)*n},t.getFullWidth=function(e,t){return void 0===t&&(t=r),Math.ceil(e)*t},t.getFullHeight=function(e,t,n){return void 0===t&&(t=r),void 0===n&&(n=r),Math.ceil(e)*t+n},t.getOffsetPosition=function(e,t){return void 0===t&&(t=r),Math.floor(e/t)}},75200:(e,t,r)=>{t.__esModule=!0,t.default=t.Container=void 0;var n=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=f(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(96540)),a=r(83199),o=r(22332),i=c(r(90592)),l=c(r(99684)),u=r(57513);function c(e){return e&&e.__esModule?e:{default:e}}function f(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(f=function(e){return e?r:t})(e)}function d(){return d=Object.assign?Object.assign.bind():function(e){for(var t=1;t{t.__esModule=!0,t.default=void 0;var n=r(8711);function a(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function o(e){for(var t=1;t{t.__esModule=!0,t.default=void 0;var n=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var 
r=o(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var i in e)if("default"!==i&&{}.hasOwnProperty.call(e,i)){var l=a?Object.getOwnPropertyDescriptor(e,i):null;l&&(l.get||l.set)?Object.defineProperty(n,i,l):n[i]=e[i]}return n.default=e,r&&r.set(e,n),n}(r(96540)),a=["children","Component","maxHeight","maxWidth","maxFontSize","minFontSize"];function o(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(o=function(e){return e?r:t})(e)}function i(){return i=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,a),v=(0,n.useState)(),h=v[0],b=v[1],y=(0,n.useRef)(!1);return(0,n.useEffect)((function(){if(h){var e=requestAnimationFrame((function(){y.current=!1;var e=s;for(h.style.fontSize=e+"px";!y.current&&e>m&&(h.offsetWidth>f||h.offsetHeight>u);){e-=Math.ceil(e/100),h.style.fontSize=e+"px"}}));return function(){y.current=!0,cancelAnimationFrame(e)}}}),[t,u,f,h]),n.default.createElement(o,i({truncate:!0,ref:b},g),t)}},54924:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var n=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=u(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(96540)),a=l(r(86263)),o=l(r(33640)),i=["text","Component","noTooltip"];function l(e){return e&&e.__esModule?e:{default:e}}function u(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(u=function(e){return e?r:t})(e)}function c(){return c=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,i),d=(0,n.useState)(""),s=d[0],p=d[1],m=(0,n.useState)(),g=m[0],v=m[1];return(0,n.useEffect)((function(){if(g){for(var e=g.offsetWidth,r=0;g.scrollWidth>e;)g.textContent=(0,a.default)(g.textContent,r),r+=1;g.textContent!==t&&p(t)}}),[t,g]),n.default.createElement(o.default,{content:!u&&s?t:"",align:"bottom",isBasic:!0},n.default.createElement(l,c({truncate:!0,ref:v},f),t))}},62841:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var n=r(8711),a=(0,n.keyframes)(["from{opacity:0.4;}to{opacity:1;}"]),o=(0,n.css)(["animation:"," 1.6s ease-in infinite;"],a);t.default=o},39912:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var n=r(96540),a=r(69982);t.default=function(){var e=(0,n.useState)(!1),t=e[0],r=e[1],o=(0,n.useRef)(),i=(0,n.useRef)(),l=(0,n.useCallback)((function(){o.current&&((0,a.unstable_cancelCallback)(o.current),o.current=void 0,r(!1)),i.current&&(i.current(),i.current=void 0)}),[]),u=(0,n.useCallback)((function(e,t){void 0===t&&(t=a.unstable_IdlePriority),l();var n=e(),u=(0,a.unstable_runWithPriority)(t,(function e(){var t=n.next();if(i.current=t.value,!t.done)return e;r(!1)}));u&&((0,a.unstable_runWithPriority)(a.unstable_ImmediatePriority,(function(){return r(!0)})),o.current=(0,a.unstable_scheduleCallback)(t,u))}),[]);return(0,n.useEffect)((function(){return l}),[]),[t,u,l]}},10534:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var n=c(r(96540)),a=r(22332),o=c(r(94515)),i=c(r(4357)),l=c(r(14154)),u=c(r(89380));function c(e){return e&&e.__esModule?e:{default:e}}t.default=function(e,t){void 0===t&&(t={});var 
r=(0,o.default)((0,l.default)(e));t.tile&&(r=(0,u.default)(r)),r=(0,a.withChartProvider)((0,i.default)(r));return function(e){return n.default.createElement(r,e)}}},94515:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var n=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=o(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var i in e)if("default"!==i&&{}.hasOwnProperty.call(e,i)){var l=a?Object.getOwnPropertyDescriptor(e,i):null;l&&(l.get||l.set)?Object.defineProperty(n,i,l):n[i]=e[i]}return n.default=e,r&&r.set(e,n),n}(r(96540)),a=r(22332);function o(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(o=function(e){return e?r:t})(e)}function i(){return i=Object.assign?Object.assign.bind():function(e){for(var t=1;t{t.__esModule=!0,t.default=void 0;var n=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=i(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var l=a?Object.getOwnPropertyDescriptor(e,o):null;l&&(l.get||l.set)?Object.defineProperty(n,o,l):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(96540)),a=r(22332),o=["isVisible","height","width"];function i(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(i=function(e){return e?r:t})(e)}function l(){return l=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return a}(t,o),m=(0,a.useChart)();return(0,a.useImmediateListener)((function(){if(u&&(!p.uiName||"default"===p.uiName)){var e=window.requestAnimationFrame(m.activate);return function(){window.cancelAnimationFrame(e),m.deactivate()}}}),[u,m,p.uiName]),n.default.createElement(e,l({ref:r,height:f,width:s},p))}))}},4357:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var n=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=i(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var l=a?Object.getOwnPropertyDescriptor(e,o):null;l&&(l.get||l.set)?Object.defineProperty(n,o,l):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(96540)),a=r(83199),o=r(22332);function i(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(i=function(e){return e?r:t})(e)}function l(){return l=Object.assign?Object.assign.bind():function(e){for(var t=1;t{t.__esModule=!0,t.default=t.Title=t.HeadWrapper=t.ChartWrapper=void 0;var n=y(r(96540)),a=y(r(8711)),o=r(83199),i=y(r(58655)),l=y(r(79045)),u=y(r(6504)),c=y(r(18121)),f=y(r(90592)),d=r(22332),s=y(r(34578)),p=r(99010),m=y(r(33640)),g=y(r(93693)),v=["size"],h=["children","customChildren","hasFilters"],b=["count","tile","height","width","children","hasFilters"];function y(e){return e&&e.__esModule?e:{default:e}}function w(){return w=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return a}var P=(0,a.default)(o.Text).withConfig({displayName:"withTile__Label",componentId:"sc-1oake2r-0"})(["line-height:1;font-size:",";"],(function(e){return e.fontSize})),E=(0,a.default)(o.Flex).attrs((function(e){var t=e.size,r=j(e,v);return 
x({background:"panelBg",round:!0,fontSize:parseInt(t/3,10),height:"100%",width:"100%",position:"relative"},r)})).withConfig({displayName:"withTile__ChartHeadWrapper",componentId:"sc-1oake2r-1"})(["font-size:","px;"],(function(e){return e.fontSize>11?11:e.fontSize<8?8:e.fontSize})),k=t.Title=function(){var e=(0,d.useChart)(),t=(0,d.useTitle)();return n.default.createElement(P,{fontSize:"1em",textAlign:"center",color:"sectionDescription",width:"80%",onClick:function(t){t.preventDefault(),e.sdk.trigger("goToLink",e)},cursor:"pointer",padding:[2,0,0]},t)},M=t.HeadWrapper=function(e){var t,r=e.children,a=e.customChildren,g=e.hasFilters,v=void 0===g||g,b=j(e,h),y=(0,d.useOnResize)().width,O=(0,d.useAttributeValue)("focused"),x=null==(t=(0,d.useDimensionIds)())?void 0:t[0],_=(0,d.useAttributeValue)("leftHeaderElements"),P=y;P=P<20?20:P>50?50:P;var M=(0,d.useChart)(),C=(0,c.default)({onHover:M.focus,onBlur:M.blur,isOut:function(e){return!e||!e.closest('[data-toolbox="'+M.getId()+'"]')&&!e.closest('[data-chartid="'+M.getId()+'"]')}},[M]),S=(0,d.useAttributeValue)("hasToolbox"),A=(0,d.useAttributeValue)("showAnomalies"),D=(0,d.useColor)("themeShadow"),T=(0,l.default)(O,400),L=(0,d.useLatestValue)("selected",{valueKey:"arp"})||0;return n.default.createElement(E,w({size:P},b,{ref:C}),S&&O&&T&&n.default.createElement(f.default,{position:"absolute",top:"-16px",right:"0",background:"mainChartHeaderBg",width:{min:"100%"},padding:[1],sx:{boxShadow:"0px 1px 5px 0px "+D+";"},overflow:"hidden"},v&&y>400&&n.default.createElement(o.Box,{width:"100%"},n.default.createElement(s.default,{border:"none",opacity:O?1:.1,focused:O}))),n.default.createElement(o.Flex,{column:!0,width:5,padding:[1,0]},_.map((function(e,t){return n.default.createElement(e,{key:t,plain:!0})})),y<400&&n.default.createElement(o.Flex,{column:!0,width:5},n.default.createElement(s.default,{column:!0,border:"none",justifyContent:"start",plain:!0,opacity:O?1:.1,focused:O}))),n.default.createElement(o.Flex,{column:!0,alignItems:"center",justifyContent:"center",padding:[1,0],height:"100%",width:"100%",position:"relative",overflow:"hidden"},n.default.createElement(k,null),r),n.default.createElement(o.Flex,{column:!0,width:5,alignItems:"center",padding:[4,0],gap:2},A&&"selected"===x&&n.default.createElement(n.default.Fragment,null,n.default.createElement(o.Flex,{column:!0,height:"100%",width:"2px",background:"neutralHighlight",justifyContent:"end"},n.default.createElement(p.ColorBar,{id:"selected",valueKey:"arp",width:"2px",styleDimension:"height",round:.5})),n.default.createElement(m.default,{content:"Anomaly rate for this metric"},n.default.createElement(u.default,{svg:i.default,color:L&&L>0?"anomalyTextLite":"neutralHighlight",size:"14px"})))),a)};t.ChartWrapper=(0,a.default)(o.Flex).attrs((function(e){return x({column:!0,justifyContent:"center",alignContent:"center",gap:2,position:"relative",width:"100%",height:"100%",overflow:"hidden"},e)})).withConfig({displayName:"withTile__ChartWrapper",componentId:"sc-1oake2r-2"})([""]),t.default=function(e){return function(t){var r=t.count,a=t.tile,o=void 0===a||a,i=t.height,l=void 0===i?"100%":i,u=t.width,c=void 0===u?"100%":u,f=t.children,s=t.hasFilters,p=void 0===s||s,m=j(t,b),v=(0,d.useAttributeValue)("showingInfo"),h=(0,d.useAttributeValue)("focused"),y=(0,d.useColor)("themeShadow"),O=h?{sx:{boxShadow:"0px 1px 5px 0px "+y+";"}}:{};return 
o?n.default.createElement(M,w({count:r,uiName:m.uiName,height:l,width:c,customChildren:f,hasFilters:p},O),v?n.default.createElement(g.default,null):n.default.createElement(e,m)):n.default.createElement(E,w({size:20,height:l,width:c},O),v?n.default.createElement(g.default,null):n.default.createElement(e,m),f)}}},6963:(e,t,r)=>{t.__esModule=!0,t.default=t.ContentWrapper=t.Container=void 0;var n=b(r(96540)),a=b(r(8711)),o=r(83199),i=r(22332),l=r(18121),u=v(r(74113)),c=v(r(34101)),f=v(r(75029)),d=v(r(31899)),s=v(r(94833)),p=r(48666),m=v(r(15587)),g=v(r(79881));function v(e){return e&&e.__esModule?e:{default:e}}function h(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(h=function(e){return e?r:t})(e)}function b(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=h(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}function y(){return y=Object.assign?Object.assign.bind():function(e){for(var t=1;t{t.__esModule=!0,t.default=void 0;var n=s(r(96540)),a=r(83199),o=f(r(8001)),i=f(r(87083)),l=f(r(51081)),u=s(r(6504)),c=r(22332);function f(e){return e&&e.__esModule?e:{default:e}}function d(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(d=function(e){return e?r:t})(e)}function s(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=d(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}function p(){return p=Object.assign?Object.assign.bind():function(e){for(var t=1;t{t.__esModule=!0,t.default=t.ColorBar=t.Color=t.BaseColorBar=void 0;var n,a=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=p(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(96540)),o=(n=r(8711))&&n.__esModule?n:{default:n},i=r(83199),l=r(22332),u=r(22753),c=["bg"],f=["value","min","max","valueKey","bg","styleDimension"],d=["id","partIndex","valueKey"],s=["id","partIndex"];function p(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(p=function(e){return e?r:t})(e)}function m(){return m=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return a}var b=t.Color=(0,o.default)(i.Flex).attrs((function(e){return function(e){for(var t=1;t0?c<0?0:c:p,v=(0,l.useLatestValue)(t,{valueKey:n})||0;return a.default.createElement(y,m({value:v,min:g,max:p>s?p:s,valueKey:n,bg:u},o))},t.default=function(e){var t=e.id,r=e.partIndex,n=h(e,s),o=(0,l.useChart)().selectDimensionColor(t,r);return o?a.default.createElement(b,m({bg:o},n)):null}},1249:(e,t,r)=>{t.__esModule=!0,t.default=t.Name=void 0;var n,a=function(e,t){if(!t&&e&&e.__esModule)return 
e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=f(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(96540)),o=r(83199),i=r(22332),l=(n=r(54924))&&n.__esModule?n:{default:n},u=["children","isEmpty"],c=["id","partIndex","fallback"];function f(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(f=function(e){return e?r:t})(e)}function d(){return d=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return a}var p=t.Name=(0,a.memo)((0,a.forwardRef)((function(e,t){var r=e.children,n=e.isEmpty,i=s(e,u);return a.default.createElement(l.default,d({text:r,Component:o.TextSmall,color:n?"textNoFocus":"text",whiteSpace:"nowrap",ref:t,"data-testid":"chartDimensions-name"},i))})));t.default=function(e){var t=e.id,r=e.partIndex,n=e.fallback,o=void 0===n?"":n,l=s(e,c),u=(0,i.useChart)().getDimensionName(t,r);return a.default.createElement(p,d({},l,{isEmpty:!u}),u||o)}},75010:(e,t,r)=>{t.__esModule=!0,t.default=t.Value=void 0;var n=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=l(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(96540)),a=r(83199),o=r(22332),i=["visible","dimensionId"];function l(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(l=function(e){return e?r:t})(e)}function u(){return u=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,i),l=(0,o.useUnitSign)({dimensionId:r});return t?n.default.createElement(c,a,l):null};t.default=(0,n.memo)(f)},24652:(e,t,r)=>{t.__esModule=!0,t.default=t.Value=void 0;var n=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=l(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(96540)),a=r(83199),o=r(22332),i=["id","visible","valueKey","period","objKey","Component"];function l(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(l=function(e){return e?r:t})(e)}function u(){return u=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,i),v=(0,o.useConvertedValue)(r,d,{valueKey:l,objKey:s,allowNull:!0});return a?n.default.createElement(m,u({},g,{ref:t}),v):null}));t.default=f},96720:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var n=c(r(96540)),a=c(r(8711)),o=r(83199),i=c(r(30464)),l=r(22332),u=c(r(6504));function c(e){return e&&e.__esModule?e:{default:e}}function f(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function 
d(e,t,r){return(t=function(e){var t=function(e,t){if("object"!=typeof e||!e)return e;var r=e[Symbol.toPrimitive];if(void 0!==r){var n=r.call(e,t||"default");if("object"!=typeof n)return n;throw new TypeError("@@toPrimitive must return a primitive value.")}return("string"===t?String:Number)(e)}(e,"string");return"symbol"==typeof t?t:t+""}(t))in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}var s=(0,a.default)(o.Flex).attrs((function(e){return function(e){for(var t=1;t *{color:",";fill:",";}"],(0,o.getColor)("text"),(0,o.getColor)("text"));t.default=function(){var e=(0,l.useChart)(),t=(0,l.useAttributeValue)("expanded");return n.default.createElement(s,{cursor:"pointer",onClick:function(){return e.updateAttribute("expanded",!t)},alignSelf:"center"},n.default.createElement(u.default,{svg:i.default,color:"textLite",width:"7.5px",height:"5px",rotate:t?2:0}),n.default.createElement(o.TextSmall,{color:"textLite"},t?"Collapse":"Expand"),n.default.createElement(u.default,{svg:i.default,color:"textLite",width:"7.5px",height:"5px",rotate:t?2:0}))}},39591:(e,t,r)=>{t.__esModule=!0,t.default=t.Container=void 0;var n=s(r(96540)),a=r(83199),o=s(r(69529)),i=s(r(49941)),l=s(r(98744)),u=r(65562),c=s(r(93658)),f=s(r(96720)),d=r(22753);function s(e){return e&&e.__esModule?e:{default:e}}function p(){return p=Object.assign?Object.assign.bind():function(e){for(var t=1;t{t.Ay=t.N1=void 0;var n=h(r(96540)),a=g(r(51365)),o=g(r(18121)),i=g(r(10534)),l=r(22332),u=g(r(75200)),c=g(r(93693)),f=h(r(6963)),d=g(r(34578)),s=g(r(89031)),p=g(r(39591)),m=["hasHeader","hasFooter","hasFilters","uiName"];function g(e){return e&&e.__esModule?e:{default:e}}function v(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(v=function(e){return e?r:t})(e)}function h(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=v(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}function b(){return b=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,m),x=(0,l.useChart)(),_=(0,l.useAttributeValue)("showingInfo"),j=(0,l.useAttributeValue)("sparkline"),P=(0,o.default)({onHover:x.focus,onBlur:x.blur,isOut:function(e){return!e||!e.closest('[data-toolbox="'+x.getId()+'"]')&&!e.closest('[data-chartid="'+x.getId()+'"]')}},[x]),E=(0,a.default)((function(e){P.current=e,t&&(t.current=e)}))[1],k=(0,l.useAttributeValue)("focused");return n.default.createElement(s.default,b({ref:E},j&&{border:!1,background:"transparent"},O),i&&n.default.createElement(u.default,null),y&&n.default.createElement(d.default,{opacity:k?1:.7}),n.default.createElement(f.ContentWrapper,null,_?n.default.createElement(c.default,null):n.default.createElement(f.default,{uiName:w})),v&&n.default.createElement(p.default,null))}));t.Ay=(0,i.default)(y)},93658:(e,t,r)=>{t.__esModule=!0,t.getDateDiff=t.default=void 0;var n,a=(n=r(96540))&&n.__esModule?n:{default:n},o=r(83199),i=r(22332);function l(){return l=Object.assign?Object.assign.bind():function(e){for(var t=1;t=p&&!!n}},{value:o,unit:"h",check:function(){return r>=m&&!!o}},{value:i,unit:"m",check:function(){return!!i}},{value:a-=60*i,unit:"s",check:function(){return 
r9?t:"0"+t:r)+n}(c(c({},t),{},{hasPrev:!!e[e.length-1]}));return r&&e.push(r),e}),[])},v=function(e){var t=e.date,r=e.after,n=e.before,l=(0,i.useFormatTime)(1e3*r),u=(0,i.useFormatTime)(1e3*n),c=g(r,n);return a.default.createElement(o.Flex,{gap:1},a.default.createElement(o.TextNano,{color:"textDescription"},t," \u2022"),a.default.createElement(o.TextNano,{color:"textLite"},l," \u2192 ",u),a.default.createElement(o.TextNano,{color:"textDescription"},"\u2022 ",c))},h=function(e){var t=e.afterDate,r=e.beforeDate,n=e.after,l=e.before,u=(0,i.useFormatTime)(1e3*n),c=(0,i.useFormatTime)(1e3*l),f=g(n,l);return a.default.createElement(o.Flex,{gap:1},a.default.createElement(o.TextNano,{color:"textDescription"},t," \u2022"),a.default.createElement(o.TextNano,{color:"textLite"},u," \u2192"),a.default.createElement(o.TextNano,{color:"textDescription"},r," \u2022"),a.default.createElement(o.TextNano,{color:"textLite"},c),a.default.createElement(o.TextNano,{color:"textDescription"},"\u2022 ",f))},b=function(e){var t=e.after,r=e.before,n=(0,i.useFormatDate)(1e3*r),o=(0,i.useFormatDate)(1e3*t);return n===o?a.default.createElement(v,{date:o,after:t,before:r}):a.default.createElement(h,{afterDate:o,beforeDate:n,after:t,before:r})};t.default=function(e){var t,r=(0,i.useChart)(),n=(0,i.useAttributeValue)("overlays").highlight,u=null==n?void 0:n.range,c=null!=(t=null==n?void 0:n.moveX)?t:{},f=c.after,d=c.before;return a.default.createElement(o.Flex,l({gap:1,justifyContent:"between",flex:!0},e),u?a.default.createElement(o.Flex,{onClick:function(){d&&f&&r.moveX(f,d)},cursor:"pointer",gap:1,padding:[0,11,0]},a.default.createElement(o.TextNano,{color:"textLite"},"Highlight:"),a.default.createElement(b,{after:u[0],before:u[1]})):a.default.createElement("div",null),a.default.createElement(s,null))}},33949:(e,t,r)=>{t.__esModule=!0,t.default=t.SkeletonDimension=t.EmptyDimension=void 0;var n,a=g(r(96540)),o=r(8711),i=r(83199),l=g(r(99010)),u=g(r(1249)),c=g(r(24652)),f=g(r(33640)),d=(n=r(75010))&&n.__esModule?n:{default:n},s=r(22332),p=r(22753);function m(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(m=function(e){return e?r:t})(e)}function g(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=m(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}function v(){return v=Object.assign?Object.assign.bind():function(e){for(var t=1;t{t.__esModule=!0,t.default=void 0;var n=l(r(96540)),a=l(r(8711)),o=r(83199),i=r(22332);function l(e){return e&&e.__esModule?e:{default:e}}var u=(0,a.default)(o.Flex).attrs({width:"320px",height:"12px",round:!0}).withConfig({displayName:"heatmapColors__LinearColorScaleBar",componentId:"sc-13fexd8-0"})(["background:linear-gradient( to right,rgb(62,73,137),rgb(49,104,142),rgb(38,130,142),rgb(31,158,137),rgb(53,183,121),rgb(110,206,88),rgb(181,222,43),rgb(253,231,37) );"]);t.default=function(){var e=(0,i.useChart)(),t=(0,i.useAttributeValue)("max"),r=(0,i.useUnitSign)();return(0,i.useAttributeValue)("theme"),n.default.createElement(o.Flex,{"data-testid":"heatmap-legend",gap:2,alignItems:"center",padding:[2,11]},n.default.createElement(o.TextNano,null,e.getConvertedValue(0)," 
",r),n.default.createElement(u,null),n.default.createElement(o.TextNano,null,e.getConvertedValue(t)," ",r))}},69529:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var n=m(r(96540)),a=s(r(8711)),o=r(81431),i=r(83199),l=s(r(53965)),u=s(r(14608)),c=r(22332),f=m(r(33949)),d=s(r(6504));function s(e){return e&&e.__esModule?e:{default:e}}function p(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(p=function(e){return e?r:t})(e)}function m(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=p(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}function g(){return g=Object.assign?Object.assign.bind():function(e){for(var t=1;t{t.__esModule=!0,t.default=void 0;var n=h(r(96540)),a=g(r(8711)),o=r(83199),i=g(r(70922)),l=g(r(58937)),u=g(r(29085)),c=g(r(99444)),f=g(r(83357)),d=h(r(6504)),s=r(22332),p=g(r(55969)),m=g(r(49628));function g(e){return e&&e.__esModule?e:{default:e}}function v(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(v=function(e){return e?r:t})(e)}function h(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=v(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}function b(){return b=Object.assign?Object.assign.bind():function(e){for(var t=1;t{t.__esModule=!0,t.default=void 0;var n=m(r(96540)),a=r(83199),o=s(r(3113)),i=s(r(73983)),l=s(r(20327)),u=s(r(26762)),c=r(22332),f=m(r(6504)),d=["value","onChange","onClick","open","item"];function s(e){return e&&e.__esModule?e:{default:e}}function p(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(p=function(e){return e?r:t})(e)}function m(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=p(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}function g(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function v(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,d),m=s.icon,g=s.value,v=s.title;return n.default.createElement(a.Flex,b({ref:t,alignItems:"end"},p),n.default.createElement(f.Button,{icon:m,title:v,active:r===g,onClick:function(){return o(g)},padding:"2px",small:!0}),n.default.createElement(f.Button,{icon:n.default.createElement(f.default,{svg:c?l.default:u.default,size:"12px"}),onClick:i,padding:"2px",stroked:!0,small:!0}))})),w=function(e){var t=e.onItemClick,r=e.items[0],o=r.icon,i=r.value,l=r.title,u=(0,c.useAttributeValue)("id");return 
n.default.createElement(a.Flex,{background:"dropdown",round:{side:"bottom"},border:{side:"bottom",color:"borderSecondary"},padding:[1,0],"data-toolbox":u},n.default.createElement(f.Button,{title:l,icon:o,onClick:function(){return t(i)},padding:"2px",small:!0}))},O=function(){var e=(0,c.useChart)(),t=(0,c.useAttribute)("navigation"),r=t[0],l=t[1],u=function(e){return(0,n.useMemo)((function(){return[{value:"select",title:"Select and zoom",icon:n.default.createElement(f.default,{svg:o.default,size:"16px"}),"data-track":e.track("selectHorizontal")},{value:"selectVertical",title:"Select vertical and zoom",icon:n.default.createElement(f.default,{svg:i.default,size:"16px"}),"data-track":e.track("selectVertical")}]}),[e])}(e),d=(0,n.useMemo)((function(){return u.reduce((function(e,t){return t.value===r?v(v({},e),{},{selectedItem:t}):v(v({},e),{},{remainingItems:[t]})}),{selectedItem:u[0],remainingItems:[]})}),[r]),s=d.selectedItem,p=d.remainingItems;return n.default.createElement(a.Menu,{value:r,onChange:l,items:p,Dropdown:w,"data-track":"select"},n.default.createElement(y,{value:r,onChange:l,item:s}))};t.default=(0,n.memo)(O)},97556:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var n=u(r(96540)),a=r(83199),o=r(22332),i=u(r(32464));function l(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(l=function(e){return e?r:t})(e)}function u(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=l(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}var c={critical:"error",clear:"success"},f=function(e){var t=e.id,r=(0,o.useAttributeValue)("overlays")[t],l=r.status,u=r.value,f=c[l]||l,d=(0,i.getColors)(f).color;return n.default.createElement(i.default,{type:f,noBorder:!0},n.default.createElement(a.TextSmall,{color:d},"Triggered value:"," ",n.default.createElement(a.TextSmall,{strong:!0,color:d},u)))};t.default=(0,n.memo)(f)},74787:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var n=u(r(96540)),a=r(83199),o=r(22332),i=u(r(32464));function l(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(l=function(e){return e?r:t})(e)}function u(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=l(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}var c={critical:"error",clear:"success"},f=function(e){var t=e.id,r=(0,o.useAttributeValue)("overlays")[t],l=r.status,u=r.valueTriggered,f=c[l]||l,d=(0,i.getColors)(f).color;return n.default.createElement(i.default,{type:f,noBorder:!0},n.default.createElement(a.TextSmall,{color:d},"Triggered value:"," ",n.default.createElement(a.TextSmall,{strong:!0,color:d},u)))};t.default=(0,n.memo)(f)},33406:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var n=f(r(96540)),a=f(r(8711)),o=r(83199),i=r(22332),l=f(r(54924)),u=f(r(33640)),c=["field","normalize"];function f(e){return e&&e.__esModule?e:{default:e}}function d(){return d=Object.assign?Object.assign.bind():function(e){for(var 
t=1;t=0||(a[r]=e[r]);return a}(e,c),l=(0,i.useAttributeValue)(t),f=(0,i.useChart)();return r&&(l=r(l,f.getAttributes())),l?n.default.createElement(u.default,{content:l},n.default.createElement(s,d({color:"key",text:l,Component:o.TextSmall},a))):null}},85338:(e,t,r)=>{t.__esModule=!0,t.default=t.alignment=void 0;var n,a,o=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=f(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(96540)),i=(n=r(8711))&&n.__esModule?n:{default:n},l=r(83199),u=r(22332),c=["id","align","right","fixed","children","uiName"];function f(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(f=function(e){return e?r:t})(e)}function d(){return d=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,c),v=(0,o.useRef)(),h=(0,o.useState)(),b=h[0],y=h[1],w=(0,u.useChart)(),O=function(e){if(w&&w.getUI(f)&&e&&v.current){var t=function(e,t,r,n,a){void 0===e&&(e=s.elementMiddle);var o=r.from,i=r.width,l=t.getUI(a).getChartWidth();return(p[e]||p.elementMiddle)({from:o,width:i,chartWidth:l,element:n})}(r,w,e,v.current,f),n=t[1];v.current.style.right="calc(100% - "+(n+a)+"px)"}};return(0,o.useLayoutEffect)((function(){return!i&&w.getUI(f).on("overlayedAreaChanged:"+t,(function(e){O(e),y((function(t){return!!t!==!!e?e:t}))}))}),[]),(0,o.useLayoutEffect)((function(){return!i&&O(b)}),[b]),b||i?o.default.createElement(m,d({ref:v},g),l):null};t.default=(0,o.memo)(g)},98066:(e,t,r)=>{t.__esModule=!0,t.default=t.Period=void 0;var n=s(r(96540)),a=s(r(8711)),o=r(83199),i=s(r(35083)),l=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=d(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(6504)),u=s(r(33640)),c=r(22332),f=r(93658);function d(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(d=function(e){return e?r:t})(e)}function s(e){return e&&e.__esModule?e:{default:e}}var p=(0,a.default)(l.Button).withConfig({displayName:"correlation__CorrelationButton",componentId:"sc-a0l0u6-0"})(["pointer-events:all;"]);t.Period=function(e){var t=e.id,r=(0,c.useAttributeValue)("overlays")[t].range,a=r[0],i=r[1],l=(0,f.getDateDiff)(a,i);return n.default.createElement(o.TextNano,{strong:!0},l)},t.default=function(e){var t=e.id,r=(0,c.useAttributeValue)("overlays")[t].range,a=r[0],f=function(e){return e<15?"requires 15 secs minimum selection":""}(r[1]-a),d=(0,c.useChart)();return n.default.createElement(u.default,{content:f?"Metrics correlation: "+f:"Run metrics correlation"},n.default.createElement(o.Flex,null,n.default.createElement(p,{"data-track":d.track("metrics-correlation"),icon:n.default.createElement(l.default,{svg:i.default,size:"20px"}),onClick:function(){return d.sdk.trigger("correlation",d,r)},"data-testid":"highlight-correlations",disabled:!!f})))}},8586:(e,t,r)=>{t.__esModule=!0,t.default=t.Divider=void 0;var 
n,a=f(r(96540)),o=(n=r(8711))&&n.__esModule?n:{default:n},i=r(83199),l=f(r(98066)),u=r(22332);function c(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(c=function(e){return e?r:t})(e)}function f(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=c(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}function d(){return d=Object.assign?Object.assign.bind():function(e){for(var t=1;t{t.__esModule=!0,t.default=void 0;var n,a=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=u(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(96540)),o=r(22332),i=(n=r(71840))&&n.__esModule?n:{default:n},l=["type"];function u(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(u=function(e){return e?r:t})(e)}function c(){return c=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return a}(n,l),f=i.default[o];return a.default.createElement(f,c({key:e,id:e,uiName:t},u))})))};t.default=(0,a.memo)(f)},92585:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var n=c(r(96540)),a=c(r(8711)),o=r(83199),i=r(22332),l=c(r(56284)),u=["dimensionId","textProps"];function c(e){return e&&e.__esModule?e:{default:e}}function f(){return f=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,u),o=(0,i.useUnitSign)({dimensionId:t}),c=(0,i.useLatestConvertedValue)(t),m=(0,i.useOnResize)(),g=m.width,v=m.height;return c&&"-"!==c?n.default.createElement(s,f({column:!0},a),n.default.createElement(l.default,f({Component:d,maxHeight:.8*(v-20),maxWidth:g-20,fontSize:"2.1em",lineHeight:"1.1em",strong:!0},p,r),c),n.default.createElement(l.default,f({Component:d,maxHeight:.25*(v-20),maxWidth:.7*(g-20),fontSize:"1.1em",strong:!0},p,{color:"textLite"},r),o)):n.default.createElement(l.default,f({Component:d,maxHeight:.9*(v-20),maxWidth:g-20,fontSize:"2.5em",strong:!0},p,r,a),"string"!==typeof c?"Loading...":"No data")}},48666:(e,t,r)=>{t.__esModule=!0,t.default=t.Processing=void 0;var n=u(r(96540)),a=u(r(8711)),o=r(83199),i=r(22332),l=["defaultValue","uiName"];function u(e){return e&&e.__esModule?e:{default:e}}var c=(0,a.default)(o.Flex).attrs({column:!0,round:!0,border:{side:"all",color:"borderSecondary"},gap:1,padding:[1,2],flex:!1}).withConfig({displayName:"proceeded__ProceededContainer",componentId:"sc-ac5spo-0"})(["direction:initial;"]),f=function(e){var t=e.defaultValue,r=e.uiName,a=function(e,t){if(null==e)return{};var r,n,a={},o=Object.keys(e);for(n=0;n=0||(a[r]=e[r]);return a}(e,l),u=(0,i.useChart)().getUI(r).getChartWidth(),f=(0,i.useChartError)();return 
u<240?null:f&&t?n.default.createElement(c,a,n.default.createElement(o.Text,{textAlign:"center",textTransform:"firstLetter"},f||t)):null},d=(0,a.default)(o.Flex).withConfig({displayName:"proceeded__CenterContainer",componentId:"sc-ac5spo-1"})(["z-index:60000;position:absolute;top:50%;left:50%;transform:translate(-50%,-50%);"]);t.Processing=function(){return n.default.createElement(d,null,n.default.createElement(f,{defaultValue:"Processing"}))},t.default=f},71840:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var n=w(r(96540)),a=w(r(8711)),o=w(r(41956)),i=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=y(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(85338)),l=w(r(97556)),u=w(r(74787)),c=w(r(8586)),f=w(r(48666)),d=w(r(33406)),s=w(r(92585)),p=r(22332),m=["id"],g=["id"],v=["id"],h=["id","uiName"],b=["containerProps"];function y(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(y=function(e){return e?r:t})(e)}function w(e){return e&&e.__esModule?e:{default:e}}function O(){return O=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return a}var _=(0,a.default)(o.default).withConfig({displayName:"types__NoEventsContainer",componentId:"sc-19vz950-0"})(["pointer-events:none;"]);t.default={alarm:function(e){var t=e.id,r=x(e,m);return n.default.createElement(i.default,O({id:t,top:"20px",margin:[0,2,0,0],align:i.alignment.elementLeft},r),n.default.createElement(l.default,{id:t}))},alarmRange:function(e){var t=e.id,r=x(e,g);return n.default.createElement(i.default,O({id:t,top:"20px",margin:[0,2,0,0],align:i.alignment.elementLeft},r),n.default.createElement(u.default,{id:t}))},highlight:function(e){var t=e.id,r=x(e,v);return(0,p.useAttributeValue)("sparkline")?null:n.default.createElement(i.default,O({id:t,align:i.alignment.elementRight,bottom:"25px",right:100},r,{noEvents:!0}),n.default.createElement(c.default,{id:t}))},proceeded:function(e){var t=e.id,r=e.uiName,a=x(e,h);return n.default.createElement(i.default,O({id:t,top:"50%",align:i.alignment.chartMiddle,uiName:r},a),n.default.createElement(f.default,{id:t,uiName:r}))},name:function(e){var t=e.containerProps,r=x(e,b);return n.default.createElement(_,O({isAbsolute:!0,position:"top",margin:[2,0,0,0]},t),n.default.createElement(d.default,r))},latestValue:function(e){return n.default.createElement(_,{isAbsolute:!0,position:"center"},n.default.createElement(s.default,e))}}},42417:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var n=h(r(96540)),a=h(r(8711)),o=r(83199),i=v(r(99010)),l=h(r(1249)),u=v(r(24652)),c=r(22332),f=r(96380),d=r(22753),s=r(67268),p=["children","fractionDigits"],m=["children","showFull"];function g(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(g=function(e){return e?r:t})(e)}function v(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=g(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return 
n.default=e,r&&r.set(e,n),n}function h(e){return e&&e.__esModule?e:{default:e}}function b(){return b=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return a}var w=(0,a.default)(o.Flex).attrs({position:"relative","data-testid":"chartPopover-dimension",padding:[1,0]}).withConfig({displayName:"dimension__GridRow",componentId:"sc-adzl1v-0"})(["display:contents;"]),O=(0,a.default)(i.ColorBar).attrs({position:"absolute",top:1,left:0,backgroundOpacity:.4,round:.5}).withConfig({displayName:"dimension__ColorBackground",componentId:"sc-adzl1v-1"})([""]),x={ANOMALY_RATE:"arp",default:"value"},_=function(e){var t=e.children,r=e.fractionDigits,a=void 0===r?0:r,i=y(e,p),l=t.toString().split("."),c=l[0],f=l[1];return n.default.createElement(o.Flex,{alignItems:"center",justifyContent:"end"},n.default.createElement(u.Value,b({},i,{textAlign:"right"}),c),"undefined"!==typeof f&&n.default.createElement(u.Value,i,"."),n.default.createElement(u.Value,b({as:o.Flex,flex:!1,width:1.6*a},i,{textAlign:"left"}),f))},j=function(e){var t=e.children,r=e.showFull,a=y(e,m);return n.default.createElement(o.Flex,{gap:1,justifyContent:"end"},Object.keys(t).map((function(e){return n.default.createElement(o.Flex,{key:e,border:{size:"1px",side:"all",color:t[e]},round:!0,flex:!1,padding:[0,.5]},n.default.createElement(u.Value,b({},a,{color:t[e]}),r&&f.labels[e]||e))})))};t.default=function(e){var t=e.id,r=e.strong,a=e.rowFlavour,f=(0,c.useVisibleDimensionId)(t),p=(0,c.useChart)().getAttribute("unitsConversionFractionDigits"),m=(0,d.useIsHeatmap)();return n.default.createElement(w,{opacity:f?null:"weak"},n.default.createElement(o.Flex,{alignItems:"center",gap:1,position:"relative"},n.default.createElement(O,{id:t,valueKey:x[a]||x.default,height:"18px"},!m&&n.default.createElement(i.default,{id:t})),n.default.createElement(l.default,{padding:[1,2],flex:!0,id:t,strong:r,noTooltip:!0,color:r?"textFocus":"text"})),n.default.createElement(u.default,{id:t,strong:r,visible:f,Component:_,fractionDigits:p,color:a===s.rowFlavours.default?r?"textFocus":"text":"textLite"}),n.default.createElement(u.default,{id:t,strong:r,visible:f,valueKey:"arp",Component:_,fractionDigits:2,color:a===s.rowFlavours.ANOMALY_RATE?"anomalyTextFocus":"anomalyText"}),n.default.createElement(u.default,{textAlign:"right",id:t,strong:r,visible:f,valueKey:"pa",Component:j,color:a===s.rowFlavours.ANNOTATIONS?r?"textFocus":"text":"textLite",showFull:a===s.rowFlavours.ANNOTATIONS}))}},67268:(e,t,r)=>{t.__esModule=!0,t.rowFlavours=t.default=void 0;var n=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=s(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(96540)),a=d(r(8711)),o=r(83199),i=r(22332),l=d(r(75010)),u=d(r(24413)),c=d(r(17073)),f=d(r(42417));function d(e){return e&&e.__esModule?e:{default:e}}function s(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(s=function(e){return e?r:t})(e)}var p=(0,a.default)(o.Flex).attrs({round:!0,width:{min:"196px",max:"80vw"},background:"dropdown",column:!0,padding:[4],gap:1}).withConfig({displayName:"dimensions__Container",componentId:"sc-172on4u-0"})(["box-shadow:0px 8px 12px rgba(9,30,66,0.15),0px 0px 1px 
rgba(9,30,66,0.31);"]),m=a.default.div.withConfig({displayName:"dimensions__Grid",componentId:"sc-172on4u-1"})(["display:grid;width:100%;grid-template-columns:minmax(150px,max-content) 60px 60px minmax(80px,auto);align-items:center;"]),g=a.default.div.withConfig({displayName:"dimensions__GridHeader",componentId:"sc-172on4u-2"})(["display:contents;"]),v=[null,null],h=function(){var e=Math.floor((window.innerHeight-500)/15)||16;return e<5?5:10},b=function(){return h()/2},y=t.rowFlavours={ANOMALY_RATE:"ANOMALY_RATE",ANNOTATIONS:"ANNOTATIONS",default:"VALUE"},w={ANOMALY_RATE:"anomalyDesc",ANNOTATIONS:"annotationsDesc",default:"valueDesc"},O=function(){var e=(0,i.useChart)(),t=(0,i.useAttributeValue)("hoverX")||v,r=t[0],a=t[1],d=(0,n.useMemo)((function(){var t=e.getClosestRow(r),n=e.onHoverSortDimensions(t,w[a]||w.default)||[];e.getAttribute("selectedDimensions").length>0&&(n=n.filter((function(t){return e.isDimensionVisible(t)})));var o=n.findIndex((function(e){return e===a})),i=n.length,l=Math.floor(function(e,t){return ee-b()?t-(b()+(e-t)):t-b()}(i,o)),u=Math.ceil(function(e,t){return ee-b()?e:t+b()}(i,o));return[l,u,i,n.slice(l,u)]}),[e,a,r]),s=d[0],O=d[1],x=d[2],_=d[3],j=y[a]||y.default;return n.default.createElement(p,{"data-testid":"chartPopover-dimensions",gap:2},n.default.createElement(o.Flex,{column:!0,gap:1},r&&n.default.createElement(c.default,{value:r}),n.default.createElement(u.default,null)),n.default.createElement(o.Flex,{flex:!1,height:3},s>0&&n.default.createElement(o.TextNano,{color:"textLite"},"\u2191",s," more values")),n.default.createElement(m,{gap:1,column:!0},n.default.createElement(g,null,n.default.createElement(o.TextMicro,{strong:!0},"Dimension"),n.default.createElement(o.TextMicro,{color:j===y.default?"text":"textLite",textAlign:"right"},"Value","heatmap"!==e.getAttribute("chartType")&&n.default.createElement(n.default.Fragment,null," ",n.default.createElement(l.default,{visible:!0,strong:j===y.default,color:j===y.default?"text":"textLite"}))),n.default.createElement(o.TextMicro,{strong:j===y.ANOMALY_RATE,color:j===y.ANOMALY_RATE?"text":"textLite",textAlign:"right"},"Anomaly%"),n.default.createElement(o.TextMicro,{strong:j===y.ANNOTATIONS,color:j===y.ANNOTATIONS?"text":"textLite",textAlign:"right"},"Info")),_.map((function(e){return n.default.createElement(f.default,{key:e,id:e,strong:a===e,rowFlavour:j})}))),n.default.createElement(o.Flex,{flex:!1,height:3},O{t.__esModule=!0,t.default=void 0;var n=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=p(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(96540)),a=r(83199),o=s(r(40961)),i=s(r(93331)),l=s(r(22007)),u=s(r(44862)),c=r(28973),f=r(22332),d=s(r(67268));function s(e){return e&&e.__esModule?e:{default:e}}function p(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(p=function(e){return e?r:t})(e)}var m={right:"left",bottom:"top"},g={right:"left",top:"bottom"},v={left:"right",bottom:"top"},h={left:"right",top:"bottom"};t.default=function(e){var 
t=e.uiName,r=(0,f.useChart)(),s=(0,n.useRef)(),p=(0,n.useState)(),b=p[0],y=p[1],w=(0,n.useRef)(),O=(0,n.useRef)(),x=(0,n.useState)(!1),_=x[0],j=x[1],P=(0,n.useState)(m),E=P[0],k=P[1];w.current=b,O.current=(0,l.default)(b,s,E,"width"),(0,n.useEffect)((function(){return(0,c.unregister)(r.getUI(t).on("mousemove",(function(e){if(!r.getAttribute("panning")&&!r.getAttribute("highlighting")){var t=e.offsetX||e.layerX,n=e.offsetY||e.layerY;if(j(!0),w.current){w.current.style.left=t+"px",w.current.style.top=n+"px",O.current();var a=window.innerHeight,o=window.innerWidth,i=s.current.getBoundingClientRect(),l=i.width,u=i.height;k(function(e,t){return e&&t?m:e?g:t?v:h}(t+l>o,n+u>a))}}})),r.getUI(t).on("mouseout",(function(){return j(!1)})),r.onAttributeChange("panning",(function(e){return e&&j(!1)})),r.onAttributeChange("highlighting",(function(e){return e&&j(!1)})))}),[r]);var M=(0,u.default)();return _?n.default.createElement(n.Fragment,null,n.default.createElement(a.Flex,{ref:function(e){return y(e)},position:"absolute"}),o.default.createPortal(n.default.createElement(i.default,{"data-toolbox":r.getId(),margin:[E.top?2:-2,E.right?-2:2],ref:s,width:{max:"100%"},column:!0,"data-testid":"drop",sx:{pointerEvents:"none"}},n.default.createElement(d.default,{uiName:t,"data-testid":"chartPopover"})),M)):null}},17073:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var n=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=i(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var l=a?Object.getOwnPropertyDescriptor(e,o):null;l&&(l.get||l.set)?Object.defineProperty(n,o,l):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(96540)),a=r(83199),o=r(22332);function i(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(i=function(e){return e?r:t})(e)}t.default=function(e){var t=e.value,r=(0,o.useChart)(),i=(0,n.useMemo)((function(){return r.formatDate(t)+" \u2022 "+r.formatTime(t)}),[t]);return n.default.createElement(a.TextMicro,{color:"textDescription","data-testid":"chartPopover-timestamp"},i)}},24413:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var n=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=i(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var l=a?Object.getOwnPropertyDescriptor(e,o):null;l&&(l.get||l.set)?Object.defineProperty(n,o,l):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(96540)),a=r(83199),o=r(22332);function i(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(i=function(e){return e?r:t})(e)}t.default=function(){var e=(0,o.useAttributeValue)("viewUpdateEvery"),t=(0,o.useAttributeValue)("updateEvery"),r=(0,o.useAttributeValue)("groupingMethod");return n.default.createElement(n.Fragment,null,n.default.createElement(a.Flex,{gap:1,"data-testid":"chartPopover-collection"},n.default.createElement(a.TextMicro,{color:"textLite"},"Granularity:"),n.default.createElement(a.TextMicro,{color:"textDescription"},t,"s")),e!==t&&n.default.createElement(a.Flex,{gap:1,"data-testid":"chartPopover-collection"},n.default.createElement(a.TextMicro,{color:"textLite"},"View point:"),n.default.createElement(a.TextMicro,{color:"textDescription"},r," 
",e,"s")))}},99684:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var n,a=(n=r(96540))&&n.__esModule?n:{default:n},o=r(83199);t.default=function(e){var t=e.disabled;return a.default.createElement(o.Flex,{width:"1px",background:t?"disabled":"borderSecondary"})}},31899:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var n=d(r(96540)),a=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=f(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(8711)),o=r(83199),i=r(22332),l=d(r(6504)),u=d(r(42883)),c=["height"];function f(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(f=function(e){return e?r:t})(e)}function d(e){return e&&e.__esModule?e:{default:e}}function s(){return s=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,c),l=(0,i.useLoadingColor)();return n.default.createElement(o.Flex,s({flex:!0,padding:[0,0,0,10]},a),n.default.createElement(m,{color:l,height:r}))}},49096:(e,t,r)=>{t.Ay=void 0;var n=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=s(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(96540)),a=r(83199),o=d(r(74113)),i=r(22332),l=d(r(10534)),u=r(89380),c=d(r(56284)),f=["uiName"];function d(e){return e&&e.__esModule?e:{default:e}}function s(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(s=function(e){return e?r:t})(e)}function p(){return p=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,f);return n.default.createElement(u.ChartWrapper,{ref:t},n.default.createElement(o.default,p({uiName:r,column:!0,alignItems:"center",justifyContent:"center",position:"relative"},a),n.default.createElement(m,null),n.default.createElement(g,null)))}));t.Ay=(0,l.default)(v,{tile:!0})},57513:(e,t,r)=>{t.__esModule=!0,t.default=t.Title=void 0;var n,a=(n=r(96540))&&n.__esModule?n:{default:n},o=r(83199),i=r(22332);function l(){return l=Object.assign?Object.assign.bind():function(e){for(var t=1;t{t.__esModule=!0,t.default=t.Container=void 0;var n=d(r(96540)),a=r(83199),o=r(22332),i=d(r(99684));t.Separator=i.default;var l=d(r(13752));t.ChartType=l.default;var u=d(r(92815));t.Fullscreen=u.default;var c=d(r(72582));t.Information=c.default;var f=["children"];function d(e){return e&&e.__esModule?e:{default:e}}function s(){return s=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,f),a=!(0,o.useAttributeValue)("focused"),i=(0,o.useAttributeValue)("toolboxElements");return n.default.createElement(p,r,t,i.map((function(e,t){return n.default.createElement(e,{key:t,disabled:a})})))}},33862:(e,t)=>{t.__esModule=!0,t.createCanvas=t.copyCanvas=void 0;t.createCanvas=function(e,t){var r=document.createElement("canvas");return r.width=e,r.height=t,r},t.copyCanvas=function(e,t){t.width=e.width,t.height=e.height;var 
r=t.getContext("2d");r.clearRect(0,0,t.width,t.height),r.drawImage(e,0,0)}},20207:(e,t)=>{function r(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function n(e){for(var t=1;t{t.__esModule=!0,t.default=void 0;var n,a=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=l(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(8711)),o=(n=r(15877))&&n.__esModule?n:{default:n},i=["zIndex"];function l(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(l=function(e){return e?r:t})(e)}function u(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function c(e,t,r){return(t=function(e){var t=function(e,t){if("object"!=typeof e||!e)return e;var r=e[Symbol.toPrimitive];if(void 0!==r){var n=r.call(e,t||"default");if("object"!=typeof n)return n;throw new TypeError("@@toPrimitive must return a primitive value.")}return("string"===t?String:Number)(e)}(e,"string");return"symbol"==typeof t?t:t+""}(t))in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}var f=(0,a.keyframes)(["0%{opacity:0.5;transform:scale(0.8);}100%{opacity:1;transform:scale(1);}"]),d=(0,a.css)(["opacity:0;animation:"," 0.1s forwards;animation-delay:0.01s;"],f),s=(0,a.default)(o.default).attrs((function(e){var t=e.zIndex;return function(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,i))})).withConfig({displayName:"container__Container",componentId:"sc-l6u9ms-0"})(["left:-99999px;"," "," "," backface-visibility:hidden;perspective:1000;transform:translate3d(0,0,0);will-change:left,top,transform;"],(function(e){return e.animation&&d}),(function(e){return!e.hideShadow&&"box-shadow: 0 2px 6px rgba(0, 0, 0, 0.15);"}),(function(e){return!!e.noEvents&&"pointer-events: none;"}));t.default=s},22007:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var n=r(96540),a=function e(t,r,n,a){void 0===a&&(a=!0);var o=function(e,t,r){return"left"===e.left?t.left:"right"===e.left?t.right:"right"===e.right?t.right-r.width:"left"===e.right?t.left-r.width:t.left+t.width/2-r.width/2}(t,r,n),i=Math.max(0,o);return o=Math.min(window.innerWidth-n.width,i),a||i===o?o:e(function(e){return"left"===e.left?{right:"right"}:"right"===e.left?{right:"left"}:"right"===e.right?{left:"left"}:"left"===e.right?{left:"right"}:void 0}(t),r,n)},o=function e(t,r,n,a){void 0===a&&(a=!0);var o=function(e,t,r){if("top"===e.top)return t.top;if("bottom"===e.top)return t.bottom;if("bottom"===e.bottom)return t.bottom-r.height;if("top"===e.bottom){var n=t.top-r.height;return n<0&&t.bottom+r.height{t.__esModule=!0,t.sx=t.default=void 0;var 
n=M(r(8711)),a=M(r(27988)),o=M(r(32305)),i=M(r(36094)),l=M(r(62703)),u=M(r(31886)),c=M(r(25320)),f=M(r(96029)),d=M(r(88725)),s=M(r(86397)),p=M(r(11564)),m=M(r(48488)),g=M(r(26278)),v=M(r(11341)),h=M(r(22318)),b=M(r(97373)),y=M(r(90836)),w=M(r(41311)),O=M(r(85103)),x=M(r(20909)),_=M(r(92108)),j=M(r(48299)),P=r(36350),E=r(50677),k=M(r(50402));function M(e){return e&&e.__esModule?e:{default:e}}var C=t.sx=function(e){return(0,k.default)(e.sx)(e)};t.default=function(e){return(0,n.default)(e).withConfig({displayName:"flex",componentId:"sc-1gtk8kg-0"})(["display:flex;"," "," "," "," "," "," "," "," "," "," "," "," "," "," "," "," "," "," "," "," "," "," "," ",""],v.default,x.default,h.default,b.default,y.default,w.default,a.default,c.default,o.default,i.default,O.default,p.default,s.default,P.fontColor,g.default,u.default,_.default,l.default,m.default,f.default,d.default,j.default,E.position,C)}},15877:(e,t,r)=>{var n;t.__esModule=!0,t.default=void 0;var a=(0,((n=r(98909))&&n.__esModule?n:{default:n}).default)("div");t.default=a},41956:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var n=u(r(8711)),a=u(r(97373)),o=u(r(90836)),i=u(r(41311)),l=u(r(37341));function u(e){return e&&e.__esModule?e:{default:e}}var c=function(e,t){return"0"!==e&&"0"!==t?"calc((100% - "+e+") - "+t+")":"0"===e&&"0"===t?"100%":"calc(100% - "+("0"===e?t:e)+")"},f=new Set(["top","center","bottom"]),d=new Set(["bottom-left","left","top-left"]),s=new Set(["right","center","left"]),p=new Set(["top-left","top","top-right"]),m=new Set(["top-right","right","bottom-right"]),g=new Set(["bottom-right","bottom","bottom-left"]),v=n.default.div.attrs((function(e){var t=e.theme,r=e.margin;return{marginDimensions:(0,l.default)(t,r)}})).withConfig({displayName:"container__Container",componentId:"sc-k2hlzf-0"})(["position:",";display:flex;outline:none;pointer-events:all;"," "," "," "," "," "," "," "," "," "," "," ",""],(function(e){return e.isAbsolute?"absolute":"fixed"}),a.default,o.default,i.default,(function(e){var t=e.marginDimensions,r=t.top,n=t.bottom;return"max-height: "+c(r,n)+";"}),(function(e){var t=e.marginDimensions,r=t.right,n=t.left;return"max-width: "+c(n,r)+";"}),(function(e){var t=e.position,r=e.full,n=e.marginDimensions;return"vertical"===r||!0===r||p.has(t)?"top: "+n.top+";":s.has(t)?"top: 50%;":""}),(function(e){var t=e.position,r=e.full,n=e.marginDimensions;return"horizontal"===r||!0===r||m.has(t)?"right: "+n.right+";":""}),(function(e){var t=e.position,r=e.full,n=e.marginDimensions;return"vertical"===r||!0===r||g.has(t)?"bottom: "+n.bottom+";":""}),(function(e){var t=e.position,r=e.full,n=e.marginDimensions;return"horizontal"===r||!0===r||d.has(t)?"left: "+n.left+";":f.has(t)?"left: 50%;":""}),(function(e){var t=e.full,r=e.position,n=function(){var e=!0!==t&&"horizontal"!==t&&f.has(r),n=!0!==t&&"vertical"!==t&&s.has(r);return e||n?e&&!n?"translateX(-50%)":!e&&n?"translateY(-50%)":"translate(-50%, -50%)":""}();return n&&"transform: "+n+";"}),(function(e){return e.borderShadow&&"box-shadow: 0px 2px 68px rgba(0, 0, 0, 0.288);"}),(function(e){var t=e.zIndex,r=void 0===t?35:t;return"z-index: "+r+";"}));t.default=v},37341:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var n=r(48558),a={top:"0",right:"0",bottom:"0",left:"0"};t.default=function(e,t){if(!Array.isArray(t)||t.length<1||t.length>4)return a;var r=t.map((function(t){return(0,n.getDimension)(e,t)}));return 
1===r.length?{top:r[0],right:r[0],bottom:r[0],left:r[0]}:2===r.length?{top:r[0],right:r[1],bottom:r[0],left:r[1]}:3===r.length?{top:r[0],right:r[1],bottom:r[2],left:r[1]}:{top:r[0],right:r[1],bottom:r[2],left:r[3]}}},90836:(e,t)=>{t.__esModule=!0,t.default=void 0;var r={start:"flex-start",center:"center",end:"flex-end",between:"space-between",around:"space-around",stretch:"stretch"};t.default=function(e){var t=e.alignContent;return t in r?"align-content: "+r[t]+";":""}},97373:(e,t)=>{t.__esModule=!0,t.default=void 0;var r={start:"flex-start",center:"center",end:"flex-end",baseline:"baseline",stretch:"stretch"};t.default=function(e){var t=e.alignItems;return t in r?"align-items: "+r[t]+";":""}},26278:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var n=r(41234);t.default=function(e){var t=e.theme,r=e.background,a=e.backgroundOpacity;if(!r)return"";var o=a?(0,n.getRgbColor)(r,a)({theme:t}):(0,n.getColor)(r)({theme:t});return o&&"background-color: "+o+";"}},92108:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var n=r(41234);function a(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function o(e){for(var t=1;t{t.__esModule=!0,t.default=void 0;t.default=function(e){var t=function(e,t,r){return e?"column":t?"column-reverse":r?"row-reverse":"row"}(e.column,e.columnReverse,e.rowReverse);return"flex-direction: "+t+";"}},11341:(e,t)=>{t.__esModule=!0,t.default=void 0;t.default=function(e){var t=e.flex,r=e.basis;if(void 0===t&&void 0===r)return"";if(r&&void 0===t)return"flex-basis: "+r+";";var n=function(e,t){return void 0===t&&(t="auto"),!0===e?"1 1 "+t:!1===e?"0 0 "+t:"grow"===e?"1 0 "+t:"shrink"===e?"0 1 "+t:"number"===typeof e?e+" 0 "+t:"object"!==typeof e?e:e.grow+" "+e.shrink+" "+t}(t,r);return n?"flex: "+n+";":""}},85103:(e,t)=>{t.__esModule=!0,t.default=void 0;t.default=function(e){var t=e.theme.constants.SIZE_SUB_UNIT,r=e.gap,n=e.column,a=e.columnReverse,o=e.rowReverse;if("number"!==typeof r)return"";var i=function(e){var t=e.column,r=e.columnReverse,n=e.rowReverse;return t||r?"bottom":n?"left":"right"}({column:n,columnReverse:a,rowReverse:o});return"\n &> *:not(:last-child) {\n margin-"+i+": "+t*r+"px;\n }\n "}},86397:(e,t)=>{t.__esModule=!0,t.default=void 0;t.default=function(e){var t=e.theme.constants.SIZE_SUB_UNIT,r=e.height;if("object"===typeof r){var n=r.min,a=void 0===n?"":n,o=r.max,i=void 0===o?"":o;return"\n "+(a&&"min-height: "+("number"===typeof a?t*a+"px":a)+";")+"\n "+(i&&"max-height: "+("number"===typeof i?t*i+"px":i)+";")+"\n "}return r&&"height: "+("number"===typeof r?t*r+"px":r)+";"}},41311:(e,t)=>{t.__esModule=!0,t.default=void 0;var r={start:"flex-start",center:"center",end:"flex-end",between:"space-between",around:"space-around",evenly:"space-evenly",stretch:"stretch"};t.default=function(e){var t=e.justifyContent;return t in r?"justify-content: "+r[t]+";":""}},48488:(e,t)=>{t.__esModule=!0,t.default=void 0;t.default=function(e){var t=e.overflow;if(!t)return"";if("string"===typeof t)return"overflow: "+t+";";var r=t.vertical,n=void 0===r?"":r,a=t.horizontal,o=void 0===a?"":a;return"\n "+(n&&"overflow-y: "+n+";")+"\n "+(o&&"overflow-x: "+o+";")+"\n "}},48299:(e,t,r)=>{t.__esModule=!0,t.pseudoSelectors=t.default=t.calculateStyles=void 0;var n=c(r(26278)),a=c(r(92108)),o=r(41234),i=c(r(97373)),l=["theme"],u=["theme"];function c(e){return e&&e.__esModule?e:{default:e}}function f(e,t){var 
r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function d(e){for(var t=1;t=0||(a[r]=e[r]);return a}var m={border:a.default,background:n.default,color:function(e){var t=e.theme,r=e.color;return r?"color: "+(0,o.getColor)(r)({theme:t})+";":""},alignItems:i.default},g=t.calculateStyles=function(e){var t=e.theme,r=p(e,l),n="";for(var a in r)if(void 0===m[a]){n=n+(a+":")+r[a]+";"}else{var o=m[a];n+=o&&"function"===typeof o?o(d({theme:t},r)):""}return n},v=t.pseudoSelectors={_before:"&::before",_after:"&::after",_hover:"&:hover, &[data-hover]",_active:"&:active, &[data-active]",_focus:"&:focus, &[data-focus]",_focusWithin:"&:focus-within",_visited:"&:visited",_empty:"&:empty",_even:"&:nth-of-type(even)",_odd:"&:nth-of-type(odd)",_disabled:"&[disabled], &[aria-disabled=true], &[data-disabled]",_checked:"&[aria-checked=true]",_mixed:"&[aria-checked=mixed]",_selected:"&[aria-selected=true], [data-selected] > &",_invalid:"&[aria-invalid=true]",_pressed:"&[aria-pressed=true]",_readOnly:"&[aria-readonly=true], &[readonly]",_first:"&:first-of-type",_last:"&:last-of-type",_expanded:"&[aria-expanded=true]",_grabbed:"&[aria-grabbed=true]",_notFirst:"&:not(:first-of-type)",_notLast:"&:not(:last-of-type)",_groupHover:"[role=group]:hover &",_autofill:"&:-webkit-autofill",_placeholder:"&::placeholder"};t.default=function(e){var t=e.theme,r=p(e,u),n="";for(var a in r)if(a in v){var o=a,i=r[o],l=g(d({theme:t},i));n=n+"\n "+v[o]+"{ \n "+l+" \n }"}return n.replace(/^(?=\n)$|^\s*|\s*$|\n\n+/gm,"")}},11564:(e,t)=>{t.__esModule=!0,t.default=void 0;t.default=function(e){var t=e.theme.constants.SIZE_SUB_UNIT,r=e.width;if("object"===typeof r){var n=r.min,a=void 0===n?"":n,o=r.max,i=void 0===o?"":o,l=r.base,u=void 0===l?"":l;return"\n "+(a&&"min-width: "+("number"===typeof a?t*a+"px":a)+";")+"\n "+(i&&"max-width: "+("number"===typeof i?t*i+"px":i)+";")+"\n "+(u&&"width: "+("number"===typeof u?t*u+"px":u)+";")+"\n "}return r&&"width: "+("number"===typeof r?t*r+"px":r)+";"}},22318:(e,t)=>{t.__esModule=!0,t.default=void 0;t.default=function(e){var t=function(e){return!0===e?"wrap":!1===e?"nowrap":"reverse"===e?e:""}(e.flexWrap);return t&&"flex-wrap: "+t+";"}},23234:(e,t)=>{t.__esModule=!0,t.default=void 0;var r=new Set(["left","center","right"]);t.default=function(e){var t=e.textAlign;return r.has(t)&&"text-align: "+t+";"}},67789:(e,t)=>{t.__esModule=!0,t.default=void 0;var r=new Set(["underline","none","line-through"]);t.default=function(e){var t=e.textDecoration;return r.has(t)&&"text-decoration: "+t+";"}},610:(e,t)=>{t.__esModule=!0,t.default=void 0;t.default=function(e){return e.truncate&&"\n white-space: nowrap;\n text-overflow: ellipsis;\n overflow: hidden;\n"}},44453:(e,t)=>{t.__esModule=!0,t.default=void 0;var r=new Set(["normal","nowrap","pre-line","pre-wrap"]);t.default=function(e){var t=e.whiteSpace;return r.has(t)&&"white-space: "+t+";"}},30109:(e,t)=>{t.__esModule=!0,t.default=void 0;var r=new Set(["normal","break-all","keep-all","break-word"]);t.default=function(e){var t=e.wordBreak;return r.has(t)&&"word-break: "+t+";"}},36350:(e,t,r)=>{t.__esModule=!0,t.makeTypography=t.makeText=t.makeSmall=t.makeNano=t.makeMicro=t.makeHuge=t.makeH6=t.makeH5=t.makeH4=t.makeH3=t.makeH2=t.makeH1=t.makeH0=t.makeFemto=t.makeBigger=t.makeBig=t.fontColor=t.fontCode=void 0;var n=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof 
e)return{default:e};var r=h(t);if(r&&r.has(e))return r.get(e);var n={__proto__:null},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var o in e)if("default"!==o&&{}.hasOwnProperty.call(e,o)){var i=a?Object.getOwnPropertyDescriptor(e,o):null;i&&(i.get||i.set)?Object.defineProperty(n,o,i):n[o]=e[o]}return n.default=e,r&&r.set(e,n),n}(r(8711)),a=r(41234),o=v(r(27988)),i=v(r(32305)),l=v(r(36094)),u=v(r(60090)),c=v(r(23234)),f=v(r(67789)),d=v(r(610)),s=v(r(44453)),p=v(r(30109)),m=v(r(31886)),g=v(r(88725));function v(e){return e&&e.__esModule?e:{default:e}}function h(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(h=function(e){return e?r:t})(e)}var b=t.fontColor=function(e){var t=e.theme,r=e.color,n=void 0===r?"text":r;return"color: "+(0,a.getColor)(n)({theme:t})+";"},y=t.fontCode=function(e){var t=e.background,r=void 0===t?"text":t,n=e.code,o=e.color,i=void 0===o?"elementBackground":o,l=e.theme;return n&&"\n background-color: "+(0,a.getColor)(r)({theme:l})+";\n border-radius: 4px;\n color: "+(0,a.getColor)(i)({theme:l})+";\n padding: 0 6px;\n "},w=(0,n.css)([""," "," "," "," "," "," "," "," "," "," "," "," "," "," ",""],b,y,o.default,c.default,f.default,u.default,d.default,s.default,p.default,i.default,l.default,m.default,g.default,(function(e){var t=e.fontSize;return t&&"\n font-size: "+t+";\n line-height: "+t+";\n "}),(function(e){var t=e.lineHeight;return t&&"\n line-height: "+t+";\n "})),O=t.makeTypography=function(e,t){var r,a,o=t.fontSize,i=t.lineHeight,l=t.strong;return(0,n.default)(e).withConfig({displayName:"typography",componentId:"sc-1lwqv72-0"})(['font-family:-apple-system,BlinkMacSystemFont,"Segoe UI",Ubuntu,"Helvetica Neue",sans-serif;font-style:normal;'," "," "," ",""],(a=o,function(){return"font-size: "+a+";"}),function(e){return function(){return"line-height: "+e+";"}}(i),(r=l,function(e){var t=e.strong;return"font-weight: "+((void 0===t?r:t)?"bold":"normal")+";"}),w)};t.makeH0=function(e){return O(e,{fontSize:"26px",lineHeight:"32px",strong:!0})},t.makeH1=function(e){return O(e,{fontSize:"24px",lineHeight:"28px",strong:!0})},t.makeH2=function(e){return O(e,{fontSize:"22px",lineHeight:"24px",strong:!0})},t.makeH3=function(e){return O(e,{fontSize:"20px",lineHeight:"24px",strong:!0})},t.makeH4=function(e){return O(e,{fontSize:"16px",lineHeight:"21px",strong:!0})},t.makeH5=function(e){return O(e,{fontSize:"14px",lineHeight:"18px",strong:!0})},t.makeH6=function(e){return O(e,{fontSize:"12px",lineHeight:"14px",strong:!0})},t.makeFemto=function(e){return O(e,{fontSize:"7px",lineHeight:"8px"})},t.makeNano=function(e){return O(e,{fontSize:"8px",lineHeight:"10px"})},t.makeMicro=function(e){return O(e,{fontSize:"10px",lineHeight:"13px"})},t.makeSmall=function(e){return O(e,{fontSize:"11px",lineHeight:"14px"})},t.makeText=function(e){return O(e,{fontSize:"12px",lineHeight:"16px"})},t.makeBig=function(e){return O(e,{fontSize:"14px",lineHeight:"20px"})},t.makeBigger=function(e){return O(e,{fontSize:"16px",lineHeight:"18px"})},t.makeHuge=function(e){return O(e,{fontSize:"24px",lineHeight:"32px"})}},44048:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var n=r(96540);function a(e){return function(e){if(Array.isArray(e))return o(e)}(e)||function(e){if("undefined"!==typeof Symbol&&null!=e[Symbol.iterator]||null!=e["@@iterator"])return Array.from(e)}(e)||function(e,t){if(!e)return;if("string"===typeof e)return o(e,t);var r=Object.prototype.toString.call(e).slice(8,-1);"Object"===r&&e.constructor&&(r=e.constructor.name);if("Map"===r||"Set"===r)return 
Array.from(e);if("Arguments"===r||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(r))return o(e,t)}(e)||function(){throw new TypeError("Invalid attempt to spread non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}()}function o(e,t){(null==t||t>e.length)&&(t=e.length);for(var r=0,n=new Array(t);r{t.__esModule=!0,t.default=void 0;var n,a=r(96540),o=(n=r(44048))&&n.__esModule?n:{default:n};function i(e){return function(e){if(Array.isArray(e))return l(e)}(e)||function(e){if("undefined"!==typeof Symbol&&null!=e[Symbol.iterator]||null!=e["@@iterator"])return Array.from(e)}(e)||function(e,t){if(!e)return;if("string"===typeof e)return l(e,t);var r=Object.prototype.toString.call(e).slice(8,-1);"Object"===r&&e.constructor&&(r=e.constructor.name);if("Map"===r||"Set"===r)return Array.from(e);if("Arguments"===r||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(r))return l(e,t)}(e)||function(){throw new TypeError("Invalid attempt to spread non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}()}function l(e,t){(null==t||t>e.length)&&(t=e.length);for(var r=0,n=new Array(t);r{t.__esModule=!0,t.default=void 0;var n=r(96540);t.default=function(){var e=(0,n.useMemo)((function(){var e=document.createElement("div");return document.body.append(e),e}),[]);return(0,n.useLayoutEffect)((function(){return function(){try{document.body.removeChild(e)}catch(t){}}}),[]),e}},51365:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var n,a=r(96540),o=(n=r(68016))&&n.__esModule?n:{default:n};t.default=function(e){var t=(0,a.useRef)(),r=(0,a.useCallback)((function(r){t.current=r,(0,o.default)(e,r)}),[]);return[t,r]}},31886:(e,t)=>{t.__esModule=!0,t.default=void 0;var r={weak:.3,medium:.4,strong:.8,none:1};t.default=function(e){var t=e.opacity,n=t&&r[t]||t;return n?"opacity: "+n+";":""}},36094:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var n=r(48558);t.default=function(e){var t=e.theme,r=e.padding;return r?Array.isArray(r)&&r.length>=1&&r.length<=4?"padding: "+(0,n.getDimensions)(t,r)+";":(console.error("Please provide an array (max 4 elements) for `padding` style helper."),""):""}},25320:(e,t)=>{t.__esModule=!0,t.default=void 0;var r={static:"static",absolute:"absolute",fixed:"fixed",relative:"relative",sticky:"sticky",initial:"initial",inherit:"inherit"};t.default=function(e){var t=e.position;return t in r?"position: "+t+";":""}},62703:(e,t)=>{t.__esModule=!0,t.default=void 0;var r=function(e,t){return!0===t?e+"px":"number"===typeof t?e*t+"px":"string"===typeof t?t:""},n=function(e,t){return"border-top-left-radius: "+r(e,t)+";"},a=function(e,t){return"border-top-right-radius: "+r(e,t)+";"},o=function(e,t){return"border-bottom-left-radius: "+r(e,t)+";"},i=function(e,t){return"border-bottom-right-radius: "+r(e,t)+";"},l={top:function(e,t){return"\n "+n(e,t)+"\n "+a(e,t)+"\n "},left:function(e,t){return"\n "+n(e,t)+"\n "+o(e,t)+"\n "},bottom:function(e,t){return"\n "+o(e,t)+"\n "+i(e,t)+"\n "},right:function(e,t){return"\n "+a(e,t)+"\n "+i(e,t)+"\n "},"top-left":n,"top-right":a,"bottom-left":o,"bottom-right":i};t.default=function(e){var t=e.theme.constants.SIZE_SUB_UNIT,n=e.round;if(!n)return"";var a=r(t,n);if(a)return"border-radius: "+a+";";var o=n.side,i=n.size,u=void 0===i?1:i;return o in l?""+l[o](t,u):""}},68016:(e,t)=>{t.__esModule=!0,t.default=void 0;t.default=function(e,t){"function"===typeof e?e(t):e&&(e.current=t)}},60090:(e,t)=>{t.__esModule=!0,t.default=void 0;var 
r={none:"none",capitalize:"capitalize",uppercase:"uppercase",lowercase:"lowercase",firstLetter:"firstLetter",fullWidth:"full-width"};t.default=function(e){var t=(void 0===e?{}:e).textTransform,n=void 0===t?r.none:t;return n===r.firstLetter?"text-transform: lowercase;\n &::first-letter {\n text-transform: uppercase;\n }\n":n in r?"text-transform: "+r[n]+";":"text-transform: "+r.none+";"}},96029:(e,t)=>{t.__esModule=!0,t.default=void 0;t.default=function(e){var t=e.zIndex;if(t&&"number"===typeof t)return"z-index: "+t+";"}},15587:(e,t,r)=>{r.r(t)},42883:(e,t,r)=>{r.r(t),r.d(t,{default:()=>u});var n=r(12897),a=r.n(n),o=r(55042),i=r.n(o),l=new(a())({id:"skeleton",use:"skeleton-usage",viewBox:"0 0 1225 192",content:''});i().add(l);const u=l},10120:(e,t,r)=>{r.r(t),r.d(t,{default:()=>u});var n=r(12897),a=r.n(n),o=r(55042),i=r.n(o),l=new(a())({id:"D",use:"D-usage",viewBox:"0 0 16 16",content:''});i().add(l);const u=l},68317:(e,t,r)=>{r.r(t),r.d(t,{default:()=>u});var n=r(12897),a=r.n(n),o=r(55042),i=r.n(o),l=new(a())({id:"I",use:"I-usage",viewBox:"0 0 16 16",content:''});i().add(l);const u=l},31952:(e,t,r)=>{r.r(t),r.d(t,{default:()=>u});var n=r(12897),a=r.n(n),o=r(55042),i=r.n(o),l=new(a())({id:"L",use:"L-usage",viewBox:"0 0 16 16",content:''});i().add(l);const u=l},34386:(e,t,r)=>{r.r(t),r.d(t,{default:()=>u});var n=r(12897),a=r.n(n),o=r(55042),i=r.n(o),l=new(a())({id:"N",use:"N-usage",viewBox:"0 0 16 16",content:''});i().add(l);const u=l},58655:(e,t,r)=>{r.r(t),r.d(t,{default:()=>u});var n=r(12897),a=r.n(n),o=r(55042),i=r.n(o),l=new(a())({id:"anomaly_badge",use:"anomaly_badge-usage",viewBox:"0 0 15 16",content:''});i().add(l);const u=l},97517:(e,t,r)=>{r.r(t),r.d(t,{default:()=>u});var n=r(12897),a=r.n(n),o=r(55042),i=r.n(o),l=new(a())({id:"checkmark_s",use:"checkmark_s-usage",viewBox:"0 0 16 16",content:''});i().add(l);const u=l},63668:(e,t,r)=>{r.r(t),r.d(t,{default:()=>u});var n=r(12897),a=r.n(n),o=r(55042),i=r.n(o),l=new(a())({id:"chevron_down",use:"chevron_down-usage",viewBox:"0 0 12 12",content:''});i().add(l);const u=l},26762:(e,t,r)=>{r.r(t),r.d(t,{default:()=>u});var n=r(12897),a=r.n(n),o=r(55042),i=r.n(o),l=new(a())({id:"chevron_down_thin",use:"chevron_down_thin-usage",viewBox:"0 0 16 24",content:''});i().add(l);const u=l},30464:(e,t,r)=>{r.r(t),r.d(t,{default:()=>u});var n=r(12897),a=r.n(n),o=r(55042),i=r.n(o),l=new(a())({id:"chevron_expand",use:"chevron_expand-usage",viewBox:"0 0 8 6",content:''});i().add(l);const u=l},49484:(e,t,r)=>{r.r(t),r.d(t,{default:()=>u});var n=r(12897),a=r.n(n),o=r(55042),i=r.n(o),l=new(a())({id:"chevron_right_end",use:"chevron_right_end-usage",viewBox:"0 0 18 18",content:''});i().add(l);const u=l},20327:(e,t,r)=>{r.r(t),r.d(t,{default:()=>u});var n=r(12897),a=r.n(n),o=r(55042),i=r.n(o),l=new(a())({id:"chevron_up_thin",use:"chevron_up_thin-usage",viewBox:"0 0 16 24",content:''});i().add(l);const u=l},35083:(e,t,r)=>{r.r(t),r.d(t,{default:()=>u});var n=r(12897),a=r.n(n),o=r(55042),i=r.n(o),l=new(a())({id:"correlations",use:"correlations-usage",viewBox:"0 0 24 24",content:''});i().add(l);const u=l},8001:(e,t,r)=>{r.r(t),r.d(t,{default:()=>u});var n=r(12897),a=r.n(n),o=r(55042),i=r.n(o),l=new(a())({id:"dot",use:"dot-usage",viewBox:"0 0 10 10",content:''});i().add(l);const u=l},3113:(e,t,r)=>{r.r(t),r.d(t,{default:()=>u});var n=r(12897),a=r.n(n),o=r(55042),i=r.n(o),l=new(a())({id:"drag_horizontal",use:"drag_horizontal-usage",viewBox:"0 0 24 24",content:''});i().add(l);const u=l},73983:(e,t,r)=>{r.r(t),r.d(t,{default:()=>u});var 
n=r(12897),a=r.n(n),o=r(55042),i=r.n(o),l=new(a())({id:"drag_vertical",use:"drag_vertical-usage",viewBox:"0 0 24 24",content:''});i().add(l);const u=l},40813:(e,t,r)=>{r.r(t),r.d(t,{default:()=>u});var n=r(12897),a=r.n(n),o=r(55042),i=r.n(o),l=new(a())({id:"gear",use:"gear-usage",viewBox:"0 0 20 20",content:''});i().add(l);const u=l},53965:(e,t,r)=>{r.r(t),r.d(t,{default:()=>u});var n=r(12897),a=r.n(n),o=r(55042),i=r.n(o),l=new(a())({id:"nav_left",use:"nav_left-usage",viewBox:"0 0 8 10",content:''});i().add(l);const u=l},14608:(e,t,r)=>{r.r(t),r.d(t,{default:()=>u});var n=r(12897),a=r.n(n),o=r(55042),i=r.n(o),l=new(a())({id:"nav_right",use:"nav_right-usage",viewBox:"0 0 8 10",content:''});i().add(l);const u=l},70922:(e,t,r)=>{r.r(t),r.d(t,{default:()=>u});var n=r(12897),a=r.n(n),o=r(55042),i=r.n(o),l=new(a())({id:"pan_tool",use:"pan_tool-usage",viewBox:"0 0 24 24",content:''});i().add(l);const u=l},58937:(e,t,r)=>{r.r(t),r.d(t,{default:()=>u});var n=r(12897),a=r.n(n),o=r(55042),i=r.n(o),l=new(a())({id:"selected_area",use:"selected_area-usage",viewBox:"0 0 24 24",content:''});i().add(l);const u=l},87083:(e,t,r)=>{r.r(t),r.d(t,{default:()=>u});var n=r(12897),a=r.n(n),o=r(55042),i=r.n(o),l=new(a())({id:"sort_ascending",use:"sort_ascending-usage",viewBox:"0 0 18 18",content:''});i().add(l);const u=l},51081:(e,t,r)=>{r.r(t),r.d(t,{default:()=>u});var n=r(12897),a=r.n(n),o=r(55042),i=r.n(o),l=new(a())({id:"sort_descending",use:"sort_descending-usage",viewBox:"0 0 18 18",content:''});i().add(l);const u=l},88335:(e,t,r)=>{r.r(t),r.d(t,{default:()=>u});var n=r(12897),a=r.n(n),o=r(55042),i=r.n(o),l=new(a())({id:"warning_triangle_hollow",use:"warning_triangle_hollow-usage",viewBox:"0 0 24 24",content:''});i().add(l);const u=l},29085:(e,t,r)=>{r.r(t),r.d(t,{default:()=>u});var n=r(12897),a=r.n(n),o=r(55042),i=r.n(o),l=new(a())({id:"zoom_in",use:"zoom_in-usage",viewBox:"0 0 24 24",content:''});i().add(l);const u=l},99444:(e,t,r)=>{r.r(t),r.d(t,{default:()=>u});var n=r(12897),a=r.n(n),o=r(55042),i=r.n(o),l=new(a())({id:"zoom_out",use:"zoom_out-usage",viewBox:"0 0 24 24",content:''});i().add(l);const u=l},83357:(e,t,r)=>{r.r(t),r.d(t,{default:()=>u});var n=r(12897),a=r.n(n),o=r(55042),i=r.n(o),l=new(a())({id:"zoom_reset",use:"zoom_reset-usage",viewBox:"0 0 24 24",content:''});i().add(l);const u=l}}]); \ No newline at end of file diff --git a/src/web/gui/v2/7144.382c341e09540fdebaa6.chunk.js.LICENSE.txt b/src/web/gui/v2/7144.382c341e09540fdebaa6.chunk.js.LICENSE.txt deleted file mode 100644 index ae386fb79..000000000 --- a/src/web/gui/v2/7144.382c341e09540fdebaa6.chunk.js.LICENSE.txt +++ /dev/null @@ -1 +0,0 @@ -/*! regenerator-runtime -- Copyright (c) 2014-present, Facebook, Inc. 
-- license (MIT): https://github.com/facebook/regenerator/blob/main/LICENSE */ diff --git a/src/web/gui/v2/7146.79304e386ac9238b7cf1.chunk.js b/src/web/gui/v2/7146.79304e386ac9238b7cf1.chunk.js deleted file mode 100644 index 610a44838..000000000 --- a/src/web/gui/v2/7146.79304e386ac9238b7cf1.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},n=(new Error).stack;n&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[n]="200506ba-e7da-4c28-9afa-12c010f776fb",e._sentryDebugIdIdentifier="sentry-dbid-200506ba-e7da-4c28-9afa-12c010f776fb")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[7146],{57146:(e,n,t)=>{t.r(n),t.d(n,{default:()=>u});t(62953);var a=t(96540),d=t(39225),l=t(86663),o=t(47767),r=t(22292),c=t(28738),f=t(24266);const s=(0,d.A)((()=>Promise.all([t.e(7144),t.e(7857),t.e(8239),t.e(9473),t.e(963),t.e(7487)]).then(t.bind(t,7356))),"AlertWebview"),u=(i=(0,f.Xc)((()=>{const{search:e}=window.location,{space_id:n,room_id:t,node_name:d,node_id:f}=l.parse(e);return(0,r.uW)("isLoaded")?a.createElement(a.Suspense,{fallback:a.createElement(c.A,null)},a.createElement(o.BV,null,a.createElement(o.qh,{path:"alerts/:alertId",element:a.createElement(s,{spaceId:n,roomId:t,nodeName:d,nodeId:f})}))):a.createElement(c.A,null)})),e=>{const{hash:n}=window.location,{token:t}=l.parse(n),[d,o]=(0,a.useState)();return(0,a.useEffect)((()=>{t&&(o(t),localStorage.setItem("netdataJWT",t))}),[]),d?a.createElement(i,e):a.createElement(c.A,null)});var i}}]); \ No newline at end of file diff --git a/src/web/gui/v2/7170.5d6047bb6ce9d77d53db.chunk.js b/src/web/gui/v2/7170.5d6047bb6ce9d77d53db.chunk.js deleted file mode 100644 index 5afcacd22..000000000 --- a/src/web/gui/v2/7170.5d6047bb6ce9d77d53db.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},n=(new Error).stack;n&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[n]="239c7385-e422-46fd-8336-50004b89c516",e._sentryDebugIdIdentifier="sentry-dbid-239c7385-e422-46fd-8336-50004b89c516")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[7170],{17170:(e,n,d)=>{d.d(n,{A:()=>i});d(62953),d(3296),d(27208),d(48408);var o=d(37618);const i=()=>{if(o.Ay)return null;const{origin:e,searchParams:n}=new URL(window.location.href),d=null===n||void 0===n?void 0:n.get("labra_subscription_id"),i=null===n||void 0===n?void 0:n.get("aws_customer_id"),t=null===n||void 0===n?void 0:n.get("aws_product_id");return d&&i&&t?"".concat(e,"/api/v2/billing/labra/spaces?customer_id=").concat(i,"&marketplace=aws&product_id=").concat(t,"&subscription_id=").concat(d):null}}}]); \ No newline at end of file diff --git a/src/web/gui/v2/7208.1d75cf5d007de32e403b.chunk.js b/src/web/gui/v2/7208.1d75cf5d007de32e403b.chunk.js deleted file mode 100644 index 0cfc1ba77..000000000 --- a/src/web/gui/v2/7208.1d75cf5d007de32e403b.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var c="undefined"!=typeof 
window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},h=(new Error).stack;h&&(c._sentryDebugIds=c._sentryDebugIds||{},c._sentryDebugIds[h]="750d2334-ef0d-4b22-bdbd-a7db1f1095f4",c._sentryDebugIdIdentifier="sentry-dbid-750d2334-ef0d-4b22-bdbd-a7db1f1095f4")}catch(c){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[7208],{17208:(c,h,l)=>{l.d(h,{go:()=>r,fx:()=>I,J8:()=>z,oi:()=>w});var s=l(96540),t=l(12897),i=l.n(t),n=l(55042),e=l.n(n),d=new(i())({id:"netdata.svg",use:"netdata.svg-usage",viewBox:"0 0 288 56",content:'\n\n\n\n\n\n\n\n'});e().add(d);const C=d;var a=new(i())({id:"google.svg",use:"google.svg-usage",viewBox:"0 0 48 48",content:''});e().add(a);const v=a;var _=new(i())({id:"github.svg",use:"github.svg-usage",viewBox:"0 0 16 16",content:''});e().add(_);const M=_;var V=new(i())({id:"cncfColor.svg",use:"cncfColor.svg-usage",viewBox:"0 0 400 77",content:'\n\n\n\n\n\n\n\n\n\n'});e().add(V);const f=V;var g=new(i())({id:"cncfWhite.svg",use:"cncfWhite.svg-usage",viewBox:"-1.81 -3.06 419.38 80.13",content:'\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n'});e().add(g);const H=g;var L=new(i())({id:"cncfBlack.svg",use:"cncfBlack.svg-usage",viewBox:"-3.69 -2.94 438.62 83.87",content:'\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n'});e().add(L);const p=L;var o=l(5668);const w=()=>s.createElement("svg",{width:"256",height:"48",viewBox:C.viewBox},s.createElement("use",{xlinkHref:"#".concat(C.id)})),z=()=>s.createElement("svg",{width:"16",height:"16",viewBox:v.viewBox},s.createElement("use",{xlinkHref:"#".concat(v.id)})),I=()=>s.createElement("svg",{width:"16",height:"16",viewBox:M.viewBox},s.createElement("use",{xlinkHref:"#".concat(M.id)})),D=c=>{let{width:h="152",height:l="29",svg:t={}}=c;return()=>s.createElement("svg",{width:h,height:l,viewBox:t.viewBox},s.createElement("use",{xlinkHref:"#".concat(t.id)}))},X={light:D({svg:p}),dark:D({svg:H}),unspecified:D({svg:f})},r=function(){let{theme:c}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const h=(0,o.xd)("theme"),l=X[c||h]||X.unspecified;return s.createElement(l,null)}}}]); \ No newline at end of file diff --git a/src/web/gui/v2/7304.ed4690ec296b59fbe7fd.chunk.js b/src/web/gui/v2/7304.ed4690ec296b59fbe7fd.chunk.js deleted file mode 100644 index defa09ee0..000000000 --- a/src/web/gui/v2/7304.ed4690ec296b59fbe7fd.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="bbe79fef-904d-4ba3-8a9b-697d08d1d2db",e._sentryDebugIdIdentifier="sentry-dbid-bbe79fef-904d-4ba3-8a9b-697d08d1d2db")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[7304],{7304:(e,t,n)=>{n.r(t),n.d(t,{default:()=>Ae});n(62953);var r=n(96540),l=n(83199),a=n(58168),o=n(27776),i=n(86083),c=n(9060);const s=()=>{const 
e=(0,o.AR)(),t=(0,c.A)(),[n,l]=(0,o.oE)(),a=(0,o.WB)(),s=(0,r.useMemo)((()=>(0,i.Cj)({searchTerm:n,integrations:e})),[n,e]),m=n?(0,i.M6)(s,t):a;return{integrations:(0,i.Fd)((0,i.xM)(m)),setSearchTerm:l}};var m=n(58205);const d=e=>{let{flavour:t}=e;const[n,i]=(0,o.x9)(),{history:c,setHistory:d}=(0,o.b8)(),u=(0,o.GT)(),{setSearchTerm:g}=s(),p=(0,r.useCallback)((e=>{g(e),e?(n||i(c),d([])):n&&d(n)}),[n]),h=(0,r.useMemo)((()=>{var e;return null===(e=m.jI[t])||void 0===e?void 0:e.search}),[t]),v=(null===h||void 0===h?void 0:h.wrapperProps)||{},E=(null===h||void 0===h?void 0:h.inputProps)||{};return r.createElement(l.Flex,v,r.createElement(l.SearchInput,(0,a.A)({value:u,onChange:p,placeholder:"Search through 800+ integrations",dataGa:"integrations::searchTerm::".concat(t)},E)))},u=e=>{let{flavour:t,...n}=e;const o=(0,r.useMemo)((()=>{var e;return null===(e=m.vD[t])||void 0===e?void 0:e.headerContent}),[t]),i=(0,r.useMemo)((()=>{var e;return null===(e=m.jI[t])||void 0===e?void 0:e.header}),[t]),c=(null===i||void 0===i?void 0:i.wrapperProps)||{};return r.createElement(l.Flex,(0,a.A)({column:!0,alignItems:"center",justifyContent:"center",gap:2,width:{min:"500px"}},c),o&&r.createElement(o,n),r.createElement(d,{flavour:t}))};var g=n(88773),p=(n(41393),n(81454),n(63950)),h=n.n(p),v=n(3705),E=n(73700),y=n(74112),f=n(87659);const b=e=>{let{categories:t=[],level:n=0,flavour:l}=e;return r.createElement(r.Fragment,null,t.map((e=>r.createElement(k,{key:e.id,category:e,level:n+1,flavour:l}))))},x=e=>{let{subcategories:t=[],popular:n=[],noPopular:a=[],limitResults:o=0,level:i,flavour:c}=e;const[s,m]=(0,f.A)(o),d=o&&!s?a.slice(0,o):a,u=(0,r.useMemo)((()=>I(2)+4),[]);return r.createElement(l.Flex,{column:!0},r.createElement(b,{categories:n,level:i,flavour:c}),r.createElement(b,{categories:d,level:i,flavour:c}),!!o&&o0&&void 0!==arguments[0]?arguments[0]:1)},k=e=>{let{isSidebarCollapsed:t,category:n={},level:a=1,flavour:i,maxItemsPerCategory:c={}}=e;const s=(0,o.j6)(),{selectedCategory:d,pushCategory:u}=(0,o.b8)(),{id:p,name:h}=n,v=m.aw[p],{categories:E,popular:b,rest:k}=(0,r.useMemo)((()=>(0,y.YK)((0,y.PQ)({category:n,categories:s}))),[n,s]),A=(0,r.useMemo)((()=>{var e;const t=null===(e=c[p])||void 0===e?void 0:e.limit,n=b.length+1;return t>1?t-n:0}),[c,b]),F=p==(null===d||void 0===d?void 0:d.id),S=(0,r.useMemo)((()=>I(a)),[a]),[T,P]=(0,f.A)(),j=(0,o.q2)(),M=()=>{j(),u(n),(0,C.H)("integrations","select-category","sidebar","category-id:".concat(n.id)),E.length&&P()};return t?r.createElement(l.Flex,{alignItems:"center",justifyContent:"center",padding:[2]},r.createElement(w.A,{content:h,align:"right"},v&&r.createElement(l.Icon,{name:v,color:F?"primary":"text",cursor:"pointer",onClick:M}))):r.createElement(l.Flex,{column:!0},r.createElement(g.Av,{className:1==a?"top-level":"",gap:a>1?1:2,padding:[2,2,2,S],onClick:M,dataGa:"integrations::click-category-".concat(h,"::").concat(i),background:a>1&&F?"integrationMenuItemHover":null,alignItems:"center"},v&&r.createElement(l.Icon,{name:v,size:"small",color:F?"primary":"text"}),a>1&&r.createElement(g.oO,{name:"chevron_right",rotate:T?1:0,size:"small",color:"textLite",cursor:"pointer",disabled:!E.length}),r.createElement(l.TextBig,null,h)),!!E.length&&(T||1==a)&&r.createElement(x,{subcategories:E,popular:b,noPopular:k,limitResults:A,level:a,flavour:i}))},A=e=>r.createElement(l.Box,(0,a.A)({as:"hr",height:"1px",width:"100%",sx:{borderWidth:"1px 0px 0px 
0px",borderColor:"borderSecondary",borderStyle:"solid"}},e)),F={collapsed:{alignItems:"center",justifyContent:"center",padding:[2]},expanded:{justifyContent:"end",padding:[0,2,0,0],position:"sticky",top:"0"}},S={collapsed:{icon:"chevron_right",iconSize:"large",flavour:"borderless",small:!0},expanded:{icon:"chevron_left",iconSize:"small",flavour:"hollow",tiny:!0}},T=e=>{let{isSidebarCollapsed:t,onClick:n=h()}=e;const o=(0,r.useMemo)((()=>t?"collapsed":"expanded"),[t]);return r.createElement(l.Flex,(0,a.A)({width:"100%"},F[o]),r.createElement(l.Button,(0,a.A)({neutral:!0,onClick:n},S[o])))},P=(0,E.s)(300,y.vF),j=e=>{let{flavour:t,isSidebarCollapsed:n,toggleSidebar:a=h()}=e;const[i,{height:c}]=(0,v.A)(),s=(0,o.Ss)(),m=(0,o.j6)(),[d,u]=(0,r.useState)({});return(0,r.useEffect)((()=>{P({height:c,topLevelCategories:s,categories:m,setState:u})}),[c,s,m]),r.createElement(l.Flex,{ref:i,column:!0,gap:2,overflow:{vertical:"auto"}},r.createElement(T,{isSidebarCollapsed:n,onClick:a}),s.map(((e,a)=>r.createElement(l.Flex,{key:e.id,column:!0,gap:2},r.createElement(k,{isSidebarCollapsed:n,category:e,maxItemsPerCategory:d,flavour:t}),!n&&a{const{pop:e}=(0,o.b8)(),t=(0,r.useCallback)((()=>{e(),(0,C.H)("integrations","navigate-back","breadcrumb")}),[e]);return r.createElement(l.Button,{label:"Back",icon:"arrow_left",iconColor:"primary",iconSize:"small",flavour:"borderless",onClick:t})};n(25440);var N=n(8711);const H=e=>{let{iconProps:t,...n}=e;return r.createElement(l.Flex,(0,a.A)({width:"74px",height:"74px",alignItems:"center",justifyContent:"center",background:"primary",round:"100%"},n),r.createElement(l.Icon,(0,a.A)({name:"integrations",color:"white",size:"large"},t)))},R=(0,N.default)(H).withConfig({displayName:"fallbackIcon__PlaceholderIcon",componentId:"sc-182ui8v-0"})(["filter:blur(3px);background:",";opacity:0.7;"],(0,l.getColor)("mainBackground")),L=()=>r.createElement(R,{iconProps:{color:"textLite"}}),z=H;var D=n(4659);const G=e=>{let{name:t,icon:n}=e;const[a,o]=(0,r.useState)(),i=(0,r.useCallback)((()=>o(!0)),[]);return r.createElement(l.Flex,{alignItems:"center",gap:1},n&&(a?r.createElement(z,{width:"18px",height:"18px",iconProps:{size:void 0,with:"12px",height:"12px"}}):r.createElement("img",{src:n,alt:"".concat(t,"-logo"),height:"18px",onError:i})),r.createElement(l.Text,null,t))},W=e=>{let{category:t={},index:n=0}=e;const{history:a,selectedIntegration:i,pushCategory:c}=(0,o.b8)();return!i&&n==a.length-1?r.createElement(l.Text,null,t.name):r.createElement(D.A,{onClick:()=>{c(t),(0,C.H)("integrations","select-category","breadcrumb","category-id:".concat(t.id))}},t.name)},q=e=>{let{flavour:t}=e;const{history:n}=(0,o.b8)(),a=n.map((e=>{let{name:t}=e;return t})).join("-").replace(" ","");return r.createElement(l.Flex,{gap:2,dataGa:"integrations::view-integration-".concat(a,"::").concat(t)},n.map(((e,t)=>r.createElement(l.Flex,{key:e.id,gap:2},"category"==e.type?r.createElement(W,{category:e,index:t}):r.createElement(G,e),t{let{url:t=""}=e;return t?r.createElement(D.A,{href:t,rel:"noopener noreferrer",target:"_blank"},r.createElement(w.A,{content:"Do you see something wrong? 
Edit the contents of this resource here."},r.createElement(l.Flex,{alignItems:"center",gap:1},r.createElement(l.Icon,{color:"primary",name:"pencilOutline",width:"12px",height:"12px"}),r.createElement(l.Text,{color:"primary"},"Edit")))):null};var Y=n(89009);const K=()=>{const{integrations:e}=s(),t=(0,o.q2)(),n=(0,o.hh)(),{setHistory:a}=(0,o.b8)(),i=(0,r.useCallback)((()=>{t(),n&&a(n)}),[]);return r.createElement(l.Flex,{alignItems:"center",justifyContent:"between"},r.createElement(l.Flex,{alignItems:"center",gap:4},r.createElement(l.TextBig,null,"Found ",e.length," results"),r.createElement(l.Button,{label:"Clear",icon:"x",iconColor:"primary",iconSize:"small",flavour:"borderless",onClick:i})))};var V=n(51220);n(17333),n(98992),n(54520);const J=N.default.img.withConfig({displayName:"progressiveImage__StyledImage",componentId:"sc-16k6xv6-0"})(["max-width:80%;max-height:74px;"]),Q=e=>{let{src:t,Placeholder:n,onError:l=h(),...o}=e;const[i,c]=(0,f.A)(!0);return(0,r.useEffect)((()=>{const e=new Image;e.src=t,e.onload=()=>c(),e.onerror=e=>l(e)}),[]),i&&n?r.createElement(n,null):r.createElement(J,(0,a.A)({src:t},o))},Z=e=>{let{isCommunity:t,...n}=e;return r.createElement(l.Flex,(0,a.A)({alignItems:"center",gap:1,position:"absolute",top:"-20px",right:"0",padding:[0,2]},n),r.createElement(w.A,{content:t?"This integration is maintaned by the community":"This integration is maintained by Netdata"},r.createElement(l.TextMicro,{strong:!0,color:"textDescription"},t?"by Community":"by Netdata")),!t&&r.createElement(l.Icon,{name:"favorites",color:"primary",width:"14px",height:"14px"}))},$=e=>{let{integration:t={},...n}=e;const{selectedCategory:i,pushIntegration:s}=(0,o.b8)(),m=(0,o.j6)(),[d,u]=(0,f.A)(),{flattenedCategory:p,community:h}=t,v=(0,c.A)(),{tree:E,topLevelCategory:b}=(0,y.fk)({category:p,getCategoryById:v}),x=(0,r.useMemo)((()=>E.slice(0,2).filter(Boolean).map((e=>{let{name:t}=e;return t})).join(" / ")),[E,b]),w=(0,y.q1)((null===i||void 0===i?void 0:i.id)||(null===p||void 0===p?void 0:p.id),m),[I,k]=(0,r.useState)(),A=(0,r.useCallback)((()=>k(!0)),[]);return r.createElement(g.Zp,(0,a.A)({alignItems:"center",justifyContent:"center",height:"100%",position:"relative",overflow:"hidden",cursor:"pointer",padding:[2],onMouseEnter:u,onMouseLeave:u,onClick:()=>{s(t),(0,C.H)("integrations","select-integration","integration-tile","integration-id:".concat(t.id))},round:"2px"},n),r.createElement(g.AG,{name:w,className:d?"hover":"",width:"190px",height:"190px",color:"textLite"}),r.createElement(l.Flex,{width:"100%",alignItems:"center",justifyContent:"center",column:!0,gap:2,zIndex:10},r.createElement(l.Flex,{justifyContent:"center",column:!0,gap:1},r.createElement(l.H3,{textAlign:"center",strong:!1},t.name),x&&r.createElement(l.TextSmall,{textAlign:"center"},x)),t.icon&&!I&&r.createElement(Q,{Placeholder:L,src:t.icon,alt:"".concat(t.name,"-logo"),onError:A}),I&&r.createElement(z,null),t.description?r.createElement(g.BT,{className:d?"hover":"",alignItems:"center",justifyContent:"center",width:"100%",padding:[3],background:"mainBackground"},r.createElement(Z,{isCommunity:h}),r.createElement(l.Text,{textAlign:"center"},t.description)):r.createElement(Z,{isCommunity:h,bottom:"4px",top:"unset"})))},U=(0,r.memo)($),X=e=>{let{integrations:t=[],itemsPerRow:n=4,rowHeight:l=200}=e;const a=(0,r.useRef)(),o=(0,r.useMemo)((()=>(e=>{let{integrations:t=[],itemsPerRow:n}=e;const r=[];let l=[];for(let 
a=0;a0&&r.push(l),r})({integrations:t,itemsPerRow:n})),[t,n]),i=(0,r.useCallback)((()=>l),[l]),c=(0,V.Te)({count:o.length,getScrollElement:()=>a.current,enableSmoothScroll:!1,estimateSize:i});return r.createElement("div",{ref:a,style:{height:"100%",overflow:"auto",paddingRight:"16px"}},r.createElement("div",{style:{minHeight:"".concat(c.getTotalSize(),"px"),width:"100%",position:"relative"}},c.getVirtualItems().map((e=>r.createElement("div",{key:e.key,style:{position:"absolute",top:0,left:0,width:"100%",transform:"translateY(".concat(e.start,"px)"),overflow:"hidden"},ref:c.measureElement,"data-index":e.index},r.createElement(g.$c,{itemsPerRow:n,rowHeight:l},(o[e.index]||[]).map((e=>r.createElement(U,{width:"100%",height:"100%",key:e.flattenedKey||e.id,integration:e})))))))))},ee=()=>r.createElement(l.Flex,{width:"100%",column:!0,alignItems:"center",margin:[8,0,0,0]},r.createElement(l.Flex,{column:!0,justifyContent:"center",alignItems:"center",margin:[0,0,6]},r.createElement(l.H3,{margin:[0,0,4]},"No integrations to display"),r.createElement(l.Text,{color:"textDescription",textAlign:"center"},"There are currently no integrations under this category.",r.createElement("br",null),"Try another category or use the search bar to find what you're looking for.")));var te=n(45463);const ne="(min-width: 1600px)",re="(max-width: 1599px)",le="(max-width: 1299px)",ae={large:{itemsPerRow:4,rowHeight:200},medium:{itemsPerRow:3,rowHeight:200},small:{itemsPerRow:2,rowHeight:190}};var oe=n(63314);const ie=(0,r.memo)((e=>{let{selectedCategory:t,searchTerm:n,integrations:o}=e;const{itemsPerRow:i,rowHeight:c}=(()=>{const e=(0,te.A)(ne),t=(0,te.A)(re),n=(0,te.A)(le);return ae[e?"large":t&&!n?"medium":"small"]||{}})();return r.createElement(l.Flex,{column:!0,gap:4,width:"100%",height:"100%",overflow:{vertical:"auto"}},r.createElement(l.Flex,{width:"100%",position:"sticky",top:"0",background:"mainBackground",zIndex:15},n?r.createElement(K,null):r.createElement(l.Flex,{column:!0,gap:2},r.createElement(l.H1,{strong:!1},null===t||void 0===t?void 0:t.name),r.createElement(l.Text,null,null===t||void 0===t?void 0:t.description))),o.length?r.createElement(oe.Ay,(0,a.A)({mode:"IntegrationsList"},n?{searchTerm:n}:{}),r.createElement(X,{integrations:o,itemsPerRow:i,rowHeight:c})):r.createElement(oe.Ay,(0,a.A)({mode:"NoIntegrations"},n?{searchTerm:n}:{}),r.createElement(ee,null)))})),ce=e=>{let{flavour:t}=e;const{selectedCategory:n}=(0,o.b8)(),l=(0,o.GT)(),{integrations:a}=s();return r.createElement(ie,{flavour:t,selectedCategory:n,searchTerm:l,integrations:a})};var se=n(24695),me=n(14125),de=n(47130);const ue=(0,n(92155).A)((0,de.A)(l.Button)),ge=e=>{let{itemId:t,...n}=e;const l=(0,me.l)();return t?r.createElement(ue,(0,a.A)({small:!0,icon:"gear",onClick:()=>l(t),label:"Configure",tooltip:"Add a new data collection job or edit existing jobs for this collector",payload:{dyncfgId:t}},n)):null};var pe=n(27467),he=n(38819);const ve=()=>{var e,t;const[n,a]=(0,r.useState)("calc(100% - 
133px)"),{selectedIntegrationTab:i}=(0,he.PP)(),[,c]=(0,pe.N9)("selectedIntegrationTab"),[s,m]=(0,r.useState)(i||0),{selectedIntegration:d={}}=(0,o.b8)(),{overview:u,metrics:g,alerts:p,setup:h,troubleshooting:E,relatedResources:y,dyncfgId:f}=d,[b,{height:x}]=(0,v.A)(),w=(0,r.useMemo)((()=>[u?{id:"overview",label:"Overview",content:u}:null,g?{id:"metrics",label:"Metrics",content:g}:null,p?{id:"alerts",label:"Alerts",content:p}:null,h?{id:"setup",label:"Setup",content:h}:null,E?{id:"troubleshooting",label:"Troubleshooting",content:E}:null].filter(Boolean)),[u,g,p,h,E,y]);(0,r.useLayoutEffect)((()=>{x&&a("".concat(x-70,"px"))}),[x]);return r.createElement(oe.Ay,{tab:null===(e=w[s])||void 0===e?void 0:e.label,delay:0},r.createElement(l.Flex,{ref:b,column:!0,gap:2,width:"100%",height:"100%",margin:[4,0,0,0]},r.createElement(l.Flex,{border:{side:"bottom",size:"1px",color:"borderSecondary"},padding:[0,4],justifyContent:"between"},r.createElement(l.Tabs,{selected:parseInt(s),onChange:e=>{var t;c(e),m(e),(0,C.H)("integrations","select-integration-tab","integration-view","selected-tab:".concat(null===(t=w[e])||void 0===t?void 0:t.label))}},w.map((e=>{let{label:t}=e;return r.createElement(l.Tab,{key:t,label:r.createElement(l.Text,null,t),minWidth:"auto",maxWidth:"auto",padding:[1,4],background:"mainBackground",small:!0})}))),r.createElement(ge,{itemId:f})),r.createElement(l.Flex,{width:"100%",height:n,overflow:{vertical:"auto"},padding:[2,0]},r.createElement(se.A,null,null===(t=w[s])||void 0===t?void 0:t.content))))},Ee=e=>{let{integration:t={}}=e;return r.createElement(r.Fragment,null,t.description&&r.createElement(l.Flex,{margin:[4,0,0,0]},r.createElement(l.Text,null,t.description)),r.createElement(ve,null))},ye=e=>{let{integration:t={},topLevelCategory:n={},isNightly:l}=e;const a=(0,r.useMemo)((()=>(e=>m.rP[null===e||void 0===e?void 0:e.id]||Ee)(n)),[n]);return r.createElement(oe.Ay,{mode:"IntegrationView",integrationId:null===t||void 0===t?void 0:t.id},r.createElement(a,{integration:t,isNightly:l}))};var fe=n(59778);const be=e=>{let{flavour:t}=e;const n=(0,_.ap)("plan"),a=(0,c.A)(),{level:i,selectedCategory:s,selectedIntegration:m}=(0,o.b8)(),{flattenedCategory:d}=m||{},u=d||s,{topLevelCategory:g}=(0,y.fk)({category:u,getCategoryById:a}),p="deploy"==(null===g||void 0===g?void 0:g.id),[h,v]=(0,r.useState)((0,fe.M)(n)),E=m||i>2;return r.createElement(l.Flex,{column:!0,gap:2,width:"100%",overflow:m&&!p?{}:{vertical:"auto"}},r.createElement(l.Flex,{column:!0,alignItems:"start",gap:2,padding:[0,2,0,0]},E&&r.createElement(B,null),r.createElement(l.Flex,{width:"100%",alignItems:"end",justifyContent:"between"},r.createElement(l.Flex,{alignItems:"center",gap:4},r.createElement(q,{flavour:t}),(null===m||void 0===m?void 0:m.editLink)&&r.createElement(O,{url:m.editLink})),m&&p&&r.createElement(Y.A,{isNightly:"nightly"==h,toggleNightly:e=>{const t=e.target.checked?"nightly":"stable";v(t)}}))),m?r.createElement(ye,{integration:m,topLevelCategory:g,isNightly:"nightly"==h,flavour:t}):r.createElement(ce,{flavour:t}))};var xe=n(17208);const we=(0,N.default)(l.Flex).withConfig({displayName:"cncfReference__Wrapper",componentId:"sc-1ntasha-0"})(["position:absolute;bottom:0;"]),Ce=()=>r.createElement(we,{width:"100%",alignItems:"start",justifyContent:"end",background:"mainBackground",gap:2,padding:[6]},r.createElement(l.Flex,{width:"500px"},r.createElement(l.TextSmall,{textAlign:"right"},"Netdata is a member of the Cloud Native Computing Foundation (CNCF), and it is one of the most starred projects in the"," 
",r.createElement(D.A,{alignSelf:"start",Component:l.TextSmall,href:"https://landscape.cncf.io/?item=observability-and-analysis--observability--netdata",rel:"noopener noreferer",target:"_blank"},"CNCF landscape"),".")),r.createElement(xe.go,null));var Ie=n(6323),ke=n(47767);const Ae=e=>{let{flavour:t=m.Jr}=e;const[n,,a,i]=(0,f.A)();(()=>{const e=(0,r.useRef)(!0),{pathname:t}=(0,ke.zy)(),n=(0,o.yv)();(0,r.useEffect)((()=>{e.current||(e.current=!1,n())}),[t])})();const[c,s]=(0,f.A)();return r.createElement(oe.Ay,{feature:"Integrations",logImpression:!1},r.createElement(l.Flex,{position:"relative",height:"100%",column:!0,background:"mainBackground"},r.createElement(l.Flex,{column:!0,alignItems:"center",justifyContent:"center"},r.createElement(u,{flavour:t,onInvite:a})),r.createElement(g.D6,{flavour:t,isSidebarCollapsed:c},r.createElement(M,{flavour:t,isSidebarCollapsed:c,toggleSidebar:s}),r.createElement(be,{flavour:t})),t==m.D_.homePage&&r.createElement(Ce,null)),n&&r.createElement(Ie.d,{onClose:i}))}}}]); \ No newline at end of file diff --git a/src/web/gui/v2/7332.3acf93dcfa52c7f1bc18.chunk.js b/src/web/gui/v2/7332.3acf93dcfa52c7f1bc18.chunk.js deleted file mode 100644 index 7c82fbac8..000000000 --- a/src/web/gui/v2/7332.3acf93dcfa52c7f1bc18.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="d40641ff-687c-4418-8176-e2eea4e305e2",e._sentryDebugIdIdentifier="sentry-dbid-d40641ff-687c-4418-8176-e2eea4e305e2")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[7332],{47332:(e,t,a)=>{a.r(t),a.d(t,{Dropdown:()=>j,default:()=>R});a(62953);var d=a(96540),o=a(8711),r=a(83199),n=a(43375),i=a(61360);var s=a(74618),l=a(15327),c=a(35304),u=a(21591),p=a(68980),b=a(28738),m=a(38257);const g=e=>(0,o.keyframes)(["from{border-color:",";border-width:2px;}to{border-color:",";border-width:1px;}"],(0,r.getColor)("primary")(e),(0,r.getColor)("textLite")(e)),h=(0,o.default)(r.Flex).withConfig({displayName:"dropArea__AnimatedFlex",componentId:"sc-vs4sjk-0"})(["animation:",";"],(e=>{let{animate:t}=e;return t?(0,o.css)([""," 0.15s linear infinite"],g):""})),f={},v=(0,d.memo)((e=>{let{item:t=f,itemProps:a=f}=e;const{id:o,name:i,isNew:s}=t,{overDashboardId:l}=a,{setNodeRef:c,isOver:u,over:b}=(0,n.zM)({id:o,data:{droppable:!0,dropArea:!0,dashboardId:o,onlyPointerWithin:!0}}),{processing:m,loading:g}=(0,p.fz)(o),v=m||g;return d.createElement(r.Flex,{padding:[2],flex:!0,background:l===o&&"successSemi"},d.createElement(h,{alignItems:"center",justifyContent:"center",padding:[2],border:{side:"all",type:"dashed",color:u?"primary":"textLite"},round:!0,ref:c,width:"100%",height:15,animate:(null===b||void 0===b?void 0:b.id)===o&&l!==o},v?d.createElement(r.Text,{color:"textLite"},"Loading..."):d.createElement(r.TextBig,{color:l===o?"successText":"text"},s?"":d.createElement(d.Fragment,null,"Add in 
"),d.createElement(r.TextBig,{strong:!0,color:s?"primary":l===o?"successText":"text"},i))))})),w=((0,o.default)(r.Drop).attrs({align:{top:"bottom",left:"left"},animation:!0,backdrop:!0,column:!0,padding:[2,0],background:"dropdown",round:1,overflow:{vertical:"auto"},margin:[.5,0,0],width:30}).withConfig({displayName:"list__Dropdown",componentId:"sc-s8yppy-0"})([""]),(0,d.memo)((e=>{let{dashboards:t,selected:a,setSelected:o,itemProps:n}=e;const i=(0,m.A)();return d.createElement(d.Fragment,null,d.createElement(v,{item:{id:"new",name:"Create a new dashboard",isNew:!0},itemProps:n}),d.createElement(r.MenuDropdown,{items:t,Item:v,value:a,hasSearch:!1,onItemClick:o,hideShadow:!0,itemProps:n,overflow:"hidden",ref:i}))})));var x=a(86008),D=a(3914),y=a(69765),I=a(91517),E=a(47767),A=a(80925),C=a(71835),S=a(79731),k=a(50876),_=a(2025),F=a(67544),L=a(4659),N=a(18202),M=a(54621);const P=e=>{let{onDone:t}=e;const a=(0,E.Zp)(),o=(0,D.vt)(),r=(0,y.ID)(),n=(0,D.ns)(o,"slug"),i=(0,y.wz)(r,"slug"),s=(0,A.e)(),[l,c]=(0,C.A)(),{sendLog:u}=(0,k.A)(),p=(0,M.tV)(),b=(0,_.DH)(null,{onSuccess:e=>{p({title:e.slug,id:e.id,destination:e.slug,type:"dashboards",droppable:!0,droppableProps:{dashboardId:e.id,dropArea:!0}}),l({header:"Added",text:d.createElement(d.Fragment,null,"Chart added in"," ",d.createElement(L.A,{as:"a",strong:!0,onClick:()=>a("/spaces/".concat(n,"/rooms/").concat(i,"/dashboards/").concat(e.slug))},e.name)," ","dashboard.")}),setTimeout((()=>t()),2e3),u({feature:"drag-in-dashboard",isSuccess:!0})},onFail:e=>{var a;const d=null===e||void 0===e||null===(a=e.response)||void 0===a?void 0:a.data,o=(0,S.o)(null===d||void 0===d?void 0:d.errorMsgKey)||(null===d||void 0===d?void 0:d.errorMessage)||"Something went wrong";c({message:o}),t(),u({feature:"drag-in-dashboard",isFailure:!0,message:o})}}),m=(0,F.nM)(),g=(0,F.Dn)(o,r);return(0,d.useCallback)(((e,t)=>{const a=s.getNode({id:null===t||void 0===t?void 0:t.itemId}),d=(0,N.A)(a.getAttributes());"new"!==e?m(d.contextScope,{...d,dashboardId:e,itemLayout:t},(()=>b(e))):g({name:"[Untitled]"},(e=>{m(d.contextScope,{...d,dashboardId:e.id,itemLayout:t},(()=>b(e.id)))}))}),[g,m])},T=(0,o.default)(r.Flex).attrs({background:"mainBackground",padding:[2],overflow:"auto",height:"80%",width:"600px"}).withConfig({displayName:"dropDashboard__ScaledDownContainer",componentId:"sc-zd7fpn-0"})(["box-shadow:0 0 0 1px rgba(63,63,68,0.05),-1px 0 15px 0 rgba(34,33,81,0.01),0px 15px 15px 0 rgba(34,33,81,0.25);"]),z=e=>{let{id:t,setOverDashboard:a}=e;const o=(0,D.vt)(),n=(0,y.ID)(),i=(0,D.ns)(o,"slug"),s=(0,y.wz)(n,"slug"),l=(0,I.A)(t,{spaceId:o,spaceSlug:i,roomId:n,roomSlug:s}),c=P({onDone:a});return t?d.createElement(r.Flex,{flex:!0,width:"100%",height:"100%",alignItems:"center",justifyContent:"center"},d.createElement(T,null,l?d.createElement(b.A,{title:"Loading dashboard..."}):d.createElement(x.A,{id:t,containerWidth:600,trashable:!1,containerDndProps:{droppable:!0},onDragEnd:(e,d)=>{if(!d)return a();c(t,d)}}))):null},j=(0,o.default)(r.Flex).attrs({column:!0,background:"dropdown",width:{max:"auto",min:100,base:100}}).withConfig({displayName:"dropInDashboard__Dropdown",componentId:"sc-12fd9r3-0"})(["box-shadow:0 0 0 1px rgba(63,63,68,0.05),-1px 0 15px 0 rgba(34,33,81,0.01),0px 15px 15px 0 rgba(34,33,81,0.25);"]),R=()=>{const[e,t]=(0,d.useState)(),[a,o]=(0,d.useState)();(0,c.A)();const m=(0,u.q)();let g=(0,p.Sf)(m);g=(0,d.useMemo)((()=>[...g].sort(((e,t)=>new Date(e.updatedAt)2&&void 
0!==arguments[2]?arguments[2]:[];const[o,r]=(0,d.useState)();return(0,i.A)((()=>r(e)),t,[e,...a]),o}(null===a||void 0===a?void 0:a.id,500),v=P({onDone:o});(0,n.E5)({onDragMove:e=>{let{active:t,over:a}=e;if(null!==a&&void 0!==a&&a.id&&t)if(a.data.current.dropinDashboards&&t.data.current.dashboardable)o((e=>e||{active:t,dropinDashboards:!0}));else if(a.data.current.dashboardId&&t.data.current.dashboardable){if("new"===a.data.current.dashboardId)return;o((e=>e&&(null===e||void 0===e?void 0:e.id)===a.data.current.dashboardId?e:{...e,id:a.data.current.dashboardId,active:t}))}},onDragEnd:e=>{let{active:t,over:a}=e;null!==a&&void 0!==a&&a.id&&t&&(a.data.current.dashboardId&&t.data.current.dashboardable?v(a.data.current.dashboardId,t.data.current):setTimeout((()=>o()),1e3))},onDragCancel:()=>{o()}});const x=(0,d.useMemo)((()=>({overDashboardId:f})),[f]);if(null===a||void 0===a||!a.active)return null;const{isItem:D,isContainer:y,dashboardable:I}=a.active.data.current;return I&&(D||y)?d.createElement(r.Layer,{position:"right",full:!0,backdrop:!0,shadow:!0,justifyContent:"end"},d.createElement(z,{id:f,setOverDashboard:o}),(null===a||void 0===a?void 0:a.dropinDashboards)&&d.createElement(j,null,d.createElement(s.z,{title:"Drop in a dashboard",onClose:()=>o()}),d.createElement(l.Yv,{gap:2,height:"100%",overflow:"hidden",background:"dropdown"},h?d.createElement(w,{dashboards:g,selected:e,setSelected:t,itemProps:x}):d.createElement(b.A,{title:"Loading dashboards..."})))):null}}}]); \ No newline at end of file diff --git a/src/web/gui/v2/7340.25dce1c5cc66b613700f.chunk.js b/src/web/gui/v2/7340.25dce1c5cc66b613700f.chunk.js deleted file mode 100644 index a36aa807e..000000000 --- a/src/web/gui/v2/7340.25dce1c5cc66b613700f.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="317c0ddc-8886-460f-a296-29c55519a411",e._sentryDebugIdIdentifier="sentry-dbid-317c0ddc-8886-460f-a296-29c55519a411")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[7340],{26751:(e,t,n)=>{n.d(t,{A:()=>o});var a=n(58168),r=n(96540),l=n(83199);const o=e=>{let{message:t,title:n,footer:o,...i}=e;const c=(null===i||void 0===i?void 0:i["data-testid"])||"functionError";return r.createElement(l.Flex,(0,a.A)({alignItems:"center",column:!0,"data-testid":c,flex:!0,gap:3,justifyContent:"center",padding:[0,20]},i),r.createElement(l.H3,{"data-testid":"".concat(c,"-title")},n),r.createElement(l.TextBig,{color:"textDescription","data-testid":"".concat(c,"-message")},t),o)}},97340:(e,t,n)=>{n.r(t),n.d(t,{default:()=>Ma});n(62953);var a=n(96540),r=n(83084),l=n(67990),o=n(27467),i=n(50466),c=n(99292),s=n(99739),d=n(28738),u=n(3705),m=n(58168),f=(n(3064),n(98992),n(72577),n(83199)),p=n(11128),g=n(47762),h=n(3914),v=n(20378),y=n(47767);const x=e=>{const t=(0,p.u7)(e),{state:n}=(0,y.zy)();return(0,a.useEffect)((()=>{null!==n&&void 0!==n&&n.nodeId&&t([n.nodeId])}),[]),null===n||void 0===n?void 0:n.nodeId};var E=n(4659),b=n(26751);const C=e=>{let{paramsKey:t,...n}=e;const r=(null===n||void 0===n?void 0:n["data-testid"])||"capableNodesError",l=(0,p.w7)(t),o=(0,g.xY)(null===l||void 0===l?void 0:l[0],"name");return 
a.createElement(f.Flex,(0,m.A)({alignItems:"center",flex:!0,gap:8,justifyContent:"center"},n,{"data-testid":r}),a.createElement(f.Icon,{"data-testid":"".concat(r,"-icon"),height:"230px",name:"unreachableNode",width:"230px"}),a.createElement(b.A,{"data-testid":"".concat(r,"-details"),gap:2,title:o?"".concat(o," is not configured for Functions"):"Your nodes are not configured for Functions",message:a.createElement(a.Fragment,null,"Learn how to configure your nodes"," ",a.createElement(E.A,{Component:f.TextBig,"data-ga":"functions-capable-nodes-error::click-docs-link::functions-view","data-testid":"".concat(r,"-docsLink"),href:"https://learn.netdata.cloud/docs/nightly/operations/run-time-troubleshooting-with-functions#prerequisites",target:"_blank",rel:"noopener noreferrer"},"to support functions")),width:{max:"fit-content"}}))};var w=n(68741),k=n(40982),A=n(78459),I=(n(41393),n(14905),n(81454),n(8872),n(18121)),F=n(57605),T=n(29217),S=n(87659),K=n(18925),P=n(40933),z=n(44644),L=n(92155);const N={room:"space_new",space:"spaces_v2",personal:"user"},M=[{icon:N.personal,value:"personal",label:"Personal"},{icon:N.room,value:"room",label:"Room"},{icon:N.space,value:"space",label:"Space"}],O=(0,L.A)(f.Button),R=e=>{let{onSubmit:t,onClose:n}=e;const[r,l]=(0,a.useState)(""),[o,i]=(0,a.useState)("personal");return a.createElement(f.Modal,{onClickOutside:n,onEsc:n},a.createElement(f.ModalContent,{background:"dropdown"},a.createElement(f.ModalHeader,null,"Create a new settings snapshot",a.createElement(f.ModalCloseButton,{testId:"close-button",onClose:n})),a.createElement(f.ModalBody,null,a.createElement(f.Flex,{column:!0,width:80,gap:3},a.createElement(f.TextInput,{"data-testid":"name",label:"Name",onChange:e=>{let{target:t}=e;return l(t.value)},value:r}),a.createElement(f.Flex,{column:!0,gap:1},a.createElement(f.TextSmall,{strong:!0},"Scope"),a.createElement(f.Select,{options:M,value:M.find((e=>e.value===o)),onChange:e=>{let{value:t}=e;return i(t)},styles:{minWidth:"80px"}})))),a.createElement(f.ModalFooter,null,a.createElement(O,{disabled:""===r.trim(),label:"Create",onClick:()=>t({name:r,scope:o}),"data-testid":"btn-create",payload:{description:"Modal - Create Settings"}}))))},B={color:"textLite",margin:[0,1,0,0],width:"14px",height:"14px"},D=e=>{let{openForm:t,close:n}=e;return a.createElement(f.Flex,{padding:[2,1],border:{side:"top"},justifyContent:"center"},a.createElement(f.Button,{small:!0,icon:"save",onClick:()=>{t(),n()},"data-ga":"user-settings::click-create",title:"Create a new setting with your changes"},"Add new setting"))},_=e=>{let{id:t,close:n}=e;const r=(0,K.xS)(t,{onSuccess:n});return a.createElement(f.IconButton,{icon:"save",iconColor:"textLite",onClick:e=>{e.stopPropagation(),r({value:{}})},"data-ga":"user-settings::click-update",neutral:!0,padding:[0],title:"Update this setting with your changes"})},j=e=>{let{id:t,close:n}=e;const r=(0,K.z2)(t,{onSuccess:n});return a.createElement(f.IconButton,{icon:"trashcan",iconColor:"textLite",onClick:e=>{e.stopPropagation(),r()},"data-ga":"user-settings::click-delete",neutral:!0,padding:[0],title:"Delete this setting"})},W=e=>{let{item:{value:t,label:n,icon:r,disabled:l,onClick:o,...i},value:c,onItemClick:s,index:d,style:u,close:p,...g}=e;const h=c===t;return 
a.createElement(f.MenuItemContainer,(0,m.A)({"data-index":d,"aria-selected":h,disabled:l,selected:h,onClick:e=>{o&&o(e),s(t)}},i,g,{style:u,alignItems:"center",justifyContent:"between",padding:[1],overflow:"hidden"}),a.createElement(f.Flex,{alignItems:"center"},r,a.createElement(f.TextSmall,{whiteSpace:"normal",wordBreak:"break-word"},n)),a.createElement(f.Flex,{alignItems:"center",gap:.5},a.createElement(_,{id:t,close:p}),a.createElement(j,{id:t,close:p})))},V=e=>{let{disabled:t,type:n,types:r,entity:l,entities:o,...i}=e;r=Array.isArray(r)?r:n?[n]:[],o=Array.isArray(o)?o:l?[l]:[];const c=(0,P._)({params:{type:r,entity:o}}),[s,d]=(0,z.WY)({type:n,entity:l}),u=(e=>(0,a.useMemo)((()=>e.map((e=>({value:e.id,label:e.name,icon:a.createElement(f.Icon,(0,m.A)({name:N[e.scope]||N.personal},B)),"data-track":"setting-".concat(e.scope)})))),[e]))(c),[p,,g,h]=(0,S.A)(),v=(0,K.yK)({onSuccess:()=>{h(),close()}});return a.createElement(a.Fragment,null,a.createElement(f.Menu,{value:s,items:u,dropProps:{align:{top:"bottom",right:"right"},"data-toolbox":l},dropdownProps:{width:"200px"},onChange:d,"data-track":"user-settings",Item:W,Footer:e=>a.createElement(D,(0,m.A)({},e,{openForm:g}))},a.createElement(f.IconButton,(0,m.A)({icon:"settings_h",padding:[0],title:"User settings",disabled:t,"data-testid":"toolbox-addSettings"},i))),p&&a.createElement(R,{onSubmit:function(){return v({type:n,entity:l,path:"/",...arguments.length>0&&void 0!==arguments[0]?arguments[0]:{}})},onClose:h}))},q=(0,a.memo)(V);var H=n(93155),G=n(61360),U=n(8711);const Z=(0,U.default)(f.Flex).attrs({alignItems:"center",border:{side:"bottom",color:"borderSecondary"},padding:[2]}).withConfig({displayName:"styled__MenuItem",componentId:"sc-1bb2nho-0"})(["&:last-child{border:none;}"]),X=(0,U.default)(f.TextInput).withConfig({displayName:"styled__TextInput",componentId:"sc-1bb2nho-1"})(['margin:0 10px;min-width:unset;width:45px;> div{margin:0;}input::-webkit-outer-spin-button,input::-webkit-inner-spin-button{-webkit-appearance:none;margin:0;}input[type="number"]{-moz-appearance:textfield;}']),Y=a.createElement(f.Flex,{column:!0,width:{max:"500px"},gap:1},a.createElement(f.TextSmall,null,"Full data queries will always provide all the available facets with counters for your search, which depending on the size of the query could result in poor performance."),a.createElement(f.TextSmall,null,"i.e. 
Querying logs for multiple days with multitudes of entries."),a.createElement(f.TextSmall,null,"With this toggle \u201coff\u201d (default), queries will slice the data and will lower performance hit on your servers.")),$=e=>{let{onClose:t,targetRef:n,paramsKey:r,...l}=e;const c=(null===l||void 0===l?void 0:l["data-testid"])||"config",s=(0,i.Ol)({key:"acceptedParams",...r}).includes("slice"),[d,u]=(0,o.r$)("slice",{defaultValue:!0,flavour:"bool",...r}),[m,p]=(0,S.A)(!d);(0,G.A)((()=>{!!d===!!m&&u(!m)}),200,[d,m]);const g=(0,i.Ol)({key:"updateEvery",...r}),[h,v]=(0,o.r$)("pollingInterval",{...r,flavour:"int",defaultValue:g}),[y,x]=(0,a.useState)(h);return(0,G.A)((()=>{h!==y&&v(y)}),400,[h,y]),n.current?a.createElement(f.Drop,{align:{top:"bottom",right:"right"},animation:!0,background:"dropdown",close:t,column:!0,"data-testid":c,margin:[.5,0,0],onClickOutside:t,onEsc:t,round:1,target:n.current,width:58},s&&a.createElement(Z,{"data-testid":"".concat(c,"-slice-data"),justifyContent:"between"},a.createElement(f.Flex,{gap:1,alignItems:"center"},a.createElement(f.Text,{color:"textNoFocus","data-testid":"".concat(c,"-slice-data-label")},"Full data queries (slow)"),a.createElement(T.A,{plain:!0,content:Y,isBasic:!0,allowHoverOnTooltip:!0,stretch:"align"},a.createElement(f.Icon,{name:"information",color:"textDescription",width:"14px",height:"14px"}))),a.createElement(f.Toggle,{colored:!0,checked:m,"data-ga":"functions-config::auto-slice-data-switch::functions-view::".concat(m?"enabled":"disabled"),"data-testid":"".concat(c,"-slice-data-switch"),onChange:p})),a.createElement(Z,{"data-testid":"".concat(c,"-pollingInterval")},a.createElement(f.Text,{color:"textNoFocus","data-testid":"".concat(c,"-pollingInterval-label")},"Refresh every"),a.createElement(X,{"data-ga":"functions-config::polling-interval::functions-view","data-testid":"".concat(c,"-pollingInterval-input"),min:1,onChange:e=>{let{target:t}=e;const n=+t.value;n>0&&x(n)},size:"tiny",type:"number",value:y}),a.createElement(f.Text,{color:"textNoFocus","data-testid":"".concat(c,"-pollingInterval-units")},"seconds"))):null};var J=n(21290);const Q=e=>{let{paramsKey:t}=e;const{updatedAt:n}=(0,i.Ol)(t),{localeTimeString:r,localeDateString:l}=(0,J.$j)();return a.createElement(f.TextMicro,{color:"textLite"},"Last updated: ",l(n,{long:!0})," ",r(n,{secs:!0}))};var ee=n(51112);const te=U.default.div.withConfig({displayName:"reload__IconsContainer",componentId:"sc-gd414j-0"})(["position:relative;"]),ne=(0,U.default)(f.Icon).withConfig({displayName:"reload__SmallIcon",componentId:"sc-gd414j-1"})(["position:absolute;top:3px;left:3px;"]),ae=(0,U.keyframes)(["from{transform:rotate(0deg);}to{transform:rotate(359deg);}"]),re=(0,U.default)(f.IconButton).withConfig({displayName:"reload__AnimatedIcon",componentId:"sc-gd414j-2"})(["&& svg{animation:",";}animation:rotation 2s infinite linear;"],(e=>{let{animate:t}=e;return t?(0,U.css)([""," 3s linear infinite"],ae):""})),le=e=>{let{onRefresh:t,onCancel:n,dataGa:r,testId:l,loading:o}=e;const i=(0,ee.gC)(),[c,s]=(0,a.useState)(!1),[d,u]=(0,a.useState)(!1);return(0,a.useEffect)((()=>{if(!o)return s(!1),void u(!1);const 
e=setTimeout((()=>s(!0)),500),t=setTimeout((()=>u(!0)),3e3);return()=>{clearTimeout(e),clearTimeout(t)}}),[o]),a.createElement(f.Flex,{alignItems:"center",gap:1},a.createElement(te,null,a.createElement(ne,{name:i?"playSolid":"pauseSolid",width:"6px",height:"6px",color:i?"primary":"text"}),a.createElement(re,{animate:c,"data-testid":"".concat(l,"-refreshNowBtn"),"data-ga":"".concat(r,"::click-refresh-now::functions-view"),flavour:"hollow",icon:"refresh",onClick:()=>t({merge:!1}),disabled:i,padding:[1]})),a.createElement(f.Collapsible,{open:d,direction:"horizontal",duration:300},a.createElement(f.IconButton,{"data-testid":"".concat(l,"-cancelBtn"),"data-ga":"".concat(r,"::click-cancel::functions-view"),flavour:"hollow",warning:!0,icon:"x",onClick:()=>n(),padding:[1]})))},oe={fn:{dataGa:"functions-header",hasConfig:!0,Title:e=>{let{name:t,fn:n}=e;return n?t?"".concat(n," on ").concat(t):n:"Top"},titleTooltip:a.createElement(f.Flex,{width:{max:"500px"},alignItems:"center",flexWrap:!0},a.createElement(f.Text,null,"Netdata Functions enable detailed low-level monitoring at the edge. "),a.createElement(E.A,{Component:f.Text,"data-ga":"functions-header::click-docs-link::functions-view","data-testid":"docsLink",href:"https://learn.netdata.cloud/docs/nightly/concepts/netdata-functions",target:"_blank",rel:"noopener noreferrer"},"Read the Netdata functions documentation to learn more"),a.createElement(f.Text,null,"."))},logs:{dataGa:"logs-header",hasConfig:!0,Title:e=>{let{name:t,fn:n}=e;return n?t?"".concat(n," logs on ").concat(t):"".concat(n," logs"):"Logs"},titleTooltip:a.createElement(f.Flex,{width:{max:"500px"},alignItems:"center",flexWrap:!0},a.createElement(f.Text,null,"Explore system and applications logs. "),a.createElement(E.A,{Component:f.Text,"data-ga":"functions-header::click-docs-link::functions-view","data-testid":"docsLink",href:"https://learn.netdata.cloud/docs/logs/systemd-journal",target:"_blank",rel:"noopener noreferrer"},"Read the Netdata logs documentation to learn more"),a.createElement(f.Text,null,"."))},feed:{dataGa:"feed-header",Title:()=>"Events",titleTooltip:a.createElement(f.Flex,{width:{max:"500px"},alignItems:"center",flexWrap:!0},a.createElement(f.Text,null,"Troubleshoot faster with Netdata Events. 
Access topology and alert events across the room in one convenient location."," "),a.createElement(E.A,{Component:f.Text,"data-ga":"feed-header::click-docs-link::feed-view","data-testid":"docsLink",href:"https://learn.netdata.cloud/docs/nightly/concepts/events-feed",target:"_blank",rel:"noopener noreferrer"},"Read the Events feed documentation to learn more"),a.createElement(f.Text,null,"."))}},ie=(0,a.memo)((e=>{let{onRefresh:t,onCancel:n,paramsKey:r,...l}=e;const o=(null===l||void 0===l?void 0:l["data-testid"])||"functionsHeader",c=(0,a.useRef)(),{dataGa:s,hasConfig:d,titleTooltip:u,Title:m}=oe[r.extraKey]||oe.fn,[h,v,,y]=(0,S.A)(!1),x=(0,p.w7)(r),E=(0,g.xY)(null===x||void 0===x?void 0:x[0],"name"),{loaded:b,data:C,totalSize:w,actualSize:k,loading:A,loadingMore:I,aggregatedView:F}=(0,i.Ol)(r),K=(0,i.Ak)(r);return a.createElement(f.Flex,{"data-testid":o,justifyContent:"between"},a.createElement(f.Flex,{column:!0},a.createElement(f.Flex,{alignItems:"center","data-testid":"".concat(o,"-title"),gap:1},a.createElement(f.H3,null,a.createElement(m,{name:E,fn:K})),a.createElement(T.A,{plain:!0,content:u,isBasic:!0,allowHoverOnTooltip:!0,stretch:"align"},a.createElement(f.Icon,{name:"information",color:"textDescription",width:"16px",height:"16px"}))),a.createElement(f.Flex,{alignItems:"center",gap:2},b&&a.createElement(f.TextSmall,{color:"textLite",strong:!0},"(",!!F&&!!k&&a.createElement(a.Fragment,null,a.createElement(f.TextSmall,{color:"text",strong:!0},k||0," ",F.aggregatedLabel),a.createElement("span",null," \u2283 ")),w||(null===C||void 0===C?void 0:C.length)||0," ",(null===F||void 0===F?void 0:F.resultsLabel)||"results",")"),a.createElement(Q,{paramsKey:r}))),a.createElement(f.Flex,{gap:1,alignItems:"start"},a.createElement(le,{testId:o,dataGa:s,onRefresh:t,onCancel:n,loading:A||I}),d&&a.createElement(f.IconButton,{"data-testid":"".concat(o,"-ConfigBtn"),"data-ga":"".concat(s,"::click-config::functions-view"),flavour:"hollow",icon:"gear",ref:c,onClick:v,padding:[1]}),h&&a.createElement($,{"data-testid":"".concat(o,"-Config"),isOpen:h,onClose:y,targetRef:c,paramsKey:r}),H.EM&&a.createElement(q,{type:"function",path:"/view",entity:K,padding:[1],"data-testid":"".concat(o,"-SettingsBtn"),"data-ga":"".concat(s,"::click-config::functions-view"),flavour:"hollow"})))}),((e,t)=>e.onRefresh===t.onRefresh));n(25440);var ce=n(65570),se=n(98525);const de=(0,U.default)(f.Icon).attrs({color:"text",height:"16px",width:"16px"}).withConfig({displayName:"value__Arrow",componentId:"sc-1apuy8c-0"})(["rotate:",";transition:all 200ms ease;"],(e=>{let{rotate:t}=e;return t})),ue=function(e){let{transform:t,decimalPoints:n=0,units:r,defaultValue:l}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};const o=(0,se.py)();return(0,a.useMemo)((()=>{switch(t){case"number":return(0,se.iX)(e,{decimalPoints:n,units:r,defaultValue:l});case"duration":return(0,se.Sh)(e,{defaultValue:l});case"datetime":return o(e,{defaultValue:l});case"datetime_usec":return o(e,{defaultValue:l,usec:!0});default:return null===e||"undefined"===typeof e?l:e}}),[e,r])},me=e=>{let{description:t="",value:n,units:r=""}=e;return t?a.createElement(f.Flex,{column:!0,gap:1,justifyContent:"center"},a.createElement(f.TextSmall,null,t),a.createElement(f.TextSmall,{strong:!0,textAlign:"center"},n," ",r)):"".concat(n," ").concat(r)},fe=(0,U.default)(f.Flex).withConfig({displayName:"value__ValueContainer",componentId:"sc-1apuy8c-1"})(["",""],(e=>{let{overflowWrap:t}=e;return t&&"* {\n overflow-wrap: anywhere;\n white-space: pre-wrap;\n word-break: 
break-word;\n }\n "})),pe={debug:{strong:!0,color:"textLite"},normal:{},notice:{strong:!0},warning:{strong:!0,color:"warningText"},critical:{strong:!0,color:"errorText"}},ge=(0,U.default)(f.Text).attrs((e=>({...e,...pe[e.severity]||pe.normal}))).withConfig({displayName:"value__ValueLabel",componentId:"sc-1apuy8c-2"})([""]),he=(0,a.forwardRef)(((e,t)=>{let{icon:n,iconRotate:r,children:l,onClick:o,strong:i,testId:c="cell",textAlign:s,wrap:d,truncate:u=!0,rowOptions:f,...p}=e;return a.createElement(fe,(0,m.A)({cursor:o?"pointer":"inherit",gap:.5,onClick:o,overflow:"hidden",ref:t,width:{max:"100%"},flexWrap:!0,overflowWrap:d},p),n&&a.createElement(de,{name:n,rotate:r}),a.createElement(ge,(0,m.A)({"data-testid":"".concat(c,"-value"),strong:i,textAlign:s,truncate:!d&&u,whiteSpace:d?"wrap":"nowrap"},f),l))})),ve=e=>{let{description:t="",tooltipValue:n,value:r,valueOptions:l={},wrap:o,rowOptions:i,...c}=e;const s=ue(r,l);return a.createElement(T.A,(0,m.A)({content:!o&&a.createElement(me,{description:t,value:n||r,units:l.units}),"data-testid":"valueComponent",isBasic:!0},c),a.createElement(he,{wrap:o,rowOptions:i},s))},ye=e=>{let{value:t,...n}=e;return a.createElement(ve,(0,m.A)({value:t.value},n))},xe=e=>{let{description:t="",maxValue:n,strong:r,type:l,value:o,valueOptions:i={},wrap:c,...s}=e;const d=100*o/n,u="bar"===l,m=(null===s||void 0===s?void 0:s["data-testid"])||"progressValue",p=ue(o,i);return a.createElement(T.A,{content:a.createElement(me,{description:t,value:o,units:i.units}),isBasic:!0,stretch:"align"},a.createElement(f.Flex,{column:!0,"data-testid":m,gap:1,flexWrap:c,flex:!0},!u&&a.createElement(he,{strong:r,testid:m},p),null!==o&&a.createElement(f.ProgressBar,{background:"borderSecondary",border:"none",color:["green","netdata"],containerWidth:"100%","data-testid":"".concat(m,"-bar"),height:2,width:"".concat(d,"%")})))},Ee=e=>{let{value:t,wrap:n}=e;return t=Array.isArray(t)?t:[t],a.createElement(f.Flex,{alignItems:"center",flexWrap:n},t.map(((e,t)=>a.createElement(f.Pill,{flavour:"neutral","data-testid":"pillValueComponent",key:"".concat(e,"-").concat(t),margin:[.3],normal:!1},e))))};var be=n(69765),Ce=n(40267);const we=e=>{let{hide:t,user:n}=e;return t||!n?null:a.createElement(a.Fragment,null,a.createElement(f.Text,null,"by"),a.createElement(ke,null,n))},ke=e=>{let{color:t="text",...n}=e;return a.createElement(f.Text,(0,m.A)({color:t,strong:!0},n))},Ae=e=>{let{type:t,text:n=t,hollow:r}=e;return a.createElement(Ce.A,(0,m.A)({flavour:t.toLowerCase()},r&&{border:!1}),n)},Ie=(0,U.default)(f.Flex).attrs({gap:1,flexWrap:!0,alignItems:"center"}).withConfig({displayName:"components__Container",componentId:"sc-b5rk6g-0"})([""]);var Fe=n(84976);const Te={CLEAR:0,WARNING:1,CRITICAL:2,UNINITIALIZED:3,UNDEFINED:4},Se=e=>{let{chart:t,context:n,nodeId:r,nodeName:l,spaceSlug:o,roomSlug:i,alert:c}=e;const s=(0,y.Zp)(),d=(0,g.Zl)(r),u=(0,a.useCallback)((()=>{var e;null!==c&&void 0!==c&&c.id?s(d,{state:{alertId:c.id}}):s(d,{state:{contextToGo:null===n||void 0===n||null===(e=n.name)||void 0===e?void 0:e[0]}})}),[d,c]),m="/spaces/".concat(o,"/rooms/").concat(i,"/alerts/").concat(null===c||void 0===c?void 0:c.id),{current:p,name:h,previous:v}=c;if("ERROR"===p.status)return a.createElement(Ie,null,a.createElement(f.Text,null,"For the alert"),a.createElement(E.A,{Component:ke,as:Fe.N_,to:m},h),a.createElement(f.Text,null,"for"),a.createElement(E.A,{Component:ke,onClick:u},t.name),a.createElement(f.Text,null,"on"),a.createElement(E.A,{Component:ke,onClick:u},l),a.createElement(f.Text,null,"we couldn't 
calculate the current value"),a.createElement(T.A,{align:"bottom",content:"Please check your alert configuration"},a.createElement(f.Icon,{color:"nodeBadgeColor",size:"small",name:"information"})));if("REMOVED"===p.status)return a.createElement(Ie,null,a.createElement(f.Text,null,"Alert"),a.createElement(E.A,{Component:ke,as:Fe.N_,to:m},h),a.createElement(f.Text,null,"for"),a.createElement(E.A,{Component:ke,onClick:u},t.name),a.createElement(f.Text,null,"on"),a.createElement(E.A,{Component:ke,onClick:u},l),a.createElement(f.Text,null,"is no longer available, state can't be assessed"));const x=Te[p.status]===Te.CLEAR;return a.createElement(Ie,null,a.createElement(f.Text,null,"Alert"),a.createElement(E.A,{Component:ke,as:Fe.N_,to:m},h),a.createElement(f.Text,null,"for"),a.createElement(E.A,{Component:ke,onClick:u},t.name),a.createElement(f.Text,null,"on"),a.createElement(E.A,{Component:ke,onClick:u},l),x?a.createElement(f.Text,null,"recovered"):a.createElement(a.Fragment,null,a.createElement(f.Text,{strong:!0},Te[v.status]===Te.CRITICAL?"was demoted":Te[v.status]===Te.WARNING?"escalated":Te[p.status]===Te.UNDEFINED?"transitioned":"was raised"),a.createElement(f.Text,null,"to"),a.createElement(Ae,{type:p.status}),Te[p.status]===Te.UNDEFINED&&a.createElement(f.Text,null,"state")),a.createElement(f.Text,null,"with value"),a.createElement(Ae,{type:p.status,text:p.value_string,hollow:!0,padding:[0]}))},Ke=e=>{let{id:t,name:n}=e;const r=(0,h.bq)(),l=(0,be.wz)(t,"slug"),o="/spaces/".concat(r,"/rooms/").concat(l,"/home");return a.createElement(E.A,{Component:ke,as:Fe.N_,to:o},n)},Pe=e=>{let{rooms:t}=e;return t.map(((e,n)=>a.createElement(a.Fragment,{key:e.id},n>0&&(n<t.length-1?", ":" and "),a.createElement(Ke,e))))},ze=e=>{let{rooms:t}=e;if(!t.length)return null;const n=t.length>1?"rooms":"room";return a.createElement(a.Fragment,null,a.createElement(f.Text,null,"on ",n),a.createElement(Pe,{rooms:t}))},Le=e=>{let{contexts:t}=e;t.map(((e,n)=>a.createElement(a.Fragment,{key:e},n>0&&(n<t.length-1?", ":" and "),a.createElement(ke,null,e))))},Ne=e=>{let{contexts:t}=e;if(!t.length)return null;const n=t.length>1?"contexts":"context";return a.createElement(f.Text,null,"on ",n," ",a.createElement(Le,{contexts:t}))},Me={month:"2-digit",day:"2-digit",year:"numeric",hour:"numeric",minute:"numeric",long:!1,dateStyle:void 0},Oe=e=>{let{start:t,end:n}=e;const{localeDateString:r}=(0,J.$j)();if(!t||!n)return null;const l=r(new Date(t),Me),o=r(new Date(n),Me);return a.createElement(a.Fragment,null,a.createElement(f.Text,null,"(scheduled ",l," - ",o,")"))},Re={"silencing-rule-created":"created","silencing-rule-deleted":"deleted","silencing-rule-changed":"changed"},Be=e=>{let{rooms:t=[],contexts:n=[]}=e;return t.length||n.length?a.createElement(a.Fragment,null,!!t.length&&a.createElement(ze,{rooms:t}),!!t.length&&!!n.length&&a.createElement(f.Text,null,"and ",a.createElement(Ne,{contexts:n}))):null},De=e=>{var t;let{action:n,notification:r,user:l,room:o,context:i}=e;const c=((null===r||void 0===r||null===(t=r.silencing)||void 0===t?void 0:t.rule)||[])[0],s=null===l||void 0===l?void 0:l.name,d=(null===i||void 0===i?void 0:i.name)||[];return a.createElement(Ie,null,a.createElement(f.Text,null,"Silencing rule"),a.createElement(ke,null,c.name),a.createElement(Be,{rooms:o,contexts:d}),a.createElement(f.Text,null,"was ",Re[n]),a.createElement(we,{user:s}),a.createElement(Oe,c))};var _e=n(78969);const je=(e,t)=>{var n,a;return(null===(n=e.target)||void 0===n?void 0:n[t])||(null===(a=e.target)||void 0===a?void
0:a.id)||e[t]||e.id},We=e=>Array.isArray(e)?e[0].name||e[0].id:e.name||e.id,Ve=function(){let{event:e={},user:t={},token:n={}}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return"space-claiming-token-created"==e.action?a.createElement(Ie,null,a.createElement(f.Text,null,"Claiming Token was created by user"),a.createElement(we,{hide:!t.target,user:t.name})):"space-claiming-token-revoked"==e.action?a.createElement(Ie,null,a.createElement(f.Text,null,"Claiming Token"),a.createElement(f.Text,{strong:!0},n.prefix),a.createElement(f.Text,null,"was revoked by user"),a.createElement(we,{hide:!t.target,user:t.name})):null},qe=e=>{let{statistics:t}=e;const{live:n,stale:r,removed:l,total:o}=(null===t||void 0===t?void 0:t.nodes)||{};return a.createElement(Ie,null,a.createElement(f.Text,null,"Space statistics. Nodes:"),a.createElement(f.Text,{color:_e.J4.live.statusTextColor},n," live"),a.createElement(f.Text,null,","),a.createElement(f.Text,{color:_e.J4.stale.statusTextColor},r," stale"),a.createElement(f.Text,null,","),a.createElement(f.Text,{color:_e.J4.offline.statusTextColor},l," removed"),a.createElement(f.Text,null,","),a.createElement(f.Text,{strong:!0},o," total"))};n(34504),n(78898);var He=n(24398);const Ge=e=>{let{stateColor:t,state:n}=e;return a.createElement(a.Fragment,null,a.createElement(f.Text,null,"became"),a.createElement(ke,{color:t},n))},Ue=e=>{let{stateColor:t,state:n}=e;return a.createElement(a.Fragment,null,a.createElement(f.Text,null,"was"),a.createElement(ke,{color:t},n))},Ze={"node-created":Ue,"node-state-live":Ge,"node-state-stale":Ge,"node-state-offline":Ge,"node-removed":Ue,"node-deleted":Ue,"node-restored":Ue,default:Ue},Xe=e=>{var t;let{action:n,hosts:r=[]}=e;const{id:l,name:o}=r[0],i=(0,y.Zp)(),c=(0,g.Zl)(l),s=(0,a.useCallback)((()=>i(c)),[c]),d=Ze[n],u=n.split("-").at(-1),m=null===(t=He.P[u])||void 0===t?void 0:t.statusTextColor;return a.createElement(Ie,null,a.createElement(f.Text,null,"Node"),a.createElement(E.A,{Component:ke,onClick:s},o),a.createElement(d,{stateColor:m,state:u}))},Ye=e=>{let{room:t}=e;const n=(0,be.XA)("untouchable");return a.createElement(a.Fragment,null,a.createElement(f.Text,null,"was added to",n?" room":""),a.createElement(ke,null,n?We(t):"this room"))},$e=e=>{let{room:t}=e;const n=(0,be.XA)("untouchable");return a.createElement(a.Fragment,null,a.createElement(f.Text,null,"was removed from",n?" 
room":""),a.createElement(ke,null,n?We(t):"this room"))},Je={"room-created":()=>a.createElement(f.Text,null,"was created"),"room-deleted":()=>a.createElement(f.Text,null,"was deleted"),"room-node-added":Ye,"room-node-removed":$e,"room-user-added":Ye,"room-user-removed":$e},Qe=e=>{var t;let{action:n,hosts:r=[],Netdata:l,user:o}=e;const i=Je[n],c=null===(t=r[0])||void 0===t?void 0:t.name;return a.createElement(Ie,null,a.createElement(f.Text,null,"Node"),a.createElement(ke,null,c),a.createElement(i,{room:l.room}),a.createElement(we,{user:null===o||void 0===o?void 0:o.name}))},et=e=>{let{action:t,Netdata:n,user:r}=e;const l=Je[t];return a.createElement(Ie,null,a.createElement(f.Text,null,"User"),a.createElement(ke,null,je(r,"name")),a.createElement(l,{room:n.room}),a.createElement(we,{hide:!r.target,user:r.name}))},tt=e=>{let{action:t,Netdata:n,user:r}=e;const l=Je[t];return a.createElement(Ie,null,a.createElement(f.Text,null,"Room"),a.createElement(ke,null,We(n.room)),a.createElement(l,null),a.createElement(we,{user:r.name}))},nt={"alert-node-transition":e=>{let{chart:t,hosts:n,alert:r,...l}=e;return n.map(((e,n)=>{let{id:o,name:i}=e;const c={...r,name:r.name[n],...r.current?{current:{...r.current,...r.current.status?{status:r.current.status[n]||r.current.status[0]}:{}}}:{}};return a.createElement(Se,(0,m.A)({key:o,chart:t,nodeId:o,nodeName:i,alert:c},l))}))},"node-created":Xe,"node-state-live":Xe,"node-state-stale":Xe,"node-state-offline":Xe,"node-removed":Xe,"node-deleted":Xe,"node-restored":Xe,"agent-connected":e=>{let{agentId:t}=e;return a.createElement(Ie,null,a.createElement(f.Text,null,"Agent with claim ID"),a.createElement(ke,null,t)," ",a.createElement(f.Text,null,"has connected to Netdata"))},"agent-connection-initialized":e=>{let{agentId:t,reason:n}=e;return a.createElement(Ie,null,a.createElement(f.Text,null,"Agent with claim ID"),a.createElement(ke,null,t),a.createElement(f.Text,null,"has initialized its connection to Netdata"))},"agent-disconnected":e=>{let{agentId:t,reason:n}=e;return a.createElement(Ie,null,a.createElement(f.Text,null,"Agent with claim ID"),a.createElement(ke,null,t),a.createElement(f.Text,null,"has disconnected from Netdata with reason: \u201c",n,"\u201d"))},"agent-authenticated":e=>{let{agentId:t}=e;return a.createElement(Ie,null,a.createElement(f.Text,null,"Agent with claim ID"),a.createElement(ke,null,t),a.createElement(f.Text,null,"has successfully authenticated"))},"agent-authentication-failed":e=>{let{agentId:t}=e;return a.createElement(Ie,null,a.createElement(f.Text,null,"Agent with claim ID"),a.createElement(ke,null,t),a.createElement(f.Text,null,"has failed to authenticate"))},"room-created":tt,"room-deleted":tt,"room-node-added":Qe,"room-node-removed":Qe,"room-user-added":et,"room-user-removed":et,"space-created":e=>{let{Netdata:t,user:n}=e;return a.createElement(Ie,null,a.createElement(f.Text,null,"Space"),a.createElement(ke,null,t.space.name||t.space.id),a.createElement(f.Text,null,"was created"),a.createElement(we,{user:n.name}))},"space-statistics":qe,"space-user-added":e=>{let{Netdata:t,user:n}=e;const{name:r}=(null===t||void 0===t?void 0:t.inviter)||{};return a.createElement(Ie,null,a.createElement(f.Text,null,"User"),a.createElement(ke,null,je(n,"name")),a.createElement(f.Text,null,"was added to this space"),r&&a.createElement(a.Fragment,null,a.createElement(f.Text,null,"by invite of"),a.createElement(ke,null,r)))},"space-user-changed":e=>{var t;let{user:n}=e;if(!(null===(t=n.changes)||void 
0===t||!t.roles)&&1===Object.keys(n.changes).length){const e=n.changes.roles.length>1,t=n.changes.roles.join(", ");return a.createElement(Ie,null,a.createElement(f.Text,null,"User ",e?"roles":"role"," for"),a.createElement(ke,null,je(n,"name")),a.createElement(f.Text,null,e?"were":"was"," changed to"),a.createElement(ke,null,t),a.createElement(we,{hide:!n.target,user:n.name}))}return a.createElement(Ie,null,a.createElement(f.Text,null,"User"),a.createElement(ke,null,je(n,"name")),a.createElement(f.Text,null,"was modified"))},"space-user-invited":e=>{let{user:t}=e;return a.createElement(Ie,null,a.createElement(f.Text,null,"User"),a.createElement(ke,null,je(t,"email")),a.createElement(f.Text,null,"was invited to this space"),a.createElement(we,{user:t.name}))},"space-user-removed":e=>{let{user:t}=e;const{name:n}=t.target||{};return a.createElement(Ie,null,a.createElement(f.Text,null,"User"),a.createElement(ke,null,je(t,"name")),a.createElement(f.Text,null,"was removed from this space"),n&&a.createElement(we,{user:t.name}))},"space-user-uninvited":e=>{let{event:t,user:n}=e;return"space-invite-expired"===t.reason?a.createElement(Ie,null,a.createElement(f.Text,null,"The invite for"),a.createElement(ke,null,je(n,"email")),a.createElement(f.Text,null,"to this space has expired")):a.createElement(Ie,null,a.createElement(f.Text,null,"User"),a.createElement(ke,null,je(n,"email")),a.createElement(f.Text,null,"was uninvited from this space"),a.createElement(we,{hide:!n.target,user:n.name}))},"space-claiming-token-created":Ve,"space-claiming-token-revoked":Ve,"silencing-rule-created":De,"silencing-rule-deleted":De,"silencing-rule-changed":De},at=e=>{let{data:t}=e;const{action:n,roomId:r,space:l}=t.source,o=(0,h.ns)(l.id,"slug"),i=(0,be.wz)(r,"slug"),c=nt[n];return c?a.createElement(c,(0,m.A)({},t.source,{spaceSlug:o,roomSlug:i})):a.createElement(Ie,null,a.createElement(f.Text,null,"Event")," ",a.createElement(f.Text,{strong:!0},n)," ",a.createElement(f.Text,null,"emitted"))},rt={bar:xe,"bar-with-integer":xe,duration:xe,pill:Ee,number:ve,string:ve,feedTemplate:at,datetime:e=>{let{value:t,...n}=e;const r=new Date(t),{localeTimeString:l,localeDateString:o}=(0,J.$j)(),i=isNaN(r.valueOf())?"Missing date & time":"".concat(o(r,{long:!1})," ").concat(l(r,{secs:!0}));return a.createElement(ve,(0,m.A)({value:i},n))},value:ve},lt={bar:xe,pill:Ee,value:ve,richValue:ye,feedTemplate:at,rowOptions:"skip"},ot=e=>{let{visualization:t,type:n,value:r,data:l,...o}=e;const i=lt[t]||rt[n]||lt.value;return i===lt.rowOptions?null:a.createElement(i,(0,m.A)({value:r,type:n,rowOptions:null===l||void 0===l?void 0:l.rowOptions,data:l},o))},it={sum:"sum",min:"min",max:"max",extent:"extent",mean:"mean",median:"median",unique:"unique",uniqueCount:"uniqueCount",count:"count"};n(17333),n(8159),n(54520),n(37550);var ct=n(47444);const st={query:(e,t,n)=>{let{columnVisibility:a}=n;const r="string"===typeof t?t.toLowerCase():"";return Object.entries(e).some((e=>{let[t,n]=e;return!!a[t]&&(null!==n&&"undefined"!==typeof n&&(Array.isArray(n)?n.some((e=>String(e).toLowerCase().includes(r))):("object"===typeof n&&Object.keys(n).some((e=>String(n[e]).toLowerCase().includes(r))),String(n).toLowerCase().includes(r))))}))}},dt=e=>(t,n)=>!e.some((e=>{let[a,r]=e;return st[a]?!st[a](t,r,n):!(e=>(t,n,a)=>{var r,l;if(!a.columns[e]||"facet"===(null===(r=a.columns[e])||void 0===r?void 0:r.filter))return!0;if("range"===(null===(l=a.columns[e])||void 0===l?void 0:l.filter)&&null!==n&&void 0!==n&&n.length)return 
t[e]>=n[0]&&t[e]<=n[1];if(!Array.isArray(n)||!n.length)return!0;let o=t[e];return"number"===typeof o?(o=parseFloat(o),n.some((e=>parseFloat(e)===o))):n.includes(o)})(a)(t,r,n)})),ut=(0,ct.K0)({key:"fnDataFiltered",get:e=>{let{extraKey:t,roomId:n,omit:a,keepAll:r,fn:l}=e;return e=>{var c;let{get:s}=e;const{columnVisibility:d,data:u,columns:m,aggregations:f,requiredParams:p}=s((0,i.WB)({id:l}));if(f||!u)return u||[];const g=s((0,o.GA)({key:n,extraKey:t,merge:!1})),h=a?a.split(":::"):[],v=Object.keys(g).reduce(((e,t)=>(p.some((e=>e.id===t))||h.includes(t)||e.push([t,g[t]]),e)),[]);if(!v.length)return u;const y=dt(v),x=null===(c=s((0,o.GA)({key:n,extraKey:"".concat(l,"Settings"),flavour:"val"})))||void 0===c?void 0:c.sortColumn;return r?u.map((e=>y(e,{columnVisibility:d,columns:m,sortColumn:x})?e:{...e,hidden:!0})):u.filter((e=>y(e,{columnVisibility:d,columns:m,sortColumn:x})))}},cachePolicy_UNSTABLE:{eviction:"most-recent"}}),mt=function(){let{extraKey:e="fn",omit:t,keepAll:n=!1}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const a=(0,i.Ak)({extraKey:e}),r=(0,be.ID)();return(0,ct.vc)(ut({extraKey:"".concat(a||e,"Filters"),fn:a,roomId:r,omit:t,keepAll:n}))};var ft=n(94944);const pt=(0,U.default)(f.Table).withConfig({displayName:"table__StyledTable",componentId:"sc-1n2s6gl-0"})(["*{font-family:monospace;letter-spacing:0.09px;}"]),gt=(0,a.memo)(pt,((e,t)=>(0,ce.Ay)(e,t,{keep:["columnVisibility","data","fnQuery","sortBy","virtualizeOptions"]})));let ht;const vt=(0,a.memo)((0,a.forwardRef)(((e,t)=>{let{"data-testid":n,paramsKey:r,refetch:l,onShowInfo:c}=e;const s=(0,i.Ol)(r),{columns:d,sortedColumns:u,sortColumn:m,sortDirection:p,columnVisibility:g,pinnedColumns:h,loadingMore:v,retentionWarning:y,groupByColumns:x,pagination:E,merged:b,latestDirection:C,hasDirection:w,tail:k,aggregations:A}=s,I=(0,a.useRef)();I.current=s;const[F,T]=(0,a.useState)(""),S=mt(r),K="feed"===r.extraKey,P=((e,t,n)=>{let{groupBy:r}=n;return(0,a.useMemo)((()=>(e||[]).reduce(((e,n)=>{const l=t[n];if(!l)return e;const{displayName:o,name:i,sortable:c,sticky:s,units:d,max:u,dummy:m,cellSize:p,valueOptions:g,summary:h,type:v,visualization:y,wrap:x=!1,...E}=l;return m?e:[...e,{...E,displayName:o,id:o,accessorFn:e=>e[n],cell:e=>{let{getValue:t,row:n,cell:l}=e;const o=l.getIsAggregated(),c=t(),s=o&&(it[h]===it.count||it[h]===it.uniqueCount||!it[h]);return a.createElement(ot,{description:i,data:n.original,maxValue:u,padding:n.depth>0?[0,0,0,2.5*n.depth]:[0],strong:o,value:s?"".concat(c,"x"):c,tooltipValue:s?"".concat(c," ").concat(1===c?"item":"items"," associated with ").concat(r," grouping"):c,valueOptions:s?{...g,transform:"none"}:g,type:v,visualization:y,wrap:x})},header:a.createElement(f.Flex,{column:!0},a.createElement(f.TextSmall,{strong:!0},o),d&&a.createElement(f.TextMicro,null,"(",d,")")),enableSorting:c,aggregationFn:it[h]||it.count,...p,meta:{...o!==i?{tooltip:i}:{}}}]}),[])),[e,t,r])})(u,d,{...K?{}:{groupBy:F}}),[z,L]=(0,o.r$)("colSizing",{defaultValue:{},flavour:"obj",...r}),[N,M]=(0,i.Be)({key:"sortColumn",flavour:"val",...r}),[O,R]=(0,i.Be)({key:"sortDirection",flavour:"val",...r}),[B,D]=(0,i.ZY)({key:"query",flavour:"val",...r}),_=(0,i.E)({key:"columnVisibility",...r}),j=(0,a.useMemo)((()=>N||m?[{id:N||m,desc:"descending"===(O||p)}]:[]),[m,p,N,O]),W=(0,a.useCallback)((e=>{const[t]=e,n=null!==t&&void 0!==t&&t.desc?p||"descending":"ascending";M((null===t||void 0===t?void 
0:t.id)||m),R(n)}),[N,m,p]),V=(0,a.useMemo)((()=>({left:h})),[h]),q=(0,a.useRef)();q.current=v;const{pause:H}=(0,ft.A)(),G=(0,a.useCallback)((e=>{let{data:t}=e;return c({data:t.json||t,columns:d})}),[]),U=(0,a.useRef)(),Z=(0,a.useRef)(),X=(0,a.useRef)(),Y=(0,a.useRef)(0),$=(0,a.useRef)(0),J=(0,a.useRef)(b);(0,a.useLayoutEffect)((()=>{var e;b&&!k||null===(e=Z.current)||void 0===e||!e.scrollOffset||Z.current.scrollToOffset(0)}),[b,k]),(0,a.useLayoutEffect)((()=>{clearTimeout(ht),v||(ht=setTimeout((()=>{J.current=!1}),200))}),[v]),(0,a.useLayoutEffect)((()=>{if(!E||!w||k)return;if($.current!==S.length&&"forward"===C){var e;const n=Z.current.scrollOffset;let a=0,r=Z.current.getMeasurements().find(((e,t)=>0!==t&&(a+=e.size,a>n)));var t;if(!r)r=null===(t=Z.current.getMeasurements())||void 0===t?void 0:t[1];if(!r)return;X.current={...S[S.length-$.current+(r.index-1)],headerSize:(null===(e=Z.current.getMeasurements())||void 0===e||null===(e=e[0])||void 0===e?void 0:e.size)||0},b&&!k?Y.current=0:X.current=null}const n=()=>{if(X.current&&Y.current<5){var e,t;Y.current=Y.current+1,$.current=S.length;const a=null===(e=X.current)||void 0===e||null===(e=e[E.column])||void 0===e||null===(t=e.toString)||void 0===t?void 0:t.call(e);let r=Z.current.getMeasurements().find((e=>e.key.replace(/-\d+?/,"")===a));if(!r){const e=S.findIndex((e=>e[E.column]==a));r=Z.current.getMeasurements()[e+1]}return r?(Z.current.scrollToOffset(r.start-X.current.headerSize),void setTimeout(n)):(Y.current=5,void(X.current=null))}Y.current=5,X.current=null};n(),$.current=S.length}),[S]);const Q=(0,a.useCallback)((function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:"backward";k||q.current||I.current.hasDirection&&(J.current||("backward"!==e||I.current.hasNextPage||I.current.anchorBefore)&&("forward"!==e||I.current.hasPrevPage||I.current.anchorAfter)&&("forward"===e&&(J.current=!0),l({anchor:"backward"===e?I.current.anchorBefore:I.current.anchorAfter,merge:!0,direction:e,data_only:!0})))}),[k]),ee=(0,a.useCallback)((e=>{var t;return"undefined"!==typeof(null===(t=I.current.data[e])||void 0===t?void 0:t[null===E||void 0===E?void 0:E.column])?"".concat(I.current.data[e][null===E||void 0===E?void 0:E.column],"-").concat(e):e}),[]),te=(0,a.useCallback)((()=>I.current.hasNextPage),[]),ne=(0,a.useCallback)((()=>I.current.hasPrevPage),[]),ae=(0,a.useCallback)((e=>{e.scrollOffset>10&&H()}),[H]),re=(0,a.useMemo)((()=>({overscan:15,getHasNextPage:te,getHasPrevPage:ne,loading:v,loadMore:Q,warning:y,virtualRef:Z,getItemKey:ee,onVirtualChange:ae})),[S,v]),le=(0,i.dP)();return a.createElement(gt,{groupByColumns:x,onClickRow:G,columnPinning:V,columnVisibility:g,data:S,dataColumns:P,dataGa:"functions",enableColumnPinning:!(null===h||void 0===h||!h.length),enableColumnVisibility:!K,enableCustomSearch:!!A,enableResizing:!0,enableSorting:!K,globalFilter:K?null:B,onColumnVisibilityChange:_,onColumnSizingChange:L,columnSizing:z,onSearch:K?null:D,onGroupByChange:K?null:T,onSortingChange:K?null:W,sortBy:j,testPrefix:n,virtualizeOptions:re,ref:t,tableRef:U,onHoverCell:(0,a.useCallback)((function(){let{row:e}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};e||le(null),le(e)}),[])})})),(()=>!0)),yt=vt;n(9391);var xt=n(2404),Et=n.n(xt),bt=n(92138),Ct=n(99090),wt=n(65624),kt=n(61531),At=(n(9920),n(3949),n(88055)),It=n.n(At),Ft=n(64454);const 
Tt=90,St=110,Kt=160,Pt=1e3,zt={minSize:Tt,maxSize:Pt},Lt=50,Nt={uniqueKey:{size:Tt,downLimit:Tt-Lt,upLimit:Tt+Lt},bar:{size:Tt,downLimit:Tt-Lt,upLimit:Tt+Lt},value:{size:St,downLimit:St-Lt,upLimit:St+Lt},feedTemplate:{size:Pt,downLimit:Pt-Lt,upLimit:Pt+Lt,minSize:230,maxSize:5e3,fullWidth:!0},pill:{size:St,downLimit:St-Lt,upLimit:St+Lt},timestamp:{size:Kt,downLimit:Kt-Lt,upLimit:Kt+Lt},datetime:{size:Kt,downLimit:Kt-Lt,upLimit:Kt+Lt}},Mt=(e,t)=>{e((e=>{var n,a,r;const{updateEvery:l,merge:o,reset:i,groupBy:c,items:s,direction:d,dataOnly:u,showIds:m=!1!==e.showIds,facetsDelta:f,histogramDelta:p,itemsDelta:g,aggregatedView:h,...v}=t;let y=i?t.columns:u?It()(e.columns):It()(o?(0,Ft.A)(t.columns,e.columns):t.columns),x=i?{}:{...e.sortedColumnsObj}||{};const E=v.hasHistory||e.hasHistory,b=u?e.pagination:t.pagination;let C=Object.keys(y).reduce(((t,n)=>{const a=y[n],{id:r,name:l,sticky:o,visible:c,index:s}=a;return a.displayName=m?r||n:l,t.columnVisibility[a.displayName]=!i&&e.loaded?!!t.columnVisibility[a.displayName]:c,!o||!i&&e.loaded||t.pinnedColumns.push(a.displayName),a.valueOptions=a.valueOptions||{},a.cellSize=(e=>{let{visualization:t,type:n,fullWidth:a=!1,uniqueKey:r}=e;return{...zt,fullWidth:a,...(r&&"timestamp"!==n?Nt.uniqueKey:Nt[n])||Nt[t]||{minSize:Tt,maxSize:Pt,size:St,downLimit:St-Lt,upLimit:St+Lt}}})(a),a.sortable=a.sortable&&!(null!==b&&void 0!==b&&b.enabled),"undefined"!==typeof s&&(x[x[s]!==n?a.displayName:s]=n),t}),{columnVisibility:i?{}:{...e.columnVisibility||{}},pinnedColumns:i?[]:[...e.pinnedColumns||[]]}),w=i?t.aggregations:o?It()(e.aggregations):t.aggregations||It()(e.aggregations);f&&f.forEach((e=>{const t=w.find((t=>t.id===e.id));t?e.options.forEach((e=>{if(isNaN(e.count))return;const n=t.options.find((t=>t.id===e.id));n?n.count=(n.count||0)+((null===e||void 0===e?void 0:e.count)||0):t.options.push(e)})):w.push(e)}));let k=i?t.histogram:o?It()(e.histogram):t.histogram||It()(e.histogram);if(p){var A;if(!i&&(null===(A=e.histogram)||void 0===A?void 0:A.id)!==(null===p||void 0===p?void 0:p.id))return;const t=k.chart.result.labels;if((0,ce.Ay)(t,p.chart.result.labels))k.chart.result.data=[...k.chart.result.data,...p.chart.result.data];else{const e=((e,t)=>{const n=t.reduce(((t,n,a)=>{const r=e.findIndex((e=>n===e));return-1===r||(t[r]=a),t}),{});return t=>e.reduce(((e,a,r)=>("undefined"===typeof n[r]?e.push([0,0,0]):e.push(t[n[r]]),e)),[])})(t,p.chart.result.labels);p.chart.result.data.forEach((t=>k.chart.result.data.push(e(t))))}}let I={};Object.keys(t.columns).forEach((e=>{I[t.columns[e].index]=e}));let F=t.data.map((e=>e.reduce(((e,t,n)=>{const a=I[n];return a?(e[a]=t,e):e}),{}))),T=i?F:o?"forward"===d?F.concat(e.data||[]):(e.data||[]).concat(F):F;v.tail&&T.slice(0,500);let S=null!==b&&void 0!==b&&b.enabled&&b.column&&Array.isArray(T)&&null!==(n=T[T.length-1])&&void 0!==n&&n[b.column]&&null!==(a=T[0])&&void 0!==a&&a[b.column]?{anchorBefore:T[T.length-1][b.column],anchorAfter:T[0][b.column],anchorUnits:b.units}:{};const K=t.defaultSortColumn||e.defaultSortColumn,P=(i?t.acceptedParams||[]:t.acceptedParams||e.acceptedParams).includes("direction");let z=0;h&&(z=T.reduce(((e,t)=>e+(t[h.column]||0)),0));const L={...e||kt.zb.table,...v,groupByColumns:i?c:c||e.groupByColumns,data:T,columns:y,updatedAt:(new Date).getTime(),updateEvery:l||e.updateEvery,sortedColumns:Object.values(x),sortColumn:null===(r=y[K])||void 0===r?void 0:r.displayName,sortDirection:K?y[K].sort:"descending",...C,...S,totalSize:v.totalSize||(null===s||void 0===s?void 
0:s.matched)||(!u||"forward"===d||F.length||t.partial?(e.totalSize||0)+(g?g.matched:0):0),actualSize:z,...!!E&&{hasNextPage:!(u&&"forward"!==d&&!F.length&&!t.partial)&&(!s||0!==s.after)},...!!E&&P&&{hasPrevPage:!(u&&"forward"===d&&!F.length&&!t.partial)&&(!(!v.tail&&s)||0!==s.before)},offset:o?null:(null===s||void 0===s?void 0:s.before)||null,loading:!1,loaded:!0,showIds:m,merged:o,latestDirection:d,hasHistory:E,hasDirection:P,aggregations:w,histogram:k,sortedColumnsObj:x,filtersToRefresh:(v.requiredParams||[]).reduce(((e,t)=>t.uniqueView?{...e,[t.id]:!0}:e),{}),reset:i,aggregatedView:h};return x=null,k=null,w=null,S=null,C=null,y=null,I=null,F=null,T=null,L}))},Ot={table:Mt,log:Mt,default:(e,t)=>{const{data:n,type:a,updateEvery:r,...l}=t;e({...kt.zb.default,...l,data:n,updateEvery:r,type:a,loading:!1,loaded:!0})}},Rt={default:{fetch:wt.ph,isEnabled:e=>{let{nodeIds:t,fn:n,after:a}=e;return!(null===t||void 0===t||!t.length)&&!!n&&!!a}},feed:{fetch:wt.d$,isEnabled:e=>{let{roomId:t,fn:n}=e;return!!t&&!!n}}},Bt=e=>{const{after:t,before:n,highlight:r}=(0,o.rW)();return(0,a.useMemo)((()=>t>0?[()=>({after:t,before:n}),t,n,e||r.before]:[()=>{const e=Date.now();return{after:e+1e3*t,before:e}},t,n,e||r.before]),[t,n,null===r||void 0===r?void 0:r.before,e])};var Dt=n(18061);const _t={default:{fetch:wt.JJ,isEnabled:e=>{let{nodeIds:t,fn:n}=e;return!(null===t||void 0===t||!t.length)&&!!n}},feed:{fetch:()=>Promise.resolve({data:{type:"table"}}),isEnabled:()=>!0}},jt=(e,t,n)=>{const a=(0,h.vt)(),r=(0,g.yN)(e),[{loading:l}]=(0,A.f7)(),{fetch:o,isEnabled:c}=_t[t]||_t.default,[{infoLoaded:s},d]=(0,i.WH)(n),[u]=Bt(),{loaded:m,error:f}=(0,i.vx)(n.extraKey);return(0,Dt.A)((()=>({enabled:!s&&m&&!f&&!l&&c({nodeIds:e,fn:t}),fetch:()=>o({spaceId:a,cloudNodeIds:r,nodeIds:e,fn:t,after:u().after,before:u().before}),onFail:e=>{var t,n;null===(t=e)||void 0===t||null===(t=t.response)||void 0===t||!t.status||400!==e.response.status&&304!==e.response.status?(null!==(n=e)&&void 0!==n&&null!==(n=n.response)&&void 0!==n&&n.data&&(e=e.response.data),d({error:e})):d((e=>({...e,infoLoaded:!0})))},onSuccess:function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return d((t=>({...kt.zb[e.type]||kt.zb.default,...t,hasHistory:e.hasHistory||!1,acceptedParams:e.acceptedParams||[],requiredParams:e.requiredParams||[],type:e.type,help:e.help,infoLoaded:!0})))}})),[s,m,t,l,!(null!==e&&void 0!==e&&e.length),null===e||void 0===e?void 0:e[0]]),f};var Wt=n(44731),Vt=n(51891),qt=n(3839),Ht=n(66118);const Gt=(0,ct.eU)({key:"chartjs",default:{navigation:"pan"}}),Ut=(0,ct.K0)({key:"feedChartState",get:e=>{let{key:t}=e;return e=>{let{get:n}=e;const a=n(Gt);return t?a[t]:a}},set:e=>{let{key:t}=e;return(e,n)=>{let{set:a}=e;a(Gt,(e=>t?{...e,[t]:n}:{...e,...n}))}}}),Zt=e=>(0,ct.vc)(Ut({key:e})),Xt=e=>(0,ct.lZ)(Ut({key:e}));var Yt=n(31225);n(77518);const $t=(0,U.default)(f.Button).attrs((e=>{let{active:t}=e;return{flavour:"borderless",iconColor:t?"textDescription":"border",iconSize:"small",padding:[.5],small:!0}})).withConfig({displayName:"styled__Button",componentId:"sc-8s0lqd-0"})(["&&{background-color:",";height:20px;width:20px;&:hover{background-color:",";.button-icon__color{fill:",";}}}"],(e=>{let{theme:t,active:n}=e;return 
n?(0,f.getColor)("borderSecondary")({theme:t}):"initial"}),(e=>{let{theme:t}=e;return(0,f.getColor)("borderSecondary")({theme:t})}),(e=>{let{theme:t}=e;return(0,f.getColor)("textDescription")({theme:t})})),Jt=(0,U.default)(f.Flex).attrs({padding:[.5],gap:1,round:!0,border:{side:"all",color:"borderSecondary"}}).withConfig({displayName:"styled__Container",componentId:"sc-8s0lqd-1"})(["position:absolute;top:18px;right:8px;background:",";&:hover{background:",";}"],(0,f.getRgbColor)("elementBackground",.5),(0,f.getColor)("elementBackground")),Qt=(0,a.forwardRef)(((e,t)=>{let{testIdPrefix:n,...r}=e;const[{after:l,before:i},c]=(0,o.N9)(),[s,d]=[Zt(u="navigation"),Xt(u)];var u;const f=(0,a.useCallback)((e=>{let t=l,n=i;if(l<0){const e=Date.now();t=e+1e3*l,n=e}const a=e*Math.round((n-t)/4);c({after:t+a,before:n-a})}),[l,i]),p=l>0&&Math.abs(i-l)<2e3||-1===l;return a.createElement(Jt,(0,m.A)({"data-testid":"chartToolbox","data-track":"chart-toolbox::hover-chart::".concat(n),"data-toolbox":"true",ref:t},r),a.createElement($t,{active:"pan"===s,"data-testid":"chartToolbox-pan","data-track":"chart-toolbox::pan-click::".concat(n),icon:"panTool",onClick:()=>d("pan"),title:"Pan"}),a.createElement($t,{active:"highlight"===s,"data-testid":"chartToolbox-highlight","data-track":"chart-toolbox::highlight-click::".concat(n),icon:"highlightArea",onClick:()=>d("highlight"),title:"Highlight"}),a.createElement($t,{active:"zoom"===s,"data-testid":"chartToolbox-zoom","data-track":"chart-toolbox::zoom-click::".concat(n),disabled:p,icon:"dragHorizontal",onClick:()=>{d("zoom")},title:"Select and zoom"}),a.createElement($t,{"data-testid":"chartToolbox-zoomIn","data-track":"chart-toolbox::zoom-in-click::".concat(n),disabled:p,icon:"zoomIn",onClick:()=>{f(1)},title:"Zoom in"}),a.createElement($t,{"data-testid":"chartToolbox-zoomOut","data-track":"chart-toolbox::zoom-out-click::".concat(n),icon:"zoomOut",onClick:()=>{f(-1)},title:"Zoom out"}),a.createElement($t,{"data-testid":"chartToolbox-zoomReset","data-track":"chart-toolbox::zoom-reset-click::".concat(n),disabled:-900===l&&0===i,icon:"zoomReset",onClick:()=>{c({after:-900,before:0})},title:"Zoom reset"}))})),en={after:null,before:null};Ht.t1.register(Vt.A,Ht.A6,Ht.E8,Ht.PP,Ht.s$,Ht.kc,Ht.UA,Ht.m_,qt.Ay);const tn=e=>{var t;let{paramsKey:n,testIdPrefix:r}=e;const l=(0,a.useRef)(),c=(0,a.useContext)(U.ThemeContext),[s,d]=(0,o.N9)(),[u,m]=(0,a.useState)([]),[p,g]=(0,a.useState)(en),[h,v]=(0,a.useState)(!1),[y,x]=(0,a.useState)(!1),[E,b]=(0,a.useState)([]),[C,w]=(0,a.useState)(null),[k,A]=(0,a.useState)("second"),[I,,F,T]=(0,S.A)(!1),{aggregations:K}=(0,i.Ol)(n),{navigation:P}=Zt(),{localeTimeString:z,localeDateString:L}=(0,J.$j)(),[N]=Bt(),M=N.before-N.after,O=(0,a.useMemo)((()=>{const e=Date.now();return{min:N.after-M,max:0===s.before?N.before:e{var e;if(null===K||void 0===K||null===(e=K.event_histogram)||void 0===e||null===(e=e.hits)||void 0===e||!e.buckets)return;const t=K.event_histogram.hits.buckets.reduce(((e,t)=>{let{key:n,doc_count:a}=t;return{data:[...e.data,a],labels:[...e.labels,n]}}),{data:[],labels:[]}),n=(0,se.Bp)(s.after,s.before)||{};m(t.data),b(t.labels),w(null),A(n.unit)}),[null===K||void 0===K||null===(t=K.event_histogram)||void 0===t||null===(t=t.hits)||void 0===t?void 0:t.buckets]);const 
B=(0,a.useMemo)((()=>({animation:!1,interaction:{axis:"x"},maintainAspectRatio:!1,plugins:{annotation:{annotations:{...p.after&&p.before?{annotation:{type:"box",backgroundColor:"rgba(128,128,128,0.3)",borderDash:[1,5,1,1],borderWidth:1,xMax:p.before,xMin:p.after,xScaleID:"x"}}:{}}},legend:{align:"start",display:!1,position:"bottom"},zoom:{limits:{x:O},pan:{enabled:"pan"===P&&!h,mode:"x",onPanStart:e=>{let{chart:t}=e;t.canvas.style.cursor="grabbing"},onPanComplete:e=>{let{chart:t}=e;t.canvas.style.cursor="default";const{min:n,max:a}=t.scales.x;w({min:Math.round(n),max:Math.round(a)}),d({after:Math.round(n),before:Math.round(a)}),t.update()},threshold:1},zoom:{drag:{enabled:"zoom"===P||h},mode:"x",onZoomStart:e=>{let{chart:t}=e;t.canvas.style.cursor="col-resize"},onZoomComplete:e=>{let{chart:t}=e;if("zoom"!==P&&!h)return;t.canvas.style.cursor="default";const{min:n,max:a}=t.scales.x;d({after:Math.round(n),before:Math.round(a)}),v(!1)}}}},responsive:!0,scales:{x:{type:"time",grid:{drawBorder:!0,drawOnChartArea:!1,drawTicks:!0,offset:!1},parsing:!1,ticks:{color:(0,f.getColor)("textLite")({theme:c}),font:{size:11},autoSkip:!0,maxTicksLimit:20,..."minute"===k&&M>=1e3*Yt.wD?{callback:e=>{const t=L(e,{month:"2-digit",day:"2-digit",year:"numeric",long:!1,dateStyle:void 0}).split("/").reverse().slice(1).join("-"),n=z(e,{secs:!1});return"".concat(t," ").concat(n)}}:{}},time:{displayFormats:{week:"yyyy-MM-dd",day:"MM-dd",hour:"MM-dd HH:mm",minute:"HH:mm",second:"HH:mm:ss"},tooltipFormat:"yyyy-MM-dd HH:mm:ss",unit:k||"second"},...C?{min:C.min,max:C.max}:{}},y:{beginAtZero:!0,ticks:{precision:0,color:(0,f.getColor)("textLite")({theme:c}),font:{size:11}}}}})),[p,M,h,P,C,w,k,O]);if((0,a.useEffect)((()=>{const e=e=>{"Shift"===e.key&&(e.preventDefault(),v(!0))};return document.addEventListener("keydown",e),()=>{document.removeEventListener("keydown",e)}}),[]),!u.length)return null;return a.createElement(f.Flex,{position:"relative",height:{min:50},onMouseEnter:F,onMouseLeave:T},I&&a.createElement(Qt,{testIdPrefix:r}),a.createElement(Wt.yP,{data:{datasets:R,labels:E},onMouseDown:e=>{if(e.preventDefault(),"highlight"!==P)return;x(!0);const t=e.nativeEvent.offsetX||e.nativeEvent.layerX,n=l.current;n.canvas.style.cursor="crosshair",g({after:Math.round(n.scales.x.getValueForPixel(t)),before:null})},onMouseMove:e=>{if(e.preventDefault(),"highlight"!==P||!y)return;const t=e.nativeEvent.offsetX||e.nativeEvent.layerX,n=l.current;g((e=>({...e,before:Math.round(n.scales.x.getValueForPixel(t))})))},onMouseUp:e=>{e.preventDefault(),x(!1);const t=e.nativeEvent.offsetX||e.nativeEvent.layerX,n=l.current;n.canvas.style.cursor="default";p.after===Math.round(n.scales.x.getValueForPixel(t))?g(en):g(p)},options:B,ref:l}))};var nn=n(80158);const an={none:!0,facet:!0},rn=(e,t)=>{const{loaded:n,loading:r,columns:l,sortedColumns:o}=(0,i.Ol)({extraKey:e}),c=(0,a.useRef)([]);return(0,a.useMemo)((()=>(!n||r||(c.current=(o||[]).reduce(((e,n)=>l[n]&&l[n].filter&&!an[l[n].filter]?[...e,t(n,l)]:e),[])),c.current)),[n,r])};n(74648),n(23215),n(25509),n(65223),n(60321),n(41927),n(11632),n(64377),n(66771),n(12516),n(68931),n(52514),n(35694),n(52774),n(49536),n(21926),n(94483),n(16215);const ln={sum:e=>e.reduce(((e,t)=>e+("number"===typeof t?t:0)),0),min:e=>{let t;return e.forEach((e=>{null!=e&&(t>e||void 0===t&&e>=e)&&(t=e)})),t},max:e=>{let t;return e.forEach((e=>{null!=e&&(t<e||void 0===t&&e>=e)&&(t=e)})),t},extent:e=>{let t,n;return e.forEach((e=>{null!=e&&(void 0===t?e>=e&&(t=n=e):(t>e&&(t=e),n<e&&(n=e)))})),[t,n]},mean:e=>{let
t=0,n=0;if(e.forEach((e=>{null!=e&&(e=+e)>=e&&(++t,n+=e)})),t)return n/t},median:e=>{if(!e.length)return;if(t=e,!Array.isArray(t)||!t.every((e=>"number"===typeof e)))return;var t;if(1===e.length)return e[0];const n=Math.floor(e.length/2),a=e.sort(((e,t)=>e-t));return e.length%2!==0?a[n]:(a[n-1]+a[n])/2},unique:e=>Array.from(new Set(e.map((e=>e))).values()),uniqueCount:e=>new Set(e.map((e=>e))).size,count:e=>e.length},on=e=>({label:e,value:e}),cn=(0,a.memo)((e=>{let{paramsKey:t,initData:n,initFilter:r}=e;const{charts:l,loaded:o,columns:c}=(0,i.Ol)(t),s=mt(t),d=(0,a.useContext)(U.ThemeContext),u=Object.keys(l).map(on),m=rn(t.extraKey,on),[p,g]=(0,a.useState)(n),[h,v]=(0,a.useState)(r),y=(0,i.Ak)(t);(0,a.useEffect)((()=>{g(n)}),[n]),(0,a.useEffect)((()=>{v(r)}),[r]);const x=(0,a.useMemo)((()=>{if(!l[p])return{datasets:[],labels:[]};const e=l[p].columns.reduce(((e,t)=>{var n;const a=((e,t,n)=>e.reduce(((e,a)=>(e[a[n]]||(e[a[n]]={label:a[n],data:[]}),e[a[n]].data.push(a[t]),e)),{}))(s,t,h),r=(null===(n=c[t])||void 0===n?void 0:n.summary)||"count",l=ln[r];return Object.keys(a).forEach((n=>{const r=l(a[n].data);e[a[n].label]={...e[a[n].label],[t]:r,label:a[n].label,sortByValue:e[a[n].label]&&e[a[n].label].sortByValue>r?e[a[n].label].sortByValue:r}})),e}),{}),t=Object.values(e).sort(((e,t)=>t.sortByValue-e.sortByValue)).slice(0,15).map((e=>{let{sortByValue:t,...n}=e;return n}));return{datasets:l[p].columns.map(((e,n)=>({backgroundColor:Yt.b4[n],borderWidth:0,data:t.map((t=>t[e])),label:e,maxBarThickness:50,minBarLength:0}))),labels:t.map((e=>{let{label:t}=e;return t}))}}),[y,l,p,h,s]),E={animation:!1,maintainAspectRatio:!1,plugins:{legend:{position:"bottom",labels:{boxWidth:4,boxHeight:20,title:{color:(0,f.getColor)("textLite")({theme:d})}}}},responsive:!0,scales:{x:{stacked:!0,grid:{color:(0,f.getColor)("borderSecondary")({theme:d})},afterSetDimensions:e=>{e.maxHeight=50},ticks:{callback:(e,t)=>{let n=x.labels[t];if(!n)return;let a=0;for(;(null===(r=n)||void 0===r?void 0:r.length)>20;){var r;n=(0,nn.P3)(n,a),a+=1}return n}}},y:{stacked:!0,grid:{color:(0,f.getColor)("borderSecondary")({theme:d})},afterSetDimensions:e=>{e.maxWidth=50}}}},[b,C]=(0,I.useHovered)({},[o]),w=(0,F.A)();return(0,a.useEffect)((()=>{w(C)}),[C]),a.createElement(f.Flex,{column:!0,"data-testid":"functionsCharts",gap:3,width:"50%",background:"mainChartBg",border:{side:"all",color:"mainChartBorder"},padding:[4],round:.5},a.createElement(f.Flex,{alignItems:"center","data-testid":"functionsChart".concat(p,"-selects"),gap:2},a.createElement(f.Select,{"data-ga":"functions-chart-".concat(p,"::select-data::functions-view"),"data-testid":"functionsChart".concat(p,"-dataSelect"),isLoading:!o,onChange:e=>{let{value:t}=e;return g(t)},options:u,styles:{size:"tiny",minWidth:120},value:{label:p,value:p}}),a.createElement(f.Text,{"data-testid":"functionsChart".concat(p,"-perLabel"),color:"textLite"},"per"),a.createElement(f.Select,{"data-ga":"functions-chart-".concat(h,"::select-filter::functions-view"),"data-testid":"functionsChart".concat(h,"-filterSelect"),isLoading:!o,onChange:e=>{let{value:t}=e;return v(t)},options:m,styles:{size:"tiny",minWidth:120},value:{label:h,value:h}})),a.createElement(f.Flex,{position:"relative",height:{min:60},width:{min:"100%"},ref:b},a.createElement(Wt.yP,{data:x,options:E})))})),sn=cn;var dn=n(83465),un=n(75233),mn=n(19018),fn=n(5668);const 
pn={dark:{publicColor:"#FF9700",privateColor:"#0075A2",serverColor:"#00a44a",clientColor:"#DB162F",otherColor:"#3E4551",appFontColor:"#AEBBBB",appFontFamily:"monospace",appFontSize:"12px",appFontWeight:"regular",borderFontColor:"#EDF0F0",borderFontFamily:"monospace",borderFontSize:"14px",borderFontWeight:"bold"},light:{publicColor:"#BFA142",privateColor:"#197EA8",serverColor:"#339979",clientColor:"#CD6364",otherColor:"#AFB5BA",appFontColor:"#526161",appFontFamily:"monospace",appFontSize:"12px",appFontWeight:"regular",borderFontColor:"#5D7070",borderFontFamily:"monospace",borderFontSize:"14px",borderFontWeight:"bold"}},gn=(0,a.memo)((0,a.forwardRef)(((e,t)=>{let{data:n,width:r,height:l,processing:o}=e;const i=(0,a.useRef)({}),c=(0,fn.xd)("theme"),s=(0,bt.A)(n),d=(0,bt.A)(r),u=(0,bt.A)(l),m=(0,bt.A)(o),p=(0,a.useRef)({}),g=function(){let e=arguments.length>0&&void 0!==arguments[0]&&arguments[0];const t=!e&&(0,ce.Ay)(n,s);p.current=t?p.current:n.reduce(((e,t)=>(e[t.Process]||(e[t.Process]={listen:0,inbound:0,outbound:0,local:0,private:0,public:0,total:0,name:t.Process}),e[t.Process].total+=t.Count,"listen"===t.Direction?e[t.Process].listen+=t.Count:"local"===t.Direction?e[t.Process].local+=t.Count:"inbound"===t.Direction?e[t.Process].inbound+=t.Count:"outbound"===t.Direction&&(e[t.Process].outbound+=t.Count),"public"===t.RemoteAddressSpace?e[t.Process].public+=t.Count:"private"===t.RemoteAddressSpace&&(e[t.Process].private+=t.Count),e)),{});function a(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:1;if(7!==e.length||"#"!==e[0])throw new Error("Invalid hex color format");const n=parseInt(e.slice(1,3),16),a=parseInt(e.slice(3,5),16),r=parseInt(e.slice(5,7),16);return"rgba(".concat(n,", ").concat(a,", ").concat(r,", ").concat(t,")")}function o(e,t,n,a,r){const l=t/2,o=n/2;!function(e){e.selectAll(".app").each((function(e){e&&(i.current[e.name]={x:e.x,y:e.y})}))}(e);const c=mn.scaleOrdinal().domain(["public","private","listenInbound","outbound","others"]).range([r.publicColor,r.privateColor,r.serverColor,r.clientColor,r.otherColor]),s=mn.pie().value((e=>e.value)),d=mn.arc(),g=e.selectAll(".app").data(Object.values(p.current),(e=>e.name));g.exit().transition().style("opacity",0).remove();const h=g.enter().append("g").attr("class","app").attr("transform",(e=>{const t=i.current[e.name];return"translate(".concat((null===t||void 0===t?void 0:t.x)||l,", ").concat((null===t||void 0===t?void 0:t.x)||o,")")}));h.each((function(e){const t=mn.select(this),n=s(e.d3.pie),a=e.d3.size;t.selectAll("path").data(n).enter().append("path").transition().attr("fill",((e,t)=>c(t))),t.append("text").text((e=>e.name)).attr("text-anchor","middle").attr("y",a+10).style("font-family",r.appFontFamily).style("font-size",r.appFontSize).style("font-weight",r.appFontWeight).style("fill",r.appFontColor)}));const v=h.merge(g);return v.each((function(e){const t=mn.select(this),n=i.current[e.name]||function(e){return e.d3.isListener?{x:e.d3.x,y:e.d3.y}:{x:l,y:o}}(e);e.x=n.x,e.y=n.y,t.selectAll("path").data(s(e.d3.pie)).transition().attr("d",d.innerRadius(0).outerRadius(e.d3.size)),t.select("text").transition().attr("y",e.d3.size+10)})),v.call(mn.drag().on("start",u).on("drag",m).on("end",f)),v}let d;function u(e,t){e.active||d.alphaTarget(1).restart(),t.fx=t.x,t.fy=t.y}function m(e,t){t.fx=e.x,t.fy=e.y}function f(e,t){e.active||d.alphaTarget(0),t.fx=null,t.fy=null}!function(n,i){let c=mn.select("#d3-canvas").select("svg");((e,n,a)=>{if(t)return;const 
r=e/2-a,l=n/2-a,o=Math.min(2*r/3-a,2*l/3-a,Math.max(5,Math.min(e,n)/Object.keys(p.current).length)+13),i=Object.values(p.current),c={total:mn.max(i,(e=>e.total)),local:mn.max(i,(e=>e.local)),listen:mn.max(i,(e=>e.listen)),private:mn.max(i,(e=>e.private)),public:mn.max(i,(e=>e.public)),inbound:mn.max(i,(e=>e.inbound)),outbound:mn.max(i,(e=>e.outbound))},s=mn.scaleLog().domain([1,c.total]).range([13,o]).clamp(!0);let d=0,u=0,m=0,f=n-a;i.forEach((t=>{const o=mn.scaleLog().domain([1,t.total+1]).range([0,r-a]),i=mn.scaleLog().domain([1,t.total+1]).range([0,r-a]),g=mn.scaleLog().domain([1,t.total+1]).range([0,l-a]),h=mn.scaleLog().domain([1,t.total+1]).range([0,l-a]);p.current[t.name].forces={total:t.total/c.total,local:t.local/c.local,listen:t.listen/c.listen,private:t.private/c.private,public:t.public/c.public,inbound:t.inbound/c.inbound,outbound:t.outbound/c.outbound},p.current[t.name].pos={right:o(t.public+1),left:i(t.private+1),top:g(t.outbound+1),bottom:h((t.listen+t.inbound)/2+1)};const v=t.total-(t.public+t.private+t.listen+t.inbound+t.outbound);let y=a+r+p.current[t.name].pos.right-p.current[t.name].pos.left,x=a+l+p.current[t.name].pos.bottom-p.current[t.name].pos.top,E=s(t.total);t.listen===t.total&&(E=10,m===u&&50*d>2*e/3&&(d=0,m=0,u=0,f-=80),d?m>=u?(u++,y=e/2+50*u,x=f-E-(u%2===0?0:40)):(m++,y=e/2-50*m,x=f-E-(m%2===0?0:40)):(y=e/2,x=f-E),d++),p.current[t.name].d3={isListener:t.listen===t.total,x:y,y:x,size:E,pie:[{value:t.public},{value:t.private},{value:t.listen+t.inbound},{value:t.outbound},{value:v>0?v:0}]},p.current[t.name].d3.x-p.current[t.name].d3.size/2<0&&(p.current[t.name].d3.x=2*p.current[t.name].d3.size),p.current[t.name].d3.x+p.current[t.name].d3.size/2>e&&(p.current[t.name].d3.x=e-2*p.current[t.name].d3.size),p.current[t.name].d3.y-p.current[t.name].d3.size/2<0&&(p.current[t.name].d3.y=2*p.current[t.name].d3.size),p.current[t.name].d3.y+p.current[t.name].d3.size/2>n&&(p.current[t.name].d3.y=n-2*p.current[t.name].d3.size)}))})(r,l,n),(c.empty()||e)&&(c=c.empty()?mn.select("#d3-canvas").append("svg").attr("width",r).attr("height",l):c.attr("width",r).attr("height",l),function(e,t,n,r,l){mn.select("#d3-canvas").selectAll("svg > *").remove();const o=n/2,i=e.append("defs").append("linearGradient").attr("id","clientsGradient").attr("x1","0%").attr("y1","0%").attr("x2","0%").attr("y2","100%");i.append("stop").attr("offset","0%").style("stop-color",a(l.clientColor,1)),i.append("stop").attr("offset","100%").style("stop-color",a(l.clientColor,0));const c=e.append("g").attr("class","topRect");c.append("rect").attr("x",0).attr("y",0).attr("width","100%").attr("height",r/2).style("fill","url(#clientsGradient)"),c.append("text").text("Clients").attr("x","50%").attr("y",12).attr("text-anchor","middle").style("font-family",l.borderFontFamily).style("font-size",l.borderFontSize).style("font-weight",l.borderFontWeight).style("fill",l.borderFontColor);const s=e.append("defs").append("linearGradient").attr("id","serversGradient").attr("x1","0%").attr("y1","100%").attr("x2","0%").attr("y2","0%");s.append("stop").attr("offset","0%").style("stop-color",a(l.serverColor,1)),s.append("stop").attr("offset","100%").style("stop-color",a(l.serverColor,0));const d=e.append("g").attr("class","bottomRect");d.append("rect").attr("x",0).attr("y","100%").attr("width","100%").attr("height",r/2).attr("transform","translate(0, -".concat(r/2,")")).style("fill","url(#serversGradient)"),d.append("text").text("Servers").attr("x","50%").attr("y","100%").attr("text-anchor","middle").attr("transform","translate(0, -4)").style("font-family",l.borderFontFamily).style("font-size",l.borderFontSize).style("font-weight",l.borderFontWeight).style("fill",l.borderFontColor);const 
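/* The bubble sizing above uses a clamped d3 log scale so per-process totals
   spanning several orders of magnitude still land in a bounded pixel range,
   and the layout that follows pins each bubble near its computed position
   with a force simulation. Minimal standalone equivalents (names are
   assumptions):

   import { scaleLog } from "d3-scale";
   import { forceSimulation, forceX, forceY, forceCollide } from "d3-force";

   const radius = scaleLog()
     .domain([1, maxTotal]) // start at 1: log(0) is undefined
     .range([13, maxRadius])
     .clamp(true);          // out-of-domain inputs stick to the range ends

   const sim = forceSimulation(nodes)
     .force("x", forceX((d) => d.targetX).strength(0.1))
     .force("y", forceY((d) => d.targetY).strength(0.1))
     .force("collision", forceCollide((d) => 1.1 * d.size + 15).strength(1));
*/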
u=e.append("defs").append("linearGradient").attr("id","publicGradient").attr("x1","100%").attr("y1","0%").attr("x2","0%").attr("y2","0%");u.append("stop").attr("offset","0%").style("stop-color",a(l.publicColor,1)),u.append("stop").attr("offset","100%").style("stop-color",a(l.publicColor,0));const m=e.append("g").attr("class","rightRect");m.append("rect").attr("x","100%").attr("y",0).attr("transform","translate(-".concat(r/2,", 0)")).attr("width",r/2).attr("height","100%").style("fill","url(#publicGradient)"),m.append("text").text("Public").attr("x","100%").attr("y","50%").attr("text-anchor","middle").attr("dominant-baseline","middle").attr("transform","rotate(90, ".concat(t-r/4,", ").concat(o,")")).style("font-family",l.borderFontFamily).style("font-size",l.borderFontSize).style("font-weight",l.borderFontWeight).style("fill",l.borderFontColor);const f=e.append("defs").append("linearGradient").attr("id","privateGradient").attr("x1","0%").attr("y1","0%").attr("x2","100%").attr("y2","0%");f.append("stop").attr("offset","0%").style("stop-color",a(l.privateColor,1)),f.append("stop").attr("offset","100%").style("stop-color",a(l.privateColor,0));const p=e.append("g").attr("class","leftRect");p.append("rect").attr("x",0).attr("y",0).attr("width",r/2).attr("height","100%").style("fill","url(#privateGradient)"),p.append("text").text("Private").attr("x",r/2).attr("y","50%").attr("text-anchor","middle").attr("dominant-baseline","middle").attr("transform","rotate(-90, ".concat(r/2-10,", ").concat(o,")")).style("font-family",l.borderFontFamily).style("font-size",l.borderFontSize).style("font-weight",l.borderFontWeight).style("fill",l.borderFontColor)}(c,r,l,n,i));const s=o(c,r,l,0,i);d=mn.forceSimulation(Object.values(p.current)).force("x",mn.forceX((e=>e.d3.x)).strength((e=>e.d3.isListener?.2:.1))).force("y",mn.forceY((e=>e.d3.y)).strength((e=>e.d3.isListener?.2:.1))).force("collision",mn.forceCollide((e=>1.1*e.d3.size+15)).strength(1)).on("tick",(()=>{s.each((e=>{e.x>r-n?e.x=r-n:e.x<n&&(e.x=n),e.y>l-n?e.y=l-n:e.y<n&&(e.y=n)})),s.attr("transform",(e=>"translate(".concat(e.x,", ").concat(e.y,")")))}))}(40,pn[c]||pn.dark)};return(0,a.useLayoutEffect)((()=>{o||r&&l&&g(d!==r||u!==l||m!==o)}),[n,r,l,c,o]),a.createElement(f.Flex,{id:"d3-canvas",ref:t,width:"100%",height:"100%",flex:!0,round:!0,overflow:"hidden"})}))),hn={"network-viewer":gn},vn=(0,a.forwardRef)(((e,t)=>{let{id:n,resizeHandle:r,style:l,paramsKey:o,containerWidth:i,resizing:c,...s}=e;const d=mt(o),p=hn[n],[g,h]=(0,I.useHovered)({},[]),v=(0,F.A)();(0,a.useEffect)((()=>{v(h)}),[h]);const[y,{width:x,height:E}]=(0,u.A)();return a.createElement(f.Flex,{ref:(0,f.mergeRefs)(t,y),position:"relative",style:l},a.createElement(p,(0,m.A)({data:d,key:n,id:n,role:"graphics-object","aria-roledescription":"chart",ref:g,containerWidth:i,width:x,height:E,processing:c},s)),r)})),yn=(0,a.memo)((e=>{var t;let{paramsKey:n,customCharts:r,containerWidth:l}=e;const o=Object.values(r).filter((e=>!!hn[e.type])),i=null===(t=o[0])||void 0===t?void 0:t.type,[c,s]=(0,a.useState)(function(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};return[(0,dn.bP)(e,{left:0,top:0,width:12,height:9,id:e,...t,chartId:e,minWidth:12,maxWidth:12,minHeight:4,static:!1,dashboardable:!0})]}(i,{paramsKey:n,containerWidth:l}));return o.length?a.createElement(f.Flex,{column:!0,"data-testid":"functionsCharts",gap:3,width:"100%",background:"mainChartBg",round:.5},a.createElement(un.A,{rootId:n.extraKey,containerId:n.extraKey,onDragEnd:(e,t)=>{(0,dn.Q)(t),s(e)},items:c,containerWidth:l-40,Item:vn,rearrangeable:!1})):"No chart 
found"}));var xn=n(36196),En=n(28973),bn=n(13752),Cn=n(92815),wn=n(72582),kn=n(80925),An=n(22332),In=n(72982),Fn=n(6504);const Tn=e=>{let{fn:t,histograms:n,selectedId:r,disabled:l}=e;const i=(0,An.useChart)(),c=(0,be.ID)(),s=(0,a.useMemo)((()=>n.map((e=>{let{id:t,name:n}=e;return{value:t,label:n,"data-track":i.track("select-histogram-".concat(n))}}))),[n]),[d,u]=(0,o.N9)("histogram",{defaultValue:[r],key:c,extraKey:"".concat(t,"Filters"),flavour:"val"}),m=d[0];return a.createElement(In.A,{value:m,items:s,dropProps:{align:{top:"bottom",right:"right"},"data-toolbox":!0},onChange:e=>u([e]),"data-track":i.track("selectHistogram"),dropdownProps:{width:"150px"}},a.createElement(Fn.Button,{disabled:l},"Source"))},Sn=(0,a.memo)(Tn),Kn=e=>t=>"histogram::".concat(e.getAttribute("id"),"::").concat(t),Pn={width:"108px",height:"77px"},zn=e=>{let{nodeId:t,fn:n,paramsKey:r}=e;const{availableHistograms:l,histogram:o,help:c,pagination:s,data:u}=(0,i.Ol)(r),m=(0,i.nm)(),p=(0,kn.e)(),g=(0,a.useRef)();g.current=()=>Promise.resolve(null===o||void 0===o?void 0:o.chart);const[h,v]=(0,a.useMemo)((()=>{const e="".concat(t,"-").concat(n),a=p.makeChart({attributes:{id:e,info:c},getChart:()=>g.current(),makeTrack:Kn});return p.getRoot().appendChild(a),[a,e]}),[t,n]);(0,a.useEffect)((()=>h.trigger("fetch")),[o]);const[y,x]=(0,i.x9)();return(0,a.useEffect)((()=>{if(h)return(0,En.unregister)(h.on("highlightClick",(e=>x(e+1e3*o.chart.view.update_every))),h.on("highlightEnd",(()=>x(null))))}),[h,o]),(0,a.useEffect)((()=>{var e;return!!l&&h.updateAttributes({toolboxElements:[e=>{let{disabled:t}=e;return a.createElement(Sn,{histograms:l,fn:n,selectedId:o.id,disabled:t})},wn.default,bn.default,Cn.default],title:null===o||void 0===o||null===(e=o.chart)||void 0===e||null===(e=e.view)||void 0===e?void 0:e.title})}),[l]),(0,a.useEffect)((()=>{if(!m||!h||!s)return;const e=u[m],t=Math.floor((null===e||void 0===e?void 0:e[null===s||void 0===s?void 0:s.column])/1e6);t&&!isNaN(t)&&h.updateAttribute("hoverX",[1e3*Math.floor(t-t%o.chart.view.update_every),null])}),[u,o,m]),(0,a.useEffect)((()=>{y&&h&&s&&h.updateAttribute("clickX",[y,null])}),[y]),(0,a.useEffect)((()=>()=>h&&h.destroy()),[h]),a.createElement(f.Flex,{flex:!1,width:"100%",height:75},h?a.createElement(xn.A,{"data-chartid":v,chart:h,overflow:"hidden",hasFilters:!1}):a.createElement(d.A,{iconProps:Pn,title:"Loading chart..."}))},Ln={extraKey:"fn"},Nn=(0,a.memo)((e=>{let{margin:t,defaultCharts:n,hasCustom:r,...l}=e;const[o,i]=(0,S.A)(!r);return null!==n&&void 0!==n&&n.length?a.createElement(f.Flex,{column:!0},a.createElement(f.Flex,{alignItems:"center",justifyContent:"end",gap:1,onClick:i,cursor:"pointer"},a.createElement(f.TextMicro,{color:"textLite"},o?"Collapse":"Expand"," charts"),a.createElement(f.Icon,{name:"chevron_down",color:"textLite",rotate:o?0:3})),a.createElement(f.Collapsible,{open:o},a.createElement(f.Flex,{gap:4,margin:t},n.map(((e,t)=>{let[n,r]=e;return a.createElement(sn,(0,m.A)({key:t,initData:n,initFilter:r},l))}))))):null})),Mn=(0,a.memo)((e=>{let{margin:t,hasHistogram:n=!1,...r}=e;const[l,o]=(0,S.A)(!0);return n?a.createElement(f.Flex,{column:!0},a.createElement(f.Flex,{alignItems:"center",justifyContent:"end",gap:1,onClick:o,cursor:"pointer"},a.createElement(f.TextMicro,{color:"textLite"},l?"Collapse":"Expand"," 
histogram"),a.createElement(f.Icon,{name:"chevron_down",color:"textLite",rotate:l?0:3})),a.createElement(f.Collapsible,{open:l},a.createElement(f.Flex,{gap:4,margin:t},a.createElement(zn,r)))):null})),On=(0,a.memo)((e=>{let{margin:t,...n}=e;const[r,l]=(0,S.A)(!0);return n.customCharts?a.createElement(f.Flex,{column:!0},a.createElement(f.Flex,{alignItems:"center",justifyContent:"end",gap:1,onClick:l,cursor:"pointer"},a.createElement(f.TextMicro,{color:"textLite"},r?"Collapse":"Expand"," chart"),a.createElement(f.Icon,{name:"chevron_down",color:"textLite",rotate:r?0:3})),a.createElement(f.Collapsible,{open:r},a.createElement(f.Flex,{gap:4,margin:t},a.createElement(yn,n)))):null})),Rn={fn:{Chart:Nn,CustomChart:On,loadingMessage:"Loading function...",item:"functions"},logs:{Chart:Mn,CustomChart:On,loadingMessage:"Loading logs...",item:"logs",missingMessage:a.createElement(a.Fragment,null,"In this room, no node has logs collection configured."," ",a.createElement("a",{href:"https://learn.netdata.cloud/docs/working-with-logs/"},"Learn about monitoring logs with Netdata"),".")},feed:{Chart:tn,CustomChart:On,loadingMessage:"Loading events...",item:"events"}},Bn={table:yt},Dn=(0,a.memo)((e=>{let{paramsKey:t=Ln,testIdPrefix:n="functions",availableFns:r,availableFnsLoaded:l,hasError:c,containerWidth:s,...u}=e;const{Chart:v,loadingMessage:y}=Rn[t.extraKey]||Rn.fn,[x,E]=(0,i._H)(t),C=(0,p.w7)({...t,merge:!1,emptyIfAll:"feed"===t.extraKey}),{infoLoaded:w,loaded:k,data:T,error:S,type:K,retentionWarning:P,defaultCharts:z,histogram:L,missingRequired:N,requiredParams:M,customCharts:O}=(0,i.Ol)(t),R=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:[];const[t]=e,n=(0,g.xY)(t,"name");return 0===e.length?"on your nodes":1===e.length&&n?"on ".concat(n):"on your selected node"}(C);(0,a.useEffect)((()=>{var e;null!==x&&void 0!==x&&x.length||null===r||void 0===r||null===(e=r[0])||void 0===e||!e.name||c||E([r.reduce(((e,t)=>"undefined"===typeof e.priority||e.priority"processes"===e.name))||r[0]).name])}),[l,x]);const[B,D]=(0,I.useHovered)({},[k]),_=(0,F.A)();(0,a.useEffect)((()=>{_(D)}),[D]);const j=jt(C,x[0],t),[W,V]=((e,t,n)=>{const r=(0,h.vt)(),l=(0,be.QW)(),c=(0,ee.mQ)(),s=(0,o.l6)("pollingInterval",{flavour:"int",...n}),d=(0,o.l6)("slice",{defaultValue:!0,flavour:"bool",...n}),[u,m]=(0,i.WH)(n),[f,p]=(0,i.x9)(),v=(0,a.useRef)();v.current=u;const y=e=>{let{skip:t,data:n={}}=e;!t&&n.data?((Ot[n.type]||Ot.default)(m,n),p((e=>null!==e?null:e))):m((e=>({...e,tail:n.tail||!1,loading:!1})))},{fetch:x,isEnabled:E}=Rt[t]||Rt.default,[b,C,w,k]=Bt(f),I=(0,bt.A)(k),F=(0,a.useRef)();F.current=b;const T=(0,a.useRef)();T.current=u.lastModified;const S=(0,a.useRef)();S.current=u.anchorAfter;const K=(0,be.ID)(),P=(0,o.rW)(null,{key:K,extraKey:"".concat(t||n.extraKey,"Filters")}),z=(0,bt.A)(P,!0),L=(0,g.yN)(e),[{loading:N}]=(0,A.f7)();(0,a.useEffect)((()=>{"feed"!==t&&(T.current=null,S.current=null,m({...kt.zb[u.type]||kt.zb.default,type:u.type}))}),[null===e||void 0===e?void 0:e[0],t]);const M=P[Object.keys(P).find((e=>u.filtersToRefresh[e]&&!(0,ce.Ay)(null===z||void 0===z?void 
0:z[e],P[e])))],O=(0,a.useRef)();O.current=!1,(0,a.useEffect)((()=>{"feed"!==t&&(O.current=!0)}),[!!u.requiredParams.length&&M]),(0,a.useEffect)((()=>{z&&!Et()(z,P)&&u.loaded&&(T.current=null,S.current=null,m((e=>({...e,lastModified:null,anchorAfter:null,anchorBefore:null,offset:0}))))}),[!!u.aggregations&&P]),(0,a.useEffect)((()=>{u.loaded&&(T.current=null,S.current=null,m((e=>({...e,lastModified:null,anchorAfter:null,anchorBefore:null,offset:0}))))}),[C,d]);const R=(0,a.useRef)(),B=(0,a.useRef)();B.current=function(){let{checkPeriod:n,...a}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const o=F.current();return m((e=>({...e,tail:!!a.tail,loading:!0,requestedPeriod:o}))),n&&v.current.tail&&v.current.requestedPeriod.after{var t,n;const{merge:a,direction:r,dataOnly:l,tail:o}=e||{};null===(t=e)||void 0===t||null===(t=t.response)||void 0===t||!t.status||400!==e.response.status&&304!==e.response.status?(null!==(n=e)&&void 0!==n&&null!==(n=n.response)&&void 0!==n&&n.data&&(e=e.response.data),m((t=>({...t,error:e,loaded:!0,loading:!1}))),p((e=>null!==e?null:e))):m((t=>({...t,loaded:!0,loading:!1,...!!e&&{merge:a,direction:r,dataOnly:l,tail:o}})))},_=(0,Ct.A)((()=>({enabled:!u.missingRequired&&u.infoLoaded&&!N&&E({nodeIds:e,fn:t,roomId:K,after:C}),fetch:()=>B.current(!c&&C<0?{if_modified_since:T.current,...T.current&&{direction:"forward",merge:u.hasHistory,tail:!0,delta:!0,data_only:!0,anchor:S.current},reset:O.current}:!c&&((e,t)=>{let{after:n,before:a}=e;return tn})(F.current(),k)?{anchor:1e3*k,data_only:!!T.current,reset:O.current}:{direction:"backward",data_only:!!T.current,checkPeriod:!0,reset:O.current}),onFail:D,onPollingFail:D,onReceive:y,onBlurMode:()=>v.current.loading&&m((e=>({...e,loading:!1}))),polling:!c&&C<0,pollingOptions:{pollingInterval:1e3*(s||u.updateEvery),shouldPause:!0},force:!0,skip:!!I&&!k})),[c,u.infoLoaded,u.missingRequired,K,null===e||void 0===e?void 0:e.length,null===e||void 0===e?void 0:e[0],t,s,(!!u.aggregations||!!u.requiredParams.length)&&!!z&&P,C,w,N,k,d]);return[(0,a.useCallback)((e=>{if(v.current.loadingMore)return;m((e=>({...e,loadingMore:!0})));const t=B.current(e);return t.then(y).catch(D).finally((()=>m((e=>({...e,loadingMore:!1}))))),t}),[m]),(0,a.useCallback)((()=>{var e,t,n,a,r;null===_||void 0===_||null===(e=_.clearRef)||void 0===e||null===(e=e.current)||void 0===e||null===(e=e.promise)||void 0===e||null===(t=e.cancel)||void 0===t||t.call(e),clearTimeout(null===_||void 0===_||null===(n=_.clearRef)||void 0===n||null===(n=n.current)||void 0===n?void 0:n.timeoutId),null===(a=R.current)||void 0===a||null===(r=a.cancel)||void 0===r||r.call(a),m((e=>({...e,loaded:!0,loading:!1,loadingMore:!1})))}),[m])]})(C,x[0],t);if(!l)return a.createElement(d.A,{title:y,"data-testid":"".concat(n,"Loading")});const{item:q="items",missingMessage:H}=Rn[t.extraKey]||{};if(j)return a.createElement(b.A,{title:"".concat((0,nn.Zr)(q)," couldn't be loaded"),message:"ErrAllNodesFailed"==j.errorMsgKey?"You do not have permissions to load ".concat(q):"Something went wrong."});if(c||l&&!r.length)return a.createElement(b.A,{title:"".concat((0,nn.Zr)(q)," couldn't be loaded"),message:H||"We couldn't find any available ".concat(q,".")});if(S&&"canceled"!==S){if("ErrNodeResourceNotFound"===(null===S||void 0===S?void 0:S.errorMsgKey))return a.createElement(b.A,{message:"Please review your selection and try again.",title:"".concat("feed"===t.extraKey?"Events are":'Function "'.concat(x,'" is')," not available ").concat(R,".")});if(null===T||void 0===T||!T.length)return 
a.createElement(b.A,{title:"".concat("feed"===t.extraKey?"Events":'Function "'.concat(x,'"')," couldn't be loaded"),message:"ErrForbidden"===(null===S||void 0===S?void 0:S.errorMsgKey)?"Forbidden":(null===S||void 0===S?void 0:S.errorMessage)||""})}if(w&&N)return a.createElement(b.A,{title:"Required filters are needed",message:'Function "'.concat(x,'" needs: ').concat(M.map((e=>e.name)).join(", "),"."),footer:"Check them on the right sidebar!"});const G=Bn[K]||Bn.table;return a.createElement(f.Box,{overflow:{horizontal:"hidden",vertical:"auto"},column:!0,padding:[4,4,0],gap:2},a.createElement(ie,{nodeIds:C,onRefresh:W,onCancel:V,paramsKey:t}),k?a.createElement(a.Fragment,null,a.createElement(On,{paramsKey:t,testIdPrefix:n,customCharts:O,refetch:W,fn:x,nodeIds:C,containerWidth:s}),a.createElement(v,{paramsKey:t,testIdPrefix:n,defaultCharts:z,hasCustom:!!O,hasHistogram:!!L,refetch:W,fn:x,nodeIds:C,containerWidth:s}),a.createElement(G,(0,m.A)({key:x,"data-testid":"".concat(n,"Component"),paramsKey:t,refetch:W,ref:B,selectedFn:x,containerWidth:s},u)),!(null!==T&&void 0!==T&&T.length)&&a.createElement(f.Flex,{column:!0,justifyContent:"center",alignItems:"center",alignSelf:"center",margin:["feed"===t.extraKey?3:30,0,0],gap:3},a.createElement(f.H3,null,"No results to display"),a.createElement(f.Text,{color:"textDescription",textAlign:"center"},"Double-check your search or filters and dates and try again with different conditions"),P&&a.createElement(f.Text,{color:"text",textAlign:"center"},P))):a.createElement(d.A,{title:y,"data-testid":"".concat(n,"Loading")}))})),_n=Dn;var jn=n(15255);const Wn=e=>{const t=(0,A.OS)();return a.createElement(f.Button,(0,m.A)({label:"Get a fresh agent token",onClick:t},e))},Vn="Functions expose sensitive information about your systems and applications. To protect your privacy, Netdata exposes this information only to logged-in users and claimed agents. 
When viewing Functions directly on a Netdata Agent UI, this information is sent directly from the Netdata Agent to your web browser, without exposing it to any third parties.",qn={notLoggedIn:{title:"Sign in to Netdata to use this function",description:Vn,footer:a.createElement(w.A,null)},notClaimed:{title:"Connect this agent to Netdata to use this function",description:Vn,footer:a.createElement(k.A,null)},noAccess:{title:"This agent belongs to a Netdata Space you are not member of",description:Vn,footer:a.createElement(f.TextBig,{color:"textDescription"},"Ask for an invitation from the administrators of the Netdata Space of the agent to use functions.")},bearerError:{title:"You are not authorized to use this function",description:Vn,footer:a.createElement(Wn,null)}},Hn={extraKey:"fn"},Gn=e=>{let{paramsKey:t=Hn,hasError:n,refreshAvailableFns:r,availableFnsLoaded:l,availableFns:o,...c}=e;const s=(0,p.w7)({...t,emptyIfAll:!1,merge:!0}),[d,u]=(0,p.Oj)(t),f=(0,g.dN)(),y=x(t),E=(0,i.Ak)(t),w=(0,i.E)(t);(0,a.useEffect)((()=>w),[E,null===d||void 0===d?void 0:d[0]]),(0,a.useEffect)((()=>{var e;if(y||!E)return;const t=null===(e=o.find((e=>e.name===E)))||void 0===e?void 0:e.nodes;t&&(null!==d&&void 0!==d&&d.length&&t[d[0]]||u([Object.keys(t)[0]]))}),[y,u,s.length,E,o]);const[k]=(0,v.Q8)(),[{bearerProtection:I,error:F,token:T}]=(0,A.f7)(),{error:S}=(0,i.Ol)(t),K=(0,h.dg)(),[{canBeClaimed:P,cloudStatus:z}]=(0,jn.RJ)();if((0,a.useEffect)((()=>{n&&r()}),[z,T]),!f.length)return a.createElement(C,{paramsKey:t});if(K&&(S||n||F&&I)){const{title:e,description:t,footer:n}=(e=>{let{userStatus:t,userNodeStatus:n,accessError:a,canBeClaimed:r,cloudStatus:l}=e;return qn[t]?qn[t]:a?qn.notLoggedIn:r?qn.notClaimed:qn[n]?qn[n]:qn.bearerError?qn.bearerError:qn.notLoggedIn})({...k,bearerError:F,canBeClaimed:P,cloudStatus:z,error:S});return a.createElement(b.A,{title:e,message:t,footer:n})}return a.createElement(_n,(0,m.A)({testIdPrefix:"fn",paramsKey:t,availableFnsLoaded:l,availableFns:o},c))},Un={extraKey:"feed"},Zn=e=>{let{paramsKey:t=Un,...n}=e;x(t);const r=(0,i.E)(t);return(0,a.useEffect)((()=>r),[]),a.createElement(_n,(0,m.A)({testIdPrefix:"feed",paramsKey:t},n))};var Xn=n(5287),Yn=n.n(Xn),$n=n(42828),Jn=n(59846);const Qn={fn:!0,logs:!0},ea=(0,a.memo)((e=>{let{roomId:t,flavour:n,availableFns:r}=e;const l=(e=>{const t=Qn[e];return(0,a.useMemo)((()=>({Live:{head:{label:"Live nodes",textColor:"textLite",iconName:"connectivityStatusLive",hasFn:t},node:{textColor:"textFocus",showFn:t,requireFn:t}},Stale:{head:{label:"Stale nodes",textColor:"textLite",iconName:"connectivityStatusStale"},node:{textColor:"textFocus",disabled:t}},Offline:{head:{label:"Offline nodes",textColor:"textLite",iconName:"connectivityStatusOffline"},node:{textColor:"textLite",disabled:t}}})),[e])})(n),o=(e=>{const{aggregations:t}=(0,i.Ol)({extraKey:e});return(0,a.useMemo)((()=>null!==t&&void 0!==t&&t.node_ids?t.node_ids.buckets.reduce(((e,t)=>({...e,[t.key]:t.hits.doc_count})),{}):null),[t])})(n),c=Qn[n],s=(0,i.Ak)({extraKey:n}),d=(0,a.useMemo)((()=>{var e;return Array.isArray(r)&&(null===(e=r.find((e=>e.name===s)))||void 0===e?void 0:e.nodes)||null}),[s,r]),u=(0,a.useCallback)((e=>!!d&&!d[e]),[d]),f=(0,a.useCallback)((e=>Yn()(d?Object.keys(d):[],e)),[d]);return 
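/* Per-node result counts for the sidebar come from Elasticsearch-style
   aggregation buckets, reduced to an id -> count map above (null when the
   response carries no aggregations). Sketch of that reduce, hand de-minified:

   const nodeCounts = aggregations.node_ids.buckets.reduce(
     (acc, bucket) => ({ ...acc, [bucket.key]: bucket.hits.doc_count }),
     {}
   );
*/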
c&&!d?null:a.createElement(Jn.A,{title:"Nodes",testIdPrefix:"nodes",baseKey:t,extraKey:n,param:"selectedNodeIds",multi:"feed"===n},a.createElement($n.Ay,(0,m.A)({key:n,baseKey:t,extraKey:n,statusProps:l,background:"mainBackground",itemProps:{padding:[1,1,1,.5]},searchMargin:[0,0,1],height:{max:"300px"},multi:"feed"===n,nodeCounts:o},c&&{useFilteredIds:f},{checkIsDisabled:u})))})),ta=ea;var na=n(82700);const aa=e=>e.name,ra=(0,a.memo)((e=>{let{param:t,baseKey:n,extraKey:r,testIdPrefix:l,title:o,items:i=[],multi:c=!1}=e;return i.length?a.createElement(Jn.A,{title:o,testIdPrefix:l,baseKey:n,extraKey:r,param:t,multi:c},a.createElement(na.Ay,{param:t,baseKey:n,extraKey:r,testIdPrefix:l,collection:i,multi:c,getValue:aa,getLabel:aa})):null}));var la=n(83488);const oa=e=>e.id,ia={string:(e,t)=>e.localeCompare(t,void 0,{sensitivity:"accent",ignorePunctuation:!0}),integer:(e,t)=>e-t},ca={table:function(e){let{param:t,columns:n,aggregatedView:a}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};return Object.entries(e.reduce(((e,r)=>{var l,o,i;return e[r[t]]={count:((null===(l=e[r[t]])||void 0===l?void 0:l.count)||0)+(r.hidden?0:1),type:null===(o=n[t])||void 0===o?void 0:o.type,...a&&{actualCount:((null===(i=e[r[t]])||void 0===i?void 0:i.actualCount)||0)+(r.hidden?0:r[a.column]||1),actualCountLabel:a.aggregatedLabel,countLabel:a.resultsLabel}},e}),{})).map((e=>{let[t,n]=e;return{id:t,...n}})).sort(((e,t)=>(ia[e.type]||ia.integer)(e.id,t.id)))},default:n.n(la)()},sa=e=>{let{param:t,baseKey:n,extraKey:r,filtersKey:l,testIdPrefix:c,title:s,defaultExpanded:d=!1}=e;const u=(e=>{let{extraKey:t,filtersKey:n,param:r}=e;const l=mt({extraKey:t,omit:r,keepAll:!0}),c=(0,o.l6)(r,{extraKey:n,flavour:"arr"}),s=(0,i.Ol)({key:"type",extraKey:t}),d=ca[s]||ca.default,{columns:u,aggregatedView:m}=(0,i.Ol)({extraKey:t});return(0,a.useMemo)((()=>d(l,{param:r,columns:u,aggregatedView:m})),[r,l,c])})({param:t,extraKey:r,filtersKey:l});return u&&u.length?a.createElement(Jn.A,{title:s,testIdPrefix:c,baseKey:n,extraKey:l,param:t,defaultIsOpen:d},a.createElement(na.Ay,{param:t,baseKey:n,extraKey:l,testIdPrefix:c,collection:u,capitalized:!1,getValue:oa,getLabel:oa})):null};var da=n(73700);const ua=e=>{let{param:t,baseKey:n,extraKey:r,filtersKey:l,testIdPrefix:c,title:s,defaultExpanded:d=!1}=e;const{columns:u}=(0,i.Ol)({extraKey:r}),[m,p]=(0,o.r$)(t,{extraKey:l,flavour:"arr",defaultValue:[]}),g=(0,a.useCallback)((0,da.n)(10,(e=>{let{min:t,max:n}=e;return p([t,n])})),[]);if(!u[t])return null;const h=1*(u[t].min||0),v=1*(u[t].max||0);return a.createElement(Jn.A,{title:s,testIdPrefix:c,baseKey:n,extraKey:l,param:t,defaultIsOpen:d,showCounter:!1},a.createElement(f.Box,{padding:[2,0],width:"100%"},a.createElement(f.MultiRangeInput,{min:h,max:v,onChange:g,initMin:m[0]?1*m[0]:h,initMax:m[1]?1*m[1]:v,step:(v-h)/20})))},ma=(e,t)=>({key:e,type:t[e].filter,defaultExpanded:t[e].defaultExpandedFilter}),fa={multiselect:sa,range:ua},pa=e=>{let{roomId:t,extraKey:n}=e;const r=(0,i.Ak)({extraKey:n}),l=(e=>rn(e,ma))(n);return l.map((e=>{const l=fa[e.type]||fa.multiselect;return a.createElement(l,{key:e.key,title:e.key,testIdPrefix:"function",baseKey:t,extraKey:n,filtersKey:"".concat(r||n,"Filters"),param:e.key,defaultExpanded:e.defaultExpanded})}))},ga=e=>e.name||e.id,ha=e=>e.id,va=(0,a.memo)((e=>{let{title:t,options:n,param:r,baseKey:l,filtersKey:o,testIdPrefix:i,multi:c=!0,defaultIsOpen:s=!1,required:d=!1}=e;const[u,m]=(0,S.A)(!1),p=(0,a.useMemo)((()=>null!==n&&void 0!==n&&n.length?n.filter((e=>"number"!==typeof 
e.count||e.count>0)):null),[n]);return null!==n&&void 0!==n&&n.length?a.createElement(Jn.A,{title:t,testIdPrefix:i,baseKey:l,extraKey:o,param:r,defaultIsOpen:s,required:d,multi:c,configElements:e=>{let{isOpen:t}=e;return t&&(null===p||void 0===p?void 0:p.length)!==(null===n||void 0===n?void 0:n.length)&&a.createElement(f.Button,{padding:[0],flavour:"borderless",onClick:e=>{e.stopPropagation(),m()},"data-testid":"".concat(i,"-filter-resetAll"),label:u?"Show zeros":"Hide zeros",tiny:!0,neutral:!0})}},a.createElement(na.Ay,{param:r,baseKey:l,extraKey:o,testIdPrefix:i,collection:u?p:n,capitalized:!1,getValue:ha,getLabel:ga,multi:c})):null})),ya={nodes:!0},xa=e=>{let{roomId:t,extraKey:n}=e;const r=(0,i.Ak)({extraKey:n}),l=(0,i.Ph)(n),{columns:o}=(0,i.Ol)({extraKey:n});return l.length?l.map((e=>{var l;return ya[e.id]?null:a.createElement(va,{key:e.id,title:e.name,options:e.options,param:e.id,testIdPrefix:"function",baseKey:t,filtersKey:"".concat(r||n,"Filters"),defaultExpanded:null===(l=o[e.id])||void 0===l?void 0:l.defaultExpandedFilter})})):null},Ea={select:!0},ba=e=>{let{roomId:t,extraKey:n,requiredParams:r}=e;return r.map((e=>a.createElement(va,{key:e.id,title:e.name,options:e.options,param:e.id,testIdPrefix:"function",baseKey:t,filtersKey:"".concat(n,"Filters"),multi:!Ea[e.type],defaultIsOpen:!0,required:!0})))},Ca=(0,a.memo)((e=>{let{extraKey:t}=e;const n=(0,i.Ak)({extraKey:t}),[{requiredParams:r,infoLoaded:l},c]=(0,i.WH)({extraKey:t}),s=(0,be.ID)(),[d,u]=(0,o.N9)(null,{key:s,extraKey:"".concat(n,"Filters")});return(0,a.useEffect)((()=>{l&&r.length&&r.forEach((e=>{var t,n;return(null===(t=e.options)||void 0===t?void 0:t.length)>0&&!(null!==(n=d[e.id])&&void 0!==n&&n.length)&&u({[e.id]:[e.options[0].id]})}))}),[l]),(0,a.useEffect)((()=>{l&&(r.length?d&&c((e=>({...e,missingRequired:!!r.length&&!r.every((e=>{var t,n;return!(null===(t=e.options)||void 0===t||!t.length)&&(Array.isArray(d[e.id])?!(null===(n=d[e.id])||void 0===n||!n.length):!!d[e.id])}))}))):c((e=>({...e,missingRequired:!1}))))}),[l,r,d]),a.createElement(ba,{roomId:s,extraKey:n,requiredParams:r})})),wa=(0,a.memo)((e=>{let{flavour:t,availableFns:n}=e;const r=(0,be.ID)(),{loaded:l,aggregations:o,error:c}=(0,i.Ol)({extraKey:t});return a.createElement(f.Flex,{column:!0,overflow:{vertical:"auto"},padding:[0,3,30]},"feed"!==t&&a.createElement(ra,{title:"Function",testIdPrefix:"function",baseKey:r,extraKey:t,param:"selectedFn",items:n}),a.createElement(ta,{roomId:r,flavour:t,availableFns:n}),a.createElement(Ca,{extraKey:t}),l&&!o&&!c&&a.createElement(pa,{roomId:r,extraKey:t}),l&&o&&!c&&a.createElement(xa,{roomId:r,extraKey:t}))})),ka=wa;var Aa=n(12352),Ia=n(50065),Fa=n(18682);const Ta=e=>{let{data:t}=e;return a.createElement(Ia.A,{name:"Files"},a.createElement(f.Flex,{gap:1,padding:[0,1],alignItems:"center"},a.createElement(f.Box,{sx:{fontWeight:"500",letterSpacing:"1px"},as:f.Text,color:"textLite"},"{;}"),a.createElement(f.TextSmall,null,"Copy data as"),a.createElement(f.Flex,{cursor:"pointer",gap:1,onClick:(0,Fa.C)(JSON.stringify(t),{text:"JSON copied to clipboard"})},a.createElement(f.TextSmall,{color:"primary"},"json"),a.createElement(f.Icon,{color:"primary",size:"small",name:"copy"}))))},Sa=function(){let{data:e={},columns:t={}}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:[],a=arguments.length>2?arguments[2]:void 0,r=arguments.length>3?arguments[3]:void 0,l=arguments.length>4&&void 0!==arguments[4]?arguments[4]:"fn";return Object.entries(e).sort(((e,n)=>{var 
a,r;let[l]=e,[o]=n;return((null===(a=t[l])||void 0===a?void 0:a.displayName)||l).localeCompare((null===(r=t[o])||void 0===r?void 0:r.displayName)||o,void 0,{sensitivity:"accent",ignorePunctuation:!0})})).reduce(((e,n)=>{var o,i,c;let[s,d]=n;const u=a?"".concat(a,".").concat((null===(i=t[s])||void 0===i?void 0:i.displayName)||s):(null===(o=t[s])||void 0===o?void 0:o.displayName)||s;if(null!==(c=t[s])&&void 0!==c&&c.dummy)return e;if("feed"!==l&&!t[s])return e;const m=Array.isArray(d);if(m&&"object"===typeof d[0]&&null!==d[0]||m&&d.length>1)e.push([u,JSON.stringify(d)]),r[u]=d;else if(d&&"object"===typeof d)Sa({data:d},e,u,r,l);else{if(null===d)return e;e.push([u,m?d.length>1?"[".concat(d.join(", "),"]"):d[0]:d]),r[u]=m?d.length>1?"[".concat(d.join(", "),"]"):d[0]:d}return e}),n)},Ka=(0,U.default)(f.Flex).withConfig({displayName:"sidebarInfo__Container",componentId:"sc-18vn5bm-0"})(["*{font-family:monospace;letter-spacing:0.09px;line-height:13px;font-size:12px;overflow-wrap:anywhere;white-space:pre-wrap;word-break:break-word;}"]),Pa=e=>{let{selectedRowData:t={},setSelectedRowData:n,flavour:r,...l}=e,o={};const i=(0,a.useMemo)((()=>Sa(t,[],null,o,r).sort(((e,t)=>{let[n]=e,[a]=t;return n.localeCompare(a)}))),[t]);return a.createElement(f.Flex,(0,m.A)({column:!0,gap:2,overflow:{vertical:"auto"},padding:[0,2,30]},l),a.createElement(f.Flex,{justifyContent:"between",padding:[2,0],border:{side:"bottom",color:"borderSecondary"}},a.createElement(f.TextBig,{strong:!0},"Row info"),i.length>0&&a.createElement(f.Box,{onClick:()=>n(),cursor:"pointer"},a.createElement(f.TextSmall,{color:"primary"},"Clear selection"))),Array.isArray(i)&&i.length?a.createElement(a.Fragment,null,a.createElement(Ka,{column:!0,gap:2},i.map((e=>{let[t,n]=e;return a.createElement(Aa.A,{key:t,size:"small",name:t,testId:"sidebar-rowInfoContent-".concat(t),padding:[1,0,0]},n)}))),a.createElement(Ta,{data:t.json||o})):a.createElement(f.Text,null,"Select a row to see raw data"))};var za=n(63314);const La={fn:{Component:Gn,paramsKey:{extraKey:"fn"},loadingMessage:"Loading functions..."},logs:{Component:Gn,paramsKey:{extraKey:"logs"},loadingMessage:"Loading logs..."},feed:{Component:Zn,paramsKey:{extraKey:"feed"},loadingMessage:"Loading events..."}},Na={filters:{iconName:"filterList",...c.kO,width:"19px",Content:ka,dataTestId:"fnFilters",label:"Filters"},info:{iconName:"information",...c.kO,width:"19px",dataTestId:"info",Content:Pa,label:"Info"}},Ma=e=>{let{flavour:t}=e;const{Component:n,paramsKey:m,loadingMessage:f}=La[t]||La.fn,p=(0,l.CK)(),g=(0,l.nj)(),h=(0,o.Fw)("sidebarTab",{flavour:"val",extraKey:t}),v=(0,o.Fw)("sidebarOpen",{flavour:"bool"}),[y,x]=(0,a.useState)(),E=(0,a.useCallback)((function(){v(!0),h("info"),x(...arguments)}),[]),{loaded:b,value:C,hasError:w,refresh:k}=(0,i.vx)(m.extraKey),[A,{width:I}]=(0,u.A)();return g?p.length||"feed"===t?a.createElement(za.Ay,{feature:"Functions-".concat(t)},a.createElement(r.A,{"data-testid":"".concat(t,"Page"),margin:[0,0,2],sidebar:a.createElement(c.Ay,{title:"Filters",flavour:t,selectedRowData:y,setSelectedRowData:x,availableFns:C,tabs:Na,loaded:!0,hasSearch:!1}),ref:A},a.createElement(n,{testIdPrefix:t,paramsKey:m,onShowInfo:E,availableFnsLoaded:b,availableFns:C,refreshAvailableFns:k,hasError:w,containerWidth:I}))):a.createElement(za.Ay,{feature:"Functions-".concat(t),mode:"NoNodesView"},a.createElement(s.A,null)):a.createElement(d.A,{title:f})}}}]); \ No newline at end of file diff --git a/src/web/gui/v2/7436.1ebd371d70e6a87c5499.chunk.js 
b/src/web/gui/v2/7436.1ebd371d70e6a87c5499.chunk.js deleted file mode 100644 index d986f3f46..000000000 --- a/src/web/gui/v2/7436.1ebd371d70e6a87c5499.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="52d45d1a-d347-4647-97da-7eabff5f654a",e._sentryDebugIdIdentifier="sentry-dbid-52d45d1a-d347-4647-97da-7eabff5f654a")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[7436],{99904:(e,t,n)=>{n.d(t,{A:()=>s});n(62953);var a=n(96540),r=n(3914),l=n(25624);var o=n(87659);const i=e=>{const t=new Date(e||void 0).toDateString();return"Invalid Date"!==t?t:null},s=()=>{var e;const t=(0,r.ap)(),n=(0,a.useMemo)((()=>"".concat("dismissedBumpedWarningKey","_").concat(null===t||void 0===t?void 0:t.id)),[null===t||void 0===t?void 0:t.id]),[s,,,d]=(0,o.A)(!localStorage.getItem(n)),{trialEndsAtRaw:u}=(0,l.A)(),c=u&&"EarlybirdAndCommunitySunset"==(null===t||void 0===t||null===(e=t.metadata)||void 0===e?void 0:e.joinTrialCode),m=s&&u&&c,g=c?"EARLYB25":null,p=(0,a.useCallback)((()=>{localStorage.setItem(n,!0),d()}),[d,n]);return{isModalVisible:m,isEarlybirdAndCommunitySunset:c,coupon:g,trialEndsAt:i(u),onClose:p}}},57436:(e,t,n)=>{n.r(t),n.d(t,{OptOut:()=>y.A,OptOutModal:()=>A.A,TrialMessage:()=>E.A,TrialWarning:()=>b,default:()=>w,useBusinessPlan:()=>v.A,useTrial:()=>i.A});var a=n(96540),r=n(22292),l=(n(62953),n(3296),n(27208),n(48408),n(39225)),o=n(10648),i=n(25624),s=n(99904),d=n(17170);const u=(0,l.A)((()=>n.e(9843).then(n.bind(n,19843))),"TrialWelcome"),c=(0,l.A)((()=>n.e(1839).then(n.bind(n,41839))),"BumpedWarning"),m=(0,l.A)((()=>Promise.all([n.e(8938),n.e(6661)]).then(n.bind(n,46661))),"NewUserForm"),g=()=>{const{isEarlybirdAndCommunitySunset:e}=(0,s.A)(),{trialWelcomeVisible:t,paymentProvider:n}=(0,i.A)(),{email:l,createdAt:g,isAnonymous:p}=(0,r.uW)(),f=(0,d.A)();if(!e&&!t&&!f&&"AWS"!==n){const e=new URL(window.location.href).searchParams;return!p&&(0,o.o)(new Date,new Date(g))<3&&(e.get("email")||e.get("oauth"))?a.createElement(a.Suspense,{fallback:""},a.createElement(m,{email:l})):null}const b=e?c:u;return a.createElement(a.Suspense,{fallback:""},a.createElement(b,null))};var p=n(93155);const f=(0,l.A)((()=>Promise.all([n.e(6384),n.e(86)]).then(n.bind(n,60086))),"Warnings"),b=e=>{const t=(0,r.uW)("isAnonymous");return p.bO&&!t?a.createElement(a.Suspense,{fallback:""},a.createElement(f,e)):null};var y=n(66732),A=n(34641),E=n(93476),v=n(28061);const w=()=>{const e=(0,r.uW)("isAnonymous");return window.envSettings.onprem||e?null:a.createElement(g,null)}},34641:(e,t,n)=>{n.d(t,{A:()=>f});n(9391),n(62953);var a=n(96540),r=n(63950),l=n.n(r),o=n(83199),i=n(19673),s=n(71835),d=n(92155),u=n(50876),c=n(63314),m=n(87659),g=n(97118);const p=(0,d.A)(o.Button),f=e=>{let{onConfirm:t,onDecline:n=l(),onCancellingEnd:r=l()}=e;const d=(0,i.M4)(),[f,b]=(0,s.A)(),{id:y}=(0,g.A)(),{sendLog:A,isReady:E}=(0,u.A)(),[v,,w,h]=(0,m.A)(),x=(0,a.useCallback)((()=>{w(),d({productId:y}).then((()=>{f({header:"Successfully canceled subscription",text:"You are now on Community plan"}),A({feature:"TrialOptOut",isSuccess:!0})})).catch((()=>{b({header:"Failed to cancel the subscription",text:"Remained on Business 
plan"}),A({feature:"TrialOptOut",isFailure:!0,error:"Failed to cancel the subscription"})})).finally((()=>{r(),h()}))}),[y,E]),C=(0,a.useCallback)((()=>{t?t():x()}),[t,x]);return a.createElement(o.Modal,{backdropProps:{backdropBlur:!0}},a.createElement(c.Ay,{feature:"TrialOptOut"},a.createElement(o.ModalContent,{width:{base:140}},a.createElement(o.ModalHeader,null,a.createElement(o.Flex,{gap:2,alignItems:"center"},a.createElement(o.H4,null,"Go to Community plan"))),a.createElement(o.ModalBody,null,v?a.createElement(o.Flex,{height:"100px"},a.createElement(o.TextBig,null,"Changing billing plan...")):a.createElement(o.Flex,{gap:2,column:!0},a.createElement(o.H3,null,"Are You Sure?"),a.createElement(o.TextBig,null,"It looks like you have chosen to opt-out of your free 30-day business trial. Are you sure you do not want to experience all the features Netdata has to offer?"),a.createElement(o.TextBig,null,"By opting out, you will switch to the community plan immediately."))),a.createElement(o.ModalFooter,null,a.createElement(o.Flex,{justifyContent:"end",gap:4,padding:[1,2]},a.createElement(p,{feature:"TrialOptOut",label:"Yes, I am sure!",flavour:"hollow",small:!0,onClick:C,disabled:!y||v,textTransform:""}),a.createElement(p,{feature:"TrialOptOut",label:"No, I want the trial!",small:!0,onClick:n,disabled:v,textTransform:""}))))))}},35454:(e,t,n)=>{n.d(t,{$B:()=>o,TB:()=>i,W1:()=>r,ml:()=>l,ue:()=>a});const a={default:"successSemi",warning:"warningSemi",critical:"errorSemi"},r={default:{background:"successSemi",border:"success"},warning:{background:"warningSemi",border:"warning"},critical:{background:"errorSemi",border:"error"}},l=[30,15,3,2,1],o="dismissedTrialWelcome",i="dismissedTrialWarningDate"},93476:(e,t,n)=>{n.d(t,{A:()=>g});var a=n(58168),r=n(96540),l=n(83199),o=n(63950),i=n.n(o),s=n(25624),d=n(99904),u=n(24864);const c={banner:{color:"main"},sidebar:{lineHeight:"1.6",color:"main"},freePlanUpgrade:{lineHeight:"1.6",color:"main"},billing:{color:"textLite"}},m=e=>{let{canUpgrade:t,onUpdateClick:n=i(),children:o,...s}=e;return t?r.createElement(l.Box,(0,a.A)({"data-testid":"upgrade-to-business-banner",onClick:n,as:l.Text,cursor:"pointer",textDecoration:"underline",color:"main"},s),o):null},g=e=>{let{flavour:t,couponRemainingDays:n,onUpdateClick:o=i()}=e;const{daysRemaining:g,canUpgrade:p,trialEndsAt:f}=(0,s.A)(),{isEarlybirdAndCommunitySunset:b}=(0,d.A)(),y=(0,r.useMemo)((()=>({isCoupon:n>0,isEarlybirdAndCommunitySunset:b,isBanner:"banner"==t,isSidebar:"sidebar"==t,isBilling:"billing"==t,isFreePlanUpgrade:"freePlanUpgrade"==t})),[t,n]);return r.createElement(l.Flex,{justifyContent:y.isBilling?"start":"center",alignItems:"center",width:"100%",gap:2},y.isCoupon?r.createElement(l.Flex,{column:!0},r.createElement(l.Text,(0,a.A)({},c[t],{fontSize:"10px",strong:!0}),"POST BLACK FRIDAY OFFER"),r.createElement(u.Te,(0,a.A)({},c[t],{fontSize:"38px",lineHeight:.8,strong:!0}),"50% off")):y.isEarlybirdAndCommunitySunset?r.createElement(l.Flex,{column:!0,gap:1,alignItems:"center"},r.createElement(l.Text,(0,a.A)({},c[t],{textAlign:"center",strong:!0},y.isBanner?{}:{fontSize:"10px"}),"Thank you for your support!"," ",y.isBanner?r.createElement(m,{canUpgrade:p,onUpdateClick:o},"Upgrade"):null),r.createElement(l.Text,(0,a.A)({},c[t],{color:"primary",fontSize:"22px",lineHeight:.8,strong:!0}),"25% Lifetime off")):y.isFreePlanUpgrade?r.createElement(l.Text,c[t],"Upgrade your plan for unlimited access and Business features."):r.createElement(l.Text,c[t],"You have 
",r.createElement(l.Text,(0,a.A)({strong:!0},c[t]),"".concat(g," days"))," ","left to explore all the features of Netdata Business."," ",y.isBilling&&r.createElement(r.Fragment,null,"Trial ends at"," ",r.createElement(l.Text,(0,a.A)({strong:!0},c[t]),f),"."," "),y.isBanner?r.createElement(m,{canUpgrade:p,onUpdateClick:o},"Consider upgrading for unlimited access."):r.createElement(r.Fragment,null,"Consider upgrading for unlimited access.")))}},24864:(e,t,n)=>{n.d(t,{PL:()=>i,Te:()=>s,bg:()=>o});var a=n(8711),r=n(83199),l=n(35454);const o=(0,a.default)(r.Flex).attrs({position:"relative"}).withConfig({displayName:"styled__TrialWarningSidebar",componentId:"sc-66x250-0"})(["background-color:",";border-width:1px;border-style:dashed;border-color:",";border-radius:2px;"],(e=>{var t;let{type:n}=e;return(0,r.getColor)(null===(t=l.W1[n])||void 0===t?void 0:t.background)}),(e=>{var t;let{type:n}=e;return(0,r.getColor)(null===(t=l.W1[n])||void 0===t?void 0:t.border)})),i=(0,a.default)(r.Button).withConfig({displayName:"styled__TrialUpgradeButton",componentId:"sc-66x250-1"})(["flex:auto;"]),s=(0,a.default)(r.Text).attrs({strong:!0,lineHeight:.8}).withConfig({displayName:"styled__PromoText",componentId:"sc-66x250-2"})(["background-color:",";background-image:linear-gradient( 43deg,"," 0%,"," 46%,"," 100% );-webkit-background-clip:text;-webkit-text-fill-color:transparent;"],(0,r.getColor)("primary"),(0,r.getColor)(["blue","aquamarine"]),(0,r.getColor)(["purple","mauve"]),(0,r.getColor)("primary"))},66732:(e,t,n)=>{n.d(t,{A:()=>d});var a=n(96540),r=n(63950),l=n.n(r),o=n(83199),i=n(50876);const s={default:"Or you can opt to downgrade immediately",billing:"Or you can opt to downgrade immediately"},d=e=>{let{flavour:t="default",onOptOutClick:n=l(),...r}=e;const{sendLog:d,isReady:u}=(0,i.A)(),c=(0,a.useCallback)((()=>{n(),d({feature:"TrialOptOut",isStart:!0})}),[u]);return a.createElement(o.Text,r,"After the trial, you'll automatically switch to the free Community plan."," ",a.createElement(o.Box,{"data-testid":"upgrade-to-business-banner",onClick:c,as:o.Text,cursor:"pointer",textDecoration:"underline",color:"primary"},s[t]),".")}},25624:(e,t,n)=>{n.d(t,{A:()=>b});n(62953);var a=n(96540),r=n(46741),l=n(5668),o=n(22292),i=(n(8159),n(98992),n(37550),n(16074)),s=n(6593);var d=n(19673),u=n(50503),c=n(35454),m=n(42728),g=n(17170),p=n(93155);const f=e=>{const t=new Date(e||void 0).toLocaleDateString();return"Invalid Date"!==t?t:null},b=()=>{const e=(0,g.A)(),{loaded:t,value:n,refresh:b}=(0,d.JN)(),{isFailure:y}=(0,u.A)(),{slug:A,trialEndsAt:E,paymentProvider:v}=n||{},w=(0,a.useMemo)((()=>(e=>{if(!e)return null;const t=new Date(e)-new Date;return Math.ceil(t/864e5)})(E)),[E]),h=t&&!!E&&!e,x=(0,r.JT)("billing:Manage"),[C]=(0,l.ng)("trialModalDismissed"),T=p.bO&&x&&(h||y)&&!localStorage.getItem(c.$B)&&!C,[S,k]=(0,a.useState)(localStorage.getItem(c.TB)),B=(0,a.useMemo)((()=>p.bO&&h),[h]),[O,I]=(0,a.useState)(),D=(F=w)>15?"default":F>5?"warning":"critical";var F;const U=!(0,o.uW)("isAnonymous")&&x;return(0,a.useEffect)((()=>{const e=((e,t,n,a)=>{if(t<0)return!1;const r=new Date(e||void 0);if(!(0,i.f)(r))return!1;const l=new Date(a||void 0);if(!(0,i.f)(l))return!0;const{days:o}=(0,s.F)({start:l,end:r}),d=o;return n.some((e=>e>=t&&e{const e=(new Date).toISOString();k(e),localStorage.setItem(c.TB,e)},daysRemaining:w,trialEndsAt:f(E),trialEndsAtRaw:E,type:D,canUpgrade:U,onTrial:h,refreshPlan:b,planIsFreeOrEarlyBird:(0,m.Kj)(A),paymentProvider:v}}},97118:(e,t,n)=>{n.d(t,{A:()=>l});var a=n(19673),r=n(42728);const l=()=>{var 
e;const{value:t}=(0,a.lU)();if(null===t||void 0===t||!t.free)return{};const n=Object.keys(t.free).sort(r.M7)[0];return{id:t.free[n]?null===(e=t.free[n][0])||void 0===e?void 0:e.id:null,version:n}}}}]); \ No newline at end of file diff --git a/src/web/gui/v2/7471.f96c4d04a73fb7551c03.chunk.js b/src/web/gui/v2/7471.f96c4d04a73fb7551c03.chunk.js deleted file mode 100644 index 7e1275af1..000000000 --- a/src/web/gui/v2/7471.f96c4d04a73fb7551c03.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="796e2291-866c-4722-9c30-22064b74ae0c",e._sentryDebugIdIdentifier="sentry-dbid-796e2291-866c-4722-9c30-22064b74ae0c")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[7471],{51641:(e,t,n)=>{"use strict";n.d(t,{A:()=>u,e:()=>d});var a,r=n(96540),o=n(47444),i=n(21396);const s=(0,o.eU)({key:"agentDataTrackStatus",default:""}),c=null===(a=window.localNetdataRegistry)||void 0===a?void 0:a.mg,l=window.envSettings.agentApiUrl,d=()=>(0,o.vc)(s),u=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:c,t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:l;const n=(0,o.lZ)(s);(0,r.useEffect)((()=>{if(!e)return;const a=i.enc.Hex.parse("fd90fa3e33a504c10a444f910444650772e77e81b00c7523643462f298fd14c0"),r=i.lib.WordArray.random(16),o=JSON.stringify({machine_guid:e,url:t}),s=i.AES.encrypt(o,a,{iv:r}).ciphertext,c=i.enc.Hex.stringify(r)+i.enc.Hex.stringify(s),l=c+i.HmacSHA256(i.enc.Hex.parse(c),a).toString();fetch("".concat("https://frankfurt.netdata.rocks/privacy","?data=").concat(encodeURIComponent(l))).then((e=>e.json())).then((e=>n(e.status))).catch((e=>console.error("Error:",e)))}),[e])}},86147:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>A});n(62953);var a=n(96540),r=n(39225),o=n(83741),i=n(22332),s=n(80925),c=n(24266),l=n(22292),d=n(20378),u=n(78459),g=n(15255),m=n(87337),v=n(28738),f=n(38819),y=n(71835);const w={ErrInvalidRedirectURI:"Invalid redirect URI",ErrUntrustedRedirectURI:"Untrusted redirect URI",ErrSpaceMemberAlreadyExists:"Space member already exists",ErrInvalidSpaceID:"Invalid space ID",ErrInvalidInvitationToken:"Invalid invitation token",ErrInvitationNotFound:"Invitation not found",ErrInvitationEmailMismatch:"Invitation email mismatch",ErrInvitationExpired:"Invitation expired",ErrUnauthenticated:"Unauthenticated",ErrInternalServerError:"Internal server error"},h=()=>{const[,e]=(0,y.A)();(0,a.useEffect)((()=>{const{error_msg_key:t,error_message:n}=(0,f.PP)();var a,r;n&&e({message:decodeURIComponent((a=t,r=n,w[a]||r||"An unexpected error occurred"))})}),[])};var I=n(29848);const p=(0,r.A)((()=>Promise.all([n.e(4631),n.e(7519),n.e(8323),n.e(963),n.e(5598)]).then(n.bind(n,95598))),"Layout"),k=(0,i.withChartProvider)((()=>{const e=(0,l.uW)("isLoaded"),t=(0,l.uW)("email"),n=(0,l.NJ)(),r=(0,l.uW)("isAnonymous"),i=(0,I.c0)(),s=(0,u.OS)();return(0,a.useEffect)((()=>{r||s()}),[r]),(0,a.useEffect)((()=>{if(n&&t)try{o.gV({id:n,email:t})}catch(e){console.warn("Sentry: unable to set 
user")}}),[t,n]),(0,m.xN)(),(0,u.Ay)(),(0,d.Ay)(),(0,g.Ay)(),h(),a.createElement(a.Suspense,{fallback:a.createElement(v.A,null)},a.createElement(p,{key:i,isUserLoaded:e}))})),A=(0,c.Xc)((()=>{const e=(0,s.e)().getRoot();return a.createElement(k,{chart:e})}))},3714:(e,t,n)=>{"use strict";n.d(t,{j:()=>i});n(62953),n(48408);var a=n(26655),r=n(80158);const o=e=>(window.localNetdataRegistry.mg=e.agent.mg,window.localNetdataRegistry.hostname=(0,r.Yv)(e.agent.nm||"agent"),{cloudStatus:e.cloud.status,canBeClaimed:e.can_be_claimed,keyFilename:e.key_filename,claimId:e.cloud.claim_id,mg:e.agent.mg,nd:e.agent.nd,success:e.success,message:e.message}),i=function(){let{key:e,token:t,rooms:n,url:r}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},i=new URLSearchParams({key:e,rooms:n,token:t,url:r}).toString();return i=e&&n&&t&&r&&i?"?".concat(i):"",a.A.get("/api/v2/claim".concat(i),{baseURL:window.envSettings.agentApiUrl,transform:o})}},15255:(e,t,n)=>{"use strict";n.d(t,{Ay:()=>m,OS:()=>u,RJ:()=>g});n(62953);var a=n(96540),r=n(47444),o=n(3914),i=n(22292),s=n(79731),c=n(3714);const l=(0,r.Iz)({key:"claimStatusAtom",default:{loaded:!1,loading:!1,cloudStatus:"disabled",canBeClaimed:!1,claimId:null,keyFilename:"",error:"",claiming:!1,claimingError:""}}),d=(0,r.Iz)({key:"checkClaimStatus",default:()=>1}),u=()=>{var e;const t=null===(e=window.localNetdataRegistry)||void 0===e?void 0:e.mg,n=(0,r.lZ)(d(t));return(0,a.useCallback)((()=>n((e=>e+1))),[t])},g=()=>{var e;const t=null===(e=window.localNetdataRegistry)||void 0===e?void 0:e.mg,[n,o]=(0,r.L4)(l({machineGuid:t}));return[n,(0,a.useCallback)((e=>o((t=>({...t,...e})),[])))]},m=()=>{var e;const t=(0,o.dg)(),n=null===(e=window.localNetdataRegistry)||void 0===e?void 0:e.mg,[l,u]=(0,r.L4)(d(n)),[m,v]=g(n),{loading:f}=m,y=(0,i.uW)("isAnonymous");return(0,a.useEffect)((()=>{!f&&n&&t&&(v({loading:!0,nodeId:null,spaceId:null,roomIds:[]}),(0,c.j)().then((e=>{let{data:t}=e;v({loading:!1,loaded:!0,...t,error:""})})).catch((e=>{var t;const n=null===e||void 0===e||null===(t=e.response)||void 0===t?void 0:t.data;v({loading:!1,loaded:!0,error:(0,s.o)(null===n||void 0===n?void 0:n.errorMsgKey)||(null===n||void 0===n?void 0:n.errorMessage)||"Something went wrong",cloudStatus:"disabled",canBeClaimed:!1,keyFilename:""})})))}),[n,l,t,y]),{...m,checkAgain:u}}},29848:(e,t,n)=>{"use strict";n.d(t,{Hs:()=>c,c0:()=>i,ly:()=>s,pp:()=>l});n(62953);var a=n(47444);const r=(0,a.eU)({key:"spaceKeyAtom",default:0}),o=(0,a.eU)({key:"roomViewLoading",default:!0}),i=()=>(0,a.vc)(r),s=()=>{const[e,t]=(0,a.L4)(r);return()=>t(e+1)},c=()=>(0,a.vc)(o),l=()=>(0,a.L4)(o)},78459:(e,t,n)=>{"use strict";n.d(t,{Ay:()=>h,OS:()=>y,f7:()=>w});n(62953);var a,r=n(96540),o=n(47444),i=n(92138),s=n(3914),c=n(22292),l=n(79731),d=n(15255),u=n(9224),g=n(51641);const m=null===(a=window.localNetdataRegistry)||void 0===a?void 0:a.mg,v=(0,o.Iz)({key:"currentAgentBearerAtom",default:{loading:!1,token:localStorage.getItem("agentJWT:".concat(m))||"",expiration:localStorage.getItem("agentJWTExp:".concat(m))||null,bearerProtection:!0,error:""}}),f=(0,o.Iz)({key:"checkAgentBearer",default:()=>1}),y=()=>{const[{mg:e}]=(0,d.RJ)(),t=(0,o.lZ)(f(e));return(0,r.useCallback)((()=>t((e=>e+1))),[e])},w=()=>{const[{claimId:e,mg:t,nd:n}]=(0,d.RJ)();return(0,o.L4)(v({nodeId:n,machineGuid:t,claimId:e}))},h=()=>{const 
e=(0,s.dg)(),[{claimId:t,mg:n,nd:a}]=(0,d.RJ)(),[m,v]=(0,o.L4)(f(n)),[{loading:y,token:h,expiration:I,bearerProtection:p,error:k},A]=w(),S=(0,i.A)(m),b=(0,c.uW)("isAnonymous");return(0,r.useEffect)((()=>{!y&&n&&e&&!b&&t&&a&&(m===S&&I&&1e3*I>(new Date).getTime()+3600||(A((e=>({...e,loading:!0}))),(0,u.q5)(a,n,t).then((e=>{let{data:t}=e;A({loading:!1,...t,error:""}),localStorage.setItem("agentJWT:".concat(n),null===t||void 0===t?void 0:t.token),localStorage.setItem("agentJWTExp:".concat(n),null===t||void 0===t?void 0:t.expiration)})).catch((e=>{var t;const a=null===e||void 0===e||null===(t=e.response)||void 0===t?void 0:t.data;localStorage.removeItem("agentJWT:".concat(n)),localStorage.removeItem("agentJWTExp:".concat(n)),A({loading:!1,token:"",expiration:null,bearerProtection:!0,error:(0,l.o)(null===a||void 0===a?void 0:a.errorMsgKey)||(null===a||void 0===a?void 0:a.errorMessage)||"Something went wrong"})}))))}),[n,I,m,e,b,S]),(0,g.A)(n),{token:h,bearerProtection:p,checkAgain:v,error:k}}},20378:(e,t,n)=>{"use strict";n.d(t,{Ay:()=>m,OS:()=>u,Q8:()=>g});n(62953);var a=n(96540),r=n(47444),o=n(3914),i=n(22292),s=n(79731),c=n(9224);const l=(0,r.Iz)({key:"userAccessAtom",default:{loaded:!1,loading:!1,userStatus:"notLoggedIn",userNodeStatus:"noAccess",nodeId:null,spaceId:null,roomIds:[],error:""}}),d=(0,r.Iz)({key:"checkUserAccess",default:()=>1}),u=()=>{var e;const t=null===(e=window.localNetdataRegistry)||void 0===e?void 0:e.mg,n=(0,r.lZ)(d(t));return(0,a.useCallback)((()=>n((e=>e+1))),[t])},g=()=>{var e;const t=null===(e=window.localNetdataRegistry)||void 0===e?void 0:e.mg;return(0,r.L4)(l({machineGuid:t}))},m=()=>{var e;const t=(0,o.dg)(),n=null===(e=window.localNetdataRegistry)||void 0===e?void 0:e.mg,[l,u]=(0,r.L4)(d(n)),[m,v]=g(n),{loading:f}=m,y=(0,i.uW)("isAnonymous");return(0,a.useEffect)((()=>{!f&&n&&t&&!y&&(v((e=>({loading:!0,nodeId:null,spaceId:null,roomIds:[],...e}))),(0,c.az)(n).then((e=>{let{data:t}=e;v({loading:!1,loaded:!0,...t,error:""})})).catch((e=>{var t;const n=null===e||void 0===e||null===(t=e.response)||void 0===t?void 0:t.data;v({loading:!1,loaded:!0,error:(0,s.o)(null===n||void 0===n?void 0:n.errorMsgKey)||(null===n||void 0===n?void 0:n.errorMessage)||"Something went wrong",userStatus:"notLoggedIn",userNodeStatus:"noAccess",nodeId:null,spaceId:null,roomIds:[]})})))}),[n,l,t,y,f]),{...m,checkAgain:u}}},87337:(e,t,n)=>{"use strict";n.d(t,{xN:()=>f,vS:()=>g,YN:()=>y,KF:()=>v,iw:()=>u,rE:()=>m});n(17333),n(3064),n(41393),n(14905),n(98992),n(54520),n(72577),n(81454),n(8872),n(25509),n(65223),n(60321),n(41927),n(11632),n(64377),n(66771),n(12516),n(68931),n(52514),n(35694),n(52774),n(49536),n(21926),n(94483),n(16215),n(62953);var a=n(96540),r=n(47444),o=n(47767),i=n(22292);const s=(0,r.Iz)({key:"visitedNodes",default:()=>[]});var c=n(47762),l=n(9224);const d=(0,r.K0)({key:"visitedNodeIdsValue",get:e=>t=>{let{get:n}=t;return n(s(e)).map((e=>{let{id:t}=e;return t}))}}),u=()=>{const e=(0,i.NJ)(),t=(0,r.vc)(s(e)),n=(0,r.Zs)((e=>{let{set:t}=e;return e=>{t(c.gl,{values:e.reduce(((e,t)=>({...e,[t.id]:{...t,loaded:!0}})),{}),merge:!0})}}),[]);return(0,a.useEffect)((()=>{n(t)}),[t]),(0,r.vc)(d(e))},g=e=>{const t=(0,i.NJ)(),n=(0,r.vc)(s(t)),o=(0,a.useMemo)((()=>e?n.filter((t=>t.name.toUpperCase().includes(e.toUpperCase()))):n),[n,e]);return(0,a.useMemo)((()=>o.map((e=>e.id))),[o])},m=()=>(0,r.Zs)((e=>{let{snapshot:t,set:n}=e;return async(e,a)=>{const r=await t.getPromise((0,i.Dm)("id")),{urls:o,name:d}=await 
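/* The visited-nodes store above follows the same Recoil recipe as the rest of
   this bundle: an atom family keyed by user id plus a derived selector that
   projects just the node ids. Sketch, assuming the minified wrappers
   correspond to atomFamily and selectorFamily:

   import { atomFamily, selectorFamily } from "recoil";

   const visitedNodes = atomFamily({ key: "visitedNodes", default: () => [] });
   const visitedNodeIds = selectorFamily({
     key: "visitedNodeIdsValue",
     get: (userId) => ({ get }) => get(visitedNodes(userId)).map(({ id }) => id),
   });
*/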
t.getPromise((0,c.GN)({id:e})),u=o.filter((e=>e!==a));n((0,c.GN)({id:e,key:"urls"}),u),u.length||n(s(r),(t=>t.filter((t=>t.id!==e))));try{await(u.length?(0,l.Bz)(r,e,d,u):(0,l.sm)(r,[e])),(0,l.UL)(r,e).catch((()=>{}))}catch(g){n((0,c.GN)({id:e,key:"urls"}),o)}}}),[]),v=()=>{const{pathname:e}=(0,o.zy)(),t=(0,i.NJ)(),n=f({autoFetch:!1});return(0,r.Zs)((e=>{let{snapshot:t,set:a}=e;return async(e,r,o)=>{if(await t.getPromise((0,i.Dm)("isAnonymous")))return;const d=await t.getPromise((0,i.Dm)("id")),{urls:u,name:g}=await t.getPromise((0,c.GN)({id:e}));let m=r?[r,...u]:u;m=[...new Set([window.location.href,...m])];const v=m.length!==u.length;try{a((0,c.GN)({id:e,key:"urls"}),m),a(s(d),(t=>{const n=t.find((t=>t.id===e)),a=t.filter((t=>t.id!==e));return n?[{...n,accessCount:n.accessCount+1,lastAccessTime:(new Date).toISOString()},...a]:[{accessCount:1,id:e,lastAccessTime:(new Date).toISOString(),urls:m,name:o},...a]})),v&&await(0,l.Bz)(d,e,o||g,m),n(),await(0,l.UL)(d,e)}catch(f){a((0,c.GN)({id:e,key:"urls"}),u)}}}),[n,e,t])},f=function(){let{autoFetch:e=!0}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const{id:t,isAnonymous:n}=(0,i.uW)(),[,o]=(0,r.L4)(s(t)),[c,d]=(0,a.useState)(0),u=(0,a.useCallback)((()=>{d((e=>e+1))}),[d]);return(0,a.useEffect)((()=>{if(t&&(e||c))if(n){const e=(window.visitedNodes||[]).sort(((e,t)=>new Date(t.lastAccessTime)-new Date(e.lastAccessTime)));o(e)}else(0,l.uQ)(t).then((e=>{if(!e)return;const{data:{results:t}}=e,n=t.sort(((e,t)=>new Date(t.lastAccessTime)-new Date(e.lastAccessTime)));o(n)}))}),[e,c,t,n]),u},y=()=>{const e=(0,i.NJ)(),t=s(e);return(0,r.Zs)((e=>{let{snapshot:n,set:a}=e;return async e=>{const r=(await n.getPromise((0,c.th)(e))).map((e=>e.machineGUID)),o=await n.getPromise(t),i=o.filter((e=>!r.includes(e.id)));i.length!==o.length&&a(t,i)}}),[e])}},92138:(e,t,n)=>{"use strict";n.d(t,{A:()=>s});var a=n(96540),r=n(2404),o=n.n(r),i=n(80862);const s=function(e){let t=arguments.length>1&&void 0!==arguments[1]&&arguments[1],n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:o();const r=(0,a.useRef)(),s=(0,a.useRef)(e);return!(0,i.A)().current||t&&n(s.current,e)||(r.current=s.current,s.current=e),r.current}},50477:()=>{}}]); \ No newline at end of file diff --git a/src/web/gui/v2/7487.89070793921be1288bb5.css b/src/web/gui/v2/7487.89070793921be1288bb5.css deleted file mode 100644 index e5d02668f..000000000 --- a/src/web/gui/v2/7487.89070793921be1288bb5.css +++ /dev/null @@ -1,2 +0,0 @@ -.default .dygraph-axis-label{color:#35414a}.dark .dygraph-axis-label{color:#fff}.dygraph-label-rotate-right{text-align:center;transform:rotate(-90deg);-webkit-transform:rotate(-90deg);-moz-transform:rotate(-90deg);-o-transform:rotate(-90deg);-ms-transform:rotate(-90deg)}.dygraph-annotation{position:absolute;z-index:10;overflow:hidden;border:1px solid} - diff --git a/src/web/gui/v2/7487.db63c95c27d973a07d9b.chunk.js b/src/web/gui/v2/7487.db63c95c27d973a07d9b.chunk.js deleted file mode 100644 index cbc15d0f9..000000000 --- a/src/web/gui/v2/7487.db63c95c27d973a07d9b.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="6e15794f-c21d-4dbd-b39f-a55646c86750",e._sentryDebugIdIdentifier="sentry-dbid-6e15794f-c21d-4dbd-b39f-a55646c86750")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof 
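/* Every chunk in this diff opens with the same Sentry shim, as in the hunk
   just above: it captures a stack trace and records a build-specific UUID on
   the global object so source-map tooling can match the minified file to its
   symbols. De-minified sketch:

   (function () {
     try {
       const g = typeof window !== "undefined" ? window
         : typeof global !== "undefined" ? global
         : typeof self !== "undefined" ? self : {};
       const stack = new Error().stack;
       if (stack) {
         g._sentryDebugIds = g._sentryDebugIds || {};
         g._sentryDebugIds[stack] = "6e15794f-c21d-4dbd-b39f-a55646c86750"; // per-build UUID
         g._sentryDebugIdIdentifier = "sentry-dbid-6e15794f-c21d-4dbd-b39f-a55646c86750";
       }
     } catch (e) {} // deliberately ignored
   })();
*/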
self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[7487],{18686:(e,t,n)=>{n.d(t,{A:()=>l});var a=n(96540),o=n(83199),r=n(47731);const l=e=>{let{children:t}=e;return(0,r.J)()?a.createElement(o.Layer,{full:!0},a.createElement(o.Flex,{width:"100%",background:"mainBackground","data-testid":"alertView-mobileContainer"},t)):t}},29848:(e,t,n)=>{n.d(t,{Hs:()=>c,c0:()=>l,ly:()=>d,pp:()=>i});n(62953);var a=n(47444);const o=(0,a.eU)({key:"spaceKeyAtom",default:0}),r=(0,a.eU)({key:"roomViewLoading",default:!0}),l=()=>(0,a.vc)(o),d=()=>{const[e,t]=(0,a.L4)(o);return()=>t(e+1)},c=()=>(0,a.vc)(r),i=()=>(0,a.L4)(r)},7356:(e,t,n)=>{n.r(t),n.d(t,{default:()=>d});var a=n(96540),o=n(47767),r=n(41122),l=n(18686);const d=e=>{let{spaceId:t,roomId:n,nodeName:d}=e;const{alertId:c}=(0,o.g)();return a.createElement(l.A,null,a.createElement(r.A,{alertId:c,spaceId:t,roomId:n,nodeName:d,isWebview:!0}))}},41122:(e,t,n)=>{n.d(t,{A:()=>_});var a=n(96540),o=n(83199),r=n(64118),l=n(28738),d=n(47731),c=n(69765),i=n(11164),s=n(43407),u=n(5871),m=n(52768),f=n(47767),g=n(27467),p=n(47762),v=n(8711);const y=(0,v.default)(o.Flex).attrs({alignItems:"center"}).withConfig({displayName:"styled__StyledButtonContainer",componentId:"sc-1glv09p-0"})(["position:sticky;bottom:0;"]);var h=n(38966);const I=(0,n(92155).A)(o.Button),E=e=>{let{disabled:t,nodeId:n,alertId:r,context:l,lastStatusChange:d,onClose:c,isLoading:i,small:s=!1,testid:u="alertDetailsModal"}=e;const m=(0,f.Zp)(),v=(0,p.Zl)(n),E=(0,a.useCallback)((()=>{c&&c(),m(v,r?{state:{alertId:r}}:{state:{contextToGo:l}})}),[v,r]),b=(0,g.rI)(),w=(0,a.useCallback)((()=>{const e=1e3*d;b({highlight:{after:e-6e4,before:e},correlation:!0}),E()}),[d,l,E,r]);return a.createElement(y,{justifyContent:"end",gap:2},a.createElement(o.Flex,{gap:2,justifyContent:"end"},a.createElement(I,{small:s,label:"Run correlations",onClick:w,flavour:"hollow",isLoading:i,width:s?"112px":"170px","data-testid":"".concat(u,"-runCorrelations-button"),"data-ga":"alert-modal::click-run-correlations::alerts-view",payload:{action:"Run correlations",alertId:r,nodeId:n}}),a.createElement(I,{small:s,label:"Go to chart",onClick:E,isLoading:i,disabled:i||t,width:s?"112px":"150px","data-testid":"".concat(u,"-goToNode-button"),"data-ga":"alert-modal::click-goto-chart::alerts-view",payload:{action:"Go to chart",alertId:r,nodeId:n}}),a.createElement(h.A,{alertId:r,nodeId:n,isLoading:i,small:s,"data-testid":"".concat(u,"-edit-alert-button")})))},b=(0,a.memo)(E);var w=n(7660),x=n(40267);const C=e=>{let{alertId:t,context:n,name:r,nodeId:l,status:c,lastStatusChange:i,fullyLoaded:s,isWebview:u}=e;const m=(0,d.J)(),f=m?o.H4:o.H0;return a.createElement(o.Flex,{column:!0,gap:4},a.createElement(o.Flex,{justifyContent:"between"},a.createElement(o.Flex,{alignItems:"center",gap:2},a.createElement(x.A,{margin:m?null:[.5,0,0],flavour:c,"data-testid":"alertView-statusPill"},c),a.createElement(f,{"data-testid":"alertView-alertName"},r)),!1),a.createElement(o.Flex,{justifyContent:"between",alignItems:"center"},a.createElement(w.A,{alertId:t}),!u&&s&&!m&&a.createElement(b,{lastStatusChange:i,alertId:t,context:n,name:r,nodeId:l,small:!0,testid:"alertView"})))};var A=n(68831),S=n(63314);const 
k=v.default.img.withConfig({displayName:"sc-404__Illustration",componentId:"sc-4w81fg-0"})(["height:35%;width:35%;"]),N=v.default.div.withConfig({displayName:"sc-404__ButtonContainer",componentId:"sc-4w81fg-1"})(["margin:",";"],(0,o.getSizeBy)(4)),L=()=>{const e="".concat(A.A.assetsBaseURL,"/img/no-nodes-room.svg");return a.createElement(S.DL,null,a.createElement(o.Flex,{column:!0,alignItems:"center",justifyItems:"center",justifyContent:"center",height:"100%",width:"100%",padding:[0,0,"10%"]},a.createElement(k,{src:e,alt:"Unreachable alert",title:"Unreachable alert"}),a.createElement(o.H3,null,"We couldn't find the alert"),a.createElement(o.Text,null,"This can be a temporary problem of that specific alert."),a.createElement(N,null,a.createElement(o.Button,{label:"Retry",icon:"reload"}))))};var R=n(4974),T=n(73865),D=n(85686);const V=e=>{let{children:t}=e;return a.createElement(o.Flex,{background:"modalHeaderBackground",height:12,flex:!1,gap:4,padding:[0,2,0,4],alignItems:"center"},a.createElement(o.Icon,{name:"logo_s",color:"success",width:"23px"}),t)},_=e=>{let{alertId:t,spaceId:n,roomId:f,isWebview:g,nodeName:p}=e;const v=(0,c.XA)("name"),{isNodeRestricted:y}=(0,T.A)(),{fullyLoaded:h=!1,fullyLoading:I=!0,info:E,units:b,lastStatusChangeValue:w,lastStatusChange:x,context:A,instance:k,name:N,nodeId:_,status:F,lastUpdated:j,value:B}=(0,r.JL)(t);(0,r.yk)(t,{spaceId:n,roomId:f});const U=(0,m.J4)(B,b),M=(0,m.J4)(w,b),O=(0,d.J)();return _&&y(_)?a.createElement(S.Ay,{feature:"AlertDetailsViewRestricted"},a.createElement(D.A,null)):a.createElement(S.Ay,{feature:"AlertDetailsView"},a.createElement(o.Flex,{column:!0,width:O?"100%":{max:280},padding:O?null:[0,0,10],background:O?"modalBackground":null},!g&&O&&a.createElement(V,null,a.createElement(o.Flex,{column:!0},a.createElement(o.H6,{color:"textLite"},"ROOM"),a.createElement(o.Text,{"data-testid":"alertView-mobile-roomName"},v))),a.createElement(o.Flex,{column:!0,padding:O?[3]:[0],overflow:O?"auto":"visible",gap:3},a.createElement(C,{alertId:t,context:A,status:F,name:N,nodeId:_,lastStatusChange:x,fullyLoaded:h,isWebview:g}),h?null:I?a.createElement(l.A,{title:"Loading alert..."}):a.createElement(L,null),h&&E&&a.createElement(a.Fragment,null,a.createElement(s.A,{iconName:"documentation"},"Alert Description"),a.createElement(o.Text,{"data-testid":"alertView-info"},E),a.createElement(R.A,{alertId:t})),h&&a.createElement(i.A,{id:t,context:A,instance:k,formattedLastValue:U,formattedLastStatusChangeValue:M,lastStatusChange:x,lastUpdated:j,isFormattedValueLoaded:h,nodeId:_,status:F,testid:"alertView",spaceId:n,roomId:f}),h&&a.createElement(u.A,{id:t,nodeName:p,testid:"alertView"})),O&&a.createElement(o.Box,{position:"sticky",padding:[4],background:"modalBackground",bottom:0,border:{side:"top",color:"border"}},a.createElement(o.TextSmall,null,"In order to ",a.createElement(o.TextSmall,{strong:!0},"Run Correlations")," or"," ",a.createElement(o.TextSmall,{strong:!0},"View the Chart")," you will have to visit this alert from its' dedicated page on a desktop device."))))}},20378:(e,t,n)=>{n.d(t,{Ay:()=>f,OS:()=>u,Q8:()=>m});n(62953);var a=n(96540),o=n(47444),r=n(3914),l=n(22292),d=n(79731),c=n(9224);const i=(0,o.Iz)({key:"userAccessAtom",default:{loaded:!1,loading:!1,userStatus:"notLoggedIn",userNodeStatus:"noAccess",nodeId:null,spaceId:null,roomIds:[],error:""}}),s=(0,o.Iz)({key:"checkUserAccess",default:()=>1}),u=()=>{var e;const t=null===(e=window.localNetdataRegistry)||void 0===e?void 
0:e.mg,n=(0,o.lZ)(s(t));return(0,a.useCallback)((()=>n((e=>e+1))),[t])},m=()=>{var e;const t=null===(e=window.localNetdataRegistry)||void 0===e?void 0:e.mg;return(0,o.L4)(i({machineGuid:t}))},f=()=>{var e;const t=(0,r.dg)(),n=null===(e=window.localNetdataRegistry)||void 0===e?void 0:e.mg,[i,u]=(0,o.L4)(s(n)),[f,g]=m(n),{loading:p}=f,v=(0,l.uW)("isAnonymous");return(0,a.useEffect)((()=>{!p&&n&&t&&!v&&(g((e=>({loading:!0,nodeId:null,spaceId:null,roomIds:[],...e}))),(0,c.az)(n).then((e=>{let{data:t}=e;g({loading:!1,loaded:!0,...t,error:""})})).catch((e=>{var t;const n=null===e||void 0===e||null===(t=e.response)||void 0===t?void 0:t.data;g({loading:!1,loaded:!0,error:(0,d.o)(null===n||void 0===n?void 0:n.errorMsgKey)||(null===n||void 0===n?void 0:n.errorMessage)||"Something went wrong",userStatus:"notLoggedIn",userNodeStatus:"noAccess",nodeId:null,spaceId:null,roomIds:[]})})))}),[n,i,t,v,p]),{...f,checkAgain:u}}},84428:(e,t,n)=>{var a=n(78227)("iterator"),o=!1;try{var r=0,l={next:function(){return{done:!!r++}},return:function(){o=!0}};l[a]=function(){return this},Array.from(l,(function(){throw 2}))}catch(d){}e.exports=function(e,t){try{if(!t&&!o)return!1}catch(d){return!1}var n=!1;try{var r={};r[a]=function(){return{next:function(){return{done:n=!0}}}},e(r)}catch(d){}return n}},87290:(e,t,n)=>{var a=n(50516),o=n(19088);e.exports=!a&&!o&&"object"==typeof window&&"object"==typeof document},50516:e=>{e.exports="object"==typeof Deno&&Deno&&"object"==typeof Deno.version},19088:(e,t,n)=>{var a=n(24475),o=n(44576);e.exports="process"===o(a.process)},10916:(e,t,n)=>{var a=n(24475),o=n(80550),r=n(94901),l=n(92796),d=n(33706),c=n(78227),i=n(87290),s=n(50516),u=n(96395),m=n(77388),f=o&&o.prototype,g=c("species"),p=!1,v=r(a.PromiseRejectionEvent),y=l("Promise",(function(){var e=d(o),t=e!==String(o);if(!t&&66===m)return!0;if(u&&(!f.catch||!f.finally))return!0;if(!m||m<51||!/native code/.test(e)){var n=new o((function(e){e(1)})),a=function(e){e((function(){}),(function(){}))};if((n.constructor={})[g]=a,!(p=n.then((function(){}))instanceof a))return!0}return!t&&(i||s)&&!v}));e.exports={CONSTRUCTOR:y,REJECTION_EVENT:v,SUBCLASSING:p}},90537:(e,t,n)=>{var a=n(80550),o=n(84428),r=n(10916).CONSTRUCTOR;e.exports=r||!o((function(e){a.all(e).then(void 0,(function(){}))}))},96167:(e,t,n)=>{var a=n(46518),o=n(69565),r=n(79306),l=n(36043),d=n(1103),c=n(72652);a({target:"Promise",stat:!0,forced:n(90537)},{allSettled:function(e){var t=this,n=l.f(t),a=n.resolve,i=n.reject,s=d((function(){var n=r(t.resolve),l=[],d=0,i=1;c(e,(function(e){var r=d++,c=!1;i++,o(n,t,e).then((function(e){c||(c=!0,l[r]={status:"fulfilled",value:e},--i||a(l))}),(function(e){c||(c=!0,l[r]={status:"rejected",reason:e},--i||a(l))}))})),--i||a(l)}));return s.error&&i(s.value),n.promise}})},33436:(e,t,n)=>{function a(e,{target:t=document.body}={}){if("string"!==typeof e)throw new TypeError(`Expected parameter \`text\` to be a \`string\`, got \`${typeof e}\`.`);const n=document.createElement("textarea"),a=document.activeElement;n.value=e,n.setAttribute("readonly",""),n.style.contain="strict",n.style.position="absolute",n.style.left="-9999px",n.style.fontSize="12pt";const o=document.getSelection(),r=o.rangeCount>0&&o.getRangeAt(0);t.append(n),n.select(),n.selectionStart=0,n.selectionEnd=e.length;let l=!1;try{l=document.execCommand("copy")}catch{}return n.remove(),r&&(o.removeAllRanges(),o.addRange(r)),a&&a.focus(),l}n.d(t,{A:()=>a})},14123:(e,t,n)=>{function a(e){return t=>{const n=(e?Math[e]:Math.trunc)(t);return 
0===n?0:n}}n.d(t,{u:()=>a})},25733:(e,t,n)=>{n.d(t,{z:()=>o});var a=n(2642);function o(e,t){const n=(0,a.a)(e),o=(0,a.a)(t),r=n.getTime()-o.getTime();return r<0?-1:r>0?1:r}},31826:(e,t,n)=>{n.d(t,{k:()=>s});var a=n(96519),o=n(82695),r=n(14123),l=n(40215),d=n(25733),c=n(2940),i=n(2642);function s(e,t,n){const s=(0,o.q)(),u=n?.locale??s.locale??a.c,m=(0,d.z)(e,t);if(isNaN(m))throw new RangeError("Invalid time value");const f=Object.assign({},n,{addSuffix:n?.addSuffix,comparison:m});let g,p;m>0?(g=(0,i.a)(t),p=(0,i.a)(e)):(g=(0,i.a)(e),p=(0,i.a)(t));const v=(0,r.u)(n?.roundingMethod??"round"),y=p.getTime()-g.getTime(),h=y/c.Cg,I=(y-((0,l.G)(p)-(0,l.G)(g)))/c.Cg,E=n?.unit;let b;if(b=E||(h<1?"second":h<60?"minute":h{"use strict";r.d(t,{Ay:()=>Lr});var n=r(74848),o=r(96540),a=r(78239),s=r(58156),i=r.n(s),c=r(62193),l=r.n(c),d=r(44383),u=r.n(d),f=r(42072),p=r.n(f),m=r(88055),h=r.n(m),y=r(23805),g=r.n(y),v=r(63560),$=r.n(v);let b=(e=21)=>crypto.getRandomValues(new Uint8Array(e)).reduce(((e,t)=>e+=(t&=63)<36?t.toString(36):t<62?(t-26).toString(36).toUpperCase():t>62?"-":"_"),"");function w(){return b()}function S(e){return Array.isArray(e)?e.map((e=>({key:w(),item:e}))):[]}function _(e){return Array.isArray(e)?e.map((e=>e.item)):[]}class x extends o.Component{constructor(e){super(e),this._getNewFormDataRow=()=>{const{schema:e,registry:t}=this.props,{schemaUtils:r}=t;let n=e.items;return(0,a.nQ)(e)&&(0,a.Hh)(e)&&(n=e.additionalItems),r.getDefaultFormState(n)},this.onAddClick=e=>{this._handleAddClick(e)},this.onAddIndexClick=e=>t=>{this._handleAddClick(t,e)},this.onCopyIndexClick=e=>t=>{t&&t.preventDefault();const{onChange:r,errorSchema:n}=this.props,{keyedFormData:o}=this.state;let a;if(n){a={};for(const t in n){const r=parseInt(t);r<=e?$()(a,[r],n[t]):r>e&&$()(a,[r+1],n[t])}}const s={key:w(),item:h()(o[e].item)},i=[...o];void 0!==e?i.splice(e+1,0,s):i.push(s),this.setState({keyedFormData:i,updatedKeyedFormData:!0},(()=>r(_(i),a)))},this.onDropIndexClick=e=>t=>{t&&t.preventDefault();const{onChange:r,errorSchema:n}=this.props,{keyedFormData:o}=this.state;let a;if(n){a={};for(const t in n){const r=parseInt(t);r<e?$()(a,[r],n[t]):r>e&&$()(a,[r-1],n[t])}}const s=o.filter(((t,r)=>r!==e));this.setState({keyedFormData:s,updatedKeyedFormData:!0},(()=>r(_(s),a)))},this.onReorderClick=(e,t)=>r=>{r&&(r.preventDefault(),r.currentTarget.blur());const{onChange:n,errorSchema:o}=this.props;let a;if(o){a={};for(const r in o){const n=parseInt(r);n==e?$()(a,[t],o[e]):n==t?$()(a,[e],o[t]):$()(a,[r],o[n])}}const{keyedFormData:s}=this.state;const i=function(){const r=s.slice();return r.splice(e,1),r.splice(t,0,s[e]),r}();this.setState({keyedFormData:i},(()=>n(_(i),a)))},this.onChangeForIndex=e=>(t,r,n)=>{const{formData:o,onChange:a,errorSchema:s}=this.props,i=(Array.isArray(o)?o:[]).map(((r,n)=>e===n?"undefined"===typeof t?null:t:r));a(i,s&&s&&{...s,[e]:r},n)},this.onSelectChange=e=>{const{onChange:t,idSchema:r}=this.props;t(e,void 0,r&&r.$id)};const{formData:t=[]}=e,r=S(t);this.state={keyedFormData:r,updatedKeyedFormData:!1}}static getDerivedStateFromProps(e,t){if(t.updatedKeyedFormData)return{updatedKeyedFormData:!1};const r=Array.isArray(e.formData)?e.formData:[],n=t.keyedFormData||[];return{keyedFormData:r.length===n.length?n.map(((e,t)=>({key:e.key,item:r[t]}))):S(r)}}get itemTitle(){const{schema:e,registry:t}=this.props,{translateString:r}=t;return i()(e,[a.ZN,"title"],i()(e,[a.ZN,"description"],r(a.Zl.ArrayItemTitle)))}isItemRequired(e){return 
Array.isArray(e.type)?!e.type.includes("null"):"null"!==e.type}canAddItem(e){const{schema:t,uiSchema:r,registry:n}=this.props;let{addable:o}=(0,a.$R)(r,n.globalUiOptions);return!1!==o&&(o=void 0===t.maxItems||e.length<t.maxItems),o}_handleAddClick(e,t){e&&e.preventDefault();const{onChange:r,errorSchema:n}=this.props,{keyedFormData:o}=this.state;let a;if(n){a={};for(const e in n){const r=parseInt(e);void 0===t||r<t?$()(a,[r],n[e]):r>=t&&$()(a,[r+1],n[e])}}const s={key:w(),item:this._getNewFormDataRow()},i=[...o];void 0!==t?i.splice(t,0,s):i.push(s),this.setState({keyedFormData:i,updatedKeyedFormData:!0},(()=>r(_(i),a)))}render(){const{schema:e,uiSchema:t,idSchema:r,registry:o}=this.props,{schemaUtils:s,translateString:i}=o;if(!(a.ZN in e)){const s=(0,a.$R)(t),c=(0,a.$F)("UnsupportedFieldTemplate",o,s);return(0,n.jsx)(c,{schema:e,idSchema:r,reason:i(a.Zl.MissingItems),registry:o})}return s.isMultiSelect(e)?this.renderMultiSelect():(0,a.ar)(t)?this.renderCustomWidget():(0,a.nQ)(e)?this.renderFixedArray():s.isFilesArray(e,t)?this.renderFiles():this.renderNormalArray()}renderNormalArray(){const{schema:e,uiSchema:t={},errorSchema:r,idSchema:o,name:s,title:i,disabled:c=!1,readonly:l=!1,autofocus:d=!1,required:u=!1,registry:f,onBlur:p,onFocus:m,idPrefix:h,idSeparator:y="_",rawErrors:v}=this.props,{keyedFormData:$}=this.state,b=e.title||i||s,{schemaUtils:w,formContext:S}=f,x=(0,a.$R)(t),E=g()(e.items)?e.items:{},k=w.retrieveSchema(E),j=_(this.state.keyedFormData),C=this.canAddItem(j),P={canAdd:C,items:$.map(((e,n)=>{const{key:a,item:i}=e,c=i,l=w.retrieveSchema(E,c),u=r?r[n]:void 0,f=o.$id+y+n,g=w.toIdSchema(l,f,c,h,y);return this.renderArrayFieldItem({key:a,index:n,name:s&&`${s}-${n}`,title:b?`${b}-${n+1}`:void 0,canAdd:C,canMoveUp:n>0,canMoveDown:nx.retrieveSchema(e,r[t]))),j=g()(e.additionalItems)?x.retrieveSchema(e.additionalItems,r):null;(!w||w.length{const{key:a,item:d}=r,u=d,f=n>=k.length,m=(f&&g()(e.additionalItems)?x.retrieveSchema(e.additionalItems,u):k[n])||{},h=c.$id+i+n,_=x.toIdSchema(m,h,u,s,i),E=f?t.additionalItems||{}:Array.isArray(t.items)?t.items[n]:t.items||{},j=o?o[n]:void 0;return this.renderArrayFieldItem({key:a,index:n,name:l&&`${l}-${n}`,title:S?`${S}-${n+1}`:void 0,canAdd:C,canRemove:f,canMoveUp:n>=k.length+1,canMoveDown:f&&nT[e])),{children:(0,n.jsx)(A,{name:o,title:b,index:r,schema:d,uiSchema:f,formData:u,formContext:C,errorSchema:m,idPrefix:_,idSeparator:x,idSchema:p,required:this.isItemRequired(d),onChange:this.onChangeForIndex(r),onBlur:y,onFocus:g,registry:j,disabled:w,readonly:E,hideError:S,autofocus:h,rawErrors:v}),className:"array-item",disabled:w,canAdd:s,hasCopy:T.copy,hasToolbar:T.toolbar,hasMoveUp:T.moveUp,hasMoveDown:T.moveDown,hasRemove:T.remove,index:r,totalItems:$,key:t,onAddIndexClick:this.onAddIndexClick,onCopyIndexClick:this.onCopyIndexClick,onDropIndexClick:this.onDropIndexClick,onReorderClick:this.onReorderClick,readonly:E,registry:j,schema:d,uiSchema:f}}}const E=x;const k=function(e){var t,r,o;const{schema:s,name:i,uiSchema:c,idSchema:l,formData:d,registry:u,required:f,disabled:p,readonly:m,hideError:h,autofocus:y,title:v,onChange:$,onFocus:b,onBlur:w,rawErrors:S}=e,{title:_}=s,{widgets:x,formContext:E,translateString:k,globalUiOptions:j}=u,{widget:C="checkbox",title:P,label:O=!0,...N}=(0,a.$R)(c,j),A=(0,a.Bt)(s,C,x),D=k(a.Zl.YesLabel),I=k(a.Zl.NoLabel);let F;const T=null!==(r=null!==(t=null!==P&&void 0!==P?P:_)&&void 0!==t?t:v)&&void 0!==r?r:i;if(Array.isArray(s.oneOf))F=(0,a.f9)({oneOf:s.oneOf.map((e=>{if(g()(e))return{...e,title:e.title||(!0===e.const?D:I)}})).filter((e=>e))});else{const e=s,t=null!==(o=s.enum)&&void 0!==o?o:[!0,!1];F=!e.enumNames&&2===t.length&&t.every((e=>"boolean"===typeof 
e))?[{value:t[0],label:t[0]?D:I},{value:t[1],label:t[1]?D:I}]:(0,a.f9)({enum:t,enumNames:e.enumNames})}return(0,n.jsx)(A,{options:{...N,enumOptions:F},schema:s,uiSchema:c,id:l.$id,name:i,onChange:$,onFocus:b,onBlur:w,label:T,hideLabel:!O,value:d,required:f,disabled:p,readonly:m,hideError:h,registry:u,formContext:E,autofocus:y,rawErrors:S})};var j=r(90179),C=r.n(j);class P extends o.Component{constructor(e){super(e),this.onOptionChange=e=>{const{selectedOption:t,retrievedOptions:r}=this.state,{formData:n,onChange:o,registry:a}=this.props,{schemaUtils:s}=a,i=void 0!==e?parseInt(e,10):-1;if(i===t)return;const c=i>=0?r[i]:void 0,l=t>=0?r[t]:void 0;let d=s.sanitizeDataForNewSchema(c,l,n);d&&c&&(d=s.getDefaultFormState(c,d,"excludeObjectChildren")),o(d,void 0,this.getFieldId()),this.setState({selectedOption:i})};const{formData:t,options:r,registry:{schemaUtils:n}}=this.props,o=r.map((e=>n.retrieveSchema(e,t)));this.state={retrievedOptions:o,selectedOption:this.getMatchingOption(0,t,o)}}componentDidUpdate(e,t){const{formData:r,options:n,idSchema:o}=this.props,{selectedOption:s}=this.state;let i=this.state;if(!(0,a.c2)(e.options,n)){const{registry:{schemaUtils:e}}=this.props;i={selectedOption:s,retrievedOptions:n.map((t=>e.retrieveSchema(t,r)))}}if(!(0,a.c2)(r,e.formData)&&o.$id===e.idSchema.$id){const{retrievedOptions:e}=i,n=this.getMatchingOption(s,r,e);t&&n!==s&&(i={selectedOption:n,retrievedOptions:e})}i!==this.state&&this.setState(i)}getMatchingOption(e,t,r){const{schema:n,registry:{schemaUtils:o}}=this.props,s=(0,a.KU)(n);return o.getClosestMatchingOption(t,r,e,s)}getFieldId(){const{idSchema:e,schema:t}=this.props;return`${e.$id}${t.oneOf?"__oneof_select":"__anyof_select"}`}render(){const{name:e,disabled:t=!1,errorSchema:r={},formContext:o,onBlur:s,onFocus:c,registry:d,schema:u,uiSchema:f}=this.props,{widgets:p,fields:m,translateString:h,globalUiOptions:y,schemaUtils:g}=d,{SchemaField:v}=m,{selectedOption:$,retrievedOptions:b}=this.state,{widget:w="select",placeholder:S,autofocus:_,autocomplete:x,title:E=u.title,...k}=(0,a.$R)(f,y),j=(0,a.Bt)({type:"number"},w,p),P=i()(r,a.s2,[]),O=C()(r,[a.s2]),N=g.getDisplayLabel(u,f,y),A=$>=0&&b[$]||null;let D;if(A){const{required:e}=u;D=e?(0,a.dW)({required:e},A):A}let I=[];a.Ru in u&&f&&a.Ru in f?Array.isArray(f[a.Ru])?I=f[a.Ru]:console.warn(`uiSchema.oneOf is not an array for "${E||e}"`):a.Xo in u&&f&&a.Xo in f&&(Array.isArray(f[a.Xo])?I=f[a.Xo]:console.warn(`uiSchema.anyOf is not an array for "${E||e}"`));let F=f;$>=0&&I.length>$&&(F=I[$]);const T=E?a.Zl.TitleOptionPrefix:a.Zl.OptionPrefix,R=E?[E]:[],M=b.map(((e,t)=>{const{title:r=e.title}=(0,a.$R)(I[t]);return{label:r||h(T,R.concat(String(t+1))),value:t}}));return(0,n.jsxs)("div",{className:"panel panel-default panel-body",children:[(0,n.jsx)("div",{className:"form-group",children:(0,n.jsx)(j,{id:this.getFieldId(),name:`${e}${u.oneOf?"__oneof_select":"__anyof_select"}`,schema:{type:"number",default:0},onChange:this.onOptionChange,onBlur:s,onFocus:c,disabled:t||l()(M),multiple:!1,rawErrors:P,errorSchema:O,value:$>=0?$:void 0,options:{enumOptions:M,...k},registry:d,formContext:o,placeholder:S,autocomplete:x,autofocus:_,label:null!==E&&void 0!==E?E:e,hideLabel:!N})}),D&&(0,n.jsx)(v,{...this.props,schema:D,uiSchema:F})]})}}const O=P,N=/\.([0-9]*0)*$/,A=/[0.]0*$/;const D=function(e){const{registry:t,onChange:r,formData:s,value:i}=e,[c,l]=(0,o.useState)(i),{StringField:d}=t.fields;let u=s;const f=(0,o.useCallback)((e=>{l(e),"."===`${e}`.charAt(0)&&(e=`0${e}`);const t="string"===typeof 
e&&e.match(N)?(0,a.i)(e.replace(A,"")):(0,a.i)(e);r(t)}),[r]);if("string"===typeof c&&"number"===typeof u){const e=new RegExp(`${u}`.replace(".","\\.")+"\\.?0*$");c.match(e)&&(u=c)}return(0,n.jsx)(d,{...e,formData:u,onChange:f})};function I(){return I=Object.assign?Object.assign.bind():function(e){for(var t=1;t(e[t.toLowerCase()]=t,e)),{for:"htmlFor"}),V={amp:"&",apos:"'",gt:">",lt:"<",nbsp:"\xa0",quot:"\u201c"},B=["style","script"],q=/([-A-Z0-9_:]+)(?:\s*=\s*(?:(?:"((?:\\.|[^"])*)")|(?:'((?:\\.|[^'])*)')|(?:\{((?:\\.|{[^}]*?}|[^}])*)\})))?/gi,z=/mailto:/i,L=/\n{2,}$/,K=/^(\s*>[\s\S]*?)(?=\n{2,})/,W=/^ *> ?/gm,H=/^ {2,}\n/,G=/^(?:( *[-*_])){3,} *(?:\n *)+\n/,Z=/^\s*(`{3,}|~{3,}) *(\S+)?([^\n]*?)?\n([\s\S]+?)\s*\1 *(?:\n *)*\n?/,J=/^(?: {4}[^\n]+\n*)+(?:\n *)+\n?/,Y=/^(`+)\s*([\s\S]*?[^`])\s*\1(?!`)/,Q=/^(?:\n *)*\n/,X=/\r\n?/g,ee=/^\[\^([^\]]+)](:(.*)((\n+ {4,}.*)|(\n(?!\[\^).+))*)/,te=/^\[\^([^\]]+)]/,re=/\f/g,ne=/^---[ \t]*\n(.|\n)*\n---[ \t]*\n/,oe=/^\s*?\[(x|\s)\]/,ae=/^ *(#{1,6}) *([^\n]+?)(?: +#*)?(?:\n *)*(?:\n|$)/,se=/^ *(#{1,6}) +([^\n]+?)(?: +#*)?(?:\n *)*(?:\n|$)/,ie=/^([^\n]+)\n *(=|-){3,} *(?:\n *)+\n/,ce=/^ *(?!<[a-z][^ >/]* ?\/>)<([a-z][^ >/]*) ?([^>]*)>\n?(\s*(?:<\1[^>]*?>[\s\S]*?<\/\1>|(?!<\1\b)[\s\S])*?)<\/\1>(?!<\/\1>)\n*/i,le=/&([a-z0-9]+|#[0-9]{1,6}|#x[0-9a-fA-F]{1,6});/gi,de=/^<!--[\s\S]*?(?:-->)/,ue=/^(data|aria|x)-[a-z_][a-z\d_.-]*$/,fe=/^ *<([a-z][a-z0-9:]*)(?:\s+((?:<.*?>|[^>])*))?\/?>(?!<\/\1>)(\s*\n)?/i,pe=/^\{.*\}$/,me=/^(https?:\/\/[^\s<]+[^<.,:;"')\]\s])/,he=/^<([^ >]+@[^ >]+)>/,ye=/^<([^ >]+:\/[^ >]+)>/,ge=/-([a-z])?/gi,ve=/^(.*\|?.*)\n *(\|? *[-:]+ *\|[-| :]*)\n((?:.*\|.*\n)*)\n?/,$e=/^\[([^\]]*)\]:\s+<?([^\s>]+)>?\s*("([^"]*)")?/,be=/^!\[([^\]]*)\] ?\[([^\]]*)\]/,we=/^\[([^\]]*)\] ?\[([^\]]*)\]/,Se=/(\[|\])/g,_e=/(\n|^[-*]\s|^#|^ {2,}|^-{2,}|^>\s)/,xe=/\t/g,Ee=/^ *\| */,ke=/(^ *\||\| *$)/g,je=/ *$/,Ce=/^ *:-+: *$/,Pe=/^ *:-+ *$/,Oe=/^ *-+: *$/,Ne="((?:\\[.*?\\][([].*?[)\\]]|<.*?>(?:.*?<.*?>)?|`.*?`|~~.*?~~|==.*?==|.|\\n)*?)",Ae=new RegExp(`^([*_])\\1${Ne}\\1\\1(?!\\1)`),De=new RegExp(`^([*_])${Ne}\\1(?!\\1|\\w)`),Ie=new RegExp(`^==${Ne}==`),Fe=new RegExp(`^~~${Ne}~~`),Te=/^\\([^0-9A-Za-z\s])/,Re=/^[\s\S]+?(?=[^0-9A-Z\s\u00c0-\uffff&#;.()'"]|\d+\.|\n\n| {2,}\n|\w+:\S|$)/i,Me=/^\n+/,Ue=/^([ \t]*)/,Ve=/\\([^\\])/g,Be=/ *\n+$/,qe=/(?:^|\n)( *)$/,ze="(?:\\d+\\.)",Le="(?:[*+-])";function Ke(e){return"( *)("+(1===e?ze:Le)+") +"}const We=Ke(1),He=Ke(2);function Ge(e){return new RegExp("^"+(1===e?We:He))}const Ze=Ge(1),Je=Ge(2);function Ye(e){return new RegExp("^"+(1===e?We:He)+"[^\\n]*(?:\\n(?!\\1"+(1===e?ze:Le)+" )[^\\n]*)*(\\n|$)","gm")}const Qe=Ye(1),Xe=Ye(2);function et(e){const t=1===e?ze:Le;return new RegExp("^( *)("+t+") [\\s\\S]+?(?:\\n{2,}(?! 
)(?!\\1"+t+" (?!"+t+" ))\\n*|\\s*\\n*$)")}const tt=et(1),rt=et(2);function nt(e,t){const r=1===t,n=r?tt:rt,o=r?Qe:Xe,a=r?Ze:Je;return{match(e,t,r){const o=qe.exec(r);return o&&(t.list||!t.inline&&!t.simple)?n.exec(e=o[1]+e):null},order:1,parse(e,t,n){const s=r?+e[2]:void 0,i=e[0].replace(L,"\n").match(o);let c=!1;return{items:i.map((function(e,r){const o=a.exec(e)[0].length,s=new RegExp("^ {1,"+o+"}","gm"),l=e.replace(s,"").replace(a,""),d=r===i.length-1,u=-1!==l.indexOf("\n\n")||d&&c;c=u;const f=n.inline,p=n.list;let m;n.list=!0,u?(n.inline=!1,m=l.replace(Be,"\n\n")):(n.inline=!0,m=l.replace(Be,""));const h=t(m,n);return n.inline=f,n.list=p,h})),ordered:r,start:s}},render:(t,r,n)=>e(t.ordered?"ol":"ul",{key:n.key,start:"20"===t.type?t.start:void 0},t.items.map((function(t,o){return e("li",{key:o},r(t,n))})))}}const ot=new RegExp("^\\[((?:\\[[^\\]]*\\]|[^\\[\\]]|\\](?=[^\\[]*\\]))*)\\]\\(\\s*<?((?:\\([^)]*\\)|[^\\s\\\\]|\\\\.)*)>?(?:\\s+['\"]([\\s\\S]*?)['\"])?\\s*\\)"),at=/^!\[(.*?)\]\( *((?:\([^)]*\)|[^() ])*) *"?([^)"]*)?"?\)/,st=[K,Z,J,ae,ie,se,de,ve,Qe,tt,Xe,rt],it=[...st,/^[^\n]+(?: \n|\n{2,})/,ce,fe];function ct(e){return e.replace(/[\xc0\xc1\xc2\xc3\xc4\xc5\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xc6]/g,"a").replace(/[\xe7\xc7]/g,"c").replace(/[\xf0\xd0]/g,"d").replace(/[\xc8\xc9\xca\xcb\xe9\xe8\xea\xeb]/g,"e").replace(/[\xcf\xef\xce\xee\xcd\xed\xcc\xec]/g,"i").replace(/[\xd1\xf1]/g,"n").replace(/[\xf8\xd8\u0153\u0152\xd5\xf5\xd4\xf4\xd3\xf3\xd2\xf2]/g,"o").replace(/[\xdc\xfc\xdb\xfb\xda\xfa\xd9\xf9]/g,"u").replace(/[\u0178\xff\xdd\xfd]/g,"y").replace(/[^a-z0-9- ]/gi,"").replace(/ /gi,"-").toLowerCase()}function lt(e){return Oe.test(e)?"right":Ce.test(e)?"center":Pe.test(e)?"left":null}function dt(e,t,r){const n=r.inTable;r.inTable=!0;const o=t(e.trim(),r);r.inTable=n;let a=[[]];return o.forEach((function(e,t){"26"===e.type?0!==t&&t!==o.length-1&&a.push([]):("27"!==e.type||null!=o[t+1]&&"26"!==o[t+1].type||(e.text=e.text.replace(je,"")),a[a.length-1].push(e))})),a}function ut(e,t,r){r.inline=!0;const n=dt(e[1],t,r),o=e[2].replace(ke,"").split("|").map(lt),a=function(e,t,r){return e.trim().split("\n").map((function(e){return dt(e,t,r)}))}(e[3],t,r);return r.inline=!1,{align:o,cells:a,header:n,type:"25"}}function ft(e,t){return null==e.align[t]?{}:{textAlign:e.align[t]}}function pt(e){return function(t,r){return r.inline?e.exec(t):null}}function mt(e){return function(t,r){return r.inline||r.simple?e.exec(t):null}}function ht(e){return function(t,r){return r.inline||r.simple?null:e.exec(t)}}function yt(e){return function(t){return e.exec(t)}}function gt(e,t,r){if(t.inline||t.simple)return null;if(r&&!r.endsWith("\n"))return null;let n="";e.split("\n").every((e=>!st.some((t=>t.test(e)))&&(n+=e+"\n",e.trim())));const o=n.trimEnd();return""==o?null:[n,o]}function vt(e){try{if(decodeURIComponent(e).replace(/[^A-Za-z0-9/:]/g,"").match(/^\s*(javascript|vbscript|data(?!:image)):/i))return}catch(e){return null}return e}function $t(e){return e.replace(Ve,"$1")}function bt(e,t,r){const n=r.inline||!1,o=r.simple||!1;r.inline=!0,r.simple=!0;const a=e(t,r);return r.inline=n,r.simple=o,a}function wt(e,t,r){const n=r.inline||!1,o=r.simple||!1;r.inline=!1,r.simple=!0;const a=e(t,r);return r.inline=n,r.simple=o,a}function St(e,t,r){return r.inline=!1,e(t,r)}const _t=(e,t,r)=>({children:bt(t,e[1],r)});function xt(){return{}}function Et(){return null}function kt(...e){return e.filter(Boolean).join(" ")}function jt(e,t,r){let n=e;const o=t.split(".");for(;o.length&&(n=n[o[0]],void 0!==n);)o.shift();return n||r}function 
Ct(e="",t={}){t.overrides=t.overrides||{},t.slugify=t.slugify||ct,t.namedCodesToUnicode=t.namedCodesToUnicode?I({},V,t.namedCodesToUnicode):V;const r=t.createElement||o.createElement;function n(e,n,...o){const a=jt(t.overrides,`${e}.props`,{});return r(function(e,t){const r=jt(t,e);return r?"function"==typeof r||"object"==typeof r&&"render"in r?r:jt(t,`${e}.component`,e):e}(e,t.overrides),I({},n,a,{className:kt(null==n?void 0:n.className,a.className)||void 0}),...o)}function a(e){e=e.replace(ne,"");let r=!1;t.forceInline?r=!0:t.forceBlock||(r=!1===_e.test(e));const a=u(d(r?e:`${e.trimEnd().replace(Me,"")}\n\n`,{inline:r}));for(;"string"==typeof a[a.length-1]&&!a[a.length-1].trim();)a.pop();if(null===t.wrapper)return a;const s=t.wrapper||(r?"span":"div");let i;if(a.length>1||t.forceWrapper)i=a;else{if(1===a.length)return i=a[0],"string"==typeof i?n("span",{key:"outer"},i):i;i=null}return o.createElement(s,{key:"outer"},i)}function s(e){const t=e.match(q);return t?t.reduce((function(e,t,r){const n=t.indexOf("=");if(-1!==n){const s=function(e){return-1!==e.indexOf("-")&&null===e.match(ue)&&(e=e.replace(ge,(function(e,t){return t.toUpperCase()}))),e}(t.slice(0,n)).trim(),i=function(e){const t=e[0];return('"'===t||"'"===t)&&e.length>=2&&e[e.length-1]===t?e.slice(1,-1):e}(t.slice(n+1).trim()),c=U[s]||s,l=e[c]=function(e,t){return"style"===e?t.split(/;\s?/).reduce((function(e,t){const r=t.slice(0,t.indexOf(":"));return e[r.trim().replace(/(-[a-z])/g,(e=>e[1].toUpperCase()))]=t.slice(r.length+1).trim(),e}),{}):"href"===e||"src"===e?vt(t):(t.match(pe)&&(t=t.slice(1,t.length-1)),"true"===t||"false"!==t&&t)}(s,i);"string"==typeof l&&(ce.test(l)||fe.test(l))&&(e[c]=o.cloneElement(a(l.trim()),{key:r}))}else"style"!==t&&(e[U[t]||t]=!0);return e}),{}):null}const i=[],c={},l={0:{match:ht(K),order:1,parse:(e,t,r)=>({children:t(e[0].replace(W,""),r)}),render:(e,t,r)=>n("blockquote",{key:r.key},t(e.children,r))},1:{match:yt(H),order:1,parse:xt,render:(e,t,r)=>n("br",{key:r.key})},2:{match:ht(G),order:1,parse:xt,render:(e,t,r)=>n("hr",{key:r.key})},3:{match:ht(J),order:0,parse:e=>({lang:void 0,text:e[0].replace(/^ {4}/gm,"").replace(/\n+$/,"")}),render:(e,t,r)=>n("pre",{key:r.key},n("code",I({},e.attrs,{className:e.lang?`lang-${e.lang}`:""}),e.text))},4:{match:ht(Z),order:0,parse:e=>({attrs:s(e[3]||""),lang:e[2]||void 0,text:e[4],type:"3"})},5:{match:mt(Y),order:3,parse:e=>({text:e[2]}),render:(e,t,r)=>n("code",{key:r.key},e.text)},6:{match:ht(ee),order:0,parse:e=>(i.push({footnote:e[2],identifier:e[1]}),{}),render:Et},7:{match:pt(te),order:1,parse:e=>({target:`#${t.slugify(e[1])}`,text:e[1]}),render:(e,t,r)=>n("a",{key:r.key,href:vt(e.target)},n("sup",{key:r.key},e.text))},8:{match:pt(oe),order:1,parse:e=>({completed:"x"===e[1].toLowerCase()}),render:(e,t,r)=>n("input",{checked:e.completed,key:r.key,readOnly:!0,type:"checkbox"})},9:{match:ht(t.enforceAtxHeadings?se:ae),order:1,parse:(e,r,n)=>({children:bt(r,e[2],n),id:t.slugify(e[2]),level:e[1].length}),render:(e,t,r)=>n(`h${e.level}`,{id:e.id,key:r.key},t(e.children,r))},10:{match:ht(ie),order:0,parse:(e,t,r)=>({children:bt(t,e[1],r),level:"="===e[2]?1:2,type:"9"})},11:{match:yt(ce),order:1,parse(e,t,r){const[,n]=e[3].match(Ue),o=new RegExp(`^${n}`,"gm"),a=e[3].replace(o,""),i=(c=a,it.some((e=>e.test(c)))?St:bt);var c;const l=e[1].toLowerCase(),d=-1!==B.indexOf(l),u={attrs:s(e[2]),noInnerParse:d,tag:(d?l:e[1]).trim()};return 
r.inAnchor=r.inAnchor||"a"===l,d?u.text=e[3]:u.children=i(t,a,r),r.inAnchor=!1,u},render:(e,t,r)=>n(e.tag,I({key:r.key},e.attrs),e.text||t(e.children,r))},13:{match:yt(fe),order:1,parse:e=>({attrs:s(e[2]||""),tag:e[1].trim()}),render:(e,t,r)=>n(e.tag,I({},e.attrs,{key:r.key}))},12:{match:yt(de),order:1,parse:()=>({}),render:Et},14:{match:mt(at),order:1,parse:e=>({alt:e[1],target:$t(e[2]),title:e[3]}),render:(e,t,r)=>n("img",{key:r.key,alt:e.alt||void 0,title:e.title||void 0,src:vt(e.target)})},15:{match:pt(ot),order:3,parse:(e,t,r)=>({children:wt(t,e[1],r),target:$t(e[2]),title:e[3]}),render:(e,t,r)=>n("a",{key:r.key,href:vt(e.target),title:e.title},t(e.children,r))},16:{match:pt(ye),order:0,parse:e=>({children:[{text:e[1],type:"27"}],target:e[1],type:"15"})},17:{match:(e,t)=>t.inAnchor?null:pt(me)(e,t),order:0,parse:e=>({children:[{text:e[1],type:"27"}],target:e[1],title:void 0,type:"15"})},18:{match:pt(he),order:0,parse(e){let t=e[1],r=e[1];return z.test(r)||(r="mailto:"+r),{children:[{text:t.replace("mailto:",""),type:"27"}],target:r,type:"15"}}},20:nt(n,1),33:nt(n,2),19:{match:ht(Q),order:3,parse:xt,render:()=>"\n"},21:{match:gt,order:3,parse:_t,render:(e,t,r)=>n("p",{key:r.key},t(e.children,r))},22:{match:pt($e),order:0,parse:e=>(c[e[1]]={target:e[2],title:e[4]},{}),render:Et},23:{match:mt(be),order:0,parse:e=>({alt:e[1]||void 0,ref:e[2]}),render:(e,t,r)=>c[e.ref]?n("img",{key:r.key,alt:e.alt,src:vt(c[e.ref].target),title:c[e.ref].title}):null},24:{match:pt(we),order:0,parse:(e,t,r)=>({children:t(e[1],r),fallbackChildren:t(e[0].replace(Se,"\\$1"),r),ref:e[2]}),render:(e,t,r)=>c[e.ref]?n("a",{key:r.key,href:vt(c[e.ref].target),title:c[e.ref].title},t(e.children,r)):n("span",{key:r.key},t(e.fallbackChildren,r))},25:{match:ht(ve),order:1,parse:ut,render:(e,t,r)=>n("table",{key:r.key},n("thead",null,n("tr",null,e.header.map((function(o,a){return n("th",{key:a,style:ft(e,a)},t(o,r))})))),n("tbody",null,e.cells.map((function(o,a){return n("tr",{key:a},o.map((function(o,a){return n("td",{key:a,style:ft(e,a)},t(o,r))})))}))))},26:{match:function(e,t){return t.inTable?(t.inline=!0,Ee.exec(e)):null},order:1,parse:function(){return{type:"26"}},render:()=>" | "},27:{match:yt(Re),order:4,parse:e=>({text:e[0].replace(le,((e,r)=>t.namedCodesToUnicode[r]?t.namedCodesToUnicode[r]:e))}),render:e=>e.text},28:{match:mt(Ae),order:2,parse:(e,t,r)=>({children:t(e[2],r)}),render:(e,t,r)=>n("strong",{key:r.key},t(e.children,r))},29:{match:mt(De),order:3,parse:(e,t,r)=>({children:t(e[2],r)}),render:(e,t,r)=>n("em",{key:r.key},t(e.children,r))},30:{match:mt(Te),order:1,parse:e=>({text:e[1],type:"27"})},31:{match:mt(Ie),order:3,parse:_t,render:(e,t,r)=>n("mark",{key:r.key},t(e.children,r))},32:{match:mt(Fe),order:3,parse:_t,render:(e,t,r)=>n("del",{key:r.key},t(e.children,r))}};!0===t.disableParsingRawHTML&&(delete l[11],delete l[13]);const d=function(e){let t=Object.keys(e);function r(n,o){let a=[],s="";for(;n;){let i=0;for(;ia(r,n,o)),r,n,o):a(r,n,o)}}(l,t.renderRule),function e(t,r={}){if(Array.isArray(t)){const n=r.key,o=[];let a=!1;for(let s=0;s{let{children:t="",options:r}=e,n=function(e,t){if(null==e)return{};var r,n,o={},a=Object.keys(e);for(n=0;n<a.length;n++)r=a[n],t.indexOf(r)>=0||(o[r]=e[r]);return o}(e,F);return o.cloneElement(Ct(t,r),n)};var Ot=r(61448),Nt=r.n(Ot),At=r(73357),Dt=r.n(At);class It extends o.Component{constructor(){super(...arguments),this.state={wasPropertyKeyModified:!1,additionalProperties:{}},this.onPropertyChange=(e,t=!1)=>(r,n,o)=>{const{formData:a,onChange:s,errorSchema:i}=this.props;void 
0===r&&t&&(r="");s({...a,[e]:r},i&&i&&{...i,[e]:n},o)},this.onDropPropertyClick=e=>t=>{t.preventDefault();const{onChange:r,formData:n}=this.props,o={...n};Dt()(o,e),r(o)},this.getAvailableKey=(e,t)=>{const{uiSchema:r,registry:n}=this.props,{duplicateKeySuffixSeparator:o="-"}=(0,a.$R)(r,n.globalUiOptions);let s=0,i=e;for(;Nt()(t,i);)i=`${e}${o}${++s}`;return i},this.onKeyChange=e=>(t,r)=>{if(e===t)return;const{formData:n,onChange:o,errorSchema:a}=this.props;t=this.getAvailableKey(t,n);const s={...n},i={[e]:t},c=Object.keys(s).map((e=>({[i[e]||e]:s[e]}))),l=Object.assign({},...c);this.setState({wasPropertyKeyModified:!0}),o(l,a&&a&&{...a,[t]:r})},this.handleAddClick=e=>()=>{if(!e.additionalProperties)return;const{formData:t,onChange:r,registry:n}=this.props,o={...t};let s;if(g()(e.additionalProperties)){s=e.additionalProperties.type;let r=e.additionalProperties;if(a.Es in r){const{schemaUtils:e}=n;r=e.retrieveSchema({$ref:r[a.Es]},t),s=r.type}s||!(a.Xo in r)&&!(a.Ru in r)||(s="object")}const i=this.getAvailableKey("newKey",o);$()(o,i,this.getDefaultValue(s)),r(o)}}isRequired(e){const{schema:t}=this.props;return Array.isArray(t.required)&&-1!==t.required.indexOf(e)}getDefaultValue(e){const{registry:{translateString:t}}=this.props;switch(e){case"array":return[];case"boolean":return!1;case"null":return null;case"number":return 0;case"object":return{};default:return t(a.Zl.NewStringDefault)}}render(){var e,t,r,o;const{schema:s,uiSchema:c={},formData:l,errorSchema:d,idSchema:u,name:f,required:p=!1,disabled:m=!1,readonly:h=!1,hideError:y,idPrefix:g,idSeparator:v,onBlur:$,onFocus:b,registry:w,title:S}=this.props,{fields:_,formContext:x,schemaUtils:E,translateString:k,globalUiOptions:j}=w,{SchemaField:C}=_,P=E.retrieveSchema(s,l),O=(0,a.$R)(c,j),{properties:N={}}=P,A=null!==(r=null!==(t=null!==(e=O.title)&&void 0!==e?e:P.title)&&void 0!==t?t:S)&&void 0!==r?r:f,D=null!==(o=O.description)&&void 0!==o?o:P.description;let I;try{const e=Object.keys(N);I=(0,a.UI)(e,O.order)}catch(R){return(0,n.jsxs)("div",{children:[(0,n.jsx)("p",{className:"config-error",style:{color:"red"},children:(0,n.jsx)(Pt,{children:k(a.Zl.InvalidObjectField,[f||"root",R.message])})}),(0,n.jsx)("pre",{children:JSON.stringify(P)})]})}const F=(0,a.$F)("ObjectFieldTemplate",w,O),T={title:!1===O.label?"":A,description:!1===O.label?void 0:D,properties:I.map((e=>{const t=Nt()(P,[a.s1,e,a.Rr]),r=t?c.additionalProperties:c[e],o="hidden"===(0,a.$R)(r).widget,s=i()(u,[e],{});return{content:(0,n.jsx)(C,{name:e,required:this.isRequired(e),schema:i()(P,[a.s1,e],{}),uiSchema:r,errorSchema:i()(d,e),idSchema:s,idPrefix:g,idSeparator:v,formData:i()(l,e),formContext:x,wasPropertyKeyModified:this.state.wasPropertyKeyModified,onKeyChange:this.onKeyChange(e),onChange:this.onPropertyChange(e,t),onBlur:$,onFocus:b,registry:w,disabled:m,readonly:h,hideError:y,onDropPropertyClick:this.onDropPropertyClick},e),name:e,readonly:h,disabled:m,required:p,hidden:o}})),readonly:h,disabled:m,required:p,idSchema:u,uiSchema:c,errorSchema:d,schema:P,formData:l,formContext:x,registry:w};return(0,n.jsx)(F,{...T,onAddClick:this.handleAddClick})}}const Ft=It,Tt={array:"ArrayField",boolean:"BooleanField",integer:"NumberField",number:"NumberField",object:"ObjectField",string:"StringField",null:"NullField"};function 
Rt(e){const{schema:t,idSchema:r,uiSchema:s,formData:i,errorSchema:c,idPrefix:l,idSeparator:d,name:u,onChange:f,onKeyChange:p,onDropPropertyClick:m,required:h,registry:y,wasPropertyKeyModified:v=!1}=e,{formContext:$,schemaUtils:b,globalUiOptions:w}=y,S=(0,a.$R)(s,w),_=(0,a.$F)("FieldTemplate",y,S),x=(0,a.$F)("DescriptionFieldTemplate",y,S),E=(0,a.$F)("FieldHelpTemplate",y,S),k=(0,a.$F)("FieldErrorTemplate",y,S),j=b.retrieveSchema(t,i),P=r[a.K0],O=(0,a.rL)(b.toIdSchema(j,P,i,l,d),r),N=(0,o.useCallback)(((e,t,r)=>f(e,t,r||P)),[P,f]),A=function(e,t,r,o){const s=t.field,{fields:i,translateString:c}=o;if("function"===typeof s)return s;if("string"===typeof s&&s in i)return i[s];const l=(0,a._I)(e),d=Array.isArray(l)?l[0]:l||"",u=e.$id;let f=Tt[d];return u&&u in i&&(f=u),f||!e.anyOf&&!e.oneOf?f in i?i[f]:()=>{const s=(0,a.$F)("UnsupportedFieldTemplate",o,t);return(0,n.jsx)(s,{schema:e,idSchema:r,reason:c(a.Zl.UnknownFieldType,[String(e.type)]),registry:o})}:()=>null}(j,S,O,y),D=Boolean(e.disabled||S.disabled),I=Boolean(e.readonly||S.readonly||e.schema.readOnly||j.readOnly),F=S.hideError,T=void 0===F?e.hideError:Boolean(F),R=Boolean(e.autofocus||S.autofocus);if(0===Object.keys(j).length)return null;const M=b.getDisplayLabel(j,s,w),{__errors:U,...V}=c||{},B=C()(s,["ui:classNames","classNames","ui:style"]);a.ce in B&&(B[a.ce]=C()(B[a.ce],["classNames","style"]));const q=(0,n.jsx)(A,{...e,onChange:N,idSchema:O,schema:j,uiSchema:B,disabled:D,readonly:I,hideError:T,autofocus:R,errorSchema:V,formContext:$,rawErrors:U}),z=O[a.K0];let L;L=v||a.Rr in j?u:S.title||e.schema.title||j.title||e.title||u;const K=S.description||e.schema.description||j.description||"",W=S.enableMarkdownInDescription?(0,n.jsx)(Pt,{children:K}):K,H=S.help,G="hidden"===S.widget,Z=["form-group","field",`field-${(0,a._I)(j)}`];!T&&U&&U.length>0&&Z.push("field-error has-error has-danger"),(null===s||void 0===s?void 0:s.classNames)&&Z.push(s.classNames),S.classNames&&Z.push(S.classNames);const J=(0,n.jsx)(E,{help:H,idSchema:O,schema:j,uiSchema:s,hasErrors:!T&&U&&U.length>0,registry:y}),Y=T||(j.anyOf||j.oneOf)&&!b.isSelect(j)?void 0:(0,n.jsx)(k,{errors:U,errorSchema:c,idSchema:O,schema:j,uiSchema:s,registry:y}),Q={description:(0,n.jsx)(x,{id:(0,a.IR)(z),description:W,schema:j,uiSchema:s,registry:y}),rawDescription:K,help:J,rawHelp:"string"===typeof H?H:void 0,errors:Y,rawErrors:T?void 0:U,id:z,label:L,hidden:G,onChange:f,onKeyChange:p,onDropPropertyClick:m,required:h,disabled:D,readonly:I,hideError:T,displayLabel:M,classNames:Z.join(" ").trim(),style:S.style,formContext:$,formData:i,schema:j,uiSchema:s,registry:y},X=y.fields.AnyOfField,ee=y.fields.OneOfField,te=(null===s||void 0===s?void 0:s["ui:field"])&&!0===(null===s||void 0===s?void 0:s["ui:fieldReplacesAnyOrOneOf"]);return(0,n.jsx)(_,{...Q,children:(0,n.jsxs)(n.Fragment,{children:[q,j.anyOf&&!te&&!b.isSelect(j)&&(0,n.jsx)(X,{name:u,disabled:D,readonly:I,hideError:T,errorSchema:c,formData:i,formContext:$,idPrefix:l,idSchema:O,idSeparator:d,onBlur:e.onBlur,onChange:e.onChange,onFocus:e.onFocus,options:j.anyOf.map((e=>b.retrieveSchema(g()(e)?e:{},i))),registry:y,schema:j,uiSchema:s}),j.oneOf&&!te&&!b.isSelect(j)&&(0,n.jsx)(ee,{name:u,disabled:D,readonly:I,hideError:T,errorSchema:c,formData:i,formContext:$,idPrefix:l,idSchema:O,idSeparator:d,onBlur:e.onBlur,onChange:e.onChange,onFocus:e.onFocus,options:j.oneOf.map((e=>b.retrieveSchema(g()(e)?e:{},i))),registry:y,schema:j,uiSchema:s})]})})}class Mt extends 
o.Component{shouldComponentUpdate(e){return!(0,a.c2)(this.props,e)}render(){return(0,n.jsx)(Rt,{...this.props})}}const Ut=Mt;const Vt=function(e){var t;const{schema:r,name:o,uiSchema:s,idSchema:i,formData:c,required:l,disabled:d=!1,readonly:u=!1,autofocus:f=!1,onChange:p,onBlur:m,onFocus:h,registry:y,rawErrors:g,hideError:v}=e,{title:$,format:b}=r,{widgets:w,formContext:S,schemaUtils:_,globalUiOptions:x}=y,E=_.isSelect(r)?(0,a.f9)(r):void 0;let k=E?"select":"text";b&&(0,a.Lw)(r,b,w)&&(k=b);const{widget:j=k,placeholder:C="",title:P,...O}=(0,a.$R)(s),N=_.getDisplayLabel(r,s,x),A=null!==(t=null!==P&&void 0!==P?P:$)&&void 0!==t?t:o,D=(0,a.Bt)(r,j,w);return(0,n.jsx)(D,{options:{...O,enumOptions:E},schema:r,uiSchema:s,id:i.$id,name:o,label:A,hideLabel:!N,hideError:v,value:c,onChange:p,onBlur:m,onFocus:h,required:l,disabled:d,readonly:u,formContext:S,autofocus:f,registry:y,placeholder:C,rawErrors:g})};const Bt=function(e){const{formData:t,onChange:r}=e;return(0,o.useEffect)((()=>{void 0===t&&r(null)}),[t,r]),null};const qt=function(){return{AnyOfField:O,ArrayField:E,BooleanField:k,NumberField:D,ObjectField:Ft,OneOfField:O,SchemaField:Ut,StringField:Vt,NullField:Bt}};function zt(e){const{idSchema:t,description:r,registry:o,schema:s,uiSchema:i}=e,c=(0,a.$R)(i,o.globalUiOptions),{label:l=!0}=c;if(!r||!l)return null;const d=(0,a.$F)("DescriptionFieldTemplate",o,c);return(0,n.jsx)(d,{id:(0,a.IR)(t),description:r,schema:s,uiSchema:i,registry:o})}function Lt(e){const{children:t,className:r,disabled:o,hasToolbar:a,hasMoveDown:s,hasMoveUp:i,hasRemove:c,hasCopy:l,index:d,onCopyIndexClick:u,onDropIndexClick:f,onReorderClick:p,readonly:m,registry:h,uiSchema:y}=e,{CopyButton:g,MoveDownButton:v,MoveUpButton:$,RemoveButton:b}=h.templates.ButtonTemplates,w={flex:1,paddingLeft:6,paddingRight:6,fontWeight:"bold"};return(0,n.jsxs)("div",{className:r,children:[(0,n.jsx)("div",{className:a?"col-xs-9":"col-xs-12",children:t}),a&&(0,n.jsx)("div",{className:"col-xs-3 array-item-toolbox",children:(0,n.jsxs)("div",{className:"btn-group",style:{display:"flex",justifyContent:"space-around"},children:[(i||s)&&(0,n.jsx)($,{style:w,disabled:o||m||!i,onClick:p(d,d-1),uiSchema:y,registry:h}),(i||s)&&(0,n.jsx)(v,{style:w,disabled:o||m||!s,onClick:p(d,d+1),uiSchema:y,registry:h}),l&&(0,n.jsx)(g,{style:w,disabled:o||m,onClick:u(d),uiSchema:y,registry:h}),c&&(0,n.jsx)(b,{style:w,disabled:o||m,onClick:f(d),uiSchema:y,registry:h})]})})]})}function Kt(e){const{canAdd:t,className:r,disabled:o,idSchema:s,uiSchema:i,items:c,onAddClick:l,readonly:d,registry:u,required:f,schema:p,title:m}=e,h=(0,a.$R)(i),y=(0,a.$F)("ArrayFieldDescriptionTemplate",u,h),g=(0,a.$F)("ArrayFieldItemTemplate",u,h),v=(0,a.$F)("ArrayFieldTitleTemplate",u,h),{ButtonTemplates:{AddButton:$}}=u.templates;return(0,n.jsxs)("fieldset",{className:r,id:s.$id,children:[(0,n.jsx)(v,{idSchema:s,title:h.title||m,required:f,schema:p,uiSchema:i,registry:u}),(0,n.jsx)(y,{idSchema:s,description:h.description||p.description,schema:p,uiSchema:i,registry:u}),(0,n.jsx)("div",{className:"row array-item-list",children:c&&c.map((({key:e,...t})=>(0,n.jsx)(g,{...t},e)))}),t&&(0,n.jsx)($,{className:"array-item-add",onClick:l,disabled:o||d,uiSchema:i,registry:u})]})}function Wt(e){const{idSchema:t,title:r,schema:o,uiSchema:s,required:i,registry:c}=e,l=(0,a.$R)(s,c.globalUiOptions),{label:d=!0}=l;if(!r||!d)return null;const u=(0,a.$F)("TitleFieldTemplate",c,l);return(0,n.jsx)(u,{id:(0,a.EH)(t),title:r,required:i,schema:o,uiSchema:s,registry:c})}function 
Ht(e){const{id:t,name:r,value:s,readonly:i,disabled:c,autofocus:l,onBlur:d,onFocus:u,onChange:f,onChangeOverride:p,options:m,schema:h,uiSchema:y,formContext:g,registry:v,rawErrors:$,type:b,hideLabel:w,hideError:S,..._}=e;if(!t)throw console.log("No id for",e),new Error(`no id for props ${JSON.stringify(e)}`);const x={..._,...(0,a.ti)(h,b,m)};let E;E="number"===x.type||"integer"===x.type?s||0===s?s:"":null==s?"":s;const k=(0,o.useCallback)((({target:{value:e}})=>f(""===e?m.emptyValue:e)),[f,m]),j=(0,o.useCallback)((({target:{value:e}})=>d(t,e)),[d,t]),C=(0,o.useCallback)((({target:{value:e}})=>u(t,e)),[u,t]);return(0,n.jsxs)(n.Fragment,{children:[(0,n.jsx)("input",{id:t,name:t,className:"form-control",readOnly:i,disabled:c,autoFocus:l,value:E,...x,list:h.examples?(0,a.$c)(t):void 0,onChange:p||k,onBlur:j,onFocus:C,"aria-describedby":(0,a.eG)(t,!!h.examples)}),Array.isArray(h.examples)&&(0,n.jsx)("datalist",{id:(0,a.$c)(t),children:h.examples.concat(h.default&&!h.examples.includes(h.default)?[h.default]:[]).map((e=>(0,n.jsx)("option",{value:e},e)))},`datalist_${t}`)]})}function Gt({uiSchema:e}){const{submitText:t,norender:r,props:o={}}=(0,a.Bj)(e);return r?null:(0,n.jsx)("div",{children:(0,n.jsx)("button",{type:"submit",...o,className:`btn btn-info ${o.className||""}`,children:t})})}function Zt(e){const{iconType:t="default",icon:r,className:o,uiSchema:a,registry:s,...i}=e;return(0,n.jsx)("button",{type:"button",className:`btn btn-${t} ${o}`,...i,children:(0,n.jsx)("i",{className:`glyphicon glyphicon-${r}`})})}function Jt(e){const{registry:{translateString:t}}=e;return(0,n.jsx)(Zt,{title:t(a.Zl.CopyButton),className:"array-item-copy",...e,icon:"copy"})}function Yt(e){const{registry:{translateString:t}}=e;return(0,n.jsx)(Zt,{title:t(a.Zl.MoveDownButton),className:"array-item-move-down",...e,icon:"arrow-down"})}function Qt(e){const{registry:{translateString:t}}=e;return(0,n.jsx)(Zt,{title:t(a.Zl.MoveUpButton),className:"array-item-move-up",...e,icon:"arrow-up"})}function Xt(e){const{registry:{translateString:t}}=e;return(0,n.jsx)(Zt,{title:t(a.Zl.RemoveButton),className:"array-item-remove",...e,iconType:"danger",icon:"remove"})}function er({className:e,onClick:t,disabled:r,registry:o}){const{translateString:s}=o;return(0,n.jsx)("div",{className:"row",children:(0,n.jsx)("p",{className:`col-xs-3 col-xs-offset-9 text-right ${e}`,children:(0,n.jsx)(Zt,{iconType:"info",icon:"plus",className:"btn-add col-xs-12",title:s(a.Zl.AddButton),onClick:t,disabled:r,registry:o})})})}const tr=function(){return{SubmitButton:Gt,AddButton:er,CopyButton:Jt,MoveDownButton:Yt,MoveUpButton:Qt,RemoveButton:Xt}};function rr(e){const{id:t,description:r}=e;return r?"string"===typeof r?(0,n.jsx)("p",{id:t,className:"field-description",children:r}):(0,n.jsx)("div",{id:t,className:"field-description",children:r}):null}function nr({errors:e,registry:t}){const{translateString:r}=t;return(0,n.jsxs)("div",{className:"panel panel-danger errors",children:[(0,n.jsx)("div",{className:"panel-heading",children:(0,n.jsx)("h3",{className:"panel-title",children:r(a.Zl.ErrorsLabel)})}),(0,n.jsx)("ul",{className:"list-group",children:e.map(((e,t)=>(0,n.jsx)("li",{className:"list-group-item text-danger",children:e.stack},t)))})]})}const or="*";function ar(e){const{label:t,required:r,id:o}=e;return t?(0,n.jsxs)("label",{className:"control-label",htmlFor:o,children:[t,r&&(0,n.jsx)("span",{className:"required",children:or})]}):null}const 
sr=function(e){const{id:t,label:r,children:o,errors:s,help:i,description:c,hidden:l,required:d,displayLabel:u,registry:f,uiSchema:p}=e,m=(0,a.$R)(p),h=(0,a.$F)("WrapIfAdditionalTemplate",f,m);return l?(0,n.jsx)("div",{className:"hidden",children:o}):(0,n.jsxs)(h,{...e,children:[u&&(0,n.jsx)(ar,{label:r,required:d,id:t}),u&&c?c:null,o,s,i]})};function ir(e){const{errors:t=[],idSchema:r}=e;if(0===t.length)return null;const o=(0,a.rD)(r);return(0,n.jsx)("div",{children:(0,n.jsx)("ul",{id:o,className:"error-detail bs-callout bs-callout-info",children:t.filter((e=>!!e)).map(((e,t)=>(0,n.jsx)("li",{className:"text-danger",children:e},t)))})})}function cr(e){const{idSchema:t,help:r}=e;if(!r)return null;const o=(0,a.F5)(t);return"string"===typeof r?(0,n.jsx)("p",{id:o,className:"help-block",children:r}):(0,n.jsx)("div",{id:o,className:"help-block",children:r})}function lr(e){const{description:t,disabled:r,formData:o,idSchema:s,onAddClick:i,properties:c,readonly:l,registry:d,required:u,schema:f,title:p,uiSchema:m}=e,h=(0,a.$R)(m),y=(0,a.$F)("TitleFieldTemplate",d,h),g=(0,a.$F)("DescriptionFieldTemplate",d,h),{ButtonTemplates:{AddButton:v}}=d.templates;return(0,n.jsxs)("fieldset",{id:s.$id,children:[p&&(0,n.jsx)(y,{id:(0,a.EH)(s),title:p,required:u,schema:f,uiSchema:m,registry:d}),t&&(0,n.jsx)(g,{id:(0,a.IR)(s),description:t,schema:f,uiSchema:m,registry:d}),c.map((e=>e.content)),(0,a.Xl)(f,m,o)&&(0,n.jsx)(v,{className:"object-property-expand",onClick:i(f),disabled:r||l,uiSchema:m,registry:d})]})}const dr="*";function ur(e){const{id:t,title:r,required:o}=e;return(0,n.jsxs)("legend",{id:t,children:[r,o&&(0,n.jsx)("span",{className:"required",children:dr})]})}const fr=function(e){const{schema:t,idSchema:r,reason:o,registry:s}=e,{translateString:i}=s;let c=a.Zl.UnsupportedField;const l=[];return r&&r.$id&&(c=a.Zl.UnsupportedFieldWithId,l.push(r.$id)),o&&(c=c===a.Zl.UnsupportedField?a.Zl.UnsupportedFieldWithReason:a.Zl.UnsupportedFieldWithIdAndReason,l.push(o)),(0,n.jsxs)("div",{className:"unsupported-field",children:[(0,n.jsx)("p",{children:(0,n.jsx)(Pt,{children:i(c,l)})}),t&&(0,n.jsx)("pre",{children:JSON.stringify(t,null,2)})]})};function pr(e){const{id:t,classNames:r,style:o,disabled:s,label:i,onKeyChange:c,onDropPropertyClick:l,readonly:d,required:u,schema:f,children:p,uiSchema:m,registry:h}=e,{templates:y,translateString:g}=h,{RemoveButton:v}=y.ButtonTemplates,$=g(a.Zl.KeyLabel,[i]);return a.Rr in f?(0,n.jsx)("div",{className:r,style:o,children:(0,n.jsxs)("div",{className:"row",children:[(0,n.jsx)("div",{className:"col-xs-5 form-additional",children:(0,n.jsxs)("div",{className:"form-group",children:[(0,n.jsx)(ar,{label:$,required:u,id:`${t}-key`}),(0,n.jsx)("input",{className:"form-control",type:"text",id:`${t}-key`,onBlur:e=>c(e.target.value),defaultValue:i})]})}),(0,n.jsx)("div",{className:"form-additional form-group col-xs-5",children:p}),(0,n.jsx)("div",{className:"col-xs-2",children:(0,n.jsx)(v,{className:"array-item-remove btn-block",style:{border:"0"},disabled:s||d,onClick:l(i),uiSchema:m,registry:h})})]})}):(0,n.jsx)("div",{className:r,style:o,children:p})}const mr=function(){return{ArrayFieldDescriptionTemplate:zt,ArrayFieldItemTemplate:Lt,ArrayFieldTemplate:Kt,ArrayFieldTitleTemplate:Wt,ButtonTemplates:tr(),BaseInputTemplate:Ht,DescriptionFieldTemplate:rr,ErrorListTemplate:nr,FieldTemplate:sr,FieldErrorTemplate:ir,FieldHelpTemplate:cr,ObjectFieldTemplate:lr,TitleFieldTemplate:ur,UnsupportedFieldTemplate:fr,WrapIfAdditionalTemplate:pr}};function hr(e,t){const r=[];for(let 
n=e;n<=t;n++)r.push({value:n,label:(0,a.eV)(n,2)});return r}function yr({type:e,range:t,value:r,select:o,rootId:s,name:i,disabled:c,readonly:l,autofocus:d,registry:u,onBlur:f,onFocus:p}){const m=s+"_"+e,{SelectWidget:h}=u.widgets;return(0,n.jsx)(h,{schema:{type:"integer"},id:m,name:i,className:"form-control",options:{enumOptions:hr(t[0],t[1])},placeholder:e,value:r,disabled:c,readonly:l,autofocus:d,onChange:t=>o(e,t),onBlur:f,onFocus:p,registry:u,label:"","aria-describedby":(0,a.eG)(s)})}const gr=function({time:e=!1,disabled:t=!1,readonly:r=!1,autofocus:s=!1,options:i,id:c,name:l,registry:d,onBlur:u,onFocus:f,onChange:p,value:m}){const{translateString:h}=d,[y,g]=(0,o.useState)(m),[v,$]=(0,o.useReducer)(((e,t)=>({...e,...t})),(0,a.Rm)(m,e));(0,o.useEffect)((()=>{const t=(0,a.HN)(v,e);!function(e){return Object.values(e).every((e=>-1!==e))}(v)||t===m?y!==m&&(g(m),$((0,a.Rm)(m,e))):p(t)}),[e,m,p,v,y]);const b=(0,o.useCallback)(((e,t)=>{$({[e]:t})}),[]),w=(0,o.useCallback)((n=>{if(n.preventDefault(),t||r)return;const o=(0,a.Rm)((new Date).toJSON(),e);p((0,a.HN)(o,e))}),[t,r,e]),S=(0,o.useCallback)((e=>{e.preventDefault(),t||r||p(void 0)}),[t,r,p]);return(0,n.jsxs)("ul",{className:"list-inline",children:[(0,a.dO)(v,e,i.yearsRange,i.format).map(((e,o)=>(0,n.jsx)("li",{className:"list-inline-item",children:(0,n.jsx)(yr,{rootId:c,name:l,select:b,...e,disabled:t,readonly:r,registry:d,onBlur:u,onFocus:f,autofocus:s&&0===o})},o))),("undefined"===i.hideNowButton||!i.hideNowButton)&&(0,n.jsx)("li",{className:"list-inline-item",children:(0,n.jsx)("a",{href:"#",className:"btn btn-info btn-now",onClick:w,children:h(a.Zl.NowLabel)})}),("undefined"===i.hideClearButton||!i.hideClearButton)&&(0,n.jsx)("li",{className:"list-inline-item",children:(0,n.jsx)("a",{href:"#",className:"btn btn-warning btn-clear",onClick:S,children:h(a.Zl.ClearLabel)})})]})};const vr=function({time:e=!0,...t}){const{AltDateWidget:r}=t.registry.widgets;return(0,n.jsx)(r,{time:e,...t})};const $r=function({schema:e,uiSchema:t,options:r,id:s,value:i,disabled:c,readonly:l,label:d,hideLabel:u,autofocus:f=!1,onBlur:p,onFocus:m,onChange:h,registry:y}){var g;const v=(0,a.$F)("DescriptionFieldTemplate",y,r),$=(0,a.l1)(e),b=(0,o.useCallback)((e=>h(e.target.checked)),[h]),w=(0,o.useCallback)((e=>p(s,e.target.checked)),[p,s]),S=(0,o.useCallback)((e=>m(s,e.target.checked)),[m,s]),_=null!==(g=r.description)&&void 0!==g?g:e.description;return(0,n.jsxs)("div",{className:"checkbox "+(c||l?"disabled":""),children:[!u&&!!_&&(0,n.jsx)(v,{id:(0,a.IR)(s),description:_,schema:e,uiSchema:t,registry:y}),(0,n.jsxs)("label",{children:[(0,n.jsx)("input",{type:"checkbox",id:s,name:s,checked:"undefined"!==typeof i&&i,required:$,disabled:c||l,autoFocus:f,onChange:b,onBlur:w,onFocus:S,"aria-describedby":(0,a.eG)(s)}),(0,a.w)((0,n.jsx)("span",{children:d}),u)]})]})};const br=function({id:e,disabled:t,options:{inline:r=!1,enumOptions:s,enumDisabled:i,emptyValue:c},value:l,autofocus:d=!1,readonly:u,onChange:f,onBlur:p,onFocus:m}){const h=Array.isArray(l)?l:[l],y=(0,o.useCallback)((({target:{value:t}})=>p(e,(0,a.Od)(t,s,c))),[p,e]),g=(0,o.useCallback)((({target:{value:t}})=>m(e,(0,a.Od)(t,s,c))),[m,e]);return(0,n.jsx)("div",{className:"checkboxes",id:e,children:Array.isArray(s)&&s.map(((o,c)=>{const 
l=(0,a.BH)(o.value,h),p=Array.isArray(i)&&-1!==i.indexOf(o.value),m=t||p||u?"disabled":"",v=(0,n.jsxs)("span",{children:[(0,n.jsx)("input",{type:"checkbox",id:(0,a.pk)(e,c),name:e,checked:l,value:String(c),disabled:t||p||u,autoFocus:d&&0===c,onChange:e=>{e.target.checked?f((0,a.L4)(c,h,s)):f((0,a.Uw)(c,h,s))},onBlur:y,onFocus:g,"aria-describedby":(0,a.eG)(e)}),(0,n.jsx)("span",{children:o.label})]});return r?(0,n.jsx)("label",{className:`checkbox-inline ${m}`,children:v},c):(0,n.jsx)("div",{className:`checkbox ${m}`,children:(0,n.jsx)("label",{children:v})},c)}))})};function wr(e){const{disabled:t,readonly:r,options:o,registry:s}=e,i=(0,a.$F)("BaseInputTemplate",s,o);return(0,n.jsx)(i,{type:"color",...e,disabled:t||r})}function Sr(e){const{onChange:t,options:r,registry:s}=e,i=(0,a.$F)("BaseInputTemplate",s,r),c=(0,o.useCallback)((e=>t(e||void 0)),[t]);return(0,n.jsx)(i,{type:"date",...e,onChange:c})}function _r(e){const{onChange:t,value:r,options:o,registry:s}=e,i=(0,a.$F)("BaseInputTemplate",s,o);return(0,n.jsx)(i,{type:"datetime-local",...e,value:(0,a.v4)(r),onChange:e=>t((0,a.z$)(e))})}function xr(e){const{options:t,registry:r}=e,o=(0,a.$F)("BaseInputTemplate",r,t);return(0,n.jsx)(o,{type:"email",...e})}function Er(e,t){return null===e?null:e.replace(";base64",`;name=${encodeURIComponent(t)};base64`)}function kr(e){const{name:t,size:r,type:n}=e;return new Promise(((o,a)=>{const s=new window.FileReader;s.onerror=a,s.onload=e=>{var a;"string"===typeof(null===(a=e.target)||void 0===a?void 0:a.result)?o({dataURL:Er(e.target.result,t),name:t,size:r,type:n}):o({dataURL:null,name:t,size:r,type:n})},s.readAsDataURL(e)}))}function jr({fileInfo:e,registry:t}){const{translateString:r}=t,{dataURL:o,type:s,name:i}=e;return o?["image/jpeg","image/png"].includes(s)?(0,n.jsx)("img",{src:o,style:{maxWidth:"100%"},className:"file-preview"}):(0,n.jsxs)(n.Fragment,{children:[" ",(0,n.jsx)("a",{download:`preview-${i}`,href:o,className:"file-download",children:r(a.Zl.PreviewLabel)})]}):null}function Cr({filesInfo:e,registry:t,preview:r,onRemove:o,options:s}){if(0===e.length)return null;const{translateString:i}=t,{RemoveButton:c}=(0,a.$F)("ButtonTemplates",t,s);return(0,n.jsx)("ul",{className:"file-info",children:e.map(((e,s)=>{const{name:l,size:d,type:u}=e;return(0,n.jsxs)("li",{children:[(0,n.jsx)(Pt,{children:i(a.Zl.FilesInfo,[l,u,String(d)])}),r&&(0,n.jsx)(jr,{fileInfo:e,registry:t}),(0,n.jsx)(c,{onClick:()=>o(s),registry:t})]},s)}))})}const Pr=function(e){const{disabled:t,readonly:r,required:s,multiple:i,onChange:c,value:l,options:d,registry:u}=e,f=(0,a.$F)("BaseInputTemplate",u,d),p=(0,o.useCallback)((e=>{var t;e.target.files&&(t=e.target.files,Promise.all(Array.from(t).map(kr))).then((e=>{const t=e.map((e=>e.dataURL));c(i?l.concat(t[0]):t[0])}))}),[i,l,c]),m=(0,o.useMemo)((()=>(Array.isArray(l)?l:[l]).reduce(((e,t)=>{if(!t)return e;try{const{blob:r,name:n}=(0,a.zM)(t);return[...e,{dataURL:t,name:n,size:r.size,type:r.type}]}catch(M){return e}}),[])),[l]),h=(0,o.useCallback)((e=>{if(i){const t=l.filter(((t,r)=>r!==e));c(t)}else c(void 0)}),[i,l,c]);return(0,n.jsxs)("div",{children:[(0,n.jsx)(f,{...e,disabled:t||r,type:"file",required:!l&&s,onChangeOverride:p,value:"",accept:d.accept?String(d.accept):void 0}),(0,n.jsx)(Cr,{filesInfo:m,onRemove:h,registry:u,preview:d.filePreview,options:d})]})};const Or=function({id:e,value:t}){return(0,n.jsx)("input",{type:"hidden",id:e,name:e,value:"undefined"===typeof t?"":t})};function 
Nr(e){const{options:t,registry:r}=e,o=(0,a.$F)("BaseInputTemplate",r,t);return(0,n.jsx)(o,{type:"password",...e})}const Ar=function({options:e,value:t,required:r,disabled:s,readonly:i,autofocus:c=!1,onBlur:l,onFocus:d,onChange:u,id:f}){const{enumOptions:p,enumDisabled:m,inline:h,emptyValue:y}=e,g=(0,o.useCallback)((({target:{value:e}})=>l(f,(0,a.Od)(e,p,y))),[l,f]),v=(0,o.useCallback)((({target:{value:e}})=>d(f,(0,a.Od)(e,p,y))),[d,f]);return(0,n.jsx)("div",{className:"field-radio-group",id:f,children:Array.isArray(p)&&p.map(((e,o)=>{const l=(0,a.BH)(e.value,t),d=Array.isArray(m)&&-1!==m.indexOf(e.value),p=s||d||i?"disabled":"",y=(0,n.jsxs)("span",{children:[(0,n.jsx)("input",{type:"radio",id:(0,a.pk)(f,o),checked:l,name:f,required:r,value:String(o),disabled:s||d||i,autoFocus:c&&0===o,onChange:()=>u(e.value),onBlur:g,onFocus:v,"aria-describedby":(0,a.eG)(f)}),(0,n.jsx)("span",{children:e.label})]});return h?(0,n.jsx)("label",{className:`radio-inline ${p}`,children:y},o):(0,n.jsx)("div",{className:`radio ${p}`,children:(0,n.jsx)("label",{children:y})},o)}))})};function Dr(e){const{value:t,registry:{templates:{BaseInputTemplate:r}}}=e;return(0,n.jsxs)("div",{className:"field-range-wrapper",children:[(0,n.jsx)(r,{type:"range",...e}),(0,n.jsx)("span",{className:"range-view",children:t})]})}function Ir(e,t){return t?Array.from(e.target.options).slice().filter((e=>e.selected)).map((e=>e.value)):e.target.value}const Fr=function({schema:e,id:t,options:r,value:s,required:i,disabled:c,readonly:l,multiple:d=!1,autofocus:u=!1,onChange:f,onBlur:p,onFocus:m,placeholder:h}){const{enumOptions:y,enumDisabled:g,emptyValue:v}=r,$=d?[]:"",b=(0,o.useCallback)((e=>{const r=Ir(e,d);return m(t,(0,a.Od)(r,y,v))}),[m,t,e,d,r]),w=(0,o.useCallback)((e=>{const r=Ir(e,d);return p(t,(0,a.Od)(r,y,v))}),[p,t,e,d,r]),S=(0,o.useCallback)((e=>{const t=Ir(e,d);return f((0,a.Od)(t,y,v))}),[f,e,d,r]),_=(0,a.Yu)(s,y,d);return(0,n.jsxs)("select",{id:t,name:t,multiple:d,className:"form-control",value:"undefined"===typeof _?$:_,required:i,disabled:c||l,autoFocus:u,onBlur:w,onFocus:b,onChange:S,"aria-describedby":(0,a.eG)(t),children:[!d&&void 0===e.default&&(0,n.jsx)("option",{value:"",children:h}),Array.isArray(y)&&y.map((({value:e,label:t},r)=>{const o=g&&-1!==g.indexOf(e);return(0,n.jsx)("option",{value:String(r),disabled:o,children:t},r)}))]})};function Tr({id:e,options:t={},placeholder:r,value:s,required:i,disabled:c,readonly:l,autofocus:d=!1,onChange:u,onBlur:f,onFocus:p}){const m=(0,o.useCallback)((({target:{value:e}})=>u(""===e?t.emptyValue:e)),[u,t.emptyValue]),h=(0,o.useCallback)((({target:{value:t}})=>f(e,t)),[f,e]),y=(0,o.useCallback)((({target:{value:t}})=>p(e,t)),[e,p]);return(0,n.jsx)("textarea",{id:e,name:e,className:"form-control",value:s||"",placeholder:r,required:i,disabled:c,readOnly:l,autoFocus:d,rows:t.rows,onBlur:h,onFocus:y,onChange:m,"aria-describedby":(0,a.eG)(e)})}Tr.defaultProps={autofocus:!1,options:{}};const Rr=Tr;function Mr(e){const{options:t,registry:r}=e,o=(0,a.$F)("BaseInputTemplate",r,t);return(0,n.jsx)(o,{...e})}function Ur(e){const{onChange:t,options:r,registry:s}=e,i=(0,a.$F)("BaseInputTemplate",s,r),c=(0,o.useCallback)((e=>t(e?`${e}:00`:void 0)),[t]);return(0,n.jsx)(i,{type:"time",...e,onChange:c})}function Vr(e){const{options:t,registry:r}=e,o=(0,a.$F)("BaseInputTemplate",r,t);return(0,n.jsx)(o,{type:"url",...e})}function Br(e){const{options:t,registry:r}=e,o=(0,a.$F)("BaseInputTemplate",r,t);return(0,n.jsx)(o,{type:"number",...e})}const 
qr=function(){return{AltDateWidget:gr,AltDateTimeWidget:vr,CheckboxWidget:$r,CheckboxesWidget:br,ColorWidget:wr,DateWidget:Sr,DateTimeWidget:_r,EmailWidget:xr,FileWidget:Pr,HiddenWidget:Or,PasswordWidget:Nr,RadioWidget:Ar,RangeWidget:Dr,SelectWidget:Fr,TextWidget:Mr,TextareaWidget:Rr,TimeWidget:Ur,UpDownWidget:Br,URLWidget:Vr}};class zr extends o.Component{constructor(e){if(super(e),this.getUsedFormData=(e,t)=>{if(0===t.length&&"object"!==typeof e)return e;const r=u()(e,t);return Array.isArray(e)?Object.keys(r).map((e=>r[e])):r},this.getFieldNames=(e,t)=>{const r=(e,n=[],o=[[]])=>(Object.keys(e).forEach((s=>{if("object"===typeof e[s]){const t=o.map((e=>[...e,s]));e[s][a.yB]&&""!==e[s][a.oS]?n.push(e[s][a.oS]):r(e[s],n,t)}else s===a.oS&&""!==e[s]&&o.forEach((e=>{const r=i()(t,e);("object"!==typeof r||l()(r)||Array.isArray(r)&&r.every((e=>"object"!==typeof e)))&&n.push(e)}))})),n);return r(e)},this.onChange=(e,t,r)=>{const{extraErrors:n,omitExtraData:o,liveOmit:s,noValidate:i,liveValidate:c,onChange:l}=this.props,{schemaUtils:d,schema:u,retrievedSchema:f}=this.state;if((0,a.Gv)(e)||Array.isArray(e)){e=this.getStateFromProps(this.props,e,f).formData}const p=!i&&c;let m,h={formData:e,schema:u},y=e;if(!0===o&&!0===s){m=d.retrieveSchema(u,e);const t=d.toPathSchema(m,"",e),r=this.getFieldNames(t,e);y=this.getUsedFormData(e,r),h={formData:y}}if(p){const e=this.validate(y,u,d,f);let t=e.errors,r=e.errorSchema;const o=t,s=r;if(n){const o=(0,a.k6)(e,n);r=o.errorSchema,t=o.errors}h={formData:y,errors:t,errorSchema:r,schemaValidationErrors:o,schemaValidationErrorSchema:s}}else if(!i&&t){const e=n?(0,a.rL)(t,n,"preventDuplicates"):t;h={formData:y,errorSchema:e,errors:(0,a.SL)(e)}}m&&(h.retrievedSchema=m),this.setState(h,(()=>l&&l({...this.state,...h},r)))},this.reset=()=>{const{onChange:e}=this.props,t={formData:this.getStateFromProps(this.props,void 0).formData,errorSchema:{},errors:[],schemaValidationErrors:[],schemaValidationErrorSchema:{}};this.setState(t,(()=>e&&e({...this.state,...t})))},this.onBlur=(e,t)=>{const{onBlur:r}=this.props;r&&r(e,t)},this.onFocus=(e,t)=>{const{onFocus:r}=this.props;r&&r(e,t)},this.onSubmit=e=>{if(e.preventDefault(),e.target!==e.currentTarget)return;e.persist();const{omitExtraData:t,extraErrors:r,noValidate:n,onSubmit:o}=this.props;let{formData:s}=this.state;const{schema:i,schemaUtils:c}=this.state;if(!0===t){const e=c.retrieveSchema(i,s),t=c.toPathSchema(e,"",s),r=this.getFieldNames(t,s);s=this.getUsedFormData(s,r)}if(n||this.validateForm()){const t=r||{},n=r?(0,a.SL)(r):[];this.setState({formData:s,errors:n,errorSchema:t,schemaValidationErrors:[],schemaValidationErrorSchema:{}},(()=>{o&&o({...this.state,formData:s,status:"submitted"},e)}))}},this.submit=()=>{if(this.formElement.current){const e=new CustomEvent("submit",{cancelable:!0});e.preventDefault(),this.formElement.current.dispatchEvent(e),this.formElement.current.requestSubmit()}},!e.validator)throw new Error("A validator is required for Form functionality to work");this.state=this.getStateFromProps(e,e.formData),this.props.onChange&&!(0,a.c2)(this.state.formData,this.props.formData)&&this.props.onChange(this.state),this.formElement=(0,o.createRef)()}getSnapshotBeforeUpdate(e,t){if(!(0,a.c2)(this.props,e)){const r=!(0,a.c2)(e.schema,this.props.schema),n=!(0,a.c2)(e.formData,this.props.formData),o=this.getStateFromProps(this.props,this.props.formData,r||n?void 
0:this.state.retrievedSchema,r);return{nextState:o,shouldUpdate:!(0,a.c2)(o,t)}}return{shouldUpdate:!1}}componentDidUpdate(e,t,r){if(r.shouldUpdate){const{nextState:e}=r;(0,a.c2)(e.formData,this.props.formData)||(0,a.c2)(e.formData,t.formData)||!this.props.onChange||this.props.onChange(e),this.setState(e)}}getStateFromProps(e,t,r,n=!1){const o=this.state||{},s="schema"in e?e.schema:this.props.schema,i=("uiSchema"in e?e.uiSchema:this.props.uiSchema)||{},c="undefined"!==typeof t,l="liveValidate"in e?e.liveValidate:this.props.liveValidate,d=c&&!e.noValidate&&l,u=s,f="experimental_defaultFormStateBehavior"in e?e.experimental_defaultFormStateBehavior:this.props.experimental_defaultFormStateBehavior;let p=o.schemaUtils;p&&!p.doesSchemaUtilsDiffer(e.validator,u,f)||(p=(0,a.BP)(e.validator,u,f));const m=p.getDefaultFormState(s,t),h=null!==r&&void 0!==r?r:p.retrieveSchema(s,m);let y,g,v=o.schemaValidationErrors,$=o.schemaValidationErrorSchema;if(d){const e=this.validate(m,s,p,h);y=e.errors,g=e.errorSchema,v=y,$=g}else{const t=e.noValidate||n?{errors:[],errorSchema:{}}:e.liveValidate?{errors:o.errors||[],errorSchema:o.errorSchema||{}}:{errors:o.schemaValidationErrors||[],errorSchema:o.schemaValidationErrorSchema||{}};y=t.errors,g=t.errorSchema}if(e.extraErrors){const t=(0,a.k6)({errorSchema:g,errors:y},e.extraErrors);g=t.errorSchema,y=t.errors}const b=p.toIdSchema(h,i["ui:rootFieldId"],m,e.idPrefix,e.idSeparator);return{schemaUtils:p,schema:s,uiSchema:i,idSchema:b,formData:m,edit:c,errors:y,errorSchema:g,schemaValidationErrors:v,schemaValidationErrorSchema:$,retrievedSchema:h}}shouldComponentUpdate(e,t){return(0,a.F9)(this,e,t)}validate(e,t=this.props.schema,r,n){const o=r||this.state.schemaUtils,{customValidate:a,transformErrors:s,uiSchema:i}=this.props,c=null!==n&&void 0!==n?n:o.retrieveSchema(t,e);return o.getValidator().validateFormData(e,c,a,s,i)}renderErrors(e){const{errors:t,errorSchema:r,schema:o,uiSchema:s}=this.state,{formContext:i}=this.props,c=(0,a.$R)(s),l=(0,a.$F)("ErrorListTemplate",e,c);return t&&t.length?(0,n.jsx)(l,{errors:t,errorSchema:r||{},schema:o,uiSchema:s,formContext:i,registry:e}):null}getRegistry(){var e;const{translateString:t,uiSchema:r={}}=this.props,{schemaUtils:n}=this.state,{fields:o,templates:s,widgets:i,formContext:c,translateString:l}={fields:qt(),templates:mr(),widgets:qr(),rootSchema:{},formContext:{},translateString:a.qe};return{fields:{...o,...this.props.fields},templates:{...s,...this.props.templates,ButtonTemplates:{...s.ButtonTemplates,...null===(e=this.props.templates)||void 0===e?void 0:e.ButtonTemplates}},widgets:{...i,...this.props.widgets},rootSchema:this.props.schema,formContext:this.props.formContext||c,schemaUtils:n,translateString:t||l,globalUiOptions:r[a.AK]}}focusOnError(e){const{idPrefix:t="root",idSeparator:r="_"}=this.props,{property:n}=e,o=p()(n);""===o[0]?o[0]=t:o.unshift(t);const a=o.join(r);let s=this.formElement.current.elements[a];s||(s=this.formElement.current.querySelector(`input[id^=${a}`)),s&&s.length&&(s=s[0]),s&&s.focus()}validateForm(){const{extraErrors:e,extraErrorsBlockSubmit:t,focusOnFirstError:r,onError:n}=this.props,{formData:o,errors:s}=this.state,i=this.validate(o);let c=i.errors,l=i.errorSchema;const d=c,u=l,f=c.length>0||e&&t;if(f){if(e){const t=(0,a.k6)(i,e);l=t.errorSchema,c=t.errors}r&&("function"===typeof r?r(c[0]):this.focusOnError(c[0])),this.setState({errors:c,errorSchema:l,schemaValidationErrors:d,schemaValidationErrorSchema:u},(()=>{n?n(c):console.error("Form validation failed",c)}))}else 
s.length>0&&this.setState({errors:[],errorSchema:{},schemaValidationErrors:[],schemaValidationErrorSchema:{}});return!f}render(){const{children:e,id:t,idPrefix:r,idSeparator:o,className:s="",tagName:i,name:c,method:l,target:d,action:u,autoComplete:f,enctype:p,acceptcharset:m,noHtml5Validate:h=!1,disabled:y=!1,readonly:g=!1,formContext:v,showErrorList:$="top",_internalFormWrapper:b}=this.props,{schema:w,uiSchema:S,formData:_,errorSchema:x,idSchema:E}=this.state,k=this.getRegistry(),{SchemaField:j}=k.fields,{SubmitButton:C}=k.templates.ButtonTemplates,P=b?i:void 0,O=b||i||"form";let{[a.xh]:N={}}=(0,a.$R)(S);y&&(N={...N,props:{...N.props,disabled:!0}});const A={[a.ce]:{[a.xh]:N}};return(0,n.jsxs)(O,{className:s||"rjsf",id:t,name:c,method:l,target:d,action:u,autoComplete:f,encType:p,acceptCharset:m,noValidate:h,onSubmit:this.onSubmit,as:P,ref:this.formElement,children:["top"===$&&this.renderErrors(k),(0,n.jsx)(j,{name:"",schema:w,uiSchema:S,errorSchema:x,idSchema:E,idPrefix:r,idSeparator:o,formContext:v,formData:_,onChange:this.onChange,onBlur:this.onBlur,onFocus:this.onFocus,registry:k,disabled:y,readonly:g}),e||(0,n.jsx)(C,{uiSchema:A,registry:k}),"bottom"===$&&this.renderErrors(k)]})}}const Lr=zr},78239:(e,t,r)=>{"use strict";function n(e){return!("undefined"!==typeof File&&e instanceof File)&&(!("undefined"!==typeof Date&&e instanceof Date)&&("object"===typeof e&&null!==e&&!Array.isArray(e)))}function o(e){return!0===e.additionalItems&&console.warn("additionalItems=true is currently not supported"),n(e.additionalItems)}function a(e){if(""===e)return;if(null===e)return null;if(/\.$/.test(e))return e;if(/\.0$/.test(e))return e;if(/\.\d*0$/.test(e))return e;const t=Number(e);return"number"===typeof t&&!Number.isNaN(t)?t:e}r.d(t,{Rr:()=>s,Xo:()=>l,s2:()=>m,K0:()=>h,ZN:()=>g,oS:()=>v,Ru:()=>$,s1:()=>b,Es:()=>_,yB:()=>x,at:()=>E,xh:()=>S,Zl:()=>Zt,AK:()=>C,ce:()=>j,Hh:()=>o,eG:()=>Rt,i:()=>a,Xl:()=>O,JF:()=>D,BP:()=>at,zM:()=>st,c2:()=>T,IR:()=>At,qe:()=>it,Uw:()=>lt,Yu:()=>ut,BH:()=>dt,L4:()=>mt,Od:()=>ct,rD:()=>Dt,$c:()=>It,dO:()=>vt,NV:()=>Ze,KU:()=>Se,ti:()=>bt,_I:()=>ke,Bj:()=>St,$F:()=>_t,$R:()=>P,Bt:()=>Ct,Lw:()=>Ot,$K:()=>Pt,F5:()=>Ft,ar:()=>Je,nQ:()=>Ve,Gv:()=>n,w:()=>Ut,z$:()=>Vt,rL:()=>qe,dW:()=>je,pk:()=>Mt,f9:()=>Bt,UI:()=>qt,eV:()=>zt,Rm:()=>Lt,l1:()=>Kt,F9:()=>Wt,EH:()=>Tt,HN:()=>Ht,SL:()=>Gt,MD:()=>Qt,fV:()=>Xt,v4:()=>er,k6:()=>tr,vh:()=>rr});const s="__additional_property",i="additionalProperties",c="allOf",l="anyOf",d="const",u="default",f="dependencies",p="enum",m="__errors",h="$id",y="if",g="items",v="$name",$="oneOf",b="properties",w="required",S="submitButtonOptions",_="$ref",x="__rjsf_additionalProperties",E="__rjsf_rootSchema",k="ui:widget",j="ui:options",C="ui:globalOptions";function P(e={},t={}){return Object.keys(e).filter((e=>0===e.indexOf("ui:"))).reduce(((t,r)=>{const o=e[r];return r===k&&n(o)?(console.error("Setting options via ui:widget object is no longer supported, use ui:options instead"),t):r===j&&n(o)?{...t,...o}:{...t,[r.substring(3)]:o}}),{...t})}function O(e,t={},r){if(!e.additionalProperties)return!1;const{expandable:n=!0}=P(t);return!1===n?n:void 0===e.maxProperties||!r||Object.keys(r).length({...e,[r]:D(t)})),t);if(A()(e)){const r=e;return Object.keys(r).reduce(((e,t)=>({...e,[t]:D(r[t])})),t)}return t}var I=r(29132),F=r.n(I);function T(e,t){return F()(e,t,((e,t)=>{if("function"===typeof e&&"function"===typeof t)return!0}))}var R=r(58156),M=r.n(R),U=r(62193),V=r.n(U),B=r(56239),q=r(90179),z=r.n(q);function L(e,t){const 
r=t[e];return[z()(t,[e]),r]}function K(e,t={},r=[]){const n=e||"";let o;if(!n.startsWith("#"))throw new Error(`Could not find a definition for ${e}.`);o=decodeURIComponent(n.substring(1));const a=B.get(t,o);if(void 0===a)throw new Error(`Could not find a definition for ${e}.`);const s=a[_];if(s){if(r.includes(s)){if(1===r.length)throw new Error(`Definition for ${e} is a circular reference`);const[t,...o]=r,a=[...o,n,t].join(" -> ");throw new Error(`Definition for ${t} contains a circular reference through ${a}`)}const[o,i]=L(_,a),c=K(i,t,[...r,n]);return Object.keys(o).length>0?{...o,...c}:c}return a}function W(e,t={}){return K(e,t,[])}var H=r(61448),G=r.n(H),Z=r(98023),J=r.n(Z),Y=r(23805),Q=r.n(Y),X=r(85015),ee=r.n(X),te=r(40860),re=r.n(te),ne=r(6638),oe=r.n(ne);function ae(e,t,r){var n;if(e&&r){const o=M()(e,r);if(void 0===o)return;for(let e=0;e({required:[e]})))};let o;if(a.anyOf){const{...e}=a;e.allOf?e.allOf=e.allOf.slice():e.allOf=[],e.allOf.push(r),o=e}else o=Object.assign({},a,r);if(delete o.required,e.isValid(o,t,n))return s}else if(e.isValid(a,t,n))return s}return 0}function ie(e,t,r,n,o){return se(e,t,r,n,o)}var ce=r(2404),le=r.n(ce),de=r(63560),ue=r.n(de),fe=r(69752),pe=r.n(fe),me=r(55364),he=r.n(me),ye=r(3176),ge=r.n(ye),ve=r(63375),$e=r.n(ve),be=r(33978),we=r.n(be);function Se(e){let t;const r=M()(e,"discriminator.propertyName",void 0);return ee()(r)?t=r:void 0!==r&&console.warn(`Expecting discriminator to be a string, got "${typeof r}" instead`),t}function _e(e){return Array.isArray(e)?"array":"string"===typeof e?"string":null==e?"null":"boolean"===typeof e?"boolean":isNaN(e)?"object"===typeof e?"object":"string":"number"}var xe=r(80299),Ee=r.n(xe);function ke(e){let{type:t}=e;return!t&&e.const?_e(e.const):!t&&e.enum?"string":t||!e.properties&&!e.additionalProperties?(Array.isArray(t)&&(t=2===t.length&&t.includes("null")?t.find((e=>"null"!==e)):t[0]),t):"object"}function je(e,t){const r=Object.assign({},e);return Object.keys(t).reduce(((r,o)=>{const a=e?e[o]:{},s=t[o];return e&&o in e&&n(s)?r[o]=je(a,s):e&&t&&("object"===ke(e)||"object"===ke(t))&&o===w&&Array.isArray(a)&&Array.isArray(s)?r[o]=Ee()(a,s):r[o]=s,r}),r)}function Ce(e,t,r={},n){return Ae(e,t,r,n)[0]}function Pe(e){return e.reduce(((e,t)=>t.length>1?t.flatMap((t=>oe()(e.length,(r=>[...e[r]].concat(t))))):(e.forEach((e=>e.push(t[0]))),e)),[[]])}function Oe(e,t,r,n,o,a){const s=Ne(t,r,o);return s!==t?Ae(e,s,r,a,n,o):[t]}function Ne(e,t,r){if(!n(e))return e;let o=e;if(_ in o){const{$ref:e,...n}=o;if(r.includes(e))return o;r.push(e);o={...W(e,t),...n}}if(b in o){const e=[],n=pe()(o[b],((n,o,a)=>{const s=[...r];n[a]=Ne(o,t,s),e.push(s)}),{});he()(r,$e()(ge()(e))),o={...o,[b]:n}}return g in o&&!Array.isArray(o.items)&&"boolean"!==typeof o.items&&(o={...o,items:Ne(o.items,t,r)}),le()(e,o)?e:o}function Ae(e,t,r,o,a=!1,d=[]){if(!n(t))return[{}];const u=function(e,t,r,n,o,a){const s=Oe(e,t,r,n,o,a);if(s.length>1||s[0]!==t)return s;if(f in t)return De(e,t,r,n,o,a).flatMap((t=>Ae(e,t,r,a,n,o)));if(c in t&&Array.isArray(t.allOf))return Pe(t.allOf.map((t=>Ae(e,t,r,a,n,o)))).map((e=>({...t,allOf:e})));return[t]}(e,t,r,a,d,o);return u.flatMap((t=>{let u=t;if(y in u)return function(e,t,r,n,o,a){const{if:s,then:i,else:c,...l}=t,d=e.isValid(s,a||{},r);let u=[l],f=[];if(n)i&&"boolean"!==typeof i&&(f=f.concat(Ae(e,i,r,a,n,o))),c&&"boolean"!==typeof c&&(f=f.concat(Ae(e,c,r,a,n,o)));else{const t=d?i:c;t&&"boolean"!==typeof t&&(f=f.concat(Ae(e,t,r,a,n,o)))}return 
f.length&&(u=f.map((e=>je(l,e)))),u.flatMap((t=>Ae(e,t,r,a,n,o)))}(e,u,r,a,d,o);if(c in u){if(a){const{allOf:e,...t}=u;return[...e,t]}try{u=we()(u,{deep:!1})}catch(f){console.warn("could not merge subschemas in allOf:\n",f);const{allOf:e,...t}=u;return t}}return i in u&&!1!==u.additionalProperties?function(e,t,r,o){const a={...t,properties:{...t.properties}},i=o&&n(o)?o:{};return Object.keys(i).forEach((t=>{if(t in a.properties)return;let n={};n="boolean"!==typeof a.additionalProperties?_ in a.additionalProperties?Ce(e,{$ref:M()(a.additionalProperties,[_])},r,i):"type"in a.additionalProperties?{...a.additionalProperties}:l in a.additionalProperties||$ in a.additionalProperties?{type:"object",...a.additionalProperties}:{type:_e(M()(i,[t]))}:{type:_e(M()(i,[t]))},a.properties[t]=n,ue()(a.properties,[t,s],!0)})),a}(e,u,r,o):u}))}function De(e,t,r,n,o,a){const{dependencies:s,...i}=t,c=function(e,t,r,n,o){let a;const{oneOf:s,anyOf:i,...c}=t;if(Array.isArray(s)?a=s:Array.isArray(i)&&(a=i),a){const s=void 0===o&&n?{}:o,i=Se(t);a=a.map((e=>Ne(e,r,[])));const l=ie(e,s,a,r,i);if(n)return a.map((e=>je(c,e)));t=je(c,a[l])}return[t]}(e,i,r,n,a);return c.flatMap((t=>Ie(e,s,t,r,n,o,a)))}function Ie(e,t,r,o,a,s,i){let c=[r];for(const l in t){if(!a&&void 0===M()(i,[l]))continue;if(r.properties&&!(l in r.properties))continue;const[d,u]=L(l,t);return Array.isArray(u)?c[0]=Fe(r,u):n(u)&&(c=Te(e,r,o,l,u,a,s,i)),c.flatMap((t=>Ie(e,d,t,o,a,s,i)))}return c}function Fe(e,t){if(!t)return e;const r=Array.isArray(e.required)?Array.from(new Set([...e.required,...t])):t;return{...e,required:r}}function Te(e,t,r,n,o,a,s,i){return Ae(e,o,r,i,a,s).flatMap((o=>{const{oneOf:c,...l}=o;if(t=je(t,l),void 0===c)return t;return Pe(c.map((t=>"boolean"!==typeof t&&_ in t?Oe(e,t,r,a,s,i):[t]))).flatMap((o=>function(e,t,r,n,o,a,s,i){const c=o.filter((t=>{if("boolean"===typeof t||!t||!t.properties)return!1;const{[n]:o}=t.properties;if(o){const t={type:"object",properties:{[n]:o}};return e.isValid(t,i,r)||a}return!1}));if(!a&&1!==c.length)return console.warn("ignoring oneOf in dependencies because there isn't exactly one subschema that is valid"),[t];return c.flatMap((o=>{const c=o,[l]=L(n,c.properties),d={...c,properties:l};return Ae(e,d,r,i,a,s).map((e=>je(t,e)))}))}(e,t,r,n,o,a,s,i)))}))}const Re={type:"object",$id:"_$junk_option_schema_id$_",properties:{__not_really_there__:{type:"number"}}};function Me(e,t,r,n={}){let o=0;return r&&(Q()(r.properties)?o+=re()(r.properties,((r,o,a)=>{const s=M()(n,a);if("boolean"===typeof o)return r;if(G()(o,_)){const n=Ce(e,o,t,s);return r+Me(e,t,n,s||{})}if((G()(o,$)||G()(o,l))&&s){const n=G()(o,$)?$:l,a=Se(o);return r+Ue(e,t,s,M()(o,n),-1,a)}if("object"===o.type)return r+Me(e,t,o,s||{});if(o.type===_e(s)){let e=r+1;return o.default?e+=s===o.default?1:-1:o.const&&(e+=s===o.const?1:-1),e}return r}),0):ee()(r.type)&&r.type===_e(n)&&(o+=1)),o}function Ue(e,t,r,n,o=-1,a){const s=n.map((e=>Ne(e,t,[]))),i=ae(r,n,a);if(J()(i))return i;const c=s.reduce(((n,o,s)=>(1===ie(e,r,[Re,o],t,a)&&n.push(s),n)),[]);if(1===c.length)return c[0];c.length||oe()(s.length,(e=>c.push(e)));const l=new Set,{bestIndex:d}=c.reduce(((n,o)=>{const{bestScore:a}=n,i=s[o],c=Me(e,t,i,r);return l.add(c),c>a?{bestIndex:o,bestScore:c}:n}),{bestIndex:o,bestScore:0});return 1===l.size&&o>=0?o:d}function Ve(e){return Array.isArray(e.items)&&e.items.length>0&&e.items.every((e=>n(e)))}function Be(e,t,r=!1){if(Array.isArray(t)){const n=Array.isArray(e)?e:[],o=t.map(((e,t)=>n[t]?Be(n[t],e,r):e));return 
r&&o.length(n[o]=Be(e?M()(e,o):{},M()(t,o),r),n)),n)}return t}function qe(e,t,r=!1){return Object.keys(t).reduce(((o,a)=>{const s=e?e[a]:{},i=t[a];if(e&&a in e&&n(i))o[a]=qe(s,i,r);else if(r&&Array.isArray(s)&&Array.isArray(i)){let e=i;"preventDuplicates"===r&&(e=i.reduce(((e,t)=>(s.includes(t)||e.push(t),e)),[])),o[a]=s.concat(e)}else o[a]=i;return o}),Object.assign({},e))}function ze(e,t,r={}){const n=Ce(e,t,r,void 0),o=n.oneOf||n.anyOf;return!!Array.isArray(n.enum)||!!Array.isArray(o)&&o.every((e=>"boolean"!==typeof e&&function(e){return Array.isArray(e.enum)&&1===e.enum.length||d in e}(e)))}function Le(e,t,r){return!(!t.uniqueItems||!t.items||"boolean"===typeof t.items)&&ze(e,t.items,r)}var Ke;function We(e,t=Ke.Ignore,r=-1){if(r>=0){if(Array.isArray(e.items)&&rGe(e,t,{rootSchema:a,includeUndefinedValues:s,_recurseList:i,experimental_defaultFormStateBehavior:d,parentDefaults:Array.isArray(r)?r[n]:void 0,rawFormData:v,required:p})));else if($ in w){const{oneOf:t,...r}=w;if(0===t.length)return;const n=Se(w);x=t[Ue(e,a,V()(v)?void 0:v,t,0,n)],x=je(r,x)}else if(l in w){const{anyOf:t,...r}=w;if(0===t.length)return;const n=Se(w);x=t[Ue(e,a,V()(v)?void 0:v,t,0,n)],x=je(r,x)}if(x)return Ge(e,x,{rootSchema:a,includeUndefinedValues:s,_recurseList:E,experimental_defaultFormStateBehavior:d,parentDefaults:S,rawFormData:v,required:p});switch(void 0===S&&(S=w.default),ke(w)){case"object":{const t="populateDefaults"===(null===d||void 0===d?void 0:d.allOf)&&c in w?Ce(e,w,a,v):w,r=Object.keys(t.properties||{}).reduce(((r,n)=>{var o;return He(r,n,Ge(e,M()(t,[b,n]),{rootSchema:a,_recurseList:i,experimental_defaultFormStateBehavior:d,includeUndefinedValues:!0===s,parentDefaults:M()(S,[n]),rawFormData:M()(v,[n]),required:null===(o=t.required)||void 0===o?void 0:o.includes(n)}),s,p,t.required,d),r}),{});if(t.additionalProperties){const o=n(t.additionalProperties)?t.additionalProperties:{},c=new Set;n(S)&&Object.keys(S).filter((e=>!t.properties||!t.properties[e])).forEach((e=>c.add(e)));const l=[];Object.keys(v).filter((e=>!t.properties||!t.properties[e])).forEach((e=>{c.add(e),l.push(e)})),c.forEach((n=>{var c;const u=Ge(e,o,{rootSchema:a,_recurseList:i,experimental_defaultFormStateBehavior:d,includeUndefinedValues:!0===s,parentDefaults:M()(S,[n]),rawFormData:M()(v,[n]),required:null===(c=t.required)||void 0===c?void 0:c.includes(n)});He(r,n,u,s,p,l)}))}return r}case"array":{const t="never"===(null===(m=null===d||void 0===d?void 0:d.arrayMinItems)||void 0===m?void 0:m.populate),r="requiredOnly"===(null===(h=null===d||void 0===d?void 0:d.arrayMinItems)||void 0===h?void 0:h.populate),n="skipEmptyDefaults"===(null===d||void 0===d?void 0:d.emptyObjectFields),s=null!==(g=null===(y=null===d||void 0===d?void 0:d.arrayMinItems)||void 0===y?void 0:y.computeSkipPopulate)&&void 0!==g?g:()=>!1,c=n?void 0:[];if(Array.isArray(S)&&(S=S.map(((t,r)=>{const n=We(w,Ke.Fallback,r);return Ge(e,n,{rootSchema:a,_recurseList:i,experimental_defaultFormStateBehavior:d,parentDefaults:t,required:p})}))),Array.isArray(o)){const r=We(w);S=t?o:o.map(((t,n)=>Ge(e,r,{rootSchema:a,_recurseList:i,experimental_defaultFormStateBehavior:d,rawFormData:t,parentDefaults:M()(S,[n]),required:p})))}if(t)return null!==S&&void 0!==S?S:c;if(r&&!p)return S||void 0;const l=Array.isArray(S)?S.length:0;if(!w.minItems||Le(e,w,a)||s(e,w,a)||w.minItems<=l)return S||c;const u=S||[],f=We(w,Ke.Invert),v=f.default,$=new Array(w.minItems-l).fill(Ge(e,f,{parentDefaults:v,rootSchema:a,_recurseList:i,experimental_defaultFormStateBehavior:d,required:p}));return 
u.concat($)}}return S}function Ze(e,t,r,o,a=!1,s){if(!n(t))throw new Error("Invalid schema: "+t);const i=Ge(e,Ce(e,t,o,r),{rootSchema:o,includeUndefinedValues:a,experimental_defaultFormStateBehavior:s,rawFormData:r});if(void 0===r||null===r||"number"===typeof r&&isNaN(r))return i;const{mergeExtraDefaults:c}=(null===s||void 0===s?void 0:s.arrayMinItems)||{};return n(r)||Array.isArray(r)?Be(i,r,c):r}function Je(e={}){return"widget"in P(e)&&"hidden"!==P(e).widget}function Ye(e,t,r={},n){if("files"===r[k])return!0;if(t.items){const r=Ce(e,t.items,n);return"string"===r.type&&"data-url"===r.format}return!1}!function(e){e[e.Ignore=0]="Ignore",e[e.Invert=1]="Invert",e[e.Fallback=2]="Fallback"}(Ke||(Ke={}));const Qe=Symbol("no Value");function Xe(e,t,r,n,o={}){let a;if(G()(r,b)){const s={};if(G()(n,b)){const e=M()(n,b,{});Object.keys(e).forEach((e=>{G()(o,e)&&(s[e]=void 0)}))}const i=Object.keys(M()(r,b,{})),c={};i.forEach((a=>{const i=M()(o,a);let l=M()(n,[b,a],{}),d=M()(r,[b,a],{});G()(l,_)&&(l=Ce(e,l,t,i)),G()(d,_)&&(d=Ce(e,d,t,i));const u=M()(l,"type"),f=M()(d,"type");if(!u||u===f)if(G()(s,a)&&delete s[a],"object"===f||"array"===f&&Array.isArray(i)){const r=Xe(e,t,d,l,i);void 0===r&&"array"!==f||(c[a]=r)}else{const e=M()(d,"default",Qe),t=M()(l,"default",Qe);e!==Qe&&e!==i&&(t===i?s[a]=e:!0===M()(d,"readOnly")&&(s[a]=void 0));const r=M()(d,"const",Qe),n=M()(l,"const",Qe);r!==Qe&&r!==i&&(s[a]=n===i?r:void 0)}})),a={..."string"==typeof o||Array.isArray(o)?void 0:o,...s,...c}}else if("array"===M()(n,"type")&&"array"===M()(r,"type")&&Array.isArray(o)){let s=M()(n,"items"),i=M()(r,"items");if("object"!==typeof s||"object"!==typeof i||Array.isArray(s)||Array.isArray(i))"boolean"===typeof s&&"boolean"===typeof i&&s===i&&(a=o);else{G()(s,_)&&(s=Ce(e,s,t,o)),G()(i,_)&&(i=Ce(e,i,t,o));const n=M()(s,"type"),c=M()(i,"type");if(!n||n===c){const n=M()(r,"maxItems",-1);a="object"===c?o.reduce(((r,o)=>{const a=Xe(e,t,i,s,o);return void 0!==a&&(n<0||r.length0&&o.length>n?o.slice(0,n):o}}}return a}function et(e,t,r,o,a,s,i,l=[]){if(_ in t||f in t||c in t){const n=Ce(e,t,s,i);if(-1===l.findIndex((e=>le()(e,n))))return et(e,n,r,o,a,s,i,l.concat(n))}if(g in t&&!M()(t,[g,_]))return et(e,M()(t,g),r,o,a,s,i,l);const d={$id:a||r};if("object"===ke(t)&&b in t)for(const c in t.properties){const a=M()(t,[b,c]),u=d[h]+o+c;d[c]=et(e,n(a)?a:{},r,o,u,s,M()(i,[c]),l)}return d}function tt(e,t,r,n,o,a="root",s="_"){return et(e,t,a,s,r,n,o)}function rt(e,t,r,n,o,a=[]){if(_ in t||f in t||c in t){const s=Ce(e,t,n,o);if(-1===a.findIndex((e=>le()(e,s))))return rt(e,s,r,n,o,a.concat(s))}let s={[v]:r.replace(/^\./,"")};if($ in t||l in t){const i=$ in t?t.oneOf:t.anyOf,c=Se(t),l=i[Ue(e,n,o,i,0,c)];s={...s,...rt(e,l,r,n,o,a)}}if(i in t&&!1!==t[i]&&ue()(s,x,!0),g in t&&Array.isArray(o)){const{items:i,additionalItems:c}=t;Array.isArray(i)?o.forEach(((t,o)=>{i[o]?s[o]=rt(e,i[o],`${r}.${o}`,n,t,a):c?s[o]=rt(e,c,`${r}.${o}`,n,t,a):console.warn(`Unable to generate path schema for "${r}.${o}". 
No schema defined for it`)})):o.forEach(((t,o)=>{s[o]=rt(e,i,`${r}.${o}`,n,t,a)}))}else if(b in t)for(const i in t.properties){const c=M()(t,[b,i]);s[i]=rt(e,c,`${r}.${i}`,n,M()(o,[i]),a)}return s}function nt(e,t,r="",n,o){return rt(e,t,r,n,o)}class ot{constructor(e,t,r){this.rootSchema=t,this.validator=e,this.experimental_defaultFormStateBehavior=r}getValidator(){return this.validator}doesSchemaUtilsDiffer(e,t,r={}){return!(!e||!t)&&(this.validator!==e||!T(this.rootSchema,t)||!T(this.experimental_defaultFormStateBehavior,r))}getDefaultFormState(e,t,r=!1){return Ze(this.validator,e,t,this.rootSchema,r,this.experimental_defaultFormStateBehavior)}getDisplayLabel(e,t,r){return function(e,t,r={},n,o){const a=P(r,o),{label:s=!0}=a;let i=!!s;const c=ke(t);return"array"===c&&(i=Le(e,t,n)||Ye(e,t,r,n)||Je(r)),"object"===c&&(i=!1),"boolean"!==c||r[k]||(i=!1),r["ui:field"]&&(i=!1),i}(this.validator,e,t,this.rootSchema,r)}getClosestMatchingOption(e,t,r,n){return Ue(this.validator,this.rootSchema,e,t,r,n)}getFirstMatchingOption(e,t,r){return ie(this.validator,e,t,this.rootSchema,r)}getMatchingOption(e,t,r){return se(this.validator,e,t,this.rootSchema,r)}isFilesArray(e,t){return Ye(this.validator,e,t,this.rootSchema)}isMultiSelect(e){return Le(this.validator,e,this.rootSchema)}isSelect(e){return ze(this.validator,e,this.rootSchema)}mergeValidationData(e,t){return function(e,t,r){if(!r)return t;const{errors:n,errorSchema:o}=t;let a=e.toErrorList(r),s=r;return V()(o)||(s=qe(o,r,!0),a=[...n].concat(a)),{errorSchema:s,errors:a}}(this.validator,e,t)}retrieveSchema(e,t){return Ce(this.validator,e,this.rootSchema,t)}sanitizeDataForNewSchema(e,t,r){return Xe(this.validator,this.rootSchema,e,t,r)}toIdSchema(e,t,r,n="root",o="_"){return tt(this.validator,e,t,this.rootSchema,r,n,o)}toPathSchema(e,t,r){return nt(this.validator,e,t,this.rootSchema,r)}}function at(e,t,r={}){return new ot(e,t,r)}function st(e){var t;if(-1===e.indexOf("data:"))throw new Error("File is invalid: URI must be a dataURI");const r=e.slice(5).split(";base64,");if(2!==r.length)throw new Error("File is invalid: dataURI must be base64");const[n,o]=r,[a,...s]=n.split(";"),i=a||"",c=decodeURI((null===(t=s.map((e=>e.split("="))).find((([e])=>"name"===e)))||void 0===t?void 0:t[1])||"unknown");try{const e=atob(o),t=new Array(e.length);for(let r=0;r{const n=e.findIndex((e=>e===`%${r+1}`));n>=0&&(e[n]=t)})),r=e.join("")}return r}(e,t)}function ct(e,t=[],r){if(Array.isArray(e))return e.map((e=>ct(e,t))).filter((e=>e!==r));const n=""===e||null===e?-1:Number(e),o=t[n];return o?o.value:r}function lt(e,t,r=[]){const n=ct(e,r);return Array.isArray(t)?t.filter((e=>!le()(e,n))):le()(n,t)?void 0:t}function dt(e,t){return Array.isArray(t)?t.some((t=>le()(t,e))):le()(t,e)}function ut(e,t=[],r=!1){const n=t.map(((t,r)=>dt(t.value,e)?String(r):void 0)).filter((e=>"undefined"!==typeof e));return r?n:n[0]}var ft=r(69843),pt=r.n(ft);function mt(e,t,r=[]){const n=ct(e,r);if(!pt()(n)){const e=r.findIndex((e=>n===e.value)),o=r.map((({value:e})=>e));return t.slice(0,e).concat(n,t.slice(e)).sort(((e,t)=>Number(o.indexOf(e)>o.indexOf(t))))}return t}var ht=r(88055),yt=r.n(ht);class gt{constructor(e){this.errorSchema={},this.resetAllErrors(e)}get ErrorSchema(){return this.errorSchema}getOrCreateErrorBlock(e){let t=Array.isArray(e)&&e.length>0||"string"===typeof e?M()(this.errorSchema,e):this.errorSchema;return!t&&e&&(t={},ue()(this.errorSchema,e,t)),t}resetAllErrors(e){return this.errorSchema=e?yt()(e):{},this}addErrors(e,t){const r=this.getOrCreateErrorBlock(t);let 
n=M()(r,m);return Array.isArray(n)||(n=[],r[m]=n),Array.isArray(e)?n.push(...e):n.push(e),this}setErrors(e,t){const r=this.getOrCreateErrorBlock(t),n=Array.isArray(e)?[...e]:[e];return ue()(r,m,n),this}clearErrors(e){const t=this.getOrCreateErrorBlock(e);return ue()(t,m,[]),this}}function vt(e,t,r=[1900,(new Date).getFullYear()+2],n="YMD"){const{day:o,month:a,year:s,hour:i,minute:c,second:l}=e,d={type:"day",range:[1,31],value:o},u={type:"month",range:[1,12],value:a},f={type:"year",range:r,value:s},p=[];switch(n){case"MDY":p.push(u,d,f);break;case"DMY":p.push(d,u,f);break;default:p.push(f,u,d)}return t&&p.push({type:"hour",range:[0,23],value:i},{type:"minute",range:[0,59],value:c},{type:"second",range:[0,59],value:l}),p}function $t(e){const t={};return e.multipleOf&&(t.step=e.multipleOf),(e.minimum||0===e.minimum)&&(t.min=e.minimum),(e.maximum||0===e.maximum)&&(t.max=e.maximum),t}function bt(e,t,r={},n=!0){const o={type:t||"text",...$t(e)};return r.inputType?o.type=r.inputType:t||("number"===e.type?(o.type="number",n&&void 0===o.step&&(o.step="any")):"integer"===e.type&&(o.type="number",void 0===o.step&&(o.step=1))),r.autocomplete&&(o.autoComplete=r.autocomplete),o}const wt={props:{disabled:!1},submitText:"Submit",norender:!1};function St(e={}){const t=P(e);if(t&&t[S]){const e=t[S];return{...wt,...e}}return wt}function _t(e,t,r={}){const{templates:n}=t;return"ButtonTemplates"===e?n[e]:r[e]||n[e]}var xt=r(74848),Et=r(96540),kt=r(44363);const jt={boolean:{checkbox:"CheckboxWidget",radio:"RadioWidget",select:"SelectWidget",hidden:"HiddenWidget"},string:{text:"TextWidget",password:"PasswordWidget",email:"EmailWidget",hostname:"TextWidget",ipv4:"TextWidget",ipv6:"TextWidget",uri:"URLWidget","data-url":"FileWidget",radio:"RadioWidget",select:"SelectWidget",textarea:"TextareaWidget",hidden:"HiddenWidget",date:"DateWidget",datetime:"DateTimeWidget","date-time":"DateTimeWidget","alt-date":"AltDateWidget","alt-datetime":"AltDateTimeWidget",time:"TimeWidget",color:"ColorWidget",file:"FileWidget"},number:{text:"TextWidget",select:"SelectWidget",updown:"UpDownWidget",range:"RangeWidget",radio:"RadioWidget",hidden:"HiddenWidget"},integer:{text:"TextWidget",select:"SelectWidget",updown:"UpDownWidget",range:"RangeWidget",radio:"RadioWidget",hidden:"HiddenWidget"},array:{select:"SelectWidget",checkboxes:"CheckboxesWidget",files:"FileWidget",hidden:"HiddenWidget"}};function Ct(e,t,r={}){const n=ke(e);if("function"===typeof t||t&&kt.isForwardRef((0,Et.createElement)(t))||kt.isMemo(t))return function(e){let t=M()(e,"MergedWidget");if(!t){const r=e.defaultProps&&e.defaultProps.options||{};t=({options:t,...n})=>(0,xt.jsx)(e,{options:{...r,...t},...n}),ue()(e,"MergedWidget",t)}return t}(t);if("string"!==typeof t)throw new Error("Unsupported widget definition: "+typeof t);if(t in r){return Ct(e,r[t],r)}if("string"===typeof n){if(!(n in jt))throw new Error(`No widget for type '${n}'`);if(t in jt[n]){return Ct(e,r[jt[n][t]],r)}}throw new Error(`No widget '${t}' for type '${n}'`)}function Pt(e){const t=new Set;return JSON.stringify(e,((e,r)=>(t.add(e),r))),function(e){let t=0;for(let r=0;r({label:t.enumNames&&t.enumNames[r]||String(e),value:e})));const r=e.oneOf||e.anyOf;return r&&r.map((e=>{const t=e,r=function(e){if(p in e&&Array.isArray(e.enum)&&1===e.enum.length)return e.enum[0];if(d in e)return e.const;throw new Error("schema cannot be inferred as a constant")}(t);return{schema:t,label:t.title||String(r),value:r}}))}function qt(e,t){if(!Array.isArray(t))return e;const 
r=e=>e.reduce(((e,t)=>(e[t]=!0,e)),{}),n=r(e),o=t.filter((e=>"*"===e||n[e])),a=r(o),s=e.filter((e=>!a[e])),i=o.indexOf("*");if(-1===i){if(s.length)throw new Error("uiSchema order list does not contain "+((c=s).length>1?`properties '${c.join("', '")}'`:`property '${c[0]}'`));return o}var c;if(i!==o.lastIndexOf("*"))throw new Error("uiSchema order list contains more than one wildcard item");const l=[...o];return l.splice(i,1,...s),l}function zt(e,t){let r=String(e);for(;r.lengthKt(e);return e.allOf.some(t)}return!1}function Wt(e,t,r){const{props:n,state:o}=e;return!T(n,t)||!T(o,r)}function Ht(e,t=!0){const{year:r,month:n,day:o,hour:a=0,minute:s=0,second:i=0}=e,c=Date.UTC(r,n-1,o,a,s,i),l=new Date(c).toJSON();return t?l:l.slice(0,10)}function Gt(e,t=[]){if(!e)return[];let r=[];return m in e&&(r=r.concat(e[m].map((e=>{const r=`.${t.join(".")}`;return{property:r,message:e,stack:`${r} ${e}`}})))),Object.keys(e).reduce(((r,n)=>{if(n!==m){const o=e[n];A()(o)&&(r=r.concat(Gt(o,[...t,n])))}return r}),r)}var Zt,Jt=r(42072),Yt=r.n(Jt);function Qt(e){const t=new gt;return e.length&&e.forEach((e=>{const{property:r,message:n}=e,o="."===r?[]:Yt()(r);o.length>0&&""===o[0]&&o.splice(0,1),n&&t.addErrors(n,o)})),t.ErrorSchema}function Xt(e){return Object.keys(e).reduce(((t,r)=>{if("addError"===r)return t;{const n=e[r];return A()(n)?{...t,[r]:Xt(n)}:{...t,[r]:n}}}),{})}function er(e){if(!e)return"";const t=new Date(e);return`${zt(t.getFullYear(),4)}-${zt(t.getMonth()+1,2)}-${zt(t.getDate(),2)}T${zt(t.getHours(),2)}:${zt(t.getMinutes(),2)}:${zt(t.getSeconds(),2)}.${zt(t.getMilliseconds(),3)}`}function tr(e,t){if(!t)return e;const{errors:r,errorSchema:n}=e;let o=Gt(t),a=t;return V()(n)||(a=qe(n,t,!0),o=[...r].concat(o)),{errorSchema:a,errors:o}}function rr(e){return Array.isArray(e)?function(e){for(let t=0;t{"use strict";r.d(t,{Ay:()=>g});var n=r(78239),o=r(63282),a=r.n(o),s=r(68182),i=r.n(s),c=r(23805),l=r.n(c);const d={allErrors:!0,multipleOfPrecision:8,strict:!1,verbose:!0},u=/^(#?([0-9A-Fa-f]{3}){1,2}\b|aqua|black|blue|fuchsia|gray|green|lime|maroon|navy|olive|orange|purple|red|silver|teal|white|yellow|(rgb\(\s*\b([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\b\s*,\s*\b([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\b\s*,\s*\b([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\b\s*\))|(rgb\(\s*(\d?\d%|100%)+\s*,\s*(\d?\d%|100%)+\s*,\s*(\d?\d%|100%)+\s*\)))$/,f=/^data:([a-z]+\/[a-z0-9-+.]+)?;(?:name=(.*);)?base64,(.*)$/;var p=r(58156),m=r.n(p);function h(e,t,r,o,a,s,i){const{validationError:c}=t;let l=function(e=[],t){return e.map((e=>{const{instancePath:r,keyword:o,params:a,schemaPath:s,parentSchema:i,...c}=e;let{message:l=""}=c,d=r.replace(/\//g,"."),u=`${d} ${l}`.trim();if("missingProperty"in a){d=d?`${d}.${a.missingProperty}`:a.missingProperty;const e=a.missingProperty,r=(0,n.$R)(m()(t,`${d.replace(/^\./,"")}`)).title;if(r)l=l.replace(e,r);else{const t=m()(i,[n.s1,e,"title"]);t&&(l=l.replace(e,t))}u=l}else{const e=(0,n.$R)(m()(t,`${d.replace(/^\./,"")}`)).title;if(e)u=`'${e}' ${l}`.trim();else{const e=null===i||void 0===i?void 0:i.title;e&&(u=`'${e}' ${l}`.trim())}}return{name:o,property:d,message:l,params:a,stack:u,schemaPath:s}}))}(t.errors,i);c&&(l=[...l,{stack:c.message}]),"function"===typeof s&&(l=s(l,i));let d=(0,n.MD)(l);if(c&&(d={...d,$schema:{__errors:[c.message]}}),"function"!==typeof a)return{errors:l,errorSchema:d};const u=(0,n.NV)(e,o,r,o,!0),f=a(u,(0,n.JF)(u),i),p=(0,n.fV)(f);return(0,n.k6)({errors:l,errorSchema:d},p)}class 
y{constructor(e,t){const{additionalMetaSchemas:r,customFormats:o,ajvOptionsOverrides:s,ajvFormatOptions:c,AjvClass:p}=e;this.ajv=function(e,t,r={},o,s=a()){const c=new s({...d,...r});return o?i()(c,o):!1!==o&&i()(c),c.addFormat("data-url",f),c.addFormat("color",u),c.addKeyword(n.Rr),c.addKeyword(n.yB),Array.isArray(e)&&c.addMetaSchema(e),l()(t)&&Object.keys(t).forEach((e=>{c.addFormat(e,t[e])})),c}(r,o,s,c,p),this.localizer=t}toErrorList(e,t=[]){return(0,n.SL)(e,t)}rawValidation(e,t){let r,o,a;e[n.K0]&&(o=this.ajv.getSchema(e[n.K0]));try{void 0===o&&(o=this.ajv.compile(e)),o(t)}catch(s){r=s}return o&&("function"===typeof this.localizer&&this.localizer(o.errors),a=o.errors||void 0,o.errors=null),{errors:a,validationError:r}}validateFormData(e,t,r,n,o){return h(this,this.rawValidation(t,e),e,t,r,n,o)}isValid(e,t,r){var o,a;const s=null!==(o=r[n.K0])&&void 0!==o?o:n.at;try{this.ajv.addSchema(r,s);const o=(0,n.vh)(e),i=null!==(a=o[n.K0])&&void 0!==a?a:(0,n.$K)(o);let c;c=this.ajv.getSchema(i),void 0===c&&(c=this.ajv.addSchema(o,i).getSchema(i)||this.ajv.compile(o));return c(t)}catch(i){return console.warn("Error encountered compiling schema:",i),!1}finally{this.ajv.removeSchema(s)}}}r(2404);const g=function(e={},t){return new y(e,t)}()},14018:(e,t)=>{"use strict";function r(e,t){return{validate:e,compare:t}}Object.defineProperty(t,"__esModule",{value:!0}),t.formatNames=t.fastFormats=t.fullFormats=void 0,t.fullFormats={date:r(a,s),time:r(c,l),"date-time":r((function(e){const t=e.split(d);return 2===t.length&&a(t[0])&&c(t[1],!0)}),u),duration:/^P(?!$)((\d+Y)?(\d+M)?(\d+D)?(T(?=\d)(\d+H)?(\d+M)?(\d+S)?)?|(\d+W)?)$/,uri:function(e){return f.test(e)&&p.test(e)},"uri-reference":/^(?:[a-z][a-z0-9+\-.]*:)?(?:\/?\/(?:(?:[a-z0-9\-._~!$&'()*+,;=:]|%[0-9a-f]{2})*@)?(?:\[(?:(?:(?:(?:[0-9a-f]{1,4}:){6}|::(?:[0-9a-f]{1,4}:){5}|(?:[0-9a-f]{1,4})?::(?:[0-9a-f]{1,4}:){4}|(?:(?:[0-9a-f]{1,4}:){0,1}[0-9a-f]{1,4})?::(?:[0-9a-f]{1,4}:){3}|(?:(?:[0-9a-f]{1,4}:){0,2}[0-9a-f]{1,4})?::(?:[0-9a-f]{1,4}:){2}|(?:(?:[0-9a-f]{1,4}:){0,3}[0-9a-f]{1,4})?::[0-9a-f]{1,4}:|(?:(?:[0-9a-f]{1,4}:){0,4}[0-9a-f]{1,4})?::)(?:[0-9a-f]{1,4}:[0-9a-f]{1,4}|(?:(?:25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(?:25[0-5]|2[0-4]\d|[01]?\d\d?))|(?:(?:[0-9a-f]{1,4}:){0,5}[0-9a-f]{1,4})?::[0-9a-f]{1,4}|(?:(?:[0-9a-f]{1,4}:){0,6}[0-9a-f]{1,4})?::)|[Vv][0-9a-f]+\.[a-z0-9\-._~!$&'()*+,;=:]+)\]|(?:(?:25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(?:25[0-5]|2[0-4]\d|[01]?\d\d?)|(?:[a-z0-9\-._~!$&'"()*+,;=]|%[0-9a-f]{2})*)(?::\d*)?(?:\/(?:[a-z0-9\-._~!$&'"()*+,;=:@]|%[0-9a-f]{2})*)*|\/(?:(?:[a-z0-9\-._~!$&'"()*+,;=:@]|%[0-9a-f]{2})+(?:\/(?:[a-z0-9\-._~!$&'"()*+,;=:@]|%[0-9a-f]{2})*)*)?|(?:[a-z0-9\-._~!$&'"()*+,;=:@]|%[0-9a-f]{2})+(?:\/(?:[a-z0-9\-._~!$&'"()*+,;=:@]|%[0-9a-f]{2})*)*)?(?:\?(?:[a-z0-9\-._~!$&'"()*+,;=:@/?]|%[0-9a-f]{2})*)?(?:#(?:[a-z0-9\-._~!$&'"()*+,;=:@/?]|%[0-9a-f]{2})*)?$/i,"uri-template":/^(?:(?:[^\x00-\x20"'<>%\\^`{|}]|%[0-9a-f]{2})|\{[+#./;?&=,!@|]?(?:[a-z0-9_]|%[0-9a-f]{2})+(?::[1-9][0-9]{0,3}|\*)?(?:,(?:[a-z0-9_]|%[0-9a-f]{2})+(?::[1-9][0-9]{0,3}|\*)?)*\})*$/i,url:/^(?:https?|ftp):\/\/(?:\S+(?::\S*)?@)?(?:(?!(?:10|127)(?:\.\d{1,3}){3})(?!(?:169\.254|192\.168)(?:\.\d{1,3}){2})(?!172\.(?:1[6-9]|2\d|3[0-1])(?:\.\d{1,3}){2})(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5])){2}(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))|(?:(?:[a-z0-9\u{00a1}-\u{ffff}]+-)*[a-z0-9\u{00a1}-\u{ffff}]+)(?:\.(?:[a-z0-9\u{00a1}-\u{ffff}]+-)*[a-z0-9\u{00a1}-\u{ffff}]+)*(?:\.(?:[a-z\u{00a1}-\u{ffff}]{2,})))(?::\d{2,5})?(?:\/[^\s]*)?$/iu,email:/^[a-z0-9!#$%&'*+/=?^_`
{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?$/i,hostname:/^(?=.{1,253}\.?$)[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?(?:\.[a-z0-9](?:[-0-9a-z]{0,61}[0-9a-z])?)*\.?$/i,ipv4:/^(?:(?:25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(?:25[0-5]|2[0-4]\d|[01]?\d\d?)$/,ipv6:/^((([0-9a-f]{1,4}:){7}([0-9a-f]{1,4}|:))|(([0-9a-f]{1,4}:){6}(:[0-9a-f]{1,4}|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9a-f]{1,4}:){5}(((:[0-9a-f]{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9a-f]{1,4}:){4}(((:[0-9a-f]{1,4}){1,3})|((:[0-9a-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9a-f]{1,4}:){3}(((:[0-9a-f]{1,4}){1,4})|((:[0-9a-f]{1,4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9a-f]{1,4}:){2}(((:[0-9a-f]{1,4}){1,5})|((:[0-9a-f]{1,4}){0,3}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9a-f]{1,4}:){1}(((:[0-9a-f]{1,4}){1,6})|((:[0-9a-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(:(((:[0-9a-f]{1,4}){1,7})|((:[0-9a-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))$/i,regex:function(e){if(v.test(e))return!1;try{return new RegExp(e),!0}catch(t){return!1}},uuid:/^(?:urn:uuid:)?[0-9a-f]{8}-(?:[0-9a-f]{4}-){3}[0-9a-f]{12}$/i,"json-pointer":/^(?:\/(?:[^~/]|~0|~1)*)*$/,"json-pointer-uri-fragment":/^#(?:\/(?:[a-z0-9_\-.!$&'()*+,;:=@]|%[0-9a-f]{2}|~0|~1)*)*$/i,"relative-json-pointer":/^(?:0|[1-9][0-9]*)(?:#|(?:\/(?:[^~/]|~0|~1)*)*)$/,byte:function(e){return m.lastIndex=0,m.test(e)},int32:{type:"number",validate:function(e){return Number.isInteger(e)&&e<=y&&e>=h}},int64:{type:"number",validate:function(e){return Number.isInteger(e)}},float:{type:"number",validate:g},double:{type:"number",validate:g},password:!0,binary:!0},t.fastFormats={...t.fullFormats,date:r(/^\d\d\d\d-[0-1]\d-[0-3]\d$/,s),time:r(/^(?:[0-2]\d:[0-5]\d:[0-5]\d|23:59:60)(?:\.\d+)?(?:z|[+-]\d\d(?::?\d\d)?)?$/i,l),"date-time":r(/^\d\d\d\d-[0-1]\d-[0-3]\d[t\s](?:[0-2]\d:[0-5]\d:[0-5]\d|23:59:60)(?:\.\d+)?(?:z|[+-]\d\d(?::?\d\d)?)$/i,u),uri:/^(?:[a-z][a-z0-9+\-.]*:)(?:\/?\/)?[^\s]*$/i,"uri-reference":/^(?:(?:[a-z][a-z0-9+\-.]*:)?\/?\/)?(?:[^\\\s#][^\s#]*)?(?:#[^\\\s]*)?$/i,email:/^[a-z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?(?:\.[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?)*$/i},t.formatNames=Object.keys(t.fullFormats);const n=/^(\d\d\d\d)-(\d\d)-(\d\d)$/,o=[0,31,28,31,30,31,30,31,31,30,31,30,31];function a(e){const t=n.exec(e);if(!t)return!1;const r=+t[1],a=+t[2],s=+t[3];return a>=1&&a<=12&&s>=1&&s<=(2===a&&function(e){return e%4===0&&(e%100!==0||e%400===0)}(r)?29:o[a])}function s(e,t){if(e&&t)return e>t?1:e(t=n[1]+n[2]+n[3]+(n[4]||""))?1:e{"use strict";Object.defineProperty(t,"__esModule",{value:!0});const n=r(14018),o=r(26461),a=r(99029),s=new a.Name("fullFormats"),i=new a.Name("fastFormats"),c=(e,t={keywords:!0})=>{if(Array.isArray(t))return l(e,t,n.fullFormats,s),e;const[r,a]="fast"===t.mode?[n.fastFormats,i]:[n.fullFormats,s];return l(e,t.formats||n.formatNames,r,a),t.keywords&&o.default(e),e};function l(e,t,r,n){var o,s;null!==(o=(s=e.opts.code).formats)&&void 0!==o||(s.formats=a._`require("ajv-formats/dist/formats").${n}`);for(const a of t)e.addFormat(a,r[a])}c.get=(e,t="full")=>{const r=("fast"===t?n.fastFormats:n.fullFormats)[e];if(!r)throw new Error(`Unknown format 
"${e}"`);return r},e.exports=t=c,Object.defineProperty(t,"__esModule",{value:!0}),t.default=c},26461:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.formatLimitDefinition=void 0;const n=r(63282),o=r(99029),a=o.operators,s={formatMaximum:{okStr:"<=",ok:a.LTE,fail:a.GT},formatMinimum:{okStr:">=",ok:a.GTE,fail:a.LT},formatExclusiveMaximum:{okStr:"<",ok:a.LT,fail:a.GTE},formatExclusiveMinimum:{okStr:">",ok:a.GT,fail:a.LTE}},i={message:({keyword:e,schemaCode:t})=>o.str`should be ${s[e].okStr} ${t}`,params:({keyword:e,schemaCode:t})=>o._`{comparison: ${s[e].okStr}, limit: ${t}}`};t.formatLimitDefinition={keyword:Object.keys(s),type:"string",schemaType:"string",$data:!0,error:i,code(e){const{gen:t,data:r,schemaCode:a,keyword:i,it:c}=e,{opts:l,self:d}=c;if(!l.validateFormats)return;const u=new n.KeywordCxt(c,d.RULES.all.format.definition,"format");function f(e){return o._`${e}.compare(${r}, ${a}) ${s[i].fail} 0`}u.$data?function(){const r=t.scopeValue("formats",{ref:d.formats,code:l.code.formats}),n=t.const("fmt",o._`${r}[${u.schemaCode}]`);e.fail$data(o.or(o._`typeof ${n} != "object"`,o._`${n} instanceof RegExp`,o._`typeof ${n}.compare != "function"`,f(n)))}():function(){const r=u.schema,n=d.formats[r];if(!n||!0===n)return;if("object"!=typeof n||n instanceof RegExp||"function"!=typeof n.compare)throw new Error(`"${i}": format "${r}" does not define "compare" function`);const a=t.scopeValue("formats",{key:r,ref:n,code:l.code.formats?o._`${l.code.formats}${o.getProperty(r)}`:void 0});e.fail$data(f(a))}()},dependencies:["format"]};t.default=e=>(e.addKeyword(t.formatLimitDefinition),e)},63282:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.MissingRefError=t.ValidationError=t.CodeGen=t.Name=t.nil=t.stringify=t.str=t._=t.KeywordCxt=void 0;const n=r(4042),o=r(86144),a=r(36653),s=r(72079),i=["/properties"],c="http://json-schema.org/draft-07/schema";class l extends n.default{_addVocabularies(){super._addVocabularies(),o.default.forEach((e=>this.addVocabulary(e))),this.opts.discriminator&&this.addKeyword(a.default)}_addDefaultMetaSchema(){if(super._addDefaultMetaSchema(),!this.opts.meta)return;const e=this.opts.$data?this.$dataMetaSchema(s,i):s;this.addMetaSchema(e,c,!1),this.refs["http://json-schema.org/schema"]=c}defaultMeta(){return this.opts.defaultMeta=super.defaultMeta()||(this.getSchema(c)?c:void 0)}}e.exports=t=l,Object.defineProperty(t,"__esModule",{value:!0}),t.default=l;var d=r(62586);Object.defineProperty(t,"KeywordCxt",{enumerable:!0,get:function(){return d.KeywordCxt}});var u=r(99029);Object.defineProperty(t,"_",{enumerable:!0,get:function(){return u._}}),Object.defineProperty(t,"str",{enumerable:!0,get:function(){return u.str}}),Object.defineProperty(t,"stringify",{enumerable:!0,get:function(){return u.stringify}}),Object.defineProperty(t,"nil",{enumerable:!0,get:function(){return u.nil}}),Object.defineProperty(t,"Name",{enumerable:!0,get:function(){return u.Name}}),Object.defineProperty(t,"CodeGen",{enumerable:!0,get:function(){return u.CodeGen}});var f=r(13558);Object.defineProperty(t,"ValidationError",{enumerable:!0,get:function(){return f.default}});var p=r(34551);Object.defineProperty(t,"MissingRefError",{enumerable:!0,get:function(){return p.default}})},41520:(e,t)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.regexpCode=t.getEsmExportName=t.getProperty=t.safeStringify=t.stringify=t.strConcat=t.addCodeArg=t.str=t._=t.nil=t._Code=t.Name=t.IDENTIFIER=t._CodeOrName=void 0;class 
r{}t._CodeOrName=r,t.IDENTIFIER=/^[a-z$_][a-z$_0-9]*$/i;class n extends r{constructor(e){if(super(),!t.IDENTIFIER.test(e))throw new Error("CodeGen: name must be a valid identifier");this.str=e}toString(){return this.str}emptyStr(){return!1}get names(){return{[this.str]:1}}}t.Name=n;class o extends r{constructor(e){super(),this._items="string"===typeof e?[e]:e}toString(){return this.str}emptyStr(){if(this._items.length>1)return!1;const e=this._items[0];return""===e||'""'===e}get str(){var e;return null!==(e=this._str)&&void 0!==e?e:this._str=this._items.reduce(((e,t)=>`${e}${t}`),"")}get names(){var e;return null!==(e=this._names)&&void 0!==e?e:this._names=this._items.reduce(((e,t)=>(t instanceof n&&(e[t.str]=(e[t.str]||0)+1),e)),{})}}function a(e,...t){const r=[e[0]];let n=0;for(;n{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.or=t.and=t.not=t.CodeGen=t.operators=t.varKinds=t.ValueScopeName=t.ValueScope=t.Scope=t.Name=t.regexpCode=t.stringify=t.getProperty=t.nil=t.strConcat=t.str=t._=void 0;const n=r(41520),o=r(57845);var a=r(41520);Object.defineProperty(t,"_",{enumerable:!0,get:function(){return a._}}),Object.defineProperty(t,"str",{enumerable:!0,get:function(){return a.str}}),Object.defineProperty(t,"strConcat",{enumerable:!0,get:function(){return a.strConcat}}),Object.defineProperty(t,"nil",{enumerable:!0,get:function(){return a.nil}}),Object.defineProperty(t,"getProperty",{enumerable:!0,get:function(){return a.getProperty}}),Object.defineProperty(t,"stringify",{enumerable:!0,get:function(){return a.stringify}}),Object.defineProperty(t,"regexpCode",{enumerable:!0,get:function(){return a.regexpCode}}),Object.defineProperty(t,"Name",{enumerable:!0,get:function(){return a.Name}});var s=r(57845);Object.defineProperty(t,"Scope",{enumerable:!0,get:function(){return s.Scope}}),Object.defineProperty(t,"ValueScope",{enumerable:!0,get:function(){return s.ValueScope}}),Object.defineProperty(t,"ValueScopeName",{enumerable:!0,get:function(){return s.ValueScopeName}}),Object.defineProperty(t,"varKinds",{enumerable:!0,get:function(){return s.varKinds}}),t.operators={GT:new n._Code(">"),GTE:new n._Code(">="),LT:new n._Code("<"),LTE:new n._Code("<="),EQ:new n._Code("==="),NEQ:new n._Code("!=="),NOT:new n._Code("!"),OR:new n._Code("||"),AND:new n._Code("&&"),ADD:new n._Code("+")};class i{optimizeNodes(){return this}optimizeNames(e,t){return this}}class c extends i{constructor(e,t,r){super(),this.varKind=e,this.name=t,this.rhs=r}render({es5:e,_n:t}){const r=e?o.varKinds.var:this.varKind,n=void 0===this.rhs?"":` = ${this.rhs}`;return`${r} ${this.name}${n};`+t}optimizeNames(e,t){if(e[this.name.str])return this.rhs&&(this.rhs=N(this.rhs,e,t)),this}get names(){return this.rhs instanceof n._CodeOrName?this.rhs.names:{}}}class l extends i{constructor(e,t,r){super(),this.lhs=e,this.rhs=t,this.sideEffects=r}render({_n:e}){return`${this.lhs} = ${this.rhs};`+e}optimizeNames(e,t){if(!(this.lhs instanceof n.Name)||e[this.lhs.str]||this.sideEffects)return this.rhs=N(this.rhs,e,t),this}get names(){return O(this.lhs instanceof n.Name?{}:{...this.lhs.names},this.rhs)}}class d extends l{constructor(e,t,r,n){super(e,r,n),this.op=t}render({_n:e}){return`${this.lhs} ${this.op}= ${this.rhs};`+e}}class u extends i{constructor(e){super(),this.label=e,this.names={}}render({_n:e}){return`${this.label}:`+e}}class f extends i{constructor(e){super(),this.label=e,this.names={}}render({_n:e}){return`break${this.label?` ${this.label}`:""};`+e}}class p extends 
i{constructor(e){super(),this.error=e}render({_n:e}){return`throw ${this.error};`+e}get names(){return this.error.names}}class m extends i{constructor(e){super(),this.code=e}render({_n:e}){return`${this.code};`+e}optimizeNodes(){return`${this.code}`?this:void 0}optimizeNames(e,t){return this.code=N(this.code,e,t),this}get names(){return this.code instanceof n._CodeOrName?this.code.names:{}}}class h extends i{constructor(e=[]){super(),this.nodes=e}render(e){return this.nodes.reduce(((t,r)=>t+r.render(e)),"")}optimizeNodes(){const{nodes:e}=this;let t=e.length;for(;t--;){const r=e[t].optimizeNodes();Array.isArray(r)?e.splice(t,1,...r):r?e[t]=r:e.splice(t,1)}return e.length>0?this:void 0}optimizeNames(e,t){const{nodes:r}=this;let n=r.length;for(;n--;){const o=r[n];o.optimizeNames(e,t)||(A(e,o.names),r.splice(n,1))}return r.length>0?this:void 0}get names(){return this.nodes.reduce(((e,t)=>P(e,t.names)),{})}}class y extends h{render(e){return"{"+e._n+super.render(e)+"}"+e._n}}class g extends h{}class v extends y{}v.kind="else";class $ extends y{constructor(e,t){super(t),this.condition=e}render(e){let t=`if(${this.condition})`+super.render(e);return this.else&&(t+="else "+this.else.render(e)),t}optimizeNodes(){super.optimizeNodes();const e=this.condition;if(!0===e)return this.nodes;let t=this.else;if(t){const e=t.optimizeNodes();t=this.else=Array.isArray(e)?new v(e):e}return t?!1===e?t instanceof $?t:t.nodes:this.nodes.length?this:new $(D(e),t instanceof $?[t]:t.nodes):!1!==e&&this.nodes.length?this:void 0}optimizeNames(e,t){var r;if(this.else=null===(r=this.else)||void 0===r?void 0:r.optimizeNames(e,t),super.optimizeNames(e,t)||this.else)return this.condition=N(this.condition,e,t),this}get names(){const e=super.names;return O(e,this.condition),this.else&&P(e,this.else.names),e}}$.kind="if";class b extends y{}b.kind="for";class w extends b{constructor(e){super(),this.iteration=e}render(e){return`for(${this.iteration})`+super.render(e)}optimizeNames(e,t){if(super.optimizeNames(e,t))return this.iteration=N(this.iteration,e,t),this}get names(){return P(super.names,this.iteration.names)}}class S extends b{constructor(e,t,r,n){super(),this.varKind=e,this.name=t,this.from=r,this.to=n}render(e){const t=e.es5?o.varKinds.var:this.varKind,{name:r,from:n,to:a}=this;return`for(${t} ${r}=${n}; ${r}<${a}; ${r}++)`+super.render(e)}get names(){const e=O(super.names,this.from);return O(e,this.to)}}class _ extends b{constructor(e,t,r,n){super(),this.loop=e,this.varKind=t,this.name=r,this.iterable=n}render(e){return`for(${this.varKind} ${this.name} ${this.loop} ${this.iterable})`+super.render(e)}optimizeNames(e,t){if(super.optimizeNames(e,t))return this.iterable=N(this.iterable,e,t),this}get names(){return P(super.names,this.iterable.names)}}class x extends y{constructor(e,t,r){super(),this.name=e,this.args=t,this.async=r}render(e){return`${this.async?"async ":""}function ${this.name}(${this.args})`+super.render(e)}}x.kind="func";class E extends h{render(e){return"return "+super.render(e)}}E.kind="return";class k extends y{render(e){let t="try"+super.render(e);return this.catch&&(t+=this.catch.render(e)),this.finally&&(t+=this.finally.render(e)),t}optimizeNodes(){var e,t;return super.optimizeNodes(),null===(e=this.catch)||void 0===e||e.optimizeNodes(),null===(t=this.finally)||void 0===t||t.optimizeNodes(),this}optimizeNames(e,t){var r,n;return super.optimizeNames(e,t),null===(r=this.catch)||void 0===r||r.optimizeNames(e,t),null===(n=this.finally)||void 0===n||n.optimizeNames(e,t),this}get names(){const 
e=super.names;return this.catch&&P(e,this.catch.names),this.finally&&P(e,this.finally.names),e}}class j extends y{constructor(e){super(),this.error=e}render(e){return`catch(${this.error})`+super.render(e)}}j.kind="catch";class C extends y{render(e){return"finally"+super.render(e)}}C.kind="finally";function P(e,t){for(const r in t)e[r]=(e[r]||0)+(t[r]||0);return e}function O(e,t){return t instanceof n._CodeOrName?P(e,t.names):e}function N(e,t,r){return e instanceof n.Name?a(e):(o=e)instanceof n._Code&&o._items.some((e=>e instanceof n.Name&&1===t[e.str]&&void 0!==r[e.str]))?new n._Code(e._items.reduce(((e,t)=>(t instanceof n.Name&&(t=a(t)),t instanceof n._Code?e.push(...t._items):e.push(t),e)),[])):e;var o;function a(e){const n=r[e.str];return void 0===n||1!==t[e.str]?e:(delete t[e.str],n)}}function A(e,t){for(const r in t)e[r]=(e[r]||0)-(t[r]||0)}function D(e){return"boolean"==typeof e||"number"==typeof e||null===e?!e:n._`!${R(e)}`}t.CodeGen=class{constructor(e,t={}){this._values={},this._blockStarts=[],this._constants={},this.opts={...t,_n:t.lines?"\n":""},this._extScope=e,this._scope=new o.Scope({parent:e}),this._nodes=[new g]}toString(){return this._root.render(this.opts)}name(e){return this._scope.name(e)}scopeName(e){return this._extScope.name(e)}scopeValue(e,t){const r=this._extScope.value(e,t);return(this._values[r.prefix]||(this._values[r.prefix]=new Set)).add(r),r}getScopeValue(e,t){return this._extScope.getValue(e,t)}scopeRefs(e){return this._extScope.scopeRefs(e,this._values)}scopeCode(){return this._extScope.scopeCode(this._values)}_def(e,t,r,n){const o=this._scope.toName(t);return void 0!==r&&n&&(this._constants[o.str]=r),this._leafNode(new c(e,o,r)),o}const(e,t,r){return this._def(o.varKinds.const,e,t,r)}let(e,t,r){return this._def(o.varKinds.let,e,t,r)}var(e,t,r){return this._def(o.varKinds.var,e,t,r)}assign(e,t,r){return this._leafNode(new l(e,t,r))}add(e,r){return this._leafNode(new d(e,t.operators.ADD,r))}code(e){return"function"==typeof e?e():e!==n.nil&&this._leafNode(new m(e)),this}object(...e){const t=["{"];for(const[r,o]of e)t.length>1&&t.push(","),t.push(r),(r!==o||this.opts.es5)&&(t.push(":"),(0,n.addCodeArg)(t,o));return t.push("}"),new n._Code(t)}if(e,t,r){if(this._blockNode(new $(e)),t&&r)this.code(t).else().code(r).endIf();else if(t)this.code(t).endIf();else if(r)throw new Error('CodeGen: "else" body without "then" body');return this}elseIf(e){return this._elseNode(new $(e))}else(){return this._elseNode(new v)}endIf(){return this._endBlockNode($,v)}_for(e,t){return this._blockNode(e),t&&this.code(t).endFor(),this}for(e,t){return this._for(new w(e),t)}forRange(e,t,r,n,a=(this.opts.es5?o.varKinds.var:o.varKinds.let)){const s=this._scope.toName(e);return this._for(new S(a,s,t,r),(()=>n(s)))}forOf(e,t,r,a=o.varKinds.const){const s=this._scope.toName(e);if(this.opts.es5){const e=t instanceof n.Name?t:this.var("_arr",t);return this.forRange("_i",0,n._`${e}.length`,(t=>{this.var(s,n._`${e}[${t}]`),r(s)}))}return this._for(new _("of",a,s,t),(()=>r(s)))}forIn(e,t,r,a=(this.opts.es5?o.varKinds.var:o.varKinds.const)){if(this.opts.ownProperties)return this.forOf(e,n._`Object.keys(${t})`,r);const s=this._scope.toName(e);return this._for(new _("in",a,s,t),(()=>r(s)))}endFor(){return this._endBlockNode(b)}label(e){return this._leafNode(new u(e))}break(e){return this._leafNode(new f(e))}return(e){const t=new E;if(this._blockNode(t),this.code(e),1!==t.nodes.length)throw new Error('CodeGen: "return" should have one node');return this._endBlockNode(E)}try(e,t,r){if(!t&&!r)throw new 
Error('CodeGen: "try" without "catch" and "finally"');const n=new k;if(this._blockNode(n),this.code(e),t){const e=this.name("e");this._currNode=n.catch=new j(e),t(e)}return r&&(this._currNode=n.finally=new C,this.code(r)),this._endBlockNode(j,C)}throw(e){return this._leafNode(new p(e))}block(e,t){return this._blockStarts.push(this._nodes.length),e&&this.code(e).endBlock(t),this}endBlock(e){const t=this._blockStarts.pop();if(void 0===t)throw new Error("CodeGen: not in self-balancing block");const r=this._nodes.length-t;if(r<0||void 0!==e&&r!==e)throw new Error(`CodeGen: wrong number of nodes: ${r} vs ${e} expected`);return this._nodes.length=t,this}func(e,t=n.nil,r,o){return this._blockNode(new x(e,t,r)),o&&this.code(o).endFunc(),this}endFunc(){return this._endBlockNode(x)}optimize(e=1){for(;e-- >0;)this._root.optimizeNodes(),this._root.optimizeNames(this._root.names,this._constants)}_leafNode(e){return this._currNode.nodes.push(e),this}_blockNode(e){this._currNode.nodes.push(e),this._nodes.push(e)}_endBlockNode(e,t){const r=this._currNode;if(r instanceof e||t&&r instanceof t)return this._nodes.pop(),this;throw new Error(`CodeGen: not in block "${t?`${e.kind}/${t.kind}`:e.kind}"`)}_elseNode(e){const t=this._currNode;if(!(t instanceof $))throw new Error('CodeGen: "else" without "if"');return this._currNode=t.else=e,this}get _root(){return this._nodes[0]}get _currNode(){const e=this._nodes;return e[e.length-1]}set _currNode(e){const t=this._nodes;t[t.length-1]=e}},t.not=D;const I=T(t.operators.AND);t.and=function(...e){return e.reduce(I)};const F=T(t.operators.OR);function T(e){return(t,r)=>t===n.nil?r:r===n.nil?t:n._`${R(t)} ${e} ${R(r)}`}function R(e){return e instanceof n.Name?e:n._`(${e})`}t.or=function(...e){return e.reduce(F)}},57845:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.ValueScope=t.ValueScopeName=t.Scope=t.varKinds=t.UsedValueState=void 0;const n=r(41520);class o extends Error{constructor(e){super(`CodeGen: "code" for ${e} not defined`),this.value=e.value}}var a;!function(e){e[e.Started=0]="Started",e[e.Completed=1]="Completed"}(a=t.UsedValueState||(t.UsedValueState={})),t.varKinds={const:new n.Name("const"),let:new n.Name("let"),var:new n.Name("var")};class s{constructor({prefixes:e,parent:t}={}){this._names={},this._prefixes=e,this._parent=t}toName(e){return e instanceof n.Name?e:this.name(e)}name(e){return new n.Name(this._newName(e))}_newName(e){return`${e}${(this._names[e]||this._nameGroup(e)).index++}`}_nameGroup(e){var t,r;if((null===(r=null===(t=this._parent)||void 0===t?void 0:t._prefixes)||void 0===r?void 0:r.has(e))||this._prefixes&&!this._prefixes.has(e))throw new Error(`CodeGen: prefix "${e}" is not allowed in this scope`);return this._names[e]={prefix:e,index:0}}}t.Scope=s;class i extends n.Name{constructor(e,t){super(t),this.prefix=e}setValue(e,{property:t,itemIndex:r}){this.value=e,this.scopePath=n._`.${new n.Name(t)}[${r}]`}}t.ValueScopeName=i;const c=n._`\n`;t.ValueScope=class extends s{constructor(e){super(e),this._values={},this._scope=e.scope,this.opts={...e,_n:e.lines?c:n.nil}}get(){return this._scope}name(e){return new i(e,this._newName(e))}value(e,t){var r;if(void 0===t.ref)throw new Error("CodeGen: ref must be passed in value");const n=this.toName(e),{prefix:o}=n,a=null!==(r=t.key)&&void 0!==r?r:t.ref;let s=this._values[o];if(s){const e=s.get(a);if(e)return e}else s=this._values[o]=new Map;s.set(a,n);const i=this._scope[o]||(this._scope[o]=[]),c=i.length;return 
i[c]=t.ref,n.setValue(t,{property:o,itemIndex:c}),n}getValue(e,t){const r=this._values[e];if(r)return r.get(t)}scopeRefs(e,t=this._values){return this._reduceValues(t,(t=>{if(void 0===t.scopePath)throw new Error(`CodeGen: name "${t}" has no value`);return n._`${e}${t.scopePath}`}))}scopeCode(e=this._values,t,r){return this._reduceValues(e,(e=>{if(void 0===e.value)throw new Error(`CodeGen: name "${e}" has no value`);return e.value.code}),t,r)}_reduceValues(e,r,s={},i){let c=n.nil;for(const l in e){const d=e[l];if(!d)continue;const u=s[l]=s[l]||new Map;d.forEach((e=>{if(u.has(e))return;u.set(e,a.Started);let s=r(e);if(s){const r=this.opts.es5?t.varKinds.var:t.varKinds.const;c=n._`${c}${r} ${e} = ${s};${this.opts._n}`}else{if(!(s=null===i||void 0===i?void 0:i(e)))throw new o(e);c=n._`${c}${s}${this.opts._n}`}u.set(e,a.Completed)}))}return c}}},48708:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.extendErrors=t.resetErrorsCount=t.reportExtraError=t.reportError=t.keyword$DataError=t.keywordError=void 0;const n=r(99029),o=r(94227),a=r(42023);function s(e,t){const r=e.const("err",t);e.if(n._`${a.default.vErrors} === null`,(()=>e.assign(a.default.vErrors,n._`[${r}]`)),n._`${a.default.vErrors}.push(${r})`),e.code(n._`${a.default.errors}++`)}function i(e,t){const{gen:r,validateName:o,schemaEnv:a}=e;a.$async?r.throw(n._`new ${e.ValidationError}(${t})`):(r.assign(n._`${o}.errors`,t),r.return(!1))}t.keywordError={message:({keyword:e})=>n.str`must pass "${e}" keyword validation`},t.keyword$DataError={message:({keyword:e,schemaType:t})=>t?n.str`"${e}" keyword must be ${t} ($data)`:n.str`"${e}" keyword is invalid ($data)`},t.reportError=function(e,r=t.keywordError,o,a){const{it:c}=e,{gen:d,compositeRule:u,allErrors:f}=c,p=l(e,r,o);(null!==a&&void 0!==a?a:u||f)?s(d,p):i(c,n._`[${p}]`)},t.reportExtraError=function(e,r=t.keywordError,n){const{it:o}=e,{gen:c,compositeRule:d,allErrors:u}=o;s(c,l(e,r,n)),d||u||i(o,a.default.vErrors)},t.resetErrorsCount=function(e,t){e.assign(a.default.errors,t),e.if(n._`${a.default.vErrors} !== null`,(()=>e.if(t,(()=>e.assign(n._`${a.default.vErrors}.length`,t)),(()=>e.assign(a.default.vErrors,null)))))},t.extendErrors=function({gen:e,keyword:t,schemaValue:r,data:o,errsCount:s,it:i}){if(void 0===s)throw new Error("ajv implementation error");const c=e.name("err");e.forRange("i",s,a.default.errors,(s=>{e.const(c,n._`${a.default.vErrors}[${s}]`),e.if(n._`${c}.instancePath === undefined`,(()=>e.assign(n._`${c}.instancePath`,(0,n.strConcat)(a.default.instancePath,i.errorPath)))),e.assign(n._`${c}.schemaPath`,n.str`${i.errSchemaPath}/${t}`),i.opts.verbose&&(e.assign(n._`${c}.schema`,r),e.assign(n._`${c}.data`,o))}))};const c={keyword:new n.Name("keyword"),schemaPath:new n.Name("schemaPath"),params:new n.Name("params"),propertyName:new n.Name("propertyName"),message:new n.Name("message"),schema:new n.Name("schema"),parentSchema:new n.Name("parentSchema")};function l(e,t,r){const{createErrors:o}=e.it;return!1===o?n._`{}`:function(e,t,r={}){const{gen:o,it:s}=e,i=[d(s,r),u(e,r)];return function(e,{params:t,message:r},o){const{keyword:s,data:i,schemaValue:l,it:d}=e,{opts:u,propertyName:f,topSchemaRef:p,schemaPath:m}=d;o.push([c.keyword,s],[c.params,"function"==typeof t?t(e):t||n._`{}`]),u.messages&&o.push([c.message,"function"==typeof r?r(e):r]);u.verbose&&o.push([c.schema,l],[c.parentSchema,n._`${p}${m}`],[a.default.data,i]);f&&o.push([c.propertyName,f])}(e,t,i),o.object(...i)}(e,t,r)}function d({errorPath:e},{instancePath:t}){const 
r=t?n.str`${e}${(0,o.getErrorPath)(t,o.Type.Str)}`:e;return[a.default.instancePath,(0,n.strConcat)(a.default.instancePath,r)]}function u({keyword:e,it:{errSchemaPath:t}},{schemaPath:r,parentSchema:a}){let s=a?t:n.str`${t}/${e}`;return r&&(s=n.str`${s}${(0,o.getErrorPath)(r,o.Type.Str)}`),[c.schemaPath,s]}},73835:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.resolveSchema=t.getCompilingSchema=t.resolveRef=t.compileSchema=t.SchemaEnv=void 0;const n=r(99029),o=r(13558),a=r(42023),s=r(66939),i=r(94227),c=r(62586);class l{constructor(e){var t;let r;this.refs={},this.dynamicAnchors={},"object"==typeof e.schema&&(r=e.schema),this.schema=e.schema,this.schemaId=e.schemaId,this.root=e.root||this,this.baseId=null!==(t=e.baseId)&&void 0!==t?t:(0,s.normalizeId)(null===r||void 0===r?void 0:r[e.schemaId||"$id"]),this.schemaPath=e.schemaPath,this.localRefs=e.localRefs,this.meta=e.meta,this.$async=null===r||void 0===r?void 0:r.$async,this.refs={}}}function d(e){const t=f.call(this,e);if(t)return t;const r=(0,s.getFullPath)(this.opts.uriResolver,e.root.baseId),{es5:i,lines:l}=this.opts.code,{ownProperties:d}=this.opts,u=new n.CodeGen(this.scope,{es5:i,lines:l,ownProperties:d});let p;e.$async&&(p=u.scopeValue("Error",{ref:o.default,code:n._`require("ajv/dist/runtime/validation_error").default`}));const m=u.scopeName("validate");e.validateName=m;const h={gen:u,allErrors:this.opts.allErrors,data:a.default.data,parentData:a.default.parentData,parentDataProperty:a.default.parentDataProperty,dataNames:[a.default.data],dataPathArr:[n.nil],dataLevel:0,dataTypes:[],definedProperties:new Set,topSchemaRef:u.scopeValue("schema",!0===this.opts.code.source?{ref:e.schema,code:(0,n.stringify)(e.schema)}:{ref:e.schema}),validateName:m,ValidationError:p,schema:e.schema,schemaEnv:e,rootId:r,baseId:e.baseId||r,schemaPath:n.nil,errSchemaPath:e.schemaPath||(this.opts.jtd?"":"#"),errorPath:n._`""`,opts:this.opts,self:this};let y;try{this._compilations.add(e),(0,c.validateFunctionCode)(h),u.optimize(this.opts.code.optimize);const t=u.toString();y=`${u.scopeRefs(a.default.scope)}return ${t}`,this.opts.code.process&&(y=this.opts.code.process(y,e));const r=new Function(`${a.default.self}`,`${a.default.scope}`,y)(this,this.scope.get());if(this.scope.value(m,{ref:r}),r.errors=null,r.schema=e.schema,r.schemaEnv=e,e.$async&&(r.$async=!0),!0===this.opts.code.source&&(r.source={validateName:m,validateCode:t,scopeValues:u._values}),this.opts.unevaluated){const{props:e,items:t}=h;r.evaluated={props:e instanceof n.Name?void 0:e,items:t instanceof n.Name?void 0:t,dynamicProps:e instanceof n.Name,dynamicItems:t instanceof n.Name},r.source&&(r.source.evaluated=(0,n.stringify)(r.evaluated))}return e.validate=r,e}catch(g){throw delete e.validate,delete e.validateName,y&&this.logger.error("Error compiling schema, function code:",y),g}finally{this._compilations.delete(e)}}function u(e){return(0,s.inlineRef)(e.schema,this.opts.inlineRefs)?e.schema:e.validate?e:d.call(this,e)}function f(e){for(const n of this._compilations)if(r=e,(t=n).schema===r.schema&&t.root===r.root&&t.baseId===r.baseId)return n;var t,r}function p(e,t){let r;for(;"string"==typeof(r=this.refs[t]);)t=r;return r||this.schemas[t]||m.call(this,e,t)}function m(e,t){const r=this.opts.uriResolver.parse(t),n=(0,s._getFullPath)(this.opts.uriResolver,r);let o=(0,s.getFullPath)(this.opts.uriResolver,e.baseId,void 0);if(Object.keys(e.schema).length>0&&n===o)return y.call(this,r,e);const a=(0,s.normalizeId)(n),i=this.refs[a]||this.schemas[a];if("string"==typeof 
i){const t=m.call(this,e,i);if("object"!==typeof(null===t||void 0===t?void 0:t.schema))return;return y.call(this,r,t)}if("object"===typeof(null===i||void 0===i?void 0:i.schema)){if(i.validate||d.call(this,i),a===(0,s.normalizeId)(t)){const{schema:t}=i,{schemaId:r}=this.opts,n=t[r];return n&&(o=(0,s.resolveUrl)(this.opts.uriResolver,o,n)),new l({schema:t,schemaId:r,root:e,baseId:o})}return y.call(this,r,i)}}t.SchemaEnv=l,t.compileSchema=d,t.resolveRef=function(e,t,r){var n;r=(0,s.resolveUrl)(this.opts.uriResolver,t,r);const o=e.refs[r];if(o)return o;let a=p.call(this,e,r);if(void 0===a){const o=null===(n=e.localRefs)||void 0===n?void 0:n[r],{schemaId:s}=this.opts;o&&(a=new l({schema:o,schemaId:s,root:e,baseId:t}))}return void 0!==a?e.refs[r]=u.call(this,a):void 0},t.getCompilingSchema=f,t.resolveSchema=m;const h=new Set(["properties","patternProperties","enum","dependencies","definitions"]);function y(e,{baseId:t,schema:r,root:n}){var o;if("/"!==(null===(o=e.fragment)||void 0===o?void 0:o[0]))return;for(const l of e.fragment.slice(1).split("/")){if("boolean"===typeof r)return;const e=r[(0,i.unescapeFragment)(l)];if(void 0===e)return;const n="object"===typeof(r=e)&&r[this.opts.schemaId];!h.has(l)&&n&&(t=(0,s.resolveUrl)(this.opts.uriResolver,t,n))}let a;if("boolean"!=typeof r&&r.$ref&&!(0,i.schemaHasRulesButRef)(r,this.RULES)){const e=(0,s.resolveUrl)(this.opts.uriResolver,t,r.$ref);a=m.call(this,n,e)}const{schemaId:c}=this.opts;return a=a||new l({schema:r,schemaId:c,root:n,baseId:t}),a.schema!==a.root.schema?a:void 0}},42023:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});const n=r(99029),o={data:new n.Name("data"),valCxt:new n.Name("valCxt"),instancePath:new n.Name("instancePath"),parentData:new n.Name("parentData"),parentDataProperty:new n.Name("parentDataProperty"),rootData:new n.Name("rootData"),dynamicAnchors:new n.Name("dynamicAnchors"),vErrors:new n.Name("vErrors"),errors:new n.Name("errors"),this:new n.Name("this"),self:new n.Name("self"),scope:new n.Name("scope"),json:new n.Name("json"),jsonPos:new n.Name("jsonPos"),jsonLen:new n.Name("jsonLen"),jsonPart:new n.Name("jsonPart")};t.default=o},34551:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});const n=r(66939);class o extends Error{constructor(e,t,r,o){super(o||`can't resolve reference ${r} from id ${t}`),this.missingRef=(0,n.resolveUrl)(e,t,r),this.missingSchema=(0,n.normalizeId)((0,n.getFullPath)(e,this.missingRef))}}t.default=o},66939:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.getSchemaRefs=t.resolveUrl=t.normalizeId=t._getFullPath=t.getFullPath=t.inlineRef=void 0;const n=r(94227),o=r(32017),a=r(7106),s=new Set(["type","format","pattern","maxLength","minLength","maxProperties","minProperties","maxItems","minItems","maximum","minimum","uniqueItems","multipleOf","required","enum","const"]);t.inlineRef=function(e,t=!0){return"boolean"==typeof e||(!0===t?!c(e):!!t&&l(e)<=t)};const i=new Set(["$ref","$recursiveRef","$recursiveAnchor","$dynamicRef","$dynamicAnchor"]);function c(e){for(const t in e){if(i.has(t))return!0;const r=e[t];if(Array.isArray(r)&&r.some(c))return!0;if("object"==typeof r&&c(r))return!0}return!1}function l(e){let t=0;for(const r in e){if("$ref"===r)return 1/0;if(t++,!s.has(r)&&("object"==typeof e[r]&&(0,n.eachItem)(e[r],(e=>t+=l(e))),t===1/0))return 1/0}return t}function d(e,t="",r){!1!==r&&(t=p(t));const n=e.parse(t);return u(e,n)}function u(e,t){return e.serialize(t).split("#")[0]+"#"}t.getFullPath=d,t._getFullPath=u;const 
f=/#\/?$/;function p(e){return e?e.replace(f,""):""}t.normalizeId=p,t.resolveUrl=function(e,t,r){return r=p(r),e.resolve(t,r)};const m=/^[a-z_][-a-z0-9._]*$/i;t.getSchemaRefs=function(e,t){if("boolean"==typeof e)return{};const{schemaId:r,uriResolver:n}=this.opts,s=p(e[r]||t),i={"":s},c=d(n,s,!1),l={},u=new Set;return a(e,{allKeys:!0},((e,t,n,o)=>{if(void 0===o)return;const a=c+t;let s=i[o];function d(t){const r=this.opts.uriResolver.resolve;if(t=p(s?r(s,t):t),u.has(t))throw h(t);u.add(t);let n=this.refs[t];return"string"==typeof n&&(n=this.refs[n]),"object"==typeof n?f(e,n.schema,t):t!==p(a)&&("#"===t[0]?(f(e,l[t],t),l[t]=e):this.refs[t]=a),t}function y(e){if("string"==typeof e){if(!m.test(e))throw new Error(`invalid anchor "${e}"`);d.call(this,`#${e}`)}}"string"==typeof e[r]&&(s=d.call(this,e[r])),y.call(this,e.$anchor),y.call(this,e.$dynamicAnchor),i[t]=s})),l;function f(e,t,r){if(void 0!==t&&!o(e,t))throw h(r)}function h(e){return new Error(`reference "${e}" resolves to more than one schema`)}}},10396:(e,t)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.getRules=t.isJSONType=void 0;const r=new Set(["string","number","integer","boolean","null","object","array"]);t.isJSONType=function(e){return"string"==typeof e&&r.has(e)},t.getRules=function(){const e={number:{type:"number",rules:[]},string:{type:"string",rules:[]},array:{type:"array",rules:[]},object:{type:"object",rules:[]}};return{types:{...e,integer:!0,boolean:!0,null:!0},rules:[{rules:[]},e.number,e.string,e.array,e.object],post:{rules:[]},all:{},keywords:{}}}},94227:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.checkStrictMode=t.getErrorPath=t.Type=t.useFunc=t.setEvaluated=t.evaluatedPropsToName=t.mergeEvaluated=t.eachItem=t.unescapeJsonPointer=t.escapeJsonPointer=t.escapeFragment=t.unescapeFragment=t.schemaRefOrVal=t.schemaHasRulesButRef=t.schemaHasRules=t.checkUnknownRules=t.alwaysValidSchema=t.toHash=void 0;const n=r(99029),o=r(41520);function a(e,t=e.schema){const{opts:r,self:n}=e;if(!r.strictSchema)return;if("boolean"===typeof t)return;const o=n.RULES.keywords;for(const a in t)o[a]||m(e,`unknown keyword: "${a}"`)}function s(e,t){if("boolean"==typeof e)return!e;for(const r in e)if(t[r])return!0;return!1}function i(e){return"number"==typeof e?`${e}`:e.replace(/~/g,"~0").replace(/\//g,"~1")}function c(e){return e.replace(/~1/g,"/").replace(/~0/g,"~")}function l({mergeNames:e,mergeToName:t,mergeValues:r,resultToName:o}){return(a,s,i,c)=>{const l=void 0===i?s:i instanceof n.Name?(s instanceof n.Name?e(a,s,i):t(a,s,i),i):s instanceof n.Name?(t(a,i,s),s):r(s,i);return c!==n.Name||l instanceof n.Name?l:o(a,l)}}function d(e,t){if(!0===t)return e.var("props",!0);const r=e.var("props",n._`{}`);return void 0!==t&&u(e,r,t),r}function u(e,t,r){Object.keys(r).forEach((r=>e.assign(n._`${t}${(0,n.getProperty)(r)}`,!0)))}t.toHash=function(e){const t={};for(const r of e)t[r]=!0;return t},t.alwaysValidSchema=function(e,t){return"boolean"==typeof t?t:0===Object.keys(t).length||(a(e,t),!s(t,e.self.RULES.all))},t.checkUnknownRules=a,t.schemaHasRules=s,t.schemaHasRulesButRef=function(e,t){if("boolean"==typeof e)return!e;for(const r in e)if("$ref"!==r&&t.all[r])return!0;return!1},t.schemaRefOrVal=function({topSchemaRef:e,schemaPath:t},r,o,a){if(!a){if("number"==typeof r||"boolean"==typeof r)return r;if("string"==typeof r)return n._`${r}`}return n._`${e}${t}${(0,n.getProperty)(o)}`},t.unescapeFragment=function(e){return c(decodeURIComponent(e))},t.escapeFragment=function(e){return 
encodeURIComponent(i(e))},t.escapeJsonPointer=i,t.unescapeJsonPointer=c,t.eachItem=function(e,t){if(Array.isArray(e))for(const r of e)t(r);else t(e)},t.mergeEvaluated={props:l({mergeNames:(e,t,r)=>e.if(n._`${r} !== true && ${t} !== undefined`,(()=>{e.if(n._`${t} === true`,(()=>e.assign(r,!0)),(()=>e.assign(r,n._`${r} || {}`).code(n._`Object.assign(${r}, ${t})`)))})),mergeToName:(e,t,r)=>e.if(n._`${r} !== true`,(()=>{!0===t?e.assign(r,!0):(e.assign(r,n._`${r} || {}`),u(e,r,t))})),mergeValues:(e,t)=>!0===e||{...e,...t},resultToName:d}),items:l({mergeNames:(e,t,r)=>e.if(n._`${r} !== true && ${t} !== undefined`,(()=>e.assign(r,n._`${t} === true ? true : ${r} > ${t} ? ${r} : ${t}`))),mergeToName:(e,t,r)=>e.if(n._`${r} !== true`,(()=>e.assign(r,!0===t||n._`${r} > ${t} ? ${r} : ${t}`))),mergeValues:(e,t)=>!0===e||Math.max(e,t),resultToName:(e,t)=>e.var("items",t)})},t.evaluatedPropsToName=d,t.setEvaluated=u;const f={};var p;function m(e,t,r=e.opts.strictSchema){if(r){if(t=`strict mode: ${t}`,!0===r)throw new Error(t);e.self.logger.warn(t)}}t.useFunc=function(e,t){return e.scopeValue("func",{ref:t,code:f[t.code]||(f[t.code]=new o._Code(t.code))})},function(e){e[e.Num=0]="Num",e[e.Str=1]="Str"}(p=t.Type||(t.Type={})),t.getErrorPath=function(e,t,r){if(e instanceof n.Name){const o=t===p.Num;return r?o?n._`"[" + ${e} + "]"`:n._`"['" + ${e} + "']"`:o?n._`"/" + ${e}`:n._`"/" + ${e}.replace(/~/g, "~0").replace(/\\//g, "~1")`}return r?(0,n.getProperty)(e).toString():"/"+i(e)},t.checkStrictMode=m},7887:(e,t)=>{"use strict";function r(e,t){return t.rules.some((t=>n(e,t)))}function n(e,t){var r;return void 0!==e[t.keyword]||(null===(r=t.definition.implements)||void 0===r?void 0:r.some((t=>void 0!==e[t])))}Object.defineProperty(t,"__esModule",{value:!0}),t.shouldUseRule=t.shouldUseGroup=t.schemaHasRulesForType=void 0,t.schemaHasRulesForType=function({schema:e,self:t},n){const o=t.RULES.types[n];return o&&!0!==o&&r(e,o)},t.shouldUseGroup=r,t.shouldUseRule=n},28727:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.boolOrEmptySchema=t.topBoolOrEmptySchema=void 0;const n=r(48708),o=r(99029),a=r(42023),s={message:"boolean schema is false"};function i(e,t){const{gen:r,data:o}=e,a={gen:r,keyword:"false schema",data:o,schema:!1,schemaCode:!1,schemaValue:!1,params:{},it:e};(0,n.reportError)(a,s,void 0,t)}t.topBoolOrEmptySchema=function(e){const{gen:t,schema:r,validateName:n}=e;!1===r?i(e,!1):"object"==typeof r&&!0===r.$async?t.return(a.default.data):(t.assign(o._`${n}.errors`,null),t.return(!0))},t.boolOrEmptySchema=function(e,t){const{gen:r,schema:n}=e;!1===n?(r.var(t,!1),i(e)):r.var(t,!0)}},10208:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.reportTypeError=t.checkDataTypes=t.checkDataType=t.coerceAndCheckDataType=t.getJSONTypes=t.getSchemaTypes=t.DataType=void 0;const n=r(10396),o=r(7887),a=r(48708),s=r(99029),i=r(94227);var c;function l(e){const t=Array.isArray(e)?e:e?[e]:[];if(t.every(n.isJSONType))return t;throw new Error("type must be JSONType or JSONType[]: "+t.join(","))}!function(e){e[e.Correct=0]="Correct",e[e.Wrong=1]="Wrong"}(c=t.DataType||(t.DataType={})),t.getSchemaTypes=function(e){const t=l(e.type);if(t.includes("null")){if(!1===e.nullable)throw new Error("type: null contradicts nullable: false")}else{if(!t.length&&void 0!==e.nullable)throw new Error('"nullable" cannot be used without "type"');!0===e.nullable&&t.push("null")}return t},t.getJSONTypes=l,t.coerceAndCheckDataType=function(e,t){const{gen:r,data:n,opts:a}=e,i=function(e,t){return 
t?e.filter((e=>d.has(e)||"array"===t&&"array"===e)):[]}(t,a.coerceTypes),l=t.length>0&&!(0===i.length&&1===t.length&&(0,o.schemaHasRulesForType)(e,t[0]));if(l){const o=f(t,n,a.strictNumbers,c.Wrong);r.if(o,(()=>{i.length?function(e,t,r){const{gen:n,data:o,opts:a}=e,i=n.let("dataType",s._`typeof ${o}`),c=n.let("coerced",s._`undefined`);"array"===a.coerceTypes&&n.if(s._`${i} == 'object' && Array.isArray(${o}) && ${o}.length == 1`,(()=>n.assign(o,s._`${o}[0]`).assign(i,s._`typeof ${o}`).if(f(t,o,a.strictNumbers),(()=>n.assign(c,o)))));n.if(s._`${c} !== undefined`);for(const s of r)(d.has(s)||"array"===s&&"array"===a.coerceTypes)&&l(s);function l(e){switch(e){case"string":return void n.elseIf(s._`${i} == "number" || ${i} == "boolean"`).assign(c,s._`"" + ${o}`).elseIf(s._`${o} === null`).assign(c,s._`""`);case"number":return void n.elseIf(s._`${i} == "boolean" || ${o} === null || (${i} == "string" && ${o} && ${o} == +${o})`).assign(c,s._`+${o}`);case"integer":return void n.elseIf(s._`${i} === "boolean" || ${o} === null || (${i} === "string" && ${o} && ${o} == +${o} && !(${o} % 1))`).assign(c,s._`+${o}`);case"boolean":return void n.elseIf(s._`${o} === "false" || ${o} === 0 || ${o} === null`).assign(c,!1).elseIf(s._`${o} === "true" || ${o} === 1`).assign(c,!0);case"null":return n.elseIf(s._`${o} === "" || ${o} === 0 || ${o} === false`),void n.assign(c,null);case"array":n.elseIf(s._`${i} === "string" || ${i} === "number" || ${i} === "boolean" || ${o} === null`).assign(c,s._`[${o}]`)}}n.else(),m(e),n.endIf(),n.if(s._`${c} !== undefined`,(()=>{n.assign(o,c),function({gen:e,parentData:t,parentDataProperty:r},n){e.if(s._`${t} !== undefined`,(()=>e.assign(s._`${t}[${r}]`,n)))}(e,c)}))}(e,t,i):m(e)}))}return l};const d=new Set(["string","number","integer","boolean","null"]);function u(e,t,r,n=c.Correct){const o=n===c.Correct?s.operators.EQ:s.operators.NEQ;let a;switch(e){case"null":return s._`${t} ${o} null`;case"array":a=s._`Array.isArray(${t})`;break;case"object":a=s._`${t} && typeof ${t} == "object" && !Array.isArray(${t})`;break;case"integer":a=i(s._`!(${t} % 1) && !isNaN(${t})`);break;case"number":a=i();break;default:return s._`typeof ${t} ${o} ${e}`}return n===c.Correct?a:(0,s.not)(a);function i(e=s.nil){return(0,s.and)(s._`typeof ${t} == "number"`,e,r?s._`isFinite(${t})`:s.nil)}}function f(e,t,r,n){if(1===e.length)return u(e[0],t,r,n);let o;const a=(0,i.toHash)(e);if(a.array&&a.object){const e=s._`typeof ${t} != "object"`;o=a.null?e:s._`!${t} || ${e}`,delete a.null,delete a.array,delete a.object}else o=s.nil;a.number&&delete a.integer;for(const i in a)o=(0,s.and)(o,u(i,t,r,n));return o}t.checkDataType=u,t.checkDataTypes=f;const p={message:({schema:e})=>`must be ${e}`,params:({schema:e,schemaValue:t})=>"string"==typeof e?s._`{type: ${e}}`:s._`{type: ${t}}`};function m(e){const t=function(e){const{gen:t,data:r,schema:n}=e,o=(0,i.schemaRefOrVal)(e,n,"type");return{gen:t,keyword:"type",data:r,schema:n.type,schemaCode:o,schemaValue:o,parentSchema:n,params:{},it:e}}(e);(0,a.reportError)(t,p)}t.reportTypeError=m},7870:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.assignDefaults=void 0;const n=r(99029),o=r(94227);function a(e,t,r){const{gen:a,compositeRule:s,data:i,opts:c}=e;if(void 0===r)return;const l=n._`${i}${(0,n.getProperty)(t)}`;if(s)return void(0,o.checkStrictMode)(e,`default is ignored for: ${l}`);let d=n._`${l} === undefined`;"empty"===c.useDefaults&&(d=n._`${d} || ${l} === null || ${l} === ""`),a.if(d,n._`${l} = 
${(0,n.stringify)(r)}`)}t.assignDefaults=function(e,t){const{properties:r,items:n}=e.schema;if("object"===t&&r)for(const o in r)a(e,o,r[o].default);else"array"===t&&Array.isArray(n)&&n.forEach(((t,r)=>a(e,r,t.default)))}},62586:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.getData=t.KeywordCxt=t.validateFunctionCode=void 0;const n=r(28727),o=r(10208),a=r(7887),s=r(10208),i=r(7870),c=r(33673),l=r(24495),d=r(99029),u=r(42023),f=r(66939),p=r(94227),m=r(48708);function h({gen:e,validateName:t,schema:r,schemaEnv:n,opts:o},a){o.code.es5?e.func(t,d._`${u.default.data}, ${u.default.valCxt}`,n.$async,(()=>{e.code(d._`"use strict"; ${y(r,o)}`),function(e,t){e.if(u.default.valCxt,(()=>{e.var(u.default.instancePath,d._`${u.default.valCxt}.${u.default.instancePath}`),e.var(u.default.parentData,d._`${u.default.valCxt}.${u.default.parentData}`),e.var(u.default.parentDataProperty,d._`${u.default.valCxt}.${u.default.parentDataProperty}`),e.var(u.default.rootData,d._`${u.default.valCxt}.${u.default.rootData}`),t.dynamicRef&&e.var(u.default.dynamicAnchors,d._`${u.default.valCxt}.${u.default.dynamicAnchors}`)}),(()=>{e.var(u.default.instancePath,d._`""`),e.var(u.default.parentData,d._`undefined`),e.var(u.default.parentDataProperty,d._`undefined`),e.var(u.default.rootData,u.default.data),t.dynamicRef&&e.var(u.default.dynamicAnchors,d._`{}`)}))}(e,o),e.code(a)})):e.func(t,d._`${u.default.data}, ${function(e){return d._`{${u.default.instancePath}="", ${u.default.parentData}, ${u.default.parentDataProperty}, ${u.default.rootData}=${u.default.data}${e.dynamicRef?d._`, ${u.default.dynamicAnchors}={}`:d.nil}}={}`}(o)}`,n.$async,(()=>e.code(y(r,o)).code(a)))}function y(e,t){const r="object"==typeof e&&e[t.schemaId];return r&&(t.code.source||t.code.process)?d._`/*# sourceURL=${r} */`:d.nil}function g(e,t){$(e)&&(b(e),v(e))?function(e,t){const{schema:r,gen:n,opts:o}=e;o.$comment&&r.$comment&&S(e);(function(e){const t=e.schema[e.opts.schemaId];t&&(e.baseId=(0,f.resolveUrl)(e.opts.uriResolver,e.baseId,t))})(e),function(e){if(e.schema.$async&&!e.schemaEnv.$async)throw new Error("async schema in sync schema")}(e);const a=n.const("_errs",u.default.errors);w(e,a),n.var(t,d._`${a} === ${u.default.errors}`)}(e,t):(0,n.boolOrEmptySchema)(e,t)}function v({schema:e,self:t}){if("boolean"==typeof e)return!e;for(const r in e)if(t.RULES.all[r])return!0;return!1}function $(e){return"boolean"!=typeof e.schema}function b(e){(0,p.checkUnknownRules)(e),function(e){const{schema:t,errSchemaPath:r,opts:n,self:o}=e;t.$ref&&n.ignoreKeywordsWithRef&&(0,p.schemaHasRulesButRef)(t,o.RULES)&&o.logger.warn(`$ref: keywords ignored in schema at path "${r}"`)}(e)}function w(e,t){if(e.opts.jtd)return _(e,[],!1,t);const r=(0,o.getSchemaTypes)(e.schema);_(e,r,!(0,o.coerceAndCheckDataType)(e,r),t)}function S({gen:e,schemaEnv:t,schema:r,errSchemaPath:n,opts:o}){const a=r.$comment;if(!0===o.$comment)e.code(d._`${u.default.self}.logger.log(${a})`);else if("function"==typeof o.$comment){const r=d.str`${n}/$comment`,o=e.scopeValue("root",{ref:t.root});e.code(d._`${u.default.self}.opts.$comment(${a}, ${r}, ${o}.schema)`)}}function _(e,t,r,n){const{gen:o,schema:i,data:c,allErrors:l,opts:f,self:m}=e,{RULES:h}=m;function y(p){(0,a.shouldUseGroup)(i,p)&&(p.type?(o.if((0,s.checkDataType)(p.type,c,f.strictNumbers)),x(e,p),1===t.length&&t[0]===p.type&&r&&(o.else(),(0,s.reportTypeError)(e)),o.endIf()):x(e,p),l||o.if(d._`${u.default.errors} === 
${n||0}`))}!i.$ref||!f.ignoreKeywordsWithRef&&(0,p.schemaHasRulesButRef)(i,h)?(f.jtd||function(e,t){if(e.schemaEnv.meta||!e.opts.strictTypes)return;(function(e,t){if(!t.length)return;if(!e.dataTypes.length)return void(e.dataTypes=t);t.forEach((t=>{k(e.dataTypes,t)||j(e,`type "${t}" not allowed by context "${e.dataTypes.join(",")}"`)})),function(e,t){const r=[];for(const n of e.dataTypes)k(t,n)?r.push(n):t.includes("integer")&&"number"===n&&r.push("integer");e.dataTypes=r}(e,t)})(e,t),e.opts.allowUnionTypes||function(e,t){t.length>1&&(2!==t.length||!t.includes("null"))&&j(e,"use allowUnionTypes to allow union type keyword")}(e,t);!function(e,t){const r=e.self.RULES.all;for(const n in r){const o=r[n];if("object"==typeof o&&(0,a.shouldUseRule)(e.schema,o)){const{type:r}=o.definition;r.length&&!r.some((e=>E(t,e)))&&j(e,`missing type "${r.join(",")}" for keyword "${n}"`)}}}(e,e.dataTypes)}(e,t),o.block((()=>{for(const e of h.rules)y(e);y(h.post)}))):o.block((()=>P(e,"$ref",h.all.$ref.definition)))}function x(e,t){const{gen:r,schema:n,opts:{useDefaults:o}}=e;o&&(0,i.assignDefaults)(e,t.type),r.block((()=>{for(const r of t.rules)(0,a.shouldUseRule)(n,r)&&P(e,r.keyword,r.definition,t.type)}))}function E(e,t){return e.includes(t)||"number"===t&&e.includes("integer")}function k(e,t){return e.includes(t)||"integer"===t&&e.includes("number")}function j(e,t){t+=` at "${e.schemaEnv.baseId+e.errSchemaPath}" (strictTypes)`,(0,p.checkStrictMode)(e,t,e.opts.strictTypes)}t.validateFunctionCode=function(e){$(e)&&(b(e),v(e))?function(e){const{schema:t,opts:r,gen:n}=e;h(e,(()=>{r.$comment&&t.$comment&&S(e),function(e){const{schema:t,opts:r}=e;void 0!==t.default&&r.useDefaults&&r.strictSchema&&(0,p.checkStrictMode)(e,"default is ignored in the schema root")}(e),n.let(u.default.vErrors,null),n.let(u.default.errors,0),r.unevaluated&&function(e){const{gen:t,validateName:r}=e;e.evaluated=t.const("evaluated",d._`${r}.evaluated`),t.if(d._`${e.evaluated}.dynamicProps`,(()=>t.assign(d._`${e.evaluated}.props`,d._`undefined`))),t.if(d._`${e.evaluated}.dynamicItems`,(()=>t.assign(d._`${e.evaluated}.items`,d._`undefined`)))}(e),w(e),function(e){const{gen:t,schemaEnv:r,validateName:n,ValidationError:o,opts:a}=e;r.$async?t.if(d._`${u.default.errors} === 0`,(()=>t.return(u.default.data)),(()=>t.throw(d._`new ${o}(${u.default.vErrors})`))):(t.assign(d._`${n}.errors`,u.default.vErrors),a.unevaluated&&function({gen:e,evaluated:t,props:r,items:n}){r instanceof d.Name&&e.assign(d._`${t}.props`,r);n instanceof d.Name&&e.assign(d._`${t}.items`,n)}(e),t.return(d._`${u.default.errors} === 0`))}(e)}))}(e):h(e,(()=>(0,n.topBoolOrEmptySchema)(e)))};class C{constructor(e,t,r){if((0,c.validateKeywordUsage)(e,t,r),this.gen=e.gen,this.allErrors=e.allErrors,this.keyword=r,this.data=e.data,this.schema=e.schema[r],this.$data=t.$data&&e.opts.$data&&this.schema&&this.schema.$data,this.schemaValue=(0,p.schemaRefOrVal)(e,this.schema,r,this.$data),this.schemaType=t.schemaType,this.parentSchema=e.schema,this.params={},this.it=e,this.def=t,this.$data)this.schemaCode=e.gen.const("vSchema",A(this.$data,e));else if(this.schemaCode=this.schemaValue,!(0,c.validSchemaType)(this.schema,t.schemaType,t.allowUndefined))throw new Error(`${r} value must be ${JSON.stringify(t.schemaType)}`);("code"in 
t?t.trackErrors:!1!==t.errors)&&(this.errsCount=e.gen.const("_errs",u.default.errors))}result(e,t,r){this.failResult((0,d.not)(e),t,r)}failResult(e,t,r){this.gen.if(e),r?r():this.error(),t?(this.gen.else(),t(),this.allErrors&&this.gen.endIf()):this.allErrors?this.gen.endIf():this.gen.else()}pass(e,t){this.failResult((0,d.not)(e),void 0,t)}fail(e){if(void 0===e)return this.error(),void(this.allErrors||this.gen.if(!1));this.gen.if(e),this.error(),this.allErrors?this.gen.endIf():this.gen.else()}fail$data(e){if(!this.$data)return this.fail(e);const{schemaCode:t}=this;this.fail(d._`${t} !== undefined && (${(0,d.or)(this.invalid$data(),e)})`)}error(e,t,r){if(t)return this.setParams(t),this._error(e,r),void this.setParams({});this._error(e,r)}_error(e,t){(e?m.reportExtraError:m.reportError)(this,this.def.error,t)}$dataError(){(0,m.reportError)(this,this.def.$dataError||m.keyword$DataError)}reset(){if(void 0===this.errsCount)throw new Error('add "trackErrors" to keyword definition');(0,m.resetErrorsCount)(this.gen,this.errsCount)}ok(e){this.allErrors||this.gen.if(e)}setParams(e,t){t?Object.assign(this.params,e):this.params=e}block$data(e,t,r=d.nil){this.gen.block((()=>{this.check$data(e,r),t()}))}check$data(e=d.nil,t=d.nil){if(!this.$data)return;const{gen:r,schemaCode:n,schemaType:o,def:a}=this;r.if((0,d.or)(d._`${n} === undefined`,t)),e!==d.nil&&r.assign(e,!0),(o.length||a.validateSchema)&&(r.elseIf(this.invalid$data()),this.$dataError(),e!==d.nil&&r.assign(e,!1)),r.else()}invalid$data(){const{gen:e,schemaCode:t,schemaType:r,def:n,it:o}=this;return(0,d.or)(function(){if(r.length){if(!(t instanceof d.Name))throw new Error("ajv implementation error");const e=Array.isArray(r)?r:[r];return d._`${(0,s.checkDataTypes)(e,t,o.opts.strictNumbers,s.DataType.Wrong)}`}return d.nil}(),function(){if(n.validateSchema){const r=e.scopeValue("validate$data",{ref:n.validateSchema});return d._`!${r}(${t})`}return d.nil}())}subschema(e,t){const r=(0,l.getSubschema)(this.it,e);(0,l.extendSubschemaData)(r,this.it,e),(0,l.extendSubschemaMode)(r,e);const n={...this.it,...r,items:void 0,props:void 0};return g(n,t),n}mergeEvaluated(e,t){const{it:r,gen:n}=this;r.opts.unevaluated&&(!0!==r.props&&void 0!==e.props&&(r.props=p.mergeEvaluated.props(n,e.props,r.props,t)),!0!==r.items&&void 0!==e.items&&(r.items=p.mergeEvaluated.items(n,e.items,r.items,t)))}mergeValidEvaluated(e,t){const{it:r,gen:n}=this;if(r.opts.unevaluated&&(!0!==r.props||!0!==r.items))return n.if(t,(()=>this.mergeEvaluated(e,d.Name))),!0}}function P(e,t,r,n){const o=new C(e,r,t);"code"in r?r.code(o,n):o.$data&&r.validate?(0,c.funcKeywordCode)(o,r):"macro"in r?(0,c.macroKeywordCode)(o,r):(r.compile||r.validate)&&(0,c.funcKeywordCode)(o,r)}t.KeywordCxt=C;const O=/^\/(?:[^~]|~0|~1)*$/,N=/^([0-9]+)(#|\/(?:[^~]|~0|~1)*)?$/;function A(e,{dataLevel:t,dataNames:r,dataPathArr:n}){let o,a;if(""===e)return u.default.rootData;if("/"===e[0]){if(!O.test(e))throw new Error(`Invalid JSON-pointer: ${e}`);o=e,a=u.default.rootData}else{const s=N.exec(e);if(!s)throw new Error(`Invalid JSON-pointer: ${e}`);const i=+s[1];if(o=s[2],"#"===o){if(i>=t)throw new Error(c("property/index",i));return n[t-i]}if(i>t)throw new Error(c("data",i));if(a=r[t-i],!o)return a}let s=a;const i=o.split("/");for(const l of i)l&&(a=d._`${a}${(0,d.getProperty)((0,p.unescapeJsonPointer)(l))}`,s=d._`${s} && ${a}`);return s;function c(e,r){return`Cannot access ${e} ${r} levels up, current level is ${t}`}}t.getData=A},33673:(e,t,r)=>{"use 
strict";Object.defineProperty(t,"__esModule",{value:!0}),t.validateKeywordUsage=t.validSchemaType=t.funcKeywordCode=t.macroKeywordCode=void 0;const n=r(99029),o=r(42023),a=r(15765),s=r(48708);function i(e){const{gen:t,data:r,it:o}=e;t.if(o.parentData,(()=>t.assign(r,n._`${o.parentData}[${o.parentDataProperty}]`)))}function c(e,t,r){if(void 0===r)throw new Error(`keyword "${t}" failed to compile`);return e.scopeValue("keyword","function"==typeof r?{ref:r}:{ref:r,code:(0,n.stringify)(r)})}t.macroKeywordCode=function(e,t){const{gen:r,keyword:o,schema:a,parentSchema:s,it:i}=e,l=t.macro.call(i.self,a,s,i),d=c(r,o,l);!1!==i.opts.validateSchema&&i.self.validateSchema(l,!0);const u=r.name("valid");e.subschema({schema:l,schemaPath:n.nil,errSchemaPath:`${i.errSchemaPath}/${o}`,topSchemaRef:d,compositeRule:!0},u),e.pass(u,(()=>e.error(!0)))},t.funcKeywordCode=function(e,t){var r;const{gen:l,keyword:d,schema:u,parentSchema:f,$data:p,it:m}=e;!function({schemaEnv:e},t){if(t.async&&!e.$async)throw new Error("async keyword in sync schema")}(m,t);const h=!p&&t.compile?t.compile.call(m.self,u,f,m):t.validate,y=c(l,d,h),g=l.let("valid");function v(r=(t.async?n._`await `:n.nil)){const s=m.opts.passContext?o.default.this:o.default.self,i=!("compile"in t&&!p||!1===t.schema);l.assign(g,n._`${r}${(0,a.callValidateCode)(e,y,s,i)}`,t.modifying)}function $(e){var r;l.if((0,n.not)(null!==(r=t.valid)&&void 0!==r?r:g),e)}e.block$data(g,(function(){if(!1===t.errors)v(),t.modifying&&i(e),$((()=>e.error()));else{const r=t.async?function(){const e=l.let("ruleErrs",null);return l.try((()=>v(n._`await `)),(t=>l.assign(g,!1).if(n._`${t} instanceof ${m.ValidationError}`,(()=>l.assign(e,n._`${t}.errors`)),(()=>l.throw(t))))),e}():function(){const e=n._`${y}.errors`;return l.assign(e,null),v(n.nil),e}();t.modifying&&i(e),$((()=>function(e,t){const{gen:r}=e;r.if(n._`Array.isArray(${t})`,(()=>{r.assign(o.default.vErrors,n._`${o.default.vErrors} === null ? 
${t} : ${o.default.vErrors}.concat(${t})`).assign(o.default.errors,n._`${o.default.vErrors}.length`),(0,s.extendErrors)(e)}),(()=>e.error()))}(e,r)))}})),e.ok(null!==(r=t.valid)&&void 0!==r?r:g)},t.validSchemaType=function(e,t,r=!1){return!t.length||t.some((t=>"array"===t?Array.isArray(e):"object"===t?e&&"object"==typeof e&&!Array.isArray(e):typeof e==t||r&&"undefined"==typeof e))},t.validateKeywordUsage=function({schema:e,opts:t,self:r,errSchemaPath:n},o,a){if(Array.isArray(o.keyword)?!o.keyword.includes(a):o.keyword!==a)throw new Error("ajv implementation error");const s=o.dependencies;if(null===s||void 0===s?void 0:s.some((t=>!Object.prototype.hasOwnProperty.call(e,t))))throw new Error(`parent schema must have dependencies of ${a}: ${s.join(",")}`);if(o.validateSchema){if(!o.validateSchema(e[a])){const e=`keyword "${a}" value is invalid at path "${n}": `+r.errorsText(o.validateSchema.errors);if("log"!==t.validateSchema)throw new Error(e);r.logger.error(e)}}}},24495:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.extendSubschemaMode=t.extendSubschemaData=t.getSubschema=void 0;const n=r(99029),o=r(94227);t.getSubschema=function(e,{keyword:t,schemaProp:r,schema:a,schemaPath:s,errSchemaPath:i,topSchemaRef:c}){if(void 0!==t&&void 0!==a)throw new Error('both "keyword" and "schema" passed, only one allowed');if(void 0!==t){const a=e.schema[t];return void 0===r?{schema:a,schemaPath:n._`${e.schemaPath}${(0,n.getProperty)(t)}`,errSchemaPath:`${e.errSchemaPath}/${t}`}:{schema:a[r],schemaPath:n._`${e.schemaPath}${(0,n.getProperty)(t)}${(0,n.getProperty)(r)}`,errSchemaPath:`${e.errSchemaPath}/${t}/${(0,o.escapeFragment)(r)}`}}if(void 0!==a){if(void 0===s||void 0===i||void 0===c)throw new Error('"schemaPath", "errSchemaPath" and "topSchemaRef" are required with "schema"');return{schema:a,schemaPath:s,topSchemaRef:c,errSchemaPath:i}}throw new Error('either "keyword" or "schema" must be passed')},t.extendSubschemaData=function(e,t,{dataProp:r,dataPropType:a,data:s,dataTypes:i,propertyName:c}){if(void 0!==s&&void 0!==r)throw new Error('both "data" and "dataProp" passed, only one allowed');const{gen:l}=t;if(void 0!==r){const{errorPath:s,dataPathArr:i,opts:c}=t;d(l.let("data",n._`${t.data}${(0,n.getProperty)(r)}`,!0)),e.errorPath=n.str`${s}${(0,o.getErrorPath)(r,a,c.jsPropertySyntax)}`,e.parentDataProperty=n._`${r}`,e.dataPathArr=[...i,e.parentDataProperty]}if(void 0!==s){d(s instanceof n.Name?s:l.let("data",s,!0)),void 0!==c&&(e.propertyName=c)}function d(r){e.data=r,e.dataLevel=t.dataLevel+1,e.dataTypes=[],t.definedProperties=new Set,e.parentData=t.data,e.dataNames=[...t.dataNames,r]}i&&(e.dataTypes=i)},t.extendSubschemaMode=function(e,{jtdDiscriminator:t,jtdMetadata:r,compositeRule:n,createErrors:o,allErrors:a}){void 0!==n&&(e.compositeRule=n),void 0!==o&&(e.createErrors=o),void 0!==a&&(e.allErrors=a),e.jtdDiscriminator=t,e.jtdMetadata=r}},4042:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.CodeGen=t.Name=t.nil=t.stringify=t.str=t._=t.KeywordCxt=void 0;var n=r(62586);Object.defineProperty(t,"KeywordCxt",{enumerable:!0,get:function(){return n.KeywordCxt}});var o=r(99029);Object.defineProperty(t,"_",{enumerable:!0,get:function(){return o._}}),Object.defineProperty(t,"str",{enumerable:!0,get:function(){return o.str}}),Object.defineProperty(t,"stringify",{enumerable:!0,get:function(){return o.stringify}}),Object.defineProperty(t,"nil",{enumerable:!0,get:function(){return o.nil}}),Object.defineProperty(t,"Name",{enumerable:!0,get:function(){return 
o.Name}}),Object.defineProperty(t,"CodeGen",{enumerable:!0,get:function(){return o.CodeGen}});const a=r(13558),s=r(34551),i=r(10396),c=r(73835),l=r(99029),d=r(66939),u=r(10208),f=r(94227),p=r(63837),m=r(55944),h=(e,t)=>new RegExp(e,t);h.code="new RegExp";const y=["removeAdditional","useDefaults","coerceTypes"],g=new Set(["validate","serialize","parse","wrapper","root","schema","keyword","pattern","formats","validate$data","func","obj","Error"]),v={errorDataPath:"",format:"`validateFormats: false` can be used instead.",nullable:'"nullable" keyword is supported by default.',jsonPointers:"Deprecated jsPropertySyntax can be used instead.",extendRefs:"Deprecated ignoreKeywordsWithRef can be used instead.",missingRefs:"Pass empty schema with $id that should be ignored to ajv.addSchema.",processCode:"Use option `code: {process: (code, schemaEnv: object) => string}`",sourceCode:"Use option `code: {source: true}`",strictDefaults:"It is default now, see option `strict`.",strictKeywords:"It is default now, see option `strict`.",uniqueItems:'"uniqueItems" keyword is always validated.',unknownFormats:"Disable strict mode or pass `true` to `ajv.addFormat` (or `formats` option).",cache:"Map is used as cache, schema object as key.",serialize:"Map is used as cache, schema object as key.",ajvErrors:"It is default now."},$={ignoreKeywordsWithRef:"",jsPropertySyntax:"",unicode:'"minLength"/"maxLength" account for unicode characters by default.'};function b(e){var t,r,n,o,a,s,i,c,l,d,u,f,p,y,g,v,$,b,w,S,_,x,E,k,j;const C=e.strict,P=null===(t=e.code)||void 0===t?void 0:t.optimize,O=!0===P||void 0===P?1:P||0,N=null!==(n=null===(r=e.code)||void 0===r?void 0:r.regExp)&&void 0!==n?n:h,A=null!==(o=e.uriResolver)&&void 0!==o?o:m.default;return{strictSchema:null===(s=null!==(a=e.strictSchema)&&void 0!==a?a:C)||void 0===s||s,strictNumbers:null===(c=null!==(i=e.strictNumbers)&&void 0!==i?i:C)||void 0===c||c,strictTypes:null!==(d=null!==(l=e.strictTypes)&&void 0!==l?l:C)&&void 0!==d?d:"log",strictTuples:null!==(f=null!==(u=e.strictTuples)&&void 0!==u?u:C)&&void 0!==f?f:"log",strictRequired:null!==(y=null!==(p=e.strictRequired)&&void 0!==p?p:C)&&void 0!==y&&y,code:e.code?{...e.code,optimize:O,regExp:N}:{optimize:O,regExp:N},loopRequired:null!==(g=e.loopRequired)&&void 0!==g?g:200,loopEnum:null!==(v=e.loopEnum)&&void 0!==v?v:200,meta:null===($=e.meta)||void 0===$||$,messages:null===(b=e.messages)||void 0===b||b,inlineRefs:null===(w=e.inlineRefs)||void 0===w||w,schemaId:null!==(S=e.schemaId)&&void 0!==S?S:"$id",addUsedSchema:null===(_=e.addUsedSchema)||void 0===_||_,validateSchema:null===(x=e.validateSchema)||void 0===x||x,validateFormats:null===(E=e.validateFormats)||void 0===E||E,unicodeRegExp:null===(k=e.unicodeRegExp)||void 0===k||k,int32range:null===(j=e.int32range)||void 0===j||j,uriResolver:A}}class w{constructor(e={}){this.schemas={},this.refs={},this.formats={},this._compilations=new Set,this._loading={},this._cache=new Map,e=this.opts={...e,...b(e)};const{es5:t,lines:r}=this.opts.code;this.scope=new l.ValueScope({scope:{},prefixes:g,es5:t,lines:r}),this.logger=function(e){if(!1===e)return C;if(void 0===e)return console;if(e.log&&e.warn&&e.error)return e;throw new Error("logger must implement log, warn and error methods")}(e.logger);const n=e.validateFormats;e.validateFormats=!1,this.RULES=(0,i.getRules)(),S.call(this,v,e,"NOT 
SUPPORTED"),S.call(this,$,e,"DEPRECATED","warn"),this._metaOpts=j.call(this),e.formats&&E.call(this),this._addVocabularies(),this._addDefaultMetaSchema(),e.keywords&&k.call(this,e.keywords),"object"==typeof e.meta&&this.addMetaSchema(e.meta),x.call(this),e.validateFormats=n}_addVocabularies(){this.addKeyword("$async")}_addDefaultMetaSchema(){const{$data:e,meta:t,schemaId:r}=this.opts;let n=p;"id"===r&&(n={...p},n.id=n.$id,delete n.$id),t&&e&&this.addMetaSchema(n,n[r],!1)}defaultMeta(){const{meta:e,schemaId:t}=this.opts;return this.opts.defaultMeta="object"==typeof e?e[t]||e:void 0}validate(e,t){let r;if("string"==typeof e){if(r=this.getSchema(e),!r)throw new Error(`no schema with key or ref "${e}"`)}else r=this.compile(e);const n=r(t);return"$async"in r||(this.errors=r.errors),n}compile(e,t){const r=this._addSchema(e,t);return r.validate||this._compileSchemaEnv(r)}compileAsync(e,t){if("function"!=typeof this.opts.loadSchema)throw new Error("options.loadSchema should be a function");const{loadSchema:r}=this.opts;return n.call(this,e,t);async function n(e,t){await o.call(this,e.$schema);const r=this._addSchema(e,t);return r.validate||a.call(this,r)}async function o(e){e&&!this.getSchema(e)&&await n.call(this,{$ref:e},!0)}async function a(e){try{return this._compileSchemaEnv(e)}catch(t){if(!(t instanceof s.default))throw t;return i.call(this,t),await c.call(this,t.missingSchema),a.call(this,e)}}function i({missingSchema:e,missingRef:t}){if(this.refs[e])throw new Error(`AnySchema ${e} is loaded but ${t} cannot be resolved`)}async function c(e){const r=await l.call(this,e);this.refs[e]||await o.call(this,r.$schema),this.refs[e]||this.addSchema(r,e,t)}async function l(e){const t=this._loading[e];if(t)return t;try{return await(this._loading[e]=r(e))}finally{delete this._loading[e]}}}addSchema(e,t,r,n=this.opts.validateSchema){if(Array.isArray(e)){for(const t of e)this.addSchema(t,void 0,r,n);return this}let o;if("object"===typeof e){const{schemaId:t}=this.opts;if(o=e[t],void 0!==o&&"string"!=typeof o)throw new Error(`schema ${t} must be string`)}return t=(0,d.normalizeId)(t||o),this._checkUnique(t),this.schemas[t]=this._addSchema(e,r,t,n,!0),this}addMetaSchema(e,t,r=this.opts.validateSchema){return this.addSchema(e,t,!0,r),this}validateSchema(e,t){if("boolean"==typeof e)return!0;let r;if(r=e.$schema,void 0!==r&&"string"!=typeof r)throw new Error("$schema must be a string");if(r=r||this.opts.defaultMeta||this.defaultMeta(),!r)return this.logger.warn("meta-schema not available"),this.errors=null,!0;const n=this.validate(r,e);if(!n&&t){const e="schema is invalid: "+this.errorsText();if("log"!==this.opts.validateSchema)throw new Error(e);this.logger.error(e)}return n}getSchema(e){let t;for(;"string"==typeof(t=_.call(this,e));)e=t;if(void 0===t){const{schemaId:r}=this.opts,n=new c.SchemaEnv({schema:{},schemaId:r});if(t=c.resolveSchema.call(this,n,e),!t)return;this.refs[e]=t}return t.validate||this._compileSchemaEnv(t)}removeSchema(e){if(e instanceof RegExp)return this._removeAllSchemas(this.schemas,e),this._removeAllSchemas(this.refs,e),this;switch(typeof e){case"undefined":return this._removeAllSchemas(this.schemas),this._removeAllSchemas(this.refs),this._cache.clear(),this;case"string":{const t=_.call(this,e);return"object"==typeof t&&this._cache.delete(t.schema),delete this.schemas[e],delete this.refs[e],this}case"object":{const t=e;this._cache.delete(t);let r=e[this.opts.schemaId];return r&&(r=(0,d.normalizeId)(r),delete this.schemas[r],delete this.refs[r]),this}default:throw new 
Error("ajv.removeSchema: invalid parameter")}}addVocabulary(e){for(const t of e)this.addKeyword(t);return this}addKeyword(e,t){let r;if("string"==typeof e)r=e,"object"==typeof t&&(this.logger.warn("these parameters are deprecated, see docs for addKeyword"),t.keyword=r);else{if("object"!=typeof e||void 0!==t)throw new Error("invalid addKeywords parameters");if(r=(t=e).keyword,Array.isArray(r)&&!r.length)throw new Error("addKeywords: keyword must be string or non-empty array")}if(O.call(this,r,t),!t)return(0,f.eachItem)(r,(e=>N.call(this,e))),this;D.call(this,t);const n={...t,type:(0,u.getJSONTypes)(t.type),schemaType:(0,u.getJSONTypes)(t.schemaType)};return(0,f.eachItem)(r,0===n.type.length?e=>N.call(this,e,n):e=>n.type.forEach((t=>N.call(this,e,n,t)))),this}getKeyword(e){const t=this.RULES.all[e];return"object"==typeof t?t.definition:!!t}removeKeyword(e){const{RULES:t}=this;delete t.keywords[e],delete t.all[e];for(const r of t.rules){const t=r.rules.findIndex((t=>t.keyword===e));t>=0&&r.rules.splice(t,1)}return this}addFormat(e,t){return"string"==typeof t&&(t=new RegExp(t)),this.formats[e]=t,this}errorsText(e=this.errors,{separator:t=", ",dataVar:r="data"}={}){return e&&0!==e.length?e.map((e=>`${r}${e.instancePath} ${e.message}`)).reduce(((e,r)=>e+t+r)):"No errors"}$dataMetaSchema(e,t){const r=this.RULES.all;e=JSON.parse(JSON.stringify(e));for(const n of t){const t=n.split("/").slice(1);let o=e;for(const e of t)o=o[e];for(const e in r){const t=r[e];if("object"!=typeof t)continue;const{$data:n}=t.definition,a=o[e];n&&a&&(o[e]=F(a))}}return e}_removeAllSchemas(e,t){for(const r in e){const n=e[r];t&&!t.test(r)||("string"==typeof n?delete e[r]:n&&!n.meta&&(this._cache.delete(n.schema),delete e[r]))}}_addSchema(e,t,r,n=this.opts.validateSchema,o=this.opts.addUsedSchema){let a;const{schemaId:s}=this.opts;if("object"==typeof e)a=e[s];else{if(this.opts.jtd)throw new Error("schema must be object");if("boolean"!=typeof e)throw new Error("schema must be object or boolean")}let i=this._cache.get(e);if(void 0!==i)return i;r=(0,d.normalizeId)(a||r);const l=d.getSchemaRefs.call(this,e,r);return i=new c.SchemaEnv({schema:e,schemaId:s,meta:t,baseId:r,localRefs:l}),this._cache.set(i.schema,i),o&&!r.startsWith("#")&&(r&&this._checkUnique(r),this.refs[r]=i),n&&this.validateSchema(e,!0),i}_checkUnique(e){if(this.schemas[e]||this.refs[e])throw new Error(`schema with key or id "${e}" already exists`)}_compileSchemaEnv(e){if(e.meta?this._compileMetaSchema(e):c.compileSchema.call(this,e),!e.validate)throw new Error("ajv implementation error");return e.validate}_compileMetaSchema(e){const t=this.opts;this.opts=this._metaOpts;try{c.compileSchema.call(this,e)}finally{this.opts=t}}}function S(e,t,r,n="error"){for(const o in e){const a=o;a in t&&this.logger[n](`${r}: option ${o}. 
${e[a]}`)}}function _(e){return e=(0,d.normalizeId)(e),this.schemas[e]||this.refs[e]}function x(){const e=this.opts.schemas;if(e)if(Array.isArray(e))this.addSchema(e);else for(const t in e)this.addSchema(e[t],t)}function E(){for(const e in this.opts.formats){const t=this.opts.formats[e];t&&this.addFormat(e,t)}}function k(e){if(Array.isArray(e))this.addVocabulary(e);else{this.logger.warn("keywords option as map is deprecated, pass array");for(const t in e){const r=e[t];r.keyword||(r.keyword=t),this.addKeyword(r)}}}function j(){const e={...this.opts};for(const t of y)delete e[t];return e}t.default=w,w.ValidationError=a.default,w.MissingRefError=s.default;const C={log(){},warn(){},error(){}};const P=/^[a-z_$][a-z0-9_$:-]*$/i;function O(e,t){const{RULES:r}=this;if((0,f.eachItem)(e,(e=>{if(r.keywords[e])throw new Error(`Keyword ${e} is already defined`);if(!P.test(e))throw new Error(`Keyword ${e} has invalid name`)})),t&&t.$data&&!("code"in t)&&!("validate"in t))throw new Error('$data keyword must have "code" or "validate" function')}function N(e,t,r){var n;const o=null===t||void 0===t?void 0:t.post;if(r&&o)throw new Error('keyword with "post" flag cannot have "type"');const{RULES:a}=this;let s=o?a.post:a.rules.find((({type:e})=>e===r));if(s||(s={type:r,rules:[]},a.rules.push(s)),a.keywords[e]=!0,!t)return;const i={keyword:e,definition:{...t,type:(0,u.getJSONTypes)(t.type),schemaType:(0,u.getJSONTypes)(t.schemaType)}};t.before?A.call(this,s,i,t.before):s.rules.push(i),a.all[e]=i,null===(n=t.implements)||void 0===n||n.forEach((e=>this.addKeyword(e)))}function A(e,t,r){const n=e.rules.findIndex((e=>e.keyword===r));n>=0?e.rules.splice(n,0,t):(e.rules.push(t),this.logger.warn(`rule ${r} is not defined`))}function D(e){let{metaSchema:t}=e;void 0!==t&&(e.$data&&this.opts.$data&&(t=F(t)),e.validateSchema=this.compile(t,!0))}const I={$ref:"https://raw.githubusercontent.com/ajv-validator/ajv/master/lib/refs/data.json#"};function F(e){return{anyOf:[e,I]}}},76250:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});const n=r(32017);n.code='require("ajv/dist/runtime/equal").default',t.default=n},53853:(e,t)=>{"use strict";function r(e){const t=e.length;let r,n=0,o=0;for(;o<t;)n++,r=e.charCodeAt(o++),r>=55296&&r<=56319&&o<t&&(r=e.charCodeAt(o),56320===(64512&r)&&o++);return n}Object.defineProperty(t,"__esModule",{value:!0}),t.default=r,r.code='require("ajv/dist/runtime/ucs2length").default'},55944:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});const n=r(46579);n.code='require("ajv/dist/runtime/uri").default',t.default=n},13558:(e,t)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});class r extends Error{constructor(e){super("validation failed"),this.errors=e,this.ajv=this.validation=!0}}t.default=r},15457:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.validateAdditionalItems=void 0;const n=r(99029),o=r(94227),a={keyword:"additionalItems",type:"array",schemaType:["boolean","object"],before:"uniqueItems",error:{message:({params:{len:e}})=>n.str`must NOT have more than ${e} items`,params:({params:{len:e}})=>n._`{limit: ${e}}`},code(e){const{parentSchema:t,it:r}=e,{items:n}=t;Array.isArray(n)?s(e,n):(0,o.checkStrictMode)(r,'"additionalItems" is ignored when "items" is not an array of schemas')}};function s(e,t){const{gen:r,schema:a,data:s,keyword:i,it:c}=e;c.items=!0;const l=r.const("len",n._`${s}.length`);if(!1===a)e.setParams({len:t.length}),e.pass(n._`${l} <= ${t.length}`);else if("object"==typeof a&&!(0,o.alwaysValidSchema)(c,a)){const a=r.var("valid",n._`${l} <= 
${t.length}`);r.if((0,n.not)(a),(()=>function(a){r.forRange("i",t.length,l,(t=>{e.subschema({keyword:i,dataProp:t,dataPropType:o.Type.Num},a),c.allErrors||r.if((0,n.not)(a),(()=>r.break()))}))}(a))),e.ok(a)}}t.validateAdditionalItems=s,t.default=a},38660:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});const n=r(15765),o=r(99029),a=r(42023),s=r(94227),i={keyword:"additionalProperties",type:["object"],schemaType:["boolean","object"],allowUndefined:!0,trackErrors:!0,error:{message:"must NOT have additional properties",params:({params:e})=>o._`{additionalProperty: ${e.additionalProperty}}`},code(e){const{gen:t,schema:r,parentSchema:i,data:c,errsCount:l,it:d}=e;if(!l)throw new Error("ajv implementation error");const{allErrors:u,opts:f}=d;if(d.props=!0,"all"!==f.removeAdditional&&(0,s.alwaysValidSchema)(d,r))return;const p=(0,n.allSchemaProperties)(i.properties),m=(0,n.allSchemaProperties)(i.patternProperties);function h(e){t.code(o._`delete ${c}[${e}]`)}function y(n){if("all"===f.removeAdditional||f.removeAdditional&&!1===r)h(n);else{if(!1===r)return e.setParams({additionalProperty:n}),e.error(),void(u||t.break());if("object"==typeof r&&!(0,s.alwaysValidSchema)(d,r)){const r=t.name("valid");"failing"===f.removeAdditional?(g(n,r,!1),t.if((0,o.not)(r),(()=>{e.reset(),h(n)}))):(g(n,r),u||t.if((0,o.not)(r),(()=>t.break())))}}}function g(t,r,n){const o={keyword:"additionalProperties",dataProp:t,dataPropType:s.Type.Str};!1===n&&Object.assign(o,{compositeRule:!0,createErrors:!1,allErrors:!1}),e.subschema(o,r)}t.forIn("key",c,(r=>{p.length||m.length?t.if(function(r){let a;if(p.length>8){const e=(0,s.schemaRefOrVal)(d,i.properties,"properties");a=(0,n.isOwnProperty)(t,e,r)}else a=p.length?(0,o.or)(...p.map((e=>o._`${r} === ${e}`))):o.nil;return m.length&&(a=(0,o.or)(a,...m.map((t=>o._`${(0,n.usePattern)(e,t)}.test(${r})`)))),(0,o.not)(a)}(r),(()=>y(r))):y(r)})),e.ok(o._`${l} === ${a.default.errors}`)}};t.default=i},15844:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});const n=r(94227),o={keyword:"allOf",schemaType:"array",code(e){const{gen:t,schema:r,it:o}=e;if(!Array.isArray(r))throw new Error("ajv implementation error");const a=t.name("valid");r.forEach(((t,r)=>{if((0,n.alwaysValidSchema)(o,t))return;const s=e.subschema({keyword:"allOf",schemaProp:r},a);e.ok(a),e.mergeEvaluated(s)}))}};t.default=o},16505:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});const n={keyword:"anyOf",schemaType:"array",trackErrors:!0,code:r(15765).validateUnion,error:{message:"must match a schema in anyOf"}};t.default=n},12661:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});const n=r(99029),o=r(94227),a={keyword:"contains",type:"array",schemaType:["object","boolean"],before:"uniqueItems",trackErrors:!0,error:{message:({params:{min:e,max:t}})=>void 0===t?n.str`must contain at least ${e} valid item(s)`:n.str`must contain at least ${e} and no more than ${t} valid item(s)`,params:({params:{min:e,max:t}})=>void 0===t?n._`{minContains: ${e}}`:n._`{minContains: ${e}, maxContains: ${t}}`},code(e){const{gen:t,schema:r,parentSchema:a,data:s,it:i}=e;let c,l;const{minContains:d,maxContains:u}=a;i.opts.next?(c=void 0===d?1:d,l=u):c=1;const f=t.const("len",n._`${s}.length`);if(e.setParams({min:c,max:l}),void 0===l&&0===c)return void(0,o.checkStrictMode)(i,'"minContains" == 0 without "maxContains": "contains" keyword ignored');if(void 0!==l&&c>l)return(0,o.checkStrictMode)(i,'"minContains" > "maxContains" is always invalid'),void 
e.fail();if((0,o.alwaysValidSchema)(i,r)){let t=n._`${f} >= ${c}`;return void 0!==l&&(t=n._`${t} && ${f} <= ${l}`),void e.pass(t)}i.items=!0;const p=t.name("valid");function m(){const e=t.name("_valid"),r=t.let("count",0);h(e,(()=>t.if(e,(()=>function(e){t.code(n._`${e}++`),void 0===l?t.if(n._`${e} >= ${c}`,(()=>t.assign(p,!0).break())):(t.if(n._`${e} > ${l}`,(()=>t.assign(p,!1).break())),1===c?t.assign(p,!0):t.if(n._`${e} >= ${c}`,(()=>t.assign(p,!0))))}(r)))))}function h(r,n){t.forRange("i",0,f,(t=>{e.subschema({keyword:"contains",dataProp:t,dataPropType:o.Type.Num,compositeRule:!0},r),n()}))}void 0===l&&1===c?h(p,(()=>t.if(p,(()=>t.break())))):0===c?(t.let(p,!0),void 0!==l&&t.if(n._`${s}.length > 0`,m)):(t.let(p,!1),m()),e.result(p,(()=>e.reset()))}};t.default=a},83025:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.validateSchemaDeps=t.validatePropertyDeps=t.error=void 0;const n=r(99029),o=r(94227),a=r(15765);t.error={message:({params:{property:e,depsCount:t,deps:r}})=>{const o=1===t?"property":"properties";return n.str`must have ${o} ${r} when property ${e} is present`},params:({params:{property:e,depsCount:t,deps:r,missingProperty:o}})=>n._`{property: ${e}, - missingProperty: ${o}, - depsCount: ${t}, - deps: ${r}}`};const s={keyword:"dependencies",type:"object",schemaType:"object",error:t.error,code(e){const[t,r]=function({schema:e}){const t={},r={};for(const n in e){if("__proto__"===n)continue;(Array.isArray(e[n])?t:r)[n]=e[n]}return[t,r]}(e);i(e,t),c(e,r)}};function i(e,t=e.schema){const{gen:r,data:o,it:s}=e;if(0===Object.keys(t).length)return;const i=r.let("missing");for(const c in t){const l=t[c];if(0===l.length)continue;const d=(0,a.propertyInData)(r,o,c,s.opts.ownProperties);e.setParams({property:c,depsCount:l.length,deps:l.join(", ")}),s.allErrors?r.if(d,(()=>{for(const t of l)(0,a.checkReportMissingProp)(e,t)})):(r.if(n._`${d} && (${(0,a.checkMissingProp)(e,l,i)})`),(0,a.reportMissingProp)(e,i),r.else())}}function c(e,t=e.schema){const{gen:r,data:n,keyword:s,it:i}=e,c=r.name("valid");for(const l in t)(0,o.alwaysValidSchema)(i,t[l])||(r.if((0,a.propertyInData)(r,n,l,i.opts.ownProperties),(()=>{const t=e.subschema({keyword:s,schemaProp:l},c);e.mergeValidEvaluated(t,c)}),(()=>r.var(c,!0))),e.ok(c))}t.validatePropertyDeps=i,t.validateSchemaDeps=c,t.default=s},23620:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});const n=r(99029),o=r(94227),a={keyword:"if",schemaType:["object","boolean"],trackErrors:!0,error:{message:({params:e})=>n.str`must match "${e.ifClause}" schema`,params:({params:e})=>n._`{failingKeyword: ${e.ifClause}}`},code(e){const{gen:t,parentSchema:r,it:a}=e;void 0===r.then&&void 0===r.else&&(0,o.checkStrictMode)(a,'"if" without "then" and "else" is ignored');const i=s(a,"then"),c=s(a,"else");if(!i&&!c)return;const l=t.let("valid",!0),d=t.name("_valid");if(function(){const t=e.subschema({keyword:"if",compositeRule:!0,createErrors:!1,allErrors:!1},d);e.mergeEvaluated(t)}(),e.reset(),i&&c){const r=t.let("ifClause");e.setParams({ifClause:r}),t.if(d,u("then",r),u("else",r))}else i?t.if(d,u("then")):t.if((0,n.not)(d),u("else"));function u(r,o){return()=>{const a=e.subschema({keyword:r},d);t.assign(l,d),e.mergeValidEvaluated(a,l),o?t.assign(o,n._`${r}`):e.setParams({ifClause:r})}}e.pass(l,(()=>e.error(!0)))}};function s(e,t){const r=e.schema[t];return void 0!==r&&!(0,o.alwaysValidSchema)(e,r)}t.default=a},56378:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});const 
n=r(15457),o=r(65354),a=r(20494),s=r(93966),i=r(12661),c=r(83025),l=r(19713),d=r(38660),u=r(40117),f=r(45333),p=r(57923),m=r(16505),h=r(96163),y=r(15844),g=r(23620),v=r(14426);t.default=function(e=!1){const t=[p.default,m.default,h.default,y.default,g.default,v.default,l.default,d.default,c.default,u.default,f.default];return e?t.push(o.default,s.default):t.push(n.default,a.default),t.push(i.default),t}},20494:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.validateTuple=void 0;const n=r(99029),o=r(94227),a=r(15765),s={keyword:"items",type:"array",schemaType:["object","array","boolean"],before:"uniqueItems",code(e){const{schema:t,it:r}=e;if(Array.isArray(t))return i(e,"additionalItems",t);r.items=!0,(0,o.alwaysValidSchema)(r,t)||e.ok((0,a.validateArray)(e))}};function i(e,t,r=e.schema){const{gen:a,parentSchema:s,data:i,keyword:c,it:l}=e;!function(e){const{opts:n,errSchemaPath:a}=l,s=r.length,i=s===e.minItems&&(s===e.maxItems||!1===e[t]);if(n.strictTuples&&!i){const e=`"${c}" is ${s}-tuple, but minItems or maxItems/${t} are not specified or different at path "${a}"`;(0,o.checkStrictMode)(l,e,n.strictTuples)}}(s),l.opts.unevaluated&&r.length&&!0!==l.items&&(l.items=o.mergeEvaluated.items(a,r.length,l.items));const d=a.name("valid"),u=a.const("len",n._`${i}.length`);r.forEach(((t,r)=>{(0,o.alwaysValidSchema)(l,t)||(a.if(n._`${u} > ${r}`,(()=>e.subschema({keyword:c,schemaProp:r,dataProp:r},d))),e.ok(d))}))}t.validateTuple=i,t.default=s},93966:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});const n=r(99029),o=r(94227),a=r(15765),s=r(15457),i={keyword:"items",type:"array",schemaType:["object","boolean"],before:"uniqueItems",error:{message:({params:{len:e}})=>n.str`must NOT have more than ${e} items`,params:({params:{len:e}})=>n._`{limit: ${e}}`},code(e){const{schema:t,parentSchema:r,it:n}=e,{prefixItems:i}=r;n.items=!0,(0,o.alwaysValidSchema)(n,t)||(i?(0,s.validateAdditionalItems)(e,i):e.ok((0,a.validateArray)(e)))}};t.default=i},57923:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});const n=r(94227),o={keyword:"not",schemaType:["object","boolean"],trackErrors:!0,code(e){const{gen:t,schema:r,it:o}=e;if((0,n.alwaysValidSchema)(o,r))return void e.fail();const a=t.name("valid");e.subschema({keyword:"not",compositeRule:!0,createErrors:!1,allErrors:!1},a),e.failResult(a,(()=>e.reset()),(()=>e.error()))},error:{message:"must NOT be valid"}};t.default=o},96163:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});const n=r(99029),o=r(94227),a={keyword:"oneOf",schemaType:"array",trackErrors:!0,error:{message:"must match exactly one schema in oneOf",params:({params:e})=>n._`{passingSchemas: ${e.passing}}`},code(e){const{gen:t,schema:r,parentSchema:a,it:s}=e;if(!Array.isArray(r))throw new Error("ajv implementation error");if(s.opts.discriminator&&a.discriminator)return;const i=r,c=t.let("valid",!1),l=t.let("passing",null),d=t.name("_valid");e.setParams({passing:l}),t.block((function(){i.forEach(((r,a)=>{let i;(0,o.alwaysValidSchema)(s,r)?t.var(d,!0):i=e.subschema({keyword:"oneOf",schemaProp:a,compositeRule:!0},d),a>0&&t.if(n._`${d} && ${c}`).assign(c,!1).assign(l,n._`[${l}, ${a}]`).else(),t.if(d,(()=>{t.assign(c,!0),t.assign(l,a),i&&e.mergeEvaluated(i,n.Name)}))}))})),e.result(c,(()=>e.reset()),(()=>e.error(!0)))}};t.default=a},45333:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});const 
n=r(15765),o=r(99029),a=r(94227),s=r(94227),i={keyword:"patternProperties",type:"object",schemaType:"object",code(e){const{gen:t,schema:r,data:i,parentSchema:c,it:l}=e,{opts:d}=l,u=(0,n.allSchemaProperties)(r),f=u.filter((e=>(0,a.alwaysValidSchema)(l,r[e])));if(0===u.length||f.length===u.length&&(!l.opts.unevaluated||!0===l.props))return;const p=d.strictSchema&&!d.allowMatchingProperties&&c.properties,m=t.name("valid");!0===l.props||l.props instanceof o.Name||(l.props=(0,s.evaluatedPropsToName)(t,l.props));const{props:h}=l;function y(e){for(const t in p)new RegExp(e).test(t)&&(0,a.checkStrictMode)(l,`property ${t} matches pattern ${e} (use allowMatchingProperties)`)}function g(r){t.forIn("key",i,(a=>{t.if(o._`${(0,n.usePattern)(e,r)}.test(${a})`,(()=>{const n=f.includes(r);n||e.subschema({keyword:"patternProperties",schemaProp:r,dataProp:a,dataPropType:s.Type.Str},m),l.opts.unevaluated&&!0!==h?t.assign(o._`${h}[${a}]`,!0):n||l.allErrors||t.if((0,o.not)(m),(()=>t.break()))}))}))}!function(){for(const e of u)p&&y(e),l.allErrors?g(e):(t.var(m,!0),g(e),t.if(m))}()}};t.default=i},65354:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});const n=r(20494),o={keyword:"prefixItems",type:"array",schemaType:["array"],before:"uniqueItems",code:e=>(0,n.validateTuple)(e,"items")};t.default=o},40117:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});const n=r(62586),o=r(15765),a=r(94227),s=r(38660),i={keyword:"properties",type:"object",schemaType:"object",code(e){const{gen:t,schema:r,parentSchema:i,data:c,it:l}=e;"all"===l.opts.removeAdditional&&void 0===i.additionalProperties&&s.default.code(new n.KeywordCxt(l,s.default,"additionalProperties"));const d=(0,o.allSchemaProperties)(r);for(const n of d)l.definedProperties.add(n);l.opts.unevaluated&&d.length&&!0!==l.props&&(l.props=a.mergeEvaluated.props(t,(0,a.toHash)(d),l.props));const u=d.filter((e=>!(0,a.alwaysValidSchema)(l,r[e])));if(0===u.length)return;const f=t.name("valid");for(const n of u)p(n)?m(n):(t.if((0,o.propertyInData)(t,c,n,l.opts.ownProperties)),m(n),l.allErrors||t.else().var(f,!0),t.endIf()),e.it.definedProperties.add(n),e.ok(f);function p(e){return l.opts.useDefaults&&!l.compositeRule&&void 0!==r[e].default}function m(t){e.subschema({keyword:"properties",schemaProp:t,dataProp:t},f)}}};t.default=i},19713:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});const n=r(99029),o=r(94227),a={keyword:"propertyNames",type:"object",schemaType:["object","boolean"],error:{message:"property name must be valid",params:({params:e})=>n._`{propertyName: ${e.propertyName}}`},code(e){const{gen:t,schema:r,data:a,it:s}=e;if((0,o.alwaysValidSchema)(s,r))return;const i=t.name("valid");t.forIn("key",a,(r=>{e.setParams({propertyName:r}),e.subschema({keyword:"propertyNames",data:r,dataTypes:["string"],propertyName:r,compositeRule:!0},i),t.if((0,n.not)(i),(()=>{e.error(!0),s.allErrors||t.break()}))})),e.ok(i)}};t.default=a},14426:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});const n=r(94227),o={keyword:["then","else"],schemaType:["object","boolean"],code({keyword:e,parentSchema:t,it:r}){void 0===t.if&&(0,n.checkStrictMode)(r,`"${e}" without "if" is ignored`)}};t.default=o},15765:(e,t,r)=>{"use 
strict";Object.defineProperty(t,"__esModule",{value:!0}),t.validateUnion=t.validateArray=t.usePattern=t.callValidateCode=t.schemaProperties=t.allSchemaProperties=t.noPropertyInData=t.propertyInData=t.isOwnProperty=t.hasPropFunc=t.reportMissingProp=t.checkMissingProp=t.checkReportMissingProp=void 0;const n=r(99029),o=r(94227),a=r(42023),s=r(94227);function i(e){return e.scopeValue("func",{ref:Object.prototype.hasOwnProperty,code:n._`Object.prototype.hasOwnProperty`})}function c(e,t,r){return n._`${i(e)}.call(${t}, ${r})`}function l(e,t,r,o){const a=n._`${t}${(0,n.getProperty)(r)} === undefined`;return o?(0,n.or)(a,(0,n.not)(c(e,t,r))):a}function d(e){return e?Object.keys(e).filter((e=>"__proto__"!==e)):[]}t.checkReportMissingProp=function(e,t){const{gen:r,data:o,it:a}=e;r.if(l(r,o,t,a.opts.ownProperties),(()=>{e.setParams({missingProperty:n._`${t}`},!0),e.error()}))},t.checkMissingProp=function({gen:e,data:t,it:{opts:r}},o,a){return(0,n.or)(...o.map((o=>(0,n.and)(l(e,t,o,r.ownProperties),n._`${a} = ${o}`))))},t.reportMissingProp=function(e,t){e.setParams({missingProperty:t},!0),e.error()},t.hasPropFunc=i,t.isOwnProperty=c,t.propertyInData=function(e,t,r,o){const a=n._`${t}${(0,n.getProperty)(r)} !== undefined`;return o?n._`${a} && ${c(e,t,r)}`:a},t.noPropertyInData=l,t.allSchemaProperties=d,t.schemaProperties=function(e,t){return d(t).filter((r=>!(0,o.alwaysValidSchema)(e,t[r])))},t.callValidateCode=function({schemaCode:e,data:t,it:{gen:r,topSchemaRef:o,schemaPath:s,errorPath:i},it:c},l,d,u){const f=u?n._`${e}, ${t}, ${o}${s}`:t,p=[[a.default.instancePath,(0,n.strConcat)(a.default.instancePath,i)],[a.default.parentData,c.parentData],[a.default.parentDataProperty,c.parentDataProperty],[a.default.rootData,a.default.rootData]];c.opts.dynamicRef&&p.push([a.default.dynamicAnchors,a.default.dynamicAnchors]);const m=n._`${f}, ${r.object(...p)}`;return d!==n.nil?n._`${l}.call(${d}, ${m})`:n._`${l}(${m})`};const u=n._`new RegExp`;t.usePattern=function({gen:e,it:{opts:t}},r){const o=t.unicodeRegExp?"u":"",{regExp:a}=t.code,i=a(r,o);return e.scopeValue("pattern",{key:i.toString(),ref:i,code:n._`${"new RegExp"===a.code?u:(0,s.useFunc)(e,a)}(${r}, ${o})`})},t.validateArray=function(e){const{gen:t,data:r,keyword:a,it:s}=e,i=t.name("valid");if(s.allErrors){const e=t.let("valid",!0);return c((()=>t.assign(e,!1))),e}return t.var(i,!0),c((()=>t.break())),i;function c(s){const c=t.const("len",n._`${r}.length`);t.forRange("i",0,c,(r=>{e.subschema({keyword:a,dataProp:r,dataPropType:o.Type.Num},i),t.if((0,n.not)(i),s)}))}},t.validateUnion=function(e){const{gen:t,schema:r,keyword:a,it:s}=e;if(!Array.isArray(r))throw new Error("ajv implementation error");if(r.some((e=>(0,o.alwaysValidSchema)(s,e)))&&!s.opts.unevaluated)return;const i=t.let("valid",!1),c=t.name("_valid");t.block((()=>r.forEach(((r,o)=>{const s=e.subschema({keyword:a,schemaProp:o,compositeRule:!0},c);t.assign(i,n._`${i} || ${c}`);e.mergeValidEvaluated(s,c)||t.if((0,n.not)(i))})))),e.result(i,(()=>e.reset()),(()=>e.error(!0)))}},83463:(e,t)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});const r={keyword:"id",code(){throw new Error('NOT SUPPORTED: keyword "id", use "$id" for schema ID')}};t.default=r},72128:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});const n=r(83463),o=r(13693),a=["$schema","$id","$defs","$vocabulary",{keyword:"$comment"},"definitions",n.default,o.default];t.default=a},13693:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.callRef=t.getValidate=void 0;const 
n=r(34551),o=r(15765),a=r(99029),s=r(42023),i=r(73835),c=r(94227),l={keyword:"$ref",schemaType:"string",code(e){const{gen:t,schema:r,it:o}=e,{baseId:s,schemaEnv:c,validateName:l,opts:f,self:p}=o,{root:m}=c;if(("#"===r||"#/"===r)&&s===m.baseId)return function(){if(c===m)return u(e,l,c,c.$async);const r=t.scopeValue("root",{ref:m});return u(e,a._`${r}.validate`,m,m.$async)}();const h=i.resolveRef.call(p,m,s,r);if(void 0===h)throw new n.default(o.opts.uriResolver,s,r);return h instanceof i.SchemaEnv?function(t){const r=d(e,t);u(e,r,t,t.$async)}(h):function(n){const o=t.scopeValue("schema",!0===f.code.source?{ref:n,code:(0,a.stringify)(n)}:{ref:n}),s=t.name("valid"),i=e.subschema({schema:n,dataTypes:[],schemaPath:a.nil,topSchemaRef:o,errSchemaPath:r},s);e.mergeEvaluated(i),e.ok(s)}(h)}};function d(e,t){const{gen:r}=e;return t.validate?r.scopeValue("validate",{ref:t.validate}):a._`${r.scopeValue("wrapper",{ref:t})}.validate`}function u(e,t,r,n){const{gen:i,it:l}=e,{allErrors:d,schemaEnv:u,opts:f}=l,p=f.passContext?s.default.this:a.nil;function m(e){const t=a._`${e}.errors`;i.assign(s.default.vErrors,a._`${s.default.vErrors} === null ? ${t} : ${s.default.vErrors}.concat(${t})`),i.assign(s.default.errors,a._`${s.default.vErrors}.length`)}function h(e){var t;if(!l.opts.unevaluated)return;const n=null===(t=null===r||void 0===r?void 0:r.validate)||void 0===t?void 0:t.evaluated;if(!0!==l.props)if(n&&!n.dynamicProps)void 0!==n.props&&(l.props=c.mergeEvaluated.props(i,n.props,l.props));else{const t=i.var("props",a._`${e}.evaluated.props`);l.props=c.mergeEvaluated.props(i,t,l.props,a.Name)}if(!0!==l.items)if(n&&!n.dynamicItems)void 0!==n.items&&(l.items=c.mergeEvaluated.items(i,n.items,l.items));else{const t=i.var("items",a._`${e}.evaluated.items`);l.items=c.mergeEvaluated.items(i,t,l.items,a.Name)}}n?function(){if(!u.$async)throw new Error("async schema referenced by sync schema");const r=i.let("valid");i.try((()=>{i.code(a._`await ${(0,o.callValidateCode)(e,t,p)}`),h(t),d||i.assign(r,!0)}),(e=>{i.if(a._`!(${e} instanceof ${l.ValidationError})`,(()=>i.throw(e))),m(e),d||i.assign(r,!1)})),e.ok(r)}():e.result((0,o.callValidateCode)(e,t,p),(()=>h(t)),(()=>m(t)))}t.getValidate=d,t.callRef=u,t.default=l},36653:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});const n=r(99029),o=r(97652),a=r(73835),s=r(94227),i={keyword:"discriminator",type:"object",schemaType:"object",error:{message:({params:{discrError:e,tagName:t}})=>e===o.DiscrError.Tag?`tag "${t}" must be string`:`value of tag "${t}" must be in oneOf`,params:({params:{discrError:e,tag:t,tagName:r}})=>n._`{error: ${e}, tag: ${r}, tagValue: ${t}}`},code(e){const{gen:t,data:r,schema:i,parentSchema:c,it:l}=e,{oneOf:d}=c;if(!l.opts.discriminator)throw new Error("discriminator: requires discriminator option");const u=i.propertyName;if("string"!=typeof u)throw new Error("discriminator: requires propertyName");if(i.mapping)throw new Error("discriminator: mapping is not supported");if(!d)throw new Error("discriminator: requires oneOf keyword");const f=t.let("valid",!1),p=t.const("tag",n._`${r}${(0,n.getProperty)(u)}`);function m(r){const o=t.name("valid"),a=e.subschema({keyword:"oneOf",schemaProp:r},o);return e.mergeEvaluated(a,n.Name),o}t.if(n._`typeof ${p} == "string"`,(()=>function(){const r=function(){var e;const t={},r=o(c);let n=!0;for(let c=0;ce.error(!1,{discrError:o.DiscrError.Tag,tag:p,tagName:u}))),e.ok(f)}};t.default=i},97652:(e,t)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.DiscrError=void 
0,function(e){e.Tag="tag",e.Mapping="mapping"}(t.DiscrError||(t.DiscrError={}))},86144:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});const n=r(72128),o=r(67060),a=r(56378),s=r(97532),i=r(69857),c=[n.default,o.default,(0,a.default)(),s.default,i.metadataVocabulary,i.contentVocabulary];t.default=c},94737:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});const n=r(99029),o={keyword:"format",type:["number","string"],schemaType:"string",$data:!0,error:{message:({schemaCode:e})=>n.str`must match format "${e}"`,params:({schemaCode:e})=>n._`{format: ${e}}`},code(e,t){const{gen:r,data:o,$data:a,schema:s,schemaCode:i,it:c}=e,{opts:l,errSchemaPath:d,schemaEnv:u,self:f}=c;l.validateFormats&&(a?function(){const a=r.scopeValue("formats",{ref:f.formats,code:l.code.formats}),s=r.const("fDef",n._`${a}[${i}]`),c=r.let("fType"),d=r.let("format");r.if(n._`typeof ${s} == "object" && !(${s} instanceof RegExp)`,(()=>r.assign(c,n._`${s}.type || "string"`).assign(d,n._`${s}.validate`)),(()=>r.assign(c,n._`"string"`).assign(d,s))),e.fail$data((0,n.or)(!1===l.strictSchema?n.nil:n._`${i} && !${d}`,function(){const e=u.$async?n._`(${s}.async ? await ${d}(${o}) : ${d}(${o}))`:n._`${d}(${o})`,r=n._`(typeof ${d} == "function" ? ${e} : ${d}.test(${o}))`;return n._`${d} && ${d} !== true && ${c} === ${t} && !${r}`}()))}():function(){const a=f.formats[s];if(!a)return void function(){if(!1===l.strictSchema)return void f.logger.warn(e());throw new Error(e());function e(){return`unknown format "${s}" ignored in schema at path "${d}"`}}();if(!0===a)return;const[i,c,p]=function(e){const t=e instanceof RegExp?(0,n.regexpCode)(e):l.code.formats?n._`${l.code.formats}${(0,n.getProperty)(s)}`:void 0,o=r.scopeValue("formats",{key:s,ref:e,code:t});if("object"==typeof e&&!(e instanceof RegExp))return[e.type||"string",e.validate,n._`${o}.validate`];return["string",e,o]}(a);i===t&&e.pass(function(){if("object"==typeof a&&!(a instanceof RegExp)&&a.async){if(!u.$async)throw new Error("async format in sync schema");return n._`await ${p}(${o})`}return"function"==typeof c?n._`${p}(${o})`:n._`${p}.test(${o})`}())}())}};t.default=o},97532:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});const n=[r(94737).default];t.default=n},69857:(e,t)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.contentVocabulary=t.metadataVocabulary=void 0,t.metadataVocabulary=["title","description","default","deprecated","readOnly","writeOnly","examples"],t.contentVocabulary=["contentMediaType","contentEncoding","contentSchema"]},27935:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});const n=r(99029),o=r(94227),a=r(76250),s={keyword:"const",$data:!0,error:{message:"must be equal to constant",params:({schemaCode:e})=>n._`{allowedValue: ${e}}`},code(e){const{gen:t,data:r,$data:s,schemaCode:i,schema:c}=e;s||c&&"object"==typeof c?e.fail$data(n._`!${(0,o.useFunc)(t,a.default)}(${r}, ${i})`):e.fail(n._`${c} !== ${r}`)}};t.default=s},28643:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});const n=r(99029),o=r(94227),a=r(76250),s={keyword:"enum",schemaType:"array",$data:!0,error:{message:"must be equal to one of the allowed values",params:({schemaCode:e})=>n._`{allowedValues: ${e}}`},code(e){const{gen:t,data:r,$data:s,schema:i,schemaCode:c,it:l}=e;if(!s&&0===i.length)throw new Error("enum must have non-empty array");const d=i.length>=l.opts.loopEnum;let u;const f=()=>null!==u&&void 0!==u?u:u=(0,o.useFunc)(t,a.default);let 
p;if(d||s)p=t.let("valid"),e.block$data(p,(function(){t.assign(p,!1),t.forOf("v",c,(e=>t.if(n._`${f()}(${r}, ${e})`,(()=>t.assign(p,!0).break()))))}));else{if(!Array.isArray(i))throw new Error("ajv implementation error");const e=t.const("vSchema",c);p=(0,n.or)(...i.map(((t,o)=>function(e,t){const o=i[t];return"object"===typeof o&&null!==o?n._`${f()}(${r}, ${e}[${t}])`:n._`${r} === ${o}`}(e,o))))}e.pass(p)}};t.default=s},67060:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});const n=r(75882),o=r(63439),a=r(77307),s=r(90422),i=r(34486),c=r(34003),l=r(61163),d=r(60617),u=r(27935),f=r(28643),p=[n.default,o.default,a.default,s.default,i.default,c.default,l.default,d.default,{keyword:"type",schemaType:["string","array"]},{keyword:"nullable",schemaType:"boolean"},u.default,f.default];t.default=p},61163:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});const n=r(99029),o={keyword:["maxItems","minItems"],type:"array",schemaType:"number",$data:!0,error:{message({keyword:e,schemaCode:t}){const r="maxItems"===e?"more":"fewer";return n.str`must NOT have ${r} than ${t} items`},params:({schemaCode:e})=>n._`{limit: ${e}}`},code(e){const{keyword:t,data:r,schemaCode:o}=e,a="maxItems"===t?n.operators.GT:n.operators.LT;e.fail$data(n._`${r}.length ${a} ${o}`)}};t.default=o},77307:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});const n=r(99029),o=r(94227),a=r(53853),s={keyword:["maxLength","minLength"],type:"string",schemaType:"number",$data:!0,error:{message({keyword:e,schemaCode:t}){const r="maxLength"===e?"more":"fewer";return n.str`must NOT have ${r} than ${t} characters`},params:({schemaCode:e})=>n._`{limit: ${e}}`},code(e){const{keyword:t,data:r,schemaCode:s,it:i}=e,c="maxLength"===t?n.operators.GT:n.operators.LT,l=!1===i.opts.unicode?n._`${r}.length`:n._`${(0,o.useFunc)(e.gen,a.default)}(${r})`;e.fail$data(n._`${l} ${c} ${s}`)}};t.default=s},75882:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});const n=r(99029),o=n.operators,a={maximum:{okStr:"<=",ok:o.LTE,fail:o.GT},minimum:{okStr:">=",ok:o.GTE,fail:o.LT},exclusiveMaximum:{okStr:"<",ok:o.LT,fail:o.GTE},exclusiveMinimum:{okStr:">",ok:o.GT,fail:o.LTE}},s={message:({keyword:e,schemaCode:t})=>n.str`must be ${a[e].okStr} ${t}`,params:({keyword:e,schemaCode:t})=>n._`{comparison: ${a[e].okStr}, limit: ${t}}`},i={keyword:Object.keys(a),type:"number",schemaType:"number",$data:!0,error:s,code(e){const{keyword:t,data:r,schemaCode:o}=e;e.fail$data(n._`${r} ${a[t].fail} ${o} || isNaN(${r})`)}};t.default=i},34486:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});const n=r(99029),o={keyword:["maxProperties","minProperties"],type:"object",schemaType:"number",$data:!0,error:{message({keyword:e,schemaCode:t}){const r="maxProperties"===e?"more":"fewer";return n.str`must NOT have ${r} than ${t} properties`},params:({schemaCode:e})=>n._`{limit: ${e}}`},code(e){const{keyword:t,data:r,schemaCode:o}=e,a="maxProperties"===t?n.operators.GT:n.operators.LT;e.fail$data(n._`Object.keys(${r}).length ${a} ${o}`)}};t.default=o},63439:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});const n=r(99029),o={keyword:"multipleOf",type:"number",schemaType:"number",$data:!0,error:{message:({schemaCode:e})=>n.str`must be multiple of ${e}`,params:({schemaCode:e})=>n._`{multipleOf: ${e}}`},code(e){const{gen:t,data:r,schemaCode:o,it:a}=e,s=a.opts.multipleOfPrecision,i=t.let("res"),c=s?n._`Math.abs(Math.round(${i}) - ${i}) > 1e-${s}`:n._`${i} !== 
parseInt(${i})`;e.fail$data(n._`(${o} === 0 || (${i} = ${r}/${o}, ${c}))`)}};t.default=o},90422:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});const n=r(15765),o=r(99029),a={keyword:"pattern",type:"string",schemaType:"string",$data:!0,error:{message:({schemaCode:e})=>o.str`must match pattern "${e}"`,params:({schemaCode:e})=>o._`{pattern: ${e}}`},code(e){const{data:t,$data:r,schema:a,schemaCode:s,it:i}=e,c=i.opts.unicodeRegExp?"u":"",l=r?o._`(new RegExp(${s}, ${c}))`:(0,n.usePattern)(e,a);e.fail$data(o._`!${l}.test(${t})`)}};t.default=a},34003:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});const n=r(15765),o=r(99029),a=r(94227),s={keyword:"required",type:"object",schemaType:"array",$data:!0,error:{message:({params:{missingProperty:e}})=>o.str`must have required property '${e}'`,params:({params:{missingProperty:e}})=>o._`{missingProperty: ${e}}`},code(e){const{gen:t,schema:r,schemaCode:s,data:i,$data:c,it:l}=e,{opts:d}=l;if(!c&&0===r.length)return;const u=r.length>=d.loopRequired;if(l.allErrors?function(){if(u||c)e.block$data(o.nil,f);else for(const t of r)(0,n.checkReportMissingProp)(e,t)}():function(){const a=t.let("missing");if(u||c){const r=t.let("valid",!0);e.block$data(r,(()=>function(r,a){e.setParams({missingProperty:r}),t.forOf(r,s,(()=>{t.assign(a,(0,n.propertyInData)(t,i,r,d.ownProperties)),t.if((0,o.not)(a),(()=>{e.error(),t.break()}))}),o.nil)}(a,r))),e.ok(r)}else t.if((0,n.checkMissingProp)(e,r,a)),(0,n.reportMissingProp)(e,a),t.else()}(),d.strictRequired){const t=e.parentSchema.properties,{definedProperties:n}=e.it;for(const e of r)if(void 0===(null===t||void 0===t?void 0:t[e])&&!n.has(e)){const t=`required property "${e}" is not defined at "${l.schemaEnv.baseId+l.errSchemaPath}" (strictRequired)`;(0,a.checkStrictMode)(l,t,l.opts.strictRequired)}}function f(){t.forOf("prop",s,(r=>{e.setParams({missingProperty:r}),t.if((0,n.noPropertyInData)(t,i,r,d.ownProperties),(()=>e.error()))}))}}};t.default=s},60617:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});const n=r(10208),o=r(99029),a=r(94227),s=r(76250),i={keyword:"uniqueItems",type:"array",schemaType:"boolean",$data:!0,error:{message:({params:{i:e,j:t}})=>o.str`must NOT have duplicate items (items ## ${t} and ${e} are identical)`,params:({params:{i:e,j:t}})=>o._`{i: ${e}, j: ${t}}`},code(e){const{gen:t,data:r,$data:i,schema:c,parentSchema:l,schemaCode:d,it:u}=e;if(!i&&!c)return;const f=t.let("valid"),p=l.items?(0,n.getSchemaTypes)(l.items):[];function m(a,s){const i=t.name("item"),c=(0,n.checkDataTypes)(p,i,u.opts.strictNumbers,n.DataType.Wrong),l=t.const("indices",o._`{}`);t.for(o._`;${a}--;`,(()=>{t.let(i,o._`${r}[${a}]`),t.if(c,o._`continue`),p.length>1&&t.if(o._`typeof ${i} == "string"`,o._`${i} += "_"`),t.if(o._`typeof ${l}[${i}] == "number"`,(()=>{t.assign(s,o._`${l}[${i}]`),e.error(),t.assign(f,!1).break()})).code(o._`${l}[${i}] = ${a}`)}))}function h(n,i){const c=(0,a.useFunc)(t,s.default),l=t.name("outer");t.label(l).for(o._`;${n}--;`,(()=>t.for(o._`${i} = ${n}; ${i}--;`,(()=>t.if(o._`${c}(${r}[${n}], ${r}[${i}])`,(()=>{e.error(),t.assign(f,!1).break(l)}))))))}e.block$data(f,(function(){const n=t.let("i",o._`${r}.length`),a=t.let("j");e.setParams({i:n,j:a}),t.assign(f,!0),t.if(o._`${n} > 1`,(()=>(p.length>0&&!p.some((e=>"object"===e||"array"===e))?m:h)(n,a)))}),o._`${d} === false`),e.ok(f)}};t.default=i},6641:(e,t,r)=>{"use strict";var n=r(7800),o=r(96552),a=r(82986),s=Math.pow(2,31)-1;function i(e,t){var r,n=1;if(0===e)return t;if(0===t)return 
e;for(;e%2===0&&t%2===0;)e/=2,t/=2,n*=2;for(;e%2===0;)e/=2;for(;t;){for(;t%2===0;)t/=2;e>t&&(r=t,t=e,e=r),t-=e}return n*e}function c(e,t){var r,n=0;if(0===e)return t;if(0===t)return e;for(;0===(1&e)&&0===(1&t);)e>>>=1,t>>>=1,n++;for(;0===(1&e);)e>>>=1;for(;t;){for(;0===(1&t);)t>>>=1;e>t&&(r=t,t=e,e=r),t-=e}return e<1){if(r=e[0],t=e[1],!a(t))throw new TypeError("gcd()::invalid input argument. Accessor must be a function. Value: `"+t+"`.")}else r=e[0]}if((l=r.length)<2)return null;if(t){for(d=new Array(l),f=0;f{"use strict";var n=r(6641),o=r(7800),a=r(96552),s=r(82986);e.exports=function(){var e,t,r,i,c,l,d,u=arguments.length;for(e=new Array(u),d=0;d1){if(r=e[0],t=e[1],!s(t))throw new TypeError("lcm()::invalid input argument. Accessor must be a function. Value: `"+t+"`.")}else r=e[0]}if((i=r.length)<2)return null;if(t){for(c=new Array(i),d=0;d{"use strict";e.exports=function e(t,r){if(t===r)return!0;if(t&&r&&"object"==typeof t&&"object"==typeof r){if(t.constructor!==r.constructor)return!1;var n,o,a;if(Array.isArray(t)){if((n=t.length)!=r.length)return!1;for(o=n;0!==o--;)if(!e(t[o],r[o]))return!1;return!0}if(t.constructor===RegExp)return t.source===r.source&&t.flags===r.flags;if(t.valueOf!==Object.prototype.valueOf)return t.valueOf()===r.valueOf();if(t.toString!==Object.prototype.toString)return t.toString()===r.toString();if((n=(a=Object.keys(t)).length)!==Object.keys(r).length)return!1;for(o=n;0!==o--;)if(!Object.prototype.hasOwnProperty.call(r,a[o]))return!1;for(o=n;0!==o--;){var s=a[o];if(!e(t[s],r[s]))return!1}return!0}return t!==t&&r!==r}},90370:(e,t,r)=>{var n=r(2404),o=r(33031),a=r(63375),s=r(9063),i=r(84684),c=r(80191),l=r(11331),d=r(53812),u=e=>Array.isArray(e)?e:[e],f=e=>void 0===e,p=e=>l(e)||Array.isArray(e)?Object.keys(e):[],m=(e,t)=>e.hasOwnProperty(t),h=e=>o(a(e)),y=e=>f(e)||Array.isArray(e)&&0===e.length,g=(e,t,r,n)=>t&&m(t,r)&&e&&m(e,r)&&n(e[r],t[r]),v=(e,t)=>f(e)&&0===t||f(t)&&0===e||n(e,t),$=e=>f(e)||n(e,{})||!0===e,b=e=>f(e)||n(e,{}),w=e=>f(e)||l(e)||!0===e||!1===e;function S(e,t){return!(!y(e)||!y(t))||n(h(e),h(t))}function _(e,t,r,o){var s=a(p(e).concat(p(t)));return!(!b(e)||!b(t))||(!b(e)||!p(t).length)&&((!b(t)||!p(e).length)&&s.every((function(r){var a=e[r],s=t[r];return Array.isArray(a)&&Array.isArray(s)?n(h(e),h(t)):!(Array.isArray(a)&&!Array.isArray(s))&&(!(Array.isArray(s)&&!Array.isArray(a))&&g(e,t,r,o))})))}function x(e,t,r,n){var o=s(e,n),a=s(t,n);return c(o,a,n).length===Math.max(o.length,a.length)}var E={title:n,uniqueItems:(e,t)=>f(e)&&!1===t||f(t)&&!1===e||n(e,t),minLength:v,minItems:v,minProperties:v,required:S,enum:S,type:function(e,t){return e=u(e),t=u(t),n(h(e),h(t))},items:function(e,t,r,o){return l(e)&&l(t)?o(e,t):Array.isArray(e)&&Array.isArray(t)?_(e,t,0,o):n(e,t)},anyOf:x,allOf:x,oneOf:x,properties:_,patternProperties:_,dependencies:_},k=["properties","patternProperties","dependencies","uniqueItems","minLength","minItems","minProperties","required"],j=["additionalProperties","additionalItems","contains","propertyNames","not"];e.exports=function e(t,r,o){if(o=i(o,{ignore:[]}),$(t)&&$(r))return!0;if(!w(t)||!w(r))throw new Error("Either of the values are not a JSON schema.");if(t===r)return!0;if(d(t)&&d(r))return t===r;if(void 0===t&&!1===r||void 0===r&&!1===t)return!1;if(f(t)&&!f(r)||!f(t)&&f(r))return!1;var s=a(Object.keys(t).concat(Object.keys(r)));if(o.ignore.length&&(s=s.filter((e=>-1===o.ignore.indexOf(e)))),!s.length)return!0;function c(t,r){return e(t,r,o)}return s.every((function(a){var s=t[a],i=r[a];if(-1!==j.indexOf(a))return e(s,i,o);var 
l=E[a];if(l||(l=n),n(s,i))return!0;if(-1===k.indexOf(a)&&(!m(t,a)&&m(r,a)||m(t,a)&&!m(r,a)))return s===i;var u=l(s,i,a,c);if(!d(u))throw new Error("Comparer must return true or false");return u}))}},5109:(e,t,r)=>{const n=r(35970),o=r(3176),a=r(11331),s=r(63375),i=r(9063),c=r(91648);const l=(e,t)=>Object.prototype.hasOwnProperty.call(e,t),d=e=>a(e)||Array.isArray(e)?Object.keys(e):[],u=e=>!d(e).length&&!1!==e&&!0!==e;e.exports={allUniqueKeys:e=>s(o(e.map(d))),deleteUndefinedProps:function(e){for(const t in e)l(e,t)&&u(e[t])&&delete e[t];return e},getValues:(e,t)=>e.map((e=>e&&e[t])),has:l,isEmptySchema:u,isSchema:e=>a(e)||!0===e||!1===e,keys:d,notUndefined:e=>void 0!==e,uniqWith:i,withoutArr:(e,...t)=>c.apply(null,[e].concat(n(t)))}},11051:(e,t,r)=>{const n=r(90370),o=r(39754),{allUniqueKeys:a,deleteUndefinedProps:s,has:i,isSchema:c,notUndefined:l,uniqWith:d}=r(5109);function u(e,t,r){return a(r).reduce((function(r,o){const a=function(e,t){return e.map((function(e){if(e){if(!Array.isArray(e.items))return e.items;{const r=e.items[t];if(c(r))return r;if(i(e,"additionalItems"))return e.additionalItems}}}))}(e,o),s=d(a.filter(l),n);return r[o]=t(s,o),r}),[])}e.exports={keywords:["items","additionalItems"],resolver(e,t,r){const n=e.map((e=>e.items)),a=n.filter(l),i={};let d;var f;return a.every(c)?i.items=r.items(n):i.items=u(e,r.items,n),a.every(Array.isArray)?d=e.map((e=>e.additionalItems)):a.some(Array.isArray)&&(d=e.map((function(e){if(e)return Array.isArray(e.items)?e.additionalItems:e.items}))),d&&(i.additionalItems=r.additionalItems(d)),!1===i.additionalItems&&Array.isArray(i.items)&&(f=i.items,o(f,(function(e,t){!1===e&&f.splice(t,1)}))),s(i)}}},7894:(e,t,r)=>{const n=r(90370),o=r(39754),{allUniqueKeys:a,deleteUndefinedProps:s,getValues:i,keys:c,notUndefined:l,uniqWith:d,withoutArr:u}=r(5109);function f(e,t){return a(e).reduce((function(r,o){const a=i(e,o),s=d(a.filter(l),n);return r[o]=t(s,o),r}),{})}e.exports={keywords:["properties","patternProperties","additionalProperties"],resolver(e,t,r,n){n.ignoreAdditionalProperties||(e.forEach((function(t){const n=e.filter((e=>e!==t)),o=c(t.properties),a=c(t.patternProperties).map((e=>new RegExp(e)));n.forEach((function(e){const n=c(e.properties),s=n.filter((e=>a.some((t=>t.test(e)))));u(n,o,s).forEach((function(n){e.properties[n]=r.properties([e.properties[n],t.additionalProperties],n)}))}))})),e.forEach((function(t){const r=e.filter((e=>e!==t)),n=c(t.patternProperties);!1===t.additionalProperties&&r.forEach((function(e){const t=c(e.patternProperties);u(t,n).forEach((t=>delete e.patternProperties[t]))}))})));const a={additionalProperties:r.additionalProperties(e.map((e=>e.additionalProperties))),patternProperties:f(e.map((e=>e.patternProperties)),r.patternProperties),properties:f(e.map((e=>e.properties)),r.properties)};var i;return!1===a.additionalProperties&&o(i=a.properties,(function(e,t){!1===e&&delete i[t]})),s(a)}}},33978:(e,t,r)=>{const n=r(88055),o=r(90370),a=r(78867),s=r(74354),i=r(35970),c=r(3176),l=r(5287),d=r(80191),u=r(2404),f=r(11331),p=r(12358),m=r(33031),h=r(63375),y=r(9063),g=r(7894),v=r(11051),$=(e,t)=>-1!==e.indexOf(t),b=e=>f(e)||!0===e||!1===e,w=e=>!1===e,S=e=>!0===e,_=(e,t,r)=>r(e),x=e=>m(h(c(e))),E=e=>void 0!==e,k=e=>h(c(e.map(A))),j=e=>e[0],C=e=>Math.max.apply(Math,e),P=e=>Math.min.apply(Math,e);function O(e){let{allOf:t=[],...r}=e;return r=f(e)?r:e,[r,...t.map(O)]}function N(e,t){return e.map((e=>e&&e[t]))}function A(e){return f(e)||Array.isArray(e)?Object.keys(e):[]}function D(e,t){if(t=t||[],!e.length)return t;const 
r=e.slice(0).shift(),n=e.slice(1);return t.length?D(n,i(t.map((e=>r.map((t=>[t].concat(e))))))):D(n,r.map((e=>e)))}function I(e,t){let r;try{r=e.map((function(e){return JSON.stringify(e,null,2)})).join("\n")}catch(n){r=e.join(", ")}throw new Error('Could not resolve values for path:"'+t.join(".")+'". They are probably incompatible. Values: \n'+r)}function F(e,t,r,n,a,s){if(e.length){const i=a.complexResolvers[t];if(!i||!i.resolver)throw new Error("No resolver found for "+t);const c=r.map((t=>e.reduce(((e,r)=>(void 0!==t[r]&&(e[r]=t[r]),e)),{}))),l=y(c,o),d=i.keywords.reduce(((e,t)=>({...e,[t]:(e,r=[])=>n(e,null,s.concat(t,r))})),{}),u=i.resolver(l,s.concat(t),d,a);return f(u)||I(l,s.concat(t)),u}}function T(e){return{required:e}}const R=["properties","patternProperties","definitions","dependencies"],M=["anyOf","oneOf"],U=["additionalProperties","additionalItems","contains","propertyNames","not","items"],V={type(e){if(e.some(Array.isArray)){const t=e.map((function(e){return Array.isArray(e)?e:[e]})),r=l.apply(null,t);if(1===r.length)return r[0];if(r.length>1)return h(r)}},dependencies:(e,t,r)=>k(e).reduce((function(t,n){const a=N(e,n);let s=y(a.filter(E),u);const i=s.filter(Array.isArray);if(i.length){if(i.length===s.length)t[n]=x(s);else{const e=s.filter(b),o=i.map(T);t[n]=r(e.concat(o),n)}return t}return s=y(s,o),t[n]=r(s,n),t}),{}),oneOf(e,t,r){const a=function(e,t){return e.map((function(e,r){try{return t(e,r)}catch(n){return}})).filter(E)}(D(n(e)),r),s=y(a,o);if(s.length)return s},not:e=>({anyOf:e}),pattern:e=>e.map((e=>"(?="+e+")")).join(""),multipleOf(e){let t=e.slice(0),r=1;for(;t.some((e=>!Number.isInteger(e)));)t=t.map((e=>10*e)),r*=10;return a(t)/r},enum(e){const t=d.apply(null,e.concat(u));if(t.length)return m(t)}};V.$id=j,V.$ref=j,V.$schema=j,V.additionalItems=_,V.additionalProperties=_,V.anyOf=V.oneOf,V.contains=_,V.default=j,V.definitions=V.dependencies,V.description=j,V.examples=e=>y(i(e),u),V.exclusiveMaximum=P,V.exclusiveMinimum=C,V.items=v,V.maximum=P,V.maxItems=P,V.maxLength=P,V.maxProperties=P,V.minimum=C,V.minItems=C,V.minLength=C,V.minProperties=C,V.properties=g,V.propertyNames=_,V.required=e=>x(e),V.title=j,V.uniqueItems=e=>e.some(S);const B={properties:g,items:v};function q(e,t,r){r=r||[],t=s(t,{ignoreAdditionalProperties:!1,resolvers:V,complexResolvers:B,deep:!0});const a=Object.entries(t.complexResolvers);const i=function e(s,i,c){s=n(s.filter(E)),c=c||[];const l=f(i)?i:{};if(!s.length)return;if(s.some(w))return!1;if(s.every(S))return!0;s=s.filter(f);const d=k(s);if(t.deep&&$(d,"allOf"))return q({allOf:s},t,r);const u=a.map((([e,t])=>d.filter((e=>t.keywords.includes(e)))));return u.forEach((e=>p(d,e))),d.forEach((function(r){const n=N(s,r),a=y(n.filter(E),function(e){return function(t,r){return o({[e]:t},{[e]:r})}}(r));if(1===a.length&&$(M,r))l[r]=a[0].map((t=>e([t],t)));else if(1!==a.length||$(R,r)||$(U,r)){const n=t.resolvers[r]||t.resolvers.defaultResolver;if(!n)throw new Error("No resolver found for key "+r+". 
You can provide a resolver for this keyword in the options, or provide a default resolver.");const o=(t,n=[])=>e(t,null,c.concat(r,n));l[r]=n(a,c.concat(r),o,t),void 0===l[r]?I(a,c.concat(r)):void 0===l[r]&&delete l[r]}else l[r]=a[0]})),a.reduce(((r,[n,o],a)=>({...r,...F(u[a],n,s,e,t,c)})),l)}(c(O(e)));return i}q.options={resolvers:V},e.exports=q},7106:e=>{"use strict";var t=e.exports=function(e,t,n){"function"==typeof t&&(n=t,t={}),r(t,"function"==typeof(n=t.cb||n)?n:n.pre||function(){},n.post||function(){},e,"",e)};function r(e,n,o,a,s,i,c,l,d,u){if(a&&"object"==typeof a&&!Array.isArray(a)){for(var f in n(a,s,i,c,l,d,u),a){var p=a[f];if(Array.isArray(p)){if(f in t.arrayKeywords)for(var m=0;m{var r=/~/,n=/~[01]/g;function o(e){switch(e){case"~1":return"/";case"~0":return"~"}throw new Error("Invalid tilde escape: "+e)}function a(e){return r.test(e)?e.replace(n,o):e}function s(e){if("string"===typeof e){if(""===(e=e.split("/"))[0])return e;throw new Error("Invalid JSON pointer.")}if(Array.isArray(e)){for(const t of e)if("string"!==typeof t&&"number"!==typeof t)throw new Error("Invalid JSON pointer. Must be of type string or number.");return e}throw new Error("Invalid JSON pointer.")}function i(e,t){if("object"!==typeof e)throw new Error("Invalid input object.");var r=(t=s(t)).length;if(1===r)return e;for(var n=1;ns,"undefined"===typeof e[n]&&(Array.isArray(e)&&"-"===n&&(n=e.length),o&&(""!==t[s]&&t[s]<1/0||"-"===t[s]?e[n]=[]:e[n]={})),!o)break;e=e[n]}var c=e[n];return void 0===r?delete e[n]:e[n]=r,c}(e,t,r)}t.get=i,t.set=c,t.compile=function(e){var t=s(e);return{get:function(e){return i(e,t)},set:function(e,r){return c(e,t,r)}}}},40882:e=>{e.exports=function(e,t,r,n){var o=-1,a=null==e?0:e.length;for(n&&a&&(r=e[++o]);++o{var t=Object.prototype.hasOwnProperty;e.exports=function(e,r){return null!=e&&t.call(e,r)}},12027:e=>{e.exports=function(e,t,r,n){for(var o=r-1,a=e.length;++o{var n=r(38859),o=r(15325),a=r(29905),s=r(34932),i=r(27301),c=r(19219),l=Math.min;e.exports=function(e,t,r){for(var d=r?a:o,u=e[0].length,f=e.length,p=f,m=Array(f),h=1/0,y=[];p--;){var g=e[p];p&&t&&(g=s(g,i(t))),h=l(g.length,h),m[p]=!r&&(t||u>=120&&g.length>=120)?new n(p&&g):void 0}g=e[0];var v=-1,$=m[0];e:for(;++v{var n=r(34932),o=r(47422),a=r(15389),s=r(5128),i=r(73937),c=r(27301),l=r(43714),d=r(83488),u=r(56449);e.exports=function(e,t,r){t=t.length?n(t,(function(e){return u(e)?function(t){return o(t,1===e.length?e[0]:e)}:e})):[d];var f=-1;t=n(t,c(a));var p=s(e,(function(e,r,o){return{criteria:n(t,(function(t){return t(e)})),index:++f,value:e}}));return i(p,(function(e,t){return l(e,t,r)}))}},76001:(e,t,r)=>{var n=r(97420),o=r(80631);e.exports=function(e,t){return n(e,t,(function(t,r){return o(e,r)}))}},97420:(e,t,r)=>{var n=r(47422),o=r(73170),a=r(31769);e.exports=function(e,t,r){for(var s=-1,i=t.length,c={};++s{var n=r(34932),o=r(96131),a=r(12027),s=r(27301),i=r(23007),c=Array.prototype.splice;e.exports=function(e,t,r,l){var d=l?a:o,u=-1,f=t.length,p=e;for(e===t&&(t=i(t)),r&&(p=n(e,s(r)));++u-1;)p!==e&&c.call(p,m,1),c.call(e,m,1);return e}},85558:e=>{e.exports=function(e,t,r,n,o){return o(e,(function(e,o,a){r=n?(n=!1,e):t(r,e,o,a)})),r}},73170:(e,t,r)=>{var n=r(16547),o=r(31769),a=r(30361),s=r(23805),i=r(77797);e.exports=function(e,t,r,c){if(!s(e))return e;for(var l=-1,d=(t=o(t,e)).length,u=d-1,f=e;null!=f&&++l{e.exports=function(e,t){var r=e.length;for(e.sort(t);r--;)e[r]=e[r].value;return e}},3122:(e,t,r)=>{var n=r(83693);e.exports=function(e){return n(e)?e:[]}},53730:(e,t,r)=>{var 
n=r(44394);e.exports=function(e,t){if(e!==t){var r=void 0!==e,o=null===e,a=e===e,s=n(e),i=void 0!==t,c=null===t,l=t===t,d=n(t);if(!c&&!d&&!s&&e>t||s&&i&&l&&!c&&!d||o&&i&&l||!r&&l||!a)return 1;if(!o&&!s&&!d&&e{var n=r(53730);e.exports=function(e,t,r){for(var o=-1,a=e.criteria,s=t.criteria,i=a.length,c=r.length;++o=c?l:l*("desc"==r[o]?-1:1)}return e.index-t.index}},52606:(e,t,r)=>{var n=r(85250),o=r(23805);e.exports=function e(t,r,a,s,i,c){return o(t)&&o(r)&&(c.set(r,t),n(t,r,void 0,e,c),c.delete(r)),t}},84684:(e,t,r)=>{var n=r(69302),o=r(75288),a=r(36800),s=r(37241),i=Object.prototype,c=i.hasOwnProperty,l=n((function(e,t){e=Object(e);var r=-1,n=t.length,l=n>2?t[2]:void 0;for(l&&a(t[0],t[1],l)&&(n=1);++r{var n=r(91033),o=r(69302),a=r(52606),s=r(6924),i=o((function(e){return e.push(void 0,a),n(s,void 0,e)}));e.exports=i},3176:(e,t,r)=>{var n=r(83120),o=1/0;e.exports=function(e){return(null==e?0:e.length)?n(e,o):[]}},39754:(e,t,r)=>{var n=r(83729),o=r(80909),a=r(24066),s=r(56449);e.exports=function(e,t){return(s(e)?n:o)(e,a(t))}},61448:(e,t,r)=>{var n=r(20426),o=r(49326);e.exports=function(e,t){return null!=e&&o(e,t,n)}},5287:(e,t,r)=>{var n=r(34932),o=r(27185),a=r(69302),s=r(3122),i=a((function(e){var t=n(e,s);return t.length&&t[0]===e[0]?o(t):[]}));e.exports=i},80191:(e,t,r)=>{var n=r(34932),o=r(27185),a=r(69302),s=r(3122),i=r(68090),c=a((function(e){var t=i(e),r=n(e,s);return(t="function"==typeof t?t:void 0)&&r.pop(),r.length&&r[0]===e[0]?o(r,void 0,t):[]}));e.exports=c},53812:(e,t,r)=>{var n=r(72552),o=r(40346);e.exports=function(e){return!0===e||!1===e||o(e)&&"[object Boolean]"==n(e)}},29132:(e,t,r)=>{var n=r(60270);e.exports=function(e,t,r){var o=(r="function"==typeof r?r:void 0)?r(e,t):void 0;return void 0===o?n(e,t,void 0,r):!!o}},69843:e=>{e.exports=function(e){return null==e}},98023:(e,t,r)=>{var n=r(72552),o=r(40346);e.exports=function(e){return"number"==typeof e||o(e)&&"[object Number]"==n(e)}},6924:(e,t,r)=>{var n=r(85250),o=r(20999)((function(e,t,r,o){n(e,t,r,o)}));e.exports=o},44383:(e,t,r)=>{var n=r(76001),o=r(38816)((function(e,t){return null==e?{}:n(e,t)}));e.exports=o},12358:(e,t,r)=>{var n=r(21988);e.exports=function(e,t){return e&&e.length&&t&&t.length?n(e,t):e}},40860:(e,t,r)=>{var n=r(40882),o=r(80909),a=r(15389),s=r(85558),i=r(56449);e.exports=function(e,t,r){var c=i(e)?n:s,l=arguments.length<3;return c(e,a(t,4),r,l,o)}},63560:(e,t,r)=>{var n=r(73170);e.exports=function(e,t,r){return null==e?e:n(e,t,r)}},33031:(e,t,r)=>{var n=r(83120),o=r(46155),a=r(69302),s=r(36800),i=a((function(e,t){if(null==e)return[];var r=t.length;return r>1&&s(e,t[0],t[1])?t=[]:r>2&&s(t[0],t[1],t[2])&&(t=[t[0]]),o(e,n(t,1),[])}));e.exports=i},6638:(e,t,r)=>{var n=r(78096),o=r(24066),a=r(61489),s=4294967295,i=Math.min;e.exports=function(e,t){if((e=a(e))<1||e>9007199254740991)return[];var r=s,c=i(e,s);t=o(t),e-=s;for(var l=n(c,t);++r{var n=r(99374),o=1/0;e.exports=function(e){return e?(e=n(e))===o||e===-1/0?17976931348623157e292*(e<0?-1:1):e===e?e:0:0===e?e:0}},61489:(e,t,r)=>{var n=r(17400);e.exports=function(e){var t=n(e),r=t%1;return t===t?r?t-r:t:0}},42072:(e,t,r)=>{var n=r(34932),o=r(23007),a=r(56449),s=r(44394),i=r(61802),c=r(77797),l=r(13222);e.exports=function(e){return a(e)?n(e,c):s(e)?[e]:o(i(l(e)))}},69752:(e,t,r)=>{var n=r(83729),o=r(39344),a=r(30641),s=r(15389),i=r(28879),c=r(56449),l=r(3656),d=r(1882),u=r(23805),f=r(37167);e.exports=function(e,t,r){var p=c(e),m=p||l(e)||f(e);if(t=s(t,4),null==r){var h=e&&e.constructor;r=m?p?new 
h:[]:u(e)&&d(h)?o(i(e)):{}}return(m?n:a)(e,(function(e,n,o){return t(r,e,n,o)})),r}},80299:(e,t,r)=>{var n=r(83120),o=r(69302),a=r(55765),s=r(83693),i=o((function(e){return a(n(e,1,s,!0))}));e.exports=i},63375:(e,t,r)=>{var n=r(55765);e.exports=function(e){return e&&e.length?n(e):[]}},9063:(e,t,r)=>{var n=r(55765);e.exports=function(e,t){return t="function"==typeof t?t:void 0,e&&e.length?n(e,void 0,t):[]}},73357:(e,t,r)=>{var n=r(19931);e.exports=function(e,t){return null==e||n(e,t)}},91648:(e,t,r)=>{var n=r(83915),o=r(69302),a=r(83693),s=o((function(e,t){return a(e)?n(e,t):[]}));e.exports=s},21020:(e,t,r)=>{"use strict";var n=r(96540),o=Symbol.for("react.element"),a=Symbol.for("react.fragment"),s=Object.prototype.hasOwnProperty,i=n.__SECRET_INTERNALS_DO_NOT_USE_OR_YOU_WILL_BE_FIRED.ReactCurrentOwner,c={key:!0,ref:!0,__self:!0,__source:!0};function l(e,t,r){var n,a={},l=null,d=null;for(n in void 0!==r&&(l=""+r),void 0!==t.key&&(l=""+t.key),void 0!==t.ref&&(d=t.ref),t)s.call(t,n)&&!c.hasOwnProperty(n)&&(a[n]=t[n]);if(e&&e.defaultProps)for(n in t=e.defaultProps)void 0===a[n]&&(a[n]=t[n]);return{$$typeof:o,type:e,key:l,ref:d,props:a,_owner:i.current}}t.Fragment=a,t.jsx=l,t.jsxs=l},74848:(e,t,r)=>{"use strict";e.exports=r(21020)},46579:function(e,t){!function(e){"use strict";function t(){for(var e=arguments.length,t=Array(e),r=0;r1){t[0]=t[0].slice(0,-1);for(var n=t.length-1,o=1;o= 0x80 (not a basic code point)","invalid-input":"Invalid input"},E=p-m,k=Math.floor,j=String.fromCharCode;function C(e){throw new RangeError(x[e])}function P(e,t){for(var r=[],n=e.length;n--;)r[n]=t(e[n]);return r}function O(e,t){var r=e.split("@"),n="";return r.length>1&&(n=r[0]+"@",e=r[1]),n+P((e=e.replace(_,".")).split("."),t).join(".")}function N(e){for(var t=[],r=0,n=e.length;r=55296&&o<=56319&&r>1,e+=k(e/t);e>E*h>>1;n+=p)e=k(e/E);return k(n+(E+1)*e/(e+y))},F=function(e){var t=[],r=e.length,n=0,o=$,a=v,s=e.lastIndexOf(b);s<0&&(s=0);for(var i=0;i=128&&C("not-basic"),t.push(e.charCodeAt(i));for(var c=s>0?s+1:0;c=r&&C("invalid-input");var y=A(e.charCodeAt(c++));(y>=p||y>k((f-n)/d))&&C("overflow"),n+=y*d;var g=u<=a?m:u>=a+h?h:u-a;if(yk(f/w)&&C("overflow"),d*=w}var S=t.length+1;a=I(n-l,S,0==l),k(n/S)>f-o&&C("overflow"),o+=k(n/S),n%=S,t.splice(n++,0,o)}return String.fromCodePoint.apply(String,t)},T=function(e){var t=[],r=(e=N(e)).length,n=$,o=0,a=v,s=!0,i=!1,c=void 0;try{for(var l,d=e[Symbol.iterator]();!(s=(l=d.next()).done);s=!0){var u=l.value;u<128&&t.push(j(u))}}catch(W){i=!0,c=W}finally{try{!s&&d.return&&d.return()}finally{if(i)throw c}}var y=t.length,g=y;for(y&&t.push(b);g=n&&Ok((f-o)/A)&&C("overflow"),o+=(w-n)*A,n=w;var F=!0,T=!1,R=void 0;try{for(var M,U=e[Symbol.iterator]();!(F=(M=U.next()).done);F=!0){var V=M.value;if(Vf&&C("overflow"),V==n){for(var B=o,q=p;;q+=p){var z=q<=a?m:q>=a+h?h:q-a;if(B>6|192).toString(16).toUpperCase()+"%"+(63&t|128).toString(16).toUpperCase():"%"+(t>>12|224).toString(16).toUpperCase()+"%"+(t>>6&63|128).toString(16).toUpperCase()+"%"+(63&t|128).toString(16).toUpperCase()}function q(e){for(var t="",r=0,n=e.length;r=194&&o<224){if(n-r>=6){var a=parseInt(e.substr(r+4,2),16);t+=String.fromCharCode((31&o)<<6|63&a)}else t+=e.substr(r,6);r+=6}else if(o>=224){if(n-r>=9){var s=parseInt(e.substr(r+4,2),16),i=parseInt(e.substr(r+7,2),16);t+=String.fromCharCode((15&o)<<12|(63&s)<<6|63&i)}else t+=e.substr(r,9);r+=9}else t+=e.substr(r,3),r+=3}return t}function z(e,t){function r(e){var r=q(e);return r.match(t.UNRESERVED)?r:e}return 
e.scheme&&(e.scheme=String(e.scheme).replace(t.PCT_ENCODED,r).toLowerCase().replace(t.NOT_SCHEME,"")),void 0!==e.userinfo&&(e.userinfo=String(e.userinfo).replace(t.PCT_ENCODED,r).replace(t.NOT_USERINFO,B).replace(t.PCT_ENCODED,o)),void 0!==e.host&&(e.host=String(e.host).replace(t.PCT_ENCODED,r).toLowerCase().replace(t.NOT_HOST,B).replace(t.PCT_ENCODED,o)),void 0!==e.path&&(e.path=String(e.path).replace(t.PCT_ENCODED,r).replace(e.scheme?t.NOT_PATH:t.NOT_PATH_NOSCHEME,B).replace(t.PCT_ENCODED,o)),void 0!==e.query&&(e.query=String(e.query).replace(t.PCT_ENCODED,r).replace(t.NOT_QUERY,B).replace(t.PCT_ENCODED,o)),void 0!==e.fragment&&(e.fragment=String(e.fragment).replace(t.PCT_ENCODED,r).replace(t.NOT_FRAGMENT,B).replace(t.PCT_ENCODED,o)),e}function L(e){return e.replace(/^0*(.*)/,"$1")||"0"}function K(e,t){var r=e.match(t.IPV4ADDRESS)||[],n=d(r,2)[1];return n?n.split(".").map(L).join("."):e}function W(e,t){var r=e.match(t.IPV6ADDRESS)||[],n=d(r,3),o=n[1],a=n[2];if(o){for(var s=o.toLowerCase().split("::").reverse(),i=d(s,2),c=i[0],l=i[1],u=l?l.split(":").map(L):[],f=c.split(":").map(L),p=t.IPV4ADDRESS.test(f[f.length-1]),m=p?7:8,h=f.length-m,y=Array(m),g=0;g1){var b=y.slice(0,v.index),w=y.slice(v.index+v.length);$=b.join(":")+"::"+w.join(":")}else $=y.join(":");return a&&($+="%"+a),$}return e}var H=/^(?:([^:\/?#]+):)?(?:\/\/((?:([^\/?#@]*)@)?(\[[^\/?#\]]+\]|[^\/?#:]*)(?:\:(\d*))?))?([^?#]*)(?:\?([^#]*))?(?:#((?:.|\n|\r)*))?/i,G=void 0==="".match(/(){0}/)[1];function Z(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},r={},n=!1!==t.iri?l:c;"suffix"===t.reference&&(e=(t.scheme?t.scheme+":":"")+"//"+e);var o=e.match(H);if(o){G?(r.scheme=o[1],r.userinfo=o[3],r.host=o[4],r.port=parseInt(o[5],10),r.path=o[6]||"",r.query=o[7],r.fragment=o[8],isNaN(r.port)&&(r.port=o[5])):(r.scheme=o[1]||void 0,r.userinfo=-1!==e.indexOf("@")?o[3]:void 0,r.host=-1!==e.indexOf("//")?o[4]:void 0,r.port=parseInt(o[5],10),r.path=o[6]||"",r.query=-1!==e.indexOf("?")?o[7]:void 0,r.fragment=-1!==e.indexOf("#")?o[8]:void 0,isNaN(r.port)&&(r.port=e.match(/\/\/(?:.|\n)*\:(?:\/|\?|\#|$)/)?o[4]:void 0)),r.host&&(r.host=W(K(r.host,n),n)),void 0!==r.scheme||void 0!==r.userinfo||void 0!==r.host||void 0!==r.port||r.path||void 0!==r.query?void 0===r.scheme?r.reference="relative":void 0===r.fragment?r.reference="absolute":r.reference="uri":r.reference="same-document",t.reference&&"suffix"!==t.reference&&t.reference!==r.reference&&(r.error=r.error||"URI is not a "+t.reference+" reference.");var a=V[(t.scheme||r.scheme||"").toLowerCase()];if(t.unicodeSupport||a&&a.unicodeSupport)z(r,n);else{if(r.host&&(t.domainHost||a&&a.domainHost))try{r.host=U.toASCII(r.host.replace(n.PCT_ENCODED,q).toLowerCase())}catch(s){r.error=r.error||"Host's domain name can not be converted to ASCII via punycode: "+s}z(r,c)}a&&a.parse&&a.parse(r,t)}else r.error=r.error||"URI can not be parsed.";return r}function J(e,t){var r=!1!==t.iri?l:c,n=[];return void 0!==e.userinfo&&(n.push(e.userinfo),n.push("@")),void 0!==e.host&&n.push(W(K(String(e.host),r),r).replace(r.IPV6ADDRESS,(function(e,t,r){return"["+t+(r?"%25"+r:"")+"]"}))),"number"!==typeof e.port&&"string"!==typeof e.port||(n.push(":"),n.push(String(e.port))),n.length?n.join(""):void 0}var Y=/^\.\.?\//,Q=/^\/\.(\/|$)/,X=/^\/\.\.(\/|$)/,ee=/^\/?(?:.|\n)*?(?=\/|$)/;function te(e){for(var t=[];e.length;)if(e.match(Y))e=e.replace(Y,"");else if(e.match(Q))e=e.replace(Q,"/");else if(e.match(X))e=e.replace(X,"/"),t.pop();else if("."===e||".."===e)e="";else{var r=e.match(ee);if(!r)throw new 
Error("Unexpected dot segment condition");var n=r[0];e=e.slice(n.length),t.push(n)}return t.join("")}function re(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},r=t.iri?l:c,n=[],o=V[(t.scheme||e.scheme||"").toLowerCase()];if(o&&o.serialize&&o.serialize(e,t),e.host)if(r.IPV6ADDRESS.test(e.host));else if(t.domainHost||o&&o.domainHost)try{e.host=t.iri?U.toUnicode(e.host):U.toASCII(e.host.replace(r.PCT_ENCODED,q).toLowerCase())}catch(i){e.error=e.error||"Host's domain name can not be converted to "+(t.iri?"Unicode":"ASCII")+" via punycode: "+i}z(e,r),"suffix"!==t.reference&&e.scheme&&(n.push(e.scheme),n.push(":"));var a=J(e,t);if(void 0!==a&&("suffix"!==t.reference&&n.push("//"),n.push(a),e.path&&"/"!==e.path.charAt(0)&&n.push("/")),void 0!==e.path){var s=e.path;t.absolutePath||o&&o.absolutePath||(s=te(s)),void 0===a&&(s=s.replace(/^\/\//,"/%2F")),n.push(s)}return void 0!==e.query&&(n.push("?"),n.push(e.query)),void 0!==e.fragment&&(n.push("#"),n.push(e.fragment)),n.join("")}function ne(e,t){var r=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{},n={};return arguments[3]||(e=Z(re(e,r),r),t=Z(re(t,r),r)),!(r=r||{}).tolerant&&t.scheme?(n.scheme=t.scheme,n.userinfo=t.userinfo,n.host=t.host,n.port=t.port,n.path=te(t.path||""),n.query=t.query):(void 0!==t.userinfo||void 0!==t.host||void 0!==t.port?(n.userinfo=t.userinfo,n.host=t.host,n.port=t.port,n.path=te(t.path||""),n.query=t.query):(t.path?("/"===t.path.charAt(0)?n.path=te(t.path):(void 0===e.userinfo&&void 0===e.host&&void 0===e.port||e.path?e.path?n.path=e.path.slice(0,e.path.lastIndexOf("/")+1)+t.path:n.path=t.path:n.path="/"+t.path,n.path=te(n.path)),n.query=t.query):(n.path=e.path,void 0!==t.query?n.query=t.query:n.query=e.query),n.userinfo=e.userinfo,n.host=e.host,n.port=e.port),n.scheme=e.scheme),n.fragment=t.fragment,n}function oe(e,t,r){var n=s({scheme:"null"},r);return re(ne(Z(e,n),Z(t,n),n,!0),n)}function ae(e,t){return"string"===typeof e?e=re(Z(e,t),t):"object"===n(e)&&(e=Z(re(e,t),t)),e}function se(e,t,r){return"string"===typeof e?e=re(Z(e,r),r):"object"===n(e)&&(e=re(e,r)),"string"===typeof t?t=re(Z(t,r),r):"object"===n(t)&&(t=re(t,r)),e===t}function ie(e,t){return e&&e.toString().replace(t&&t.iri?l.ESCAPE:c.ESCAPE,B)}function ce(e,t){return e&&e.toString().replace(t&&t.iri?l.PCT_ENCODED:c.PCT_ENCODED,q)}var le={scheme:"http",domainHost:!0,parse:function(e,t){return e.host||(e.error=e.error||"HTTP URIs must have a host."),e},serialize:function(e,t){var r="https"===String(e.scheme).toLowerCase();return e.port!==(r?443:80)&&""!==e.port||(e.port=void 0),e.path||(e.path="/"),e}},de={scheme:"https",domainHost:le.domainHost,parse:le.parse,serialize:le.serialize};function ue(e){return"boolean"===typeof e.secure?e.secure:"wss"===String(e.scheme).toLowerCase()}var fe={scheme:"ws",domainHost:!0,parse:function(e,t){var r=e;return r.secure=ue(r),r.resourceName=(r.path||"/")+(r.query?"?"+r.query:""),r.path=void 0,r.query=void 0,r},serialize:function(e,t){if(e.port!==(ue(e)?443:80)&&""!==e.port||(e.port=void 0),"boolean"===typeof e.secure&&(e.scheme=e.secure?"wss":"ws",e.secure=void 0),e.resourceName){var r=e.resourceName.split("?"),n=d(r,2),o=n[0],a=n[1];e.path=o&&"/"!==o?o:void 0,e.query=a,e.resourceName=void 0}return e.fragment=void 
0,e}},pe={scheme:"wss",domainHost:fe.domainHost,parse:fe.parse,serialize:fe.serialize},me={},he="[A-Za-z0-9\\-\\.\\_\\~\\xA0-\\u200D\\u2010-\\u2029\\u202F-\\uD7FF\\uF900-\\uFDCF\\uFDF0-\\uFFEF]",ye="[0-9A-Fa-f]",ge=r(r("%[EFef]"+ye+"%"+ye+ye+"%"+ye+ye)+"|"+r("%[89A-Fa-f]"+ye+"%"+ye+ye)+"|"+r("%"+ye+ye)),ve="[A-Za-z0-9\\!\\$\\%\\'\\*\\+\\-\\^\\_\\`\\{\\|\\}\\~]",$e=t("[\\!\\$\\%\\'\\(\\)\\*\\+\\,\\-\\.0-9\\<\\>A-Z\\x5E-\\x7E]",'[\\"\\\\]'),be="[\\!\\$\\'\\(\\)\\*\\+\\,\\;\\:\\@]",we=new RegExp(he,"g"),Se=new RegExp(ge,"g"),_e=new RegExp(t("[^]",ve,"[\\.]",'[\\"]',$e),"g"),xe=new RegExp(t("[^]",he,be),"g"),Ee=xe;function ke(e){var t=q(e);return t.match(we)?t:e}var je={scheme:"mailto",parse:function(e,t){var r=e,n=r.to=r.path?r.path.split(","):[];if(r.path=void 0,r.query){for(var o=!1,a={},s=r.query.split("&"),i=0,c=s.length;i{"use strict";e.exports=Array.isArray||function(e){return"[object Array]"===Object.prototype.toString.call(e)}},82986:e=>{"use strict";e.exports=function(e){return"function"===typeof e}},96552:(e,t,r)=>{"use strict";var n=r(7800),o=r(84356);e.exports=function(e){var t;if(!n(e))return!1;if(!(t=e.length))return!1;for(var r=0;r{"use strict";var n=r(66415);e.exports=function(e){return n(e)&&e%1===0}},66415:e=>{"use strict";e.exports=function(e){return("number"===typeof e||"[object Number]"===Object.prototype.toString.call(e))&&e.valueOf()===e.valueOf()}},63837:e=>{"use strict";e.exports=JSON.parse('{"$id":"https://raw.githubusercontent.com/ajv-validator/ajv/master/lib/refs/data.json#","description":"Meta-schema for $data reference (JSON AnySchema extension proposal)","type":"object","required":["$data"],"properties":{"$data":{"type":"string","anyOf":[{"format":"relative-json-pointer"},{"format":"json-pointer"}]}},"additionalProperties":false}')},72079:e=>{"use strict";e.exports=JSON.parse('{"$schema":"http://json-schema.org/draft-07/schema#","$id":"http://json-schema.org/draft-07/schema#","title":"Core schema 
meta-schema","definitions":{"schemaArray":{"type":"array","minItems":1,"items":{"$ref":"#"}},"nonNegativeInteger":{"type":"integer","minimum":0},"nonNegativeIntegerDefault0":{"allOf":[{"$ref":"#/definitions/nonNegativeInteger"},{"default":0}]},"simpleTypes":{"enum":["array","boolean","integer","null","number","object","string"]},"stringArray":{"type":"array","items":{"type":"string"},"uniqueItems":true,"default":[]}},"type":["object","boolean"],"properties":{"$id":{"type":"string","format":"uri-reference"},"$schema":{"type":"string","format":"uri"},"$ref":{"type":"string","format":"uri-reference"},"$comment":{"type":"string"},"title":{"type":"string"},"description":{"type":"string"},"default":true,"readOnly":{"type":"boolean","default":false},"examples":{"type":"array","items":true},"multipleOf":{"type":"number","exclusiveMinimum":0},"maximum":{"type":"number"},"exclusiveMaximum":{"type":"number"},"minimum":{"type":"number"},"exclusiveMinimum":{"type":"number"},"maxLength":{"$ref":"#/definitions/nonNegativeInteger"},"minLength":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"pattern":{"type":"string","format":"regex"},"additionalItems":{"$ref":"#"},"items":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/schemaArray"}],"default":true},"maxItems":{"$ref":"#/definitions/nonNegativeInteger"},"minItems":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"uniqueItems":{"type":"boolean","default":false},"contains":{"$ref":"#"},"maxProperties":{"$ref":"#/definitions/nonNegativeInteger"},"minProperties":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"required":{"$ref":"#/definitions/stringArray"},"additionalProperties":{"$ref":"#"},"definitions":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"properties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"patternProperties":{"type":"object","additionalProperties":{"$ref":"#"},"propertyNames":{"format":"regex"},"default":{}},"dependencies":{"type":"object","additionalProperties":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/stringArray"}]}},"propertyNames":{"$ref":"#"},"const":true,"enum":{"type":"array","items":true,"minItems":1,"uniqueItems":true},"type":{"anyOf":[{"$ref":"#/definitions/simpleTypes"},{"type":"array","items":{"$ref":"#/definitions/simpleTypes"},"minItems":1,"uniqueItems":true}]},"format":{"type":"string"},"contentMediaType":{"type":"string"},"contentEncoding":{"type":"string"},"if":{"$ref":"#"},"then":{"$ref":"#"},"else":{"$ref":"#"},"allOf":{"$ref":"#/definitions/schemaArray"},"anyOf":{"$ref":"#/definitions/schemaArray"},"oneOf":{"$ref":"#/definitions/schemaArray"},"not":{"$ref":"#"}},"default":true}')}}]); \ No newline at end of file diff --git a/src/web/gui/v2/749.e44087ac3a2e3a994318.chunk.js.LICENSE.txt b/src/web/gui/v2/749.e44087ac3a2e3a994318.chunk.js.LICENSE.txt deleted file mode 100644 index d639ff9e1..000000000 --- a/src/web/gui/v2/749.e44087ac3a2e3a994318.chunk.js.LICENSE.txt +++ /dev/null @@ -1,11 +0,0 @@ -/** - * @license React - * react-jsx-runtime.production.min.js - * - * Copyright (c) Facebook, Inc. and its affiliates. - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. - */ - -/** @license URI.js v4.4.1 (c) 2011 Gary Court. 
diff --git a/src/web/gui/v2/7519.7982a2e0fcdf82ba78dd.chunk.js b/src/web/gui/v2/7519.7982a2e0fcdf82ba78dd.chunk.js
deleted file mode 100644
index 58957538a..000000000
--- a/src/web/gui/v2/7519.7982a2e0fcdf82ba78dd.chunk.js
+++ /dev/null
@@ -1 +0,0 @@
-[... single-line minified bundle omitted: Sentry debug-ID shim and SENTRY_RELEASE (id 30b1ba65cc8722be7e184f4a401fb43e6b21634d), webpack chunk 7519 carrying the Font Awesome "user-astronaut" solid icon, the Font Awesome 6.5.2 SVG core (config, CSS/keyframes, mutation observer, pseudo-element and mask rendering), and the react-fontawesome component wrapper; payload garbled in extraction ...]
Object.keys(e).forEach((function(t){Fr.defaultProps.hasOwnProperty(t)||(m[t]=e[t])})),zr(h[0],m)}));Fr.displayName="FontAwesomeIcon",Fr.propTypes={beat:yr().bool,border:yr().bool,beatFade:yr().bool,bounce:yr().bool,className:yr().string,fade:yr().bool,flash:yr().bool,mask:yr().oneOfType([yr().object,yr().array,yr().string]),maskId:yr().string,fixedWidth:yr().bool,inverse:yr().bool,flip:yr().oneOf([!0,!1,"horizontal","vertical","both"]),icon:yr().oneOfType([yr().object,yr().array,yr().string]),listItem:yr().bool,pull:yr().oneOf(["right","left"]),pulse:yr().bool,rotation:yr().oneOf([0,90,180,270]),shake:yr().bool,size:yr().oneOf(["2xs","xs","sm","lg","xl","2xl","1x","2x","3x","4x","5x","6x","7x","8x","9x","10x"]),spin:yr().bool,spinPulse:yr().bool,spinReverse:yr().bool,symbol:yr().oneOfType([yr().bool,yr().string]),title:yr().string,titleId:yr().string,transform:yr().oneOfType([yr().string,yr().object]),swapOpacity:yr().bool},Fr.defaultProps={border:!1,className:"",mask:null,maskId:null,fixedWidth:!1,inverse:!1,flip:!1,icon:null,listItem:!1,pull:null,pulse:!1,rotation:null,size:null,spin:!1,spinPulse:!1,spinReverse:!1,beat:!1,fade:!1,beatFade:!1,bounce:!1,shake:!1,symbol:!1,title:"",titleId:null,transform:null,swapOpacity:!1};var zr=function e(t,r){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{};if("string"===typeof r)return r;var a=(r.children||[]).map((function(r){return e(t,r)})),i=Object.keys(r.attributes||{}).reduce((function(e,t){var n=r.attributes[t];switch(t){case"class":e.attrs.className=n,delete r.attributes.class;break;case"style":e.attrs.style=n.split(";").map((function(e){return e.trim()})).filter((function(e){return e})).reduce((function(e,t){var r,n=t.indexOf(":"),a=Sr(t.slice(0,n)),i=t.slice(n+1).trim();return a.startsWith("webkit")?e[(r=a,r.charAt(0).toUpperCase()+r.slice(1))]=i:e[a]=i,e}),{});break;default:0===t.indexOf("aria-")||0===t.indexOf("data-")?e.attrs[t.toLowerCase()]=n:e.attrs[Sr(t)]=n}return e}),{attrs:{}}),o=n.style,s=void 0===o?{}:o,c=Ar(n,Dr);return i.attrs.style=_r(_r({},i.attrs.style),s),t.apply(void 0,[r.tag,_r(_r({},i.attrs),c)].concat(Cr(a)))}.bind(null,vr.createElement)},13083:(e,t,r)=>{r.d(t,{Ay:()=>$r});var n=Object.create,a=Object.defineProperty,i=Object.getOwnPropertyDescriptor,o=Object.getOwnPropertyNames,s=Object.getPrototypeOf,c=Object.prototype.hasOwnProperty,l=e=>a(e,"__esModule",{value:!0}),u=(e,t)=>function(){return t||(0,e[Object.keys(e)[0]])((t={exports:{}}).exports,t),t.exports},f=(e,t)=>{for(var r in l(e),t)a(e,r,{get:t[r],enumerable:!0})},p=e=>((e,t,r)=>{if(t&&"object"===typeof t||"function"===typeof t)for(let n of o(t))c.call(e,n)||"default"===n||a(e,n,{get:()=>t[n],enumerable:!(r=i(t,n))||r.enumerable});return e})(l(a(null!=e?n(s(e)):{},"default",e&&e.__esModule&&"default"in e?{get:()=>e.default,enumerable:!0}:{value:e,enumerable:!0})),e),d=u({"src/grammar/tag.js"(e,t){"use strict";function r(e,t,n,a){this.message=e,this.expected=t,this.found=n,this.location=a,this.name="SyntaxError","function"===typeof Error.captureStackTrace&&Error.captureStackTrace(this,r)}!function(e,t){function r(){this.constructor=e}r.prototype=t.prototype,e.prototype=new r}(r,Error),r.buildMessage=function(e,t,r){var n,a={literal:function(e){return'"'+o(e.text)+'"'},class:function(e){var t=e.parts.map((function(e){return Array.isArray(e)?s(e[0])+"-"+s(e[1]):s(e)}));return"["+(e.inverted?"^":"")+t+"]"},any:function(){return"any character"},end:function(){return"end of input"},other:function(e){return e.description},not:function(e){return"not 
"+c(e.expected)}};function i(e){return e.charCodeAt(0).toString(16).toUpperCase()}function o(e){return e.replace(/\\/g,"\\\\").replace(/"/g,'\\"').replace(/\0/g,"\\0").replace(/\t/g,"\\t").replace(/\n/g,"\\n").replace(/\r/g,"\\r").replace(/[\x00-\x0F]/g,(function(e){return"\\x0"+i(e)})).replace(/[\x10-\x1F\x7F-\x9F]/g,(function(e){return"\\x"+i(e)}))}function s(e){return e.replace(/\\/g,"\\\\").replace(/\]/g,"\\]").replace(/\^/g,"\\^").replace(/-/g,"\\-").replace(/\0/g,"\\0").replace(/\t/g,"\\t").replace(/\n/g,"\\n").replace(/\r/g,"\\r").replace(/[\x00-\x0F]/g,(function(e){return"\\x0"+i(e)})).replace(/[\x10-\x1F\x7F-\x9F]/g,(function(e){return"\\x"+i(e)}))}function c(e){return a[e.type](e)}return"Expected "+function(e){var t,r,n=e.map(c);if(n.sort(),n.length>0){for(t=1,r=1;t0;function Ie(e,r){var n={};Le&&(n.filename=t.filename);var a=ze(e);n.start={offset:e,line:a.line,column:a.column};var i=ze(r);return n.end={offset:r,line:i.line,column:i.column},n}function Ne(e){var t=je[je.length-1];Eet.pos&&(t.pos=Ee,t.variants=[]),t.variants.push(e))}function Me(e,t,n){return new r(r.buildMessage(e,t,n),e,t,n)}function Te(){var t;return(t=function(){var e,t;e=Ee,(t=Ye())===a&&(t=Ue());t!==a&&(Se=e,t=J(t));return e=t,e}())===a&&(t=function(){var e,t,r,n;if(e=Ee,(t=Re())!==a){for(r=[],n=nt();n!==a;)r.push(n),n=nt();Se=e,e=X(t)}else Ee=e,e=a;return e}())===a&&(t=function(){var t,r,n,i,o,c,l,u=function(e){0===Oe&&Ne(e)};if(t=Ee,(r=Pe())!==a){for(n=[],i=nt();i!==a;)n.push(i),i=nt();for(i=Ee,(o=We())!==a?((c=nt())===a&&(c=null),Se=i,i=K(r,o)):(Ee=i,i=a),i===a&&(i=null),(o=Re())===a&&(o=null),c=[],l=nt();l!==a;)c.push(l),l=nt();u(O),47===e.charCodeAt(Ee)?(l=s,Ee++):l=a,l===a&&(l=null),Se=t,t=Q(r,i,o,l)}else Ee=t,t=a;return t}(),t===a&&(t=function(){var t,r,n,i=function(e){0===Oe&&Ne(e)};t=Ee,i(O),47===e.charCodeAt(Ee)?(r=s,Ee++):r=a;r!==a&&(n=Pe())!==a?(Se=t,t=ee(n)):(Ee=t,t=a);return t}())),t}function Pe(){var e,t;return t=q,0===Oe&&Ne(t),Oe++,e=rt(),Oe--,e}function Re(){var e,t,r,n;if(e=Ee,(t=Be())!==a){for(r=[],n=$e();n!==a;)r.push(n),n=$e();Se=e,e=te(t,r)}else Ee=e,e=a;return e}function $e(){var e,t,r;if(e=Ee,t=[],(r=nt())!==a)for(;r!==a;)t.push(r),r=nt();else t=a;return t!==a&&(r=Be())!==a?(Se=e,e=re(r)):(Ee=e,e=a),e}function Be(){var t,r;return t=Ee,r=function(){var t,r,n,i=function(e){0===Oe&&Ne(e)};i(z),Oe++,t=Ee,35===e.charCodeAt(Ee)?(r=l,Ee++):r=a;r!==a&&(n=rt())!==a?(Se=t,t=se(n)):(Ee=t,t=a);return Oe--,t}(),r!==a&&(Se=t,r=ne(r)),(t=r)===a&&(t=Ee,r=function(){var t,r,n,i=function(e){0===Oe&&Ne(e)};i(F),Oe++,t=Ee,46===e.charCodeAt(Ee)?(r=c,Ee++):r=a;r!==a&&(n=rt())!==a?(Se=t,t=oe(n)):(Ee=t,t=a);return Oe--,t}(),r!==a&&(Se=t,r=ae(r)),(t=r)===a&&(t=Ee,r=function(){var t,r,n,i,o=function(e){0===Oe&&Ne(e)};t=Ee,r=rt(),r!==a?(o(L),61===e.charCodeAt(Ee)?(n=u,Ee++):n=a,n!==a&&(i=We())!==a?(Se=t,t=ce(r,i)):(Ee=t,t=a)):(Ee=t,t=a);return t}(),r!==a&&(Se=t,r=ie(r)),t=r)),t}function Ue(){var t,r,n,i,o,s,c,l,u=function(e){0===Oe&&Ne(e)};if(t=Ee,(r=rt())!==a)if(u(I),40===e.charCodeAt(Ee)?(n=f,Ee++):n=a,n!==a){for(i=[],o=nt();o!==a;)i.push(o),o=nt();for(o=Ee,(s=Ve())===a&&(s=null),c=[],l=Ze();l!==a;)c.push(l),l=Ze();Se=o,o=le(r,s,c),u(N),41===e.charCodeAt(Ee)?(s=p,Ee++):s=a,s!==a?(Se=t,t=ue(r,o)):(Ee=t,t=a)}else Ee=t,t=a;else Ee=t,t=a;return t}function Ve(){var t,r,n,i,o;return t=Ee,r=Ee,(n=rt())!==a?(o=L,0===Oe&&Ne(o),61===e.charCodeAt(Ee)?(i=u,Ee++):i=a,i!==a?(Se=r,r=fe(n)):(Ee=r,r=a)):(Ee=r,r=a),r===a&&(r=null),(n=We())!==a?(Se=t,t=pe(r,n)):(Ee=t,t=a),t}function Ze(){var 
t,r,n,i,o,s;for(t=Ee,r=[],n=nt();n!==a;)r.push(n),n=nt();if(s=M,0===Oe&&Ne(s),44===e.charCodeAt(Ee)?(n=d,Ee++):n=a,n!==a){for(i=[],o=nt();o!==a;)i.push(o),o=nt();(o=Ve())!==a?(Se=t,t=de(o)):(Ee=t,t=a)}else Ee=t,t=a;return t}function He(){var t,r,n,i;for(t=Ee,r=[],n=nt();n!==a;)r.push(n),n=nt();return i=M,0===Oe&&Ne(i),44===e.charCodeAt(Ee)?(n=d,Ee++):n=a,n!==a?t=r=[r,n]:(Ee=t,t=a),t===a&&(t=null),t}function Ye(){var t,r,n,i,o,s;if(s=T,0===Oe&&Ne(s),Oe++,t=Ee,C.test(e.charAt(Ee))?(r=e.charAt(Ee),Ee++):r=a,r!==a)if((n=rt())!==a){for(i=[],o=Ge();o!==a;)i.push(o),o=Ge();Se=t,t=he(r,n,i)}else Ee=t,t=a;else Ee=t,t=a;return Oe--,t}function Ge(){var t,r,n,i;return t=Ee,46===e.charCodeAt(Ee)?(r=c,Ee++):r=a,r!==a&&(n=rt())!==a?(Se=t,t=fe(n)):(Ee=t,t=a),t===a&&(t=Ee,91===e.charCodeAt(Ee)?(r=h,Ee++):r=a,r!==a?((n=Qe())===a&&(n=et()),n!==a?(93===e.charCodeAt(Ee)?(i=m,Ee++):i=a,i!==a?(Se=t,t=de(n)):(Ee=t,t=a)):(Ee=t,t=a)):(Ee=t,t=a)),t}function We(){var t;return t=function(){var t,r,n=function(e){0===Oe&&Ne(e)};n(P),Oe++,t=Ee,e.substr(Ee,4)===g?(r=g,Ee+=4):r=a;r!==a&&(Se=t,r=me());return t=r,Oe--,t}(),t===a&&(t=function(){var t,r,n=function(e){0===Oe&&Ne(e)};n(R),Oe++,t=Ee,e.substr(Ee,4)===b?(r=b,Ee+=4):r=a;r!==a&&(Se=t,r=ge());t=r,t===a&&(t=Ee,e.substr(Ee,5)===y?(r=y,Ee+=5):r=a,r!==a&&(Se=t,r=be()),t=r);return Oe--,t}(),t===a&&(t=et())===a&&(t=Qe())===a&&(t=function(){var t,r,n,i,o,s,c,l=function(e){0===Oe&&Ne(e)};t=Ee,l($),91===e.charCodeAt(Ee)?(r=h,Ee++):r=a;if(r!==a){for(n=[],i=nt();i!==a;)n.push(i),i=nt();if(i=Ee,(o=We())!==a){for(s=[],c=Je();c!==a;)s.push(c),c=Je();c=He(),Se=i,i=ye(o,s)}else Ee=i,i=a;for(i===a&&(i=null),o=[],s=nt();s!==a;)o.push(s),s=nt();l(B),93===e.charCodeAt(Ee)?(s=m,Ee++):s=a,s!==a?(Se=t,t=ve(i)):(Ee=t,t=a)}else Ee=t,t=a;return t}(),t===a&&(t=function(){var t,r,n,i,o,s,c,l=function(e){0===Oe&&Ne(e)};t=Ee,l(U),123===e.charCodeAt(Ee)?(r=v,Ee++):r=a;if(r!==a){for(n=[],i=nt();i!==a;)n.push(i),i=nt();if(i=Ee,(o=Ke())!==a){for(s=[],c=Xe();c!==a;)s.push(c),c=Xe();c=He(),Se=i,i=ke(o,s)}else Ee=i,i=a;for(i===a&&(i=null),o=[],s=nt();s!==a;)o.push(s),s=nt();l(V),125===e.charCodeAt(Ee)?(s=k,Ee++):s=a,s!==a?(Se=t,t=_e(i)):(Ee=t,t=a)}else Ee=t,t=a;return t}(),t===a&&(t=Ue())===a&&(t=Ye())))),t}function Je(){var t,r,n,i,o,s;for(t=Ee,r=[],n=nt();n!==a;)r.push(n),n=nt();if(s=M,0===Oe&&Ne(s),44===e.charCodeAt(Ee)?(n=d,Ee++):n=a,n!==a){for(i=[],o=nt();o!==a;)i.push(o),o=nt();(o=We())!==a?(Se=t,t=de(o)):(Ee=t,t=a)}else Ee=t,t=a;return t}function Xe(){var t,r,n,i,o,s;for(t=Ee,r=[],n=nt();n!==a;)r.push(n),n=nt();if(s=M,0===Oe&&Ne(s),44===e.charCodeAt(Ee)?(n=d,Ee++):n=a,n!==a){for(i=[],o=nt();o!==a;)i.push(o),o=nt();(o=Ke())!==a?(Se=t,t=re(o)):(Ee=t,t=a)}else Ee=t,t=a;return t}function Ke(){var t,r,n,i,o,s;if(t=Ee,(r=rt())===a&&(r=et()),r!==a)if(s=Z,0===Oe&&Ne(s),58===e.charCodeAt(Ee)?(n=_,Ee++):n=a,n!==a){for(i=[],o=nt();o!==a;)i.push(o),o=nt();(o=We())!==a?(Se=t,t=we(r,o)):(Ee=t,t=a)}else Ee=t,t=a;else Ee=t,t=a;return t}function Qe(){var t,r,n,i,o,s,l,u;if(u=H,0===Oe&&Ne(u),Oe++,t=Ee,45===e.charCodeAt(Ee)?(r=w,Ee++):r=a,r===a&&(r=null),n=[],E.test(e.charAt(Ee))?(i=e.charAt(Ee),Ee++):i=a,i!==a)for(;i!==a;)n.push(i),E.test(e.charAt(Ee))?(i=e.charAt(Ee),Ee++):i=a;else n=a;if(n!==a){if(i=Ee,46===e.charCodeAt(Ee)?(o=c,Ee++):o=a,o!==a){if(s=[],E.test(e.charAt(Ee))?(l=e.charAt(Ee),Ee++):l=a,l!==a)for(;l!==a;)s.push(l),E.test(e.charAt(Ee))?(l=e.charAt(Ee),Ee++):l=a;else s=a;s!==a?i=o=[o,s]:(Ee=i,i=a)}else Ee=i,i=a;i===a&&(i=null),Se=t,t=xe()}else Ee=t,t=a;return Oe--,t}function et(){var 
t,r,n,i,o;if(o=Y,0===Oe&&Ne(o),Oe++,t=Ee,34===e.charCodeAt(Ee)?(r=x,Ee++):r=a,r!==a){for(n=[],i=tt();i!==a;)n.push(i),i=tt();34===e.charCodeAt(Ee)?(i=x,Ee++):i=a,i!==a?(Se=t,t=Ae(n)):(Ee=t,t=a)}else Ee=t,t=a;return Oe--,t}function tt(){var t;return S.test(e.charAt(Ee))?(t=e.charAt(Ee),Ee++):t=a,t===a&&(t=function(){var t,r,n;t=Ee,92===e.charCodeAt(Ee)?(r=A,Ee++):r=a;r!==a?(34===e.charCodeAt(Ee)?(n=x,Ee++):n=a,n===a&&(92===e.charCodeAt(Ee)?(n=A,Ee++):n=a),n!==a?(Se=t,t=Ce(n)):(Ee=t,t=a)):(Ee=t,t=a);return t}()),t}function rt(){var t,r,n,i;if(i=G,0===Oe&&Ne(i),Oe++,t=Ee,r=[],D.test(e.charAt(Ee))?(n=e.charAt(Ee),Ee++):n=a,n!==a)for(;n!==a;)r.push(n),D.test(e.charAt(Ee))?(n=e.charAt(Ee),Ee++):n=a;else r=a;return t=r!==a?e.substring(t,Ee):r,Oe--,t}function nt(){var t,r;return r=W,0===Oe&&Ne(r),Oe++,j.test(e.charAt(Ee))?(t=e.charAt(Ee),Ee++):t=a,Oe--,t}const{Variable:at,Function:it}=t;if(je.push({pos:Ee,variants:[]}),(n=o())!==a&&Ee===e.length)return n;throw n!==a&&Ee",GT:">",Gt:"\u226b",gtdot:"\u22d7",gtlPar:"\u2995",gtquest:"\u2a7c",gtrapprox:"\u2a86",gtrarr:"\u2978",gtrdot:"\u22d7",gtreqless:"\u22db",gtreqqless:"\u2a8c",gtrless:"\u2277",gtrsim:"\u2273",gvertneqq:"\u2269\ufe00",gvnE:"\u2269\ufe00",Hacek:"\u02c7",hairsp:"\u200a",half:"\xbd",hamilt:"\u210b",HARDcy:"\u042a",hardcy:"\u044a",harrcir:"\u2948",harr:"\u2194",hArr:"\u21d4",harrw:"\u21ad",Hat:"^",hbar:"\u210f",Hcirc:"\u0124",hcirc:"\u0125",hearts:"\u2665",heartsuit:"\u2665",hellip:"\u2026",hercon:"\u22b9",hfr:"\ud835\udd25",Hfr:"\u210c",HilbertSpace:"\u210b",hksearow:"\u2925",hkswarow:"\u2926",hoarr:"\u21ff",homtht:"\u223b",hookleftarrow:"\u21a9",hookrightarrow:"\u21aa",hopf:"\ud835\udd59",Hopf:"\u210d",horbar:"\u2015",HorizontalLine:"\u2500",hscr:"\ud835\udcbd",Hscr:"\u210b",hslash:"\u210f",Hstrok:"\u0126",hstrok:"\u0127",HumpDownHump:"\u224e",HumpEqual:"\u224f",hybull:"\u2043",hyphen:"\u2010",Iacute:"\xcd",iacute:"\xed",ic:"\u2063",Icirc:"\xce",icirc:"\xee",Icy:"\u0418",icy:"\u0438",Idot:"\u0130",IEcy:"\u0415",iecy:"\u0435",iexcl:"\xa1",iff:"\u21d4",ifr:"\ud835\udd26",Ifr:"\u2111",Igrave:"\xcc",igrave:"\xec",ii:"\u2148",iiiint:"\u2a0c",iiint:"\u222d",iinfin:"\u29dc",iiota:"\u2129",IJlig:"\u0132",ijlig:"\u0133",Imacr:"\u012a",imacr:"\u012b",image:"\u2111",ImaginaryI:"\u2148",imagline:"\u2110",imagpart:"\u2111",imath:"\u0131",Im:"\u2111",imof:"\u22b7",imped:"\u01b5",Implies:"\u21d2",incare:"\u2105",in:"\u2208",infin:"\u221e",infintie:"\u29dd",inodot:"\u0131",intcal:"\u22ba",int:"\u222b",Int:"\u222c",integers:"\u2124",Integral:"\u222b",intercal:"\u22ba",Intersection:"\u22c2",intlarhk:"\u2a17",intprod:"\u2a3c",InvisibleComma:"\u2063",InvisibleTimes:"\u2062",IOcy:"\u0401",iocy:"\u0451",Iogon:"\u012e",iogon:"\u012f",Iopf:"\ud835\udd40",iopf:"\ud835\udd5a",Iota:"\u0399",iota:"\u03b9",iprod:"\u2a3c",iquest:"\xbf",iscr:"\ud835\udcbe",Iscr:"\u2110",isin:"\u2208",isindot:"\u22f5",isinE:"\u22f9",isins:"\u22f4",isinsv:"\u22f3",isinv:"\u2208",it:"\u2062",Itilde:"\u0128",itilde:"\u0129",Iukcy:"\u0406",iukcy:"\u0456",Iuml:"\xcf",iuml:"\xef",Jcirc:"\u0134",jcirc:"\u0135",Jcy:"\u0419",jcy:"\u0439",Jfr:"\ud835\udd0d",jfr:"\ud835\udd27",jmath:"\u0237",Jopf:"\ud835\udd41",jopf:"\ud835\udd5b",Jscr:"\ud835\udca5",jscr:"\ud835\udcbf",Jsercy:"\u0408",jsercy:"\u0458",Jukcy:"\u0404",jukcy:"\u0454",Kappa:"\u039a",kappa:"\u03ba",kappav:"\u03f0",Kcedil:"\u0136",kcedil:"\u0137",Kcy:"\u041a",kcy:"\u043a",Kfr:"\ud835\udd0e",kfr:"\ud835\udd28",kgreen:"\u0138",KHcy:"\u0425",khcy:"\u0445",KJcy:"\u040c",kjcy:"\u045c",Kopf:"\ud835\udd42",kopf:"\ud835\udd5c",Kscr:"\ud835\ud
ca6",kscr:"\ud835\udcc0",lAarr:"\u21da",Lacute:"\u0139",lacute:"\u013a",laemptyv:"\u29b4",lagran:"\u2112",Lambda:"\u039b",lambda:"\u03bb",lang:"\u27e8",Lang:"\u27ea",langd:"\u2991",langle:"\u27e8",lap:"\u2a85",Laplacetrf:"\u2112",laquo:"\xab",larrb:"\u21e4",larrbfs:"\u291f",larr:"\u2190",Larr:"\u219e",lArr:"\u21d0",larrfs:"\u291d",larrhk:"\u21a9",larrlp:"\u21ab",larrpl:"\u2939",larrsim:"\u2973",larrtl:"\u21a2",latail:"\u2919",lAtail:"\u291b",lat:"\u2aab",late:"\u2aad",lates:"\u2aad\ufe00",lbarr:"\u290c",lBarr:"\u290e",lbbrk:"\u2772",lbrace:"{",lbrack:"[",lbrke:"\u298b",lbrksld:"\u298f",lbrkslu:"\u298d",Lcaron:"\u013d",lcaron:"\u013e",Lcedil:"\u013b",lcedil:"\u013c",lceil:"\u2308",lcub:"{",Lcy:"\u041b",lcy:"\u043b",ldca:"\u2936",ldquo:"\u201c",ldquor:"\u201e",ldrdhar:"\u2967",ldrushar:"\u294b",ldsh:"\u21b2",le:"\u2264",lE:"\u2266",LeftAngleBracket:"\u27e8",LeftArrowBar:"\u21e4",leftarrow:"\u2190",LeftArrow:"\u2190",Leftarrow:"\u21d0",LeftArrowRightArrow:"\u21c6",leftarrowtail:"\u21a2",LeftCeiling:"\u2308",LeftDoubleBracket:"\u27e6",LeftDownTeeVector:"\u2961",LeftDownVectorBar:"\u2959",LeftDownVector:"\u21c3",LeftFloor:"\u230a",leftharpoondown:"\u21bd",leftharpoonup:"\u21bc",leftleftarrows:"\u21c7",leftrightarrow:"\u2194",LeftRightArrow:"\u2194",Leftrightarrow:"\u21d4",leftrightarrows:"\u21c6",leftrightharpoons:"\u21cb",leftrightsquigarrow:"\u21ad",LeftRightVector:"\u294e",LeftTeeArrow:"\u21a4",LeftTee:"\u22a3",LeftTeeVector:"\u295a",leftthreetimes:"\u22cb",LeftTriangleBar:"\u29cf",LeftTriangle:"\u22b2",LeftTriangleEqual:"\u22b4",LeftUpDownVector:"\u2951",LeftUpTeeVector:"\u2960",LeftUpVectorBar:"\u2958",LeftUpVector:"\u21bf",LeftVectorBar:"\u2952",LeftVector:"\u21bc",lEg:"\u2a8b",leg:"\u22da",leq:"\u2264",leqq:"\u2266",leqslant:"\u2a7d",lescc:"\u2aa8",les:"\u2a7d",lesdot:"\u2a7f",lesdoto:"\u2a81",lesdotor:"\u2a83",lesg:"\u22da\ufe00",lesges:"\u2a93",lessapprox:"\u2a85",lessdot:"\u22d6",lesseqgtr:"\u22da",lesseqqgtr:"\u2a8b",LessEqualGreater:"\u22da",LessFullEqual:"\u2266",LessGreater:"\u2276",lessgtr:"\u2276",LessLess:"\u2aa1",lesssim:"\u2272",LessSlantEqual:"\u2a7d",LessTilde:"\u2272",lfisht:"\u297c",lfloor:"\u230a",Lfr:"\ud835\udd0f",lfr:"\ud835\udd29",lg:"\u2276",lgE:"\u2a91",lHar:"\u2962",lhard:"\u21bd",lharu:"\u21bc",lharul:"\u296a",lhblk:"\u2584",LJcy:"\u0409",ljcy:"\u0459",llarr:"\u21c7",ll:"\u226a",Ll:"\u22d8",llcorner:"\u231e",Lleftarrow:"\u21da",llhard:"\u296b",lltri:"\u25fa",Lmidot:"\u013f",lmidot:"\u0140",lmoustache:"\u23b0",lmoust:"\u23b0",lnap:"\u2a89",lnapprox:"\u2a89",lne:"\u2a87",lnE:"\u2268",lneq:"\u2a87",lneqq:"\u2268",lnsim:"\u22e6",loang:"\u27ec",loarr:"\u21fd",lobrk:"\u27e6",longleftarrow:"\u27f5",LongLeftArrow:"\u27f5",Longleftarrow:"\u27f8",longleftrightarrow:"\u27f7",LongLeftRightArrow:"\u27f7",Longleftrightarrow:"\u27fa",longmapsto:"\u27fc",longrightarrow:"\u27f6",LongRightArrow:"\u27f6",Longrightarrow:"\u27f9",looparrowleft:"\u21ab",looparrowright:"\u21ac",lopar:"\u2985",Lopf:"\ud835\udd43",lopf:"\ud835\udd5d",loplus:"\u2a2d",lotimes:"\u2a34",lowast:"\u2217",lowbar:"_",LowerLeftArrow:"\u2199",LowerRightArrow:"\u2198",loz:"\u25ca",lozenge:"\u25ca",lozf:"\u29eb",lpar:"(",lparlt:"\u2993",lrarr:"\u21c6",lrcorner:"\u231f",lrhar:"\u21cb",lrhard:"\u296d",lrm:"\u200e",lrtri:"\u22bf",lsaquo:"\u2039",lscr:"\ud835\udcc1",Lscr:"\u2112",lsh:"\u21b0",Lsh:"\u21b0",lsim:"\u2272",lsime:"\u2a8d",lsimg:"\u2a8f",lsqb:"[",lsquo:"\u2018",lsquor:"\u201a",Lstrok:"\u0141",lstrok:"\u0142",ltcc:"\u2aa6",ltcir:"\u2a79",lt:"<",LT:"<",Lt:"\u226a",ltdot:"\u22d6",lthree:"\u22cb",ltimes:"\u22c9",l
tlarr:"\u2976",ltquest:"\u2a7b",ltri:"\u25c3",ltrie:"\u22b4",ltrif:"\u25c2",ltrPar:"\u2996",lurdshar:"\u294a",luruhar:"\u2966",lvertneqq:"\u2268\ufe00",lvnE:"\u2268\ufe00",macr:"\xaf",male:"\u2642",malt:"\u2720",maltese:"\u2720",Map:"\u2905",map:"\u21a6",mapsto:"\u21a6",mapstodown:"\u21a7",mapstoleft:"\u21a4",mapstoup:"\u21a5",marker:"\u25ae",mcomma:"\u2a29",Mcy:"\u041c",mcy:"\u043c",mdash:"\u2014",mDDot:"\u223a",measuredangle:"\u2221",MediumSpace:"\u205f",Mellintrf:"\u2133",Mfr:"\ud835\udd10",mfr:"\ud835\udd2a",mho:"\u2127",micro:"\xb5",midast:"*",midcir:"\u2af0",mid:"\u2223",middot:"\xb7",minusb:"\u229f",minus:"\u2212",minusd:"\u2238",minusdu:"\u2a2a",MinusPlus:"\u2213",mlcp:"\u2adb",mldr:"\u2026",mnplus:"\u2213",models:"\u22a7",Mopf:"\ud835\udd44",mopf:"\ud835\udd5e",mp:"\u2213",mscr:"\ud835\udcc2",Mscr:"\u2133",mstpos:"\u223e",Mu:"\u039c",mu:"\u03bc",multimap:"\u22b8",mumap:"\u22b8",nabla:"\u2207",Nacute:"\u0143",nacute:"\u0144",nang:"\u2220\u20d2",nap:"\u2249",napE:"\u2a70\u0338",napid:"\u224b\u0338",napos:"\u0149",napprox:"\u2249",natural:"\u266e",naturals:"\u2115",natur:"\u266e",nbsp:"\xa0",nbump:"\u224e\u0338",nbumpe:"\u224f\u0338",ncap:"\u2a43",Ncaron:"\u0147",ncaron:"\u0148",Ncedil:"\u0145",ncedil:"\u0146",ncong:"\u2247",ncongdot:"\u2a6d\u0338",ncup:"\u2a42",Ncy:"\u041d",ncy:"\u043d",ndash:"\u2013",nearhk:"\u2924",nearr:"\u2197",neArr:"\u21d7",nearrow:"\u2197",ne:"\u2260",nedot:"\u2250\u0338",NegativeMediumSpace:"\u200b",NegativeThickSpace:"\u200b",NegativeThinSpace:"\u200b",NegativeVeryThinSpace:"\u200b",nequiv:"\u2262",nesear:"\u2928",nesim:"\u2242\u0338",NestedGreaterGreater:"\u226b",NestedLessLess:"\u226a",NewLine:"\n",nexist:"\u2204",nexists:"\u2204",Nfr:"\ud835\udd11",nfr:"\ud835\udd2b",ngE:"\u2267\u0338",nge:"\u2271",ngeq:"\u2271",ngeqq:"\u2267\u0338",ngeqslant:"\u2a7e\u0338",nges:"\u2a7e\u0338",nGg:"\u22d9\u0338",ngsim:"\u2275",nGt:"\u226b\u20d2",ngt:"\u226f",ngtr:"\u226f",nGtv:"\u226b\u0338",nharr:"\u21ae",nhArr:"\u21ce",nhpar:"\u2af2",ni:"\u220b",nis:"\u22fc",nisd:"\u22fa",niv:"\u220b",NJcy:"\u040a",njcy:"\u045a",nlarr:"\u219a",nlArr:"\u21cd",nldr:"\u2025",nlE:"\u2266\u0338",nle:"\u2270",nleftarrow:"\u219a",nLeftarrow:"\u21cd",nleftrightarrow:"\u21ae",nLeftrightarrow:"\u21ce",nleq:"\u2270",nleqq:"\u2266\u0338",nleqslant:"\u2a7d\u0338",nles:"\u2a7d\u0338",nless:"\u226e",nLl:"\u22d8\u0338",nlsim:"\u2274",nLt:"\u226a\u20d2",nlt:"\u226e",nltri:"\u22ea",nltrie:"\u22ec",nLtv:"\u226a\u0338",nmid:"\u2224",NoBreak:"\u2060",NonBreakingSpace:"\xa0",nopf:"\ud835\udd5f",Nopf:"\u2115",Not:"\u2aec",not:"\xac",NotCongruent:"\u2262",NotCupCap:"\u226d",NotDoubleVerticalBar:"\u2226",NotElement:"\u2209",NotEqual:"\u2260",NotEqualTilde:"\u2242\u0338",NotExists:"\u2204",NotGreater:"\u226f",NotGreaterEqual:"\u2271",NotGreaterFullEqual:"\u2267\u0338",NotGreaterGreater:"\u226b\u0338",NotGreaterLess:"\u2279",NotGreaterSlantEqual:"\u2a7e\u0338",NotGreaterTilde:"\u2275",NotHumpDownHump:"\u224e\u0338",NotHumpEqual:"\u224f\u0338",notin:"\u2209",notindot:"\u22f5\u0338",notinE:"\u22f9\u0338",notinva:"\u2209",notinvb:"\u22f7",notinvc:"\u22f6",NotLeftTriangleBar:"\u29cf\u0338",NotLeftTriangle:"\u22ea",NotLeftTriangleEqual:"\u22ec",NotLess:"\u226e",NotLessEqual:"\u2270",NotLessGreater:"\u2278",NotLessLess:"\u226a\u0338",NotLessSlantEqual:"\u2a7d\u0338",NotLessTilde:"\u2274",NotNestedGreaterGreater:"\u2aa2\u0338",NotNestedLessLess:"\u2aa1\u0338",notni:"\u220c",notniva:"\u220c",notnivb:"\u22fe",notnivc:"\u22fd",NotPrecedes:"\u2280",NotPrecedesEqual:"\u2aaf\u0338",NotPrecedesSlantEqual:"\u22e0",NotReverseEle
ment:"\u220c",NotRightTriangleBar:"\u29d0\u0338",NotRightTriangle:"\u22eb",NotRightTriangleEqual:"\u22ed",NotSquareSubset:"\u228f\u0338",NotSquareSubsetEqual:"\u22e2",NotSquareSuperset:"\u2290\u0338",NotSquareSupersetEqual:"\u22e3",NotSubset:"\u2282\u20d2",NotSubsetEqual:"\u2288",NotSucceeds:"\u2281",NotSucceedsEqual:"\u2ab0\u0338",NotSucceedsSlantEqual:"\u22e1",NotSucceedsTilde:"\u227f\u0338",NotSuperset:"\u2283\u20d2",NotSupersetEqual:"\u2289",NotTilde:"\u2241",NotTildeEqual:"\u2244",NotTildeFullEqual:"\u2247",NotTildeTilde:"\u2249",NotVerticalBar:"\u2224",nparallel:"\u2226",npar:"\u2226",nparsl:"\u2afd\u20e5",npart:"\u2202\u0338",npolint:"\u2a14",npr:"\u2280",nprcue:"\u22e0",nprec:"\u2280",npreceq:"\u2aaf\u0338",npre:"\u2aaf\u0338",nrarrc:"\u2933\u0338",nrarr:"\u219b",nrArr:"\u21cf",nrarrw:"\u219d\u0338",nrightarrow:"\u219b",nRightarrow:"\u21cf",nrtri:"\u22eb",nrtrie:"\u22ed",nsc:"\u2281",nsccue:"\u22e1",nsce:"\u2ab0\u0338",Nscr:"\ud835\udca9",nscr:"\ud835\udcc3",nshortmid:"\u2224",nshortparallel:"\u2226",nsim:"\u2241",nsime:"\u2244",nsimeq:"\u2244",nsmid:"\u2224",nspar:"\u2226",nsqsube:"\u22e2",nsqsupe:"\u22e3",nsub:"\u2284",nsubE:"\u2ac5\u0338",nsube:"\u2288",nsubset:"\u2282\u20d2",nsubseteq:"\u2288",nsubseteqq:"\u2ac5\u0338",nsucc:"\u2281",nsucceq:"\u2ab0\u0338",nsup:"\u2285",nsupE:"\u2ac6\u0338",nsupe:"\u2289",nsupset:"\u2283\u20d2",nsupseteq:"\u2289",nsupseteqq:"\u2ac6\u0338",ntgl:"\u2279",Ntilde:"\xd1",ntilde:"\xf1",ntlg:"\u2278",ntriangleleft:"\u22ea",ntrianglelefteq:"\u22ec",ntriangleright:"\u22eb",ntrianglerighteq:"\u22ed",Nu:"\u039d",nu:"\u03bd",num:"#",numero:"\u2116",numsp:"\u2007",nvap:"\u224d\u20d2",nvdash:"\u22ac",nvDash:"\u22ad",nVdash:"\u22ae",nVDash:"\u22af",nvge:"\u2265\u20d2",nvgt:">\u20d2",nvHarr:"\u2904",nvinfin:"\u29de",nvlArr:"\u2902",nvle:"\u2264\u20d2",nvlt:"<\u20d2",nvltrie:"\u22b4\u20d2",nvrArr:"\u2903",nvrtrie:"\u22b5\u20d2",nvsim:"\u223c\u20d2",nwarhk:"\u2923",nwarr:"\u2196",nwArr:"\u21d6",nwarrow:"\u2196",nwnear:"\u2927",Oacute:"\xd3",oacute:"\xf3",oast:"\u229b",Ocirc:"\xd4",ocirc:"\xf4",ocir:"\u229a",Ocy:"\u041e",ocy:"\u043e",odash:"\u229d",Odblac:"\u0150",odblac:"\u0151",odiv:"\u2a38",odot:"\u2299",odsold:"\u29bc",OElig:"\u0152",oelig:"\u0153",ofcir:"\u29bf",Ofr:"\ud835\udd12",ofr:"\ud835\udd2c",ogon:"\u02db",Ograve:"\xd2",ograve:"\xf2",ogt:"\u29c1",ohbar:"\u29b5",ohm:"\u03a9",oint:"\u222e",olarr:"\u21ba",olcir:"\u29be",olcross:"\u29bb",oline:"\u203e",olt:"\u29c0",Omacr:"\u014c",omacr:"\u014d",Omega:"\u03a9",omega:"\u03c9",Omicron:"\u039f",omicron:"\u03bf",omid:"\u29b6",ominus:"\u2296",Oopf:"\ud835\udd46",oopf:"\ud835\udd60",opar:"\u29b7",OpenCurlyDoubleQuote:"\u201c",OpenCurlyQuote:"\u2018",operp:"\u29b9",oplus:"\u2295",orarr:"\u21bb",Or:"\u2a54",or:"\u2228",ord:"\u2a5d",order:"\u2134",orderof:"\u2134",ordf:"\xaa",ordm:"\xba",origof:"\u22b6",oror:"\u2a56",orslope:"\u2a57",orv:"\u2a5b",oS:"\u24c8",Oscr:"\ud835\udcaa",oscr:"\u2134",Oslash:"\xd8",oslash:"\xf8",osol:"\u2298",Otilde:"\xd5",otilde:"\xf5",otimesas:"\u2a36",Otimes:"\u2a37",otimes:"\u2297",Ouml:"\xd6",ouml:"\xf6",ovbar:"\u233d",OverBar:"\u203e",OverBrace:"\u23de",OverBracket:"\u23b4",OverParenthesis:"\u23dc",para:"\xb6",parallel:"\u2225",par:"\u2225",parsim:"\u2af3",parsl:"\u2afd",part:"\u2202",PartialD:"\u2202",Pcy:"\u041f",pcy:"\u043f",percnt:"%",period:".",permil:"\u2030",perp:"\u22a5",pertenk:"\u2031",Pfr:"\ud835\udd13",pfr:"\ud835\udd2d",Phi:"\u03a6",phi:"\u03c6",phiv:"\u03d5",phmmat:"\u2133",phone:"\u260e",Pi:"\u03a0",pi:"\u03c0",pitchfork:"\u22d4",piv:"\u03d6",planck:"\u210f",planckh:"\u21
0e",plankv:"\u210f",plusacir:"\u2a23",plusb:"\u229e",pluscir:"\u2a22",plus:"+",plusdo:"\u2214",plusdu:"\u2a25",pluse:"\u2a72",PlusMinus:"\xb1",plusmn:"\xb1",plussim:"\u2a26",plustwo:"\u2a27",pm:"\xb1",Poincareplane:"\u210c",pointint:"\u2a15",popf:"\ud835\udd61",Popf:"\u2119",pound:"\xa3",prap:"\u2ab7",Pr:"\u2abb",pr:"\u227a",prcue:"\u227c",precapprox:"\u2ab7",prec:"\u227a",preccurlyeq:"\u227c",Precedes:"\u227a",PrecedesEqual:"\u2aaf",PrecedesSlantEqual:"\u227c",PrecedesTilde:"\u227e",preceq:"\u2aaf",precnapprox:"\u2ab9",precneqq:"\u2ab5",precnsim:"\u22e8",pre:"\u2aaf",prE:"\u2ab3",precsim:"\u227e",prime:"\u2032",Prime:"\u2033",primes:"\u2119",prnap:"\u2ab9",prnE:"\u2ab5",prnsim:"\u22e8",prod:"\u220f",Product:"\u220f",profalar:"\u232e",profline:"\u2312",profsurf:"\u2313",prop:"\u221d",Proportional:"\u221d",Proportion:"\u2237",propto:"\u221d",prsim:"\u227e",prurel:"\u22b0",Pscr:"\ud835\udcab",pscr:"\ud835\udcc5",Psi:"\u03a8",psi:"\u03c8",puncsp:"\u2008",Qfr:"\ud835\udd14",qfr:"\ud835\udd2e",qint:"\u2a0c",qopf:"\ud835\udd62",Qopf:"\u211a",qprime:"\u2057",Qscr:"\ud835\udcac",qscr:"\ud835\udcc6",quaternions:"\u210d",quatint:"\u2a16",quest:"?",questeq:"\u225f",quot:'"',QUOT:'"',rAarr:"\u21db",race:"\u223d\u0331",Racute:"\u0154",racute:"\u0155",radic:"\u221a",raemptyv:"\u29b3",rang:"\u27e9",Rang:"\u27eb",rangd:"\u2992",range:"\u29a5",rangle:"\u27e9",raquo:"\xbb",rarrap:"\u2975",rarrb:"\u21e5",rarrbfs:"\u2920",rarrc:"\u2933",rarr:"\u2192",Rarr:"\u21a0",rArr:"\u21d2",rarrfs:"\u291e",rarrhk:"\u21aa",rarrlp:"\u21ac",rarrpl:"\u2945",rarrsim:"\u2974",Rarrtl:"\u2916",rarrtl:"\u21a3",rarrw:"\u219d",ratail:"\u291a",rAtail:"\u291c",ratio:"\u2236",rationals:"\u211a",rbarr:"\u290d",rBarr:"\u290f",RBarr:"\u2910",rbbrk:"\u2773",rbrace:"}",rbrack:"]",rbrke:"\u298c",rbrksld:"\u298e",rbrkslu:"\u2990",Rcaron:"\u0158",rcaron:"\u0159",Rcedil:"\u0156",rcedil:"\u0157",rceil:"\u2309",rcub:"}",Rcy:"\u0420",rcy:"\u0440",rdca:"\u2937",rdldhar:"\u2969",rdquo:"\u201d",rdquor:"\u201d",rdsh:"\u21b3",real:"\u211c",realine:"\u211b",realpart:"\u211c",reals:"\u211d",Re:"\u211c",rect:"\u25ad",reg:"\xae",REG:"\xae",ReverseElement:"\u220b",ReverseEquilibrium:"\u21cb",ReverseUpEquilibrium:"\u296f",rfisht:"\u297d",rfloor:"\u230b",rfr:"\ud835\udd2f",Rfr:"\u211c",rHar:"\u2964",rhard:"\u21c1",rharu:"\u21c0",rharul:"\u296c",Rho:"\u03a1",rho:"\u03c1",rhov:"\u03f1",RightAngleBracket:"\u27e9",RightArrowBar:"\u21e5",rightarrow:"\u2192",RightArrow:"\u2192",Rightarrow:"\u21d2",RightArrowLeftArrow:"\u21c4",rightarrowtail:"\u21a3",RightCeiling:"\u2309",RightDoubleBracket:"\u27e7",RightDownTeeVector:"\u295d",RightDownVectorBar:"\u2955",RightDownVector:"\u21c2",RightFloor:"\u230b",rightharpoondown:"\u21c1",rightharpoonup:"\u21c0",rightleftarrows:"\u21c4",rightleftharpoons:"\u21cc",rightrightarrows:"\u21c9",rightsquigarrow:"\u219d",RightTeeArrow:"\u21a6",RightTee:"\u22a2",RightTeeVector:"\u295b",rightthreetimes:"\u22cc",RightTriangleBar:"\u29d0",RightTriangle:"\u22b3",RightTriangleEqual:"\u22b5",RightUpDownVector:"\u294f",RightUpTeeVector:"\u295c",RightUpVectorBar:"\u2954",RightUpVector:"\u21be",RightVectorBar:"\u2953",RightVector:"\u21c0",ring:"\u02da",risingdotseq:"\u2253",rlarr:"\u21c4",rlhar:"\u21cc",rlm:"\u200f",rmoustache:"\u23b1",rmoust:"\u23b1",rnmid:"\u2aee",roang:"\u27ed",roarr:"\u21fe",robrk:"\u27e7",ropar:"\u2986",ropf:"\ud835\udd63",Ropf:"\u211d",roplus:"\u2a2e",rotimes:"\u2a35",RoundImplies:"\u2970",rpar:")",rpargt:"\u2994",rppolint:"\u2a12",rrarr:"\u21c9",Rrightarrow:"\u21db",rsaquo:"\u203a",rscr:"\ud835\udcc7",Rscr:"\u211b",rsh:"\u21b
1",Rsh:"\u21b1",rsqb:"]",rsquo:"\u2019",rsquor:"\u2019",rthree:"\u22cc",rtimes:"\u22ca",rtri:"\u25b9",rtrie:"\u22b5",rtrif:"\u25b8",rtriltri:"\u29ce",RuleDelayed:"\u29f4",ruluhar:"\u2968",rx:"\u211e",Sacute:"\u015a",sacute:"\u015b",sbquo:"\u201a",scap:"\u2ab8",Scaron:"\u0160",scaron:"\u0161",Sc:"\u2abc",sc:"\u227b",sccue:"\u227d",sce:"\u2ab0",scE:"\u2ab4",Scedil:"\u015e",scedil:"\u015f",Scirc:"\u015c",scirc:"\u015d",scnap:"\u2aba",scnE:"\u2ab6",scnsim:"\u22e9",scpolint:"\u2a13",scsim:"\u227f",Scy:"\u0421",scy:"\u0441",sdotb:"\u22a1",sdot:"\u22c5",sdote:"\u2a66",searhk:"\u2925",searr:"\u2198",seArr:"\u21d8",searrow:"\u2198",sect:"\xa7",semi:";",seswar:"\u2929",setminus:"\u2216",setmn:"\u2216",sext:"\u2736",Sfr:"\ud835\udd16",sfr:"\ud835\udd30",sfrown:"\u2322",sharp:"\u266f",SHCHcy:"\u0429",shchcy:"\u0449",SHcy:"\u0428",shcy:"\u0448",ShortDownArrow:"\u2193",ShortLeftArrow:"\u2190",shortmid:"\u2223",shortparallel:"\u2225",ShortRightArrow:"\u2192",ShortUpArrow:"\u2191",shy:"\xad",Sigma:"\u03a3",sigma:"\u03c3",sigmaf:"\u03c2",sigmav:"\u03c2",sim:"\u223c",simdot:"\u2a6a",sime:"\u2243",simeq:"\u2243",simg:"\u2a9e",simgE:"\u2aa0",siml:"\u2a9d",simlE:"\u2a9f",simne:"\u2246",simplus:"\u2a24",simrarr:"\u2972",slarr:"\u2190",SmallCircle:"\u2218",smallsetminus:"\u2216",smashp:"\u2a33",smeparsl:"\u29e4",smid:"\u2223",smile:"\u2323",smt:"\u2aaa",smte:"\u2aac",smtes:"\u2aac\ufe00",SOFTcy:"\u042c",softcy:"\u044c",solbar:"\u233f",solb:"\u29c4",sol:"/",Sopf:"\ud835\udd4a",sopf:"\ud835\udd64",spades:"\u2660",spadesuit:"\u2660",spar:"\u2225",sqcap:"\u2293",sqcaps:"\u2293\ufe00",sqcup:"\u2294",sqcups:"\u2294\ufe00",Sqrt:"\u221a",sqsub:"\u228f",sqsube:"\u2291",sqsubset:"\u228f",sqsubseteq:"\u2291",sqsup:"\u2290",sqsupe:"\u2292",sqsupset:"\u2290",sqsupseteq:"\u2292",square:"\u25a1",Square:"\u25a1",SquareIntersection:"\u2293",SquareSubset:"\u228f",SquareSubsetEqual:"\u2291",SquareSuperset:"\u2290",SquareSupersetEqual:"\u2292",SquareUnion:"\u2294",squarf:"\u25aa",squ:"\u25a1",squf:"\u25aa",srarr:"\u2192",Sscr:"\ud835\udcae",sscr:"\ud835\udcc8",ssetmn:"\u2216",ssmile:"\u2323",sstarf:"\u22c6",Star:"\u22c6",star:"\u2606",starf:"\u2605",straightepsilon:"\u03f5",straightphi:"\u03d5",strns:"\xaf",sub:"\u2282",Sub:"\u22d0",subdot:"\u2abd",subE:"\u2ac5",sube:"\u2286",subedot:"\u2ac3",submult:"\u2ac1",subnE:"\u2acb",subne:"\u228a",subplus:"\u2abf",subrarr:"\u2979",subset:"\u2282",Subset:"\u22d0",subseteq:"\u2286",subseteqq:"\u2ac5",SubsetEqual:"\u2286",subsetneq:"\u228a",subsetneqq:"\u2acb",subsim:"\u2ac7",subsub:"\u2ad5",subsup:"\u2ad3",succapprox:"\u2ab8",succ:"\u227b",succcurlyeq:"\u227d",Succeeds:"\u227b",SucceedsEqual:"\u2ab0",SucceedsSlantEqual:"\u227d",SucceedsTilde:"\u227f",succeq:"\u2ab0",succnapprox:"\u2aba",succneqq:"\u2ab6",succnsim:"\u22e9",succsim:"\u227f",SuchThat:"\u220b",sum:"\u2211",Sum:"\u2211",sung:"\u266a",sup1:"\xb9",sup2:"\xb2",sup3:"\xb3",sup:"\u2283",Sup:"\u22d1",supdot:"\u2abe",supdsub:"\u2ad8",supE:"\u2ac6",supe:"\u2287",supedot:"\u2ac4",Superset:"\u2283",SupersetEqual:"\u2287",suphsol:"\u27c9",suphsub:"\u2ad7",suplarr:"\u297b",supmult:"\u2ac2",supnE:"\u2acc",supne:"\u228b",supplus:"\u2ac0",supset:"\u2283",Supset:"\u22d1",supseteq:"\u2287",supseteqq:"\u2ac6",supsetneq:"\u228b",supsetneqq:"\u2acc",supsim:"\u2ac8",supsub:"\u2ad4",supsup:"\u2ad6",swarhk:"\u2926",swarr:"\u2199",swArr:"\u21d9",swarrow:"\u2199",swnwar:"\u292a",szlig:"\xdf",Tab:"\t",target:"\u2316",Tau:"\u03a4",tau:"\u03c4",tbrk:"\u23b4",Tcaron:"\u0164",tcaron:"\u0165",Tcedil:"\u0162",tcedil:"\u0163",Tcy:"\u0422",tcy:"\u0442",tdot:"\u20db
",telrec:"\u2315",Tfr:"\ud835\udd17",tfr:"\ud835\udd31",there4:"\u2234",therefore:"\u2234",Therefore:"\u2234",Theta:"\u0398",theta:"\u03b8",thetasym:"\u03d1",thetav:"\u03d1",thickapprox:"\u2248",thicksim:"\u223c",ThickSpace:"\u205f\u200a",ThinSpace:"\u2009",thinsp:"\u2009",thkap:"\u2248",thksim:"\u223c",THORN:"\xde",thorn:"\xfe",tilde:"\u02dc",Tilde:"\u223c",TildeEqual:"\u2243",TildeFullEqual:"\u2245",TildeTilde:"\u2248",timesbar:"\u2a31",timesb:"\u22a0",times:"\xd7",timesd:"\u2a30",tint:"\u222d",toea:"\u2928",topbot:"\u2336",topcir:"\u2af1",top:"\u22a4",Topf:"\ud835\udd4b",topf:"\ud835\udd65",topfork:"\u2ada",tosa:"\u2929",tprime:"\u2034",trade:"\u2122",TRADE:"\u2122",triangle:"\u25b5",triangledown:"\u25bf",triangleleft:"\u25c3",trianglelefteq:"\u22b4",triangleq:"\u225c",triangleright:"\u25b9",trianglerighteq:"\u22b5",tridot:"\u25ec",trie:"\u225c",triminus:"\u2a3a",TripleDot:"\u20db",triplus:"\u2a39",trisb:"\u29cd",tritime:"\u2a3b",trpezium:"\u23e2",Tscr:"\ud835\udcaf",tscr:"\ud835\udcc9",TScy:"\u0426",tscy:"\u0446",TSHcy:"\u040b",tshcy:"\u045b",Tstrok:"\u0166",tstrok:"\u0167",twixt:"\u226c",twoheadleftarrow:"\u219e",twoheadrightarrow:"\u21a0",Uacute:"\xda",uacute:"\xfa",uarr:"\u2191",Uarr:"\u219f",uArr:"\u21d1",Uarrocir:"\u2949",Ubrcy:"\u040e",ubrcy:"\u045e",Ubreve:"\u016c",ubreve:"\u016d",Ucirc:"\xdb",ucirc:"\xfb",Ucy:"\u0423",ucy:"\u0443",udarr:"\u21c5",Udblac:"\u0170",udblac:"\u0171",udhar:"\u296e",ufisht:"\u297e",Ufr:"\ud835\udd18",ufr:"\ud835\udd32",Ugrave:"\xd9",ugrave:"\xf9",uHar:"\u2963",uharl:"\u21bf",uharr:"\u21be",uhblk:"\u2580",ulcorn:"\u231c",ulcorner:"\u231c",ulcrop:"\u230f",ultri:"\u25f8",Umacr:"\u016a",umacr:"\u016b",uml:"\xa8",UnderBar:"_",UnderBrace:"\u23df",UnderBracket:"\u23b5",UnderParenthesis:"\u23dd",Union:"\u22c3",UnionPlus:"\u228e",Uogon:"\u0172",uogon:"\u0173",Uopf:"\ud835\udd4c",uopf:"\ud835\udd66",UpArrowBar:"\u2912",uparrow:"\u2191",UpArrow:"\u2191",Uparrow:"\u21d1",UpArrowDownArrow:"\u21c5",updownarrow:"\u2195",UpDownArrow:"\u2195",Updownarrow:"\u21d5",UpEquilibrium:"\u296e",upharpoonleft:"\u21bf",upharpoonright:"\u21be",uplus:"\u228e",UpperLeftArrow:"\u2196",UpperRightArrow:"\u2197",upsi:"\u03c5",Upsi:"\u03d2",upsih:"\u03d2",Upsilon:"\u03a5",upsilon:"\u03c5",UpTeeArrow:"\u21a5",UpTee:"\u22a5",upuparrows:"\u21c8",urcorn:"\u231d",urcorner:"\u231d",urcrop:"\u230e",Uring:"\u016e",uring:"\u016f",urtri:"\u25f9",Uscr:"\ud835\udcb0",uscr:"\ud835\udcca",utdot:"\u22f0",Utilde:"\u0168",utilde:"\u0169",utri:"\u25b5",utrif:"\u25b4",uuarr:"\u21c8",Uuml:"\xdc",uuml:"\xfc",uwangle:"\u29a7",vangrt:"\u299c",varepsilon:"\u03f5",varkappa:"\u03f0",varnothing:"\u2205",varphi:"\u03d5",varpi:"\u03d6",varpropto:"\u221d",varr:"\u2195",vArr:"\u21d5",varrho:"\u03f1",varsigma:"\u03c2",varsubsetneq:"\u228a\ufe00",varsubsetneqq:"\u2acb\ufe00",varsupsetneq:"\u228b\ufe00",varsupsetneqq:"\u2acc\ufe00",vartheta:"\u03d1",vartriangleleft:"\u22b2",vartriangleright:"\u22b3",vBar:"\u2ae8",Vbar:"\u2aeb",vBarv:"\u2ae9",Vcy:"\u0412",vcy:"\u0432",vdash:"\u22a2",vDash:"\u22a8",Vdash:"\u22a9",VDash:"\u22ab",Vdashl:"\u2ae6",veebar:"\u22bb",vee:"\u2228",Vee:"\u22c1",veeeq:"\u225a",vellip:"\u22ee",verbar:"|",Verbar:"\u2016",vert:"|",Vert:"\u2016",VerticalBar:"\u2223",VerticalLine:"|",VerticalSeparator:"\u2758",VerticalTilde:"\u2240",VeryThinSpace:"\u200a",Vfr:"\ud835\udd19",vfr:"\ud835\udd33",vltri:"\u22b2",vnsub:"\u2282\u20d2",vnsup:"\u2283\u20d2",Vopf:"\ud835\udd4d",vopf:"\ud835\udd67",vprop:"\u221d",vrtri:"\u22b3",Vscr:"\ud835\udcb1",vscr:"\ud835\udccb",vsubnE:"\u2acb\ufe00",vsubne:"\u228a\ufe00",vsupnE
:"\u2acc\ufe00",vsupne:"\u228b\ufe00",Vvdash:"\u22aa",vzigzag:"\u299a",Wcirc:"\u0174",wcirc:"\u0175",wedbar:"\u2a5f",wedge:"\u2227",Wedge:"\u22c0",wedgeq:"\u2259",weierp:"\u2118",Wfr:"\ud835\udd1a",wfr:"\ud835\udd34",Wopf:"\ud835\udd4e",wopf:"\ud835\udd68",wp:"\u2118",wr:"\u2240",wreath:"\u2240",Wscr:"\ud835\udcb2",wscr:"\ud835\udccc",xcap:"\u22c2",xcirc:"\u25ef",xcup:"\u22c3",xdtri:"\u25bd",Xfr:"\ud835\udd1b",xfr:"\ud835\udd35",xharr:"\u27f7",xhArr:"\u27fa",Xi:"\u039e",xi:"\u03be",xlarr:"\u27f5",xlArr:"\u27f8",xmap:"\u27fc",xnis:"\u22fb",xodot:"\u2a00",Xopf:"\ud835\udd4f",xopf:"\ud835\udd69",xoplus:"\u2a01",xotime:"\u2a02",xrarr:"\u27f6",xrArr:"\u27f9",Xscr:"\ud835\udcb3",xscr:"\ud835\udccd",xsqcup:"\u2a06",xuplus:"\u2a04",xutri:"\u25b3",xvee:"\u22c1",xwedge:"\u22c0",Yacute:"\xdd",yacute:"\xfd",YAcy:"\u042f",yacy:"\u044f",Ycirc:"\u0176",ycirc:"\u0177",Ycy:"\u042b",ycy:"\u044b",yen:"\xa5",Yfr:"\ud835\udd1c",yfr:"\ud835\udd36",YIcy:"\u0407",yicy:"\u0457",Yopf:"\ud835\udd50",yopf:"\ud835\udd6a",Yscr:"\ud835\udcb4",yscr:"\ud835\udcce",YUcy:"\u042e",yucy:"\u044e",yuml:"\xff",Yuml:"\u0178",Zacute:"\u0179",zacute:"\u017a",Zcaron:"\u017d",zcaron:"\u017e",Zcy:"\u0417",zcy:"\u0437",Zdot:"\u017b",zdot:"\u017c",zeetrf:"\u2128",ZeroWidthSpace:"\u200b",Zeta:"\u0396",zeta:"\u03b6",zfr:"\ud835\udd37",Zfr:"\u2128",ZHcy:"\u0416",zhcy:"\u0436",zigrarr:"\u21dd",zopf:"\ud835\udd6b",Zopf:"\u2124",Zscr:"\ud835\udcb5",zscr:"\ud835\udccf",zwj:"\u200d",zwnj:"\u200c"}}}),m=u({"node_modules/markdown-it/lib/common/entities.js"(e,t){"use strict";t.exports=h()}}),g=u({"node_modules/uc.micro/categories/P/regex.js"(e,t){t.exports=/[!-#%-\*,-\/:;\?@\[-\]_\{\}\xA1\xA7\xAB\xB6\xB7\xBB\xBF\u037E\u0387\u055A-\u055F\u0589\u058A\u05BE\u05C0\u05C3\u05C6\u05F3\u05F4\u0609\u060A\u060C\u060D\u061B\u061E\u061F\u066A-\u066D\u06D4\u0700-\u070D\u07F7-\u07F9\u0830-\u083E\u085E\u0964\u0965\u0970\u09FD\u0A76\u0AF0\u0C84\u0DF4\u0E4F\u0E5A\u0E5B\u0F04-\u0F12\u0F14\u0F3A-\u0F3D\u0F85\u0FD0-\u0FD4\u0FD9\u0FDA\u104A-\u104F\u10FB\u1360-\u1368\u1400\u166D\u166E\u169B\u169C\u16EB-\u16ED\u1735\u1736\u17D4-\u17D6\u17D8-\u17DA\u1800-\u180A\u1944\u1945\u1A1E\u1A1F\u1AA0-\u1AA6\u1AA8-\u1AAD\u1B5A-\u1B60\u1BFC-\u1BFF\u1C3B-\u1C3F\u1C7E\u1C7F\u1CC0-\u1CC7\u1CD3\u2010-\u2027\u2030-\u2043\u2045-\u2051\u2053-\u205E\u207D\u207E\u208D\u208E\u2308-\u230B\u2329\u232A\u2768-\u2775\u27C5\u27C6\u27E6-\u27EF\u2983-\u2998\u29D8-\u29DB\u29FC\u29FD\u2CF9-\u2CFC\u2CFE\u2CFF\u2D70\u2E00-\u2E2E\u2E30-\u2E4E\u3001-\u3003\u3008-\u3011\u3014-\u301F\u3030\u303D\u30A0\u30FB\uA4FE\uA4FF\uA60D-\uA60F\uA673\uA67E\uA6F2-\uA6F7\uA874-\uA877\uA8CE\uA8CF\uA8F8-\uA8FA\uA8FC\uA92E\uA92F\uA95F\uA9C1-\uA9CD\uA9DE\uA9DF\uAA5C-\uAA5F\uAADE\uAADF\uAAF0\uAAF1\uABEB\uFD3E\uFD3F\uFE10-\uFE19\uFE30-\uFE52\uFE54-\uFE61\uFE63\uFE68\uFE6A\uFE6B\uFF01-\uFF03\uFF05-\uFF0A\uFF0C-\uFF0F\uFF1A\uFF1B\uFF1F\uFF20\uFF3B-\uFF3D\uFF3F\uFF5B\uFF5D\uFF5F-\uFF65]|\uD800[\uDD00-\uDD02\uDF9F\uDFD0]|\uD801\uDD6F|\uD802[\uDC57\uDD1F\uDD3F\uDE50-\uDE58\uDE7F\uDEF0-\uDEF6\uDF39-\uDF3F\uDF99-\uDF9C]|\uD803[\uDF55-\uDF59]|\uD804[\uDC47-\uDC4D\uDCBB\uDCBC\uDCBE-\uDCC1\uDD40-\uDD43\uDD74\uDD75\uDDC5-\uDDC8\uDDCD\uDDDB\uDDDD-\uDDDF\uDE38-\uDE3D\uDEA9]|\uD805[\uDC4B-\uDC4F\uDC5B\uDC5D\uDCC6\uDDC1-\uDDD7\uDE41-\uDE43\uDE60-\uDE6C\uDF3C-\uDF3E]|\uD806[\uDC3B\uDE3F-\uDE46\uDE9A-\uDE9C\uDE9E-\uDEA2]|\uD807[\uDC41-\uDC45\uDC70\uDC71\uDEF7\uDEF8]|\uD809[\uDC70-\uDC74]|\uD81A[\uDE6E\uDE6F\uDEF5\uDF37-\uDF3B\uDF44]|\uD81B[\uDE97-\uDE9A]|\uD82F\uDC9F|\uD836[\uDE87-\uDE8B]|\uD83A[\uDD5E\uDD5F]/}}),b=u({"node_modules/mdurl/encode.j
s"(e,t){"use strict";var r={};function n(e,t,a){var i,o,s,c,l,u="";for("string"!==typeof t&&(a=t,t=n.defaultChars),"undefined"===typeof a&&(a=!0),l=function(e){var t,n,a=r[e];if(a)return a;for(a=r[e]=[],t=0;t<128;t++)n=String.fromCharCode(t),/^[0-9a-z]$/i.test(n)?a.push(n):a.push("%"+("0"+t.toString(16).toUpperCase()).slice(-2));for(t=0;t=55296&&s<=57343){if(s>=55296&&s<=56319&&i+1=56320&&c<=57343){u+=encodeURIComponent(e[i]+e[i+1]),i++;continue}u+="%EF%BF%BD"}else u+=encodeURIComponent(e[i]);return u}n.defaultChars=";/?:@&=+$,-_.!~*'()#",n.componentChars="-_.!~*'()",t.exports=n}}),y=u({"node_modules/mdurl/decode.js"(e,t){"use strict";var r={};function n(e,t){var a;return"string"!==typeof t&&(t=n.defaultChars),a=function(e){var t,n,a=r[e];if(a)return a;for(a=r[e]=[],t=0;t<128;t++)n=String.fromCharCode(t),a.push(n);for(t=0;t=55296&&c<=57343?"\ufffd\ufffd\ufffd":String.fromCharCode(c),t+=6):240===(248&n)&&t+91114111?l+="\ufffd\ufffd\ufffd\ufffd":(c-=65536,l+=String.fromCharCode(55296+(c>>10),56320+(1023&c))),t+=9):l+="\ufffd";return l}))}n.defaultChars=";/?:@&=+$,#",n.componentChars="",t.exports=n}}),v=u({"node_modules/mdurl/format.js"(e,t){"use strict";t.exports=function(e){var t="";return t+=e.protocol||"",t+=e.slashes?"//":"",t+=e.auth?e.auth+"@":"",e.hostname&&-1!==e.hostname.indexOf(":")?t+="["+e.hostname+"]":t+=e.hostname||"",t+=e.port?":"+e.port:"",t+=e.pathname||"",t+=e.search||"",t+=e.hash||""}}}),k=u({"node_modules/mdurl/parse.js"(e,t){"use strict";function r(){this.protocol=null,this.slashes=null,this.auth=null,this.port=null,this.hostname=null,this.hash=null,this.search=null,this.pathname=null}var n=/^([a-z0-9.+-]+:)/i,a=/:[0-9]*$/,i=/^(\/\/?(?!\/)[^\?\s]*)(\?[^\s]*)?$/,o=["{","}","|","\\","^","`"].concat(["<",">",'"',"`"," ","\r","\n","\t"]),s=["'"].concat(o),c=["%","/","?",";","#"].concat(s),l=["/","?","#"],u=/^[+a-z0-9A-Z_-]{0,63}$/,f=/^([+a-z0-9A-Z_-]{0,63})(.*)$/,p={javascript:!0,"javascript:":!0},d={http:!0,https:!0,ftp:!0,gopher:!0,file:!0,"http:":!0,"https:":!0,"ftp:":!0,"gopher:":!0,"file:":!0};r.prototype.parse=function(e,t){var r,a,o,s,h,m=e;if(m=m.trim(),!t&&1===e.split("#").length){var g=i.exec(m);if(g)return this.pathname=g[1],g[2]&&(this.search=g[2]),this}var b=n.exec(m);if(b&&(o=(b=b[0]).toLowerCase(),this.protocol=b,m=m.substr(b.length)),(t||b||m.match(/^\/\/[^@\/]+@[^@\/]+/))&&(!(h="//"===m.substr(0,2))||b&&p[b]||(m=m.substr(2),this.slashes=!0)),!p[b]&&(h||b&&!d[b])){var y,v,k=-1;for(r=0;r127?C+="x":C+=A[E];if(!C.match(u)){var D=x.slice(0,r),j=x.slice(r+1),O=A.match(f);O&&(D.push(O[1]),j.unshift(O[2])),j.length&&(m=j.join(".")+m),this.hostname=D.join(".");break}}}}this.hostname.length>255&&(this.hostname=""),w&&(this.hostname=this.hostname.substr(1,this.hostname.length-2))}var q=m.indexOf("#");-1!==q&&(this.hash=m.substr(q),m=m.slice(0,q));var F=m.indexOf("?");return-1!==F&&(this.search=m.substr(F),m=m.slice(0,F)),m&&(this.pathname=m),d[o]&&this.hostname&&!this.pathname&&(this.pathname=""),this},r.prototype.parseHost=function(e){var t=a.exec(e);t&&(":"!==(t=t[0])&&(this.port=t.substr(1)),e=e.substr(0,e.length-t.length)),e&&(this.hostname=e)},t.exports=function(e,t){if(e&&e instanceof r)return e;var n=new r;return n.parse(e,t),n}}}),_=u({"node_modules/mdurl/index.js"(e,t){"use 
strict";t.exports.encode=b(),t.exports.decode=y(),t.exports.format=v(),t.exports.parse=k()}}),w=u({"node_modules/uc.micro/properties/Any/regex.js"(e,t){t.exports=/[\0-\uD7FF\uE000-\uFFFF]|[\uD800-\uDBFF][\uDC00-\uDFFF]|[\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?:[^\uD800-\uDBFF]|^)[\uDC00-\uDFFF]/}}),x=u({"node_modules/uc.micro/categories/Cc/regex.js"(e,t){t.exports=/[\0-\x1F\x7F-\x9F]/}}),A=u({"node_modules/uc.micro/categories/Cf/regex.js"(e,t){t.exports=/[\xAD\u0600-\u0605\u061C\u06DD\u070F\u08E2\u180E\u200B-\u200F\u202A-\u202E\u2060-\u2064\u2066-\u206F\uFEFF\uFFF9-\uFFFB]|\uD804[\uDCBD\uDCCD]|\uD82F[\uDCA0-\uDCA3]|\uD834[\uDD73-\uDD7A]|\uDB40[\uDC01\uDC20-\uDC7F]/}}),C=u({"node_modules/uc.micro/categories/Z/regex.js"(e,t){t.exports=/[ \xA0\u1680\u2000-\u200A\u2028\u2029\u202F\u205F\u3000]/}}),E=u({"node_modules/uc.micro/index.js"(e){"use strict";e.Any=w(),e.Cc=x(),e.Cf=A(),e.P=g(),e.Z=C()}}),S=u({"node_modules/markdown-it/lib/common/utils.js"(e){"use strict";var t=Object.prototype.hasOwnProperty;function r(e,r){return t.call(e,r)}function n(e){return!(e>=55296&&e<=57343)&&(!(e>=64976&&e<=65007)&&(65535!==(65535&e)&&65534!==(65535&e)&&(!(e>=0&&e<=8)&&(11!==e&&(!(e>=14&&e<=31)&&(!(e>=127&&e<=159)&&!(e>1114111)))))))}function a(e){if(e>65535){var t=55296+((e-=65536)>>10),r=56320+(1023&e);return String.fromCharCode(t,r)}return String.fromCharCode(e)}var i=/\\([!"#$%&'()*+,\-.\/:;<=>?@[\\\]^_`{|}~])/g,o=new RegExp(i.source+"|"+/&([a-z#][a-z0-9]{1,31});/gi.source,"gi"),s=/^#((?:x[a-f0-9]{1,8}|[0-9]{1,8}))/i,c=m();var l=/[&<>"]/,u=/[&<>"]/g,f={"&":"&","<":"<",">":">",'"':"""};function p(e){return f[e]}var d=/[.?*+^$[\]\\(){}|-]/g;var h=g();e.lib={},e.lib.mdurl=_(),e.lib.ucmicro=E(),e.assign=function(e){return Array.prototype.slice.call(arguments,1).forEach((function(t){if(t){if("object"!==typeof t)throw new TypeError(t+"must be object");Object.keys(t).forEach((function(r){e[r]=t[r]}))}})),e},e.isString=function(e){return"[object String]"===function(e){return Object.prototype.toString.call(e)}(e)},e.has=r,e.unescapeMd=function(e){return e.indexOf("\\")<0?e:e.replace(i,"$1")},e.unescapeAll=function(e){return e.indexOf("\\")<0&&e.indexOf("&")<0?e:e.replace(o,(function(e,t,i){return t||function(e,t){var i=0;return r(c,t)?c[t]:35===t.charCodeAt(0)&&s.test(t)&&n(i="x"===t[1].toLowerCase()?parseInt(t.slice(2),16):parseInt(t.slice(1),10))?a(i):e}(e,i)}))},e.isValidEntityCode=n,e.fromCodePoint=a,e.escapeHtml=function(e){return l.test(e)?e.replace(u,p):e},e.arrayReplaceAt=function(e,t,r){return[].concat(e.slice(0,t),r,e.slice(t+1))},e.isSpace=function(e){switch(e){case 9:case 32:return!0}return!1},e.isWhiteSpace=function(e){if(e>=8192&&e<=8202)return!0;switch(e){case 9:case 10:case 11:case 12:case 13:case 32:case 160:case 5760:case 8239:case 8287:case 12288:return!0}return!1},e.isMdAsciiPunct=function(e){switch(e){case 33:case 34:case 35:case 36:case 37:case 38:case 39:case 40:case 41:case 42:case 43:case 44:case 45:case 46:case 47:case 58:case 59:case 60:case 61:case 62:case 63:case 64:case 91:case 92:case 93:case 94:case 95:case 96:case 123:case 124:case 125:case 126:return!0;default:return!1}},e.isPunctChar=function(e){return h.test(e)},e.escapeRE=function(e){return e.replace(d,"\\$&")},e.normalizeReference=function(e){return e=e.trim().replace(/\s+/g," "),"\u1e7e"==="\u1e9e".toLowerCase()&&(e=e.replace(/\u1e9e/g,"\xdf")),e.toLowerCase().toUpperCase()}}}),D=u({"node_modules/markdown-it/lib/helpers/parse_link_label.js"(e,t){"use strict";t.exports=function(e,t,r){var 
n,a,i,o,s=-1,c=e.posMax,l=e.pos;for(e.pos=t+1,n=1;e.pos32)return s;if(41===a){if(0===i)break;i--}t++}return o===t||0!==i||(s.str=r(e.slice(o,t)),s.lines=0,s.pos=t,s.ok=!0),s}}}),O=u({"node_modules/markdown-it/lib/helpers/parse_link_title.js"(e,t){"use strict";var r=S().unescapeAll;t.exports=function(e,t,n){var a,i,o=0,s=t,c={ok:!1,pos:0,lines:0,str:""};if(t>=n)return c;if(34!==(i=e.charCodeAt(t))&&39!==i&&40!==i)return c;for(t++,40===i&&(i=41);t"+a(e[t].content)+""},i.code_block=function(e,t,r,n,i){var o=e[t];return""+a(e[t].content)+"\n"},i.fence=function(e,t,r,i,o){var s,c,l,u,f,p=e[t],d=p.info?n(p.info).trim():"",h="",m="";return d&&(h=(l=d.split(/(\s+)/g))[0],m=l.slice(2).join("")),0===(s=r.highlight&&r.highlight(p.content,h,m)||a(p.content)).indexOf(""+s+"\n"):"
    "+s+"
    \n"},i.image=function(e,t,r,n,a){var i=e[t];return i.attrs[i.attrIndex("alt")][1]=a.renderInlineAsText(i.children,r,n),a.renderToken(e,t,r)},i.hardbreak=function(e,t,r){return r.xhtmlOut?"
    \n":"
    \n"},i.softbreak=function(e,t,r){return r.breaks?r.xhtmlOut?"
    \n":"
    \n":"\n"},i.text=function(e,t){return a(e[t].content)},i.html_block=function(e,t){return e[t].content},i.html_inline=function(e,t){return e[t].content},o.prototype.renderAttrs=function(e){var t,r,n;if(!e.attrs)return"";for(n="",t=0,r=e.attrs.length;t\n":">")},o.prototype.renderInline=function(e,t,r){for(var n,a="",i=this.rules,o=0,s=e.length;o/i.test(e)}t.exports=function(e){var t,a,i,o,s,c,l,u,f,p,d,h,m,g,b,y,v,k,_=e.tokens;if(e.md.options.linkify)for(a=0,i=_.length;a=0;t--)if("link_close"!==(c=o[t]).type){if("html_inline"===c.type&&(k=c.content,/^\s]/i.test(k)&&m>0&&m--,n(c.content)&&m++),!(m>0)&&"text"===c.type&&e.md.linkify.test(c.content)){for(f=c.content,v=e.md.linkify.match(f),l=[],h=c.level,d=0,u=0;ud&&((s=new e.Token("text","",0)).content=f.slice(d,p),s.level=h,l.push(s)),(s=new e.Token("link_open","a",1)).attrs=[["href",b]],s.level=h++,s.markup="linkify",s.info="auto",l.push(s),(s=new e.Token("text","",0)).content=y,s.level=h,l.push(s),(s=new e.Token("link_close","a",-1)).level=--h,s.markup="linkify",s.info="auto",l.push(s),d=v[u].lastIndex);d=0;t--)"text"!==(r=e[t]).type||n||(r.content=r.content.replace(a,o)),"link_open"===r.type&&"auto"===r.info&&n--,"link_close"===r.type&&"auto"===r.info&&n++}function c(e){var t,n,a=0;for(t=e.length-1;t>=0;t--)"text"!==(n=e[t]).type||a||r.test(n.content)&&(n.content=n.content.replace(/\+-/g,"\xb1").replace(/\.{2,}/g,"\u2026").replace(/([?!])\u2026/g,"$1..").replace(/([?!]){4,}/g,"$1$1$1").replace(/,{2,}/g,",").replace(/(^|[^-])---(?=[^-]|$)/gm,"$1\u2014").replace(/(^|\s)--(?=\s|$)/gm,"$1\u2013").replace(/(^|[^-\s])--(?=[^-\s]|$)/gm,"$1\u2013")),"link_open"===n.type&&"auto"===n.info&&a--,"link_close"===n.type&&"auto"===n.info&&a++}t.exports=function(e){var t;if(e.md.options.typographer)for(t=e.tokens.length-1;t>=0;t--)"inline"===e.tokens[t].type&&(n.test(e.tokens[t].content)&&s(e.tokens[t].children),r.test(e.tokens[t].content)&&c(e.tokens[t].children))}}}),P=u({"node_modules/markdown-it/lib/rules_core/smartquotes.js"(e,t){"use strict";var r=S().isWhiteSpace,n=S().isPunctChar,a=S().isMdAsciiPunct,i=/['"]/,o=/['"]/g;function s(e,t,r){return e.substr(0,t)+r+e.substr(t+1)}function c(e,t){var i,c,l,u,f,p,d,h,m,g,b,y,v,k,_,w,x,A,C,E,S;for(C=[],i=0;i=0&&!(C[x].level<=d);x--);if(C.length=x+1,"text"===c.type){f=0,p=(l=c.content).length;e:for(;f=0)m=l.charCodeAt(u.index-1);else for(x=i-1;x>=0&&("softbreak"!==e[x].type&&"hardbreak"!==e[x].type);x--)if(e[x].content){m=e[x].content.charCodeAt(e[x].content.length-1);break}if(g=32,f=48&&m<=57&&(w=_=!1),_&&w&&(_=b,w=y),_||w){if(w)for(x=C.length-1;x>=0&&(h=C[x],!(C[x].level=0;t--)"inline"===e.tokens[t].type&&i.test(e.tokens[t].content)&&c(e.tokens[t].children,e)}}}),R=u({"node_modules/markdown-it/lib/token.js"(e,t){"use strict";function r(e,t,r){this.type=e,this.tag=t,this.attrs=null,this.map=null,this.nesting=r,this.level=0,this.children=null,this.content="",this.markup="",this.info="",this.meta=null,this.block=!1,this.hidden=!1}r.prototype.attrIndex=function(e){var t,r,n;if(!this.attrs)return-1;for(r=0,n=(t=this.attrs).length;r=0&&(r=this.attrs[t][1]),r},r.prototype.attrJoin=function(e,t){var r=this.attrIndex(e);r<0?this.attrPush([e,t]):this.attrs[r][1]=this.attrs[r][1]+" "+t},t.exports=r}}),$=u({"node_modules/markdown-it/lib/rules_core/state_core.js"(e,t){"use strict";var r=R();function n(e,t,r){this.src=e,this.env=r,this.tokens=[],this.inlineMode=!1,this.md=t}n.prototype.Token=r,t.exports=n}}),B=u({"node_modules/markdown-it/lib/parser_core.js"(e,t){"use strict";var 
r=z(),n=[["normalize",L()],["block",I()],["inline",N()],["linkify",M()],["replacements",T()],["smartquotes",P()]];function a(){this.ruler=new r;for(var e=0;ei)return!1;if(p=t+1,e.sCount[p]=4)return!1;if((l=e.bMarks[p]+e.tShift[p])>=e.eMarks[p])return!1;if(124!==(x=e.src.charCodeAt(l++))&&45!==x&&58!==x)return!1;if(l>=e.eMarks[p])return!1;if(124!==(A=e.src.charCodeAt(l++))&&45!==A&&58!==A&&!r(A))return!1;if(45===x&&r(A))return!1;for(;l=4)return!1;if((d=a(c)).length&&""===d[0]&&d.shift(),d.length&&""===d[d.length-1]&&d.pop(),0===(h=d.length)||h!==g.length)return!1;if(o)return!0;for(k=e.parentType,e.parentType="table",w=e.md.block.ruler.getRules("blockquote"),(m=e.push("table_open","table",1)).map=y=[t,0],(m=e.push("thead_open","thead",1)).map=[t,t+1],(m=e.push("tr_open","tr",1)).map=[t,t+1],u=0;u=4)break;for((d=a(c)).length&&""===d[0]&&d.shift(),d.length&&""===d[d.length-1]&&d.pop(),p===t+2&&((m=e.push("tbody_open","tbody",1)).map=v=[t+2,0]),(m=e.push("tr_open","tr",1)).map=[p,p+1],u=0;u=4))break;a=++n}return e.line=a,(i=e.push("code_block","code",0)).content=e.getLines(t,a,4+e.blkIndent,!1)+"\n",i.map=[t,e.line],!0}}}),Z=u({"node_modules/markdown-it/lib/rules_block/fence.js"(e,t){"use strict";t.exports=function(e,t,r,n){var a,i,o,s,c,l,u,f=!1,p=e.bMarks[t]+e.tShift[t],d=e.eMarks[t];if(!e.md.options.allowIndentation&&e.sCount[t]-e.blkIndent>=4)return!1;if(p+3>d)return!1;if(126!==(a=e.src.charCodeAt(p))&&96!==a)return!1;if(c=p,(i=(p=e.skipChars(p,a))-c)<3)return!1;if(u=e.src.slice(c,p),o=e.src.slice(p,d),96===a&&o.indexOf(String.fromCharCode(a))>=0)return!1;if(n)return!0;for(s=t;!(++s>=r)&&!((p=c=e.bMarks[s]+e.tShift[s])<(d=e.eMarks[s])&&e.sCount[s]=4))&&!((p=e.skipChars(p,a))-c=4)return!1;if(62!==e.src.charCodeAt(E++))return!1;if(a)return!0;for(c=d=e.sCount[t]+1,32===e.src.charCodeAt(E)?(E++,c++,d++,i=!1,k=!0):9===e.src.charCodeAt(E)?(k=!0,(e.bsCount[t]+d)%4===3?(E++,c++,d++,i=!1):i=!0):k=!1,h=[e.bMarks[t]],e.bMarks[t]=E;E=S,y=[e.sCount[t]],e.sCount[t]=d-c,v=[e.tShift[t]],e.tShift[t]=E-e.bMarks[t],w=e.md.block.ruler.getRules("blockquote"),b=e.parentType,e.parentType="blockquote",p=t+1;p=(S=e.eMarks[p])));p++)if(62!==e.src.charCodeAt(E++)||A){if(u)break;for(_=!1,s=0,l=w.length;s=S,m.push(e.bsCount[p]),e.bsCount[p]=e.sCount[p]+1+(k?1:0),y.push(e.sCount[p]),e.sCount[p]=d-c,v.push(e.tShift[p]),e.tShift[p]=E-e.bMarks[p]}for(g=e.blkIndent,e.blkIndent=0,(x=e.push("blockquote_open","blockquote",1)).markup=">",x.map=f=[t,0],e.md.block.tokenize(e,t,p),(x=e.push("blockquote_close","blockquote",-1)).markup=">",e.lineMax=C,e.parentType=b,f[1]=e.line,s=0;s=4)return!1;if(42!==(i=e.src.charCodeAt(l++))&&45!==i&&95!==i)return!1;for(o=1;l=o)return-1;if((n=e.src.charCodeAt(i++))<48||n>57)return-1;for(;;){if(i>=o)return-1;if(!((n=e.src.charCodeAt(i++))>=48&&n<=57)){if(41===n||46===n)break;return-1}if(i-a>=10)return-1}return 
i=4)return!1;if(!e.md.options.allowIndentation&&e.listIndent>=0&&e.sCount[t]-e.listIndent>=4&&e.sCount[t]=e.blkIndent&&(L=!0),(D=a(e,t))>=0){if(p=!0,O=e.bMarks[t]+e.tShift[t],y=Number(e.src.slice(O,D-1)),L&&1!==y)return!1}else{if(!((D=n(e,t))>=0))return!1;p=!1}if(L&&e.skipSpaces(D)>=e.eMarks[t])return!1;if(b=e.src.charCodeAt(D-1),i)return!0;for(g=e.tokens.length,p?(z=e.push("ordered_list_open","ol",1),1!==y&&(z.attrs=[["start",y]])):z=e.push("bullet_list_open","ul",1),z.map=m=[t,0],z.markup=String.fromCharCode(b),k=t,j=!1,F=e.md.block.ruler.getRules("list"),x=e.parentType,e.parentType="list";k=v?1:_-f,!e.md.options.allowIndentation&&u>4&&(u=1),l=f+u,(z=e.push("list_item_open","li",1)).markup=String.fromCharCode(b),z.map=d=[t,0],p&&(z.info=e.src.slice(O,D-1)),E=e.tight,C=e.tShift[t],A=e.sCount[t],w=e.listIndent,e.listIndent=e.blkIndent,e.blkIndent=l,e.tight=!0,e.tShift[t]=s-e.bMarks[t],e.sCount[t]=_,s>=v&&e.isEmpty(t+1)?e.line=Math.min(e.line+2,r):e.md.block.tokenize(e,t,r,!0),e.tight&&!j||(I=!1),j=e.line-t>1&&e.isEmpty(e.line-1),e.blkIndent=e.listIndent,e.listIndent=w,e.tShift[t]=C,e.sCount[t]=A,e.tight=E,(z=e.push("list_item_close","li",-1)).markup=String.fromCharCode(b),k=t=e.line,d[1]=k,s=e.bMarks[t],k>=r)break;if(e.sCount[k]=4)break;for(q=!1,c=0,h=F.length;c=4)return!1;if(91!==e.src.charCodeAt(x))return!1;for(;++x3))&&!(e.sCount[C]<0)){for(v=!1,f=0,p=k.length;f`\\x00-\\x20]+|'[^']*'|\"[^\"]*\"))?)*\\s*\\/?>",n="<\\/[A-Za-z][A-Za-z0-9\\-]*\\s*>",a=new RegExp("^(?:"+r+"|"+n+"|\x3c!----\x3e|\x3c!--(?:-?[^>-])(?:-?[^-])*--\x3e|<[?][\\s\\S]*?[?]>|]*>|)"),i=new RegExp("^(?:"+r+"|"+n+")");t.exports.HTML_TAG_RE=a,t.exports.HTML_OPEN_CLOSE_TAG_RE=i}}),K=u({"node_modules/markdown-it/lib/rules_block/html_block.js"(e,t){"use strict";var r=J(),n=X().HTML_OPEN_CLOSE_TAG_RE,a=[[/^<(script|pre|style|textarea)(?=(\s|>|$))/i,/<\/(script|pre|style|textarea)>/i,!0],[/^/,!0],[/^<\?/,/\?>/,!0],[/^/,!0],[/^/,!0],[new RegExp("^|$))","i"),/^$/,!0],[new RegExp(n.source+"\\s*$"),/^$/,!1]];t.exports=function(e,t,r,n){var i,o,s,c,l=e.bMarks[t]+e.tShift[t],u=e.eMarks[t];if(!e.md.options.allowIndentation&&e.sCount[t]-e.blkIndent>=4)return!1;if(!e.md.options.html)return!1;if(60!==e.src.charCodeAt(l))return!1;for(c=e.src.slice(l,u),i=0;i=4)return!1;if(35!==(i=e.src.charCodeAt(l))||l>=u)return!1;for(o=1,i=e.src.charCodeAt(++l);35===i&&l6||ll&&r(e.src.charCodeAt(s-1))&&(u=s),e.line=t+1,(c=e.push("heading_open","h"+String(o),1)).markup="########".slice(0,o),c.map=[t,e.line],(c=e.push("inline","",0)).content=e.src.slice(l,u).trim(),c.map=[t,e.line],c.children=[],(c=e.push("heading_close","h"+String(o),-1)).markup="########".slice(0,o)),!0)}}}),ee=u({"node_modules/markdown-it/lib/rules_block/lheading.js"(e,t){"use strict";t.exports=function(e,t,r){var n,a,i,o,s,c,l,u,f,p,d=t+1,h=e.md.block.ruler.getRules("paragraph");if(!e.md.options.allowIndentation&&e.sCount[t]-e.blkIndent>=4)return!1;for(p=e.parentType,e.parentType="paragraph";d3)){if(e.sCount[d]>=e.blkIndent&&(c=e.bMarks[d]+e.tShift[d])<(l=e.eMarks[d])&&(45===(f=e.src.charCodeAt(c))||61===f)&&(c=e.skipChars(c,f),(c=e.skipSpaces(c))>=l)){u=61===f?1:2;break}if(!(e.sCount[d]<0)){for(a=!1,i=0,o=h.length;i3))&&!(e.sCount[c]<0)){for(n=!1,a=0,i=l.length;a0&&this.level++,this.tokens.push(a),a},a.prototype.isEmpty=function(e){return this.bMarks[e]+this.tShift[e]>=this.eMarks[e]},a.prototype.skipEmptyLines=function(e){for(var t=this.lineMax;et;)if(!n(this.src.charCodeAt(--e)))return e+1;return e},a.prototype.skipChars=function(e,t){for(var 
r=this.src.length;er;)if(t!==this.src.charCodeAt(--e))return e+1;return e},a.prototype.getLines=function(e,t,r,a){var i,o,s,c,l,u,f,p=e;if(e>=t)return"";for(u=new Array(t-e),i=0;pr?new Array(o-r+1).join(" ")+this.src.slice(c,l):this.src.slice(c,l)}return u.join("")},a.prototype.Token=r,t.exports=a}}),ne=u({"node_modules/markdown-it/lib/parser_block.js"(e,t){"use strict";var r=z(),n=[["table",U(),["paragraph","reference"]],["code",V()],["fence",Z(),["paragraph","reference","blockquote","list"]],["blockquote",H(),["paragraph","reference","blockquote","list"]],["hr",Y(),["paragraph","reference","blockquote","list"]],["list",G(),["paragraph","reference","blockquote"]],["reference",W()],["html_block",K(),["paragraph","reference","blockquote"]],["heading",Q(),["paragraph","reference","blockquote"]],["lheading",ee()],["paragraph",te()]];function a(){this.ruler=new r;for(var e=0;e=r))&&!(e.sCount[o]=c){e.line=r;break}for(n=0;n=0&&32===e.pending.charCodeAt(n))if(n>=1&&32===e.pending.charCodeAt(n-1)){for(i=n-1;i>=1&&32===e.pending.charCodeAt(i-1);)i--;e.pending=e.pending.slice(0,i),e.push("hardbreak","br",0)}else e.pending=e.pending.slice(0,-1),e.push("softbreak","br",0);else e.push("softbreak","br",0);for(o++;o?@[]^_`{|}~-".split("").forEach((function(e){a[e.charCodeAt(0)]=1})),t.exports=function(e,t){var r,i=e.pos,o=e.posMax;if(92!==e.src.charCodeAt(i))return!1;if(++i=0;r--)95!==(n=t[r]).marker&&42!==n.marker||-1!==n.end&&(a=t[n.end],s=r>0&&t[r-1].end===n.end+1&&t[r-1].marker===n.marker&&t[r-1].token===n.token-1&&t[n.end+1].token===a.token+1,o=String.fromCharCode(n.marker),(i=e.tokens[n.token]).type=s?"strong_open":"em_open",i.tag=s?"strong":"em",i.nesting=1,i.markup=s?o+o:o,i.content="",(i=e.tokens[a.token]).type=s?"strong_close":"em_close",i.tag=s?"strong":"em",i.nesting=-1,i.markup=s?o+o:o,i.content="",s&&(e.tokens[t[r-1].token].content="",e.tokens[t[n.end+1].token].content="",r--))}t.exports.tokenize=function(e,t){var r,n,a=e.pos,i=e.src.charCodeAt(a);if(t)return!1;if(95!==i&&42!==i)return!1;for(n=e.scanDelims(e.pos,42===i),r=0;r=m)return!1;if(g=l,(u=e.md.helpers.parseLinkDestination(e.src,l,e.posMax)).ok){for(p=e.md.normalizeLink(u.str),e.md.validateLink(p)?l=u.pos:p="",g=l;l=m||41!==e.src.charCodeAt(l))&&(b=!0),l++}if(b){if("undefined"===typeof e.env.references)return!1;if(l=0?o=e.src.slice(g,l++):l=s+1):l=s+1,o||(o=e.src.slice(c,s)),!(f=e.env.references[r(o)]))return e.pos=h,!1;p=f.href,d=f.title}return t||(e.pos=c,e.posMax=s,e.push("link_open","a",1).attrs=a=[["href",p]],d&&a.push(["title",d]),e.md.inline.tokenize(e),e.push("link_close","a",-1)),e.pos=l,e.posMax=m,!0}}}),fe=u({"node_modules/markdown-it/lib/rules_inline/image.js"(e,t){"use strict";var r=S().normalizeReference,n=S().isSpace;t.exports=function(e,t){var a,i,o,s,c,l,u,f,p,d,h,m,g,b="",y=e.pos,v=e.posMax;if(33!==e.src.charCodeAt(e.pos))return!1;if(91!==e.src.charCodeAt(e.pos+1))return!1;if(l=e.pos+2,(c=e.md.helpers.parseLinkLabel(e,e.pos+1,!1))<0)return!1;if((u=c+1)=v)return!1;for(g=u,(p=e.md.helpers.parseLinkDestination(e.src,u,e.posMax)).ok&&(b=e.md.normalizeLink(p.str),e.md.validateLink(b)?u=p.pos:b=""),g=u;u=v||41!==e.src.charCodeAt(u))return e.pos=y,!1;u++}else{if("undefined"===typeof e.env.references)return!1;if(u=0?s=e.src.slice(g,u++):u=c+1):u=c+1,s||(s=e.src.slice(l,c)),!(f=e.env.references[r(s)]))return e.pos=y,!1;b=f.href,d=f.title}return 
t||(o=e.src.slice(l,c),e.md.inline.parse(o,e.md,e.env,m=[]),(h=e.push("image","img",0)).attrs=a=[["src",b],["alt",""]],h.children=m,h.content=o,d&&a.push(["title",d])),e.pos=u,e.posMax=v,!0}}}),pe=u({"node_modules/markdown-it/lib/rules_inline/autolink.js"(e,t){"use strict";var r=/^([a-zA-Z0-9.!#$%&'*+\/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*)$/,n=/^([a-zA-Z][a-zA-Z0-9+.\-]{1,31}):([^<>\x00-\x20]*)$/;t.exports=function(e,t){var a,i,o,s,c,l,u=e.pos;if(60!==e.src.charCodeAt(u))return!1;for(c=e.pos,l=e.posMax;;){if(++u>=l)return!1;if(60===(s=e.src.charCodeAt(u)))return!1;if(62===s)break}return a=e.src.slice(c+1,u),n.test(a)?(i=e.md.normalizeLink(a),!!e.md.validateLink(i)&&(t||((o=e.push("link_open","a",1)).attrs=[["href",i]],o.markup="autolink",o.info="auto",(o=e.push("text","",0)).content=e.md.normalizeLinkText(a),(o=e.push("link_close","a",-1)).markup="autolink",o.info="auto"),e.pos+=a.length+2,!0)):!!r.test(a)&&(i=e.md.normalizeLink("mailto:"+a),!!e.md.validateLink(i)&&(t||((o=e.push("link_open","a",1)).attrs=[["href",i]],o.markup="autolink",o.info="auto",(o=e.push("text","",0)).content=e.md.normalizeLinkText(a),(o=e.push("link_close","a",-1)).markup="autolink",o.info="auto"),e.pos+=a.length+2,!0))}}}),de=u({"node_modules/markdown-it/lib/rules_inline/html_inline.js"(e,t){"use strict";var r=X().HTML_TAG_RE;t.exports=function(e,t){var n,a,i,o=e.pos;return!!e.md.options.html&&(i=e.posMax,!(60!==e.src.charCodeAt(o)||o+2>=i)&&(!(33!==(n=e.src.charCodeAt(o+1))&&63!==n&&47!==n&&!function(e){var t=32|e;return t>=97&&t<=122}(n))&&(!!(a=e.src.slice(o).match(r))&&(t||(e.push("html_inline","",0).content=e.src.slice(o,o+a[0].length)),e.pos+=a[0].length,!0))))}}}),he=u({"node_modules/markdown-it/lib/rules_inline/entity.js"(e,t){"use strict";var r=m(),n=S().has,a=S().isValidEntityCode,i=S().fromCodePoint,o=/^&#((?:x[a-f0-9]{1,6}|[0-9]{1,7}));/i,s=/^&([a-z][a-z0-9]{1,31});/i;t.exports=function(e,t){var c,l,u=e.pos,f=e.posMax;if(38!==e.src.charCodeAt(u))return!1;if(u+1o;n-=h[n]+1)if((i=t[n]).marker===a.marker&&i.open&&i.end<0&&(c=!1,(i.close||a.open)&&(i.length+a.length)%3===0&&(i.length%3===0&&a.length%3===0||(c=!0)),!c)){l=n>0&&!t[n-1].open?h[n-1]+1:0,h[r]=r-n+l,h[n]=l,a.open=!1,i.end=r,i.close=!1,s=-1,d=-2;break}-1!==s&&(u[a.marker][(a.open?3:0)+(a.length||0)%3]=s)}}}t.exports=function(e){var t,n=e.tokens_meta,a=e.tokens_meta.length;for(r(0,e.delimiters),t=0;t0&&n++,"text"===a[t].type&&t+10&&(this.level++,this._prev_delimiters.push(this.delimiters),this.delimiters=[],i={delimiters:this.delimiters}),this.pendingLevel=this.level,this.tokens.push(a),this.tokens_meta.push(i),a},o.prototype.scanDelims=function(e,t){var r,o,s,c,l,u,f,p,d,h=e,m=!0,g=!0,b=this.posMax,y=this.src.charCodeAt(e);for(r=e>0?this.src.charCodeAt(e-1):32;h=i)break}else e.pending+=e.src[e.pos++]}e.pending&&e.pushPending()},i.prototype.parse=function(e,t,r,n){var a,i,o,s=new this.State(e,t,r,n);for(this.tokenize(s),o=(i=this.ruler2.getRules("")).length,a=0;a|$))",t.tpl_email_fuzzy='(^|[><\uff5c]|"|\\(|'+t.src_ZCc+")("+t.src_email_name+"@"+t.tpl_host_fuzzy_strict+")",t.tpl_link_fuzzy="(^|(?![.:/\\-_@])(?:[$+<=>^`|\uff5c]|"+t.src_ZPCc+"))((?![$+<=>^`|\uff5c])"+t.tpl_host_port_fuzzy_strict+t.src_path+")",t.tpl_link_no_ip_fuzzy="(^|(?![.:/\\-_@])(?:[$+<=>^`|\uff5c]|"+t.src_ZPCc+"))((?![$+<=>^`|\uff5c])"+t.tpl_host_port_no_ip_fuzzy_strict+t.src_path+")",t}}}),ke=u({"node_modules/linkify-it/index.js"(e,t){"use strict";function r(e){return 
Array.prototype.slice.call(arguments,1).forEach((function(t){t&&Object.keys(t).forEach((function(r){e[r]=t[r]}))})),e}function n(e){return Object.prototype.toString.call(e)}function a(e){return"[object Function]"===n(e)}function i(e){return e.replace(/[.?*+^$[\]\\(){}|-]/g,"\\$&")}var o={fuzzyLink:!0,fuzzyEmail:!0,fuzzyIP:!1};var s={"http:":{validate:function(e,t,r){var n=e.slice(t);return r.re.http||(r.re.http=new RegExp("^\\/\\/"+r.re.src_auth+r.re.src_host_port_strict+r.re.src_path,"i")),r.re.http.test(n)?n.match(r.re.http)[0].length:0}},"https:":"http:","ftp:":"http:","//":{validate:function(e,t,r){var n=e.slice(t);return r.re.no_http||(r.re.no_http=new RegExp("^"+r.re.src_auth+"(?:localhost|(?:(?:"+r.re.src_domain+")\\.)+"+r.re.src_domain_root+")"+r.re.src_port+r.re.src_host_terminator+r.re.src_path,"i")),r.re.no_http.test(n)?t>=3&&":"===e[t-3]||t>=3&&"/"===e[t-3]?0:n.match(r.re.no_http)[0].length:0}},"mailto:":{validate:function(e,t,r){var n=e.slice(t);return r.re.mailto||(r.re.mailto=new RegExp("^"+r.re.src_email_name+"@"+r.re.src_host_strict,"i")),r.re.mailto.test(n)?n.match(r.re.mailto)[0].length:0}}},c="biz|com|edu|gov|net|org|pro|web|xxx|aero|asia|coop|info|museum|name|shop|\u0440\u0444".split("|");function l(e){var t=e.re=ve()(e.__opts__),r=e.__tlds__.slice();function o(e){return e.replace("%TLDS%",t.src_tlds)}e.onCompile(),e.__tlds_replaced__||r.push("a[cdefgilmnoqrstuwxz]|b[abdefghijmnorstvwyz]|c[acdfghiklmnoruvwxyz]|d[ejkmoz]|e[cegrstu]|f[ijkmor]|g[abdefghilmnpqrstuwy]|h[kmnrtu]|i[delmnoqrst]|j[emop]|k[eghimnprwyz]|l[abcikrstuvy]|m[acdeghklmnopqrstuvwxyz]|n[acefgilopruz]|om|p[aefghklmnrstwy]|qa|r[eosuw]|s[abcdeghijklmnortuvxyz]|t[cdfghjklmnortvwz]|u[agksyz]|v[aceginu]|w[fs]|y[et]|z[amw]"),r.push(t.src_xn),t.src_tlds=r.join("|"),t.email_fuzzy=RegExp(o(t.tpl_email_fuzzy),"i"),t.link_fuzzy=RegExp(o(t.tpl_link_fuzzy),"i"),t.link_no_ip_fuzzy=RegExp(o(t.tpl_link_no_ip_fuzzy),"i"),t.host_fuzzy_test=RegExp(o(t.tpl_host_fuzzy_test),"i");var s=[];function c(e,t){throw new Error('(LinkifyIt) Invalid schema "'+e+'": '+t)}e.__compiled__={},Object.keys(e.__schemas__).forEach((function(t){var r=e.__schemas__[t];if(null!==r){var i={validate:null,link:null};if(e.__compiled__[t]=i,"[object Object]"===n(r))return!function(e){return"[object RegExp]"===n(e)}(r.validate)?a(r.validate)?i.validate=r.validate:c(t,r):i.validate=function(e){return function(t,r){var n=t.slice(r);return e.test(n)?n.match(e)[0].length:0}}(r.validate),void(a(r.normalize)?i.normalize=r.normalize:r.normalize?c(t,r):i.normalize=function(e,t){t.normalize(e)});!function(e){return"[object String]"===n(e)}(r)?c(t,r):s.push(t)}})),s.forEach((function(t){e.__compiled__[e.__schemas__[t]]&&(e.__compiled__[t].validate=e.__compiled__[e.__schemas__[t]].validate,e.__compiled__[t].normalize=e.__compiled__[e.__schemas__[t]].normalize)})),e.__compiled__[""]={validate:null,normalize:function(e,t){t.normalize(e)}};var l=Object.keys(e.__compiled__).filter((function(t){return t.length>0&&e.__compiled__[t]})).map(i).join("|");e.re.schema_test=RegExp("(^|(?!_)(?:[><\uff5c]|"+t.src_ZPCc+"))("+l+")","i"),e.re.schema_search=RegExp("(^|(?!_)(?:[><\uff5c]|"+t.src_ZPCc+"))("+l+")","ig"),e.re.pretest=RegExp("("+e.re.schema_test.source+")|("+e.re.host_fuzzy_test.source+")|@","i"),function(e){e.__index__=-1,e.__text_cache__=""}(e)}function u(e,t){var r=e.__index__,n=e.__last_index__,a=e.__text_cache__.slice(r,n);this.schema=e.__schema__.toLowerCase(),this.index=r+t,this.lastIndex=n+t,this.raw=a,this.text=a,this.url=a}function f(e,t){var r=new 
u(e,t);return e.__compiled__[r.schema].normalize(r,e),r}function p(e,t){if(!(this instanceof p))return new p(e,t);var n;t||(n=e,Object.keys(n||{}).reduce((function(e,t){return e||o.hasOwnProperty(t)}),!1)&&(t=e,e={})),this.__opts__=r({},o,t),this.__index__=-1,this.__last_index__=-1,this.__schema__="",this.__text_cache__="",this.__schemas__=r({},s,e),this.__compiled__={},this.__tlds__=c,this.__tlds_replaced__=!1,this.re={},l(this)}p.prototype.add=function(e,t){return this.__schemas__[e]=t,l(this),this},p.prototype.set=function(e){return this.__opts__=r(this.__opts__,e),this},p.prototype.test=function(e){if(this.__text_cache__=e,this.__index__=-1,!e.length)return!1;var t,r,n,a,i,o,s,c;if(this.re.schema_test.test(e))for((s=this.re.schema_search).lastIndex=0;null!==(t=s.exec(e));)if(a=this.testSchemaAt(e,t[2],s.lastIndex)){this.__schema__=t[2],this.__index__=t.index+t[1].length,this.__last_index__=t.index+t[0].length+a;break}return this.__opts__.fuzzyLink&&this.__compiled__["http:"]&&(c=e.search(this.re.host_fuzzy_test))>=0&&(this.__index__<0||c=0&&null!==(n=e.match(this.re.email_fuzzy))&&(i=n.index+n[1].length,o=n.index+n[0].length,(this.__index__<0||ithis.__last_index__)&&(this.__schema__="mailto:",this.__index__=i,this.__last_index__=o)),this.__index__>=0},p.prototype.pretest=function(e){return this.re.pretest.test(e)},p.prototype.testSchemaAt=function(e,t,r){return this.__compiled__[t.toLowerCase()]?this.__compiled__[t.toLowerCase()].validate(e,r,this):0},p.prototype.match=function(e){var t=0,r=[];this.__index__>=0&&this.__text_cache__===e&&(r.push(f(this,t)),t=this.__last_index__);for(var n=t?e.slice(t):e;this.test(n);)r.push(f(this,t)),n=n.slice(this.__last_index__),t+=this.__last_index__;return r.length?r:null},p.prototype.tlds=function(e,t){return e=Array.isArray(e)?e:[e],t?(this.__tlds__=this.__tlds__.concat(e).sort().filter((function(e,t,r){return e!==r[t-1]})).reverse(),l(this),this):(this.__tlds__=e.slice(),this.__tlds_replaced__=!0,l(this),this)},p.prototype.normalize=function(e){e.schema||(e.url="http://"+e.url),"mailto:"!==e.schema||/^mailto:/i.test(e.url)||(e.url="mailto:"+e.url)},p.prototype.onCompile=function(){},t.exports=p}}),_e=u({"node_modules/punycode/punycode.js"(e,t){"use strict";var r=2147483647,n=36,a=/^xn--/,i=/[^\0-\x7E]/,o=/[\x2E\u3002\uFF0E\uFF61]/g,s={overflow:"Overflow: input needs wider integers to process","not-basic":"Illegal input >= 0x80 (not a basic code point)","invalid-input":"Invalid input"},c=Math.floor,l=String.fromCharCode;function u(e){throw new RangeError(s[e])}function f(e,t){const r=e.split("@");let n="";r.length>1&&(n=r[0]+"@",e=r[1]);const a=function(e,t){const r=[];let n=e.length;for(;n--;)r[n]=t(e[n]);return r}((e=e.replace(o,".")).split("."),t).join(".");return n+a}function p(e){const t=[];let r=0;const n=e.length;for(;r=55296&&a<=56319&&r>1,e+=c(e/t);e>455;a+=n)e=c(e/35);return c(a+36*e/(e+38))},m=function(e){const t=[],a=e.length;let i=0,o=128,s=72,l=e.lastIndexOf("-");l<0&&(l=0);for(let r=0;r=128&&u("not-basic"),t.push(e.charCodeAt(r));for(let p=l>0?l+1:0;p=a&&u("invalid-input");const l=(f=e.charCodeAt(p++))-48<10?f-22:f-65<26?f-65:f-97<26?f-97:n;(l>=n||l>c((r-i)/t))&&u("overflow"),i+=l*t;const d=o<=s?1:o>=s+26?26:o-s;if(lc(r/h)&&u("overflow"),t*=h}const d=t.length+1;s=h(i-l,d,0==l),c(i/d)>r-o&&u("overflow"),o+=c(i/d),i%=d,t.splice(i++,0,o)}var f;return String.fromCodePoint(...t)},g=function(e){const t=[];let a=(e=p(e)).length,i=128,o=0,s=72;for(const r of e)r<128&&t.push(l(r));let 
f=t.length,m=f;for(f&&t.push("-");m=i&&tc((r-o)/p)&&u("overflow"),o+=(a-i)*p,i=a;for(const g of e)if(gr&&u("overflow"),g==i){let e=o;for(let r=n;;r+=n){const a=r<=s?1:r>=s+26?26:r-s;if(eString.fromCodePoint(...e)},decode:m,encode:g,toASCII:function(e){return f(e,(function(e){return i.test(e)?"xn--"+g(e):e}))},toUnicode:function(e){return f(e,(function(e){return a.test(e)?m(e.slice(4).toLowerCase()):e}))}};t.exports=b}}),we=u({"node_modules/markdown-it/lib/presets/default.js"(e,t){"use strict";t.exports={options:{html:!1,xhtmlOut:!1,breaks:!1,langPrefix:"language-",linkify:!1,typographer:!1,quotes:"\u201c\u201d\u2018\u2019",highlight:null,maxNesting:100},components:{core:{},block:{},inline:{}}}}}),xe=u({"node_modules/markdown-it/lib/presets/zero.js"(e,t){"use strict";t.exports={options:{html:!1,xhtmlOut:!1,breaks:!1,langPrefix:"language-",linkify:!1,typographer:!1,quotes:"\u201c\u201d\u2018\u2019",highlight:null,maxNesting:20},components:{core:{rules:["normalize","block","inline"]},block:{rules:["paragraph"]},inline:{rules:["text"],rules2:["balance_pairs","text_collapse"]}}}}}),Ae=u({"node_modules/markdown-it/lib/presets/commonmark.js"(e,t){"use strict";t.exports={options:{html:!0,xhtmlOut:!0,breaks:!1,langPrefix:"language-",linkify:!1,typographer:!1,quotes:"\u201c\u201d\u2018\u2019",highlight:null,maxNesting:20},components:{core:{rules:["normalize","block","inline"]},block:{rules:["blockquote","code","fence","heading","hr","html_block","lheading","list","reference","paragraph"]},inline:{rules:["autolink","backticks","emphasis","entity","escape","html_inline","image","link","newline","text"],rules2:["balance_pairs","emphasis","text_collapse"]}}}}}),Ce=u({"node_modules/markdown-it/lib/index.js"(e,t){"use strict";var r=S(),n=q(),a=F(),i=B(),o=ne(),s=ye(),c=ke(),l=_(),u=_e(),f={default:we(),zero:xe(),commonmark:Ae()},p=/^(vbscript|javascript|file|data):/,d=/^data:image\/(gif|png|jpeg|webp);/;function h(e){var t=e.trim().toLowerCase();return!p.test(t)||!!d.test(t)}var m=["http:","https:","mailto:"];function g(e){var t=l.parse(e,!0);if(t.hostname&&(!t.protocol||m.indexOf(t.protocol)>=0))try{t.hostname=u.toASCII(t.hostname)}catch(r){}return l.encode(l.format(t))}function b(e){var t=l.parse(e,!0);if(t.hostname&&(!t.protocol||m.indexOf(t.protocol)>=0))try{t.hostname=u.toUnicode(t.hostname)}catch(r){}return l.decode(l.format(t),l.decode.defaultChars+"%")}function y(e,t){if(!(this instanceof y))return new y(e,t);t||r.isString(e)||(t=e||{},e="default"),this.inline=new s,this.block=new o,this.core=new i,this.renderer=new a,this.linkify=new c,this.validateLink=h,this.normalizeLink=g,this.normalizeLinkText=b,this.utils=r,this.helpers=r.assign({},n),this.options={},this.configure(e),t&&this.set(t)}y.prototype.set=function(e){return r.assign(this.options,e),this},y.prototype.configure=function(e){var t,n=this;if(r.isString(e)&&!(e=f[t=e]))throw new Error('Wrong `markdown-it` preset "'+t+'", check name');if(!e)throw new Error("Wrong `markdown-it` preset, can't be empty");return e.options&&n.set(e.options),e.components&&Object.keys(e.components).forEach((function(t){e.components[t].rules&&n[t].ruler.enableOnly(e.components[t].rules),e.components[t].rules2&&n[t].ruler2.enableOnly(e.components[t].rules2)})),this},y.prototype.enable=function(e,t){var r=[];Array.isArray(e)||(e=[e]),["core","block","inline"].forEach((function(t){r=r.concat(this[t].ruler.enable(e,!0))}),this),r=r.concat(this.inline.ruler2.enable(e,!0));var n=e.filter((function(e){return r.indexOf(e)<0}));if(n.length&&!t)throw new 
Error("MarkdownIt. Failed to enable unknown rule(s): "+n);return this},y.prototype.disable=function(e,t){var r=[];Array.isArray(e)||(e=[e]),["core","block","inline"].forEach((function(t){r=r.concat(this[t].ruler.disable(e,!0))}),this),r=r.concat(this.inline.ruler2.disable(e,!0));var n=e.filter((function(e){return r.indexOf(e)<0}));if(n.length&&!t)throw new Error("MarkdownIt. Failed to disable unknown rule(s): "+n);return this},y.prototype.use=function(e){var t=[this].concat(Array.prototype.slice.call(arguments,1));return e.apply(e,t),this},y.prototype.parse=function(e,t){if("string"!==typeof e)throw new Error("Input data should be a String");var r=new this.core.State(e,this,t);return this.core.process(r),r.tokens},y.prototype.render=function(e,t){return t=t||{},this.renderer.render(this.parse(e,t),this.options,t)},y.prototype.parseInline=function(e,t){var r=new this.core.State(e,this,t);return r.inlineMode=!0,this.core.process(r),r.tokens},y.prototype.renderInline=function(e,t){return t=t||{},this.renderer.render(this.parseInline(e,t),this.options,t)},t.exports=y}}),Ee=u({"node_modules/markdown-it/index.js"(e,t){"use strict";t.exports=Ce()}}),Se={};function De(e){return!!e?.$$mdtype}function je(e){return!("Function"!==e?.$$mdtype)}function Oe(e){return!("Variable"!==e?.$$mdtype)}function*qe(e){if(null!=e&&"object"===typeof e){if(Array.isArray(e))for(const t of e)yield*qe(t);if(De(e)&&(yield e),Object.getPrototypeOf(e)===Object.prototype)for(const t of Object.values(e))yield*qe(t)}}function Fe(e,t={}){if(null==e||"object"!==typeof e)return e;if(Array.isArray(e))return e.map((e=>Fe(e,t)));if(De(e)&&e?.resolve instanceof Function)return e.resolve(t);if(Object.getPrototypeOf(e)!==Object.prototype)return e;const r={};for(const[n,a]of Object.entries(e))r[n]=Fe(a,t);return r}f(Se,{getAstValues:()=>qe,isAst:()=>De,isFunction:()=>je,isVariable:()=>Oe,resolve:()=>Fe});var ze=class{constructor(e="div",t={},r=[]){this.$$mdtype="Tag",this.name=e,this.attributes=t,this.children=r}};ze.isTag=e=>!("Tag"!==e?.$$mdtype);var Le,Ie,Ne=p(d()),Me=class{constructor(e=[]){this.$$mdtype="Variable",this.path=e}resolve({variables:e}={}){return e instanceof Function?e(this.path):this.path.reduce(((e={},t)=>e[t]),e)}},Te=class{constructor(e,t){this.$$mdtype="Function",this.name=e,this.parameters=t}resolve(e={}){const t=e?.functions?.[this.name];if(!t)return null;const r=Fe(this.parameters,e);return t.transform?.(r,e)}};(Ie=Le||(Le={}))[Ie.normal=0]="normal",Ie[Ie.string=1]="string",Ie[Ie.escape=2]="escape";var Pe="{%",Re="%}",$e=/^[a-zA-Z0-9_-]+$/;function Be(e){return"string"===typeof e&&$e.test(e)}function Ue(e){return e&&"object"===typeof e&&"function"===typeof e.then}function Ve(e,t=0){let r=0;for(let n=t;ne.tag?r[e.tag]:t[e.type],attributes(e,t={}){const r=this.findSchema(e,t)??{},n={},a={...Ye,...r.attributes};for(const[i,o]of Object.entries(a)){if(0==o.render)continue;const r="string"===typeof o.render?o.render:i;let a=e.attributes[i];if("function"===typeof o.type){const e=new o.type;e.transform&&(a=e.transform(a,t))}a=void 0===a?o.default:a,void 0!==a&&(n[r]=a)}if(r.slots)for(const[i,o]of Object.entries(r.slots)){if(!1===o.render)continue;const r="string"===typeof o.render?o.render:i;e.slots[i]&&(n[r]=this.node(e.slots[i],t))}return n},children(e,t={}){const r=e.children.flatMap((e=>this.node(e,t)));return r.some(Ue)?Promise.all(r):r},node(e,t={}){const r=this.findSchema(e,t)??{};if(r&&r.transform instanceof Function)return r.transform(e,t);const n=this.children(e,t);if(!r||!r.render)return n;const 
a=this.attributes(e,t);return Ue(a)||Ue(n)?Promise.all([a,n]).then((e=>new ze(r.render,...e))):new ze(r.render,a,n)}},We=class{constructor(e="node",t={},r=[],n){this.$$mdtype="Node",this.errors=[],this.lines=[],this.inline=!1,this.attributes=t,this.children=r,this.type=e,this.tag=n,this.annotations=[],this.slots={}}*walk(){for(const e of[...Object.values(this.slots),...this.children])yield e,yield*e.walk()}push(e){this.children.push(e)}resolve(e={}){return Object.assign(new We,this,{children:this.children.map((t=>t.resolve(e))),attributes:Fe(this.attributes,e),slots:Object.fromEntries(Object.entries(this.slots).map((([t,r])=>[t,r.resolve(e)])))})}findSchema(e={}){return Ge.findSchema(this,e)}transformAttributes(e={}){return Ge.attributes(this,e)}transformChildren(e){return Ge.children(this,e)}transform(e){return Ge.node(this,e)}},Je={Function:Te,Node:We,Variable:Me};function Xe(e,t){if(!t)return t;const r=Je[t.$$mdtype];return r?Object.assign(new r,t):t}var Ke={...Je,...Se,fromJSON:function(e){return JSON.parse(e,Xe)}},Qe=" ",et=", ",tt="\n",rt=".",nt="-",at=80,it=["strong","em","s"],ot=(e,t)=>Math.max(e,t),st=(e,t=2)=>({...e,indent:(e.indent||0)+t});function*ct(e,t){for(const r of e.children)yield*bt(r,t)}function*lt(e){yield[...e].join("").trim()}function*ut(e){yield`| ${e.join(" | ")} |`}function ft(e){if(void 0!==e)return Ke.isAst(e)?yt(e):null===e?"null":Array.isArray(e)?"["+e.map(ft).join(et)+"]":"object"===typeof e?"{"+Object.entries(e).map((([e,t])=>`${Be(e)?e:`"${e}"`}: ${ft(t)}`)).join(et)+"}":JSON.stringify(e)}function pt(e){const t=ft(e.value);if(void 0!==t)return"primary"===e.name?t:"id"===e.name&&"string"===typeof e.value&&Be(e.value)?"#"+e.value:"class"===e.type&&Be(e.name)?"."+e.name:`${e.name}=${t}`}function*dt(e){for(const[t,r]of Object.entries(e.attributes))if("class"!==t||"object"!==typeof r||Ke.isAst(r))yield pt({type:"attribute",name:t,value:r});else for(const e of Object.keys(r))yield pt({type:"class",name:e,value:r})}function*ht(e){e.annotations.length&&(yield Pe+Qe,yield e.annotations.map(pt).join(Qe),yield Qe+Re)}function*mt(e){let t;do{const{value:r,done:n}=e.next();if(n)return;t=r.trimStart()}while(!t.length);yield t,yield*e}function*gt(e,t){yield e.replace(t,"\\$&").replace(new RegExp("\xa0","g")," ")}function*bt(e,t={}){switch(typeof e){case"undefined":break;case"boolean":case"number":case"string":yield e.toString();break;case"object":if(null===e)break;if(Array.isArray(e)){for(const r of e)yield*bt(r,t);break}switch(e.$$mdtype){case"Function":yield*function*(e){yield e.name,yield"(",yield Object.values(e.parameters).map(ft).join(et),yield")"}(e);break;case"Node":yield*function*(e,t={}){const r={...t,parent:e},n=Qe.repeat(r.indent||0);switch(e.type){case"document":e.attributes.frontmatter&&e.attributes.frontmatter.length&&(yield"---"+tt+e.attributes.frontmatter+tt+"---"+tt+tt),yield*mt(ct(e,r));break;case"heading":yield tt,yield n,yield"#".repeat(e.attributes.level||1),yield Qe,yield*mt(ct(e,r)),yield*ht(e),yield tt;break;case"paragraph":yield tt,yield*ct(e,r),yield*ht(e),yield tt;break;case"inline":yield n,yield*ct(e,r);break;case"image":yield"!",yield"[",yield*bt(e.attributes.alt,r),yield"]",yield"(",yield*"string"===typeof e.attributes.src?gt(e.attributes.src,/[()]/):bt(e.attributes.src,r),e.attributes.title&&(yield Qe+`"${e.attributes.title}"`),yield")";break;case"link":yield"[",yield*ct(e,r),yield"]",yield"(",yield*"string"===typeof e.attributes.href?gt(e.attributes.href,/[()]/g):bt(e.attributes.href,r),e.attributes.title&&(yield 
Qe+`"${e.attributes.title}"`),yield")";break;case"text":{const{content:n}=e.attributes;Ke.isAst(n)?(yield Pe+Qe,yield*bt(n,r),yield Qe+Re):t.parent&&it.includes(t.parent.type)?yield*gt(n,/[*_~]/g):yield*gt(n,/^[*>#]/);break}case"blockquote":{const t=">"+Qe;yield e.children.map((e=>yt(e,r).trimStart())).map((e=>tt+n+t+e)).join(n+t);break}case"hr":yield tt,yield n,yield"---",yield tt;break;case"fence":{yield tt,yield n;const t=(e.attributes.content.match(/`{3,}/g)||[]).map((e=>e.length)).reduce(ot,0),r="`".repeat(t?t+1:3);yield r,e.attributes.language&&(yield e.attributes.language),e.annotations.length&&(yield Qe),yield*ht(e),yield tt,yield n,yield e.attributes.content.split(tt).join(tt+n),yield r,yield tt;break}case"tag":{e.inline||(yield tt,yield n);const a=Pe+Qe,i=[...dt(e)].filter((e=>void 0!==e)),o=[a+e.tag,...i],s=o.join(Qe),c=s.length+2*a.length>(t.maxTagOpeningWidth||at);yield(!e.inline&&c?o.join(tt+Qe.repeat(a.length)+n):s)+Qe+(e.children.length?"":"/")+Re,e.children.length&&(yield*ct(e,r.allowIndentation?st(r):r),e.inline||(yield n),yield Pe+Qe+"/"+e.tag+Qe+Re),e.inline||(yield tt);break}case"list":{const t=e.children.some((e=>e.children.some((e=>"paragraph"===e.type))));for(let a=0;at+Qe.repeat(e[r]-t.length)))),yield tt,yield*ut(t.map(((t,r)=>"-".repeat(e[r])))),yield tt;for(const n of r)yield*ut(n.map(((t,r)=>t+Qe.repeat(e[r]-t.length)))),yield tt}break}case"thead":{const[t]=[...ct(e,r)];yield t||[];break}case"tr":yield[...ct(e,r)];break;case"td":case"th":yield[...ct(e,r),...ht(e)].join("").trim();break;case"tbody":yield*ct(e,r);break;case"comment":yield"\x3c!-- "+e.attributes.content+" --\x3e\n"}}(e,t);break;case"Variable":yield*function*(e){yield"$",yield e.path.map(((e,t)=>0===t?e:Be(e)?"."+e:"number"===typeof e?`[${e}]`:`["${e}"]`)).join("")}(e);break;default:throw new Error(`Unimplemented: "${e.$$mdtype}"`)}}}function yt(e,t){let r="";for(const n of bt(e,t))r+=n;return r.trimStart()}function vt(e){return!1!==e&&void 0!==e&&null!==e}var kt={attributes:{primary:{type:Object,render:!1}},transform(e,t){const r=function(e){const t=[{condition:e.attributes.primary,children:[]}];for(const r of e.children)"tag"===r.type&&"else"===r.tag?t.push({condition:!("primary"in r.attributes)||r.attributes.primary,children:[]}):t[t.length-1].children.push(r);return t}(e);for(const{condition:n,children:a}of r)if(vt(n)){const e=a.flatMap((e=>e.transform(t)));return e.some(Ue)?Promise.all(e).then((e=>e.flat())):e}return[]}},_t={selfClosing:!0,attributes:{primary:{type:Object,render:!1}}},wt={and:{transform:e=>Object.values(e).every((e=>vt(e)))},or:{transform:e=>void 0!==Object.values(e).find((e=>vt(e)))},not:{parameters:{0:{required:!0}},transform:e=>!vt(e[0])},equals:{transform(e){const t=Object.values(e);return t.every((e=>e===t[0]))}},default:{transform:e=>void 0===e[0]?e[1]:e[0]},debug:{transform:e=>JSON.stringify(e[0],null,2)}};function xt(e,t="td"){e.type="tr",e.attributes={};for(const r of e.children)r.type=t;return e}var At=[function(e){for(const t of e.walk()){if("tag"!==t.type||"table"!==t.tag)continue;const[e,...r]=t.children;if(!e||"table"===e.type)continue;const n=new Ke.Node("table",t.attributes,[new Ke.Node("thead"),new Ke.Node("tbody")]),[a,i]=n.children;"list"===e.type&&a.push(xt(e,"th"));for(const t of r){if("list"===t.type)xt(t);else{if("tag"!==t.type||"if"!==t.tag)continue;{const e=[];for(const r of 
t.children)"hr"!==r.type&&("list"===r.type&&xt(r),e.push(r));t.children=e}}i.push(t)}t.children=[n]}}],Ct={ordered_list:"list",bullet_list:"list",code_inline:"code",list_item:"item",variable:"text"};function Et(e,t){for(const r of t){e.annotations.push(r);const{name:t,value:n,type:a}=r;"attribute"===a?(void 0!==e.attributes[t]&&e.errors.push({id:"duplicate-attribute",level:"warning",message:`Attribute '${t}' already set`}),e.attributes[t]=n):"class"===a&&(e.attributes.class?e.attributes.class[t]=n:e.attributes.class={[t]:n})}}function St(e,t,r,n,a,i){if("frontmatter"===e.type)return void(t[0].attributes.frontmatter=e.content);if(e.hidden||"text"===e.type&&""===e.content)return;const o=e.errors||[],s=t[t.length-1],{tag:c,attributes:l,error:u}=e.meta||{};if("annotation"===e.type)return i?Et(i,l):s.errors.push({id:"no-inline-annotations",level:"error",message:`Can't apply inline annotations to '${s.type}'`});let f=e.type.replace(/_(open|close)$/,"");if(Ct[f]&&(f=Ct[f]),"error"===f){const{message:e,location:t}=u;o.push({id:"parse-error",level:"critical",message:e,location:t})}if(e.nesting<0){if(s.type===f&&s.tag===c)return s.lines&&e.map&&s.lines.push(...e.map),t.pop();o.push({id:"missing-opening",level:"critical",message:`Node '${f}' is missing opening`})}const p=function(e,t){switch(t){case"heading":return{level:Number(e.tag.replace("h",""))};case"list":{const t=e.attrs?Object.fromEntries(e.attrs):void 0,r=e.type.startsWith("ordered");return r&&t?.start?{ordered:!0,start:t.start,marker:e.markup}:{ordered:r,marker:e.markup}}case"link":{const t=Object.fromEntries(e.attrs);return t.title?{href:t.href,title:t.title}:{href:t.href}}case"image":{const t=Object.fromEntries(e.attrs);return t.title?{alt:e.content,src:t.src,title:t.title}:{alt:e.content,src:t.src}}case"em":case"strong":return{marker:e.markup};case"text":case"code":case"comment":return{content:(e.meta||{}).variable||e.content};case"fence":{const[t]=e.info.split(" ",1);return""===t||t===Pe?{content:e.content}:{content:e.content,language:t}}case"td":case"th":if(e.attrs){const t=Object.fromEntries(e.attrs);let r;if(t.style&&(t.style.includes("left")?r="left":t.style.includes("center")?r="center":t.style.includes("right")&&(r="right")),r)return{align:r}}return{};default:return{}}}(e,f),d=new We(f,p,void 0,c||void 0),{position:h={}}=e;if(d.errors=o,!1!==a&&(d.lines=e.map||s.lines||[],d.location={file:r,start:{line:d.lines[0],character:h.start},end:{line:d.lines[1],character:h.end}}),i&&(d.inline=!0),l&&["tag","fence","image"].includes(f)&&Et(d,l),n&&"slot"===c&&"string"===typeof d.attributes.primary?s.slots[d.attributes.primary]=d:s.push(d),e.nesting>0&&t.push(d),!Array.isArray(e.children))return;"inline"===d.type&&(i=s),t.push(d);if(!("image"===f))for(const m of e.children)St(m,t,r,n,a,i);t.pop()}var Dt={};f(Dt,{blockquote:()=>Lt,code:()=>Wt,comment:()=>Qt,document:()=>jt,em:()=>Zt,error:()=>er,fence:()=>zt,hardbreak:()=>Xt,heading:()=>Ot,hr:()=>Mt,image:()=>Ft,inline:()=>Yt,item:()=>It,link:()=>Gt,list:()=>Nt,node:()=>tr,paragraph:()=>qt,s:()=>Ht,softbreak:()=>Kt,strong:()=>Vt,table:()=>Tt,tbody:()=>Bt,td:()=>Pt,text:()=>Jt,th:()=>Rt,thead:()=>Ut,tr:()=>$t});var jt={render:"article",children:["heading","paragraph","image","table","tag","fence","blockquote","comment","list","hr"],attributes:{frontmatter:{render:!1}}},Ot={children:["inline"],attributes:{level:{type:Number,render:!1,required:!0}},transform:(e,t)=>new 
ze(`h${e.attributes.level}`,e.transformAttributes(t),e.transformChildren(t))},qt={render:"p",children:["inline"]},Ft={render:"img",attributes:{src:{type:String,required:!0},alt:{type:String},title:{type:String}}},zt={render:"pre",attributes:{content:{type:String,render:!1,required:!0},language:{type:String,render:"data-language"},process:{type:Boolean,render:!1,default:!0}},transform(e,t){const r=e.transformAttributes(t),n=e.children.length?e.transformChildren(t):[e.attributes.content];return new ze("pre",r,n)}},Lt={render:"blockquote",children:["heading","paragraph","image","table","tag","fence","blockquote","list","hr"]},It={render:"li",children:["inline","heading","paragraph","image","table","tag","fence","blockquote","list","hr"]},Nt={children:["item"],attributes:{ordered:{type:Boolean,render:!1,required:!0},start:{type:Number},marker:{type:String,render:!1}},transform:(e,t)=>new ze(e.attributes.ordered?"ol":"ul",e.transformAttributes(t),e.transformChildren(t))},Mt={render:"hr"},Tt={render:"table"},Pt={render:"td",children:["inline","heading","paragraph","image","table","tag","fence","blockquote","list","hr"],attributes:{align:{type:String},colspan:{type:Number,render:"colSpan"},rowspan:{type:Number,render:"rowSpan"}}},Rt={render:"th",attributes:{width:{type:Number},align:{type:String},colspan:{type:Number,render:"colSpan"},rowspan:{type:Number,render:"rowSpan"}}},$t={render:"tr",children:["th","td"]},Bt={render:"tbody",children:["tr","tag"]},Ut={render:"thead",children:["tr"]},Vt={render:"strong",children:["em","s","link","code","text","tag"],attributes:{marker:{type:String,render:!1}}},Zt={render:"em",children:["strong","s","link","code","text","tag"],attributes:{marker:{type:String,render:!1}}},Ht={render:"s",children:["strong","em","link","code","text","tag"]},Yt={children:["strong","em","s","code","text","tag","link","image","hardbreak","softbreak","comment"]},Gt={render:"a",children:["strong","em","s","code","text","tag"],attributes:{href:{type:String,required:!0},title:{type:String}}},Wt={render:"code",attributes:{content:{type:String,render:!1,required:!0}},transform(e,t){const r=e.transformAttributes(t);return new ze("code",r,[e.attributes.content])}},Jt={attributes:{content:{type:String,required:!0}},transform:e=>e.attributes.content},Xt={render:"br"},Kt={transform:()=>" "},Qt={attributes:{content:{type:String,required:!0}}},er={},tr={},rr=p(Ee()),{escapeHtml:nr}=(0,rr.default)().utils,ar=new Set(["area","base","br","col","embed","hr","img","input","link","meta","param","source","track","wbr"]);function ir(e,t){return"string"!==typeof e?"Fragment":e[0]!==e[0].toUpperCase()?e:t instanceof Function?t(e):t[e]}function or(e){return e.map(cr).join(", ")}function sr(e){if(null==e||"object"!==typeof e)return JSON.stringify(e);if(Array.isArray(e))return`[${e.map((e=>sr(e))).join(", ")}]`;if("Tag"===e.$$mdtype)return cr(e);if("object"!==typeof e)return JSON.stringify(e);return`{${Object.entries(e).map((([e,t])=>[JSON.stringify(e),sr(t)].join(": "))).join(", ")}}`}function cr(e){if(Array.isArray(e))return`React.createElement(React.Fragment, null, ${or(e)})`;if(null===e||"object"!==typeof e||!ze.isTag(e))return JSON.stringify(e);const{name:t,attributes:{class:r,...n}={},children:a=[]}=e;return r&&(n.className=r),`React.createElement(\n tagName(${JSON.stringify(t)}, components),\n ${0==Object.keys(n).length?"null":sr(n)},\n ${or(a)})`}var lr={html:function e(t){if("string"===typeof t||"number"===typeof t)return nr(String(t));if(Array.isArray(t))return 
t.map(e).join("");if(null===t||"object"!==typeof t||!ze.isTag(t))return"";const{name:r,attributes:n,children:a=[]}=t;if(!r)return e(a);let i=`<${r}`;for(const[o,s]of Object.entries(n??{}))i+=` ${o.toLowerCase()}="${nr(String(s))}"`;return i+=">",ar.has(r)||(a.length&&(i+=e(a)),i+=``),i},react:function(e,t,{components:r={}}={}){function n(e){if(null==e||"object"!==typeof e)return e;if(Array.isArray(e))return e.map((e=>n(e)));if("Tag"===e.$$mdtype)return a(e);if("object"!==typeof e)return e;const t={};for(const[r,a]of Object.entries(e))t[r]=n(a);return t}function a(e){if(Array.isArray(e))return t.createElement(t.Fragment,null,...e.map(a));if(null===e||"object"!==typeof e||!ze.isTag(e))return e;const{name:i,attributes:{class:o,...s}={},children:c=[]}=e;return o&&(s.className=o),t.createElement(function(e,t){return"string"!==typeof e||e[0]!==e[0].toUpperCase()?e:t instanceof Function?t(e):t[e]}(i,r),0==Object.keys(s).length?null:n(s),...c.map(a))}return a(e)},reactStatic:function(e){return`\n (({components = {}} = {}) => {\n ${ir}\n return ${cr(e)};\n })\n`}},ur={else:_t,if:kt,partial:{inline:!1,selfClosing:!0,attributes:{file:{type:class{validate(e,t){const{partials:r={}}=t;return r[e]?[]:[{id:"attribute-value-invalid",level:"error",message:`Partial \`${e}\` not found. The 'file' attribute must be set in \`config.partials\``}]}},render:!1,required:!0},variables:{type:Object,render:!1}},transform(e,t){const{partials:r={}}=t,{file:n,variables:a}=e.attributes,i=r[n];if(!i)return null;const o={...t,variables:{...t.variables,...a,"$$partial:filename":n}},s=e=>e.resolve(o).transformChildren(o);return Array.isArray(i)?i.flatMap(s):s(i)}},slot:{attributes:{primary:{type:String,required:!0}}},table:{children:["table"],inline:!1}},fr=p(Ce()),pr=p(d());function dr(e,t,r){try{const{type:r,meta:n,nesting:a=0}=(0,pr.parse)(t,{Variable:Me,Function:Te}),i=e.push(r,"",a);return i.info=t,i.meta=n,e.delimiters||(e.delimiters=[]),i}catch(n){if(!(n instanceof pr.SyntaxError))throw n;const{message:t,location:{start:a,end:i}}=n,o=r?{start:{offset:a.offset+r},end:{offset:i.offset+r}}:null,s=e.push("error","",0);return s.meta={error:{message:t,location:o}},s}}function hr(e,t,r,n){const a=e.bMarks[t]+e.tShift[t],i=e.eMarks[t];if(!e.src.startsWith(Pe,a))return!1;const o=Ve(e.src,a),s=e.src.slice(0,i).trim().length;if(!o||o"process"===e.name&&!e.value))||(t.children=He(t.content,t.map[0]))}}function br(e){e.block.ruler.before("paragraph","annotations",hr,{alt:["paragraph","blockquote"]}),e.inline.ruler.push("containers",mr),e.core.ruler.push("annotations",gr)}var yr="---";function vr(e,t){return e.src.slice(e.bMarks[t],e.eMarks[t]).trim()}function kr(e,t,r,n){if(0!=t||vr(e,0)!=yr)return!1;const a=function(e,t){for(let r=1;rt===e)):n.returns===e)}if(Ke.isAst(t))return!0;if(Array.isArray(e))return e.some((e=>jr(e,t,r,n)));if("string"===typeof e&&(e=Dr[e]),"function"===typeof e){const a=new e;if(a.validate)return a.validate(t,r,n)}return null!=t&&t.constructor===e}function Or(e){return"string"===typeof e?e:Array.isArray(e)?e.map(Or).join(" | "):e.name}function qr(e,t){const r=t.functions?.[e.name],n=[];if(!r)return[{id:"function-undefined",level:"critical",message:`Undefined function: '${e.name}'`}];if(r.validate&&n.push(...r.validate(e,t)),r.parameters)for(const[a,i]of Object.entries(e.parameters)){const o=r.parameters?.[a];if(o){if((!Ke.isAst(i)||Ke.isFunction(i))&&o.type){const r=jr(o.type,i,t,a);!1===r?n.push({id:"parameter-type-invalid",level:"error",message:`Parameter '${a}' of '${e.name}' must be type of 
'${Or(o.type)}'`}):Array.isArray(r)&&n.push(...r)}}else n.push({id:"parameter-undefined",level:"error",message:`Invalid parameter: '${a}'`})}for(const[a,{required:i}]of Object.entries(r.parameters??{}))i&&void 0===e.parameters[a]&&n.push({id:"parameter-missing-required",level:"error",message:`Missing required parameter: '${a}'`});return n}function Fr(e,t){if(e.length<=t)return JSON.stringify(e);return`[${e.slice(0,t).map((e=>JSON.stringify(e))).join(",")}, ... ${e.length-t} more]`}function zr(e,t){const r=e.findSchema(t),n=[...e.errors||[]];if(!r)return n.push({id:e.tag?"tag-undefined":"node-undefined",level:"critical",message:e.tag?`Undefined tag: '${e.tag}'`:`Undefined node: '${e.type}'`}),n;void 0!=r.inline&&e.inline!==r.inline&&n.push({id:"tag-placement-invalid",level:"critical",message:`'${e.tag}' tag should be ${r.inline?"inline":"block"}`}),r.selfClosing&&e.children.length>0&&n.push({id:"tag-selfclosing-has-children",level:"critical",message:`'${e.tag}' tag should be self-closing`});const a={...Ye,...r.attributes};for(const i of Object.keys(e.slots)){const e=r.slots?.[i];e||n.push({id:"slot-undefined",level:"error",message:`Invalid slot: '${i}'`})}for(let[i,o]of Object.entries(e.attributes)){const e=a[i];if(!e){n.push({id:"attribute-undefined",level:"error",message:`Invalid attribute: '${i}'`});continue}let{type:r,matches:s,errorLevel:c}=e;if(Ke.isAst(o))if(Ke.isFunction(o)&&t.validation?.validateFunctions)n.push(...qr(o,t));else{if(!Ke.isVariable(o)||!t.variables)continue;{let e=!1,r=t.variables;for(const t of o.path){if(!Object.prototype.hasOwnProperty.call(r,t)){e=!0;break}r=r[t]}e&&n.push({id:"variable-undefined",level:"error",message:`Undefined variable: '${o.path.join(".")}'`})}}if(r){const e=jr(r,o,t,i);!1===e&&n.push({id:"attribute-type-invalid",level:c||"error",message:`Attribute '${i}' must be type of '${Or(r)}'`}),Array.isArray(e)&&n.push(...e)}if("function"===typeof s&&(s=s(t)),Array.isArray(s)&&!s.includes(o)&&n.push({id:"attribute-value-invalid",level:c||"error",message:`Attribute '${i}' must match one of ${Fr(s,8)}. Got '${o}' instead.`}),s instanceof RegExp&&!s.test(o)&&n.push({id:"attribute-value-invalid",level:c||"error",message:`Attribute '${i}' must match ${s}. 
Got '${o}' instead.`}),"function"===typeof e.validate){const r=e.validate(o,t,i);Array.isArray(r)&&n.push(...r)}}for(const[i,{required:o}]of Object.entries(a))o&&void 0===e.attributes[i]&&n.push({id:"attribute-missing-required",level:"error",message:`Missing required attribute: '${i}'`});if(r.slots)for(const[i,{required:o}]of Object.entries(r.slots))o&&void 0===e.slots[i]&&n.push({id:"slot-missing-required",level:"error",message:`Missing required slot: '${i}'`});for(const{type:i}of e.children)r.children&&"error"!==i&&!r.children.includes(i)&&n.push({id:"child-invalid",level:"warning",message:`Can't nest '${i}' in '${e.tag||e.type}'`});if(r.validate){const a=r.validate(e,t);if(Ue(a))return a.then((e=>n.concat(e)));n.push(...a)}return n}function*Lr(e,t=[]){yield[e,t];for(const r of[...Object.values(e.slots),...e.children])yield*Lr(r,[...t,e])}var Ir=new Sr;function Nr(e={}){return{...e,tags:{...ur,...e.tags},nodes:{...Dt,...e.nodes},functions:{...wt,...e.functions}}}function Mr(e,t){return"string"===typeof e&&(e=Ir.tokenize(e)),function(e,t){const r=new We("document"),n=[r];"string"===typeof t&&(t={file:t});for(const a of e)St(a,n,t?.file,t?.slots,t?.location);if(n.length>1)for(const a of n.slice(1))a.errors.push({id:"missing-closing",level:"critical",message:`Node '${a.tag||a.type}' is missing closing`});for(const a of At)a(r);return r}(e,t)}function Tr(e,t){return Array.isArray(e)?e.flatMap((e=>e.resolve(t))):e.resolve(t)}function Pr(e,t){const r=Nr(t),n=Tr(e,r);return Array.isArray(n)?n.flatMap((e=>e.transform(r))):n.transform(r)}function Rr(e,t){return function(e,t){const r=[...Lr(e)].map((([e,r])=>{const{type:n,lines:a,location:i}=e,o=zr(e,{...t,validation:{...t.validation,parents:r}});return Ue(o)?o.then((e=>e.map((e=>({type:n,lines:a,location:i,error:e}))))):o.map((e=>({type:n,lines:a,location:i,error:e})))}));return r.some(Ue)?Promise.all(r).then((e=>e.flat())):r.flat()}(e,Nr(t))}var $r=class{constructor(e){this.parse=Mr,this.resolve=e=>Tr(e,this.config),this.transform=e=>Pr(e,this.config),this.validate=e=>Rr(e,this.config),this.config=e}};$r.nodes=Dt,$r.tags=ur,$r.functions=wt,$r.globalAttributes=Ye,$r.renderers=lr,$r.transforms=At,$r.Ast=Ke,$r.Tag=ze,$r.Tokenizer=Sr,$r.parseTags=He,$r.transformer=Ge,$r.validator=zr,$r.parse=Mr,$r.transform=Pr,$r.validate=Rr,$r.createElement=function(e,t={},...r){return{name:e,attributes:t,children:r}},$r.truthy=vt,$r.format=yt},33436:(e,t,r)=>{function n(e,{target:t=document.body}={}){if("string"!==typeof e)throw new TypeError(`Expected parameter \`text\` to be a \`string\`, got \`${typeof e}\`.`);const r=document.createElement("textarea"),n=document.activeElement;r.value=e,r.setAttribute("readonly",""),r.style.contain="strict",r.style.position="absolute",r.style.left="-9999px",r.style.fontSize="12pt";const a=document.getSelection(),i=a.rangeCount>0&&a.getRangeAt(0);t.append(r),r.select(),r.selectionStart=0,r.selectionEnd=e.length;let o=!1;try{o=document.execCommand("copy")}catch{}return r.remove(),i&&(a.removeAllRanges(),a.addRange(i)),n&&n.focus(),o}r.d(t,{A:()=>n})}}]); \ No newline at end of file diff --git a/src/web/gui/v2/7529.658d363e12e73df83b60.chunk.js b/src/web/gui/v2/7529.658d363e12e73df83b60.chunk.js deleted file mode 100644 index b5c87231b..000000000 --- a/src/web/gui/v2/7529.658d363e12e73df83b60.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},n=(new 
Error).stack;n&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[n]="fef08628-86a7-43cd-a204-367ad851ff11",e._sentryDebugIdIdentifier="sentry-dbid-fef08628-86a7-43cd-a204-367ad851ff11")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[7529],{67529:(e,n,d)=>{d.r(n),d.d(n,{AuthTokenCallback:()=>t,default:()=>a});var f=d(96540),o=d(38819);const t=()=>{const e=(0,o.W6)("redirect_uri"),n=(0,o.W6)("token");return(0,f.useEffect)((()=>{e&&n&&(localStorage.setItem("netdataJWT",n),location.href=decodeURIComponent(e))}),[]),null},a=t}}]); \ No newline at end of file diff --git a/src/web/gui/v2/7840.2f2023f2eb1dcc943d94.chunk.js b/src/web/gui/v2/7840.2f2023f2eb1dcc943d94.chunk.js deleted file mode 100644 index c69510bf7..000000000 --- a/src/web/gui/v2/7840.2f2023f2eb1dcc943d94.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="8601ed23-6969-43ce-a05e-e4f49fef792e",e._sentryDebugIdIdentifier="sentry-dbid-8601ed23-6969-43ce-a05e-e4f49fef792e")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[7840],{17840:(e,t,r)=>{r.r(t),r.d(t,{default:()=>h});var n=r(96540),o=r(47767),a=(r(30067),r(93518),r(17333),r(41393),r(8159),r(98992),r(54520),r(81454),r(37550),r(62953),r(3296),r(27208),r(48408),r(83199)),c=r(22292),i=r(79731),s=r(9224);const u=new URLSearchParams(window.location.search.substr(1)),d=()=>{location.assign("https://registry.my-netdata.io/goto-host-from-alarm.html".concat(location.search))},l=e=>fetch(e,{redirect:"follow"}).then((()=>e)),f=e=>{let t="top;nowelcome=1";for(let[n,o]of u.entries())n=decodeURIComponent(n),"agentID"!==n&&(t+=";".concat(encodeURIComponent(n)),o=decodeURIComponent(o),""!==o&&(t+="=".concat(encodeURIComponent(o))));const r=new URL(e);return"/"===r.pathname&&(r.pathname=""),r.hash=t,r.toString()},p=(e,t)=>{if(!u.has("agentID"))return void(document.body.innerHTML='
missing "agentID" query string parameter');(async(e,t)=>{try{return(await(0,s.tz)(t,e)).data}catch(r){const{data:e}=r.response;document.body.innerHTML="".concat((0,i.o)(null===e||void 0===e?void 0:e.errorMsgKey),"Getting you back to Netdata...");const t=new Error((0,i.o)(null===e||void 0===e?void 0:e.errorMsgKey)||"Redirect error");throw t.name="".concat((null===e||void 0===e?void 0:e.errorMsgKey)||"Server error"),t.stack="".concat((null===e||void 0===e?void 0:e.errorCode)||"Redirect error code"),t}})(decodeURIComponent(u.get("agentID")||"unknown_agent_id"),e).catch((e=>{if("ErrVisitedNodeNotFound"!==(null===e||void 0===e?void 0:e.name))throw document.body.innerHTML="".concat(null===e||void 0===e?void 0:e.message,""),e;d()})).then((e=>{var r;const n=null===e||void 0===e||null===(r=e.urls)||void 0===r?void 0:r.map(f),o="https:"===location.protocol&&n.some((e=>!e.startsWith("https:"))),a=o?n.filter((e=>e.startsWith("https:"))):n;if(0!==a.length)return Promise.any(a.map(l)).then((()=>{location.assign("url".concat(t))}),(()=>{o?location.protocol="http:":document.body.innerHTML="\n
    \n ")}));o?location.protocol="http:":d()}))},v=()=>{const e=(0,c.NJ)(),{search:t}=(0,o.zy)();return(0,n.useEffect)((()=>{e&&p(e,t||"")}),[e,t]),n.createElement(a.H4,null,"Redirecting...")},h=()=>n.createElement(o.BV,null,n.createElement(o.qh,{path:"/redirects/alerts",element:n.createElement(v,null)}),n.createElement(o.qh,{path:"/redirects/alarms",element:n.createElement(v,null)})," ")},84428:(e,t,r)=>{var n=r(78227)("iterator"),o=!1;try{var a=0,c={next:function(){return{done:!!a++}},return:function(){o=!0}};c[n]=function(){return this},Array.from(c,(function(){throw 2}))}catch(i){}e.exports=function(e,t){try{if(!t&&!o)return!1}catch(i){return!1}var r=!1;try{var a={};a[n]=function(){return{next:function(){return{done:r=!0}}}},e(a)}catch(i){}return r}},87290:(e,t,r)=>{var n=r(50516),o=r(19088);e.exports=!n&&!o&&"object"==typeof window&&"object"==typeof document},50516:e=>{e.exports="object"==typeof Deno&&Deno&&"object"==typeof Deno.version},19088:(e,t,r)=>{var n=r(24475),o=r(44576);e.exports="process"===o(n.process)},16193:(e,t,r)=>{var n=r(79504),o=Error,a=n("".replace),c=String(new o("zxcasd").stack),i=/\n\s*at [^:]*:[^\n]*/,s=i.test(c);e.exports=function(e,t){if(s&&"string"==typeof e&&!o.prepareStackTrace)for(;t--;)e=a(e,i,"");return e}},80747:(e,t,r)=>{var n=r(66699),o=r(16193),a=r(24659),c=Error.captureStackTrace;e.exports=function(e,t,r,i){a&&(c?c(e,t):n(e,"stack",o(r,i)))}},24659:(e,t,r)=>{var n=r(79039),o=r(6980);e.exports=!n((function(){var e=new Error("a");return!("stack"in e)||(Object.defineProperty(e,"stack",o(1,7)),7!==e.stack)}))},77584:(e,t,r)=>{var n=r(20034),o=r(66699);e.exports=function(e,t){n(t)&&"cause"in t&&o(e,"cause",t.cause)}},32603:(e,t,r)=>{var n=r(655);e.exports=function(e,t){return void 0===e?arguments.length<2?"":t:n(e)}},10916:(e,t,r)=>{var n=r(24475),o=r(80550),a=r(94901),c=r(92796),i=r(33706),s=r(78227),u=r(87290),d=r(50516),l=r(96395),f=r(77388),p=o&&o.prototype,v=s("species"),h=!1,g=a(n.PromiseRejectionEvent),m=c("Promise",(function(){var e=i(o),t=e!==String(o);if(!t&&66===f)return!0;if(l&&(!p.catch||!p.finally))return!0;if(!f||f<51||!/native code/.test(e)){var r=new o((function(e){e(1)})),n=function(e){e((function(){}),(function(){}))};if((r.constructor={})[v]=n,!(h=r.then((function(){}))instanceof n))return!0}return!t&&(u||d)&&!g}));e.exports={CONSTRUCTOR:m,REJECTION_EVENT:g,SUBCLASSING:h}},90537:(e,t,r)=>{var n=r(80550),o=r(84428),a=r(10916).CONSTRUCTOR;e.exports=a||!o((function(e){n.all(e).then(void 0,(function(){}))}))},17145:(e,t,r)=>{var n=r(46518),o=r(1625),a=r(42787),c=r(52967),i=r(77740),s=r(2360),u=r(66699),d=r(6980),l=r(77584),f=r(80747),p=r(72652),v=r(32603),h=r(78227)("toStringTag"),g=Error,m=[].push,y=function(e,t){var r,n=o(b,this);c?r=c(new g,n?a(this):b):(r=n?this:s(b),u(r,h,"Error")),void 0!==t&&u(r,"message",v(t)),f(r,y,r.stack,1),arguments.length>2&&l(r,arguments[2]);var i=[];return p(e,m,{that:i}),u(r,"errors",i),r};c?c(y,g):i(y,g,{name:!0});var b=y.prototype=s(g.prototype,{constructor:d(1,y),message:d(1,""),name:d(1,"AggregateError")});n({global:!0,constructor:!0,arity:2},{AggregateError:y})},30067:(e,t,r)=>{r(17145)},93518:(e,t,r)=>{var n=r(46518),o=r(69565),a=r(79306),c=r(97751),i=r(36043),s=r(1103),u=r(72652),d=r(90537),l="No one promise resolved";n({target:"Promise",stat:!0,forced:d},{any:function(e){var t=this,r=c("AggregateError"),n=i.f(t),d=n.resolve,f=n.reject,p=s((function(){var n=a(t.resolve),c=[],i=0,s=1,p=!1;u(e,(function(e){var 
a=i++,u=!1;s++,o(n,t,e).then((function(e){u||p||(p=!0,d(e))}),(function(e){u||p||(u=!0,c[a]=e,--s||f(new r(c,l)))}))})),--s||f(new r(c,l))}));return p.error&&f(p.value),n.promise}})}}]); \ No newline at end of file diff --git a/src/web/gui/v2/785.d016913841bcc0209d5b.chunk.js b/src/web/gui/v2/785.d016913841bcc0209d5b.chunk.js deleted file mode 100644 index 8b95ef5d9..000000000 --- a/src/web/gui/v2/785.d016913841bcc0209d5b.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="9ed79861-050e-4ee2-ae0b-209bed9bcbc6",e._sentryDebugIdIdentifier="sentry-dbid-9ed79861-050e-4ee2-ae0b-209bed9bcbc6")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[785],{52919:(e,t,a)=>{a.d(t,{D7:()=>r,Jp:()=>d,NT:()=>u,N_:()=>h,iS:()=>s,vE:()=>i,v_:()=>o,xI:()=>c,z6:()=>m});var n=a(8711),l=a(83199);const o=n.default.img.withConfig({displayName:"styled__Illustration",componentId:"sc-1yhntgl-0"})(["margin:0 auto;"]),r=(0,n.default)(l.Text).withConfig({displayName:"styled__StyledText",componentId:"sc-1yhntgl-1"})(["display:block;"]),d=n.default.div.withConfig({displayName:"styled__TextHeader",componentId:"sc-1yhntgl-2"})(["margin-bottom:",";font-weight:700;"],(0,l.getSizeBy)(2)),c=n.default.div.withConfig({displayName:"styled__StaticCheckmarks",componentId:"sc-1yhntgl-3"})(["margin-top:",";margin-bottom:",";"],(0,l.getSizeBy)(2),(0,l.getSizeBy)(5)),s=n.default.div.withConfig({displayName:"styled__CheckmarkLine",componentId:"sc-1yhntgl-4"})(["display:flex;align-items:center;"]),i=(0,n.default)(l.Icon).withConfig({displayName:"styled__StyledIcon",componentId:"sc-1yhntgl-5"})(["margin-right:4px;height:18px;> use{fill:",";}"],(0,l.getColor)("primary")),u=(0,n.default)(i).withConfig({displayName:"styled__HelpIcon",componentId:"sc-1yhntgl-6"})(["vertical-align:middle;"]),m=n.default.div.withConfig({displayName:"styled__LearnMoreSection",componentId:"sc-1yhntgl-7"})(["margin-top:",";"],(0,l.getSizeBy)(4)),h=n.default.a.withConfig({displayName:"styled__Link",componentId:"sc-1yhntgl-8"})(["text-decoration:underline;color:",";&:visited{color:",";}"],(0,l.getColor)("success"),(0,l.getColor)("success"))},40785:(e,t,a)=>{a.r(t),a.d(t,{DashboardContent:()=>ze,default:()=>Fe});var n=a(96540),l=a(47767),o=a(43124);var r=a(56820),d=a(3914),c=a(69765),s=a(35304),i=a(21591),u=a(68980),m=a(54621),h=a(91517),b=a(28738);const g=()=>n.createElement(b.A,{title:"Loading charts...","data-testid":"dashboardLoading"});a(62953);var f=a(67602),p=a(3705),v=a(86652),E=a(58388),y=a(11128),C=a(47762),A=a(58168),x=a(83199),w=a(49667);const I=e=>{let{compact:t,...a}=e;const[,l]=(0,f.A)("addChartModal"),o=(0,C.TG)().length>0;return n.createElement(x.Flex,(0,A.A)({column:!0,alignItems:"center",justifyContent:"center",overflow:{vertical:"auto"},flex:!0,basis:0,"data-testid":"dashboardBlankSlate"},a),!t&&n.createElement(x.Flex,{as:"img",src:w.$,height:"35%"}),n.createElement(x.H3,{margin:[6,0,2]},"Empty Dashboard"),o?n.createElement(n.Fragment,null,n.createElement(x.Text,{textAlign:"center"},"Let\u2019s fill your dashboard."),n.createElement(x.Text,{textAlign:"center"},"Go to a node view or node list and add one chart to this Dashboard or just 
use")):n.createElement(n.Fragment,null,n.createElement(x.Text,{textAlign:"center"},"You don't have any available node to retrieve charts from."),n.createElement(x.Text,{textAlign:"center"},"Please verify your nodes' state and/or connect a new node to Netdata")),!t&&n.createElement(x.Button,{margin:[8,0,0,0],onClick:l,disabled:!o,label:"Add chart","data-testid":"dashboardBlankSlate-addChart"}))};a(3064),a(41393),a(98992),a(72577),a(81454);var k=a(15327),D=a(74618),N=a(45765),S=a(67990),_=a(69388),T=a(23931),z=a(57605),F=a(67544),M=(a(9920),a(3949),a(45387)),B=a(36196),L=a(80925),R=a(13752),j=a(92815),H=a(72582),G=a(64125),U=a(44644),K=a(29217);const O=()=>n.createElement(x.Flex,{column:!0,width:"300px",gap:1},n.createElement(x.TextSmall,{strong:!0},"Not finding a chart?"),n.createElement(x.TextSmall,null,"When selecting",n.createElement(x.TextSmall,{strong:!0,margin:[0,1]},"All Nodes"),"you search and add charts using their context. The outcome will be a composite chart over All Nodes, like on the Overview tab."),n.createElement(x.TextSmall,null,"When you select a",n.createElement(x.TextSmall,{strong:!0,margin:[0,1]},"specific Node"),"you search and add charts using their name. The result is a specific chart over the node selected, like on Single Node tab.")),Y=()=>n.createElement(K.A,{isBasic:!0,plain:!0,content:O,align:"bottom"},n.createElement(x.Icon,{name:"question",color:"textDescription",size:"small"})),J=e=>{let{spaceId:t,roomId:a,value:l,onChange:o,options:r,testid:c,selectedNodeIds:s}=e;const i=null===l||void 0===l?void 0:l.value,u=(0,L.e)(),m=(0,T.R7)(i&&a,i),h=(0,d.dg)(),b=(0,U.CS)({type:"chart",entity:i}),g=(0,n.useMemo)((()=>{u.getNodes({id:"newDashboardChart"}).forEach((e=>e.destroy()));const{info:e,valueRange:n,height:l,colors:o}=(0,M.Ay)(m),r=u.makeChart({attributes:{id:"newDashboardChart",contextScope:[i],host:h?"".concat(window.envSettings.agentApiUrl,"/api/v2"):"".concat(window.envSettings.apiUrl,"/api/v3/spaces/").concat(t,"/rooms/").concat(a),roomId:a,info:e,valueRange:n,height:l+207,enabledNavigation:!1,selectedNodes:s,...o&&{colors:o},toolboxElements:[H.default,R.default,j.default,G.A]}});return u.getRoot().appendChild(r),r}),[u,i]);return(0,n.useLayoutEffect)((()=>{g&&b&&(g.updateAttributes(b),g.trigger("fetch"))}),[g,a,i,b]),(0,n.useLayoutEffect)((()=>()=>g.destroy()),[]),n.createElement(x.Flex,{gap:6,column:!0},n.createElement(x.Flex,{gap:2,column:!0,"data-testid":c},n.createElement(x.Flex,{gap:2},n.createElement(x.H5,null,"Context in room overview"),n.createElement(Y,null)),n.createElement(x.Select,{options:r,value:l,onChange:o,isClearable:!0,form:c})),l&&n.createElement(x.Flex,{height:{min:"320px"},column:!0},n.createElement(B.A,{chart:g,margin:[2,0,0]})))};var W=a(18202);const P={label:"All Nodes",value:"all-nodes"},X={label:"All Nodes (no reachable nodes)",value:"all-nodes",isDisabled:!0},q=e=>{let{id:t,isLive:a,nodeStatus:n,name:l}=e;return{label:a?l:"".concat(l," (").concat(n,")"),value:t,isDisabled:!a}},Q=e=>({value:e,label:e}),V=e=>{let{id:t,onClose:a}=e;const l=(0,d.vt)(),o=(0,c.ID)(),r=(0,u.fz)(t,"name"),s=(0,c.XA)("name"),i=(0,S.CK)(),m=(0,C.BU)().length>0,h=(0,S.nj)(),b=(0,C.Gt)(i),g=(0,F.nM)(t),f=m?P:X,[p,v]=(0,n.useState)(f.value),[E,y]=(0,n.useState)(null),A=(0,n.useMemo)((()=>[f,...b.map(q)]),[b]),w=(0,n.useMemo)((()=>A.find((e=>{let{value:t}=e;return t===p}))),[A,p]),I=(0,n.useCallback)((e=>{v((null===e||void 0===e?void 0:e.value)||f.value),y(null)}),[]),M=(0,n.useMemo)((()=>p===f.value?[]:[p]),[p]),B=(0,_.A)(o,M,{spaceId:l});if(B)throw B;const 
R=(0,T.R7)(o),j=(0,n.useMemo)((()=>Object.keys(R).sort(((e,t)=>e.localeCompare(t,void 0,{sensitivity:"accent",ignorePunctuation:!0}))).map(Q)),[R]);(0,n.useEffect)((()=>{j.length&&w&&y((e=>e||j[0].value))}),[E,j,w]);const H=(0,n.useMemo)((()=>E&&Q(E)),[E]),G=(0,n.useCallback)((e=>y(null===e||void 0===e?void 0:e.value)),[]),U=R[E],K=(0,L.e)(),O=(0,n.useCallback)((()=>{const e=K.getNode({id:"newDashboardChart"}),t=(0,W.A)(e.getAttributes());g(t.contextScope,t,(()=>setTimeout(a)))}),[p,U]),Y=!E,V=(0,z.A)();return(0,n.useEffect)((()=>(V(!0),()=>V(!1)))),n.createElement(k.GO,{onClose:a},n.createElement(D.z,{onClose:a,title:"Add Chart to Dashboard"},n.createElement(x.Button,{label:"Add chart",icon:"plus",onClick:O,disabled:Y,"data-testid":"addChartModal-addChart-button"})),n.createElement(N.U,null,"Add Charts to ",r),n.createElement(k.Yv,{overflow:"visible"},n.createElement(x.Flex,{gap:6,column:!0},n.createElement(x.Flex,{gap:2,column:!0,"data-testid":"addChartModal-selectNodes"},n.createElement(x.H5,null,"Nodes in ",s),n.createElement(x.Select,{options:A,value:w,onChange:I,isLoading:!h,isClearable:!0,form:"addChartModal-selectNodes"})),H&&n.createElement(J,{id:t,spaceId:l,roomId:o,value:H,onChange:G,options:j,testid:"addChartModal-selectChart",selectedNodeIds:M}))))};var Z=a(83084),$=a(76777),ee=a(27467),te=a(16579),ae=a(63314),ne=(a(14905),a(8872),a(79304)),le=a(44554);const oe=function(e,t){let{containerIds:a,containers:n,layout:l}=e,{extraKey:o}=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{};const r={},d=a.reduce(((e,a)=>{const d=(0,le.bj)(l[a],"vertical");return e[a]={...n[a],level:0,id:a,subMenuIds:d.reduce(((e,n)=>{let{id:l}=n;return"text"!==t(l).type&&e.push("".concat(a,"|").concat(l)),e}),[]),subMenuChartIds:[],link:"".concat((0,ne.A)("menu_".concat(a))),forceVisibility:!0,arFlavour:"anomaly",extraKey:o},d.forEach((e=>{const n="".concat(a,"|").concat(e.id),l=t(e.id);"text"!==l.type&&(r[n]={name:l.title,level:1,id:n,menuGroupId:a,chartIds:[],link:"".concat((0,ne.A)("menu_".concat(a,"_submenu_").concat(n))),arFlavour:"anomaly",showAR:!1,extraKey:o})})),e}),{});return{menuGroupIds:a,menuGroupById:d,subMenuById:r}};var re=a(75793);const de=e=>{let{id:t}=e;const a=(0,u.fz)(t,"name");return n.createElement(re.A,{TextComponent:x.H0,text:a})};var ce=a(46741);const se=()=>{const[,e]=(0,f.A)("addChartModal"),t=(0,ce.JT)("dashboard:Update");return(0,C.TG)().length>0?n.createElement(x.Button,{neutral:!0,flavour:"hollow",label:"Add chart",icon:"chart_added",onClick:e,"data-testid":"dashboardHeaderActionBar-addChart-button","data-ga":"action-bar::click-add-chart::cust-dashboard",disabled:!t}):null},ie=e=>{let{id:t}=e;const a=(0,F.Kv)(t),l=(0,ce.JT)("dashboard:Update");return n.createElement(x.Button,{neutral:!0,flavour:"hollow",label:"Add text",icon:"text_add",onClick:a,"data-testid":"dashboardHeaderActionBar-addText-button","data-ga":"action-bar::click-add-text::cust-dashboard",disabled:!l})};var ue=a(71835),me=a(2025),he=a(64754);const be=e=>{let{id:t}=e;const[,a]=(0,ue.A)(),l=(0,me.DH)(t,{onFail:a}),o=(0,u.fz)(t,"processing"),r=(0,u.fz)(t,"isOwner"),d=(0,u.zN)(t);return n.createElement(he.A,{permission:r?"dashboard:UpdateSelf":"dashboard:Update",flavour:"hollow",label:"Save",icon:"save",onClick:()=>l(),disabled:d,isLoading:o,"data-testid":"dashboardHeaderActionBar-save-button","data-ga":"action-bar::click-save::cust-dashboard"})},ge=e=>{let{id:t}=e;return n.createElement(x.Flex,{gap:2},n.createElement(se,null),n.createElement(ie,{id:t}),n.createElement(be,{id:t}))},fe=(0,n.memo)(ge);var 
pe=a(58384),ve=a(87659),Ee=a(78969),ye=a(92138),Ce=a(8018),Ae=a(52919);const xe=e=>{let{close:t}=e;const[a,o]=(0,n.useState)(!0),r=(0,u.zi)(),[d,s]=(0,u.TN)({id:r,key:"name"}),i=(0,u.oj)("slug"),h=(0,ye.A)(i),[b,g]=(0,n.useState)(d),f=(0,me.DH)(r),p=(0,m.DF)(),v=(0,l.Zp)(),E=(0,c.r9)(),y=b.length>0,C=a&&y&&b!==d,A=(0,n.useCallback)((()=>{s(b),f().then((e=>{let{data:a}=e;const{slug:n}=a;if(h===n)return void t();const l="".concat(E,"/dashboards/").concat(n);p({id:r,slug:n,path:l}),v(l,{replace:!0}),t()}))}),[t,b,r,E,h]);return n.createElement(k.GO,{onClose:t,"data-testid":"renameDashboard-modal"},n.createElement(D.z,{onClose:t,title:"Rename Dashboard"},n.createElement(x.Button,{label:"Save Changes",onClick:A,disabled:!C})),n.createElement(N.U,{"data-testid":"renameDashboard-title"},"Rename dashboard ",d),n.createElement(k.Yv,null,n.createElement(Ce.A,{value:b,label:"Name",onChange:e=>{g(e.target.value)},isValid:a,setIsValid:o,isDirty:y,instantFeedback:"all",onKeyDown:e=>e.keyCode===Ee.I7&&C&&A(),"data-testid":"renameDashboard-input"}),n.createElement(x.Flex,{column:!0,justifyContent:"between",height:"100%",margin:[8,0,16],"data-testid":"renameDashboard-learnMoreSection"},n.createElement(Ae.z6,null,n.createElement(Ae.NT,{name:"help"}),n.createElement(x.Text,null,"Learn more about Dashboards")," ",n.createElement(Ae.N_,{href:"https://learn.netdata.cloud/docs/cloud/visualize/dashboards",target:"_blank",rel:"noopener noreferrer"},"In our documentation")),n.createElement(Ae.v_,{src:w.$}))))},we=()=>{const e=(0,l.Zp)(),t=(0,d.bq)(),a=(0,c.QW)(),o=(0,u.zi)(),{name:r}=(0,u.fz)(o),s=(0,c.XA)("name"),[i,,m,h]=(0,ve.A)(),[b,,g,f]=(0,ve.A)(),p=(0,n.useCallback)((()=>{e("/spaces/".concat(t,"/rooms/").concat(a,"/dashboards"))}),[t,a]),v=(0,F.A_)(o,{onSuccess:p}),E=(0,ce.JT)("dashboard:Update"),y=(0,ce.JT)("dashboard:Delete");return n.createElement(n.Fragment,null,n.createElement(pe.A,{category:"dashboard",context:"title",testId:"dashboardDropdown"},(e=>{let{close:t}=e;return n.createElement(n.Fragment,null,n.createElement(pe.t,{icon:"pencilOutline",onClick:()=>{t(),g()},"data-testid":"renameDashboard-option","data-ga":"dropdown-item::click-rename-dashboard::cust-dashboard",disabled:!E},"Rename Dashboard"),n.createElement(pe.t,{icon:"trashcan",color:"errorText",onClick:()=>{t(),m()},"data-testid":"removeDashboard-option","data-ga":"dropdown-item::click-remove-dashboard::cust-dashboard",disabled:!y},"Delete Dashboard"))})),b&&n.createElement(xe,{close:f}),i&&n.createElement(x.ConfirmationDialog,{confirmLabel:"Yes, delete","data-ga":"delete-dashboard-dialog","data-testid":"deleteDashboardDialog",handleConfirm:v,handleDecline:h,message:n.createElement(n.Fragment,null,"You are about to delete ",n.createElement("strong",null,r)," from ",n.createElement("strong",null,s),".",n.createElement("br",null),"Are you sure you want to continue?"),title:"Delete ".concat(r)}))},Ie=e=>{let{id:t}=e;return n.createElement(x.Flex,{justifyContent:"between",padding:[3],"data-testid":"dashboardHeader"},n.createElement(x.Flex,{alignItems:"center"},n.createElement(we,null),n.createElement(de,{id:t})),n.createElement(fe,{id:t}))},ke=(0,n.memo)(Ie);var De=a(86008);const Ne=function(e){let t=!(arguments.length>1&&void 0!==arguments[1])||arguments[1];!function(e){let t=!(arguments.length>1&&void 0!==arguments[1])||arguments[1];const{navigator:a}=(0,n.useContext)(l.jb),o=(0,l.zy)();(0,n.useEffect)((()=>{if(!t)return;const n=a.block((t=>{const a={...t,retry(){n(),t.retry()}};e(a)}));return 
n}),[a,e,t,o])}((0,n.useCallback)((t=>{"REPLACE"!==t.action?window.confirm(e)&&t.retry():t.retry()}),[e]),t)},Se=()=>((0,$.A)(),null),_e=(0,n.memo)((e=>{let{id:t}=e;const[a,,,l]=(0,f.A)("addChartModal"),{fullyLoaded:o,cardIds:r,name:d}=(0,u.fz)(t);(e=>{const t=(0,u.zN)(e);Ne("Are you sure you want to leave this dashboard?\nChanges will be lost.",!t)})(t);const[s,{width:i}]=(0,p.A)(),m=(0,c.ID)(),[h,b]=(0,ee.N9)("chartName",{key:m,extraKey:t,flavour:"val"}),g=(0,y.w7)({extraKey:"dashboard",merge:!1,scoped:!0}),A=((0,C.TG)(g),(0,v.Xc)(t)),x=(0,E.T6)(t);return n.createElement(ae.Ay,{feature:"custom-dashboard"},n.createElement(te.A,{getObject:x,ids:A,getMenu:oe,extraKey:t},n.createElement(Se,null),n.createElement(Z.A,{ref:s,overflow:"hidden"},n.createElement(ke,{id:t}),o&&r.length>0&&n.createElement(De.A,{id:t,containerWidth:i,initialChartName:h,onChartNameChange:b}),o&&0===r.length&&n.createElement(I,null),a&&n.createElement(V,{id:t,onClose:l}))))}));var Te=a(85686);const ze=e=>{let{id:t}=e;const a=(0,d.vt)(),l=(0,d.bq)(),o=(0,c.ID)(),r=(0,c.QW)(),s=(0,u.oj)("state");return(0,h.A)(t,{spaceId:a,spaceSlug:l,roomId:o,roomSlug:r})?n.createElement(g,null):n.createElement(ae.Ay,{feature:"Dashboard",dashboardId:t},"notAvailable"==s?n.createElement(Te.A,{flavour:"dashboard"}):n.createElement(_e,{id:t}))},Fe=(0,n.memo)((e=>{let{customDashboardId:t}=e;(0,s.A)(),(e=>{const{dashboardSlug:t=e}=(0,l.g)(),a=(0,o.Xv)(),r=(0,o.Tf)();(0,n.useEffect)((()=>{r!==t&&a(t)}),[r,t]),(0,n.useEffect)((()=>()=>a(null)),[])})(t),(0,r.z1)(t);const a=(0,u.zi)(t),d=(0,u.oj)("slug");(0,m.ZB)({title:d,id:a,destination:d,type:"dashboards",droppable:!0,droppableProps:{dashboardId:a,dropArea:!0}});const c=(0,i.RQ)();return n.createElement(ze,{id:c?a:null})}))}}]); \ No newline at end of file diff --git a/src/web/gui/v2/7857.813ae058cca579e05462.chunk.js b/src/web/gui/v2/7857.813ae058cca579e05462.chunk.js deleted file mode 100644 index 8d60c3724..000000000 --- a/src/web/gui/v2/7857.813ae058cca579e05462.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="7acfc46e-2ecc-4a4b-a71f-3099ab003030",e._sentryDebugIdIdentifier="sentry-dbid-7acfc46e-2ecc-4a4b-a71f-3099ab003030")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[7857],{45413:(e,t)=>{"use strict";var n;Object.defineProperty(t,"__esModule",{value:!0}),t.Doctype=t.CDATA=t.Tag=t.Style=t.Script=t.Comment=t.Directive=t.Text=t.Root=t.isTag=t.ElementType=void 0,function(e){e.Root="root",e.Text="text",e.Directive="directive",e.Comment="comment",e.Script="script",e.Style="style",e.Tag="tag",e.CDATA="cdata",e.Doctype="doctype"}(n=t.ElementType||(t.ElementType={})),t.isTag=function(e){return e.type===n.Tag||e.type===n.Script||e.type===n.Style},t.Root=n.Root,t.Text=n.Text,t.Directive=n.Directive,t.Comment=n.Comment,t.Script=n.Script,t.Style=n.Style,t.Tag=n.Tag,t.CDATA=n.CDATA,t.Doctype=n.Doctype},41141:function(e,t,n){"use strict";var r=this&&this.__createBinding||(Object.create?function(e,t,n,r){void 0===r&&(r=n);var o=Object.getOwnPropertyDescriptor(t,n);o&&!("get"in o?!t.__esModule:o.writable||o.configurable)||(o={enumerable:!0,get:function(){return 
t[n]}}),Object.defineProperty(e,r,o)}:function(e,t,n,r){void 0===r&&(r=n),e[r]=t[n]}),o=this&&this.__exportStar||function(e,t){for(var n in e)"default"===n||Object.prototype.hasOwnProperty.call(t,n)||r(t,e,n)};Object.defineProperty(t,"__esModule",{value:!0}),t.DomHandler=void 0;var i=n(45413),a=n(36957);o(n(36957),t);var l={withStartIndices:!1,withEndIndices:!1,xmlMode:!1},s=function(){function e(e,t,n){this.dom=[],this.root=new a.Document(this.dom),this.done=!1,this.tagStack=[this.root],this.lastNode=null,this.parser=null,"function"===typeof t&&(n=t,t=l),"object"===typeof e&&(t=e,e=void 0),this.callback=null!==e&&void 0!==e?e:null,this.options=null!==t&&void 0!==t?t:l,this.elementCB=null!==n&&void 0!==n?n:null}return e.prototype.onparserinit=function(e){this.parser=e},e.prototype.onreset=function(){this.dom=[],this.root=new a.Document(this.dom),this.done=!1,this.tagStack=[this.root],this.lastNode=null,this.parser=null},e.prototype.onend=function(){this.done||(this.done=!0,this.parser=null,this.handleCallback(null))},e.prototype.onerror=function(e){this.handleCallback(e)},e.prototype.onclosetag=function(){this.lastNode=null;var e=this.tagStack.pop();this.options.withEndIndices&&(e.endIndex=this.parser.endIndex),this.elementCB&&this.elementCB(e)},e.prototype.onopentag=function(e,t){var n=this.options.xmlMode?i.ElementType.Tag:void 0,r=new a.Element(e,t,void 0,n);this.addNode(r),this.tagStack.push(r)},e.prototype.ontext=function(e){var t=this.lastNode;if(t&&t.type===i.ElementType.Text)t.data+=e,this.options.withEndIndices&&(t.endIndex=this.parser.endIndex);else{var n=new a.Text(e);this.addNode(n),this.lastNode=n}},e.prototype.oncomment=function(e){if(this.lastNode&&this.lastNode.type===i.ElementType.Comment)this.lastNode.data+=e;else{var t=new a.Comment(e);this.addNode(t),this.lastNode=t}},e.prototype.oncommentend=function(){this.lastNode=null},e.prototype.oncdatastart=function(){var e=new a.Text(""),t=new a.CDATA([e]);this.addNode(t),e.parent=t,this.lastNode=e},e.prototype.oncdataend=function(){this.lastNode=null},e.prototype.onprocessinginstruction=function(e,t){var n=new a.ProcessingInstruction(e,t);this.addNode(n)},e.prototype.handleCallback=function(e){if("function"===typeof this.callback)this.callback(e,this.dom);else if(e)throw e},e.prototype.addNode=function(e){var t=this.tagStack[this.tagStack.length-1],n=t.children[t.children.length-1];this.options.withStartIndices&&(e.startIndex=this.parser.startIndex),this.options.withEndIndices&&(e.endIndex=this.parser.endIndex),t.children.push(e),n&&(e.prev=n,n.next=e),e.parent=t,this.lastNode=null},e}();t.DomHandler=s,t.default=s},36957:function(e,t,n){"use strict";var r=this&&this.__extends||function(){var e=function(t,n){return e=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(e,t){e.__proto__=t}||function(e,t){for(var n in t)Object.prototype.hasOwnProperty.call(t,n)&&(e[n]=t[n])},e(t,n)};return function(t,n){if("function"!==typeof n&&null!==n)throw new TypeError("Class extends value "+String(n)+" is not a constructor or null");function r(){this.constructor=t}e(t,n),t.prototype=null===n?Object.create(n):(r.prototype=n.prototype,new r)}}(),o=this&&this.__assign||function(){return o=Object.assign||function(e){for(var t,n=1,r=arguments.length;n0?this.children[this.children.length-1]:null},enumerable:!1,configurable:!0}),Object.defineProperty(t.prototype,"childNodes",{get:function(){return this.children},set:function(e){this.children=e},enumerable:!1,configurable:!0}),t}(a);t.NodeWithChildren=p;var d=function(e){function t(){var 
t=null!==e&&e.apply(this,arguments)||this;return t.type=i.ElementType.CDATA,t}return r(t,e),Object.defineProperty(t.prototype,"nodeType",{get:function(){return 4},enumerable:!1,configurable:!0}),t}(p);t.CDATA=d;var f=function(e){function t(){var t=null!==e&&e.apply(this,arguments)||this;return t.type=i.ElementType.Root,t}return r(t,e),Object.defineProperty(t.prototype,"nodeType",{get:function(){return 9},enumerable:!1,configurable:!0}),t}(p);t.Document=f;var h=function(e){function t(t,n,r,o){void 0===r&&(r=[]),void 0===o&&(o="script"===t?i.ElementType.Script:"style"===t?i.ElementType.Style:i.ElementType.Tag);var a=e.call(this,r)||this;return a.name=t,a.attribs=n,a.type=o,a}return r(t,e),Object.defineProperty(t.prototype,"nodeType",{get:function(){return 1},enumerable:!1,configurable:!0}),Object.defineProperty(t.prototype,"tagName",{get:function(){return this.name},set:function(e){this.name=e},enumerable:!1,configurable:!0}),Object.defineProperty(t.prototype,"attributes",{get:function(){var e=this;return Object.keys(this.attribs).map((function(t){var n,r;return{name:t,value:e.attribs[t],namespace:null===(n=e["x-attribsNamespace"])||void 0===n?void 0:n[t],prefix:null===(r=e["x-attribsPrefix"])||void 0===r?void 0:r[t]}}))},enumerable:!1,configurable:!0}),t}(p);function m(e){return(0,i.isTag)(e)}function g(e){return e.type===i.ElementType.CDATA}function y(e){return e.type===i.ElementType.Text}function v(e){return e.type===i.ElementType.Comment}function b(e){return e.type===i.ElementType.Directive}function x(e){return e.type===i.ElementType.Root}function k(e,t){var n;if(void 0===t&&(t=!1),y(e))n=new s(e.data);else if(v(e))n=new c(e.data);else if(m(e)){var r=t?w(e.children):[],i=new h(e.name,o({},e.attribs),r);r.forEach((function(e){return e.parent=i})),null!=e.namespace&&(i.namespace=e.namespace),e["x-attribsNamespace"]&&(i["x-attribsNamespace"]=o({},e["x-attribsNamespace"])),e["x-attribsPrefix"]&&(i["x-attribsPrefix"]=o({},e["x-attribsPrefix"])),n=i}else if(g(e)){r=t?w(e.children):[];var a=new d(r);r.forEach((function(e){return e.parent=a})),n=a}else if(x(e)){r=t?w(e.children):[];var l=new f(r);r.forEach((function(e){return e.parent=l})),e["x-mode"]&&(l["x-mode"]=e["x-mode"]),n=l}else{if(!b(e))throw new Error("Not implemented yet: ".concat(e.type));var p=new u(e.name,e.data);null!=e["x-name"]&&(p["x-name"]=e["x-name"],p["x-publicId"]=e["x-publicId"],p["x-systemId"]=e["x-systemId"]),n=p}return n.startIndex=e.startIndex,n.endIndex=e.endIndex,null!=e.sourceCodeLocation&&(n.sourceCodeLocation=e.sourceCodeLocation),n}function w(e){for(var t=e.map((function(e){return k(e,!0)})),n=1;n{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.CASE_SENSITIVE_TAG_NAMES_MAP=t.CASE_SENSITIVE_TAG_NAMES=void 0,t.CASE_SENSITIVE_TAG_NAMES=["animateMotion","animateTransform","clipPath","feBlend","feColorMatrix","feComponentTransfer","feComposite","feConvolveMatrix","feDiffuseLighting","feDisplacementMap","feDropShadow","feFlood","feFuncA","feFuncB","feFuncG","feFuncR","feGaussianBlur","feImage","feMerge","feMergeNode","feMorphology","feOffset","fePointLight","feSpecularLighting","feSpotLight","feTile","feTurbulence","foreignObject","linearGradient","radialGradient","textPath"],t.CASE_SENSITIVE_TAG_NAMES_MAP=t.CASE_SENSITIVE_TAG_NAMES.reduce((function(e,t){return e[t.toLowerCase()]=t,e}),{})},65496:(e,t)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});var n="html",r="head",o="body",i=/<([a-zA-Z]+[0-9]?)/,a=/<head[^]*>/i,l=/<body[^]*>/i,s=function(e,t){throw new Error("This browser does not support 
`document.implementation.createHTMLDocument`")},c=function(e,t){throw new Error("This browser does not support `DOMParser.prototype.parseFromString`")},u="object"===typeof window&&window.DOMParser;if("function"===typeof u){var p=new u;s=c=function(e,t){return t&&(e="<".concat(t,">").concat(e,"</").concat(t,">")),p.parseFromString(e,"text/html")}}if("object"===typeof document&&document.implementation){var d=document.implementation.createHTMLDocument();s=function(e,t){if(t){var n=d.documentElement.querySelector(t);return n&&(n.innerHTML=e),d}return d.documentElement.innerHTML=e,d}}var f,h="object"===typeof document&&document.createElement("template");h&&h.content&&(f=function(e){return h.innerHTML=e,h.content.childNodes}),t.default=function(e){var t,u,p=e.match(i),d=p&&p[1]?p[1].toLowerCase():"";switch(d){case n:var h=c(e);if(!a.test(e))null===(t=null===(g=h.querySelector(r))||void 0===g?void 0:g.parentNode)||void 0===t||t.removeChild(g);if(!l.test(e))null===(u=null===(g=h.querySelector(o))||void 0===g?void 0:g.parentNode)||void 0===u||u.removeChild(g);return h.querySelectorAll(n);case r:case o:var m=s(e).querySelectorAll(d);return l.test(e)&&a.test(e)?m[0].parentNode.childNodes:m;default:return f?f(e):(g=s(e,o).querySelector(o)).childNodes;var g}}},92471:function(e,t,n){"use strict";var r=this&&this.__importDefault||function(e){return e&&e.__esModule?e:{default:e}};Object.defineProperty(t,"__esModule",{value:!0});var o=r(n(65496)),i=n(67731),a=/<(![a-zA-Z\s]+)>/;t.default=function(e){if("string"!==typeof e)throw new TypeError("First argument must be a string");if(!e)return[];var t=e.match(a),n=t?t[1]:void 0;return(0,i.formatDOM)((0,o.default)(e),null,n)}},67731:(e,t,n)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.formatDOM=t.formatAttributes=void 0;var r=n(41141),o=n(15270);function i(e){for(var t={},n=0,r=e.length;n{var r=n(41141),o=n(92471).default,i=n(20840),a=n(10308);o="function"===typeof o.default?o.default:o;var l={lowerCaseAttributeNames:!1};function s(e,t){if("string"!==typeof e)throw new TypeError("First argument must be a string");return""===e?[]:a(o(e,(t=t||{}).htmlparser2||l),t)}s.domToReact=a,s.htmlToDOM=o,s.attributesToProps=i,s.Comment=r.Comment,s.Element=r.Element,s.ProcessingInstruction=r.ProcessingInstruction,s.Text=r.Text,e.exports=s,s.default=s},20840:(e,t,n)=>{var r=n(14210),o=n(74958),i=["checked","value"],a=["input","select","textarea"],l={reset:!0,submit:!0};function s(e){return r.possibleStandardNames[e]}e.exports=function(e,t){var n,c,u,p,d,f={},h=(e=e||{}).type&&l[e.type];for(n in e)if(u=e[n],r.isCustomAttribute(n))f[n]=u;else if(p=s(c=n.toLowerCase()))switch(d=r.getPropertyInfo(p),-1===i.indexOf(p)||-1===a.indexOf(t)||h||(p=s("default"+c)),f[p]=u,d&&d.type){case r.BOOLEAN:f[p]=!0;break;case r.OVERLOADED_BOOLEAN:""===u&&(f[p]=!0)}else o.PRESERVE_CUSTOM_ATTRIBUTES&&(f[n]=u);return o.setStyleProp(e.style,f),f}},10308:(e,t,n)=>{var r=n(96540),o=n(20840),i=n(74958),a=i.setStyleProp,l=i.canTextBeChildOfNode;function s(e){return i.PRESERVE_CUSTOM_ATTRIBUTES&&"tag"===e.type&&i.isCustomComponent(e.name,e.attribs)}e.exports=function e(t,n){for(var c,u,p,d,f,h=(n=n||{}).library||r,m=h.cloneElement,g=h.createElement,y=h.isValidElement,v=[],b="function"===typeof n.replace,x=n.transform||i.returnFirstArg,k=n.trim,w=0,E=t.length;w<E;w++)if(c=t[w],b&&y(p=n.replace(c,w)))E>1&&(p=m(p,{key:p.key||w})),v.push(x(p,c,w));else 
if("text"!==c.type){switch(d=c.attribs,s(c)?a(d.style,d):d&&(d=o(d,c.name)),f=null,c.type){case"script":case"style":c.children[0]&&(d.dangerouslySetInnerHTML={__html:c.children[0].data});break;case"tag":"textarea"===c.name&&c.children[0]?d.defaultValue=c.children[0].data:c.children&&c.children.length&&(f=e(c.children,n));break;default:continue}E>1&&(d.key=w),v.push(x(g(c.name,d,f),c,w))}else{if((u=!c.data.trim().length)&&c.parent&&!l(c.parent))continue;if(k&&u)continue;v.push(x(c.data,c,w))}return 1===v.length?v[0]:v}},74958:(e,t,n)=>{var r=n(96540),o=n(35229).default,i=new Set(["annotation-xml","color-profile","font-face","font-face-src","font-face-uri","font-face-format","font-face-name","missing-glyph"]);var a={reactCompat:!0};var l=r.version.split(".")[0]>=16,s=new Set(["tr","tbody","thead","tfoot","colgroup","table","head","html","frameset"]);e.exports={PRESERVE_CUSTOM_ATTRIBUTES:l,ELEMENTS_WITH_NO_TEXT_CHILDREN:s,isCustomComponent:function(e,t){return-1===e.indexOf("-")?t&&"string"===typeof t.is:!i.has(e)},setStyleProp:function(e,t){if(null!==e&&void 0!==e)try{t.style=o(e,a)}catch(n){t.style={}}},canTextBeChildOfNode:function(e){return!s.has(e.name)},returnFirstArg:function(e){return e}}},19788:e=>{var t=/\/\*[^*]*\*+([^/*][^*]*\*+)*\//g,n=/\n/g,r=/^\s*/,o=/^(\*?[-#/*\\\w]+(\[[0-9a-z_-]+\])?)\s*/,i=/^:\s*/,a=/^((?:'(?:\\'|.)*?'|"(?:\\"|.)*?"|\([^)]*?\)|[^};])+)/,l=/^[;\s]*/,s=/^\s+|\s+$/g,c="";function u(e){return e?e.replace(s,c):c}e.exports=function(e,s){if("string"!==typeof e)throw new TypeError("First argument must be a string");if(!e)return[];s=s||{};var p=1,d=1;function f(e){var t=e.match(n);t&&(p+=t.length);var r=e.lastIndexOf("\n");d=~r?e.length-r:d+e.length}function h(){var e={line:p,column:d};return function(t){return t.position=new m(e),b(),t}}function m(e){this.start=e,this.end={line:p,column:d},this.source=s.source}m.prototype.content=e;var g=[];function y(t){var n=new Error(s.source+":"+p+":"+d+": "+t);if(n.reason=t,n.filename=s.source,n.line=p,n.column=d,n.source=e,!s.silent)throw n;g.push(n)}function v(t){var n=t.exec(e);if(n){var r=n[0];return f(r),e=e.slice(r.length),n}}function b(){v(r)}function x(e){var t;for(e=e||[];t=k();)!1!==t&&e.push(t);return e}function k(){var t=h();if("/"==e.charAt(0)&&"*"==e.charAt(1)){for(var n=2;c!=e.charAt(n)&&("*"!=e.charAt(n)||"/"!=e.charAt(n+1));)++n;if(n+=2,c===e.charAt(n-1))return y("End of comment missing");var r=e.slice(2,n-2);return d+=2,f(r),e=e.slice(n),d+=2,t({type:"comment",comment:r})}}function w(){var e=h(),n=v(o);if(n){if(k(),!v(i))return y("property missing ':'");var r=v(a),s=e({type:"declaration",property:u(n[0].replace(t,c)),value:r?u(r[0].replace(t,c)):c});return v(l),s}}return b(),function(){var e,t=[];for(x(t);e=w();)!1!==e&&(t.push(e),x(t));return t}()}},14210:(e,t,n)=>{"use strict";function r(e,t,n,r,o,i,a){this.acceptsBooleans=2===t||3===t||4===t,this.attributeName=r,this.attributeNamespace=o,this.mustUseProperty=n,this.propertyName=e,this.type=t,this.sanitizeURL=i,this.removeEmptyString=a}const o={};["children","dangerouslySetInnerHTML","defaultValue","defaultChecked","innerHTML","suppressContentEditableWarning","suppressHydrationWarning","style"].forEach((e=>{o[e]=new r(e,0,!1,e,null,!1,!1)})),[["acceptCharset","accept-charset"],["className","class"],["htmlFor","for"],["httpEquiv","http-equiv"]].forEach((([e,t])=>{o[e]=new r(e,1,!1,t,null,!1,!1)})),["contentEditable","draggable","spellCheck","value"].forEach((e=>{o[e]=new 
r(e,2,!1,e.toLowerCase(),null,!1,!1)})),["autoReverse","externalResourcesRequired","focusable","preserveAlpha"].forEach((e=>{o[e]=new r(e,2,!1,e,null,!1,!1)})),["allowFullScreen","async","autoFocus","autoPlay","controls","default","defer","disabled","disablePictureInPicture","disableRemotePlayback","formNoValidate","hidden","loop","noModule","noValidate","open","playsInline","readOnly","required","reversed","scoped","seamless","itemScope"].forEach((e=>{o[e]=new r(e,3,!1,e.toLowerCase(),null,!1,!1)})),["checked","multiple","muted","selected"].forEach((e=>{o[e]=new r(e,3,!0,e,null,!1,!1)})),["capture","download"].forEach((e=>{o[e]=new r(e,4,!1,e,null,!1,!1)})),["cols","rows","size","span"].forEach((e=>{o[e]=new r(e,6,!1,e,null,!1,!1)})),["rowSpan","start"].forEach((e=>{o[e]=new r(e,5,!1,e.toLowerCase(),null,!1,!1)}));const i=/[\-\:]([a-z])/g,a=e=>e[1].toUpperCase();["accent-height","alignment-baseline","arabic-form","baseline-shift","cap-height","clip-path","clip-rule","color-interpolation","color-interpolation-filters","color-profile","color-rendering","dominant-baseline","enable-background","fill-opacity","fill-rule","flood-color","flood-opacity","font-family","font-size","font-size-adjust","font-stretch","font-style","font-variant","font-weight","glyph-name","glyph-orientation-horizontal","glyph-orientation-vertical","horiz-adv-x","horiz-origin-x","image-rendering","letter-spacing","lighting-color","marker-end","marker-mid","marker-start","overline-position","overline-thickness","paint-order","panose-1","pointer-events","rendering-intent","shape-rendering","stop-color","stop-opacity","strikethrough-position","strikethrough-thickness","stroke-dasharray","stroke-dashoffset","stroke-linecap","stroke-linejoin","stroke-miterlimit","stroke-opacity","stroke-width","text-anchor","text-decoration","text-rendering","underline-position","underline-thickness","unicode-bidi","unicode-range","units-per-em","v-alphabetic","v-hanging","v-ideographic","v-mathematical","vector-effect","vert-adv-y","vert-origin-x","vert-origin-y","word-spacing","writing-mode","xmlns:xlink","x-height"].forEach((e=>{const t=e.replace(i,a);o[t]=new r(t,1,!1,e,null,!1,!1)})),["xlink:actuate","xlink:arcrole","xlink:role","xlink:show","xlink:title","xlink:type"].forEach((e=>{const t=e.replace(i,a);o[t]=new r(t,1,!1,e,"http://www.w3.org/1999/xlink",!1,!1)})),["xml:base","xml:lang","xml:space"].forEach((e=>{const t=e.replace(i,a);o[t]=new r(t,1,!1,e,"http://www.w3.org/XML/1998/namespace",!1,!1)})),["tabIndex","crossOrigin"].forEach((e=>{o[e]=new r(e,1,!1,e.toLowerCase(),null,!1,!1)}));o.xlinkHref=new r("xlinkHref",1,!1,"xlink:href","http://www.w3.org/1999/xlink",!0,!1),["src","href","action","formAction"].forEach((e=>{o[e]=new r(e,1,!1,e.toLowerCase(),null,!0,!0)}));const{CAMELCASE:l,SAME:s,possibleStandardNames:c}=n(96811),u=RegExp.prototype.test.bind(new RegExp("^(data|aria)-[:A-Z_a-z\\u00C0-\\u00D6\\u00D8-\\u00F6\\u00F8-\\u02FF\\u0370-\\u037D\\u037F-\\u1FFF\\u200C-\\u200D\\u2070-\\u218F\\u2C00-\\u2FEF\\u3001-\\uD7FF\\uF900-\\uFDCF\\uFDF0-\\uFFFD\\-.0-9\\u00B7\\u0300-\\u036F\\u203F-\\u2040]*$")),p=Object.keys(c).reduce(((e,t)=>{const n=c[t];return n===s?e[t]=t:n===l?e[t.toLowerCase()]=t:e[t]=n,e}),{});t.BOOLEAN=3,t.BOOLEANISH_STRING=2,t.NUMERIC=5,t.OVERLOADED_BOOLEAN=4,t.POSITIVE_NUMERIC=6,t.RESERVED=0,t.STRING=1,t.getPropertyInfo=function(e){return 
o.hasOwnProperty(e)?o[e]:null},t.isCustomAttribute=u,t.possibleStandardNames=p},96811:(e,t)=>{t.SAME=0;t.CAMELCASE=1,t.possibleStandardNames={accept:0,acceptCharset:1,"accept-charset":"acceptCharset",accessKey:1,action:0,allowFullScreen:1,alt:0,as:0,async:0,autoCapitalize:1,autoComplete:1,autoCorrect:1,autoFocus:1,autoPlay:1,autoSave:1,capture:0,cellPadding:1,cellSpacing:1,challenge:0,charSet:1,checked:0,children:0,cite:0,class:"className",classID:1,className:1,cols:0,colSpan:1,content:0,contentEditable:1,contextMenu:1,controls:0,controlsList:1,coords:0,crossOrigin:1,dangerouslySetInnerHTML:1,data:0,dateTime:1,default:0,defaultChecked:1,defaultValue:1,defer:0,dir:0,disabled:0,disablePictureInPicture:1,disableRemotePlayback:1,download:0,draggable:0,encType:1,enterKeyHint:1,for:"htmlFor",form:0,formMethod:1,formAction:1,formEncType:1,formNoValidate:1,formTarget:1,frameBorder:1,headers:0,height:0,hidden:0,high:0,href:0,hrefLang:1,htmlFor:1,httpEquiv:1,"http-equiv":"httpEquiv",icon:0,id:0,innerHTML:1,inputMode:1,integrity:0,is:0,itemID:1,itemProp:1,itemRef:1,itemScope:1,itemType:1,keyParams:1,keyType:1,kind:0,label:0,lang:0,list:0,loop:0,low:0,manifest:0,marginWidth:1,marginHeight:1,max:0,maxLength:1,media:0,mediaGroup:1,method:0,min:0,minLength:1,multiple:0,muted:0,name:0,noModule:1,nonce:0,noValidate:1,open:0,optimum:0,pattern:0,placeholder:0,playsInline:1,poster:0,preload:0,profile:0,radioGroup:1,readOnly:1,referrerPolicy:1,rel:0,required:0,reversed:0,role:0,rows:0,rowSpan:1,sandbox:0,scope:0,scoped:0,scrolling:0,seamless:0,selected:0,shape:0,size:0,sizes:0,span:0,spellCheck:1,src:0,srcDoc:1,srcLang:1,srcSet:1,start:0,step:0,style:0,summary:0,tabIndex:1,target:0,title:0,type:0,useMap:1,value:0,width:0,wmode:0,wrap:0,about:0,accentHeight:1,"accent-height":"accentHeight",accumulate:0,additive:0,alignmentBaseline:1,"alignment-baseline":"alignmentBaseline",allowReorder:1,alphabetic:0,amplitude:0,arabicForm:1,"arabic-form":"arabicForm",ascent:0,attributeName:1,attributeType:1,autoReverse:1,azimuth:0,baseFrequency:1,baselineShift:1,"baseline-shift":"baselineShift",baseProfile:1,bbox:0,begin:0,bias:0,by:0,calcMode:1,capHeight:1,"cap-height":"capHeight",clip:0,clipPath:1,"clip-path":"clipPath",clipPathUnits:1,clipRule:1,"clip-rule":"clipRule",color:0,colorInterpolation:1,"color-interpolation":"colorInterpolation",colorInterpolationFilters:1,"color-interpolation-filters":"colorInterpolationFilters",colorProfile:1,"color-profile":"colorProfile",colorRendering:1,"color-rendering":"colorRendering",contentScriptType:1,contentStyleType:1,cursor:0,cx:0,cy:0,d:0,datatype:0,decelerate:0,descent:0,diffuseConstant:1,direction:0,display:0,divisor:0,dominantBaseline:1,"dominant-baseline":"dominantBaseline",dur:0,dx:0,dy:0,edgeMode:1,elevation:0,enableBackground:1,"enable-background":"enableBackground",end:0,exponent:0,externalResourcesRequired:1,fill:0,fillOpacity:1,"fill-opacity":"fillOpacity",fillRule:1,"fill-rule":"fillRule",filter:0,filterRes:1,filterUnits:1,floodOpacity:1,"flood-opacity":"floodOpacity",floodColor:1,"flood-color":"floodColor",focusable:0,fontFamily:1,"font-family":"fontFamily",fontSize:1,"font-size":"fontSize",fontSizeAdjust:1,"font-size-adjust":"fontSizeAdjust",fontStretch:1,"font-stretch":"fontStretch",fontStyle:1,"font-style":"fontStyle",fontVariant:1,"font-variant":"fontVariant",fontWeight:1,"font-weight":"fontWeight",format:0,from:0,fx:0,fy:0,g1:0,g2:0,glyphName:1,"glyph-name":"glyphName",glyphOrientationHorizontal:1,"glyph-orientation-horizontal":"glyphOrientationHorizontal",glyphOrien
tationVertical:1,"glyph-orientation-vertical":"glyphOrientationVertical",glyphRef:1,gradientTransform:1,gradientUnits:1,hanging:0,horizAdvX:1,"horiz-adv-x":"horizAdvX",horizOriginX:1,"horiz-origin-x":"horizOriginX",ideographic:0,imageRendering:1,"image-rendering":"imageRendering",in2:0,in:0,inlist:0,intercept:0,k1:0,k2:0,k3:0,k4:0,k:0,kernelMatrix:1,kernelUnitLength:1,kerning:0,keyPoints:1,keySplines:1,keyTimes:1,lengthAdjust:1,letterSpacing:1,"letter-spacing":"letterSpacing",lightingColor:1,"lighting-color":"lightingColor",limitingConeAngle:1,local:0,markerEnd:1,"marker-end":"markerEnd",markerHeight:1,markerMid:1,"marker-mid":"markerMid",markerStart:1,"marker-start":"markerStart",markerUnits:1,markerWidth:1,mask:0,maskContentUnits:1,maskUnits:1,mathematical:0,mode:0,numOctaves:1,offset:0,opacity:0,operator:0,order:0,orient:0,orientation:0,origin:0,overflow:0,overlinePosition:1,"overline-position":"overlinePosition",overlineThickness:1,"overline-thickness":"overlineThickness",paintOrder:1,"paint-order":"paintOrder",panose1:0,"panose-1":"panose1",pathLength:1,patternContentUnits:1,patternTransform:1,patternUnits:1,pointerEvents:1,"pointer-events":"pointerEvents",points:0,pointsAtX:1,pointsAtY:1,pointsAtZ:1,prefix:0,preserveAlpha:1,preserveAspectRatio:1,primitiveUnits:1,property:0,r:0,radius:0,refX:1,refY:1,renderingIntent:1,"rendering-intent":"renderingIntent",repeatCount:1,repeatDur:1,requiredExtensions:1,requiredFeatures:1,resource:0,restart:0,result:0,results:0,rotate:0,rx:0,ry:0,scale:0,security:0,seed:0,shapeRendering:1,"shape-rendering":"shapeRendering",slope:0,spacing:0,specularConstant:1,specularExponent:1,speed:0,spreadMethod:1,startOffset:1,stdDeviation:1,stemh:0,stemv:0,stitchTiles:1,stopColor:1,"stop-color":"stopColor",stopOpacity:1,"stop-opacity":"stopOpacity",strikethroughPosition:1,"strikethrough-position":"strikethroughPosition",strikethroughThickness:1,"strikethrough-thickness":"strikethroughThickness",string:0,stroke:0,strokeDasharray:1,"stroke-dasharray":"strokeDasharray",strokeDashoffset:1,"stroke-dashoffset":"strokeDashoffset",strokeLinecap:1,"stroke-linecap":"strokeLinecap",strokeLinejoin:1,"stroke-linejoin":"strokeLinejoin",strokeMiterlimit:1,"stroke-miterlimit":"strokeMiterlimit",strokeWidth:1,"stroke-width":"strokeWidth",strokeOpacity:1,"stroke-opacity":"strokeOpacity",suppressContentEditableWarning:1,suppressHydrationWarning:1,surfaceScale:1,systemLanguage:1,tableValues:1,targetX:1,targetY:1,textAnchor:1,"text-anchor":"textAnchor",textDecoration:1,"text-decoration":"textDecoration",textLength:1,textRendering:1,"text-rendering":"textRendering",to:0,transform:0,typeof:0,u1:0,u2:0,underlinePosition:1,"underline-position":"underlinePosition",underlineThickness:1,"underline-thickness":"underlineThickness",unicode:0,unicodeBidi:1,"unicode-bidi":"unicodeBidi",unicodeRange:1,"unicode-range":"unicodeRange",unitsPerEm:1,"units-per-em":"unitsPerEm",unselectable:0,vAlphabetic:1,"v-alphabetic":"vAlphabetic",values:0,vectorEffect:1,"vector-effect":"vectorEffect",version:0,vertAdvY:1,"vert-adv-y":"vertAdvY",vertOriginX:1,"vert-origin-x":"vertOriginX",vertOriginY:1,"vert-origin-y":"vertOriginY",vHanging:1,"v-hanging":"vHanging",vIdeographic:1,"v-ideographic":"vIdeographic",viewBox:1,viewTarget:1,visibility:0,vMathematical:1,"v-mathematical":"vMathematical",vocab:0,widths:0,wordSpacing:1,"word-spacing":"wordSpacing",writingMode:1,"writing-mode":"writingMode",x1:0,x2:0,x:0,xChannelSelector:1,xHeight:1,"x-height":"xHeight",xlinkActuate:1,"xlink:actuate":"xlinkActuate",xlinkArcrole:1,"
xlink:arcrole":"xlinkArcrole",xlinkHref:1,"xlink:href":"xlinkHref",xlinkRole:1,"xlink:role":"xlinkRole",xlinkShow:1,"xlink:show":"xlinkShow",xlinkTitle:1,"xlink:title":"xlinkTitle",xlinkType:1,"xlink:type":"xlinkType",xmlBase:1,"xml:base":"xmlBase",xmlLang:1,"xml:lang":"xmlLang",xmlns:0,"xml:space":"xmlSpace",xmlnsXlink:1,"xmlns:xlink":"xmlnsXlink",xmlSpace:1,y1:0,y2:0,y:0,yChannelSelector:1,z:0,zoomAndPan:1}},35229:function(e,t,n){"use strict";var r=this&&this.__importDefault||function(e){return e&&e.__esModule?e:{default:e}};Object.defineProperty(t,"__esModule",{value:!0});var o=r(n(51133)),i=n(98917);t.default=function(e,t){var n={};return e&&"string"===typeof e?((0,o.default)(e,(function(e,r){e&&r&&(n[(0,i.camelCase)(e,t)]=r)})),n):n}},98917:(e,t)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.camelCase=void 0;var n=/^--[a-zA-Z0-9-]+$/,r=/-([a-z])/g,o=/^[^-]+$/,i=/^-(webkit|moz|ms|o|khtml)-/,a=/^-(ms)-/,l=function(e,t){return t.toUpperCase()},s=function(e,t){return"".concat(t,"-")};t.camelCase=function(e,t){return void 0===t&&(t={}),function(e){return!e||o.test(e)||n.test(e)}(e)?e:(e=e.toLowerCase(),(e=t.reactCompat?e.replace(a,s):e.replace(i,s)).replace(r,l))}},51133:function(e,t,n){"use strict";var r=this&&this.__importDefault||function(e){return e&&e.__esModule?e:{default:e}};Object.defineProperty(t,"__esModule",{value:!0});var o=r(n(19788));t.default=function(e,t){var n=null;if(!e||"string"!==typeof e)return n;var r=(0,o.default)(e),i="function"===typeof t;return r.forEach((function(e){if("declaration"===e.type){var r=e.property,o=e.value;i?t(r,o,e):o&&((n=n||{})[r]=o)}})),n}},87836:(e,t,n)=>{"use strict";n.d(t,{A:()=>i});var r=n(2642),o=n(35840);function i(e,t){const n=+(0,r.a)(e);return(0,o.w)(e,n+t)}},93001:(e,t,n)=>{"use strict";n.d(t,{p:()=>o});var r=n(87836);function o(e,t){return(0,r.A)(e,1e3*t)}},29708:(e,t,n)=>{"use strict";n.d(t,{W:()=>o});var r=n(93001);function o(e,t){return(0,r.p)(e,-t)}},25232:(e,t,n)=>{"use strict";n.d(t,{Ay:()=>o});var r=n(86614);r.domToReact,r.htmlToDOM,r.attributesToProps,r.Comment,r.Element,r.ProcessingInstruction,r.Text;const o=r}}]); \ No newline at end of file diff --git a/src/web/gui/v2/7959.4f20f4b203e2bad8af39.chunk.js b/src/web/gui/v2/7959.4f20f4b203e2bad8af39.chunk.js deleted file mode 100644 index f61fba954..000000000 --- a/src/web/gui/v2/7959.4f20f4b203e2bad8af39.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},n=(new Error).stack;n&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[n]="e64832dc-26ea-4e8b-9ee9-4433adcc06fb",e._sentryDebugIdIdentifier="sentry-dbid-e64832dc-26ea-4e8b-9ee9-4433adcc06fb")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[7959],{7959:(e,n,t)=>{t.r(n),t.d(n,{default:()=>g});t(62953);var a=t(96540),o=t(83199),l=t(3914),s=t(87659),r=t(46741),c=t(98496),i=t(39522),d=t(4659),m=t(84976);const u={noRoomPermission:{header:e=>"Your role doesn't give you permission to access any of the troubleshooting information on ".concat(e,".")},noSpacePermission:{header:e=>"Your role doesn't give you permission to access any information on ".concat(e,".")},noRooms:{header:e=>"You currently don't have permission to see any room on 
".concat(e,".")}},f=e=>{let{onClick:n,spaceName:t}=e;return a.createElement(a.Fragment,null,a.createElement(o.Text,{margin:[2,0,0]},"You can also leave this Space if you wish. "),a.createElement(o.Button,{flavour:"borderless",icon:"switch_off",label:"Leave ".concat(t),margin:[0,0,0,7],onClick:n,strong:!0}))},g=e=>{let{reason:n}=e;const t=(0,l.ap)(),[g,,b,p]=(0,s.A)(),{header:h}=u[n],y=(0,l.bq)(),E=(0,r.JT)("billing:ReadBasic");return a.createElement(c.A,{title:h(t.name)},a.createElement(a.Fragment,null,"noSpacePermission"===n&&a.createElement(a.Fragment,null,a.createElement(o.Text,null,"Please contact the Space administrators if this is unexpected."),a.createElement(f,{onClick:b,spaceName:t.name})),E&&"noRooms"!==n&&a.createElement(o.Text,null,"To manage Plan & Billing information please"," ",a.createElement(d.A,{as:m.N_,to:"/spaces/".concat(y,"/settings/billing")},"click here.")),"noRooms"===n&&a.createElement(a.Fragment,null,a.createElement(o.Text,null,"Please contact a Space administrator or manager if this is unexpected."),a.createElement(f,{onClick:b,spaceName:t.name})),g&&a.createElement(i.A,{id:t.id,name:t.name,onClose:p})))}},39522:(e,n,t)=>{t.d(n,{A:()=>v});var a=t(58168),o=t(96540),l=t(83199),s=t(47767),r=t(22292),c=t(55463),i=(t(17333),t(98992),t(54520),t(62953),t(47444)),d=t(71835),m=t(54702),u=t(55189),f=t(3914),g=t(48849),b=t(56639),p=t(14994);var h=t(57992);const y="leave-space-dialog",E="leaveSpaceDialog",v=e=>{let{id:n,name:t,onClose:v}=e;const w=(0,s.Zp)(),C=(0,r.uW)("id"),A=(0,c.Gi)(),S=(0,c.i3)(),k=(0,c.pB)(),_=(e=>{const[,n]=(0,d.A)();return(0,i.Zs)((t=>{let{snapshot:a,set:o,reset:l}=t;return async t=>{let{currentUserId:s,onSuccess:r,onError:i}=t;const d=await a.getPromise((0,f.nC)("ids")),h=d.filter((n=>e!==n)),[y]=h;if(!y)return void n({header:"Spaces",text:u.sh.leave});const{slug:E}=y&&await a.getPromise((0,b.Ay)(y));o((0,f.nC)("ids"),h),o(g.A,(n=>n.filter((n=>n!==e))));try{await(0,m.XY)(e,[s]),r&&r(E),(0,c.Z8)(l,e),(0,p.Is)(l,e),l((0,b.Ay)(e))}catch(v){o((0,f.nC)("ids"),d),i&&i()}}}),[e])})(n),L=(0,h.A)(n),P=(0,o.useCallback)((e=>w("/spaces/".concat(e))),[]),Y=1===A.length,I=1===S.length&&k,x=Y?{confirmLabel:"Yes, leave","data-ga":"".concat(y,"-last-member"),"data-testid":"".concat(E,"LastMember"),handleConfirm:()=>L({onSuccess:P}),message:o.createElement(o.Fragment,null,"If you leave, space ",o.createElement("strong",null,t)," will be deleted immediately.",o.createElement("br",null),"Are you sure you want to continue?"),title:"Leave and delete ".concat(t," space")}:I?{confirmLabel:"Give rights","data-ga":"".concat(y,"-last-admin"),"data-testid":"".concat(E,"LastAdmin"),handleConfirm:()=>w("users"),isConfirmPositive:!0,message:o.createElement(o.Fragment,null,"You are the last admin of ",o.createElement("strong",null,t)," space. 
Please give admin rights to another member so you can leave this space."),title:"Leave ".concat(t," space")}:{confirmLabel:"Yes, leave","data-ga":y,"data-testid":E,handleConfirm:()=>_({currentUserId:C,onSuccess:P}),message:o.createElement(o.Fragment,null,"You are about to leave ",o.createElement("strong",null,t)," space.",o.createElement("br",null),"Are you sure you want to continue?"),title:"Leave ".concat(t," space")};return o.createElement(l.ConfirmationDialog,(0,a.A)({handleDecline:v},x))}}}]); \ No newline at end of file diff --git a/src/web/gui/v2/8059.4fdc76bb2cac1f74b41b.chunk.js b/src/web/gui/v2/8059.4fdc76bb2cac1f74b41b.chunk.js deleted file mode 100644 index f86454512..000000000 --- a/src/web/gui/v2/8059.4fdc76bb2cac1f74b41b.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="b3079749-081a-4019-8834-624f6596c442",e._sentryDebugIdIdentifier="sentry-dbid-b3079749-081a-4019-8834-624f6596c442")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[8059],{62232:(e,t,a)=>{a.d(t,{A:()=>g});var n=a(58168),l=(a(17333),a(41393),a(98992),a(54520),a(81454),a(8711)),r=a(68090),o=a.n(r),c=a(96540),i=a(83199);const s=(0,l.default)(i.Box).withConfig({displayName:"breadcrumbs__StyledItemContainer",componentId:"sc-3u39st-0"})([""]),d=(0,l.css)(["&:hover{color:",";}"],(e=>{let{withHover:t,theme:a}=e;return t&&(0,i.getColor)("success")({theme:a})})),u=(0,l.default)(i.Text).withConfig({displayName:"breadcrumbs__StyledText",componentId:"sc-3u39st-1"})(["",""],d),m=(0,l.default)(i.TextSmall).withConfig({displayName:"breadcrumbs__StyledTextSmall",componentId:"sc-3u39st-2"})(["",""],d),g=e=>{let{items:t,isBig:a,showBackButton:l=!0,testid:r="",...d}=e;const g=(0,c.useMemo)((()=>{if(null===t||void 0===t||!t.length||!l)return null;return o()(t.filter((e=>{let{onClick:t}=e;return!!t}))).onClick}),[t,l]);if(null===t||void 0===t||!t.length)return null;const h=a?u:m;return c.createElement(i.Flex,(0,n.A)({gap:4},d),l&&c.createElement(i.Button,{onClick:g,icon:"chevron_left",label:"Back",neutral:!0,flavour:"hollow",small:!0,padding:[0,2,0,1],textTransform:"uppercase","data-testid":"".concat(r,"-breadcrumbs-backButton")}),c.createElement(i.Flex,{gap:2,alignItems:"center"},t.map(((e,t)=>{let{isDisabled:a,name:l,onClick:o}=e;return c.createElement(s,(0,n.A)({key:t,alignItems:"center"},o&&{cursor:"pointer",onClick:o},{"data-testid":"".concat(r,"-breadcrumbs-level-").concat(t)}),c.createElement(h,{color:a&&"textLite","data-testid":"".concat(r,"-breadcrumbs-level-").concat(t),withHover:!!o},0!==t&&" / ",l))}))))}},88059:(e,t,a)=>{a.r(t),a.d(t,{default:()=>Ve});var n=a(96540),l=a(47444),r=a(51074),o=a(83084),c=a(96935),i=a(99292),s=a(83199),d=a(67990),u=a(99739),m=a(23452),g=a(64118),h=a(28738),f=(a(41393),a(81454),a(62953),a(63950)),b=a.n(f),C=a(58168),E=a(8711),y=a(40267),p=a(33195),v=a(47130);const w=(0,E.default)(s.Text).attrs({whiteSpace:"normal",wordBreak:"break-word"}).withConfig({displayName:"tableSchema__TextCell",componentId:"sc-u3gyvm-0"})(["",""],(e=>{let{monospace:t}=e;return t?"font-family: monospace;":""})),A=e=>t=>{let{getValue:a}=t;return 
n.createElement(w,e,a())},x=(0,E.default)(s.Icon).withConfig({displayName:"tableSchema__StyledIcon",componentId:"sc-u3gyvm-1"})(["vertical-align:middle;"]),I=e=>{let{flavour:t,value:a,...l}=e;return n.createElement(s.Flex,(0,C.A)({padding:[0,2]},l,{justifyContent:"center"}),0===a?n.createElement(w,{textAlign:"center"},"-"):n.createElement(y.A,{flavour:t,"data-testid":"alertView-alertPill-".concat(t)},a))},T={id:"chevron",header:"",cell:()=>n.createElement(x,{rotate:2,name:"chevron_left",color:"textDescription"})},N={displayName:!1},S=(0,v.A)(w),k=[{id:"summary",accessorKey:"summary",header:"Alert",cell:e=>{let{getValue:t,row:a}=e;const l=t(),{name:r}=(null===a||void 0===a?void 0:a.original)||{},o=l!=r?l:null;return n.createElement(S,{monospace:!0,strong:!0,"data-testid":"alertName",tooltip:o},r)},fullWidth:!0},{id:"nodesRunningAlert",accessorKey:"nodeCount",header:"Nodes Running Alert",cell:e=>{let{getValue:t}=e;return n.createElement(w,{textAlign:"center","data-testid":"nodesRunningAlert"},t())}},{id:"alertInstances",accessorKey:"instanceCount",header:"Alert Instances",cell:e=>{let{getValue:t}=e;return n.createElement(w,{textAlign:"center","data-testid":"alertInstances"},t())}},{id:"criticalNodes",accessorKey:"criticalCount",header:"Critical",cell:e=>{let{getValue:t}=e;return n.createElement(I,{flavour:"critical",value:t(),"data-testid":"criticalNodes"})}},{id:"warningNodes",accessorKey:"warningCount",header:"Warning",cell:e=>{let{getValue:t}=e;return n.createElement(I,{flavour:"warning",value:t(),"data-testid":"warningNodes"})}},{id:"clearNodes",accessorKey:"clearCount",header:"Clear",cell:e=>{let{getValue:t}=e;return n.createElement(I,{flavour:"clear",value:t(),"data-testid":"clearNodes"})}},{id:"errorNodes",accessorKey:"errorCount",header:"Error",cell:e=>{let{getValue:t}=e;return n.createElement(I,{flavour:"neutral",value:t(),"data-testid":"errorNodes"})}},{id:"uniqueConfigs",accessorKey:"configCount",header:"Configs",cell:e=>{let{getValue:t}=e;return n.createElement(w,{textAlign:"center","data-testid":"uniqueConfigs"},t())}},{id:"silencing",accessorKey:"silencing",header:"Silencing",cell:e=>{let{getValue:t}=e;return n.createElement(p.A,{silencing:t()})}},{id:"displayName",accessorKey:"displayName",header:"Name",cell:e=>{let{getValue:t}=e;return n.createElement(w,{"data-testid":"alertName"},t())}}];var V=a(45976),K=a(87815),_=a(47767),R=a(97054),D=a(47762);const L=(0,E.default)(s.TextSmall).withConfig({displayName:"styled__StyledLinkText",componentId:"sc-1y769u1-0"})(["",""],"\n cursor: pointer;\n overflow-wrap: break-word;\n max-width: 140px;\n &:hover {\n opacity: 0.7;\n }\n"),F=(0,E.default)(s.TextSmall).withConfig({displayName:"styled__StyledText",componentId:"sc-1y769u1-1"})(["&:hover{color:",";text-decoration:underline;}"],(0,s.getColor)("accent"));var B=a(75793),M=a(4659),P=a(29217),W=a(63119),z=a(69418),G=a(73865),j=a(13271),q=a(93155);const U=e=>{let{row:t,value:a,openModal:l}=e;const{id:r,info:o}=t.original,c=(0,n.useCallback)((()=>{l({alertId:r})}),[r]);return n.createElement(M.A,{Component:s.TextSmall,flavour:"tableLink",cursor:"pointer",showToolTip:!0,content:o,onClick:c,"data-testid":"alertsTableSchema-alertName-".concat(a),"data-ga":"alerts-table::click-alert::alerts-view::".concat(a)},a)},H=e=>{let{getValue:t}=e;const a=(0,_.Zp)(),l=t(),[,r]=(0,j.sK)(),o="Go to ".concat(l," context in charts overview"),[c,i]=(0,R.Ay)(),d=(0,n.useCallback)((()=>{r(l),a("/spaces/".concat(c,"/rooms/").concat(i,"/overview"))}),[l,r,a,c,i]);return 
n.createElement(M.A,{Component:s.TextSmall,flavour:"tableLink",cursor:"pointer",showToolTip:!0,content:o,onClick:d,"data-testid":"alertsTableSchema-alertContext-".concat(l)},l)},O=e=>{let{row:t,nodeId:a}=e;const{id:l}=t.original,r=(0,D.xY)(a,"name"),o=(0,D.d3)(a,{alertId:l}),[,,c]=(0,z.A)(),{hasLimitations:i,maxNodes:d,preferredNodes:u}=(0,G.A)(),m=q.bO&&i&&c>d&&!u.includes(a);return n.createElement(s.Flex,{width:35,"data-testid":"alertsTableSchema-nodeName-".concat(r),"data-ga":"alerts-table::click-node::alerts-view::".concat(r)},m?n.createElement(P.A,{content:n.createElement(W.u,{name:r}),align:"bottom",isBasic:!0},n.createElement(s.Flex,{gap:1},n.createElement(s.Icon,{name:"padlock",size:"small",color:"text"}),n.createElement(s.Text,null,n.createElement(B.A,{text:r,TextComponent:s.Text})))):n.createElement(L,{onClick:o},n.createElement(B.A,{text:r,TextComponent:F})))},Y=e=>{let{row:t,value:a}=e;const{id:l,nodeId:r,instanceName:o}=t.original,c=(0,D.d3)(r,{alertId:l});return n.createElement(s.Flex,{"data-testid":"alertsTableSchema-chart-id-".concat(a),"data-ga":"alerts-table::click-chartId::alerts-view::".concat(a)},n.createElement(L,{onClick:c},n.createElement(B.A,{TextComponent:F,text:o})))},J={},Q=e=>{let{openModal:t}=e;return[{id:"status",accessorKey:"status",header:"Status",cell:e=>{let{getValue:t}=e;return n.createElement(y.A,{flavour:t(),"data-testid":"alertsTable-alertPill","data-ga":"alerts-table::click-status::alerts-view::".concat(t())},t())}},{id:"summary",accessorKey:"summary",header:"Alert",cell:e=>{let{row:a,getValue:l}=e;return n.createElement(U,{row:a,value:l(),openModal:t})},fullWidth:!0},{id:"displayName",accessorKey:"displayName",header:"Name",cell:e=>{let{row:a,getValue:l}=e;return n.createElement(U,{row:a,value:l(),openModal:t})}},{id:"context",accessorKey:"context",header:"Context",cell:H},{id:"instance",header:"Instance",accessorKey:"instance",cell:e=>{let{row:t,getValue:a}=e;return n.createElement(Y,{row:t,value:a()})}},{id:"nodeName",header:"Node",accessorKey:"nodeId",cell:e=>{let{row:t,getValue:a}=e;return n.createElement(O,{row:t,nodeId:a()})}},{id:"value",header:"Latest value",accessorKey:"value",cell:e=>{let{row:t,getValue:a}=e;const{status:l,units:r}=t.original;return n.createElement(V.A,{loaded:!0,status:l,units:r,value:a(),"data-testid":"alertsTableSchema-alertValue","data-ga":"alerts-table::click-alert-value::alerts-view"})}},{id:"lastUpdated",header:"Updated at",accessorKey:"lastUpdated",cell:e=>{let{getValue:t}=e;return n.createElement(K.A,{rawTime:t(),secs:!0,"data-testid":"alertsTableSchema-latestUpdated"})}},{id:"lastStatusChangeValue",header:"Triggered value",accessorKey:"lastStatusChangeValue",cell:e=>{let{row:t,getValue:a}=e;const{status:l,units:r}=t.original;return n.createElement(V.A,{loaded:!0,status:l,units:r,value:a(),"data-testid":"alertsTableSchema-triggeredValue","data-ga":"alerts-table::click-triggered-value::alerts-view"})}},{id:"lastStatusChange",header:"Triggered at",accessorKey:"lastStatusChange",cell:e=>{let{getValue:t}=e;return n.createElement(K.A,{rawTime:t(),secs:!0,"data-testid":"alertsTableSchema-lastStatusChange"})}},{id:"silencing",accessorKey:"silencing",header:"Silencing",cell:e=>{let{getValue:t}=e;return n.createElement(p.A,{silencing:t()})}}]},Z=[{id:"summary",accessorKey:"summary",header:"Alert",cell:A({"data-testid":"alertInstanceName"}),fullWidth:!0},{id:"instance",header:"Instance",accessorKey:"instance",cell:e=>{let{row:t,getValue:a}=e;return 
n.createElement(Y,{row:t,value:a()})}},{id:"status",accessorKey:"status",header:"Status",cell:e=>{let{getValue:t}=e;return n.createElement(s.Flex,{padding:[0,2]},n.createElement(y.A,{flavour:t(),"data-testid":"alertView-alertPill-value",border:void 0},t()))},size:80},{id:"lastStatusChangeValue",accessorKey:"lastStatusChangeValue",header:"Triggered value",cell:e=>{let{getValue:t,row:a}=e;const{status:l,units:r}=a.original;return n.createElement(V.A,{loaded:!0,status:l,units:r,value:t(),"data-testid":"alertsTableSchema-alertValue"})}},{id:"lastStatusChange",accessorKey:"lastStatusChange",header:"Triggered at",cell:e=>{let{getValue:t}=e;return n.createElement(K.A,{rawTime:t(),secs:!0,nowrap:!0})}},{id:"displayName",accessorKey:"displayName",header:"Name",cell:A({"data-testid":"alertInstanceName"})}];var $=a(92136),X=a(3914),ee=a(69765),te=a(51913),ae=a(8239);const ne=()=>{const e=(0,_.Zp)(),t=(0,X.bq)(),a=(0,ee.ID)(),[,l]=(0,te.bg)(),r=(0,X.dg)(),{setState:o}=(0,ae.L5)();return{rowActions:(0,n.useMemo)((()=>r?{}:{alertConfiguration:{handleAction:a=>{let{nd:n,name:l}=a;o({nodeId:n,alertName:l,isAlert:!0}),e("/spaces/".concat(t,"/settings/configurations"),{replace:!0})},icon:"pencilOutline",tooltipText:"Edit this alert configuration",confirmation:!1},goto:{handleAction:a=>{const{name:n,nodeId:l,context:r,instance:o}=a,c={alertName:n,context:r,instance:o,nodeId:l};e("/spaces/".concat(t,"/settings/notifications#notificationsActiveTab=1&silencingRulePrefill=").concat(JSON.stringify(c)))},tooltipText:"Add new silencing rule",icon:"alarm_off",dataGa:"alerts-table::click-link-to-manager::active-alerts-table"},info:{handleAction:e=>l({roomId:a,alert:e}),tooltipText:"Get some help from Netdata Assistant",icon:"netdataAssistant",dataGa:"alerts::click-assistant-icon::active-alerts-table"}}),[r,l])}},le={right:["actions"]},re=()=>{const[{alertName:e,nodeId:t}]=(0,g.DV)(),a=(0,g.q1)(e,t),[l,r,,o]=(0,$.A)(),{rowActions:c}=ne(),i=(0,n.useCallback)((e=>r({alertId:e.data.id})),[e,t]);return n.createElement(n.Fragment,null,n.createElement(s.Table,{data:a,dataColumns:Z,onClickRow:i,enableSorting:!0,enableColumnVisibility:!0,enableResizing:!0,columnPinning:le,rowActions:c}),l&&o)};a(14905),a(98992),a(8872),a(25509),a(65223),a(60321),a(41927),a(11632),a(64377),a(66771),a(12516),a(68931),a(52514),a(35694),a(52774),a(49536),a(21926),a(94483),a(16215);var oe=a(54830);const ce=[{id:"nodeName",accessorKey:"nodeId",header:"Node",cell:e=>{let{getValue:t,row:a}=e;const[,,l]=(0,z.A)(),{hasLimitations:r,maxNodes:o,preferredNodes:c}=(0,G.A)(),i=r&&l>o,s=(0,D.xY)(t(),"name");return n.createElement(oe.A,{nodeId:a.original.nodeId,name:s,preferredNodes:c,showLockedNodes:i})}},{id:"nodes",accessorKey:"nodeCount",header:"Node Instances",cell:A({"data-testid":"nodes",textAlign:"center"})},{id:"instances",accessorKey:"instanceCount",header:"Alert Instances",cell:A({"data-testid":"instances",textAlign:"center"})},{id:"critical",accessorKey:"criticalCount",header:"Critical",cell:e=>{let{getValue:t}=e;return n.createElement(I,{flavour:"critical",value:t()})}},{id:"warning",accessorKey:"warningCount",header:"Warning",cell:e=>{let{getValue:t}=e;return n.createElement(I,{flavour:"warning",value:t()})}},{id:"clear",accessorKey:"clearCount",header:"Clear",cell:e=>{let{getValue:t}=e;return n.createElement(I,{flavour:"clear",value:t()})}},{id:"error",accessorKey:"errorCount",header:"Error",cell:e=>{let{getValue:t}=e;return 
n.createElement(I,{flavour:"neutral",value:t(),"data-testid":"errors"})}},{id:"configs",accessorKey:"configCount",header:"Configs",cell:A({"data-testid":"configs",textAlign:"center"})},T],ie={right:["actions"]},se=()=>{const[{alertName:e},t]=(0,g.DV)(),a=(0,g.q1)(e),l=(0,n.useCallback)((a=>{t({alertName:e,nodeId:a.data.nodeId})}),[e,t]),r=(0,n.useMemo)((()=>Object.values(a.reduce(((e,t)=>{switch(e[t.nodeId]||(e[t.nodeId]={nodeId:t.nodeId,nodeCount:1,instanceCount:0,criticalCount:0,warningCount:0,clearCount:0,errorCount:0,configCount:0,configs:new Set}),e[t.nodeId].instanceCount=e[t.nodeId].instanceCount+1,t.status){case"warning":e[t.nodeId].warningCount=e[t.nodeId].warningCount+1;break;case"critical":e[t.nodeId].criticalCount=e[t.nodeId].criticalCount+1;break;case"clear":e[t.nodeId].clearCount=e[t.nodeId].clearCount+1;break;default:t.value||(e[t.nodeId].errorCount=e[t.nodeId].errorCount+1)}return e[t.nodeId].configCount=e[t.nodeId].configs.add(t.aci).size,e}),{}))),[a]);return n.createElement(s.Table,{data:r,dataColumns:ce,onClickRow:l,enableSorting:!0,enableColumnVisibility:!0,enableResizing:!0,columnPinning:ie})};var de=a(62232);const ue=()=>{var e;const[{alertName:t,nodeId:a},l]=(0,g.DV)(),r=(0,D.Gt)(a?[a]:[]),o=null===r||void 0===r||null===(e=r[0])||void 0===e?void 0:e.name,c=(0,n.useMemo)((()=>o?[{name:"All Alert Configs",onClick:()=>{l({alertName:null,nodeId:null})}},{name:t,onClick:()=>{l({alertName:t,nodeId:null})}},{name:o}]:t?[{name:"All Alert Configs",onClick:()=>{l({alertName:null,nodeId:null})}},{name:t}]:null),[t,o,l]);return n.createElement(de.A,{padding:[2,0,0],items:c,testid:"alertConfigurations"})},me=()=>n.createElement(s.Text,null,"Loading..."),ge={right:["actions"]},he=(0,n.memo)((()=>{const e=(0,g.oU)(),[{alertName:t,nodeId:a},l]=(0,g.DV)(),[,r]=(0,te.Ws)(),o=(0,n.useCallback)((e=>{l({alertName:e.data.name})}),[]),{rowActions:c}=(()=>{const e=(0,ee.ID)(),[,t]=(0,te.bg)(),[,a]=(0,g.DV)(),l=(0,X.dg)();return{rowActions:(0,n.useMemo)((()=>({goto:{handleAction:e=>{let{name:t}=e;return a({alertName:t})},icon:"chevron_right",tooltipText:"Show configuration"},...!l&&{info:{handleAction:a=>t({roomId:e,alert:a}),tooltipText:"Get some help from Netdata Assistant",icon:"netdataAssistant",iconColor:"primary",neutral:!1,dataGa:"alerts::click-assistant-icon::active-alerts-table"}}})),[l,t])}})();return(0,n.useEffect)((()=>{const t=e.map((e=>{let{name:t}=e;return{name:t}}));r(t)}),[e]),n.createElement(s.Flex,{column:!0,gap:4,height:"calc(100% - 100px)",overflow:"hidden"},n.createElement(ue,null),a&&n.createElement(n.Suspense,{fallback:n.createElement(me,null)},n.createElement(re,null)),!a&&t&&n.createElement(n.Suspense,{fallback:n.createElement(me,null)},n.createElement(se,null)),!a&&!t&&n.createElement(s.Table,{data:e,dataColumns:k,enableSorting:!0,enableColumnVisibility:!0,columnPinning:ge,enableColumnPinning:!0,enableResizing:!0,rowActions:c,onClickRow:o,columnVisibility:N,onSearch:b(),meta:{searchStyles:{width:"350px"}}}))})),fe=he,be=[{id:"lastStatusChange",desc:!0}],Ce={right:["actions"]},Ee=()=>{const e=(0,m.QD)({extraKey:"alerts"}),[,t]=(0,te.Ws)(),[a,l,,r]=(0,$.A)(),{rowActions:o}=ne();return(0,n.useEffect)((()=>{t(e)}),[e]),n.createElement(n.Fragment,null,n.createElement(s.Table,{enableSorting:!0,enableColumnVisibility:!0,columnVisibility:J,data:e,dataColumns:Q({openModal:l}),rowActions:o,sortBy:be,columnPinning:Ce,onSearch:b(),enableColumnPinning:!0,enableResizing:!0,meta:{searchStyles:{width:"350px"}}}),a&&r)};var ye=a(21290);const pe=()=>{const 
e=(()=>{const{localeTimeString:e,localeDateString:t}=(0,ye.$j)(),a=(0,r.s)("updatedAt");return(0,n.useMemo)((()=>{if(!a)return;const n=new Date(a);return"".concat(t(n,{long:!0})," ").concat(e(n,{secs:!0}))}),[a,e,t])})();return n.createElement(s.Flex,{column:!0,justifyContent:"center",alignItems:"center",gap:4},n.createElement(s.Icon,{name:"checkmark",width:"168px",height:"168px",margin:[13,0,0]}),n.createElement(s.H3,{margin:[10,0,0]},"This room has no active alerts, you are all good!"),n.createElement(s.Text,{margin:[2,0,0,0]},"Visit later or check your notifications"),n.createElement(s.TextSmall,{color:"textLite"},"Last updated at: ",e))},ve=(0,n.memo)(pe);var we=a(68831);const Ae=()=>n.createElement(s.Flex,{column:!0,justifyContent:"center",alignItems:"center",margin:[12,0,0]},n.createElement("img",{src:"".concat(we.A.assetsBaseURL,"/img/no-filter-results.png"),alt:"No Filter Results",title:"No Filter Results"}),n.createElement(s.H3,null,"There are no results for these filtering criteria")),xe=(0,n.memo)(Ae);var Ie=a(63314);const Te=e=>{let{count:t}=e;const a=(0,r.s)("ids"),l=(0,r.s)("loaded"),o=(0,g.Gq)(a).length;return l?o?t?n.createElement(Ie.Ay,{feature:"AlertsActive",mode:"AlertsTable"},n.createElement(Ee,null)):n.createElement(Ie.Ay,{feature:"AlertsActive",mode:"NoFilterResultsIndication"},n.createElement(xe,null)):n.createElement(Ie.Ay,{feature:"AlertsActive",mode:"NoAlertsIndication"},n.createElement(ve,null)):n.createElement(h.A,{title:"Loading alerts..."})};var Ne=a(90204);const Se=()=>{const{selectedTab:e,handleTabChange:t}=(0,Ne.A)(),a=(0,m.QD)({extraKey:"alerts"}).length,l=(0,g.zu)();return(0,d.nj)()&&!l.length?n.createElement(Ie.Ay,{feature:"Alerts",mode:"NoNodesView"},n.createElement(u.A,null)):n.createElement(s.Tabs,{selected:e,onChange:t,height:"100%",padding:[4,4,0]},n.createElement(s.Tab,{label:n.createElement(s.Text,null,"Raised (",a,")"),maxWidth:"100%","data-testid":"alertTabs-activeAlerts"},n.createElement(Te,{count:a})),n.createElement(s.Tab,{label:n.createElement(s.Text,null,"Alert Configurations"),maxWidth:"100%","data-testid":"alertTabs-configurations"},n.createElement(n.Suspense,{fallback:n.createElement(h.A,{title:"Loading alert configurations..."})},n.createElement(Ie.Ay,{feature:"AlertsConfiguration"},n.createElement(fe,null)))))},ke=["chartIndexing","alerts","info","config"],Ve=()=>{const e=(0,r.s)("ids"),t=(0,l.vc)(c.J7),a=!!e.length&&1!==t;return n.createElement(o.A,{sidebar:a&&n.createElement(i.Ay,{title:"Alerts",flavour:"alerts",loaded:!0,hasSearch:!1,hiddenTabs:ke})},n.createElement(Se,null))}}}]); \ No newline at end of file diff --git a/src/web/gui/v2/8239.c85fc9f3599f198a9efb.chunk.js b/src/web/gui/v2/8239.c85fc9f3599f198a9efb.chunk.js deleted file mode 100644 index 11ea868c2..000000000 --- a/src/web/gui/v2/8239.c85fc9f3599f198a9efb.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="4c4444fc-a2ae-4dd7-a82e-f2eeee2a30f0",e._sentryDebugIdIdentifier="sentry-dbid-4c4444fc-a2ae-4dd7-a82e-f2eeee2a30f0")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[8239],{16607:(e,t,n)=>{n.d(t,{A:()=>l});var 
o=n(58168),r=n(96540),a=n(83199),d=n(29217);const l=e=>t=>{let{tooltip:n,onClick:l,...i}=t;const c=(0,r.useRef)(),s=(0,r.useCallback)((e=>{var t,n;null===l||void 0===l||l(e),null===(t=c.current)||void 0===t||null===(n=t.blur)||void 0===n||n.call(t)}),[l,c.current]),u=(0,r.useMemo)((()=>({...i,...l?{onClick:s}:{}})),[s,i]);return r.createElement(d.A,{align:"bottom",content:n},r.createElement(a.Box,null,r.createElement(e,(0,o.A)({ref:c},u))))}},70895:(e,t,n)=>{n.d(t,{A:()=>g,j:()=>h});var o=n(58168),r=(n(3064),n(98992),n(72577),n(62953),n(96540)),a=n(83199),d=n(3914),l=n(20378),i=n(46741),c=n(19673);const s={Business:!0,Pro:!0,Homelab:!0,Community:!1},u=()=>{const{loaded:e,value:t}=(0,c.JN)();return{loaded:e,isPaid:e&&s[null===t||void 0===t?void 0:t.class]}};const f=(0,n(16607).A)(a.Button),p=["Business","Pro","Homelab"],h=e=>{let{view:t,edit:n,ifForbidden:o="disabled",tooltip:a,disabled:c}=e;const s=(0,d.dg)(),f=(0,i.JT)("agent:ReadDynCfg"),h=(0,i.JT)("agent:EditDynCfg"),g=(0,i.JT)("billing:Manage"),b=(0,d.UV)("ids"),m=(0,d.jw)(b),[v]=(0,l.Q8)(),{userStatus:y,spaceId:k}=v||{},C=s&&"loggedIn"==y,_=!!k&&function(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:[];if(!e)return!1;const{plan:n}=t.find((t=>{let{id:n}=t;return n==e}))||{};for(let o=0;o<=p.length;o++)if(n.startsWith(p[o]))return!0;return!1}(k,m),A=C?_:n?h:!t||f,{loaded:P,isPaid:w}=u();return{hidden:"hide"===o&&!A,isDisabled:"disabled"===o&&!A||c,tooltipText:(0,r.useMemo)((()=>A?a:P?w||s?"User does not have sufficient permissions to perform this action. Please contact your admin to grant access.":"Upgrade your plan to use this feature.":null),[P,w,A,a]),showUpgradeButton:g&&!A&&!w,hasPermission:A}},g=(0,r.forwardRef)(((e,t)=>{let{view:n,edit:a,ifForbidden:d,tooltip:l,disabled:i,...c}=e;const{hidden:s,isDisabled:u,tooltipText:p}=h({view:n,edit:a,ifForbidden:d,tooltip:l,disabled:i});return s?null:r.createElement(f,(0,o.A)({ref:t,disabled:u,tooltip:p},c))}))},94390:(e,t,n)=>{n.d(t,{$7:()=>r,D9:()=>f,DR:()=>d,FL:()=>o,Fn:()=>p,Sd:()=>a,T_:()=>h,V6:()=>s,V8:()=>g,bM:()=>c,g7:()=>i,gh:()=>u,lp:()=>b,q4:()=>l,zy:()=>m});const o=51,r="/health/alerts/prototypes",a="health:alert:prototype",d="".concat(a,":"),l="dyncfg",i={template:{color:"success"},default:{color:"text"}},c={accepted:{label:"Accepted",color:"success",icon:"checkmark_s"},disabled:{label:"Disabled",color:["neutral","grey100"],icon:"none_selected"},failed:{label:"Failed",color:["red","red100"],icon:"warning_triangle"},orphan:{label:"Orphan",color:["neutral","grey90"],icon:"checkmark_partial_s"},incomplete:{label:"Incomplete",color:["yellow","yellow100"],icon:"incindent_manager"},running:{label:"Running",color:"success",icon:"checkmark_s"}},s={internal:{icon:"internalConfig"},stock:{icon:"stockConfig"},user:{icon:"userConfig"},discovered:{icon:"discoveredConfig"},dyncfg:{icon:"dynamicConfig"}},u={loaded:!0,value:{},hasError:!1},f="__unsaved__",p="Something went 
wrong",h={accepted:{label:"Accepted",color:"border",searchCondition:{status:"accepted"}},disabled:{label:"Disabled",color:"border",searchCondition:{status:"disabled"}},failed:{label:"Failed",color:"border",searchCondition:{status:"failed"}},orphan:{label:"Orphan",color:"border",searchCondition:{status:"orphan"}},incomplete:{label:"Incomplete",color:"border",searchCondition:{status:"incomplete"}},running:{label:"Running",color:"border",searchCondition:{status:"running"}}},g={internal:{label:"Internal",color:"border",searchCondition:{source_type:"internal"}},stock:{label:"Stock",color:"border",searchCondition:{source_type:"stock"}},user:{label:"User",color:"border",searchCondition:{source_type:"user"}},discovered:{label:"Discovered",color:"border",searchCondition:{source_type:"discovered"}},dyncfg:{label:"Dynamic configuration",color:"border",searchCondition:{source_type:"dyncfg"}}},b={restartRequired:{label:"Restart required",color:"border",searchCondition:{restart_required:!0}},pluginRejected:{label:"Plugin rejected",color:"border",searchCondition:{plugin_rejected:!0}}},m=["restartRequired","pluginRejected","failed","incomplete"]},8239:(e,t,n)=>{n.d(t,{Nj:()=>K,hp:()=>J,SW:()=>T,Hj:()=>z,t8:()=>O,wd:()=>L,L5:()=>$,OD:()=>B,_O:()=>Y,QH:()=>N,sh:()=>F,IP:()=>G,OU:()=>H,ZN:()=>M,SD:()=>Q,EZ:()=>V,DP:()=>x,O_:()=>W,xS:()=>q,y8:()=>Z,MQ:()=>X,_F:()=>D});n(96167),n(17333),n(9920),n(41393),n(98992),n(54520),n(3949),n(81454),n(62953);var o=n(96540),r=n(47444),a=(n(48408),n(26655)),d=n(37618),l=n(49286);const i=e=>({...e,attention:(0,l.bn)(e.attention)}),c={tree:a.A.get,schema:a.A.get,get:a.A.get,enable:a.A.get,disable:a.A.get,restart:a.A.get,remove:a.A.get,add:a.A.post,update:a.A.post,test:a.A.post,userconfig:a.A.post,default:a.A.get},s=function(){let{node:e={},searchParams:t={},payload:n={},...o}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const r=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const t=Object.entries(e);if(!t.length)return"";const n=new URLSearchParams;return t.forEach((e=>{let[t,o]=e;o&&n.append(t,o)})),"?".concat(n.toString())}(t),{action:l}=t,s=c[l]||c.default,u={..."tree"==l?{transform:i,...o}:{...o},...d.Ay?{baseURL:window.envSettings.agentApiUrl,...o}:{...o}},f=s==a.A.get?[u]:[n,u];if(d.Ay){const{value:t,isParent:n}=e,o=n?"":"/host/".concat(t);return s("".concat(o,"/api/v1/config").concat(r),...f)}return s("/api/v2/nodes/".concat(null===e||void 0===e?void 0:e.value,"/config").concat(r),...f)},u=e=>{let{node:t,path:n,id:o}=e;return s({node:t,searchParams:{action:"tree",path:n,id:o},allow401:!0})},f=e=>{let{id:t,node:n}=e;return s({node:n,searchParams:{action:"enable",id:t}})},p=e=>{let{id:t,node:n}=e;return s({node:n,searchParams:{action:"disable",id:t}})},h=e=>{let{id:t,name:n,node:o,payload:r}=e;return s({node:o,searchParams:{action:"add",name:n,id:t},payload:r})},g=e=>{let{id:t,node:n,payload:o}=e;return s({node:n,searchParams:{action:"update",id:t},payload:o})},b=(0,r.eU)({key:"newConfigurationItemAtom",default:null}),m=(0,r.eU)({key:"dyncnfModalAtom",default:null}),v=(0,r.eU)({key:"dyncnfKeyAtom",default:0}),y=(0,r.Iz)({key:"dyncnfVirtualListScrollPosition",default:0}),k=(0,r.eU)({key:"dyncnfOpenItems",default:[]}),C=(0,r.Iz)({key:"dyncnfItemTree",default:null}),_=(0,r.eU)({key:"dyncfgAlertNavigationAtom",default:{}}),A={value:"",error:null},P=(0,r.eU)({key:"dyncfgUserConfig",default:A});var w=n(70895),E=n(81198),S=n(94390);const U=(0,r.K0)({key:"configurationsTree",get:e=>{let{node:t,path:n}=e;return e=>{let{get:o}=e;return 
o(v),u({node:t,path:n})}},cachePolicy_UNSTABLE:{eviction:"most-recent"}}),j=(0,r.K0)({key:"configurationsSchema",get:e=>{let{id:t,node:n}=e;return e=>{let{get:o}=e;return o(v),(e=>{let{id:t,node:n}=e;return s({node:n,searchParams:{action:"schema",id:t}})})({id:t,node:n})}},cachePolicy_UNSTABLE:{eviction:"most-recent"}}),I=(0,r.K0)({key:"configurationsData",get:e=>{let{id:t,node:n}=e;return e=>{let{get:o}=e;return o(v),(e=>{let{id:t,node:n}=e;return s({node:n,searchParams:{action:"get",id:t}})})({id:t,node:n})}},cachePolicy_UNSTABLE:{eviction:"most-recent"}}),R=(0,r.K0)({key:"virtualListScrollPositionSelector",get:e=>{let{node:t,path:n}=e;return e=>{let{get:o}=e;return o(y({node:t,path:n}))}},set:e=>{let{node:t,path:n}=e;return(e,o)=>{let{set:r}=e;r(y({node:t,path:n}),o)}},cachePolicy_UNSTABLE:{eviction:"most-recent"}}),D=e=>{let{node:t,path:n}=e;return(0,r.L4)(R({node:t,path:n}))},L=function(){var e,t;let{node:n,path:o}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const a=(0,r.xf)(U({node:n,path:o})),d="hasError"===a.state;return{loaded:"loading"!==a.state,value:null===(e=a.contents)||void 0===e?void 0:e.data,hasError:d,error:(null===(t=a.contents)||void 0===t||null===(t=t.response)||void 0===t?void 0:t.data)||(d?S.Fn:null)}},O=function(){var e,t;let{id:n,node:o}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const a=(0,r.xf)(j({id:n,node:o})),d=(0,r.RH)(j({id:n,node:o})),l="hasError"===a.state;return{loaded:"loading"!==a.state,value:null===(e=a.contents)||void 0===e?void 0:e.data,hasError:l,error:(null===(t=a.contents)||void 0===t||null===(t=t.response)||void 0===t?void 0:t.data)||(l?S.Fn:null),refresh:d}},T=function(){var e,t;let{id:n,node:o}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const a=(0,r.xf)(I({id:n,node:o})),d=(0,r.RH)(I({id:n,node:o})),l="hasError"===a.state;return{loaded:"loading"!==a.state,value:null===(e=a.contents)||void 0===e?void 0:e.data,hasError:l,error:(null===(t=a.contents)||void 0===t||null===(t=t.response)||void 0===t?void 0:t.data)||(l?S.Fn:null),refresh:d}},F=()=>(0,r.vc)(b),N=()=>(0,r.L4)(b),B=()=>(0,r.L4)(m),x=e=>{let{id:t,node:n}=e;return e=>(e?f:p)({id:t,node:n})},M=e=>{let{id:t,node:n}=e;return()=>(e=>{let{id:t,node:n}=e;return s({node:n,searchParams:{action:"restart",id:t}})})({id:t,node:n})},H=e=>{let{id:t,node:n}=e;return()=>(e=>{let{id:t,node:n}=e;return s({node:n,searchParams:{action:"remove",id:t}})})({id:t,node:n})},q=e=>{let{id:t,node:n}=e;return e=>g({id:t,node:n,payload:e})},J=e=>{let{id:t,isNewItem:n}=e;const r=(0,o.useMemo)((()=>n?h:g),[n]);return(0,o.useCallback)((function(){let{nodes:e=[],payload:o}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const a=n?o.name:void 0,d=e.map((e=>r({id:t,node:e,name:a,payload:o})));return Promise.allSettled(d)}),[t,r,n])},K=e=>{let{id:t,node:n}=e;return e=>{let{name:o,...r}=e;return h({id:t,name:o,node:n,payload:r})}},V=e=>{let{id:t,node:n}=e;return e=>{let{name:o,payload:r}=e;return(e=>{let{id:t,name:n,node:o,payload:r}=e;return s({node:o,searchParams:{action:"test",name:n,id:t},payload:r})})({id:t,name:o,node:n,payload:r})}},Z=e=>{let{id:t,node:n}=e;const r=(0,o.useRef)(),{hasPermission:a}=(0,w.j)({edit:!0});return function(){let{name:e="test",payload:o}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return a?(r.current&&r.current.cancel(),r.current=(e=>{let{id:t,name:n,node:o,payload:r}=e;return 
s({node:o,searchParams:{action:"userconfig",name:n,id:t},payload:r,noResponseParsing:!0})})({id:t,name:e,node:n,payload:o}),r.current):Promise.resolve({data:""})}},z=()=>{const e=(0,r.lZ)(v),[,t]=N();return()=>{e((e=>e+1)),t(null)}},Q=e=>{let{node:t,path:n,isNewItem:r}=e;const{loaded:a,value:d,hasError:l}=L({node:t,path:n});return(0,o.useCallback)((e=>{var t;if(!a||l)return null;const{tree:n}=d||{},o=(0,E.dG)(n);return r?o[e]:o[null===(t=o[e])||void 0===t?void 0:t.template]}),[a,l,d,r])},G=()=>{const[e,t]=(0,r.L4)(k),n=(0,o.useCallback)((e=>{t((t=>t.filter((t=>t!=e))))}),[t]);return{openItems:e,toggle:(0,o.useCallback)(((e,o)=>{o?t((t=>[...t,e])):n(e)}),[t,n]),close:n}},W=e=>{let{node:t,id:n}=e;return(0,r.Zs)((e=>{let{set:o}=e;return async()=>{u({node:t,id:n}).then((e=>{let{data:n}=e;const{tree:r}=n||{};if(r){const e=Object.values(r)[0];e&&Object.entries(e).forEach((e=>{let[n,r]=e;o(C({node:t,id:n}),(0,l.bn)(r))}))}})).catch((()=>{}))}}),[t,n])},Y=e=>{let{node:t,id:n}=e;return(0,r.vc)(C({node:t,id:n}))},$=()=>{const[e,t]=(0,r.L4)(_),n=(0,o.useCallback)((()=>t({})),[t]);return{state:e,setState:t,refresh:n}},X=()=>{const[{value:e,error:t},n]=(0,r.L4)(P),a=(0,o.useCallback)((e=>t=>{n({...A,[e]:t})}),[n]);return{value:e,error:t,setValue:a("value"),setError:a("error"),refresh:(0,o.useCallback)((()=>n(A)),[n])}}},81198:(e,t,n)=>{n.d(t,{J3:()=>o,UE:()=>r,dG:()=>a,m8:()=>d});n(25440),n(17333),n(14905),n(98992),n(54520),n(8872),n(62953);const o=e=>{const t=null===e||void 0===e?void 0:e.split(":");return t?t[t.length-1]:""},r=e=>null===e||void 0===e?void 0:e.split(":").slice(0,-1).join(":"),a=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return Object.entries(e).reduce(((e,t)=>{let[n,o]=t;return n.match(/\/[^/]+/g)?{...e,...o}:o}),{})},d=function(){const e=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return Object.entries(e).reduce(((e,t)=>{let[n,o]=t;return{...e,[n.replace(/^\/[^/]+/,"")]:o}}),{})}(arguments.length>0&&void 0!==arguments[0]?arguments[0]:{});return Object.entries(e).reduce(((e,t)=>{let[n,o]=t;return{...e,[n]:Object.entries(o).reduce(((e,t)=>{let[n,r]=t;if(["template","single"].includes(r.type)&&(e[n]={...r}),"template"==r.type){const t=Object.entries(o).filter((e=>{let[,{template:t}]=e;return t==n})).reduce(((e,t)=>{let[n,o]=t;return{...e,[n]:o}}),{});Object.keys(t).length&&(e[n].templateChildren=t)}return e}),{})}}),{})}}}]); \ No newline at end of file diff --git a/src/web/gui/v2/8323.437406936b642e8f6cb3.chunk.js b/src/web/gui/v2/8323.437406936b642e8f6cb3.chunk.js deleted file mode 100644 index 92877d3b6..000000000 --- a/src/web/gui/v2/8323.437406936b642e8f6cb3.chunk.js +++ /dev/null @@ -1,2 +0,0 @@ -/*! 
For license information please see 8323.437406936b642e8f6cb3.chunk.js.LICENSE.txt */ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="ee85b709-a5a9-4ce7-b1e4-2d0b15541cf3",e._sentryDebugIdIdentifier="sentry-dbid-ee85b709-a5a9-4ce7-b1e4-2d0b15541cf3")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[8323],{20748:(e,t,r)=>{"use strict";var n,a=Object.assign||function(e){for(var t=1;t=0||Object.prototype.hasOwnProperty.call(e,n)&&(r[n]=e[n]);return r}(e,["fill","width","height","style"]);return i.default.createElement("svg",a({viewBox:"0 0 24 24",style:a({fill:r,width:o,height:l},c)},d),i.default.createElement("path",{d:"M21,7L9,19L3.5,13.5L4.91,12.09L9,16.17L19.59,5.59L21,7Z"}))}},54657:(e,t,r)=>{"use strict";var n,a=Object.assign||function(e){for(var t=1;t=0||Object.prototype.hasOwnProperty.call(e,n)&&(r[n]=e[n]);return r}(e,["fill","width","height","style"]);return i.default.createElement("svg",a({viewBox:"0 0 24 24",style:a({fill:r,width:o,height:l},c)},d),i.default.createElement("path",{d:"M12,18.17L8.83,15L7.42,16.41L12,21L16.59,16.41L15.17,15M12,5.83L15.17,9L16.58,7.59L12,3L7.41,7.59L8.83,9L12,5.83Z"}))}},80909:(e,t,r)=>{var n=r(30641),a=r(38329)(n);e.exports=a},30641:(e,t,r)=>{var n=r(86649),a=r(95950);e.exports=function(e,t){return e&&n(e,t,a)}},28077:e=>{e.exports=function(e,t){return null!=e&&t in Object(e)}},41799:(e,t,r)=>{var n=r(37217),a=r(60270);e.exports=function(e,t,r,o){var i=r.length,s=i,l=!o;if(null==e)return!s;for(e=Object(e);i--;){var u=r[i];if(l&&u[2]?u[1]!==e[u[0]]:!(u[0]in e))return!1}for(;++i{var n=r(93663),a=r(87978),o=r(83488),i=r(56449),s=r(50583);e.exports=function(e){return"function"==typeof e?e:null==e?o:"object"==typeof e?i(e)?a(e[0],e[1]):n(e):s(e)}},5128:(e,t,r)=>{var n=r(80909),a=r(64894);e.exports=function(e,t){var r=-1,o=a(e)?Array(e.length):[];return n(e,(function(e,n,a){o[++r]=t(e,n,a)})),o}},93663:(e,t,r)=>{var n=r(41799),a=r(10776),o=r(67197);e.exports=function(e){var t=a(e);return 1==t.length&&t[0][2]?o(t[0][0],t[0][1]):function(r){return r===e||n(r,e,t)}}},87978:(e,t,r)=>{var n=r(60270),a=r(58156),o=r(80631),i=r(28586),s=r(30756),l=r(67197),u=r(77797);e.exports=function(e,t){return i(e)&&s(t)?l(u(e),t):function(r){var i=a(r,e);return void 0===i&&i===t?o(r,e):n(t,i,3)}}},47237:e=>{e.exports=function(e){return function(t){return null==t?void 0:t[e]}}},17255:(e,t,r)=>{var n=r(47422);e.exports=function(e){return function(t){return n(t,e)}}},24066:(e,t,r)=>{var n=r(83488);e.exports=function(e){return"function"==typeof e?e:n}},38329:(e,t,r)=>{var n=r(64894);e.exports=function(e,t){return function(r,a){if(null==r)return r;if(!n(r))return e(r,a);for(var o=r.length,i=t?o:-1,s=Object(r);(t?i--:++i{var n=r(30756),a=r(95950);e.exports=function(e){for(var t=a(e),r=t.length;r--;){var o=t[r],i=e[o];t[r]=[o,i,n(i)]}return t}},49326:(e,t,r)=>{var n=r(31769),a=r(72428),o=r(56449),i=r(30361),s=r(30294),l=r(77797);e.exports=function(e,t,r){for(var u=-1,c=(t=n(t,e)).length,d=!1;++u{var n=r(23805);e.exports=function(e){return e===e&&!n(e)}},67197:e=>{e.exports=function(e,t){return function(r){return null!=r&&(r[e]===t&&(void 0!==t||e in Object(r)))}}},88055:(e,t,r)=>{var 
n=r(9999);e.exports=function(e){return n(e,5)}},33215:(e,t,r)=>{var n=r(30641),a=r(24066);e.exports=function(e,t){return e&&n(e,a(t))}},80631:(e,t,r)=>{var n=r(28077),a=r(49326);e.exports=function(e,t){return null!=e&&a(e,t,n)}},85015:(e,t,r)=>{var n=r(72552),a=r(56449),o=r(40346);e.exports=function(e){return"string"==typeof e||!a(e)&&o(e)&&"[object String]"==n(e)}},55378:(e,t,r)=>{var n=r(34932),a=r(15389),o=r(5128),i=r(56449);e.exports=function(e,t){return(i(e)?n:o)(e,a(t,3))}},50583:(e,t,r)=>{var n=r(47237),a=r(17255),o=r(28586),i=r(77797);e.exports=function(e){return o(e)?n(i(e)):a(e)}},30857:()=>{},17790:(e,t,r)=>{var n=r(19852);function a(e){this.mode=n.MODE_8BIT_BYTE,this.data=e}a.prototype={getLength:function(e){return this.data.length},write:function(e){for(var t=0;t{function t(){this.buffer=new Array,this.length=0}t.prototype={get:function(e){var t=Math.floor(e/8);return 1==(this.buffer[t]>>>7-e%8&1)},put:function(e,t){for(var r=0;r>>t-r-1&1))},getLengthInBits:function(){return this.length},putBit:function(e){var t=Math.floor(this.length/8);this.buffer.length<=t&&this.buffer.push(0),e&&(this.buffer[t]|=128>>>this.length%8),this.length++}},e.exports=t},41537:e=>{e.exports={L:1,M:0,Q:3,H:2}},30501:(e,t,r)=>{var n=r(39341);function a(e,t){if(void 0==e.length)throw new Error(e.length+"/"+t);for(var r=0;r{var n=r(17790),a=r(12835),o=r(10046),i=r(38759),s=r(30501);function l(e,t){this.typeNumber=e,this.errorCorrectLevel=t,this.modules=null,this.moduleCount=0,this.dataCache=null,this.dataList=[]}var u=l.prototype;u.addData=function(e){var t=new n(e);this.dataList.push(t),this.dataCache=null},u.isDark=function(e,t){if(e<0||this.moduleCount<=e||t<0||this.moduleCount<=t)throw new Error(e+","+t);return this.modules[e][t]},u.getModuleCount=function(){return this.moduleCount},u.make=function(){if(this.typeNumber<1){var e=1;for(e=1;e<40;e++){for(var t=a.getRSBlocks(e,this.errorCorrectLevel),r=new o,n=0,s=0;s=7&&this.setupTypeNumber(e),null==this.dataCache&&(this.dataCache=l.createData(this.typeNumber,this.errorCorrectLevel,this.dataList)),this.mapData(this.dataCache,t)},u.setupPositionProbePattern=function(e,t){for(var r=-1;r<=7;r++)if(!(e+r<=-1||this.moduleCount<=e+r))for(var n=-1;n<=7;n++)t+n<=-1||this.moduleCount<=t+n||(this.modules[e+r][t+n]=0<=r&&r<=6&&(0==n||6==n)||0<=n&&n<=6&&(0==r||6==r)||2<=r&&r<=4&&2<=n&&n<=4)},u.getBestMaskPattern=function(){for(var e=0,t=0,r=0;r<8;r++){this.makeImpl(!0,r);var n=i.getLostPoint(this);(0==r||e>n)&&(e=n,t=r)}return t},u.createMovieClip=function(e,t,r){var n=e.createEmptyMovieClip(t,r);this.make();for(var a=0;a>r&1);this.modules[Math.floor(r/3)][r%3+this.moduleCount-8-3]=n}for(r=0;r<18;r++){n=!e&&1==(t>>r&1);this.modules[r%3+this.moduleCount-8-3][Math.floor(r/3)]=n}},u.setupTypeInfo=function(e,t){for(var r=this.errorCorrectLevel<<3|t,n=i.getBCHTypeInfo(r),a=0;a<15;a++){var o=!e&&1==(n>>a&1);a<6?this.modules[a][8]=o:a<8?this.modules[a+1][8]=o:this.modules[this.moduleCount-15+a][8]=o}for(a=0;a<15;a++){o=!e&&1==(n>>a&1);a<8?this.modules[8][this.moduleCount-a-1]=o:a<9?this.modules[8][15-a-1+1]=o:this.modules[8][15-a-1]=o}this.modules[this.moduleCount-8][8]=!e},u.mapData=function(e,t){for(var r=-1,n=this.moduleCount-1,a=7,o=0,s=this.moduleCount-1;s>0;s-=2)for(6==s&&s--;;){for(var l=0;l<2;l++)if(null==this.modules[n][s-l]){var u=!1;o>>a&1)),i.getMask(t,n,s-l)&&(u=!u),this.modules[n][s-l]=u,-1==--a&&(o++,a=7)}if((n+=r)<0||this.moduleCount<=n){n-=r,r=-r;break}}},l.PAD0=236,l.PAD1=17,l.createData=function(e,t,r){for(var n=a.getRSBlocks(e,t),s=new 
o,u=0;u8*d)throw new Error("code length overflow. ("+s.getLengthInBits()+">"+8*d+")");for(s.getLengthInBits()+4<=8*d&&s.put(0,4);s.getLengthInBits()%8!=0;)s.putBit(!1);for(;!(s.getLengthInBits()>=8*d)&&(s.put(l.PAD0,8),!(s.getLengthInBits()>=8*d));)s.put(l.PAD1,8);return l.createBytes(s,n)},l.createBytes=function(e,t){for(var r=0,n=0,a=0,o=new Array(t.length),l=new Array(t.length),u=0;u=0?h.get(v):0}}var g=0;for(p=0;p{var n=r(41537);function a(e,t){this.totalCount=e,this.dataCount=t}a.RS_BLOCK_TABLE=[[1,26,19],[1,26,16],[1,26,13],[1,26,9],[1,44,34],[1,44,28],[1,44,22],[1,44,16],[1,70,55],[1,70,44],[2,35,17],[2,35,13],[1,100,80],[2,50,32],[2,50,24],[4,25,9],[1,134,108],[2,67,43],[2,33,15,2,34,16],[2,33,11,2,34,12],[2,86,68],[4,43,27],[4,43,19],[4,43,15],[2,98,78],[4,49,31],[2,32,14,4,33,15],[4,39,13,1,40,14],[2,121,97],[2,60,38,2,61,39],[4,40,18,2,41,19],[4,40,14,2,41,15],[2,146,116],[3,58,36,2,59,37],[4,36,16,4,37,17],[4,36,12,4,37,13],[2,86,68,2,87,69],[4,69,43,1,70,44],[6,43,19,2,44,20],[6,43,15,2,44,16],[4,101,81],[1,80,50,4,81,51],[4,50,22,4,51,23],[3,36,12,8,37,13],[2,116,92,2,117,93],[6,58,36,2,59,37],[4,46,20,6,47,21],[7,42,14,4,43,15],[4,133,107],[8,59,37,1,60,38],[8,44,20,4,45,21],[12,33,11,4,34,12],[3,145,115,1,146,116],[4,64,40,5,65,41],[11,36,16,5,37,17],[11,36,12,5,37,13],[5,109,87,1,110,88],[5,65,41,5,66,42],[5,54,24,7,55,25],[11,36,12],[5,122,98,1,123,99],[7,73,45,3,74,46],[15,43,19,2,44,20],[3,45,15,13,46,16],[1,135,107,5,136,108],[10,74,46,1,75,47],[1,50,22,15,51,23],[2,42,14,17,43,15],[5,150,120,1,151,121],[9,69,43,4,70,44],[17,50,22,1,51,23],[2,42,14,19,43,15],[3,141,113,4,142,114],[3,70,44,11,71,45],[17,47,21,4,48,22],[9,39,13,16,40,14],[3,135,107,5,136,108],[3,67,41,13,68,42],[15,54,24,5,55,25],[15,43,15,10,44,16],[4,144,116,4,145,117],[17,68,42],[17,50,22,6,51,23],[19,46,16,6,47,17],[2,139,111,7,140,112],[17,74,46],[7,54,24,16,55,25],[34,37,13],[4,151,121,5,152,122],[4,75,47,14,76,48],[11,54,24,14,55,25],[16,45,15,14,46,16],[6,147,117,4,148,118],[6,73,45,14,74,46],[11,54,24,16,55,25],[30,46,16,2,47,17],[8,132,106,4,133,107],[8,75,47,13,76,48],[7,54,24,22,55,25],[22,45,15,13,46,16],[10,142,114,2,143,115],[19,74,46,4,75,47],[28,50,22,6,51,23],[33,46,16,4,47,17],[8,152,122,4,153,123],[22,73,45,3,74,46],[8,53,23,26,54,24],[12,45,15,28,46,16],[3,147,117,10,148,118],[3,73,45,23,74,46],[4,54,24,31,55,25],[11,45,15,31,46,16],[7,146,116,7,147,117],[21,73,45,7,74,46],[1,53,23,37,54,24],[19,45,15,26,46,16],[5,145,115,10,146,116],[19,75,47,10,76,48],[15,54,24,25,55,25],[23,45,15,25,46,16],[13,145,115,3,146,116],[2,74,46,29,75,47],[42,54,24,1,55,25],[23,45,15,28,46,16],[17,145,115],[10,74,46,23,75,47],[10,54,24,35,55,25],[19,45,15,35,46,16],[17,145,115,1,146,116],[14,74,46,21,75,47],[29,54,24,19,55,25],[11,45,15,46,46,16],[13,145,115,6,146,116],[14,74,46,23,75,47],[44,54,24,7,55,25],[59,46,16,1,47,17],[12,151,121,7,152,122],[12,75,47,26,76,48],[39,54,24,14,55,25],[22,45,15,41,46,16],[6,151,121,14,152,122],[6,75,47,34,76,48],[46,54,24,10,55,25],[2,45,15,64,46,16],[17,152,122,4,153,123],[29,74,46,14,75,47],[49,54,24,10,55,25],[24,45,15,46,46,16],[4,152,122,18,153,123],[13,74,46,32,75,47],[48,54,24,14,55,25],[42,45,15,32,46,16],[20,147,117,4,148,118],[40,75,47,7,76,48],[43,54,24,22,55,25],[10,45,15,67,46,16],[19,148,118,6,149,119],[18,75,47,31,76,48],[34,54,24,34,55,25],[20,45,15,61,46,16]],a.getRSBlocks=function(e,t){var r=a.getRsBlockTable(e,t);if(void 0==r)throw new Error("bad rs block @ typeNumber:"+e+"/errorCorrectLevel:"+t);for(var n=r.length/3,o=new Array,i=0;i{for(var 
t={glog:function(e){if(e<1)throw new Error("glog("+e+")");return t.LOG_TABLE[e]},gexp:function(e){for(;e<0;)e+=255;for(;e>=256;)e-=255;return t.EXP_TABLE[e]},EXP_TABLE:new Array(256),LOG_TABLE:new Array(256)},r=0;r<8;r++)t.EXP_TABLE[r]=1<{e.exports={MODE_NUMBER:1,MODE_ALPHA_NUM:2,MODE_8BIT_BYTE:4,MODE_KANJI:8}},38759:(e,t,r)=>{var n=r(19852),a=r(30501),o=r(39341),i=0,s=1,l=2,u=3,c=4,d=5,p=6,f=7,h={PATTERN_POSITION_TABLE:[[],[6,18],[6,22],[6,26],[6,30],[6,34],[6,22,38],[6,24,42],[6,26,46],[6,28,50],[6,30,54],[6,32,58],[6,34,62],[6,26,46,66],[6,26,48,70],[6,26,50,74],[6,30,54,78],[6,30,56,82],[6,30,58,86],[6,34,62,90],[6,28,50,72,94],[6,26,50,74,98],[6,30,54,78,102],[6,28,54,80,106],[6,32,58,84,110],[6,30,58,86,114],[6,34,62,90,118],[6,26,50,74,98,122],[6,30,54,78,102,126],[6,26,52,78,104,130],[6,30,56,82,108,134],[6,34,60,86,112,138],[6,30,58,86,114,142],[6,34,62,90,118,146],[6,30,54,78,102,126,150],[6,24,50,76,102,128,154],[6,28,54,80,106,132,158],[6,32,58,84,110,136,162],[6,26,54,82,110,138,166],[6,30,58,86,114,142,170]],G15:1335,G18:7973,G15_MASK:21522,getBCHTypeInfo:function(e){for(var t=e<<10;h.getBCHDigit(t)-h.getBCHDigit(h.G15)>=0;)t^=h.G15<=0;)t^=h.G18<>>=1;return t},getPatternPosition:function(e){return h.PATTERN_POSITION_TABLE[e-1]},getMask:function(e,t,r){switch(e){case i:return(t+r)%2==0;case s:return t%2==0;case l:return r%3==0;case u:return(t+r)%3==0;case c:return(Math.floor(t/2)+Math.floor(r/3))%2==0;case d:return t*r%2+t*r%3==0;case p:return(t*r%2+t*r%3)%2==0;case f:return(t*r%3+(t+r)%2)%2==0;default:throw new Error("bad maskPattern:"+e)}},getErrorCorrectPolynomial:function(e){for(var t=new a([1],0),r=0;r5&&(r+=3+o-5)}for(n=0;n{"use strict";var n=r(96540),a=r(58527),o={},i=function(e,t,r,n){var a=e+"-"+t+"-"+r+(n?"-server":"");if(o[a])return o[a];var i=function(e,t,r,n){if("undefined"===typeof document&&!n)return null;var a=n?new n:document.createElement("canvas");a.width=2*r,a.height=2*r;var o=a.getContext("2d");return o?(o.fillStyle=e,o.fillRect(0,0,a.width,a.height),o.fillStyle=t,o.fillRect(0,0,r,r),o.translate(r,r),o.fillRect(0,0,r,r),a.toDataURL()):null}(e,t,r,n);return o[a]=i,i},s=Object.assign||function(e){for(var t=1;ti?1:Math.round(100*c/i)/100,t.a!==d)return{h:t.h,s:t.s,l:t.l,a:d,source:"rgb"}}else{var p=void 0;if(n!==(p=u<0?0:u>o?1:Math.round(100*u/o)/100))return{h:t.h,s:t.s,l:t.l,a:p,source:"rgb"}}return null}(e,n.props.hsl,n.props.direction,n.props.a,n.container);t&&"function"===typeof n.props.onChange&&n.props.onChange(t,e)},n.handleMouseDown=function(e){n.handleChange(e),window.addEventListener("mousemove",n.handleChange),window.addEventListener("mouseup",n.handleMouseUp)},n.handleMouseUp=function(){n.unbindEventListeners()},n.unbindEventListeners=function(){window.removeEventListener("mousemove",n.handleChange),window.removeEventListener("mouseup",n.handleMouseUp)},p(n,r)}return function(e,t){if("function"!==typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}(t,e),d(t,[{key:"componentWillUnmount",value:function(){this.unbindEventListeners()}},{key:"render",value:function(){var e=this,t=this.props.rgb,r=(0,a.Ay)({default:{alpha:{absolute:"0px 0px 0px 0px",borderRadius:this.props.radius},checkboard:{absolute:"0px 0px 0px 0px",overflow:"hidden",borderRadius:this.props.radius},gradient:{absolute:"0px 0px 0px 
0px",background:"linear-gradient(to right, rgba("+t.r+","+t.g+","+t.b+", 0) 0%,\n rgba("+t.r+","+t.g+","+t.b+", 1) 100%)",boxShadow:this.props.shadow,borderRadius:this.props.radius},container:{position:"relative",height:"100%",margin:"0 3px"},pointer:{position:"absolute",left:100*t.a+"%"},slider:{width:"4px",borderRadius:"1px",height:"8px",boxShadow:"0 0 2px rgba(0, 0, 0, .6)",background:"#fff",marginTop:"1px",transform:"translateX(-2px)"}},vertical:{gradient:{background:"linear-gradient(to bottom, rgba("+t.r+","+t.g+","+t.b+", 0) 0%,\n rgba("+t.r+","+t.g+","+t.b+", 1) 100%)"},pointer:{left:0,top:100*t.a+"%"}},overwrite:c({},this.props.style)},{vertical:"vertical"===this.props.direction,overwrite:!0});return n.createElement("div",{style:r.alpha},n.createElement("div",{style:r.checkboard},n.createElement(u,{renderers:this.props.renderers})),n.createElement("div",{style:r.gradient}),n.createElement("div",{style:r.container,ref:function(t){return e.container=t},onMouseDown:this.handleMouseDown,onTouchMove:this.handleChange,onTouchStart:this.handleChange},n.createElement("div",{style:r.pointer},this.props.pointer?n.createElement(this.props.pointer,this.props):n.createElement("div",{style:r.slider}))))}}]),t}(n.PureComponent||n.Component);var h=function(){function e(e,t){for(var r=0;r-1)){var a=r.getArrowOffset(),o=38===e.keyCode?n+a:n-a;r.setUpdatedValue(o,e)}},r.handleDrag=function(e){if(r.props.dragLabel){var t=Math.round(r.props.value+e.movementX);t>=0&&t<=r.props.dragMax&&r.props.onChange&&r.props.onChange(r.getValueObjectWithLabel(t),e)}},r.handleMouseDown=function(e){r.props.dragLabel&&(e.preventDefault(),r.handleDrag(e),window.addEventListener("mousemove",r.handleDrag),window.addEventListener("mouseup",r.handleMouseUp))},r.handleMouseUp=function(){r.unbindEventListeners()},r.unbindEventListeners=function(){window.removeEventListener("mousemove",r.handleDrag),window.removeEventListener("mouseup",r.handleMouseUp)},r.state={value:String(e.value).toUpperCase(),blurValue:String(e.value).toUpperCase()},r.inputId="rc-editable-input-"+g++,r}return function(e,t){if("function"!==typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}(t,e),h(t,[{key:"componentDidUpdate",value:function(e,t){this.props.value===this.state.value||e.value===this.props.value&&t.value===this.state.value||(this.input===document.activeElement?this.setState({blurValue:String(this.props.value).toUpperCase()}):this.setState({value:String(this.props.value).toUpperCase(),blurValue:!this.state.blurValue&&String(this.props.value).toUpperCase()}))}},{key:"componentWillUnmount",value:function(){this.unbindEventListeners()}},{key:"getValueObjectWithLabel",value:function(e){return function(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}({},this.props.label,e)}},{key:"getArrowOffset",value:function(){return this.props.arrowOffset||1}},{key:"setUpdatedValue",value:function(e,t){var r=this.props.label?this.getValueObjectWithLabel(e):e;this.props.onChange&&this.props.onChange(r,t),this.setState({value:e})}},{key:"render",value:function(){var 
e=this,t=(0,a.Ay)({default:{wrap:{position:"relative"}},"user-override":{wrap:this.props.style&&this.props.style.wrap?this.props.style.wrap:{},input:this.props.style&&this.props.style.input?this.props.style.input:{},label:this.props.style&&this.props.style.label?this.props.style.label:{}},"dragLabel-true":{label:{cursor:"ew-resize"}}},{"user-override":!0},this.props);return n.createElement("div",{style:t.wrap},n.createElement("input",{id:this.inputId,style:t.input,ref:function(t){return e.input=t},value:this.state.value,onKeyDown:this.handleKeyDown,onChange:this.handleChange,onBlur:this.handleBlur,placeholder:this.props.placeholder,spellCheck:"false"}),this.props.label&&!this.props.hideLabel?n.createElement("label",{htmlFor:this.inputId,style:t.label,onMouseDown:this.handleMouseDown},this.props.label):null)}}]),t}(n.PureComponent||n.Component);var b=function(){function e(e,t){for(var r=0;ro?0:360*(-100*u/o+100)/100,r.h!==c)return{h:c,s:r.s,l:r.l,a:r.a,source:"hsl"}}else{var d=void 0;if(d=l<0?0:l>a?359:100*l/a*360/100,r.h!==d)return{h:d,s:r.s,l:r.l,a:r.a,source:"hsl"}}return null}(e,n.props.direction,n.props.hsl,n.container);t&&"function"===typeof n.props.onChange&&n.props.onChange(t,e)},n.handleMouseDown=function(e){n.handleChange(e),window.addEventListener("mousemove",n.handleChange),window.addEventListener("mouseup",n.handleMouseUp)},n.handleMouseUp=function(){n.unbindEventListeners()},y(n,r)}return function(e,t){if("function"!==typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}(t,e),b(t,[{key:"componentWillUnmount",value:function(){this.unbindEventListeners()}},{key:"unbindEventListeners",value:function(){window.removeEventListener("mousemove",this.handleChange),window.removeEventListener("mouseup",this.handleMouseUp)}},{key:"render",value:function(){var e=this,t=this.props.direction,r=void 0===t?"horizontal":t,o=(0,a.Ay)({default:{hue:{absolute:"0px 0px 0px 0px",borderRadius:this.props.radius,boxShadow:this.props.shadow},container:{padding:"0 2px",position:"relative",height:"100%",borderRadius:this.props.radius},pointer:{position:"absolute",left:100*this.props.hsl.h/360+"%"},slider:{marginTop:"1px",width:"4px",borderRadius:"1px",height:"8px",boxShadow:"0 0 2px rgba(0, 0, 0, .6)",background:"#fff",transform:"translateX(-2px)"}},vertical:{pointer:{left:"0px",top:-100*this.props.hsl.h/360+100+"%"}}},{vertical:"vertical"===r});return n.createElement("div",{style:o.hue},n.createElement("div",{className:"hue-"+r,style:o.container,ref:function(t){return e.container=t},onMouseDown:this.handleMouseDown,onTouchMove:this.handleChange,onTouchStart:this.handleChange},n.createElement("style",null,"\n .hue-horizontal {\n background: linear-gradient(to right, #f00 0%, #ff0 17%, #0f0\n 33%, #0ff 50%, #00f 67%, #f0f 83%, #f00 100%);\n background: -webkit-linear-gradient(to right, #f00 0%, #ff0\n 17%, #0f0 33%, #0ff 50%, #00f 67%, #f0f 83%, #f00 100%);\n }\n\n .hue-vertical {\n background: linear-gradient(to top, #f00 0%, #ff0 17%, #0f0 33%,\n #0ff 50%, #00f 67%, #f0f 83%, #f00 100%);\n background: -webkit-linear-gradient(to top, #f00 0%, #ff0 17%,\n #0f0 33%, #0ff 50%, #00f 67%, #f0f 83%, #f00 100%);\n }\n 
"),n.createElement("div",{style:o.pointer},this.props.pointer?n.createElement(this.props.pointer,this.props):n.createElement("div",{style:o.slider}))))}}]),t}(n.PureComponent||n.Component);var x=r(5556),k=r.n(x);const D=function(){this.__data__=[],this.size=0};const C=function(e,t){return e===t||e!==e&&t!==t};const E=function(e,t){for(var r=e.length;r--;)if(C(e[r][0],t))return r;return-1};var A=Array.prototype.splice;const S=function(e){var t=this.__data__,r=E(t,e);return!(r<0)&&(r==t.length-1?t.pop():A.call(t,r,1),--this.size,!0)};const _=function(e){var t=this.__data__,r=E(t,e);return r<0?void 0:t[r][1]};const M=function(e){return E(this.__data__,e)>-1};const T=function(e,t){var r=this.__data__,n=E(r,e);return n<0?(++this.size,r.push([e,t])):r[n][1]=t,this};function O(e){var t=-1,r=null==e?0:e.length;for(this.clear();++t-1&&e%1==0&&e<=9007199254740991};const ut=function(e){return null!=e&<(e.length)&&!$(e)};const ct=function(e){return tt(e)&&ut(e)};const dt=function(){return!1};var pt="object"==typeof exports&&exports&&!exports.nodeType&&exports,ft=pt&&"object"==typeof module&&module&&!module.nodeType&&module,ht=ft&&ft.exports===pt?B.Buffer:void 0;const vt=(ht?ht.isBuffer:void 0)||dt;var gt=Function.prototype,mt=Object.prototype,bt=gt.toString,yt=mt.hasOwnProperty,wt=bt.call(Object);const xt=function(e){if(!tt(e)||"[object Object]"!=X(e))return!1;var t=$e(e);if(null===t)return!0;var r=yt.call(t,"constructor")&&t.constructor;return"function"==typeof r&&r instanceof r&&bt.call(r)==wt};var kt={};kt["[object Float32Array]"]=kt["[object Float64Array]"]=kt["[object Int8Array]"]=kt["[object Int16Array]"]=kt["[object Int32Array]"]=kt["[object Uint8Array]"]=kt["[object Uint8ClampedArray]"]=kt["[object Uint16Array]"]=kt["[object Uint32Array]"]=!0,kt["[object Arguments]"]=kt["[object Array]"]=kt["[object ArrayBuffer]"]=kt["[object Boolean]"]=kt["[object DataView]"]=kt["[object Date]"]=kt["[object Error]"]=kt["[object Function]"]=kt["[object Map]"]=kt["[object Number]"]=kt["[object Object]"]=kt["[object RegExp]"]=kt["[object Set]"]=kt["[object String]"]=kt["[object WeakMap]"]=!1;const Dt=function(e){return tt(e)&<(e.length)&&!!kt[X(e)]};const Ct=function(e){return function(t){return e(t)}};var Et="object"==typeof exports&&exports&&!exports.nodeType&&exports,At=Et&&"object"==typeof module&&module&&!module.nodeType&&module,St=At&&At.exports===Et&&Y.process,_t=function(){try{var e=At&&At.require&&At.require("util").types;return e||St&&St.binding&&St.binding("util")}catch(t){}}();var Mt=_t&&_t.isTypedArray;const Tt=Mt?Ct(Mt):Dt;const Ot=function(e,t){if(("constructor"!==t||"function"!==typeof e[t])&&"__proto__"!=t)return e[t]};var Pt=Object.prototype.hasOwnProperty;const Nt=function(e,t,r){var n=e[t];Pt.call(e,t)&&C(n,r)&&(void 0!==r||t in e)||Fe(e,t,r)};const Rt=function(e,t,r,n){var a=!r;r||(r={});for(var o=-1,i=t.length;++o-1&&e%1==0&&e0){if(++t>=800)return arguments[0]}else t=0;return e.apply(void 0,arguments)}}(Zt);const tr=function(e,t){return er(Vt(e,t,Kt),e+"")};const rr=function(e,t,r){if(!V(r))return!1;var n=typeof t;return!!("number"==n?ut(r)&&Yt(t,r.length):"string"==n&&t in r)&&C(r[t],e)};const nr=function(e){return tr((function(t,r){var n=-1,a=r.length,o=a>1?r[a-1]:void 0,i=a>2?r[2]:void 0;for(o=e.length>3&&"function"==typeof o?(a--,o):void 0,i&&rr(r[0],r[1],i)&&(o=a<3?void 0:o,a=1),t=Object(t);++n=t||r<0||d&&e-u>=o}function v(){var e=ir();if(h(e))return g(e);s=setTimeout(v,function(e){var r=t-(e-l);return d?br(r,o-(e-u)):r}(e))}function g(e){return s=void 0,p&&n?f(e):(n=a=void 
0,i)}function m(){var e=ir(),r=h(e);if(n=arguments,a=this,l=e,r){if(void 0===s)return function(e){return u=e,s=setTimeout(v,t),c?f(e):i}(l);if(d)return clearTimeout(s),s=setTimeout(v,t),f(l)}return void 0===s&&(s=setTimeout(v,t)),i}return t=gr(t)||0,V(r)&&(c=!!r.leading,o=(d="maxWait"in r)?mr(gr(r.maxWait)||0,t):o,p="trailing"in r?!!r.trailing:p),m.cancel=function(){void 0!==s&&clearTimeout(s),u=0,n=l=a=s=void 0},m.flush=function(){return void 0===s?i:g(ir())},m};const wr=function(e,t,r){var n=!0,a=!0;if("function"!=typeof e)throw new TypeError("Expected a function");return V(r)&&(n="leading"in r?!!r.leading:n,a="trailing"in r?!!r.trailing:a),yr(e,t,{leading:n,maxWait:t,trailing:a})};var xr=function(){function e(e,t){for(var r=0;ra&&(l=a),u<0?u=0:u>o&&(u=o);var c=l/a,d=1-u/o;return{h:t.h,s:c,v:d,a:t.a,source:"hsv"}}(e,r.props.hsl,r.container),e)},r.handleMouseDown=function(e){r.handleChange(e);var t=r.getContainerRenderWindow();t.addEventListener("mousemove",r.handleChange),t.addEventListener("mouseup",r.handleMouseUp)},r.handleMouseUp=function(){r.unbindEventListeners()},r.throttle=wr((function(e,t,r){e(t,r)}),50),r}return function(e,t){if("function"!==typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}(t,e),xr(t,[{key:"componentWillUnmount",value:function(){this.throttle.cancel(),this.unbindEventListeners()}},{key:"getContainerRenderWindow",value:function(){for(var e=this.container,t=window;!t.document.contains(e)&&t.parent!==t;)t=t.parent;return t}},{key:"unbindEventListeners",value:function(){var e=this.getContainerRenderWindow();e.removeEventListener("mousemove",this.handleChange),e.removeEventListener("mouseup",this.handleMouseUp)}},{key:"render",value:function(){var e=this,t=this.props.style||{},r=t.color,o=t.white,i=t.black,s=t.pointer,l=t.circle,u=(0,a.Ay)({default:{color:{absolute:"0px 0px 0px 0px",background:"hsl("+this.props.hsl.h+",100%, 50%)",borderRadius:this.props.radius},white:{absolute:"0px 0px 0px 0px",borderRadius:this.props.radius},black:{absolute:"0px 0px 0px 0px",boxShadow:this.props.shadow,borderRadius:this.props.radius},pointer:{position:"absolute",top:-100*this.props.hsv.v+100+"%",left:100*this.props.hsv.s+"%",cursor:"default"},circle:{width:"4px",height:"4px",boxShadow:"0 0 0 1.5px #fff, inset 0 0 1px 1px rgba(0,0,0,.3),\n 0 0 1px 2px rgba(0,0,0,.4)",borderRadius:"50%",cursor:"hand",transform:"translate(-2px, -2px)"}},custom:{color:r,white:o,black:i,pointer:s,circle:l}},{custom:!!this.props.style});return n.createElement("div",{style:u.color,ref:function(t){return e.container=t},onMouseDown:this.handleMouseDown,onTouchMove:this.handleChange,onTouchStart:this.handleChange},n.createElement("style",null,"\n .saturation-white {\n background: -webkit-linear-gradient(to right, #fff, rgba(255,255,255,0));\n background: linear-gradient(to right, #fff, rgba(255,255,255,0));\n }\n .saturation-black {\n background: -webkit-linear-gradient(to top, #000, rgba(0,0,0,0));\n background: linear-gradient(to top, #000, rgba(0,0,0,0));\n }\n 
"),n.createElement("div",{style:u.white,className:"saturation-white"},n.createElement("div",{style:u.black,className:"saturation-black"}),n.createElement("div",{style:u.pointer},this.props.pointer?n.createElement(this.props.pointer,this.props):n.createElement("div",{style:u.circle}))))}}]),t}(n.PureComponent||n.Component);const Dr=kr;const Cr=function(e,t){for(var r=-1,n=null==e?0:e.length;++r1&&(r-=1),r<1/6?e+6*(t-e)*r:r<.5?t:r<2/3?e+(t-e)*(2/3-r)*6:e}if(e=tn(e,360),t=tn(t,100),r=tn(r,100),0===t)n=a=o=r;else{var s=r<.5?r*(1+t):r+t-r*t,l=2*r-s;n=i(l,s,e+1/3),a=i(l,s,e),o=i(l,s,e-1/3)}return{r:255*n,g:255*a,b:255*o}}(e.h,n,o),i=!0,s="hsl"),e.hasOwnProperty("a")&&(r=e.a));var l,u,c;return r=en(r),{ok:i,format:e.format||s,r:Math.min(255,Math.max(t.r,0)),g:Math.min(255,Math.max(t.g,0)),b:Math.min(255,Math.max(t.b,0)),a:r}}(e);this._originalInput=e,this._r=r.r,this._g=r.g,this._b=r.b,this._a=r.a,this._roundA=Math.round(100*this._a)/100,this._format=t.format||r.format,this._gradientType=t.gradientType,this._r<1&&(this._r=Math.round(this._r)),this._g<1&&(this._g=Math.round(this._g)),this._b<1&&(this._b=Math.round(this._b)),this._ok=r.ok}function Fr(e,t,r){e=tn(e,255),t=tn(t,255),r=tn(r,255);var n,a,o=Math.max(e,t,r),i=Math.min(e,t,r),s=(o+i)/2;if(o==i)n=a=0;else{var l=o-i;switch(a=s>.5?l/(2-o-i):l/(o+i),o){case e:n=(t-r)/l+(t>1)+720)%360;--t;)n.h=(n.h+a)%360,o.push(Lr(n));return o}function $r(e,t){t=t||6;for(var r=Lr(e).toHsv(),n=r.h,a=r.s,o=r.v,i=[],s=1/t;t--;)i.push(Lr({h:n,s:a,v:o})),o=(o+s)%1;return i}Lr.prototype={isDark:function(){return this.getBrightness()<128},isLight:function(){return!this.isDark()},isValid:function(){return this._ok},getOriginalInput:function(){return this._originalInput},getFormat:function(){return this._format},getAlpha:function(){return this._a},getBrightness:function(){var e=this.toRgb();return(299*e.r+587*e.g+114*e.b)/1e3},getLuminance:function(){var e,t,r,n=this.toRgb();return e=n.r/255,t=n.g/255,r=n.b/255,.2126*(e<=.03928?e/12.92:Math.pow((e+.055)/1.055,2.4))+.7152*(t<=.03928?t/12.92:Math.pow((t+.055)/1.055,2.4))+.0722*(r<=.03928?r/12.92:Math.pow((r+.055)/1.055,2.4))},setAlpha:function(e){return this._a=en(e),this._roundA=Math.round(100*this._a)/100,this},toHsv:function(){var e=Yr(this._r,this._g,this._b);return{h:360*e.h,s:e.s,v:e.v,a:this._a}},toHsvString:function(){var e=Yr(this._r,this._g,this._b),t=Math.round(360*e.h),r=Math.round(100*e.s),n=Math.round(100*e.v);return 1==this._a?"hsv("+t+", "+r+"%, "+n+"%)":"hsva("+t+", "+r+"%, "+n+"%, "+this._roundA+")"},toHsl:function(){var e=Fr(this._r,this._g,this._b);return{h:360*e.h,s:e.s,l:e.l,a:this._a}},toHslString:function(){var e=Fr(this._r,this._g,this._b),t=Math.round(360*e.h),r=Math.round(100*e.s),n=Math.round(100*e.l);return 1==this._a?"hsl("+t+", "+r+"%, "+n+"%)":"hsla("+t+", "+r+"%, "+n+"%, "+this._roundA+")"},toHex:function(e){return jr(this._r,this._g,this._b,e)},toHexString:function(e){return"#"+this.toHex(e)},toHex8:function(e){return function(e,t,r,n,a){var o=[an(Math.round(e).toString(16)),an(Math.round(t).toString(16)),an(Math.round(r).toString(16)),an(sn(n))];if(a&&o[0].charAt(0)==o[0].charAt(1)&&o[1].charAt(0)==o[1].charAt(1)&&o[2].charAt(0)==o[2].charAt(1)&&o[3].charAt(0)==o[3].charAt(1))return o[0].charAt(0)+o[1].charAt(0)+o[2].charAt(0)+o[3].charAt(0);return o.join("")}(this._r,this._g,this._b,this._a,e)},toHex8String:function(e){return"#"+this.toHex8(e)},toRgb:function(){return{r:Math.round(this._r),g:Math.round(this._g),b:Math.round(this._b),a:this._a}},toRgbString:function(){return 
1==this._a?"rgb("+Math.round(this._r)+", "+Math.round(this._g)+", "+Math.round(this._b)+")":"rgba("+Math.round(this._r)+", "+Math.round(this._g)+", "+Math.round(this._b)+", "+this._roundA+")"},toPercentageRgb:function(){return{r:Math.round(100*tn(this._r,255))+"%",g:Math.round(100*tn(this._g,255))+"%",b:Math.round(100*tn(this._b,255))+"%",a:this._a}},toPercentageRgbString:function(){return 1==this._a?"rgb("+Math.round(100*tn(this._r,255))+"%, "+Math.round(100*tn(this._g,255))+"%, "+Math.round(100*tn(this._b,255))+"%)":"rgba("+Math.round(100*tn(this._r,255))+"%, "+Math.round(100*tn(this._g,255))+"%, "+Math.round(100*tn(this._b,255))+"%, "+this._roundA+")"},toName:function(){return 0===this._a?"transparent":!(this._a<1)&&(Jr[jr(this._r,this._g,this._b,!0)]||!1)},toFilter:function(e){var t="#"+Br(this._r,this._g,this._b,this._a),r=t,n=this._gradientType?"GradientType = 1, ":"";if(e){var a=Lr(e);r="#"+Br(a._r,a._g,a._b,a._a)}return"progid:DXImageTransform.Microsoft.gradient("+n+"startColorstr="+t+",endColorstr="+r+")"},toString:function(e){var t=!!e;e=e||this._format;var r=!1,n=this._a<1&&this._a>=0;return t||!n||"hex"!==e&&"hex6"!==e&&"hex3"!==e&&"hex4"!==e&&"hex8"!==e&&"name"!==e?("rgb"===e&&(r=this.toRgbString()),"prgb"===e&&(r=this.toPercentageRgbString()),"hex"!==e&&"hex6"!==e||(r=this.toHexString()),"hex3"===e&&(r=this.toHexString(!0)),"hex4"===e&&(r=this.toHex8String(!0)),"hex8"===e&&(r=this.toHex8String()),"name"===e&&(r=this.toName()),"hsl"===e&&(r=this.toHslString()),"hsv"===e&&(r=this.toHsvString()),r||this.toHexString()):"name"===e&&0===this._a?this.toName():this.toRgbString()},clone:function(){return Lr(this.toString())},_applyModification:function(e,t){var r=e.apply(null,[this].concat([].slice.call(t)));return this._r=r._r,this._g=r._g,this._b=r._b,this.setAlpha(r._a),this},lighten:function(){return this._applyModification(Ur,arguments)},brighten:function(){return this._applyModification(qr,arguments)},darken:function(){return this._applyModification(zr,arguments)},desaturate:function(){return this._applyModification(Ir,arguments)},saturate:function(){return this._applyModification(Hr,arguments)},greyscale:function(){return this._applyModification(Wr,arguments)},spin:function(){return this._applyModification(Qr,arguments)},_applyCombination:function(e,t){return e.apply(null,[this].concat([].slice.call(t)))},analogous:function(){return this._applyCombination(Vr,arguments)},complement:function(){return this._applyCombination(Kr,arguments)},monochromatic:function(){return this._applyCombination($r,arguments)},splitcomplement:function(){return this._applyCombination(Xr,arguments)},triad:function(){return this._applyCombination(Gr,[3])},tetrad:function(){return this._applyCombination(Gr,[4])}},Lr.fromRatio=function(e,t){if("object"==Pr(e)){var r={};for(var n in e)e.hasOwnProperty(n)&&(r[n]="a"===n?e[n]:on(e[n]));e=r}return Lr(e,t)},Lr.equals=function(e,t){return!(!e||!t)&&Lr(e).toRgbString()==Lr(t).toRgbString()},Lr.random=function(){return Lr.fromRatio({r:Math.random(),g:Math.random(),b:Math.random()})},Lr.mix=function(e,t,r){r=0===r?0:r||50;var n=Lr(e).toRgb(),a=Lr(t).toRgb(),o=r/100;return Lr({r:(a.r-n.r)*o+n.r,g:(a.g-n.g)*o+n.g,b:(a.b-n.b)*o+n.b,a:(a.a-n.a)*o+n.a})},Lr.readability=function(e,t){var r=Lr(e),n=Lr(t);return(Math.max(r.getLuminance(),n.getLuminance())+.05)/(Math.min(r.getLuminance(),n.getLuminance())+.05)},Lr.isReadable=function(e,t,r){var n,a,o=Lr.readability(e,t);switch(a=!1,(n=function(e){var 
t,r;t=((e=e||{level:"AA",size:"small"}).level||"AA").toUpperCase(),r=(e.size||"small").toLowerCase(),"AA"!==t&&"AAA"!==t&&(t="AA");"small"!==r&&"large"!==r&&(r="small");return{level:t,size:r}}(r)).level+n.size){case"AAsmall":case"AAAlarge":a=o>=4.5;break;case"AAlarge":a=o>=3;break;case"AAAsmall":a=o>=7}return a},Lr.mostReadable=function(e,t,r){var n,a,o,i,s=null,l=0;a=(r=r||{}).includeFallbackColors,o=r.level,i=r.size;for(var u=0;ul&&(l=n,s=Lr(t[u]));return Lr.isReadable(e,s,{level:o,size:i})||!a?s:(r.includeFallbackColors=!1,Lr.mostReadable(e,["#fff","#000"],r))};var Zr=Lr.names={aliceblue:"f0f8ff",antiquewhite:"faebd7",aqua:"0ff",aquamarine:"7fffd4",azure:"f0ffff",beige:"f5f5dc",bisque:"ffe4c4",black:"000",blanchedalmond:"ffebcd",blue:"00f",blueviolet:"8a2be2",brown:"a52a2a",burlywood:"deb887",burntsienna:"ea7e5d",cadetblue:"5f9ea0",chartreuse:"7fff00",chocolate:"d2691e",coral:"ff7f50",cornflowerblue:"6495ed",cornsilk:"fff8dc",crimson:"dc143c",cyan:"0ff",darkblue:"00008b",darkcyan:"008b8b",darkgoldenrod:"b8860b",darkgray:"a9a9a9",darkgreen:"006400",darkgrey:"a9a9a9",darkkhaki:"bdb76b",darkmagenta:"8b008b",darkolivegreen:"556b2f",darkorange:"ff8c00",darkorchid:"9932cc",darkred:"8b0000",darksalmon:"e9967a",darkseagreen:"8fbc8f",darkslateblue:"483d8b",darkslategray:"2f4f4f",darkslategrey:"2f4f4f",darkturquoise:"00ced1",darkviolet:"9400d3",deeppink:"ff1493",deepskyblue:"00bfff",dimgray:"696969",dimgrey:"696969",dodgerblue:"1e90ff",firebrick:"b22222",floralwhite:"fffaf0",forestgreen:"228b22",fuchsia:"f0f",gainsboro:"dcdcdc",ghostwhite:"f8f8ff",gold:"ffd700",goldenrod:"daa520",gray:"808080",green:"008000",greenyellow:"adff2f",grey:"808080",honeydew:"f0fff0",hotpink:"ff69b4",indianred:"cd5c5c",indigo:"4b0082",ivory:"fffff0",khaki:"f0e68c",lavender:"e6e6fa",lavenderblush:"fff0f5",lawngreen:"7cfc00",lemonchiffon:"fffacd",lightblue:"add8e6",lightcoral:"f08080",lightcyan:"e0ffff",lightgoldenrodyellow:"fafad2",lightgray:"d3d3d3",lightgreen:"90ee90",lightgrey:"d3d3d3",lightpink:"ffb6c1",lightsalmon:"ffa07a",lightseagreen:"20b2aa",lightskyblue:"87cefa",lightslategray:"789",lightslategrey:"789",lightsteelblue:"b0c4de",lightyellow:"ffffe0",lime:"0f0",limegreen:"32cd32",linen:"faf0e6",magenta:"f0f",maroon:"800000",mediumaquamarine:"66cdaa",mediumblue:"0000cd",mediumorchid:"ba55d3",mediumpurple:"9370db",mediumseagreen:"3cb371",mediumslateblue:"7b68ee",mediumspringgreen:"00fa9a",mediumturquoise:"48d1cc",mediumvioletred:"c71585",midnightblue:"191970",mintcream:"f5fffa",mistyrose:"ffe4e1",moccasin:"ffe4b5",navajowhite:"ffdead",navy:"000080",oldlace:"fdf5e6",olive:"808000",olivedrab:"6b8e23",orange:"ffa500",orangered:"ff4500",orchid:"da70d6",palegoldenrod:"eee8aa",palegreen:"98fb98",paleturquoise:"afeeee",palevioletred:"db7093",papayawhip:"ffefd5",peachpuff:"ffdab9",peru:"cd853f",pink:"ffc0cb",plum:"dda0dd",powderblue:"b0e0e6",purple:"800080",rebeccapurple:"663399",red:"f00",rosybrown:"bc8f8f",royalblue:"4169e1",saddlebrown:"8b4513",salmon:"fa8072",sandybrown:"f4a460",seagreen:"2e8b57",seashell:"fff5ee",sienna:"a0522d",silver:"c0c0c0",skyblue:"87ceeb",slateblue:"6a5acd",slategray:"708090",slategrey:"708090",snow:"fffafa",springgreen:"00ff7f",steelblue:"4682b4",tan:"d2b48c",teal:"008080",thistle:"d8bfd8",tomato:"ff6347",turquoise:"40e0d0",violet:"ee82ee",wheat:"f5deb3",white:"fff",whitesmoke:"f5f5f5",yellow:"ff0",yellowgreen:"9acd32"},Jr=Lr.hexNames=function(e){var t={};for(var r in e)e.hasOwnProperty(r)&&(t[e[r]]=r);return t}(Zr);function en(e){return e=parseFloat(e),(isNaN(e)||e<0||e>1)&&(e=1),e}function 
tn(e,t){(function(e){return"string"==typeof e&&-1!=e.indexOf(".")&&1===parseFloat(e)})(e)&&(e="100%");var r=function(e){return"string"===typeof e&&-1!=e.indexOf("%")}(e);return e=Math.min(t,Math.max(0,parseFloat(e))),r&&(e=parseInt(e*t,10)/100),Math.abs(e-t)<1e-6?1:e%t/parseFloat(t)}function rn(e){return Math.min(1,Math.max(0,e))}function nn(e){return parseInt(e,16)}function an(e){return 1==e.length?"0"+e:""+e}function on(e){return e<=1&&(e=100*e+"%"),e}function sn(e){return Math.round(255*parseFloat(e)).toString(16)}function ln(e){return nn(e)/255}var un=function(){var e="(?:[-\\+]?\\d*\\.\\d+%?)|(?:[-\\+]?\\d+%?)",t="[\\s|\\(]+("+e+")[,|\\s]+("+e+")[,|\\s]+("+e+")\\s*\\)?",r="[\\s|\\(]+("+e+")[,|\\s]+("+e+")[,|\\s]+("+e+")[,|\\s]+("+e+")\\s*\\)?";return{CSS_UNIT:new RegExp(e),rgb:new RegExp("rgb"+t),rgba:new RegExp("rgba"+r),hsl:new RegExp("hsl"+t),hsla:new RegExp("hsla"+r),hsv:new RegExp("hsv"+t),hsva:new RegExp("hsva"+r),hex3:/^#?([0-9a-fA-F]{1})([0-9a-fA-F]{1})([0-9a-fA-F]{1})$/,hex6:/^#?([0-9a-fA-F]{2})([0-9a-fA-F]{2})([0-9a-fA-F]{2})$/,hex4:/^#?([0-9a-fA-F]{1})([0-9a-fA-F]{1})([0-9a-fA-F]{1})([0-9a-fA-F]{1})$/,hex8:/^#?([0-9a-fA-F]{2})([0-9a-fA-F]{2})([0-9a-fA-F]{2})([0-9a-fA-F]{2})$/}}();function cn(e){return!!un.CSS_UNIT.exec(e)}var dn=function(e){var t=0,r=0;return Or(["r","g","b","a","h","s","l","v"],(function(n){if(e[n]&&(t+=1,isNaN(e[n])||(r+=1),"s"===n||"l"===n)){/^\d+%$/.test(e[n])&&(r+=1)}})),t===r&&e},pn=function(e,t){var r=e.hex?Lr(e.hex):Lr(e),n=r.toHsl(),a=r.toHsv(),o=r.toRgb(),i=r.toHex();return 0===n.s&&(n.h=t||0,a.h=t||0),{hsl:n,hex:"000000"===i&&0===o.a?"transparent":"#"+i,rgb:o,hsv:a,oldHue:e.h||t||n.h,source:e.source}},fn=function(e){if("transparent"===e)return!0;var t="#"===String(e).charAt(0)?1:0;return e.length!==4+t&&e.length<7+t&&Lr(e).isValid()},hn=function(e){if(!e)return"#fff";var t=pn(e);return"transparent"===t.hex?"rgba(0,0,0,0.4)":(299*t.rgb.r+587*t.rgb.g+114*t.rgb.b)/1e3>=128?"#000":"#fff"},vn=function(e,t){return Lr(t+" ("+e.replace("\xb0","")+")")._ok},gn=Object.assign||function(e){for(var t=1;t1&&void 0!==arguments[1]?arguments[1]:"span";return function(r){function a(){var e,t,r;!function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}(this,a);for(var n=arguments.length,o=Array(n),i=0;is))return!1;var u=o.get(e),c=o.get(t);if(u&&c)return u==t&&c==e;var d=-1,p=!0,f=2&r?new On:void 0;for(o.set(e,t),o.set(t,e);++d1&&(e.a=1),r.props.onChange({h:r.props.hsl.h,s:r.props.hsl.s,l:r.props.hsl.l,a:Math.round(100*e.a)/100,source:"rgb"},t)):(e.h||e.s||e.l)&&("string"===typeof e.s&&e.s.includes("%")&&(e.s=e.s.replace("%","")),"string"===typeof e.l&&e.l.includes("%")&&(e.l=e.l.replace("%","")),1==e.s?e.s=.01:1==e.l&&(e.l=.01),r.props.onChange({h:e.h||r.props.hsl.h,s:Number(yo(e.s)?r.props.hsl.s:e.s),l:Number(yo(e.l)?r.props.hsl.l:e.l),source:"hsl"},t))},r.showHighlight=function(e){e.currentTarget.style.background="#eee"},r.hideHighlight=function(e){e.currentTarget.style.background="transparent"},1!==e.hsl.a&&"hex"===e.view?r.state={view:"rgb"}:r.state={view:e.view},r}return function(e,t){if("function"!==typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}(t,e),xo(t,[{key:"render",value:function(){var 
e=this,t=(0,a.Ay)({default:{wrap:{paddingTop:"16px",display:"flex"},fields:{flex:"1",display:"flex",marginLeft:"-6px"},field:{paddingLeft:"6px",width:"100%"},alpha:{paddingLeft:"6px",width:"100%"},toggle:{width:"32px",textAlign:"right",position:"relative"},icon:{marginRight:"-4px",marginTop:"12px",cursor:"pointer",position:"relative"},iconHighlight:{position:"absolute",width:"24px",height:"28px",background:"#eee",borderRadius:"4px",top:"10px",left:"12px",display:"none"},input:{fontSize:"11px",color:"#333",width:"100%",borderRadius:"2px",border:"none",boxShadow:"inset 0 0 0 1px #dadada",height:"21px",textAlign:"center"},label:{textTransform:"uppercase",fontSize:"11px",lineHeight:"11px",color:"#969696",textAlign:"center",display:"block",marginTop:"12px"},svg:{fill:"#333",width:"24px",height:"24px",border:"1px transparent solid",borderRadius:"5px"}},disableAlpha:{alpha:{display:"none"}}},this.props,this.state),r=void 0;return"hex"===this.state.view?r=n.createElement("div",{style:t.fields,className:"flexbox-fix"},n.createElement("div",{style:t.field},n.createElement(m,{style:{input:t.input,label:t.label},label:"hex",value:this.props.hex,onChange:this.handleChange}))):"rgb"===this.state.view?r=n.createElement("div",{style:t.fields,className:"flexbox-fix"},n.createElement("div",{style:t.field},n.createElement(m,{style:{input:t.input,label:t.label},label:"r",value:this.props.rgb.r,onChange:this.handleChange})),n.createElement("div",{style:t.field},n.createElement(m,{style:{input:t.input,label:t.label},label:"g",value:this.props.rgb.g,onChange:this.handleChange})),n.createElement("div",{style:t.field},n.createElement(m,{style:{input:t.input,label:t.label},label:"b",value:this.props.rgb.b,onChange:this.handleChange})),n.createElement("div",{style:t.alpha},n.createElement(m,{style:{input:t.input,label:t.label},label:"a",value:this.props.rgb.a,arrowOffset:.01,onChange:this.handleChange}))):"hsl"===this.state.view&&(r=n.createElement("div",{style:t.fields,className:"flexbox-fix"},n.createElement("div",{style:t.field},n.createElement(m,{style:{input:t.input,label:t.label},label:"h",value:Math.round(this.props.hsl.h),onChange:this.handleChange})),n.createElement("div",{style:t.field},n.createElement(m,{style:{input:t.input,label:t.label},label:"s",value:Math.round(100*this.props.hsl.s)+"%",onChange:this.handleChange})),n.createElement("div",{style:t.field},n.createElement(m,{style:{input:t.input,label:t.label},label:"l",value:Math.round(100*this.props.hsl.l)+"%",onChange:this.handleChange})),n.createElement("div",{style:t.alpha},n.createElement(m,{style:{input:t.input,label:t.label},label:"a",value:this.props.hsl.a,arrowOffset:.01,onChange:this.handleChange})))),n.createElement("div",{style:t.wrap,className:"flexbox-fix"},r,n.createElement("div",{style:t.toggle},n.createElement("div",{style:t.icon,onClick:this.toggleViews,ref:function(t){return e.icon=t}},n.createElement(wo.A,{style:t.svg,onMouseOver:this.showHighlight,onMouseEnter:this.showHighlight,onMouseOut:this.hideHighlight}))))}}],[{key:"getDerivedStateFromProps",value:function(e,t){return 1!==e.hsl.a&&"hex"===t.view?{view:"rgb"}:null}}]),t}(n.Component);ko.defaultProps={view:"hex"};const Do=ko;const Co=function(){var e=(0,a.Ay)({default:{picker:{width:"12px",height:"12px",borderRadius:"6px",transform:"translate(-6px, -1px)",backgroundColor:"rgb(248, 248, 248)",boxShadow:"0 1px 4px 0 rgba(0, 0, 0, 0.37)"}}});return n.createElement("div",{style:e.picker})};const Eo=function(){var 
e=(0,a.Ay)({default:{picker:{width:"12px",height:"12px",borderRadius:"6px",boxShadow:"inset 0 0 0 1px #fff",transform:"translate(-6px, -6px)"}}});return n.createElement("div",{style:e.picker})};var Ao=function(e){var t=e.width,r=e.onChange,o=e.disableAlpha,i=e.rgb,s=e.hsl,l=e.hsv,c=e.hex,d=e.renderers,p=e.styles,h=void 0===p?{}:p,v=e.className,g=void 0===v?"":v,m=e.defaultView,b=(0,a.Ay)(nr({default:{picker:{width:t,background:"#fff",borderRadius:"2px",boxShadow:"0 0 2px rgba(0,0,0,.3), 0 4px 8px rgba(0,0,0,.3)",boxSizing:"initial",fontFamily:"Menlo"},saturation:{width:"100%",paddingBottom:"55%",position:"relative",borderRadius:"2px 2px 0 0",overflow:"hidden"},Saturation:{radius:"2px 2px 0 0"},body:{padding:"16px 16px 12px"},controls:{display:"flex"},color:{width:"32px"},swatch:{marginTop:"6px",width:"16px",height:"16px",borderRadius:"8px",position:"relative",overflow:"hidden"},active:{absolute:"0px 0px 0px 0px",borderRadius:"8px",boxShadow:"inset 0 0 0 1px rgba(0,0,0,.1)",background:"rgba("+i.r+", "+i.g+", "+i.b+", "+i.a+")",zIndex:"2"},toggles:{flex:"1"},hue:{height:"10px",position:"relative",marginBottom:"8px"},Hue:{radius:"2px"},alpha:{height:"10px",position:"relative"},Alpha:{radius:"2px"}},disableAlpha:{color:{width:"22px"},alpha:{display:"none"},hue:{marginBottom:"0px"},swatch:{width:"10px",height:"10px",marginTop:"0px"}}},h),{disableAlpha:o});return n.createElement("div",{style:b.picker,className:"chrome-picker "+g},n.createElement("div",{style:b.saturation},n.createElement(Dr,{style:b.Saturation,hsl:s,hsv:l,pointer:Eo,onChange:r})),n.createElement("div",{style:b.body},n.createElement("div",{style:b.controls,className:"flexbox-fix"},n.createElement("div",{style:b.color},n.createElement("div",{style:b.swatch},n.createElement("div",{style:b.active}),n.createElement(u,{renderers:d}))),n.createElement("div",{style:b.toggles},n.createElement("div",{style:b.hue},n.createElement(w,{style:b.Hue,hsl:s,pointer:Co,onChange:r})),n.createElement("div",{style:b.alpha},n.createElement(f,{style:b.Alpha,rgb:i,hsl:s,pointer:Co,renderers:d,onChange:r})))),n.createElement(Do,{rgb:i,hsl:s,hex:c,view:m,onChange:r,disableAlpha:o})))};Ao.propTypes={width:k().oneOfType([k().string,k().number]),disableAlpha:k().bool,styles:k().object,defaultView:k().oneOf(["hex","rgb","hsl"])},Ao.defaultProps={width:225,disableAlpha:!1,styles:{}};bn(Ao);const So=function(e){var t=e.color,r=e.onClick,o=void 0===r?function(){}:r,i=e.onSwatchHover,s=e.active,l=(0,a.Ay)({default:{color:{background:t,width:"15px",height:"15px",float:"left",marginRight:"5px",marginBottom:"5px",position:"relative",cursor:"pointer"},dot:{absolute:"5px 5px 5px 5px",background:hn(t),borderRadius:"50%",opacity:"0"}},active:{dot:{opacity:"1"}},"color-#FFFFFF":{color:{boxShadow:"inset 0 0 0 1px #ddd"},dot:{background:"#000"}},transparent:{dot:{background:"#000"}}},{active:s,"color-#FFFFFF":"#FFFFFF"===t,transparent:"transparent"===t});return n.createElement(Dn,{style:l.color,color:t,onClick:o,onHover:i,focusStyle:{boxShadow:"0 0 4px "+t}},n.createElement("div",{style:l.dot}))};const _o=function(e){var 
t=e.hex,r=e.rgb,o=e.onChange,i=(0,a.Ay)({default:{fields:{display:"flex",paddingBottom:"6px",paddingRight:"5px",position:"relative"},active:{position:"absolute",top:"6px",left:"5px",height:"9px",width:"9px",background:t},HEXwrap:{flex:"6",position:"relative"},HEXinput:{width:"80%",padding:"0px",paddingLeft:"20%",border:"none",outline:"none",background:"none",fontSize:"12px",color:"#333",height:"16px"},HEXlabel:{display:"none"},RGBwrap:{flex:"3",position:"relative"},RGBinput:{width:"70%",padding:"0px",paddingLeft:"30%",border:"none",outline:"none",background:"none",fontSize:"12px",color:"#333",height:"16px"},RGBlabel:{position:"absolute",top:"3px",left:"0px",lineHeight:"16px",textTransform:"uppercase",fontSize:"12px",color:"#999"}}}),s=function(e,t){e.r||e.g||e.b?o({r:e.r||r.r,g:e.g||r.g,b:e.b||r.b,source:"rgb"},t):o({hex:e.hex,source:"hex"},t)};return n.createElement("div",{style:i.fields,className:"flexbox-fix"},n.createElement("div",{style:i.active}),n.createElement(m,{style:{wrap:i.HEXwrap,input:i.HEXinput,label:i.HEXlabel},label:"hex",value:t,onChange:s}),n.createElement(m,{style:{wrap:i.RGBwrap,input:i.RGBinput,label:i.RGBlabel},label:"r",value:r.r,onChange:s}),n.createElement(m,{style:{wrap:i.RGBwrap,input:i.RGBinput,label:i.RGBlabel},label:"g",value:r.g,onChange:s}),n.createElement(m,{style:{wrap:i.RGBwrap,input:i.RGBinput,label:i.RGBlabel},label:"b",value:r.b,onChange:s}))};var Mo=function(e){var t=e.onChange,r=e.onSwatchHover,o=e.colors,i=e.hex,s=e.rgb,l=e.styles,u=void 0===l?{}:l,c=e.className,d=void 0===c?"":c,p=(0,a.Ay)(nr({default:{Compact:{background:"#f6f6f6",radius:"4px"},compact:{paddingTop:"5px",paddingLeft:"5px",boxSizing:"initial",width:"240px"},clear:{clear:"both"}}},u)),f=function(e,r){e.hex?fn(e.hex)&&t({hex:e.hex,source:"hex"},r):t(e,r)};return n.createElement(or,{style:p.Compact,styles:u},n.createElement("div",{style:p.compact,className:"compact-picker "+d},n.createElement("div",null,Ga(o,(function(e){return n.createElement(So,{key:e,color:e,active:e.toLowerCase()===i,onClick:f,onSwatchHover:r})})),n.createElement("div",{style:p.clear})),n.createElement(_o,{hex:i,rgb:s,onChange:f})))};Mo.propTypes={colors:k().arrayOf(k().string),styles:k().object},Mo.defaultProps={colors:["#4D4D4D","#999999","#FFFFFF","#F44E3B","#FE9200","#FCDC00","#DBDF00","#A4DD00","#68CCCA","#73D8FF","#AEA1FF","#FDA1FF","#333333","#808080","#cccccc","#D33115","#E27300","#FCC400","#B0BC00","#68BC00","#16A5A5","#009CE0","#7B64FF","#FA28FF","#000000","#666666","#B3B3B3","#9F0500","#C45100","#FB9E00","#808900","#194D33","#0C797D","#0062B1","#653294","#AB149E"],styles:{}};bn(Mo);const To=(0,a.H8)((function(e){var t=e.hover,r=e.color,o=e.onClick,i=e.onSwatchHover,s={position:"relative",zIndex:"2",outline:"2px solid #fff",boxShadow:"0 0 5px 2px rgba(0,0,0,0.25)"},l=(0,a.Ay)({default:{swatch:{width:"25px",height:"25px",fontSize:"0"}},hover:{swatch:s}},{hover:t});return n.createElement("div",{style:l.swatch},n.createElement(Dn,{color:r,onClick:o,onHover:i,focusStyle:s}))}));var Oo=function(e){var t=e.width,r=e.colors,o=e.onChange,i=e.onSwatchHover,s=e.triangle,l=e.styles,u=void 0===l?{}:l,c=e.className,d=void 0===c?"":c,p=(0,a.Ay)(nr({default:{card:{width:t,background:"#fff",border:"1px solid rgba(0,0,0,0.2)",boxShadow:"0 3px 12px rgba(0,0,0,0.15)",borderRadius:"4px",position:"relative",padding:"5px",display:"flex",flexWrap:"wrap"},triangle:{position:"absolute",border:"7px solid transparent",borderBottomColor:"#fff"},triangleShadow:{position:"absolute",border:"8px solid 
transparent",borderBottomColor:"rgba(0,0,0,0.15)"}},"hide-triangle":{triangle:{display:"none"},triangleShadow:{display:"none"}},"top-left-triangle":{triangle:{top:"-14px",left:"10px"},triangleShadow:{top:"-16px",left:"9px"}},"top-right-triangle":{triangle:{top:"-14px",right:"10px"},triangleShadow:{top:"-16px",right:"9px"}},"bottom-left-triangle":{triangle:{top:"35px",left:"10px",transform:"rotate(180deg)"},triangleShadow:{top:"37px",left:"9px",transform:"rotate(180deg)"}},"bottom-right-triangle":{triangle:{top:"35px",right:"10px",transform:"rotate(180deg)"},triangleShadow:{top:"37px",right:"9px",transform:"rotate(180deg)"}}},u),{"hide-triangle":"hide"===s,"top-left-triangle":"top-left"===s,"top-right-triangle":"top-right"===s,"bottom-left-triangle":"bottom-left"===s,"bottom-right-triangle":"bottom-right"===s}),f=function(e,t){return o({hex:e,source:"hex"},t)};return n.createElement("div",{style:p.card,className:"github-picker "+d},n.createElement("div",{style:p.triangleShadow}),n.createElement("div",{style:p.triangle}),Ga(r,(function(e){return n.createElement(To,{color:e,key:e,onClick:f,onSwatchHover:i})})))};Oo.propTypes={width:k().oneOfType([k().string,k().number]),colors:k().arrayOf(k().string),triangle:k().oneOf(["hide","top-left","top-right","bottom-left","bottom-right"]),styles:k().object},Oo.defaultProps={width:200,colors:["#B80000","#DB3E00","#FCCB00","#008B02","#006B76","#1273DE","#004DCF","#5300EB","#EB9694","#FAD0C3","#FEF3BD","#C1E1C5","#BEDADC","#C4DEF6","#BED3F3","#D4C4FB"],triangle:"top-left",styles:{}};bn(Oo);const Po=function(e){var t=e.direction,r=(0,a.Ay)({default:{picker:{width:"18px",height:"18px",borderRadius:"50%",transform:"translate(-9px, -1px)",backgroundColor:"rgb(248, 248, 248)",boxShadow:"0 1px 4px 0 rgba(0, 0, 0, 0.37)"}},vertical:{picker:{transform:"translate(-3px, -9px)"}}},{vertical:"vertical"===t});return n.createElement("div",{style:r.picker})};var No=Object.assign||function(e){for(var t=1;t.5});return n.createElement("div",{style:r.picker})};const Yo=function(){var e=(0,a.Ay)({default:{triangle:{width:0,height:0,borderStyle:"solid",borderWidth:"4px 0 4px 6px",borderColor:"transparent transparent transparent #fff",position:"absolute",top:"1px",left:"1px"},triangleBorder:{width:0,height:0,borderStyle:"solid",borderWidth:"5px 0 5px 8px",borderColor:"transparent transparent transparent #555"},left:{Extend:"triangleBorder",transform:"translate(-13px, -4px)"},leftInside:{Extend:"triangle",transform:"translate(-8px, -5px)"},right:{Extend:"triangleBorder",transform:"translate(20px, -14px) rotate(180deg)"},rightInside:{Extend:"triangle",transform:"translate(-8px, -5px)"}}});return n.createElement("div",{style:e.pointer},n.createElement("div",{style:e.left},n.createElement("div",{style:e.leftInside})),n.createElement("div",{style:e.right},n.createElement("div",{style:e.rightInside})))};const jo=function(e){var t=e.onClick,r=e.label,o=e.children,i=e.active,s=(0,a.Ay)({default:{button:{backgroundImage:"linear-gradient(-180deg, #FFFFFF 0%, #E6E6E6 100%)",border:"1px solid #878787",borderRadius:"2px",height:"20px",boxShadow:"0 1px 0 0 #EAEAEA",fontSize:"14px",color:"#000",lineHeight:"20px",textAlign:"center",marginBottom:"10px",cursor:"pointer"}},active:{button:{boxShadow:"0 0 0 1px #878787"}}},{active:i});return n.createElement("div",{style:s.button,onClick:t},r||o)};const Bo=function(e){var t=e.rgb,r=e.currentColor,o=(0,a.Ay)({default:{swatches:{border:"1px solid #B3B3B3",borderBottom:"1px solid 
#F0F0F0",marginBottom:"2px",marginTop:"1px"},new:{height:"34px",background:"rgb("+t.r+","+t.g+", "+t.b+")",boxShadow:"inset 1px 0 0 #000, inset -1px 0 0 #000, inset 0 1px 0 #000"},current:{height:"34px",background:r,boxShadow:"inset 1px 0 0 #000, inset -1px 0 0 #000, inset 0 -1px 0 #000"},label:{fontSize:"14px",color:"#000",textAlign:"center"}}});return n.createElement("div",null,n.createElement("div",{style:o.label},"new"),n.createElement("div",{style:o.swatches},n.createElement("div",{style:o.new}),n.createElement("div",{style:o.current})),n.createElement("div",{style:o.label},"current"))};var Io=function(){function e(e,t){for(var r=0;r100&&(e.a=100),e.a/=100,t({h:o.h,s:o.s,l:o.l,a:e.a,source:"rgb"},n))};return n.createElement("div",{style:l.fields,className:"flexbox-fix"},n.createElement("div",{style:l.double},n.createElement(m,{style:{input:l.input,label:l.label},label:"hex",value:i.replace("#",""),onChange:u})),n.createElement("div",{style:l.single},n.createElement(m,{style:{input:l.input,label:l.label},label:"r",value:r.r,onChange:u,dragLabel:"true",dragMax:"255"})),n.createElement("div",{style:l.single},n.createElement(m,{style:{input:l.input,label:l.label},label:"g",value:r.g,onChange:u,dragLabel:"true",dragMax:"255"})),n.createElement("div",{style:l.single},n.createElement(m,{style:{input:l.input,label:l.label},label:"b",value:r.b,onChange:u,dragLabel:"true",dragMax:"255"})),n.createElement("div",{style:l.alpha},n.createElement(m,{style:{input:l.input,label:l.label},label:"a",value:Math.round(100*r.a),onChange:u,dragLabel:"true",dragMax:"100"})))};var Uo=Object.assign||function(e){for(var t=1;te.length)&&(t=e.length);for(var r=0,n=new Array(t);r1&&void 0!==arguments[1]?arguments[1]:{},r=t.minDate,n=t.maxDate,a=t.excludeDates,o=t.excludeDateIntervals,i=t.includeDates,s=t.includeDateIntervals,l=t.filterDate;return pr(e,{minDate:r,maxDate:n})||a&&a.some((function(t){return $t(e,t)}))||o&&o.some((function(t){var r=t.start,n=t.end;return ct.default(e,{start:r,end:n})}))||i&&!i.some((function(t){return $t(e,t)}))||s&&!s.some((function(t){var r=t.start,n=t.end;return ct.default(e,{start:r,end:n})}))||l&&!l(jt(e))||!1}function or(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},r=t.excludeDates,n=t.excludeDateIntervals;return n&&n.length>0?n.some((function(t){var r=t.start,n=t.end;return ct.default(e,{start:r,end:n})})):r&&r.some((function(t){return $t(e,t)}))||!1}function ir(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},r=t.minDate,n=t.maxDate,a=t.excludeDates,o=t.includeDates,i=t.filterDate;return pr(e,{minDate:$e.default(r),maxDate:tt.default(n)})||a&&a.some((function(t){return Xt(e,t)}))||o&&!o.some((function(t){return Xt(e,t)}))||i&&!i(jt(e))||!1}function sr(e,t,r,n){var a=Fe.default(e),o=Re.default(e),i=Fe.default(t),s=Re.default(t),l=Fe.default(n);return a===i&&a===l?o<=r&&r<=s:a=r||la:void 0}function lr(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},r=t.minDate,n=t.maxDate,a=t.excludeDates,o=t.includeDates,i=t.filterDate;return pr(e,{minDate:r,maxDate:n})||a&&a.some((function(t){return Vt(e,t)}))||o&&!o.some((function(t){return Vt(e,t)}))||i&&!i(jt(e))||!1}function ur(e,t,r){if(!he.default(t)||!he.default(r))return!1;var n=Fe.default(t),a=Fe.default(r);return n<=e&&a>=e}function cr(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},r=t.minDate,n=t.maxDate,a=t.excludeDates,o=t.includeDates,i=t.filterDate,s=new Date(e,0,1);return 
pr(s,{minDate:Je.default(r),maxDate:rt.default(n)})||a&&a.some((function(e){return Gt(s,e)}))||o&&!o.some((function(e){return Gt(s,e)}))||i&&!i(jt(s))||!1}function dr(e,t,r,n){var a=Fe.default(e),o=Le.default(e),i=Fe.default(t),s=Le.default(t),l=Fe.default(n);return a===i&&a===l?o<=r&&r<=s:a=r||la:void 0}function pr(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},r=t.minDate,n=t.maxDate;return r&&Qe.default(e,r)<0||n&&Qe.default(e,n)>0}function fr(e,t){return t.some((function(t){return Te.default(t)===Te.default(e)&&Me.default(t)===Me.default(e)}))}function hr(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},r=t.excludeTimes,n=t.includeTimes,a=t.filterTime;return r&&fr(e,r)||n&&!fr(e,n)||a&&!a(e)||!1}function vr(e,t){var r=t.minTime,n=t.maxTime;if(!r||!n)throw new Error("Both minTime and maxTime props required");var a,o=jt(),i=Ie.default(Be.default(o,Me.default(e)),Te.default(e)),s=Ie.default(Be.default(o,Me.default(r)),Te.default(r)),l=Ie.default(Be.default(o,Me.default(n)),Te.default(n));try{a=!ct.default(i,{start:s,end:l})}catch(e){a=!1}return a}function gr(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},r=t.minDate,n=t.includeDates,a=Ee.default(e,1);return r&&Ke.default(r,a)>0||n&&n.every((function(e){return Ke.default(e,a)>0}))||!1}function mr(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},r=t.maxDate,n=t.includeDates,a=we.default(e,1);return r&&Ke.default(a,r)>0||n&&n.every((function(e){return Ke.default(a,e)>0}))||!1}function br(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},r=t.minDate,n=t.includeDates,a=Se.default(e,1);return r&&Ge.default(r,a)>0||n&&n.every((function(e){return Ge.default(e,a)>0}))||!1}function yr(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},r=t.maxDate,n=t.includeDates,a=ke.default(e,1);return r&&Ge.default(a,r)>0||n&&n.every((function(e){return Ge.default(a,e)>0}))||!1}function wr(e){var t=e.minDate,r=e.includeDates;if(r&&t){var n=r.filter((function(e){return Qe.default(e,t)>=0}));return qe.default(n)}return r?qe.default(r):t}function xr(e){var t=e.maxDate,r=e.includeDates;if(r&&t){var n=r.filter((function(e){return Qe.default(e,t)<=0}));return ze.default(n)}return r?ze.default(r):t}function kr(){for(var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:[],t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"react-datepicker__day--highlighted",r=new Map,n=0,a=e.length;n0&&void 0!==arguments[0]?arguments[0]:[],t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"react-datepicker__day--holidays",r=new Map;return e.forEach((function(e){var n=e.date,a=e.holidayName;if(fe.default(n)){var o=It(n,"MM.dd.yyyy"),i=r.get(o)||{};if(!("className"in i)||i.className!==t||(s=i.holidayNames,l=[a],s.length!==l.length||!s.every((function(e,t){return e===l[t]})))){var s,l;i.className=t;var u=i.holidayNames;i.holidayNames=u?[].concat(Tt(u),[a]):[a],r.set(o,i)}}})),r}function Cr(e,t,r,n,a){for(var o=a.length,i=[],s=0;s1&&void 0!==arguments[1]?arguments[1]:Ft,r=Math.ceil(Fe.default(e)/t)*t;return{startPeriod:r-(t-1),endPeriod:r}}function Sr(e){var t=e.getSeconds(),r=e.getMilliseconds();return dt.default(e.getTime()-1e3*t-r)}function _r(e,t,r,n){for(var a=[],o=0;o<2*t+1;o++){var i=e+t-o,s=!0;r&&(s=Fe.default(r)<=i),n&&s&&(s=Fe.default(n)>=i),s&&a.push(i)}return a}var Mr=function(e){Et(n,e);var r=Mt(n);function n(e){var a;wt(this,n),Dt(_t(a=r.call(this,e)),"renderOptions",(function(){var e=a.props.year,t=a.state.yearsList.map((function(t){return 
de.default.createElement("div",{className:e===t?"react-datepicker__year-option react-datepicker__year-option--selected_year":"react-datepicker__year-option",key:t,onClick:a.onChange.bind(_t(a),t),"aria-selected":e===t?"true":void 0},e===t?de.default.createElement("span",{className:"react-datepicker__year-option--selected"},"\u2713"):"",t)})),r=a.props.minDate?Fe.default(a.props.minDate):null,n=a.props.maxDate?Fe.default(a.props.maxDate):null;return n&&a.state.yearsList.find((function(e){return e===n}))||t.unshift(de.default.createElement("div",{className:"react-datepicker__year-option",key:"upcoming",onClick:a.incrementYears},de.default.createElement("a",{className:"react-datepicker__navigation react-datepicker__navigation--years react-datepicker__navigation--years-upcoming"}))),r&&a.state.yearsList.find((function(e){return e===r}))||t.push(de.default.createElement("div",{className:"react-datepicker__year-option",key:"previous",onClick:a.decrementYears},de.default.createElement("a",{className:"react-datepicker__navigation react-datepicker__navigation--years react-datepicker__navigation--years-previous"}))),t})),Dt(_t(a),"onChange",(function(e){a.props.onChange(e)})),Dt(_t(a),"handleClickOutside",(function(){a.props.onCancel()})),Dt(_t(a),"shiftYears",(function(e){var t=a.state.yearsList.map((function(t){return t+e}));a.setState({yearsList:t})})),Dt(_t(a),"incrementYears",(function(){return a.shiftYears(1)})),Dt(_t(a),"decrementYears",(function(){return a.shiftYears(-1)}));var o=e.yearDropdownItemNumber,i=e.scrollableYearDropdown,s=o||(i?10:5);return a.state={yearsList:_r(a.props.year,s,a.props.minDate,a.props.maxDate)},a.dropdownRef=t.createRef(),a}return kt(n,[{key:"componentDidMount",value:function(){var e=this.dropdownRef.current;if(e){var t=e.children?Array.from(e.children):null,r=t?t.find((function(e){return e.ariaSelected})):null;e.scrollTop=r?r.offsetTop+(r.clientHeight-e.clientHeight)/2:(e.scrollHeight-e.clientHeight)/2}}},{key:"render",value:function(){var e=pe.default({"react-datepicker__year-dropdown":!0,"react-datepicker__year-dropdown--scrollable":this.props.scrollableYearDropdown});return de.default.createElement("div",{className:e,ref:this.dropdownRef},this.renderOptions())}}]),n}(de.default.Component),Tr=ht.default(Mr),Or=function(e){Et(r,e);var t=Mt(r);function r(){var e;wt(this,r);for(var n=arguments.length,a=new Array(n),o=0;o0?a.get(o).holidayNames.join(", "):""})),Dt(_t(e),"getTabIndex",(function(t,r){var n=t||e.props.selected,a=r||e.props.preSelection;return(!e.props.showWeekPicker||!e.props.showWeekNumber&&e.isStartOfWeek())&&(e.isKeyboardSelected()||e.isSameDay(n)&&$t(a,n))?0:-1})),Dt(_t(e),"handleFocusDay",(function(){var t,r=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},n=!1;0===e.getTabIndex()&&!r.isInputFocused&&e.isSameDay(e.props.preSelection)&&(document.activeElement&&document.activeElement!==document.body||(n=!0),e.props.inline&&!e.props.shouldFocusDayInline&&(n=!1),e.props.containerRef&&e.props.containerRef.current&&e.props.containerRef.current.contains(document.activeElement)&&document.activeElement.classList.contains("react-datepicker__day")&&(n=!0),e.props.monthShowsDuplicateDaysEnd&&e.isAfterMonth()&&(n=!1),e.props.monthShowsDuplicateDaysStart&&e.isBeforeMonth()&&(n=!1)),n&&(null===(t=e.dayEl.current)||void 0===t||t.focus({preventScroll:!0}))})),Dt(_t(e),"renderDayContents",(function(){return 
e.props.monthShowsDuplicateDaysEnd&&e.isAfterMonth()||e.props.monthShowsDuplicateDaysStart&&e.isBeforeMonth()?null:e.props.renderDayContents?e.props.renderDayContents(Pe.default(e.props.day),e.props.day):Pe.default(e.props.day)})),Dt(_t(e),"render",(function(){return de.default.createElement("div",{ref:e.dayEl,className:e.getClassNames(e.props.day),onKeyDown:e.handleOnKeyDown,onClick:e.handleClick,onMouseEnter:e.handleMouseEnter,tabIndex:e.getTabIndex(),"aria-label":e.getAriaLabel(),role:"option",title:e.getTitle(),"aria-disabled":e.isDisabled(),"aria-current":e.isCurrentDay()?"date":void 0,"aria-selected":e.isSelected()||e.isInRange()},e.renderDayContents(),""!==e.getTitle()&&de.default.createElement("span",{className:"holiday-overlay"},e.getTitle()))})),e}return kt(r,[{key:"componentDidMount",value:function(){this.handleFocusDay()}},{key:"componentDidUpdate",value:function(e){this.handleFocusDay(e)}}]),r}(de.default.Component),Ir=function(e){Et(r,e);var t=Mt(r);function r(){var e;wt(this,r);for(var n=arguments.length,a=new Array(n),o=0;o0&&void 0!==arguments[0]?arguments[0]:{},r=!1;0===e.getTabIndex()&&!t.isInputFocused&&$t(e.props.date,e.props.preSelection)&&(document.activeElement&&document.activeElement!==document.body||(r=!0),e.props.inline&&!e.props.shouldFocusDayInline&&(r=!1),e.props.containerRef&&e.props.containerRef.current&&e.props.containerRef.current.contains(document.activeElement)&&document.activeElement&&document.activeElement.classList.contains("react-datepicker__week-number")&&(r=!0)),r&&e.weekNumberEl.current&&e.weekNumberEl.current.focus({preventScroll:!0})})),e}return kt(r,[{key:"componentDidMount",value:function(){this.handleFocusWeekNumber()}},{key:"componentDidUpdate",value:function(e){this.handleFocusWeekNumber(e)}},{key:"render",value:function(){var e=this.props,t=e.weekNumber,r=e.ariaLabelPrefix,n=void 0===r?"week ":r,a={"react-datepicker__week-number":!0,"react-datepicker__week-number--clickable":!!e.onClick,"react-datepicker__week-number--selected":$t(this.props.date,this.props.selected),"react-datepicker__week-number--keyboard-selected":this.isKeyboardSelected()};return de.default.createElement("div",{ref:this.weekNumberEl,className:pe.default(a),"aria-label":"".concat(n," ").concat(this.props.weekNumber),onClick:this.handleClick,onKeyDown:this.handleOnKeyDown,tabIndex:this.getTabIndex()},t)}}],[{key:"defaultProps",get:function(){return{ariaLabelPrefix:"week "}}}]),r}(de.default.Component),Hr=function(e){Et(r,e);var t=Mt(r);function r(){var e;wt(this,r);for(var n=arguments.length,a=new Array(n),o=0;o=6,s=!r&&!e.isWeekInMonth(o);if(i||s){if(!e.props.peekNextMonth)break;a=!0}}return t})),Dt(_t(e),"onMonthClick",(function(t,r){e.handleDayClick(qt(He.default(e.props.day,r)),t)})),Dt(_t(e),"onMonthMouseEnter",(function(t){e.handleDayMouseEnter(qt(He.default(e.props.day,t)))})),Dt(_t(e),"handleMonthNavigation",(function(t,r){e.isDisabled(r)||e.isExcluded(r)||(e.props.setPreSelection(r),e.MONTH_REFS[t].current&&e.MONTH_REFS[t].current.focus())})),Dt(_t(e),"onMonthKeyDown",(function(t,r){var n=e.props,a=n.selected,o=n.preSelection,i=n.disabledKeyboardNavigation,s=n.showTwoColumnMonthYearPicker,l=n.showFourColumnMonthYearPicker,u=n.setPreSelection,c=t.key;if("Tab"!==c&&t.preventDefault(),!i){var 
d=Qr(l,s),p=zr[d].verticalNavigationOffset,f=zr[d].grid;switch(c){case"Enter":e.onMonthClick(t,r),u(a);break;case"ArrowRight":e.handleMonthNavigation(11===r?0:r+1,we.default(o,1));break;case"ArrowLeft":e.handleMonthNavigation(0===r?11:r-1,Ee.default(o,1));break;case"ArrowUp":e.handleMonthNavigation(f[0].includes(r)?r+12-p:r-p,Ee.default(o,p));break;case"ArrowDown":e.handleMonthNavigation(f[f.length-1].includes(r)?r-12+p:r+p,we.default(o,p))}}})),Dt(_t(e),"onQuarterClick",(function(t,r){e.handleDayClick(Qt(We.default(e.props.day,r)),t)})),Dt(_t(e),"onQuarterMouseEnter",(function(t){e.handleDayMouseEnter(Qt(We.default(e.props.day,t)))})),Dt(_t(e),"handleQuarterNavigation",(function(t,r){e.isDisabled(r)||e.isExcluded(r)||(e.props.setPreSelection(r),e.QUARTER_REFS[t-1].current&&e.QUARTER_REFS[t-1].current.focus())})),Dt(_t(e),"onQuarterKeyDown",(function(t,r){var n=t.key;if(!e.props.disabledKeyboardNavigation)switch(n){case"Enter":e.onQuarterClick(t,r),e.props.setPreSelection(e.props.selected);break;case"ArrowRight":e.handleQuarterNavigation(4===r?1:r+1,xe.default(e.props.preSelection,1));break;case"ArrowLeft":e.handleQuarterNavigation(1===r?4:r-1,Ae.default(e.props.preSelection,1))}})),Dt(_t(e),"getMonthClassNames",(function(t){var r=e.props,n=r.day,a=r.startDate,o=r.endDate,i=r.selected,s=r.minDate,l=r.maxDate,u=r.preSelection,c=r.monthClassName,d=r.excludeDates,p=r.includeDates,f=c?c(He.default(n,t)):void 0,h=He.default(n,t);return pe.default("react-datepicker__month-text","react-datepicker__month-".concat(t),f,{"react-datepicker__month-text--disabled":(s||l||d||p)&&ir(h,e.props),"react-datepicker__month-text--selected":e.isSelectedMonth(n,t,i),"react-datepicker__month-text--keyboard-selected":!e.props.disabledKeyboardNavigation&&Re.default(u)===t,"react-datepicker__month-text--in-selecting-range":e.isInSelectingRangeMonth(t),"react-datepicker__month-text--in-range":sr(a,o,t,n),"react-datepicker__month-text--range-start":e.isRangeStartMonth(t),"react-datepicker__month-text--range-end":e.isRangeEndMonth(t),"react-datepicker__month-text--selecting-range-start":e.isSelectingMonthRangeStart(t),"react-datepicker__month-text--selecting-range-end":e.isSelectingMonthRangeEnd(t),"react-datepicker__month-text--today":e.isCurrentMonth(n,t)})})),Dt(_t(e),"getTabIndex",(function(t){var r=Re.default(e.props.preSelection);return e.props.disabledKeyboardNavigation||t!==r?"-1":"0"})),Dt(_t(e),"getQuarterTabIndex",(function(t){var r=Le.default(e.props.preSelection);return e.props.disabledKeyboardNavigation||t!==r?"-1":"0"})),Dt(_t(e),"getAriaLabel",(function(t){var r=e.props,n=r.chooseDayAriaLabelPrefix,a=void 0===n?"Choose":n,o=r.disabledDayAriaLabelPrefix,i=void 0===o?"Not available":o,s=r.day,l=He.default(s,t),u=e.isDisabled(l)||e.isExcluded(l)?i:a;return"".concat(u," ").concat(It(l,"MMMM yyyy"))})),Dt(_t(e),"getQuarterClassNames",(function(t){var r=e.props,n=r.day,a=r.startDate,o=r.endDate,i=r.selected,s=r.minDate,l=r.maxDate,u=r.preSelection,c=r.disabledKeyboardNavigation;return 
pe.default("react-datepicker__quarter-text","react-datepicker__quarter-".concat(t),{"react-datepicker__quarter-text--disabled":(s||l)&&lr(We.default(n,t),e.props),"react-datepicker__quarter-text--selected":e.isSelectedQuarter(n,t,i),"react-datepicker__quarter-text--keyboard-selected":!c&&Le.default(u)===t,"react-datepicker__quarter-text--in-selecting-range":e.isInSelectingRangeQuarter(t),"react-datepicker__quarter-text--in-range":dr(a,o,t,n),"react-datepicker__quarter-text--range-start":e.isRangeStartQuarter(t),"react-datepicker__quarter-text--range-end":e.isRangeEndQuarter(t)})})),Dt(_t(e),"getMonthContent",(function(t){var r=e.props,n=r.showFullMonthYearPicker,a=r.renderMonthContent,o=r.locale,i=r.day,s=nr(t,o),l=rr(t,o);return a?a(t,s,l,i):n?l:s})),Dt(_t(e),"getQuarterContent",(function(t){var r=e.props,n=r.renderQuarterContent,a=function(e,t){return It(We.default(jt(),e),"QQQ",t)}(t,r.locale);return n?n(t,a):a})),Dt(_t(e),"renderMonths",(function(){var t=e.props,r=t.showTwoColumnMonthYearPicker,n=t.showFourColumnMonthYearPicker,a=t.day,o=t.selected;return zr[Qr(n,r)].grid.map((function(t,r){return de.default.createElement("div",{className:"react-datepicker__month-wrapper",key:r},t.map((function(t,r){return de.default.createElement("div",{ref:e.MONTH_REFS[t],key:r,onClick:function(r){e.onMonthClick(r,t)},onKeyDown:function(r){e.onMonthKeyDown(r,t)},onMouseEnter:function(){return e.onMonthMouseEnter(t)},tabIndex:e.getTabIndex(t),className:e.getMonthClassNames(t),role:"option","aria-label":e.getAriaLabel(t),"aria-current":e.isCurrentMonth(a,t)?"date":void 0,"aria-selected":e.isSelectedMonth(a,t,o)},e.getMonthContent(t))})))}))})),Dt(_t(e),"renderQuarters",(function(){var t=e.props,r=t.day,n=t.selected;return de.default.createElement("div",{className:"react-datepicker__quarter-wrapper"},[1,2,3,4].map((function(t,a){return de.default.createElement("div",{key:a,ref:e.QUARTER_REFS[a],role:"option",onClick:function(r){e.onQuarterClick(r,t)},onKeyDown:function(r){e.onQuarterKeyDown(r,t)},onMouseEnter:function(){return e.onQuarterMouseEnter(t)},className:e.getQuarterClassNames(t),"aria-selected":e.isSelectedQuarter(r,t,n),tabIndex:e.getQuarterTabIndex(t),"aria-current":e.isCurrentQuarter(r,t)?"date":void 0},e.getQuarterContent(t))})))})),Dt(_t(e),"getClassNames",(function(){var t=e.props,r=t.selectingDate,n=t.selectsStart,a=t.selectsEnd,o=t.showMonthYearPicker,i=t.showQuarterYearPicker,s=t.showWeekPicker;return pe.default("react-datepicker__month",{"react-datepicker__month--selecting-range":r&&(n||a)},{"react-datepicker__monthPicker":o},{"react-datepicker__quarterPicker":i},{"react-datepicker__weekPicker":s})})),e}return kt(r,[{key:"render",value:function(){var e=this.props,t=e.showMonthYearPicker,r=e.showQuarterYearPicker,n=e.day,a=e.ariaLabelPrefix,o=void 0===a?"month ":a;return de.default.createElement("div",{className:this.getClassNames(),onMouseLeave:this.handleMouseLeave,"aria-label":"".concat(o," ").concat(It(n,"yyyy-MM")),role:"listbox"},t?this.renderMonths():r?this.renderQuarters():this.renderWeeks())}}]),r}(de.default.Component),Gr=function(e){Et(r,e);var t=Mt(r);function r(){var e;wt(this,r);for(var n=arguments.length,a=new Array(n),o=0;o0&&void 0!==arguments[0]?arguments[0]:{}).className||"").split(/\s+/);return Zr.some((function(t){return e.indexOf(t)>=0}))})(e.target)&&n.props.onDropdownFocus()})),Dt(_t(n),"getDateInView",(function(){var e=n.props,t=e.preSelection,r=e.selected,a=e.openToDate,o=wr(n.props),i=xr(n.props),s=jt();return 
a||r||t||(o&&ut.default(s,o)?o:i&<.default(s,i)?i:s)})),Dt(_t(n),"increaseMonth",(function(){n.setState((function(e){var t=e.date;return{date:we.default(t,1)}}),(function(){return n.handleMonthChange(n.state.date)}))})),Dt(_t(n),"decreaseMonth",(function(){n.setState((function(e){var t=e.date;return{date:Ee.default(t,1)}}),(function(){return n.handleMonthChange(n.state.date)}))})),Dt(_t(n),"handleDayClick",(function(e,t,r){n.props.onSelect(e,t,r),n.props.setPreSelection&&n.props.setPreSelection(e)})),Dt(_t(n),"handleDayMouseEnter",(function(e){n.setState({selectingDate:e}),n.props.onDayMouseEnter&&n.props.onDayMouseEnter(e)})),Dt(_t(n),"handleMonthMouseLeave",(function(){n.setState({selectingDate:null}),n.props.onMonthMouseLeave&&n.props.onMonthMouseLeave()})),Dt(_t(n),"handleYearMouseEnter",(function(e,t){n.setState({selectingDate:Ue.default(jt(),t)}),n.props.onYearMouseEnter&&n.props.onYearMouseEnter(e,t)})),Dt(_t(n),"handleYearMouseLeave",(function(e,t){n.props.onYearMouseLeave&&n.props.onYearMouseLeave(e,t)})),Dt(_t(n),"handleYearChange",(function(e){n.props.onYearChange&&(n.props.onYearChange(e),n.setState({isRenderAriaLiveMessage:!0})),n.props.adjustDateOnChange&&(n.props.onSelect&&n.props.onSelect(e),n.props.setOpen&&n.props.setOpen(!0)),n.props.setPreSelection&&n.props.setPreSelection(e)})),Dt(_t(n),"handleMonthChange",(function(e){n.handleCustomMonthChange(e),n.props.adjustDateOnChange&&(n.props.onSelect&&n.props.onSelect(e),n.props.setOpen&&n.props.setOpen(!0)),n.props.setPreSelection&&n.props.setPreSelection(e)})),Dt(_t(n),"handleCustomMonthChange",(function(e){n.props.onMonthChange&&(n.props.onMonthChange(e),n.setState({isRenderAriaLiveMessage:!0}))})),Dt(_t(n),"handleMonthYearChange",(function(e){n.handleYearChange(e),n.handleMonthChange(e)})),Dt(_t(n),"changeYear",(function(e){n.setState((function(t){var r=t.date;return{date:Ue.default(r,e)}}),(function(){return n.handleYearChange(n.state.date)}))})),Dt(_t(n),"changeMonth",(function(e){n.setState((function(t){var r=t.date;return{date:He.default(r,e)}}),(function(){return n.handleMonthChange(n.state.date)}))})),Dt(_t(n),"changeMonthYear",(function(e){n.setState((function(t){var r=t.date;return{date:Ue.default(He.default(r,Re.default(e)),Fe.default(e))}}),(function(){return n.handleMonthYearChange(n.state.date)}))})),Dt(_t(n),"header",(function(){var e=Ut(arguments.length>0&&void 0!==arguments[0]?arguments[0]:n.state.date,n.props.locale,n.props.calendarStartDay),t=[];return n.props.showWeekNumbers&&t.push(de.default.createElement("div",{key:"W",className:"react-datepicker__day-name"},n.props.weekLabel||"#")),t.concat([0,1,2,3,4,5,6].map((function(t){var r=be.default(e,t),a=n.formatWeekday(r,n.props.locale),o=n.props.weekDayClassName?n.props.weekDayClassName(r):void 0;return de.default.createElement("div",{key:t,className:pe.default("react-datepicker__day-name",o)},a)})))})),Dt(_t(n),"formatWeekday",(function(e,t){return n.props.formatWeekDay?function(e,t,r){return t(It(e,"EEEE",r))}(e,n.props.formatWeekDay,t):n.props.useWeekdaysShort?function(e,t){return It(e,"EEE",t)}(e,t):function(e,t){return It(e,"EEEEEE",t)}(e,t)})),Dt(_t(n),"decreaseYear",(function(){n.setState((function(e){var t=e.date;return{date:Se.default(t,n.props.showYearPicker?n.props.yearItemNumber:1)}}),(function(){return n.handleYearChange(n.state.date)}))})),Dt(_t(n),"clearSelectingDate",(function(){n.setState({selectingDate:null})})),Dt(_t(n),"renderPreviousButton",(function(){if(!n.props.renderCustomHeader){var e;switch(!0){case 
n.props.showMonthYearPicker:e=br(n.state.date,n.props);break;case n.props.showYearPicker:e=function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},r=t.minDate,n=t.yearItemNumber,a=void 0===n?Ft:n,o=Ar(zt(Se.default(e,a)),a).endPeriod,i=r&&Fe.default(r);return i&&i>o||!1}(n.state.date,n.props);break;default:e=gr(n.state.date,n.props)}if((n.props.forceShowMonthNavigation||n.props.showDisabledMonthNavigation||!e)&&!n.props.showTimeSelectOnly){var t=["react-datepicker__navigation","react-datepicker__navigation--previous"],r=n.decreaseMonth;(n.props.showMonthYearPicker||n.props.showQuarterYearPicker||n.props.showYearPicker)&&(r=n.decreaseYear),e&&n.props.showDisabledMonthNavigation&&(t.push("react-datepicker__navigation--previous--disabled"),r=null);var a=n.props.showMonthYearPicker||n.props.showQuarterYearPicker||n.props.showYearPicker,o=n.props,i=o.previousMonthButtonLabel,s=o.previousYearButtonLabel,l=n.props,u=l.previousMonthAriaLabel,c=void 0===u?"string"==typeof i?i:"Previous Month":u,d=l.previousYearAriaLabel,p=void 0===d?"string"==typeof s?s:"Previous Year":d;return de.default.createElement("button",{type:"button",className:t.join(" "),onClick:r,onKeyDown:n.props.handleOnKeyDown,"aria-label":a?p:c},de.default.createElement("span",{className:["react-datepicker__navigation-icon","react-datepicker__navigation-icon--previous"].join(" ")},a?n.props.previousYearButtonLabel:n.props.previousMonthButtonLabel))}}})),Dt(_t(n),"increaseYear",(function(){n.setState((function(e){var t=e.date;return{date:ke.default(t,n.props.showYearPicker?n.props.yearItemNumber:1)}}),(function(){return n.handleYearChange(n.state.date)}))})),Dt(_t(n),"renderNextButton",(function(){if(!n.props.renderCustomHeader){var e;switch(!0){case n.props.showMonthYearPicker:e=yr(n.state.date,n.props);break;case n.props.showYearPicker:e=function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},r=t.maxDate,n=t.yearItemNumber,a=void 0===n?Ft:n,o=Ar(ke.default(e,a),a).startPeriod,i=r&&Fe.default(r);return i&&i0&&void 0!==arguments[0]?arguments[0]:n.state.date,t=["react-datepicker__current-month"];return n.props.showYearDropdown&&t.push("react-datepicker__current-month--hasYearDropdown"),n.props.showMonthDropdown&&t.push("react-datepicker__current-month--hasMonthDropdown"),n.props.showMonthYearDropdown&&t.push("react-datepicker__current-month--hasMonthYearDropdown"),de.default.createElement("div",{className:t.join(" ")},It(e,n.props.dateFormat,n.props.locale))})),Dt(_t(n),"renderYearDropdown",(function(){var e=arguments.length>0&&void 0!==arguments[0]&&arguments[0];if(n.props.showYearDropdown&&!e)return de.default.createElement(Or,{adjustDateOnChange:n.props.adjustDateOnChange,date:n.state.date,onSelect:n.props.onSelect,setOpen:n.props.setOpen,dropdownMode:n.props.dropdownMode,onChange:n.changeYear,minDate:n.props.minDate,maxDate:n.props.maxDate,year:Fe.default(n.state.date),scrollableYearDropdown:n.props.scrollableYearDropdown,yearDropdownItemNumber:n.props.yearDropdownItemNumber})})),Dt(_t(n),"renderMonthDropdown",(function(){var e=arguments.length>0&&void 0!==arguments[0]&&arguments[0];if(n.props.showMonthDropdown&&!e)return de.default.createElement(Rr,{dropdownMode:n.props.dropdownMode,locale:n.props.locale,onChange:n.changeMonth,month:Re.default(n.state.date),useShortMonthInDropdown:n.props.useShortMonthInDropdown})})),Dt(_t(n),"renderMonthYearDropdown",(function(){var e=arguments.length>0&&void 0!==arguments[0]&&arguments[0];if(n.props.showMonthYearDropdown&&!e)return 
de.default.createElement(jr,{dropdownMode:n.props.dropdownMode,locale:n.props.locale,dateFormat:n.props.dateFormat,onChange:n.changeMonthYear,minDate:n.props.minDate,maxDate:n.props.maxDate,date:n.state.date,scrollableMonthYearDropdown:n.props.scrollableMonthYearDropdown})})),Dt(_t(n),"handleTodayButtonClick",(function(e){n.props.onSelect(Kt(),e),n.props.setPreSelection&&n.props.setPreSelection(Kt())})),Dt(_t(n),"renderTodayButton",(function(){if(n.props.todayButton&&!n.props.showTimeSelectOnly)return de.default.createElement("div",{className:"react-datepicker__today-button",onClick:function(e){return n.handleTodayButtonClick(e)}},n.props.todayButton)})),Dt(_t(n),"renderDefaultHeader",(function(e){var t=e.monthDate,r=e.i;return de.default.createElement("div",{className:"react-datepicker__header ".concat(n.props.showTimeSelect?"react-datepicker__header--has-time-select":"")},n.renderCurrentMonth(t),de.default.createElement("div",{className:"react-datepicker__header__dropdown react-datepicker__header__dropdown--".concat(n.props.dropdownMode),onFocus:n.handleDropdownFocus},n.renderMonthDropdown(0!==r),n.renderMonthYearDropdown(0!==r),n.renderYearDropdown(0!==r)),de.default.createElement("div",{className:"react-datepicker__day-names"},n.header(t)))})),Dt(_t(n),"renderCustomHeader",(function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},t=e.monthDate,r=e.i;if(n.props.showTimeSelect&&!n.state.monthContainer||n.props.showTimeSelectOnly)return null;var a=gr(n.state.date,n.props),o=mr(n.state.date,n.props),i=br(n.state.date,n.props),s=yr(n.state.date,n.props),l=!n.props.showMonthYearPicker&&!n.props.showQuarterYearPicker&&!n.props.showYearPicker;return de.default.createElement("div",{className:"react-datepicker__header react-datepicker__header--custom",onFocus:n.props.onDropdownFocus},n.props.renderCustomHeader(bt(bt({},n.state),{},{customHeaderCount:r,monthDate:t,changeMonth:n.changeMonth,changeYear:n.changeYear,decreaseMonth:n.decreaseMonth,increaseMonth:n.increaseMonth,decreaseYear:n.decreaseYear,increaseYear:n.increaseYear,prevMonthButtonDisabled:a,nextMonthButtonDisabled:o,prevYearButtonDisabled:i,nextYearButtonDisabled:s})),l&&de.default.createElement("div",{className:"react-datepicker__day-names"},n.header(t)))})),Dt(_t(n),"renderYearHeader",(function(){var e=n.state.date,t=n.props,r=t.showYearPicker,a=Ar(e,t.yearItemNumber),o=a.startPeriod,i=a.endPeriod;return de.default.createElement("div",{className:"react-datepicker__header react-datepicker-year-header"},r?"".concat(o," - ").concat(i):Fe.default(e))})),Dt(_t(n),"renderHeader",(function(e){switch(!0){case void 0!==n.props.renderCustomHeader:return n.renderCustomHeader(e);case n.props.showMonthYearPicker||n.props.showQuarterYearPicker||n.props.showYearPicker:return n.renderYearHeader(e);default:return n.renderDefaultHeader(e)}})),Dt(_t(n),"renderMonths",(function(){var e;if(!n.props.showTimeSelectOnly&&!n.props.showYearPicker){for(var t=[],r=n.props.showPreviousMonths?n.props.monthsShown-1:0,a=Ee.default(n.state.date,r),o=null!==(e=n.props.monthSelectedIn)&&void 
0!==e?e:r,i=0;i0;t.push(de.default.createElement("div",{key:u,ref:function(e){n.monthContainer=e},className:"react-datepicker__month-container"},n.renderHeader({monthDate:l,i:i}),de.default.createElement(Kr,{chooseDayAriaLabelPrefix:n.props.chooseDayAriaLabelPrefix,disabledDayAriaLabelPrefix:n.props.disabledDayAriaLabelPrefix,weekAriaLabelPrefix:n.props.weekAriaLabelPrefix,ariaLabelPrefix:n.props.monthAriaLabelPrefix,onChange:n.changeMonthYear,day:l,dayClassName:n.props.dayClassName,calendarStartDay:n.props.calendarStartDay,monthClassName:n.props.monthClassName,onDayClick:n.handleDayClick,handleOnKeyDown:n.props.handleOnDayKeyDown,onDayMouseEnter:n.handleDayMouseEnter,onMouseLeave:n.handleMonthMouseLeave,onWeekSelect:n.props.onWeekSelect,orderInDisplay:i,formatWeekNumber:n.props.formatWeekNumber,locale:n.props.locale,minDate:n.props.minDate,maxDate:n.props.maxDate,excludeDates:n.props.excludeDates,excludeDateIntervals:n.props.excludeDateIntervals,highlightDates:n.props.highlightDates,holidays:n.props.holidays,selectingDate:n.state.selectingDate,includeDates:n.props.includeDates,includeDateIntervals:n.props.includeDateIntervals,inline:n.props.inline,shouldFocusDayInline:n.props.shouldFocusDayInline,fixedHeight:n.props.fixedHeight,filterDate:n.props.filterDate,preSelection:n.props.preSelection,setPreSelection:n.props.setPreSelection,selected:n.props.selected,selectsStart:n.props.selectsStart,selectsEnd:n.props.selectsEnd,selectsRange:n.props.selectsRange,selectsDisabledDaysInRange:n.props.selectsDisabledDaysInRange,showWeekNumbers:n.props.showWeekNumbers,startDate:n.props.startDate,endDate:n.props.endDate,peekNextMonth:n.props.peekNextMonth,setOpen:n.props.setOpen,shouldCloseOnSelect:n.props.shouldCloseOnSelect,renderDayContents:n.props.renderDayContents,renderMonthContent:n.props.renderMonthContent,renderQuarterContent:n.props.renderQuarterContent,renderYearContent:n.props.renderYearContent,disabledKeyboardNavigation:n.props.disabledKeyboardNavigation,showMonthYearPicker:n.props.showMonthYearPicker,showFullMonthYearPicker:n.props.showFullMonthYearPicker,showTwoColumnMonthYearPicker:n.props.showTwoColumnMonthYearPicker,showFourColumnMonthYearPicker:n.props.showFourColumnMonthYearPicker,showYearPicker:n.props.showYearPicker,showQuarterYearPicker:n.props.showQuarterYearPicker,showWeekPicker:n.props.showWeekPicker,isInputFocused:n.props.isInputFocused,containerRef:n.containerRef,monthShowsDuplicateDaysEnd:c,monthShowsDuplicateDaysStart:d})))}return t}})),Dt(_t(n),"renderYears",(function(){if(!n.props.showTimeSelectOnly)return n.props.showYearPicker?de.default.createElement("div",{className:"react-datepicker__year--container"},n.renderHeader(),de.default.createElement(Xr,Ct({onDayClick:n.handleDayClick,selectingDate:n.state.selectingDate,clearSelectingDate:n.clearSelectingDate,date:n.state.date},n.props,{onYearMouseEnter:n.handleYearMouseEnter,onYearMouseLeave:n.handleYearMouseLeave}))):void 0})),Dt(_t(n),"renderTimeSection",(function(){if(n.props.showTimeSelect&&(n.state.monthContainer||n.props.showTimeSelectOnly))return 
de.default.createElement(Gr,{selected:n.props.selected,openToDate:n.props.openToDate,onChange:n.props.onTimeChange,timeClassName:n.props.timeClassName,format:n.props.timeFormat,includeTimes:n.props.includeTimes,intervals:n.props.timeIntervals,minTime:n.props.minTime,maxTime:n.props.maxTime,excludeTimes:n.props.excludeTimes,filterTime:n.props.filterTime,timeCaption:n.props.timeCaption,todayButton:n.props.todayButton,showMonthDropdown:n.props.showMonthDropdown,showMonthYearDropdown:n.props.showMonthYearDropdown,showYearDropdown:n.props.showYearDropdown,withPortal:n.props.withPortal,monthRef:n.state.monthContainer,injectTimes:n.props.injectTimes,locale:n.props.locale,handleOnKeyDown:n.props.handleOnKeyDown,showTimeSelectOnly:n.props.showTimeSelectOnly})})),Dt(_t(n),"renderInputTimeSection",(function(){var e=new Date(n.props.selected),t=Bt(e)&&Boolean(n.props.selected)?"".concat(Er(e.getHours()),":").concat(Er(e.getMinutes())):"";if(n.props.showTimeInput)return de.default.createElement(Vr,{date:e,timeString:t,timeInputLabel:n.props.timeInputLabel,onChange:n.props.onTimeChange,customTimeInput:n.props.customTimeInput})})),Dt(_t(n),"renderAriaLiveRegion",(function(){var e,t=Ar(n.state.date,n.props.yearItemNumber),r=t.startPeriod,a=t.endPeriod;return e=n.props.showYearPicker?"".concat(r," - ").concat(a):n.props.showMonthYearPicker||n.props.showQuarterYearPicker?Fe.default(n.state.date):"".concat(rr(Re.default(n.state.date),n.props.locale)," ").concat(Fe.default(n.state.date)),de.default.createElement("span",{role:"alert","aria-live":"polite",className:"react-datepicker__aria-live"},n.state.isRenderAriaLiveMessage&&e)})),Dt(_t(n),"renderChildren",(function(){if(n.props.children)return de.default.createElement("div",{className:"react-datepicker__children-container"},n.props.children)})),n.containerRef=de.default.createRef(),n.state={date:n.getDateInView(),selectingDate:null,monthContainer:null,isRenderAriaLiveMessage:!1},n}return kt(r,[{key:"componentDidMount",value:function(){var e=this;this.props.showTimeSelect&&(this.assignMonthContainer=void e.setState({monthContainer:e.monthContainer}))}},{key:"componentDidUpdate",value:function(e){var t=this;if(!this.props.preSelection||$t(this.props.preSelection,e.preSelection)&&this.props.monthSelectedIn===e.monthSelectedIn)this.props.openToDate&&!$t(this.props.openToDate,e.openToDate)&&this.setState({date:this.props.openToDate});else{var r=!Xt(this.state.date,this.props.preSelection);this.setState({date:this.props.preSelection},(function(){return r&&t.handleCustomMonthChange(t.state.date)}))}}},{key:"render",value:function(){var e=this.props.container||$r;return de.default.createElement("div",{style:{display:"contents"},ref:this.containerRef},de.default.createElement(e,{className:pe.default("react-datepicker",this.props.className,{"react-datepicker--time-only":this.props.showTimeSelectOnly}),showPopperArrow:this.props.showPopperArrow,arrowProps:this.props.arrowProps},this.renderAriaLiveRegion(),this.renderPreviousButton(),this.renderNextButton(),this.renderMonths(),this.renderYears(),this.renderTodayButton(),this.renderTimeSection(),this.renderInputTimeSection(),this.renderChildren()))}}],[{key:"defaultProps",get:function(){return{onDropdownFocus:function(){},monthsShown:1,forceShowMonthNavigation:!1,timeCaption:"Time",previousYearButtonLabel:"Previous Year",nextYearButtonLabel:"Next Year",previousMonthButtonLabel:"Previous Month",nextMonthButtonLabel:"Next Month",customTimeInput:null,yearItemNumber:Ft}}}]),r}(de.default.Component),en=function(e){var 
t=e.icon,r=e.className,n=void 0===r?"":r,a=e.onClick,o="react-datepicker__calendar-icon";return de.default.isValidElement(t)?de.default.cloneElement(t,{className:"".concat(t.props.className||""," ").concat(o," ").concat(n),onClick:function(e){"function"==typeof t.props.onClick&&t.props.onClick(e),"function"==typeof a&&a(e)}}):"string"==typeof t?de.default.createElement("i",{className:"".concat(o," ").concat(t," ").concat(n),"aria-hidden":"true",onClick:a}):de.default.createElement("svg",{className:"".concat(o," ").concat(n),xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 448 512",onClick:a},de.default.createElement("path",{d:"M96 32V64H48C21.5 64 0 85.5 0 112v48H448V112c0-26.5-21.5-48-48-48H352V32c0-17.7-14.3-32-32-32s-32 14.3-32 32V64H160V32c0-17.7-14.3-32-32-32S96 14.3 96 32zM448 192H0V464c0 26.5 21.5 48 48 48H400c26.5 0 48-21.5 48-48V192z"}))},tn=function(e){Et(r,e);var t=Mt(r);function r(e){var n;return wt(this,r),(n=t.call(this,e)).el=document.createElement("div"),n}return kt(r,[{key:"componentDidMount",value:function(){this.portalRoot=(this.props.portalHost||document).getElementById(this.props.portalId),this.portalRoot||(this.portalRoot=document.createElement("div"),this.portalRoot.setAttribute("id",this.props.portalId),(this.props.portalHost||document.body).appendChild(this.portalRoot)),this.portalRoot.appendChild(this.el)}},{key:"componentWillUnmount",value:function(){this.portalRoot.removeChild(this.el)}},{key:"render",value:function(){return vt.default.createPortal(this.props.children,this.el)}}]),r}(de.default.Component),rn=function(e){return!e.disabled&&-1!==e.tabIndex},nn=function(e){Et(r,e);var t=Mt(r);function r(e){var n;return wt(this,r),Dt(_t(n=t.call(this,e)),"getTabChildren",(function(){return Array.prototype.slice.call(n.tabLoopRef.current.querySelectorAll("[tabindex], a, button, input, select, textarea"),1,-1).filter(rn)})),Dt(_t(n),"handleFocusStart",(function(){var e=n.getTabChildren();e&&e.length>1&&e[e.length-1].focus()})),Dt(_t(n),"handleFocusEnd",(function(){var e=n.getTabChildren();e&&e.length>1&&e[0].focus()})),n.tabLoopRef=de.default.createRef(),n}return kt(r,[{key:"render",value:function(){return this.props.enableTabLoop?de.default.createElement("div",{className:"react-datepicker__tab-loop",ref:this.tabLoopRef},de.default.createElement("div",{className:"react-datepicker__tab-loop__start",tabIndex:"0",onFocus:this.handleFocusStart}),this.props.children,de.default.createElement("div",{className:"react-datepicker__tab-loop__end",tabIndex:"0",onFocus:this.handleFocusEnd})):this.props.children}}],[{key:"defaultProps",get:function(){return{enableTabLoop:!0}}}]),r}(de.default.Component),an=function(e){Et(r,e);var t=Mt(r);function r(){return wt(this,r),t.apply(this,arguments)}return kt(r,[{key:"render",value:function(){var e,t=this.props,r=t.className,n=t.wrapperClassName,a=t.hidePopper,o=t.popperComponent,i=t.popperModifiers,s=t.popperPlacement,l=t.popperProps,u=t.targetComponent,c=t.enableTabLoop,d=t.popperOnKeyDown,p=t.portalId,f=t.portalHost;if(!a){var h=pe.default("react-datepicker-popper",r);e=de.default.createElement(le.Popper,Ct({modifiers:i,placement:s},l),(function(e){var t=e.ref,r=e.style,n=e.placement,a=e.arrowProps;return 
de.default.createElement(nn,{enableTabLoop:c},de.default.createElement("div",{ref:t,style:r,className:h,"data-placement":n,onKeyDown:d},de.default.cloneElement(o,{arrowProps:a})))}))}this.props.popperContainer&&(e=de.default.createElement(this.props.popperContainer,{},e)),p&&!a&&(e=de.default.createElement(tn,{portalId:p,portalHost:f},e));var v=pe.default("react-datepicker-wrapper",n);return de.default.createElement(le.Manager,{className:"react-datepicker-manager"},de.default.createElement(le.Reference,null,(function(e){var t=e.ref;return de.default.createElement("div",{ref:t,className:v},u)})),e)}}],[{key:"defaultProps",get:function(){return{hidePopper:!0,popperModifiers:[],popperProps:{},popperPlacement:"bottom-start"}}}]),r}(de.default.Component),on="react-datepicker-ignore-onclickoutside",sn=ht.default(Jr),ln="Date input not valid.",un=function(e){Et(r,e);var t=Mt(r);function r(e){var n;return wt(this,r),Dt(_t(n=t.call(this,e)),"getPreSelection",(function(){return n.props.openToDate?n.props.openToDate:n.props.selectsEnd&&n.props.startDate?n.props.startDate:n.props.selectsStart&&n.props.endDate?n.props.endDate:jt()})),Dt(_t(n),"modifyHolidays",(function(){var e;return null===(e=n.props.holidays)||void 0===e?void 0:e.reduce((function(e,t){var r=new Date(t.date);return he.default(r)?[].concat(Tt(e),[bt(bt({},t),{},{date:r})]):e}),[])})),Dt(_t(n),"calcInitialState",(function(){var e,t=n.getPreSelection(),r=wr(n.props),a=xr(n.props),o=r&&ut.default(t,Xe.default(r))?r:a&&lt.default(t,et.default(a))?a:t;return{open:n.props.startOpen||!1,preventFocus:!1,preSelection:null!==(e=n.props.selectsRange?n.props.startDate:n.props.selected)&&void 0!==e?e:o,highlightDates:kr(n.props.highlightDates),focused:!1,shouldFocusDayInline:!1,isRenderAriaLiveMessage:!1}})),Dt(_t(n),"clearPreventFocusTimeout",(function(){n.preventFocusTimeout&&clearTimeout(n.preventFocusTimeout)})),Dt(_t(n),"setFocus",(function(){n.input&&n.input.focus&&n.input.focus({preventScroll:!0})})),Dt(_t(n),"setBlur",(function(){n.input&&n.input.blur&&n.input.blur(),n.cancelFocusInput()})),Dt(_t(n),"setOpen",(function(e){var t=arguments.length>1&&void 0!==arguments[1]&&arguments[1];n.setState({open:e,preSelection:e&&n.state.open?n.state.preSelection:n.calcInitialState().preSelection,lastPreSelectChange:dn},(function(){e||n.setState((function(e){return{focused:!!t&&e.focused}}),(function(){!t&&n.setBlur(),n.setState({inputValue:null})}))}))})),Dt(_t(n),"inputOk",(function(){return fe.default(n.state.preSelection)})),Dt(_t(n),"isCalendarOpen",(function(){return void 0===n.props.open?n.state.open&&!n.props.disabled&&!n.props.readOnly:n.props.open})),Dt(_t(n),"handleFocus",(function(e){n.state.preventFocus||(n.props.onFocus(e),n.props.preventOpenOnFocus||n.props.readOnly||n.setOpen(!0)),n.setState({focused:!0})})),Dt(_t(n),"sendFocusBackToInput",(function(){n.preventFocusTimeout&&n.clearPreventFocusTimeout(),n.setState({preventFocus:!0},(function(){n.preventFocusTimeout=setTimeout((function(){n.setFocus(),n.setState({preventFocus:!1})}))}))})),Dt(_t(n),"cancelFocusInput",(function(){clearTimeout(n.inputFocusTimeout),n.inputFocusTimeout=null})),Dt(_t(n),"deferFocusInput",(function(){n.cancelFocusInput(),n.inputFocusTimeout=setTimeout((function(){return
n.setFocus()}),1)})),Dt(_t(n),"handleDropdownFocus",(function(){n.cancelFocusInput()})),Dt(_t(n),"handleBlur",(function(e){(!n.state.open||n.props.withPortal||n.props.showTimeInput)&&n.props.onBlur(e),n.setState({focused:!1})})),Dt(_t(n),"handleCalendarClickOutside",(function(e){n.props.inline||n.setOpen(!1),n.props.onClickOutside(e),n.props.withPortal&&e.preventDefault()})),Dt(_t(n),"handleChange",(function(){for(var e=arguments.length,t=new Array(e),r=0;r0&&(c=pt.default(o,i.slice(0,o.length),new Date)),Bt(c)||(c=new Date(o))),Bt(c)&&p?c:null));n.props.showTimeSelectOnly&&n.props.selected&&f&&!$t(f,n.props.selected)&&(f=gt.default(n.props.selected,{hours:Te.default(f),minutes:Me.default(f),seconds:_e.default(f)})),!f&&a.target.value||(n.props.showWeekPicker&&(f=Ut(f,n.props.locale,n.props.calendarStartDay)),n.setSelected(f,a,!0))}})),Dt(_t(n),"handleSelect",(function(e,t,r){if(n.props.shouldCloseOnSelect&&!n.props.showTimeSelect&&n.sendFocusBackToInput(),n.props.onChangeRaw&&n.props.onChangeRaw(t),n.props.showWeekPicker&&(e=Ut(e,n.props.locale,n.props.calendarStartDay)),n.setSelected(e,t,!1,r),n.props.showDateSelect&&n.setState({isRenderAriaLiveMessage:!0}),!n.props.shouldCloseOnSelect||n.props.showTimeSelect)n.setPreSelection(e);else if(!n.props.inline){n.props.selectsRange||n.setOpen(!1);var a=n.props,o=a.startDate,i=a.endDate;!o||i||ut.default(e,o)||n.setOpen(!1)}})),Dt(_t(n),"setSelected",(function(e,t,r,a){var o=e;if(n.props.showYearPicker){if(null!==o&&cr(Fe.default(o),n.props))return}else if(n.props.showMonthYearPicker){if(null!==o&&ir(o,n.props))return}else if(null!==o&&ar(o,n.props))return;var i=n.props,s=i.onChange,l=i.selectsRange,u=i.startDate,c=i.endDate;if(!Zt(n.props.selected,o)||n.props.allowSameDay||l)if(null!==o&&(!n.props.selected||r&&(n.props.showTimeSelect||n.props.showTimeSelectOnly||n.props.showTimeInput)||(o=Wt(o,{hour:Te.default(n.props.selected),minute:Me.default(n.props.selected),second:_e.default(n.props.selected)})),n.props.inline||n.setState({preSelection:o}),n.props.focusSelectedMonth||n.setState({monthSelectedIn:a})),l){var d=u&&c;u||c?u&&!c&&(ut.default(o,u)?s([o,null],t):s([u,o],t)):s([o,null],t),d&&s([o,null],t)}else s(o,t);r||(n.props.onSelect(o,t),n.setState({inputValue:null}))})),Dt(_t(n),"setPreSelection",(function(e){var t=void 0!==n.props.minDate,r=void 0!==n.props.maxDate,a=!0;if(e){n.props.showWeekPicker&&(e=Ut(e,n.props.locale,n.props.calendarStartDay));var o=Xe.default(e);if(t&&r)a=Jt(e,n.props.minDate,n.props.maxDate);else if(t){var i=Xe.default(n.props.minDate);a=lt.default(e,i)||Zt(o,i)}else if(r){var s=et.default(n.props.maxDate);a=ut.default(e,s)||Zt(o,s)}}a&&n.setState({preSelection:e})})),Dt(_t(n),"toggleCalendar",(function(){n.setOpen(!n.state.open)})),Dt(_t(n),"handleTimeChange",(function(e){var t=n.props.selected?n.props.selected:n.getPreSelection(),r=n.props.selected?e:Wt(t,{hour:Te.default(e),minute:Me.default(e)});n.setState({preSelection:r}),n.props.onChange(r),n.props.shouldCloseOnSelect&&(n.sendFocusBackToInput(),n.setOpen(!1)),n.props.showTimeInput&&n.setOpen(!0),(n.props.showTimeSelectOnly||n.props.showTimeSelect)&&n.setState({isRenderAriaLiveMessage:!0}),n.setState({inputValue:null})})),Dt(_t(n),"onInputClick",(function(){n.props.disabled||n.props.readOnly||n.setOpen(!0),n.props.onInputClick()})),Dt(_t(n),"onInputKeyDown",(function(e){n.props.onKeyDown(e);var t=e.key;if(n.state.open||n.props.inline||n.props.preventOpenOnFocus){if(n.state.open){if("ArrowDown"===t||"ArrowUp"===t){e.preventDefault();var 
r=n.props.showWeekPicker&&n.props.showWeekNumbers?'.react-datepicker__week-number[tabindex="0"]':'.react-datepicker__day[tabindex="0"]',a=n.calendar.componentNode&&n.calendar.componentNode.querySelector(r);return void(a&&a.focus({preventScroll:!0}))}var o=jt(n.state.preSelection);"Enter"===t?(e.preventDefault(),n.inputOk()&&n.state.lastPreSelectChange===dn?(n.handleSelect(o,e),!n.props.shouldCloseOnSelect&&n.setPreSelection(o)):n.setOpen(!1)):"Escape"===t?(e.preventDefault(),n.sendFocusBackToInput(),n.setOpen(!1)):"Tab"===t&&n.setOpen(!1),n.inputOk()||n.props.onInputError({code:1,msg:ln})}}else"ArrowDown"!==t&&"ArrowUp"!==t&&"Enter"!==t||n.onInputClick()})),Dt(_t(n),"onPortalKeyDown",(function(e){"Escape"===e.key&&(e.preventDefault(),n.setState({preventFocus:!0},(function(){n.setOpen(!1),setTimeout((function(){n.setFocus(),n.setState({preventFocus:!1})}))})))})),Dt(_t(n),"onDayKeyDown",(function(e){n.props.onKeyDown(e);var t=e.key,r=jt(n.state.preSelection);if("Enter"===t)e.preventDefault(),n.handleSelect(r,e),!n.props.shouldCloseOnSelect&&n.setPreSelection(r);else if("Escape"===t)e.preventDefault(),n.setOpen(!1),n.inputOk()||n.props.onInputError({code:1,msg:ln});else if(!n.props.disabledKeyboardNavigation){var a;switch(t){case"ArrowLeft":a=n.props.showWeekPicker?Ce.default(r,1):De.default(r,1);break;case"ArrowRight":a=n.props.showWeekPicker?ye.default(r,1):be.default(r,1);break;case"ArrowUp":a=Ce.default(r,1);break;case"ArrowDown":a=ye.default(r,1);break;case"PageUp":a=Ee.default(r,1);break;case"PageDown":a=we.default(r,1);break;case"Home":a=Se.default(r,1);break;case"End":a=ke.default(r,1);break;default:a=null}if(!a)return void(n.props.onInputError&&n.props.onInputError({code:1,msg:ln}));if(e.preventDefault(),n.setState({lastPreSelectChange:dn}),n.props.adjustDateOnChange&&n.setSelected(a),n.setPreSelection(a),n.props.inline){var o=Re.default(r),i=Re.default(a),s=Fe.default(r),l=Fe.default(a);o!==i||s!==l?n.setState({shouldFocusDayInline:!0}):n.setState({shouldFocusDayInline:!1})}}})),Dt(_t(n),"onPopperKeyDown",(function(e){"Escape"===e.key&&(e.preventDefault(),n.sendFocusBackToInput())})),Dt(_t(n),"onClearClick",(function(e){e&&e.preventDefault&&e.preventDefault(),n.sendFocusBackToInput(),n.props.selectsRange?n.props.onChange([null,null],e):n.props.onChange(null,e),n.setState({inputValue:null})})),Dt(_t(n),"clear",(function(){n.onClearClick()})),Dt(_t(n),"onScroll",(function(e){"boolean"==typeof n.props.closeOnScroll&&n.props.closeOnScroll?e.target!==document&&e.target!==document.documentElement&&e.target!==document.body||n.setOpen(!1):"function"==typeof n.props.closeOnScroll&&n.props.closeOnScroll(e)&&n.setOpen(!1)})),Dt(_t(n),"renderCalendar",(function(){return 
n.props.inline||n.isCalendarOpen()?de.default.createElement(sn,{ref:function(e){n.calendar=e},locale:n.props.locale,calendarStartDay:n.props.calendarStartDay,chooseDayAriaLabelPrefix:n.props.chooseDayAriaLabelPrefix,disabledDayAriaLabelPrefix:n.props.disabledDayAriaLabelPrefix,weekAriaLabelPrefix:n.props.weekAriaLabelPrefix,monthAriaLabelPrefix:n.props.monthAriaLabelPrefix,adjustDateOnChange:n.props.adjustDateOnChange,setOpen:n.setOpen,shouldCloseOnSelect:n.props.shouldCloseOnSelect,dateFormat:n.props.dateFormatCalendar,useWeekdaysShort:n.props.useWeekdaysShort,formatWeekDay:n.props.formatWeekDay,dropdownMode:n.props.dropdownMode,selected:n.props.selected,preSelection:n.state.preSelection,onSelect:n.handleSelect,onWeekSelect:n.props.onWeekSelect,openToDate:n.props.openToDate,minDate:n.props.minDate,maxDate:n.props.maxDate,selectsStart:n.props.selectsStart,selectsEnd:n.props.selectsEnd,selectsRange:n.props.selectsRange,startDate:n.props.startDate,endDate:n.props.endDate,excludeDates:n.props.excludeDates,excludeDateIntervals:n.props.excludeDateIntervals,filterDate:n.props.filterDate,onClickOutside:n.handleCalendarClickOutside,formatWeekNumber:n.props.formatWeekNumber,highlightDates:n.state.highlightDates,holidays:Dr(n.modifyHolidays()),includeDates:n.props.includeDates,includeDateIntervals:n.props.includeDateIntervals,includeTimes:n.props.includeTimes,injectTimes:n.props.injectTimes,inline:n.props.inline,shouldFocusDayInline:n.state.shouldFocusDayInline,peekNextMonth:n.props.peekNextMonth,showMonthDropdown:n.props.showMonthDropdown,showPreviousMonths:n.props.showPreviousMonths,useShortMonthInDropdown:n.props.useShortMonthInDropdown,showMonthYearDropdown:n.props.showMonthYearDropdown,showWeekNumbers:n.props.showWeekNumbers,showYearDropdown:n.props.showYearDropdown,withPortal:n.props.withPortal,forceShowMonthNavigation:n.props.forceShowMonthNavigation,showDisabledMonthNavigation:n.props.showDisabledMonthNavigation,scrollableYearDropdown:n.props.scrollableYearDropdown,scrollableMonthYearDropdown:n.props.scrollableMonthYearDropdown,todayButton:n.props.todayButton,weekLabel:n.props.weekLabel,outsideClickIgnoreClass:on,fixedHeight:n.props.fixedHeight,monthsShown:n.props.monthsShown,monthSelectedIn:n.state.monthSelectedIn,onDropdownFocus:n.handleDropdownFocus,onMonthChange:n.props.onMonthChange,onYearChange:n.props.onYearChange,dayClassName:n.props.dayClassName,weekDayClassName:n.props.weekDayClassName,monthClassName:n.props.monthClassName,timeClassName:n.props.timeClassName,showDateSelect:n.props.showDateSelect,showTimeSelect:n.props.showTimeSelect,showTimeSelectOnly:n.props.showTimeSelectOnly,onTimeChange:n.handleTimeChange,timeFormat:n.props.timeFormat,timeIntervals:n.props.timeIntervals,minTime:n.props.minTime,maxTime:n.props.maxTime,excludeTimes:n.props.excludeTimes,filterTime:n.props.filterTime,timeCaption:n.props.timeCaption,className:n.props.calendarClassName,container:n.props.calendarContainer,yearItemNumber:n.props.yearItemNumber,yearDropdownItemNumber:n.props.yearDropdownItemNumber,previousMonthAriaLabel:n.props.previousMonthAriaLabel,previousMonthButtonLabel:n.props.previousMonthButtonLabel,nextMonthAriaLabel:n.props.nextMonthAriaLabel,nextMonthButtonLabel:n.props.nextMonthButtonLabel,previousYearAriaLabel:n.props.previousYearAriaLabel,previousYearButtonLabel:n.props.previousYearButtonLabel,nextYearAriaLabel:n.props.nextYearAriaLabel,nextYearButtonLabel:n.props.nextYearButtonLabel,timeInputLabel:n.props.timeInputLabel,disabledKeyboardNavigation:n.props.disabledKeyboardNavigation,renderCu
stomHeader:n.props.renderCustomHeader,popperProps:n.props.popperProps,renderDayContents:n.props.renderDayContents,renderMonthContent:n.props.renderMonthContent,renderQuarterContent:n.props.renderQuarterContent,renderYearContent:n.props.renderYearContent,onDayMouseEnter:n.props.onDayMouseEnter,onMonthMouseLeave:n.props.onMonthMouseLeave,onYearMouseEnter:n.props.onYearMouseEnter,onYearMouseLeave:n.props.onYearMouseLeave,selectsDisabledDaysInRange:n.props.selectsDisabledDaysInRange,showTimeInput:n.props.showTimeInput,showMonthYearPicker:n.props.showMonthYearPicker,showFullMonthYearPicker:n.props.showFullMonthYearPicker,showTwoColumnMonthYearPicker:n.props.showTwoColumnMonthYearPicker,showFourColumnMonthYearPicker:n.props.showFourColumnMonthYearPicker,showYearPicker:n.props.showYearPicker,showQuarterYearPicker:n.props.showQuarterYearPicker,showWeekPicker:n.props.showWeekPicker,showPopperArrow:n.props.showPopperArrow,excludeScrollbar:n.props.excludeScrollbar,handleOnKeyDown:n.props.onKeyDown,handleOnDayKeyDown:n.onDayKeyDown,isInputFocused:n.state.focused,customTimeInput:n.props.customTimeInput,setPreSelection:n.setPreSelection},n.props.children):null})),Dt(_t(n),"renderAriaLiveRegion",(function(){var e,t=n.props,r=t.dateFormat,a=t.locale,o=n.props.showTimeInput||n.props.showTimeSelect?"PPPPp":"PPPP";return e=n.props.selectsRange?"Selected start date: ".concat(Ht(n.props.startDate,{dateFormat:o,locale:a}),". ").concat(n.props.endDate?"End date: "+Ht(n.props.endDate,{dateFormat:o,locale:a}):""):n.props.showTimeSelectOnly?"Selected time: ".concat(Ht(n.props.selected,{dateFormat:r,locale:a})):n.props.showYearPicker?"Selected year: ".concat(Ht(n.props.selected,{dateFormat:"yyyy",locale:a})):n.props.showMonthYearPicker?"Selected month: ".concat(Ht(n.props.selected,{dateFormat:"MMMM yyyy",locale:a})):n.props.showQuarterYearPicker?"Selected quarter: ".concat(Ht(n.props.selected,{dateFormat:"yyyy, QQQ",locale:a})):"Selected date: ".concat(Ht(n.props.selected,{dateFormat:o,locale:a})),de.default.createElement("span",{role:"alert","aria-live":"polite",className:"react-datepicker__aria-live"},e)})),Dt(_t(n),"renderDateInput",(function(){var e,t=pe.default(n.props.className,Dt({},on,n.state.open)),r=n.props.customInput||de.default.createElement("input",{type:"text"}),a=n.props.customInputRef||"ref",o="string"==typeof n.props.value?n.props.value:"string"==typeof n.state.inputValue?n.state.inputValue:n.props.selectsRange?function(e,t,r){if(!e)return"";var n=Ht(e,r),a=t?Ht(t,r):"";return"".concat(n," - ").concat(a)}(n.props.startDate,n.props.endDate,n.props):Ht(n.props.selected,n.props);return de.default.cloneElement(r,(Dt(Dt(Dt(Dt(Dt(Dt(Dt(Dt(Dt(Dt(e={},a,(function(e){n.input=e})),"value",o),"onBlur",n.handleBlur),"onChange",n.handleChange),"onClick",n.onInputClick),"onFocus",n.handleFocus),"onKeyDown",n.onInputKeyDown),"id",n.props.id),"name",n.props.name),"form",n.props.form),Dt(Dt(Dt(Dt(Dt(Dt(Dt(Dt(Dt(Dt(e,"autoFocus",n.props.autoFocus),"placeholder",n.props.placeholderText),"disabled",n.props.disabled),"autoComplete",n.props.autoComplete),"className",pe.default(r.props.className,t)),"title",n.props.title),"readOnly",n.props.readOnly),"required",n.props.required),"tabIndex",n.props.tabIndex),"aria-describedby",n.props.ariaDescribedBy),Dt(Dt(Dt(e,"aria-invalid",n.props.ariaInvalid),"aria-labelledby",n.props.ariaLabelledBy),"aria-required",n.props.ariaRequired)))})),Dt(_t(n),"renderClearButton",(function(){var 
e=n.props,t=e.isClearable,r=e.disabled,a=e.selected,o=e.startDate,i=e.endDate,s=e.clearButtonTitle,l=e.clearButtonClassName,u=void 0===l?"":l,c=e.ariaLabelClose,d=void 0===c?"Close":c;return!t||null==a&&null==o&&null==i?null:de.default.createElement("button",{type:"button",className:pe.default("react-datepicker__close-icon",u,{"react-datepicker__close-icon--disabled":r}),disabled:r,"aria-label":d,onClick:n.onClearClick,title:s,tabIndex:-1})})),n.state=n.calcInitialState(),n.preventFocusTimeout=null,n}return kt(r,[{key:"componentDidMount",value:function(){window.addEventListener("scroll",this.onScroll,!0)}},{key:"componentDidUpdate",value:function(e,t){var r,n;e.inline&&(r=e.selected,n=this.props.selected,r&&n?Re.default(r)!==Re.default(n)||Fe.default(r)!==Fe.default(n):r!==n)&&this.setPreSelection(this.props.selected),void 0!==this.state.monthSelectedIn&&e.monthsShown!==this.props.monthsShown&&this.setState({monthSelectedIn:0}),e.highlightDates!==this.props.highlightDates&&this.setState({highlightDates:kr(this.props.highlightDates)}),t.focused||Zt(e.selected,this.props.selected)||this.setState({inputValue:null}),t.open!==this.state.open&&(!1===t.open&&!0===this.state.open&&this.props.onCalendarOpen(),!0===t.open&&!1===this.state.open&&this.props.onCalendarClose())}},{key:"componentWillUnmount",value:function(){this.clearPreventFocusTimeout(),window.removeEventListener("scroll",this.onScroll,!0)}},{key:"renderInputContainer",value:function(){var e=this.props,t=e.showIcon,r=e.icon,n=e.calendarIconClassname,a=e.toggleCalendarOnIconClick,o=this.state.open;return de.default.createElement("div",{className:"react-datepicker__input-container".concat(t?" react-datepicker__view-calendar-icon":"")},t&&de.default.createElement(en,Ct({icon:r,className:"".concat(n," ").concat(o&&"react-datepicker-ignore-onclickoutside")},a?{onClick:this.toggleCalendar}:null)),this.state.isRenderAriaLiveMessage&&this.renderAriaLiveRegion(),this.renderDateInput(),this.renderClearButton())}},{key:"render",value:function(){var e=this.renderCalendar();if(this.props.inline)return e;if(this.props.withPortal){var t=this.state.open?de.default.createElement(nn,{enableTabLoop:this.props.enableTabLoop},de.default.createElement("div",{className:"react-datepicker__portal",tabIndex:-1,onKeyDown:this.onPortalKeyDown},e)):null;return this.state.open&&this.props.portalId&&(t=de.default.createElement(tn,{portalId:this.props.portalId,portalHost:this.props.portalHost},t)),de.default.createElement("div",null,this.renderInputContainer(),t)}return de.default.createElement(an,{className:this.props.popperClassName,wrapperClassName:this.props.wrapperClassName,hidePopper:!this.isCalendarOpen(),portalId:this.props.portalId,portalHost:this.props.portalHost,popperModifiers:this.props.popperModifiers,targetComponent:this.renderInputContainer(),popperContainer:this.props.popperContainer,popperComponent:e,popperPlacement:this.props.popperPlacement,popperProps:this.props.popperProps,popperOnKeyDown:this.onPopperKeyDown,enableTabLoop:this.props.enableTabLoop})}}],[{key:"defaultProps",get:function(){return{allowSameDay:!1,dateFormat:"MM/dd/yyyy",dateFormatCalendar:"LLLL 
yyyy",onChange:function(){},disabled:!1,disabledKeyboardNavigation:!1,dropdownMode:"scroll",onFocus:function(){},onBlur:function(){},onKeyDown:function(){},onInputClick:function(){},onSelect:function(){},onClickOutside:function(){},onMonthChange:function(){},onCalendarOpen:function(){},onCalendarClose:function(){},preventOpenOnFocus:!1,onYearChange:function(){},onInputError:function(){},monthsShown:1,readOnly:!1,withPortal:!1,selectsDisabledDaysInRange:!1,shouldCloseOnSelect:!0,showTimeSelect:!1,showTimeInput:!1,showPreviousMonths:!1,showMonthYearPicker:!1,showFullMonthYearPicker:!1,showTwoColumnMonthYearPicker:!1,showFourColumnMonthYearPicker:!1,showYearPicker:!1,showQuarterYearPicker:!1,showWeekPicker:!1,strictParsing:!1,timeIntervals:30,timeCaption:"Time",previousMonthAriaLabel:"Previous Month",previousMonthButtonLabel:"Previous Month",nextMonthAriaLabel:"Next Month",nextMonthButtonLabel:"Next Month",previousYearAriaLabel:"Previous Year",previousYearButtonLabel:"Previous Year",nextYearAriaLabel:"Next Year",nextYearButtonLabel:"Next Year",timeInputLabel:"Time",enableTabLoop:!0,yearItemNumber:Ft,focusSelectedMonth:!1,showPopperArrow:!0,excludeScrollbar:!0,customTimeInput:null,calendarStartDay:void 0,toggleCalendarOnIconClick:!1}}}]),r}(de.default.Component),cn="input",dn="navigate";e.CalendarContainer=$r,e.default=un,e.getDefaultLocale=er,e.registerLocale=function(e,t){var r="undefined"!=typeof window?window:globalThis;r.__localeData__||(r.__localeData__={}),r.__localeData__[e]=t},e.setDefaultLocale=function(e){("undefined"!=typeof window?window:globalThis).__localeId__=e},Object.defineProperty(e,"__esModule",{value:!0})}(t,r(96540),r(5556),r(46942),r(47586),r(92964),r(56658),r(62658),r(92908),r(7806),r(12454),r(83214),r(58980),r(38733),r(10443),r(5377),r(5231),r(63801),r(70486),r(80187),r(89691),r(30161),r(2928),r(24234),r(37066),r(98308),r(17286),r(63571),r(12335),r(78991),r(42231),r(63701),r(49864),r(5618),r(32455),r(38990),r(50136),r(53511),r(38195),r(88138),r(26876),r(15163),r(34631),r(63501),r(78918),r(65068),r(3494),r(49128),r(81191),r(15898),r(16010),r(13238),r(35269),r(68840),r(65104),r(15549),r(58559),r(99699),r(71082),r(85670),r(73908),r(40961),r(32430),r(2402))},98090:(e,t,r)=>{"use strict";r.d(t,{A:()=>d});var n={lessThanXSeconds:{one:"less than a second",other:"less than {{count}} seconds"},xSeconds:{one:"1 second",other:"{{count}} seconds"},halfAMinute:"half a minute",lessThanXMinutes:{one:"less than a minute",other:"less than {{count}} minutes"},xMinutes:{one:"1 minute",other:"{{count}} minutes"},aboutXHours:{one:"about 1 hour",other:"about {{count}} hours"},xHours:{one:"1 hour",other:"{{count}} hours"},xDays:{one:"1 day",other:"{{count}} days"},aboutXWeeks:{one:"about 1 week",other:"about {{count}} weeks"},xWeeks:{one:"1 week",other:"{{count}} weeks"},aboutXMonths:{one:"about 1 month",other:"about {{count}} months"},xMonths:{one:"1 month",other:"{{count}} months"},aboutXYears:{one:"about 1 year",other:"about {{count}} years"},xYears:{one:"1 year",other:"{{count}} years"},overXYears:{one:"over 1 year",other:"over {{count}} years"},almostXYears:{one:"almost 1 year",other:"almost {{count}} years"}};const a=function(e,t,r){var a,o=n[e];return a="string"===typeof o?o:1===t?o.one:o.other.replace("{{count}}",t.toString()),null!==r&&void 0!==r&&r.addSuffix?r.comparison&&r.comparison>0?"in "+a:a+" ago":a};function o(e){return function(){var t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},r=t.width?String(t.width):e.defaultWidth;return 
e.formats[r]||e.formats[e.defaultWidth]}}var i={date:o({formats:{full:"EEEE, MMMM do, y",long:"MMMM do, y",medium:"MMM d, y",short:"MM/dd/yyyy"},defaultWidth:"full"}),time:o({formats:{full:"h:mm:ss a zzzz",long:"h:mm:ss a z",medium:"h:mm:ss a",short:"h:mm a"},defaultWidth:"full"}),dateTime:o({formats:{full:"{{date}} 'at' {{time}}",long:"{{date}} 'at' {{time}}",medium:"{{date}}, {{time}}",short:"{{date}}, {{time}}"},defaultWidth:"full"})};var s={lastWeek:"'last' eeee 'at' p",yesterday:"'yesterday at' p",today:"'today at' p",tomorrow:"'tomorrow at' p",nextWeek:"eeee 'at' p",other:"P"};function l(e){return function(t,r){var n;if("formatting"===(null!==r&&void 0!==r&&r.context?String(r.context):"standalone")&&e.formattingValues){var a=e.defaultFormattingWidth||e.defaultWidth,o=null!==r&&void 0!==r&&r.width?String(r.width):a;n=e.formattingValues[o]||e.formattingValues[a]}else{var i=e.defaultWidth,s=null!==r&&void 0!==r&&r.width?String(r.width):e.defaultWidth;n=e.values[s]||e.values[i]}return n[e.argumentCallback?e.argumentCallback(t):t]}}function u(e){return function(t){var r=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},n=r.width,a=n&&e.matchPatterns[n]||e.matchPatterns[e.defaultMatchWidth],o=t.match(a);if(!o)return null;var i,s=o[0],l=n&&e.parsePatterns[n]||e.parsePatterns[e.defaultParseWidth],u=Array.isArray(l)?function(e,t){for(var r=0;r<e.length;r++)if(t(e[r]))return r}(l,(function(e){return e.test(s)})):function(e,t){for(var r in e)if(e.hasOwnProperty(r)&&t(e[r]))return r}(l,(function(e){return e.test(s)}));return i=e.valueCallback?e.valueCallback(u):u,i=r.valueCallback?r.valueCallback(i):i,{value:i,rest:t.slice(s.length)}}}var c;const d={code:"en-US",formatDistance:a,formatLong:i,formatRelative:function(e,t,r,n){return s[e]},localize:{ordinalNumber:function(e,t){var r=Number(e),n=r%100;if(n>20||n<10)switch(n%10){case 1:return r+"st";case 2:return r+"nd";case 3:return r+"rd"}return r+"th"},era:l({values:{narrow:["B","A"],abbreviated:["BC","AD"],wide:["Before Christ","Anno Domini"]},defaultWidth:"wide"}),quarter:l({values:{narrow:["1","2","3","4"],abbreviated:["Q1","Q2","Q3","Q4"],wide:["1st quarter","2nd quarter","3rd quarter","4th quarter"]},defaultWidth:"wide",argumentCallback:function(e){return e-1}}),month:l({values:{narrow:["J","F","M","A","M","J","J","A","S","O","N","D"],abbreviated:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"],wide:["January","February","March","April","May","June","July","August","September","October","November","December"]},defaultWidth:"wide"}),day:l({values:{narrow:["S","M","T","W","T","F","S"],short:["Su","Mo","Tu","We","Th","Fr","Sa"],abbreviated:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],wide:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"]},defaultWidth:"wide"}),dayPeriod:l({values:{narrow:{am:"a",pm:"p",midnight:"mi",noon:"n",morning:"morning",afternoon:"afternoon",evening:"evening",night:"night"},abbreviated:{am:"AM",pm:"PM",midnight:"midnight",noon:"noon",morning:"morning",afternoon:"afternoon",evening:"evening",night:"night"},wide:{am:"a.m.",pm:"p.m.",midnight:"midnight",noon:"noon",morning:"morning",afternoon:"afternoon",evening:"evening",night:"night"}},defaultWidth:"wide",formattingValues:{narrow:{am:"a",pm:"p",midnight:"mi",noon:"n",morning:"in the morning",afternoon:"in the afternoon",evening:"in the evening",night:"at night"},abbreviated:{am:"AM",pm:"PM",midnight:"midnight",noon:"noon",morning:"in the morning",afternoon:"in the afternoon",evening:"in the evening",night:"at night"},wide:{am:"a.m.",pm:"p.m.",midnight:"midnight",noon:"noon",morning:"in the morning",afternoon:"in the afternoon",evening:"in the evening",night:"at night"}},defaultFormattingWidth:"wide"})},match:{ordinalNumber:(c={matchPattern:/^(\d+)(th|st|nd|rd)?/i,parsePattern:/\d+/i,valueCallback:function(e){return parseInt(e,10)}},function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},r=e.match(c.matchPattern);if(!r)return null;var
n=r[0],a=e.match(c.parsePattern);if(!a)return null;var o=c.valueCallback?c.valueCallback(a[0]):a[0];return{value:o=t.valueCallback?t.valueCallback(o):o,rest:e.slice(n.length)}}),era:u({matchPatterns:{narrow:/^(b|a)/i,abbreviated:/^(b\.?\s?c\.?|b\.?\s?c\.?\s?e\.?|a\.?\s?d\.?|c\.?\s?e\.?)/i,wide:/^(before christ|before common era|anno domini|common era)/i},defaultMatchWidth:"wide",parsePatterns:{any:[/^b/i,/^(a|c)/i]},defaultParseWidth:"any"}),quarter:u({matchPatterns:{narrow:/^[1234]/i,abbreviated:/^q[1234]/i,wide:/^[1234](th|st|nd|rd)? quarter/i},defaultMatchWidth:"wide",parsePatterns:{any:[/1/i,/2/i,/3/i,/4/i]},defaultParseWidth:"any",valueCallback:function(e){return e+1}}),month:u({matchPatterns:{narrow:/^[jfmasond]/i,abbreviated:/^(jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec)/i,wide:/^(january|february|march|april|may|june|july|august|september|october|november|december)/i},defaultMatchWidth:"wide",parsePatterns:{narrow:[/^j/i,/^f/i,/^m/i,/^a/i,/^m/i,/^j/i,/^j/i,/^a/i,/^s/i,/^o/i,/^n/i,/^d/i],any:[/^ja/i,/^f/i,/^mar/i,/^ap/i,/^may/i,/^jun/i,/^jul/i,/^au/i,/^s/i,/^o/i,/^n/i,/^d/i]},defaultParseWidth:"any"}),day:u({matchPatterns:{narrow:/^[smtwf]/i,short:/^(su|mo|tu|we|th|fr|sa)/i,abbreviated:/^(sun|mon|tue|wed|thu|fri|sat)/i,wide:/^(sunday|monday|tuesday|wednesday|thursday|friday|saturday)/i},defaultMatchWidth:"wide",parsePatterns:{narrow:[/^s/i,/^m/i,/^t/i,/^w/i,/^t/i,/^f/i,/^s/i],any:[/^su/i,/^m/i,/^tu/i,/^w/i,/^th/i,/^f/i,/^sa/i]},defaultParseWidth:"any"}),dayPeriod:u({matchPatterns:{narrow:/^(a|p|mi|n|(in the|at) (morning|afternoon|evening|night))/i,any:/^([ap]\.?\s?m\.?|midnight|noon|(in the|at) (morning|afternoon|evening|night))/i},defaultMatchWidth:"any",parsePatterns:{any:{am:/^a/i,pm:/^p/i,midnight:/^mi/i,noon:/^no/i,morning:/morning/i,afternoon:/afternoon/i,evening:/evening/i,night:/night/i}},defaultParseWidth:"any"})},options:{weekStartsOn:0,firstWeekContainsDate:1}}},28458:(e,t,r)=>{"use strict";r.d(t,{q:()=>a});var n={};function a(){return n}},49668:(e,t,r)=>{"use strict";r.d(t,{A:()=>o});var n=function(e,t){switch(e){case"P":return t.date({width:"short"});case"PP":return t.date({width:"medium"});case"PPP":return t.date({width:"long"});default:return t.date({width:"full"})}},a=function(e,t){switch(e){case"p":return t.time({width:"short"});case"pp":return t.time({width:"medium"});case"ppp":return t.time({width:"long"});default:return t.time({width:"full"})}};const o={p:a,P:function(e,t){var r,o=e.match(/(P+)(p+)?/)||[],i=o[1],s=o[2];if(!s)return n(e,t);switch(i){case"P":r=t.dateTime({width:"short"});break;case"PP":r=t.dateTime({width:"medium"});break;case"PPP":r=t.dateTime({width:"long"});break;default:r=t.dateTime({width:"full"})}return r.replace("{{date}}",n(i,t)).replace("{{time}}",a(s,t))}}},40460:(e,t,r)=>{"use strict";function n(e){var t=new Date(Date.UTC(e.getFullYear(),e.getMonth(),e.getDate(),e.getHours(),e.getMinutes(),e.getSeconds(),e.getMilliseconds()));return t.setUTCFullYear(e.getFullYear()),e.getTime()-t.getTime()}r.d(t,{A:()=>n})},49717:(e,t,r)=>{"use strict";r.d(t,{A:()=>l});var n=r(99699),a=r(74715),o=r(76535),i=r(11151);var s=6048e5;function l(e){(0,i.A)(1,arguments);var t=(0,n.default)(e),r=(0,a.A)(t).getTime()-function(e){(0,i.A)(1,arguments);var t=(0,o.A)(e),r=new Date(0);return r.setUTCFullYear(t,0,4),r.setUTCHours(0,0,0,0),(0,a.A)(r)}(t).getTime();return Math.round(r/s)+1}},76535:(e,t,r)=>{"use strict";r.d(t,{A:()=>i});var n=r(99699),a=r(11151),o=r(74715);function i(e){(0,a.A)(1,arguments);var t=(0,n.default)(e),r=t.getUTCFullYear(),i=new 
Date(0);i.setUTCFullYear(r+1,0,4),i.setUTCHours(0,0,0,0);var s=(0,o.A)(i),l=new Date(0);l.setUTCFullYear(r,0,4),l.setUTCHours(0,0,0,0);var u=(0,o.A)(l);return t.getTime()>=s.getTime()?r+1:t.getTime()>=u.getTime()?r:r-1}},9478:(e,t,r)=>{"use strict";r.d(t,{A:()=>c});var n=r(99699),a=r(67350),o=r(38888),i=r(11151),s=r(67668),l=r(28458);var u=6048e5;function c(e,t){(0,i.A)(1,arguments);var r=(0,n.default)(e),c=(0,a.A)(r,t).getTime()-function(e,t){var r,n,u,c,d,p,f,h;(0,i.A)(1,arguments);var v=(0,l.q)(),g=(0,s.A)(null!==(r=null!==(n=null!==(u=null!==(c=null===t||void 0===t?void 0:t.firstWeekContainsDate)&&void 0!==c?c:null===t||void 0===t||null===(d=t.locale)||void 0===d||null===(p=d.options)||void 0===p?void 0:p.firstWeekContainsDate)&&void 0!==u?u:v.firstWeekContainsDate)&&void 0!==n?n:null===(f=v.locale)||void 0===f||null===(h=f.options)||void 0===h?void 0:h.firstWeekContainsDate)&&void 0!==r?r:1),m=(0,o.A)(e,t),b=new Date(0);return b.setUTCFullYear(m,0,g),b.setUTCHours(0,0,0,0),(0,a.A)(b,t)}(r,t).getTime();return Math.round(c/u)+1}},38888:(e,t,r)=>{"use strict";r.d(t,{A:()=>l});var n=r(99699),a=r(11151),o=r(67350),i=r(67668),s=r(28458);function l(e,t){var r,l,u,c,d,p,f,h;(0,a.A)(1,arguments);var v=(0,n.default)(e),g=v.getUTCFullYear(),m=(0,s.q)(),b=(0,i.A)(null!==(r=null!==(l=null!==(u=null!==(c=null===t||void 0===t?void 0:t.firstWeekContainsDate)&&void 0!==c?c:null===t||void 0===t||null===(d=t.locale)||void 0===d||null===(p=d.options)||void 0===p?void 0:p.firstWeekContainsDate)&&void 0!==u?u:m.firstWeekContainsDate)&&void 0!==l?l:null===(f=m.locale)||void 0===f||null===(h=f.options)||void 0===h?void 0:h.firstWeekContainsDate)&&void 0!==r?r:1);if(!(b>=1&&b<=7))throw new RangeError("firstWeekContainsDate must be between 1 and 7 inclusively");var y=new Date(0);y.setUTCFullYear(g+1,0,b),y.setUTCHours(0,0,0,0);var w=(0,o.A)(y,t),x=new Date(0);x.setUTCFullYear(g,0,b),x.setUTCHours(0,0,0,0);var k=(0,o.A)(x,t);return v.getTime()>=w.getTime()?g+1:v.getTime()>=k.getTime()?g:g-1}},9581:(e,t,r)=>{"use strict";r.d(t,{ef:()=>o,lJ:()=>s,xM:()=>i});var n=["D","DD"],a=["YY","YYYY"];function o(e){return-1!==n.indexOf(e)}function i(e){return-1!==a.indexOf(e)}function s(e,t,r){if("YYYY"===e)throw new RangeError("Use `yyyy` instead of `YYYY` (in `".concat(t,"`) for formatting years to the input `").concat(r,"`; see: https://github.com/date-fns/date-fns/blob/master/docs/unicodeTokens.md"));if("YY"===e)throw new RangeError("Use `yy` instead of `YY` (in `".concat(t,"`) for formatting years to the input `").concat(r,"`; see: https://github.com/date-fns/date-fns/blob/master/docs/unicodeTokens.md"));if("D"===e)throw new RangeError("Use `d` instead of `D` (in `".concat(t,"`) for formatting days of the month to the input `").concat(r,"`; see: https://github.com/date-fns/date-fns/blob/master/docs/unicodeTokens.md"));if("DD"===e)throw new RangeError("Use `dd` instead of `DD` (in `".concat(t,"`) for formatting days of the month to the input `").concat(r,"`; see: https://github.com/date-fns/date-fns/blob/master/docs/unicodeTokens.md"))}},11151:(e,t,r)=>{"use strict";function n(e,t){if(t.length<e)throw new TypeError(e+" argument"+(e>1?"s":"")+" required, but only "+t.length+" present")}r.d(t,{A:()=>n})},74715:(e,t,r)=>{"use strict";r.d(t,{A:()=>o});var n=r(99699),a=r(11151);function o(e){(0,a.A)(1,arguments);var t=(0,n.default)(e),r=t.getUTCDay(),o=(r<1?7:0)+r-1;return t.setUTCDate(t.getUTCDate()-o),t.setUTCHours(0,0,0,0),t}},67350:(e,t,r)=>{"use strict";r.d(t,{A:()=>s});var n=r(99699),a=r(11151),o=r(67668),i=r(28458);function s(e,t){var
r,s,l,u,c,d,p,f;(0,a.A)(1,arguments);var h=(0,i.q)(),v=(0,o.A)(null!==(r=null!==(s=null!==(l=null!==(u=null===t||void 0===t?void 0:t.weekStartsOn)&&void 0!==u?u:null===t||void 0===t||null===(c=t.locale)||void 0===c||null===(d=c.options)||void 0===d?void 0:d.weekStartsOn)&&void 0!==l?l:h.weekStartsOn)&&void 0!==s?s:null===(p=h.locale)||void 0===p||null===(f=p.options)||void 0===f?void 0:f.weekStartsOn)&&void 0!==r?r:0);if(!(v>=0&&v<=6))throw new RangeError("weekStartsOn must be between 0 and 6 inclusively");var g=(0,n.default)(e),m=g.getUTCDay(),b=(m<v?7:0)+m-v;return g.setUTCDate(g.getUTCDate()-b),g.setUTCHours(0,0,0,0),g}},67668:(e,t,r)=>{"use strict";function n(e){if(null===e||!0===e||!1===e)return NaN;var t=Number(e);return isNaN(t)?t:t<0?Math.ceil(t):Math.floor(t)}r.d(t,{A:()=>n})},7806:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>i});var n=r(67668),a=r(99699),o=r(11151);function i(e,t){(0,o.A)(2,arguments);var r=(0,a.default)(e),i=(0,n.A)(t);return isNaN(i)?new Date(NaN):i?(r.setDate(r.getDate()+i),r):r}},92908:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>s});var n=r(67668),a=r(77871),o=r(11151),i=36e5;function s(e,t){(0,o.A)(2,arguments);var r=(0,n.A)(t);return(0,a.A)(e,r*i)}},77871:(e,t,r)=>{"use strict";r.d(t,{A:()=>i});var n=r(67668),a=r(99699),o=r(11151);function i(e,t){(0,o.A)(2,arguments);var r=(0,a.default)(e).getTime(),i=(0,n.A)(t);return new Date(r+i)}},62658:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>s});var n=r(67668),a=r(77871),o=r(11151),i=6e4;function s(e,t){(0,o.A)(2,arguments);var r=(0,n.A)(t);return(0,a.A)(e,r*i)}},83214:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>i});var n=r(67668),a=r(99699),o=r(11151);function i(e,t){(0,o.A)(2,arguments);var r=(0,a.default)(e),i=(0,n.A)(t);if(isNaN(i))return new Date(NaN);if(!i)return r;var s=r.getDate(),l=new Date(r.getTime());return l.setMonth(r.getMonth()+i+1,0),s>=l.getDate()?l:(r.setFullYear(l.getFullYear(),l.getMonth(),s),r)}},58980:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>i});var n=r(67668),a=r(83214),o=r(11151);function i(e,t){(0,o.A)(2,arguments);var r=3*(0,n.A)(t);return(0,a.default)(e,r)}},12454:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>i});var n=r(67668),a=r(7806),o=r(11151);function i(e,t){(0,o.A)(2,arguments);var r=7*(0,n.A)(t);return(0,a.default)(e,r)}},38733:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>i});var n=r(67668),a=r(83214),o=r(11151);function i(e,t){(0,o.A)(2,arguments);var r=(0,n.A)(t);return(0,a.default)(e,12*r)}},4019:(e,t,r)=>{"use strict";r.d(t,{Cg:()=>n,_m:()=>o,s0:()=>a});Math.pow(10,8);var n=6e4,a=36e5,o=1e3},53511:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>s});var n=r(40460),a=r(26876),o=r(11151),i=864e5;function s(e,t){(0,o.A)(2,arguments);var r=(0,a.default)(e),s=(0,a.default)(t),l=r.getTime()-(0,n.A)(r),u=s.getTime()-(0,n.A)(s);return Math.round((l-u)/i)}},38195:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>o});var n=r(99699),a=r(11151);function o(e,t){(0,a.A)(2,arguments);var r=(0,n.default)(e),o=(0,n.default)(t);return 12*(r.getFullYear()-o.getFullYear())+(r.getMonth()-o.getMonth())}},88138:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>o});var n=r(99699),a=r(11151);function o(e,t){(0,a.A)(2,arguments);var r=(0,n.default)(e),o=(0,n.default)(t);return r.getFullYear()-o.getFullYear()}},65068:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>o});var n=r(99699),a=r(11151);function o(e){(0,a.A)(1,arguments);var t=(0,n.default)(e);return t.setHours(23,59,59,999),t}},49128:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>o});var n=r(99699),a=r(11151);function o(e){(0,a.A)(1,arguments);var t=(0,n.default)(e),r=t.getMonth();return
t.setFullYear(t.getFullYear(),r+1,0),t.setHours(23,59,59,999),t}},3494:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>s});var n=r(28458),a=r(99699),o=r(67668),i=r(11151);function s(e,t){var r,s,l,u,c,d,p,f;(0,i.A)(1,arguments);var h=(0,n.q)(),v=(0,o.A)(null!==(r=null!==(s=null!==(l=null!==(u=null===t||void 0===t?void 0:t.weekStartsOn)&&void 0!==u?u:null===t||void 0===t||null===(c=t.locale)||void 0===c||null===(d=c.options)||void 0===d?void 0:d.weekStartsOn)&&void 0!==l?l:h.weekStartsOn)&&void 0!==s?s:null===(p=h.locale)||void 0===p||null===(f=p.options)||void 0===f?void 0:f.weekStartsOn)&&void 0!==r?r:0);if(!(v>=0&&v<=6))throw new RangeError("weekStartsOn must be between 0 and 6 inclusively");var g=(0,a.default)(e),m=g.getDay(),b=6+(m<v?-7:0)-(m-v);return g.setDate(g.getDate()+b),g.setHours(23,59,59,999),g}},81191:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>o});var n=r(99699),a=r(11151);function o(e){(0,a.A)(1,arguments);var t=(0,n.default)(e),r=t.getFullYear();return t.setFullYear(r+1,0,0),t.setHours(23,59,59,999),t}},56658:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>R});var n=r(92964),a=r(50670),o=r(99699),i=r(11151);var s=r(49717),l=r(76535),u=r(9478),c=r(38888);function d(e,t){for(var r=e<0?"-":"",n=Math.abs(e).toString();n.length<t;)n="0"+n;return r+n}const p={y:function(e,t){var r=e.getUTCFullYear(),n=r>0?r:1-r;return d("yy"===t?n%100:n,t.length)},M:function(e,t){var r=e.getUTCMonth();return"M"===t?String(r+1):d(r+1,2)},d:function(e,t){return d(e.getUTCDate(),t.length)},a:function(e,t){var r=e.getUTCHours()/12>=1?"pm":"am";switch(t){case"a":case"aa":return r.toUpperCase();case"aaa":return r;case"aaaaa":return r[0];default:return"am"===r?"a.m.":"p.m."}},h:function(e,t){return d(e.getUTCHours()%12||12,t.length)},H:function(e,t){return d(e.getUTCHours(),t.length)},m:function(e,t){return d(e.getUTCMinutes(),t.length)},s:function(e,t){return d(e.getUTCSeconds(),t.length)},S:function(e,t){var r=t.length,n=e.getUTCMilliseconds();return d(Math.floor(n*Math.pow(10,r-3)),t.length)}};var f="midnight",h="noon",v="morning",g="afternoon",m="evening",b="night";function y(e,t){var r=e>0?"-":"+",n=Math.abs(e),a=Math.floor(n/60),o=n%60;if(0===o)return r+String(a);var i=t||"";return r+String(a)+i+d(o,2)}function w(e,t){return e%60===0?(e>0?"-":"+")+d(Math.abs(e)/60,2):x(e,t)}function x(e,t){var r=t||"",n=e>0?"-":"+",a=Math.abs(e);return n+d(Math.floor(a/60),2)+r+d(a%60,2)}const k={G:function(e,t,r){var n=e.getUTCFullYear()>0?1:0;switch(t){case"G":case"GG":case"GGG":return r.era(n,{width:"abbreviated"});case"GGGGG":return r.era(n,{width:"narrow"});default:return r.era(n,{width:"wide"})}},y:function(e,t,r){if("yo"===t){var n=e.getUTCFullYear(),a=n>0?n:1-n;return r.ordinalNumber(a,{unit:"year"})}return p.y(e,t)},Y:function(e,t,r,n){var a=(0,c.A)(e,n),o=a>0?a:1-a;return"YY"===t?d(o%100,2):"Yo"===t?r.ordinalNumber(o,{unit:"year"}):d(o,t.length)},R:function(e,t){return d((0,l.A)(e),t.length)},u:function(e,t){return d(e.getUTCFullYear(),t.length)},Q:function(e,t,r){var n=Math.ceil((e.getUTCMonth()+1)/3);switch(t){case"Q":return String(n);case"QQ":return d(n,2);case"Qo":return r.ordinalNumber(n,{unit:"quarter"});case"QQQ":return r.quarter(n,{width:"abbreviated",context:"formatting"});case"QQQQQ":return r.quarter(n,{width:"narrow",context:"formatting"});default:return r.quarter(n,{width:"wide",context:"formatting"})}},q:function(e,t,r){var n=Math.ceil((e.getUTCMonth()+1)/3);switch(t){case"q":return String(n);case"qq":return d(n,2);case"qo":return r.ordinalNumber(n,{unit:"quarter"});case"qqq":return r.quarter(n,{width:"abbreviated",context:"standalone"});case"qqqqq":return r.quarter(n,{width:"narrow",context:"standalone"});default:return
r.quarter(n,{width:"wide",context:"standalone"})}},M:function(e,t,r){var n=e.getUTCMonth();switch(t){case"M":case"MM":return p.M(e,t);case"Mo":return r.ordinalNumber(n+1,{unit:"month"});case"MMM":return r.month(n,{width:"abbreviated",context:"formatting"});case"MMMMM":return r.month(n,{width:"narrow",context:"formatting"});default:return r.month(n,{width:"wide",context:"formatting"})}},L:function(e,t,r){var n=e.getUTCMonth();switch(t){case"L":return String(n+1);case"LL":return d(n+1,2);case"Lo":return r.ordinalNumber(n+1,{unit:"month"});case"LLL":return r.month(n,{width:"abbreviated",context:"standalone"});case"LLLLL":return r.month(n,{width:"narrow",context:"standalone"});default:return r.month(n,{width:"wide",context:"standalone"})}},w:function(e,t,r,n){var a=(0,u.A)(e,n);return"wo"===t?r.ordinalNumber(a,{unit:"week"}):d(a,t.length)},I:function(e,t,r){var n=(0,s.A)(e);return"Io"===t?r.ordinalNumber(n,{unit:"week"}):d(n,t.length)},d:function(e,t,r){return"do"===t?r.ordinalNumber(e.getUTCDate(),{unit:"date"}):p.d(e,t)},D:function(e,t,r){var n=function(e){(0,i.A)(1,arguments);var t=(0,o.default)(e),r=t.getTime();t.setUTCMonth(0,1),t.setUTCHours(0,0,0,0);var n=r-t.getTime();return Math.floor(n/864e5)+1}(e);return"Do"===t?r.ordinalNumber(n,{unit:"dayOfYear"}):d(n,t.length)},E:function(e,t,r){var n=e.getUTCDay();switch(t){case"E":case"EE":case"EEE":return r.day(n,{width:"abbreviated",context:"formatting"});case"EEEEE":return r.day(n,{width:"narrow",context:"formatting"});case"EEEEEE":return r.day(n,{width:"short",context:"formatting"});default:return r.day(n,{width:"wide",context:"formatting"})}},e:function(e,t,r,n){var a=e.getUTCDay(),o=(a-n.weekStartsOn+8)%7||7;switch(t){case"e":return String(o);case"ee":return d(o,2);case"eo":return r.ordinalNumber(o,{unit:"day"});case"eee":return r.day(a,{width:"abbreviated",context:"formatting"});case"eeeee":return r.day(a,{width:"narrow",context:"formatting"});case"eeeeee":return r.day(a,{width:"short",context:"formatting"});default:return r.day(a,{width:"wide",context:"formatting"})}},c:function(e,t,r,n){var a=e.getUTCDay(),o=(a-n.weekStartsOn+8)%7||7;switch(t){case"c":return String(o);case"cc":return d(o,t.length);case"co":return r.ordinalNumber(o,{unit:"day"});case"ccc":return r.day(a,{width:"abbreviated",context:"standalone"});case"ccccc":return r.day(a,{width:"narrow",context:"standalone"});case"cccccc":return r.day(a,{width:"short",context:"standalone"});default:return r.day(a,{width:"wide",context:"standalone"})}},i:function(e,t,r){var n=e.getUTCDay(),a=0===n?7:n;switch(t){case"i":return String(a);case"ii":return d(a,t.length);case"io":return r.ordinalNumber(a,{unit:"day"});case"iii":return r.day(n,{width:"abbreviated",context:"formatting"});case"iiiii":return r.day(n,{width:"narrow",context:"formatting"});case"iiiiii":return r.day(n,{width:"short",context:"formatting"});default:return r.day(n,{width:"wide",context:"formatting"})}},a:function(e,t,r){var n=e.getUTCHours()/12>=1?"pm":"am";switch(t){case"a":case"aa":return r.dayPeriod(n,{width:"abbreviated",context:"formatting"});case"aaa":return r.dayPeriod(n,{width:"abbreviated",context:"formatting"}).toLowerCase();case"aaaaa":return r.dayPeriod(n,{width:"narrow",context:"formatting"});default:return r.dayPeriod(n,{width:"wide",context:"formatting"})}},b:function(e,t,r){var n,a=e.getUTCHours();switch(n=12===a?h:0===a?f:a/12>=1?"pm":"am",t){case"b":case"bb":return r.dayPeriod(n,{width:"abbreviated",context:"formatting"});case"bbb":return 
r.dayPeriod(n,{width:"abbreviated",context:"formatting"}).toLowerCase();case"bbbbb":return r.dayPeriod(n,{width:"narrow",context:"formatting"});default:return r.dayPeriod(n,{width:"wide",context:"formatting"})}},B:function(e,t,r){var n,a=e.getUTCHours();switch(n=a>=17?m:a>=12?g:a>=4?v:b,t){case"B":case"BB":case"BBB":return r.dayPeriod(n,{width:"abbreviated",context:"formatting"});case"BBBBB":return r.dayPeriod(n,{width:"narrow",context:"formatting"});default:return r.dayPeriod(n,{width:"wide",context:"formatting"})}},h:function(e,t,r){if("ho"===t){var n=e.getUTCHours()%12;return 0===n&&(n=12),r.ordinalNumber(n,{unit:"hour"})}return p.h(e,t)},H:function(e,t,r){return"Ho"===t?r.ordinalNumber(e.getUTCHours(),{unit:"hour"}):p.H(e,t)},K:function(e,t,r){var n=e.getUTCHours()%12;return"Ko"===t?r.ordinalNumber(n,{unit:"hour"}):d(n,t.length)},k:function(e,t,r){var n=e.getUTCHours();return 0===n&&(n=24),"ko"===t?r.ordinalNumber(n,{unit:"hour"}):d(n,t.length)},m:function(e,t,r){return"mo"===t?r.ordinalNumber(e.getUTCMinutes(),{unit:"minute"}):p.m(e,t)},s:function(e,t,r){return"so"===t?r.ordinalNumber(e.getUTCSeconds(),{unit:"second"}):p.s(e,t)},S:function(e,t){return p.S(e,t)},X:function(e,t,r,n){var a=(n._originalDate||e).getTimezoneOffset();if(0===a)return"Z";switch(t){case"X":return w(a);case"XXXX":case"XX":return x(a);default:return x(a,":")}},x:function(e,t,r,n){var a=(n._originalDate||e).getTimezoneOffset();switch(t){case"x":return w(a);case"xxxx":case"xx":return x(a);default:return x(a,":")}},O:function(e,t,r,n){var a=(n._originalDate||e).getTimezoneOffset();switch(t){case"O":case"OO":case"OOO":return"GMT"+y(a,":");default:return"GMT"+x(a,":")}},z:function(e,t,r,n){var a=(n._originalDate||e).getTimezoneOffset();switch(t){case"z":case"zz":case"zzz":return"GMT"+y(a,":");default:return"GMT"+x(a,":")}},t:function(e,t,r,n){var a=n._originalDate||e;return d(Math.floor(a.getTime()/1e3),t.length)},T:function(e,t,r,n){return d((n._originalDate||e).getTime(),t.length)}};var D=r(49668),C=r(40460),E=r(9581),A=r(67668),S=r(28458),_=r(98090),M=/[yYQqMLwIdDecihHKkms]o|(\w)\1*|''|'(''|[^'])+('|$)|./g,T=/P+p+|P+|p+|''|'(''|[^'])+('|$)|./g,O=/^'([^]*?)'?$/,P=/''/g,N=/[a-zA-Z]/;function R(e,t,r){var s,l,u,c,d,p,f,h,v,g,m,b,y,w,x,R,L,F;(0,i.A)(2,arguments);var Y=String(t),j=(0,S.q)(),B=null!==(s=null!==(l=null===r||void 0===r?void 0:r.locale)&&void 0!==l?l:j.locale)&&void 0!==s?s:_.A,I=(0,A.A)(null!==(u=null!==(c=null!==(d=null!==(p=null===r||void 0===r?void 0:r.firstWeekContainsDate)&&void 0!==p?p:null===r||void 0===r||null===(f=r.locale)||void 0===f||null===(h=f.options)||void 0===h?void 0:h.firstWeekContainsDate)&&void 0!==d?d:j.firstWeekContainsDate)&&void 0!==c?c:null===(v=j.locale)||void 0===v||null===(g=v.options)||void 0===g?void 0:g.firstWeekContainsDate)&&void 0!==u?u:1);if(!(I>=1&&I<=7))throw new RangeError("firstWeekContainsDate must be between 1 and 7 inclusively");var H=(0,A.A)(null!==(m=null!==(b=null!==(y=null!==(w=null===r||void 0===r?void 0:r.weekStartsOn)&&void 0!==w?w:null===r||void 0===r||null===(x=r.locale)||void 0===x||null===(R=x.options)||void 0===R?void 0:R.weekStartsOn)&&void 0!==y?y:j.weekStartsOn)&&void 0!==b?b:null===(L=j.locale)||void 0===L||null===(F=L.options)||void 0===F?void 0:F.weekStartsOn)&&void 0!==m?m:0);if(!(H>=0&&H<=6))throw new RangeError("weekStartsOn must be between 0 and 6 inclusively");if(!B.localize)throw new RangeError("locale must contain localize property");if(!B.formatLong)throw new RangeError("locale must contain formatLong property");var 
W=(0,o.default)(e);if(!(0,n.default)(W))throw new RangeError("Invalid time value");var U=(0,C.A)(W),q=(0,a.A)(W,U),z={firstWeekContainsDate:I,weekStartsOn:H,locale:B,_originalDate:W};return Y.match(T).map((function(e){var t=e[0];return"p"===t||"P"===t?(0,D.A[t])(e,B.formatLong):e})).join("").match(M).map((function(n){if("''"===n)return"'";var a=n[0];if("'"===a)return function(e){var t=e.match(O);if(!t)return e;return t[1].replace(P,"'")}(n);var o=k[a];if(o)return null!==r&&void 0!==r&&r.useAdditionalWeekYearTokens||!(0,E.xM)(n)||(0,E.lJ)(n,t,String(e)),null!==r&&void 0!==r&&r.useAdditionalDayOfYearTokens||!(0,E.ef)(n)||(0,E.lJ)(n,t,String(e)),o(q,n,B.localize,z);if(a.match(N))throw new RangeError("Format string contains an unescaped latin alphabet character `"+a+"`");return n})).join("")}},24234:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>o});var n=r(99699),a=r(11151);function o(e){return(0,a.A)(1,arguments),(0,n.default)(e).getDate()}},2928:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>o});var n=r(99699),a=r(11151);function o(e){return(0,a.A)(1,arguments),(0,n.default)(e).getDay()}},30161:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>o});var n=r(99699),a=r(11151);function o(e){return(0,a.A)(1,arguments),(0,n.default)(e).getHours()}},37066:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>u});var n=r(99699),a=r(15163),o=r(11151);function i(e){return(0,o.A)(1,arguments),(0,a.default)(e,{weekStartsOn:1})}function s(e){(0,o.A)(1,arguments);var t=function(e){(0,o.A)(1,arguments);var t=(0,n.default)(e),r=t.getFullYear(),a=new Date(0);a.setFullYear(r+1,0,4),a.setHours(0,0,0,0);var s=i(a),l=new Date(0);l.setFullYear(r,0,4),l.setHours(0,0,0,0);var u=i(l);return t.getTime()>=s.getTime()?r+1:t.getTime()>=u.getTime()?r:r-1}(e),r=new Date(0);return r.setFullYear(t,0,4),r.setHours(0,0,0,0),i(r)}var l=6048e5;function u(e){(0,o.A)(1,arguments);var t=(0,n.default)(e),r=i(t).getTime()-s(t).getTime();return Math.round(r/l)+1}},89691:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>o});var n=r(99699),a=r(11151);function o(e){return(0,a.A)(1,arguments),(0,n.default)(e).getMinutes()}},98308:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>o});var n=r(99699),a=r(11151);function o(e){return(0,a.A)(1,arguments),(0,n.default)(e).getMonth()}},17286:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>o});var n=r(99699),a=r(11151);function o(e){(0,a.A)(1,arguments);var t=(0,n.default)(e);return Math.floor(t.getMonth()/3)+1}},80187:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>o});var n=r(99699),a=r(11151);function o(e){return(0,a.A)(1,arguments),(0,n.default)(e).getSeconds()}},12335:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>o});var n=r(99699),a=r(11151);function o(e){return(0,a.A)(1,arguments),(0,n.default)(e).getTime()}},63571:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>o});var n=r(99699),a=r(11151);function o(e){return(0,a.A)(1,arguments),(0,n.default)(e).getFullYear()}},65104:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>o});var n=r(99699),a=r(11151);function o(e,t){(0,a.A)(2,arguments);var r=(0,n.default)(e),o=(0,n.default)(t);return r.getTime()>o.getTime()}},15549:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>o});var n=r(99699),a=r(11151);function o(e,t){(0,a.A)(2,arguments);var r=(0,n.default)(e),o=(0,n.default)(t);return r.getTime()<o.getTime()}},47586:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>o});var n=r(82284),a=r(11151);function o(e){return(0,a.A)(1,arguments),e instanceof Date||"object"===(0,n.A)(e)&&"[object Date]"===Object.prototype.toString.call(e)}},15898:(e,t,r)=>{"use
strict";r.r(t),r.d(t,{default:()=>o});var n=r(99699),a=r(11151);function o(e,t){(0,a.A)(2,arguments);var r=(0,n.default)(e),o=(0,n.default)(t);return r.getTime()===o.getTime()}},16010:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>o});var n=r(26876),a=r(11151);function o(e,t){(0,a.A)(2,arguments);var r=(0,n.default)(e),o=(0,n.default)(t);return r.getTime()===o.getTime()}},13238:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>o});var n=r(99699),a=r(11151);function o(e,t){(0,a.A)(2,arguments);var r=(0,n.default)(e),o=(0,n.default)(t);return r.getFullYear()===o.getFullYear()&&r.getMonth()===o.getMonth()}},68840:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>o});var n=r(63501),a=r(11151);function o(e,t){(0,a.A)(2,arguments);var r=(0,n.default)(e),o=(0,n.default)(t);return r.getTime()===o.getTime()}},35269:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>o});var n=r(99699),a=r(11151);function o(e,t){(0,a.A)(2,arguments);var r=(0,n.default)(e),o=(0,n.default)(t);return r.getFullYear()===o.getFullYear()}},92964:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>i});var n=r(47586),a=r(99699),o=r(11151);function i(e){if((0,o.A)(1,arguments),!(0,n.default)(e)&&"number"!==typeof e)return!1;var t=(0,a.default)(e);return!isNaN(Number(t))}},58559:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>o});var n=r(99699),a=r(11151);function o(e,t){(0,a.A)(2,arguments);var r=(0,n.default)(e).getTime(),o=(0,n.default)(t.start).getTime(),i=(0,n.default)(t.end).getTime();if(!(o<=i))throw new RangeError("Invalid interval");return r>=o&&r<=i}},50136:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>i});var n=r(82284),a=r(99699),o=r(11151);function i(e){var t,r;if((0,o.A)(1,arguments),e&&"function"===typeof e.forEach)t=e;else{if("object"!==(0,n.A)(e)||null===e)return new Date(NaN);t=Array.prototype.slice.call(e)}return t.forEach((function(e){var t=(0,a.default)(e);(void 0===r||r{"use strict";r.r(t),r.d(t,{default:()=>i});var n=r(82284),a=r(99699),o=r(11151);function i(e){var t,r;if((0,o.A)(1,arguments),e&&"function"===typeof e.forEach)t=e;else{if("object"!==(0,n.A)(e)||null===e)return new Date(NaN);t=Array.prototype.slice.call(e)}return t.forEach((function(e){var t=(0,a.default)(e);(void 0===r||r>t||isNaN(t.getDate()))&&(r=t)})),r||new Date(NaN)}},71082:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>Ze});var n=r(82284),a=r(27800);function o(e,t){var r="undefined"!==typeof Symbol&&e[Symbol.iterator]||e["@@iterator"];if(!r){if(Array.isArray(e)||(r=(0,a.A)(e))||t&&e&&"number"===typeof e.length){r&&(e=r);var n=0,o=function(){};return{s:o,n:function(){return n>=e.length?{done:!0}:{done:!1,value:e[n++]}},e:function(e){throw e},f:o}}throw new TypeError("Invalid attempt to iterate non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}var i,s=!0,l=!1;return{s:function(){r=r.call(e)},n:function(){var e=r.next();return s=e.done,e},e:function(e){l=!0,i=e},f:function(){try{s||null==r.return||r.return()}finally{if(l)throw i}}}}var i=r(98090),s=r(50670),l=r(99699);function u(e,t){if(null==e)throw new TypeError("assign requires that input parameter not be null or undefined");for(var r in t)Object.prototype.hasOwnProperty.call(t,r)&&(e[r]=t[r]);return e}var c=r(49668),d=r(40460),p=r(9581),f=r(67668),h=r(11151),v=r(9417),g=r(85501),m=r(49640),b=r(23029),y=r(92901),w=r(64467),x=function(){function e(){(0,b.A)(this,e),(0,w.A)(this,"priority",void 0),(0,w.A)(this,"subPriority",0)}return(0,y.A)(e,[{key:"validate",value:function(e,t){return!0}}]),e}(),k=function(e){(0,g.A)(r,e);var 
t=(0,m.A)(r);function r(e,n,a,o,i){var s;return(0,b.A)(this,r),(s=t.call(this)).value=e,s.validateValue=n,s.setValue=a,s.priority=o,i&&(s.subPriority=i),s}return(0,y.A)(r,[{key:"validate",value:function(e,t){return this.validateValue(e,this.value,t)}},{key:"set",value:function(e,t,r){return this.setValue(e,t,this.value,r)}}]),r}(x),D=function(e){(0,g.A)(r,e);var t=(0,m.A)(r);function r(){var e;(0,b.A)(this,r);for(var n=arguments.length,a=new Array(n),o=0;o0,a=n?t:1-t;if(a<=50)r=e||100;else{var o=a+50;r=e+100*Math.floor(o/100)-(e>=o%100?100:0)}return n?r:1-r}function oe(e){return e%400===0||e%4===0&&e%100!==0}var ie=function(e){(0,g.A)(r,e);var t=(0,m.A)(r);function r(){var e;(0,b.A)(this,r);for(var n=arguments.length,a=new Array(n),o=0;o0}},{key:"set",value:function(e,t,r){var n=e.getUTCFullYear();if(r.isTwoDigitYear){var a=ae(r.year,n);return e.setUTCFullYear(a,0,1),e.setUTCHours(0,0,0,0),e}var o="era"in t&&1!==t.era?1-r.year:r.year;return e.setUTCFullYear(o,0,1),e.setUTCHours(0,0,0,0),e}}]),r}(C),se=r(38888),le=r(67350),ue=function(e){(0,g.A)(r,e);var t=(0,m.A)(r);function r(){var e;(0,b.A)(this,r);for(var n=arguments.length,a=new Array(n),o=0;o0}},{key:"set",value:function(e,t,r,n){var a=(0,se.A)(e,n);if(r.isTwoDigitYear){var o=ae(r.year,a);return e.setUTCFullYear(o,0,n.firstWeekContainsDate),e.setUTCHours(0,0,0,0),(0,le.A)(e,n)}var i="era"in t&&1!==t.era?1-r.year:r.year;return e.setUTCFullYear(i,0,n.firstWeekContainsDate),e.setUTCHours(0,0,0,0),(0,le.A)(e,n)}}]),r}(C),ce=r(74715),de=function(e){(0,g.A)(r,e);var t=(0,m.A)(r);function r(){var e;(0,b.A)(this,r);for(var n=arguments.length,a=new Array(n),o=0;o=1&&t<=4}},{key:"set",value:function(e,t,r){return e.setUTCMonth(3*(r-1),1),e.setUTCHours(0,0,0,0),e}}]),r}(C),he=function(e){(0,g.A)(r,e);var t=(0,m.A)(r);function r(){var e;(0,b.A)(this,r);for(var n=arguments.length,a=new Array(n),o=0;o=1&&t<=4}},{key:"set",value:function(e,t,r){return e.setUTCMonth(3*(r-1),1),e.setUTCHours(0,0,0,0),e}}]),r}(C),ve=function(e){(0,g.A)(r,e);var t=(0,m.A)(r);function r(){var e;(0,b.A)(this,r);for(var n=arguments.length,a=new Array(n),o=0;o=0&&t<=11}},{key:"set",value:function(e,t,r){return e.setUTCMonth(r,1),e.setUTCHours(0,0,0,0),e}}]),r}(C),ge=function(e){(0,g.A)(r,e);var t=(0,m.A)(r);function r(){var e;(0,b.A)(this,r);for(var n=arguments.length,a=new Array(n),o=0;o=0&&t<=11}},{key:"set",value:function(e,t,r){return e.setUTCMonth(r,1),e.setUTCHours(0,0,0,0),e}}]),r}(C),me=r(9478);var be=function(e){(0,g.A)(r,e);var t=(0,m.A)(r);function r(){var e;(0,b.A)(this,r);for(var n=arguments.length,a=new Array(n),o=0;o=1&&t<=53}},{key:"set",value:function(e,t,r,n){return(0,le.A)(function(e,t,r){(0,h.A)(2,arguments);var n=(0,l.default)(e),a=(0,f.A)(t),o=(0,me.A)(n,r)-a;return n.setUTCDate(n.getUTCDate()-7*o),n}(e,r,n),n)}}]),r}(C),ye=r(49717);var we=function(e){(0,g.A)(r,e);var t=(0,m.A)(r);function r(){var e;(0,b.A)(this,r);for(var n=arguments.length,a=new Array(n),o=0;o=1&&t<=53}},{key:"set",value:function(e,t,r){return(0,ce.A)(function(e,t){(0,h.A)(2,arguments);var r=(0,l.default)(e),n=(0,f.A)(t),a=(0,ye.A)(r)-n;return r.setUTCDate(r.getUTCDate()-7*a),r}(e,r))}}]),r}(C),xe=[31,28,31,30,31,30,31,31,30,31,30,31],ke=[31,29,31,30,31,30,31,31,30,31,30,31],De=function(e){(0,g.A)(r,e);var t=(0,m.A)(r);function r(){var e;(0,b.A)(this,r);for(var n=arguments.length,a=new Array(n),o=0;o=1&&t<=ke[n]:t>=1&&t<=xe[n]}},{key:"set",value:function(e,t,r){return e.setUTCDate(r),e.setUTCHours(0,0,0,0),e}}]),r}(C),Ce=function(e){(0,g.A)(r,e);var t=(0,m.A)(r);function r(){var 
e;(0,b.A)(this,r);for(var n=arguments.length,a=new Array(n),o=0;o=1&&t<=366:t>=1&&t<=365}},{key:"set",value:function(e,t,r){return e.setUTCMonth(0,r),e.setUTCHours(0,0,0,0),e}}]),r}(C),Ee=r(28458);function Ae(e,t,r){var n,a,o,i,s,u,c,d;(0,h.A)(2,arguments);var p=(0,Ee.q)(),v=(0,f.A)(null!==(n=null!==(a=null!==(o=null!==(i=null===r||void 0===r?void 0:r.weekStartsOn)&&void 0!==i?i:null===r||void 0===r||null===(s=r.locale)||void 0===s||null===(u=s.options)||void 0===u?void 0:u.weekStartsOn)&&void 0!==o?o:p.weekStartsOn)&&void 0!==a?a:null===(c=p.locale)||void 0===c||null===(d=c.options)||void 0===d?void 0:d.weekStartsOn)&&void 0!==n?n:0);if(!(v>=0&&v<=6))throw new RangeError("weekStartsOn must be between 0 and 6 inclusively");var g=(0,l.default)(e),m=(0,f.A)(t),b=((m%7+7)%7=0&&t<=6}},{key:"set",value:function(e,t,r,n){return(e=Ae(e,r,n)).setUTCHours(0,0,0,0),e}}]),r}(C),_e=function(e){(0,g.A)(r,e);var t=(0,m.A)(r);function r(){var e;(0,b.A)(this,r);for(var n=arguments.length,a=new Array(n),o=0;o=0&&t<=6}},{key:"set",value:function(e,t,r,n){return(e=Ae(e,r,n)).setUTCHours(0,0,0,0),e}}]),r}(C),Me=function(e){(0,g.A)(r,e);var t=(0,m.A)(r);function r(){var e;(0,b.A)(this,r);for(var n=arguments.length,a=new Array(n),o=0;o=0&&t<=6}},{key:"set",value:function(e,t,r,n){return(e=Ae(e,r,n)).setUTCHours(0,0,0,0),e}}]),r}(C);var Te=function(e){(0,g.A)(r,e);var t=(0,m.A)(r);function r(){var e;(0,b.A)(this,r);for(var n=arguments.length,a=new Array(n),o=0;o=1&&t<=7}},{key:"set",value:function(e,t,r){return e=function(e,t){(0,h.A)(2,arguments);var r=(0,f.A)(t);r%7===0&&(r-=7);var n=(0,l.default)(e),a=((r%7+7)%7<1?7:0)+r-n.getUTCDay();return n.setUTCDate(n.getUTCDate()+a),n}(e,r),e.setUTCHours(0,0,0,0),e}}]),r}(C),Oe=function(e){(0,g.A)(r,e);var t=(0,m.A)(r);function r(){var e;(0,b.A)(this,r);for(var n=arguments.length,a=new Array(n),o=0;o=1&&t<=12}},{key:"set",value:function(e,t,r){var n=e.getUTCHours()>=12;return n&&r<12?e.setUTCHours(r+12,0,0,0):n||12!==r?e.setUTCHours(r,0,0,0):e.setUTCHours(0,0,0,0),e}}]),r}(C),Le=function(e){(0,g.A)(r,e);var t=(0,m.A)(r);function r(){var e;(0,b.A)(this,r);for(var n=arguments.length,a=new Array(n),o=0;o=0&&t<=23}},{key:"set",value:function(e,t,r){return e.setUTCHours(r,0,0,0),e}}]),r}(C),Fe=function(e){(0,g.A)(r,e);var t=(0,m.A)(r);function r(){var e;(0,b.A)(this,r);for(var n=arguments.length,a=new Array(n),o=0;o=0&&t<=11}},{key:"set",value:function(e,t,r){return e.getUTCHours()>=12&&r<12?e.setUTCHours(r+12,0,0,0):e.setUTCHours(r,0,0,0),e}}]),r}(C),Ye=function(e){(0,g.A)(r,e);var t=(0,m.A)(r);function r(){var e;(0,b.A)(this,r);for(var n=arguments.length,a=new Array(n),o=0;o=1&&t<=24}},{key:"set",value:function(e,t,r){var n=r<=24?r%24:r;return e.setUTCHours(n,0,0,0),e}}]),r}(C),je=function(e){(0,g.A)(r,e);var t=(0,m.A)(r);function r(){var e;(0,b.A)(this,r);for(var n=arguments.length,a=new Array(n),o=0;o=0&&t<=59}},{key:"set",value:function(e,t,r){return e.setUTCMinutes(r,0,0),e}}]),r}(C),Be=function(e){(0,g.A)(r,e);var t=(0,m.A)(r);function r(){var e;(0,b.A)(this,r);for(var n=arguments.length,a=new Array(n),o=0;o=0&&t<=59}},{key:"set",value:function(e,t,r){return e.setUTCSeconds(r,0),e}}]),r}(C),Ie=function(e){(0,g.A)(r,e);var t=(0,m.A)(r);function r(){var e;(0,b.A)(this,r);for(var n=arguments.length,a=new Array(n),o=0;o=1&&j<=7))throw new RangeError("firstWeekContainsDate must be between 1 and 7 inclusively");var B=(0,f.A)(null!==(A=null!==(S=null!==(_=null!==(M=null===a||void 0===a?void 0:a.weekStartsOn)&&void 0!==M?M:null===a||void 0===a||null===(T=a.locale)||void 
0===T||null===(O=T.options)||void 0===O?void 0:O.weekStartsOn)&&void 0!==_?_:F.weekStartsOn)&&void 0!==S?S:null===(P=F.locale)||void 0===P||null===(N=P.options)||void 0===N?void 0:N.weekStartsOn)&&void 0!==A?A:0);if(!(B>=0&&B<=6))throw new RangeError("weekStartsOn must be between 0 and 6 inclusively");if(""===L)return""===R?(0,l.default)(r):new Date(NaN);var I,H={firstWeekContainsDate:j,weekStartsOn:B,locale:Y},W=[new D],U=L.match(Ke).map((function(e){var t=e[0];return t in c.A?(0,c.A[t])(e,Y.formatLong):e})).join("").match(Qe),q=[],z=o(U);try{var Q=function(){var t=I.value;null!==a&&void 0!==a&&a.useAdditionalWeekYearTokens||!(0,p.xM)(t)||(0,p.lJ)(t,L,e),null!==a&&void 0!==a&&a.useAdditionalDayOfYearTokens||!(0,p.ef)(t)||(0,p.lJ)(t,L,e);var r=t[0],n=ze[r];if(n){var o=n.incompatibleTokens;if(Array.isArray(o)){var i=q.find((function(e){return o.includes(e.token)||e.token===r}));if(i)throw new RangeError("The format string mustn't contain `".concat(i.fullToken,"` and `").concat(t,"` at the same time"))}else if("*"===n.incompatibleTokens&&q.length>0)throw new RangeError("The format string mustn't contain `".concat(t,"` and any other token at the same time"));q.push({token:r,fullToken:t});var s=n.run(R,t,Y.match,H);if(!s)return{v:new Date(NaN)};W.push(s.setter),R=s.rest}else{if(r.match($e))throw new RangeError("Format string contains an unescaped latin alphabet character `"+r+"`");if("''"===t?t="'":"'"===r&&(t=t.match(Ge)[1].replace(Xe,"'")),0!==R.indexOf(t))return{v:new Date(NaN)};R=R.slice(t.length)}};for(z.s();!(I=z.n()).done;){var K=Q();if("object"===(0,n.A)(K))return K.v}}catch(re){z.e(re)}finally{z.f()}if(R.length>0&&Ve.test(R))return new Date(NaN);var G=W.map((function(e){return e.priority})).sort((function(e,t){return t-e})).filter((function(e,t,r){return r.indexOf(e)===t})).map((function(e){return W.filter((function(t){return t.priority===e})).sort((function(e,t){return t.subPriority-e.subPriority}))})).map((function(e){return e[0]})),X=(0,l.default)(r);if(isNaN(X.getTime()))return new Date(NaN);var V,$=(0,s.A)(X,(0,d.A)(X)),Z={},J=o(G);try{for(J.s();!(V=J.n()).done;){var ee=V.value;if(!ee.validate($,H))return new Date(NaN);var te=ee.set($,Z,H);Array.isArray(te)?($=te[0],u(Z,te[1])):$=te}}catch(re){J.e(re)}finally{J.f()}return $}},85670:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>i});var n=r(4019),a=r(11151),o=r(67668);function i(e,t){var r;(0,a.A)(1,arguments);var i=(0,o.A)(null!==(r=null===t||void 0===t?void 0:t.additionalDigits)&&void 0!==r?r:2);if(2!==i&&1!==i&&0!==i)throw new RangeError("additionalDigits must be 0, 1 or 2");if("string"!==typeof e&&"[object String]"!==Object.prototype.toString.call(e))return new Date(NaN);var v,g=function(e){var t,r={},n=e.split(s.dateTimeDelimiter);if(n.length>2)return r;/:/.test(n[0])?t=n[0]:(r.date=n[0],t=n[1],s.timeZoneDelimiter.test(r.date)&&(r.date=e.split(s.timeZoneDelimiter)[0],t=e.substr(r.date.length,e.length)));if(t){var a=s.timezone.exec(t);a?(r.time=t.replace(a[1],""),r.timezone=a[1]):r.time=t}return r}(e);if(g.date){var m=function(e,t){var r=new RegExp("^(?:(\\d{4}|[+-]\\d{"+(4+t)+"})|(\\d{2}|[+-]\\d{"+(2+t)+"})$)"),n=e.match(r);if(!n)return{year:NaN,restDateString:""};var a=n[1]?parseInt(n[1]):null,o=n[2]?parseInt(n[2]):null;return{year:null===o?a:100*o,restDateString:e.slice((n[1]||n[2]).length)}}(g.date,i);v=function(e,t){if(null===t)return new Date(NaN);var r=e.match(l);if(!r)return new Date(NaN);var n=!!r[4],a=d(r[1]),o=d(r[2])-1,i=d(r[3]),s=d(r[4]),u=d(r[5])-1;if(n)return function(e,t,r){return 
t>=1&&t<=53&&r>=0&&r<=6}(0,s,u)?function(e,t,r){var n=new Date(0);n.setUTCFullYear(e,0,4);var a=n.getUTCDay()||7,o=7*(t-1)+r+1-a;return n.setUTCDate(n.getUTCDate()+o),n}(t,s,u):new Date(NaN);var c=new Date(0);return function(e,t,r){return t>=0&&t<=11&&r>=1&&r<=(f[t]||(h(e)?29:28))}(t,o,i)&&function(e,t){return t>=1&&t<=(h(e)?366:365)}(t,a)?(c.setUTCFullYear(t,o,Math.max(a,i)),c):new Date(NaN)}(m.restDateString,m.year)}if(!v||isNaN(v.getTime()))return new Date(NaN);var b,y=v.getTime(),w=0;if(g.time&&(w=function(e){var t=e.match(u);if(!t)return NaN;var r=p(t[1]),a=p(t[2]),o=p(t[3]);if(!function(e,t,r){if(24===e)return 0===t&&0===r;return r>=0&&r<60&&t>=0&&t<60&&e>=0&&e<25}(r,a,o))return NaN;return r*n.s0+a*n.Cg+1e3*o}(g.time),isNaN(w)))return new Date(NaN);if(!g.timezone){var x=new Date(y+w),k=new Date(0);return k.setFullYear(x.getUTCFullYear(),x.getUTCMonth(),x.getUTCDate()),k.setHours(x.getUTCHours(),x.getUTCMinutes(),x.getUTCSeconds(),x.getUTCMilliseconds()),k}return b=function(e){if("Z"===e)return 0;var t=e.match(c);if(!t)return 0;var r="+"===t[1]?-1:1,a=parseInt(t[2]),o=t[3]&&parseInt(t[3])||0;if(!function(e,t){return t>=0&&t<=59}(0,o))return NaN;return r*(a*n.s0+o*n.Cg)}(g.timezone),isNaN(b)?new Date(NaN):new Date(y+w+b)}var s={dateTimeDelimiter:/[T ]/,timeZoneDelimiter:/[Z ]/i,timezone:/([Z+-].*)$/},l=/^-?(?:(\d{3})|(\d{2})(?:-?(\d{2}))?|W(\d{2})(?:-?(\d{1}))?|)$/,u=/^(\d{2}(?:[.,]\d*)?)(?::?(\d{2}(?:[.,]\d*)?))?(?::?(\d{2}(?:[.,]\d*)?))?$/,c=/^([+-])(\d{2})(?::?(\d{2}))?$/;function d(e){return e?parseInt(e):1}function p(e){return e&&parseFloat(e.replace(",","."))||0}var f=[31,null,31,30,31,30,31,31,30,31,30,31];function h(e){return e%400===0||e%4===0&&e%100!==0}},2402:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>l});var n=r(82284),a=r(99699),o=r(49864),i=r(67668),s=r(11151);function l(e,t){if((0,s.A)(2,arguments),"object"!==(0,n.A)(t)||null===t)throw new RangeError("values parameter must be an object");var r=(0,a.default)(e);return isNaN(r.getTime())?new Date(NaN):(null!=t.year&&r.setFullYear(t.year),null!=t.month&&(r=(0,o.default)(r,t.month)),null!=t.date&&r.setDate((0,i.A)(t.date)),null!=t.hours&&r.setHours((0,i.A)(t.hours)),null!=t.minutes&&r.setMinutes((0,i.A)(t.minutes)),null!=t.seconds&&r.setSeconds((0,i.A)(t.seconds)),null!=t.milliseconds&&r.setMilliseconds((0,i.A)(t.milliseconds)),r)}},63701:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>i});var n=r(67668),a=r(99699),o=r(11151);function i(e,t){(0,o.A)(2,arguments);var r=(0,a.default)(e),i=(0,n.A)(t);return r.setHours(i),r}},42231:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>i});var n=r(67668),a=r(99699),o=r(11151);function i(e,t){(0,o.A)(2,arguments);var r=(0,a.default)(e),i=(0,n.A)(t);return r.setMinutes(i),r}},49864:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>i});var n=r(67668),a=r(99699),o=r(11151);function i(e,t){(0,o.A)(2,arguments);var r=(0,a.default)(e),i=(0,n.A)(t),s=r.getFullYear(),l=r.getDate(),u=new Date(0);u.setFullYear(s,i,15),u.setHours(0,0,0,0);var c=function(e){(0,o.A)(1,arguments);var t=(0,a.default)(e),r=t.getFullYear(),n=t.getMonth(),i=new Date(0);return i.setFullYear(r,n+1,0),i.setHours(0,0,0,0),i.getDate()}(u);return r.setMonth(i,Math.min(l,c)),r}},5618:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>s});var n=r(67668),a=r(99699),o=r(49864),i=r(11151);function s(e,t){(0,i.A)(2,arguments);var r=(0,a.default)(e),s=(0,n.A)(t)-(Math.floor(r.getMonth()/3)+1);return(0,o.default)(r,r.getMonth()+3*s)}},78991:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>i});var 
n=r(67668),a=r(99699),o=r(11151);function i(e,t){(0,o.A)(2,arguments);var r=(0,a.default)(e),i=(0,n.A)(t);return r.setSeconds(i),r}},32455:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>i});var n=r(67668),a=r(99699),o=r(11151);function i(e,t){(0,o.A)(2,arguments);var r=(0,a.default)(e),i=(0,n.A)(t);return isNaN(r.getTime())?new Date(NaN):(r.setFullYear(i),r)}},26876:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>o});var n=r(99699),a=r(11151);function o(e){(0,a.A)(1,arguments);var t=(0,n.default)(e);return t.setHours(0,0,0,0),t}},34631:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>o});var n=r(99699),a=r(11151);function o(e){(0,a.A)(1,arguments);var t=(0,n.default)(e);return t.setDate(1),t.setHours(0,0,0,0),t}},63501:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>o});var n=r(99699),a=r(11151);function o(e){(0,a.A)(1,arguments);var t=(0,n.default)(e),r=t.getMonth(),o=r-r%3;return t.setMonth(o,1),t.setHours(0,0,0,0),t}},15163:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>s});var n=r(99699),a=r(67668),o=r(11151),i=r(28458);function s(e,t){var r,s,l,u,c,d,p,f;(0,o.A)(1,arguments);var h=(0,i.q)(),v=(0,a.A)(null!==(r=null!==(s=null!==(l=null!==(u=null===t||void 0===t?void 0:t.weekStartsOn)&&void 0!==u?u:null===t||void 0===t||null===(c=t.locale)||void 0===c||null===(d=c.options)||void 0===d?void 0:d.weekStartsOn)&&void 0!==l?l:h.weekStartsOn)&&void 0!==s?s:null===(p=h.locale)||void 0===p||null===(f=p.options)||void 0===f?void 0:f.weekStartsOn)&&void 0!==r?r:0);if(!(v>=0&&v<=6))throw new RangeError("weekStartsOn must be between 0 and 6 inclusively");var g=(0,n.default)(e),m=g.getDay(),b=(m{"use strict";r.r(t),r.d(t,{default:()=>o});var n=r(99699),a=r(11151);function o(e){(0,a.A)(1,arguments);var t=(0,n.default)(e),r=new Date(0);return r.setFullYear(t.getFullYear(),0,1),r.setHours(0,0,0,0),r}},10443:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>i});var n=r(7806),a=r(11151),o=r(67668);function i(e,t){(0,a.A)(2,arguments);var r=(0,o.A)(t);return(0,n.default)(e,-r)}},50670:(e,t,r)=>{"use strict";r.d(t,{A:()=>i});var n=r(77871),a=r(11151),o=r(67668);function i(e,t){(0,a.A)(2,arguments);var r=(0,o.A)(t);return(0,n.A)(e,-r)}},5231:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>i});var n=r(67668),a=r(83214),o=r(11151);function i(e,t){(0,o.A)(2,arguments);var r=(0,n.A)(t);return(0,a.default)(e,-r)}},63801:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>i});var n=r(67668),a=r(58980),o=r(11151);function i(e,t){(0,o.A)(2,arguments);var r=(0,n.A)(t);return(0,a.default)(e,-r)}},5377:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>i});var n=r(67668),a=r(12454),o=r(11151);function i(e,t){(0,o.A)(2,arguments);var r=(0,n.A)(t);return(0,a.default)(e,-r)}},70486:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>i});var n=r(67668),a=r(38733),o=r(11151);function i(e,t){(0,o.A)(2,arguments);var r=(0,n.A)(t);return(0,a.default)(e,-r)}},99699:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>o});var n=r(82284),a=r(11151);function o(e){(0,a.A)(1,arguments);var t=Object.prototype.toString.call(e);return e instanceof Date||"object"===(0,n.A)(e)&&"[object Date]"===t?new Date(e.getTime()):"number"===typeof e||"[object Number]"===t?new Date(e):("string"!==typeof e&&"[object String]"!==t||"undefined"===typeof console||(console.warn("Starting with v2.0.0-beta.1 date-fns doesn't accept strings as date arguments. Please use `parseISO` to parse strings. 
See: https://github.com/date-fns/date-fns/blob/master/docs/upgradeGuide.md#string-arguments"),console.warn((new Error).stack)),new Date(NaN))}},30115:e=>{var t="undefined"!==typeof Element,r="function"===typeof Map,n="function"===typeof Set,a="function"===typeof ArrayBuffer&&!!ArrayBuffer.isView;function o(e,i){if(e===i)return!0;if(e&&i&&"object"==typeof e&&"object"==typeof i){if(e.constructor!==i.constructor)return!1;var s,l,u,c;if(Array.isArray(e)){if((s=e.length)!=i.length)return!1;for(l=s;0!==l--;)if(!o(e[l],i[l]))return!1;return!0}if(r&&e instanceof Map&&i instanceof Map){if(e.size!==i.size)return!1;for(c=e.entries();!(l=c.next()).done;)if(!i.has(l.value[0]))return!1;for(c=e.entries();!(l=c.next()).done;)if(!o(l.value[1],i.get(l.value[0])))return!1;return!0}if(n&&e instanceof Set&&i instanceof Set){if(e.size!==i.size)return!1;for(c=e.entries();!(l=c.next()).done;)if(!i.has(l.value[0]))return!1;return!0}if(a&&ArrayBuffer.isView(e)&&ArrayBuffer.isView(i)){if((s=e.length)!=i.length)return!1;for(l=s;0!==l--;)if(e[l]!==i[l])return!1;return!0}if(e.constructor===RegExp)return e.source===i.source&&e.flags===i.flags;if(e.valueOf!==Object.prototype.valueOf&&"function"===typeof e.valueOf&&"function"===typeof i.valueOf)return e.valueOf()===i.valueOf();if(e.toString!==Object.prototype.toString&&"function"===typeof e.toString&&"function"===typeof i.toString)return e.toString()===i.toString();if((s=(u=Object.keys(e)).length)!==Object.keys(i).length)return!1;for(l=s;0!==l--;)if(!Object.prototype.hasOwnProperty.call(i,u[l]))return!1;if(t&&e instanceof Element)return!1;for(l=s;0!==l--;)if(("_owner"!==u[l]&&"__v"!==u[l]&&"__o"!==u[l]||!e.$$typeof)&&!o(e[u[l]],i[u[l]]))return!1;return!0}return e!==e&&i!==i}e.exports=function(e,t){try{return o(e,t)}catch(r){if((r.message||"").match(/stack|recursion/i))return console.warn("react-fast-compare cannot handle circular refs"),!1;throw r}}},73908:(e,t,r)=>{"use strict";r.r(t),r.d(t,{IGNORE_CLASS_NAME:()=>h,default:()=>g});var n=r(96540),a=r(40961);function o(e,t){return o=Object.setPrototypeOf||function(e,t){return e.__proto__=t,e},o(e,t)}function i(e){if(void 0===e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return e}function s(e,t,r){return e===t||(e.correspondingElement?e.correspondingElement.classList.contains(r):e.classList.contains(r))}var l,u,c=(void 0===l&&(l=0),function(){return++l}),d={},p={},f=["touchstart","touchmove"],h="ignore-react-onclickoutside";function v(e,t){var r={};return-1!==f.indexOf(t)&&u&&(r.passive=!e.props.preventDefault),r}const g=function(e,t){var r,l,f=e.displayName||e.name||"Component";return l=r=function(r){var l,h;function g(e){var n;return(n=r.call(this,e)||this).__outsideClickHandler=function(e){if("function"!==typeof n.__clickOutsideHandlerProp){var t=n.getInstance();if("function"!==typeof t.props.handleClickOutside){if("function"!==typeof t.handleClickOutside)throw new Error("WrappedComponent: "+f+" lacks a handleClickOutside(event) function for processing outside click events.");t.handleClickOutside(e)}else t.props.handleClickOutside(e)}else n.__clickOutsideHandlerProp(e)},n.__getComponentNode=function(){var e=n.getInstance();return t&&"function"===typeof t.setClickOutsideRef?t.setClickOutsideRef()(e):"function"===typeof e.setClickOutsideRef?e.setClickOutsideRef():(0,a.findDOMNode)(e)},n.enableOnClickOutside=function(){if("undefined"!==typeof document&&!p[n._uid]){"undefined"===typeof u&&(u=function(){if("undefined"!==typeof window&&"function"===typeof window.addEventListener){var 
e=!1,t=Object.defineProperty({},"passive",{get:function(){e=!0}}),r=function(){};return window.addEventListener("testPassiveEventSupport",r,t),window.removeEventListener("testPassiveEventSupport",r,t),e}}()),p[n._uid]=!0;var e=n.props.eventTypes;e.forEach||(e=[e]),d[n._uid]=function(e){var t;null!==n.componentNode&&(n.props.preventDefault&&e.preventDefault(),n.props.stopPropagation&&e.stopPropagation(),n.props.excludeScrollbar&&(t=e,document.documentElement.clientWidth<=t.clientX||document.documentElement.clientHeight<=t.clientY)||function(e,t,r){if(e===t)return!0;for(;e.parentNode||e.host;){if(e.parentNode&&s(e,t,r))return!0;e=e.parentNode||e.host}return e}(e.composed&&e.composedPath&&e.composedPath().shift()||e.target,n.componentNode,n.props.outsideClickIgnoreClass)===document&&n.__outsideClickHandler(e))},e.forEach((function(e){document.addEventListener(e,d[n._uid],v(i(n),e))}))}},n.disableOnClickOutside=function(){delete p[n._uid];var e=d[n._uid];if(e&&"undefined"!==typeof document){var t=n.props.eventTypes;t.forEach||(t=[t]),t.forEach((function(t){return document.removeEventListener(t,e,v(i(n),t))})),delete d[n._uid]}},n.getRef=function(e){return n.instanceRef=e},n._uid=c(),n}h=r,(l=g).prototype=Object.create(h.prototype),l.prototype.constructor=l,o(l,h);var m=g.prototype;return m.getInstance=function(){if(e.prototype&&!e.prototype.isReactComponent)return this;var t=this.instanceRef;return t.getInstance?t.getInstance():t},m.componentDidMount=function(){if("undefined"!==typeof document&&document.createElement){var e=this.getInstance();if(t&&"function"===typeof t.handleClickOutside&&(this.__clickOutsideHandlerProp=t.handleClickOutside(e),"function"!==typeof this.__clickOutsideHandlerProp))throw new Error("WrappedComponent: "+f+" lacks a function for processing outside click events specified by the handleClickOutside config option.");this.componentNode=this.__getComponentNode(),this.props.disableOnClickOutside||this.enableOnClickOutside()}},m.componentDidUpdate=function(){this.componentNode=this.__getComponentNode()},m.componentWillUnmount=function(){this.disableOnClickOutside()},m.render=function(){var t=this.props;t.excludeScrollbar;var r=function(e,t){if(null==e)return{};var r,n,a={},o=Object.keys(e);for(n=0;n=0||(a[r]=e[r]);return a}(t,["excludeScrollbar"]);return e.prototype&&e.prototype.isReactComponent?r.ref=this.getRef:r.wrappedRef=this.getRef,r.disableOnClickOutside=this.disableOnClickOutside,r.enableOnClickOutside=this.enableOnClickOutside,(0,n.createElement)(e,r)},g}(n.Component),r.displayName="OnClickOutside("+f+")",r.defaultProps={eventTypes:["mousedown","touchstart"],excludeScrollbar:t&&t.excludeScrollbar||!1,outsideClickIgnoreClass:h,preventDefault:!1,stopPropagation:!1},r.getClass=function(){return e.getClass?e.getClass():e},l}},32430:(e,t,r)=>{"use strict";r.r(t),r.d(t,{Manager:()=>i,Popper:()=>Le,Reference:()=>je,usePopper:()=>Oe});var n=r(96540),a=n.createContext(),o=n.createContext();function i(e){var t=e.children,r=n.useState(null),i=r[0],s=r[1],l=n.useRef(!1);n.useEffect((function(){return function(){l.current=!0}}),[]);var u=n.useCallback((function(e){l.current||s(e)}),[]);return n.createElement(a.Provider,{value:i},n.createElement(o.Provider,{value:u},t))}var s=function(e){return Array.isArray(e)?e[0]:e},l=function(e){if("function"===typeof e){for(var t=arguments.length,r=new Array(t>1?t-1:0),n=1;n0&&y(n.width)/e.offsetWidth||1,o=e.offsetHeight>0&&y(n.height)/e.offsetHeight||1);var 
i=(h(e)?f(e):window).visualViewport,s=!x()&&r,l=(n.left+(s&&i?i.offsetLeft:0))/a,u=(n.top+(s&&i?i.offsetTop:0))/o,c=n.width/a,d=n.height/o;return{width:c,height:d,top:u,right:l+c,bottom:u+d,left:l,x:l,y:u}}function D(e){var t=f(e);return{scrollLeft:t.pageXOffset,scrollTop:t.pageYOffset}}function C(e){return e?(e.nodeName||"").toLowerCase():null}function E(e){return((h(e)?e.ownerDocument:e.document)||window.document).documentElement}function A(e){return k(E(e)).left+D(e).scrollLeft}function S(e){return f(e).getComputedStyle(e)}function _(e){var t=S(e),r=t.overflow,n=t.overflowX,a=t.overflowY;return/auto|scroll|overlay|hidden/.test(r+a+n)}function M(e,t,r){void 0===r&&(r=!1);var n=v(t),a=v(t)&&function(e){var t=e.getBoundingClientRect(),r=y(t.width)/e.offsetWidth||1,n=y(t.height)/e.offsetHeight||1;return 1!==r||1!==n}(t),o=E(t),i=k(e,a,r),s={scrollLeft:0,scrollTop:0},l={x:0,y:0};return(n||!n&&!r)&&(("body"!==C(t)||_(o))&&(s=function(e){return e!==f(e)&&v(e)?{scrollLeft:(t=e).scrollLeft,scrollTop:t.scrollTop}:D(e);var t}(t)),v(t)?((l=k(t,!0)).x+=t.clientLeft,l.y+=t.clientTop):o&&(l.x=A(o))),{x:i.left+s.scrollLeft-l.x,y:i.top+s.scrollTop-l.y,width:i.width,height:i.height}}function T(e){var t=k(e),r=e.offsetWidth,n=e.offsetHeight;return Math.abs(t.width-r)<=1&&(r=t.width),Math.abs(t.height-n)<=1&&(n=t.height),{x:e.offsetLeft,y:e.offsetTop,width:r,height:n}}function O(e){return"html"===C(e)?e:e.assignedSlot||e.parentNode||(g(e)?e.host:null)||E(e)}function P(e){return["html","body","#document"].indexOf(C(e))>=0?e.ownerDocument.body:v(e)&&_(e)?e:P(O(e))}function N(e,t){var r;void 0===t&&(t=[]);var n=P(e),a=n===(null==(r=e.ownerDocument)?void 0:r.body),o=f(n),i=a?[o].concat(o.visualViewport||[],_(n)?n:[]):n,s=t.concat(i);return a?s:s.concat(N(O(i)))}function R(e){return["table","td","th"].indexOf(C(e))>=0}function L(e){return v(e)&&"fixed"!==S(e).position?e.offsetParent:null}function F(e){for(var t=f(e),r=L(e);r&&R(r)&&"static"===S(r).position;)r=L(r);return r&&("html"===C(r)||"body"===C(r)&&"static"===S(r).position)?t:r||function(e){var t=/firefox/i.test(w());if(/Trident/i.test(w())&&v(e)&&"fixed"===S(e).position)return null;var r=O(e);for(g(r)&&(r=r.host);v(r)&&["html","body"].indexOf(C(r))<0;){var n=S(r);if("none"!==n.transform||"none"!==n.perspective||"paint"===n.contain||-1!==["transform","perspective"].indexOf(n.willChange)||t&&"filter"===n.willChange||t&&n.filter&&"none"!==n.filter)return r;r=r.parentNode}return null}(e)||t}var Y="top",j="bottom",B="right",I="left",H="auto",W=[Y,j,B,I],U="start",q="end",z="clippingParents",Q="viewport",K="popper",G="reference",X=W.reduce((function(e,t){return e.concat([t+"-"+U,t+"-"+q])}),[]),V=[].concat(W,[H]).reduce((function(e,t){return e.concat([t,t+"-"+U,t+"-"+q])}),[]),$=["beforeRead","read","afterRead","beforeMain","main","afterMain","beforeWrite","write","afterWrite"];function Z(e){var t=new Map,r=new Set,n=[];function a(e){r.add(e.name),[].concat(e.requires||[],e.requiresIfExists||[]).forEach((function(e){if(!r.has(e)){var n=t.get(e);n&&a(n)}})),n.push(e)}return e.forEach((function(e){t.set(e.name,e)})),e.forEach((function(e){r.has(e.name)||a(e)})),n}function J(e){var t;return function(){return t||(t=new Promise((function(r){Promise.resolve().then((function(){t=void 0,r(e())}))}))),t}}var ee={placement:"bottom",modifiers:[],strategy:"absolute"};function te(){for(var e=arguments.length,t=new Array(e),r=0;r=0?"x":"y"}function se(e){var 
t,r=e.reference,n=e.element,a=e.placement,o=a?ae(a):null,i=a?oe(a):null,s=r.x+r.width/2-n.width/2,l=r.y+r.height/2-n.height/2;switch(o){case Y:t={x:s,y:r.y-n.height};break;case j:t={x:s,y:r.y+r.height};break;case B:t={x:r.x+r.width,y:l};break;case I:t={x:r.x-n.width,y:l};break;default:t={x:r.x,y:r.y}}var u=o?ie(o):null;if(null!=u){var c="y"===u?"height":"width";switch(i){case U:t[u]=t[u]-(r[c]/2-n[c]/2);break;case q:t[u]=t[u]+(r[c]/2-n[c]/2)}}return t}var le={top:"auto",right:"auto",bottom:"auto",left:"auto"};function ue(e){var t,r=e.popper,n=e.popperRect,a=e.placement,o=e.variation,i=e.offsets,s=e.position,l=e.gpuAcceleration,u=e.adaptive,c=e.roundOffsets,d=e.isFixed,p=i.x,h=void 0===p?0:p,v=i.y,g=void 0===v?0:v,m="function"===typeof c?c({x:h,y:g}):{x:h,y:g};h=m.x,g=m.y;var b=i.hasOwnProperty("x"),w=i.hasOwnProperty("y"),x=I,k=Y,D=window;if(u){var C=F(r),A="clientHeight",_="clientWidth";if(C===f(r)&&"static"!==S(C=E(r)).position&&"absolute"===s&&(A="scrollHeight",_="scrollWidth"),a===Y||(a===I||a===B)&&o===q)k=j,g-=(d&&C===D&&D.visualViewport?D.visualViewport.height:C[A])-n.height,g*=l?1:-1;if(a===I||(a===Y||a===j)&&o===q)x=B,h-=(d&&C===D&&D.visualViewport?D.visualViewport.width:C[_])-n.width,h*=l?1:-1}var M,T=Object.assign({position:s},u&&le),O=!0===c?function(e,t){var r=e.x,n=e.y,a=t.devicePixelRatio||1;return{x:y(r*a)/a||0,y:y(n*a)/a||0}}({x:h,y:g},f(r)):{x:h,y:g};return h=O.x,g=O.y,l?Object.assign({},T,((M={})[k]=w?"0":"",M[x]=b?"0":"",M.transform=(D.devicePixelRatio||1)<=1?"translate("+h+"px, "+g+"px)":"translate3d("+h+"px, "+g+"px, 0)",M)):Object.assign({},T,((t={})[k]=w?g+"px":"",t[x]=b?h+"px":"",t.transform="",t))}const ce={name:"offset",enabled:!0,phase:"main",requires:["popperOffsets"],fn:function(e){var t=e.state,r=e.options,n=e.name,a=r.offset,o=void 0===a?[0,0]:a,i=V.reduce((function(e,r){return e[r]=function(e,t,r){var n=ae(e),a=[I,Y].indexOf(n)>=0?-1:1,o="function"===typeof r?r(Object.assign({},t,{placement:e})):r,i=o[0],s=o[1];return i=i||0,s=(s||0)*a,[I,B].indexOf(n)>=0?{x:s,y:i}:{x:i,y:s}}(r,t.rects,o),e}),{}),s=i[t.placement],l=s.x,u=s.y;null!=t.modifiersData.popperOffsets&&(t.modifiersData.popperOffsets.x+=l,t.modifiersData.popperOffsets.y+=u),t.modifiersData[n]=i}};var de={left:"right",right:"left",bottom:"top",top:"bottom"};function pe(e){return e.replace(/left|right|bottom|top/g,(function(e){return de[e]}))}var fe={start:"end",end:"start"};function he(e){return e.replace(/start|end/g,(function(e){return fe[e]}))}function ve(e,t){var r=t.getRootNode&&t.getRootNode();if(e.contains(t))return!0;if(r&&g(r)){var n=t;do{if(n&&e.isSameNode(n))return!0;n=n.parentNode||n.host}while(n)}return!1}function ge(e){return Object.assign({},e,{left:e.x,top:e.y,right:e.x+e.width,bottom:e.y+e.height})}function me(e,t,r){return t===Q?ge(function(e,t){var r=f(e),n=E(e),a=r.visualViewport,o=n.clientWidth,i=n.clientHeight,s=0,l=0;if(a){o=a.width,i=a.height;var u=x();(u||!u&&"fixed"===t)&&(s=a.offsetLeft,l=a.offsetTop)}return{width:o,height:i,x:s+A(e),y:l}}(e,r)):h(t)?function(e,t){var r=k(e,!1,"fixed"===t);return r.top=r.top+e.clientTop,r.left=r.left+e.clientLeft,r.bottom=r.top+e.clientHeight,r.right=r.left+e.clientWidth,r.width=e.clientWidth,r.height=e.clientHeight,r.x=r.left,r.y=r.top,r}(t,r):ge(function(e){var t,r=E(e),n=D(e),a=null==(t=e.ownerDocument)?void 
0:t.body,o=m(r.scrollWidth,r.clientWidth,a?a.scrollWidth:0,a?a.clientWidth:0),i=m(r.scrollHeight,r.clientHeight,a?a.scrollHeight:0,a?a.clientHeight:0),s=-n.scrollLeft+A(e),l=-n.scrollTop;return"rtl"===S(a||r).direction&&(s+=m(r.clientWidth,a?a.clientWidth:0)-o),{width:o,height:i,x:s,y:l}}(E(e)))}function be(e,t,r,n){var a="clippingParents"===t?function(e){var t=N(O(e)),r=["absolute","fixed"].indexOf(S(e).position)>=0&&v(e)?F(e):e;return h(r)?t.filter((function(e){return h(e)&&ve(e,r)&&"body"!==C(e)})):[]}(e):[].concat(t),o=[].concat(a,[r]),i=o[0],s=o.reduce((function(t,r){var a=me(e,r,n);return t.top=m(a.top,t.top),t.right=b(a.right,t.right),t.bottom=b(a.bottom,t.bottom),t.left=m(a.left,t.left),t}),me(e,i,n));return s.width=s.right-s.left,s.height=s.bottom-s.top,s.x=s.left,s.y=s.top,s}function ye(e){return Object.assign({},{top:0,right:0,bottom:0,left:0},e)}function we(e,t){return t.reduce((function(t,r){return t[r]=e,t}),{})}function xe(e,t){void 0===t&&(t={});var r=t,n=r.placement,a=void 0===n?e.placement:n,o=r.strategy,i=void 0===o?e.strategy:o,s=r.boundary,l=void 0===s?z:s,u=r.rootBoundary,c=void 0===u?Q:u,d=r.elementContext,p=void 0===d?K:d,f=r.altBoundary,v=void 0!==f&&f,g=r.padding,m=void 0===g?0:g,b=ye("number"!==typeof m?m:we(m,W)),y=p===K?G:K,w=e.rects.popper,x=e.elements[v?y:p],D=be(h(x)?x:x.contextElement||E(e.elements.popper),l,c,i),C=k(e.elements.reference),A=se({reference:C,element:w,strategy:"absolute",placement:a}),S=ge(Object.assign({},w,A)),_=p===K?S:C,M={top:D.top-_.top+b.top,bottom:_.bottom-D.bottom+b.bottom,left:D.left-_.left+b.left,right:_.right-D.right+b.right},T=e.modifiersData.offset;if(p===K&&T){var O=T[a];Object.keys(M).forEach((function(e){var t=[B,j].indexOf(e)>=0?1:-1,r=[Y,j].indexOf(e)>=0?"y":"x";M[e]+=O[r]*t}))}return M}function ke(e,t,r){return m(e,b(t,r))}const De={name:"preventOverflow",enabled:!0,phase:"main",fn:function(e){var t=e.state,r=e.options,n=e.name,a=r.mainAxis,o=void 0===a||a,i=r.altAxis,s=void 0!==i&&i,l=r.boundary,u=r.rootBoundary,c=r.altBoundary,d=r.padding,p=r.tether,f=void 0===p||p,h=r.tetherOffset,v=void 0===h?0:h,g=xe(t,{boundary:l,rootBoundary:u,padding:d,altBoundary:c}),y=ae(t.placement),w=oe(t.placement),x=!w,k=ie(y),D="x"===k?"y":"x",C=t.modifiersData.popperOffsets,E=t.rects.reference,A=t.rects.popper,S="function"===typeof v?v(Object.assign({},t.rects,{placement:t.placement})):v,_="number"===typeof S?{mainAxis:S,altAxis:S}:Object.assign({mainAxis:0,altAxis:0},S),M=t.modifiersData.offset?t.modifiersData.offset[t.placement]:null,O={x:0,y:0};if(C){if(o){var P,N="y"===k?Y:I,R="y"===k?j:B,L="y"===k?"height":"width",H=C[k],W=H+g[N],q=H-g[R],z=f?-A[L]/2:0,Q=w===U?E[L]:A[L],K=w===U?-A[L]:-E[L],G=t.elements.arrow,X=f&&G?T(G):{width:0,height:0},V=t.modifiersData["arrow#persistent"]?t.modifiersData["arrow#persistent"].padding:{top:0,right:0,bottom:0,left:0},$=V[N],Z=V[R],J=ke(0,E[L],X[L]),ee=x?E[L]/2-z-J-$-_.mainAxis:Q-J-$-_.mainAxis,te=x?-E[L]/2+z+J+Z+_.mainAxis:K+J+Z+_.mainAxis,re=t.elements.arrow&&F(t.elements.arrow),ne=re?"y"===k?re.clientTop||0:re.clientLeft||0:0,se=null!=(P=null==M?void 0:M[k])?P:0,le=H+te-se,ue=ke(f?b(W,H+ee-se-ne):W,H,f?m(q,le):q);C[k]=ue,O[k]=ue-H}if(s){var ce,de="x"===k?Y:I,pe="x"===k?j:B,fe=C[D],he="y"===D?"height":"width",ve=fe+g[de],ge=fe-g[pe],me=-1!==[Y,I].indexOf(y),be=null!=(ce=null==M?void 0:M[D])?ce:0,ye=me?ve:fe-E[he]-A[he]-be+_.altAxis,we=me?fe+E[he]+A[he]-be-_.altAxis:ge,De=f&&me?function(e,t,r){var n=ke(e,t,r);return 
n>r?r:n}(ye,fe,we):ke(f?ye:ve,fe,f?we:ge);C[D]=De,O[D]=De-fe}t.modifiersData[n]=O}},requiresIfExists:["offset"]};const Ce={name:"arrow",enabled:!0,phase:"main",fn:function(e){var t,r=e.state,n=e.name,a=e.options,o=r.elements.arrow,i=r.modifiersData.popperOffsets,s=ae(r.placement),l=ie(s),u=[I,B].indexOf(s)>=0?"height":"width";if(o&&i){var c=function(e,t){return ye("number"!==typeof(e="function"===typeof e?e(Object.assign({},t.rects,{placement:t.placement})):e)?e:we(e,W))}(a.padding,r),d=T(o),p="y"===l?Y:I,f="y"===l?j:B,h=r.rects.reference[u]+r.rects.reference[l]-i[l]-r.rects.popper[u],v=i[l]-r.rects.reference[l],g=F(o),m=g?"y"===l?g.clientHeight||0:g.clientWidth||0:0,b=h/2-v/2,y=c[p],w=m-d[u]-c[f],x=m/2-d[u]/2+b,k=ke(y,x,w),D=l;r.modifiersData[n]=((t={})[D]=k,t.centerOffset=k-x,t)}},effect:function(e){var t=e.state,r=e.options.element,n=void 0===r?"[data-popper-arrow]":r;null!=n&&("string"!==typeof n||(n=t.elements.popper.querySelector(n)))&&ve(t.elements.popper,n)&&(t.elements.arrow=n)},requires:["popperOffsets"],requiresIfExists:["preventOverflow"]};function Ee(e,t,r){return void 0===r&&(r={x:0,y:0}),{top:e.top-t.height-r.y,right:e.right-t.width+r.x,bottom:e.bottom-t.height+r.y,left:e.left-t.width-r.x}}function Ae(e){return[Y,B,j,I].some((function(t){return e[t]>=0}))}var Se=re({defaultModifiers:[{name:"eventListeners",enabled:!0,phase:"write",fn:function(){},effect:function(e){var t=e.state,r=e.instance,n=e.options,a=n.scroll,o=void 0===a||a,i=n.resize,s=void 0===i||i,l=f(t.elements.popper),u=[].concat(t.scrollParents.reference,t.scrollParents.popper);return o&&u.forEach((function(e){e.addEventListener("scroll",r.update,ne)})),s&&l.addEventListener("resize",r.update,ne),function(){o&&u.forEach((function(e){e.removeEventListener("scroll",r.update,ne)})),s&&l.removeEventListener("resize",r.update,ne)}},data:{}},{name:"popperOffsets",enabled:!0,phase:"read",fn:function(e){var t=e.state,r=e.name;t.modifiersData[r]=se({reference:t.rects.reference,element:t.rects.popper,strategy:"absolute",placement:t.placement})},data:{}},{name:"computeStyles",enabled:!0,phase:"beforeWrite",fn:function(e){var t=e.state,r=e.options,n=r.gpuAcceleration,a=void 0===n||n,o=r.adaptive,i=void 0===o||o,s=r.roundOffsets,l=void 0===s||s,u={placement:ae(t.placement),variation:oe(t.placement),popper:t.elements.popper,popperRect:t.rects.popper,gpuAcceleration:a,isFixed:"fixed"===t.options.strategy};null!=t.modifiersData.popperOffsets&&(t.styles.popper=Object.assign({},t.styles.popper,ue(Object.assign({},u,{offsets:t.modifiersData.popperOffsets,position:t.options.strategy,adaptive:i,roundOffsets:l})))),null!=t.modifiersData.arrow&&(t.styles.arrow=Object.assign({},t.styles.arrow,ue(Object.assign({},u,{offsets:t.modifiersData.arrow,position:"absolute",adaptive:!1,roundOffsets:l})))),t.attributes.popper=Object.assign({},t.attributes.popper,{"data-popper-placement":t.placement})},data:{}},{name:"applyStyles",enabled:!0,phase:"write",fn:function(e){var t=e.state;Object.keys(t.elements).forEach((function(e){var r=t.styles[e]||{},n=t.attributes[e]||{},a=t.elements[e];v(a)&&C(a)&&(Object.assign(a.style,r),Object.keys(n).forEach((function(e){var t=n[e];!1===t?a.removeAttribute(e):a.setAttribute(e,!0===t?"":t)})))}))},effect:function(e){var t=e.state,r={popper:{position:t.options.strategy,left:"0",top:"0",margin:"0"},arrow:{position:"absolute"},reference:{}};return 
Object.assign(t.elements.popper.style,r.popper),t.styles=r,t.elements.arrow&&Object.assign(t.elements.arrow.style,r.arrow),function(){Object.keys(t.elements).forEach((function(e){var n=t.elements[e],a=t.attributes[e]||{},o=Object.keys(t.styles.hasOwnProperty(e)?t.styles[e]:r[e]).reduce((function(e,t){return e[t]="",e}),{});v(n)&&C(n)&&(Object.assign(n.style,o),Object.keys(a).forEach((function(e){n.removeAttribute(e)})))}))}},requires:["computeStyles"]},ce,{name:"flip",enabled:!0,phase:"main",fn:function(e){var t=e.state,r=e.options,n=e.name;if(!t.modifiersData[n]._skip){for(var a=r.mainAxis,o=void 0===a||a,i=r.altAxis,s=void 0===i||i,l=r.fallbackPlacements,u=r.padding,c=r.boundary,d=r.rootBoundary,p=r.altBoundary,f=r.flipVariations,h=void 0===f||f,v=r.allowedAutoPlacements,g=t.options.placement,m=ae(g),b=l||(m===g||!h?[pe(g)]:function(e){if(ae(e)===H)return[];var t=pe(e);return[he(e),t,he(t)]}(g)),y=[g].concat(b).reduce((function(e,r){return e.concat(ae(r)===H?function(e,t){void 0===t&&(t={});var r=t,n=r.placement,a=r.boundary,o=r.rootBoundary,i=r.padding,s=r.flipVariations,l=r.allowedAutoPlacements,u=void 0===l?V:l,c=oe(n),d=c?s?X:X.filter((function(e){return oe(e)===c})):W,p=d.filter((function(e){return u.indexOf(e)>=0}));0===p.length&&(p=d);var f=p.reduce((function(t,r){return t[r]=xe(e,{placement:r,boundary:a,rootBoundary:o,padding:i})[ae(r)],t}),{});return Object.keys(f).sort((function(e,t){return f[e]-f[t]}))}(t,{placement:r,boundary:c,rootBoundary:d,padding:u,flipVariations:h,allowedAutoPlacements:v}):r)}),[]),w=t.rects.reference,x=t.rects.popper,k=new Map,D=!0,C=y[0],E=0;E=0,T=M?"width":"height",O=xe(t,{placement:A,boundary:c,rootBoundary:d,altBoundary:p,padding:u}),P=M?_?B:I:_?j:Y;w[T]>x[T]&&(P=pe(P));var N=pe(P),R=[];if(o&&R.push(O[S]<=0),s&&R.push(O[P]<=0,O[N]<=0),R.every((function(e){return e}))){C=A,D=!1;break}k.set(A,R)}if(D)for(var L=function(e){var t=y.find((function(t){var r=k.get(t);if(r)return r.slice(0,e).every((function(e){return e}))}));if(t)return C=t,"break"},F=h?3:1;F>0;F--){if("break"===L(F))break}t.placement!==C&&(t.modifiersData[n]._skip=!0,t.placement=C,t.reset=!0)}},requiresIfExists:["offset"],data:{_skip:!1}},De,Ce,{name:"hide",enabled:!0,phase:"main",requiresIfExists:["preventOverflow"],fn:function(e){var t=e.state,r=e.name,n=t.rects.reference,a=t.rects.popper,o=t.modifiersData.preventOverflow,i=xe(t,{elementContext:"reference"}),s=xe(t,{altBoundary:!0}),l=Ee(i,n),u=Ee(s,a,o),c=Ae(l),d=Ae(u);t.modifiersData[r]={referenceClippingOffsets:l,popperEscapeOffsets:u,isReferenceHidden:c,hasPopperEscaped:d},t.attributes.popper=Object.assign({},t.attributes.popper,{"data-popper-reference-hidden":c,"data-popper-escaped":d})}}]}),_e=r(30115),Me=r.n(_e),Te=[],Oe=function(e,t,r){void 0===r&&(r={});var a=n.useRef(null),o={onFirstUpdate:r.onFirstUpdate,placement:r.placement||"bottom",strategy:r.strategy||"absolute",modifiers:r.modifiers||Te},i=n.useState({styles:{popper:{position:o.strategy,left:"0",top:"0"},arrow:{position:"absolute"}},attributes:{}}),s=i[0],l=i[1],u=n.useMemo((function(){return{name:"updateState",enabled:!0,phase:"write",fn:function(e){var t=e.state,r=Object.keys(t.elements);p.flushSync((function(){l({styles:c(r.map((function(e){return[e,t.styles[e]||{}]}))),attributes:c(r.map((function(e){return[e,t.attributes[e]]})))})}))},requires:["computeStyles"]}}),[]),f=n.useMemo((function(){var e={onFirstUpdate:o.onFirstUpdate,placement:o.placement,strategy:o.strategy,modifiers:[].concat(o.modifiers,[u,{name:"applyStyles",enabled:!1}])};return 
Me()(a.current,e)?a.current||e:(a.current=e,e)}),[o.onFirstUpdate,o.placement,o.strategy,o.modifiers,u]),h=n.useRef();return d((function(){h.current&&h.current.setOptions(f)}),[f]),d((function(){if(null!=e&&null!=t){var n=(r.createPopper||Se)(e,t,f);return h.current=n,function(){n.destroy(),h.current=null}}}),[e,t,r.createPopper]),{state:h.current?h.current.state:null,styles:s.styles,attributes:s.attributes,update:h.current?h.current.update:null,forceUpdate:h.current?h.current.forceUpdate:null}},Pe=function(){},Ne=function(){return Promise.resolve(null)},Re=[];function Le(e){var t=e.placement,r=void 0===t?"bottom":t,o=e.strategy,i=void 0===o?"absolute":o,l=e.modifiers,c=void 0===l?Re:l,d=e.referenceElement,p=e.onFirstUpdate,f=e.innerRef,h=e.children,v=n.useContext(a),g=n.useState(null),m=g[0],b=g[1],y=n.useState(null),w=y[0],x=y[1];n.useEffect((function(){u(f,m)}),[f,m]);var k=n.useMemo((function(){return{placement:r,strategy:i,onFirstUpdate:p,modifiers:[].concat(c,[{name:"arrow",enabled:null!=w,options:{element:w}}])}}),[r,i,p,c,w]),D=Oe(d||v,m,k),C=D.state,E=D.styles,A=D.forceUpdate,S=D.update,_=n.useMemo((function(){return{ref:b,style:E.popper,placement:C?C.placement:r,hasPopperEscaped:C&&C.modifiersData.hide?C.modifiersData.hide.hasPopperEscaped:null,isReferenceHidden:C&&C.modifiersData.hide?C.modifiersData.hide.isReferenceHidden:null,arrowProps:{style:E.arrow,ref:x},forceUpdate:A||Pe,update:S||Ne}}),[b,x,r,C,E,S,A]);return s(h)(_)}var Fe=r(9771),Ye=r.n(Fe);function je(e){var t=e.children,r=e.innerRef,a=n.useContext(o),i=n.useCallback((function(e){u(r,e),l(a,e)}),[r,a]);return n.useEffect((function(){return function(){return u(r,null)}}),[]),n.useEffect((function(){Ye()(Boolean(a),"`Reference` should not be used outside of a `Manager` component.")}),[a]),s(t)({ref:i})}},41853:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});var n=Object.assign||function(e){for(var t=1;t=0||Object.prototype.hasOwnProperty.call(e,n)&&(r[n]=e[n]);return r}(e,["bgColor","bgD","fgD","fgColor","size","title","viewBoxSize"]);return i.default.createElement("svg",n({},d,{height:l,ref:t,viewBox:"0 0 "+c+" "+c,width:l}),u?i.default.createElement("title",null,u):null,i.default.createElement("path",{d:a,fill:r}),i.default.createElement("path",{d:o,fill:s}))}));c.displayName="QRCodeSvg",c.propTypes=l,c.defaultProps=u,t.default=c},194:(e,t,r)=>{"use strict";var n=Object.assign||function(e){for(var t=1;t=0||Object.prototype.hasOwnProperty.call(e,n)&&(r[n]=e[n]);return r}(e,["bgColor","fgColor","level","size","value"]),f=new a.default(-1,o.default[s]);f.addData(d),f.make();var h=f.modules;return l.default.createElement(u.default,n({},p,{bgColor:r,bgD:h.map((function(e,t){return e.map((function(e,r){return e?"":"M "+r+" "+t+" l 1 0 0 1 -1 0 Z"})).join(" ")})).join(" "),fgColor:i,fgD:h.map((function(e,t){return e.map((function(e,r){return e?"M "+r+" "+t+" l 1 0 0 1 -1 0 Z":""})).join(" ")})).join(" "),ref:t,size:c,viewBoxSize:h.length}))}));p.displayName="QRCode",p.propTypes=d,p.defaultProps={bgColor:"#FFFFFF",fgColor:"#000000",level:"L",size:256},t.Ay=p},26892:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.autoprefix=void 0;var n,a=r(33215),o=(n=a)&&n.__esModule?n:{default:n},i=Object.assign||function(e){for(var t=1;t{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.active=void 0;var n,a=Object.assign||function(e){for(var t=1;t1&&void 0!==arguments[1]?arguments[1]:"span";return function(r){function n(){var r,o,l;!function(e,t){if(!(e instanceof t))throw 
new TypeError("Cannot call a class as a function")}(this,n);for(var u=arguments.length,c=Array(u),d=0;d{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.hover=void 0;var n,a=Object.assign||function(e){for(var t=1;t1&&void 0!==arguments[1]?arguments[1]:"span";return function(r){function n(){var r,o,l;!function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}(this,n);for(var u=arguments.length,c=Array(u),d=0;d{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.flattenNames=void 0;var n=s(r(85015)),a=s(r(33215)),o=s(r(11331)),i=s(r(55378));function s(e){return e&&e.__esModule?e:{default:e}}var l=t.flattenNames=function e(){var t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:[],r=[];return(0,i.default)(t,(function(t){Array.isArray(t)?e(t).map((function(e){return r.push(e)})):(0,o.default)(t)?(0,a.default)(t,(function(e,t){!0===e&&r.push(t),r.push(t+"-"+e)})):(0,n.default)(t)&&r.push(t)})),r};t.default=l},58527:(e,t,r)=>{"use strict";t.H8=void 0;var n=u(r(99265)),a=u(r(76203)),o=u(r(26892)),i=u(r(16686)),s=u(r(75268)),l=u(r(62693));function u(e){return e&&e.__esModule?e:{default:e}}i.default,t.H8=i.default,s.default,l.default;var c=function(e){for(var t=arguments.length,r=Array(t>1?t-1:0),i=1;i{"use strict";Object.defineProperty(t,"__esModule",{value:!0});t.default=function(e,t){var r={},n=function(e){var t=!(arguments.length>1&&void 0!==arguments[1])||arguments[1];r[e]=t};return 0===e&&n("first-child"),e===t-1&&n("last-child"),(0===e||e%2===0)&&n("even"),1===Math.abs(e%2)&&n("odd"),n("nth-child",e),r}},76203:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.mergeClasses=void 0;var n=i(r(33215)),a=i(r(88055)),o=Object.assign||function(e){for(var t=1;t1&&void 0!==arguments[1]?arguments[1]:[],r=e.default&&(0,a.default)(e.default)||{};return t.map((function(t){var a=e[t];return a&&(0,n.default)(a,(function(e,t){r[t]||(r[t]={}),r[t]=o({},r[t],a[t])})),t})),r};t.default=s},9771:e=>{"use strict";var t=function(){};e.exports=t},46942:(e,t)=>{var r;!function(){"use strict";var n={}.hasOwnProperty;function a(){for(var e="",t=0;t{"use strict";var n=r(20034),a=r(44576),o=r(78227)("match");e.exports=function(e){var t;return n(e)&&(void 0!==(t=e[o])?!!t:"RegExp"===a(e))}},61034:(e,t,r)=>{"use strict";var n=r(69565),a=r(39297),o=r(1625),i=r(67979),s=RegExp.prototype;e.exports=function(e){var t=e.flags;return void 0!==t||"flags"in s||a(e,"flags")||!o(s,e)?t:n(i,e)}},93514:(e,t,r)=>{"use strict";r(6469)("flat")},79978:(e,t,r)=>{"use strict";var n=r(46518),a=r(69565),o=r(79504),i=r(67750),s=r(94901),l=r(64117),u=r(60788),c=r(655),d=r(55966),p=r(61034),f=r(2478),h=r(78227),v=r(96395),g=h("replace"),m=TypeError,b=o("".indexOf),y=o("".replace),w=o("".slice),x=Math.max;n({target:"String",proto:!0},{replaceAll:function(e,t){var r,n,o,h,k,D,C,E,A,S=i(this),_=0,M=0,T="";if(!l(e)){if((r=u(e))&&(n=c(i(p(e))),!~b(n,"g")))throw new m("`.replaceAll` does not allow non-global regexes");if(o=d(e,g))return a(o,e,S,t);if(v&&r)return y(c(S),e,t)}for(h=c(S),k=c(e),(D=s(t))||(t=c(t)),C=k.length,E=x(1,C),_=b(h,k);-1!==_;)A=D?c(t(k,_,h)):f(k,h,_,[],void 0,t),T+=w(h,M,_)+A,M=_+C,_=_+E>h.length?-1:b(h,k,_+E);return M{"use strict";var n=r(46518),a=r(90679),o=r(42787),i=r(66699),s=r(39297),l=r(78227),u=r(53982),c=r(96395),d=l("toStringTag"),p=TypeError,f=function(){if(a(this,u),o(this)===u)throw new p("Abstract class AsyncIterator not directly 
constructable")};f.prototype=u,s(u,d)||i(u,d,"AsyncIterator"),!c&&s(u,"constructor")&&u.constructor!==Object||i(u,"constructor",f),n({global:!0,constructor:!0,forced:c},{AsyncIterator:f})},86994:(e,t,r)=>{"use strict";var n=r(46518),a=r(36639).toArray;n({target:"AsyncIterator",proto:!0,real:!0},{toArray:function(){return a(this,void 0,[])}})},41795:(e,t,r)=>{"use strict";var n=r(46518),a=r(28551),o=r(72652),i=r(1767),s=[].push;n({target:"Iterator",proto:!0,real:!0},{toArray:function(){var e=[];return o(i(a(this)),s,{that:e,IS_RECORD:!0}),e}})},14123:(e,t,r)=>{"use strict";function n(e){return t=>{const r=(e?Math[e]:Math.trunc)(t);return 0===r?0:r}}r.d(t,{u:()=>n})},82526:(e,t,r)=>{"use strict";r.d(t,{W:()=>s});var n=r(13999),a=r(4883),o=r(35840),i=r(2642);function s(e,t){const{years:r=0,months:s=0,weeks:l=0,days:u=0,hours:c=0,minutes:d=0,seconds:p=0}=t,f=(0,i.a)(e),h=s||r?(0,a.P)(f,s+12*r):f,v=u||l?(0,n.f)(h,u+7*l):h,g=1e3*(p+60*(d+60*c));return(0,o.w)(e,v.getTime()+g)}},13999:(e,t,r)=>{"use strict";r.d(t,{f:()=>o});var n=r(2642),a=r(35840);function o(e,t){const r=(0,n.a)(e);return isNaN(t)?(0,a.w)(e,NaN):t?(r.setDate(r.getDate()+t),r):r}},4883:(e,t,r)=>{"use strict";r.d(t,{P:()=>o});var n=r(2642),a=r(35840);function o(e,t){const r=(0,n.a)(e);if(isNaN(t))return(0,a.w)(e,NaN);if(!t)return r;const o=r.getDate(),i=(0,a.w)(e,r.getTime());i.setMonth(r.getMonth()+t+1,0);return o>=i.getDate()?i:(r.setFullYear(i.getFullYear(),i.getMonth(),o),r)}},25733:(e,t,r)=>{"use strict";r.d(t,{z:()=>a});var n=r(2642);function a(e,t){const r=(0,n.a)(e),a=(0,n.a)(t),o=r.getTime()-a.getTime();return o<0?-1:o>0?1:o}},17764:(e,t,r)=>{"use strict";r.d(t,{c:()=>o});var n=r(20514),a=r(2642);function o(e,t){const r=(0,a.a)(e),o=(0,a.a)(t),s=i(r,o),l=Math.abs((0,n.m)(r,o));r.setDate(r.getDate()-s*l);const u=s*(l-Number(i(r,o)===-s));return 0===u?0:u}function i(e,t){const r=e.getFullYear()-t.getFullYear()||e.getMonth()-t.getMonth()||e.getDate()-t.getDate()||e.getHours()-t.getHours()||e.getMinutes()-t.getMinutes()||e.getSeconds()-t.getSeconds()||e.getMilliseconds()-t.getMilliseconds();return r<0?-1:r>0?1:r}},50502:(e,t,r)=>{"use strict";r.d(t,{M:()=>i});var n=r(14123),a=r(2940),o=r(77275);function i(e,t,r){const i=(0,o.b)(e,t)/a.s0;return(0,n.u)(r?.roundingMethod)(i)}},77275:(e,t,r)=>{"use strict";r.d(t,{b:()=>a});var n=r(2642);function a(e,t){return+(0,n.a)(e)-+(0,n.a)(t)}},10648:(e,t,r)=>{"use strict";r.d(t,{o:()=>i});var n=r(14123),a=r(2940),o=r(77275);function i(e,t,r){const i=(0,o.b)(e,t)/a.Cg;return(0,n.u)(r?.roundingMethod)(i)}},49858:(e,t,r)=>{"use strict";r.d(t,{W:()=>u});var n=r(25733),a=r(2642);function o(e,t){const r=(0,a.a)(e),n=(0,a.a)(t);return 12*(r.getFullYear()-n.getFullYear())+(r.getMonth()-n.getMonth())}var i=r(37519),s=r(71359);function l(e){const t=(0,a.a)(e);return+(0,i.D)(t)===+(0,s.p)(t)}function u(e,t){const r=(0,a.a)(e),i=(0,a.a)(t),s=(0,n.z)(r,i),u=Math.abs(o(r,i));let c;if(u<1)c=0;else{1===r.getMonth()&&r.getDate()>27&&r.setDate(30),r.setMonth(r.getMonth()-s*u);let t=(0,n.z)(r,i)===-s;l((0,a.a)(e))&&1===u&&1===(0,n.z)(e,i)&&(t=!1),c=s*(u-Number(t))}return 0===c?0:c}},43924:(e,t,r)=>{"use strict";r.d(t,{O:()=>o});var n=r(14123),a=r(77275);function o(e,t,r){const o=(0,a.b)(e,t)/1e3;return(0,n.u)(r?.roundingMethod)(o)}},33607:(e,t,r)=>{"use strict";r.d(t,{V:()=>i});var n=r(25733),a=r(2642);function o(e,t){const r=(0,a.a)(e),n=(0,a.a)(t);return r.getFullYear()-n.getFullYear()}function i(e,t){const 
r=(0,a.a)(e),i=(0,a.a)(t),s=(0,n.z)(r,i),l=Math.abs(o(r,i));r.setFullYear(1584),i.setFullYear(1584);const u=s*(l-+((0,n.z)(r,i)===-s));return 0===u?0:u}},37519:(e,t,r)=>{"use strict";r.d(t,{D:()=>a});var n=r(2642);function a(e){const t=(0,n.a)(e);return t.setHours(23,59,59,999),t}},71359:(e,t,r)=>{"use strict";r.d(t,{p:()=>a});var n=r(2642);function a(e){const t=(0,n.a)(e),r=t.getMonth();return t.setFullYear(t.getFullYear(),r+1,0),t.setHours(23,59,59,999),t}},31826:(e,t,r)=>{"use strict";r.d(t,{k:()=>c});var n=r(96519),a=r(82695),o=r(14123),i=r(40215),s=r(25733),l=r(2940),u=r(2642);function c(e,t,r){const c=(0,a.q)(),d=r?.locale??c.locale??n.c,p=(0,s.z)(e,t);if(isNaN(p))throw new RangeError("Invalid time value");const f=Object.assign({},r,{addSuffix:r?.addSuffix,comparison:p});let h,v;p>0?(h=(0,u.a)(t),v=(0,u.a)(e)):(h=(0,u.a)(e),v=(0,u.a)(t));const g=(0,o.u)(r?.roundingMethod??"round"),m=v.getTime()-h.getTime(),b=m/l.Cg,y=(m-((0,i.G)(v)-(0,i.G)(h)))/l.Cg,w=r?.unit;let x;if(x=w||(b<1?"second":b<60?"minute":b{"use strict";r.d(t,{W:()=>a});var n=r(2642);function a(e){return(0,n.a)(e).getTime()}},26010:(e,t,r)=>{"use strict";r.d(t,{_:()=>a});var n=r(2642);function a(e){return Math.trunc(+(0,n.a)(e)/1e3)}},6593:(e,t,r)=>{"use strict";r.d(t,{F:()=>d});var n=r(82526),a=r(17764),o=r(50502),i=r(10648),s=r(49858),l=r(43924),u=r(33607),c=r(2642);function d(e){const t=(0,c.a)(e.start),r=(0,c.a)(e.end),d={},p=(0,u.V)(r,t);p&&(d.years=p);const f=(0,n.W)(t,{years:d.years}),h=(0,s.W)(r,f);h&&(d.months=h);const v=(0,n.W)(f,{months:d.months}),g=(0,a.c)(r,v);g&&(d.days=g);const m=(0,n.W)(v,{days:d.days}),b=(0,o.M)(r,m);b&&(d.hours=b);const y=(0,n.W)(m,{hours:d.hours}),w=(0,i.o)(r,y);w&&(d.minutes=w);const x=(0,n.W)(y,{minutes:d.minutes}),k=(0,l.O)(r,x);return k&&(d.seconds=k),d}},72880:(e,t,r)=>{"use strict";r.d(t,{Y:()=>a});var n=r(2642);function a(e,t){return+(0,n.a)(e)<+(0,n.a)(t)}},84128:(e,t,r)=>{"use strict";r.d(t,{u:()=>o});var n=r(16074),a=r(58380);function o(e,t,r){return(0,n.f)((0,a.qg)(e,t,new Date,r))}},85551:(e,t,r)=>{"use strict";r.d(t,{r:()=>a});var n=r(51730);function a(e,t){return+(0,n.o)(e)===+(0,n.o)(t)}},58380:(e,t,r)=>{"use strict";r.d(t,{qg:()=>ge});var n=r(35840),a=r(82695);function o(){return Object.assign({},(0,a.q)())}var i=r(96519),s=r(2642),l=r(36847),u=r(76992);function c(e,t){const r=t instanceof Date?(0,n.w)(t,0):new t(0);return r.setFullYear(e.getFullYear(),e.getMonth(),e.getDate()),r.setHours(e.getHours(),e.getMinutes(),e.getSeconds(),e.getMilliseconds()),r}class d{subPriority=0;validate(e,t){return!0}}class p extends d{constructor(e,t,r,n,a){super(),this.value=e,this.validateValue=t,this.setValue=r,this.priority=n,a&&(this.subPriority=a)}validate(e,t){return this.validateValue(e,this.value,t)}set(e,t,r){return this.setValue(e,t,this.value,r)}}class f extends d{priority=10;subPriority=-1;set(e,t){return t.timestampIsSet?e:(0,n.w)(e,c(e,Date))}}class h{run(e,t,r,n){const a=this.parse(e,t,r,n);return a?{setter:new p(a.value,this.validate,this.set,this.priority,this.subPriority),rest:a.rest}:null}validate(e,t,r){return!0}}var v=r(2940);const 
g=/^(1[0-2]|0?\d)/,m=/^(3[0-1]|[0-2]?\d)/,b=/^(36[0-6]|3[0-5]\d|[0-2]?\d?\d)/,y=/^(5[0-3]|[0-4]?\d)/,w=/^(2[0-3]|[0-1]?\d)/,x=/^(2[0-4]|[0-1]?\d)/,k=/^(1[0-1]|0?\d)/,D=/^(1[0-2]|0?\d)/,C=/^[0-5]?\d/,E=/^[0-5]?\d/,A=/^\d/,S=/^\d{1,2}/,_=/^\d{1,3}/,M=/^\d{1,4}/,T=/^-?\d+/,O=/^-?\d/,P=/^-?\d{1,2}/,N=/^-?\d{1,3}/,R=/^-?\d{1,4}/,L=/^([+-])(\d{2})(\d{2})?|Z/,F=/^([+-])(\d{2})(\d{2})|Z/,Y=/^([+-])(\d{2})(\d{2})((\d{2}))?|Z/,j=/^([+-])(\d{2}):(\d{2})|Z/,B=/^([+-])(\d{2}):(\d{2})(:(\d{2}))?|Z/;function I(e,t){return e?{value:t(e.value),rest:e.rest}:e}function H(e,t){const r=t.match(e);return r?{value:parseInt(r[0],10),rest:t.slice(r[0].length)}:null}function W(e,t){const r=t.match(e);if(!r)return null;if("Z"===r[0])return{value:0,rest:t.slice(1)};const n="+"===r[1]?1:-1,a=r[2]?parseInt(r[2],10):0,o=r[3]?parseInt(r[3],10):0,i=r[5]?parseInt(r[5],10):0;return{value:n*(a*v.s0+o*v.Cg+i*v._m),rest:t.slice(r[0].length)}}function U(e){return H(T,e)}function q(e,t){switch(e){case 1:return H(A,t);case 2:return H(S,t);case 3:return H(_,t);case 4:return H(M,t);default:return H(new RegExp("^\\d{1,"+e+"}"),t)}}function z(e,t){switch(e){case 1:return H(O,t);case 2:return H(P,t);case 3:return H(N,t);case 4:return H(R,t);default:return H(new RegExp("^-?\\d{1,"+e+"}"),t)}}function Q(e){switch(e){case"morning":return 4;case"evening":return 17;case"pm":case"noon":case"afternoon":return 12;default:return 0}}function K(e,t){const r=t>0,n=r?t:1-t;let a;if(n<=50)a=e||100;else{const t=n+50;a=e+100*Math.trunc(t/100)-(e>=t%100?100:0)}return r?a:1-a}function G(e){return e%400===0||e%4===0&&e%100!==0}var X=r(29220),V=r(92528);var $=r(83787);var Z=r(50016);function J(e,t,r){const n=(0,s.a)(e),a=(0,Z.N)(n,r)-t;return n.setDate(n.getDate()-7*a),n}var ee=r(76974);function te(e,t){const r=(0,s.a)(e),n=(0,ee.s)(r)-t;return r.setDate(r.getDate()-7*n),r}const re=[31,28,31,30,31,30,31,31,30,31,30,31],ne=[31,29,31,30,31,30,31,31,30,31,30,31];var ae=r(13999);function oe(e,t,r){const n=(0,a.q)(),o=r?.weekStartsOn??r?.locale?.options?.weekStartsOn??n.weekStartsOn??n.locale?.options?.weekStartsOn??0,i=(0,s.a)(e),l=i.getDay(),u=7-o,c=t<0||t>6?t-(l+u)%7:((t%7+7)%7+u)%7-(l+u)%7;return(0,ae.f)(i,c)}function ie(e){let t=(0,s.a)(e).getDay();return 0===t&&(t=7),t}function se(e,t){const r=(0,s.a)(e),n=t-ie(r);return(0,ae.f)(r,n)}var le=r(40215);const ue={G:new class extends h{priority=140;parse(e,t,r){switch(t){case"G":case"GG":case"GGG":return r.era(e,{width:"abbreviated"})||r.era(e,{width:"narrow"});case"GGGGG":return r.era(e,{width:"narrow"});default:return r.era(e,{width:"wide"})||r.era(e,{width:"abbreviated"})||r.era(e,{width:"narrow"})}}set(e,t,r){return t.era=r,e.setFullYear(r,0,1),e.setHours(0,0,0,0),e}incompatibleTokens=["R","u","t","T"]},y:new class extends h{priority=130;incompatibleTokens=["Y","R","u","w","I","i","e","c","t","T"];parse(e,t,r){const n=e=>({year:e,isTwoDigitYear:"yy"===t});switch(t){case"y":return I(q(4,e),n);case"yo":return I(r.ordinalNumber(e,{unit:"year"}),n);default:return I(q(t.length,e),n)}}validate(e,t){return t.isTwoDigitYear||t.year>0}set(e,t,r){const n=e.getFullYear();if(r.isTwoDigitYear){const t=K(r.year,n);return e.setFullYear(t,0,1),e.setHours(0,0,0,0),e}const a="era"in t&&1!==t.era?1-r.year:r.year;return e.setFullYear(a,0,1),e.setHours(0,0,0,0),e}},Y:new class extends h{priority=130;parse(e,t,r){const n=e=>({year:e,isTwoDigitYear:"YY"===t});switch(t){case"Y":return I(q(4,e),n);case"Yo":return I(r.ordinalNumber(e,{unit:"year"}),n);default:return I(q(t.length,e),n)}}validate(e,t){return 
t.isTwoDigitYear||t.year>0}set(e,t,r,n){const a=(0,X.h)(e,n);if(r.isTwoDigitYear){const t=K(r.year,a);return e.setFullYear(t,0,n.firstWeekContainsDate),e.setHours(0,0,0,0),(0,V.k)(e,n)}const o="era"in t&&1!==t.era?1-r.year:r.year;return e.setFullYear(o,0,n.firstWeekContainsDate),e.setHours(0,0,0,0),(0,V.k)(e,n)}incompatibleTokens=["y","R","u","Q","q","M","L","I","d","D","i","t","T"]},R:new class extends h{priority=130;parse(e,t){return z("R"===t?4:t.length,e)}set(e,t,r){const a=(0,n.w)(e,0);return a.setFullYear(r,0,4),a.setHours(0,0,0,0),(0,$.b)(a)}incompatibleTokens=["G","y","Y","u","Q","q","M","L","w","d","D","e","c","t","T"]},u:new class extends h{priority=130;parse(e,t){return z("u"===t?4:t.length,e)}set(e,t,r){return e.setFullYear(r,0,1),e.setHours(0,0,0,0),e}incompatibleTokens=["G","y","Y","R","w","I","i","e","c","t","T"]},Q:new class extends h{priority=120;parse(e,t,r){switch(t){case"Q":case"QQ":return q(t.length,e);case"Qo":return r.ordinalNumber(e,{unit:"quarter"});case"QQQ":return r.quarter(e,{width:"abbreviated",context:"formatting"})||r.quarter(e,{width:"narrow",context:"formatting"});case"QQQQQ":return r.quarter(e,{width:"narrow",context:"formatting"});default:return r.quarter(e,{width:"wide",context:"formatting"})||r.quarter(e,{width:"abbreviated",context:"formatting"})||r.quarter(e,{width:"narrow",context:"formatting"})}}validate(e,t){return t>=1&&t<=4}set(e,t,r){return e.setMonth(3*(r-1),1),e.setHours(0,0,0,0),e}incompatibleTokens=["Y","R","q","M","L","w","I","d","D","i","e","c","t","T"]},q:new class extends h{priority=120;parse(e,t,r){switch(t){case"q":case"qq":return q(t.length,e);case"qo":return r.ordinalNumber(e,{unit:"quarter"});case"qqq":return r.quarter(e,{width:"abbreviated",context:"standalone"})||r.quarter(e,{width:"narrow",context:"standalone"});case"qqqqq":return r.quarter(e,{width:"narrow",context:"standalone"});default:return r.quarter(e,{width:"wide",context:"standalone"})||r.quarter(e,{width:"abbreviated",context:"standalone"})||r.quarter(e,{width:"narrow",context:"standalone"})}}validate(e,t){return t>=1&&t<=4}set(e,t,r){return e.setMonth(3*(r-1),1),e.setHours(0,0,0,0),e}incompatibleTokens=["Y","R","Q","M","L","w","I","d","D","i","e","c","t","T"]},M:new class extends h{incompatibleTokens=["Y","R","q","Q","L","w","I","D","i","e","c","t","T"];priority=110;parse(e,t,r){const n=e=>e-1;switch(t){case"M":return I(H(g,e),n);case"MM":return I(q(2,e),n);case"Mo":return I(r.ordinalNumber(e,{unit:"month"}),n);case"MMM":return r.month(e,{width:"abbreviated",context:"formatting"})||r.month(e,{width:"narrow",context:"formatting"});case"MMMMM":return r.month(e,{width:"narrow",context:"formatting"});default:return r.month(e,{width:"wide",context:"formatting"})||r.month(e,{width:"abbreviated",context:"formatting"})||r.month(e,{width:"narrow",context:"formatting"})}}validate(e,t){return t>=0&&t<=11}set(e,t,r){return e.setMonth(r,1),e.setHours(0,0,0,0),e}},L:new class extends h{priority=110;parse(e,t,r){const n=e=>e-1;switch(t){case"L":return I(H(g,e),n);case"LL":return I(q(2,e),n);case"Lo":return I(r.ordinalNumber(e,{unit:"month"}),n);case"LLL":return r.month(e,{width:"abbreviated",context:"standalone"})||r.month(e,{width:"narrow",context:"standalone"});case"LLLLL":return r.month(e,{width:"narrow",context:"standalone"});default:return r.month(e,{width:"wide",context:"standalone"})||r.month(e,{width:"abbreviated",context:"standalone"})||r.month(e,{width:"narrow",context:"standalone"})}}validate(e,t){return t>=0&&t<=11}set(e,t,r){return 
e.setMonth(r,1),e.setHours(0,0,0,0),e}incompatibleTokens=["Y","R","q","Q","M","w","I","D","i","e","c","t","T"]},w:new class extends h{priority=100;parse(e,t,r){switch(t){case"w":return H(y,e);case"wo":return r.ordinalNumber(e,{unit:"week"});default:return q(t.length,e)}}validate(e,t){return t>=1&&t<=53}set(e,t,r,n){return(0,V.k)(J(e,r,n),n)}incompatibleTokens=["y","R","u","q","Q","M","L","I","d","D","i","t","T"]},I:new class extends h{priority=100;parse(e,t,r){switch(t){case"I":return H(y,e);case"Io":return r.ordinalNumber(e,{unit:"week"});default:return q(t.length,e)}}validate(e,t){return t>=1&&t<=53}set(e,t,r){return(0,$.b)(te(e,r))}incompatibleTokens=["y","Y","u","q","Q","M","L","w","d","D","e","c","t","T"]},d:new class extends h{priority=90;subPriority=1;parse(e,t,r){switch(t){case"d":return H(m,e);case"do":return r.ordinalNumber(e,{unit:"date"});default:return q(t.length,e)}}validate(e,t){const r=G(e.getFullYear()),n=e.getMonth();return r?t>=1&&t<=ne[n]:t>=1&&t<=re[n]}set(e,t,r){return e.setDate(r),e.setHours(0,0,0,0),e}incompatibleTokens=["Y","R","q","Q","w","I","D","i","e","c","t","T"]},D:new class extends h{priority=90;subpriority=1;parse(e,t,r){switch(t){case"D":case"DD":return H(b,e);case"Do":return r.ordinalNumber(e,{unit:"date"});default:return q(t.length,e)}}validate(e,t){return G(e.getFullYear())?t>=1&&t<=366:t>=1&&t<=365}set(e,t,r){return e.setMonth(0,r),e.setHours(0,0,0,0),e}incompatibleTokens=["Y","R","q","Q","M","L","w","I","d","E","i","e","c","t","T"]},E:new class extends h{priority=90;parse(e,t,r){switch(t){case"E":case"EE":case"EEE":return r.day(e,{width:"abbreviated",context:"formatting"})||r.day(e,{width:"short",context:"formatting"})||r.day(e,{width:"narrow",context:"formatting"});case"EEEEE":return r.day(e,{width:"narrow",context:"formatting"});case"EEEEEE":return r.day(e,{width:"short",context:"formatting"})||r.day(e,{width:"narrow",context:"formatting"});default:return r.day(e,{width:"wide",context:"formatting"})||r.day(e,{width:"abbreviated",context:"formatting"})||r.day(e,{width:"short",context:"formatting"})||r.day(e,{width:"narrow",context:"formatting"})}}validate(e,t){return t>=0&&t<=6}set(e,t,r,n){return(e=oe(e,r,n)).setHours(0,0,0,0),e}incompatibleTokens=["D","i","e","c","t","T"]},e:new class extends h{priority=90;parse(e,t,r,n){const a=e=>{const t=7*Math.floor((e-1)/7);return(e+n.weekStartsOn+6)%7+t};switch(t){case"e":case"ee":return I(q(t.length,e),a);case"eo":return I(r.ordinalNumber(e,{unit:"day"}),a);case"eee":return r.day(e,{width:"abbreviated",context:"formatting"})||r.day(e,{width:"short",context:"formatting"})||r.day(e,{width:"narrow",context:"formatting"});case"eeeee":return r.day(e,{width:"narrow",context:"formatting"});case"eeeeee":return r.day(e,{width:"short",context:"formatting"})||r.day(e,{width:"narrow",context:"formatting"});default:return r.day(e,{width:"wide",context:"formatting"})||r.day(e,{width:"abbreviated",context:"formatting"})||r.day(e,{width:"short",context:"formatting"})||r.day(e,{width:"narrow",context:"formatting"})}}validate(e,t){return t>=0&&t<=6}set(e,t,r,n){return(e=oe(e,r,n)).setHours(0,0,0,0),e}incompatibleTokens=["y","R","u","q","Q","M","L","I","d","D","E","i","c","t","T"]},c:new class extends h{priority=90;parse(e,t,r,n){const a=e=>{const t=7*Math.floor((e-1)/7);return(e+n.weekStartsOn+6)%7+t};switch(t){case"c":case"cc":return I(q(t.length,e),a);case"co":return I(r.ordinalNumber(e,{unit:"day"}),a);case"ccc":return 
r.day(e,{width:"abbreviated",context:"standalone"})||r.day(e,{width:"short",context:"standalone"})||r.day(e,{width:"narrow",context:"standalone"});case"ccccc":return r.day(e,{width:"narrow",context:"standalone"});case"cccccc":return r.day(e,{width:"short",context:"standalone"})||r.day(e,{width:"narrow",context:"standalone"});default:return r.day(e,{width:"wide",context:"standalone"})||r.day(e,{width:"abbreviated",context:"standalone"})||r.day(e,{width:"short",context:"standalone"})||r.day(e,{width:"narrow",context:"standalone"})}}validate(e,t){return t>=0&&t<=6}set(e,t,r,n){return(e=oe(e,r,n)).setHours(0,0,0,0),e}incompatibleTokens=["y","R","u","q","Q","M","L","I","d","D","E","i","e","t","T"]},i:new class extends h{priority=90;parse(e,t,r){const n=e=>0===e?7:e;switch(t){case"i":case"ii":return q(t.length,e);case"io":return r.ordinalNumber(e,{unit:"day"});case"iii":return I(r.day(e,{width:"abbreviated",context:"formatting"})||r.day(e,{width:"short",context:"formatting"})||r.day(e,{width:"narrow",context:"formatting"}),n);case"iiiii":return I(r.day(e,{width:"narrow",context:"formatting"}),n);case"iiiiii":return I(r.day(e,{width:"short",context:"formatting"})||r.day(e,{width:"narrow",context:"formatting"}),n);default:return I(r.day(e,{width:"wide",context:"formatting"})||r.day(e,{width:"abbreviated",context:"formatting"})||r.day(e,{width:"short",context:"formatting"})||r.day(e,{width:"narrow",context:"formatting"}),n)}}validate(e,t){return t>=1&&t<=7}set(e,t,r){return(e=se(e,r)).setHours(0,0,0,0),e}incompatibleTokens=["y","Y","u","q","Q","M","L","w","d","D","E","e","c","t","T"]},a:new class extends h{priority=80;parse(e,t,r){switch(t){case"a":case"aa":case"aaa":return r.dayPeriod(e,{width:"abbreviated",context:"formatting"})||r.dayPeriod(e,{width:"narrow",context:"formatting"});case"aaaaa":return r.dayPeriod(e,{width:"narrow",context:"formatting"});default:return r.dayPeriod(e,{width:"wide",context:"formatting"})||r.dayPeriod(e,{width:"abbreviated",context:"formatting"})||r.dayPeriod(e,{width:"narrow",context:"formatting"})}}set(e,t,r){return e.setHours(Q(r),0,0,0),e}incompatibleTokens=["b","B","H","k","t","T"]},b:new class extends h{priority=80;parse(e,t,r){switch(t){case"b":case"bb":case"bbb":return r.dayPeriod(e,{width:"abbreviated",context:"formatting"})||r.dayPeriod(e,{width:"narrow",context:"formatting"});case"bbbbb":return r.dayPeriod(e,{width:"narrow",context:"formatting"});default:return r.dayPeriod(e,{width:"wide",context:"formatting"})||r.dayPeriod(e,{width:"abbreviated",context:"formatting"})||r.dayPeriod(e,{width:"narrow",context:"formatting"})}}set(e,t,r){return e.setHours(Q(r),0,0,0),e}incompatibleTokens=["a","B","H","k","t","T"]},B:new class extends h{priority=80;parse(e,t,r){switch(t){case"B":case"BB":case"BBB":return r.dayPeriod(e,{width:"abbreviated",context:"formatting"})||r.dayPeriod(e,{width:"narrow",context:"formatting"});case"BBBBB":return r.dayPeriod(e,{width:"narrow",context:"formatting"});default:return r.dayPeriod(e,{width:"wide",context:"formatting"})||r.dayPeriod(e,{width:"abbreviated",context:"formatting"})||r.dayPeriod(e,{width:"narrow",context:"formatting"})}}set(e,t,r){return e.setHours(Q(r),0,0,0),e}incompatibleTokens=["a","b","t","T"]},h:new class extends h{priority=70;parse(e,t,r){switch(t){case"h":return H(D,e);case"ho":return r.ordinalNumber(e,{unit:"hour"});default:return q(t.length,e)}}validate(e,t){return t>=1&&t<=12}set(e,t,r){const n=e.getHours()>=12;return 
n&&r<12?e.setHours(r+12,0,0,0):n||12!==r?e.setHours(r,0,0,0):e.setHours(0,0,0,0),e}incompatibleTokens=["H","K","k","t","T"]},H:new class extends h{priority=70;parse(e,t,r){switch(t){case"H":return H(w,e);case"Ho":return r.ordinalNumber(e,{unit:"hour"});default:return q(t.length,e)}}validate(e,t){return t>=0&&t<=23}set(e,t,r){return e.setHours(r,0,0,0),e}incompatibleTokens=["a","b","h","K","k","t","T"]},K:new class extends h{priority=70;parse(e,t,r){switch(t){case"K":return H(k,e);case"Ko":return r.ordinalNumber(e,{unit:"hour"});default:return q(t.length,e)}}validate(e,t){return t>=0&&t<=11}set(e,t,r){return e.getHours()>=12&&r<12?e.setHours(r+12,0,0,0):e.setHours(r,0,0,0),e}incompatibleTokens=["h","H","k","t","T"]},k:new class extends h{priority=70;parse(e,t,r){switch(t){case"k":return H(x,e);case"ko":return r.ordinalNumber(e,{unit:"hour"});default:return q(t.length,e)}}validate(e,t){return t>=1&&t<=24}set(e,t,r){const n=r<=24?r%24:r;return e.setHours(n,0,0,0),e}incompatibleTokens=["a","b","h","H","K","t","T"]},m:new class extends h{priority=60;parse(e,t,r){switch(t){case"m":return H(C,e);case"mo":return r.ordinalNumber(e,{unit:"minute"});default:return q(t.length,e)}}validate(e,t){return t>=0&&t<=59}set(e,t,r){return e.setMinutes(r,0,0),e}incompatibleTokens=["t","T"]},s:new class extends h{priority=50;parse(e,t,r){switch(t){case"s":return H(E,e);case"so":return r.ordinalNumber(e,{unit:"second"});default:return q(t.length,e)}}validate(e,t){return t>=0&&t<=59}set(e,t,r){return e.setSeconds(r,0),e}incompatibleTokens=["t","T"]},S:new class extends h{priority=30;parse(e,t){return I(q(t.length,e),(e=>Math.trunc(e*Math.pow(10,3-t.length))))}set(e,t,r){return e.setMilliseconds(r),e}incompatibleTokens=["t","T"]},X:new class extends h{priority=10;parse(e,t){switch(t){case"X":return W(L,e);case"XX":return W(F,e);case"XXXX":return W(Y,e);case"XXXXX":return W(B,e);default:return W(j,e)}}set(e,t,r){return t.timestampIsSet?e:(0,n.w)(e,e.getTime()-(0,le.G)(e)-r)}incompatibleTokens=["t","T","x"]},x:new class extends h{priority=10;parse(e,t){switch(t){case"x":return W(L,e);case"xx":return W(F,e);case"xxxx":return W(Y,e);case"xxxxx":return W(B,e);default:return W(j,e)}}set(e,t,r){return t.timestampIsSet?e:(0,n.w)(e,e.getTime()-(0,le.G)(e)-r)}incompatibleTokens=["t","T","X"]},t:new class extends h{priority=40;parse(e){return U(e)}set(e,t,r){return[(0,n.w)(e,1e3*r),{timestampIsSet:!0}]}incompatibleTokens="*"},T:new class extends h{priority=20;parse(e){return U(e)}set(e,t,r){return[(0,n.w)(e,r),{timestampIsSet:!0}]}incompatibleTokens="*"}},ce=/[yYQqMLwIdDecihHKkms]o|(\w)\1*|''|'(''|[^'])+('|$)|./g,de=/P+p+|P+|p+|''|'(''|[^'])+('|$)|./g,pe=/^'([^]*?)'?$/,fe=/''/g,he=/\S/,ve=/[a-zA-Z]/;function ge(e,t,r,a){const c=o(),d=a?.locale??c.locale??i.c,p=a?.firstWeekContainsDate??a?.locale?.options?.firstWeekContainsDate??c.firstWeekContainsDate??c.locale?.options?.firstWeekContainsDate??1,h=a?.weekStartsOn??a?.locale?.options?.weekStartsOn??c.weekStartsOn??c.locale?.options?.weekStartsOn??0;if(""===t)return""===e?(0,s.a)(r):(0,n.w)(r,NaN);const v={firstWeekContainsDate:p,weekStartsOn:h,locale:d},g=[new f],m=t.match(de).map((e=>{const t=e[0];if(t in l.m){return(0,l.m[t])(e,d.formatLong)}return e})).join("").match(ce),b=[];for(let o of m){!a?.useAdditionalWeekYearTokens&&(0,u.xM)(o)&&(0,u.Ss)(o,t,e),!a?.useAdditionalDayOfYearTokens&&(0,u.ef)(o)&&(0,u.Ss)(o,t,e);const i=o[0],s=ue[i];if(s){const{incompatibleTokens:t}=s;if(Array.isArray(t)){const e=b.find((e=>t.includes(e.token)||e.token===i));if(e)throw new RangeError(`The 
format string mustn't contain \`${e.fullToken}\` and \`${o}\` at the same time`)}else if("*"===s.incompatibleTokens&&b.length>0)throw new RangeError(`The format string mustn't contain \`${o}\` and any other token at the same time`);b.push({token:i,fullToken:o});const a=s.run(e,o,d.match,v);if(!a)return(0,n.w)(r,NaN);g.push(a.setter),e=a.rest}else{if(i.match(ve))throw new RangeError("Format string contains an unescaped latin alphabet character `"+i+"`");if("''"===o?o="'":"'"===i&&(o=o.match(pe)[1].replace(fe,"'")),0!==e.indexOf(o))return(0,n.w)(r,NaN);e=e.slice(o.length)}}if(e.length>0&&he.test(e))return(0,n.w)(r,NaN);const y=g.map((e=>e.priority)).sort(((e,t)=>t-e)).filter(((e,t,r)=>r.indexOf(e)===t)).map((e=>g.filter((t=>t.priority===e)).sort(((e,t)=>t.subPriority-e.subPriority)))).map((e=>e[0]));let w=(0,s.a)(r);if(isNaN(w.getTime()))return(0,n.w)(r,NaN);const x={};for(const o of y){if(!o.validate(w,v))return(0,n.w)(r,NaN);const e=o.set(w,x,v);Array.isArray(e)?(w=e[0],Object.assign(x,e[1])):w=e}return(0,n.w)(r,w)}}}]);
\ No newline at end of file
diff --git a/src/web/gui/v2/8323.437406936b642e8f6cb3.chunk.js.LICENSE.txt b/src/web/gui/v2/8323.437406936b642e8f6cb3.chunk.js.LICENSE.txt
deleted file mode 100644
index 827f2732c..000000000
--- a/src/web/gui/v2/8323.437406936b642e8f6cb3.chunk.js.LICENSE.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-/*!
- Copyright (c) 2018 Jed Watson.
- Licensed under the MIT License (MIT), see
- http://jedwatson.github.io/classnames
-*/
diff --git a/src/web/gui/v2/8323.e22de33686bb2f34063c.css b/src/web/gui/v2/8323.e22de33686bb2f34063c.css
deleted file mode 100644
index 371f90fba..000000000
--- a/src/web/gui/v2/8323.e22de33686bb2f34063c.css
+++ /dev/null
@@ -1,2 +0,0 @@
-.react-datepicker__year-read-view--down-arrow,.react-datepicker__month-read-view--down-arrow,.react-datepicker__month-year-read-view--down-arrow,.react-datepicker__navigation-icon::before{border-color:#ccc;border-style:solid;border-width:3px 3px 0 0;content:"";display:block;height:9px;position:absolute;top:6px;width:9px}.react-datepicker-popper[data-placement^=top] .react-datepicker__triangle,.react-datepicker-popper[data-placement^=bottom] .react-datepicker__triangle{margin-left:-4px;position:absolute;width:0}.react-datepicker-popper[data-placement^=top] .react-datepicker__triangle::before,.react-datepicker-popper[data-placement^=bottom] .react-datepicker__triangle::before,.react-datepicker-popper[data-placement^=top] .react-datepicker__triangle::after,.react-datepicker-popper[data-placement^=bottom] .react-datepicker__triangle::after{box-sizing:content-box;position:absolute;border:8px solid transparent;height:0;width:1px;content:"";z-index:-1;border-width:8px;left:-8px}.react-datepicker-popper[data-placement^=top] .react-datepicker__triangle::before,.react-datepicker-popper[data-placement^=bottom] .react-datepicker__triangle::before{border-bottom-color:#aeaeae}.react-datepicker-popper[data-placement^=bottom] .react-datepicker__triangle{top:0;margin-top:-8px}.react-datepicker-popper[data-placement^=bottom] .react-datepicker__triangle::before,.react-datepicker-popper[data-placement^=bottom] .react-datepicker__triangle::after{border-top:none;border-bottom-color:#f0f0f0}.react-datepicker-popper[data-placement^=bottom] .react-datepicker__triangle::after{top:0}.react-datepicker-popper[data-placement^=bottom] .react-datepicker__triangle::before{top:-1px;border-bottom-color:#aeaeae}.react-datepicker-popper[data-placement^=top] 
.react-datepicker__triangle{bottom:0;margin-bottom:-8px}.react-datepicker-popper[data-placement^=top] .react-datepicker__triangle::before,.react-datepicker-popper[data-placement^=top] .react-datepicker__triangle::after{border-bottom:none;border-top-color:#fff}.react-datepicker-popper[data-placement^=top] .react-datepicker__triangle::after{bottom:0}.react-datepicker-popper[data-placement^=top] .react-datepicker__triangle::before{bottom:-1px;border-top-color:#aeaeae}.react-datepicker-wrapper{display:inline-block;padding:0;border:0}.react-datepicker{font-family:"Helvetica Neue", helvetica, arial, sans-serif;font-size:0.8rem;background-color:#fff;color:#000;border:1px solid #aeaeae;border-radius:0.3rem;display:inline-block;position:relative}.react-datepicker--time-only .react-datepicker__triangle{left:35px}.react-datepicker--time-only .react-datepicker__time-container{border-left:0}.react-datepicker--time-only .react-datepicker__time,.react-datepicker--time-only .react-datepicker__time-box{border-bottom-left-radius:0.3rem;border-bottom-right-radius:0.3rem}.react-datepicker__triangle{position:absolute;left:50px}.react-datepicker-popper{z-index:1}.react-datepicker-popper[data-placement^=bottom]{padding-top:10px}.react-datepicker-popper[data-placement=bottom-end] .react-datepicker__triangle,.react-datepicker-popper[data-placement=top-end] .react-datepicker__triangle{left:auto;right:50px}.react-datepicker-popper[data-placement^=top]{padding-bottom:10px}.react-datepicker-popper[data-placement^=right]{padding-left:8px}.react-datepicker-popper[data-placement^=right] .react-datepicker__triangle{left:auto;right:42px}.react-datepicker-popper[data-placement^=left]{padding-right:8px}.react-datepicker-popper[data-placement^=left] .react-datepicker__triangle{left:42px;right:auto}.react-datepicker__header{text-align:center;background-color:#f0f0f0;border-bottom:1px solid #aeaeae;border-top-left-radius:0.3rem;padding:8px 0;position:relative}.react-datepicker__header--time{padding-bottom:8px;padding-left:5px;padding-right:5px}.react-datepicker__header--time:not(.react-datepicker__header--time--only){border-top-left-radius:0}.react-datepicker__header:not(.react-datepicker__header--has-time-select){border-top-right-radius:0.3rem}.react-datepicker__year-dropdown-container--select,.react-datepicker__month-dropdown-container--select,.react-datepicker__month-year-dropdown-container--select,.react-datepicker__year-dropdown-container--scroll,.react-datepicker__month-dropdown-container--scroll,.react-datepicker__month-year-dropdown-container--scroll{display:inline-block;margin:0 
15px}.react-datepicker__current-month,.react-datepicker-time__header,.react-datepicker-year-header{margin-top:0;color:#000;font-weight:bold;font-size:0.944rem}.react-datepicker-time__header{text-overflow:ellipsis;white-space:nowrap;overflow:hidden}.react-datepicker__navigation{align-items:center;background:none;display:flex;justify-content:center;text-align:center;cursor:pointer;position:absolute;top:2px;padding:0;border:none;z-index:1;height:32px;width:32px;text-indent:-999em;overflow:hidden}.react-datepicker__navigation--previous{left:2px}.react-datepicker__navigation--next{right:2px}.react-datepicker__navigation--next--with-time:not(.react-datepicker__navigation--next--with-today-button){right:85px}.react-datepicker__navigation--years{position:relative;top:0;display:block;margin-left:auto;margin-right:auto}.react-datepicker__navigation--years-previous{top:4px}.react-datepicker__navigation--years-upcoming{top:-4px}.react-datepicker__navigation:hover *::before{border-color:#a6a6a6}.react-datepicker__navigation-icon{position:relative;top:-1px;font-size:20px;width:0}.react-datepicker__navigation-icon--next{left:-2px}.react-datepicker__navigation-icon--next::before{transform:rotate(45deg);left:-7px}.react-datepicker__navigation-icon--previous{right:-2px}.react-datepicker__navigation-icon--previous::before{transform:rotate(225deg);right:-7px}.react-datepicker__month-container{float:left}.react-datepicker__year{margin:0.4rem;text-align:center}.react-datepicker__year-wrapper{display:flex;flex-wrap:wrap;max-width:180px}.react-datepicker__year .react-datepicker__year-text{display:inline-block;width:4rem;margin:2px}.react-datepicker__month{margin:0.4rem;text-align:center}.react-datepicker__month .react-datepicker__month-text,.react-datepicker__month .react-datepicker__quarter-text{display:inline-block;width:4rem;margin:2px}.react-datepicker__input-time-container{clear:both;width:100%;float:left;margin:5px 0 10px 15px;text-align:left}.react-datepicker__input-time-container .react-datepicker-time__caption{display:inline-block}.react-datepicker__input-time-container .react-datepicker-time__input-container{display:inline-block}.react-datepicker__input-time-container .react-datepicker-time__input-container .react-datepicker-time__input{display:inline-block;margin-left:10px}.react-datepicker__input-time-container .react-datepicker-time__input-container .react-datepicker-time__input input{width:auto}.react-datepicker__input-time-container .react-datepicker-time__input-container .react-datepicker-time__input input[type=time]::-webkit-inner-spin-button,.react-datepicker__input-time-container .react-datepicker-time__input-container .react-datepicker-time__input input[type=time]::-webkit-outer-spin-button{-webkit-appearance:none;margin:0}.react-datepicker__input-time-container .react-datepicker-time__input-container .react-datepicker-time__input input[type=time]{-moz-appearance:textfield}.react-datepicker__input-time-container .react-datepicker-time__input-container .react-datepicker-time__delimiter{margin-left:5px;display:inline-block}.react-datepicker__time-container{float:right;border-left:1px solid #aeaeae;width:85px}.react-datepicker__time-container--with-today-button{display:inline;border:1px solid #aeaeae;border-radius:0.3rem;position:absolute;right:-87px;top:0}.react-datepicker__time-container .react-datepicker__time{position:relative;background:white;border-bottom-right-radius:0.3rem}.react-datepicker__time-container .react-datepicker__time 
.react-datepicker__time-box{width:85px;overflow-x:hidden;margin:0 auto;text-align:center;border-bottom-right-radius:0.3rem}.react-datepicker__time-container .react-datepicker__time .react-datepicker__time-box ul.react-datepicker__time-list{list-style:none;margin:0;height:calc(195px + 1.7rem / 2);overflow-y:scroll;padding-right:0;padding-left:0;width:100%;box-sizing:content-box}.react-datepicker__time-container .react-datepicker__time .react-datepicker__time-box ul.react-datepicker__time-list li.react-datepicker__time-list-item{height:30px;padding:5px 10px;white-space:nowrap}.react-datepicker__time-container .react-datepicker__time .react-datepicker__time-box ul.react-datepicker__time-list li.react-datepicker__time-list-item:hover{cursor:pointer;background-color:#f0f0f0}.react-datepicker__time-container .react-datepicker__time .react-datepicker__time-box ul.react-datepicker__time-list li.react-datepicker__time-list-item--selected{background-color:#216ba5;color:white;font-weight:bold}.react-datepicker__time-container .react-datepicker__time .react-datepicker__time-box ul.react-datepicker__time-list li.react-datepicker__time-list-item--selected:hover{background-color:#216ba5}.react-datepicker__time-container .react-datepicker__time .react-datepicker__time-box ul.react-datepicker__time-list li.react-datepicker__time-list-item--disabled{color:#ccc}.react-datepicker__time-container .react-datepicker__time .react-datepicker__time-box ul.react-datepicker__time-list li.react-datepicker__time-list-item--disabled:hover{cursor:default;background-color:transparent}.react-datepicker__week-number{color:#ccc;display:inline-block;width:1.7rem;line-height:1.7rem;text-align:center;margin:0.166rem}.react-datepicker__week-number.react-datepicker__week-number--clickable{cursor:pointer}.react-datepicker__week-number.react-datepicker__week-number--clickable:not(.react-datepicker__week-number--selected,.react-datepicker__week-number--keyboard-selected):hover{border-radius:0.3rem;background-color:#f0f0f0}.react-datepicker__week-number--selected{border-radius:0.3rem;background-color:#216ba5;color:#fff}.react-datepicker__week-number--selected:hover{background-color:#1d5d90}.react-datepicker__week-number--keyboard-selected{border-radius:0.3rem;background-color:#2a87d0;color:#fff}.react-datepicker__week-number--keyboard-selected:hover{background-color:#1d5d90}.react-datepicker__day-names{white-space:nowrap;margin-bottom:-8px}.react-datepicker__week{white-space:nowrap}.react-datepicker__day-name,.react-datepicker__day,.react-datepicker__time-name{color:#000;display:inline-block;width:1.7rem;line-height:1.7rem;text-align:center;margin:0.166rem}.react-datepicker__day,.react-datepicker__month-text,.react-datepicker__quarter-text,.react-datepicker__year-text{cursor:pointer}.react-datepicker__day:hover,.react-datepicker__month-text:hover,.react-datepicker__quarter-text:hover,.react-datepicker__year-text:hover{border-radius:0.3rem;background-color:#f0f0f0}.react-datepicker__day--today,.react-datepicker__month-text--today,.react-datepicker__quarter-text--today,.react-datepicker__year-text--today{font-weight:bold}.react-datepicker__day--highlighted,.react-datepicker__month-text--highlighted,.react-datepicker__quarter-text--highlighted,.react-datepicker__year-text--highlighted{border-radius:0.3rem;background-color:#3dcc4a;color:#fff}.react-datepicker__day--highlighted:hover,.react-datepicker__month-text--highlighted:hover,.react-datepicker__quarter-text--highlighted:hover,.react-datepicker__year-text--highlighted:hover{background
-color:#32be3f}.react-datepicker__day--highlighted-custom-1,.react-datepicker__month-text--highlighted-custom-1,.react-datepicker__quarter-text--highlighted-custom-1,.react-datepicker__year-text--highlighted-custom-1{color:magenta}.react-datepicker__day--highlighted-custom-2,.react-datepicker__month-text--highlighted-custom-2,.react-datepicker__quarter-text--highlighted-custom-2,.react-datepicker__year-text--highlighted-custom-2{color:green}.react-datepicker__day--holidays,.react-datepicker__month-text--holidays,.react-datepicker__quarter-text--holidays,.react-datepicker__year-text--holidays{position:relative;border-radius:0.3rem;background-color:#ff6803;color:#fff}.react-datepicker__day--holidays .holiday-overlay,.react-datepicker__month-text--holidays .holiday-overlay,.react-datepicker__quarter-text--holidays .holiday-overlay,.react-datepicker__year-text--holidays .holiday-overlay{position:absolute;bottom:100%;left:50%;transform:translateX(-50%);background-color:#333;color:#fff;padding:4px;border-radius:4px;white-space:nowrap;visibility:hidden;opacity:0;transition:visibility 0s, opacity 0.3s ease-in-out}.react-datepicker__day--holidays:hover,.react-datepicker__month-text--holidays:hover,.react-datepicker__quarter-text--holidays:hover,.react-datepicker__year-text--holidays:hover{background-color:#cf5300}.react-datepicker__day--holidays:hover .holiday-overlay,.react-datepicker__month-text--holidays:hover .holiday-overlay,.react-datepicker__quarter-text--holidays:hover .holiday-overlay,.react-datepicker__year-text--holidays:hover .holiday-overlay{visibility:visible;opacity:1}.react-datepicker__day--selected,.react-datepicker__day--in-selecting-range,.react-datepicker__day--in-range,.react-datepicker__month-text--selected,.react-datepicker__month-text--in-selecting-range,.react-datepicker__month-text--in-range,.react-datepicker__quarter-text--selected,.react-datepicker__quarter-text--in-selecting-range,.react-datepicker__quarter-text--in-range,.react-datepicker__year-text--selected,.react-datepicker__year-text--in-selecting-range,.react-datepicker__year-text--in-range{border-radius:0.3rem;background-color:#216ba5;color:#fff}.react-datepicker__day--selected:hover,.react-datepicker__day--in-selecting-range:hover,.react-datepicker__day--in-range:hover,.react-datepicker__month-text--selected:hover,.react-datepicker__month-text--in-selecting-range:hover,.react-datepicker__month-text--in-range:hover,.react-datepicker__quarter-text--selected:hover,.react-datepicker__quarter-text--in-selecting-range:hover,.react-datepicker__quarter-text--in-range:hover,.react-datepicker__year-text--selected:hover,.react-datepicker__year-text--in-selecting-range:hover,.react-datepicker__year-text--in-range:hover{background-color:#1d5d90}.react-datepicker__day--keyboard-selected,.react-datepicker__month-text--keyboard-selected,.react-datepicker__quarter-text--keyboard-selected,.react-datepicker__year-text--keyboard-selected{border-radius:0.3rem;background-color:#bad9f1;color:#000}.react-datepicker__day--keyboard-selected:hover,.react-datepicker__month-text--keyboard-selected:hover,.react-datepicker__quarter-text--keyboard-selected:hover,.react-datepicker__year-text--keyboard-selected:hover{background-color:#1d5d90}.react-datepicker__day--in-selecting-range:not(.react-datepicker__day--in-range,.react-datepicker__month-text--in-range,.react-datepicker__quarter-text--in-range,.react-datepicker__year-text--in-range),.react-datepicker__month-text--in-selecting-range:not(.react-datepicker__day--in-range,.react-datepicker__mo
nth-text--in-range,.react-datepicker__quarter-text--in-range,.react-datepicker__year-text--in-range),.react-datepicker__quarter-text--in-selecting-range:not(.react-datepicker__day--in-range,.react-datepicker__month-text--in-range,.react-datepicker__quarter-text--in-range,.react-datepicker__year-text--in-range),.react-datepicker__year-text--in-selecting-range:not(.react-datepicker__day--in-range,.react-datepicker__month-text--in-range,.react-datepicker__quarter-text--in-range,.react-datepicker__year-text--in-range){background-color:rgba(33,107,165,0.5)}.react-datepicker__month--selecting-range .react-datepicker__day--in-range:not(.react-datepicker__day--in-selecting-range,.react-datepicker__month-text--in-selecting-range,.react-datepicker__quarter-text--in-selecting-range,.react-datepicker__year-text--in-selecting-range),.react-datepicker__year--selecting-range .react-datepicker__day--in-range:not(.react-datepicker__day--in-selecting-range,.react-datepicker__month-text--in-selecting-range,.react-datepicker__quarter-text--in-selecting-range,.react-datepicker__year-text--in-selecting-range),.react-datepicker__month--selecting-range .react-datepicker__month-text--in-range:not(.react-datepicker__day--in-selecting-range,.react-datepicker__month-text--in-selecting-range,.react-datepicker__quarter-text--in-selecting-range,.react-datepicker__year-text--in-selecting-range),.react-datepicker__year--selecting-range .react-datepicker__month-text--in-range:not(.react-datepicker__day--in-selecting-range,.react-datepicker__month-text--in-selecting-range,.react-datepicker__quarter-text--in-selecting-range,.react-datepicker__year-text--in-selecting-range),.react-datepicker__month--selecting-range .react-datepicker__quarter-text--in-range:not(.react-datepicker__day--in-selecting-range,.react-datepicker__month-text--in-selecting-range,.react-datepicker__quarter-text--in-selecting-range,.react-datepicker__year-text--in-selecting-range),.react-datepicker__year--selecting-range .react-datepicker__quarter-text--in-range:not(.react-datepicker__day--in-selecting-range,.react-datepicker__month-text--in-selecting-range,.react-datepicker__quarter-text--in-selecting-range,.react-datepicker__year-text--in-selecting-range),.react-datepicker__month--selecting-range .react-datepicker__year-text--in-range:not(.react-datepicker__day--in-selecting-range,.react-datepicker__month-text--in-selecting-range,.react-datepicker__quarter-text--in-selecting-range,.react-datepicker__year-text--in-selecting-range),.react-datepicker__year--selecting-range .react-datepicker__year-text--in-range:not(.react-datepicker__day--in-selecting-range,.react-datepicker__month-text--in-selecting-range,.react-datepicker__quarter-text--in-selecting-range,.react-datepicker__year-text--in-selecting-range){background-color:#f0f0f0;color:#000}.react-datepicker__day--disabled,.react-datepicker__month-text--disabled,.react-datepicker__quarter-text--disabled,.react-datepicker__year-text--disabled{cursor:default;color:#ccc}.react-datepicker__day--disabled:hover,.react-datepicker__month-text--disabled:hover,.react-datepicker__quarter-text--disabled:hover,.react-datepicker__year-text--disabled:hover{background-color:transparent}.react-datepicker__input-container{position:relative;display:inline-block;width:100%}.react-datepicker__input-container .react-datepicker__calendar-icon{position:absolute;padding:0.5rem;box-sizing:content-box}.react-datepicker__view-calendar-icon input{padding:6px 10px 5px 
25px}.react-datepicker__year-read-view,.react-datepicker__month-read-view,.react-datepicker__month-year-read-view{border:1px solid transparent;border-radius:0.3rem;position:relative}.react-datepicker__year-read-view:hover,.react-datepicker__month-read-view:hover,.react-datepicker__month-year-read-view:hover{cursor:pointer}.react-datepicker__year-read-view:hover .react-datepicker__year-read-view--down-arrow,.react-datepicker__year-read-view:hover .react-datepicker__month-read-view--down-arrow,.react-datepicker__month-read-view:hover .react-datepicker__year-read-view--down-arrow,.react-datepicker__month-read-view:hover .react-datepicker__month-read-view--down-arrow,.react-datepicker__month-year-read-view:hover .react-datepicker__year-read-view--down-arrow,.react-datepicker__month-year-read-view:hover .react-datepicker__month-read-view--down-arrow{border-top-color:#b3b3b3}.react-datepicker__year-read-view--down-arrow,.react-datepicker__month-read-view--down-arrow,.react-datepicker__month-year-read-view--down-arrow{transform:rotate(135deg);right:-16px;top:0}.react-datepicker__year-dropdown,.react-datepicker__month-dropdown,.react-datepicker__month-year-dropdown{background-color:#f0f0f0;position:absolute;width:50%;left:25%;top:30px;z-index:1;text-align:center;border-radius:0.3rem;border:1px solid #aeaeae}.react-datepicker__year-dropdown:hover,.react-datepicker__month-dropdown:hover,.react-datepicker__month-year-dropdown:hover{cursor:pointer}.react-datepicker__year-dropdown--scrollable,.react-datepicker__month-dropdown--scrollable,.react-datepicker__month-year-dropdown--scrollable{height:150px;overflow-y:scroll}.react-datepicker__year-option,.react-datepicker__month-option,.react-datepicker__month-year-option{line-height:20px;width:100%;display:block;margin-left:auto;margin-right:auto}.react-datepicker__year-option:first-of-type,.react-datepicker__month-option:first-of-type,.react-datepicker__month-year-option:first-of-type{border-top-left-radius:0.3rem;border-top-right-radius:0.3rem}.react-datepicker__year-option:last-of-type,.react-datepicker__month-option:last-of-type,.react-datepicker__month-year-option:last-of-type{-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;border-bottom-left-radius:0.3rem;border-bottom-right-radius:0.3rem}.react-datepicker__year-option:hover,.react-datepicker__month-option:hover,.react-datepicker__month-year-option:hover{background-color:#ccc}.react-datepicker__year-option:hover .react-datepicker__navigation--years-upcoming,.react-datepicker__month-option:hover .react-datepicker__navigation--years-upcoming,.react-datepicker__month-year-option:hover .react-datepicker__navigation--years-upcoming{border-bottom-color:#b3b3b3}.react-datepicker__year-option:hover .react-datepicker__navigation--years-previous,.react-datepicker__month-option:hover .react-datepicker__navigation--years-previous,.react-datepicker__month-year-option:hover .react-datepicker__navigation--years-previous{border-top-color:#b3b3b3}.react-datepicker__year-option--selected,.react-datepicker__month-option--selected,.react-datepicker__month-year-option--selected{position:absolute;left:15px}.react-datepicker__close-icon{cursor:pointer;background-color:transparent;border:0;outline:0;padding:0 6px 0 
0;position:absolute;top:0;right:0;height:100%;display:table-cell;vertical-align:middle}.react-datepicker__close-icon::after{cursor:pointer;background-color:#216ba5;color:#fff;border-radius:50%;height:16px;width:16px;padding:2px;font-size:12px;line-height:1;text-align:center;display:table-cell;vertical-align:middle;content:"×"}.react-datepicker__close-icon--disabled{cursor:default}.react-datepicker__close-icon--disabled::after{cursor:default;background-color:#ccc}.react-datepicker__today-button{background:#f0f0f0;border-top:1px solid #aeaeae;cursor:pointer;text-align:center;font-weight:bold;padding:5px 0;clear:left}.react-datepicker__portal{position:fixed;width:100vw;height:100vh;background-color:rgba(0,0,0,0.8);left:0;top:0;justify-content:center;align-items:center;display:flex;z-index:2147483647}.react-datepicker__portal .react-datepicker__day-name,.react-datepicker__portal .react-datepicker__day,.react-datepicker__portal .react-datepicker__time-name{width:3rem;line-height:3rem}@media (max-width: 400px), (max-height: 550px){.react-datepicker__portal .react-datepicker__day-name,.react-datepicker__portal .react-datepicker__day,.react-datepicker__portal .react-datepicker__time-name{width:2rem;line-height:2rem}}.react-datepicker__portal .react-datepicker__current-month,.react-datepicker__portal .react-datepicker-time__header{font-size:1.44rem}.react-datepicker__children-container{width:13.8rem;margin:0.4rem;padding-right:0.2rem;padding-left:0.2rem;height:auto}.react-datepicker__aria-live{position:absolute;clip-path:circle(0);border:0;height:1px;margin:-1px;overflow:hidden;padding:0;width:1px;white-space:nowrap}.react-datepicker__calendar-icon{width:1em;height:1em;vertical-align:-0.125em}
-
diff --git a/src/web/gui/v2/8505.c330f2104fefd71717da.chunk.js b/src/web/gui/v2/8505.c330f2104fefd71717da.chunk.js
deleted file mode 100644
index eef2a88b9..000000000
--- a/src/web/gui/v2/8505.c330f2104fefd71717da.chunk.js
+++ /dev/null
@@ -1 +0,0 @@
-!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="80647485-8a0e-4651-b370-ef6ea86fc55d",e._sentryDebugIdIdentifier="sentry-dbid-80647485-8a0e-4651-b370-ef6ea86fc55d")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[8505],{8505:(e,t,n)=>{n.r(t),n.d(t,{default:()=>y});n(62953);var a=n(96540),r=n(84929),o=n(16074),l=n(63933),i=n(82526),d=n(29176),s=n(87991),c=n(26655),u=n(47444),f=n(83199),p=n(5396),g=n(4659),b=n(87659);const m=(0,u.gD)({key:"fetchLicense",get:()=>c.A.get("/api/v1/license").then((e=>e.data)).catch((()=>null))}),w=(0,u.eU)({key:"licenseAtom",default:m}),y=()=>{if(!window.envSettings.onprem)return null;const e=(0,u.xf)(w),[t,n]=(0,b.A)(),[c,m]=(0,a.useState)(),[y,E]=(0,a.useState)(!1);return(0,a.useEffect)((()=>{if("hasValue"!==e.state)return;const t=e.contents,a=(0,r.H)(t.exp);if(m(a),!(0,o.f)(a))return;const i=parseInt((a-new Date)/1e3/60/60/24);if(i>30)return;const d=localStorage.getItem("dismissLicenceWarning");d&&(0,o.f)((0,r.H)(d))&&!(0,l.R)((0,r.H)(d))&&i>0||(n(!0),i<0&&E(!0))}),[e]),t?y?a.createElement(f.Layer,{full:!0,backdropProps:{backdropBlur:"3px"}},a.createElement(p.A,{testId:"onprem-banner",width:"100%",background:"errorBackground",position:"absolute",top:"0",tooltipProps:{align:"top"},zIndex:20},a.createElement(f.Flex,{justifyContent:"center",alignItems:"center",width:"100%",gap:2},a.createElement(f.Text,null,"Your Netdata Enterprise On-prem License has expired on ",(0,s.GP)(c,"PPPP"),". Please contact your admin /"," ",a.createElement(g.A,{"data-testid":"renew",href:"mailto:billing@netdata.cloud",as:"a",cursor:"pointer",textDecoration:"underline",color:"main"},"billing@netdata.cloud")," ","to renew your license.")))):a.createElement(p.A,{testId:"onprem-banner",width:"100%",background:"warningBackground",position:"absolute",top:"0",onClose:()=>{n(),localStorage.setItem("dismissLicenceWarning",(0,i.W)(new Date,{days:1}).toISOString())},tooltipProps:{align:"top"},zIndex:20},a.createElement(f.Flex,{justifyContent:"center",alignItems:"center",width:"100%",gap:2},a.createElement(f.Text,null,"Your Netdata Enterprise On-prem License will expire in ",(0,d.m)(c),". Please contact your admin /"," ",a.createElement(g.A,{"data-testid":"renew",href:"mailto:billing@netdata.cloud",as:"a",cursor:"pointer",textDecoration:"underline",color:"main"},"billing@netdata.cloud")," ","to renew your license."))):null}}}]);
\ No newline at end of file
diff --git a/src/web/gui/v2/86.2c88d4d37b88e2620051.chunk.js b/src/web/gui/v2/86.2c88d4d37b88e2620051.chunk.js
deleted file mode 100644
index ca4e5c42a..000000000
--- a/src/web/gui/v2/86.2c88d4d37b88e2620051.chunk.js
+++ /dev/null
@@ -1 +0,0 @@
-!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},n=(new Error).stack;n&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[n]="8dce808c-0875-4ae0-b589-d21a5bf0a9b0",e._sentryDebugIdIdentifier="sentry-dbid-8dce808c-0875-4ae0-b589-d21a5bf0a9b0")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[86],{60086:(e,n,t)=>{t.r(n),t.d(n,{default:()=>U});var a=t(58168),r=(t(62953),t(96540)),o=t(83199),l=t(16074),d=t(84929),i=t(63933),s=t(82526),c=t(29176),p=t(87991),u=t(5396),b=t(29217),f=t(87659),g=t(92155),m=t(25624),E=t(99904),w=t(35454),y=t(24864),x=t(93476),I=t(28061);const h=new Date("12/31/2023 23:59:59 +2"),A=new Date,S=(k=(0,g.A)(y.PL),e=>{let{canUpgrade:n,...t}=e;return n?r.createElement(k,t):r.createElement(b.A,{content:"You don't have permissions to upgrade the plan"},r.createElement(o.Flex,null,r.createElement(k,(0,a.A)({disabled:!0},t))))});var k;const U=e=>{let{flavour:n="banner"}=e;const{onTrial:t,sidebarWarningVisible:a,bannerVisible:b,dismissBanner:g,type:k,canUpgrade:U,planIsFreeOrEarlyBird:D}=(0,m.A)(),{coupon:_}=(0,E.A)(),v=parseFloat((h-A)/1e3/60/60/24),B=_||(v>0?"50SURPRISE":""),C=(0,I.A)(B),P=!t&&D,[R,T]=(0,f.A)();return(0,r.useEffect)((()=>{const e=localStorage.getItem("dismissSidebarBanner");e&&(0,l.f)((0,d.H)(e))&&!(0,i.R)((0,d.H)(e))||T(!0)}),[]),"sidebar"==n&&(a||P)?R?r.createElement(y.bg,{type:t?k:"default",column:!0,gap:2,margin:[2],padding:[2]},r.createElement(o.Box,{"data-testid":"close-button",as:o.Icon,color:"text",cursor:"pointer",name:"x",position:"absolute",right:"2px",top:"2px",width:"12px",height:"12px",onClick:()=>{T(),localStorage.setItem("dismissSidebarBanner",(0,s.W)(new Date,{days:3}).toISOString())}}),r.createElement(x.A,{flavour:t?n:"freePlanUpgrade",couponRemainingDays:v,expDate:h,onUpdateClick:C}),r.createElement(S,{feature:"UpgradeToBusiness",isStart:!0,small:!0,"data-testid":"upgradeToBusiness-sidebar",label:v>0?"Upgrade now!":"Upgrade",canUpgrade:U,onClick:C}),v>0&&(0,l.f)(h)&&r.createElement(o.TextNano,{strong:!0,textAlign:"center",lineHeight:.8},"Expires"," ",v<1?"".concat((0,c.m)(h,{addSuffix:!0})," ").concat((0,p.GP)(h,"'at' p")):"on ".concat((0,p.GP)(h,"do 'of' MMM', at' p")))):null:b?r.createElement(u.A,{testId:"trial-banner",width:"100%",background:w.ue[k],onClose:g,tooltipProps:{align:"top"},zIndex:20},r.createElement(x.A,{flavour:n,onUpdateClick:C})):null}}}]);
\ No newline at end of file
diff --git a/src/web/gui/v2/8637.0958494526e838a60d2b.js b/src/web/gui/v2/8637.0958494526e838a60d2b.js
deleted file mode 100644
index 21e04559c..000000000
--- a/src/web/gui/v2/8637.0958494526e838a60d2b.js
+++ /dev/null
@@ -1,2 +0,0 @@
-/*! For license information please see 8637.0958494526e838a60d2b.js.LICENSE.txt */
-!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="86db4fec-f805-4ffb-a487-2a38cbe66c27",e._sentryDebugIdIdentifier="sentry-dbid-86db4fec-f805-4ffb-a487-2a38cbe66c27")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[8637],{43375:(e,t,n)=>{"use strict";n.d(t,{Mp:()=>Ye,Hd:()=>pt,vL:()=>ne,Pf:()=>xe,cA:()=>ge,IG:()=>ye,fp:()=>E,WB:()=>lt,Sj:()=>D,TT:()=>T,Qo:()=>R,fF:()=>Je,E5:()=>c,PM:()=>Qe,zM:()=>nt,MS:()=>v,FR:()=>y});var r=n(96540),o=n(40961),i=n(74979);const a={display:"none"};function s(e){let{id:t,value:n}=e;return r.createElement("div",{id:t,style:a},n)}function u(e){let{id:t,announcement:n,ariaLiveType:o="assertive"}=e;return r.createElement("div",{id:t,style:{position:"fixed",width:1,height:1,margin:-1,border:0,padding:0,overflow:"hidden",clip:"rect(0 0 0 0)",clipPath:"inset(100%)",whiteSpace:"nowrap"},role:"status","aria-live":o,"aria-atomic":!0},n)}const l=(0,r.createContext)(null);function c(e){const t=(0,r.useContext)(l);(0,r.useEffect)((()=>{if(!t)throw new Error("useDndMonitor must be used within a children of <DndContext>");return t(e)}),[e,t])}const f={draggable:"\n To pick up a draggable item, press the space bar.\n While dragging, use the arrow keys to move the item.\n Press space again to drop the item in its new position, or press escape to cancel.\n "},d={onDragStart(e){let{active:t}=e;return"Picked up draggable item "+t.id+"."},onDragOver(e){let{active:t,over:n}=e;return n?"Draggable item "+t.id+" was moved over droppable area "+n.id+".":"Draggable item "+t.id+" is no longer over a droppable area."},onDragEnd(e){let{active:t,over:n}=e;return n?"Draggable item "+t.id+" was dropped over droppable area 
"+n.id:"Draggable item "+t.id+" was dropped."},onDragCancel(e){let{active:t}=e;return"Dragging was cancelled. Draggable item "+t.id+" was dropped."}};function h(e){let{announcements:t=d,container:n,hiddenTextDescribedById:a,screenReaderInstructions:l=f}=e;const{announce:h,announcement:p}=function(){const[e,t]=(0,r.useState)("");return{announce:(0,r.useCallback)((e=>{null!=e&&t(e)}),[]),announcement:e}}(),g=(0,i.YG)("DndLiveRegion"),[v,y]=(0,r.useState)(!1);if((0,r.useEffect)((()=>{y(!0)}),[]),c((0,r.useMemo)((()=>({onDragStart(e){let{active:n}=e;h(t.onDragStart({active:n}))},onDragMove(e){let{active:n,over:r}=e;t.onDragMove&&h(t.onDragMove({active:n,over:r}))},onDragOver(e){let{active:n,over:r}=e;h(t.onDragOver({active:n,over:r}))},onDragEnd(e){let{active:n,over:r}=e;h(t.onDragEnd({active:n,over:r}))},onDragCancel(e){let{active:n,over:r}=e;h(t.onDragCancel({active:n,over:r}))}})),[h,t])),!v)return null;const m=r.createElement(r.Fragment,null,r.createElement(s,{id:a,value:l.draggable}),r.createElement(u,{id:g,announcement:p}));return n?(0,o.createPortal)(m,n):m}var p;function g(){}function v(e,t){return(0,r.useMemo)((()=>({sensor:e,options:null!=t?t:{}})),[e,t])}function y(){for(var e=arguments.length,t=new Array(e),n=0;n[...t].filter((e=>null!=e))),[...t])}!function(e){e.DragStart="dragStart",e.DragMove="dragMove",e.DragEnd="dragEnd",e.DragCancel="dragCancel",e.DragOver="dragOver",e.RegisterDroppable="registerDroppable",e.SetDroppableDisabled="setDroppableDisabled",e.UnregisterDroppable="unregisterDroppable"}(p||(p={}));const m=Object.freeze({x:0,y:0});function b(e,t){return Math.sqrt(Math.pow(e.x-t.x,2)+Math.pow(e.y-t.y,2))}function _(e,t){const n=(0,i.e_)(e);if(!n)return"0 0";return(n.x-t.left)/t.width*100+"% "+(n.y-t.top)/t.height*100+"%"}function w(e,t){let{data:{value:n}}=e,{data:{value:r}}=t;return n-r}function x(e,t){let{data:{value:n}}=e,{data:{value:r}}=t;return r-n}function S(e){let{left:t,top:n,height:r,width:o}=e;return[{x:t,y:n},{x:t+o,y:n},{x:t,y:n+r},{x:t+o,y:n+r}]}function A(e,t,n){return void 0===t&&(t=e.left),void 0===n&&(n=e.top),{x:t+.5*e.width,y:n+.5*e.height}}const E=e=>{let{collisionRect:t,droppableRects:n,droppableContainers:r}=e;const o=A(t,t.left,t.top),i=[];for(const a of r){const{id:e}=a,t=n.get(e);if(t){const n=b(A(t),o);i.push({id:e,data:{droppableContainer:a,value:n}})}}return i.sort(w)};function C(e,t){const n=Math.max(t.top,e.top),r=Math.max(t.left,e.left),o=Math.min(t.left+t.width,e.left+e.width),i=Math.min(t.top+t.height,e.top+e.height),a=o-r,s=i-n;if(r{let{collisionRect:t,droppableRects:n,droppableContainers:r}=e;const o=[];for(const i of r){const{id:e}=i,r=n.get(e);if(r){const n=C(r,t);n>0&&o.push({id:e,data:{droppableContainer:i,value:n}})}}return o.sort(x)};function O(e,t){const{top:n,left:r,bottom:o,right:i}=t;return n<=e.y&&e.y<=o&&r<=e.x&&e.x<=i}const T=e=>{let{droppableContainers:t,droppableRects:n,pointerCoordinates:r}=e;if(!r)return[];const o=[];for(const i of t){const{id:e}=i,t=n.get(e);if(t&&O(r,t)){const n=S(t).reduce(((e,t)=>e+b(r,t)),0),a=Number((n/4).toFixed(4));o.push({id:e,data:{droppableContainer:i,value:a}})}}return o.sort(w)};function k(e,t){return e&&t?{x:e.left-t.left,y:e.top-t.top}:m}function M(e){return function(t){for(var n=arguments.length,r=new Array(n>1?n-1:0),o=1;o({...t,top:t.top+e*n.y,bottom:t.bottom+e*n.y,left:t.left+e*n.x,right:t.right+e*n.x})),{...t})}}const P=M(1);function I(e){if(e.startsWith("matrix3d(")){const t=e.slice(9,-1).split(/, 
/);return{x:+t[12],y:+t[13],scaleX:+t[0],scaleY:+t[5]}}if(e.startsWith("matrix(")){const t=e.slice(7,-1).split(/, /);return{x:+t[4],y:+t[5],scaleX:+t[0],scaleY:+t[3]}}return null}const N={ignoreTransform:!1};function D(e,t){void 0===t&&(t=N);let n=e.getBoundingClientRect();if(t.ignoreTransform){const{transform:t,transformOrigin:r}=(0,i.zk)(e).getComputedStyle(e);t&&(n=function(e,t,n){const r=I(t);if(!r)return e;const{scaleX:o,scaleY:i,x:a,y:s}=r,u=e.left-a-(1-o)*parseFloat(n),l=e.top-s-(1-i)*parseFloat(n.slice(n.indexOf(" ")+1)),c=o?e.width/o:e.width,f=i?e.height/i:e.height;return{width:c,height:f,top:l,right:u+c,bottom:l+f,left:u}}(n,t,r))}const{top:r,left:o,width:a,height:s,bottom:u,right:l}=n;return{top:r,left:o,width:a,height:s,bottom:u,right:l}}function L(e){return D(e,{ignoreTransform:!0})}function F(e,t){const n=[];return e?function r(o){if(null!=t&&n.length>=t)return n;if(!o)return n;if((0,i.wz)(o)&&null!=o.scrollingElement&&!n.includes(o.scrollingElement))return n.push(o.scrollingElement),n;if(!(0,i.sb)(o)||(0,i.xZ)(o))return n;if(n.includes(o))return n;const a=(0,i.zk)(e).getComputedStyle(o);return o!==e&&function(e,t){void 0===t&&(t=(0,i.zk)(e).getComputedStyle(e));const n=/(auto|scroll|overlay)/;return["overflow","overflowX","overflowY"].some((e=>{const r=t[e];return"string"===typeof r&&n.test(r)}))}(o,a)&&n.push(o),function(e,t){return void 0===t&&(t=(0,i.zk)(e).getComputedStyle(e)),"fixed"===t.position}(o,a)?n:r(o.parentNode)}(e):n}function j(e){const[t]=F(e,1);return null!=t?t:null}function U(e){return i.Sw&&e?(0,i.l6)(e)?e:(0,i.Ll)(e)?(0,i.wz)(e)||e===(0,i.TW)(e).scrollingElement?window:(0,i.sb)(e)?e:null:null:null}function z(e){return(0,i.l6)(e)?e.scrollX:e.scrollLeft}function B(e){return(0,i.l6)(e)?e.scrollY:e.scrollTop}function V(e){return{x:z(e),y:B(e)}}var $;function H(e){return!(!i.Sw||!e)&&e===document.scrollingElement}function W(e){const t={x:0,y:0},n=H(e)?{height:window.innerHeight,width:window.innerWidth}:{height:e.clientHeight,width:e.clientWidth},r={x:e.scrollWidth-n.width,y:e.scrollHeight-n.height};return{isTop:e.scrollTop<=t.y,isLeft:e.scrollLeft<=t.x,isBottom:e.scrollTop>=r.y,isRight:e.scrollLeft>=r.x,maxScroll:r,minScroll:t}}!function(e){e[e.Forward=1]="Forward",e[e.Backward=-1]="Backward"}($||($={}));const q={x:.2,y:.2};function G(e,t,n,r,o){let{top:i,left:a,right:s,bottom:u}=n;void 0===r&&(r=10),void 0===o&&(o=q);const{isTop:l,isBottom:c,isLeft:f,isRight:d}=W(e),h={x:0,y:0},p={x:0,y:0},g=t.height*o.y,v=t.width*o.x;return!l&&i<=t.top+g?(h.y=$.Backward,p.y=r*Math.abs((t.top+g-i)/g)):!c&&u>=t.bottom-g&&(h.y=$.Forward,p.y=r*Math.abs((t.bottom-g-u)/g)),!d&&s>=t.right-v?(h.x=$.Forward,p.x=r*Math.abs((t.right-v-s)/v)):!f&&a<=t.left+v&&(h.x=$.Backward,p.x=r*Math.abs((t.left+v-a)/v)),{direction:h,speed:p}}function Y(e){if(e===document.scrollingElement){const{innerWidth:e,innerHeight:t}=window;return{top:0,left:0,right:e,bottom:t,width:e,height:t}}const{top:t,left:n,right:r,bottom:o}=e.getBoundingClientRect();return{top:t,left:n,right:r,bottom:o,width:e.clientWidth,height:e.clientHeight}}function X(e){return e.reduce(((e,t)=>(0,i.WQ)(e,V(t))),m)}function K(e,t){if(void 0===t&&(t=D),!e)return;const{top:n,left:r,bottom:o,right:i}=t(e);j(e)&&(o<=0||i<=0||n>=window.innerHeight||r>=window.innerWidth)&&e.scrollIntoView({block:"center",inline:"center"})}const Z=[["x",["left","right"],function(e){return e.reduce(((e,t)=>e+z(t)),0)}],["y",["top","bottom"],function(e){return e.reduce(((e,t)=>e+B(t)),0)}]];class Q{constructor(e,t){this.rect=void 0,this.width=void 
0,this.height=void 0,this.top=void 0,this.bottom=void 0,this.right=void 0,this.left=void 0;const n=F(t),r=X(n);this.rect={...e},this.width=e.width,this.height=e.height;for(const[o,i,a]of Z)for(const e of i)Object.defineProperty(this,e,{get:()=>{const t=a(n),i=r[o]-t;return this.rect[e]+i},enumerable:!0});Object.defineProperty(this,"rect",{enumerable:!1})}}class J{constructor(e){this.target=void 0,this.listeners=[],this.removeAll=()=>{this.listeners.forEach((e=>{var t;return null==(t=this.target)?void 0:t.removeEventListener(...e)}))},this.target=e}add(e,t,n){var r;null==(r=this.target)||r.addEventListener(e,t,n),this.listeners.push([e,t,n])}}function ee(e,t){const n=Math.abs(e.x),r=Math.abs(e.y);return"number"===typeof t?Math.sqrt(n**2+r**2)>t:"x"in t&&"y"in t?n>t.x&&r>t.y:"x"in t?n>t.x:"y"in t&&r>t.y}var te,ne;function re(e){e.preventDefault()}function oe(e){e.stopPropagation()}!function(e){e.Click="click",e.DragStart="dragstart",e.Keydown="keydown",e.ContextMenu="contextmenu",e.Resize="resize",e.SelectionChange="selectionchange",e.VisibilityChange="visibilitychange"}(te||(te={})),function(e){e.Space="Space",e.Down="ArrowDown",e.Right="ArrowRight",e.Left="ArrowLeft",e.Up="ArrowUp",e.Esc="Escape",e.Enter="Enter"}(ne||(ne={}));const ie={start:[ne.Space,ne.Enter],cancel:[ne.Esc],end:[ne.Space,ne.Enter]},ae=(e,t)=>{let{currentCoordinates:n}=t;switch(e.code){case ne.Right:return{...n,x:n.x+25};case ne.Left:return{...n,x:n.x-25};case ne.Down:return{...n,y:n.y+25};case ne.Up:return{...n,y:n.y-25}}};class se{constructor(e){this.props=void 0,this.autoScrollEnabled=!1,this.referenceCoordinates=void 0,this.listeners=void 0,this.windowListeners=void 0,this.props=e;const{event:{target:t}}=e;this.props=e,this.listeners=new J((0,i.TW)(t)),this.windowListeners=new J((0,i.zk)(t)),this.handleKeyDown=this.handleKeyDown.bind(this),this.handleCancel=this.handleCancel.bind(this),this.attach()}attach(){this.handleStart(),this.windowListeners.add(te.Resize,this.handleCancel),this.windowListeners.add(te.VisibilityChange,this.handleCancel),setTimeout((()=>this.listeners.add(te.Keydown,this.handleKeyDown)))}handleStart(){const{activeNode:e,onStart:t}=this.props,n=e.node.current;n&&K(n),t(m)}handleKeyDown(e){if((0,i.kx)(e)){const{active:t,context:n,options:r}=this.props,{keyboardCodes:o=ie,coordinateGetter:a=ae,scrollBehavior:s="smooth"}=r,{code:u}=e;if(o.end.includes(u))return void this.handleEnd(e);if(o.cancel.includes(u))return void this.handleCancel(e);const{collisionRect:l}=n.current,c=l?{x:l.left,y:l.top}:m;this.referenceCoordinates||(this.referenceCoordinates=c);const f=a(e,{active:t,context:n.current,currentCoordinates:c});if(f){const t=(0,i.Re)(f,c),r={x:0,y:0},{scrollableAncestors:o}=n.current;for(const n of o){const o=e.code,{isTop:i,isRight:a,isLeft:u,isBottom:l,maxScroll:c,minScroll:d}=W(n),h=Y(n),p={x:Math.min(o===ne.Right?h.right-h.width/2:h.right,Math.max(o===ne.Right?h.left:h.left+h.width/2,f.x)),y:Math.min(o===ne.Down?h.bottom-h.height/2:h.bottom,Math.max(o===ne.Down?h.top:h.top+h.height/2,f.y))},g=o===ne.Right&&!a||o===ne.Left&&!u,v=o===ne.Down&&!l||o===ne.Up&&!i;if(g&&p.x!==f.x){const e=n.scrollLeft+t.x,i=o===ne.Right&&e<=c.x||o===ne.Left&&e>=d.x;if(i&&!t.y)return void n.scrollTo({left:e,behavior:s});r.x=i?n.scrollLeft-e:o===ne.Right?n.scrollLeft-c.x:n.scrollLeft-d.x,r.x&&n.scrollBy({left:-r.x,behavior:s});break}if(v&&p.y!==f.y){const e=n.scrollTop+t.y,i=o===ne.Down&&e<=c.y||o===ne.Up&&e>=d.y;if(i&&!t.x)return void 
n.scrollTo({top:e,behavior:s});r.y=i?n.scrollTop-e:o===ne.Down?n.scrollTop-c.y:n.scrollTop-d.y,r.y&&n.scrollBy({top:-r.y,behavior:s});break}}this.handleMove(e,(0,i.WQ)((0,i.Re)(f,this.referenceCoordinates),r))}}}handleMove(e,t){const{onMove:n}=this.props;e.preventDefault(),n(t)}handleEnd(e){const{onEnd:t}=this.props;e.preventDefault(),this.detach(),t()}handleCancel(e){const{onCancel:t}=this.props;e.preventDefault(),this.detach(),t()}detach(){this.listeners.removeAll(),this.windowListeners.removeAll()}}function ue(e){return Boolean(e&&"distance"in e)}function le(e){return Boolean(e&&"delay"in e)}se.activators=[{eventName:"onKeyDown",handler:(e,t,n)=>{let{keyboardCodes:r=ie,onActivation:o}=t,{active:i}=n;const{code:a}=e.nativeEvent;if(r.start.includes(a)){const t=i.activatorNode.current;return(!t||e.target===t)&&(e.preventDefault(),null==o||o({event:e.nativeEvent}),!0)}return!1}}];class ce{constructor(e,t,n){var r;void 0===n&&(n=function(e){const{EventTarget:t}=(0,i.zk)(e);return e instanceof t?e:(0,i.TW)(e)}(e.event.target)),this.props=void 0,this.events=void 0,this.autoScrollEnabled=!0,this.document=void 0,this.activated=!1,this.initialCoordinates=void 0,this.timeoutId=null,this.listeners=void 0,this.documentListeners=void 0,this.windowListeners=void 0,this.props=e,this.events=t;const{event:o}=e,{target:a}=o;this.props=e,this.events=t,this.document=(0,i.TW)(a),this.documentListeners=new J(this.document),this.listeners=new J(n),this.windowListeners=new J((0,i.zk)(a)),this.initialCoordinates=null!=(r=(0,i.e_)(o))?r:m,this.handleStart=this.handleStart.bind(this),this.handleMove=this.handleMove.bind(this),this.handleEnd=this.handleEnd.bind(this),this.handleCancel=this.handleCancel.bind(this),this.handleKeydown=this.handleKeydown.bind(this),this.removeTextSelection=this.removeTextSelection.bind(this),this.attach()}attach(){const{events:e,props:{options:{activationConstraint:t,bypassActivationConstraint:n}}}=this;if(this.listeners.add(e.move.name,this.handleMove,{passive:!1}),this.listeners.add(e.end.name,this.handleEnd),this.windowListeners.add(te.Resize,this.handleCancel),this.windowListeners.add(te.DragStart,re),this.windowListeners.add(te.VisibilityChange,this.handleCancel),this.windowListeners.add(te.ContextMenu,re),this.documentListeners.add(te.Keydown,this.handleKeydown),t){if(null!=n&&n({event:this.props.event,activeNode:this.props.activeNode,options:this.props.options}))return this.handleStart();if(le(t))return void(this.timeoutId=setTimeout(this.handleStart,t.delay));if(ue(t))return}this.handleStart()}detach(){this.listeners.removeAll(),this.windowListeners.removeAll(),setTimeout(this.documentListeners.removeAll,50),null!==this.timeoutId&&(clearTimeout(this.timeoutId),this.timeoutId=null)}handleStart(){const{initialCoordinates:e}=this,{onStart:t}=this.props;e&&(this.activated=!0,this.documentListeners.add(te.Click,oe,{capture:!0}),this.removeTextSelection(),this.documentListeners.add(te.SelectionChange,this.removeTextSelection),t(e))}handleMove(e){var t;const{activated:n,initialCoordinates:r,props:o}=this,{onMove:a,options:{activationConstraint:s}}=o;if(!r)return;const u=null!=(t=(0,i.e_)(e))?t:m,l=(0,i.Re)(r,u);if(!n&&s){if(ue(s)){if(null!=s.tolerance&&ee(l,s.tolerance))return this.handleCancel();if(ee(l,s.distance))return this.handleStart()}return le(s)&&ee(l,s.tolerance)?this.handleCancel():void 
0}e.cancelable&&e.preventDefault(),a(u)}handleEnd(){const{onEnd:e}=this.props;this.detach(),e()}handleCancel(){const{onCancel:e}=this.props;this.detach(),e()}handleKeydown(e){e.code===ne.Esc&&this.handleCancel()}removeTextSelection(){var e;null==(e=this.document.getSelection())||e.removeAllRanges()}}const fe={move:{name:"pointermove"},end:{name:"pointerup"}};class de extends ce{constructor(e){const{event:t}=e,n=(0,i.TW)(t.target);super(e,fe,n)}}de.activators=[{eventName:"onPointerDown",handler:(e,t)=>{let{nativeEvent:n}=e,{onActivation:r}=t;return!(!n.isPrimary||0!==n.button)&&(null==r||r({event:n}),!0)}}];const he={move:{name:"mousemove"},end:{name:"mouseup"}};var pe;!function(e){e[e.RightClick=2]="RightClick"}(pe||(pe={}));class ge extends ce{constructor(e){super(e,he,(0,i.TW)(e.event.target))}}ge.activators=[{eventName:"onMouseDown",handler:(e,t)=>{let{nativeEvent:n}=e,{onActivation:r}=t;return n.button!==pe.RightClick&&(null==r||r({event:n}),!0)}}];const ve={move:{name:"touchmove"},end:{name:"touchend"}};class ye extends ce{constructor(e){super(e,ve)}static setup(){return window.addEventListener(ve.move.name,e,{capture:!1,passive:!1}),function(){window.removeEventListener(ve.move.name,e)};function e(){}}}var me,be;function _e(e){let{acceleration:t,activator:n=me.Pointer,canScroll:o,draggingRect:a,enabled:s,interval:u=5,order:l=be.TreeOrder,pointerCoordinates:c,scrollableAncestors:f,scrollableAncestorRects:d,delta:h,threshold:p}=e;const g=function(e){let{delta:t,disabled:n}=e;const r=(0,i.ZC)(t);return(0,i.KG)((e=>{if(n||!r||!e)return we;const o={x:Math.sign(t.x-r.x),y:Math.sign(t.y-r.y)};return{x:{[$.Backward]:e.x[$.Backward]||-1===o.x,[$.Forward]:e.x[$.Forward]||1===o.x},y:{[$.Backward]:e.y[$.Backward]||-1===o.y,[$.Forward]:e.y[$.Forward]||1===o.y}}}),[n,t,r])}({delta:h,disabled:!s}),[v,y]=(0,i.$$)(),m=(0,r.useRef)({x:0,y:0}),b=(0,r.useRef)({x:0,y:0}),_=(0,r.useMemo)((()=>{switch(n){case me.Pointer:return c?{top:c.y,bottom:c.y,left:c.x,right:c.x}:null;case me.DraggableRect:return a}}),[n,a,c]),w=(0,r.useRef)(null),x=(0,r.useCallback)((()=>{const e=w.current;if(!e)return;const t=m.current.x*b.current.x,n=m.current.y*b.current.y;e.scrollBy(t,n)}),[]),S=(0,r.useMemo)((()=>l===be.TreeOrder?[...f].reverse():f),[l,f]);(0,r.useEffect)((()=>{if(s&&f.length&&_){for(const e of S){if(!1===(null==o?void 0:o(e)))continue;const n=f.indexOf(e),r=d[n];if(!r)continue;const{direction:i,speed:a}=G(e,r,_,t,p);for(const e of["x","y"])g[e][i[e]]||(a[e]=0,i[e]=0);if(a.x>0||a.y>0)return y(),w.current=e,v(x,u),m.current=a,void(b.current=i)}m.current={x:0,y:0},b.current={x:0,y:0},y()}else y()}),[t,x,o,y,s,u,JSON.stringify(_),JSON.stringify(g),v,f,S,d,JSON.stringify(p)])}ye.activators=[{eventName:"onTouchStart",handler:(e,t)=>{let{nativeEvent:n}=e,{onActivation:r}=t;const{touches:o}=n;return!(o.length>1)&&(null==r||r({event:n}),!0)}}],function(e){e[e.Pointer=0]="Pointer",e[e.DraggableRect=1]="DraggableRect"}(me||(me={})),function(e){e[e.TreeOrder=0]="TreeOrder",e[e.ReversedTreeOrder=1]="ReversedTreeOrder"}(be||(be={}));const we={x:{[$.Backward]:!1,[$.Forward]:!1},y:{[$.Backward]:!1,[$.Forward]:!1}};var xe,Se;!function(e){e[e.Always=0]="Always",e[e.BeforeDragging=1]="BeforeDragging",e[e.WhileDragging=2]="WhileDragging"}(xe||(xe={})),function(e){e.Optimized="optimized"}(Se||(Se={}));const Ae=new Map;function Ee(e,t){return(0,i.KG)((n=>e?n||("function"===typeof t?t(e):e):null),[t,e])}function Ce(e){let{callback:t,disabled:n}=e;const o=(0,i._q)(t),a=(0,r.useMemo)((()=>{if(n||"undefined"===typeof 
window||"undefined"===typeof window.ResizeObserver)return;const{ResizeObserver:e}=window;return new e(o)}),[n]);return(0,r.useEffect)((()=>()=>null==a?void 0:a.disconnect()),[a]),a}function Re(e){return new Q(D(e),e)}function Oe(e,t,n){void 0===t&&(t=Re);const[o,a]=(0,r.useReducer)((function(r){if(!e)return null;var o;if(!1===e.isConnected)return null!=(o=null!=r?r:n)?o:null;const i=t(e);if(JSON.stringify(r)===JSON.stringify(i))return r;return i}),null),s=function(e){let{callback:t,disabled:n}=e;const o=(0,i._q)(t),a=(0,r.useMemo)((()=>{if(n||"undefined"===typeof window||"undefined"===typeof window.MutationObserver)return;const{MutationObserver:e}=window;return new e(o)}),[o,n]);return(0,r.useEffect)((()=>()=>null==a?void 0:a.disconnect()),[a]),a}({callback(t){if(e)for(const n of t){const{type:t,target:r}=n;if("childList"===t&&r instanceof HTMLElement&&r.contains(e)){a();break}}}}),u=Ce({callback:a});return(0,i.Es)((()=>{a(),e?(null==u||u.observe(e),null==s||s.observe(document.body,{childList:!0,subtree:!0})):(null==u||u.disconnect(),null==s||s.disconnect())}),[e]),o}const Te=[];function ke(e,t){void 0===t&&(t=[]);const n=(0,r.useRef)(null);return(0,r.useEffect)((()=>{n.current=null}),t),(0,r.useEffect)((()=>{const t=e!==m;t&&!n.current&&(n.current=e),!t&&n.current&&(n.current=null)}),[e]),n.current?(0,i.Re)(e,n.current):m}function Me(e){return(0,r.useMemo)((()=>e?function(e){const t=e.innerWidth,n=e.innerHeight;return{top:0,left:0,right:t,bottom:n,width:t,height:n}}(e):null),[e])}const Pe=[];function Ie(e){if(!e)return null;if(e.children.length>1)return e;const t=e.children[0];return(0,i.sb)(t)?t:e}const Ne=[{sensor:de,options:{}},{sensor:se,options:{}}],De={current:{}},Le={draggable:{measure:L},droppable:{measure:L,strategy:xe.WhileDragging,frequency:Se.Optimized},dragOverlay:{measure:D}};class Fe extends Map{get(e){var t;return null!=e&&null!=(t=super.get(e))?t:void 0}toArray(){return Array.from(this.values())}getEnabled(){return this.toArray().filter((e=>{let{disabled:t}=e;return!t}))}getNodeFor(e){var t,n;return null!=(t=null==(n=this.get(e))?void 0:n.node.current)?t:void 0}}const je={activatorEvent:null,active:null,activeNode:null,activeNodeRect:null,collisions:null,containerNodeRect:null,draggableNodes:new Map,droppableRects:new Map,droppableContainers:new Fe,over:null,dragOverlay:{nodeRef:{current:null},rect:null,setRef:g},scrollableAncestors:[],scrollableAncestorRects:[],measuringConfiguration:Le,measureDroppableContainers:g,windowRect:null,measuringScheduled:!1},Ue={activatorEvent:null,activators:[],active:null,activeNodeRect:null,ariaDescribedById:{draggable:""},dispatch:g,draggableNodes:new Map,over:null,measureDroppableContainers:g},ze=(0,r.createContext)(Ue),Be=(0,r.createContext)(je);function Ve(){return{draggable:{active:null,initialCoordinates:{x:0,y:0},nodes:new Map,translate:{x:0,y:0}},droppable:{containers:new Fe}}}function $e(e,t){switch(t.type){case p.DragStart:return{...e,draggable:{...e.draggable,initialCoordinates:t.initialCoordinates,active:t.active}};case p.DragMove:return e.draggable.active?{...e,draggable:{...e.draggable,translate:{x:t.coordinates.x-e.draggable.initialCoordinates.x,y:t.coordinates.y-e.draggable.initialCoordinates.y}}}:e;case p.DragEnd:case p.DragCancel:return{...e,draggable:{...e.draggable,active:null,initialCoordinates:{x:0,y:0},translate:{x:0,y:0}}};case p.RegisterDroppable:{const{element:n}=t,{id:r}=n,o=new Fe(e.droppable.containers);return o.set(r,n),{...e,droppable:{...e.droppable,containers:o}}}case 
p.SetDroppableDisabled:{const{id:n,key:r,disabled:o}=t,i=e.droppable.containers.get(n);if(!i||r!==i.key)return e;const a=new Fe(e.droppable.containers);return a.set(n,{...i,disabled:o}),{...e,droppable:{...e.droppable,containers:a}}}case p.UnregisterDroppable:{const{id:n,key:r}=t,o=e.droppable.containers.get(n);if(!o||r!==o.key)return e;const i=new Fe(e.droppable.containers);return i.delete(n),{...e,droppable:{...e.droppable,containers:i}}}default:return e}}function He(e){let{disabled:t}=e;const{active:n,activatorEvent:o,draggableNodes:a}=(0,r.useContext)(ze),s=(0,i.ZC)(o),u=(0,i.ZC)(null==n?void 0:n.id);return(0,r.useEffect)((()=>{if(!t&&!o&&s&&null!=u){if(!(0,i.kx)(s))return;if(document.activeElement===s.target)return;const e=a.get(u);if(!e)return;const{activatorNode:t,node:n}=e;if(!t.current&&!n.current)return;requestAnimationFrame((()=>{for(const e of[t.current,n.current]){if(!e)continue;const t=(0,i.ag)(e);if(t){t.focus();break}}}))}}),[o,t,a,u,s]),null}function We(e,t){let{transform:n,...r}=t;return null!=e&&e.length?e.reduce(((e,t)=>t({transform:e,...r})),n):n}const qe=(0,r.createContext)({...m,scaleX:1,scaleY:1});var Ge;!function(e){e[e.Uninitialized=0]="Uninitialized",e[e.Initializing=1]="Initializing",e[e.Initialized=2]="Initialized"}(Ge||(Ge={}));const Ye=(0,r.memo)((function(e){var t,n,a,s;let{id:u,accessibility:c,autoScroll:f=!0,children:d,sensors:g=Ne,collisionDetection:v=R,measuring:y,modifiers:b,..._}=e;const w=(0,r.useReducer)($e,void 0,Ve),[x,S]=w,[A,E]=function(){const[e]=(0,r.useState)((()=>new Set)),t=(0,r.useCallback)((t=>(e.add(t),()=>e.delete(t))),[e]);return[(0,r.useCallback)((t=>{let{type:n,event:r}=t;e.forEach((e=>{var t;return null==(t=e[n])?void 0:t.call(e,r)}))}),[e]),t]}(),[C,O]=(0,r.useState)(Ge.Uninitialized),T=C===Ge.Initialized,{draggable:{active:M,nodes:I,translate:N},droppable:{containers:L}}=x,z=M?I.get(M):null,B=(0,r.useRef)({initial:null,translated:null}),$=(0,r.useMemo)((()=>{var e;return null!=M?{id:M,data:null!=(e=null==z?void 0:z.data)?e:De,rect:B}:null}),[M,z]),W=(0,r.useRef)(null),[q,G]=(0,r.useState)(null),[Y,K]=(0,r.useState)(null),Z=(0,i.YN)(_,Object.values(_)),J=(0,i.YG)("DndDescribedBy",u),ee=(0,r.useMemo)((()=>L.getEnabled()),[L]),te=(ne=y,(0,r.useMemo)((()=>({draggable:{...Le.draggable,...null==ne?void 0:ne.draggable},droppable:{...Le.droppable,...null==ne?void 0:ne.droppable},dragOverlay:{...Le.dragOverlay,...null==ne?void 0:ne.dragOverlay}})),[null==ne?void 0:ne.draggable,null==ne?void 0:ne.droppable,null==ne?void 0:ne.dragOverlay]));var ne;const{droppableRects:re,measureDroppableContainers:oe,measuringScheduled:ie}=function(e,t){let{dragging:n,dependencies:o,config:a}=t;const[s,u]=(0,r.useState)(null),{frequency:l,measure:c,strategy:f}=a,d=(0,r.useRef)(e),h=function(){switch(f){case xe.Always:return!1;case xe.BeforeDragging:return n;default:return!n}}(),p=(0,i.YN)(h),g=(0,r.useCallback)((function(e){void 0===e&&(e=[]),p.current||u((t=>null===t?e:t.concat(e.filter((e=>!t.includes(e))))))}),[p]),v=(0,r.useRef)(null),y=(0,i.KG)((t=>{if(h&&!n)return Ae;if(!t||t===Ae||d.current!==e||null!=s){const t=new Map;for(let n of e){if(!n)continue;if(s&&s.length>0&&!s.includes(n.id)&&n.rect.current){t.set(n.id,n.rect.current);continue}const e=n.node.current,r=e?new Q(c(e),e):null;n.rect.current=r,r&&t.set(n.id,r)}return t}return t}),[e,s,n,h,c]);return(0,r.useEffect)((()=>{d.current=e}),[e]),(0,r.useEffect)((()=>{h||g()}),[n,h]),(0,r.useEffect)((()=>{s&&s.length>0&&u(null)}),[JSON.stringify(s)]),(0,r.useEffect)((()=>{h||"number"!==typeof 
l||null!==v.current||(v.current=setTimeout((()=>{g(),v.current=null}),l))}),[l,h,g,...o]),{droppableRects:y,measureDroppableContainers:g,measuringScheduled:null!=s}}(ee,{dragging:T,dependencies:[N.x,N.y],config:te.droppable}),ae=function(e,t){const n=null!==t?e.get(t):void 0,r=n?n.node.current:null;return(0,i.KG)((e=>{var n;return null===t?null:null!=(n=null!=r?r:e)?n:null}),[r,t])}(I,M),se=(0,r.useMemo)((()=>Y?(0,i.e_)(Y):null),[Y]),ue=function(){const e=!1===(null==q?void 0:q.autoScrollEnabled),t="object"===typeof f?!1===f.enabled:!1===f,n=T&&!e&&!t;if("object"===typeof f)return{...f,enabled:n};return{enabled:n}}(),le=function(e,t){return Ee(e,t)}(ae,te.draggable.measure);!function(e){let{activeNode:t,measure:n,initialRect:o,config:a=!0}=e;const s=(0,r.useRef)(!1),{x:u,y:l}="boolean"===typeof a?{x:a,y:a}:a;(0,i.Es)((()=>{if(!u&&!l||!t)return void(s.current=!1);if(s.current||!o)return;const e=null==t?void 0:t.node.current;if(!e||!1===e.isConnected)return;const r=k(n(e),o);if(u||(r.x=0),l||(r.y=0),s.current=!0,Math.abs(r.x)>0||Math.abs(r.y)>0){const t=j(e);t&&t.scrollBy({top:r.y,left:r.x})}}),[t,u,l,o,n])}({activeNode:M?I.get(M):null,config:ue.layoutShiftCompensation,initialRect:le,measure:te.draggable.measure});const ce=Oe(ae,te.draggable.measure,le),fe=Oe(ae?ae.parentElement:null),de=(0,r.useRef)({activatorEvent:null,active:null,activeNode:ae,collisionRect:null,collisions:null,droppableRects:re,draggableNodes:I,draggingNode:null,draggingNodeRect:null,droppableContainers:L,over:null,scrollableAncestors:[],scrollAdjustedTranslate:null}),he=L.getNodeFor(null==(t=de.current.over)?void 0:t.id),pe=function(e){let{measure:t}=e;const[n,o]=(0,r.useState)(null),a=Ce({callback:(0,r.useCallback)((e=>{for(const{target:n}of e)if((0,i.sb)(n)){o((e=>{const r=t(n);return e?{...e,width:r.width,height:r.height}:r}));break}}),[t])}),s=(0,r.useCallback)((e=>{const n=Ie(e);null==a||a.disconnect(),n&&(null==a||a.observe(n)),o(n?t(n):null)}),[t,a]),[u,l]=(0,i.lk)(s);return(0,r.useMemo)((()=>({nodeRef:u,rect:n,setRef:l})),[n,u,l])}({measure:te.dragOverlay.measure}),ge=null!=(n=pe.nodeRef.current)?n:ae,ve=T?null!=(a=pe.rect)?a:ce:null,ye=Boolean(pe.nodeRef.current&&pe.rect),me=k(be=ye?null:ce,Ee(be));var be;const we=Me(ge?(0,i.zk)(ge):null),Se=function(e){const t=(0,r.useRef)(e),n=(0,i.KG)((n=>e?n&&n!==Te&&e&&t.current&&e.parentNode===t.current.parentNode?n:F(e):Te),[e]);return(0,r.useEffect)((()=>{t.current=e}),[e]),n}(T?null!=he?he:ae:null),Re=function(e,t){void 0===t&&(t=D);const[n]=e,o=Me(n?(0,i.zk)(n):null),[a,s]=(0,r.useReducer)((function(){return e.length?e.map((e=>H(e)?o:new Q(t(e),e))):Pe}),Pe),u=Ce({callback:s});return e.length>0&&a===Pe&&s(),(0,i.Es)((()=>{e.length?e.forEach((e=>null==u?void 0:u.observe(e))):(null==u||u.disconnect(),s())}),[e]),a}(Se),Fe=We(b,{transform:{x:N.x-me.x,y:N.y-me.y,scaleX:1,scaleY:1},activatorEvent:Y,active:$,activeNodeRect:ce,containerNodeRect:fe,draggingNodeRect:ve,over:de.current.over,overlayNodeRect:pe.rect,scrollableAncestors:Se,scrollableAncestorRects:Re,windowRect:we}),je=se?(0,i.WQ)(se,N):null,Ue=function(e){const[t,n]=(0,r.useState)(null),o=(0,r.useRef)(e),a=(0,r.useCallback)((e=>{const t=U(e.target);t&&n((e=>e?(e.set(t,V(t)),new Map(e)):null))}),[]);return(0,r.useEffect)((()=>{const t=o.current;if(e!==t){r(t);const i=e.map((e=>{const t=U(e);return t?(t.addEventListener("scroll",a,{passive:!0}),[t,V(t)]):null})).filter((e=>null!=e));n(i.length?new Map(i):null),o.current=e}return()=>{r(e),r(t)};function r(e){e.forEach((e=>{const 
t=U(e);null==t||t.removeEventListener("scroll",a)}))}}),[a,e]),(0,r.useMemo)((()=>e.length?t?Array.from(t.values()).reduce(((e,t)=>(0,i.WQ)(e,t)),m):X(e):m),[e,t])}(Se),Ye=ke(Ue),Xe=ke(Ue,[ce]),Ke=(0,i.WQ)(Fe,Ye),Ze=ve?P(ve,Fe):null,Qe=$&&Ze?v({active:$,collisionRect:Ze,droppableRects:re,droppableContainers:ee,pointerCoordinates:je}):null,Je=function(e,t){if(!e||0===e.length)return null;const[n]=e;return t?n[t]:n}(Qe,"id"),[et,tt]=(0,r.useState)(null),nt=function(e,t,n){return{...e,scaleX:t&&n?t.width/n.width:1,scaleY:t&&n?t.height/n.height:1}}(ye?Fe:(0,i.WQ)(Fe,Xe),null!=(s=null==et?void 0:et.rect)?s:null,ce),rt=(0,r.useCallback)(((e,t)=>{let{sensor:n,options:r}=t;if(null==W.current)return;const i=I.get(W.current);if(!i)return;const a=e.nativeEvent,s=new n({active:W.current,activeNode:i,event:a,options:r,context:de,onStart(e){const t=W.current;if(null==t)return;const n=I.get(t);if(!n)return;const{onDragStart:r}=Z.current,i={active:{id:t,data:n.data,rect:B}};(0,o.unstable_batchedUpdates)((()=>{null==r||r(i),O(Ge.Initializing),S({type:p.DragStart,initialCoordinates:e,active:t}),A({type:"onDragStart",event:i})}))},onMove(e){S({type:p.DragMove,coordinates:e})},onEnd:u(p.DragEnd),onCancel:u(p.DragCancel)});function u(e){return async function(){const{active:t,collisions:n,over:r,scrollAdjustedTranslate:i}=de.current;let s=null;if(t&&i){const{cancelDrop:o}=Z.current;if(s={activatorEvent:a,active:t,collisions:n,delta:i,over:r},e===p.DragEnd&&"function"===typeof o){await Promise.resolve(o(s))&&(e=p.DragCancel)}}W.current=null,(0,o.unstable_batchedUpdates)((()=>{S({type:e}),O(Ge.Uninitialized),tt(null),G(null),K(null);const t=e===p.DragEnd?"onDragEnd":"onDragCancel";if(s){const e=Z.current[t];null==e||e(s),A({type:t,event:s})}}))}}(0,o.unstable_batchedUpdates)((()=>{G(s),K(e.nativeEvent)}))}),[I]),ot=(0,r.useCallback)(((e,t)=>(n,r)=>{const o=n.nativeEvent,i=I.get(r);if(null!==W.current||!i||o.dndKit||o.defaultPrevented)return;const a={active:i};!0===e(n,t.options,a)&&(o.dndKit={capturedBy:t.sensor},W.current=r,rt(n,t))}),[I,rt]),it=function(e,t){return(0,r.useMemo)((()=>e.reduce(((e,n)=>{const{sensor:r}=n;return[...e,...r.activators.map((e=>({eventName:e.eventName,handler:t(e.handler,n)})))]}),[])),[e,t])}(g,ot);!function(e){(0,r.useEffect)((()=>{if(!i.Sw)return;const t=e.map((e=>{let{sensor:t}=e;return null==t.setup?void 0:t.setup()}));return()=>{for(const e of t)null==e||e()}}),e.map((e=>{let{sensor:t}=e;return t})))}(g),(0,i.Es)((()=>{ce&&C===Ge.Initializing&&O(Ge.Initialized)}),[ce,C]),(0,r.useEffect)((()=>{const{onDragMove:e}=Z.current,{active:t,activatorEvent:n,collisions:r,over:i}=de.current;if(!t||!n)return;const 
a={active:t,activatorEvent:n,collisions:r,delta:{x:Ke.x,y:Ke.y},over:i};(0,o.unstable_batchedUpdates)((()=>{null==e||e(a),A({type:"onDragMove",event:a})}))}),[Ke.x,Ke.y]),(0,r.useEffect)((()=>{const{active:e,activatorEvent:t,collisions:n,droppableContainers:r,scrollAdjustedTranslate:i}=de.current;if(!e||null==W.current||!t||!i)return;const{onDragOver:a}=Z.current,s=r.get(Je),u=s&&s.rect.current?{id:s.id,rect:s.rect.current,data:s.data,disabled:s.disabled}:null,l={active:e,activatorEvent:t,collisions:n,delta:{x:i.x,y:i.y},over:u};(0,o.unstable_batchedUpdates)((()=>{tt(u),null==a||a(l),A({type:"onDragOver",event:l})}))}),[Je]),(0,i.Es)((()=>{de.current={activatorEvent:Y,active:$,activeNode:ae,collisionRect:Ze,collisions:Qe,droppableRects:re,draggableNodes:I,draggingNode:ge,draggingNodeRect:ve,droppableContainers:L,over:et,scrollableAncestors:Se,scrollAdjustedTranslate:Ke},B.current={initial:ve,translated:Ze}}),[$,ae,Qe,Ze,I,ge,ve,re,L,et,Se,Ke]),_e({...ue,delta:N,draggingRect:Ze,pointerCoordinates:je,scrollableAncestors:Se,scrollableAncestorRects:Re});const at=(0,r.useMemo)((()=>({active:$,activeNode:ae,activeNodeRect:ce,activatorEvent:Y,collisions:Qe,containerNodeRect:fe,dragOverlay:pe,draggableNodes:I,droppableContainers:L,droppableRects:re,over:et,measureDroppableContainers:oe,scrollableAncestors:Se,scrollableAncestorRects:Re,measuringConfiguration:te,measuringScheduled:ie,windowRect:we})),[$,ae,ce,Y,Qe,fe,pe,I,L,re,et,oe,Se,Re,te,ie,we]),st=(0,r.useMemo)((()=>({activatorEvent:Y,activators:it,active:$,activeNodeRect:ce,ariaDescribedById:{draggable:J},dispatch:S,draggableNodes:I,over:et,measureDroppableContainers:oe})),[Y,it,$,ce,S,J,I,et,oe]);return r.createElement(l.Provider,{value:E},r.createElement(ze.Provider,{value:st},r.createElement(Be.Provider,{value:at},r.createElement(qe.Provider,{value:nt},d)),r.createElement(He,{disabled:!1===(null==c?void 0:c.restoreFocus)})),r.createElement(h,{...c,hiddenTextDescribedById:J}))})),Xe=(0,r.createContext)(null),Ke="button",Ze="Droppable";function Qe(e){let{id:t,data:n,disabled:o=!1,attributes:a}=e;const s=(0,i.YG)(Ze),{activators:u,activatorEvent:l,active:c,activeNodeRect:f,ariaDescribedById:d,draggableNodes:h,over:p}=(0,r.useContext)(ze),{role:g=Ke,roleDescription:v="draggable",tabIndex:y=0}=null!=a?a:{},m=(null==c?void 0:c.id)===t,b=(0,r.useContext)(m?qe:Xe),[_,w]=(0,i.lk)(),[x,S]=(0,i.lk)(),A=function(e,t){return(0,r.useMemo)((()=>e.reduce(((e,n)=>{let{eventName:r,handler:o}=n;return e[r]=e=>{o(e,t)},e}),{})),[e,t])}(u,t),E=(0,i.YN)(n);(0,i.Es)((()=>(h.set(t,{id:t,key:s,node:_,activatorNode:x,data:E}),()=>{const e=h.get(t);e&&e.key===s&&h.delete(t)})),[h,t]);return{active:c,activatorEvent:l,activeNodeRect:f,attributes:(0,r.useMemo)((()=>({role:g,tabIndex:y,"aria-disabled":o,"aria-pressed":!(!m||g!==Ke)||void 0,"aria-roledescription":v,"aria-describedby":d.draggable})),[o,g,y,m,v,d.draggable]),isDragging:m,listeners:o?void 0:A,node:_,over:p,setNodeRef:w,setActivatorNodeRef:S,transform:b}}function Je(){return(0,r.useContext)(Be)}const et="Droppable",tt={timeout:25};function nt(e){let{data:t,disabled:n=!1,id:o,resizeObserverConfig:a}=e;const 
s=(0,i.YG)(et),{active:u,dispatch:l,over:c,measureDroppableContainers:f}=(0,r.useContext)(ze),d=(0,r.useRef)({disabled:n}),h=(0,r.useRef)(!1),g=(0,r.useRef)(null),v=(0,r.useRef)(null),{disabled:y,updateMeasurementsFor:m,timeout:b}={...tt,...a},_=(0,i.YN)(null!=m?m:o),w=Ce({callback:(0,r.useCallback)((()=>{h.current?(null!=v.current&&clearTimeout(v.current),v.current=setTimeout((()=>{f(Array.isArray(_.current)?_.current:[_.current]),v.current=null}),b)):h.current=!0}),[b]),disabled:y||!u}),x=(0,r.useCallback)(((e,t)=>{w&&(t&&(w.unobserve(t),h.current=!1),e&&w.observe(e))}),[w]),[S,A]=(0,i.lk)(x),E=(0,i.YN)(t);return(0,r.useEffect)((()=>{w&&S.current&&(w.disconnect(),h.current=!1,w.observe(S.current))}),[S,w]),(0,i.Es)((()=>(l({type:p.RegisterDroppable,element:{id:o,key:s,disabled:n,node:S,rect:g,data:E}}),()=>l({type:p.UnregisterDroppable,key:s,id:o}))),[o]),(0,r.useEffect)((()=>{n!==d.current.disabled&&(l({type:p.SetDroppableDisabled,id:o,key:s,disabled:n}),d.current.disabled=n)}),[o,s,n,l]),{active:u,rect:g,isOver:(null==c?void 0:c.id)===o,node:S,over:c,setNodeRef:A}}function rt(e){let{animation:t,children:n}=e;const[o,a]=(0,r.useState)(null),[s,u]=(0,r.useState)(null),l=(0,i.ZC)(n);return n||o||!l||a(l),(0,i.Es)((()=>{if(!s)return;const e=null==o?void 0:o.key,n=null==o?void 0:o.props.id;null!=e&&null!=n?Promise.resolve(t(n,s)).then((()=>{a(null)})):a(null)}),[t,o,s]),r.createElement(r.Fragment,null,n,o?(0,r.cloneElement)(o,{ref:u}):null)}const ot={x:0,y:0,scaleX:1,scaleY:1};function it(e){let{children:t}=e;return r.createElement(ze.Provider,{value:Ue},r.createElement(qe.Provider,{value:ot},t))}const at={position:"fixed",touchAction:"none"},st=e=>(0,i.kx)(e)?"transform 250ms ease":void 0,ut=(0,r.forwardRef)(((e,t)=>{let{as:n,activatorEvent:o,adjustScale:a,children:s,className:u,rect:l,style:c,transform:f,transition:d=st}=e;if(!l)return null;const h=a?f:{...f,scaleX:1,scaleY:1},p={...at,width:l.width,height:l.height,top:l.top,left:l.left,transform:i.Ks.Transform.toString(h),transformOrigin:a&&o?_(o,l):void 0,transition:"function"===typeof d?d(o):d,...c};return r.createElement(n,{className:u,style:p,ref:t},s)})),lt=e=>t=>{let{active:n,dragOverlay:r}=t;const o={},{styles:i,className:a}=e;if(null!=i&&i.active)for(const[e,s]of Object.entries(i.active))void 0!==s&&(o[e]=n.node.style.getPropertyValue(e),n.node.style.setProperty(e,s));if(null!=i&&i.dragOverlay)for(const[e,s]of Object.entries(i.dragOverlay))void 0!==s&&r.node.style.setProperty(e,s);return null!=a&&a.active&&n.node.classList.add(a.active),null!=a&&a.dragOverlay&&r.node.classList.add(a.dragOverlay),function(){for(const[e,t]of Object.entries(o))n.node.style.setProperty(e,t);null!=a&&a.active&&n.node.classList.remove(a.active)}},ct={duration:250,easing:"ease",keyframes:e=>{let{transform:{initial:t,final:n}}=e;return[{transform:i.Ks.Transform.toString(t)},{transform:i.Ks.Transform.toString(n)}]},sideEffects:lt({styles:{active:{opacity:"0"}}})};function ft(e){let{config:t,draggableNodes:n,droppableContainers:r,measuringConfiguration:o}=e;return(0,i._q)(((e,a)=>{if(null===t)return;const s=n.get(e);if(!s)return;const u=s.node.current;if(!u)return;const l=Ie(a);if(!l)return;const{transform:c}=(0,i.zk)(a).getComputedStyle(a),f=I(c);if(!f)return;const d="function"===typeof t?t:function(e){const{duration:t,easing:n,sideEffects:r,keyframes:o}={...ct,...e};return e=>{let{active:i,dragOverlay:a,transform:s,...u}=e;if(!t)return;const 
l={x:a.rect.left-i.rect.left,y:a.rect.top-i.rect.top},c={scaleX:1!==s.scaleX?i.rect.width*s.scaleX/a.rect.width:1,scaleY:1!==s.scaleY?i.rect.height*s.scaleY/a.rect.height:1},f={x:s.x-l.x,y:s.y-l.y,...c},d=o({...u,active:i,dragOverlay:a,transform:{initial:s,final:f}}),[h]=d,p=d[d.length-1];if(JSON.stringify(h)===JSON.stringify(p))return;const g=null==r?void 0:r({active:i,dragOverlay:a,...u}),v=a.node.animate(d,{duration:t,easing:n,fill:"forwards"});return new Promise((e=>{v.onfinish=()=>{null==g||g(),e()}}))}}(t);return K(u,o.draggable.measure),d({active:{id:e,data:s.data,node:u,rect:o.draggable.measure(u)},draggableNodes:n,dragOverlay:{node:a,rect:o.dragOverlay.measure(l)},droppableContainers:r,measuringConfiguration:o,transform:f})}))}let dt=0;function ht(e){return(0,r.useMemo)((()=>{if(null!=e)return dt++,dt}),[e])}const pt=r.memo((e=>{let{adjustScale:t=!1,children:n,dropAnimation:o,style:i,transition:a,modifiers:s,wrapperElement:u="div",className:l,zIndex:c=999}=e;const{activatorEvent:f,active:d,activeNodeRect:h,containerNodeRect:p,draggableNodes:g,droppableContainers:v,dragOverlay:y,over:m,measuringConfiguration:b,scrollableAncestors:_,scrollableAncestorRects:w,windowRect:x}=Je(),S=(0,r.useContext)(qe),A=ht(null==d?void 0:d.id),E=We(s,{activatorEvent:f,active:d,activeNodeRect:h,containerNodeRect:p,draggingNodeRect:y.rect,over:m,overlayNodeRect:y.rect,scrollableAncestors:_,scrollableAncestorRects:w,transform:S,windowRect:x}),C=Ee(h),R=ft({config:o,draggableNodes:g,droppableContainers:v,measuringConfiguration:b}),O=C?y.setRef:void 0;return r.createElement(it,null,r.createElement(rt,{animation:R},d&&A?r.createElement(ut,{key:A,id:d.id,ref:O,as:u,activatorEvent:f,adjustScale:t,className:l,transition:a,rect:C,style:{zIndex:c,...i},transform:E},n):null))}))},43627:(e,t,n)=>{"use strict";n.d(t,{_G:()=>h,be:()=>a,gB:()=>v,gl:()=>S,m$:()=>c,uU:()=>m});var r=n(96540),o=n(43375),i=n(74979);function a(e,t,n){const r=e.slice();return r.splice(n<0?r.length+n:n,0,r.splice(t,1)[0]),r}function s(e,t){return e.reduce(((e,n,r)=>{const o=t.get(n);return o&&(e[r]=o),e}),Array(e.length))}function u(e){return null!==e&&e>=0}const l={scaleX:1,scaleY:1},c=e=>{var t;let{rects:n,activeNodeRect:r,activeIndex:o,overIndex:i,index:a}=e;const s=null!=(t=n[o])?t:r;if(!s)return null;const u=function(e,t,n){const r=e[t],o=e[t-1],i=e[t+1];if(!r||!o&&!i)return 0;if(n<t)return o?r.left-(o.left+o.width):i.left-(r.left+r.width);return i?i.left-(r.left+r.width):r.left-(o.left+o.width)}(n,a,o);if(a===o){const e=n[i];return e?{x:o<i?e.right-s.right:e.left-s.left,y:0,...l}:null}return a>o&&a<=i?{x:-s.width-u,y:0,...l}:a<o&&a>=i?{x:s.width+u,y:0,...l}:{x:0,y:0,...l}};const f=e=>{let{rects:t,activeIndex:n,overIndex:r,index:o}=e;const i=a(t,r,n),s=t[o],u=i[o];return u&&s?{x:u.left-s.left,y:u.top-s.top,scaleX:u.width/s.width,scaleY:u.height/s.height}:null},d={scaleX:1,scaleY:1},h=e=>{var t;let{activeIndex:n,activeNodeRect:r,index:o,rects:i,overIndex:a}=e;const s=null!=(t=i[n])?t:r;if(!s)return null;if(o===n){const e=i[a];return e?{x:0,y:n<a?e.top+e.height-(s.top+s.height):e.top-s.top,...d}:null}const u=function(e,t,n){const r=e[t],o=e[t-1],i=e[t+1];if(!r)return 0;if(n<t)return o?r.top-(o.top+o.height):i?i.top-(r.top+r.height):0;return i?i.top-(r.top+r.height):o?r.top-(o.top+o.height):0}(i,o,n);return o>n&&o<=a?{x:0,y:-s.height-u,...d}:o<n&&o>=a?{x:0,y:s.height+u,...d}:{x:0,y:0,...d}};const p="Sortable",g=r.createContext({activeIndex:-1,containerId:p,disableTransforms:!1,items:[],overIndex:-1,useDragOverlay:!1,sortedRects:[],strategy:f,disabled:{draggable:!1,droppable:!1}});function v(e){let{children:t,id:n,items:a,strategy:u=f,disabled:l=!1}=e;const{active:c,dragOverlay:d,droppableRects:h,over:v,measureDroppableContainers:y}=(0,o.fF)(),m=(0,i.YG)(p,n),b=Boolean(null!==d.rect),_=(0,r.useMemo)((()=>a.map((e=>"object"===typeof e&&"id"in e?e.id:e))),[a]),w=null!=c,x=c?_.indexOf(c.id):-1,S=v?_.indexOf(v.id):-1,A=(0,r.useRef)(_),E=!function(e,t){if(e===t)return!0;if(e.length!==t.length)return!1;for(let 
n=0;n<e.length;n++)if(e[n]!==t[n])return!1;return!0}(_,A.current),C=-1!==S&&-1===x||E,R=function(e){return"boolean"===typeof e?{draggable:e,droppable:e}:e}(l);(0,i.Es)((()=>{E&&w&&y(_)}),[E,_,w,y]),(0,r.useEffect)((()=>{A.current=_}),[_]);const O=(0,r.useMemo)((()=>({activeIndex:x,containerId:m,disabled:R,disableTransforms:C,items:_,overIndex:S,useDragOverlay:b,sortedRects:s(_,h),strategy:u})),[x,m,R.draggable,R.droppable,C,_,S,h,b,u]);return r.createElement(g.Provider,{value:O},t)}const y=e=>{let{id:t,items:n,activeIndex:r,overIndex:o}=e;return a(n,r,o).indexOf(t)},m=e=>{let{containerId:t,isSorting:n,wasDragging:r,index:o,items:i,newIndex:a,previousItems:s,previousContainerId:u,transition:l}=e;return!(!l||!r)&&((s===i||o!==a)&&(!!n||a!==o&&t===u))},b={duration:200,easing:"ease"},_="transform",w=i.Ks.Transition.toString({property:_,duration:0,easing:"linear"}),x={roleDescription:"sortable"};function S(e){let{animateLayoutChanges:t=m,attributes:n,disabled:a,data:s,getNewIndex:l=y,id:c,strategy:f,resizeObserverConfig:d,transition:h=b}=e;const{items:p,containerId:v,activeIndex:S,disabled:A,disableTransforms:E,sortedRects:C,overIndex:R,useDragOverlay:O,strategy:T}=(0,r.useContext)(g),k=function(e,t){var n,r;if("boolean"===typeof e)return{draggable:e,droppable:!1};return{draggable:null!=(n=null==e?void 0:e.draggable)?n:t.draggable,droppable:null!=(r=null==e?void 0:e.droppable)?r:t.droppable}}(a,A),M=p.indexOf(c),P=(0,r.useMemo)((()=>({sortable:{containerId:v,index:M,items:p},...s})),[v,s,M,p]),I=(0,r.useMemo)((()=>p.slice(p.indexOf(c))),[p,c]),{rect:N,node:D,isOver:L,setNodeRef:F}=(0,o.zM)({id:c,data:P,disabled:k.droppable,resizeObserverConfig:{updateMeasurementsFor:I,...d}}),{active:j,activatorEvent:U,activeNodeRect:z,attributes:B,setNodeRef:V,listeners:$,isDragging:H,over:W,setActivatorNodeRef:q,transform:G}=(0,o.PM)({id:c,data:P,attributes:{...x,...n},disabled:k.draggable}),Y=(0,i.jn)(F,V),X=Boolean(j),K=X&&!E&&u(S)&&u(R),Z=!O&&H,Q=Z&&K?G:null,J=K?null!=Q?Q:(null!=f?f:T)({rects:C,activeNodeRect:z,activeIndex:S,overIndex:R,index:M}):null,ee=u(S)&&u(R)?l({id:c,items:p,activeIndex:S,overIndex:R}):M,te=null==j?void 0:j.id,ne=(0,r.useRef)({activeId:te,items:p,newIndex:ee,containerId:v}),re=p!==ne.current.items,oe=t({active:j,containerId:v,isDragging:H,isSorting:X,id:c,index:M,items:p,newIndex:ne.current.newIndex,previousItems:ne.current.items,previousContainerId:ne.current.containerId,transition:h,wasDragging:null!=ne.current.activeId}),ie=function(e){let{disabled:t,index:n,node:a,rect:s}=e;const[u,l]=(0,r.useState)(null),c=(0,r.useRef)(n);return(0,i.Es)((()=>{if(!t&&n!==c.current&&a.current){const e=s.current;if(e){const t=(0,o.Sj)(a.current,{ignoreTransform:!0}),n={x:e.left-t.left,y:e.top-t.top,scaleX:e.width/t.width,scaleY:e.height/t.height};(n.x||n.y)&&l(n)}}n!==c.current&&(c.current=n)}),[t,n,a,s]),(0,r.useEffect)((()=>{u&&l(null)}),[u]),u}({disabled:!oe,index:M,node:D,rect:N});return(0,r.useEffect)((()=>{X&&ne.current.newIndex!==ee&&(ne.current.newIndex=ee),v!==ne.current.containerId&&(ne.current.containerId=v),p!==ne.current.items&&(ne.current.items=p)}),[X,ee,v,p]),(0,r.useEffect)((()=>{if(te===ne.current.activeId)return;if(te&&!ne.current.activeId)return void(ne.current.activeId=te);const e=setTimeout((()=>{ne.current.activeId=te}),50);return()=>clearTimeout(e)}),[te]),{active:j,activeIndex:S,attributes:B,data:P,rect:N,index:M,newIndex:ee,items:p,isOver:L,isSorting:X,isDragging:H,listeners:$,node:D,overIndex:R,over:W,setNodeRef:Y,setActivatorNodeRef:q,setDroppableNodeRef:F,setDraggableNodeRef:V,transform:null!=ie?ie:J,transition:function(){if(ie||re&&ne.current.newIndex===M)return w;if(Z&&!(0,i.kx)(U)||!h)return;if(X||oe)return 
i.Ks.Transition.toString({...h,property:_});return}()}}o.vL.Down,o.vL.Right,o.vL.Up,o.vL.Left},74979:(e,t,n)=>{"use strict";n.d(t,{$$:()=>g,Es:()=>h,KG:()=>y,Ks:()=>R,Ll:()=>s,Re:()=>A,Sw:()=>i,TW:()=>d,WQ:()=>S,YG:()=>w,YN:()=>v,ZC:()=>b,_q:()=>p,ag:()=>T,e_:()=>C,jn:()=>o,kx:()=>E,l6:()=>a,lk:()=>m,sb:()=>c,wz:()=>l,xZ:()=>f,zk:()=>u});var r=n(96540);function o(){for(var e=arguments.length,t=new Array(e),n=0;n<e;n++)t[n]=arguments[n];return(0,r.useMemo)((()=>e=>{t.forEach((t=>t(e)))}),t)}const i="undefined"!==typeof window&&"undefined"!==typeof window.document&&"undefined"!==typeof window.document.createElement;function a(e){const t=Object.prototype.toString.call(e);return"[object Window]"===t||"[object global]"===t}function s(e){return"nodeType"in e}function u(e){var t,n;return e?a(e)?e:s(e)&&null!=(t=null==(n=e.ownerDocument)?void 0:n.defaultView)?t:window:window}function l(e){const{Document:t}=u(e);return e instanceof t}function c(e){return!a(e)&&e instanceof u(e).HTMLElement}function f(e){return e instanceof u(e).SVGElement}function d(e){return e?a(e)?e.document:s(e)?l(e)?e:c(e)||f(e)?e.ownerDocument:document:document:document}const h=i?r.useLayoutEffect:r.useEffect;function p(e){const t=(0,r.useRef)(e);return h((()=>{t.current=e})),(0,r.useCallback)((function(){for(var e=arguments.length,n=new Array(e),r=0;r<e;r++)n[r]=arguments[r];var o;return null==(o=t.current)?void 0:o.call(t,...n)}),[])}function g(){const e=(0,r.useRef)(null);return[(0,r.useCallback)(((t,n)=>{e.current=setInterval(t,n)}),[]),(0,r.useCallback)((()=>{null!==e.current&&(clearInterval(e.current),e.current=null)}),[])]}function v(e,t){void 0===t&&(t=[e]);const n=(0,r.useRef)(e);return h((()=>{n.current!==e&&(n.current=e)}),t),n}function y(e,t){const n=(0,r.useRef)();return(0,r.useMemo)((()=>{const t=e(n.current);return n.current=t,t}),[...t])}function m(e){const t=p(e),n=(0,r.useRef)(null),o=(0,r.useCallback)((e=>{e!==n.current&&(null==t||t(e,n.current)),n.current=e}),[]);return[n,o]}function b(e){const t=(0,r.useRef)();return(0,r.useEffect)((()=>{t.current=e}),[e]),t.current}let _={};function w(e,t){return(0,r.useMemo)((()=>{if(t)return t;const n=null==_[e]?0:_[e]+1;return _[e]=n,e+"-"+n}),[e,t])}function x(e){return function(t){for(var n=arguments.length,r=new Array(n>1?n-1:0),o=1;o<n;o++)r[o-1]=arguments[o];return r.reduce(((t,n)=>{const r=Object.entries(n);for(const[o,i]of r){const n=t[o];null!=n&&(t[o]=n+e*i)}return t}),{...t})}}const S=x(1),A=x(-1);function E(e){if(!e)return!1;const{KeyboardEvent:t}=u(e.target);return t&&e instanceof t}function C(e){if(function(e){if(!e)return!1;const{TouchEvent:t}=u(e.target);return t&&e instanceof t}(e)){if(e.touches&&e.touches.length){const{clientX:t,clientY:n}=e.touches[0];return{x:t,y:n}}if(e.changedTouches&&e.changedTouches.length){const{clientX:t,clientY:n}=e.changedTouches[0];return{x:t,y:n}}}return function(e){return"clientX"in e&&"clientY"in e}(e)?{x:e.clientX,y:e.clientY}:null}const R=Object.freeze({Translate:{toString(e){if(!e)return;const{x:t,y:n}=e;return"translate3d("+(t?Math.round(t):0)+"px, "+(n?Math.round(n):0)+"px, 0)"}},Scale:{toString(e){if(!e)return;const{scaleX:t,scaleY:n}=e;return"scaleX("+t+") scaleY("+n+")"}},Transform:{toString(e){if(e)return[R.Translate.toString(e),R.Scale.toString(e)].join(" ")}},Transition:{toString(e){let{property:t,duration:n,easing:r}=e;return t+" "+n+"ms "+r}}}),O="a,frame,iframe,input:not([type=hidden]):not(:disabled),select:not(:disabled),textarea:not(:disabled),button:not(:disabled),*[tabindex]";function T(e){return e.matches(O)?e:e.querySelector(O)}},12009:(e,t,n)=>{"use strict";n.d(t,{A:()=>r});const r=n(96540).createContext(null)},70659:(e,t,n)=>{"use strict";n.d(t,{A:()=>ge});var 
r={};n.r(r),n.d(r,{a11yNotify:()=>oe,addFilter:()=>V,clearFilters:()=>H,removeFilter:()=>W,reset:()=>q,setCurrent:()=>G,setFilter:()=>Y,setResultsPerPage:()=>X,setSearchTerm:()=>K,setSort:()=>Z,trackAutocompleteClickThrough:()=>$,trackAutocompleteSuggestionClickThrough:()=>ie,trackClickThrough:()=>Q});var o=n(96540),i=n(58168);function a(e){return"/"===e.charAt(0)}function s(e,t){for(var n=t,r=n+1,o=e.length;r<o;n+=1,r+=1)e[n]=e[r];e.pop()}var u=function(e,t){void 0===t&&(t="");var n,r=e&&e.split("/")||[],o=t&&t.split("/")||[],i=e&&a(e),c=t&&a(t),l=i||c;if(e&&a(e)?o=r:r.length&&(o.pop(),o=o.concat(r)),!o.length)return"/";if(o.length){var y=o[o.length-1];n="."===y||".."===y||""===y}else n=!1;for(var f=0,d=o.length;d>=0;d--){var h=o[d];"."===h?s(o,d):".."===h?(s(o,d),f++):f&&(s(o,d),f--)}if(!l)for(;f--;f)o.unshift("..");!l||""===o[0]||o[0]&&a(o[0])||o.unshift("");var p=o.join("/");return n&&"/"!==p.substr(-1)&&(p+="/"),p};var l=!0,c="Invariant failed";function f(e){return"/"===e.charAt(0)?e:"/"+e}function d(e,t){return function(e,t){return 0===e.toLowerCase().indexOf(t.toLowerCase())&&-1!=="/?#".indexOf(e.charAt(t.length))}(e,t)?e.substr(t.length):e}function h(e){return"/"===e.charAt(e.length-1)?e.slice(0,-1):e}function p(e){var t=e.pathname,n=e.search,r=e.hash,o=t||"/";return n&&"?"!==n&&(o+="?"===n.charAt(0)?n:"?"+n),r&&"#"!==r&&(o+="#"===r.charAt(0)?r:"#"+r),o}function g(e,t,n,r){var o;"string"===typeof e?(o=function(e){var t=e||"/",n="",r="",o=t.indexOf("#");-1!==o&&(r=t.substr(o),t=t.substr(0,o));var i=t.indexOf("?");return-1!==i&&(n=t.substr(i),t=t.substr(0,i)),{pathname:t,search:"?"===n?"":n,hash:"#"===r?"":r}}(e),o.state=t):(void 0===(o=(0,i.A)({},e)).pathname&&(o.pathname=""),o.search?"?"!==o.search.charAt(0)&&(o.search="?"+o.search):o.search="",o.hash?"#"!==o.hash.charAt(0)&&(o.hash="#"+o.hash):o.hash="",void 0!==t&&void 0===o.state&&(o.state=t));try{o.pathname=decodeURI(o.pathname)}catch(a){throw a instanceof URIError?new URIError('Pathname "'+o.pathname+'" could not be decoded. This is likely caused by an invalid percent-encoding.'):a}return n&&(o.key=n),r?o.pathname?"/"!==o.pathname.charAt(0)&&(o.pathname=u(o.pathname,r.pathname)):o.pathname=r.pathname:o.pathname||(o.pathname="/"),o}function v(){var e=null;var t=[];return{setPrompt:function(t){return e=t,function(){e===t&&(e=null)}},confirmTransitionTo:function(t,n,r,o){if(null!=e){var i="function"===typeof e?e(t,n):e;"string"===typeof i?"function"===typeof r?r(i,o):o(!0):o(!1!==i)}else o(!0)},appendListener:function(e){var n=!0;function r(){n&&e.apply(void 0,arguments)}return t.push(r),function(){n=!1,t=t.filter((function(e){return e!==r}))}},notifyListeners:function(){for(var e=arguments.length,n=new Array(e),r=0;rfunction(e){return void 0!==e&&null!==e&&"number"===typeof e}(e)?`n_${e}_n`:function(e){return e&&"boolean"===typeof e}(e)?`b_${e}_b`:t(e),decode(e,t){if(/n_-?[\d\.]*_n/.test(e)){const t=e.substring(2,e.length-2);return Number(t)}if(/^b_(true|false)*_b$/.test(e)){return function(e){if("true"===e)return!0;if("false"===e)return!1;throw"Invalid type parsed as Boolean value"}(e.substring(2,e.length-2))}return t(e)}},R={parse:e=>E().parse(e,{ignoreQueryPrefix:!0,decoder:C.decode,arrayLimit:1e3}),stringify:e=>E().stringify(e,{encoder:C.encode})};function O(e){return Array.isArray(e)?e[e.length-1]:e}function T(e){return function(e){if(!function(e){return!isNaN(e)}(e))return;return parseInt(e,10)}(O(e))}function k(e){return e.filters}function M(e){return T(e.current)}function P(e){return O(e.q)}function I(e){const t=O(e["sort-field"]),n=O(e["sort-direction"]);return t?[t,n]:[]}function N(e){return T(e.size)}function D(e){return e.sort}class 
L{constructor(e={}){this.routingOptions={readUrl:e.readUrl||this.readUrl.bind(this),writeUrl:e.writeUrl||this.writeUrl.bind(this),urlToState:e.urlToState||this.urlToState.bind(this),stateToUrl:e.stateToUrl||this.stateToUrl.bind(this),routeChangeHandler:e.routeChangeHandler||this.routeChangeHandler.bind(this)},this.history="undefined"!==typeof window?x():function(e){void 0===e&&(e={});var t=e,n=t.getUserConfirmation,r=t.initialEntries,o=void 0===r?["/"]:r,a=t.initialIndex,s=void 0===a?0:a,u=t.keyLength,l=void 0===u?6:u,c=v();function f(e){(0,i.A)(_,e),_.length=_.entries.length,c.notifyListeners(_.location,_.action)}function d(){return Math.random().toString(36).substr(2,l)}var h=S(s,0,o.length-1),y=o.map((function(e){return g(e,void 0,"string"===typeof e?d():e.key||d())})),m=p;function b(e){var t=S(_.index+e,0,_.entries.length-1),r=_.entries[t];c.confirmTransitionTo(r,"POP",n,(function(e){e?f({action:"POP",location:r,index:t}):f()}))}var _={length:y.length,action:"POP",location:y[h],index:h,entries:y,createHref:m,push:function(e,t){var r="PUSH",o=g(e,t,d(),_.location);c.confirmTransitionTo(o,r,n,(function(e){if(e){var t=_.index+1,n=_.entries.slice(0);n.length>t?n.splice(t,n.length-t,o):n.push(o),f({action:r,location:o,index:t,entries:n})}}))},replace:function(e,t){var r="REPLACE",o=g(e,t,d(),_.location);c.confirmTransitionTo(o,r,n,(function(e){e&&(_.entries[_.index]=o,f({action:r,location:o}))}))},go:b,goBack:function(){b(-1)},goForward:function(){b(1)},canGo:function(e){var t=_.index+e;return t>=0&&t<_.entries.length},block:function(e){return void 0===e&&(e=!1),c.setPrompt(e)},listen:function(e){return c.appendListener(e)}};return _}(),this.lastPushSearchString=""}readUrl(){return this.history?this.history.location.search:""}writeUrl(e,{replaceUrl:t=!1}={}){(t?this.history.replace:this.history.push)(`?${e}`)}urlToState(e){return function(e){const t={current:M(e),filters:k(e),searchTerm:P(e),resultsPerPage:N(e),sortField:I(e)[0],sortDirection:I(e)[1],sortList:D(e)};return Object.keys(t).reduce(((e,n)=>{const r=t[n];return r&&(e[n]=r),e}),{})}(R.parse(e))}stateToUrl(e){return`${function(e){return R.stringify(function({searchTerm:e,current:t,filters:n,resultsPerPage:r,sortDirection:o,sortField:i,sortList:a}){const s={};return t>1&&(s.current=t),e&&(s.q=e),r&&(s.size=r),n&&n.length>0&&(s.filters=n),a&&a.length>0?s.sort=a:i&&(s["sort-field"]=i,s["sort-direction"]=o),s}(e))}(e)}`}getStateFromURL(){return this.routingOptions.urlToState(this.routingOptions.readUrl())}pushStateToURL(e,{replaceUrl:t=!1}={}){const n=this.routingOptions.stateToUrl(e);this.lastPushSearchString=n,this.routingOptions.writeUrl(n,{replaceUrl:t})}onURLStateChange(e){this.unlisten=this.routingOptions.routeChangeHandler((t=>{`?${this.lastPushSearchString}`!==t&&(this.lastPushSearchString="",e(this.routingOptions.urlToState(t)))}).bind(this))}routeChangeHandler(e){return this.history.listen((t=>{e(t.search)}))}tearDown(){this.unlisten()}}class F{constructor(){this.requestSequence=0,this.lastCompleted=0}next(){return++this.requestSequence}isOldRequest(e){return e<this.lastCompleted}completed(e){this.lastCompleted=e}}function j(e,t){let n=null;const r=function(){for(var o=arguments.length,r=new Array(o),i=0;i<o;i++)r[i]=arguments[i];n&&clearTimeout(n),n=setTimeout((()=>{e.apply(null,r)}),t)};return r.cancel=()=>{n&&(clearTimeout(n),n=null)},r}class U{constructor(){this.debounceCache={}}runWithDebounce(e,t,n,...r){if(!e)return n(...r);const o=`${t}|${e.toString()}`;let i=this.debounceCache[o];i||(this.debounceCache[o]=j(n,e),i=this.debounceCache[o]),i(...r)}cancelByName(e){Object.entries(this.debounceCache).filter((([t])=>t.startsWith(`${e}|`))).forEach((([e,t])=>t.cancel()))}}U.debounce=(e,t)=>j(t,e);const z=U;var B=n(65433);function 
V(e,t,n="all"){this.debug&&console.log("Search UI: Action","addFilter",...arguments);const{filters:r}=this.state,o=r.find((t=>t.field===e&&t.type===n))||null,i=r.filter((t=>t.field!==e||t.type!==n))||[],a=(null===o||void 0===o?void 0:o.values)||[],s=a.find((e=>(0,B.doFilterValuesMatch)(e,t)))?a:a.concat(t);this._updateSearchResults({current:1,filters:[...i,{field:e,values:s,type:n}]});this.events.emit({type:"FacetFilterSelected",field:e,value:(0,B.serialiseFilter)(s),query:this.state.searchTerm})}function $(e,t=[]){this.debug&&console.log("Search UI: Action","trackAutocompleteClickThrough",...arguments);const{autocompletedResultsRequestId:n,searchTerm:r,autocompletedResults:o,current:i,resultsPerPage:a,totalResults:s,filters:u}=this.state,l=o.findIndex((t=>t._meta.id===e)),c=o[l],f=this.events;f.autocompleteResultClick({query:r,documentId:e,requestId:n,tags:t,result:c,resultIndex:l}),f.emit({type:"ResultSelected",documentId:e,query:r,position:l,origin:"autocomplete",tags:t,totalResults:s,filters:u,currentPage:i,resultsPerPage:a})}function H(e=[]){this.debug&&console.log("Search UI: Action","clearFilters",...arguments);const{filters:t}=this.state,n=t.filter((t=>{const n=t.field;return e.includes(n)}));this._updateSearchResults({current:1,filters:n})}function W(e,t,n){this.debug&&console.log("Search UI: Action","removeFilter",...arguments);const{filters:r}=this.state;let o=r;o=!t&&n?r.filter((t=>!(t.field===e&&t.type===n))):t?(0,B.removeSingleFilterValue)(r,e,t,n):r.filter((t=>t.field!==e)),this._updateSearchResults({current:1,filters:o});this.events.emit({type:"FacetFilterRemoved",field:e,value:t&&(0,B.serialiseFilter)([t]),query:this.state.searchTerm})}function q(){this.debug&&console.log("Search UI: Action","reset",...arguments),this._setState(this.startingState),this.trackUrlState&&this.URLManager.pushStateToURL(this.state)}function G(e){this.debug&&console.log("Search UI: Action","setCurrent",...arguments),this._updateSearchResults({current:e})}function Y(e,t,n="all"){this.debug&&console.log("Search UI: Action","setFilter",...arguments);let{filters:r}=this.state;r=r.filter((t=>t.field!==e||t.type!==n)),this._updateSearchResults({current:1,filters:[...r,{field:e,values:[t],type:n}]});this.events.emit({type:"FacetFilterSelected",field:e,value:t&&(0,B.serialiseFilter)([t]),query:this.state.searchTerm})}function X(e){this.debug&&console.log("Search UI: Action","setResultsPerPage",...arguments),this._updateSearchResults({current:1,resultsPerPage:e})}function K(e,{autocompleteMinimumCharacters:t=0,autocompleteResults:n=!1,autocompleteSuggestions:r=!1,shouldClearFilters:o=!0,refresh:i=!0,debounce:a=0}={}){this.debug&&console.log("Search UI: Action","setSearchTerm",...arguments),this._setState({searchTerm:e}),i&&this.debounceManager.runWithDebounce(a,"_updateSearchResults",this._updateSearchResults,Object.assign({current:1},o&&{filters:[]})),(n||r)&&e.length>=t&&this.debounceManager.runWithDebounce(a,"_updateAutocomplete",this._updateAutocomplete,e,{autocompleteResults:n,autocompleteSuggestions:r})}function Z(e,t){this.debug&&console.log("Search UI: Action","setSort",...arguments);const n={current:1,sortList:null,sortField:null,sortDirection:null};Array.isArray(e)?n.sortList=e:(n.sortField=e,n.sortDirection=t),this._updateSearchResults(n)}function Q(e,t=[]){this.debug&&console.log("Search UI: 
Action","trackClickThrough",...arguments);const{requestId:n,searchTerm:r,results:o,current:i,resultsPerPage:a,totalResults:s,filters:u}=this.state,l=o.findIndex((t=>t._meta.id===e)),c=o[l],f=this.events;this.events.resultClick({query:r,documentId:e,requestId:n,tags:t,result:c,page:i,resultsPerPage:a,resultIndexOnPage:l}),f.emit({type:"ResultSelected",documentId:e,query:r,origin:"results",position:l,tags:t,totalResults:s,filters:u,currentPage:i,resultsPerPage:a})}const J="search-ui-screen-reader-notifications",ee="undefined"!==typeof document,te=()=>{if(!ee)return null;let e=document.getElementById(J);return e||(e=document.createElement("div"),e.id=J,e.setAttribute("role","status"),e.setAttribute("aria-live","polite"),e.style.position="absolute",e.style.width="1px",e.style.height="1px",e.style.margin="-1px",e.style.padding="0",e.style.border="0",e.style.overflow="hidden",e.style.clip="rect(0 0 0 0)",document.body.appendChild(e),e)},ne=e=>{const t=te();t&&(t.textContent=e)},re={searchResults:({start:e,end:t,totalResults:n,searchTerm:r})=>{let o=`Showing ${e} to ${t} results out of ${n}`;return r&&(o+=`, searching for "${r}".`),o}};function oe(e,t){if(!this.hasA11yNotifications)return;const n=this.a11yNotificationMessages[e];if(!n){const t=`Could not find corresponding message function in a11yNotificationMessages: "${e}"`;return void console.warn("Action","a11yNotify",t)}const r=n(t);ne(r),this.debug&&console.log("Search UI: Action","a11yNotify",{messageFunc:e,messageArgs:t,message:r})}function ie(e,t,n=[]){this.debug&&console.log("Search UI: Action","trackAutocompleteSuggestionClickThrough",...arguments);const{searchTerm:r}=this.state;this.events.emit({type:"AutocompleteSuggestionSelected",position:t,query:r,tags:n,suggestion:e})}function ae(e,t,n){if(n){if(t){const r=t[e].bind(t);return(...e)=>n(...e,r)}return n}return t&&t[e]?t[e].bind(t):()=>{throw`No ${e} handler provided and no Connector provided. 
You must configure one or the other.`}}const se=class{constructor({apiConnector:e,onSearch:t,onAutocomplete:n,onResultClick:r,onAutocompleteResultClick:o,plugins:i=[]}={}){this.search=ae("onSearch",e,t),this.autocomplete=ae("onAutocomplete",e,n),this.resultClick=ae("onResultClick",e,r),this.autocompleteResultClick=ae("onAutocompleteResultClick",e,o),this.plugins=i}emit(e){this.plugins.forEach((t=>{t.subscribe(e)}))}};var ue=function(e,t){var n={};for(var r in e)Object.prototype.hasOwnProperty.call(e,r)&&t.indexOf(r)<0&&(n[r]=e[r]);if(null!=e&&"function"===typeof Object.getOwnPropertySymbols){var o=0;for(r=Object.getOwnPropertySymbols(e);o(t[r]&&"function"===typeof t[r]&&!t[r]({filters:n})||(e[r]=o),e)),{})}const de=class{constructor({apiConnector:e,autocompleteQuery:t={},plugins:n=[],debug:o,initialState:i,onSearch:a,onAutocomplete:s,onResultClick:u,onAutocompleteResultClick:l,searchQuery:c={},trackUrlState:f=!0,routingOptions:d={},urlPushDebounceLength:h=500,hasA11yNotifications:p=!1,a11yNotificationMessages:g={},alwaysSearchOnInitialLoad:v=!1}){let y;this.state=ce,this._updateAutocomplete=(e,{autocompleteResults:t,autocompleteSuggestions:n}={})=>{const r=this.autocompleteRequestSequencer.next(),o=Object.assign(Object.assign({},t&&{results:this.autocompleteQuery.results||{}}),n&&{suggestions:this.autocompleteQuery.suggestions||{}});return this.events.autocomplete({searchTerm:e},o).then((e=>{this.autocompleteRequestSequencer.isOldRequest(r)||(this.autocompleteRequestSequencer.completed(r),this._setState(e))}))},this._updateSearchResults=(e,{skipPushToUrl:t=!1,replaceUrl:n=!1}={})=>{const{current:r,filters:o,resultsPerPage:i,searchTerm:a,sortDirection:s,sortField:u,sortList:l}=Object.assign(Object.assign({},this.state),e);this.debounceManager.cancelByName("_updateSearchResults"),this._setState({current:r,error:"",filters:o,resultsPerPage:i,searchTerm:a,sortDirection:s,sortField:u,sortList:l}),this._makeSearchRequest({skipPushToUrl:t,replaceUrl:n})},this._makeSearchRequest=z.debounce(0,(({skipPushToUrl:e,replaceUrl:t})=>{const{current:n,filters:r,resultsPerPage:o,searchTerm:i,sortDirection:a,sortField:s,sortList:u}=this.state;this._setState({isLoading:!0});const l=this.searchRequestSequencer.next(),c=this.searchQuery,{conditionalFacets:f}=c,d=ue(c,["conditionalFacets"]),h=Object.assign(Object.assign({},d),{facets:fe(this.searchQuery.facets,f,r)}),p=Object.assign(Object.assign({},le(this.state)),{filters:(0,B.mergeFilters)(r,this.searchQuery.filters)});return this.events.search(p,h).then((c=>{if(this.searchRequestSequencer.isOldRequest(l))return;this.searchRequestSequencer.completed(l);const{totalResults:f}=c;this.events.emit({type:"SearchQuery",filters:this.state.filters,query:this.state.searchTerm,currentPage:p.current,resultsPerPage:p.resultsPerPage,totalResults:f});const d=0===f?0:(n-1)*o+1,h=f{var t;"Invalid credentials"!==e.message?this._setState({error:`An unexpected error occurred: ${e.message}`}):this._setState(Object.assign({},(null===(t=this.apiConnector)||void 0===t?void 0:t.state)&&Object.assign({},this.apiConnector.state)))}))})),this.actions=Object.entries(r).reduce(((e,[t,n])=>Object.assign(Object.assign({},e),{[t]:n.bind(this)})),{}),this.actions=Object.assign(Object.assign({},this.actions),(null===e||void 0===e?void 0:e.actions)&&Object.assign({},e.actions)),Object.assign(this,this.actions),this.events=new se({apiConnector:e,onSearch:a,onAutocomplete:s,onResultClick:u,onAutocompleteResultClick:l,plugins:n}),this.debug=o,this.debug&&(console.warn("Search UI Debugging is 
enabled. This should be turned off in production deployments."),"undefined"!==typeof window&&(window.searchUI=this)),this.autocompleteRequestSequencer=new F,this.searchRequestSequencer=new F,this.debounceManager=new z,this.autocompleteQuery=t,this.searchQuery=c,this.subscriptions=[],this.trackUrlState=f,this.urlPushDebounceLength=h,this.alwaysSearchOnInitialLoad=v,this.apiConnector=e,f?(this.URLManager=new L(d),y=this.URLManager.getStateFromURL(),this.URLManager.onURLStateChange((e=>{this._updateSearchResults(Object.assign(Object.assign({},ce),e),{skipPushToUrl:!0})}))):y={},this.hasA11yNotifications=p,this.hasA11yNotifications&&te(),this.a11yNotificationMessages=Object.assign(Object.assign({},re),g),this.startingState=Object.assign(Object.assign({},this.state),i);const m=le(Object.assign(Object.assign({},this.startingState),y));this.state=Object.assign(Object.assign(Object.assign({},this.state),(null===e||void 0===e?void 0:e.state)&&Object.assign({},e.state)),m),(m.searchTerm||m.filters.length>0||this.alwaysSearchOnInitialLoad)&&this._updateSearchResults(m,{replaceUrl:!0})}_setState(e){const t=Object.assign(Object.assign({},this.state),e);this.debug&&console.log("Search UI: State Update",e,t),this.state=t,this.subscriptions.forEach((e=>e(t)))}setSearchQuery(e){this.searchQuery=e,this._updateSearchResults({})}setAutocompleteQuery(e){this.autocompleteQuery=e}subscribeToStateChanges(e){this.subscriptions.push(e)}unsubscribeToStateChanges(e){this.subscriptions=this.subscriptions.filter((t=>t!==e))}tearDown(){this.subscriptions=[],this.URLManager&&this.URLManager.tearDown(),this.debounceManager.cancelByName("pushStateToURL")}getActions(){return this.actions}getState(){return Object.assign({},this.state)}};var he=n(12009);const pe={moreFilters:({visibleOptionsCount:e,showingAll:t})=>{let n=t?"All ":"";return n+=`${e} options shown.`,n}},ge=({children:e,config:t,driver:n})=>{const[r,i]=(0,o.useState)(null);if((0,o.useEffect)((()=>{const e=n||new de(Object.assign(Object.assign({},t),{a11yNotificationMessages:Object.assign(Object.assign({},pe),t.a11yNotificationMessages)}));return i(e),()=>{e.tearDown()}}),[]),(0,o.useEffect)((()=>{r&&r.setSearchQuery(t.searchQuery)}),[t.searchQuery]),(0,o.useEffect)((()=>{r&&r.setAutocompleteQuery(t.autocompleteQuery)}),[t.autocompleteQuery]),!r)return null;const a={driver:r};return o.createElement(he.A.Provider,{value:a},e)}},46963:(e,t,n)=>{"use strict";n.d(t,{A:()=>f});var r=n(96540),o=n(12009),i=function(e,t){var n={};for(var r in e)Object.prototype.hasOwnProperty.call(e,r)&&t.indexOf(r)<0&&(n[r]=e[r]);if(null!=e&&"function"===typeof Object.getOwnPropertySymbols){var o=0;for(r=Object.getOwnPropertySymbols(e);o<r.length;o++)t.indexOf(r[o])<0&&Object.prototype.propertyIsEnumerable.call(e,r[o])&&(n[r[o]]=e[r[o]])}return n};function a(e,t,n){return(n.mapContextToProps||t)(e,n)||{}}const s=function(e){if(!e)throw"withSearch requires a function to be provided which returns an object with at least one value.";return function(t){class n extends r.Component{constructor(t,n){super(t,n),this.subscription=t=>{this.mounted&&this.setState((n=>a(Object.assign(Object.assign({},n),t),e,this.props)))},this.mounted=!1,this.state=Object.assign({},a(function(e){return Object.assign(Object.assign({},e.driver.getState()),e.driver.getActions())}(n),e,t))}componentDidMount(){this.mounted=!0,this.context.driver.subscribeToStateChanges(this.subscription)}componentWillUnmount(){this.mounted=!1,this.context.driver.unsubscribeToStateChanges(this.subscription)}render(){const e=i(this.props,[]);return r.createElement(t,Object.assign({},this.state,e))}}return n.contextType=o.A,n}};var u=n(5556),l=n.n(u);function c({mapContextToProps:e,children:t}){const n=s(e)((e=>t(e)));return r.createElement(n,null)}c.propTypes={mapContextToProps:l().func,children:l().func.isRequired};const f=c},81703:(e,t,n)=>{"use strict";n.d(t,{A:()=>p});var r=n(65433);const o=Object.assign({},r);var 
i=function(e,t){var n={};for(var r in e)Object.prototype.hasOwnProperty.call(e,r)&&t.indexOf(r)<0&&(n[r]=e[r]);if(null!=e&&"function"===typeof Object.getOwnPropertySymbols){var o=0;for(r=Object.getOwnPropertySymbols(e);o<r.length;o++)t.indexOf(r[o])<0&&Object.prototype.propertyIsEnumerable.call(e,r[o])&&(n[r[o]]=e[r[o]])}return n};function a(e,t,n){const s=function(e){if(!e)return;const t=Object.entries(e).map((([e,t])=>{if("value"==t.type)return t.sort&&console.warn("search-ui-site-search-connector: Site Search does not support 'sort' on facets"),t.size&&console.warn("search-ui-site-search-connector: Site Search does not support 'size' on facets"),[e,t];console.warn(`search-ui-site-search-connector: Dropping ${e} facet, only value facets are supported in Site Search`)})).filter((e=>e)).map((([e])=>e));return t.length?t:void 0}(t.facets),u=function(e){if(e&&0!==Object.keys(e).length)return e.reduce(((e,t)=>{const n=t.field,r=t.values;if(e[n])return console.warn("search-ui-site-search-connector: More than one filter found for a single field"),e;if(t.type&&"all"!==t.type&&"any"!==t.type)return console.warn(`search-ui-site-search-connector: Unsupported filter type "${t.type}" found, only "all" and "any" are currently supported`),e;if(void 0!==r.find((e=>"object"===typeof e))){if(r.length>1)return console.warn("search-ui-site-search-connector: Cannot apply more than 1 none-value filters to a single field"),e;const t=r[0];if(o.isFilterValueRange(t)){const{name:r}=t,o=i(t,["name"]);return e[n]=Object.assign({type:"range"},o),e}return e}return e[n]=Object.assign(Object.assign({},"any"===t.type?{}:{type:"and"}),{values:r}),e}),{})}(void 0!==t.filters?t.filters:e.filters),l=void 0!==t.current?t.current:e.current,c=void 0!==t.resultsPerPage?t.resultsPerPage:e.resultsPerPage,f=void 0!==t.sortDirection?t.sortDirection:e.sortDirection,d=void 0!==t.sortField?t.sortField:e.sortField,h=void 0!==t.sortList?t.sortList:e.sortList,[p,g]=(v=t.result_fields)?[Object.keys(v),Object.entries(v).reduce(((e,[t,n])=>n.snippet?Object.assign(Object.assign({},e),{[t]:n.snippet}):e),{})]:[];var v;const y=(m=t.search_fields)?Object.keys(m):[];var m;const b=e.searchTerm;return Object.assign(Object.assign(Object.assign(Object.assign(Object.assign(Object.assign(Object.assign(Object.assign(Object.assign(Object.assign(Object.assign({},c&&{per_page:c}),l&&{page:l}),f&&{sort_direction:{[n]:f}}),d&&{sort_field:{[n]:d}}),h&&{sort_list:{[n]:h}}),u&&{filters:{[n]:u}}),s&&{facets:{[n]:s}}),p&&{fetch_fields:{[n]:p}}),g&&{highlight_fields:{[n]:g}}),y&&!!y.length&&{search_fields:{[n]:y}}),{q:b})}var s=function(e,t){var n={};for(var r in e)Object.prototype.hasOwnProperty.call(e,r)&&t.indexOf(r)<0&&(n[r]=e[r]);if(null!=e&&"function"===typeof Object.getOwnPropertySymbols){var o=0;for(r=Object.getOwnPropertySymbols(e);o<r.length;o++)t.indexOf(r[o])<0&&Object.prototype.propertyIsEnumerable.call(e,r[o])&&(n[r[o]]=e[r[o]])}return n};const u=(e,[t,n])=>Object.assign(Object.assign({},e),{[t]:n});function l(e,t){const n=function(e,t){return e[t].map((e=>{const{highlight:t,sort:n}=e,r=s(e,["highlight","sort"]),o=Object.entries(r).filter((([e])=>!e.startsWith("_"))).map((([e,t])=>{return[e,(n=t,{raw:n})];var n})).reduce(u,{});return Object.entries(t).forEach((([e,t])=>{o[e].snippet=t})),o}))}(e.records,t),r=e.info[t].num_pages,o=e.info[t].total_result_count,i=(a=e.info[t]).facets?Object.entries(a.facets).map((([e,t])=>[e,[{field:e,data:Object.entries(t).map((([e,t])=>({value:e,count:t}))),type:"value"}]])).reduce(u,{}):{};var a;return Object.assign({rawResponse:e,results:n,totalPages:r,totalResults:o,requestId:""},Object.keys(i).length>0&&{facets:i})}var c=function(e,t,n,r){return new(n||(n=Promise))((function(o,i){function a(e){try{u(r.next(e))}catch(t){i(t)}}function s(e){try{u(r.throw(e))}catch(t){i(t)}}function u(e){var t;e.done?o(e.value):(t=e.value,t instanceof n?t:new 
n((function(e){e(t)}))).then(a,s)}u((r=r.apply(e,t||[])).next())}))};function f(e,t,n,r){return c(this,void 0,void 0,(function*(){const o=new Headers({"Content-Type":"application/json"}),i=yield fetch(`https://search-api.swiftype.com/api/v1/public/${n}`,{method:t,headers:o,body:JSON.stringify(Object.assign({engine_key:e},r)),credentials:"include"});let a;try{a=yield i.json()}catch(s){}if(i.status>=200&&i.status<300)return a;{const e=a&&a.errors&&Object.entries(a.errors).length>0?JSON.stringify(a.errors):i.status;throw new Error(`${e}`)}}))}var d=function(e,t,n,r){return new(n||(n=Promise))((function(o,i){function a(e){try{u(r.next(e))}catch(t){i(t)}}function s(e){try{u(r.throw(e))}catch(t){i(t)}}function u(e){var t;e.done?o(e.value):(t=e.value,t instanceof n?t:new n((function(e){e(t)}))).then(a,s)}u((r=r.apply(e,t||[])).next())}))};function h(e,t,n){const r=Object.entries(Object.assign({engine_key:e},n)).map((([e,t])=>`${e}=${encodeURIComponent(t)}`)).join("&");return fetch(`https://search-api.swiftype.com/api/v1/public/${t}?${r}`,{method:"GET",credentials:"include"})}const p=class{constructor({documentType:e,engineKey:t,beforeSearchCall:n=((e,t)=>t(e)),beforeAutocompleteResultsCall:r=((e,t)=>t(e))}){this.documentType=e,this.engineKey=t,this.beforeSearchCall=n,this.beforeAutocompleteResultsCall=r,this.request=f.bind(this,t),this._get=h.bind(this,t)}onResultClick({query:e,documentId:t,tags:n}){n&&n.length>0&&console.warn("search-ui-site-search-connector: Site Search does not support tags on click"),this._get("analytics/pc",{t:(new Date).getTime(),q:e,doc_id:t})}onAutocompleteResultClick({query:e,documentId:t,tags:n}){n&&console.warn("search-ui-site-search-connector: Site Search does not support tags on autocompleteClick"),this._get("analytics/pas",{t:(new Date).getTime(),q:e,doc_id:t})}onSearch(e,t){const n=a(e,t,this.documentType);return this.beforeSearchCall(n,(e=>this.request("POST","engines/search.json",e).then((e=>l(e,this.documentType)))))}onAutocomplete({searchTerm:e},t){return d(this,void 0,void 0,(function*(){if(t.results){const n=a({searchTerm:e},t.results,this.documentType);return this.beforeAutocompleteResultsCall(n,(e=>this.request("POST","engines/suggest.json",e).then((e=>({autocompletedResults:l(e,this.documentType).results})))))}t.suggestions&&console.warn("search-ui-site-search-connector: Site Search does support query suggestions on autocomplete")}))}}},65433:(e,t,n)=>{"use strict";n.r(t),n.d(t,{doFilterValuesMatch:()=>l,findFilterValues:()=>a,isFilterValueRange:()=>f,markSelectedFacetValuesFromFilters:()=>u,mergeFilters:()=>c,removeSingleFilterValue:()=>s,serialiseFilter:()=>d});var r=n(62303),o=n.n(r),i=function(e,t){var n={};for(var r in e)Object.prototype.hasOwnProperty.call(e,r)&&t.indexOf(r)<0&&(n[r]=e[r]);if(null!=e&&"function"===typeof Object.getOwnPropertySymbols){var o=0;for(r=Object.getOwnPropertySymbols(e);o<r.length;o++)t.indexOf(r[o])<0&&Object.prototype.propertyIsEnumerable.call(e,r[o])&&(n[r[o]]=e[r[o]])}return n};function a(e,t,n){const r=e.find((e=>e.field===t&&e.type===n));return r?r.values:[]}function s(e,t,n,r){return e.reduce(((e,o)=>{const{field:a,values:s,type:u}=o,c=i(o,["field","values","type"]);if(a===t&&(!r||u===r)){const t=s.filter((e=>!l(e,n)));return t.length>0?e.concat(Object.assign({field:a,values:t,type:u},c)):e}return e.concat(o)}),[])}function u(e,t,n,r){const o=e.data,i=a(t,n,r)||[];return Object.assign(Object.assign({},e),{data:o.map((e=>Object.assign(Object.assign({},e),{selected:i.some((t=>l(t,e.value)))})))})}function l(e,t){return!!(e&&e.name&&t&&t.name&&e.name===t.name)||o()(e,t,{strict:!0})}function c(e,t){return 
t?t.reduce(((e,t)=>e.find((e=>e.type===t.type&&e.field===t.field))?e:[...e,t]),e):e}function f(e){return"object"===typeof e&&void 0!==e.name}const d=e=>e.reduce(((e,t)=>(f(t)?e.push(t.name):e.push(t.toString()),e)),[]).join(",")},62303:(e,t,n)=>{var r=n(1189),o=n(47244),i=n(37653),a=n(14035),s=n(71589),u=n(62120),l=Date.prototype.getTime;function c(e,t,n){var h=n||{};return!!(h.strict?i(e,t):e===t)||(!e||!t||"object"!==typeof e&&"object"!==typeof t?h.strict?i(e,t):e==t:function(e,t,n){var i,h;if(typeof e!==typeof t)return!1;if(f(e)||f(t))return!1;if(e.prototype!==t.prototype)return!1;if(o(e)!==o(t))return!1;var p=a(e),g=a(t);if(p!==g)return!1;if(p||g)return e.source===t.source&&s(e)===s(t);if(u(e)&&u(t))return l.call(e)===l.call(t);var v=d(e),y=d(t);if(v!==y)return!1;if(v||y){if(e.length!==t.length)return!1;for(i=0;i=0;i--)if(m[i]!=b[i])return!1;for(i=m.length-1;i>=0;i--)if(!c(e[h=m[i]],t[h],n))return!1;return!0}(e,t,h))}function f(e){return null===e||void 0===e}function d(e){return!(!e||"object"!==typeof e||"number"!==typeof e.length)&&("function"===typeof e.copy&&"function"===typeof e.slice&&!(e.length>0&&"number"!==typeof e[0]))}e.exports=c},84300:(e,t,n)=>{"use strict";n.d(t,{A:()=>oe});var r=function(){function e(e){var t=this;this._insertTag=function(e){var n;n=0===t.tags.length?t.insertionPoint?t.insertionPoint.nextSibling:t.prepend?t.container.firstChild:t.before:t.tags[t.tags.length-1].nextSibling,t.container.insertBefore(e,n),t.tags.push(e)},this.isSpeedy=void 0===e.speedy||e.speedy,this.tags=[],this.ctr=0,this.nonce=e.nonce,this.key=e.key,this.container=e.container,this.prepend=e.prepend,this.insertionPoint=e.insertionPoint,this.before=null}var t=e.prototype;return t.hydrate=function(e){e.forEach(this._insertTag)},t.insert=function(e){this.ctr%(this.isSpeedy?65e3:1)===0&&this._insertTag(function(e){var t=document.createElement("style");return t.setAttribute("data-emotion",e.key),void 0!==e.nonce&&t.setAttribute("nonce",e.nonce),t.appendChild(document.createTextNode("")),t.setAttribute("data-s",""),t}(this));var t=this.tags[this.tags.length-1];if(this.isSpeedy){var n=function(e){if(e.sheet)return e.sheet;for(var t=0;t0?c(_,--m):0,v--,10===b&&(v=1,g--),b}function A(){return b=m2||O(b)>3?"":" "}function I(e,t){for(;--t&&A()&&!(b<48||b>102||b>57&&b<65||b>70&&b<97););return R(e,C()+(t<6&&32==E()&&32==A()))}function N(e){for(;A();)switch(b){case e:return m;case 34:case 39:34!==e&&39!==e&&N(b);break;case 40:41===e&&N(e);break;case 92:A()}return m}function D(e,t){for(;A()&&e+b!==57&&(e+b!==84||47!==E()););return"/*"+R(t,m-1)+"*"+i(47===e?e:A())}function L(e){for(;!O(E());)A();return R(e,m)}var F="-ms-",j="-moz-",U="-webkit-",z="comm",B="rule",V="decl",$="@keyframes";function H(e,t){for(var n="",r=h(e),o=0;o0&&d(j)-y&&p(b>32?K(j+";",r,n,y-1):K(u(j," ","")+";",r,n,y-2),h);break;case 59:j+=";";default:if(p(F=Y(j,t,n,g,v,o,f,T,k=[],N=[],y),a),123===O)if(0===v)G(j,t,F,F,k,a,y,f,N);else switch(99===m&&110===c(j,3)?100:m){case 100:case 108:case 109:case 115:G(e,F,F,r&&p(Y(e,F,F,0,0,o,f,T,o,k=[],y),N),o,N,y,f,r?k:N);break;default:G(j,F,F,F,[""],N,0,f,N)}}g=v=b=0,w=R=1,T=j="",y=s;break;case 58:y=1+d(j),b=_;default:if(w<1)if(123==O)--w;else if(125==O&&0==w++&&125==S())continue;switch(j+=i(O),O*w){case 38:R=v>0?1:(j+="\f",-1);break;case 44:f[g++]=(d(j)-1)*R,R=1;break;case 64:45===E()&&(j+=M(A())),m=E(),v=y=d(T=j+=L(C())),O++;break;case 45:45===_&&2==d(j)&&(w=0)}}return a}function Y(e,t,n,r,i,a,l,c,d,p,g){for(var v=i-1,y=0===i?a:[""],m=h(y),b=0,_=0,x=0;b0?y[S]+" 
"+A:u(A,/&\f/g,y[S])))&&(d[x++]=E);return w(e,t,n,0===i?B:c,d,p,g)}function X(e,t,n){return w(e,t,n,z,i(b),f(e,2,-2),0)}function K(e,t,n,r){return w(e,t,n,V,f(e,0,r),f(e,r+1,-1),r)}var Z=function(e,t,n){for(var r=0,o=0;r=o,o=E(),38===r&&12===o&&(t[n]=1),!O(o);)A();return R(e,m)},Q=function(e,t){return k(function(e,t){var n=-1,r=44;do{switch(O(r)){case 0:38===r&&12===E()&&(t[n]=1),e[n]+=Z(m-1,t,n);break;case 2:e[n]+=M(r);break;case 4:if(44===r){e[++n]=58===E()?"&\f":"",t[n]=e[n].length;break}default:e[n]+=i(r)}}while(r=A());return e}(T(e),t))},J=new WeakMap,ee=function(e){if("rule"===e.type&&e.parent&&!(e.length<1)){for(var t=e.value,n=e.parent,r=e.column===n.column&&e.line===n.line;"rule"!==n.type;)if(!(n=n.parent))return;if((1!==e.props.length||58===t.charCodeAt(0)||J.get(n))&&!r){J.set(e,!0);for(var o=[],i=Q(t,o),a=n.props,s=0,u=0;s6)switch(c(e,t+1)){case 109:if(45!==c(e,t+4))break;case 102:return u(e,/(.+:)(.+)-([^]+)/,"$1"+U+"$2-$3$1"+j+(108==c(e,t+3)?"$3":"$2-$3"))+e;case 115:return~l(e,"stretch")?ne(u(e,"stretch","fill-available"),t)+e:e}break;case 4949:if(115!==c(e,t+1))break;case 6444:switch(c(e,d(e)-3-(~l(e,"!important")&&10))){case 107:return u(e,":",":"+U)+e;case 101:return u(e,/(.+:)([^;!]+)(;|!.+)?/,"$1"+U+(45===c(e,14)?"inline-":"")+"box$3$1"+U+"$2$3$1"+F+"$2box$3")+e}break;case 5936:switch(c(e,t+11)){case 114:return U+e+F+u(e,/[svh]\w+-[tblr]{2}/,"tb")+e;case 108:return U+e+F+u(e,/[svh]\w+-[tblr]{2}/,"tb-rl")+e;case 45:return U+e+F+u(e,/[svh]\w+-[tblr]{2}/,"lr")+e}return U+e+F+e+e}return e}var re=[function(e,t,n,r){if(e.length>-1&&!e.return)switch(e.type){case V:e.return=ne(e.value,e.length);break;case $:return H([x(e,{value:u(e.value,"@","@"+U)})],r);case B:if(e.length)return function(e,t){return e.map(t).join("")}(e.props,(function(t){switch(function(e,t){return(e=t.exec(e))?e[0]:e}(t,/(::plac\w+|:read-\w+)/)){case":read-only":case":read-write":return H([x(e,{props:[u(t,/:(read-\w+)/,":-moz-$1")]})],r);case"::placeholder":return H([x(e,{props:[u(t,/:(plac\w+)/,":"+U+"input-$1")]}),x(e,{props:[u(t,/:(plac\w+)/,":-moz-$1")]}),x(e,{props:[u(t,/:(plac\w+)/,F+"input-$1")]})],r)}return""}))}}],oe=function(e){var t=e.key;if("css"===t){var n=document.querySelectorAll("style[data-emotion]:not([data-s])");Array.prototype.forEach.call(n,(function(e){-1!==e.getAttribute("data-emotion").indexOf(" ")&&(document.head.appendChild(e),e.setAttribute("data-s",""))}))}var o=e.stylisPlugins||re;var i,a,s={},u=[];i=e.container||document.head,Array.prototype.forEach.call(document.querySelectorAll('style[data-emotion^="'+t+' "]'),(function(e){for(var t=e.getAttribute("data-emotion").split(" "),n=1;n{"use strict";function r(e){var t=Object.create(null);return function(n){return void 0===t[n]&&(t[n]=e(n)),t[n]}}n.d(t,{A:()=>r})},24553:(e,t,n)=>{"use strict";n.d(t,{AH:()=>T,Y:()=>O,i7:()=>k});var r=n(96540),o=n.t(r,2),i=n(84300);var a=function(e,t,n){var r=e.key+"-"+t.name;!1===n&&void 0===e.registered[r]&&(e.registered[r]=t.styles)};var 
s={animationIterationCount:1,aspectRatio:1,borderImageOutset:1,borderImageSlice:1,borderImageWidth:1,boxFlex:1,boxFlexGroup:1,boxOrdinalGroup:1,columnCount:1,columns:1,flex:1,flexGrow:1,flexPositive:1,flexShrink:1,flexNegative:1,flexOrder:1,gridRow:1,gridRowEnd:1,gridRowSpan:1,gridRowStart:1,gridColumn:1,gridColumnEnd:1,gridColumnSpan:1,gridColumnStart:1,msGridRow:1,msGridRowSpan:1,msGridColumn:1,msGridColumnSpan:1,fontWeight:1,lineHeight:1,opacity:1,order:1,orphans:1,tabSize:1,widows:1,zIndex:1,zoom:1,WebkitLineClamp:1,fillOpacity:1,floodOpacity:1,stopOpacity:1,strokeDasharray:1,strokeDashoffset:1,strokeMiterlimit:1,strokeOpacity:1,strokeWidth:1},u=n(36289),l=/[A-Z]|^ms/g,c=/_EMO_([^_]+?)_([^]*?)_EMO_/g,f=function(e){return 45===e.charCodeAt(1)},d=function(e){return null!=e&&"boolean"!==typeof e},h=(0,u.A)((function(e){return f(e)?e:e.replace(l,"-$&").toLowerCase()})),p=function(e,t){switch(e){case"animation":case"animationName":if("string"===typeof t)return t.replace(c,(function(e,t,n){return v={name:t,styles:n,next:v},t}))}return 1===s[e]||f(e)||"number"!==typeof t||0===t?t:t+"px"};function g(e,t,n){if(null==n)return"";if(void 0!==n.__emotion_styles)return n;switch(typeof n){case"boolean":return"";case"object":if(1===n.anim)return v={name:n.name,styles:n.styles,next:v},n.name;if(void 0!==n.styles){var r=n.next;if(void 0!==r)for(;void 0!==r;)v={name:r.name,styles:r.styles,next:v},r=r.next;return n.styles+";"}return function(e,t,n){var r="";if(Array.isArray(n))for(var o=0;o=4;++r,o-=4)t=1540483477*(65535&(t=255&e.charCodeAt(r)|(255&e.charCodeAt(++r))<<8|(255&e.charCodeAt(++r))<<16|(255&e.charCodeAt(++r))<<24))+(59797*(t>>>16)<<16),n=1540483477*(65535&(t^=t>>>24))+(59797*(t>>>16)<<16)^1540483477*(65535&n)+(59797*(n>>>16)<<16);switch(o){case 3:n^=(255&e.charCodeAt(r+2))<<16;case 2:n^=(255&e.charCodeAt(r+1))<<8;case 1:n=1540483477*(65535&(n^=255&e.charCodeAt(r)))+(59797*(n>>>16)<<16)}return(((n=1540483477*(65535&(n^=n>>>13))+(59797*(n>>>16)<<16))^n>>>15)>>>0).toString(36)}(o)+u;return{name:l,styles:o,next:v}},b=!!o.useInsertionEffect&&o.useInsertionEffect,_=b||function(e){return e()},w=(b||r.useLayoutEffect,{}.hasOwnProperty),x=r.createContext("undefined"!==typeof HTMLElement?(0,i.A)({key:"css"}):null);x.Provider;var S=function(e){return(0,r.forwardRef)((function(t,n){var o=(0,r.useContext)(x);return e(t,o,n)}))};var A=r.createContext({});var E="__EMOTION_TYPE_PLEASE_DO_NOT_USE__",C=function(e){var t=e.cache,n=e.serialized,r=e.isStringTag;return a(t,n,r),_((function(){return function(e,t,n){a(e,t,n);var r=e.key+"-"+t.name;if(void 0===e.inserted[t.name]){var o=t;do{e.insert(t===o?"."+r:"",o,e.sheet,!0),o=o.next}while(void 0!==o)}}(t,n,r)})),null};var R=S((function(e,t,n){var o=e.css;"string"===typeof o&&void 0!==t.registered[o]&&(o=t.registered[o]);var i=e[E],a=[o],s="";"string"===typeof e.className?s=function(e,t,n){var r="";return n.split(" ").forEach((function(n){void 0!==e[n]?t.push(e[n]+";"):r+=n+" "})),r}(t.registered,a,e.className):null!=e.className&&(s=e.className+" ");var u=m(a,void 0,r.useContext(A));s+=t.key+"-"+u.name;var l={};for(var c in e)w.call(e,c)&&"css"!==c&&c!==E&&(l[c]=e[c]);return l.ref=n,l.className=s,r.createElement(r.Fragment,null,r.createElement(C,{cache:t,serialized:u,isStringTag:"string"===typeof i}),r.createElement(i,l))})),O=(n(4146),function(e,t){var n=arguments;if(null==t||!w.call(t,"css"))return r.createElement.apply(void 0,n);var o=n.length,i=new Array(o);i[0]=R,i[1]=function(e,t){var n={};for(var r in t)w.call(t,r)&&(n[r]=t[r]);return n[E]=e,n}(e,t);for(var 
a=2;a{"use strict";function r(){return r=Object.assign?Object.assign.bind():function(e){for(var t=1;ts,B6:()=>S,Gh:()=>T,HS:()=>k,Oi:()=>i,Rr:()=>u,pX:()=>D,pb:()=>E,rc:()=>o,tH:()=>N,tW:()=>x,ue:()=>c,yD:()=>O}),function(e){e.Pop="POP",e.Push="PUSH",e.Replace="REPLACE"}(o||(o={}));function i(e,t){if(!1===e||null===e||"undefined"===typeof e)throw new Error(t)}function a(e,t){if(!e){"undefined"!==typeof console&&console.warn(t);try{throw new Error(t)}catch(n){}}}function s(e){let{pathname:t="/",search:n="",hash:r=""}=e;return n&&"?"!==n&&(t+="?"===n.charAt(0)?n:"?"+n),r&&"#"!==r&&(t+="#"===r.charAt(0)?r:"#"+r),t}function u(e){let t={};if(e){let n=e.indexOf("#");n>=0&&(t.hash=e.substr(n),e=e.substr(0,n));let r=e.indexOf("?");r>=0&&(t.search=e.substr(r),e=e.substr(0,r)),e&&(t.pathname=e)}return t}var l;!function(e){e.data="data",e.deferred="deferred",e.redirect="redirect",e.error="error"}(l||(l={}));new Set(["lazy","caseSensitive","path","id","index","children"]);function c(e,t,n){void 0===n&&(n="/");let r=E(("string"===typeof t?u(t):t).pathname||"/",n);if(null==r)return null;let o=f(e);!function(e){e.sort(((e,t)=>e.score!==t.score?t.score-e.score:function(e,t){let n=e.length===t.length&&e.slice(0,-1).every(((e,n)=>e===t[n]));return n?e[e.length-1]-t[t.length-1]:0}(e.routesMeta.map((e=>e.childrenIndex)),t.routesMeta.map((e=>e.childrenIndex)))))}(o);let i=null;for(let a=0;null==i&&a{let s={relativePath:void 0===a?e.path||"":a,caseSensitive:!0===e.caseSensitive,childrenIndex:o,route:e};s.relativePath.startsWith("/")&&(i(s.relativePath.startsWith(r),'Absolute route path "'+s.relativePath+'" nested under path "'+r+'" is not valid. An absolute child route path must start with the combined path of all its parent routes.'),s.relativePath=s.relativePath.slice(r.length));let u=k([r,s.relativePath]),l=n.concat(s);e.children&&e.children.length>0&&(i(!0!==e.index,'Index routes must not have child routes. Please remove all child routes from route path "'+u+'".'),f(e.children,t,l,u)),(null!=e.path||e.index)&&t.push({path:u,score:_(u,e.index),routesMeta:l})};return e.forEach(((e,t)=>{var n;if(""!==e.path&&null!=(n=e.path)&&n.includes("?"))for(let r of d(e.path))o(e,t,r);else o(e,t)})),t}function d(e){let t=e.split("/");if(0===t.length)return[];let[n,...r]=t,o=n.endsWith("?"),i=n.replace(/\?$/,"");if(0===r.length)return o?[i,""]:[i];let a=d(r.join("/")),s=[];return s.push(...a.map((e=>""===e?i:[i,e].join("/")))),o&&s.push(...a),s.map((t=>e.startsWith("/")&&""===t?"/":t))}const h=/^:[\w-]+$/,p=3,g=2,v=1,y=10,m=-2,b=e=>"*"===e;function _(e,t){let n=e.split("/"),r=n.length;return n.some(b)&&(r+=m),t&&(r+=g),n.filter((e=>!b(e))).reduce(((e,t)=>e+(h.test(t)?p:""===t?v:y)),r)}function w(e,t){let{routesMeta:n}=e,r={},o="/",i=[];for(let a=0;anull==e?"":"string"===typeof e?e:String(e);return r+n.split(/\/+/).map(((e,n,r)=>{if(n===r.length-1&&"*"===e){return o(t["*"])}const a=e.match(/^:([\w-]+)(\??)$/);if(a){const[,e,n]=a;let r=t[e];return i("?"===n||null!=r,'Missing ":'+e+'" param'),o(r)}return e.replace(/\?$/g,"")})).filter((e=>!!e)).join("/")}function S(e,t){"string"===typeof e&&(e={path:e,caseSensitive:!1,end:!0});let[n,r]=function(e,t,n){void 0===t&&(t=!1);void 0===n&&(n=!0);a("*"===e||!e.endsWith("*")||e.endsWith("/*"),'Route path "'+e+'" will be treated as if it were "'+e.replace(/\*$/,"/*")+'" because the `*` character must always follow a `/` in the pattern. 
To get rid of this warning, please change the route path to "'+e.replace(/\*$/,"/*")+'".');let r=[],o="^"+e.replace(/\/*\*?$/,"").replace(/^\/*/,"/").replace(/[\\.*+^${}|()[\]]/g,"\\$&").replace(/\/:([\w-]+)(\?)?/g,((e,t,n)=>(r.push({paramName:t,isOptional:null!=n}),n?"/?([^\\/]+)?":"/([^\\/]+)")));e.endsWith("*")?(r.push({paramName:"*"}),o+="*"===e||"/*"===e?"(.*)$":"(?:\\/(.+)|\\/*)$"):n?o+="\\/*$":""!==e&&"/"!==e&&(o+="(?:(?=\\/|$))");let i=new RegExp(o,t?void 0:"i");return[i,r]}(e.path,e.caseSensitive,e.end),o=t.match(n);if(!o)return null;let i=o[0],s=i.replace(/(.)\/+$/,"$1"),u=o.slice(1);return{params:r.reduce(((e,t,n)=>{let{paramName:r,isOptional:o}=t;if("*"===r){let e=u[n]||"";s=i.slice(0,i.length-e.length).replace(/(.)\/+$/,"$1")}const a=u[n];return e[r]=o&&!a?void 0:(a||"").replace(/%2F/g,"/"),e}),{}),pathname:i,pathnameBase:s,pattern:e}}function A(e){try{return e.split("/").map((e=>decodeURIComponent(e).replace(/\//g,"%2F"))).join("/")}catch(t){return a(!1,'The URL path "'+e+'" could not be decoded because it is is a malformed URL segment. This is probably due to a bad percent encoding ('+t+")."),e}}function E(e,t){if("/"===t)return e;if(!e.toLowerCase().startsWith(t.toLowerCase()))return null;let n=t.endsWith("/")?t.length-1:t.length,r=e.charAt(n);return r&&"/"!==r?null:e.slice(n)||"/"}function C(e,t,n,r){return"Cannot include a '"+e+"' character in a manually specified `to."+t+"` field ["+JSON.stringify(r)+"]. Please separate it out to the `to."+n+'` field. Alternatively you may provide the full path as a string in and the router will parse it for you.'}function R(e){return e.filter(((e,t)=>0===t||e.route.path&&e.route.path.length>0))}function O(e,t){let n=R(e);return t?n.map(((t,n)=>n===e.length-1?t.pathname:t.pathnameBase)):n.map((e=>e.pathnameBase))}function T(e,t,n,o){let a;void 0===o&&(o=!1),"string"===typeof e?a=u(e):(a=r({},e),i(!a.pathname||!a.pathname.includes("?"),C("?","pathname","search",a)),i(!a.pathname||!a.pathname.includes("#"),C("#","pathname","hash",a)),i(!a.search||!a.search.includes("#"),C("#","search","hash",a)));let s,l=""===e||""===a.pathname,c=l?"/":a.pathname;if(null==c)s=n;else{let e=t.length-1;if(!o&&c.startsWith("..")){let t=c.split("/");for(;".."===t[0];)t.shift(),e-=1;a.pathname=t.join("/")}s=e>=0?t[e]:"/"}let f=function(e,t){void 0===t&&(t="/");let{pathname:n,search:r="",hash:o=""}="string"===typeof e?u(e):e,i=n?n.startsWith("/")?n:function(e,t){let n=t.replace(/\/+$/,"").split("/");return e.split("/").forEach((e=>{".."===e?n.length>1&&n.pop():"."!==e&&n.push(e)})),n.length>1?n.join("/"):"/"}(n,t):t;return{pathname:i,search:P(r),hash:I(o)}}(a,s),d=c&&"/"!==c&&c.endsWith("/"),h=(l||"."===c)&&n.endsWith("/");return f.pathname.endsWith("/")||!d&&!h||(f.pathname+="/"),f}const k=e=>e.join("/").replace(/\/\/+/g,"/"),M=e=>e.replace(/\/+$/,"").replace(/^\/*/,"/"),P=e=>e&&"?"!==e?e.startsWith("?")?e:"?"+e:"",I=e=>e&&"#"!==e?e.startsWith("#")?e:"#"+e:"";class N extends Error{}function D(e){return null!=e&&"number"===typeof e.status&&"string"===typeof e.statusText&&"boolean"===typeof e.internal&&"data"in e}const L=["post","put","patch","delete"],F=(new Set(L),["get",...L]);new Set(F),new Set([301,302,303,307,308]),new Set([307,308]);Symbol("deferred")},10390:(e,t,n)=>{"use strict";n.d(t,{Ts:()=>Zt,mn:()=>Qt});var r=n(17412),o=n(6936),i=n(39653),a=n(92220),s=n(42531),u=n(86641);const l=[];function c(e){const t=e.defaultIntegrations||[],n=e.integrations;let r;t.forEach((e=>{e.isDefaultInstance=!0})),r=Array.isArray(n)?[...t,...n]:"function"===typeof 
n?(0,o.k9)(n(t)):t;const i=function(e){const t={};return e.forEach((e=>{const{name:n}=e,r=t[n];r&&!r.isDefaultInstance&&e.isDefaultInstance||(t[n]=e)})),Object.keys(t).map((e=>t[e]))}(r),a=function(e,t){for(let n=0;n"Debug"===e.name));if(-1!==a){const[e]=i.splice(a,1);i.push(e)}return i}function f(e,t){for(const n of t)n&&n.afterAllSetup&&n.afterAllSetup(e)}function d(e,t,n){if(n[t.name])a.T&&r.vF.log(`Integration skipped because it was already installed: ${t.name}`);else{if(n[t.name]=t,-1===l.indexOf(t.name)&&(t.setupOnce(s.lb,u.BF),l.push(t.name)),t.setup&&"function"===typeof t.setup&&t.setup(e),e.on&&"function"===typeof t.preprocessEvent){const n=t.preprocessEvent.bind(t);e.on("preprocessEvent",((t,r)=>n(t,r,e)))}if(e.addEventProcessor&&"function"===typeof t.processEvent){const n=t.processEvent.bind(t),r=Object.assign(((t,r)=>n(t,r,e)),{id:t.name});e.addEventProcessor(r)}a.T&&r.vF.log(`Integration installed: ${t.name}`)}}function h(e,t){return Object.assign((function(...e){return t(...e)}),{id:e})}const p=[/^Script error\.?$/,/^Javascript error: Script error\.? on line 0$/,/^ResizeObserver loop completed with undelivered notifications.$/,/^Cannot redefine property: googletag$/],g=[/^.*\/healthcheck$/,/^.*\/healthy$/,/^.*\/live$/,/^.*\/ready$/,/^.*\/heartbeat$/,/^.*\/health$/,/^.*\/healthz$/],v="InboundFilters",y=(e={})=>({name:v,setupOnce(){},processEvent(t,n,s){const u=s.getOptions(),l=function(e={},t={}){return{allowUrls:[...e.allowUrls||[],...t.allowUrls||[]],denyUrls:[...e.denyUrls||[],...t.denyUrls||[]],ignoreErrors:[...e.ignoreErrors||[],...t.ignoreErrors||[],...e.disableErrorDefaults?[]:p],ignoreTransactions:[...e.ignoreTransactions||[],...t.ignoreTransactions||[],...e.disableTransactionDefaults?[]:g],ignoreInternal:void 0===e.ignoreInternal||e.ignoreInternal}}(e,u);return function(e,t){if(t.ignoreInternal&&function(e){try{return"SentryError"===e.exception.values[0].type}catch(t){}return!1}(e))return a.T&&r.vF.warn(`Event dropped due to being internal Sentry Error.\nEvent: ${(0,o.$X)(e)}`),!0;if(function(e,t){if(e.type||!t||!t.length)return!1;return function(e){const t=[];e.message&&t.push(e.message);let n;try{n=e.exception.values[e.exception.values.length-1]}catch(i){}n&&n.value&&(t.push(n.value),n.type&&t.push(`${n.type}: ${n.value}`));a.T&&0===t.length&&r.vF.error(`Could not extract message for event ${(0,o.$X)(e)}`);return t}(e).some((e=>(0,i.Xr)(e,t)))}(e,t.ignoreErrors))return a.T&&r.vF.warn(`Event dropped due to being matched by \`ignoreErrors\` option.\nEvent: ${(0,o.$X)(e)}`),!0;if(function(e,t){if("transaction"!==e.type||!t||!t.length)return!1;const n=e.transaction;return!!n&&(0,i.Xr)(n,t)}(e,t.ignoreTransactions))return a.T&&r.vF.warn(`Event dropped due to being matched by \`ignoreTransactions\` option.\nEvent: ${(0,o.$X)(e)}`),!0;if(function(e,t){if(!t||!t.length)return!1;const n=m(e);return!!n&&(0,i.Xr)(n,t)}(e,t.denyUrls))return a.T&&r.vF.warn(`Event dropped due to being matched by \`denyUrls\` option.\nEvent: ${(0,o.$X)(e)}.\nUrl: ${m(e)}`),!0;if(!function(e,t){if(!t||!t.length)return!0;const n=m(e);return!n||(0,i.Xr)(n,t)}(e,t.allowUrls))return a.T&&r.vF.warn(`Event dropped due to not being matched by \`allowUrls\` option.\nEvent: ${(0,o.$X)(e)}.\nUrl: ${m(e)}`),!0;return!1}(t,l)?null:t}});h(v,y);function m(e){try{let n;try{n=e.exception.values[0].stacktrace.frames}catch(t){}return n?function(e=[]){for(let t=e.length-1;t>=0;t--){const n=e[t];if(n&&""!==n.filename&&"[native code]"!==n.filename)return n.filename||null}return null}(n):null}catch(n){return 
a.T&&r.vF.error(`Cannot extract url for event ${(0,o.$X)(e)}`),null}}var b=n(68411),_=n(83741);let w;const x="FunctionToString",S=new WeakMap,A=()=>({name:x,setupOnce(){w=Function.prototype.toString;try{Function.prototype.toString=function(...e){const t=(0,b.sp)(this),n=S.has((0,_.KU)())&&void 0!==t?t:this;return w.apply(n,e)}}catch(e){}},setup(e){S.set(e,!0)}});h(x,A);function E(e,t){!0===t.debug&&(a.T?r.vF.enable():(0,r.pq)((()=>{console.warn("[Sentry] Cannot initialize SDK with `debug` option using a non-debug bundle.")})));(0,_.o5)().update(t.initialScope);const n=new e(t);!function(e){const t=(0,u.BF)().getStackTop();t.client=e,t.scope.setClient(e)}(n),function(e){e.init?e.init():e.setupIntegrations&&e.setupIntegrations()}(n)}var C=n(5644);const R=/^(?:(\w+):)\/\/(?:(\w+)(?::(\w+)?)?@)([\w.-]+)(?::(\d+))?\/(.+)/;function O(e,t=!1){const{host:n,path:r,pass:o,port:i,projectId:a,protocol:s,publicKey:u}=e;return`${s}://${u}${t&&o?`:${o}`:""}@${n}${i?`:${i}`:""}/${r?`${r}/`:r}${a}`}function T(e){return{protocol:e.protocol,publicKey:e.publicKey||"",pass:e.pass||"",host:e.host,port:e.port||"",path:e.path||"",projectId:e.projectId}}function k(e){const t="string"===typeof e?function(e){const t=R.exec(e);if(!t)return void(0,r.pq)((()=>{console.error(`Invalid Sentry Dsn: ${e}`)}));const[n,o,i="",a,s="",u]=t.slice(1);let l="",c=u;const f=c.split("/");if(f.length>1&&(l=f.slice(0,-1).join("/"),c=f.pop()),c){const e=c.match(/^\d+/);e&&(c=e[0])}return T({host:a,pass:i,path:l,projectId:c,port:s,protocol:n,publicKey:o})}(e):T(e);if(t&&function(e){if(!C.T)return!0;const{port:t,projectId:n,protocol:o}=e;return!["protocol","publicKey","host","projectId"].find((t=>!e[t]&&(r.vF.error(`Invalid Sentry Dsn: ${t} missing`),!0)))&&(n.match(/^\d+$/)?function(e){return"http"===e||"https"===e}(o)?!t||!isNaN(parseInt(t,10))||(r.vF.error(`Invalid Sentry Dsn: Invalid port ${t}`),!1):(r.vF.error(`Invalid Sentry Dsn: Invalid protocol ${o}`),!1):(r.vF.error(`Invalid Sentry Dsn: Invalid projectId ${n}`),!1))}(t))return t}const M="7";function P(e){const t=e.protocol?`${e.protocol}:`:"",n=e.port?`:${e.port}`:"";return`${t}//${e.host}${n}${e.path?`/${e.path}`:""}/api/`}function I(e,t={}){const n="string"===typeof t?t:t.tunnel,r="string"!==typeof t&&t._metadata?t._metadata.sdk:void 0;return n||`${function(e){return`${P(e)}${e.projectId}/envelope/`}(e)}?${function(e,t){return(0,b.u4)({sentry_key:e.publicKey,sentry_version:M,...t&&{sentry_client:`${t.name}/${t.version}`}})}(e,r)}`}var N=n(83917),D=n(16341);const L=(0,D.VZ)();function F(){if(!("fetch"in L))return!1;try{return new Headers,new Request("http://www.example.com"),new Response,!0}catch(e){return!1}}function j(e){return e&&/^function fetch\(\)\s+\{\s+\[native code\]\s+\}$/.test(e.toString())}const U=(0,D.VZ)();const z={},B={};function V(e,t){z[e]=z[e]||[],z[e].push(t)}function $(e,t){B[e]||(t(),B[e]=!0)}function H(e,t){const n=e&&z[e];if(n)for(const i of n)try{i(t)}catch(o){C.T&&r.vF.error(`Error while triggering instrumentation handler.\nType: ${e}\nName: ${(0,N.qQ)(i)}\nError:`,o)}}const W=D.OW;let q;function G(e){const t="history";V(t,e),$(t,Y)}function Y(){if(!function(){const e=U.chrome,t=e&&e.app&&e.app.runtime,n="history"in U&&!!U.history.pushState&&!!U.history.replaceState;return!t&&n}())return;const e=W.onpopstate;function t(e){return function(...t){const n=t.length>2?t[2]:void 0;if(n){const e=q,t=String(n);q=t;H("history",{from:e,to:t})}return e.apply(this,t)}}W.onpopstate=function(...t){const 
n=W.location.href,r=q;q=n;if(H("history",{from:r,to:n}),e)try{return e.apply(this,t)}catch(o){}},(0,b.GS)(W.history,"pushState",t),(0,b.GS)(W.history,"replaceState",t)}var X=n(73816),K=n(60690),Z=n(42181);function Q(e,t=[]){return[e,t]}function J(e,t){const[n,r]=e;return[n,[...r,t]]}function ee(e,t){const n=e[1];for(const r of n){if(t(r,r[0].type))return!0}return!1}function te(e,t){return(t||new TextEncoder).encode(e)}function ne(e,t){const[n,r]=e;let o=JSON.stringify(n);function i(e){"string"===typeof o?o="string"===typeof e?o+e:[te(o,t),e]:o.push("string"===typeof e?te(e,t):e)}for(const s of r){const[e,t]=s;if(i(`\n${JSON.stringify(e)}\n`),"string"===typeof t||t instanceof Uint8Array)i(t);else{let e;try{e=JSON.stringify(t)}catch(a){e=JSON.stringify((0,Z.S8)(t))}i(e)}}return"string"===typeof o?o:function(e){const t=e.reduce(((e,t)=>e+t.length),0),n=new Uint8Array(t);let r=0;for(const o of e)n.set(o,r),r+=o.length;return n}(o)}function re(e,t){const n="string"===typeof e.data?te(e.data,t):e.data;return[(0,b.Ce)({type:"attachment",length:n.length,filename:e.filename,content_type:e.contentType,attachment_type:e.attachmentType}),n]}const oe={session:"session",sessions:"session",attachment:"attachment",transaction:"transaction",event:"error",client_report:"internal",user_report:"default",profile:"profile",replay_event:"replay",replay_recording:"replay",check_in:"monitor",feedback:"feedback",span:"span",statsd:"metric_bucket"};function ie(e){return oe[e]}function ae(e){if(!e||!e.sdk)return;const{name:t,version:n}=e.sdk;return{name:t,version:n}}class se extends Error{constructor(e,t="warn"){super(e),this.message=e,this.name=new.target.prototype.constructor.name,Object.setPrototypeOf(this,new.target.prototype),this.logLevel=t}}function ue(e,t,n,r){const o=ae(n),i=e.type&&"replay_event"!==e.type?e.type:"event";!function(e,t){t&&(e.sdk=e.sdk||{},e.sdk.name=e.sdk.name||t.name,e.sdk.version=e.sdk.version||t.version,e.sdk.integrations=[...e.sdk.integrations||[],...t.integrations||[]],e.sdk.packages=[...e.sdk.packages||[],...t.packages||[]])}(e,n&&n.sdk);const a=function(e,t,n,r){const o=e.sdkProcessingMetadata&&e.sdkProcessingMetadata.dynamicSamplingContext;return{event_id:e.event_id,sent_at:(new Date).toISOString(),...t&&{sdk:t},...!!n&&r&&{dsn:O(r)},...o&&{trace:(0,b.Ce)({...o})}}}(e,o,r,t);delete e.sdkProcessingMetadata;return Q(a,[[{type:i},e]])}function le(e,t,n,r){const o={sent_at:(new Date).toISOString()};n&&n.sdk&&(o.sdk={name:n.sdk.name,version:n.sdk.version}),r&&t&&(o.dsn=O(t));const i=function(e){const t=function(e){let t="";for(const n of e){const e=Object.entries(n.tags),r=e.length>0?`|#${e.map((([e,t])=>`${e}:${t}`)).join(",")}`:"";t+=`${n.name}@${n.unit}:${n.metric}|${n.metricType}${r}|T${n.timestamp}\n`}return t}(e);return[{type:"statsd",length:t.length},t]}(e);return Q(o,[i])}var ce=n(7058),fe=n(75452),de=n(49475);const he="Not capturing exception because it's already been captured.";class pe{constructor(e){if(this._options=e,this._integrations={},this._integrationsInitialized=!1,this._numProcessing=0,this._outcomes={},this._hooks={},this._eventProcessors=[],e.dsn?this._dsn=k(e.dsn):a.T&&r.vF.warn("No DSN provided, client will not send events."),this._dsn){const t=I(this._dsn,e);this._transport=e.transport({recordDroppedEvent:this.recordDroppedEvent.bind(this),...e.transportOptions,url:t})}}captureException(e,t,n){if((0,o.GR)(e))return void(a.T&&r.vF.log(he));let i=t&&t.event_id;return 
this._process(this.eventFromException(e,t).then((e=>this._captureEvent(e,t,n))).then((e=>{i=e}))),i}captureMessage(e,t,n,r){let o=n&&n.event_id;const i=(0,X.NF)(e)?e:String(e),a=(0,X.sO)(e)?this.eventFromMessage(i,t,n):this.eventFromException(e,n);return this._process(a.then((e=>this._captureEvent(e,n,r))).then((e=>{o=e}))),o}captureEvent(e,t,n){if(t&&t.originalException&&(0,o.GR)(t.originalException))return void(a.T&&r.vF.log(he));let i=t&&t.event_id;const s=(e.sdkProcessingMetadata||{}).capturedSpanScope;return this._process(this._captureEvent(e,t,s||n).then((e=>{i=e}))),i}captureSession(e){"string"!==typeof e.release?a.T&&r.vF.warn("Discarded session because of missing or non-string release"):(this.sendSession(e),(0,ce.qO)(e,{init:!1}))}getDsn(){return this._dsn}getOptions(){return this._options}getSdkMetadata(){return this._options._metadata}getTransport(){return this._transport}flush(e){const t=this._transport;return t?(this.metricsAggregator&&this.metricsAggregator.flush(),this._isClientDoneProcessing(e).then((n=>t.flush(e).then((e=>n&&e))))):(0,K.XW)(!0)}close(e){return this.flush(e).then((e=>(this.getOptions().enabled=!1,this.metricsAggregator&&this.metricsAggregator.close(),e)))}getEventProcessors(){return this._eventProcessors}addEventProcessor(e){this._eventProcessors.push(e)}setupIntegrations(e){(e&&!this._integrationsInitialized||this._isEnabled()&&!this._integrationsInitialized)&&this._setupIntegrations()}init(){this._isEnabled()&&this._setupIntegrations()}getIntegrationById(e){return this.getIntegrationByName(e)}getIntegrationByName(e){return this._integrations[e]}getIntegration(e){try{return this._integrations[e.id]||null}catch(t){return a.T&&r.vF.warn(`Cannot retrieve integration ${e.id} from the current Client`),null}}addIntegration(e){const t=this._integrations[e.name];d(this,e,this._integrations),t||f(this,[e])}sendEvent(e,t={}){this.emit("beforeSendEvent",e,t);let n=ue(e,this._dsn,this._options._metadata,this._options.tunnel);for(const o of t.attachments||[])n=J(n,re(o,this._options.transportOptions&&this._options.transportOptions.textEncoder));const r=this._sendEnvelope(n);r&&r.then((t=>this.emit("afterSendEvent",e,t)),null)}sendSession(e){const t=function(e,t,n,r){const o=ae(n);return Q({sent_at:(new Date).toISOString(),...o&&{sdk:o},...!!r&&t&&{dsn:O(t)}},["aggregates"in e?[{type:"sessions"},e]:[{type:"session"},e.toJSON()]])}(e,this._dsn,this._options._metadata,this._options.tunnel);this._sendEnvelope(t)}recordDroppedEvent(e,t,n){if(this._options.sendClientReports){const n=`${e}:${t}`;a.T&&r.vF.log(`Adding outcome: "${n}"`),this._outcomes[n]=this._outcomes[n]+1||1}}captureAggregateMetrics(e){a.T&&r.vF.log(`Flushing aggregated metrics, number of metrics: ${e.length}`);const t=le(e,this._dsn,this._options._metadata,this._options.tunnel);this._sendEnvelope(t)}on(e,t){this._hooks[e]||(this._hooks[e]=[]),this._hooks[e].push(t)}emit(e,...t){this._hooks[e]&&this._hooks[e].forEach((e=>e(...t)))}_setupIntegrations(){const{integrations:e}=this._options;this._integrations=function(e,t){const n={};return t.forEach((t=>{t&&d(e,t,n)})),n}(this,e),f(this,e),this._integrationsInitialized=!0}_updateSessionFromEvent(e,t){let n=!1,r=!1;const o=t.exception&&t.exception.values;if(o){r=!0;for(const e of o){const t=e.mechanism;if(t&&!1===t.handled){n=!0;break}}}const i="ok"===e.status;(i&&0===e.errors||i&&n)&&((0,ce.qO)(e,{...n&&{status:"crashed"},errors:e.errors||Number(r||n)}),this.captureSession(e))}_isClientDoneProcessing(e){return new K.T2((t=>{let n=0;const 
r=setInterval((()=>{0==this._numProcessing?(clearInterval(r),t(!0)):(n+=1,e&&n>=e&&(clearInterval(r),t(!1)))}),1)}))}_isEnabled(){return!1!==this.getOptions().enabled&&void 0!==this._transport}_prepareEvent(e,t,n,r=(0,u.rm)()){const o=this.getOptions(),i=Object.keys(this._integrations);return!t.integrations&&i.length>0&&(t.integrations=i),this.emit("preprocessEvent",e,t),(0,de.mG)(o,e,t,n,this,r).then((e=>{if(null===e)return e;const t={...r.getPropagationContext(),...n?n.getPropagationContext():void 0};if(!(e.contexts&&e.contexts.trace)&&t){const{traceId:r,spanId:o,parentSpanId:i,dsc:a}=t;e.contexts={trace:{trace_id:r,span_id:o,parent_span_id:i},...e.contexts};const s=a||(0,fe.l)(r,this,n);e.sdkProcessingMetadata={dynamicSamplingContext:s,...e.sdkProcessingMetadata}}return e}))}_captureEvent(e,t={},n){return this._processEvent(e,t,n).then((e=>e.event_id),(e=>{if(a.T){const t=e;"log"===t.logLevel?r.vF.log(t.message):r.vF.warn(t)}}))}_processEvent(e,t,n){const r=this.getOptions(),{sampleRate:o}=r,i=ve(e),a=ge(e),s=e.type||"error",u=`before send for type \`${s}\``;if(a&&"number"===typeof o&&Math.random()>o)return this.recordDroppedEvent("sample_rate","error",e),(0,K.xg)(new se(`Discarding event because it's not included in the random sample (sampling rate = ${o})`,"log"));const l="replay_event"===s?"replay":s,c=(e.sdkProcessingMetadata||{}).capturedSpanIsolationScope;return this._prepareEvent(e,t,n,c).then((n=>{if(null===n)throw this.recordDroppedEvent("event_processor",l,e),new se("An event processor returned `null`, will not send event.","log");if(t.data&&!0===t.data.__sentry__)return n;const o=function(e,t,n){const{beforeSend:r,beforeSendTransaction:o}=e;if(ge(t)&&r)return r(t,n);if(ve(t)&&o)return o(t,n);return t}(r,n,t);return function(e,t){const n=`${t} must return \`null\` or a valid event.`;if((0,X.Qg)(e))return e.then((e=>{if(!(0,X.Qd)(e)&&null!==e)throw new se(n);return e}),(e=>{throw new se(`${t} rejected with ${e}`)}));if(!(0,X.Qd)(e)&&null!==e)throw new se(n);return e}(o,u)})).then((r=>{if(null===r)throw this.recordDroppedEvent("before_send",l,e),new se(`${u} returned \`null\`, will not send event.`,"log");const o=n&&n.getSession();!i&&o&&this._updateSessionFromEvent(o,r);const a=r.transaction_info;if(i&&a&&r.transaction!==e.transaction){const e="custom";r.transaction_info={...a,source:e}}return this.sendEvent(r,t),r})).then(null,(e=>{if(e instanceof se)throw e;throw this.captureException(e,{data:{__sentry__:!0},originalException:e}),new se(`Event processing pipeline threw an error, original event will not be sent. 
Details have been sent as a new event.\nReason: ${e}`)}))}_process(e){this._numProcessing++,e.then((e=>(this._numProcessing--,e)),(e=>(this._numProcessing--,e)))}_sendEnvelope(e){if(this.emit("beforeEnvelope",e),this._isEnabled()&&this._transport)return this._transport.send(e).then(null,(e=>{a.T&&r.vF.error("Error while sending event:",e)}));a.T&&r.vF.error("Transport disabled")}_clearOutcomes(){const e=this._outcomes;return this._outcomes={},Object.keys(e).map((t=>{const[n,r]=t.split(":");return{reason:n,category:r,quantity:e[t]}}))}}function ge(e){return void 0===e.type}function ve(e){return"transaction"===e.type}var ye=n(40601);var me=n(89297);const be="undefined"===typeof __SENTRY_DEBUG__||__SENTRY_DEBUG__;function _e(e,t){const n=xe(e,t),r={type:t&&t.name,value:Ae(t)};return n.length&&(r.stacktrace={frames:n}),void 0===r.type&&""===r.value&&(r.value="Unrecoverable error caught"),r}function we(e,t){return{exception:{values:[_e(e,t)]}}}function xe(e,t){const n=t.stacktrace||t.stack||"",r=function(e){if(e){if("number"===typeof e.framesToPop)return e.framesToPop;if(Se.test(e.message))return 1}return 0}(t);try{return e(n,r)}catch(o){}return[]}const Se=/Minified React error #\d+;/i;function Ae(e){const t=e&&e.message;return t?t.error&&"string"===typeof t.error.message?t.error.message:t:"No error message"}function Ee(e,t,n,r,i){let a;if((0,X.T2)(t)&&t.error){return we(e,t.error)}if((0,X.BD)(t)||(0,X.W6)(t)){const i=t;if("stack"in t)a=we(e,t);else{const t=i.name||((0,X.BD)(i)?"DOMError":"DOMException"),s=i.message?`${t}: ${i.message}`:t;a=Ce(e,s,n,r),(0,o.gO)(a,s)}return"code"in i&&(a.tags={...a.tags,"DOMException.code":`${i.code}`}),a}if((0,X.bJ)(t))return we(e,t);if((0,X.Qd)(t)||(0,X.xH)(t)){return a=function(e,t,n,r){const o=(0,_.KU)(),i=o&&o.getOptions().normalizeDepth,a={exception:{values:[{type:(0,X.xH)(t)?t.constructor.name:r?"UnhandledRejection":"Error",value:Re(t,{isUnhandledRejection:r})}]},extra:{__serialized__:(0,Z.cd)(t,i)}};if(n){const t=xe(e,n);t.length&&(a.exception.values[0].stacktrace={frames:t})}return a}(e,t,n,i),(0,o.M6)(a,{synthetic:!0}),a}return a=Ce(e,t,n,r),(0,o.gO)(a,`${t}`,void 0),(0,o.M6)(a,{synthetic:!0}),a}function Ce(e,t,n,r){const o={};if(r&&n){const r=xe(e,n);r.length&&(o.exception={values:[{value:t,stacktrace:{frames:r}}]})}if((0,X.NF)(t)){const{__sentry_template_string__:e,__sentry_template_values__:n}=t;return o.logentry={message:e,params:n},o}return o.message=t,o}function Re(e,{isUnhandledRejection:t}){const n=(0,b.HF)(e),r=t?"promise rejection":"exception";if((0,X.T2)(e))return`Event \`ErrorEvent\` captured as ${r} with message \`${e.message}\``;if((0,X.xH)(e)){return`Event \`${function(e){try{const t=Object.getPrototypeOf(e);return t?t.constructor.name:void 0}catch(t){}}(e)}\` (type=${e.type}) captured as ${r}`}return`Object captured as ${r} with keys: ${n}`}const Oe=D.OW;let Te=0;function ke(){return Te>0}function Me(e,t={},n){if("function"!==typeof e)return e;try{const t=e.__sentry_wrapped__;if(t)return t;if((0,b.sp)(e))return e}catch(i){return e}const r=function(){const r=Array.prototype.slice.call(arguments);try{n&&"function"===typeof n&&n.apply(this,arguments);const o=r.map((e=>Me(e,t)));return e.apply(this,o)}catch(i){throw Te++,setTimeout((()=>{Te--})),(0,_.v4)((e=>{e.addEventProcessor((e=>(t.mechanism&&((0,o.gO)(e,void 0,void 0),(0,o.M6)(e,t.mechanism)),e.extra={...e.extra,arguments:r},e))),(0,_.Cp)(i)})),i}};try{for(const t in 
e)Object.prototype.hasOwnProperty.call(e,t)&&(r[t]=e[t])}catch(a){}(0,b.pO)(r,e),(0,b.my)(e,"__sentry_wrapped__",r);try{Object.getOwnPropertyDescriptor(r,"name").configurable&&Object.defineProperty(r,"name",{get:()=>e.name})}catch(a){}return r}class Pe extends pe{constructor(e){const t=Oe.SENTRY_SDK_SOURCE||"npm";(0,ye.K)(e,"browser",["browser"],t),super(e),e.sendClientReports&&Oe.document&&Oe.document.addEventListener("visibilitychange",(()=>{"hidden"===Oe.document.visibilityState&&this._flushOutcomes()}))}eventFromException(e,t){return function(e,t,n,r){const i=Ee(e,t,n&&n.syntheticException||void 0,r);return(0,o.M6)(i),i.level="error",n&&n.event_id&&(i.event_id=n.event_id),(0,K.XW)(i)}(this._options.stackParser,e,t,this._options.attachStacktrace)}eventFromMessage(e,t="info",n){return function(e,t,n="info",r,o){const i=Ce(e,t,r&&r.syntheticException||void 0,o);return i.level=n,r&&r.event_id&&(i.event_id=r.event_id),(0,K.XW)(i)}(this._options.stackParser,e,t,n,this._options.attachStacktrace)}captureUserFeedback(e){if(!this._isEnabled())return void(be&&r.vF.warn("SDK not enabled, will not capture user feedback."));const t=function(e,{metadata:t,tunnel:n,dsn:r}){const o={event_id:e.event_id,sent_at:(new Date).toISOString(),...t&&t.sdk&&{sdk:{name:t.sdk.name,version:t.sdk.version}},...!!n&&!!r&&{dsn:O(r)}},i=function(e){return[{type:"user_report"},e]}(e);return Q(o,[i])}(e,{metadata:this.getSdkMetadata(),dsn:this.getDsn(),tunnel:this.getOptions().tunnel});this._sendEnvelope(t)}_prepareEvent(e,t,n){return e.platform=e.platform||"javascript",super._prepareEvent(e,t,n)}_flushOutcomes(){const e=this._clearOutcomes();if(0===e.length)return void(be&&r.vF.log("No outcomes to send"));if(!this._dsn)return void(be&&r.vF.log("No dsn provided, will not send outcomes"));be&&r.vF.log("Sending outcomes:",e);const t=(n=e,Q((o=this._options.tunnel&&O(this._dsn))?{dsn:o}:{},[[{type:"client_report"},{timestamp:i||(0,me.lu)(),discarded_events:n}]]));var n,o,i;this._sendEnvelope(t)}}function Ie(){"console"in D.OW&&r.Ow.forEach((function(e){e in D.OW.console&&(0,b.GS)(D.OW.console,e,(function(t){return r.Z9[e]=t,function(...t){H("console",{args:t,level:e});const n=r.Z9[e];n&&n.apply(D.OW.console,t)}}))}))}const Ne=D.OW,De=1e3;let Le,Fe,je;function Ue(){if(!Ne.document)return;const e=H.bind(null,"dom"),t=ze(e,!0);Ne.document.addEventListener("click",t,!1),Ne.document.addEventListener("keypress",t,!1),["EventTarget","Node"].forEach((t=>{const n=Ne[t]&&Ne[t].prototype;n&&n.hasOwnProperty&&n.hasOwnProperty("addEventListener")&&((0,b.GS)(n,"addEventListener",(function(t){return function(n,r,o){if("click"===n||"keypress"==n)try{const r=this,i=r.__sentry_instrumentation_handlers__=r.__sentry_instrumentation_handlers__||{},a=i[n]=i[n]||{refCount:0};if(!a.handler){const r=ze(e);a.handler=r,t.call(this,n,r,o)}a.refCount++}catch(i){}return t.call(this,n,r,o)}})),(0,b.GS)(n,"removeEventListener",(function(e){return function(t,n,r){if("click"===t||"keypress"==t)try{const n=this,o=n.__sentry_instrumentation_handlers__||{},i=o[t];i&&(i.refCount--,i.refCount<=0&&(e.call(this,t,i.handler,r),i.handler=void 0,delete o[t]),0===Object.keys(o).length&&delete n.__sentry_instrumentation_handlers__)}catch(o){}return e.call(this,t,n,r)}})))}))}function ze(e,t=!1){return n=>{if(!n||n._sentryCaptured)return;const r=function(e){try{return e.target}catch(t){return 
null}}(n);if(function(e,t){return"keypress"===e&&(!t||!t.tagName||"INPUT"!==t.tagName&&"TEXTAREA"!==t.tagName&&!t.isContentEditable)}(n.type,r))return;(0,b.my)(n,"_sentryCaptured",!0),r&&!r._sentryId&&(0,b.my)(r,"_sentryId",(0,o.eJ)());const i="keypress"===n.type?"input":n.type;if(!function(e){if(e.type!==Fe)return!1;try{if(!e.target||e.target._sentryId!==je)return!1}catch(t){}return!0}(n)){e({event:n,name:i,global:t}),Fe=n.type,je=r?r._sentryId:void 0}clearTimeout(Le),Le=Ne.setTimeout((()=>{je=void 0,Fe=void 0}),De)}}const Be=D.OW,Ve="__sentry_xhr_v3__";function $e(){if(!Be.XMLHttpRequest)return;const e=XMLHttpRequest.prototype;(0,b.GS)(e,"open",(function(e){return function(...t){const n=Date.now(),r=(0,X.Kg)(t[0])?t[0].toUpperCase():void 0,o=function(e){if((0,X.Kg)(e))return e;try{return e.toString()}catch(t){}return}(t[1]);if(!r||!o)return e.apply(this,t);this[Ve]={method:r,url:o,request_headers:{}},"POST"===r&&o.match(/sentry_key/)&&(this.__sentry_own_request__=!0);const i=()=>{const e=this[Ve];if(e&&4===this.readyState){try{e.status_code=this.status}catch(t){}H("xhr",{args:[r,o],endTimestamp:Date.now(),startTimestamp:n,xhr:this})}};return"onreadystatechange"in this&&"function"===typeof this.onreadystatechange?(0,b.GS)(this,"onreadystatechange",(function(e){return function(...t){return i(),e.apply(this,t)}})):this.addEventListener("readystatechange",i),(0,b.GS)(this,"setRequestHeader",(function(e){return function(...t){const[n,r]=t,o=this[Ve];return o&&(0,X.Kg)(n)&&(0,X.Kg)(r)&&(o.request_headers[n.toLowerCase()]=r),e.apply(this,t)}})),e.apply(this,t)}})),(0,b.GS)(e,"send",(function(e){return function(...t){const n=this[Ve];if(!n)return e.apply(this,t);void 0!==t[0]&&(n.body=t[0]);return H("xhr",{args:[n.method,n.url],startTimestamp:Date.now(),xhr:this}),e.apply(this,t)}}))}function He(){(function(){if("string"===typeof EdgeRuntime)return!0;if(!F())return!1;if(j(L.fetch))return!0;let e=!1;const t=L.document;if(t&&"function"===typeof t.createElement)try{const n=t.createElement("iframe");n.hidden=!0,t.head.appendChild(n),n.contentWindow&&n.contentWindow.fetch&&(e=j(n.contentWindow.fetch)),t.head.removeChild(n)}catch(n){C.T&&r.vF.warn("Could not create sandbox iframe for pure fetch check, bailing to window.fetch: ",n)}return e})()&&(0,b.GS)(D.OW,"fetch",(function(e){return function(...t){const{method:n,url:r}=function(e){if(0===e.length)return{method:"GET",url:""};if(2===e.length){const[t,n]=e;return{url:qe(t),method:We(n,"method")?String(n.method).toUpperCase():"GET"}}const t=e[0];return{url:qe(t),method:We(t,"method")?String(t.method).toUpperCase():"GET"}}(t),o={args:t,fetchData:{method:n,url:r},startTimestamp:Date.now()};return H("fetch",{...o}),e.apply(D.OW,t).then((e=>(H("fetch",{...o,endTimestamp:Date.now(),response:e}),e)),(e=>{throw H("fetch",{...o,endTimestamp:Date.now(),error:e}),e}))}}))}function We(e,t){return!!e&&"object"===typeof e&&!!e[t]}function qe(e){return"string"===typeof e?e:e?We(e,"url")?e.url:e.toString?e.toString():"":""}var Ge=n(19766);const Ye=["fatal","error","warning","log","info","debug"];function Xe(e){return"warn"===e?"warning":Ye.includes(e)?e:"log"}function Ke(e){if(!e)return{};const t=e.match(/^(([^:/?#]+):)?(\/\/([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?$/);if(!t)return{};const n=t[6]||"",r=t[8]||"";return{host:t[4],path:t[5],protocol:t[2],search:n,hash:r,relative:t[5]+n+r}}const Ze=1024,Qe="Breadcrumbs",Je=(e={})=>{const t={console:!0,dom:!0,fetch:!0,history:!0,sentry:!0,xhr:!0,...e};return{name:Qe,setupOnce(){},setup(e){var n;t.console&&function(e){const 
t="console";V(t,e),$(t,Ie)}(function(e){return function(t){if((0,_.KU)()!==e)return;const n={category:"console",data:{arguments:t.args,logger:"console"},level:Xe(t.level),message:(0,i.gt)(t.args," ")};if("assert"===t.level){if(!1!==t.args[0])return;n.message=`Assertion failed: ${(0,i.gt)(t.args.slice(1)," ")||"console.assert"}`,n.data.arguments=t.args.slice(1)}(0,_.ZQ)(n,{input:t.args,level:t.level})}}(e)),t.dom&&(n=function(e,t){return function(n){if((0,_.KU)()!==e)return;let o,i,a="object"===typeof t?t.serializeAttribute:void 0,s="object"===typeof t&&"number"===typeof t.maxStringLength?t.maxStringLength:void 0;s&&s>Ze&&(be&&r.vF.warn(`\`dom.maxStringLength\` cannot exceed 1024, but a value of ${s} was configured. Sentry will use 1024 instead.`),s=Ze),"string"===typeof a&&(a=[a]);try{const e=n.event,t=function(e){return!!e&&!!e.target}(e)?e.target:e;o=(0,Ge.Hd)(t,{keyAttrs:a,maxStringLength:s}),i=(0,Ge.xE)(t)}catch(l){o=""}if(0===o.length)return;const u={category:`ui.${n.name}`,message:o};i&&(u.data={"ui.component_name":i}),(0,_.ZQ)(u,{event:n.event,name:n.name,global:n.global})}}(e,t.dom),V("dom",n),$("dom",Ue)),t.xhr&&function(e){V("xhr",e),$("xhr",$e)}(function(e){return function(t){if((0,_.KU)()!==e)return;const{startTimestamp:n,endTimestamp:r}=t,o=t.xhr[Ve];if(!n||!r||!o)return;const{method:i,url:a,status_code:s,body:u}=o,l={method:i,url:a,status_code:s},c={xhr:t.xhr,input:u,startTimestamp:n,endTimestamp:r};(0,_.ZQ)({category:"xhr",data:l,type:"http"},c)}}(e)),t.fetch&&function(e){const t="fetch";V(t,e),$(t,He)}(function(e){return function(t){if((0,_.KU)()!==e)return;const{startTimestamp:n,endTimestamp:r}=t;if(r&&(!t.fetchData.url.match(/sentry_key/)||"POST"!==t.fetchData.method))if(t.error){const e=t.fetchData,o={data:t.error,input:t.args,startTimestamp:n,endTimestamp:r};(0,_.ZQ)({category:"fetch",data:e,level:"error",type:"http"},o)}else{const e=t.response,o={...t.fetchData,status_code:e&&e.status},i={input:t.args,response:e,startTimestamp:n,endTimestamp:r};(0,_.ZQ)({category:"fetch",data:o,type:"http"},i)}}}(e)),t.history&&G(function(e){return function(t){if((0,_.KU)()!==e)return;let n=t.from,r=t.to;const o=Ke(Oe.location.href);let i=n?Ke(n):void 0;const a=Ke(r);i&&i.path||(i=o),o.protocol===a.protocol&&o.host===a.host&&(r=a.relative),o.protocol===i.protocol&&o.host===i.host&&(n=i.relative),(0,_.ZQ)({category:"navigation",data:{from:n,to:r}})}}(e)),t.sentry&&e.on&&e.on("beforeSendEvent",function(e){return function(t){(0,_.KU)()===e&&(0,_.ZQ)({category:"sentry."+("transaction"===t.type?"transaction":"event"),event_id:t.event_id,level:t.level,message:(0,o.$X)(t)},{event:t})}}(e))}}};h(Qe,Je);const et="Dedupe",tt=()=>{let e;return{name:et,setupOnce(){},processEvent(t){if(t.type)return t;try{if(function(e,t){if(!t)return!1;if(function(e,t){const n=e.message,r=t.message;if(!n&&!r)return!1;if(n&&!r||!n&&r)return!1;if(n!==r)return!1;if(!rt(e,t))return!1;if(!nt(e,t))return!1;return!0}(e,t))return!0;if(function(e,t){const n=ot(t),r=ot(e);if(!n||!r)return!1;if(n.type!==r.type||n.value!==r.value)return!1;if(!rt(e,t))return!1;if(!nt(e,t))return!1;return!0}(e,t))return!0;return!1}(t,e))return be&&r.vF.warn("Event dropped due to being a duplicate of previously captured event."),null}catch(n){}return e=t}}};h(et,tt);function nt(e,t){let n=it(e),r=it(t);if(!n&&!r)return!0;if(n&&!r||!n&&r)return!1;if(r.length!==n.length)return!1;for(let o=0;o{const t={onerror:!0,onunhandledrejection:!0,...e};return{name:ct,setupOnce(){Error.stackTraceLimit=50},setup(e){t.onerror&&(!function(e){!function(e){const 
t="error";V(t,e),$(t,st)}((t=>{const{stackParser:n,attachStacktrace:r}=pt();if((0,_.KU)()!==e||ke())return;const{msg:o,url:i,line:a,column:s,error:u}=t,l=void 0===u&&(0,X.Kg)(o)?function(e,t,n,r){const o=/^(?:[Uu]ncaught (?:exception: )?)?(?:((?:Eval|Internal|Range|Reference|Syntax|Type|URI|)Error): )?(.*)$/i;let i=(0,X.T2)(e)?e.message:e,a="Error";const s=i.match(o);s&&(a=s[1],i=s[2]);const u={exception:{values:[{type:a,value:i}]}};return dt(u,t,n,r)}(o,i,a,s):dt(Ee(n,u||o,void 0,r,!1),i,a,s);l.level="error",(0,_.r)(l,{originalException:u,mechanism:{handled:!1,type:"onerror"}})}))}(e),ht("onerror")),t.onunhandledrejection&&(!function(e){!function(e){const t="unhandledrejection";V(t,e),$(t,lt)}((t=>{const{stackParser:n,attachStacktrace:r}=pt();if((0,_.KU)()!==e||ke())return;const o=function(e){if((0,X.sO)(e))return e;const t=e;try{if("reason"in t)return t.reason;if("detail"in t&&"reason"in t.detail)return t.detail.reason}catch(n){}return e}(t),i=(0,X.sO)(o)?{exception:{values:[{type:"UnhandledRejection",value:`Non-Error promise rejection captured with value: ${String(o)}`}]}}:Ee(n,o,void 0,r,!0);i.level="error",(0,_.r)(i,{originalException:o,mechanism:{handled:!1,type:"onunhandledrejection"}})}))}(e),ht("onunhandledrejection"))}}};h(ct,ft);function dt(e,t,n,r){const o=e.exception=e.exception||{},i=o.values=o.values||[],a=i[0]=i[0]||{},s=a.stacktrace=a.stacktrace||{},u=s.frames=s.frames||[],l=isNaN(parseInt(r,10))?void 0:r,c=isNaN(parseInt(n,10))?void 0:n,f=(0,X.Kg)(t)&&t.length>0?t:(0,Ge.$N)();return 0===u.length&&u.push({colno:l,filename:f,function:"?",in_app:!0,lineno:c}),e}function ht(e){be&&r.vF.log(`Global Handler attached: ${e}`)}function pt(){const e=(0,_.KU)();return e&&e.getOptions()||{stackParser:()=>[],attachStacktrace:!1}}const gt="HttpContext",vt=()=>({name:gt,setupOnce(){},preprocessEvent(e){if(!Oe.navigator&&!Oe.location&&!Oe.document)return;const t=e.request&&e.request.url||Oe.location&&Oe.location.href,{referrer:n}=Oe.document||{},{userAgent:r}=Oe.navigator||{},o={...e.request&&e.request.headers,...n&&{Referer:n},...r&&{"User-Agent":r}},i={...e.request,...t&&{url:t},headers:o};e.request=i}});h(gt,vt);function yt(e,t,n=250,r,o,a,s){if(!a.exception||!a.exception.values||!s||!(0,X.tH)(s.originalException,Error))return;const u=a.exception.values.length>0?a.exception.values[a.exception.values.length-1]:void 0;var l,c;u&&(a.exception.values=(l=mt(e,t,o,s.originalException,r,a.exception.values,u,0),c=n,l.map((e=>(e.value&&(e.value=(0,i.xv)(e.value,c)),e)))))}function mt(e,t,n,r,o,i,a,s){if(i.length>=n+1)return i;let u=[...i];if((0,X.tH)(r[o],Error)){bt(a,s);const i=e(t,r[o]),l=u.length;_t(i,o,l,s),u=mt(e,t,n,r[o],o,[i,...u],i,l)}return Array.isArray(r.errors)&&r.errors.forEach(((r,i)=>{if((0,X.tH)(r,Error)){bt(a,s);const l=e(t,r),c=u.length;_t(l,`errors[${i}]`,c,s),u=mt(e,t,n,r,o,[l,...u],l,c)}})),u}function bt(e,t){e.mechanism=e.mechanism||{type:"generic",handled:!0},e.mechanism={...e.mechanism,..."AggregateError"===e.type&&{is_exception_group:!0},exception_id:t}}function _t(e,t,n,r){e.mechanism=e.mechanism||{type:"generic",handled:!0},e.mechanism={...e.mechanism,type:"chained",source:t,exception_id:n,parent_id:r}}const wt="LinkedErrors",xt=(e={})=>{const t=e.limit||5,n=e.key||"cause";return{name:wt,setupOnce(){},preprocessEvent(e,r,o){const 
i=o.getOptions();yt(_e,i.stackParser,i.maxValueLength,n,t,e,r)}}},St=(h(wt,xt),["EventTarget","Window","Node","ApplicationCache","AudioTrackList","BroadcastChannel","ChannelMergerNode","CryptoOperation","EventSource","FileReader","HTMLUnknownElement","IDBDatabase","IDBRequest","IDBTransaction","KeyOperation","MediaController","MessagePort","ModalWindow","Notification","SVGElementInstance","Screen","SharedWorker","TextTrack","TextTrackCue","TextTrackList","WebSocket","WebSocketWorker","Worker","XMLHttpRequest","XMLHttpRequestEventTarget","XMLHttpRequestUpload"]),At="TryCatch",Et=(e={})=>{const t={XMLHttpRequest:!0,eventTarget:!0,requestAnimationFrame:!0,setInterval:!0,setTimeout:!0,...e};return{name:At,setupOnce(){t.setTimeout&&(0,b.GS)(Oe,"setTimeout",Ct),t.setInterval&&(0,b.GS)(Oe,"setInterval",Ct),t.requestAnimationFrame&&(0,b.GS)(Oe,"requestAnimationFrame",Rt),t.XMLHttpRequest&&"XMLHttpRequest"in Oe&&(0,b.GS)(XMLHttpRequest.prototype,"send",Ot);const e=t.eventTarget;if(e){(Array.isArray(e)?e:St).forEach(Tt)}}}};h(At,Et);function Ct(e){return function(...t){const n=t[0];return t[0]=Me(n,{mechanism:{data:{function:(0,N.qQ)(e)},handled:!1,type:"instrument"}}),e.apply(this,t)}}function Rt(e){return function(t){return e.apply(this,[Me(t,{mechanism:{data:{function:"requestAnimationFrame",handler:(0,N.qQ)(e)},handled:!1,type:"instrument"}})])}}function Ot(e){return function(...t){const n=this;return["onload","onerror","onprogress","onreadystatechange"].forEach((e=>{e in n&&"function"===typeof n[e]&&(0,b.GS)(n,e,(function(t){const n={mechanism:{data:{function:e,handler:(0,N.qQ)(t)},handled:!1,type:"instrument"}},r=(0,b.sp)(t);return r&&(n.mechanism.data.handler=(0,N.qQ)(r)),Me(t,n)}))})),e.apply(this,t)}}function Tt(e){const t=Oe,n=t[e]&&t[e].prototype;n&&n.hasOwnProperty&&n.hasOwnProperty("addEventListener")&&((0,b.GS)(n,"addEventListener",(function(t){return function(n,r,o){try{"function"===typeof r.handleEvent&&(r.handleEvent=Me(r.handleEvent,{mechanism:{data:{function:"handleEvent",handler:(0,N.qQ)(r),target:e},handled:!1,type:"instrument"}}))}catch(i){}return t.apply(this,[n,Me(r,{mechanism:{data:{function:"addEventListener",handler:(0,N.qQ)(r),target:e},handled:!1,type:"instrument"}}),o])}})),(0,b.GS)(n,"removeEventListener",(function(e){return function(t,n,r){const o=n;try{const n=o&&o.__sentry_wrapped__;n&&e.call(this,t,n,r)}catch(i){}return e.call(this,t,o,r)}})))}const kt="?";function Mt(e,t,n,r){const o={filename:e,function:t,in_app:!0};return void 0!==n&&(o.lineno=n),void 0!==r&&(o.colno=r),o}const Pt=/^\s*at (?:(.+?\)(?: \[.+\])?|.*?) ?\((?:address at )?)?(?:async )?((?:|[-a-z]+:|.*bundle|\/)?.*?)(?::(\d+))?(?::(\d+))?\)?\s*$/i,It=/\((\S*)(?::(\d+))(?::(\d+))\)/,Nt=/^\s*(.*?)(?:\((.*?)\))?(?:^|@)?((?:[-a-z]+)?:\/.*?|\[native code\]|[^@]*(?:bundle|\d+\.js)|\/[\w\-. 
/=]+)(?::(\d+))?(?::(\d+))?\s*$/i,Dt=/(\S+) line (\d+)(?: > eval line \d+)* > eval/i,Lt=/^\s*at (?:((?:\[object object\])?.+) )?\(?((?:[-a-z]+):.*?):(\d+)(?::(\d+))?\)?\s*$/i,Ft=[[30,e=>{const t=Pt.exec(e);if(t){if(t[2]&&0===t[2].indexOf("eval")){const e=It.exec(t[2]);e&&(t[2]=e[1],t[3]=e[2],t[4]=e[3])}const[e,n]=Ut(t[1]||kt,t[2]);return Mt(n,e,t[3]?+t[3]:void 0,t[4]?+t[4]:void 0)}}],[50,e=>{const t=Nt.exec(e);if(t){if(t[3]&&t[3].indexOf(" > eval")>-1){const e=Dt.exec(t[3]);e&&(t[1]=t[1]||"eval",t[3]=e[1],t[4]=e[2],t[5]="")}let e=t[3],n=t[1]||kt;return[n,e]=Ut(n,e),Mt(e,n,t[4]?+t[4]:void 0,t[5]?+t[5]:void 0)}}],[40,e=>{const t=Lt.exec(e);return t?Mt(t[2],t[1]||kt,+t[3],t[4]?+t[4]:void 0):void 0}]],jt=(0,N.gd)(...Ft),Ut=(e,t)=>{const n=-1!==e.indexOf("safari-extension"),r=-1!==e.indexOf("safari-web-extension");return n||r?[-1!==e.indexOf("@")?e.split("@")[0]:kt,n?`safari-extension:${t}`:`safari-web-extension:${t}`]:[e,t]};function zt(e){const t=[];function n(e){return t.splice(t.indexOf(e),1)[0]}return{$:t,add:function(r){if(!(void 0===e||t.lengthn(o))).then(null,(()=>n(o).then(null,(()=>{})))),o},drain:function(e){return new K.T2(((n,r)=>{let o=t.length;if(!o)return n(!0);const i=setTimeout((()=>{e&&e>0&&n(!1)}),e);t.forEach((e=>{(0,K.XW)(e).then((()=>{--o||(clearTimeout(i),n(!0))}),r)}))}))}}}const Bt=6e4;function Vt(e,{statusCode:t,headers:n},r=Date.now()){const o={...e},i=n&&n["x-sentry-rate-limits"],a=n&&n["retry-after"];if(i)for(const s of i.trim().split(",")){const[e,t,,,n]=s.split(":",5),i=parseInt(e,10),a=1e3*(isNaN(i)?60:i);if(t)for(const s of t.split(";"))"metric_bucket"===s&&n&&!n.split(";").includes("custom")||(o[s]=r+a);else o.all=r+a}else a?o.all=r+function(e,t=Date.now()){const n=parseInt(`${e}`,10);if(!isNaN(n))return 1e3*n;const r=Date.parse(`${e}`);return isNaN(r)?Bt:r-t}(a,r):429===t&&(o.all=r+6e4);return o}const $t=30;function Ht(e,t,n=zt(e.bufferSize||$t)){let o={};function i(i){const s=[];if(ee(i,((t,n)=>{const r=ie(n);if(function(e,t,n=Date.now()){return function(e,t){return e[t]||e.all||0}(e,t)>n}(o,r)){const o=Wt(t,n);e.recordDroppedEvent("ratelimit_backoff",r,o)}else s.push(t)})),0===s.length)return(0,K.XW)();const u=Q(i[0],s),l=t=>{ee(u,((n,r)=>{const o=Wt(n,r);e.recordDroppedEvent(t,ie(r),o)}))};return n.add((()=>t({body:ne(u,e.textEncoder)}).then((e=>(void 0!==e.statusCode&&(e.statusCode<200||e.statusCode>=300)&&a.T&&r.vF.warn(`Sentry responded with status code ${e.statusCode} to sent event.`),o=Vt(o,e),e)),(e=>{throw l("network_error"),e})))).then((e=>e),(e=>{if(e instanceof se)return a.T&&r.vF.error("Skipped sending event because buffer is full."),l("queue_overflow"),(0,K.XW)();throw e}))}return i.__sentry__baseTransport__=!0,{send:i,flush:e=>n.drain(e)}}function Wt(e,t){if("event"===t||"transaction"===t)return Array.isArray(e)?e[1]:void 0}let qt;function Gt(e,t=function(){if(qt)return qt;if(j(Oe.fetch))return qt=Oe.fetch.bind(Oe);const e=Oe.document;let t=Oe.fetch;if(e&&"function"===typeof e.createElement)try{const n=e.createElement("iframe");n.hidden=!0,e.head.appendChild(n);const r=n.contentWindow;r&&r.fetch&&(t=r.fetch),e.head.removeChild(n)}catch(n){be&&r.vF.warn("Could not create sandbox iframe for pure fetch check, bailing to window.fetch: ",n)}return qt=t.bind(Oe)}()){let n=0,o=0;return Ht(e,(function(r){const i=r.body.length;n+=i,o++;const a={body:r.body,method:"POST",referrerPolicy:"origin",headers:e.headers,keepalive:n<=6e4&&o<15,...e.fetchOptions};try{return 
t(e.url,a).then((e=>(n-=i,o--,{statusCode:e.status,headers:{"x-sentry-rate-limits":e.headers.get("X-Sentry-Rate-Limits"),"retry-after":e.headers.get("Retry-After")}})))}catch(s){return qt=void 0,n-=i,o--,(0,K.xg)(s)}}))}const Yt=4;function Xt(e){return Ht(e,(function(t){return new K.T2(((n,r)=>{const o=new XMLHttpRequest;o.onerror=r,o.onreadystatechange=()=>{o.readyState===Yt&&n({statusCode:o.status,headers:{"x-sentry-rate-limits":o.getResponseHeader("X-Sentry-Rate-Limits"),"retry-after":o.getResponseHeader("Retry-After")}})},o.open("POST",e.url);for(const t in e.headers)Object.prototype.hasOwnProperty.call(e.headers,t)&&o.setRequestHeader(t,e.headers[t]);o.send(t.body)}))}))}const Kt=[y(),A(),Et(),Je(),ft(),xt(),tt(),vt()];function Zt(e={}){void 0===e.defaultIntegrations&&(e.defaultIntegrations=[...Kt]),void 0===e.release&&("string"===typeof __SENTRY_RELEASE__&&(e.release=__SENTRY_RELEASE__),Oe.SENTRY_RELEASE&&Oe.SENTRY_RELEASE.id&&(e.release=Oe.SENTRY_RELEASE.id)),void 0===e.autoSessionTracking&&(e.autoSessionTracking=!0),void 0===e.sendClientReports&&(e.sendClientReports=!0);const t={...e,stackParser:(0,N.vk)(e.stackParser||jt),integrations:c(e),transport:e.transport||(F()?Gt:Xt)};E(Pe,t),e.autoSessionTracking&&function(){if("undefined"===typeof Oe.document)return void(be&&r.vF.warn("Session tracking in non-browser environment with @sentry/browser is not supported."));(0,_.J0)({ignoreDuration:!0}),(0,_.J5)(),G((({from:e,to:t})=>{void 0!==e&&e!==t&&((0,_.J0)({ignoreDuration:!0}),(0,_.J5)())}))}()}const Qt=(e={},t=(0,u.BF)())=>{if(!Oe.document)return void(be&&r.vF.error("Global document not defined in showReportDialog call"));const{client:n,scope:o}=t.getStackTop(),i=e.dsn||n&&n.getDsn();if(!i)return void(be&&r.vF.error("DSN not configured for showReportDialog call"));o&&(e.user={...o.getUser(),...e.user}),e.eventId||(e.eventId=t.lastEventId());const a=Oe.document.createElement("script");a.async=!0,a.crossOrigin="anonymous",a.src=function(e,t){const n=k(e);if(!n)return"";const r=`${P(n)}embed/error-page/`;let o=`dsn=${O(n)}`;for(const i in t)if("dsn"!==i&&"onClose"!==i)if("user"===i){const e=t.user;if(!e)continue;e.name&&(o+=`&name=${encodeURIComponent(e.name)}`),e.email&&(o+=`&email=${encodeURIComponent(e.email)}`)}else o+=`&${encodeURIComponent(i)}=${encodeURIComponent(t[i])}`;return`${r}?${o}`}(i,e),e.onLoad&&(a.onload=e.onLoad);const{onClose:s}=e;if(s){const e=t=>{if("__sentry_reportdialog_closed__"===t.data)try{s()}finally{Oe.removeEventListener("message",e)}};Oe.addEventListener("message",e)}const l=Oe.document.head||Oe.document.body;l?l.appendChild(a):be&&r.vF.error("Not injecting report dialog. 
No injection point found in HTML")}},92967:(e,t,n)=>{"use strict";n.d(t,{U:()=>r});const r="production"},92220:(e,t,n)=>{"use strict";n.d(t,{T:()=>r});const r="undefined"===typeof __SENTRY_DEBUG__||__SENTRY_DEBUG__},42531:(e,t,n)=>{"use strict";n.d(t,{jB:()=>c,lG:()=>u,lb:()=>l});var r=n(16341),o=n(60690),i=n(17412),a=n(73816),s=n(92220);function u(){return(0,r.BY)("globalEventProcessors",(()=>[]))}function l(e){u().push(e)}function c(e,t,n,r=0){return new o.T2(((o,u)=>{const l=e[r];if(null===t||"function"!==typeof l)o(t);else{const f=l({...t},n);s.T&&l.id&&null===f&&i.vF.log(`Event processor "${l.id}" dropped event`),(0,a.Qg)(f)?f.then((t=>c(e,t,n,r+1).then(o))).then(null,u):c(e,f,n,r+1).then(o).then(null,u)}}))}},83741:(e,t,n)=>{"use strict";n.d(t,{Cp:()=>u,J0:()=>g,J5:()=>m,KU:()=>h,ZQ:()=>c,gV:()=>f,o5:()=>p,r:()=>l,v4:()=>d});var r=n(16341),o=n(92967),i=n(86641),a=n(7058),s=n(49475);function u(e,t){return(0,i.BF)().captureException(e,(0,s.li)(t))}function l(e,t){return(0,i.BF)().captureEvent(e,t)}function c(e,t){(0,i.BF)().addBreadcrumb(e,t)}function f(e){(0,i.BF)().setUser(e)}function d(...e){const t=(0,i.BF)();if(2===e.length){const[n,r]=e;return n?t.withScope((()=>(t.getStackTop().scope=n,r(n)))):t.withScope(r)}return t.withScope(e[0])}function h(){return(0,i.BF)().getClient()}function p(){return(0,i.BF)().getScope()}function g(e){const t=h(),n=(0,i.rm)(),s=p(),{release:u,environment:l=o.U}=t&&t.getOptions()||{},{userAgent:c}=r.OW.navigator||{},f=(0,a.fj)({release:u,environment:l,user:s.getUser()||n.getUser(),...c&&{userAgent:c},...e}),d=n.getSession();return d&&"ok"===d.status&&(0,a.qO)(d,{status:"exited"}),v(),n.setSession(f),s.setSession(f),f}function v(){const e=(0,i.rm)(),t=p(),n=t.getSession()||e.getSession();n&&(0,a.Vu)(n),y(),e.setSession(),t.setSession()}function y(){const e=(0,i.rm)(),t=p(),n=h(),r=t.getSession()||e.getSession();r&&n&&n.captureSession&&n.captureSession(r)}function m(e=!1){e?v():y()}},86641:(e,t,n)=>{"use strict";n.d(t,{BF:()=>m,rm:()=>b});var r=n(73816),o=n(6936),i=n(89297),a=n(17412),s=n(16341),u=n(92967),l=n(92220),c=n(22486),f=n(7058),d=n(57986);const h=parseFloat(d.M),p=100;class g{constructor(e,t,n,r=h){let o,i;this._version=r,t?o=t:(o=new c.HG,o.setClient(e)),n?i=n:(i=new c.HG,i.setClient(e)),this._stack=[{scope:o}],e&&this.bindClient(e),this._isolationScope=i}isOlderThan(e){return this._version(this.popScope(),e)),(e=>{throw this.popScope(),e})):(this.popScope(),n)}getClient(){return this.getStackTop().client}getScope(){return this.getStackTop().scope}getIsolationScope(){return this._isolationScope}getStack(){return this._stack}getStackTop(){return this._stack[this._stack.length-1]}captureException(e,t){const n=this._lastEventId=t&&t.event_id?t.event_id:(0,o.eJ)(),r=new Error("Sentry syntheticException");return this.getScope().captureException(e,{originalException:e,syntheticException:r,...t,event_id:n}),n}captureMessage(e,t,n){const r=this._lastEventId=n&&n.event_id?n.event_id:(0,o.eJ)(),i=new Error(e);return this.getScope().captureMessage(e,t,{originalException:e,syntheticException:i,...n,event_id:r}),r}captureEvent(e,t){const n=t&&t.event_id?t.event_id:(0,o.eJ)();return e.type||(this._lastEventId=n),this.getScope().captureEvent(e,{...t,event_id:n}),n}lastEventId(){return this._lastEventId}addBreadcrumb(e,t){const{scope:n,client:r}=this.getStackTop();if(!r)return;const{beforeBreadcrumb:o=null,maxBreadcrumbs:s=p}=r.getOptions&&r.getOptions()||{};if(s<=0)return;const 
u={timestamp:(0,i.lu)(),...e},l=o?(0,a.pq)((()=>o(u,t))):u;null!==l&&(r.emit&&r.emit("beforeAddBreadcrumb",l,t),n.addBreadcrumb(l,s))}setUser(e){this.getScope().setUser(e),this.getIsolationScope().setUser(e)}setTags(e){this.getScope().setTags(e),this.getIsolationScope().setTags(e)}setExtras(e){this.getScope().setExtras(e),this.getIsolationScope().setExtras(e)}setTag(e,t){this.getScope().setTag(e,t),this.getIsolationScope().setTag(e,t)}setExtra(e,t){this.getScope().setExtra(e,t),this.getIsolationScope().setExtra(e,t)}setContext(e,t){this.getScope().setContext(e,t),this.getIsolationScope().setContext(e,t)}configureScope(e){const{scope:t,client:n}=this.getStackTop();n&&e(t)}run(e){const t=y(this);try{e(this)}finally{y(t)}}getIntegration(e){const t=this.getClient();if(!t)return null;try{return t.getIntegration(e)}catch(n){return l.T&&a.vF.warn(`Cannot retrieve integration ${e.id} from the current Hub`),null}}startTransaction(e,t){const n=this._callExtensionMethod("startTransaction",e,t);if(l.T&&!n){this.getClient()?a.vF.warn("Tracing extension 'startTransaction' has not been added. Call 'addTracingExtensions' before calling 'init':\nSentry.addTracingExtensions();\nSentry.init({...});\n"):a.vF.warn("Tracing extension 'startTransaction' is missing. You should 'init' the SDK before calling 'startTransaction'")}return n}traceHeaders(){return this._callExtensionMethod("traceHeaders")}captureSession(e=!1){if(e)return this.endSession();this._sendSessionUpdate()}endSession(){const e=this.getStackTop().scope,t=e.getSession();t&&(0,f.Vu)(t),this._sendSessionUpdate(),e.setSession()}startSession(e){const{scope:t,client:n}=this.getStackTop(),{release:r,environment:o=u.U}=n&&n.getOptions()||{},{userAgent:i}=s.OW.navigator||{},a=(0,f.fj)({release:r,environment:o,user:t.getUser(),...i&&{userAgent:i},...e}),l=t.getSession&&t.getSession();return l&&"ok"===l.status&&(0,f.qO)(l,{status:"exited"}),this.endSession(),t.setSession(a),a}shouldSendDefaultPii(){const e=this.getClient(),t=e&&e.getOptions();return Boolean(t&&t.sendDefaultPii)}_sendSessionUpdate(){const{scope:e,client:t}=this.getStackTop(),n=e.getSession();n&&t&&t.captureSession&&t.captureSession(n)}_callExtensionMethod(e,...t){const n=v().__SENTRY__;if(n&&n.extensions&&"function"===typeof n.extensions[e])return n.extensions[e].apply(this,t);l.T&&a.vF.warn(`Extension method ${e} couldn't be found, doing nothing.`)}}function v(){return s.OW.__SENTRY__=s.OW.__SENTRY__||{extensions:{},hub:void 0},s.OW}function y(e){const t=v(),n=x(t);return S(t,e),n}function m(){const e=v();if(e.__SENTRY__&&e.__SENTRY__.acs){const t=e.__SENTRY__.acs.getCurrentHub();if(t)return t}return _(e)}function b(){return m().getIsolationScope()}function _(e=v()){return w(e)&&!x(e).isOlderThan(h)||S(e,new g),x(e)}function w(e){return!!(e&&e.__SENTRY__&&e.__SENTRY__.hub)}function x(e){return(0,s.BY)("hub",(()=>new g),e)}function S(e,t){if(!e)return!1;return(e.__SENTRY__=e.__SENTRY__||{}).hub=t,!0}},22486:(e,t,n)=>{"use strict";n.d(t,{HG:()=>f,m6:()=>d});var r=n(73816),o=n(89297),i=n(6936),a=n(17412),s=n(42531),u=n(7058),l=n(41535);let c;class f{constructor(){this._notifyingListeners=!1,this._scopeListeners=[],this._eventProcessors=[],this._breadcrumbs=[],this._attachments=[],this._user={},this._tags={},this._extra={},this._contexts={},this._sdkProcessingMetadata={},this._propagationContext=h()}static clone(e){return e?e.clone():new f}clone(){const e=new f;return 
e._breadcrumbs=[...this._breadcrumbs],e._tags={...this._tags},e._extra={...this._extra},e._contexts={...this._contexts},e._user=this._user,e._level=this._level,e._span=this._span,e._session=this._session,e._transactionName=this._transactionName,e._fingerprint=this._fingerprint,e._eventProcessors=[...this._eventProcessors],e._requestSession=this._requestSession,e._attachments=[...this._attachments],e._sdkProcessingMetadata={...this._sdkProcessingMetadata},e._propagationContext={...this._propagationContext},e._client=this._client,e}setClient(e){this._client=e}getClient(){return this._client}addScopeListener(e){this._scopeListeners.push(e)}addEventProcessor(e){return this._eventProcessors.push(e),this}setUser(e){return this._user=e||{email:void 0,id:void 0,ip_address:void 0,segment:void 0,username:void 0},this._session&&(0,u.qO)(this._session,{user:e}),this._notifyScopeListeners(),this}getUser(){return this._user}getRequestSession(){return this._requestSession}setRequestSession(e){return this._requestSession=e,this}setTags(e){return this._tags={...this._tags,...e},this._notifyScopeListeners(),this}setTag(e,t){return this._tags={...this._tags,[e]:t},this._notifyScopeListeners(),this}setExtras(e){return this._extra={...this._extra,...e},this._notifyScopeListeners(),this}setExtra(e,t){return this._extra={...this._extra,[e]:t},this._notifyScopeListeners(),this}setFingerprint(e){return this._fingerprint=e,this._notifyScopeListeners(),this}setLevel(e){return this._level=e,this._notifyScopeListeners(),this}setTransactionName(e){return this._transactionName=e,this._notifyScopeListeners(),this}setContext(e,t){return null===t?delete this._contexts[e]:this._contexts[e]=t,this._notifyScopeListeners(),this}setSpan(e){return this._span=e,this._notifyScopeListeners(),this}getSpan(){return this._span}getTransaction(){const e=this._span;return e&&e.transaction}setSession(e){return e?this._session=e:delete this._session,this._notifyScopeListeners(),this}getSession(){return this._session}update(e){if(!e)return this;const t="function"===typeof e?e(this):e;if(t instanceof f){const e=t.getScopeData();this._tags={...this._tags,...e.tags},this._extra={...this._extra,...e.extra},this._contexts={...this._contexts,...e.contexts},e.user&&Object.keys(e.user).length&&(this._user=e.user),e.level&&(this._level=e.level),e.fingerprint.length&&(this._fingerprint=e.fingerprint),t.getRequestSession()&&(this._requestSession=t.getRequestSession()),e.propagationContext&&(this._propagationContext=e.propagationContext)}else if((0,r.Qd)(t)){const t=e;this._tags={...this._tags,...t.tags},this._extra={...this._extra,...t.extra},this._contexts={...this._contexts,...t.contexts},t.user&&(this._user=t.user),t.level&&(this._level=t.level),t.fingerprint&&(this._fingerprint=t.fingerprint),t.requestSession&&(this._requestSession=t.requestSession),t.propagationContext&&(this._propagationContext=t.propagationContext)}return this}clear(){return this._breadcrumbs=[],this._tags={},this._extra={},this._user={},this._contexts={},this._level=void 0,this._transactionName=void 0,this._fingerprint=void 0,this._requestSession=void 0,this._span=void 0,this._session=void 0,this._notifyScopeListeners(),this._attachments=[],this._propagationContext=h(),this}addBreadcrumb(e,t){const n="number"===typeof t?t:100;if(n<=0)return this;const r={timestamp:(0,o.lu)(),...e},i=this._breadcrumbs;return i.push(r),this._breadcrumbs=i.length>n?i.slice(-n):i,this._notifyScopeListeners(),this}getLastBreadcrumb(){return 
this._breadcrumbs[this._breadcrumbs.length-1]}clearBreadcrumbs(){return this._breadcrumbs=[],this._notifyScopeListeners(),this}addAttachment(e){return this._attachments.push(e),this}getAttachments(){return this.getScopeData().attachments}clearAttachments(){return this._attachments=[],this}getScopeData(){const{_breadcrumbs:e,_attachments:t,_contexts:n,_tags:r,_extra:o,_user:i,_level:a,_fingerprint:s,_eventProcessors:u,_propagationContext:l,_sdkProcessingMetadata:c,_transactionName:f,_span:d}=this;return{breadcrumbs:e,attachments:t,contexts:n,tags:r,extra:o,user:i,level:a,fingerprint:s||[],eventProcessors:u,propagationContext:l,sdkProcessingMetadata:c,transactionName:f,span:d}}applyToEvent(e,t={},n=[]){(0,l.e2)(e,this.getScopeData());const r=[...n,...(0,s.lG)(),...this._eventProcessors];return(0,s.jB)(r,e,t)}setSDKProcessingMetadata(e){return this._sdkProcessingMetadata={...this._sdkProcessingMetadata,...e},this}setPropagationContext(e){return this._propagationContext=e,this}getPropagationContext(){return this._propagationContext}captureException(e,t){const n=t&&t.event_id?t.event_id:(0,i.eJ)();if(!this._client)return a.vF.warn("No client configured on scope - will not capture exception!"),n;const r=new Error("Sentry syntheticException");return this._client.captureException(e,{originalException:e,syntheticException:r,...t,event_id:n},this),n}captureMessage(e,t,n){const r=n&&n.event_id?n.event_id:(0,i.eJ)();if(!this._client)return a.vF.warn("No client configured on scope - will not capture message!"),r;const o=new Error(e);return this._client.captureMessage(e,t,{originalException:e,syntheticException:o,...n,event_id:r},this),r}captureEvent(e,t){const n=t&&t.event_id?t.event_id:(0,i.eJ)();return this._client?(this._client.captureEvent(e,{...t,event_id:n},this),n):(a.vF.warn("No client configured on scope - will not capture event!"),n)}_notifyScopeListeners(){this._notifyingListeners||(this._notifyingListeners=!0,this._scopeListeners.forEach((e=>{e(this)})),this._notifyingListeners=!1)}}function d(){return c||(c=new f),c}function h(){return{traceId:(0,i.eJ)(),spanId:(0,i.eJ)().substring(16)}}},7058:(e,t,n)=>{"use strict";n.d(t,{Vu:()=>u,fj:()=>a,qO:()=>s});var r=n(89297),o=n(6936),i=n(68411);function a(e){const t=(0,r.zf)(),n={sid:(0,o.eJ)(),init:!0,timestamp:t,started:t,duration:0,status:"ok",errors:0,ignoreDuration:!1,toJSON:()=>function(e){return(0,i.Ce)({sid:`${e.sid}`,init:e.init,started:new Date(1e3*e.started).toISOString(),timestamp:new Date(1e3*e.timestamp).toISOString(),status:e.status,errors:e.errors,did:"number"===typeof e.did||"string"===typeof e.did?`${e.did}`:void 0,duration:e.duration,abnormal_mechanism:e.abnormal_mechanism,attrs:{release:e.release,environment:e.environment,ip_address:e.ipAddress,user_agent:e.userAgent}})}(n)};return e&&s(n,e),n}function s(e,t={}){if(t.user&&(!e.ipAddress&&t.user.ip_address&&(e.ipAddress=t.user.ip_address),e.did||t.did||(e.did=t.user.id||t.user.email||t.user.username)),e.timestamp=t.timestamp||(0,r.zf)(),t.abnormal_mechanism&&(e.abnormal_mechanism=t.abnormal_mechanism),t.ignoreDuration&&(e.ignoreDuration=t.ignoreDuration),t.sid&&(e.sid=32===t.sid.length?t.sid:(0,o.eJ)()),void 0!==t.init&&(e.init=t.init),!e.did&&t.did&&(e.did=`${t.did}`),"number"===typeof t.started&&(e.started=t.started),e.ignoreDuration)e.duration=void 0;else if("number"===typeof t.duration)e.duration=t.duration;else{const 
t=e.timestamp-e.started;e.duration=t>=0?t:0}t.release&&(e.release=t.release),t.environment&&(e.environment=t.environment),!e.ipAddress&&t.ipAddress&&(e.ipAddress=t.ipAddress),!e.userAgent&&t.userAgent&&(e.userAgent=t.userAgent),"number"===typeof t.errors&&(e.errors=t.errors),t.status&&(e.status=t.status)}function u(e,t){let n={};t?n={status:t}:"ok"===e.status&&(n={status:"exited"}),s(e,n)}},75452:(e,t,n)=>{"use strict";n.d(t,{k:()=>l,l:()=>u});var r=n(68411),o=n(92967),i=n(83741),a=n(34636),s=n(3445);function u(e,t,n){const i=t.getOptions(),{publicKey:a}=t.getDsn()||{},{segment:s}=n&&n.getUser()||{},u=(0,r.Ce)({environment:i.environment||o.U,release:i.release,user_segment:s,public_key:a,trace_id:e});return t.emit&&t.emit("createDsc",u),u}function l(e){const t=(0,i.KU)();if(!t)return{};const n=u((0,s.et)(e).trace_id||"",t,(0,i.o5)()),r=(0,a.z)(e);if(!r)return n;const o=r&&r._frozenDynamicSamplingContext;if(o)return o;const{sampleRate:l,source:c}=r.metadata;null!=l&&(n.sample_rate=`${l}`);const f=(0,s.et)(r);return c&&"url"!==c&&(n.transaction=f.description),n.sampled=String((0,s.pK)(r)),t.emit&&t.emit("createDsc",n),n}},41535:(e,t,n)=>{"use strict";n.d(t,{Rg:()=>l,e2:()=>u});var r=n(68411),o=n(6936),i=n(75452),a=n(34636),s=n(3445);function u(e,t){const{fingerprint:n,span:u,breadcrumbs:l,sdkProcessingMetadata:c}=t;!function(e,t){const{extra:n,tags:o,user:i,contexts:a,level:s,transactionName:u}=t,l=(0,r.Ce)(n);l&&Object.keys(l).length&&(e.extra={...l,...e.extra});const c=(0,r.Ce)(o);c&&Object.keys(c).length&&(e.tags={...c,...e.tags});const f=(0,r.Ce)(i);f&&Object.keys(f).length&&(e.user={...f,...e.user});const d=(0,r.Ce)(a);d&&Object.keys(d).length&&(e.contexts={...d,...e.contexts});s&&(e.level=s);u&&(e.transaction=u)}(e,t),u&&function(e,t){e.contexts={trace:(0,s.kX)(t),...e.contexts};const n=(0,a.z)(t);if(n){e.sdkProcessingMetadata={dynamicSamplingContext:(0,i.k)(t),...e.sdkProcessingMetadata};const r=(0,s.et)(n).description;r&&(e.tags={transaction:r,...e.tags})}}(e,u),function(e,t){e.fingerprint=e.fingerprint?(0,o.k9)(e.fingerprint):[],t&&(e.fingerprint=e.fingerprint.concat(t));e.fingerprint&&!e.fingerprint.length&&delete e.fingerprint}(e,n),function(e,t){const n=[...e.breadcrumbs||[],...t];e.breadcrumbs=n.length?n:void 0}(e,l),function(e,t){e.sdkProcessingMetadata={...e.sdkProcessingMetadata,...t}}(e,c)}function l(e,t){const{extra:n,tags:r,user:o,contexts:i,level:a,sdkProcessingMetadata:s,breadcrumbs:u,fingerprint:l,eventProcessors:f,attachments:d,propagationContext:h,transactionName:p,span:g}=t;c(e,"extra",n),c(e,"tags",r),c(e,"user",o),c(e,"contexts",i),c(e,"sdkProcessingMetadata",s),a&&(e.level=a),p&&(e.transactionName=p),g&&(e.span=g),u.length&&(e.breadcrumbs=[...e.breadcrumbs,...u]),l.length&&(e.fingerprint=[...e.fingerprint,...l]),f.length&&(e.eventProcessors=[...e.eventProcessors,...f]),d.length&&(e.attachments=[...e.attachments,...d]),e.propagationContext={...e.propagationContext,...h}}function c(e,t,n){if(n&&Object.keys(n).length){e[t]={...e[t]};for(const r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[t][r]=n[r])}}},34636:(e,t,n)=>{"use strict";function r(e){return e.transaction}n.d(t,{z:()=>r})},49475:(e,t,n)=>{"use strict";n.d(t,{li:()=>g,mG:()=>h});var r=n(6936),o=n(89297),i=n(39653),a=n(16341),s=n(42181),u=n(92967),l=n(42531),c=n(22486),f=n(41535),d=n(3445);function 
h(e,t,n,h,g,v){const{normalizeDepth:y=3,normalizeMaxBreadth:m=1e3}=e,b={...t,event_id:t.event_id||n.event_id||(0,r.eJ)(),timestamp:t.timestamp||(0,o.lu)()},_=n.integrations||e.integrations.map((e=>e.name));!function(e,t){const{environment:n,release:r,dist:o,maxValueLength:a=250}=t;"environment"in e||(e.environment="environment"in t?n:u.U);void 0===e.release&&void 0!==r&&(e.release=r);void 0===e.dist&&void 0!==o&&(e.dist=o);e.message&&(e.message=(0,i.xv)(e.message,a));const s=e.exception&&e.exception.values&&e.exception.values[0];s&&s.value&&(s.value=(0,i.xv)(s.value,a));const l=e.request;l&&l.url&&(l.url=(0,i.xv)(l.url,a))}(b,e),function(e,t){t.length>0&&(e.sdk=e.sdk||{},e.sdk.integrations=[...e.sdk.integrations||[],...t])}(b,_),void 0===t.type&&function(e,t){const n=a.OW._sentryDebugIds;if(!n)return;let r;const o=p.get(t);o?r=o:(r=new Map,p.set(t,r));const i=Object.keys(n).reduce(((e,o)=>{let i;const a=r.get(o);a?i=a:(i=t(o),r.set(o,i));for(let t=i.length-1;t>=0;t--){const r=i[t];if(r.filename){e[r.filename]=n[o];break}}return e}),{});try{e.exception.values.forEach((e=>{e.stacktrace.frames.forEach((e=>{e.filename&&(e.debug_id=i[e.filename])}))}))}catch(s){}}(b,e.stackParser);const w=function(e,t){if(!t)return e;const n=e?e.clone():new c.HG;return n.update(t),n}(h,n.captureContext);n.mechanism&&(0,r.M6)(b,n.mechanism);const x=g&&g.getEventProcessors?g.getEventProcessors():[],S=(0,c.m6)().getScopeData();if(v){const e=v.getScopeData();(0,f.Rg)(S,e)}if(w){const e=w.getScopeData();(0,f.Rg)(S,e)}const A=[...n.attachments||[],...S.attachments];A.length&&(n.attachments=A),(0,f.e2)(b,S);const E=[...x,...(0,l.lG)(),...S.eventProcessors];return(0,l.jB)(E,b,n).then((e=>(e&&function(e){const t={};try{e.exception.values.forEach((e=>{e.stacktrace.frames.forEach((e=>{e.debug_id&&(e.abs_path?t[e.abs_path]=e.debug_id:e.filename&&(t[e.filename]=e.debug_id),delete e.debug_id)}))}))}catch(r){}if(0===Object.keys(t).length)return;e.debug_meta=e.debug_meta||{},e.debug_meta.images=e.debug_meta.images||[];const n=e.debug_meta.images;Object.keys(t).forEach((e=>{n.push({type:"sourcemap",code_file:e,debug_id:t[e]})}))}(e),"number"===typeof y&&y>0?function(e,t,n){if(!e)return null;const r={...e,...e.breadcrumbs&&{breadcrumbs:e.breadcrumbs.map((e=>({...e,...e.data&&{data:(0,s.S8)(e.data,t,n)}})))},...e.user&&{user:(0,s.S8)(e.user,t,n)},...e.contexts&&{contexts:(0,s.S8)(e.contexts,t,n)},...e.extra&&{extra:(0,s.S8)(e.extra,t,n)}};e.contexts&&e.contexts.trace&&r.contexts&&(r.contexts.trace=e.contexts.trace,e.contexts.trace.data&&(r.contexts.trace.data=(0,s.S8)(e.contexts.trace.data,t,n)));e.spans&&(r.spans=e.spans.map((e=>{const r=(0,d.et)(e).data;return r&&(e.data=(0,s.S8)(r,t,n)),e})));return r}(e,y,m):e)))}const p=new WeakMap;function g(e){if(e)return function(e){return e instanceof c.HG||"function"===typeof e}(e)||function(e){return Object.keys(e).some((e=>v.includes(e)))}(e)?{captureContext:e}:e}const v=["user","level","extra","contexts","tags","fingerprint","requestSession","propagationContext"]},40601:(e,t,n)=>{"use strict";n.d(t,{K:()=>o});var r=n(57986);function o(e,t,n=[t],o="npm"){const i=e._metadata||{};i.sdk||(i.sdk={name:`sentry.javascript.${t}`,packages:n.map((e=>({name:`${o}:@sentry/${e}`,version:r.M}))),version:r.M}),e._metadata=i}},3445:(e,t,n)=>{"use strict";n.d(t,{et:()=>a,kX:()=>i,pK:()=>s});var r=n(68411);const o=1;function 
i(e){const{spanId:t,traceId:n}=e.spanContext(),{data:o,op:i,parent_span_id:s,status:u,tags:l,origin:c}=a(e);return(0,r.Ce)({data:o,op:i,parent_span_id:s,span_id:t,status:u,tags:l,trace_id:n,origin:c})}function a(e){return function(e){return"function"===typeof e.getSpanJSON}(e)?e.getSpanJSON():"function"===typeof e.toJSON?e.toJSON():{}}function s(e){const{traceFlags:t}=e.spanContext();return Boolean(t&o)}},57986:(e,t,n)=>{"use strict";n.d(t,{M:()=>r});const r="7.112.2"},41358:(e,t,n)=>{"use strict";n.d(t,{tH:()=>c});var r=n(83741),o=n(10390),i=n(73816),a=n(17412),s=(n(4146),n(96540));const u="undefined"===typeof __SENTRY_DEBUG__||__SENTRY_DEBUG__;const l={componentStack:null,error:null,eventId:null};class c extends s.Component{constructor(e){super(e),c.prototype.__init.call(this),this.state=l,this._openFallbackReportDialog=!0;const t=(0,r.KU)();t&&t.on&&e.showDialog&&(this._openFallbackReportDialog=!1,t.on("afterSendEvent",(t=>{t.type||t.event_id!==this._lastEventId||(0,o.mn)({...e.dialogOptions,eventId:this._lastEventId})})))}componentDidCatch(e,{componentStack:t}){const{beforeCapture:n,onError:a,showDialog:u,dialogOptions:l}=this.props;(0,r.v4)((c=>{if(function(e){const t=e.match(/^([^.]+)/);return null!==t&&parseInt(t[0])>=17}(s.version)&&(0,i.bJ)(e)){const n=new Error(e.message);n.name=`React ErrorBoundary ${e.name}`,n.stack=t,function(e,t){const n=new WeakMap;!function e(t,r){if(!n.has(t))return t.cause?(n.set(t,!0),e(t.cause,r)):void(t.cause=r)}(e,t)}(e,n)}n&&n(c,e,t);const f=(0,r.Cp)(e,{captureContext:{contexts:{react:{componentStack:t}}},mechanism:{handled:!!this.props.fallback}});a&&a(e,t,f),u&&(this._lastEventId=f,this._openFallbackReportDialog&&(0,o.mn)({...l,eventId:f})),this.setState({error:e,componentStack:t,eventId:f})}))}componentDidMount(){const{onMount:e}=this.props;e&&e()}componentWillUnmount(){const{error:e,componentStack:t,eventId:n}=this.state,{onUnmount:r}=this.props;r&&r(e,t,n)}__init(){this.resetErrorBoundary=()=>{const{onReset:e}=this.props,{error:t,componentStack:n,eventId:r}=this.state;e&&e(t,n,r),this.setState(l)}}render(){const{fallback:e,children:t}=this.props,n=this.state;if(n.error){let t;return t="function"===typeof e?e({error:n.error,componentStack:n.componentStack,resetError:this.resetErrorBoundary,eventId:n.eventId}):e,s.isValidElement(t)?t:(e&&u&&a.vF.warn("fallback did not produce a valid ReactElement"),null)}return"function"===typeof t?t():t}}},12968:(e,t,n)=>{"use strict";n.d(t,{T:()=>i});var r=n(10390),o=n(40601);function i(e){const t={...e};(0,o.K)(t,"react"),(0,r.Ts)(t)}},19766:(e,t,n)=>{"use strict";n.d(t,{$N:()=>u,Hd:()=>a,xE:()=>l});var r=n(73816);const o=(0,n(16341).VZ)(),i=80;function a(e,t={}){if(!e)return"";try{let n=e;const r=5,o=[];let a=0,u=0;const l=" > ",c=l.length;let f;const d=Array.isArray(t)?t:t.keyAttrs,h=!Array.isArray(t)&&t.maxStringLength||i;for(;n&&a++1&&u+o.length*c+f.length>=h));)o.push(f),u+=f.length,n=n.parentNode;return o.reverse().join(l)}catch(n){return""}}function s(e,t){const n=e,i=[];let a,s,u,l,c;if(!n||!n.tagName)return"";if(o.HTMLElement&&n instanceof HTMLElement&&n.dataset&&n.dataset.sentryComponent)return n.dataset.sentryComponent;i.push(n.tagName.toLowerCase());const f=t&&t.length?t.filter((e=>n.getAttribute(e))).map((e=>[e,n.getAttribute(e)])):null;if(f&&f.length)f.forEach((e=>{i.push(`[${e[0]}="${e[1]}"]`)}));else if(n.id&&i.push(`#${n.id}`),a=n.className,a&&(0,r.Kg)(a))for(s=a.split(/\s+/),c=0;c{"use strict";n.d(t,{T:()=>r});const r="undefined"===typeof 
__SENTRY_DEBUG__||__SENTRY_DEBUG__},73816:(e,t,n)=>{"use strict";n.d(t,{BD:()=>s,Kg:()=>l,L2:()=>_,NF:()=>c,Qd:()=>d,Qg:()=>v,T2:()=>a,W6:()=>u,bJ:()=>o,gd:()=>g,mE:()=>y,sO:()=>f,tH:()=>b,vq:()=>p,xH:()=>h,yr:()=>m});const r=Object.prototype.toString;function o(e){switch(r.call(e)){case"[object Error]":case"[object Exception]":case"[object DOMException]":return!0;default:return b(e,Error)}}function i(e,t){return r.call(e)===`[object ${t}]`}function a(e){return i(e,"ErrorEvent")}function s(e){return i(e,"DOMError")}function u(e){return i(e,"DOMException")}function l(e){return i(e,"String")}function c(e){return"object"===typeof e&&null!==e&&"__sentry_template_string__"in e&&"__sentry_template_values__"in e}function f(e){return null===e||c(e)||"object"!==typeof e&&"function"!==typeof e}function d(e){return i(e,"Object")}function h(e){return"undefined"!==typeof Event&&b(e,Event)}function p(e){return"undefined"!==typeof Element&&b(e,Element)}function g(e){return i(e,"RegExp")}function v(e){return Boolean(e&&e.then&&"function"===typeof e.then)}function y(e){return d(e)&&"nativeEvent"in e&&"preventDefault"in e&&"stopPropagation"in e}function m(e){return"number"===typeof e&&e!==e}function b(e,t){try{return e instanceof t}catch(n){return!1}}function _(e){return!("object"!==typeof e||null===e||!e.__isVue&&!e._isVue)}},17412:(e,t,n)=>{"use strict";n.d(t,{Ow:()=>i,Z9:()=>a,pq:()=>s,vF:()=>u});var r=n(5644),o=n(16341);const i=["debug","info","warn","error","log","assert","trace"],a={};function s(e){if(!("console"in o.OW))return e();const t=o.OW.console,n={},r=Object.keys(a);r.forEach((e=>{const r=a[e];n[e]=t[e],t[e]=r}));try{return e()}finally{r.forEach((e=>{t[e]=n[e]}))}}const u=function(){let e=!1;const t={enable:()=>{e=!0},disable:()=>{e=!1},isEnabled:()=>e};return r.T?i.forEach((n=>{t[n]=(...t)=>{e&&s((()=>{o.OW.console[n](`Sentry Logger [${n}]:`,...t)}))}})):i.forEach((e=>{t[e]=()=>{}})),t}()},6936:(e,t,n)=>{"use strict";n.d(t,{$X:()=>s,GR:()=>c,M6:()=>l,eJ:()=>i,gO:()=>u,k9:()=>f});var r=n(68411),o=n(16341);function i(){const e=o.OW,t=e.crypto||e.msCrypto;let n=()=>16*Math.random();try{if(t&&t.randomUUID)return t.randomUUID().replace(/-/g,"");t&&t.getRandomValues&&(n=()=>{const e=new Uint8Array(1);return t.getRandomValues(e),e[0]})}catch(r){}return([1e7]+1e3+4e3+8e3+1e11).replace(/[018]/g,(e=>(e^(15&n())>>e/4).toString(16)))}function a(e){return e.exception&&e.exception.values?e.exception.values[0]:void 0}function s(e){const{message:t,event_id:n}=e;if(t)return t;const r=a(e);return r?r.type&&r.value?`${r.type}: ${r.value}`:r.type||r.value||n||"":n||""}function u(e,t,n){const r=e.exception=e.exception||{},o=r.values=r.values||[],i=o[0]=o[0]||{};i.value||(i.value=t||""),i.type||(i.type=n||"Error")}function l(e,t){const n=a(e);if(!n)return;const r=n.mechanism;if(n.mechanism={type:"generic",handled:!0,...r,...t},t&&"data"in t){const e={...r&&r.data,...t.data};n.mechanism.data=e}}function c(e){if(e&&e.__sentry_captured__)return!0;try{(0,r.my)(e,"__sentry_captured__",!0)}catch(t){}return!1}function f(e){return Array.isArray(e)?e:[e]}},42181:(e,t,n)=>{"use strict";n.d(t,{S8:()=>a,cd:()=>s});var r=n(73816);var o=n(68411),i=n(83917);function a(e,t=100,n=1/0){try{return u("",e,t,n)}catch(r){return{ERROR:`**non-serializable** (${r})`}}}function s(e,t=3,n=102400){const r=a(e,t);return o=r,function(e){return~-encodeURI(e).split(/%..|./).length}(JSON.stringify(o))>n?s(e,t-1,n):r;var o}function u(e,t,a=1/0,s=1/0,l=function(){const e="function"===typeof WeakSet,t=e?new 
WeakSet:[];return[function(n){if(e)return!!t.has(n)||(t.add(n),!1);for(let e=0;e=s){g[n]="[MaxProperties ~]";break}const e=y[n];g[n]=u(n,e,h-1,s,l),v++}return f(t),g}},68411:(e,t,n)=>{"use strict";n.d(t,{Ce:()=>y,GS:()=>u,HF:()=>v,W4:()=>h,my:()=>l,pO:()=>c,sp:()=>f,u4:()=>d});var r=n(19766),o=n(5644),i=n(73816),a=n(17412),s=n(39653);function u(e,t,n){if(!(t in e))return;const r=e[t],o=n(r);"function"===typeof o&&c(o,r),e[t]=o}function l(e,t,n){try{Object.defineProperty(e,t,{value:n,writable:!0,configurable:!0})}catch(r){o.T&&a.vF.log(`Failed to add non-enumerable property "${t}" to object`,e)}}function c(e,t){try{const n=t.prototype||{};e.prototype=t.prototype=n,l(e,"__sentry_original__",t)}catch(n){}}function f(e){return e.__sentry_original__}function d(e){return Object.keys(e).map((t=>`${encodeURIComponent(t)}=${encodeURIComponent(e[t])}`)).join("&")}function h(e){if((0,i.bJ)(e))return{message:e.message,name:e.name,stack:e.stack,...g(e)};if((0,i.xH)(e)){const t={type:e.type,target:p(e.target),currentTarget:p(e.currentTarget),...g(e)};return"undefined"!==typeof CustomEvent&&(0,i.tH)(e,CustomEvent)&&(t.detail=e.detail),t}return e}function p(e){try{return(0,i.vq)(e)?(0,r.Hd)(e):Object.prototype.toString.call(e)}catch(t){return""}}function g(e){if("object"===typeof e&&null!==e){const t={};for(const n in e)Object.prototype.hasOwnProperty.call(e,n)&&(t[n]=e[n]);return t}return{}}function v(e,t=40){const n=Object.keys(h(e));if(n.sort(),!n.length)return"[object has no keys]";if(n[0].length>=t)return(0,s.xv)(n[0],t);for(let r=n.length;r>0;r--){const e=n.slice(0,r).join(", ");if(!(e.length>t))return r===n.length?e:(0,s.xv)(e,t)}return""}function y(e){return m(e,new Map)}function m(e,t){if(function(e){if(!(0,i.Qd)(e))return!1;try{const t=Object.getPrototypeOf(e).constructor.name;return!t||"Object"===t}catch(t){return!0}}(e)){const n=t.get(e);if(void 0!==n)return n;const r={};t.set(e,r);for(const o of Object.keys(e))"undefined"!==typeof e[o]&&(r[o]=m(e[o],t));return r}if(Array.isArray(e)){const n=t.get(e);if(void 0!==n)return n;const r=[];return t.set(e,r),e.forEach((e=>{r.push(m(e,t))})),r}return e}},83917:(e,t,n)=>{"use strict";n.d(t,{gd:()=>a,qQ:()=>l,vk:()=>s});const r=50,o=/\(error: (.*)\)/,i=/captureMessage|captureException/;function a(...e){const t=e.sort(((e,t)=>e[0]-t[0])).map((e=>e[1]));return(e,n=0)=>{const a=[],s=e.split("\n");for(let i=n;i1024)continue;const n=o.test(e)?e.replace(o,"$1"):e;if(!n.match(/\S*Error: /)){for(const e of t){const t=e(n);if(t){a.push(t);break}}if(a.length>=r)break}}return function(e){if(!e.length)return[];const t=Array.from(e);/sentryWrapped/.test(t[t.length-1].function||"")&&t.pop();t.reverse(),i.test(t[t.length-1].function||"")&&(t.pop(),i.test(t[t.length-1].function||"")&&t.pop());return t.slice(0,r).map((e=>({...e,filename:e.filename||t[t.length-1].filename,function:e.function||"?"})))}(a)}}function s(e){return Array.isArray(e)?a(...e):e}const u="";function l(e){try{return e&&"function"===typeof e&&e.name||u}catch(t){return u}}},39653:(e,t,n)=>{"use strict";n.d(t,{Xr:()=>a,gt:()=>i,xv:()=>o});var r=n(73816);function o(e,t=0){return"string"!==typeof e||0===t||e.length<=t?e:`${e.slice(0,t)}...`}function i(e,t){if(!Array.isArray(e))return"";const n=[];for(let i=0;ifunction(e,t,n=!1){return!!(0,r.Kg)(e)&&((0,r.gd)(t)?t.test(e):!!(0,r.Kg)(t)&&(n?e===t:e.includes(t)))}(e,t,n)))}},60690:(e,t,n)=>{"use strict";n.d(t,{T2:()=>s,XW:()=>i,xg:()=>a});var r,o=n(73816);function i(e){return new s((t=>{t(e)}))}function a(e){return new 
s(((t,n)=>{n(e)}))}!function(e){e[e.PENDING=0]="PENDING";e[e.RESOLVED=1]="RESOLVED";e[e.REJECTED=2]="REJECTED"}(r||(r={}));class s{constructor(e){s.prototype.__init.call(this),s.prototype.__init2.call(this),s.prototype.__init3.call(this),s.prototype.__init4.call(this),this._state=r.PENDING,this._handlers=[];try{e(this._resolve,this._reject)}catch(t){this._reject(t)}}then(e,t){return new s(((n,r)=>{this._handlers.push([!1,t=>{if(e)try{n(e(t))}catch(o){r(o)}else n(t)},e=>{if(t)try{n(t(e))}catch(o){r(o)}else r(e)}]),this._executeHandlers()}))}catch(e){return this.then((e=>e),e)}finally(e){return new s(((t,n)=>{let r,o;return this.then((t=>{o=!1,r=t,e&&e()}),(t=>{o=!0,r=t,e&&e()})).then((()=>{o?n(r):t(r)}))}))}__init(){this._resolve=e=>{this._setResult(r.RESOLVED,e)}}__init2(){this._reject=e=>{this._setResult(r.REJECTED,e)}}__init3(){this._setResult=(e,t)=>{this._state===r.PENDING&&((0,o.Qg)(t)?t.then(this._resolve,this._reject):(this._state=e,this._value=t,this._executeHandlers()))}}__init4(){this._executeHandlers=()=>{if(this._state===r.PENDING)return;const e=this._handlers.slice();this._handlers=[],e.forEach((e=>{e[0]||(this._state===r.RESOLVED&&e[1](this._value),this._state===r.REJECTED&&e[2](this._value),e[0]=!0)}))}}}},89297:(e,t,n)=>{"use strict";n.d(t,{lu:()=>i,zf:()=>a});var r=n(16341);const o=1e3;function i(){return Date.now()/o}const a=function(){const{performance:e}=r.OW;if(!e||!e.now)return i;const t=Date.now()-e.now(),n=void 0==e.timeOrigin?t:e.timeOrigin;return()=>(n+e.now())/o}();let s;(()=>{const{performance:e}=r.OW;if(!e||!e.now)return void(s="none");const t=36e5,n=e.now(),o=Date.now(),i=e.timeOrigin?Math.abs(e.timeOrigin+n-o):t,a=i{"use strict";function r(e){return e&&e.Math==Math?e:void 0}n.d(t,{BY:()=>a,OW:()=>o,VZ:()=>i});const o="object"==typeof globalThis&&r(globalThis)||"object"==typeof window&&r(window)||"object"==typeof self&&r(self)||"object"==typeof n.g&&r(n.g)||function(){return this}()||{};function i(){return o}function a(e,t,n){const r=n||o,i=r.__SENTRY__=r.__SENTRY__||{};return i[e]||(i[e]=t())}},50402:(e,t,n)=>{"use strict";function r(){return r=Object.assign||function(e){for(var t=1;th,default:()=>p,get:()=>o,responsive:()=>d});var o=function(e,t,n,r,o){for(t=t&&t.split?t.split("."):[t],r=0;r=0)return o(e,t,t);var n=Math.abs(t),r=o(e,n,n);return"string"===typeof r?"-"+r:-1*r},f=["margin","marginTop","marginRight","marginBottom","marginLeft","marginX","marginY","top","bottom","left","right"].reduce((function(e,t){var n;return r({},e,((n={})[t]=c,n))}),{}),d=function(e){return function(t){var n={},r=o(t,"breakpoints",i),a=[null].concat(r.map((function(e){return"@media screen and (min-width: "+e+")"})));for(var s in e){var u="function"===typeof e[s]?e[s](t):e[s];if(null!=u)if(Array.isArray(u))for(var l=0;l{"use strict";var r=n(70453),o=n(10487),i=o(r("String.prototype.indexOf"));e.exports=function(e,t){var n=r(e,!!t);return"function"===typeof n&&i(e,".prototype.")>-1?o(n):n}},10487:(e,t,n)=>{"use strict";var r=n(66743),o=n(70453),i=n(96897),a=n(69675),s=o("%Function.prototype.apply%"),u=o("%Function.prototype.call%"),l=o("%Reflect.apply%",!0)||r.call(u,s),c=n(30655),f=o("%Math.max%");e.exports=function(e){if("function"!==typeof e)throw new a("a function is required");var t=l(r,u,arguments);return i(t,1+f(0,e.length-(arguments.length-1)),!0)};var d=function(){return l(r,s,arguments)};c?c(e.exports,"apply",{value:d}):e.exports.apply=d},92151:e=>{var t={utf8:{stringToBytes:function(e){return 
t.bin.stringToBytes(unescape(encodeURIComponent(e)))},bytesToString:function(e){return decodeURIComponent(escape(t.bin.bytesToString(e)))}},bin:{stringToBytes:function(e){for(var t=[],n=0;n{"use strict";function r(e){var t,n,o="";if("string"==typeof e||"number"==typeof e)o+=e;else if("object"==typeof e)if(Array.isArray(e))for(t=0;to,default:()=>i});const i=o},3939:e=>{!function(){var t="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",n={rotl:function(e,t){return e<>>32-t},rotr:function(e,t){return e<<32-t|e>>>t},endian:function(e){if(e.constructor==Number)return 16711935&n.rotl(e,8)|4278255360&n.rotl(e,24);for(var t=0;t0;e--)t.push(Math.floor(256*Math.random()));return t},bytesToWords:function(e){for(var t=[],n=0,r=0;n>>5]|=e[n]<<24-r%32;return t},wordsToBytes:function(e){for(var t=[],n=0;n<32*e.length;n+=8)t.push(e[n>>>5]>>>24-n%32&255);return t},bytesToHex:function(e){for(var t=[],n=0;n>>4).toString(16)),t.push((15&e[n]).toString(16));return t.join("")},hexToBytes:function(e){for(var t=[],n=0;n>>6*(3-i)&63)):n.push("=");return n.join("")},base64ToBytes:function(e){e=e.replace(/[^A-Z0-9+\/]/gi,"");for(var n=[],r=0,o=0;r>>6-2*o);return n}};e.exports=n}()},30454:e=>{"use strict";var t="%[a-f0-9]{2}",n=new RegExp("("+t+")|([^%]+?)","gi"),r=new RegExp("("+t+")+","gi");function o(e,t){try{return[decodeURIComponent(e.join(""))]}catch(i){}if(1===e.length)return e;t=t||1;var n=e.slice(0,t),r=e.slice(t);return Array.prototype.concat.call([],o(n),o(r))}function i(e){try{return decodeURIComponent(e)}catch(i){for(var t=e.match(n)||[],r=1;r{"use strict";var r=n(30655),o=n(58068),i=n(69675),a=n(75795);e.exports=function(e,t,n){if(!e||"object"!==typeof e&&"function"!==typeof e)throw new i("`obj` must be an object or a function`");if("string"!==typeof t&&"symbol"!==typeof t)throw new i("`property` must be a string or a symbol`");if(arguments.length>3&&"boolean"!==typeof arguments[3]&&null!==arguments[3])throw new i("`nonEnumerable`, if provided, must be a boolean or null");if(arguments.length>4&&"boolean"!==typeof arguments[4]&&null!==arguments[4])throw new i("`nonWritable`, if provided, must be a boolean or null");if(arguments.length>5&&"boolean"!==typeof arguments[5]&&null!==arguments[5])throw new i("`nonConfigurable`, if provided, must be a boolean or null");if(arguments.length>6&&"boolean"!==typeof arguments[6])throw new i("`loose`, if provided, must be a boolean");var s=arguments.length>3?arguments[3]:null,u=arguments.length>4?arguments[4]:null,l=arguments.length>5?arguments[5]:null,c=arguments.length>6&&arguments[6],f=!!a&&a(e,t);if(r)r(e,t,{configurable:null===l&&f?f.configurable:!l,enumerable:null===s&&f?f.enumerable:!s,value:n,writable:null===u&&f?f.writable:!u});else{if(!c&&(s||u||l))throw new o("This environment does not support defining a property as non-configurable, non-writable, or non-enumerable.");e[t]=n}}},38452:(e,t,n)=>{"use strict";var r=n(1189),o="function"===typeof Symbol&&"symbol"===typeof Symbol("foo"),i=Object.prototype.toString,a=Array.prototype.concat,s=n(30041),u=n(30592)(),l=function(e,t,n,r){if(t in e)if(!0===r){if(e[t]===n)return}else if("function"!==typeof(o=r)||"[object Function]"!==i.call(o)||!r())return;var o;u?s(e,t,n,!0):s(e,t,n)},c=function(e,t){var n=arguments.length>2?arguments[2]:{},i=r(t);o&&(i=a.call(i,Object.getOwnPropertySymbols(t)));for(var s=0;s{"use strict";n.r(t),n.d(t,{default:()=>r.default});var r=n(77887)},76641:(e,t,n)=>{"use strict";n.d(t,{A4:()=>_,GH:()=>i,Tj:()=>o,Z7:()=>s,fi:()=>a});var r=n(43015),o=function(e,t,n,r,o,a){return 
i(e,t,n,(function(e){return"logscale"!==e&&r(e)}),o,a)},i=function(e,t,n,o,i,a){var s,u,l,c,f=o("pixelsPerLabel"),d=[];if(a)for(s=0;s=c/4){for(var v=p;v>=h;v--){var m=y[v],b=Math.log(m/e)/Math.log(t/e)*n,_={v:m};null===g||Math.abs(b-g.pixel_coord)>=f?g={tickValue:m,pixel_coord:b}:_.label="",d.push(_)}d.reverse()}}if(0===d.length){var w,x;o("labelsKMG2")?(w=[1,2,4,8,16,32,64,128,256],x=16):(w=[1,2,5,10,20,50,100],x=10);var S,A,E,C=Math.ceil(n/f),R=Math.abs(t-e)/C,O=Math.floor(Math.log(R)/Math.log(x)),T=Math.pow(x,O);for(u=0;uf));u++);for(A>E&&(S*=-1),s=0;s<=c;s++)l=A+s*S,d.push({v:l})}}var k=o("axisLabelFormatter");for(s=0;s=0?_(e,t,a,r,o):[]},s={MILLISECONDLY:0,TWO_MILLISECONDLY:1,FIVE_MILLISECONDLY:2,TEN_MILLISECONDLY:3,FIFTY_MILLISECONDLY:4,HUNDRED_MILLISECONDLY:5,FIVE_HUNDRED_MILLISECONDLY:6,SECONDLY:7,TWO_SECONDLY:8,FIVE_SECONDLY:9,TEN_SECONDLY:10,THIRTY_SECONDLY:11,MINUTELY:12,TWO_MINUTELY:13,FIVE_MINUTELY:14,TEN_MINUTELY:15,THIRTY_MINUTELY:16,HOURLY:17,TWO_HOURLY:18,SIX_HOURLY:19,DAILY:20,TWO_DAILY:21,WEEKLY:22,MONTHLY:23,QUARTERLY:24,BIANNUAL:25,ANNUAL:26,DECADAL:27,CENTENNIAL:28,NUM_GRANULARITIES:29},u=0,l=1,c=2,f=3,d=4,h=5,p=6,g=7,v=[];v[s.MILLISECONDLY]={datefield:p,step:1,spacing:1},v[s.TWO_MILLISECONDLY]={datefield:p,step:2,spacing:2},v[s.FIVE_MILLISECONDLY]={datefield:p,step:5,spacing:5},v[s.TEN_MILLISECONDLY]={datefield:p,step:10,spacing:10},v[s.FIFTY_MILLISECONDLY]={datefield:p,step:50,spacing:50},v[s.HUNDRED_MILLISECONDLY]={datefield:p,step:100,spacing:100},v[s.FIVE_HUNDRED_MILLISECONDLY]={datefield:p,step:500,spacing:500},v[s.SECONDLY]={datefield:h,step:1,spacing:1e3},v[s.TWO_SECONDLY]={datefield:h,step:2,spacing:2e3},v[s.FIVE_SECONDLY]={datefield:h,step:5,spacing:5e3},v[s.TEN_SECONDLY]={datefield:h,step:10,spacing:1e4},v[s.THIRTY_SECONDLY]={datefield:h,step:30,spacing:3e4},v[s.MINUTELY]={datefield:d,step:1,spacing:6e4},v[s.TWO_MINUTELY]={datefield:d,step:2,spacing:12e4},v[s.FIVE_MINUTELY]={datefield:d,step:5,spacing:3e5},v[s.TEN_MINUTELY]={datefield:d,step:10,spacing:6e5},v[s.THIRTY_MINUTELY]={datefield:d,step:30,spacing:18e5},v[s.HOURLY]={datefield:f,step:1,spacing:36e5},v[s.TWO_HOURLY]={datefield:f,step:2,spacing:72e5},v[s.SIX_HOURLY]={datefield:f,step:6,spacing:216e5},v[s.DAILY]={datefield:c,step:1,spacing:864e5},v[s.TWO_DAILY]={datefield:c,step:2,spacing:1728e5},v[s.WEEKLY]={datefield:c,step:7,spacing:6048e5},v[s.MONTHLY]={datefield:l,step:1,spacing:2629746e3},v[s.QUARTERLY]={datefield:l,step:3,spacing:7889238e3},v[s.BIANNUAL]={datefield:l,step:6,spacing:15778476e3},v[s.ANNUAL]={datefield:u,step:1,spacing:31556952e3},v[s.DECADAL]={datefield:u,step:10,spacing:31556952e4},v[s.CENTENNIAL]={datefield:u,step:100,spacing:31556952e5};var y=function(){for(var e=[],t=-39;t<=39;t++)for(var n=Math.pow(10,t),r=1;r<=9;r++){var o=n*r;e.push(o)}return e}(),m=function(e,t,n,r){for(var o=r("pixelsPerLabel"),i=0;i=o)return i}return-1},b=function(e,t,n){var r=v[n].spacing;return Math.round(1*(t-e)/r)},_=function(e,t,n,o,i){var a=o("axisLabelFormatter"),y=o("labelsUTC")?r.DateAccessorsUTC:r.DateAccessorsLocal,m=v[n].datefield,b=v[n].step,_=v[n].spacing,w=new Date(e),x=[];x[u]=y.getFullYear(w),x[l]=y.getMonth(w),x[c]=y.getDate(w),x[f]=y.getHours(w),x[d]=y.getMinutes(w),x[h]=y.getSeconds(w),x[p]=y.getMilliseconds(w);var S=x[m]%b;n==s.WEEKLY&&(S=y.getDay(w)),x[m]-=S;for(var A=m+1;A=s.DAILY||y.getHours(C)%b===0)&&E.push({v:R,label:a.call(i,C,n,o,i)}),x[m]+=b,R=(C=y.makeDate.apply(null,x)).getTime();return E}},43015:(e,t,n)=>{"use 
strict";n.r(t),n.d(t,{Circles:()=>Z,DASHED_LINE:()=>c,DOTTED_LINE:()=>l,DOT_DASH_LINE:()=>f,DateAccessorsLocal:()=>O,DateAccessorsUTC:()=>T,HORIZONTAL:()=>d,Iterator:()=>W,LN_TEN:()=>a,LOG_SCALE:()=>i,VERTICAL:()=>h,addEvent:()=>g,binarySearch:()=>I,cancelEvent:()=>y,clone:()=>V,createCanvas:()=>$,createIterator:()=>q,dateAxisLabelFormatter:()=>ve,dateParser:()=>N,dateStrToMillis:()=>D,dateString_:()=>M,dateValueFormatter:()=>ye,detectLineDelimiter:()=>Q,dragGetX_:()=>x,dragGetY_:()=>S,findPos:()=>b,floatFormat:()=>C,getContext:()=>p,getContextPixelRatio:()=>H,hmsString_:()=>k,hsvToRGB:()=>m,isArrayLike:()=>z,isCanvasSupported:()=>ie,isDateLike:()=>B,isNodeContainedBy:()=>J,isOK:()=>A,isPixelChangingOptionList:()=>K,isValidPoint:()=>E,log10:()=>s,logRangeFraction:()=>u,numberAxisLabelFormatter:()=>pe,numberValueFormatter:()=>he,pageX:()=>_,pageY:()=>w,parseFloat_:()=>ae,pow:()=>ee,removeEvent:()=>v,repeatAndCleanup:()=>Y,requestAnimFrame:()=>G,round_:()=>P,setupDOMready_:()=>we,toRGB_:()=>oe,type:()=>o,typeArrayLike:()=>U,update:()=>L,updateDeep:()=>j,zeropad:()=>R});var r=n(76641);function o(e){return null===e?"null":typeof e}var i=10,a=Math.log(i),s=function(e){return Math.log(e)/a},u=function(e,t,n){var r=s(e),o=r+n*(s(t)-r);return Math.pow(i,o)},l=[2,2],c=[7,3],f=[7,2,2,2],d=1,h=2,p=function(e){return e.getContext("2d")},g=function(e,t,n){e.addEventListener(t,n,!1)};function v(e,t,n){e.removeEventListener(t,n,!1)}function y(e){return(e=e||window.event).stopPropagation&&e.stopPropagation(),e.preventDefault&&e.preventDefault(),e.cancelBubble=!0,e.cancel=!0,e.returnValue=!1,!1}function m(e,t,n){var r,o,i;if(0===t)r=n,o=n,i=n;else{var a=Math.floor(6*e),s=6*e-a,u=n*(1-t),l=n*(1-t*s),c=n*(1-t*(1-s));switch(a){case 1:r=l,o=n,i=u;break;case 2:r=u,o=n,i=c;break;case 3:r=u,o=l,i=n;break;case 4:r=c,o=u,i=n;break;case 5:r=n,o=u,i=l;break;case 6:case 0:r=n,o=c,i=u}}return"rgb("+(r=Math.floor(255*r+.5))+","+(o=Math.floor(255*o+.5))+","+(i=Math.floor(255*i+.5))+")"}function b(e){var t=e.getBoundingClientRect(),n=window,r=document.documentElement;return{x:t.left+(n.pageXOffset||r.scrollLeft),y:t.top+(n.pageYOffset||r.scrollTop)}}function _(e){return!e.pageX||e.pageX<0?0:e.pageX}function w(e){return!e.pageY||e.pageY<0?0:e.pageY}function x(e,t){return _(e)-t.px}function S(e,t){return w(e)-t.py}function A(e){return!!e&&!isNaN(e)}function E(e,t){return!!e&&(null!==e.yval&&(null!==e.x&&void 0!==e.x&&(null!==e.y&&void 0!==e.y&&!(isNaN(e.x)||!t&&isNaN(e.y)))))}function C(e,t){var n=Math.min(Math.max(1,t||2),21);return Math.abs(e)<.001&&0!==e?e.toExponential(n-1):e.toPrecision(n)}function R(e){return e<10?"0"+e:""+e}var O={getFullYear:e=>e.getFullYear(),getMonth:e=>e.getMonth(),getDate:e=>e.getDate(),getHours:e=>e.getHours(),getMinutes:e=>e.getMinutes(),getSeconds:e=>e.getSeconds(),getMilliseconds:e=>e.getMilliseconds(),getDay:e=>e.getDay(),makeDate:function(e,t,n,r,o,i,a){return new Date(e,t,n,r,o,i,a)}},T={getFullYear:e=>e.getUTCFullYear(),getMonth:e=>e.getUTCMonth(),getDate:e=>e.getUTCDate(),getHours:e=>e.getUTCHours(),getMinutes:e=>e.getUTCMinutes(),getSeconds:e=>e.getUTCSeconds(),getMilliseconds:e=>e.getUTCMilliseconds(),getDay:e=>e.getUTCDay(),makeDate:function(e,t,n,r,o,i,a){return new Date(Date.UTC(e,t,n,r,o,i,a))}};function k(e,t,n,r){var o=R(e)+":"+R(t);if(n&&(o+=":"+R(n),r)){var i=""+r;o+="."+("000"+i).substring(i.length)}return o}function M(e,t){var n=t?T:O,r=new 
Date(e),o=n.getFullYear(r),i=n.getMonth(r),a=n.getDate(r),s=n.getHours(r),u=n.getMinutes(r),l=n.getSeconds(r),c=n.getMilliseconds(r),f=""+o+"/"+R(i+1)+"/"+R(a);return 3600*s+60*u+l+.001*c&&(f+=" "+k(s,u,l,c)),f}function P(e,t){var n=Math.pow(10,t);return Math.round(e*n)/n}function I(e,t,n,r,o){if(null!==r&&void 0!==r&&null!==o&&void 0!==o||(r=0,o=t.length-1),r>o)return-1;null!==n&&void 0!==n||(n=0);var i,a=function(e){return e>=0&&ee?n>0&&a(i=s-1)&&t[i]e?s:I(e,t,n,s+1,o):-1}function N(e){var t,n;if((-1==e.search("-")||-1!=e.search("T")||-1!=e.search("Z"))&&(n=D(e))&&!isNaN(n))return n;if(-1!=e.search("-")){for(t=e.replace("-","/","g");-1!=t.search("-");)t=t.replace("-","/");n=D(t)}else n=D(e);return n&&!isNaN(n)||console.error("Couldn't parse "+e+" as a date"),n}function D(e){return new Date(e).getTime()}function L(e,t){if("undefined"!=typeof t&&null!==t)for(var n in t)t.hasOwnProperty(n)&&(e[n]=t[n]);return e}var F="undefined"!==typeof Node&&null!==Node&&"object"===typeof Node?function(e){return e instanceof Node}:function(e){return"object"===typeof e&&"number"===typeof e.nodeType&&"string"===typeof e.nodeName};function j(e,t){if("undefined"!=typeof t&&null!==t)for(var n in t)if(t.hasOwnProperty(n)){const r=t[n];null===r?e[n]=null:z(r)?e[n]=r.slice():F(r)?e[n]=r:"object"==typeof r?("object"==typeof e[n]&&null!==e[n]||(e[n]={}),j(e[n],r)):e[n]=r}return e}function U(e){if(null===e)return"null";const t=typeof e;return("object"===t||"function"===t&&"function"===typeof e.item)&&"number"===typeof e.length&&3!==e.nodeType&&4!==e.nodeType?"array":t}function z(e){const t=typeof e;return null!==e&&("object"===t||"function"===t&&"function"===typeof e.item)&&"number"===typeof e.length&&3!==e.nodeType&&4!==e.nodeType}function B(e){return null!==e&&"object"===typeof e&&"function"===typeof e.getTime}function V(e){for(var t=[],n=0;n=t||G.call(window,(function(){var t=(new Date).getTime()-a;o=i;var l=(i=Math.floor(t/n))-o;i+l>s||i>=s?(e(s),r()):(0!==l&&e(i),u())}))}()}else r()}var X={annotationClickHandler:!0,annotationDblClickHandler:!0,annotationMouseOutHandler:!0,annotationMouseOverHandler:!0,axisLineColor:!0,axisLineWidth:!0,clickCallback:!0,drawCallback:!0,drawHighlightPointCallback:!0,drawPoints:!0,drawPointCallback:!0,drawGrid:!0,fillAlpha:!0,gridLineColor:!0,gridLineWidth:!0,hideOverlayOnMouseOut:!0,highlightCallback:!0,highlightCircleSize:!0,interactionModel:!0,labelsDiv:!0,labelsKMB:!0,labelsKMG2:!0,labelsSeparateLines:!0,labelsShowZeroValues:!0,legend:!0,panEdgeFraction:!0,pixelsPerYLabel:!0,pointClickCallback:!0,pointSize:!0,rangeSelectorPlotFillColor:!0,rangeSelectorPlotFillGradientColor:!0,rangeSelectorPlotStrokeColor:!0,rangeSelectorBackgroundStrokeColor:!0,rangeSelectorBackgroundLineWidth:!0,rangeSelectorPlotLineWidth:!0,rangeSelectorForegroundStrokeColor:!0,rangeSelectorForegroundLineWidth:!0,rangeSelectorAlpha:!0,showLabelsOnHighlight:!0,showRoller:!0,strokeWidth:!0,underlayCallback:!0,unhighlightCallback:!0,zoomCallback:!0};function K(e,t){var n={};if(e)for(var r=1;r=u){for(c=f.length;c>0;)if(l=ee(u,c),--c,s>=l)return s/l>=Math.pow(10,o)?e.toExponential(r):P(e/l,r)+f[c]}else if(s<1){for(c=0;c=1););return s*l=Math.pow(10,o)||s=r.Z7.DECADAL)return""+i;if(t>=r.Z7.MONTHLY)return ge[a]+" "+i;if(0===3600*u+60*l+c+.001*f||t>=r.Z7.DAILY)return R(s)+" "+ge[a];if(tr.Z7.MINUTELY?k(u,l,c,0):k(u,l,c,f)}function ye(e,t){return M(e,t("labelsUTC"))}var me=[],be=!1;function _e(e){return"function"===typeof e&&e(),!0}function we(e){if("undefined"!==typeof document){const 
[Unrecoverable minified JavaScript: this span is part of the dashboard's bundled vendor code, and the extraction stripped `<...>` sequences as HTML tags, deleting comparison operators, loop bounds, and whole expressions throughout, leaving the code truncated mid-statement on every line. The modules that remain identifiable are:

  - dygraphs: DygraphLayout; the canvas renderer with its line/error/fill plotters and fast-canvas proxy; the default interaction model (mouse pan/zoom, touch pinch-zoom, double-tap zoom reset); the default options table; DygraphOptions (per-series/per-axis/global option resolution); the data handlers (rolling averages, error bars, fractions); the bundled plugins Annotations, Axes, ChartLabels, Grid, Legend, and RangeSelector; and the Dygraph core itself (zooming, selection and highlighting, CSV and Google DataTable parsing, updateOptions/setVisibility/setAnnotations, event plumbing).
  - easy-pie-chart's canvas renderer.
  - webpack-packaged npm helpers: es-errors, function-bind, get-intrinsic, has-symbols, hasown, history (createBrowserHistory), hoist-non-react-statics, react-is, is-arguments, is-buffer, is-date-object, is-regex, and lodash internals (Hash, ListCache, MapCache, SetCache, Stack, baseClone, baseIsEqual, and related).]
Int16Array]"]=a["[object Int32Array]"]=a["[object Uint8Array]"]=a["[object Uint8ClampedArray]"]=a["[object Uint16Array]"]=a["[object Uint32Array]"]=!0,a["[object Arguments]"]=a["[object Array]"]=a["[object ArrayBuffer]"]=a["[object Boolean]"]=a["[object DataView]"]=a["[object Date]"]=a["[object Error]"]=a["[object Function]"]=a["[object Map]"]=a["[object Number]"]=a["[object Object]"]=a["[object RegExp]"]=a["[object Set]"]=a["[object String]"]=a["[object WeakMap]"]=!1,e.exports=function(e){return i(e)&&o(e.length)&&!!a[r(e)]}},88984:(e,t,n)=>{var r=n(55527),o=n(3650),i=Object.prototype.hasOwnProperty;e.exports=function(e){if(!r(e))return o(e);var t=[];for(var n in Object(e))i.call(e,n)&&"constructor"!=n&&t.push(n);return t}},72903:(e,t,n)=>{var r=n(23805),o=n(55527),i=n(90181),a=Object.prototype.hasOwnProperty;e.exports=function(e){if(!r(e))return i(e);var t=o(e),n=[];for(var s in e)("constructor"!=s||!t&&a.call(e,s))&&n.push(s);return n}},94033:e=>{e.exports=function(){}},85250:(e,t,n)=>{var r=n(37217),o=n(87805),i=n(86649),a=n(42824),s=n(23805),u=n(37241),l=n(14974);e.exports=function e(t,n,c,f,d){t!==n&&i(n,(function(i,u){if(d||(d=new r),s(i))a(t,n,u,c,e,f,d);else{var h=f?f(l(t,u),i,u+"",t,n,d):void 0;void 0===h&&(h=i),o(t,u,h)}}),u)}},42824:(e,t,n)=>{var r=n(87805),o=n(93290),i=n(71961),a=n(23007),s=n(35529),u=n(72428),l=n(56449),c=n(83693),f=n(3656),d=n(1882),h=n(23805),p=n(11331),g=n(37167),v=n(14974),y=n(69884);e.exports=function(e,t,n,m,b,_,w){var x=v(e,n),S=v(t,n),A=w.get(S);if(A)r(e,n,A);else{var E=_?_(x,S,n+"",e,t,w):void 0,C=void 0===E;if(C){var R=l(S),O=!R&&f(S),T=!R&&!O&&g(S);E=S,R||O||T?l(x)?E=x:c(x)?E=a(x):O?(C=!1,E=o(S,!0)):T?(C=!1,E=i(S,!0)):E=[]:p(S)||u(S)?(E=x,u(x)?E=y(x):h(x)&&!d(x)||(E=s(S))):C=!1}C&&(w.set(S,E),b(E,S,m,_,w),w.delete(S)),r(e,n,E)}}},69302:(e,t,n)=>{var r=n(83488),o=n(56757),i=n(32865);e.exports=function(e,t){return i(o(e,t,r),e+"")}},19570:(e,t,n)=>{var r=n(37334),o=n(93243),i=n(83488),a=o?function(e,t){return o(e,"toString",{configurable:!0,enumerable:!1,value:r(t),writable:!0})}:i;e.exports=a},25160:e=>{e.exports=function(e,t,n){var r=-1,o=e.length;t<0&&(t=-t>o?0:o+t),(n=n>o?o:n)<0&&(n+=o),o=t>n?0:n-t>>>0,t>>>=0;for(var i=Array(o);++r{e.exports=function(e,t){for(var n=-1,r=Array(e);++n{var r=n(51873),o=n(34932),i=n(56449),a=n(44394),s=r?r.prototype:void 0,u=s?s.toString:void 0;e.exports=function e(t){if("string"==typeof t)return t;if(i(t))return o(t,e)+"";if(a(t))return u?u.call(t):"";var n=t+"";return"0"==n&&1/t==-1/0?"-0":n}},27301:e=>{e.exports=function(e){return function(t){return e(t)}}},19931:(e,t,n)=>{var r=n(31769),o=n(68090),i=n(68969),a=n(77797);e.exports=function(e,t){return t=r(t,e),null==(e=i(e,t))||delete e[a(o(t))]}},19219:e=>{e.exports=function(e,t){return e.has(t)}},31769:(e,t,n)=>{var r=n(56449),o=n(28586),i=n(61802),a=n(13222);e.exports=function(e,t){return r(e)?e:o(e,t)?[e]:i(a(e))}},49653:(e,t,n)=>{var r=n(37828);e.exports=function(e){var t=new e.constructor(e.byteLength);return new r(t).set(new r(e)),t}},93290:(e,t,n)=>{e=n.nmd(e);var r=n(9325),o=t&&!t.nodeType&&t,i=o&&e&&!e.nodeType&&e,a=i&&i.exports===o?r.Buffer:void 0,s=a?a.allocUnsafe:void 0;e.exports=function(e,t){if(t)return e.slice();var n=e.length,r=s?s(n):new e.constructor(n);return e.copy(r),r}},76169:(e,t,n)=>{var r=n(49653);e.exports=function(e,t){var n=t?r(e.buffer):e.buffer;return new e.constructor(n,e.byteOffset,e.byteLength)}},73201:e=>{var t=/\w*$/;e.exports=function(e){var n=new e.constructor(e.source,t.exec(e));return 
n.lastIndex=e.lastIndex,n}},93736:(e,t,n)=>{var r=n(51873),o=r?r.prototype:void 0,i=o?o.valueOf:void 0;e.exports=function(e){return i?Object(i.call(e)):{}}},71961:(e,t,n)=>{var r=n(49653);e.exports=function(e,t){var n=t?r(e.buffer):e.buffer;return new e.constructor(n,e.byteOffset,e.length)}},23007:e=>{e.exports=function(e,t){var n=-1,r=e.length;for(t||(t=Array(r));++n{var r=n(16547),o=n(43360);e.exports=function(e,t,n,i){var a=!n;n||(n={});for(var s=-1,u=t.length;++s{var r=n(21791),o=n(4664);e.exports=function(e,t){return r(e,o(e),t)}},48948:(e,t,n)=>{var r=n(21791),o=n(86375);e.exports=function(e,t){return r(e,o(e),t)}},55481:(e,t,n)=>{var r=n(9325)["__core-js_shared__"];e.exports=r},20999:(e,t,n)=>{var r=n(69302),o=n(36800);e.exports=function(e){return r((function(t,n){var r=-1,i=n.length,a=i>1?n[i-1]:void 0,s=i>2?n[2]:void 0;for(a=e.length>3&&"function"==typeof a?(i--,a):void 0,s&&o(n[0],n[1],s)&&(a=i<3?void 0:a,i=1),t=Object(t);++r{e.exports=function(e){return function(t,n,r){for(var o=-1,i=Object(t),a=r(t),s=a.length;s--;){var u=a[e?s:++o];if(!1===n(i[u],u,i))break}return t}}},66929:(e,t,n)=>{var r=n(56017),o=n(38816),i=n(37381),a=n(62284),s=n(56449),u=n(85087);e.exports=function(e){return o((function(t){var n=t.length,o=n,l=r.prototype.thru;for(e&&t.reverse();o--;){var c=t[o];if("function"!=typeof c)throw new TypeError("Expected a function");if(l&&!f&&"wrapper"==a(c))var f=new r([],!0)}for(o=f?o:n;++o{var r=n(11331);e.exports=function(e){return r(e)?void 0:e}},93243:(e,t,n)=>{var r=n(56110),o=function(){try{var e=r(Object,"defineProperty");return e({},"",{}),e}catch(t){}}();e.exports=o},25911:(e,t,n)=>{var r=n(38859),o=n(14248),i=n(19219);e.exports=function(e,t,n,a,s,u){var l=1&n,c=e.length,f=t.length;if(c!=f&&!(l&&f>c))return!1;var d=u.get(e),h=u.get(t);if(d&&h)return d==t&&h==e;var p=-1,g=!0,v=2&n?new r:void 0;for(u.set(e,t),u.set(t,e);++p{var r=n(51873),o=n(37828),i=n(75288),a=n(25911),s=n(20317),u=n(84247),l=r?r.prototype:void 0,c=l?l.valueOf:void 0;e.exports=function(e,t,n,r,l,f,d){switch(n){case"[object DataView]":if(e.byteLength!=t.byteLength||e.byteOffset!=t.byteOffset)return!1;e=e.buffer,t=t.buffer;case"[object ArrayBuffer]":return!(e.byteLength!=t.byteLength||!f(new o(e),new o(t)));case"[object Boolean]":case"[object Date]":case"[object Number]":return i(+e,+t);case"[object Error]":return e.name==t.name&&e.message==t.message;case"[object RegExp]":case"[object String]":return e==t+"";case"[object Map]":var h=s;case"[object Set]":var p=1&r;if(h||(h=u),e.size!=t.size&&!p)return!1;var g=d.get(e);if(g)return g==t;r|=2,d.set(e,t);var v=a(h(e),h(t),r,l,f,d);return d.delete(e),v;case"[object Symbol]":if(c)return c.call(e)==c.call(t)}return!1}},50689:(e,t,n)=>{var r=n(50002),o=Object.prototype.hasOwnProperty;e.exports=function(e,t,n,i,a,s){var u=1&n,l=r(e),c=l.length;if(c!=r(t).length&&!u)return!1;for(var f=c;f--;){var d=l[f];if(!(u?d in t:o.call(t,d)))return!1}var h=s.get(e),p=s.get(t);if(h&&p)return h==t&&p==e;var g=!0;s.set(e,t),s.set(t,e);for(var v=u;++f{var r=n(35970),o=n(56757),i=n(32865);e.exports=function(e){return i(o(e,void 0,r),e+"")}},34840:(e,t,n)=>{var r="object"==typeof n.g&&n.g&&n.g.Object===Object&&n.g;e.exports=r},50002:(e,t,n)=>{var r=n(82199),o=n(4664),i=n(95950);e.exports=function(e){return r(e,i,o)}},83349:(e,t,n)=>{var r=n(82199),o=n(86375),i=n(37241);e.exports=function(e){return r(e,i,o)}},37381:(e,t,n)=>{var r=n(48152),o=n(63950),i=r?function(e){return r.get(e)}:o;e.exports=i},62284:(e,t,n)=>{var 
r=n(84629),o=Object.prototype.hasOwnProperty;e.exports=function(e){for(var t=e.name+"",n=r[t],i=o.call(r,t)?n.length:0;i--;){var a=n[i],s=a.func;if(null==s||s==e)return a.name}return t}},12651:(e,t,n)=>{var r=n(74218);e.exports=function(e,t){var n=e.__data__;return r(t)?n["string"==typeof t?"string":"hash"]:n.map}},56110:(e,t,n)=>{var r=n(45083),o=n(10392);e.exports=function(e,t){var n=o(e,t);return r(n)?n:void 0}},28879:(e,t,n)=>{var r=n(74335)(Object.getPrototypeOf,Object);e.exports=r},659:(e,t,n)=>{var r=n(51873),o=Object.prototype,i=o.hasOwnProperty,a=o.toString,s=r?r.toStringTag:void 0;e.exports=function(e){var t=i.call(e,s),n=e[s];try{e[s]=void 0;var r=!0}catch(u){}var o=a.call(e);return r&&(t?e[s]=n:delete e[s]),o}},4664:(e,t,n)=>{var r=n(79770),o=n(63345),i=Object.prototype.propertyIsEnumerable,a=Object.getOwnPropertySymbols,s=a?function(e){return null==e?[]:(e=Object(e),r(a(e),(function(t){return i.call(e,t)})))}:o;e.exports=s},86375:(e,t,n)=>{var r=n(14528),o=n(28879),i=n(4664),a=n(63345),s=Object.getOwnPropertySymbols?function(e){for(var t=[];e;)r(t,i(e)),e=o(e);return t}:a;e.exports=s},5861:(e,t,n)=>{var r=n(55580),o=n(68223),i=n(32804),a=n(76545),s=n(28303),u=n(72552),l=n(47473),c="[object Map]",f="[object Promise]",d="[object Set]",h="[object WeakMap]",p="[object DataView]",g=l(r),v=l(o),y=l(i),m=l(a),b=l(s),_=u;(r&&_(new r(new ArrayBuffer(1)))!=p||o&&_(new o)!=c||i&&_(i.resolve())!=f||a&&_(new a)!=d||s&&_(new s)!=h)&&(_=function(e){var t=u(e),n="[object Object]"==t?e.constructor:void 0,r=n?l(n):"";if(r)switch(r){case g:return p;case v:return c;case y:return f;case m:return d;case b:return h}return t}),e.exports=_},10392:e=>{e.exports=function(e,t){return null==e?void 0:e[t]}},22032:(e,t,n)=>{var r=n(81042);e.exports=function(){this.__data__=r?r(null):{},this.size=0}},63862:e=>{e.exports=function(e){var t=this.has(e)&&delete this.__data__[e];return this.size-=t?1:0,t}},66721:(e,t,n)=>{var r=n(81042),o=Object.prototype.hasOwnProperty;e.exports=function(e){var t=this.__data__;if(r){var n=t[e];return"__lodash_hash_undefined__"===n?void 0:n}return o.call(t,e)?t[e]:void 0}},12749:(e,t,n)=>{var r=n(81042),o=Object.prototype.hasOwnProperty;e.exports=function(e){var t=this.__data__;return r?void 0!==t[e]:o.call(t,e)}},35749:(e,t,n)=>{var r=n(81042);e.exports=function(e,t){var n=this.__data__;return this.size+=this.has(e)?0:1,n[e]=r&&void 0===t?"__lodash_hash_undefined__":t,this}},76189:e=>{var t=Object.prototype.hasOwnProperty;e.exports=function(e){var n=e.length,r=new e.constructor(n);return n&&"string"==typeof e[0]&&t.call(e,"index")&&(r.index=e.index,r.input=e.input),r}},77199:(e,t,n)=>{var r=n(49653),o=n(76169),i=n(73201),a=n(93736),s=n(71961);e.exports=function(e,t,n){var u=e.constructor;switch(t){case"[object ArrayBuffer]":return r(e);case"[object Boolean]":case"[object Date]":return new u(+e);case"[object DataView]":return o(e,n);case"[object Float32Array]":case"[object Float64Array]":case"[object Int8Array]":case"[object Int16Array]":case"[object Int32Array]":case"[object Uint8Array]":case"[object Uint8ClampedArray]":case"[object Uint16Array]":case"[object Uint32Array]":return s(e,n);case"[object Map]":case"[object Set]":return new u;case"[object Number]":case"[object String]":return new u(e);case"[object RegExp]":return i(e);case"[object Symbol]":return a(e)}}},35529:(e,t,n)=>{var r=n(39344),o=n(28879),i=n(55527);e.exports=function(e){return"function"!=typeof e.constructor||i(e)?{}:r(o(e))}},45891:(e,t,n)=>{var r=n(51873),o=n(72428),i=n(56449),a=r?r.isConcatSpreadable:void 
0;e.exports=function(e){return i(e)||o(e)||!!(a&&e&&e[a])}},30361:e=>{var t=/^(?:0|[1-9]\d*)$/;e.exports=function(e,n){var r=typeof e;return!!(n=null==n?9007199254740991:n)&&("number"==r||"symbol"!=r&&t.test(e))&&e>-1&&e%1==0&&e{var r=n(75288),o=n(64894),i=n(30361),a=n(23805);e.exports=function(e,t,n){if(!a(n))return!1;var s=typeof t;return!!("number"==s?o(n)&&i(t,n.length):"string"==s&&t in n)&&r(n[t],e)}},28586:(e,t,n)=>{var r=n(56449),o=n(44394),i=/\.|\[(?:[^[\]]*|(["'])(?:(?!\1)[^\\]|\\.)*?\1)\]/,a=/^\w*$/;e.exports=function(e,t){if(r(e))return!1;var n=typeof e;return!("number"!=n&&"symbol"!=n&&"boolean"!=n&&null!=e&&!o(e))||(a.test(e)||!i.test(e)||null!=t&&e in Object(t))}},74218:e=>{e.exports=function(e){var t=typeof e;return"string"==t||"number"==t||"symbol"==t||"boolean"==t?"__proto__"!==e:null===e}},85087:(e,t,n)=>{var r=n(30980),o=n(37381),i=n(62284),a=n(53758);e.exports=function(e){var t=i(e),n=a[t];if("function"!=typeof n||!(t in r.prototype))return!1;if(e===n)return!0;var s=o(n);return!!s&&e===s[0]}},87296:(e,t,n)=>{var r=n(55481),o=function(){var e=/[^.]+$/.exec(r&&r.keys&&r.keys.IE_PROTO||"");return e?"Symbol(src)_1."+e:""}();e.exports=function(e){return!!o&&o in e}},55527:e=>{var t=Object.prototype;e.exports=function(e){var n=e&&e.constructor;return e===("function"==typeof n&&n.prototype||t)}},63702:e=>{e.exports=function(){this.__data__=[],this.size=0}},70080:(e,t,n)=>{var r=n(26025),o=Array.prototype.splice;e.exports=function(e){var t=this.__data__,n=r(t,e);return!(n<0)&&(n==t.length-1?t.pop():o.call(t,n,1),--this.size,!0)}},24739:(e,t,n)=>{var r=n(26025);e.exports=function(e){var t=this.__data__,n=r(t,e);return n<0?void 0:t[n][1]}},48655:(e,t,n)=>{var r=n(26025);e.exports=function(e){return r(this.__data__,e)>-1}},31175:(e,t,n)=>{var r=n(26025);e.exports=function(e,t){var n=this.__data__,o=r(n,e);return o<0?(++this.size,n.push([e,t])):n[o][1]=t,this}},63040:(e,t,n)=>{var r=n(21549),o=n(80079),i=n(68223);e.exports=function(){this.size=0,this.__data__={hash:new r,map:new(i||o),string:new r}}},17670:(e,t,n)=>{var r=n(12651);e.exports=function(e){var t=r(this,e).delete(e);return this.size-=t?1:0,t}},90289:(e,t,n)=>{var r=n(12651);e.exports=function(e){return r(this,e).get(e)}},4509:(e,t,n)=>{var r=n(12651);e.exports=function(e){return r(this,e).has(e)}},72949:(e,t,n)=>{var r=n(12651);e.exports=function(e,t){var n=r(this,e),o=n.size;return n.set(e,t),this.size+=n.size==o?0:1,this}},20317:e=>{e.exports=function(e){var t=-1,n=Array(e.size);return e.forEach((function(e,r){n[++t]=[r,e]})),n}},62224:(e,t,n)=>{var r=n(50104);e.exports=function(e){var t=r(e,(function(e){return 500===n.size&&n.clear(),e})),n=t.cache;return t}},48152:(e,t,n)=>{var r=n(28303),o=r&&new r;e.exports=o},81042:(e,t,n)=>{var r=n(56110)(Object,"create");e.exports=r},3650:(e,t,n)=>{var r=n(74335)(Object.keys,Object);e.exports=r},90181:e=>{e.exports=function(e){var t=[];if(null!=e)for(var n in Object(e))t.push(n);return t}},86009:(e,t,n)=>{e=n.nmd(e);var r=n(34840),o=t&&!t.nodeType&&t,i=o&&e&&!e.nodeType&&e,a=i&&i.exports===o&&r.process,s=function(){try{var e=i&&i.require&&i.require("util").types;return e||a&&a.binding&&a.binding("util")}catch(t){}}();e.exports=s},59350:e=>{var t=Object.prototype.toString;e.exports=function(e){return t.call(e)}},74335:e=>{e.exports=function(e,t){return function(n){return e(t(n))}}},56757:(e,t,n)=>{var r=n(91033),o=Math.max;e.exports=function(e,t,n){return t=o(void 0===t?e.length-1:t,0),function(){for(var i=arguments,a=-1,s=o(i.length-t,0),u=Array(s);++a{var 
r=n(47422),o=n(25160);e.exports=function(e,t){return t.length<2?e:r(e,o(t,0,-1))}},84629:e=>{e.exports={}},9325:(e,t,n)=>{var r=n(34840),o="object"==typeof self&&self&&self.Object===Object&&self,i=r||o||Function("return this")();e.exports=i},14974:e=>{e.exports=function(e,t){if(("constructor"!==t||"function"!==typeof e[t])&&"__proto__"!=t)return e[t]}},31380:e=>{e.exports=function(e){return this.__data__.set(e,"__lodash_hash_undefined__"),this}},51459:e=>{e.exports=function(e){return this.__data__.has(e)}},84247:e=>{e.exports=function(e){var t=-1,n=Array(e.size);return e.forEach((function(e){n[++t]=e})),n}},32865:(e,t,n)=>{var r=n(19570),o=n(51811)(r);e.exports=o},51811:e=>{var t=Date.now;e.exports=function(e){var n=0,r=0;return function(){var o=t(),i=16-(o-r);if(r=o,i>0){if(++n>=800)return arguments[0]}else n=0;return e.apply(void 0,arguments)}}},51420:(e,t,n)=>{var r=n(80079);e.exports=function(){this.__data__=new r,this.size=0}},90938:e=>{e.exports=function(e){var t=this.__data__,n=t.delete(e);return this.size=t.size,n}},63605:e=>{e.exports=function(e){return this.__data__.get(e)}},29817:e=>{e.exports=function(e){return this.__data__.has(e)}},80945:(e,t,n)=>{var r=n(80079),o=n(68223),i=n(53661);e.exports=function(e,t){var n=this.__data__;if(n instanceof r){var a=n.__data__;if(!o||a.length<199)return a.push([e,t]),this.size=++n.size,this;n=this.__data__=new i(a)}return n.set(e,t),this.size=n.size,this}},76959:e=>{e.exports=function(e,t,n){for(var r=n-1,o=e.length;++r{var r=n(62224),o=/[^.[\]]+|\[(?:(-?\d+(?:\.\d+)?)|(["'])((?:(?!\2)[^\\]|\\.)*?)\2)\]|(?=(?:\.|\[\])(?:\.|\[\]|$))/g,i=/\\(\\)?/g,a=r((function(e){var t=[];return 46===e.charCodeAt(0)&&t.push(""),e.replace(o,(function(e,n,r,o){t.push(r?o.replace(i,"$1"):n||e)})),t}));e.exports=a},77797:(e,t,n)=>{var r=n(44394);e.exports=function(e){if("string"==typeof e||r(e))return e;var t=e+"";return"0"==t&&1/e==-1/0?"-0":t}},47473:e=>{var t=Function.prototype.toString;e.exports=function(e){if(null!=e){try{return t.call(e)}catch(n){}try{return e+""}catch(n){}}return""}},80257:(e,t,n)=>{var r=n(30980),o=n(56017),i=n(23007);e.exports=function(e){if(e instanceof r)return e.clone();var t=new o(e.__wrapped__,e.__chain__);return t.__actions__=i(e.__actions__),t.__index__=e.__index__,t.__values__=e.__values__,t}},37334:e=>{e.exports=function(e){return function(){return e}}},66245:(e,t,n)=>{var r=n(83915),o=n(83120),i=n(69302),a=n(83693),s=i((function(e,t){return a(e)?r(e,o(t,1,a,!0)):[]}));e.exports=s},75288:e=>{e.exports=function(e,t){return e===t||e!==e&&t!==t}},35970:(e,t,n)=>{var r=n(83120);e.exports=function(e){return(null==e?0:e.length)?r(e,1):[]}},49870:(e,t,n)=>{var r=n(66929)();e.exports=r},58156:(e,t,n)=>{var r=n(47422);e.exports=function(e,t,n){var o=null==e?void 0:r(e,t);return void 0===o?n:o}},83488:e=>{e.exports=function(e){return e}},72428:(e,t,n)=>{var r=n(27534),o=n(40346),i=Object.prototype,a=i.hasOwnProperty,s=i.propertyIsEnumerable,u=r(function(){return arguments}())?r:function(e){return o(e)&&a.call(e,"callee")&&!s.call(e,"callee")};e.exports=u},56449:e=>{var t=Array.isArray;e.exports=t},64894:(e,t,n)=>{var r=n(1882),o=n(30294);e.exports=function(e){return null!=e&&o(e.length)&&!r(e)}},83693:(e,t,n)=>{var r=n(64894),o=n(40346);e.exports=function(e){return o(e)&&r(e)}},3656:(e,t,n)=>{e=n.nmd(e);var r=n(9325),o=n(89935),i=t&&!t.nodeType&&t,a=i&&e&&!e.nodeType&&e,s=a&&a.exports===i?r.Buffer:void 0,u=(s?s.isBuffer:void 0)||o;e.exports=u},2404:(e,t,n)=>{var r=n(60270);e.exports=function(e,t){return r(e,t)}},1882:(e,t,n)=>{var 
r=n(72552),o=n(23805);e.exports=function(e){if(!o(e))return!1;var t=r(e);return"[object Function]"==t||"[object GeneratorFunction]"==t||"[object AsyncFunction]"==t||"[object Proxy]"==t}},30294:e=>{e.exports=function(e){return"number"==typeof e&&e>-1&&e%1==0&&e<=9007199254740991}},87730:(e,t,n)=>{var r=n(29172),o=n(27301),i=n(86009),a=i&&i.isMap,s=a?o(a):r;e.exports=s},23805:e=>{e.exports=function(e){var t=typeof e;return null!=e&&("object"==t||"function"==t)}},40346:e=>{e.exports=function(e){return null!=e&&"object"==typeof e}},11331:(e,t,n)=>{var r=n(72552),o=n(28879),i=n(40346),a=Function.prototype,s=Object.prototype,u=a.toString,l=s.hasOwnProperty,c=u.call(Object);e.exports=function(e){if(!i(e)||"[object Object]"!=r(e))return!1;var t=o(e);if(null===t)return!0;var n=l.call(t,"constructor")&&t.constructor;return"function"==typeof n&&n instanceof n&&u.call(n)==c}},38440:(e,t,n)=>{var r=n(16038),o=n(27301),i=n(86009),a=i&&i.isSet,s=a?o(a):r;e.exports=s},44394:(e,t,n)=>{var r=n(72552),o=n(40346);e.exports=function(e){return"symbol"==typeof e||o(e)&&"[object Symbol]"==r(e)}},37167:(e,t,n)=>{var r=n(4901),o=n(27301),i=n(86009),a=i&&i.isTypedArray,s=a?o(a):r;e.exports=s},95950:(e,t,n)=>{var r=n(70695),o=n(88984),i=n(64894);e.exports=function(e){return i(e)?r(e):o(e)}},37241:(e,t,n)=>{var r=n(70695),o=n(72903),i=n(64894);e.exports=function(e){return i(e)?r(e,!0):o(e)}},68090:e=>{e.exports=function(e){var t=null==e?0:e.length;return t?e[t-1]:void 0}},50104:(e,t,n)=>{var r=n(53661);function o(e,t){if("function"!=typeof e||null!=t&&"function"!=typeof t)throw new TypeError("Expected a function");var n=function(){var r=arguments,o=t?t.apply(this,r):r[0],i=n.cache;if(i.has(o))return i.get(o);var a=e.apply(this,r);return n.cache=i.set(o,a)||i,a};return n.cache=new(o.Cache||r),n}o.Cache=r,e.exports=o},55364:(e,t,n)=>{var r=n(85250),o=n(20999)((function(e,t,n){r(e,t,n)}));e.exports=o},63950:e=>{e.exports=function(){}},90179:(e,t,n)=>{var r=n(34932),o=n(9999),i=n(19931),a=n(31769),s=n(21791),u=n(53138),l=n(38816),c=n(83349),f=l((function(e,t){var n={};if(null==e)return n;var l=!1;t=r(t,(function(t){return t=a(t,e),l||(l=t.length>1),t})),s(e,c(e),n),l&&(n=o(n,7,u));for(var f=t.length;f--;)i(n,t[f]);return n}));e.exports=f},63345:e=>{e.exports=function(){return[]}},89935:e=>{e.exports=function(){return!1}},69884:(e,t,n)=>{var r=n(21791),o=n(37241);e.exports=function(e){return r(e,o(e))}},13222:(e,t,n)=>{var r=n(77556);e.exports=function(e){return null==e?"":r(e)}},53758:(e,t,n)=>{var r=n(30980),o=n(56017),i=n(94033),a=n(56449),s=n(40346),u=n(80257),l=Object.prototype.hasOwnProperty;function c(e){if(s(e)&&!a(e)&&!(e instanceof r)){if(e instanceof o)return e;if(l.call(e,"__wrapped__"))return u(e)}return new o(e)}c.prototype=i.prototype,c.prototype.constructor=c,e.exports=c},83503:(e,t,n)=>{!function(){var t=n(3939),r=n(92151).utf8,o=n(87206),i=n(92151).bin,a=function(e,n){e.constructor==String?e=n&&"binary"===n.encoding?i.stringToBytes(e):r.stringToBytes(e):o(e)?e=Array.prototype.slice.call(e,0):Array.isArray(e)||e.constructor===Uint8Array||(e=e.toString());for(var s=t.bytesToWords(e),u=8*e.length,l=1732584193,c=-271733879,f=-1732584194,d=271733878,h=0;h>>24)|4278255360&(s[h]<<24|s[h]>>>8);s[u>>>5]|=128<>>9<<4)]=u;var p=a._ff,g=a._gg,v=a._hh,y=a._ii;for(h=0;h>>0,c=c+b>>>0,f=f+_>>>0,d=d+w>>>0}return t.endian([l,c,f,d])};a._ff=function(e,t,n,r,o,i,a){var s=e+(t&n|~t&r)+(o>>>0)+a;return(s<>>32-i)+t},a._gg=function(e,t,n,r,o,i,a){var s=e+(t&r|n&~r)+(o>>>0)+a;return(s<>>32-i)+t},a._hh=function(e,t,n,r,o,i,a){var 
s=e+(t^n^r)+(o>>>0)+a;return(s<>>32-i)+t},a._ii=function(e,t,n,r,o,i,a){var s=e+(n^(t|~r))+(o>>>0)+a;return(s<>>32-i)+t},a._blocksize=16,a._digestsize=16,e.exports=function(e,n){if(void 0===e||null===e)throw new Error("Illegal argument "+e);var r=t.wordsToBytes(a(e,n));return n&&n.asBytes?r:n&&n.asString?i.bytesToString(r):t.bytesToHex(r)}}()},45228:e=>{"use strict";var t=Object.getOwnPropertySymbols,n=Object.prototype.hasOwnProperty,r=Object.prototype.propertyIsEnumerable;e.exports=function(){try{if(!Object.assign)return!1;var e=new String("abc");if(e[5]="de","5"===Object.getOwnPropertyNames(e)[0])return!1;for(var t={},n=0;n<10;n++)t["_"+String.fromCharCode(n)]=n;if("0123456789"!==Object.getOwnPropertyNames(t).map((function(e){return t[e]})).join(""))return!1;var r={};return"abcdefghijklmnopqrst".split("").forEach((function(e){r[e]=e})),"abcdefghijklmnopqrst"===Object.keys(Object.assign({},r)).join("")}catch(o){return!1}}()?Object.assign:function(e,o){for(var i,a,s=function(e){if(null===e||void 0===e)throw new TypeError("Object.assign cannot be called with null or undefined");return Object(e)}(e),u=1;u{var r="function"===typeof Map&&Map.prototype,o=Object.getOwnPropertyDescriptor&&r?Object.getOwnPropertyDescriptor(Map.prototype,"size"):null,i=r&&o&&"function"===typeof o.get?o.get:null,a=r&&Map.prototype.forEach,s="function"===typeof Set&&Set.prototype,u=Object.getOwnPropertyDescriptor&&s?Object.getOwnPropertyDescriptor(Set.prototype,"size"):null,l=s&&u&&"function"===typeof u.get?u.get:null,c=s&&Set.prototype.forEach,f="function"===typeof WeakMap&&WeakMap.prototype?WeakMap.prototype.has:null,d="function"===typeof WeakSet&&WeakSet.prototype?WeakSet.prototype.has:null,h="function"===typeof WeakRef&&WeakRef.prototype?WeakRef.prototype.deref:null,p=Boolean.prototype.valueOf,g=Object.prototype.toString,v=Function.prototype.toString,y=String.prototype.match,m=String.prototype.slice,b=String.prototype.replace,_=String.prototype.toUpperCase,w=String.prototype.toLowerCase,x=RegExp.prototype.test,S=Array.prototype.concat,A=Array.prototype.join,E=Array.prototype.slice,C=Math.floor,R="function"===typeof BigInt?BigInt.prototype.valueOf:null,O=Object.getOwnPropertySymbols,T="function"===typeof Symbol&&"symbol"===typeof Symbol.iterator?Symbol.prototype.toString:null,k="function"===typeof Symbol&&"object"===typeof Symbol.iterator,M="function"===typeof Symbol&&Symbol.toStringTag&&(typeof Symbol.toStringTag===k||"symbol")?Symbol.toStringTag:null,P=Object.prototype.propertyIsEnumerable,I=("function"===typeof Reflect?Reflect.getPrototypeOf:Object.getPrototypeOf)||([].__proto__===Array.prototype?function(e){return e.__proto__}:null);function N(e,t){if(e===1/0||e===-1/0||e!==e||e&&e>-1e3&&e<1e3||x.call(/e/,t))return t;var n=/[0-9](?=(?:[0-9]{3})+(?![0-9]))/g;if("number"===typeof e){var r=e<0?-C(-e):C(e);if(r!==e){var o=String(r),i=m.call(t,o.length+1);return b.call(o,n,"$&_")+"."+b.call(b.call(i,/([0-9]{3})/g,"$&_"),/_$/,"")}}return b.call(t,n,"$&_")}var D=n(42634),L=D.custom,F=V(L)?L:null;function j(e,t,n){var r="double"===(n.quoteStyle||t)?'"':"'";return r+e+r}function U(e){return b.call(String(e),/"/g,""")}function z(e){return"[object Array]"===W(e)&&(!M||!("object"===typeof e&&M in e))}function B(e){return"[object RegExp]"===W(e)&&(!M||!("object"===typeof e&&M in e))}function V(e){if(k)return e&&"object"===typeof e&&e instanceof Symbol;if("symbol"===typeof e)return!0;if(!e||"object"!==typeof e||!T)return!1;try{return T.call(e),!0}catch(t){}return!1}e.exports=function e(t,r,o,s){var 
u=r||{};if(H(u,"quoteStyle")&&"single"!==u.quoteStyle&&"double"!==u.quoteStyle)throw new TypeError('option "quoteStyle" must be "single" or "double"');if(H(u,"maxStringLength")&&("number"===typeof u.maxStringLength?u.maxStringLength<0&&u.maxStringLength!==1/0:null!==u.maxStringLength))throw new TypeError('option "maxStringLength", if provided, must be a positive integer, Infinity, or `null`');var g=!H(u,"customInspect")||u.customInspect;if("boolean"!==typeof g&&"symbol"!==g)throw new TypeError("option \"customInspect\", if provided, must be `true`, `false`, or `'symbol'`");if(H(u,"indent")&&null!==u.indent&&"\t"!==u.indent&&!(parseInt(u.indent,10)===u.indent&&u.indent>0))throw new TypeError('option "indent" must be "\\t", an integer > 0, or `null`');if(H(u,"numericSeparator")&&"boolean"!==typeof u.numericSeparator)throw new TypeError('option "numericSeparator", if provided, must be `true` or `false`');var _=u.numericSeparator;if("undefined"===typeof t)return"undefined";if(null===t)return"null";if("boolean"===typeof t)return t?"true":"false";if("string"===typeof t)return G(t,u);if("number"===typeof t){if(0===t)return 1/0/t>0?"0":"-0";var x=String(t);return _?N(t,x):x}if("bigint"===typeof t){var C=String(t)+"n";return _?N(t,C):C}var O="undefined"===typeof u.depth?5:u.depth;if("undefined"===typeof o&&(o=0),o>=O&&O>0&&"object"===typeof t)return z(t)?"[Array]":"[Object]";var L=function(e,t){var n;if("\t"===e.indent)n="\t";else{if(!("number"===typeof e.indent&&e.indent>0))return null;n=A.call(Array(e.indent+1)," ")}return{base:n,prev:A.call(Array(t+1),n)}}(u,o);if("undefined"===typeof s)s=[];else if(q(s,t)>=0)return"[Circular]";function $(t,n,r){if(n&&(s=E.call(s)).push(n),r){var i={depth:u.depth};return H(u,"quoteStyle")&&(i.quoteStyle=u.quoteStyle),e(t,i,o+1,s)}return e(t,u,o+1,s)}if("function"===typeof t&&!B(t)){var Y=function(e){if(e.name)return e.name;var t=y.call(v.call(e),/^function\s*([\w$]+)/);if(t)return t[1];return null}(t),ee=J(t,$);return"[Function"+(Y?": "+Y:" (anonymous)")+"]"+(ee.length>0?" 
{ "+A.call(ee,", ")+" }":"")}if(V(t)){var te=k?b.call(String(t),/^(Symbol\(.*\))_[^)]*$/,"$1"):T.call(t);return"object"!==typeof t||k?te:X(te)}if(function(e){if(!e||"object"!==typeof e)return!1;if("undefined"!==typeof HTMLElement&&e instanceof HTMLElement)return!0;return"string"===typeof e.nodeName&&"function"===typeof e.getAttribute}(t)){for(var ne="<"+w.call(String(t.nodeName)),re=t.attributes||[],oe=0;oe"}if(z(t)){if(0===t.length)return"[]";var ie=J(t,$);return L&&!function(e){for(var t=0;t=0)return!1;return!0}(ie)?"["+Q(ie,L)+"]":"[ "+A.call(ie,", ")+" ]"}if(function(e){return"[object Error]"===W(e)&&(!M||!("object"===typeof e&&M in e))}(t)){var ae=J(t,$);return"cause"in Error.prototype||!("cause"in t)||P.call(t,"cause")?0===ae.length?"["+String(t)+"]":"{ ["+String(t)+"] "+A.call(ae,", ")+" }":"{ ["+String(t)+"] "+A.call(S.call("[cause]: "+$(t.cause),ae),", ")+" }"}if("object"===typeof t&&g){if(F&&"function"===typeof t[F]&&D)return D(t,{depth:O-o});if("symbol"!==g&&"function"===typeof t.inspect)return t.inspect()}if(function(e){if(!i||!e||"object"!==typeof e)return!1;try{i.call(e);try{l.call(e)}catch(ne){return!0}return e instanceof Map}catch(t){}return!1}(t)){var se=[];return a&&a.call(t,(function(e,n){se.push($(n,t,!0)+" => "+$(e,t))})),Z("Map",i.call(t),se,L)}if(function(e){if(!l||!e||"object"!==typeof e)return!1;try{l.call(e);try{i.call(e)}catch(t){return!0}return e instanceof Set}catch(n){}return!1}(t)){var ue=[];return c&&c.call(t,(function(e){ue.push($(e,t))})),Z("Set",l.call(t),ue,L)}if(function(e){if(!f||!e||"object"!==typeof e)return!1;try{f.call(e,f);try{d.call(e,d)}catch(ne){return!0}return e instanceof WeakMap}catch(t){}return!1}(t))return K("WeakMap");if(function(e){if(!d||!e||"object"!==typeof e)return!1;try{d.call(e,d);try{f.call(e,f)}catch(ne){return!0}return e instanceof WeakSet}catch(t){}return!1}(t))return K("WeakSet");if(function(e){if(!h||!e||"object"!==typeof e)return!1;try{return h.call(e),!0}catch(t){}return!1}(t))return K("WeakRef");if(function(e){return"[object Number]"===W(e)&&(!M||!("object"===typeof e&&M in e))}(t))return X($(Number(t)));if(function(e){if(!e||"object"!==typeof e||!R)return!1;try{return R.call(e),!0}catch(t){}return!1}(t))return X($(R.call(t)));if(function(e){return"[object Boolean]"===W(e)&&(!M||!("object"===typeof e&&M in e))}(t))return X(p.call(t));if(function(e){return"[object String]"===W(e)&&(!M||!("object"===typeof e&&M in e))}(t))return X($(String(t)));if("undefined"!==typeof window&&t===window)return"{ [object Window] }";if(t===n.g)return"{ [object globalThis] }";if(!function(e){return"[object Date]"===W(e)&&(!M||!("object"===typeof e&&M in e))}(t)&&!B(t)){var le=J(t,$),ce=I?I(t)===Object.prototype:t instanceof Object||t.constructor===Object,fe=t instanceof Object?"":"null prototype",de=!ce&&M&&Object(t)===t&&M in t?m.call(W(t),8,-1):fe?"Object":"",he=(ce||"function"!==typeof t.constructor?"":t.constructor.name?t.constructor.name+" ":"")+(de||fe?"["+A.call(S.call([],de||[],fe||[]),": ")+"] ":"");return 0===le.length?he+"{}":L?he+"{"+Q(le,L)+"}":he+"{ "+A.call(le,", ")+" }"}return String(t)};var $=Object.prototype.hasOwnProperty||function(e){return e in this};function H(e,t){return $.call(e,t)}function W(e){return g.call(e)}function q(e,t){if(e.indexOf)return e.indexOf(t);for(var n=0,r=e.length;nt.maxStringLength){var n=e.length-t.maxStringLength,r="... 
"+n+" more character"+(n>1?"s":"");return G(m.call(e,0,t.maxStringLength),t)+r}return j(b.call(b.call(e,/(['\\])/g,"\\$1"),/[\x00-\x1f]/g,Y),"single",t)}function Y(e){var t=e.charCodeAt(0),n={8:"b",9:"t",10:"n",12:"f",13:"r"}[t];return n?"\\"+n:"\\x"+(t<16?"0":"")+_.call(t.toString(16))}function X(e){return"Object("+e+")"}function K(e){return e+" { ? }"}function Z(e,t,n,r){return e+" ("+t+") {"+(r?Q(n,r):A.call(n,", "))+"}"}function Q(e,t){if(0===e.length)return"";var n="\n"+t.prev+t.base;return n+A.call(e,","+n)+"\n"+t.prev}function J(e,t){var n=z(e),r=[];if(n){r.length=e.length;for(var o=0;o{"use strict";var t=function(e){return e!==e};e.exports=function(e,n){return 0===e&&0===n?1/e===1/n:e===n||!(!t(e)||!t(n))}},37653:(e,t,n)=>{"use strict";var r=n(38452),o=n(10487),i=n(89211),a=n(9394),s=n(36576),u=o(a(),Object);r(u,{getPolyfill:a,implementation:i,shim:s}),e.exports=u},9394:(e,t,n)=>{"use strict";var r=n(89211);e.exports=function(){return"function"===typeof Object.is?Object.is:r}},36576:(e,t,n)=>{"use strict";var r=n(9394),o=n(38452);e.exports=function(){var e=r();return o(Object,{is:e},{is:function(){return Object.is!==e}}),e}},28875:(e,t,n)=>{"use strict";var r;if(!Object.keys){var o=Object.prototype.hasOwnProperty,i=Object.prototype.toString,a=n(1093),s=Object.prototype.propertyIsEnumerable,u=!s.call({toString:null},"toString"),l=s.call((function(){}),"prototype"),c=["toString","toLocaleString","valueOf","hasOwnProperty","isPrototypeOf","propertyIsEnumerable","constructor"],f=function(e){var t=e.constructor;return t&&t.prototype===e},d={$applicationCache:!0,$console:!0,$external:!0,$frame:!0,$frameElement:!0,$frames:!0,$innerHeight:!0,$innerWidth:!0,$onmozfullscreenchange:!0,$onmozfullscreenerror:!0,$outerHeight:!0,$outerWidth:!0,$pageXOffset:!0,$pageYOffset:!0,$parent:!0,$scrollLeft:!0,$scrollTop:!0,$scrollX:!0,$scrollY:!0,$self:!0,$webkitIndexedDB:!0,$webkitStorageInfo:!0,$window:!0},h=function(){if("undefined"===typeof window)return!1;for(var e in window)try{if(!d["$"+e]&&o.call(window,e)&&null!==window[e]&&"object"===typeof window[e])try{f(window[e])}catch(t){return!0}}catch(t){return!0}return!1}();r=function(e){var t=null!==e&&"object"===typeof e,n="[object Function]"===i.call(e),r=a(e),s=t&&"[object String]"===i.call(e),d=[];if(!t&&!n&&!r)throw new TypeError("Object.keys called on a non-object");var p=l&&n;if(s&&e.length>0&&!o.call(e,0))for(var g=0;g0)for(var v=0;v{"use strict";var r=Array.prototype.slice,o=n(1093),i=Object.keys,a=i?function(e){return i(e)}:n(28875),s=Object.keys;a.shim=function(){if(Object.keys){var e=function(){var e=Object.keys(arguments);return e&&e.length===arguments.length}(1,2);e||(Object.keys=function(e){return o(e)?s(r.call(e)):s(e)})}else Object.keys=a;return Object.keys||a},e.exports=a},1093:e=>{"use strict";var t=Object.prototype.toString;e.exports=function(e){var n=t.call(e),r="[object Arguments]"===n;return r||(r="[object Array]"!==n&&null!==e&&"object"===typeof e&&"number"===typeof e.length&&e.length>=0&&"[object Function]"===t.call(e.callee)),r}},13759:(e,t,n)=>{"use 
strict";n.r(t),n.d(t,{adjustHue:()=>Ke,animation:()=>wt,backgroundImages:()=>xt,backgrounds:()=>St,between:()=>W,border:()=>Et,borderColor:()=>Ct,borderRadius:()=>Rt,borderStyle:()=>Ot,borderWidth:()=>Tt,buttons:()=>Nt,clearFix:()=>q,complement:()=>Ze,cover:()=>G,cssVar:()=>_,darken:()=>Je,desaturate:()=>et,directionalProperty:()=>A,easeIn:()=>z,easeInOut:()=>V,easeOut:()=>H,ellipsis:()=>Y,em:()=>T,fluidRange:()=>Z,fontFace:()=>ie,getContrast:()=>nt,getLuminance:()=>tt,getValueAndUnit:()=>M,grayscale:()=>rt,hiDPI:()=>ue,hideText:()=>ae,hideVisually:()=>se,hsl:()=>Ue,hslToColorString:()=>ot,hsla:()=>ze,important:()=>P,invert:()=>it,lighten:()=>at,linearGradient:()=>ce,margin:()=>Dt,math:()=>m,meetsContrastGuidelines:()=>st,mix:()=>ut,modularScale:()=>N,normalize:()=>fe,opacify:()=>lt,padding:()=>Lt,parseToHsl:()=>Ie,parseToRgb:()=>Pe,position:()=>jt,radialGradient:()=>de,readableColor:()=>dt,rem:()=>D,remToPx:()=>j,retinaImage:()=>he,rgb:()=>Be,rgbToColorString:()=>ht,rgba:()=>Ve,saturate:()=>pt,setHue:()=>gt,setLightness:()=>vt,setSaturation:()=>yt,shade:()=>mt,size:()=>Ut,stripUnit:()=>R,textInputs:()=>Vt,timingFunctions:()=>ge,tint:()=>bt,toColorString:()=>Ge,transitions:()=>$t,transparentize:()=>_t,triangle:()=>me,wordWrap:()=>be});var r=n(58168),o=n(9417),i=n(77387),a=n(53954),s=n(63662);var u=n(52176);function l(e){var t="function"===typeof Map?new Map:void 0;return l=function(e){if(null===e||!function(e){try{return-1!==Function.toString.call(e).indexOf("[native code]")}catch(t){return"function"===typeof e}}(e))return e;if("function"!==typeof e)throw new TypeError("Super expression must either be null or a function");if("undefined"!==typeof t){if(t.has(e))return t.get(e);t.set(e,n)}function n(){return function(e,t,n){if((0,u.A)())return Reflect.construct.apply(null,arguments);var r=[null];r.push.apply(r,t);var o=new(e.bind.apply(e,r));return n&&(0,s.A)(o,n.prototype),o}(e,arguments,(0,a.A)(this).constructor)}return n.prototype=Object.create(e.prototype,{constructor:{value:n,enumerable:!1,writable:!0,configurable:!0}}),(0,s.A)(n,e)},l(e)}function c(e,t){return t||(t=e.slice(0)),e.raw=t,e}function f(){var e;return(e=arguments.length-1)<0||arguments.length<=e?void 0:arguments[e]}var d={symbols:{"*":{infix:{symbol:"*",f:function(e,t){return e*t},notation:"infix",precedence:4,rightToLeft:0,argCount:2},symbol:"*",regSymbol:"\\*"},"/":{infix:{symbol:"/",f:function(e,t){return e/t},notation:"infix",precedence:4,rightToLeft:0,argCount:2},symbol:"/",regSymbol:"/"},"+":{infix:{symbol:"+",f:function(e,t){return e+t},notation:"infix",precedence:2,rightToLeft:0,argCount:2},prefix:{symbol:"+",f:f,notation:"prefix",precedence:3,rightToLeft:0,argCount:1},symbol:"+",regSymbol:"\\+"},"-":{infix:{symbol:"-",f:function(e,t){return e-t},notation:"infix",precedence:2,rightToLeft:0,argCount:2},prefix:{symbol:"-",f:function(e){return-e},notation:"prefix",precedence:3,rightToLeft:0,argCount:1},symbol:"-",regSymbol:"-"},",":{infix:{symbol:",",f:function(){return Array.of.apply(Array,arguments)},notation:"infix",precedence:1,rightToLeft:0,argCount:2},symbol:",",regSymbol:","},"(":{prefix:{symbol:"(",f:f,notation:"prefix",precedence:0,rightToLeft:0,argCount:1},symbol:"(",regSymbol:"\\("},")":{postfix:{symbol:")",f:void 0,notation:"postfix",precedence:0,rightToLeft:0,argCount:1},symbol:")",regSymbol:"\\)"},min:{func:{symbol:"min",f:function(){return Math.min.apply(Math,arguments)},notation:"func",precedence:0,rightToLeft:0,argCount:1},symbol:"min",regSymbol:"min\\b"},max:{func:{symbol:"max",f:function(){return 
Math.max.apply(Math,arguments)},notation:"func",precedence:0,rightToLeft:0,argCount:1},symbol:"max",regSymbol:"max\\b"}}};var h=function(e){function t(t){var n;return n=e.call(this,"An error occurred. See https://github.com/styled-components/polished/blob/main/src/internalHelpers/errors.md#"+t+" for more information.")||this,(0,o.A)(n)}return(0,i.A)(t,e),t}(l(Error)),p=/((?!\w)a|na|hc|mc|dg|me[r]?|xe|ni(?![a-zA-Z])|mm|cp|tp|xp|q(?!s)|hv|xamv|nimv|wv|sm|s(?!\D|$)|ged|darg?|nrut)/g;function g(e,t){var n,r=e.pop();return t.push(r.f.apply(r,(n=[]).concat.apply(n,t.splice(-r.argCount)))),r.precedence}function v(e,t){var n,o=function(e){var t={};return t.symbols=e?(0,r.A)({},d.symbols,e.symbols):(0,r.A)({},d.symbols),t}(t),i=[o.symbols["("].prefix],a=[],s=new RegExp("\\d+(?:\\.\\d+)?|"+Object.keys(o.symbols).map((function(e){return o.symbols[e]})).sort((function(e,t){return t.symbol.length-e.symbol.length})).map((function(e){return e.regSymbol})).join("|")+"|(\\S)","g");s.lastIndex=0;var u=!1;do{var l=(n=s.exec(e))||[")",void 0],c=l[0],f=l[1],p=o.symbols[c],v=p&&!p.prefix&&!p.func,y=!p||!p.postfix&&!p.infix;if(f||(u?y:v))throw new h(37,n?n.index:e.length,e);if(u){var m=p.postfix||p.infix;do{var b=i[i.length-1];if((m.precedence-b.precedence||b.rightToLeft)>0)break}while(g(i,a));u="postfix"===m.notation,")"!==m.symbol&&(i.push(m),u&&g(i,a))}else if(p){if(i.push(p.prefix||p.func),p.func&&(!(n=s.exec(e))||"("!==n[0]))throw new h(38,n?n.index:e.length,e)}else a.push(+c),u=!0}while(n&&i.length);if(i.length)throw new h(39,n?n.index:e.length,e);if(n)throw new h(40,n?n.index:e.length,e);return a.pop()}function y(e){return e.split("").reverse().join("")}function m(e,t){var n=y(e),r=n.match(p);if(r&&!r.every((function(e){return e===r[0]})))throw new h(41);return""+v(y(n.replace(p,"")),t)+(r?y(r[0]):"")}var b=/--[\S]*/g;function _(e,t){if(!e||!e.match(b))throw new h(73);var n;if("undefined"!==typeof document&&null!==document.documentElement&&(n=getComputedStyle(document.documentElement).getPropertyValue(e)),n)return n.trim();if(t)return t;throw new h(74)}function w(e){return e.charAt(0).toUpperCase()+e.slice(1)}var x=["Top","Right","Bottom","Left"];function S(e,t){if(!e)return t.toLowerCase();var n=e.split("-");if(n.length>1)return n.splice(1,0,t),n.reduce((function(e,t){return""+e+w(t)}));var r=e.replace(/([a-z])([A-Z])/g,"$1"+t+"$2");return e===r?""+e+t:r}function A(e){for(var t=arguments.length,n=new Array(t>1?t-1:0),r=1;r=0)?n[r]=e[r]+" !important":n[r]=e[r]})),n}var I={minorSecond:1.067,majorSecond:1.125,minorThird:1.2,majorThird:1.25,perfectFourth:1.333,augFourth:1.414,perfectFifth:1.5,minorSixth:1.6,goldenSection:1.618,majorSixth:1.667,minorSeventh:1.778,majorSeventh:1.875,octave:2,majorTenth:2.5,majorEleventh:2.667,majorTwelfth:3,doubleOctave:4};function N(e,t,n){if(void 0===t&&(t="1em"),void 0===n&&(n=1.333),"number"!==typeof e)throw new h(42);if("string"===typeof n&&!I[n])throw new h(43);var r="string"===typeof t?M(t):[t,""],o=r[0],i=r[1],a="string"===typeof n?I[n]:n;if("string"===typeof o)throw new h(44,t);return""+o*Math.pow(a,e)+(i||"")}var D=O("rem"),L=16;function F(e){var t=M(e);if("px"===t[1])return parseFloat(e);if("%"===t[1])return parseFloat(e)/100*L;throw new h(78,t[1])}function j(e,t){var n=M(e);if("rem"!==n[1]&&""!==n[1])throw new h(77,n[1]);var r=t?F(t):function(){if("undefined"!==typeof document&&null!==document.documentElement){var e=getComputedStyle(document.documentElement).fontSize;return e?F(e):L}return L}();return n[0]*r+"px"}var U={back:"cubic-bezier(0.600, -0.280, 0.735, 
0.045)",circ:"cubic-bezier(0.600, 0.040, 0.980, 0.335)",cubic:"cubic-bezier(0.550, 0.055, 0.675, 0.190)",expo:"cubic-bezier(0.950, 0.050, 0.795, 0.035)",quad:"cubic-bezier(0.550, 0.085, 0.680, 0.530)",quart:"cubic-bezier(0.895, 0.030, 0.685, 0.220)",quint:"cubic-bezier(0.755, 0.050, 0.855, 0.060)",sine:"cubic-bezier(0.470, 0.000, 0.745, 0.715)"};function z(e){return U[e.toLowerCase().trim()]}var B={back:"cubic-bezier(0.680, -0.550, 0.265, 1.550)",circ:"cubic-bezier(0.785, 0.135, 0.150, 0.860)",cubic:"cubic-bezier(0.645, 0.045, 0.355, 1.000)",expo:"cubic-bezier(1.000, 0.000, 0.000, 1.000)",quad:"cubic-bezier(0.455, 0.030, 0.515, 0.955)",quart:"cubic-bezier(0.770, 0.000, 0.175, 1.000)",quint:"cubic-bezier(0.860, 0.000, 0.070, 1.000)",sine:"cubic-bezier(0.445, 0.050, 0.550, 0.950)"};function V(e){return B[e.toLowerCase().trim()]}var $={back:"cubic-bezier(0.175, 0.885, 0.320, 1.275)",cubic:"cubic-bezier(0.215, 0.610, 0.355, 1.000)",circ:"cubic-bezier(0.075, 0.820, 0.165, 1.000)",expo:"cubic-bezier(0.190, 1.000, 0.220, 1.000)",quad:"cubic-bezier(0.250, 0.460, 0.450, 0.940)",quart:"cubic-bezier(0.165, 0.840, 0.440, 1.000)",quint:"cubic-bezier(0.230, 1.000, 0.320, 1.000)",sine:"cubic-bezier(0.390, 0.575, 0.565, 1.000)"};function H(e){return $[e.toLowerCase().trim()]}function W(e,t,n,r){void 0===n&&(n="320px"),void 0===r&&(r="1200px");var o=M(e),i=o[0],a=o[1],s=M(t),u=s[0],l=s[1],c=M(n),f=c[0],d=c[1],p=M(r),g=p[0],v=p[1];if("number"!==typeof f||"number"!==typeof g||!d||!v||d!==v)throw new h(47);if("number"!==typeof i||"number"!==typeof u||a!==l)throw new h(48);if(a!==d||l!==v)throw new h(76);var y=(i-u)/(f-g);return"calc("+(u-y*g).toFixed(2)+(a||"")+" + "+(100*y).toFixed(2)+"vw)"}function q(e){var t;return void 0===e&&(e="&"),(t={})[e+"::after"]={clear:"both",content:'""',display:"table"},t}function G(e){return void 0===e&&(e=0),{position:"absolute",top:e,right:e,bottom:e,left:e}}function Y(e,t){void 0===t&&(t=1);var n={display:"inline-block",maxWidth:e||"100%",overflow:"hidden",textOverflow:"ellipsis",whiteSpace:"nowrap",wordWrap:"normal"};return t>1?(0,r.A)({},n,{WebkitBoxOrient:"vertical",WebkitLineClamp:t,display:"-webkit-box",whiteSpace:"normal"}):n}function X(e,t){var n="undefined"!==typeof Symbol&&e[Symbol.iterator]||e["@@iterator"];if(n)return(n=n.call(e)).next.bind(n);if(Array.isArray(e)||(n=function(e,t){if(!e)return;if("string"===typeof e)return K(e,t);var n=Object.prototype.toString.call(e).slice(8,-1);"Object"===n&&e.constructor&&(n=e.constructor.name);if("Map"===n||"Set"===n)return Array.from(e);if("Arguments"===n||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(n))return K(e,t)}(e))||t&&e&&"number"===typeof e.length){n&&(e=n);var r=0;return function(){return r>=e.length?{done:!0}:{done:!1,value:e[r++]}}}throw new TypeError("Invalid attempt to iterate non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}function K(e,t){(null==t||t>e.length)&&(t=e.length);for(var n=0,r=new Array(t);n1?n-1:0),o=1;o1?(t=t.slice(0,-1),t+=", "+r[i]):1===a.length&&(t+=""+r[i])}else r[i]&&(t+=r[i]+" ");return t.trim()}function ce(e){var t=e.colorStops,n=e.fallback,r=e.toDirection,o=void 0===r?"":r;if(!t||t.length<2)throw new h(56);return{backgroundColor:n||t[0].replace(/,\s+/g,",").split(" ")[0].replace(/,(?=\S)/g,", "),backgroundImage:le(Q||(Q=c(["linear-gradient(","",")"])),o,t.join(", ").replace(/,(?=\S)/g,", "))}}function fe(){var 
e;return[(e={html:{lineHeight:"1.15",textSizeAdjust:"100%"},body:{margin:"0"},main:{display:"block"},h1:{fontSize:"2em",margin:"0.67em 0"},hr:{boxSizing:"content-box",height:"0",overflow:"visible"},pre:{fontFamily:"monospace, monospace",fontSize:"1em"},a:{backgroundColor:"transparent"},"abbr[title]":{borderBottom:"none",textDecoration:"underline"}},e["b,\n strong"]={fontWeight:"bolder"},e["code,\n kbd,\n samp"]={fontFamily:"monospace, monospace",fontSize:"1em"},e.small={fontSize:"80%"},e["sub,\n sup"]={fontSize:"75%",lineHeight:"0",position:"relative",verticalAlign:"baseline"},e.sub={bottom:"-0.25em"},e.sup={top:"-0.5em"},e.img={borderStyle:"none"},e["button,\n input,\n optgroup,\n select,\n textarea"]={fontFamily:"inherit",fontSize:"100%",lineHeight:"1.15",margin:"0"},e["button,\n input"]={overflow:"visible"},e["button,\n select"]={textTransform:"none"},e['button,\n html [type="button"],\n [type="reset"],\n [type="submit"]']={WebkitAppearance:"button"},e['button::-moz-focus-inner,\n [type="button"]::-moz-focus-inner,\n [type="reset"]::-moz-focus-inner,\n [type="submit"]::-moz-focus-inner']={borderStyle:"none",padding:"0"},e['button:-moz-focusring,\n [type="button"]:-moz-focusring,\n [type="reset"]:-moz-focusring,\n [type="submit"]:-moz-focusring']={outline:"1px dotted ButtonText"},e.fieldset={padding:"0.35em 0.625em 0.75em"},e.legend={boxSizing:"border-box",color:"inherit",display:"table",maxWidth:"100%",padding:"0",whiteSpace:"normal"},e.progress={verticalAlign:"baseline"},e.textarea={overflow:"auto"},e['[type="checkbox"],\n [type="radio"]']={boxSizing:"border-box",padding:"0"},e['[type="number"]::-webkit-inner-spin-button,\n [type="number"]::-webkit-outer-spin-button']={height:"auto"},e['[type="search"]']={WebkitAppearance:"textfield",outlineOffset:"-2px"},e['[type="search"]::-webkit-search-decoration']={WebkitAppearance:"none"},e["::-webkit-file-upload-button"]={WebkitAppearance:"button",font:"inherit"},e.details={display:"block"},e.summary={display:"list-item"},e.template={display:"none"},e["[hidden]"]={display:"none"},e),{"abbr[title]":{textDecoration:"underline dotted"}}]}function de(e){var t=e.colorStops,n=e.extent,r=void 0===n?"":n,o=e.fallback,i=e.position,a=void 0===i?"":i,s=e.shape,u=void 0===s?"":s;if(!t||t.length<2)throw new h(57);return{backgroundColor:o||t[0].split(" ")[0],backgroundImage:le(J||(J=c(["radial-gradient(","","","",")"])),a,u,r,t.join(", "))}}function he(e,t,n,o,i){var a;if(void 0===n&&(n="png"),void 0===i&&(i="_2x"),!e)throw new h(58);var s=n.replace(/^\./,""),u=o?o+"."+s:""+e+i+"."+s;return(a={backgroundImage:"url("+e+"."+s+")"})[ue()]=(0,r.A)({backgroundImage:"url("+u+")"},t?{backgroundSize:t}:{}),a}var pe={easeInBack:"cubic-bezier(0.600, -0.280, 0.735, 0.045)",easeInCirc:"cubic-bezier(0.600, 0.040, 0.980, 0.335)",easeInCubic:"cubic-bezier(0.550, 0.055, 0.675, 0.190)",easeInExpo:"cubic-bezier(0.950, 0.050, 0.795, 0.035)",easeInQuad:"cubic-bezier(0.550, 0.085, 0.680, 0.530)",easeInQuart:"cubic-bezier(0.895, 0.030, 0.685, 0.220)",easeInQuint:"cubic-bezier(0.755, 0.050, 0.855, 0.060)",easeInSine:"cubic-bezier(0.470, 0.000, 0.745, 0.715)",easeOutBack:"cubic-bezier(0.175, 0.885, 0.320, 1.275)",easeOutCubic:"cubic-bezier(0.215, 0.610, 0.355, 1.000)",easeOutCirc:"cubic-bezier(0.075, 0.820, 0.165, 1.000)",easeOutExpo:"cubic-bezier(0.190, 1.000, 0.220, 1.000)",easeOutQuad:"cubic-bezier(0.250, 0.460, 0.450, 0.940)",easeOutQuart:"cubic-bezier(0.165, 0.840, 0.440, 1.000)",easeOutQuint:"cubic-bezier(0.230, 1.000, 0.320, 1.000)",easeOutSine:"cubic-bezier(0.390, 0.575, 
0.565, 1.000)",easeInOutBack:"cubic-bezier(0.680, -0.550, 0.265, 1.550)",easeInOutCirc:"cubic-bezier(0.785, 0.135, 0.150, 0.860)",easeInOutCubic:"cubic-bezier(0.645, 0.045, 0.355, 1.000)",easeInOutExpo:"cubic-bezier(1.000, 0.000, 0.000, 1.000)",easeInOutQuad:"cubic-bezier(0.455, 0.030, 0.515, 0.955)",easeInOutQuart:"cubic-bezier(0.770, 0.000, 0.175, 1.000)",easeInOutQuint:"cubic-bezier(0.860, 0.000, 0.070, 1.000)",easeInOutSine:"cubic-bezier(0.445, 0.050, 0.550, 0.950)"};function ge(e){return pe[e]}var ve=function(e,t,n){var r=""+n[0]+(n[1]||""),o=""+n[0]/2+(n[1]||""),i=""+t[0]+(t[1]||""),a=""+t[0]/2+(t[1]||"");switch(e){case"top":return"0 "+o+" "+i+" "+o;case"topLeft":return r+" "+i+" 0 0";case"left":return a+" "+r+" "+a+" 0";case"bottomLeft":return r+" 0 0 "+i;case"bottom":return i+" "+o+" 0 "+o;case"bottomRight":return"0 0 "+r+" "+i;case"right":return a+" 0 "+a+" "+r;default:return"0 "+r+" "+i+" 0"}},ye=function(e,t){switch(e){case"top":case"bottomRight":return{borderBottomColor:t};case"right":case"bottomLeft":return{borderLeftColor:t};case"bottom":case"topLeft":return{borderTopColor:t};case"left":case"topRight":return{borderRightColor:t};default:throw new h(59)}};function me(e){var t=e.pointingDirection,n=e.height,o=e.width,i=e.foregroundColor,a=e.backgroundColor,s=void 0===a?"transparent":a,u=M(o),l=M(n);if(isNaN(l[0])||isNaN(u[0]))throw new h(60);return(0,r.A)({width:"0",height:"0",borderColor:s},ye(t,i),{borderStyle:"solid",borderWidth:ve(t,l,u)})}function be(e){return void 0===e&&(e="break-word"),{overflowWrap:e,wordWrap:e,wordBreak:"break-word"===e?"break-all":e}}function _e(e){return Math.round(255*e)}function we(e,t,n){return _e(e)+","+_e(t)+","+_e(n)}function xe(e,t,n,r){if(void 0===r&&(r=we),0===t)return r(n,n,n);var o=(e%360+360)%360/60,i=(1-Math.abs(2*n-1))*t,a=i*(1-Math.abs(o%2-1)),s=0,u=0,l=0;o>=0&&o<1?(s=i,u=a):o>=1&&o<2?(s=a,u=i):o>=2&&o<3?(u=i,l=a):o>=3&&o<4?(u=a,l=i):o>=4&&o<5?(s=a,l=i):o>=5&&o<6&&(s=i,l=a);var c=n-i/2;return r(s+c,u+c,l+c)}var 
Se={aliceblue:"f0f8ff",antiquewhite:"faebd7",aqua:"00ffff",aquamarine:"7fffd4",azure:"f0ffff",beige:"f5f5dc",bisque:"ffe4c4",black:"000",blanchedalmond:"ffebcd",blue:"0000ff",blueviolet:"8a2be2",brown:"a52a2a",burlywood:"deb887",cadetblue:"5f9ea0",chartreuse:"7fff00",chocolate:"d2691e",coral:"ff7f50",cornflowerblue:"6495ed",cornsilk:"fff8dc",crimson:"dc143c",cyan:"00ffff",darkblue:"00008b",darkcyan:"008b8b",darkgoldenrod:"b8860b",darkgray:"a9a9a9",darkgreen:"006400",darkgrey:"a9a9a9",darkkhaki:"bdb76b",darkmagenta:"8b008b",darkolivegreen:"556b2f",darkorange:"ff8c00",darkorchid:"9932cc",darkred:"8b0000",darksalmon:"e9967a",darkseagreen:"8fbc8f",darkslateblue:"483d8b",darkslategray:"2f4f4f",darkslategrey:"2f4f4f",darkturquoise:"00ced1",darkviolet:"9400d3",deeppink:"ff1493",deepskyblue:"00bfff",dimgray:"696969",dimgrey:"696969",dodgerblue:"1e90ff",firebrick:"b22222",floralwhite:"fffaf0",forestgreen:"228b22",fuchsia:"ff00ff",gainsboro:"dcdcdc",ghostwhite:"f8f8ff",gold:"ffd700",goldenrod:"daa520",gray:"808080",green:"008000",greenyellow:"adff2f",grey:"808080",honeydew:"f0fff0",hotpink:"ff69b4",indianred:"cd5c5c",indigo:"4b0082",ivory:"fffff0",khaki:"f0e68c",lavender:"e6e6fa",lavenderblush:"fff0f5",lawngreen:"7cfc00",lemonchiffon:"fffacd",lightblue:"add8e6",lightcoral:"f08080",lightcyan:"e0ffff",lightgoldenrodyellow:"fafad2",lightgray:"d3d3d3",lightgreen:"90ee90",lightgrey:"d3d3d3",lightpink:"ffb6c1",lightsalmon:"ffa07a",lightseagreen:"20b2aa",lightskyblue:"87cefa",lightslategray:"789",lightslategrey:"789",lightsteelblue:"b0c4de",lightyellow:"ffffe0",lime:"0f0",limegreen:"32cd32",linen:"faf0e6",magenta:"f0f",maroon:"800000",mediumaquamarine:"66cdaa",mediumblue:"0000cd",mediumorchid:"ba55d3",mediumpurple:"9370db",mediumseagreen:"3cb371",mediumslateblue:"7b68ee",mediumspringgreen:"00fa9a",mediumturquoise:"48d1cc",mediumvioletred:"c71585",midnightblue:"191970",mintcream:"f5fffa",mistyrose:"ffe4e1",moccasin:"ffe4b5",navajowhite:"ffdead",navy:"000080",oldlace:"fdf5e6",olive:"808000",olivedrab:"6b8e23",orange:"ffa500",orangered:"ff4500",orchid:"da70d6",palegoldenrod:"eee8aa",palegreen:"98fb98",paleturquoise:"afeeee",palevioletred:"db7093",papayawhip:"ffefd5",peachpuff:"ffdab9",peru:"cd853f",pink:"ffc0cb",plum:"dda0dd",powderblue:"b0e0e6",purple:"800080",rebeccapurple:"639",red:"f00",rosybrown:"bc8f8f",royalblue:"4169e1",saddlebrown:"8b4513",salmon:"fa8072",sandybrown:"f4a460",seagreen:"2e8b57",seashell:"fff5ee",sienna:"a0522d",silver:"c0c0c0",skyblue:"87ceeb",slateblue:"6a5acd",slategray:"708090",slategrey:"708090",snow:"fffafa",springgreen:"00ff7f",steelblue:"4682b4",tan:"d2b48c",teal:"008080",thistle:"d8bfd8",tomato:"ff6347",turquoise:"40e0d0",violet:"ee82ee",wheat:"f5deb3",white:"fff",whitesmoke:"f5f5f5",yellow:"ff0",yellowgreen:"9acd32"};var Ae=/^#[a-fA-F0-9]{6}$/,Ee=/^#[a-fA-F0-9]{8}$/,Ce=/^#[a-fA-F0-9]{3}$/,Re=/^#[a-fA-F0-9]{4}$/,Oe=/^rgb\(\s*(\d{1,3})\s*(?:,)?\s*(\d{1,3})\s*(?:,)?\s*(\d{1,3})\s*\)$/i,Te=/^rgb(?:a)?\(\s*(\d{1,3})\s*(?:,)?\s*(\d{1,3})\s*(?:,)?\s*(\d{1,3})\s*(?:,|\/)\s*([-+]?\d*[.]?\d+[%]?)\s*\)$/i,ke=/^hsl\(\s*(\d{0,3}[.]?[0-9]+(?:deg)?)\s*(?:,)?\s*(\d{1,3}[.]?[0-9]?)%\s*(?:,)?\s*(\d{1,3}[.]?[0-9]?)%\s*\)$/i,Me=/^hsl(?:a)?\(\s*(\d{0,3}[.]?[0-9]+(?:deg)?)\s*(?:,)?\s*(\d{1,3}[.]?[0-9]?)%\s*(?:,)?\s*(\d{1,3}[.]?[0-9]?)%\s*(?:,|\/)\s*([-+]?\d*[.]?\d+[%]?)\s*\)$/i;function Pe(e){if("string"!==typeof e)throw new h(3);var t=function(e){if("string"!==typeof e)return e;var t=e.toLowerCase();return 
Se[t]?"#"+Se[t]:e}(e);if(t.match(Ae))return{red:parseInt(""+t[1]+t[2],16),green:parseInt(""+t[3]+t[4],16),blue:parseInt(""+t[5]+t[6],16)};if(t.match(Ee)){var n=parseFloat((parseInt(""+t[7]+t[8],16)/255).toFixed(2));return{red:parseInt(""+t[1]+t[2],16),green:parseInt(""+t[3]+t[4],16),blue:parseInt(""+t[5]+t[6],16),alpha:n}}if(t.match(Ce))return{red:parseInt(""+t[1]+t[1],16),green:parseInt(""+t[2]+t[2],16),blue:parseInt(""+t[3]+t[3],16)};if(t.match(Re)){var r=parseFloat((parseInt(""+t[4]+t[4],16)/255).toFixed(2));return{red:parseInt(""+t[1]+t[1],16),green:parseInt(""+t[2]+t[2],16),blue:parseInt(""+t[3]+t[3],16),alpha:r}}var o=Oe.exec(t);if(o)return{red:parseInt(""+o[1],10),green:parseInt(""+o[2],10),blue:parseInt(""+o[3],10)};var i=Te.exec(t.substring(0,50));if(i)return{red:parseInt(""+i[1],10),green:parseInt(""+i[2],10),blue:parseInt(""+i[3],10),alpha:parseFloat(""+i[4])>1?parseFloat(""+i[4])/100:parseFloat(""+i[4])};var a=ke.exec(t);if(a){var s="rgb("+xe(parseInt(""+a[1],10),parseInt(""+a[2],10)/100,parseInt(""+a[3],10)/100)+")",u=Oe.exec(s);if(!u)throw new h(4,t,s);return{red:parseInt(""+u[1],10),green:parseInt(""+u[2],10),blue:parseInt(""+u[3],10)}}var l=Me.exec(t.substring(0,50));if(l){var c="rgb("+xe(parseInt(""+l[1],10),parseInt(""+l[2],10)/100,parseInt(""+l[3],10)/100)+")",f=Oe.exec(c);if(!f)throw new h(4,t,c);return{red:parseInt(""+f[1],10),green:parseInt(""+f[2],10),blue:parseInt(""+f[3],10),alpha:parseFloat(""+l[4])>1?parseFloat(""+l[4])/100:parseFloat(""+l[4])}}throw new h(5)}function Ie(e){return function(e){var t,n=e.red/255,r=e.green/255,o=e.blue/255,i=Math.max(n,r,o),a=Math.min(n,r,o),s=(i+a)/2;if(i===a)return void 0!==e.alpha?{hue:0,saturation:0,lightness:s,alpha:e.alpha}:{hue:0,saturation:0,lightness:s};var u=i-a,l=s>.5?u/(2-i-a):u/(i+a);switch(i){case n:t=(r-o)/u+(r=1?je(e,t,n):"rgba("+xe(e,t,n)+","+r+")";if("object"===typeof e&&void 0===t&&void 0===n&&void 0===r)return e.alpha>=1?je(e.hue,e.saturation,e.lightness):"rgba("+xe(e.hue,e.saturation,e.lightness)+","+e.alpha+")";throw new h(2)}function Be(e,t,n){if("number"===typeof e&&"number"===typeof t&&"number"===typeof n)return Ne("#"+De(e)+De(t)+De(n));if("object"===typeof e&&void 0===t&&void 0===n)return Ne("#"+De(e.red)+De(e.green)+De(e.blue));throw new h(6)}function Ve(e,t,n,r){if("string"===typeof e&&"number"===typeof t){var o=Pe(e);return"rgba("+o.red+","+o.green+","+o.blue+","+t+")"}if("number"===typeof e&&"number"===typeof t&&"number"===typeof n&&"number"===typeof r)return r>=1?Be(e,t,n):"rgba("+e+","+t+","+n+","+r+")";if("object"===typeof e&&void 0===t&&void 0===n&&void 0===r)return e.alpha>=1?Be(e.red,e.green,e.blue):"rgba("+e.red+","+e.green+","+e.blue+","+e.alpha+")";throw new h(7)}var $e=function(e){return"number"===typeof e.red&&"number"===typeof e.green&&"number"===typeof e.blue&&("number"!==typeof e.alpha||"undefined"===typeof e.alpha)},He=function(e){return"number"===typeof e.red&&"number"===typeof e.green&&"number"===typeof e.blue&&"number"===typeof e.alpha},We=function(e){return"number"===typeof e.hue&&"number"===typeof e.saturation&&"number"===typeof e.lightness&&("number"!==typeof e.alpha||"undefined"===typeof e.alpha)},qe=function(e){return"number"===typeof e.hue&&"number"===typeof e.saturation&&"number"===typeof e.lightness&&"number"===typeof e.alpha};function Ge(e){if("object"!==typeof e)throw new h(8);if(He(e))return Ve(e);if($e(e))return Be(e);if(qe(e))return ze(e);if(We(e))return Ue(e);throw new h(8)}function Ye(e,t,n){return function(){var r=n.concat(Array.prototype.slice.call(arguments));return 
r.length>=t?e.apply(this,r):Ye(e,t,r)}}function Xe(e){return Ye(e,e.length,[])}var Ke=Xe((function(e,t){if("transparent"===t)return t;var n=Ie(t);return Ge((0,r.A)({},n,{hue:n.hue+parseFloat(e)}))}));function Ze(e){if("transparent"===e)return e;var t=Ie(e);return Ge((0,r.A)({},t,{hue:(t.hue+180)%360}))}function Qe(e,t,n){return Math.max(e,Math.min(t,n))}var Je=Xe((function(e,t){if("transparent"===t)return t;var n=Ie(t);return Ge((0,r.A)({},n,{lightness:Qe(0,1,n.lightness-parseFloat(e))}))}));var et=Xe((function(e,t){if("transparent"===t)return t;var n=Ie(t);return Ge((0,r.A)({},n,{saturation:Qe(0,1,n.saturation-parseFloat(e))}))}));function tt(e){if("transparent"===e)return 0;var t=Pe(e),n=Object.keys(t).map((function(e){var n=t[e]/255;return n<=.03928?n/12.92:Math.pow((n+.055)/1.055,2.4)})),r=n[0],o=n[1],i=n[2];return parseFloat((.2126*r+.7152*o+.0722*i).toFixed(3))}function nt(e,t){var n=tt(e),r=tt(t);return parseFloat((n>r?(n+.05)/(r+.05):(r+.05)/(n+.05)).toFixed(2))}function rt(e){return"transparent"===e?e:Ge((0,r.A)({},Ie(e),{saturation:0}))}function ot(e){if("object"===typeof e&&"number"===typeof e.hue&&"number"===typeof e.saturation&&"number"===typeof e.lightness)return e.alpha&&"number"===typeof e.alpha?ze({hue:e.hue,saturation:e.saturation,lightness:e.lightness,alpha:e.alpha}):Ue({hue:e.hue,saturation:e.saturation,lightness:e.lightness});throw new h(45)}function it(e){if("transparent"===e)return e;var t=Pe(e);return Ge((0,r.A)({},t,{red:255-t.red,green:255-t.green,blue:255-t.blue}))}var at=Xe((function(e,t){if("transparent"===t)return t;var n=Ie(t);return Ge((0,r.A)({},n,{lightness:Qe(0,1,n.lightness+parseFloat(e))}))}));function st(e,t){var n=nt(e,t);return{AA:n>=4.5,AALarge:n>=3,AAA:n>=7,AAALarge:n>=4.5}}var ut=Xe((function(e,t,n){if("transparent"===t)return n;if("transparent"===n)return t;if(0===e)return n;var o=Pe(t),i=(0,r.A)({},o,{alpha:"number"===typeof o.alpha?o.alpha:1}),a=Pe(n),s=(0,r.A)({},a,{alpha:"number"===typeof a.alpha?a.alpha:1}),u=i.alpha-s.alpha,l=2*parseFloat(e)-1,c=((l*u===-1?l:l+u)/(1+l*u)+1)/2,f=1-c;return Ve({red:Math.floor(i.red*c+s.red*f),green:Math.floor(i.green*c+s.green*f),blue:Math.floor(i.blue*c+s.blue*f),alpha:i.alpha*parseFloat(e)+s.alpha*(1-parseFloat(e))})}));var lt=Xe((function(e,t){if("transparent"===t)return t;var n=Pe(t),o="number"===typeof n.alpha?n.alpha:1;return Ve((0,r.A)({},n,{alpha:Qe(0,1,(100*o+100*parseFloat(e))/100)}))})),ct="#000",ft="#fff";function dt(e,t,n,r){void 0===t&&(t=ct),void 0===n&&(n=ft),void 0===r&&(r=!0);var o=tt(e)>.179,i=o?t:n;return!r||nt(e,i)>=4.5?i:o?ct:ft}function ht(e){if("object"===typeof e&&"number"===typeof e.red&&"number"===typeof e.green&&"number"===typeof e.blue)return"number"===typeof e.alpha?Ve({red:e.red,green:e.green,blue:e.blue,alpha:e.alpha}):Be({red:e.red,green:e.green,blue:e.blue});throw new h(46)}var pt=Xe((function(e,t){if("transparent"===t)return t;var n=Ie(t);return Ge((0,r.A)({},n,{saturation:Qe(0,1,n.saturation+parseFloat(e))}))}));var gt=Xe((function(e,t){return"transparent"===t?t:Ge((0,r.A)({},Ie(t),{hue:parseFloat(e)}))}));var vt=Xe((function(e,t){return"transparent"===t?t:Ge((0,r.A)({},Ie(t),{lightness:parseFloat(e)}))}));var yt=Xe((function(e,t){return"transparent"===t?t:Ge((0,r.A)({},Ie(t),{saturation:parseFloat(e)}))}));var mt=Xe((function(e,t){return"transparent"===t?t:ut(parseFloat(e),"rgb(0, 0, 0)",t)}));var bt=Xe((function(e,t){return"transparent"===t?t:ut(parseFloat(e),"rgb(255, 255, 255)",t)}));var _t=Xe((function(e,t){if("transparent"===t)return t;var n=Pe(t),o="number"===typeof 
n.alpha?n.alpha:1;return Ve((0,r.A)({},n,{alpha:Qe(0,1,+(100*o-100*parseFloat(e)).toFixed(2)/100)}))}));function wt(){for(var e=arguments.length,t=new Array(e),n=0;n8)throw new h(64);return{animation:t.map((function(e){if(r&&!Array.isArray(e)||!r&&Array.isArray(e))throw new h(65);if(Array.isArray(e)&&e.length>8)throw new h(66);return Array.isArray(e)?e.join(" "):e})).join(", ")}}function xt(){for(var e=arguments.length,t=new Array(e),n=0;n1?t-1:0),r=1;r=0?((o={})["border"+w(e)+"Width"]=n[0],o["border"+w(e)+"Style"]=n[1],o["border"+w(e)+"Color"]=n[2],o):(n.unshift(e),{borderWidth:n[0],borderStyle:n[1],borderColor:n[2]})}function Ct(){for(var e=arguments.length,t=new Array(e),n=0;n1?t-1:0),o=1;o=0&&e?(0,r.A)({},A.apply(void 0,[""].concat(n)),{position:e}):A.apply(void 0,["",e].concat(n))}function Ut(e,t){return void 0===t&&(t=e),{height:e,width:t}}var zt=[void 0,null,"active","focus","hover"];function Bt(e){return'input[type="color"]'+e+',\n input[type="date"]'+e+',\n input[type="datetime"]'+e+',\n input[type="datetime-local"]'+e+',\n input[type="email"]'+e+',\n input[type="month"]'+e+',\n input[type="number"]'+e+',\n input[type="password"]'+e+',\n input[type="search"]'+e+',\n input[type="tel"]'+e+',\n input[type="text"]'+e+',\n input[type="time"]'+e+',\n input[type="url"]'+e+',\n input[type="week"]'+e+",\n input:not([type])"+e+",\n textarea"+e}function Vt(){for(var e=arguments.length,t=new Array(e),n=0;n{"use strict";var r=n(6925);function o(){}function i(){}i.resetWarningCache=o,e.exports=function(){function e(e,t,n,o,i,a){if(a!==r){var s=new Error("Calling PropTypes validators directly is not supported by the `prop-types` package. Use PropTypes.checkPropTypes() to call them. Read more at http://fb.me/use-check-prop-types");throw s.name="Invariant Violation",s}}function t(){return e}e.isRequired=e;var n={array:e,bigint:e,bool:e,func:e,number:e,object:e,string:e,symbol:e,any:e,arrayOf:t,element:e,elementType:e,instanceOf:t,node:e,objectOf:t,oneOf:t,oneOfType:t,shape:t,exact:t,checkPropTypes:i,resetWarningCache:o};return n.PropTypes=n,n}},5556:(e,t,n)=>{e.exports=n(2694)()},6925:e=>{"use strict";e.exports="SECRET_DO_NOT_PASS_THIS_OR_YOU_WILL_BE_FIRED"},74765:e=>{"use strict";var t=String.prototype.replace,n=/%20/g,r="RFC1738",o="RFC3986";e.exports={default:o,formatters:{RFC1738:function(e){return t.call(e,n,"+")},RFC3986:function(e){return String(e)}},RFC1738:r,RFC3986:o}},55373:(e,t,n)=>{"use strict";var r=n(98636),o=n(62642),i=n(74765);e.exports={formats:i,parse:o,stringify:r}},62642:(e,t,n)=>{"use strict";var r=n(37720),o=Object.prototype.hasOwnProperty,i=Array.isArray,a={allowDots:!1,allowEmptyArrays:!1,allowPrototypes:!1,allowSparse:!1,arrayLimit:20,charset:"utf-8",charsetSentinel:!1,comma:!1,decodeDotInKeys:!0,decoder:r.decode,delimiter:"&",depth:5,duplicates:"combine",ignoreQueryPrefix:!1,interpretNumericEntities:!1,parameterLimit:1e3,parseArrays:!0,plainObjects:!1,strictNullHandling:!1},s=function(e){return e.replace(/&#(\d+);/g,(function(e,t){return String.fromCharCode(parseInt(t,10))}))},u=function(e,t){return e&&"string"===typeof e&&t.comma&&e.indexOf(",")>-1?e.split(","):e},l=function(e,t,n,r){if(e){var i=n.allowDots?e.replace(/\.([^.[]+)/g,"[$1]"):e,a=/(\[[^[\]]*])/g,s=n.depth>0&&/(\[[^[\]]*])/.exec(i),l=s?i.slice(0,s.index):i,c=[];if(l){if(!n.plainObjects&&o.call(Object.prototype,l)&&!n.allowPrototypes)return;c.push(l)}for(var f=0;n.depth>0&&null!==(s=a.exec(i))&&f=0;--i){var 
a,s=e[i];if("[]"===s&&n.parseArrays)a=n.allowEmptyArrays&&""===o?[]:[].concat(o);else{a=n.plainObjects?Object.create(null):{};var l="["===s.charAt(0)&&"]"===s.charAt(s.length-1)?s.slice(1,-1):s,c=n.decodeDotInKeys?l.replace(/%2E/g,"."):l,f=parseInt(c,10);n.parseArrays||""!==c?!isNaN(f)&&s!==c&&String(f)===c&&f>=0&&n.parseArrays&&f<=n.arrayLimit?(a=[])[f]=o:"__proto__"!==c&&(a[c]=o):a={0:o}}o=a}return o}(c,t,n,r)}};e.exports=function(e,t){var n=function(e){if(!e)return a;if("undefined"!==typeof e.allowEmptyArrays&&"boolean"!==typeof e.allowEmptyArrays)throw new TypeError("`allowEmptyArrays` option can only be `true` or `false`, when provided");if("undefined"!==typeof e.decodeDotInKeys&&"boolean"!==typeof e.decodeDotInKeys)throw new TypeError("`decodeDotInKeys` option can only be `true` or `false`, when provided");if(null!==e.decoder&&"undefined"!==typeof e.decoder&&"function"!==typeof e.decoder)throw new TypeError("Decoder has to be a function.");if("undefined"!==typeof e.charset&&"utf-8"!==e.charset&&"iso-8859-1"!==e.charset)throw new TypeError("The charset option must be either utf-8, iso-8859-1, or undefined");var t="undefined"===typeof e.charset?a.charset:e.charset,n="undefined"===typeof e.duplicates?a.duplicates:e.duplicates;if("combine"!==n&&"first"!==n&&"last"!==n)throw new TypeError("The duplicates option must be either combine, first, or last");return{allowDots:"undefined"===typeof e.allowDots?!0===e.decodeDotInKeys||a.allowDots:!!e.allowDots,allowEmptyArrays:"boolean"===typeof e.allowEmptyArrays?!!e.allowEmptyArrays:a.allowEmptyArrays,allowPrototypes:"boolean"===typeof e.allowPrototypes?e.allowPrototypes:a.allowPrototypes,allowSparse:"boolean"===typeof e.allowSparse?e.allowSparse:a.allowSparse,arrayLimit:"number"===typeof e.arrayLimit?e.arrayLimit:a.arrayLimit,charset:t,charsetSentinel:"boolean"===typeof e.charsetSentinel?e.charsetSentinel:a.charsetSentinel,comma:"boolean"===typeof e.comma?e.comma:a.comma,decodeDotInKeys:"boolean"===typeof e.decodeDotInKeys?e.decodeDotInKeys:a.decodeDotInKeys,decoder:"function"===typeof e.decoder?e.decoder:a.decoder,delimiter:"string"===typeof e.delimiter||r.isRegExp(e.delimiter)?e.delimiter:a.delimiter,depth:"number"===typeof e.depth||!1===e.depth?+e.depth:a.depth,duplicates:n,ignoreQueryPrefix:!0===e.ignoreQueryPrefix,interpretNumericEntities:"boolean"===typeof e.interpretNumericEntities?e.interpretNumericEntities:a.interpretNumericEntities,parameterLimit:"number"===typeof e.parameterLimit?e.parameterLimit:a.parameterLimit,parseArrays:!1!==e.parseArrays,plainObjects:"boolean"===typeof e.plainObjects?e.plainObjects:a.plainObjects,strictNullHandling:"boolean"===typeof e.strictNullHandling?e.strictNullHandling:a.strictNullHandling}}(t);if(""===e||null===e||"undefined"===typeof e)return n.plainObjects?Object.create(null):{};for(var c="string"===typeof e?function(e,t){var n,l={__proto__:null},c=t.ignoreQueryPrefix?e.replace(/^\?/,""):e,f=t.parameterLimit===1/0?void 0:t.parameterLimit,d=c.split(t.delimiter,f),h=-1,p=t.charset;if(t.charsetSentinel)for(n=0;n-1&&(v=i(v)?[v]:v);var _=o.call(l,g);_&&"combine"===t.duplicates?l[g]=r.combine(l[g],v):_&&"last"!==t.duplicates||(l[g]=v)}return l}(e,n):e,f=n.plainObjects?Object.create(null):{},d=Object.keys(c),h=0;h{"use strict";var r=n(920),o=n(37720),i=n(74765),a=Object.prototype.hasOwnProperty,s={brackets:function(e){return e+"[]"},comma:"comma",indices:function(e,t){return e+"["+t+"]"},repeat:function(e){return 
e}},u=Array.isArray,l=Array.prototype.push,c=function(e,t){l.apply(e,u(t)?t:[t])},f=Date.prototype.toISOString,d=i.default,h={addQueryPrefix:!1,allowDots:!1,allowEmptyArrays:!1,arrayFormat:"indices",charset:"utf-8",charsetSentinel:!1,delimiter:"&",encode:!0,encodeDotInKeys:!1,encoder:o.encode,encodeValuesOnly:!1,format:d,formatter:i.formatters[d],indices:!1,serializeDate:function(e){return f.call(e)},skipNulls:!1,strictNullHandling:!1},p={},g=function e(t,n,i,a,s,l,f,d,g,v,y,m,b,_,w,x,S,A){for(var E,C=t,R=A,O=0,T=!1;void 0!==(R=R.get(p))&&!T;){var k=R.get(t);if(O+=1,"undefined"!==typeof k){if(k===O)throw new RangeError("Cyclic object value");T=!0}"undefined"===typeof R.get(p)&&(O=0)}if("function"===typeof v?C=v(n,C):C instanceof Date?C=b(C):"comma"===i&&u(C)&&(C=o.maybeMap(C,(function(e){return e instanceof Date?b(e):e}))),null===C){if(l)return g&&!x?g(n,h.encoder,S,"key",_):n;C=""}if("string"===typeof(E=C)||"number"===typeof E||"boolean"===typeof E||"symbol"===typeof E||"bigint"===typeof E||o.isBuffer(C))return g?[w(x?n:g(n,h.encoder,S,"key",_))+"="+w(g(C,h.encoder,S,"value",_))]:[w(n)+"="+w(String(C))];var M,P=[];if("undefined"===typeof C)return P;if("comma"===i&&u(C))x&&g&&(C=o.maybeMap(C,g)),M=[{value:C.length>0?C.join(",")||null:void 0}];else if(u(v))M=v;else{var I=Object.keys(C);M=y?I.sort(y):I}var N=d?n.replace(/\./g,"%2E"):n,D=a&&u(C)&&1===C.length?N+"[]":N;if(s&&u(C)&&0===C.length)return D+"[]";for(var L=0;L0?_+b:""}},37720:(e,t,n)=>{"use strict";var r=n(74765),o=Object.prototype.hasOwnProperty,i=Array.isArray,a=function(){for(var e=[],t=0;t<256;++t)e.push("%"+((t<16?"0":"")+t.toString(16)).toUpperCase());return e}(),s=function(e,t){for(var n=t&&t.plainObjects?Object.create(null):{},r=0;r1;){var t=e.pop(),n=t.obj[t.prop];if(i(n)){for(var r=[],o=0;o=48&&c<=57||c>=65&&c<=90||c>=97&&c<=122||i===r.RFC1738&&(40===c||41===c)?u+=s.charAt(l):c<128?u+=a[c]:c<2048?u+=a[192|c>>6]+a[128|63&c]:c<55296||c>=57344?u+=a[224|c>>12]+a[128|c>>6&63]+a[128|63&c]:(l+=1,c=65536+((1023&c)<<10|1023&s.charCodeAt(l)),u+=a[240|c>>18]+a[128|c>>12&63]+a[128|c>>6&63]+a[128|63&c])}return u},isBuffer:function(e){return!(!e||"object"!==typeof e)&&!!(e.constructor&&e.constructor.isBuffer&&e.constructor.isBuffer(e))},isRegExp:function(e){return"[object RegExp]"===Object.prototype.toString.call(e)},maybeMap:function(e,t){if(i(e)){for(var n=[],r=0;r{"use strict";const r=n(24280),o=n(30454),i=n(528),a=n(73055),s=Symbol("encodeFragmentIdentifier");function u(e){if("string"!==typeof e||1!==e.length)throw new TypeError("arrayFormatSeparator must be single character string")}function l(e,t){return t.encode?t.strict?r(e):encodeURIComponent(e):e}function c(e,t){return t.decode?o(e):e}function f(e){return Array.isArray(e)?e.sort():"object"===typeof e?f(Object.keys(e)).sort(((e,t)=>Number(e)-Number(t))).map((t=>e[t])):e}function d(e){const t=e.indexOf("#");return-1!==t&&(e=e.slice(0,t)),e}function h(e){const t=(e=d(e)).indexOf("?");return-1===t?"":e.slice(t+1)}function p(e,t){return t.parseNumbers&&!Number.isNaN(Number(e))&&"string"===typeof e&&""!==e.trim()?e=Number(e):!t.parseBooleans||null===e||"true"!==e.toLowerCase()&&"false"!==e.toLowerCase()||(e="true"===e.toLowerCase()),e}function g(e,t){u((t=Object.assign({decode:!0,sort:!0,arrayFormat:"none",arrayFormatSeparator:",",parseNumbers:!1,parseBooleans:!1},t)).arrayFormatSeparator);const n=function(e){let t;switch(e.arrayFormat){case"index":return(e,n,r)=>{t=/\[(\d*)\]$/.exec(e),e=e.replace(/\[\d*\]$/,""),t?(void 
0===r[e]&&(r[e]={}),r[e][t[1]]=n):r[e]=n};case"bracket":return(e,n,r)=>{t=/(\[\])$/.exec(e),e=e.replace(/\[\]$/,""),t?void 0!==r[e]?r[e]=[].concat(r[e],n):r[e]=[n]:r[e]=n};case"colon-list-separator":return(e,n,r)=>{t=/(:list)$/.exec(e),e=e.replace(/:list$/,""),t?void 0!==r[e]?r[e]=[].concat(r[e],n):r[e]=[n]:r[e]=n};case"comma":case"separator":return(t,n,r)=>{const o="string"===typeof n&&n.includes(e.arrayFormatSeparator),i="string"===typeof n&&!o&&c(n,e).includes(e.arrayFormatSeparator);n=i?c(n,e):n;const a=o||i?n.split(e.arrayFormatSeparator).map((t=>c(t,e))):null===n?n:c(n,e);r[t]=a};case"bracket-separator":return(t,n,r)=>{const o=/(\[\])$/.test(t);if(t=t.replace(/\[\]$/,""),!o)return void(r[t]=n?c(n,e):n);const i=null===n?[]:n.split(e.arrayFormatSeparator).map((t=>c(t,e)));void 0!==r[t]?r[t]=[].concat(r[t],i):r[t]=i};default:return(e,t,n)=>{void 0!==n[e]?n[e]=[].concat(n[e],t):n[e]=t}}}(t),r=Object.create(null);if("string"!==typeof e)return r;if(!(e=e.trim().replace(/^[?#&]/,"")))return r;for(const o of e.split("&")){if(""===o)continue;let[e,a]=i(t.decode?o.replace(/\+/g," "):o,"=");a=void 0===a?null:["comma","separator","bracket-separator"].includes(t.arrayFormat)?a:c(a,t),n(c(e,t),a,r)}for(const o of Object.keys(r)){const e=r[o];if("object"===typeof e&&null!==e)for(const n of Object.keys(e))e[n]=p(e[n],t);else r[o]=p(e,t)}return!1===t.sort?r:(!0===t.sort?Object.keys(r).sort():Object.keys(r).sort(t.sort)).reduce(((e,t)=>{const n=r[t];return Boolean(n)&&"object"===typeof n&&!Array.isArray(n)?e[t]=f(n):e[t]=n,e}),Object.create(null))}t.extract=h,t.parse=g,t.stringify=(e,t)=>{if(!e)return"";u((t=Object.assign({encode:!0,strict:!0,arrayFormat:"none",arrayFormatSeparator:","},t)).arrayFormatSeparator);const n=n=>{return t.skipNull&&(null===(r=e[n])||void 0===r)||t.skipEmptyString&&""===e[n];var r},r=function(e){switch(e.arrayFormat){case"index":return t=>(n,r)=>{const o=n.length;return void 0===r||e.skipNull&&null===r||e.skipEmptyString&&""===r?n:null===r?[...n,[l(t,e),"[",o,"]"].join("")]:[...n,[l(t,e),"[",l(o,e),"]=",l(r,e)].join("")]};case"bracket":return t=>(n,r)=>void 0===r||e.skipNull&&null===r||e.skipEmptyString&&""===r?n:null===r?[...n,[l(t,e),"[]"].join("")]:[...n,[l(t,e),"[]=",l(r,e)].join("")];case"colon-list-separator":return t=>(n,r)=>void 0===r||e.skipNull&&null===r||e.skipEmptyString&&""===r?n:null===r?[...n,[l(t,e),":list="].join("")]:[...n,[l(t,e),":list=",l(r,e)].join("")];case"comma":case"separator":case"bracket-separator":{const t="bracket-separator"===e.arrayFormat?"[]=":"=";return n=>(r,o)=>void 0===o||e.skipNull&&null===o||e.skipEmptyString&&""===o?r:(o=null===o?"":o,0===r.length?[[l(n,e),t,l(o,e)].join("")]:[[r,l(o,e)].join(e.arrayFormatSeparator)])}default:return t=>(n,r)=>void 0===r||e.skipNull&&null===r||e.skipEmptyString&&""===r?n:null===r?[...n,l(t,e)]:[...n,[l(t,e),"=",l(r,e)].join("")]}}(t),o={};for(const a of Object.keys(e))n(a)||(o[a]=e[a]);const i=Object.keys(o);return!1!==t.sort&&i.sort(t.sort),i.map((n=>{const o=e[n];return void 0===o?"":null===o?l(n,t):Array.isArray(o)?0===o.length&&"bracket-separator"===t.arrayFormat?l(n,t)+"[]":o.reduce(r(n),[]).join("&"):l(n,t)+"="+l(o,t)})).filter((e=>e.length>0)).join("&")},t.parseUrl=(e,t)=>{t=Object.assign({decode:!0},t);const[n,r]=i(e,"#");return Object.assign({url:n.split("?")[0]||"",query:g(h(e),t)},t&&t.parseFragmentIdentifier&&r?{fragmentIdentifier:c(r,t)}:{})},t.stringifyUrl=(e,n)=>{n=Object.assign({encode:!0,strict:!0,[s]:!0},n);const 
r=d(e.url).split("?")[0]||"",o=t.extract(e.url),i=t.parse(o,{sort:!1}),a=Object.assign(i,e.query);let u=t.stringify(a,n);u&&(u=`?${u}`);let c=function(e){let t="";const n=e.indexOf("#");return-1!==n&&(t=e.slice(n)),t}(e.url);return e.fragmentIdentifier&&(c=`#${n[s]?l(e.fragmentIdentifier,n):e.fragmentIdentifier}`),`${r}${u}${c}`},t.pick=(e,n,r)=>{r=Object.assign({parseFragmentIdentifier:!0,[s]:!1},r);const{url:o,query:i,fragmentIdentifier:u}=t.parseUrl(e,r);return t.stringifyUrl({url:o,query:a(i,n),fragmentIdentifier:u},r)},t.exclude=(e,n,r)=>{const o=Array.isArray(n)?e=>!n.includes(e):(e,t)=>!n(e,t);return t.pick(e,o,r)}},22799:(e,t)=>{"use strict";var n,r=Symbol.for("react.element"),o=Symbol.for("react.portal"),i=Symbol.for("react.fragment"),a=Symbol.for("react.strict_mode"),s=Symbol.for("react.profiler"),u=Symbol.for("react.provider"),l=Symbol.for("react.context"),c=Symbol.for("react.server_context"),f=Symbol.for("react.forward_ref"),d=Symbol.for("react.suspense"),h=Symbol.for("react.suspense_list"),p=Symbol.for("react.memo"),g=Symbol.for("react.lazy"),v=Symbol.for("react.offscreen");function y(e){if("object"===typeof e&&null!==e){var t=e.$$typeof;switch(t){case r:switch(e=e.type){case i:case s:case a:case d:case h:return e;default:switch(e=e&&e.$$typeof){case c:case l:case f:case g:case p:case u:return e;default:return t}}case o:return t}}}n=Symbol.for("react.module.reference"),t.ContextConsumer=l,t.ContextProvider=u,t.Element=r,t.ForwardRef=f,t.Fragment=i,t.Lazy=g,t.Memo=p,t.Portal=o,t.Profiler=s,t.StrictMode=a,t.Suspense=d,t.SuspenseList=h,t.isAsyncMode=function(){return!1},t.isConcurrentMode=function(){return!1},t.isContextConsumer=function(e){return y(e)===l},t.isContextProvider=function(e){return y(e)===u},t.isElement=function(e){return"object"===typeof e&&null!==e&&e.$$typeof===r},t.isForwardRef=function(e){return y(e)===f},t.isFragment=function(e){return y(e)===i},t.isLazy=function(e){return y(e)===g},t.isMemo=function(e){return y(e)===p},t.isPortal=function(e){return y(e)===o},t.isProfiler=function(e){return y(e)===s},t.isStrictMode=function(e){return y(e)===a},t.isSuspense=function(e){return y(e)===d},t.isSuspenseList=function(e){return y(e)===h},t.isValidElementType=function(e){return"string"===typeof e||"function"===typeof e||e===i||e===s||e===a||e===d||e===h||e===v||"object"===typeof e&&null!==e&&(e.$$typeof===g||e.$$typeof===p||e.$$typeof===u||e.$$typeof===l||e.$$typeof===f||e.$$typeof===n||void 0!==e.getModuleId)},t.typeOf=y},44363:(e,t,n)=>{"use strict";e.exports=n(22799)},30708:(e,t,n)=>{"use strict";n.d(t,{A:()=>p});var r=n(40961),o=n(96540),i=n(5556),a=n.n(i),s=!("undefined"===typeof window||!window.document||!window.document.createElement),u=function(){function e(e,t){for(var n=0;n{"use strict";var r,o;n.d(t,{N_:()=>y,ok:()=>_,rI:()=>p});var i=n(96540),a=n(40961),s=n(47767),u=n(45588);function l(){return l=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(o[n]=e[n]);return o}function f(e){return void 0===e&&(e=""),new URLSearchParams("string"===typeof e||Array.isArray(e)||e instanceof URLSearchParams?e:Object.keys(e).reduce(((t,n)=>{let r=e[n];return t.concat(Array.isArray(r)?r.map((e=>[n,e])):[[n,r]])}),[]))}new Set(["application/x-www-form-urlencoded","multipart/form-data","text/plain"]);const d=["onClick","relative","reloadDocument","replace","state","target","to","preventScrollReset","unstable_viewTransition"];try{window.__reactRouterVersion="6"}catch(w){}new Map;const 
h=(r||(r=n.t(i,2))).startTransition;(o||(o=n.t(a,2))).flushSync,(r||(r=n.t(i,2))).useId;function p(e){let{basename:t,children:n,future:r,history:o}=e,[a,u]=i.useState({action:o.action,location:o.location}),{v7_startTransition:l}=r||{},c=i.useCallback((e=>{l&&h?h((()=>u(e))):u(e)}),[u,l]);return i.useLayoutEffect((()=>o.listen(c)),[o,c]),i.createElement(s.Ix,{basename:t,children:n,location:a.location,navigationType:a.action,navigator:o,future:r})}const g="undefined"!==typeof window&&"undefined"!==typeof window.document&&"undefined"!==typeof window.document.createElement,v=/^(?:[a-z][a-z0-9+.-]*:|\/\/)/i,y=i.forwardRef((function(e,t){let n,{onClick:r,relative:o,reloadDocument:a,replace:f,state:h,target:p,to:y,preventScrollReset:m,unstable_viewTransition:b}=e,_=c(e,d),{basename:x}=i.useContext(s.jb),S=!1;if("string"===typeof y&&v.test(y)&&(n=y,g))try{let e=new URL(window.location.href),t=y.startsWith("//")?new URL(e.protocol+y):new URL(y),n=(0,u.pb)(t.pathname,x);t.origin===e.origin&&null!=n?y=n+t.search+t.hash:S=!0}catch(w){}let A=(0,s.$P)(y,{relative:o}),E=function(e,t){let{target:n,replace:r,state:o,preventScrollReset:a,relative:l,unstable_viewTransition:c}=void 0===t?{}:t,f=(0,s.Zp)(),d=(0,s.zy)(),h=(0,s.x$)(e,{relative:l});return i.useCallback((t=>{if(function(e,t){return 0===e.button&&(!t||"_self"===t)&&!function(e){return!!(e.metaKey||e.altKey||e.ctrlKey||e.shiftKey)}(e)}(t,n)){t.preventDefault();let n=void 0!==r?r:(0,u.AO)(d)===(0,u.AO)(h);f(e,{replace:n,state:o,preventScrollReset:a,relative:l,unstable_viewTransition:c})}}),[d,f,h,r,o,n,e,a,l,c])}(y,{replace:f,state:h,target:p,preventScrollReset:m,relative:o,unstable_viewTransition:b});return i.createElement("a",l({},_,{href:n||A,onClick:S||a?r:function(e){r&&r(e),e.defaultPrevented||E(e)},ref:t,target:p}))}));var m,b;function _(e){let t=i.useRef(f(e)),n=i.useRef(!1),r=(0,s.zy)(),o=i.useMemo((()=>function(e,t){let n=f(e);return t&&t.forEach(((e,r)=>{n.has(r)||t.getAll(r).forEach((e=>{n.append(r,e)}))})),n}(r.search,n.current?null:t.current)),[r.search]),a=(0,s.Zp)(),u=i.useCallback(((e,t)=>{const r=f("function"===typeof e?e(o):e);n.current=!0,a("?"+r,t)}),[a,o]);return[o,u]}(function(e){e.UseScrollRestoration="useScrollRestoration",e.UseSubmit="useSubmit",e.UseSubmitFetcher="useSubmitFetcher",e.UseFetcher="useFetcher",e.useViewTransitionState="useViewTransitionState"})(m||(m={})),function(e){e.UseFetcher="useFetcher",e.UseFetchers="useFetchers",e.UseScrollRestoration="useScrollRestoration"}(b||(b={}))},47767:(e,t,n)=>{"use strict";var r;n.d(t,{$P:()=>h,BV:()=>L,C5:()=>I,Ix:()=>D,RQ:()=>v,Zp:()=>m,g:()=>b,jb:()=>l,qh:()=>N,x$:()=>_,zy:()=>g});var o=n(96540),i=n(45588);function a(){return a=Object.assign?Object.assign.bind():function(e){for(var t=1;t(0,i.B6)(e,t)),[t,e])}function y(e){o.useContext(l).static||o.useLayoutEffect(e)}function m(){let{isDataRoute:e}=o.useContext(f);return e?function(){let{router:e}=T(R.UseNavigateStable),t=M(O.UseNavigateStable),n=o.useRef(!1);return y((()=>{n.current=!0})),o.useCallback((function(r,o){void 0===o&&(o={}),n.current&&("number"===typeof r?e.navigate(r):e.navigate(r,a({fromRouteId:t},o)))}),[e,t])}():function(){p()||(0,i.Oi)(!1);let e=o.useContext(s),{basename:t,future:n,navigator:r}=o.useContext(l),{matches:a}=o.useContext(f),{pathname:u}=g(),c=JSON.stringify((0,i.yD)(a,n.v7_relativeSplatPath)),d=o.useRef(!1);return y((()=>{d.current=!0})),o.useCallback((function(n,o){if(void 0===o&&(o={}),!d.current)return;if("number"===typeof n)return void r.go(n);let 
a=(0,i.Gh)(n,JSON.parse(c),u,"path"===o.relative);null==e&&"/"!==t&&(a.pathname="/"===a.pathname?t:(0,i.HS)([t,a.pathname])),(o.replace?r.replace:r.push)(a,o.state,o)}),[t,r,c,u,e])}()}function b(){let{matches:e}=o.useContext(f),t=e[e.length-1];return t?t.params:{}}function _(e,t){let{relative:n}=void 0===t?{}:t,{future:r}=o.useContext(l),{matches:a}=o.useContext(f),{pathname:s}=g(),u=JSON.stringify((0,i.yD)(a,r.v7_relativeSplatPath));return o.useMemo((()=>(0,i.Gh)(e,JSON.parse(u),s,"path"===n)),[e,u,s,n])}function w(e,t,n,r){p()||(0,i.Oi)(!1);let{navigator:s}=o.useContext(l),{matches:u}=o.useContext(f),d=u[u.length-1],h=d?d.params:{},v=(d&&d.pathname,d?d.pathnameBase:"/");d&&d.route;let y,m=g();if(t){var b;let e="string"===typeof t?(0,i.Rr)(t):t;"/"===v||(null==(b=e.pathname)?void 0:b.startsWith(v))||(0,i.Oi)(!1),y=e}else y=m;let _=y.pathname||"/",w=_;if("/"!==v){let e=v.replace(/^\//,"").split("/");w="/"+_.replace(/^\//,"").split("/").slice(e.length).join("/")}let x=(0,i.ue)(e,{pathname:w});let S=C(x&&x.map((e=>Object.assign({},e,{params:Object.assign({},h,e.params),pathname:(0,i.HS)([v,s.encodeLocation?s.encodeLocation(e.pathname).pathname:e.pathname]),pathnameBase:"/"===e.pathnameBase?v:(0,i.HS)([v,s.encodeLocation?s.encodeLocation(e.pathnameBase).pathname:e.pathnameBase])}))),u,n,r);return t&&S?o.createElement(c.Provider,{value:{location:a({pathname:"/",search:"",hash:"",state:null,key:"default"},y),navigationType:i.rc.Pop}},S):S}function x(){let e=function(){var e;let t=o.useContext(d),n=k(O.UseRouteError),r=M(O.UseRouteError);if(void 0!==t)return t;return null==(e=n.errors)?void 0:e[r]}(),t=(0,i.pX)(e)?e.status+" "+e.statusText:e instanceof Error?e.message:JSON.stringify(e),n=e instanceof Error?e.stack:null,r="rgba(200,200,200, 0.5)",a={padding:"0.5rem",backgroundColor:r};return o.createElement(o.Fragment,null,o.createElement("h2",null,"Unexpected Application Error!"),o.createElement("h3",{style:{fontStyle:"italic"}},t),n?o.createElement("pre",{style:a},n):null,null)}const S=o.createElement(x,null);class A extends o.Component{constructor(e){super(e),this.state={location:e.location,revalidation:e.revalidation,error:e.error}}static getDerivedStateFromError(e){return{error:e}}static getDerivedStateFromProps(e,t){return t.location!==e.location||"idle"!==t.revalidation&&"idle"===e.revalidation?{error:e.error,location:e.location,revalidation:e.revalidation}:{error:void 0!==e.error?e.error:t.error,location:t.location,revalidation:e.revalidation||t.revalidation}}componentDidCatch(e,t){console.error("React Router caught the following error during render",e,t)}render(){return void 0!==this.state.error?o.createElement(f.Provider,{value:this.props.routeContext},o.createElement(d.Provider,{value:this.state.error,children:this.props.component})):this.props.children}}function E(e){let{routeContext:t,match:n,children:r}=e,i=o.useContext(s);return i&&i.static&&i.staticContext&&(n.route.errorElement||n.route.ErrorBoundary)&&(i.staticContext._deepestRenderedBoundaryId=n.route.id),o.createElement(f.Provider,{value:t},r)}function C(e,t,n,r){var a;if(void 0===t&&(t=[]),void 0===n&&(n=null),void 0===r&&(r=null),null==e){var s;if(null==(s=n)||!s.errors)return null;e=n.matches}let u=e,l=null==(a=n)?void 0:a.errors;if(null!=l){let e=u.findIndex((e=>e.route.id&&void 0!==(null==l?void 0:l[e.route.id])));e>=0||(0,i.Oi)(!1),u=u.slice(0,Math.min(u.length,e+1))}let c=!1,f=-1;if(n&&r&&r.v7_partialHydration)for(let o=0;o=0?u.slice(0,f+1):[u[0]];break}}}return u.reduceRight(((e,r,i)=>{let a,s=!1,d=null,h=null;var 
p;n&&(a=l&&r.route.id?l[r.route.id]:void 0,d=r.route.errorElement||S,c&&(f<0&&0===i?(p="route-fallback",!1||P[p]||(P[p]=!0),s=!0,h=null):f===i&&(s=!0,h=r.route.hydrateFallbackElement||null)));let g=t.concat(u.slice(0,i+1)),v=()=>{let t;return t=a?d:s?h:r.route.Component?o.createElement(r.route.Component,null):r.route.element?r.route.element:e,o.createElement(E,{match:r,routeContext:{outlet:e,matches:g,isDataRoute:null!=n},children:t})};return n&&(r.route.ErrorBoundary||r.route.errorElement||0===i)?o.createElement(A,{location:n.location,revalidation:n.revalidation,component:d,error:a,children:v(),routeContext:{outlet:null,matches:g,isDataRoute:!0}}):v()}),null)}var R=function(e){return e.UseBlocker="useBlocker",e.UseRevalidator="useRevalidator",e.UseNavigateStable="useNavigate",e}(R||{}),O=function(e){return e.UseBlocker="useBlocker",e.UseLoaderData="useLoaderData",e.UseActionData="useActionData",e.UseRouteError="useRouteError",e.UseNavigation="useNavigation",e.UseRouteLoaderData="useRouteLoaderData",e.UseMatches="useMatches",e.UseRevalidator="useRevalidator",e.UseNavigateStable="useNavigate",e.UseRouteId="useRouteId",e}(O||{});function T(e){let t=o.useContext(s);return t||(0,i.Oi)(!1),t}function k(e){let t=o.useContext(u);return t||(0,i.Oi)(!1),t}function M(e){let t=function(e){let t=o.useContext(f);return t||(0,i.Oi)(!1),t}(),n=t.matches[t.matches.length-1];return n.route.id||(0,i.Oi)(!1),n.route.id}const P={};(r||(r=n.t(o,2))).startTransition;function I(e){let{to:t,replace:n,state:r,relative:a}=e;p()||(0,i.Oi)(!1);let{future:s,static:u}=o.useContext(l),{matches:c}=o.useContext(f),{pathname:d}=g(),h=m(),v=(0,i.Gh)(t,(0,i.yD)(c,s.v7_relativeSplatPath),d,"path"===a),y=JSON.stringify(v);return o.useEffect((()=>h(JSON.parse(y),{replace:n,state:r,relative:a})),[h,y,a,n,r]),null}function N(e){(0,i.Oi)(!1)}function D(e){let{basename:t="/",children:n=null,location:r,navigationType:s=i.rc.Pop,navigator:u,static:f=!1,future:d}=e;p()&&(0,i.Oi)(!1);let h=t.replace(/^\/*/,"/"),g=o.useMemo((()=>({basename:h,navigator:u,static:f,future:a({v7_relativeSplatPath:!1},d)})),[h,d,u,f]);"string"===typeof r&&(r=(0,i.Rr)(r));let{pathname:v="/",search:y="",hash:m="",state:b=null,key:_="default"}=r,w=o.useMemo((()=>{let e=(0,i.pb)(v,h);return null==e?null:{location:{pathname:e,search:y,hash:m,state:b,key:_},navigationType:s}}),[h,v,y,m,b,_,s]);return null==w?null:o.createElement(l.Provider,{value:g},o.createElement(c.Provider,{children:n,value:w}))}function L(e){let{children:t,location:n}=e;return w(F(t),n)}new Promise((()=>{}));o.Component;function F(e,t){void 0===t&&(t=[]);let n=[];return o.Children.forEach(e,((e,r)=>{if(!o.isValidElement(e))return;let a=[...t,r];if(e.type===o.Fragment)return void n.push.apply(n,F(e.props.children,a));e.type!==N&&(0,i.Oi)(!1),e.props.index&&e.props.children&&(0,i.Oi)(!1);let s={id:e.props.id||a.join("-"),caseSensitive:e.props.caseSensitive,element:e.props.element,Component:e.props.Component,index:e.props.index,path:e.props.path,loader:e.props.loader,action:e.props.action,errorElement:e.props.errorElement,ErrorBoundary:e.props.ErrorBoundary,hasErrorBoundary:null!=e.props.ErrorBoundary||null!=e.props.errorElement,shouldRevalidate:e.props.shouldRevalidate,handle:e.props.handle,lazy:e.props.lazy};e.props.children&&(s.children=F(e.props.children,a)),n.push(s)})),n}},12689:(e,t,n)=>{"use strict";n.d(t,{A:()=>p});var 
r=n(58168),o=n(96540),i=n(17531),a=n(52836),s=n(89379),u=n(45458),l=n(80045),c=n(16093),f=["allowCreateWhileLoading","createOptionPosition","formatCreateLabel","isValidNewOption","getNewOptionData","onCreateOption","options","onChange"],d=function(){var e=arguments.length>1?arguments[1]:void 0,t=arguments.length>2?arguments[2]:void 0,n=String(arguments.length>0&&void 0!==arguments[0]?arguments[0]:"").toLowerCase(),r=String(t.getOptionValue(e)).toLowerCase(),o=String(t.getOptionLabel(e)).toLowerCase();return r===n||o===n},h={formatCreateLabel:function(e){return'Create "'.concat(e,'"')},isValidNewOption:function(e,t,n,r){return!(!e||t.some((function(t){return d(e,t,r)}))||n.some((function(t){return d(e,t,r)})))},getNewOptionData:function(e,t){return{label:t,value:e,__isNew__:!0}}};n(40961),n(27003);var p=(0,o.forwardRef)((function(e,t){var n=function(e){var t=e.allowCreateWhileLoading,n=void 0!==t&&t,r=e.createOptionPosition,a=void 0===r?"last":r,d=e.formatCreateLabel,p=void 0===d?h.formatCreateLabel:d,g=e.isValidNewOption,v=void 0===g?h.isValidNewOption:g,y=e.getNewOptionData,m=void 0===y?h.getNewOptionData:y,b=e.onCreateOption,_=e.options,w=void 0===_?[]:_,x=e.onChange,S=(0,l.A)(e,f),A=S.getOptionValue,E=void 0===A?i.g:A,C=S.getOptionLabel,R=void 0===C?i.b:C,O=S.inputValue,T=S.isLoading,k=S.isMulti,M=S.value,P=S.name,I=(0,o.useMemo)((function(){return v(O,(0,c.H)(M),w,{getOptionValue:E,getOptionLabel:R})?m(O,p(O)):void 0}),[p,m,R,E,O,v,w,M]),N=(0,o.useMemo)((function(){return!n&&T||!I?w:"first"===a?[I].concat((0,u.A)(w)):[].concat((0,u.A)(w),[I])}),[n,a,T,I,w]),D=(0,o.useCallback)((function(e,t){if("select-option"!==t.action)return x(e,t);var n=Array.isArray(e)?e:[e];if(n[n.length-1]!==I)x(e,t);else if(b)b(O);else{var r=m(O,O),o={action:"create-option",name:P,option:r};x((0,c.D)(k,[].concat((0,u.A)((0,c.H)(M)),[r]),r),o)}}),[m,O,k,P,I,b,x,M]);return(0,s.A)((0,s.A)({},S),{},{options:N,onChange:D})}((0,a.u)(e));return o.createElement(i.S,(0,r.A)({ref:t},n))}))},17531:(e,t,n)=>{"use strict";n.d(t,{S:()=>ge,b:()=>K,g:()=>Z});var r=n(58168),o=n(89379),i=n(23029),a=n(92901),s=n(85501),u=n(49640),l=n(45458),c=n(96540),f=n(16093),d=n(24553),h=Number.isNaN||function(e){return"number"===typeof e&&e!==e};function p(e,t){if(e.length!==t.length)return!1;for(var n=0;n1?"s":""," ").concat(o.join(","),", selected.");case"select-option":return"option ".concat(r,i?" is disabled. Select another option.":", selected.");default:return""}},onFocus:function(e){var t=e.context,n=e.focused,r=e.options,o=e.label,i=void 0===o?"":o,a=e.selectValue,s=e.isDisabled,u=e.isSelected,l=e.isAppleDevice,c=function(e,t){return e&&e.length?"".concat(e.indexOf(t)+1," of ").concat(e.length):""};if("value"===t&&a)return"value ".concat(i," focused, ").concat(c(a,n),".");if("menu"===t&&l){var f=s?" disabled":"",d="".concat(u?" selected":"").concat(f);return"".concat(i).concat(d,", ").concat(c(r,n),".")}return""},onFilter:function(e){var t=e.inputValue,n=e.resultsMessage;return"".concat(n).concat(t?" 
for search term "+t:"",".")}},b=function(e){var t=e.ariaSelection,n=e.focusedOption,r=e.focusedValue,i=e.focusableOptions,a=e.isFocused,s=e.selectValue,u=e.selectProps,l=e.id,f=e.isAppleDevice,h=u.ariaLiveMessages,p=u.getOptionLabel,g=u.inputValue,v=u.isMulti,b=u.isOptionDisabled,_=u.isSearchable,w=u.menuIsOpen,x=u.options,S=u.screenReaderStatus,A=u.tabSelectsValue,E=u.isLoading,C=u["aria-label"],R=u["aria-live"],O=(0,c.useMemo)((function(){return(0,o.A)((0,o.A)({},m),h||{})}),[h]),T=(0,c.useMemo)((function(){var e,n="";if(t&&O.onChange){var r=t.option,i=t.options,a=t.removedValue,u=t.removedValues,l=t.value,c=a||r||(e=l,Array.isArray(e)?null:e),f=c?p(c):"",d=i||u||void 0,h=d?d.map(p):[],g=(0,o.A)({isDisabled:c&&b(c,s),label:f,labels:h},t);n=O.onChange(g)}return n}),[t,O,b,s,p]),k=(0,c.useMemo)((function(){var e="",t=n||r,o=!!(n&&s&&s.includes(n));if(t&&O.onFocus){var a={focused:t,label:p(t),isDisabled:b(t,s),isSelected:o,options:i,context:t===n?"menu":"value",selectValue:s,isAppleDevice:f};e=O.onFocus(a)}return e}),[n,r,p,b,O,i,s,f]),M=(0,c.useMemo)((function(){var e="";if(w&&x.length&&!E&&O.onFilter){var t=S({count:i.length});e=O.onFilter({inputValue:g,resultsMessage:t})}return e}),[i,g,w,O,x,S,E]),P="initial-input-focus"===(null===t||void 0===t?void 0:t.action),I=(0,c.useMemo)((function(){var e="";if(O.guidance){var t=r?"value":w?"menu":"input";e=O.guidance({"aria-label":C,context:t,isDisabled:n&&b(n,s),isMulti:v,isSearchable:_,tabSelectsValue:A,isInitialFocus:P})}return e}),[C,n,r,v,b,_,w,O,s,A,P]),N=(0,d.Y)(c.Fragment,null,(0,d.Y)("span",{id:"aria-selection"},T),(0,d.Y)("span",{id:"aria-focused"},k),(0,d.Y)("span",{id:"aria-results"},M),(0,d.Y)("span",{id:"aria-guidance"},I));return(0,d.Y)(c.Fragment,null,(0,d.Y)(y,{id:l},P&&N),(0,d.Y)(y,{"aria-live":R,"aria-atomic":"false","aria-relevant":"additions 
text",role:"log"},a&&!P&&N))},_=[{base:"A",letters:"A\u24b6\uff21\xc0\xc1\xc2\u1ea6\u1ea4\u1eaa\u1ea8\xc3\u0100\u0102\u1eb0\u1eae\u1eb4\u1eb2\u0226\u01e0\xc4\u01de\u1ea2\xc5\u01fa\u01cd\u0200\u0202\u1ea0\u1eac\u1eb6\u1e00\u0104\u023a\u2c6f"},{base:"AA",letters:"\ua732"},{base:"AE",letters:"\xc6\u01fc\u01e2"},{base:"AO",letters:"\ua734"},{base:"AU",letters:"\ua736"},{base:"AV",letters:"\ua738\ua73a"},{base:"AY",letters:"\ua73c"},{base:"B",letters:"B\u24b7\uff22\u1e02\u1e04\u1e06\u0243\u0182\u0181"},{base:"C",letters:"C\u24b8\uff23\u0106\u0108\u010a\u010c\xc7\u1e08\u0187\u023b\ua73e"},{base:"D",letters:"D\u24b9\uff24\u1e0a\u010e\u1e0c\u1e10\u1e12\u1e0e\u0110\u018b\u018a\u0189\ua779"},{base:"DZ",letters:"\u01f1\u01c4"},{base:"Dz",letters:"\u01f2\u01c5"},{base:"E",letters:"E\u24ba\uff25\xc8\xc9\xca\u1ec0\u1ebe\u1ec4\u1ec2\u1ebc\u0112\u1e14\u1e16\u0114\u0116\xcb\u1eba\u011a\u0204\u0206\u1eb8\u1ec6\u0228\u1e1c\u0118\u1e18\u1e1a\u0190\u018e"},{base:"F",letters:"F\u24bb\uff26\u1e1e\u0191\ua77b"},{base:"G",letters:"G\u24bc\uff27\u01f4\u011c\u1e20\u011e\u0120\u01e6\u0122\u01e4\u0193\ua7a0\ua77d\ua77e"},{base:"H",letters:"H\u24bd\uff28\u0124\u1e22\u1e26\u021e\u1e24\u1e28\u1e2a\u0126\u2c67\u2c75\ua78d"},{base:"I",letters:"I\u24be\uff29\xcc\xcd\xce\u0128\u012a\u012c\u0130\xcf\u1e2e\u1ec8\u01cf\u0208\u020a\u1eca\u012e\u1e2c\u0197"},{base:"J",letters:"J\u24bf\uff2a\u0134\u0248"},{base:"K",letters:"K\u24c0\uff2b\u1e30\u01e8\u1e32\u0136\u1e34\u0198\u2c69\ua740\ua742\ua744\ua7a2"},{base:"L",letters:"L\u24c1\uff2c\u013f\u0139\u013d\u1e36\u1e38\u013b\u1e3c\u1e3a\u0141\u023d\u2c62\u2c60\ua748\ua746\ua780"},{base:"LJ",letters:"\u01c7"},{base:"Lj",letters:"\u01c8"},{base:"M",letters:"M\u24c2\uff2d\u1e3e\u1e40\u1e42\u2c6e\u019c"},{base:"N",letters:"N\u24c3\uff2e\u01f8\u0143\xd1\u1e44\u0147\u1e46\u0145\u1e4a\u1e48\u0220\u019d\ua790\ua7a4"},{base:"NJ",letters:"\u01ca"},{base:"Nj",letters:"\u01cb"},{base:"O",letters:"O\u24c4\uff2f\xd2\xd3\xd4\u1ed2\u1ed0\u1ed6\u1ed4\xd5\u1e4c\u022c\u1e4e\u014c\u1e50\u1e52\u014e\u022e\u0230\xd6\u022a\u1ece\u0150\u01d1\u020c\u020e\u01a0\u1edc\u1eda\u1ee0\u1ede\u1ee2\u1ecc\u1ed8\u01ea\u01ec\xd8\u01fe\u0186\u019f\ua74a\ua74c"},{base:"OI",letters:"\u01a2"},{base:"OO",letters:"\ua74e"},{base:"OU",letters:"\u0222"},{base:"P",letters:"P\u24c5\uff30\u1e54\u1e56\u01a4\u2c63\ua750\ua752\ua754"},{base:"Q",letters:"Q\u24c6\uff31\ua756\ua758\u024a"},{base:"R",letters:"R\u24c7\uff32\u0154\u1e58\u0158\u0210\u0212\u1e5a\u1e5c\u0156\u1e5e\u024c\u2c64\ua75a\ua7a6\ua782"},{base:"S",letters:"S\u24c8\uff33\u1e9e\u015a\u1e64\u015c\u1e60\u0160\u1e66\u1e62\u1e68\u0218\u015e\u2c7e\ua7a8\ua784"},{base:"T",letters:"T\u24c9\uff34\u1e6a\u0164\u1e6c\u021a\u0162\u1e70\u1e6e\u0166\u01ac\u01ae\u023e\ua786"},{base:"TZ",letters:"\ua728"},{base:"U",letters:"U\u24ca\uff35\xd9\xda\xdb\u0168\u1e78\u016a\u1e7a\u016c\xdc\u01db\u01d7\u01d5\u01d9\u1ee6\u016e\u0170\u01d3\u0214\u0216\u01af\u1eea\u1ee8\u1eee\u1eec\u1ef0\u1ee4\u1e72\u0172\u1e76\u1e74\u0244"},{base:"V",letters:"V\u24cb\uff36\u1e7c\u1e7e\u01b2\ua75e\u0245"},{base:"VY",letters:"\ua760"},{base:"W",letters:"W\u24cc\uff37\u1e80\u1e82\u0174\u1e86\u1e84\u1e88\u2c72"},{base:"X",letters:"X\u24cd\uff38\u1e8a\u1e8c"},{base:"Y",letters:"Y\u24ce\uff39\u1ef2\xdd\u0176\u1ef8\u0232\u1e8e\u0178\u1ef6\u1ef4\u01b3\u024e\u1efe"},{base:"Z",letters:"Z\u24cf\uff3a\u0179\u1e90\u017b\u017d\u1e92\u1e94\u01b5\u0224\u2c7f\u2c6b\ua762"},{base:"a",letters:"a\u24d0\uff41\u1e9a\xe0\xe1\xe2\u1ea7\u1ea5\u1eab\u1ea9\xe3\u0101\u0103\u1eb1\u1eaf\u1eb5\u1eb3\u0227\u01e1\xe4\u01df\u1ea3\xe5\u01fb\u01ce\
u0201\u0203\u1ea1\u1ead\u1eb7\u1e01\u0105\u2c65\u0250"},{base:"aa",letters:"\ua733"},{base:"ae",letters:"\xe6\u01fd\u01e3"},{base:"ao",letters:"\ua735"},{base:"au",letters:"\ua737"},{base:"av",letters:"\ua739\ua73b"},{base:"ay",letters:"\ua73d"},{base:"b",letters:"b\u24d1\uff42\u1e03\u1e05\u1e07\u0180\u0183\u0253"},{base:"c",letters:"c\u24d2\uff43\u0107\u0109\u010b\u010d\xe7\u1e09\u0188\u023c\ua73f\u2184"},{base:"d",letters:"d\u24d3\uff44\u1e0b\u010f\u1e0d\u1e11\u1e13\u1e0f\u0111\u018c\u0256\u0257\ua77a"},{base:"dz",letters:"\u01f3\u01c6"},{base:"e",letters:"e\u24d4\uff45\xe8\xe9\xea\u1ec1\u1ebf\u1ec5\u1ec3\u1ebd\u0113\u1e15\u1e17\u0115\u0117\xeb\u1ebb\u011b\u0205\u0207\u1eb9\u1ec7\u0229\u1e1d\u0119\u1e19\u1e1b\u0247\u025b\u01dd"},{base:"f",letters:"f\u24d5\uff46\u1e1f\u0192\ua77c"},{base:"g",letters:"g\u24d6\uff47\u01f5\u011d\u1e21\u011f\u0121\u01e7\u0123\u01e5\u0260\ua7a1\u1d79\ua77f"},{base:"h",letters:"h\u24d7\uff48\u0125\u1e23\u1e27\u021f\u1e25\u1e29\u1e2b\u1e96\u0127\u2c68\u2c76\u0265"},{base:"hv",letters:"\u0195"},{base:"i",letters:"i\u24d8\uff49\xec\xed\xee\u0129\u012b\u012d\xef\u1e2f\u1ec9\u01d0\u0209\u020b\u1ecb\u012f\u1e2d\u0268\u0131"},{base:"j",letters:"j\u24d9\uff4a\u0135\u01f0\u0249"},{base:"k",letters:"k\u24da\uff4b\u1e31\u01e9\u1e33\u0137\u1e35\u0199\u2c6a\ua741\ua743\ua745\ua7a3"},{base:"l",letters:"l\u24db\uff4c\u0140\u013a\u013e\u1e37\u1e39\u013c\u1e3d\u1e3b\u017f\u0142\u019a\u026b\u2c61\ua749\ua781\ua747"},{base:"lj",letters:"\u01c9"},{base:"m",letters:"m\u24dc\uff4d\u1e3f\u1e41\u1e43\u0271\u026f"},{base:"n",letters:"n\u24dd\uff4e\u01f9\u0144\xf1\u1e45\u0148\u1e47\u0146\u1e4b\u1e49\u019e\u0272\u0149\ua791\ua7a5"},{base:"nj",letters:"\u01cc"},{base:"o",letters:"o\u24de\uff4f\xf2\xf3\xf4\u1ed3\u1ed1\u1ed7\u1ed5\xf5\u1e4d\u022d\u1e4f\u014d\u1e51\u1e53\u014f\u022f\u0231\xf6\u022b\u1ecf\u0151\u01d2\u020d\u020f\u01a1\u1edd\u1edb\u1ee1\u1edf\u1ee3\u1ecd\u1ed9\u01eb\u01ed\xf8\u01ff\u0254\ua74b\ua74d\u0275"},{base:"oi",letters:"\u01a3"},{base:"ou",letters:"\u0223"},{base:"oo",letters:"\ua74f"},{base:"p",letters:"p\u24df\uff50\u1e55\u1e57\u01a5\u1d7d\ua751\ua753\ua755"},{base:"q",letters:"q\u24e0\uff51\u024b\ua757\ua759"},{base:"r",letters:"r\u24e1\uff52\u0155\u1e59\u0159\u0211\u0213\u1e5b\u1e5d\u0157\u1e5f\u024d\u027d\ua75b\ua7a7\ua783"},{base:"s",letters:"s\u24e2\uff53\xdf\u015b\u1e65\u015d\u1e61\u0161\u1e67\u1e63\u1e69\u0219\u015f\u023f\ua7a9\ua785\u1e9b"},{base:"t",letters:"t\u24e3\uff54\u1e6b\u1e97\u0165\u1e6d\u021b\u0163\u1e71\u1e6f\u0167\u01ad\u0288\u2c66\ua787"},{base:"tz",letters:"\ua729"},{base:"u",letters:"u\u24e4\uff55\xf9\xfa\xfb\u0169\u1e79\u016b\u1e7b\u016d\xfc\u01dc\u01d8\u01d6\u01da\u1ee7\u016f\u0171\u01d4\u0215\u0217\u01b0\u1eeb\u1ee9\u1eef\u1eed\u1ef1\u1ee5\u1e73\u0173\u1e77\u1e75\u0289"},{base:"v",letters:"v\u24e5\uff56\u1e7d\u1e7f\u028b\ua75f\u028c"},{base:"vy",letters:"\ua761"},{base:"w",letters:"w\u24e6\uff57\u1e81\u1e83\u0175\u1e87\u1e85\u1e98\u1e89\u2c73"},{base:"x",letters:"x\u24e7\uff58\u1e8b\u1e8d"},{base:"y",letters:"y\u24e8\uff59\u1ef3\xfd\u0177\u1ef9\u0233\u1e8f\xff\u1ef7\u1e99\u1ef5\u01b4\u024f\u1eff"},{base:"z",letters:"z\u24e9\uff5a\u017a\u1e91\u017c\u017e\u1e93\u1e95\u01b6\u0225\u0240\u2c6c\ua763"}],w=new RegExp("["+_.map((function(e){return e.letters})).join("")+"]","g"),x={},S=0;S<_.length;S++)for(var 
A=_[S],E=0;E0,g=f-d-c,v=!1;g>t&&a.current&&(r&&r(e),a.current=!1),p&&s.current&&(i&&i(e),s.current=!1),p&&t>g?(n&&!a.current&&n(e),h.scrollTop=f,v=!0,a.current=!0):!p&&-t>c&&(o&&!s.current&&o(e),h.scrollTop=0,v=!0,s.current=!0),v&&P(e)}}),[n,r,o,i]),h=(0,c.useCallback)((function(e){d(e,e.deltaY)}),[d]),p=(0,c.useCallback)((function(e){u.current=e.changedTouches[0].clientY}),[]),g=(0,c.useCallback)((function(e){var t=u.current-e.changedTouches[0].clientY;d(e,t)}),[d]),v=(0,c.useCallback)((function(e){if(e){var t=!!f.s&&{passive:!1};e.addEventListener("wheel",h,t),e.addEventListener("touchstart",p,t),e.addEventListener("touchmove",g,t)}}),[g,p,h]),y=(0,c.useCallback)((function(e){e&&(e.removeEventListener("wheel",h,!1),e.removeEventListener("touchstart",p,!1),e.removeEventListener("touchmove",g,!1))}),[g,p,h]);return(0,c.useEffect)((function(){if(t){var e=l.current;return v(e),function(){y(e)}}}),[t,v,y]),function(e){l.current=e}}({isEnabled:void 0===r||r,onBottomArrive:e.onBottomArrive,onBottomLeave:e.onBottomLeave,onTopArrive:e.onTopArrive,onTopLeave:e.onTopLeave}),i=function(e){var t=e.isEnabled,n=e.accountForScrollbars,r=void 0===n||n,o=(0,c.useRef)({}),i=(0,c.useRef)(null),a=(0,c.useCallback)((function(e){if(U){var t=document.body,n=t&&t.style;if(r&&I.forEach((function(e){var t=n&&n[e];o.current[e]=t})),r&&z<1){var i=parseInt(o.current.paddingRight,10)||0,a=document.body?document.body.clientWidth:0,s=window.innerWidth-a+i||0;Object.keys(N).forEach((function(e){var t=N[e];n&&(n[e]=t)})),n&&(n.paddingRight="".concat(s,"px"))}t&&j()&&(t.addEventListener("touchmove",D,B),e&&(e.addEventListener("touchstart",F,B),e.addEventListener("touchmove",L,B))),z+=1}}),[r]),s=(0,c.useCallback)((function(e){if(U){var t=document.body,n=t&&t.style;z=Math.max(z-1,0),r&&z<1&&I.forEach((function(e){var t=o.current[e];n&&(n[e]=t)})),t&&j()&&(t.removeEventListener("touchmove",D,B),e&&(e.removeEventListener("touchstart",F,B),e.removeEventListener("touchmove",L,B)))}}),[r]);return(0,c.useEffect)((function(){if(t){var e=i.current;return a(e),function(){s(e)}}}),[t,a,s]),function(e){i.current=e}}({isEnabled:n});return(0,d.Y)(c.Fragment,null,n&&(0,d.Y)("div",{onClick:V,css:$}),t((function(e){o(e),i(e)})))}var W={name:"1a0ro4n-requiredInput",styles:"label:requiredInput;opacity:0;pointer-events:none;position:absolute;bottom:0;left:0;right:0;width:100%"},q=function(e){var t=e.name,n=e.onFocus;return(0,d.Y)("input",{required:!0,name:t,tabIndex:-1,"aria-hidden":"true",onFocus:n,css:W,value:"",onChange:function(){}})};function G(e){var t;return"undefined"!==typeof window&&null!=window.navigator&&e.test((null===(t=window.navigator.userAgentData)||void 0===t?void 0:t.platform)||window.navigator.platform)}function Y(){return G(/^Mac/i)}function X(){return G(/^iPhone/i)||G(/^iPad/i)||Y()&&navigator.maxTouchPoints>1}var K=function(e){return e.label},Z=function(e){return e.value},Q={clearIndicator:f.a,container:f.b,control:f.d,dropdownIndicator:f.e,group:f.g,groupHeading:f.f,indicatorsContainer:f.i,indicatorSeparator:f.h,input:f.j,loadingIndicator:f.l,loadingMessage:f.k,menu:f.m,menuList:f.n,menuPortal:f.o,multiValue:f.p,multiValueLabel:f.q,multiValueRemove:f.t,noOptionsMessage:f.u,option:f.v,placeholder:f.w,singleValue:f.x,valueContainer:f.y};var J,ee={borderRadius:4,colors:{primary:"#2684FF",primary75:"#4C9AFF",primary50:"#B2D4FF",primary25:"#DEEBFF",danger:"#DE350B",dangerLight:"#FFBDAD",neutral0:"hsl(0, 0%, 100%)",neutral5:"hsl(0, 0%, 95%)",neutral10:"hsl(0, 0%, 90%)",neutral20:"hsl(0, 0%, 80%)",neutral30:"hsl(0, 0%, 
70%)",neutral40:"hsl(0, 0%, 60%)",neutral50:"hsl(0, 0%, 50%)",neutral60:"hsl(0, 0%, 40%)",neutral70:"hsl(0, 0%, 30%)",neutral80:"hsl(0, 0%, 20%)",neutral90:"hsl(0, 0%, 10%)"},spacing:{baseUnit:4,controlHeight:38,menuGutter:8}},te={"aria-live":"polite",backspaceRemovesValue:!0,blurInputOnSelect:(0,f.z)(),captureMenuScroll:!(0,f.z)(),classNames:{},closeMenuOnSelect:!0,closeMenuOnScroll:!1,components:{},controlShouldRenderValue:!0,escapeClearsValue:!1,filterOption:function(e,t){if(e.data.__isNew__)return!0;var n=(0,o.A)({ignoreCase:!0,ignoreAccents:!0,stringify:T,trim:!0,matchFrom:"any"},J),r=n.ignoreCase,i=n.ignoreAccents,a=n.stringify,s=n.trim,u=n.matchFrom,l=s?O(t):t,c=s?O(a(e)):a(e);return r&&(l=l.toLowerCase(),c=c.toLowerCase()),i&&(l=R(l),c=C(c)),"start"===u?c.substr(0,l.length)===l:c.indexOf(l)>-1},formatGroupLabel:function(e){return e.label},getOptionLabel:K,getOptionValue:Z,isDisabled:!1,isLoading:!1,isMulti:!1,isRtl:!1,isSearchable:!0,isOptionDisabled:function(e){return!!e.isDisabled},loadingMessage:function(){return"Loading..."},maxMenuHeight:300,minMenuHeight:140,menuIsOpen:!1,menuPlacement:"bottom",menuPosition:"absolute",menuShouldBlockScroll:!1,menuShouldScrollIntoView:!(0,f.A)(),noOptionsMessage:function(){return"No options"},openMenuOnFocus:!1,openMenuOnClick:!0,options:[],pageSize:5,placeholder:"Select...",screenReaderStatus:function(e){var t=e.count;return"".concat(t," result").concat(1!==t?"s":""," available")},styles:{},tabIndex:0,tabSelectsValue:!0,unstyled:!1};function ne(e,t,n,r){return{type:"option",data:t,isDisabled:ce(e,t,n),isSelected:fe(e,t,n),label:ue(e,t),value:le(e,t),index:r}}function re(e,t){return e.options.map((function(n,r){if("options"in n){var o=n.options.map((function(n,r){return ne(e,n,t,r)})).filter((function(t){return ae(e,t)}));return o.length>0?{type:"group",data:n,options:o,index:r}:void 0}var i=ne(e,n,t,r);return ae(e,i)?i:void 0})).filter(f.K)}function oe(e){return e.reduce((function(e,t){return"group"===t.type?e.push.apply(e,(0,l.A)(t.options.map((function(e){return e.data})))):e.push(t.data),e}),[])}function ie(e,t){return e.reduce((function(e,n){return"group"===n.type?e.push.apply(e,(0,l.A)(n.options.map((function(e){return{data:e.data,id:"".concat(t,"-").concat(n.index,"-").concat(e.index)}})))):e.push({data:n.data,id:"".concat(t,"-").concat(n.index)}),e}),[])}function ae(e,t){var n=e.inputValue,r=void 0===n?"":n,o=t.data,i=t.isSelected,a=t.label,s=t.value;return(!he(e)||!i)&&de(e,{label:a,value:s,data:o},r)}var se=function(e,t){var n;return(null===(n=e.find((function(e){return e.data===t})))||void 0===n?void 0:n.id)||null},ue=function(e,t){return e.getOptionLabel(t)},le=function(e,t){return e.getOptionValue(t)};function ce(e,t,n){return"function"===typeof e.isOptionDisabled&&e.isOptionDisabled(t,n)}function fe(e,t,n){if(n.indexOf(t)>-1)return!0;if("function"===typeof e.isOptionSelected)return e.isOptionSelected(t,n);var r=le(e,t);return n.some((function(t){return le(e,t)===r}))}function de(e,t,n){return!e.filterOption||e.filterOption(t,n)}var he=function(e){var t=e.hideSelectedOptions,n=e.isMulti;return void 0===t?n:t},pe=1,ge=function(e){(0,s.A)(n,e);var t=(0,u.A)(n);function n(e){var r;if((0,i.A)(this,n),(r=t.call(this,e)).state={ariaSelection:null,focusedOption:null,focusedOptionId:null,focusableOptionsWithIds:[],focusedValue:null,inputIsHidden:!1,isFocused:!1,selectValue:[],clearFocusValueOnUpdate:!1,prevWasFocused:!1,inputIsHiddenAfterUpdate:void 0,prevProps:void 
0,instancePrefix:""},r.blockOptionHover=!1,r.isComposing=!1,r.commonProps=void 0,r.initialTouchX=0,r.initialTouchY=0,r.openAfterFocus=!1,r.scrollToFocusedOptionOnUpdate=!1,r.userIsDragging=void 0,r.isAppleDevice=Y()||X(),r.controlRef=null,r.getControlRef=function(e){r.controlRef=e},r.focusedOptionRef=null,r.getFocusedOptionRef=function(e){r.focusedOptionRef=e},r.menuListRef=null,r.getMenuListRef=function(e){r.menuListRef=e},r.inputRef=null,r.getInputRef=function(e){r.inputRef=e},r.focus=r.focusInput,r.blur=r.blurInput,r.onChange=function(e,t){var n=r.props,o=n.onChange,i=n.name;t.name=i,r.ariaOnChange(e,t),o(e,t)},r.setValue=function(e,t,n){var o=r.props,i=o.closeMenuOnSelect,a=o.isMulti,s=o.inputValue;r.onInputChange("",{action:"set-value",prevInputValue:s}),i&&(r.setState({inputIsHiddenAfterUpdate:!a}),r.onMenuClose()),r.setState({clearFocusValueOnUpdate:!0}),r.onChange(e,{action:t,option:n})},r.selectOption=function(e){var t=r.props,n=t.blurInputOnSelect,o=t.isMulti,i=t.name,a=r.state.selectValue,s=o&&r.isOptionSelected(e,a),u=r.isOptionDisabled(e,a);if(s){var c=r.getOptionValue(e);r.setValue((0,f.B)(a.filter((function(e){return r.getOptionValue(e)!==c}))),"deselect-option",e)}else{if(u)return void r.ariaOnChange((0,f.C)(e),{action:"select-option",option:e,name:i});o?r.setValue((0,f.B)([].concat((0,l.A)(a),[e])),"select-option",e):r.setValue((0,f.C)(e),"select-option")}n&&r.blurInput()},r.removeValue=function(e){var t=r.props.isMulti,n=r.state.selectValue,o=r.getOptionValue(e),i=n.filter((function(e){return r.getOptionValue(e)!==o})),a=(0,f.D)(t,i,i[0]||null);r.onChange(a,{action:"remove-value",removedValue:e}),r.focusInput()},r.clearValue=function(){var e=r.state.selectValue;r.onChange((0,f.D)(r.props.isMulti,[],null),{action:"clear",removedValues:e})},r.popValue=function(){var e=r.props.isMulti,t=r.state.selectValue,n=t[t.length-1],o=t.slice(0,t.length-1),i=(0,f.D)(e,o,o[0]||null);r.onChange(i,{action:"pop-value",removedValue:n})},r.getFocusedOptionId=function(e){return se(r.state.focusableOptionsWithIds,e)},r.getFocusableOptionsWithIds=function(){return ie(re(r.props,r.state.selectValue),r.getElementId("option"))},r.getValue=function(){return r.state.selectValue},r.cx=function(){for(var e=arguments.length,t=new Array(e),n=0;n5||i>5}},r.onTouchEnd=function(e){r.userIsDragging||(r.controlRef&&!r.controlRef.contains(e.target)&&r.menuListRef&&!r.menuListRef.contains(e.target)&&r.blurInput(),r.initialTouchX=0,r.initialTouchY=0)},r.onControlTouchEnd=function(e){r.userIsDragging||r.onControlMouseDown(e)},r.onClearIndicatorTouchEnd=function(e){r.userIsDragging||r.onClearIndicatorMouseDown(e)},r.onDropdownIndicatorTouchEnd=function(e){r.userIsDragging||r.onDropdownIndicatorMouseDown(e)},r.handleInputChange=function(e){var t=r.props.inputValue,n=e.currentTarget.value;r.setState({inputIsHiddenAfterUpdate:!1}),r.onInputChange(n,{action:"input-change",prevInputValue:t}),r.props.menuIsOpen||r.onMenuOpen()},r.onInputFocus=function(e){r.props.onFocus&&r.props.onFocus(e),r.setState({inputIsHiddenAfterUpdate:!1,isFocused:!0}),(r.openAfterFocus||r.props.openMenuOnFocus)&&r.openMenu("first"),r.openAfterFocus=!1},r.onInputBlur=function(e){var t=r.props.inputValue;r.menuListRef&&r.menuListRef.contains(document.activeElement)?r.inputRef.focus():(r.props.onBlur&&r.props.onBlur(e),r.onInputChange("",{action:"input-blur",prevInputValue:t}),r.onMenuClose(),r.setState({focusedValue:null,isFocused:!1}))},r.onOptionHover=function(e){if(!r.blockOptionHover&&r.state.focusedOption!==e){var 
t=r.getFocusableOptions().indexOf(e);r.setState({focusedOption:e,focusedOptionId:t>-1?r.getFocusedOptionId(e):null})}},r.shouldHideSelectedOptions=function(){return he(r.props)},r.onValueInputFocus=function(e){e.preventDefault(),e.stopPropagation(),r.focus()},r.onKeyDown=function(e){var t=r.props,n=t.isMulti,o=t.backspaceRemovesValue,i=t.escapeClearsValue,a=t.inputValue,s=t.isClearable,u=t.isDisabled,l=t.menuIsOpen,c=t.onKeyDown,f=t.tabSelectsValue,d=t.openMenuOnFocus,h=r.state,p=h.focusedOption,g=h.focusedValue,v=h.selectValue;if(!u&&("function"!==typeof c||(c(e),!e.defaultPrevented))){switch(r.blockOptionHover=!0,e.key){case"ArrowLeft":if(!n||a)return;r.focusValue("previous");break;case"ArrowRight":if(!n||a)return;r.focusValue("next");break;case"Delete":case"Backspace":if(a)return;if(g)r.removeValue(g);else{if(!o)return;n?r.popValue():s&&r.clearValue()}break;case"Tab":if(r.isComposing)return;if(e.shiftKey||!l||!f||!p||d&&r.isOptionSelected(p,v))return;r.selectOption(p);break;case"Enter":if(229===e.keyCode)break;if(l){if(!p)return;if(r.isComposing)return;r.selectOption(p);break}return;case"Escape":l?(r.setState({inputIsHiddenAfterUpdate:!1}),r.onInputChange("",{action:"menu-close",prevInputValue:a}),r.onMenuClose()):s&&i&&r.clearValue();break;case" ":if(a)return;if(!l){r.openMenu("first");break}if(!p)return;r.selectOption(p);break;case"ArrowUp":l?r.focusOption("up"):r.openMenu("last");break;case"ArrowDown":l?r.focusOption("down"):r.openMenu("first");break;case"PageUp":if(!l)return;r.focusOption("pageup");break;case"PageDown":if(!l)return;r.focusOption("pagedown");break;case"Home":if(!l)return;r.focusOption("first");break;case"End":if(!l)return;r.focusOption("last");break;default:return}e.preventDefault()}},r.state.instancePrefix="react-select-"+(r.props.instanceId||++pe),r.state.selectValue=(0,f.H)(e.value),e.menuIsOpen&&r.state.selectValue.length){var a=r.getFocusableOptionsWithIds(),s=r.buildFocusableOptions(),u=s.indexOf(r.state.selectValue[0]);r.state.focusableOptionsWithIds=a,r.state.focusedOption=s[u],r.state.focusedOptionId=se(a,s[u])}return r}return(0,a.A)(n,[{key:"componentDidMount",value:function(){this.startListeningComposition(),this.startListeningToTouch(),this.props.closeMenuOnScroll&&document&&document.addEventListener&&document.addEventListener("scroll",this.onScroll,!0),this.props.autoFocus&&this.focusInput(),this.props.menuIsOpen&&this.state.focusedOption&&this.menuListRef&&this.focusedOptionRef&&(0,f.I)(this.menuListRef,this.focusedOptionRef)}},{key:"componentDidUpdate",value:function(e){var 
t=this.props,n=t.isDisabled,r=t.menuIsOpen,o=this.state.isFocused;(o&&!n&&e.isDisabled||o&&r&&!e.menuIsOpen)&&this.focusInput(),o&&n&&!e.isDisabled?this.setState({isFocused:!1},this.onMenuClose):o||n||!e.isDisabled||this.inputRef!==document.activeElement||this.setState({isFocused:!0}),this.menuListRef&&this.focusedOptionRef&&this.scrollToFocusedOptionOnUpdate&&((0,f.I)(this.menuListRef,this.focusedOptionRef),this.scrollToFocusedOptionOnUpdate=!1)}},{key:"componentWillUnmount",value:function(){this.stopListeningComposition(),this.stopListeningToTouch(),document.removeEventListener("scroll",this.onScroll,!0)}},{key:"onMenuOpen",value:function(){this.props.onMenuOpen()}},{key:"onMenuClose",value:function(){this.onInputChange("",{action:"menu-close",prevInputValue:this.props.inputValue}),this.props.onMenuClose()}},{key:"onInputChange",value:function(e,t){this.props.onInputChange(e,t)}},{key:"focusInput",value:function(){this.inputRef&&this.inputRef.focus()}},{key:"blurInput",value:function(){this.inputRef&&this.inputRef.blur()}},{key:"openMenu",value:function(e){var t=this,n=this.state,r=n.selectValue,o=n.isFocused,i=this.buildFocusableOptions(),a="first"===e?0:i.length-1;if(!this.props.isMulti){var s=i.indexOf(r[0]);s>-1&&(a=s)}this.scrollToFocusedOptionOnUpdate=!(o&&this.menuListRef),this.setState({inputIsHiddenAfterUpdate:!1,focusedValue:null,focusedOption:i[a],focusedOptionId:this.getFocusedOptionId(i[a])},(function(){return t.onMenuOpen()}))}},{key:"focusValue",value:function(e){var t=this.state,n=t.selectValue,r=t.focusedValue;if(this.props.isMulti){this.setState({focusedOption:null});var o=n.indexOf(r);r||(o=-1);var i=n.length-1,a=-1;if(n.length){switch(e){case"previous":a=0===o?0:-1===o?i:o-1;break;case"next":o>-1&&o0&&void 0!==arguments[0]?arguments[0]:"first",t=this.props.pageSize,n=this.state.focusedOption,r=this.getFocusableOptions();if(r.length){var o=0,i=r.indexOf(n);n||(i=-1),"up"===e?o=i>0?i-1:r.length-1:"down"===e?o=(i+1)%r.length:"pageup"===e?(o=i-t)<0&&(o=0):"pagedown"===e?(o=i+t)>r.length-1&&(o=r.length-1):"last"===e&&(o=r.length-1),this.scrollToFocusedOptionOnUpdate=!0,this.setState({focusedOption:r[o],focusedValue:null,focusedOptionId:this.getFocusedOptionId(r[o])})}}},{key:"getTheme",value:function(){return this.props.theme?"function"===typeof this.props.theme?this.props.theme(ee):(0,o.A)((0,o.A)({},ee),this.props.theme):ee}},{key:"getCommonProps",value:function(){var e=this.clearValue,t=this.cx,n=this.getStyles,r=this.getClassNames,o=this.getValue,i=this.selectOption,a=this.setValue,s=this.props,u=s.isMulti,l=s.isRtl,c=s.options;return{clearValue:e,cx:t,getStyles:n,getClassNames:r,getValue:o,hasValue:this.hasValue(),isMulti:u,isRtl:l,options:c,selectOption:i,selectProps:s,setValue:a,theme:this.getTheme()}}},{key:"hasValue",value:function(){return this.state.selectValue.length>0}},{key:"hasOptions",value:function(){return!!this.getFocusableOptions().length}},{key:"isClearable",value:function(){var e=this.props,t=e.isClearable,n=e.isMulti;return void 0===t?n:t}},{key:"isOptionDisabled",value:function(e,t){return ce(this.props,e,t)}},{key:"isOptionSelected",value:function(e,t){return fe(this.props,e,t)}},{key:"filterOption",value:function(e,t){return de(this.props,e,t)}},{key:"formatOptionLabel",value:function(e,t){if("function"===typeof this.props.formatOptionLabel){var n=this.props.inputValue,r=this.state.selectValue;return this.props.formatOptionLabel(e,{context:t,inputValue:n,selectValue:r})}return 
this.getOptionLabel(e)}},{key:"formatGroupLabel",value:function(e){return this.props.formatGroupLabel(e)}},{key:"startListeningComposition",value:function(){document&&document.addEventListener&&(document.addEventListener("compositionstart",this.onCompositionStart,!1),document.addEventListener("compositionend",this.onCompositionEnd,!1))}},{key:"stopListeningComposition",value:function(){document&&document.removeEventListener&&(document.removeEventListener("compositionstart",this.onCompositionStart),document.removeEventListener("compositionend",this.onCompositionEnd))}},{key:"startListeningToTouch",value:function(){document&&document.addEventListener&&(document.addEventListener("touchstart",this.onTouchStart,!1),document.addEventListener("touchmove",this.onTouchMove,!1),document.addEventListener("touchend",this.onTouchEnd,!1))}},{key:"stopListeningToTouch",value:function(){document&&document.removeEventListener&&(document.removeEventListener("touchstart",this.onTouchStart),document.removeEventListener("touchmove",this.onTouchMove),document.removeEventListener("touchend",this.onTouchEnd))}},{key:"renderInput",value:function(){var e=this.props,t=e.isDisabled,n=e.isSearchable,i=e.inputId,a=e.inputValue,s=e.tabIndex,u=e.form,l=e.menuIsOpen,d=e.required,h=this.getComponents().Input,p=this.state,g=p.inputIsHidden,v=p.ariaSelection,y=this.commonProps,m=i||this.getElementId("input"),b=(0,o.A)((0,o.A)((0,o.A)({"aria-autocomplete":"list","aria-expanded":l,"aria-haspopup":!0,"aria-errormessage":this.props["aria-errormessage"],"aria-invalid":this.props["aria-invalid"],"aria-label":this.props["aria-label"],"aria-labelledby":this.props["aria-labelledby"],"aria-required":d,role:"combobox","aria-activedescendant":this.isAppleDevice?void 0:this.state.focusedOptionId||""},l&&{"aria-controls":this.getElementId("listbox")}),!n&&{"aria-readonly":!0}),this.hasValue()?"initial-input-focus"===(null===v||void 0===v?void 0:v.action)&&{"aria-describedby":this.getElementId("live-region")}:{"aria-describedby":this.getElementId("placeholder")});return n?c.createElement(h,(0,r.A)({},y,{autoCapitalize:"none",autoComplete:"off",autoCorrect:"off",id:m,innerRef:this.getInputRef,isDisabled:t,isHidden:g,onBlur:this.onInputBlur,onChange:this.handleInputChange,onFocus:this.onInputFocus,spellCheck:"false",tabIndex:s,form:u,type:"text",value:a},b)):c.createElement(M,(0,r.A)({id:m,innerRef:this.getInputRef,onBlur:this.onInputBlur,onChange:f.J,onFocus:this.onInputFocus,disabled:t,tabIndex:s,inputMode:"none",form:u,value:""},b))}},{key:"renderPlaceholderOrValue",value:function(){var e=this,t=this.getComponents(),n=t.MultiValue,o=t.MultiValueContainer,i=t.MultiValueLabel,a=t.MultiValueRemove,s=t.SingleValue,u=t.Placeholder,l=this.commonProps,f=this.props,d=f.controlShouldRenderValue,h=f.isDisabled,p=f.isMulti,g=f.inputValue,v=f.placeholder,y=this.state,m=y.selectValue,b=y.focusedValue,_=y.isFocused;if(!this.hasValue()||!d)return g?null:c.createElement(u,(0,r.A)({},l,{key:"placeholder",isDisabled:h,isFocused:_,innerProps:{id:this.getElementId("placeholder")}}),v);if(p)return m.map((function(t,s){var u=t===b,f="".concat(e.getOptionLabel(t),"-").concat(e.getOptionValue(t));return c.createElement(n,(0,r.A)({},l,{components:{Container:o,Label:i,Remove:a},isFocused:u,isDisabled:h,key:f,index:s,removeProps:{onClick:function(){return e.removeValue(t)},onTouchEnd:function(){return e.removeValue(t)},onMouseDown:function(e){e.preventDefault()}},data:t}),e.formatOptionLabel(t,"value"))}));if(g)return null;var w=m[0];return 
c.createElement(s,(0,r.A)({},l,{data:w,isDisabled:h}),this.formatOptionLabel(w,"value"))}},{key:"renderClearIndicator",value:function(){var e=this.getComponents().ClearIndicator,t=this.commonProps,n=this.props,o=n.isDisabled,i=n.isLoading,a=this.state.isFocused;if(!this.isClearable()||!e||o||!this.hasValue()||i)return null;var s={onMouseDown:this.onClearIndicatorMouseDown,onTouchEnd:this.onClearIndicatorTouchEnd,"aria-hidden":"true"};return c.createElement(e,(0,r.A)({},t,{innerProps:s,isFocused:a}))}},{key:"renderLoadingIndicator",value:function(){var e=this.getComponents().LoadingIndicator,t=this.commonProps,n=this.props,o=n.isDisabled,i=n.isLoading,a=this.state.isFocused;if(!e||!i)return null;return c.createElement(e,(0,r.A)({},t,{innerProps:{"aria-hidden":"true"},isDisabled:o,isFocused:a}))}},{key:"renderIndicatorSeparator",value:function(){var e=this.getComponents(),t=e.DropdownIndicator,n=e.IndicatorSeparator;if(!t||!n)return null;var o=this.commonProps,i=this.props.isDisabled,a=this.state.isFocused;return c.createElement(n,(0,r.A)({},o,{isDisabled:i,isFocused:a}))}},{key:"renderDropdownIndicator",value:function(){var e=this.getComponents().DropdownIndicator;if(!e)return null;var t=this.commonProps,n=this.props.isDisabled,o=this.state.isFocused,i={onMouseDown:this.onDropdownIndicatorMouseDown,onTouchEnd:this.onDropdownIndicatorTouchEnd,"aria-hidden":"true"};return c.createElement(e,(0,r.A)({},t,{innerProps:i,isDisabled:n,isFocused:o}))}},{key:"renderMenu",value:function(){var e=this,t=this.getComponents(),n=t.Group,o=t.GroupHeading,i=t.Menu,a=t.MenuList,s=t.MenuPortal,u=t.LoadingMessage,l=t.NoOptionsMessage,d=t.Option,h=this.commonProps,p=this.state.focusedOption,g=this.props,v=g.captureMenuScroll,y=g.inputValue,m=g.isLoading,b=g.loadingMessage,_=g.minMenuHeight,w=g.maxMenuHeight,x=g.menuIsOpen,S=g.menuPlacement,A=g.menuPosition,E=g.menuPortalTarget,C=g.menuShouldBlockScroll,R=g.menuShouldScrollIntoView,O=g.noOptionsMessage,T=g.onMenuScrollToTop,k=g.onMenuScrollToBottom;if(!x)return null;var M,P=function(t,n){var o=t.type,i=t.data,a=t.isDisabled,s=t.isSelected,u=t.label,l=t.value,f=p===i,g=a?void 0:function(){return e.onOptionHover(i)},v=a?void 0:function(){return e.selectOption(i)},y="".concat(e.getElementId("option"),"-").concat(n),m={id:y,onClick:v,onMouseMove:g,onMouseOver:g,tabIndex:-1,role:"option","aria-selected":e.isAppleDevice?void 0:s};return c.createElement(d,(0,r.A)({},h,{innerProps:m,data:i,isDisabled:a,isSelected:s,key:y,label:u,type:o,value:l,isFocused:f,innerRef:f?e.getFocusedOptionRef:void 0}),e.formatOptionLabel(t.data,"menu"))};if(this.hasOptions())M=this.getCategorizedOptions().map((function(t){if("group"===t.type){var i=t.data,a=t.options,s=t.index,u="".concat(e.getElementId("group"),"-").concat(s),l="".concat(u,"-heading");return c.createElement(n,(0,r.A)({},h,{key:u,data:i,options:a,Heading:o,headingProps:{id:l,data:t.data},label:e.formatGroupLabel(t.data)}),t.options.map((function(e){return P(e,"".concat(s,"-").concat(e.index))})))}if("option"===t.type)return P(t,"".concat(t.index))}));else if(m){var I=b({inputValue:y});if(null===I)return null;M=c.createElement(u,h,I)}else{var N=O({inputValue:y});if(null===N)return null;M=c.createElement(l,h,N)}var D={minMenuHeight:_,maxMenuHeight:w,menuPlacement:S,menuPosition:A,menuShouldScrollIntoView:R},L=c.createElement(f.M,(0,r.A)({},h,D),(function(t){var n=t.ref,o=t.placerProps,s=o.placement,u=o.maxHeight;return 
c.createElement(i,(0,r.A)({},h,D,{innerRef:n,innerProps:{onMouseDown:e.onMenuMouseDown,onMouseMove:e.onMenuMouseMove},isLoading:m,placement:s}),c.createElement(H,{captureEnabled:v,onTopArrive:T,onBottomArrive:k,lockEnabled:C},(function(t){return c.createElement(a,(0,r.A)({},h,{innerRef:function(n){e.getMenuListRef(n),t(n)},innerProps:{role:"listbox","aria-multiselectable":h.isMulti,id:e.getElementId("listbox")},isLoading:m,maxHeight:u,focusedOption:p}),M)})))}));return E||"fixed"===A?c.createElement(s,(0,r.A)({},h,{appendTo:E,controlElement:this.controlRef,menuPlacement:S,menuPosition:A}),L):L}},{key:"renderFormField",value:function(){var e=this,t=this.props,n=t.delimiter,r=t.isDisabled,o=t.isMulti,i=t.name,a=t.required,s=this.state.selectValue;if(a&&!this.hasValue()&&!r)return c.createElement(q,{name:i,onFocus:this.onValueInputFocus});if(i&&!r){if(o){if(n){var u=s.map((function(t){return e.getOptionValue(t)})).join(n);return c.createElement("input",{name:i,type:"hidden",value:u})}var l=s.length>0?s.map((function(t,n){return c.createElement("input",{key:"i-".concat(n),name:i,type:"hidden",value:e.getOptionValue(t)})})):c.createElement("input",{name:i,type:"hidden",value:""});return c.createElement("div",null,l)}var f=s[0]?this.getOptionValue(s[0]):"";return c.createElement("input",{name:i,type:"hidden",value:f})}}},{key:"renderLiveRegion",value:function(){var e=this.commonProps,t=this.state,n=t.ariaSelection,o=t.focusedOption,i=t.focusedValue,a=t.isFocused,s=t.selectValue,u=this.getFocusableOptions();return c.createElement(b,(0,r.A)({},e,{id:this.getElementId("live-region"),ariaSelection:n,focusedOption:o,focusedValue:i,isFocused:a,selectValue:s,focusableOptions:u,isAppleDevice:this.isAppleDevice}))}},{key:"render",value:function(){var e=this.getComponents(),t=e.Control,n=e.IndicatorsContainer,o=e.SelectContainer,i=e.ValueContainer,a=this.props,s=a.className,u=a.id,l=a.isDisabled,f=a.menuIsOpen,d=this.state.isFocused,h=this.commonProps=this.getCommonProps();return c.createElement(o,(0,r.A)({},h,{className:s,innerProps:{id:u,onKeyDown:this.onKeyDown},isDisabled:l,isFocused:d}),this.renderLiveRegion(),c.createElement(t,(0,r.A)({},h,{innerRef:this.getControlRef,innerProps:{onMouseDown:this.onControlMouseDown,onTouchEnd:this.onControlTouchEnd},isDisabled:l,isFocused:d,menuIsOpen:f}),c.createElement(i,(0,r.A)({},h,{isDisabled:l}),this.renderPlaceholderOrValue(),this.renderInput()),c.createElement(n,(0,r.A)({},h,{isDisabled:l}),this.renderClearIndicator(),this.renderLoadingIndicator(),this.renderIndicatorSeparator(),this.renderDropdownIndicator())),this.renderMenu(),this.renderFormField())}}],[{key:"getDerivedStateFromProps",value:function(e,t){var n=t.prevProps,r=t.clearFocusValueOnUpdate,i=t.inputIsHiddenAfterUpdate,a=t.ariaSelection,s=t.isFocused,u=t.prevWasFocused,l=t.instancePrefix,c=e.options,d=e.value,h=e.menuIsOpen,p=e.inputValue,g=e.isMulti,v=(0,f.H)(d),y={};if(n&&(d!==n.value||c!==n.options||h!==n.menuIsOpen||p!==n.inputValue)){var m=h?function(e,t){return oe(re(e,t))}(e,v):[],b=h?ie(re(e,v),"".concat(l,"-option")):[],_=r?function(e,t){var n=e.focusedValue,r=e.selectValue.indexOf(n);if(r>-1){if(t.indexOf(n)>-1)return n;if(r-1?n:t[0]}(t,m);y={selectValue:v,focusedOption:w,focusedOptionId:se(b,w),focusableOptionsWithIds:b,focusedValue:_,clearFocusValueOnUpdate:!1}}var x=null!=i&&e!==n?{inputIsHidden:i,inputIsHiddenAfterUpdate:void 0}:{},S=a,A=s&&u;return s&&!A&&(S={value:(0,f.D)(g,v,v[0]||null),options:v,action:"initial-input-focus"},A=!u),"initial-input-focus"===(null===a||void 
0===a?void 0:a.action)&&(S=null),(0,o.A)((0,o.A)((0,o.A)({},y),x),{},{prevProps:e,ariaSelection:S,prevWasFocused:A})}}]),n}(c.Component);ge.defaultProps=te},16093:(e,t,n)=>{"use strict";n.d(t,{A:()=>J,B:()=>se,C:()=>ae,D:()=>ie,E:()=>$,F:()=>st,G:()=>G,H:()=>H,I:()=>Z,J:()=>B,K:()=>oe,M:()=>ve,a:()=>Le,b:()=>Se,c:()=>at,d:()=>Be,e:()=>De,f:()=>We,g:()=>He,h:()=>Fe,i:()=>Ee,j:()=>Ye,k:()=>we,l:()=>Ue,m:()=>pe,n:()=>me,o:()=>xe,p:()=>Qe,q:()=>Je,r:()=>ue,s:()=>re,t:()=>et,u:()=>_e,v:()=>rt,w:()=>ot,x:()=>it,y:()=>Ae,z:()=>Q});var r=n(89379),o=n(58168),i=n(24553),a=n(80296),s=n(80045),u=n(82284);var l=n(64467),c=n(96540),f=n(40961);const d=Math.min,h=Math.max,p=Math.round,g=Math.floor,v=e=>({x:e,y:e});function y(e){return{...e,top:e.y,left:e.x,right:e.x+e.width,bottom:e.y+e.height}}function m(e){return w(e)?(e.nodeName||"").toLowerCase():"#document"}function b(e){var t;return(null==e||null==(t=e.ownerDocument)?void 0:t.defaultView)||window}function _(e){var t;return null==(t=(w(e)?e.ownerDocument:e.document)||window.document)?void 0:t.documentElement}function w(e){return e instanceof Node||e instanceof b(e).Node}function x(e){return e instanceof Element||e instanceof b(e).Element}function S(e){return e instanceof HTMLElement||e instanceof b(e).HTMLElement}function A(e){return"undefined"!==typeof ShadowRoot&&(e instanceof ShadowRoot||e instanceof b(e).ShadowRoot)}function E(e){const{overflow:t,overflowX:n,overflowY:r,display:o}=O(e);return/auto|scroll|overlay|hidden|clip/.test(t+r+n)&&!["inline","contents"].includes(o)}function C(){return!("undefined"===typeof CSS||!CSS.supports)&&CSS.supports("-webkit-backdrop-filter","none")}function R(e){return["html","body","#document"].includes(m(e))}function O(e){return b(e).getComputedStyle(e)}function T(e){if("html"===m(e))return e;const t=e.assignedSlot||e.parentNode||A(e)&&e.host||_(e);return A(t)?t.host:t}function k(e){const t=T(e);return R(t)?e.ownerDocument?e.ownerDocument.body:e.body:S(t)&&E(t)?t:k(t)}function M(e,t,n){var r;void 0===t&&(t=[]),void 0===n&&(n=!0);const o=k(e),i=o===(null==(r=e.ownerDocument)?void 0:r.body),a=b(o);return i?t.concat(a,a.visualViewport||[],E(o)?o:[],a.frameElement&&n?M(a.frameElement):[]):t.concat(o,M(o,[],n))}function P(e){const t=O(e);let n=parseFloat(t.width)||0,r=parseFloat(t.height)||0;const o=S(e),i=o?e.offsetWidth:n,a=o?e.offsetHeight:r,s=p(n)!==i||p(r)!==a;return s&&(n=i,r=a),{width:n,height:r,$:s}}function I(e){return x(e)?e:e.contextElement}function N(e){const t=I(e);if(!S(t))return v(1);const n=t.getBoundingClientRect(),{width:r,height:o,$:i}=P(t);let a=(i?p(n.width):n.width)/r,s=(i?p(n.height):n.height)/o;return a&&Number.isFinite(a)||(a=1),s&&Number.isFinite(s)||(s=1),{x:a,y:s}}const D=v(0);function L(e){const t=b(e);return C()&&t.visualViewport?{x:t.visualViewport.offsetLeft,y:t.visualViewport.offsetTop}:D}function F(e,t,n,r){void 0===t&&(t=!1),void 0===n&&(n=!1);const o=e.getBoundingClientRect(),i=I(e);let a=v(1);t&&(r?x(r)&&(a=N(r)):a=N(e));const s=function(e,t,n){return void 0===t&&(t=!1),!(!n||t&&n!==b(e))&&t}(i,n,r)?L(i):v(0);let u=(o.left+s.x)/a.x,l=(o.top+s.y)/a.y,c=o.width/a.x,f=o.height/a.y;if(i){const e=b(i),t=r&&x(r)?b(r):r;let n=e,o=n.frameElement;for(;o&&r&&t!==n;){const e=N(o),t=o.getBoundingClientRect(),r=O(o),i=t.left+(o.clientLeft+parseFloat(r.paddingLeft))*e.x,a=t.top+(o.clientTop+parseFloat(r.paddingTop))*e.y;u*=e.x,l*=e.y,c*=e.x,f*=e.y,u+=i,l+=a,n=b(o),o=n.frameElement}}return y({width:c,height:f,x:u,y:l})}function j(e,t,n,r){void 
0===r&&(r={});const{ancestorScroll:o=!0,ancestorResize:i=!0,elementResize:a="function"===typeof ResizeObserver,layoutShift:s="function"===typeof IntersectionObserver,animationFrame:u=!1}=r,l=I(e),c=o||i?[...l?M(l):[],...M(t)]:[];c.forEach((e=>{o&&e.addEventListener("scroll",n,{passive:!0}),i&&e.addEventListener("resize",n)}));const f=l&&s?function(e,t){let n,r=null;const o=_(e);function i(){var e;clearTimeout(n),null==(e=r)||e.disconnect(),r=null}return function a(s,u){void 0===s&&(s=!1),void 0===u&&(u=1),i();const{left:l,top:c,width:f,height:p}=e.getBoundingClientRect();if(s||t(),!f||!p)return;const v={rootMargin:-g(c)+"px "+-g(o.clientWidth-(l+f))+"px "+-g(o.clientHeight-(c+p))+"px "+-g(l)+"px",threshold:h(0,d(1,u))||1};let y=!0;function m(e){const t=e[0].intersectionRatio;if(t!==u){if(!y)return a();t?a(!1,t):n=setTimeout((()=>{a(!1,1e-7)}),100)}y=!1}try{r=new IntersectionObserver(m,{...v,root:o.ownerDocument})}catch(b){r=new IntersectionObserver(m,v)}r.observe(e)}(!0),i}(l,n):null;let p,v=-1,y=null;a&&(y=new ResizeObserver((e=>{let[r]=e;r&&r.target===l&&y&&(y.unobserve(t),cancelAnimationFrame(v),v=requestAnimationFrame((()=>{var e;null==(e=y)||e.observe(t)}))),n()})),l&&!u&&y.observe(l),y.observe(t));let m=u?F(e):null;return u&&function t(){const r=F(e);!m||r.x===m.x&&r.y===m.y&&r.width===m.width&&r.height===m.height||n();m=r,p=requestAnimationFrame(t)}(),n(),()=>{var e;c.forEach((e=>{o&&e.removeEventListener("scroll",n),i&&e.removeEventListener("resize",n)})),null==f||f(),null==(e=y)||e.disconnect(),y=null,u&&cancelAnimationFrame(p)}}var U=n(27003),z=["className","clearValue","cx","getStyles","getClassNames","getValue","hasValue","isMulti","isRtl","options","selectOption","selectProps","setValue","theme"],B=function(){};function V(e,t){return t?"-"===t[0]?e+t:e+"__"+t:e}function $(e,t){for(var n=arguments.length,r=new Array(n>2?n-2:0),o=2;o-1}function Y(e){return G(e)?window.pageYOffset:e.scrollTop}function X(e,t){G(e)?window.scrollTo(0,t):e.scrollTop=t}function K(e,t){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:200,r=arguments.length>3&&void 0!==arguments[3]?arguments[3]:B,o=Y(e),i=t-o,a=0;!function t(){var s,u=i*((s=(s=a+=10)/n-1)*s*s+1)+o;X(e,u),an.bottom?X(e,Math.min(t.offsetTop+t.clientHeight-e.offsetHeight+o,e.scrollHeight)):r.top-o1?t-1:0),r=1;r=p)return{placement:"bottom",maxHeight:t};if(A>=p&&!a)return i&&K(u,E,R),{placement:"bottom",maxHeight:t};if(!a&&A>=r||a&&x>=r)return i&&K(u,E,R),{placement:"bottom",maxHeight:a?x-b:A-b};if("auto"===o||a){var O=t,T=a?w:S;return T>=r&&(O=Math.min(T-b-s,t)),{placement:"top",maxHeight:O}}if("bottom"===o)return i&&X(u,E),{placement:"bottom",maxHeight:t};break;case"top":if(w>=p)return{placement:"top",maxHeight:t};if(S>=p&&!a)return i&&K(u,C,R),{placement:"top",maxHeight:t};if(!a&&S>=r||a&&w>=r){var k=t;return(!a&&S>=r||a&&w>=r)&&(k=a?w-_:S-_),i&&K(u,C,R),{placement:"top",maxHeight:k}}return{placement:"bottom",maxHeight:t};default:throw new Error('Invalid placement provided "'.concat(o,'".'))}return l}var de,he=function(e){return"auto"===e?"bottom":e},pe=function(e,t){var n,o=e.placement,i=e.theme,a=i.borderRadius,s=i.spacing,u=i.colors;return(0,r.A)((n={label:"menu"},(0,l.A)(n,function(e){return e?{bottom:"top",top:"bottom"}[e]:"bottom"}(o),"100%"),(0,l.A)(n,"position","absolute"),(0,l.A)(n,"width","100%"),(0,l.A)(n,"zIndex",1),n),t?{}:{backgroundColor:u.neutral0,borderRadius:a,boxShadow:"0 0 0 1px hsla(0, 0%, 0%, 0.1), 0 4px 11px hsla(0, 0%, 0%, 
0.1)",marginBottom:s.menuGutter,marginTop:s.menuGutter})},ge=(0,c.createContext)(null),ve=function(e){var t=e.children,n=e.minMenuHeight,o=e.maxMenuHeight,i=e.menuPlacement,s=e.menuPosition,u=e.menuShouldScrollIntoView,l=e.theme,f=((0,c.useContext)(ge)||{}).setPortalPlacement,d=(0,c.useRef)(null),h=(0,c.useState)(o),p=(0,a.A)(h,2),g=p[0],v=p[1],y=(0,c.useState)(null),m=(0,a.A)(y,2),b=m[0],_=m[1],w=l.spacing.controlHeight;return(0,U.A)((function(){var e=d.current;if(e){var t="fixed"===s,r=fe({maxHeight:o,menuEl:e,minHeight:n,placement:i,shouldScroll:u&&!t,isFixedPosition:t,controlHeight:w});v(r.maxHeight),_(r.placement),null===f||void 0===f||f(r.placement)}}),[o,i,s,u,n,f,w]),t({ref:d,placerProps:(0,r.A)((0,r.A)({},e),{},{placement:b||he(i),maxHeight:g})})},ye=function(e){var t=e.children,n=e.innerRef,r=e.innerProps;return(0,i.Y)("div",(0,o.A)({},q(e,"menu",{menu:!0}),{ref:n},r),t)},me=function(e,t){var n=e.maxHeight,o=e.theme.spacing.baseUnit;return(0,r.A)({maxHeight:n,overflowY:"auto",position:"relative",WebkitOverflowScrolling:"touch"},t?{}:{paddingBottom:o,paddingTop:o})},be=function(e,t){var n=e.theme,o=n.spacing.baseUnit,i=n.colors;return(0,r.A)({textAlign:"center"},t?{}:{color:i.neutral40,padding:"".concat(2*o,"px ").concat(3*o,"px")})},_e=be,we=be,xe=function(e){var t=e.rect,n=e.offset,r=e.position;return{left:t.left,position:r,top:n,width:t.width,zIndex:1}},Se=function(e){var t=e.isDisabled;return{label:"container",direction:e.isRtl?"rtl":void 0,pointerEvents:t?"none":void 0,position:"relative"}},Ae=function(e,t){var n=e.theme.spacing,o=e.isMulti,i=e.hasValue,a=e.selectProps.controlShouldRenderValue;return(0,r.A)({alignItems:"center",display:o&&i&&a?"flex":"grid",flex:1,flexWrap:"wrap",WebkitOverflowScrolling:"touch",position:"relative",overflow:"hidden"},t?{}:{padding:"".concat(n.baseUnit/2,"px ").concat(2*n.baseUnit,"px")})},Ee=function(){return{alignItems:"center",alignSelf:"stretch",display:"flex",flexShrink:0}},Ce=["size"],Re=["innerProps","isRtl","size"];var Oe,Te,ke={name:"8mmkcg",styles:"display:inline-block;fill:currentColor;line-height:1;stroke:currentColor;stroke-width:0"},Me=function(e){var t=e.size,n=(0,s.A)(e,Ce);return(0,i.Y)("svg",(0,o.A)({height:t,width:t,viewBox:"0 0 20 20","aria-hidden":"true",focusable:"false",css:ke},n))},Pe=function(e){return(0,i.Y)(Me,(0,o.A)({size:20},e),(0,i.Y)("path",{d:"M14.348 14.849c-0.469 0.469-1.229 0.469-1.697 0l-2.651-3.030-2.651 3.029c-0.469 0.469-1.229 0.469-1.697 0-0.469-0.469-0.469-1.229 0-1.697l2.758-3.15-2.759-3.152c-0.469-0.469-0.469-1.228 0-1.697s1.228-0.469 1.697 0l2.652 3.031 2.651-3.031c0.469-0.469 1.228-0.469 1.697 0s0.469 1.229 0 1.697l-2.758 3.152 2.758 3.15c0.469 0.469 0.469 1.229 0 1.698z"}))},Ie=function(e){return(0,i.Y)(Me,(0,o.A)({size:20},e),(0,i.Y)("path",{d:"M4.516 7.548c0.436-0.446 1.043-0.481 1.576 0l3.908 3.747 3.908-3.747c0.533-0.481 1.141-0.446 1.574 0 0.436 0.445 0.408 1.197 0 1.615-0.406 0.418-4.695 4.502-4.695 4.502-0.217 0.223-0.502 0.335-0.787 0.335s-0.57-0.112-0.789-0.335c0 0-4.287-4.084-4.695-4.502s-0.436-1.17 0-1.615z"}))},Ne=function(e,t){var n=e.isFocused,o=e.theme,i=o.spacing.baseUnit,a=o.colors;return(0,r.A)({label:"indicatorContainer",display:"flex",transition:"color 150ms"},t?{}:{color:n?a.neutral60:a.neutral20,padding:2*i,":hover":{color:n?a.neutral80:a.neutral40}})},De=Ne,Le=Ne,Fe=function(e,t){var 
n=e.isDisabled,o=e.theme,i=o.spacing.baseUnit,a=o.colors;return(0,r.A)({label:"indicatorSeparator",alignSelf:"stretch",width:1},t?{}:{backgroundColor:n?a.neutral10:a.neutral20,marginBottom:2*i,marginTop:2*i})},je=(0,i.i7)(de||(Oe=["\n 0%, 80%, 100% { opacity: 0; }\n 40% { opacity: 1; }\n"],Te||(Te=Oe.slice(0)),de=Object.freeze(Object.defineProperties(Oe,{raw:{value:Object.freeze(Te)}})))),Ue=function(e,t){var n=e.isFocused,o=e.size,i=e.theme,a=i.colors,s=i.spacing.baseUnit;return(0,r.A)({label:"loadingIndicator",display:"flex",transition:"color 150ms",alignSelf:"center",fontSize:o,lineHeight:1,marginRight:o,textAlign:"center",verticalAlign:"middle"},t?{}:{color:n?a.neutral60:a.neutral20,padding:2*s})},ze=function(e){var t=e.delay,n=e.offset;return(0,i.Y)("span",{css:(0,i.AH)({animation:"".concat(je," 1s ease-in-out ").concat(t,"ms infinite;"),backgroundColor:"currentColor",borderRadius:"1em",display:"inline-block",marginLeft:n?"1em":void 0,height:"1em",verticalAlign:"top",width:"1em"},"","")})},Be=function(e,t){var n=e.isDisabled,o=e.isFocused,i=e.theme,a=i.colors,s=i.borderRadius,u=i.spacing;return(0,r.A)({label:"control",alignItems:"center",cursor:"default",display:"flex",flexWrap:"wrap",justifyContent:"space-between",minHeight:u.controlHeight,outline:"0 !important",position:"relative",transition:"all 100ms"},t?{}:{backgroundColor:n?a.neutral5:a.neutral0,borderColor:n?a.neutral10:o?a.primary:a.neutral20,borderRadius:s,borderStyle:"solid",borderWidth:1,boxShadow:o?"0 0 0 1px ".concat(a.primary):void 0,"&:hover":{borderColor:o?a.primary:a.neutral30}})},Ve=function(e){var t=e.children,n=e.isDisabled,r=e.isFocused,a=e.innerRef,s=e.innerProps,u=e.menuIsOpen;return(0,i.Y)("div",(0,o.A)({ref:a},q(e,"control",{control:!0,"control--is-disabled":n,"control--is-focused":r,"control--menu-is-open":u}),s,{"aria-disabled":n||void 0}),t)},$e=["data"],He=function(e,t){var n=e.theme.spacing;return t?{}:{paddingBottom:2*n.baseUnit,paddingTop:2*n.baseUnit}},We=function(e,t){var n=e.theme,o=n.colors,i=n.spacing;return(0,r.A)({label:"group",cursor:"default",display:"block"},t?{}:{color:o.neutral40,fontSize:"75%",fontWeight:500,marginBottom:"0.25em",paddingLeft:3*i.baseUnit,paddingRight:3*i.baseUnit,textTransform:"uppercase"})},qe=function(e){var t=e.children,n=e.cx,r=e.getStyles,a=e.getClassNames,s=e.Heading,u=e.headingProps,l=e.innerProps,c=e.label,f=e.theme,d=e.selectProps;return(0,i.Y)("div",(0,o.A)({},q(e,"group",{group:!0}),l),(0,i.Y)(s,(0,o.A)({},u,{selectProps:d,theme:f,getStyles:r,getClassNames:a,cx:n}),c),(0,i.Y)("div",null,t))},Ge=["innerRef","isDisabled","isHidden","inputClassName"],Ye=function(e,t){var n=e.isDisabled,o=e.value,i=e.theme,a=i.spacing,s=i.colors;return(0,r.A)((0,r.A)({visibility:n?"hidden":"visible",transform:o?"translateZ(0)":""},Ke),t?{}:{margin:a.baseUnit/2,paddingBottom:a.baseUnit/2,paddingTop:a.baseUnit/2,color:s.neutral80})},Xe={gridArea:"1 / 2",font:"inherit",minWidth:"2px",border:0,margin:0,outline:0,padding:0},Ke={flex:"1 1 auto",display:"inline-grid",gridArea:"1 / 1 / 2 / 3",gridTemplateColumns:"0 min-content","&:after":(0,r.A)({content:'attr(data-value) " "',visibility:"hidden",whiteSpace:"pre"},Xe)},Ze=function(e){return(0,r.A)({label:"input",color:"inherit",background:0,opacity:e?0:1,width:"100%"},Xe)},Qe=function(e,t){var n=e.theme,o=n.spacing,i=n.borderRadius,a=n.colors;return(0,r.A)({label:"multiValue",display:"flex",minWidth:0},t?{}:{backgroundColor:a.neutral10,borderRadius:i/2,margin:o.baseUnit/2})},Je=function(e,t){var 
n=e.theme,o=n.borderRadius,i=n.colors,a=e.cropWithEllipsis;return(0,r.A)({overflow:"hidden",textOverflow:a||void 0===a?"ellipsis":void 0,whiteSpace:"nowrap"},t?{}:{borderRadius:o/2,color:i.neutral80,fontSize:"85%",padding:3,paddingLeft:6})},et=function(e,t){var n=e.theme,o=n.spacing,i=n.borderRadius,a=n.colors,s=e.isFocused;return(0,r.A)({alignItems:"center",display:"flex"},t?{}:{borderRadius:i/2,backgroundColor:s?a.dangerLight:void 0,paddingLeft:o.baseUnit,paddingRight:o.baseUnit,":hover":{backgroundColor:a.dangerLight,color:a.danger}})},tt=function(e){var t=e.children,n=e.innerProps;return(0,i.Y)("div",n,t)};var nt=function(e){var t=e.children,n=e.components,o=e.data,a=e.innerProps,s=e.isDisabled,u=e.removeProps,l=e.selectProps,c=n.Container,f=n.Label,d=n.Remove;return(0,i.Y)(c,{data:o,innerProps:(0,r.A)((0,r.A)({},q(e,"multiValue",{"multi-value":!0,"multi-value--is-disabled":s})),a),selectProps:l},(0,i.Y)(f,{data:o,innerProps:(0,r.A)({},q(e,"multiValueLabel",{"multi-value__label":!0})),selectProps:l},t),(0,i.Y)(d,{data:o,innerProps:(0,r.A)((0,r.A)({},q(e,"multiValueRemove",{"multi-value__remove":!0})),{},{"aria-label":"Remove ".concat(t||"option")},u),selectProps:l}))},rt=function(e,t){var n=e.isDisabled,o=e.isFocused,i=e.isSelected,a=e.theme,s=a.spacing,u=a.colors;return(0,r.A)({label:"option",cursor:"default",display:"block",fontSize:"inherit",width:"100%",userSelect:"none",WebkitTapHighlightColor:"rgba(0, 0, 0, 0)"},t?{}:{backgroundColor:i?u.primary:o?u.primary25:"transparent",color:n?u.neutral20:i?u.neutral0:"inherit",padding:"".concat(2*s.baseUnit,"px ").concat(3*s.baseUnit,"px"),":active":{backgroundColor:n?void 0:i?u.primary:u.primary50}})},ot=function(e,t){var n=e.theme,o=n.spacing,i=n.colors;return(0,r.A)({label:"placeholder",gridArea:"1 / 1 / 2 / 3"},t?{}:{color:i.neutral50,marginLeft:o.baseUnit/2,marginRight:o.baseUnit/2})},it=function(e,t){var n=e.isDisabled,o=e.theme,i=o.spacing,a=o.colors;return(0,r.A)({label:"singleValue",gridArea:"1 / 1 / 2 / 3",maxWidth:"100%",overflow:"hidden",textOverflow:"ellipsis",whiteSpace:"nowrap"},t?{}:{color:n?a.neutral40:a.neutral80,marginLeft:i.baseUnit/2,marginRight:i.baseUnit/2})},at={ClearIndicator:function(e){var t=e.children,n=e.innerProps;return(0,i.Y)("div",(0,o.A)({},q(e,"clearIndicator",{indicator:!0,"clear-indicator":!0}),n),t||(0,i.Y)(Pe,null))},Control:Ve,DropdownIndicator:function(e){var t=e.children,n=e.innerProps;return(0,i.Y)("div",(0,o.A)({},q(e,"dropdownIndicator",{indicator:!0,"dropdown-indicator":!0}),n),t||(0,i.Y)(Ie,null))},DownChevron:Ie,CrossIcon:Pe,Group:qe,GroupHeading:function(e){var t=W(e);t.data;var n=(0,s.A)(t,$e);return(0,i.Y)("div",(0,o.A)({},q(e,"groupHeading",{"group-heading":!0}),n))},IndicatorsContainer:function(e){var t=e.children,n=e.innerProps;return(0,i.Y)("div",(0,o.A)({},q(e,"indicatorsContainer",{indicators:!0}),n),t)},IndicatorSeparator:function(e){var t=e.innerProps;return(0,i.Y)("span",(0,o.A)({},t,q(e,"indicatorSeparator",{"indicator-separator":!0})))},Input:function(e){var t=e.cx,n=e.value,r=W(e),a=r.innerRef,u=r.isDisabled,l=r.isHidden,c=r.inputClassName,f=(0,s.A)(r,Ge);return(0,i.Y)("div",(0,o.A)({},q(e,"input",{"input-container":!0}),{"data-value":n||""}),(0,i.Y)("input",(0,o.A)({className:t({input:!0},c),ref:a,style:Ze(l),disabled:u},f)))},LoadingIndicator:function(e){var t=e.innerProps,n=e.isRtl,a=e.size,u=void 
0===a?4:a,l=(0,s.A)(e,Re);return(0,i.Y)("div",(0,o.A)({},q((0,r.A)((0,r.A)({},l),{},{innerProps:t,isRtl:n,size:u}),"loadingIndicator",{indicator:!0,"loading-indicator":!0}),t),(0,i.Y)(ze,{delay:0,offset:n}),(0,i.Y)(ze,{delay:160,offset:!0}),(0,i.Y)(ze,{delay:320,offset:!n}))},Menu:ye,MenuList:function(e){var t=e.children,n=e.innerProps,r=e.innerRef,a=e.isMulti;return(0,i.Y)("div",(0,o.A)({},q(e,"menuList",{"menu-list":!0,"menu-list--is-multi":a}),{ref:r},n),t)},MenuPortal:function(e){var t=e.appendTo,n=e.children,s=e.controlElement,u=e.innerProps,l=e.menuPlacement,d=e.menuPosition,h=(0,c.useRef)(null),p=(0,c.useRef)(null),g=(0,c.useState)(he(l)),v=(0,a.A)(g,2),y=v[0],m=v[1],b=(0,c.useMemo)((function(){return{setPortalPlacement:m}}),[]),_=(0,c.useState)(null),w=(0,a.A)(_,2),x=w[0],S=w[1],A=(0,c.useCallback)((function(){if(s){var e=function(e){var t=e.getBoundingClientRect();return{bottom:t.bottom,height:t.height,left:t.left,right:t.right,top:t.top,width:t.width}}(s),t="fixed"===d?0:window.pageYOffset,n=e[y]+t;n===(null===x||void 0===x?void 0:x.offset)&&e.left===(null===x||void 0===x?void 0:x.rect.left)&&e.width===(null===x||void 0===x?void 0:x.rect.width)||S({offset:n,rect:e})}}),[s,d,y,null===x||void 0===x?void 0:x.offset,null===x||void 0===x?void 0:x.rect.left,null===x||void 0===x?void 0:x.rect.width]);(0,U.A)((function(){A()}),[A]);var E=(0,c.useCallback)((function(){"function"===typeof p.current&&(p.current(),p.current=null),s&&h.current&&(p.current=j(s,h.current,A,{elementResize:"ResizeObserver"in window}))}),[s,A]);(0,U.A)((function(){E()}),[E]);var C=(0,c.useCallback)((function(e){h.current=e,E()}),[E]);if(!t&&"fixed"!==d||!x)return null;var R=(0,i.Y)("div",(0,o.A)({ref:C},q((0,r.A)((0,r.A)({},e),{},{offset:x.offset,position:d,rect:x.rect}),"menuPortal",{"menu-portal":!0}),u),n);return(0,i.Y)(ge.Provider,{value:b},t?(0,f.createPortal)(R,t):R)},LoadingMessage:function(e){var t=e.children,n=void 0===t?"Loading...":t,a=e.innerProps,u=(0,s.A)(e,ce);return(0,i.Y)("div",(0,o.A)({},q((0,r.A)((0,r.A)({},u),{},{children:n,innerProps:a}),"loadingMessage",{"menu-notice":!0,"menu-notice--loading":!0}),a),n)},NoOptionsMessage:function(e){var t=e.children,n=void 0===t?"No options":t,a=e.innerProps,u=(0,s.A)(e,le);return(0,i.Y)("div",(0,o.A)({},q((0,r.A)((0,r.A)({},u),{},{children:n,innerProps:a}),"noOptionsMessage",{"menu-notice":!0,"menu-notice--no-options":!0}),a),n)},MultiValue:nt,MultiValueContainer:tt,MultiValueLabel:tt,MultiValueRemove:function(e){var t=e.children,n=e.innerProps;return(0,i.Y)("div",(0,o.A)({role:"button"},n),t||(0,i.Y)(Pe,{size:14}))},Option:function(e){var t=e.children,n=e.isDisabled,r=e.isFocused,a=e.isSelected,s=e.innerRef,u=e.innerProps;return(0,i.Y)("div",(0,o.A)({},q(e,"option",{option:!0,"option--is-disabled":n,"option--is-focused":r,"option--is-selected":a}),{ref:s,"aria-disabled":n},u),t)},Placeholder:function(e){var t=e.children,n=e.innerProps;return(0,i.Y)("div",(0,o.A)({},q(e,"placeholder",{placeholder:!0}),n),t)},SelectContainer:function(e){var t=e.children,n=e.innerProps,r=e.isDisabled,a=e.isRtl;return(0,i.Y)("div",(0,o.A)({},q(e,"container",{"--is-disabled":r,"--is-rtl":a}),n),t)},SingleValue:function(e){var t=e.children,n=e.isDisabled,r=e.innerProps;return(0,i.Y)("div",(0,o.A)({},q(e,"singleValue",{"single-value":!0,"single-value--is-disabled":n}),r),t)},ValueContainer:function(e){var 
t=e.children,n=e.innerProps,r=e.isMulti,a=e.hasValue;return(0,i.Y)("div",(0,o.A)({},q(e,"valueContainer",{"value-container":!0,"value-container--is-multi":r,"value-container--has-value":a}),n),t)}},st=function(e){return(0,r.A)((0,r.A)({},at),e.components)}},46005:(e,t,n)=>{"use strict";n.d(t,{Ay:()=>s});var r=n(52836),o=n(58168),i=n(96540),a=n(17531),s=(n(84300),n(40961),n(27003),(0,i.forwardRef)((function(e,t){var n=(0,r.u)(e);return i.createElement(a.S,(0,o.A)({ref:t},n))})))},52836:(e,t,n)=>{"use strict";n.d(t,{u:()=>u});var r=n(89379),o=n(80296),i=n(80045),a=n(96540),s=["defaultInputValue","defaultMenuIsOpen","defaultValue","inputValue","menuIsOpen","onChange","onInputChange","onMenuClose","onMenuOpen","value"];function u(e){var t=e.defaultInputValue,n=void 0===t?"":t,u=e.defaultMenuIsOpen,l=void 0!==u&&u,c=e.defaultValue,f=void 0===c?null:c,d=e.inputValue,h=e.menuIsOpen,p=e.onChange,g=e.onInputChange,v=e.onMenuClose,y=e.onMenuOpen,m=e.value,b=(0,i.A)(e,s),_=(0,a.useState)(void 0!==d?d:n),w=(0,o.A)(_,2),x=w[0],S=w[1],A=(0,a.useState)(void 0!==h?h:l),E=(0,o.A)(A,2),C=E[0],R=E[1],O=(0,a.useState)(void 0!==m?m:f),T=(0,o.A)(O,2),k=T[0],M=T[1],P=(0,a.useCallback)((function(e,t){"function"===typeof p&&p(e,t),M(e)}),[p]),I=(0,a.useCallback)((function(e,t){var n;"function"===typeof g&&(n=g(e,t)),S(void 0!==n?n:e)}),[g]),N=(0,a.useCallback)((function(){"function"===typeof y&&y(),R(!0)}),[y]),D=(0,a.useCallback)((function(){"function"===typeof v&&v(),R(!1)}),[v]),L=void 0!==d?d:x,F=void 0!==h?h:C,j=void 0!==m?m:k;return(0,r.A)((0,r.A)({},b),{},{inputValue:L,menuIsOpen:F,onChange:P,onInputChange:I,onMenuClose:D,onMenuOpen:N,value:j})}},13674:(e,t,n)=>{"use strict";n.d(t,{Ay:()=>v});var r=n(98587),o=n(77387),i=n(96540),a=n(40961);const s=!1,u=i.createContext(null);var l="unmounted",c="exited",f="entering",d="entered",h="exiting",p=function(e){function t(t,n){var r;r=e.call(this,t,n)||this;var o,i=n&&!n.isMounting?t.enter:t.appear;return r.appearStatus=null,t.in?i?(o=c,r.appearStatus=f):o=d:o=t.unmountOnExit||t.mountOnEnter?l:c,r.state={status:o},r.nextCallback=null,r}(0,o.A)(t,e),t.getDerivedStateFromProps=function(e,t){return e.in&&t.status===l?{status:c}:null};var n=t.prototype;return n.componentDidMount=function(){this.updateStatus(!0,this.appearStatus)},n.componentDidUpdate=function(e){var t=null;if(e!==this.props){var n=this.state.status;this.props.in?n!==f&&n!==d&&(t=f):n!==f&&n!==d||(t=h)}this.updateStatus(!1,t)},n.componentWillUnmount=function(){this.cancelNextCallback()},n.getTimeouts=function(){var e,t,n,r=this.props.timeout;return e=t=n=r,null!=r&&"number"!==typeof r&&(e=r.exit,t=r.enter,n=void 0!==r.appear?r.appear:t),{exit:e,enter:t,appear:n}},n.updateStatus=function(e,t){if(void 0===e&&(e=!1),null!==t)if(this.cancelNextCallback(),t===f){if(this.props.unmountOnExit||this.props.mountOnEnter){var n=this.props.nodeRef?this.props.nodeRef.current:a.findDOMNode(this);n&&function(e){e.scrollTop}(n)}this.performEnter(e)}else this.performExit();else this.props.unmountOnExit&&this.state.status===c&&this.setState({status:l})},n.performEnter=function(e){var 
t=this,n=this.props.enter,r=this.context?this.context.isMounting:e,o=this.props.nodeRef?[r]:[a.findDOMNode(this),r],i=o[0],u=o[1],l=this.getTimeouts(),c=r?l.appear:l.enter;!e&&!n||s?this.safeSetState({status:d},(function(){t.props.onEntered(i)})):(this.props.onEnter(i,u),this.safeSetState({status:f},(function(){t.props.onEntering(i,u),t.onTransitionEnd(c,(function(){t.safeSetState({status:d},(function(){t.props.onEntered(i,u)}))}))})))},n.performExit=function(){var e=this,t=this.props.exit,n=this.getTimeouts(),r=this.props.nodeRef?void 0:a.findDOMNode(this);t&&!s?(this.props.onExit(r),this.safeSetState({status:h},(function(){e.props.onExiting(r),e.onTransitionEnd(n.exit,(function(){e.safeSetState({status:c},(function(){e.props.onExited(r)}))}))}))):this.safeSetState({status:c},(function(){e.props.onExited(r)}))},n.cancelNextCallback=function(){null!==this.nextCallback&&(this.nextCallback.cancel(),this.nextCallback=null)},n.safeSetState=function(e,t){t=this.setNextCallback(t),this.setState(e,t)},n.setNextCallback=function(e){var t=this,n=!0;return this.nextCallback=function(r){n&&(n=!1,t.nextCallback=null,e(r))},this.nextCallback.cancel=function(){n=!1},this.nextCallback},n.onTransitionEnd=function(e,t){this.setNextCallback(t);var n=this.props.nodeRef?this.props.nodeRef.current:a.findDOMNode(this),r=null==e&&!this.props.addEndListener;if(n&&!r){if(this.props.addEndListener){var o=this.props.nodeRef?[this.nextCallback]:[n,this.nextCallback],i=o[0],s=o[1];this.props.addEndListener(i,s)}null!=e&&setTimeout(this.nextCallback,e)}else setTimeout(this.nextCallback,0)},n.render=function(){var e=this.state.status;if(e===l)return null;var t=this.props,n=t.children,o=(t.in,t.mountOnEnter,t.unmountOnExit,t.appear,t.enter,t.exit,t.timeout,t.addEndListener,t.onEnter,t.onEntering,t.onEntered,t.onExit,t.onExiting,t.onExited,t.nodeRef,(0,r.A)(t,["children","in","mountOnEnter","unmountOnExit","appear","enter","exit","timeout","addEndListener","onEnter","onEntering","onEntered","onExit","onExiting","onExited","nodeRef"]));return i.createElement(u.Provider,{value:null},"function"===typeof n?n(e,o):i.cloneElement(i.Children.only(n),o))},t}(i.Component);function g(){}p.contextType=u,p.propTypes={},p.defaultProps={in:!1,mountOnEnter:!1,unmountOnExit:!1,appear:!1,enter:!0,exit:!0,onEnter:g,onEntering:g,onEntered:g,onExit:g,onExiting:g,onExited:g},p.UNMOUNTED=l,p.EXITED=c,p.ENTERING=f,p.ENTERED=d,p.EXITING=h;const v=p},15287:(e,t)=>{"use strict";var n=Symbol.for("react.element"),r=Symbol.for("react.portal"),o=Symbol.for("react.fragment"),i=Symbol.for("react.strict_mode"),a=Symbol.for("react.profiler"),s=Symbol.for("react.provider"),u=Symbol.for("react.context"),l=Symbol.for("react.forward_ref"),c=Symbol.for("react.suspense"),f=Symbol.for("react.memo"),d=Symbol.for("react.lazy"),h=Symbol.iterator;var p={isMounted:function(){return!1},enqueueForceUpdate:function(){},enqueueReplaceState:function(){},enqueueSetState:function(){}},g=Object.assign,v={};function y(e,t,n){this.props=e,this.context=t,this.refs=v,this.updater=n||p}function m(){}function b(e,t,n){this.props=e,this.context=t,this.refs=v,this.updater=n||p}y.prototype.isReactComponent={},y.prototype.setState=function(e,t){if("object"!==typeof e&&"function"!==typeof e&&null!=e)throw Error("setState(...): takes an object of state variables to update or a function which returns an object of state 
variables.");this.updater.enqueueSetState(this,e,t,"setState")},y.prototype.forceUpdate=function(e){this.updater.enqueueForceUpdate(this,e,"forceUpdate")},m.prototype=y.prototype;var _=b.prototype=new m;_.constructor=b,g(_,y.prototype),_.isPureReactComponent=!0;var w=Array.isArray,x=Object.prototype.hasOwnProperty,S={current:null},A={key:!0,ref:!0,__self:!0,__source:!0};function E(e,t,r){var o,i={},a=null,s=null;if(null!=t)for(o in void 0!==t.ref&&(s=t.ref),void 0!==t.key&&(a=""+t.key),t)x.call(t,o)&&!A.hasOwnProperty(o)&&(i[o]=t[o]);var u=arguments.length-2;if(1===u)i.children=r;else if(1{"use strict";e.exports=n(15287)},47444:(e,t,n)=>{"use strict";n.d(t,{E0:()=>bs,Iz:()=>hs,K0:()=>ps,L4:()=>ys,RH:()=>_s,Zs:()=>ws,bi:()=>cs,eU:()=>fs,gD:()=>ds,lZ:()=>ms,vc:()=>gs,xf:()=>vs});var r=n(96540),o=n(40961);var i=function(e){const t=new Error(e);if(void 0===t.stack)try{throw t}catch(n){}return t};var a=function(e){return!!e&&"function"===typeof e.then};var s=function(e,t){if(null!=e)return e;throw i(null!==t&&void 0!==t?t:"Got unexpected null or undefined")};function u(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}class l{getValue(){throw i("BaseLoadable")}toPromise(){throw i("BaseLoadable")}valueMaybe(){throw i("BaseLoadable")}valueOrThrow(){throw i(`Loadable expected value, but in "${this.state}" state`)}promiseMaybe(){throw i("BaseLoadable")}promiseOrThrow(){throw i(`Loadable expected promise, but in "${this.state}" state`)}errorMaybe(){throw i("BaseLoadable")}errorOrThrow(){throw i(`Loadable expected error, but in "${this.state}" state`)}is(e){return e.state===this.state&&e.contents===this.contents}map(e){throw i("BaseLoadable")}}class c extends l{constructor(e){super(),u(this,"state","hasValue"),u(this,"contents",void 0),this.contents=e}getValue(){return this.contents}toPromise(){return Promise.resolve(this.contents)}valueMaybe(){return this.contents}valueOrThrow(){return this.contents}promiseMaybe(){}errorMaybe(){}map(e){try{const t=e(this.contents);return a(t)?g(t):m(t)?t:h(t)}catch(t){return a(t)?g(t.next((()=>this.map(e)))):p(t)}}}class f extends l{constructor(e){super(),u(this,"state","hasError"),u(this,"contents",void 0),this.contents=e}getValue(){throw this.contents}toPromise(){return Promise.reject(this.contents)}valueMaybe(){}promiseMaybe(){}errorMaybe(){return this.contents}errorOrThrow(){return this.contents}map(e){return this}}class d extends l{constructor(e){super(),u(this,"state","loading"),u(this,"contents",void 0),this.contents=e}getValue(){throw this.contents}toPromise(){return this.contents}valueMaybe(){}promiseMaybe(){return this.contents}promiseOrThrow(){return this.contents}errorMaybe(){}map(e){return g(this.contents.then((t=>{const n=e(t);if(m(n)){const e=n;switch(e.state){case"hasValue":case"loading":return e.contents;case"hasError":throw e.contents}}return n})).catch((t=>{if(a(t))return t.then((()=>this.map(e).contents));throw t})))}}function h(e){return Object.freeze(new c(e))}function p(e){return Object.freeze(new f(e))}function g(e){return Object.freeze(new d(e))}function v(){return Object.freeze(new d(new Promise((()=>{}))))}function y(e){const t=function(e){return e.every((e=>"hasValue"===e.state))?h(e.map((e=>e.contents))):e.some((e=>"hasError"===e.state))?p(s(e.find((e=>"hasError"===e.state)),"Invalid loadable passed to loadableAll").contents):g(Promise.all(e.map((e=>e.contents))))}((Array.isArray(e)?e:Object.getOwnPropertyNames(e).map((t=>e[t]))).map((e=>m(e)?e:a(e)?g(e):h(e))));return 
Array.isArray(e)?t:t.map((t=>Object.getOwnPropertyNames(e).reduce(((e,n,r)=>({...e,[n]:t[r]})),{})))}function m(e){return e instanceof l}const b={of:e=>a(e)?g(e):m(e)?e:h(e),error:e=>p(e),loading:()=>v(),all:y,isLoadable:m};var _={loadableWithValue:h,loadableWithError:p,loadableWithPromise:g,loadableLoading:v,loadableAll:y,isLoadable:m,RecoilLoadable:b},w=_.loadableWithValue,x=_.loadableWithError,S=_.loadableWithPromise,A=_.loadableLoading,E=_.loadableAll,C=_.isLoadable,R=_.RecoilLoadable,O=Object.freeze({__proto__:null,loadableWithValue:w,loadableWithError:x,loadableWithPromise:S,loadableLoading:A,loadableAll:E,isLoadable:C,RecoilLoadable:R});const T={RECOIL_DUPLICATE_ATOM_KEY_CHECKING_ENABLED:!0,RECOIL_GKS_ENABLED:new Set(["recoil_hamt_2020","recoil_sync_external_store","recoil_suppress_rerender_in_callback","recoil_memory_managament_2020"])};!function(){var e;"undefined"!==typeof process&&null!=(null===(e=process)||void 0===e?void 0:e.env)&&(function(e,t){var n,r;const o=null===(n=process.env[e])||void 0===n||null===(r=n.toLowerCase())||void 0===r?void 0:r.trim();if(null==o||""===o)return;if(!["true","false"].includes(o))throw i(`process.env.${e} value must be 'true', 'false', or empty: ${o}`);t("true"===o)}("RECOIL_DUPLICATE_ATOM_KEY_CHECKING_ENABLED",(e=>{T.RECOIL_DUPLICATE_ATOM_KEY_CHECKING_ENABLED=e})),function(e,t){var n;const r=null===(n=process.env[e])||void 0===n?void 0:n.trim();null!=r&&""!==r&&t(r.split(/\s*,\s*|\s+/))}("RECOIL_GKS_ENABLED",(e=>{e.forEach((e=>{T.RECOIL_GKS_ENABLED.add(e)}))})))}();var k=T;function M(e){return k.RECOIL_GKS_ENABLED.has(e)}M.setPass=e=>{k.RECOIL_GKS_ENABLED.add(e)},M.setFail=e=>{k.RECOIL_GKS_ENABLED.delete(e)},M.clear=()=>{k.RECOIL_GKS_ENABLED.clear()};var P=M;var I,N,D,L=function(e,t,{error:n}={}){return null};const F=null!==(I=r.createMutableSource)&&void 0!==I?I:r.unstable_createMutableSource,j=null!==(N=r.useMutableSource)&&void 0!==N?N:r.unstable_useMutableSource,U=null!==(D=r.useSyncExternalStore)&&void 0!==D?D:r.unstable_useSyncExternalStore;let z=!1;var B={createMutableSource:F,useMutableSource:j,useSyncExternalStore:U,currentRendererSupportsUseSyncExternalStore:function(){var e;const{ReactCurrentDispatcher:t,ReactCurrentOwner:n}=r.__SECRET_INTERNALS_DO_NOT_USE_OR_YOU_WILL_BE_FIRED,o=null!=(null!==(e=null===t||void 0===t?void 0:t.current)&&void 0!==e?e:n.currentDispatcher).useSyncExternalStore;return!U||o||z||(z=!0,L("A React renderer without React 18+ API support is being used with React 18+.")),o},reactMode:function(){return P("recoil_transition_support")?{mode:"TRANSITION_SUPPORT",early:!0,concurrent:!0}:P("recoil_sync_external_store")&&null!=U?{mode:"SYNC_EXTERNAL_STORE",early:!0,concurrent:!1}:P("recoil_mutable_source")&&null!=j&&"undefined"!==typeof window&&!window.$disableRecoilValueMutableSource_TEMP_HACK_DO_NOT_USE?P("recoil_suppress_rerender_in_callback")?{mode:"MUTABLE_SOURCE",early:!0,concurrent:!0}:{mode:"MUTABLE_SOURCE",early:!1,concurrent:!1}:P("recoil_suppress_rerender_in_callback")?{mode:"LEGACY",early:!0,concurrent:!1}:{mode:"LEGACY",early:!1,concurrent:!1}},isFastRefreshEnabled:function(){return!1}};class V{constructor(e){u(this,"key",void 0),this.key=e}toJSON(){return{key:this.key}}}class $ extends V{}class H extends V{}var W={AbstractRecoilValue:V,RecoilState:$,RecoilValueReadOnly:H,isRecoilValue:function(e){return e instanceof $||e instanceof 
H}},q=W.AbstractRecoilValue,G=W.RecoilState,Y=W.RecoilValueReadOnly,X=W.isRecoilValue,K=Object.freeze({__proto__:null,AbstractRecoilValue:q,RecoilState:G,RecoilValueReadOnly:Y,isRecoilValue:X});var Z=function(e,...t){0};var Q=function(e,t){return function*(){let n=0;for(const r of e)yield t(r,n++)}()};const{isFastRefreshEnabled:J}=B;class ee{}const te=new ee,ne=new Map,re=new Map;class oe extends Error{}const ie=new Map;function ae(e){return ie.get(e)}var se={nodes:ne,recoilValues:re,registerNode:function(e){k.RECOIL_DUPLICATE_ATOM_KEY_CHECKING_ENABLED&&function(e){if(ne.has(e)){const t=`Duplicate atom key "${e}". This is a FATAL ERROR in\n production. But it is safe to ignore this warning if it occurred because of\n hot module replacement.`;console.warn(t)}}(e.key),ne.set(e.key,e);const t=null==e.set?new K.RecoilValueReadOnly(e.key):new K.RecoilState(e.key);return re.set(e.key,t),t},getNode:function(e){const t=ne.get(e);if(null==t)throw new oe(`Missing definition for RecoilValue: "${e}""`);return t},getNodeMaybe:function(e){return ne.get(e)},deleteNodeConfigIfPossible:function(e){var t;if(!P("recoil_memory_managament_2020"))return;const n=ne.get(e);var r;null!==n&&void 0!==n&&null!==(t=n.shouldDeleteConfigOnRelease)&&void 0!==t&&t.call(n)&&(ne.delete(e),null===(r=ae(e))||void 0===r||r(),ie.delete(e))},setConfigDeletionHandler:function(e,t){P("recoil_memory_managament_2020")&&(void 0===t?ie.delete(e):ie.set(e,t))},getConfigDeletionHandler:ae,recoilValuesForKeys:function(e){return Q(e,(e=>s(re.get(e))))},NodeMissingError:oe,DefaultValue:ee,DEFAULT_VALUE:te};var ue={enqueueExecution:function(e,t){t()}};var le,ce,fe=(le=function(e){var t="function"===typeof Symbol&&"symbol"===typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"===typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},n={},r=Math.pow(2,5),o=r-1,i=r/2,a=r/4,s={},u=function(e){return function(){return e}},l=n.hash=function(e){var n="undefined"===typeof e?"undefined":t(e);if("number"===n)return e;"string"!==n&&(e+="");for(var r=0,o=0,i=e.length;o>>e&o},f=function(e){return 1<>1&1431655765))+(n>>2&858993459))+(n>>4)&252645135,127&(n+=n>>8)+(n>>16);var n},h=function(e,t,n,r){var o=r;if(!e){var i=r.length;o=new Array(i);for(var a=0;a1?m(e,this.hash,u):u[0]}var l=r();return l===s?this:(++a.value,w(e,n,this.hash,this,o,y(e,o,i,l)))},E=function(e,t,n,r,o,a,s){var u=this.mask,l=this.children,y=c(n,o),m=f(y),w=d(u,m),S=u&m,A=S?l[w]:g,E=A._modify(e,t,n+5,r,o,a,s);if(A===E)return this;var C=x(e,this),R=u,O=void 0;if(S&&v(E)){if(!(R&=~m))return g;if(l.length<=2&&function(e){return e===g||1===e.type||2===e.type}(l[1^w]))return l[1^w];O=p(C,w,l)}else if(S||v(E))O=h(C,w,E,l);else{if(l.length>=i)return function(e,t,n,r,o){for(var i=[],a=r,s=0,u=0;a;++u)1&a&&(i[u]=o[s++]),a>>>=1;return i[t]=n,_(e,s+1,i)}(e,y,E,u,l);R|=m,O=function(e,t,n,r){var o=r.length;if(e){for(var i=o;i>=t;)r[i--]=r[i];return r[t]=n,r}for(var a=0,s=0,u=new Array(o+1);a{n.set(r,t(e,r))})),n};function be(e,t,n,r){const{nodeDeps:o,nodeToNodeSubscriptions:i}=n,a=o.get(e);if(a&&r&&a!==r.nodeDeps.get(e))return;o.set(e,t);const u=null==a?t:ye(t,a);for(const l of u){i.has(l)||i.set(l,new Set);s(i.get(l)).add(e)}if(a){const n=ye(a,t);for(const t of n){if(!i.has(t))return;const n=s(i.get(t));n.delete(e),0===n.size&&i.delete(t)}}}var _e={cloneGraph:function(e){return{nodeDeps:me(e.nodeDeps,(e=>new Set(e))),nodeToNodeSubscriptions:me(e.nodeToNodeSubscriptions,(e=>new Set(e)))}},graph:function(){return{nodeDeps:new 
Map,nodeToNodeSubscriptions:new Map}},saveDepsToStore:function(e,t,n,r){var o,i,a,s;const u=n.getState();r!==u.currentTree.version&&r!==(null===(o=u.nextTree)||void 0===o?void 0:o.version)&&r!==(null===(i=u.previousTree)||void 0===i?void 0:i.version)&&L("Tried to save dependencies to a discarded tree");const l=n.getGraph(r);if(be(e,t,l),r===(null===(a=u.previousTree)||void 0===a?void 0:a.version)){be(e,t,n.getGraph(u.currentTree.version),l)}if(r===(null===(s=u.previousTree)||void 0===s?void 0:s.version)||r===u.currentTree.version){var c;const r=null===(c=u.nextTree)||void 0===c?void 0:c.version;if(void 0!==r){be(e,t,n.getGraph(r),l)}}}};let we=0;let xe=0;let Se=0;var Ae={getNextTreeStateVersion:()=>we++,getNextStoreID:()=>xe++,getNextComponentID:()=>Se++};const{persistentMap:Ee}=ve,{graph:Ce}=_e,{getNextTreeStateVersion:Re}=Ae;function Oe(){const e=Re();return{version:e,stateID:e,transactionMetadata:{},dirtyAtoms:new Set,atomValues:Ee(),nonvalidatedAtoms:Ee()}}var Te={makeEmptyTreeState:Oe,makeEmptyStoreState:function(){const e=Oe();return{currentTree:e,nextTree:null,previousTree:null,commitDepth:0,knownAtoms:new Set,knownSelectors:new Set,transactionSubscriptions:new Map,nodeTransactionSubscriptions:new Map,nodeToComponentSubscriptions:new Map,queuedComponentCallbacks_DEPRECATED:[],suspendedComponentResolvers:new Set,graphsByVersion:(new Map).set(e.version,Ce()),retention:{referenceCounts:new Map,nodesRetainedByZone:new Map,retainablesToCheckForRelease:new Set},nodeCleanupFunctions:new Map}},getNextTreeStateVersion:Re};class ke{}var Me={RetentionZone:ke,retentionZone:function(){return new ke}};var Pe={setByAddingToSet:function(e,t){const n=new Set(e);return n.add(t),n},setByDeletingFromSet:function(e,t){const n=new Set(e);return n.delete(t),n},mapBySettingInMap:function(e,t,n){const r=new Map(e);return r.set(t,n),r},mapByUpdatingInMap:function(e,t,n){const r=new Map(e);return r.set(t,n(r.get(t))),r},mapByDeletingFromMap:function(e,t){const n=new Map(e);return n.delete(t),n},mapByDeletingMultipleFromMap:function(e,t){const n=new Map(e);return t.forEach((e=>n.delete(e))),n}};var Ie=function*(e,t){let n=0;for(const r of e)t(r,n++)&&(yield r)};var Ne=function(e,t){return new Proxy(e,{get:(e,n)=>(!(n in e)&&n in t&&(e[n]=t[n]()),e[n]),ownKeys:e=>Object.keys(e)})};const{getNode:De,getNodeMaybe:Le,recoilValuesForKeys:Fe}=se,{RetentionZone:je}=Me,{setByAddingToSet:Ue}=Pe,ze=Object.freeze(new Set);class Be extends Error{}function Ve(e,t,n,r){const o=e.getState();if(o.nodeCleanupFunctions.has(n))return;const i=De(n),a=function(e,t,n){if(!P("recoil_memory_managament_2020"))return()=>{};const{nodesRetainedByZone:r}=e.getState().retention;function o(e){let n=r.get(e);n||r.set(e,n=new Set),n.add(t)}if(n instanceof je)o(n);else if(Array.isArray(n))for(const i of n)o(i);return()=>{if(!P("recoil_memory_managament_2020"))return;const{retention:r}=e.getState();function o(e){const n=r.nodesRetainedByZone.get(e);null===n||void 0===n||n.delete(t),n&&0===n.size&&r.nodesRetainedByZone.delete(e)}if(n instanceof je)o(n);else if(Array.isArray(n))for(const e of n)o(e)}}(e,n,i.retainedBy),s=i.init(e,t,r);o.nodeCleanupFunctions.set(n,(()=>{s(),a()}))}function $e(e,t,n){return De(n).peek(e,t)}function He(e,t,n){const r=new Set,o=Array.from(n),i=e.getGraph(t.version);for(let s=o.pop();s;s=o.pop()){var a;r.add(s);const e=null!==(a=i.nodeToNodeSubscriptions.get(s))&&void 0!==a?a:ze;for(const t of e)r.has(t)||o.push(t)}return r}var We={getNodeLoadable:function(e,t,n){return 
Ve(e,t,n,"get"),De(n).get(e,t)},peekNodeLoadable:$e,setNodeValue:function(e,t,n,r){const o=De(n);if(null==o.set)throw new Be(`Attempt to set read-only RecoilValue: ${n}`);const i=o.set;return Ve(e,t,n,"set"),i(e,t,r)},initializeNode:function(e,t,n){Ve(e,e.getState().currentTree,t,n)},cleanUpNode:function(e,t){var n;const r=e.getState();null===(n=r.nodeCleanupFunctions.get(t))||void 0===n||n(),r.nodeCleanupFunctions.delete(t)},setUnvalidatedAtomValue_DEPRECATED:function(e,t,n){var r;const o=Le(t);return null===o||void 0===o||null===(r=o.invalidate)||void 0===r||r.call(o,e),{...e,atomValues:e.atomValues.clone().delete(t),nonvalidatedAtoms:e.nonvalidatedAtoms.clone().set(t,n),dirtyAtoms:Ue(e.dirtyAtoms,t)}},peekNodeInfo:function(e,t,n){const r=e.getState(),o=e.getGraph(t.version),i=De(n).nodeType;return Ne({type:i},{loadable:()=>$e(e,t,n),isActive:()=>r.knownAtoms.has(n)||r.knownSelectors.has(n),isSet:()=>"selector"!==i&&t.atomValues.has(n),isModified:()=>t.dirtyAtoms.has(n),deps:()=>{var e;return Fe(null!==(e=o.nodeDeps.get(n))&&void 0!==e?e:[])},subscribers:()=>{var o,i;return{nodes:Fe(Ie(He(e,t,new Set([n])),(e=>e!==n))),components:Q(null!==(o=null===(i=r.nodeToComponentSubscriptions.get(n))||void 0===i?void 0:i.values())&&void 0!==o?o:[],(([e])=>({name:e})))}}})},getDownstreamNodes:He};let qe=null;var Ge={setInvalidateMemoizedSnapshot:function(e){qe=e},invalidateMemoizedSnapshot:function(){var e;null===(e=qe)||void 0===e||e()}};const{getDownstreamNodes:Ye,getNodeLoadable:Xe,setNodeValue:Ke}=We,{getNextComponentID:Ze}=Ae,{getNode:Qe,getNodeMaybe:Je}=se,{DefaultValue:et}=se,{reactMode:tt}=B,{AbstractRecoilValue:nt,RecoilState:rt,RecoilValueReadOnly:ot,isRecoilValue:it}=K,{invalidateMemoizedSnapshot:at}=Ge;function st(e,t,n){if("set"===n.type){const{recoilValue:r,valueOrUpdater:o}=n,a=function(e,t,{key:n},r){if("function"===typeof r){const o=Xe(e,t,n);if("loading"===o.state){const e=`Tried to set atom or selector "${n}" using an updater function while the current state is pending, this is not currently supported.`;throw L(e),i(e)}if("hasError"===o.state)throw o.contents;return r(o.contents)}return r}(e,t,r,o),s=Ke(e,t,r.key,a);for(const[e,n]of s.entries())ut(t,e,n)}else if("setLoadable"===n.type){const{recoilValue:{key:e},loadable:r}=n;ut(t,e,r)}else if("markModified"===n.type){const{recoilValue:{key:e}}=n;t.dirtyAtoms.add(e)}else if("setUnvalidated"===n.type){var r;const{recoilValue:{key:e},unvalidatedValue:o}=n,i=Je(e);null===i||void 0===i||null===(r=i.invalidate)||void 0===r||r.call(i,t),t.atomValues.delete(e),t.nonvalidatedAtoms.set(e,o),t.dirtyAtoms.add(e)}else L(`Unknown action ${n.type}`)}function ut(e,t,n){"hasValue"===n.state&&n.contents instanceof et?e.atomValues.delete(t):e.atomValues.set(t,n),e.dirtyAtoms.add(t),e.nonvalidatedAtoms.delete(t)}function lt(e,t){e.replaceState((n=>{const r=dt(n);for(const o of t)st(e,r,o);return ht(e,r),at(),r}))}function ct(e,t){if(ft.length){const n=ft[ft.length-1];let r=n.get(e);r||n.set(e,r=[]),r.push(t)}else lt(e,[t])}const ft=[];function dt(e){return{...e,atomValues:e.atomValues.clone(),nonvalidatedAtoms:e.nonvalidatedAtoms.clone(),dirtyAtoms:new Set(e.dirtyAtoms)}}function ht(e,t){const n=Ye(e,t,t.dirtyAtoms);for(const i of n){var r,o;null===(r=Je(i))||void 0===r||null===(o=r.invalidate)||void 0===o||o.call(r,t)}}function pt(e,t,n){ct(e,{type:"set",recoilValue:t,valueOrUpdater:n})}var gt={RecoilValueReadOnly:ot,AbstractRecoilValue:nt,RecoilState:rt,getRecoilValueAsLoadable:function(e,{key:t},n=e.getState().currentTree){var r,o;const 
i=e.getState();n.version!==i.currentTree.version&&n.version!==(null===(r=i.nextTree)||void 0===r?void 0:r.version)&&n.version!==(null===(o=i.previousTree)||void 0===o?void 0:o.version)&&L("Tried to read from a discarded tree");const a=Xe(e,n,t);return"loading"===a.state&&a.contents.catch((()=>{})),a},setRecoilValue:pt,setRecoilValueLoadable:function(e,t,n){if(n instanceof et)return pt(e,t,n);ct(e,{type:"setLoadable",recoilValue:t,loadable:n})},markRecoilValueModified:function(e,t){ct(e,{type:"markModified",recoilValue:t})},setUnvalidatedRecoilValue:function(e,t,n){ct(e,{type:"setUnvalidated",recoilValue:t,unvalidatedValue:n})},subscribeToRecoilValue:function(e,{key:t},n,r=null){const o=Ze(),i=e.getState();i.nodeToComponentSubscriptions.has(t)||i.nodeToComponentSubscriptions.set(t,new Map),s(i.nodeToComponentSubscriptions.get(t)).set(o,[null!==r&&void 0!==r?r:"",n]);const a=tt();if(a.early&&("LEGACY"===a.mode||"MUTABLE_SOURCE"===a.mode)){const r=e.getState().nextTree;r&&r.dirtyAtoms.has(t)&&n(r)}return{release:()=>{const n=e.getState(),r=n.nodeToComponentSubscriptions.get(t);void 0!==r&&r.has(o)?(r.delete(o),0===r.size&&n.nodeToComponentSubscriptions.delete(t)):L(`Subscription missing at release time for atom ${t}. This is a bug in Recoil.`)}}},isRecoilValue:it,applyAtomValueWrites:function(e,t){const n=e.clone();return t.forEach(((e,t)=>{"hasValue"===e.state&&e.contents instanceof et?n.delete(t):n.set(t,e)})),n},batchStart:function(){const e=new Map;return ft.push(e),()=>{for(const[t,n]of e)lt(t,n);ft.pop()!==e&&L("Incorrect order of batch popping")}},writeLoadableToTreeState:ut,invalidateDownstreams:ht,copyTreeState:dt,refreshRecoilValue:function(e,t){var n;const{currentTree:r}=e.getState(),o=Qe(t.key);null===(n=o.clearCache)||void 0===n||n.call(o,e,r)}};var vt=function(e,t,n){const r=e.entries();let o=r.next();for(;!o.done;){const i=o.value;if(t.call(n,i[1],i[0],e))return!0;o=r.next()}return!1};const{cleanUpNode:yt}=We,{deleteNodeConfigIfPossible:mt,getNode:bt}=se,{RetentionZone:_t}=Me,wt=new Set;function xt(e,t){const n=e.getState(),r=n.currentTree;if(n.nextTree)return void L("releaseNodesNowOnCurrentTree should only be called at the end of a batch");const o=new Set;for(const a of t)if(a instanceof _t)for(const e of At(n,a))o.add(e);else o.add(a);const i=function(e,t){const n=e.getState(),r=n.currentTree,o=e.getGraph(r.version),i=new Set,a=new Set;return u(t),i;function u(t){const l=new Set,c=function(e,t,n,r,o){const i=e.getGraph(t.version),a=[],u=new Set;for(;n.size>0;)l(s(n.values().next().value));return a;function l(e){if(r.has(e)||o.has(e))return void n.delete(e);if(u.has(e))return;const t=i.nodeToNodeSubscriptions.get(e);if(t)for(const n of t)l(n);u.add(e),n.delete(e),a.push(e)}}(e,r,t,i,a);for(const e of c){var f;if("recoilRoot"===bt(e).retainedBy){a.add(e);continue}if((null!==(f=n.retention.referenceCounts.get(e))&&void 0!==f?f:0)>0){a.add(e);continue}if(Et(e).some((e=>n.retention.referenceCounts.get(e)))){a.add(e);continue}const t=o.nodeToNodeSubscriptions.get(e);t&&vt(t,(e=>a.has(e)))?a.add(e):(i.add(e),l.add(e))}const d=new Set;for(const e of l)for(const t of null!==(h=o.nodeDeps.get(e))&&void 0!==h?h:wt){var h;i.has(t)||d.add(t)}d.size&&u(d)}}(e,o);for(const a of i)St(e,r,a)}function St(e,t,n){if(!P("recoil_memory_managament_2020"))return;yt(e,n);const r=e.getState();r.knownAtoms.delete(n),r.knownSelectors.delete(n),r.nodeTransactionSubscriptions.delete(n),r.retention.referenceCounts.delete(n);const o=Et(n);for(const u of o){var 
i;null===(i=r.retention.nodesRetainedByZone.get(u))||void 0===i||i.delete(n)}t.atomValues.delete(n),t.dirtyAtoms.delete(n),t.nonvalidatedAtoms.delete(n);const a=r.graphsByVersion.get(t.version);if(a){const e=a.nodeDeps.get(n);if(void 0!==e){a.nodeDeps.delete(n);for(const t of e){var s;null===(s=a.nodeToNodeSubscriptions.get(t))||void 0===s||s.delete(n)}}a.nodeToNodeSubscriptions.delete(n)}mt(n)}function At(e,t){var n;return null!==(n=e.retention.nodesRetainedByZone.get(t))&&void 0!==n?n:wt}function Et(e){const t=bt(e).retainedBy;return void 0===t||"components"===t||"recoilRoot"===t?[]:t instanceof _t?[t]:t}function Ct(e,t){if(!P("recoil_memory_managament_2020"))return;e.getState().retention.referenceCounts.delete(t),function(e,t){const n=e.getState();n.nextTree?n.retention.retainablesToCheckForRelease.add(t):xt(e,new Set([t]))}(e,t)}var Rt={SUSPENSE_TIMEOUT_MS:12e4,updateRetainCount:function(e,t,n){var r;if(!P("recoil_memory_managament_2020"))return;const o=e.getState().retention.referenceCounts,i=(null!==(r=o.get(t))&&void 0!==r?r:0)+n;0===i?Ct(e,t):o.set(t,i)},updateRetainCountToZero:Ct,releaseScheduledRetainablesNow:function(e){if(!P("recoil_memory_managament_2020"))return;const t=e.getState();xt(e,t.retention.retainablesToCheckForRelease),t.retention.retainablesToCheckForRelease.clear()},retainedByOptionWithDefault:function(e){return void 0===e?"recoilRoot":e}};const{unstable_batchedUpdates:Ot}=o;var Tt={unstable_batchedUpdates:Ot};const{unstable_batchedUpdates:kt}=Tt;var Mt={unstable_batchedUpdates:kt};const{batchStart:Pt}=gt,{unstable_batchedUpdates:It}=Mt;let Nt=It||(e=>e());var Dt={getBatcher:()=>Nt,setBatcher:e=>{Nt=e},batchUpdates:e=>{Nt((()=>{let t=()=>{};try{t=Pt(),e()}finally{t()}}))}};var Lt=function*(e){for(const t of e)for(const e of t)yield e};const Ft="undefined"===typeof Window||"undefined"===typeof window,jt="undefined"!==typeof navigator&&"ReactNative"===navigator.product;var Ut={isSSR:Ft,isReactNative:jt,isWindow:e=>!Ft&&(e===window||e instanceof Window)};var zt={memoizeWithArgsHash:function(e,t){let n;return(...r)=>{n||(n={});const o=t(...r);return Object.hasOwnProperty.call(n,o)||(n[o]=e(...r)),n[o]}},memoizeOneWithArgsHash:function(e,t){let n,r;return(...o)=>{const i=t(...o);return n===i||(n=i,r=e(...o)),r}},memoizeOneWithArgsHashAndInvalidation:function(e,t){let n,r;return[(...o)=>{const i=t(...o);return n===i||(n=i,r=e(...o)),r},()=>{n=null}]}};const{batchUpdates:Bt}=Dt,{initializeNode:Vt,peekNodeInfo:$t}=We,{graph:Ht}=_e,{getNextStoreID:Wt}=Ae,{DEFAULT_VALUE:qt,recoilValues:Gt,recoilValuesForKeys:Yt}=se,{AbstractRecoilValue:Xt,getRecoilValueAsLoadable:Kt,setRecoilValue:Zt,setUnvalidatedRecoilValue:Qt}=gt,{updateRetainCount:Jt}=Rt,{setInvalidateMemoizedSnapshot:en}=Ge,{getNextTreeStateVersion:tn,makeEmptyStoreState:nn}=Te,{isSSR:rn}=Ut,{memoizeOneWithArgsHashAndInvalidation:on}=zt;class an{constructor(e,t){u(this,"_store",void 0),u(this,"_refCount",1),u(this,"getLoadable",(e=>(this.checkRefCount_INTERNAL(),Kt(this._store,e)))),u(this,"getPromise",(e=>(this.checkRefCount_INTERNAL(),this.getLoadable(e).toPromise()))),u(this,"getNodes_UNSTABLE",(e=>{if(this.checkRefCount_INTERNAL(),!0===(null===e||void 0===e?void 0:e.isModified)){if(!1===(null===e||void 0===e?void 0:e.isInitialized))return[];const t=this._store.getState().currentTree;return Yt(t.dirtyAtoms)}const t=this._store.getState().knownAtoms,n=this._store.getState().knownSelectors;return null==(null===e||void 0===e?void 
0:e.isInitialized)?Gt.values():!0===e.isInitialized?Yt(Lt([t,n])):Ie(Gt.values(),(({key:e})=>!t.has(e)&&!n.has(e)))})),u(this,"getInfo_UNSTABLE",(({key:e})=>(this.checkRefCount_INTERNAL(),$t(this._store,this._store.getState().currentTree,e)))),u(this,"map",(e=>{this.checkRefCount_INTERNAL();const t=new cn(this,Bt);return e(t),t})),u(this,"asyncMap",(async e=>{this.checkRefCount_INTERNAL();const t=new cn(this,Bt);return t.retain(),await e(t),t.autoRelease_INTERNAL(),t})),this._store={storeID:Wt(),parentStoreID:t,getState:()=>e,replaceState:t=>{e.currentTree=t(e.currentTree)},getGraph:t=>{const n=e.graphsByVersion;if(n.has(t))return s(n.get(t));const r=Ht();return n.set(t,r),r},subscribeToTransactions:()=>({release:()=>{}}),addTransactionMetadata:()=>{throw i("Cannot subscribe to Snapshots")}};for(const n of this._store.getState().knownAtoms)Vt(this._store,n,"get"),Jt(this._store,n,1);this.autoRelease_INTERNAL()}retain(){this._refCount<=0&&L("Attempt to retain() Snapshot that was already released."),this._refCount++;let e=!1;return()=>{e||(e=!0,this._release())}}autoRelease_INTERNAL(){rn||window.setTimeout((()=>this._release()),10)}_release(){if(this._refCount--,0===this._refCount){if(this._store.getState().nodeCleanupFunctions.forEach((e=>e())),this._store.getState().nodeCleanupFunctions.clear(),!P("recoil_memory_managament_2020"))return}else this._refCount}isRetained(){return this._refCount>0}checkRefCount_INTERNAL(){P("recoil_memory_managament_2020")&&this._refCount}getStore_INTERNAL(){return this.checkRefCount_INTERNAL(),this._store}getID(){return this.checkRefCount_INTERNAL(),this._store.getState().currentTree.stateID}getStoreID(){return this.checkRefCount_INTERNAL(),this._store.storeID}}function sn(e,t,n=!1){const r=e.getState(),o=n?tn():t.version;return{currentTree:{version:n?o:t.version,stateID:n?o:t.stateID,transactionMetadata:{...t.transactionMetadata},dirtyAtoms:new Set(t.dirtyAtoms),atomValues:t.atomValues.clone(),nonvalidatedAtoms:t.nonvalidatedAtoms.clone()},commitDepth:0,nextTree:null,previousTree:null,knownAtoms:new Set(r.knownAtoms),knownSelectors:new Set(r.knownSelectors),transactionSubscriptions:new Map,nodeTransactionSubscriptions:new Map,nodeToComponentSubscriptions:new Map,queuedComponentCallbacks_DEPRECATED:[],suspendedComponentResolvers:new Set,graphsByVersion:(new Map).set(o,e.getGraph(t.version)),retention:{referenceCounts:new Map,nodesRetainedByZone:new Map,retainablesToCheckForRelease:new Set},nodeCleanupFunctions:new Map(Q(r.nodeCleanupFunctions.entries(),(([e])=>[e,()=>{}])))}}const[un,ln]=on(((e,t)=>{var n;const r=e.getState(),o="latest"===t?null!==(n=r.nextTree)&&void 0!==n?n:r.currentTree:s(r.previousTree);return new an(sn(e,o),e.storeID)}),((e,t)=>{var n,r;return String(t)+String(e.storeID)+String(null===(n=e.getState().nextTree)||void 0===n?void 0:n.version)+String(e.getState().currentTree.version)+String(null===(r=e.getState().previousTree)||void 0===r?void 0:r.version)}));en(ln);class cn extends an{constructor(e,t){super(sn(e.getStore_INTERNAL(),e.getStore_INTERNAL().getState().currentTree,!0),e.getStoreID()),u(this,"_batch",void 0),u(this,"set",((e,t)=>{this.checkRefCount_INTERNAL();const n=this.getStore_INTERNAL();this._batch((()=>{Jt(n,e.key,1),Zt(this.getStore_INTERNAL(),e,t)}))})),u(this,"reset",(e=>{this.checkRefCount_INTERNAL();const t=this.getStore_INTERNAL();this._batch((()=>{Jt(t,e.key,1),Zt(this.getStore_INTERNAL(),e,qt)}))})),u(this,"setUnvalidatedAtomValues_DEPRECATED",(e=>{this.checkRefCount_INTERNAL();const 
t=this.getStore_INTERNAL();Bt((()=>{for(const[n,r]of e.entries())Jt(t,n,1),Qt(t,new Xt(n),r)}))})),this._batch=t}}var fn={Snapshot:an,MutableSnapshot:cn,freshSnapshot:function(e){const t=new an(nn());return null!=e?t.map(e):t},cloneSnapshot:function(e,t="latest"){const n=un(e,t);return n.isRetained()?n:(ln(),un(e,t))}},dn=fn.Snapshot,hn=fn.MutableSnapshot,pn=fn.freshSnapshot,gn=fn.cloneSnapshot,vn=Object.freeze({__proto__:null,Snapshot:dn,MutableSnapshot:hn,freshSnapshot:pn,cloneSnapshot:gn});var yn=function(...e){const t=new Set;for(const n of e)for(const e of n)t.add(e);return t};const{useRef:mn}=r;var bn=function(e){const t=mn(e);return t.current===e&&"function"===typeof e&&(t.current=e()),t};const{getNextTreeStateVersion:_n,makeEmptyStoreState:wn}=Te,{cleanUpNode:xn,getDownstreamNodes:Sn,initializeNode:An,setNodeValue:En,setUnvalidatedAtomValue_DEPRECATED:Cn}=We,{graph:Rn}=_e,{cloneGraph:On}=_e,{getNextStoreID:Tn}=Ae,{createMutableSource:kn,reactMode:Mn}=B,{applyAtomValueWrites:Pn}=gt,{releaseScheduledRetainablesNow:In}=Rt,{freshSnapshot:Nn}=vn,{useCallback:Dn,useContext:Ln,useEffect:Fn,useMemo:jn,useRef:Un,useState:zn}=r;function Bn(){throw i("This component must be used inside a <RecoilRoot> component.")}const Vn=Object.freeze({storeID:Tn(),getState:Bn,replaceState:Bn,getGraph:Bn,subscribeToTransactions:Bn,addTransactionMetadata:Bn});let $n=!1;function Hn(e){if($n)throw i("An atom update was triggered within the execution of a state updater function. State updater functions provided to Recoil must be pure functions.");const t=e.getState();if(null===t.nextTree){P("recoil_memory_managament_2020")&&P("recoil_release_on_cascading_update_killswitch_2021")&&t.commitDepth>0&&In(e);const n=t.currentTree.version,r=_n();t.nextTree={...t.currentTree,version:r,stateID:r,dirtyAtoms:new Set,transactionMetadata:{}},t.graphsByVersion.set(r,On(s(t.graphsByVersion.get(n))))}}const Wn=r.createContext({current:Vn}),qn=()=>Ln(Wn),Gn=r.createContext(null);function Yn(e,t,n){const r=Sn(e,n,n.dirtyAtoms);for(const o of r){const e=t.nodeToComponentSubscriptions.get(o);if(e)for(const[t,[r,o]]of e)o(n)}}function Xn(e){const t=e.getState(),n=t.currentTree,r=n.dirtyAtoms;if(r.size){for(const[n,o]of t.nodeTransactionSubscriptions)if(r.has(n))for(const[t,r]of o)r(e);for(const[n,r]of t.transactionSubscriptions)r(e);(!Mn().early||t.suspendedComponentResolvers.size>0)&&(Yn(e,t,n),t.suspendedComponentResolvers.forEach((e=>e())),t.suspendedComponentResolvers.clear())}t.queuedComponentCallbacks_DEPRECATED.forEach((e=>e(n))),t.queuedComponentCallbacks_DEPRECATED.splice(0,t.queuedComponentCallbacks_DEPRECATED.length)}function Kn({setNotifyBatcherOfChange:e}){const t=qn(),[,n]=zn([]);return e((()=>n({}))),Fn((()=>(e((()=>n({}))),()=>{e((()=>{}))})),[e]),Fn((()=>{ue.enqueueExecution("Batcher",(()=>{!function(e){const t=e.getState();t.commitDepth++;try{const{nextTree:n}=t;if(null==n)return;t.previousTree=t.currentTree,t.currentTree=n,t.nextTree=null,Xn(e),null!=t.previousTree?t.graphsByVersion.delete(t.previousTree.version):L("Ended batch with no previous state, which is unexpected","recoil"),t.previousTree=null,P("recoil_memory_managament_2020")&&null==n&&In(e)}finally{t.commitDepth--}}(t.current)}))})),null}let Zn=0;function Qn({initializeState_DEPRECATED:e,initializeState:t,store_INTERNAL:n,children:o}){let i;const a=e=>{const t=i.current.graphsByVersion;if(t.has(e))return s(t.get(e));const n=Rn();return t.set(e,n),n},u=(e,t)=>{if(null==t){const{transactionSubscriptions:t}=h.current.getState(),n=Zn++;return 
t.set(n,e),{release:()=>{t.delete(n)}}}{const{nodeTransactionSubscriptions:n}=h.current.getState();n.has(t)||n.set(t,new Map);const r=Zn++;return s(n.get(t)).set(r,e),{release:()=>{const e=n.get(t);e&&(e.delete(r),0===e.size&&n.delete(t))}}}},l=e=>{Hn(h.current);for(const t of Object.keys(e))s(h.current.getState().nextTree).transactionMetadata[t]=e[t]},c=e=>{Hn(h.current);const t=s(i.current.nextTree);let n;try{$n=!0,n=e(t)}finally{$n=!1}n!==t&&(i.current.nextTree=n,Mn().early&&Yn(h.current,i.current,n),s(f.current)())},f=Un(null),d=Dn((e=>{f.current=e}),[f]),h=bn((()=>null!==n&&void 0!==n?n:{storeID:Tn(),getState:()=>i.current,replaceState:c,getGraph:a,subscribeToTransactions:u,addTransactionMetadata:l}));null!=n&&(h.current=n),i=bn((()=>null!=e?function(e,t){const n=wn();return t({set:(t,r)=>{const o=n.currentTree,i=En(e,o,t.key,r),a=new Set(i.keys()),s=o.nonvalidatedAtoms.clone();for(const e of a)s.delete(e);n.currentTree={...o,dirtyAtoms:yn(o.dirtyAtoms,a),atomValues:Pn(o.atomValues,i),nonvalidatedAtoms:s}},setUnvalidatedAtomValues:e=>{e.forEach(((e,t)=>{n.currentTree=Cn(n.currentTree,t,e)}))}}),n}(h.current,e):null!=t?function(e){const t=Nn(e),n=t.getStore_INTERNAL().getState();return t.retain(),n.nodeCleanupFunctions.forEach((e=>e())),n.nodeCleanupFunctions.clear(),n}(t):wn()));const p=jn((()=>null===kn||void 0===kn?void 0:kn(i,(()=>i.current.currentTree.version))),[i]);return Fn((()=>{const e=h.current;for(const t of new Set(e.getState().knownAtoms))An(e,t,"get");return()=>{for(const t of e.getState().knownAtoms)xn(e,t)}}),[h]),r.createElement(Wn.Provider,{value:h},r.createElement(Gn.Provider,{value:p},r.createElement(Kn,{setNotifyBatcherOfChange:d}),o))}var Jn={RecoilRoot:function(e){const{override:t,...n}=e,o=qn();return!1===t&&o.current!==Vn?e.children:r.createElement(Qn,n)},useStoreRef:qn,useRecoilMutableSource:function(){const e=Ln(Gn);return null==e&&Z("Attempted to use a Recoil hook outside of a <RecoilRoot>. 
<RecoilRoot> must be an ancestor of any component that uses Recoil hooks."),e},useRecoilStoreID:function(){return qn().current.storeID},notifyComponents_FOR_TESTING:Yn,sendEndOfBatchNotifications_FOR_TESTING:Xn};var er=function(e,t){if(e===t)return!0;if(e.length!==t.length)return!1;for(let n=0,r=e.length;n<r;n++)if(e[n]!==t[n])return!1;return!0};const{useEffect:tr,useRef:nr}=r;var rr=function(e){const t=nr();return tr((()=>{t.current=e})),t.current};const{useStoreRef:or}=Jn,{SUSPENSE_TIMEOUT_MS:ir}=Rt,{updateRetainCount:ar}=Rt,{RetentionZone:sr}=Me,{useEffect:ur,useRef:lr}=r,{isSSR:cr}=Ut;var fr=function(e){if(P("recoil_memory_managament_2020"))return function(e){const t=Array.isArray(e)?e:[e],n=t.map((e=>e instanceof sr?e:e.key)),r=or();ur((()=>{if(!P("recoil_memory_managament_2020"))return;const e=r.current;if(o.current&&!cr)window.clearTimeout(o.current),o.current=null;else for(const t of n)ar(e,t,1);return()=>{for(const t of n)ar(e,t,-1)}}),[r,...n]);const o=lr(),i=rr(n);if(!cr&&(void 0===i||!er(i,n))){const e=r.current;for(const t of n)ar(e,t,1);if(i)for(const t of i)ar(e,t,-1);o.current&&window.clearTimeout(o.current),o.current=window.setTimeout((()=>{o.current=null;for(const t of n)ar(e,t,-1)}),ir)}}(e)};var dr=function(){return""};const{batchUpdates:hr}=Dt,{DEFAULT_VALUE:pr}=se,{currentRendererSupportsUseSyncExternalStore:gr,reactMode:vr,useMutableSource:yr,useSyncExternalStore:mr}=B,{useRecoilMutableSource:br,useStoreRef:_r}=Jn,{isRecoilValue:wr}=K,{AbstractRecoilValue:xr,getRecoilValueAsLoadable:Sr,setRecoilValue:Ar,setUnvalidatedRecoilValue:Er,subscribeToRecoilValue:Cr}=gt,{useCallback:Rr,useEffect:Or,useMemo:Tr,useRef:kr,useState:Mr}=r,{setByAddingToSet:Pr}=Pe,{isSSR:Ir}=Ut;function Nr(e,t,n){if("hasValue"===e.state)return e.contents;if("loading"===e.state){throw new Promise((t=>{const r=n.current.getState().suspendedComponentResolvers;r.add(t),Ir&&a(e.contents)&&e.contents.finally((()=>{r.delete(t)}))}))}throw"hasError"===e.state?e.contents:i(`Invalid value of loadable atom "${t.key}"`)}function Dr(e){const t=_r(),n=dr(),r=Rr((()=>{var n;const r=t.current,o=r.getState(),i=vr().early&&null!==(n=o.nextTree)&&void 0!==n?n:o.currentTree;return{loadable:Sr(r,e,i),key:e.key}}),[t,e]),o=Rr((e=>{let t;return()=>{var n,r;const o=e();return null!==(n=t)&&void 0!==n&&n.loadable.is(o.loadable)&&(null===(r=t)||void 0===r?void 0:r.key)===o.key?t:(t=o,o)}}),[]),i=Tr((()=>o(r)),[r,o]),a=Rr((r=>{const o=t.current;return Cr(o,e,r,n).release}),[t,e,n]);return mr(a,i,i).loadable}function Lr(e){const t=_r(),n=Rr((()=>{var n;const r=t.current,o=r.getState(),i=vr().early&&null!==(n=o.nextTree)&&void 0!==n?n:o.currentTree;return Sr(r,e,i)}),[t,e]),r=Rr((()=>n()),[n]),o=dr(),a=Rr(((r,i)=>{const a=t.current;return Cr(a,e,(()=>{if(!P("recoil_suppress_rerender_in_callback"))return i();const e=n();l.current.is(e)||i(),l.current=e}),o).release}),[t,e,o,n]),s=br();if(null==s)throw i("Recoil hooks must be used in components contained within a <RecoilRoot> component.");const u=yr(s,r,a),l=kr(u);return Or((()=>{l.current=u})),u}function Fr(e){const t=_r(),n=dr(),r=Rr((()=>{var n;const r=t.current,o=r.getState(),i=vr().early&&null!==(n=o.nextTree)&&void 0!==n?n:o.currentTree;return Sr(r,e,i)}),[t,e]),o=Rr((()=>({loadable:r(),key:e.key})),[r,e.key]),i=Rr((e=>{const t=o();return e.loadable.is(t.loadable)&&e.key===t.key?e:t}),[o]);Or((()=>{const r=Cr(t.current,e,(e=>{s(i)}),n);return s(i),r.release}),[n,e,t,i]);const[a,s]=Mr(o);return a.key!==e.key?o().loadable:a.loadable}function jr(e){const t=_r(),[,n]=Mr([]),r=dr(),o=Rr((()=>{var n;const r=t.current,o=r.getState(),i=vr().early&&null!==(n=o.nextTree)&&void 0!==n?n:o.currentTree;return 
Sr(r,e,i)}),[t,e]),i=o(),a=kr(i);return Or((()=>{a.current=i})),Or((()=>{const i=t.current,s=i.getState(),u=Cr(i,e,(e=>{var t;if(!P("recoil_suppress_rerender_in_callback"))return n([]);const r=o();null!==(t=a.current)&&void 0!==t&&t.is(r)||n(r),a.current=r}),r);if(s.nextTree)i.getState().queuedComponentCallbacks_DEPRECATED.push((()=>{a.current=null,n([])}));else{var l;if(!P("recoil_suppress_rerender_in_callback"))return n([]);const e=o();null!==(l=a.current)&&void 0!==l&&l.is(e)||n(e),a.current=e}return u.release}),[r,o,e,t]),i}function Ur(e){return P("recoil_memory_managament_2020")&&fr(e),{TRANSITION_SUPPORT:Fr,SYNC_EXTERNAL_STORE:gr()?Dr:Fr,MUTABLE_SOURCE:Lr,LEGACY:jr}[vr().mode](e)}function zr(e){const t=_r();return Nr(Ur(e),e,t)}function Br(e){const t=_r();return Rr((n=>{Ar(t.current,e,n)}),[t,e])}function Vr(e){return P("recoil_memory_managament_2020")&&fr(e),Fr(e)}function $r(e){const t=_r();return Nr(Vr(e),e,t)}var Hr={recoilComponentGetRecoilValueCount_FOR_TESTING:{current:0},useRecoilInterface:function(){const e=dr(),t=_r(),[,n]=Mr([]),r=kr(new Set);r.current=new Set;const o=kr(new Set),i=kr(new Map),a=Rr((e=>{const t=i.current.get(e);t&&(t.release(),i.current.delete(e))}),[i]),s=Rr(((e,t)=>{i.current.has(t)&&n([])}),[]);return Or((()=>{const n=t.current;ye(r.current,o.current).forEach((t=>{if(i.current.has(t))return void Z(`Double subscription to RecoilValue "${t}"`);const r=Cr(n,new xr(t),(e=>s(e,t)),e);i.current.set(t,r);n.getState().nextTree?n.getState().queuedComponentCallbacks_DEPRECATED.push((()=>{s(n.getState(),t)})):s(n.getState(),t)})),ye(o.current,r.current).forEach((e=>{a(e)})),o.current=r.current})),Or((()=>{const n=i.current;return ye(r.current,new Set(n.keys())).forEach((r=>{const o=Cr(t.current,new xr(r),(e=>s(e,r)),e);n.set(r,o)})),()=>n.forEach(((e,t)=>a(t)))}),[e,t,a,s]),Tr((()=>{function e(e){return n=>{Ar(t.current,e,n)}}function n(e){var n;r.current.has(e.key)||(r.current=Pr(r.current,e.key));const o=t.current.getState();return Sr(t.current,e,vr().early&&null!==(n=o.nextTree)&&void 0!==n?n:o.currentTree)}function o(e){return Nr(n(e),e,t)}return{getRecoilValue:o,getRecoilValueLoadable:n,getRecoilState:function(t){return[o(t),e(t)]},getRecoilStateLoadable:function(t){return[n(t),e(t)]},getSetRecoilState:e,getResetRecoilState:function(e){return()=>Ar(t.current,e,pr)}}}),[r,t])},useRecoilState:function(e){return[zr(e),Br(e)]},useRecoilStateLoadable:function(e){return[Ur(e),Br(e)]},useRecoilValue:zr,useRecoilValueLoadable:Ur,useResetRecoilState:function(e){const t=_r();return Rr((()=>{Ar(t.current,e,pr)}),[t,e])},useSetRecoilState:Br,useSetUnvalidatedAtomValues:function(){const e=_r();return(t,n={})=>{hr((()=>{e.current.addTransactionMetadata(n),t.forEach(((t,n)=>Er(e.current,new xr(n),t)))}))}},useRecoilValueLoadable_TRANSITION_SUPPORT_UNSTABLE:Vr,useRecoilValue_TRANSITION_SUPPORT_UNSTABLE:$r,useRecoilState_TRANSITION_SUPPORT_UNSTABLE:function(e){return[$r(e),Br(e)]}};var Wr=function(e,t){const n=new Map;for(const[r,o]of e)t(o,r)&&n.set(r,o);return n};var qr=function(e,t){const n=new Set;for(const r of e)t(r)&&n.add(r);return n};var Gr=function(...e){const t=new Map;for(let n=0;n<e.length;n++){const r=e[n].keys();let o;for(;!(o=r.next()).done;)t.set(o.value,e[n].get(o.value))}return t};const{batchUpdates:Yr}=Dt,{DEFAULT_VALUE:Xr,getNode:Kr,nodes:Zr}=se,{useStoreRef:Qr}=Jn,{AbstractRecoilValue:Jr,setRecoilValueLoadable:eo}=gt,{SUSPENSE_TIMEOUT_MS:to}=Rt,{cloneSnapshot:no}=vn,{useCallback:ro,useEffect:oo,useRef:io,useState:ao}=r,{isSSR:so}=Ut;function uo(e){const t=Qr();oo((()=>t.current.subscribeToTransactions(e).release),[e,t])}function lo(e){const t=e.atomValues.toMap(),n=me(Wr(t,((e,t)=>{const n=Kr(t).persistence_UNSTABLE;return null!=n&&"none"!==n.type&&"hasValue"===e.state})),(e=>e.contents));return Gr(e.nonvalidatedAtoms.toMap(),n)}function co(e,t){var n;const r=e.getState(),o=null!==(n=r.nextTree)&&void 
0!==n?n:r.currentTree,i=t.getStore_INTERNAL().getState().currentTree;Yr((()=>{const n=new Set;for(const e of[o.atomValues.keys(),i.atomValues.keys()])for(const t of e){var r,a;(null===(r=o.atomValues.get(t))||void 0===r?void 0:r.contents)!==(null===(a=i.atomValues.get(t))||void 0===a?void 0:a.contents)&&Kr(t).shouldRestoreFromSnapshots&&n.add(t)}n.forEach((t=>{eo(e,new Jr(t),i.atomValues.has(t)?s(i.atomValues.get(t)):Xr)})),e.replaceState((e=>({...e,stateID:t.getID()})))}))}var fo={useRecoilSnapshot:function(){const e=Qr(),[t,n]=ao((()=>no(e.current))),r=rr(t),o=io(),i=io();if(uo(ro((e=>n(no(e))),[])),oo((()=>{const e=t.retain();var n;o.current&&!so&&(window.clearTimeout(o.current),o.current=null,null===(n=i.current)||void 0===n||n.call(i),i.current=null);return()=>{window.setTimeout(e,10)}}),[t]),r!==t&&!so){var a;if(o.current)window.clearTimeout(o.current),o.current=null,null===(a=i.current)||void 0===a||a.call(i),i.current=null;i.current=t.retain(),o.current=window.setTimeout((()=>{var e;o.current=null,null===(e=i.current)||void 0===e||e.call(i),i.current=null}),to)}return t},gotoSnapshot:co,useGotoRecoilSnapshot:function(){const e=Qr();return ro((t=>co(e.current,t)),[e])},useRecoilTransactionObserver:function(e){uo(ro((t=>{const n=no(t,"latest"),r=no(t,"previous");e({snapshot:n,previousSnapshot:r})}),[e]))},useTransactionObservation_DEPRECATED:function(e){uo(ro((t=>{let n=t.getState().previousTree;const r=t.getState().currentTree;n||(L("Transaction subscribers notified without a previous tree being present -- this is a bug in Recoil"),n=t.getState().currentTree);const o=lo(r),i=lo(n),a=me(Zr,(e=>{var t,n,r,o;return{persistence_UNSTABLE:{type:null!==(t=null===(n=e.persistence_UNSTABLE)||void 0===n?void 0:n.type)&&void 0!==t?t:"none",backButton:null!==(r=null===(o=e.persistence_UNSTABLE)||void 0===o?void 0:o.backButton)&&void 0!==r&&r}}})),s=qr(r.dirtyAtoms,(e=>o.has(e)||i.has(e)));e({atomValues:o,previousAtomValues:i,atomInfo:a,modifiedAtoms:s,transactionMetadata:{...r.transactionMetadata}})}),[e]))},useTransactionSubscription_DEPRECATED:uo};const{peekNodeInfo:ho}=We,{useStoreRef:po}=Jn;var go=function(){const e=po();return({key:t})=>ho(e.current,e.current.getState().currentTree,t)};const{reactMode:vo}=B,{RecoilRoot:yo,useStoreRef:mo}=Jn,{useMemo:bo}=r;var _o=function(){"MUTABLE_SOURCE"===vo().mode&&console.warn("Warning: There are known issues using useRecoilBridgeAcrossReactRoots() in recoil_mutable_source rendering mode. 
Please consider upgrading to recoil_sync_external_store mode.");const e=mo().current;return bo((()=>function({children:t}){return r.createElement(yo,{store_INTERNAL:e},t)}),[e])};const{loadableWithValue:wo}=O,{initializeNode:xo}=We,{DEFAULT_VALUE:So,getNode:Ao}=se,{copyTreeState:Eo,getRecoilValueAsLoadable:Co,invalidateDownstreams:Ro,writeLoadableToTreeState:Oo}=gt;function To(e){return"atom"===Ao(e.key).nodeType}class ko{constructor(e,t){u(this,"_store",void 0),u(this,"_treeState",void 0),u(this,"_changes",void 0),u(this,"get",(e=>{if(this._changes.has(e.key))return this._changes.get(e.key);if(!To(e))throw i("Reading selectors within atomicUpdate is not supported");const t=Co(this._store,e,this._treeState);if("hasValue"===t.state)return t.contents;throw"hasError"===t.state?t.contents:i(`Expected Recoil atom ${e.key} to have a value, but it is in a loading state.`)})),u(this,"set",((e,t)=>{if(!To(e))throw i("Setting selectors within atomicUpdate is not supported");if("function"===typeof t){const n=this.get(e);this._changes.set(e.key,t(n))}else xo(this._store,e.key,"set"),this._changes.set(e.key,t)})),u(this,"reset",(e=>{this.set(e,So)})),this._store=e,this._treeState=t,this._changes=new Map}newTreeState_INTERNAL(){if(0===this._changes.size)return this._treeState;const e=Eo(this._treeState);for(const[t,n]of this._changes)Oo(e,t,wo(n));return Ro(this._store,e),e}}var Mo=function(e){return t=>{e.replaceState((n=>{const r=new ko(e,n);return t(r),r.newTreeState_INTERNAL()}))}},Po=Mo,Io=Object.freeze({__proto__:null,atomicUpdater:Po});var No=function(e,t){if(!e)throw new Error(t)};const{atomicUpdater:Do}=Io,{batchUpdates:Lo}=Dt,{DEFAULT_VALUE:Fo}=se,{useStoreRef:jo}=Jn,{refreshRecoilValue:Uo,setRecoilValue:zo}=gt,{cloneSnapshot:Bo}=vn,{gotoSnapshot:Vo}=fo,{useCallback:$o}=r;class Ho{}const Wo=new Ho;function qo(e,t,n,r){let o,s=Wo;var u;(Lo((()=>{const a="useRecoilCallback() expects a function that returns a function: it accepts a function of the type (RecoilInterface) => (Args) => ReturnType and returns a callback function (Args) => ReturnType, where RecoilInterface is an object {snapshot, set, ...} and Args and ReturnType are the argument and return types of the callback you want to create. 
Please see the docs at recoiljs.org for details.";if("function"!==typeof t)throw i(a);const u=Ne({...null!==r&&void 0!==r?r:{},set:(t,n)=>zo(e,t,n),reset:t=>zo(e,t,Fo),refresh:t=>Uo(e,t),gotoSnapshot:t=>Vo(e,t),transact_UNSTABLE:t=>Do(e)(t)},{snapshot:()=>{const t=Bo(e);return o=t.retain(),t}}),l=t(u);if("function"!==typeof l)throw i(a);s=l(...n)})),s instanceof Ho&&No(!1),a(s))?s=s.finally((()=>{var e;null===(e=o)||void 0===e||e()})):null===(u=o)||void 0===u||u();return s}var Go={recoilCallback:qo,useRecoilCallback:function(e,t){const n=jo();return $o(((...t)=>qo(n.current,e,t)),null!=t?[...t,n]:void 0)}};const{useStoreRef:Yo}=Jn,{refreshRecoilValue:Xo}=gt,{useCallback:Ko}=r;var Zo=function(e){const t=Yo();return Ko((()=>{const n=t.current;Xo(n,e)}),[e,t])};const{atomicUpdater:Qo}=Io,{useStoreRef:Jo}=Jn,{useMemo:ei}=r;var ti=function(e,t){const n=Jo();return ei((()=>(...t)=>{Qo(n.current)((n=>{e(n)(...t)}))}),null!=t?[...t,n]:void 0)};var ni=class{constructor(e){u(this,"value",void 0),this.value=e}},ri=Object.freeze({__proto__:null,WrappedValue:ni});const{isFastRefreshEnabled:oi}=B;class ii extends Error{}var ai=class{constructor(e){var t,n,r;u(this,"_name",void 0),u(this,"_numLeafs",void 0),u(this,"_root",void 0),u(this,"_onHit",void 0),u(this,"_onSet",void 0),u(this,"_mapNodeValue",void 0),this._name=null===e||void 0===e?void 0:e.name,this._numLeafs=0,this._root=null,this._onHit=null!==(t=null===e||void 0===e?void 0:e.onHit)&&void 0!==t?t:()=>{},this._onSet=null!==(n=null===e||void 0===e?void 0:e.onSet)&&void 0!==n?n:()=>{},this._mapNodeValue=null!==(r=null===e||void 0===e?void 0:e.mapNodeValue)&&void 0!==r?r:e=>e}size(){return this._numLeafs}root(){return this._root}get(e,t){var n;return null===(n=this.getLeafNode(e,t))||void 0===n?void 0:n.value}getLeafNode(e,t){if(null==this._root)return;let n=this._root;for(;n;){if(null===t||void 0===t||t.onNodeVisit(n),"leaf"===n.type)return this._onHit(n),n;const r=this._mapNodeValue(e(n.nodeKey));n=n.branches.get(r)}}set(e,t,n){const r=()=>{var r,o,i,a;let s,u;for(const[t,p]of e){var l,c,f;const e=this._root;if("leaf"===(null===e||void 0===e?void 0:e.type))throw this.invalidCacheError();const r=s;if(s=r?r.branches.get(u):e,s=null!==(l=s)&&void 0!==l?l:{type:"branch",nodeKey:t,parent:r,branches:new Map,branchKey:u},"branch"!==s.type||s.nodeKey!==t)throw this.invalidCacheError();null===r||void 0===r||r.branches.set(u,s),null===n||void 0===n||null===(c=n.onNodeVisit)||void 0===c||c.call(n,s),u=this._mapNodeValue(p),this._root=null!==(f=this._root)&&void 0!==f?f:s}const d=s?null===(r=s)||void 0===r?void 0:r.branches.get(u):this._root;if(null!=d&&("leaf"!==d.type||d.branchKey!==u))throw this.invalidCacheError();const h={type:"leaf",value:t,parent:s,branchKey:u};null===(o=s)||void 0===o||o.branches.set(u,h),this._root=null!==(i=this._root)&&void 0!==i?i:h,this._numLeafs++,this._onSet(h),null===n||void 0===n||null===(a=n.onNodeVisit)||void 0===a||a.call(n,h)};try{r()}catch(o){if(!(o instanceof ii))throw o;this.clear(),r()}}delete(e){const t=this.root();if(!t)return!1;if(e===t)return this._root=null,this._numLeafs=0,!0;let n=e.parent,r=e.branchKey;for(;n;){var o;if(n.branches.delete(r),n===t)return 0===n.branches.size?(this._root=null,this._numLeafs=0):this._numLeafs--,!0;if(n.branches.size>0)break;r=null===(o=n)||void 0===o?void 0:o.branchKey,n=n.parent}for(;n!==t;n=n.parent)if(null==n)return!1;return this._numLeafs--,!0}clear(){this._numLeafs=0,this._root=null}invalidCacheError(){const e=oi()?"Possible Fast Refresh module reload detected. 
This may also be caused by an selector returning inconsistent values. Resetting cache.":"Invalid cache values. This happens when selectors do not return consistent values for the same input dependency values. That may also be caused when using Fast Refresh to change a selector implementation. Resetting cache.";throw L(e+(null!=this._name?` - ${this._name}`:"")),new ii}},si=Object.freeze({__proto__:null,TreeCache:ai});var ui=class{constructor(e){var t;u(this,"_maxSize",void 0),u(this,"_size",void 0),u(this,"_head",void 0),u(this,"_tail",void 0),u(this,"_map",void 0),u(this,"_keyMapper",void 0),this._maxSize=e.maxSize,this._size=0,this._head=null,this._tail=null,this._map=new Map,this._keyMapper=null!==(t=e.mapKey)&&void 0!==t?t:e=>e}head(){return this._head}tail(){return this._tail}size(){return this._size}maxSize(){return this._maxSize}has(e){return this._map.has(this._keyMapper(e))}get(e){const t=this._keyMapper(e),n=this._map.get(t);if(n)return this.set(e,n.value),n.value}set(e,t){const n=this._keyMapper(e);this._map.get(n)&&this.delete(e);const r=this.head(),o={key:e,right:r,left:null,value:t};r?r.left=o:this._tail=o,this._map.set(n,o),this._head=o,this._size++,this._maybeDeleteLRU()}_maybeDeleteLRU(){this.size()>this.maxSize()&&this.deleteLru()}deleteLru(){const e=this.tail();e&&this.delete(e.key)}delete(e){const t=this._keyMapper(e);if(!this._size||!this._map.has(t))return;const n=s(this._map.get(t)),r=n.right,o=n.left;r&&(r.left=n.left),o&&(o.right=n.right),n===this.head()&&(this._head=r),n===this.tail()&&(this._tail=o),this._map.delete(t),this._size--}clear(){this._size=0,this._head=null,this._tail=null,this._map=new Map}},li=Object.freeze({__proto__:null,LRUCache:ui});const{LRUCache:ci}=li,{TreeCache:fi}=si;var di=function({name:e,maxSize:t,mapNodeValue:n=(e=>e)}){const r=new ci({maxSize:t}),o=new fi({name:e,mapNodeValue:n,onHit:e=>{r.set(e,!0)},onSet:e=>{const n=r.tail();r.set(e,!0),n&&o.size()>t&&o.delete(n.key)}});return o};function hi(e,t,n){if("string"===typeof e&&!e.includes('"')&&!e.includes("\\"))return`"${e}"`;switch(typeof e){case"undefined":return"";case"boolean":return e?"true":"false";case"number":case"symbol":return String(e);case"string":return JSON.stringify(e);case"function":if(!0!==(null===t||void 0===t?void 0:t.allowFunctions))throw i("Attempt to serialize function in a Recoil cache key");return`__FUNCTION(${e.name})__`}if(null===e)return"null";var r;if("object"!==typeof e)return null!==(r=JSON.stringify(e))&&void 0!==r?r:"";if(a(e))return"__PROMISE__";if(Array.isArray(e))return`[${e.map(((e,n)=>hi(e,t,n.toString())))}]`;if("function"===typeof e.toJSON)return hi(e.toJSON(n),t,n);if(e instanceof Map){const r={};for(const[n,o]of e)r["string"===typeof n?n:hi(n,t)]=o;return hi(r,t,n)}return e instanceof Set?hi(Array.from(e).sort(((e,n)=>hi(e,t).localeCompare(hi(n,t)))),t,n):void 0!==Symbol&&null!=e[Symbol.iterator]&&"function"===typeof e[Symbol.iterator]?hi(Array.from(e),t,n):`{${Object.keys(e).filter((t=>void 0!==e[t])).sort().map((n=>`${hi(n,t)}:${hi(e[n],t,n)}`)).join(",")}}`}var pi=function(e,t={allowFunctions:!1}){return hi(e,t)};const{TreeCache:gi}=si,vi={equality:"reference",eviction:"keep-all",maxSize:1/0};var yi=function({equality:e=vi.equality,eviction:t=vi.eviction,maxSize:n=vi.maxSize}=vi,r){const o=function(e){switch(e){case"reference":return e=>e;case"value":return e=>pi(e)}throw i(`Unrecognized equality policy ${e}`)}(e);return function(e,t,n,r){switch(e){case"keep-all":return new gi({name:r,mapNodeValue:n});case"lru":return 
di({name:r,maxSize:s(t),mapNodeValue:n});case"most-recent":return di({name:r,maxSize:1,mapNodeValue:n})}throw i(`Unrecognized eviction policy ${e}`)}(t,n,o,r)};const{isReactNative:mi,isWindow:bi}=Ut;var _i={startPerfBlock:function(e){return()=>null}};const{isLoadable:wi,loadableWithError:xi,loadableWithPromise:Si,loadableWithValue:Ai}=O,{WrappedValue:Ei}=ri,{getNodeLoadable:Ci,peekNodeLoadable:Ri,setNodeValue:Oi}=We,{saveDepsToStore:Ti}=_e,{DEFAULT_VALUE:ki,getConfigDeletionHandler:Mi,getNode:Pi,registerNode:Ii}=se,{isRecoilValue:Ni}=K,{markRecoilValueModified:Di}=gt,{retainedByOptionWithDefault:Li}=Rt,{recoilCallback:Fi}=Go,{startPerfBlock:ji}=_i;class Ui{}const zi=new Ui,Bi=[],Vi=new Map,$i=(()=>{let e=0;return()=>e++})();function Hi(e){let t=null;const{key:n,get:r,cachePolicy_UNSTABLE:o}=e,u=null!=e.set?e.set:void 0;const l=new Set,c=yi(null!==o&&void 0!==o?o:{equality:"reference",eviction:"keep-all"},n),f=Li(e.retainedBy_UNSTABLE),d=new Map;let h=0;function p(){return!P("recoil_memory_managament_2020")||h>0}function g(e){return e.getState().knownSelectors.add(n),h++,()=>{h--}}function v(){return void 0!==Mi(n)&&!p()}function y(e,t,n,r,o){k(t,r,o),m(e,n)}function m(e,t){T(e,t)&&O(e),b(t,!0)}function b(e,n){const r=Vi.get(e);if(null!=r){for(const e of r)Di(e,s(t));n&&Vi.delete(e)}}function _(e,t){let n=Vi.get(t);null==n&&Vi.set(t,n=new Set),n.add(e)}function w(e,t,n,r,o,i){return t.then((r=>{if(!p())throw O(e),zi;null!=i.loadingDepKey&&i.loadingDepPromise===t?n.atomValues.set(i.loadingDepKey,Ai(r)):e.getState().knownSelectors.forEach((e=>{n.atomValues.delete(e)}));const a=A(e,n);if(a&&"loading"!==a.state){if((T(e,o)||null==R(e))&&m(e,o),"hasValue"===a.state)return a.contents;throw a.contents}if(!T(e,o)){const t=C(e,n);if(null!=t)return t.loadingLoadable.contents}const[s,u]=S(e,n,o);if("loading"!==s.state&&y(e,n,o,s,u),"hasError"===s.state)throw s.contents;return s.contents})).catch((t=>{if(t instanceof Ui)throw zi;if(!p())throw O(e),zi;const i=xi(t);throw y(e,n,o,i,r),t}))}function x(e,t,r,o){var i,a,s,u,c,f,d;(T(e,o)||t.version===(null===(i=e.getState())||void 0===i||null===(a=i.currentTree)||void 0===a?void 0:a.version)||t.version===(null===(s=e.getState())||void 0===s||null===(u=s.nextTree)||void 0===u?void 0:u.version))&&Ti(n,r,e,null!==(c=null===(f=e.getState())||void 0===f||null===(d=f.nextTree)||void 0===d?void 0:d.version)&&void 0!==c?c:e.getState().currentTree.version);for(const n of r)l.add(n)}function S(e,o,u){const l=ji(n);let c=!0,f=!0;const d=()=>{l(),f=!1};let h,g,v=!1;const m={loadingDepKey:null,loadingDepPromise:null},_=new Map;function S({key:t}){const n=Ci(e,o,t);switch(_.set(t,n),c||(x(e,o,new Set(_.keys()),u),function(e,t){T(e,t)&&(s(R(e)).stateVersions.clear(),b(t,!1))}(e,u)),n.state){case"hasValue":return n.contents;case"hasError":throw n.contents;case"loading":throw m.loadingDepKey=t,m.loadingDepPromise=n.contents,n.contents}throw i("Invalid Loadable state")}const A=n=>(...r)=>{if(f)throw i("Callbacks from getCallback() should only be called asynchronously after the selector is evalutated. 
It can be used for selectors to return objects with callbacks that can work with Recoil state without a subscription.");return null==t&&No(!1),Fi(e,n,r,{node:t})};try{h=r({get:S,getCallback:A}),h=Ni(h)?S(h):h,wi(h)&&("hasError"===h.state&&(v=!0),h=h.contents),a(h)?h=function(e,t,n,r,o,i){return t.then((t=>{if(!p())throw O(e),zi;const i=Ai(t);return y(e,n,o,i,r),t})).catch((t=>{if(!p())throw O(e),zi;if(a(t))return w(e,t,n,r,o,i);const s=xi(t);throw y(e,n,o,s,r),t}))}(e,h,o,_,u,m).finally(d):d(),h=h instanceof Ei?h.value:h}catch(E){h=E,a(h)?h=w(e,h,o,_,u,m).finally(d):(v=!0,d())}return g=v?xi(h):a(h)?Si(h):Ai(h),c=!1,function(e,t,n){if(T(e,t)){const t=R(e);null!=t&&(t.depValuesDiscoveredSoFarDuringAsyncWork=n)}}(e,u,_),x(e,o,new Set(_.keys()),u),[g,_]}function A(e,t){let r=t.atomValues.get(n);if(null!=r)return r;const o=new Set;try{r=c.get((n=>("string"!==typeof n&&No(!1),Ci(e,t,n).contents)),{onNodeVisit:e=>{"branch"===e.type&&e.nodeKey!==n&&o.add(e.nodeKey)}})}catch(s){throw i(`Problem with cache lookup for selector "${n}": ${s.message}`)}var a;r&&(t.atomValues.set(n,r),x(e,t,o,null===(a=R(e))||void 0===a?void 0:a.executionID));return r}function E(e,t){const n=A(e,t);if(null!=n)return O(e),n;const r=C(e,t);var o;if(null!=r)return"loading"===(null===(o=r.loadingLoadable)||void 0===o?void 0:o.state)&&_(e,r.executionID),r.loadingLoadable;const i=$i(),[a,s]=S(e,t,i);return"loading"===a.state?(!function(e,t,n,r,o){d.set(e,{depValuesDiscoveredSoFarDuringAsyncWork:r,executionID:t,loadingLoadable:n,stateVersions:new Map([[o.version,!0]])})}(e,i,a,s,t),_(e,i)):(O(e),k(t,a,s)),a}function C(e,t){const n=Lt([d.has(e)?[s(d.get(e))]:[],Q(Ie(d,(([t])=>t!==e)),(([,e])=>e))]);function r(n){for(const[r,o]of n)if(!Ci(e,t,r).is(o))return!0;return!1}for(const o of n){if(o.stateVersions.get(t.version)||!r(o.depValuesDiscoveredSoFarDuringAsyncWork))return o.stateVersions.set(t.version,!0),o;o.stateVersions.set(t.version,!1)}}function R(e){return d.get(e)}function O(e){d.delete(e)}function T(e,t){var n;return t===(null===(n=R(e))||void 0===n?void 0:n.executionID)}function k(e,t,r){e.atomValues.set(n,t);try{c.set(function(e){return Array.from(e.entries()).map((([e,t])=>[e,t.contents]))}(r),t)}catch(o){throw i(`Problem with setting cache for selector "${n}": ${o.message}`)}}function M(e,t){const r=t.atomValues.get(n);return null!=r?r:c.get((n=>{var r;return"string"!==typeof n&&No(!1),null===(r=Ri(e,t,n))||void 0===r?void 0:r.contents}))}function I(e,t){return function(e){if(Bi.includes(n)){const e=`Recoil selector has circular dependencies: ${Bi.slice(Bi.indexOf(n)).join(" \u2192 ")}`;return xi(i(e))}Bi.push(n);try{return e()}finally{Bi.pop()}}((()=>E(e,t)))}function N(e){e.atomValues.delete(n)}function D(e,n){null==t&&No(!1);for(const t of l){var r;const o=Pi(t);null===(r=o.clearCache)||void 0===r||r.call(o,e,n)}l.clear(),N(n),c.clear(),Di(e,t)}if(null!=u){return t=Ii({key:n,nodeType:"selector",peek:M,get:I,set:(e,t,r)=>{let o=!1;const s=new Map;function l({key:r}){if(o)throw i("Recoil: Async selector sets are not currently supported.");const a=Ci(e,t,r);if("hasValue"===a.state)return a.contents;if("loading"===a.state){const e=`Getting value of asynchronous atom or selector "${r}" in a pending state while setting selector "${n}" is not yet supported.`;throw L(e),i(e)}throw a.contents}function c(n,r){if(o){const e="Recoil: Async selector sets are not currently supported.";throw L(e),i(e)}const a="function"===typeof r?r(l(n)):r;Oi(e,t,n.key,a).forEach(((e,t)=>s.set(t,e)))}const 
f=u({set:c,get:l,reset:function(e){c(e,ki)}},r);if(void 0!==f)throw a(f)?i("Recoil: Async selector sets are not currently supported."):i("Recoil: selector set should be a void function.");return o=!0,s},init:g,invalidate:N,clearCache:D,shouldDeleteConfigOnRelease:v,dangerouslyAllowMutability:e.dangerouslyAllowMutability,shouldRestoreFromSnapshots:!1,retainedBy:f})}return t=Ii({key:n,nodeType:"selector",peek:M,get:I,init:g,invalidate:N,clearCache:D,shouldDeleteConfigOnRelease:v,dangerouslyAllowMutability:e.dangerouslyAllowMutability,shouldRestoreFromSnapshots:!1,retainedBy:f})}Hi.value=e=>new Ei(e);var Wi=Hi;const{isLoadable:qi,loadableWithError:Gi,loadableWithPromise:Yi,loadableWithValue:Xi}=O,{WrappedValue:Ki}=ri,{peekNodeInfo:Zi}=We,{DEFAULT_VALUE:Qi,DefaultValue:Ji,getConfigDeletionHandler:ea,registerNode:ta,setConfigDeletionHandler:na}=se,{isRecoilValue:ra}=K,{getRecoilValueAsLoadable:oa,markRecoilValueModified:ia,setRecoilValue:aa,setRecoilValueLoadable:sa}=gt,{retainedByOptionWithDefault:ua}=Rt,la=e=>e instanceof Ki?e.value:e;function ca(e){const{key:t,persistence_UNSTABLE:n}=e,r=ua(e.retainedBy_UNSTABLE);let o=0;function u(e){return Yi(e.then((e=>(c=Xi(e),e))).catch((e=>{throw c=Gi(e),e})))}let l,c=a(e.default)?u(e.default):qi(e.default)?"loading"===e.default.state?u(e.default.contents):e.default:Xi(la(e.default));d(c.contents);const f=new Map;function d(e){return e}function h(e,n){var r,o;return null!==(r=null!==(o=n.atomValues.get(t))&&void 0!==o?o:l)&&void 0!==r?r:c}const p=ta({key:t,nodeType:"atom",peek:h,get:function(e,r){if(r.atomValues.has(t))return s(r.atomValues.get(t));if(r.nonvalidatedAtoms.has(t)){if(null!=l)return l;if(null==n)return Z(`Tried to restore a persisted value for atom ${t} but it has no persistence settings.`),c;const e=r.nonvalidatedAtoms.get(t),o=n.validator(e,Qi),i=o instanceof Ji?c:Xi(o);return l=i,l}return c},set:function(e,n,r){if(n.atomValues.has(t)){const e=s(n.atomValues.get(t));if("hasValue"===e.state&&r===e.contents)return new Map}else if(!n.nonvalidatedAtoms.has(t)&&r instanceof Ji)return new Map;return l=void 0,(new Map).set(t,Xi(r))},init:function(n,r,s){var u;if(o++,n.getState().knownAtoms.add(t),"loading"===c.state){const y=()=>{var e;(null!==(e=n.getState().nextTree)&&void 0!==e?e:n.getState().currentTree).atomValues.has(t)||ia(n,p)};c.contents.finally(y)}const l=null!==(u=e.effects)&&void 0!==u?u:e.effects_UNSTABLE;if(null!=l){let m=Qi,b=!0,_=!1,w=null;function x(e){if(b&&e.key===t){const e=m;return e instanceof Ji?h(n,r):a(e)?Yi(e.then((e=>e instanceof Ji?c.toPromise():e))):Xi(e)}return oa(n,e)}function S(e){return x(e).toPromise()}function A(e){var r;const o=Zi(n,null!==(r=n.getState().nextTree)&&void 0!==r?r:n.getState().currentTree,e.key);return!b||e.key!==t||m instanceof Ji?o:{...o,isSet:!0,loadable:x(e)}}const E=e=>t=>{if(b){const n=x(p),r="hasValue"===n.state?n.contents:Qi;m="function"===typeof t?t(r):t,a(m)&&(m=m.then((t=>(w={effect:e,value:t},t))))}else{if(a(t))throw i("Setting atoms to async values is not implemented.");"function"!==typeof t&&(w={effect:e,value:la(t)}),aa(n,p,"function"===typeof t?n=>{const r=la(t(n));return w={effect:e,value:r},r}:la(t))}},C=e=>()=>E(e)(Qi),R=e=>r=>{var o;const{release:i}=n.subscribeToTransactions((n=>{var o;let{currentTree:i,previousTree:a}=n.getState();a||(L("Transaction subscribers notified without a next tree being present -- this is a bug in Recoil"),a=i);const s=null!==(o=i.atomValues.get(t))&&void 0!==o?o:c;if("hasValue"===s.state){var u,l,f,d;const 
n=s.contents,o=null!==(u=a.atomValues.get(t))&&void 0!==u?u:c,h="hasValue"===o.state?o.contents:Qi;(null===(l=w)||void 0===l?void 0:l.effect)!==e||(null===(f=w)||void 0===f?void 0:f.value)!==n?r(n,h,!i.atomValues.has(t)):(null===(d=w)||void 0===d?void 0:d.effect)===e&&(w=null)}}),t);f.set(n,[...null!==(o=f.get(n))&&void 0!==o?o:[],i])};for(const O of l)try{const T=O({node:p,storeID:n.storeID,parentStoreID_UNSTABLE:n.parentStoreID,trigger:s,setSelf:E(O),resetSelf:C(O),onSet:R(O),getPromise:S,getLoadable:x,getInfo_UNSTABLE:A});var d;if(null!=T)f.set(n,[...null!==(d=f.get(n))&&void 0!==d?d:[],T])}catch(v){m=v,_=!0}if(b=!1,!(m instanceof Ji)){var g;const k=_?Gi(m):a(m)?Yi(function(e,n){const r=n.then((n=>{var o,i;return(null===(i=(null!==(o=e.getState().nextTree)&&void 0!==o?o:e.getState().currentTree).atomValues.get(t))||void 0===i?void 0:i.contents)===r&&aa(e,p,n),n})).catch((n=>{var o,i;throw(null===(i=(null!==(o=e.getState().nextTree)&&void 0!==o?o:e.getState().currentTree).atomValues.get(t))||void 0===i?void 0:i.contents)===r&&sa(e,p,Gi(n)),n}));return r}(n,m)):Xi(la(m));k.contents,r.atomValues.set(t,k),null===(g=n.getState().nextTree)||void 0===g||g.atomValues.set(t,k)}}return()=>{var e;o--,null===(e=f.get(n))||void 0===e||e.forEach((e=>e())),f.delete(n)}},invalidate:function(){l=void 0},shouldDeleteConfigOnRelease:function(){return void 0!==ea(t)&&o<=0},dangerouslyAllowMutability:e.dangerouslyAllowMutability,persistence_UNSTABLE:e.persistence_UNSTABLE?{type:e.persistence_UNSTABLE.type,backButton:e.persistence_UNSTABLE.backButton}:void 0,shouldRestoreFromSnapshots:!0,retainedBy:r});return p}function fa(e){const{...t}=e,n="default"in e?e.default:new Promise((()=>{}));return ra(n)?function(e){const t=fa({...e,default:Qi,persistence_UNSTABLE:void 0===e.persistence_UNSTABLE?void 0:{...e.persistence_UNSTABLE,validator:t=>t instanceof Ji?t:s(e.persistence_UNSTABLE).validator(t,Qi)},effects:e.effects,effects_UNSTABLE:e.effects_UNSTABLE}),n=Wi({key:`${e.key}__withFallback`,get:({get:n})=>{const r=n(t);return r instanceof Ji?e.default:r},set:({set:e},n)=>e(t,n),cachePolicy_UNSTABLE:{eviction:"most-recent"},dangerouslyAllowMutability:e.dangerouslyAllowMutability});return na(n.key,ea(e.key)),n}({...t,default:n}):ca({...t,default:n})}fa.value=e=>new Ki(e);var da=fa;var ha=class{constructor(e){var t;u(this,"_map",void 0),u(this,"_keyMapper",void 0),this._map=new Map,this._keyMapper=null!==(t=null===e||void 0===e?void 0:e.mapKey)&&void 0!==t?t:e=>e}size(){return this._map.size}has(e){return this._map.has(this._keyMapper(e))}get(e){return this._map.get(this._keyMapper(e))}set(e,t){this._map.set(this._keyMapper(e),t)}delete(e){this._map.delete(this._keyMapper(e))}clear(){this._map.clear()}},pa=Object.freeze({__proto__:null,MapCache:ha});const{LRUCache:ga}=li,{MapCache:va}=pa,ya={equality:"reference",eviction:"none",maxSize:1/0};var ma=function({equality:e=ya.equality,eviction:t=ya.eviction,maxSize:n=ya.maxSize}=ya){const r=function(e){switch(e){case"reference":return e=>e;case"value":return e=>pi(e)}throw i(`Unrecognized equality policy ${e}`)}(e);return function(e,t,n){switch(e){case"keep-all":return new va({mapKey:n});case"lru":return new ga({mapKey:n,maxSize:s(t)});case"most-recent":return new ga({mapKey:n,maxSize:1})}throw i(`Unrecognized eviction policy ${e}`)}(t,n,r)};const{setConfigDeletionHandler:ba}=se;var _a=function(e){var t,n;const r=ma({equality:null!==(t=null===(n=e.cachePolicyForParams_UNSTABLE)||void 0===n?void 0:n.equality)&&void 0!==t?t:"value",eviction:"keep-all"});return t=>{var 
n,o;const i=r.get(t);if(null!=i)return i;const{cachePolicyForParams_UNSTABLE:a,...s}=e,u="default"in e?e.default:new Promise((()=>{})),l=da({...s,key:`${e.key}__${null!==(n=pi(t))&&void 0!==n?n:"void"}`,default:"function"===typeof u?u(t):u,retainedBy_UNSTABLE:"function"===typeof e.retainedBy_UNSTABLE?e.retainedBy_UNSTABLE(t):e.retainedBy_UNSTABLE,effects:"function"===typeof e.effects?e.effects(t):"function"===typeof e.effects_UNSTABLE?e.effects_UNSTABLE(t):null!==(o=e.effects)&&void 0!==o?o:e.effects_UNSTABLE});return r.set(t,l),ba(l.key,(()=>{r.delete(t)})),l}};const{setConfigDeletionHandler:wa}=se;let xa=0;var Sa=function(e){var t,n;const r=ma({equality:null!==(t=null===(n=e.cachePolicyForParams_UNSTABLE)||void 0===n?void 0:n.equality)&&void 0!==t?t:"value",eviction:"keep-all"});return t=>{var n;let o;try{o=r.get(t)}catch(f){throw i(`Problem with cache lookup for selector ${e.key}: ${f.message}`)}if(null!=o)return o;const a=`${e.key}__selectorFamily/${null!==(n=pi(t,{allowFunctions:!0}))&&void 0!==n?n:"void"}/${xa++}`,s=n=>e.get(t)(n),u=e.cachePolicy_UNSTABLE,l="function"===typeof e.retainedBy_UNSTABLE?e.retainedBy_UNSTABLE(t):e.retainedBy_UNSTABLE;let c;if(null!=e.set){const n=e.set;c=Wi({key:a,get:s,set:(e,r)=>n(t)(e,r),cachePolicy_UNSTABLE:u,dangerouslyAllowMutability:e.dangerouslyAllowMutability,retainedBy_UNSTABLE:l})}else c=Wi({key:a,get:s,cachePolicy_UNSTABLE:u,dangerouslyAllowMutability:e.dangerouslyAllowMutability,retainedBy_UNSTABLE:l});return r.set(t,c),wa(c.key,(()=>{r.delete(t)})),c}};const Aa=Sa({key:"__constant",get:e=>()=>e,cachePolicyForParams_UNSTABLE:{equality:"reference"}});var Ea=function(e){return Aa(e)};const Ca=Sa({key:"__error",get:e=>()=>{throw i(e)},cachePolicyForParams_UNSTABLE:{equality:"reference"}});var Ra=function(e){return Ca(e)};var Oa=function(e){return e};const{loadableWithError:Ta,loadableWithPromise:ka,loadableWithValue:Ma}=O;function Pa(e,t){const n=Array(t.length).fill(void 0),r=Array(t.length).fill(void 0);for(const[i,a]of t.entries())try{n[i]=e(a)}catch(o){r[i]=o}return[n,r]}function Ia(e){return null!=e&&!a(e)}function Na(e){return Array.isArray(e)?e:Object.getOwnPropertyNames(e).map((t=>e[t]))}function Da(e,t){return Array.isArray(e)?t:Object.getOwnPropertyNames(e).reduce(((e,n,r)=>({...e,[n]:t[r]})),{})}function La(e,t,n){return Da(e,n.map(((e,n)=>null==e?Ma(t[n]):a(e)?ka(e):Ta(e))))}var Fa={waitForNone:Sa({key:"__waitForNone",get:e=>({get:t})=>{const n=Na(e),[r,o]=Pa(t,n);return La(e,r,o)},dangerouslyAllowMutability:!0}),waitForAny:Sa({key:"__waitForAny",get:e=>({get:t})=>{const n=Na(e),[r,o]=Pa(t,n);return o.some((e=>!a(e)))?La(e,r,o):new Promise((t=>{for(const[n,i]of o.entries())a(i)&&i.then((i=>{r[n]=i,o[n]=void 0,t(La(e,r,o))})).catch((i=>{o[n]=i,t(La(e,r,o))}))}))},dangerouslyAllowMutability:!0}),waitForAll:Sa({key:"__waitForAll",get:e=>({get:t})=>{const n=Na(e),[r,o]=Pa(t,n);if(o.every((e=>null==e)))return Da(e,r);const i=o.find(Ia);if(null!=i)throw i;return Promise.all(o).then((t=>{return Da(e,(n=r,t.map(((e,t)=>void 0===e?n[t]:e))));var n}))},dangerouslyAllowMutability:!0}),waitForAllSettled:Sa({key:"__waitForAllSettled",get:e=>({get:t})=>{const n=Na(e),[r,o]=Pa(t,n);return o.every((e=>!a(e)))?La(e,r,o):Promise.all(o.map(((e,t)=>a(e)?e.then((e=>{r[t]=e,o[t]=void 0})).catch((e=>{r[t]=void 0,o[t]=e})):null))).then((()=>La(e,r,o)))},dangerouslyAllowMutability:!0}),noWait:Sa({key:"__noWait",get:e=>({get:t})=>{try{return Wi.value(Ma(t(e)))}catch(n){return 
Wi.value(a(n)?ka(n):Ta(n))},dangerouslyAllowMutability:!0})};const{RecoilLoadable:ja}=O,{DefaultValue:Ua}=se,{RecoilRoot:za,useRecoilStoreID:Ba}=Jn,{isRecoilValue:Va}=K,{retentionZone:$a}=Me,{freshSnapshot:Ha}=vn,{useRecoilState:Wa,useRecoilState_TRANSITION_SUPPORT_UNSTABLE:qa,useRecoilStateLoadable:Ga,useRecoilValue:Ya,useRecoilValue_TRANSITION_SUPPORT_UNSTABLE:Xa,useRecoilValueLoadable:Ka,useRecoilValueLoadable_TRANSITION_SUPPORT_UNSTABLE:Za,useResetRecoilState:Qa,useSetRecoilState:Ja}=Hr,{useGotoRecoilSnapshot:es,useRecoilSnapshot:ts,useRecoilTransactionObserver:ns}=fo,{useRecoilCallback:rs}=Go,{noWait:os,waitForAll:is,waitForAllSettled:as,waitForAny:ss,waitForNone:us}=Fa;var ls={DefaultValue:Ua,isRecoilValue:Va,RecoilLoadable:ja,RecoilEnv:k,RecoilRoot:za,useRecoilStoreID:Ba,useRecoilBridgeAcrossReactRoots_UNSTABLE:_o,atom:da,selector:Wi,atomFamily:_a,selectorFamily:Sa,constSelector:Ea,errorSelector:Ra,readOnlySelector:Oa,noWait:os,waitForNone:us,waitForAny:ss,waitForAll:is,waitForAllSettled:as,useRecoilValue:Ya,useRecoilValueLoadable:Ka,useRecoilState:Wa,useRecoilStateLoadable:Ga,useSetRecoilState:Ja,useResetRecoilState:Qa,useGetRecoilValueInfo_UNSTABLE:go,useRecoilRefresher_UNSTABLE:Zo,useRecoilValueLoadable_TRANSITION_SUPPORT_UNSTABLE:Za,useRecoilValue_TRANSITION_SUPPORT_UNSTABLE:Xa,useRecoilState_TRANSITION_SUPPORT_UNSTABLE:qa,useRecoilCallback:rs,useRecoilTransaction_UNSTABLE:ti,useGotoRecoilSnapshot:es,useRecoilSnapshot:ts,useRecoilTransactionObserver_UNSTABLE:ns,snapshot_UNSTABLE:Ha,useRetain:fr,retentionZone:$a},cs=ls.RecoilRoot,fs=ls.atom,ds=ls.selector,hs=ls.atomFamily,ps=ls.selectorFamily,gs=ls.useRecoilValue,vs=ls.useRecoilValueLoadable,ys=ls.useRecoilState,ms=ls.useSetRecoilState,bs=ls.useResetRecoilState,_s=ls.useRecoilRefresher_UNSTABLE,ws=ls.useRecoilCallback},77675:(e,t,n)=>{"use strict";var r=n(43206),o=n(69675),i=Object;e.exports=r((function(){if(null==this||this!==i(this))throw new o("RegExp.prototype.flags getter called on non-object");var e="";return this.hasIndices&&(e+="d"),this.global&&(e+="g"),this.ignoreCase&&(e+="i"),this.multiline&&(e+="m"),this.dotAll&&(e+="s"),this.unicode&&(e+="u"),this.unicodeSets&&(e+="v"),this.sticky&&(e+="y"),e}),"get flags",!0)},71589:(e,t,n)=>{"use strict";var r=n(38452),o=n(10487),i=n(77675),a=n(15330),s=n(43984),u=o(a());r(u,{getPolyfill:a,implementation:i,shim:s}),e.exports=u},15330:(e,t,n)=>{"use strict";var r=n(77675),o=n(38452).supportsDescriptors,i=Object.getOwnPropertyDescriptor;e.exports=function(){if(o&&"gim"===/a/gim.flags){var e=i(RegExp.prototype,"flags");if(e&&"function"===typeof e.get&&"boolean"===typeof RegExp.prototype.dotAll&&"boolean"===typeof RegExp.prototype.hasIndices){var t="",n={};if(Object.defineProperty(n,"hasIndices",{get:function(){t+="d"}}),Object.defineProperty(n,"sticky",{get:function(){t+="y"}}),"dy"===t)return e.get}}return r}},43984:(e,t,n)=>{"use strict";var r=n(38452).supportsDescriptors,o=n(15330),i=Object.getOwnPropertyDescriptor,a=Object.defineProperty,s=TypeError,u=Object.getPrototypeOf,l=/a/;e.exports=function(){if(!r||!u)throw new s("RegExp.prototype.flags requires a true ES5 environment that supports property descriptors");var e=o(),t=u(l),n=i(t,"flags");return n&&n.get===e||a(t,"flags",{configurable:!0,enumerable:!1,get:e}),e}},7463:(e,t)=>{"use strict";function n(e,t){var n=e.length;e.push(t);e:for(;0<n;){var r=n-1>>>1,o=e[r];if(!(0<i(o,t)))break e;e[r]=t,e[n]=o,n=r}}function r(e){return 0===e.length?null:e[0]}function o(e){if(0===e.length)return null;var t=e[0],n=e.pop();if(n!==t){e[0]=n;e:for(var r=0,o=e.length,a=o>>>1;r<a;){var s=2*(r+1)-1,u=e[s],l=s+1,c=e[l];if(0>i(u,n))l<o&&0>i(c,u)?(e[r]=c,e[l]=n,r=l):(e[r]=u,e[s]=n,r=s);else{if(!(l<o&&0>i(c,n)))break e;e[r]=c,e[l]=n,r=l}}}return t}function i(e,t){var n=e.sortIndex-t.sortIndex;return 
0!==n?n:e.id-t.id}if("object"===typeof performance&&"function"===typeof performance.now){var a=performance;t.unstable_now=function(){return a.now()}}else{var s=Date,u=s.now();t.unstable_now=function(){return s.now()-u}}var l=[],c=[],f=1,d=null,h=3,p=!1,g=!1,v=!1,y="function"===typeof setTimeout?setTimeout:null,m="function"===typeof clearTimeout?clearTimeout:null,b="undefined"!==typeof setImmediate?setImmediate:null;function _(e){for(var t=r(c);null!==t;){if(null===t.callback)o(c);else{if(!(t.startTime<=e))break;o(c),t.sortIndex=t.expirationTime,n(l,t)}t=r(c)}}function w(e){if(v=!1,_(e),!g)if(null!==r(l))g=!0,I(x);else{var t=r(c);null!==t&&N(w,t.startTime-e)}}function x(e,n){g=!1,v&&(v=!1,m(C),C=-1),p=!0;var i=h;try{for(_(n),d=r(l);null!==d&&(!(d.expirationTime>n)||e&&!T());){var a=d.callback;if("function"===typeof a){d.callback=null,h=d.priorityLevel;var s=a(d.expirationTime<=n);n=t.unstable_now(),"function"===typeof s?d.callback=s:d===r(l)&&o(l),_(n)}else o(l);d=r(l)}if(null!==d)var u=!0;else{var f=r(c);null!==f&&N(w,f.startTime-n),u=!1}return u}finally{d=null,h=i,p=!1}}"undefined"!==typeof navigator&&void 0!==navigator.scheduling&&void 0!==navigator.scheduling.isInputPending&&navigator.scheduling.isInputPending.bind(navigator.scheduling);var S,A=!1,E=null,C=-1,R=5,O=-1;function T(){return!(t.unstable_now()-Oe||125a?(e.sortIndex=i,n(c,e),null===r(l)&&e===r(c)&&(v?(m(C),C=-1):v=!0,N(w,i-a))):(e.sortIndex=s,n(l,e),g||p||(g=!0,I(x))),e},t.unstable_shouldYield=T,t.unstable_wrapCallback=function(e){var t=h;return function(){var n=h;h=t;try{return e.apply(this,arguments)}finally{h=n}}}},69982:(e,t,n)=>{"use strict";e.exports=n(7463)},96897:(e,t,n)=>{"use strict";var r=n(70453),o=n(30041),i=n(30592)(),a=n(75795),s=n(69675),u=r("%Math.floor%");e.exports=function(e,t){if("function"!==typeof e)throw new s("`fn` is not a function");if("number"!==typeof t||t<0||t>4294967295||u(t)!==t)throw new s("`length` must be a positive 32-bit integer");var n=arguments.length>2&&!!arguments[2],r=!0,l=!0;if("length"in e&&a){var c=a(e,"length");c&&!c.configurable&&(r=!1),c&&!c.writable&&(l=!1)}return(r||l||!n)&&(i?o(e,"length",t,!0,!0):o(e,"length",t)),e}},43206:(e,t,n)=>{"use strict";var r=n(30041),o=n(30592)(),i=n(74462).functionsHaveConfigurableNames(),a=n(69675);e.exports=function(e,t){if("function"!==typeof e)throw new a("`fn` is not a function");return arguments.length>2&&!!arguments[2]&&!i||(o?r(e,"name",t,!0,!0):r(e,"name",t)),e}},2833:e=>{e.exports=function(e,t,n,r){var o=n?n.call(r,e,t):void 0;if(void 0!==o)return!!o;if(e===t)return!0;if("object"!==typeof e||!e||"object"!==typeof t||!t)return!1;var i=Object.keys(e),a=Object.keys(t);if(i.length!==a.length)return!1;for(var s=Object.prototype.hasOwnProperty.bind(t),u=0;u{"use strict";var r=n(70453),o=n(38075),i=n(58859),a=n(69675),s=r("%WeakMap%",!0),u=r("%Map%",!0),l=o("WeakMap.prototype.get",!0),c=o("WeakMap.prototype.set",!0),f=o("WeakMap.prototype.has",!0),d=o("Map.prototype.get",!0),h=o("Map.prototype.set",!0),p=o("Map.prototype.has",!0),g=function(e,t){for(var n,r=e;null!==(n=r.next);r=n)if(n.key===t)return r.next=n.next,n.next=e.next,e.next=n,n};e.exports=function(){var e,t,n,r={assert:function(e){if(!r.has(e))throw new a("Side channel does not contain "+i(e))},get:function(r){if(s&&r&&("object"===typeof r||"function"===typeof r)){if(e)return l(e,r)}else if(u){if(t)return d(t,r)}else if(n)return function(e,t){var n=g(e,t);return n&&n.value}(n,r)},has:function(r){if(s&&r&&("object"===typeof r||"function"===typeof r)){if(e)return f(e,r)}else 
if(u){if(t)return p(t,r)}else if(n)return function(e,t){return!!g(e,t)}(n,r);return!1},set:function(r,o){s&&r&&("object"===typeof r||"function"===typeof r)?(e||(e=new s),c(e,r,o)):u?(t||(t=new u),h(t,r,o)):(n||(n={key:{},next:null}),function(e,t,n){var r=g(e,t);r?r.value=n:e.next={key:t,next:e.next,value:n}}(n,r,o))}};return r}},528:e=>{"use strict";e.exports=(e,t)=>{if("string"!==typeof e||"string"!==typeof t)throw new TypeError("Expected the arguments to be of type `string`");if(""===t)return[e];const n=e.indexOf(t);return-1===n?[e]:[e.slice(0,n),e.slice(n+t.length)]}},24280:e=>{"use strict";e.exports=e=>encodeURIComponent(e).replace(/[!'()*]/g,(e=>`%${e.charCodeAt(0).toString(16).toUpperCase()}`))},8711:(e,t,n)=>{"use strict";n.r(t),n.d(t,{ServerStyleSheet:()=>Be,StyleSheetConsumer:()=>ie,StyleSheetContext:()=>oe,StyleSheetManager:()=>fe,ThemeConsumer:()=>Ie,ThemeContext:()=>Pe,ThemeProvider:()=>Ne,__PRIVATE__:()=>He,createGlobalStyle:()=>Ue,css:()=>we,default:()=>We,isStyledComponent:()=>w,keyframes:()=>ze,useTheme:()=>$e,version:()=>S,withTheme:()=>Ve});var r=n(44363),o=n(96540),i=n(2833),a=n.n(i);const s=function(e){function t(e,r,u,l,d){for(var h,p,g,v,_,x=0,S=0,A=0,E=0,C=0,P=0,N=g=h=0,L=0,F=0,j=0,U=0,z=u.length,B=z-1,V="",$="",H="",W="";Lh)&&(U=(V=V.replace(" ",":")).length),0r&&(r=(t=t.trim()).charCodeAt(0)),r){case 38:return t.replace(v,"$1"+e.trim());case 58:return e.trim()+t.replace(v,"$1"+e.trim());default:if(0<1*n&&0u.charCodeAt(8))break;case 115:a=a.replace(u,"-webkit-"+u)+";"+a;break;case 207:case 102:a=a.replace(u,"-webkit-"+(102r.charCodeAt(0)&&(r=r.trim()),r=[r],01?t-1:0),r=1;r0?" Args: "+n.join(", "):""))}var O=function(){function e(e){this.groupSizes=new Uint32Array(512),this.length=512,this.tag=e}var t=e.prototype;return t.indexOfGroup=function(e){for(var t=0,n=0;n=this.groupSizes.length){for(var n=this.groupSizes,r=n.length,o=r;e>=o;)(o<<=1)<0&&R(16,""+e);this.groupSizes=new Uint32Array(o),this.groupSizes.set(n),this.length=o;for(var i=r;i=this.length||0===this.groupSizes[e])return t;for(var n=this.groupSizes[e],r=this.indexOfGroup(e),o=r+n,i=r;i=M&&(M=t+1),T.set(e,t),k.set(t,e)},D="style["+x+'][data-styled-version="5.3.11"]',L=new RegExp("^"+x+'\\.g(\\d+)\\[id="([\\w\\d-]+)"\\].*?"([^"]*)'),F=function(e,t,n){for(var r,o=n.split(","),i=0,a=o.length;i=0;n--){var r=t[n];if(r&&1===r.nodeType&&r.hasAttribute(x))return r}}(n),i=void 0!==o?o.nextSibling:null;r.setAttribute(x,"active"),r.setAttribute("data-styled-version","5.3.11");var a=U();return a&&r.setAttribute("nonce",a),n.insertBefore(r,i),r},B=function(){function e(e){var t=this.element=z(e);t.appendChild(document.createTextNode("")),this.sheet=function(e){if(e.sheet)return e.sheet;for(var t=document.styleSheets,n=0,r=t.length;n=0){var n=document.createTextNode(t),r=this.nodes[e];return this.element.insertBefore(n,r||null),this.length++,!0}return!1},t.deleteRule=function(e){this.element.removeChild(this.nodes[e]),this.length--},t.getRule=function(e){return e0&&(l+=e+",")})),r+=""+s+u+'{content:"'+l+'"}/*!sc*/\n'}}}return r}(this)},e}(),G=/(a)(d)/gi,Y=function(e){return String.fromCharCode(e+(e>25?39:97))};function X(e){var t,n="";for(t=Math.abs(e);t>52;t=t/52|0)n=Y(t%52)+n;return(Y(t%52)+n).replace(G,"$1-$2")}var K=function(e,t){for(var n=t.length;n;)e=33*e^t.charCodeAt(--n);return e},Z=function(e){return K(5381,e)};function Q(e){for(var t=0;t>>0);if(!t.hasNameForId(r,a)){var s=n(i,"."+a,void 0,r);t.insertRules(r,a,s)}o.push(a),this.staticRulesId=a}else{for(var 
u=this.rules.length,l=K(this.baseHash,n.hash),c="",f=0;f>>0);if(!t.hasNameForId(r,g)){var v=n(c,"."+g,void 0,r);t.insertRules(r,g,v)}o.push(g)}}return o.join(" ")},e}(),te=/^\s*\/\/.*$/gm,ne=[":","[",".","#"];function re(e){var t,n,r,o,i=void 0===e?m:e,a=i.options,u=void 0===a?m:a,l=i.plugins,c=void 0===l?y:l,f=new s(u),d=[],h=function(e){function t(t){if(t)try{e(t+"}")}catch(e){}}return function(n,r,o,i,a,s,u,l,c,f){switch(n){case 1:if(0===c&&64===r.charCodeAt(0))return e(r+";"),"";break;case 2:if(0===l)return r+"/*|*/";break;case 3:switch(l){case 102:case 112:return e(o[0]+r),"";default:return r+(0===f?"/*|*/":"")}case-2:r.split("/*|*/}").forEach(t)}}}((function(e){d.push(e)})),p=function(e,r,i){return 0===r&&-1!==ne.indexOf(i[n.length])||i.match(o)?e:"."+t};function g(e,i,a,s){void 0===s&&(s="&");var u=e.replace(te,""),l=i&&a?a+" "+i+" { "+u+" }":u;return t=s,n=i,r=new RegExp("\\"+n+"\\b","g"),o=new RegExp("(\\"+n+"\\b){2,}"),f(a||!i?"":i,l)}return f.use([].concat(c,[function(e,t,o){2===e&&o.length&&o[0].lastIndexOf(n)>0&&(o[0]=o[0].replace(r,p))},h,function(e){if(-2===e){var t=d;return d=[],t}}])),g.hash=c.length?c.reduce((function(e,t){return t.name||R(15),K(e,t.name)}),5381).toString():"",g}var oe=o.createContext(),ie=oe.Consumer,ae=o.createContext(),se=(ae.Consumer,new q),ue=re();function le(){return(0,o.useContext)(oe)||se}function ce(){return(0,o.useContext)(ae)||ue}function fe(e){var t=(0,o.useState)(e.stylisPlugins),n=t[0],r=t[1],i=le(),s=(0,o.useMemo)((function(){var t=i;return e.sheet?t=e.sheet:e.target&&(t=t.reconstructWithOptions({target:e.target},!1)),e.disableCSSOMInjection&&(t=t.reconstructWithOptions({useCSSOMInjection:!1})),t}),[e.disableCSSOMInjection,e.sheet,e.target]),u=(0,o.useMemo)((function(){return re({options:{prefix:!e.disableVendorPrefixes},plugins:n})}),[e.disableVendorPrefixes,n]);return(0,o.useEffect)((function(){a()(n,e.stylisPlugins)||r(e.stylisPlugins)}),[e.stylisPlugins]),o.createElement(oe.Provider,{value:s},o.createElement(ae.Provider,{value:u},e.children))}var de=function(){function e(e,t){var n=this;this.inject=function(e,t){void 0===t&&(t=ue);var r=n.name+t.hash;e.hasNameForId(n.id,r)||e.insertRules(n.id,r,t(n.rules,r,"@keyframes"))},this.toString=function(){return R(12,String(n.name))},this.name=e,this.id="sc-keyframes-"+e,this.rules=t}return e.prototype.getName=function(e){return void 0===e&&(e=ue),this.name+e.hash},e}(),he=/([A-Z])/,pe=/([A-Z])/g,ge=/^ms-/,ve=function(e){return"-"+e.toLowerCase()};function ye(e){return he.test(e)?e.replace(pe,ve).replace(ge,"-ms-"):e}var me=function(e){return null==e||!1===e||""===e};function be(e,t,n,r){if(Array.isArray(e)){for(var o,i=[],a=0,s=e.length;a1?t-1:0),r=1;r?@[\\\]^`{|}~-]+/g,Ae=/(^-|-$)/g;function Ee(e){return e.replace(Se,"-").replace(Ae,"")}var Ce=function(e){return X(Z(e)>>>0)};function Re(e){return"string"==typeof e&&!0}var Oe=function(e){return"function"==typeof e||"object"==typeof e&&null!==e&&!Array.isArray(e)},Te=function(e){return"__proto__"!==e&&"constructor"!==e&&"prototype"!==e};function ke(e,t,n){var r=e[n];Oe(t)&&Oe(r)?Me(r,t):e[n]=t}function Me(e){for(var t=arguments.length,n=new Array(t>1?t-1:0),r=1;r=0||(o[n]=e[n]);return o}(t,["componentId"]),i=r&&r+"-"+(Re(e)?e:Ee(_(e)));return Le(e,p({},o,{attrs:v,componentId:i}),n)},Object.defineProperty(S,"defaultProps",{get:function(){return 
this._foldedDefaultProps},set:function(t){this._foldedDefaultProps=r?Me({},e.defaultProps,t):t}}),Object.defineProperty(S,"toString",{value:function(){return"."+S.styledComponentId}}),i&&h()(S,e,{attrs:!0,componentStyle:!0,displayName:!0,foldedComponentIds:!0,shouldForwardProp:!0,styledComponentId:!0,target:!0,withComponent:!0}),S}var Fe=function(e){return function e(t,n,o){if(void 0===o&&(o=m),!(0,r.isValidElementType)(n))return R(1,String(n));var i=function(){return t(n,o,we.apply(void 0,arguments))};return i.withConfig=function(r){return e(t,n,p({},o,{},r))},i.attrs=function(r){return e(t,n,p({},o,{attrs:Array.prototype.concat(o.attrs,r).filter(Boolean)}))},i}(Le,e)};["a","abbr","address","area","article","aside","audio","b","base","bdi","bdo","big","blockquote","body","br","button","canvas","caption","cite","code","col","colgroup","data","datalist","dd","del","details","dfn","dialog","div","dl","dt","em","embed","fieldset","figcaption","figure","footer","form","h1","h2","h3","h4","h5","h6","head","header","hgroup","hr","html","i","iframe","img","input","ins","kbd","keygen","label","legend","li","link","main","map","mark","marquee","menu","menuitem","meta","meter","nav","noscript","object","ol","optgroup","option","output","p","param","picture","pre","progress","q","rp","rt","ruby","s","samp","script","section","select","small","source","span","strong","style","sub","summary","sup","table","tbody","td","textarea","tfoot","th","thead","time","title","tr","track","u","ul","var","video","wbr","circle","clipPath","defs","ellipse","foreignObject","g","image","line","linearGradient","marker","mask","path","pattern","polygon","polyline","radialGradient","rect","stop","svg","text","textPath","tspan"].forEach((function(e){Fe[e]=Fe(e)}));var je=function(){function e(e,t){this.rules=e,this.componentId=t,this.isStatic=Q(e),q.registerId(this.componentId+1)}var t=e.prototype;return t.createStyles=function(e,t,n,r){var o=r(be(this.rules,t,n,r).join(""),""),i=this.componentId+e;n.insertRules(i,i,o)},t.removeStyles=function(e,t){t.clearRules(this.componentId+e)},t.renderStyles=function(e,t,n,r){e>2&&q.registerId(this.componentId+e),this.removeStyles(e,n),this.createStyles(e,t,n,r)},e}();function Ue(e){for(var t=arguments.length,n=new Array(t>1?t-1:0),r=1;r1?t-1:0),r=1;r"+t+""},this.getStyleTags=function(){return e.sealed?R(2):e._emitSheetCSS()},this.getStyleElement=function(){var t;if(e.sealed)return R(2);var n=((t={})[x]="",t["data-styled-version"]="5.3.11",t.dangerouslySetInnerHTML={__html:e.instance.toString()},t),r=U();return r&&(n.nonce=r),[o.createElement("style",p({},n,{key:"sc-0-0"}))]},this.seal=function(){e.sealed=!0},this.instance=new q({isServer:!0}),this.sealed=!1}var t=e.prototype;return t.collectStyles=function(e){return this.sealed?R(2):o.createElement(fe,{sheet:this.instance},e)},t.interleaveWithNodeStream=function(e){return R(3)},e}(),Ve=function(e){var t=o.forwardRef((function(t,n){var r=(0,o.useContext)(Pe),i=e.defaultProps,a=xe(t,r,i);return o.createElement(e,p({},t,{theme:a,ref:n}))}));return h()(t,e),t.displayName="WithTheme("+_(e)+")",t},$e=function(){return(0,o.useContext)(Pe)},He={StyleSheet:q,masterSheet:se};const We=Fe},50677:(e,t,n)=>{"use 
strict";n.r(t),n.d(t,{alignContent:()=>me,alignItems:()=>ye,alignSelf:()=>Oe,background:()=>P,backgroundImage:()=>Ke,backgroundPosition:()=>Qe,backgroundRepeat:()=>Je,backgroundSize:()=>Ze,border:()=>T,borderBottom:()=>Ge,borderColor:()=>He,borderLeft:()=>Ye,borderRadius:()=>Xe,borderRight:()=>qe,borderStyle:()=>$e,borderTop:()=>We,borderWidth:()=>Ve,borders:()=>k,bottom:()=>rt,boxShadow:()=>W,buttonStyle:()=>Y,color:()=>b,colorStyle:()=>K,compose:()=>g,createParser:()=>c,createStyleFunction:()=>h,display:()=>ie,flex:()=>Se,flexBasis:()=>Ce,flexDirection:()=>xe,flexGrow:()=>Ae,flexShrink:()=>Ee,flexWrap:()=>we,flexbox:()=>S,fontFamily:()=>fe,fontSize:()=>ce,fontStyle:()=>ge,fontWeight:()=>de,get:()=>l,grid:()=>C,gridArea:()=>Be,gridAutoColumns:()=>Le,gridAutoFlow:()=>De,gridAutoRows:()=>Fe,gridColumn:()=>Ie,gridColumnGap:()=>Me,gridGap:()=>ke,gridRow:()=>Ne,gridRowGap:()=>Pe,gridTemplateAreas:()=>ze,gridTemplateColumns:()=>je,gridTemplateRows:()=>Ue,height:()=>Q,justifyContent:()=>_e,justifyItems:()=>be,justifySelf:()=>Re,layout:()=>v,left:()=>ot,letterSpacing:()=>ve,lineHeight:()=>he,margin:()=>B,maxHeight:()=>ne,maxWidth:()=>te,minHeight:()=>ee,minWidth:()=>J,opacity:()=>le,order:()=>Te,overflow:()=>ae,overflowX:()=>se,overflowY:()=>ue,padding:()=>V,position:()=>D,right:()=>nt,shadow:()=>H,size:()=>re,space:()=>$,style:()=>it,system:()=>p,textAlign:()=>pe,textShadow:()=>W,textStyle:()=>X,top:()=>tt,typography:()=>w,variant:()=>G,verticalAlign:()=>oe,width:()=>Z,zIndex:()=>et});var r=n(45228),o=n.n(r),i=function(e,t){var n=o()({},e,t);for(var r in e){var i;e[r]&&"object"===typeof t[r]&&o()(n,((i={})[r]=o()(e[r],t[r]),i))}return n},a={breakpoints:[40,52,64].map((function(e){return e+"em"}))},s=function(e){return"@media screen and (min-width: "+e+")"},u=function(e,t){return l(t,e,e)},l=function(e,t,n,r,o){for(t=t&&t.split?t.split("."):[t],r=0;r1&&u.forEach((function(n){var o;r[n]=e(((o={})[n]=t[n],o))})),r},f=function(e,t,n,r,i){var a={};return r.slice(0,e.length).forEach((function(r,s){var u,l=e[s],c=t(r,n,i);l?o()(a,((u={})[l]=o()({},a[l],c),u)):o()(a,c)})),a},d=function(e,t,n,r,i){var a={};for(var u in r){var l=e[u],c=t(r[u],n,i);if(l){var f,d=s(l);o()(a,((f={})[d]=o()({},a[d],c),f))}else o()(a,c)}return a},h=function(e){var t=e.properties,n=e.property,r=e.scale,o=e.transform,i=void 0===o?u:o,a=e.defaultScale;t=t||[n];var s=function(e,n,r){var o={},a=i(e,n,r);if(null!==a)return t.forEach((function(e){o[e]=a})),o};return s.scale=r,s.defaults=a,s},p=function(e){void 0===e&&(e={});var t={};return Object.keys(e).forEach((function(n){var r=e[n];t[n]=!0!==r?"function"!==typeof r?h(r):r:h({property:n,scale:n})})),c(t)},g=function(){for(var e={},t=arguments.length,n=new Array(t),r=0;r1?e:100*e+"%")}},height:{property:"height",scale:"sizes"},minWidth:{property:"minWidth",scale:"sizes"},minHeight:{property:"minHeight",scale:"sizes"},maxWidth:{property:"maxWidth",scale:"sizes"},maxHeight:{property:"maxHeight",scale:"sizes"},size:{properties:["width","height"],scale:"sizes"},overflow:!0,overflowX:!0,overflowY:!0,display:!0,verticalAlign:!0});const y=v;var m={color:{property:"color",scale:"colors"},backgroundColor:{property:"backgroundColor",scale:"colors"},opacity:!0};m.bg=m.backgroundColor;var b=p(m);const _=b;var 
w=p({fontFamily:{property:"fontFamily",scale:"fonts"},fontSize:{property:"fontSize",scale:"fontSizes",defaultScale:[12,14,16,20,24,32,48,64,72]},fontWeight:{property:"fontWeight",scale:"fontWeights"},lineHeight:{property:"lineHeight",scale:"lineHeights"},letterSpacing:{property:"letterSpacing",scale:"letterSpacings"},textAlign:!0,fontStyle:!0});const x=w;var S=p({alignItems:!0,alignContent:!0,justifyItems:!0,justifyContent:!0,flexWrap:!0,flexDirection:!0,flex:!0,flexGrow:!0,flexShrink:!0,flexBasis:!0,justifySelf:!0,alignSelf:!0,order:!0});const A=S;var E={space:[0,4,8,16,32,64,128,256,512]},C=p({gridGap:{property:"gridGap",scale:"space",defaultScale:E.space},gridColumnGap:{property:"gridColumnGap",scale:"space",defaultScale:E.space},gridRowGap:{property:"gridRowGap",scale:"space",defaultScale:E.space},gridColumn:!0,gridRow:!0,gridAutoFlow:!0,gridAutoColumns:!0,gridAutoRows:!0,gridTemplateColumns:!0,gridTemplateRows:!0,gridTemplateAreas:!0,gridArea:!0});const R=C;var O={border:{property:"border",scale:"borders"},borderWidth:{property:"borderWidth",scale:"borderWidths"},borderStyle:{property:"borderStyle",scale:"borderStyles"},borderColor:{property:"borderColor",scale:"colors"},borderRadius:{property:"borderRadius",scale:"radii"},borderTop:{property:"borderTop",scale:"borders"},borderTopLeftRadius:{property:"borderTopLeftRadius",scale:"radii"},borderTopRightRadius:{property:"borderTopRightRadius",scale:"radii"},borderRight:{property:"borderRight",scale:"borders"},borderBottom:{property:"borderBottom",scale:"borders"},borderBottomLeftRadius:{property:"borderBottomLeftRadius",scale:"radii"},borderBottomRightRadius:{property:"borderBottomRightRadius",scale:"radii"},borderLeft:{property:"borderLeft",scale:"borders"},borderX:{properties:["borderLeft","borderRight"],scale:"borders"},borderY:{properties:["borderTop","borderBottom"],scale:"borders"},borderTopWidth:{property:"borderTopWidth",scale:"borderWidths"},borderTopColor:{property:"borderTopColor",scale:"colors"},borderTopStyle:{property:"borderTopStyle",scale:"borderStyles"}};O.borderTopLeftRadius={property:"borderTopLeftRadius",scale:"radii"},O.borderTopRightRadius={property:"borderTopRightRadius",scale:"radii"},O.borderBottomWidth={property:"borderBottomWidth",scale:"borderWidths"},O.borderBottomColor={property:"borderBottomColor",scale:"colors"},O.borderBottomStyle={property:"borderBottomStyle",scale:"borderStyles"},O.borderBottomLeftRadius={property:"borderBottomLeftRadius",scale:"radii"},O.borderBottomRightRadius={property:"borderBottomRightRadius",scale:"radii"},O.borderLeftWidth={property:"borderLeftWidth",scale:"borderWidths"},O.borderLeftColor={property:"borderLeftColor",scale:"colors"},O.borderLeftStyle={property:"borderLeftStyle",scale:"borderStyles"},O.borderRightWidth={property:"borderRightWidth",scale:"borderWidths"},O.borderRightColor={property:"borderRightColor",scale:"colors"},O.borderRightStyle={property:"borderRightStyle",scale:"borderStyles"};var T=p(O);const k=T;var M={background:!0,backgroundImage:!0,backgroundSize:!0,backgroundPosition:!0,backgroundRepeat:!0};M.bgImage=M.backgroundImage,M.bgSize=M.backgroundSize,M.bgPosition=M.backgroundPosition,M.bgRepeat=M.backgroundRepeat;var P=p(M);const I=P;var 
N={space:[0,4,8,16,32,64,128,256,512]},D=p({position:!0,zIndex:{property:"zIndex",scale:"zIndices"},top:{property:"top",scale:"space",defaultScale:N.space},right:{property:"right",scale:"space",defaultScale:N.space},bottom:{property:"bottom",scale:"space",defaultScale:N.space},left:{property:"left",scale:"space",defaultScale:N.space}});const L=D;var F={space:[0,4,8,16,32,64,128,256,512]},j=function(e){return"number"===typeof e&&!isNaN(e)},U=function(e,t){if(!j(e))return l(t,e,e);var n=e<0,r=Math.abs(e),o=l(t,r,r);return j(o)?o*(n?-1:1):n?"-"+o:o},z={};z.margin={margin:{property:"margin",scale:"space",transform:U,defaultScale:F.space},marginTop:{property:"marginTop",scale:"space",transform:U,defaultScale:F.space},marginRight:{property:"marginRight",scale:"space",transform:U,defaultScale:F.space},marginBottom:{property:"marginBottom",scale:"space",transform:U,defaultScale:F.space},marginLeft:{property:"marginLeft",scale:"space",transform:U,defaultScale:F.space},marginX:{properties:["marginLeft","marginRight"],scale:"space",transform:U,defaultScale:F.space},marginY:{properties:["marginTop","marginBottom"],scale:"space",transform:U,defaultScale:F.space}},z.margin.m=z.margin.margin,z.margin.mt=z.margin.marginTop,z.margin.mr=z.margin.marginRight,z.margin.mb=z.margin.marginBottom,z.margin.ml=z.margin.marginLeft,z.margin.mx=z.margin.marginX,z.margin.my=z.margin.marginY,z.padding={padding:{property:"padding",scale:"space",defaultScale:F.space},paddingTop:{property:"paddingTop",scale:"space",defaultScale:F.space},paddingRight:{property:"paddingRight",scale:"space",defaultScale:F.space},paddingBottom:{property:"paddingBottom",scale:"space",defaultScale:F.space},paddingLeft:{property:"paddingLeft",scale:"space",defaultScale:F.space},paddingX:{properties:["paddingLeft","paddingRight"],scale:"space",defaultScale:F.space},paddingY:{properties:["paddingTop","paddingBottom"],scale:"space",defaultScale:F.space}},z.padding.p=z.padding.padding,z.padding.pt=z.padding.paddingTop,z.padding.pr=z.padding.paddingRight,z.padding.pb=z.padding.paddingBottom,z.padding.pl=z.padding.paddingLeft,z.padding.px=z.padding.paddingX,z.padding.py=z.padding.paddingY;var B=p(z.margin),V=p(z.padding),$=g(B,V);var H=p({boxShadow:{property:"boxShadow",scale:"shadows"},textShadow:{property:"textShadow",scale:"shadows"}});const W=H;var q=n(50402),G=function(e){var t,n,r=e.scale,o=e.prop,i=void 0===o?"variant":o,a=e.variants,s=void 0===a?{}:a,u=e.key;n=Object.keys(s).length?function(e,t,n){return(0,q.default)(l(t,e,null))(n.theme)}:function(e,t){return l(t,e,null)},n.scale=r||u,n.defaults=s;var f=((t={})[i]=n,t);return c(f)};var 
Y=G({key:"buttons"}),X=G({key:"textStyles",prop:"textStyle"}),K=G({key:"colorStyles",prop:"colors"}),Z=y.width,Q=y.height,J=y.minWidth,ee=y.minHeight,te=y.maxWidth,ne=y.maxHeight,re=y.size,oe=y.verticalAlign,ie=y.display,ae=y.overflow,se=y.overflowX,ue=y.overflowY,le=_.opacity,ce=x.fontSize,fe=x.fontFamily,de=x.fontWeight,he=x.lineHeight,pe=x.textAlign,ge=x.fontStyle,ve=x.letterSpacing,ye=A.alignItems,me=A.alignContent,be=A.justifyItems,_e=A.justifyContent,we=A.flexWrap,xe=A.flexDirection,Se=A.flex,Ae=A.flexGrow,Ee=A.flexShrink,Ce=A.flexBasis,Re=A.justifySelf,Oe=A.alignSelf,Te=A.order,ke=R.gridGap,Me=R.gridColumnGap,Pe=R.gridRowGap,Ie=R.gridColumn,Ne=R.gridRow,De=R.gridAutoFlow,Le=R.gridAutoColumns,Fe=R.gridAutoRows,je=R.gridTemplateColumns,Ue=R.gridTemplateRows,ze=R.gridTemplateAreas,Be=R.gridArea,Ve=k.borderWidth,$e=k.borderStyle,He=k.borderColor,We=k.borderTop,qe=k.borderRight,Ge=k.borderBottom,Ye=k.borderLeft,Xe=k.borderRadius,Ke=I.backgroundImage,Ze=I.backgroundSize,Qe=I.backgroundPosition,Je=I.backgroundRepeat,et=L.zIndex,tt=L.top,nt=L.right,rt=L.bottom,ot=L.left,it=function(e){var t=e.prop,n=e.cssProperty,r=e.alias,o=e.key,i=e.transformValue,a=e.scale,s=e.properties,u={};return u[t]=h({properties:s,property:n||t,scale:o,defaultScale:a,transform:i}),r&&(u[r]=u[t]),c(u)}},12897:function(e,t,n){e.exports=function(){"use strict";var e=function(e){var t=e.id,n=e.viewBox,r=e.content;this.id=t,this.viewBox=n,this.content=r};e.prototype.stringify=function(){return this.content},e.prototype.toString=function(){return this.stringify()},e.prototype.destroy=function(){var e=this;["id","viewBox","content"].forEach((function(t){return delete e[t]}))};var t=function(e){var t=!!document.importNode,n=(new DOMParser).parseFromString(e,"image/svg+xml").documentElement;return t?document.importNode(n,!0):n};function r(e,t){return e(t={exports:{}},t.exports),t.exports}"undefined"!==typeof window?window:"undefined"!==typeof n.g?n.g:"undefined"!==typeof self&&self;var o=r((function(e,t){!function(t,n){e.exports=n()}(0,(function(){function e(e){return e&&"object"===typeof e&&"[object RegExp]"!==Object.prototype.toString.call(e)&&"[object Date]"!==Object.prototype.toString.call(e)}function t(e){return Array.isArray(e)?[]:{}}function n(n,r){return r&&!0===r.clone&&e(n)?i(t(n),n,r):n}function r(t,r,o){var a=t.slice();return r.forEach((function(r,s){"undefined"===typeof a[s]?a[s]=n(r,o):e(r)?a[s]=i(t[s],r,o):-1===t.indexOf(r)&&a.push(n(r,o))})),a}function o(t,r,o){var a={};return e(t)&&Object.keys(t).forEach((function(e){a[e]=n(t[e],o)})),Object.keys(r).forEach((function(s){e(r[s])&&t[s]?a[s]=i(t[s],r[s],o):a[s]=n(r[s],o)})),a}function i(e,t,i){var a=Array.isArray(t),s=(i||{arrayMerge:r}).arrayMerge||r;return a?Array.isArray(e)?s(e,t,i):n(t,i):o(e,t,i)}return i.all=function(e,t){if(!Array.isArray(e)||e.length<2)throw new Error("first argument should be an array with at least two elements");return e.reduce((function(e,n){return i(e,n,t)}))},i}))})),i=r((function(e,t){var n={svg:{name:"xmlns",uri:"http://www.w3.org/2000/svg"},xlink:{name:"xmlns:xlink",uri:"http://www.w3.org/1999/xlink"}};t.default=n,e.exports=t.default})),a=function(e){return Object.keys(e).map((function(t){return t+'="'+e[t].toString().replace(/"/g,""")+'"'})).join(" ")},s=i.svg,u=i.xlink,l={};l[s.name]=s.uri,l[u.name]=u.uri;var c=function(e,t){void 0===e&&(e="");var n=o(l,t||{});return""+e+""},f=function(e){function n(){e.apply(this,arguments)}e&&(n.__proto__=e),n.prototype=Object.create(e&&e.prototype),n.prototype.constructor=n;var 
r={isMounted:{}};return r.isMounted.get=function(){return!!this.node},n.createFromExistingNode=function(e){return new n({id:e.getAttribute("id"),viewBox:e.getAttribute("viewBox"),content:e.outerHTML})},n.prototype.destroy=function(){this.isMounted&&this.unmount(),e.prototype.destroy.call(this)},n.prototype.mount=function(e){if(this.isMounted)return this.node;var t="string"===typeof e?document.querySelector(e):e,n=this.render();return this.node=n,t.appendChild(n),n},n.prototype.render=function(){var e=this.stringify();return t(c(e)).childNodes[0]},n.prototype.unmount=function(){this.node.parentNode.removeChild(this.node)},Object.defineProperties(n.prototype,r),n}(e);return f}()},55042:function(e,t,n){e.exports=function(){"use strict";function e(e,t){return e(t={exports:{}},t.exports),t.exports}"undefined"!==typeof window?window:"undefined"!==typeof n.g?n.g:"undefined"!==typeof self&&self;var t=e((function(e,t){!function(t,n){e.exports=n()}(0,(function(){function e(e){return e&&"object"===typeof e&&"[object RegExp]"!==Object.prototype.toString.call(e)&&"[object Date]"!==Object.prototype.toString.call(e)}function t(e){return Array.isArray(e)?[]:{}}function n(n,r){return r&&!0===r.clone&&e(n)?i(t(n),n,r):n}function r(t,r,o){var a=t.slice();return r.forEach((function(r,s){"undefined"===typeof a[s]?a[s]=n(r,o):e(r)?a[s]=i(t[s],r,o):-1===t.indexOf(r)&&a.push(n(r,o))})),a}function o(t,r,o){var a={};return e(t)&&Object.keys(t).forEach((function(e){a[e]=n(t[e],o)})),Object.keys(r).forEach((function(s){e(r[s])&&t[s]?a[s]=i(t[s],r[s],o):a[s]=n(r[s],o)})),a}function i(e,t,i){var a=Array.isArray(t),s=(i||{arrayMerge:r}).arrayMerge||r;return a?Array.isArray(e)?s(e,t,i):n(t,i):o(e,t,i)}return i.all=function(e,t){if(!Array.isArray(e)||e.length<2)throw new Error("first argument should be an array with at least two elements");return e.reduce((function(e,n){return i(e,n,t)}))},i}))}));function r(e){return e=e||Object.create(null),{on:function(t,n){(e[t]||(e[t]=[])).push(n)},off:function(t,n){e[t]&&e[t].splice(e[t].indexOf(n)>>>0,1)},emit:function(t,n){(e[t]||[]).map((function(e){e(n)})),(e["*"]||[]).map((function(e){e(t,n)}))}}}var o=e((function(e,t){var n={svg:{name:"xmlns",uri:"http://www.w3.org/2000/svg"},xlink:{name:"xmlns:xlink",uri:"http://www.w3.org/1999/xlink"}};t.default=n,e.exports=t.default})),i=function(e){return Object.keys(e).map((function(t){return t+'="'+e[t].toString().replace(/"/g,""")+'"'})).join(" ")},a=o.svg,s=o.xlink,u={};u[a.name]=a.uri,u[s.name]=s.uri;var l,c=function(e,n){void 0===e&&(e="");var r=t(u,n||{});return""+e+""},f=o.svg,d=o.xlink,h={attrs:(l={style:["position: absolute","width: 0","height: 0"].join("; "),"aria-hidden":"true"},l[f.name]=f.uri,l[d.name]=d.uri,l)},p=function(e){this.config=t(h,e||{}),this.symbols=[]};p.prototype.add=function(e){var t=this.symbols,n=this.find(e.id);return n?(t[t.indexOf(n)]=e,!1):(t.push(e),!0)},p.prototype.remove=function(e){var t=this.symbols,n=this.find(e);return!!n&&(t.splice(t.indexOf(n),1),n.destroy(),!0)},p.prototype.find=function(e){return this.symbols.filter((function(t){return t.id===e}))[0]||null},p.prototype.has=function(e){return null!==this.find(e)},p.prototype.stringify=function(){var e=this.config.attrs,t=this.symbols.map((function(e){return e.stringify()})).join("");return c(t,e)},p.prototype.toString=function(){return this.stringify()},p.prototype.destroy=function(){this.symbols.forEach((function(e){return e.destroy()}))};var g=function(e){var 
t=e.id,n=e.viewBox,r=e.content;this.id=t,this.viewBox=n,this.content=r};g.prototype.stringify=function(){return this.content},g.prototype.toString=function(){return this.stringify()},g.prototype.destroy=function(){var e=this;["id","viewBox","content"].forEach((function(t){return delete e[t]}))};var v=function(e){var t=!!document.importNode,n=(new DOMParser).parseFromString(e,"image/svg+xml").documentElement;return t?document.importNode(n,!0):n},y=function(e){function t(){e.apply(this,arguments)}e&&(t.__proto__=e),t.prototype=Object.create(e&&e.prototype),t.prototype.constructor=t;var n={isMounted:{}};return n.isMounted.get=function(){return!!this.node},t.createFromExistingNode=function(e){return new t({id:e.getAttribute("id"),viewBox:e.getAttribute("viewBox"),content:e.outerHTML})},t.prototype.destroy=function(){this.isMounted&&this.unmount(),e.prototype.destroy.call(this)},t.prototype.mount=function(e){if(this.isMounted)return this.node;var t="string"===typeof e?document.querySelector(e):e,n=this.render();return this.node=n,t.appendChild(n),n},t.prototype.render=function(){var e=this.stringify();return v(c(e)).childNodes[0]},t.prototype.unmount=function(){this.node.parentNode.removeChild(this.node)},Object.defineProperties(t.prototype,n),t}(g),m={autoConfigure:!0,mountTo:"body",syncUrlsWithBaseTag:!1,listenLocationChangeEvent:!0,locationChangeEvent:"locationChange",locationChangeAngularEmitter:!1,usagesToUpdate:"use[*|href]",moveGradientsOutsideSymbol:!1},b=function(e){return Array.prototype.slice.call(e,0)},_={isChrome:function(){return/chrome/i.test(navigator.userAgent)},isFirefox:function(){return/firefox/i.test(navigator.userAgent)},isIE:function(){return/msie/i.test(navigator.userAgent)||/trident/i.test(navigator.userAgent)},isEdge:function(){return/edge/i.test(navigator.userAgent)}},w=function(e,t){var n=document.createEvent("CustomEvent");n.initCustomEvent(e,!1,!1,t),window.dispatchEvent(n)},x=function(e){var t=[];return b(e.querySelectorAll("style")).forEach((function(e){e.textContent+="",t.push(e)})),t},S=function(e){return(e||window.location.href).split("#")[0]},A=function(e){angular.module("ng").run(["$rootScope",function(t){t.$on("$locationChangeSuccess",(function(t,n,r){w(e,{oldUrl:r,newUrl:n})}))}])},E="linearGradient, radialGradient, pattern, mask, clipPath",C=function(e,t){return void 0===t&&(t=E),b(e.querySelectorAll("symbol")).forEach((function(e){b(e.querySelectorAll(t)).forEach((function(t){e.parentNode.insertBefore(t,e)}))})),e};function R(e,t){return b(e).reduce((function(e,n){if(!n.attributes)return e;var r=b(n.attributes),o=t?r.filter(t):r;return e.concat(o)}),[])}var O=o.xlink.uri,T="xlink:href",k=/[{}|\\\^\[\]`"<>]/g;function M(e){return e.replace(k,(function(e){return"%"+e[0].charCodeAt(0).toString(16).toUpperCase()}))}function P(e){return e.replace(/[.*+?^${}()|[\]\\]/g,"\\$&")}function I(e,t,n){return b(e).forEach((function(e){var r=e.getAttribute(T);if(r&&0===r.indexOf(t)){var o=r.replace(t,n);e.setAttributeNS(O,T,o)}})),e}var N,D=["clipPath","colorProfile","src","cursor","fill","filter","marker","markerStart","markerMid","markerEnd","mask","stroke","style"],L=D.map((function(e){return"["+e+"]"})).join(","),F=function(e,t,n,r){var o=M(n),i=M(r);R(e.querySelectorAll(L),(function(e){var t=e.localName,n=e.value;return-1!==D.indexOf(t)&&-1!==n.indexOf("url("+o)})).forEach((function(e){return e.value=e.value.replace(new RegExp(P(o),"g"),i)})),I(t,o,i)},j={MOUNT:"mount",SYMBOL_MOUNT:"symbol_mount"},U=function(e){function n(n){var o=this;void 
0===n&&(n={}),e.call(this,t(m,n));var i=r();this._emitter=i,this.node=null;var a=this.config;if(a.autoConfigure&&this._autoConfigure(n),a.syncUrlsWithBaseTag){var s=document.getElementsByTagName("base")[0].getAttribute("href");i.on(j.MOUNT,(function(){return o.updateUrls("#",s)}))}var u=this._handleLocationChange.bind(this);this._handleLocationChange=u,a.listenLocationChangeEvent&&window.addEventListener(a.locationChangeEvent,u),a.locationChangeAngularEmitter&&A(a.locationChangeEvent),i.on(j.MOUNT,(function(e){a.moveGradientsOutsideSymbol&&C(e)})),i.on(j.SYMBOL_MOUNT,(function(e){a.moveGradientsOutsideSymbol&&C(e.parentNode),(_.isIE()||_.isEdge())&&x(e)}))}e&&(n.__proto__=e),n.prototype=Object.create(e&&e.prototype),n.prototype.constructor=n;var o={isMounted:{}};return o.isMounted.get=function(){return!!this.node},n.prototype._autoConfigure=function(e){var t=this.config;"undefined"===typeof e.syncUrlsWithBaseTag&&(t.syncUrlsWithBaseTag="undefined"!==typeof document.getElementsByTagName("base")[0]),"undefined"===typeof e.locationChangeAngularEmitter&&(t.locationChangeAngularEmitter="undefined"!==typeof window.angular),"undefined"===typeof e.moveGradientsOutsideSymbol&&(t.moveGradientsOutsideSymbol=_.isFirefox())},n.prototype._handleLocationChange=function(e){var t=e.detail,n=t.oldUrl,r=t.newUrl;this.updateUrls(n,r)},n.prototype.add=function(t){var n=this,r=e.prototype.add.call(this,t);return this.isMounted&&r&&(t.mount(n.node),this._emitter.emit(j.SYMBOL_MOUNT,t.node)),r},n.prototype.attach=function(e){var t=this,n=this;if(n.isMounted)return n.node;var r="string"===typeof e?document.querySelector(e):e;return n.node=r,this.symbols.forEach((function(e){e.mount(n.node),t._emitter.emit(j.SYMBOL_MOUNT,e.node)})),b(r.querySelectorAll("symbol")).forEach((function(e){var t=y.createFromExistingNode(e);t.node=e,n.add(t)})),this._emitter.emit(j.MOUNT,r),r},n.prototype.destroy=function(){var e=this,t=e.config,n=e.symbols,r=e._emitter;n.forEach((function(e){return e.destroy()})),r.off("*"),window.removeEventListener(t.locationChangeEvent,this._handleLocationChange),this.isMounted&&this.unmount()},n.prototype.mount=function(e,t){void 0===e&&(e=this.config.mountTo),void 0===t&&(t=!1);var n=this;if(n.isMounted)return n.node;var r="string"===typeof e?document.querySelector(e):e,o=n.render();return this.node=o,t&&r.childNodes[0]?r.insertBefore(o,r.childNodes[0]):r.appendChild(o),this._emitter.emit(j.MOUNT,o),o},n.prototype.render=function(){return v(this.stringify())},n.prototype.unmount=function(){this.node.parentNode.removeChild(this.node)},n.prototype.updateUrls=function(e,t){if(!this.isMounted)return!1;var n=document.querySelectorAll(this.config.usagesToUpdate);return F(this.node,n,S(e)+"#",S(t)+"#"),!0},Object.defineProperties(n.prototype,o),n}(p),z=e((function(e){var t;t=function(){var e,t=[],n=document,r=n.documentElement.doScroll,o="DOMContentLoaded",i=(r?/^loaded|^c/:/^loaded|^i|^c/).test(n.readyState);return i||n.addEventListener(o,e=function(){for(n.removeEventListener(o,e),i=1;e=t.shift();)e()}),function(e){i?setTimeout(e,0):t.push(e)}},e.exports=t()})),B="__SVG_SPRITE_NODE__",V="__SVG_SPRITE__";window[V]?N=window[V]:(N=new U({attrs:{id:B,"aria-hidden":"true"}}),window[V]=N);var $=function(){var e=document.getElementById(B);e?N.attach(e):N.mount(document.body,!0)};return document.body?$():z($),N}()},27003:(e,t,n)=>{"use strict";n.d(t,{A:()=>r});const r=n(96540).useLayoutEffect},22831:(e,t,n)=>{"use 
strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"NIL",{enumerable:!0,get:function(){return s.default}}),Object.defineProperty(t,"parse",{enumerable:!0,get:function(){return f.default}}),Object.defineProperty(t,"stringify",{enumerable:!0,get:function(){return c.default}}),Object.defineProperty(t,"v1",{enumerable:!0,get:function(){return r.default}}),Object.defineProperty(t,"v3",{enumerable:!0,get:function(){return o.default}}),Object.defineProperty(t,"v4",{enumerable:!0,get:function(){return i.default}}),Object.defineProperty(t,"v5",{enumerable:!0,get:function(){return a.default}}),Object.defineProperty(t,"validate",{enumerable:!0,get:function(){return l.default}}),Object.defineProperty(t,"version",{enumerable:!0,get:function(){return u.default}});var r=d(n(13518)),o=d(n(14948)),i=d(n(45073)),a=d(n(77186)),s=d(n(14808)),u=d(n(7775)),l=d(n(37037)),c=d(n(49910)),f=d(n(96792));function d(e){return e&&e.__esModule?e:{default:e}}},22311:(e,t)=>{"use strict";function n(e){return 14+(e+64>>>9<<4)+1}function r(e,t){const n=(65535&e)+(65535&t);return(e>>16)+(t>>16)+(n>>16)<<16|65535&n}function o(e,t,n,o,i,a){return r((s=r(r(t,e),r(o,a)))<<(u=i)|s>>>32-u,n);var s,u}function i(e,t,n,r,i,a,s){return o(t&n|~t&r,e,t,i,a,s)}function a(e,t,n,r,i,a,s){return o(t&r|n&~r,e,t,i,a,s)}function s(e,t,n,r,i,a,s){return o(t^n^r,e,t,i,a,s)}function u(e,t,n,r,i,a,s){return o(n^(t|~r),e,t,i,a,s)}Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var l=function(e){if("string"===typeof e){const t=unescape(encodeURIComponent(e));e=new Uint8Array(t.length);for(let n=0;n>5]>>>o%32&255,i=parseInt(r.charAt(n>>>4&15)+r.charAt(15&n),16);t.push(i)}return t}(function(e,t){e[t>>5]|=128<>5]|=(255&e[n/8])<{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var n={randomUUID:"undefined"!==typeof crypto&&crypto.randomUUID&&crypto.randomUUID.bind(crypto)};t.default=n},14808:(e,t)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;t.default="00000000-0000-0000-0000-000000000000"},96792:(e,t,n)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var r,o=(r=n(37037))&&r.__esModule?r:{default:r};var i=function(e){if(!(0,o.default)(e))throw TypeError("Invalid UUID");let t;const n=new Uint8Array(16);return n[0]=(t=parseInt(e.slice(0,8),16))>>>24,n[1]=t>>>16&255,n[2]=t>>>8&255,n[3]=255&t,n[4]=(t=parseInt(e.slice(9,13),16))>>>8,n[5]=255&t,n[6]=(t=parseInt(e.slice(14,18),16))>>>8,n[7]=255&t,n[8]=(t=parseInt(e.slice(19,23),16))>>>8,n[9]=255&t,n[10]=(t=parseInt(e.slice(24,36),16))/1099511627776&255,n[11]=t/4294967296&255,n[12]=t>>>24&255,n[13]=t>>>16&255,n[14]=t>>>8&255,n[15]=255&t,n};t.default=i},27656:(e,t)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;t.default=/^(?:[0-9a-f]{8}-[0-9a-f]{4}-[1-5][0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}|00000000-0000-0000-0000-000000000000)$/i},2858:(e,t)=>{"use strict";let n;Object.defineProperty(t,"__esModule",{value:!0}),t.default=function(){if(!n&&(n="undefined"!==typeof crypto&&crypto.getRandomValues&&crypto.getRandomValues.bind(crypto),!n))throw new Error("crypto.getRandomValues() not supported. 
See https://github.com/uuidjs/uuid#getrandomvalues-not-supported");return n(r)};const r=new Uint8Array(16)},9042:(e,t)=>{"use strict";function n(e,t,n,r){switch(e){case 0:return t&n^~t&r;case 1:case 3:return t^n^r;case 2:return t&n^t&r^n&r}}function r(e,t){return e<>>32-t}Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var o=function(e){const t=[1518500249,1859775393,2400959708,3395469782],o=[1732584193,4023233417,2562383102,271733878,3285377520];if("string"===typeof e){const t=unescape(encodeURIComponent(e));e=[];for(let n=0;n>>0;f=c,c=l,l=r(a,30)>>>0,a=i,i=u}o[0]=o[0]+i>>>0,o[1]=o[1]+a>>>0,o[2]=o[2]+l>>>0,o[3]=o[3]+c>>>0,o[4]=o[4]+f>>>0}return[o[0]>>24&255,o[0]>>16&255,o[0]>>8&255,255&o[0],o[1]>>24&255,o[1]>>16&255,o[1]>>8&255,255&o[1],o[2]>>24&255,o[2]>>16&255,o[2]>>8&255,255&o[2],o[3]>>24&255,o[3]>>16&255,o[3]>>8&255,255&o[3],o[4]>>24&255,o[4]>>16&255,o[4]>>8&255,255&o[4]]};t.default=o},49910:(e,t,n)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0,t.unsafeStringify=a;var r,o=(r=n(37037))&&r.__esModule?r:{default:r};const i=[];for(let u=0;u<256;++u)i.push((u+256).toString(16).slice(1));function a(e,t=0){return i[e[t+0]]+i[e[t+1]]+i[e[t+2]]+i[e[t+3]]+"-"+i[e[t+4]]+i[e[t+5]]+"-"+i[e[t+6]]+i[e[t+7]]+"-"+i[e[t+8]]+i[e[t+9]]+"-"+i[e[t+10]]+i[e[t+11]]+i[e[t+12]]+i[e[t+13]]+i[e[t+14]]+i[e[t+15]]}var s=function(e,t=0){const n=a(e,t);if(!(0,o.default)(n))throw TypeError("Stringified UUID is invalid");return n};t.default=s},13518:(e,t,n)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var r,o=(r=n(2858))&&r.__esModule?r:{default:r},i=n(49910);let a,s,u=0,l=0;var c=function(e,t,n){let r=t&&n||0;const c=t||new Array(16);let f=(e=e||{}).node||a,d=void 0!==e.clockseq?e.clockseq:s;if(null==f||null==d){const t=e.random||(e.rng||o.default)();null==f&&(f=a=[1|t[0],t[1],t[2],t[3],t[4],t[5]]),null==d&&(d=s=16383&(t[6]<<8|t[7]))}let h=void 0!==e.msecs?e.msecs:Date.now(),p=void 0!==e.nsecs?e.nsecs:l+1;const g=h-u+(p-l)/1e4;if(g<0&&void 0===e.clockseq&&(d=d+1&16383),(g<0||h>u)&&void 0===e.nsecs&&(p=0),p>=1e4)throw new Error("uuid.v1(): Can't create more than 10M uuids/sec");u=h,l=p,s=d,h+=122192928e5;const v=(1e4*(268435455&h)+p)%4294967296;c[r++]=v>>>24&255,c[r++]=v>>>16&255,c[r++]=v>>>8&255,c[r++]=255&v;const y=h/4294967296*1e4&268435455;c[r++]=y>>>8&255,c[r++]=255&y,c[r++]=y>>>24&15|16,c[r++]=y>>>16&255,c[r++]=d>>>8|128,c[r++]=255&d;for(let o=0;o<6;++o)c[r+o]=f[o];return t||(0,i.unsafeStringify)(c)};t.default=c},14948:(e,t,n)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var r=i(n(59025)),o=i(n(22311));function i(e){return e&&e.__esModule?e:{default:e}}var a=(0,r.default)("v3",48,o.default);t.default=a},59025:(e,t,n)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.URL=t.DNS=void 0,t.default=function(e,t,n){function r(e,r,a,s){var u;if("string"===typeof e&&(e=function(e){e=unescape(encodeURIComponent(e));const t=[];for(let n=0;n{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var r=a(n(46140)),o=a(n(2858)),i=n(49910);function a(e){return e&&e.__esModule?e:{default:e}}var s=function(e,t,n){if(r.default.randomUUID&&!t&&!e)return r.default.randomUUID();const a=(e=e||{}).random||(e.rng||o.default)();if(a[6]=15&a[6]|64,a[8]=63&a[8]|128,t){n=n||0;for(let e=0;e<16;++e)t[n+e]=a[e];return t}return(0,i.unsafeStringify)(a)};t.default=s},77186:(e,t,n)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var 
r=i(n(59025)),o=i(n(9042));function i(e){return e&&e.__esModule?e:{default:e}}var a=(0,r.default)("v5",80,o.default);t.default=a},37037:(e,t,n)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var r,o=(r=n(27656))&&r.__esModule?r:{default:r};var i=function(e){return"string"===typeof e&&o.default.test(e)};t.default=i},7775:(e,t,n)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var r,o=(r=n(37037))&&r.__esModule?r:{default:r};var i=function(e){if(!(0,o.default)(e))throw TypeError("Invalid UUID");return parseInt(e.slice(14,15),16)};t.default=i},62805:(e,t,n)=>{"use strict";n.d(t,{A:()=>r});const r="00000000-0000-0000-0000-000000000000"},33829:(e,t,n)=>{"use strict";n.d(t,{A:()=>l});const r={randomUUID:"undefined"!==typeof crypto&&crypto.randomUUID&&crypto.randomUUID.bind(crypto)};let o;const i=new Uint8Array(16);function a(){if(!o&&(o="undefined"!==typeof crypto&&crypto.getRandomValues&&crypto.getRandomValues.bind(crypto),!o))throw new Error("crypto.getRandomValues() not supported. See https://github.com/uuidjs/uuid#getrandomvalues-not-supported");return o(i)}const s=[];for(let c=0;c<256;++c)s.push((c+256).toString(16).slice(1));function u(e,t=0){return s[e[t+0]]+s[e[t+1]]+s[e[t+2]]+s[e[t+3]]+"-"+s[e[t+4]]+s[e[t+5]]+"-"+s[e[t+6]]+s[e[t+7]]+"-"+s[e[t+8]]+s[e[t+9]]+"-"+s[e[t+10]]+s[e[t+11]]+s[e[t+12]]+s[e[t+13]]+s[e[t+14]]+s[e[t+15]]}const l=function(e,t,n){if(r.randomUUID&&!t&&!e)return r.randomUUID();const o=(e=e||{}).random||(e.rng||a)();if(o[6]=15&o[6]|64,o[8]=63&o[8]|128,t){n=n||0;for(let e=0;e<16;++e)t[n+e]=o[e];return t}return u(o)}},79306:(e,t,n)=>{"use strict";var r=n(94901),o=n(16823),i=TypeError;e.exports=function(e){if(r(e))return e;throw new i(o(e)+" is not a function")}},35548:(e,t,n)=>{"use strict";var r=n(33517),o=n(16823),i=TypeError;e.exports=function(e){if(r(e))return e;throw new i(o(e)+" is not a constructor")}},13813:(e,t,n)=>{"use strict";var r=n(72248).has;e.exports=function(e){return r(e),e}},73506:(e,t,n)=>{"use strict";var r=n(13925),o=String,i=TypeError;e.exports=function(e){if(r(e))return e;throw new i("Can't set "+o(e)+" as a prototype")}},97080:(e,t,n)=>{"use strict";var r=n(94402).has;e.exports=function(e){return r(e),e}},6469:(e,t,n)=>{"use strict";var r=n(78227),o=n(2360),i=n(24913).f,a=r("unscopables"),s=Array.prototype;void 0===s[a]&&i(s,a,{configurable:!0,value:o(null)}),e.exports=function(e){s[a][e]=!0}},57829:(e,t,n)=>{"use strict";var r=n(68183).charAt;e.exports=function(e,t,n){return t+(n?r(e,t).length:1)}},90679:(e,t,n)=>{"use strict";var r=n(1625),o=TypeError;e.exports=function(e,t){if(r(t,e))return e;throw new o("Incorrect invocation")}},28551:(e,t,n)=>{"use strict";var r=n(20034),o=String,i=TypeError;e.exports=function(e){if(r(e))return e;throw new i(o(e)+" is not an object")}},19617:(e,t,n)=>{"use strict";var r=n(25397),o=n(35610),i=n(26198),a=function(e){return function(t,n,a){var s=r(t),u=i(s);if(0===u)return!e&&-1;var l,c=o(a,u);if(e&&n!==n){for(;u>c;)if((l=s[c++])!==l)return!0}else for(;u>c;c++)if((e||c in s)&&s[c]===n)return e||c||0;return!e&&-1}};e.exports={includes:a(!0),indexOf:a(!1)}},67680:(e,t,n)=>{"use strict";var r=n(79504);e.exports=r([].slice)},74488:(e,t,n)=>{"use strict";var r=n(67680),o=Math.floor,i=function(e,t){var n=e.length;if(n<8)for(var a,s,u=1;u0;)e[s]=e[--s];s!==u++&&(e[s]=a)}else for(var l=o(n/2),c=i(r(e,0,l),t),f=i(r(e,l),t),d=c.length,h=f.length,p=0,g=0;p{"use strict";var r=n(69565),o=n(97751),i=n(55966);e.exports=function(e,t,n,a){try{var 
s=i(e,"return");if(s)return o("Promise").resolve(r(s,e)).then((function(){t(n)}),(function(e){a(e)}))}catch(u){return a(u)}t(n)}},92059:(e,t,n)=>{"use strict";var r=n(69565),o=n(1103),i=n(28551),a=n(2360),s=n(66699),u=n(56279),l=n(78227),c=n(91181),f=n(97751),d=n(55966),h=n(53982),p=n(62529),g=n(9539),v=f("Promise"),y=l("toStringTag"),m="AsyncIteratorHelper",b="WrapForValidAsyncIterator",_=c.set,w=function(e){var t=!e,n=c.getterFor(e?b:m),s=function(e){var r=o((function(){return n(e)})),i=r.error,a=r.value;return i||t&&a.done?{exit:!0,value:i?v.reject(a):v.resolve(p(void 0,!0))}:{exit:!1,value:a}};return u(a(h),{next:function(){var e=s(this),t=e.value;if(e.exit)return t;var n=o((function(){return i(t.nextHandler(v))})),r=n.error,a=n.value;return r&&(t.done=!0),r?v.reject(a):v.resolve(a)},return:function(){var t=s(this),n=t.value;if(t.exit)return n;n.done=!0;var a,u,l=n.iterator,c=o((function(){if(n.inner)try{g(n.inner.iterator,"normal")}catch(e){return g(l,"throw",e)}return d(l,"return")}));return a=u=c.value,c.error?v.reject(u):void 0===a?v.resolve(p(void 0,!0)):(u=(c=o((function(){return r(a,l)}))).value,c.error?v.reject(u):e?v.resolve(u):v.resolve(u).then((function(e){return i(e),p(void 0,!0)})))}})},x=w(!0),S=w(!1);s(S,y,"Async Iterator Helper"),e.exports=function(e,t){var n=function(n,r){r?(r.iterator=n.iterator,r.next=n.next):r=n,r.type=t?b:m,r.nextHandler=e,r.counter=0,r.done=!1,_(this,r)};return n.prototype=t?x:S,n}},36639:(e,t,n)=>{"use strict";var r=n(69565),o=n(79306),i=n(28551),a=n(20034),s=n(96837),u=n(97751),l=n(1767),c=n(20772),f=function(e){var t=0===e,n=1===e,f=2===e,d=3===e;return function(e,h,p){i(e);var g=void 0!==h;!g&&t||o(h);var v=l(e),y=u("Promise"),m=v.iterator,b=v.next,_=0;return new y((function(e,o){var u=function(e){c(m,o,e,o)},l=function(){try{if(g)try{s(_)}catch(v){u(v)}y.resolve(i(r(b,m))).then((function(r){try{if(i(r).done)t?(p.length=_,e(p)):e(!d&&(f||void 0));else{var s=r.value;try{if(g){var v=h(s,_),b=function(r){if(n)l();else if(f)r?l():c(m,e,!1,o);else if(t)try{p[_++]=r,l()}catch(i){u(i)}else r?c(m,e,d||s,o):l()};a(v)?y.resolve(v).then(b,u):b(v)}else p[_++]=s,l()}catch(w){u(w)}}}catch(x){o(x)}}),o)}catch(w){o(w)}};l()}))}};e.exports={toArray:f(0),forEach:f(1),every:f(2),some:f(3),find:f(4)}},41750:(e,t,n)=>{"use strict";var r=n(69565),o=n(79306),i=n(28551),a=n(20034),s=n(1767),u=n(92059),l=n(62529),c=n(20772),f=u((function(e){var t=this,n=t.iterator,o=t.mapper;return new e((function(s,u){var f=function(e){t.done=!0,u(e)},d=function(e){c(n,f,e,f)};e.resolve(i(r(t.next,n))).then((function(n){try{if(i(n).done)t.done=!0,s(l(void 0,!0));else{var r=n.value;try{var u=o(r,t.counter++),c=function(e){s(l(e,!1))};a(u)?e.resolve(u).then(c,d):c(u)}catch(h){d(h)}}}catch(p){f(p)}}),f)}))}));e.exports=function(e){return i(this),o(e),new f(s(this),{mapper:e})}},53982:(e,t,n)=>{"use strict";var r,o,i=n(24475),a=n(77629),s=n(94901),u=n(2360),l=n(42787),c=n(36840),f=n(78227),d=n(96395),h="USE_FUNCTION_CONSTRUCTOR",p=f("asyncIterator"),g=i.AsyncIterator,v=a.AsyncIteratorPrototype;if(v)r=v;else if(s(g))r=g.prototype;else if(a[h]||i[h])try{o=l(l(l(Function("return async function*(){}()")()))),l(o)===Object.prototype&&(r=o)}catch(y){}r?d&&(r=u(r)):r={},s(r[p])||c(r,p,(function(){return this})),e.exports=r},96319:(e,t,n)=>{"use strict";var r=n(28551),o=n(9539);e.exports=function(e,t,n,i){try{return i?t(r(n)[0],n[1]):t(n)}catch(a){o(e,"throw",a)}}},44576:(e,t,n)=>{"use strict";var r=n(79504),o=r({}.toString),i=r("".slice);e.exports=function(e){return 
i(o(e),8,-1)}},36955:(e,t,n)=>{"use strict";var r=n(92140),o=n(94901),i=n(44576),a=n(78227)("toStringTag"),s=Object,u="Arguments"===i(function(){return arguments}());e.exports=r?i:function(e){var t,n,r;return void 0===e?"Undefined":null===e?"Null":"string"==typeof(n=function(e,t){try{return e[t]}catch(n){}}(t=s(e),a))?n:u?i(t):"Object"===(r=i(t))&&o(t.callee)?"Arguments":r}},77740:(e,t,n)=>{"use strict";var r=n(39297),o=n(35031),i=n(77347),a=n(24913);e.exports=function(e,t,n){for(var s=o(t),u=a.f,l=i.f,c=0;c{"use strict";var r=n(79039);e.exports=!r((function(){function e(){}return e.prototype.constructor=null,Object.getPrototypeOf(new e)!==e.prototype}))},62529:e=>{"use strict";e.exports=function(e,t){return{value:e,done:t}}},66699:(e,t,n)=>{"use strict";var r=n(43724),o=n(24913),i=n(6980);e.exports=r?function(e,t,n){return o.f(e,t,i(1,n))}:function(e,t,n){return e[t]=n,e}},6980:e=>{"use strict";e.exports=function(e,t){return{enumerable:!(1&e),configurable:!(2&e),writable:!(4&e),value:t}}},97040:(e,t,n)=>{"use strict";var r=n(43724),o=n(24913),i=n(6980);e.exports=function(e,t,n){r?o.f(e,t,i(0,n)):e[t]=n}},62106:(e,t,n)=>{"use strict";var r=n(50283),o=n(24913);e.exports=function(e,t,n){return n.get&&r(n.get,t,{getter:!0}),n.set&&r(n.set,t,{setter:!0}),o.f(e,t,n)}},36840:(e,t,n)=>{"use strict";var r=n(94901),o=n(24913),i=n(50283),a=n(39433);e.exports=function(e,t,n,s){s||(s={});var u=s.enumerable,l=void 0!==s.name?s.name:t;if(r(n)&&i(n,l,s),s.global)u?e[t]=n:a(t,n);else{try{s.unsafe?e[t]&&(u=!0):delete e[t]}catch(c){}u?e[t]=n:o.f(e,t,{value:n,enumerable:!1,configurable:!s.nonConfigurable,writable:!s.nonWritable})}return e}},56279:(e,t,n)=>{"use strict";var r=n(36840);e.exports=function(e,t,n){for(var o in t)r(e,o,t[o],n);return e}},39433:(e,t,n)=>{"use strict";var r=n(24475),o=Object.defineProperty;e.exports=function(e,t){try{o(r,e,{value:t,configurable:!0,writable:!0})}catch(n){r[e]=t}return t}},43724:(e,t,n)=>{"use strict";var r=n(79039);e.exports=!r((function(){return 7!==Object.defineProperty({},1,{get:function(){return 7}})[1]}))},4055:(e,t,n)=>{"use strict";var r=n(24475),o=n(20034),i=r.document,a=o(i)&&o(i.createElement);e.exports=function(e){return a?i.createElement(e):{}}},96837:e=>{"use strict";var t=TypeError;e.exports=function(e){if(e>9007199254740991)throw t("Maximum allowed index exceeded");return e}},67400:e=>{"use strict";e.exports={CSSRuleList:0,CSSStyleDeclaration:0,CSSValueList:0,ClientRectList:0,DOMRectList:0,DOMStringList:0,DOMTokenList:1,DataTransferItemList:0,FileList:0,HTMLAllCollection:0,HTMLCollection:0,HTMLFormElement:0,HTMLSelectElement:0,MediaList:0,MimeTypeArray:0,NamedNodeMap:0,NodeList:1,PaintRequestList:0,Plugin:0,PluginArray:0,SVGLengthList:0,SVGNumberList:0,SVGPathSegList:0,SVGPointList:0,SVGStringList:0,SVGTransformList:0,SourceBufferList:0,StyleSheetList:0,TextTrackCueList:0,TextTrackList:0,TouchList:0}},79296:(e,t,n)=>{"use strict";var r=n(4055)("span").classList,o=r&&r.constructor&&r.constructor.prototype;e.exports=o===Object.prototype?void 0:o},79392:e=>{"use strict";e.exports="undefined"!=typeof navigator&&String(navigator.userAgent)||""},77388:(e,t,n)=>{"use strict";var r,o,i=n(24475),a=n(79392),s=i.process,u=i.Deno,l=s&&s.versions||u&&u.version,c=l&&l.v8;c&&(o=(r=c.split("."))[0]>0&&r[0]<4?1:+(r[0]+r[1])),!o&&a&&(!(r=a.match(/Edge\/(\d+)/))||r[1]>=74)&&(r=a.match(/Chrome\/(\d+)/))&&(o=+r[1]),e.exports=o},88727:e=>{"use 
strict";e.exports=["constructor","hasOwnProperty","isPrototypeOf","propertyIsEnumerable","toLocaleString","toString","valueOf"]},46518:(e,t,n)=>{"use strict";var r=n(24475),o=n(77347).f,i=n(66699),a=n(36840),s=n(39433),u=n(77740),l=n(92796);e.exports=function(e,t){var n,c,f,d,h,p=e.target,g=e.global,v=e.stat;if(n=g?r:v?r[p]||s(p,{}):r[p]&&r[p].prototype)for(c in t){if(d=t[c],f=e.dontCallGetSet?(h=o(n,c))&&h.value:n[c],!l(g?c:p+(v?".":"#")+c,e.forced)&&void 0!==f){if(typeof d==typeof f)continue;u(d,f)}(e.sham||f&&f.sham)&&i(d,"sham",!0),a(n,c,d,e)}}},79039:e=>{"use strict";e.exports=function(e){try{return!!e()}catch(t){return!0}}},89228:(e,t,n)=>{"use strict";n(27495);var r=n(69565),o=n(36840),i=n(57323),a=n(79039),s=n(78227),u=n(66699),l=s("species"),c=RegExp.prototype;e.exports=function(e,t,n,f){var d=s(e),h=!a((function(){var t={};return t[d]=function(){return 7},7!==""[e](t)})),p=h&&!a((function(){var t=!1,n=/a/;return"split"===e&&((n={}).constructor={},n.constructor[l]=function(){return n},n.flags="",n[d]=/./[d]),n.exec=function(){return t=!0,null},n[d](""),!t}));if(!h||!p||n){var g=/./[d],v=t(d,""[e],(function(e,t,n,o,a){var s=t.exec;return s===i||s===c.exec?h&&!a?{done:!0,value:r(g,t,n,o)}:{done:!0,value:r(e,n,t,o)}:{done:!1}}));o(String.prototype,e,v[0]),o(c,d,v[1])}f&&u(c[d],"sham",!0)}},18745:(e,t,n)=>{"use strict";var r=n(40616),o=Function.prototype,i=o.apply,a=o.call;e.exports="object"==typeof Reflect&&Reflect.apply||(r?a.bind(i):function(){return a.apply(i,arguments)})},76080:(e,t,n)=>{"use strict";var r=n(27476),o=n(79306),i=n(40616),a=r(r.bind);e.exports=function(e,t){return o(e),void 0===t?e:i?a(e,t):function(){return e.apply(t,arguments)}}},40616:(e,t,n)=>{"use strict";var r=n(79039);e.exports=!r((function(){var e=function(){}.bind();return"function"!=typeof e||e.hasOwnProperty("prototype")}))},69565:(e,t,n)=>{"use strict";var r=n(40616),o=Function.prototype.call;e.exports=r?o.bind(o):function(){return o.apply(o,arguments)}},10350:(e,t,n)=>{"use strict";var r=n(43724),o=n(39297),i=Function.prototype,a=r&&Object.getOwnPropertyDescriptor,s=o(i,"name"),u=s&&"something"===function(){}.name,l=s&&(!r||r&&a(i,"name").configurable);e.exports={EXISTS:s,PROPER:u,CONFIGURABLE:l}},46706:(e,t,n)=>{"use strict";var r=n(79504),o=n(79306);e.exports=function(e,t,n){try{return r(o(Object.getOwnPropertyDescriptor(e,t)[n]))}catch(i){}}},27476:(e,t,n)=>{"use strict";var r=n(44576),o=n(79504);e.exports=function(e){if("Function"===r(e))return o(e)}},79504:(e,t,n)=>{"use strict";var r=n(40616),o=Function.prototype,i=o.call,a=r&&o.bind.bind(i,i);e.exports=r?a:function(e){return function(){return i.apply(e,arguments)}}},97751:(e,t,n)=>{"use strict";var r=n(24475),o=n(94901);e.exports=function(e,t){return arguments.length<2?(n=r[e],o(n)?n:void 0):r[e]&&r[e][t];var n}},1767:e=>{"use strict";e.exports=function(e){return{iterator:e,next:e.next,done:!1}}},50851:(e,t,n)=>{"use strict";var r=n(36955),o=n(55966),i=n(64117),a=n(26269),s=n(78227)("iterator");e.exports=function(e){if(!i(e))return o(e,s)||o(e,"@@iterator")||a[r(e)]}},70081:(e,t,n)=>{"use strict";var r=n(69565),o=n(79306),i=n(28551),a=n(16823),s=n(50851),u=TypeError;e.exports=function(e,t){var n=arguments.length<2?s(e):t;if(o(n))return i(r(n,e));throw new u(a(e)+" is not iterable")}},55966:(e,t,n)=>{"use strict";var r=n(79306),o=n(64117);e.exports=function(e,t){var n=e[t];return o(n)?void 0:r(n)}},83789:(e,t,n)=>{"use strict";var r=n(79306),o=n(28551),i=n(69565),a=n(91291),s=n(1767),u="Invalid 
size",l=RangeError,c=TypeError,f=Math.max,d=function(e,t){this.set=e,this.size=f(t,0),this.has=r(e.has),this.keys=r(e.keys)};d.prototype={getIterator:function(){return s(o(i(this.keys,this.set)))},includes:function(e){return i(this.has,this.set,e)}},e.exports=function(e){o(e);var t=+e.size;if(t!==t)throw new c(u);var n=a(t);if(n<0)throw new l(u);return new d(e,n)}},2478:(e,t,n)=>{"use strict";var r=n(79504),o=n(48981),i=Math.floor,a=r("".charAt),s=r("".replace),u=r("".slice),l=/\$([$&'`]|\d{1,2}|<[^>]*>)/g,c=/\$([$&'`]|\d{1,2})/g;e.exports=function(e,t,n,r,f,d){var h=n+e.length,p=r.length,g=c;return void 0!==f&&(f=o(f),g=l),s(d,g,(function(o,s){var l;switch(a(s,0)){case"$":return"$";case"&":return e;case"`":return u(t,0,n);case"'":return u(t,h);case"<":l=f[u(s,1,-1)];break;default:var c=+s;if(0===c)return o;if(c>p){var d=i(c/10);return 0===d?o:d<=p?void 0===r[d-1]?a(s,1):r[d-1]+a(s,1):o}l=r[c-1]}return void 0===l?"":l}))}},24475:function(e,t,n){"use strict";var r=function(e){return e&&e.Math===Math&&e};e.exports=r("object"==typeof globalThis&&globalThis)||r("object"==typeof window&&window)||r("object"==typeof self&&self)||r("object"==typeof n.g&&n.g)||r("object"==typeof this&&this)||function(){return this}()||Function("return this")()},39297:(e,t,n)=>{"use strict";var r=n(79504),o=n(48981),i=r({}.hasOwnProperty);e.exports=Object.hasOwn||function(e,t){return i(o(e),t)}},30421:e=>{"use strict";e.exports={}},20397:(e,t,n)=>{"use strict";var r=n(97751);e.exports=r("document","documentElement")},35917:(e,t,n)=>{"use strict";var r=n(43724),o=n(79039),i=n(4055);e.exports=!r&&!o((function(){return 7!==Object.defineProperty(i("div"),"a",{get:function(){return 7}}).a}))},47055:(e,t,n)=>{"use strict";var r=n(79504),o=n(79039),i=n(44576),a=Object,s=r("".split);e.exports=o((function(){return!a("z").propertyIsEnumerable(0)}))?function(e){return"String"===i(e)?s(e,""):a(e)}:a},33706:(e,t,n)=>{"use strict";var r=n(79504),o=n(94901),i=n(77629),a=r(Function.toString);o(i.inspectSource)||(i.inspectSource=function(e){return a(e)}),e.exports=i.inspectSource},91181:(e,t,n)=>{"use strict";var r,o,i,a=n(58622),s=n(24475),u=n(20034),l=n(66699),c=n(39297),f=n(77629),d=n(66119),h=n(30421),p="Object already initialized",g=s.TypeError,v=s.WeakMap;if(a||f.state){var y=f.state||(f.state=new v);y.get=y.get,y.has=y.has,y.set=y.set,r=function(e,t){if(y.has(e))throw new g(p);return t.facade=e,y.set(e,t),t},o=function(e){return y.get(e)||{}},i=function(e){return y.has(e)}}else{var m=d("state");h[m]=!0,r=function(e,t){if(c(e,m))throw new g(p);return t.facade=e,l(e,m,t),t},o=function(e){return c(e,m)?e[m]:{}},i=function(e){return c(e,m)}}e.exports={set:r,get:o,has:i,enforce:function(e){return i(e)?o(e):r(e,{})},getterFor:function(e){return function(t){var n;if(!u(t)||(n=o(t)).type!==e)throw new g("Incompatible receiver, "+e+" required");return n}}}},44209:(e,t,n)=>{"use strict";var r=n(78227),o=n(26269),i=r("iterator"),a=Array.prototype;e.exports=function(e){return void 0!==e&&(o.Array===e||a[i]===e)}},94901:e=>{"use strict";var t="object"==typeof document&&document.all;e.exports="undefined"==typeof t&&void 0!==t?function(e){return"function"==typeof e||e===t}:function(e){return"function"==typeof e}},33517:(e,t,n)=>{"use strict";var r=n(79504),o=n(79039),i=n(94901),a=n(36955),s=n(97751),u=n(33706),l=function(){},c=s("Reflect","construct"),f=/^\s*(?:class|function)\b/,d=r(f.exec),h=!f.test(l),p=function(e){if(!i(e))return!1;try{return 
c(l,[],e),!0}catch(t){return!1}},g=function(e){if(!i(e))return!1;switch(a(e)){case"AsyncFunction":case"GeneratorFunction":case"AsyncGeneratorFunction":return!1}try{return h||!!d(f,u(e))}catch(t){return!0}};g.sham=!0,e.exports=!c||o((function(){var e;return p(p.call)||!p(Object)||!p((function(){e=!0}))||e}))?g:p},92796:(e,t,n)=>{"use strict";var r=n(79039),o=n(94901),i=/#|\.prototype\./,a=function(e,t){var n=u[s(e)];return n===c||n!==l&&(o(t)?r(t):!!t)},s=a.normalize=function(e){return String(e).replace(i,".").toLowerCase()},u=a.data={},l=a.NATIVE="N",c=a.POLYFILL="P";e.exports=a},31563:(e,t,n)=>{"use strict";var r=n(36955),o=n(39297),i=n(64117),a=n(78227),s=n(26269),u=a("iterator"),l=Object;e.exports=function(e){if(i(e))return!1;var t=l(e);return void 0!==t[u]||"@@iterator"in t||o(s,r(t))}},64117:e=>{"use strict";e.exports=function(e){return null===e||void 0===e}},20034:(e,t,n)=>{"use strict";var r=n(94901);e.exports=function(e){return"object"==typeof e?null!==e:r(e)}},13925:(e,t,n)=>{"use strict";var r=n(20034);e.exports=function(e){return r(e)||null===e}},96395:e=>{"use strict";e.exports=!1},10757:(e,t,n)=>{"use strict";var r=n(97751),o=n(94901),i=n(1625),a=n(7040),s=Object;e.exports=a?function(e){return"symbol"==typeof e}:function(e){var t=r("Symbol");return o(t)&&i(t.prototype,s(e))}},40507:(e,t,n)=>{"use strict";var r=n(69565);e.exports=function(e,t,n){for(var o,i,a=n?e:e.iterator,s=e.next;!(o=r(s,a)).done;)if(void 0!==(i=t(o.value)))return i}},72652:(e,t,n)=>{"use strict";var r=n(76080),o=n(69565),i=n(28551),a=n(16823),s=n(44209),u=n(26198),l=n(1625),c=n(70081),f=n(50851),d=n(9539),h=TypeError,p=function(e,t){this.stopped=e,this.result=t},g=p.prototype;e.exports=function(e,t,n){var v,y,m,b,_,w,x,S=n&&n.that,A=!(!n||!n.AS_ENTRIES),E=!(!n||!n.IS_RECORD),C=!(!n||!n.IS_ITERATOR),R=!(!n||!n.INTERRUPTED),O=r(t,S),T=function(e){return v&&d(v,"normal",e),new p(!0,e)},k=function(e){return A?(i(e),R?O(e[0],e[1],T):O(e[0],e[1])):R?O(e,T):O(e)};if(E)v=e.iterator;else if(C)v=e;else{if(!(y=f(e)))throw new h(a(e)+" is not iterable");if(s(y)){for(m=0,b=u(e);b>m;m++)if((_=k(e[m]))&&l(g,_))return _;return new p(!1)}v=c(e,y)}for(w=E?e.next:v.next;!(x=o(w,v)).done;){try{_=k(x.value)}catch(M){d(v,"throw",M)}if("object"==typeof _&&_&&l(g,_))return _}return new p(!1)}},9539:(e,t,n)=>{"use strict";var r=n(69565),o=n(28551),i=n(55966);e.exports=function(e,t,n){var a,s;o(e);try{if(!(a=i(e,"return"))){if("throw"===t)throw n;return n}a=r(a,e)}catch(u){s=!0,a=u}if("throw"===t)throw n;if(s)throw a;return o(a),n}},33994:(e,t,n)=>{"use strict";var r=n(57657).IteratorPrototype,o=n(2360),i=n(6980),a=n(10687),s=n(26269),u=function(){return this};e.exports=function(e,t,n,l){var c=t+" Iterator";return e.prototype=o(r,{next:i(+!l,n)}),a(e,c,!1,!0),s[c]=u,e}},19462:(e,t,n)=>{"use strict";var r=n(69565),o=n(2360),i=n(66699),a=n(56279),s=n(78227),u=n(91181),l=n(55966),c=n(57657).IteratorPrototype,f=n(62529),d=n(9539),h=s("toStringTag"),p="IteratorHelper",g="WrapForValidIterator",v=u.set,y=function(e){var t=u.getterFor(e?g:p);return a(o(c),{next:function(){var n=t(this);if(e)return n.nextHandler();try{var r=n.done?void 0:n.nextHandler();return f(r,n.done)}catch(o){throw n.done=!0,o}},return:function(){var n=t(this),o=n.iterator;if(n.done=!0,e){var i=l(o,"return");return i?r(i,o):f(void 0,!0)}if(n.inner)try{d(n.inner.iterator,"normal")}catch(a){return d(o,"throw",a)}return d(o,"normal"),f(void 0,!0)}})},m=y(!0),b=y(!1);i(b,h,"Iterator Helper"),e.exports=function(e,t){var 
n=function(n,r){r?(r.iterator=n.iterator,r.next=n.next):r=n,r.type=t?g:p,r.nextHandler=e,r.counter=0,r.done=!1,v(this,r)};return n.prototype=t?m:b,n}},51088:(e,t,n)=>{"use strict";var r=n(46518),o=n(69565),i=n(96395),a=n(10350),s=n(94901),u=n(33994),l=n(42787),c=n(52967),f=n(10687),d=n(66699),h=n(36840),p=n(78227),g=n(26269),v=n(57657),y=a.PROPER,m=a.CONFIGURABLE,b=v.IteratorPrototype,_=v.BUGGY_SAFARI_ITERATORS,w=p("iterator"),x="keys",S="values",A="entries",E=function(){return this};e.exports=function(e,t,n,a,p,v,C){u(n,t,a);var R,O,T,k=function(e){if(e===p&&D)return D;if(!_&&e&&e in I)return I[e];switch(e){case x:case S:case A:return function(){return new n(this,e)}}return function(){return new n(this)}},M=t+" Iterator",P=!1,I=e.prototype,N=I[w]||I["@@iterator"]||p&&I[p],D=!_&&N||k(p),L="Array"===t&&I.entries||N;if(L&&(R=l(L.call(new e)))!==Object.prototype&&R.next&&(i||l(R)===b||(c?c(R,b):s(R[w])||h(R,w,E)),f(R,M,!0,!0),i&&(g[M]=E)),y&&p===S&&N&&N.name!==S&&(!i&&m?d(I,"name",S):(P=!0,D=function(){return o(N,this)})),p)if(O={values:k(S),keys:v?D:k(x),entries:k(A)},C)for(T in O)(_||P||!(T in I))&&h(I,T,O[T]);else r({target:t,proto:!0,forced:_||P},O);return i&&!C||I[w]===D||h(I,w,D,{name:p}),g[t]=D,O}},20713:(e,t,n)=>{"use strict";var r=n(69565),o=n(79306),i=n(28551),a=n(1767),s=n(19462),u=n(96319),l=s((function(){var e=this.iterator,t=i(r(this.next,e));if(!(this.done=!!t.done))return u(e,this.mapper,[t.value,this.counter++],!0)}));e.exports=function(e){return i(this),o(e),new l(a(this),{mapper:e})}},57657:(e,t,n)=>{"use strict";var r,o,i,a=n(79039),s=n(94901),u=n(20034),l=n(2360),c=n(42787),f=n(36840),d=n(78227),h=n(96395),p=d("iterator"),g=!1;[].keys&&("next"in(i=[].keys())?(o=c(c(i)))!==Object.prototype&&(r=o):g=!0),!u(r)||a((function(){var e={};return r[p].call(e)!==e}))?r={}:h&&(r=l(r)),s(r[p])||f(r,p,(function(){return this})),e.exports={IteratorPrototype:r,BUGGY_SAFARI_ITERATORS:g}},26269:e=>{"use strict";e.exports={}},26198:(e,t,n)=>{"use strict";var r=n(18014);e.exports=function(e){return r(e.length)}},50283:(e,t,n)=>{"use strict";var r=n(79504),o=n(79039),i=n(94901),a=n(39297),s=n(43724),u=n(10350).CONFIGURABLE,l=n(33706),c=n(91181),f=c.enforce,d=c.get,h=String,p=Object.defineProperty,g=r("".slice),v=r("".replace),y=r([].join),m=s&&!o((function(){return 8!==p((function(){}),"length",{value:8}).length})),b=String(String).split("String"),_=e.exports=function(e,t,n){"Symbol("===g(h(t),0,7)&&(t="["+v(h(t),/^Symbol\(([^)]*)\).*$/,"$1")+"]"),n&&n.getter&&(t="get "+t),n&&n.setter&&(t="set "+t),(!a(e,"name")||u&&e.name!==t)&&(s?p(e,"name",{value:t,configurable:!0}):e.name=t),m&&n&&a(n,"arity")&&e.length!==n.arity&&p(e,"length",{value:n.arity});try{n&&a(n,"constructor")&&n.constructor?s&&p(e,"prototype",{writable:!1}):e.prototype&&(e.prototype=void 0)}catch(o){}var r=f(e);return a(r,"source")||(r.source=y(b,"string"==typeof t?t:"")),e};Function.prototype.toString=_((function(){return i(this)&&d(this).source||l(this)}),"toString")},72248:(e,t,n)=>{"use strict";var r=n(79504),o=Map.prototype;e.exports={Map:Map,set:r(o.set),get:r(o.get),has:r(o.has),remove:r(o.delete),proto:o}},26223:(e,t,n)=>{"use strict";var r=n(79504),o=n(40507),i=n(72248),a=i.Map,s=i.proto,u=r(s.forEach),l=r(s.entries),c=l(new a).next;e.exports=function(e,t,n){return n?o({iterator:l(e),next:c},(function(e){return t(e[1],e[0])})):u(e,t)}},80741:e=>{"use strict";var t=Math.ceil,n=Math.floor;e.exports=Math.trunc||function(e){var r=+e;return(r>0?n:t)(r)}},36043:(e,t,n)=>{"use strict";var 
r=n(79306),o=TypeError,i=function(e){var t,n;this.promise=new e((function(e,r){if(void 0!==t||void 0!==n)throw new o("Bad Promise constructor");t=e,n=r})),this.resolve=r(t),this.reject=r(n)};e.exports.f=function(e){return new i(e)}},2360:(e,t,n)=>{"use strict";var r,o=n(28551),i=n(96801),a=n(88727),s=n(30421),u=n(20397),l=n(4055),c=n(66119),f="prototype",d="script",h=c("IE_PROTO"),p=function(){},g=function(e){return"<"+d+">"+e+""},v=function(e){e.write(g("")),e.close();var t=e.parentWindow.Object;return e=null,t},y=function(){try{r=new ActiveXObject("htmlfile")}catch(t){}y="undefined"!=typeof document?document.domain&&r?v(r):function(){var e,t=l("iframe"),n="java"+d+":";return t.style.display="none",u.appendChild(t),t.src=String(n),(e=t.contentWindow.document).open(),e.write(g("document.F=Object")),e.close(),e.F}():v(r);for(var e=a.length;e--;)delete y[f][a[e]];return y()};s[h]=!0,e.exports=Object.create||function(e,t){var n;return null!==e?(p[f]=o(e),n=new p,p[f]=null,n[h]=e):n=y(),void 0===t?n:i.f(n,t)}},96801:(e,t,n)=>{"use strict";var r=n(43724),o=n(48686),i=n(24913),a=n(28551),s=n(25397),u=n(71072);t.f=r&&!o?Object.defineProperties:function(e,t){a(e);for(var n,r=s(t),o=u(t),l=o.length,c=0;l>c;)i.f(e,n=o[c++],r[n]);return e}},24913:(e,t,n)=>{"use strict";var r=n(43724),o=n(35917),i=n(48686),a=n(28551),s=n(56969),u=TypeError,l=Object.defineProperty,c=Object.getOwnPropertyDescriptor,f="enumerable",d="configurable",h="writable";t.f=r?i?function(e,t,n){if(a(e),t=s(t),a(n),"function"===typeof e&&"prototype"===t&&"value"in n&&h in n&&!n[h]){var r=c(e,t);r&&r[h]&&(e[t]=n.value,n={configurable:d in n?n[d]:r[d],enumerable:f in n?n[f]:r[f],writable:!1})}return l(e,t,n)}:l:function(e,t,n){if(a(e),t=s(t),a(n),o)try{return l(e,t,n)}catch(r){}if("get"in n||"set"in n)throw new u("Accessors not supported");return"value"in n&&(e[t]=n.value),e}},77347:(e,t,n)=>{"use strict";var r=n(43724),o=n(69565),i=n(48773),a=n(6980),s=n(25397),u=n(56969),l=n(39297),c=n(35917),f=Object.getOwnPropertyDescriptor;t.f=r?f:function(e,t){if(e=s(e),t=u(t),c)try{return f(e,t)}catch(n){}if(l(e,t))return a(!o(i.f,e,t),e[t])}},38480:(e,t,n)=>{"use strict";var r=n(61828),o=n(88727).concat("length","prototype");t.f=Object.getOwnPropertyNames||function(e){return r(e,o)}},33717:(e,t)=>{"use strict";t.f=Object.getOwnPropertySymbols},42787:(e,t,n)=>{"use strict";var r=n(39297),o=n(94901),i=n(48981),a=n(66119),s=n(12211),u=a("IE_PROTO"),l=Object,c=l.prototype;e.exports=s?l.getPrototypeOf:function(e){var t=i(e);if(r(t,u))return t[u];var n=t.constructor;return o(n)&&t instanceof n?n.prototype:t instanceof l?c:null}},1625:(e,t,n)=>{"use strict";var r=n(79504);e.exports=r({}.isPrototypeOf)},61828:(e,t,n)=>{"use strict";var r=n(79504),o=n(39297),i=n(25397),a=n(19617).indexOf,s=n(30421),u=r([].push);e.exports=function(e,t){var n,r=i(e),l=0,c=[];for(n in r)!o(s,n)&&o(r,n)&&u(c,n);for(;t.length>l;)o(r,n=t[l++])&&(~a(c,n)||u(c,n));return c}},71072:(e,t,n)=>{"use strict";var r=n(61828),o=n(88727);e.exports=Object.keys||function(e){return r(e,o)}},48773:(e,t)=>{"use strict";var n={}.propertyIsEnumerable,r=Object.getOwnPropertyDescriptor,o=r&&!n.call({1:2},1);t.f=o?function(e){var t=r(this,e);return!!t&&t.enumerable}:n},52967:(e,t,n)=>{"use strict";var r=n(46706),o=n(20034),i=n(67750),a=n(73506);e.exports=Object.setPrototypeOf||("__proto__"in{}?function(){var e,t=!1,n={};try{(e=r(Object.prototype,"__proto__","set"))(n,[]),t=n instanceof Array}catch(s){}return function(n,r){return i(n),a(r),o(n)?(t?e(n,r):n.__proto__=r,n):n}}():void 
0)},84270:(e,t,n)=>{"use strict";var r=n(69565),o=n(94901),i=n(20034),a=TypeError;e.exports=function(e,t){var n,s;if("string"===t&&o(n=e.toString)&&!i(s=r(n,e)))return s;if(o(n=e.valueOf)&&!i(s=r(n,e)))return s;if("string"!==t&&o(n=e.toString)&&!i(s=r(n,e)))return s;throw new a("Can't convert object to primitive value")}},35031:(e,t,n)=>{"use strict";var r=n(97751),o=n(79504),i=n(38480),a=n(33717),s=n(28551),u=o([].concat);e.exports=r("Reflect","ownKeys")||function(e){var t=i.f(s(e)),n=a.f;return n?u(t,n(e)):t}},1103:e=>{"use strict";e.exports=function(e){try{return{error:!1,value:e()}}catch(t){return{error:!0,value:t}}}},80550:(e,t,n)=>{"use strict";var r=n(24475);e.exports=r.Promise},93438:(e,t,n)=>{"use strict";var r=n(28551),o=n(20034),i=n(36043);e.exports=function(e,t){if(r(e),o(t)&&t.constructor===e)return t;var n=i.f(e);return(0,n.resolve)(t),n.promise}},56682:(e,t,n)=>{"use strict";var r=n(69565),o=n(28551),i=n(94901),a=n(44576),s=n(57323),u=TypeError;e.exports=function(e,t){var n=e.exec;if(i(n)){var l=r(n,e,t);return null!==l&&o(l),l}if("RegExp"===a(e))return r(s,e,t);throw new u("RegExp#exec called on incompatible receiver")}},57323:(e,t,n)=>{"use strict";var r=n(69565),o=n(79504),i=n(655),a=n(67979),s=n(58429),u=n(25745),l=n(2360),c=n(91181).get,f=n(83635),d=n(18814),h=u("native-string-replace",String.prototype.replace),p=RegExp.prototype.exec,g=p,v=o("".charAt),y=o("".indexOf),m=o("".replace),b=o("".slice),_=function(){var e=/a/,t=/b*/g;return r(p,e,"a"),r(p,t,"a"),0!==e.lastIndex||0!==t.lastIndex}(),w=s.BROKEN_CARET,x=void 0!==/()??/.exec("")[1];(_||x||w||f||d)&&(g=function(e){var t,n,o,s,u,f,d,S=this,A=c(S),E=i(e),C=A.raw;if(C)return C.lastIndex=S.lastIndex,t=r(g,C,E),S.lastIndex=C.lastIndex,t;var R=A.groups,O=w&&S.sticky,T=r(a,S),k=S.source,M=0,P=E;if(O&&(T=m(T,"y",""),-1===y(T,"g")&&(T+="g"),P=b(E,S.lastIndex),S.lastIndex>0&&(!S.multiline||S.multiline&&"\n"!==v(E,S.lastIndex-1))&&(k="(?: "+k+")",P=" "+P,M++),n=new RegExp("^(?:"+k+")",T)),x&&(n=new RegExp("^"+k+"$(?!\\s)",T)),_&&(o=S.lastIndex),s=r(p,O?n:S,P),O?s?(s.input=b(s.input,M),s[0]=b(s[0],M),s.index=S.lastIndex,S.lastIndex+=s[0].length):S.lastIndex=0:_&&s&&(S.lastIndex=S.global?s.index+s[0].length:o),x&&s&&s.length>1&&r(h,s[0],n,(function(){for(u=1;u{"use strict";var r=n(28551);e.exports=function(){var e=r(this),t="";return e.hasIndices&&(t+="d"),e.global&&(t+="g"),e.ignoreCase&&(t+="i"),e.multiline&&(t+="m"),e.dotAll&&(t+="s"),e.unicode&&(t+="u"),e.unicodeSets&&(t+="v"),e.sticky&&(t+="y"),t}},58429:(e,t,n)=>{"use strict";var r=n(79039),o=n(24475).RegExp,i=r((function(){var e=o("a","y");return e.lastIndex=2,null!==e.exec("abcd")})),a=i||r((function(){return!o("a","y").sticky})),s=i||r((function(){var e=o("^r","gy");return e.lastIndex=2,null!==e.exec("str")}));e.exports={BROKEN_CARET:s,MISSED_STICKY:a,UNSUPPORTED_Y:i}},83635:(e,t,n)=>{"use strict";var r=n(79039),o=n(24475).RegExp;e.exports=r((function(){var e=o(".","s");return!(e.dotAll&&e.test("\n")&&"s"===e.flags)}))},18814:(e,t,n)=>{"use strict";var r=n(79039),o=n(24475).RegExp;e.exports=r((function(){var e=o("(?b)","g");return"b"!==e.exec("b").groups.a||"bc"!=="b".replace(e,"$c")}))},67750:(e,t,n)=>{"use strict";var r=n(64117),o=TypeError;e.exports=function(e){if(r(e))throw new o("Can't call method on "+e);return e}},93389:(e,t,n)=>{"use strict";var r=n(24475),o=n(43724),i=Object.getOwnPropertyDescriptor;e.exports=function(e){if(!o)return r[e];var t=i(r,e);return t&&t.value}},33317:e=>{"use strict";e.exports=function(e,t){return 
e===t||e!==e&&t!==t}},89286:(e,t,n)=>{"use strict";var r=n(94402),o=n(38469),i=r.Set,a=r.add;e.exports=function(e){var t=new i;return o(e,(function(e){a(t,e)})),t}},83440:(e,t,n)=>{"use strict";var r=n(97080),o=n(94402),i=n(89286),a=n(25170),s=n(83789),u=n(38469),l=n(40507),c=o.has,f=o.remove;e.exports=function(e){var t=r(this),n=s(e),o=i(t);return a(t)<=n.size?u(t,(function(e){n.includes(e)&&f(o,e)})):l(n.getIterator(),(function(e){c(t,e)&&f(o,e)})),o}},94402:(e,t,n)=>{"use strict";var r=n(79504),o=Set.prototype;e.exports={Set:Set,add:r(o.add),has:r(o.has),remove:r(o.delete),proto:o}},68750:(e,t,n)=>{"use strict";var r=n(97080),o=n(94402),i=n(25170),a=n(83789),s=n(38469),u=n(40507),l=o.Set,c=o.add,f=o.has;e.exports=function(e){var t=r(this),n=a(e),o=new l;return i(t)>n.size?u(n.getIterator(),(function(e){f(t,e)&&c(o,e)})):s(t,(function(e){n.includes(e)&&c(o,e)})),o}},64449:(e,t,n)=>{"use strict";var r=n(97080),o=n(94402).has,i=n(25170),a=n(83789),s=n(38469),u=n(40507),l=n(9539);e.exports=function(e){var t=r(this),n=a(e);if(i(t)<=n.size)return!1!==s(t,(function(e){if(n.includes(e))return!1}),!0);var c=n.getIterator();return!1!==u(c,(function(e){if(o(t,e))return l(c,"normal",!1)}))}},53838:(e,t,n)=>{"use strict";var r=n(97080),o=n(25170),i=n(38469),a=n(83789);e.exports=function(e){var t=r(this),n=a(e);return!(o(t)>n.size)&&!1!==i(t,(function(e){if(!n.includes(e))return!1}),!0)}},28527:(e,t,n)=>{"use strict";var r=n(97080),o=n(94402).has,i=n(25170),a=n(83789),s=n(40507),u=n(9539);e.exports=function(e){var t=r(this),n=a(e);if(i(t){"use strict";var r=n(79504),o=n(40507),i=n(94402),a=i.Set,s=i.proto,u=r(s.forEach),l=r(s.keys),c=l(new a).next;e.exports=function(e,t,n){return n?o({iterator:l(e),next:c},t):u(e,t)}},25170:(e,t,n)=>{"use strict";var r=n(46706),o=n(94402);e.exports=r(o.proto,"size","get")||function(e){return e.size}},83650:(e,t,n)=>{"use strict";var r=n(97080),o=n(94402),i=n(89286),a=n(83789),s=n(40507),u=o.add,l=o.has,c=o.remove;e.exports=function(e){var t=r(this),n=a(e).getIterator(),o=i(t);return s(n,(function(e){l(t,e)?c(o,e):u(o,e)})),o}},10687:(e,t,n)=>{"use strict";var r=n(24913).f,o=n(39297),i=n(78227)("toStringTag");e.exports=function(e,t,n){e&&!n&&(e=e.prototype),e&&!o(e,i)&&r(e,i,{configurable:!0,value:t})}},44204:(e,t,n)=>{"use strict";var r=n(97080),o=n(94402).add,i=n(89286),a=n(83789),s=n(40507);e.exports=function(e){var t=r(this),n=a(e).getIterator(),u=i(t);return s(n,(function(e){o(u,e)})),u}},66119:(e,t,n)=>{"use strict";var r=n(25745),o=n(33392),i=r("keys");e.exports=function(e){return i[e]||(i[e]=o(e))}},77629:(e,t,n)=>{"use strict";var r=n(96395),o=n(24475),i=n(39433),a="__core-js_shared__",s=e.exports=o[a]||i(a,{});(s.versions||(s.versions=[])).push({version:"3.37.0",mode:r?"pure":"global",copyright:"\xa9 2014-2024 Denis Pushkarev (zloirock.ru)",license:"https://github.com/zloirock/core-js/blob/v3.37.0/LICENSE",source:"https://github.com/zloirock/core-js"})},25745:(e,t,n)=>{"use strict";var r=n(77629);e.exports=function(e,t){return r[e]||(r[e]=t||{})}},2293:(e,t,n)=>{"use strict";var r=n(28551),o=n(35548),i=n(64117),a=n(78227)("species");e.exports=function(e,t){var n,s=r(e).constructor;return void 0===s||i(n=r(s)[a])?t:o(n)}},68183:(e,t,n)=>{"use strict";var r=n(79504),o=n(91291),i=n(655),a=n(67750),s=r("".charAt),u=r("".charCodeAt),l=r("".slice),c=function(e){return function(t,n){var r,c,f=i(a(t)),d=o(n),h=f.length;return d<0||d>=h?e?"":void 
0:(r=u(f,d))<55296||r>56319||d+1===h||(c=u(f,d+1))<56320||c>57343?e?s(f,d):r:e?l(f,d,d+2):c-56320+(r-55296<<10)+65536}};e.exports={codeAt:c(!1),charAt:c(!0)}},4495:(e,t,n)=>{"use strict";var r=n(77388),o=n(79039),i=n(24475).String;e.exports=!!Object.getOwnPropertySymbols&&!o((function(){var e=Symbol("symbol detection");return!i(e)||!(Object(e)instanceof Symbol)||!Symbol.sham&&r&&r<41}))},35610:(e,t,n)=>{"use strict";var r=n(91291),o=Math.max,i=Math.min;e.exports=function(e,t){var n=r(e);return n<0?o(n+t,0):i(n,t)}},25397:(e,t,n)=>{"use strict";var r=n(47055),o=n(67750);e.exports=function(e){return r(o(e))}},91291:(e,t,n)=>{"use strict";var r=n(80741);e.exports=function(e){var t=+e;return t!==t||0===t?0:r(t)}},18014:(e,t,n)=>{"use strict";var r=n(91291),o=Math.min;e.exports=function(e){var t=r(e);return t>0?o(t,9007199254740991):0}},48981:(e,t,n)=>{"use strict";var r=n(67750),o=Object;e.exports=function(e){return o(r(e))}},72777:(e,t,n)=>{"use strict";var r=n(69565),o=n(20034),i=n(10757),a=n(55966),s=n(84270),u=n(78227),l=TypeError,c=u("toPrimitive");e.exports=function(e,t){if(!o(e)||i(e))return e;var n,u=a(e,c);if(u){if(void 0===t&&(t="default"),n=r(u,e,t),!o(n)||i(n))return n;throw new l("Can't convert object to primitive value")}return void 0===t&&(t="number"),s(e,t)}},56969:(e,t,n)=>{"use strict";var r=n(72777),o=n(10757);e.exports=function(e){var t=r(e,"string");return o(t)?t:t+""}},47650:(e,t,n)=>{"use strict";var r=n(97751),o=n(94901),i=n(31563),a=n(20034),s=r("Set");e.exports=function(e){return function(e){return a(e)&&"number"==typeof e.size&&o(e.has)&&o(e.keys)}(e)?e:i(e)?new s(e):e}},92140:(e,t,n)=>{"use strict";var r={};r[n(78227)("toStringTag")]="z",e.exports="[object z]"===String(r)},655:(e,t,n)=>{"use strict";var r=n(36955),o=String;e.exports=function(e){if("Symbol"===r(e))throw new TypeError("Cannot convert a Symbol value to a string");return o(e)}},16823:e=>{"use strict";var t=String;e.exports=function(e){try{return t(e)}catch(n){return"Object"}}},33392:(e,t,n)=>{"use strict";var r=n(79504),o=0,i=Math.random(),a=r(1..toString);e.exports=function(e){return"Symbol("+(void 0===e?"":e)+")_"+a(++o+i,36)}},67416:(e,t,n)=>{"use strict";var r=n(79039),o=n(78227),i=n(43724),a=n(96395),s=o("iterator");e.exports=!r((function(){var e=new URL("b?a=1&b=2&c=3","http://a"),t=e.searchParams,n=new URLSearchParams("a=1&a=2&b=3"),r="";return e.pathname="c%20d",t.forEach((function(e,n){t.delete("b"),r+=n+e})),n.delete("a",2),n.delete("b",void 0),a&&(!e.toJSON||!n.has("a",1)||n.has("a",2)||!n.has("a",void 0)||n.has("b"))||!t.size&&(a||!i)||!t.sort||"http://a/c%20d?a=1&c=3"!==e.href||"3"!==t.get("c")||"a=1"!==String(new URLSearchParams("?a=1"))||!t[s]||"a"!==new URL("https://a@b").username||"b"!==new URLSearchParams(new URLSearchParams("a=b")).get("a")||"xn--e1aybc"!==new URL("http://\u0442\u0435\u0441\u0442").host||"#%D0%B1"!==new URL("http://a#\u0431").hash||"a1c3"!==r||"x"!==new URL("http://x",void 0).host}))},7040:(e,t,n)=>{"use strict";var r=n(4495);e.exports=r&&!Symbol.sham&&"symbol"==typeof Symbol.iterator},48686:(e,t,n)=>{"use strict";var r=n(43724),o=n(79039);e.exports=r&&o((function(){return 42!==Object.defineProperty((function(){}),"prototype",{value:42,writable:!1}).prototype}))},22812:e=>{"use strict";var t=TypeError;e.exports=function(e,n){if(e{"use strict";var r=n(24475),o=n(94901),i=r.WeakMap;e.exports=o(i)&&/native code/.test(String(i))},78227:(e,t,n)=>{"use strict";var 
r=n(24475),o=n(25745),i=n(39297),a=n(33392),s=n(4495),u=n(7040),l=r.Symbol,c=o("wks"),f=u?l.for||l:l&&l.withoutSetter||a;e.exports=function(e){return i(c,e)||(c[e]=s&&i(l,e)?l[e]:f("Symbol."+e)),c[e]}},23792:(e,t,n)=>{"use strict";var r=n(25397),o=n(6469),i=n(26269),a=n(91181),s=n(24913).f,u=n(51088),l=n(62529),c=n(96395),f=n(43724),d="Array Iterator",h=a.set,p=a.getterFor(d);e.exports=u(Array,"Array",(function(e,t){h(this,{type:d,target:r(e),index:0,kind:t})}),(function(){var e=p(this),t=e.target,n=e.index++;if(!t||n>=t.length)return e.target=void 0,l(void 0,!0);switch(e.kind){case"keys":return l(n,!1);case"values":return l(t[n],!1)}return l([n,t[n]],!1)}),"values");var g=i.Arguments=i.Array;if(o("keys"),o("values"),o("entries"),!c&&f&&"values"!==g.name)try{s(g,"name",{value:"values"})}catch(v){}},9391:(e,t,n)=>{"use strict";var r=n(46518),o=n(96395),i=n(80550),a=n(79039),s=n(97751),u=n(94901),l=n(2293),c=n(93438),f=n(36840),d=i&&i.prototype;if(r({target:"Promise",proto:!0,real:!0,forced:!!i&&a((function(){d.finally.call({then:function(){}},(function(){}))}))},{finally:function(e){var t=l(this,s("Promise")),n=u(e);return this.then(n?function(n){return c(t,e()).then((function(){return n}))}:e,n?function(n){return c(t,e()).then((function(){throw n}))}:e)}}),!o&&u(i)){var h=s("Promise").prototype.finally;d.finally!==h&&f(d,"finally",h,{unsafe:!0})}},27495:(e,t,n)=>{"use strict";var r=n(46518),o=n(57323);r({target:"RegExp",proto:!0,forced:/./.exec!==o},{exec:o})},69479:(e,t,n)=>{"use strict";var r=n(24475),o=n(43724),i=n(62106),a=n(67979),s=n(79039),u=r.RegExp,l=u.prototype;o&&s((function(){var e=!0;try{u(".","d")}catch(s){e=!1}var t={},n="",r=e?"dgimsy":"gimsy",o=function(e,r){Object.defineProperty(t,e,{get:function(){return n+=r,!0}})},i={dotAll:"s",global:"g",ignoreCase:"i",multiline:"m",sticky:"y"};for(var a in e&&(i.hasIndices="d"),i)o(a,i[a]);return Object.getOwnPropertyDescriptor(l,"flags").get.call(t)!==r||n!==r}))&&i(l,"flags",{configurable:!0,get:a})},25440:(e,t,n)=>{"use strict";var r=n(18745),o=n(69565),i=n(79504),a=n(89228),s=n(79039),u=n(28551),l=n(94901),c=n(64117),f=n(91291),d=n(18014),h=n(655),p=n(67750),g=n(57829),v=n(55966),y=n(2478),m=n(56682),b=n(78227)("replace"),_=Math.max,w=Math.min,x=i([].concat),S=i([].push),A=i("".indexOf),E=i("".slice),C="$0"==="a".replace(/./,"$0"),R=!!/./[b]&&""===/./[b]("a","$0");a("replace",(function(e,t,n){var i=R?"$":"$0";return[function(e,n){var r=p(this),i=c(e)?void 0:v(e,b);return i?o(i,e,r,n):o(t,h(r),e,n)},function(e,o){var a=u(this),s=h(e);if("string"==typeof o&&-1===A(o,i)&&-1===A(o,"$<")){var c=n(t,a,s,o);if(c.done)return c.value}var p=l(o);p||(o=h(o));var v,b=a.global;b&&(v=a.unicode,a.lastIndex=0);for(var C,R=[];null!==(C=m(a,s))&&(S(R,C),b);){""===h(C[0])&&(a.lastIndex=g(s,d(a.lastIndex),v))}for(var O,T="",k=0,M=0;M=k&&(T+=E(s,k,N)+P,k=N+I.length)}return T+E(s,k)}]}),!!s((function(){var e=/./;return e.exec=function(){var e=[];return e.groups={a:"7"},e},"7"!=="".replace(e,"$")}))||!C||R)},74648:(e,t,n)=>{"use strict";var r=n(46518),o=n(36639).every;r({target:"AsyncIterator",proto:!0,real:!0},{every:function(e){return o(this,e)}})},17333:(e,t,n)=>{"use strict";var r=n(46518),o=n(69565),i=n(79306),a=n(28551),s=n(20034),u=n(1767),l=n(92059),c=n(62529),f=n(20772),d=n(96395),h=l((function(e){var t=this,n=t.iterator,r=t.predicate;return new e((function(i,u){var l=function(e){t.done=!0,u(e)},d=function(e){f(n,l,e,l)},h=function(){try{e.resolve(a(o(t.next,n))).then((function(n){try{if(a(n).done)t.done=!0,i(c(void 0,!0));else{var 
o=n.value;try{var u=r(o,t.counter++),f=function(e){e?i(c(o,!1)):h()};s(u)?e.resolve(u).then(f,d):f(u)}catch(p){d(p)}}}catch(g){l(g)}}),l)}catch(u){l(u)}};h()}))}));r({target:"AsyncIterator",proto:!0,real:!0,forced:d},{filter:function(e){return a(this),i(e),new h(u(this),{predicate:e})}})},3064:(e,t,n)=>{"use strict";var r=n(46518),o=n(36639).find;r({target:"AsyncIterator",proto:!0,real:!0},{find:function(e){return o(this,e)}})},9920:(e,t,n)=>{"use strict";var r=n(46518),o=n(36639).forEach;r({target:"AsyncIterator",proto:!0,real:!0},{forEach:function(e){return o(this,e)}})},41393:(e,t,n)=>{"use strict";var r=n(46518),o=n(41750);r({target:"AsyncIterator",proto:!0,real:!0,forced:n(96395)},{map:o})},14905:(e,t,n)=>{"use strict";var r=n(46518),o=n(69565),i=n(79306),a=n(28551),s=n(20034),u=n(97751),l=n(1767),c=n(20772),f=u("Promise"),d=TypeError;r({target:"AsyncIterator",proto:!0,real:!0},{reduce:function(e){a(this),i(e);var t=l(this),n=t.iterator,r=t.next,u=arguments.length<2,h=u?void 0:arguments[1],p=0;return new f((function(t,i){var l=function(e){c(n,i,e,i)},g=function(){try{f.resolve(a(o(r,n))).then((function(n){try{if(a(n).done)u?i(new d("Reduce of empty iterator with no initial value")):t(h);else{var r=n.value;if(u)u=!1,h=r,g();else try{var o=e(h,r,p),c=function(e){h=e,g()};s(o)?f.resolve(o).then(c,l):c(o)}catch(v){l(v)}}p++}catch(y){i(y)}}),i)}catch(c){i(c)}};g()}))}})},8159:(e,t,n)=>{"use strict";var r=n(46518),o=n(36639).some;r({target:"AsyncIterator",proto:!0,real:!0},{some:function(e){return o(this,e)}})},98992:(e,t,n)=>{"use strict";var r=n(46518),o=n(24475),i=n(90679),a=n(28551),s=n(94901),u=n(42787),l=n(62106),c=n(97040),f=n(79039),d=n(39297),h=n(78227),p=n(57657).IteratorPrototype,g=n(43724),v=n(96395),y="constructor",m="Iterator",b=h("toStringTag"),_=TypeError,w=o[m],x=v||!s(w)||w.prototype!==p||!f((function(){w({})})),S=function(){if(i(this,p),u(this)===p)throw new _("Abstract class Iterator not directly constructable")},A=function(e,t){g?l(p,e,{configurable:!0,get:function(){return t},set:function(t){if(a(this),this===p)throw new _("You can't redefine this property");d(this,e)?this[e]=t:c(this,e,t)}}):p[e]=t};d(p,b)||A(b,m),!x&&d(p,y)&&p[y]!==Object||A(y,S),S.prototype=p,r({global:!0,constructor:!0,forced:x},{Iterator:S})},23215:(e,t,n)=>{"use strict";var r=n(46518),o=n(72652),i=n(79306),a=n(28551),s=n(1767);r({target:"Iterator",proto:!0,real:!0},{every:function(e){a(this),i(e);var t=s(this),n=0;return!o(t,(function(t,r){if(!e(t,n++))return r()}),{IS_RECORD:!0,INTERRUPTED:!0}).stopped}})},54520:(e,t,n)=>{"use strict";var r=n(46518),o=n(69565),i=n(79306),a=n(28551),s=n(1767),u=n(19462),l=n(96319),c=n(96395),f=u((function(){for(var e,t,n=this.iterator,r=this.predicate,i=this.next;;){if(e=a(o(i,n)),this.done=!!e.done)return;if(t=e.value,l(n,r,[t,this.counter++],!0))return t}}));r({target:"Iterator",proto:!0,real:!0,forced:c},{filter:function(e){return a(this),i(e),new f(s(this),{predicate:e})}})},72577:(e,t,n)=>{"use strict";var r=n(46518),o=n(72652),i=n(79306),a=n(28551),s=n(1767);r({target:"Iterator",proto:!0,real:!0},{find:function(e){a(this),i(e);var t=s(this),n=0;return o(t,(function(t,r){if(e(t,n++))return r(t)}),{IS_RECORD:!0,INTERRUPTED:!0}).result}})},3949:(e,t,n)=>{"use strict";var r=n(46518),o=n(72652),i=n(79306),a=n(28551),s=n(1767);r({target:"Iterator",proto:!0,real:!0},{forEach:function(e){a(this),i(e);var t=s(this),n=0;o(t,(function(t){e(t,n++)}),{IS_RECORD:!0})}})},81454:(e,t,n)=>{"use strict";var 
r=n(46518),o=n(20713);r({target:"Iterator",proto:!0,real:!0,forced:n(96395)},{map:o})},8872:(e,t,n)=>{"use strict";var r=n(46518),o=n(72652),i=n(79306),a=n(28551),s=n(1767),u=TypeError;r({target:"Iterator",proto:!0,real:!0},{reduce:function(e){a(this),i(e);var t=s(this),n=arguments.length<2,r=n?void 0:arguments[1],l=0;if(o(t,(function(t){n?(n=!1,r=t):r=e(r,t,l),l++}),{IS_RECORD:!0}),n)throw new u("Reduce of empty iterator with no initial value");return r}})},37550:(e,t,n)=>{"use strict";var r=n(46518),o=n(72652),i=n(79306),a=n(28551),s=n(1767);r({target:"Iterator",proto:!0,real:!0},{some:function(e){a(this),i(e);var t=s(this),n=0;return o(t,(function(t,r){if(e(t,n++))return r()}),{IS_RECORD:!0,INTERRUPTED:!0}).stopped}})},71517:(e,t,n)=>{"use strict";var r=n(46518),o=n(13813),i=n(72248).remove;r({target:"Map",proto:!0,real:!0,forced:!0},{deleteAll:function(){for(var e,t=o(this),n=!0,r=0,a=arguments.length;r{"use strict";var r=n(46518),o=n(13813),i=n(72248),a=i.get,s=i.has,u=i.set;r({target:"Map",proto:!0,real:!0,forced:!0},{emplace:function(e,t){var n,r,i=o(this);return s(i,e)?(n=a(i,e),"update"in t&&(n=t.update(n,e,i),u(i,e,n)),n):(r=t.insert(e,i),u(i,e,r),r)}})},93777:(e,t,n)=>{"use strict";var r=n(46518),o=n(76080),i=n(13813),a=n(26223);r({target:"Map",proto:!0,real:!0,forced:!0},{every:function(e){var t=i(this),n=o(e,arguments.length>1?arguments[1]:void 0);return!1!==a(t,(function(e,r){if(!n(e,r,t))return!1}),!0)}})},14190:(e,t,n)=>{"use strict";var r=n(46518),o=n(76080),i=n(13813),a=n(72248),s=n(26223),u=a.Map,l=a.set;r({target:"Map",proto:!0,real:!0,forced:!0},{filter:function(e){var t=i(this),n=o(e,arguments.length>1?arguments[1]:void 0),r=new u;return s(t,(function(e,o){n(e,o,t)&&l(r,o,e)})),r}})},86097:(e,t,n)=>{"use strict";var r=n(46518),o=n(76080),i=n(13813),a=n(26223);r({target:"Map",proto:!0,real:!0,forced:!0},{findKey:function(e){var t=i(this),n=o(e,arguments.length>1?arguments[1]:void 0),r=a(t,(function(e,r){if(n(e,r,t))return{key:r}}),!0);return r&&r.key}})},12359:(e,t,n)=>{"use strict";var r=n(46518),o=n(76080),i=n(13813),a=n(26223);r({target:"Map",proto:!0,real:!0,forced:!0},{find:function(e){var t=i(this),n=o(e,arguments.length>1?arguments[1]:void 0),r=a(t,(function(e,r){if(n(e,r,t))return{value:e}}),!0);return r&&r.value}})},17273:(e,t,n)=>{"use strict";var r=n(46518),o=n(33317),i=n(13813),a=n(26223);r({target:"Map",proto:!0,real:!0,forced:!0},{includes:function(e){return!0===a(i(this),(function(t){if(o(t,e))return!0}),!0)}})},27415:(e,t,n)=>{"use strict";var r=n(46518),o=n(13813),i=n(26223);r({target:"Map",proto:!0,real:!0,forced:!0},{keyOf:function(e){var t=i(o(this),(function(t,n){if(t===e)return{key:n}}),!0);return t&&t.key}})},19929:(e,t,n)=>{"use strict";var r=n(46518),o=n(76080),i=n(13813),a=n(72248),s=n(26223),u=a.Map,l=a.set;r({target:"Map",proto:!0,real:!0,forced:!0},{mapKeys:function(e){var t=i(this),n=o(e,arguments.length>1?arguments[1]:void 0),r=new u;return s(t,(function(e,o){l(r,n(e,o,t),e)})),r}})},37583:(e,t,n)=>{"use strict";var r=n(46518),o=n(76080),i=n(13813),a=n(72248),s=n(26223),u=a.Map,l=a.set;r({target:"Map",proto:!0,real:!0,forced:!0},{mapValues:function(e){var t=i(this),n=o(e,arguments.length>1?arguments[1]:void 0),r=new u;return s(t,(function(e,o){l(r,o,n(e,o,t))})),r}})},55122:(e,t,n)=>{"use strict";var r=n(46518),o=n(13813),i=n(72652),a=n(72248).set;r({target:"Map",proto:!0,real:!0,arity:1,forced:!0},{merge:function(e){for(var t=o(this),n=arguments.length,r=0;r{"use strict";var 
r=n(46518),o=n(79306),i=n(13813),a=n(26223),s=TypeError;r({target:"Map",proto:!0,real:!0,forced:!0},{reduce:function(e){var t=i(this),n=arguments.length<2,r=n?void 0:arguments[1];if(o(e),a(t,(function(o,i){n?(n=!1,r=o):r=e(r,o,i,t)})),n)throw new s("Reduce of empty map with no initial value");return r}})},57268:(e,t,n)=>{"use strict";var r=n(46518),o=n(76080),i=n(13813),a=n(26223);r({target:"Map",proto:!0,real:!0,forced:!0},{some:function(e){var t=i(this),n=o(e,arguments.length>1?arguments[1]:void 0);return!0===a(t,(function(e,r){if(n(e,r,t))return!0}),!0)}})},79733:(e,t,n)=>{"use strict";var r=n(46518),o=n(79306),i=n(13813),a=n(72248),s=TypeError,u=a.get,l=a.has,c=a.set;r({target:"Map",proto:!0,real:!0,forced:!0},{update:function(e,t){var n=i(this),r=arguments.length;o(t);var a=l(n,e);if(!a&&r<3)throw new s("Updating absent value");var f=a?u(n,e):o(r>2?arguments[2]:void 0)(e,n);return c(n,e,t(f,e,n)),n}})},25509:(e,t,n)=>{"use strict";var r=n(46518),o=n(97080),i=n(94402).add;r({target:"Set",proto:!0,real:!0,forced:!0},{addAll:function(){for(var e=o(this),t=0,n=arguments.length;t{"use strict";var r=n(46518),o=n(97080),i=n(94402).remove;r({target:"Set",proto:!0,real:!0,forced:!0},{deleteAll:function(){for(var e,t=o(this),n=!0,r=0,a=arguments.length;r{"use strict";var r=n(46518),o=n(69565),i=n(47650),a=n(83440);r({target:"Set",proto:!0,real:!0,forced:!0},{difference:function(e){return o(a,this,i(e))}})},41927:(e,t,n)=>{"use strict";var r=n(46518),o=n(76080),i=n(97080),a=n(38469);r({target:"Set",proto:!0,real:!0,forced:!0},{every:function(e){var t=i(this),n=o(e,arguments.length>1?arguments[1]:void 0);return!1!==a(t,(function(e){if(!n(e,e,t))return!1}),!0)}})},11632:(e,t,n)=>{"use strict";var r=n(46518),o=n(76080),i=n(97080),a=n(94402),s=n(38469),u=a.Set,l=a.add;r({target:"Set",proto:!0,real:!0,forced:!0},{filter:function(e){var t=i(this),n=o(e,arguments.length>1?arguments[1]:void 0),r=new u;return s(t,(function(e){n(e,e,t)&&l(r,e)})),r}})},64377:(e,t,n)=>{"use strict";var r=n(46518),o=n(76080),i=n(97080),a=n(38469);r({target:"Set",proto:!0,real:!0,forced:!0},{find:function(e){var t=i(this),n=o(e,arguments.length>1?arguments[1]:void 0),r=a(t,(function(e){if(n(e,e,t))return{value:e}}),!0);return r&&r.value}})},66771:(e,t,n)=>{"use strict";var r=n(46518),o=n(69565),i=n(47650),a=n(68750);r({target:"Set",proto:!0,real:!0,forced:!0},{intersection:function(e){return o(a,this,i(e))}})},12516:(e,t,n)=>{"use strict";var r=n(46518),o=n(69565),i=n(47650),a=n(64449);r({target:"Set",proto:!0,real:!0,forced:!0},{isDisjointFrom:function(e){return o(a,this,i(e))}})},68931:(e,t,n)=>{"use strict";var r=n(46518),o=n(69565),i=n(47650),a=n(53838);r({target:"Set",proto:!0,real:!0,forced:!0},{isSubsetOf:function(e){return o(a,this,i(e))}})},52514:(e,t,n)=>{"use strict";var r=n(46518),o=n(69565),i=n(47650),a=n(28527);r({target:"Set",proto:!0,real:!0,forced:!0},{isSupersetOf:function(e){return o(a,this,i(e))}})},35694:(e,t,n)=>{"use strict";var r=n(46518),o=n(79504),i=n(97080),a=n(38469),s=n(655),u=o([].join),l=o([].push);r({target:"Set",proto:!0,real:!0,forced:!0},{join:function(e){var t=i(this),n=void 0===e?",":s(e),r=[];return a(t,(function(e){l(r,e)})),u(r,n)}})},52774:(e,t,n)=>{"use strict";var r=n(46518),o=n(76080),i=n(97080),a=n(94402),s=n(38469),u=a.Set,l=a.add;r({target:"Set",proto:!0,real:!0,forced:!0},{map:function(e){var t=i(this),n=o(e,arguments.length>1?arguments[1]:void 0),r=new u;return s(t,(function(e){l(r,n(e,e,t))})),r}})},49536:(e,t,n)=>{"use strict";var 
r=n(46518),o=n(79306),i=n(97080),a=n(38469),s=TypeError;r({target:"Set",proto:!0,real:!0,forced:!0},{reduce:function(e){var t=i(this),n=arguments.length<2,r=n?void 0:arguments[1];if(o(e),a(t,(function(o){n?(n=!1,r=o):r=e(r,o,o,t)})),n)throw new s("Reduce of empty set with no initial value");return r}})},21926:(e,t,n)=>{"use strict";var r=n(46518),o=n(76080),i=n(97080),a=n(38469);r({target:"Set",proto:!0,real:!0,forced:!0},{some:function(e){var t=i(this),n=o(e,arguments.length>1?arguments[1]:void 0);return!0===a(t,(function(e){if(n(e,e,t))return!0}),!0)}})},94483:(e,t,n)=>{"use strict";var r=n(46518),o=n(69565),i=n(47650),a=n(83650);r({target:"Set",proto:!0,real:!0,forced:!0},{symmetricDifference:function(e){return o(a,this,i(e))}})},16215:(e,t,n)=>{"use strict";var r=n(46518),o=n(69565),i=n(47650),a=n(44204);r({target:"Set",proto:!0,real:!0,forced:!0},{union:function(e){return o(a,this,i(e))}})},62953:(e,t,n)=>{"use strict";var r=n(24475),o=n(67400),i=n(79296),a=n(23792),s=n(66699),u=n(10687),l=n(78227)("iterator"),c=a.values,f=function(e,t){if(e){if(e[l]!==c)try{s(e,l,c)}catch(r){e[l]=c}if(u(e,t,!0),o[t])for(var n in a)if(e[n]!==a[n])try{s(e,n,a[n])}catch(r){e[n]=a[n]}}};for(var d in o)f(r[d]&&r[d].prototype,d);f(i,"DOMTokenList")},98406:(e,t,n)=>{"use strict";n(23792);var r=n(46518),o=n(24475),i=n(93389),a=n(69565),s=n(79504),u=n(43724),l=n(67416),c=n(36840),f=n(62106),d=n(56279),h=n(10687),p=n(33994),g=n(91181),v=n(90679),y=n(94901),m=n(39297),b=n(76080),_=n(36955),w=n(28551),x=n(20034),S=n(655),A=n(2360),E=n(6980),C=n(70081),R=n(50851),O=n(62529),T=n(22812),k=n(78227),M=n(74488),P=k("iterator"),I="URLSearchParams",N=I+"Iterator",D=g.set,L=g.getterFor(I),F=g.getterFor(N),j=i("fetch"),U=i("Request"),z=i("Headers"),B=U&&U.prototype,V=z&&z.prototype,$=o.RegExp,H=o.TypeError,W=o.decodeURIComponent,q=o.encodeURIComponent,G=s("".charAt),Y=s([].join),X=s([].push),K=s("".replace),Z=s([].shift),Q=s([].splice),J=s("".split),ee=s("".slice),te=/\+/g,ne=Array(4),re=function(e){return ne[e-1]||(ne[e-1]=$("((?:%[\\da-f]{2}){"+e+"})","gi"))},oe=function(e){try{return W(e)}catch(t){return e}},ie=function(e){var t=K(e,te," "),n=4;try{return W(t)}catch(r){for(;n;)t=K(t,re(n--),oe);return t}},ae=/[!'()~]|%20/g,se={"!":"%21","'":"%27","(":"%28",")":"%29","~":"%7E","%20":"+"},ue=function(e){return se[e]},le=function(e){return K(q(e),ae,ue)},ce=p((function(e,t){D(this,{type:N,target:L(e).entries,index:0,kind:t})}),I,(function(){var e=F(this),t=e.target,n=e.index++;if(!t||n>=t.length)return e.target=void 0,O(void 0,!0);var r=t[n];switch(e.kind){case"keys":return O(r.key,!1);case"values":return O(r.value,!1)}return O([r.key,r.value],!1)}),!0),fe=function(e){this.entries=[],this.url=null,void 0!==e&&(x(e)?this.parseObject(e):this.parseQuery("string"==typeof e?"?"===G(e,0)?ee(e,1):e:S(e)))};fe.prototype={type:I,bindURL:function(e){this.url=e,this.update()},parseObject:function(e){var t,n,r,o,i,s,u,l=this.entries,c=R(e);if(c)for(n=(t=C(e,c)).next;!(r=a(n,t)).done;){if(i=(o=C(w(r.value))).next,(s=a(i,o)).done||(u=a(i,o)).done||!a(i,o).done)throw new H("Expected sequence with length 2");X(l,{key:S(s.value),value:S(u.value)})}else for(var f in e)m(e,f)&&X(l,{key:f,value:S(e[f])})},parseQuery:function(e){if(e)for(var t,n,r=this.entries,o=J(e,"&"),i=0;i0?arguments[0]:void 0));u||(this.size=e.entries.length)},he=de.prototype;if(d(he,{append:function(e,t){var n=L(this);T(arguments.length,2),X(n.entries,{key:S(e),value:S(t)}),u||this.length++,n.updateURL()},delete:function(e){for(var 
t=L(this),n=T(arguments.length,1),r=t.entries,o=S(e),i=n<2?void 0:arguments[1],a=void 0===i?i:S(i),s=0;st.key?1:-1})),e.updateURL()},forEach:function(e){for(var t,n=L(this).entries,r=b(e,arguments.length>1?arguments[1]:void 0),o=0;o1?ve(arguments[1]):{})}}),y(U)){var ye=function(e){return v(this,B),new U(e,arguments.length>1?ve(arguments[1]):{})};B.constructor=ye,ye.prototype=B,r({global:!0,constructor:!0,dontCallGetSet:!0,forced:!0},{Request:ye})}}e.exports={URLSearchParams:de,getState:L}},48408:(e,t,n)=>{"use strict";n(98406)},81431:(e,t)=>{"use strict";function n(e,t,n){var r,o=n||{},i=o.noTrailing,a=void 0!==i&&i,s=o.noLeading,u=void 0!==s&&s,l=o.debounceMode,c=void 0===l?void 0:l,f=!1,d=0;function h(){r&&clearTimeout(r)}function p(){for(var n=arguments.length,o=new Array(n),i=0;ie?u?(d=Date.now(),a||(r=setTimeout(c?g:p,e))):p():!0!==a&&(r=setTimeout(c?g:p,void 0===c?e-l:e)))}return p.cancel=function(e){var t=(e||{}).upcomingOnly,n=void 0!==t&&t;h(),f=!n},p}Object.defineProperty(t,"__esModule",{value:!0}),t.debounce=function(e,t,r){var o=(r||{}).atBegin;return n(e,t,{debounceMode:!1!==(void 0!==o&&o)})},t.throttle=n},43145:(e,t,n)=>{"use strict";function r(e,t){(null==t||t>e.length)&&(t=e.length);for(var n=0,r=new Array(t);nr})},9417:(e,t,n)=>{"use strict";function r(e){if(void 0===e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return e}n.d(t,{A:()=>r})},23029:(e,t,n)=>{"use strict";function r(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}n.d(t,{A:()=>r})},92901:(e,t,n)=>{"use strict";n.d(t,{A:()=>i});var r=n(20816);function o(e,t){for(var n=0;n{"use strict";n.d(t,{A:()=>s});var r=n(53954),o=n(52176),i=n(82284),a=n(9417);function s(e){var t=(0,o.A)();return function(){var n,o=(0,r.A)(e);if(t){var s=(0,r.A)(this).constructor;n=Reflect.construct(o,arguments,s)}else n=o.apply(this,arguments);return function(e,t){if(t&&("object"===(0,i.A)(t)||"function"===typeof t))return t;if(void 0!==t)throw new TypeError("Derived constructors may only return object or undefined");return(0,a.A)(e)}(this,n)}}},64467:(e,t,n)=>{"use strict";n.d(t,{A:()=>o});var r=n(20816);function o(e,t,n){return(t=(0,r.A)(t))in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}},58168:(e,t,n)=>{"use strict";function r(){return r=Object.assign?Object.assign.bind():function(e){for(var t=1;tr})},53954:(e,t,n)=>{"use strict";function r(e){return r=Object.setPrototypeOf?Object.getPrototypeOf.bind():function(e){return e.__proto__||Object.getPrototypeOf(e)},r(e)}n.d(t,{A:()=>r})},85501:(e,t,n)=>{"use strict";n.d(t,{A:()=>o});var r=n(63662);function o(e,t){if("function"!==typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function");e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,writable:!0,configurable:!0}}),Object.defineProperty(e,"prototype",{writable:!1}),t&&(0,r.A)(e,t)}},77387:(e,t,n)=>{"use strict";n.d(t,{A:()=>o});var r=n(63662);function o(e,t){e.prototype=Object.create(t.prototype),e.prototype.constructor=e,(0,r.A)(e,t)}},52176:(e,t,n)=>{"use strict";function r(){try{var e=!Boolean.prototype.valueOf.call(Reflect.construct(Boolean,[],(function(){})))}catch(e){}return(r=function(){return!!e})()}n.d(t,{A:()=>r})},89379:(e,t,n)=>{"use strict";n.d(t,{A:()=>i});var r=n(64467);function o(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return 
Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function i(e){for(var t=1;t{"use strict";n.d(t,{A:()=>o});var r=n(98587);function o(e,t){if(null==e)return{};var n,o,i=(0,r.A)(e,t);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);for(o=0;o=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(i[n]=e[n])}return i}},98587:(e,t,n)=>{"use strict";function r(e,t){if(null==e)return{};var n,r,o={},i=Object.keys(e);for(r=0;r=0||(o[n]=e[n]);return o}n.d(t,{A:()=>r})},63662:(e,t,n)=>{"use strict";function r(e,t){return r=Object.setPrototypeOf?Object.setPrototypeOf.bind():function(e,t){return e.__proto__=t,e},r(e,t)}n.d(t,{A:()=>r})},80296:(e,t,n)=>{"use strict";n.d(t,{A:()=>o});var r=n(27800);function o(e,t){return function(e){if(Array.isArray(e))return e}(e)||function(e,t){var n=null==e?null:"undefined"!=typeof Symbol&&e[Symbol.iterator]||e["@@iterator"];if(null!=n){var r,o,i,a,s=[],u=!0,l=!1;try{if(i=(n=n.call(e)).next,0===t){if(Object(n)!==n)return;u=!1}else for(;!(u=(r=i.call(n)).done)&&(s.push(r.value),s.length!==t);u=!0);}catch(e){l=!0,o=e}finally{try{if(!u&&null!=n.return&&(a=n.return(),Object(a)!==a))return}finally{if(l)throw o}}return s}}(e,t)||(0,r.A)(e,t)||function(){throw new TypeError("Invalid attempt to destructure non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}()}},45458:(e,t,n)=>{"use strict";n.d(t,{A:()=>i});var r=n(43145);var o=n(27800);function i(e){return function(e){if(Array.isArray(e))return(0,r.A)(e)}(e)||function(e){if("undefined"!==typeof Symbol&&null!=e[Symbol.iterator]||null!=e["@@iterator"])return Array.from(e)}(e)||(0,o.A)(e)||function(){throw new TypeError("Invalid attempt to spread non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}()}},20816:(e,t,n)=>{"use strict";n.d(t,{A:()=>o});var r=n(82284);function o(e){var t=function(e,t){if("object"!=(0,r.A)(e)||!e)return e;var n=e[Symbol.toPrimitive];if(void 0!==n){var o=n.call(e,t||"default");if("object"!=(0,r.A)(o))return o;throw new TypeError("@@toPrimitive must return a primitive value.")}return("string"===t?String:Number)(e)}(e,"string");return"symbol"==(0,r.A)(t)?t:t+""}},82284:(e,t,n)=>{"use strict";function r(e){return r="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},r(e)}n.d(t,{A:()=>r})},27800:(e,t,n)=>{"use strict";n.d(t,{A:()=>o});var r=n(43145);function o(e,t){if(e){if("string"===typeof e)return(0,r.A)(e,t);var n=Object.prototype.toString.call(e).slice(8,-1);return"Object"===n&&e.constructor&&(n=e.constructor.name),"Map"===n||"Set"===n?Array.from(e):"Arguments"===n||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(n)?(0,r.A)(e,t):void 0}}},64849:(e,t,n)=>{"use strict";n.d(t,{UU:()=>N});const r=e=>Array.isArray(e)?e:[e],o=(e={},t)=>({...e,filters:[...e.filters||[],...r(t)]});var i=n(20894);const a=e=>i.p.at("document.tags",r(e));var s=Object.defineProperty,u=(e,t,n)=>(((e,t,n)=>{t in e?s(e,t,{enumerable:!0,configurable:!0,writable:!0,value:n}):e[t]=n})(e,"symbol"!==typeof t?t+"":t,n),n);class l extends Error{constructor(e="An invalid API response was returned",t,n){super(e),u(this,"url"),u(this,"response"),this.url=t,this.response=n}}const c=(e,t)=>{const n=e.find((e=>t(e)));if(!n)throw new l("Ref could not be found.",void 0,void 0);return 
n},f=e=>c(e,(e=>e.isMasterRef)),d=(e,t)=>c(e,(e=>e.id===t)),h=(e,t)=>c(e,(e=>e.label===t)),p=e=>e.replace(/%3B/g,";"),g=e=>i.p.any("document.tags",r(e)),v=e=>i.p.at("document.type",e);class y extends l{}class m extends l{}class b extends l{}class _ extends y{}class w extends y{}class x extends y{}class S extends m{}const A="Document",E="Media",C="Web",R=(e,...t)=>{if(!e)return null;const n="link_type"in e?e:(e=>{var t;return{link_type:A,id:e.id,uid:e.uid||void 0,type:e.type,tags:e.tags,lang:e.lang,url:null==e.url?void 0:e.url,slug:null==(t=e.slugs)?void 0:t[0],...e.data&&Object.keys(e.data).length>0?{data:e.data}:{}}})(e),[r]=t;let o;switch(o="function"===typeof r||null==r?{linkResolver:r}:{...r},n.link_type){case E:case C:return"url"in n?n.url:null;case A:if("id"in n&&o.linkResolver){const e=o.linkResolver(n);if(null!=e)return e}return"url"in n&&n.url?n.url:null;default:return null}},O={accessToken:"access_token"},T=e=>"string"===typeof e?e:"desc"===e.direction?`${e.field} desc`:e.field;var k=Object.defineProperty,M=(e,t,n)=>(((e,t,n)=>{t in e?k(e,t,{enumerable:!0,configurable:!0,writable:!0,value:n}):e[t]=n})(e,"symbol"!==typeof t?t+"":t,n),n);var P,I;(I=P||(P={})).Master="Master",I.ReleaseID="ReleaseID",I.ReleaseLabel="ReleaseLabel",I.Manual="Manual";const N=(e,t)=>new D(e,t);class D{constructor(e,t={}){if(M(this,"endpoint"),M(this,"accessToken"),M(this,"routes"),M(this,"brokenRoute"),M(this,"fetchFn"),M(this,"fetchOptions"),M(this,"defaultParams"),M(this,"refState",{mode:P.Master,autoPreviewsEnabled:!0}),M(this,"cachedRepository"),M(this,"cachedRepositoryExpiration",0),M(this,"fetchJobs",{}),(e=>{try{return new URL(e),!0}catch{return!1}})(e)?this.endpoint=e:this.endpoint=(e=>{if(/^[a-zA-Z0-9][-a-zA-Z0-9]{2,}[a-zA-Z0-9]$/.test(e))return`https://${e}.cdn.prismic.io/api/v2`;throw new l(`An invalid Prismic repository name was given: ${e}`,void 0,void 0)})(e),this.accessToken=t.accessToken,this.routes=t.routes,this.brokenRoute=t.brokenRoute,this.fetchOptions=t.fetchOptions,this.defaultParams=t.defaultParams,t.ref&&this.queryContentFromRef(t.ref),"function"===typeof t.fetch)this.fetchFn=t.fetch;else{if("function"!==typeof globalThis.fetch)throw new l("A valid fetch implementation was not provided. 
In environments where fetch is not available (including Node.js), a fetch implementation must be provided via a polyfill or the `fetch` option.",void 0,void 0);this.fetchFn=globalThis.fetch}this.fetchFn===globalThis.fetch&&(this.fetchFn=this.fetchFn.bind(globalThis)),this.graphQLFetch=this.graphQLFetch.bind(this)}enableAutoPreviews(){this.refState.autoPreviewsEnabled=!0}enableAutoPreviewsFromReq(e){this.refState.httpRequest=e,this.refState.autoPreviewsEnabled=!0}disableAutoPreviews(){this.refState.autoPreviewsEnabled=!1}async get(e){const t=await this.buildQueryURL(e);return await this.fetch(t,e)}async getFirst(e){var t;const n={...e};e&&e.page||(null==e?void 0:e.pageSize)||(n.pageSize=(null==(t=this.defaultParams)?void 0:t.pageSize)??1);const r=await this.buildQueryURL(n),o=(await this.fetch(r,e)).results[0];if(o)return o;throw new m("No documents were returned",r,void 0)}async dangerouslyGetAll(e={}){var t;const{limit:n=1/0,...r}=e,o={...r,pageSize:Math.min(n,r.pageSize||(null==(t=this.defaultParams)?void 0:t.pageSize)||100)},i=[];let a;for(;(!a||a.next_page)&&i.lengthsetTimeout(e,500)))}return i.slice(0,n)}async getByID(e,t){return await this.getFirst(o(t,i.p.at("document.id",e)))}async getByIDs(e,t){return await this.get(o(t,i.p.in("document.id",e)))}async getAllByIDs(e,t){return await this.dangerouslyGetAll(o(t,i.p.in("document.id",e)))}async getByUID(e,t,n){return await this.getFirst(o(n,[v(e),i.p.at(`my.${e}.uid`,t)]))}async getByUIDs(e,t,n){return await this.get(o(n,[v(e),i.p.in(`my.${e}.uid`,t)]))}async getAllByUIDs(e,t,n){return await this.dangerouslyGetAll(o(n,[v(e),i.p.in(`my.${e}.uid`,t)]))}async getSingle(e,t){return await this.getFirst(o(t,v(e)))}async getByType(e,t){return await this.get(o(t,v(e)))}async getAllByType(e,t){return await this.dangerouslyGetAll(o(t,v(e)))}async getByTag(e,t){return await this.get(o(t,g(e)))}async getAllByTag(e,t){return await this.dangerouslyGetAll(o(t,g(e)))}async getByEveryTag(e,t){return await this.get(o(t,a(e)))}async getAllByEveryTag(e,t){return await this.dangerouslyGetAll(o(t,a(e)))}async getBySomeTags(e,t){return await this.get(o(t,g(e)))}async getAllBySomeTags(e,t){return await this.dangerouslyGetAll(o(t,g(e)))}async getRepository(e){const t=new URL(this.endpoint);return this.accessToken&&t.searchParams.set("access_token",this.accessToken),await this.fetch(t.toString(),e)}async getRefs(e){return(await this.getRepository(e)).refs}async getRefByID(e,t){const n=await this.getRefs(t);return d(n,e)}async getRefByLabel(e,t){const n=await this.getRefs(t);return h(n,e)}async getMasterRef(e){const t=await this.getRefs(e);return f(t)}async getReleases(e){return(await this.getRefs(e)).filter((e=>!e.isMasterRef))}async getReleaseByID(e,t){const n=await this.getReleases(t);return d(n,e)}async getReleaseByLabel(e,t){const n=await this.getReleases(t);return h(n,e)}async getTags(e){try{const t=await this.getCachedRepositoryForm("tags",e),n=new URL(t.action);return this.accessToken&&n.searchParams.set("access_token",this.accessToken),await this.fetch(n.toString(),e)}catch{return(await this.getRepository(e)).tags}}async buildQueryURL({signal:e,fetchOptions:t,...n}={}){const o=n.ref||await this.getResolvedRefString({signal:e,fetchOptions:t}),i=n.integrationFieldsRef||(await this.getCachedRepository({signal:e,fetchOptions:t})).integrationFieldsRef||void 0;return((e,t)=>{const{filters:n,predicates:o,...i}=t,a=new URL("documents/search",`${e}/`);if(n)for(const s of r(n))a.searchParams.append("q",`[${s}]`);if(o)for(const s of 
r(o))a.searchParams.append("q",`[${s}]`);for(const s in i){const e=O[s]||s;let t=i[s];if("orderings"===e){const n=i[e];null!=n&&(t=`[${r(n).map((e=>T(e))).join(",")}]`)}else"routes"===e&&"object"===typeof i[e]&&(t=JSON.stringify(r(i[e])));null!=t&&a.searchParams.set(e,r(t).join(","))}return a.toString()})(this.endpoint,{...this.defaultParams,...n,ref:o,integrationFieldsRef:i,routes:n.routes||this.routes,brokenRoute:n.brokenRoute||this.brokenRoute,accessToken:n.accessToken||this.accessToken})}async resolvePreviewURL(e){var t,n;let r=e.documentID,o=e.previewToken;if("undefined"!==typeof globalThis.location){const e=new URLSearchParams(globalThis.location.search);r=r||e.get("documentId"),o=o||e.get("token")}else if(this.refState.httpRequest)if("query"in this.refState.httpRequest)r=r||(null==(t=this.refState.httpRequest.query)?void 0:t.documentId),o=o||(null==(n=this.refState.httpRequest.query)?void 0:n.token);else if("url"in this.refState.httpRequest&&this.refState.httpRequest.url){const e=new URL(this.refState.httpRequest.url,"missing-host://").searchParams;r=r||e.get("documentId"),o=o||e.get("token")}if(null!=r&&null!=o){const t=await this.getByID(r,{ref:o,lang:"*",signal:e.signal,fetchOptions:e.fetchOptions}),n=R(t,{linkResolver:e.linkResolver});if("string"===typeof n)return n}return e.defaultURL}queryLatestContent(){this.refState.mode=P.Master}queryContentFromReleaseByID(e){this.refState={...this.refState,mode:P.ReleaseID,releaseID:e}}queryContentFromReleaseByLabel(e){this.refState={...this.refState,mode:P.ReleaseLabel,releaseLabel:e}}queryContentFromRef(e){this.refState={...this.refState,mode:P.Manual,ref:e}}async graphQLFetch(e,t){const n=await this.getCachedRepository(),r=await this.getResolvedRefString(),o={"Prismic-ref":r,Authorization:this.accessToken?`Token ${this.accessToken}`:"",...t?t.headers:{}};n.integrationFieldsRef&&(o["Prismic-integration-field-ref"]=n.integrationFieldsRef);const i={};for(const u in o)o[u]&&(i[u.toLowerCase()]=o[u]);const a=new URL(e);a.searchParams.set("ref",r);const s=a.searchParams.get("query");return s&&a.searchParams.set("query",(e=>e.replace(/(\n| )*( |{|})(\n| )*/gm,((e,t,n)=>n)))(s)),await this.fetchFn(a.toString(),{...t,headers:i})}async getCachedRepository(e){return(!this.cachedRepository||Date.now()>=this.cachedRepositoryExpiration)&&(this.cachedRepositoryExpiration=Date.now()+5e3,this.cachedRepository=await this.getRepository(e)),this.cachedRepository}async getCachedRepositoryForm(e,t){const n=(await this.getCachedRepository(t)).forms[e];if(!n)throw new l(`Form with name "${e}" could not be found`,void 0,void 0);return n}async getResolvedRefString(e){var t,n;if(this.refState.autoPreviewsEnabled){let e,r;if((null==(t=this.refState.httpRequest)?void 0:t.headers)?"get"in this.refState.httpRequest.headers&&"function"===typeof this.refState.httpRequest.headers.get?r=this.refState.httpRequest.headers.get("cookie"):"cookie"in this.refState.httpRequest.headers&&(r=this.refState.httpRequest.headers.cookie):(null==(n=globalThis.document)?void 0:n.cookie)&&(r=globalThis.document.cookie),r&&(e=(e=>{const t=e.split("; ");let n;for(const r of t){const e=r.split("=");if("io.prismic.preview"===p(e[0]).replace(/%3D/g,"=")){n=p(e.slice(1).join("="));break}}return n})(r)),e)return e}const r=await this.getCachedRepository(e),o=this.refState.mode;if(o===P.ReleaseID)return d(r.refs,this.refState.releaseID).ref;if(o===P.ReleaseLabel)return h(r.refs,this.refState.releaseLabel).ref;if(o===P.Manual){const e=await(i=this.refState.ref,"function"===typeof 
i?i:()=>i)();if("string"===typeof e)return e}var i;return f(r.refs).ref}async fetch(e,t={}){var n,r,o,i;const a={...this.fetchOptions,...t.fetchOptions,headers:{...null==(n=this.fetchOptions)?void 0:n.headers,...null==(r=t.fetchOptions)?void 0:r.headers},signal:(null==(o=t.fetchOptions)?void 0:o.signal)||t.signal||(null==(i=this.fetchOptions)?void 0:i.signal)};let s;this.fetchJobs[e]&&this.fetchJobs[e].has(a.signal)?s=this.fetchJobs[e].get(a.signal):(this.fetchJobs[e]=this.fetchJobs[e]||new Map,s=this.fetchFn(e,a).then((async e=>{let t;try{t=await e.json()}catch{}return{status:e.status,headers:e.headers,json:t}})).finally((()=>{this.fetchJobs[e].delete(a.signal),0===this.fetchJobs[e].size&&delete this.fetchJobs[e]})),this.fetchJobs[e].set(a.signal,s));const u=await s;if(404!==u.status&&null==u.json)throw new l(void 0,e,u.json);switch(u.status){case 200:return u.json;case 400:throw new b(u.json.message,e,u.json);case 401:case 403:throw new y(u.json.error||u.json.message,e,u.json);case 404:if(void 0===u.json)throw new S(`Prismic repository not found. Check that "${this.endpoint}" is pointing to the correct repository.`,e,void 0);if("api_notfound_error"===u.json.type)throw new x(u.json.message,e,u.json);if("api_security_error"===u.json.type&&/preview token.*expired/i.test(u.json.message))throw new _(u.json.message,e,u.json);throw new m(u.json.message,e,u.json);case 410:throw new w(u.json.message,e,u.json);case 429:{const n=Number(u.headers.get("retry-after")),r=Number.isNaN(n)?1e3:n;return await new Promise(((n,o)=>{setTimeout((async()=>{try{n(await this.fetch(e,t))}catch(r){o(r)}}),r)}))}}throw new l(void 0,e,u.json)}}},20894:(e,t,n)=>{"use strict";n.d(t,{p:()=>a});const r=e=>Array.isArray(e)?`[${e.map(r).join(", ")}]`:"string"===typeof e?`"${e}"`:e instanceof Date?`${e.getTime()}`:`${e}`,o=e=>(t,...n)=>{const o=n.map(r).join(", "),i=t&&n.length?", ":"";return`[${e}(${t}${i}${o})]`},i=e=>{const t=o(e);return e=>t(e)},a={at:o("at"),not:o("not"),any:o("any"),in:o("in"),fulltext:o("fulltext"),has:i("has"),missing:i("missing"),similar:(e=>{const t=o(e);return(...e)=>t("",...e)})("similar"),geopointNear:o("geopoint.near"),numberLessThan:o("number.lt"),numberGreaterThan:o("number.gt"),numberInRange:o("number.inRange"),dateAfter:o("date.after"),dateBefore:o("date.before"),dateBetween:o("date.between"),dateDayOfMonth:o("date.day-of-month"),dateDayOfMonthAfter:o("date.day-of-month-after"),dateDayOfMonthBefore:o("date.day-of-month-before"),dateDayOfWeek:o("date.day-of-week"),dateDayOfWeekAfter:o("date.day-of-week-after"),dateDayOfWeekBefore:o("date.day-of-week-before"),dateMonth:o("date.month"),dateMonthAfter:o("date.month-after"),dateMonthBefore:o("date.month-before"),dateYear:o("date.year"),dateHour:o("date.hour"),dateHourAfter:o("date.hour-after"),dateHourBefore:o("date.hour-before")}},1530:(e,t,n)=>{"use strict";n.d(t,{Kv:()=>i,N4:()=>a});var r=n(96540),o=n(33888);function i(e,t){return e?function(e){return"function"===typeof e&&(()=>{const t=Object.getPrototypeOf(e);return t.prototype&&t.prototype.isReactComponent})()}(n=e)||"function"===typeof n||function(e){return"object"===typeof e&&"symbol"===typeof e.$$typeof&&["react.memo","react.forward_ref"].includes(e.$$typeof.description)}(n)?r.createElement(e,t):e:null;var n}function a(e){const t={state:{},onStateChange:()=>{},renderFallbackValue:null,...e},[n]=r.useState((()=>({current:(0,o.ZR)(t)}))),[i,a]=r.useState((()=>n.current.initialState));return 
n.current.setOptions((t=>({...t,...e,state:{...i,...e.state},onStateChange:t=>{a(t),null==e.onStateChange||e.onStateChange(t)}}))),n.current}},33888:(e,t,n)=>{"use strict";function r(e,t){return"function"===typeof e?e(t):e}function o(e,t){return n=>{t.setState((t=>({...t,[e]:r(n,t[e])})))}}function i(e){return e instanceof Function}function a(e,t){const n=[],r=e=>{e.forEach((e=>{n.push(e);const o=t(e);null!=o&&o.length&&r(o)}))};return r(e),n}function s(e,t,n){let r,o=[];return i=>{let a;n.key&&n.debug&&(a=Date.now());const s=e(i);if(!(s.length!==o.length||s.some(((e,t)=>o[t]!==e))))return r;let u;if(o=s,n.key&&n.debug&&(u=Date.now()),r=t(...s),null==n||null==n.onChange||n.onChange(r),n.key&&n.debug&&null!=n&&n.debug()){const e=Math.round(100*(Date.now()-a))/100,t=Math.round(100*(Date.now()-u))/100,r=t/16,o=(e,t)=>{for(e=String(e);e.length<t;)e=" "+e;return e};console.info(`%c⏱ ${o(t,5)} /${o(e,5)} ms`,`font-size: .6rem; font-weight: bold; color: hsl(${Math.max(0,Math.min(120-120*r,120))}deg 100% 31%);`,null==n?void 0:n.key)}return r}}function u(e,t,n,r){return{debug:()=>{var n;return null!=(n=null==e?void 0:e.debugAll)?n:e[t]},key:!1,onChange:r}}n.d(t,{D0:()=>K,HT:()=>X,ZR:()=>Y,cU:()=>ee,h5:()=>ne,hM:()=>J,kW:()=>te});const l="debugHeaders";function c(e,t,n){var r;let o={id:null!=(r=n.id)?r:t.id,column:t,index:n.index,isPlaceholder:!!n.isPlaceholder,placeholderId:n.placeholderId,depth:n.depth,subHeaders:[],colSpan:0,rowSpan:0,headerGroup:null,getLeafHeaders:()=>{const e=[],t=n=>{n.subHeaders&&n.subHeaders.length&&n.subHeaders.map(t),e.push(n)};return t(o),e},getContext:()=>({table:e,header:o,column:t})};return e._features.forEach((t=>{null==t.createHeader||t.createHeader(o,e)})),o}const f={createTable:e=>{e.getHeaderGroups=s((()=>[e.getAllColumns(),e.getVisibleLeafColumns(),e.getState().columnPinning.left,e.getState().columnPinning.right]),((t,n,r,o)=>{var i,a;const s=null!=(i=null==r?void 0:r.map((e=>n.find((t=>t.id===e)))).filter(Boolean))?i:[],u=null!=(a=null==o?void 0:o.map((e=>n.find((t=>t.id===e)))).filter(Boolean))?a:[];return d(t,[...s,...n.filter((e=>!(null!=r&&r.includes(e.id))&&!(null!=o&&o.includes(e.id)))),...u],e)}),u(e.options,l)),e.getCenterHeaderGroups=s((()=>[e.getAllColumns(),e.getVisibleLeafColumns(),e.getState().columnPinning.left,e.getState().columnPinning.right]),((t,n,r,o)=>d(t,n=n.filter((e=>!(null!=r&&r.includes(e.id))&&!(null!=o&&o.includes(e.id)))),e,"center")),u(e.options,l)),e.getLeftHeaderGroups=s((()=>[e.getAllColumns(),e.getVisibleLeafColumns(),e.getState().columnPinning.left]),((t,n,r)=>{var o;return d(t,null!=(o=null==r?void 0:r.map((e=>n.find((t=>t.id===e)))).filter(Boolean))?o:[],e,"left")}),u(e.options,l)),e.getRightHeaderGroups=s((()=>[e.getAllColumns(),e.getVisibleLeafColumns(),e.getState().columnPinning.right]),((t,n,r)=>{var o;return d(t,null!=(o=null==r?void
0:r.map((e=>n.find((t=>t.id===e)))).filter(Boolean))?o:[],e,"right")}),u(e.options,l)),e.getFooterGroups=s((()=>[e.getHeaderGroups()]),(e=>[...e].reverse()),u(e.options,l)),e.getLeftFooterGroups=s((()=>[e.getLeftHeaderGroups()]),(e=>[...e].reverse()),u(e.options,l)),e.getCenterFooterGroups=s((()=>[e.getCenterHeaderGroups()]),(e=>[...e].reverse()),u(e.options,l)),e.getRightFooterGroups=s((()=>[e.getRightHeaderGroups()]),(e=>[...e].reverse()),u(e.options,l)),e.getFlatHeaders=s((()=>[e.getHeaderGroups()]),(e=>e.map((e=>e.headers)).flat()),u(e.options,l)),e.getLeftFlatHeaders=s((()=>[e.getLeftHeaderGroups()]),(e=>e.map((e=>e.headers)).flat()),u(e.options,l)),e.getCenterFlatHeaders=s((()=>[e.getCenterHeaderGroups()]),(e=>e.map((e=>e.headers)).flat()),u(e.options,l)),e.getRightFlatHeaders=s((()=>[e.getRightHeaderGroups()]),(e=>e.map((e=>e.headers)).flat()),u(e.options,l)),e.getCenterLeafHeaders=s((()=>[e.getCenterFlatHeaders()]),(e=>e.filter((e=>{var t;return!(null!=(t=e.subHeaders)&&t.length)}))),u(e.options,l)),e.getLeftLeafHeaders=s((()=>[e.getLeftFlatHeaders()]),(e=>e.filter((e=>{var t;return!(null!=(t=e.subHeaders)&&t.length)}))),u(e.options,l)),e.getRightLeafHeaders=s((()=>[e.getRightFlatHeaders()]),(e=>e.filter((e=>{var t;return!(null!=(t=e.subHeaders)&&t.length)}))),u(e.options,l)),e.getLeafHeaders=s((()=>[e.getLeftHeaderGroups(),e.getCenterHeaderGroups(),e.getRightHeaderGroups()]),((e,t,n)=>{var r,o,i,a,s,u;return[...null!=(r=null==(o=e[0])?void 0:o.headers)?r:[],...null!=(i=null==(a=t[0])?void 0:a.headers)?i:[],...null!=(s=null==(u=n[0])?void 0:u.headers)?s:[]].map((e=>e.getLeafHeaders())).flat()}),u(e.options,l))}};function d(e,t,n,r){var o,i;let a=0;const s=function(e,t){void 0===t&&(t=1),a=Math.max(a,t),e.filter((e=>e.getIsVisible())).forEach((e=>{var n;null!=(n=e.columns)&&n.length&&s(e.columns,t+1)}),0)};s(e);let u=[];const l=(e,t)=>{const o={depth:t,id:[r,`${t}`].filter(Boolean).join("_"),headers:[]},i=[];e.forEach((e=>{const a=[...i].reverse()[0];let s,u=!1;if(e.column.depth===o.depth&&e.column.parent?s=e.column.parent:(s=e.column,u=!0),a&&(null==a?void 0:a.column)===s)a.subHeaders.push(e);else{const o=c(n,s,{id:[r,t,s.id,null==e?void 0:e.id].filter(Boolean).join("_"),isPlaceholder:u,placeholderId:u?`${i.filter((e=>e.column===s)).length}`:void 0,depth:t,index:i.length});o.subHeaders.push(e),i.push(o)}o.headers.push(e),e.headerGroup=o})),u.push(o),t>0&&l(i,t-1)},f=t.map(((e,t)=>c(n,e,{depth:a,index:t})));l(f,a-1),u.reverse();const d=e=>e.filter((e=>e.column.getIsVisible())).map((e=>{let t=0,n=0,r=[0];e.subHeaders&&e.subHeaders.length?(r=[],d(e.subHeaders).forEach((e=>{let{colSpan:n,rowSpan:o}=e;t+=n,r.push(o)}))):t=1;return n+=Math.min(...r),e.colSpan=t,e.rowSpan=n,{colSpan:t,rowSpan:n}}));return d(null!=(o=null==(i=u[0])?void 0:i.headers)?o:[]),u}const h=(e,t,n,r,o,i,l)=>{let c={id:t,index:r,original:n,depth:o,parentId:l,_valuesCache:{},_uniqueValuesCache:{},getValue:t=>{if(c._valuesCache.hasOwnProperty(t))return c._valuesCache[t];const n=e.getColumn(t);return null!=n&&n.accessorFn?(c._valuesCache[t]=n.accessorFn(c.original,r),c._valuesCache[t]):void 0},getUniqueValues:t=>{if(c._uniqueValuesCache.hasOwnProperty(t))return c._uniqueValuesCache[t];const n=e.getColumn(t);return null!=n&&n.accessorFn?n.columnDef.getUniqueValues?(c._uniqueValuesCache[t]=n.columnDef.getUniqueValues(c.original,r),c._uniqueValuesCache[t]):(c._uniqueValuesCache[t]=[c.getValue(t)],c._uniqueValuesCache[t]):void 0},renderValue:t=>{var n;return 
null!=(n=c.getValue(t))?n:e.options.renderFallbackValue},subRows:null!=i?i:[],getLeafRows:()=>a(c.subRows,(e=>e.subRows)),getParentRow:()=>c.parentId?e.getRow(c.parentId,!0):void 0,getParentRows:()=>{let e=[],t=c;for(;;){const n=t.getParentRow();if(!n)break;e.push(n),t=n}return e.reverse()},getAllCells:s((()=>[e.getAllLeafColumns()]),(t=>t.map((t=>function(e,t,n,r){const o={id:`${t.id}_${n.id}`,row:t,column:n,getValue:()=>t.getValue(r),renderValue:()=>{var t;return null!=(t=o.getValue())?t:e.options.renderFallbackValue},getContext:s((()=>[e,n,t,o]),((e,t,n,r)=>({table:e,column:t,row:n,cell:r,getValue:r.getValue,renderValue:r.renderValue})),u(e.options,"debugCells"))};return e._features.forEach((r=>{null==r.createCell||r.createCell(o,n,t,e)}),{}),o}(e,c,t,t.id)))),u(e.options,"debugRows")),_getAllCellsByColumnId:s((()=>[c.getAllCells()]),(e=>e.reduce(((e,t)=>(e[t.column.id]=t,e)),{})),u(e.options,"debugRows"))};for(let a=0;a<e._features.length;a++){const t=e._features[a];null==t.createRow||t.createRow(c,e)}return c},p={createColumn:(e,t)=>{e._getFacetedRowModel=t.options.getFacetedRowModel&&t.options.getFacetedRowModel(t,e.id),e.getFacetedRowModel=()=>e._getFacetedRowModel?e._getFacetedRowModel():t.getPreFilteredRowModel(),e._getFacetedUniqueValues=t.options.getFacetedUniqueValues&&t.options.getFacetedUniqueValues(t,e.id),e.getFacetedUniqueValues=()=>e._getFacetedUniqueValues?e._getFacetedUniqueValues():new Map,e._getFacetedMinMaxValues=t.options.getFacetedMinMaxValues&&t.options.getFacetedMinMaxValues(t,e.id),e.getFacetedMinMaxValues=()=>{if(e._getFacetedMinMaxValues)return e._getFacetedMinMaxValues()}}},g=(e,t,n)=>{var r;const o=n.toLowerCase();return Boolean(null==(r=e.getValue(t))||null==(r=r.toString())||null==(r=r.toLowerCase())?void 0:r.includes(o))};g.autoRemove=e=>E(e);const v=(e,t,n)=>{var r;return Boolean(null==(r=e.getValue(t))||null==(r=r.toString())?void 0:r.includes(n))};v.autoRemove=e=>E(e);const y=(e,t,n)=>{var r;return(null==(r=e.getValue(t))||null==(r=r.toString())?void 0:r.toLowerCase())===(null==n?void 0:n.toLowerCase())};y.autoRemove=e=>E(e);const m=(e,t,n)=>{var r;return null==(r=e.getValue(t))?void 0:r.includes(n)};m.autoRemove=e=>E(e)||!(null!=e&&e.length);const b=(e,t,n)=>!n.some((n=>{var r;return!(null!=(r=e.getValue(t))&&r.includes(n))}));b.autoRemove=e=>E(e)||!(null!=e&&e.length);const _=(e,t,n)=>n.some((n=>{var r;return null==(r=e.getValue(t))?void 0:r.includes(n)}));_.autoRemove=e=>E(e)||!(null!=e&&e.length);const w=(e,t,n)=>e.getValue(t)===n;w.autoRemove=e=>E(e);const x=(e,t,n)=>e.getValue(t)==n;x.autoRemove=e=>E(e);const S=(e,t,n)=>{let[r,o]=n;const i=e.getValue(t);return i>=r&&i<=o};S.resolveFilterValue=e=>{let[t,n]=e,r="number"!==typeof t?parseFloat(t):t,o="number"!==typeof n?parseFloat(n):n,i=null===t||Number.isNaN(r)?-1/0:r,a=null===n||Number.isNaN(o)?1/0:o;if(i>a){const e=i;i=a,a=e}return[i,a]},S.autoRemove=e=>E(e)||E(e[0])&&E(e[1]);const A={includesString:g,includesStringSensitive:v,equalsString:y,arrIncludes:m,arrIncludesAll:b,arrIncludesSome:_,equals:w,weakEquals:x,inNumberRange:S};function E(e){return void 0===e||null===e||""===e}const C={getDefaultColumnDef:()=>({filterFn:"auto"}),getInitialState:e=>({columnFilters:[],...e}),getDefaultOptions:e=>({onColumnFiltersChange:o("columnFilters",e),filterFromLeafRows:!1,maxLeafRowFilterDepth:100}),createColumn:(e,t)=>{e.getAutoFilterFn=()=>{const n=t.getCoreRowModel().flatRows[0],r=null==n?void 0:n.getValue(e.id);return"string"===typeof r?A.includesString:"number"===typeof r?A.inNumberRange:"boolean"===typeof r||null!==r&&"object"===typeof r?A.equals:Array.isArray(r)?A.arrIncludes:A.weakEquals},e.getFilterFn=()=>{var
n,r;return i(e.columnDef.filterFn)?e.columnDef.filterFn:"auto"===e.columnDef.filterFn?e.getAutoFilterFn():null!=(n=null==(r=t.options.filterFns)?void 0:r[e.columnDef.filterFn])?n:A[e.columnDef.filterFn]},e.getCanFilter=()=>{var n,r,o;return(null==(n=e.columnDef.enableColumnFilter)||n)&&(null==(r=t.options.enableColumnFilters)||r)&&(null==(o=t.options.enableFilters)||o)&&!!e.accessorFn},e.getIsFiltered=()=>e.getFilterIndex()>-1,e.getFilterValue=()=>{var n;return null==(n=t.getState().columnFilters)||null==(n=n.find((t=>t.id===e.id)))?void 0:n.value},e.getFilterIndex=()=>{var n,r;return null!=(n=null==(r=t.getState().columnFilters)?void 0:r.findIndex((t=>t.id===e.id)))?n:-1},e.setFilterValue=n=>{t.setColumnFilters((t=>{const o=e.getFilterFn(),i=null==t?void 0:t.find((t=>t.id===e.id)),a=r(n,i?i.value:void 0);var s;if(R(o,a,e))return null!=(s=null==t?void 0:t.filter((t=>t.id!==e.id)))?s:[];const u={id:e.id,value:a};var l;return i?null!=(l=null==t?void 0:t.map((t=>t.id===e.id?u:t)))?l:[]:null!=t&&t.length?[...t,u]:[u]}))}},createRow:(e,t)=>{e.columnFilters={},e.columnFiltersMeta={}},createTable:e=>{e.setColumnFilters=t=>{const n=e.getAllLeafColumns();null==e.options.onColumnFiltersChange||e.options.onColumnFiltersChange((e=>{var o;return null==(o=r(t,e))?void 0:o.filter((e=>{const t=n.find((t=>t.id===e.id));if(t){if(R(t.getFilterFn(),e.value,t))return!1}return!0}))}))},e.resetColumnFilters=t=>{var n,r;e.setColumnFilters(t?[]:null!=(n=null==(r=e.initialState)?void 0:r.columnFilters)?n:[])},e.getPreFilteredRowModel=()=>e.getCoreRowModel(),e.getFilteredRowModel=()=>(!e._getFilteredRowModel&&e.options.getFilteredRowModel&&(e._getFilteredRowModel=e.options.getFilteredRowModel(e)),e.options.manualFiltering||!e._getFilteredRowModel?e.getPreFilteredRowModel():e._getFilteredRowModel())}};function R(e,t,n){return!(!e||!e.autoRemove)&&e.autoRemove(t,n)||"undefined"===typeof t||"string"===typeof t&&!t}const O={sum:(e,t,n)=>n.reduce(((t,n)=>{const r=n.getValue(e);return t+("number"===typeof r?r:0)}),0),min:(e,t,n)=>{let r;return n.forEach((t=>{const n=t.getValue(e);null!=n&&(r>n||void 0===r&&n>=n)&&(r=n)})),r},max:(e,t,n)=>{let r;return n.forEach((t=>{const n=t.getValue(e);null!=n&&(r<n||void 0===r&&n>=n)&&(r=n)})),r},extent:(e,t,n)=>{let r,o;return n.forEach((t=>{const n=t.getValue(e);null!=n&&(void 0===r?n>=n&&(r=o=n):(r>n&&(r=n),o<n&&(o=n)))})),[r,o]},mean:(e,t)=>{let n=0,r=0;if(t.forEach((t=>{let o=t.getValue(e);null!=o&&(o=+o)>=o&&(++n,r+=o)})),n)return r/n},median:(e,t)=>{if(!t.length)return;const n=t.map((t=>t.getValue(e)));if(r=n,!Array.isArray(r)||!r.every((e=>"number"===typeof e)))return;var r;if(1===n.length)return n[0];const o=Math.floor(n.length/2),i=n.sort(((e,t)=>e-t));return n.length%2!==0?i[o]:(i[o-1]+i[o])/2},unique:(e,t)=>Array.from(new Set(t.map((t=>t.getValue(e)))).values()),uniqueCount:(e,t)=>new Set(t.map((t=>t.getValue(e)))).size,count:(e,t)=>t.length},T={getDefaultColumnDef:()=>({aggregatedCell:e=>{var t,n;return null!=(t=null==(n=e.getValue())||null==n.toString?void 0:n.toString())?t:null},aggregationFn:"auto"}),getInitialState:e=>({grouping:[],...e}),getDefaultOptions:e=>({onGroupingChange:o("grouping",e),groupedColumnMode:"reorder"}),createColumn:(e,t)=>{e.toggleGrouping=()=>{t.setGrouping((t=>null!=t&&t.includes(e.id)?t.filter((t=>t!==e.id)):[...null!=t?t:[],e.id]))},e.getCanGroup=()=>{var n,r;return(null==(n=e.columnDef.enableGrouping)||n)&&(null==(r=t.options.enableGrouping)||r)&&(!!e.accessorFn||!!e.columnDef.getGroupingValue)},e.getIsGrouped=()=>{var n;return null==(n=t.getState().grouping)?void
0:n.includes(e.id)},e.getGroupedIndex=()=>{var n;return null==(n=t.getState().grouping)?void 0:n.indexOf(e.id)},e.getToggleGroupingHandler=()=>{const t=e.getCanGroup();return()=>{t&&e.toggleGrouping()}},e.getAutoAggregationFn=()=>{const n=t.getCoreRowModel().flatRows[0],r=null==n?void 0:n.getValue(e.id);return"number"===typeof r?O.sum:"[object Date]"===Object.prototype.toString.call(r)?O.extent:void 0},e.getAggregationFn=()=>{var n,r;if(!e)throw new Error;return i(e.columnDef.aggregationFn)?e.columnDef.aggregationFn:"auto"===e.columnDef.aggregationFn?e.getAutoAggregationFn():null!=(n=null==(r=t.options.aggregationFns)?void 0:r[e.columnDef.aggregationFn])?n:O[e.columnDef.aggregationFn]}},createTable:e=>{e.setGrouping=t=>null==e.options.onGroupingChange?void 0:e.options.onGroupingChange(t),e.resetGrouping=t=>{var n,r;e.setGrouping(t?[]:null!=(n=null==(r=e.initialState)?void 0:r.grouping)?n:[])},e.getPreGroupedRowModel=()=>e.getFilteredRowModel(),e.getGroupedRowModel=()=>(!e._getGroupedRowModel&&e.options.getGroupedRowModel&&(e._getGroupedRowModel=e.options.getGroupedRowModel(e)),e.options.manualGrouping||!e._getGroupedRowModel?e.getPreGroupedRowModel():e._getGroupedRowModel())},createRow:(e,t)=>{e.getIsGrouped=()=>!!e.groupingColumnId,e.getGroupingValue=n=>{if(e._groupingValuesCache.hasOwnProperty(n))return e._groupingValuesCache[n];const r=t.getColumn(n);return null!=r&&r.columnDef.getGroupingValue?(e._groupingValuesCache[n]=r.columnDef.getGroupingValue(e.original),e._groupingValuesCache[n]):e.getValue(n)},e._groupingValuesCache={}},createCell:(e,t,n,r)=>{e.getIsGrouped=()=>t.getIsGrouped()&&t.id===n.groupingColumnId,e.getIsPlaceholder=()=>!e.getIsGrouped()&&t.getIsGrouped(),e.getIsAggregated=()=>{var t;return!e.getIsGrouped()&&!e.getIsPlaceholder()&&!(null==(t=n.subRows)||!t.length)}}};const k={getInitialState:e=>({columnOrder:[],...e}),getDefaultOptions:e=>({onColumnOrderChange:o("columnOrder",e)}),createColumn:(e,t)=>{e.getIndex=s((e=>[L(t,e)]),(t=>t.findIndex((t=>t.id===e.id))),u(t.options,"debugColumns")),e.getIsFirstColumn=n=>{var r;return(null==(r=L(t,n)[0])?void 0:r.id)===e.id},e.getIsLastColumn=n=>{var r;const o=L(t,n);return(null==(r=o[o.length-1])?void 0:r.id)===e.id}},createTable:e=>{e.setColumnOrder=t=>null==e.options.onColumnOrderChange?void 0:e.options.onColumnOrderChange(t),e.resetColumnOrder=t=>{var n;e.setColumnOrder(t?[]:null!=(n=e.initialState.columnOrder)?n:[])},e._getOrderColumnsFn=s((()=>[e.getState().columnOrder,e.getState().grouping,e.options.groupedColumnMode]),((e,t,n)=>r=>{let o=[];if(null!=e&&e.length){const t=[...e],n=[...r];for(;n.length&&t.length;){const e=t.shift(),r=n.findIndex((t=>t.id===e));r>-1&&o.push(n.splice(r,1)[0])}o=[...o,...n]}else o=r;return function(e,t,n){if(null==t||!t.length||!n)return e;const r=e.filter((e=>!t.includes(e.id)));return"remove"===n?r:[...t.map((t=>e.find((e=>e.id===t)))).filter(Boolean),...r]}(o,t,n)}),u(e.options,"debugTable"))}},M={getInitialState:e=>({columnPinning:{left:[],right:[]},...e}),getDefaultOptions:e=>({onColumnPinningChange:o("columnPinning",e)}),createColumn:(e,t)=>{e.pin=n=>{const r=e.getLeafColumns().map((e=>e.id)).filter(Boolean);t.setColumnPinning((e=>{var t,o,i,a,s,u;return"right"===n?{left:(null!=(i=null==e?void 0:e.left)?i:[]).filter((e=>!(null!=r&&r.includes(e)))),right:[...(null!=(a=null==e?void 0:e.right)?a:[]).filter((e=>!(null!=r&&r.includes(e)))),...r]}:"left"===n?{left:[...(null!=(s=null==e?void 0:e.left)?s:[]).filter((e=>!(null!=r&&r.includes(e)))),...r],right:(null!=(u=null==e?void 
0:e.right)?u:[]).filter((e=>!(null!=r&&r.includes(e))))}:{left:(null!=(t=null==e?void 0:e.left)?t:[]).filter((e=>!(null!=r&&r.includes(e)))),right:(null!=(o=null==e?void 0:e.right)?o:[]).filter((e=>!(null!=r&&r.includes(e))))}}))},e.getCanPin=()=>e.getLeafColumns().some((e=>{var n,r,o;return(null==(n=e.columnDef.enablePinning)||n)&&(null==(r=null!=(o=t.options.enableColumnPinning)?o:t.options.enablePinning)||r)})),e.getIsPinned=()=>{const n=e.getLeafColumns().map((e=>e.id)),{left:r,right:o}=t.getState().columnPinning,i=n.some((e=>null==r?void 0:r.includes(e))),a=n.some((e=>null==o?void 0:o.includes(e)));return i?"left":!!a&&"right"},e.getPinnedIndex=()=>{var n,r;const o=e.getIsPinned();return o?null!=(n=null==(r=t.getState().columnPinning)||null==(r=r[o])?void 0:r.indexOf(e.id))?n:-1:0}},createRow:(e,t)=>{e.getCenterVisibleCells=s((()=>[e._getAllVisibleCells(),t.getState().columnPinning.left,t.getState().columnPinning.right]),((e,t,n)=>{const r=[...null!=t?t:[],...null!=n?n:[]];return e.filter((e=>!r.includes(e.column.id)))}),u(t.options,"debugRows")),e.getLeftVisibleCells=s((()=>[e._getAllVisibleCells(),t.getState().columnPinning.left]),((e,t)=>(null!=t?t:[]).map((t=>e.find((e=>e.column.id===t)))).filter(Boolean).map((e=>({...e,position:"left"})))),u(t.options,"debugRows")),e.getRightVisibleCells=s((()=>[e._getAllVisibleCells(),t.getState().columnPinning.right]),((e,t)=>(null!=t?t:[]).map((t=>e.find((e=>e.column.id===t)))).filter(Boolean).map((e=>({...e,position:"right"})))),u(t.options,"debugRows"))},createTable:e=>{e.setColumnPinning=t=>null==e.options.onColumnPinningChange?void 0:e.options.onColumnPinningChange(t),e.resetColumnPinning=t=>{var n,r;return e.setColumnPinning(t?{left:[],right:[]}:null!=(n=null==(r=e.initialState)?void 0:r.columnPinning)?n:{left:[],right:[]})},e.getIsSomeColumnsPinned=t=>{var n;const r=e.getState().columnPinning;var o,i;return t?Boolean(null==(n=r[t])?void 0:n.length):Boolean((null==(o=r.left)?void 0:o.length)||(null==(i=r.right)?void 0:i.length))},e.getLeftLeafColumns=s((()=>[e.getAllLeafColumns(),e.getState().columnPinning.left]),((e,t)=>(null!=t?t:[]).map((t=>e.find((e=>e.id===t)))).filter(Boolean)),u(e.options,"debugColumns")),e.getRightLeafColumns=s((()=>[e.getAllLeafColumns(),e.getState().columnPinning.right]),((e,t)=>(null!=t?t:[]).map((t=>e.find((e=>e.id===t)))).filter(Boolean)),u(e.options,"debugColumns")),e.getCenterLeafColumns=s((()=>[e.getAllLeafColumns(),e.getState().columnPinning.left,e.getState().columnPinning.right]),((e,t,n)=>{const r=[...null!=t?t:[],...null!=n?n:[]];return e.filter((e=>!r.includes(e.id)))}),u(e.options,"debugColumns"))}},P={size:150,minSize:20,maxSize:Number.MAX_SAFE_INTEGER},I={getDefaultColumnDef:()=>P,getInitialState:e=>({columnSizing:{},columnSizingInfo:{startOffset:null,startSize:null,deltaOffset:null,deltaPercentage:null,isResizingColumn:!1,columnSizingStart:[]},...e}),getDefaultOptions:e=>({columnResizeMode:"onEnd",columnResizeDirection:"ltr",onColumnSizingChange:o("columnSizing",e),onColumnSizingInfoChange:o("columnSizingInfo",e)}),createColumn:(e,t)=>{e.getSize=()=>{var n,r,o;const i=t.getState().columnSizing[e.id];return 
Math.min(Math.max(null!=(n=e.columnDef.minSize)?n:P.minSize,null!=(r=null!=i?i:e.columnDef.size)?r:P.size),null!=(o=e.columnDef.maxSize)?o:P.maxSize)},e.getStart=s((e=>[e,L(t,e),t.getState().columnSizing]),((t,n)=>n.slice(0,e.getIndex(t)).reduce(((e,t)=>e+t.getSize()),0)),u(t.options,"debugColumns")),e.getAfter=s((e=>[e,L(t,e),t.getState().columnSizing]),((t,n)=>n.slice(e.getIndex(t)+1).reduce(((e,t)=>e+t.getSize()),0)),u(t.options,"debugColumns")),e.resetSize=()=>{t.setColumnSizing((t=>{let{[e.id]:n,...r}=t;return r}))},e.getCanResize=()=>{var n,r;return(null==(n=e.columnDef.enableResizing)||n)&&(null==(r=t.options.enableColumnResizing)||r)},e.getIsResizing=()=>t.getState().columnSizingInfo.isResizingColumn===e.id},createHeader:(e,t)=>{e.getSize=()=>{let t=0;const n=e=>{var r;e.subHeaders.length?e.subHeaders.forEach(n):t+=null!=(r=e.column.getSize())?r:0};return n(e),t},e.getStart=()=>{if(e.index>0){const t=e.headerGroup.headers[e.index-1];return t.getStart()+t.getSize()}return 0},e.getResizeHandler=n=>{const r=t.getColumn(e.column.id),o=null==r?void 0:r.getCanResize();return i=>{if(!r||!o)return;if(null==i.persist||i.persist(),D(i)&&i.touches&&i.touches.length>1)return;const a=e.getSize(),s=e?e.getLeafHeaders().map((e=>[e.column.id,e.column.getSize()])):[[r.id,r.getSize()]],u=D(i)?Math.round(i.touches[0].clientX):i.clientX,l={},c=(e,n)=>{"number"===typeof n&&(t.setColumnSizingInfo((e=>{var r,o;const i="rtl"===t.options.columnResizeDirection?-1:1,a=(n-(null!=(r=null==e?void 0:e.startOffset)?r:0))*i,s=Math.max(a/(null!=(o=null==e?void 0:e.startSize)?o:0),-.999999);return e.columnSizingStart.forEach((e=>{let[t,n]=e;l[t]=Math.round(100*Math.max(n+n*s,0))/100})),{...e,deltaOffset:a,deltaPercentage:s}})),"onChange"!==t.options.columnResizeMode&&"end"!==e||t.setColumnSizing((e=>({...e,...l}))))},f=e=>c("move",e),d=e=>{c("end",e),t.setColumnSizingInfo((e=>({...e,isResizingColumn:!1,startOffset:null,startSize:null,deltaOffset:null,deltaPercentage:null,columnSizingStart:[]})))},h=n||"undefined"!==typeof document?document:null,p={moveHandler:e=>f(e.clientX),upHandler:e=>{null==h||h.removeEventListener("mousemove",p.moveHandler),null==h||h.removeEventListener("mouseup",p.upHandler),d(e.clientX)}},g={moveHandler:e=>(e.cancelable&&(e.preventDefault(),e.stopPropagation()),f(e.touches[0].clientX),!1),upHandler:e=>{var t;null==h||h.removeEventListener("touchmove",g.moveHandler),null==h||h.removeEventListener("touchend",g.upHandler),e.cancelable&&(e.preventDefault(),e.stopPropagation()),d(null==(t=e.touches[0])?void 0:t.clientX)}},v=!!function(){if("boolean"===typeof N)return N;let e=!1;try{const t={get passive(){return e=!0,!1}},n=()=>{};window.addEventListener("test",n,t),window.removeEventListener("test",n)}catch(t){e=!1}return N=e,N}()&&{passive:!1};D(i)?(null==h||h.addEventListener("touchmove",g.moveHandler,v),null==h||h.addEventListener("touchend",g.upHandler,v)):(null==h||h.addEventListener("mousemove",p.moveHandler,v),null==h||h.addEventListener("mouseup",p.upHandler,v)),t.setColumnSizingInfo((e=>({...e,startOffset:u,startSize:a,deltaOffset:0,deltaPercentage:0,columnSizingStart:s,isResizingColumn:r.id})))}}},createTable:e=>{e.setColumnSizing=t=>null==e.options.onColumnSizingChange?void 0:e.options.onColumnSizingChange(t),e.setColumnSizingInfo=t=>null==e.options.onColumnSizingInfoChange?void 0:e.options.onColumnSizingInfoChange(t),e.resetColumnSizing=t=>{var n;e.setColumnSizing(t?{}:null!=(n=e.initialState.columnSizing)?n:{})},e.resetHeaderSizeInfo=t=>{var 
n;e.setColumnSizingInfo(t?{startOffset:null,startSize:null,deltaOffset:null,deltaPercentage:null,isResizingColumn:!1,columnSizingStart:[]}:null!=(n=e.initialState.columnSizingInfo)?n:{startOffset:null,startSize:null,deltaOffset:null,deltaPercentage:null,isResizingColumn:!1,columnSizingStart:[]})},e.getTotalSize=()=>{var t,n;return null!=(t=null==(n=e.getHeaderGroups()[0])?void 0:n.headers.reduce(((e,t)=>e+t.getSize()),0))?t:0},e.getLeftTotalSize=()=>{var t,n;return null!=(t=null==(n=e.getLeftHeaderGroups()[0])?void 0:n.headers.reduce(((e,t)=>e+t.getSize()),0))?t:0},e.getCenterTotalSize=()=>{var t,n;return null!=(t=null==(n=e.getCenterHeaderGroups()[0])?void 0:n.headers.reduce(((e,t)=>e+t.getSize()),0))?t:0},e.getRightTotalSize=()=>{var t,n;return null!=(t=null==(n=e.getRightHeaderGroups()[0])?void 0:n.headers.reduce(((e,t)=>e+t.getSize()),0))?t:0}}};let N=null;function D(e){return"touchstart"===e.type}function L(e,t){return t?"center"===t?e.getCenterVisibleLeafColumns():"left"===t?e.getLeftVisibleLeafColumns():e.getRightVisibleLeafColumns():e.getVisibleLeafColumns()}const F={getInitialState:e=>({rowSelection:{},...e}),getDefaultOptions:e=>({onRowSelectionChange:o("rowSelection",e),enableRowSelection:!0,enableMultiRowSelection:!0,enableSubRowSelection:!0}),createTable:e=>{e.setRowSelection=t=>null==e.options.onRowSelectionChange?void 0:e.options.onRowSelectionChange(t),e.resetRowSelection=t=>{var n;return e.setRowSelection(t?{}:null!=(n=e.initialState.rowSelection)?n:{})},e.toggleAllRowsSelected=t=>{e.setRowSelection((n=>{t="undefined"!==typeof t?t:!e.getIsAllRowsSelected();const r={...n},o=e.getPreGroupedRowModel().flatRows;return t?o.forEach((e=>{e.getCanSelect()&&(r[e.id]=!0)})):o.forEach((e=>{delete r[e.id]})),r}))},e.toggleAllPageRowsSelected=t=>e.setRowSelection((n=>{const r="undefined"!==typeof t?t:!e.getIsAllPageRowsSelected(),o={...n};return e.getRowModel().rows.forEach((t=>{j(o,t.id,r,!0,e)})),o})),e.getPreSelectedRowModel=()=>e.getCoreRowModel(),e.getSelectedRowModel=s((()=>[e.getState().rowSelection,e.getCoreRowModel()]),((t,n)=>Object.keys(t).length?U(e,n):{rows:[],flatRows:[],rowsById:{}}),u(e.options,"debugTable")),e.getFilteredSelectedRowModel=s((()=>[e.getState().rowSelection,e.getFilteredRowModel()]),((t,n)=>Object.keys(t).length?U(e,n):{rows:[],flatRows:[],rowsById:{}}),u(e.options,"debugTable")),e.getGroupedSelectedRowModel=s((()=>[e.getState().rowSelection,e.getSortedRowModel()]),((t,n)=>Object.keys(t).length?U(e,n):{rows:[],flatRows:[],rowsById:{}}),u(e.options,"debugTable")),e.getIsAllRowsSelected=()=>{const t=e.getFilteredRowModel().flatRows,{rowSelection:n}=e.getState();let r=Boolean(t.length&&Object.keys(n).length);return r&&t.some((e=>e.getCanSelect()&&!n[e.id]))&&(r=!1),r},e.getIsAllPageRowsSelected=()=>{const t=e.getPaginationRowModel().flatRows.filter((e=>e.getCanSelect())),{rowSelection:n}=e.getState();let r=!!t.length;return r&&t.some((e=>!n[e.id]))&&(r=!1),r},e.getIsSomeRowsSelected=()=>{var t;const n=Object.keys(null!=(t=e.getState().rowSelection)?t:{}).length;return n>0&&n<e.getFilteredRowModel().flatRows.length},e.getIsSomePageRowsSelected=()=>{const t=e.getPaginationRowModel().flatRows;return!e.getIsAllPageRowsSelected()&&t.filter((e=>e.getCanSelect())).some((e=>e.getIsSelected()||e.getIsSomeSelected()))},e.getToggleAllRowsSelectedHandler=()=>t=>{e.toggleAllRowsSelected(t.target.checked)},e.getToggleAllPageRowsSelectedHandler=()=>t=>{e.toggleAllPageRowsSelected(t.target.checked)}},createRow:(e,t)=>{e.toggleSelected=(n,r)=>{const o=e.getIsSelected();t.setRowSelection((i=>{var a;if(n="undefined"!==typeof
n?n:!o,e.getCanSelect()&&o===n)return i;const s={...i};return j(s,e.id,n,null==(a=null==r?void 0:r.selectChildren)||a,t),s}))},e.getIsSelected=()=>{const{rowSelection:n}=t.getState();return z(e,n)},e.getIsSomeSelected=()=>{const{rowSelection:n}=t.getState();return"some"===B(e,n)},e.getIsAllSubRowsSelected=()=>{const{rowSelection:n}=t.getState();return"all"===B(e,n)},e.getCanSelect=()=>{var n;return"function"===typeof t.options.enableRowSelection?t.options.enableRowSelection(e):null==(n=t.options.enableRowSelection)||n},e.getCanSelectSubRows=()=>{var n;return"function"===typeof t.options.enableSubRowSelection?t.options.enableSubRowSelection(e):null==(n=t.options.enableSubRowSelection)||n},e.getCanMultiSelect=()=>{var n;return"function"===typeof t.options.enableMultiRowSelection?t.options.enableMultiRowSelection(e):null==(n=t.options.enableMultiRowSelection)||n},e.getToggleSelectedHandler=()=>{const t=e.getCanSelect();return n=>{var r;t&&e.toggleSelected(null==(r=n.target)?void 0:r.checked)}}}},j=(e,t,n,r,o)=>{var i;const a=o.getRow(t,!0);n?(a.getCanMultiSelect()||Object.keys(e).forEach((t=>delete e[t])),a.getCanSelect()&&(e[t]=!0)):delete e[t],r&&null!=(i=a.subRows)&&i.length&&a.getCanSelectSubRows()&&a.subRows.forEach((t=>j(e,t.id,n,r,o)))};function U(e,t){const n=e.getState().rowSelection,r=[],o={},i=function(e,t){return e.map((e=>{var t;const a=z(e,n);if(a&&(r.push(e),o[e.id]=e),null!=(t=e.subRows)&&t.length&&(e={...e,subRows:i(e.subRows)}),a)return e})).filter(Boolean)};return{rows:i(t.rows),flatRows:r,rowsById:o}}function z(e,t){var n;return null!=(n=t[e.id])&&n}function B(e,t,n){var r;if(null==(r=e.subRows)||!r.length)return!1;let o=!0,i=!1;return e.subRows.forEach((e=>{if((!i||o)&&(e.getCanSelect()&&(z(e,t)?i=!0:o=!1),e.subRows&&e.subRows.length)){const n=B(e,t);"all"===n?i=!0:"some"===n?(i=!0,o=!1):o=!1}})),o?"all":!!i&&"some"}const V=/([0-9]+)/gm;function $(e,t){return e===t?0:e>t?1:-1}function H(e){return"number"===typeof e?isNaN(e)||e===1/0||e===-1/0?"":String(e):"string"===typeof e?e:""}function W(e,t){const n=e.split(V).filter(Boolean),r=t.split(V).filter(Boolean);for(;n.length&&r.length;){const e=n.shift(),t=r.shift(),o=parseInt(e,10),i=parseInt(t,10),a=[o,i].sort();if(isNaN(a[0])){if(e>t)return 1;if(t>e)return-1}else{if(isNaN(a[1]))return isNaN(o)?-1:1;if(o>i)return 1;if(i>o)return-1}}return n.length-r.length}const q={alphanumeric:(e,t,n)=>W(H(e.getValue(n)).toLowerCase(),H(t.getValue(n)).toLowerCase()),alphanumericCaseSensitive:(e,t,n)=>W(H(e.getValue(n)),H(t.getValue(n))),text:(e,t,n)=>$(H(e.getValue(n)).toLowerCase(),H(t.getValue(n)).toLowerCase()),textCaseSensitive:(e,t,n)=>$(H(e.getValue(n)),H(t.getValue(n))),datetime:(e,t,n)=>{const r=e.getValue(n),o=t.getValue(n);return r>o?1:r<o?-1:0},basic:(e,t,n)=>$(e.getValue(n),t.getValue(n))},G=[f,{getInitialState:e=>({columnVisibility:{},...e}),getDefaultOptions:e=>({onColumnVisibilityChange:o("columnVisibility",e)}),createColumn:(e,t)=>{e.toggleVisibility=n=>{e.getCanHide()&&t.setColumnVisibility((t=>({...t,[e.id]:null!=n?n:!e.getIsVisible()})))},e.getIsVisible=()=>{var n,r;const o=e.columns;return null==(n=o.length?o.some((e=>e.getIsVisible())):null==(r=t.getState().columnVisibility)?void 0:r[e.id])||n},e.getCanHide=()=>{var
n,r;return(null==(n=e.columnDef.enableHiding)||n)&&(null==(r=t.options.enableHiding)||r)},e.getToggleVisibilityHandler=()=>t=>{null==e.toggleVisibility||e.toggleVisibility(t.target.checked)}},createRow:(e,t)=>{e._getAllVisibleCells=s((()=>[e.getAllCells(),t.getState().columnVisibility]),(e=>e.filter((e=>e.column.getIsVisible()))),u(t.options,"debugRows")),e.getVisibleCells=s((()=>[e.getLeftVisibleCells(),e.getCenterVisibleCells(),e.getRightVisibleCells()]),((e,t,n)=>[...e,...t,...n]),u(t.options,"debugRows"))},createTable:e=>{const t=(t,n)=>s((()=>[n(),n().filter((e=>e.getIsVisible())).map((e=>e.id)).join("_")]),(e=>e.filter((e=>null==e.getIsVisible?void 0:e.getIsVisible()))),u(e.options,"debugColumns"));e.getVisibleFlatColumns=t(0,(()=>e.getAllFlatColumns())),e.getVisibleLeafColumns=t(0,(()=>e.getAllLeafColumns())),e.getLeftVisibleLeafColumns=t(0,(()=>e.getLeftLeafColumns())),e.getRightVisibleLeafColumns=t(0,(()=>e.getRightLeafColumns())),e.getCenterVisibleLeafColumns=t(0,(()=>e.getCenterLeafColumns())),e.setColumnVisibility=t=>null==e.options.onColumnVisibilityChange?void 0:e.options.onColumnVisibilityChange(t),e.resetColumnVisibility=t=>{var n;e.setColumnVisibility(t?{}:null!=(n=e.initialState.columnVisibility)?n:{})},e.toggleAllColumnsVisible=t=>{var n;t=null!=(n=t)?n:!e.getIsAllColumnsVisible(),e.setColumnVisibility(e.getAllLeafColumns().reduce(((e,n)=>({...e,[n.id]:t||!(null!=n.getCanHide&&n.getCanHide())})),{}))},e.getIsAllColumnsVisible=()=>!e.getAllLeafColumns().some((e=>!(null!=e.getIsVisible&&e.getIsVisible()))),e.getIsSomeColumnsVisible=()=>e.getAllLeafColumns().some((e=>null==e.getIsVisible?void 0:e.getIsVisible())),e.getToggleAllColumnsVisibilityHandler=()=>t=>{var n;e.toggleAllColumnsVisible(null==(n=t.target)?void 0:n.checked)}}},k,M,p,C,{createTable:e=>{e._getGlobalFacetedRowModel=e.options.getFacetedRowModel&&e.options.getFacetedRowModel(e,"__global__"),e.getGlobalFacetedRowModel=()=>e.options.manualFiltering||!e._getGlobalFacetedRowModel?e.getPreFilteredRowModel():e._getGlobalFacetedRowModel(),e._getGlobalFacetedUniqueValues=e.options.getFacetedUniqueValues&&e.options.getFacetedUniqueValues(e,"__global__"),e.getGlobalFacetedUniqueValues=()=>e._getGlobalFacetedUniqueValues?e._getGlobalFacetedUniqueValues():new Map,e._getGlobalFacetedMinMaxValues=e.options.getFacetedMinMaxValues&&e.options.getFacetedMinMaxValues(e,"__global__"),e.getGlobalFacetedMinMaxValues=()=>{if(e._getGlobalFacetedMinMaxValues)return e._getGlobalFacetedMinMaxValues()}}},{getInitialState:e=>({globalFilter:void 0,...e}),getDefaultOptions:e=>({onGlobalFilterChange:o("globalFilter",e),globalFilterFn:"auto",getColumnCanGlobalFilter:t=>{var n;const r=null==(n=e.getCoreRowModel().flatRows[0])||null==(n=n._getAllCellsByColumnId()[t.id])?void 0:n.getValue();return"string"===typeof r||"number"===typeof r}}),createColumn:(e,t)=>{e.getCanGlobalFilter=()=>{var n,r,o,i;return(null==(n=e.columnDef.enableGlobalFilter)||n)&&(null==(r=t.options.enableGlobalFilter)||r)&&(null==(o=t.options.enableFilters)||o)&&(null==(i=null==t.options.getColumnCanGlobalFilter?void 0:t.options.getColumnCanGlobalFilter(e))||i)&&!!e.accessorFn}},createTable:e=>{e.getGlobalAutoFilterFn=()=>A.includesString,e.getGlobalFilterFn=()=>{var t,n;const{globalFilterFn:r}=e.options;return i(r)?r:"auto"===r?e.getGlobalAutoFilterFn():null!=(t=null==(n=e.options.filterFns)?void 0:n[r])?t:A[r]},e.setGlobalFilter=t=>{null==e.options.onGlobalFilterChange||e.options.onGlobalFilterChange(t)},e.resetGlobalFilter=t=>{e.setGlobalFilter(t?void 
0:e.initialState.globalFilter)}}},{getInitialState:e=>({sorting:[],...e}),getDefaultColumnDef:()=>({sortingFn:"auto",sortUndefined:1}),getDefaultOptions:e=>({onSortingChange:o("sorting",e),isMultiSortEvent:e=>e.shiftKey}),createColumn:(e,t)=>{e.getAutoSortingFn=()=>{const n=t.getFilteredRowModel().flatRows.slice(10);let r=!1;for(const t of n){const n=null==t?void 0:t.getValue(e.id);if("[object Date]"===Object.prototype.toString.call(n))return q.datetime;if("string"===typeof n&&(r=!0,n.split(V).length>1))return q.alphanumeric}return r?q.text:q.basic},e.getAutoSortDir=()=>{const n=t.getFilteredRowModel().flatRows[0];return"string"===typeof(null==n?void 0:n.getValue(e.id))?"asc":"desc"},e.getSortingFn=()=>{var n,r;if(!e)throw new Error;return i(e.columnDef.sortingFn)?e.columnDef.sortingFn:"auto"===e.columnDef.sortingFn?e.getAutoSortingFn():null!=(n=null==(r=t.options.sortingFns)?void 0:r[e.columnDef.sortingFn])?n:q[e.columnDef.sortingFn]},e.toggleSorting=(n,r)=>{const o=e.getNextSortingOrder(),i="undefined"!==typeof n&&null!==n;t.setSorting((a=>{const s=null==a?void 0:a.find((t=>t.id===e.id)),u=null==a?void 0:a.findIndex((t=>t.id===e.id));let l,c=[],f=i?n:"desc"===o;var d;(l=null!=a&&a.length&&e.getCanMultiSort()&&r?s?"toggle":"add":null!=a&&a.length&&u!==a.length-1?"replace":s?"toggle":"replace","toggle"===l&&(i||o||(l="remove")),"add"===l)?(c=[...a,{id:e.id,desc:f}],c.splice(0,c.length-(null!=(d=t.options.maxMultiSortColCount)?d:Number.MAX_SAFE_INTEGER))):c="toggle"===l?a.map((t=>t.id===e.id?{...t,desc:f}:t)):"remove"===l?a.filter((t=>t.id!==e.id)):[{id:e.id,desc:f}];return c}))},e.getFirstSortDir=()=>{var n,r;return(null!=(n=null!=(r=e.columnDef.sortDescFirst)?r:t.options.sortDescFirst)?n:"desc"===e.getAutoSortDir())?"desc":"asc"},e.getNextSortingOrder=n=>{var r,o;const i=e.getFirstSortDir(),a=e.getIsSorted();return a?!!(a===i||null!=(r=t.options.enableSortingRemoval)&&!r||n&&null!=(o=t.options.enableMultiRemove)&&!o)&&("desc"===a?"asc":"desc"):i},e.getCanSort=()=>{var n,r;return(null==(n=e.columnDef.enableSorting)||n)&&(null==(r=t.options.enableSorting)||r)&&!!e.accessorFn},e.getCanMultiSort=()=>{var n,r;return null!=(n=null!=(r=e.columnDef.enableMultiSort)?r:t.options.enableMultiSort)?n:!!e.accessorFn},e.getIsSorted=()=>{var n;const r=null==(n=t.getState().sorting)?void 0:n.find((t=>t.id===e.id));return!!r&&(r.desc?"desc":"asc")},e.getSortIndex=()=>{var n,r;return null!=(n=null==(r=t.getState().sorting)?void 0:r.findIndex((t=>t.id===e.id)))?n:-1},e.clearSorting=()=>{t.setSorting((t=>null!=t&&t.length?t.filter((t=>t.id!==e.id)):[]))},e.getToggleSortingHandler=()=>{const n=e.getCanSort();return r=>{n&&(null==r.persist||r.persist(),null==e.toggleSorting||e.toggleSorting(void 0,!!e.getCanMultiSort()&&(null==t.options.isMultiSortEvent?void 0:t.options.isMultiSortEvent(r))))}}},createTable:e=>{e.setSorting=t=>null==e.options.onSortingChange?void 0:e.options.onSortingChange(t),e.resetSorting=t=>{var n,r;e.setSorting(t?[]:null!=(n=null==(r=e.initialState)?void 0:r.sorting)?n:[])},e.getPreSortedRowModel=()=>e.getGroupedRowModel(),e.getSortedRowModel=()=>(!e._getSortedRowModel&&e.options.getSortedRowModel&&(e._getSortedRowModel=e.options.getSortedRowModel(e)),e.options.manualSorting||!e._getSortedRowModel?e.getPreSortedRowModel():e._getSortedRowModel())}},T,{getInitialState:e=>({expanded:{},...e}),getDefaultOptions:e=>({onExpandedChange:o("expanded",e),paginateExpandedRows:!0}),createTable:e=>{let t=!1,n=!1;e._autoResetExpanded=()=>{var 
r,o;if(t){if(null!=(r=null!=(o=e.options.autoResetAll)?o:e.options.autoResetExpanded)?r:!e.options.manualExpanding){if(n)return;n=!0,e._queue((()=>{e.resetExpanded(),n=!1}))}}else e._queue((()=>{t=!0}))},e.setExpanded=t=>null==e.options.onExpandedChange?void 0:e.options.onExpandedChange(t),e.toggleAllRowsExpanded=t=>{(null!=t?t:!e.getIsAllRowsExpanded())?e.setExpanded(!0):e.setExpanded({})},e.resetExpanded=t=>{var n,r;e.setExpanded(t?{}:null!=(n=null==(r=e.initialState)?void 0:r.expanded)?n:{})},e.getCanSomeRowsExpand=()=>e.getPrePaginationRowModel().flatRows.some((e=>e.getCanExpand())),e.getToggleAllRowsExpandedHandler=()=>t=>{null==t.persist||t.persist(),e.toggleAllRowsExpanded()},e.getIsSomeRowsExpanded=()=>{const t=e.getState().expanded;return!0===t||Object.values(t).some(Boolean)},e.getIsAllRowsExpanded=()=>{const t=e.getState().expanded;return"boolean"===typeof t?!0===t:!!Object.keys(t).length&&!e.getRowModel().flatRows.some((e=>!e.getIsExpanded()))},e.getExpandedDepth=()=>{let t=0;return(!0===e.getState().expanded?Object.keys(e.getRowModel().rowsById):Object.keys(e.getState().expanded)).forEach((e=>{const n=e.split(".");t=Math.max(t,n.length)})),t},e.getPreExpandedRowModel=()=>e.getSortedRowModel(),e.getExpandedRowModel=()=>(!e._getExpandedRowModel&&e.options.getExpandedRowModel&&(e._getExpandedRowModel=e.options.getExpandedRowModel(e)),e.options.manualExpanding||!e._getExpandedRowModel?e.getPreExpandedRowModel():e._getExpandedRowModel())},createRow:(e,t)=>{e.toggleExpanded=n=>{t.setExpanded((r=>{var o;const i=!0===r||!(null==r||!r[e.id]);let a={};if(!0===r?Object.keys(t.getRowModel().rowsById).forEach((e=>{a[e]=!0})):a=r,n=null!=(o=n)?o:!i,!i&&n)return{...a,[e.id]:!0};if(i&&!n){const{[e.id]:t,...n}=a;return n}return r}))},e.getIsExpanded=()=>{var n;const r=t.getState().expanded;return!!(null!=(n=null==t.options.getIsRowExpanded?void 0:t.options.getIsRowExpanded(e))?n:!0===r||(null==r?void 0:r[e.id]))},e.getCanExpand=()=>{var n,r,o;return null!=(n=null==t.options.getRowCanExpand?void 0:t.options.getRowCanExpand(e))?n:(null==(r=t.options.enableExpanding)||r)&&!(null==(o=e.subRows)||!o.length)},e.getIsAllParentsExpanded=()=>{let n=!0,r=e;for(;n&&r.parentId;)r=t.getRow(r.parentId,!0),n=r.getIsExpanded();return n},e.getToggleExpandedHandler=()=>{const t=e.getCanExpand();return()=>{t&&e.toggleExpanded()}}}},{getInitialState:e=>({...e,pagination:{pageIndex:0,pageSize:10,...null==e?void 0:e.pagination}}),getDefaultOptions:e=>({onPaginationChange:o("pagination",e)}),createTable:e=>{let t=!1,n=!1;e._autoResetPageIndex=()=>{var r,o;if(t){if(null!=(r=null!=(o=e.options.autoResetAll)?o:e.options.autoResetPageIndex)?r:!e.options.manualPagination){if(n)return;n=!0,e._queue((()=>{e.resetPageIndex(),n=!1}))}}else e._queue((()=>{t=!0}))},e.setPagination=t=>null==e.options.onPaginationChange?void 0:e.options.onPaginationChange((e=>r(t,e))),e.resetPagination=t=>{var n;e.setPagination(t?{pageIndex:0,pageSize:10}:null!=(n=e.initialState.pagination)?n:{pageIndex:0,pageSize:10})},e.setPageIndex=t=>{e.setPagination((n=>{let o=r(t,n.pageIndex);const i="undefined"===typeof e.options.pageCount||-1===e.options.pageCount?Number.MAX_SAFE_INTEGER:e.options.pageCount-1;return o=Math.max(0,Math.min(o,i)),{...n,pageIndex:o}}))},e.resetPageIndex=t=>{var n,r;e.setPageIndex(t?0:null!=(n=null==(r=e.initialState)||null==(r=r.pagination)?void 0:r.pageIndex)?n:0)},e.resetPageSize=t=>{var n,r;e.setPageSize(t?10:null!=(n=null==(r=e.initialState)||null==(r=r.pagination)?void 
0:r.pageSize)?n:10)},e.setPageSize=t=>{e.setPagination((e=>{const n=Math.max(1,r(t,e.pageSize)),o=e.pageSize*e.pageIndex,i=Math.floor(o/n);return{...e,pageIndex:i,pageSize:n}}))},e.setPageCount=t=>e.setPagination((n=>{var o;let i=r(t,null!=(o=e.options.pageCount)?o:-1);return"number"===typeof i&&(i=Math.max(-1,i)),{...n,pageCount:i}})),e.getPageOptions=s((()=>[e.getPageCount()]),(e=>{let t=[];return e&&e>0&&(t=[...new Array(e)].fill(null).map(((e,t)=>t))),t}),u(e.options,"debugTable")),e.getCanPreviousPage=()=>e.getState().pagination.pageIndex>0,e.getCanNextPage=()=>{const{pageIndex:t}=e.getState().pagination,n=e.getPageCount();return-1===n||0!==n&&t<n-1},e.previousPage=()=>e.setPageIndex((e=>e-1)),e.nextPage=()=>e.setPageIndex((e=>e+1)),e.firstPage=()=>e.setPageIndex(0),e.lastPage=()=>e.setPageIndex(e.getPageCount()-1),e.getPrePaginationRowModel=()=>e.getExpandedRowModel(),e.getPaginationRowModel=()=>(!e._getPaginationRowModel&&e.options.getPaginationRowModel&&(e._getPaginationRowModel=e.options.getPaginationRowModel(e)),e.options.manualPagination||!e._getPaginationRowModel?e.getPrePaginationRowModel():e._getPaginationRowModel()),e.getPageCount=()=>{var t;return null!=(t=e.options.pageCount)?t:Math.ceil(e.getRowCount()/e.getState().pagination.pageSize)},e.getRowCount=()=>{var t;return null!=(t=e.options.rowCount)?t:e.getPrePaginationRowModel().rows.length}}},{getInitialState:e=>({rowPinning:{top:[],bottom:[]},...e}),getDefaultOptions:e=>({onRowPinningChange:o("rowPinning",e)}),createRow:(e,t)=>{e.pin=(n,r,o)=>{const i=r?e.getLeafRows().map((e=>{let{id:t}=e;return t})):[],a=o?e.getParentRows().map((e=>{let{id:t}=e;return t})):[],s=new Set([...a,e.id,...i]);t.setRowPinning((e=>{var t,r,o,i,a,u;return"bottom"===n?{top:(null!=(o=null==e?void 0:e.top)?o:[]).filter((e=>!(null!=s&&s.has(e)))),bottom:[...(null!=(i=null==e?void 0:e.bottom)?i:[]).filter((e=>!(null!=s&&s.has(e)))),...Array.from(s)]}:"top"===n?{top:[...(null!=(a=null==e?void 0:e.top)?a:[]).filter((e=>!(null!=s&&s.has(e)))),...Array.from(s)],bottom:(null!=(u=null==e?void 0:e.bottom)?u:[]).filter((e=>!(null!=s&&s.has(e))))}:{top:(null!=(t=null==e?void 0:e.top)?t:[]).filter((e=>!(null!=s&&s.has(e)))),bottom:(null!=(r=null==e?void 0:e.bottom)?r:[]).filter((e=>!(null!=s&&s.has(e))))}}))},e.getCanPin=()=>{var n;const{enableRowPinning:r,enablePinning:o}=t.options;return"function"===typeof r?r(e):null==(n=null!=r?r:o)||n},e.getIsPinned=()=>{const n=[e.id],{top:r,bottom:o}=t.getState().rowPinning,i=n.some((e=>null==r?void 0:r.includes(e))),a=n.some((e=>null==o?void 0:o.includes(e)));return i?"top":!!a&&"bottom"},e.getPinnedIndex=()=>{var n,r;const o=e.getIsPinned();if(!o)return-1;const i=null==(n=t._getPinnedRows(o))?void 0:n.map((e=>{let{id:t}=e;return t}));return null!=(r=null==i?void 0:i.indexOf(e.id))?r:-1}},createTable:e=>{e.setRowPinning=t=>null==e.options.onRowPinningChange?void 0:e.options.onRowPinningChange(t),e.resetRowPinning=t=>{var n,r;return e.setRowPinning(t?{top:[],bottom:[]}:null!=(n=null==(r=e.initialState)?void 0:r.rowPinning)?n:{top:[],bottom:[]})},e.getIsSomeRowsPinned=t=>{var n;const r=e.getState().rowPinning;var o,i;return t?Boolean(null==(n=r[t])?void 0:n.length):Boolean((null==(o=r.top)?void 0:o.length)||(null==(i=r.bottom)?void 0:i.length))},e._getPinnedRows=s((t=>[e.getRowModel().rows,e.getState().rowPinning[t],t]),((t,n,r)=>{var o;return(null==(o=e.options.keepPinnedRows)||o?(null!=n?n:[]).map((t=>{const n=e.getRow(t,!0);return
n.getIsAllParentsExpanded()?n:null})):(null!=n?n:[]).map((e=>t.find((t=>t.id===e))))).filter(Boolean).map((e=>({...e,position:r})))}),u(e.options,"debugRows")),e.getTopRows=()=>e._getPinnedRows("top"),e.getBottomRows=()=>e._getPinnedRows("bottom"),e.getCenterRows=s((()=>[e.getRowModel().rows,e.getState().rowPinning.top,e.getState().rowPinning.bottom]),((e,t,n)=>{const r=new Set([...null!=t?t:[],...null!=n?n:[]]);return e.filter((e=>!r.has(e.id)))}),u(e.options,"debugRows"))}},F,I];function Y(e){var t,n;const o=[...G,...null!=(t=e._features)?t:[]];let i={_features:o};const a=i._features.reduce(((e,t)=>Object.assign(e,null==t.getDefaultOptions?void 0:t.getDefaultOptions(i))),{});let l={...null!=(n=e.initialState)?n:{}};i._features.forEach((e=>{var t;l=null!=(t=null==e.getInitialState?void 0:e.getInitialState(l))?t:l}));const c=[];let f=!1;const d={_features:o,options:{...a,...e},initialState:l,_queue:e=>{c.push(e),f||(f=!0,Promise.resolve().then((()=>{for(;c.length;)c.shift()();f=!1})).catch((e=>setTimeout((()=>{throw e})))))},reset:()=>{i.setState(i.initialState)},setOptions:e=>{const t=r(e,i.options);i.options=(e=>i.options.mergeOptions?i.options.mergeOptions(a,e):{...a,...e})(t)},getState:()=>i.options.state,setState:e=>{null==i.options.onStateChange||i.options.onStateChange(e)},_getRowId:(e,t,n)=>{var r;return null!=(r=null==i.options.getRowId?void 0:i.options.getRowId(e,t,n))?r:`${n?[n.id,t].join("."):t}`},getCoreRowModel:()=>(i._getCoreRowModel||(i._getCoreRowModel=i.options.getCoreRowModel(i)),i._getCoreRowModel()),getRowModel:()=>i.getPaginationRowModel(),getRow:(e,t)=>{let n=(t?i.getPrePaginationRowModel():i.getRowModel()).rowsById[e];if(!n&&(n=i.getCoreRowModel().rowsById[e],!n))throw new Error;return n},_getDefaultColumnDef:s((()=>[i.options.defaultColumn]),(e=>{var t;return e=null!=(t=e)?t:{},{header:e=>{const t=e.header.column.columnDef;return t.accessorKey?t.accessorKey:t.accessorFn?t.id:null},cell:e=>{var t,n;return null!=(t=null==(n=e.renderValue())||null==n.toString?void 0:n.toString())?t:null},...i._features.reduce(((e,t)=>Object.assign(e,null==t.getDefaultColumnDef?void 0:t.getDefaultColumnDef())),{}),...e}}),u(e,"debugColumns")),_getColumnDefs:()=>i.options.columns,getAllColumns:s((()=>[i._getColumnDefs()]),(e=>{const t=function(e,n,r){return void 0===r&&(r=0),e.map((e=>{const o=function(e,t,n,r){var o,i;const a={...e._getDefaultColumnDef(),...t},l=a.accessorKey;let c,f=null!=(o=null!=(i=a.id)?i:l?l.replace(".","_"):void 0)?o:"string"===typeof a.header?a.header:void 0;if(a.accessorFn?c=a.accessorFn:l&&(c=l.includes(".")?e=>{let t=e;for(const r of l.split(".")){var n;t=null==(n=t)?void 0:n[r]}return t}:e=>e[a.accessorKey]),!f)throw new Error;let d={id:`${String(f)}`,accessorFn:c,parent:r,depth:n,columnDef:a,columns:[],getFlatColumns:s((()=>[!0]),(()=>{var e;return[d,...null==(e=d.columns)?void 0:e.flatMap((e=>e.getFlatColumns()))]}),u(e.options,"debugColumns")),getLeafColumns:s((()=>[e._getOrderColumnsFn()]),(e=>{var t;if(null!=(t=d.columns)&&t.length){let t=d.columns.flatMap((e=>e.getLeafColumns()));return e(t)}return[d]}),u(e.options,"debugColumns"))};for(const s of e._features)null==s.createColumn||s.createColumn(d,e);return d}(i,e,r,n),a=e;return o.columns=a.columns?t(a.columns,o,r+1):[],o}))};return 
t(e)}),u(e,"debugColumns")),getAllFlatColumns:s((()=>[i.getAllColumns()]),(e=>e.flatMap((e=>e.getFlatColumns()))),u(e,"debugColumns")),_getAllFlatColumnsById:s((()=>[i.getAllFlatColumns()]),(e=>e.reduce(((e,t)=>(e[t.id]=t,e)),{})),u(e,"debugColumns")),getAllLeafColumns:s((()=>[i.getAllColumns(),i._getOrderColumnsFn()]),((e,t)=>t(e.flatMap((e=>e.getLeafColumns())))),u(e,"debugColumns")),getColumn:e=>i._getAllFlatColumnsById()[e]};Object.assign(i,d);for(let r=0;rs((()=>[e.options.data]),(t=>{const n={rows:[],flatRows:[],rowsById:{}},r=function(t,o,i){void 0===o&&(o=0);const a=[];for(let u=0;ue._autoResetPageIndex())))}function K(){return e=>s((()=>[e.getState().expanded,e.getPreExpandedRowModel(),e.options.paginateExpandedRows]),((e,t,n)=>!t.rows.length||!0!==e&&!Object.keys(null!=e?e:{}).length?t:n?Z(t):t),u(e.options,"debugTable"))}function Z(e){const t=[],n=e=>{var r;t.push(e),null!=(r=e.subRows)&&r.length&&e.getIsExpanded()&&e.subRows.forEach(n)};return e.rows.forEach(n),{rows:t,flatRows:e.flatRows,rowsById:e.rowsById}}function Q(e,t,n){return n.options.filterFromLeafRows?function(e,t,n){var r;const o=[],i={},a=null!=(r=n.options.maxLeafRowFilterDepth)?r:100,s=function(e,r){void 0===r&&(r=0);const u=[];for(let c=0;cs((()=>[e.getPreFilteredRowModel(),e.getState().columnFilters,e.getState().globalFilter]),((t,n,r)=>{if(!t.rows.length||(null==n||!n.length)&&!r){for(let e=0;e{var n;const r=e.getColumn(t.id);if(!r)return;const i=r.getFilterFn();i&&o.push({id:t.id,filterFn:i,resolvedValue:null!=(n=null==i.resolveFilterValue?void 0:i.resolveFilterValue(t.value))?n:t.value})}));const a=n.map((e=>e.id)),s=e.getGlobalFilterFn(),u=e.getAllLeafColumns().filter((e=>e.getCanGlobalFilter()));let l,c;r&&s&&u.length&&(a.push("__global__"),u.forEach((e=>{var t;i.push({id:e.id,filterFn:s,resolvedValue:null!=(t=null==s.resolveFilterValue?void 0:s.resolveFilterValue(r))?t:r})})));for(let e=0;e{n.columnFiltersMeta[t]=e}))}if(i.length){for(let e=0;e{n.columnFiltersMeta[t]=e}))){n.columnFilters.__global__=!0;break}}!0!==n.columnFilters.__global__&&(n.columnFilters.__global__=!1)}}return Q(t.rows,(e=>{for(let t=0;te._autoResetPageIndex())))}function ee(){return e=>s((()=>[e.getState().grouping,e.getPreGroupedRowModel()]),((t,n)=>{if(!n.rows.length||!t.length)return n;const r=t.filter((t=>e.getColumn(t))),o=[],i={},s=function(t,n,u){if(void 0===n&&(n=0),n>=r.length)return t.map((e=>(e.depth=n,o.push(e),i[e.id]=e,e.subRows&&(e.subRows=s(e.subRows,n+1,e.id)),e)));const l=r[n],c=function(e,t){const n=new Map;return e.reduce(((e,n)=>{const r=`${n.getGroupingValue(t)}`,o=e.get(r);return o?o.push(n):e.set(r,[n]),e}),n)}(t,l),f=Array.from(c.entries()).map(((t,c)=>{let[f,d]=t,p=`${l}:${f}`;p=u?`${u}>${p}`:p;const g=s(d,n+1,p),v=n?a(d,(e=>e.subRows)):d,y=h(e,p,v[0].original,c,n,void 0,u);return Object.assign(y,{groupingColumnId:l,groupingValue:f,subRows:g,leafRows:v,getValue:t=>{if(r.includes(t)){if(y._valuesCache.hasOwnProperty(t))return y._valuesCache[t];var n;if(d[0])y._valuesCache[t]=null!=(n=d[0].getValue(t))?n:void 0;return y._valuesCache[t]}if(y._groupingValuesCache.hasOwnProperty(t))return y._groupingValuesCache[t];const o=e.getColumn(t),i=null==o?void 0:o.getAggregationFn();return i?(y._groupingValuesCache[t]=i(t,v,d),y._groupingValuesCache[t]):void 0}}),g.forEach((e=>{o.push(e),i[e.id]=e})),y}));return f},u=s(n.rows,0);return u.forEach((e=>{o.push(e),i[e.id]=e})),{rows:u,flatRows:o,rowsById:i}}),u(e.options,"debugTable",0,(()=>{e._queue((()=>{e._autoResetExpanded(),e._autoResetPageIndex()}))})))}function 
te(e){return e=>s((()=>[e.getState().pagination,e.getPrePaginationRowModel(),e.options.paginateExpandedRows?void 0:e.getState().expanded]),((t,n)=>{if(!n.rows.length)return n;const{pageSize:r,pageIndex:o}=t;let{rows:i,flatRows:a,rowsById:s}=n;const u=r*o,l=u+r;let c;i=i.slice(u,l),c=e.options.paginateExpandedRows?{rows:i,flatRows:a,rowsById:s}:Z({rows:i,flatRows:a,rowsById:s}),c.flatRows=[];const f=e=>{c.flatRows.push(e),e.subRows.length&&e.subRows.forEach(f)};return c.rows.forEach(f),c}),u(e.options,"debugTable"))}function ne(){return e=>s((()=>[e.getState().sorting,e.getPreSortedRowModel()]),((t,n)=>{if(!n.rows.length||null==t||!t.length)return n;const r=e.getState().sorting,o=[],i=r.filter((t=>{var n;return null==(n=e.getColumn(t.id))?void 0:n.getCanSort()})),a={};i.forEach((t=>{const n=e.getColumn(t.id);n&&(a[t.id]={sortUndefined:n.columnDef.sortUndefined,invertSorting:n.columnDef.invertSorting,sortingFn:n.getSortingFn()})}));const s=e=>{const t=e.map((e=>({...e})));return t.sort(((e,t)=>{for(let r=0;r<i.length;r+=1){const n=i[r],o=a[n.id],s=o.sortUndefined,u=!!n.desc;let l=0;if(s){const r=e.getValue(n.id),i=t.getValue(n.id),a=void 0===r,c=void 0===i;if(a||c){if("first"===s)return a?-1:1;if("last"===s)return a?1:-1;l=a&&c?0:a?s:-s}}if(0===l&&(l=o.sortingFn(e,t,n.id)),0!==l)return u&&(l*=-1),o.invertSorting&&(l*=-1),l}return e.index-t.index})),t.forEach((e=>{var t;o.push(e),null!=(t=e.subRows)&&t.length&&(e.subRows=s(e.subRows))})),t};return{rows:s(n.rows),flatRows:o,rowsById:n.rowsById}}),u(e.options,"debugTable",0,(()=>e._autoResetPageIndex())))}},46266:(e,t,n)=>{"use strict";n.d(t,{A:()=>Ge});var r={};function o(e,t){return function(){return e.apply(t,arguments)}}n.r(r),n.d(r,{hasBrowserEnv:()=>re,hasStandardBrowserEnv:()=>oe,hasStandardBrowserWebWorkerEnv:()=>ae});const{toString:i}=Object.prototype,{getPrototypeOf:a}=Object,s=(u=Object.create(null),e=>{const t=i.call(e);return u[t]||(u[t]=t.slice(8,-1).toLowerCase())});var u;const l=e=>(e=e.toLowerCase(),t=>s(t)===e),c=e=>t=>typeof t===e,{isArray:f}=Array,d=c("undefined");const h=l("ArrayBuffer");const p=c("string"),g=c("function"),v=c("number"),y=e=>null!==e&&"object"===typeof e,m=e=>{if("object"!==s(e))return!1;const t=a(e);return(null===t||t===Object.prototype||null===Object.getPrototypeOf(t))&&!(Symbol.toStringTag in e)&&!(Symbol.iterator in e)},b=l("Date"),_=l("File"),w=l("Blob"),x=l("FileList"),S=l("URLSearchParams");function A(e,t,{allOwnKeys:n=!1}={}){if(null===e||"undefined"===typeof e)return;let r,o;if("object"!==typeof e&&(e=[e]),f(e))for(r=0,o=e.length;r<o;r++)t.call(null,e[r],r,e);else{const i=n?Object.getOwnPropertyNames(e):Object.keys(e),a=i.length;let s;for(r=0;r<a;r++)s=i[r],t.call(null,e[s],s,e)}}function E(e,t){t=t.toLowerCase();const n=Object.keys(e);let r,o=n.length;for(;o-- >0;)if(r=n[o],t===r.toLowerCase())return r;return null}const C="undefined"!==typeof globalThis?globalThis:"undefined"!==typeof self?self:"undefined"!==typeof window?window:global,R=e=>!d(e)&&e!==C;const O=(T="undefined"!==typeof Uint8Array&&a(Uint8Array),e=>T&&e instanceof T);var T;const k=l("HTMLFormElement"),M=(({hasOwnProperty:e})=>(t,n)=>e.call(t,n))(Object.prototype),P=l("RegExp"),I=(e,t)=>{const n=Object.getOwnPropertyDescriptors(e),r={};A(n,((n,o)=>{let i;!1!==(i=t(n,o,e))&&(r[o]=i||n)})),Object.defineProperties(e,r)},N="abcdefghijklmnopqrstuvwxyz",D="0123456789",L={DIGIT:D,ALPHA:N,ALPHA_DIGIT:N+N.toUpperCase()+D};const F=l("AsyncFunction"),j={isArray:f,isArrayBuffer:h,isBuffer:function(e){return null!==e&&!d(e)&&null!==e.constructor&&!d(e.constructor)&&g(e.constructor.isBuffer)&&e.constructor.isBuffer(e)},isFormData:e=>{let t;return e&&("function"===typeof FormData&&e instanceof FormData||g(e.append)&&("formdata"===(t=s(e))||"object"===t&&g(e.toString)&&"[object FormData]"===e.toString()))},isArrayBufferView:function(e){let t;return t="undefined"!==typeof
ArrayBuffer&&ArrayBuffer.isView?ArrayBuffer.isView(e):e&&e.buffer&&h(e.buffer),t},isString:p,isNumber:v,isBoolean:e=>!0===e||!1===e,isObject:y,isPlainObject:m,isUndefined:d,isDate:b,isFile:_,isBlob:w,isRegExp:P,isFunction:g,isStream:e=>y(e)&&g(e.pipe),isURLSearchParams:S,isTypedArray:O,isFileList:x,forEach:A,merge:function e(){const{caseless:t}=R(this)&&this||{},n={},r=(r,o)=>{const i=t&&E(n,o)||o;m(n[i])&&m(r)?n[i]=e(n[i],r):m(r)?n[i]=e({},r):f(r)?n[i]=r.slice():n[i]=r};for(let o=0,i=arguments.length;o(A(t,((t,r)=>{n&&g(t)?e[r]=o(t,n):e[r]=t}),{allOwnKeys:r}),e),trim:e=>e.trim?e.trim():e.replace(/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g,""),stripBOM:e=>(65279===e.charCodeAt(0)&&(e=e.slice(1)),e),inherits:(e,t,n,r)=>{e.prototype=Object.create(t.prototype,r),e.prototype.constructor=e,Object.defineProperty(e,"super",{value:t.prototype}),n&&Object.assign(e.prototype,n)},toFlatObject:(e,t,n,r)=>{let o,i,s;const u={};if(t=t||{},null==e)return t;do{for(o=Object.getOwnPropertyNames(e),i=o.length;i-- >0;)s=o[i],r&&!r(s,e,t)||u[s]||(t[s]=e[s],u[s]=!0);e=!1!==n&&a(e)}while(e&&(!n||n(e,t))&&e!==Object.prototype);return t},kindOf:s,kindOfTest:l,endsWith:(e,t,n)=>{e=String(e),(void 0===n||n>e.length)&&(n=e.length),n-=t.length;const r=e.indexOf(t,n);return-1!==r&&r===n},toArray:e=>{if(!e)return null;if(f(e))return e;let t=e.length;if(!v(t))return null;const n=new Array(t);for(;t-- >0;)n[t]=e[t];return n},forEachEntry:(e,t)=>{const n=(e&&e[Symbol.iterator]).call(e);let r;for(;(r=n.next())&&!r.done;){const n=r.value;t.call(e,n[0],n[1])}},matchAll:(e,t)=>{let n;const r=[];for(;null!==(n=e.exec(t));)r.push(n);return r},isHTMLForm:k,hasOwnProperty:M,hasOwnProp:M,reduceDescriptors:I,freezeMethods:e=>{I(e,((t,n)=>{if(g(e)&&-1!==["arguments","caller","callee"].indexOf(n))return!1;const r=e[n];g(r)&&(t.enumerable=!1,"writable"in t?t.writable=!1:t.set||(t.set=()=>{throw Error("Can not rewrite read-only method '"+n+"'")}))}))},toObjectSet:(e,t)=>{const n={},r=e=>{e.forEach((e=>{n[e]=!0}))};return f(e)?r(e):r(String(e).split(t)),n},toCamelCase:e=>e.toLowerCase().replace(/[-_\s]([a-z\d])(\w*)/g,(function(e,t,n){return t.toUpperCase()+n})),noop:()=>{},toFiniteNumber:(e,t)=>(e=+e,Number.isFinite(e)?e:t),findKey:E,global:C,isContextDefined:R,ALPHABET:L,generateString:(e=16,t=L.ALPHA_DIGIT)=>{let n="";const{length:r}=t;for(;e--;)n+=t[Math.random()*r|0];return n},isSpecCompliantForm:function(e){return!!(e&&g(e.append)&&"FormData"===e[Symbol.toStringTag]&&e[Symbol.iterator])},toJSONObject:e=>{const t=new Array(10),n=(e,r)=>{if(y(e)){if(t.indexOf(e)>=0)return;if(!("toJSON"in e)){t[r]=e;const o=f(e)?[]:{};return A(e,((e,t)=>{const i=n(e,r+1);!d(i)&&(o[t]=i)})),t[r]=void 0,o}}return e};return n(e,0)},isAsyncFn:F,isThenable:e=>e&&(y(e)||g(e))&&g(e.then)&&g(e.catch)};function U(e,t,n,r,o){Error.call(this),Error.captureStackTrace?Error.captureStackTrace(this,this.constructor):this.stack=(new Error).stack,this.message=e,this.name="AxiosError",t&&(this.code=t),n&&(this.config=n),r&&(this.request=r),o&&(this.response=o)}j.inherits(U,Error,{toJSON:function(){return{message:this.message,name:this.name,description:this.description,number:this.number,fileName:this.fileName,lineNumber:this.lineNumber,columnNumber:this.columnNumber,stack:this.stack,config:j.toJSONObject(this.config),code:this.code,status:this.response&&this.response.status?this.response.status:null}}});const 
z=U.prototype,B={};["ERR_BAD_OPTION_VALUE","ERR_BAD_OPTION","ECONNABORTED","ETIMEDOUT","ERR_NETWORK","ERR_FR_TOO_MANY_REDIRECTS","ERR_DEPRECATED","ERR_BAD_RESPONSE","ERR_BAD_REQUEST","ERR_CANCELED","ERR_NOT_SUPPORT","ERR_INVALID_URL"].forEach((e=>{B[e]={value:e}})),Object.defineProperties(U,B),Object.defineProperty(z,"isAxiosError",{value:!0}),U.from=(e,t,n,r,o,i)=>{const a=Object.create(z);return j.toFlatObject(e,a,(function(e){return e!==Error.prototype}),(e=>"isAxiosError"!==e)),U.call(a,e.message,t,n,r,o),a.cause=e,a.name=e.name,i&&Object.assign(a,i),a};const V=U;function $(e){return j.isPlainObject(e)||j.isArray(e)}function H(e){return j.endsWith(e,"[]")?e.slice(0,-2):e}function W(e,t,n){return e?e.concat(t).map((function(e,t){return e=H(e),!n&&t?"["+e+"]":e})).join(n?".":""):t}const q=j.toFlatObject(j,{},null,(function(e){return/^is[A-Z]/.test(e)}));const G=function(e,t,n){if(!j.isObject(e))throw new TypeError("target must be an object");t=t||new FormData;const r=(n=j.toFlatObject(n,{metaTokens:!0,dots:!1,indexes:!1},!1,(function(e,t){return!j.isUndefined(t[e])}))).metaTokens,o=n.visitor||l,i=n.dots,a=n.indexes,s=(n.Blob||"undefined"!==typeof Blob&&Blob)&&j.isSpecCompliantForm(t);if(!j.isFunction(o))throw new TypeError("visitor must be a function");function u(e){if(null===e)return"";if(j.isDate(e))return e.toISOString();if(!s&&j.isBlob(e))throw new V("Blob is not supported. Use a Buffer instead.");return j.isArrayBuffer(e)||j.isTypedArray(e)?s&&"function"===typeof Blob?new Blob([e]):Buffer.from(e):e}function l(e,n,o){let s=e;if(e&&!o&&"object"===typeof e)if(j.endsWith(n,"{}"))n=r?n:n.slice(0,-2),e=JSON.stringify(e);else if(j.isArray(e)&&function(e){return j.isArray(e)&&!e.some($)}(e)||(j.isFileList(e)||j.endsWith(n,"[]"))&&(s=j.toArray(e)))return n=H(n),s.forEach((function(e,r){!j.isUndefined(e)&&null!==e&&t.append(!0===a?W([n],r,i):null===a?n:n+"[]",u(e))})),!1;return!!$(e)||(t.append(W(o,n,i),u(e)),!1)}const c=[],f=Object.assign(q,{defaultVisitor:l,convertValue:u,isVisitable:$});if(!j.isObject(e))throw new TypeError("data must be an object");return function e(n,r){if(!j.isUndefined(n)){if(-1!==c.indexOf(n))throw Error("Circular reference detected in "+r.join("."));c.push(n),j.forEach(n,(function(n,i){!0===(!(j.isUndefined(n)||null===n)&&o.call(t,n,j.isString(i)?i.trim():i,r,f))&&e(n,r?r.concat(i):[i])})),c.pop()}}(e),t};function Y(e){const t={"!":"%21","'":"%27","(":"%28",")":"%29","~":"%7E","%20":"+","%00":"\0"};return encodeURIComponent(e).replace(/[!'()~]|%20|%00/g,(function(e){return t[e]}))}function X(e,t){this._pairs=[],e&&G(e,this,t)}const K=X.prototype;K.append=function(e,t){this._pairs.push([e,t])},K.toString=function(e){const t=e?function(t){return e.call(this,t,Y)}:Y;return this._pairs.map((function(e){return t(e[0])+"="+t(e[1])}),"").join("&")};const Z=X;function Q(e){return encodeURIComponent(e).replace(/%3A/gi,":").replace(/%24/g,"$").replace(/%2C/gi,",").replace(/%20/g,"+").replace(/%5B/gi,"[").replace(/%5D/gi,"]")}function J(e,t,n){if(!t)return e;const r=n&&n.encode||Q,o=n&&n.serialize;let i;if(i=o?o(t,n):j.isURLSearchParams(t)?t.toString():new Z(t,n).toString(r),i){const t=e.indexOf("#");-1!==t&&(e=e.slice(0,t)),e+=(-1===e.indexOf("?")?"?":"&")+i}return e}const ee=class{constructor(){this.handlers=[]}use(e,t,n){return 
this.handlers.push({fulfilled:e,rejected:t,synchronous:!!n&&n.synchronous,runWhen:n?n.runWhen:null}),this.handlers.length-1}eject(e){this.handlers[e]&&(this.handlers[e]=null)}clear(){this.handlers&&(this.handlers=[])}forEach(e){j.forEach(this.handlers,(function(t){null!==t&&e(t)}))}},te={silentJSONParsing:!0,forcedJSONParsing:!0,clarifyTimeoutError:!1},ne={isBrowser:!0,classes:{URLSearchParams:"undefined"!==typeof URLSearchParams?URLSearchParams:Z,FormData:"undefined"!==typeof FormData?FormData:null,Blob:"undefined"!==typeof Blob?Blob:null},protocols:["http","https","file","blob","url","data"]},re="undefined"!==typeof window&&"undefined"!==typeof document,oe=(ie="undefined"!==typeof navigator&&navigator.product,re&&["ReactNative","NativeScript","NS"].indexOf(ie)<0);var ie;const ae="undefined"!==typeof WorkerGlobalScope&&self instanceof WorkerGlobalScope&&"function"===typeof self.importScripts,se={...r,...ne};const ue=function(e){function t(e,n,r,o){let i=e[o++];if("__proto__"===i)return!0;const a=Number.isFinite(+i),s=o>=e.length;if(i=!i&&j.isArray(r)?r.length:i,s)return j.hasOwnProp(r,i)?r[i]=[r[i],n]:r[i]=n,!a;r[i]&&j.isObject(r[i])||(r[i]=[]);return t(e,n,r[i],o)&&j.isArray(r[i])&&(r[i]=function(e){const t={},n=Object.keys(e);let r;const o=n.length;let i;for(r=0;r{t(function(e){return j.matchAll(/\w+|\[(\w*)]/g,e).map((e=>"[]"===e[0]?"":e[1]||e[0]))}(e),r,n,0)})),n}return null};const le={transitional:te,adapter:["xhr","http"],transformRequest:[function(e,t){const n=t.getContentType()||"",r=n.indexOf("application/json")>-1,o=j.isObject(e);o&&j.isHTMLForm(e)&&(e=new FormData(e));if(j.isFormData(e))return r?JSON.stringify(ue(e)):e;if(j.isArrayBuffer(e)||j.isBuffer(e)||j.isStream(e)||j.isFile(e)||j.isBlob(e))return e;if(j.isArrayBufferView(e))return e.buffer;if(j.isURLSearchParams(e))return t.setContentType("application/x-www-form-urlencoded;charset=utf-8",!1),e.toString();let i;if(o){if(n.indexOf("application/x-www-form-urlencoded")>-1)return function(e,t){return G(e,new se.classes.URLSearchParams,Object.assign({visitor:function(e,t,n,r){return se.isNode&&j.isBuffer(e)?(this.append(t,e.toString("base64")),!1):r.defaultVisitor.apply(this,arguments)}},t))}(e,this.formSerializer).toString();if((i=j.isFileList(e))||n.indexOf("multipart/form-data")>-1){const t=this.env&&this.env.FormData;return G(i?{"files[]":e}:e,t&&new t,this.formSerializer)}}return o||r?(t.setContentType("application/json",!1),function(e,t,n){if(j.isString(e))try{return(t||JSON.parse)(e),j.trim(e)}catch(r){if("SyntaxError"!==r.name)throw r}return(n||JSON.stringify)(e)}(e)):e}],transformResponse:[function(e){const t=this.transitional||le.transitional,n=t&&t.forcedJSONParsing,r="json"===this.responseType;if(e&&j.isString(e)&&(n&&!this.responseType||r)){const n=!(t&&t.silentJSONParsing)&&r;try{return JSON.parse(e)}catch(o){if(n){if("SyntaxError"===o.name)throw V.from(o,V.ERR_BAD_RESPONSE,this,null,this.response);throw o}}}return e}],timeout:0,xsrfCookieName:"XSRF-TOKEN",xsrfHeaderName:"X-XSRF-TOKEN",maxContentLength:-1,maxBodyLength:-1,env:{FormData:se.classes.FormData,Blob:se.classes.Blob},validateStatus:function(e){return e>=200&&e<300},headers:{common:{Accept:"application/json, text/plain, */*","Content-Type":void 0}}};j.forEach(["delete","get","head","post","put","patch"],(e=>{le.headers[e]={}}));const 
ce=le,fe=j.toObjectSet(["age","authorization","content-length","content-type","etag","expires","from","host","if-modified-since","if-unmodified-since","last-modified","location","max-forwards","proxy-authorization","referer","retry-after","user-agent"]),de=Symbol("internals");function he(e){return e&&String(e).trim().toLowerCase()}function pe(e){return!1===e||null==e?e:j.isArray(e)?e.map(pe):String(e)}function ge(e,t,n,r,o){return j.isFunction(r)?r.call(this,t,n):(o&&(t=n),j.isString(t)?j.isString(r)?-1!==t.indexOf(r):j.isRegExp(r)?r.test(t):void 0:void 0)}class ve{constructor(e){e&&this.set(e)}set(e,t,n){const r=this;function o(e,t,n){const o=he(t);if(!o)throw new Error("header name must be a non-empty string");const i=j.findKey(r,o);(!i||void 0===r[i]||!0===n||void 0===n&&!1!==r[i])&&(r[i||t]=pe(e))}const i=(e,t)=>j.forEach(e,((e,n)=>o(e,n,t)));return j.isPlainObject(e)||e instanceof this.constructor?i(e,t):j.isString(e)&&(e=e.trim())&&!/^[-_a-zA-Z0-9^`|~,!#$%&'*+.]+$/.test(e.trim())?i((e=>{const t={};let n,r,o;return e&&e.split("\n").forEach((function(e){o=e.indexOf(":"),n=e.substring(0,o).trim().toLowerCase(),r=e.substring(o+1).trim(),!n||t[n]&&fe[n]||("set-cookie"===n?t[n]?t[n].push(r):t[n]=[r]:t[n]=t[n]?t[n]+", "+r:r)})),t})(e),t):null!=e&&o(t,e,n),this}get(e,t){if(e=he(e)){const n=j.findKey(this,e);if(n){const e=this[n];if(!t)return e;if(!0===t)return function(e){const t=Object.create(null),n=/([^\s,;=]+)\s*(?:=\s*([^,;]+))?/g;let r;for(;r=n.exec(e);)t[r[1]]=r[2];return t}(e);if(j.isFunction(t))return t.call(this,e,n);if(j.isRegExp(t))return t.exec(e);throw new TypeError("parser must be boolean|regexp|function")}}}has(e,t){if(e=he(e)){const n=j.findKey(this,e);return!(!n||void 0===this[n]||t&&!ge(0,this[n],n,t))}return!1}delete(e,t){const n=this;let r=!1;function o(e){if(e=he(e)){const o=j.findKey(n,e);!o||t&&!ge(0,n[o],o,t)||(delete n[o],r=!0)}}return j.isArray(e)?e.forEach(o):o(e),r}clear(e){const t=Object.keys(this);let n=t.length,r=!1;for(;n--;){const o=t[n];e&&!ge(0,this[o],o,e,!0)||(delete this[o],r=!0)}return r}normalize(e){const t=this,n={};return j.forEach(this,((r,o)=>{const i=j.findKey(n,o);if(i)return t[i]=pe(r),void delete t[o];const a=e?function(e){return e.trim().toLowerCase().replace(/([a-z\d])(\w*)/g,((e,t,n)=>t.toUpperCase()+n))}(o):String(o).trim();a!==o&&delete t[o],t[a]=pe(r),n[a]=!0})),this}concat(...e){return this.constructor.concat(this,...e)}toJSON(e){const t=Object.create(null);return j.forEach(this,((n,r)=>{null!=n&&!1!==n&&(t[r]=e&&j.isArray(n)?n.join(", "):n)})),t}[Symbol.iterator](){return Object.entries(this.toJSON())[Symbol.iterator]()}toString(){return Object.entries(this.toJSON()).map((([e,t])=>e+": "+t)).join("\n")}get[Symbol.toStringTag](){return"AxiosHeaders"}static from(e){return e instanceof this?e:new this(e)}static concat(e,...t){const n=new this(e);return t.forEach((e=>n.set(e))),n}static accessor(e){const t=(this[de]=this[de]={accessors:{}}).accessors,n=this.prototype;function r(e){const r=he(e);t[r]||(!function(e,t){const n=j.toCamelCase(" "+t);["get","set","has"].forEach((r=>{Object.defineProperty(e,r+n,{value:function(e,n,o){return this[r].call(this,t,e,n,o)},configurable:!0})}))}(n,e),t[r]=!0)}return j.isArray(e)?e.forEach(r):r(e),this}}ve.accessor(["Content-Type","Content-Length","Accept","Accept-Encoding","User-Agent","Authorization"]),j.reduceDescriptors(ve.prototype,(({value:e},t)=>{let n=t[0].toUpperCase()+t.slice(1);return{get:()=>e,set(e){this[n]=e}}})),j.freezeMethods(ve);const ye=ve;function me(e,t){const 
n=this||ce,r=t||n,o=ye.from(r.headers);let i=r.data;return j.forEach(e,(function(e){i=e.call(n,i,o.normalize(),t?t.status:void 0)})),o.normalize(),i}function be(e){return!(!e||!e.__CANCEL__)}function _e(e,t,n){V.call(this,null==e?"canceled":e,V.ERR_CANCELED,t,n),this.name="CanceledError"}j.inherits(_e,V,{__CANCEL__:!0});const we=_e;const xe=se.hasStandardBrowserEnv?{write(e,t,n,r,o,i){const a=[e+"="+encodeURIComponent(t)];j.isNumber(n)&&a.push("expires="+new Date(n).toGMTString()),j.isString(r)&&a.push("path="+r),j.isString(o)&&a.push("domain="+o),!0===i&&a.push("secure"),document.cookie=a.join("; ")},read(e){const t=document.cookie.match(new RegExp("(^|;\\s*)("+e+")=([^;]*)"));return t?decodeURIComponent(t[3]):null},remove(e){this.write(e,"",Date.now()-864e5)}}:{write(){},read:()=>null,remove(){}};function Se(e,t){return e&&!/^([a-z][a-z\d+\-.]*:)?\/\//i.test(t)?function(e,t){return t?e.replace(/\/?\/$/,"")+"/"+t.replace(/^\/+/,""):e}(e,t):t}const Ae=se.hasStandardBrowserEnv?function(){const e=/(msie|trident)/i.test(navigator.userAgent),t=document.createElement("a");let n;function r(n){let r=n;return e&&(t.setAttribute("href",r),r=t.href),t.setAttribute("href",r),{href:t.href,protocol:t.protocol?t.protocol.replace(/:$/,""):"",host:t.host,search:t.search?t.search.replace(/^\?/,""):"",hash:t.hash?t.hash.replace(/^#/,""):"",hostname:t.hostname,port:t.port,pathname:"/"===t.pathname.charAt(0)?t.pathname:"/"+t.pathname}}return n=r(window.location.href),function(e){const t=j.isString(e)?r(e):e;return t.protocol===n.protocol&&t.host===n.host}}():function(){return!0};const Ee=function(e,t){e=e||10;const n=new Array(e),r=new Array(e);let o,i=0,a=0;return t=void 0!==t?t:1e3,function(s){const u=Date.now(),l=r[a];o||(o=u),n[i]=s,r[i]=u;let c=a,f=0;for(;c!==i;)f+=n[c++],c%=e;if(i=(i+1)%e,i===a&&(a=(a+1)%e),u-o{const i=o.loaded,a=o.lengthComputable?o.total:void 0,s=i-n,u=r(s);n=i;const l={loaded:i,total:a,progress:a?i/a:void 0,bytes:s,rate:u||void 0,estimated:u&&a&&i<=a?(a-i)/u:void 0,event:o};l[t?"download":"upload"]=!0,e(l)}}const Re={http:null,xhr:"undefined"!==typeof XMLHttpRequest&&function(e){return new Promise((function(t,n){let r=e.data;const o=ye.from(e.headers).normalize();let i,a,{responseType:s,withXSRFToken:u}=e;function l(){e.cancelToken&&e.cancelToken.unsubscribe(i),e.signal&&e.signal.removeEventListener("abort",i)}if(j.isFormData(r))if(se.hasStandardBrowserEnv||se.hasStandardBrowserWebWorkerEnv)o.setContentType(!1);else if(!1!==(a=o.getContentType())){const[e,...t]=a?a.split(";").map((e=>e.trim())).filter(Boolean):[];o.setContentType([e||"multipart/form-data",...t].join("; "))}let c=new XMLHttpRequest;if(e.auth){const t=e.auth.username||"",n=e.auth.password?unescape(encodeURIComponent(e.auth.password)):"";o.set("Authorization","Basic "+btoa(t+":"+n))}const f=Se(e.baseURL,e.url);function d(){if(!c)return;const r=ye.from("getAllResponseHeaders"in c&&c.getAllResponseHeaders());!function(e,t,n){const r=n.config.validateStatus;n.status&&r&&!r(n.status)?t(new V("Request failed with status code "+n.status,[V.ERR_BAD_REQUEST,V.ERR_BAD_RESPONSE][Math.floor(n.status/100)-4],n.config,n.request,n)):e(n)}((function(e){t(e),l()}),(function(e){n(e),l()}),{data:s&&"text"!==s&&"json"!==s?c.response:c.responseText,status:c.status,statusText:c.statusText,headers:r,config:e,request:c}),c=null}if(c.open(e.method.toUpperCase(),J(f,e.params,e.paramsSerializer),!0),c.timeout=e.timeout,"onloadend"in 
c?c.onloadend=d:c.onreadystatechange=function(){c&&4===c.readyState&&(0!==c.status||c.responseURL&&0===c.responseURL.indexOf("file:"))&&setTimeout(d)},c.onabort=function(){c&&(n(new V("Request aborted",V.ECONNABORTED,e,c)),c=null)},c.onerror=function(){n(new V("Network Error",V.ERR_NETWORK,e,c)),c=null},c.ontimeout=function(){let t=e.timeout?"timeout of "+e.timeout+"ms exceeded":"timeout exceeded";const r=e.transitional||te;e.timeoutErrorMessage&&(t=e.timeoutErrorMessage),n(new V(t,r.clarifyTimeoutError?V.ETIMEDOUT:V.ECONNABORTED,e,c)),c=null},se.hasStandardBrowserEnv&&(u&&j.isFunction(u)&&(u=u(e)),u||!1!==u&&Ae(f))){const t=e.xsrfHeaderName&&e.xsrfCookieName&&xe.read(e.xsrfCookieName);t&&o.set(e.xsrfHeaderName,t)}void 0===r&&o.setContentType(null),"setRequestHeader"in c&&j.forEach(o.toJSON(),(function(e,t){c.setRequestHeader(t,e)})),j.isUndefined(e.withCredentials)||(c.withCredentials=!!e.withCredentials),s&&"json"!==s&&(c.responseType=e.responseType),"function"===typeof e.onDownloadProgress&&c.addEventListener("progress",Ce(e.onDownloadProgress,!0)),"function"===typeof e.onUploadProgress&&c.upload&&c.upload.addEventListener("progress",Ce(e.onUploadProgress)),(e.cancelToken||e.signal)&&(i=t=>{c&&(n(!t||t.type?new we(null,e,c):t),c.abort(),c=null)},e.cancelToken&&e.cancelToken.subscribe(i),e.signal&&(e.signal.aborted?i():e.signal.addEventListener("abort",i)));const h=function(e){const t=/^([-+\w]{1,25})(:?\/\/|:)/.exec(e);return t&&t[1]||""}(f);h&&-1===se.protocols.indexOf(h)?n(new V("Unsupported protocol "+h+":",V.ERR_BAD_REQUEST,e)):c.send(r||null)}))}};j.forEach(Re,((e,t)=>{if(e){try{Object.defineProperty(e,"name",{value:t})}catch(n){}Object.defineProperty(e,"adapterName",{value:t})}}));const Oe=e=>`- ${e}`,Te=e=>j.isFunction(e)||null===e||!1===e,ke=e=>{e=j.isArray(e)?e:[e];const{length:t}=e;let n,r;const o={};for(let i=0;i`adapter ${e} `+(!1===t?"is not supported by the environment":"is not available in the build")));let n=t?e.length>1?"since :\n"+e.map(Oe).join("\n"):" "+Oe(e[0]):"as no adapter specified";throw new V("There is no suitable adapter to dispatch the request "+n,"ERR_NOT_SUPPORT")}return r};function Me(e){if(e.cancelToken&&e.cancelToken.throwIfRequested(),e.signal&&e.signal.aborted)throw new we(null,e)}function Pe(e){Me(e),e.headers=ye.from(e.headers),e.data=me.call(e,e.transformRequest),-1!==["post","put","patch"].indexOf(e.method)&&e.headers.setContentType("application/x-www-form-urlencoded",!1);return ke(e.adapter||ce.adapter)(e).then((function(t){return Me(e),t.data=me.call(e,e.transformResponse,t),t.headers=ye.from(t.headers),t}),(function(t){return be(t)||(Me(e),t&&t.response&&(t.response.data=me.call(e,e.transformResponse,t.response),t.response.headers=ye.from(t.response.headers))),Promise.reject(t)}))}const Ie=e=>e instanceof ye?{...e}:e;function Ne(e,t){t=t||{};const n={};function r(e,t,n){return j.isPlainObject(e)&&j.isPlainObject(t)?j.merge.call({caseless:n},e,t):j.isPlainObject(t)?j.merge({},t):j.isArray(t)?t.slice():t}function o(e,t,n){return j.isUndefined(t)?j.isUndefined(e)?void 0:r(void 0,e,n):r(e,t,n)}function i(e,t){if(!j.isUndefined(t))return r(void 0,t)}function a(e,t){return j.isUndefined(t)?j.isUndefined(e)?void 0:r(void 0,e):r(void 0,t)}function s(n,o,i){return i in t?r(n,o):i in e?r(void 0,n):void 0}const 
u={url:i,method:i,data:i,baseURL:a,transformRequest:a,transformResponse:a,paramsSerializer:a,timeout:a,timeoutMessage:a,withCredentials:a,withXSRFToken:a,adapter:a,responseType:a,xsrfCookieName:a,xsrfHeaderName:a,onUploadProgress:a,onDownloadProgress:a,decompress:a,maxContentLength:a,maxBodyLength:a,beforeRedirect:a,transport:a,httpAgent:a,httpsAgent:a,cancelToken:a,socketPath:a,responseEncoding:a,validateStatus:s,headers:(e,t)=>o(Ie(e),Ie(t),!0)};return j.forEach(Object.keys(Object.assign({},e,t)),(function(r){const i=u[r]||o,a=i(e[r],t[r],r);j.isUndefined(a)&&i!==s||(n[r]=a)})),n}const De="1.6.8",Le={};["object","boolean","number","function","string","symbol"].forEach(((e,t)=>{Le[e]=function(n){return typeof n===e||"a"+(t<1?"n ":" ")+e}}));const Fe={};Le.transitional=function(e,t,n){function r(e,t){return"[Axios v1.6.8] Transitional option '"+e+"'"+t+(n?". "+n:"")}return(n,o,i)=>{if(!1===e)throw new V(r(o," has been removed"+(t?" in "+t:"")),V.ERR_DEPRECATED);return t&&!Fe[o]&&(Fe[o]=!0,console.warn(r(o," has been deprecated since v"+t+" and will be removed in the near future"))),!e||e(n,o,i)}};const je={assertOptions:function(e,t,n){if("object"!==typeof e)throw new V("options must be an object",V.ERR_BAD_OPTION_VALUE);const r=Object.keys(e);let o=r.length;for(;o-- >0;){const i=r[o],a=t[i];if(a){const t=e[i],n=void 0===t||a(t,i,e);if(!0!==n)throw new V("option "+i+" must be "+n,V.ERR_BAD_OPTION_VALUE)}else if(!0!==n)throw new V("Unknown option "+i,V.ERR_BAD_OPTION)}},validators:Le},Ue=je.validators;class ze{constructor(e){this.defaults=e,this.interceptors={request:new ee,response:new ee}}async request(e,t){try{return await this._request(e,t)}catch(n){if(n instanceof Error){let e;Error.captureStackTrace?Error.captureStackTrace(e={}):e=new Error;const t=e.stack?e.stack.replace(/^.+\n/,""):"";n.stack?t&&!String(n.stack).endsWith(t.replace(/^.+\n.+\n/,""))&&(n.stack+="\n"+t):n.stack=t}throw n}}_request(e,t){"string"===typeof e?(t=t||{}).url=e:t=e||{},t=Ne(this.defaults,t);const{transitional:n,paramsSerializer:r,headers:o}=t;void 0!==n&&je.assertOptions(n,{silentJSONParsing:Ue.transitional(Ue.boolean),forcedJSONParsing:Ue.transitional(Ue.boolean),clarifyTimeoutError:Ue.transitional(Ue.boolean)},!1),null!=r&&(j.isFunction(r)?t.paramsSerializer={serialize:r}:je.assertOptions(r,{encode:Ue.function,serialize:Ue.function},!0)),t.method=(t.method||this.defaults.method||"get").toLowerCase();let i=o&&j.merge(o.common,o[t.method]);o&&j.forEach(["delete","get","head","post","put","patch","common"],(e=>{delete o[e]})),t.headers=ye.concat(i,o);const a=[];let s=!0;this.interceptors.request.forEach((function(e){"function"===typeof e.runWhen&&!1===e.runWhen(t)||(s=s&&e.synchronous,a.unshift(e.fulfilled,e.rejected))}));const u=[];let l;this.interceptors.response.forEach((function(e){u.push(e.fulfilled,e.rejected)}));let c,f=0;if(!s){const e=[Pe.bind(this),void 0];for(e.unshift.apply(e,a),e.push.apply(e,u),c=e.length,l=Promise.resolve(t);f{if(!n._listeners)return;let t=n._listeners.length;for(;t-- >0;)n._listeners[t](e);n._listeners=null})),this.promise.then=e=>{let t;const r=new Promise((e=>{n.subscribe(e),t=e})).then(e);return r.cancel=function(){n.unsubscribe(t)},r},e((function(e,r,o){n.reason||(n.reason=new we(e,r,o),t(n.reason))}))}throwIfRequested(){if(this.reason)throw this.reason}subscribe(e){this.reason?e(this.reason):this._listeners?this._listeners.push(e):this._listeners=[e]}unsubscribe(e){if(!this._listeners)return;const t=this._listeners.indexOf(e);-1!==t&&this._listeners.splice(t,1)}static 
source(){let e;return{token:new Ve((function(t){e=t})),cancel:e}}}const $e=Ve;const He={Continue:100,SwitchingProtocols:101,Processing:102,EarlyHints:103,Ok:200,Created:201,Accepted:202,NonAuthoritativeInformation:203,NoContent:204,ResetContent:205,PartialContent:206,MultiStatus:207,AlreadyReported:208,ImUsed:226,MultipleChoices:300,MovedPermanently:301,Found:302,SeeOther:303,NotModified:304,UseProxy:305,Unused:306,TemporaryRedirect:307,PermanentRedirect:308,BadRequest:400,Unauthorized:401,PaymentRequired:402,Forbidden:403,NotFound:404,MethodNotAllowed:405,NotAcceptable:406,ProxyAuthenticationRequired:407,RequestTimeout:408,Conflict:409,Gone:410,LengthRequired:411,PreconditionFailed:412,PayloadTooLarge:413,UriTooLong:414,UnsupportedMediaType:415,RangeNotSatisfiable:416,ExpectationFailed:417,ImATeapot:418,MisdirectedRequest:421,UnprocessableEntity:422,Locked:423,FailedDependency:424,TooEarly:425,UpgradeRequired:426,PreconditionRequired:428,TooManyRequests:429,RequestHeaderFieldsTooLarge:431,UnavailableForLegalReasons:451,InternalServerError:500,NotImplemented:501,BadGateway:502,ServiceUnavailable:503,GatewayTimeout:504,HttpVersionNotSupported:505,VariantAlsoNegotiates:506,InsufficientStorage:507,LoopDetected:508,NotExtended:510,NetworkAuthenticationRequired:511};Object.entries(He).forEach((([e,t])=>{He[t]=e}));const We=He;const qe=function e(t){const n=new Be(t),r=o(Be.prototype.request,n);return j.extend(r,Be.prototype,n,{allOwnKeys:!0}),j.extend(r,n,null,{allOwnKeys:!0}),r.create=function(n){return e(Ne(t,n))},r}(ce);qe.Axios=Be,qe.CanceledError=we,qe.CancelToken=$e,qe.isCancel=be,qe.VERSION=De,qe.toFormData=G,qe.AxiosError=V,qe.Cancel=qe.CanceledError,qe.all=function(e){return Promise.all(e)},qe.spread=function(e){return function(t){return e.apply(null,t)}},qe.isAxiosError=function(e){return j.isObject(e)&&!0===e.isAxiosError},qe.mergeConfig=Ne,qe.AxiosHeaders=ye,qe.formToJSON=e=>ue(j.isHTMLForm(e)?new FormData(e):e),qe.getAdapter=ke,qe.HttpStatusCode=We,qe.default=qe;const Ge=qe},12902:(e,t,n)=>{"use strict";function r(e,t){return null==e||null==t?NaN:et?1:e>=t?0:NaN}n.d(t,{A:()=>r})},52016:(e,t,n)=>{"use strict";n.d(t,{Ay:()=>c,Jj:()=>s,WD:()=>l,ah:()=>u});var r=n(12902),o=n(86037),i=n(83131);const a=(0,o.A)(r.A),s=a.right,u=a.left,l=(0,o.A)(i.A).center,c=s},86037:(e,t,n)=>{"use strict";n.d(t,{A:()=>i});var r=n(12902),o=n(41180);function i(e){let t,n,i;function s(e,r,o=0,i=e.length){if(o>>1;n(e[t],r)<0?o=t+1:i=t}while(o(0,r.A)(e(t),n),i=(t,n)=>e(t)-n):(t=e===r.A||e===o.A?e:a,n=e,i=e),{left:s,center:function(e,t,n=0,r=e.length){const o=s(e,t,n,r-1);return o>n&&i(e[o-1],t)>-i(e[o],t)?o-1:o},right:function(e,r,o=0,i=e.length){if(o>>1;n(e[t],r)<=0?o=t+1:i=t}while(o{"use strict";function r(e,t){return null==e||null==t?NaN:te?1:t>=e?0:NaN}n.d(t,{A:()=>r})},36205:(e,t,n)=>{"use strict";n.d(t,{A:()=>o});var r=n(12902);function o(e,t=r.A){let n,o=!1;if(1===t.length){let i;for(const a of e){const e=t(a);(o?(0,r.A)(e,i)>0:0===(0,r.A)(e,e))&&(n=a,i=e,o=!0)}}else for(const r of e)(o?t(r,n)>0:0===t(r,r))&&(n=r,o=!0);return n}},30846:(e,t,n)=>{"use strict";function r(e,t){let n;if(void 0===t)for(const r of e)null!=r&&(n=r)&&(n=r);else{let r=-1;for(let o of e)null!=(o=t(o,++r,e))&&(n=o)&&(n=o)}return n}n.d(t,{A:()=>r})},4504:(e,t,n)=>{"use strict";function r(e,t){let n,r=-1,o=-1;if(void 0===t)for(const i of e)++o,null!=i&&(n=i)&&(n=i,r=o);else for(let i of e)null!=(i=t(i,++o,e))&&(n=i)&&(n=i,r=o);return r}n.d(t,{A:()=>r})},49216:(e,t,n)=>{"use strict";function r(e,t){let n;if(void 0===t)for(const r of 
e)null!=r&&(n>r||void 0===n&&r>=r)&&(n=r);else{let r=-1;for(let o of e)null!=(o=t(o,++r,e))&&(n>o||void 0===n&&o>=o)&&(n=o)}return n}n.d(t,{A:()=>r})},98026:(e,t,n)=>{"use strict";function r(e,t){let n,r=-1,o=-1;if(void 0===t)for(const i of e)++o,null!=i&&(n>i||void 0===n&&i>=i)&&(n=i,r=o);else for(let i of e)null!=(i=t(i,++o,e))&&(n>i||void 0===n&&i>=i)&&(n=i,r=o);return r}n.d(t,{A:()=>r})},83131:(e,t,n)=>{"use strict";function r(e){return null===e?NaN:+e}function*o(e,t){if(void 0===t)for(let n of e)null!=n&&(n=+n)>=n&&(yield n);else{let n=-1;for(let r of e)null!=(r=t(r,++n,e))&&(r=+r)>=r&&(yield r)}}n.d(t,{A:()=>r,n:()=>o})},78314:(e,t,n)=>{"use strict";function r(e,t){return Array.from(t,(t=>e[t]))}n.d(t,{A:()=>r})},84855:(e,t,n)=>{"use strict";n.d(t,{AX:()=>h,Ay:()=>f,Z4:()=>d});var r=n(30846),o=n(4504),i=n(49216),a=n(98026),s=n(15671),u=n(83131),l=n(68886),c=n(36205);function f(e,t,n){if((o=(e=Float64Array.from((0,u.n)(e,n))).length)&&!isNaN(t=+t)){if(t<=0||o<2)return(0,i.A)(e);if(t>=1)return(0,r.A)(e);var o,a=(o-1)*t,l=Math.floor(a),c=(0,r.A)((0,s.A)(e,l).subarray(0,l+1));return c+((0,i.A)(e.subarray(l+1))-c)*(a-l)}}function d(e,t,n=u.A){if((r=e.length)&&!isNaN(t=+t)){if(t<=0||r<2)return+n(e[0],0,e);if(t>=1)return+n(e[r-1],r-1,e);var r,o=(r-1)*t,i=Math.floor(o),a=+n(e[i],i,e);return a+(+n(e[i+1],i+1,e)-a)*(o-i)}}function h(e,t,n=u.A){if(!isNaN(t=+t)){if(r=Float64Array.from(e,((t,r)=>(0,u.A)(n(e[r],r,e)))),t<=0)return(0,a.A)(r);if(t>=1)return(0,o.A)(r);var r,i=Uint32Array.from(e,((e,t)=>t)),f=r.length-1,d=Math.floor(f*t);return(0,s.A)(i,d,0,f,((e,t)=>(0,l.o2)(r[e],r[t]))),(d=(0,c.A)(i.subarray(0,d+1),(e=>r[e])))>=0?d:-1}}},15671:(e,t,n)=>{"use strict";n.d(t,{A:()=>o});var r=n(68886);function o(e,t,n=0,a=1/0,s){if(t=Math.floor(t),n=Math.floor(Math.max(0,n)),a=Math.floor(Math.min(e.length-1,a)),!(n<=t&&t<=a))return e;for(s=void 0===s?r.o2:(0,r.JC)(s);a>n;){if(a-n>600){const r=a-n+1,i=t-n+1,u=Math.log(r),l=.5*Math.exp(2*u/3),c=.5*Math.sqrt(u*l*(r-l)/r)*(i-r/2<0?-1:1);o(e,t,Math.max(n,Math.floor(t-i*l/r+c)),Math.min(a,Math.floor(t+(r-i)*l/r+c)),s)}const r=e[t];let u=n,l=a;for(i(e,n,t),s(e[a],r)>0&&i(e,n,a);u0;)--l}0===s(e[n],r)?i(e,n,l):(++l,i(e,l,a)),l<=t&&(n=l+1),t<=l&&(a=l-1)}return e}function i(e,t,n){const r=e[t];e[t]=e[n],e[n]=r}},6561:(e,t,n)=>{"use strict";function r(e,t,n){e=+e,t=+t,n=(o=arguments.length)<2?(t=e,e=0,1):o<3?1:+n;for(var r=-1,o=0|Math.max(0,Math.ceil((t-e)/n)),i=new Array(o);++rr})},68886:(e,t,n)=>{"use strict";n.d(t,{Ay:()=>i,JC:()=>a,o2:()=>s});var r=n(12902),o=n(78314);function i(e,...t){if("function"!==typeof e[Symbol.iterator])throw new TypeError("values is not iterable");e=Array.from(e);let[n]=t;if(n&&2!==n.length||t.length>1){const r=Uint32Array.from(e,((e,t)=>t));return t.length>1?(t=t.map((t=>e.map(t))),r.sort(((e,n)=>{for(const r of t){const t=s(r[e],r[n]);if(t)return t}}))):(n=e.map(n),r.sort(((e,t)=>s(n[e],n[t])))),(0,o.A)(e,r)}return e.sort(a(n))}function a(e=r.A){if(e===r.A)return s;if("function"!==typeof e)throw new TypeError("compare is not a function");return(t,n)=>{const r=e(t,n);return r||0===r?r:(0===e(n,n))-(0===e(t,t))}}function s(e,t){return(null==e||!(e>=e))-(null==t||!(t>=t))||(et?1:0)}},6946:(e,t,n)=>{"use strict";n.d(t,{Ay:()=>s,lq:()=>u,sG:()=>l});const r=Math.sqrt(50),o=Math.sqrt(10),i=Math.sqrt(2);function a(e,t,n){const s=(t-e)/Math.max(0,n),u=Math.floor(Math.log10(s)),l=s/Math.pow(10,u),c=l>=r?10:l>=o?5:l>=i?2:1;let f,d,h;return 
u<0?(h=Math.pow(10,-u)/c,f=Math.round(e*h),d=Math.round(t*h),f/ht&&--d,h=-h):(h=Math.pow(10,u)*c,f=Math.round(e/h),d=Math.round(t/h),f*ht&&--d),d0))return[];if((e=+e)===(t=+t))return[e];const r=t=o))return[];const u=i-o+1,l=new Array(u);if(r)if(s<0)for(let a=0;a{"use strict";n.d(t,{Ay:()=>_,Gw:()=>E,KI:()=>I,Q1:()=>o,Qh:()=>A,Uw:()=>a,b:()=>S,ef:()=>i});var r=n(40871);function o(){}var i=.7,a=1/i,s="\\s*([+-]?\\d+)\\s*",u="\\s*([+-]?(?:\\d*\\.)?\\d+(?:[eE][+-]?\\d+)?)\\s*",l="\\s*([+-]?(?:\\d*\\.)?\\d+(?:[eE][+-]?\\d+)?)%\\s*",c=/^#([0-9a-f]{3,8})$/,f=new RegExp(`^rgb\\(${s},${s},${s}\\)$`),d=new RegExp(`^rgb\\(${l},${l},${l}\\)$`),h=new RegExp(`^rgba\\(${s},${s},${s},${u}\\)$`),p=new RegExp(`^rgba\\(${l},${l},${l},${u}\\)$`),g=new RegExp(`^hsl\\(${u},${l},${l}\\)$`),v=new RegExp(`^hsla\\(${u},${l},${l},${u}\\)$`),y={aliceblue:15792383,antiquewhite:16444375,aqua:65535,aquamarine:8388564,azure:15794175,beige:16119260,bisque:16770244,black:0,blanchedalmond:16772045,blue:255,blueviolet:9055202,brown:10824234,burlywood:14596231,cadetblue:6266528,chartreuse:8388352,chocolate:13789470,coral:16744272,cornflowerblue:6591981,cornsilk:16775388,crimson:14423100,cyan:65535,darkblue:139,darkcyan:35723,darkgoldenrod:12092939,darkgray:11119017,darkgreen:25600,darkgrey:11119017,darkkhaki:12433259,darkmagenta:9109643,darkolivegreen:5597999,darkorange:16747520,darkorchid:10040012,darkred:9109504,darksalmon:15308410,darkseagreen:9419919,darkslateblue:4734347,darkslategray:3100495,darkslategrey:3100495,darkturquoise:52945,darkviolet:9699539,deeppink:16716947,deepskyblue:49151,dimgray:6908265,dimgrey:6908265,dodgerblue:2003199,firebrick:11674146,floralwhite:16775920,forestgreen:2263842,fuchsia:16711935,gainsboro:14474460,ghostwhite:16316671,gold:16766720,goldenrod:14329120,gray:8421504,green:32768,greenyellow:11403055,grey:8421504,honeydew:15794160,hotpink:16738740,indianred:13458524,indigo:4915330,ivory:16777200,khaki:15787660,lavender:15132410,lavenderblush:16773365,lawngreen:8190976,lemonchiffon:16775885,lightblue:11393254,lightcoral:15761536,lightcyan:14745599,lightgoldenrodyellow:16448210,lightgray:13882323,lightgreen:9498256,lightgrey:13882323,lightpink:16758465,lightsalmon:16752762,lightseagreen:2142890,lightskyblue:8900346,lightslategray:7833753,lightslategrey:7833753,lightsteelblue:11584734,lightyellow:16777184,lime:65280,limegreen:3329330,linen:16445670,magenta:16711935,maroon:8388608,mediumaquamarine:6737322,mediumblue:205,mediumorchid:12211667,mediumpurple:9662683,mediumseagreen:3978097,mediumslateblue:8087790,mediumspringgreen:64154,mediumturquoise:4772300,mediumvioletred:13047173,midnightblue:1644912,mintcream:16121850,mistyrose:16770273,moccasin:16770229,navajowhite:16768685,navy:128,oldlace:16643558,olive:8421376,olivedrab:7048739,orange:16753920,orangered:16729344,orchid:14315734,palegoldenrod:15657130,palegreen:10025880,paleturquoise:11529966,palevioletred:14381203,papayawhip:16773077,peachpuff:16767673,peru:13468991,pink:16761035,plum:14524637,powderblue:11591910,purple:8388736,rebeccapurple:6697881,red:16711680,rosybrown:12357519,royalblue:4286945,saddlebrown:9127187,salmon:16416882,sandybrown:16032864,seagreen:3050327,seashell:16774638,sienna:10506797,silver:12632256,skyblue:8900331,slateblue:6970061,slategray:7372944,slategrey:7372944,snow:16775930,springgreen:65407,steelblue:4620980,tan:13808780,teal:32896,thistle:14204888,tomato:16737095,turquoise:4251856,violet:15631086,wheat:16113331,white:16777215,whitesmoke:16119285,yellow:16776960,yellowgreen:10145074};function m(){return 
this.rgb().formatHex()}function b(){return this.rgb().formatRgb()}function _(e){var t,n;return e=(e+"").trim().toLowerCase(),(t=c.exec(e))?(n=t[1].length,t=parseInt(t[1],16),6===n?w(t):3===n?new E(t>>8&15|t>>4&240,t>>4&15|240&t,(15&t)<<4|15&t,1):8===n?x(t>>24&255,t>>16&255,t>>8&255,(255&t)/255):4===n?x(t>>12&15|t>>8&240,t>>8&15|t>>4&240,t>>4&15|240&t,((15&t)<<4|15&t)/255):null):(t=f.exec(e))?new E(t[1],t[2],t[3],1):(t=d.exec(e))?new E(255*t[1]/100,255*t[2]/100,255*t[3]/100,1):(t=h.exec(e))?x(t[1],t[2],t[3],t[4]):(t=p.exec(e))?x(255*t[1]/100,255*t[2]/100,255*t[3]/100,t[4]):(t=g.exec(e))?M(t[1],t[2]/100,t[3]/100,1):(t=v.exec(e))?M(t[1],t[2]/100,t[3]/100,t[4]):y.hasOwnProperty(e)?w(y[e]):"transparent"===e?new E(NaN,NaN,NaN,0):null}function w(e){return new E(e>>16&255,e>>8&255,255&e,1)}function x(e,t,n,r){return r<=0&&(e=t=n=NaN),new E(e,t,n,r)}function S(e){return e instanceof o||(e=_(e)),e?new E((e=e.rgb()).r,e.g,e.b,e.opacity):new E}function A(e,t,n,r){return 1===arguments.length?S(e):new E(e,t,n,null==r?1:r)}function E(e,t,n,r){this.r=+e,this.g=+t,this.b=+n,this.opacity=+r}function C(){return`#${k(this.r)}${k(this.g)}${k(this.b)}`}function R(){const e=O(this.opacity);return`${1===e?"rgb(":"rgba("}${T(this.r)}, ${T(this.g)}, ${T(this.b)}${1===e?")":`, ${e})`}`}function O(e){return isNaN(e)?1:Math.max(0,Math.min(1,e))}function T(e){return Math.max(0,Math.min(255,Math.round(e)||0))}function k(e){return((e=T(e))<16?"0":"")+e.toString(16)}function M(e,t,n,r){return r<=0?e=t=n=NaN:n<=0||n>=1?e=t=NaN:t<=0&&(e=NaN),new N(e,t,n,r)}function P(e){if(e instanceof N)return new N(e.h,e.s,e.l,e.opacity);if(e instanceof o||(e=_(e)),!e)return new N;if(e instanceof N)return e;var t=(e=e.rgb()).r/255,n=e.g/255,r=e.b/255,i=Math.min(t,n,r),a=Math.max(t,n,r),s=NaN,u=a-i,l=(a+i)/2;return u?(s=t===a?(n-r)/u+6*(n0&&l<1?0:s,new N(s,u,l,e.opacity)}function I(e,t,n,r){return 1===arguments.length?P(e):new N(e,t,n,null==r?1:r)}function N(e,t,n,r){this.h=+e,this.s=+t,this.l=+n,this.opacity=+r}function D(e){return(e=(e||0)%360)<0?e+360:e}function L(e){return Math.max(0,Math.min(1,e||0))}function F(e,t,n){return 255*(e<60?t+(n-t)*e/60:e<180?n:e<240?t+(n-t)*(240-e)/60:t)}(0,r.A)(o,_,{copy(e){return Object.assign(new this.constructor,this,e)},displayable(){return this.rgb().displayable()},hex:m,formatHex:m,formatHex8:function(){return this.rgb().formatHex8()},formatHsl:function(){return P(this).formatHsl()},formatRgb:b,toString:b}),(0,r.A)(E,A,(0,r.X)(o,{brighter(e){return e=null==e?a:Math.pow(a,e),new E(this.r*e,this.g*e,this.b*e,this.opacity)},darker(e){return e=null==e?i:Math.pow(i,e),new E(this.r*e,this.g*e,this.b*e,this.opacity)},rgb(){return this},clamp(){return new E(T(this.r),T(this.g),T(this.b),O(this.opacity))},displayable(){return-.5<=this.r&&this.r<255.5&&-.5<=this.g&&this.g<255.5&&-.5<=this.b&&this.b<255.5&&0<=this.opacity&&this.opacity<=1},hex:C,formatHex:C,formatHex8:function(){return`#${k(this.r)}${k(this.g)}${k(this.b)}${k(255*(isNaN(this.opacity)?1:this.opacity))}`},formatRgb:R,toString:R})),(0,r.A)(N,I,(0,r.X)(o,{brighter(e){return e=null==e?a:Math.pow(a,e),new N(this.h,this.s,this.l*e,this.opacity)},darker(e){return e=null==e?i:Math.pow(i,e),new N(this.h,this.s,this.l*e,this.opacity)},rgb(){var e=this.h%360+360*(this.h<0),t=isNaN(e)||isNaN(this.s)?0:this.s,n=this.l,r=n+(n<.5?n:1-n)*t,o=2*n-r;return new E(F(e>=240?e-240:e+120,o,r),F(e,o,r),F(e<120?e+240:e-120,o,r),this.opacity)},clamp(){return new 
N(D(this.h),L(this.s),L(this.l),O(this.opacity))},displayable(){return(0<=this.s&&this.s<=1||isNaN(this.s))&&0<=this.l&&this.l<=1&&0<=this.opacity&&this.opacity<=1},formatHsl(){const e=O(this.opacity);return`${1===e?"hsl(":"hsla("}${D(this.h)}, ${100*L(this.s)}%, ${100*L(this.l)}%${1===e?")":`, ${e})`}`}}))},40871:(e,t,n)=>{"use strict";function r(e,t,n){e.prototype=t.prototype=n,n.constructor=e}function o(e,t){var n=Object.create(e.prototype);for(var r in t)n[r]=t[r];return n}n.d(t,{A:()=>r,X:()=>o})},48701:(e,t,n)=>{"use strict";n.d(t,{Ay:()=>s,GP:()=>o,s:()=>i});var r,o,i,a=n(56390);function s(e){return r=(0,a.A)(e),o=r.format,i=r.formatPrefix,r}s({thousands:",",grouping:[3],currency:["$",""]})},47235:(e,t,n)=>{"use strict";n.d(t,{A:()=>o});var r=n(95204);function o(e){return(e=(0,r.f)(Math.abs(e)))?e[1]:NaN}},95204:(e,t,n)=>{"use strict";function r(e){return Math.abs(e=Math.round(e))>=1e21?e.toLocaleString("en").replace(/,/g,""):e.toString(10)}function o(e,t){if((n=(e=t?e.toExponential(t-1):e.toExponential()).indexOf("e"))<0)return null;var n,r=e.slice(0,n);return[r.length>1?r[0]+r.slice(2):r,+e.slice(n+1)]}n.d(t,{A:()=>r,f:()=>o})},1631:(e,t,n)=>{"use strict";n.d(t,{A:()=>o,S:()=>i});var r=/^(?:(.)?([<>=^]))?([+\-( ])?([$#])?(0)?(\d+)?(,)?(\.\d+)?(~)?([a-z%])?$/i;function o(e){if(!(t=r.exec(e)))throw new Error("invalid format: "+e);var t;return new i({fill:t[1],align:t[2],sign:t[3],symbol:t[4],zero:t[5],width:t[6],comma:t[7],precision:t[8]&&t[8].slice(1),trim:t[9],type:t[10]})}function i(e){this.fill=void 0===e.fill?" ":e.fill+"",this.align=void 0===e.align?">":e.align+"",this.sign=void 0===e.sign?"-":e.sign+"",this.symbol=void 0===e.symbol?"":e.symbol+"",this.zero=!!e.zero,this.width=void 0===e.width?void 0:+e.width,this.comma=!!e.comma,this.precision=void 0===e.precision?void 0:+e.precision,this.trim=!!e.trim,this.type=void 0===e.type?"":e.type+""}o.prototype=i.prototype,i.prototype.toString=function(){return this.fill+this.align+this.sign+this.symbol+(this.zero?"0":"")+(void 0===this.width?"":Math.max(1,0|this.width))+(this.comma?",":"")+(void 0===this.precision?"":"."+Math.max(0,0|this.precision))+(this.trim?"~":"")+this.type}},56390:(e,t,n)=>{"use strict";n.d(t,{A:()=>d});var r=n(47235);var o=n(1631);var i,a=n(95204);function s(e,t){var n=(0,a.f)(e,t);if(!n)return e+"";var r=n[0],o=n[1];return o<0?"0."+new Array(-o).join("0")+r:r.length>o+1?r.slice(0,o+1)+"."+r.slice(o+1):r+new Array(o-r.length+2).join("0")}const u={"%":(e,t)=>(100*e).toFixed(t),b:e=>Math.round(e).toString(2),c:e=>e+"",d:a.A,e:(e,t)=>e.toExponential(t),f:(e,t)=>e.toFixed(t),g:(e,t)=>e.toPrecision(t),o:e=>Math.round(e).toString(8),p:(e,t)=>s(100*e,t),r:s,s:function(e,t){var n=(0,a.f)(e,t);if(!n)return e+"";var r=n[0],o=n[1],s=o-(i=3*Math.max(-8,Math.min(8,Math.floor(o/3))))+1,u=r.length;return s===u?r:s>u?r+new Array(s-u+1).join("0"):s>0?r.slice(0,s)+"."+r.slice(s):"0."+new Array(1-s).join("0")+(0,a.f)(e,Math.max(0,t+s-1))[0]},X:e=>Math.round(e).toString(16).toUpperCase(),x:e=>Math.round(e).toString(16)};function l(e){return e}var c=Array.prototype.map,f=["y","z","a","f","p","n","\xb5","m","","k","M","G","T","P","E","Z","Y"];function d(e){var t,n,a=void 0===e.grouping||void 0===e.thousands?l:(t=c.call(e.grouping,Number),n=e.thousands+"",function(e,r){for(var o=e.length,i=[],a=0,s=t[0],u=0;o>0&&s>0&&(u+s+1>r&&(s=Math.max(1,r-u)),i.push(e.substring(o-=s,o+s)),!((u+=s+1)>r));)s=t[a=(a+1)%t.length];return i.reverse().join(n)}),s=void 0===e.currency?"":e.currency[0]+"",d=void 0===e.currency?"":e.currency[1]+"",h=void 
0===e.decimal?".":e.decimal+"",p=void 0===e.numerals?l:function(e){return function(t){return t.replace(/[0-9]/g,(function(t){return e[+t]}))}}(c.call(e.numerals,String)),g=void 0===e.percent?"%":e.percent+"",v=void 0===e.minus?"\u2212":e.minus+"",y=void 0===e.nan?"NaN":e.nan+"";function m(e){var t=(e=(0,o.A)(e)).fill,n=e.align,r=e.sign,l=e.symbol,c=e.zero,m=e.width,b=e.comma,_=e.precision,w=e.trim,x=e.type;"n"===x?(b=!0,x="g"):u[x]||(void 0===_&&(_=12),w=!0,x="g"),(c||"0"===t&&"="===n)&&(c=!0,t="0",n="=");var S="$"===l?s:"#"===l&&/[boxX]/.test(x)?"0"+x.toLowerCase():"",A="$"===l?d:/[%p]/.test(x)?g:"",E=u[x],C=/[defgprs%]/.test(x);function R(e){var o,s,u,l=S,d=A;if("c"===x)d=E(e)+d,e="";else{var g=(e=+e)<0||1/e<0;if(e=isNaN(e)?y:E(Math.abs(e),_),w&&(e=function(e){e:for(var t,n=e.length,r=1,o=-1;r0&&(o=0)}return o>0?e.slice(0,o)+e.slice(t+1):e}(e)),g&&0===+e&&"+"!==r&&(g=!1),l=(g?"("===r?r:v:"-"===r||"("===r?"":r)+l,d=("s"===x?f[8+i/3]:"")+d+(g&&"("===r?")":""),C)for(o=-1,s=e.length;++o(u=e.charCodeAt(o))||u>57){d=(46===u?h+e.slice(o+1):e.slice(o))+d,e=e.slice(0,o);break}}b&&!c&&(e=a(e,1/0));var R=l.length+e.length+d.length,O=R>1)+l+e+d+O.slice(R);break;default:e=O+l+e+d}return p(e)}return _=void 0===_?6:/[gprs]/.test(x)?Math.max(1,Math.min(21,_)):Math.max(0,Math.min(20,_)),R.toString=function(){return e+""},R}return{format:m,formatPrefix:function(e,t){var n=m(((e=(0,o.A)(e)).type="f",e)),i=3*Math.max(-8,Math.min(8,Math.floor((0,r.A)(t)/3))),a=Math.pow(10,-i),s=f[8+i/3];return function(e){return n(a*e)+s}}}}},1734:(e,t,n)=>{"use strict";n.d(t,{A:()=>o});var r=n(47235);function o(e){return Math.max(0,-(0,r.A)(Math.abs(e)))}},67590:(e,t,n)=>{"use strict";n.d(t,{A:()=>o});var r=n(47235);function o(e,t){return Math.max(0,3*Math.max(-8,Math.min(8,Math.floor((0,r.A)(t)/3)))-(0,r.A)(Math.abs(e)))}},99460:(e,t,n)=>{"use strict";n.d(t,{A:()=>o});var r=n(47235);function o(e,t){return e=Math.abs(e),t=Math.abs(t)-e,Math.max(0,(0,r.A)(t)-(0,r.A)(e))+1}},68079:(e,t,n)=>{"use strict";n.d(t,{$:()=>a,A:()=>i});var r=n(58503),o=n(10128);function i(e,t){return((0,o.p)(t)?o.A:a)(e,t)}function a(e,t){var n,o=t?t.length:0,i=e?Math.min(o,e.length):0,a=new Array(i),s=new Array(o);for(n=0;n{"use strict";function r(e,t,n,r,o){var i=e*e,a=i*e;return((1-3*e+3*i-a)*t+(4-6*i+3*a)*n+(1+3*e+3*i-3*a)*r+a*o)/6}function o(e){var t=e.length-1;return function(n){var o=n<=0?n=0:n>=1?(n=1,t-1):Math.floor(n*t),i=e[o],a=e[o+1],s=o>0?e[o-1]:2*i-a,u=oo,H:()=>r})},19804:(e,t,n)=>{"use strict";n.d(t,{A:()=>o});var r=n(86160);function o(e){var t=e.length;return function(n){var o=Math.floor(((n%=1)<0?++n:n)*t),i=e[(o+t-1)%t],a=e[o%t],s=e[(o+1)%t],u=e[(o+2)%t];return(0,r.H)((n-o/t)*t,i,a,s,u)}}},84709:(e,t,n)=>{"use strict";n.d(t,{Ay:()=>s,lG:()=>i,uN:()=>a});var r=n(3116);function o(e,t){return function(n){return e+n*t}}function i(e,t){var n=t-e;return n?o(e,n>180||n<-180?n-360*Math.round(n/360):n):(0,r.A)(isNaN(e)?t:e)}function a(e){return 1===(e=+e)?s:function(t,n){return n-t?function(e,t,n){return e=Math.pow(e,n),t=Math.pow(t,n)-e,n=1/n,function(r){return Math.pow(e+r*t,n)}}(t,n,e):(0,r.A)(isNaN(t)?n:t)}}function s(e,t){var n=t-e;return n?o(e,n):(0,r.A)(isNaN(e)?t:e)}},3116:(e,t,n)=>{"use strict";n.d(t,{A:()=>r});const r=e=>()=>e},58172:(e,t,n)=>{"use strict";function r(e,t){var n=new Date;return e=+e,t=+t,function(r){return n.setTime(e*(1-r)+t*r),n}}n.d(t,{A:()=>r})},8981:(e,t,n)=>{"use strict";function r(e,t){return e=+e,t=+t,function(n){return e*(1-n)+t*n}}n.d(t,{A:()=>r})},10128:(e,t,n)=>{"use strict";function r(e,t){t||(t=[]);var 
n,r=e?Math.min(t.length,e.length):0,o=t.slice();return function(i){for(n=0;nr,p:()=>o})},34287:(e,t,n)=>{"use strict";n.d(t,{A:()=>o});var r=n(58503);function o(e,t){var n,o={},i={};for(n in null!==e&&"object"===typeof e||(e={}),null!==t&&"object"===typeof t||(t={}),t)n in e?o[n]=(0,r.A)(e[n],t[n]):i[n]=t[n];return function(e){for(n in o)i[n]=o[n](e);return i}}},23104:(e,t,n)=>{"use strict";n.d(t,{A:()=>o});var r=n(58503);function o(e,t){void 0===t&&(t=e,e=r.A);for(var n=0,o=t.length-1,i=t[0],a=new Array(o<0?0:o);n{"use strict";n.d(t,{Ay:()=>s,Ik:()=>l,uL:()=>c});var r=n(46957),o=n(86160),i=n(19804),a=n(84709);const s=function e(t){var n=(0,a.uN)(t);function o(e,t){var o=n((e=(0,r.Qh)(e)).r,(t=(0,r.Qh)(t)).r),i=n(e.g,t.g),s=n(e.b,t.b),u=(0,a.Ay)(e.opacity,t.opacity);return function(t){return e.r=o(t),e.g=i(t),e.b=s(t),e.opacity=u(t),e+""}}return o.gamma=e,o}(1);function u(e){return function(t){var n,o,i=t.length,a=new Array(i),s=new Array(i),u=new Array(i);for(n=0;n{"use strict";function r(e,t){return e=+e,t=+t,function(n){return Math.round(e*(1-n)+t*n)}}n.d(t,{A:()=>r})},67737:(e,t,n)=>{"use strict";n.d(t,{A:()=>a});var r=n(8981),o=/[-+]?(?:\d+\.?\d*|\.?\d+)(?:[eE][-+]?\d+)?/g,i=new RegExp(o.source,"g");function a(e,t){var n,a,s,u=o.lastIndex=i.lastIndex=0,l=-1,c=[],f=[];for(e+="",t+="";(n=o.exec(e))&&(a=i.exec(t));)(s=a.index)>u&&(s=t.slice(u,s),c[l]?c[l]+=s:c[++l]=s),(n=n[0])===(a=a[0])?c[l]?c[l]+=a:c[++l]=a:(c[++l]=null,f.push({i:l,x:(0,r.A)(n,a)})),u=i.lastIndex;return u{"use strict";n.d(t,{A:()=>d});var r=n(46957),o=n(21197),i=n(68079),a=n(58172),s=n(8981),u=n(34287),l=n(67737),c=n(3116),f=n(10128);function d(e,t){var n,d=typeof t;return null==t||"boolean"===d?(0,c.A)(t):("number"===d?s.A:"string"===d?(n=(0,r.Ay)(t))?(t=n,o.Ay):l.A:t instanceof r.Ay?o.Ay:t instanceof Date?a.A:(0,f.p)(t)?f.A:Array.isArray(t)?i.$:"function"!==typeof t.valueOf&&"function"!==typeof t.toString||isNaN(t)?u.A:s.A)(e,t)}},16199:(e,t,n)=>{"use strict";n.r(t),n.d(t,{scaleBand:()=>l,scaleDiverging:()=>Me,scaleDivergingLog:()=>Pe,scaleDivergingPow:()=>Ne,scaleDivergingSqrt:()=>De,scaleDivergingSymlog:()=>Ie,scaleIdentity:()=>N,scaleImplicit:()=>s,scaleLinear:()=>I,scaleLog:()=>$,scaleOrdinal:()=>u,scalePoint:()=>f,scalePow:()=>Q,scaleQuantile:()=>oe,scaleQuantize:()=>ie,scaleRadial:()=>te,scaleSequential:()=>Se,scaleSequentialLog:()=>Ae,scaleSequentialPow:()=>Ce,scaleSequentialQuantile:()=>Oe,scaleSequentialSqrt:()=>Re,scaleSequentialSymlog:()=>Ee,scaleSqrt:()=>J,scaleSymlog:()=>G,scaleThreshold:()=>ae,scaleTime:()=>be,scaleUtc:()=>_e,tickFormat:()=>M});var r=n(6561);function o(e,t){switch(arguments.length){case 0:break;case 1:this.range(e);break;default:this.range(t).domain(e)}return this}function i(e,t){switch(arguments.length){case 0:break;case 1:"function"===typeof e?this.interpolator(e):this.range(e);break;default:this.domain(e),"function"===typeof t?this.interpolator(t):this.range(t)}return this}var a=n(24119);const s=Symbol("implicit");function u(){var e=new a.B,t=[],n=[],r=s;function i(o){let i=e.get(o);if(void 0===i){if(r!==s)return r;e.set(o,i=t.push(o)-1)}return n[i%n.length]}return i.domain=function(n){if(!arguments.length)return t.slice();t=[],e=new a.B;for(const r of n)e.has(r)||e.set(r,t.push(r)-1);return i},i.range=function(e){return arguments.length?(n=Array.from(e),i):n.slice()},i.unknown=function(e){return arguments.length?(r=e,i):r},i.copy=function(){return u(t,n).unknown(r)},o.apply(i,arguments),i}function l(){var e,t,n=u().unknown(void 
0),i=n.domain,a=n.range,s=0,c=1,f=!1,d=0,h=0,p=.5;function g(){var n=i().length,o=ct&&(n=e,e=t,t=n),function(n){return Math.max(e,Math.min(t,n))}}(a[0],a[e-1])),r=e>2?x:w,o=i=null,f}function f(t){return null==t||isNaN(t=+t)?n:(o||(o=r(a.map(e),s,u)))(e(l(t)))}return f.invert=function(n){return l(t((i||(i=r(s,a.map(e),g.A)))(n)))},f.domain=function(e){return arguments.length?(a=Array.from(e,y),c()):a.slice()},f.range=function(e){return arguments.length?(s=Array.from(e),c()):s.slice()},f.rangeRound=function(e){return s=Array.from(e),u=v.A,c()},f.clamp=function(e){return arguments.length?(l=!!e||b,c()):l!==b},f.interpolate=function(e){return arguments.length?(u=e,c()):u},f.unknown=function(e){return arguments.length?(n=e,f):n},function(n,r){return e=n,t=r,c()}}function E(){return A()(b,b)}var C=n(1631),R=n(67590),O=n(48701),T=n(99460),k=n(1734);function M(e,t,n,r){var o,i=(0,d.sG)(e,t,n);switch((r=(0,C.A)(null==r?",f":r)).type){case"s":var a=Math.max(Math.abs(e),Math.abs(t));return null!=r.precision||isNaN(o=(0,R.A)(i,a))||(r.precision=o),(0,O.s)(r,a);case"":case"e":case"g":case"p":case"r":null!=r.precision||isNaN(o=(0,T.A)(i,Math.max(Math.abs(e),Math.abs(t))))||(r.precision=o-("e"===r.type));break;case"f":case"%":null!=r.precision||isNaN(o=(0,k.A)(i))||(r.precision=o-2*("%"===r.type))}return(0,O.GP)(r)}function P(e){var t=e.domain;return e.ticks=function(e){var n=t();return(0,d.Ay)(n[0],n[n.length-1],null==e?10:e)},e.tickFormat=function(e,n){var r=t();return M(r[0],r[r.length-1],null==e?10:e,n)},e.nice=function(n){null==n&&(n=10);var r,o,i=t(),a=0,s=i.length-1,u=i[a],l=i[s],c=10;for(l0;){if((o=(0,d.lq)(u,l,n))===r)return i[a]=u,i[s]=l,t(i);if(o>0)u=Math.floor(u/o)*o,l=Math.ceil(l/o)*o;else{if(!(o<0))break;u=Math.ceil(u*o)/o,l=Math.floor(l*o)/o}r=o}return e},e}function I(){var e=E();return e.copy=function(){return S(e,I())},o.apply(e,arguments),P(e)}function N(e){var t;function n(e){return null==e||isNaN(e=+e)?t:e}return n.invert=n,n.domain=n.range=function(t){return arguments.length?(e=Array.from(t,y),n):e.slice()},n.unknown=function(e){return arguments.length?(t=e,n):t},n.copy=function(){return N(e).unknown(t)},e=arguments.length?Array.from(e,y):[0,1],P(n)}function D(e,t){var n,r=0,o=(e=e.slice()).length-1,i=e[r],a=e[o];return a-e(-t,n)}function V(e){const t=e(L,F),n=t.domain;let r,o,i=10;function a(){return r=function(e){return e===Math.E?Math.log:10===e&&Math.log10||2===e&&Math.log2||(e=Math.log(e),t=>Math.log(t)/e)}(i),o=function(e){return 10===e?z:e===Math.E?Math.exp:t=>Math.pow(e,t)}(i),n()[0]<0?(r=B(r),o=B(o),e(j,U)):e(L,F),t}return t.base=function(e){return arguments.length?(i=+e,a()):i},t.domain=function(e){return arguments.length?(n(e),a()):n()},t.ticks=e=>{const t=n();let a=t[0],s=t[t.length-1];const u=s0){for(;f<=h;++f)for(l=1;ls)break;g.push(c)}}else for(;f<=h;++f)for(l=i-1;l>=1;--l)if(c=f>0?l/o(-f):l*o(f),!(cs)break;g.push(c)}2*g.length{if(null==e&&(e=10),null==n&&(n=10===i?"s":","),"function"!==typeof n&&(i%1||null!=(n=(0,C.A)(n)).precision||(n.trim=!0),n=(0,O.GP)(n)),e===1/0)return n;const a=Math.max(1,i*e/t.ticks().length);return e=>{let t=e/o(Math.round(r(e)));return t*in(D(n(),{floor:e=>o(Math.floor(r(e))),ceil:e=>o(Math.ceil(r(e)))})),t}function $(){const e=V(A()).domain([1,10]);return e.copy=()=>S(e,$()).base(e.base()),o.apply(e,arguments),e}function H(e){return function(t){return Math.sign(t)*Math.log1p(Math.abs(t/e))}}function W(e){return function(t){return Math.sign(t)*Math.expm1(Math.abs(t))*e}}function q(e){var t=1,n=e(H(t),W(t));return n.constant=function(n){return 
arguments.length?e(H(t=+n),W(t)):t},P(n)}function G(){var e=q(A());return e.copy=function(){return S(e,G()).constant(e.constant())},o.apply(e,arguments)}function Y(e){return function(t){return t<0?-Math.pow(-t,e):Math.pow(t,e)}}function X(e){return e<0?-Math.sqrt(-e):Math.sqrt(e)}function K(e){return e<0?-e*e:e*e}function Z(e){var t=e(b,b),n=1;return t.exponent=function(t){return arguments.length?1===(n=+t)?e(b,b):.5===n?e(X,K):e(Y(n),Y(1/n)):n},P(t)}function Q(){var e=Z(A());return e.copy=function(){return S(e,Q()).exponent(e.exponent())},o.apply(e,arguments),e}function J(){return Q.apply(null,arguments).exponent(.5)}function ee(e){return Math.sign(e)*e*e}function te(){var e,t=E(),n=[0,1],r=!1;function i(n){var o=function(e){return Math.sign(e)*Math.sqrt(Math.abs(e))}(t(n));return isNaN(o)?e:r?Math.round(o):o}return i.invert=function(e){return t.invert(ee(e))},i.domain=function(e){return arguments.length?(t.domain(e),i):t.domain()},i.range=function(e){return arguments.length?(t.range((n=Array.from(e,y)).map(ee)),i):n.slice()},i.rangeRound=function(e){return i.range(e).round(!0)},i.round=function(e){return arguments.length?(r=!!e,i):r},i.clamp=function(e){return arguments.length?(t.clamp(e),i):t.clamp()},i.unknown=function(t){return arguments.length?(e=t,i):e},i.copy=function(){return te(t.domain(),n).round(r).clamp(t.clamp()).unknown(e)},o.apply(i,arguments),P(i)}var ne=n(84855),re=n(12902);function oe(){var e,t=[],n=[],r=[];function i(){var e=0,o=Math.max(1,n.length);for(r=new Array(o-1);++e0?r[o-1]:t[0],o=r?[i[r-1],n]:[i[o-1],i[o]]},s.unknown=function(t){return arguments.length?(e=t,s):s},s.thresholds=function(){return i.slice()},s.copy=function(){return ie().domain([t,n]).range(a).unknown(e)},o.apply(P(s),arguments)}function ae(){var e,t=[.5],n=[0,1],r=1;function i(o){return null!=o&&o<=o?n[(0,h.Ay)(t,o,0,r)]:e}return i.domain=function(e){return arguments.length?(t=Array.from(e),r=Math.min(t.length,n.length-1),i):t.slice()},i.range=function(e){return arguments.length?(n=Array.from(e),r=Math.min(t.length,n.length-1),i):n.slice()},i.invertExtent=function(e){var r=n.indexOf(e);return[t[r-1],t[r]]},i.unknown=function(t){return arguments.length?(e=t,i):e},i.copy=function(){return ae().domain(t).range(n).unknown(e)},o.apply(i,arguments)}var se=n(526),ue=n(24355),le=n(75878),ce=n(78926),fe=n(55334),de=n(54612),he=n(13298),pe=n(55030),ge=n(53705);function ve(e){return new Date(e)}function ye(e){return e instanceof Date?+e:+new Date(+e)}function me(e,t,n,r,o,i,a,s,u,l){var c=E(),f=c.invert,d=c.domain,h=l(".%L"),p=l(":%S"),g=l("%I:%M"),v=l("%I %p"),y=l("%a %d"),m=l("%b %d"),b=l("%B"),_=l("%Y");function w(e){return(u(e)t(r/(e.length-1))))},n.quantiles=function(t){return Array.from({length:t+1},((n,r)=>(0,ne.Ay)(e,r/t)))},n.copy=function(){return Oe(t).domain(e)},i.apply(n,arguments)}var Te=n(23104);function ke(){var e,t,n,r,o,i,a,s=0,u=.5,l=1,c=1,f=b,d=!1;function h(e){return isNaN(e=+e)?a:(e=.5+((e=+i(e))-t)*(c*e{"use strict";n.d(t,{Ay:()=>l,DC:()=>o,GY:()=>s,T6:()=>i,aL:()=>a});var r,o,i,a,s,u=n(64398);function l(e){return r=(0,u.A)(e),o=r.format,i=r.parse,a=r.utcFormat,s=r.utcParse,r}l({dateTime:"%x, %X",date:"%-m/%-d/%Y",time:"%-I:%M:%S 
%p",periods:["AM","PM"],days:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],shortDays:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],months:["January","February","March","April","May","June","July","August","September","October","November","December"],shortMonths:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"]})},64398:(e,t,n)=>{"use strict";n.d(t,{A:()=>l});var r=n(78926),o=n(55334),i=n(24355);function a(e){if(0<=e.y&&e.y<100){var t=new Date(-1,e.m,e.d,e.H,e.M,e.S,e.L);return t.setFullYear(e.y),t}return new Date(e.y,e.m,e.d,e.H,e.M,e.S,e.L)}function s(e){if(0<=e.y&&e.y<100){var t=new Date(Date.UTC(-1,e.m,e.d,e.H,e.M,e.S,e.L));return t.setUTCFullYear(e.y),t}return new Date(Date.UTC(e.y,e.m,e.d,e.H,e.M,e.S,e.L))}function u(e,t,n){return{y:e,m:t,d:n,H:0,M:0,S:0,L:0}}function l(e){var t=e.dateTime,n=e.date,i=e.time,l=e.periods,f=e.days,d=e.shortDays,h=e.months,p=e.shortMonths,g=v(l),X=y(l),ge=v(f),Re=y(f),Oe=v(d),Te=y(d),ke=v(h),Me=y(h),Pe=v(p),Ie=y(p),Ne={a:function(e){return d[e.getDay()]},A:function(e){return f[e.getDay()]},b:function(e){return p[e.getMonth()]},B:function(e){return h[e.getMonth()]},c:null,d:j,e:j,f:$,g:ee,G:ne,H:U,I:z,j:B,L:V,m:H,M:W,p:function(e){return l[+(e.getHours()>=12)]},q:function(e){return 1+~~(e.getMonth()/3)},Q:Ee,s:Ce,S:q,u:G,U:Y,V:K,w:Z,W:Q,x:null,X:null,y:J,Y:te,Z:re,"%":Ae},De={a:function(e){return d[e.getUTCDay()]},A:function(e){return f[e.getUTCDay()]},b:function(e){return p[e.getUTCMonth()]},B:function(e){return h[e.getUTCMonth()]},c:null,d:oe,e:oe,f:le,g:_e,G:xe,H:ie,I:ae,j:se,L:ue,m:ce,M:fe,p:function(e){return l[+(e.getUTCHours()>=12)]},q:function(e){return 1+~~(e.getUTCMonth()/3)},Q:Ee,s:Ce,S:de,u:he,U:pe,V:ve,w:ye,W:me,x:null,X:null,y:be,Y:we,Z:Se,"%":Ae},Le={a:function(e,t,n){var r=Oe.exec(t.slice(n));return r?(e.w=Te.get(r[0].toLowerCase()),n+r[0].length):-1},A:function(e,t,n){var r=ge.exec(t.slice(n));return r?(e.w=Re.get(r[0].toLowerCase()),n+r[0].length):-1},b:function(e,t,n){var r=Pe.exec(t.slice(n));return r?(e.m=Ie.get(r[0].toLowerCase()),n+r[0].length):-1},B:function(e,t,n){var r=ke.exec(t.slice(n));return r?(e.m=Me.get(r[0].toLowerCase()),n+r[0].length):-1},c:function(e,n,r){return Ue(e,t,n,r)},d:O,e:O,f:N,g:A,G:S,H:k,I:k,j:T,L:I,m:R,M:M,p:function(e,t,n){var r=g.exec(t.slice(n));return r?(e.p=X.get(r[0].toLowerCase()),n+r[0].length):-1},q:C,Q:L,s:F,S:P,u:b,U:_,V:w,w:m,W:x,x:function(e,t,r){return Ue(e,n,t,r)},X:function(e,t,n){return Ue(e,i,t,n)},y:A,Y:S,Z:E,"%":D};function Fe(e,t){return function(n){var r,o,i,a=[],s=-1,u=0,l=e.length;for(n instanceof Date||(n=new Date(+n));++s53)return null;"w"in c||(c.w=1),"Z"in c?(l=(i=s(u(c.y,0,1))).getUTCDay(),i=l>4||0===l?r.rt.ceil(i):(0,r.rt)(i),i=o.dA.offset(i,7*(c.V-1)),c.y=i.getUTCFullYear(),c.m=i.getUTCMonth(),c.d=i.getUTCDate()+(c.w+6)%7):(l=(i=a(u(c.y,0,1))).getDay(),i=l>4||0===l?r.AB.ceil(i):(0,r.AB)(i),i=o.UA.offset(i,7*(c.V-1)),c.y=i.getFullYear(),c.m=i.getMonth(),c.d=i.getDate()+(c.w+6)%7)}else("W"in c||"U"in c)&&("w"in c||(c.w="u"in c?c.u%7:"W"in c?1:0),l="Z"in c?s(u(c.y,0,1)).getUTCDay():a(u(c.y,0,1)).getDay(),c.m=0,c.d="W"in c?(c.w+6)%7+7*c.W-(l+5)%7:c.w+7*c.U-(l+6)%7);return"Z"in c?(c.H+=c.Z/100|0,c.M+=c.Z%100,s(c)):a(c)}}function Ue(e,t,n,r){for(var o,i,a=0,s=t.length,u=n.length;a=u)return-1;if(37===(o=t.charCodeAt(a++))){if(o=t.charAt(a++),!(i=Le[o in c?t.charAt(a++):o])||(r=i(e,n,r))<0)return-1}else if(o!=n.charCodeAt(r++))return-1}return r}return 
Ne.x=Fe(n,Ne),Ne.X=Fe(i,Ne),Ne.c=Fe(t,Ne),De.x=Fe(n,De),De.X=Fe(i,De),De.c=Fe(t,De),{format:function(e){var t=Fe(e+="",Ne);return t.toString=function(){return e},t},parse:function(e){var t=je(e+="",!1);return t.toString=function(){return e},t},utcFormat:function(e){var t=Fe(e+="",De);return t.toString=function(){return e},t},utcParse:function(e){var t=je(e+="",!0);return t.toString=function(){return e},t}}}var c={"-":"",_:" ",0:"0"},f=/^\s*\d+/,d=/^%/,h=/[\\^$*+?|[\]().{}]/g;function p(e,t,n){var r=e<0?"-":"",o=(r?-e:e)+"",i=o.length;return r+(i[e.toLowerCase(),t])))}function m(e,t,n){var r=f.exec(t.slice(n,n+1));return r?(e.w=+r[0],n+r[0].length):-1}function b(e,t,n){var r=f.exec(t.slice(n,n+1));return r?(e.u=+r[0],n+r[0].length):-1}function _(e,t,n){var r=f.exec(t.slice(n,n+2));return r?(e.U=+r[0],n+r[0].length):-1}function w(e,t,n){var r=f.exec(t.slice(n,n+2));return r?(e.V=+r[0],n+r[0].length):-1}function x(e,t,n){var r=f.exec(t.slice(n,n+2));return r?(e.W=+r[0],n+r[0].length):-1}function S(e,t,n){var r=f.exec(t.slice(n,n+4));return r?(e.y=+r[0],n+r[0].length):-1}function A(e,t,n){var r=f.exec(t.slice(n,n+2));return r?(e.y=+r[0]+(+r[0]>68?1900:2e3),n+r[0].length):-1}function E(e,t,n){var r=/^(Z)|([+-]\d\d)(?::?(\d\d))?/.exec(t.slice(n,n+6));return r?(e.Z=r[1]?0:-(r[2]+(r[3]||"00")),n+r[0].length):-1}function C(e,t,n){var r=f.exec(t.slice(n,n+1));return r?(e.q=3*r[0]-3,n+r[0].length):-1}function R(e,t,n){var r=f.exec(t.slice(n,n+2));return r?(e.m=r[0]-1,n+r[0].length):-1}function O(e,t,n){var r=f.exec(t.slice(n,n+2));return r?(e.d=+r[0],n+r[0].length):-1}function T(e,t,n){var r=f.exec(t.slice(n,n+3));return r?(e.m=0,e.d=+r[0],n+r[0].length):-1}function k(e,t,n){var r=f.exec(t.slice(n,n+2));return r?(e.H=+r[0],n+r[0].length):-1}function M(e,t,n){var r=f.exec(t.slice(n,n+2));return r?(e.M=+r[0],n+r[0].length):-1}function P(e,t,n){var r=f.exec(t.slice(n,n+2));return r?(e.S=+r[0],n+r[0].length):-1}function I(e,t,n){var r=f.exec(t.slice(n,n+3));return r?(e.L=+r[0],n+r[0].length):-1}function N(e,t,n){var r=f.exec(t.slice(n,n+6));return r?(e.L=Math.floor(r[0]/1e3),n+r[0].length):-1}function D(e,t,n){var r=d.exec(t.slice(n,n+1));return r?n+r[0].length:-1}function L(e,t,n){var r=f.exec(t.slice(n));return r?(e.Q=+r[0],n+r[0].length):-1}function F(e,t,n){var r=f.exec(t.slice(n));return r?(e.s=+r[0],n+r[0].length):-1}function j(e,t){return p(e.getDate(),t,2)}function U(e,t){return p(e.getHours(),t,2)}function z(e,t){return p(e.getHours()%12||12,t,2)}function B(e,t){return p(1+o.UA.count((0,i.he)(e),e),t,3)}function V(e,t){return p(e.getMilliseconds(),t,3)}function $(e,t){return V(e,t)+"000"}function H(e,t){return p(e.getMonth()+1,t,2)}function W(e,t){return p(e.getMinutes(),t,2)}function q(e,t){return p(e.getSeconds(),t,2)}function G(e){var t=e.getDay();return 0===t?7:t}function Y(e,t){return p(r.YP.count((0,i.he)(e)-1,e),t,2)}function X(e){var t=e.getDay();return t>=4||0===t?(0,r.Mo)(e):r.Mo.ceil(e)}function K(e,t){return e=X(e),p(r.Mo.count((0,i.he)(e),e)+(4===(0,i.he)(e).getDay()),t,2)}function Z(e){return e.getDay()}function Q(e,t){return p(r.AB.count((0,i.he)(e)-1,e),t,2)}function J(e,t){return p(e.getFullYear()%100,t,2)}function ee(e,t){return p((e=X(e)).getFullYear()%100,t,2)}function te(e,t){return p(e.getFullYear()%1e4,t,4)}function ne(e,t){var n=e.getDay();return p((e=n>=4||0===n?(0,r.Mo)(e):r.Mo.ceil(e)).getFullYear()%1e4,t,4)}function re(e){var t=e.getTimezoneOffset();return(t>0?"-":(t*=-1,"+"))+p(t/60|0,"0",2)+p(t%60,"0",2)}function oe(e,t){return p(e.getUTCDate(),t,2)}function 
ie(e,t){return p(e.getUTCHours(),t,2)}function ae(e,t){return p(e.getUTCHours()%12||12,t,2)}function se(e,t){return p(1+o.dA.count((0,i.Mb)(e),e),t,3)}function ue(e,t){return p(e.getUTCMilliseconds(),t,3)}function le(e,t){return ue(e,t)+"000"}function ce(e,t){return p(e.getUTCMonth()+1,t,2)}function fe(e,t){return p(e.getUTCMinutes(),t,2)}function de(e,t){return p(e.getUTCSeconds(),t,2)}function he(e){var t=e.getUTCDay();return 0===t?7:t}function pe(e,t){return p(r.Hl.count((0,i.Mb)(e)-1,e),t,2)}function ge(e){var t=e.getUTCDay();return t>=4||0===t?(0,r.pT)(e):r.pT.ceil(e)}function ve(e,t){return e=ge(e),p(r.pT.count((0,i.Mb)(e),e)+(4===(0,i.Mb)(e).getUTCDay()),t,2)}function ye(e){return e.getUTCDay()}function me(e,t){return p(r.rt.count((0,i.Mb)(e)-1,e),t,2)}function be(e,t){return p(e.getUTCFullYear()%100,t,2)}function _e(e,t){return p((e=ge(e)).getUTCFullYear()%100,t,2)}function we(e,t){return p(e.getUTCFullYear()%1e4,t,4)}function xe(e,t){var n=e.getUTCDay();return p((e=n>=4||0===n?(0,r.pT)(e):r.pT.ceil(e)).getUTCFullYear()%1e4,t,4)}function Se(){return"+0000"}function Ae(){return"%"}function Ee(e){return+e}function Ce(e){return Math.floor(+e/1e3)}},55334:(e,t,n)=>{"use strict";n.d(t,{NT:()=>a,TW:()=>l,UA:()=>i,dA:()=>s,ou:()=>u,yE:()=>c});var r=n(37591),o=n(81134);const i=(0,r.f)((e=>e.setHours(0,0,0,0)),((e,t)=>e.setDate(e.getDate()+t)),((e,t)=>(t-e-(t.getTimezoneOffset()-e.getTimezoneOffset())*o.rR)/o.Nm),(e=>e.getDate()-1)),a=i.range,s=(0,r.f)((e=>{e.setUTCHours(0,0,0,0)}),((e,t)=>{e.setUTCDate(e.getUTCDate()+t)}),((e,t)=>(t-e)/o.Nm),(e=>e.getUTCDate()-1)),u=s.range,l=(0,r.f)((e=>{e.setUTCHours(0,0,0,0)}),((e,t)=>{e.setUTCDate(e.getUTCDate()+t)}),((e,t)=>(t-e)/o.Nm),(e=>Math.floor(e/o.Nm))),c=l.range},81134:(e,t,n)=>{"use strict";n.d(t,{Fq:()=>s,JJ:()=>i,MP:()=>l,Nm:()=>a,Pv:()=>u,Tt:()=>r,rR:()=>o});const r=1e3,o=60*r,i=60*o,a=24*i,s=7*a,u=30*a,l=365*a},54612:(e,t,n)=>{"use strict";n.d(t,{Ag:()=>i,L6:()=>a,iA:()=>u,pz:()=>s});var r=n(37591),o=n(81134);const i=(0,r.f)((e=>{e.setTime(e-e.getMilliseconds()-e.getSeconds()*o.Tt-e.getMinutes()*o.rR)}),((e,t)=>{e.setTime(+e+t*o.JJ)}),((e,t)=>(t-e)/o.JJ),(e=>e.getHours())),a=i.range,s=(0,r.f)((e=>{e.setUTCMinutes(0,0,0)}),((e,t)=>{e.setTime(+e+t*o.JJ)}),((e,t)=>(t-e)/o.JJ),(e=>e.getUTCHours())),u=s.range},37591:(e,t,n)=>{"use strict";n.d(t,{f:()=>i});const r=new Date,o=new Date;function i(e,t,n,a){function s(t){return e(t=0===arguments.length?new Date:new Date(+t)),t}return s.floor=t=>(e(t=new Date(+t)),t),s.ceil=n=>(e(n=new Date(n-1)),t(n,1),e(n),n),s.round=e=>{const t=s(e),n=s.ceil(e);return e-t(t(e=new Date(+e),null==n?1:Math.floor(n)),e),s.range=(n,r,o)=>{const i=[];if(n=s.ceil(n),o=null==o?1:Math.floor(o),!(n0))return i;let a;do{i.push(a=new Date(+n)),t(n,o),e(n)}while(ai((t=>{if(t>=t)for(;e(t),!n(t);)t.setTime(t-1)}),((e,r)=>{if(e>=e)if(r<0)for(;++r<=0;)for(;t(e,-1),!n(e););else for(;--r>=0;)for(;t(e,1),!n(e););})),n&&(s.count=(t,i)=>(r.setTime(+t),o.setTime(+i),e(r),e(o),Math.floor(n(r,o))),s.every=e=>(e=Math.floor(e),isFinite(e)&&e>0?e>1?s.filter(a?t=>a(t)%e===0:t=>s.count(0,t)%e===0):s:null)),s}},32613:(e,t,n)=>{"use strict";n.d(t,{R:()=>i,y:()=>o});var r=n(37591);const o=(0,r.f)((()=>{}),((e,t)=>{e.setTime(+e+t)}),((e,t)=>t-e));o.every=e=>(e=Math.floor(e),isFinite(e)&&e>0?e>1?(0,r.f)((t=>{t.setTime(Math.floor(t/e)*e)}),((t,n)=>{t.setTime(+t+n*e)}),((t,n)=>(n-t)/e)):o:null);const i=o.range},13298:(e,t,n)=>{"use strict";n.d(t,{Y4:()=>u,rn:()=>a,vD:()=>s,wX:()=>i});var r=n(37591),o=n(81134);const 
i=(0,r.f)((e=>{e.setTime(e-e.getMilliseconds()-e.getSeconds()*o.Tt)}),((e,t)=>{e.setTime(+e+t*o.rR)}),((e,t)=>(t-e)/o.rR),(e=>e.getMinutes())),a=i.range,s=(0,r.f)((e=>{e.setUTCSeconds(0,0)}),((e,t)=>{e.setTime(+e+t*o.rR)}),((e,t)=>(t-e)/o.rR),(e=>e.getUTCMinutes())),u=s.range},75878:(e,t,n)=>{"use strict";n.d(t,{Bz:()=>i,R6:()=>a,Ui:()=>o,oi:()=>s});var r=n(37591);const o=(0,r.f)((e=>{e.setDate(1),e.setHours(0,0,0,0)}),((e,t)=>{e.setMonth(e.getMonth()+t)}),((e,t)=>t.getMonth()-e.getMonth()+12*(t.getFullYear()-e.getFullYear())),(e=>e.getMonth())),i=o.range,a=(0,r.f)((e=>{e.setUTCDate(1),e.setUTCHours(0,0,0,0)}),((e,t)=>{e.setUTCMonth(e.getUTCMonth()+t)}),((e,t)=>t.getUTCMonth()-e.getUTCMonth()+12*(t.getUTCFullYear()-e.getUTCFullYear())),(e=>e.getUTCMonth())),s=a.range},55030:(e,t,n)=>{"use strict";n.d(t,{R:()=>i,Y:()=>a});var r=n(37591),o=n(81134);const i=(0,r.f)((e=>{e.setTime(e-e.getMilliseconds())}),((e,t)=>{e.setTime(+e+t*o.Tt)}),((e,t)=>(t-e)/o.Tt),(e=>e.getUTCSeconds())),a=i.range},526:(e,t,n)=>{"use strict";n.d(t,{$Z:()=>g,Cf:()=>y,lk:()=>v,yE:()=>m});var r=n(86037),o=n(6946),i=n(81134),a=n(32613),s=n(55030),u=n(13298),l=n(54612),c=n(55334),f=n(78926),d=n(75878),h=n(24355);function p(e,t,n,u,l,c){const f=[[s.R,1,i.Tt],[s.R,5,5*i.Tt],[s.R,15,15*i.Tt],[s.R,30,30*i.Tt],[c,1,i.rR],[c,5,5*i.rR],[c,15,15*i.rR],[c,30,30*i.rR],[l,1,i.JJ],[l,3,3*i.JJ],[l,6,6*i.JJ],[l,12,12*i.JJ],[u,1,i.Nm],[u,2,2*i.Nm],[n,1,i.Fq],[t,1,i.Pv],[t,3,3*i.Pv],[e,1,i.MP]];function d(t,n,s){const u=Math.abs(n-t)/s,l=(0,r.A)((([,,e])=>e)).right(f,u);if(l===f.length)return e.every((0,o.sG)(t/i.MP,n/i.MP,s));if(0===l)return a.y.every(Math.max((0,o.sG)(t,n,s),1));const[c,d]=f[u/f[l-1][2]{"use strict";n.d(t,{AB:()=>s,Dr:()=>v,G6:()=>M,Gu:()=>l,HU:()=>y,Hl:()=>w,Mo:()=>c,OK:()=>g,ON:()=>T,PG:()=>u,SQ:()=>m,TU:()=>f,Xo:()=>N,YP:()=>a,Zn:()=>I,_M:()=>b,a1:()=>C,aZ:()=>O,c8:()=>R,h8:()=>p,jN:()=>k,pT:()=>E,rG:()=>d,rt:()=>x,sr:()=>S,vD:()=>h,wr:()=>P,z2:()=>A});var r=n(37591),o=n(81134);function i(e){return(0,r.f)((t=>{t.setDate(t.getDate()-(t.getDay()+7-e)%7),t.setHours(0,0,0,0)}),((e,t)=>{e.setDate(e.getDate()+7*t)}),((e,t)=>(t-e-(t.getTimezoneOffset()-e.getTimezoneOffset())*o.rR)/o.Fq))}const a=i(0),s=i(1),u=i(2),l=i(3),c=i(4),f=i(5),d=i(6),h=a.range,p=s.range,g=u.range,v=l.range,y=c.range,m=f.range,b=d.range;function _(e){return(0,r.f)((t=>{t.setUTCDate(t.getUTCDate()-(t.getUTCDay()+7-e)%7),t.setUTCHours(0,0,0,0)}),((e,t)=>{e.setUTCDate(e.getUTCDate()+7*t)}),((e,t)=>(t-e)/o.Fq))}const w=_(0),x=_(1),S=_(2),A=_(3),E=_(4),C=_(5),R=_(6),O=w.range,T=x.range,k=S.range,M=A.range,P=E.range,I=C.range,N=R.range},24355:(e,t,n)=>{"use strict";n.d(t,{Mb:()=>a,Yo:()=>i,he:()=>o,jH:()=>s});var r=n(37591);const o=(0,r.f)((e=>{e.setMonth(0,1),e.setHours(0,0,0,0)}),((e,t)=>{e.setFullYear(e.getFullYear()+t)}),((e,t)=>t.getFullYear()-e.getFullYear()),(e=>e.getFullYear()));o.every=e=>isFinite(e=Math.floor(e))&&e>0?(0,r.f)((t=>{t.setFullYear(Math.floor(t.getFullYear()/e)*e),t.setMonth(0,1),t.setHours(0,0,0,0)}),((t,n)=>{t.setFullYear(t.getFullYear()+n*e)})):null;const i=o.range,a=(0,r.f)((e=>{e.setUTCMonth(0,1),e.setUTCHours(0,0,0,0)}),((e,t)=>{e.setUTCFullYear(e.getUTCFullYear()+t)}),((e,t)=>t.getUTCFullYear()-e.getUTCFullYear()),(e=>e.getUTCFullYear()));a.every=e=>isFinite(e=Math.floor(e))&&e>0?(0,r.f)((t=>{t.setUTCFullYear(Math.floor(t.getUTCFullYear()/e)*e),t.setUTCMonth(0,1),t.setUTCHours(0,0,0,0)}),((t,n)=>{t.setUTCFullYear(t.getUTCFullYear()+n*e)})):null;const s=a.range},19018:(e,t,n)=>{"use 
strict";n.r(t),n.d(t,{Adder:()=>S,Delaunay:()=>Ia,FormatSpecifier:()=>Xu.S,InternMap:()=>C.B,InternSet:()=>C.v,Node:()=>cp,Path:()=>Wo,Voronoi:()=>Ra,ZoomTransform:()=>B_,active:()=>Xr,arc:()=>hm,area:()=>wm,areaRadial:()=>km,ascending:()=>o.A,autoType:()=>hs,axisBottom:()=>Ze,axisLeft:()=>Qe,axisRight:()=>Ke,axisTop:()=>Xe,bin:()=>G,bisect:()=>r.Ay,bisectCenter:()=>r.WD,bisectLeft:()=>r.ah,bisectRight:()=>r.Jj,bisector:()=>i.A,blob:()=>nu,blur:()=>a,blur2:()=>s,blurImage:()=>u,brush:()=>Eo,brushSelection:()=>xo,brushX:()=>So,brushY:()=>Ao,buffer:()=>ou,chord:()=>Lo,chordDirected:()=>jo,chordTranspose:()=>Fo,cluster:()=>rp,color:()=>_r.Ay,contourDensity:()=>ta,contours:()=>Gi,count:()=>h,create:()=>By,creator:()=>on,cross:()=>y,csv:()=>lu,csvFormat:()=>es,csvFormatBody:()=>ts,csvFormatRow:()=>rs,csvFormatRows:()=>ns,csvFormatValue:()=>os,csvParse:()=>Qa,csvParseRows:()=>Ja,cubehelix:()=>Fi,cumsum:()=>m,curveBasis:()=>_b,curveBasisClosed:()=>xb,curveBasisOpen:()=>Ab,curveBumpX:()=>Nm,curveBumpY:()=>Dm,curveBundle:()=>Cb,curveCardinal:()=>Tb,curveCardinalClosed:()=>Mb,curveCardinalOpen:()=>Ib,curveCatmullRom:()=>Lb,curveCatmullRomClosed:()=>jb,curveCatmullRomOpen:()=>zb,curveLinear:()=>ym,curveLinearClosed:()=>Vb,curveMonotoneX:()=>Kb,curveMonotoneY:()=>Zb,curveNatural:()=>e_,curveStep:()=>n_,curveStepAfter:()=>o_,curveStepBefore:()=>r_,descending:()=>b.A,deviation:()=>w,difference:()=>Te,disjoint:()=>ke,dispatch:()=>ot,drag:()=>za,dragDisable:()=>An,dragEnable:()=>En,dsv:()=>uu,dsvFormat:()=>Ka,easeBack:()=>Ks,easeBackIn:()=>Ys,easeBackInOut:()=>Ks,easeBackOut:()=>Xs,easeBounce:()=>Ws,easeBounceIn:()=>Hs,easeBounceInOut:()=>qs,easeBounceOut:()=>Ws,easeCircle:()=>Is,easeCircleIn:()=>Ms,easeCircleInOut:()=>Is,easeCircleOut:()=>Ps,easeCubic:()=>Wr,easeCubicIn:()=>$r,easeCubicInOut:()=>Wr,easeCubicOut:()=>Hr,easeElastic:()=>Js,easeElasticIn:()=>Qs,easeElasticInOut:()=>eu,easeElasticOut:()=>Js,easeExp:()=>ks,easeExpIn:()=>Os,easeExpInOut:()=>ks,easeExpOut:()=>Ts,easeLinear:()=>gs,easePoly:()=>ws,easePolyIn:()=>bs,easePolyInOut:()=>ws,easePolyOut:()=>_s,easeQuad:()=>ms,easeQuadIn:()=>vs,easeQuadInOut:()=>ms,easeQuadOut:()=>ys,easeSin:()=>Cs,easeSinIn:()=>As,easeSinInOut:()=>Cs,easeSinOut:()=>Es,every:()=>Se,extent:()=>x,fcumsum:()=>E,filter:()=>Ee,flatGroup:()=>M,flatRollup:()=>P,forceCenter:()=>mu,forceCollide:()=>Mu,forceLink:()=>Nu,forceManyBody:()=>$u,forceRadial:()=>Hu,forceSimulation:()=>Vu,forceX:()=>Wu,forceY:()=>qu,format:()=>Gu.GP,formatDefaultLocale:()=>Gu.Ay,formatLocale:()=>Yu.A,formatPrefix:()=>Gu.s,formatSpecifier:()=>Xu.A,fsum:()=>A,geoAlbers:()=>mh,geoAlbersUsa:()=>bh,geoArea:()=>Jl,geoAzimuthalEqualArea:()=>Sh,geoAzimuthalEqualAreaRaw:()=>xh,geoAzimuthalEquidistant:()=>Eh,geoAzimuthalEquidistantRaw:()=>Ah,geoBounds:()=>Nc,geoCentroid:()=>qc,geoCircle:()=>rf,geoClipAntimeridian:()=>gf,geoClipCircle:()=>vf,geoClipExtent:()=>Af,geoClipRectangle:()=>Sf,geoConicConformal:()=>Mh,geoConicConformalRaw:()=>kh,geoConicEqualArea:()=>yh,geoConicEqualAreaRaw:()=>vh,geoConicEquidistant:()=>Dh,geoConicEquidistantRaw:()=>Nh,geoContains:()=>Bf,geoDistance:()=>Pf,geoEqualEarth:()=>Vh,geoEqualEarthRaw:()=>Bh,geoEquirectangular:()=>Ih,geoEquirectangularRaw:()=>Ph,geoGnomonic:()=>Hh,geoGnomonicRaw:()=>$h,geoGraticule:()=>Hf,geoGraticule10:()=>Wf,geoIdentity:()=>Wh,geoInterpolate:()=>qf,geoLength:()=>Tf,geoMercator:()=>Rh,geoMercatorRaw:()=>Ch,geoNaturalEarth1:()=>Gh,geoNaturalEarth1Raw:()=>qh,geoOrthographic:()=>Xh,geoOrthographicRaw:()=>Yh,geoPath:()=>Jd,geoProjection:()=>hh,geoProjectionMutator:()=
>ph,geoRotation:()=>ef,geoStereographic:()=>Zh,geoStereographicRaw:()=>Kh,geoStream:()=>Tl,geoTransform:()=>eh,geoTransverseMercator:()=>Jh,geoTransverseMercatorRaw:()=>Qh,gray:()=>yi,greatest:()=>pe.A,greatestIndex:()=>ge,group:()=>O,groupSort:()=>z,groups:()=>T,hcl:()=>Ci,hierarchy:()=>ip,histogram:()=>G,hsl:()=>_r.KI,html:()=>vu,image:()=>fu,index:()=>D,indexes:()=>L,interpolate:()=>Cn.A,interpolateArray:()=>fg.A,interpolateBasis:()=>dg.A,interpolateBasisClosed:()=>hg.A,interpolateBlues:()=>py,interpolateBrBG:()=>Cv,interpolateBuGn:()=>Hv,interpolateBuPu:()=>qv,interpolateCividis:()=>Ey,interpolateCool:()=>Oy,interpolateCubehelix:()=>Mg,interpolateCubehelixDefault:()=>Cy,interpolateCubehelixLong:()=>Pg,interpolateDate:()=>pg.A,interpolateDiscrete:()=>gg,interpolateGnBu:()=>Yv,interpolateGreens:()=>vy,interpolateGreys:()=>my,interpolateHcl:()=>Og,interpolateHclLong:()=>Tg,interpolateHsl:()=>Ag,interpolateHslLong:()=>Eg,interpolateHue:()=>yg,interpolateInferno:()=>Uy,interpolateLab:()=>Cg,interpolateMagma:()=>jy,interpolateNumber:()=>cr.A,interpolateNumberArray:()=>mg.A,interpolateObject:()=>bg.A,interpolateOrRd:()=>Kv,interpolateOranges:()=>Ay,interpolatePRGn:()=>Ov,interpolatePiYG:()=>kv,interpolatePlasma:()=>zy,interpolatePuBu:()=>ey,interpolatePuBuGn:()=>Qv,interpolatePuOr:()=>Pv,interpolatePuRd:()=>ny,interpolatePurples:()=>_y,interpolateRainbow:()=>ky,interpolateRdBu:()=>Nv,interpolateRdGy:()=>Lv,interpolateRdPu:()=>oy,interpolateRdYlBu:()=>jv,interpolateRdYlGn:()=>zv,interpolateReds:()=>xy,interpolateRgb:()=>wr.Ay,interpolateRgbBasis:()=>wr.Ik,interpolateRgbBasisClosed:()=>wr.uL,interpolateRound:()=>_g.A,interpolateSinebow:()=>Ny,interpolateSpectral:()=>Vv,interpolateString:()=>xr.A,interpolateTransformCss:()=>gr,interpolateTransformSvg:()=>vr,interpolateTurbo:()=>Dy,interpolateViridis:()=>Fy,interpolateWarm:()=>Ry,interpolateYlGn:()=>uy,interpolateYlGnBu:()=>ay,interpolateYlOrBr:()=>cy,interpolateYlOrRd:()=>dy,interpolateZoom:()=>xg,interrupt:()=>ur,intersection:()=>Me,interval:()=>j_,isoFormat:()=>D_,isoParse:()=>F_,json:()=>hu,lab:()=>mi,lch:()=>Ei,least:()=>de,leastIndex:()=>he,line:()=>_m,lineRadial:()=>Tm,link:()=>Um,linkHorizontal:()=>zm,linkRadial:()=>Vm,linkVertical:()=>Bm,local:()=>$y,map:()=>Ce,matcher:()=>ct,max:()=>Z.A,maxIndex:()=>Q.A,mean:()=>J,median:()=>ee,medianIndex:()=>te,merge:()=>ne,min:()=>re.A,minIndex:()=>oe.A,mode:()=>ie,namespace:()=>Et,namespaces:()=>At,nice:()=>W,now:()=>zn,pack:()=>Dp,packEnclose:()=>bp,packSiblings:()=>Ip,pairs:()=>ae,partition:()=>Bp,path:()=>qo,pathRound:()=>Go,permute:()=>ue.A,pie:()=>Am,piecewise:()=>Ig.A,pointRadial:()=>Mm,pointer:()=>On,pointers:()=>Wy,polygonArea:()=>Dg,polygonCentroid:()=>Lg,polygonContains:()=>Bg,polygonHull:()=>zg,polygonLength:()=>Vg,precisionFixed:()=>Ku.A,precisionPrefix:()=>Zu.A,precisionRound:()=>Qu.A,quadtree:()=>Su,quantile:()=>Y.Ay,quantileIndex:()=>Y.AX,quantileSorted:()=>Y.Z4,quantize:()=>Ng,quickselect:()=>le.A,radialArea:()=>km,radialLine:()=>Tm,randomBates:()=>Xg,randomBernoulli:()=>Qg,randomBeta:()=>tv,randomBinomial:()=>nv,randomCauchy:()=>ov,randomExponential:()=>Kg,randomGamma:()=>ev,randomGeometric:()=>Jg,randomInt:()=>Wg,randomIrwinHall:()=>Yg,randomLcg:()=>cv,randomLogNormal:()=>Gg,randomLogistic:()=>iv,randomNormal:()=>qg,randomPareto:()=>Zg,randomPoisson:()=>av,randomUniform:()=>Hg,randomWeibull:()=>rv,range:()=>ce.A,rank:()=>fe,reduce:()=>Re,reverse:()=>Oe,rgb:()=>_r.Qh,ribbon:()=>oi,ribbonArrow:()=>ii,rollup:()=>I,rollups:()=>N,scaleBand:()=>fv.scaleBand,scaleDiverging:()=>fv.scaleDive
rging,scaleDivergingLog:()=>fv.scaleDivergingLog,scaleDivergingPow:()=>fv.scaleDivergingPow,scaleDivergingSqrt:()=>fv.scaleDivergingSqrt,scaleDivergingSymlog:()=>fv.scaleDivergingSymlog,scaleIdentity:()=>fv.scaleIdentity,scaleImplicit:()=>fv.scaleImplicit,scaleLinear:()=>fv.scaleLinear,scaleLog:()=>fv.scaleLog,scaleOrdinal:()=>fv.scaleOrdinal,scalePoint:()=>fv.scalePoint,scalePow:()=>fv.scalePow,scaleQuantile:()=>fv.scaleQuantile,scaleQuantize:()=>fv.scaleQuantize,scaleRadial:()=>fv.scaleRadial,scaleSequential:()=>fv.scaleSequential,scaleSequentialLog:()=>fv.scaleSequentialLog,scaleSequentialPow:()=>fv.scaleSequentialPow,scaleSequentialQuantile:()=>fv.scaleSequentialQuantile,scaleSequentialSqrt:()=>fv.scaleSequentialSqrt,scaleSequentialSymlog:()=>fv.scaleSequentialSymlog,scaleSqrt:()=>fv.scaleSqrt,scaleSymlog:()=>fv.scaleSymlog,scaleThreshold:()=>fv.scaleThreshold,scaleTime:()=>fv.scaleTime,scaleUtc:()=>fv.scaleUtc,scan:()=>ve,schemeAccent:()=>pv,schemeBlues:()=>hy,schemeBrBG:()=>Ev,schemeBuGn:()=>$v,schemeBuPu:()=>Wv,schemeCategory10:()=>hv,schemeDark2:()=>gv,schemeGnBu:()=>Gv,schemeGreens:()=>gy,schemeGreys:()=>yy,schemeObservable10:()=>vv,schemeOrRd:()=>Xv,schemeOranges:()=>Sy,schemePRGn:()=>Rv,schemePaired:()=>yv,schemePastel1:()=>mv,schemePastel2:()=>bv,schemePiYG:()=>Tv,schemePuBu:()=>Jv,schemePuBuGn:()=>Zv,schemePuOr:()=>Mv,schemePuRd:()=>ty,schemePurples:()=>by,schemeRdBu:()=>Iv,schemeRdGy:()=>Dv,schemeRdPu:()=>ry,schemeRdYlBu:()=>Fv,schemeRdYlGn:()=>Uv,schemeReds:()=>wy,schemeSet1:()=>_v,schemeSet2:()=>wv,schemeSet3:()=>xv,schemeSpectral:()=>Bv,schemeTableau10:()=>Sv,schemeYlGn:()=>sy,schemeYlGnBu:()=>iy,schemeYlOrBr:()=>ly,schemeYlOrRd:()=>fy,select:()=>bn,selectAll:()=>qy,selection:()=>mn,selector:()=>at,selectorAll:()=>lt,shuffle:()=>ye,shuffler:()=>me,some:()=>Ae,sort:()=>U.Ay,stack:()=>l_,stackOffsetDiverging:()=>f_,stackOffsetExpand:()=>c_,stackOffsetNone:()=>i_,stackOffsetSilhouette:()=>d_,stackOffsetWiggle:()=>h_,stackOrderAppearance:()=>p_,stackOrderAscending:()=>v_,stackOrderDescending:()=>m_,stackOrderInsideOut:()=>b_,stackOrderNone:()=>a_,stackOrderReverse:()=>__,stratify:()=>Gp,style:()=>Lt,subset:()=>De,sum:()=>be,superset:()=>Ie,svg:()=>yu,symbol:()=>vb,symbolAsterisk:()=>Hm,symbolCircle:()=>Wm,symbolCross:()=>qm,symbolDiamond:()=>Xm,symbolDiamond2:()=>Km,symbolPlus:()=>Zm,symbolSquare:()=>Qm,symbolSquare2:()=>Jm,symbolStar:()=>rb,symbolTimes:()=>hb,symbolTriangle:()=>ib,symbolTriangle2:()=>sb,symbolWye:()=>db,symbolX:()=>hb,symbols:()=>pb,symbolsFill:()=>pb,symbolsStroke:()=>gb,text:()=>au,thresholdFreedmanDiaconis:()=>X,thresholdScott:()=>K,thresholdSturges:()=>q,tickFormat:()=>fv.tickFormat,tickIncrement:()=>H.lq,tickStep:()=>H.sG,ticks:()=>H.Ay,timeDay:()=>C_.UA,timeDays:()=>C_.NT,timeFormat:()=>M_.DC,timeFormatDefaultLocale:()=>M_.Ay,timeFormatLocale:()=>P_.A,timeFriday:()=>R_.TU,timeFridays:()=>R_.SQ,timeHour:()=>E_.Ag,timeHours:()=>E_.L6,timeInterval:()=>w_.f,timeMillisecond:()=>x_.y,timeMilliseconds:()=>x_.R,timeMinute:()=>A_.wX,timeMinutes:()=>A_.rn,timeMonday:()=>R_.AB,timeMondays:()=>R_.h8,timeMonth:()=>O_.Ui,timeMonths:()=>O_.Bz,timeParse:()=>M_.T6,timeSaturday:()=>R_.rG,timeSaturdays:()=>R_._M,timeSecond:()=>S_.R,timeSeconds:()=>S_.Y,timeSunday:()=>R_.YP,timeSundays:()=>R_.vD,timeThursday:()=>R_.Mo,timeThursdays:()=>R_.HU,timeTickInterval:()=>k_.yE,timeTicks:()=>k_.Cf,timeTuesday:()=>R_.PG,timeTuesdays:()=>R_.OK,timeWednesday:()=>R_.Gu,timeWednesdays:()=>R_.Dr,timeWeek:()=>R_.YP,timeWeeks:()=>R_.vD,timeYear:()=>T_.he,timeYears:()=>T_.Yo,timeout:()=>Yn,ti
mer:()=>$n,timerFlush:()=>Hn,transition:()=>zr,transpose:()=>_e,tree:()=>ng,treemap:()=>sg,treemapBinary:()=>ug,treemapDice:()=>zp,treemapResquarify:()=>cg,treemapSlice:()=>rg,treemapSliceDice:()=>lg,treemapSquarify:()=>ag,tsv:()=>cu,tsvFormat:()=>us,tsvFormatBody:()=>ls,tsvFormatRow:()=>fs,tsvFormatRows:()=>cs,tsvFormatValue:()=>ds,tsvParse:()=>as,tsvParseRows:()=>ss,union:()=>Le,unixDay:()=>C_.TW,unixDays:()=>C_.yE,utcDay:()=>C_.dA,utcDays:()=>C_.ou,utcFormat:()=>M_.aL,utcFriday:()=>R_.a1,utcFridays:()=>R_.Zn,utcHour:()=>E_.pz,utcHours:()=>E_.iA,utcMillisecond:()=>x_.y,utcMilliseconds:()=>x_.R,utcMinute:()=>A_.vD,utcMinutes:()=>A_.Y4,utcMonday:()=>R_.rt,utcMondays:()=>R_.ON,utcMonth:()=>O_.R6,utcMonths:()=>O_.oi,utcParse:()=>M_.GY,utcSaturday:()=>R_.c8,utcSaturdays:()=>R_.Xo,utcSecond:()=>S_.R,utcSeconds:()=>S_.Y,utcSunday:()=>R_.Hl,utcSundays:()=>R_.aZ,utcThursday:()=>R_.pT,utcThursdays:()=>R_.wr,utcTickInterval:()=>k_.lk,utcTicks:()=>k_.$Z,utcTuesday:()=>R_.sr,utcTuesdays:()=>R_.jN,utcWednesday:()=>R_.z2,utcWednesdays:()=>R_.G6,utcWeek:()=>R_.Hl,utcWeeks:()=>R_.aZ,utcYear:()=>T_.Mb,utcYears:()=>T_.jH,variance:()=>_,window:()=>Pt,xml:()=>gu,zip:()=>xe,zoom:()=>Q_,zoomIdentity:()=>V_,zoomTransform:()=>$_});var r=n(52016),o=n(12902),i=n(86037);function a(e,t){if(!((t=+t)>=0))throw new RangeError("invalid r");let n=e.length;if(!((n=Math.floor(n))>=0))throw new RangeError("invalid length");if(!n||!t)return e;const r=d(t),o=e.slice();return r(e,o,0,n,1),r(o,e,0,n,1),r(e,o,0,n,1),e}const s=l(d),u=l((function(e){const t=d(e);return(e,n,r,o,i)=>{t(e,n,(r<<=2)+0,(o<<=2)+0,i<<=2),t(e,n,r+1,o+1,i),t(e,n,r+2,o+2,i),t(e,n,r+3,o+3,i)}}));function l(e){return function(t,n,r=n){if(!((n=+n)>=0))throw new RangeError("invalid rx");if(!((r=+r)>=0))throw new RangeError("invalid ry");let{data:o,width:i,height:a}=t;if(!((i=Math.floor(i))>=0))throw new RangeError("invalid width");if(!((a=Math.floor(void 0!==a?a:o.length/i))>=0))throw new RangeError("invalid height");if(!i||!a||!n&&!r)return t;const s=n&&e(n),u=r&&e(r),l=o.slice();return s&&u?(c(s,l,o,i,a),c(s,o,l,i,a),c(s,l,o,i,a),f(u,o,l,i,a),f(u,l,o,i,a),f(u,o,l,i,a)):s?(c(s,o,l,i,a),c(s,l,o,i,a),c(s,o,l,i,a)):u&&(f(u,o,l,i,a),f(u,l,o,i,a),f(u,o,l,i,a)),t}}function c(e,t,n,r,o){for(let i=0,a=r*o;i{if(!((i-=a)>=o))return;let s=e*r[o];const u=a*e;for(let e=o,t=o+u;e{if(!((a-=s)>=i))return;let u=t*o[i];const l=s*t,c=l+s;for(let t=i,n=i+l;t=r&&++n;else{let r=-1;for(let o of e)null!=(o=t(o,++r,e))&&(o=+o)>=o&&++n}return n}function p(e){return 0|e.length}function g(e){return!(e>0)}function v(e){return"object"!==typeof e||"length"in e?e:Array.from(e)}function y(...e){const t="function"===typeof e[e.length-1]&&function(e){return t=>e(...t)}(e.pop()),n=(e=e.map(v)).map(p),r=e.length-1,o=new Array(r+1).fill(0),i=[];if(r<0||n.some(g))return i;for(;;){i.push(o.map(((t,n)=>e[n][t])));let a=r;for(;++o[a]===n[a];){if(0===a)return t?i.map(t):i;o[a--]=0}}}function m(e,t){var n=0,r=0;return Float64Array.from(e,void 0===t?e=>n+=+e||0:o=>n+=+t(o,r++,e)||0)}var b=n(41180);function _(e,t){let n,r=0,o=0,i=0;if(void 0===t)for(let a of e)null!=a&&(a=+a)>=a&&(n=a-o,o+=n/++r,i+=n*(a-o));else{let a=-1;for(let s of e)null!=(s=t(s,++a,e))&&(s=+s)>=s&&(n=s-o,o+=n/++r,i+=n*(s-o))}if(r>1)return i/(r-1)}function w(e,t){const n=_(e,t);return n?Math.sqrt(n):n}function x(e,t){let n,r;if(void 0===t)for(const o of e)null!=o&&(void 
0===n?o>=o&&(n=r=o):(n>o&&(n=o),r=i&&(n=r=i):(n>i&&(n=i),r0){for(i=e[--o];o>0&&(t=i,n=e[--o],i=t+n,r=n-(i-t),!r););o>0&&(r<0&&e[o-1]<0||r>0&&e[o-1]>0)&&(n=2*r,t=i+n,n==t-i&&(i=t))}return i}}function A(e,t){const n=new S;if(void 0===t)for(let r of e)(r=+r)&&n.add(r);else{let r=-1;for(let o of e)(o=+t(o,++r,e))&&n.add(o)}return+n}function E(e,t){const n=new S;let r=-1;return Float64Array.from(e,void 0===t?e=>n.add(+e||0):o=>n.add(+t(o,++r,e)||0))}var C=n(24119);function R(e){return e}function O(e,...t){return j(e,R,R,t)}function T(e,...t){return j(e,Array.from,R,t)}function k(e,t){for(let n=1,r=t.length;ne.pop().map((([t,n])=>[...e,t,n]))));return e}function M(e,...t){return k(T(e,...t),t)}function P(e,t,...n){return k(N(e,t,...n),n)}function I(e,t,...n){return j(e,R,t,n)}function N(e,t,...n){return j(e,Array.from,t,n)}function D(e,...t){return j(e,R,F,t)}function L(e,...t){return j(e,Array.from,F,t)}function F(e){if(1!==e.length)throw new Error("duplicate key");return e[0]}function j(e,t,n,r){return function e(o,i){if(i>=r.length)return n(o);const a=new C.B,s=r[i++];let u=-1;for(const t of o){const e=s(t,++u,o),n=a.get(e);n?n.push(t):a.set(e,[t])}for(const[t,n]of a)a.set(t,e(n,i));return t(a)}(e,0)}var U=n(68886);function z(e,t,n){return(2!==t.length?(0,U.Ay)(I(e,t,n),(([e,t],[n,r])=>(0,o.A)(t,r)||(0,o.A)(e,n))):(0,U.Ay)(O(e,n),(([e,n],[r,i])=>t(n,i)||(0,o.A)(e,r)))).map((([e])=>e))}var B=Array.prototype,V=B.slice;B.map;function $(e){return()=>e}var H=n(6946);function W(e,t,n){let r;for(;;){const o=(0,H.lq)(e,t,n);if(o===r||0===o||!isFinite(o))return[e,t];o>0?(e=Math.floor(e/o)*o,t=Math.ceil(t/o)*o):o<0&&(e=Math.ceil(e*o)/o,t=Math.floor(t*o)/o),r=o}}function q(e){return Math.max(1,Math.ceil(Math.log(h(e))/Math.LN2)+1)}function G(){var e=R,t=x,n=q;function o(o){Array.isArray(o)||(o=Array.from(o));var i,a,s,u=o.length,l=new Array(u);for(i=0;i=d)if(e>=d&&t===x){const e=(0,H.lq)(f,d,n);isFinite(e)&&(e>0?d=(Math.floor(d/e)+1)*e:e<0&&(d=(Math.ceil(d*-e)+1)/-e))}else h.pop()}for(var p=h.length,g=0,v=p;h[g]<=f;)++g;for(;h[v-1]>d;)--v;(g||v0?h[i-1]:f,y.x1=i0)for(i=0;i=o&&(++n,r+=o);else{let o=-1;for(let i of e)null!=(i=t(i,++o,e))&&(i=+i)>=i&&(++n,r+=i)}if(n)return r/n}function ee(e,t){return(0,Y.Ay)(e,.5,t)}function te(e,t){return(0,Y.AX)(e,.5,t)}function ne(e){return Array.from(function*(e){for(const t of e)yield*t}(e))}var re=n(49216),oe=n(98026);function ie(e,t){const n=new C.B;if(void 0===t)for(let i of e)null!=i&&i>=i&&n.set(i,(n.get(i)||0)+1);else{let r=-1;for(let o of e)null!=(o=t(o,++r,e))&&o>=o&&n.set(o,(n.get(o)||0)+1)}let r,o=0;for(const[i,a]of n)a>o&&(o=a,r=i);return r}function ae(e,t=se){const n=[];let r,o=!1;for(const i of e)o&&n.push(t(r,i)),r=i,o=!0;return n}function se(e,t){return[e,t]}var ue=n(78314),le=n(15671),ce=n(6561);function fe(e,t=o.A){if("function"!==typeof e[Symbol.iterator])throw new TypeError("values is not iterable");let n=Array.from(e);const r=new Float64Array(n.length);2!==t.length&&(n=n.map(t),t=o.A);const i=(e,r)=>t(n[e],n[r]);let a,s;return(e=Uint32Array.from(n,((e,t)=>t))).sort(t===o.A?(e,t)=>(0,U.o2)(n[e],n[t]):(0,U.JC)(i)),e.forEach(((e,t)=>{const n=i(e,void 0===a?e:a);n>=0?((void 0===a||n>0)&&(a=e,s=t),r[e]=s):r[e]=NaN})),r}function de(e,t=o.A){let n,r=!1;if(1===t.length){let i;for(const a of e){const e=t(a);(r?(0,o.A)(e,i)<0:0===(0,o.A)(e,e))&&(n=a,i=e,r=!0)}}else for(const o of e)(r?t(o,n)<0:0===t(o,o))&&(n=o,r=!0);return n}function he(e,t=o.A){if(1===t.length)return(0,oe.A)(e,t);let n,r=-1,i=-1;for(const o of 
e)++i,(r<0?0===t(o,o):t(o,n)<0)&&(n=o,r=i);return r}var pe=n(36205);function ge(e,t=o.A){if(1===t.length)return(0,Q.A)(e,t);let n,r=-1,i=-1;for(const o of e)++i,(r<0?0===t(o,o):t(o,n)>0)&&(n=o,r=i);return r}function ve(e,t){const n=he(e,t);return n<0?void 0:n}const ye=me(Math.random);function me(e){return function(t,n=0,r=t.length){let o=r-(n=+n);for(;o;){const r=e()*o--|0,i=t[o+n];t[o+n]=t[r+n],t[r+n]=i}return t}}function be(e,t){let n=0;if(void 0===t)for(let r of e)(r=+r)&&(n+=r);else{let r=-1;for(let o of e)(o=+t(o,++r,e))&&(n+=o)}return n}function _e(e){if(!(o=e.length))return[];for(var t=-1,n=(0,re.A)(e,we),r=new Array(n);++tt(n,r,e)))}function Re(e,t,n){if("function"!==typeof t)throw new TypeError("reducer is not a function");const r=e[Symbol.iterator]();let o,i,a=-1;if(arguments.length<3){if(({done:o,value:n}=r.next()),o)return;++a}for(;({done:o,value:i}=r.next()),!o;)n=t(n,i,++a,e);return n}function Oe(e){if("function"!==typeof e[Symbol.iterator])throw new TypeError("values is not iterable");return Array.from(e).reverse()}function Te(e,...t){e=new C.v(e);for(const n of t)for(const t of n)e.delete(t);return e}function ke(e,t){const n=t[Symbol.iterator](),r=new C.v;for(const o of e){if(r.has(o))return!1;let e,t;for(;({value:e,done:t}=n.next())&&!t;){if(Object.is(o,e))return!1;r.add(e)}}return!0}function Me(e,...t){e=new C.v(e),t=t.map(Pe);e:for(const n of e)for(const r of t)if(!r.has(n)){e.delete(n);continue e}return e}function Pe(e){return e instanceof C.v?e:new C.v(e)}function Ie(e,t){const n=e[Symbol.iterator](),r=new Set;for(const o of t){const e=Ne(o);if(r.has(e))continue;let t,i;for(;({value:t,done:i}=n.next());){if(i)return!1;const n=Ne(t);if(r.add(n),Object.is(e,n))break}}return!0}function Ne(e){return null!==e&&"object"===typeof e?e.valueOf():e}function De(e,t){return Ie(t,e)}function Le(...e){const t=new C.v;for(const n of e)for(const e of n)t.add(e);return t}function Fe(e){return e}var je=1,Ue=2,ze=3,Be=4,Ve=1e-6;function $e(e){return"translate("+e+",0)"}function He(e){return"translate(0,"+e+")"}function We(e){return t=>+e(t)}function qe(e,t){return t=Math.max(0,e.bandwidth()-2*t)/2,e.round()&&(t=Math.round(t)),n=>+e(n)+t}function Ge(){return!this.__axis}function Ye(e,t){var n=[],r=null,o=null,i=6,a=6,s=3,u="undefined"!==typeof window&&window.devicePixelRatio>1?0:.5,l=e===je||e===Be?-1:1,c=e===Be||e===Ue?"x":"y",f=e===je||e===ze?$e:He;function d(d){var h=null==r?t.ticks?t.ticks.apply(t,n):t.domain():r,p=null==o?t.tickFormat?t.tickFormat.apply(t,n):Fe:o,g=Math.max(i,0)+s,v=t.range(),y=+v[0]+u,m=+v[v.length-1]+u,b=(t.bandwidth?qe:We)(t.copy(),u),_=d.selection?d.selection():d,w=_.selectAll(".domain").data([null]),x=_.selectAll(".tick").data(h,t).order(),S=x.exit(),A=x.enter().append("g").attr("class","tick"),E=x.select("line"),C=x.select("text");w=w.merge(w.enter().insert("path",".tick").attr("class","domain").attr("stroke","currentColor")),x=x.merge(A),E=E.merge(A.append("line").attr("stroke","currentColor").attr(c+"2",l*i)),C=C.merge(A.append("text").attr("fill","currentColor").attr(c,l*g).attr("dy",e===je?"0em":e===ze?"0.71em":"0.32em")),d!==_&&(w=w.transition(d),x=x.transition(d),E=E.transition(d),C=C.transition(d),S=S.transition(d).attr("opacity",Ve).attr("transform",(function(e){return isFinite(e=b(e))?f(e+u):this.getAttribute("transform")})),A.attr("opacity",Ve).attr("transform",(function(e){var t=this.parentNode.__axis;return 
f((t&&isFinite(t=t(e))?t:b(e))+u)}))),S.remove(),w.attr("d",e===Be||e===Ue?a?"M"+l*a+","+y+"H"+u+"V"+m+"H"+l*a:"M"+u+","+y+"V"+m:a?"M"+y+","+l*a+"V"+u+"H"+m+"V"+l*a:"M"+y+","+u+"H"+m),x.attr("opacity",1).attr("transform",(function(e){return f(b(e)+u)})),E.attr(c+"2",l*i),C.attr(c,l*g).text(p),_.filter(Ge).attr("fill","none").attr("font-size",10).attr("font-family","sans-serif").attr("text-anchor",e===Ue?"start":e===Be?"end":"middle"),_.each((function(){this.__axis=b}))}return d.scale=function(e){return arguments.length?(t=e,d):t},d.ticks=function(){return n=Array.from(arguments),d},d.tickArguments=function(e){return arguments.length?(n=null==e?[]:Array.from(e),d):n.slice()},d.tickValues=function(e){return arguments.length?(r=null==e?null:Array.from(e),d):r&&r.slice()},d.tickFormat=function(e){return arguments.length?(o=e,d):o},d.tickSize=function(e){return arguments.length?(i=a=+e,d):i},d.tickSizeInner=function(e){return arguments.length?(i=+e,d):i},d.tickSizeOuter=function(e){return arguments.length?(a=+e,d):a},d.tickPadding=function(e){return arguments.length?(s=+e,d):s},d.offset=function(e){return arguments.length?(u=+e,d):u},d}function Xe(e){return Ye(je,e)}function Ke(e){return Ye(Ue,e)}function Ze(e){return Ye(ze,e)}function Qe(e){return Ye(Be,e)}var Je={value:()=>{}};function et(){for(var e,t=0,n=arguments.length,r={};t=0&&(t=e.slice(n+1),e=e.slice(0,n)),e&&!r.hasOwnProperty(e))throw new Error("unknown type: "+e);return{type:e,name:t}}))),a=-1,s=i.length;if(!(arguments.length<2)){if(null!=t&&"function"!==typeof t)throw new Error("invalid callback: "+t);for(;++a0)for(var n,r,o=new Array(n),i=0;it?1:e>=t?0:NaN}yt.prototype={constructor:yt,appendChild:function(e){return this._parent.insertBefore(e,this._next)},insertBefore:function(e,t){return this._parent.insertBefore(e,t)},querySelector:function(e){return this._parent.querySelector(e)},querySelectorAll:function(e){return this._parent.querySelectorAll(e)}};var St="http://www.w3.org/1999/xhtml";const At={svg:"http://www.w3.org/2000/svg",xhtml:St,xlink:"http://www.w3.org/1999/xlink",xml:"http://www.w3.org/XML/1998/namespace",xmlns:"http://www.w3.org/2000/xmlns/"};function Et(e){var t=e+="",n=t.indexOf(":");return n>=0&&"xmlns"!==(t=e.slice(0,n))&&(e=e.slice(n+1)),At.hasOwnProperty(t)?{space:At[t],local:e}:e}function Ct(e){return function(){this.removeAttribute(e)}}function Rt(e){return function(){this.removeAttributeNS(e.space,e.local)}}function Ot(e,t){return function(){this.setAttribute(e,t)}}function Tt(e,t){return function(){this.setAttributeNS(e.space,e.local,t)}}function kt(e,t){return function(){var n=t.apply(this,arguments);null==n?this.removeAttribute(e):this.setAttribute(e,n)}}function Mt(e,t){return function(){var n=t.apply(this,arguments);null==n?this.removeAttributeNS(e.space,e.local):this.setAttributeNS(e.space,e.local,n)}}function Pt(e){return e.ownerDocument&&e.ownerDocument.defaultView||e.document&&e||e.defaultView}function It(e){return function(){this.style.removeProperty(e)}}function Nt(e,t,n){return function(){this.style.setProperty(e,t,n)}}function Dt(e,t,n){return function(){var r=t.apply(this,arguments);null==r?this.style.removeProperty(e):this.style.setProperty(e,r,n)}}function Lt(e,t){return e.style.getPropertyValue(t)||Pt(e).getComputedStyle(e,null).getPropertyValue(t)}function Ft(e){return function(){delete this[e]}}function jt(e,t){return function(){this[e]=t}}function Ut(e,t){return function(){var n=t.apply(this,arguments);null==n?delete this[e]:this[e]=n}}function zt(e){return 
e.trim().split(/^|\s+/)}function Bt(e){return e.classList||new Vt(e)}function Vt(e){this._node=e,this._names=zt(e.getAttribute("class")||"")}function $t(e,t){for(var n=Bt(e),r=-1,o=t.length;++r=0&&(this._names.splice(t,1),this._node.setAttribute("class",this._names.join(" ")))},contains:function(e){return this._names.indexOf(e)>=0}};var gn=[null];function vn(e,t){this._groups=e,this._parents=t}function yn(){return new vn([[document.documentElement]],gn)}vn.prototype=yn.prototype={constructor:vn,select:function(e){"function"!==typeof e&&(e=at(e));for(var t=this._groups,n=t.length,r=new Array(n),o=0;o=_&&(_=b+1);!(m=v[_])&&++_=0;)(r=o[i])&&(a&&4^r.compareDocumentPosition(a)&&a.parentNode.insertBefore(r,a),a=r);return this},sort:function(e){function t(t,n){return t&&n?e(t.__data__,n.__data__):!t-!n}e||(e=xt);for(var n=this._groups,r=n.length,o=new Array(r),i=0;i1?this.each((null==t?It:"function"===typeof t?Dt:Nt)(e,t,null==n?"":n)):Lt(this.node(),e)},property:function(e,t){return arguments.length>1?this.each((null==t?Ft:"function"===typeof t?Ut:jt)(e,t)):this.node()[e]},classed:function(e,t){var n=zt(e+"");if(arguments.length<2){for(var r=Bt(this.node()),o=-1,i=n.length;++o=0&&(t=e.slice(n+1),e=e.slice(0,n)),{type:e,name:t}}))}(e+""),a=i.length;if(!(arguments.length<2)){for(s=t?fn:cn,r=0;r=0&&t._call.call(void 0,e),t=t._next;--Mn}function Wn(){Ln=(Dn=jn.now())+Fn,Mn=Pn=0;try{Hn()}finally{Mn=0,function(){var e,t,n=Tn,r=1/0;for(;n;)n._call?(r>n._time&&(r=n._time),e=n,n=n._next):(t=n._next,n._next=null,n=e?e._next=t:Tn=t);kn=e,Gn(r)}(),Ln=0}}function qn(){var e=jn.now(),t=e-Dn;t>Nn&&(Fn-=t,Dn=e)}function Gn(e){Mn||(Pn&&(Pn=clearTimeout(Pn)),e-Ln>24?(e<1/0&&(Pn=setTimeout(Wn,e-jn.now()-Fn)),In&&(In=clearInterval(In))):(In||(Dn=jn.now(),In=setInterval(qn,Nn)),Mn=1,Un(Wn)))}function Yn(e,t,n){var r=new Vn;return t=null==t?0:+t,r.restart((n=>{r.stop(),e(n+t)}),t,n),r}Vn.prototype=$n.prototype={constructor:Vn,restart:function(e,t,n){if("function"!==typeof e)throw new TypeError("callback is not a function");n=(null==n?zn():+n)+(null==t?0:+t),this._next||kn===this||(kn?kn._next=this:Tn=this,kn=this),this._call=e,this._time=n,Gn()},stop:function(){this._call&&(this._call=null,this._time=1/0,Gn())}};var Xn=ot("start","end","cancel","interrupt"),Kn=[],Zn=0,Qn=1,Jn=2,er=3,tr=4,nr=5,rr=6;function or(e,t,n,r,o,i){var a=e.__transition;if(a){if(n in a)return}else e.__transition={};!function(e,t,n){var r,o=e.__transition;function i(e){n.state=Qn,n.timer.restart(a,n.delay,n.time),n.delay<=e&&a(e-n.delay)}function a(i){var l,c,f,d;if(n.state!==Qn)return u();for(l in o)if((d=o[l]).name===n.name){if(d.state===er)return Yn(a);d.state===tr?(d.state=rr,d.timer.stop(),d.on.call("interrupt",e,e.__data__,d.index,d.group),delete o[l]):+lZn)throw new Error("too late; already scheduled");return n}function ar(e,t){var n=sr(e,t);if(n.state>er)throw new Error("too late; already running");return n}function sr(e,t){var n=e.__transition;if(!n||!(n=n[t]))throw new Error("transition not found");return n}function ur(e,t){var n,r,o,i=e.__transition,a=!0;if(i){for(o in t=null==t?null:t+"",i)(n=i[o]).name===t?(r=n.state>Jn&&n.state180?t+=360:t-e>180&&(e+=360),i.push({i:n.push(o(n)+"rotate(",null,r)-2,x:(0,cr.A)(e,t)})):t&&n.push(o(n)+"rotate("+t+r)}(i.rotate,a.rotate,s,u),function(e,t,n,i){e!==t?i.push({i:n.push(o(n)+"skewX(",null,r)-2,x:(0,cr.A)(e,t)}):t&&n.push(o(n)+"skewX("+t+r)}(i.skewX,a.skewX,s,u),function(e,t,n,r,i,a){if(e!==n||t!==r){var 
s=i.push(o(i)+"scale(",null,",",null,")");a.push({i:s-4,x:(0,cr.A)(e,n)},{i:s-2,x:(0,cr.A)(t,r)})}else 1===n&&1===r||i.push(o(i)+"scale("+n+","+r+")")}(i.scaleX,i.scaleY,a.scaleX,a.scaleY,s,u),i=a=null,function(e){for(var t,n=-1,r=u.length;++n=0&&(e=e.slice(0,t)),!e||"start"===e}))}(t)?ir:ar;return function(){var a=i(this,e),s=a.on;s!==r&&(o=(r=s).copy()).on(t,n),a.on=o}}(n,e,t))},attr:function(e,t){var n=Et(e),r="transform"===n?vr:Sr;return this.attrTween(e,"function"===typeof t?(n.local?Tr:Or)(n,r,br(this,"attr."+e,t)):null==t?(n.local?Er:Ar)(n):(n.local?Rr:Cr)(n,r,t))},attrTween:function(e,t){var n="attr."+e;if(arguments.length<2)return(n=this.tween(n))&&n._value;if(null==t)return this.tween(n,null);if("function"!==typeof t)throw new Error;var r=Et(e);return this.tween(n,(r.local?kr:Mr)(r,t))},style:function(e,t,n){var r="transform"===(e+="")?gr:Sr;return null==t?this.styleTween(e,function(e,t){var n,r,o;return function(){var i=Lt(this,e),a=(this.style.removeProperty(e),Lt(this,e));return i===a?null:i===n&&a===r?o:o=t(n=i,r=a)}}(e,r)).on("end.style."+e,Fr(e)):"function"===typeof t?this.styleTween(e,function(e,t,n){var r,o,i;return function(){var a=Lt(this,e),s=n(this),u=s+"";return null==s&&(this.style.removeProperty(e),u=s=Lt(this,e)),a===u?null:a===r&&u===o?i:(o=u,i=t(r=a,s))}}(e,r,br(this,"style."+e,t))).each(function(e,t){var n,r,o,i,a="style."+t,s="end."+a;return function(){var u=ar(this,e),l=u.on,c=null==u.value[a]?i||(i=Fr(t)):void 0;l===n&&o===c||(r=(n=l).copy()).on(s,o=c),u.on=r}}(this._id,e)):this.styleTween(e,function(e,t,n){var r,o,i=n+"";return function(){var a=Lt(this,e);return a===i?null:a===r?o:o=t(r=a,n)}}(e,r,t),n).on("end.style."+e,null)},styleTween:function(e,t,n){var r="style."+(e+="");if(arguments.length<2)return(r=this.tween(r))&&r._value;if(null==t)return this.tween(r,null);if("function"!==typeof t)throw new Error;return this.tween(r,function(e,t,n){var r,o;function i(){var i=t.apply(this,arguments);return i!==o&&(r=(o=i)&&function(e,t,n){return function(r){this.style.setProperty(e,t.call(this,r),n)}}(e,i,n)),r}return i._value=t,i}(e,t,null==n?"":n))},text:function(e){return this.tween("text","function"===typeof e?function(e){return function(){var t=e(this);this.textContent=null==t?"":t}}(br(this,"text",e)):function(e){return function(){this.textContent=e}}(null==e?"":e+""))},textTween:function(e){var t="text";if(arguments.length<1)return(t=this.tween(t))&&t._value;if(null==e)return this.tween(t,null);if("function"!==typeof e)throw new Error;return this.tween(t,function(e){var t,n;function r(){var r=e.apply(this,arguments);return r!==n&&(t=(n=r)&&function(e){return function(t){this.textContent=e.call(this,t)}}(r)),t}return r._value=e,r}(e))},remove:function(){return this.on("end.remove",function(e){return function(){var t=this.parentNode;for(var n in this.__transition)if(+n!==e)return;t&&t.removeChild(this)}}(this._id))},tween:function(e,t){var n=this._id;if(e+="",arguments.length<2){for(var r,o=sr(this.node(),n).tween,i=0,a=o.length;iQn&&n.name===t)return new Ur([[e]],Yr,t,+r);return null}const Kr=e=>()=>e;function Zr(e,{sourceEvent:t,target:n,selection:r,mode:o,dispatch:i}){Object.defineProperties(this,{type:{value:e,enumerable:!0,configurable:!0},sourceEvent:{value:t,enumerable:!0,configurable:!0},target:{value:n,enumerable:!0,configurable:!0},selection:{value:r,enumerable:!0,configurable:!0},mode:{value:o,enumerable:!0,configurable:!0},_:{value:i}})}function Qr(e){e.preventDefault(),e.stopImmediatePropagation()}var 
Jr={name:"drag"},eo={name:"space"},to={name:"handle"},no={name:"center"};const{abs:ro,max:oo,min:io}=Math;function ao(e){return[+e[0],+e[1]]}function so(e){return[ao(e[0]),ao(e[1])]}var uo={name:"x",handles:["w","e"].map(yo),input:function(e,t){return null==e?null:[[+e[0],t[0][1]],[+e[1],t[1][1]]]},output:function(e){return e&&[e[0][0],e[1][0]]}},lo={name:"y",handles:["n","s"].map(yo),input:function(e,t){return null==e?null:[[t[0][0],+e[0]],[t[1][0],+e[1]]]},output:function(e){return e&&[e[0][1],e[1][1]]}},co={name:"xy",handles:["n","w","e","s","nw","ne","sw","se"].map(yo),input:function(e){return null==e?null:so(e)},output:function(e){return e}},fo={overlay:"crosshair",selection:"move",n:"ns-resize",e:"ew-resize",s:"ns-resize",w:"ew-resize",nw:"nwse-resize",ne:"nesw-resize",se:"nwse-resize",sw:"nesw-resize"},ho={e:"w",w:"e",nw:"ne",ne:"nw",se:"sw",sw:"se"},po={n:"s",s:"n",nw:"sw",ne:"se",se:"ne",sw:"nw"},go={overlay:1,selection:1,n:null,e:1,s:null,w:-1,nw:-1,ne:1,se:1,sw:-1},vo={overlay:1,selection:1,n:-1,e:null,s:1,w:null,nw:-1,ne:-1,se:1,sw:1};function yo(e){return{type:e}}function mo(e){return!e.ctrlKey&&!e.button}function bo(){var e=this.ownerSVGElement||this;return e.hasAttribute("viewBox")?[[(e=e.viewBox.baseVal).x,e.y],[e.x+e.width,e.y+e.height]]:[[0,0],[e.width.baseVal.value,e.height.baseVal.value]]}function _o(){return navigator.maxTouchPoints||"ontouchstart"in this}function wo(e){for(;!e.__brush;)if(!(e=e.parentNode))return;return e.__brush}function xo(e){var t=e.__brush;return t?t.dim.output(t.selection):null}function So(){return Co(uo)}function Ao(){return Co(lo)}function Eo(){return Co(co)}function Co(e){var t,n=bo,r=mo,o=_o,i=!0,a=ot("start","brush","end"),s=6;function u(t){var n=t.property("__brush",g).selectAll(".overlay").data([yo("overlay")]);n.enter().append("rect").attr("class","overlay").attr("pointer-events","all").attr("cursor",fo.overlay).merge(n).each((function(){var e=wo(this).extent;bn(this).attr("x",e[0][0]).attr("y",e[0][1]).attr("width",e[1][0]-e[0][0]).attr("height",e[1][1]-e[0][1])})),t.selectAll(".selection").data([yo("selection")]).enter().append("rect").attr("class","selection").attr("cursor",fo.selection).attr("fill","#777").attr("fill-opacity",.3).attr("stroke","#fff").attr("shape-rendering","crispEdges");var r=t.selectAll(".handle").data(e.handles,(function(e){return e.type}));r.exit().remove(),r.enter().append("rect").attr("class",(function(e){return"handle handle--"+e.type})).attr("cursor",(function(e){return fo[e.type]})),t.each(l).attr("fill","none").attr("pointer-events","all").on("mousedown.brush",d).filter(o).on("touchstart.brush",d).on("touchmove.brush",h).on("touchend.brush touchcancel.brush",p).style("touch-action","none").style("-webkit-tap-highlight-color","rgba(0,0,0,0)")}function l(){var e=bn(this),t=wo(this).selection;t?(e.selectAll(".selection").style("display",null).attr("x",t[0][0]).attr("y",t[0][1]).attr("width",t[1][0]-t[0][0]).attr("height",t[1][1]-t[0][1]),e.selectAll(".handle").style("display",null).attr("x",(function(e){return"e"===e.type[e.type.length-1]?t[1][0]-s/2:t[0][0]-s/2})).attr("y",(function(e){return"s"===e.type[0]?t[1][1]-s/2:t[0][1]-s/2})).attr("width",(function(e){return"n"===e.type||"s"===e.type?t[1][0]-t[0][0]+s:s})).attr("height",(function(e){return"e"===e.type||"w"===e.type?t[1][1]-t[0][1]+s:s}))):e.selectAll(".selection,.handle").style("display","none").attr("x",null).attr("y",null).attr("width",null).attr("height",null)}function c(e,t,n){var r=e.__brush.emitter;return!r||n&&r.clean?new f(e,t,n):r}function 
f(e,t,n){this.that=e,this.args=t,this.state=e.__brush,this.active=0,this.clean=n}function d(n){if((!t||n.touches)&&r.apply(this,arguments)){var o,a,s,u,f,d,h,p,g,v,y,m=this,b=n.target.__data__.type,_="selection"===(i&&n.metaKey?b="overlay":b)?Jr:i&&n.altKey?no:to,w=e===lo?null:go[b],x=e===uo?null:vo[b],S=wo(m),A=S.extent,E=S.selection,C=A[0][0],R=A[0][1],O=A[1][0],T=A[1][1],k=0,M=0,P=w&&x&&i&&n.shiftKey,I=Array.from(n.touches||[n],(e=>{const t=e.identifier;return(e=On(e,m)).point0=e.slice(),e.identifier=t,e}));ur(m);var N=c(m,arguments,!0).beforestart();if("overlay"===b){E&&(g=!0);const t=[I[0],I[1]||I[0]];S.selection=E=[[o=e===lo?C:io(t[0][0],t[1][0]),s=e===uo?R:io(t[0][1],t[1][1])],[f=e===lo?O:oo(t[0][0],t[1][0]),h=e===uo?T:oo(t[0][1],t[1][1])]],I.length>1&&U(n)}else o=E[0][0],s=E[0][1],f=E[1][0],h=E[1][1];a=o,u=s,d=f,p=h;var D=bn(m).attr("pointer-events","none"),L=D.selectAll(".overlay").attr("cursor",fo[b]);if(n.touches)N.moved=j,N.ended=z;else{var F=bn(n.view).on("mousemove.brush",j,!0).on("mouseup.brush",z,!0);i&&F.on("keydown.brush",(function(e){switch(e.keyCode){case 16:P=w&&x;break;case 18:_===to&&(w&&(f=d-k*w,o=a+k*w),x&&(h=p-M*x,s=u+M*x),_=no,U(e));break;case 32:_!==to&&_!==no||(w<0?f=d-k:w>0&&(o=a-k),x<0?h=p-M:x>0&&(s=u-M),_=eo,L.attr("cursor",fo.selection),U(e));break;default:return}Qr(e)}),!0).on("keyup.brush",(function(e){switch(e.keyCode){case 16:P&&(v=y=P=!1,U(e));break;case 18:_===no&&(w<0?f=d:w>0&&(o=a),x<0?h=p:x>0&&(s=u),_=to,U(e));break;case 32:_===eo&&(e.altKey?(w&&(f=d-k*w,o=a+k*w),x&&(h=p-M*x,s=u+M*x),_=no):(w<0?f=d:w>0&&(o=a),x<0?h=p:x>0&&(s=u),_=to),L.attr("cursor",fo[b]),U(e));break;default:return}Qr(e)}),!0),An(n.view)}l.call(m),N.start(n,_.name)}function j(e){for(const t of e.changedTouches||[e])for(const e of I)e.identifier===t.identifier&&(e.cur=On(t,m));if(P&&!v&&!y&&1===I.length){const e=I[0];ro(e.cur[0]-e[0])>ro(e.cur[1]-e[1])?y=!0:v=!0}for(const t of I)t.cur&&(t[0]=t.cur[0],t[1]=t.cur[1]);g=!0,Qr(e),U(e)}function U(e){const t=I[0],n=t.point0;var r;switch(k=t[0]-n[0],M=t[1]-n[1],_){case eo:case Jr:w&&(k=oo(C-o,io(O-f,k)),a=o+k,d=f+k),x&&(M=oo(R-s,io(T-h,M)),u=s+M,p=h+M);break;case to:I[1]?(w&&(a=oo(C,io(O,I[0][0])),d=oo(C,io(O,I[1][0])),w=1),x&&(u=oo(R,io(T,I[0][1])),p=oo(R,io(T,I[1][1])),x=1)):(w<0?(k=oo(C-o,io(O-o,k)),a=o+k,d=f):w>0&&(k=oo(C-f,io(O-f,k)),a=o,d=f+k),x<0?(M=oo(R-s,io(T-s,M)),u=s+M,p=h):x>0&&(M=oo(R-h,io(T-h,M)),u=s,p=h+M));break;case no:w&&(a=oo(C,io(O,o-k*w)),d=oo(C,io(O,f+k*w))),x&&(u=oo(R,io(T,s-M*x)),p=oo(R,io(T,h+M*x)))}de+n))}function Lo(){return Uo(!1,!1)}function Fo(){return Uo(!1,!0)}function jo(){return Uo(!0,!1)}function Uo(e,t){var n=0,r=null,o=null,i=null;function a(a){var s,u=a.length,l=new Array(u),c=Do(0,u),f=new Array(u*u),d=new Array(u),h=0;a=Float64Array.from({length:u*u},t?(e,t)=>a[t%u][t/u|0]:(e,t)=>a[t/u|0][t%u]);for(let t=0;tr(l[e],l[t])));for(const n of c){const r=t;if(e){const e=Do(1+~u,u).filter((e=>e<0?a[~e*u+n]:a[n*u+e]));o&&e.sort(((e,t)=>o(e<0?-a[~e*u+n]:a[n*u+e],t<0?-a[~t*u+n]:a[n*u+t])));for(const r of e)if(r<0){(f[~r*u+n]||(f[~r*u+n]={source:null,target:null})).target={index:n,startAngle:t,endAngle:t+=a[~r*u+n]*h,value:a[~r*u+n]}}else{(f[n*u+r]||(f[n*u+r]={source:null,target:null})).source={index:n,startAngle:t,endAngle:t+=a[n*u+r]*h,value:a[n*u+r]}}d[n]={index:n,startAngle:r,endAngle:t,value:l[n]}}else{const e=Do(0,u).filter((e=>a[n*u+e]||a[e*u+n]));o&&e.sort(((e,t)=>o(a[n*u+e],a[n*u+t])));for(const r of e){let e;if(n=0))throw new Error(`invalid digits: ${e}`);if(t>15)return Ho;const n=10**t;return 
function(e){this._+=e[0];for(let t=1,r=e.length;tVo)if(Math.abs(c*s-u*l)>Vo&&o){let d=n-i,h=r-a,p=s*s+u*u,g=d*d+h*h,v=Math.sqrt(p),y=Math.sqrt(f),m=o*Math.tan((zo-Math.acos((p+f-g)/(2*v*y)))/2),b=m/y,_=m/v;Math.abs(b-1)>Vo&&this._append`L${e+b*l},${t+b*c}`,this._append`A${o},${o},0,0,${+(c*d>l*h)},${this._x1=e+_*s},${this._y1=t+_*u}`}else this._append`L${this._x1=e},${this._y1=t}`;else;}arc(e,t,n,r,o,i){if(e=+e,t=+t,i=!!i,(n=+n)<0)throw new Error(`negative radius: ${n}`);let a=n*Math.cos(r),s=n*Math.sin(r),u=e+a,l=t+s,c=1^i,f=i?r-o:o-r;null===this._x1?this._append`M${u},${l}`:(Math.abs(this._x1-u)>Vo||Math.abs(this._y1-l)>Vo)&&this._append`L${u},${l}`,n&&(f<0&&(f=f%Bo+Bo),f>$o?this._append`A${n},${n},0,1,${c},${e-a},${t-s}A${n},${n},0,1,${c},${this._x1=u},${this._y1=l}`:f>Vo&&this._append`A${n},${n},0,${+(f>=zo)},${c},${this._x1=e+n*Math.cos(o)},${this._y1=t+n*Math.sin(o)}`)}rect(e,t,n,r){this._append`M${this._x0=this._x1=+e},${this._y0=this._y1=+t}h${n=+n}v${+r}h${-n}Z`}toString(){return this._}}function qo(){return new Wo}function Go(e=3){return new Wo(+e)}qo.prototype=Wo.prototype;var Yo=Array.prototype.slice;function Xo(e){return function(){return e}}function Ko(e){return e.source}function Zo(e){return e.target}function Qo(e){return e.radius}function Jo(e){return e.startAngle}function ei(e){return e.endAngle}function ti(){return 0}function ni(){return 10}function ri(e){var t=Ko,n=Zo,r=Qo,o=Qo,i=Jo,a=ei,s=ti,u=null;function l(){var l,c=t.apply(this,arguments),f=n.apply(this,arguments),d=s.apply(this,arguments)/2,h=Yo.call(arguments),p=+r.apply(this,(h[0]=c,h)),g=i.apply(this,h)-Mo,v=a.apply(this,h)-Mo,y=+o.apply(this,(h[0]=f,h)),m=i.apply(this,h)-Mo,b=a.apply(this,h)-Mo;if(u||(u=l=qo()),d>No&&(Ro(v-g)>2*d+No?v>g?(g+=d,v-=d):(g-=d,v+=d):g=v=(g+v)/2,Ro(b-m)>2*d+No?b>m?(m+=d,b-=d):(m-=d,b+=d):m=b=(m+b)/2),u.moveTo(p*Oo(g),p*To(g)),u.arc(0,0,p,g,v),g!==m||v!==b)if(e){var _=y-+e.apply(this,arguments),w=(m+b)/2;u.quadraticCurveTo(0,0,_*Oo(m),_*To(m)),u.lineTo(y*Oo(w),y*To(w)),u.lineTo(_*Oo(b),_*To(b))}else u.quadraticCurveTo(0,0,y*Oo(m),y*To(m)),u.arc(0,0,y,m,b);if(u.quadraticCurveTo(0,0,p*Oo(g),p*To(g)),u.closePath(),l)return u=null,l+""||null}return e&&(l.headRadius=function(t){return arguments.length?(e="function"===typeof t?t:Xo(+t),l):e}),l.radius=function(e){return arguments.length?(r=o="function"===typeof e?e:Xo(+e),l):r},l.sourceRadius=function(e){return arguments.length?(r="function"===typeof e?e:Xo(+e),l):r},l.targetRadius=function(e){return arguments.length?(o="function"===typeof e?e:Xo(+e),l):o},l.startAngle=function(e){return arguments.length?(i="function"===typeof e?e:Xo(+e),l):i},l.endAngle=function(e){return arguments.length?(a="function"===typeof e?e:Xo(+e),l):a},l.padAngle=function(e){return arguments.length?(s="function"===typeof e?e:Xo(+e),l):s},l.source=function(e){return arguments.length?(t=e,l):t},l.target=function(e){return arguments.length?(n=e,l):n},l.context=function(e){return arguments.length?(u=null==e?null:e,l):u},l}function oi(){return ri()}function ii(){return ri(ni)}var ai=n(40871);const si=Math.PI/180,ui=180/Math.PI,li=.96422,ci=1,fi=.82521,di=4/29,hi=6/29,pi=3*hi*hi,gi=hi*hi*hi;function vi(e){if(e instanceof bi)return new bi(e.l,e.a,e.b,e.opacity);if(e instanceof Ri)return Oi(e);e instanceof _r.Gw||(e=(0,_r.b)(e));var t,n,r=Si(e.r),o=Si(e.g),i=Si(e.b),a=_i((.2225045*r+.7168786*o+.0606169*i)/ci);return r===o&&o===i?t=n=a:(t=_i((.4360747*r+.3850649*o+.1430804*i)/li),n=_i((.0139322*r+.0971045*o+.7141733*i)/fi)),new 
bi(116*a-16,500*(t-a),200*(a-n),e.opacity)}function yi(e,t){return new bi(e,0,0,null==t?1:t)}function mi(e,t,n,r){return 1===arguments.length?vi(e):new bi(e,t,n,null==r?1:r)}function bi(e,t,n,r){this.l=+e,this.a=+t,this.b=+n,this.opacity=+r}function _i(e){return e>gi?Math.pow(e,1/3):e/pi+di}function wi(e){return e>hi?e*e*e:pi*(e-di)}function xi(e){return 255*(e<=.0031308?12.92*e:1.055*Math.pow(e,1/2.4)-.055)}function Si(e){return(e/=255)<=.04045?e/12.92:Math.pow((e+.055)/1.055,2.4)}function Ai(e){if(e instanceof Ri)return new Ri(e.h,e.c,e.l,e.opacity);if(e instanceof bi||(e=vi(e)),0===e.a&&0===e.b)return new Ri(NaN,0()=>e;function Vi(e,t){for(var n,r=-1,o=t.length;++rr!==h>r&&n<(d-l)*(r-c)/(h-c)+l&&(o=-o)}return o}function Hi(e,t,n){var r,o,i,a;return function(e,t,n){return(t[0]-e[0])*(n[1]-e[1])===(n[0]-e[0])*(t[1]-e[1])}(e,t,n)&&(o=e[r=+(e[0]===t[0])],i=n[r],a=t[r],o<=i&&i<=a||a<=i&&i<=o)}function Wi(){}var qi=[[],[[[1,1.5],[.5,1]]],[[[1.5,1],[1,1.5]]],[[[1.5,1],[.5,1]]],[[[1,.5],[1.5,1]]],[[[1,1.5],[.5,1]],[[1,.5],[1.5,1]]],[[[1,.5],[1,1.5]]],[[[1,.5],[.5,1]]],[[[.5,1],[1,.5]]],[[[1,1.5],[1,.5]]],[[[.5,1],[1,.5]],[[1.5,1],[1,1.5]]],[[[1.5,1],[1,.5]]],[[[.5,1],[1.5,1]]],[[[1,1.5],[1.5,1]]],[[[.5,1],[1,1.5]]],[]];function Gi(){var e=1,t=1,n=q,r=s;function o(e){var t=n(e);if(Array.isArray(t))t=t.slice().sort(zi);else{const n=x(e,Yi);for(t=(0,H.Ay)(...W(n[0],n[1],t),t);t[t.length-1]>=n[1];)t.pop();for(;t[1]i(e,t)))}function i(n,o){const i=null==o?NaN:+o;if(isNaN(i))throw new Error(`invalid value: ${o}`);var s=[],u=[];return function(n,r,o){var i,s,u,l,c,f,d=new Array,h=new Array;i=s=-1,l=Xi(n[0],r),qi[l<<1].forEach(p);for(;++i=r,qi[c<<2].forEach(p);for(;++i0?s.push([e]):u.push(e)})),u.forEach((function(e){for(var t,n=0,r=s.length;n0&&i0&&a=0&&i>=0))throw new Error("invalid size");return e=r,t=i,o},o.thresholds=function(e){return arguments.length?(n="function"===typeof e?e:Array.isArray(e)?Bi(Ui.call(e)):Bi(e),o):n},o.smooth=function(e){return arguments.length?(r=e?s:Wi,o):r===s},o}function Yi(e){return isFinite(e)?e:NaN}function Xi(e,t){return null!=e&&+e>=t}function Ki(e){return null==e||isNaN(e=+e)?-1/0:e}function Zi(e,t,n,r){const o=r-t,i=n-t,a=isFinite(o)||isFinite(i)?o/i:Math.sign(o)/Math.sign(i);return isNaN(a)?e:e+a-.5}function Qi(e){return e[0]}function Ji(e){return e[1]}function ea(){return 1}function ta(){var e=Qi,t=Ji,n=ea,r=960,o=500,i=20,a=2,u=3*i,l=r+2*u>>a,c=o+2*u>>a,f=Bi(20);function d(r){var o=new Float32Array(l*c),f=Math.pow(2,-a),d=-1;for(const i of r){var h=(e(i,++d,r)+u)*f,p=(t(i,d,r)+u)*f,g=+n(i,d,r);if(g&&h>=0&&h=0&&pe*r)))(t).map(((e,t)=>(e.value=+n[t],p(e))))}function p(e){return e.coordinates.forEach(g),e}function g(e){e.forEach(v)}function v(e){e.forEach(y)}function y(e){e[0]=e[0]*Math.pow(2,a)-u,e[1]=e[1]*Math.pow(2,a)-u}function m(){return l=r+2*(u=3*i)>>a,c=o+2*u>>a,h}return h.contours=function(e){var t=d(e),n=Gi().size([l,c]),r=Math.pow(2,2*a),o=e=>{e=+e;var o=p(n.contour(t,e*r));return o.value=e,o};return Object.defineProperty(o,"max",{get:()=>(0,Z.A)(t)/r}),o},h.x=function(t){return arguments.length?(e="function"===typeof t?t:Bi(+t),h):e},h.y=function(e){return arguments.length?(t="function"===typeof e?e:Bi(+e),h):t},h.weight=function(e){return arguments.length?(n="function"===typeof e?e:Bi(+e),h):n},h.size=function(e){if(!arguments.length)return[r,o];var t=+e[0],n=+e[1];if(!(t>=0&&n>=0))throw new Error("invalid size");return r=t,o=n,m()},h.cellSize=function(e){if(!arguments.length)return 1<=1))throw new Error("invalid cell size");return 
a=Math.floor(Math.log(e)/Math.LN2),m()},h.thresholds=function(e){return arguments.length?(f="function"===typeof e?e:Array.isArray(e)?Bi(Ui.call(e)):Bi(e),h):f},h.bandwidth=function(e){if(!arguments.length)return Math.sqrt(i*(i+1));if(!((e=+e)>=0))throw new Error("invalid bandwidth");return i=(Math.sqrt(4*e*e+1)-1)/2,m()},h}const na=134217729,ra=33306690738754706e-32;function oa(e,t,n,r,o){let i,a,s,u,l=t[0],c=r[0],f=0,d=0;c>l===c>-l?(i=l,l=t[++f]):(i=c,c=r[++d]);let h=0;if(fl===c>-l?(a=l+i,s=i-(a-l),l=t[++f]):(a=c+i,s=i-(a-c),c=r[++d]),i=a,0!==s&&(o[h++]=s);fl===c>-l?(a=i+l,u=a-i,s=i-(a-u)+(l-u),l=t[++f]):(a=i+c,u=a-i,s=i-(a-u)+(c-u),c=r[++d]),i=a,0!==s&&(o[h++]=s);for(;f=33306690738754716e-32*l?u:-function(e,t,n,r,o,i,a){let s,u,l,c,f,d,h,p,g,v,y,m,b,_,w,x,S,A;const E=e-o,C=n-o,R=t-i,O=r-i;_=E*O,d=na*E,h=d-(d-E),p=E-h,d=na*O,g=d-(d-O),v=O-g,w=p*v-(_-h*g-p*g-h*v),x=R*C,d=na*R,h=d-(d-R),p=R-h,d=na*C,g=d-(d-C),v=C-g,S=p*v-(x-h*g-p*g-h*v),y=w-S,f=w-y,ua[0]=w-(y+f)+(f-S),m=_+y,f=m-_,b=_-(m-f)+(y-f),y=b-x,f=b-y,ua[1]=b-(y+f)+(f-x),A=m+y,f=A-m,ua[2]=m-(A-f)+(y-f),ua[3]=A;let T=function(e,t){let n=t[0];for(let r=1;r=k||-T>=k)return T;if(f=e-E,s=e-(E+f)+(f-o),f=n-C,l=n-(C+f)+(f-o),f=t-R,u=t-(R+f)+(f-i),f=r-O,c=r-(O+f)+(f-i),0===s&&0===u&&0===l&&0===c)return T;if(k=sa*a+ra*Math.abs(T),T+=E*c+O*s-(R*l+C*u),T>=k||-T>=k)return T;_=s*O,d=na*s,h=d-(d-s),p=s-h,d=na*O,g=d-(d-O),v=O-g,w=p*v-(_-h*g-p*g-h*v),x=u*C,d=na*u,h=d-(d-u),p=u-h,d=na*C,g=d-(d-C),v=C-g,S=p*v-(x-h*g-p*g-h*v),y=w-S,f=w-y,da[0]=w-(y+f)+(f-S),m=_+y,f=m-_,b=_-(m-f)+(y-f),y=b-x,f=b-y,da[1]=b-(y+f)+(f-x),A=m+y,f=A-m,da[2]=m-(A-f)+(y-f),da[3]=A;const M=oa(4,ua,4,da,la);_=E*c,d=na*E,h=d-(d-E),p=E-h,d=na*c,g=d-(d-c),v=c-g,w=p*v-(_-h*g-p*g-h*v),x=R*l,d=na*R,h=d-(d-R),p=R-h,d=na*l,g=d-(d-l),v=l-g,S=p*v-(x-h*g-p*g-h*v),y=w-S,f=w-y,da[0]=w-(y+f)+(f-S),m=_+y,f=m-_,b=_-(m-f)+(y-f),y=b-x,f=b-y,da[1]=b-(y+f)+(f-x),A=m+y,f=A-m,da[2]=m-(A-f)+(y-f),da[3]=A;const P=oa(M,la,4,da,ca);_=s*c,d=na*s,h=d-(d-s),p=s-h,d=na*c,g=d-(d-c),v=c-g,w=p*v-(_-h*g-p*g-h*v),x=u*l,d=na*u,h=d-(d-u),p=u-h,d=na*l,g=d-(d-l),v=l-g,S=p*v-(x-h*g-p*g-h*v),y=w-S,f=w-y,da[0]=w-(y+f)+(f-S),m=_+y,f=m-_,b=_-(m-f)+(y-f),y=b-x,f=b-y,da[1]=b-(y+f)+(f-x),A=m+y,f=A-m,da[2]=m-(A-f)+(y-f),da[3]=A;const I=oa(P,ca,4,da,fa);return fa[I-1]}(e,t,n,r,o,i,l)}ia(4),ia(4),ia(4),ia(4),ia(4),ia(4),ia(4),ia(4),ia(4),ia(8),ia(8),ia(8),ia(4),ia(8),ia(8),ia(8),ia(12);ia(192),ia(192);ia(4),ia(4),ia(4),ia(4),ia(4),ia(4),ia(4),ia(4),ia(8),ia(8),ia(8),ia(8),ia(8),ia(8),ia(8),ia(8),ia(8),ia(4),ia(4),ia(4),ia(8),ia(16),ia(16),ia(16),ia(32),ia(32),ia(48),ia(64);ia(1152),ia(1152);ia(4),ia(4),ia(4),ia(4),ia(4),ia(4),ia(4),ia(4),ia(4),ia(4),ia(24),ia(24),ia(24),ia(24),ia(24),ia(24),ia(24),ia(24),ia(24),ia(24),ia(1152),ia(1152),ia(1152),ia(1152),ia(1152),ia(2304),ia(2304),ia(3456),ia(5760),ia(8),ia(8),ia(8),ia(16),ia(24),ia(48),ia(48),ia(96),ia(192),ia(384),ia(384),ia(384),ia(768);ia(96),ia(96),ia(96),ia(1152);const pa=Math.pow(2,-52),ga=new Uint32Array(512);class va{static from(e,t=xa,n=Sa){const r=e.length,o=new Float64Array(2*r);for(let i=0;i>1;if(t>0&&"number"!==typeof e[0])throw new Error("Expected coords to contain numbers.");this.coords=e;const n=Math.max(2*t-5,0);this._triangles=new Uint32Array(3*n),this._halfedges=new Int32Array(3*n),this._hashSize=Math.ceil(Math.sqrt(t)),this._hullPrev=new Uint32Array(t),this._hullNext=new Uint32Array(t),this._hullTri=new Uint32Array(t),this._hullHash=new Int32Array(this._hashSize),this._ids=new Uint32Array(t),this._dists=new 
Float64Array(t),this.update()}update(){const{coords:e,_hullPrev:t,_hullNext:n,_hullTri:r,_hullHash:o}=this,i=e.length>>1;let a=1/0,s=1/0,u=-1/0,l=-1/0;for(let A=0;Au&&(u=t),n>l&&(l=n),this._ids[A]=A}const c=(a+u)/2,f=(s+l)/2;let d,h,p;for(let A=0,E=1/0;A0&&(h=A,E=t)}let y=e[2*h],m=e[2*h+1],b=1/0;for(let A=0;Ar&&(t[n++]=o,r=i)}return this.hull=t.subarray(0,n),this.triangles=new Uint32Array(0),void(this.halfedges=new Uint32Array(0))}if(ha(g,v,y,m,_,w)<0){const e=h,t=y,n=m;h=p,y=_,m=w,p=e,_=t,w=n}const x=function(e,t,n,r,o,i){const a=n-e,s=r-t,u=o-e,l=i-t,c=a*a+s*s,f=u*u+l*l,d=.5/(a*l-s*u),h=e+(l*c-s*f)*d,p=t+(a*f-u*c)*d;return{x:h,y:p}}(g,v,y,m,_,w);this._cx=x.x,this._cy=x.y;for(let A=0;A0&&Math.abs(a-A)<=pa&&Math.abs(s-E)<=pa)continue;if(A=a,E=s,i===d||i===h||i===p)continue;let u=0;for(let e=0,t=this._hashKey(a,s);e=0;)if(c=l,c===u){c=-1;break}if(-1===c)continue;let f=this._addTriangle(c,i,n[c],-1,-1,r[c]);r[i]=this._legalize(f+2),r[c]=f,S++;let g=n[c];for(;l=n[g],ha(a,s,e[2*g],e[2*g+1],e[2*l],e[2*l+1])<0;)f=this._addTriangle(g,i,l,r[i],-1,r[g]),r[i]=this._legalize(f+2),n[g]=g,S--,g=l;if(c===u)for(;l=t[c],ha(a,s,e[2*l],e[2*l+1],e[2*c],e[2*c+1])<0;)f=this._addTriangle(l,i,c,-1,r[c],r[l]),this._legalize(f+2),r[l]=f,n[c]=c,S--,c=l;this._hullStart=t[i]=c,n[c]=t[g]=i,n[i]=g,o[this._hashKey(a,s)]=i,o[this._hashKey(e[2*c],e[2*c+1])]=c}this.hull=new Uint32Array(S);for(let A=0,E=this._hullStart;A0?3-n:1+n)/4}(e-this._cx,t-this._cy)*this._hashSize)%this._hashSize}_legalize(e){const{_triangles:t,_halfedges:n,coords:r}=this;let o=0,i=0;for(;;){const a=n[e],s=e-e%3;if(i=s+(e+2)%3,-1===a){if(0===o)break;e=ga[--o];continue}const u=a-a%3,l=s+(e+1)%3,c=u+(a+2)%3,f=t[i],d=t[e],h=t[l],p=t[c];if(ma(r[2*f],r[2*f+1],r[2*d],r[2*d+1],r[2*h],r[2*h+1],r[2*p],r[2*p+1])){t[e]=p,t[a]=f;const r=n[c];if(-1===r){let t=this._hullStart;do{if(this._hullTri[t]===c){this._hullTri[t]=e;break}t=this._hullPrev[t]}while(t!==this._hullStart)}this._link(e,r),this._link(a,n[i]),this._link(i,c);const s=u+(a+1)%3;o=n&&t[e[a]]>i;)e[a+1]=e[a--];e[a+1]=r}else{let o=n+1,i=r;wa(e,n+r>>1,o),t[e[n]]>t[e[r]]&&wa(e,n,r),t[e[o]]>t[e[r]]&&wa(e,o,r),t[e[n]]>t[e[o]]&&wa(e,n,o);const a=e[o],s=t[a];for(;;){do{o++}while(t[e[o]]s);if(i=i-n?(_a(e,t,o,r),_a(e,t,n,i-1)):(_a(e,t,n,i-1),_a(e,t,o,r))}}function wa(e,t,n){const r=e[t];e[t]=e[n],e[n]=r}function xa(e){return e[0]}function Sa(e){return e[1]}const Aa=1e-6;class Ea{constructor(){this._x0=this._y0=this._x1=this._y1=null,this._=""}moveTo(e,t){this._+=`M${this._x0=this._x1=+e},${this._y0=this._y1=+t}`}closePath(){null!==this._x1&&(this._x1=this._x0,this._y1=this._y0,this._+="Z")}lineTo(e,t){this._+=`L${this._x1=+e},${this._y1=+t}`}arc(e,t,n){const r=(e=+e)+(n=+n),o=t=+t;if(n<0)throw new Error("negative radius");null===this._x1?this._+=`M${r},${o}`:(Math.abs(this._x1-r)>Aa||Math.abs(this._y1-o)>Aa)&&(this._+="L"+r+","+o),n&&(this._+=`A${n},${n},0,1,1,${e-n},${t}A${n},${n},0,1,1,${this._x1=r},${this._y1=o}`)}rect(e,t,n,r){this._+=`M${this._x0=this._x1=+e},${this._y0=this._y1=+t}h${+n}v${+r}h${-n}Z`}value(){return this._||null}}class Ca{constructor(){this._=[]}moveTo(e,t){this._.push([e,t])}closePath(){this._.push(this._[0].slice())}lineTo(e,t){this._.push([e,t])}value(){return this._.length?this._:null}}class Ra{constructor(e,[t,n,r,o]=[0,0,960,500]){if(!((r=+r)>=(t=+t))||!((o=+o)>=(n=+n)))throw new Error("invalid bounds");this.delaunay=e,this._circumcenters=new Float64Array(2*e.points.length),this.vectors=new 
Float64Array(2*e.points.length),this.xmax=r,this.xmin=t,this.ymax=o,this.ymin=n,this._init()}update(){return this.delaunay.update(),this._init(),this}_init(){const{delaunay:{points:e,hull:t,triangles:n},vectors:r}=this;let o,i;const a=this.circumcenters=this._circumcenters.subarray(0,n.length/3*2);for(let p,g,v=0,y=0,m=n.length;v1;)o-=2;for(let i=2;i0){if(t>=this.ymax)return null;(o=(this.ymax-t)/r)0){if(e>=this.xmax)return null;(o=(this.xmax-e)/n)this.xmax?2:0)|(tthis.ymax?8:0)}_simplify(e){if(e&&e.length>4){for(let t=0;t2&&function(e){const{triangles:t,coords:n}=e;for(let r=0;r1e-10)return!1}return!0}(e)){this.collinear=Int32Array.from({length:t.length/2},((e,t)=>t)).sort(((e,n)=>t[2*e]-t[2*n]||t[2*e+1]-t[2*n+1]));const e=this.collinear[0],n=this.collinear[this.collinear.length-1],r=[t[2*e],t[2*e+1],t[2*n],t[2*n+1]],o=1e-8*Math.hypot(r[3]-r[1],r[2]-r[0]);for(let i=0,a=t.length/2;i0&&(this.triangles=new Int32Array(3).fill(-1),this.halfedges=new Int32Array(3).fill(-1),this.triangles[0]=r[0],i[r[0]]=1,2===r.length&&(i[r[1]]=0,this.triangles[1]=r[1],this.triangles[2]=r[1]))}voronoi(e){return new Ra(this,e)}*neighbors(e){const{inedges:t,hull:n,_hullIndex:r,halfedges:o,triangles:i,collinear:a}=this;if(a){const t=a.indexOf(e);return t>0&&(yield a[t-1]),void(t=0&&o!==n&&o!==r;)n=o;return o}_step(e,t,n){const{inedges:r,hull:o,_hullIndex:i,halfedges:a,triangles:s,points:u}=this;if(-1===r[e]||!u.length)return(e+1)%(u.length>>1);let l=e,c=Ta(t-u[2*e],2)+Ta(n-u[2*e+1],2);const f=r[e];let d=f;do{let r=s[d];const f=Ta(t-u[2*r],2)+Ta(n-u[2*r+1],2);if(f()=>e;function Da(e,{sourceEvent:t,subject:n,target:r,identifier:o,active:i,x:a,y:s,dx:u,dy:l,dispatch:c}){Object.defineProperties(this,{type:{value:e,enumerable:!0,configurable:!0},sourceEvent:{value:t,enumerable:!0,configurable:!0},subject:{value:n,enumerable:!0,configurable:!0},target:{value:r,enumerable:!0,configurable:!0},identifier:{value:o,enumerable:!0,configurable:!0},active:{value:i,enumerable:!0,configurable:!0},x:{value:a,enumerable:!0,configurable:!0},y:{value:s,enumerable:!0,configurable:!0},dx:{value:u,enumerable:!0,configurable:!0},dy:{value:l,enumerable:!0,configurable:!0},_:{value:c}})}function La(e){return!e.ctrlKey&&!e.button}function Fa(){return this.parentNode}function ja(e,t){return null==t?{x:e.x,y:e.y}:t}function Ua(){return navigator.maxTouchPoints||"ontouchstart"in this}function za(){var e,t,n,r,o=La,i=Fa,a=ja,s=Ua,u={},l=ot("start","drag","end"),c=0,f=0;function d(e){e.on("mousedown.drag",h).filter(s).on("touchstart.drag",v).on("touchmove.drag",y,_n).on("touchend.drag touchcancel.drag",m).style("touch-action","none").style("-webkit-tap-highlight-color","rgba(0,0,0,0)")}function h(a,s){if(!r&&o.call(this,a,s)){var u=b(this,i.call(this,a,s),a,s,"mouse");u&&(bn(a.view).on("mousemove.drag",p,wn).on("mouseup.drag",g,wn),An(a.view),xn(a),n=!1,e=a.clientX,t=a.clientY,u("start",a))}}function p(r){if(Sn(r),!n){var o=r.clientX-e,i=r.clientY-t;n=o*o+i*i>f}u.mouse("drag",r)}function g(e){bn(e.view).on("mousemove.drag mouseup.drag",null),En(e.view,n),Sn(e),u.mouse("end",e)}function v(e,t){if(o.call(this,e,t)){var n,r,a=e.changedTouches,s=i.call(this,e,t),u=a.length;for(n=0;n9999?"+"+Ya(e,6):Ya(e,4)}(e.getUTCFullYear())+"-"+Ya(e.getUTCMonth()+1,2)+"-"+Ya(e.getUTCDate(),2)+(o?"T"+Ya(t,2)+":"+Ya(n,2)+":"+Ya(r,2)+"."+Ya(o,3)+"Z":r?"T"+Ya(t,2)+":"+Ya(n,2)+":"+Ya(r,2)+"Z":n||t?"T"+Ya(t,2)+":"+Ya(n,2)+"Z":"")}function Ka(e){var t=new RegExp('["'+e+"\n\r]"),n=e.charCodeAt(0);function r(e,t){var r,o=[],i=e.length,a=0,s=0,u=i<=0,l=!1;function 
c(){if(u)return Va;if(l)return l=!1,Ba;var t,r,o=a;if(e.charCodeAt(o)===$a){for(;a++=i?u=!0:(r=e.charCodeAt(a++))===Ha?l=!0:r===Wa&&(l=!0,e.charCodeAt(a)===Ha&&++a),e.slice(o+1,t-1).replace(/""/g,'"')}for(;a+e;function vs(e){return e*e}function ys(e){return e*(2-e)}function ms(e){return((e*=2)<=1?e*e:--e*(2-e)+1)/2}var bs=function e(t){function n(e){return Math.pow(e,t)}return t=+t,n.exponent=e,n}(3),_s=function e(t){function n(e){return 1-Math.pow(1-e,t)}return t=+t,n.exponent=e,n}(3),ws=function e(t){function n(e){return((e*=2)<=1?Math.pow(e,t):2-Math.pow(2-e,t))/2}return t=+t,n.exponent=e,n}(3),xs=Math.PI,Ss=xs/2;function As(e){return 1===+e?1:1-Math.cos(e*Ss)}function Es(e){return Math.sin(e*Ss)}function Cs(e){return(1-Math.cos(xs*e))/2}function Rs(e){return 1.0009775171065494*(Math.pow(2,-10*e)-.0009765625)}function Os(e){return Rs(1-+e)}function Ts(e){return 1-Rs(e)}function ks(e){return((e*=2)<=1?Rs(1-e):2-Rs(e-1))/2}function Ms(e){return 1-Math.sqrt(1-e*e)}function Ps(e){return Math.sqrt(1- --e*e)}function Is(e){return((e*=2)<=1?1-Math.sqrt(1-e*e):Math.sqrt(1-(e-=2)*e)+1)/2}var Ns=4/11,Ds=6/11,Ls=8/11,Fs=3/4,js=9/11,Us=10/11,zs=15/16,Bs=21/22,Vs=63/64,$s=1/Ns/Ns;function Hs(e){return 1-Ws(1-e)}function Ws(e){return(e=+e)au(t,n).then((t=>(new DOMParser).parseFromString(t,e)))}const gu=pu("application/xml");var vu=pu("text/html"),yu=pu("image/svg+xml");function mu(e,t){var n,r=1;function o(){var o,i,a=n.length,s=0,u=0;for(o=0;o=(i=(g+y)/2))?g=i:y=i,(c=n>=(a=(v+m)/2))?v=a:m=a,o=h,!(h=h[f=c<<1|l]))return o[f]=p,e;if(s=+e._x.call(null,h.data),u=+e._y.call(null,h.data),t===s&&n===u)return p.next=h,o?o[f]=p:e._root=p,e;do{o=o?o[f]=new Array(4):e._root=new Array(4),(l=t>=(i=(g+y)/2))?g=i:y=i,(c=n>=(a=(v+m)/2))?v=a:m=a}while((f=c<<1|l)===(d=(u>=a)<<1|s>=i));return o[d]=h,o[f]=p,e}function _u(e,t,n,r,o){this.node=e,this.x0=t,this.y0=n,this.x1=r,this.y1=o}function wu(e){return e[0]}function xu(e){return e[1]}function Su(e,t,n){var r=new Au(null==t?wu:t,null==n?xu:n,NaN,NaN,NaN,NaN);return null==e?r:r.addAll(e)}function Au(e,t,n,r,o,i){this._x=e,this._y=t,this._x0=n,this._y0=r,this._x1=o,this._y1=i,this._root=void 0}function Eu(e){for(var t={data:e.data},n=t;e=e.next;)n=n.next={data:e.data};return t}var Cu=Su.prototype=Au.prototype;function Ru(e){return function(){return e}}function Ou(e){return 1e-6*(e()-.5)}function Tu(e){return e.x+e.vx}function ku(e){return e.y+e.vy}function Mu(e){var t,n,r,o=1,i=1;function a(){for(var e,a,u,l,c,f,d,h=t.length,p=0;pl+p||ic+p||au.index){var g=l-s.x-s.vx,v=c-s.y-s.vy,y=g*g+v*v;ye.r&&(e.r=e[t].r)}function u(){if(t){var r,o,i=t.length;for(n=new Array(i),r=0;r[s(e,t,r),e])));for(a=0,o=new Array(l);ac&&(c=r),of&&(f=o));if(u>c||l>f)return this;for(this.cover(u,l).cover(c,f),n=0;ne||e>=o||r>t||t>=i;)switch(s=(td||(i=u.y0)>h||(a=u.x1)=y)<<1|e>=v)&&(u=p[p.length-1],p[p.length-1]=p[p.length-1-l],p[p.length-1-l]=u)}else{var m=e-+this._x.call(null,g.data),b=t-+this._y.call(null,g.data),_=m*m+b*b;if(_=(s=(p+v)/2))?p=s:v=s,(c=a>=(u=(g+y)/2))?g=u:y=u,t=h,!(h=h[f=c<<1|l]))return this;if(!h.length)break;(t[f+1&3]||t[f+2&3]||t[f+3&3])&&(n=t,d=f)}for(;h.data!==e;)if(r=h,!(h=h.next))return this;return(o=h.next)&&delete h.next,r?(o?r.next=o:delete r.next,this):t?(o?t[f]=o:delete t[f],(h=t[0]||t[1]||t[2]||t[3])&&h===(t[3]||t[2]||t[1]||t[0])&&!h.length&&(n?n[d]=h:this._root=h),this):(this._root=o,this)},Cu.removeAll=function(e){for(var t=0,n=e.length;t(e=(Du*e+Lu)%Fu)/Fu}();function 
f(){d(),l.call("tick",t),n1?(null==n?s.delete(e):s.set(e,p(n)),t):s.get(e)},find:function(t,n,r){var o,i,a,s,u,l=0,c=e.length;for(null==r?r=1/0:r*=r,l=0;l1?(l.on(e,n),t):l.on(e)}}}function $u(){var e,t,n,r,o,i=Ru(-30),a=1,s=1/0,u=.81;function l(n){var o,i=e.length,a=Su(e,ju,Uu).visitAfter(f);for(r=n,o=0;o=s)){(e.data!==t||e.next)&&(0===f&&(p+=(f=Ou(n))*f),0===d&&(p+=(d=Ou(n))*d),p0?1:e<0?-1:0},ml=Math.sqrt,bl=Math.tan;function _l(e){return e>1?0:e<-1?tl:Math.acos(e)}function wl(e){return e>1?nl:e<-1?-nl:Math.asin(e)}function xl(e){return(e=vl(e/2))*e}function Sl(){}function Al(e,t){e&&Cl.hasOwnProperty(e.type)&&Cl[e.type](e,t)}var El={Feature:function(e,t){Al(e.geometry,t)},FeatureCollection:function(e,t){for(var n=e.features,r=-1,o=n.length;++r=0?1:-1,o=r*n,i=cl(t=(t*=al)/2+rl),a=vl(t),s=Nl*a,u=Il*i+s*cl(o),l=s*r*vl(o);ql.add(ll(l,u)),Pl=e,Il=i,Nl=a}function Jl(e){return Gl=new S,Tl(e,Yl),2*Gl}function ec(e){return[ll(e[1],e[0]),wl(e[2])]}function tc(e){var t=e[0],n=e[1],r=cl(n);return[r*cl(t),r*vl(t),vl(n)]}function nc(e,t){return e[0]*t[0]+e[1]*t[1]+e[2]*t[2]}function rc(e,t){return[e[1]*t[2]-e[2]*t[1],e[2]*t[0]-e[0]*t[2],e[0]*t[1]-e[1]*t[0]]}function oc(e,t){e[0]+=t[0],e[1]+=t[1],e[2]+=t[2]}function ic(e,t){return[e[0]*t,e[1]*t,e[2]*t]}function ac(e){var t=ml(e[0]*e[0]+e[1]*e[1]+e[2]*e[2]);e[0]/=t,e[1]/=t,e[2]/=t}var sc,uc,lc,cc,fc,dc,hc,pc,gc,vc,yc,mc,bc,_c,wc,xc,Sc={point:Ac,lineStart:Cc,lineEnd:Rc,polygonStart:function(){Sc.point=Oc,Sc.lineStart=Tc,Sc.lineEnd=kc,$l=new S,Yl.polygonStart()},polygonEnd:function(){Yl.polygonEnd(),Sc.point=Ac,Sc.lineStart=Cc,Sc.lineEnd=Rc,ql<0?(Dl=-(Fl=180),Ll=-(jl=90)):$l>Ju?jl=90:$l<-Ju&&(Ll=-90),Wl[0]=Dl,Wl[1]=Fl},sphere:function(){Dl=-(Fl=180),Ll=-(jl=90)}};function Ac(e,t){Hl.push(Wl=[Dl=e,Fl=e]),tjl&&(jl=t)}function Ec(e,t){var n=tc([e*al,t*al]);if(Vl){var r=rc(Vl,n),o=rc([r[1],-r[0],0],r);ac(o),o=ec(o);var i,a=e-Ul,s=a>0?1:-1,u=o[0]*il*s,l=sl(a)>180;l^(s*Uljl&&(jl=i):l^(s*Ul<(u=(u+360)%360-180)&&ujl&&(jl=t)),l?eMc(Dl,Fl)&&(Fl=e):Mc(e,Fl)>Mc(Dl,Fl)&&(Dl=e):Fl>=Dl?(eFl&&(Fl=e)):e>Ul?Mc(Dl,e)>Mc(Dl,Fl)&&(Fl=e):Mc(e,Fl)>Mc(Dl,Fl)&&(Dl=e)}else Hl.push(Wl=[Dl=e,Fl=e]);tjl&&(jl=t),Vl=n,Ul=e}function Cc(){Sc.point=Ec}function Rc(){Wl[0]=Dl,Wl[1]=Fl,Sc.point=Ac,Vl=null}function Oc(e,t){if(Vl){var n=e-Ul;$l.add(sl(n)>180?n+(n>0?360:-360):n)}else zl=e,Bl=t;Yl.point(e,t),Ec(e,t)}function Tc(){Yl.lineStart()}function kc(){Oc(zl,Bl),Yl.lineEnd(),sl($l)>Ju&&(Dl=-(Fl=180)),Wl[0]=Dl,Wl[1]=Fl,Vl=null}function Mc(e,t){return(t-=e)<0?t+360:t}function Pc(e,t){return e[0]-t[0]}function Ic(e,t){return e[0]<=e[1]?e[0]<=t&&t<=e[1]:tMc(r[0],r[1])&&(r[1]=o[1]),Mc(o[0],r[1])>Mc(r[0],r[1])&&(r[0]=o[0])):i.push(r=o);for(a=-1/0,t=0,r=i[n=i.length-1];t<=n;r=o,++t)o=i[t],(s=Mc(r[1],o[0]))>a&&(a=s,Dl=o[0],Fl=r[1])}return Hl=Wl=null,Dl===1/0||Ll===1/0?[[NaN,NaN],[NaN,NaN]]:[[Dl,Ll],[Fl,jl]]}var Dc={sphere:Sl,point:Lc,lineStart:jc,lineEnd:Bc,polygonStart:function(){Dc.lineStart=Vc,Dc.lineEnd=$c},polygonEnd:function(){Dc.lineStart=jc,Dc.lineEnd=Bc}};function Lc(e,t){e*=al;var n=cl(t*=al);Fc(n*cl(e),n*vl(e),vl(t))}function Fc(e,t,n){++sc,lc+=(e-lc)/sc,cc+=(t-cc)/sc,fc+=(n-fc)/sc}function jc(){Dc.point=Uc}function Uc(e,t){e*=al;var n=cl(t*=al);_c=n*cl(e),wc=n*vl(e),xc=vl(t),Dc.point=zc,Fc(_c,wc,xc)}function zc(e,t){e*=al;var n=cl(t*=al),r=n*cl(e),o=n*vl(e),i=vl(t),a=ll(ml((a=wc*i-xc*o)*a+(a=xc*r-_c*i)*a+(a=_c*o-wc*r)*a),_c*r+wc*o+xc*i);uc+=a,dc+=a*(_c+(_c=r)),hc+=a*(wc+(wc=o)),pc+=a*(xc+(xc=i)),Fc(_c,wc,xc)}function Bc(){Dc.point=Lc}function Vc(){Dc.point=Hc}function 
$c(){Wc(mc,bc),Dc.point=Lc}function Hc(e,t){mc=e,bc=t,e*=al,t*=al,Dc.point=Wc;var n=cl(t);_c=n*cl(e),wc=n*vl(e),xc=vl(t),Fc(_c,wc,xc)}function Wc(e,t){e*=al;var n=cl(t*=al),r=n*cl(e),o=n*vl(e),i=vl(t),a=wc*i-xc*o,s=xc*r-_c*i,u=_c*o-wc*r,l=hl(a,s,u),c=wl(l),f=l&&-c/l;gc.add(f*a),vc.add(f*s),yc.add(f*u),uc+=c,dc+=c*(_c+(_c=r)),hc+=c*(wc+(wc=o)),pc+=c*(xc+(xc=i)),Fc(_c,wc,xc)}function qc(e){sc=uc=lc=cc=fc=dc=hc=pc=0,gc=new S,vc=new S,yc=new S,Tl(e,Dc);var t=+gc,n=+vc,r=+yc,o=hl(t,n,r);return otl&&(e-=Math.round(e/ol)*ol),[e,t]}function Kc(e,t,n){return(e%=ol)?t||n?Yc(Qc(e),Jc(t,n)):Qc(e):t||n?Jc(t,n):Xc}function Zc(e){return function(t,n){return sl(t+=e)>tl&&(t-=Math.round(t/ol)*ol),[t,n]}}function Qc(e){var t=Zc(e);return t.invert=Zc(-e),t}function Jc(e,t){var n=cl(e),r=vl(e),o=cl(t),i=vl(t);function a(e,t){var a=cl(t),s=cl(e)*a,u=vl(e)*a,l=vl(t),c=l*n+s*r;return[ll(u*o-c*i,s*n-l*r),wl(c*o+u*i)]}return a.invert=function(e,t){var a=cl(t),s=cl(e)*a,u=vl(e)*a,l=vl(t),c=l*o-u*i;return[ll(u*o+l*i,s*n+c*r),wl(c*n-s*r)]},a}function ef(e){function t(t){return(t=e(t[0]*al,t[1]*al))[0]*=il,t[1]*=il,t}return e=Kc(e[0]*al,e[1]*al,e.length>2?e[2]*al:0),t.invert=function(t){return(t=e.invert(t[0]*al,t[1]*al))[0]*=il,t[1]*=il,t},t}function tf(e,t,n,r,o,i){if(n){var a=cl(t),s=vl(t),u=r*n;null==o?(o=t+r*ol,i=t-u/2):(o=nf(a,o),i=nf(a,i),(r>0?oi)&&(o+=r*ol));for(var l,c=o;r>0?c>i:c1&&t.push(t.pop().concat(t.shift()))},result:function(){var n=t;return t=[],e=null,n}}}function af(e,t){return sl(e[0]-t[0])=0;--i)o.point((c=l[i])[0],c[1]);else r(d.x,d.p.x,-1,o);d=d.p}l=(d=d.o).z,h=!h}while(!d.v);o.lineEnd()}}}function lf(e){if(t=e.length){for(var t,n,r=0,o=e[0];++r=0?1:-1,R=C*E,O=R>tl,T=v*x;if(u.add(ll(T*C*vl(R),y*A+T*cl(R))),a+=O?E+C*ol:E,O^p>=n^_>=n){var k=rc(tc(h),tc(b));ac(k);var M=rc(i,k);ac(M);var P=(O^E>=0?-1:1)*wl(M[2]);(r>P||r===P&&(k[0]||k[1]))&&(s+=O^E>=0?1:-1)}}return(a<-Ju||a0){for(f||(o.polygonStart(),f=!0),o.lineStart(),e=0;e1&&2&u&&d.push(d.pop().concat(d.shift())),a.push(d.filter(hf))}return d}}function hf(e){return e.length>1}function pf(e,t){return((e=e.x)[0]<0?e[1]-nl-Ju:nl-e[1])-((t=t.x)[0]<0?t[1]-nl-Ju:nl-t[1])}Xc.invert=Xc;const gf=df((function(){return!0}),(function(e){var t,n=NaN,r=NaN,o=NaN;return{lineStart:function(){e.lineStart(),t=1},point:function(i,a){var s=i>0?tl:-tl,u=sl(i-n);sl(u-tl)0?nl:-nl),e.point(o,r),e.lineEnd(),e.lineStart(),e.point(s,r),e.point(i,r),t=0):o!==s&&u>=tl&&(sl(n-o)Ju?ul((vl(t)*(i=cl(r))*vl(n)-vl(r)*(o=cl(t))*vl(e))/(o*i*a)):(t+r)/2}(n,r,i,a),e.point(o,r),e.lineEnd(),e.lineStart(),e.point(s,r),t=0),e.point(n=i,r=a),o=s},lineEnd:function(){e.lineEnd(),n=r=NaN},clean:function(){return 2-t}}}),(function(e,t,n,r){var o;if(null==e)o=n*nl,r.point(-tl,o),r.point(0,o),r.point(tl,o),r.point(tl,0),r.point(tl,-o),r.point(0,-o),r.point(-tl,-o),r.point(-tl,0),r.point(-tl,o);else if(sl(e[0]-t[0])>Ju){var i=e[0]0,o=sl(t)>Ju;function i(e,n){return cl(e)*cl(n)>t}function a(e,n,r){var o=[1,0,0],i=rc(tc(e),tc(n)),a=nc(i,i),s=i[0],u=a-s*s;if(!u)return!r&&e;var l=t*a/u,c=-t*s/u,f=rc(o,i),d=ic(o,l);oc(d,ic(i,c));var h=f,p=nc(d,h),g=nc(h,h),v=p*p-g*(nc(d,d)-1);if(!(v<0)){var y=ml(v),m=ic(h,(-p-y)/g);if(oc(m,d),m=ec(m),!r)return m;var b,_=e[0],w=n[0],x=e[1],S=n[1];w<_&&(b=_,_=w,w=b);var A=w-_,E=sl(A-tl)0^m[1]<(sl(m[0]-_)tl^(_<=m[0]&&m[0]<=w)){var C=ic(h,(-p+y)/g);return oc(C,d),[m,ec(C)]}}}function s(t,n){var o=r?e:tl-e,i=0;return t<-o?i|=1:t>o&&(i|=2),n<-o?i|=4:n>o&&(i|=8),i}return df(i,(function(e){var t,n,u,l,c;return{lineStart:function(){l=u=!1,c=1},point:function(f,d){var 
h,p=[f,d],g=i(f,d),v=r?g?0:s(f,d):g?s(f+(f<0?tl:-tl),d):0;if(!t&&(l=u=g)&&e.lineStart(),g!==u&&(!(h=a(t,p))||af(t,h)||af(p,h))&&(p[2]=1),g!==u)c=0,g?(e.lineStart(),h=a(p,t),e.point(h[0],h[1])):(h=a(t,p),e.point(h[0],h[1],2),e.lineEnd()),t=h;else if(o&&t&&r^g){var y;v&n||!(y=a(p,t,!0))||(c=0,r?(e.lineStart(),e.point(y[0][0],y[0][1]),e.point(y[1][0],y[1][1]),e.lineEnd()):(e.point(y[1][0],y[1][1]),e.lineEnd(),e.lineStart(),e.point(y[0][0],y[0][1],3)))}!g||t&&af(t,p)||e.point(p[0],p[1]),t=p,u=g,n=v},lineEnd:function(){u&&e.lineEnd(),t=null},clean:function(){return c|(l&&u)<<1}}}),(function(t,r,o,i){tf(i,e,n,o,t,r)}),r?[0,-e]:[-tl,e-tl])}var yf,mf,bf,_f,wf=1e9,xf=-wf;function Sf(e,t,n,r){function o(o,i){return e<=o&&o<=n&&t<=i&&i<=r}function i(o,i,s,l){var c=0,f=0;if(null==o||(c=a(o,s))!==(f=a(i,s))||u(o,i)<0^s>0)do{l.point(0===c||3===c?e:n,c>1?r:t)}while((c=(c+s+4)%4)!==f);else l.point(i[0],i[1])}function a(r,o){return sl(r[0]-e)0?0:3:sl(r[0]-n)0?2:1:sl(r[1]-t)0?1:0:o>0?3:2}function s(e,t){return u(e.x,t.x)}function u(e,t){var n=a(e,1),r=a(t,1);return n!==r?n-r:0===n?t[1]-e[1]:1===n?e[0]-t[0]:2===n?e[1]-t[1]:t[0]-e[0]}return function(a){var u,l,c,f,d,h,p,g,v,y,m,b=a,_=of(),w={point:x,lineStart:function(){w.point=S,l&&l.push(c=[]);y=!0,v=!1,p=g=NaN},lineEnd:function(){u&&(S(f,d),h&&v&&_.rejoin(),u.push(_.result()));w.point=x,v&&b.lineEnd()},polygonStart:function(){b=_,u=[],l=[],m=!0},polygonEnd:function(){var t=function(){for(var t=0,n=0,o=l.length;nr&&(d-i)*(r-a)>(h-a)*(e-i)&&++t:h<=r&&(d-i)*(r-a)<(h-a)*(e-i)&&--t;return t}(),n=m&&t,o=(u=ne(u)).length;(n||o)&&(a.polygonStart(),n&&(a.lineStart(),i(null,null,1,a),a.lineEnd()),o&&uf(u,s,t,i,a),a.polygonEnd());b=a,u=l=c=null}};function x(e,t){o(e,t)&&b.point(e,t)}function S(i,a){var s=o(i,a);if(l&&c.push([i,a]),y)f=i,d=a,h=s,y=!1,s&&(b.lineStart(),b.point(i,a));else if(s&&v)b.point(i,a);else{var u=[p=Math.max(xf,Math.min(wf,p)),g=Math.max(xf,Math.min(wf,g))],_=[i=Math.max(xf,Math.min(wf,i)),a=Math.max(xf,Math.min(wf,a))];!function(e,t,n,r,o,i){var a,s=e[0],u=e[1],l=0,c=1,f=t[0]-s,d=t[1]-u;if(a=n-s,f||!(a>0)){if(a/=f,f<0){if(a0){if(a>c)return;a>l&&(l=a)}if(a=o-s,f||!(a<0)){if(a/=f,f<0){if(a>c)return;a>l&&(l=a)}else if(f>0){if(a0)){if(a/=d,d<0){if(a0){if(a>c)return;a>l&&(l=a)}if(a=i-u,d||!(a<0)){if(a/=d,d<0){if(a>c)return;a>l&&(l=a)}else if(d>0){if(a0&&(e[0]=s+l*f,e[1]=u+l*d),c<1&&(t[0]=s+c*f,t[1]=u+c*d),!0}}}}}(u,_,e,t,n,r)?s&&(b.lineStart(),b.point(i,a),m=!1):(v||(b.lineStart(),b.point(u[0],u[1])),b.point(_[0],_[1]),s||b.lineEnd(),m=!1)}p=i,g=a,v=s}return w}}function Af(){var e,t,n,r=0,o=0,i=960,a=500;return n={stream:function(n){return e&&t===n?e:e=Sf(r,o,i,a)(t=n)},extent:function(s){return arguments.length?(r=+s[0][0],o=+s[0][1],i=+s[1][0],a=+s[1][1],e=t=null,n):[[r,o],[i,a]]}}}var Ef={sphere:Sl,point:Sl,lineStart:function(){Ef.point=Rf,Ef.lineEnd=Cf},lineEnd:Sl,polygonStart:Sl,polygonEnd:Sl};function Cf(){Ef.point=Ef.lineEnd=Sl}function Rf(e,t){mf=e*=al,bf=vl(t*=al),_f=cl(t),Ef.point=Of}function Of(e,t){e*=al;var n=vl(t*=al),r=cl(t),o=sl(e-mf),i=cl(o),a=r*vl(o),s=_f*n-bf*r*i,u=bf*n+_f*r*i;yf.add(ll(ml(a*a+s*s),u)),mf=e,bf=n,_f=r}function Tf(e){return yf=new S,Tl(e,Ef),+yf}var kf=[null,null],Mf={type:"LineString",coordinates:kf};function Pf(e,t){return kf[0]=e,kf[1]=t,Tf(Mf)}var If={Feature:function(e,t){return Df(e.geometry,t)},FeatureCollection:function(e,t){for(var n=e.features,r=-1,o=n.length;++r0&&(o=Pf(e[i],e[i-1]))>0&&n<=o&&r<=o&&(n+r-o)*(1-Math.pow((n-r)/o,2))Ju})).map(u)).concat((0,ce.A)(fl(i/h)*h,o,h).filter((function(e){return 
sl(e%g)>Ju})).map(l))}return y.lines=function(){return m().map((function(e){return{type:"LineString",coordinates:e}}))},y.outline=function(){return{type:"Polygon",coordinates:[c(r).concat(f(a).slice(1),c(n).reverse().slice(1),f(s).reverse().slice(1))]}},y.extent=function(e){return arguments.length?y.extentMajor(e).extentMinor(e):y.extentMinor()},y.extentMajor=function(e){return arguments.length?(r=+e[0][0],n=+e[1][0],s=+e[0][1],a=+e[1][1],r>n&&(e=r,r=n,n=e),s>a&&(e=s,s=a,a=e),y.precision(v)):[[r,s],[n,a]]},y.extentMinor=function(n){return arguments.length?(t=+n[0][0],e=+n[1][0],i=+n[0][1],o=+n[1][1],t>e&&(n=t,t=e,e=n),i>o&&(n=i,i=o,o=n),y.precision(v)):[[t,i],[e,o]]},y.step=function(e){return arguments.length?y.stepMajor(e).stepMinor(e):y.stepMinor()},y.stepMajor=function(e){return arguments.length?(p=+e[0],g=+e[1],y):[p,g]},y.stepMinor=function(e){return arguments.length?(d=+e[0],h=+e[1],y):[d,h]},y.precision=function(d){return arguments.length?(v=+d,u=Vf(i,o,90),l=$f(t,e,v),c=Vf(s,a,90),f=$f(r,n,v),y):v},y.extentMajor([[-180,-90+Ju],[180,90-Ju]]).extentMinor([[-180,-80-Ju],[180,80+Ju]])}function Wf(){return Hf()()}function qf(e,t){var n=e[0]*al,r=e[1]*al,o=t[0]*al,i=t[1]*al,a=cl(r),s=vl(r),u=cl(i),l=vl(i),c=a*cl(n),f=a*vl(n),d=u*cl(o),h=u*vl(o),p=2*wl(ml(xl(i-r)+a*u*xl(o-n))),g=vl(p),v=p?function(e){var t=vl(e*=p)/g,n=vl(p-e)/g,r=n*c+t*d,o=n*f+t*h,i=n*s+t*l;return[ll(o,r)*il,ll(i,ml(r*r+o*o))*il]}:function(){return[n*il,r*il]};return v.distance=p,v}const Gf=e=>e;var Yf,Xf,Kf,Zf,Qf=new S,Jf=new S,ed={point:Sl,lineStart:Sl,lineEnd:Sl,polygonStart:function(){ed.lineStart=td,ed.lineEnd=od},polygonEnd:function(){ed.lineStart=ed.lineEnd=ed.point=Sl,Qf.add(sl(Jf)),Jf=new S},result:function(){var e=Qf/2;return Qf=new S,e}};function td(){ed.point=nd}function nd(e,t){ed.point=rd,Yf=Kf=e,Xf=Zf=t}function rd(e,t){Jf.add(Zf*e-Kf*t),Kf=e,Zf=t}function od(){rd(Yf,Xf)}const id=ed;var ad=1/0,sd=ad,ud=-ad,ld=ud,cd={point:function(e,t){eud&&(ud=e);tld&&(ld=t)},lineStart:Sl,lineEnd:Sl,polygonStart:Sl,polygonEnd:Sl,result:function(){var e=[[ad,sd],[ud,ld]];return ud=ld=-(sd=ad=1/0),e}};const fd=cd;var dd,hd,pd,gd,vd=0,yd=0,md=0,bd=0,_d=0,wd=0,xd=0,Sd=0,Ad=0,Ed={point:Cd,lineStart:Rd,lineEnd:kd,polygonStart:function(){Ed.lineStart=Md,Ed.lineEnd=Pd},polygonEnd:function(){Ed.point=Cd,Ed.lineStart=Rd,Ed.lineEnd=kd},result:function(){var e=Ad?[xd/Ad,Sd/Ad]:wd?[bd/wd,_d/wd]:md?[vd/md,yd/md]:[NaN,NaN];return vd=yd=md=bd=_d=wd=xd=Sd=Ad=0,e}};function Cd(e,t){vd+=e,yd+=t,++md}function Rd(){Ed.point=Od}function Od(e,t){Ed.point=Td,Cd(pd=e,gd=t)}function Td(e,t){var n=e-pd,r=t-gd,o=ml(n*n+r*r);bd+=o*(pd+e)/2,_d+=o*(gd+t)/2,wd+=o,Cd(pd=e,gd=t)}function kd(){Ed.point=Cd}function Md(){Ed.point=Id}function Pd(){Nd(dd,hd)}function Id(e,t){Ed.point=Nd,Cd(dd=pd=e,hd=gd=t)}function Nd(e,t){var n=e-pd,r=t-gd,o=ml(n*n+r*r);bd+=o*(pd+e)/2,_d+=o*(gd+t)/2,wd+=o,xd+=(o=gd*e-pd*t)*(pd+e),Sd+=o*(gd+t),Ad+=3*o,Cd(pd=e,gd=t)}const Dd=Ed;function Ld(e){this._context=e}Ld.prototype={_radius:4.5,pointRadius:function(e){return this._radius=e,this},polygonStart:function(){this._line=0},polygonEnd:function(){this._line=NaN},lineStart:function(){this._point=0},lineEnd:function(){0===this._line&&this._context.closePath(),this._point=NaN},point:function(e,t){switch(this._point){case 0:this._context.moveTo(e,t),this._point=1;break;case 1:this._context.lineTo(e,t);break;default:this._context.moveTo(e+this._radius,t),this._context.arc(e,t,this._radius,0,ol)}},result:Sl};var Fd,jd,Ud,zd,Bd,Vd=new 
S,$d={point:Sl,lineStart:function(){$d.point=Hd},lineEnd:function(){Fd&&Wd(jd,Ud),$d.point=Sl},polygonStart:function(){Fd=!0},polygonEnd:function(){Fd=null},result:function(){var e=+Vd;return Vd=new S,e}};function Hd(e,t){$d.point=Wd,jd=zd=e,Ud=Bd=t}function Wd(e,t){zd-=e,Bd-=t,Vd.add(ml(zd*zd+Bd*Bd)),zd=e,Bd=t}const qd=$d;let Gd,Yd,Xd,Kd;class Zd{constructor(e){this._append=null==e?Qd:function(e){const t=Math.floor(e);if(!(t>=0))throw new RangeError(`invalid digits: ${e}`);if(t>15)return Qd;if(t!==Gd){const e=10**t;Gd=t,Yd=function(t){let n=1;this._+=t[0];for(const r=t.length;n=0))throw new RangeError(`invalid digits: ${e}`);o=t}return null===t&&(r=new Zd(o)),a},a.projection(e).digits(o).context(t)}function eh(e){return{stream:th(e)}}function th(e){return function(t){var n=new nh;for(var r in e)n[r]=e[r];return n.stream=t,n}}function nh(){}function rh(e,t,n){var r=e.clipExtent&&e.clipExtent();return e.scale(150).translate([0,0]),null!=r&&e.clipExtent(null),Tl(n,e.stream(fd)),t(fd.result()),null!=r&&e.clipExtent(r),e}function oh(e,t,n){return rh(e,(function(n){var r=t[1][0]-t[0][0],o=t[1][1]-t[0][1],i=Math.min(r/(n[1][0]-n[0][0]),o/(n[1][1]-n[0][1])),a=+t[0][0]+(r-i*(n[1][0]+n[0][0]))/2,s=+t[0][1]+(o-i*(n[1][1]+n[0][1]))/2;e.scale(150*i).translate([a,s])}),n)}function ih(e,t,n){return oh(e,[[0,0],t],n)}function ah(e,t,n){return rh(e,(function(n){var r=+t,o=r/(n[1][0]-n[0][0]),i=(r-o*(n[1][0]+n[0][0]))/2,a=-o*n[0][1];e.scale(150*o).translate([i,a])}),n)}function sh(e,t,n){return rh(e,(function(n){var r=+t,o=r/(n[1][1]-n[0][1]),i=-o*n[0][0],a=(r-o*(n[1][1]+n[0][1]))/2;e.scale(150*o).translate([i,a])}),n)}nh.prototype={constructor:nh,point:function(e,t){this.stream.point(e,t)},sphere:function(){this.stream.sphere()},lineStart:function(){this.stream.lineStart()},lineEnd:function(){this.stream.lineEnd()},polygonStart:function(){this.stream.polygonStart()},polygonEnd:function(){this.stream.polygonEnd()}};var uh=16,lh=cl(30*al);function ch(e,t){return+t?function(e,t){function n(r,o,i,a,s,u,l,c,f,d,h,p,g,v){var y=l-r,m=c-o,b=y*y+m*m;if(b>4*t&&g--){var _=a+d,w=s+h,x=u+p,S=ml(_*_+w*w+x*x),A=wl(x/=S),E=sl(sl(x)-1)t||sl((y*T+m*k)/b-.5)>.3||a*d+s*h+u*p2?e[2]%360*al:0,T()):[v*il,y*il,m*il]},R.angle=function(e){return arguments.length?(b=e%360*al,T()):b*il},R.reflectX=function(e){return arguments.length?(_=e?-1:1,T()):_<0},R.reflectY=function(e){return arguments.length?(w=e?-1:1,T()):w<0},R.precision=function(e){return arguments.length?(a=ch(s,C=e*e),k()):ml(C)},R.fitExtent=function(e,t){return oh(R,e,t)},R.fitSize=function(e,t){return ih(R,e,t)},R.fitWidth=function(e,t){return ah(R,e,t)},R.fitHeight=function(e,t){return sh(R,e,t)},function(){return t=e.apply(this,arguments),R.invert=t.invert&&O,T()}}function gh(e){var t=0,n=tl/3,r=ph(e),o=r(t,n);return o.parallels=function(e){return arguments.length?r(t=e[0]*al,n=e[1]*al):[t*il,n*il]},o}function vh(e,t){var n=vl(e),r=(n+vl(t))/2;if(sl(r)=.12&&o<.234&&r>=-.425&&r<-.214?s:o>=.166&&o<.234&&r>=-.214&&r<-.115?u:a).invert(e)},c.stream=function(n){return e&&t===n?e:e=function(e){var t=e.length;return{point:function(n,r){for(var o=-1;++o0?t<-nl+Ju&&(t=-nl+Ju):t>nl-Ju&&(t=nl-Ju);var n=o/gl(Th(t),r);return[n*vl(r*e),o-n*cl(r*e)]}return i.invert=function(e,t){var n=o-t,i=yl(r)*ml(e*e+n*n),a=ll(e,sl(n))*yl(n);return n*r<0&&(a-=tl*yl(e)*yl(n)),[a/r,2*ul(gl(o/i,1/r))-nl]},i}function Mh(){return gh(kh).scale(109.5).parallels([30,30])}function Ph(e,t){return[e,t]}function Ih(){return hh(Ph).scale(152.63)}function Nh(e,t){var 
n=cl(e),r=e===t?vl(e):(n-cl(t))/(t-e),o=n/r+e;if(sl(r)2?e[2]+90:90]):[(e=n())[0],e[1],e[2]-90]},n([0,0,90]).scale(159.155)}function ep(e,t){return e.parent===t.parent?1:2}function tp(e,t){return e+t.x}function np(e,t){return Math.max(e,t.y)}function rp(){var e=ep,t=1,n=1,r=!1;function o(o){var i,a=0;o.eachAfter((function(t){var n=t.children;n?(t.x=function(e){return e.reduce(tp,0)/e.length}(n),t.y=function(e){return 1+e.reduce(np,0)}(n)):(t.x=i?a+=e(t,i):0,t.y=0,i=t)}));var s=function(e){for(var t;t=e.children;)e=t[0];return e}(o),u=function(e){for(var t;t=e.children;)e=t[t.length-1];return e}(o),l=s.x-e(s,u)/2,c=u.x+e(u,s)/2;return o.eachAfter(r?function(e){e.x=(e.x-o.x)*t,e.y=(o.y-e.y)*n}:function(e){e.x=(e.x-l)/(c-l)*t,e.y=(1-(o.y?e.y/o.y:1))*n})}return o.separation=function(t){return arguments.length?(e=t,o):e},o.size=function(e){return arguments.length?(r=!1,t=+e[0],n=+e[1],o):r?null:[t,n]},o.nodeSize=function(e){return arguments.length?(r=!0,t=+e[0],n=+e[1],o):r?[t,n]:null},o}function op(e){var t=0,n=e.children,r=n&&n.length;if(r)for(;--r>=0;)t+=n[r].value;else t=1;e.value=t}function ip(e,t){e instanceof Map?(e=[void 0,e],void 0===t&&(t=sp)):void 0===t&&(t=ap);for(var n,r,o,i,a,s=new cp(e),u=[s];n=u.pop();)if((o=t(n.data))&&(a=(o=Array.from(o)).length))for(n.children=o,i=a-1;i>=0;--i)u.push(r=o[i]=new cp(o[i])),r.parent=n,r.depth=n.depth+1;return s.eachBefore(lp)}function ap(e){return e.children}function sp(e){return Array.isArray(e)?e[1]:null}function up(e){void 0!==e.data.value&&(e.value=e.data.value),e.data=e.data.data}function lp(e){var t=0;do{e.height=t}while((e=e.parent)&&e.height<++t)}function cp(e){this.data=e,this.depth=this.height=0,this.parent=null}function fp(e){return null==e?null:dp(e)}function dp(e){if("function"!==typeof e)throw new Error;return e}function hp(){return 0}function pp(e){return function(){return e}}Bh.invert=function(e,t){for(var n,r=t,o=r*r,i=o*o*o,a=0;a<12&&(i=(o=(r-=n=(r*(Lh+Fh*o+i*(jh+Uh*o))-t)/(Lh+3*Fh*o+i*(7*jh+9*Uh*o)))*r)*o*o,!(sl(n)Ju&&--o>0);return[e/(.8707+(i=r*r)*(i*(i*i*i*(.003971-.001529*i)-.013791)-.131979)),r]},Yh.invert=wh(wl),Kh.invert=wh((function(e){return 2*ul(e)})),Qh.invert=function(e,t){return[-t,2*ul(dl(e))-nl]},cp.prototype=ip.prototype={constructor:cp,count:function(){return this.eachAfter(op)},each:function(e,t){let n=-1;for(const r of this)e.call(t,r,++n,this);return this},eachAfter:function(e,t){for(var n,r,o,i=this,a=[i],s=[],u=-1;i=a.pop();)if(s.push(i),n=i.children)for(r=0,o=n.length;r=0;--r)i.push(n[r]);return this},find:function(e,t){let n=-1;for(const r of this)if(e.call(t,r,++n,this))return r},sum:function(e){return this.eachAfter((function(t){for(var n=+e(t.data)||0,r=t.children,o=r&&r.length;--o>=0;)n+=r[o].value;t.value=n}))},sort:function(e){return this.eachBefore((function(t){t.children&&t.children.sort(e)}))},path:function(e){for(var t=this,n=function(e,t){if(e===t)return e;var n=e.ancestors(),r=t.ancestors(),o=null;e=n.pop(),t=r.pop();for(;e===t;)o=e,e=n.pop(),t=r.pop();return o}(t,e),r=[t];t!==n;)t=t.parent,r.push(t);for(var o=r.length;e!==n;)r.splice(o,0,e),e=e.parent;return r},ancestors:function(){for(var e=this,t=[e];e=e.parent;)t.push(e);return t},descendants:function(){return Array.from(this)},leaves:function(){var e=[];return this.eachBefore((function(t){t.children||e.push(t)})),e},links:function(){var e=this,t=[];return e.each((function(n){n!==e&&t.push({source:n.parent,target:n})})),t},copy:function(){return ip(this).eachBefore(up)},[Symbol.iterator]:function*(){var 
e,t,n,r,o=this,i=[o];do{for(e=i.reverse(),i=[];o=e.pop();)if(yield o,t=o.children)for(n=0,r=t.length;n(e=(gp*e+vp)%yp)/yp}function bp(e){return _p(e,mp())}function _p(e,t){for(var n,r,o=0,i=(e=function(e,t){let n,r,o=e.length;for(;o;)r=t()*o--|0,n=e[o],e[o]=e[r],e[r]=n;return e}(Array.from(e),t)).length,a=[];o0&&n*n>r*r+o*o}function Ap(e,t){for(var n=0;n1e-6?(R+Math.sqrt(R*R-4*C*O))/(2*C):O/R);return{x:r+x+S*T,y:o+A+E*T,r:T}}function Op(e,t,n){var r,o,i,a,s=e.x-t.x,u=e.y-t.y,l=s*s+u*u;l?(o=t.r+n.r,o*=o,a=e.r+n.r,o>(a*=a)?(r=(l+a-o)/(2*l),i=Math.sqrt(Math.max(0,a/l-r*r)),n.x=e.x-r*s-i*u,n.y=e.y-r*u+i*s):(r=(l+o-a)/(2*l),i=Math.sqrt(Math.max(0,o/l-r*r)),n.x=t.x+r*s-i*u,n.y=t.y+r*u+i*s)):(n.x=t.x+n.r,n.y=t.y)}function Tp(e,t){var n=e.r+t.r-1e-6,r=t.x-e.x,o=t.y-e.y;return n>0&&n*n>r*r+o*o}function kp(e){var t=e._,n=e.next._,r=t.r+n.r,o=(t.x*n.r+n.x*t.r)/r,i=(t.y*n.r+n.y*t.r)/r;return o*o+i*i}function Mp(e){this._=e,this.next=null,this.previous=null}function Pp(e,t){if(!(i=(e=function(e){return"object"===typeof e&&"length"in e?e:Array.from(e)}(e)).length))return 0;var n,r,o,i,a,s,u,l,c,f,d;if((n=e[0]).x=0,n.y=0,!(i>1))return n.r;if(r=e[1],n.x=-r.r,r.x=n.r,r.y=0,!(i>2))return n.r+r.r;Op(r,n,o=e[2]),n=new Mp(n),r=new Mp(r),o=new Mp(o),n.next=o.previous=r,r.next=n.previous=o,o.next=r.previous=n;e:for(u=3;ufunction(e){e=`${e}`;let t=e.length;Xp(e,t-1)&&!Xp(e,t-2)&&(e=e.slice(0,-1));return"/"===e[0]?e:`/${e}`}(e(t,n,r)))),n=t.map(Yp),o=new Set(t).add("");for(const e of n)o.has(e)||(o.add(e),t.push(e),n.push(Yp(e)),d.push(Hp));h=(e,n)=>t[n],p=(e,t)=>n[t]}for(a=0,o=d.length;a=0&&(l=d[e]).data===Hp;--e)l.data=null}if(s.parent=Vp,s.eachBefore((function(e){e.depth=e.parent.depth+1,--o})).eachBefore(lp),s.parent=null,o>0)throw new Error("cycle");return s}return r.id=function(e){return arguments.length?(t=fp(e),r):t},r.parentId=function(e){return arguments.length?(n=fp(e),r):n},r.path=function(t){return arguments.length?(e=fp(t),r):e},r}function Yp(e){let t=e.length;if(t<2)return"";for(;--t>1&&!Xp(e,t););return e.slice(0,t)}function Xp(e,t){if("/"===e[t]){let n=0;for(;t>0&&"\\"===e[--t];)++n;if(0===(1&n))return!0}return!1}function Kp(e,t){return e.parent===t.parent?1:2}function Zp(e){var t=e.children;return t?t[0]:e.t}function Qp(e){var t=e.children;return t?t[t.length-1]:e.t}function Jp(e,t,n){var r=n/(t.i-e.i);t.c-=r,t.s+=n,e.c+=r,t.z+=n,t.m+=n}function eg(e,t,n){return e.a.parent===t.parent?e.a:n}function tg(e,t){this._=e,this.parent=null,this.children=null,this.A=null,this.a=this,this.z=0,this.m=0,this.c=0,this.s=0,this.t=null,this.i=t}function ng(){var e=Kp,t=1,n=1,r=null;function o(o){var u=function(e){for(var t,n,r,o,i,a=new tg(e,0),s=[a];t=s.pop();)if(r=t._.children)for(t.children=new Array(i=r.length),o=i-1;o>=0;--o)s.push(n=t.children[o]=new tg(r[o],o)),n.parent=t;return(a.parent=new tg(null,0)).children=[a],a}(o);if(u.eachAfter(i),u.parent.m=-u.z,u.eachBefore(a),r)o.eachBefore(s);else{var l=o,c=o,f=o;o.eachBefore((function(e){e.xc.x&&(c=e),e.depth>f.depth&&(f=e)}));var d=l===c?1:e(l,c)/2,h=d-l.x,p=t/(c.x+d+h),g=n/(f.depth||1);o.eachBefore((function(e){e.x=(e.x+h)*p,e.y=e.depth*g}))}return o}function i(t){var n=t.children,r=t.parent.children,o=t.i?r[t.i-1]:null;if(n){!function(e){for(var t,n=0,r=0,o=e.children,i=o.length;--i>=0;)(t=o[i]).z+=n,t.m+=n,n+=t.s+(r+=t.c)}(t);var i=(n[0].z+n[n.length-1].z)/2;o?(t.z=o.z+e(t._,o._),t.m=t.z-i):t.z=i}else o&&(t.z=o.z+e(t._,o._));t.parent.A=function(t,n,r){if(n){for(var 
o,i=t,a=t,s=n,u=i.parent.children[0],l=i.m,c=a.m,f=s.m,d=u.m;s=Qp(s),i=Zp(i),s&&i;)u=Zp(u),(a=Qp(a)).a=t,(o=s.z+f-i.z-l+e(s._,i._))>0&&(Jp(eg(s,t,r),t,o),l+=o,c+=o),f+=s.m,l+=i.m,d+=u.m,c+=a.m;s&&!Qp(a)&&(a.t=s,a.m+=f-c),i&&!Zp(u)&&(u.t=i,u.m+=l-d,r=t)}return r}(t,o,t.parent.A||r[0])}function a(e){e._.x=e.z+e.parent.m,e.m+=e.parent.m}function s(e){e.x*=t,e.y=e.depth*n}return o.separation=function(t){return arguments.length?(e=t,o):e},o.size=function(e){return arguments.length?(r=!1,t=+e[0],n=+e[1],o):r?null:[t,n]},o.nodeSize=function(e){return arguments.length?(r=!0,t=+e[0],n=+e[1],o):r?[t,n]:null},o}function rg(e,t,n,r,o){for(var i,a=e.children,s=-1,u=a.length,l=e.value&&(o-n)/e.value;++sd&&(d=s),v=c*c*g,(h=Math.max(d/v,v/f))>p){c-=s;break}p=h}y.push(a={value:c,dice:u1?t:1)},n}(og);function sg(){var e=ag,t=!1,n=1,r=1,o=[0],i=hp,a=hp,s=hp,u=hp,l=hp;function c(e){return e.x0=e.y0=0,e.x1=n,e.y1=r,e.eachBefore(f),o=[0],t&&e.eachBefore(Up),e}function f(t){var n=o[t.depth],r=t.x0+n,c=t.y0+n,f=t.x1-n,d=t.y1-n;f=n-1){var c=s[t];return c.x0=o,c.y0=i,c.x1=a,void(c.y1=u)}var f=l[t],d=r/2+f,h=t+1,p=n-1;for(;h>>1;l[g]u-i){var m=r?(o*y+a*v)/r:a;e(t,h,v,o,i,m,u),e(h,n,y,m,i,a,u)}else{var b=r?(i*y+u*v)/r:u;e(t,h,v,o,i,a,b),e(h,n,y,o,b,a,u)}}(0,u,e.value,t,n,r,o)}function lg(e,t,n,r,o){(1&e.depth?rg:zp)(e,t,n,r,o)}const cg=function e(t){function n(e,n,r,o,i){if((a=e._squarify)&&a.ratio===t)for(var a,s,u,l,c,f=-1,d=a.length,h=e.value;++f1?t:1)},n}(og);var fg=n(68079),dg=n(86160),hg=n(19804),pg=n(58172);function gg(e){var t=e.length;return function(n){return e[Math.max(0,Math.min(t-1,Math.floor(n*t)))]}}var vg=n(84709);function yg(e,t){var n=(0,vg.lG)(+e,+t);return function(e){var t=n(e);return t-360*Math.floor(t/360)}}var mg=n(10128),bg=n(34287),_g=n(49770);function wg(e){return((e=Math.exp(e))+1/e)/2}const xg=function e(t,n,r){function o(e,o){var i,a,s=e[0],u=e[1],l=e[2],c=o[0],f=o[1],d=o[2],h=c-s,p=f-u,g=h*h+p*p;if(g<1e-12)a=Math.log(d/l)/t,i=function(e){return[s+e*h,u+e*p,l*Math.exp(t*e*a)]};else{var v=Math.sqrt(g),y=(d*d-l*l+r*g)/(2*l*n*v),m=(d*d-l*l-r*g)/(2*d*n*v),b=Math.log(Math.sqrt(y*y+1)-y),_=Math.log(Math.sqrt(m*m+1)-m);a=(_-b)/t,i=function(e){var r=e*a,o=wg(b),i=l/(n*v)*(o*function(e){return((e=Math.exp(2*e))-1)/(e+1)}(t*r+b)-function(e){return((e=Math.exp(e))-1/e)/2}(b));return[s+i*h,u+i*p,l*o/wg(t*r+b)]}}return i.duration=1e3*a*t/Math.SQRT2,i}return o.rho=function(t){var n=Math.max(.001,+t),r=n*n;return e(n,r,r*r)},o}(Math.SQRT2,2,4);function Sg(e){return function(t,n){var r=e((t=(0,_r.KI)(t)).h,(n=(0,_r.KI)(n)).h),o=(0,vg.Ay)(t.s,n.s),i=(0,vg.Ay)(t.l,n.l),a=(0,vg.Ay)(t.opacity,n.opacity);return function(e){return t.h=r(e),t.s=o(e),t.l=i(e),t.opacity=a(e),t+""}}}const Ag=Sg(vg.lG);var Eg=Sg(vg.Ay);function Cg(e,t){var n=(0,vg.Ay)((e=mi(e)).l,(t=mi(t)).l),r=(0,vg.Ay)(e.a,t.a),o=(0,vg.Ay)(e.b,t.b),i=(0,vg.Ay)(e.opacity,t.opacity);return function(t){return e.l=n(t),e.a=r(t),e.b=o(t),e.opacity=i(t),e+""}}function Rg(e){return function(t,n){var r=e((t=Ci(t)).h,(n=Ci(n)).h),o=(0,vg.Ay)(t.c,n.c),i=(0,vg.Ay)(t.l,n.l),a=(0,vg.Ay)(t.opacity,n.opacity);return function(e){return t.h=r(e),t.c=o(e),t.l=i(e),t.opacity=a(e),t+""}}}const Og=Rg(vg.lG);var Tg=Rg(vg.Ay);function kg(e){return function t(n){function r(t,r){var o=e((t=Fi(t)).h,(r=Fi(r)).h),i=(0,vg.Ay)(t.s,r.s),a=(0,vg.Ay)(t.l,r.l),s=(0,vg.Ay)(t.opacity,r.opacity);return function(e){return t.h=o(e),t.s=i(e),t.l=a(Math.pow(e,n)),t.opacity=s(e),t+""}}return n=+n,r.gamma=t,r}(1)}const Mg=kg(vg.lG);var Pg=kg(vg.Ay),Ig=n(23104);function Ng(e,t){for(var 
n=new Array(t),r=0;r1&&Fg(e[n[o-2]],e[n[o-1]],e[r])<=0;)--o;n[o++]=r}return n.slice(0,o)}function zg(e){if((n=e.length)<3)return null;var t,n,r=new Array(n),o=new Array(n);for(t=0;t=0;--t)l.push(e[r[i[t]][2]]);for(t=+s;ts!==l>s&&a<(u-n)*(s-r)/(l-r)+n&&(c=!c),u=n,l=r;return c}function Vg(e){for(var t,n,r=-1,o=e.length,i=e[o-1],a=i[0],s=i[1],u=0;++r1);return e+n*i*Math.sqrt(-2*Math.log(o)/o)}}return n.source=e,n}($g),Gg=function e(t){var n=qg.source(t);function r(){var e=n.apply(this,arguments);return function(){return Math.exp(e())}}return r.source=e,r}($g),Yg=function e(t){function n(e){return(e=+e)<=0?()=>0:function(){for(var n=0,r=e;r>1;--r)n+=t();return n+r*t()}}return n.source=e,n}($g),Xg=function e(t){var n=Yg.source(t);function r(e){if(0===(e=+e))return t;var r=n(e);return function(){return r()/e}}return r.source=e,r}($g),Kg=function e(t){function n(e){return function(){return-Math.log1p(-t())/e}}return n.source=e,n}($g),Zg=function e(t){function n(e){if((e=+e)<0)throw new RangeError("invalid alpha");return e=1/-e,function(){return Math.pow(1-t(),e)}}return n.source=e,n}($g),Qg=function e(t){function n(e){if((e=+e)<0||e>1)throw new RangeError("invalid p");return function(){return Math.floor(t()+e)}}return n.source=e,n}($g),Jg=function e(t){function n(e){if((e=+e)<0||e>1)throw new RangeError("invalid p");return 0===e?()=>1/0:1===e?()=>1:(e=Math.log1p(-e),function(){return 1+Math.floor(Math.log1p(-t())/e)})}return n.source=e,n}($g),ev=function e(t){var n=qg.source(t)();function r(e,r){if((e=+e)<0)throw new RangeError("invalid k");if(0===e)return()=>0;if(r=null==r?1:+r,1===e)return()=>-Math.log1p(-t())*r;var o=(e<1?e+1:e)-1/3,i=1/(3*Math.sqrt(o)),a=e<1?()=>Math.pow(t(),1/e):()=>1;return function(){do{do{var e=n(),s=1+i*e}while(s<=0);s*=s*s;var u=1-t()}while(u>=1-.0331*e*e*e*e&&Math.log(u)>=.5*e*e+o*(1-s+Math.log(s)));return o*s*a()*r}}return r.source=e,r}($g),tv=function e(t){var n=ev.source(t);function r(e,t){var r=n(e),o=n(t);return function(){var e=r();return 0===e?0:e/(e+o())}}return r.source=e,r}($g),nv=function e(t){var n=Jg.source(t),r=tv.source(t);function o(e,t){return e=+e,(t=+t)>=1?()=>e:t<=0?()=>0:function(){for(var o=0,i=e,a=t;i*a>16&&i*(1-a)>16;){var s=Math.floor((i+1)*a),u=r(s,i-s+1)();u<=a?(o+=s,i-=s,a=(a-u)/(1-u)):(i=s-1,a/=u)}for(var l=a<.5,c=n(l?a:1-a),f=c(),d=0;f<=i;++d)f+=c();return o+(l?d:i-d)}}return o.source=e,o}($g),rv=function e(t){function n(e,n,r){var o;return 0===(e=+e)?o=e=>-Math.log(e):(e=1/e,o=t=>Math.pow(t,e)),n=null==n?0:+n,r=null==r?1:+r,function(){return n+r*o(-Math.log1p(-t()))}}return n.source=e,n}($g),ov=function e(t){function n(e,n){return e=null==e?0:+e,n=null==n?1:+n,function(){return e+n*Math.tan(Math.PI*t())}}return n.source=e,n}($g),iv=function e(t){function n(e,n){return e=null==e?0:+e,n=null==n?1:+n,function(){var r=t();return e+n*Math.log(r/(1-r))}}return n.source=e,n}($g),av=function e(t){var n=ev.source(t),r=nv.source(t);function o(e){return function(){for(var o=0,i=e;i>16;){var a=Math.floor(.875*i),s=n(a)();if(s>i)return o+r(a-1,i/s)();o+=a,i-=s}for(var u=-Math.log1p(-t()),l=0;u<=i;++l)u-=Math.log1p(-t());return o+l}}return o.source=e,o}($g),sv=1664525,uv=1013904223,lv=1/4294967296;function cv(e=Math.random()){let t=0|(0<=e&&e<1?e/lv:Math.abs(e));return()=>(t=sv*t+uv|0,lv*(t>>>0))}var fv=n(16199);function dv(e){for(var t=e.length/6|0,n=new Array(t),r=0;r(0,wr.Ik)(e[e.length-1]);var Ev=new 
Array(3).concat("d8b365f5f5f55ab4ac","a6611adfc27d80cdc1018571","a6611adfc27df5f5f580cdc1018571","8c510ad8b365f6e8c3c7eae55ab4ac01665e","8c510ad8b365f6e8c3f5f5f5c7eae55ab4ac01665e","8c510abf812ddfc27df6e8c3c7eae580cdc135978f01665e","8c510abf812ddfc27df6e8c3f5f5f5c7eae580cdc135978f01665e","5430058c510abf812ddfc27df6e8c3c7eae580cdc135978f01665e003c30","5430058c510abf812ddfc27df6e8c3f5f5f5c7eae580cdc135978f01665e003c30").map(dv);const Cv=Av(Ev);var Rv=new Array(3).concat("af8dc3f7f7f77fbf7b","7b3294c2a5cfa6dba0008837","7b3294c2a5cff7f7f7a6dba0008837","762a83af8dc3e7d4e8d9f0d37fbf7b1b7837","762a83af8dc3e7d4e8f7f7f7d9f0d37fbf7b1b7837","762a839970abc2a5cfe7d4e8d9f0d3a6dba05aae611b7837","762a839970abc2a5cfe7d4e8f7f7f7d9f0d3a6dba05aae611b7837","40004b762a839970abc2a5cfe7d4e8d9f0d3a6dba05aae611b783700441b","40004b762a839970abc2a5cfe7d4e8f7f7f7d9f0d3a6dba05aae611b783700441b").map(dv);const Ov=Av(Rv);var Tv=new Array(3).concat("e9a3c9f7f7f7a1d76a","d01c8bf1b6dab8e1864dac26","d01c8bf1b6daf7f7f7b8e1864dac26","c51b7de9a3c9fde0efe6f5d0a1d76a4d9221","c51b7de9a3c9fde0eff7f7f7e6f5d0a1d76a4d9221","c51b7dde77aef1b6dafde0efe6f5d0b8e1867fbc414d9221","c51b7dde77aef1b6dafde0eff7f7f7e6f5d0b8e1867fbc414d9221","8e0152c51b7dde77aef1b6dafde0efe6f5d0b8e1867fbc414d9221276419","8e0152c51b7dde77aef1b6dafde0eff7f7f7e6f5d0b8e1867fbc414d9221276419").map(dv);const kv=Av(Tv);var Mv=new Array(3).concat("998ec3f7f7f7f1a340","5e3c99b2abd2fdb863e66101","5e3c99b2abd2f7f7f7fdb863e66101","542788998ec3d8daebfee0b6f1a340b35806","542788998ec3d8daebf7f7f7fee0b6f1a340b35806","5427888073acb2abd2d8daebfee0b6fdb863e08214b35806","5427888073acb2abd2d8daebf7f7f7fee0b6fdb863e08214b35806","2d004b5427888073acb2abd2d8daebfee0b6fdb863e08214b358067f3b08","2d004b5427888073acb2abd2d8daebf7f7f7fee0b6fdb863e08214b358067f3b08").map(dv);const Pv=Av(Mv);var Iv=new Array(3).concat("ef8a62f7f7f767a9cf","ca0020f4a58292c5de0571b0","ca0020f4a582f7f7f792c5de0571b0","b2182bef8a62fddbc7d1e5f067a9cf2166ac","b2182bef8a62fddbc7f7f7f7d1e5f067a9cf2166ac","b2182bd6604df4a582fddbc7d1e5f092c5de4393c32166ac","b2182bd6604df4a582fddbc7f7f7f7d1e5f092c5de4393c32166ac","67001fb2182bd6604df4a582fddbc7d1e5f092c5de4393c32166ac053061","67001fb2182bd6604df4a582fddbc7f7f7f7d1e5f092c5de4393c32166ac053061").map(dv);const Nv=Av(Iv);var Dv=new Array(3).concat("ef8a62ffffff999999","ca0020f4a582bababa404040","ca0020f4a582ffffffbababa404040","b2182bef8a62fddbc7e0e0e09999994d4d4d","b2182bef8a62fddbc7ffffffe0e0e09999994d4d4d","b2182bd6604df4a582fddbc7e0e0e0bababa8787874d4d4d","b2182bd6604df4a582fddbc7ffffffe0e0e0bababa8787874d4d4d","67001fb2182bd6604df4a582fddbc7e0e0e0bababa8787874d4d4d1a1a1a","67001fb2182bd6604df4a582fddbc7ffffffe0e0e0bababa8787874d4d4d1a1a1a").map(dv);const Lv=Av(Dv);var Fv=new Array(3).concat("fc8d59ffffbf91bfdb","d7191cfdae61abd9e92c7bb6","d7191cfdae61ffffbfabd9e92c7bb6","d73027fc8d59fee090e0f3f891bfdb4575b4","d73027fc8d59fee090ffffbfe0f3f891bfdb4575b4","d73027f46d43fdae61fee090e0f3f8abd9e974add14575b4","d73027f46d43fdae61fee090ffffbfe0f3f8abd9e974add14575b4","a50026d73027f46d43fdae61fee090e0f3f8abd9e974add14575b4313695","a50026d73027f46d43fdae61fee090ffffbfe0f3f8abd9e974add14575b4313695").map(dv);const jv=Av(Fv);var Uv=new 
Array(3).concat("fc8d59ffffbf91cf60","d7191cfdae61a6d96a1a9641","d7191cfdae61ffffbfa6d96a1a9641","d73027fc8d59fee08bd9ef8b91cf601a9850","d73027fc8d59fee08bffffbfd9ef8b91cf601a9850","d73027f46d43fdae61fee08bd9ef8ba6d96a66bd631a9850","d73027f46d43fdae61fee08bffffbfd9ef8ba6d96a66bd631a9850","a50026d73027f46d43fdae61fee08bd9ef8ba6d96a66bd631a9850006837","a50026d73027f46d43fdae61fee08bffffbfd9ef8ba6d96a66bd631a9850006837").map(dv);const zv=Av(Uv);var Bv=new Array(3).concat("fc8d59ffffbf99d594","d7191cfdae61abdda42b83ba","d7191cfdae61ffffbfabdda42b83ba","d53e4ffc8d59fee08be6f59899d5943288bd","d53e4ffc8d59fee08bffffbfe6f59899d5943288bd","d53e4ff46d43fdae61fee08be6f598abdda466c2a53288bd","d53e4ff46d43fdae61fee08bffffbfe6f598abdda466c2a53288bd","9e0142d53e4ff46d43fdae61fee08be6f598abdda466c2a53288bd5e4fa2","9e0142d53e4ff46d43fdae61fee08bffffbfe6f598abdda466c2a53288bd5e4fa2").map(dv);const Vv=Av(Bv);var $v=new Array(3).concat("e5f5f999d8c92ca25f","edf8fbb2e2e266c2a4238b45","edf8fbb2e2e266c2a42ca25f006d2c","edf8fbccece699d8c966c2a42ca25f006d2c","edf8fbccece699d8c966c2a441ae76238b45005824","f7fcfde5f5f9ccece699d8c966c2a441ae76238b45005824","f7fcfde5f5f9ccece699d8c966c2a441ae76238b45006d2c00441b").map(dv);const Hv=Av($v);var Wv=new Array(3).concat("e0ecf49ebcda8856a7","edf8fbb3cde38c96c688419d","edf8fbb3cde38c96c68856a7810f7c","edf8fbbfd3e69ebcda8c96c68856a7810f7c","edf8fbbfd3e69ebcda8c96c68c6bb188419d6e016b","f7fcfde0ecf4bfd3e69ebcda8c96c68c6bb188419d6e016b","f7fcfde0ecf4bfd3e69ebcda8c96c68c6bb188419d810f7c4d004b").map(dv);const qv=Av(Wv);var Gv=new Array(3).concat("e0f3dba8ddb543a2ca","f0f9e8bae4bc7bccc42b8cbe","f0f9e8bae4bc7bccc443a2ca0868ac","f0f9e8ccebc5a8ddb57bccc443a2ca0868ac","f0f9e8ccebc5a8ddb57bccc44eb3d32b8cbe08589e","f7fcf0e0f3dbccebc5a8ddb57bccc44eb3d32b8cbe08589e","f7fcf0e0f3dbccebc5a8ddb57bccc44eb3d32b8cbe0868ac084081").map(dv);const Yv=Av(Gv);var Xv=new Array(3).concat("fee8c8fdbb84e34a33","fef0d9fdcc8afc8d59d7301f","fef0d9fdcc8afc8d59e34a33b30000","fef0d9fdd49efdbb84fc8d59e34a33b30000","fef0d9fdd49efdbb84fc8d59ef6548d7301f990000","fff7ecfee8c8fdd49efdbb84fc8d59ef6548d7301f990000","fff7ecfee8c8fdd49efdbb84fc8d59ef6548d7301fb300007f0000").map(dv);const Kv=Av(Xv);var Zv=new Array(3).concat("ece2f0a6bddb1c9099","f6eff7bdc9e167a9cf02818a","f6eff7bdc9e167a9cf1c9099016c59","f6eff7d0d1e6a6bddb67a9cf1c9099016c59","f6eff7d0d1e6a6bddb67a9cf3690c002818a016450","fff7fbece2f0d0d1e6a6bddb67a9cf3690c002818a016450","fff7fbece2f0d0d1e6a6bddb67a9cf3690c002818a016c59014636").map(dv);const Qv=Av(Zv);var Jv=new Array(3).concat("ece7f2a6bddb2b8cbe","f1eef6bdc9e174a9cf0570b0","f1eef6bdc9e174a9cf2b8cbe045a8d","f1eef6d0d1e6a6bddb74a9cf2b8cbe045a8d","f1eef6d0d1e6a6bddb74a9cf3690c00570b0034e7b","fff7fbece7f2d0d1e6a6bddb74a9cf3690c00570b0034e7b","fff7fbece7f2d0d1e6a6bddb74a9cf3690c00570b0045a8d023858").map(dv);const ey=Av(Jv);var ty=new Array(3).concat("e7e1efc994c7dd1c77","f1eef6d7b5d8df65b0ce1256","f1eef6d7b5d8df65b0dd1c77980043","f1eef6d4b9dac994c7df65b0dd1c77980043","f1eef6d4b9dac994c7df65b0e7298ace125691003f","f7f4f9e7e1efd4b9dac994c7df65b0e7298ace125691003f","f7f4f9e7e1efd4b9dac994c7df65b0e7298ace125698004367001f").map(dv);const ny=Av(ty);var ry=new Array(3).concat("fde0ddfa9fb5c51b8a","feebe2fbb4b9f768a1ae017e","feebe2fbb4b9f768a1c51b8a7a0177","feebe2fcc5c0fa9fb5f768a1c51b8a7a0177","feebe2fcc5c0fa9fb5f768a1dd3497ae017e7a0177","fff7f3fde0ddfcc5c0fa9fb5f768a1dd3497ae017e7a0177","fff7f3fde0ddfcc5c0fa9fb5f768a1dd3497ae017e7a017749006a").map(dv);const oy=Av(ry);var iy=new 
Array(3).concat("edf8b17fcdbb2c7fb8","ffffcca1dab441b6c4225ea8","ffffcca1dab441b6c42c7fb8253494","ffffccc7e9b47fcdbb41b6c42c7fb8253494","ffffccc7e9b47fcdbb41b6c41d91c0225ea80c2c84","ffffd9edf8b1c7e9b47fcdbb41b6c41d91c0225ea80c2c84","ffffd9edf8b1c7e9b47fcdbb41b6c41d91c0225ea8253494081d58").map(dv);const ay=Av(iy);var sy=new Array(3).concat("f7fcb9addd8e31a354","ffffccc2e69978c679238443","ffffccc2e69978c67931a354006837","ffffccd9f0a3addd8e78c67931a354006837","ffffccd9f0a3addd8e78c67941ab5d238443005a32","ffffe5f7fcb9d9f0a3addd8e78c67941ab5d238443005a32","ffffe5f7fcb9d9f0a3addd8e78c67941ab5d238443006837004529").map(dv);const uy=Av(sy);var ly=new Array(3).concat("fff7bcfec44fd95f0e","ffffd4fed98efe9929cc4c02","ffffd4fed98efe9929d95f0e993404","ffffd4fee391fec44ffe9929d95f0e993404","ffffd4fee391fec44ffe9929ec7014cc4c028c2d04","ffffe5fff7bcfee391fec44ffe9929ec7014cc4c028c2d04","ffffe5fff7bcfee391fec44ffe9929ec7014cc4c02993404662506").map(dv);const cy=Av(ly);var fy=new Array(3).concat("ffeda0feb24cf03b20","ffffb2fecc5cfd8d3ce31a1c","ffffb2fecc5cfd8d3cf03b20bd0026","ffffb2fed976feb24cfd8d3cf03b20bd0026","ffffb2fed976feb24cfd8d3cfc4e2ae31a1cb10026","ffffccffeda0fed976feb24cfd8d3cfc4e2ae31a1cb10026","ffffccffeda0fed976feb24cfd8d3cfc4e2ae31a1cbd0026800026").map(dv);const dy=Av(fy);var hy=new Array(3).concat("deebf79ecae13182bd","eff3ffbdd7e76baed62171b5","eff3ffbdd7e76baed63182bd08519c","eff3ffc6dbef9ecae16baed63182bd08519c","eff3ffc6dbef9ecae16baed64292c62171b5084594","f7fbffdeebf7c6dbef9ecae16baed64292c62171b5084594","f7fbffdeebf7c6dbef9ecae16baed64292c62171b508519c08306b").map(dv);const py=Av(hy);var gy=new Array(3).concat("e5f5e0a1d99b31a354","edf8e9bae4b374c476238b45","edf8e9bae4b374c47631a354006d2c","edf8e9c7e9c0a1d99b74c47631a354006d2c","edf8e9c7e9c0a1d99b74c47641ab5d238b45005a32","f7fcf5e5f5e0c7e9c0a1d99b74c47641ab5d238b45005a32","f7fcf5e5f5e0c7e9c0a1d99b74c47641ab5d238b45006d2c00441b").map(dv);const vy=Av(gy);var yy=new Array(3).concat("f0f0f0bdbdbd636363","f7f7f7cccccc969696525252","f7f7f7cccccc969696636363252525","f7f7f7d9d9d9bdbdbd969696636363252525","f7f7f7d9d9d9bdbdbd969696737373525252252525","fffffff0f0f0d9d9d9bdbdbd969696737373525252252525","fffffff0f0f0d9d9d9bdbdbd969696737373525252252525000000").map(dv);const my=Av(yy);var by=new Array(3).concat("efedf5bcbddc756bb1","f2f0f7cbc9e29e9ac86a51a3","f2f0f7cbc9e29e9ac8756bb154278f","f2f0f7dadaebbcbddc9e9ac8756bb154278f","f2f0f7dadaebbcbddc9e9ac8807dba6a51a34a1486","fcfbfdefedf5dadaebbcbddc9e9ac8807dba6a51a34a1486","fcfbfdefedf5dadaebbcbddc9e9ac8807dba6a51a354278f3f007d").map(dv);const _y=Av(by);var wy=new Array(3).concat("fee0d2fc9272de2d26","fee5d9fcae91fb6a4acb181d","fee5d9fcae91fb6a4ade2d26a50f15","fee5d9fcbba1fc9272fb6a4ade2d26a50f15","fee5d9fcbba1fc9272fb6a4aef3b2ccb181d99000d","fff5f0fee0d2fcbba1fc9272fb6a4aef3b2ccb181d99000d","fff5f0fee0d2fcbba1fc9272fb6a4aef3b2ccb181da50f1567000d").map(dv);const xy=Av(wy);var Sy=new Array(3).concat("fee6cefdae6be6550d","feeddefdbe85fd8d3cd94701","feeddefdbe85fd8d3ce6550da63603","feeddefdd0a2fdae6bfd8d3ce6550da63603","feeddefdd0a2fdae6bfd8d3cf16913d948018c2d04","fff5ebfee6cefdd0a2fdae6bfd8d3cf16913d948018c2d04","fff5ebfee6cefdd0a2fdae6bfd8d3cf16913d94801a636037f2704").map(dv);const Ay=Av(Sy);function Ey(e){return e=Math.max(0,Math.min(1,e)),"rgb("+Math.max(0,Math.min(255,Math.round(-4.54-e*(35.34-e*(2381.73-e*(6402.7-e*(7024.72-2710.57*e)))))))+", "+Math.max(0,Math.min(255,Math.round(32.49+e*(170.73+e*(52.82-e*(131.46-e*(176.58-67.37*e)))))))+", 
"+Math.max(0,Math.min(255,Math.round(81.24+e*(442.36-e*(2482.43-e*(6167.24-e*(6614.94-2475.67*e)))))))+")"}const Cy=Pg(Fi(300,.5,0),Fi(-240,.5,1));var Ry=Pg(Fi(-100,.75,.35),Fi(80,1.5,.8)),Oy=Pg(Fi(260,.75,.35),Fi(80,1.5,.8)),Ty=Fi();function ky(e){(e<0||e>1)&&(e-=Math.floor(e));var t=Math.abs(e-.5);return Ty.h=360*e-100,Ty.s=1.5-1.5*t,Ty.l=.8-.9*t,Ty+""}var My=(0,_r.Qh)(),Py=Math.PI/3,Iy=2*Math.PI/3;function Ny(e){var t;return e=(.5-e)*Math.PI,My.r=255*(t=Math.sin(e))*t,My.g=255*(t=Math.sin(e+Py))*t,My.b=255*(t=Math.sin(e+Iy))*t,My+""}function Dy(e){return e=Math.max(0,Math.min(1,e)),"rgb("+Math.max(0,Math.min(255,Math.round(34.61+e*(1172.33-e*(10793.56-e*(33300.12-e*(38394.49-14825.05*e)))))))+", "+Math.max(0,Math.min(255,Math.round(23.31+e*(557.33+e*(1225.33-e*(3574.96-e*(1073.77+707.56*e)))))))+", "+Math.max(0,Math.min(255,Math.round(27.2+e*(3211.1-e*(15327.97-e*(27814-e*(22569.18-6838.66*e)))))))+")"}function Ly(e){var t=e.length;return function(n){return e[Math.max(0,Math.min(t-1,Math.floor(n*t)))]}}const Fy=Ly(dv("44015444025645045745055946075a46085c460a5d460b5e470d60470e6147106347116447136548146748166848176948186a481a6c481b6d481c6e481d6f481f70482071482173482374482475482576482677482878482979472a7a472c7a472d7b472e7c472f7d46307e46327e46337f463480453581453781453882443983443a83443b84433d84433e85423f854240864241864142874144874045884046883f47883f48893e49893e4a893e4c8a3d4d8a3d4e8a3c4f8a3c508b3b518b3b528b3a538b3a548c39558c39568c38588c38598c375a8c375b8d365c8d365d8d355e8d355f8d34608d34618d33628d33638d32648e32658e31668e31678e31688e30698e306a8e2f6b8e2f6c8e2e6d8e2e6e8e2e6f8e2d708e2d718e2c718e2c728e2c738e2b748e2b758e2a768e2a778e2a788e29798e297a8e297b8e287c8e287d8e277e8e277f8e27808e26818e26828e26828e25838e25848e25858e24868e24878e23888e23898e238a8d228b8d228c8d228d8d218e8d218f8d21908d21918c20928c20928c20938c1f948c1f958b1f968b1f978b1f988b1f998a1f9a8a1e9b8a1e9c891e9d891f9e891f9f881fa0881fa1881fa1871fa28720a38620a48621a58521a68522a78522a88423a98324aa8325ab8225ac8226ad8127ad8128ae8029af7f2ab07f2cb17e2db27d2eb37c2fb47c31b57b32b67a34b67935b77937b87838b9773aba763bbb753dbc743fbc7340bd7242be7144bf7046c06f48c16e4ac16d4cc26c4ec36b50c46a52c56954c56856c66758c7655ac8645cc8635ec96260ca6063cb5f65cb5e67cc5c69cd5b6ccd5a6ece5870cf5773d05675d05477d1537ad1517cd2507fd34e81d34d84d44b86d54989d5488bd6468ed64590d74393d74195d84098d83e9bd93c9dd93ba0da39a2da37a5db36a8db34aadc32addc30b0dd2fb2dd2db5de2bb8de29bade28bddf26c0df25c2df23c5e021c8e020cae11fcde11dd0e11cd2e21bd5e21ad8e219dae319dde318dfe318e2e418e5e419e7e419eae51aece51befe51cf1e51df4e61ef6e620f8e621fbe723fde725"));var 
jy=Ly(dv("00000401000501010601010802010902020b02020d03030f03031204041405041606051806051a07061c08071e0907200a08220b09240c09260d0a290e0b2b100b2d110c2f120d31130d34140e36150e38160f3b180f3d19103f1a10421c10441d11471e114920114b21114e22115024125325125527125829115a2a115c2c115f2d11612f116331116533106734106936106b38106c390f6e3b0f703d0f713f0f72400f74420f75440f764510774710784910784a10794c117a4e117b4f127b51127c52137c54137d56147d57157e59157e5a167e5c167f5d177f5f187f601880621980641a80651a80671b80681c816a1c816b1d816d1d816e1e81701f81721f817320817521817621817822817922827b23827c23827e24828025828125818326818426818627818827818928818b29818c29818e2a81902a81912b81932b80942c80962c80982d80992d809b2e7f9c2e7f9e2f7fa02f7fa1307ea3307ea5317ea6317da8327daa337dab337cad347cae347bb0357bb2357bb3367ab5367ab73779b83779ba3878bc3978bd3977bf3a77c03a76c23b75c43c75c53c74c73d73c83e73ca3e72cc3f71cd4071cf4070d0416fd2426fd3436ed5446dd6456cd8456cd9466bdb476adc4869de4968df4a68e04c67e24d66e34e65e44f64e55064e75263e85362e95462ea5661eb5760ec5860ed5a5fee5b5eef5d5ef05f5ef1605df2625df2645cf3655cf4675cf4695cf56b5cf66c5cf66e5cf7705cf7725cf8745cf8765cf9785df9795df97b5dfa7d5efa7f5efa815ffb835ffb8560fb8761fc8961fc8a62fc8c63fc8e64fc9065fd9266fd9467fd9668fd9869fd9a6afd9b6bfe9d6cfe9f6dfea16efea36ffea571fea772fea973feaa74feac76feae77feb078feb27afeb47bfeb67cfeb77efeb97ffebb81febd82febf84fec185fec287fec488fec68afec88cfeca8dfecc8ffecd90fecf92fed194fed395fed597fed799fed89afdda9cfddc9efddea0fde0a1fde2a3fde3a5fde5a7fde7a9fde9aafdebacfcecaefceeb0fcf0b2fcf2b4fcf4b6fcf6b8fcf7b9fcf9bbfcfbbdfcfdbf")),Uy=Ly(dv("00000401000501010601010802010a02020c02020e03021004031204031405041706041907051b08051d09061f0a07220b07240c08260d08290e092b10092d110a30120a32140b34150b37160b39180c3c190c3e1b0c411c0c431e0c451f0c48210c4a230c4c240c4f260c51280b53290b552b0b572d0b592f0a5b310a5c320a5e340a5f3609613809623909633b09643d09653e0966400a67420a68440a68450a69470b6a490b6a4a0c6b4c0c6b4d0d6c4f0d6c510e6c520e6d540f6d550f6d57106e59106e5a116e5c126e5d126e5f136e61136e62146e64156e65156e67166e69166e6a176e6c186e6d186e6f196e71196e721a6e741a6e751b6e771c6d781c6d7a1d6d7c1d6d7d1e6d7f1e6c801f6c82206c84206b85216b87216b88226a8a226a8c23698d23698f24699025689225689326679526679727669827669a28659b29649d29649f2a63a02a63a22b62a32c61a52c60a62d60a82e5fa92e5eab2f5ead305dae305cb0315bb1325ab3325ab43359b63458b73557b93556ba3655bc3754bd3853bf3952c03a51c13a50c33b4fc43c4ec63d4dc73e4cc83f4bca404acb4149cc4248ce4347cf4446d04545d24644d34743d44842d54a41d74b3fd84c3ed94d3dda4e3cdb503bdd513ade5238df5337e05536e15635e25734e35933e45a31e55c30e65d2fe75e2ee8602de9612bea632aeb6429eb6628ec6726ed6925ee6a24ef6c23ef6e21f06f20f1711ff1731df2741cf3761bf37819f47918f57b17f57d15f67e14f68013f78212f78410f8850ff8870ef8890cf98b0bf98c0af98e09fa9008fa9207fa9407fb9606fb9706fb9906fb9b06fb9d07fc9f07fca108fca309fca50afca60cfca80dfcaa0ffcac11fcae12fcb014fcb216fcb418fbb61afbb81dfbba1ffbbc21fbbe23fac026fac228fac42afac62df9c72ff9c932f9cb35f8cd37f8cf3af7d13df7d340f6d543f6d746f5d949f5db4cf4dd4ff4df53f4e156f3e35af3e55df2e661f2e865f2ea69f1ec6df1ed71f1ef75f1f179f2f27df2f482f3f586f3f68af4f88ef5f992f6fa96f8fb9af9fc9dfafda1fcffa4")),zy=Ly(dv("0d088710078813078916078a19068c1b068d1d068e20068f2206902406912605912805922a05932c05942e05952f059631059733059735049837049938049a3a049a3c049b3e049c3f049c41049d43039e44039e46039f48039f4903a04b03a14c02a14e02a25002a25102a35302a35502a45601a45801a45901a55b01a55c01a65e01a66001a66100a76300a76400a76600a76700a86900a86a00a86c00a86e00a86f00a87100a87201a87401a87501a87701a87801a87a02a87b02a87d03a87e03a88004a88104a78305a78405a78606a68707a68808a68a09a58b0aa58
[minified bundle, continued: remainder of the plasma ramp; d3-selection utilities (d3.local, pointer coordinates, selectAll); d3-shape trigonometric constants and the arc() generator, including its pad-angle and corner-radius geometry]
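The generator in this span is recognizably d3-shape's `arc()`; the corner-tangent math serves its `cornerRadius`/`padAngle` options. A minimal sketch of that public API (standard d3-shape, nothing Netdata-specific):

```js
import { arc } from "d3-shape";

// Donut segment; angles are radians, measured clockwise from 12 o'clock.
const segment = arc()
  .innerRadius(40)
  .outerRadius(60)
  .cornerRadius(3)
  .padAngle(0.02);

const d = segment({ startAngle: 0, endAngle: Math.PI / 2 });
// d is an SVG path string for <path d="...">;
// segment.centroid({ startAngle: 0, endAngle: Math.PI / 2 }) gives a label anchor.
```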
[minified bundle, continued: tail of the arc() generator; the line() and area() generators with x/y accessors plus .defined(), .curve() and .context(); the pie() layout with value/sort/startAngle/endAngle/padAngle accessors; the linear curve and the start of the radial line/area adapters]
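Alongside `arc()`, this chunk carries d3-shape's `line()`, `area()` and `pie()`. A short sketch of how they combine; the data shape is illustrative:

```js
import { line, area, pie } from "d3-shape";

const data = [{ x: 0, y: 1 }, { x: 1, y: 3 }, { x: 2, y: 2 }];

// line()/area() map data to SVG path strings via accessors.
const path = line()
  .x((d) => d.x * 100)
  .y((d) => 200 - d.y * 50)
  .defined((d) => d.y != null)(data); // .defined() leaves gaps at missing samples

const band = area()
  .x((d) => d.x * 100)
  .y0(200)                      // baseline
  .y1((d) => 200 - d.y * 50)(data);

// pie() only computes angles; feed each slice to arc() to get geometry.
const slices = pie().value((d) => d.y).padAngle(0.01)(data);
// slices[0] -> { data, value, index, startAngle, endAngle, padAngle }
```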
[minified bundle, continued: radial line/area adapters (lineRadial, areaRadial, pointRadial), the bump curves, and the link generators (linkHorizontal, linkVertical, linkRadial); start of the symbol type table]
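The link generators in this span draw smooth edges between two points, as used in tree and dependency diagrams. A sketch of the usual call pattern:

```js
import { linkHorizontal, linkRadial } from "d3-shape";

const edge = linkHorizontal()
  .x((d) => d.x)
  .y((d) => d.y);

// A cubic Bézier from source to target, flat at both endpoints:
edge({ source: { x: 0, y: 10 }, target: { x: 100, y: 40 } }); // SVG path string

// linkRadial() is the same idea with .angle()/.radius() accessors.
linkRadial().angle((d) => d.a).radius((d) => d.r);
```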
[minified bundle, continued: symbol path definitions (asterisk, circle, cross, diamond, plus, square, star, triangle, wye, x) with the symbol() generator; basis and bundle curve implementations]
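The table of `draw(context, size)` objects above defines d3-shape's symbol set. Minimal use via the `symbol()` generator:

```js
import { symbol, symbolStar, symbolTriangle } from "d3-shape";

// size is the target *area* in px², not a width; the default is 64.
const star = symbol().type(symbolStar).size(64);
star(); // path string centered on (0,0) — position it with a transform

const tri = symbol().type(symbolTriangle).size(100)();
```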
[minified bundle, continued: closed/open basis curves, curveBundle(beta), the cardinal and Catmull-Rom curve families with their tension/alpha parameters, the linear-closed, monotone-X/Y and natural curves, the step curve, and the stack() layout with its offset (expand, diverging, silhouette, wiggle) and order (appearance, ascending, inside-out, reverse) variants]
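The curve classes plug into `line()`/`area()` via `.curve()`, and the `stack()` layout at the end of this span turns tidy rows into per-series bands. A sketch combining both (field names are illustrative):

```js
import {
  line, curveCatmullRom, curveMonotoneX,
  stack, stackOrderInsideOut, stackOffsetWiggle,
} from "d3-shape";

// alpha = 0.5 selects the centripetal Catmull-Rom variant seen above;
// monotone curves never overshoot the data, which suits monitoring charts.
line().curve(curveCatmullRom.alpha(0.5));
line().curve(curveMonotoneX);

// stack() emits one series per key, each point a [y0, y1] band:
const series = stack()
  .keys(["user", "system", "iowait"])
  .order(stackOrderInsideOut)   // pairs well with the wiggle offset
  .offset(stackOffsetWiggle)([
    { user: 10, system: 4, iowait: 1 },
    { user: 12, system: 3, iowait: 2 },
  ]);
// series[k][i] -> [y0, y1], with series[k][i].data pointing at the input row
```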
[minified bundle, continued: ISO-8601 time formatting/parsing against the pattern "%Y-%m-%dT%H:%M:%S.%LZ"; a self-restarting interval timer; d3-zoom's ZoomEvent and Transform classes (scale, translate, apply/invert, rescaleX/rescaleY, zoomIdentity) and the zoom() behavior's wheel, mouse, double-click and touch gesture handling; the first date-fns modules: default options, long-format helpers, the UTC-offset helper, and the deprecated-token guard pointing at https://github.com/date-fns/date-fns/blob/master/docs/unicodeTokens.md]
[minified bundle, continued: date-fns duration constants, constructFrom, differenceInCalendarDays, and format(): per-token formatters for era, year/week-year, quarter, month, week, day, weekday, day-period, hour, minute, second, fraction and timezone tokens, plus the entry point that resolves locale and week options and rejects unescaped latin letters in the pattern]
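These modules are date-fns' `format()` and its per-token formatters. A short usage sketch; note the guard above that rejects the ambiguous uppercase tokens unless explicitly opted in:

```js
import { format } from "date-fns";

format(new Date(2024, 10, 25, 18, 33), "yyyy-MM-dd HH:mm"); // "2024-11-25 18:33"
format(new Date(2024, 10, 25), "PPpp"); // locale long form, e.g. "Nov 25, 2024, 12:00:00 AM"

// The protected-token check warns and then throws a RangeError for
// YYYY/YY (week-year) and DD/D (day-of-year); use yyyy/yy/dd/d for
// calendar year and day-of-month instead.
// format(new Date(), "YYYY-MM-DD"); // -> RangeError
```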
[minified bundle, continued: getISOWeek/getISOWeekYear/getWeek/getWeekYear and isValid; the full en-US locale (formatDistance strings, long date/time formats, formatRelative, localize and match tables); startOfDay/startOfISOWeek/startOfWeek/startOfYear and toDate; idb-keyval's promise wrappers over IndexedDB (get, set, del, clear, createStore on the default "keyval-store" database); InternMap/InternSet keyed by value identity; start of react-toastify]
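The IndexedDB wrappers here are idb-keyval (the `"keyval-store"`/`"keyval"` database names appear verbatim). A minimal sketch of its promise API; the custom store names are illustrative:

```js
import { get, set, del, createStore } from "idb-keyval";

async function demo() {
  await set("dashboard-settings", { theme: "dark" }); // structured-clone values
  const settings = await get("dashboard-settings");   // value or undefined
  await del("dashboard-settings");

  // A dedicated database/object store instead of the default "keyval-store":
  const cache = createStore("netdata-demo", "cache"); // names are hypothetical
  await set("last-seen", Date.now(), cache);
}
```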
[minified bundle, continued: react-toastify internals: the CSS transition helper with collapse animation, the toast event emitter, built-in info/warning/success/error/spinner icons, the container hook that queues, limits and renders toasts, drag-to-dismiss and pause-on-hover/focus-loss handling, CloseButton, ProgressBar, the Toast component, bounce/slide/zoom/flip transitions, and ToastContainer with its defaults (top-right, autoClose 5000 ms, closeOnClick, draggable at 80% width)]
C.size>0?d.emit(0,e,t):R.push({content:e,options:t}),t.toastId}function P(e,t){return{...t,type:t&&t.type||e,toastId:k(t)}}function I(e){return(t,n)=>M(t,P(e,n))}function N(e,t){return M(e,P("default",t))}N.loading=(e,t)=>M(e,P("default",{isLoading:!0,autoClose:!1,closeOnClick:!1,closeButton:!1,draggable:!1,...t})),N.promise=function(e,t,n){let r,{pending:o,error:i,success:u}=t;o&&(r=a(o)?N.loading(o,n):N.loading(o.render,{...n,...o}));const l={isLoading:null,autoClose:null,closeOnClick:null,closeButton:null,draggable:null},c=(e,t,o)=>{if(null==t)return void N.dismiss(r);const i={type:e,...l,...n,data:o},s=a(t)?{render:t}:t;return r?N.update(r,{...i,...s}):N(s.render,{...i,...s}),o},f=s(e)?e():e;return f.then((e=>c("success",u,e))).catch((e=>c("error",i,e))),f},N.success=I("success"),N.info=I("info"),N.error=I("error"),N.warning=I("warning"),N.warn=N.warning,N.dark=(e,t)=>M(e,P("default",{theme:"dark",...t})),N.dismiss=e=>{C.size>0?d.emit(1,e):R=R.filter((t=>null!=e&&t.options.toastId!==e))},N.clearWaitingQueue=function(e){return void 0===e&&(e={}),d.emit(5,e)},N.isActive=e=>{let t=!1;return C.forEach((n=>{n.isToastActive&&n.isToastActive(e)&&(t=!0)})),t},N.update=function(e,t){void 0===t&&(t={}),setTimeout((()=>{const n=function(e,t){let{containerId:n}=t;const r=C.get(n||E);return r&&r.getToast(e)}(e,t);if(n){const{props:r,content:o}=n,i={delay:100,...r,...t,toastId:t.toastId||e,updateId:T()};i.toastId!==e&&(i.staleId=e);const a=i.render||o;delete i.render,M(a,i)}}),0)},N.done=e=>{N.update(e,{progress:1})},N.onChange=e=>(d.on(4,e),()=>{d.off(4,e)}),N.POSITION={TOP_LEFT:"top-left",TOP_RIGHT:"top-right",TOP_CENTER:"top-center",BOTTOM_LEFT:"bottom-left",BOTTOM_RIGHT:"bottom-right",BOTTOM_CENTER:"bottom-center"},N.TYPE={INFO:"info",SUCCESS:"success",WARNING:"warning",ERROR:"error",DEFAULT:"default"},d.on(2,(e=>{E=e.containerId||e,C.set(E,e),R.forEach((e=>{d.emit(0,e.content,e.options)})),R=[]})).on(3,(e=>{C.delete(e.containerId||e),0===C.size&&d.off(0).off(1).off(5)}))},73700:(e,t,n)=>{"use strict";function r(e,t,n){var r,o=n||{},i=o.noTrailing,a=void 0!==i&&i,s=o.noLeading,u=void 0!==s&&s,l=o.debounceMode,c=void 0===l?void 0:l,f=!1,d=0;function h(){r&&clearTimeout(r)}function p(){for(var n=arguments.length,o=new Array(n),i=0;i<n;i++)o[i]=arguments[i];var s=this,l=Date.now()-d;function p(){d=Date.now(),t.apply(s,o)}function g(){r=void 0}f||(u||!c||r||p(),h(),void 0===c&&l>e?u?(d=Date.now(),a||(r=setTimeout(c?g:p,e))):p():!0!==a&&(r=setTimeout(c?g:p,void 0===c?e-l:e)))}return p.cancel=function(e){var t=(e||{}).upcomingOnly,n=void 0!==t&&t;h(),f=!n},p}function o(e,t,n){var o=(n||{}).atBegin;return r(e,t,{debounceMode:!1!==(void 0!==o&&o)})}n.d(t,{n:()=>r,s:()=>o})},86856:(e,t,n)=>{"use strict";n.d(t,{i7:()=>d,q6:()=>f});var r=n(96540),o=n(69982),i=n(40961);const a=Symbol(),s=Symbol(),u="undefined"===typeof window||/ServerSideRendering/.test(window.navigator&&window.navigator.userAgent)?r.useEffect:r.useLayoutEffect,l=o.unstable_runWithPriority?e=>{try{(0,o.unstable_runWithPriority)(o.unstable_NormalPriority,e)}catch(t){if("Not implemented."!==t.message)throw t;e()}}:e=>e(),c=e=>({value:t,children:n})=>{const o=(0,r.useRef)(t),s=(0,r.useRef)(0),[c,f]=(0,r.useState)(null);c&&(c(t),f(null));const d=(0,r.useRef)();if(!d.current){const e=new Set,t=(t,n)=>{(0,i.unstable_batchedUpdates)((()=>{s.current+=1;const r={n:s.current};null!=n&&n.suspense&&(r.n*=-1,r.p=new Promise((e=>{f((()=>t=>{r.v=t,delete r.p,e(t)}))}))),e.forEach((e=>e(r))),t()}))};d.current={[a]:{v:o,n:s,l:e,u:t}}}return u((()=>{o.current=t,s.current+=1,l((()=>{d.current[a].l.forEach((e=>{e({n:s.current,v:t})}))}))}),[t]),(0,r.createElement)(e,{value:d.current},n)};function
f(e){const t=(0,r.createContext)({[a]:{v:{current:e},n:{current:-1},l:new Set,u:e=>e()}});return t[s]=t.Provider,t.Provider=c(t.Provider),delete t.Consumer,t}function d(e,t){const n=(0,r.useContext)(e)[a];const{v:{current:o},n:{current:i},l:s}=n,l=t(o),[c,f]=(0,r.useReducer)(((e,n)=>{if(!n)return[o,l];if("p"in n)throw n.p;if(n.n===i)return Object.is(e[1],l)?e:[o,l];try{if("v"in n){if(Object.is(e[0],n.v))return e;const r=t(n.v);return Object.is(e[1],r)?e:[n.v,r]}}catch(r){}return[...e]}),[o,l]);return Object.is(c[1],l)||f(),u((()=>(s.add(f),()=>{s.delete(f)})),[s]),c[1]}}}]); \ No newline at end of file diff --git a/src/web/gui/v2/8637.0958494526e838a60d2b.js.LICENSE.txt b/src/web/gui/v2/8637.0958494526e838a60d2b.js.LICENSE.txt deleted file mode 100644 index 47c30ce3e..000000000 --- a/src/web/gui/v2/8637.0958494526e838a60d2b.js.LICENSE.txt +++ /dev/null @@ -1,140 +0,0 @@ -/* -object-assign -(c) Sindre Sorhus -@license MIT -*/ - -/*! - * Determine if an object is a Buffer - * - * @author Feross Aboukhadijeh - * @license MIT - */ - -/** - * react-table - * - * Copyright (c) TanStack - * - * This source code is licensed under the MIT license found in the - * LICENSE.md file in the root directory of this source tree. - * - * @license MIT - */ - -/** - * @license - * Copyright 2006 Dan Vanderkam (danvdk@gmail.com) - * MIT-licenced: https://opensource.org/licenses/MIT - */ - -/** - * @license - * Copyright 2011 Dan Vanderkam (danvdk@gmail.com) - * MIT-licenced: https://opensource.org/licenses/MIT - */ - -/** - * @license - * Copyright 2011 Paul Felix (paul.eric.felix@gmail.com) - * MIT-licenced: https://opensource.org/licenses/MIT - */ - -/** - * @license - * Copyright 2012 Dan Vanderkam (danvdk@gmail.com) - * MIT-licenced: https://opensource.org/licenses/MIT - */ - -/** - * @license - * Copyright 2013 David Eberlein (david.eberlein@ch.sauter-bc.com) - * MIT-licenced: https://opensource.org/licenses/MIT - */ - -/** - * @license - * Part of dygraphs, see top-level LICENSE.txt file - * MIT-licenced: https://opensource.org/licenses/MIT - */ - -/** - * @license React - * react-is.production.min.js - * - * Copyright (c) Facebook, Inc. and its affiliates. - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. - */ - -/** - * @license React - * react.production.min.js - * - * Copyright (c) Facebook, Inc. and its affiliates. - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. - */ - -/** - * @license React - * scheduler.production.min.js - * - * Copyright (c) Facebook, Inc. and its affiliates. - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. - */ - -/** - * @remix-run/router v1.16.0 - * - * Copyright (c) Remix Software Inc. - * - * This source code is licensed under the MIT license found in the - * LICENSE.md file in the root directory of this source tree. - * - * @license MIT - */ - -/** - * React Router DOM v6.23.0 - * - * Copyright (c) Remix Software Inc. - * - * This source code is licensed under the MIT license found in the - * LICENSE.md file in the root directory of this source tree. - * - * @license MIT - */ - -/** - * React Router v6.23.0 - * - * Copyright (c) Remix Software Inc. - * - * This source code is licensed under the MIT license found in the - * LICENSE.md file in the root directory of this source tree. 
- * - * @license MIT - */ - -/** @license React v16.13.1 - * react-is.production.min.js - * - * Copyright (c) Facebook, Inc. and its affiliates. - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. - */ - -/**! - * easy-pie-chart - * Lightweight plugin to render simple, animated and retina optimized pie charts - * - * @license - * @author Robert Fleischmann (http://robert-fleischmann.de) - * @version 2.1.7 - **/ diff --git a/src/web/gui/v2/8784.a04e9c07186e1f057f56.chunk.js b/src/web/gui/v2/8784.a04e9c07186e1f057f56.chunk.js deleted file mode 100644 index 0387190d9..000000000 --- a/src/web/gui/v2/8784.a04e9c07186e1f057f56.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="640fdce4-5575-47c0-9c3d-be51dcbe7364",e._sentryDebugIdIdentifier="sentry-dbid-640fdce4-5575-47c0-9c3d-be51dcbe7364")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[8784],{92155:(e,t,n)=>{n.d(t,{A:()=>i});var a=n(58168),o=n(96540),r=n(50876);const l=e=>(0,o.forwardRef)(((t,n)=>{let{callback:l,feature:s,isStart:i,isSuccess:d,isFailure:c,eventReason:u,payload:m={},...p}=t;const{sendLog:g,isReady:h}=(0,r.A)(),b=(0,o.useCallback)((()=>{const e=p[l],t={feature:s,isStart:i,isSuccess:d,isFailure:c,eventReason:u,...m,...p["data-ga"]?{dataGa:p["data-ga"]}:{},...p.dataGa?{dataGa:p.dataGa}:{},...p["data-track"]?{dataTrack:p["data-track"]}:{},...p.label?{label:p.label}:{}};"function"==typeof e&&e(),g(t,!0)}),[l,g,h,m,p]),f=(0,o.useMemo)((()=>({...p,[l]:b})),[p,l,b]);return o.createElement(e,(0,a.A)({ref:n},f))}));var s=n(67276);const i=e=>(0,o.forwardRef)(((t,n)=>{let{payload:r={},...i}=t;const d=l(e);return o.createElement(d,(0,a.A)({},i,{ref:n,callback:"onClick",payload:{...r,action:s.o1.buttonClicked}}))}))},96083:(e,t,n)=>{n.d(t,{A:()=>i});var a=n(58168),o=n(68415),r=n(96540),l=n(20982),s=n(83199);const i=(0,r.forwardRef)(((e,t)=>{let{onClick:n,src:i,title:d="User avatar",width:c=8,height:u=8,...m}=e;return r.createElement(s.Flex,(0,a.A)({cursor:"pointer",round:"50%","data-testid":"userAvatar",title:d,alt:d,onClick:e=>n&&n(e),width:c,height:u},m,{ref:t,alignItems:"center",justifyContent:"center",background:"spaceIdle",color:"textLite",overflow:"hidden"}),i?r.createElement(s.Flex,{as:"img",src:i,alt:d,width:c,height:u}):r.createElement(l.g,{icon:o.yV,size:"lg",alt:d}))}))},18682:(e,t,n)=>{n.d(t,{C:()=>l});var a=n(33436),o=n(78217),r=n(13871);const l=function(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};return()=>{(0,a.A)(e);const n=(0,r.UI)({header:"Copied",text:"Command copied to your clipboard! 
Please run it on your node's terminal.",icon:"gear",...t,success:!0});o.A.success(n,{context:"copy"})}}},87292:(e,t,n)=>{n.d(t,{Ay:()=>u,R0:()=>c});var a=n(96540),o=n(8711),r=n(83199),l=n(18682);const s=(0,o.default)(r.Box).attrs((e=>({color:"textDescription",background:"modalTabsBackground",border:{side:"all",color:"borderSecondary"},padding:[4,10,4,4],position:"relative",width:"100%",...e}))).withConfig({displayName:"command__StyledTerminalCommand",componentId:"sc-wnwmk3-0"})(["color:",";border-radius:2px;overflow-wrap:anywhere;white-space:pre-wrap;font-family:monospace;letter-spacing:0.09px;line-height:18px;font-size:14px;word-break:break-word;"],(0,r.getColor)("textDescription")),i=(0,o.default)(r.Icon).withConfig({displayName:"command__StyledIcon",componentId:"sc-wnwmk3-1"})(["display:flex;align-self:flex-end;cursor:pointer;"]),d=(0,o.default)(r.Box).attrs({color:"textDescription",border:{side:"all",color:"borderSecondary"},background:"modalTabsBackground",padding:[0,1]}).withConfig({displayName:"command__CodeText",componentId:"sc-wnwmk3-2"})(["display:inline-block;color:",";border-radius:2px;font-family:monospace;letter-spacing:0.09px;line-height:16px;font-size:12px;font-weight:bold;word-break:break-word;"],(0,r.getColor)("textDescription")),c=e=>{let{children:t,...n}=e;return a.createElement(d,n,t)},u=e=>{let{children:t,confirmationText:n="Command copied to your clipboard.",commandText:o=t,...d}=e;return a.createElement(s,d,t,a.createElement(r.Box,{position:"absolute",bottom:"8px",right:"8px"},a.createElement(i,{name:"copy",size:"small",color:"primary",onClick:(0,l.C)(o||t,{text:n})})))}},36091:(e,t,n)=>{n.d(t,{A:()=>c});var a=n(58168),o=n(96540),r=n(13083),l=n(83199);const s=()=>o.createElement(l.Text,null,"Something went wrong during document parsing"),i=e=>t=>{let{transformConfiguration:n={},validationConfig:l={},validate:i,errorComponent:d,children:c,...u}=t;const{tree:m,errors:p}=(e=>{let{markdocContent:t,validate:n,validationConfig:a,transformConfiguration:l}=e;return(0,o.useMemo)((()=>{const e=r.Ay.parse("string"===typeof t?t:"");let o=[];return n&&(o=r.Ay.validate(e,a)),{tree:o.length?null:r.Ay.transform(e,l),errors:o}}),[t,n,a,l])})({markdocContent:c,validate:i,validationConfig:l,transformConfiguration:n}),g=d&&"function"==typeof d?d:s;return p.length?o.createElement(g,{errors:p}):o.createElement(e,(0,a.A)({tree:m},u))},d=(0,o.memo)(i((e=>{let{tree:t,renderConfiguration:n}=e;return r.Ay.renderers.react(t,o,n)}))),c=(i((e=>{let{tree:t}=e;return r.Ay.renderers.html(t)})),d)},40267:(e,t,n)=>{n.d(t,{A:()=>r});var a=n(96540),o=n(83199);const r=e=>{let{flavour:t,icon:n,children:r}=e;return a.createElement(o.Pill,{icon:n,textProps:{textTransform:"capitalize"},flavour:t},r)}},32089:(e,t,n)=>{n.d(t,{A:()=>l});var a=n(58168),o=n(96540),r=n(83199);const l=e=>{let{vertical:t,color:n="borderSecondary",...l}=e;return o.createElement(r.Box,(0,a.A)({as:"hr",height:t?"100%":"1px"},t?{}:{width:"100%"},{sx:{borderWidth:t?"0px 0px 0px 1px":"1px 0px 0px 0px",borderColor:n,borderStyle:"solid"}},l))}},38784:(e,t,n)=>{n.r(t),n.d(t,{AssistantAlerts:()=>K,AssistantChat:()=>fe});n(62953);var a=n(96540),o=n(69765),r=n(51913),l=n(58168),s=n(83199),i=n(73700),d=(n(3064),n(14905),n(98992),n(72577),n(8872),n(36091)),c=n(47762);const u=e=>{let{label:t,value:n}=e;return a.createElement(s.Flex,{gap:2},a.createElement(s.Text,{strong:!0},t,":"),a.createElement(s.Text,null,n))},m=e=>{let{name:t,nodeId:n,chartId:l,alertOptions:i}=e;const d=(0,o.ID)(),[,m]=(0,r.bg)(),p=(0,c.xY)(n,"name");return 
a.createElement(s.Flex,{column:!0},i.length>1&&a.createElement(s.Box,{margin:[0,0,2,0]},a.createElement(s.Select,{"data-testid":"".concat(t,"-assistant-modal-alerts-select"),onChange:e=>{let{value:t}=e;return m({roomId:d,alert:{name:t}})},styles:{size:"tiny"},menuPlacement:"auto",options:i,value:{value:t,label:t}})),1==i.length&&a.createElement(u,{label:"Alert",value:t}),p&&a.createElement(u,{label:"Node",value:p}),l&&a.createElement(u,{label:"Chart id",value:l}))};var p=n(32089),g=n(8711),h=n(4659),b=n(87292),f=n(68831);const x=g.default.ol.withConfig({displayName:"markdocSchema__OrderedList",componentId:"sc-1maymd4-0"})(["list-style:roman;padding-left:14px;"]),E=g.default.ul.withConfig({displayName:"markdocSchema__UnorderedList",componentId:"sc-1maymd4-1"})(['list-style-image:url("','/img/list-style-image.svg");padding-left:14px;'],f.A.assetsBaseURL),v={heading:{render:e=>{let{level:t=1,...n}=e,o=s.H1;switch(t){case 2:o=s.H2;break;case 3:o=s.H3;break;case 4:o=s.H4;break;case 5:o=s.H5;break;case 6:o=s.H6}return a.createElement(o,(0,l.A)({margin:[2,0]},n))},attributes:{id:{type:String},level:{type:Number}}},paragraph:{render:e=>{let{children:t,...n}=e;return a.createElement(s.Flex,(0,l.A)({padding:[2,0]},n),a.createElement(s.Text,null,t))}},link:{render:e=>{let{children:t,href:n,...o}=e;return a.createElement(h.A,(0,l.A)({href:n,rel:"noopener noreferrer",target:"_blank"},o),t)},attributes:{href:{type:String}}},code:{render:e=>{let{content:t,...n}=e;return a.createElement(b.R0,n,t)},attributes:{content:{type:String}}},fence:{render:e=>{let{content:t,...n}=e;return a.createElement(b.Ay,n,t)},attributes:{content:{type:String}}},list:{render:e=>{let{ordered:t,children:n,...o}=e;const r=t?x:E;return a.createElement(r,o,n)},attributes:{ordered:{type:Boolean}}}},y=e=>{let{selectedAlert:t}=e;const[n]=(0,r.Ws)(),{name:o,nodeId:l,chartId:i}=t,{loaded:c,value:u,hasError:g}=(0,r.vQ)({alert:o,node:l,chart:i}),h=(e=>e.reduce(((e,t)=>{const n={value:t.name,label:t.name};return e.find((e=>{let{label:t}=e;return t==n.label}))||e.push(n),e}),[]))(n);return c?g?a.createElement(s.Text,null,"Something went wrong"):null!==u&&void 0!==u&&u.result?a.createElement(s.Flex,{column:!0,gap:3},a.createElement(m,{name:o,nodeId:l,chartId:i,alertOptions:h}),a.createElement(p.A,null),a.createElement(d.A,{transformConfiguration:{nodes:v}},u.result)):a.createElement(s.Text,null,"No data"):a.createElement(s.Text,null,"Loading...")};var w=n(40267),C=n(29217);const k=(0,g.default)(s.Flex).withConfig({displayName:"styled__ElevatedContent",componentId:"sc-16kc0pw-0"})(["box-shadow:0 18px 28px rgba(0,0,0,0.5);"]),A=(0,g.default)(s.Icon).withConfig({displayName:"styled__IconAbsolute",componentId:"sc-16kc0pw-1"})(["position:absolute;top:50%;left:50%;transform:translate(-50%,-50%);"]),I=e=>{let{alert:t,...n}=e;return a.createElement(C.A,{content:"Show assistant"},a.createElement(k,(0,l.A)({position:"absolute",top:"0",right:"0",cursor:"pointer"},n),a.createElement(w.A,{flavour:(null===t||void 0===t?void 0:t.status)||"neutral"},null===t||void 0===t?void 0:t.name)))};var _=n(55794),M=n.n(_),S=n(87659);const B=g.default.ul.withConfig({displayName:"helpTooltipContent__List",componentId:"sc-uh9315-0"})(["width:350px;padding-left:16px;list-style:disc outside none;"]),F=function(){let{disableDrag:e}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return a.createElement(B,null,e?null:a.createElement("li",null,"You can drag around the assistant window and navigate to other tabs within the same room, and the assistant window won't go 
away."),a.createElement("li",null,"Use the buttons at the right to expand, or minimize the assistant window."),a.createElement("li",null,"Hold down the ",a.createElement(s.TextSmall,{strong:!0},"Shift")," button in order to select any text inside the assistant window."))};var N=n(63950),T=n.n(N);const z=e=>{let{onModalClose:t=T()}=e;return a.createElement(C.A,{content:"Close assistant"},a.createElement(k,{position:"absolute",bottom:"37px",right:"-10px",background:"mainBackground",sx:{borderRadius:"100%"},border:{side:"all",color:"primary",size:"1px",type:"solid"},width:"28px",height:"28px",onClick:t,cursor:"pointer"},a.createElement(A,{name:"x",color:"text",size:"sm"})))},R=e=>{let{onClick:t=T()}=e;return a.createElement(k,{position:"absolute",top:"0",right:"0",onClick:t,cursor:"pointer"},a.createElement(s.Pill,{flavour:"neutral"},"Show assistant"))},q=e=>{let{MaximizeComponent:t=R,isMinimized:n,maximize:o=T(),onModalClose:r=T()}=e;const[l,i]=(0,a.useState)(!1);return a.createElement(s.Flex,{position:"relative",width:"56px",height:n?"95px":"56px",onMouseEnter:()=>i(n),onMouseLeave:()=>i(!1)},a.createElement(k,{position:"absolute",bottom:"0",background:"primary",sx:{borderRadius:"100%"},width:"56px",height:"56px",cursor:"move"},a.createElement(A,{name:"netdataAssistant",size:"large"})),l&&a.createElement(a.Fragment,null,a.createElement(t,{onClick:()=>{n&&o()}}),a.createElement(z,{onModalClose:r})))};var D=n(47731);const H=e=>{let{expanded:t,isMobile:n}=e;return n?{base:"95vw"}:t?{base:"80vw"}:{base:150,max:150,min:70}},L=e=>{let{expanded:t,isMobile:n}=e;return n?{base:"80vh",max:"80vh",min:"10vh"}:t?{base:"80vh",min:"80vh"}:{base:150,max:150,min:150}},U=(0,g.default)(s.ModalContent).attrs((e=>{let{expanded:t,isMobile:n,getWidth:a,getHeight:o,...r}=e;return{width:a({expanded:t,isMobile:n}),height:o({expanded:t,isMobile:n}),padding:[0,0,4,0],...r}})).withConfig({displayName:"assistantModalBase__ModalContent",componentId:"sc-15vyjlc-0"})([""]),P=(0,g.default)(s.ModalBody).withConfig({displayName:"assistantModalBase__StyledModalBody",componentId:"sc-15vyjlc-1"})(["flex-direction:",";"],(e=>{let{columnReverse:t}=e;return t?"column-reverse":"column"})),W=function(){let{title:e,body:t,modalContentProps:n={},modalBodyProps:o={},onClose:r,MaximizeComponent:i,disableDrag:d,getWidth:c=H,getHeight:u=L}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const[m,p]=(0,S.A)(),[g,h]=(0,a.useState)(!1),[b,f,,x]=(0,S.A)(),E=(0,D.J)();return a.createElement(s.Modal,{zIndex:80,backdrop:!1},a.createElement(M(),{disabled:d||g||E},a.createElement(s.Flex,{column:!0,alignItems:"end",gap:3},!b&&a.createElement(U,(0,l.A)({expanded:m,cursor:g?"text":"move",tabIndex:0,onKeyDown:e=>{let{key:t}=e;"Shift"==t&&h(!0)},onKeyUp:()=>h(!1),isMobile:E,getWidth:c,getHeight:u},n),a.createElement(s.ModalHeader,{justifyContent:"between"},a.createElement(s.Flex,{gap:2,alignItems:"center"},a.createElement(s.Icon,{name:"netdataAssistant",color:"text"}),e||a.createElement(s.H4,null,"Netdata Assistant"),a.createElement(C.A,{plain:!0,isBasic:!0,align:"top",content:a.createElement(F,{disableDrag:d})},a.createElement(s.Flex,null,a.createElement(s.Icon,{name:"question",color:"textLite",width:"16px",height:"16px"})))),a.createElement(s.Flex,{alignItems:"baseline"},E?null:a.createElement(a.Fragment,null,a.createElement(s.ModalButton,{iconName:"minimize_s",onClick:f,tooltip:"Minimize window",testId:"assistant-modal-minimize-button"}),a.createElement(s.ModalButton,{iconName:m?"reduceSize":"fullScreen",onClick:p,tooltip:m?"Reduce window 
size":"Expand window",testId:"assistant-modal-expand-collapse-button"})),a.createElement(s.ModalCloseButton,{onClose:r,position:"unset",tooltip:"Close window",testId:"assistant-modal-close-button"}))),a.createElement(P,(0,l.A)({overflow:{vertical:"auto"},cursor:g?"text":"default"},o),t)),a.createElement(q,{MaximizeComponent:i,isMinimized:b,maximize:x,onModalClose:r}))))},G=e=>{let{alertNameInHeader:t,selectedAlert:n}=e;return t?a.createElement(w.A,{flavour:(null===n||void 0===n?void 0:n.status)||"neutral"},null===n||void 0===n?void 0:n.name):a.createElement(s.H4,null,"Netdata Assistant")},j=(Q=W,e=>{const[t,n]=(0,a.useState)(!1),[o,s]=(0,r.bg)(),d=(null===o||void 0===o?void 0:o.alert)||{},c=(0,i.s)(300,(e=>{let{target:t}=e;return n(t.scrollTop>150&&!(null===d||void 0===d||!d.name))})),u=(0,a.useCallback)((()=>s(null)),[s]);return a.createElement(Q,(0,l.A)({title:a.createElement(G,{alertNameInHeader:t,selectedAlert:d}),body:a.createElement(y,{selectedAlert:d}),onClose:u,modalBodyProps:{onScroll:c},MaximizeComponent:e=>a.createElement(I,(0,l.A)({alert:d},e))},e))});var Q;const Y=j,K=()=>{const e=(0,o.ID)(),[t,n]=(0,r.bg)(),{roomId:l,alert:s}=t||{};return(0,a.useEffect)((()=>{l!=e&&n(null)}),[l,e]),s&&l==e?a.createElement(Y,null):null};var V=n(22292),O=n(96083);const Z=()=>a.createElement(s.Flex,{width:12},a.createElement(s.Icon,{name:"netdataAssistant",width:"48px",height:"48px",color:"primary"})),J=e=>{let{flavour:t="assistant",children:n,...o}=e;const r="assistant"==t,i=(0,V.uW)("avatarURL"),d={...r?{background:"modalBackground"}:{},border:{side:"top",color:"primary"}};return a.createElement(s.Flex,(0,l.A)({gap:2,padding:[4]},d,o),r?a.createElement(Z,null):a.createElement(O.A,{src:i||"",width:12,height:12}),a.createElement(s.Flex,{flex:{grow:1,shrink:1}},n))};var X=n(50876);const $=e=>{let{text:t}=e;const{sendButtonClickedLog:n}=(0,X.A)(),o=(0,r.ZV)(),l=(0,a.useCallback)((()=>{o(t),n({feature:"NetdataAssistantQuestion",question:t,kickstartQuestion:!0})}),[o,n]);return a.createElement(s.Flex,{alignItems:"center",justifyContent:"between",padding:[2],background:"inputBg",round:!0,cursor:"pointer",onClick:l},a.createElement(s.TextBig,null,t),a.createElement(s.Icon,{name:"chevron_right_s",color:"text",height:"12px",cursor:"pointer"}))},ee=()=>a.createElement(s.Flex,{column:!0,gap:4},a.createElement(s.TextBig,null,"Hello! Welcome to Netdata Assistant! 
Here are some options to help you kickstart our conversation:"),a.createElement(s.Flex,{column:!0,gap:2},a.createElement($,{text:"What is a node?"}),a.createElement($,{text:"What is a Netdata parent?"})),a.createElement(s.TextBig,null,"Just select the option you're interested in, or feel free to ask anything else!")),te=()=>a.createElement(J,{border:"none"},a.createElement(ee,null));n(41393),n(81454);const ne=(0,n(92155).A)(s.Button),ae=e=>{let{question:t,answer:n,feedback:o,setFeedback:r,flavour:l}=e;return a.createElement(ne,{feature:"AssistantAnswerFeedback",payload:{question:t,answer:n,feedback:l},flavour:o===l?"hollow":"borderless",icon:"thumbsUp"==l?"thumb_up":"thumb_down",onClick:()=>r(l),"data-testid":"assistant-chat-".concat(l),small:!0})},oe=function(){var e,t;let{id:n,questionId:o,additionalInfo:r,messages:i=[]}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const[d,c]=(0,a.useState)(),u=null===(e=i.find((e=>e.id==o)))||void 0===e?void 0:e.value,m=null===(t=i.find((e=>e.id==n)))||void 0===t?void 0:t.value,p=(0,a.useMemo)((()=>({feedback:d,setFeedback:c,question:u,answer:m})),[d,c]);return a.createElement(s.Flex,{alignItems:"center",justifyContent:"between"},a.createElement(s.Flex,null,a.createElement(ae,(0,l.A)({},p,{flavour:"thumbsUp"})),a.createElement(ae,(0,l.A)({},p,{flavour:"thumbsDown"}))),r?a.createElement(h.A,{Component:s.Text,href:r,target:"_blank",rel:"noopener noreferrer"},"Read more..."):null)};var re=n(63314);const le=()=>a.createElement(re.DL,{feature:"NetdataAssistantAnswer"},a.createElement(s.TextBig,null,"Something went wrong. Please try again.")),se=()=>{const{messages:e}=(0,r.Gp)();return e.map((t=>{let{id:n,questionId:o,type:r,pending:l,value:i,additionalInfo:c,error:u}=t;return a.createElement(J,{key:n,flavour:"answer"==r?"assistant":"user"},"answer"==r?l?"...":u?a.createElement(le,null):a.createElement(s.Flex,{column:!0,gap:1},a.createElement(d.A,{transformConfiguration:{nodes:v}},i),a.createElement(oe,{id:n,questionId:o,additionalInfo:c,messages:e})):i)}))},ie=(0,g.default)(s.Icon).withConfig({displayName:"input__StyledIcon",componentId:"sc-m4unkx-0"})(["opacity:",";"],(e=>{let{disabled:t}=e;return t?".5":"1"})),de=e=>a.createElement(C.A,{plain:!0,content:"Send your message"},a.createElement(s.Box,null,a.createElement(ie,(0,l.A)({name:"chevron_right_s",color:"text",size:"small",cursor:"pointer"},e)))),ce=(0,g.default)(s.Box).withConfig({displayName:"input__InputContainer",componentId:"sc-m4unkx-1"})(["box-shadow:0 0 8px rgba(0,0,0,0.5);"]),ue=()=>{const{sendButtonClickedLog:e}=(0,X.A)(),[t,n]=(0,a.useState)(""),{pending:o}=(0,r.Gp)(),l=(0,r.ZV)(),i=(0,a.useCallback)((()=>{!o&&t&&(l(t),n(""),e({feature:"NetdataAssistantQuestion",question:t}))}),[o,t,l,n,e]);return a.createElement(ce,{width:"100%",position:"fixed",bottom:"68px",padding:[4],background:"modalBackground"},a.createElement(s.TextInput,{value:t,placeholder:"Message assistant...",iconRight:o?null:a.createElement(de,{onClick:i,disabled:o||!t}),onChange:e=>{let{target:{value:t}}=e;return n(t)},onKeyDown:e=>{let{key:t}=e;"Enter"==t&&i()},autoFocus:"autofocus",disabled:o}))},me=()=>a.createElement(s.Flex,{column:!0},a.createElement(s.Flex,{column:!0,padding:[0,0,17,0]},a.createElement(te,null),a.createElement(se,null)),a.createElement(ue,null)),pe=e=>{let{expanded:t,isMobile:n}=e;return n?{base:"95vw"}:t?{base:"60vw"}:{base:125,max:150,min:60}},ge=e=>{let{expanded:t,isMobile:n}=e;return 
n?{base:"80vh",max:"80vh",min:"10vh"}:t?{base:"80vh",min:"80vh"}:{base:80,max:150,min:80}},he=(e=>t=>a.createElement(re.Ay,{feature:"NetdataAssistant"},a.createElement(e,(0,l.A)({body:a.createElement(me,null),modalContentProps:{padding:[0]},modalBodyProps:{height:"100%",padding:[0],columnReverse:!0},getWidth:pe,getHeight:ge},t))))(W),be=he,fe=()=>{const[e,,t]=(0,r.MY)(),{clear:n}=(0,r.Gp)(),o=(0,a.useCallback)((()=>{t(),n()}),[t,n]);return e?a.createElement(be,{onClose:o}):null}},51913:(e,t,n)=>{n.d(t,{ZV:()=>v,bg:()=>h,vQ:()=>g,Ws:()=>b,Gp:()=>E,MY:()=>x});n(17333),n(3064),n(41393),n(98992),n(54520),n(72577),n(81454),n(62953);var a=n(96540),o=n(47444),r=n(26655),l=n(49286);var s=n(3914),i=n(69765);const d=(0,o.eU)({key:"assistantAlert",default:null}),c=(0,o.eU)({key:"assistantAlertsAtom",default:[]}),u=(0,o.eU)({key:"assistantChatAtom",default:{open:!1,messages:[]}});var m=n(33829);const p=(0,o.K0)({key:"assistantAlertSelector",get:e=>{let{space:t,room:n,alert:a,node:o,chart:l}=e;return()=>(e=>{let{space:t,room:n,alert:a,node:o,chart:l}=e;return r.A.post("/api/v1/troubleshoot",{space:t,room:n,alarm:a,node:o,chart:l})})({space:t,room:n,alert:a,node:o||"dummy-node-id",chart:l||"dummy-chart-id"})},cachePolicy_UNSTABLE:{eviction:"most-recent"}}),g=e=>{var t;let{alert:n,node:a,chart:r}=e;const l=(0,s.vt)(),d=(0,i.ID)(),c=(0,o.xf)(p({space:l,room:d,alert:n,node:a,chart:r}));return{loaded:"loading"!==c.state,value:null===(t=c.contents)||void 0===t?void 0:t.data,hasError:"hasError"===c.state}},h=()=>(0,o.L4)(d),b=()=>(0,o.L4)(c),f=()=>(0,o.L4)(u),x=()=>{const[e,t]=f(),n=(0,a.useCallback)((()=>t((e=>({...e,open:!0})))),[t]),o=(0,a.useCallback)((()=>t((e=>({...e,open:!1})))),[t]);return[null===e||void 0===e?void 0:e.open,n,o]},E=()=>{const[e,t]=f(),n=(0,a.useCallback)((e=>t((t=>({...t,messages:[...t.messages,e]})))),[t]),o=(0,a.useCallback)((()=>{t((e=>({...e,messages:[]})))}),[t]),r=(0,a.useMemo)((()=>e.open&&!!e.messages.find((e=>{let{pending:t}=e;return!!t}))),[e]);return{messages:null===e||void 0===e?void 0:e.messages,pending:r,addMessage:n,clear:o}},v=()=>{const e=(()=>{const{addMessage:e}=E();return(0,a.useCallback)((t=>{const n=(0,m.A)();return e({id:n,type:"question",value:t}),n}),[e])})(),t=(()=>{const{addMessage:e}=E();return(0,a.useCallback)((t=>{let{questionId:n,pending:a,answer:o}=t;return e({id:(0,m.A)(),questionId:n,type:"answer",value:o,pending:a})}),[e])})(),n=(()=>{const[,e]=f();return(0,a.useCallback)((t=>{let{questionId:n,value:a,additionalInfo:o,error:r}=t;e((e=>({...e,messages:e.messages.map((e=>e.questionId==n?{...e,pending:!1,value:a,additionalInfo:o,error:r}:e))})))}),[])})();return(0,a.useCallback)((a=>{const o=e(a);var s;t({questionId:o,pending:!0}),(s=a,r.A.post("/api/v1/chat",{messages:[{role:"user",content:s}]},{transform:e=>(0,l.bn)(e)})).then((function(){let{data:e={}}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const{session:{messages:t},additionalInfo:a}=e,r=(t||[]).filter((e=>{let{role:t}=e;return"assistant"==t})),l=r.length?r[r.length-1]:null;null!==l&&void 0!==l&&l.content?n({questionId:o,value:l.content,additionalInfo:a}):n({questionId:o,error:"Something went wrong"})})).catch((e=>{n({questionId:o,error:e.message||"Something went wrong"})}))}),[e,n])}}}]); \ No newline at end of file diff --git a/src/web/gui/v2/8842.406028f523a00acb97bd.chunk.js b/src/web/gui/v2/8842.406028f523a00acb97bd.chunk.js deleted file mode 100644 index 9b8e3549f..000000000 --- a/src/web/gui/v2/8842.406028f523a00acb97bd.chunk.js +++ /dev/null @@ -1 +0,0 @@ 
-!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="6df49833-3fc2-4c7b-8bc8-5da3e2bc850d",e._sentryDebugIdIdentifier="sentry-dbid-6df49833-3fc2-4c7b-8bc8-5da3e2bc850d")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[8842],{8842:(e,t,n)=>{n.r(t),n.d(t,{default:()=>W});var a=n(96540),l=n(83199),o=(n(3064),n(98992),n(72577),n(26655));var r=n(3914),i=n(18061),c=n(47444);const s={items:[],error:"",id:null,loading:!0,loaded:!1,oidcID:null},d=(0,c.Iz)({key:"spaceOIDCKey",default:0}),u=(0,c.Iz)({key:"spaceOIDC",default:e=>(e=>({...s,id:e}))(e)}),m=(0,c.Iz)({key:"spaceOIDCConfiguration",default:null});n(62953);const g=(0,c.K0)({key:"spaceOIDCState",get:e=>{let{id:t,key:n}=e;return e=>{let{get:a}=e;const l=a(u(t));return n?l[n]:l}},set:e=>{let{id:t,key:n}=e;return(e,a)=>{let{set:l}=e;l(u(t),(e=>n?{...e,[n]:a}:{...e,...a}))}}}),f=e=>((e,t)=>(0,c.vc)(g({id:e,key:t})))((0,r.vt)(),e),p=e=>(0,c.L4)(m(e)),E=e=>{const t=(0,r.vt)(),[n,l]=p(t),i=(0,a.useCallback)((function(){return function(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};return o.A.post("/api/v3/spaces/".concat(e,"/oidc"),{...t})}(t,arguments.length>0&&void 0!==arguments[0]?arguments[0]:{})}),[t]),c=(0,a.useCallback)((function(){return function(e,t){let n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{};return o.A.patch("/api/v3/spaces/".concat(t,"/oidc/").concat(e),n)}(e,t,arguments.length>0&&void 0!==arguments[0]?arguments[0]:{})}),[e,t]),s=(0,a.useCallback)((()=>((e,t)=>o.A.delete("/api/v3/spaces/".concat(t,"/oidc/").concat(e)))(e,t).then((()=>(l(null),Promise.resolve())))),[e,t,l]);return{config:n,isEdit:!!e,save:e?c:i,remove:s}},b=()=>{const e=(0,r.vt)(),t=((e,t)=>(0,c.lZ)(g({id:e,key:t})))(e),n=(()=>{const e=(0,r.vt)();return(0,c.vc)(d(e))})();(0,i.A)((()=>({enabled:!!e,fetch:()=>(e=>o.A.get("/api/v3/spaces/".concat(e,"/oidc")))(e),onFail:e=>{var n;return t({...s,error:(null===e||void 0===e||null===(n=e.response)||void 0===n?void 0:n.data)||e})},onSettle:()=>t({loading:!1,loaded:!0}),onSuccess:e=>{const{oidcID:n}=e.find((e=>{let{oidcID:t}=e;return!!t}))||{};t({...s,items:e,...n?{oidcID:n}:{}})}})),[n,e])};var h=n(58168),v=(n(17333),n(41393),n(54520),n(81454),n(4659)),C=n(87659);n(9391),n(74648),n(23215);const y=e=>{const t=(0,r.vt)(),[,n]=p(t);(0,i.A)((()=>({enabled:!!t,fetch:()=>((e,t)=>e?o.A.get("/api/v3/spaces/".concat(t,"/oidc/").concat(e)):Promise.resolve({data:null}))(e,t),onSuccess:e=>{n(e)}})),[t])},k={okta:{name:"okta",color:"bright"},"auth-server":{name:"openid",color:"bright"}},D=e=>{let{title:t,onConfirm:n,onDecline:o,isConfirmLoading:r}=e;return a.createElement(l.ConfirmationDialog,{confirmLabel:"Yes, delete",handleConfirm:n,handleDecline:o,message:a.createElement(a.Fragment,null,"You are about to delete ",a.createElement("strong",null,t)," configuration.",a.createElement("br",null),"Are you sure you want to continue?"),title:"Delete configuration",isConfirmLoading:r})};var A=n(46741),x=n(47130),w=n(92155),I=n(50876);const 
F=(0,x.A)((0,w.A)(l.Button)),S=e=>{let{title:t,isLoading:n,isEditable:o,validFields:r,onSave:i,onConfirmDelete:c}=e;const[s,,d,u]=(0,C.A)(),m=(0,A.JT)("oidc:Manage"),{sendLog:g}=(0,I.A)(),f=(0,a.useCallback)((()=>{c(),u(),g({feature:"OidcModalConfirmDeleteButton",title:t})}),[c,u]),p=(0,a.useMemo)((()=>({feature:"OidcModalSaveButton",label:"Save",onClick:i,disabled:!r||!m||n,tooltip:m?r?"Save the configuration":"Please provide values to the required fields":"You don't have permission to manage single sign-on configurations."})),[i,r,m,n]),E=(0,a.useMemo)((()=>({feature:"OidcModalDeleteButton",label:"Delete",flavour:"hollow",danger:!0,onClick:d,disabled:!m||n,tooltip:m?"Delete the configuration":"You don't have permission to manage single sign-on configurations."})),[d,n,m]);return a.createElement(a.Fragment,null,a.createElement(l.ModalFooter,{gap:2},o?a.createElement(F,E):null,a.createElement(F,p)),s?a.createElement(D,{title:t,onConfirm:f,onDecline:u,isConfirmLoading:n}):null)},L={string:e=>{let{propKey:t,title:n,setFormData:o,...r}=e;const i=(0,a.useCallback)((e=>{let{target:n}=e;o((e=>({...e,[t]:n.value})))}),[t,o]);return a.createElement(l.TextInput,(0,h.A)({label:n,onChange:i},r))},default:()=>null},M=e=>{let{propKey:t,type:n,setFormData:l,...o}=e;const r=L[n]||L.default,i=(0,a.useCallback)((e=>{let{target:n}=e;l((e=>({...e,[t]:n.value})))}),[l]);return a.createElement(r,(0,h.A)({onChange:i},o))},T=e=>{let{properties:t={},errors:n={},formData:o,required:r,setFormData:i}=e;return a.createElement(l.Flex,{column:!0,gap:2},Object.entries(t).map((e=>{let[t,l]=e;return a.createElement(M,(0,h.A)({key:t,propKey:t,isRequired:r.includes(t)},l,{value:o[t]||"",setFormData:i,error:n[t]}))})))};var _=n(28738),B=n(71835);n(14905),n(8872),n(3296),n(27208),n(48408);const O={string:function(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};const{format:n}=t;return"uri"!=n||(e=>{try{const{host:t}=new URL(e);return/^(?!-)[a-zA-Z0-9-]{1,63}(?null};var K=n(63314);const q=e=>{let{id:t,oidcID:n,title:o,slug:i,description:s,properties:u={},required:m=[],isEditable:g,docsUrl:f,onClose:p}=e;y(n);const{config:b,isEdit:h,save:D,remove:A}=E(n),[x,w]=(0,a.useState)({}),[I,,F,L]=(0,C.A)(),M=(()=>{const e=(0,r.vt)(),t=(0,c.lZ)(d(e));return(0,a.useCallback)((()=>t((e=>e+1))),[t])})(),[q,N]=(0,B.A)(),[U,Y]=(0,a.useState)({}),j=m.every((e=>!!x[e]));(0,a.useEffect)((()=>{w(b||{})}),[b]),(0,a.useEffect)((()=>{Y(function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};return Object.entries(e).reduce(((e,n)=>{let[a,l]=n;const o=t[a]||{},r=(O[o.type]||O.default)(l,o);return r&&(e[a]=r),e}),{})}(x,u))}),[x,u,Y]);const z=(0,a.useCallback)((()=>{F(),D({...h?{}:{integrationID:t},...x}).then((()=>{q({header:"Success",text:"Successfully configured ".concat(o)}),p(),M()})).catch(N).finally((()=>{L()}))}),[D,x,h,q,N,p,M,F,L]),P=(0,a.useCallback)((()=>{F(),A().then((()=>{q({header:"Success",text:"Successfully deleted ".concat(o," configuration")}),p(),M()})).catch(N).finally((()=>{L()}))}),[A,q,N,p,M,F,L]);return 
a.createElement(K.Ay,{feature:"OidcModal",title:o},a.createElement(l.Modal,{backdropProps:{backdropBlur:!0},onEsc:p},a.createElement(l.ModalContent,{width:{min:100,base:140}},a.createElement(l.ModalHeader,{border:{side:"bottom",color:"disabled"},column:!0,gap:.5,padding:[6,4,3]},a.createElement(l.Flex,{alignItems:"center"},a.createElement(l.Flex,{gap:1,alignItems:"center"},a.createElement(l.Icon,k[i]||{}),a.createElement(l.H3,null,o)),p&&a.createElement(l.ModalCloseButton,{onClose:p,tooltip:"Close modal"})),a.createElement(l.TextSmall,{as:l.Box},s,f?a.createElement(a.Fragment,null,"\xa0",a.createElement(v.A,{Component:l.TextSmall,href:f,target:"_blank",whiteSpace:"nowrap"},"Learn how to configure it.")):null)),a.createElement(l.ModalBody,{height:{min:60,base:60},overflow:{vertical:"auto"}},I?a.createElement(_.A,{height:60,title:"Loading..."}):a.createElement(T,{formData:x,required:m,setFormData:w,properties:u,errors:U})),a.createElement(S,{title:o,isLoading:I,isEditable:g,validFields:j||!!Object.keys(U).length,onSave:z,onConfirmDelete:P}))))};var N=n(29217),U=n(63450);const Y=e=>{let{title:t,isEditable:n,isDisabled:l,onClick:o}=e;const{tooltipContent:r,buttonLabel:i}=(0,a.useMemo)((()=>l?{}:{tooltipContent:n?"Edit ".concat(t," configuration"):"Configure ".concat(t),buttonLabel:n?"Edit":"Configure"}),[n,l,t]);return l?null:a.createElement(N.A,{align:"bottom",content:r,plain:!0},a.createElement(U.ro,{label:i,icon:"gear",small:!0,onClick:o}))},j=e=>{let{id:t,slug:n,title:o,description:r,"data-testid":i="card",configuredItemExists:c,oidcID:s,schema:d={},...u}=e;const m=k[n],[g,,f,p]=(0,C.A)(),E=c&&!s,b=c&&!!s,{annotations:y={},properties:D={},required:A=[]}=d,{docsUrl:x}=y;return a.createElement(a.Fragment,null,a.createElement(l.Flex,(0,h.A)({background:"elementBackground",column:!0,"data-testid":i,justifyContent:"between",flex:!1,height:37,padding:[3,2,2,3],margin:[0,0,4,0],position:"relative",overflow:"hidden",round:.5,width:75},u),a.createElement(U.MU,(0,h.A)({"data-testid":"".concat(i,"-blurredIcon")},m)),a.createElement(l.Flex,{column:!0,"data-testid":"".concat(i,"-details"),gap:3,margin:[0,0,3,0]},a.createElement(l.Flex,{"data-testid":"".concat(i,"-header"),justifyContent:"between"},a.createElement(l.Flex,{alignItems:"center","data-testid":"".concat(i,"-titleContainer"),gap:1},a.createElement(l.Icon,(0,h.A)({"data-testid":"".concat(i,"-icon")},m)),a.createElement(l.Text,{"data-testid":"".concat(i,"-title")},o)),a.createElement(Y,{title:o,isEditable:b,isDisabled:E,onClick:f})),a.createElement(l.TextSmall,{color:"textDescription","data-testid":"".concat(i,"-description")},r," ",x&&a.createElement(v.A,{Component:U.W6,"data-testid":"".concat(i,"-docsLink"),href:x,target:"_blank",whiteSpace:"nowrap"},"Learn more.")))),g&&a.createElement(q,{id:t,oidcID:s,slug:n,title:o,description:r,docsUrl:x,properties:D,required:A,onClose:p,isEditable:b}))};var z=n(98496),P=n(12602);const R={ErrForbidden:{title:"No permissions",Content:()=>a.createElement(a.Fragment,null,a.createElement(l.Text,null,"You don't have permissions to view the authentication methods."),a.createElement(l.Text,null,"Please contact the Space administrators if this is unexpected."))},ErrNotAvailable:{title:"No available integrations",Content:()=>a.createElement(a.Fragment,null,a.createElement(l.Text,null,"No available single sign-on integrations found."),a.createElement(l.Text,null,"To enable this feature, please"," ",a.createElement(P.A,null,a.createElement(l.Text,{color:"primary"},"upgrade to the Business 
plan")),"."))},default:{title:"Error",Content:()=>a.createElement(a.Fragment,null,a.createElement(l.Text,null,"Something went wrong, please check again later."),a.createElement(l.Text,null,"If the problem persists feel free to contact us with a"," ",a.createElement(v.A,{href:"https://github.com/netdata/netdata-cloud/issues/new/choose",target:"_blank",rel:"noopener noreferrer"},"ticket")))}},Z=function(){let{error:e={}}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const{errorMsgKey:t}=e,{title:n,Content:o}=R[t]||R.default;return a.createElement(K.DL,{feature:"DyncfgMainTabs",errorMsgKey:t},a.createElement(z.A,{title:n},a.createElement(l.Flex,{column:!0,gap:1},a.createElement(o,null))))},H=()=>{const{loading:e,loaded:t,items:n=[],oidcID:o,error:r}=f(),i=n.filter((e=>{let{available:t}=e;return!!t}));return e?a.createElement(_.A,{title:"Loading items"}):t?r?a.createElement(Z,{error:r}):i.length?a.createElement(l.Flex,{flexWrap:!0,padding:[0,4],gap:4},i.map((e=>a.createElement(j,(0,h.A)({key:e.id,configuredItemExists:!!o},e))))):a.createElement(Z,{error:{errorMsgKey:"ErrNotAvailable"}}):a.createElement(Z,{error:"Something went wroong"})},W=()=>(b(),a.createElement(l.Flex,{column:!0,gap:8,height:"100%"},a.createElement(l.Flex,{column:!0,gap:2,padding:[0,0,0,4]},a.createElement(l.H3,null,"Authentication"),a.createElement(l.TextBig,null,"See the available single sign-on integrations and configure the one that your company supports.")),a.createElement(H,null)))}}]); \ No newline at end of file diff --git a/src/web/gui/v2/8910.019974f8675d8834dd07.chunk.js b/src/web/gui/v2/8910.019974f8675d8834dd07.chunk.js deleted file mode 100644 index 4994dbcd9..000000000 --- a/src/web/gui/v2/8910.019974f8675d8834dd07.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="de8a317e-0a71-4867-90ec-4ad8fd101ec6",e._sentryDebugIdIdentifier="sentry-dbid-de8a317e-0a71-4867-90ec-4ad8fd101ec6")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[8910],{8910:(e,t,n)=>{n.r(t),n.d(t,{default:()=>lo});var l=n(58168),o=(n(62953),n(96540)),r=n(83199),a=n(46741),i=n(11128),s=n(47762),c=n(47193);n(14905),n(98992),n(8872);const d=function(){let{selectedNode:e,nodes:t=[]}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const n=(0,o.useMemo)((()=>t.reduce(((e,t)=>{var n;let{id:l,ni:o,name:r,isLive:a,isPreferred:i,capabilities:s}=t;return a&&i&&null!==(n=s.dyncfg)&&void 0!==n&&n.enabled?[...e,{label:r,value:l,isParent:0==o}]:e}),[])),[t]),l=(0,o.useMemo)((()=>(e=>e?null!==e&&void 0!==e&&e.id&&null!==e&&void 0!==e&&e.name?{label:e.name,value:e.id,isParent:0==e.ni}:e:null)(e)||(n.length?n[0]:"")),[n]),[r,a]=(0,o.useState)();return(0,o.useEffect)((()=>{l&&a(l)}),[l,a]),{options:n,node:r,setNode:a}};n(41393),n(81454);var u=n(63950),m=n.n(u);const p=function(){let{node:e,setNode:t=m(),options:n=[]}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return o.createElement(r.Box,{width:"250px"},o.createElement(r.Flex,{column:!0,gap:1},o.createElement(r.TextBig,null,"Node"),o.createElement(r.Select,{placeholder:"Select a node",options:n,value:e,onChange:t})))};n(74648),n(17333),n(23215),n(54520);var 
g=n(27467);const h=(0,o.createContext)({}),f=()=>{const e=(0,o.useContext)(h),[,t]=(0,g.N9)(),{searchItem:n,setSearchItem:l}=e||{},{term:r,props:a}=n||{},i=!!Object.keys(n.props||{}).length,s=(0,o.useCallback)((e=>{t({dyncfgSearchTerm:void 0}),l((t=>({...t,term:e})))}),[l,t]),c=(0,o.useCallback)(((e,t)=>{l((n=>({...n,props:{...n.props||{},[e]:t}})))}),[l]),d=(0,o.useCallback)((e=>{l((t=>{var n;const l={...t};return null!==(n=l.props)&&void 0!==n&&n[e]&&delete l.props[e],l}))}),[l]),u=(0,o.useCallback)((e=>{try{return new RegExp(r).test(e)}catch(t){return m()}}),[r]),p=(0,o.useCallback)((e=>!a||!Object.entries(a).filter((e=>{let[,t]=e;return void 0!==t})).length||Object.entries(a).every((t=>{let[n,l]=t;return e[n]==l}))),[a]);if(!e)throw new Error("Attempt to use 'useSearch' outside of 'SearchContext.Provider'");return{searchItem:n,hasProps:i,searchForTerm:s,searchByProp:c,testString:u,testProps:p,removeSearchProp:d}},b=e=>{let{children:t}=e;const[{dyncfgSearchTerm:n}]=(0,g.N9)(),[l,r]=(0,o.useState)({term:n});return o.createElement(h.Provider,{value:{searchItem:l,setSearchItem:r}},t)},E=()=>{const{searchItem:e,searchForTerm:t}=f(),{term:n}=e||{};return o.createElement(r.Box,{width:"250px"},o.createElement(r.Flex,{column:!0,gap:1},o.createElement(r.TextBig,null,"Search"),o.createElement(r.SearchInput,{value:n,placeholder:"Search",onChange:t,size:"large",height:"33px",padding:[2,3]})))};var v=n(29217),y=n(32089),C=n(8711);const x=(0,C.default)(r.Flex).withConfig({displayName:"styled__StyledSection",componentId:"sc-dutbst-0"})(["&:hover{background-color:",";}"],(0,r.getColor)("secondaryHighlight")),w=(0,C.default)(r.Button).withConfig({displayName:"styled__StyledButton",componentId:"sc-dutbst-1"})(["&:before{content:",";width:18px;height:18px;display:",";align-items:center;justify-content:center;font-size:11px;font-weight:bold;position:absolute;top:-6px;left:-10px;border-radius:50%;background-color:",";color:",";z-index:10;}"],(e=>{let{warningItems:t}=e;return'"'.concat(t,'"')}),(e=>{let{warningItems:t}=e;return t?"flex":"none"}),(0,r.getColor)("error"),(0,r.getColor)("bright"));var k=n(94390),S=n(49286);const I=e=>{let{title:t="",props:n={},config:l={}}=e;const{searchItem:a,searchByProp:i,removeSearchProp:s}=f(),{props:c}=a||{},d=(0,o.useCallback)(((e,t)=>{const{searchCondition:n}=l[e]||{},o=Object.entries(n||{});if(o.length){const[e,n]=o[0];t?s(e):i(e,n)}}),[i,s]);return o.createElement(r.Flex,{column:!0,gap:1},o.createElement(r.TextBig,{strong:!0},t),o.createElement(r.Flex,{column:!0},Object.entries(l).map((e=>{var t;let[a,{label:i,searchCondition:s}]=e;const u=(0,S.bn)(n||{})[a]||0,[m,p]=Object.entries(s)[0],g=!!c&&c[m]==p,h=k.zy.includes(a)&&u>0?{color:"error"}:{};return o.createElement(v.A,{key:a,content:null===(t=l[a])||void 0===t?void 0:t.tooltip,align:"bottom",isBasic:!0,base:{background:"main",padding:[2]}},o.createElement(x,{width:"100%",alignItems:"center",justifyContent:"between",gap:2,background:g?"menuItemSelected":"none",padding:[1,2],round:!0,cursor:"pointer",onClick:()=>d(a,g)},o.createElement(r.TextBig,h,i),o.createElement(r.TextBig,h,u)))}))))},A=e=>{let{attention:t,source_type:n,status:l}=e;return o.createElement(r.Flex,{column:!0,padding:[3],gap:2},o.createElement(I,{title:"Attention",props:t,config:k.lp}),o.createElement(y.A,{color:"border"}),o.createElement(I,{title:"Source type",props:n,config:k.V8}),o.createElement(y.A,{color:"border"}),o.createElement(I,{title:"Status",props:l,config:k.T_}))},T=(0,o.memo)(A),F=Object.keys(k.T_),B=Object.keys(k.V8),D=function(e){let 
t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:[];const n=t.reduce(((e,t)=>({...e,[t]:0})),{});return function(){let l=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},o=arguments.length>1&&void 0!==arguments[1]?arguments[1]:n;for(let n=0;n0&&void 0!==arguments[0]?arguments[0]:{},t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};var n,l;(t.attention||(t.attention={}),e.restart_required)&&(t.attention.restart_required=((null===(n=t.attention)||void 0===n?void 0:n.restart_required)||0)+1);e.plugin_rejected&&(t.attention.plugin_rejected=((null===(l=t.attention)||void 0===l?void 0:l.plugin_rejected)||0)+1)},P=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};const n=Object.entries(e);if(n.length)for(let l=0;l{let{tree:t}=e;const{attention:n,source_type:l,status:r}=(0,o.useMemo)((()=>Object.keys(t)?P(t):{}),[t]);return{warningItems:((null===n||void 0===n?void 0:n.restart_required)||0)+((null===n||void 0===n?void 0:n.plugin_rejected)||0)+((null===r||void 0===r?void 0:r.failed)||0)+((null===r||void 0===r?void 0:r.incomplete)||0),menuItems:{attention:n,source_type:l,status:r}}};var L=n(87659);const M=e=>{let{tree:t={}}=e;const n=(0,o.useRef)(),[l,a,,i]=(0,L.A)(!1),{hasProps:s}=f(),{warningItems:c,menuItems:d}=_({tree:t});return o.createElement(o.Fragment,null,o.createElement(v.A,{align:"bottom",content:"Show filters"},o.createElement(r.Flex,{ref:n},o.createElement(w,{flavour:s?"default":"hollow",icon:"filterList",onClick:a,warningItems:c}))),l&&n.current?o.createElement(r.Drop,{width:60,target:n.current,align:{top:"bottom",right:"right"},animation:!0,background:"modalTabsBackground",margin:[2,0,0],round:1,close:i,onClickOutside:i,onEsc:i},o.createElement(T,d)):null)};var O=n(8239);const R=()=>{const e=(0,O.Hj)();return o.createElement(v.A,{align:"bottom",content:"Reload"},o.createElement(r.Flex,null,o.createElement(r.Button,{flavour:"hollow",icon:"refresh",onClick:e})))};var j=n(39225);n(8159),n(37550);const z=(0,o.createContext)({}),H=()=>(0,o.useContext)(z),U=z,W=(0,o.createContext)(),V={name:""},K=()=>{const e=(0,o.useContext)(W),{id:t,open:n,isNewItem:l,formData:r,setFormData:a,submitType:i,setSubmitType:s,isLoading:c,startLoading:d,stopLoading:u,fullPage:m,setFullPage:p,entityProps:g,isDirty:h,setIsDirty:f,error:b}=e||{},E=(0,o.useCallback)((e=>a((t=>({...t,name:e})))),[a]);if(!e)throw new Error("Attempt to use 'useConfigItemContext' outside of 'ConfigItemContext.Provider'");return{id:t,open:n,isNewItem:l,formData:r,setFormData:a,submitType:i,setSubmitType:s,setName:E,isLoading:c,startLoading:d,stopLoading:u,fullPage:m,setFullPage:p,entityProps:g,isDirty:h,setIsDirty:f,error:b}},q=e=>{let{children:t,...n}=e;const[l,r]=(0,o.useState)(V),[a,i]=(0,o.useState)("submit");return o.createElement(W.Provider,{value:{...n,formData:l,setFormData:r,submitType:a,setSubmitType:i}},t)};var Y=n(81198);n(9391);const $=()=>{const[e,t]=(0,o.useState)([]),n=(0,o.useCallback)((e=>t(e)),[t]);return{errors:e,onError:n,resetErrors:(0,o.useCallback)((()=>t([])),[t])}};var G=n(71835);const J=function(){let{defaultName:e}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const{id:t,formData:n,isNewItem:l}=K();return(0,o.useMemo)((()=>(l?null===n||void 0===n?void 0:n.name:(0,Y.J3)(t))||e),[l,null===n||void 0===n?void 0:n.name,t,e])};var Q=n(80158);const Z=()=>function(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"Something went wrong",{data:n={}}=(null===e||void 0===e?void 
0:e.response)||{};try{n=JSON.parse(n)}catch(a){}const{message:l,errorMessage:o,error_message:r}=n;return(0,Q.Zr)(l||o||r||t)},X=()=>{const[e,t,,n]=(0,G.A)(),l=Z(),r=(0,o.useCallback)((function(){let{successMessage:t="Successfully saved"}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return l=>{let{data:{message:o}}=l;(o?n:e)({header:o?"Warning":"Success",text:o||t})}}),[e,n]),a=(0,o.useCallback)((function(){let{header:e="Error",text:n="Something went wrong"}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return o=>{const r=l(o,n);t({header:e,text:r})}}),[t]);return[r,a]};var ee=n(50876);const te=function(){let{id:e,node:t,onError:n}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const l=(0,O.y8)({id:e,node:t}),r=J(),{setValue:a,setError:i}=(0,O.MQ)(),[,s]=X(),c=s(),d="function"==typeof n?n:c,u=Z(),m=(0,o.useRef)();return(e,t)=>{m.current&&(clearTimeout(m.current),m.current=null);const n=null===t?t:t||r;m.current=setTimeout((()=>{l({name:n,payload:e}).then((e=>{a(e.data||"")})).catch((e=>{i(u(e)),d(e)}))}),500)}},ne=e=>{let{id:t,node:n,data:l,userConfigIsOpen:r,submitMethod:a,onSchemaFetch:i,setIsDirty:s,setError:c}=e;const{isNewItem:d,formData:u,setFormData:p,isLoading:g,startLoading:h,stopLoading:f,entityProps:b={}}=K(),{cmds:E=[]}=b,v=E.includes("userconfig"),{sendLog:y}=(0,ee.A)(),[C,x]=(0,G.A)(),{loaded:w,value:k,hasError:S,error:I,refresh:A}=(0,O.t8)({id:t,node:n}),{loaded:T,value:F,hasError:B,refresh:D}=l,N=w&&T,P=I||B,_=(0,O.Hj)(),L=(0,O.O_)({node:n,id:t}),M=te({id:t,node:n,onError:m()}),{errors:R,onError:j,resetErrors:z}=$();(0,o.useEffect)((()=>{w&&k&&!S&&i(k)}),[w,k]),(0,o.useEffect)((()=>{c(P)}),[P]);const H=(0,o.useCallback)(((e,t)=>{const{name:n,...l}=e.formData,o={name:F.name,...l};p(o),s(!t),v&&r&&M(o)}),[p,s,v,r,M]),U=(0,o.useCallback)((e=>{H({formData:e})}),[H]),W=(0,o.useCallback)((e=>{z();const n={...e.formData,...d&&null!==u&&void 0!==u&&u.name?{name:u.name}:{}};h(),a(n).then((()=>{s(!1),d?_():(L(),A(),D()),C({header:"Success",text:"Successfully submitted configuration"}),y({feature:"DyncnfForm",description:"Submit success",id:t})})).catch((e=>{var n,l;const o=(null===(n=e.response)||void 0===n?void 0:n.data)||e;x({header:"Error",text:(null===o||void 0===o?void 0:o.message)||"Something went wrong"}),y({feature:"DyncnfForm",description:"Submit error",id:t,error:JSON.stringify((null===(l=e.response)||void 0===l?void 0:l.data)||{})})})).finally((()=>{f()}))}),[d,u,h,f,L,A,D,j]);return{isLoading:g,loaded:N,formData:F,setFormData:U,schema:k,resourcesError:P,errors:R,onChange:H,onSubmit:W,onError:j,getUserConfig:M}},le=(0,C.default)(r.TextBig).withConfig({displayName:"styled__StyledTitle",componentId:"sc-16w9sl7-0"})(["font-family:monospace;"]),oe=(0,C.default)(r.Icon).attrs((e=>({opacity:"1",cursor:"pointer",...e}))).withConfig({displayName:"styled__StyledTemplateArrow",componentId:"sc-16w9sl7-1"})(["opacity:",";cursor:",";"],(e=>{let{hasTemplateChildren:t}=e;return t?"1":"0"}),(e=>{let{hasTemplateChildren:t}=e;return t?"pointer":"default"})),re=(0,C.default)(r.Flex).withConfig({displayName:"styled__StyledHeader",componentId:"sc-16w9sl7-2"})(["height:","px;border-top:1px solid ",";border-left:1px solid ",";border-right:1px solid ",";border-bottom:1px solid ",";border-bottom-style:",";"],k.FL,(0,r.getColor)("border"),(0,r.getColor)("border"),(0,r.getColor)("border"),(0,r.getColor)("border"),(e=>{let{isOpenEntity:t}=e;return 
t?"none":"solid"})),ae=(0,C.default)(r.Collapsible).withConfig({displayName:"styled__StyledCollapsible",componentId:"sc-16w9sl7-3"})(["height:",";max-height:",";overflow:auto;"],(e=>{let{open:t,fullPage:n}=e;return t&&n?"100%":"auto"}),(e=>{let{flavour:t,fullPage:n,isJob:l}=e;return"path"!=t?n?"calc(100% - 55px)":l?"500px":"unset":"unset"})),ie=(0,C.default)(r.TextBig).withConfig({displayName:"styled__StyledNodeName",componentId:"sc-16w9sl7-4"})(["font-family:monospace;"]);var se=n(47130);const ce=(0,se.A)(r.Flex),de=e=>{let{source:t="",sourceType:n}=e;const l=t.split(",");return o.createElement(r.Flex,{column:!0,gap:2},o.createElement(r.Flex,{alignItems:"center",gap:1},o.createElement(r.Text,{strong:!0},"Source type"),o.createElement(r.Pill,null,n)),o.createElement(r.Flex,{column:!0,gap:1},l.length?l.map((e=>o.createElement(r.TextSmall,{key:e},e))):null))},ue=e=>{let{source:t,sourceType:n,...a}=e;const i=k.V6[n];if(!i)return null;const{icon:s}=i;return o.createElement(v.A,{isBasic:!0,content:o.createElement(de,{source:t,sourceType:n})},o.createElement(r.Box,null,o.createElement(r.Icon,(0,l.A)({name:s},a))))},me=e=>{let{type:t,status:n}=e;const l=k.bM[n];if(!l)return null;const{label:a,color:i,icon:s}=l;return"template"!=t?o.createElement(r.Flex,{width:{min:28},gap:1,padding:[1,2],border:{side:"all",color:i},round:!0,alignItems:"center",justifyContent:"center"},o.createElement(r.Icon,{name:s,height:"12px",color:i}),o.createElement(r.Text,{color:i},a)):null},pe=()=>o.createElement(r.Flex,{width:{max:100}},o.createElement(r.Text,null,"You can submit your changes to another node, or to multiple nodes by selecting"," ",o.createElement(r.Text,{strong:!0},"Submit to multiple nodes")," button.")),ge=()=>{const{submitType:e}=K(),{node:t,nodes:n}=H(),[l]=(0,i.Oj)({extraKey:"dyncfg",merge:!1}),a=n.filter((e=>{let{value:t}=e;return l.includes(t)})),s=a.length?a[0].label:"0",c="submit"==e?t.label:s,d=l.length>1?"and ".concat(l.length-1," more"):"",u="submit"==e?"node":l.length>1?"nodes":"node";return o.createElement(r.Flex,{"data-testid":"dyncfg-on-node",gap:2,alignItems:"center"},o.createElement(r.TextBig,null,"on"),o.createElement(ce,{padding:[1],background:"mainBackground",tooltip:"submit"==e?o.createElement(pe,null):null,tooltipProps:{isBasic:!0,plain:!1}},o.createElement(ie,{strong:!0},c)," "),o.createElement(r.TextBig,null,"multiple_submit"==e?o.createElement(r.TextBig,{strong:!0},"".concat(d," ")):null,u))},he=function(){let{title:e,type:t,source:n,sourceType:l,status:a,templateChildren:i={},onCollapseExpand:s,userDisabled:c}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const{color:d}=k.g7[t]||k.g7.default,u=c||"disabled"==a?"disabled":d,{node:p}=H(),{open:g,isNewItem:h,formData:f,setName:b,entityProps:E={}}=K(),{cmds:v=[]}=E,y=v.includes("userconfig"),{name:C}=f||{},x=!!Object.keys(i).length,w=te({id:e,node:p,onError:m()}),S=(0,o.useCallback)((e=>{b(e.target.value),y&&w(f,e.target.value||"test")}),[f,b,y,w]);return o.createElement(r.Flex,{gap:2,alignItems:"center"},"template"==t?o.createElement(oe,{name:"chevron_right",color:"textLite",rotate:g?3:1,onClick:s,hasTemplateChildren:x}):null,o.createElement(me,{type:t,status:a}),o.createElement(ue,{source:n,sourceType:l,color:u}),h?null:o.createElement(le,{color:u},(0,Y.J3)(e)),h&&g?o.createElement(r.TextInput,{"data-testid":"dyncfg-config-item-name",width:65,value:C,onChange:S,placeholder:"Add configuration name",error:h&&!(null!==f&&void 0!==f&&f.name),hideErrorMessage:!0}):null,"template"!=t&&g?o.createElement(ge,null):null)};var 
fe=n(26688),be=n(70895),Ee=n(92155);const ve=(0,Ee.A)(be.A),ye=function(){let{title:e,buttonProps:t={}}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const[,n]=(0,O.QH)(),r=(0,fe.A)(),{isLoading:a,entityProps:i}=K(),{cmds:s=[]}=i||{},c=a,d=(0,o.useCallback)((()=>{n(r({template:e,entityProps:i}))}),[e,i,r,n]);return s.includes("add")?o.createElement(ve,(0,l.A)({edit:!0,feature:"DyncnfTemplate",payload:{id:e,label:"Add job"},tooltip:"Add a new configuration item",icon:"plus",iconWidth:18,iconHeight:18,onClick:d,disabled:c},t)):null},Ce=(0,Ee.A)(be.A),xe=e=>{let{id:t,enable:n,open:l,onDecline:a,onConfirm:i}=e;const s=n?"enable":"disable";return l?o.createElement(r.ConfirmationDialog,{title:"".concat((0,Q.Zr)(s)," item?"),handleConfirm:i,handleDecline:a,confirmLabel:"Yes ".concat(s),declineLabel:"No",isConfirmPositive:!!n,message:o.createElement(r.Flex,{column:!0,gap:2},o.createElement(r.TextBig,null,"You are about to ",s," ",o.createElement(le,{strong:!0},t)," module and all its configurations."),o.createElement(r.TextBig,null,"Are you sure?"))}):null},we=function(){let{id:e,userDisabled:t,buttonProps:n={}}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const{isLoading:l,startLoading:r,stopLoading:a}=K(),{node:i}=H(),s=(0,O.DP)({id:e,node:i}),c=(0,O.O_)({node:i,id:e}),[d,u]=X(),[m,p]=(0,o.useState)(),[g,,h,f]=(0,L.A)(),b=(0,o.useCallback)((()=>{f(),r(),s(m).then((e=>{c(),d({successMessage:"Successfully ".concat(m?"enabled":"disabled"," module")})(e)})).catch((e=>{u({header:"Couldn't ".concat(m?"enable":"disable"," module")})(e)})).finally((()=>{a()}))}),[f,r,s,c,d,u,a]),E=(0,o.useCallback)((e=>{l||(p(e),h())}),[l,h]),v=(0,o.useMemo)((()=>({edit:!0,feature:"DyncnfTemplate",payload:{id:e,label:t?"Enable":"Disable"},isLoading:l,disabled:l,...n,...t?{tooltip:"Enable this module",icon:"switch_off",iconColor:"disabled",onClick:()=>E(!0)}:{tooltip:"Disable this module",icon:"switch_off",onClick:()=>E(!1),iconColor:"success",danger:!0}})),[e,t,l,n,E]);return o.createElement(o.Fragment,null,o.createElement(Ce,v),o.createElement(xe,{id:e,enable:m,open:g,onDecline:f,onConfirm:b}))},ke="success",Se={iconColor:ke,flavour:"borderless",color:ke},Ie=e=>{let{title:t,userDisabled:n}=e;return o.createElement(o.Fragment,null,o.createElement(ye,{title:t,buttonProps:Se}),o.createElement(we,{id:t,userDisabled:n,buttonProps:Se}))};var Ae=n(16607);const Te=(0,Ae.A)(r.Button),Fe=function(){let{buttonProps:e={}}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const{open:t,fullPage:n,setFullPage:r,isNewItem:a}=K(),i=(0,o.useMemo)((()=>({...e,icon:n?"reduceSize":"fullScreen",tooltip:n?"Reduce window size":"Expand window size"})),[n]),s=(0,o.useCallback)((()=>{r(!n)}),[n,r]);return t&&!a?o.createElement(Te,(0,l.A)({onClick:s},i)):null},Be=(0,Ee.A)(be.A),De=function(){let{id:e,onClick:t,buttonProps:n={}}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const{open:r,isLoading:a,entityProps:i}=K(),{cmds:s=[]}=i||{},c=a,d=s.includes("update")?"Edit configuration":"You don't have permissions to edit the configuration";return o.createElement(Be,(0,l.A)({view:!0,feature:"DyncnfItem",payload:{id:e,label:r?"Close":"View"},tooltip:r?"Close":d,icon:r?"x":"pencilOutline",onClick:t,disabled:c},n))},Ne=(0,Ae.A)(r.Button),Pe=function(){let{title:e,template:t={},onCollapseExpand:n=m(),buttonProps:r={}}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const
a=(0,Y.UE)(e),[,i]=(0,O.QH)(),{cmds:s,user_disabled:c}=t,{isLoading:d,open:u,formData:p,entityProps:g}=K(),h=(0,fe.A)(),f=(0,o.useCallback)((()=>{d||(i(h({template:a,formData:p,entityProps:g})),n())}),[d,a,p,g,i,h]);return u&&null!==s&&void 0!==s&&s.includes("add")?o.createElement(Ne,(0,l.A)({icon:"copy",onClick:f,disabled:d||c},c?{}:{tooltip:"Copy this item and create a new one"},r)):null},_e=(0,Ee.A)(be.A),Le=e=>{let{id:t,open:n,onDecline:l,onConfirm:a}=e;return n?o.createElement(r.ConfirmationDialog,{title:"Delete item?",handleConfirm:a,handleDecline:l,confirmLabel:"Yes, delete",declineLabel:"No",message:o.createElement(r.Flex,{column:!0,gap:2},o.createElement(r.TextBig,null,"You are going to completely remove ",o.createElement(le,{strong:!0},t)," and this action cannot be reverted."),o.createElement(r.TextBig,null,"Are you sure that you want to delete this item?"))}):null},Me=function(){let{id:e,buttonProps:t={}}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const{isLoading:n,entityProps:r,startLoading:a,stopLoading:i}=K(),{type:s,cmds:c=[]}=r||{},{node:d}=H(),u=(0,O.OU)({id:e,node:d}),[m,p]=X(),g=(0,O.Hj)(),[h,,f,b]=(0,L.A)(),{sendButtonClickedLog:E}=(0,ee.A)(),v=n,y=(0,o.useCallback)((()=>{b(),a(),u().then((t=>{m({successMessage:"Successfully removed configuration"})(t),E({feature:"DyncnfItem",description:"Confirm deletion",id:e}),g()})).catch((e=>{p({header:"Couldn't remove configuration"})(e)})).finally((()=>{i()})),setTimeout((()=>{i()}),1e3)}),[u,b,a,i,g,E,m,p]),C=(0,o.useCallback)((()=>{n||f()}),[n,f]);return"job"==s&&c.includes("remove")?o.createElement(o.Fragment,null,o.createElement(_e,(0,l.A)({edit:!0,feature:"DyncnfItem",payload:{id:e,label:"Remove"},icon:"trashcan",onClick:C,disabled:v,tooltip:"Remove"},t)),o.createElement(Le,{id:e,open:h,onDecline:b,onConfirm:y})):null},Oe=(0,Ee.A)(be.A),Re=function(){let{id:e,template:t={},buttonProps:n={}}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const{node:r}=H(),a=(0,O.ZN)({id:e,node:r}),[i,s]=X(),{isLoading:c,entityProps:d,startLoading:u,stopLoading:m}=K(),{user_disabled:p}=t,{cmds:g=[]}=d||{},h=(0,O.O_)({node:r,id:e}),f=c||p,b=(0,o.useCallback)((()=>{c||p||(u(),a().then((e=>{i({successMessage:"Successfully restarted configuration"})(e),h()})).catch((e=>{s({header:"Couldn't restart configuration"})(e)})).finally((()=>{m()})))}),[c,p,a,u,h,m,i,s]);return g.includes("restart")?o.createElement(Oe,(0,l.A)({edit:!0,feature:"DyncnfItem",payload:{id:e,label:"Restart"},icon:"refresh",onClick:b,disabled:f,tooltip:"Restart"},n)):null},je=e=>{let{id:t,template:n={}}=e;const{isLoading:l,entityProps:a,startLoading:i,stopLoading:s}=K(),{cmds:c=[],userDisabled:d}=a||{},[u,m]=(0,o.useState)(!d),{node:p}=H(),{user_disabled:g}=n,h=(0,O.DP)({id:t,node:p}),f=(0,O.O_)({node:p,id:t}),[b,E]=X(),{sendButtonClickedLog:y}=(0,ee.A)(),{hidden:C,isDisabled:x,tooltipText:w}=(0,be.j)({edit:!0,tooltip:u?"Disable":"Enable",disabled:l||g}),k=(0,o.useCallback)((e=>{if(!g){const n=e.currentTarget.checked;m(n),i(),h(n).then((e=>{y({feature:"DyncnfItem",description:n?"Enable item":"Disable item",id:t}),b({successMessage:"Successfully ".concat(n?"enabled":"disabled"," configuration")})(e),f()})).catch((e=>{y({feature:"DyncnfItem",description:"Failed to ".concat(n?"enable":"disable"," item"),id:t}),m(!n),E({header:"Couldn't ".concat(n?"enable":"disable"," 
configuration")})(e)})).finally((()=>{s()}))}}),[g,h,i,m,f,y,b,E]),S=c.includes("enable"),I=c.includes("disable");return!C&&S&&I?o.createElement(v.A,{align:"bottom",content:w},o.createElement(r.Box,{padding:[1.5,2]},o.createElement(r.Toggle,{onChange:k,checked:u,colored:!0,disabled:x}))):null},ze=()=>{const{id:e,isNewItem:t,entityProps:n}=K(),{node:l,path:o}=H(),{template:r}=n,a=(0,O.SD)({node:l,path:o,isNewItem:t})(t?r:e),i=(0,O._O)({node:l,id:r});return i?(0,S.Jz)(i):a||{}},He="text",Ue={iconColor:He,flavour:"borderless",color:He,type:"button"},We=(0,C.default)(r.Flex).attrs({alignItems:"center",border:{side:"all",color:"border"},round:!0}).withConfig({displayName:"itemActions__StyledContainer",componentId:"sc-1kpuq1a-0"})([""]),Ve=function(){let{title:e,isOpenEntity:t,isEditFromState:n,onViewButtonClick:l,onCollapseExpand:r=m()}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const a=ze();return o.createElement(o.Fragment,null,n?null:o.createElement(We,{gap:1},o.createElement(Pe,{title:e,template:a,onCollapseExpand:r,buttonProps:Ue}),o.createElement(Re,{id:e,template:a,buttonProps:Ue}),o.createElement(Me,{id:e,buttonProps:Ue}),t?null:o.createElement(je,{id:e,template:a})),o.createElement(We,{gap:1},n?null:o.createElement(Fe,{buttonProps:Ue}),o.createElement(De,{id:e,onClick:l,buttonProps:Ue})))},Ke=e=>{let{title:t,onViewButtonClick:n,onCollapseExpand:l,type:a,userDisabled:i,isOpenEntity:s,isEditFromState:c}=e;return o.createElement(r.Flex,{gap:2,alignItems:"center"},"template"==a?o.createElement(Ie,{title:t,userDisabled:i}):o.createElement(Ve,{title:t,onViewButtonClick:n,onCollapseExpand:l,isOpenEntity:s,isEditFromState:c}))},qe={vertical:2,horizontal:4},Ye=e=>{let{path:t,onClick:n,...a}=e;const{open:i}=K();return o.createElement(r.Flex,(0,l.A)({gap:2,onClick:n,cursor:"pointer"},a),o.createElement(r.Icon,{name:"chevron_right",color:"textLite",rotate:i?3:1}),o.createElement(le,{strong:!0},t))},$e=e=>{let{flavour:t,title:n,onViewButtonClick:r,onCollapseExpand:a,level:i,entityProps:s={},isOpenEntity:c,isEditFromState:d}=e;const{vertical:u,horizontal:m}=qe,p=(0,o.useMemo)((()=>[u,m,u,m*i]),[i]),g="path"==t;return o.createElement(re,{alignItems:"center",justifyContent:"between",padding:p,isOpenEntity:c},g?o.createElement(Ye,{path:n,onClick:a}):o.createElement(o.Fragment,null,o.createElement(he,(0,l.A)({title:n,onCollapseExpand:a},s)),o.createElement(Ke,(0,l.A)({title:n,onViewButtonClick:r,onCollapseExpand:a,isOpenEntity:c,isEditFromState:d},s))))},Ge=e=>{let{open:t,onDecline:n,onConfirm:l}=e;return t?o.createElement(r.ConfirmationDialog,{title:"Discard changes?",handleConfirm:l,handleDecline:n,confirmLabel:"Yes, discard",declineLabel:"No, keep editing",message:o.createElement(r.Flex,{column:!0,gap:2},o.createElement(r.TextBig,null,"If you exit Edit mode before submit, your changes will be lost."),o.createElement(r.TextBig,null,"Are you sure?"))}):null},Je=e=>(0,o.useEffect)((()=>{if(!e)return;const t=t=>{"Escape"==t.code&&e(t)};return document.addEventListener("keydown",t),()=>document.removeEventListener("keydown",t)}),[e]),Qe=e=>{let{title:t,open:n,isLoading:l,isDirty:r,setIsDirty:a,toggle:i,refresh:s,refreshUserConfig:c,entityProps:d,error:u}=e;const m=d[k.D9],p=["single","job"].includes(null===d||void 0===d?void 0:d.type),[,g]=(0,O.QH)(),[h,,f,b]=(0,L.A)(),{close:E}=(0,O.IP)(),v=(0,o.useCallback)((()=>{r&&(a(!1),i(),b()),s(),c(),g(null),E(t)}),[t,E,g,r,a,i,s,c,b]),y=(0,o.useCallback)((()=>{l||(m||r?u?v():f():i())}),[l,m,r,u,v,f]);return 
Je((e=>{"Escape"==e.code&&n&&p&&(e.preventDefault(),y())})),{confirmationOpen:h,closeConfirmation:b,onConfirm:v,onClick:y}};var Ze=n(28738);const Xe=e=>{let{virtualIndex:t,virtualRowStart:n,flavour:a="path",title:i="",level:s=1,entityProps:c={},children:d,isNewItem:u,isOpen:m,isEditFromState:p,...g}=e;const[h,f]=(0,L.A)(u||m),{node:b,inModal:E}=H(),v=(0,O._O)({node:b,id:i})||c,y="job"==(null===v||void 0===v?void 0:v.type),C=["single","job"].includes(null===v||void 0===v?void 0:v.type),[x,,w,k]=(0,L.A)(),[S,I]=(0,o.useState)(u||p),[A,T]=(0,o.useState)(),[F,B]=(0,o.useState)(),D="path"!=a&&h&&x,{refresh:N}=(0,O.L5)(),{refresh:P}=(0,O.MQ)(),{confirmationOpen:_,closeConfirmation:M,onConfirm:R,onClick:j}=Qe({title:i,open:h,isLoading:x,isDirty:F,setIsDirty:B,refresh:N,refreshUserConfig:P,toggle:f,entityProps:v,error:A});(0,o.useEffect)((()=>{h||P()}),[h,N,P]);const z=(0,o.useCallback)((e=>{var t;u||I(null===e||void 0===e||null===(t=e.uiSchema)||void 0===t||null===(t=t.uiOptions)||void 0===t?void 0:t.fullPage)}),[I]);(0,o.useEffect)((()=>{if(void 0!==t&&void 0!==n){const e=document.querySelector(".dyncnf-virtual-row[data-key='".concat(i,"']"));e&&(e.style.transform=S&&h?"none":"translateY(".concat(n,"px)"))}}),[h,S]);const{containerProps:U,collapsibleProps:W,loaderProps:V}=(0,o.useMemo)((()=>({containerProps:{...h&&S?{position:"fixed",zIndex:"100",...E?{top:0,left:0,width:"100%",height:"100%"}:{top:"5vh",left:"5vw",width:"90vw",height:"90vh"}}:{position:"relative"},...h&&C?{border:{side:"all",color:"border"}}:{},...S?{}:{height:"100%"},background:C&&h?"modalBackground":"mainBackground",round:!0},collapsibleProps:{level:s,open:h,duration:0,flavour:a,fullPage:S,isJob:y,...h&&C?{padding:[2,2,2,4*s]}:{}},loaderProps:{position:"absolute",top:0,left:0,width:"100%",height:"100%",opacity:".9",zIndex:"100"}})),[h,S,E,C]);return o.createElement(o.Fragment,null,o.createElement(q,{id:i,open:h,isNewItem:u,isLoading:x,startLoading:w,stopLoading:k,fullPage:S,setFullPage:I,entityProps:v,isDirty:F,setIsDirty:B,error:A},o.createElement(r.Flex,(0,l.A)({"data-testid":"dyncfg-config-item-container",column:!0},U,g),o.createElement($e,{key:h,flavour:a,title:i,onViewButtonClick:j,onCollapseExpand:f,level:s,entityProps:v,isOpenEntity:h&&!!C,isEditFromState:p}),o.createElement(ae,W,o.Children.map(d,(e=>o.isValidElement(e)?o.cloneElement(e,{level:s+1,onSchemaFetch:z,setIsDirty:B,setError:T}):e))),D?o.createElement(Ze.A,(0,l.A)({title:"Loading..."},V)):null)),o.createElement(Ge,{open:_,onDecline:M,onConfirm:R}))};var et=n(73700),tt=n(51220);const nt=e=>{let{virtualIndex:t,virtualRowStart:n,flavour:r="entity",entryKey:a,entityProps:i,configItemLevel:s=1,children:c,...d}=e;return o.createElement(o.Fragment,null,o.createElement(Xe,(0,l.A)({virtualIndex:t,virtualRowStart:n,flavour:r,title:a,level:s,entityProps:i},d),c))};var lt=n(74810),ot=n(36091),rt=n(4659),at=n(87292),it=n(68831);const st=C.default.span.withConfig({displayName:"markdocSchema__Strong",componentId:"sc-1se2vza-0"})(["font-weight:bold;"]),ct=C.default.ol.withConfig({displayName:"markdocSchema__OrderedList",componentId:"sc-1se2vza-1"})(["list-style:roman;padding-left:14px;"]),dt=C.default.ul.withConfig({displayName:"markdocSchema__UnorderedList",componentId:"sc-1se2vza-2"})(['list-style-image:url("','/img/list-style-image.svg");padding-left:14px;'],it.A.assetsBaseURL),ut={heading:{render:e=>{let{level:t=1,...n}=e,a=r.H1;switch(t){case 2:a=r.H2;break;case 3:a=r.H3;break;case 4:a=r.H4;break;case 5:a=r.H5;break;case 6:a=r.H6}return 
o.createElement(a,(0,l.A)({margin:[2,0]},n))},attributes:{id:{type:String},level:{type:Number}}},paragraph:{render:e=>{let{children:t,...n}=e;return o.createElement(r.Flex,n,o.createElement(r.Text,null,t))}},strong:{render:st},link:{render:e=>{let{children:t,href:n,...r}=e;return o.createElement(rt.A,(0,l.A)({href:n,rel:"noopener noreferrer",target:"_blank"},r),t)},attributes:{href:{type:String}}},code:{render:e=>{let{content:t,...n}=e;return o.createElement(at.R0,n,t)},attributes:{content:{type:String}}},fence:{render:e=>{let{content:t,...n}=e;return o.createElement(at.Ay,n,t)},attributes:{content:{type:String}}},list:{render:e=>{let{ordered:t,children:n,...l}=e;const r=t?ct:dt;return o.createElement(r,l,n)},attributes:{ordered:{type:Boolean}}}},mt=e=>{let{children:t}=e;return o.createElement(ot.A,{transformConfiguration:{nodes:ut}},t)},pt=e=>{var t;let{description:n}=e;return("string"==typeof n&&n?n:"object"==typeof n?null===n||void 0===n||null===(t=n.props)||void 0===t?void 0:t.description:null)?o.createElement(v.A,{isBasic:!0,plain:!0,allowHoverOnTooltip:!0,content:o.createElement(r.Flex,{width:{max:70}},o.createElement(mt,null,n))},o.createElement(r.Icon,{name:"information",color:"text",size:"small"})):null},gt=e=>{var t;let{description:n}=e;if(!n)return null;const l="string"==typeof n?n:null===(t=n.props)||void 0===t?void 0:t.description;return o.createElement(mt,null,l)},ht=e=>{let{title:t,required:n,description:l,help:a}=e;return t?o.createElement(r.Flex,{column:!0,gap:.5},o.createElement(r.Flex,{alignItems:"center",gap:1},o.createElement(r.TextBig,{strong:!0},t,n?"*":null),a?o.createElement(pt,{description:a}):null),o.createElement(gt,{description:l})):null},ft=e=>{let{id:t,displayLabel:n,label:l,required:r,description:a,schema:i,uiSchema:s={}}=e;const c=s["ui:title"]||l,d=s["ui:help"],u=["array","boolean"].includes(i.type)||Array.isArray(i.type)&&(i.type.includes("array")||i.type.includes("boolean"));return c&&(n||u)?o.createElement("label",{htmlFor:t},o.createElement(ht,{title:c,required:r,description:a,help:d})):null},bt=e=>{let{id:t,label:n,required:l,description:a,errors:i,children:s,displayLabel:c,schema:d,classNames:u,hidden:m,uiSchema:p}=e;return m?null:o.createElement(r.Flex,{width:"100%",className:"".concat(u," dyncfg-field-container"),column:!0,gap:.5},o.createElement(ft,{id:t,displayLabel:c,label:n,required:l,description:a,schema:d,uiSchema:p}),o.createElement(r.Flex,{className:"dyncfg-field-content",column:!0,padding:[0,0,0,1]},s,i?o.createElement(r.TextMicro,{color:"errorText"},i):null))},Et=(vt=e=>{let{onAddClick:t,schema:n,registry:a,properties:i,...s}=e;const{WrapIfAdditionalTemplate:c,ButtonTemplates:d}=a.templates,{AddButton:u}=d;return o.createElement(o.Fragment,null,o.createElement(r.Flex,(0,l.A)({className:"dyncfg-object-field-content",column:!0,gap:3,padding:[0,0,0,3],border:{side:"left",color:"border"}},s),i.map((e=>o.createElement(c,(0,l.A)({key:e.content.key,elemKey:e.content.key},e.content.props),e.content)))),n.additionalProperties?o.createElement(r.Flex,{padding:[2]},o.createElement(u,{onClick:t(n)})):null)},e=>{let{className:t,...n}=e;return null!==t&&void 0!==t&&t.includes("dyncfg-grid")?o.createElement("div",{className:t},o.createElement(vt,n)):o.createElement(vt,n)});var vt;const yt=(0,o.createContext)(),Ct=e=>{let{children:t}=e;const[n,l]=(0,o.useState)({});return 
o.createElement(yt.Provider,{value:{collapsibleItemsState:n,setCollapsibleItemsState:l}},t)},xt=e=>{const{properties:t,uiSchema:n}=e,{tabs:a,rest:i}=n["ui:options"]||{},[s,c]=(0,o.useState)(0),d=(0,o.useMemo)((()=>(e=>{let{properties:t=[],tabs:n=[]}=e;return n.map((e=>({...e,properties:t.filter((t=>e.fields.includes(t.name)))})))})({properties:t,tabs:a})),[t,a]);return o.createElement(Ct,null,(i||[]).length?o.createElement(Et,(0,l.A)({},e,{properties:t.filter((e=>i.includes(e.name)))})):null,o.createElement(r.Tabs,{selected:s,onChange:c,height:"100%"},d.map((t=>{let{title:n,...a}=t;return o.createElement(r.Tab,{key:n,label:n},o.createElement(Et,(0,l.A)({},e,a,{border:{side:"top",color:"border"},padding:[2,0,0,3]})))}))))},wt={tabs:{Component:xt},default:{Component:Et}},kt=e=>{var t;const{title:n,description:l,required:a,uiSchema:i={},idSchema:s,formData:c}=e,d=s.$id,u=i["ui:flavour"],p=i["ui:help"],{Component:g}=wt[u]||wt.default,h=!(null===i||void 0===i||!i["ui:collapsible"])||!(null===i||void 0===i||null===(t=i["ui:options"])||void 0===t||!t.collapsible),{inContext:f,isItemDirty:b,isItemExpanded:E,setItemState:v}=(()=>{const e=(0,o.useContext)(yt),{collapsibleItemsState:t,setCollapsibleItemsState:n}=e||{},l=(0,o.useCallback)((e=>void 0!==t[e]),[t]),r=(0,o.useCallback)((e=>t[e]),[t]),a=(0,o.useCallback)(((e,t)=>{n((n=>({...n,[e]:t})))}),[n]);return{inContext:!!e,isItemDirty:e?l:m(),isItemExpanded:e?r:m(),setItemState:e?a:m()}})(),y=i["ui:initiallyExpanded"]||!!c,C=(0,o.useMemo)((()=>b(d)?E(d):y),[b,E]),[x,w]=(0,L.A)(C);return(0,o.useEffect)((()=>{f&&v(d,x)}),[f,x,v]),o.createElement(r.Flex,{className:"dyncfg-object-field-container",width:"100%",column:!0,gap:2},n?o.createElement(r.Flex,{gap:1,alignItems:"center"},o.createElement(ht,{title:n,required:a,description:l,help:p}),h?o.createElement(r.Icon,{name:"chevron_down",color:"text",size:"small",onClick:w,cursor:"pointer",rotate:x?2:0}):null):null,o.createElement(r.Collapsible,{className:"dyncfg-object-field-collapsible",open:x||!h,duration:0},o.createElement(g,e)))};n(25440);const St=(0,o.forwardRef)(((e,t)=>{let{title:n,active:a,index:i,setActiveTab:s,onDropIndexClick:c,onReorderClick:d,reordering:u,setReordering:m,hasRemove:p,hasMoveUp:g,hasMoveDown:h,...f}=e;const{onAddIndexClick:b,onCopyIndexClick:E,...v}=f,y=(0,o.useCallback)((e=>{p&&c(e)()}),[p,c]);return(0,o.useEffect)((()=>{if(2==(null===u||void 0===u?void 0:u.length)&&u[0]==i){const e=u[1];d(i,e)(),s(e),m()}}),[u]),o.createElement(r.NavigationTab,(0,l.A)({ref:t},v,{index:i,draggable:!!h&&!!g,active:a,icon:p?o.createElement(r.Icon,{name:"x",size:"small"}):null,onClick:()=>{s(i)},onRemove:y,fixed:!p}),o.createElement(r.TextBig,{whiteSpace:"nowrap"},n))})),It=(0,C.default)(r.IconButton).attrs((e=>({small:!0,padding:[0,1],...e}))).withConfig({displayName:"styled__StyledIconButton",componentId:"sc-xc1c2v-0"})(["height:",";"],(e=>{let{inTabs:t}=e;return t?"auto":"24px"})),At=e=>{let{icon:t,iconType:n,...r}=e;return o.createElement(It,(0,l.A)({flavour:"default",icon:"plus",tooltip:"Add item",type:"button",neutral:!1},r))},Tt=e=>{let{icon:t,iconType:n,...r}=e;return o.createElement(It,(0,l.A)({flavour:"hollow",icon:"copy",tooltip:"Copy current item into a new one",type:"button",neutral:!1},r))};var Ft=n(3705);const Bt=e=>{var t;let{items:n,onAddClick:l,onCopyClick:a,canAdd:i}=e;const[s,{width:c}]=(0,Ft.A)(),[d,u]=(0,o.useState)(0),[m,p]=(0,o.useState)();(0,o.useEffect)((()=>{d>=n.length&&u(0)}),[n.length]);const g=i?"calc(".concat(c,"px - 80px)"):"".concat(c,"px");return 
o.createElement(r.Flex,{ref:s,column:!0},o.createElement(r.Flex,{width:"".concat(c,"px"),margin:[0,0,2,0]},o.createElement(r.NavigationTabs,{width:g,overflow:{horizontal:"auto"}},o.createElement(r.DraggableTabs,{items:n.map(((e,t)=>({id:e.key,title:"Item ".concat(t),active:d==t,setActiveTab:u,reordering:m,setReordering:p,...e}))),Item:St,onDragEnd:(e,t)=>{p([e,t])}})),i?o.createElement(o.Fragment,null,o.createElement(Tt,{onClick:()=>a(d),padding:[1,3],inTabs:!0,groupFirst:!0}),o.createElement(At,{onClick:l,padding:[1,3],inTabs:!0,groupLast:!0})):null),o.Children.map(null===(t=n[d])||void 0===t?void 0:t.children,(e=>o.isValidElement(e)?o.cloneElement(e,{title:""}):e)))},Dt=(0,o.memo)(Bt),Nt=e=>{let{icon:t,iconType:n,...r}=e;return o.createElement(It,(0,l.A)({flavour:"hollow",icon:"checkmark_partial_s",danger:!0,tooltip:"Remove item",type:"button"},r))},Pt=e=>{let{icon:t,iconType:n,...r}=e;return o.createElement(It,(0,l.A)({flavour:"hollow",icon:"sort_ascending",tooltip:"Move up",type:"button"},r))},_t=e=>{let{icon:t,iconType:n,...r}=e;return o.createElement(It,(0,l.A)({flavour:"hollow",icon:"sort_descending",tooltip:"Move down",type:"button"},r))},Lt=(0,C.default)(r.Flex).attrs((e=>({open:!0,...e}))).withConfig({displayName:"collapsible__ContentWrapper",componentId:"sc-6ate9-0"})(["display:",";"],(e=>{let{open:t}=e;return t?"flex":"none"})),Mt=(0,se.A)(r.Icon),Ot=e=>{let{title:t="",isCollapsible:n,isOpen:a,onChange:i,children:s,...c}=e;const[d,u]=(0,L.A)(a);return(0,o.useEffect)((()=>{i(d)}),[d]),n?o.createElement(r.Flex,{width:"100%",column:!0,padding:[3,0]},o.createElement(r.Flex,{gap:2},o.createElement(Mt,{name:"chevron_left",size:"small",color:"textLite",rotate:d?1:3,onClick:u,cursor:"pointer",tooltip:d?"Hide contents":"Show contents"}),t?o.createElement(r.Text,{strong:!0},t):null),o.createElement(Lt,(0,l.A)({open:d},c),s)):s},Rt=e=>{var t,n;let{hasMoveDown:l,hasMoveUp:a,hasRemove:i,index:s,totalItems:c,onAddIndexClick:d,onDropIndexClick:u,onReorderClick:m,canAdd:p,uiSchema:g={},onCopyClick:h,children:f}=e;const[b,E]=(0,o.useState)(!0),v=a||l||i||p,y=m(s,s-1),C=m(s,s+1),x=u(s),w=d(s+1),k=!(null===g||void 0===g||!g["ui:collapsible"])||!(null===g||void 0===g||null===(t=g["ui:options"])||void 0===t||!t.collapsible),S=(0,o.useCallback)((e=>{E(e)}),[E]);return o.createElement(r.Flex,{width:"100%",gap:2,alignItems:b?"start":"center",justifyContent:"between",border:{side:"all",color:"border"},padding:[2,4]},o.createElement(Ot,{isCollapsible:k,title:null===(n=f.props)||void 0===n?void 0:n.title,isOpen:b,onChange:S},f),v?o.createElement(r.Flex,{gap:2,padding:[2,0]},a?o.createElement(Pt,{onClick:y}):null,l?o.createElement(_t,{onClick:C}):null,i?o.createElement(Nt,{onClick:x}):null,p?o.createElement(Tt,{onClick:()=>h(s)}):null,p&&s==c-1?o.createElement(At,{onClick:w}):null):null)},jt=e=>{let{onCopyClick:t,items:n}=e;return n.map((e=>o.createElement(Rt,(0,l.A)({key:e.key,onCopyClick:t},e))))},zt=(0,o.memo)(jt),Ht=e=>{let{canAdd:t,onAddClick:n,openEmptyItem:l}=e;return(0,o.useEffect)((()=>{l&&n()}),[l,n]),o.createElement(r.Flex,{gap:2,alignItems:"center"},o.createElement(r.TextBig,null,"No items"),t?o.createElement(At,{onClick:n}):null)},Ut=(0,se.A)(r.Toggle),Wt=e=>{var t,n;const{items:a,canAdd:i,onAddClick:s,idSchema:c,formContext:d}=e,u="list"==(null===e||void 0===e||null===(t=e.uiSchema)||void 0===t?void 0:t["ui:listFlavour"]),m=!(null===e||void 0===e||null===(n=e.uiSchema)||void 0===n||!n["ui:openEmptyItem"]),[p,g]=(0,L.A)(u),{formData:h}=K(),f=p?zt:Dt,b=(0,o.useCallback)((e=>{var t;const 
n=null===(t=a[e])||void 0===t||null===(t=t.children)||void 0===t||null===(t=t.props)||void 0===t?void 0:t.formData,{$id:l}=c,o=((e,t,n)=>{const l=e.replace(/^root_/,"").split("_");let o=n;for(let r=0;r{var t;let{elemKey:n,onKeyChange:l,schema:a,onDropPropertyClick:i,registry:s,children:c}=e;const[d,u]=(0,o.useState)((null===c||void 0===c||null===(t=c.props)||void 0===t?void 0:t.name)||""),{RemoveButton:m}=s.templates.ButtonTemplates,p=Vt.Rr in a,g=(0,o.useCallback)((e=>{i(n)(e)}),[i,n]),h=(0,o.useCallback)((e=>u(e.target.value)),[u]);return p?o.createElement(r.Flex,{width:"100%",gap:2,alignItems:"center"},o.createElement(r.Flex,{column:!0,gap:1,flex:{grow:1,shrink:1}},o.createElement(r.TextBig,{strong:!0},"Key"),o.createElement(r.TextInput,{value:d,onInput:h,onBlur:e=>l(e.target.value),border:"inputBorder",size:"small"})),o.createElement(r.Flex,{flex:{grow:2,shrink:1}},c),o.createElement(r.Flex,{column:!0,gap:.5},o.createElement(r.TextBig,{opacity:"0"},"x"),o.createElement(m,{onClick:g}))):c},qt={FieldTemplate:bt,ArrayFieldTemplate:Wt,ObjectFieldTemplate:kt,WrapIfAdditionalTemplate:Kt,ErrorListTemplate:()=>null,ButtonTemplates:{SubmitButton:()=>o.createElement(r.Button,{label:"Save"}),AddButton:At,RemoveButton:Nt,MoveUpButton:Pt,MoveDownButton:_t}},Yt=e=>{let{value:t,type:n,disabled:l,onChange:a,onBlur:i,placeholder:s,schema:c,rawErrors:d}=e;const u=n||(e=>"integer"==e||Array.isArray(e)&&e.includes("integer")?"number":"text")(c.type),m="number"==u?null==t||void 0==t?0:t:t||"";return o.createElement(r.TextInput,{value:m,type:u,placeholder:s,onChange:e=>{var t;return a(null===(t=e.target)||void 0===t?void 0:t.value)},onBlur:i,disabled:l,error:!(null===d||void 0===d||!d.length),hideErrorMessage:!0,border:"inputBorder",size:"small"})},$t=e=>{let{value:t,disabled:n,onChange:l}=e;return o.createElement(r.Flex,null,o.createElement(r.Checkbox,{checked:t,onChange:l,disabled:n}))};n(3064),n(72577);const Gt=function(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:[];if(!e||0==t.length)return"";if(Array.isArray(e)){return[...t.filter((t=>e.map((e=>"string"==typeof e?e:e.value)).includes(t.value))),...e.filter((e=>"object"==typeof e&&e.__isNew__))]}return t.find((t=>t.value==e))},Jt=e=>{let{value:t,options:n,disabled:l,multiple:a,onChange:i,uiSchema:s}=e;const{enumOptions:c}=n||{},d=null===s||void 0===s?void 0:s["ui:creatable"],u=(0,o.useCallback)((e=>{var t;i((t=e)?Array.isArray(t)?t.map((e=>e.value)):t.value||"":"")}),[]);return c?o.createElement(r.Select,{value:Gt(t,c),isMulti:a,options:c,onChange:u,isDisabled:l,isCreatable:d,styles:{size:"tiny"}}):null},Qt=e=>{let{value:t,disabled:n,onChange:l,uiSchema:a,options:i}=e;const s=a[Vt.ce],c=(null===s||void 0===s?void 0:s.enumOptions)||(null===i||void 0===i?void 0:i.enumOptions),d=null===s||void 0===s?void 0:s.inline,u="buttonGroup"==(null===s||void 0===s?void 0:s.flavour),m=(0,o.useMemo)((()=>({gap:2,...d?{alignItems:"center"}:{column:!0}})),[d]);if(u){const e=c.map((e=>{var t;return{...e,title:null===(t=e.schema)||void 0===t?void 0:t.description}}));return o.createElement(r.ButtonGroup,{items:e,checked:t,onChange:l,buttonProps:{type:"button",small:!0}})}return c?o.createElement(r.Flex,m,c.map((e=>{var a;return o.createElement(r.RadioButton,{key:e.value,checked:t==e.value,onChange:()=>l(e.value),disabled:n},o.createElement("label",{title:null===e||void 0===e||null===(a=e.schema)||void 0===a?void 
0:a.description},e.label))}))):null},Zt=C.default.textarea.withConfig({displayName:"textareaWidget__Textarea",componentId:"sc-3aqoej-0"})(["background:",";border-width:1px;border-style:solid;border-color:",";border-radius:2px;color:",";padding:8px 12px;opacity:",';font-size:14px;font-family:-apple-system,BlinkMacSystemFont,"Segoe UI",Ubuntu,"Helvetica Neue",sans-serif;'],(0,r.getColor)("inputBg"),(e=>{let{hasValue:t,error:n}=e;return t?(0,r.getColor)("text"):n?(0,r.getColor)("error"):(0,r.getColor)("inputBorder")}),(e=>{let{disabled:t,hasValue:n}=e;return n?(0,r.getColor)("text"):t?(0,r.getColor)("placeholder"):(0,r.getColor)("textLite")}),(e=>{let{disabled:t}=e;return t?".4":"1"})),Xt=e=>{let{value:t,placeholder:n,disabled:l,onChange:r,options:a,rawErrors:i}=e;const{rows:s=2}=a||{};return o.createElement(Zt,{placeholder:n,disabled:l,onChange:e=>r(e.target.value),rows:s,hasValue:!!t,error:!(null===i||void 0===i||!i.length)},t)},en=e=>o.createElement(Yt,(0,l.A)({type:"password"},e)),tn=e=>o.createElement(Yt,(0,l.A)({type:"url"},e));var nn=n(40267);const ln=/^\$this\s*(==|>|<|>=|<=)\s*\((\d+)\)$/,on=/^\$this\s*(==|>|<|>=|<=)\s*\(\(\$status\s*(==|>|<|>=|<=)\s*(\$WARNING|\$CRITICAL)\s*\)\s*\?\s*\((\d+)\)\s*:\s*\((\d+)\)\)$/,rn=Object.values({lt:{label:"Lower than",value:"lt",symbol:"<"},lte:{label:"Lower than or equal",value:"lte",symbol:"<="},eq:{label:"Equal",value:"eq",symbol:"=="},gt:{label:"Greater than",value:"gt",symbol:">"},gte:{label:"Greater than or equal",value:"gte",symbol:">="}}),an={$CRITICAL:"critical",$WARNING:"warning"},sn={critical:"$CRITICAL",warning:"$WARNING"},cn=e=>{let{value:t,onChange:n,isDisabled:l}=e;const a=rn.find((e=>{let{symbol:n}=e;return n==t}))||"";return o.createElement(r.Select,{options:rn,value:a,onChange:n,isDisabled:l,styles:{size:"tiny"}})},dn=e=>{let{isDisabled:t,parsedExpression:n={},toggleRecoveryThreshold:l,onChangeThreshold:a}=e;const{isComplex:i,threshold:s}=n,c=(0,o.useCallback)((e=>{t||a(e.target.value)}),[a,t]),d=(0,o.useCallback)((e=>{t||l(e)}),[l,t]);return i?o.createElement(r.Flex,{alignItems:"center",gap:2},o.createElement(r.Pill,{flavour:"success",hollow:!0,icon:"reload"},"Recovery Threshold"),o.createElement(r.TextInput,{onChange:c,value:s,type:"number",min:0,size:"tiny",metaShrinked:!0,containerStyles:{width:"80px"},disabled:t}),o.createElement(v.A,{content:"Remove recovery threshold"},o.createElement(r.Button,{type:"button",icon:"trashcan",flavour:"borderless",onClick:()=>d(!1),iconColor:"textLite",iconSize:"small",disabled:t}))):o.createElement(r.Button,{type:"button",label:"Recovery Threshold",icon:"plus",flavour:"borderless",onClick:()=>d(!0),disabled:t})},un=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const{isSimple:t,isComplex:n,operator:l,status:o,value:r,threshold:a}=e;return t?"$this ".concat(l," (").concat(r,")"):n?"$this ".concat(l," (($status >= ").concat(o,") ? 
(").concat(a,") : (").concat(r,"))"):""},mn=e=>{let{status:t,parsedExpression:n,onChange:l,isDisabled:r}=e;return{onOperatorChange:(0,o.useCallback)((e=>{let{symbol:t}=e;if(!r){const e=un({...n,operator:t});l(e)}}),[n,l,r]),onValueChange:(0,o.useCallback)((e=>{if(!r){const t=un({...n,value:e});l(t)}}),[n,l,r]),toggleRecoveryThreshold:(0,o.useCallback)((e=>{if(!r){const o=!e,r=!!e,a=un({...n,isSimple:o,isComplex:r,status:sn[t],threshold:"70"});l(a)}}),[t,n,l,r]),onChangeThreshold:(0,o.useCallback)((e=>{if(!r){const t=un({...n,threshold:e});l(t)}}),[n,l,r])}},pn=e=>{let{isDisabled:t,expression:n="",onChange:l}=e;const a=t?"":n,i=(0,o.useCallback)((e=>{var n;t||l((null===(n=e.target)||void 0===n?void 0:n.value)||"")}),[t,l]);return o.createElement(r.Flex,{column:!0,gap:1},o.createElement(r.Text,null,"Formula"),o.createElement(r.Box,{width:"500px"},o.createElement(r.TextInput,{value:a,placeholder:"$this",onChange:i,size:"tiny",disabled:t})))},gn=e=>{let{onDecline:t,onConfirm:n}=e;return o.createElement(r.ConfirmationDialog,{title:"Reset expression",confirmLabel:"Continue",handleConfirm:n,handleDecline:t,message:o.createElement(r.Flex,{gap:2,column:!0},o.createElement(r.TextBig,null,"Your custom expression is going to be reset."),o.createElement(r.TextBig,null,"Are you sure you want to change?"))})},hn=e=>t=>{const{isSimple:n,isComplex:a}=t.parsedExpression,i=t.value&&!n&&!a,[s,c]=(0,o.useState)(i?1:0),[d,,u,m]=(0,L.A)(),p=(0,o.useCallback)((e=>{"0"==e.target.value?u():c(1)}),[u,c]),g=(0,o.useCallback)((()=>{c(0),m()}),[c,m]),h=(0,o.useMemo)((()=>[{radioButtonValue:0,isDisabled:0!=s,checked:0==s,Component:e},{radioButtonValue:1,isDisabled:1!=s,checked:1==s,Component:pn}]),[s]);return o.createElement(o.Fragment,null,o.createElement(r.Flex,{column:!0,gap:2},h.map((e=>{let{radioButtonValue:n,isDisabled:a,checked:i,Component:s}=e;return o.createElement(r.Flex,{key:n,alignItems:"center",gap:2},o.createElement(r.RadioButton,{value:n,checked:i,onChange:p}),o.createElement(s,(0,l.A)({isDisabled:a},t)))}))),d?o.createElement(gn,{onDecline:m,onConfirm:g}):null)},fn=hn((e=>{let{isDisabled:t,parsedExpression:n={},options:l={},onChange:a}=e;const{alertStatus:i}=l,{operator:s,value:c,status:d}=n,u=an[d]||i,{onOperatorChange:m,onValueChange:p,toggleRecoveryThreshold:g,onChangeThreshold:h}=mn({status:u,parsedExpression:n,onChange:a,isDisabled:t});return o.createElement(r.Flex,{alignItems:"center",gap:2},o.createElement(r.Box,null,o.createElement(nn.A,{width:"80px",flavour:u,textSize:"small"},(0,Q.Zr)(u,!0))),o.createElement(cn,{value:s,onChange:m,isDisabled:t}),o.createElement(r.TextInput,{onChange:e=>p(e.target.value),value:c,type:"number",disabled:t,min:0,size:"tiny",metaShrinked:!0,containerStyles:{width:"80px"}}),o.createElement(dn,{isDisabled:t,parsedExpression:n,toggleRecoveryThreshold:g,onChangeThreshold:h}))})),bn=e=>{const{alertStatus:t}=e.options,n=(0,o.useMemo)((()=>function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:"";const t=e.match(ln);if(t)return{isSimple:!0,operator:t[1],value:t[2]};const n=e.match(on);return n?{isComplex:!0,operator:n[1],statusOperator:n[2],status:n[3],threshold:n[4],value:n[5]}:{}}(e.value)),[e.value]);return t?o.createElement(fn,(0,l.A)({expression:e.value,parsedExpression:n},e)):(console.warn("".concat(e.id," field is declared to be an alert expression but it does not provide an alert status")),null)},En=(e,t)=>{const 
n=parseFloat(e),{multiplier:l}=t||{};return{num:n,multiplier:parseFloat(l)}},vn=e=>{let{value:t=0,onChange:n=m(),scale:l=[],defaultScale:a,min:i=0,step:s=1}=e;const c=l.find((e=>{let{value:t}=e;return t==a}))||l[0],[d,u]=(0,o.useState)(c),p=((e,t)=>{const{num:n,multiplier:l}=En(e,t);return l&&!isNaN(n)?Math.round(n/l*100)/100:0})(t,d),g=(0,o.useCallback)((e=>{const t=((e,t)=>{const{num:n,multiplier:l}=En(e,t);return l&&!isNaN(n)?n*l:0})(e.target.value,d);n(t)}),[d,n]);return o.createElement(r.Flex,{gap:2,alignItems:"center"},o.createElement(r.TextInput,{value:p,onChange:g,type:"number",min:i,step:s,size:"small"}),o.createElement(r.Select,{value:d,options:l,onChange:u,styles:{minWidth:"80px",size:"tiny"}}))},yn=[{value:"sec",label:"Seconds",multiplier:1},{value:"min",label:"Minutes",multiplier:60},{value:"hour",label:"Hours",multiplier:3600},{value:"day",label:"Days",multiplier:86400},{value:"week",label:"Weeks",multiplier:604800},{value:"month",label:"Months",multiplier:2592e3},{value:"year",label:"Years",multiplier:31536e3}],Cn=e=>{const{schema:t={}}=e,{type:n}=t,r="number"==n?.1:1,{scales:a}=e.options||{},i=a?yn.filter((e=>{let{value:t}=e;return a.includes(t)})):yn;return o.createElement(vn,(0,l.A)({scale:i,step:r},e))},xn=e=>{var t;return o.createElement(Cn,(0,l.A)({},e,{defaultScale:null===(t=e.options)||void 0===t?void 0:t.scale}))},wn=[{value:"B",label:"Bytes",multiplier:1},{value:"KiB",label:"KiB",multiplier:1024},{value:"MiB",label:"MiB",multiplier:Math.pow(1024,2)},{value:"GiB",label:"GiB",multiplier:Math.pow(1024,3)},{value:"TiB",label:"TiB",multiplier:Math.pow(1024,4)}],kn=e=>{const{scales:t}=e.options||{},n=t?wn.filter((e=>{let{value:n}=e;return t.includes(n)})):wn;return o.createElement(vn,(0,l.A)({scale:n},e))},Sn={TextWidget:Yt,CheckboxWidget:$t,SelectWidget:Jt,RadioWidget:Qt,TextareaWidget:Xt,PasswordWidget:en,URLWidget:tn,alertExpressionWidget:bn,durationWidget:xn,memorySizeWidget:e=>{var t;return o.createElement(kn,(0,l.A)({},e,{defaultScale:null===(t=e.options)||void 0===t?void 0:t.scale}))}};var In=n(22086);const An=(0,C.default)(In.Ay).withConfig({displayName:"styled__StyledForm",componentId:"sc-epjsfn-0"})(["display:flex;flex-direction:column;justify-content:space-between;height:100%;overflow-y:auto;padding-right:8px;"]);var Tn=n(83084),Fn=n(81638),Bn=n(30577),Dn=n(93615);const Nn=C.default.div.withConfig({displayName:"styled__StyledTerminalCommand",componentId:"sc-1oflxul-0"})(["display:flex;position:relative;flex-direction:column;color:",";background:",";border:1px solid ",";border-radius:4px;overflow-wrap:anywhere;white-space:pre-wrap;padding:16px 16px 24px;width:100%;height:100%;font-family:monospace;font-weight:bold;letter-spacing:0.09px;line-height:16px;font-size:12px;word-break:break-word;overflow-y:auto;"],(0,r.getColor)("primary"),(0,r.getColor)("terminalGreen"),(0,r.getColor)("primary")),Pn=(0,C.default)(r.Icon).withConfig({displayName:"styled__StyledIcon",componentId:"sc-1oflxul-1"})(["display:flex;align-self:flex-end;cursor:pointer;position:absolute;bottom:16px;right:16px;"]);var _n=n(3914),Ln=n(22292),Mn=n(19673),On=n(28061);const Rn={Business:!0,Pro:!0,Homelab:!0,Community:!1},jn=(0,se.A)(r.Button),zn=e=>{let{getShouldUpgrade:t,...n}=e;const{value:l}=(0,Mn.JN)(),r=(0,a.JT)("billing:Manage"),i=(0,On.A)(),s=(0,o.useMemo)((()=>"function"==typeof t?t(l):!function(){return Rn[(arguments.length>0&&void 0!==arguments[0]?arguments[0]:{}).class]}(l)),[t,l]),c=(0,o.useMemo)((()=>s?r?"Upgrade your plan to use this feature":"You don't have sufficient permissions to 
upgrade the plan":null),[r,s]),d=(0,o.useMemo)((()=>({label:"Upgrade now!",onClick:i,tooltip:c,disabled:!r,noWrapper:!0,...n})),[i,c,r,n]);return s?o.createElement(jn,d):null},Hn=function(){let{getShouldUpgrade:e,...t}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const n=(0,Ln.uW)("isAnonymous"),r=(0,_n.dg)();return n||r?null:o.createElement(zn,(0,l.A)({getShouldUpgrade:e},t))};var Un=n(18682);const Wn=e=>{let{error:t}=e;return o.createElement(r.Flex,{padding:[4]},o.createElement(r.Text,{color:"error"},t))},Vn=e=>o.createElement(r.Flex,(0,l.A)({column:!0,gap:2},e),o.createElement(r.Flex,{gap:2,alignItems:"center"},o.createElement(r.Icon,{color:"warning",name:"warning_triangle"}),o.createElement(r.Text,null,"This feature is only available to paid plans")),o.createElement(Hn,{small:!0})),Kn=e=>{let{canGetUserConfig:t,onOpenTerminal:n}=e;const{value:l,error:a}=(0,O.MQ)(),i=(0,o.useMemo)((()=>(e=>e?e.replace(/password:\s*[^ \n]*/i,"password: ****"):e)(l)),[l]),{hasPermission:s}=(0,be.j)({edit:!0});return(0,o.useEffect)((()=>{n()}),[]),t?a?o.createElement(Wn,{error:a}):s?o.createElement(Nn,null,i,o.createElement(Pn,{name:"copy",size:"small",color:"textLite",onClick:(0,Un.C)(l,{text:"Config copied to your clipboard."})})):o.createElement(Vn,null):o.createElement(r.Flex,{padding:[4]},o.createElement(r.Text,null,"Configuration text export is not supported by this node."))},qn={configuration:{iconName:"gear",color:{active:"success",notActive:"textLite"},width:"14px",Content:Kn,dataTestId:"configurationTab",label:"Config"}},Yn=["configuration"],$n=e=>{let{isOpen:t,toggleIsOpen:n,canGetUserConfig:l,onOpenTerminal:r}=e;const{Content:a}=qn.configuration;return o.createElement(Fn.Ay,{collapsedComponent:o.createElement(Bn.A,{onClickTab:n,availableTabs:qn,tabsToShow:Yn}),isOpen:t,header:o.createElement(Dn.A,{isOpen:t,onToggle:n,title:"Configuration",icon:null})},t?o.createElement(a,{padding:[0,3,30],canGetUserConfig:l,onOpenTerminal:r}):null)},Gn=e=>{let{title:t="Error",children:n,...a}=e;return o.createElement(r.Flex,(0,l.A)({column:!0,gap:2},a),o.createElement(r.Flex,{alignItems:"center",gap:2},o.createElement(r.Icon,{name:"warning_triangle",color:"errorText"}),o.createElement(r.TextBigger,{color:"errorText"},t)),n)};var Jn=n(63314);const Qn=function(){let{remainingErrors:e=[]}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const[t,n]=(0,L.A)();return o.createElement(o.Fragment,null,o.createElement(r.Collapsible,{open:t,duration:0},e.map((e=>{let{message:t}=e;return o.createElement(r.Text,{key:t,color:"errorText"},t)}))),o.createElement(r.Flex,{padding:[2,0,0,0]},o.createElement(r.Button,{flavour:"hollow",neutral:!0,small:!0,label:t?"Hide":"Show ".concat(e.length," more"),onClick:n})))},Zn=function(){let{id:e,errors:t=[]}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return t.length?o.createElement(Jn.Ay,{feature:"DyncnfForm",id:e,validationErrors:!0},o.createElement(Gn,null,o.createElement(r.Flex,{alignItems:"start",column:!0},o.createElement(r.Text,{color:"errorText"},t[0].message),t.length>1?o.createElement(Qn,{remainingErrors:t.slice(1)}):null))):null},Xn=(0,C.default)(r.Flex).attrs((e=>({padding:[2,4,2,1.5],border:{side:"bottom",color:"border"},cursor:"pointer",...e}))).withConfig({displayName:"styled__StyledMenuItem",componentId:"sc-1sw1he6-0"})(["&:last-child{border:none;}&:hover{background:",";}"],(0,r.getColor)("modalBackground")),el=e=>{let{id:t,isActive:n,title:l,description:a,setSubmitType:i,closeMenu:s}=e;const 
c=(0,o.useCallback)((()=>{i(t),s()}),[t,i,s]);return o.createElement(Xn,{onClick:c},o.createElement(r.Flex,{gap:1},o.createElement(r.Box,{width:"16px",height:"16px"},n?o.createElement(r.Icon,{name:"check",color:"primary"}):null),o.createElement(r.Flex,{column:!0,gap:1},o.createElement(r.TextBig,{strong:!0},l),o.createElement(r.Text,null,a))))},tl=e=>{let{submitType:t,setSubmitType:n,closeMenu:a,node:i}=e;const s=(0,o.useMemo)((()=>(e=>[{id:"submit",title:"Submit",description:null!==e&&void 0!==e&&e.label?"Submit your changes to ".concat(e.label," node"):"Submit your changes"},{id:"multiple_submit",title:"Submit to multiple nodes",description:"Submit your changes to multiple nodes. You can use parent nodes to apply your changes automatically to their children."}])(i)),[i]);return o.createElement(r.Flex,{width:120,position:"absolute",column:!0,bottom:30,right:0,round:!0,background:"tooltip",border:{side:"all",color:"border"}},s.map((e=>o.createElement(el,(0,l.A)({key:e.id},e,{isActive:t==e.id,setSubmitType:n,closeMenu:a})))))};var nl=n(94177),ll=n(99292),ol=n(42828);const rl={...ol.Ii,Live:{...ol.Ii.Live,head:{...ol.Ii.Live.head,hasML:!1,hasParentNode:!0,hasFn:!1},node:{...ol.Ii.Live.node,showMl:!1,showParentNode:!0,showFn:!1}}},al=e=>{let{extraKey:t=k.q4}=e;return o.createElement(r.Flex,{column:!0,height:{max:"60vh"}},o.createElement(r.Flex,{flex:"1",overflow:"hidden"},o.createElement(r.Flex,{column:!0,flex:!0},o.createElement(nl.A,{flavour:"dyncfg",extraKey:t,statusProps:rl,testIdPrefix:"node-ids",param:"selectedNodeIds",groupProps:{collapsible:!1,background:"modalBackground",padding:[3],flex:"1",overflow:"hidden"},height:"auto",width:"100%"})),o.createElement(ll.Ay,{basis:60,baseWidth:60,flex:!1,title:"Dynamic filters",help:"Filter your nodes for this page. 
Count next to each item refers to the number of nodes that will be selected and used across the page.",includedTabs:["filters"],loaded:!0,onClose:()=>{},groupProps:{background:"modalBackground"},background:"modalBackground"})))},il=(0,C.default)(r.TextBig).withConfig({displayName:"styled__NodeName",componentId:"sc-1dfnbe3-0"})(["font-family:monospace;"]),sl={fulfilled:{text:"Success",color:"primary"},rejected:{text:"Failed",color:"error"},default:{text:"Processing...",color:"text"}},cl=e=>{var t;let{label:n,result:l={}}=e;const{status:a,reason:i}=l,s=(null===i||void 0===i||null===(t=i.response)||void 0===t||null===(t=t.data)||void 0===t?void 0:t.errorMessage)||(null===i||void 0===i?void 0:i.message)||"Something went wrong...",{text:c,color:d}=(0,o.useMemo)((()=>sl[a]||sl.default),[a]),[u,m]=(0,L.A)(!0);return o.createElement(r.Flex,{column:!0,gap:1},o.createElement(r.Flex,{gap:2,alignItems:"center"},o.createElement(r.TextBig,{color:d},c),o.createElement(il,null,n),i?o.createElement(r.Icon,{name:"chevron_down",width:"10px",height:"10px",color:"textLite",cursor:"pointer",onClick:m,rotate:u?2:null}):null),i?o.createElement(r.Collapsible,{open:u,padding:[0,0,0,2]},o.createElement(r.Text,{color:"error"},(0,Q.Zr)(s))):null)},dl=e=>{let{selectedNodes:t,results:n=[]}=e;return o.createElement(r.Flex,{column:!0,gap:4,padding:[4,0]},n.length?null:o.createElement(r.TextBig,null,"Submitting to multiple nodes..."),o.createElement(r.Flex,{column:!0,gap:3},t.map((e=>{let{value:t,label:l}=e;const r=n.find((e=>{let{nodeId:n}=e;return n==t}));return o.createElement(cl,{key:t,label:l,result:r})}))))},ul=e=>{let{id:t,formRef:n,onClose:l}=e;const{nodes:r}=H(),a=(0,i.a7)({extraKey:k.q4,merge:!1}),s=r.filter((e=>{let{value:t}=e;return a.includes(t)})),{isNewItem:c,setIsDirty:d}=K(),[u,m]=(0,o.useState)([]),p=(0,O.hp)({id:t,isNewItem:c}),[g,,h,f]=(0,L.A)(),[b,,E,v]=(0,L.A)(),[y,C]=(0,o.useState)(),x=(0,O.Hj)(),[w,S,,I]=(0,G.A)(),A=(0,o.useCallback)((()=>{null===l||void 0===l||l(),c&&y&&x()}),[l,y,c,x]),T=(0,o.useCallback)((()=>{var e;const t=null===n||void 0===n||null===(e=n.current)||void 0===e||null===(e=e.state)||void 0===e?void 0:e.formData;m([]),h(),E(),p({nodes:s,payload:t}).then((e=>{d(!1),m(e.map(((e,t)=>{var n;return{...e,nodeId:null===(n=s[t])||void 0===n?void 0:n.value}})));const t=e.map((e=>{let{status:t}=e;return t}));t.find((e=>"fulfilled"==e))?t.some((e=>"rejected"==e))?I({text:"Some submissions have failed."}):w({text:"All nodes updated successfully."}):S({text:"All submissions have failed."})})).catch(S).finally((()=>{f(),C(!0)}))}),[n,s,p,h,f,m,d,x,C]);return{selectedNodes:s,onSubmit:T,onCloseButtonClick:A,loading:g,inProcess:b,hideProcess:v,results:u,submitFinished:y}},ml=(0,se.A)(r.Button),pl=e=>{let{id:t,formRef:n,onClose:l}=e;const{selectedNodes:a,onSubmit:i,onCloseButtonClick:s,loading:c,inProcess:d,hideProcess:u,results:m,submitFinished:p}=ul({id:t,formRef:n,onClose:l});return o.createElement(r.Modal,{backdropProps:{backdropBlur:!0}},o.createElement(r.ModalContent,{width:{min:100,base:200},background:"modalBackground",round:2,overflow:"hidden"},o.createElement(r.ModalHeader,{background:"modalBackground"},o.createElement(r.H4,null,"Select nodes")),o.createElement(r.ModalBody,null,d?o.createElement(dl,{selectedNodes:a,results:m}):o.createElement(al,null)),o.createElement(r.ModalFooter,{gap:2},d?o.createElement(ml,{type:"button",flavour:"hollow",small:!0,label:"Back",icon:"arrow_left",iconColor:"primary",iconSize:"small",onClick:u,tooltip:"Go back to the node selector",disabled:c}):null,o.createElement(r.Button,{type:"button",flavour:"hollow",small:!0,label:"Close",onClick:s,disabled:c}),o.createElement(ml,{type:"button",label:"Submit",small:!0,onClick:i,tooltip:a.length?null:"Select some nodes",disabled:c||!a.length||p}))))},gl=(0,Ae.A)(r.Button),hl=e=>{var t;let{id:n,formRef:s,loading:c}=e;const[d,u,,m]=(0,L.A)(),[p,,g,h]=(0,L.A)(),{state:f}=(0,O.L5)(),{nodeIds:b=[]}=f||{},{node:E,nodes:v}=H(),y=(v||[]).length>1,{isNewItem:C,entityProps:x={},submitType:w,setSubmitType:k}=K(),{cmds:S=[]}=x,I=C||S.includes("update"),{user_disabled:A}=ze(),T=J(),F=(0,On.A)(),B=(0,Ln.uW)("isAnonymous"),D=(0,a.JT)("billing:Manage"),[,N]=(0,i.Oj)({extraKey:"dyncfg",merge:!1});(0,o.useEffect)((()=>{g&&b.length&&(N(b),k("multiple_submit"))}),[g,b,N,k]);const P=(0,o.useCallback)((e=>T?I?e||null:"You don't have permissions to save your changes":"Please provide a name first"),[T,I]),_=(0,o.useMemo)((()=>({submit:{label:"Submit",tooltip:P("Submit your changes to ".concat(E.label," node"))},multiple_submit:{type:"button",label:"Submit to Multiple Nodes",tooltip:P("Submit your changes to another node or to multiple nodes"),onClick:g},nonPaid:{type:"button",label:"Upgrade",tooltip:"Upgrade your plan to use this feature.",disabled:!D||B,onClick:F}})),[g,I,D,B,F]),M=!I||c||A||!T,{hidden:R,isDisabled:j,tooltipText:z,showUpgradeButton:U}=(0,be.j)({edit:!0,tooltip:null===(t=_[w])||void 0===t?void 0:t.tooltip,disabled:M});return R?null:U?o.createElement(gl,(0,l.A)({small:!0,disabled:M,isLoading:c},_.nonPaid)):o.createElement(r.Flex,{position:"relative",alignItems:"center"},o.createElement(gl,(0,l.A)({small:!0,groupFirst:y,disabled:j,isLoading:c},_[w],{tooltip:z})),y?o.createElement(r.Button,{type:"button",icon:"chevron_down",tiny:!0,flavour:"hollow",groupLast:!0,onClick:u,disabled:j}):null,d?o.createElement(tl,{submitType:w,setSubmitType:k,closeMenu:m,node:E}):null,p?o.createElement(pl,{id:n,formRef:s,onClose:h}):null)},fl=(0,Ee.A)(be.A),bl=e=>{let{loading:t}=e;const{node:n}=H(),{id:l,formData:r,entityProps:a={}}=K(),{cmds:i=[]}=a,{user_disabled:s}=ze(),c=i.includes("test"),d=(0,O.EZ)({id:l,node:n}),u=J({defaultName:"test"}),m=t||s,[p,g]=X(),h=p({successMessage:"Successfully tested configuration"}),f=g(),b=(0,o.useCallback)((()=>{d({name:u,payload:r}).then(h).catch(f)}),[u,d,r,h,f]);return c?o.createElement(fl,{edit:!0,feature:"DyncnfItem",payload:{id:l,node:null===n||void 0===n?void 0:n.value},label:"Test",small:!0,tooltip:"Test your configuration",flavour:"hollow",onClick:b,disabled:m,type:"button"}):null},El=((0,Ae.A)(r.Button),e=>{let{id:t,formRef:n,loading:l,errors:a=[],openUserConfig:i}=e;return o.createElement(r.Flex,{background:"modalBackground",position:"sticky",bottom:0,gap:2,padding:[4,0,0,0],alignItems:"baseline",justifyContent:"between",zIndex:10},a.length?o.createElement(Zn,{id:t,errors:a}):o.createElement(r.Flex,null),o.createElement(r.Flex,{gap:2},null,o.createElement(bl,{loading:l}),o.createElement(hl,{id:t,formRef:n,loading:l})))}),vl=function(){let{id:e,error:t}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const n="string"==typeof t?{error_message:t}:"object"==typeof t?t:{},{error_message:l,message:a}=n;return
o.createElement(Jn.DL,{feature:"DyncnfForm",id:e,error:JSON.stringify(t)},o.createElement(r.Flex,{column:!0,gap:2},o.createElement(r.Flex,{alignItems:"center",gap:2},o.createElement(r.Icon,{name:"warning_triangle",color:"text"}),o.createElement(r.H3,null,"Error")),o.createElement(r.TextBig,null,l||a||k.Fn)))},yl=function(){let{id:e,node:t,isNewItem:n,data:l=k.gh,submitMethod:r,onSchemaFetch:a=m(),setIsDirty:i=m(),setError:s=m()}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const c=(0,o.useRef)(0),d=(0,o.useRef)(),{entityProps:u={}}=K(),{cmds:p=[]}=u,g=p.includes("userconfig"),[h,f,b]=(0,L.A)(g),{isLoading:E,loaded:v,formData:y,setFormData:C,schema:x,resourcesError:w,errors:S,onChange:I,onSubmit:A,onError:T,getUserConfig:F}=ne({id:e,node:t,data:l,submitMethod:r,userConfigIsOpen:h,onSchemaFetch:a,setIsDirty:i,setError:s}),{jsonSchema:B,uiSchema:D}=x||{};(0,o.useEffect)((()=>(c.current=Date.now(),()=>{c.current=0})),[]);const N=(0,o.useCallback)((e=>{const t=Date.now()-c.current,n=0==c.current||t<500;I(e,n)}),[I]),P=(0,o.useCallback)((()=>{setTimeout((()=>{if(c.current>0&&g){var e;const t=null===(e=d.current)||void 0===e||null===(e=e.state)||void 0===e?void 0:e.formData;F(t)}}),50)}),[c.current,d.current,g,F]);return v?w?o.createElement(vl,{id:e,error:w}):o.createElement(Jn.Ay,{feature:"DyncnfForm",id:e,isNewItem:n},o.createElement(Tn.A,{margin:[0,0,2],padding:[2,0,0,0],sidebar:o.createElement($n,{isOpen:h,toggleIsOpen:f,canGetUserConfig:g,onOpenTerminal:P}),containerProps:{id:"dyncfg-form-layout",border:{side:"top",color:"border"}}},o.createElement(An,{ref:d,className:"dyncfg-form",disabled:E,schema:B,uiSchema:D,formData:y,formContext:{setFormData:C},templates:qt,widgets:Sn,validator:lt.Ay,onError:T,onChange:N,onSubmit:A},o.createElement(El,{id:e,formRef:d,loading:E,errors:S,openUserConfig:b})))):o.createElement(Ze.A,{title:"Loading form...",background:"modalBackground"})},Cl=e=>{let{id:t,node:n,children:l}=e;const{setFormData:r}=K(),{loaded:a,value:i,hasError:s,error:c,refresh:d}=(0,O.SW)({id:t,node:n});return(0,o.useEffect)((()=>{a&&!s&&r(i)}),[a,s,r,i]),a?s?o.createElement(vl,{id:t,error:c}):o.Children.map(l,(e=>o.isValidElement(e)?o.cloneElement(e,{id:t,node:n,data:{loaded:a,value:i,hasError:s,refresh:d}}):e)):null},xl=e=>{let{formData:t={},children:n}=e;const{formData:l}=K();return o.Children.map(n,(e=>o.isValidElement(e)?o.cloneElement(e,{data:{...k.gh,value:{...t,...l}}}):e))},wl=e=>{let{id:t,formData:n,...r}=e;const{node:a}=H(),{isNewItem:i}=K(),s=(0,O.Nj)({id:t,node:a}),c=(0,O.xS)({id:t,node:a});return i?o.createElement(xl,{formData:n},o.createElement(yl,(0,l.A)({id:t,node:a,isNewItem:i,submitMethod:s},r))):o.createElement(Cl,{id:t,node:a},o.createElement(yl,(0,l.A)({submitMethod:c},r)))},kl=e=>{var t,n;let{entries:r=[],rowHeight:a=k.FL,level:i=1}=e;const s=(0,o.useRef)(),{node:c,path:d,containerRef:u}=H(),[m,p]=(0,O._F)({node:c,path:d}),g=null===(t=u.current)||void 0===t||null===(t=t.getBoundingClientRect())||void 0===t?void 0:t.bottom,h=null===(n=s.current)||void 0===n||null===(n=n.getBoundingClientRect())||void 0===n?void 
0:n.top,f=g&&h?g-h:500,b=Math.floor(f/a),E=r.length>b,v=(0,tt.Te)({count:r.length,getScrollElement:()=>s.current,enableSmoothScroll:!0,estimateSize:()=>a,overscan:5}),y=(0,o.useCallback)((0,et.s)(300,(e=>{p(e.target.scrollTop)})),[p]);return(0,o.useEffect)((()=>{s.current&&(s.current.scrollTop=m)}),[]),E?o.createElement("div",{ref:s,style:{height:"".concat(f,"px"),overflow:"auto"},onScroll:y},o.createElement("div",{style:{minHeight:"".concat(v.getTotalSize(),"px"),width:"100%",position:"relative"}},v.getVirtualItems().map((e=>{const[t,n]=r[e.index];return o.createElement("div",{className:"dyncnf-virtual-row",key:e.key,style:{position:"absolute",top:0,left:0,width:"100%",transform:"translateY(".concat(e.start,"px)"),overflow:"hidden"},ref:v.measureElement,"data-index":e.index,"data-key":t},o.createElement(nt,{virtualIndex:e.index,virtualRowStart:e.start,entryKey:t,entityProps:(0,S.bn)(n),configItemLevel:i},o.createElement(wl,(0,l.A)({id:t},n))))})))):r.map((e=>{let[t,n]=e;return o.createElement(nt,{key:t,entryKey:t,entityProps:(0,S.bn)(n),configItemLevel:i},o.createElement(wl,(0,l.A)({id:t},n)))}))},Sl=e=>{let{entries:t=[],level:n=0}=e;const{testString:r,testProps:a}=f();return t.map((e=>{let[t,i]=e;const{templateChildren:s}=i||{},c=(0,S.bn)(i);if(s){const e=Object.entries(s).filter((e=>{let[t,n]=e;return r(t)&&a(n)}));return o.createElement(nt,{key:t,flavour:"template",entryKey:t,entityProps:c,configItemLevel:n,isOpen:!0},o.createElement(kl,{entries:e,level:n}))}return o.createElement(nt,{key:t,entryKey:t,entityProps:c,configItemLevel:n},o.createElement(wl,(0,l.A)({id:t},c)))}))},Il=(0,o.memo)(Sl),Al=function(){let{tree:e={},level:t=1,isVirtual:n}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const{testString:l,testProps:r}=f();return n?o.createElement(Il,{entries:Object.entries(e),level:t}):Object.entries(e).map((e=>{let[n,a]=e;const i=n.match(/\/[^/]+/g),s=Object.keys(a).filter((e=>{const{templateChildren:t={}}=a[e],o=Object.values(t).some((e=>r(e)));return!(!r(a[e])&&!o)&&(l(n)||l(e)||Object.keys(t).some((e=>l(e))))})).reduce(((e,t)=>({...e,[t]:a[t]})),{});if(1==i.length)return o.createElement(Xe,{key:n,title:i[0],level:t,isOpen:!0},o.createElement(Al,{tree:s,level:t+1,isVirtual:!0}));const c=i.slice(1).join("");return o.createElement(Xe,{key:c,title:i[0],level:t,isOpen:!0},o.createElement(Al,{tree:{[c]:s},level:t+1}))}))},Tl=(0,o.memo)(Al),Fl=()=>o.createElement(r.Flex,null,o.createElement(r.TextBigger,null,"No available configuration found.")),Bl=()=>{const{node:e,path:t}=H(),n=(0,O.sh)({node:e,path:t}),{entityProps:r,...a}=n||{};return n?o.createElement(Xe,{flavour:"entity",title:"".concat(n.template,":placeholder"),entityProps:{...r,...a},isNewItem:!0},o.createElement(wl,(0,l.A)({id:n.template,formData:n.formData},r))):null};var Dl,Nl=n(37618);const Pl="dyncfg-edit-alet-warning-dismiss-".concat(null===(Dl=window.envSettings)||void 0===Dl?void 0:Dl.visitor),_l={lineHeight:1.6},Ll=e=>{let{alertName:t,nodeName:n}=e;const l=localStorage.getItem(Pl),[a,,,i]=(0,L.A)(!l),[s,c]=(0,o.useState)(),d=(0,o.useCallback)((e=>{c(e)}),[c]),u=(0,o.useCallback)((()=>{s&&localStorage.setItem(Pl,!0),i()}),[s,i]);return a?o.createElement(r.Modal,{backdropProps:{backdropBlur:!0}},o.createElement(r.ModalContent,null,o.createElement(r.ModalHeader,null,o.createElement(r.H4,null,"Editing Alerts")),o.createElement(r.ModalBody,null,o.createElement(r.Flex,{column:!0,gap:2,width:{max:120}},o.createElement(r.TextBig,_l,"You are about to edit the alert 
",o.createElement(r.TextBig,{color:"primary"},t),n?o.createElement(o.Fragment,null," ","on node ",o.createElement(r.TextBig,{color:"primary"},n)):null,"."),o.createElement(r.TextBig,_l,'Keep in mind that alerts in Netdata are templates that are applied to all instances of the same kind (e.g. editing a disk alert, means you are changing the alerts for all disks on that node). If you want to overwrite the rules for a single instance, do not change the current rules. Instead add a new rule and select "Apply to a specific instance" for that rule, matching the instance you want to overwrite.'),Nl.Ay?o.createElement(r.TextBig,_l,'To apply an alert on all your currently connected nodes, edit it and select "Submit to Multiple Nodes".'):o.createElement(r.TextBig,_l,'When having parents and you have connected both parents and children to Netdata Cloud, the alert needs to be altered on both Netdata parents and children, otherwise one of the two will be using the old version. To apply an alert on multiple nodes, edit it and select "Submit to Multiple Nodes".'))),o.createElement(r.ModalFooter,{justifyContent:"between"},o.createElement(r.Flex,{gap:2,alignItems:"center"},o.createElement(r.Checkbox,{checked:s,onChange:d}),o.createElement(r.TextBig,null,"Do not show this again")),o.createElement(r.Button,{type:"button",flavour:"hollow",label:"OK",textTransform:"uppercase",onClick:u})))):null},Ml={alertName:{itemTitlePrefix:k.DR,treeEntryKey:k.$7}},Ol=e=>{var t;let{tree:n}=e;const{state:r}=(0,O.L5)(),{alertName:a,nodeId:i}=r||{},c=(0,s.xY)(i,"name"),{itemTitlePrefix:d,treeEntryKey:u}=Ml.alertName,m=a?"".concat(d).concat(a):null,p=n&&m?null===(t=n[u])||void 0===t?void 0:t[m]:null;return a?o.createElement(o.Fragment,null,o.createElement(Xe,{flavour:"entity",title:m,entityProps:p,isOpen:!0,isEditFromState:!0},o.createElement(wl,(0,l.A)({id:m},p))),o.createElement(Ll,{alertName:a,nodeName:c})):null},Rl=(0,j.A)((()=>n.e(4414).then(n.bind(n,74414))),"DyncfgStyles"),jl=e=>{let{node:t,nodes:n,path:l,tree:a,inModal:i}=e;const s=(0,o.useRef)();return o.createElement(U.Provider,{value:{node:t,nodes:n,path:l,inModal:i,containerRef:s}},o.createElement(o.Suspense,{fallback:""},o.createElement(Rl,null)),o.createElement(r.Flex,{ref:s,width:"100%",padding:[0,2],column:!0},o.createElement(r.Flex,{column:!0},Object.keys(a).length?o.createElement(Tl,{tree:(0,Y.m8)(a)}):o.createElement(Fl,null)),o.createElement(Bl,null),o.createElement(Ol,{tree:a})))};var zl=n(98496);const Hl={ErrForbidden:{title:"No permissions",Content:()=>o.createElement(o.Fragment,null,o.createElement(r.Text,null,"You don't have permissions to view the configurations."),o.createElement(r.Text,null,"Please contact the Space administrators if this is unexpected."))},ErrNoConfigurableNodes:{title:"No configurable nodes found",Content:()=>o.createElement(r.Text,null,"We couldn't find any node that can be configured from the UI.")},default:{title:"Error",Content:()=>o.createElement(o.Fragment,null,o.createElement(r.Text,null,"Something went wrong, please check again later."),o.createElement(r.Text,null,"If the problem persists feel free to contact us with a"," ",o.createElement(rt.A,{href:"https://github.com/netdata/netdata-cloud/issues/new/choose",target:"_blank",rel:"noopener noreferrer"},"ticket")))}},Ul=function(){let{error:e}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const{errorMsgKey:t}=e||{},{title:n,Content:l}=Hl[t]||Hl.default;return 
o.createElement(Jn.DL,{feature:"DyncfgMainTabs",errorMsgKey:t},o.createElement(zl.A,{title:n},o.createElement(r.Flex,{column:!0,gap:1},o.createElement(l,null))))},Wl=e=>{let{node:t}=e;const n=(0,o.useRef)(),{state:l}=(0,O.L5)(),{isAlert:r}=l||{},{loaded:a,value:i,hasError:s}=(0,O.wd)({node:t}),c=a&&i&&!s?Object.keys(i.tree||{}).reduce(((e,t)=>{t.startsWith("/")||(t="/".concat(t));const n=t.match(/\/[^/]+/g);if(n.length){const t=n[0],l=(0,Q.Zr)(t.replace(/^\/?/,""));e.find((e=>e.value==t))||e.push({value:t,label:l})}return e}),[]):[],[d,u]=(0,o.useState)(0);return(0,o.useEffect)((()=>{if(c.length&&r&&!n.current){const e=c.map((e=>{let{value:t}=e;return t})).indexOf("/health");u(e),n.current=!0}}),[r,c,u,n.current]),{loaded:a,tabs:c,selectedTab:d,setSelectedTab:u}},Vl=e=>{var t;let{flavour:n,inModal:l,selectedNode:a,setNode:i,options:s}=e;const{loaded:c,tabs:d,selectedTab:u,setSelectedTab:m}=Wl({node:a}),{loaded:g,value:h,hasError:f,error:v}=(0,O.wd)({node:a,path:null===d||void 0===d||null===(t=d[u])||void 0===t?void 0:t.value}),{tree:y}=h||{},C=l?"60px":"110px",x=(0,o.useMemo)((()=>({height:"settings"==n?"calc(100% - ".concat(C,")"):"100%",padding:[4,0],border:{side:"top",color:"border"},overflow:{vertical:"auto"}})),[]);return s.length?o.createElement(b,null,o.createElement(r.Flex,{column:!0,height:"100%",gap:4},o.createElement(r.Flex,{alignItems:"center",justifyContent:"between"},o.createElement(r.Flex,{alignItems:"center",gap:4},o.createElement(p,{node:a,setNode:i,options:s}),o.createElement(E,null)),o.createElement(r.Flex,{alignItems:"center",gap:3},o.createElement(M,{tree:y}),o.createElement(R,null))),c?d.length?o.createElement(r.Tabs,{height:"100%",selected:u,onChange:m},d.map((e=>{let{label:t,value:n}=e;return o.createElement(r.Tab,{key:n,label:t},o.createElement(r.Flex,x,g?f?o.createElement(r.TextBigger,null,"Something went wrong"):y?o.createElement(jl,{key:JSON.stringify({value:n,selectedNode:a}),node:a,nodes:s,path:n,tree:y,inModal:l}):o.createElement(Ul,null):o.createElement(Ze.A,{height:"calc(100% - ".concat(C,")"),title:"Loading..."})))}))):o.createElement(Ul,{error:v}):o.createElement(Ze.A,{height:"100%",title:"Loading..."}))):o.createElement(r.TextBigger,null,"There are no available nodes.")};var Kl=n(20378),ql=n(78459),Yl=n(15255),$l=n(68741),Gl=n(40982);const Jl=e=>{let{message:t,title:n,footer:a,...i}=e;const s=(null===i||void 0===i?void 0:i["data-testid"])||"dyncnfError";return o.createElement(r.Flex,(0,l.A)({alignItems:"center",column:!0,"data-testid":s,flex:!0,gap:3,justifyContent:"center",padding:[0,20]},i),o.createElement(r.H3,{"data-testid":"".concat(s,"-title")},n),o.createElement(r.TextBig,{color:"textDescription","data-testid":"".concat(s,"-message")},t),a)},Ql=e=>{let{flavour:t,inModal:n,node:l,nodes:r}=e;const{options:a,node:i,setNode:s}=d({selectedNode:l,nodes:r});return null!==i&&void 0!==i&&i.value?o.createElement(Vl,{flavour:t,inModal:n,selectedNode:i,setNode:s,options:a}):o.createElement(Ul,{error:{errorMsgKey:"ErrNoConfigurableNodes"}})},Zl=e=>{const t=(0,i.eO)({extraKey:"nodesView",merge:!1,scoped:!0,roomSlug:e.isVirtual?Nl.gB:null});return o.createElement(Ql,(0,l.A)({},e,{nodes:t}))},Xl=e=>{const{nodes:t}=(0,c.A)({polling:!1});return o.createElement(Ql,(0,l.A)({},e,{nodes:t}))},eo=e=>{const t=(0,ql.OS)();return o.createElement(r.Button,(0,l.A)({label:"Get a fresh agent token",onClick:t},e))},to="Configurations expose sensitive information about your systems and applications. 
To protect your privacy, Netdata exposes this information only to logged-in users and claimed agents. When viewing Configurations directly on a Netdata Agent UI, this information is sent directly from the Netdata Agent to your web browser, without exposing it to any third parties.",no={notLoggedIn:{title:"Sign in to Netdata to use Configurations",description:to,footer:o.createElement($l.A,null)},notClaimed:{title:"Connect this agent to Netdata to use Configurations",description:to,footer:o.createElement(Gl.A,null)},noAccess:{title:"This agent belongs to a Netdata Space you are not member of",description:to,footer:o.createElement(r.TextBig,{color:"textDescription"},"Ask for an invitation from the administrators of the Netdata Space of the agent to use configurations.")},bearerError:{title:"You are not authorized to use Configurations",description:to,footer:o.createElement(eo,null)}},lo=e=>{let{flavour:t="settings",node:n,inModal:r}=e;const i=(0,_n.dg)(),c=(0,a.JT)("agent:ReadDynCfg"),{state:d}=(0,O.L5)(),{nodeId:u}=d||{},m=(0,s.xY)(u),p={flavour:t,node:u?m:n,inModal:r},[g]=(0,Kl.Q8)(),[{bearerProtection:h,error:f}]=(0,ql.f7)(),[{canBeClaimed:b,cloudStatus:E}]=(0,Yl.RJ)();if(i&&f&&h){const{title:e,description:t,footer:n}=(e=>{let{userStatus:t,userNodeStatus:n,accessError:l,canBeClaimed:o}=e;return no[t]?no[t]:l?no.notLoggedIn:o?no.notClaimed:no[n]?no[n]:no.bearerError})({...g,bearerError:f,canBeClaimed:b,cloudStatus:E});return o.createElement(Jl,{title:e,message:t,footer:n})}return c?i?o.createElement(Zl,(0,l.A)({isVirtual:i},p)):o.createElement(Xl,p):o.createElement(Ul,{error:{errorMsgKey:"ErrForbidden"}})}}}]); \ No newline at end of file diff --git a/src/web/gui/v2/8938.5116982f737a2ef85330.chunk.js b/src/web/gui/v2/8938.5116982f737a2ef85330.chunk.js deleted file mode 100644 index 969277516..000000000 --- a/src/web/gui/v2/8938.5116982f737a2ef85330.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="f94a1e45-92e1-479f-8dd1-5b8ba1dbfadb",e._sentryDebugIdIdentifier="sentry-dbid-f94a1e45-92e1-479f-8dd1-5b8ba1dbfadb")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[8938],{68938:function(e,t,n){var o;e.exports=(o=n(96540),function(){var e={378:function(e){"use strict";e.exports=function e(t,n){if(t===n)return!0;if(t&&n&&"object"==typeof t&&"object"==typeof n){if(t.constructor!==n.constructor)return!1;var o,r,i;if(Array.isArray(t)){if((o=t.length)!=n.length)return!1;for(r=o;0!=r--;)if(!e(t[r],n[r]))return!1;return!0}if(t.constructor===RegExp)return t.source===n.source&&t.flags===n.flags;if(t.valueOf!==Object.prototype.valueOf)return t.valueOf()===n.valueOf();if(t.toString!==Object.prototype.toString)return t.toString()===n.toString();if((o=(i=Object.keys(t)).length)!==Object.keys(n).length)return!1;for(r=o;0!=r--;)if(!Object.prototype.hasOwnProperty.call(n,i[r]))return!1;for(r=o;0!=r--;){var a=i[r];if(!e(t[a],n[a]))return!1}return!0}return t!=t&&n!=n}},145:function(e,t,n){"use strict";n.r(t),t.default='@keyframes 
spin{to{transform:rotate(360deg)}}.tf-v1-popover{bottom:96px;position:fixed;right:16px;z-index:10001}.tf-v1-popover.open{max-width:100%;min-height:360px;min-width:360px}.tf-v1-popover-wrapper{position:absolute;top:50%;left:50%;transform:translate(-50%, -50%);height:100%;width:100%;opacity:0;transition:opacity .25s ease-in-out;border-radius:4px;box-shadow:rgba(0,0,0,.08) 0 2px 4px,rgba(0,0,0,.06) 0 2px 12px}.tf-v1-popover-wrapper iframe{width:100%;height:100%;border:none;overflow:hidden;border-radius:8px}.tf-v1-popover-close{display:none}.tf-v1-popover-button{width:54px;height:54px;position:fixed;box-shadow:0 2px 12px rgba(0,0,0,.06),0 2px 4px rgba(0,0,0,.08);color:#fff;right:26px;bottom:26px;border-radius:50%;display:flex;align-items:center;justify-content:center;cursor:pointer;background:#3a7685;line-height:0;border:none;padding:0}.tf-v1-popover-button-icon{width:54px;height:54px;font-size:24px;border-radius:50%;overflow:hidden;display:flex;justify-content:center;align-items:center}.tf-v1-popover-button-icon svg.default{margin-top:6px}.tf-v1-popover-button-icon svg,.tf-v1-popover-button-icon img{max-width:54px;max-height:54px}.tf-v1-popover-button-icon img{width:100%;height:100%;object-fit:cover;border-radius:50%}.tf-v1-popover-tooltip{position:fixed;right:94px;bottom:33px;max-width:240px;padding:10px 25px 10px 10px;border-radius:8px;background:#fff;box-shadow:0 2px 4px rgba(0,0,0,.08),0 2px 12px rgba(0,0,0,.06);font-size:14px;font-family:Helvetica,Arial,sans-serif;line-height:22px}.tf-v1-popover-tooltip::before{background-color:#fff;content:"";display:block;width:12px;height:12px;position:absolute;right:-4px;bottom:15px;transform:rotate(45deg);box-shadow:2px -2px 2px 0 rgba(0,0,0,.06)}.tf-v1-popover-tooltip-text{overflow:hidden}.tf-v1-popover-tooltip-close{color:rgba(0,0,0,.2);cursor:pointer;margin-left:4px;display:inline-block;width:20px;height:20px;font-size:18px;text-align:center;position:absolute;top:8px;right:6px}.tf-v1-popover-tooltip-close:hover{color:rgba(0,0,0,.3)}.tf-v1-popover-tooltip.closing{transition:opacity .25s ease-in-out;opacity:0}.tf-v1-popover-unread-dot{width:8px;height:8px;border-radius:50%;background-color:#fa6b05;border:2px solid #fff;position:absolute;top:2px;right:2px}.tf-v1-popover-unread-dot.closing{transition:opacity .25s ease-in-out;opacity:0}.tf-v1-spinner{border:3px solid #aaa;font-size:40px;width:1em;height:1em;border-radius:.5em;box-sizing:border-box;animation:spin 1s linear infinite;border-top-color:#fff;position:absolute;top:50%;left:50%;margin:-20px 0 0 -20px}@media(max-width: 480px){.tf-v1-popover.open{top:0;left:0;bottom:0;right:0;width:100% !important;height:100% !important}.tf-v1-popover.open .tf-v1-popover-close{display:block}.tf-v1-popover-wrapper{border-radius:0;box-shadow:none}.tf-v1-popover-wrapper iframe{border-radius:0}.tf-v1-popover-close{display:block;padding:0;margin:0;position:absolute;font-size:32px;font-weight:normal;line-height:24px;width:24px;height:24px;text-align:center;text-transform:none;cursor:pointer;opacity:.75;transition:opacity .25s ease-in-out;text-decoration:none;color:#000;top:6px;right:8px;background:none;border:none;border-radius:0;z-index:1;opacity:0}.tf-v1-popover-close:hover{opacity:1}}@media(max-width: 480px)and (min-width: 481px){.tf-v1-popover-close{color:#fff !important}}@media(max-width: 480px){.tf-v1-popover-button{width:44px;height:44px;right:8px;bottom:8px}.tf-v1-popover-button-icon{font-size:20px}.tf-v1-popover-button-icon svg{margin-top:4px;max-height:24px;max-width:24px}.tf-v1-popover-button-icon 
img{max-width:44px;max-height:44px}.tf-v1-popover-tooltip{position:fixed;right:66px;bottom:8px;left:auto;font-size:12px}.tf-v1-popover-tooltip::before{bottom:14px}.tf-v1-popover-unread-dot{top:0;right:0}.tf-v1-spinner{border:3px solid #aaa;font-size:32px;width:1em;height:1em;border-radius:.5em;box-sizing:border-box;animation:spin 1s linear infinite;border-top-color:#fff;position:absolute;top:50%;left:50%;margin:-16px 0 0 -16px}}'},792:function(e,t,n){"use strict";n.r(t),t.default="@keyframes spin{to{transform:rotate(360deg)}}.tf-v1-popup{position:fixed;top:0;left:0;width:100%;height:100%;background:rgba(0,0,0,.75);transition:opacity .25s ease-in-out;z-index:10001;display:flex;align-items:center;justify-content:center}.tf-v1-popup .tf-v1-iframe-wrapper{position:relative;transition:opacity .25s ease-in-out;min-width:360px;min-height:360px}.tf-v1-popup .tf-v1-iframe-wrapper iframe{width:100%;height:100%;border:none;overflow:hidden;border-radius:8px}.tf-v1-popup .tf-v1-close{display:block;padding:0;margin:0;position:absolute;font-size:32px;font-weight:normal;line-height:24px;width:24px;height:24px;text-align:center;text-transform:none;cursor:pointer;opacity:.75;transition:opacity .25s ease-in-out;text-decoration:none;color:#000;top:-34px;right:0;background:none;border:none;border-radius:0}.tf-v1-popup .tf-v1-close:hover{opacity:1}@media(min-width: 481px){.tf-v1-popup .tf-v1-close{color:#fff !important}}.tf-v1-popup .tf-v1-spinner{border:3px solid #aaa;font-size:40px;width:1em;height:1em;border-radius:.5em;box-sizing:border-box;animation:spin 1s linear infinite;border-top-color:#fff;position:absolute;top:50%;left:50%;margin:-20px 0 0 -20px}@media(max-width: 480px){.tf-v1-popup{width:100% !important;height:100% !important}.tf-v1-popup .tf-v1-iframe-wrapper{position:relative;transition:opacity .25s ease-in-out;min-width:100%;min-height:100%}.tf-v1-popup .tf-v1-iframe-wrapper iframe{border-radius:0}.tf-v1-popup .tf-v1-close{display:block;padding:0;margin:0;position:absolute;font-size:32px;font-weight:normal;line-height:24px;width:24px;height:24px;text-align:center;text-transform:none;cursor:pointer;opacity:.75;transition:opacity .25s ease-in-out;text-decoration:none;color:#000;top:6px;right:8px;background:none;border:none;border-radius:0}.tf-v1-popup .tf-v1-close:hover{opacity:1}}@media(max-width: 480px)and (min-width: 481px){.tf-v1-popup .tf-v1-close{color:#fff !important}}"},838:function(e,t,n){"use strict";n.r(t),t.default="@keyframes spin{to{transform:rotate(360deg)}}.tf-v1-sidetab{position:fixed;top:50%;right:0;width:400px;height:580px;transform:translate(100%, -50%);box-shadow:0 2px 4px rgba(0,0,0,.08),0 2px 12px rgba(0,0,0,.06);z-index:10001;will-change:transform}.tf-v1-sidetab.ready{transition:transform 400ms cubic-bezier(0.5, 0, 0.75, 0)}.tf-v1-sidetab iframe{width:100%;height:100%;border:none;overflow:hidden;border-radius:8px 0 0 8px}.tf-v1-sidetab.open{transform:translate(0, -50%)}.tf-v1-sidetab-wrapper{position:relative;height:100%}.tf-v1-sidetab-button{position:absolute;top:50%;left:-48px;transform:rotate(-90deg) translateX(-50%);transform-origin:left top;min-width:100px;max-width:540px;height:48px;display:flex;align-items:center;padding:12px 16px;border-radius:8px 8px 0 0;color:#fff;box-shadow:0 2px 4px rgba(0,0,0,.08),0 2px 12px 
rgba(0,0,0,.06);background-color:#3a7685;cursor:pointer;border:0;text-decoration:none;outline:none}.tf-v1-sidetab-button-text{flex:1;font-size:18px;font-family:Helvetica,Arial,sans-serif;white-space:nowrap;overflow:hidden;text-overflow:ellipsis}.tf-v1-sidetab-button-icon{width:24px;height:24px;font-size:24px;transform:rotate(90deg);margin-right:12px;position:relative;order:-1}.tf-v1-sidetab-button-icon>img{width:100%;height:100%;object-fit:contain}.tf-v1-sidetab-close{display:none}.tf-v1-sidetab .tf-v1-spinner{border:3px solid #aaa;font-size:24px;width:1em;height:1em;border-radius:.5em;box-sizing:border-box;animation:spin 1s linear infinite;border-top-color:#fff;position:absolute;top:50%;left:50%;margin:-12px 0 0 -12px;top:0;left:0;margin:0}@media(max-width: 480px){.tf-v1-sidetab{transition:unset}.tf-v1-sidetab.ready{transition:unset}.tf-v1-sidetab.open{top:0;left:0;right:0;bottom:0;transform:translate(0, 0);width:100% !important;height:100% !important}.tf-v1-sidetab-close{display:block;padding:0;margin:0;position:absolute;font-size:32px;font-weight:normal;line-height:24px;width:24px;height:24px;text-align:center;text-transform:none;cursor:pointer;opacity:.75;transition:opacity .25s ease-in-out;text-decoration:none;color:#000;top:6px;right:8px;background:none;border:none;border-radius:0;display:block;z-index:1}.tf-v1-sidetab-close:hover{opacity:1}}@media(max-width: 480px)and (min-width: 481px){.tf-v1-sidetab-close{color:#fff !important}}"},630:function(e,t,n){"use strict";n.r(t),t.default="@keyframes spin{to{transform:rotate(360deg)}}.tf-v1-slider{position:fixed;top:0;left:0;width:100%;height:100%;background:rgba(0,0,0,.75);transition:opacity .25s ease-in-out;z-index:10001}.tf-v1-slider .tf-v1-iframe-wrapper{max-width:calc(100vw - 26px);height:100%;position:absolute;top:0;transition:right .5s ease-in-out,left .5s ease-in-out}.tf-v1-slider .tf-v1-iframe-wrapper iframe{width:100%;height:100%;border:none;overflow:hidden;border-radius:0}@media(min-width: 481px){.tf-v1-slider .tf-v1-iframe-wrapper iframe{border-radius:8px 0 0 8px}}.tf-v1-slider .tf-v1-close{display:block;padding:0;margin:0;position:absolute;font-size:32px;font-weight:normal;line-height:24px;width:24px;height:24px;text-align:center;text-transform:none;cursor:pointer;opacity:.75;transition:opacity .25s ease-in-out;text-decoration:none;color:#000;top:6px;right:8px;background:none;border:none;border-radius:0}.tf-v1-slider .tf-v1-close:hover{opacity:1}@media(min-width: 481px){.tf-v1-slider .tf-v1-close{color:#fff !important}}@media(min-width: 481px){.tf-v1-slider .tf-v1-close{top:4px;left:-26px}}.tf-v1-slider .tf-v1-close:hover{opacity:1}.tf-v1-slider .tf-v1-spinner{border:3px solid #aaa;font-size:40px;width:1em;height:1em;border-radius:.5em;box-sizing:border-box;animation:spin 1s linear infinite;border-top-color:#fff;position:absolute;top:50%;left:50%;margin:-20px 0 0 -20px}@media(min-width: 481px){.tf-v1-slider.left .tf-v1-iframe-wrapper iframe{border-radius:0 8px 8px 0}.tf-v1-slider.left .tf-v1-close{left:auto;right:-24px}}@media(max-width: 480px){.tf-v1-slider{width:100% !important;height:100% !important}.tf-v1-slider .tf-v1-iframe-wrapper{width:100% !important;max-width:100vw;height:100%;transition:unset}.tf-v1-slider .tf-v1-iframe-wrapper iframe{border-radius:none}.tf-v1-slider .tf-v1-close{display:block;padding:0;margin:0;position:absolute;font-size:32px;font-weight:normal;line-height:24px;width:24px;height:24px;text-align:center;text-transform:none;cursor:pointer;opacity:.75;transition:opacity .25s 
ease-in-out;text-decoration:none;color:#000;top:6px;right:8px;background:none;border:none;border-radius:0;left:auto}.tf-v1-slider .tf-v1-close:hover{opacity:1}}@media(max-width: 480px)and (min-width: 481px){.tf-v1-slider .tf-v1-close{color:#fff !important}}"},684:function(e,t,n){"use strict";n.r(t),t.default=".tf-v1-widget{width:100%;height:100%;min-height:inherit;position:relative}.tf-v1-widget iframe{width:100%;height:100%;border:none;overflow:hidden;border-radius:8px;min-height:inherit}.tf-v1-widget-close{display:none}.tf-v1-widget-iframe-overlay{width:100%;height:100%;border:none;overflow:hidden;border-radius:8px;position:absolute;top:0;left:0}.tf-v1-widget-fullscreen{position:fixed;top:0;left:0;right:0;bottom:0;z-index:10001;width:100% !important;height:100% !important}.tf-v1-widget-fullscreen .tf-v1-widget-close{display:block;display:block;padding:0;margin:0;position:absolute;font-size:32px;font-weight:normal;line-height:24px;width:24px;height:24px;text-align:center;text-transform:none;cursor:pointer;opacity:.75;transition:opacity .25s ease-in-out;text-decoration:none;color:#000;top:6px;right:8px;background:none;border:none;border-radius:0;z-index:1}.tf-v1-widget-fullscreen .tf-v1-widget-close:hover{opacity:1}@media(min-width: 481px){.tf-v1-widget-fullscreen .tf-v1-widget-close{color:#fff !important}}.tf-v1-widget-fullscreen iframe{border-radius:0}"},281:function(e,t,n){"use strict";var o=this&&this.__createBinding||(Object.create?function(e,t,n,o){void 0===o&&(o=n);var r=Object.getOwnPropertyDescriptor(t,n);r&&!("get"in r?!t.__esModule:r.writable||r.configurable)||(r={enumerable:!0,get:function(){return t[n]}}),Object.defineProperty(e,o,r)}:function(e,t,n,o){void 0===o&&(o=n),e[o]=t[n]}),r=this&&this.__exportStar||function(e,t){for(var n in e)"default"===n||Object.prototype.hasOwnProperty.call(t,n)||o(t,e,n)};Object.defineProperty(t,"__esModule",{value:!0}),r(n(11),t),r(n(739),t),r(n(860),t)},794:function(e,t,n){"use strict";var o=this&&this.__assign||function(){return o=Object.assign||function(e){for(var t,n=1,o=arguments.length;n\n '),a=null==e?void 0:e.startsWith("http");return o.innerHTML=a?"popover trigger icon button"):null!=e?e:r,o.dataset.testid="default-icon",o}(v.customIcon,v.buttonColor||u.buttonColor),_=function(){var e=document.createElement("div");e.className="tf-v1-spinner";var t=document.createElement("div");return t.className="tf-v1-popover-button-icon",t.dataset.testid="spinner-icon",t.append(e),t}(),P=c(),j=c("button","tf-v1-popover-close"),E=function(e,t){void 0===t&&(t={});var n=(0,i.getTextColor)(e),o=document.createElement("button");return o.className="tf-v1-popover-button",o.dataset.testid="tf-v1-popover-button",o.style.backgroundColor=e,o.style.color=n,(0,i.addAttributesToElement)(o,t),o}(v.buttonColor||u.buttonColor,v.buttonProps);(v.container||document.body).append(w),x.append(h),w.append(E),w.append(j),E.append(O);var C=function(){l&&l.parentNode&&(l.classList.add("closing"),setTimeout((function(){(0,i.unmountElement)(l)}),250))};v.tooltip&&v.tooltip.length>0&&(l=function(e,t){var n=document.createElement("span");n.className="tf-v1-popover-tooltip-close",n.dataset.testid="tf-v1-popover-tooltip-close",n.innerHTML="×",n.onclick=t;var o=document.createElement("div");o.className="tf-v1-popover-tooltip-text",o.innerHTML=e;var r=document.createElement("div");return 
r.className="tf-v1-popover-tooltip",r.dataset.testid="tf-v1-popover-tooltip",r.appendChild(o),r.appendChild(n),r}(v.tooltip,C),w.append(l)),v.notificationDays&&(v.enableSandbox||(0,a.canBuildNotificationDot)(e))&&(d=(0,a.buildNotificationDot)(),E.append(d)),h.onload=function(){w.classList.add("open"),x.style.opacity="1",j.style.opacity="1",s(_,P),(0,i.addCustomKeyboardListener)(I)};var S=function(){(0,i.isOpen)(x)||(C(),d&&(d.classList.add("closing"),v.notificationDays&&!v.enableSandbox&&(0,a.saveNotificationDotHideUntilTime)(e,v.notificationDays),setTimeout((function(){(0,i.unmountElement)(d)}),250)),setTimeout((function(){(0,i.isInPage)(x)?(x.style.opacity="0",j.style.opacity="0",x.style.display="flex",setTimeout((function(){w.classList.add("open"),x.style.opacity="1",j.style.opacity="1"})),s(O,P)):(w.append(x),s(O,_),x.style.opacity="0",j.style.opacity="0")})))},I=function(){var n;(0,i.isOpen)(w)&&((0,i.handlePreventReopenOnClose)(v,e),null===(n=t.onClose)||void 0===n||n.call(t),setTimeout((function(){v.keepSession?x.style.display="none":(0,i.unmountElement)(x),w.classList.remove("open"),s(P,O)}),250))};(0,i.setAutoClose)(b,v.autoClose,I);var T=function(){(0,i.isOpen)(x)?I():S()};return E.onclick=(0,i.invokeWithoutDefault)(T),j.onclick=(0,i.invokeWithoutDefault)(I),v.open&&!(0,i.isOpen)(x)&&(n=(0,i.handleCustomOpen)(S,v,e)),{open:S,close:I,toggle:T,refresh:g,focus:y,unmount:function(){(0,i.unmountElement)(w),v.open&&(null==n?void 0:n.remove)&&n.remove()}}}},1797:function(e,t,n){var o=this&&this.__createBinding||(Object.create?function(e,t,n,o){void 0===o&&(o=n);var r=Object.getOwnPropertyDescriptor(t,n);r&&!("get"in r?!t.__esModule:r.writable||r.configurable)||(r={enumerable:!0,get:function(){return t[n]}}),Object.defineProperty(e,o,r)}:function(e,t,n,o){void 0===o&&(o=n),e[o]=t[n]}),r=this&&this.__exportStar||function(e,t){for(var n in e)"default"===n||Object.prototype.hasOwnProperty.call(t,n)||o(t,e,n)};Object.defineProperty(t,"__esModule",{value:!0}),r(n(7528),t),r(n(6100),t)},1320:function(e,t){var n=this&&this.__assign||function(){return n=Object.assign||function(e){for(var t,n=1,o=arguments.length;nt&&(t&&function(e){var t=r();delete t[e],i(t)}(e),!0)},t.buildNotificationDot=function(){var e=document.createElement("span");return e.className="tf-v1-popover-unread-dot",e.dataset.testid="tf-v1-popover-unread-dot",e}},6100:function(e,t){Object.defineProperty(t,"__esModule",{value:!0})},9630:function(e,t,n){var o=this&&this.__rest||function(e,t){var n={};for(var o in e)Object.prototype.hasOwnProperty.call(e,o)&&t.indexOf(o)<0&&(n[o]=e[o]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols){var r=0;for(o=Object.getOwnPropertySymbols(e);r\n '),c=null==e?void 0:e.startsWith("http");return a.innerHTML=c?"popover trigger icon button"):null!=e?e:s,a.dataset.testid="default-icon",a}(d.customIcon,d.buttonColor||a.buttonColor,d.buttonTextColor,d.buttonTextSize),_=s(),P=s("button","tf-v1-sidetab-close");(d.container||document.body).append(b),g.append(p),b.append(w),b.append(P),w.append(O),w.append(x),setTimeout((function(){b.classList.add("ready")}),250),p.onload=function(){b.classList.add("open"),c(y,_),(0,i.addCustomKeyboardListener)(E)};var j=function(){(0,i.isOpen)(g)||((0,i.isInPage)(g)?(g.style.display="flex",b.classList.add("open"),c(O,_)):(b.append(g),c(O,y)))},E=function(){var t;(0,i.isOpen)(g)&&((0,i.handlePreventReopenOnClose)(d,e),null===(t=d.onClose)||void 
0===t||t.call(d),b.classList.remove("open"),setTimeout((function(){d.keepSession?g.style.display="none":(0,i.unmountElement)(g),c(_,O)}),250))};(0,i.setAutoClose)(v,d.autoClose,E);var C=function(){(0,i.isOpen)(g)?E():j()};return w.onclick=(0,i.invokeWithoutDefault)(C),P.onclick=(0,i.invokeWithoutDefault)(E),d.open&&!(0,i.isOpen)(g)&&(n=(0,i.handleCustomOpen)(j,d,e)),{open:j,close:E,toggle:C,refresh:m,focus:h,unmount:function(){(0,i.unmountElement)(b),d.open&&(null==n?void 0:n.remove)&&n.remove()}}}},1434:function(e,t,n){var o=this&&this.__createBinding||(Object.create?function(e,t,n,o){void 0===o&&(o=n);var r=Object.getOwnPropertyDescriptor(t,n);r&&!("get"in r?!t.__esModule:r.writable||r.configurable)||(r={enumerable:!0,get:function(){return t[n]}}),Object.defineProperty(e,o,r)}:function(e,t,n,o){void 0===o&&(o=n),e[o]=t[n]}),r=this&&this.__exportStar||function(e,t){for(var n in e)"default"===n||Object.prototype.hasOwnProperty.call(t,n)||o(t,e,n)};Object.defineProperty(t,"__esModule",{value:!0}),r(n(382),t),r(n(7668),t)},7668:function(e,t){Object.defineProperty(t,"__esModule",{value:!0})},2603:function(e,t,n){var o=this&&this.__rest||function(e,t){var n={};for(var o in e)Object.prototype.hasOwnProperty.call(e,o)&&t.indexOf(o)<0&&(n[o]=e[o]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols){var r=0;for(o=Object.getOwnPropertySymbols(e);r0)&&!(o=i.next()).done;)a.push(o.value)}catch(e){r={error:e}}finally{try{o&&!o.done&&(n=i.return)&&n.call(i)}finally{if(r)throw r.error}}return a};Object.defineProperty(t,"__esModule",{value:!0}),t.createWidget=void 0;var i=n(6797),a=n(8554),s=n(2313),c=n(1126);t.createWidget=function(e,t){if(!(0,i.hasDom)())return{refresh:function(){},focus:function(){},unmount:function(){}};var n=t.domain,u=o(t,["domain"]);u.inlineOnMobile=t.inlineOnMobile||t.fullScreen,u.inlineOnMobile||!u.forceTouch&&!(0,i.isFullscreen)()||(u.displayAsFullScreenModal=!0,u.forceTouch=!0);var l=(0,i.createIframe)("widget",{formId:e,domain:n,options:u}),d=l.embedId,f=l.iframe,p=l.refresh,v=l.focus,m=(0,s.buildWidget)(f,u.width,u.height);if(u.fullScreen&&(0,c.overrideFullScreenStyles)(t.container,f),u.autoResize){var h=r("string"==typeof u.autoResize?u.autoResize.split(",").map((function(e){return parseInt(e)})):[],2),b=h[0],g=h[1];window.addEventListener("message",(0,a.getFormHeightChangedHandler)(d,(function(e){var n=Math.max(e.height,b||0);g&&(n=Math.min(n,g)),t.container.style.height="".concat(n,"px")})))}u.autoFocus&&window.addEventListener("message",(0,a.getFormReadyHandler)(d,(function(){setTimeout((function(){v()}),1e3)})));var y,w=function(){return t.container.append(m)};if(t.container.innerHTML="",t.lazy?(0,i.lazyInitialize)(t.container,w):w(),u.displayAsFullScreenModal){var x="",O=t.container,_=O.style.height;window.addEventListener("message",(0,a.getWelcomeScreenHiddenHandler)(d,(function(){O.classList.add("tf-v1-widget-fullscreen"),void 0!==t.opacity&&(O.style.backgroundColor=x)}))),window.addEventListener("message",(0,a.getFormThemeHandler)(d,(function(e){var t;x=(0,i.changeColorOpacity)(null===(t=null==e?void 0:e.theme)||void 0===t?void 0:t.backgroundColor)})));var P=((y=document.createElement("button")).className="tf-v1-widget-close tf-v1-close-icon",y.innerHTML="×",y.ariaLabel="Close",y);P.onclick=(0,i.invokeWithoutDefault)((function(){var e;if(O.style.height=_,null===(e=t.onClose)||void 0===e||e.call(t),O.classList.remove("tf-v1-widget-fullscreen"),O.style.backgroundColor="",t.keepSession){var 
n=document.createElement("div");n.className="tf-v1-widget-iframe-overlay",n.onclick=function(){O.classList.add("tf-v1-widget-fullscreen"),(0,i.unmountElement)(n)},m.append(n)}else t.container.innerHTML="",w(),O.append(P)})),O.append(P)}return{refresh:p,focus:v,unmount:function(){(0,i.unmountElement)(m)}}}},1419:function(e,t,n){Object.defineProperty(t,"__esModule",{value:!0}),t.buildWidget=void 0;var o=n(6797);t.buildWidget=function(e,t,n){var r=document.createElement("div");return r.className="tf-v1-widget",r.dataset.testid="tf-v1-widget",r.append(e),(0,o.setElementSize)(r,{width:t,height:n})}},2313:function(e,t,n){var o=this&&this.__createBinding||(Object.create?function(e,t,n,o){void 0===o&&(o=n);var r=Object.getOwnPropertyDescriptor(t,n);r&&!("get"in r?!t.__esModule:r.writable||r.configurable)||(r={enumerable:!0,get:function(){return t[n]}}),Object.defineProperty(e,o,r)}:function(e,t,n,o){void 0===o&&(o=n),e[o]=t[n]}),r=this&&this.__exportStar||function(e,t){for(var n in e)"default"===n||Object.prototype.hasOwnProperty.call(t,n)||o(t,e,n)};Object.defineProperty(t,"__esModule",{value:!0}),r(n(1419),t)},1126:function(e,t){Object.defineProperty(t,"__esModule",{value:!0}),t.overrideFullScreenStyles=void 0,t.overrideFullScreenStyles=function(e,t){Object.assign(e.style,{zIndex:"10001",position:"absolute",top:"0",left:"0",width:"100%",height:"100%"}),t.style.borderRadius="0",Object.assign(document.body.style,{overflow:"hidden"})}},9321:function(e,t,n){var o=this&&this.__createBinding||(Object.create?function(e,t,n,o){void 0===o&&(o=n);var r=Object.getOwnPropertyDescriptor(t,n);r&&!("get"in r?!t.__esModule:r.writable||r.configurable)||(r={enumerable:!0,get:function(){return t[n]}}),Object.defineProperty(e,o,r)}:function(e,t,n,o){void 0===o&&(o=n),e[o]=t[n]}),r=this&&this.__exportStar||function(e,t){for(var n in e)"default"===n||Object.prototype.hasOwnProperty.call(t,n)||o(t,e,n)};Object.defineProperty(t,"__esModule",{value:!0}),r(n(718),t),r(n(4058),t)},4058:function(e,t){Object.defineProperty(t,"__esModule",{value:!0})},1920:function(e,t,n){var o=this&&this.__createBinding||(Object.create?function(e,t,n,o){void 0===o&&(o=n);var r=Object.getOwnPropertyDescriptor(t,n);r&&!("get"in r?!t.__esModule:r.writable||r.configurable)||(r={enumerable:!0,get:function(){return t[n]}}),Object.defineProperty(e,o,r)}:function(e,t,n,o){void 0===o&&(o=n),e[o]=t[n]}),r=this&&this.__exportStar||function(e,t){for(var n in e)"default"===n||Object.prototype.hasOwnProperty.call(t,n)||o(t,e,n)};Object.defineProperty(t,"__esModule",{value:!0}),r(n(1797),t),r(n(5970),t),r(n(4071),t),r(n(9321),t),r(n(1434),t)},5084:function(e,t,n){Object.defineProperty(t,"__esModule",{value:!0}),t.addAttributesToElement=void 0;var o=n(7377);t.addAttributesToElement=function(e,t){void 0===t&&(t={}),Object.keys(t).forEach((function(n){e.setAttribute((0,o.camelCaseToKebabCase)(n),t[n])}))}},3626:function(e,t,n){var o=this&&this.__assign||function(){return o=Object.assign||function(e){for(var t,n=1,o=arguments.length;n0)&&!(o=i.next()).done;)a.push(o.value)}catch(e){r={error:e}}finally{try{o&&!o.done&&(n=i.return)&&n.call(i)}finally{if(r)throw r.error}}return a};Object.defineProperty(t,"__esModule",{value:!0}),t.buildIframeSrc=void 0;var i=n(8027),a=n(4527),s=n(2346),c=n(2698),u=n(863),l={widget:"embed-widget",popup:"popup-blank",slider:"popup-drawer",popover:"popup-popover","side-tab":"popup-side-panel"};t.buildIframeSrc=function(e){var t=e.domain,n=e.formId,d=e.type,f=e.embedId,p=e.options,v=function(e,t,n){var 
r=n.transitiveSearchParams,i=n.source,a=n.medium,s=n.mediumVersion,u=n.hideFooter,d=n.hideHeaders,f=n.opacity,p=n.disableTracking,v=n.enableSandbox,m=n.shareGaInstance,h=n.forceTouch,b=n.displayAsFullScreenModal,g=n.tracking,y=n.redirectTarget,w=n.autoResize,x=n.disableScroll,O=n.onEndingButtonClick,_=n.noHeading,P=n.noScrollbars,j=(0,c.getTransitiveSearchParams)(r);return o(o(o({},{"typeform-embed-id":t,"typeform-embed":l[e],"typeform-source":i,"typeform-medium":a,"typeform-medium-version":s,"embed-hide-footer":u?"true":void 0,"embed-hide-headers":d?"true":void 0,"embed-opacity":f,"disable-tracking":p||v?"true":void 0,"__dangerous-disable-submissions":v?"true":void 0,"share-ga-instance":m?"true":void 0,"force-touch":h?"true":void 0,"add-placeholder-ws":"widget"===e&&b?"true":void 0,"typeform-embed-redirect-target":y,"typeform-embed-handles-redirect":1,"typeform-embed-auto-resize":w?"true":void 0,"typeform-embed-disable-scroll":x?"true":void 0,"typeform-embed-handle-ending-button-click":O?"true":void 0,"typeform-embed-no-heading":_?"true":void 0,"typeform-embed-no-scrollbars":P?"true":void 0}),j),g)}(d,f,function(e){return e.noHeading||(e.noHeading=document.querySelectorAll("h1").length>0),o(o({},{source:null===(t=null===window||void 0===window?void 0:window.location)||void 0===t?void 0:t.hostname.replace(/^www\./,""),medium:"embed-sdk",mediumVersion:"next"}),(0,a.removeUndefinedKeys)(e));var t}(p)),m=function(e,t){return void 0===t&&(t=i.DEFAULT_DOMAIN),e.startsWith("http://")||e.startsWith("https://")?new URL(e):new URL("https://".concat(t,"/to/").concat(e))}(n,t);if(Object.entries(v).filter((function(e){var t=r(e,2)[1];return(0,s.isDefined)(t)})).forEach((function(e){var t=r(e,2),n=t[0],o=t[1];m.searchParams.set(n,o)})),p.hubspot){var h=(0,u.getHubspotHiddenFields)();p.hidden=o(o({},p.hidden),h)}return m.hash=function(e,t){var n=new URLSearchParams;t.hidden&&Object.entries(t.hidden).filter((function(e){var t=r(e,2)[1];return(0,s.isDefined)(t)&&""!==t})).forEach((function(o){var i=r(o,2),a=i[0],s=i[1];"boolean"==typeof t.transitiveSearchParams&&e.searchParams.delete(a),n.set(a,s)}));var o=function(e){if(!e)return null;var t=Object.keys(e).at(0),n=t&&e[t]||void 0;return void 0===t||void 0===n?null:{key:"answers-".concat(t),value:n}}(t.preselect);if(o){var i=o.key,a=o.value;n.set(i,a)}return n.toString()}(m,p),m.href}},2391:function(e,t){Object.defineProperty(t,"__esModule",{value:!0}),t.changeColorOpacity=void 0,t.changeColorOpacity=function(e,t){return void 0===e&&(e=""),void 0===t&&(t=255),e.startsWith("rgba(")?null==e?void 0:e.replace(/, [\d.]+\)$/,", ".concat(t,")")):e}},8972:function(e,t){Object.defineProperty(t,"__esModule",{value:!0}),t.handleCustomOpen=t.handlePreventReopenOnClose=void 0;var n={remove:function(){}};t.handlePreventReopenOnClose=function(e,t){e.preventReopenOnClose&&r(t)},t.handleCustomOpen=function(e,t,r){var i=t.open,a=t.openValue,s=t.preventReopenOnClose,c=function(e,t,n,r){return function(){var i,a;if(!(r&&o(t)||"all"===n&&(a=document.querySelector(".tf-v1-popup, .tf-v1-slider, .tf-v1-popover-wrapper, .tf-v1-sidetab-wrapper"),(null==a?void 0:a.offsetHeight)||(null==a?void 0:a.offsetWidth)||(null===(i=null==a?void 0:a.getClientRects())||void 0===i?void 0:i.length))||"same"===n&&function(e){var t=document.querySelectorAll(".tf-v1-popup, .tf-v1-slider, .tf-v1-popover-wrapper, .tf-v1-sidetab-wrapper");return Array.from(t).some((function(t){var n,o=null===(n=t.querySelector("iframe"))||void 0===n?void 0:n.src;return(null==o?void 
0:o.includes("typeform.com/to/".concat(e)))||(null==o?void 0:o.startsWith(e))}))}(t)))return e()}}(e,r,t.respectOpenModals,s);switch(i){case"load":return c(),n;case"exit":return a?function(e,t){var n=0,o=function(r){r.clientY=i;(s>=e||c)&&(t(),document.removeEventListener("scroll",n))}return document.addEventListener("scroll",n),{remove:function(){return document.removeEventListener("scroll",n)}}}(a,c):n;default:return n}};var o=function(e){return document.cookie.includes("tf-".concat(e,"-closed=true"))},r=function(e){document.cookie="tf-".concat(e,"-closed=true;Path=/")}},1553:function(e,t,n){Object.defineProperty(t,"__esModule",{value:!0}),t.createIframe=void 0;var o=n(3626),r=n(6797),i=n(8866),a=n(8554),s=n(2256),c=n(7144),u=n(5511);t.createIframe=function(e,t){var n=t.formId,l=t.domain,d=t.options,f=(0,i.generateEmbedId)(),p=d.iframeProps,v=void 0===p?{}:p,m=d.onReady,h=d.onStarted,b=d.onQuestionChanged,g=d.onHeightChanged,y=d.onSubmit,w=d.onEndingButtonClick,x=d.shareGaInstance,O=(0,o.buildIframeSrc)({formId:n,domain:l,embedId:f,type:e,options:d}),_=document.createElement("iframe");return _.src=O,_.dataset.testid="iframe",_.style.border="0px",_.allow="microphone; camera",(0,r.addAttributesToElement)(_,v),_.addEventListener("load",s.triggerIframeRedraw,{once:!0}),window.addEventListener("message",(0,a.getFormReadyHandler)(f,m)),window.addEventListener("message",(0,a.getFormStartedHandler)(f,h)),window.addEventListener("message",(0,a.getFormQuestionChangedHandler)(f,b)),window.addEventListener("message",(0,a.getFormHeightChangedHandler)(f,g)),window.addEventListener("message",(0,a.getFormSubmitHandler)(f,y)),window.addEventListener("message",(0,a.getFormThemeHandler)(f,(function(e){var t;if(null==e?void 0:e.theme){var n=document.querySelector(".tf-v1-close-icon");n&&(n.style.color=null===(t=e.theme)||void 0===t?void 0:t.color)}}))),window.addEventListener("message",(0,a.getThankYouScreenButtonClickHandler)(f,w)),window.addEventListener("message",(0,a.getRedirectHandler)(f,_)),"widget"!==e&&window.addEventListener("message",c.dispatchCustomKeyEventFromIframe),x&&window.addEventListener("message",(0,a.getFormReadyHandler)(f,(function(){(0,r.setupGaInstance)(_,f,x)}))),{iframe:_,embedId:f,refresh:function(){return(0,u.refreshIframe)(_)},focus:function(){var e;null===(e=_.contentWindow)||void 0===e||e.postMessage("embed-focus","*")}}}},8866:function(e,t){var n=this&&this.__read||function(e,t){var n="function"==typeof Symbol&&e[Symbol.iterator];if(!n)return e;var o,r,i=n.call(e),a=[];try{for(;(void 0===t||t-- >0)&&!(o=i.next()).done;)a.push(o.value)}catch(e){r={error:e}}finally{try{o&&!o.done&&(n=i.return)&&n.call(i)}finally{if(r)throw r.error}}return a};Object.defineProperty(t,"__esModule",{value:!0}),t.generateEmbedId=void 0,t.generateEmbedId=function(){var e=Math.random();return n(String(e).split("."),2)[1]}},8554:function(e,t,n){var o=this&&this.__rest||function(e,t){var n={};for(var o in e)Object.prototype.hasOwnProperty.call(e,o)&&t.indexOf(o)<0&&(n[o]=e[o]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols){var r=0;for(o=Object.getOwnPropertySymbols(e);r0&&r[r.length-1])||6!==s[0]&&2!==s[0])){a=0;continue}if(3===s[0]&&(!r||s[1]>r[0]&&s[1]150?"#000000":"#FFFFFF"}},2698:function(e,t){var n=this&&this.__assign||function(){return n=Object.assign||function(e){for(var t,n=1,o=arguments.length;n0?e.reduce((function(e,o){var r,i=t.searchParams.get(o);return i?n(n({},e),((r={})[o]=i,r)):e}),{}):{}}},8252:function(e,t){Object.defineProperty(t,"__esModule",{value:!0}),t.hasDom=void 
0,t.hasDom=function(){return"undefined"!=typeof document&&"undefined"!=typeof window}},2938:function(e,t){Object.defineProperty(t,"__esModule",{value:!0}),t.hexRgb=void 0;var n="a-f\\d",o="#?[".concat(n,"]{3}[").concat(n,"]?"),r="#?[".concat(n,"]{6}([").concat(n,"]{2})?"),i=new RegExp("[^#".concat(n,"]"),"gi"),a=new RegExp("^".concat(o,"$|^").concat(r,"$"),"i");t.hexRgb=function(e){if("string"!=typeof e||i.test(e)||!a.test(e))throw new TypeError("Expected a valid hex string");8===(e=e.replace(/^#/,"")).length&&(e=e.slice(0,6)),4===e.length&&(e=e.slice(0,3)),3===e.length&&(e=e[0]+e[0]+e[1]+e[1]+e[2]+e[2]);var t=Number.parseInt(e,16);return{red:t>>16,green:t>>8&255,blue:255&t}}},863:function(e,t,n){var o=this&&this.__awaiter||function(e,t,n,o){return new(n||(n=Promise))((function(r,i){function a(e){try{c(o.next(e))}catch(e){i(e)}}function s(e){try{c(o.throw(e))}catch(e){i(e)}}function c(e){var t;e.done?r(e.value):(t=e.value,t instanceof n?t:new n((function(e){e(t)}))).then(a,s)}c((o=o.apply(e,t||[])).next())}))},r=this&&this.__generator||function(e,t){var n,o,r,i,a={label:0,sent:function(){if(1&r[0])throw r[1];return r[1]},trys:[],ops:[]};return i={next:s(0),throw:s(1),return:s(2)},"function"==typeof Symbol&&(i[Symbol.iterator]=function(){return this}),i;function s(s){return function(c){return function(s){if(n)throw new TypeError("Generator is already executing.");for(;i&&(i=0,s[0]&&(a=0)),a;)try{if(n=1,o&&(r=2&s[0]?o.return:s[0]?o.throw||((r=o.return)&&r.call(o),0):o.next)&&!(r=r.call(o,s[1])).done)return r;switch(o=0,r&&(s=[2&s[0],r.value]),s[0]){case 0:case 1:r=s;break;case 4:return a.label++,{value:s[1],done:!1};case 5:a.label++,o=s[1],s=[0];continue;case 7:s=a.ops.pop(),a.trys.pop();continue;default:if(!((r=(r=a.trys).length>0&&r[r.length-1])||6!==s[0]&&2!==s[0])){a=0;continue}if(3===s[0]&&(!r||s[1]>r[0]&&s[1]0)&&!(o=i.next()).done;)a.push(o.value)}catch(e){r={error:e}}finally{try{o&&!o.done&&(n=i.return)&&n.call(i)}finally{if(r)throw r.error}}return a};Object.defineProperty(t,"__esModule",{value:!0}),t.loadOptionsFromAttributes=t.transformAttributeValue=t.camelCaseToKebabCase=void 0,t.camelCaseToKebabCase=function(e){return e.split("").map((function(e,t){return e.toUpperCase()===e?"".concat(0!==t?"-":"").concat(e.toLowerCase()):e})).join("")};var r=function(e){return e||void 0},i=function(e){if(null!==e)return""===e||"yes"===e||"true"===e},a=function(e){var t=e?parseInt(e,10):NaN;return isNaN(t)?void 0:t},s="%ESCAPED_COMMA%",c=function(e){if(e)return e.replace(/\s/g,"").replace(/\\,/g,s).split(",").filter((function(e){return!!e})).map((function(e){return e.replace(s,",")}))};t.transformAttributeValue=function(e,t){var u,l,d;switch(t){case"string":return r(e);case"boolean":return i(e);case"integer":return a(e);case"function":return function(e){var t=e&&e in window?window[e]:void 0;return"function"==typeof t?t:void 0}(e);case"array":return c(e);case"record":return function(e){if(e)return e.replace(/\\,/g,s).split(",").filter((function(e){return!!e})).map((function(e){return e.replace(s,",")})).reduce((function(e,t){var r,i=t.match(/^([^=]+)=(.*)$/);if(i){var a=o(i,3),s=a[1],c=a[2];return n(n({},e),((r={})[s.trim()]=c,r))}return e}),{})}(e);case"integerOrString":return function(e){if(e)return e.match(/^[0-9]+$/)?a(e):r(e)}(e);case"integerOrBoolean":return null!==(u=a(e))&&void 0!==u?u:i(e);case"stringOrBoolean":return null!==(l=r(e))&&void 0!==l?l:i(e);case"arrayOrBoolean":return null!==(d=c(e))&&void 0!==d?d:i(e);default:throw new Error("Invalid attribute transformation 
".concat(t))}},t.loadOptionsFromAttributes=function(e,o){return Object.keys(o).reduce((function(r,i){var a;return n(n({},r),((a={})[i]=(0,t.transformAttributeValue)(e.getAttribute("data-tf-".concat((0,t.camelCaseToKebabCase)(i))),o[i]),a))}),{})}},6563:function(e,t){Object.defineProperty(t,"__esModule",{value:!0}),t.isFullscreen=t.isMobile=t.isBigScreen=void 0,t.isBigScreen=function(){return window.screen.width>=1024&&window.screen.height>=768},t.isMobile=function(){return/mobile|tablet|android/i.test(navigator.userAgent.toLowerCase())},t.isFullscreen=function(){return(0,t.isMobile)()&&!(0,t.isBigScreen)()}},4527:function(e,t,n){var o=this&&this.__assign||function(){return o=Object.assign||function(e){for(var t,n=1,o=arguments.length;n0)&&!(o=i.next()).done;)a.push(o.value)}catch(e){r={error:e}}finally{try{o&&!o.done&&(n=i.return)&&n.call(i)}finally{if(r)throw r.error}}return a};Object.defineProperty(t,"__esModule",{value:!0}),t.removeUndefinedKeys=void 0;var i=n(2346);t.removeUndefinedKeys=function(e){return Object.entries(e).filter((function(e){var t=r(e,2)[1];return(0,i.isDefined)(t)})).reduce((function(e,t){var n,i=r(t,2),a=i[0],s=i[1];return o(o({},e),((n={})[a]=s,n))}),{})}},4748:function(e,t,n){Object.defineProperty(t,"__esModule",{value:!0}),t.setAutoClose=t.DEFAULT_AUTO_CLOSE_TIME=void 0;var o=n(8554);t.DEFAULT_AUTO_CLOSE_TIME=500,t.setAutoClose=function(e,n,r){n&&r&&window.addEventListener("message",(0,o.getFormSubmitHandler)(e,(function(){return setTimeout(r,(o="number"==typeof(e=n)?e:0,Math.max(o,t.DEFAULT_AUTO_CLOSE_TIME)));var e,o})))}},9533:function(e,t){Object.defineProperty(t,"__esModule",{value:!0}),t.setElementSize=t.getValueWithUnits=void 0,t.getValueWithUnits=function(e){return"string"!=typeof e||e.match(/^[0-9]+$/)?"".concat(e,"px"):e},t.setElementSize=function(e,n){var o=n.width,r=n.height;return o&&(e.style.width=(0,t.getValueWithUnits)(o)),r&&(e.style.height=(0,t.getValueWithUnits)(r)),e}},4392:function(e,t){Object.defineProperty(t,"__esModule",{value:!0}),t.setupGaInstance=t.sendGaIdMessage=void 0,t.sendGaIdMessage=function(e,t,n){var o={embedId:e,gaClientId:t};setTimeout((function(){n&&n.contentWindow&&n.contentWindow.postMessage({type:"ga-client-id",data:o},"*")}),0)};var n=function(e){console.error(e)},o=function(e){n("Whoops! 
You enabled the shareGaInstance feature in your typeform embed but ".concat(e,".\n\n")+"Make sure to include Google Analytics Javascript code before the Typeform Embed Javascript code in your page and use correct tracker ID.\n\nIt is also possible the Google Analytics was blocked by your adblock plugin.")};t.setupGaInstance=function(e,r,i){var a="string"==typeof i?i:void 0;if(window.gtag){if(a||(a=function(){if(window.dataLayer){var e=window.dataLayer.find((function(e){return e.length>1&&"config"===e[0]}));return e&&e[1]}}()),!a)return void o("the tracking ID could not be retrieved");var s=!1;window.gtag("get",a,"client_id",(function(n){s=!0,(0,t.sendGaIdMessage)(r,n,e)})),setTimeout((function(){s||o("the tracker with ID ".concat(a," was not found"))}),3e3)}else try{var c=function(e,t){return t?e.find((function(e){return e.get("trackingId")===t})):e[0]}(window.ga.getAll(),a);c?(0,t.sendGaIdMessage)(r,c.get("clientId"),e):o("the tracker with ID ".concat(a," was not found"))}catch(e){o("the Google Analytics object was not found"),n(e)}}},4623:function(e,t){Object.defineProperty(t,"__esModule",{value:!0}),t.sleep=void 0,t.sleep=function(e){return new Promise((function(t){return setTimeout(t,e)}))}},1451:function(e,t){Object.defineProperty(t,"__esModule",{value:!0}),t.unmountElement=void 0,t.unmountElement=function(e){var t;null===(t=e.parentNode)||void 0===t||t.removeChild(e)}}},t={};return function n(o){var r=t[o];if(void 0!==r)return r.exports;var i=t[o]={exports:{}};return e[o].call(i.exports,i,i.exports,n),i.exports}(1920)}()},156:function(e){"use strict";e.exports=o}},t={};function n(o){var r=t[o];if(void 0!==r)return r.exports;var i=t[o]={exports:{}};return e[o].call(i.exports,i,i.exports,n),i.exports}n.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(o){if("object"==typeof window)return window}}(),n.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},n.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})};var r={};return function(){"use strict";var e=r;Object.defineProperty(e,"__esModule",{value:!0}),e.Sidetab=e.Popover=e.SliderButton=e.PopupButton=e.Widget=void 0;var t=n(611),o=n(281),i=n(797);e.Widget=(0,i.memoComponent)(o.Widget),e.PopupButton=(0,i.memoComponent)((0,o.makeButtonComponent)(t.createPopup,"popup")),e.SliderButton=(0,i.memoComponent)((0,o.makeButtonComponent)(t.createSlider,"slider")),e.Popover=(0,i.memoComponent)((0,o.makeInitializerComponent)(t.createPopover,"popover")),e.Sidetab=(0,i.memoComponent)((0,o.makeInitializerComponent)(t.createSidetab,"sidetab"))}(),r}())}}]); \ No newline at end of file diff --git a/src/web/gui/v2/9292.cc5055091db9a0826933.chunk.js b/src/web/gui/v2/9292.cc5055091db9a0826933.chunk.js deleted file mode 100644 index 2c4b09179..000000000 --- a/src/web/gui/v2/9292.cc5055091db9a0826933.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="9bf611d0-e74f-45a7-95ec-7650e01eea77",e._sentryDebugIdIdentifier="sentry-dbid-9bf611d0-e74f-45a7-95ec-7650e01eea77")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof 
self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[9292],{82700:(e,t,n)=>{n.d(t,{Ay:()=>y,H5:()=>f,V$:()=>p,b1:()=>g});var a=n(58168),r=(n(17333),n(41393),n(14905),n(8159),n(98992),n(54520),n(81454),n(8872),n(37550),n(62953),n(96540)),l=n(63950),o=n.n(l),i=n(8711),s=n(27467),c=n(83199),d=n(54924),u=n(29217);const m=e=>e?e[0].toUpperCase()+e.slice(1):"Unknown",g=(0,i.default)(c.Flex).attrs((e=>({as:"li",role:"option",padding:[1,2],gap:1,justifyContent:"between",width:"100%",...e}))).withConfig({displayName:"checkboxes__ItemContainer",componentId:"sc-1vef46u-0"})(["cursor:",";opacity:",";align-items:",";"," ",""],(e=>{let{disabled:t}=e;return t?"default":"pointer"}),(e=>{let{stale:t,disabled:n,selected:a}=e;return!t&&!n||a?1:.6}),(e=>{let{alignItems:t}=e;return t||"center"}),(e=>{let{multi:t,selected:n,theme:a}=e;return!t&&n&&"\n background-color: ".concat((e=>{let{theme:t}=e;const{name:n}=t;return("Dark"===n?(0,c.getRgbColor)(["green","green20"]):(0,c.getRgbColor)(["green","green170"]))({theme:t})})({theme:a}),";\n ")}),(e=>{let{multi:t,selected:n,disabled:a,theme:r}=e;return!t&&!n&&!a&&"\n &:hover {\n background-color: ".concat((0,c.getColor)("secondaryHighlight")({theme:r}),";\n }\n ")})),p=(0,i.default)(c.MenuDropdown).attrs((e=>({background:"transparent",hideShadow:!0,height:{max:"300px"},width:{max:"600px"},overflow:"auto",...e}))).withConfig({displayName:"checkboxes__CheckboxesContainer",componentId:"sc-1vef46u-1"})([""]),h=e=>e,f={itemProps:{padding:[1,.5],multi:!0},itemsProps:{head:{textColor:"textLite"},row:{textColor:"text"}},Item:e=>{let{item:t,onItemClick:n,itemProps:l,...i}=e;const{value:s,disabled:p,onClick:h,label:f,selected:y,excluded:v,indeterminate:x,textColor:b,iconName:I,count:E,countLabel:C="results",actualCount:A,actualCountLabel:k,pill:w,info:S,level:P=0,stale:N,...F}=t,{capitalized:_}=l,{multi:K}=l,M=p||!K&&y,L=e=>{p||(h&&h(e),n({value:s,label:f,checked:!y,item:t}))};return r.createElement(g,(0,a.A)({"aria-selected":y,selected:y,disabled:M,stale:0===E||"0"===E||N||v},F,i,l,{"data-testid":"".concat(l.testIdPrefix,"-filters-item")}),r.createElement(c.Flex,{gap:2,alignItems:"center",padding:[0,0,0,4*P],width:"100%",overflow:"hidden"},K?r.createElement(c.Checkbox,{"data-testid":"".concat(l.testIdPrefix,"-filters-checkbox-").concat(f),checked:y,disabled:M,indeterminate:x,onChange:L,label:r.createElement(c.Flex,{gap:1,alignItems:"center",width:"100%"},I&&r.createElement(c.Icon,{name:I,size:"small",color:"textLite"}),r.createElement(d.default,{Component:c.TextSmall,text:"string"===typeof f&&_?m(f):f.toString(),color:b}))}):r.createElement(c.Flex,{flex:!0,gap:1,padding:[0,1],alignItems:"center",onClick:y?o():L,"data-testid":"".concat(l.testIdPrefix,"-filters-item-").concat(f)},r.createElement(d.default,{Component:c.TextSmall,text:"string"===typeof f&&_?m(f):f.toString(),color:b}))),(!isNaN(E)||/%/.test(E||"")||w)&&r.createElement(u.A,{align:"top",content:S||(A?"".concat(A," ").concat(k," aggregated in ").concat(E," ").concat(C):"".concat(E," ").concat(C))},r.createElement(c.Pill,{flavour:"neutral",hollow:!0,"data-testid":"".concat(l.testIdPrefix,"-filters-").concat(f,"-count"),size:"small"},!!A&&r.createElement(c.TextSmall,null,A," 
\u2283\xa0"),(w||E).toString())))},getValue:h,getLabel:h},y=(0,r.memo)((e=>{let{baseKey:t,extraKey:n,paramFlavour:l="arr",param:o,Item:i=f.Item,testIdPrefix:c,collection:d,getValue:u=f.getValue,getLabel:m=f.getLabel,itemsProps:g=f.itemsProps,itemProps:h=f.itemProps,capitalized:y=!0,multi:v=!0,merge:x=!1,...b}=e;const[I,E]=(0,s.N9)(o,{defaultValue:[],key:t,extraKey:n,flavour:l,merge:x});(0,r.useEffect)((()=>{if(!d.length||!I.length)return;const e=I.filter((e=>d.some((t=>u(t)===e))));e.length!==I.length&&E(e)}),[d.length,E]);const C=(0,r.useCallback)((e=>{let{value:t,checked:n}=e;E(v?function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:[];return Array.isArray(t)?n?t:[]:n?[...e,t]:e.filter((e=>e!==t))}:()=>n?[t]:[])}),[E]),A=(0,r.useMemo)((()=>(e=>{let{collection:t,selected:n,getValue:a,getLabel:r,itemsProps:l,multi:o}=e;if(!t.length)return[];const i=o&&t.length<=n.length,s=o&&!i&&!!n.length;return t.reduce(((e,t)=>{const o=n.includes(a(t));return[...e,{...l.row,...t,value:a(t),label:r(t),selected:o}]}),o?[{...l.head,label:"Select all",value:t.map(a),selected:i,indeterminate:s}]:[])})({collection:d,selected:I,getValue:u,getLabel:m,itemsProps:g,multi:v})),[I,d]);return r.createElement(p,(0,a.A)({"data-testid":"".concat(c,"-filter-selections"),items:A,Item:i,onItemClick:C,value:I,"data-value":I.join(",")||"all-selected",hasSearch:d.length>5,searchMargin:[0,0,1],itemProps:{...h,testIdPrefix:c,capitalized:y,multi:v}},b))}),((e,t)=>e.baseKey===t.baseKey&&e.extraKey===t.extraKey&&e.multi===t.multi&&e.collection===t.collection))},59846:(e,t,n)=>{n.d(t,{A:()=>c});var a=n(58168),r=(n(62953),n(96540)),l=n(83199),o=n(27467),i=n(87659),s=n(29217);const c=e=>{let{margin:t,title:n,testIdPrefix:c,baseKey:d,extraKey:u,param:m,paramFlavour:g="arr",children:p,multi:h=!0,defaultIsOpen:f=!0,border:y={side:"bottom",color:"borderSecondary"},required:v=!1,configElements:x,showCounter:b=!0,help:I,collapsible:E=!0,merge:C=!1,...A}=e;const[k,w]=(0,o.N9)(m,{key:d,extraKey:u,flavour:g,merge:C}),[S,P]=(0,i.A)(f||!(null===k||void 0===k||!k.length));return r.createElement(l.Flex,(0,a.A)({"data-testid":"".concat(c,"-filter"),column:!0,padding:[2,0,0],border:y},A),r.createElement(l.Flex,{justifyContent:"between",margin:!E||S?t:[0],gap:2,onClick:E?P:void 0,cursor:"pointer"},r.createElement(l.Flex,{alignItems:"center",gap:2,height:4.5},r.createElement(l.Flex,{gap:2},v&&!(null!==k&&void 0!==k&&k.length)&&r.createElement(l.Icon,{color:"error",name:"error",size:"small"}),r.createElement(l.Flex,{alignItems:"center",gap:.5},r.createElement(l.TextSmall,{strong:!0},n),!!I&&r.createElement(s.A,{align:"top",content:I},r.createElement(l.Flex,null,r.createElement(l.Icon,{name:"question",color:"textLite",width:"12px",height:"12px"})))),b&&!(null===k||void 0===k||!k.length)&&r.createElement(l.Pill,{flavour:"neutral",hollow:!0,"data-testid":"".concat(c,"-selected-count"),tiny:!0},null===k||void 0===k?void 0:k.length)),!v&&h&&!(null===k||void 0===k||!k.length)&&r.createElement(l.Button,{padding:[0],flavour:"borderless",onClick:e=>{e.stopPropagation(),w([])},"data-testid":"".concat(c,"-filter-resetAll"),label:"Reset",small:!0})),r.createElement(l.Flex,{gap:1,alignItems:"center"},"function"===typeof x?x({isOpen:S,collapsible:E}):x,E&&r.createElement(l.Icon,{name:"chevron_left",size:"small",color:"textLite",rotate:S?1:3}))),r.createElement(l.Collapsible,{open:!E||S,height:"100%"},p))}},42828:(e,t,n)=>{n.d(t,{Ay:()=>T,Ii:()=>A});var 
a=n(58168),r=(n(17333),n(14905),n(8159),n(98992),n(54520),n(8872),n(37550),n(25509),n(65223),n(60321),n(41927),n(11632),n(64377),n(66771),n(12516),n(68931),n(52514),n(35694),n(52774),n(49536),n(21926),n(94483),n(16215),n(62953),n(96540)),l=n(83199),o=n(66245),i=n.n(o),s=n(89935),c=n.n(s),d=n(84976),u=n(47762),m=n(11128),g=n(69418),p=n(54924),h=n(4659),f=n(88494),y=n(37019),v=n(82700),x=n(73865),b=n(47130);const I=(0,b.A)(p.default),E=(0,b.A)(l.Flex),C="By selecting a parent node, you apply your changes to all it's children nodes too.",A={Live:{head:{label:"Live nodes",textColor:"textLite",iconName:"connectivityStatusLive",hasML:!1,hasParentNode:!0,hasAlerts:!0,hasFn:!1},node:{textColor:"textFocus",showAlerts:!0,showMl:!1,showFn:!1,showParentNode:!0}},Stale:{head:{label:"Stale nodes",textColor:"textLite",iconName:"connectivityStatusStale"},node:{textColor:"textFocus"}},Offline:{head:{label:"Offline nodes",textColor:"textLite",iconName:"connectivityStatusOffline"},node:{textColor:"textLite"}}},k=e=>{let{toggle:t,expanded:n}=e;return r.createElement(l.Flex,{cursor:"pointer",role:"button",padding:[.5],gap:.5,onClick:e=>{e.preventDefault(),e.stopPropagation(),t(),setTimeout((()=>e.target.scrollIntoView({behavior:"smooth",block:"nearest"})))}},r.createElement(l.TextNano,{color:"textLite"},n?"Collapse":"Expand"),r.createElement(l.Icon,{name:"chevron_down",width:"10px",height:"10px",color:"textLite",rotate:n?2:null}))},w=(S=e=>{let{iconName:t,label:n,textColor:a,value:o,nodePath:i,nodeId:s,onAnchorClick:c,isParentNode:u,showParentNode:m,onToggleHidden:g,itemsHidden:p,status:f}=e;return r.createElement(l.Flex,{gap:2,alignItems:"center"},t&&r.createElement(l.Icon,{name:t,size:"small",color:"textLite"}),r.createElement(I,{Component:l.TextSmall,text:n,color:a,truncate:!0,tooltip:m&&u?C:null}),"Offline"!==f&&!Array.isArray(o)&&r.createElement(h.A,{as:d.N_,to:i,state:{nodeId:s},"data-testid":"filterNodes-item-gta",color:"textLite",hoverColor:"text",onClick:c},r.createElement(l.Icon,{name:"nav_arrow_goto",height:"10px",width:"10px"})),!!g&&r.createElement(k,{toggle:g,expanded:!p}))},e=>{let{limitationsApplied:t,...n}=e;return t&&Array.isArray(n.value)?r.createElement(S,n):r.createElement(l.Checkbox,{"data-testid":"filterNodes-checkbox-".concat(n.label),checked:n.selected,disabled:n.isDisabled,indeterminate:n.indeterminate,onChange:n.onSelect,label:r.createElement(S,n)})});var S;const P=e=>{let{item:{value:t,disabled:n,onClick:o,label:i,nodeId:s,selected:m,excluded:b,stale:I,status:A,indeterminate:S,textColor:P,iconName:N,hasAlerts:F,hasML:_,hasParentNode:K,hasFn:M,showAlerts:L,showMl:T,showParentNode:D,showFn:B,requireML:q,requireFn:V,capabilities:O,count:z,multi:H=!0,checkIsDisabled:W=c(),onToggleHidden:j,itemsHidden:G,isParent:R,isParentNode:U,hasExtraInfo:Q,flavour:Z,...Y},onItemClick:$,itemProps:J,close:X,...ee}=e;const[,,te]=(0,g.A)(),ne=(0,u.Zl)(s),{hasLimitations:ae,preferredNodes:re,maxNodes:le}=(0,x.A)(),oe=(0,r.useMemo)((()=>{var e;return!B||!O||(null===(e=O.funcs)||void 0===e?void 0:e.enabled)}),[B]),ie=(0,r.useMemo)((()=>{var e;return!B||!O||(null===(e=O.ml)||void 0===e?void 0:e.enabled)}),[B]),se=ae&&!re.includes(s),ce=!Array.isArray(t)&&se&&te>le,de=n||ce||q&&!ie||V&&!oe||!H&&m||W(s),ue=e=>{de||(o&&o(e),$({value:t,checked:!m,status:A}))},me=e=>{e.stopPropagation(),null===X||void 
0===X||X()},ge=b||I,pe={status:A,limitationsApplied:se,iconName:N,isDisabled:de,indeterminate:S,label:i,textColor:P,value:t,nodePath:ne,nodeId:s,onAnchorClick:me,onSelect:ue,isStale:ge,excluded:b,selected:m,isParent:R,isParentNode:U,showParentNode:D,itemProps:J,onToggleHidden:j,itemsHidden:G,flavour:Z};return r.createElement(v.b1,(0,a.A)({"aria-selected":m,selected:m,disabled:de,stale:ge||0===z||"0"===z},Y,ee,J,{"data-testid":"filterNodes-item-".concat(i),multi:H}),r.createElement(l.Flex,(0,a.A)({gap:2,alignItems:"center",flex:!0},R?{}:{padding:[0,0,0,1.5]}),H?r.createElement(w,(0,a.A)({},pe,ce?{iconName:"padlock"}:{})):r.createElement(l.Flex,{gap:1,padding:[0,1],alignItems:"center",onClick:ue,"data-testid":"filterNodes-item-".concat(i),flex:!0},r.createElement(p.default,{Component:l.TextSmall,text:i,color:P}),"Offline"!==A&&!Array.isArray(t)&&r.createElement(h.A,{as:d.N_,to:ne,state:{nodeId:s},"data-testid":"filterNodes-item-gta",color:"textLite",hoverColor:"text",onClick:me},r.createElement(l.Icon,{name:"nav_arrow_goto",height:"10px",width:"10px"})),!!j&&r.createElement(k,{toggle:j,expanded:!G}))),Q&&r.createElement(l.Flex,{gap:1},L&&r.createElement(l.Flex,{gap:1,justifyContent:"center",width:17},r.createElement(y.A,{id:t,isLive:!0,name:i})),T&&r.createElement(l.Flex,{gap:1,justifyContent:"center",width:10},r.createElement(f.A,{badge:"ml","data-testid":"nodes-indicator-machine-learning",enabled:ie,name:i})),D&&r.createElement(E,{gap:1,justifyContent:"center",width:14,tooltip:U?C:null},U?r.createElement(l.Icon,{name:"check",color:"primary"}):null),B&&r.createElement(l.Flex,{gap:1,justifyContent:"center",width:10},r.createElement(f.A,{badge:"fn","data-testid":"nodes-indicator-fn",enabled:oe,name:i})),F&&r.createElement(l.Flex,{gap:1,justifyContent:"center",width:17},r.createElement(l.Icon,{name:"alarm",size:"small",color:"textLite"}),r.createElement(l.TextSmall,{strong:!0,color:"textLite"},"Alerts")),_&&r.createElement(l.Flex,{gap:1,justifyContent:"center",width:10},r.createElement(l.Icon,{name:"anomaliesLens",size:"small",color:"textLite"}),r.createElement(l.TextSmall,{strong:!0,color:"textLite"},"ML")),K&&r.createElement(l.Flex,{gap:1,justifyContent:"center",width:14},r.createElement(l.TextSmall,{strong:!0,color:"textLite"},"Parent")),M&&r.createElement(l.Flex,{gap:1,justifyContent:"center",width:10},r.createElement(l.Icon,{name:"functions",size:"small",color:"textLite"}),r.createElement(l.TextSmall,{strong:!0,color:"textLite"},"Fn")),!isNaN(z)&&r.createElement(l.Pill,{flavour:"neutral",hollow:!0,"data-testid":"".concat(J.testIdPrefix,"-filters-").concat(i,"-count"),size:"small"},z.toString())))},N=["dyncfg"],F=e=>{let{ids:t,status:n,selectedIds:a,excludedIds:r,filteredIds:l,nodesById:o,statusProps:i,multi:s=!0,nodeCounts:c,checkIsDisabled:d,setHidden:u,hidden:m,extraKey:g,flavour:p}=e;if(!t.length)return[];const h=s&&!t.some((e=>!a.includes(e))),f=s&&a.length&&!h&&t.some((e=>a.includes(e))),y=s&&!t.some((e=>!r.includes(e))),v=s&&r.length&&!y&&t.some((e=>r.includes(e))),x=!g||N.includes(g);return t.reduce(((e,t)=>{var u;if(m[n])return e;const{name:g,capabilities:h,labels:f,ni:y}=o[t];if("dyncfg"==p&&(null===h||void 0===h||null===(u=h.dyncfg)||void 0===u||!u.enabled))return e;const v=a.includes(t),b=!!r.length&&r.includes(t),I=s&&!l.includes(t)&&("dyncfg"==p||!!l.length),E=0==y||"true"==(null===f||void 0===f?void 
0:f._is_parent);return[...e,{value:t,label:g,nodeId:t,selected:v,excluded:b,stale:I,status:n,capabilities:h,...i[n].node,multi:s,checkIsDisabled:d,count:c?c[t]||0:NaN,hasExtraInfo:x,isParentNode:E,flavour:p}]}),[{value:t,selected:h,indeterminate:f,disabled:!s,status:n,...i[n].head,multi:s,onToggleHidden:()=>u((e=>({...e,[n]:!m[n]}))),itemsHidden:m[n],excluded:y,someExcluded:v,isParent:!0,hasExtraInfo:x}])},_=e=>e,K=(e,t)=>e.isParentNode&&!t.isParentNode?-1:t.isParentNode&&!e.isParentNode?1:0,M=e=>{const t=e.filter((e=>e.isParent)),n=e.filter((e=>!e.isParent)).sort(K);return t.length>0?[t[0],...n]:n},L=e=>{let{baseKey:t,extraKey:n,statusProps:l=A,multi:o=!0,useFilteredIds:s=_,nodeCounts:c,checkIsDisabled:d,flavour:g,...p}=e;const[h,f]=(0,r.useState)((()=>({Stale:!0,Offline:!0}))),y=(0,m.w7)({extraKey:n,omit:"selectedNodeIds",merge:!0,emptyIfAll:!1}),x=(0,m.w7)({extraKey:n,merge:!1}),[b,I]=(0,m.Oj)({key:t,extraKey:n,merge:!1}),[E,C]=(0,m.PF)({key:t,extraKey:n,merge:!1}),k=s(y),w=(0,u.BU)(n?k:void 0),S=(0,u.Ig)(n?k:void 0),N=(0,u.GE)(n?k:void 0),K=(0,r.useCallback)((e=>{let{value:t,checked:n,excluded:a}=e;if("undefined"!==typeof a){if(!o)return void C((()=>n?[t]:[]));C((function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:[];return Array.isArray(t)?n?[...new Set([...e,...t])]:i()(e,t):n?[...e,t]:e.filter((e=>e!==t))}))}I(o?function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:[];return Array.isArray(t)?n?[...new Set([...e,...t])]:i()(e,t):n?[...e,t]:e.filter((e=>e!==t))}:()=>n?[t]:[])}),[b,I]),L=(0,u.ss)(),T=(0,r.useMemo)((()=>{const e=F({ids:w,status:"Live",selectedIds:b,excludedIds:E,filteredIds:x,nodesById:L,statusProps:l,multi:o,nodeCounts:c,checkIsDisabled:d,setHidden:f,hidden:h,extraKey:n,flavour:g}),t=F({ids:S,status:"Stale",selectedIds:b,excludedIds:E,filteredIds:x,nodesById:L,statusProps:l,multi:o,nodeCounts:c,checkIsDisabled:d,setHidden:f,hidden:h,extraKey:n,flavour:g}),a=F({ids:N,status:"Offline",selectedIds:b,excludedIds:E,filteredIds:x,nodesById:L,statusProps:l,multi:o,nodeCounts:c,checkIsDisabled:d,setHidden:f,hidden:h,extraKey:n,flavour:g});return"dyncfg"==g?M(e):M(e).concat(t,a)}),[b,E.length,k.length,x.length,c,d,h,g]);return r.createElement(v.V$,(0,a.A)({"data-testid":"filterNodes-drop",items:T,Item:P,onItemClick:K,value:b,"data-value":b.join(",")||"all-nodes",hasSearch:!0},p))},T=(0,r.memo)(L,((e,t)=>e.baseKey===t.baseKey&&e.extraKey===t.extraKey&&e.statusProps===t.statusProps&&e.multi===t.multi&&e.nodeCounts===t.nodeCounts&&e.useFilteredIds===t.useFilteredIds&&e.checkIsDisabled===t.checkIsDisabled))},93615:(e,t,n)=>{n.d(t,{A:()=>s});var a=n(58168),r=n(96540),l=n(83199),o=n(64473);const i=e=>{let{onToggle:t,icon:n="node_hollow",isOpen:i,title:s,...c}=e;return r.createElement(o.A,(0,a.A)({dataTestId:"collapsed-header",iconClose:i?"arrow_w_line_right":"arrow_w_line_left",onClick:t},c),i&&r.createElement(l.Flex,{gap:2,alignItems:"center","data-testid":"sidebar-nodeName"},r.createElement(l.Icon,{name:n,color:"text",size:"small"}),r.createElement(l.TextSmall,{strong:!0},s)))},s=(0,r.memo)(i)},30577:(e,t,n)=>{n.d(t,{A:()=>s});var a=n(58168),r=(n(41393),n(81454),n(96540)),l=n(83199);const o=e=>{let{iconName:t,label:n,onClick:o,containerStyles:i,textStyles:s}=e;return 
r.createElement(l.Flex,(0,a.A)({"data-testid":"sidebar-icon-item"},i,{column:!0,margin:[2,0],alignItems:"center"}),r.createElement(l.Flex,{"data-testid":"sidebar-icon-item-wrapper",margin:[0,0,.5,0],alignItems:"center",onClick:o,cursor:"pointer"},r.createElement(l.Icon,{name:t,color:"textLite",width:"18px",height:"18px"})),r.createElement(l.TextMicro,(0,a.A)({color:"text"},s),n))},i=e=>{let{onClickTab:t,availableTabs:n,tabsToShow:i,...s}=e;return r.createElement(l.Flex,(0,a.A)({"data-testid":"collapsed-sidebar",column:!0,justifyContent:"center"},s),i.map(((e,a)=>{const{iconName:l,label:i}=n[e];return r.createElement(o,{containerStyles:{border:{size:"2px",type:"solid",color:"mainBackground",side:"left"}},textStyles:{color:"textLite"},key:l,onClick:()=>t(a),iconName:l,label:i})})))},s=(0,r.memo)(i)},12352:(e,t,n)=>{n.d(t,{A:()=>o});var a=n(58168),r=n(96540),l=n(83199);const o=e=>{let{children:t,iconName:n,name:o,size:i,testId:s="entry-line-text-container",...c}=e;const d="small"===i?l.TextSmall:l.Text,u="undefined"!==typeof t&&null!==t;return r.createElement(l.Flex,{alignItems:"start","data-testid":"entry-line"},n&&r.createElement(l.Flex,{"data-testid":"entry-line-icon-container",padding:[0,1,0,0]},r.createElement(l.Icon,{color:"textLite","data-testid":"entry-line-icon",name:n,width:"16px",height:"16px"})),r.createElement(l.Flex,(0,a.A)({column:!0,alignItems:"start","data-testid":s,gap:.5},c),r.createElement(d,{"data-testid":"entry-line-text-label",whiteSpace:"nowrap",color:"textDescription"},o,u&&": "),u&&r.createElement(l.TextSmall,{color:"textDescription","data-testid":"entry-line-text-value",strong:!0},t)))}},50065:(e,t,n)=>{n.d(t,{A:()=>g});var a=n(58168),r=(n(62953),n(8711)),l=n(96540),o=n(83199),i=n(87659),s=n(29217);const c=(0,r.default)(o.Icon).withConfig({displayName:"group__IconLink",componentId:"sc-9459pv-0"})(["&:hover{fill:",";}"],(0,o.getColor)("textDescription")),d=e=>e.stopPropagation(),u=e=>{let{children:t,isEnabled:n,link:a,onClick:r,open:i}=e;return l.createElement(o.Flex,{alignItems:"center",justifyContent:"between",height:8,onClick:n?r:null,cursor:n?"pointer":"default"},l.createElement(o.Flex,{gap:2,alignItems:"center"},l.createElement(o.TextSmall,{strong:!0},t),a&&l.createElement(o.Flex,{as:"a",href:a,target:"_blank",onClick:d},l.createElement(c,{height:"13px",width:"13px",name:"documentation",size:"small",color:"placeholder"}))),n&&l.createElement(o.Icon,{name:"chevron_left",size:"small",color:"textLite",rotate:i?1:3}))},m=(0,r.default)(o.Flex).withConfig({displayName:"group__StyledFlex",componentId:"sc-9459pv-1"})(["&:last-child{border:none;}"]),g=e=>{let{children:t,isEnabled:n=!0,link:r,name:c,noDataLabel:d,...g}=e;const[p,h]=(0,i.A)(!0),f=n?null:d;return l.createElement(s.A,{content:f,isBasic:!0},l.createElement(m,(0,a.A)({column:!0,border:{side:"bottom",color:"borderSecondary"},padding:[1,0,p?2:1],isEnabled:n},g),l.createElement(u,{isEnabled:n,open:p,onClick:h,link:r},c),l.createElement(o.Collapsible,{column:!0,open:p,gap:1},t)))}},64473:(e,t,n)=>{n.d(t,{A:()=>i});var a=n(58168),r=n(96540),l=n(83199),o=n(29217);const i=e=>{let{children:t,onClick:n,iconClose:i="x",icon:s,dataTestId:c,title:d,help:u,...m}=e;return 
r.createElement(l.Flex,(0,a.A)({"data-testid":c,height:12,alignItems:"center",justifyContent:"between",border:{side:"bottom",color:"borderSecondary"},padding:[0,2],flex:!1},m),d?r.createElement(l.Flex,{gap:2,alignItems:"center","data-testid":"sidebar-nodeName"},r.createElement(l.Icon,{name:s,color:"text",size:"small"}),r.createElement(l.TextSmall,{strong:!0},d),!!u&&r.createElement(o.A,{align:"top",content:u},r.createElement(l.Flex,null,r.createElement(l.Icon,{name:"question",color:"textLite",width:"12px",height:"12px"})))):t,r.createElement(l.Icon,{"data-testid":"sidebarHeader-icon",name:i,color:"textLite",onClick:n,cursor:"pointer"}))}},81638:(e,t,n)=>{n.d(t,{Ay:()=>o});var a=n(58168),r=n(96540),l=n(83199);const o=(0,r.memo)((e=>{let{children:t,collapsedComponent:n,isOpen:o,header:i,width:s=90,...c}=e;const d=!!n;return r.createElement(l.Collapsible,(0,a.A)({open:o,column:!0,closedValue:d?48:0,persist:d,direction:"horizontal",border:{side:"left",color:"borderSecondary"},width:s,flex:!0,basis:s},c),(e=>r.createElement(r.Fragment,null,i,e?t:n)))}))},92136:(e,t,n)=>{n.d(t,{A:()=>P});var a=n(58168),r=(n(62953),n(96540)),l=n(67602),o=n(83199),i=n(64118),s=n(15327),c=n(74618),d=n(40267),u=n(5871),m=n(28738),g=n(11164),p=n(52768);const h=(0,r.memo)((e=>{let{id:t}=e;const{fullyLoaded:n,nodeId:a,instance:l,lastStatusChangeValue:o,lastStatusChange:s,units:c,context:d,value:u,lastUpdated:m,status:h}=(0,i.JL)(t),f=(0,p.J4)(u,c),y=(0,p.J4)(o,c);return r.createElement(g.A,{instance:l,context:d,formattedLastValue:f,formattedLastStatusChangeValue:y,lastStatusChange:s,lastUpdated:m,isFormattedValueLoaded:"clear"===h||n,nodeId:a,status:h,testid:"alertDetailsModal"})}));n(9391);var f=n(3914),y=n(69765),v=n(47767),x=n(50876);const b=e=>{let{alertId:t,testid:n}=e;const a=(0,f.bq)(),l=(0,y.QW)(),i="/spaces/".concat(a,"/rooms/").concat(l,"/alerts/").concat(t),s=(0,v.Zp)(),{sendButtonClickedLog:c,isReady:d}=(0,x.A)(),u=(0,r.useCallback)((e=>{e.preventDefault(),c({dataGa:"".concat(n,"::view-alert-button")}).finally((()=>{s(i)}))}),[s,i,d]);return r.createElement(o.Button,{as:"a","data-testid":"".concat(n,"-view-alert-button"),"data-ga":"".concat(n,"::view-alert-button"),href:i,onClick:u,label:"View alert page",width:"224px"})};var I=n(43407),E=n(7660),C=n(4974),A=n(85686),k=n(73865),w=n(63314);const S=e=>{let{alertId:t,onClose:n}=e;const{isNodeRestricted:a}=(0,k.A)(),{name:l="unknown alert",status:g,fullyLoaded:p,info:f,nodeId:y}=(0,i.JL)(t);(0,i.yk)(t);const v=y&&a(y);return r.createElement(w.Ay,{mode:"AlertDetailsModal"},r.createElement(s.GO,{onClose:n},r.createElement(c.z,{onClose:n,title:l,"data-testid":"alertDetailsModal-alertName"},r.createElement(d.A,{flavour:g,icon:"alarm_bell","data-testid":"alertDetailsModal-alertPill"},g)),p?v?r.createElement(A.A,{flavour:"alert"}):r.createElement(r.Fragment,null,r.createElement(s.Yv,{overflow:{vertical:"auto"},hasModalTitle:!1,gap:2,hasFooter:!0},r.createElement(I.A,{iconName:"monitoring",iconSize:"20px"},"Time Values"),r.createElement(E.A,{alertId:t}),r.createElement(I.A,{iconName:"documentation"},"Alert Description"),r.createElement(o.Text,{"data-testid":"alertDetailsModal-info"},f),r.createElement(C.A,{alertId:t}),r.createElement(h,{id:t}),r.createElement(u.A,{id:t})),r.createElement(s.CG,null,r.createElement(b,{alertId:t,testid:"alertDetailsModal"}))):r.createElement(m.A,{title:"Loading 
alert..."})))},P=()=>{const[e,,t,n,o]=(0,l.A)("alertDetailsModal"),i=(0,r.useCallback)((e=>{let{alertId:n}=e;t("",{alertId:n})}),[]),s=(0,r.useMemo)((()=>r.createElement(S,(0,a.A)({onClose:n},o))),[o]);return[e,i,n,s]}},23452:(e,t,n)=>{n.d(t,{QD:()=>p,hv:()=>u,qr:()=>g});n(17333),n(41393),n(8159),n(98992),n(54520),n(81454),n(37550),n(62953);var a=n(47444),r=n(27467),l=n(69765),o=n(11128),i=n(64118);const s=e=>(t,n)=>!Array.isArray(n)||!n.length||n.includes(t[e]),c={alertStatuses:s("status"),alertNames:s("name"),alertContexts:s("context"),alertClasses:s("class"),alertRoles:s("recipient"),alertTypeComponents:(e,t)=>!Array.isArray(t)||!t.length||t.includes("".concat(e.type,"|").concat(e.component)),nodeIds:s("nodeId")},d=(0,a.K0)({key:"alertsFiltered",get:e=>{let{extraKey:t,roomId:n,omit:a,keepAll:l}=e;return e=>{let{get:s}=e;const d=s((0,r.GA)({key:n,extraKey:t,flavour:"arr"})),u=Object.keys(d).map((e=>[e,a&&a.split(":::").includes(e)?s((0,r.GA)({key:n,flavour:"arr",param:e})):d[e]]));if(!a||!a.includes("selectedNodeIds")){const e=s((0,o.QY)({key:n,extraKey:t,omit:a,keepAll:!1,merge:!1,scoped:!0}));null!==e&&void 0!==e&&e.length&&u.push(["nodeIds",e])}const m=s((0,i.Yo)(n)),g=(e=>t=>!e.some((e=>{let[n,a]=e;return!!c[n]&&!c[n](t,a)})))(u);return l?m.map((e=>g(e)?e:{...e,hidden:!0})):m.filter(g)}}}),u={alertStatuses:{critical:0,warning:0},alertClasses:{Errors:0,Latency:0,Utilization:0,Workload:0}},m={critical:0,warning:1},g={alertStatuses:(e,t)=>m[e.id]-m[t.id],default:(e,t)=>e.id.localeCompare(t.id,void 0,{sensitivity:"accent",ignorePunctuation:!0})},p=function(){let{extraKey:e,omit:t,keepAll:n=!1}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const r=(0,l.ID)();return(0,a.vc)(d({extraKey:e,roomId:r,omit:t,keepAll:n}))}},81416:(e,t,n)=>{n.d(t,{Ay:()=>g,yD:()=>u,f_:()=>m});n(17333),n(41393),n(98992),n(54520),n(81454),n(62953);var a=n(96540),r=n(27467),l=n(61427);const o=function(e){let{after:t,before:n}=e,{offset:a,limit:r}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{offset:6e4,limit:Date.now()};const l=((e,t)=>{let{offset:n,limit:a}=t;const r=n/2;return e+r<=a?r:ae||t?o({after:parseInt(e),before:parseInt(t)},(e=>{const t=Date.now();return{limit:t,offset:t-e<6e4?3e4:15e3}})(t)):d,d=[null,null],u=()=>{const{after:e,before:t,highlight:n}=(0,r.rW)();if(!n.after||!n.before)return d;if(e<0){const t=Date.now();if(n.beforet)return d}else{if(n.beforet)return d}return[n.after,n.before]},m=(e,t,n)=>{const[o,d]=(0,a.useState)(),[m,g]=(0,a.useState)(!1),{sendLog:p}=(0,i.A)();let[h,f]=(()=>{const e=u();return(0,a.useMemo)((()=>c(...e)),[...e])})();const{after:y,before:v}=(0,r.rW)(),[x]=(0,l.er)({nodeIds:n,flavour:"rhs"},e,t);return[async()=>{const e=((e,t)=>{if(e<0){const n=Date.now();e=1e3*Math.floor(n/1e3+e),t=1e3*Math.floor(n/1e3)}return{after:e,before:t}})(y,v);g(!0);try{await 
x({flavour:"rhs",highlightAfter:h||e.after,highlightBefore:f||e.before,baselineAfter:h?e.after:null,baselineBefore:f?e.before:null,method:"anomaly-rate",aggregation:"avg",group:"average",nodeIds:n,options:["raw","null2zero"],groupBy:["context"]}),g(!1),p({action:s.o1.elementViewed,feature:"AnomalyRates-TOC",isSuccess:!0},!0)}catch(t){if(t.isCancel)return;d(t),g(!1),p({action:s.o1.elementViewed,feature:"AnomalyRates-TOC",isFailure:!0},!0)}},m,o]},g=(e,t,n)=>{const[o,s]=(()=>{const[e,t]=(0,a.useState)(0);return[e,(0,a.useCallback)((()=>t((e=>e+1))),[])]})(),[c,d]=(0,a.useState)(),[m,g]=u(),{after:p,before:h}=(0,r.rW)(),{contexts:f,metadata:y,getWeights:v,loading:x}=(0,l.Yy)({nodeIds:n,flavour:"anomaly"},e,t),{sendLog:b,isReady:I}=(0,i.A)();(0,a.useEffect)((()=>{if(m&&g&&g&&m)try{b({feature:"AnomalyAdvisor",isStart:!0}),v({highlightAfter:m,highlightBefore:g,baselineAfter:p,baselineBefore:h,method:"anomaly-rate",aggregation:"avg",group:"average",nodeIds:n,options:["null2zero","raw"],groupBy:["context","dimension"]})}catch(e){if(b({feature:"AnomalyAdvisor",isFailure:!0}),e.isCancel)return;d(e)}}),[n.length,g,m,o,I]);return[(0,a.useMemo)((()=>f.filter((e=>{var t;return((null===(t=y[e])||void 0===t?void 0:t.weight)||0)>0})).sort(((e,t)=>{const n=y[e].weight||0,a=y[t].weight||0;return n!=a?n>a?-1:1:0})).map((e=>{const t=e.split(",");return t.length>1?"".concat(t[1],"::").concat(t[0]):e}))),[f,y]),f,y,x,c,s]}},13271:(e,t,n)=>{n.d(t,{sK:()=>h,Um:()=>p,Ad:()=>m,rq:()=>f});n(17333),n(14905),n(8159),n(98992),n(54520),n(8872),n(37550),n(62953);var a=n(96540),r=n(47444),l=n(47767),o=n(23931),i=n(47762),s=n(69765),c=n(27467);const d=(0,r.Iz)({key:"chartsFilterValue",default:{filteredChartsIds:[],filteredChartsCount:0,chartsCount:0}}),u=()=>{const e=(0,s.ID)(),t=(0,i.nl)();return(0,a.useMemo)((()=>({roomId:e,nodeId:t})),[t,e])},m=()=>{const e=u();return(0,r.vc)(d(e))},g=()=>{const e=(0,s.ID)(),{nodeID:t}=(0,l.g)();return[e,t||"overview"]},p=()=>{const[e,t]=g();return(0,c.rW)("tocSearch",{key:e,extraKey:t,defaultValue:"",flavour:"val"})},h=()=>{const[e,t]=g();return(0,c.N9)("tocSearch",{key:e,extraKey:t,defaultValue:"",flavour:"val"})},f=function(e,t,n){let{shouldHide:l=(()=>!1),force:i=!1,cacheKey:s}=arguments.length>3&&void 0!==arguments[3]?arguments[3]:{};const m=(0,c.rW)("after"),g=(0,c.rW)("before"),h=(()=>{const e=p();return"string"!==typeof e?"":e.trim().toLowerCase()})(),f=(0,o.w1)(n),[y,v,x]=(0,a.useMemo)((()=>{let n=0,a=0;const r=Date.now()/1e3,o=h.split(/[\s,]+/).filter((e=>!!e));return[e.reduce(((e,t)=>{if(!e[t])return e;const{firstEntry:s,lastEntry:c,live:d}=e[t],u=!i&&e[t].visible||(m<0?d||r+m1e3*c||g<1e3*s)),p=!u||!Object.keys(e[t]).some((n=>{const a=e[t][n];return null===o||void 0===o||!o.length||o.some((e=>"string"===typeof a&&a.includes(e)))})),h=!u||l(e[t]);return e[t]={...e[t],filteredOut:p,visible:u,hidden:h},p||!u||h||(n+=1),h||(a+=1),e}),{...t}),a,n]}),[h,e,m,g,i,s]),b=(()=>{const e=u();return(0,r.lZ)(d(e))})();return(0,a.useEffect)((()=>{f(y),b({filteredChartsCount:x,chartsCount:v})}),[y,v,x,n]),x}},82543:(e,t,n)=>{n.d(t,{A:()=>p,g:()=>g});n(62953);var a=n(96540),r=n(83199),l=n(29217),o=n(47762),i=n(11128),s=n(69765),c=n(3914),d=n(81416),u=n(94944),m=n(50876);const g=()=>{const e=(0,o.nl)(),t=(0,i.w7)();return e?[e]:t},p=e=>t=>{const n=(0,c.vt)(),o=(0,s.ID)(),i=g(),{pause:p}=(0,u.A)(),{sendButtonClickedLog:h}=(0,m.A)(),[f,y]=(0,d.f_)(n,o,i);return a.createElement(r.Flex,{alignItems:"center",justifyContent:"between",width:"100%"},a.createElement(l.A,{content:"Overlay the maximum chart 
anomaly rate on each menu section.",align:"bottom",activateOn:"hover",isBasic:!0},a.createElement(r.Button,{tiny:!0,label:y?null:"Anomaly%",isLoading:y,flavour:"hollow",onClick:()=>{p(),f(),h({feature:"AnomalyRates-TOC",label:"Anomaly%",isStart:!0},!0)},textTransform:"uppercase","data-track":"menu-anomaly-rates::click-ar-button"})),a.createElement(r.Flex,{column:!0},a.createElement(e,t)))}},18713:(e,t,n)=>{n.d(t,{C:()=>u,Ny:()=>c,Ud:()=>d,W7:()=>m});n(62953);var a=n(96540),r=n(86856),l=n(55483);const o=(0,r.q6)(""),i=(0,r.q6)(""),s=(0,r.q6)(""),c=e=>{let{menuGroupId:t="",subMenuId:n="",children:r}=e;const[l,c]=(0,a.useState)(t),[d,u]=(0,a.useState)(n),m=(0,a.useMemo)((()=>({setMenuGroupId:c,setSubMenuId:u})),[]);return a.createElement(o.Provider,{value:m},a.createElement(i.Provider,{value:l},a.createElement(s.Provider,{value:d},r)))},d=()=>(0,l.A)(o),u=e=>{return t=t=>e===t,(0,l.A)(i,t);var t},m=e=>(0,l.A)(s,e)},31438:(e,t,n)=>{n.d(t,{BK:()=>E,NF:()=>v,Tg:()=>b,aA:()=>f,dd:()=>y,i8:()=>u,qR:()=>x,uy:()=>I,vN:()=>p,yO:()=>C});var a=n(58168),r=(n(41393),n(81454),n(96540)),l=n(83488),o=n.n(l),i=n(86856),s=n(55483);const c=r.createContext({}),d=(0,i.q6)({}),u=(0,i.q6)({}),m=(0,i.q6)([]),g=(0,i.q6)({}),p=e=>{let{container:t,menuItemAttributesById:n,getObject:a,allElements:l,stickyIds:o,children:i}=e;return r.createElement(c.Provider,{value:t},r.createElement(d.Provider,{value:a},r.createElement(u.Provider,{value:n},r.createElement(m.Provider,{value:l},r.createElement(g.Provider,{value:o},i)))))},h={},f=function(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:o();return(0,s.A)(u,(n=>t(n[e]||h)))},y=function(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:o();return(0,s.A)(u,(n=>Array.isArray(e)?e.map((e=>t(n[e]||h,e))):[]))},v=()=>r.useContext(c),x=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:o();return(0,s.A)(m,e)},b=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:o();return(0,s.A)(g,e)},I=function(e){return(arguments.length>1&&void 0!==arguments[1]?arguments[1]:o())(C()(e))},E=e=>(0,r.forwardRef)(((t,n)=>{let{id:l,...o}=t;const i=f(l),{chartId:s}=i,c=I(s);return r.createElement(e,(0,a.A)({ref:n,id:l,chart:c,menuChartAttributes:i},o))})),C=()=>(0,s.A)(d)},8320:(e,t,n)=>{n.d(t,{Cs:()=>o,Gr:()=>s,UH:()=>i});n(62953);var a=n(96540);const r=(0,a.createContext)(),l=(0,a.createContext)(),o=e=>{let{children:t}=e;const[n,o]=(0,a.useState)();return a.createElement(r.Provider,{value:n},a.createElement(l.Provider,{value:o},t))},i=()=>(0,a.useContext)(r),s=()=>(0,a.useContext)(l)},55309:(e,t,n)=>{n.d(t,{$V:()=>f,Ee:()=>m,HB:()=>g,Ss:()=>h,ox:()=>c,re:()=>u,sF:()=>d});var a=n(58168),r=n(96540),l=n(83488),o=n.n(l),i=n(86856),s=n(55483);const c=(0,i.q6)({}),d=e=>{let{menuGroupById:t,children:n}=e;return r.createElement(c.Provider,{value:t},n)},u=e=>(0,s.A)(c,e),m=function(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:o();return u((n=>t((null===n||void 0===n?void 0:n[e])||{})))},g=(e,t)=>n=>{let{id:l,...o}=n;const i=m(l,t);return r.createElement(e,(0,a.A)({id:l},i,o))},p=(0,i.q6)([]),h=e=>{let{ids:t,children:n}=e;return r.createElement(p.Provider,{value:t},n)},f=e=>t=>{const n=(0,s.A)(p,l);var l;return r.createElement(e,(0,a.A)({menuGroupIds:n},t))}},59090:(e,t,n)=>{n.d(t,{A:()=>g,Ie:()=>u,jR:()=>m,xw:()=>c,zK:()=>d});var a=n(58168),r=n(96540),l=n(83488),o=n.n(l),i=n(86856),s=n(55483);const c=(0,i.q6)({}),d=e=>{let{subMenuById:t,children:n}=e;return 
r.createElement(c.Provider,{value:t},n)},u=e=>(0,s.A)(c,e),m=function(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:o();return u((n=>t(null===n||void 0===n?void 0:n[e])))},g=(e,t)=>n=>{let{id:l,...o}=n;const i=m(l,t);return r.createElement(e,(0,a.A)({id:l},i,o))}},79566:(e,t,n)=>{n.d(t,{A:()=>f});var a=n(96540),r=n(31438),l=n(8320),o=n(55309),i=n(59090),s=n(18713),c=n(47444),d=n(47762),u=n(69765);const m=(0,c.Iz)({key:"activeMenuGroupIdValue",default:""}),g=()=>{const e=(()=>{const e=(0,u.ID)(),t=(0,d.nl)();return(0,a.useMemo)((()=>({roomId:e,nodeId:t})),[t,e])})();return(0,c.lZ)(m(e))},p=e=>(t,n)=>{const a=t.querySelector(n);a&&e(a)},h="scrollIntoViewIfNeeded"in document.body?p((e=>e.scrollIntoViewIfNeeded())):p((e=>e.scrollIntoView())),f=e=>{let{onMenuGroupClick:t,onSubMenuClick:n,onChartNameChange:c,initialChartName:d,linkToGo:u,contextToGo:m,loaded:p=!0}=e;const f=(0,r.NF)(),y=(0,l.UH)(),v=(0,o.re)(),x=(0,i.Ie)(),{setMenuGroupId:b,setSubMenuId:I}=(0,s.Ud)(),E=g(),C=(0,a.useRef)(d);(0,a.useLayoutEffect)((()=>{p&&y&&u&&!C.current&&y.goToLink(u)}),[y,u]),(0,a.useLayoutEffect)((()=>{p&&y&&m&&!C.current&&y.goToElement(m)}),[y,m]);const A=(0,a.useCallback)((e=>{b(e),E(e),h(f,'[data-sidebar="true"] [data-sidebar-menugroupid="'.concat(e,'"]')),e&&v[e]&&c(v[e].link)}),[v]),k=(0,a.useCallback)((e=>{I(e),e in x&&(b(x[e].menuGroupId),h(f,'[data-sidebar="true"] [data-sidebar-submenuid="'.concat(e,'"]')),c(x[e].link))}),[x]);(0,a.useLayoutEffect)((()=>{p&&C.current&&y&&y.goToLink(d)&&(C.current=null)}),[y]);return{setActiveMenuGroupId:A,setActiveSubMenuId:k,onMenuGroupClick:(0,a.useCallback)(((e,n)=>{n&&n.preventDefault(),y&&(y.goToElement(e),t(e))}),[y]),onSubMenuClick:(0,a.useCallback)(((e,t,a)=>{a&&a.preventDefault(),y&&(y.goToElement(t),n(e,t))}),[y])}}},11988:(e,t,n)=>{n.d(t,{l3:()=>o,Xt:()=>c,YS:()=>d});n(14905),n(98992),n(8872),n(62953);var a=n(47444);const r=(0,a.eU)({key:"alertIdsByNodeId",default:{}}),l=[],o=(0,a.gD)({key:"nodesAlertIdsState",get:e=>{let{get:t}=e;return t(r)},set:(e,t)=>{let{set:n}=e;return n(r,t)}}),i=(0,a.K0)({key:"nodeAlertIdsState",get:e=>t=>{let{get:n}=t;return n(r)[e]||l},set:e=>(t,n)=>{let{set:a}=t;a(r,(t=>({...t,[e]:[...n].sort(((e,t)=>e.id-t.id))})))}}),s=(0,a.K0)({key:"nodesAlertIdsState",get:e=>t=>{let{get:n}=t;const a=n(r);return(e=e.length?e:Object.keys(a)).reduce(((e,t)=>[...e,...a[t]||[]]),[])},cachePolicy_UNSTABLE:{eviction:"most-recent"}}),c=e=>(0,a.vc)(i(e)),d=e=>(0,a.vc)(s(e))},28146:(e,t,n)=>{n.d(t,{A:()=>u});var a=n(58168),r=n(96540),l=n(29217),o=n(8711),i=n(83199);const s=(0,o.default)(i.Flex).attrs((()=>({width:{max:"315px"},overflow:"hidden"}))).withConfig({displayName:"styled__Container",componentId:"sc-t6teia-0"})([""]),c=e=>{let{children:t,...n}=e;return r.createElement(s,(0,a.A)({"data-testid":"nodeInfoContent"},n),t)};var d=n(82432);const u=e=>{let{align:t="bottom",badge:n,children:o,connectivityState:i,content:s,isBasic:u=!0,status:m,nodeName:g,nodeType:p,...h}=e;return r.createElement(l.A,(0,a.A)({align:t,content:()=>r.createElement(c,{"data-testid":"nodeInfoTooltip-tooltipContent"},s||(0,d.Bb)(n,i,m,g,p)),"data-testid":"nodeInfoTooltip",isBasic:u},h),o)}},60072:(e,t,n)=>{n.d(t,{A:()=>i});var a=n(96540),r=n(12352),l=n(50065),o=n(82432);const i=e=>{let{node:t}=e;const{_cloud_instance_region:n,_cloud_instance_type:i,_cloud_provider_type:s}=t.labels||{};return(0,o.Po)(n)&&(0,o.Po)(i)&&(0,o.Po)(s)?null:a.createElement(l.A,{name:"Cloud Instance Info"},a.createElement(r.A,{iconName:"ipNetworking",name:"Cloud 
provider",size:"small",testId:"sidebar-nodeInfoContent-cloudProvider"},(0,o.ws)(s)),a.createElement(r.A,{iconName:"universe",name:"Cloud region",size:"small",testId:"sidebar-nodeInfoContent-cloudRegion"},(0,o.ws)(n)),a.createElement(r.A,{iconName:"disk",name:"Instance type",size:"small",testId:"sidebar-nodeInfoContent-instanceType"},(0,o.ws)(i)))}},25825:(e,t,n)=>{n.d(t,{A:()=>c});var a=n(96540),r=n(50065),l=n(12352),o=n(82432),i=n(55905),s=n(33931);const c=e=>{let{node:t}=e;const n=(0,o.GM)(t.state);return a.createElement(r.A,{link:"https://learn.netdata.cloud/docs/agent/aclk",name:"Connection"},a.createElement(l.A,{size:"small",name:"Status",iconName:"integrations",testId:"sidebar-connectionGroup-connectionStatus"},a.createElement(s.A,{rawState:n})),a.createElement(l.A,{size:"small",name:"Netdata Agent",iconName:"netdata",testId:"sidebar-connectionGroup-agentVersion"},t.version),t.updateSeverity&&a.createElement(l.A,{size:"small",name:"Update Status",iconName:"update",testId:"sidebar-connectionGroup-agentVersion"},a.createElement(i.A,{name:t.name,os:t.os.id,container:t.hw.container,warningLevel:t.updateSeverity,labels:t.labels||{},version:t.version,text:t.updateSeverity})))}},17240:(e,t,n)=>{n.d(t,{A:()=>s});var a=n(96540),r=n(50065),l=n(83199),o=n(18682);const i=e=>{let{node:t}=e;return a.createElement(r.A,{name:"Files"},a.createElement(l.Flex,{gap:1,padding:[0,1],alignItems:"center"},a.createElement(l.Box,{sx:{fontWeight:"500",letterSpacing:"1px"},as:l.Text,color:"textLite"},"{;}"),a.createElement(l.TextSmall,null,"View node info in"),a.createElement(l.Flex,{cursor:"pointer",gap:1,onClick:(0,o.C)(JSON.stringify(t),{text:"JSON copied to clipboard"})},a.createElement(l.TextSmall,{color:"primary"},"json"),a.createElement(l.Icon,{color:"primary",size:"small",name:"copy"}))))},s=(0,a.memo)(i)},58010:(e,t,n)=>{n.d(t,{A:()=>o});n(17333),n(41393),n(98992),n(54520),n(81454);var a=n(96540),r=n(50065),l=n(12352);const o=e=>{let{node:{labels:t={}}={}}=e;const n=Object.keys(t).filter((e=>!e.startsWith("_")));return a.createElement(r.A,{link:"https://learn.netdata.cloud/guides/using-host-labels",name:"Host labels",isEnabled:n.length>0,noDataLabel:"No Data"},n.length>0&&n.map((e=>a.createElement(l.A,{size:"small",key:e,name:e,testId:"sidebar-hostLabelsGroup-label"},t[e]))))}},80618:(e,t,n)=>{n.d(t,{A:()=>c});var a=n(96540),r=n(83199),l=n(50065),o=n(12352),i=n(82432);const s={},c=e=>{let{node:t}=e;const n=t.hw||s,c=t.os||s,d=(0,i.Pg)(n.container,n.virtualization);return a.createElement(l.A,{name:"System info"},a.createElement(o.A,{size:"small",name:"Type",iconName:"virtualization",testId:"sidebar-nodeInfoContent-type"},a.createElement(r.Pill,{hollow:!0,icon:d.icon,flavour:"neutral"},d.label)),a.createElement(o.A,{size:"small",name:"O/S version",iconName:"database",testId:"sidebar-nodeInfoContent-osVersions"},(0,i.Ud)(c.nm,c.v,", ")),a.createElement(o.A,{iconName:"viewSingleNode",name:"Architecture",size:"small",testId:"sidebar-nodeInfoContent-architecture"},(0,i.ws)(n.architecture)),!!c.kernel&&a.createElement(o.A,{size:"small",name:"Kernel",iconName:"metrics_explorer",testId:"sidebar-nodeInfoContent-kernel"},(0,i.Ud)(c.kernel.nm,c.kernel.v,", ")),a.createElement(o.A,{size:"small",name:"CPU",iconName:"cpu",testId:"sidebar-nodeInfoContent-cpu"},(0,i.O)(n.cpuFrequency)," (",(0,i.ws)(n.cpus,1===n.cpus?"Core":"Cores"),")"),a.createElement(o.A,{size:"small",name:"Memory",iconName:"ram",testId:"sidebar-nodeInfoContent-ram"},(0,i.ws)((0,i.RI)(n.memory),"RAM")),a.createElement(o.A,{size:"small",name:"Hard 
disk size",iconName:"disk",testId:"sidebar-nodeInfoContent-disk"},(0,i.RI)(n.diskSpace)))}},37019:(e,t,n)=>{n.d(t,{A:()=>d});var a=n(58168),r=n(96540),l=n(83199),o=n(64118),i=n(11988),s=n(82432),c=n(28146);const d=e=>{let{id:t,isLive:n,name:d,onClick:u,...m}=e;const g=(0,i.Xt)(t),{critical:p,warning:h}=(0,o.AO)(g),f={...(null===m||void 0===m?void 0:m["data-testid"])&&{dataTestId:null===m||void 0===m?void 0:m["data-testid"]},isLive:n},y=(0,s.t3)({count:p,type:"critical",...f}),v=(0,s.t3)({count:h,type:"warning",...f});return r.createElement(c.A,(0,a.A)({badge:"alerts","data-testid":"node-alerts",nodeName:d},m),r.createElement(l.AlertMasterCard,{onClick:y.text||v.text?u:null,pillLeft:y,pillRight:v,size:"small"}))}},88494:(e,t,n)=>{n.d(t,{A:()=>s});var a=n(58168),r=n(96540),l=n(83199),o=n(82432),i=n(28146);const s=e=>{let{children:t,badge:n,enabled:s,name:c,onClick:d,...u}=e;const m=(0,o.lw)(s);return r.createElement(i.A,(0,a.A)({badge:n,"data-testid":"node-".concat(n,"-status"),status:m.status,nodeName:c},u),r.createElement(l.Flex,{alignItems:"center",gap:1,onClick:s?d:void 0,cursor:s&&d?"pointer":"default","data-ga":"nodes-list::click-capability::".concat(m.label)},t,r.createElement(l.Pill,{flavour:m.flavour,size:"small"},m.label)))}},61427:(e,t,n)=>{n.d(t,{UG:()=>I,Ip:()=>b,er:()=>v,Yy:()=>E,G9:()=>x});n(9920),n(14905),n(98992),n(3949),n(8872),n(62953);var a=n(96540),r=n(47444),l=n(27467),o=n(9224);const i={byContext:{},loaded:!1,loading:!1,method:null,options:null,group:"average",totalDimensionsCount:0},s=(0,r.Iz)({key:"weights",default:i}),c=(0,r.K0)({key:"weightsState",get:e=>t=>{let{get:n}=t;return n(s(e))},set:e=>(t,n)=>{let{set:a}=t;return a(s(e),n)},cachePolicy_UNSTABLE:{eviction:"most-recent"}}),d={ci:"context",ni:"node",ii:"instance",di:"dimension"},u=(e,t)=>n=>({[d[e.name]]:t[e.dictionary][n]}),m={ni:u,ii:(e,t)=>(n,a,r)=>({[d[e.name]]:t[e.dictionary][n]?"".concat(t[e.dictionary][n],"@").concat(t.nodes[a[r.ni]]):null}),ci:u,di:u,row_type:e=>t=>({type:e.value[t]}),weight:()=>e=>({weight:e}),timeframe:e=>t=>({highlight:e.labels.reduce(((e,n,a)=>({...e,[n]:t[a]})),{})}),"baseline timeframe":e=>t=>({baseline:e.labels.reduce(((e,n,a)=>({...e,[n]:t[a]})),{})})},g={contexts:"ci",nodes:"ni",instances:"ii",dimensions:"di"},p={contexts:"id",nodes:"nodeId",instances:"id",dimensions:"id"},h=(e,t)=>null===e||e>t?t:e,f=(e,t)=>null===e||e{let{flavour:t,data:n,aggregation:a,options:r}=e,l={},o=0;if(n.v_schema){var i;const e=(null===(i=n.v_schema)||void 0===i?void 0:i.items)||[];l=n.result.reduce(((t,n)=>{let{id:r,v:l}=n;return t[r]=e.reduce(((e,t,n)=>{let{name:r,labels:o}=t;const i=o.indexOf(a);return e[r]=l[n][i],e}),{}),t}),{})}else{const e=(e=>{const t=Object.keys(e.dictionaries).reduce(((t,n)=>{t[n]||(t[n]={});const a=p[n];return e.dictionaries[n].forEach((e=>t[n][e[g[n]]]="nodeId"===a?e.nd||e.mg:e[a])),t}),{}),n={},a=e.schema.items.reduce(((e,a,r)=>{const l=(m[a.name]||m.default)(a,t);return e.push(l),n[a.name]=r,e}),[]);return e=>e.reduce(((t,r,l)=>({...t,...a[l](r,e,n)})),{})})(n);l=n.result.reduce(((t,n)=>{const{dimension:a,node:l,context:i,instance:s,type:c,weight:d,highlight:u,baseline:m}=e(n);if("node"===c)return t;const g=(p=t,y=i,function(e,t,n,a,r){let l=arguments.length>5&&void 0!==arguments[5]?arguments[5]:h;n&&(n&&"undefined"===typeof p[y][t][n]&&(p[y][t][n]={}),"undefined"===typeof p[y][t][n][e]&&(p[y][t][n][e]=null),a&&(p[y][t][n][e]=l(p[y][t][n][e],r)))});var p,y;t[i]||(t[i]={dimensions:{},instances:{},nodes:{},weight:null});const 
v=!!r&&r.includes("anomaly-bit");g("weight","dimensions",a,"dimension"===c,d,v?f:h),g("weight","instances",s,"instance"===c,d,v?f:h),g("weight","nodes",l,"node"===c,d,v?f:h);const x=u?100*u.anomaly_count/u.count:0;g("arHighlight","dimensions",a,"dimension"===c,x,f),g("arHighlight","instances",s,"instance"===c,x,f),g("arHighlight","nodes",l,"node"===c,x,f);const b=m?100*(m.anomaly_count-u.anomaly_count)/(m.count-u.count)===0?1:m.count-u.count:0;g("arBaseline","dimensions",a,"dimension"===c,b,f),g("arBaseline","instances",s,"instance"===c,b,f),g("arBaseline","nodes",l,"node"===c,b,f);const I=b?x/b:0;return g("score","dimensions",a,"dimension"===c,I,f),g("score","instances",s,"instance"===c,I,f),g("score","nodes",l,"node"===c,I,f),"context"===c?t[i].weight=null===t[i].weight||t[i].weight>d?d:t[i].weight:"dimension"===c&&(o+=1),t}),{})}return{byContext:l,totalDimensionsCount:o}},v=(e,t,n)=>{const l=(0,a.useRef)();return[(0,r.Zs)((a=>{let{set:r}=a;return a=>{let{flavour:s,highlightAfter:d,highlightBefore:u,baselineAfter:m,baselineBefore:g,method:p,options:h,group:f,context:v,nodeIds:x,aggregation:b,groupBy:I}=a;return r(c(e),{...i,loading:!0}),l.current&&l.current.cancel(),l.current=(0,o.e0)(t,n,{highlightAfter:d,highlightBefore:u,baselineAfter:m,baselineBefore:g,method:p,options:h,group:f,context:v,nodeIds:x,aggregation:b,groupBy:I}),l.current.then((t=>{let{data:n}=t;const{byContext:a,totalDimensionsCount:l}=y({flavour:s,data:n,aggregation:b,options:h});r(c(e),{loading:!1,loaded:!0,byContext:a,options:h,totalDimensionsCount:l,method:p,group:f,groupBy:I,error:null})})).catch((t=>{var n;if(!t.isCancel)throw r(c(e),{...i,error:(null===t||void 0===t||null===(n=t.response)||void 0===n?void 0:n.data)||t}),t}))}}),[e]),(0,r.Zs)((t=>{let{reset:n}=t;return()=>{l.current&&l.current.cancel(),n(c(e))}}),[e])]},x=e=>(0,r.vc)(c(e)),b=e=>(0,r.E0)(c(e)),I=.01,E=(e,t,n)=>{const[r,o]=v(e,t,n),{loaded:i,loading:s,byContext:c,totalDimensionsCount:d,options:u,groupBy:m}=x(e),g=(()=>{const e=(0,l.rW)("threshold");return e?Number.parseFloat(e):I})(),[p,h,f]=(0,a.useMemo)((()=>{let e=0;const t=null===u||void 0===u?void 0:u.includes("raw"),n=(m||[]).length?c:Object.keys(c).reduce(((n,a)=>{const r=Object.keys(c[a].dimensions).reduce(((e,n)=>(!t&&c[a].dimensions[n].weight>g||(e[n]=c[a].dimensions[n]),e)),{}),l=Object.keys(r).length;return e+=l,l?(n[a]={...c[a],dimensions:r},n):n}),{});return[n,Object.keys(n),e]}),[i,g]);return{threshold:g,metadata:p,contexts:h,dimensionsCount:f,totalDimensionsCount:d,loaded:i,loading:s,getWeights:r,resetWeights:o}}},23931:(e,t,n)=>{n.d(t,{Zr:()=>c,b0:()=>b,YP:()=>x,Uo:()=>y,GR:()=>A,Bu:()=>k,yO:()=>C,bD:()=>w,OL:()=>h,aZ:()=>f,rx:()=>m,bo:()=>u,_0:()=>d,R7:()=>I,jI:()=>g,w1:()=>E,DQ:()=>p});n(14905),n(8159),n(98992),n(8872),n(37550),n(25509),n(65223),n(60321),n(41927),n(11632),n(64377),n(66771),n(12516),n(68931),n(52514),n(35694),n(52774),n(49536),n(21926),n(94483),n(16215),n(62953);var a=n(96540),r=n(47444);const l={loaded:!1,fullyLoaded:!1,loading:!1,id:"",name:"",chartType:"",context:"",family:"",firstEntry:0,lastEntry:0,module:"",plugin:"",priority:0,updateEvery:0,visible:!1,filteredOut:!1,hidden:!1,domain:"room"};var o=n(69765),i=n(65570);const 
s={ids:(0,r.Iz)({key:"roomChartIds",default:[]}),updatedAt:(0,r.Iz)({key:"roomChartsUpdatedAt",default:""}),loaded:(0,r.Iz)({key:"roomChartsLoaded",default:!1}),error:(0,r.Iz)({key:"roomChartsError",default:null}),versions:(0,r.Iz)({key:"versions",default:{}}),fetchedFor:(0,r.Iz)({key:"fetchedFor",default:[]})},c=(0,r.K0)({key:"roomChartState",get:e=>{let{id:t,key:n}=e;return e=>{let{get:a}=e;return a(s[n](t))}},set:e=>{let{id:t,key:n}=e;return(e,a)=>{let{set:r}=e;r(s[n](t),a)}}}),d=e=>!!(0,r.vc)(c({id:e,key:"loaded"})),u=e=>(0,r.vc)(c({id:e,key:"fetchedFor"})),m=e=>(0,r.vc)(c({id:e,key:"error"})),g=e=>(0,r.vc)(c({id:e,key:"versions"})),p=function(e){let t=arguments.length>1&&void 0!==arguments[1]&&arguments[1];const n=(0,r.lZ)(c({id:e,key:"versions"}));return(0,a.useCallback)((e=>n((n=>(e="function"===typeof e?e(n):e,t?e:(0,i.Ay)(n,e,{omit:["contextsSoftHash"]})?n:n.contextsHardHash&&"invalidating"!==n.contextsHardHash?e:n)))),[])},h=e=>(0,r.vc)(c({id:e,key:"ids"})),f=(e,t)=>(0,r.vc)(c({id:e,key:t})),y=(0,r.K0)({key:"roomChartsState/ids",get:e=>{let{roomId:t}=e;return e=>{let{get:n}=e;return n(s.ids(t))}},set:e=>{let{roomId:t,merge:n=!0}=e;return(e,a)=>{let{set:r}=e;r(s.ids(t),(e=>{const t=n?[...new Set([...a,...e])]:a;return n&&(0,i.Ay)(t,e)?e:t}))}}}),v=(0,r.Iz)({key:"roomsChartBatchState",default:{}}),x=(0,r.K0)({key:"roomChartsState",get:e=>{let{id:t,key:n}=e;return e=>{let{get:a}=e;const r=a(v(t))||l;return n?r[n]:r}},set:e=>{let{id:t,key:n}=e;return(e,a)=>{let{set:r}=e;"function"!==typeof a?Object.keys(a).length<1||r(v(t),(e=>({...e,[n]:a}))):r(v(t),(e=>({...e,[n]:a(e[n])})))}}}),b=(0,r.K0)({key:"roomChartsState/initialize",set:function(){let{id:e,merge:t=!0,nodeIds:n}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return(a,r)=>{let{set:o}=a,{values:s,merge:d}=r;o(c({id:e,key:"fetchedFor"}),n),o(v(e),(e=>{const n=Object.keys(s).reduce(((n,a)=>{const{id:r}=s[a];return n[r]={...l,...(t||d)&&e[r],...s[a],loaded:!0},n}),t?{...e}:{});return t&&(0,i.Ay)(n,e,{omit:["lastEntry","firstEntry","family"]})?e:n}))}}}),I=(e,t)=>(0,r.vc)(x({id:e,key:t})),E=e=>(0,r.Zs)((t=>{let{set:n}=t;return t=>n(v(e),t)}),[]),C=(e,t)=>(0,r.Zs)((n=>{let{snapshot:a}=n;return n=>({...a.getLoadable(x({id:e,key:n})).contents,...(null===t||void 0===t?void 0:t[n])||{}})}),[e,t]),A=e=>{var t,n,a;const l=(0,o.ID)(),i=(0,r.vc)(v(l));return(null===(t=i[e])||void 0===t?void 0:t.visible)&&!(null!==(n=i[e])&&void 0!==n&&n.filteredOut)&&!(null!==(a=i[e])&&void 0!==a&&a.hidden)},k=e=>{const t=(0,o.ID)(),n=(0,r.vc)(v(t));return e.some((e=>{var t,a,r;return(null===(t=n[e])||void 0===t?void 0:t.visible)&&!(null!==(a=n[e])&&void 0!==a&&a.filteredOut)&&!(null!==(r=n[e])&&void 0!==r&&r.hidden)}))},w=()=>{const e=(0,o.ID)(),t=(0,r.vc)(v(e));return(0,a.useCallback)((e=>e.some((e=>{var n,a,r;return(null===(n=t[e])||void 0===n?void 0:n.visible)&&!(null!==(a=t[e])&&void 0!==a&&a.filteredOut)&&!(null!==(r=t[e])&&void 0!==r&&r.hidden)}))),[e,t])},S=(0,r.Iz)({key:"instancesByContextAtom",default:{}}),P=(0,r.K0)({key:"roomChartInstancesState",get:e=>{let{id:t,context:n}=e;return e=>{let{get:a}=e;const r=a(S(t));return n?r[n]:r}},set:e=>{let{id:t,context:n}=e;return(e,a)=>{let{set:r}=e;"function"!==typeof a?Object.keys(a).length<1||r(S(t),(e=>({...e,[n]:a}))):r(S(t),(e=>({...e,[n]:a(e[n])})))}}});(0,r.K0)({key:"roomChartInstances/initialize",set:e=>(t,n)=>{let{set:a}=t;a(S(e),(e=>({...e,...n})))}}),(0,r.K0)({key:"roomChartsInstancesState",get:e=>{let{id:t,contexts:n=[]}=e;return e=>{let{get:a}=e;const r=a(P({id:t}));return 
n.reduce(((e,t)=>(r[t]&&(e[t]=r[t]),e)),{})}}})},20687:(e,t,n)=>{n.d(t,{A:()=>E});n(41393),n(81454),n(62953);var a=n(96540),r=n(83199),l=n(47762),o=n(11988),i=n(64118),s=n(27467),c=n(29217),d=n(4659),u=n(8711),m=n(47767),g=n(45588),p=n(21290),h=n(45976);const f=(0,u.default)(r.Flex).withConfig({displayName:"alert__AlertContainer",componentId:"sc-ndxwzg-0"})(["transition:opacity 0.3s ease-in-out;&:hover{& ","{opacity:0.7;}}"],r.Text),y={overview:"overview",k8s:"kubernetes",singleNode:"nodes/:nodeId"},v=e=>{let{alert:t,onAlertClick:n,flavour:o}=e;const{value:i,instance:s,lastStatusChange:u,id:v,status:x,units:b,name:I,summary:E,nodeId:C}=t,A=(0,l.xY)(C,"name"),{localeTimeString:k,localeDateString:w}=(0,p.$j)(),S=(0,a.useMemo)((()=>{const e=new Date(1e3*u);return isNaN(e.valueOf())?"":"".concat(w(e,{long:!1})," ").concat(k(e,{secs:!1}))}),[u,k,w]),P=(0,a.useCallback)((()=>{n({alertId:v})}),[]),N=(0,m.g)(),F=(0,m.Zp)();return a.createElement(f,{column:!0,padding:[4,0],gap:1,border:{color:"border",side:"bottom"},"data-testid":"alertItem","data-test-name":"alertItem-".concat(I)},a.createElement(r.Flex,{column:!0},a.createElement(c.A,{plain:!0,content:E,isBasic:!0},a.createElement(r.Text,{strong:!0,"data-testid":"alertItem-alertName"},I)),a.createElement(r.TextSmall,{wordBreak:"break-all","data-testid":"alertItem-chartId"},s," @ ",A)),a.createElement(r.Flex,{alignItems:"center",justifyContent:"between"},a.createElement(h.A,{loaded:!0,status:x,units:b,value:i,flex:!1,"data-testid":"alertItem-alertValuePill"}),a.createElement(r.TextMicro,{color:"textLite","data-testid":"alertItem-alertDate"},S)),a.createElement(r.Flex,{gap:1},!!y[o]&&a.createElement(a.Fragment,null,a.createElement(d.A,{Component:r.TextMicro,onClick:()=>{F((0,g.tW)("/spaces/:spaceSlug/rooms/:roomSlug/".concat(y[o]),N),{replace:!0,state:{alertId:v}})}},"Plot on chart"),a.createElement(r.TextMicro,{color:"textLite"},"\u2022")),a.createElement(d.A,{Component:r.TextMicro,onClick:P},"Show info")))};var x=n(92136);const b=(e,t)=>e.status===t.status?0:"critical"===e.status?-1:1,I={nodesView:!0},E=e=>{let{nodeIds:t,flavour:n}=e;const[u,m]=(0,s.r$)("sidebarNodeId",{flavour:"val",extraKey:n}),g=(0,l.xY)(u),p=(0,o.YS)(u?[u]:t),h=(0,i.Gq)(p),f=(0,a.useMemo)((()=>[...h].sort(b)),[h]),[y,E,,C]=(0,x.A)(),A=h.length?"(".concat(h.length,")"):"";return a.createElement(r.Flex,{width:"100%",column:!0,overflow:"hidden"},a.createElement(r.Flex,{column:!0,gap:5,padding:[2]},a.createElement(r.Flex,{column:!0},a.createElement(c.A,{content:"Currently Active alerts",align:"bottom"},a.createElement(r.TextBig,{strong:!0,"data-testid":"nodeAlertsView-activeAlerts-header"},"Alerts ",A)),!!I[n]&&a.createElement(r.TextMicro,null,"Showing ",g.id?a.createElement(r.TextMicro,{strong:!0},g.name):"room"," alerts"," ",!!g.id&&a.createElement(a.Fragment,null," ","-"," ",a.createElement(d.A,{Component:r.TextMicro,onClick:()=>m("")},"Show all")))),!h.length&&a.createElement(r.Flex,{column:!0,gap:8,padding:[0,5],"data-testid":"nodesAlertsView-blankSlate",alignItems:"center"},a.createElement(r.Icon,{name:"checkmark",width:"90px",height:"90px"}),a.createElement(r.H4,{textAlign:"center"},1!==t.length?"No":"This node has no"," active alerts, you are all good!"))),a.createElement(r.Flex,{column:!0,overflow:{vertical:"auto"},padding:[0,2]},f.map((e=>a.createElement(v,{key:e.id,alert:e,onAlertClick:E,flavour:n})))),y&&C)}},94177:(e,t,n)=>{n.d(t,{A:()=>i});var a=n(58168),r=n(96540),l=n(42828),o=n(59846);const i=e=>{let{baseKey:t,extraKey:n,testIdPrefix:i,groupProps:s,...c}=e;return 
r.createElement(o.A,(0,a.A)({title:"Nodes",testIdPrefix:i,baseKey:t,extraKey:n,param:"selectedIds",defaultIsOpen:!1,help:"Select nodes individually. By selecting nodes in this filter, you bypass the rest of the filters."},s),r.createElement(l.Ay,(0,a.A)({baseKey:t,extraKey:n,itemProps:{padding:[1,1,1,.5]},searchMargin:[0,0,1],height:{max:"300px"},testIdPrefix:i},c)))}},99292:(e,t,n)=>{n.d(t,{kO:()=>Xe,Ay:()=>lt});var a=n(58168),r=(n(17333),n(41393),n(98992),n(54520),n(81454),n(62953),n(96540)),l=n(8711),o=n(83199),i=n(27467),s=n(81638),c=n(30577),d=n(93615),u=n(64473),m=n(47762),g=n(12352),p=n(60072),h=n(80618),f=n(25825),y=n(50065);const v=[{name:"MySQL",title:"MySQL",iconName:"serviceMySQL",logoFilename:"mysql.svg",category:"Databases",collectors:["python.d.plugin:mysql","go.d.plugin:mysql"],contexts:["mysql.queries","mysql.net","mysql.connections"],priority:1},{name:"MariaDB",title:"MariaDB",iconName:"serviceMariaDB",logoFilename:"mariadb.svg",category:"Databases",collectors:["python.d.plugin:mysql","go.d.plugin:mysql"],contexts:["mysql.queries","mysql.net","mysql.connections"],priority:2},{name:"Oracle Database",title:"Oracle Database",iconName:"osOracle",logoFilename:"oracle.svg",category:"Databases",collectors:["python.d.plugin:oracledb"],contexts:["oracledb.session_count","oracledb.physical_disk_read_writes","oracledb.tablespace_usage_in_percent"],priority:3},{name:"PostgreSQL",title:"PostgreSQL",iconName:"servicePostgreSQL",logoFilename:"postgresql.svg",category:"Databases",collectors:["python.d.plugin:postgres"],contexts:["postgres.checkpointer","postgres.archive_wal","postgres.db_size"],priority:4},{name:"MongoDB",title:"MongoDB",iconName:"serviceMongoDB",logoFilename:"mongodb.svg",category:"Databases",collectors:["python.d.plugin:mongodb"],contexts:["mongodb.active_clients","mongodb.read_operations","mongodb.write_operations"],priority:5},{name:"ElasticSearch",title:"ElasticSearch",iconName:"serviceElasticSearch",logoFilename:"elasticsearch.svg",category:"Databases",collectors:["python.d.plugin:elasticsearch"],contexts:["elastic.search_performance_total","elastic.index_performance_total","elastic.index_segments_memory"],priority:6},{name:"CouchDB",title:"CouchDB",iconName:"serviceCouchDB",logoFilename:"couchdb.svg",category:"Databases",collectors:["python.d.plugin:couchdb"],contexts:["couchdb.activity","couchdb.response_codes"],priority:7},{name:"Proxy SQL",title:"Proxy 
SQL",iconName:"serviceProxySQL",logoFilename:"proxysql.svg",category:"Databases",collectors:["python.d.plugin:proxysql"],contexts:["proxysql.questions","proxysql.pool_status","proxysql.pool_overall_net"],priority:8},{name:"Redis",title:"Redis",iconName:"serviceRedis",logoFilename:"redis.svg",category:"Databases",collectors:["python.d.plugin:redis"],contexts:["redis.operations","redis.net","redis.connections"],priority:9},{name:"MemCached",title:"MemCached",iconName:"serviceMemCached",logoFilename:"memcached.svg",category:"Databases",collectors:["python.d.plugin:memcached"],contexts:["memcached.cache","memcached.net","memcached.connections"],priority:10},{name:"RethinkDB",title:"RethinkDB",iconName:"serviceRethinkDB",logoFilename:"rethinkdb.svg",category:"Databases",collectors:["python.d.plugin:rethinkdbs"],contexts:["rethinkdb.cluster_queries","rethinkdb.cluster_clients_active","rethinkdb.cluster_connected_servers"],priority:11},{name:"Solr",title:"Solr",iconName:"serviceSolr",logoFilename:"solr.svg",category:"Databases",collectors:["go.d.plugin:solr"],contexts:["solr.search_requests","solr.update_requests"],priority:12},{name:"RabbitMQ",title:"RabbitMQ",iconName:"serviceRabbitMQ",logoFilename:"rabbitmq.svg",category:"Messaging",collectors:["python.d.plugin:rabbitmq","go.d.plugin:rabbitmq"],contexts:["rabbitmq.queued_messages","rabbitmq.erlang_run_queue"],priority:1},{name:"Beanstalkd",title:"Beanstalkd",iconName:"serviceBeanstalk",logoFilename:"beanstalkd.svg",category:"Messaging",collectors:["python.d.plugin:beanstalk"],contexts:["beanstalk.total_jobs_rate","beanstalk.connections_rate","beanstalk.current_tubes"],priority:2},{name:"Apache",title:"Apache",iconName:"serviceApache",logoFilename:"apache.svg",category:"Web, Proxies, LBs, Streaming",collectors:["python.d.plugin:apache","go.d.plugin:apache"],contexts:["apache.requests","apache.connections","apache.net"],priority:1},{name:"nginx",title:"nginx",iconName:"serviceNginx",logoFilename:"nginx.svg",category:"Web, Proxies, LBs, Streaming",collectors:["python.d.plugin:nginx","go.d.plugin:nginx"],contexts:["nginx.requests","nginx.connections"],priority:2},{name:"nginx+",title:"nginx+",iconName:"serviceNginxPlus",logoFilename:"nginx-plus.svg",category:"Web, Proxies, LBs, Streaming",collectors:["python.d.plugin:nginx_plus"],contexts:["nginx_plus.requests_total","nginx_plus.connections_statistics"],priority:3},{name:"lighthttpd",title:"lighthttpd",iconName:"serviceLighthttpd",logoFilename:"lighthttpd.svg",category:"Web, Proxies, LBs, Streaming",collectors:["python.d.plugin:lighttpd","go.d.plugin:lighttp"],contexts:["lighttpd.requests","lighttpd.net"],priority:4},{name:"lighthttpd2",title:"lighthttpd2",iconName:"serviceLighthttpd2",logoFilename:"lighthttpd.svg",category:"Web, Proxies, LBs, Streaming",collectors:["go.d.plugin:lighttpd2"],contexts:["lighttpd2.requests","lighttpd2.traffic"],priority:5},{name:"LiteSpeed",title:"LiteSpeed",iconName:"serviceLiteSpeed",logoFilename:"litespeed.svg",category:"Web, Proxies, LBs, Streaming",collectors:["python.d.plugin:litespeed"],contexts:["litespeed.requests","litespeed.requests_processing"],priority:6},{name:"Tomcat",title:"Tomcat",iconName:"serviceApacheTomcat",logoFilename:"tomcat.svg",category:"Web, Proxies, LBs, Streaming",collectors:["python.d.plugin:tomcat"],contexts:["tomcat.accesses","tomcat.processing_time","tomcat.bandwidth"],priority:7},{name:"PHP FPM",title:"PHP FPM",iconName:"servicePhpFpm",logoFilename:"php-fpm.svg",category:"Web, Proxies, LBs, 
Streaming",collectors:["python.d.plugin:phpfm"],contexts:["phpfpm.performance","phpfpm.requests","phpfpm.connections"],priority:8},{name:"HAproxy",title:"HAproxy",iconName:"serviceHAProxy",logoFilename:"haproxy.svg",category:"Web, Proxies, LBs, Streaming",collectors:["python.d.plugin:haproxy"],contexts:["haproxy_f.scur","haproxy_f.bin","haproxy_f.bout"],priority:9},{name:"Squid",title:"Squid",iconName:"serviceSquid",logoFilename:"squid.svg",category:"Web, Proxies, LBs, Streaming",collectors:["python.d.plugin:squid"],contexts:["squid.clients_requests","squid.clients_net"],priority:10},{name:"Traefik",title:"Traefik",iconName:"serviceTraefik",logoFilename:"traefik.svg",category:"Web, Proxies, LBs, Streaming",collectors:["python.d.plugin:traefik"],contexts:["traefik.response_codes"],priority:11},{name:"Varnish",title:"Varnish",iconName:"serviceVarnish",logoFilename:"varnish.svg",category:"Web, Proxies, LBs, Streaming",collectors:["python.d.plugin:varnish"],contexts:["varnish.session_connection","varnish.client_requests"],priority:12},{name:"IPVS",title:"IPVS",iconName:"serviceIPVS",logoFilename:"load-balancer.svg",category:"Web, Proxies, LBs, Streaming",collectors:["proc.plugin:/proc/net/ip_vs_stats"],contexts:["ipvs.sockets","ipvs.packets","ipvs.net"],priority:13},{name:"Web Log",title:"Web Log",iconName:"serviceWebLog",logoFilename:"log-file.svg",category:"Web, Proxies, LBs, Streaming",collectors:["python.d.plugin:web_log","go.d.plugin:web_log"],contexts:["web_log.response_codes","web_log.bandwidth"],priority:14},{name:"IPFS",title:"IPFS",iconName:"serviceIPFS",logoFilename:"ipfs.svg",category:"Web, Proxies, LBs, Streaming",collectors:["python.d.plugin:ipfs"],contexts:["ipfs.bandwidth","ipfs.peers"],priority:15},{name:"IceCast Media Streaming",title:"IceCast Media Streaming",iconName:"serviceIceCast",logoFilename:"icecast.svg",category:"Web, Proxies, LBs, Streaming",collectors:["python.d.plugin:icecast"],contexts:["icecast.listeners"],priority:16},{name:"RetroShare",title:"RetroShare",iconName:"serviceRetroShare",logoFilename:"retroshare.svg",category:"Web, Proxies, LBs, Streaming",collectors:["python.d.plugin:retroshare"],contexts:["retroshare.bandwidth","retroshare.peers"],priority:17},{name:"HTTP Check",title:"HTTP Check",iconName:"serviceHTTPCheck",logoFilename:"server-connection.svg",category:"Web, Proxies, LBs, Streaming",collectors:["python.d.plugin:httpcheck","go.d.plugin:httpcheck"],contexts:["httpcheck.responsetime","httpcheck.status"],priority:18},{name:"x509 Check",title:"x509 Check",iconName:"serviceX509Check",logoFilename:"data-encryption.svg",category:"Web, Proxies, LBs, Streaming",collectors:["go.d.plugin:x509check"],contexts:["x509check.time_until_expiration"],priority:19}].reduce(((e,t)=>({...e,[t.name]:t})),{}),x=e=>{var t;return(null===(t=v[e])||void 0===t?void 0:t.iconName)||"services"},b=e=>{var t;let{node:n}=e;return null!==(t=n.services)&&void 0!==t&&t.length?r.createElement(y.A,{link:"https://learn.netdata.cloud/docs/agent/collectors/collectors#service-and-application-collectors",name:"Services"},n.services.map((e=>r.createElement(g.A,{size:"small",key:e,iconName:x(e),name:e,testId:"sidebar-ServicesGroup-service-".concat(e)})))):null};var I=n(58010),E=n(17240);const C=e=>{let{nodeIds:t,flavour:n}=e;const a=(0,i.l6)("sidebarNodeId",{defaultValue:t[0],flavour:"val",extraKey:n}),l=(0,m.xY)(a);return 
l.id?r.createElement(o.Flex,{column:!0,gap:1,overflow:{horizontal:"hidden",vertical:"auto"},padding:[2],width:"100%"},r.createElement(o.TextBig,{strong:!0},"Info"),r.createElement(o.Flex,{column:!0,padding:[2,0,3],border:{side:"bottom",color:"borderSecondary"}},false,r.createElement(o.TextSmall,{"data-testid":"sidebar-nodeInfoContent-hostname",strong:!0},"Hostname: ",l.name)),r.createElement(f.A,{node:l}),r.createElement(p.A,{node:l}),r.createElement(h.A,{node:l}),r.createElement(I.A,{node:l}),r.createElement(b,{node:l}),r.createElement(E.A,{node:l})):r.createElement(o.Text,null,"Select a node to see its info")};var A=n(29217),k=n(20687),w=(n(25440),n(55309)),S=n(20982),P=n(18713),N=n(59090);const F=e=>e&&"0"!==e?"".concat(parseFloat(e).toFixed(2),"%"):"-";n(14905),n(8872);var _=n(82543),K=n(61427);const M=e=>{let{chartIds:t,weightKey:n,flavour:l="rhs",...i}=e;const[s,c,d]=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:[],{flavour:t,weightKey:n="weight"}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};const a=(0,_.g)(),{loaded:l,byContext:o}=(0,K.G9)({nodeIds:a,flavour:t});return(0,r.useMemo)((()=>{if(!l)return[!1,null,null];let t=null;const a=e.reduce(((e,a)=>{let r;if(/::/.test(a)){const e=a.split("::");a=e[0],r=e[1]}const[l,i]=r?((e,t,n,a)=>{var r,l;let{max:o,maxWeightContext:i,weightKey:s}=a;const c="".concat(n,",").concat(t);return e[c]&&((null===(r=e[c])||void 0===r?void 0:r[s])||0)>o?[(null===(l=e[c])||void 0===l?void 0:l[s])||0,t]:[o,i]})(o,a,r,{max:e,maxWeightContext:t,weightKey:n}):((e,t,n)=>{let{max:a,maxWeightContext:r,weightKey:l}=n;return e[t]&&(e[t][l]||0)>a?[e[t][l]||0,t]:[a,r]})(o,a,{max:e,maxWeightContext:t,weightKey:n});return t=i,l}),0);return[!0,a,t]}),[e,o])}(t,{flavour:l,weightKey:n});return s?r.createElement(A.A,{content:d,align:"top",enterDelay:200,activateOn:"hover"},r.createElement(o.Pill,(0,a.A)({hollow:!0,flavour:"neutral",tiny:!0,cursor:i.onClick?"pointer":"auto"},i),F(c))):null},L=(0,l.default)(o.IconButton).attrs((e=>({flavour:"borderless",cursor:"pointer",width:"12px",height:"12px",iconColor:"textLite",padding:[0],...e}))).withConfig({displayName:"config__ConfigButton",componentId:"sc-1q0bfbp-0"})([""]),T=(0,l.default)(o.Flex).withConfig({displayName:"menuSettings__SettingsContainer",componentId:"sc-1nq4zzt-0"})(["display:",";"],(e=>e.active&&!e.hasSubmenuActive?"flex":"none")),D=(0,l.css)(["","{display:flex;"," .button-icon__color{fill:"," !important;}}"],T,L,(0,o.getColor)("text")),B=e=>{let{id:t,chartIds:n,arFlavour:a,showAR:l=!0,showConfig:i,weightKey:s,active:c,hasSubmenuActive:d,extraKey:u}=e;return r.createElement(o.Flex,{gap:1,alignItems:"center"},!1,l&&r.createElement(M,{chartIds:n,flavour:a,weightKey:s}))},q=(0,l.default)(o.TextSmall).attrs((e=>{let{active:t,id:n,name:a,link:r,children:l,...o}=e;return{strong:t,children:a||l||n,href:"#".concat(r),"data-sidebar-submenuid":n,truncate:!0,whiteSpace:"normal",color:"textLite",wordWrap:"break-word",...o}})).withConfig({displayName:"menuItem__MenuLabelWrapper",componentId:"sc-18613h0-0"})(["&&&{text-decoration:none;}"]),V=(0,l.css)(["border-left:1px solid ",";","{color:",";}"],(0,o.getColor)("key"),q,(0,o.getColor)("key")),O=(0,l.default)(o.Flex).attrs((e=>{let{active:t,...n}=e;return{width:"98%",padding:[.5,1,.5,4],role:"listitem",as:"a","aria-current":t,active:t,...n}})).withConfig({displayName:"menuItem__Container",componentId:"sc-18613h0-1"})(["&&&{border-left:1px solid transparent;"," "," text-decoration:none;cursor:pointer;&:hover{"," 
","}}"],(e=>e.active&&V),(e=>e.active&&"".concat(q," { font-weight: bold; }")),V,D),z=(0,N.A)(q,(e=>{let{name:t,id:n}=e;return{children:t||n}})),H=(0,r.forwardRef)(((e,t)=>{let{id:n,useItemsAreVisible:l,...o}=e;const i=l(o.chartIds);return o.forceVisibility||i?r.createElement(O,(0,a.A)({ref:t,as:"a",gap:2,alignItems:"center",justifyContent:"between","data-sidebar-menugroupid":n},o)):null})),W=(0,N.A)(H,(e=>{let{id:t,link:n,chartIds:a,forceVisibility:r}=e;return{id:t,href:"#".concat(n),chartIds:a,forceVisibility:r}})),j=(0,N.A)((e=>t=>{let{id:n,...l}=t;const o=(0,P.W7)((e=>n===e));return r.createElement(e,(0,a.A)({active:o,id:n},l))})((G=e=>{let{id:t,...n}=e;return r.createElement(W,(0,a.A)({id:t},n),r.createElement(z,{id:t}),r.createElement(B,(0,a.A)({},n,{id:t})))},e=>{let{id:t,onSubMenuClick:n,...l}=e;const o=(0,r.useMemo)((()=>n&&(e=>n(t,e))),[n,t]);return r.createElement(G,(0,a.A)({onClick:o,id:t},l))})));var G;const R=(0,l.default)(o.Text).attrs({color:"textLite"}).withConfig({displayName:"menuGroup__MenuGroupLabelWrapper",componentId:"sc-1q9pvct-0"})(["font-weight:500;"]),U=(0,l.default)(o.Flex).attrs({flex:!1,color:"textLite",width:6,alignItems:"center",justifyContent:"center"}).withConfig({displayName:"menuGroup__FaIcon",componentId:"sc-1q9pvct-1"})([""]),Q=(0,l.css)(["border-left:2px solid ",";","{color:",";}"],(0,o.getColor)("text"),R,(0,o.getColor)("text")),Z=(0,l.default)(o.Flex).attrs({width:"98%"}).withConfig({displayName:"menuGroup__Container",componentId:"sc-1q9pvct-2"})(["&&&{border-left:2px solid transparent;"," "," text-decoration:none;cursor:pointer;&:hover{"," ","}}"],(e=>e.active&&Q),(e=>e.active&&"".concat(R," { font-weight: bold; }")),Q,D),Y=(0,w.HB)(R,(e=>{let{name:t,id:n}=e;return{children:t||n}})),$=(0,w.HB)((e=>{let{icon:t}=e;return t?r.createElement(U,null,o.iconsList[t]?r.createElement(o.Icon,{name:t,size:"small",color:"text"}):r.createElement(S.g,{icon:t})):null}),(e=>{let{icon:t}=e;return{icon:t}})),J=(0,r.forwardRef)(((e,t)=>{let{id:n,subMenuChartIds:l,useItemsAreVisible:o,...i}=e;const s=o(l);return i.forceVisibility||s?r.createElement(Z,(0,a.A)({ref:t,as:"a",gap:2,padding:[1],alignItems:"center",justifyContent:"between","data-sidebar-menugroupid":n},i)):null})),X=(0,w.HB)(J,(e=>{let{id:t,link:n,subMenuChartIds:a,forceVisibility:r}=e;return{id:t,href:"#".concat(n),subMenuChartIds:a,forceVisibility:r}})),ee=e=>{let{id:t,...n}=e;return r.createElement(X,(0,a.A)({id:t},n),r.createElement(o.Flex,null,r.createElement($,{id:t}),r.createElement(Y,{id:t})),r.createElement(B,(0,a.A)({},n,{id:t})))},te=(0,w.HB)((e=>{let{subMenuIds:t,onSubMenuClick:n,useItemsAreVisible:l,...i}=e;return r.createElement(o.Flex,(0,a.A)({column:!0,role:"list",padding:[0,0,2,0]},i),t.map((e=>r.createElement(j,{key:e,id:e,onSubMenuClick:n,useItemsAreVisible:l}))))}),(e=>{let{subMenuIds:t}=e;return{subMenuIds:t}})),ne=(0,r.forwardRef)(((e,t)=>r.createElement(o.Flex,(0,a.A)({as:"li",column:!0,ref:t},e)))),ae=(e=>t=>{let{id:n,...l}=t;const o=(0,P.C)(n),i=(0,P.W7)((e=>!!e));return r.createElement(e,(0,a.A)({active:o,hasSubmenuActive:i,id:n},l))})((e=>t=>{let{id:n,onMenuGroupClick:l,onSubMenuClick:o,...i}=t;const s=(0,r.useMemo)((()=>l&&(e=>l(n,e))),[l,n]),c=(0,r.useMemo)((()=>o&&((e,t)=>o(n,e,t))),[o,n]);return r.createElement(e,(0,a.A)({id:n,onMenuGroupClick:s,onSubMenuClick:c},i))})((e=>{let{id:t,active:n,hasSubmenuActive:l,onMenuGroupClick:o,onSubMenuClick:i,showChildren:s=!0,useItemsAreVisible:c,...d}=e;const u=(0,w.Ee)(t);return 
u?r.createElement(ne,d,r.createElement(ee,(0,a.A)({},u,{id:t,onClick:o,active:n,hasSubmenuActive:l,chartIds:u.subMenuChartIds,useItemsAreVisible:c})),s&&n&&r.createElement(te,{id:t,onSubMenuClick:i,useItemsAreVisible:c})):null}))),re=(0,r.forwardRef)(((e,t)=>r.createElement(o.Flex,(0,a.A)({as:"ul",width:"100%",role:"complementary",column:!0,overflow:{vertical:"auto"},"data-sidebar":"true",ref:t},e)))),le=()=>!0,oe=(0,r.memo)((e=>{let{menuGroupIds:t,onMenuGroupClick:n,onSubMenuClick:l,menuGroupProps:o,useItemsAreVisible:i=le,extraKey:s,...c}=e;return r.createElement(re,c,t.map((e=>r.createElement(ae,(0,a.A)({key:e,id:e,onMenuGroupClick:n,onSubMenuClick:l,useItemsAreVisible:i,extraKey:s},o)))))})),ie=(0,w.$V)(oe),se=(0,r.memo)(ie);var ce=n(79566),de=n(71847),ue=n(73700),me=n(13271);const ge=()=>{const[e,t]=(0,me.sK)(),n=(0,r.useCallback)((0,ue.s)(300,(e=>{t(e.target.value),(0,de.H)("toc-search","search-automatic-trigger","charts-view",e.target.value)})),[]),[a,l,,,{resetValue:i}]=(0,o.useInputValue)({value:e,onChange:n}),s=(0,r.useCallback)((()=>{t(""),i(),(0,de.H)("toc-search","clear-search-button","charts-view")}),[]);return r.createElement(o.TextInput,{value:a,onChange:l,size:"small",iconRight:a&&r.createElement(o.Icon,{name:"x",color:"textLite",cursor:"pointer",size:"small",onClick:s,"data-testid":"filterChartsInput-clearButton"}),placeholder:"Search charts",name:"Search charts","data-testid":"filterChartsInput",containerStyles:{width:"100%"}})};var pe=n(47767),he=n(4659),fe=n(97054);const ye=(0,ue.s)(300,((e,t)=>(0,de.H)("toc-search","search-results","charts-view",e,"".concat(t," -- charts")))),ve=()=>{const e=(0,pe.Zp)(),[t]=(0,fe.Ay)(),n=(0,me.Um)(),{filteredChartsCount:a,chartsCount:l}=(0,me.Ad)();(0,r.useEffect)((()=>{n&&ye(n,a)}),[n,a]);const i=(0,r.useCallback)((n=>{n.preventDefault(),e("/spaces/".concat(t,"/settings/configurations"),{replace:!0})}),[t,e]);return r.createElement(r.Fragment,null,r.createElement(o.TextMicro,{alignSelf:"end",color:"textLite",margin:[1,0,0],"data-testid":"numberOfChartsIndication"},"Showing"," ",r.createElement(o.TextMicro,{strong:!0,color:"textDescription","data-testid":"numberOfChartsIndication-filteredChartsCount"},a)," ","of total"," ",r.createElement(o.TextMicro,{strong:!0,color:"textDescription","data-testid":"numberOfChartsIndication-chartsCount"},l)," ","charts"),r.createElement(he.A,{Component:o.TextMicro,alignSelf:"end","data-ga":"toc-search::click-link-collectors::".concat(name),onClick:i,target:"_blank",rel:"noopener noreferrer"},"Add more charts"))},xe=()=>{const{filteredChartsCount:e}=(0,me.Ad)();return e?null:r.createElement(o.Flex,{width:"100%",gap:1,margin:[4,0,0],padding:[2],round:1,background:"disabledBackground",alignItems:"center","data-testid":"noResultsWereFound"},r.createElement(o.Icon,{name:"information",color:"textDescription"}),r.createElement(o.TextMicro,{strong:!0,color:"textDescription"},"No results were found"))},be=(0,_.A)(ve),Ie=()=>r.createElement(o.Flex,{column:!0,alignItems:"center",height:{min:"65px"}},r.createElement(ge,null),r.createElement(be,null),r.createElement(xe,null));var Ee=n(23931),Ce=n(8320),Ae=n(68831),ke=n(87659),we=n(3914),Se=n(69765);const{demoSlug:Pe,demoFavourites:Ne}=Ae.A,Fe=(0,l.default)(o.Icon).attrs({size:"small",name:"chevron_left",color:"textLite"}).withConfig({displayName:"favourites__StyledChevron",componentId:"sc-mwcyj8-0"})(["transition:transform 0.2s ease-in-out;"]),_e={showChildren:!1},Ke=e=>{let{onMenuGroupClick:t,onSubMenuClick:n,initialChartName:a,flavour:l}=e;const 
i=(0,we.bq)(),s=(0,Se.QW)(),c=Pe===i&&Ne[s]||[],d=(0,w.re)((u=c,e=>u.reduce(((t,n)=>e[n]?[...t,n]:t),[])));var u;const{onMenuGroupClick:m,onSubMenuClick:g}=(0,ce.A)({onMenuGroupClick:t,onSubMenuClick:n}),[p,h]=(0,ke.A)(!0),f=(0,Ce.UH)();return(0,r.useEffect)((()=>{f&&!a&&d.length&&t(d[0])}),[f,a]),d.length?r.createElement(o.Flex,{column:!0,gap:1,border:{side:"bottom",color:"borderSecondary"},padding:[0,0,2]},r.createElement(o.Flex,{onClick:h,cursor:"pointer",alignItems:"center",justifyContent:"between"},r.createElement(o.Flex,{gap:1,alignItems:"center"},r.createElement("i",{className:"fa-regular fa-star"}),r.createElement(o.Text,{strong:!0,color:"textFocus"},"Favourites")),r.createElement(Fe,{rotate:p?1:3,alignSelf:"end"})),r.createElement(o.Collapsible,{open:p,padding:[0,0,0,2]},r.createElement(oe,{menuGroupIds:d,onMenuGroupClick:m,onSubMenuClick:g,menuGroupProps:_e,extraKey:l}))):null},Me={nodesView:()=>!0,default:Ee.Bu},Le=e=>(0,de.H)("metric-sidebar","click-".concat(De(e)),"mn-overview"),Te=e=>{const t=De(e);(0,de.H)("metric-sidebar","click-".concat(t),"mn-overview",t)},De=e=>e.startsWith("Kubernetes")?"k8s":e.replace(/[^\w]/g,"-").toLowerCase(),Be=e=>{let{initialChartName:t,loaded:n,hasSearch:a=!0,flavour:l}=e;const{onMenuGroupClick:i,onSubMenuClick:s}=(0,ce.A)({onMenuGroupClick:Le,onSubMenuClick:Te,loaded:n}),c=Me[l]||Me.default;return r.createElement(r.Fragment,null,a&&r.createElement(o.Flex,{column:!0,position:"sticky",top:"0",gap:1,padding:[0,2]},r.createElement(o.Text,{strong:!0},"Sections and Charts"),r.createElement(Ie,null)),n&&r.createElement(r.Fragment,null,r.createElement(Ke,{onMenuGroupClick:i,onSubMenuClick:s,initialChartName:t,useItemsAreVisible:c,flavour:l}),r.createElement(se,{position:"relative",onMenuGroupClick:i,onSubMenuClick:s,width:"100%",useItemsAreVisible:c,extraKey:l})))};var qe=n(23452),Ve=n(59846),Oe=n(82700);const ze=e=>e.id,He=e=>{let{baseKey:t,param:n,paramKey:l,extraKey:o,testIdPrefix:s,title:c,groupProps:d}=e;const u=(e=>{let{extraKey:t,param:n,paramKey:a}=e;const l=(0,qe.QD)({extraKey:t,omit:n,keepAll:!0}),o=(0,i.l6)(n,{extraKey:t,flavour:"arr"});return(0,r.useMemo)((()=>Object.entries(l.reduce(((e,t)=>(e[t[a]]=(e[t[a]]||0)+(t.hidden?0:1),e)),{...qe.hv[n]||{}})).map((e=>{let[t,n]=e;return{id:t,count:n}})).sort(qe.qr[n]||qe.qr.default)),[l.filter((e=>e.hidden)).length,o])})({param:n,extraKey:o,paramKey:l});return u.length?r.createElement(Ve.A,(0,a.A)({title:c,testIdPrefix:s,baseKey:t,extraKey:o,param:n,help:"Filter the alerts. 
Count next to each item refers to the number of alerts that will be selected."},d),r.createElement(Oe.Ay,{param:n,baseKey:t,extraKey:o,testIdPrefix:s,collection:u,getValue:ze,getLabel:ze})):null};n(30237),n(32679),n(8159),n(30670),n(37550),n(25509),n(65223),n(60321),n(41927),n(11632),n(64377),n(66771),n(12516),n(68931),n(52514),n(35694),n(52774),n(49536),n(21926),n(94483),n(16215);var We=n(66245),je=n.n(We),Ge=n(45467);const Re=e=>{const{baseKey:t,extraKey:n,param:l,testIdPrefix:o,itemsProps:s,itemProps:c,groupProps:d,...u}={...Oe.H5,...e},m=(e=>{let{extraKey:t,param:n}=e;const a=(0,qe.QD)({extraKey:t,omit:n,keepAll:!0}),l=(0,i.l6)(n,{extraKey:t,flavour:"arr"});return(0,r.useMemo)((()=>Object.entries(a.reduce(((e,t)=>{let{type:n,component:a,hidden:r}=t;return e[n]=e[n]||{count:0,children:{}},e[n].count=(e[n].count||0)+(r?0:1),e[n].children[a]=(e[n].children[a]||0)+(r?0:1),e}),{})).reduce(((e,t)=>{let[n,{count:a,children:r}]=t;return e.push({id:n,count:a,children:Object.entries(r).map((e=>{let[t,a]=e;return{value:"".concat(n,"|").concat(t),label:t,count:a,level:1}}))}),e}),[]).sort(qe.qr[n]||qe.qr.default)),[a.filter((e=>e.hidden)).length,l])})({param:l,extraKey:n}),[g,p]=(0,i.N9)(l,{defaultValue:[],key:t,extraKey:n,flavour:"arr"});(0,Ge.A)((()=>{m.length&&g.length&&p((e=>e.filter((e=>m.some((t=>t.children.some((t=>t.value===e))))))))}),[m.length,p]);const h=(0,r.useCallback)((e=>{let{value:t,checked:n}=e;p((function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:[];return Array.isArray(t)?n?[...new Set([...e,...t])]:je()(e,t):n?[...e,t]:e.filter((e=>e!==t))}))}),[p]),f=(0,r.useMemo)((()=>(e=>{let{options:t,selected:n,itemsProps:a={}}=e;if(!t.length)return[];const r=t.flatMap((e=>e.children.map((e=>e.value)))),l=r.length===n.length,o=!l&&!!n.length;return[{...a.head,label:"Select all",value:r,selected:l,indeterminate:o},...t.flatMap((e=>{let{id:t,children:r,count:l}=e;const o=n.length&&!r.some((e=>!n.includes(e.value))),i=!o&&n.length&&r.some((e=>n.includes(e.value)));return r.reduce(((e,t)=>{const r=n.includes(t.value);return[...e,{...a.row,...t,selected:r}]}),[{...a.head,label:t,value:r.map((e=>e.value)),selected:o,indeterminate:i,count:l}])}))]})({options:m,selected:g,itemsProps:s})),[g,p,m]);return m.length?r.createElement(Ve.A,(0,a.A)({title:"Alert type & component",testIdPrefix:o,baseKey:t,extraKey:n,param:l,help:"Filter the alerts. 
Count next to each item refers to the number of alerts that will be selected."},d),r.createElement(Oe.V$,(0,a.A)({"data-testid":"".concat(o,'-filter-selections"'),searchMargin:[0,0,1],items:f,onItemClick:h,value:g,"data-value":g.join(",")||"all-selected",hasSearch:m.length>5,itemProps:{testIdPrefix:o,...c}},u))):null};var Ue=n(11128);const Qe=e=>e.id,Ze=e=>{let{baseKey:t,param:n,paramKey:l,extraKey:o,testIdPrefix:s,title:c,groupProps:d,...u}=e;const m=(e=>{let{extraKey:t,param:n,paramKey:a}=e;const l=(0,Ue.eO)({extraKey:t,omit:n,keepAll:!0,scoped:!!t}),o=(0,i.l6)(n,{extraKey:t,flavour:"arr"});return(0,r.useMemo)((()=>Object.entries(l.reduce(((e,t)=>(e[t[a]]=(e[t[a]]||0)+(t.hidden?0:1),e)),{})).map((e=>{let[t,n]=e;return{id:t,count:n}})).sort(Ue.qr[n]||Ue.qr.default)),[l.filter((e=>e.hidden)).length,o])})({param:n,extraKey:o,paramKey:l});return m.length?r.createElement(Ve.A,(0,a.A)({title:c,testIdPrefix:s,baseKey:t,extraKey:o,param:n,defaultIsOpen:!1},d),r.createElement(Oe.Ay,(0,a.A)({param:n,baseKey:t,extraKey:o,testIdPrefix:s,collection:m,getValue:Qe,getLabel:Qe},u))):null};n(9920),n(3949);const Ye=e=>{const{baseKey:t,extraKey:n,param:l,testIdPrefix:o,itemsProps:s,itemProps:c,groupProps:d,...u}={...Oe.H5,...e},m=(e=>{let{extraKey:t,param:n}=e;const a=(0,Ue.eO)({extraKey:t,omit:n,keepAll:!0,merge:!0}),l=(0,i.l6)(n,{extraKey:t,flavour:"arr"});return(0,r.useMemo)((()=>Object.entries(a.reduce(((e,t)=>{let{labels:n,hidden:a}=t;return Object.keys(n).forEach((t=>{e[t]=e[t]||{count:0,children:{}},e[t].count=(e[t].count||0)+(a?0:1),e[t].children[n[t]]=(e[t].children[n[t]]||0)+(a?0:1)})),e}),{})).reduce(((e,t)=>{let[n,{count:a,children:r}]=t;return e.push({id:n,count:a,children:Object.entries(r).map((e=>{let[t,a]=e;return{value:"".concat(n,"|").concat(t),label:t,count:a,level:1}}))}),e}),[]).sort(Ue.qr[n]||Ue.qr.default)),[a.filter((e=>e.hidden)).length,l])})({param:l,extraKey:n}),[g,p]=(0,i.N9)(l,{defaultValue:[],key:t,extraKey:n,flavour:"arr",merge:!1});(0,Ge.A)((()=>{m.length&&g.length&&p((e=>e.filter((e=>m.some((t=>t.children.some((t=>t.value===e))))))))}),[m.length,p]);const h=(0,r.useCallback)((e=>{let{value:t,checked:n}=e;p((function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:[];return Array.isArray(t)?n?[...new Set([...e,...t])]:je()(e,t):n?[...e,t]:e.filter((e=>e!==t))}))}),[p]),f=(0,r.useMemo)((()=>(e=>{let{options:t,selected:n,itemsProps:a={}}=e;if(!t.length)return[];const r=t.flatMap((e=>e.children.map((e=>e.value)))),l=r.length===n.length,o=!l&&!!n.length;return[{...a.head,label:"Select all",value:r,selected:l,indeterminate:o},...t.flatMap((e=>{let{id:t,children:r,count:l}=e;const o=n.length&&!r.some((e=>!n.includes(e.value))),i=!o&&n.length&&r.some((e=>n.includes(e.value)));return r.reduce(((e,t)=>{const r=n.includes(t.value);return[...e,{...a.row,...t,selected:r}]}),[{...a.head,label:t,value:r.map((e=>e.value)),selected:o,indeterminate:i,count:l}])}))]})({options:m,selected:g,itemsProps:s})),[g,p,m]);return m.length?r.createElement(Ve.A,(0,a.A)({title:"Host labels",testIdPrefix:o,baseKey:t,extraKey:n,param:l},d),r.createElement(Oe.V$,(0,a.A)({"data-testid":"".concat(o,'-filter-selections"'),searchMargin:[0,0,1],items:f,onItemClick:h,value:g,"data-value":g.join(",")||"all-selected",hasSearch:m.length>3,itemProps:{testIdPrefix:o,...c}},u))):null};var $e=n(94177);const Je=(0,r.memo)((e=>{let{flavour:t,groupProps:n,...l}=e;const i=(0,Se.ID)();return 
r.createElement(o.Flex,(0,a.A)({column:!0,overflow:{vertical:"auto"},padding:[0,2,30]},l),"alerts"===t&&r.createElement(r.Fragment,null,r.createElement(He,{title:"Alert status",testIdPrefix:"alert-status",baseKey:i,extraKey:t,param:"alertStatuses",paramKey:"status",groupProps:n}),r.createElement(He,{title:"Alert name",testIdPrefix:"alert-name",baseKey:i,extraKey:t,param:"alertNames",paramKey:"name",groupProps:n}),r.createElement(He,{title:"Alert context",testIdPrefix:"alert-context",baseKey:i,extraKey:t,param:"alertContexts",paramKey:"context",groupProps:n}),r.createElement(He,{title:"Alert class",testIdPrefix:"alert-class",baseKey:i,extraKey:t,param:"alertClasses",paramKey:"class",groupProps:n}),r.createElement(Re,{testIdPrefix:"alert-type-component",baseKey:i,extraKey:t,param:"alertTypeComponents",groupProps:n}),r.createElement(He,{title:"Alert role",testIdPrefix:"alert-role",baseKey:i,extraKey:t,param:"alertRoles",paramKey:"recipient",groupProps:n})),"singleNode"!==t&&"anomalies"!==t&&r.createElement(r.Fragment,null,r.createElement(Ye,{testIdPrefix:"node-labels",baseKey:i,extraKey:t,param:"nodeLabels",groupProps:n}),r.createElement(Ze,{title:"Node status",testIdPrefix:"node-status",baseKey:i,extraKey:t,param:"nodeStatuses",paramKey:"nodeStatus",groupProps:n}),r.createElement(Ze,{title:"Netdata version",testIdPrefix:"node-version",baseKey:i,extraKey:t,param:"nodeVersions",paramKey:"version",groupProps:n})),!!t&&"singleNode"!==t&&r.createElement($e.A,{testIdPrefix:"node-ids",baseKey:i,extraKey:t,groupProps:{defaultIsOpen:"anomalies"===t,...n}}))})),Xe={color:{active:"success",notActive:"textLite"},minWidth:"auto"},et={chartIndexing:{...Xe,iconName:"charts_view",Content:Be,dataTestId:"nodeInfo-chartIndexing",label:"Charts"},filters:{...Xe,iconName:"filterList",Content:Je,dataTestId:"nodeInfo-filters",label:"Filters"},alerts:{...Xe,iconName:"alarm_bell",Content:k.A,dataTestId:"nodeInfo-alertsTab",label:"Alerts"},info:{...Xe,iconName:"information",dataTestId:"nodeInfo-alertsTab",Content:C,label:"Info"}},tt=[],nt=(0,l.default)(o.Tabs).withConfig({displayName:"sidebar__StyledTabs",componentId:"sc-1d5ify6-0"})(["overflow:hidden;height:100%;"]),at=e=>{let{flavour:t,children:n}=e;return r.createElement(r.Fragment,null,n)},rt=e=>{let{isActive:t,color:n,name:a,label:l}=e;return r.createElement(A.A,{plain:!0,content:l},r.createElement(o.Icon,{name:a,color:t?n.active:n.notActive,height:"18px",width:"18px"}))},lt=e=>{let{tabs:t=et,hiddenTabs:n=tt,includedTabs:l=tt,title:m,help:g,nodeIds:p=tt,onClose:h,baseWidth:f,...y}=e;const[v,x]=(0,i.r$)("sidebarTab",{defaultValue:"",flavour:"val",extraKey:y.flavour}),b=(0,r.useMemo)((()=>Object.keys(t).filter((e=>(!l.length||l.includes(e))&&!n.includes(e)))),[n]),I=(0,r.useMemo)((()=>{const e=b.findIndex((e=>e===v));return-1===e?0:e}),[v]),[E,C]=(0,i.r$)("sidebarOpen",{defaultValue:!0,flavour:"bool"}),A=(0,r.useCallback)((e=>{x(b[e]),C(!0)}),[b]);return r.createElement(s.Ay,{collapsedComponent:y.flavour&&r.createElement(c.A,{onClickTab:A,availableTabs:t,tabsToShow:b}),isOpen:E,header:y.flavour?r.createElement(d.A,{isOpen:E,onToggle:()=>C(!E),title:m}):r.createElement(u.A,{title:m,help:g,onClick:h}),width:f},r.createElement(o.Flex,{"data-testid":"sidebar-tabs",column:!0,overflow:"hidden",height:"100%"},r.createElement(nt,{selected:I,onChange:A,noDefaultBorder:!0,TabsHeader:(0,r.useMemo)((()=>e=>r.createElement(at,(0,a.A)({flavour:y.flavour},e))),[y.flavour])},b.map(((e,n)=>{const{iconName:l,Content:i,dataTestId:s,color:c,label:d,...u}=t[e];return 
r.createElement(o.Tab,(0,a.A)({style:{display:1===b.length?"none":"inherit"},"data-testid":s,maxWidth:"100%",key:l,small:!0,label:r.createElement(rt,{isActive:v===n,name:l,color:c,label:d})},u),r.createElement(o.Flex,{column:!0,overflow:"hidden",height:"100%",position:"relative",gap:3,padding:[1,0]},r.createElement(i,(0,a.A)({nodeIds:p},y))))})))))}},55483:(e,t,n)=>{n.d(t,{A:()=>c});var a=n(96540),r=n(83488),l=n.n(r),o=n(2404),i=n.n(o),s=n(86856);const c=function(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:l();const n=(0,a.useRef)();return(0,s.i7)(e,(e=>{const a=t(e);return i()(n.current,a)||(n.current=a),n.current}))}}}]); \ No newline at end of file diff --git a/src/web/gui/v2/934.24d6fdc5f60aa6493962.chunk.js b/src/web/gui/v2/934.24d6fdc5f60aa6493962.chunk.js deleted file mode 100644 index ae22a4b8e..000000000 --- a/src/web/gui/v2/934.24d6fdc5f60aa6493962.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="f54005cd-254a-4d44-abb8-62e22bbc3514",e._sentryDebugIdIdentifier="sentry-dbid-f54005cd-254a-4d44-abb8-62e22bbc3514")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[934],{83863:(e,t,n)=>{n.d(t,{Bo:()=>b,Hv:()=>l,Lf:()=>g,Lm:()=>D,NK:()=>C,OK:()=>_,Pm:()=>A,R8:()=>f,Ro:()=>y,WB:()=>c,Ws:()=>N,XG:()=>x,ZE:()=>v,j8:()=>r,jV:()=>i,uZ:()=>h});n(62953);const a={threshold:{label:"Threshold",value:"threshold"},variance:{label:"Variance",value:"variance"},anomalies:{label:"Anomalies",value:"anomalies"}},l=Object.values(a),o=a.threshold.value,i={thresholds:{above:{label:"above",value:"above",symbol:">"},below:{label:"below",value:"below",symbol:"<"}}},r=Object.values(i.thresholds),s=i.thresholds.above,u={seconds:{label:"seconds",value:"seconds",symbol:"s"},minutes:{label:"minutes",value:"minutes",symbol:"m"},hours:{label:"hours",value:"hours",symbol:"h"},days:{label:"days",value:"days",symbol:"d"}},c=Object.values(u),d={"5_min":{label:"5 mins",value:"5_min",after:"-5m"},"10_min":{label:"10 mins",value:"10_min",after:"-10m"},"15_min":{label:"15 mins",value:"15_min",after:"-15m"},"30_min":{label:"30 mins",value:"30_min",after:"-30m"},"1_hour":{label:"1 hour",value:"1_hour",after:"-1h"},"2_hours":{label:"2 hours",value:"2_hours",after:"-2h"},"6_hours":{label:"6 hours",value:"6_hours",after:"-6h"},"12_hours":{label:"12 hours",value:"12_hours",after:"-12h"},"1_day":{label:"1 day",value:"1_day",after:"-1d"},"2_days":{label:"2 days",value:"2_days",after:"-2d"},"7_days":{label:"7 days",value:"7_days",after:"-7d"}},v=Object.values(d),m=d["5_min"],p={linux:{label:"Linux",value:"linux"},freeBSD:{label:"FreeBSD",value:"freeBSD"},windows:{label:"Windows",value:"windows"}},g={label:"".concat(Object.keys(p).length," 
OS"),value:""},f=[g,...Object.values(p)],h={avg:{label:"AVG()",value:"average"},sum:{label:"SUM()",value:"sum"},min:{label:"MIN()",value:"min"},max:{label:"MAX()",value:"max"},stddev:{label:"STDDEV()",value:"stddev"},cv:{label:"CV()",value:"cv"}},b=Object.values(h),y=h.stddev,C=h.avg,w={sum:{label:"SUM",value:"of"},each:{label:"EACH",value:"foreach"}},D=Object.values(w),O=w.sum,A={unaligned:{label:"unaligned",value:"unaligned"},percentage:{label:"percentage",value:"percentage"},absolute:{label:"absolute",value:"absolute"},min2max:{label:"min2max",value:"min2max"}},N=Object.values(A),_={label:"&this",value:"$this"},x={alertId:null,detection:{method:o},metrics:{on:"",hosts:{items:[],expression:""},os:[],charts:{items:[],expression:""},chartLabels:{items:[],expression:""},lookup:{aggregation:C,denoter:O,dimensions:[],timePeriod:m,options:[]},calc:{active:!1,value:""},units:""},alertingConditions:{denoter:s,critical:{isFormula:!1,denoter:s,value:85,recovery:null},warning:{isFormula:!1,denoter:s,value:75,recovery:null},interval:{value:60,unit:u.seconds},delayNotification:{active:!1,up:{value:0,unit:u.seconds},down:{value:0,unit:u.seconds},multiplier:1,max:{value:0,unit:u.hours}},agentOptions:{active:!1,off:!0,warningAlertsDuration:{value:15,unit:u.minutes},criticalAlertsDuration:{value:5,unit:u.minutes},sendTo:{value:"",clearNotifications:!0},execScript:{active:!1,path:""}}},description:{summary:"",templateName:"",templateDescription:""}}},60934:(e,t,n)=>{n.r(t),n.d(t,{default:()=>f});n(62953);var a=n(96540),l=n(39225),o=n(8711),i=n(83199),r=n(29217),s=n(21875),u=n(64118),c=n(59303),d=n(28738),v=n(93155);const m=(0,l.A)((()=>Promise.all([n.e(7144),n.e(6331)]).then(n.bind(n,33590))),"Content"),p=(0,o.default)(i.ModalContent).attrs({width:{base:"90vw"},height:{base:"90vh",min:"90vh",max:"90vh"},padding:[0,0,4,0]}).withConfig({displayName:"configurationManagement__ModalContent",componentId:"sc-56l9v5-0"})(["box-shadow:0 18px 28px rgba(0,0,0,0.5);"]),g=()=>a.createElement(d.A,{title:"Loading configuration manager"}),f=()=>{const[e,t]=(0,s.v7)(),[,n]=(0,s.$h)(),l=(0,s.Vj)(),[o,d]=(0,a.useState)(!1);(0,u.yk)(null===e||void 0===e?void 0:e.alertId);const f=(0,u.JL)(null===e||void 0===e?void 0:e.alertId);(0,a.useEffect)((()=>{e&&!e.alertId&&d(!0)}),[e]),(0,a.useEffect)((()=>{if(null!==f&&void 0!==f&&f.fullyLoaded){const e=(0,c.we)(f);n(e),d(!0)}}),[f]);const h=(0,a.useCallback)((()=>{t(null),l(),d(!1)}),[t,l,d]);return v.Dm&&e?a.createElement(i.Modal,{backdropProps:{backdropBlur:!0}},a.createElement(p,null,a.createElement(i.ModalHeader,{justifyContent:"between",padding:[4]},a.createElement(i.Flex,{gap:2,alignItems:"center"},a.createElement(i.Icon,{name:"alarm_bell",color:"text",size:"small"}),a.createElement(i.H4,null,"Generate Alert Configuration")),a.createElement(i.Flex,{gap:2,alignItems:"baseline"},a.createElement(r.A,{align:"bottom",content:"Close window",zIndex:7050},a.createElement(i.Flex,null,a.createElement(i.ModalCloseButton,{onClose:h,testId:"assistant-modal-close-button"}))))),a.createElement(i.ModalBody,{height:"100%",padding:[0,4],overflow:"hidden"},o?a.createElement(a.Suspense,{fallback:a.createElement(g,null)},a.createElement(m,null)):a.createElement(g,null)))):null}},21875:(e,t,n)=>{n.d(t,{v7:()=>c,vX:()=>u,$h:()=>v,op:()=>h,I8:()=>b,aR:()=>p,BO:()=>f,Vj:()=>m});n(17333),n(98992),n(54520),n(62953);var a=n(96540),l=n(47444),o=n(83863);const i=(0,l.eU)({key:"alertConfiguration",default:o.XG}),r=(0,l.eU)({key:"alertConfigurationChart",default:null});var s=n(59303);const 
u=()=>(0,l.vc)(r),c=()=>(0,l.L4)(r),d=(0,l.gD)({key:"alertConfigurationSelector",get:e=>{let{get:t}=e;return t(i)},set:(e,t)=>{let{set:n}=e;n(i,(e=>({...e,...t})))}}),v=()=>(0,l.L4)(d),m=()=>(0,l.E0)(i),p=()=>{const[e,t]=v(),{detection:{method:n},metrics:a}=e;return{detectionMethod:n,setDetectionMethod:e=>t({detection:{method:e},metrics:{...a,lookup:{...a.lookup,aggregation:"variance"==e?o.Ro:o.NK}}})}},g=e=>{let{scope:t,setConfiguration:n,metrics:a,isComposite:l}=e;return e=>{const o=null!==e&&void 0!==e&&e.length?e.filter((e=>{let{value:t}=e;return t})):e;n({metrics:{...a,[t]:l||Array.isArray(o)?o:o.value?[o]:[]}})}},f=()=>{const[e,t]=v(),{metrics:n}=e,a=g({scope:"hosts",setConfiguration:t,metrics:n,isComposite:!0}),l=g({scope:"charts",setConfiguration:t,metrics:n,isComposite:!0}),o=g({scope:"chartLabels",setConfiguration:t,metrics:n,isComposite:!0}),i=g({scope:"os",setConfiguration:t,metrics:n});return{metrics:n,setLookup:e=>{let{aggregation:a,denoter:l,dimensions:o,timePeriod:i,options:r}=e;t({metrics:{...n,lookup:{...n.lookup,...a?{aggregation:a}:{},...l?{denoter:l}:{},...o?{dimensions:o}:{},...i?{timePeriod:i}:{},...r?{options:r}:{}}}})},setOn:e=>t({metrics:{...n,on:e}}),setHosts:a,setCharts:l,setChartLabels:o,setOs:i,setCalcActive:e=>t({metrics:{...n,calc:{...n.calc,active:e,...e?{}:{value:""}}}}),setCalcValue:e=>t({metrics:{...n,calc:{...n.calc,value:e}}}),setUnitsValue:e=>t({metrics:{...n,units:e}})}},h=()=>{const[e,t]=v(),{alertId:n,alertingConditions:a}=e,l=!!n,o=e=>n=>t({alertingConditions:{...a,interval:{...a.interval,[e]:n}}}),i=o("value"),r=o("unit"),s=e=>n=>t({alertingConditions:{...a,[e]:{...a[e],active:n}}}),u=s("delayNotification"),c=e=>n=>l=>o=>t({alertingConditions:{...a,[e]:{...a[e],[n]:{...a[e][n],[l]:o}}}}),d=c("delayNotification"),m=d("up"),p=m("value"),g=m("unit"),f=d("down"),h=f("value"),b=f("unit"),y=d("max"),C=y("value"),w=y("unit"),D=s("agentOptions"),O=c("agentOptions"),A=O("warningAlertsDuration"),N=A("value"),_=A("unit"),x=O("criticalAlertsDuration"),E=x("value"),I=x("unit"),k=e=>n=>t({alertingConditions:{...a,agentOptions:{...a.agentOptions,sendTo:{...a.agentOptions.sendTo,[e]:n}}}}),j=k("value"),S=k("clearNotifications"),M=e=>n=>t({alertingConditions:{...a,agentOptions:{...a.agentOptions,execScript:{...a.agentOptions.execScript,[e]:n}}}}),L=M("active"),V=M("path");return{isEdit:l,alertingConditions:a,setMainDenoter:e=>t({alertingConditions:{...a,denoter:e,critical:{...a.critical,denoter:e},warning:{...a.warning,denoter:e}}}),setIntervalValue:i,setIntervalUnit:r,setThreshold:(e,n)=>l=>t({alertingConditions:{...a,[e]:{...a[e],[n]:l}}}),setRecoveryThreshold:(e,n)=>l=>t({alertingConditions:{...a,[e]:{...a[e],recovery:{...a[e].recovery,[n]:l}}}}),setDelayNotificationActive:u,setDelayNotificationUpValue:p,setDelayNotificationUpUnit:g,setDelayNotificationDownValue:h,setDelayNotificationDownUnit:b,setDelayNotificationMaxDelayValue:C,setDelayNotificationMaxDelayUnit:w,setDelayNotificationMultiplier:e=>t({alertingConditions:{...a,delayNotification:{...a.delayNotification,multiplier:e}}}),setAgentOptionsActive:D,setAgentOptionsRepeatNotification:e=>t({alertingConditions:{...a,agentOptions:{...a.agentOptions,off:e}}}),setAgentOptionsWarningAlertsDurationValue:N,setAgentOptionsWarningAlertsDurationUnit:_,setAgentOptionsCriticalAlertsDurationValue:E,setAgentOptionsCriticalAlertsDurationUnit:I,setAgentOptionsSendToValue:j,setAgentOptionsSendToClearNotifications:S,setAgentOptionsExecScriptActive:L,setAgentOptionsExecScriptPath:V}},b=()=>{const[e,t]=v(),{metrics:{on:n},des
cription:{templateName:l,templateDescription:o,summary:i}}=e;return(0,a.useEffect)((()=>{if(n){const{template:a,info:i}=(0,s.q7)({on:n,templateName:l,templateDescription:o});t({description:{...e.description,templateName:a,templateDescription:i}})}}),[n]),{templateName:l,templateDescription:o,summary:i,setTemplateName:n=>t({description:{...e.description,templateName:n}}),setTemplateDescription:n=>t({description:{...e.description,templateDescription:n}}),setSummary:n=>t({description:{...e.description,summary:n}})}}},59303:(e,t,n)=>{n.d(t,{L7:()=>r,m8:()=>o,pU:()=>i,q7:()=>y,sS:()=>A,we:()=>N});n(17333),n(3064),n(41393),n(14905),n(98992),n(54520),n(72577),n(81454),n(8872),n(62953);var a=n(83863),l=n(33829);const o=function(e){let t=arguments.length>1&&void 0!==arguments[1]&&arguments[1];return n=>{let{target:{checked:a}}=n;return e(t?!a:a)}},i=function(e){let t=arguments.length>1&&void 0!==arguments[1]&&arguments[1];return n=>e(t?!n:n)},r=e=>t=>{let{target:{value:n}}=t;return e(n)},s=function(){var e,t;let n=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const{alertId:l,alertingConditions:o={}}=n,{critical:i,warning:r}=o;return!!l?{...null!==i&&void 0!==i&&i.value?{crit:i.value}:{},...null!==r&&void 0!==r&&r.value?{warn:r.value}:{}}:i&&r&&null!==(e=i.denoter)&&void 0!==e&&e.value&&void 0!==i.value&&null!==(t=r.denoter)&&void 0!==t&&t.value&&void 0!==r.value?{crit:c({flavour:"critical",formula:a.OK.value,isFormula:i.isFormula,denoter:i.denoter,value:i.value,recovery:i.recovery}),warn:c({flavour:"warning",formula:a.OK.value,isFormula:r.isFormula,denoter:r.denoter,value:r.value,recovery:r.recovery})}:{}},u={critical:"$status == $CRITICAL",warning:"$status >= $WARNING"},c=e=>{let{flavour:t,formula:n=a.OK.value,isFormula:l,denoter:o,value:i,recovery:r}=e;const s=u[t];return l?i:null!==r&&void 0!==r&&r.value&&s?"$this ".concat(null===o||void 0===o?void 0:o.symbol," ((").concat(s,") ? 
(").concat(r.value,")) : (").concat(i,")"):"".concat(n," ").concat(null===o||void 0===o?void 0:o.symbol," (").concat(i,")")},d=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const{value:t,unit:n}=e;return"".concat(t).concat(null===n||void 0===n?void 0:n.symbol)},v=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:"value";return function(){let t=arguments.length>1?arguments[1]:void 0;const n=(arguments.length>0&&void 0!==arguments[0]?arguments[0]:[]).map((t=>t[e]));return t?n:n.join(" ")}},m=v(),p=v("label"),g=function(){let{aggregation:e,denoter:t,dimensions:n=[],timePeriod:a,options:l=[]}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},o=arguments.length>1?arguments[1]:void 0;const i=l.map((e=>{let{value:t}=e;return t})).join(" "),r=n.map((e=>{let{value:t}=e;return t})).filter(Boolean),s=r.length?"".concat((null===t||void 0===t?void 0:t.value)||"of"," ").concat(r.join(",")):"foreach"==(null===t||void 0===t?void 0:t.value)?"foreach *":null,u="anomalies"==o?"anomaly-rate":"";return{lookup:[null===e||void 0===e?void 0:e.value,null===a||void 0===a?void 0:a.after,i,s,u].filter(Boolean).join(" ")}},f=function(){let{active:e,up:t,down:n,multiplier:a,max:l}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};if(!e||0==parseInt(null===t||void 0===t?void 0:t.value,10)&&0==parseInt(null===n||void 0===n?void 0:n.value,10))return{};const o=[...parseInt(null===t||void 0===t?void 0:t.value,10)>0?["up ".concat(d(t))]:[],...parseInt(null===n||void 0===n?void 0:n.value,10)>0?["down ".concat(d(n))]:[],...parseFloat(a)>0?["multiplier ".concat(a)]:[],...parseInt(null===l||void 0===l?void 0:l.value,10)>0?["max ".concat(d(l))]:[]];return o.length?{delay:o.join(" ")}:{}},h=function(){let{off:e,warningAlertsDuration:t,criticalAlertsDuration:n}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const a=[...e?["off"]:[],...!e&&null!==t&&void 0!==t&&t.value?["warning ".concat(d(t))]:[],...!e&&null!==n&&void 0!==n&&n.value?["critical ".concat(d(n))]:[]];return a.length?{repeat:a.join(" ")}:{}},b=function(){let{active:e,sendTo:t}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const{value:n,clearNotifications:a}=t||{};return{...n?{to:n}:{},...e&&!a?{options:"no-clear-notification"}:{}}},y=e=>{let{templateName:t,templateDescription:n,on:a}=e;const o=(0,l.A)(),i=a?"".concat(a.split(".").join("_"),"_").concat(o):o;return{template:t||i,info:n||"Alert description"}},C=function(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:p;return function(){var n;let{items:a=[],expression:l=""}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const o=null===(n=[t(a),l].join(" "))||void 0===n?void 0:n.trim();return o?{[e]:o}:{}}},w=C("hosts"),D=C("charts",(function(){return p(arguments.length>0&&void 0!==arguments[0]?arguments[0]:[],!0).reduce(((e,t)=>{var n;const a=(null===(n=/\.(.*?)@/.exec(t))||void 0===n?void 0:n[1])||t;return e.includes(a)||e.push(a),e}),[]).join(" ")})),O=C("chart labels",(e=>m(e,!0).map((e=>e.split(":").join("="))).join(" "))),A=function(){const e=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const{detection:t,metrics:n={},description:a={},alertingConditions:l={}}=e,{on:o,hosts:i,os:r,charts:u,chartLabels:c,lookup:v,calc:p,units:y}=n,{templateName:C,templateDescription:A,summary:N}=a,{agentOptions:_={},interval:x={},delayNotification:E={}}=l,{execScript:I={},sendTo:k={}}=_;return{...C?{template:C}:{},...o?{on:o}:{},...w(i),...null!==r&&void 
0!==r&&r.length?{os:m(r)}:{},...D(u),...O(c),...g(v,null===t||void 0===t?void 0:t.method),...null!==p&&void 0!==p&&p.value?{calc:p.value}:{},...y&&null!==p&&void 0!==p&&p.value?{units:y}:{},...x.value?{every:d(x)}:{},...s(e),..._.active&&I.active&&I.path?{exec:I.path}:{},...A?{info:A}:{},...N?{summary:N}:{},...f(E),..._.active?h(_):{},...b(_)}}(arguments.length>0&&void 0!==arguments[0]?arguments[0]:{});return Object.entries(e).reduce(((e,t)=>{let[n,a]=t;return[...e,"".concat(n,": ").concat(a)]}),[]).join("\n")},N=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const{id:t,context:n="",calculation:l="",displayName:o="",configInfo:i="",critical:r="",warning:s="",lookupMethod:u="",lookupOptions:c=[],lookupAfter:d,lookupBefore:v,lookupDimensions:m="",summary:p}=e;return{...a.XG,...t?{alertId:t}:{},metrics:{...a.XG.metrics,on:n,lookup:{...a.XG.metrics.lookup,aggregation:Object.values(a.uZ).find((e=>{let{value:t}=e;return t==u})),dimensions:m.split(" ").map((e=>({label:e,value:e}))),options:Object.values(a.Pm).filter((e=>{let{value:t}=e;return c.includes(t)}))}},alertingConditions:{...a.XG.alertingConditions,critical:{value:r},warning:{value:s}},description:{summary:p,templateName:o,templateDescription:i}}}}}]); \ No newline at end of file diff --git a/src/web/gui/v2/9400.6250bbf86c4fd3173de2.chunk.js b/src/web/gui/v2/9400.6250bbf86c4fd3173de2.chunk.js deleted file mode 100644 index 4b7c9ff18..000000000 --- a/src/web/gui/v2/9400.6250bbf86c4fd3173de2.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="2cdc8de5-2f78-4f07-abfe-b7e633a5a36c",e._sentryDebugIdIdentifier="sentry-dbid-2cdc8de5-2f78-4f07-abfe-b7e633a5a36c")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[9400],{79400:(e,t,l)=>{l.r(t),l.d(t,{default:()=>se});l(41393),l(81454);var n=l(96540),a=l(83199),o=l(4659),s=l(63450),r=l(58168),i=(l(62953),l(63950)),c=l.n(i),d=l(29217),u=l(78940);const m=e=>{let{scope:t,setScope:l=c(),isDisabled:o,...s}=e;return n.createElement(a.Flex,(0,r.A)({gap:3},s),Object.entries(u.Wu).map((e=>{let[s,{text:r,tooltip:i}]=e;return n.createElement(a.RadioButton,{key:s,checked:s===t,onChange:()=>l(s),disabled:o},n.createElement(d.A,{content:i,align:"bottom"},n.createElement(a.TextSmall,{color:"textDescription"},r)))})))};var p=l(54518),b=l(87659);const g=e=>{let{selectedKey:t,selectedValue:l,onAddHostLabel:o=c(),onRemoveHostLabel:s=c(),isDefault:r=!1,showPlaceholder:i=c(),isDisabled:u}=e;const[m,b]=(0,n.useState)(t||""),[g,h]=(0,n.useState)(l||""),E=()=>{m&&g&&(o({[m]:g}),i())};return n.createElement(a.Flex,{gap:2},n.createElement(p.A,{component:"input",onChange:b,onBlur:E,placeholder:"Host key",value:m,disabled:u||!r}),n.createElement(p.A,{component:"input",onChange:h,onBlur:E,placeholder:"Host value",value:g,disabled:u||!r||!m}),r?n.createElement(d.A,{content:"Save label pair",align:"bottom"},n.createElement(a.Button,{flavour:"borderless",disabled:!m||!g},n.createElement(a.Icon,{name:"check",color:"primary",size:"small"}))):n.createElement(d.A,{content:"Remove label 
pair",align:"bottom"},n.createElement(a.Button,{flavour:"borderless",onClick:()=>s(t)},n.createElement(a.Icon,{name:"x",color:"primary",size:"small"}))))},h=e=>{let{hostLabels:t,onAddHostLabel:l,onRemoveHostLabel:a}=e;return Object.entries(t).map((e=>{let[t,o]=e;return n.createElement(g,{key:t,onAddHostLabel:l,selectedKey:t,selectedValue:o,onRemoveHostLabel:a})}))},E=e=>{let{hostLabels:t,onAddHostLabel:l=c(),onRemoveHostLabel:o=c(),isEdit:s,isDisabled:r}=e;const i=!!Object.entries(t||{}).length,[d,,u]=(0,b.A)(!1);return n.createElement(a.Flex,{column:!0,gap:1},n.createElement(a.TextSmall,{color:"textLite"},"Host labels"),i&&n.createElement(h,{hostLabels:t,onAddHostLabel:l,onRemoveHostLabel:o}),(!i||d)&&n.createElement(g,{key:JSON.stringify(t),onAddHostLabel:l,isDefault:!0,showPlaceholder:u,isDisabled:r}),s&&!d&&i&&n.createElement(a.Flex,{justifyContent:"end"},n.createElement(a.Button,{flavour:"borderless",onClick:u},"Add host label")))};var v=l(99851);function f(){const e=new Date,t=e.getFullYear(),l=e.getMonth(),n=e.getDate(),a=new Date(0);return a.setFullYear(t,l,n+1),a.setHours(0,0,0,0),a}var C=l(51730),A=l(82526),x=l(27467);const S=Object.entries(u.SB).map((e=>{let[t,l]=e;return{label:l,value:t}})),D=f(),y=(0,C.o)((0,A.W)(D,{years:1})),k=e=>{let{duration:t,setDuration:l,setDate:o,endDate:s,isDisabled:r}=e;const i=(0,x.rW)("offset");return n.createElement(a.Flex,{gap:2},n.createElement(p.A,{component:"select",title:"Duration",onChange:l,options:S,placeholder:"Select duration",value:t,isDisabled:r}),"custom"===t.value&&n.createElement(a.Flex,{alignSelf:"end"},n.createElement(v.A,{isSinglePicker:!0,values:{singleDate:s},minDate:D,maxDate:y,utc:i,onChange:o,isPlaying:!1,accessorProps:u.Iv,padding:[4,0],width:"auto",accessorTooltipContent:"Select end date"})))};function I(){return(0,C.o)(Date.now())}const L=I(),O=f(),R=e=>{let{start:t,end:l,onChange:o,isDisabled:s,isEdit:r}=e;const[i,c]=(0,n.useState)(r?"schedule":u.SX),[m,p]=(0,n.useState)(u.DK),[b,g]=(0,n.useState)(0),h=(0,x.rW)("offset"),[E,S]=(0,n.useState)(t?new Date(t):r?null:L),[D,y]=(0,n.useState)(l?new Date(l):r?null:O),[R,w]=(0,n.useState)((0,C.o)((0,A.W)(E,{days:1})));(0,n.useEffect)((()=>{const e=(0,A.W)(E,{days:1});w(e),!r&&E>=D&&y(e)}),[E]),(0,n.useEffect)((()=>{o({start:E,end:D,scheduleOption:i,duration:m})}),[E,D,i,m]);const H=f(),N=(0,C.o)((0,A.W)(H,{years:1}));return n.createElement(a.Flex,{column:!0,gap:3},!r&&n.createElement(a.Flex,{gap:3},Object.entries(u.Yo).map((e=>{let[t,{text:l,tooltip:o}]=e;return n.createElement(a.RadioButton,{key:t,checked:t===i,onChange:()=>c(t),disabled:s},n.createElement(d.A,{content:o,align:"bottom"},n.createElement(a.TextSmall,{color:"textDescription"},l)))}))),"now"===i?n.createElement(k,{duration:m,setDuration:p,setDate:e=>{S(Date.now()),y(e)},endDate:D,isDisabled:s}):n.createElement(a.Flex,{gap:2},n.createElement(a.Flex,{column:!0,gap:2,flex:{grow:1,shrink:1},basis:0},n.createElement(a.TextSmall,null,"Start date"),n.createElement(v.A,{isSinglePicker:!0,values:{singleDate:new Date(E)},minDate:I(),maxDate:N,utc:h,onChange:e=>{S(e),g((e=>e+1))},isPlaying:!1,accessorProps:u.Iv,padding:[4,0],width:"auto",accessorTooltipContent:"Select start date"})),n.createElement(a.Flex,{column:!0,gap:2,flex:{grow:1,shrink:1},basis:0},n.createElement(a.TextSmall,null,"End date"),n.createElement(v.A,{key:b,isSinglePicker:!0,values:{singleDate:D?new Date(D):D},minDate:R,maxDate:N,utc:h,onChange:y,isPlaying:!1,accessorProps:u.Iv,padding:[4,0],width:"auto",accessorTooltipContent:"Select end 
date"}))))},w=e=>{let{content:t="Loading alerts..."}=e;return n.createElement(a.Flex,{height:45,alignItems:"center",justifyContent:"center"},n.createElement(a.Text,null,t))};var H=l(51719),N=(l(9391),l(17333),l(3064),l(98992),l(54520),l(72577),l(47767)),P=l(38819),F=l(22292),_=l(46741),T=l(3914),B=l(67990),M=l(71856),j=l(88982),U=l(29848),W=l(36021),z=l(47444),K=l(45860);const V=(0,z.K0)({key:"spaceAlertMetas",get:e=>()=>(0,K.z5)(e)});var J=l(64118),Y=l(10368),G=l(79394),Z=l(71835);const q={name:"",rooms:[],nodes:[],hostLabels:null,alertNames:[],alertContexts:[],alertRoles:[],startsAt:null,lastsUntil:null},X=e=>{let{id:t,name:l,...n}=e;return{label:l,value:t,...n}},$=e=>({label:e,value:e}),Q=e=>{var t;let{rooms:l,nodes:n,hostLabels:a,startsAt:o,lastsUntil:s,...r}=e;return{...r,scope:r.accountId?"personal":u._V,canSubmit:!(null===r||void 0===r||null===(t=r.name)||void 0===t||!t.length),rooms:null!==l&&void 0!==l&&l.length?l.map(X):[M.PT],...null!==n&&void 0!==n&&n.length?{nodes:n.map(X).filter((e=>{let{notAvailable:t}=e;return!t}))}:{},...a?{hostLabels:a}:{},...o?{startsAt:o}:{},...s?{lastsUntil:s}:{}}},ee=e=>{let{value:t}=e;return t},te=(e,t)=>{const{scope:l,rooms:n,nodes:a,hostLabels:o,startsAt:s,lastsUntil:r,scheduleOption:i,duration:c,...d}=e,m=n.filter((e=>{let{value:t}=e;return t!==u.jH.value})).map(ee),p=a.map(ee),b=!!Object.keys(o||{}).length,{start:g,end:h}=(e=>{let{startsAt:t,lastsUntil:l,scheduleOption:n,duration:a}=e;if("schedule"==n)return{start:t,end:l};let o,s=Date.now();switch(a){case"oneHour":o=(0,A.W)(s,{hours:1});break;case"sixHours":o=(0,A.W)(s,{hours:6});break;case"twelveHours":o=(0,A.W)(s,{hours:12});break;case"oneDay":o=(0,A.W)(s,{days:1});break;case"custom":o=l}return{start:s,...o?{end:o}:{}}})({startsAt:s,lastsUntil:r,scheduleOption:i,duration:c});return{...d,..."personal"==l?{account_id:t}:{},...m.length?{room_ids:m}:{},...p.length?{node_ids:p}:{},...b?{host_labels:o}:{},...g?{starts_at:new Date(g).toISOString()}:{},...h?{lasts_until:new Date(h).toISOString()}:{}}},le=[{label:"CRITICAL",value:"CRITICAL"},{label:"WARNING",value:"WARNING"},{label:"CLEAR",value:"CLEAR"}],ne=e=>{var t;let{rule:l,onClose:a,isEdit:o}=e;const s=(0,F.NJ)(),r=(0,N.Zp)(),i=(0,T.bq)(),[c,d]=(0,b.A)(),m=(0,j.A)({all:!0}),p=null===(t=m.find((e=>{let{label:t}=e;return"All nodes"==t})))||void 0===t?void 0:t.value,{loaded:g,value:h,hasError:E}=(()=>{var e;const t=(0,T.vt)(),l=(0,z.xf)(V(t));return{loaded:"loading"!==l.state,value:(null===(e=l.contents)||void 0===e?void 0:e.data)||K.rx,hasError:"hasError"===l.state}})(),v=(0,W._B)(),f=(0,W.FU)(),C=o?f:v,A=(0,U.ly)(),x=(0,W.Lz)(),[,,S]=(0,Z.A)(),[D,y]=(0,n.useState)({alertNameOptions:[],alertContextOptions:[],alertRoleOptions:[]}),{alertNameOptions:k,alertContextOptions:I,alertRoleOptions:L}=D,[O,R]=(0,n.useState)((()=>({...q,...Q(l)}))),[w,H]=(0,n.useState)([]),{loaded:M,value:X}=(0,J.I8)({alertNames:O.alertNames,contexts:O.alertContexts,allowEmptyName:!0});(0,n.useEffect)((()=>{const e=(X||[]).filter((e=>!((O.alertNames||[]).length&&!O.alertNames.includes(e.name))&&(!((O.alertContexts||[]).length&&!O.alertContexts.includes(e.context))&&!((O.nodeIds||[]).length&&!O.nodeIds.includes(e.nodeId))))).map((e=>{let{instance:t,instanceName:l}=e;return{value:t,label:l}}));H(e)}),[M,X,O.alertNames,O.alertContexts,O.nodeIds]);const ee=(0,B.vv)(O.roomIds||[p]),ne=(0,n.useCallback)((e=>R((t=>({...t,scope:e})))),[]),ae=(0,n.useCallback)((e=>R((t=>({...t,name:e,canSubmit:!(null===e||void 0===e||!e.length)})))),[]),oe=(0,n.useCallback)((function(e){let 
t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:[];const l=(e=>e.length<=1?e:e[e.length-1].value==u.jH.value?[u.jH]:e.filter((e=>{let{value:t}=e;return!!t})))(e),n=l.map((e=>{let{value:t}=e;return t||p})),a=t.length{let{value:l}=e;return!t.includes(l)}))[0]:null;R((e=>({...e,rooms:l,roomIds:n,lastRoomAdded:a||null})))}),[p]),se=(0,n.useCallback)((e=>R((t=>({...t,nodeIds:e.map((e=>{let{value:t}=e;return t})),nodes:e})))),[]),re=(0,n.useCallback)((e=>R((t=>({...t,hostLabels:{...t.hostLabels,...e}})))),[]),ie=(0,n.useCallback)((e=>R((t=>{const l={...t.hostLabels};return delete l[e],{...t,hostLabels:l}}))),[]),ce=(0,n.useCallback)((e=>t=>R((l=>({...l,[e]:t.map((e=>{let{value:t}=e;return t}))})))),[]),de=(0,n.useCallback)((e=>{let{start:t,end:l,scheduleOption:n,duration:a}=e;return R((e=>({...e,startsAt:t,lastsUntil:l,scheduleOption:n,duration:a.value})))}),[]),ue=(0,n.useCallback)((()=>{d(),C(te(O,s)).then((()=>{a(),x(),A()})).catch((e=>S((0,Y.H)(e)))).finally(d)}),[O,s]),me=(0,G.A)();(0,n.useEffect)((()=>{var e;const t=(null===(e=O.lastRoomAdded)||void 0===e?void 0:e.value)||p;let l=null;return t&&(l=me(t)),()=>{var e;return!(null===(e=l)||void 0===e||!e.cancel)&&l.cancel()}}),[O.roomIds,me]),(0,n.useEffect)((()=>{if(g){const{contexts:e,names:t,roles:l}=h||{};y({alertNameOptions:t?t.map($):[],alertContextOptions:e?e.map($):[],alertRoleOptions:l?l.map($):[]})}}),[g]),(0,n.useEffect)((()=>{const e=(0,P.PP)(),t=(()=>{const{silencingRulePrefill:e}=(0,P.PP)();if(!e)return{};let t={};try{t=JSON.parse(e)}catch(l){}return t})(),{alertName:l,nodeId:n,context:a,instance:o,roomId:s}=t;if(s){const e=m.filter((e=>{let{value:t}=e;return t==s}));e&&oe(e)}if(l&&R((e=>({...e,alertNames:[l]}))),a&&R((e=>({...e,alertContexts:[a]}))),o&&R((e=>({...e,alertInstances:[o]}))),n){const e=ee.filter((e=>{let{value:t}=e;return t==n}));e&&se(e)}(0,P.Z8)({...e,silencingRulePrefill:""})}),[R,ce,se,ee]);const pe=(0,_.JT)("space:CreatePersonalSilencingRule"),be=(0,n.useCallback)((()=>r("/spaces/".concat(i,"/settings/billing"))),[i]);return{state:O,onScopeChange:ne,onRuleNameChange:ae,onRoomsSelectionChange:oe,roomOptions:m,onNodesSelectionChange:se,nodesOptions:ee,alertMetasLoaded:g,alertMetas:h,alertNameOptions:k,alertContextOptions:I,alertInstancesEnabled:!0,alertInstancesOptions:w,alertRoleOptions:L,alertStatusOptions:le,alertMetasError:E,onAlertValueChange:ce,onAddHostLabel:re,onRemoveHostLabel:ie,onDatesChange:de,loading:c,onSave:ue,spaceCreatePersonalSilencingRule:pe,onUpdateButtonClick:be}},ae=e=>null!==e&&void 0!==e&&e.length?e.map($):[],oe=()=>n.createElement(a.Box,{as:"hr",height:"1px",width:"100%",sx:{borderWidth:"1px 0px 0px 0px",borderColor:"borderSecondary",borderStyle:"solid"}}),se=e=>{let{rule:t={},onClose:l,isEdit:r}=e;const{state:i,onScopeChange:c,onRuleNameChange:u,onRoomsSelectionChange:b,roomOptions:g,onNodesSelectionChange:h,nodesOptions:v,alertMetasLoaded:f,alertNameOptions:C,alertContextOptions:A,alertInstancesEnabled:x,alertInstancesOptions:S,alertRoleOptions:D,alertStatusOptions:y,onAlertValueChange:k,onAddHostLabel:I,onRemoveHostLabel:L,onDatesChange:O,loading:N,onSave:P,spaceCreatePersonalSilencingRule:F,onUpdateButtonClick:_}=ne({rule:t,onClose:l,isEdit:r});return n.createElement(a.Modal,{onEsc:l,backdropProps:{backdropBlur:!0}},n.createElement(s.$m,{style:{maxHeight:"800px"}},n.createElement(a.ModalHeader,{padding:[6,4,3]},n.createElement(a.Flex,{column:!0,gap:2},n.createElement(a.Flex,{flex:!0,alignItems:"center",justifyContent:"between"},n.createElement(a.H4,null,"Add silencing 
rule"),l&&n.createElement(s.Oj,{onClose:l})),n.createElement(a.Text,null,"Define an alert notification silencing rule that will apply to all users or just you."," ",n.createElement(o.A,{"data-ga":"alert-silencing::click-link-docs::rule-modal","data-testid":"silencing-rules-doc",href:"https://learn.netdata.cloud/docs/alerts-and-notifications/notifications/netdata-cloud-notifications/manage-alert-notification-silencing-rules",rel:"noopener noreferrer",target:"_blank"},"Learn how to configure silencing rules.")),!F&&n.createElement(H.A,null))),n.createElement(a.ModalBody,{padding:[0]},n.createElement(s.fn,null,n.createElement(a.Flex,{column:!0,gap:3},n.createElement(m,{scope:i.scope,setScope:c,padding:[0,0,1,0],isDisabled:!F}),n.createElement(p.A,{component:"input",onChange:u,placeholder:"Add rule name",title:"Rule name*",value:i.name,disabled:!F}),n.createElement(p.A,{component:"select",isMulti:!0,onChange:e=>b(e,i.roomIds),options:g,placeholder:"Select rooms",title:"Rooms*",value:i.rooms,isDisabled:!F}),n.createElement(p.A,{component:"select",isMulti:!0,onChange:h,options:v,placeholder:"Select nodes",title:"Nodes",value:i.nodes,isDisabled:!F||!v.length}),n.createElement(E,{hostLabels:i.hostLabels,onAddHostLabel:I,onRemoveHostLabel:L,isEdit:r,isDisabled:!F}),f?n.createElement(n.Fragment,null,n.createElement(oe,null),n.createElement(p.A,{component:"select",isMulti:!0,onChange:k("alertNames"),options:C,placeholder:"Select alert name",title:"Alert name",value:ae(i.alertNames),isDisabled:!F}),n.createElement(p.A,{component:"select",isMulti:!0,onChange:k("alertContexts"),options:A,placeholder:"Select alert context",title:"Alert context",value:ae(i.alertContexts),isDisabled:!F}),n.createElement(p.A,{component:"select",isMulti:!0,onChange:k("alertInstances"),options:S,placeholder:"Select alert instance",title:"Alert instance",value:ae(i.alertInstances),isDisabled:!F||!x}),n.createElement(p.A,{component:"select",isMulti:!0,onChange:k("severities"),options:y,placeholder:"Select alert status",title:"Alert status",value:ae(i.severities),isDisabled:!F}),n.createElement(p.A,{component:"select",isMulti:!0,onChange:k("alertRoles"),options:D,placeholder:"Select alert role",title:"Alert role",value:ae(i.alertRoles),isDisabled:!F})):n.createElement(w,null),n.createElement(oe,null),n.createElement(R,{start:i.startsAt,end:i.lastsUntil,onChange:O,isEdit:r,isDisabled:!F}))),n.createElement(s.fn,{alignItems:"end",justifyContent:"center"},n.createElement(a.Flex,{gap:4},n.createElement(a.Button,{flavour:"borderless",label:"Cancel",onClick:l,disabled:N}),F?n.createElement(d.A,{content:r?"Update the rule":"Create the rule",align:"bottom"},n.createElement(a.Button,{label:"OK",onClick:P,"data-ga":"alert-silencing::click-save-".concat(r?"edit":"new","-rule::rule-modal"),"data-testid":"saveRule-button",textTransform:"uppercase",isLoading:N,disabled:N||!i.canSubmit})):n.createElement(d.A,{content:"Update to a paid plan in order to be able to create silencing rules",align:"bottom"},n.createElement(a.Button,{label:"Upgrade!",onClick:_})))))))}},51719:(e,t,l)=>{l.d(t,{A:()=>i});var n=l(58168),a=l(96540),o=l(83199),s=l(54856),r=l(3914);const i=e=>{const t=(0,r.dg)();return a.createElement(o.Flex,(0,n.A)({gap:2,alignItems:"center"},e),a.createElement(o.Icon,{size:"small",color:"warning",name:"warning_triangle"}),a.createElement(o.Text,null,"This feature is only available to paid plans"),t?null:a.createElement(s.A,null))}}}]); \ No newline at end of file diff --git a/src/web/gui/v2/9473.4fd4742ffb6b5348bea8.chunk.js 
b/src/web/gui/v2/9473.4fd4742ffb6b5348bea8.chunk.js deleted file mode 100644 index ec6f0f2ae..000000000 --- a/src/web/gui/v2/9473.4fd4742ffb6b5348bea8.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="d8fef4ac-14dc-4d3b-bf8b-36c3bcc25e90",e._sentryDebugIdIdentifier="sentry-dbid-d8fef4ac-14dc-4d3b-bf8b-36c3bcc25e90")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[9473],{36196:(e,t,a)=>{a.d(t,{A:()=>y});var n=a(58168),o=a(96540),l=a(22332),r=a(10534),c=a(89380),s=a(25369),i=a(39360),d=a(95662),u=a(99891),m=a(49096),g=a(74487),p=a(64131);const h=(0,o.forwardRef)(((e,t)=>{let{width:a,height:l,...r}=e;return o.createElement(c.ChartWrapper,{width:a,height:l},o.createElement(p.N1,(0,n.A)({hasHeader:!1,hasFilters:!1,hasFooter:!1,width:a,height:l},r,{ref:t})))})),f=(0,r.default)(h,{tile:!0}),E={dygraph:p.Ay,easypiechart:u.Ay,gauge:i.Ay,number:m.Ay,groupBoxes:s.Ay,d3pie:d.Ay,bars:g.Ay},v=e=>{const t=(0,l.useChart)(),a=(0,l.useAttributeValue)("sparkline"),r=(0,l.useAttributeValue)("chartLibrary"),c=(0,o.useMemo)((()=>t?a?f:E[r]:null),[t,r,a]);return c?o.createElement(c,(0,n.A)({},e,{chart:t})):null},y=(0,l.withChartProvider)((0,o.memo)(v))},85686:(e,t,a)=>{a.d(t,{A:()=>d});var n=a(96540),o=a(8711),l=a(83199),r=a(6586),c=a(12602);const s=(0,o.default)(l.Flex).withConfig({displayName:"restrictedContentMessage__StyledBox",componentId:"sc-wbsw81-0"})(["transform:translate(-50%,-50%);"]),i={node:"This node is locked so you can't see the Single Node dashboard.",alert:"This node is locked so you can't see the full alert details. 
",dashboard:"This dashboard is locked so you can't see it."},d=e=>{let{flavour:t="node"}=e;return n.createElement(s,{column:!0,width:"100%",gap:2,alignItems:"center",position:"absolute",top:"50%",left:"50%",padding:[4],round:2},n.createElement(l.TextBigger,null,i[t]||""),n.createElement(c.A,null,n.createElement(l.TextBig,{color:"primary"},"Upgrade for no limitations!")),n.createElement(l.TextBig,null,"or"),n.createElement(r.A,null,n.createElement(l.TextBig,{color:"primary"},"Change your active node selection to unlock it.")))}},40267:(e,t,a)=>{a.d(t,{A:()=>l});var n=a(96540),o=a(83199);const l=e=>{let{flavour:t,icon:a,children:l}=e;return n.createElement(o.Pill,{icon:a,textProps:{textTransform:"capitalize"},flavour:t},l)}},4974:(e,t,a)=>{a.d(t,{A:()=>d});a(62953);var n=a(96540),o=a(83199),l=a(64118),r=(a(25440),a(26655));var c=a(4659),s=a(69765),i=a(51913);const d=e=>{let{alertId:t}=e;const a=(0,l.JL)(t),{name:d="unknown alert"}=a,[u,m]=(e=>{const[t,a]=(0,n.useState)(!0),o="".concat("https://community.netdata.cloud/t","/").concat(null===e||void 0===e?void 0:e.replace(/[._]/g,"-"));return(0,n.useEffect)((()=>{e&&r.A.get(o).then((()=>a(!1))).catch((()=>a(!0)))}),[e]),[o,t]})(d),g=(0,s.ID)(),[,p]=(0,i.bg)(),h=(0,n.useMemo)((()=>({alignSelf:"start",onClick:()=>p({roomId:g,alert:a})})),[u,m,p]);return n.createElement(c.A,h,"Learn more about this alert",!m&&n.createElement(o.Icon,{name:"arrow_left",rotate:2,size:"small",margin:[0,0,-.75,1],color:"success"}))}},43407:(e,t,a)=>{a.d(t,{A:()=>r});var n=a(58168),o=a(96540),l=a(83199);const r=e=>{let{iconName:t,iconSize:a,children:r,...c}=e;return o.createElement(l.Flex,(0,n.A)({gap:2,alignItems:"center"},c),o.createElement(l.Icon,(0,n.A)({name:t,color:"textLite"},a&&{height:a,width:a})),o.createElement(l.Text,{strong:!0},r))}},7660:(e,t,a)=>{a.d(t,{A:()=>u});var n=a(58168),o=a(96540),l=a(83199),r=a(64118),c=a(21290),s=a(45976),i=a(52768);const d=e=>{let{label:t,testid:a,status:n,value:r,when:d,units:u}=e;const{localeTimeString:m,localeDateString:g}=(0,c.$j)(),p=(0,o.useMemo)((()=>{const e=new Date(1e3*d);return e&&"".concat(g(e,{long:!1})," ").concat(m(e,{secs:!0}))}),[d,g,m]),h=(0,i.J4)(r,u);return o.createElement(l.Flex,{gap:2,alignItems:"center"},o.createElement(l.Flex,{width:"100px"},o.createElement(l.TextSmall,null,t)),o.createElement(l.Flex,{gap:2,alignItems:"center"},o.createElement(l.TextSmall,{color:"text","data-testid":"".concat(a,"-dateTime")},p),o.createElement(s.A,{loaded:!0,status:n,valueWithUnit:h,"data-testid":"".concat(a,"-value")})))},u=e=>{let{alertId:t,...a}=e;const{fullyLoaded:c=!1,units:s,lastStatusChangeValue:i,lastStatusChange:u,status:m,lastUpdated:g,value:p,prevStatus:h,prevValue:f,prevDuration:E}=(0,r.JL)(t);return c?o.createElement(l.Flex,(0,n.A)({gap:2,column:!0,"data-testid":"alertValues"},a),g&&o.createElement(d,{label:"Latest",status:m,testid:"alertValues-latest",value:p,when:g,units:s}),u&&o.createElement(d,{label:"CLEAR"===m?"WARNING"===h||"CRITICAL"===h?"Cleared":"Initialized":"Triggered",status:m,testid:"alertValues-triggered",value:i,when:u,units:s})):null}},11164:(e,t,a)=>{a.d(t,{A:()=>E});var n=a(96540),o=a(36196),l=a(63950),r=a.n(l),c=a(83199),s=a(57605),i=a(28738),d=a(3914),u=a(69765),m=a(80925),g=a(47731),p=a(52768);const h=e=>t=>"alert-modal::".concat(e.getAttribute("id"),"::").concat(t),f={width:"108px",height:"77px"},E=e=>{let{instance:t,context:a,isFormattedValueLoaded:l,nodeId:E,status:v,lastStatusChange:y,formattedLastStatusChangeValue:b,lastUpdated:x,spaceId:w,roomId:A,setChartSelected:I=r()}=e;const 
k=(0,d.vt)();w=w||k;const C=(0,u.ID)();A=A||C;const S=(0,g.J)(),T=(0,m.e)(),N=(0,s.A)();(0,n.useEffect)((()=>(N(),N)),[]);const L=(0,d.dg)(),_=(0,n.useMemo)((()=>{if(!l)return;const e=T.makeChart({attributes:{nodeId:E,contextScope:[a],selectedInstances:t&&E?["".concat(t,"@").concat(E)]:[],id:t,roomId:A,enabledResetRange:!1,overlays:{proceeded:{type:"proceeded"},alarm:{type:"alarm",status:v,value:b,when:y}},host:L?"".concat(window.envSettings.agentApiUrl,"/api/v2"):"".concat(window.envSettings.apiUrl,"/api/v3/spaces/").concat(w,"/rooms/").concat(A),nodesScope:[E],toolboxElements:[],sparkline:S,hasToolbox:!S},makeTrack:h});return T.getRoot().appendChild(e),e}),[t,l]);return(0,p.Vt)({lastUpdated:x,lastStatusChange:y},l),(0,n.useEffect)((()=>(I((e=>({...e,..._}))),()=>{_&&_.destroy(),I(null)})),[_]),n.createElement(c.Flex,{flex:!1,width:"100%",height:75},l&&_?n.createElement(o.A,{"data-chartid":t,chart:_,hasHeader:!S,hasFooter:!S}):n.createElement(i.A,{iconProps:f,title:"Loading chart..."}))}},87815:(e,t,a)=>{a.d(t,{A:()=>i});var n=a(58168),o=(a(25440),a(96540)),l=a(8711),r=a(83199),c=a(52768);const s=l.default.div.withConfig({displayName:"timeCell__TimeBox",componentId:"sc-1k785pi-0"})(["display:flex;flex-direction:column;align-items:start;justify-content:center;"]),i=e=>{let{rawTime:t,secs:a,long:l,nowrap:i=!1,...d}=e;const{timeAgo:u,formattedDate:m=""}=(0,c.lT)({rawTime:t,secs:a,long:l});return o.createElement(s,d,o.createElement(r.TextSmall,null,u),o.createElement(r.TextMicro,(0,n.A)({color:"textLite"},i&&{whiteSpace:"nowrap"}),m?null===m||void 0===m?void 0:m.replace(/ /g," - "):""))}},5871:(e,t,a)=>{a.d(t,{A:()=>D});var n=a(96540),o=a(83199),l=a(64118),r=a(87292),c=a(8711);const s=c.default.div.withConfig({displayName:"styled__StyledAlertTypeIndicator",componentId:"sc-19nk935-0"})(["width:",";height:",";border-radius:",";background-color:",";"],(0,o.getSizeBy)(1),(0,o.getSizeBy)(1),(0,o.getSizeBy)(1),(e=>{let{critical:t,theme:a}=e;return(0,o.getColor)(t?"error":["yellow","sunglow"])({theme:a})})),i=c.default.a.withConfig({displayName:"styled__StyledLink",componentId:"sc-19nk935-1"})(["padding:",";text-decoration:none;color:",";&:visited{color:",";}&:hover{text-decoration:none;}"],(e=>{let{padding:t}=e;return null!==t&&void 0!==t?t:"0"}),(0,o.getColor)("success"),(0,o.getColor)("success"));var d=a(29217);const u=e=>{let{title:t,value:a,testId:l,status:c}=e;return a?n.createElement(n.Fragment,null,n.createElement(o.Flex,{justifyContent:"between"},n.createElement(o.Flex,{gap:1,alignItems:"center"},n.createElement(s,{critical:"critical"===c}),n.createElement(o.H6,{color:"textDescription"},t)),n.createElement(o.Flex,{gap:1,alignItems:"center"},n.createElement(o.TextSmall,{color:"textDescription"},"More details about"," ",n.createElement(o.TextSmall,{strong:!0,color:"textDescription"},"$this")," ","command"),n.createElement(d.A,{content:"The $this value comes from the DB Lookup or the Calculation above",isBasic:!0,plain:!0},n.createElement(o.Flex,{margin:[-.5,0,0]},n.createElement(o.Icon,{name:"informationPress",color:"textLite",width:"16px",height:"16px"}))))),n.createElement(r.Ay,{"data-testid":l},a)):null};var m=a(31826),g=a(93001),p=a(29708);const h=e=>{if("string"===typeof e&&(e=parseInt(e,10)),0===e)return"now";const t=new Date;return 
e<0?(0,m.k)((0,g.p)(t,e),t,{addSuffix:!0}):(0,m.k)((0,p.W)(t,e),t,{addSuffix:!0})},f=e=>{const{instance:t,lookupAfter:a,lookupBefore:o,lookupDimensions:r,lookupMethod:c,lookupOptions:s}=(0,l.JL)(e);return(0,n.useMemo)((()=>a?(e=>{let{lookupOptions:t,lookupDimensions:a,lookupMethod:n,lookupAfter:o,lookupBefore:l,instance:r}=e,c="of all values";if(a){const e=a.split(","),t=e.length>1?"of the sum of dimensions":"of all values of dimension";c="".concat(t," ").concat(e.join(", "))}const s=t?", with options ".concat(t):"";return"".concat(n," ").concat(c," of chart ").concat(r,", starting ").concat(h(o+l)," and up to ").concat(h(l)).concat(s)})({lookupOptions:s,lookupDimensions:r,lookupMethod:c,lookupAfter:a,lookupBefore:o,instance:t}):""),[a,t])};var E=a(43407);const v=e=>{let{heading:t,value:a,testId:l}=e;return a&&a.length?n.createElement(o.Flex,{column:!0,gap:3},t&&n.createElement(o.H6,{textTransform:"uppercase",color:"textDescription"},t),n.createElement(r.Ay,{"data-testid":l},a)):null},y=(0,n.memo)(v);var b=a(25232),x=a(58168);const w=e=>{let{iconName:t,category:a,iconSize:l="16px",value:r,testid:c}=e;return r?n.createElement(o.Flex,{alignItems:"center",gap:1},n.createElement(o.Flex,{alignItems:"center",gap:1,basis:"100px"},n.createElement(o.Icon,(0,x.A)({name:t,color:"textLite"},l&&{height:l,width:l})),n.createElement(o.TextSmall,{whiteSpace:"nowrap",color:"textDescription"},a,":")),n.createElement(o.TextSmall,{strong:!0,"data-testid":c},r)):null},A=e=>{let{id:t,testid:a}=e;const{calculation:r,warning:c,critical:i,updateEvery:d}=(0,l.JL)(t),m=f(t),g=(0,n.useMemo)((()=>(0,b.Ay)(r)),[r]);return n.createElement(o.Flex,{column:!0,gap:4},n.createElement(E.A,{iconName:"code"},"Configuration"),n.createElement(y,{heading:"DB lookup",value:m,testId:"".concat(a,"-lookup")}),n.createElement(y,{heading:"Calculation",value:g,testId:"".concat(a,"-calculation")}),n.createElement(o.H6,{textTransform:"uppercase",color:"textDescription"},"Severity Conditions"),n.createElement(o.Flex,{column:!0,gap:2},n.createElement(u,{title:"Warning when",value:c,testId:"".concat(a,"-warningWhen"),status:"warning"},n.createElement(s,{critical:!1})),n.createElement(u,{title:"Critical when",value:i,testId:"".concat(a,"-criticalWhen"),status:"critical"},n.createElement(s,{critical:!0}))),n.createElement(o.H6,{textTransform:"uppercase",color:"textDescription"},"Check"),n.createElement(w,{iconName:"clock_hollow",category:"Check every",value:"".concat(d," seconds"),testid:"".concat(a,"-checkEvery")}))},I=(0,n.memo)(A);var k=a(47762),C=a(92861),S=a(40267),T=a(87815),N=a(45976);const L=e=>{let{isHighlighted:t,lastStatusChangeValue:a,lastStatusChange:l,units:r,status:c,nodeName:s,...i}=e;return n.createElement(o.Flex,(0,x.A)({background:t&&"modalTabsBackground",border:{side:"all",color:"borderSecondary"},color:"textDescription",justifyContent:"between",padding:[4],round:!0},i),n.createElement(o.Flex,{column:!0,justifyContent:"between",gap:2},s&&n.createElement(o.Text,{strong:!0},s),n.createElement(T.A,{rawTime:l,secs:!0,"data-testid":"nodeItem-lastStatusChange"})),n.createElement(o.Flex,{height:5,gap:2,alignItems:"center",alignSelf:"start"},n.createElement(d.A,{content:"CLEAR"===c?"Cleared value":"Triggered value",align:"bottom"},n.createElement(o.Box,null,n.createElement(N.A,{loaded:!0,status:c,units:r,value:a,"data-testid":"nodeItem-alertValue"}))),n.createElement(S.A,{flavour:c,icon:"alarm_bell","data-testid":"alertView-alertPill-value"},c)))};var _=a(38966);const F=e=>{let{children:t}=e;return 
n.createElement(o.Flex,{gap:2,column:!0},t)},D=e=>{let{id:t,nodeName:a,testid:r="alertDetailsModal"}=e;const{class:c,instanceName:s,component:d,family:u,nodeId:m,type:g,lastStatusChangeValue:p,lastStatusChange:h,units:f,status:v,notificationType:y}=(0,l.JL)(t),b=(0,k.xY)(m,"name");return n.createElement(o.Flex,{column:!0,gap:4,padding:[0,0,4,0]},n.createElement(E.A,{iconName:"information",margin:[0,0,0,-.5]},"Alert Info"),n.createElement(F,null,n.createElement(w,{iconName:"nodes_hollow",category:"Node",value:a||b,testid:"".concat(r,"-hostname")}),n.createElement(w,{iconName:"charts_view",category:"Instance",value:s,testid:"".concat(r,"-chartId")}),n.createElement(w,{iconName:"data_retention",category:"Type",value:g,testid:"".concat(r,"-type")}),n.createElement(w,{iconName:"last_week",category:"Hostname"}),n.createElement(w,{iconName:"metrics",category:"Component",value:d,testid:"".concat(r,"-component")}),n.createElement(w,{iconName:"applications_hollow",category:"Family",value:u,testid:"".concat(r,"-family")}),n.createElement(w,{iconName:"networkingStack",category:"Class",value:c,testid:"".concat(r,"-class")}),n.createElement(w,{iconName:"incident_manager",category:"Event ID",testid:"".concat(r,"-eventId")})),n.createElement(I,{id:t,testid:r}),n.createElement(E.A,{iconName:"nodes_hollow",iconSize:"20px"},"Instance Values - Node Instances"),n.createElement(o.Flex,{column:!0,gap:2},"agent"!==y&&n.createElement(o.Text,{color:"textDescription"},"A node may be claimed through multiple instances on the cloud (streaming through parent) and Netdata aggregates the alerts from all the instances and displays a single Active alert based on the highest severity, latest change."),n.createElement(L,{lastStatusChangeValue:p,lastStatusChange:h,units:f,status:v,nodeName:b,isHighlighted:!0})),n.createElement(E.A,{iconName:"gear",iconSize:"20px"},"Edit Alert"),n.createElement(o.Flex,{gap:2},n.createElement(_.A,{alertId:t,nodeId:m,small:!0,"data-testid":"".concat(r,"-edit-alert-button-footer")}),n.createElement(i,{href:C.UW,target:"_blank",padding:"0 4px 0 0","data-testid":"".concat(r,"-editAlertConfig-link"),"data-ga":"alert-modal::click-edit::alerts-view"},"Visit the documentation",n.createElement(o.Icon,{name:"arrow_left",rotate:2,size:"small",margin:[0,0,-.75,1],color:"success"}))))}},45976:(e,t,a)=>{a.d(t,{A:()=>s});var n=a(58168),o=a(96540),l=a(83199),r=a(20081);const c={critical:{background:"errorSemi",border:{side:"all",color:"error"},color:"error"},warning:{background:"warningSemi",border:{side:"all",color:"warning"},color:"warning"},cleared:{background:"successSemi",border:{side:"all",color:"success"},color:"success"},default:{background:"generic",border:{side:"all",color:"border"},color:"text"}},s=e=>{let{loaded:t,status:a,units:s,value:i,valueWithUnit:d,...u}=e;const{background:m,border:g,color:p}=c[a]||c.default,h=(0,o.useMemo)((()=>{if(d)return d;if(void 0===i)return"-";const e=(0,r.W)(i);return"".concat(e," ").concat(s||"-")}),[i,d,s]);return o.createElement(l.Flex,(0,n.A)({background:m,border:g,padding:[.5,2],round:9,width:{min:10},justifyContent:"center"},u),o.createElement(l.TextSmall,{color:p,strong:!0,whiteSpace:"nowrap",truncate:!0},t?h:"-"))}},51913:(e,t,a)=>{a.d(t,{ZV:()=>b,bg:()=>h,vQ:()=>p,Ws:()=>f,Gp:()=>y,MY:()=>v});a(17333),a(3064),a(41393),a(98992),a(54520),a(72577),a(81454),a(62953);var n=a(96540),o=a(47444),l=a(26655),r=a(49286);var c=a(3914),s=a(69765);const 
i=(0,o.eU)({key:"assistantAlert",default:null}),d=(0,o.eU)({key:"assistantAlertsAtom",default:[]}),u=(0,o.eU)({key:"assistantChatAtom",default:{open:!1,messages:[]}});var m=a(33829);const g=(0,o.K0)({key:"assistantAlertSelector",get:e=>{let{space:t,room:a,alert:n,node:o,chart:r}=e;return()=>(e=>{let{space:t,room:a,alert:n,node:o,chart:r}=e;return l.A.post("/api/v1/troubleshoot",{space:t,room:a,alarm:n,node:o,chart:r})})({space:t,room:a,alert:n,node:o||"dummy-node-id",chart:r||"dummy-chart-id"})},cachePolicy_UNSTABLE:{eviction:"most-recent"}}),p=e=>{var t;let{alert:a,node:n,chart:l}=e;const r=(0,c.vt)(),i=(0,s.ID)(),d=(0,o.xf)(g({space:r,room:i,alert:a,node:n,chart:l}));return{loaded:"loading"!==d.state,value:null===(t=d.contents)||void 0===t?void 0:t.data,hasError:"hasError"===d.state}},h=()=>(0,o.L4)(i),f=()=>(0,o.L4)(d),E=()=>(0,o.L4)(u),v=()=>{const[e,t]=E(),a=(0,n.useCallback)((()=>t((e=>({...e,open:!0})))),[t]),o=(0,n.useCallback)((()=>t((e=>({...e,open:!1})))),[t]);return[null===e||void 0===e?void 0:e.open,a,o]},y=()=>{const[e,t]=E(),a=(0,n.useCallback)((e=>t((t=>({...t,messages:[...t.messages,e]})))),[t]),o=(0,n.useCallback)((()=>{t((e=>({...e,messages:[]})))}),[t]),l=(0,n.useMemo)((()=>e.open&&!!e.messages.find((e=>{let{pending:t}=e;return!!t}))),[e]);return{messages:null===e||void 0===e?void 0:e.messages,pending:l,addMessage:a,clear:o}},b=()=>{const e=(()=>{const{addMessage:e}=y();return(0,n.useCallback)((t=>{const a=(0,m.A)();return e({id:a,type:"question",value:t}),a}),[e])})(),t=(()=>{const{addMessage:e}=y();return(0,n.useCallback)((t=>{let{questionId:a,pending:n,answer:o}=t;return e({id:(0,m.A)(),questionId:a,type:"answer",value:o,pending:n})}),[e])})(),a=(()=>{const[,e]=E();return(0,n.useCallback)((t=>{let{questionId:a,value:n,additionalInfo:o,error:l}=t;e((e=>({...e,messages:e.messages.map((e=>e.questionId==a?{...e,pending:!1,value:n,additionalInfo:o,error:l}:e))})))}),[])})();return(0,n.useCallback)((n=>{const o=e(n);var c;t({questionId:o,pending:!0}),(c=n,l.A.post("/api/v1/chat",{messages:[{role:"user",content:c}]},{transform:e=>(0,r.bn)(e)})).then((function(){let{data:e={}}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const{session:{messages:t},additionalInfo:n}=e,l=(t||[]).filter((e=>{let{role:t}=e;return"assistant"==t})),r=l.length?l[l.length-1]:null;null!==r&&void 0!==r&&r.content?a({questionId:o,value:r.content,additionalInfo:n}):a({questionId:o,error:"Something went wrong"})})).catch((e=>{a({questionId:o,error:e.message||"Something went wrong"})}))}),[e,a])}},38966:(e,t,a)=>{a.d(t,{A:()=>i});var n=a(58168),o=a(96540),l=a(83199),r=a(14125),c=a(47130);const s=(0,a(92155).A)((0,c.A)(l.Button)),i=e=>{let{alertId:t,nodeId:a,small:l=!1,isLoading:c,disabled:i,...d}=e;const{canEdit:u,goToEdit:m}=(0,r.q)({nodeId:a,alertId:t});return o.createElement(s,(0,n.A)({small:l,label:"Edit alert",onClick:m,flavour:"hollow",isLoading:c,disabled:c||!u||i,width:l?"112px":"150px","data-ga":"alert-modal::click-edit-congiguration::alerts-view",payload:{action:"Edit alert",alertId:t,nodeId:a},tooltip:u?null:"You cannot edit this alert."},d))}},14125:(e,t,a)=>{a.d(t,{l:()=>m,q:()=>u});a(62953);var n=a(96540),o=a(47767),l=a(27467),r=a(97054),c=a(3914),s=a(47762),i=a(64118),d=a(8239);const u=e=>{var t;let{alertId:a,alertName:l,nodeId:c,nodes:u}=e;const m=(0,o.Zp)(),{setState:g}=(0,d.L5)(),[p]=(0,r.Ay)(),{name:h}=(0,i.JL)(a),f=l||h,{isParent:E}=(0,s.xY)(c),{loaded:v,value:y,hasError:b,error:x}=(0,d.wd)({node:{value:c,isParent:E},path:"/health"}),w=!(null===y||void 
0===y||null===(t=y.tree)||void 0===t||null===(t=t["/health/alerts/prototypes"])||void 0===t||!t["health:alert:prototype:".concat(f)]);return{canEdit:v&&!b&&w,goToEdit:(0,n.useCallback)((()=>{g({nodeId:c,alertName:f,nodeIds:u,isAlert:!0}),m("/spaces/".concat(p,"/settings/configurations"),{replace:!0})}),[g,m,p,c,f]),error:(null===x||void 0===x?void 0:x.errorMessage)||x}},m=()=>{const e=(0,o.Zp)(),{slug:t}=(0,c.ap)(),[,a]=(0,l.N9)();return(0,n.useCallback)((n=>{n&&(a({dyncfgSearchTerm:n}),e("/spaces/".concat(t,"/settings/configurations"),{replace:!0}))}),[e,a,t])}},97054:(e,t,a)=>{a.d(t,{Ay:()=>d,KI:()=>i,TP:()=>s});var n=a(96540),o=a(47767),l=a(69765),r=a(3914);const c=(e,t,a)=>{const o=t();(0,n.useLayoutEffect)((()=>{o!==e&&a(e)}),[o,e])},s=()=>{const{roomSlug:e}=(0,o.g)(),t=(0,l.A2)();return c(e,l.QW,t),e},i=()=>{const{spaceSlug:e}=(0,o.g)(),t=(0,r.hX)();return c(e,r.bq,t),e},d=()=>[i(),s()]}}]); \ No newline at end of file diff --git a/src/web/gui/v2/963.35da4a3c4e49aac29dae.chunk.js b/src/web/gui/v2/963.35da4a3c4e49aac29dae.chunk.js deleted file mode 100644 index 6698ea438..000000000 --- a/src/web/gui/v2/963.35da4a3c4e49aac29dae.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="94514b2a-a272-48b2-988d-a174805c5b7f",e._sentryDebugIdIdentifier="sentry-dbid-94514b2a-a272-48b2-988d-a174805c5b7f")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[963],{92155:(e,t,o)=>{o.d(t,{A:()=>c});var n=o(58168),a=o(96540),r=o(50876);const l=e=>(0,a.forwardRef)(((t,o)=>{let{callback:l,feature:i,isStart:c,isSuccess:s,isFailure:d,eventReason:u,payload:p={},...g}=t;const{sendLog:f,isReady:b}=(0,r.A)(),v=(0,a.useCallback)((()=>{const e=g[l],t={feature:i,isStart:c,isSuccess:s,isFailure:d,eventReason:u,...p,...g["data-ga"]?{dataGa:g["data-ga"]}:{},...g.dataGa?{dataGa:g.dataGa}:{},...g["data-track"]?{dataTrack:g["data-track"]}:{},...g.label?{label:g.label}:{}};"function"==typeof e&&e(),f(t,!0)}),[l,f,b,p,g]),m=(0,a.useMemo)((()=>({...g,[l]:v})),[g,l,v]);return a.createElement(e,(0,n.A)({ref:o},m))}));var i=o(67276);const c=e=>(0,a.forwardRef)(((t,o)=>{let{payload:r={},...c}=t;const s=l(e);return a.createElement(s,(0,n.A)({},c,{ref:o,callback:"onClick",payload:{...r,action:i.o1.buttonClicked}}))}))},93155:(e,t,o)=>{o.d(t,{Df:()=>b,Dm:()=>g,EM:()=>f,Mh:()=>u,bO:()=>p});const n="mobileAppNotifications",a="trialWarning",r="alertConfiguration",l="editAlertConfiguration",i="userCustomSettings",c="oktaSSO",s=[a,r,n,c];function d(e){if(s.includes(e))return()=>!0;const t="true"===localStorage.getItem(e);return e=>e||t}const u=d(n)(),p=d(a)(),g=d(r)(),f=(d(l)(),d(i)()),b=d(c)()},18682:(e,t,o)=>{o.d(t,{C:()=>l});var n=o(33436),a=o(78217),r=o(13871);const l=function(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};return()=>{(0,n.A)(e);const o=(0,r.UI)({header:"Copied",text:"Command copied to your clipboard! 
Please run it on your node's terminal.",icon:"gear",...t,success:!0});a.A.success(o,{context:"copy"})}}},87292:(e,t,o)=>{o.d(t,{Ay:()=>u,R0:()=>d});var n=o(96540),a=o(8711),r=o(83199),l=o(18682);const i=(0,a.default)(r.Box).attrs((e=>({color:"textDescription",background:"modalTabsBackground",border:{side:"all",color:"borderSecondary"},padding:[4,10,4,4],position:"relative",width:"100%",...e}))).withConfig({displayName:"command__StyledTerminalCommand",componentId:"sc-wnwmk3-0"})(["color:",";border-radius:2px;overflow-wrap:anywhere;white-space:pre-wrap;font-family:monospace;letter-spacing:0.09px;line-height:18px;font-size:14px;word-break:break-word;"],(0,r.getColor)("textDescription")),c=(0,a.default)(r.Icon).withConfig({displayName:"command__StyledIcon",componentId:"sc-wnwmk3-1"})(["display:flex;align-self:flex-end;cursor:pointer;"]),s=(0,a.default)(r.Box).attrs({color:"textDescription",border:{side:"all",color:"borderSecondary"},background:"modalTabsBackground",padding:[0,1]}).withConfig({displayName:"command__CodeText",componentId:"sc-wnwmk3-2"})(["display:inline-block;color:",";border-radius:2px;font-family:monospace;letter-spacing:0.09px;line-height:16px;font-size:12px;font-weight:bold;word-break:break-word;"],(0,r.getColor)("textDescription")),d=e=>{let{children:t,...o}=e;return n.createElement(s,o,t)},u=e=>{let{children:t,confirmationText:o="Command copied to your clipboard.",commandText:a=t,...s}=e;return n.createElement(i,s,t,n.createElement(r.Box,{position:"absolute",bottom:"8px",right:"8px"},n.createElement(c,{name:"copy",size:"small",color:"primary",onClick:(0,l.C)(a||t,{text:o})})))}},47130:(e,t,o)=>{o.d(t,{A:()=>i});var n=o(58168),a=o(96540),r=o(83199),l=o(29217);const i=e=>function(){let{tooltipProps:t={},noWrapper:o,...i}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return i.tooltip||Object.keys(t).length?a.createElement(l.A,(0,n.A)({plain:!0,content:i.tooltip},t),o?a.createElement(e,i):a.createElement(r.Box,{"data-testid":"tooltip-box"},a.createElement(e,i))):a.createElement(e,i)}},36712:(e,t,o)=>{o.d(t,{$t:()=>k,FN:()=>A,MQ:()=>f,O0:()=>h,PE:()=>p,U2:()=>m,fn:()=>y,n$:()=>u,pZ:()=>v,td:()=>b,ue:()=>c});o(9920),o(14905),o(98992),o(3949),o(8872),o(62953);var n=o(26655),a=o(63950),r=o.n(a),l=o(49286),i=o(88116);const c={id:null,name:"",description:"",commitment:!1,pricing:[],enrolledAt:null,currentPeriodFrom:null,currentPeriodTo:null,committedNodes:0,billingEmail:"",balance:{amount:0,currency:"usd"}},s=e=>{var t;const o=i.Nw[e.class],n=(0,l.bn)(e),a=Array.isArray(n.pricing)?n.pricing.reduce(((e,t)=>({...e,[t.type]:t})),{}):{},r=null===(t=i.uC[o])||void 0===t?void 0:t[e.version];return{...c,...n,planLimitations:r,pricing:a,slug:o}},d=e=>(0,l.bn)(e),u=e=>n.A.get("/api/v2/spaces/".concat(e,"/billing/plans"),{transform:e=>{let{plans:t}=e;const o=t.reduce(((e,t)=>{const o=s(t),n=e[o.slug]||[];return"year"===o.interval?n.unshift(o):n.push(o),e[o.slug]=n,e}),{});return Object.entries(o).forEach((e=>{let[t,n]=e;const a=n.reduce(((e,t)=>{const{version:o}=t;return e[o]?e[o]=[...e[o],t]:e[o]=[t],e}),{});o[t]=a})),o}}),p=e=>n.A.get("/api/v2/spaces/".concat(e,"/billing/plan"),{allow401:!0,transform:s}),g=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:"update";return function(t){let o=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};const a="update"==e?n.A.put:n.A.post;return"function"!==typeof a?r():a("/api/v2/spaces/".concat(t,"/billing/plan"),(0,l.Jz)(o),{transform:e=>{let{url:t}=e;return 
t}})}},f=(e,t)=>g("checkout")(e,t),b=(e,t)=>g()(e,t),v=(e,t)=>n.A.get("/api/v2/spaces/".concat(e,"/billing/portal?redirect_url=").concat(encodeURIComponent(t)),{transform:e=>{let{url:t}=e;return t}}),m=(e,t)=>{let{after:o,before:a}=t;return n.A.get("/api/v2/spaces/".concat(e,"/billing/node-count?from=").concat(o,"&to=").concat(a))},h=function(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};return n.A.put("/api/v2/spaces/".concat(e,"/billing/plan/preview"),(0,l.Jz)(t),{transform:d})},y=function(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};return n.A.get("/api/v2/spaces/".concat(e,"/billing/plan/quote"),{transform:d,params:(0,l.Jz)(t)})},A=function(e){let{email:t}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};return n.A.post("/api/v2/spaces/".concat(e,"/billing/trial"),{email:t})},k=function(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};return n.A.post("api/v2/spaces/".concat(e,"/billing/promo-code/check"),(0,l.Jz)(t))}},19673:(e,t,o)=>{o.d(t,{og:()=>R,L_:()=>I,D:()=>S,JN:()=>w,Qh:()=>x,lU:()=>v,qW:()=>y,Tr:()=>k,i5:()=>N,M4:()=>_});o(9391),o(62953);var n=o(96540),a=o(47444);const r=(0,a.eU)({key:"billingPlanCancelling",default:!1});var l=o(63950),i=o.n(l),c=o(3914),s=o(36712),d=o(88116),u=o(29848),p=o(46741),g=o(37618);const f=[],b=(0,a.K0)({key:"plans",get:e=>()=>(0,s.n$)(e)}),v=()=>{var e;const t=(0,c.vt)(),o=(0,a.xf)(b(t));return{loaded:"loading"!==o.state,value:(null===(e=o.contents)||void 0===e?void 0:e.data)||f,hasError:"hasError"===o.state}},m=(0,a.K0)({key:"previewPlan",get:e=>{let{spaceId:t,payload:o}=e;return()=>o?(0,s.O0)(t,o):Promise.resolve({data:{noRequest:!0}})},cachePolicy_UNSTABLE:{eviction:"most-recent"}}),h=e=>{var t;return d.aT[null===e||void 0===e||null===(t=e.response)||void 0===t||null===(t=t.data)||void 0===t?void 0:t.errorMsgKey]},y=(e,t)=>{var o;const n=(0,c.vt)(),r=(0,a.xf)(m({spaceId:n,payload:e,forceFetch:t}));return{loading:"loading"===r.state,value:(null===(o=r.contents)||void 0===o?void 0:o.data)||{},promoCodeError:"hasError"===r.state?h(r.contents):void 0}},A=(0,a.K0)({key:"planQuoteSelector",get:e=>{let{spaceId:t,payload:o}=e;return()=>o?(0,s.fn)(t,o):Promise.resolve({data:{noRequest:!0}})},cachePolicy_UNSTABLE:{eviction:"most-recent"}}),k=(e,t)=>{var o;const n=(0,c.vt)(),r=(0,a.xf)(A({spaceId:n,payload:e,forceFetch:t}));return{loading:"loading"===r.state,value:(null===(o=r.contents)||void 0===o?void 0:o.data)||{},promoCodeError:"hasError"===r.state?h(r.contents):void 0}},E=(0,a.K0)({key:"currentPlan",get:e=>{let{spaceId:t,canFetchPlan:o}=e;return()=>o&&t&&!(0,g.ES)(t)?(0,s.PE)(t):Promise.resolve()}}),w=()=>{var e;const t=(0,c.vt)(),o=(0,p.JT)("billing:ReadBasic"),n=(0,a.xf)(E({spaceId:t,canFetchPlan:o})),r=(0,a.RH)(E({spaceId:t,canFetchPlan:o}));return{loaded:"loading"!==n.state,value:(null===(e=n.contents)||void 0===e?void 0:e.data)||s.ue,hasError:"hasError"===n.state,refresh:r}},x=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:window.location.href;const[t,o]=(0,n.useState)(!1),a=(0,c.vt)();return[(0,n.useCallback)((()=>{o(!0),(0,s.pZ)(a,e).then((e=>{let{data:t}=e;return location.href=t})).finally((()=>o(!1)))}),[a,e]),t]},C=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:s.td;const t=(0,c.vt)(),o=w(),n=(0,u.ly)();return!o.loaded||o.hasError?i():a=>e(t,a).then((e=>{o.refresh(),n(),null!==e&&void 0!==e&&e.data&&(location.href=e.data)}))},I=()=>C(s.MQ),_=()=>C(),N=()=>{const e=(0,c.vt)();return 
t=>(0,s.FN)(e,t)},R=()=>{const[e,t]=(0,a.L4)(r);return{cancelling:e,startCancelling:(0,n.useCallback)((()=>t(!0)),[]),stopCancelling:(0,n.useCallback)((()=>t(!1)),[])}},P=(0,a.K0)({key:"couponCheck",get:e=>{let{spaceId:t,...o}=e;return()=>o.promotionCode?(0,s.$t)(t,o):Promise.resolve({})}}),S=function(){var e;let t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const o=(0,c.vt)(),n=(0,a.xf)(P({spaceId:o,...t}));return{loading:"loading"===n.state,discount:(null===(e=n.contents)||void 0===e||null===(e=e.data)||void 0===e?void 0:e.percent_off)||0,promoCodeError:"hasError"===n.state?h(n.contents):void 0}}},6586:(e,t,o)=>{o.d(t,{A:()=>c});var n=o(58168),a=o(96540),r=o(84976),l=o(4659),i=o(3914);const c=e=>{let{children:t,...o}=e;const c=(0,i.bq)(),s="/spaces/".concat(c,"/settings/nodes");return a.createElement(l.A,(0,n.A)({as:r.N_,to:s},o),t)}},12602:(e,t,o)=>{o.d(t,{A:()=>c});var n=o(58168),a=o(96540),r=o(84976),l=o(4659),i=o(27994);const c=e=>{let{children:t,...o}=e;const{url:c}=(0,i.A)("business");return a.createElement(l.A,(0,n.A)({as:r.N_,to:c,disabled:!c},o),t)}},73865:(e,t,o)=>{o.d(t,{A:()=>c});o(62953);var n=o(96540),a=o(69418),r=o(47762),l=o(19673),i=o(93155);const c=()=>{const[,e,t]=(0,a.A)(),{loaded:o,nodes:c,hasError:s,refresh:d}=(0,r.Du)(),{loaded:u,value:p,hasError:g}=(0,l.JN)(),f=i.bO&&!(null===p||void 0===p||!p.planLimitations),{maxNodes:b=0,maxDashboards:v=0}=(null===p||void 0===p?void 0:p.planLimitations)||{},m=(0,n.useCallback)((o=>f&&!e&&t>b&&!c.includes(o)),[c,f,e,t]);return{loaded:o&&u,hasError:s||g,currentPlan:p,hasLimitations:f,maxNodes:b,maxDashboards:v,preferredNodes:c,isNodeRestricted:m,refreshPreferredNodes:d}}},27994:(e,t,o)=>{o.d(t,{A:()=>l});var n=o(96540),a=o(19673),r=o(3914);const l=e=>{const{loaded:t,value:o,hasError:l}=(0,a.JN)(),i=(0,r.bq)(),c=t&&o?o.billingEmail?"update":"checkout":"",s="/spaces/".concat(i,"/settings/billing/all-plans"),d=(0,n.useCallback)((e=>t&&o?"".concat(s,"#billingModalType=").concat(c,"&billingModalSlug=").concat(e):null),[e,i,c]),u=e?d(e):s;return{loaded:t,getUrl:d,url:u,hasError:l}}},57605:(e,t,o)=>{o.d(t,{A:()=>l});o(62953);var n=o(45467),a=o(87659),r=o(80925);const l=()=>{const[e,t]=(0,a.A)(!1),o=(0,r.e)();return(0,n.A)((()=>{if(o&&o.getRoot())return o.getRoot().updateAttribute("paused",!o.getRoot().getAttribute("autofetchOnWindowBlur")&&o.getRoot().getAttribute("blurred")||e),()=>o.getRoot().updateAttribute("paused",!o.getRoot().getAttribute("autofetchOnWindowBlur")&&o.getRoot().getAttribute("blurred"))}),[e]),t}}}]); \ No newline at end of file diff --git a/src/web/gui/v2/979.3e5fddf93c977e6c71c3.chunk.js b/src/web/gui/v2/979.3e5fddf93c977e6c71c3.chunk.js deleted file mode 100644 index 7f9882c44..000000000 --- a/src/web/gui/v2/979.3e5fddf93c977e6c71c3.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="aebc26a4-0684-493c-b27c-4b77771b8c35",e._sentryDebugIdIdentifier="sentry-dbid-aebc26a4-0684-493c-b27c-4b77771b8c35")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[979],{14125:(e,t,n)=>{n.d(t,{l:()=>d,q:()=>l});n(62953);var 
o=n(96540),r=n(47767),c=n(27467),a=n(97054),s=n(3914),i=n(47762),u=n(64118),f=n(8239);const l=e=>{var t;let{alertId:n,alertName:c,nodeId:s,nodes:l}=e;const d=(0,r.Zp)(),{setState:p}=(0,f.L5)(),[b]=(0,a.Ay)(),{name:v}=(0,u.JL)(n),h=c||v,{isParent:y}=(0,i.xY)(s),{loaded:g,value:w,hasError:m,error:E}=(0,f.wd)({node:{value:s,isParent:y},path:"/health"}),S=!(null===w||void 0===w||null===(t=w.tree)||void 0===t||null===(t=t["/health/alerts/prototypes"])||void 0===t||!t["health:alert:prototype:".concat(h)]);return{canEdit:g&&!m&&S,goToEdit:(0,o.useCallback)((()=>{p({nodeId:s,alertName:h,nodeIds:l,isAlert:!0}),d("/spaces/".concat(b,"/settings/configurations"),{replace:!0})}),[p,d,b,s,h]),error:(null===E||void 0===E?void 0:E.errorMessage)||E}},d=()=>{const e=(0,r.Zp)(),{slug:t}=(0,s.ap)(),[,n]=(0,c.N9)();return(0,o.useCallback)((o=>{o&&(n({dyncfgSearchTerm:o}),e("/spaces/".concat(t,"/settings/configurations"),{replace:!0}))}),[e,n,t])}},3705:(e,t,n)=>{n.d(t,{A:()=>c});n(62953);var o=n(96540);const r={x:0,y:0,width:0,height:0,top:0,left:0,bottom:0,right:0},c=()=>{const[e,t]=(0,o.useState)(null),[n,c]=(0,o.useState)(r),a=(0,o.useMemo)((()=>new window.ResizeObserver((e=>{if(e[0]){const{x:t,y:n,width:o,height:r,top:a,left:s,bottom:i,right:u}=e[0].contentRect;c({x:t,y:n,width:o,height:r,top:a,left:s,bottom:i,right:u})}}))),[]);return(0,o.useLayoutEffect)((()=>{if(e)return a.observe(e),()=>{a.disconnect()}}),[e]),[t,n]}},97054:(e,t,n)=>{n.d(t,{Ay:()=>f,KI:()=>u,TP:()=>i});var o=n(96540),r=n(47767),c=n(69765),a=n(3914);const s=(e,t,n)=>{const r=t();(0,o.useLayoutEffect)((()=>{r!==e&&n(e)}),[r,e])},i=()=>{const{roomSlug:e}=(0,r.g)(),t=(0,c.A2)();return s(e,c.QW,t),e},u=()=>{const{spaceSlug:e}=(0,r.g)(),t=(0,a.hX)();return s(e,a.bq,t),e},f=()=>[u(),i()]},84428:(e,t,n)=>{var o=n(78227)("iterator"),r=!1;try{var c=0,a={next:function(){return{done:!!c++}},return:function(){r=!0}};a[o]=function(){return this},Array.from(a,(function(){throw 2}))}catch(s){}e.exports=function(e,t){try{if(!t&&!r)return!1}catch(s){return!1}var n=!1;try{var c={};c[o]=function(){return{next:function(){return{done:n=!0}}}},e(c)}catch(s){}return n}},87290:(e,t,n)=>{var o=n(50516),r=n(19088);e.exports=!o&&!r&&"object"==typeof window&&"object"==typeof document},50516:e=>{e.exports="object"==typeof Deno&&Deno&&"object"==typeof Deno.version},19088:(e,t,n)=>{var o=n(24475),r=n(44576);e.exports="process"===r(o.process)},10916:(e,t,n)=>{var o=n(24475),r=n(80550),c=n(94901),a=n(92796),s=n(33706),i=n(78227),u=n(87290),f=n(50516),l=n(96395),d=n(77388),p=r&&r.prototype,b=i("species"),v=!1,h=c(o.PromiseRejectionEvent),y=a("Promise",(function(){var e=s(r),t=e!==String(r);if(!t&&66===d)return!0;if(l&&(!p.catch||!p.finally))return!0;if(!d||d<51||!/native code/.test(e)){var n=new r((function(e){e(1)})),o=function(e){e((function(){}),(function(){}))};if((n.constructor={})[b]=o,!(v=n.then((function(){}))instanceof o))return!0}return!t&&(u||f)&&!h}));e.exports={CONSTRUCTOR:y,REJECTION_EVENT:h,SUBCLASSING:v}},90537:(e,t,n)=>{var o=n(80550),r=n(84428),c=n(10916).CONSTRUCTOR;e.exports=c||!r((function(e){o.all(e).then(void 0,(function(){}))}))},96167:(e,t,n)=>{var o=n(46518),r=n(69565),c=n(79306),a=n(36043),s=n(1103),i=n(72652);o({target:"Promise",stat:!0,forced:n(90537)},{allSettled:function(e){var t=this,n=a.f(t),o=n.resolve,u=n.reject,f=s((function(){var n=c(t.resolve),a=[],s=0,u=1;i(e,(function(e){var 
c=s++,i=!1;u++,r(n,t,e).then((function(e){i||(i=!0,a[c]={status:"fulfilled",value:e},--u||o(a))}),(function(e){i||(i=!0,a[c]={status:"rejected",reason:e},--u||o(a))}))})),--u||o(a)}));return f.error&&u(f.value),n.promise}})}}]); \ No newline at end of file diff --git a/src/web/gui/v2/9818.3ce64e0b472412bfbc97.chunk.js b/src/web/gui/v2/9818.3ce64e0b472412bfbc97.chunk.js deleted file mode 100644 index 3d89ec69a..000000000 --- a/src/web/gui/v2/9818.3ce64e0b472412bfbc97.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},a=(new Error).stack;a&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[a]="d461deb3-9944-4c5e-b0a6-c59d648f9cb0",e._sentryDebugIdIdentifier="sentry-dbid-d461deb3-9944-4c5e-b0a6-c59d648f9cb0")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[9818],{92155:(e,a,t)=>{t.d(a,{A:()=>r});var n=t(58168),l=t(96540),o=t(50876);const d=e=>(0,l.forwardRef)(((a,t)=>{let{callback:d,feature:c,isStart:r,isSuccess:i,isFailure:s,eventReason:f,payload:u={},...b}=a;const{sendLog:p,isReady:g}=(0,o.A)(),y=(0,l.useCallback)((()=>{const e=b[d],a={feature:c,isStart:r,isSuccess:i,isFailure:s,eventReason:f,...u,...b["data-ga"]?{dataGa:b["data-ga"]}:{},...b.dataGa?{dataGa:b.dataGa}:{},...b["data-track"]?{dataTrack:b["data-track"]}:{},...b.label?{label:b.label}:{}};"function"==typeof e&&e(),p(a,!0)}),[d,p,g,u,b]),k=(0,l.useMemo)((()=>({...b,[d]:y})),[b,d,y]);return l.createElement(e,(0,n.A)({ref:t},k))}));var c=t(67276);const r=e=>(0,l.forwardRef)(((a,t)=>{let{payload:o={},...r}=a;const i=d(e);return l.createElement(i,(0,n.A)({},r,{ref:t,callback:"onClick",payload:{...o,action:c.o1.buttonClicked}}))}))},89818:(e,a,t)=>{t.r(a),t.d(a,{MobileApp:()=>i,default:()=>s});t(25440);var n=t(96540),l=t(83199),o=t(28738),d=t(92155),c=t(63314);const r=(0,d.A)(l.Button),i=()=>n.createElement(c.Ay,{feature:"MobileApp"},n.createElement(l.Layer,{full:!0},n.createElement(l.Flex,{alignItems:"center",justifyContent:"center",column:!0,width:"100%",height:"100%",background:"mainBackground",gap:4},n.createElement(o.m,null),n.createElement(l.TextBigger,{textAlign:"center"},"Please open this link using your mobile device."),n.createElement(l.Flex,{alignItems:"center"},n.createElement(r,{onClick:()=>window.location.replace("/"),feature:"MobileApp",payload:{label:"Go to app"}},"Go to app"))))),s=i}}]); \ No newline at end of file diff --git a/src/web/gui/v2/9843.93f8c71c64ef97b9905e.chunk.js b/src/web/gui/v2/9843.93f8c71c64ef97b9905e.chunk.js deleted file mode 100644 index b8efdb0a9..000000000 --- a/src/web/gui/v2/9843.93f8c71c64ef97b9905e.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="ea998443-d78a-4069-961f-799ca2029e3e",e._sentryDebugIdIdentifier="sentry-dbid-ea998443-d78a-4069-961f-799ca2029e3e")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof 
self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[9843],{19843:(e,t,l)=>{l.r(t),l.d(t,{default:()=>C});var n=l(58168),a=(l(62953),l(96540)),o=l(83199),r=l(22292),i=l(8711);i.default.ul.withConfig({displayName:"styled__StyledList",componentId:"sc-z791f1-0"})(["list-style:disc outside none;margin-left:16px;"]);const d=(0,i.default)(o.ModalContent).attrs((e=>{let{isMobile:t}=e;return{width:t?{base:"95vw"}:{}}})).withConfig({displayName:"styled__TrialWelcomeModalContent",componentId:"sc-z791f1-1"})([""]);var s=l(34641),c=l(66732);const u=e=>{let{onOptOutClick:t,isFailure:l}=e;return a.createElement(o.Flex,{column:!0,gap:4,width:{max:120},padding:[0,0,4,0]},l?a.createElement(a.Fragment,null,a.createElement(o.TextBigger,null,"Enrolment to 30-day free Business trial failed."),a.createElement(o.TextBigger,{lineHeight:1.5},"Something unexpected happened when trying to enrol you to the free Business trial.")):a.createElement(a.Fragment,null,a.createElement(o.TextBigger,{lineHeight:1.5},"We are happy to upgrade your account to Netdata Business, for free, for"," ",a.createElement(o.TextBigger,{strong:!0},"30 days"),"."),a.createElement(o.TextBigger,null,"Enjoy the best of Netdata!"),"function"===typeof t&&a.createElement(c.A,{onOptOutClick:t})))};var m=l(87659),f=l(92155),g=l(63314),p=l(47731),E=l(25624),b=l(35454),y=l(50503),h=l(19673),w=l(5668);const _=(0,f.A)(o.Button),C=(0,a.memo)((()=>{const{trialWelcomeVisible:e}=(0,E.A)(),t=(0,p.J)(),l=(0,r.uW)("email"),[i,,,c]=(0,m.A)(!0),[f,,C]=(0,m.A)(),{isFailure:x,reset:A}=(0,y.A)(),[B,,T,k]=(0,m.A)(),{refreshPlan:v}=(0,E.A)(),[,I]=(0,w.ng)("trialModalDismissed"),M=(0,h.i5)(),F=(0,a.useCallback)((()=>{T(),M({email:l}).then((()=>{setTimeout((()=>{v(),A(),k()}),2e3)})).catch((()=>{k()}))}),[l]),P=(0,a.useMemo)((()=>x?{feature:"CloseTrialEnrolmentErrorModal",label:"Close",flavour:"hollow"}:{feature:"TrialAccept",label:"OK",textTransform:"uppercase",flavour:"hollow",icon:"thumb_up"}),[x]);return e?f?a.createElement(s.A,{onDecline:C,onCancellingEnd:()=>{localStorage.setItem(b.$B,!0),C(),c()}}):i?a.createElement(o.Modal,{backdropProps:{backdropBlur:!0}},a.createElement(g.Ay,{feature:"TrialWelcome"},a.createElement(d,{isMobile:t},a.createElement(o.ModalHeader,null,a.createElement(o.Flex,{gap:2,alignItems:"center"},a.createElement(o.Icon,{name:"netdataPress",color:"text"}),a.createElement(o.H4,null,"Welcome to Netdata!"))),a.createElement(o.ModalBody,null,a.createElement(u,{isFailure:x})),a.createElement(o.ModalFooter,null,a.createElement(o.Flex,{gap:4,justifyContent:"end",padding:[1,2]},x&&a.createElement(_,{feature:"RetryTrialEnrolment",label:"Retry",onClick:F,disabled:B,isLoading:B}),a.createElement(_,(0,n.A)({onClick:()=>{var e,t;(c(),A(),localStorage.setItem(b.$B,!0),!x&&null!==(e=window.posthog)&&void 0!==e&&e.setPersonProperties)&&(null===(t=window.posthog)||void 0===t||t.setPersonProperties({netdata_cloud_trial_modal_seen:!0}));I(!0)},disabled:B},P))))))):null:null}))}}]); \ No newline at end of file diff --git a/src/web/gui/v2/9912.702300c2dd9616289606.chunk.js b/src/web/gui/v2/9912.702300c2dd9616289606.chunk.js deleted file mode 100644 index 815f314db..000000000 --- a/src/web/gui/v2/9912.702300c2dd9616289606.chunk.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},n=(new 
Error).stack;n&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[n]="585f0732-e454-415f-880b-614ed891b696",e._sentryDebugIdIdentifier="sentry-dbid-585f0732-e454-415f-880b-614ed891b696")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[9912],{9912:(e,n,d)=>{d.r(n),d.d(n,{default:()=>c});var o=d(96540),t=d(22292),f=d(58168),l=(d(62953),d(39225)),r=d(69418),s=d(73865);const a=(0,l.A)((()=>d.e(3750).then(d.bind(d,63750))),"Modal"),u=e=>{const[,n,d]=(0,r.A)(),{loaded:t,hasLimitations:l,maxNodes:u,preferredNodes:b,refreshPreferredNodes:c}=(0,s.A)();return!(n||!t||!l)&&(d>u&&!b.length)?o.createElement(o.Suspense,{fallback:""},o.createElement(a,(0,f.A)({onSuccessfulSave:c},e))):null};var b=d(93155);const c=e=>{const n=(0,t.uW)("isAnonymous");return b.bO&&!n?o.createElement(u,e):null}}}]);
\ No newline at end of file
diff --git a/src/web/gui/v2/LICENSE.md b/src/web/gui/v2/LICENSE.md
deleted file mode 100644
index 865d094f1..000000000
--- a/src/web/gui/v2/LICENSE.md
+++ /dev/null
@@ -1,42 +0,0 @@
-# Netdata Cloud UI License v1.0 (NCUL1)
-
-## Acceptance
-By using the software, you agree to all of the terms and conditions below.
-
-## Copyright License
-The licensor grants you a non-exclusive, royalty-free, worldwide, non-sublicensable, non-transferable license to use, copy, distribute, make available the software, in each case subject to the limitations, restrictions and conditions below.
-
-## Limitations
-This license allows you to use the Software only to interface with the licensor's other software components, such as Netdata Agents and Netdata Cloud. Any use with replacements for these components is not permitted.
-
-## Restrictions
-The Software is provided in a binary form for use by end-users. You may not reverse engineer, decompile, disassemble, or modify the Software. The Software is licensed as a single product and its component parts may not be separated.
-
-## Patents
-If you or your company make any written claim that the software infringes or contributes to infringement of any patent, your license for the software granted under these terms ends immediately. If your company makes such a claim, your license ends immediately for work on behalf of your company.
-
-## Notices
-You must ensure that anyone who gets a copy of the Software from you also gets a copy of these terms.
-
-## No Other Rights
-These terms do not imply any licenses other than those expressly granted in these terms.
-
-## Termination
-If you use the Software in violation of any of these terms, such use is not licensed, and your licenses will automatically terminate. If the licensor provides you with a notice of your violation, and you cease all violations of this license no later than 30 days after you receive that notice, your licenses will be reinstated retroactively. However, if you violate these terms after such reinstatement, any additional violation of these terms will cause your licenses to terminate automatically and permanently.
-
-## No Warranties and No Liability
-The software comes "As Is", without any express or implied warranties of any kind, including but not limited to any warranties of merchantability, non-infringement, or fitness for a particular purpose. The licensor will not be liable to you for any damages arising out of these terms or the use or nature of the Software, under any kind of legal claim.
-
-## Open Source Components
-The software includes certain third party open source components. Each of these components is subject to its own license. The list of open-source components used by Netdata Cloud UI is [here](https://app.netdata.cloud/3D_PARTY_LICENSES.txt).
-
-## Definitions
-The "licensor" is Netdata Inc., the entity offering these terms, and the "**software**" is the Netdata Cloud UI software the licensor makes available under these terms, including any portion of it.
-
-"**you**" refers to the individual or entity agreeing to these terms.
-
-"**your company**" is any legal entity, sole proprietorship, or other kind of organization that you work for, plus all organizations that have control over, are under the control of, or are under common control with that organization. "**Control**" means ownership of substantially all the assets of an entity, or the power to direct its management and policies by vote, contract, or otherwise. Control can be direct or indirect.
-
-"**your licenses**" are all the licenses granted to you for the software under these terms.
-
-"**use**" means anything you do with the software requiring one of your licenses.
diff --git a/src/web/gui/v2/README.md b/src/web/gui/v2/README.md
deleted file mode 120000
index d434d0b42..000000000
--- a/src/web/gui/v2/README.md
+++ /dev/null
@@ -1 +0,0 @@
-../.dashboard-v2-notice.md
\ No newline at end of file
diff --git a/src/web/gui/v2/agent.html b/src/web/gui/v2/agent.html
deleted file mode 100644
index bad821a9d..000000000
--- a/src/web/gui/v2/agent.html
+++ /dev/null
@@ -1,245 +0,0 @@
-Netdata Agent Console
\ No newline at end of file
diff --git a/src/web/gui/v2/allFiles.6.138.3.json b/src/web/gui/v2/allFiles.6.138.3.json
deleted file mode 100644
index e38d93a40..000000000
--- a/src/web/gui/v2/allFiles.6.138.3.json
+++ /dev/null
@@ -1,386 +0,0 @@
-{
- "bundlesManifest.json": "/bundlesManifest.6.json",
- "app.css": "/app.cb2e9f9a81cf9533384e.css",
- "app.js": "/app.08c9fe3ead1d43ff769b.js",
- "runtime.js": "/runtime.ceccffb089cc539b1c1f.js",
- "3624.chunk.js": "/3624.bfeb1fdc3057ba82ddac.chunk.js",
- "7840.chunk.js": "/7840.2f2023f2eb1dcc943d94.chunk.js",
- "5596.chunk.js": "/5596.2036706750ff4028cff2.chunk.js",
- "3350.chunk.js": "/3350.ae7151980981854dc3d1.chunk.js",
- "9818.chunk.js": "/9818.3ce64e0b472412bfbc97.chunk.js",
- "7529.chunk.js": "/7529.658d363e12e73df83b60.chunk.js",
- "1418.chunk.js": "/1418.16d53ba5cce2c6a8143a.chunk.js",
- "1876.chunk.js": "/1876.e610906417b961290730.chunk.js",
- "3621.chunk.js": "/3621.01ee70ee9c311ac163d9.chunk.js",
- "7471.chunk.js": "/7471.f96c4d04a73fb7551c03.chunk.js",
- "3736.chunk.js": "/3736.e572adfdf7951f74a741.chunk.js",
- "683.css": "/683.cc9fa5f3bdc0bf3ab2fc.css",
- "683.chunk.js": "/683.02c173493ef257c210fa.chunk.js",
- "8784.chunk.js": "/8784.a04e9c07186e1f057f56.chunk.js",
- "7146.chunk.js": "/7146.79304e386ac9238b7cf1.chunk.js",
- "5598.chunk.js": "/5598.07ff43a6b96bd41e8637.chunk.js",
- "7487.css": "/7487.89070793921be1288bb5.css",
- "7487.chunk.js": "/7487.db63c95c27d973a07d9b.chunk.js",
- "934.chunk.js": "/934.24d6fdc5f60aa6493962.chunk.js",
- "7170.chunk.js": "/7170.5d6047bb6ce9d77d53db.chunk.js",
- "8505.chunk.js": "/8505.c330f2104fefd71717da.chunk.js",
- "5426.chunk.js": "/5426.254557ad3e1f2d14ad29.chunk.js",
- "4680.chunk.js": "/4680.7d8122d91e9d4582836a.chunk.js",
- "5700.chunk.js": "/5700.b7c9908dc7f30a5a57e7.chunk.js",
"/5700.b7c9908dc7f30a5a57e7.chunk.js", - "5246.chunk.js": "/5246.07c5a1649f0805c140fe.chunk.js", - "3843.css": "/3843.89070793921be1288bb5.css", - "3843.chunk.js": "/3843.ffbb6f614ba4f7b77570.chunk.js", - "4034.chunk.js": "/4034.35199d2809d318eed690.chunk.js", - "1782.chunk.js": "/1782.d82eb301aa81b380dd0c.chunk.js", - "252.chunk.js": "/252.40edc9b0f6da1422f40b.chunk.js", - "6469.css": "/6469.89070793921be1288bb5.css", - "6469.chunk.js": "/6469.47926fa38028dc7d0d41.chunk.js", - "979.chunk.js": "/979.3e5fddf93c977e6c71c3.chunk.js", - "6331.css": "/6331.89070793921be1288bb5.css", - "6331.chunk.js": "/6331.c91b5d104cdff1be3b80.chunk.js", - "9843.chunk.js": "/9843.93f8c71c64ef97b9905e.chunk.js", - "1839.chunk.js": "/1839.a4196d2a87ac0fdd9f34.chunk.js", - "6661.chunk.js": "/6661.72f782bd78fea8c2d836.chunk.js", - "86.chunk.js": "/86.2c88d4d37b88e2620051.chunk.js", - "2007.chunk.js": "/2007.b33ce2b4b736228fd681.chunk.js", - "4958.chunk.js": "/4958.5969fedc1ff7dc82775e.chunk.js", - "9912.chunk.js": "/9912.702300c2dd9616289606.chunk.js", - "7959.chunk.js": "/7959.4f20f4b203e2bad8af39.chunk.js", - "3968.chunk.js": "/3968.483ca2ad3b300293e655.chunk.js", - "3104.chunk.js": "/3104.3b70865e21a81a616af3.chunk.js", - "8059.chunk.js": "/8059.4fdc76bb2cac1f74b41b.chunk.js", - "195.chunk.js": "/195.4cdbea6af54d14a95949.chunk.js", - "785.chunk.js": "/785.d016913841bcc0209d5b.chunk.js", - "7340.chunk.js": "/7340.25dce1c5cc66b613700f.chunk.js", - "7332.chunk.js": "/7332.3acf93dcfa52c7f1bc18.chunk.js", - "6944.chunk.js": "/6944.ab3e70c9ac0f05013b5f.chunk.js", - "6760.chunk.js": "/6760.370b9780120c145da28f.chunk.js", - "4140.css": "/4140.89070793921be1288bb5.css", - "4140.chunk.js": "/4140.46221d08bcda08826c78.chunk.js", - "185.chunk.js": "/185.42bab351ba68de7ca4aa.chunk.js", - "8842.chunk.js": "/8842.406028f523a00acb97bd.chunk.js", - "5304.chunk.js": "/5304.cc797fdd343c7e873b2f.chunk.js", - "3750.chunk.js": "/3750.4ad02f036f2a7c520b1c.chunk.js", - "4414.chunk.js": "/4414.590ba07d470ba2ce7dd0.chunk.js", - "9400.chunk.js": "/9400.6250bbf86c4fd3173de2.chunk.js", - "npm.react.dom.js": "/npm.react.dom.2994f1b4604bd8ce80f6.js", - "netdata.ui.js": "/netdata.ui.647a4c3303ee8ec0da64.js", - "netdata.charts.js": "/netdata.charts.fdfd27674ac5533bbcc2.js", - "8637.js": "/8637.0958494526e838a60d2b.js", - "7144.chunk.js": "/7144.382c341e09540fdebaa6.chunk.js", - "7857.chunk.js": "/7857.813ae058cca579e05462.chunk.js", - "4631.chunk.js": "/4631.158982e127e11bdc6a45.chunk.js", - "1220.chunk.js": "/1220.01d6bbaab869c74f4437.chunk.js", - "749.chunk.js": "/749.e44087ac3a2e3a994318.chunk.js", - "7519.chunk.js": "/7519.7982a2e0fcdf82ba78dd.chunk.js", - "6121.chunk.js": "/6121.f7286809e53e1c6d655a.chunk.js", - "6384.chunk.js": "/6384.0fad56b0bc902f186c98.chunk.js", - "8323.css": "/8323.e22de33686bb2f34063c.css", - "8323.chunk.js": "/8323.437406936b642e8f6cb3.chunk.js", - "5709.chunk.js": "/5709.c494eb62187917e2f2f6.chunk.js", - "3455.chunk.js": "/3455.f9ca876de57244386773.chunk.js", - "1396.chunk.js": "/1396.56f70d7c659ac0b694cd.chunk.js", - "5794.chunk.js": "/5794.252ff787d58d64eb4988.chunk.js", - "8938.chunk.js": "/8938.5116982f737a2ef85330.chunk.js", - "7208.chunk.js": "/7208.1d75cf5d007de32e403b.chunk.js", - "8239.chunk.js": "/8239.c85fc9f3599f198a9efb.chunk.js", - "9473.chunk.js": "/9473.4fd4742ffb6b5348bea8.chunk.js", - "6323.chunk.js": "/6323.26d4d949c9b6f8674c2e.chunk.js", - "9292.chunk.js": "/9292.cc5055091db9a0826933.chunk.js", - "7304.chunk.js": "/7304.ed4690ec296b59fbe7fd.chunk.js", - "8910.chunk.js": 
"/8910.019974f8675d8834dd07.chunk.js", - "6008.chunk.js": "/6008.3d0636fe17f4f6274485.chunk.js", - "963.chunk.js": "/963.35da4a3c4e49aac29dae.chunk.js", - "7436.chunk.js": "/7436.1ebd371d70e6a87c5499.chunk.js", - "3D_PARTY_LICENSES.txt": "/3D_PARTY_LICENSES.txt", - "static/site/pages/holding-page-503/holding-page-503.svg": "/static/site/pages/holding-page-503/holding-page-503.svg", - "favicon.ico": "/favicon.ico", - "static/img/logos/services/graphite.svg": "/static/img/logos/services/graphite.svg", - "sw.js": "/sw.js", - "static/img/no-filter-results.png": "/static/img/no-filter-results.png", - "static/img/logos/services/retroshare.svg": "/static/img/logos/services/retroshare.svg", - "static/img/logos/services/squid.svg": "/static/img/logos/services/squid.svg", - "static/img/no-nodes-room.svg": "/static/img/no-nodes-room.svg", - "static/img/logos/services/libvirt.svg": "/static/img/logos/services/libvirt.svg", - "static/img/logos/services/postfix.svg": "/static/img/logos/services/postfix.svg", - "static/img/logos/services/proxysql.svg": "/static/img/logos/services/proxysql.svg", - "static/img/logos/services/varnish.svg": "/static/img/logos/services/varnish.svg", - "static/img/logos/services/lxd.svg": "/static/img/logos/services/lxd.svg", - "static/img/logos/services/freeradius.svg": "/static/img/logos/services/freeradius.svg", - "static/img/logos/services/prowl.svg": "/static/img/logos/services/prowl.svg", - "static/img/logos/services/traefik.svg": "/static/img/logos/services/traefik.svg", - "static/img/logos/services/rethinkdb.svg": "/static/img/logos/services/rethinkdb.svg", - "static/img/logos/os/gentoo.svg": "/static/img/logos/os/gentoo.svg", - "static/img/logos/services/apc.svg": "/static/img/logos/services/apc.svg", - "static/img/logos/services/statsd.svg": "/static/img/logos/services/statsd.svg", - "static/img/logos/services/openldap.svg": "/static/img/logos/services/openldap.svg", - "static/img/logos/services/cups.svg": "/static/img/logos/services/cups.svg", - "static/img/logos/os/openwrt.svg": "/static/img/logos/os/openwrt.svg", - "static/img/logos/services/spigot.svg": "/static/img/logos/services/spigot.svg", - "static/img/logos/services/stiebel.svg": "/static/img/logos/services/stiebel.svg", - "agent.html": "/agent.html", - "local-agent.html": "/local-agent.html", - "static/img/logos/os/opensuse.svg": "/static/img/logos/os/opensuse.svg", - "static/img/logos/services/apache.svg": "/static/img/logos/services/apache.svg", - "static/img/logos/services/beanstalkd.svg": "/static/img/logos/services/beanstalkd.svg", - "static/img/logos/services/golang.svg": "/static/img/logos/services/golang.svg", - "static/img/new-dashboard.svg": "/static/img/new-dashboard.svg", - "static/img/logos/services/activemq.svg": "/static/img/logos/services/activemq.svg", - "static/img/logos/os/kubernetes.svg": "/static/img/logos/os/kubernetes.svg", - "static/img/logos/services/kubernetes.svg": "/static/img/logos/services/kubernetes.svg", - "static/img/logos/services/systemd.svg": "/static/img/logos/services/systemd.svg", - "static/img/logos/services/influxdb.svg": "/static/img/logos/services/influxdb.svg", - "static/img/rack.png": "/static/img/rack.png", - "static/email/img/isotype_600.png": "/static/email/img/isotype_600.png", - "static/img/logos/services/irc.svg": "/static/img/logos/services/irc.svg", - "static/img/logos/services/tomcat.svg": "/static/img/logos/services/tomcat.svg", - "registry-alert-redirect.html": "/registry-alert-redirect.html", - "static/img/logos/services/mariadb.svg": 
"/static/img/logos/services/mariadb.svg", - "static/img/logos/services/openzfs.svg": "/static/img/logos/services/openzfs.svg", - "static/img/logos/services/veritas.svg": "/static/img/logos/services/veritas.svg", - "static/img/logos/services/boinc.svg": "/static/img/logos/services/boinc.svg", - "static/img/logos/services/fail2ban.svg": "/static/img/logos/services/fail2ban.svg", - "static/img/logos/services/dns.svg": "/static/img/logos/services/dns.svg", - "static/img/logos/services/netfilter.svg": "/static/img/logos/services/netfilter.svg", - "static/img/logos/services/uwsgi.svg": "/static/img/logos/services/uwsgi.svg", - "static/img/logos/services/btrfs.svg": "/static/img/logos/services/btrfs.svg", - "static/img/logos/services/adaptec.svg": "/static/img/logos/services/adaptec.svg", - "static/img/logos/services/icecast.svg": "/static/img/logos/services/icecast.svg", - "static/img/logos/services/xen.svg": "/static/img/logos/services/xen.svg", - "static/img/logos/services/haproxy.svg": "/static/img/logos/services/haproxy.svg", - "static/img/logos/services/tor.svg": "/static/img/logos/services/tor.svg", - "static/img/logos/services/mysql.svg": "/static/img/logos/services/mysql.svg", - "static/img/logos/services/memcached.svg": "/static/img/logos/services/memcached.svg", - "static/img/logos/services/libreswan.svg": "/static/img/logos/services/libreswan.svg", - "static/img/logos/services/grafana.svg": "/static/img/logos/services/grafana.svg", - "static/img/logos/os/raspberry-pi.svg": "/static/img/logos/os/raspberry-pi.svg", - "static/img/logos/services/raspberry-pi.svg": "/static/img/logos/services/raspberry-pi.svg", - "static/img/logos/services/fluentd.svg": "/static/img/logos/services/fluentd.svg", - "static/img/logos/services/kairosdb.svg": "/static/img/logos/services/kairosdb.svg", - "static/img/logos/services/docker.svg": "/static/img/logos/services/docker.svg", - "static/img/logos/services/opentsdb.svg": "/static/img/logos/services/opentsdb.svg", - "static/img/logos/services/php-fpm.svg": "/static/img/logos/services/php-fpm.svg", - "static/img/logos/services/monit.svg": "/static/img/logos/services/monit.svg", - "static/img/logos/services/fping.svg": "/static/img/logos/services/fping.svg", - "static/img/logos/services/ntpd.svg": "/static/img/logos/services/ntpd.svg", - "static/email/img/header.png": "/static/email/img/header.png", - "static/img/logos/os/freenas.svg": "/static/img/logos/os/freenas.svg", - "static/img/logos/os/linux-small.svg": "/static/img/logos/os/linux-small.svg", - "static/img/logos/services/aws.svg": "/static/img/logos/services/aws.svg", - "static/img/logos/services/unbound.svg": "/static/img/logos/services/unbound.svg", - "static/img/logos/services/nsd.svg": "/static/img/logos/services/nsd.svg", - "static/img/logos/os/macos.svg": "/static/img/logos/os/macos.svg", - "static/img/logos/os/linux.svg": "/static/img/logos/os/linux.svg", - "static/img/logos/services/linux.svg": "/static/img/logos/services/linux.svg", - "static/img/logos/services/postgresql.svg": "/static/img/logos/services/postgresql.svg", - "static/img/logos/services/litespeed.svg": "/static/img/logos/services/litespeed.svg", - "registry-hello.html": "/registry-hello.html", - "static/img/logos/services/slack.svg": "/static/img/logos/services/slack.svg", - "static/img/logos/services/gnu-freeipmi.svg": "/static/img/logos/services/gnu-freeipmi.svg", - "static/img/logos/services/isc.svg": "/static/img/logos/services/isc.svg", - "static/img/logos/services/solr.svg": "/static/img/logos/services/solr.svg", - 
"static/img/logos/services/nut.svg": "/static/img/logos/services/nut.svg", - "static/img/logos/os/debian.svg": "/static/img/logos/os/debian.svg", - "static/img/logos/services/smstools3.svg": "/static/img/logos/services/smstools3.svg", - "static/email/img/reachability_siren.png": "/static/email/img/reachability_siren.png", - "static/splash.css": "/static/splash.css", - "registry-access.html": "/registry-access.html", - "static/img/logos/services/server-connection.svg": "/static/img/logos/services/server-connection.svg", - "LICENSE.md": "/LICENSE.md", - "static/email/img/crit_siren.png": "/static/email/img/crit_siren.png", - "static/img/logos/services/pushover.svg": "/static/img/logos/services/pushover.svg", - "static/img/logos/services/openvpn.svg": "/static/img/logos/services/openvpn.svg", - "static/img/logos/os/suse.svg": "/static/img/logos/os/suse.svg", - "static/img/logos/services/couchdb.svg": "/static/img/logos/services/couchdb.svg", - "static/img/logos/os/manjaro.svg": "/static/img/logos/os/manjaro.svg", - "static/img/logos/services/rabbitmq.svg": "/static/img/logos/services/rabbitmq.svg", - "static/img/logos/services/sma.svg": "/static/img/logos/services/sma.svg", - "static/email/img/warn_siren.png": "/static/email/img/warn_siren.png", - "static/img/logos/os/arch.svg": "/static/img/logos/os/arch.svg", - "static/img/logos/os/freebsd.svg": "/static/img/logos/os/freebsd.svg", - "static/img/logos/services/flock.svg": "/static/img/logos/services/flock.svg", - "static/img/logos/services/prometheus.svg": "/static/img/logos/services/prometheus.svg", - "static/img/logos/services/redis.svg": "/static/img/logos/services/redis.svg", - "static/img/logos/services/ipfs.svg": "/static/img/logos/services/ipfs.svg", - "static/img/logos/os/redhat.svg": "/static/img/logos/os/redhat.svg", - "static/img/logos/services/concul.svg": "/static/img/logos/services/concul.svg", - "static/img/mail/isotype.png": "/static/img/mail/isotype.png", - "static/img/logos/services/processor.svg": "/static/img/logos/services/processor.svg", - "static/img/logos/services/lm-sensors.svg": "/static/img/logos/services/lm-sensors.svg", - "static/img/logos/services/data-encryption.svg": "/static/img/logos/services/data-encryption.svg", - "static/site/pages/holding-page-503/multiple-logos-group.svg": "/static/site/pages/holding-page-503/multiple-logos-group.svg", - "static/email/img/crit_badge.png": "/static/email/img/crit_badge.png", - "static/img/logos/services/load-balancer.svg": "/static/img/logos/services/load-balancer.svg", - "index.html": "/index.html", - "static/img/logos/services/elasticsearch.svg": "/static/img/logos/services/elasticsearch.svg", - "static/img/logos/os/centos.svg": "/static/img/logos/os/centos.svg", - "static/email/img/clea_siren.png": "/static/email/img/clea_siren.png", - "static/img/logos/services/ceph.svg": "/static/img/logos/services/ceph.svg", - "static/img/logos/services/log-file.svg": "/static/img/logos/services/log-file.svg", - "static/img/logos/services/container.svg": "/static/img/logos/services/container.svg", - "static/img/logos/services/dovecot.svg": "/static/img/logos/services/dovecot.svg", - "static/img/logos/services/exim.svg": "/static/img/logos/services/exim.svg", - "static/img/logos/services/chrony.svg": "/static/img/logos/services/chrony.svg", - "static/email/img/warn_badge.png": "/static/email/img/warn_badge.png", - "static/img/logos/services/nfs.svg": "/static/img/logos/services/nfs.svg", - "static/img/logos/os/fedora.svg": "/static/img/logos/os/fedora.svg", - 
"static/img/logos/os/ubuntu.svg": "/static/img/logos/os/ubuntu.svg", - "static/img/logos/services/notification-bell.svg": "/static/img/logos/services/notification-bell.svg", - "static/email/img/flood_siren.png": "/static/email/img/flood_siren.png", - "static/img/logos/services/access-point.svg": "/static/img/logos/services/access-point.svg", - "static/email/img/label_recovered.png": "/static/email/img/label_recovered.png", - "static/img/logos/services/samba.svg": "/static/img/logos/services/samba.svg", - "static/img/logos/services/monitoring.svg": "/static/img/logos/services/monitoring.svg", - "static/img/logos/services/key-file.svg": "/static/img/logos/services/key-file.svg", - "static/img/logos/services/kafka.svg": "/static/img/logos/services/kafka.svg", - "static/email/img/label_warning.png": "/static/email/img/label_warning.png", - "static/img/logos/services/rocketchat.svg": "/static/img/logos/services/rocketchat.svg", - "static/img/mail/logotype.svg": "/static/img/mail/logotype.svg", - "static/img/logos/services/nginx-plus.svg": "/static/img/logos/services/nginx-plus.svg", - "static/email/img/label_critical.png": "/static/email/img/label_critical.png", - "static/img/logos/services/fronius.svg": "/static/img/logos/services/fronius.svg", - "static/img/logos/services/puppet.svg": "/static/img/logos/services/puppet.svg", - "static/img/logos/os/pfsense.svg": "/static/img/logos/os/pfsense.svg", - "static/img/logos/services/consul.svg": "/static/img/logos/services/consul.svg", - "static/img/logos/os/coreos.svg": "/static/img/logos/os/coreos.svg", - "static/site/pages/holding-page-503/index.html": "/static/site/pages/holding-page-503/index.html", - "static/site/pages/holding-page-503/holding-page-503.css": "/static/site/pages/holding-page-503/holding-page-503.css", - "static/img/logos/services/network.svg": "/static/img/logos/services/network.svg", - "static/img/logos/services/qos.svg": "/static/img/logos/services/qos.svg", - "static/img/logos/services/email.svg": "/static/img/logos/services/email.svg", - "static/img/logos/services/ddos.svg": "/static/img/logos/services/ddos.svg", - "static/img/logos/os/openstack.svg": "/static/img/logos/os/openstack.svg", - "static/img/mail/logotype.png": "/static/img/mail/logotype.png", - "static/email/img/full_logo.png": "/static/email/img/full_logo.png", - "static/email/img/community_icon.png": "/static/email/img/community_icon.png", - "static/email/img/configure_icon.png": "/static/email/img/configure_icon.png", - "static/img/logos/services/temperature.svg": "/static/img/logos/services/temperature.svg", - "static/img/logos/services/pushbullet.svg": "/static/img/logos/services/pushbullet.svg", - "static/img/logos/services/nginx.svg": "/static/img/logos/services/nginx.svg", - "static/img/logos/services/cloud.svg": "/static/img/logos/services/cloud.svg", - "static/img/logos/services/pagerduty.svg": "/static/img/logos/services/pagerduty.svg", - "static/img/logos/services/alerta.svg": "/static/img/logos/services/alerta.svg", - "static/img/logos/services/mongodb.svg": "/static/img/logos/services/mongodb.svg", - "static/email/img/clea_badge.png": "/static/email/img/clea_badge.png", - "static/img/logos/services/discord.svg": "/static/img/logos/services/discord.svg", - "static/img/logos/os/alpine.svg": "/static/img/logos/os/alpine.svg", - "static/img/logos/os/placeholder.svg": "/static/img/logos/os/placeholder.svg", - "static/img/logos/services/aws-sns.svg": "/static/img/logos/services/aws-sns.svg", - "static/img/logos/services/telegram.svg": 
"/static/img/logos/services/telegram.svg", - "static/img/logos/os/oracle.svg": "/static/img/logos/os/oracle.svg", - "static/img/logos/services/oracle.svg": "/static/img/logos/services/oracle.svg", - "static/img/logos/services/network-protocol.svg": "/static/img/logos/services/network-protocol.svg", - "static/img/logos/services/nvidia.svg": "/static/img/logos/services/nvidia.svg", - "static/img/logos/services/springboot.svg": "/static/img/logos/services/springboot.svg", - "static/img/logos/services/kavenegar.svg": "/static/img/logos/services/kavenegar.svg", - "static/img/logos/services/powerdns.svg": "/static/img/logos/services/powerdns.svg", - "static/img/logos/os/docker.svg": "/static/img/logos/os/docker.svg", - "static/img/logos/services/messagebird.svg": "/static/img/logos/services/messagebird.svg", - "static/img/logos/services/placeholder.svg": "/static/img/logos/services/placeholder.svg", - "static/site/pages/holding-page-503/reset.svg": "/static/site/pages/holding-page-503/reset.svg", - "static/img/logos/services/twilio.svg": "/static/img/logos/services/twilio.svg", - "static/img/logos/services/lighthttpd.svg": "/static/img/logos/services/lighthttpd.svg", - "static/img/logos/os/rocky.svg": "/static/img/logos/os/rocky.svg", - "static/img/list-style-image.svg": "/static/img/list-style-image.svg", - ".well-known/assetlinks.json": "/.well-known/assetlinks.json", - "static/.well-known/assetlinks.json": "/static/.well-known/assetlinks.json", - "static/img/logos/services/opensips.svg": "/static/img/logos/services/opensips.svg", - "static/img/logos/services/logstash.svg": "/static/img/logos/services/logstash.svg", - "static/img/mail/isotype.svg": "/static/img/mail/isotype.svg", - "static/site/pages/holding-page-503/netdata-logo-white.svg": "/static/site/pages/holding-page-503/netdata-logo-white.svg", - "static/img/logos/services/hub.svg": "/static/img/logos/services/hub.svg", - "apple-app-site-association": "/apple-app-site-association", - "static/apple-app-site-association": "/static/apple-app-site-association", - "app.css.map": "/app.cb2e9f9a81cf9533384e.css.map", - "app.js.map": "/app.08c9fe3ead1d43ff769b.js.map", - "runtime.js.map": "/runtime.ceccffb089cc539b1c1f.js.map", - "3624.chunk.js.map": "/3624.bfeb1fdc3057ba82ddac.chunk.js.map", - "7840.chunk.js.map": "/7840.2f2023f2eb1dcc943d94.chunk.js.map", - "5596.chunk.js.map": "/5596.2036706750ff4028cff2.chunk.js.map", - "3350.chunk.js.map": "/3350.ae7151980981854dc3d1.chunk.js.map", - "9818.chunk.js.map": "/9818.3ce64e0b472412bfbc97.chunk.js.map", - "7529.chunk.js.map": "/7529.658d363e12e73df83b60.chunk.js.map", - "1418.chunk.js.map": "/1418.16d53ba5cce2c6a8143a.chunk.js.map", - "1876.chunk.js.map": "/1876.e610906417b961290730.chunk.js.map", - "3621.chunk.js.map": "/3621.01ee70ee9c311ac163d9.chunk.js.map", - "7471.chunk.js.map": "/7471.f96c4d04a73fb7551c03.chunk.js.map", - "3736.chunk.js.map": "/3736.e572adfdf7951f74a741.chunk.js.map", - "683.css.map": "/683.cc9fa5f3bdc0bf3ab2fc.css.map", - "683.chunk.js.map": "/683.02c173493ef257c210fa.chunk.js.map", - "8784.chunk.js.map": "/8784.a04e9c07186e1f057f56.chunk.js.map", - "7146.chunk.js.map": "/7146.79304e386ac9238b7cf1.chunk.js.map", - "5598.chunk.js.map": "/5598.07ff43a6b96bd41e8637.chunk.js.map", - "7487.css.map": "/7487.89070793921be1288bb5.css.map", - "7487.chunk.js.map": "/7487.db63c95c27d973a07d9b.chunk.js.map", - "934.chunk.js.map": "/934.24d6fdc5f60aa6493962.chunk.js.map", - "7170.chunk.js.map": "/7170.5d6047bb6ce9d77d53db.chunk.js.map", - "8505.chunk.js.map": 
"/8505.c330f2104fefd71717da.chunk.js.map", - "5426.chunk.js.map": "/5426.254557ad3e1f2d14ad29.chunk.js.map", - "4680.chunk.js.map": "/4680.7d8122d91e9d4582836a.chunk.js.map", - "5700.chunk.js.map": "/5700.b7c9908dc7f30a5a57e7.chunk.js.map", - "5246.chunk.js.map": "/5246.07c5a1649f0805c140fe.chunk.js.map", - "3843.css.map": "/3843.89070793921be1288bb5.css.map", - "3843.chunk.js.map": "/3843.ffbb6f614ba4f7b77570.chunk.js.map", - "4034.chunk.js.map": "/4034.35199d2809d318eed690.chunk.js.map", - "1782.chunk.js.map": "/1782.d82eb301aa81b380dd0c.chunk.js.map", - "252.chunk.js.map": "/252.40edc9b0f6da1422f40b.chunk.js.map", - "6469.css.map": "/6469.89070793921be1288bb5.css.map", - "6469.chunk.js.map": "/6469.47926fa38028dc7d0d41.chunk.js.map", - "979.chunk.js.map": "/979.3e5fddf93c977e6c71c3.chunk.js.map", - "6331.css.map": "/6331.89070793921be1288bb5.css.map", - "6331.chunk.js.map": "/6331.c91b5d104cdff1be3b80.chunk.js.map", - "9843.chunk.js.map": "/9843.93f8c71c64ef97b9905e.chunk.js.map", - "1839.chunk.js.map": "/1839.a4196d2a87ac0fdd9f34.chunk.js.map", - "6661.chunk.js.map": "/6661.72f782bd78fea8c2d836.chunk.js.map", - "86.chunk.js.map": "/86.2c88d4d37b88e2620051.chunk.js.map", - "2007.chunk.js.map": "/2007.b33ce2b4b736228fd681.chunk.js.map", - "4958.chunk.js.map": "/4958.5969fedc1ff7dc82775e.chunk.js.map", - "9912.chunk.js.map": "/9912.702300c2dd9616289606.chunk.js.map", - "7959.chunk.js.map": "/7959.4f20f4b203e2bad8af39.chunk.js.map", - "3968.chunk.js.map": "/3968.483ca2ad3b300293e655.chunk.js.map", - "3104.chunk.js.map": "/3104.3b70865e21a81a616af3.chunk.js.map", - "8059.chunk.js.map": "/8059.4fdc76bb2cac1f74b41b.chunk.js.map", - "195.chunk.js.map": "/195.4cdbea6af54d14a95949.chunk.js.map", - "785.chunk.js.map": "/785.d016913841bcc0209d5b.chunk.js.map", - "7340.chunk.js.map": "/7340.25dce1c5cc66b613700f.chunk.js.map", - "7332.chunk.js.map": "/7332.3acf93dcfa52c7f1bc18.chunk.js.map", - "6944.chunk.js.map": "/6944.ab3e70c9ac0f05013b5f.chunk.js.map", - "6760.chunk.js.map": "/6760.370b9780120c145da28f.chunk.js.map", - "4140.css.map": "/4140.89070793921be1288bb5.css.map", - "4140.chunk.js.map": "/4140.46221d08bcda08826c78.chunk.js.map", - "185.chunk.js.map": "/185.42bab351ba68de7ca4aa.chunk.js.map", - "8842.chunk.js.map": "/8842.406028f523a00acb97bd.chunk.js.map", - "5304.chunk.js.map": "/5304.cc797fdd343c7e873b2f.chunk.js.map", - "3750.chunk.js.map": "/3750.4ad02f036f2a7c520b1c.chunk.js.map", - "4414.chunk.js.map": "/4414.590ba07d470ba2ce7dd0.chunk.js.map", - "9400.chunk.js.map": "/9400.6250bbf86c4fd3173de2.chunk.js.map", - "npm.react.dom.js.map": "/npm.react.dom.2994f1b4604bd8ce80f6.js.map", - "netdata.ui.js.map": "/netdata.ui.647a4c3303ee8ec0da64.js.map", - "netdata.charts.js.map": "/netdata.charts.fdfd27674ac5533bbcc2.js.map", - "8637.js.map": "/8637.0958494526e838a60d2b.js.map", - "7144.chunk.js.map": "/7144.382c341e09540fdebaa6.chunk.js.map", - "7857.chunk.js.map": "/7857.813ae058cca579e05462.chunk.js.map", - "4631.chunk.js.map": "/4631.158982e127e11bdc6a45.chunk.js.map", - "1220.chunk.js.map": "/1220.01d6bbaab869c74f4437.chunk.js.map", - "749.chunk.js.map": "/749.e44087ac3a2e3a994318.chunk.js.map", - "7519.chunk.js.map": "/7519.7982a2e0fcdf82ba78dd.chunk.js.map", - "6121.chunk.js.map": "/6121.f7286809e53e1c6d655a.chunk.js.map", - "6384.chunk.js.map": "/6384.0fad56b0bc902f186c98.chunk.js.map", - "8323.css.map": "/8323.e22de33686bb2f34063c.css.map", - "8323.chunk.js.map": "/8323.437406936b642e8f6cb3.chunk.js.map", - "5709.chunk.js.map": "/5709.c494eb62187917e2f2f6.chunk.js.map", - 
"3455.chunk.js.map": "/3455.f9ca876de57244386773.chunk.js.map", - "1396.chunk.js.map": "/1396.56f70d7c659ac0b694cd.chunk.js.map", - "5794.chunk.js.map": "/5794.252ff787d58d64eb4988.chunk.js.map", - "8938.chunk.js.map": "/8938.5116982f737a2ef85330.chunk.js.map", - "7208.chunk.js.map": "/7208.1d75cf5d007de32e403b.chunk.js.map", - "8239.chunk.js.map": "/8239.c85fc9f3599f198a9efb.chunk.js.map", - "9473.chunk.js.map": "/9473.4fd4742ffb6b5348bea8.chunk.js.map", - "6323.chunk.js.map": "/6323.26d4d949c9b6f8674c2e.chunk.js.map", - "9292.chunk.js.map": "/9292.cc5055091db9a0826933.chunk.js.map", - "7304.chunk.js.map": "/7304.ed4690ec296b59fbe7fd.chunk.js.map", - "8910.chunk.js.map": "/8910.019974f8675d8834dd07.chunk.js.map", - "6008.chunk.js.map": "/6008.3d0636fe17f4f6274485.chunk.js.map", - "963.chunk.js.map": "/963.35da4a3c4e49aac29dae.chunk.js.map", - "7436.chunk.js.map": "/7436.1ebd371d70e6a87c5499.chunk.js.map" -} \ No newline at end of file diff --git a/src/web/gui/v2/allFiles.6.json b/src/web/gui/v2/allFiles.6.json deleted file mode 100644 index e38d93a40..000000000 --- a/src/web/gui/v2/allFiles.6.json +++ /dev/null @@ -1,386 +0,0 @@ -{ - "bundlesManifest.json": "/bundlesManifest.6.json", - "app.css": "/app.cb2e9f9a81cf9533384e.css", - "app.js": "/app.08c9fe3ead1d43ff769b.js", - "runtime.js": "/runtime.ceccffb089cc539b1c1f.js", - "3624.chunk.js": "/3624.bfeb1fdc3057ba82ddac.chunk.js", - "7840.chunk.js": "/7840.2f2023f2eb1dcc943d94.chunk.js", - "5596.chunk.js": "/5596.2036706750ff4028cff2.chunk.js", - "3350.chunk.js": "/3350.ae7151980981854dc3d1.chunk.js", - "9818.chunk.js": "/9818.3ce64e0b472412bfbc97.chunk.js", - "7529.chunk.js": "/7529.658d363e12e73df83b60.chunk.js", - "1418.chunk.js": "/1418.16d53ba5cce2c6a8143a.chunk.js", - "1876.chunk.js": "/1876.e610906417b961290730.chunk.js", - "3621.chunk.js": "/3621.01ee70ee9c311ac163d9.chunk.js", - "7471.chunk.js": "/7471.f96c4d04a73fb7551c03.chunk.js", - "3736.chunk.js": "/3736.e572adfdf7951f74a741.chunk.js", - "683.css": "/683.cc9fa5f3bdc0bf3ab2fc.css", - "683.chunk.js": "/683.02c173493ef257c210fa.chunk.js", - "8784.chunk.js": "/8784.a04e9c07186e1f057f56.chunk.js", - "7146.chunk.js": "/7146.79304e386ac9238b7cf1.chunk.js", - "5598.chunk.js": "/5598.07ff43a6b96bd41e8637.chunk.js", - "7487.css": "/7487.89070793921be1288bb5.css", - "7487.chunk.js": "/7487.db63c95c27d973a07d9b.chunk.js", - "934.chunk.js": "/934.24d6fdc5f60aa6493962.chunk.js", - "7170.chunk.js": "/7170.5d6047bb6ce9d77d53db.chunk.js", - "8505.chunk.js": "/8505.c330f2104fefd71717da.chunk.js", - "5426.chunk.js": "/5426.254557ad3e1f2d14ad29.chunk.js", - "4680.chunk.js": "/4680.7d8122d91e9d4582836a.chunk.js", - "5700.chunk.js": "/5700.b7c9908dc7f30a5a57e7.chunk.js", - "5246.chunk.js": "/5246.07c5a1649f0805c140fe.chunk.js", - "3843.css": "/3843.89070793921be1288bb5.css", - "3843.chunk.js": "/3843.ffbb6f614ba4f7b77570.chunk.js", - "4034.chunk.js": "/4034.35199d2809d318eed690.chunk.js", - "1782.chunk.js": "/1782.d82eb301aa81b380dd0c.chunk.js", - "252.chunk.js": "/252.40edc9b0f6da1422f40b.chunk.js", - "6469.css": "/6469.89070793921be1288bb5.css", - "6469.chunk.js": "/6469.47926fa38028dc7d0d41.chunk.js", - "979.chunk.js": "/979.3e5fddf93c977e6c71c3.chunk.js", - "6331.css": "/6331.89070793921be1288bb5.css", - "6331.chunk.js": "/6331.c91b5d104cdff1be3b80.chunk.js", - "9843.chunk.js": "/9843.93f8c71c64ef97b9905e.chunk.js", - "1839.chunk.js": "/1839.a4196d2a87ac0fdd9f34.chunk.js", - "6661.chunk.js": "/6661.72f782bd78fea8c2d836.chunk.js", - "86.chunk.js": "/86.2c88d4d37b88e2620051.chunk.js", - 
"2007.chunk.js": "/2007.b33ce2b4b736228fd681.chunk.js", - "4958.chunk.js": "/4958.5969fedc1ff7dc82775e.chunk.js", - "9912.chunk.js": "/9912.702300c2dd9616289606.chunk.js", - "7959.chunk.js": "/7959.4f20f4b203e2bad8af39.chunk.js", - "3968.chunk.js": "/3968.483ca2ad3b300293e655.chunk.js", - "3104.chunk.js": "/3104.3b70865e21a81a616af3.chunk.js", - "8059.chunk.js": "/8059.4fdc76bb2cac1f74b41b.chunk.js", - "195.chunk.js": "/195.4cdbea6af54d14a95949.chunk.js", - "785.chunk.js": "/785.d016913841bcc0209d5b.chunk.js", - "7340.chunk.js": "/7340.25dce1c5cc66b613700f.chunk.js", - "7332.chunk.js": "/7332.3acf93dcfa52c7f1bc18.chunk.js", - "6944.chunk.js": "/6944.ab3e70c9ac0f05013b5f.chunk.js", - "6760.chunk.js": "/6760.370b9780120c145da28f.chunk.js", - "4140.css": "/4140.89070793921be1288bb5.css", - "4140.chunk.js": "/4140.46221d08bcda08826c78.chunk.js", - "185.chunk.js": "/185.42bab351ba68de7ca4aa.chunk.js", - "8842.chunk.js": "/8842.406028f523a00acb97bd.chunk.js", - "5304.chunk.js": "/5304.cc797fdd343c7e873b2f.chunk.js", - "3750.chunk.js": "/3750.4ad02f036f2a7c520b1c.chunk.js", - "4414.chunk.js": "/4414.590ba07d470ba2ce7dd0.chunk.js", - "9400.chunk.js": "/9400.6250bbf86c4fd3173de2.chunk.js", - "npm.react.dom.js": "/npm.react.dom.2994f1b4604bd8ce80f6.js", - "netdata.ui.js": "/netdata.ui.647a4c3303ee8ec0da64.js", - "netdata.charts.js": "/netdata.charts.fdfd27674ac5533bbcc2.js", - "8637.js": "/8637.0958494526e838a60d2b.js", - "7144.chunk.js": "/7144.382c341e09540fdebaa6.chunk.js", - "7857.chunk.js": "/7857.813ae058cca579e05462.chunk.js", - "4631.chunk.js": "/4631.158982e127e11bdc6a45.chunk.js", - "1220.chunk.js": "/1220.01d6bbaab869c74f4437.chunk.js", - "749.chunk.js": "/749.e44087ac3a2e3a994318.chunk.js", - "7519.chunk.js": "/7519.7982a2e0fcdf82ba78dd.chunk.js", - "6121.chunk.js": "/6121.f7286809e53e1c6d655a.chunk.js", - "6384.chunk.js": "/6384.0fad56b0bc902f186c98.chunk.js", - "8323.css": "/8323.e22de33686bb2f34063c.css", - "8323.chunk.js": "/8323.437406936b642e8f6cb3.chunk.js", - "5709.chunk.js": "/5709.c494eb62187917e2f2f6.chunk.js", - "3455.chunk.js": "/3455.f9ca876de57244386773.chunk.js", - "1396.chunk.js": "/1396.56f70d7c659ac0b694cd.chunk.js", - "5794.chunk.js": "/5794.252ff787d58d64eb4988.chunk.js", - "8938.chunk.js": "/8938.5116982f737a2ef85330.chunk.js", - "7208.chunk.js": "/7208.1d75cf5d007de32e403b.chunk.js", - "8239.chunk.js": "/8239.c85fc9f3599f198a9efb.chunk.js", - "9473.chunk.js": "/9473.4fd4742ffb6b5348bea8.chunk.js", - "6323.chunk.js": "/6323.26d4d949c9b6f8674c2e.chunk.js", - "9292.chunk.js": "/9292.cc5055091db9a0826933.chunk.js", - "7304.chunk.js": "/7304.ed4690ec296b59fbe7fd.chunk.js", - "8910.chunk.js": "/8910.019974f8675d8834dd07.chunk.js", - "6008.chunk.js": "/6008.3d0636fe17f4f6274485.chunk.js", - "963.chunk.js": "/963.35da4a3c4e49aac29dae.chunk.js", - "7436.chunk.js": "/7436.1ebd371d70e6a87c5499.chunk.js", - "3D_PARTY_LICENSES.txt": "/3D_PARTY_LICENSES.txt", - "static/site/pages/holding-page-503/holding-page-503.svg": "/static/site/pages/holding-page-503/holding-page-503.svg", - "favicon.ico": "/favicon.ico", - "static/img/logos/services/graphite.svg": "/static/img/logos/services/graphite.svg", - "sw.js": "/sw.js", - "static/img/no-filter-results.png": "/static/img/no-filter-results.png", - "static/img/logos/services/retroshare.svg": "/static/img/logos/services/retroshare.svg", - "static/img/logos/services/squid.svg": "/static/img/logos/services/squid.svg", - "static/img/no-nodes-room.svg": "/static/img/no-nodes-room.svg", - "static/img/logos/services/libvirt.svg": 
"/static/img/logos/services/libvirt.svg", - "static/img/logos/services/postfix.svg": "/static/img/logos/services/postfix.svg", - "static/img/logos/services/proxysql.svg": "/static/img/logos/services/proxysql.svg", - "static/img/logos/services/varnish.svg": "/static/img/logos/services/varnish.svg", - "static/img/logos/services/lxd.svg": "/static/img/logos/services/lxd.svg", - "static/img/logos/services/freeradius.svg": "/static/img/logos/services/freeradius.svg", - "static/img/logos/services/prowl.svg": "/static/img/logos/services/prowl.svg", - "static/img/logos/services/traefik.svg": "/static/img/logos/services/traefik.svg", - "static/img/logos/services/rethinkdb.svg": "/static/img/logos/services/rethinkdb.svg", - "static/img/logos/os/gentoo.svg": "/static/img/logos/os/gentoo.svg", - "static/img/logos/services/apc.svg": "/static/img/logos/services/apc.svg", - "static/img/logos/services/statsd.svg": "/static/img/logos/services/statsd.svg", - "static/img/logos/services/openldap.svg": "/static/img/logos/services/openldap.svg", - "static/img/logos/services/cups.svg": "/static/img/logos/services/cups.svg", - "static/img/logos/os/openwrt.svg": "/static/img/logos/os/openwrt.svg", - "static/img/logos/services/spigot.svg": "/static/img/logos/services/spigot.svg", - "static/img/logos/services/stiebel.svg": "/static/img/logos/services/stiebel.svg", - "agent.html": "/agent.html", - "local-agent.html": "/local-agent.html", - "static/img/logos/os/opensuse.svg": "/static/img/logos/os/opensuse.svg", - "static/img/logos/services/apache.svg": "/static/img/logos/services/apache.svg", - "static/img/logos/services/beanstalkd.svg": "/static/img/logos/services/beanstalkd.svg", - "static/img/logos/services/golang.svg": "/static/img/logos/services/golang.svg", - "static/img/new-dashboard.svg": "/static/img/new-dashboard.svg", - "static/img/logos/services/activemq.svg": "/static/img/logos/services/activemq.svg", - "static/img/logos/os/kubernetes.svg": "/static/img/logos/os/kubernetes.svg", - "static/img/logos/services/kubernetes.svg": "/static/img/logos/services/kubernetes.svg", - "static/img/logos/services/systemd.svg": "/static/img/logos/services/systemd.svg", - "static/img/logos/services/influxdb.svg": "/static/img/logos/services/influxdb.svg", - "static/img/rack.png": "/static/img/rack.png", - "static/email/img/isotype_600.png": "/static/email/img/isotype_600.png", - "static/img/logos/services/irc.svg": "/static/img/logos/services/irc.svg", - "static/img/logos/services/tomcat.svg": "/static/img/logos/services/tomcat.svg", - "registry-alert-redirect.html": "/registry-alert-redirect.html", - "static/img/logos/services/mariadb.svg": "/static/img/logos/services/mariadb.svg", - "static/img/logos/services/openzfs.svg": "/static/img/logos/services/openzfs.svg", - "static/img/logos/services/veritas.svg": "/static/img/logos/services/veritas.svg", - "static/img/logos/services/boinc.svg": "/static/img/logos/services/boinc.svg", - "static/img/logos/services/fail2ban.svg": "/static/img/logos/services/fail2ban.svg", - "static/img/logos/services/dns.svg": "/static/img/logos/services/dns.svg", - "static/img/logos/services/netfilter.svg": "/static/img/logos/services/netfilter.svg", - "static/img/logos/services/uwsgi.svg": "/static/img/logos/services/uwsgi.svg", - "static/img/logos/services/btrfs.svg": "/static/img/logos/services/btrfs.svg", - "static/img/logos/services/adaptec.svg": "/static/img/logos/services/adaptec.svg", - "static/img/logos/services/icecast.svg": "/static/img/logos/services/icecast.svg", - 
"static/img/logos/services/xen.svg": "/static/img/logos/services/xen.svg", - "static/img/logos/services/haproxy.svg": "/static/img/logos/services/haproxy.svg", - "static/img/logos/services/tor.svg": "/static/img/logos/services/tor.svg", - "static/img/logos/services/mysql.svg": "/static/img/logos/services/mysql.svg", - "static/img/logos/services/memcached.svg": "/static/img/logos/services/memcached.svg", - "static/img/logos/services/libreswan.svg": "/static/img/logos/services/libreswan.svg", - "static/img/logos/services/grafana.svg": "/static/img/logos/services/grafana.svg", - "static/img/logos/os/raspberry-pi.svg": "/static/img/logos/os/raspberry-pi.svg", - "static/img/logos/services/raspberry-pi.svg": "/static/img/logos/services/raspberry-pi.svg", - "static/img/logos/services/fluentd.svg": "/static/img/logos/services/fluentd.svg", - "static/img/logos/services/kairosdb.svg": "/static/img/logos/services/kairosdb.svg", - "static/img/logos/services/docker.svg": "/static/img/logos/services/docker.svg", - "static/img/logos/services/opentsdb.svg": "/static/img/logos/services/opentsdb.svg", - "static/img/logos/services/php-fpm.svg": "/static/img/logos/services/php-fpm.svg", - "static/img/logos/services/monit.svg": "/static/img/logos/services/monit.svg", - "static/img/logos/services/fping.svg": "/static/img/logos/services/fping.svg", - "static/img/logos/services/ntpd.svg": "/static/img/logos/services/ntpd.svg", - "static/email/img/header.png": "/static/email/img/header.png", - "static/img/logos/os/freenas.svg": "/static/img/logos/os/freenas.svg", - "static/img/logos/os/linux-small.svg": "/static/img/logos/os/linux-small.svg", - "static/img/logos/services/aws.svg": "/static/img/logos/services/aws.svg", - "static/img/logos/services/unbound.svg": "/static/img/logos/services/unbound.svg", - "static/img/logos/services/nsd.svg": "/static/img/logos/services/nsd.svg", - "static/img/logos/os/macos.svg": "/static/img/logos/os/macos.svg", - "static/img/logos/os/linux.svg": "/static/img/logos/os/linux.svg", - "static/img/logos/services/linux.svg": "/static/img/logos/services/linux.svg", - "static/img/logos/services/postgresql.svg": "/static/img/logos/services/postgresql.svg", - "static/img/logos/services/litespeed.svg": "/static/img/logos/services/litespeed.svg", - "registry-hello.html": "/registry-hello.html", - "static/img/logos/services/slack.svg": "/static/img/logos/services/slack.svg", - "static/img/logos/services/gnu-freeipmi.svg": "/static/img/logos/services/gnu-freeipmi.svg", - "static/img/logos/services/isc.svg": "/static/img/logos/services/isc.svg", - "static/img/logos/services/solr.svg": "/static/img/logos/services/solr.svg", - "static/img/logos/services/nut.svg": "/static/img/logos/services/nut.svg", - "static/img/logos/os/debian.svg": "/static/img/logos/os/debian.svg", - "static/img/logos/services/smstools3.svg": "/static/img/logos/services/smstools3.svg", - "static/email/img/reachability_siren.png": "/static/email/img/reachability_siren.png", - "static/splash.css": "/static/splash.css", - "registry-access.html": "/registry-access.html", - "static/img/logos/services/server-connection.svg": "/static/img/logos/services/server-connection.svg", - "LICENSE.md": "/LICENSE.md", - "static/email/img/crit_siren.png": "/static/email/img/crit_siren.png", - "static/img/logos/services/pushover.svg": "/static/img/logos/services/pushover.svg", - "static/img/logos/services/openvpn.svg": "/static/img/logos/services/openvpn.svg", - "static/img/logos/os/suse.svg": "/static/img/logos/os/suse.svg", - 
"static/img/logos/services/couchdb.svg": "/static/img/logos/services/couchdb.svg", - "static/img/logos/os/manjaro.svg": "/static/img/logos/os/manjaro.svg", - "static/img/logos/services/rabbitmq.svg": "/static/img/logos/services/rabbitmq.svg", - "static/img/logos/services/sma.svg": "/static/img/logos/services/sma.svg", - "static/email/img/warn_siren.png": "/static/email/img/warn_siren.png", - "static/img/logos/os/arch.svg": "/static/img/logos/os/arch.svg", - "static/img/logos/os/freebsd.svg": "/static/img/logos/os/freebsd.svg", - "static/img/logos/services/flock.svg": "/static/img/logos/services/flock.svg", - "static/img/logos/services/prometheus.svg": "/static/img/logos/services/prometheus.svg", - "static/img/logos/services/redis.svg": "/static/img/logos/services/redis.svg", - "static/img/logos/services/ipfs.svg": "/static/img/logos/services/ipfs.svg", - "static/img/logos/os/redhat.svg": "/static/img/logos/os/redhat.svg", - "static/img/logos/services/concul.svg": "/static/img/logos/services/concul.svg", - "static/img/mail/isotype.png": "/static/img/mail/isotype.png", - "static/img/logos/services/processor.svg": "/static/img/logos/services/processor.svg", - "static/img/logos/services/lm-sensors.svg": "/static/img/logos/services/lm-sensors.svg", - "static/img/logos/services/data-encryption.svg": "/static/img/logos/services/data-encryption.svg", - "static/site/pages/holding-page-503/multiple-logos-group.svg": "/static/site/pages/holding-page-503/multiple-logos-group.svg", - "static/email/img/crit_badge.png": "/static/email/img/crit_badge.png", - "static/img/logos/services/load-balancer.svg": "/static/img/logos/services/load-balancer.svg", - "index.html": "/index.html", - "static/img/logos/services/elasticsearch.svg": "/static/img/logos/services/elasticsearch.svg", - "static/img/logos/os/centos.svg": "/static/img/logos/os/centos.svg", - "static/email/img/clea_siren.png": "/static/email/img/clea_siren.png", - "static/img/logos/services/ceph.svg": "/static/img/logos/services/ceph.svg", - "static/img/logos/services/log-file.svg": "/static/img/logos/services/log-file.svg", - "static/img/logos/services/container.svg": "/static/img/logos/services/container.svg", - "static/img/logos/services/dovecot.svg": "/static/img/logos/services/dovecot.svg", - "static/img/logos/services/exim.svg": "/static/img/logos/services/exim.svg", - "static/img/logos/services/chrony.svg": "/static/img/logos/services/chrony.svg", - "static/email/img/warn_badge.png": "/static/email/img/warn_badge.png", - "static/img/logos/services/nfs.svg": "/static/img/logos/services/nfs.svg", - "static/img/logos/os/fedora.svg": "/static/img/logos/os/fedora.svg", - "static/img/logos/os/ubuntu.svg": "/static/img/logos/os/ubuntu.svg", - "static/img/logos/services/notification-bell.svg": "/static/img/logos/services/notification-bell.svg", - "static/email/img/flood_siren.png": "/static/email/img/flood_siren.png", - "static/img/logos/services/access-point.svg": "/static/img/logos/services/access-point.svg", - "static/email/img/label_recovered.png": "/static/email/img/label_recovered.png", - "static/img/logos/services/samba.svg": "/static/img/logos/services/samba.svg", - "static/img/logos/services/monitoring.svg": "/static/img/logos/services/monitoring.svg", - "static/img/logos/services/key-file.svg": "/static/img/logos/services/key-file.svg", - "static/img/logos/services/kafka.svg": "/static/img/logos/services/kafka.svg", - "static/email/img/label_warning.png": "/static/email/img/label_warning.png", - "static/img/logos/services/rocketchat.svg": 
"/static/img/logos/services/rocketchat.svg", - "static/img/mail/logotype.svg": "/static/img/mail/logotype.svg", - "static/img/logos/services/nginx-plus.svg": "/static/img/logos/services/nginx-plus.svg", - "static/email/img/label_critical.png": "/static/email/img/label_critical.png", - "static/img/logos/services/fronius.svg": "/static/img/logos/services/fronius.svg", - "static/img/logos/services/puppet.svg": "/static/img/logos/services/puppet.svg", - "static/img/logos/os/pfsense.svg": "/static/img/logos/os/pfsense.svg", - "static/img/logos/services/consul.svg": "/static/img/logos/services/consul.svg", - "static/img/logos/os/coreos.svg": "/static/img/logos/os/coreos.svg", - "static/site/pages/holding-page-503/index.html": "/static/site/pages/holding-page-503/index.html", - "static/site/pages/holding-page-503/holding-page-503.css": "/static/site/pages/holding-page-503/holding-page-503.css", - "static/img/logos/services/network.svg": "/static/img/logos/services/network.svg", - "static/img/logos/services/qos.svg": "/static/img/logos/services/qos.svg", - "static/img/logos/services/email.svg": "/static/img/logos/services/email.svg", - "static/img/logos/services/ddos.svg": "/static/img/logos/services/ddos.svg", - "static/img/logos/os/openstack.svg": "/static/img/logos/os/openstack.svg", - "static/img/mail/logotype.png": "/static/img/mail/logotype.png", - "static/email/img/full_logo.png": "/static/email/img/full_logo.png", - "static/email/img/community_icon.png": "/static/email/img/community_icon.png", - "static/email/img/configure_icon.png": "/static/email/img/configure_icon.png", - "static/img/logos/services/temperature.svg": "/static/img/logos/services/temperature.svg", - "static/img/logos/services/pushbullet.svg": "/static/img/logos/services/pushbullet.svg", - "static/img/logos/services/nginx.svg": "/static/img/logos/services/nginx.svg", - "static/img/logos/services/cloud.svg": "/static/img/logos/services/cloud.svg", - "static/img/logos/services/pagerduty.svg": "/static/img/logos/services/pagerduty.svg", - "static/img/logos/services/alerta.svg": "/static/img/logos/services/alerta.svg", - "static/img/logos/services/mongodb.svg": "/static/img/logos/services/mongodb.svg", - "static/email/img/clea_badge.png": "/static/email/img/clea_badge.png", - "static/img/logos/services/discord.svg": "/static/img/logos/services/discord.svg", - "static/img/logos/os/alpine.svg": "/static/img/logos/os/alpine.svg", - "static/img/logos/os/placeholder.svg": "/static/img/logos/os/placeholder.svg", - "static/img/logos/services/aws-sns.svg": "/static/img/logos/services/aws-sns.svg", - "static/img/logos/services/telegram.svg": "/static/img/logos/services/telegram.svg", - "static/img/logos/os/oracle.svg": "/static/img/logos/os/oracle.svg", - "static/img/logos/services/oracle.svg": "/static/img/logos/services/oracle.svg", - "static/img/logos/services/network-protocol.svg": "/static/img/logos/services/network-protocol.svg", - "static/img/logos/services/nvidia.svg": "/static/img/logos/services/nvidia.svg", - "static/img/logos/services/springboot.svg": "/static/img/logos/services/springboot.svg", - "static/img/logos/services/kavenegar.svg": "/static/img/logos/services/kavenegar.svg", - "static/img/logos/services/powerdns.svg": "/static/img/logos/services/powerdns.svg", - "static/img/logos/os/docker.svg": "/static/img/logos/os/docker.svg", - "static/img/logos/services/messagebird.svg": "/static/img/logos/services/messagebird.svg", - "static/img/logos/services/placeholder.svg": "/static/img/logos/services/placeholder.svg", - 
"static/site/pages/holding-page-503/reset.svg": "/static/site/pages/holding-page-503/reset.svg", - "static/img/logos/services/twilio.svg": "/static/img/logos/services/twilio.svg", - "static/img/logos/services/lighthttpd.svg": "/static/img/logos/services/lighthttpd.svg", - "static/img/logos/os/rocky.svg": "/static/img/logos/os/rocky.svg", - "static/img/list-style-image.svg": "/static/img/list-style-image.svg", - ".well-known/assetlinks.json": "/.well-known/assetlinks.json", - "static/.well-known/assetlinks.json": "/static/.well-known/assetlinks.json", - "static/img/logos/services/opensips.svg": "/static/img/logos/services/opensips.svg", - "static/img/logos/services/logstash.svg": "/static/img/logos/services/logstash.svg", - "static/img/mail/isotype.svg": "/static/img/mail/isotype.svg", - "static/site/pages/holding-page-503/netdata-logo-white.svg": "/static/site/pages/holding-page-503/netdata-logo-white.svg", - "static/img/logos/services/hub.svg": "/static/img/logos/services/hub.svg", - "apple-app-site-association": "/apple-app-site-association", - "static/apple-app-site-association": "/static/apple-app-site-association", - "app.css.map": "/app.cb2e9f9a81cf9533384e.css.map", - "app.js.map": "/app.08c9fe3ead1d43ff769b.js.map", - "runtime.js.map": "/runtime.ceccffb089cc539b1c1f.js.map", - "3624.chunk.js.map": "/3624.bfeb1fdc3057ba82ddac.chunk.js.map", - "7840.chunk.js.map": "/7840.2f2023f2eb1dcc943d94.chunk.js.map", - "5596.chunk.js.map": "/5596.2036706750ff4028cff2.chunk.js.map", - "3350.chunk.js.map": "/3350.ae7151980981854dc3d1.chunk.js.map", - "9818.chunk.js.map": "/9818.3ce64e0b472412bfbc97.chunk.js.map", - "7529.chunk.js.map": "/7529.658d363e12e73df83b60.chunk.js.map", - "1418.chunk.js.map": "/1418.16d53ba5cce2c6a8143a.chunk.js.map", - "1876.chunk.js.map": "/1876.e610906417b961290730.chunk.js.map", - "3621.chunk.js.map": "/3621.01ee70ee9c311ac163d9.chunk.js.map", - "7471.chunk.js.map": "/7471.f96c4d04a73fb7551c03.chunk.js.map", - "3736.chunk.js.map": "/3736.e572adfdf7951f74a741.chunk.js.map", - "683.css.map": "/683.cc9fa5f3bdc0bf3ab2fc.css.map", - "683.chunk.js.map": "/683.02c173493ef257c210fa.chunk.js.map", - "8784.chunk.js.map": "/8784.a04e9c07186e1f057f56.chunk.js.map", - "7146.chunk.js.map": "/7146.79304e386ac9238b7cf1.chunk.js.map", - "5598.chunk.js.map": "/5598.07ff43a6b96bd41e8637.chunk.js.map", - "7487.css.map": "/7487.89070793921be1288bb5.css.map", - "7487.chunk.js.map": "/7487.db63c95c27d973a07d9b.chunk.js.map", - "934.chunk.js.map": "/934.24d6fdc5f60aa6493962.chunk.js.map", - "7170.chunk.js.map": "/7170.5d6047bb6ce9d77d53db.chunk.js.map", - "8505.chunk.js.map": "/8505.c330f2104fefd71717da.chunk.js.map", - "5426.chunk.js.map": "/5426.254557ad3e1f2d14ad29.chunk.js.map", - "4680.chunk.js.map": "/4680.7d8122d91e9d4582836a.chunk.js.map", - "5700.chunk.js.map": "/5700.b7c9908dc7f30a5a57e7.chunk.js.map", - "5246.chunk.js.map": "/5246.07c5a1649f0805c140fe.chunk.js.map", - "3843.css.map": "/3843.89070793921be1288bb5.css.map", - "3843.chunk.js.map": "/3843.ffbb6f614ba4f7b77570.chunk.js.map", - "4034.chunk.js.map": "/4034.35199d2809d318eed690.chunk.js.map", - "1782.chunk.js.map": "/1782.d82eb301aa81b380dd0c.chunk.js.map", - "252.chunk.js.map": "/252.40edc9b0f6da1422f40b.chunk.js.map", - "6469.css.map": "/6469.89070793921be1288bb5.css.map", - "6469.chunk.js.map": "/6469.47926fa38028dc7d0d41.chunk.js.map", - "979.chunk.js.map": "/979.3e5fddf93c977e6c71c3.chunk.js.map", - "6331.css.map": "/6331.89070793921be1288bb5.css.map", - "6331.chunk.js.map": "/6331.c91b5d104cdff1be3b80.chunk.js.map", 
- "9843.chunk.js.map": "/9843.93f8c71c64ef97b9905e.chunk.js.map", - "1839.chunk.js.map": "/1839.a4196d2a87ac0fdd9f34.chunk.js.map", - "6661.chunk.js.map": "/6661.72f782bd78fea8c2d836.chunk.js.map", - "86.chunk.js.map": "/86.2c88d4d37b88e2620051.chunk.js.map", - "2007.chunk.js.map": "/2007.b33ce2b4b736228fd681.chunk.js.map", - "4958.chunk.js.map": "/4958.5969fedc1ff7dc82775e.chunk.js.map", - "9912.chunk.js.map": "/9912.702300c2dd9616289606.chunk.js.map", - "7959.chunk.js.map": "/7959.4f20f4b203e2bad8af39.chunk.js.map", - "3968.chunk.js.map": "/3968.483ca2ad3b300293e655.chunk.js.map", - "3104.chunk.js.map": "/3104.3b70865e21a81a616af3.chunk.js.map", - "8059.chunk.js.map": "/8059.4fdc76bb2cac1f74b41b.chunk.js.map", - "195.chunk.js.map": "/195.4cdbea6af54d14a95949.chunk.js.map", - "785.chunk.js.map": "/785.d016913841bcc0209d5b.chunk.js.map", - "7340.chunk.js.map": "/7340.25dce1c5cc66b613700f.chunk.js.map", - "7332.chunk.js.map": "/7332.3acf93dcfa52c7f1bc18.chunk.js.map", - "6944.chunk.js.map": "/6944.ab3e70c9ac0f05013b5f.chunk.js.map", - "6760.chunk.js.map": "/6760.370b9780120c145da28f.chunk.js.map", - "4140.css.map": "/4140.89070793921be1288bb5.css.map", - "4140.chunk.js.map": "/4140.46221d08bcda08826c78.chunk.js.map", - "185.chunk.js.map": "/185.42bab351ba68de7ca4aa.chunk.js.map", - "8842.chunk.js.map": "/8842.406028f523a00acb97bd.chunk.js.map", - "5304.chunk.js.map": "/5304.cc797fdd343c7e873b2f.chunk.js.map", - "3750.chunk.js.map": "/3750.4ad02f036f2a7c520b1c.chunk.js.map", - "4414.chunk.js.map": "/4414.590ba07d470ba2ce7dd0.chunk.js.map", - "9400.chunk.js.map": "/9400.6250bbf86c4fd3173de2.chunk.js.map", - "npm.react.dom.js.map": "/npm.react.dom.2994f1b4604bd8ce80f6.js.map", - "netdata.ui.js.map": "/netdata.ui.647a4c3303ee8ec0da64.js.map", - "netdata.charts.js.map": "/netdata.charts.fdfd27674ac5533bbcc2.js.map", - "8637.js.map": "/8637.0958494526e838a60d2b.js.map", - "7144.chunk.js.map": "/7144.382c341e09540fdebaa6.chunk.js.map", - "7857.chunk.js.map": "/7857.813ae058cca579e05462.chunk.js.map", - "4631.chunk.js.map": "/4631.158982e127e11bdc6a45.chunk.js.map", - "1220.chunk.js.map": "/1220.01d6bbaab869c74f4437.chunk.js.map", - "749.chunk.js.map": "/749.e44087ac3a2e3a994318.chunk.js.map", - "7519.chunk.js.map": "/7519.7982a2e0fcdf82ba78dd.chunk.js.map", - "6121.chunk.js.map": "/6121.f7286809e53e1c6d655a.chunk.js.map", - "6384.chunk.js.map": "/6384.0fad56b0bc902f186c98.chunk.js.map", - "8323.css.map": "/8323.e22de33686bb2f34063c.css.map", - "8323.chunk.js.map": "/8323.437406936b642e8f6cb3.chunk.js.map", - "5709.chunk.js.map": "/5709.c494eb62187917e2f2f6.chunk.js.map", - "3455.chunk.js.map": "/3455.f9ca876de57244386773.chunk.js.map", - "1396.chunk.js.map": "/1396.56f70d7c659ac0b694cd.chunk.js.map", - "5794.chunk.js.map": "/5794.252ff787d58d64eb4988.chunk.js.map", - "8938.chunk.js.map": "/8938.5116982f737a2ef85330.chunk.js.map", - "7208.chunk.js.map": "/7208.1d75cf5d007de32e403b.chunk.js.map", - "8239.chunk.js.map": "/8239.c85fc9f3599f198a9efb.chunk.js.map", - "9473.chunk.js.map": "/9473.4fd4742ffb6b5348bea8.chunk.js.map", - "6323.chunk.js.map": "/6323.26d4d949c9b6f8674c2e.chunk.js.map", - "9292.chunk.js.map": "/9292.cc5055091db9a0826933.chunk.js.map", - "7304.chunk.js.map": "/7304.ed4690ec296b59fbe7fd.chunk.js.map", - "8910.chunk.js.map": "/8910.019974f8675d8834dd07.chunk.js.map", - "6008.chunk.js.map": "/6008.3d0636fe17f4f6274485.chunk.js.map", - "963.chunk.js.map": "/963.35da4a3c4e49aac29dae.chunk.js.map", - "7436.chunk.js.map": "/7436.1ebd371d70e6a87c5499.chunk.js.map" -} \ No newline at end 
of file diff --git a/src/web/gui/v2/app.08c9fe3ead1d43ff769b.js b/src/web/gui/v2/app.08c9fe3ead1d43ff769b.js deleted file mode 100644 index 2540a1142..000000000 --- a/src/web/gui/v2/app.08c9fe3ead1d43ff769b.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="c00e49dd-0bde-488b-aa4e-cb2cbd03e2c2",e._sentryDebugIdIdentifier="sentry-dbid-c00e49dd-0bde-488b-aa4e-cb2cbd03e2c2")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[3524],{80158:(e,t,a)=>{"use strict";a.d(t,{P3:()=>r,QU:()=>s,Vn:()=>i,Yv:()=>c,Zr:()=>o});a(25440);var n=a(86263);const r=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:"",t=arguments.length>1?arguments[1]:void 0;return(0,n.default)(e,t)},o=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:"";const t=arguments.length>1&&void 0!==arguments[1]&&arguments[1]?e.toLowerCase():e;return t.charAt(0).toUpperCase()+t.slice(1)},i=e=>e.replace(/([a-z])([A-Z])/g,"$1 $2"),s=e=>e.replace(/([^:]\/)\/+/g,"$1"),c=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:"";return e=e.trim(),e=e.replace(/\s+/g,"-"),e=e.replace(/[^a-zA-Z0-9-]/g,""),e=e.toLowerCase(),e}},63314:(e,t,a)=>{"use strict";a.d(t,{K_:()=>s,DL:()=>m,_0:()=>d,Ay:()=>g});a(3064),a(98992),a(72577),a(62953);var n=a(96540),r=a(50876),o=a(58168);var i=a(67276);const s=(0,n.createContext)({}),c={allowLogInvisible:!0},l=e=>{let{children:t,logImpression:a=!0,options:o={},delay:l=750,...d}=e;const u={...c,...o},m=(0,n.useContext)(s),[g,p]=(0,n.useState)(),h=(0,n.useRef)(),v=(0,n.useRef)(),{sendLog:f,isReady:A}=(0,r.A)(),y=(0,n.useCallback)((e=>{const t=e[0];t&&g!==t.isIntersecting&&p(t.isIntersecting)}),[]),b=(0,n.useCallback)((()=>{var e;const t=new IntersectionObserver(y,{root:null,rootMargin:"0px",threshold:.5}),a=null===h||void 0===h||null===(e=h.current)||void 0===e?void 0:e.childNodes,n=Array.from(a||[]).find((e=>null!==e.offsetParent));n&&t.observe(n)}),[null===h||void 0===h?void 0:h.current]);return(0,n.useEffect)((()=>{u.allowLogInvisible||b()}),[u.allowLogInvisible]),(0,n.useEffect)((()=>{let e=!0,t=null;const n={action:i.o1.elementViewed,...m,...d};return A&&(null===v||void 0===v?void 0:v.current)!==JSON.stringify(n)&&a&&(u.allowLogInvisible||g)&&(t=setTimeout((()=>{e&&(f(n),v.current=JSON.stringify(n))}),l)),()=>{e=!1,t&&(clearTimeout(t),t=null)}}),[A,a,g,d]),n.createElement(s.Consumer,null,(e=>n.createElement(s.Provider,{value:{...e,...d}},n.createElement("div",{ref:h,style:{display:"contents"}},t))))},d=(u=l,e=>n.createElement(u,(0,o.A)({},e,{isSuccess:!0})));var u;const m=(e=>t=>n.createElement(e,(0,o.A)({},t,{isFailure:!0})))(l),g=l},67276:(e,t,a)=>{"use strict";a.d(t,{KI:()=>n,lO:()=>r,mu:()=>s,o1:()=>i,yq:()=>o});const n={success:"success",fail:"failure"},r={visitor:"visitor"},o={sequence:"telemetrySequence",session:"telemetrySessionId"},i={elementViewed:"element-viewed",buttonClicked:"button-clicked"},s={start:"start",end:"end"}},50876:(e,t,a)=>{"use strict";a.d(t,{A:()=>y});a(62953);var n=a(96540),r=a(63950),o=a.n(r),i=a(37618),s=a(26655);var c,l=a(3914),d=a(63314),u=a(35600),m=a(75542);const g="undefined"!==typeof 
process&&!(null===(c=process)||void 0===c||null===(c=c.env)||void 0===c||!c.JEST_WORKER_ID);var p=a(69418),h=a(67276),v=a(67990),f=a(88982),A=a(47762);const y=()=>{if(g)return{sendLog:o(),isReady:!0};const e=(0,m.A)(),{isAnonymous:t,...a}=e,r=(0,l.vt)(),c=(0,n.useContext)(d.K_),[,y,b]=(0,p.A)(),S=a.id&&(!y||t),k=(0,f.n)("id"),I=(0,v.gr)(k,"ids"),w=(0,A.BU)(I),E=(0,A.Ig)(I),x=(0,A.GE)(I),C=(0,A.no)(I),T=(0,n.useCallback)(((e,t)=>{var n,o;const l={...t?{...c}:{},...e},{feature:d,action:m,isStart:g,isSuccess:p,isFailure:v,eventReason:f,...A}=l;return(e=>s.A.post("/api/v1/telemetry/push",e))({"@timestamp":(new Date).toISOString(),labels:{...A,...window.localNetdataRegistry?{...window.localNetdataRegistry.mg?{machine:window.localNetdataRegistry.mg}:{},...window.localNetdataRegistry.nd?{nodeId:window.localNetdataRegistry.nd}:{}}:{}},event:{action:m,sequence:(0,u.Il)(),provider:i.Ay?"agent":"app",...p||v?{outcome:p?h.KI.success:h.KI.fail}:{},...g?{type:[h.mu.start]}:p||v?{type:[h.mu.end]}:{},...f?{reason:f}:{}},user:a,url:{full:null===(n=window)||void 0===n||null===(n=n.location)||void 0===n?void 0:n.href},Netdata:{...r?{space:{id:r}}:{},statistics:{nodes:{total:i.Ay?1:b,...i.Ay?{}:{live:(w||[]).length,stale:(E||[]).length,offline:(x||[]).length,created:(C||[]).length}}},telemetry:{feature:d,session:{id:(0,u.u0)()},visitor:{id:null===(o=window.envSettings)||void 0===o?void 0:o.visitor}}}}).catch((e=>{"production"!==window.envSettings.nodeEnv&&(console.groupCollapsed("[Netdata telemetry error]"),console.warn(e),console.groupEnd())}))}),[a,t,b,y,r,S]),N=(0,n.useCallback)(((e,t)=>T({action:h.o1.buttonClicked,...e},t)),[]);return{sendLog:T,sendButtonClickedLog:N,isReady:S,user:e}}},75542:(e,t,a)=>{"use strict";a.d(t,{A:()=>i});a(25440);var n=a(96540),r=a(22292),o=a(37618);const i=()=>{const{id:e,email:t,isAnonymous:a}=(0,r.uW)();return(0,n.useMemo)((()=>{var n,r,i;return{id:a?(null===(n=window.localNetdataRegistry)||void 0===n?void 0:n.pg)||window.envSettings.visitor:e||"",email:a?void 0:t,domain:a?(null===(r=window.localNetdataRegistry)||void 0===r?void 0:r.registry)||"localhost":o.Ay?((null===(i=window.envSettings)||void 0===i?void 0:i.apiUrl)||"").replace(/^https:\/\//,""):location.host,isAnonymous:a}}),[e,t,a])}},35600:(e,t,a)=>{"use strict";a.d(t,{Il:()=>o,u0:()=>i,y:()=>s});a(9920),a(98992),a(3949);var n=a(67276),r=a(33829);const o=()=>{var e,t;const a=n.yq.sequence,r=parseInt((null===(e=window.sessionStorage)||void 0===e?void 0:e.getItem(a))||0,10);return null===(t=window.sessionStorage)||void 0===t||t.setItem(a,r+1),r},i=()=>(sessionStorage.getItem(n.yq.session)||sessionStorage.setItem(n.yq.session,(0,r.A)()),sessionStorage.getItem(n.yq.session)||""),s=()=>{Object.values(n.yq).forEach((e=>sessionStorage.removeItem(e)))}},21290:(e,t,a)=>{"use strict";a.d(t,{$j:()=>g,ii:()=>u});var n=a(96540),r=a(87991),o=a(38819),i=a(82838);const s=!!(Intl&&Intl.DateTimeFormat&&navigator.language),c=e=>"number"===typeof e?new Date(e):e,l=e=>(e=c(e))?(0,r.GP)(e,"MM/dd/yyyy"):"",d=e=>(e=c(e))?(0,r.GP)(e,"HH:mm"):"",u=function(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:0;return new Date(new Date(e).getTime()+60*parseInt(t)*60*1e3)},m=(e,t)=>{let{locale:a,...n}=t;return new Intl.DateTimeFormat(null!==a&&void 
0!==a?a:navigator.language,(e=>{let{long:t,isTime:a,secs:n,timezone:r,...o}=e;return{hourCycle:"h23",...a?{}:t?{weekday:"short",year:"numeric",month:"short",day:"2-digit"}:{dateStyle:"short"},...a&&{timeStyle:n?"medium":"short"},timeZone:r,...o}})(n)).format(e)},g=()=>{const{utc:e,offset:t}=(()=>{let{utc:e}=(0,o.PP)();return(0,i.M)("default",e)})();return{localeDateString:(0,n.useMemo)((()=>s?(t,a)=>m(t,{long:!0,timezone:e,...a}):l),[e]),localeTimeString:(0,n.useMemo)((()=>s?(t,a)=>m(t,{secs:!0,isTime:!0,timezone:e,...a}):d),[e]),utcOffset:t}}},65570:(e,t,a)=>{"use strict";a.d(t,{Ay:()=>o,pb:()=>n});a(74648),a(17333),a(98992),a(23215),a(54520);const n=(e,t)=>{let{omit:a=[],keep:n=[]}=t;return a.length||n.length?e.filter((e=>n.length?n.includes(e):!a.includes(e))):e},r=function(e,t){let a=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{};if(e===t)return!0;if("object"!==typeof e||null===e||"object"!==typeof t||null===t)return!1;const o=n(Object.keys(e),a),i=n(Object.keys(t),a);if(o.length!==i.length)return!1;const s=Object.prototype.hasOwnProperty.bind(t);return o.every((a=>!!s(a)&&r(e[a],t[a])))},o=r},64454:(e,t,a)=>{"use strict";a.d(t,{A:()=>o});a(14905),a(98992),a(8872),a(25509),a(65223),a(60321),a(41927),a(11632),a(64377),a(66771),a(12516),a(68931),a(52514),a(35694),a(52774),a(49536),a(21926),a(94483),a(16215),a(62953);var n=a(65570);const r=function(e,t){let a=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{};return e===t||"object"!==typeof e||null===e||"object"!==typeof t||null===t?t:Array.isArray(t)?((e,t,a)=>{const o=(0,n.pb)(e,a);return o.reduce(((e,t,n)=>(e.push(r(t,o[n],a)),e)),[])})(e,0,a):((e,t,a)=>{const o=(0,n.pb)([...new Set([...Object.keys(e),...Object.keys(t)])],a),i=Object.prototype.hasOwnProperty.bind(t);return o.reduce(((n,o)=>(i(o)?n[o]=r(e[o],t[o],a):n[o]=e[o],n)),{})})(e,t,a)},o=r},38819:(e,t,a)=>{"use strict";a.d(t,{PP:()=>d,Pg:()=>h,W6:()=>p,Z8:()=>g,yq:()=>m});a(41393),a(14905),a(98992),a(81454),a(8872),a(62953);var n=a(49870),r=a.n(n),o=a(55364),i=a.n(o),s=a(90179),c=a.n(s);const l=/[&;]/,d=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:decodeURIComponent(window.location.hash.substr(1));if(0===e.length)return{};return e.split(l).reduce(((e,t)=>{const a=t.indexOf("=");if(-1!==a){e[t.substring(0,a)]=t.substring(a+1)}return e}),{})},u=e=>{const t=Object.entries(e);return 0===t.length?"":t.map((e=>{let[t,a]=e;return"".concat(t,"=").concat(encodeURIComponent(a))})).join("&")},m=function(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:decodeURIComponent(window.location.hash.substr(1));const a=c()(d(t),e);return u(a)},g=(r()([d,u]),e=>{const t=d();i()(t,e);const a="#".concat(u(t)),n=history.state;location.hash=a,history.replaceState(n,"",a)}),p=function(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:decodeURIComponent(window.location.hash.substr(1));return d(t)[e]},h=e=>{history.replaceState(history.state,"","#".concat(m(e)))}},63457:(e,t,a)=>{"use strict";a.d(t,{A:()=>s});var n=a(77783),r=a(63950),o=a.n(r);const i=86400,s=e=>{if(!e)throw new Error("Please pass a name for the idb store");const t=(0,n.y$)(e,"".concat(e,"-cache")),a=(e,a)=>(0,n.hZ)(e,(e=>({value:e,timestamp:Date.now()}))(a),t).catch(o());return{store:t,set:a,get:function(e){let r,{fetch:s,maxAge:c=i}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};const l=(0,n.Jt)(e,t).then((t=>{const n=1e3*c;return t&&t.timestamp+n>Date.now()?t.value:s?(r=s(),r.then((t=>(a(e,t),t)))):null})).catch(o());return 
l.abort=()=>r&&r.abort(),l},clear:()=>(0,n.IU)(t).catch(o()),del:e=>(0,n.yH)(e,t).catch(o())}}},37618:(e,t,a)=>{"use strict";a.d(t,{mz:()=>S,LA:()=>b,hq:()=>v,gB:()=>h,yu:()=>p,Z2:()=>g,z0:()=>m,Ay:()=>k,kG:()=>f,I:()=>l,tB:()=>y,ES:()=>A,y7:()=>d,sA:()=>u});a(25440);var n=a(38280),r=a(80158);var o,i,s;const c=(null===(o=window.envSettings)||void 0===o?void 0:o.isAgent)||!1,l=()=>{var e,t,a;return c&&!(null!==(e=window.localNetdataAgent)&&void 0!==e&&e.cloudEnabled||"disabled"!==(null===(t=window.localNetdataRegistry)||void 0===t?void 0:t.cloudStatus)&&"unavailable"!==(null===(a=window.localNetdataRegistry)||void 0===a?void 0:a.cloudStatus)||localStorage.getItem("netdataJWT"))},d=/\/(spaces|nodes|overview|alerts|dashboards|anomalies|events|cloud)\/?.*/,u=c?window.location.pathname.replace(d,""):"/",m=(0,r.Yv)((null===(i=window.localNetdataRegistry)||void 0===i?void 0:i.hostname)||"agent"),g=(null===(s=window.localNetdataRegistry)||void 0===s?void 0:s.mg)||"agent",p={createdAt:"",description:"",iconURL:"",id:g,error:null,loaded:!0,name:m,slug:m,permissions:{"user:ChangeName":"user:ChangeName","user:ChangeTheme":"user:ChangeTheme","user:ChangeEmailSetting":"user:ChangeEmailSetting","user:ChangeSpaceRoomNotifications":!1,"user:AddAPIToken":"user:AddAPIToken","user:DeleteAPIToken":"user:DeleteAPIToken","user:ReplaceAPIToken":"user:ReplaceAPIToken","user:UpdateVisitedNodes":"user:UpdateVisitedNodes","space:Read":"space:Read","space:UpdateMeta":"space:UpdateMeta","room:ReadUsers":"room:ReadUsers","room:ReadAll":"room:ReadAll","room:Read":"room:Read","dashboard:ReadAll":"dashboard:ReadAll","dashboard:Update":"dashboard:Update","dashboard:UpdateSelf":"dashboard:UpdateSelf","dashboard:Create":"dashboard:Create","dashboard:Delete":"dashboard:Delete","dashboard:DeleteSelf":"dashboard:DeleteSelf","chart:AddToDashboard":"chart:AddToDashboard","alert:ReadAll":"alert:ReadAll","function:ReadAll":"function:ReadAll","function:ExecAll":"function:ExecAll","function:Exec":"function:Exec","function:ExecPrivileged":"function:ExecPrivileged","feed:ReadBasic":"feed:ReadBasic","feed:ReadManagement":"feed:ReadManagement","billing:ReadAll":"billing:ReadAll","billing:ReadBasic":"billing:ReadBasic","visitedNodes:Update":"visitedNodes:Update","visitedNodes:ReadAll":"visitedNodes:ReadAll","agent:ReadDynCfg":"agent:ReadDynCfg","agent:EditDynCfg":"agent:EditDynCfg","agent:EditNotificationsConfig":"agent:EditNotificationsConfig","agent:ViewNotificationsConfig":"agent:ViewNotificationsConfig","oidc:Manage":"oidc:Manage"}},h="local",v={...n.A,id:h,slug:h,name:h,loaded:!0,fullyLoaded:!0,spaceId:g,untouchable:!1},f="overview",A=e=>c&&e===g,y=e=>c&&e===h,b="local-custom-dashboard",S={id:b,isOwner:!0,name:b,slug:b,snapshot:{cards:{},containerIds:[],containers:{},layout:{}},version:0},k=c},39225:(e,t,a)=>{"use strict";a.d(t,{A:()=>r});var n=a(96540);const r=function(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"";const a="refreshed-after-lazy-import",r=t?"".concat(t,"-").concat(a):a;return(0,n.lazy)((async()=>{const t=JSON.parse(window.sessionStorage.getItem(r)||"false");try{const t=await e();return window.sessionStorage.setItem(r,"false"),t}catch(a){if(!t)return window.sessionStorage.setItem(r,"true"),window.location.reload();throw a}}))}},49286:(e,t,a)=>{"use strict";a.d(t,{Ds:()=>g,Jz:()=>u,bn:()=>m});a(25440),a(41393),a(14905),a(98992),a(81454),a(8872);const 
n=e=>e.replace(/([A-Z])/g,"_$1").toLowerCase(),r=e=>e.replace(/([_][a-z])/g,(e=>e[1].toUpperCase())),o=e=>e.replace(/([\s\-_][a-z])/g,(e=>e[1].toUpperCase())),i=[],s=(e,t,a)=>({...e,[n(t)]:a}),c=(e,t,a)=>({...e,[r(t)]:a}),l=(e,t,a)=>({...e,[o(t)]:a}),d=(e,t)=>{let{func:a,action:n,omit:r=i,depth:o=0}=t;return o-=1,o?Array.isArray(e)?e.map((e=>a(e,{omit:r}))):"object"===typeof e&&e?Object.keys(e).reduce(((t,o)=>{if(r.includes(o))return{...t,[o]:e[o]};const i=a(e[o],{omit:r});return n(t,o,i)}),{}):e:e},u=function(e){let{omit:t,depth:a}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};return d(e,{func:u,action:s,omit:t,depth:a})},m=function(e){let{omit:t,depth:a}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};return d(e,{func:m,action:c,omit:t,depth:a})},g=function(e){let{omit:t,depth:a}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};return d(e,{func:g,action:l,omit:t,depth:a})}},26655:(e,t,a)=>{"use strict";a.d(t,{A:()=>u});a(62953);var n=a(46266),r=a(68831),o=a(37618),i=(a(25440),a(80158));const s=(e,t,a)=>{const n=a.includes("join-callback")?a:encodeURIComponent(a);return"".concat(t).concat(t?"&":"?","cloudRoute=").concat(e).concat(n)},c=e=>(t,a)=>{try{if(/text\/plain/.test(a.getContentType()))throw{errorMessage:t};const n=JSON.parse(t||"{}");if(n.errorMsgKey)throw n;return e?e(n):n}catch(n){return n}},l=e=>e,d=function(e){let t=arguments.length>1&&void 0!==arguments[1]&&arguments[1];return(e=>function(){for(var t=arguments.length,a=new Array(t),n=0;n{var t,n;if(401!==(null===e||void 0===e||null===(t=e.response)||void 0===t?void 0:t.status)||null!==e&&void 0!==e&&null!==(n=e.config)&&void 0!==n&&n.allow401)throw e;if(o.Ay){const t=window.location.origin+window.location.pathname.replace(o.y7,"");if(a[1].baseURL===window.envSettings.agentApiUrl)throw e;return void(window.location.href=(0,i.QU)("".concat(window.envSettings.cloudUrl,"/trust?redirect_uri=").concat(encodeURIComponent(window.location.href),"&agent_uri=").concat(encodeURIComponent(t||window.envSettings.agentApiUrl))))}const{pathname:r,search:c,hash:l}=window.location;if(!/(sign-(in|up)|join-space)/.test(r))return window.location.replace((0,i.QU)("".concat(o.Ay?window.envSettings.cloudUrl:"","/sign-in").concat(s(r,c,l))))}));return c.cancel=r.cancel,c})((function(a){for(var i,s,d,u=arguments.length,m=new Array(u>1?u-1:0),g=1;ge<300||304===e)}=h,y=localStorage.getItem("netdataJWT"),b=null===(i=window.localNetdataRegistry)||void 0===i?void 0:i.mg,S=localStorage.getItem("agentJWT:".concat(b)),k=h.baseURL||r.A.apiBaseURL||"",I=((a,r)=>t?n.A[e](a,p,r):n.A[e](a,r))(a,{validateStatus:A,...h,baseURL:k,transformResponse:h.noResponseParsing?f:[c(f)],cancelToken:v.token,...(!!y||!!S)&&{headers:{...null===h||void 0===h?void 0:h.headers,.../netdata\.cloud/.test(k)&&{Authorization:"Bearer ".concat(y)},...k===window.envSettings.agentApiUrl&&S&&"undefined"!==S&&(null!==(s=window.localNetdataRegistry)&&void 0!==s&&s.xNetdataAuthHeader?{"X-Netdata-Auth":"Bearer ".concat(S)}:{Authorization:"Bearer ".concat(S)})}}}),w=I.catch((e=>{throw e.isCancel=n.A.isCancel(e),e}));w.cancel=()=>v.cancel();const E=null===(d=a.match(/\/spaces\/(.+?)\//))||void 0===d?void 0:d[1];return(0,o.ES)(E)&&w.cancel(),w}))},u={get:d("get"),post:d("post",!0),patch:d("patch",!0),put:d("put",!0),delete:d("delete")}},94355:(e,t,a)=>{"use strict";a.d(t,{A:()=>n});const n=e=>{if(!e.response){const t=500,a=e.message||"Something went 
wrong";return{errorCode:t,errorMessage:a,errorMsgKey:t,status:t,statusText:a}}const{data:{errorCode:t,errorMessage:a,errorMsgKey:n},status:r,statusText:o}=e.response;return{errorCode:t,errorMessage:a,errorMsgKey:n,status:r,statusText:o}}},79731:(e,t,a)=>{"use strict";a.d(t,{o:()=>r});a(71517),a(11379),a(93777),a(14190),a(12359),a(86097),a(17273),a(27415),a(19929),a(37583),a(55122),a(20230),a(57268),a(79733),a(62953);const n=new Map([["ErrParsingRequestBody","Bad Request"],["ErrUntrustedRedirectURI","Untrusted Redirect URI"],["ErrInternalServerError","Internal Server Error"],["ErrUnauthenticated","User Not Authenticated"],["ErrInvalidLastUpdatedRange","Invalid Time Selection"],["ErrForbidden","Forbidden"],["ErrInvalidEmail","Invalid E-mail"],["ErrInvalidRedirectURI","Invalid Redirect URI"],["ErrMissingStateCookie","Missing Authentication Cookie"],["ErrIncorrectChallengeResponse","Incorrect Response"],["ErrAccountDeleted","Account Deleted!"],["ErrInvalidAccountID","Invalid Account ID"],["ErrVisitedNodeNotFound","Node Not Found"],["ErrAccountIsTheLastMemberOfAPaidSpace","Cannot delete account"],["ErrOAuthNotFound","A Netdata account linked to this email already exists, please try another login method."],["ErrWworkspaceSlugTaken","Space Slug Unavailable"],["ErrInvalidWorkspaceName","Bad Space Name"],["ErrNotWorkspaceMember","User is not a Space member"],["ErrInvalidWorkspaceID","Invalid Space ID"],["ErrWorkspaceNotFound","No Space Found"],["ErrSpaceSlugTaken","Space Slug Unavailable"],["ErrInvalidSpaceName","Invalid Space Name"],["ErrNotSpaceMember","Not a Space Member"],["ErrForbiddenNotAdmin","Not a Space Admin"],["ErrLastSpaceAdmin","Last Admin In Space"],["ErrLastSpaceMember","Last Member In Space"],["ErrInvalidSpaceID","Bad Space ID"],["ErrInvalidMemberID","Bad Member ID"],["ErrForbiddenNotMember","Not a Member"],["ErrMissingMemberIDs","No Member IDs Given"],["ErrSpaceNotFound","Space Not Found"],["ErrSpaceMemberNotFound","Space Member Not Found"],["ErrMissingTokenIDs","Token ID Not Found"],["ErrCannotDeleteCurrentToken","You cannot delete the token in use for this session"],["ErrInvalidRole","Invalid Member Role"],["ErrCannotLeaveSpace","Cannot Leave Space"],["ErrInvalidSpaceDescription","Invalid Space Description"],["ErrInvalidEmailDomain","Invalid Email Domain"],["ErrCanNotDeletePaidSpace","Cannot delete paid space"],["ErrAgentCanceledBigResponse","Agent's response is too big"],["ErrAgentExpired","Agent is overloaded"],["ErrAgentTimeout","Timeout waiting agent response"],["ErrAlreadyClaimed","Node Already Claimed"],["ErrContextNotFound","Node doesn't have the requested context"],["ErrInternal","Internal server error"],["ErrInvalidNodeID","Invalid Node ID"],["ErrMissingNodeIDs","No Node IDs Given"],["ErrNoData","Node doesn't have data for the requested period"],["ErrNodeNotFound","Node Not Found"],["ErrNodeUnreachable","Unreachable Node"],["ErrUnreachable","Node is unreachable"],["ErrNodeInstanceNotFound","Node doesn't exist on Netdata"],["ErrRoomNameExist","Room Name Already Exists"],["ErrInvalidRoomName","Invalid Room Name"],["ErrRoomNotFound","Room Not Found"],["ErrInvalidRoomID","Invalid Room ID"],["ErrRoomNameTaken","Room Name Unavailable"],["ErrContextNotFound","Context Not Found"],["ErrNodeContextNotFound","Metric Context Not Found"],["ErrInvalidContextID","Invalid Metric Context ID"],["ErrForbiddenNotRoomMember","Not a Room Member"],["ErrUntouchableRoom","Room Cannot Be Changed"],["ErrRoomCannotBeDefault","Private Room Cannot Be Set As 
Default"],["ErrInvalidRoomDescription","Invalid Room Description"],["ErrRoomMemberAlreadyExists","Member Already Exists In Room"],["ErrAllowedMembersIncreased","Space member limit"],["ErrPendingInvitationsLimitReached","Pending invitations limit"],["ErrSpaceMembersLimitReached","Space member limit"],["ErrCardDeclined","Card declined"],["Network Error","No internet connection"],["Timeout","Timeout"],["default","Something went wrong"]]),r=function(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:n.get("default");if(!e)return;return n.has(e)?n.get(e):t}},33222:(e,t,a)=>{"use strict";a.d(t,{A:()=>r});const n=36e5,r=function(e){const t=(arguments.length>1&&void 0!==arguments[1]?arguments[1]:new Date)-e;if(t<0)return"0 seconds ago";const a=Math.floor(t/864e5);if(a>1)return"".concat(a," days ago");const r=Math.floor(t/n);if(r>0)return"".concat(r," hours ago");const o=Math.floor(t/6e4);if(o>1)return"".concat(o," mins ago");const i=Math.floor(t/1e3);return"".concat(i," seconds ago")}},27229:(e,t,a)=>{"use strict";a.d(t,{W:()=>n});const n=e=>e.messages&&e.messages.length>0?e.messages[0]:void 0},80542:(e,t,a)=>{"use strict";a.d(t,{H:()=>n,k:()=>r});a(14905),a(98992),a(8872),a(62953);const n=(e,t)=>({isValid:e,message:t});function r(e){return Array.isArray(e)?t=>{return e.reduce((a=t,(e,t)=>{const{isValid:n,message:r}=t(a);if(n)return e;const{messages:o=[]}=e;return{isValid:n,messages:[...o,r]}}),{isValid:!0});var a}:(t=e,e=>{const{isValid:a,message:n}=t(e);return n?{isValid:a,messages:[n]}:{isValid:a}});var t}},44554:(e,t,a)=>{"use strict";a.d(t,{GN:()=>o,Hp:()=>r,Pe:()=>u,Su:()=>l,bj:()=>c,lc:()=>v,oE:()=>m});a(17333),a(3064),a(14905),a(98992),a(54520),a(72577),a(8872),a(62953);const n=!1,r=e=>e.reduce(((e,t)=>t.top+t.height>e?t.top+t.height:e),0),o=function(e,t){let a=arguments.length>2&&void 0!==arguments[2]&&arguments[2];if(!Array.isArray(e)){if(!a)return;return Object.keys(e||{}).reduce(((a,n)=>a||e[n].find((e=>e.id===t))),null)}return e.find((e=>e.id===t))},i=(e,t)=>e.id!==t.id&&(!(e.left+e.width<=t.left)&&(!(e.left>=t.left+t.width)&&(!(e.top+e.height<=t.top)&&!(e.top>=t.top+t.height)))),s=(e,t)=>e.find((e=>i(e,t))),c=(e,t)=>"horizontal"===t?(e=>e.slice(0).sort((function(e,t){return e.left>t.left||e.left===t.left&&e.top>t.top?1:-1})))(e):"vertical"===t?(e=>e.slice(0).sort((function(e,t){return e.top>t.top||e.top===t.top&&e.left>t.left?1:-1})))(e):e,l=function(){return(arguments.length>0&&void 0!==arguments[0]?arguments[0]:[]).reduce(((e,t)=>[...e,{...t}]),[])},d=(e,t,a,n,r,o)=>{const i="horizontal"===r,c="vertical"===r,l=t.static;if(n){n=!1;const d={left:i?Math.max(t.left-a.width,0):a.left,top:c?Math.max(t.top-a.height,0):a.top,width:a.width,height:a.height,id:"-1"},m=s(e,d),g=m&&m.top+m.height>t.top,p=m&&t.left+t.width>m.left;if(!m)return u(e,a,i?d.left:void 0,c?d.top:void 0,n,l,r,o);if(g&&c)return u(e,a,void 0,t.top+1,n,l,r,o);if(g&&null==r)return t.top=a.top,a.top=a.top+a.height,e;if(p&&i)return u(e,t,a.left,void 0,n,l,r,o)}const d=i?a.left+1:void 0,m=c?a.top+1:void 0;return null==d&&null==m?e:u(e,a,i?a.left+1:void 0,c?a.top+1:void 0,n,l,r,o)},u=(e,t,a,r,o,s,u,m,g)=>{if(t.static)return e;if(t.top===r&&t.left===a)return e;const p=t.left,h=t.top;"number"===typeof a&&(t.left=a),"number"===typeof r&&(t.top=r),t.moved=!0;let v=c(e,u);("vertical"===u&&"number"===typeof r?h>=r:"horizontal"===u&&"number"===typeof a&&p>=a)&&(v=v.reverse());const f=((e,t)=>e.filter((e=>i(e,t))))(v,t),A=f.length>0;if(A&&g)return l(e);if(A&&s)return t.left=p,t.top=h,t.moved=!1,e;for(let 
i=0,c=f.length;i{const r=e.filter((e=>e.static)),o=c(e,t),i=Array(e.length);for(let s=0,c=o.length;s{const r=g[n];t[n]+=1;for(let o=e.findIndex((e=>e.id===t.id))+1;ot.top+t.height)break;i(t,s)&&p(e,s,a+t[r],n)}}t[n]=a},h=(e,t,a,n,o,i)=>{const c="horizontal"===a;if("vertical"===a)for(t.top=Math.min(r(e),t.top);t.top>0&&!s(e,t);)t.top--;else if(c)for(;t.left>0&&!s(e,t);)t.left--;let l;for(;(l=s(e,t))&&(null!==a||!i)&&"undefined"!==typeof(null===(d=l)||void 0===d?void 0:d.width);){var d;if(c?p(o,t,l.left+l.width,"left"):p(o,t,l.top+l.height,"top"),c&&t.left+t.width>n)for(t.left=n-t.width,t.top++;t.left>0&&!s(e,t);)t.left--}return t.top=Math.max(t.top,0),t.left=Math.max(t.left,0),t.width=t.width>n?n:t.width,t},v=e=>{let{minWidth:t,minHeight:a,...n}=e;return n}},60247:(e,t,a)=>{"use strict";a.d(t,{A:()=>d});var n=a(58168),r=a(96540),o=a(62061),i=a(6504),s=a(22332),c=a(46741);const l=e=>{var t;let{iconWidth:a="14px",iconHeight:l="14px",...d}=e;const u=(0,c.JT)("dashboard:Update"),m=(0,s.useAttributeValue)("toolboxProps");return r.createElement(i.Button,(0,n.A)({icon:r.createElement(i.default,{svg:o.default,width:a,height:l}),title:"Drag & drop","data-testid":"chartHeaderToolbox-drag",disabled:!u,cursor:null!==(t=m.drag)&&void 0!==t&&t.dragging?"grabbing":"grab"},m.drag||{},d))},d=(0,r.memo)(l)},4659:(e,t,a)=>{"use strict";a.d(t,{A:()=>p});var n=a(58168),r=a(96540),o=a(84976),i=a(8711),s=a(83199),c=a(29217);const l={default:{initial:"primary",visited:"accent",hover:"primary"},tableLink:{initial:"text",visited:"text",hover:"primary"}},d=e=>(0,i.default)(e).withConfig({displayName:"anchor__withAnchor",componentId:"sc-oaxxs6-0"})(["",""],(e=>{let{disabled:t,color:a,hoverColor:n,disabledColor:r,visitedColor:o,theme:i,flavour:c="default"}=e;return t?(e=>{var t,a,n;let{disabledColor:r,color:o,theme:i,flavour:c,visitedColor:d}=e;return"\n color: ".concat((0,s.getColor)(r||o||(null===(t=l[c])||void 0===t?void 0:t.initial))({theme:i}),";\n ").concat(r?"":"opacity: 0.4;","\n pointer-events: none;\n\n & > svg > use {\n fill: ").concat((0,s.getColor)(r||o||(null===(a=l[c])||void 0===a?void 0:a.initial))({theme:i}),";\n }\n \n &:visited {\n color: ").concat((0,s.getColor)(d||r||o||(null===(n=l[c])||void 0===n?void 0:n.visited))({theme:i}),";\n }\n")})({disabledColor:r,color:a,theme:i,flavour:c}):(e=>{var t,a,n,r,o,i;let{color:c,theme:d,hoverColor:u,visitedColor:m,flavour:g}=e;return"\n color: ".concat((0,s.getColor)(c||(null===(t=l[g])||void 0===t?void 0:t.initial))({theme:d}),";\n & > svg > use {\n fill: ").concat((0,s.getColor)(c||(null===(a=l[g])||void 0===a?void 0:a.initial))({theme:d}),";\n }\n &:hover {\n color: ").concat((0,s.getColor)(u||(null===(n=l[g])||void 0===n?void 0:n.hover))({theme:d})," !important;\n & > svg > use {\n fill: ").concat((0,s.getColor)(u||(null===(r=l[g])||void 0===r?void 0:r.hover))({theme:d})," !important;\n }\n }\n &:visited {\n color: ").concat((0,s.getColor)(m||c||(null===(o=l[g])||void 0===o?void 0:o.visited))({theme:d}),";\n & > svg > use {\n fill: ").concat((0,s.getColor)(m||c||(null===(i=l[g])||void 0===i?void 0:i.visited))({theme:d}),";\n }\n }\n cursor:pointer\n")})({color:a,theme:i,hoverColor:n,visitedColor:o,flavour:c})})),u=d("a"),m=d((0,r.forwardRef)(((e,t)=>{let{alignItems:a,disabledColor:i,color:s,flavour:c,hoverColor:l,visitedColor:d,gap:u,strong:m,...g}=e;return r.createElement(o.N_,(0,n.A)({ref:t},g))}))),g=e=>e.preventDefault(),p=(h=e=>{let{Component:t=s.Text,as:a="a",disabled:o,onClick:i,to:c,href:l,isBasic:d,...p}=e;return 
r.createElement(t,(0,n.A)({as:"a"===a?u:m,disabled:o,onClick:o?g:i},c&&!o&&{to:c},l&&!o&&{href:l},p))},e=>{let{boxProps:t,showToolTip:a,content:n,align:o,isBasic:i,...l}=e;return a?r.createElement(c.A,{plain:!0,content:n,align:o,isBasic:i},r.createElement(s.Box,t,r.createElement(h,l))):r.createElement(h,l)});var h},24266:(e,t,a)=>{"use strict";a.d(t,{Ay:()=>u,H4:()=>s,Xc:()=>d});var n=a(41358),r=a(96540),o=a(83199),i=a(4659);const s=e=>{let{title:t="Oops, something unexpected happened!",message:a=""}=e;return r.createElement(o.Flex,{alignItems:"center",justifyContent:"center","data-testid":"error-boundary",flex:!0},r.createElement(o.Flex,{flex:!0,column:!0,alignItems:"center",gap:4,width:{max:115}},r.createElement(o.H3,null,t),!!a&&r.createElement(o.Text,null,a),r.createElement(o.Text,null,"We track these errors automatically, but if the problem persists feel free to contact us with a"," ",r.createElement(i.A,{href:"https://github.com/netdata/netdata-cloud/issues/new/choose",target:"_blank",rel:"noopener noreferrer"},"ticket"),", a"," ",r.createElement(i.A,{href:"https://community.netdata.cloud/",target:"_blank",rel:"noopener noreferrer"},"post in the forum")," ","or through"," ",r.createElement(i.A,{href:"https://discord.com/invite/mPZ6WZKKG2",target:"_blank",rel:"noopener noreferrer"},"Discord"),"."),r.createElement(o.Text,null,"In the meantime you can refresh this page"),r.createElement(o.Button,{label:"Refresh",icon:"refresh",onClick:()=>window.location.reload(),"data-ga":"error-boundary::click-reload::reload-on-error","data-testid":"error-boundary-reload"})))};class c extends r.Component{constructor(e){super(e),this.state={hasError:!1}}static getDerivedStateFromError(e){return{hasError:!0}}componentDidCatch(e,t){console.log(e,t)}render(){return this.state.hasError?r.createElement(s,null):this.props.children}}class l extends n.tH{}l.defaultProps={fallback:s};const d=e=>{const t=window.envSettings.tracking?l:c;return a=>r.createElement(t,{fallback:s},r.createElement(e,a))},u=l},82838:(e,t,a)=>{"use strict";a.d(t,{M:()=>c,O:()=>i});a(25440),a(9920),a(14905),a(98992),a(3949),a(8872),a(62953);const n=[{value:"Dateline Standard Time",abbr:"DST",text:"International Date Line West",utc:["Etc/GMT+12"]},{value:"UTC-11",abbr:"U",text:"Coordinated Universal Time-11",utc:["Etc/GMT+11","Pacific/Midway","Pacific/Niue","Pacific/Pago_Pago"]},{value:"Hawaiian Standard Time",abbr:"HST",text:"Hawaii",utc:["Etc/GMT+10","Pacific/Honolulu","Pacific/Johnston","Pacific/Rarotonga","Pacific/Tahiti"]},{value:"Alaskan Standard Time",abbr:"AKDT",text:"Alaska",utc:["America/Anchorage","America/Juneau","America/Nome","America/Sitka","America/Yakutat"]},{value:"Pacific Standard Time (Mexico)",abbr:"PDT",text:"Baja California",utc:["America/Santa_Isabel"]},{value:"Pacific Standard Time",abbr:"PST",text:"Pacific Time (US & Canada)",utc:["America/Los_Angeles","America/Dawson","America/Tijuana","America/Vancouver","America/Whitehorse","PST8PDT"]},{value:"US Mountain Standard Time",abbr:"UMST",text:"Arizona",utc:["America/Creston","America/Dawson_Creek","America/Hermosillo","America/Phoenix","Etc/GMT+7"]},{value:"Mountain Standard Time (Mexico)",abbr:"MDT",text:"Chihuahua, La Paz, Mazatlan",utc:["America/Chihuahua","America/Mazatlan"]},{value:"Mountain Standard Time",abbr:"MDT",text:"Mountain Time (US & Canada)",utc:["America/Boise","America/Cambridge_Bay","America/Denver","America/Edmonton","America/Inuvik","America/Ojinaga","America/Yellowknife","MST7MDT"]},{value:"Central America Standard 
Time",abbr:"CAST",text:"Central America",utc:["America/Belize","America/Costa_Rica","America/El_Salvador","America/Guatemala","America/Managua","America/Tegucigalpa","Etc/GMT+6","Pacific/Galapagos"]},{value:"Central Standard Time",abbr:"CDT",text:"Central Time (US & Canada)",utc:["America/Chicago","America/Indiana/Knox","America/Indiana/Tell_City","America/Matamoros","America/Menominee","America/North_Dakota/Beulah","America/North_Dakota/Center","America/North_Dakota/New_Salem","America/Rainy_River","America/Rankin_Inlet","America/Resolute","America/Winnipeg","CST6CDT"]},{value:"Central Standard Time (Mexico)",abbr:"CDT",text:"Guadalajara, Mexico City, Monterrey",utc:["America/Bahia_Banderas","America/Cancun","America/Merida","America/Mexico_City","America/Monterrey"]},{value:"Canada Central Standard Time",abbr:"CCST",text:"Saskatchewan",utc:["America/Regina","America/Swift_Current"]},{value:"SA Pacific Standard Time",abbr:"SPST",text:"Bogota, Lima, Quito",utc:["America/Bogota","America/Cayman","America/Coral_Harbour","America/Eirunepe","America/Guayaquil","America/Jamaica","America/Lima","America/Panama","America/Rio_Branco","Etc/GMT+5"]},{value:"Eastern Standard Time",abbr:"EDT",text:"Eastern Time (US & Canada)",utc:["America/Detroit","America/Havana","America/Indiana/Petersburg","America/Indiana/Vincennes","America/Indiana/Winamac","America/Iqaluit","America/Kentucky/Monticello","America/Louisville","America/Montreal","America/Nassau","America/New_York","America/Nipigon","America/Pangnirtung","America/Port-au-Prince","America/Thunder_Bay","America/Toronto","EST5EDT"]},{value:"US Eastern Standard Time",abbr:"UEDT",text:"Indiana (East)",utc:["America/Indiana/Marengo","America/Indiana/Vevay","America/Indianapolis"]},{value:"Venezuela Standard Time",abbr:"VST",text:"Caracas",utc:["America/Caracas"]},{value:"Paraguay Standard Time",abbr:"PYT",text:"Asuncion",utc:["America/Asuncion"]},{value:"Atlantic Standard Time",abbr:"ADT",text:"Atlantic Time (Canada)",utc:["America/Glace_Bay","America/Goose_Bay","America/Halifax","America/Moncton","America/Thule","Atlantic/Bermuda"]},{value:"Central Brazilian Standard Time",abbr:"CBST",text:"Cuiaba",utc:["America/Campo_Grande","America/Cuiaba"]},{value:"SA Western Standard Time",abbr:"SWST",text:"Georgetown, La Paz, Manaus, San Juan",utc:["America/Anguilla","America/Antigua","America/Aruba","America/Barbados","America/Blanc-Sablon","America/Boa_Vista","America/Curacao","America/Dominica","America/Grand_Turk","America/Grenada","America/Guadeloupe","America/Guyana","America/Kralendijk","America/La_Paz","America/Lower_Princes","America/Manaus","America/Marigot","America/Martinique","America/Montserrat","America/Port_of_Spain","America/Porto_Velho","America/Puerto_Rico","America/Santo_Domingo","America/St_Barthelemy","America/St_Kitts","America/St_Lucia","America/St_Thomas","America/St_Vincent","America/Tortola","Etc/GMT+4"]},{value:"Pacific SA Standard Time",abbr:"PSST",text:"Santiago",utc:["America/Santiago","Antarctica/Palmer"]},{value:"Newfoundland Standard Time",abbr:"NDT",text:"Newfoundland",utc:["America/St_Johns"]},{value:"E. 
South America Standard Time",abbr:"ESAST",text:"Brasilia",utc:["America/Sao_Paulo"]},{value:"Argentina Standard Time",abbr:"AST",text:"Buenos Aires",utc:["America/Argentina/La_Rioja","America/Argentina/Rio_Gallegos","America/Argentina/Salta","America/Argentina/San_Juan","America/Argentina/San_Luis","America/Argentina/Tucuman","America/Argentina/Ushuaia","America/Buenos_Aires","America/Catamarca","America/Cordoba","America/Jujuy","America/Mendoza"]},{value:"SA Eastern Standard Time",abbr:"SEST",text:"Cayenne, Fortaleza",utc:["America/Araguaina","America/Belem","America/Cayenne","America/Fortaleza","America/Maceio","America/Paramaribo","America/Recife","America/Santarem","Antarctica/Rothera","Atlantic/Stanley","Etc/GMT+3"]},{value:"Greenland Standard Time",abbr:"GDT",text:"Greenland",utc:["America/Godthab"]},{value:"Montevideo Standard Time",abbr:"MST",text:"Montevideo",utc:["America/Montevideo"]},{value:"Bahia Standard Time",abbr:"BST",text:"Salvador",utc:["America/Bahia"]},{value:"UTC-02",abbr:"U",text:"Coordinated Universal Time-02",utc:["America/Noronha","Atlantic/South_Georgia","Etc/GMT+2"]},{value:"Mid-Atlantic Standard Time",abbr:"MDT",text:"Mid-Atlantic - Old",utc:[]},{value:"Azores Standard Time",abbr:"ADT",text:"Azores",utc:["America/Scoresbysund","Atlantic/Azores"]},{value:"Cape Verde Standard Time",abbr:"CVST",text:"Cape Verde Is.",utc:["Atlantic/Cape_Verde","Etc/GMT+1"]},{value:"Morocco Standard Time",abbr:"MDT",text:"Casablanca",utc:["Africa/Casablanca","Africa/El_Aaiun"]},{value:"UTC",abbr:"UTC",text:"Coordinated Universal Time",utc:["America/Danmarkshavn","Etc/GMT"]},{value:"GMT Standard Time",abbr:"GMT",text:"Edinburgh, London",utc:["Europe/Isle_of_Man","Europe/Guernsey","Europe/Jersey","Europe/London"]},{value:"GMT Standard Time",abbr:"GDT",text:"Dublin, Lisbon",utc:["Atlantic/Canary","Atlantic/Faeroe","Atlantic/Madeira","Europe/Dublin","Europe/Lisbon"]},{value:"Greenwich Standard Time",abbr:"GST",text:"Monrovia, Reykjavik",utc:["Africa/Abidjan","Africa/Accra","Africa/Bamako","Africa/Banjul","Africa/Bissau","Africa/Conakry","Africa/Dakar","Africa/Freetown","Africa/Lome","Africa/Monrovia","Africa/Nouakchott","Africa/Ouagadougou","Africa/Sao_Tome","Atlantic/Reykjavik","Atlantic/St_Helena"]},{value:"W. Europe Standard Time",abbr:"WEDT",text:"Amsterdam, Berlin, Bern, Rome, Stockholm, Vienna",utc:["Arctic/Longyearbyen","Europe/Amsterdam","Europe/Andorra","Europe/Berlin","Europe/Busingen","Europe/Gibraltar","Europe/Luxembourg","Europe/Malta","Europe/Monaco","Europe/Oslo","Europe/Rome","Europe/San_Marino","Europe/Stockholm","Europe/Vaduz","Europe/Vatican","Europe/Vienna","Europe/Zurich"]},{value:"Central Europe Standard Time",abbr:"CEDT",text:"Belgrade, Bratislava, Budapest, Ljubljana, Prague",utc:["Europe/Belgrade","Europe/Bratislava","Europe/Budapest","Europe/Ljubljana","Europe/Podgorica","Europe/Prague","Europe/Tirane"]},{value:"Romance Standard Time",abbr:"RDT",text:"Brussels, Copenhagen, Madrid, Paris",utc:["Africa/Ceuta","Europe/Brussels","Europe/Copenhagen","Europe/Madrid","Europe/Paris"]},{value:"Central European Standard Time",abbr:"CEDT",text:"Sarajevo, Skopje, Warsaw, Zagreb",utc:["Europe/Sarajevo","Europe/Skopje","Europe/Warsaw","Europe/Zagreb"]},{value:"W. 
Central Africa Standard Time",abbr:"WCAST",text:"West Central Africa",utc:["Africa/Algiers","Africa/Bangui","Africa/Brazzaville","Africa/Douala","Africa/Kinshasa","Africa/Lagos","Africa/Libreville","Africa/Luanda","Africa/Malabo","Africa/Ndjamena","Africa/Niamey","Africa/Porto-Novo","Africa/Tunis","Etc/GMT-1"]},{value:"Namibia Standard Time",abbr:"NST",text:"Windhoek",utc:["Africa/Windhoek"]},{value:"GTB Standard Time",abbr:"GDT",text:"Athens, Bucharest",utc:["Europe/Athens","Asia/Nicosia","Europe/Bucharest","Europe/Chisinau"]},{value:"Middle East Standard Time",abbr:"MEDT",text:"Beirut",utc:["Asia/Beirut"]},{value:"Egypt Standard Time",abbr:"EST",text:"Cairo",utc:["Africa/Cairo"]},{value:"Syria Standard Time",abbr:"SDT",text:"Damascus",utc:["Asia/Damascus"]},{value:"E. Europe Standard Time",abbr:"EEDT",text:"E. Europe",utc:["Asia/Nicosia","Europe/Athens","Europe/Bucharest","Europe/Chisinau","Europe/Helsinki","Europe/Kiev","Europe/Mariehamn","Europe/Nicosia","Europe/Riga","Europe/Sofia","Europe/Tallinn","Europe/Uzhgorod","Europe/Vilnius","Europe/Zaporozhye"]},{value:"South Africa Standard Time",abbr:"SAST",text:"Harare, Pretoria",utc:["Africa/Blantyre","Africa/Bujumbura","Africa/Gaborone","Africa/Harare","Africa/Johannesburg","Africa/Kigali","Africa/Lubumbashi","Africa/Lusaka","Africa/Maputo","Africa/Maseru","Africa/Mbabane","Etc/GMT-2"]},{value:"FLE Standard Time",abbr:"FDT",text:"Helsinki, Kyiv, Riga, Sofia, Tallinn, Vilnius",utc:["Europe/Helsinki","Europe/Kiev","Europe/Mariehamn","Europe/Riga","Europe/Sofia","Europe/Tallinn","Europe/Uzhgorod","Europe/Vilnius","Europe/Zaporozhye"]},{value:"Turkey Standard Time",abbr:"TDT",text:"Istanbul",utc:["Europe/Istanbul"]},{value:"Israel Standard Time",abbr:"JDT",text:"Jerusalem",utc:["Asia/Jerusalem"]},{value:"Libya Standard Time",abbr:"LST",text:"Tripoli",utc:["Africa/Tripoli"]},{value:"Jordan Standard Time",abbr:"JST",text:"Amman",utc:["Asia/Amman"]},{value:"Arabic Standard Time",abbr:"AST",text:"Baghdad",utc:["Asia/Baghdad"]},{value:"Kaliningrad Standard Time",abbr:"KST",text:"Kaliningrad",utc:["Europe/Kaliningrad"]},{value:"Arab Standard Time",abbr:"AST",text:"Kuwait, Riyadh",utc:["Asia/Aden","Asia/Bahrain","Asia/Kuwait","Asia/Qatar","Asia/Riyadh"]},{value:"E. Africa Standard Time",abbr:"EAST",text:"Nairobi",utc:["Africa/Addis_Ababa","Africa/Asmera","Africa/Dar_es_Salaam","Africa/Djibouti","Africa/Juba","Africa/Kampala","Africa/Khartoum","Africa/Mogadishu","Africa/Nairobi","Antarctica/Syowa","Etc/GMT-3","Indian/Antananarivo","Indian/Comoro","Indian/Mayotte"]},{value:"Moscow Standard Time",abbr:"MSK",text:"Moscow, St. 
Petersburg, Volgograd, Minsk",utc:["Europe/Kirov","Europe/Moscow","Europe/Simferopol","Europe/Volgograd","Europe/Minsk"]},{value:"Samara Time",abbr:"SAMT",text:"Samara, Ulyanovsk, Saratov",utc:["Europe/Astrakhan","Europe/Samara","Europe/Ulyanovsk"]},{value:"Iran Standard Time",abbr:"IDT",text:"Tehran",utc:["Asia/Tehran"]},{value:"Arabian Standard Time",abbr:"AST",text:"Abu Dhabi, Muscat",utc:["Asia/Dubai","Asia/Muscat","Etc/GMT-4"]},{value:"Azerbaijan Standard Time",abbr:"ADT",text:"Baku",utc:["Asia/Baku"]},{value:"Mauritius Standard Time",abbr:"MST",text:"Port Louis",utc:["Indian/Mahe","Indian/Mauritius","Indian/Reunion"]},{value:"Georgian Standard Time",abbr:"GET",text:"Tbilisi",utc:["Asia/Tbilisi"]},{value:"Caucasus Standard Time",abbr:"CST",text:"Yerevan",utc:["Asia/Yerevan"]},{value:"Afghanistan Standard Time",abbr:"AST",text:"Kabul",utc:["Asia/Kabul"]},{value:"West Asia Standard Time",abbr:"WAST",text:"Ashgabat, Tashkent",utc:["Antarctica/Mawson","Asia/Aqtau","Asia/Aqtobe","Asia/Ashgabat","Asia/Dushanbe","Asia/Oral","Asia/Samarkand","Asia/Tashkent","Etc/GMT-5","Indian/Kerguelen","Indian/Maldives"]},{value:"Yekaterinburg Time",abbr:"YEKT",text:"Yekaterinburg",utc:["Asia/Yekaterinburg"]},{value:"Pakistan Standard Time",abbr:"PKT",text:"Islamabad, Karachi",utc:["Asia/Karachi"]},{value:"India Standard Time",abbr:"IST",text:"Chennai, Kolkata, Mumbai, New Delhi",utc:["Asia/Kolkata"]},{value:"India Standard Time",abbr:"IST",text:"Chennai, Kolkata, Mumbai, New Delhi",utc:["Asia/Calcutta"]},{value:"Sri Lanka Standard Time",abbr:"SLST",text:"Sri Jayawardenepura",utc:["Asia/Colombo"]},{value:"Nepal Standard Time",abbr:"NST",text:"Kathmandu",utc:["Asia/Kathmandu"]},{value:"Central Asia Standard Time",abbr:"CAST",text:"Nur-Sultan (Astana)",utc:["Antarctica/Vostok","Asia/Almaty","Asia/Bishkek","Asia/Qyzylorda","Asia/Urumqi","Etc/GMT-6","Indian/Chagos"]},{value:"Bangladesh Standard Time",abbr:"BST",text:"Dhaka",utc:["Asia/Dhaka","Asia/Thimphu"]},{value:"Myanmar Standard Time",abbr:"MST",text:"Yangon (Rangoon)",utc:["Asia/Rangoon","Indian/Cocos"]},{value:"SE Asia Standard Time",abbr:"SAST",text:"Bangkok, Hanoi, Jakarta",utc:["Antarctica/Davis","Asia/Bangkok","Asia/Hovd","Asia/Jakarta","Asia/Phnom_Penh","Asia/Pontianak","Asia/Saigon","Asia/Vientiane","Etc/GMT-7","Indian/Christmas"]},{value:"N. Central Asia Standard Time",abbr:"NCAST",text:"Novosibirsk",utc:["Asia/Novokuznetsk","Asia/Novosibirsk","Asia/Omsk"]},{value:"China Standard Time",abbr:"CST",text:"Beijing, Chongqing, Hong Kong, Urumqi",utc:["Asia/Hong_Kong","Asia/Macau","Asia/Shanghai"]},{value:"North Asia Standard Time",abbr:"NAST",text:"Krasnoyarsk",utc:["Asia/Krasnoyarsk"]},{value:"Singapore Standard Time",abbr:"MPST",text:"Kuala Lumpur, Singapore",utc:["Asia/Brunei","Asia/Kuala_Lumpur","Asia/Kuching","Asia/Makassar","Asia/Manila","Asia/Singapore","Etc/GMT-8"]},{value:"W. Australia Standard Time",abbr:"WAST",text:"Perth",utc:["Australia/Perth","Antarctica/Casey"]},{value:"Taipei Standard Time",abbr:"TST",text:"Taipei",utc:["Asia/Taipei"]},{value:"Ulaanbaatar Standard Time",abbr:"UST",text:"Ulaanbaatar",utc:["Asia/Choibalsan","Asia/Ulaanbaatar"]},{value:"North Asia East Standard Time",abbr:"NAEST",text:"Irkutsk",utc:["Asia/Irkutsk"]},{value:"Japan Standard Time",abbr:"JST",text:"Osaka, Sapporo, Tokyo",utc:["Asia/Dili","Asia/Jayapura","Asia/Tokyo","Etc/GMT-9","Pacific/Palau"]},{value:"Korea Standard Time",abbr:"KST",text:"Seoul",utc:["Asia/Pyongyang","Asia/Seoul"]},{value:"Cen. 
Australia Standard Time",abbr:"CAST",text:"Adelaide",utc:["Australia/Adelaide","Australia/Broken_Hill"]},{value:"AUS Central Standard Time",abbr:"ACST",text:"Darwin",utc:["Australia/Darwin"]},{value:"E. Australia Standard Time",abbr:"EAST",text:"Brisbane",utc:["Australia/Brisbane","Australia/Lindeman"]},{value:"AUS Eastern Standard Time",abbr:"AEST",text:"Canberra, Melbourne, Sydney",utc:["Australia/Melbourne","Australia/Sydney"]},{value:"West Pacific Standard Time",abbr:"WPST",text:"Guam, Port Moresby",utc:["Antarctica/DumontDUrville","Etc/GMT-10","Pacific/Guam","Pacific/Port_Moresby","Pacific/Saipan","Pacific/Truk"]},{value:"Tasmania Standard Time",abbr:"TST",text:"Hobart",utc:["Australia/Currie","Australia/Hobart"]},{value:"Yakutsk Standard Time",abbr:"YST",text:"Yakutsk",utc:["Asia/Chita","Asia/Khandyga","Asia/Yakutsk"]},{value:"Central Pacific Standard Time",abbr:"CPST",text:"Solomon Is., New Caledonia",utc:["Etc/GMT-11"]},{value:"Vladivostok Standard Time",abbr:"VST",text:"Vladivostok",utc:["Asia/Sakhalin","Asia/Ust-Nera","Asia/Vladivostok"]},{value:"New Zealand Standard Time",abbr:"NZST",text:"Auckland, Wellington",utc:["Antarctica/McMurdo","Pacific/Auckland"]},{value:"UTC+12",abbr:"U",text:"Coordinated Universal Time+12",utc:["Etc/GMT-12","Pacific/Funafuti","Pacific/Kwajalein","Pacific/Majuro","Pacific/Nauru","Pacific/Tarawa","Pacific/Wake","Pacific/Wallis"]},{value:"Fiji Standard Time",abbr:"FST",text:"Fiji",utc:["Pacific/Fiji"]},{value:"Magadan Standard Time",abbr:"MST",text:"Magadan",utc:["Asia/Anadyr","Asia/Kamchatka","Asia/Magadan","Asia/Srednekolymsk"]},{value:"Kamchatka Standard Time",abbr:"KDT",text:"Petropavlovsk-Kamchatsky - Old",utc:["Asia/Kamchatka"]},{value:"Tonga Standard Time",abbr:"TST",text:"Nuku'alofa",utc:["Etc/GMT-13","Pacific/Enderbury","Pacific/Fakaofo","Pacific/Tongatapu"]},{value:"Samoa Standard Time",abbr:"SST",text:"Samoa",utc:["Pacific/Apia"]}],r=new Date,o=()=>{try{const e=new Intl.DateTimeFormat("default",{});return e.resolvedOptions().timeZone||"Etc/GMT"}catch(e){return"Etc/GMT"}},i=(()=>{const e={};return n.reduce(((t,a)=>{const{utc:n}=a;try{const o=new Intl.DateTimeFormat("fr",{timeZone:n[0],timeZoneName:"short"}).format(r),[i]=o.match(/[\u2212+].+/)||[],s=(e=>e?e.replace("\u2212","-"):"")(i);if(e[s])return t.concat({...a,offset:e[s]});const c=(e=>{if(!e)return"+0";const t=e.split(":");return t.length>1?"".concat(t[0]).concat((t[1]/60).toString().substr(1)):t[0]})(s);return e[s]=c,t.concat({...a,offset:c})}catch(o){return t}}),[])})().sort(((e,t)=>e.offset-t.offset)),s=(e=>e.reduce(((e,t)=>{let{utc:a,...n}=t;return a.forEach((t=>e[t]={...n,utc:t})),e}),{}))(i),c=(e,t)=>{const a=t||("default"===e?o():e);return s[a in s?a:o()]||{}}},57419:(e,t,a)=>{"use strict";a.r(t),a.d(t,{default:()=>u,isTryingToJoinWorkspace:()=>l});a(62953),a(48408);var n=a(96540),r=a(47767),o=a(78969),i=a(28738),s=a(22292),c=a(24198);const l=function(){return(arguments.length>0&&void 0!==arguments[0]?arguments[0]:"").includes("/join-space")},d="Successfully joined space!",u=()=>{const e=(0,s.uW)("isAnonymous"),t=(0,r.Zp)(),a=new URLSearchParams(window.location.hash.substr(1));return(0,n.useEffect)((()=>{if(e)return;const n=a.has("error_msg_key")?decodeURIComponent(a.get("error_msg_key")||""):null;if(n){const e=a.has("error_message")?decodeURIComponent(a.get("error_message")||""):null;n===o.vK?(0,c.Fw)(d):(0,c.Fw)(e||"Error joining space",!0),t("/spaces",{replace:!0})}else(0,c.Fw)(d)}),[e]),n.createElement(i.A,{title:"Adding you to the space..."})}},28738:(e,t,a)=>{"use 
strict";a.d(t,{A:()=>d,m:()=>l});var n=a(58168),r=a(96540),o=a(8711),i=a(83199);const s=(0,o.keyframes)(["from{opacity:0.4;}to{opacity:1;}"]),c=(0,o.default)(i.Icon).withConfig({displayName:"loader__StyledIcon",componentId:"sc-a76ek6-0"})(["width:",";height:",";animation:",";"],(e=>e.width),(e=>e.height),(e=>{let{animate:t}=e;return t?(0,o.css)([""," 1.6s ease-in infinite"],s):""})),l=e=>{let{title:t="Loading",width:a="208px",height:o="177px",...i}=e;return r.createElement(c,(0,n.A)({name:"netdata",color:"primary",title:t,"data-testid":"loading-logo",width:a,height:o},i))},d=e=>{let{title:t,body:a,iconProps:o,animate:s=!0,...c}=e;return r.createElement(i.Flex,(0,n.A)({column:!0,height:"100vh",background:"mainBackground",width:"100%",justifyContent:"center",alignItems:"center"},c),r.createElement(l,(0,n.A)({},o,{animate:s})),t&&r.createElement(i.H3,{color:"text",margin:[1,0,0]},t),a&&r.createElement(i.Text,{color:"text",margin:[4.5,0,0]},a))}},13871:(e,t,a)=>{"use strict";a.d(t,{Eg:()=>c,UI:()=>d,gi:()=>l});var n=a(58168),r=a(96540),o=a(83199),i=a(79731);const s={success:"success",error:"error",warning:"warning",default:"border"},c=e=>{const{header:t,text:a,icon:n,renderContent:i,success:c,error:l,warning:d,closeToast:u}=e,m=(c?"success":l&&"error")||d&&"warning"||"default";return r.createElement(o.Flex,{padding:[2],alignItems:"center",justifyContent:"between"},r.createElement(o.Flex,{alignItems:"center",gap:3},n&&r.createElement(o.Flex,{flex:!1},r.createElement(o.Icon,{color:s[m],name:n,size:"large"})),r.createElement(o.Flex,{column:!0,gap:1},t&&r.createElement(o.H5,{color:s[m]},t),a&&r.createElement(o.TextSmall,{color:s[m]},a),i&&i(e))),r.createElement(o.Flex,null,r.createElement(o.Icon,{name:"x",size:"large",color:s[m],onClick:u})))},l=e=>{let{errorCode:t,errorMessage:a,errorMsgKey:o,...s}=e;return r.createElement(c,(0,n.A)({error:!0,icon:"error",text:a,header:(0,i.o)(o)},s))},d=e=>r.createElement(c,e)},24198:(e,t,a)=>{"use strict";a.d(t,{$j:()=>p,AM:()=>o,Fw:()=>d,R9:()=>s,X7:()=>u,ZM:()=>m,jE:()=>g,mw:()=>l,r0:()=>i,sb:()=>c});var n=a(78217),r=a(13871);const o=e=>{const t={header:"Nodes",text:e},a=(0,r.UI)({...t,success:!0,icon:"node"});n.A.success(a,{context:"showNodesNotification"})},i=e=>{const t={header:"Rooms",text:e},a=(0,r.UI)({...t,success:!0,icon:"space"});n.A.success(a,{context:"showRoomsNotification"})},s=function(e){const t={header:"Room users",text:e||(arguments.length>1&&void 0!==arguments[1]&&arguments[1]?"Users left the room!":"Users were added to room!")},a=(0,r.UI)({...t,success:!0,icon:"space"});n.A.success(a,{context:"showUsersInRoomNotification"})},c=function(e){const t={header:"Space users",text:e||(arguments.length>1&&void 0!==arguments[1]&&arguments[1]?"Users left the space!":"Users were added to space!")},a=(0,r.UI)({...t,success:!0,icon:"space"});n.A.success(a,{context:"showUsersInSpaceNotification"})},l=function(e){const t={header:"Rooms",text:e||(arguments.length>1&&void 0!==arguments[1]&&arguments[1]?"Rooms removed from Space!":"Rooms were added to Space!")},a=(0,r.UI)({...t,success:!0,icon:"space"});n.A.success(a,{context:"showRoomsInSpaceNotification"})},d=function(e){let t=arguments.length>1&&void 0!==arguments[1]&&arguments[1];const a={header:t?"Error joining space":"Welcome!",text:e},o=(0,r.UI)({...a,success:!t,icon:"gear"});t?n.A.error(o,{context:"showJoiningSpaceNotification"}):n.A.success(o,{context:"showJoiningSpaceNotification"})},u=function(e){let t=arguments.length>1&&void 0!==arguments[1]&&arguments[1];const a={header:t?"Space removed":"Space 
added",text:e},o=(0,r.UI)({...a,success:!t,icon:"gear"});n.A.success(o,{context:"showSpaceNotification"})},m=function(e){let t=arguments.length>1&&void 0!==arguments[1]&&arguments[1];const a={header:t?"Error creating room":"Rooms",text:e},o=(0,r.UI)({...a,success:!t,icon:"gear"});t?n.A.error(o,{context:"showRoomCreationNotification"}):n.A.success(o,{context:"showRoomCreationNotification"})},g=function(e){let t=arguments.length>1&&void 0!==arguments[1]&&arguments[1];const a={text:e},o=(0,r.UI)({...a,text:e,success:!t,icon:"chart_added"});t?n.A.error(o,{context:"showDashboardCreatedNotification"}):n.A.success(o,{context:"showDashboardCreatedNotification"})},p=()=>{const e=(0,r.UI)({header:"Dashboard",text:"Invalid dashboard link. Please double-check the dashboard you are trying to access.",success:!1});n.A.error(e,{context:"showInvalidDashboardSlugNotification"})}},78217:(e,t,a)=>{"use strict";a.d(t,{A:()=>i});var n=a(99571);const r={position:n.oR.POSITION.BOTTOM_RIGHT,autoClose:1e4,pauseOnFocusLoss:!1,closeOnClick:!1},o={},i={success:function(e){let{context:t,...a}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};t&&o[t]&&(n.oR.dismiss(o[t]),delete o[t]),o[t]=n.oR.success(e,{...r,...a})},warning:function(e){let{context:t,...a}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};t&&o[t]&&(n.oR.dismiss(o[t]),delete o[t]),o[t]=n.oR.warn(e,{...r,...a})},error:function(e){let{context:t,...a}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};t&&o[t]&&(n.oR.dismiss(o[t]),delete o[t]),o[t]=n.oR.error(e,{...r,...a})},dismiss:e=>n.oR.dismiss(e)}},80925:(e,t,a)=>{"use strict";a.d(t,{A:()=>v,e:()=>h});a(9920),a(98992),a(3949),a(62953);var n=a(96540),r=a(5668),o=a(45467),i=a(24830),s=a(28973),c=a(27467),l=a(3914),d=a(64118),u=a(52768),m=a(38819),g=a(82838);const p=(0,n.createContext)(null),h=()=>(0,n.useContext)(p),v=e=>{var t;let{children:a}=e,h=(0,r.xd)("theme");h=h&&"unspecified"!==h?h:"dark";const[{after:v,before:f,utc:A,highlight:y,forcePlay:b},S]=(0,c.N9)(),k=function(){let{after:e,before:t}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return(0,n.useMemo)((()=>{if(e=+e,t=+t,!isNaN(e)&&!isNaN(t)&&e&&t)return{type:"highlight",range:[Math.floor(e/1e3),Math.ceil(t/1e3)]}}),[e,t])}(y),[I]=(0,d.KB)(),w=(0,n.useMemo)((()=>({after:v<0?v:Math.floor(v/1e3),before:v<0?0:Math.ceil(f/1e3)})),[v,f]),E=(0,l.dg)(),x=(0,n.useMemo)((()=>{const e=(()=>{let{utc:e}=(0,m.PP)();return(0,g.M)("default",e).utc})(),t=(0,i.A)({attributes:{theme:h,timezone:e,overlays:{...k&&{highlight:k}},autofetchOnWindowBlur:b,...w,agent:E,containerWidth:.8*window.innerWidth,expandable:!1}});return window.netdataSDK=t,t}),[]);(0,o.A)((()=>{x.getNodes().forEach((e=>e.updateAttribute("theme",h)))}),[h]),(0,o.A)((()=>{const e=x.getRoot().getAttribute("timezone");A!==e&&x.getRoot().getApplicableNodes({syncTimezone:!0}).forEach((e=>e.updateAttributes({timezone:A})))}),[A]),(0,o.A)((()=>{const e=x.getRoot().getAttribute("overlays");if(k)x.getRoot().updateAttribute("overlays",{...e,highlight:k}),x.getRoot().getApplicableNodes({syncHighlight:!0}).forEach((e=>e.updateAttribute("overlays",{...e.getAttribute("overlays"),highlight:k})));else{const t={...e};delete t.highlight,x.getRoot().updateAttribute("overlays",t),x.getRoot().getApplicableNodes({syncHighlight:!0}).forEach((e=>{const t={...e.getAttribute("overlays")};delete t.highlight,e.updateAttribute("overlays",t)}))}}),[k]),(0,n.useEffect)((()=>{if(!I)return;x.trigger("goToLink",null,I.linkToGo);const e=x.getRoot().getNode({id:I.context});if(e){const 
t=e.getAttribute("overlays");e.updateAttribute("overlays",{...t,alarm:{type:"alarm",status:I.status,value:I.formattedLastStatusChangeValue,when:I.lastStatusChange}}),e.updateAttributes({selectedInstances:["".concat(I.instance,"@").concat(I.nodeId)],selectedNodes:[I.nodeId]})}const t=!!I&&x.getRoot().on("chartLoaded",(e=>{const a=e.getAttribute("id");if(I.context!==a)return;const n=e.getAttribute("overlays");e.updateAttribute("overlays",{...n,alarm:{type:"alarm",status:I.status,value:I.formattedLastStatusChangeValue,when:I.lastStatusChange}}),e.updateAttributes({selectedInstances:["".concat(I.instance,"@").concat(I.nodeId)],selectedNodes:[I.nodeId]}),t()}));return(0,s.unregister)(t)}),[I]),(0,n.useEffect)((()=>{const{after:e,before:t}=w;x.getRoot().moveX(e,t)}),[w]),(0,u.Vt)(I);const C=localStorage.getItem("netdataJWT"),T=null===(t=window.localNetdataRegistry)||void 0===t?void 0:t.mg,N=localStorage.getItem("agentJWT:".concat(T)),R=E?N:C;return(0,n.useEffect)((()=>{x.getNodes().forEach((e=>{var t;return e.updateAttributes({...E&&null!==(t=window.localNetdataRegistry)&&void 0!==t&&t.xNetdataAuthHeader?{xNetdataBearer:R,bearer:null}:{xNetdataBearer:null,bearer:R},agent:E})}))}),[E,R]),(0,n.useEffect)((()=>(0,s.unregister)(x.getRoot().onAttributeChange("after",(()=>{const{after:e,before:t}=x.getRoot().getAttributes();S({after:e<0?e:1e3*e,before:e<0?0:1e3*t})})),x.getRoot().onAttributeChange("overlays",((e,t)=>{const{highlight:a}=e;if(a===t.highlight)return;const n=null===a||void 0===a?void 0:a.range;if(n){const[e,t]=n;S({highlight:{after:1e3*e,before:1e3*t}})}else S({highlight:{after:null,before:null}})})))),[x]),x?n.createElement(p.Provider,{value:x},a):a}},74618:(e,t,a)=>{"use strict";a.d(t,{z:()=>i});var n=a(58168),r=a(96540),o=a(83199);const i=e=>{let{children:t,isSubmodal:a,onClose:i,title:s,...c}=e;return r.createElement(o.Flex,{justifyContent:"between",background:"modalHeaderBackground",alignItems:"center",padding:[3]},r.createElement(o.Flex,{alignItems:"center"},!!i&&r.createElement(o.Button,{neutral:!0,flavour:"borderless",icon:a?"chevron_left":"x",onClick:()=>i(),"data-testid":"sidebarModalHeader_".concat(a?"chevron_left":"close_button")}),r.createElement(o.H4,(0,n.A)({color:"text"},c),s)),t)}},15327:(e,t,a)=>{"use strict";a.d(t,{CG:()=>u,GO:()=>l,Yv:()=>d});var n=a(58168),r=(a(62953),a(96540)),o=a(8711),i=a(87659),s=a(83199);const c=(0,r.forwardRef)(((e,t)=>{let{toggle:a,isOpen:o=!0,closeOnClickOutside:i=!0,closeOnEsc:c=!0,full:l="vertical",backdrop:d=!0,position:u="right",...m}=e;return o?r.createElement(s.Layer,{position:u,full:l,backdrop:d,shadow:!0,onClickOutside:i?a:void 0,onEsc:c?a:void 0},r.createElement(s.Flex,(0,n.A)({column:!0,background:"mainBackground",width:{max:"50vw",base:150}},m,{ref:t}))):null})),l=e=>{let{onClose:t,defaultIsOpen:a=!0,...o}=e;const[s,l]=(0,i.A)(a);return(0,r.useEffect)((()=>{s||t&&t()}),[s]),r.createElement(c,(0,n.A)({isOpen:s,toggle:l},o))},d=(0,o.default)(s.Flex).attrs((e=>({column:!0,background:"mainBackground",padding:[3,3,0],flex:"1",gap:3,...e}))).withConfig({displayName:"sidebar__SidebarContent",componentId:"sc-tmwerm-0"})([""]),u=(0,o.default)(s.Flex).attrs({justifyContent:"end",alignItems:"center",padding:[3],background:"mainBackground",border:{side:"top",color:"borderSecondary"}}).withConfig({displayName:"sidebar__SidebarFooter",componentId:"sc-tmwerm-1"})([""])},45765:(e,t,a)=>{"use strict";a.d(t,{U:()=>i});var n=a(58168),r=a(96540),o=a(83199);const i=e=>{let{children:t,...a}=e;return 
r.createElement(o.Flex,(0,n.A)({justifyContent:"between",background:"mainBackground",alignItems:"center",padding:[3,3,0]},a),r.createElement(o.H3,null,t))}},55337:(e,t,a)=>{"use strict";a.d(t,{A:()=>d,g:()=>l});var n=a(58168),r=a(96540),o=a(8711),i=a(83199),s=a(5668);const c={light:i.DefaultTheme,dark:i.DarkTheme,unspecified:i.DarkTheme},l=(e,t)=>a=>{let{...n}=a;return r.createElement(o.ThemeProvider,{theme:c[t]||c.unspecified},r.createElement(e,n))},d=e=>{const t=(0,s.xd)("theme"),a=(0,s.xd)("customTheme"),i=(0,r.useMemo)((()=>"blue"===t?{...c.dark,colors:{...c.dark.colors,...a||{}}}:e.theme?c[e.theme]||c.unspecified:c[t]||c.unspecified),[t,e.theme,a]);return r.createElement(o.ThemeProvider,(0,n.A)({},e,{theme:i}))}},29217:(e,t,a)=>{"use strict";a.d(t,{A:()=>c});var n=a(58168),r=a(96540),o=a(83199);const i=e=>{let{children:t,background:a,isBasic:i,padding:s}=e;return t?r.createElement(o.Flex,(0,n.A)({padding:s||[1.5,2],margin:[2],background:a||"tooltip",round:1,alignSelf:"start"},!i&&{width:{max:"300px"}}),r.createElement(o.TextSmall,{color:"tooltipText"},t)):null},s=(e,t)=>{let{background:a,isBasic:n,padding:o}=t;const s="function"===typeof e?e():e;return"string"===typeof e||null!==e&&void 0!==e&&n?r.createElement(i,{background:a,isBasic:n,padding:o},s):s},c=e=>{let{children:t,content:a,isBasic:i,...c}=e;const l=(0,r.useCallback)((()=>s(a,{isBasic:i})),[a]);return a?r.createElement(o.Tooltip,(0,n.A)({plain:!0,animation:!0,content:l,zIndex:1e3},c),t):t}},68831:(e,t,a)=>{"use strict";a.d(t,{A:()=>n});const n={assetsBaseURL:"".concat(window.envSettings.cloudUrl,"/static"),apiBaseURL:window.envSettings.apiUrl,demoUrl:"".concat(window.envSettings.cloudUrl,"/spaces/netdata-demo"),demoSlug:window.envSettings.demoSlug,demoFavourites:window.envSettings.demoFavourites,demoDefaultRoomViews:{default:"overview"},pollingInterval:3e4,defaultRoomView:"home",integrationsView:"integrate-anything"}},78969:(e,t,a)=>{"use strict";a.d(t,{BX:()=>l,I7:()=>c,J4:()=>d,P8:()=>r,Q$:()=>u,Qy:()=>p,Wd:()=>m,_9:()=>o,eC:()=>i,fx:()=>h,ux:()=>s,vK:()=>v,x7:()=>g});var n=a(62805);const r="/api/v2",o="/api/v3/spaces",i=n.A,s=20,c=13,l=["J2kdSTRJzV4","KCFFZ_qfKXk"],d={live:{flavour:"success",statusTextColor:"primary",indicatorWrapperColor:"primary",counterColor:"primary"},stale:{flavour:"stale",statusTextColor:["green","vista"],indicatorWrapperColor:["green","vista"],counterColor:["green","vista"]},offline:{flavour:"neutral",statusTextColor:"textLite",indicatorWrapperColor:"textLite",counterColor:"textLite"},unseen:{statusTextColor:"textLite",indicatorWrapperColor:"textLite",counterColor:"textLite"}},u={completed:{icon:"check",indicatorWrapperColor:"primary",textColor:"primary",text:"Completed",tooltip:"has been claimed and has successfully connected to Netdata at least once"},pending:{icon:"clock_hollow",indicatorWrapperColor:"textLite",textColor:"textLite",text:"Pending...",tooltip:"has been claimed but never connected to the Netdata."}},m=[1,44,0],g=[1,45,6],p="v1.44.0",h="v1.45.6",v="ErrSpaceMemberAlreadyExists"},97674:(e,t,a)=>{"use strict";a.d(t,{L_:()=>n,pj:()=>r,zl:()=>o});a(17333),a(98992),a(54520);const 
n=e=>["admin","manager","troubleshooter","observer","billing"].concat("EarlyBird"===e?"member":[]),r=e=>[e["user:SetAdmin"]&&"admin",e["user:SetManager"]&&"manager",e["user:SetMember"]&&"member",e["user:SetTroubleshooter"]&&"troubleshooter",e["user:SetObserver"]&&"observer",e["user:SetBilling"]&&"billing"].filter(Boolean),o=e=>[e["user:SetFromAdmin"]&&"admin",e["user:SetFromManager"]&&"manager",e["user:SetFromMember"]&&"member",e["user:SetFromTroubleshooter"]&&"troubleshooter",e["user:SetFromObserver"]&&"observer",e["user:SetFromBilling"]&&"billing"].filter(Boolean)},45860:(e,t,a)=>{"use strict";a.d(t,{C2:()=>c,PY:()=>p,l1:()=>m,qM:()=>v,rn:()=>f,rx:()=>d,z5:()=>l});a(69479),a(25440),a(41393),a(81454);var n=a(26655),r=a(49286),o=a(37618);const i=e=>{let{roomID:t,alarmCounter:a,unreachableCount:n,state:r}=e;return{id:t,alertCounter:a,unreachableCount:n,state:r}},s=e=>{let{results:t}=e;return{results:(t||[]).map(i)}},c=e=>(0,o.ES)(e)?Promise.resolve({data:{results:[]}}):n.A.get("/api/v2/spaces/".concat(e,"/alarms"),{transform:s}),l=e=>(0,o.ES)(e)?Promise.resolve({data:null}):n.A.get("/api/v2/spaces/".concat(e,"/alarms/metas")),d={contexts:null,names:null,roles:null},u=e=>{let{alerts:t=[],nodes:a=[],alert_instances:n=[]}=e;return(0,r.bn)(n.map((e=>{let{ni:n,ati:r,tr_i:o,tr_v:i,tr_t:s,cfg:c,info:l,fami:d,ch:u,ch_n:m=u,ctx:g,v:p,t:h,flags:v,st:f,src:A,units:y,to:b,cl:S,cm:k,tp:I,sum:w,slc:E}=e;const x=a[n],{nm:C,cr:T,wr:N,cl:R,er:M,in:D,nd:_,cfg:U}=t[r],P=(C||"unknown_alert").replace(/_/g," ");return{mg:x.mg,nd:x.nd,nodeId:x.nd||x.mg,id:o,context:g,value:p,status:f.toLowerCase(),summary:w||P,name:C,displayName:P,alert:alert,criticalCount:T,warningCount:N,clearCount:R,errorCount:M,instanceCount:D,nodeCount:_,configCount:U,lastStatusChangeValue:i,lastStatusChange:s,configHash:c,info:l,family:d,instance:u,instanceName:m,lastUpdated:h,flags:v,units:y,source:A,recipient:b,class:S,component:k,type:I,silencing:E}})))},m=(e,t)=>(0,o.ES)(e)?n.A.get("/api/v2/alerts?options=summary,instances,values,minify&status=raised",{baseURL:window.envSettings.agentApiUrl,transform:u}):n.A.post("/api/v2/spaces/".concat(e,"/rooms/").concat(t,"/alerts"),{scope:{nodes:[],contexts:[]},selectors:{status:["raised"]},options:["summary","values","instances"]},{transform:u}),g=e=>{let{transitions:t=[]}=e;if(!t[0])return{};const{config_hash_id:a,context:n,machine_guid:r,info:o,transition_id:i,node_id:s,alert:c,new:{status:l,value:d},when:u,old:{status:m,value:g,duration:p},instance:h,instance_n:v=h,units:f}=t[0];return{configHash:a,context:n,name:c,id:i,mg:r,nodeId:s||r,nd:s,alert:c,info:o,displayName:(c||"unknown_alert").replace(/_/g," "),status:l.toLowerCase(),lastStatusChange:u,lastStatusChangeValue:d,prevStatus:m.toLowerCase(),prevValue:g,prevDuration:p,instance:h,instanceName:v,units:f}},p=(e,t,a)=>(0,o.ES)(e)?n.A.get("/api/v2/alert_transitions?options=minify&transition=".concat(a),{baseURL:window.envSettings.agentApiUrl,transform:g}).then((a=>{let{data:n}=a;return y(e,t,n.configHash,n)})):n.A.post("/api/v2/spaces/".concat(e,"/rooms/").concat(t,"/alert_transitions"),{transition:a},{transform:g}).then((a=>{let{data:n}=a;return y(e,t,n.configHash,n)})),h=e=>{let{alerts:t=[]}=e;return t.map((e=>{let{nm:t,cr:a,wr:n,cl:r,er:o,in:i,nd:s,cfg:c,sum:l,slc:d}=e;return{name:t,displayName:(t||"unknown_alert").replace(/_/g," 
"),criticalCount:a,warningCount:n,clearCount:r,errorCount:o,instanceCount:i,nodeCount:s,configCount:c,summary:l||t,silencing:d}}))},v=(e,t)=>(0,o.ES)(e)?n.A.get("/api/v2/alerts?options=minify",{baseURL:window.envSettings.agentApiUrl,transform:h}).then((e=>{let{data:t}=e;return t})):n.A.post("/api/v2/spaces/".concat(e,"/rooms/").concat(t,"/alerts"),{scope:{nodes:[]}},{transform:h}).then((e=>{let{data:t}=e;return t})),f=function(){let{spaceId:e,roomId:t,name:a,context:r,allowEmptyName:i}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};if((!a||!a.length)&&!i)return Promise.resolve({data:[]});if((0,o.ES)(e)){const e=Array.isArray(a)?a.join(","):a;return n.A.get("/api/v2/alerts?options=summary,values,instances,minify&alert=".concat(e),{baseURL:window.envSettings.agentApiUrl,transform:u})}const s=Array.isArray(a)?a:[a],c=r?Array.isArray(r)?r:[r]:void 0;return n.A.post("/api/v2/spaces/".concat(e,"/rooms/").concat(t,"/alerts"),{scope:{nodes:[],contexts:c},selectors:{alert:s},options:["summary","instances","values"]},{transform:u})},A=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return t=>{let{class:a,component:n,type:r,info:o,value:{calc:i="",update_every:s,units:c,db:l},status:d,notification:u}=t;const{type:m,exec:g,to:p,delay:h,repeat:v}=u||{},{warn:f,crit:A}=v||{},{warn:y,crit:b,green:S,red:k}=d||{},{dimensions:I,method:w,after:E,before:x,options:C}=l||{};return{info:o,...e,configInfo:o,class:a,component:n,type:r,calculation:i,updateEvery:s,units:c,warning:y,critical:b,lookupDimensions:I,lookupMethod:w,lookupAfter:E,lookupBefore:x,lookupOptions:C,notificationType:m,exec:g,recipient:p,delay:h,warnRepeatEvery:f,critRepeatEvery:A,green:S,red:k}}},y=function(e,t,a){let r=arguments.length>3&&void 0!==arguments[3]?arguments[3]:{};return(0,o.ES)(e)?n.A.get("/api/v2/alert_config?options=minify&config=".concat(a),{baseURL:window.envSettings.agentApiUrl,transform:A(r)}):n.A.post("/api/v2/spaces/".concat(e,"/rooms/").concat(t,"/alert_config"),{config:a},{transform:A(r)})}},96935:(e,t,a)=>{"use strict";a.d(t,{Ay:()=>s,J7:()=>l,Qk:()=>c,So:()=>m,_v:()=>u,ue:()=>i,xz:()=>d});var n=a(47444),r=a(92861),o=a(45860);const i={id:null,loaded:!1,nodeId:null,instance:null,instanceName:null,name:"",calculation:"",lookupAfter:0,lookupBefore:0,status:"unknown",date:"",fullyLoaded:!1,fullyLoading:!1,info:"",configInfo:"",warning:"",critical:"",updateEvery:0,source:"",recipient:"Unknown",units:"",delayUpDuration:null,delayDownDuration:null,delayMaxDuration:null,multiplier:null,delayUpToTimestamp:null,lookupDimensions:"",lookupMethod:"",lookupOptions:"",class:"Unknown",type:"Unknown",component:"Unknown"},s=(0,n.eU)({key:"alert",default:{}}),c=(0,n.eU)({key:"selectedAlert",default:null}),l=(0,n.eU)({key:"alertsTabsAtom",default:r.Sy}),d=(0,n.Iz)({key:"alertConfigurations",default:e=>{let{spaceId:t,roomId:a}=e;return(0,o.qM)(t,a)}}),u=(0,n.Iz)({key:"alertInstances",default:e=>{let{spaceId:t,roomId:a,name:n}=e;return(0,o.rn)({spaceId:t,roomId:a,name:n}).then((e=>{let{data:t}=e;return t}))}}),m=(0,n.Iz)({key:"selectedAlertConfiguration",default:r.Vh})},92861:(e,t,a)=>{"use strict";a.d(t,{Sy:()=>r,UW:()=>i,Vh:()=>o,kc:()=>n});const n={alertStatus:"alertStatus",os:"os"},r=0,o={alertName:null,nodeId:null},i="https://learn.netdata.cloud/docs/alerting/health-configuration-reference#edit-health-configuration-files"},52768:(e,t,a)=>{"use strict";a.d(t,{J4:()=>g,Vt:()=>v,lT:()=>p,m3:()=>m,x7:()=>u});a(62953);var 
n=a(96540),r=a(47767),o=a(69765),i=a(27467),s=a(11128),c=a(20081),l=a(33222),d=a(21290);const u=e=>{const t=(0,r.Zp)(),a=(0,o.r9)(),c=(0,s.u7)({extraKey:"alerts"}),l=(0,i.Fw)("alertStatuses",{extraKey:"alerts"});return(0,n.useCallback)((n=>{e&&c([e]),n&&"string"===typeof n&&l([n]),t("".concat(a,"/alerts"))}),[e,a,c])},m=(e,t)=>{if(void 0===e)return"-";const a=(0,c.W)(e);return t?"".concat(a," ").concat(t):a},g=(e,t)=>(0,n.useMemo)((()=>m(e,t)),[e,t]),p=e=>{let{rawTime:t,secs:a=!1,long:r=!1}=e;const{localeTimeString:o,localeDateString:i}=(0,d.$j)();return(0,n.useMemo)((()=>{const e=new Date(1e3*t);return isNaN(e.valueOf())?{}:{timeAgo:(0,l.A)(e,new Date),formattedDate:"".concat(i(e,{long:r})," ").concat(o(e,{secs:a}))}}),[t,o,i])},h=()=>Math.floor((new Date).getTime()/1e3),v=function(e){let t=!(arguments.length>1&&void 0!==arguments[1])||arguments[1];const{lastUpdated:a,lastStatusChange:r}=e||{},o=(0,i.rI)(),[s,c]=(e=>{const{lastUpdated:t,lastStatusChange:a}=e||{};return(0,n.useMemo)((()=>{if(!t||!a)return[];const e=t||h(),n=e-a,r=Math.round(a-n),o=Math.round(e+n);return[r,o>h()?h():o]}),[t,a])})({lastUpdated:a,lastStatusChange:r});(0,n.useEffect)((()=>{t&&(s||c)&&o({after:1e3*s,before:1e3*c})}),[t,s,c])}},64118:(e,t,a)=>{"use strict";a.d(t,{AO:()=>x,DV:()=>M,Gq:()=>b,I8:()=>N,JL:()=>f,KB:()=>A,SB:()=>h,Yo:()=>I,kJ:()=>R,oU:()=>C,q1:()=>T,ud:()=>D,x:()=>w,yk:()=>S,zu:()=>k});a(17333),a(41393),a(14905),a(98992),a(54520),a(81454),a(8872);var n=a(96540),r=a(47444),o=a(69765),i=a(67990),s=a(51074),c=a(88982),l=a(47762),d=a(92861),u=a(3914),m=a(96935),g=a(45860),p=a(20081);const h=(0,r.K0)({key:"alertState",get:e=>{let{id:t,key:a}=e;return e=>{let{get:n}=e;const r=n(m.Ay)[t]||m.ue;return a?r[a]:r}},set:e=>{let{id:t,key:a}=e;return(e,n)=>{let{set:r}=e;r(m.Ay,a?"function"!==typeof n?e=>({...e,[t]:{...e[t],[a]:n}}):e=>({...e,[t]:{...e[t],[a]:n(e[t][a])}}):e=>({...e,[t]:{...m.ue,...e[t],...n}}))}}}),v=(0,r.K0)({key:"alertInstancesState",get:e=>{let{spaceId:t,roomId:a,alertNames:n,contexts:r,allowEmptyName:o}=e;return()=>(0,g.rn)({spaceId:t,roomId:a,name:n,context:r,allowEmptyName:o})}}),f=(e,t)=>(0,r.vc)(h({id:e,key:t})),A=()=>(0,r.L4)(m.Qk),y=(0,r.K0)({key:"alertsState",get:e=>t=>{let{get:a}=t;return e.map((e=>a(h({id:e}))))},cachePolicy_UNSTABLE:{eviction:"most-recent"}}),b=e=>(0,r.vc)(y(e)),S=function(e){let{spaceId:t,roomId:a}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};const i=(0,u.vt)();t=t||i;const s=(0,o.ID)();a=a||s;const c=(0,r.Zs)((n=>{let{set:r}=n;return()=>{r(h({id:e,key:"fullyLoading"}),!0);const n=(0,g.PY)(t,a,e);return n.then((t=>{let{data:a}=t;return r(h({id:e}),{...a,fullyLoaded:!0,fullyLoading:!1,loaded:!0})})).catch((()=>r(h({id:e,key:"fullyLoading"}),!1))),()=>n.cancel()}}),[a,t,e]);(0,n.useEffect)((()=>{e&&c()}),[e,c])},k=()=>{const e=(()=>{const e=(0,o.ID)();return(0,r.vc)(I(e))})(),t=(0,i.CK)(),a=(0,l.Gt)(t);return(0,n.useMemo)((()=>a.map((t=>{const a=e.filter((e=>e.nodeId===t.id));if(!a.length)return t;const n=a.map((e=>e.status));return{...t,[d.kc.alertStatus]:(0,p.h)(n)}}))),[e,a])},I=(0,r.K0)({key:"roomAlerts",get:e=>t=>{let{get:a}=t;const n=a((0,s.t)({id:e,key:"ids"}));return a(y(n))}}),w=()=>{const e=(0,o.ID)();return(0,r.vc)((0,s.t)({id:e,key:"loaded"}))},E={warning:0,critical:0,clear:0},x=e=>{const t=b(e);return(0,n.useMemo)((()=>t.reduce(((e,t)=>{let{status:a}=t;return e[a]=e[a]+1,e}),{...E})),[e])},C=()=>{const e=(0,u.vt)(),t=(0,o.ID)();return(0,r.vc)((0,m.xz)({spaceId:e,roomId:t}))},T=(e,t)=>{const 
a=(0,u.vt)(),i=(0,o.ID)(),s=(0,c.n)("id"),l=i||s,d=(0,r.vc)((0,m._v)({spaceId:a,roomId:l,name:e}));return(0,n.useMemo)((()=>t?d.filter((e=>e.nodeId===t)):d),[d,t])},N=e=>{var t;let{alertNames:a,contexts:n,allowEmptyName:i}=e;const s=(0,u.vt)(),l=(0,o.ID)(),d=(0,c.n)("id"),m=l||d,g=(0,r.xf)(v({spaceId:s,roomId:m,alertNames:a,contexts:n,allowEmptyName:i}));return{loaded:"loading"!==g.state,value:null===(t=g.contents)||void 0===t?void 0:t.data,hasError:"hasError"===g.state}},R=()=>(0,r.L4)(m.J7),M=()=>{const e=(0,o.ID)();return(0,r.L4)((0,m.So)(e))},D=()=>{const e=(0,o.ID)();return(0,r.E0)((0,m.So)(e))}},20081:(e,t,a)=>{"use strict";a.d(t,{W:()=>n,h:()=>r});const n=e=>{const t=e<1?Number(e).toPrecision(3):Number(e).toFixed(2),a=Number.parseFloat(t);return Math.abs(a)>=1e9||Math.abs(a)<=1e-5&&0!==a?a.toExponential(3):a},r=e=>e.includes("critical")?"critical":e.includes("warning")?"warning":e.includes("clear")?"clear":e[0]},58384:(e,t,a)=>{"use strict";a.d(t,{t:()=>d,A:()=>m});var n=a(58168),r=(a(62953),a(96540)),o=a(83199),i=a(87659),s=a(8711);const c=(0,s.default)(o.Drop).attrs({align:{top:"bottom",left:"left"},animation:!0,background:"dropdown",column:!0,margin:[.5,0,0],overflow:{vertical:"auto"},padding:[2,0],round:1}).withConfig({displayName:"styled__Drop",componentId:"sc-1x9syns-0"})(["box-sizing:content-box;"]),l=(0,s.default)(o.TextSmall).withConfig({displayName:"styled__DropdownItemClickable",componentId:"sc-1x9syns-1"})(["cursor:pointer;pointer-events:",";"," &:hover{background-color:",";}"],(e=>{let{isDisabled:t}=e;return t?"none":"auto"}),(e=>e.isDisabled&&"opacity: 0.6;"),(0,o.getColor)("selected")),d=e=>{let{children:t,color:a="text",disabled:i,icon:s,...c}=e;return r.createElement(l,(0,n.A)({as:o.Flex,"data-testid":"dropdownItem",gap:2,isDisabled:i,padding:[2,4]},c),r.createElement(o.Icon,{color:i?"textLite":a,"data-testid":"dropdownItem-icon",height:"16px",name:s,width:"16px"}),r.createElement(o.Text,{color:a,"data-testid":"dropdownItem-text",whiteSpace:"nowrap"},t))},u=(0,r.forwardRef)(((e,t)=>{let{Component:a,category:o,context:i,...s}=e;return r.createElement(a,(0,n.A)({"data-ga":"".concat(o,"::").concat(i,"::options"),"data-testid":"chartOptions",icon:"nav_dots",iconWidth:"12px",iconHeight:"12px",flavour:"borderless",neutral:!0,small:!0,padding:[0],ref:t,title:"Options"},s))})),m=e=>{let{Component:t=o.IconButton,category:a,context:s,children:l,testId:d,...m}=e;const[g,p,,h]=(0,i.A)(),v=(0,r.useRef)(),f=d||"chartsDropdown";return r.createElement(r.Fragment,null,r.createElement(u,(0,n.A)({},m,{"data-testid":"".concat(f,"-button"),category:a,context:s,onClick:p,ref:v,Component:t})),v.current&&g&&r.createElement(c,{close:h,"data-testid":f,onClickOutside:h,onEsc:h,target:v.current},l({close:h})))}},11604:(e,t,a)=>{"use strict";a.d(t,{Yr:()=>x,z2:()=>R,DH:()=>I,Js:()=>w,tN:()=>N,So:()=>T,LC:()=>C});a(25440),a(9920),a(41393),a(14905),a(98992),a(3949),a(81454),a(8872),a(62953);var n=a(96540),r=a(47767),o=a(79731),i=a(3914),s=a(86663),c=a(26655),l=a(18061),d=a(71835),u=a(61649),m=a(76201),g=a(37618),p=a(22292),h=a(11368),v=a(57419);const f=e=>{let{cloudRoute:t,redirectUri:a}=e;try{if(!(0,v.isTryingToJoinWorkspace)(t))return"";const{inviteToken:e,spaceId:n}=(e=>{const t=s.parse(e);let{error_retry:a,token:n}=t.error_retry||t.token?{...t,token:decodeURIComponent(t.token)}:s.parse(decodeURIComponent(e));return n||(n=s.parse(a).token),{inviteToken:n,spaceId:/spaces\/(.*)\/join-callback/.exec(a)[1]}})(a);return"?".concat(s.stringify({token:e,space:n}))}catch(n){return console.warn("error 
parsing join-callback url",n),""}};var A=a(35600),y=a(50503),b=a(36850),S=a(50876);const k=function(e){let{path:t="/sign-up"}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};if(window.envSettings.tracking)try{var a;const n="".concat(t,"/thank-you");window.dataLayer.push({event:"pageview",virtualPage:"".concat(n).concat(e)}),window.posthog.setPersonPropertiesForFlags({netdata_cloud_account_created_days_ago:0}),(0,m.F)("".concat(null===(a=window)||void 0===a||null===(a=a.location)||void 0===a?void 0:a.origin).concat(n).concat(e))}catch(n){}},I=e=>(0,u.A)((function(){let t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return c.A.patch("/api/v1/accounts/".concat(e),t)}),[e]),w=()=>{const{search:e,hash:t}=(0,r.zy)(),a=(0,r.RQ)("/sign-up"),i=(0,r.RQ)("/sign-in"),s=a?a.pathname:i?i.pathname:"",l=(0,r.Zp)(),[u,m]=(0,d.A)(),{sendLog:g,isReady:p}=(0,S.A)();return(0,n.useCallback)((a=>{let{isSSO:n,email:r,redirectURI:i,registerURI:d,isUnverifiedRegistration:p,resend:h}=a;return(n?c.A.get("/api/v2/auth/account/okta/magic-link?redirect_uri=".concat(i,"®ister_uri=").concat(d,"&email=").concat(r)):c.A.post("/api/v2/auth/account/magic-link",{email:r,redirectURI:i,registerURI:d,isUnverifiedRegistration:p})).then((()=>{u({header:"Sign in email sent"}),h||(k(e,{path:s}),"/sign-up"==s&&g({feature:"SignUpThankYou"}),l("/sign-in/magic-link-sent".concat(e).concat(t),{state:{email:r}}))})).catch((e=>{var t,a;const n=(0,o.o)((null===e||void 0===e||null===(t=e.response)||void 0===t||null===(t=t.data)||void 0===t?void 0:t.errorMsgKey)||(null===e||void 0===e||null===(a=e.response)||void 0===a||null===(a=a.data)||void 0===a?void 0:a.errorMessage)||"Something went wrong");throw m({header:"Sign in",text:n}),n}))}),[s,e,t,g,p])},E=e=>{let{authorized_origins:t=[]}=e;return t.map((e=>{let{id:t,url:a,last_accessed_at:n}=e;return{id:t,url:a,lastAccessedAt:n}}),[])},x=function(){let{onSuccess:e,onFail:t,...a}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:[];return(0,l.A)((()=>({fetch:()=>c.A.get("/api/v1/auth/account/origins",{transform:E}),initialValue:[],isDefaultLoading:!0,onSuccess:t=>{null===e||void 0===e||e(t)},onFail:e=>{null===t||void 0===t||t(e)},...a})),n)},C=()=>{const[e,t]=(0,n.useState)(""),[a,r]=(0,n.useState)(!1),[,o]=(0,d.A)();return[e,(0,n.useCallback)((async e=>{r(!0);try{const a=await c.A.post("/api/v1/auth/account/origins",{origin_url:e});t(a.authorized_origin_url),r(!1)}catch(a){o({header:"Trust URL",text:"We couldn't trust the URL"}),r(!1)}}),[]),a]},T=e=>{e&&e.preventDefault();const[,t]=(0,d.A)(),{search:a}=(0,r.zy)(),{cloudRoute:o,oauth:i,token:l,username:u,redirect_uri:m}=s.parse(a),g=(0,h.hn)(),{setFailure:p}=(0,y.A)(),v=f({cloudRoute:o,redirectUri:m}),{sendLog:A,isReady:b}=(0,S.A)();return(0,n.useCallback)((async()=>{const e="/api/v2/auth/account/".concat(i||"magic-link","/register");try{const{data:t}=await c.A.post("".concat(e).concat(v),{token:l,username:u}),{errorMsgKey:n}=t||{};"ErrTrialUpdateFailed"==n&&p(),g(),k(a)}catch(n){t(n)}}),[i,l,u,A,b])},N=()=>{const e=(0,i.dg)();return(0,n.useCallback)((async()=>{var e;return await c.A.post("/api/v1/auth/account/logout"),"function"==typeof(null===(e=window.posthog)||void 0===e?void 0:e.reset)&&window.posthog.reset(),(()=>{const 
e=b.V.reduce(((e,t)=>({...e,[t]:localStorage.getItem(t)})),{});localStorage.clear(),Object.entries(e).forEach((e=>{let[t,a]=e;localStorage.setItem(t,a)}))})(),(0,A.y)(),g.Ay?window.location.reload():window.location.replace("/sign-in")}),[e])},R=()=>{const e=(0,p.NJ)(),t=N(),[,a]=(0,d.A)();return(0,n.useCallback)((async()=>{try{await c.A.delete("/api/v1/accounts/".concat(e)),t()}catch(n){a(n)}}),[])}},36850:(e,t,a)=>{"use strict";a.d(t,{S:()=>n,V:()=>r});const n="https://learn.netdata.cloud/docs/nightly/concepts/role-based-access-model",r=["lastSignInMethod","oktaSSO"]},11368:(e,t,a)=>{"use strict";a.d(t,{Ay:()=>f,hn:()=>h});a(14905),a(98992),a(8872);var n=a(96540),r=a(47444),o=a(26655),i=a(78969),s=a(37618),c=a(92861);const l={auth_type:"",avatarURL:null,createdAt:"",email:"",id:null,name:"",termsAccepted:!0,isAnonymous:!1,authorizedOrigins:[]},d={theme:"unspecified",spacePanelCollapsed:!0,alertsGrouping:c.kc.alertStatus},u=e=>{let{permissions:t,settings:a,...n}=e;const r=n.id===i.eC;return{permissions:t,settings:a,user:{...n,isAnonymous:r,termsAccepted:r||n.termsAccepted}}},m=()=>{var e;const t=JSON.parse(localStorage.getItem("userSettings"))||{},a={user:{...l,id:(null===(e=window.localNetdataRegistry)||void 0===e?void 0:e.pg)||null,isLoaded:!0,isAnonymous:!0},permissions:{},settings:{...d,...t,isLoaded:!0}};if((0,s.I)())return Promise.resolve(a);let n;return(()=>{var e,r;return null===(e=n)||void 0===e||null===(r=e.cancel)||void 0===r||r.call(e),n=o.A.get("/api/v2/accounts/me",{transform:u,allow401:!0}),n.then((e=>{let{data:{user:n,permissions:r=[],settings:o}}=e;return s.Ay&&n.isAnonymous?a:{user:{...l,...n,isLoaded:!0},permissions:r.reduce(((e,t)=>({...e,[t]:t})),{}),settings:{...d,...t,...o,isLoaded:!0}}})).catch((()=>s.Ay?a:{user:{...l,isLoaded:!0,isAnonymous:!0},permissions:{},settings:{...d,...t,isLoaded:!0}}))})()},g=(0,r.eU)({key:"currentUserFetcher",default:null}),p=(0,r.eU)({key:"checkAuthAtom",default:1}),h=()=>{const e=(0,r.lZ)(p),t=(0,r.lZ)(g);return(0,n.useCallback)((async()=>{const a=await m();t(a),e((e=>e+1))}),[])},v=(0,r.gD)({key:"fullStateFetcher",get:async e=>{let{get:t}=e;return t(g)||await m()}}),f=(0,r.K0)({key:"currentUserFullState",get:e=>t=>{let{get:a}=t;return a(v)[e]}})},46741:(e,t,a)=>{"use strict";a.d(t,{Dk:()=>p,_s:()=>f,Ge:()=>A,JT:()=>v});a(14905),a(98992),a(8872);var n=a(96540),r=a(47444),o=a(65570),i=a(3914),s=a(37618);const c=(0,r.Iz)({key:"permissionsBySpaceId",default:{},effects:e=>(0,s.ES)(e)?[e=>{let{setSelf:t,trigger:a}=e;"get"===a&&t(s.yu.permissions)}]:[]});var l=a(97674),d=a(11368);const u=(0,r.eU)({key:"userPermissions",default:(0,d.Ay)("permissions")}),m=["space:Leave","room:Create","space:InviteUser","space:RemoveUser","space:UpdateMeta","space:Delete","room:Delete","room:AddNode","node:Delete","user:ChangeRoles","room:UpdateMeta","room:Leave","room:RemoveNode","billing:Manage"],g={"space:ReadSettings":e=>!m.some((t=>!e[t]))},p=(0,r.K0)({key:"permissionsSelector",get:e=>t=>{let{get:a}=t;return a(c(e))},set:e=>function(t){let{set:a}=t,n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};a(c(e),(e=>(0,o.Ay)(e,n)?e:n))},cachePolicy_UNSTABLE:{eviction:"most-recent"}}),h=e=>{const t=(0,i.vt)(),a=(0,r.vc)(p(e||t)),o=(0,r.xf)(u);return(0,n.useMemo)((()=>{let e={...a};return"hasValue"===o.state&&(e={...e,...o.contents}),t=e,{...t,...Object.keys(g).reduce(((e,a)=>g[a](t)?{...e,[a]:a}:e),{})};var t}),[a,o.state])},v=(e,t)=>!!h(t)[e],f=()=>{const e=h();return(0,n.useMemo)((()=>(0,l.pj)(e)),[e])},A=()=>{const 
e=h();return(0,n.useMemo)((()=>(0,l.zl)(e)),[e])}},5668:(e,t,a)=>{"use strict";a.d(t,{ng:()=>h,tF:()=>g,xd:()=>m});a(62953);var n=a(47444),r=a(86723),o=a(11368);const i=(0,n.eU)({key:"currentUserSettings",default:(0,o.Ay)("settings")});var s=a(11604),c=a(22292),l=a(3914);const d=(0,n.K0)({key:"userSettings",get:e=>t=>{let{get:a}=t;const n=a(i);return e?n[e]:n},set:e=>(t,a)=>{let{set:n}=t;return n(i,(t=>e?{...t,[e]:"function"===typeof a?a(t[e]):a}:"function"===typeof a?a(t):a))}}),u=e=>{const t=(0,c.uW)("isAnonymous"),a=(0,n.lZ)(d(e)),o=(0,c.NJ)(),i=(0,s.DH)(o),[,l]=(0,r.A)("userSettings",{});return(0,n.Zs)((n=>{let{snapshot:r}=n;return async n=>{a(n);const o=r.retain();try{const a={...await r.getPromise(d()),...e?{[e]:n}:n};if(t)return void l(a);await i({settings:a})}catch(s){const t=await r.getPromise(d());a(e?t[e]:t)}finally{o()}}}),[e])},m=e=>(0,n.vc)(d(e)),g=e=>[m(e),u(e)],p="spaceSettings",h=e=>{var t;const a=(0,l.vt)(),n=m(p),r=u(p);return[null===n||void 0===n||null===(t=n[a])||void 0===t?void 0:t[e],t=>{r({...n||{},[a]:{...(null===n||void 0===n?void 0:n[a])||{},[e]:t}})}]}},76689:(e,t,a)=>{"use strict";a.d(t,{A:()=>o});var n=a(47444),r=a(11368);const o=(0,n.eU)({key:"currentUser",default:(0,r.Ay)("user")})},22292:(e,t,a)=>{"use strict";a.d(t,{Dm:()=>i,Ir:()=>d,NJ:()=>c,qO:()=>l,uW:()=>s});var n=a(47444),r=a(76689),o=a(11604);const i=(0,n.K0)({key:"userState",get:e=>t=>{let{get:a}=t;const n=a(r.A);return e?n[e]:n},set:e=>(t,a)=>{let{set:n}=t;n(r.A,(t=>e?{...t,[e]:a}:{...t,...a}))}}),s=e=>(0,n.vc)(i(e)),c=()=>(0,n.vc)(i("id")),l=function(e){let{shouldPersist:t,onSuccess:a,onFail:r}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};const s=(0,n.lZ)(i(e)),l=c(),d=(0,o.DH)(l);return(0,n.Zs)((n=>{let{snapshot:r}=n;return async n=>{if(s(n),!t)return;const o=r.retain();try{await d({...e?{[e]:n}:n}),a&&a()}catch(c){const t=await r.getPromise(i());s(e?t[e]:t)}finally{o()}}}),[e,t,a,r])},d=(e,t)=>[s(e),l(e,t)]},2025:(e,t,a)=>{"use strict";a.d(t,{DH:()=>y,jA:()=>f,yK:()=>v,z2:()=>A});a(9920),a(41393),a(98992),a(3949),a(81454),a(62953);var n=a(47444),r=a(61649),o=a(71835),i=a(69765),s=a(3914),c=a(72802),l=a(68980),d=a(86652),u=a(58388),m=a(80925),g=a(27078),p=a(91517),h=a(43124);const v=(e,t)=>{const[a,n]=(0,o.A)();return(0,r.A)((function(){let r=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const o=(0,c.k9)(e,t,r);return o.then((e=>(a({header:"Dashboards",text:"Dashboard successfully created!"}),e))).catch((e=>!e.isCancel&&n(e))),o}),[e,t])},f=()=>{const[e,t]=(0,o.A)();return(0,r.A)((a=>{let{dashboards:n}=a;const r=n.map((e=>{let{id:t}=e;return t})).toString(),o=n[0].roomId,i=n[0].spaceId;return(0,c.A_)(i,o,r).then((()=>e({header:"Dashboards",text:"Dashboards successfully deleted!"}))).catch((e=>!e.isCancel&&t))}),[])},A=e=>{const t=(0,l.fz)(e,"spaceId"),a=(0,l.fz)(e,"roomId"),[n,i]=(0,o.A)();return(0,r.A)((()=>{const r=(0,c.A_)(t,a,e);return r.then((()=>n({header:"Dashboards",text:"Dashboard successfully deleted!"}))).catch((e=>!e.isCancel&&i(e))),r}),[e,t,a])},y=function(e){let{onSuccess:t,onFail:a}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};const r=(0,m.e)(),o=(0,s.vt)(),v=(0,i.ID)();return(0,n.Zs)((n=>{let{snapshot:i,set:s}=n;return function(){let n=arguments.length>0&&void 0!==arguments[0]?arguments[0]:e;s((0,l.Qx)({id:n,key:"processing"}),!0);const 
m=i.getLoadable((0,l.Qx)({id:n})).contents,{layout:f,containerIds:A,containers:y}=i.getLoadable((0,d.kY)({id:n})).contents,b=i.getLoadable((0,u.nG)({id:n})).contents,S=(0,g.My)(r,n),k={version:m.version+1,name:m.name,snapshot:{uiState:{layout:f,containerIds:A,containers:y,cards:b}}},I=(0,c.mQ)(o,v,n,k);return I.then((e=>{let{data:a}=e;const{createdAt:n,id:r,name:i,roomID:c=v,slug:m,spaceID:g=o,updatedAt:f,version:A}=a,{cards:y}=(0,p.S)(a);s((0,u.nG)({id:r}),y),s((0,l.Qx)({id:r}),(e=>({...e,createdAt:n,id:r,name:i,roomId:c,slug:m,spaceId:g,updatedAt:f,version:A,processing:!1}))),s(h.H9,m),s((0,h.yC)([g,c,m]),r),s((0,d.Ie)(r)),S&&Object.keys(y).forEach((e=>{var t,a;"text"!==y[e].type&&(null===(t=S.getNode({cardId:e}))||void 0===t||null===(a=t.removePristine)||void 0===a||a.call(t))})),t&&t(a)})).catch((e=>{throw s((0,l.Qx)({id:n,key:"processing"},!1)),a&&a(e),e})),I}}))}},67544:(e,t,a)=>{"use strict";a.d(t,{A_:()=>x,Dn:()=>I,Kv:()=>b,Ts:()=>E,W6:()=>w,c6:()=>k,nM:()=>S});a(17333),a(9920),a(14905),a(98992),a(54520),a(3949),a(8872),a(25509),a(65223),a(60321),a(41927),a(11632),a(64377),a(66771),a(12516),a(68931),a(52514),a(35694),a(52774),a(49536),a(21926),a(94483),a(16215),a(62953);var n=a(96540),r=a(47444),o=a(33829),i=a(54621),s=a(21591),c=a(30673),l=a(8196),d=a(86652),u=a(68980),m=a(58388),g=a(72802),p=a(2025);const h=function(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};return{...c.kG,...c.Pk,id:(0,o.A)(),contextScope:e,...t}},v="A",f=e=>function(t,a){let n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{},r=arguments.length>3?arguments[3]:void 0;return n=n||{},r=r||Object.keys(t)[0]||v,{...t,[r]:[...t[r]||[],{...e,...n,id:a}]}},A=f(l.u6),y=f(l.BC),b=e=>(0,r.Zs)((t=>{let{set:a,snapshot:n}=t;return async function(){let{dashboardId:t,containerId:r}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const i=(0,o.A)(),s={...c.kG,...c.cY,id:i,editing:!0};e=e||t;const l=await n.getPromise((0,d.kY)({id:e,key:"containerIds"}));r=r||l[0]||v,a((0,u.Qx)({id:e,key:"cardIds"}),(e=>[...e,i])),a((0,d.xB)(e),(e=>y(e,i,null,r))),a((0,d.MX)(e),(e=>e.includes(r)?e:[...new Set([...e,r])])),a((0,m.nG)({id:e,resourceId:i}),s)}})),S=e=>(0,r.Zs)((t=>{let{set:a,snapshot:n}=t;return async function(t){let{itemLayout:r,containerId:o,...i}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},s=arguments.length>2&&void 0!==arguments[2]?arguments[2]:()=>{};const c=h(t,i),{id:l,dashboardId:g}=c;e=e||g;const p=await n.getPromise((0,d.kY)({id:e,key:"containerIds"}));o=o||p[0]||v,a((0,u.Qx)({id:e,key:"cardIds"}),(e=>[...e,l])),a((0,d.xB)(e),(e=>A(e,l,r,o))),a((0,d.MX)(e),(e=>e.includes(o)?e:[...new Set([...e,o])])),a((0,m.nG)({id:e,resourceId:l}),c),s(c)}})),k=(e,t,a)=>(0,r.Zs)((n=>{let{set:r}=n;return()=>{r((0,u.Qx)({id:e,key:"cardIds"}),(e=>e.filter((e=>e!==t)))),r((0,d.xB)(e),(e=>{if(!t){const t={...e};return delete t[a],t}return a?{...e,[a]:e[a].filter((e=>{let{id:a}=e;return a!==t}))}:Object.keys(e).reduce(((a,n)=>({[n]:e[n].filter((e=>{let{id:a}=e;return a!==t}))})),{})})),a&&r((0,d.MX)(e),(e=>e.filter((e=>e!==a)))),t&&r((0,m.nG)({id:e}),(e=>{const a={...e};return delete a[t],a}))}})),I=function(e,t){let{onSuccess:a,onFail:n}=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{};const o=(0,p.yK)(e,t);return(0,r.Zs)((r=>{let{set:i}=r;return async function(r){let c=arguments.length>1&&void 0!==arguments[1]?arguments[1]:a;try{const{data:a}=await 
o(r),n={...r,spaceId:e,roomId:t,...a,loaded:!0};i((0,u._d)(a.id),n),i((0,s.hZ)(t),n),c&&c(n)}catch(l){n?n(r):console.warn("failed",l)}}}),[o,e,a,n])},w=(e,t,a)=>{const r=I(e,t,a);return(0,n.useCallback)(((n,o,i)=>{const s=h(o,i);return r({name:n},(n=>{var r;(0,g.mQ)(e,t,n.id,{version:n.version+1||2,snapshot:{uiState:A({},s.id,i.itemLayout),items:[{...s,cardAttributes:{...i,id:s.id}}]}}),null===a||void 0===a||null===(r=a.onSuccess)||void 0===r||r.call(a,n)}))}),[r])},E=()=>(e=>{const t=(0,i.nT)();return(0,r.Zs)((a=>{let{snapshot:n,set:r}=a;return async function(a){let{ids:o}=a,{onSuccess:i,onFail:c}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};const l=await n.getPromise((0,u.dE)(o));e({dashboards:l}).then((()=>{l.forEach((e=>{let{id:a,roomId:n}=e;r((0,u.kg)(a)),r((0,s.wt)(n),[a]),t(a)})),i&&i(l)})).catch((e=>{c?c(l):console.warn("failed",e)}))}}),[e])})((0,p.jA)()),x=(e,t)=>function(e,t){let{onSuccess:a,onFail:n}=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{};const o=(0,u.fz)(e,"spaceId"),c=(0,u.fz)(e,"roomId"),l=(0,i.nT)();return(0,r.Zs)((r=>{let{snapshot:o,set:i}=r;return async()=>{const r=await o.getPromise((0,u.Qx)({id:e}));try{await t(),i((0,u.kg)(e)),i((0,s.wt)(c),[e]),l(e),a&&a(r)}catch(d){n?n(r):console.warn("failed",d)}}}),[t,o,c,e,a,n])}(e,(0,p.z2)(e),t)},72802:(e,t,a)=>{"use strict";a.d(t,{A_:()=>c,f7:()=>l,k9:()=>s,mQ:()=>i});var n=a(26655),r=a(37618);const o=(0,a(63457).A)("netdata"),i=(e,t,a,i)=>(0,r.ES)(e)&&(0,r.tB)(t)&&a===r.LA?o.get(r.LA).then((e=>e||r.mz)).then((e=>{const t={...e,...i};return o.set(r.LA,t),{data:t}})):n.A.patch("/api/v1/spaces/".concat(e,"/rooms/").concat(t,"/dashboards/").concat(a),i),s=(e,t,a)=>n.A.post("/api/v1/spaces/".concat(e,"/rooms/").concat(t,"/dashboards"),a),c=(e,t,a)=>n.A.delete("/api/v1/spaces/".concat(e,"/rooms/").concat(t,"/dashboards?dashboard_ids=").concat(a)),l=(e,t,a)=>(0,r.ES)(e)&&(0,r.tB)(t)&&a===r.LA?o.get(r.LA).then((e=>({data:e||r.mz}))):n.A.get("/api/v1/spaces/".concat(e,"/rooms/").concat(t,"/dashboards/").concat(a))},30673:(e,t,a)=>{"use strict";a.d(t,{Ay:()=>s,Pk:()=>r,cY:()=>o,kG:()=>i});var n=a(47444);const r={pristine:{},id:null,type:"chart"},o={pristine:{},id:null,text:"",editing:!1,type:"text",textType:"text"},i={id:null,type:""},s=(0,n.Iz)({key:"dashboardCards",default:{}})},58388:(e,t,a)=>{"use strict";a.d(t,{T6:()=>c,bE:()=>s,nG:()=>o,r:()=>d,v_:()=>l});var n=a(47444),r=a(30673);const o=(0,n.K0)({key:"dashboardCardsState",get:e=>{let{id:t,resourceId:a}=e;return e=>{let{get:n}=e;const o=n((0,r.Ay)(t));return a?o[a]||r.kG:o}},set:e=>{let{id:t,resourceId:a}=e;return(e,n)=>{let{set:o}=e;return a?"function"===typeof n?o((0,r.Ay)(t),(e=>({...e,[a]:n(e[a])}))):void o((0,r.Ay)(t),(e=>({...e,[a]:{...e[a],...n}}))):o((0,r.Ay)(t),n)}}}),i=(0,n.K0)({key:"dashboardCardState",get:e=>{let{id:t,resourceId:a,key:n}=e;return e=>{let{get:r}=e;const i=r(o({id:t,resourceId:a}));return n?i[n]:i}},set:e=>{let{id:t,resourceId:a,key:n}=e;return(e,r)=>{let{set:i}=e;return i(o({id:t,resourceId:a}),n?{[n]:r}:r)}}}),s=(e,t,a)=>(0,n.vc)(i({id:e,resourceId:t,key:a})),c=e=>{const t=(0,n.vc)(i({id:e}));return e=>t[e]||r.kG},l=(e,t,a)=>(0,n.lZ)(i({id:e,resourceId:t,key:a})),d=(e,t,a)=>(0,n.L4)(i({id:e,resourceId:t,key:a}))},8018:(e,t,a)=>{"use strict";a.d(t,{A:()=>u});var n=a(58168),r=(a(62953),a(96540)),o=a(83199),i=a(80542),s=a(27229),c=a(49667);const l="Cannot exceed ".concat(c.q," characters"),d=e=>{const 
t=e.length<=c.q;return(0,i.H)(t,l)},u=e=>{let{value:t,isValid:a,setIsValid:c,onChange:l,label:u,validators:m=[],hint:g,placeholder:p="Enter dashboard name",...h}=e;const v=(0,i.k)([d,...m]),[f,A]=(0,r.useState)("");return(0,r.useEffect)((()=>{const e=v(t),n=e.isValid,r=(0,s.W)(e);!a&&n?c(!0):a&&!n&&c(!1),A(r||"")}),[a,c,v,t]),r.createElement(o.TextInput,(0,n.A)({label:u||" ",name:"name",placeholder:p,value:t,onChange:l,hint:g,error:!a&&f},h))}},49667:(e,t,a)=>{"use strict";a.d(t,{$:()=>o,q:()=>r});var n=a(68831);const r=50,o="".concat(n.A.assetsBaseURL,"/img/new-dashboard.svg")},8196:(e,t,a)=>{"use strict";a.d(t,{Ay:()=>s,BC:()=>r,u6:()=>o,ue:()=>i});var n=a(47444);const r={id:null,left:0,top:0,width:12,height:2,minWidth:1,minHeight:.5},o={id:null,left:0,top:0,width:10,height:9,minWidth:1,minHeight:3},i={layout:{},containerIds:[],containers:{},pristine:{}},s=(0,n.Iz)({key:"dashboardLayout",default:i})},86652:(e,t,a)=>{"use strict";a.d(t,{kY:()=>s,Ie:()=>d,MX:()=>m,xB:()=>l,Ix:()=>g,QZ:()=>p,B1:()=>u,Xc:()=>c});var n=a(47444),r=a(65570);var o=a(8196);const{updatePristine:i}=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:"pristine";return{updatePristine:(t,a,n)=>{if(!(a in(t[e]||{}))&&!(0,r.Ay)(t[a],n))return{...t,[e]:{...t[e],[a]:t[a]}};if((0,r.Ay)(t[e][a],n)){const n={...t[e]};return delete n[a],{...t,[e]:n}}return t},resetPristine:t=>({...t,...t[e],[e]:{}})}}(),s=(0,n.K0)({key:"dashboardLayoutState",get:e=>{let{id:t,key:a}=e;return e=>{let{get:n}=e;const r=n((0,o.Ay)(t));return a?r[a]:r}},set:e=>(t,a)=>{let{set:n}=t;n((0,o.Ay)(e),{...o.ue,...a})}}),c=(e,t)=>(0,n.vc)(s({id:e,key:t})),l=(0,n.K0)({key:"updateDashboardLayoutState",get:e=>t=>{let{get:a}=t;return a(s({id:e,key:"layout"}))},set:e=>(t,a)=>{let{set:n}=t;n((0,o.Ay)(e),(e=>({...e,...i(e,"layout",a),layout:a})))}}),d=(0,n.K0)({key:"removePristineDashboardLayoutState",set:e=>t=>{let{set:a}=t;a((0,o.Ay)(e),(e=>({...e,pristine:{}})))}}),u=e=>{const t=c(e,"layout"),a=(e=>(0,n.lZ)(l(e)))(e);return[t,a]},m=(0,n.K0)({key:"updateDashboardContainerIdsState",get:e=>t=>{let{get:a}=t;return a(s({id:e,key:"containerIds"}))},set:e=>(t,a)=>{let{set:n}=t;n((0,o.Ay)(e),(e=>({...e,...i(e,"containerIds",a),containerIds:a})))}}),g=e=>{const t=c(e,"containerIds"),a=(e=>(0,n.lZ)(m(e)))(e);return[t,a]},p=((0,n.K0)({key:"updateDashboardContainersState",get:e=>t=>{let{get:a}=t;return a(s({id:e,key:"containers"}))},set:e=>(t,a)=>{let{set:n}=t;n((0,o.Ay)(e),(e=>({...e,...i(e,"containers",a),containers:a})))}}),(e,t)=>{var a;return null===(a=c(e,"containers"))||void 0===a?void 0:a[t]})},68980:(e,t,a)=>{"use strict";a.d(t,{_u:()=>S,_d:()=>b,yl:()=>I,kg:()=>k,Qx:()=>h,dE:()=>A,zi:()=>T,oj:()=>N,zN:()=>C,TN:()=>v,fz:()=>f,Sf:()=>y});a(41393),a(8159),a(98992),a(81454),a(37550),a(62953);var n=a(96540),r=a(47444),o=a(3914),i=a(69765),s=a(80925),c=a(86652),l=a(58388),d=a(43124),u=a(37618);const m={id:null,loaded:!1,fullyLoaded:!1,loading:!1,processing:!1,isOwner:!1,spaceSlug:"",roomSlug:"",spaceId:"",roomId:"",name:"",slug:"",version:0,createdAt:"",updatedAt:"",cardIds:[]},g=(0,r.Iz)({key:"dashboard",default:m,effects:e=>u.LA===e?[e=>{let{setSelf:t,trigger:a}=e;"get"===a&&t(u.mz)}]:[]});var p=a(27078);const h=(0,r.K0)({key:"dashboardState",get:e=>{let{id:t,key:a}=e;return e=>{let{get:n}=e;const r=n(g(t));return a?r[a]:r}},set:e=>{let{id:t,key:a}=e;return(e,n)=>{let{set:r}=e;return a?"function"===typeof n?r(g(t),(e=>({...e,[a]:n(e[a])}))):void 
r(g(t),(e=>({...e,[a]:n}))):r(g(t),{...m,...n})}}}),v=e=>{let{id:t,key:a}=e;return(0,r.L4)(h({id:t,key:a}))},f=(e,t)=>(0,r.vc)(h({id:e,key:t})),A=(0,r.K0)({key:"dashboardsState",get:e=>t=>{let{get:a}=t;return e.map((e=>a(h({id:e}))))},cachePolicy_UNSTABLE:{eviction:"most-recent"}}),y=e=>(0,r.vc)(A(e)),b=(0,r.K0)({key:"dashboardInitializeState",get:e=>t=>{let{get:a}=t;return a(g(e))},set:e=>(t,a)=>{let{set:n}=t;n(h({id:e}),{...a,loaded:!0});const{spaceId:r,roomId:o,slug:i}=a;n((0,d.yC)([r,o,i]),e)}}),S=(0,r.K0)({key:"dashboardFullyInitializeState",set:e=>(t,a)=>{let{set:n}=t,{layout:r={},containerIds:o=[],containers:i={},dashboard:s={},cards:u={}}=a;n(h({id:e}),{...s,loaded:!0,fullyLoaded:!0,loading:!1}),n((0,c.kY)(e),{layout:r,containerIds:o,containers:i}),n((0,l.nG)({id:e}),u);const{spaceId:m,roomId:g,slug:p}=s;n((0,d.yC)([m,g,p]),e)}}),k=(0,r.K0)({key:"dashboardReset",set:e=>t=>{let{get:a,set:n,reset:r}=t;const{spaceId:o,roomId:i,slug:s}=a(g(e));r(g(e)),n(d.H9,null),r((0,d.yC)([o,i,s]))}}),I=(0,r.K0)({key:"dashboardLoadingState",set:e=>(t,a)=>{let{set:n}=t;n(h({id:e,key:"loading"}),a)}}),w=e=>{let{pristine:t={}}=e;return 0===Object.keys(t).length},E=(0,r.K0)({key:"dashboardIsPristineState",get:e=>t=>{let{get:a}=t;const n=a((0,l.nG)({id:e}));return!Object.keys(n).length||Object.keys(n).some((e=>w(n[e])))}}),x=(0,r.K0)({key:"dashboardLayoutIsPristineState",get:e=>t=>{let{get:a}=t;return w(a((0,c.kY)({id:e})))}}),C=e=>{const t=(0,s.e)(),a=()=>{const a=(0,p.My)(t,e);return!a||!a.getChildren().some((e=>{return t=e.getAttribute("pristine"),!(0===Object.keys(t).length);var t}))},[o,i]=(0,n.useState)(a);(0,n.useEffect)((()=>null===t||void 0===t?void 0:t.on("pristineChanged",(()=>i(a)))),[t]);const c=(0,r.vc)(E(e)),l=(0,r.vc)(x(e));return o&&c&&l},T=e=>{const t=(0,o.vt)(),a=(0,i.ID)(),n=(0,d.Tf)();return(0,d.mS)(t,a,e||n)},N=e=>{const t=T();return f(t,e)}},43124:(e,t,a)=>{"use strict";a.d(t,{H9:()=>o,Tf:()=>s,Xv:()=>c,mS:()=>l,yC:()=>i});a(62953);var n=a(47444),r=a(37618);const o=(0,n.eU)({key:"currentDashboardSlug",default:null}),i=(0,n.Iz)({key:"dashboardIdBySlug",default:"",effects:e=>{let[t,a,n]=e;return(0,r.ES)(t)&&(0,r.tB)(a)&&n===r.LA?[e=>{let{setSelf:t,trigger:a}=e;"get"===a&&t(r.LA)}]:[]}}),s=()=>(0,n.vc)(o),c=e=>(0,n.lZ)(o,e),l=(e,t,a)=>(0,n.vc)(i([e,t,a]))},27078:(e,t,a)=>{"use strict";a.d(t,{Ay:()=>F,My:()=>B,qh:()=>z});a(3064),a(9920),a(98992),a(72577),a(3949);var n=a(96540),r=a(17323),o=a(3914),i=a(80925),s=a(11128),c=a(45467),l=a(68980),d=(a(62953),a(47767)),u=a(22332),m=a(58388);a(25440);var g=a(58384),p=a(58168),h=a(46741);const v=e=>{let{category:t,closeParent:a,context:r,openDialog:o,...i}=e;const s=(0,h.JT)("dashboard:Update");return n.createElement(g.t,(0,p.A)({icon:"trashcan",onClick:()=>{o(),a()},"data-ga":"".concat(t,"::click-remove::").concat(r),"data-testid":"removeChart-option",disabled:!s},i),"Remove")},f=e=>{let{category:t,closeParent:a,context:r,openModal:o}=e;const i=(0,h.JT)("dashboard:Update");return n.createElement(g.t,{icon:"pencilOutline",onClick:()=>{a(),o()},"data-testid":"renameChart-option","data-ga":"".concat(t,"::click-rename-chart::").concat(r),disabled:!i},"Rename chart")};var A=a(83199),y=a(67544),b=a(47762);const S=e=>{let{cardId:t,close:a,id:r}=e;const{nodeId:o,chartId:i}=(0,m.bE)(r,t),s=(0,b.xY)(o,"name"),c=(0,y.c6)(r,t);return n.createElement(A.ConfirmationDialog,{"data-ga":"remove-chart-dialog","data-testid":"removeChartDialog",handleConfirm:c,handleDecline:a,message:n.createElement(n.Fragment,null,"You are about to remove 
",n.createElement("strong",null,i),s&&n.createElement(n.Fragment,null," ","of node ",n.createElement("strong",null,s)),".",n.createElement("br",null),"Are you sure you want to continue?"),title:"Remove chart"})};var k=a(15327),I=a(74618),w=a(45765),E=a(78969),x=a(8018),C=a(2025);const T=e=>{let{id:t,cardId:a,close:r}=e;const[o,i]=(0,n.useState)(!0),[s,c]=(0,m.r)(t,a,"title"),[l,d]=(0,n.useState)(s||""),u=l!==s,g=o&&u,p=(0,C.DH)(t),h=(0,n.useCallback)((()=>{c(l),p().then((()=>r()))}),[r,l]);return n.createElement(k.GO,{onClose:r,"data-testid":"renameChart-modal"},n.createElement(I.z,{onClose:r,title:"Rename Chart"},n.createElement(A.Button,{label:"Save Changes",onClick:h,disabled:!g})),n.createElement(w.U,{"data-testid":"renameChart-title"},"Rename chart ",s),n.createElement(k.Yv,null,n.createElement(x.A,{value:l,label:"Name",onChange:e=>{d(e.target.value)},isValid:o,setIsValid:i,isDirty:u,instantFeedback:"all",onKeyDown:e=>e.keyCode===E.I7&&g&&h(),"data-testid":"renameChart-input",placeholder:"Chart name"})))};var N=a(87659);const R="chart-options",M="custom-dashboard",D=e=>{let{id:t,cardId:a,onClick:r}=e;const o=(0,d.Zp)(),i=(0,m.bE)(t,a,"chartId"),{spaceSlug:s,roomSlug:c}=(0,l.fz)(t);return n.createElement(g.t,{icon:"line_chart",onClick:()=>{r();const e=(e=>{return"".concat("chart_").concat("string"===typeof(t=e)?t.replace(/ /g,"_").replace(/:/g,"_").replace(/\(/g,"_").replace(/\)/g,"_").replace(/\./g,"_").replace(/\//g,"_"):"");var t})(i);o("/spaces/".concat(s,"/rooms/").concat(c,"/overview#chartName=").concat(e))},"data-ga":"".concat(R,"::click-go-to-chart::").concat(M),"data-testid":"chart-goToChart"},"Go to Chart")},_=()=>{const[e,,t,a]=(0,N.A)(),[r,,o,i]=(0,N.A)(),s=(0,u.useAttributeValue)("cardId"),c=(0,u.useAttributeValue)("dashboardId");return n.createElement(n.Fragment,null,n.createElement(g.A,{category:R,context:M,testId:"chartDropdown"},(e=>{let{close:a}=e;return n.createElement(n.Fragment,null,n.createElement(D,{id:c,cardId:s,onClick:a}),n.createElement(f,{id:c,cardId:s,closeParent:a,openModal:o,category:R,context:M}),n.createElement(v,{cardId:s,category:R,closeParent:a,context:M,id:c,openDialog:t}))})),e&&n.createElement(S,{cardId:s,close:a,id:c}),r&&n.createElement(T,{cardId:s,close:i,id:c}))};var U=a(60247),P=a(13752),L=a(92815),K=a(72582);const B=(e,t)=>e&&e.getRoot().getChildren().find((e=>e.match({id:t}))),z=e=>t=>"dashboard-page::".concat(e.getAttribute("id"),"::").concat(t),F=e=>{const{spaceId:t,roomId:a}=(0,l.fz)(e),d=(0,i.e)(),u=(0,s.w7)({extraKey:"dashboard-".concat(e),merge:!1,scoped:!0}),m=(0,o.dg)();(0,n.useMemo)((()=>{if(!d)return;let n=B(d,e);n||(n=d.makeContainer({attributes:{...d.getRoot().getAttributes(),id:e,navigation:"pan",nodesScope:u,overlays:{proceeded:{type:"proceeded"}},host:m?"".concat(window.envSettings.agentApiUrl,"/api/v2"):"".concat(window.envSettings.apiUrl,"/api/v3/spaces/").concat(t,"/rooms/").concat(a),leftHeaderElements:[_,r.default],toolboxElements:[K.default,P.default,L.default,U.A]}}),d.appendChild(n))}),[d,e]),(0,c.A)((()=>{if(!d)return;const t=B(d,e);t&&t.getNodes().forEach((e=>e.updateAttribute("nodesScope",u)))}),[d,a,u]),(0,n.useEffect)((()=>()=>{const t=B(d,e);t&&t.destroy()}),[d,a])}},91517:(e,t,a)=>{"use strict";a.d(t,{A:()=>f,S:()=>v});a(41393),a(14905),a(98992),a(81454),a(8872),a(25509),a(65223),a(60321),a(41927),a(11632),a(64377),a(66771),a(12516),a(68931),a(52514),a(35694),a(52774),a(49536),a(21926),a(94483),a(16215),a(62953);var n=a(96540),r=a(47444),o=a(44554),i=a(8196),s=a(30673),c=a(72802),l=a(68980);const 
d={TextCard:"text",ChartCard:"chart",compositeChart:"chart",chart:"chart",text:"text"},u=new Set(["text","chart"]),m=(e,t)=>{var a,n;return{...null!==(a=e[(t=(e=>{let{i:t,id:a=t,x:n,left:r=n,y:o,top:i=o,w:s,width:c=s,h:l,height:d=l}=e;return{id:a,width:c,height:d,left:r,top:i}})(t)).id])&&void 0!==a&&a.type&&"text"!==(null===(n=e[t.id])||void 0===n?void 0:n.type)?i.u6:i.BC,...t}},g=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:"";return e.trim().startsWith("
\n\n    ")?e:"\n\n    ".concat(e,"\n\n
    ")},p="A",h=function(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:[];const a=Array.isArray(e),n=[],r=t.reduce(((e,t)=>{let{type:a,cardID:r,id:o=r,chartID:i,chartId:c=i,nodeID:l,nodeId:m=l,chartMetadata:p,context:h,attributes:v,cardAttributes:f=v,contextScope:A,text:y,...b}=t;if(A=A||[h||(null===p||void 0===p?void 0:p.context)||c],a=d[a]||a,e[o]={id:o,type:a,nodeId:m,contextScope:A},u.has(a)){if(n.push(o),"text"===a)return e[o]={...s.cY,...f,text:g(y||f.text),...e[o]},e;e[o]=(e=>{let{aggregationMethod:t,chartType:a,dimensions:n=[],selectedDimensions:r=n||[],filteredLabels:o=[],selectedLabels:i=o||[],selectedNodeIds:c=[],selectedNodes:l=c||[],selectedInstances:d=[],chartId:u,groupBy:m,groupingMethod:g,id:p,host:h,...v}=e;return{...s.Pk,aggregationMethod:t||"avg",chartType:a||"line",selectedDimensions:Array.isArray(r)?r:[],selectedLabels:Array.isArray(i)?i:i&&"object"===typeof i?Object.keys(i):[],selectedNodes:Array.isArray(l)?l:[],selectedInstances:Array.isArray(d)?d:u?[u]:[],groupBy:Array.isArray(m)?m:m?[m]:["dimension"],groupingMethod:g||"average",id:p,...v}})({...b,...f,chartId:c,...e[o]})}return e}),{});return{layout:a?e?{[p]:e.map((e=>m(r,e)))}:{[p]:t.map((e=>{let{layout:t}=e;return m(r,t)}))}:e||{[p]:[]},containerIds:[p],containers:{[p]:{name:p}},cards:r}},v=e=>{var t;let{id:a,name:n,slug:r,version:i,createdAt:s,updatedAt:c,snapshot:l}=e;const d=Array.isArray(l.uiState)||!(null!==(t=l.uiState)&&void 0!==t&&t.layout),{layout:u,containerIds:g,containers:p,cards:v}=d?h(l.uiState,l.items||[]):l.uiState,f={id:a,name:n,slug:r,version:i,createdAt:s,updatedAt:c,cardIds:Object.keys(v)};return{layout:Object.keys(u).reduce(((e,t)=>({...e,[t]:(0,o.bj)(u[t].map((e=>m(v,e))),"vertical")})),{}),containerIds:g,containers:p,dashboard:f,cards:v}},f=(e,t)=>{let{spaceId:a,spaceSlug:o,roomId:i,roomSlug:s}=t;const[d,u]=(0,n.useState)(!0),m=(0,r.Zs)((t=>{let{set:n}=t;return async()=>{u(!0),n((0,l.yl)(e),!0);const{data:t}=await(0,c.f7)(a,i,e),{layout:r,containerIds:d,containers:m,dashboard:g,cards:p}=v(t);n((0,l._u)(e),{layout:r,containerIds:d,containers:m,dashboard:{...g,spaceSlug:o,roomSlug:s,spaceId:a,roomId:i},cards:p}),u(!1),n((0,l.yl)(e),!1)}}));return(0,n.useEffect)((()=>{e&&m(e)}),[e]),d}},54702:(e,t,a)=>{"use strict";a.d(t,{D4:()=>i,XY:()=>c,kE:()=>o,w5:()=>l});a(41393),a(81454);var n=a(26655);const r=e=>e.map((e=>{let{accountID:t,...a}=e;return{id:t,...a}})),o=e=>n.A.get("/api/v2/spaces/".concat(e,"/members"),{allow401:!0,transform:r}),i=(e,t)=>n.A.get("/api/v2/spaces/".concat(e,"/rooms/").concat(t,"/members"),{allow401:!0,transform:r}),s=e=>{let{account_ids:t}=e;return"account_ids=".concat(t.join(","))},c=(e,t)=>n.A.delete("/api/v1/spaces/".concat(e,"/members"),{paramsSerializer:s,params:{account_ids:t}}),l=(e,t,a)=>n.A.patch("/api/v1/spaces/".concat(e,"/members/").concat(t),a)},35273:(e,t,a)=>{"use strict";a.d(t,{A:()=>o,u:()=>r});var n=a(47444);const r={id:null,avatarURL:null,deactivated:!1,email:"",joinedAt:null,name:"",role:""},o=(0,n.eU)({key:"member",default:{}})},70716:(e,t,a)=>{"use strict";a.d(t,{Uv:()=>l,WJ:()=>d,Z6:()=>c,m:()=>s});a(9920),a(41393),a(98992),a(3949),a(81454),a(62953);var n=a(47444),r=a(64454),o=a(65570),i=a(35273);const s=(0,n.K0)({key:"memberState",get:e=>{let{id:t,key:a}=e;return e=>{let{get:n}=e;const r=n(i.A)[t]||i.u;return a?r[a]:r}},set:e=>{let{id:t,key:a}=e;return(e,n)=>{let{set:r}=e;r(i.A,a?"function"!==typeof 
n?e=>({...e,[t]:{...e[t],[a]:n}}):e=>({...e,[t]:{...e[t],[a]:n(e[t][a])}}):e=>({...e,[t]:{...i.u,...e[t],...n}}))}}}),c=(0,n.K0)({key:"membersState",get:e=>t=>{let{get:a}=t;return e.map((e=>a(s({id:e}))))},set:()=>(e,t)=>{let{set:a,get:n}=e;const r=n(i.A);t.forEach((e=>{r[e.id]=e})),a(i.A,r)},cachePolicy_UNSTABLE:{eviction:"most-recent"}}),l=e=>(0,n.vc)(c(e)),d=(0,n.gD)({key:"membersInitialize",get:e=>{let{get:t}=e;return t(i.A)},set:(e,t)=>{let{set:a}=e,{values:n,merge:s}=t;return a(i.A,(e=>{const t=s?(0,r.A)(e,n):n;return(0,o.Ay)(t,e)?e:t}))}})},54621:(e,t,a)=>{"use strict";a.d(t,{DF:()=>S,_F:()=>h,ZB:()=>v,xK:()=>A,nT:()=>y,NU:()=>b,tV:()=>f});a(17333),a(3064),a(41393),a(98992),a(54520),a(72577),a(81454),a(62953);var n=a(96540),r=a(47444),o=a(3914),i=a(69765);const s=e=>{let{spaceSlug:t,roomSlug:a}=e;return"".concat(t,"-").concat(a,"-tabs")},c=(e,t)=>{let{spaceSlug:a,roomSlug:n}=e;const r=s({spaceSlug:a,roomSlug:n});Promise.resolve().then((()=>{try{localStorage.setItem(r,JSON.stringify(t))}catch(e){}}))},l=(0,r.Iz)({key:"navigation",default:function(e){let{spaceSlug:t,roomSlug:a}=e,n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:[];try{const e=s({spaceSlug:t,roomSlug:a}),r=localStorage.getItem(e);return r?JSON.parse(r):n}catch(r){return n}}}),d=(0,r.K0)({key:"navigationTabsState",get:e=>{let{spaceSlug:t,roomSlug:a}=e;return e=>{let{get:n}=e;return n(l({spaceSlug:t,roomSlug:a}))}},set:e=>{let{spaceSlug:t,roomSlug:a}=e;return(e,n)=>{let{set:r}=e;n&&t&&a&&r(l({spaceSlug:t,roomSlug:a}),(e=>{if(e.find((e=>{let{id:t}=e;return n.id===t})))return e;const r=[...e,n];return c({spaceSlug:t,roomSlug:a},r),r}))}}}),u=(0,r.K0)({key:"navigationTabsState/remove",get:()=>()=>null,set:e=>{let{spaceSlug:t,roomSlug:a}=e;return(e,n)=>{let{set:r}=e;r(l({spaceSlug:t,roomSlug:a}),(e=>{const r=e.filter(((e,t)=>t!==n));return c({spaceSlug:t,roomSlug:a},r),r}))}}}),m=(0,r.K0)({key:"navigationTabsState/removeById",get:()=>()=>null,set:e=>{let{spaceSlug:t,roomSlug:a}=e;return(e,n)=>{let{set:r}=e;r(l({spaceSlug:t,roomSlug:a}),(e=>{const r=e.filter((e=>e.id!==n));return c({spaceSlug:t,roomSlug:a},r),r}))}}}),g=(0,r.K0)({key:"navigationTabsState/reorder",get:()=>()=>null,set:e=>{let{spaceSlug:t,roomSlug:a}=e;return(e,n)=>{let{set:r}=e,{sourceIndex:o,destinationIndex:i}=n;r(l({spaceSlug:t,roomSlug:a}),(e=>{const n=[...e],[r]=n.splice(o,1);return n.splice(i,0,r),c({spaceSlug:t,roomSlug:a},n),n}))}}}),p=(0,r.K0)({key:"navigationTabsState/removeById",get:()=>()=>null,set:e=>{let{spaceSlug:t,roomSlug:a}=e;return(e,n)=>{let{set:r}=e,{id:o,slug:i,path:s}=n;r(l({spaceSlug:t,roomSlug:a}),(e=>{const n=e.map((e=>e.id!==o?e:{...e,title:i,path:s}));return c({spaceSlug:t,roomSlug:a},n),n}))}}}),h=()=>{const e=(0,o.bq)(),t=(0,i.QW)();return(0,r.vc)(d({spaceSlug:e,roomSlug:t}))},v=e=>{let{title:t,path:a,id:s,destination:c,type:l="nodes",params:u,isReady:m=!0,...g}=e;const p=(0,i.r9)(),h=(0,o.bq)(),v=(0,i.QW)(),f=(0,r.lZ)(d({spaceSlug:h,roomSlug:v}));(0,n.useEffect)((()=>{if(!m)return;if(!s||!t)return;f({id:s,title:t,type:l,icon:"alerts"===l?"alarm":"node_hollow",params:u,path:"".concat(a||p,"/").concat(l,"/").concat(c),...g})}),[p,t,a,s,c,l,f,m])},f=()=>{const e=(0,i.r9)(),t=(0,o.bq)(),a=(0,i.QW)(),s=(0,r.lZ)(d({spaceSlug:t,roomSlug:a}));return(0,n.useCallback)((t=>{let{title:a,path:n,id:r,destination:o,type:i="nodes",params:c,...l}=t;if(!r||!a)return;s({id:r,title:a,type:i,icon:"alerts"===i?"alarm":"node_hollow",params:c,path:"".concat(n||e,"/").concat(i,"/").concat(o),...l})}),[e,s])},A=()=>{const 
e=(0,o.bq)(),t=(0,i.QW)();return(0,r.lZ)(u({spaceSlug:e,roomSlug:t}))},y=()=>{const e=(0,o.bq)(),t=(0,i.QW)();return(0,r.lZ)(m({spaceSlug:e,roomSlug:t}))},b=()=>{const e=(0,o.bq)(),t=(0,i.QW)();return(0,r.lZ)(g({spaceSlug:e,roomSlug:t}))},S=()=>{const e=(0,o.bq)(),t=(0,i.QW)();return(0,r.lZ)(p({spaceSlug:e,roomSlug:t}))}},9224:(e,t,a)=>{"use strict";a.d(t,{Bz:()=>h,F:()=>y,UL:()=>l,VP:()=>u,az:()=>i,e0:()=>A,iY:()=>m,ih:()=>b,q5:()=>s,sm:()=>p,tz:()=>d,uQ:()=>c,ys:()=>g});var n=a(26655),r=a(37618),o=a(49286);const i=e=>n.A.get("/api/v1/agents/".concat(e,"/user_agent_node_access"),{transform:o.bn}),s=(e,t,a)=>n.A.get("/api/v2/bearer_get_token?node_id=".concat(e,"&claim_id=").concat(a,"&machine_guid=").concat(t),{transform:function(){let{token:e,expiration:t,bearer_protection:a}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return{token:e,expiration:t,bearerProtection:a}}}),c=e=>n.A.get("/api/v1/accounts/".concat(e,"/nodes")),l=(e,t)=>n.A.post("/api/v1/accounts/".concat(e,"/nodes/").concat(t,"/touch")),d=(e,t)=>n.A.get("/api/v1/accounts/".concat(e,"/nodes/").concat(encodeURIComponent(t))),u=(e,t)=>{const a=(0,r.ES)(t);return n.A.get(a?"/api/v1/info":"/api/v1/nodes/".concat(e,"/info"),{transform:t=>({...t,nodeId:e}),baseURL:a?window.envSettings.agentApiUrl:""})},m=e=>n.A.get("/api/v1/agents/".concat(e,"/info")),g=e=>n.A.get("/api/v1/agents/".concat(e,"/user_access"),{transform:e=>e.authorizedNodeIDs}).catch((()=>({data:[]}))),p=(e,t)=>n.A.delete("/api/v1/accounts/".concat(e,"/nodes"),{params:{node_ids:t.join(",")}}),h=(e,t,a,r)=>n.A.put("/api/v1/accounts/".concat(e,"/nodes/").concat(t),{name:a,urls:r}),v="*",f=[v],A=(e,t,a)=>{let{baselineAfter:o,baselineBefore:i,highlightAfter:s,highlightBefore:c,method:l,options:d,group:u,nodeIds:m,points:g,context:p,aggregation:h="avg",groupBy:A=[]}=a;return(0,r.ES)(e)?n.A.get("".concat(window.envSettings.agentApiUrl,"/api/v2/weights"),{params:{format:"json",options:"".concat(Array.isArray(d)?d.join("|"):d,"|minify|nonzero|unaligned"),contexts:v,scope_contexts:p||v,scope_nodes:m.join("|")||v,nodes:v,instances:v,dimensions:v,labels:v,group_by:A.join("|"),aggregation:h,method:l||"ks2",time_group:u||"average",time_group_options:"",time_resampling:0,after:Math.floor(s/1e3),before:Math.floor(c/1e3),points:g,baseline_after:Math.floor(o/1e3),baseline_before:Math.floor(i/1e3),timeout:18e4}}):n.A.post("/api/v3/spaces/".concat(e,"/rooms/").concat(t,"/weights"),{selectors:{nodes:f,contexts:f,dimensions:f,labels:f,alerts:f},aggregations:{time:{time_group:u||"average",time_group_options:"",time_resampling:0},metrics:[{group_by:A,aggregation:h}]},window:{after:Math.floor(s/1e3),before:Math.floor(c/1e3),points:g,baseline:{after:Math.floor(o/1e3),before:Math.floor(i/1e3)}},scope:{nodes:m,contexts:p?[p]:f},method:l||"ks2",options:d?Array.isArray(d)?d:[d]:[],timeout:18e4})},y=e=>n.A.get("/api/v3/spaces/".concat(e,"/settings/preferred_nodes")),b=(e,t)=>n.A.post("/api/v3/spaces/".concat(e,"/settings/preferred_nodes"),{preferred_node_ids:t})},41739:(e,t,a)=>{"use strict";a.d(t,{A:()=>o,q:()=>r});var n=a(47444);const r=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return{fullyLoaded:!1,loaded:!1,id:(null===e||void 0===e?void 0:e.nd)||(null===e||void 0===e?void 
0:e.mg)||null,mg:null,nd:null,isDeleted:!1,name:"",version:"",ni:null,labels:{},labelKeys:[],hw:{architecture:"",cpuFrequency:"",cpus:"",memory:"",diskSpace:"",virtualization:"",container:""},os:{id:"",nm:"",v:"",kernel:{nm:"",v:""}},capabilities:{},state:"",isProtobufCapable:!0,urls:[],accessCount:0,lastAccessTime:"",updateSeverity:"",hasAccessibleData:!1,isLive:!1,nodeStatus:null,isPreferred:!0}},o=(0,n.eU)({key:"nodes",default:{}})},11128:(e,t,a)=>{"use strict";a.d(t,{Oj:()=>k,PF:()=>I,QY:()=>p,_e:()=>w,a7:()=>b,eO:()=>A,qr:()=>f,u7:()=>S,w7:()=>y});a(17333),a(9920),a(41393),a(14905),a(8159),a(98992),a(54520),a(3949),a(81454),a(8872),a(37550),a(62953);var n=a(47444),r=a(27467),o=a(67990),i=a(69765),s=a(47762);const c=e=>(t,a)=>!Array.isArray(a)||!a.length||"".concat(a).includes(t[e]),l={nodeStatuses:c("nodeStatus"),nodeLabels:(e,t)=>!((!Array.isArray(t)||t.length)&&t)||Object.keys(e.labels).some((a=>!Array.isArray(t)||!t.length||t.includes("".concat(a,"|").concat(e.labels[a])))),nodeCapabilities:c("capabilityKeys"),nodeVersions:c("version")},d={excludedNodeIds:c("id")},u=[],m=(0,n.K0)({key:"nodesFilters",get:e=>{let{extraKey:t,key:a,omit:n,merge:o}=e;return e=>{let{get:i}=e;const s=n&&n.split(":::"),c=t?i(p({key:a,merge:!1})):[],u=!t&&!!s&&!s.includes("selectedNodeIds"),m=s&&s.includes("selectedNodeIds")||t&&o?c:i((0,r.GA)({key:a,extraKey:t,flavour:"arr",param:"selectedNodeIds"}))||[];let g=[];const h=i((0,r.GA)({key:a,extraKey:t,flavour:"arr",merge:o}));return Object.keys(h).forEach((e=>{var n;const c=!!s&&s.includes(e);(l[e]||d[e])&&null!==(n=h[e])&&void 0!==n&&n.length&&(c||g.push([e,c?[]:h[e]]),o&&t&&g.push([e,i((0,r.GA)({key:a,flavour:"arr",param:e}))||[]]))})),[g,m,c,u]}},set:e=>{let{extraKey:t,key:a}=e;return(e,n)=>{let{get:o,set:i}=e,{param:s,removedValue:c}=n;const l=o((0,r.GA)({key:a,extraKey:t,param:s,flavour:"arr"}));i((0,r.GA)({key:a,extraKey:t,param:s,flavour:"arr"}),Array.isArray(c)?c:l.filter((e=>e!==c)))}}}),g=(0,n.K0)({key:"nodesFiltered",get:e=>{let{key:t,extraKey:a,omit:n,keepAll:o=!1,merge:i}=e;return e=>{let{get:c}=e;const[u,g,p,h]=c(m({key:t,extraKey:a,omit:n,merge:i})),v=c((0,s.kr)(t));if(!u.length&&(null===g||void 0===g||!g.length))return v;const f=((e,t)=>{let{selectedNodeIds:a,excludedNodeIds:n,globalNodeIds:r,omittedNonNodeFilter:o}=t;const i=a.reduce(((e,t)=>(e[t]=!n||n.includes(t),e)),{}),s=r.reduce(((e,t)=>(e[t]=!n||n.includes(t),e)),{});return t=>!(r.length&&!s[t.id])&&(!(!a.length||!i[t.id])||!(!o&&!e.length&&a&&a.length)&&!e.some((e=>{let[a,n]=e;return d[a]?d[a](t,n):!!l[a]&&!l[a](t,n)})))})(u,{selectedNodeIds:g,excludedNodeIds:c((0,r.GA)({key:t,extraKey:a,flavour:"arr",param:"excludedNodeIds"})),globalNodeIds:p,omittedNonNodeFilter:h});return o?v.map((e=>f(e)?e:{...e,hidden:!0})):v.filter(f)}}}),p=(0,n.K0)({key:"nodeIdsFiltered",get:e=>{let{key:t,extraKey:a,omit:n,keepAll:r,emptyIfAll:i=!0,merge:s,scoped:c}=e;return e=>{let{get:l}=e;const d=l(g({key:t,extraKey:a,omit:n,keepAll:r,merge:s}));return i&&l((0,o.dT)({id:t,key:"ids"})).length===d.length?c?l(p({key:t,extraKey:a,omit:n,keepAll:r,emptyIfAll:!1,merge:!0})):u:d.map((e=>{let{id:t,hidden:a}=e;return r?[t,!a]:t}))}}}),h={Live:0,Stale:1,Offline:2},v=["_aclk_available","_aclk_ng_new_cloud_protocol","_aclk_proxy"],f={nodeStatuses:(e,t)=>h[e.id]-h[t.id],selectedNodeIds:(e,t)=>h[e.id]-h[t.id],nodeLabels:(e,t)=>v.includes(e.id)?1:v.includes(t.id)?-1:f.default(e,t),nodeVersions:(e,t)=>t.id.localeCompare(e.id,void 0,{sensitivity:"accent",ignorePunctuation:!0}),default:(e,t)=>e.id.localeCompare(t.id,void 
0,{sensitivity:"accent",ignorePunctuation:!0})},A=function(){let{key:e,extraKey:t,omit:a,keepAll:r=!1,merge:o=!0,roomSlug:s}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const c=(0,i.ID)(s);return(0,n.vc)(g({extraKey:t,key:e||c,omit:a,keepAll:r,merge:o}))},y=function(){let{key:e,extraKey:t,omit:a,keepAll:r=!1,emptyIfAll:o=!0,merge:s=!1,scoped:c=!1}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const l=(0,i.ID)();return(0,n.vc)(p({extraKey:t,key:e||l,omit:a,keepAll:r,emptyIfAll:o,merge:s,scoped:c}))},b=function(){let{key:e,extraKey:t,merge:a=!0}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const n=(0,i.ID)();return(0,r.rW)("selectedNodeIds",{key:e||n,extraKey:t,flavour:"arr",merge:a,defaultValue:u})},S=function(){let{key:e,extraKey:t}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const a=(0,i.ID)();return(0,r.rI)("selectedNodeIds",{key:e||a,extraKey:t,flavour:"arr"})},k=function(){let{key:e,extraKey:t,merge:a=!1}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return[b({key:e,extraKey:t,merge:a}),S({key:e,extraKey:t})]},I=function(){let{key:e,extraKey:t}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const a=function(){let{key:e,extraKey:t}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const a=(0,i.ID)();return(0,r.rW)("excludedNodeIds",{key:e||a,extraKey:t,flavour:"arr",defaultValue:u})}({key:e,extraKey:t}),n=function(){let{key:e,extraKey:t}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const a=(0,i.ID)();return(0,r.rI)("excludedNodeIds",{key:e||a,extraKey:t,flavour:"arr"})}({key:e,extraKey:t});return[a,n]},w=function(){let{key:e,extraKey:t,omit:a,merge:r=!1}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const o=(0,i.ID)();return(0,n.L4)(m({key:e||o,extraKey:t,omit:a,merge:r}))}},47762:(e,t,a)=>{"use strict";a.d(t,{BQ:()=>y,BU:()=>M,Du:()=>f,GE:()=>P,GN:()=>b,Gn:()=>z,Gt:()=>E,Ig:()=>_,TG:()=>N,Ux:()=>Z,Y7:()=>k,Zl:()=>V,d3:()=>H,dN:()=>O,gl:()=>j,je:()=>A,kr:()=>w,nl:()=>q,no:()=>K,ss:()=>G,th:()=>I,xY:()=>S,yN:()=>C});a(17333),a(41393),a(14905),a(98992),a(54520),a(81454),a(8872),a(62953);var n=a(96540),r=a(47444),o=a(47767),i=a(64454),s=a(65570),c=a(3914),l=a(56639),d=a(54308),u=a(67990),m=a(69765),g=a(41739),p=a(9224);const h=(0,r.K0)({key:"nodeInfoState",get:e=>{let{nodeId:t,spaceId:a}=e;return async()=>{try{return(await(0,p.VP)(t,a)).data}catch(e){}}}}),v=(0,r.K0)({key:"preferredNodes",get:e=>()=>e?(0,p.F)(e):Promise.resolve()}),f=()=>{var e;const t=(0,c.vt)(),a=(0,r.xf)(v(t)),n=(0,r.RH)(v(t));return{loaded:"loading"!==a.state,nodes:(null===(e=a.contents)||void 0===e?void 0:e.data)||[],hasError:"hasError"===a.state,refresh:n}},A=()=>{const e=(0,c.vt)();return t=>(0,p.ih)(e,t)},y=(e,t)=>(0,r.xf)(h({nodeId:e,spaceId:t})),b=(0,r.K0)({key:"nodeState",get:e=>{let{id:t,key:a}=e;return e=>{let{get:n}=e;const r=n(g.A)[t]||(0,g.q)();return a?r[a]:r}},set:e=>{let{id:t,key:a}=e;return(e,n)=>{let{set:r}=e;return a?"function"===typeof n?r(g.A,(e=>{const r=e[t]||(0,g.q)();return{...e,[t]:{...r,[a]:n(r[a])}}})):void r(g.A,(e=>{const r=e[t]||(0,g.q)();return{...e,[t]:{...r,[a]:n}}})):r(g.A,(e=>({...e,[t]:{...(0,g.q)(),...n}})))}},cachePolicy_UNSTABLE:{eviction:"most-recent"}}),S=(e,t)=>(0,r.vc)(b({id:e,key:t})),k=()=>(0,r.Zs)((e=>{let{snapshot:t}=e;return e=>t.getLoadable(b({id:e})).contents}),[]),I=(0,r.K0)({key:"nodesState",get:e=>t=>{let{get:a}=t;const n=a(g.A);return 
e.map((e=>n[e]||(0,g.q)()))},cachePolicy_UNSTABLE:{eviction:"most-recent"}}),w=(0,r.K0)({key:"nodesState",get:e=>t=>{let{get:a}=t;if(!e){const t=a(l.Li),n=a((0,l.aR)(t)),r=a(d.yz);e=a((0,d.x4)([n,r]))}const n=a((0,u.dT)({id:e,key:"ids"}));return a(I(n))},cachePolicy_UNSTABLE:{eviction:"most-recent"}}),E=e=>{const t=(0,u.CK)();return(0,r.vc)(I(e||t))},x=(0,r.K0)({key:"cloudNodeIdsSelector",get:e=>t=>{let{get:a}=t;const n=a(g.A);return e.reduce(((e,t)=>{var a;return null!==(a=n[t])&&void 0!==a&&a.nd?[...e,n[t].nd]:e}),[])},cachePolicy_UNSTABLE:{eviction:"most-recent"}}),C=e=>(0,r.vc)(x(e||[])),T=(0,r.K0)({key:"queryableNodeIdsState",get:e=>t=>{let{get:a}=t;const n=a(g.A);return e.filter((e=>{var t,a;return(null===(t=n[e])||void 0===t?void 0:t.isLive)||"stale"===(null===(a=n[e])||void 0===a?void 0:a.state)}))},cachePolicy_UNSTABLE:{eviction:"most-recent"}}),N=e=>{const t=(0,u.CK)();return(0,r.vc)(T(e||t))},R=(0,r.K0)({key:"liveNodeIdsState",get:e=>t=>{let{get:a}=t;const n=a(g.A);return e.filter((e=>{var t;return null===(t=n[e])||void 0===t?void 0:t.isLive}))},cachePolicy_UNSTABLE:{eviction:"most-recent"}}),M=e=>{const t=(0,u.CK)();return(0,r.vc)(R(e||t))},D=(0,r.K0)({key:"staleNodeIdsState",get:e=>t=>{let{get:a}=t;const n=a(g.A);return e.filter((e=>{var t;return"stale"===(null===(t=n[e])||void 0===t?void 0:t.state)}))},cachePolicy_UNSTABLE:{eviction:"most-recent"}}),_=e=>{const t=(0,u.CK)();return(0,r.vc)(D(e||t))},U=(0,r.K0)({key:"offlineNodeIdsState",get:e=>t=>{let{get:a}=t;const n=a(g.A);return e.filter((e=>{var t;return null===(t=n[e])||void 0===t?void 0:t.isOffline}))},cachePolicy_UNSTABLE:{eviction:"most-recent"}}),P=e=>{const t=(0,u.CK)();return(0,r.vc)(U(e||t))},L=(0,r.K0)({key:"unseenNodeIdsState",get:e=>t=>{let{get:a}=t;const n=a(g.A);return e.filter((e=>{var t;return null===(t=n[e])||void 0===t?void 0:t.isUnseen}))},cachePolicy_UNSTABLE:{eviction:"most-recent"}}),K=e=>{const t=(0,u.CK)();return(0,r.vc)(L(e||t))},B=(0,r.K0)({key:"needUpdateNodeIdsState",get:e=>{let{ids:t,severities:a=["warning","critical"]}=e;return e=>{let{get:n}=e;const r=n(g.A);return t.filter((e=>{var t;return a.includes(null===(t=r[e])||void 0===t?void 0:t.updateSeverity)}))}},cachePolicy_UNSTABLE:{eviction:"most-recent"}}),z=function(){let{ids:e,severity:t}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const a=M();return(0,r.vc)(B({ids:e||a,...t&&{severities:[t]}}))},F=(0,r.K0)({key:"fnNodeIdsState",get:e=>t=>{let{get:a}=t;const n=a(g.A);return e.filter((e=>{var t;return null===(t=n[e])||void 0===t||null===(t=t.capabilities.funcs)||void 0===t?void 0:t.enabled}))},cachePolicy_UNSTABLE:{eviction:"most-recent"}}),O=e=>{const t=(0,u.CK)();return(0,r.vc)(F(e||t))},j=(0,r.gD)({key:"nodesState/initialize",get:e=>{let{get:t}=e;return t(g.A)},set:(e,t)=>{let{set:a}=e,{values:n,merge:r}=t;return a(g.A,(e=>{const t=r?(0,i.A)(e,n):n;return(0,s.Ay)(t,e)?e:t}))}}),G=()=>(0,r.vc)(j),W=()=>{const e=(0,m.r9)();return e&&"".concat(e,"/nodes")},V=e=>{const t=W();return"".concat(t,"/").concat(e)},Z=()=>{const e=W();return t=>"".concat(e,"/").concat(t)},H=function(e){let{alertId:t}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};const a=(()=>{const e=(0,o.Zp)(),t=W();return(0,n.useCallback)(((a,n)=>{e("".concat(t,"/").concat(a),n?{state:n}:"")}),[t,e])})();return(0,n.useCallback)((()=>a(e,{alertId:t})),[a])},q=()=>{var e;return null===(e=(0,o.RQ)("/spaces/:spaceSlug/rooms/:roomSlug/nodes/:nodeId"))||void 0===e||null===(e=e.params)||void 0===e?void 0:e.nodeId}},49389:(e,t,a)=>{"use 
strict";a.d(t,{ID:()=>i,fe:()=>c,sH:()=>s,su:()=>r});a(25440),a(41393),a(81454),a(62953);var n=a(78969);const r=e=>1===e?"Node":"Nodes",o=[1,26],i=function(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:o;if(!e||"unknown"===e)return!1;const[a,n,r]=(e=>e.replace(/^v/,"").split("."))(e).map((e=>Number(e)));return!(a&&!isNaN(a)&&!isNaN(n))||(at[0])&&(nt[1])&&r{let{container:t,os:a,_install_type:r,version:o}=e;const c=i(o,n.x7);if(!t&&!a&&!r)return s.default;return s["docker"===t?"docker":"mac"===a?"mac":"binpkg"===r&&c?"binpkg":"default"]}},71856:(e,t,a)=>{"use strict";a.d(t,{C4:()=>m,D_:()=>h,HA:()=>s,N4:()=>c,Oh:()=>l,PT:()=>n,WB:()=>o,a$:()=>i,dZ:()=>g,fF:()=>u,r7:()=>r,rx:()=>p,w8:()=>d});const n={label:"All rooms",value:""},r={plan:"Your plan does not support this configuration.",role:"You do not have permission to change Global Notification configurations."},o="You do not have permission to add configurations. Contact a space administrator to add this configuration.",i={Discord:{name:"integrationDiscordColored"},Webhook:{name:"integrationWebhookColored"},Email:{name:"integrationEmailColored"},MobilePush:{color:"successLite",name:"mobilePushNotifications"},PagerDuty:{name:"integrationPagerdutyColored"},Slack:{name:"integrationSlackColored"},Splunk:{name:"integrationSplunk"},Opsgenie:{name:"integrationOpsgenieColored"},Mattermost:{name:"integrationMattermostColored"},RocketChat:{name:"integrationRocketChatColored"},MobileApp:{name:"integrationMobileAppColored"},AwsSns:{name:"integrationAWSSNSColored"},MicrosoftTeams:{name:"integrationTeamsColored"},Telegram:{name:"integrationTelegramColored"},VictorOps:{name:"integrationVictorOpsColored"}},s={PERSONAL:{icon:"userPress",tooltip:"Notification methods whose destination will be a user-specific attribute, e.g. user's e-mail."},SYSTEM:{icon:"systemOverviewPress",tooltip:"Notification methods that the destination will be a target that usually isn't specific to a single user, e.g. 
Slack channel."}},c={ALARMS_SETTING_ALL:{value:"ALARMS_SETTING_ALL",label:"All Alerts and unreachable"},ALARMS_SETTING_ALL_BUT_UNREACHABLE:{value:"ALARMS_SETTING_ALL_BUT_UNREACHABLE",label:"All Alerts"},ALARMS_SETTING_CRITICAL:{value:"ALARMS_SETTING_CRITICAL",label:"Critical only"},ALARMS_SETTING_UNREACHABLE:{value:"ALARMS_SETTING_UNREACHABLE",label:"Unreachable only"}},l={label:"User defined",value:"USER_DEFINED"},d={...c,USER_DEFINED:l},u={...c,ALARMS_SETTING_NONE:{value:"ALARMS_SETTING_NONE",label:"No notifications"}},m={"^.*$":"input"},g={string:"input"},p=/(http(s)?:\/\/.)?(www\.)?[-a-zA-Z0-9@:%._+~#=]{2,256}\.[a-z]{2,6}\b([-a-zA-Z0-9@:%_+.~#?&/=]*)/g,h="607bfd3c-02c1-4da2-b67a-0d01b518ce5d"},88982:(e,t,a)=>{"use strict";a.d(t,{A:()=>i,n:()=>o});a(3064),a(41393),a(98992),a(72577),a(81454),a(62953);var n=a(71856),r=a(14994);const o=e=>{const t=(0,r.DL)().find((e=>{let{name:t}=e;return"All nodes"==t}));return t?e?t[e]:t:null},i=function(){let{all:e,internal:t}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const a=(0,r.DL)().map((e=>{let{id:t,name:a}=e;return{label:a,value:t}})),o=[];return t&&o.push(n.Oh),e&&o.push(n.PT),o.push(...a),o}},27467:(e,t,a)=>{"use strict";a.d(t,{GA:()=>b,r$:()=>x,l6:()=>w,N9:()=>I,rW:()=>S,Fw:()=>E,rI:()=>k});var n={};a.r(n),a.d(n,{global:()=>d,specific:()=>v});var r={};a.r(r),a.d(r,{global:()=>f,specific:()=>m});a(14905),a(98992),a(8872),a(62953);var o=a(47444),i=a(69765);const s={arr:"arr",bool:"bool",int:"int",val:"val",obj:"obj",dec:"dec"};a(41393),a(81454);var c=a(38819),l=a(82838);const d=()=>[e=>{let{onSet:t}=e;t(((e,t)=>{let{after:a,before:n,correlation:r,utc:o,forcePlay:i,...s}=e,{correlation:d,forcePlay:u,...m}=t;n=a<0?0:n;const{offset:g=""}=(0,l.M)("default",o||m.utc),p=(h=s.modalParams||m.modalParams)?Object.keys(h).map((e=>"".concat(e,"=").concat(h[e]))).join(","):"";var h;const{highlight:v,...f}={...m,...s,after:a,before:n,offset:g,metrics_correlation:"undefined"===typeof r?d:r,force_play:"undefined"===typeof i?u:i,utc:o,modalParams:p};v.after?(0,c.Z8)({highlight_after:v.after,highlight_before:v.before,...f}):((0,c.Z8)(f),(0,c.Pg)(["highlight_after","highlight_before"]))}))}];a(25440);const u={default:e=>e?e.split(","):[],[s.arr]:e=>e?e.split(","):[],[s.bool]:e=>"true"===e,[s.int]:e=>e?parseInt(e,10):0,[s.val]:e=>e,[s.dec]:e=>e?parseFloat(e):0,[s.obj]:e=>{try{return JSON.parse(decodeURIComponent(e))}catch(t){return{}}}},m=e=>{let{key:t,extraKey:a=""}=e;if(!t&&!a)return{};const n="".concat(t,"-").concat(a,"-"),r=(0,c.PP)();let o=[];const i=Object.keys(r).reduce(((e,t)=>{if(!t.startsWith(n))return e;if(t.startsWith(n+n))return o.push(t),e;const a=t.replace(n,""),[i]=a.split("-").reverse(),c=s[i]?u[i]:u[a]||u.default;return e[a]=c(r[t]),e}),{});return(0,c.Pg)(o),i},g=e=>e.toString(),p={default:e=>Array.isArray(e)?e.join(","):(e||"").toString(),[s.arr]:e=>Array.isArray(e)?e.join(","):(e||"").toString(),[s.bool]:g,[s.int]:g,[s.val]:g,[s.dec]:g,[s.obj]:e=>encodeURIComponent(JSON.stringify(e||{}))},h=e=>Array.isArray(e)?s.arr:"boolean"===typeof e?s.bool:"number"===typeof e?e%1===0?s.int:s.dec:e&&"object"===typeof e?s.obj:s.val,v=e=>{let{key:t,extraKey:a=""}=e;const n="".concat(t,"-").concat(a,"-");return[e=>{let{setSelf:n,trigger:r,node:o,getLoadable:i}=e;if("get"===r){if(Object.keys(i(o).contents).length)return;const e=(0,c.PP)();if(Object.keys(e).length)return;setTimeout((()=>n(m({key:t,extraKey:a}))))}},e=>{let{onSet:t}=e;t((function(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};const 
a=[],r=Object.keys(e).reduce(((r,o)=>{const i=o.split("-").reverse()[0];let c=i;if(s[c]||(s[c]||(c=h(e[o])),s[c]||(c=h(t[o]))),"undefined"===typeof e[o])return a.push(n+o),delete r[o],r;const l=p[c]||p[o]||p.default,d="".concat(n).concat(o).concat(s[i]?"":"-".concat(c));return r[d]=l(e[o]),r[d]||a.push(d),r}),{});(0,c.Z8)(r),(0,c.Pg)(a)}))}]},f=()=>{const{after:e=-900,before:t=0,utc:a="default",highlight_after:n,highlight_before:r,metrics_correlation:o=!1,timezoneName:i,modal:s="",modalTab:d="",modalParams:u,force_play:m}=(0,c.PP)(),{offset:g=0,utc:p,text:h=""}=(0,l.M)("default",a),v=i&&"undefined"!==i?i:h,f={after:isNaN(Number(n))?null:Number(n),before:isNaN(Number(r))?null:Number(r)},A=isNaN(Number(e))?-900:Number(e),y={after:A,before:A<0?0:isNaN(Number(t))?A+900:Number(t),utc:p,offset:g,timezoneName:v,modal:s,modalTab:d};return f.after?(0,c.Z8)({highlight_after:f.after,highlight_before:f.before,metrics_correlation:o,...y}):(0,c.Z8)({metrics_correlation:o,...y}),{highlight:f,correlation:"true"===o,forcePlay:"true"===m,...y,modalParams:(b=u,b&&b.split(",").reduce(((e,t)=>{const[a,n]=t.split("=");return e[a]=n,e}),{}))};var b},A=(0,o.Iz)({key:"paramsAtom",default:e=>(r[e.key||"global"]||m)(e),effects:e=>(n[e.key||"global"]||v)(e)}),y=(e,t,a)=>{if("global"===e)return t;const[n]=t.split("-").reverse();return s[n]?t:s[a]?"".concat(t,"-").concat(a):t},b=(0,o.K0)({key:"paramsState",get:e=>{let{key:t="global",extraKey:a,param:n,flavour:r,merge:o}=e;return e=>{let{get:i}=e,c=i(A({key:t,extraKey:a}));if(a&&o&&(c=Object.keys(c).reduce(((e,t)=>("undefined"===typeof c[t]||Array.isArray(c[t])&&!c[t].length||null===c[t]||(e[t]=c[t]),e)),{...i(A({key:t,extraKey:void 0}))})),n){const e=y(t,n,r);return r||"global"===t?e in c?c[e]:c[n]:(console.warn("Will use val as flavour for",n),c[y(t,n,"val")])}return Object.keys(c).reduce(((e,a)=>(e[((e,t,a)=>{if("global"===e)return t;const[n,...r]=t.split("-").reverse();return s[a||n]&&s[n]?r.reverse().join("-"):t})(t,a,r)]=c[a],e)),{})}},set:e=>{let{key:t="global",extraKey:a,param:n,flavour:r}=e;return(e,o)=>{let{set:i}=e;n&&(n=y(t,n,r)),i(A({key:t,extraKey:a}),(e=>"function"===typeof o?n?{...e,[n]:o(e[n])}:{...e,...o(e)}:n?{...e,[n]:o}:{...e,...o}))}}}),S=function(e){let{key:t,extraKey:a,flavour:n,defaultValue:r,merge:i}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};const s=(0,o.vc)(b({key:t,param:e,extraKey:a,flavour:n,merge:i}));return"undefined"===typeof s?r:s},k=function(e){let{key:t,extraKey:a,flavour:n}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};return(0,o.lZ)(b({key:t,param:e,extraKey:a,flavour:n}))},I=function(e){let{key:t,extraKey:a,flavour:n,defaultValue:r,merge:o}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};return[S(e,{key:t,extraKey:a,flavour:n,defaultValue:r,merge:o}),k(e,{key:t,extraKey:a,flavour:n})]},w=(e,t)=>{let{extraKey:a,defaultValue:n,flavour:r,merge:o}=t;const s=(0,i.ID)();return S(e,{key:s,extraKey:a,defaultValue:n,flavour:r,merge:o})},E=(e,t)=>{let{extraKey:a,flavour:n}=t;const r=(0,i.ID)();return k(e,{key:r,extraKey:a,flavour:n})},x=(e,t)=>{let{extraKey:a,defaultValue:n,flavour:r,merge:o}=t;return[w(e,{extraKey:a,defaultValue:n,flavour:r,merge:o}),E(e,{extraKey:a,flavour:r})]}},1522:(e,t,a)=>{"use strict";a.d(t,{DH:()=>g,e8:()=>b,eY:()=>S,j_:()=>y,n2:()=>h,rp:()=>k,th:()=>A,y5:()=>f,yK:()=>v,z2:()=>p});a(17333),a(98992),a(54520),a(62953);var n=a(71835),r=a(61649),o=a(22292),i=a(3914),s=a(69765),c=a(12800),l=a(5169),d=a(29662),u=a(63129),m=a(46902);const 
g=function(e){let{silent:t=!1}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};const a=(0,s.wz)(e,"spaceId"),[c,l]=(0,n.A)(),d=(0,o.uW)("isAnonymous"),m=(0,i.dg)();return(0,r.A)((function(){let n=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};if(d||m)return Promise.resolve().then((()=>!t&&c({header:"Rooms",text:"Connect to cloud to be able to save your settings!"})));const r=(0,u.xX)(a,e,n);return r.then((()=>!t&&c({header:"Rooms",text:"Room successfully updated!"}))).catch((e=>!e.isCancel&&l(e))),r}),[e,d,m,a])},p=(e,t)=>{const[a,o]=(0,n.A)(),i=(0,s.wz)(t,"name");return(0,r.A)((()=>{const n=(0,u.HN)(e,t);return n.then((()=>a({header:"Rooms",text:"Room ".concat(i," was successfully deleted!")}))).catch((e=>!e.isCancel&&o(e))),n}),[t,e])},h=(e,t)=>{const[,a]=(0,n.A)(),i=(0,o.NJ)(),c=(0,r.A)((()=>{const n=(0,u.cx)(e,t,i);return n.catch((e=>!e.isCancel&&a(e))),n}),[e,t,i]),[l,...d]=(0,m.ES)(t,"ids"),g=p(e,t),h=(0,s.wz)(t,"untouchable");return d.length||l!==i||h?c:g},v=e=>{const[t,a]=(0,n.A)();return(0,r.A)((function(){let n=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const r=(0,u.ab)(e,n);return r.then((()=>t({header:"Rooms",text:"Room ".concat(n.name," was successfully created!")}))).catch((e=>!e.isCancel&&a(e))),r}),[e])},f=(e,t)=>{const[a,o]=(0,n.A)();return(0,r.A)((n=>{const r=(0,u.pD)(e,t,n);return r.then((()=>a({header:"Rooms",text:"Member".concat(n.length>1?"s":""," successfully added!")}))).catch((e=>!e.isCancel&&o(e))),r}),[e])},A=function(e,t){let{onSuccess:a,onFail:i}=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{};const c=(0,o.NJ)(),[l,d]=(0,n.A)(),m=(0,s.a8)(t);return(0,r.A)((()=>{const n=(0,u.pD)(e,t,[c]);return n.then((()=>{m({isMember:!0}),l({header:"Rooms",text:"Successfully joined the room"}),null===a||void 0===a||a()})).catch((e=>{!e.isCancel&&d(e),null===i||void 0===i||i()})),n}),[t,m,e])},y=(e,t)=>{const a=(0,l.t)(),[o,i]=(0,n.A)();return(0,r.A)((n=>{const r=(0,u.cx)(e,t,n);return r.then((()=>{(async e=>{let{cacheKeyPrefix:t,memberIds:a,spaceId:n,roomId:r}=e;const o="".concat(t).concat((0,d.E)(n,r));await(0,c.y)({key:o,handleResults:e=>e.results.filter((e=>!a.includes(e.id)))})})({cacheKeyPrefix:a,memberIds:n,spaceId:e,roomId:t}),o({header:"Rooms",text:"Member".concat(n.length>1?"s":""," successfully removed!")})})).catch((e=>!e.isCancel&&i(e))),r}),[t,e])},b=(e,t)=>{const[a,o]=(0,n.A)();return(0,r.A)((function(){let n=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const r=(0,u.lW)(e,t,n);return r.then((()=>a({header:"Rooms",text:"Nodes successfully added!"}))).catch((e=>!e.isCancel&&o(e))),r}),[t,e])},S=e=>{const[t,a]=(0,n.A)();return(0,r.A)((function(){let n=arguments.length>0&&void 0!==arguments[0]?arguments[0]:[];const r=(0,u.Xm)(e,n);return r.then((()=>t({header:"Rooms",text:"Node successfully deleted!"}))).catch((e=>!e.isCancel&&a(e))),r}),[e])},k=(e,t)=>{const[a,o]=(0,n.A)();return(0,r.A)((n=>{const r=(0,u.if)(e,t,n);return r.then((()=>a({header:"Rooms",text:"Nodes successfully removed!"}))).catch((e=>!e.isCancel&&o(e))),r}),[t,e])}},51074:(e,t,a)=>{"use strict";a.d(t,{t:()=>i,s:()=>s});var n=a(47444),r=a(69765);const o={ids:(0,n.Iz)({key:"roomAlertIds",default:[]}),error:(0,n.Iz)({key:"roomAlertError",default:null}),updatedAt:(0,n.Iz)({key:"roomAlertsUpdatedAt",default:""}),loaded:(0,n.Iz)({key:"roomAlertsLoaded",default:!1})},i=(0,n.K0)({key:"roomAlertState",get:e=>{let{id:t,key:a}=e;return e=>{let{get:n}=e;return 
n(o[a](t))}},set:e=>{let{id:t,key:a}=e;return(e,n)=>{let{set:r}=e;r(o[a](t),n)}}}),s=e=>((e,t)=>(0,n.vc)(i({id:e,key:t})))((0,r.ID)(),e)},63129:(e,t,a)=>{"use strict";a.d(t,{HN:()=>y,K8:()=>f,SJ:()=>h,TM:()=>T,Xm:()=>I,a9:()=>g,ab:()=>v,cx:()=>S,hn:()=>R,if:()=>w,jt:()=>D,lW:()=>k,pD:()=>b,uP:()=>x,uQ:()=>u,xX:()=>A});a(41393),a(14905),a(98992),a(81454),a(8872);var n=a(26655),r=a(49286),o=a(81048),i=a(78969),s=a(49389),c=a(37618);const l=function(){let{container:e,virtualization:t}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return e&&"unknown"!=e?"Container":t&&"unknown"!=t?"VM":"Bare metal"},d=e=>t=>{let{nodes:a=[]}=t;return a.map((t=>{let{nm:a,capabilities:n,v:c,...d}=t;return{name:a,version:c,hasAccessibleData:o.iy[d.state],isOffline:d.state===o.j8.offline,isUnseen:d.state===o.j8.unseen,isLive:o.c1[d.state],updateSeverity:(m=c,(0,s.ID)(m,i.Wd)?"critical":(0,s.ID)(m,i.x7)?"warning":null),capabilities:n.reduce(((e,t)=>(e[t.name]=t,e)),{}),capabilityKeys:n.map((e=>e.name)),...(0,r.bn)(d),labels:d.labels||{},labelKeys:Object.keys(d.labels||{}),id:d.nd||d.mg,nodeStatus:(u=d.state,u===o.j8.offline?"Offline":u===o.j8.unseen?"Unseen":u===o.j8.stale?"Stale":o.c1[u]?"Live":"Unknown"),isPreferred:!!e||d.isPreferred,technology:l(d.hw)};var u,m}))},u=e=>{let{roomId:t,spaceId:a}=e;return(0,c.ES)(a)?n.A.get("/api/v2/nodes",{baseURL:window.envSettings.agentApiUrl,transform:d(!0)}):n.A.post("/api/v3/spaces/".concat(a,"/rooms/").concat(t,"/nodes"),{scope:{nodes:[]}},{transform:d()})},m=e=>{let{context:t,chartType:a="",...n}=e;return{id:t,name:t,context:t,chartType:a,...n}},g=function(e,t){let a=arguments.length>2&&void 0!==arguments[2]?arguments[2]:[],{after:o,before:i}=arguments.length>3?arguments[3]:void 0;return(0,c.ES)(e)?n.A.get("/api/v2/contexts?scope_nodes=".concat(a.join("|")||"*"),{baseURL:window.envSettings.agentApiUrl,transform:e=>{let{contexts:t={},versions:a={}}=e;return{results:Object.keys(t).map((e=>m({...(0,r.bn)(t[e]),id:e,context:e}))),versions:(0,r.bn)(a)}}}):n.A.post("/api/v3/spaces/".concat(e,"/rooms/").concat(t,"/contexts"),{scope:{contexts:["*"],nodes:a},selectors:{contexts:[],nodes:[]},window:{after:o,before:i}},{transform:e=>{let{contexts:t={},versions:a={}}=e;return{results:Object.keys(t).map((e=>m({...(0,r.bn)(t[e]),id:e,context:e}))),versions:(0,r.bn)(a)}}})},p=(e,t)=>a=>{let{results:n}=a;return{results:n.map((a=>({...a,spaceId:e,roomId:t})))}},h=(e,t)=>n.A.get("/api/v1/spaces/".concat(e,"/rooms/").concat(t,"/dashboards"),{transform:p(e,t)}),v=(e,t)=>n.A.post("/api/v1/spaces/".concat(e,"/rooms"),t),f=(e,t)=>n.A.get("/api/v1/spaces/".concat(e,"/rooms/").concat(t)),A=(e,t,a)=>n.A.patch("/api/v1/spaces/".concat(e,"/rooms/").concat(t),a),y=(e,t)=>n.A.delete("/api/v1/spaces/".concat(e,"/rooms/").concat(t)),b=(e,t,a)=>n.A.post("/api/v1/spaces/".concat(e,"/rooms/").concat(t,"/members"),a),S=(e,t,a)=>n.A.delete("/api/v1/spaces/".concat(e,"/rooms/").concat(t,"/members?account_ids=").concat(a)),k=(e,t,a)=>n.A.post("/api/v1/spaces/".concat(e,"/rooms/").concat(t,"/claimed-nodes"),a),I=function(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:[];return n.A.delete("/api/v1/spaces/".concat(e,"/nodes"),{data:{node_ids:t}})},w=(e,t,a)=>n.A.delete("/api/v1/spaces/".concat(e,"/rooms/").concat(t,"/claimed-nodes?node_ids=").concat(a)),E=e=>(0,r.bn)(e,{depth:3}),x=(e,t)=>n.A.get("/api/v3/spaces/".concat(e,"/rooms/").concat(t,"/parent-child-stats"),{allow401:!0,transform:E}),C=e=>{var t;const a=(0,r.bn)(e);return a.stability=null===(t=a.stability)||void 0===t?void 
0:t.reduce(((e,t)=>{let{nodeId:a,isStable:n}=t;return{...e,[a]:{isStable:n}}}),{}),a},T=function(e,t){let a=arguments.length>2&&void 0!==arguments[2]?arguments[2]:[];return n.A.post("/api/v3/spaces/".concat(e,"/rooms/").concat(t,"/nodes/stability"),{scope:{nodes:a}},{transform:C})},N=e=>(0,r.bn)(e,{depth:3}),R=function(e,t){let{sort:a="",start:r="",end:o=""}=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{};return n.A.get("/api/v2/spaces/".concat(e,"/rooms/").concat(t,"/alerts_stats?sort=").concat(a,"&start=").concat(r,"&end=").concat(o),{transform:N})},M=e=>(0,r.bn)(e),D=function(e,t){let a=arguments.length>2&&void 0!==arguments[2]?arguments[2]:[];return n.A.post("/api/v3/spaces/".concat(e,"/rooms/").concat(t,"/charts/stats"),{scope:{nodes:a}},{transform:M})}},54308:(e,t,a)=>{"use strict";a.d(t,{Ay:()=>c,x4:()=>s,yz:()=>i});a(62953);var n=a(47444),r=a(37618),o=a(38280);const i=(0,n.eU)({key:"currentRoomSlug",default:null}),s=(0,n.Iz)({key:"roomIdBySlug",default:"",effects:e=>{let[t,a]=e;return(0,r.ES)(t)&&a===r.gB?[e=>{let{setSelf:t,trigger:a}=e;"get"===a&&t(r.gB)}]:[]}}),c=(0,n.Iz)({key:"room",default:o.A,effects:e=>(0,r.tB)(e)?[e=>{let{setSelf:t,trigger:a}=e;"get"===a&&t(r.hq)}]:[]})},81048:(e,t,a)=>{"use strict";a.d(t,{Q8:()=>n,c1:()=>i,iy:()=>o,j8:()=>s,mL:()=>r});const n="All nodes",r="all-nodes",o={created:!1,reachable:!0,stale:!0,unreachable:!1},i={created:!0,reachable:!0,stale:!1,unreachable:!1},s={offline:"unreachable",unseen:"created",stale:"stale",live:"reachable"}},21591:(e,t,a)=>{"use strict";a.d(t,{z_:()=>d,hZ:()=>p,wt:()=>h,q:()=>m,RQ:()=>g});a(25509),a(65223),a(60321),a(41927),a(11632),a(64377),a(66771),a(12516),a(68931),a(52514),a(35694),a(52774),a(49536),a(21926),a(94483),a(16215),a(62953);var n=a(47444),r=a(66245),o=a.n(r),i=a(68980),s=a(69765),c=a(37618);const l={ids:(0,n.Iz)({key:"roomDashboardIds",default:[],effects:e=>c.gB===e?[e=>{let{onSet:t,trigger:a,setSelf:n}=e;"get"===a&&n([c.LA]),t((e=>{n([c.LA,...e])}))}]:[]}),loaded:(0,n.Iz)({key:"roomDashboardsLoaded",default:!1,effects:e=>c.gB===e?[e=>{let{trigger:t,setSelf:a}=e;"get"===t&&a(!0)}]:[]})},d=(0,n.K0)({key:"roomDashboardState",get:e=>{let{id:t,key:a}=e;return e=>{let{get:n}=e;return n(l[a](t))}},set:e=>{let{id:t,key:a}=e;return(e,n)=>{let{set:r}=e;return r(l[a](t),n)}}}),u=(e,t)=>(0,n.vc)(d({id:e,key:t})),m=()=>{const e=(0,s.ID)();return u(e,"ids")},g=()=>{const e=(0,s.ID)();return u(e,"loaded")},p=(0,n.K0)({key:"roomDashboardsState/add",set:e=>(t,a)=>{let{set:n,get:r}=t;const o=e=>r((0,i.Qx)({id:e,key:"name"}));n(l.ids(e),(e=>[...new Set([...e,a.id])].sort(((e,t)=>o(e).localeCompare(o(t),void 0,{sensitivity:"accent",ignorePunctuation:!0})))))}}),h=(0,n.K0)({key:"roomDashboardsState/remove",set:e=>(t,a)=>{let{set:n}=t;n(l.ids(e),(e=>o()(e,a)))}})},38280:(e,t,a)=>{"use strict";a.d(t,{A:()=>n});const n={loaded:!1,fullyLoaded:!1,id:null,isMember:!1,name:"",memberCount:null,nodeCount:null,description:"",private:!1,slug:"",spaceId:null,createdAt:"",untouchable:!0}},46902:(e,t,a)=>{"use strict";a.d(t,{x2:()=>d,n:()=>h,lb:()=>m,di:()=>g,zC:()=>f,ES:()=>u});a(25509),a(65223),a(60321),a(41927),a(11632),a(64377),a(66771),a(12516),a(68931),a(52514),a(35694),a(52774),a(49536),a(21926),a(94483),a(16215),a(62953);var n=a(47444),r=a(66245),o=a.n(r),i=a(70716),s=a(69765);const c={ids:(0,n.Iz)({key:"roomMemberIds",default:[]}),error:(0,n.Iz)({key:"roomMembersError",default:null}),updatedAt:(0,n.Iz)({key:"roomMembersUpdatedAt",default:""}),loaded:(0,n.Iz)({key:"roomMembersLoaded",default:!1})};var l=a(1522);const 
d=(0,n.K0)({key:"roomMemberState",get:e=>{let{id:t,key:a}=e;return e=>{let{get:n}=e;return n(c[a](t))}},set:e=>{let{id:t,key:a}=e;return(e,n)=>{let{set:r}=e;r(c[a](t),n)}}}),u=(e,t)=>(0,n.vc)(d({id:e,key:t})),m=()=>{const e=(0,s.ID)();return u(e,"ids")},g=()=>{const e=m();return(0,i.Uv)(e)},p=(0,n.K0)({key:"roomMembersState/add",set:e=>(t,a)=>{let{set:n,get:r}=t;const o=e=>r((0,i.m)({id:e,key:"name"}));n(c.ids(e),(e=>[...new Set([...e,...a])].sort(((e,t)=>o(e).localeCompare(o(t),void 0,{sensitivity:"accent",ignorePunctuation:!0})))))}}),h=function(e,t){let{onSuccess:a,onFail:r}=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{};const o=(0,l.y5)(e,t);return(0,n.Zs)((e=>{let{set:n}=e;return e=>{try{o(e),n(p(t),e),a&&a(e)}catch(i){r&&r(e)}}}),[o,e,a,r])},v=(0,n.K0)({key:"roomMembersState/remove",set:e=>(t,a)=>{let{set:n}=t;return n(c.ids(e),(e=>o()(e,a)))}}),f=function(e,t){let{onSuccess:a,onFail:r}=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{};const o=(0,l.j_)(e,t);return(0,n.Zs)((e=>{let{set:n}=e;return e=>{try{o(e),n(v(t),e),a&&a(e)}catch(i){r&&r(e)}}}),[o,e,t,a,r])}},21204:(e,t,a)=>{"use strict";a.d(t,{lM:()=>g,gV:()=>y,uB:()=>m,e6:()=>u,tY:()=>f,Vw:()=>h,tQ:()=>A,XH:()=>v});a(17333),a(9920),a(41393),a(98992),a(54520),a(3949),a(81454),a(62953);var n=a(47444),r=a(33829),o=a(1522);const i=[{id:"system.cpu",context:"system.cpu",dimensions:[],enableAllDimensions:!0,hidden:!1,title:"Total CPU",unit:"percentage"},{id:"system.load",context:"system.load",dimensions:[{id:"load15",name:"load15"}],enableAllDimensions:!1,hidden:!1,title:"Load 15",unit:"load"},{id:"mem.available",context:"mem.available",dimensions:[],enableAllDimensions:!0,hidden:!1,title:"Mem Available",unit:"MiB"},{id:"mem.swap",context:"mem.swap",dimensions:[{id:"free",name:"free"}],enableAllDimensions:!1,hidden:!1,title:"Swap Free",unit:"MiB"},{id:"system.io-in",context:"system.io",dimensions:[{id:"in",name:"in"}],enableAllDimensions:!1,hidden:!1,title:"Disk Reads",unit:"KiB/s"},{id:"system.io-out",context:"system.io",dimensions:[{id:"out",name:"out"}],enableAllDimensions:!1,hidden:!1,title:"Disk Writes",unit:"KiB/s"},{id:"system.net-received",context:"system.net",dimensions:[{id:"InOctets",name:"received"}],enableAllDimensions:!1,hidden:!1,title:"Network In",unit:"kilobits/s"},{id:"system.net-sent",context:"system.net",dimensions:[{id:"OutOctets",name:"sent"}],enableAllDimensions:!1,hidden:!1,title:"Network Out",unit:"kilobits/s"}].reduce(((e,t)=>({...e,[t.id]:t})),{}),s={id:null,title:"",context:"",unit:"",dimensions:[],enableAllDimensions:!1,hidden:!1},c=(0,n.Iz)({key:"roomMetricIds",default:Object.keys(i)}),l=(0,n.Iz)({key:"roomMetrics",default:e=>{let{id:t}=e;return i[t]||{}}}),d=(0,n.K0)({key:"roomMetricState",get:e=>{let{id:t,roomId:a,key:n}=e;return e=>{let{get:r}=e;const o=r(l({id:t,roomId:a}));return n?o[n]:o}},set:e=>{let{id:t,roomId:a,key:n}=e;return(e,r)=>{let{set:o}=e;return n?"function"===typeof r?o(l({id:t,roomId:a}),(e=>({...e,[n]:r(e[n])}))):void o(l({id:t,roomId:a}),(e=>({...e,[n]:r}))):o(l({id:t,roomId:a}),"function"===typeof r?r:{...s,...r})}}}),u=(e,t,a)=>(0,n.vc)(d({id:t,roomId:e,key:a})),m=(e,t)=>(0,n.E0)(l({id:t,roomId:e})),g=(0,n.K0)({key:"roomMetricIdsState",get:e=>t=>{let{get:a}=t;return a(c(e))},set:e=>(t,a)=>{let{set:n}=t;n(c(e),a.map((e=>e.id))),a.forEach((t=>n(l({roomId:e,id:t.id}),t)))}}),p=(0,n.K0)({key:"roomMetricsState",get:e=>t=>{let{get:a}=t;return a(g(e)).map((t=>a(l({id:t,roomId:e}))))}}),h=e=>(0,n.vc)(p(e)),v=function(e){let{shouldPersist:t=!0}=arguments.length>1&&void 
0!==arguments[1]?arguments[1]:{};const a=(0,o.DH)(e,{silent:!0});return(0,n.Zs)((n=>{let{set:r,snapshot:o}=n;return async function(n){for(var i=arguments.length,s=new Array(i>1?i-1:0),c=1;c{let{set:a}=t;return a(d({roomId:e,id:n,key:l}),u)})).getPromise(p(e));await a({metrics:t})}catch(m){r(d({roomId:e,id:n,key:l}),await o.getPromise(d({roomId:e,id:n,key:l})))}}}),[e])},f=e=>[h(e),v(e)],A=e=>{const t=(0,o.DH)(e);return(0,n.Zs)((a=>{let{set:n,snapshot:o}=a;return async a=>{const i="new"===a.id,s=(await o.getPromise(p(e))).filter((e=>e.id!==a.id)),l=i?{...a,id:(0,r.A)()}:a,u=[...s,l];await t({metrics:u}),n(d({roomId:e,id:l.id}),l),i&&n(c(e),(e=>[...e,l.id]))}}),[e])},y=e=>{const t=(0,o.DH)(e,{silent:!0});return(0,n.Zs)((a=>{let{set:n,reset:r,snapshot:o}=a;return async a=>{const i=(await o.getPromise(p(e))).filter((e=>e.id!==a));await t({metrics:i}),n(c(e),(e=>e.filter((e=>e!==a)))),r(l({roomId:e,id:a}))}}),[e])}},67990:(e,t,a)=>{"use strict";a.d(t,{dT:()=>u,$6:()=>g,Hx:()=>y,CK:()=>v,nj:()=>f,nl:()=>k,vV:()=>I,gr:()=>m,vv:()=>h});a(9920),a(41393),a(14905),a(98992),a(3949),a(81454),a(8872),a(25509),a(65223),a(60321),a(41927),a(11632),a(64377),a(66771),a(12516),a(68931),a(52514),a(35694),a(52774),a(49536),a(21926),a(94483),a(16215),a(62953);var n=a(47444),r=a(66245),o=a.n(r),i=a(47762),s=a(69765),c=a(1522);const l={ids:(0,n.Iz)({key:"roomNodeIds",default:[]}),error:(0,n.Iz)({key:"roomNodesError",default:null}),updatedAt:(0,n.Iz)({key:"roomNodesUpdatedAt",default:""}),loaded:(0,n.Iz)({key:"roomNodesLoaded",default:!1})},d={nodes:(0,n.Iz)({key:"roomNodesOptions",default:[]}),error:(0,n.Iz)({key:"roomNodesOptionsError",default:null}),loaded:(0,n.Iz)({key:"roomNodesOptionsLoaded",default:!1})},u=(0,n.K0)({key:"roomNodeState",get:e=>{let{id:t,key:a}=e;return e=>{let{get:n}=e;return n(l[a](t))}},set:e=>{let{id:t,key:a}=e;return(e,n)=>{let{set:r}=e;return r(l[a](t),n)}}}),m=(e,t)=>(0,n.vc)(u({id:e,key:t})),g=(0,n.K0)({key:"roomNodesOptions",get:e=>{let{id:t,key:a}=e;return e=>{let{get:n}=e;return n(d[a](t))}},set:e=>{let{id:t,key:a}=e;return(e,n)=>{let{set:r}=e;r(d[a](t),n)}}}),p=(0,n.K0)({key:"roomsNodesOptions",get:e=>t=>{let{get:a}=t;return null!==e&&void 0!==e&&e.length?e.reduce(((e,t)=>{const n=e.map((e=>{let{value:t}=e;return t}));return a(g({id:t,key:"nodes"})).forEach((t=>{n.includes(t.value)||e.push(t)})),e}),[]):[]}}),h=e=>(0,n.vc)(p(e)),v=()=>{const e=(0,s.ID)();return m(e,"ids")},f=()=>{const e=(0,s.ID)();return m(e,"loaded")},A=(0,n.K0)({key:"roomNodesState/add",get:()=>()=>null,set:e=>(t,a)=>{let{set:n,get:r}=t;const o=e=>r((0,i.GN)({id:e,key:"name"}));n(l.ids(e),(e=>[...new Set([...e,...a])].sort(((e,t)=>o(e).localeCompare(o(t),void 0,{sensitivity:"accent",ignorePunctuation:!0}))))),n(l.updatedAt(e),"")}}),y=function(e,t){let{onSuccess:a,onFail:r}=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{};const o=(0,c.e8)(e,t);return(0,n.Zs)((e=>{let{set:n}=e;return async function(e){let{makeCallback:i=!0}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};try{const r=await o(e);r&&e.reduce(((e,t,a)=>{var n;return null!==(n=r[a])&&void 0!==n&&n.errorCode?e:[...e,t]}),[]),n(A(t),e),i&&a&&a(e)}catch(s){i&&r&&r(e)}}}),[o,e,a,r])},b=(0,n.K0)({key:"roomNodesState/obsolete",get:()=>()=>null,set:e=>{let{roomId:t,nodeIds:a}=e;return e=>{let{set:n}=e;n(l.ids(t),(e=>o()(e,a))),n(l.updatedAt(t),"")}}}),S=(0,n.K0)({key:"roomNodesState/remove",get:()=>()=>null,set:e=>(t,a)=>{let{set:n}=t;n(l.ids(e),(e=>o()(e,a))),n(l.updatedAt(e),"")}}),k=(e,t)=>{const 
a=(0,c.eY)(e);return(0,n.Zs)((e=>{let{set:n}=e;return async function(e){let{onSuccess:r,onFail:o}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};try{await a(e),e.map((e=>{n((0,i.GN)({id:e,key:"state"}),"deleted")})),t.map((t=>{n(b({roomId:t,nodeIds:e}))})),r&&r(e)}catch(s){o&&o()}}}),[a,e])},I=function(e,t){let{onSuccess:a,onFail:r}=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{};const o=(0,c.rp)(e,t);return(0,n.Zs)((e=>{let{set:n}=e;return async function(e){let{onSuccess:i,onFail:s}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};try{await o(e),n(S(t),e),a&&a(e),i&&i()}catch(c){r&&r(e),s&&s()}}}),[o,e,t,a,r])}},69765:(e,t,a)=>{"use strict";a.d(t,{If:()=>y,LS:()=>b,NG:()=>I,At:()=>U,ID:()=>M,r9:()=>_,QW:()=>C,XA:()=>D,pr:()=>R,HX:()=>x,NQ:()=>E,J_:()=>N,wz:()=>S,A2:()=>T,a8:()=>k});a(17333),a(98992),a(54520);var n=a(47444),r=a(14994),o=a(3914),i=a(81048),s=a(56820),c=a(54308),l=a(38280),d=a(1522),u=a(82265),m=a(21204),g=a(96540),p=a(5169),h=a(87633),v=a(12800);const f=e=>{const t="".concat((0,p.t)()).concat((0,h.e)(e));return(0,g.useCallback)((e=>(0,v.y)({key:t,handleResults:t=>t.results.concat(e)})),[t])};var A=a(37618);const y=(0,n.K0)({key:"roomInitialize",get:e=>t=>{let{get:a}=t;return a((0,c.Ay)(e))},set:e=>(t,a)=>{let{set:n}=t;const{settings:r,metrics:o,...i}=a;n((0,c.Ay)(e),(e=>({...l.A,...e,...i}))),n((0,c.x4)([i.spaceId,i.slug]),e),r&&n((0,u.A)(e),(e=>({...e,...r}))),o&&n((0,m.lM)(e),o)}}),b=(0,n.K0)({key:"roomState",get:e=>{let{id:t,key:a}=e;return e=>{let{get:n}=e;const r=n((0,c.Ay)(t));return a?r[a]:r}},set:e=>{let{id:t,key:a}=e;return(e,n)=>{let{set:r}=e;r((0,c.Ay)(t),a?"function"!==typeof n?e=>({...e,[a]:n}):e=>({...e,[a]:n(e[a])}):{...l.A,...n})}},reset:e=>{let{id:t,key:a}=e;return(e,n)=>{let{reset:r,set:o}=e;a?o((0,c.Ay)(t),"function"!==typeof n?e=>({...e,[a]:n}):e=>({...e,[a]:n(e[a])})):r((0,c.Ay)(t))}},cachePolicy_UNSTABLE:{eviction:"most-recent"}}),S=(e,t)=>(0,n.vc)(b({id:e,key:t})),k=function(e){let{key:t,shouldPersist:a,onSuccess:r,onFail:o}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};const i=(0,n.lZ)(b({id:e,key:t})),s=(0,d.DH)(e);return(0,n.Zs)((n=>{let{snapshot:c}=n;return async n=>{const l=t?{[t]:n}:n;if(i((e=>({...e,...l}))),!a)return;const d=await c.getPromise(b({id:e}));try{await s(l),r&&r(n)}catch(u){i(t?d[t]:d),o&&o(n)}}}),[e,t,a,r,o])},I=function(e){let{onSuccess:t,onFail:a}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};const o=(0,d.yK)(e),i=f(e);return(0,n.Zs)((n=>{let{set:s}=n;return async n=>{try{const{data:a}=await o(n),c={...n,spaceId:e,...a,loaded:!0};await i(c),s(y(c.id),c),s((0,r.Jz)(e),c),t&&t(c)}catch(c){a&&a(n)}}}),[o,e,t,a,i])},w=function(e,t){let{onSuccess:a,onFail:o}=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{};const i=(0,n.E0)(b({id:e})),l=S(e,"spaceId");return(0,n.Zs)((n=>{let{snapshot:d,set:u,reset:m}=n;return async()=>{const n=await d.getPromise(b({id:e})),g=await d.getPromise((0,r.YB)({id:n.spaceId})),p=(0,s.Pb)(g.filter((t=>t.id!==e)));try{await t(),m((0,c.x4)([l,n.slug])),u(c.yz,p),u((0,r.Oy)(l),n.id),i(),a&&a(n)}catch(h){o&&o(n)}}}),[t,l,e,a,o])},E=(e,t)=>{const a=S(e,"spaceId"),n=(0,d.n2)(a,e);return w(e,n,t)},x=(e,t)=>{const a=S(e,"spaceId"),n=(0,d.z2)(a,e);return w(e,n,t)},C=()=>(0,n.vc)(c.yz),T=e=>(0,n.lZ)(c.yz,e),N=(e,t)=>(0,n.vc)((0,c.x4)([e,t])),R=()=>{const e=(0,r.DL)(),t=(0,o.dg)()?null===A.hq||void 0===A.hq?void 0:A.hq.slug:i.mL;return e.filter((e=>{let{slug:a}=e;return a===t}))[0]||{id:""}},M=e=>{const t=(0,o.vt)(),a=C(),{id:n=""}=R();return 
N(t,e||a)||n},D=e=>{const t=M();return S(t,e)},_=()=>{const e=(0,o.bq)(),t=C();return e&&t&&"/spaces/".concat(e,"/rooms/").concat(t)},U=()=>{const{id:e=""}=R();return"member"===(0,o.ap)("roleInSpace")&&!!e}},82265:(e,t,a)=>{"use strict";a.d(t,{A:()=>n});const n=(0,a(47444).Iz)({key:"roomSettings",default:{groupMode:""}})},29662:(e,t,a)=>{"use strict";a.d(t,{A:()=>d,E:()=>l});var n=a(83957),r=a(54702),o=a(70716),i=a(35273),s=a(46902),c=a(46741);const l=(e,t)=>"spaces.".concat(e,".rooms.").concat(t,".members"),d=function(e,t){let{polling:a=!0}=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{};const d=(0,c.JT)("room:ReadUsers");(0,n.A)((()=>({key:l(e,t),autorun:!!e&&!!t&&d,fetch:()=>(0,r.D4)(e,t),polling:a,association:{getError:()=>(0,s.x2)({id:t,key:"error"}),getIds:()=>(0,s.x2)({id:t,key:"ids"}),getLoaded:()=>(0,s.x2)({id:t,key:"loaded"}),getUpdatedAt:()=>(0,s.x2)({id:t,key:"updatedAt"})},sort:(e,t)=>e.name.localeCompare(t.name,void 0,{sensitivity:"accent",ignorePunctuation:!0}),getResource:e=>(0,o.m)({id:e}),getResourcesInitializer:()=>o.WJ,getResourceInitialState:()=>i.u,pollingOptions:{pollingInterval:18e4}})),[e,t])}},82428:(e,t,a)=>{"use strict";a.d(t,{p:()=>n});const n=(e,t)=>t.untouchable?1:e.untouchable?-1:e.name.localeCompare(t.name,void 0,{sensitivity:"accent",ignorePunctuation:!0})},7484:(e,t,a)=>{"use strict";a.d(t,{EL:()=>s,Q9:()=>g,RM:()=>h,XD:()=>c,Yk:()=>m,bz:()=>d,cN:()=>l,qi:()=>u});a(41393),a(14905),a(98992),a(81454),a(8872);var n=a(26655),r=a(49286),o=a(78969),i=a(91069);const s=()=>n.A.get(o._9,{transform:e=>e.map((e=>{let{permissions:t,...a}=e;return{...a,permissions:t.reduce(((e,t)=>({...e,[t]:t})),{})}}))}),c=e=>n.A.get("/api/v3/spaces/slug/".concat(e)),l=e=>n.A.delete("/api/v1/spaces/".concat(e)),d=e=>n.A.post("/api/v1/spaces",{name:e}),u=(e,t)=>n.A.post("/api/v1/spaces/default",{email:t},{headers:{"Netdata-Account-Id":e}}),m=(e,t)=>n.A.patch("/api/v1/spaces/".concat(e),t),g=e=>n.A.get("/api/v3/spaces/slug?slug=".concat(e),{transform:e=>(0,r.bn)(e)}),p=e=>t=>t.map((t=>({...(0,r.bn)(t),spaceId:e}))),h=function(e){let{defaultParam:t=!1}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};return n.A.get((0,i.t)({spaceId:e}),{params:{default:t},transform:p(e)})}},56639:(e,t,a)=>{"use strict";a.d(t,{Ay:()=>c,Li:()=>i,aR:()=>s,ue:()=>o});var n=a(47444),r=a(37618);const o={createdAt:"",description:"",iconURL:"",id:"",error:null,loaded:!1,name:"",slug:""},i=(0,n.eU)({key:"currentSpaceSlug",default:null}),s=(0,n.Iz)({key:"spaceIdBySlug",default:"",effects:e=>r.Ay&&e===r.z0?[e=>{let{setSelf:t,trigger:a}=e;"get"===a&&t(r.Z2)}]:[]}),c=(0,n.Iz)({key:"space",default:o,effects:e=>(0,r.ES)(e)?[e=>{let{setSelf:t,trigger:a}=e;"get"===a&&t(r.yu)}]:[]})},88116:(e,t,a)=>{"use strict";a.d(t,{FJ:()=>i,HR:()=>s,Nw:()=>r,VH:()=>n,aT:()=>l,jS:()=>d,rY:()=>c,tD:()=>o,uC:()=>u});const n={pro:"pro",free:"free",earlyBird:"earlyBird",business:"business",homelab:"homelab"},r={Pro:n.pro,Professional:n.pro,Community:n.free,EarlyBird:n.earlyBird,Business:n.business,Homelab:n.homelab},o=[n.homelab,n.pro,n.business],i={free:{level:0,title:"Community",features:{2023.11:["Max 5 Active Connected Nodes","Max 1 Active Custom Dashboards (per Room)"]},showAnnualPrice:!1},earlyBird:{level:0,title:"Early Bird",features:{2023.02:['"Member" role available with existing permissions.','"Member" role with access to _All Nodes_ room.']},showAnnualPrice:!1},pro:{level:10,title:"Pro",features:{2023.02:["7 days of alert history and auditing events.",'Unlock the "Troubleshooter" role and add members to the 
space without providing management permission.',"Enable webhook alert notification integration."]},showAnnualPrice:!1},business:{level:20,title:"Business",features:{2023.02:["Up-to 90 days of alert history and topology events. Never miss an important event while troubleshooting.",'Unlock all user roles including "Manager", "Observer" and "Billing". Empower your teams to excel.',"Enable alert notification integrations (Slack, PagerDuty and more)."],2024.03:["Unlimited Nodes","Unlimited Custom Dashboards","Role-based access control (RBAC)","Single Sign-On (SSO)","Centralized Configuration Management","Enterprise Notification Integrations"]},showAnnualPrice:!1},homelab:{level:20,title:"Homelab",features:{2024.02:["Targeted for homelab users or students, for non-commercial use","No limits! Unlimited Connected Nodes or Custom Dashboards","Full business level features","Fair and flat pricing","Regulated by the Fair Usage Policy"]},showAnnualPrice:!0},enterprise:{features:{2023.11:["Host Netdata Cloud and all its components completely on your premises","Ideal for monitoring air gapped facilities and critical infrastructure","Enable the customization that your organization requires"]},showAnnualPrice:!1}},s=["year","month"],c={year:"yearly",month:"monthly"},l={ErrInvalidPromotionCode:"Promotion code is invalid",ErrInactivePromotionCode:"Promotion code is inactive",ErrInvalidPromotionCodePlan:"Promotion code cannot be applied to this plan"},d="planEnrolmentError",u={free:{2023.11:{maxNodes:5,maxDashboards:1}}}},50503:(e,t,a)=>{"use strict";a.d(t,{A:()=>o});a(62953);var n=a(96540),r=a(88116);const o=()=>{const[e,t]=(0,n.useState)(localStorage.getItem(r.jS));return{isFailure:e,setFailure:(0,n.useCallback)((()=>{t(!0),localStorage.setItem(r.jS,!0)}),[]),reset:(0,n.useCallback)((()=>{t(!1),localStorage.removeItem(r.jS)}),[])}}},55189:(e,t,a)=>{"use strict";a.d(t,{dy:()=>i,eA:()=>r,pz:()=>o,sh:()=>s});var n=a(96540);const r="SPACE_NOT_FOUND",o=5,i=20,s={delete:n.createElement(n.Fragment,null,"Cannot delete last space of account.",n.createElement("br",null),"You can delete your account instead."),leave:n.createElement(n.Fragment,null,"Cannot leave last space of account.",n.createElement("br",null),"You can delete your account instead.")}},48849:(e,t,a)=>{"use strict";a.d(t,{A:()=>n});const n=(0,a(47444).eU)({key:"publicSpacesAtom",default:[]})},14994:(e,t,a)=>{"use strict";a.d(t,{Is:()=>d,YB:()=>v,$e:()=>l,Jz:()=>p,Oy:()=>h,DL:()=>A,WW:()=>g,Y7:()=>m,CB:()=>f,sC:()=>u});a(17333),a(9920),a(41393),a(98992),a(54520),a(3949),a(81454),a(25509),a(65223),a(60321),a(41927),a(11632),a(64377),a(66771),a(12516),a(68931),a(52514),a(35694),a(52774),a(49536),a(21926),a(94483),a(16215),a(62953);var n=a(47444),r=a(69765),o=a(3914),i=a(37618);const s={ids:(0,n.Iz)({key:"spaceRoomIds",default:[],effects:e=>(0,i.ES)(e)?[e=>{let{onSet:t,trigger:a,setSelf:n}=e;"get"===a&&n([i.gB]),t((e=>{n([i.gB,...e])}))}]:[]}),updatedAt:(0,n.Iz)({key:"spaceRoomsUpdatedAt",default:""}),loaded:(0,n.Iz)({key:"spaceRoomsLoaded",default:!1,effects:e=>(0,i.ES)(e)?[e=>{let{trigger:t,setSelf:a}=e;"get"===t&&a(!0)}]:[]})};var c=a(82428);const l=(0,n.K0)({key:"spaceRoomState",get:e=>{let{id:t,key:a}=e;return e=>{let{get:n}=e;return n(s[a](t))}},set:e=>{let{id:t,key:a}=e;return(e,n)=>{let{set:r}=e;r(s[a](t),n)}}}),d=(e,t)=>{Object.values(s).forEach((a=>e(a(t))))},u=(e,t)=>(0,n.vc)(l({id:e,key:t})),m=e=>{const t=(0,o.vt)();return u(t,e)},g=()=>{const e=(0,o.vt)();return 
u(e,"ids")},p=(0,n.K0)({key:"spaceRoomsState/add",get:()=>()=>null,set:e=>(t,a)=>{let{set:n,get:o}=t;const i=e=>o((0,r.LS)({id:e}));n(l({id:e,key:"ids"}),(e=>[...new Set([...e,a.id])].sort(((e,t)=>(0,c.p)(i(e),i(t)))))),n(l({id:e,key:"loaded"}),!0),n(l({id:e,key:"updatedAt"}),"")}}),h=(0,n.K0)({key:"spaceRoomsState/remove",get:()=>()=>null,set:e=>(t,a)=>{let{set:n}=t;n(l({id:e,key:"ids"}),(e=>e.filter((e=>e!==a)))),n(l({id:e,key:"loaded"}),!0),n(l({id:e,key:"updatedAt"}),"")}}),v=(0,n.K0)({key:"spaceFullRooms",get:e=>{let{id:t,key:a}=e;return e=>{let{get:n}=e;return n(l({id:t,key:"ids"})).map((e=>n((0,r.LS)({id:e,key:a}))))}},cachePolicy_UNSTABLE:{eviction:"most-recent"}}),f=(e,t)=>(0,n.vc)(v({id:e,key:t})),A=e=>{const t=(0,o.vt)();return f(t,e)}},3914:(e,t,a)=>{"use strict";a.d(t,{EG:()=>g,Jn:()=>m,U2:()=>u,nC:()=>l,Pk:()=>f,vt:()=>b,bq:()=>A,ap:()=>k,jw:()=>I,hX:()=>y,Ak:()=>v,dg:()=>S,ns:()=>p,UV:()=>h});a(9920),a(41393),a(98992),a(3949),a(81454),a(25509),a(65223),a(60321),a(41927),a(11632),a(64377),a(66771),a(12516),a(68931),a(52514),a(35694),a(52774),a(49536),a(21926),a(94483),a(16215),a(62953);var n=a(47444),r=a(37618),o=a(76689);const i={ids:(0,n.eU)({key:"spaceIds",default:[],effects:r.Ay?[e=>{let{onSet:t,trigger:a,setSelf:n}=e;"get"===a&&n([r.Z2]),t((e=>{n([r.Z2,...e])}))}]:[]}),error:(0,n.eU)({key:"spacesError",default:null}),updatedAt:(0,n.eU)({key:"spacesUpdatedAt",default:""}),loaded:(0,n.eU)({key:"spacesLoaded",default:!1,effects:[e=>{var t;let{trigger:a,setSelf:n,getLoadable:r}=e;null!==(t=r(o.A).contents)&&void 0!==t&&t.isAnonymous&&"get"===a&&n(!0)}]})};var s=a(56639),c=a(48849);const l=(0,n.K0)({key:"spacesState",get:e=>t=>{let{get:a}=t;return a(i[e])},set:e=>(t,a)=>{let{set:n}=t;n(i[e],a)}}),d=(0,n.gD)({key:"availableSpaceIds",get:e=>{let{get:t}=e;const a=t(l("ids")),n=t(c.A);return[...new Set([...a,...n])]}}),u=(0,n.K0)({key:"spaceState",get:e=>{let{id:t,key:a}=e;return e=>{let{get:n}=e;const r=n((0,s.Ay)(t));return a?r[a]:r}},set:e=>{let{id:t,key:a}=e;return(e,n)=>{let{set:r}=e;r((0,s.Ay)(t),a?"function"!==typeof n?e=>({...e,[a]:n}):e=>({...e,[a]:n(e[a])}):{...s.ue,...n})}}}),m=(0,n.K0)({key:"spaceIdBySlugState",get:e=>t=>{let{get:a}=t;return a((0,s.aR)(e))},set:()=>(e,t)=>{let{set:a}=e;t.forEach((e=>{a((0,s.aR)(e.slug),e.id)}))}}),g=(0,n.gD)({key:"currentSpaceIdState",get:e=>{let{get:t}=e;const a=t(s.Li);return t(m(a))}}),p=(e,t)=>(0,n.vc)(u({id:e,key:t})),h=e=>(0,n.vc)(l(e)),v=e=>(0,n.vc)(m(e)),f=()=>(0,n.vc)(d),A=()=>(0,n.vc)(s.Li),y=e=>(0,n.lZ)(s.Li,e),b=()=>(0,n.vc)(g),S=()=>(0,r.ES)(b()),k=e=>{const t=b();return p(t,e)},I=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:[];return(0,n.Zs)((t=>{let{snapshot:a}=t;return()=>e.map((e=>a.getLoadable(u({id:e})).contents))}))()}},48388:(e,t,a)=>{"use strict";a.d(t,{A:()=>s});var n=a(3914),r=a(90709),o=a(55189),i=a(37618);const s=()=>{const e=(0,r.U)(),t=(0,n.Ak)(e),a=(0,n.ns)(t||o.eA),s=(0,n.UV)("error");return(0,i.ES)(t)?{isFetching:!1,hasAccess:!0}:{isFetching:!!e&&!a.loaded&&!s&&!a.error,hasAccess:a.loaded&&!s&&!a.error}}},90709:(e,t,a)=>{"use strict";a.d(t,{A:()=>v,U:()=>h});a(14905),a(98992),a(8872),a(25509),a(65223),a(60321),a(41927),a(11632),a(64377),a(66771),a(12516),a(68931),a(52514),a(35694),a(52774),a(49536),a(21926),a(94483),a(16215),a(62953);var n=a(47444),r=a(47767),o=a(7484),i=a(96540),s=a(3914);const c="/spaces/:spaceSlug/*";var l=a(22292),d=a(46741),u=a(48849),m=a(55189),g=a(94355),p=a(37618);const h=()=>{var e;return null===(e=(0,r.RQ)(c))||void 0===e||null===(e=e.params)||void 
0===e?void 0:e.spaceSlug},v=()=>{const e=(()=>{const e=(0,r.RQ)(c),{spaceSlug:t}=(null===e||void 0===e?void 0:e.params)||{},a=(0,s.hX)(),n=(0,s.bq)();return(0,i.useEffect)((()=>{n!==t&&a(t)}),[n,t]),t})(),[t,a]=(0,i.useState)(null),h=(0,l.uW)("isAnonymous"),v=(0,n.Zs)((n=>{let{set:r}=n;return async()=>{try{const{data:t}=await(0,o.XD)(e),{id:a,permissions:n,...i}=t;r((0,d.Dk)(a),n.reduce(((e,t)=>({...e,[t]:t})),{})),r((0,s.U2)({id:a}),(e=>({...e,...i,loaded:!0}))),r((0,s.Jn)(),[{id:a,slug:e}]),r(u.A,(e=>[...new Set([...e,a])])),h&&r((0,s.nC)("loaded"),!0)}catch(t){r((0,s.U2)({id:m.eA}),(a=>({...a,error:(0,g.A)(t)}))),a(t)}}}),[h,e]);return(0,i.useEffect)((()=>{p.Ay&&e===p.z0||e&&v()}),[v,e]),t}},87633:(e,t,a)=>{"use strict";a.d(t,{A:()=>d,e:()=>l});var n=a(83957),r=a(69765),o=a(7484),i=a(14994),s=a(82428),c=a(37618);const l=e=>"spaces.".concat(e,".rooms"),d=function(e){let{autorun:t=!0,polling:a=!0}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};return(0,n.A)((()=>({key:l(e),autorun:t&&!!e,polling:a,fetch:()=>(0,o.RM)(e),association:{getIds:()=>(0,i.$e)({id:e,key:"ids"}),getLoaded:()=>(0,i.$e)({id:e,key:"loaded"}),getUpdatedAt:()=>(0,i.$e)({id:e,key:"updatedAt"})},sort:s.p,getResource:r.If,pollingOptions:{pollingInterval:69e3},force:!c.Ay})),[e])}},47731:(e,t,a)=>{"use strict";a.d(t,{J:()=>r});var n=a(45463);const r=()=>(0,n.A)("(max-width: 767px)")},18061:(e,t,a)=>{"use strict";a.d(t,{A:()=>o});a(9391),a(62953);var n=a(96540);const r=()=>{},o=(e,t)=>{const{fetch:a,enabled:o=!0,initialValue:i,onFail:s=r,onSettle:c=r,onSuccess:l=r,isDefaultLoading:d=!1}=(0,n.useMemo)(e,t),[u,m]=(0,n.useState)(i),[g,p]=(0,n.useState)(d),[h,v]=(0,n.useState)(null),f=(0,n.useRef)(!0);return(0,n.useEffect)((()=>{if(!o)return;m(i),p(!0),v(null);const e=a();return e.then((e=>{let{data:t}=e;f.current&&(m(t),l(t))})).catch((e=>{f.current&&(e.isCancel||(v(e),s(e)))})).finally((()=>{f.current&&(p(!1),c())})),()=>{var t;return null===e||void 0===e||null===(t=e.cancel)||void 0===t?void 0:t.call(e)}}),t),(0,n.useEffect)((()=>()=>f.current=!1),[]),[u,g,h]}},69418:(e,t,a)=>{"use strict";a.d(t,{A:()=>c,Q:()=>s});a(14905),a(98992),a(8872);var n=a(47767),r=a(3914),o=a(14994),i=a(37618);const s=()=>{const{pathname:e}=(0,n.zy)();return{isIntegrationsPath:/integrate-anything$/.test(e)}},c=()=>{const e=(0,r.vt)(),t=(0,o.CB)(e,"nodeCount").reduce(((e,t)=>t>e?t:e),0),a=(0,o.Y7)("loaded"),n=(0,i.ES)(e);return[!n&&a&&0===t,!n&&e&&!a,t]}},61649:(e,t,a)=>{"use strict";a.d(t,{A:()=>r});a(62953);var n=a(96540);const r=(e,t)=>{const a=(0,n.useRef)();return(0,n.useCallback)((function(){var t,n;return a.current&&(null===(t=(n=a.current).cancel)||void 0===t||t.call(n)),a.current=e(...arguments),a.current}),t)}},86723:(e,t,a)=>{"use strict";a.d(t,{A:()=>o});a(62953);var n=a(96540);const r=function(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"";const a=localStorage.getItem(e);return a?JSON.parse(a):t},o=(e,t)=>{const[a,o]=(0,n.useState)((()=>r(e,t)));return(0,n.useEffect)((()=>localStorage.setItem(e,JSON.stringify(a))),[a]),[a,o]}},45463:(e,t,a)=>{"use strict";a.d(t,{A:()=>o});a(62953);var n=a(96540);const r=e=>{var t;return"function"===typeof(null===(t=window)||void 0===t?void 0:t.matchMedia)&&window.matchMedia(e).matches},o=e=>{const[t,a]=(0,n.useState)((()=>r(e)));function o(){a(r(e))}return(0,n.useEffect)((()=>{if("function"!==typeof window.matchMedia)return;const t=window.matchMedia(e);return 
o(),t.addListener?t.addListener(o):t.addEventListener("change",o),()=>{t.removeListener?t.removeListener(o):t.removeEventListener("change",o)}}),[e]),t}},80862:(e,t,a)=>{"use strict";a.d(t,{A:()=>r});var n=a(96540);const r=()=>{const e=(0,n.useRef)(!1);return(0,n.useEffect)((()=>(e.current=!0,()=>e.current=!1)),[]),e}},71835:(e,t,a)=>{"use strict";a.d(t,{A:()=>i});a(9920),a(98992),a(3949);var n=a(96540),r=a(78217),o=a(13871);const i=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:"default";const t=(0,n.useRef)({}),a=(0,n.useCallback)((a=>{const n=(0,o.UI)({...a,success:!0});t.current[e]&&r.A.dismiss(t.current[e]),t.current[e]=r.A.success(n)}),[]),i=(0,n.useCallback)((a=>{var n;const i=null===a||void 0===a||null===(n=a.response)||void 0===n?void 0:n.data,s=(0,o.gi)(null!==i&&void 0!==i&&i.errorMessage?i:{errorMessage:a.message,errorMsgKey:a.message,...a});t.current[e]&&r.A.dismiss(t.current[e]),t.current[e]=r.A.error(s)}),[]),s=(0,n.useCallback)((e=>e.forEach(i)),[]),c=(0,n.useCallback)((a=>{const n=(0,o.UI)({...a,warning:!0});t.current[e]&&r.A.dismiss(t.current[e]),t.current[e]=r.A.warning(n)}),[]);return[a,i,s,c]}},76201:(e,t,a)=>{"use strict";a.d(t,{A:()=>i,F:()=>o});var n=a(96540),r=a(47767);const o=function(){var e;let t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:location.href;null!==(e=window.posthog)&&void 0!==e&&e.__loaded&&window.posthog.capture("$pageview",{$current_url:t})},i=()=>{const{pathname:e}=(0,r.zy)();(0,n.useEffect)((()=>{o()}),[e])}},66627:(e,t,a)=>{"use strict";a.d(t,{A:()=>o,h:()=>r});const n=(0,a(63457).A)("netdata"),r=86400,o=function(e,t){let a,{maxAge:r}=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{};const o=()=>{if(!a)return;const e=new Error("rejected");throw e.isCancel=!0,e},i=n.get(e,{maxAge:r}).then((e=>{if(o(),e)return t(e)})).then((()=>(o(),t=>n.set(e,t))));return i.abort=()=>{a=!0},i}},12800:(e,t,a)=>{"use strict";a.d(t,{y:()=>r});var n=a(66627);const r=async e=>{let t,{key:a,handleResults:r}=e;return(await(0,n.A)(a,(e=>(t={updatedAt:"",results:r(e)},t))))(t)}},5169:(e,t,a)=>{"use strict";a.d(t,{t:()=>r});var n=a(22292);const r=()=>{const e=(0,n.NJ)();return"user.".concat(e,".")}},83957:(e,t,a)=>{"use strict";a.d(t,{A:()=>y});a(17333),a(9920),a(41393),a(14905),a(98992),a(54520),a(3949),a(81454),a(8872),a(25509),a(65223),a(60321),a(41927),a(11632),a(64377),a(66771),a(12516),a(68931),a(52514),a(35694),a(52774),a(49536),a(21926),a(94483),a(16215),a(62953);var n=a(96540),r=a(47444),o=a(65570),i=a(83488),s=a.n(i),c=a(18300),l=a(80925),d=a(94355),u=a(68831),m=a(22292),g=a(66627),p=a(5169),h=a(71835);const v=(0,r.eU)({key:"subscriptionsLatestError",default:0}),f=(0,r.K0)({key:"useSubscribe/updateState",get:()=>()=>null,set:()=>(e,t)=>{let{get:a,set:n}=e,{results:r,wasDiff:i,updatedAt:c,getResource:l,getResourcesInitializer:d,getResourceInitialState:u,getUpdatedAt:m,getLoaded:g,getIds:p,getError:h,sort:v,saveToCache:f,restored:A=!1,polling:y=!0}=t;r=r||[],h&&n(h(),null);const b=()=>{n(m(),c),n(g(),!A||"restored")};if(0===r.length&&i)return void b();const{effected:S,deleted:k,byId:I}=function(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:s();return e.reduce(((e,a)=>(a.loaded=!0,"deleted"===a.state||a.isDeleted?e.deleted.push(a.id):(e.byId[a.id]={...t(a),...a},e.effected.push(a.id)),e)),{deleted:[],effected:[],byId:{}})}(r,u),w=e=>e in I?I[e]:a(l(e));let E=[];n(p(),(e=>!i&&y||!y?(E=S.length?S:E,v&&E.sort(((e,t)=>v(w(e),w(t)))),E):(k.length&&(E=e.filter((e=>!k.includes(e)))),i&&S.length&&(E=[...new 
Set([...e,...S])],v&&E.sort(((e,t)=>v(w(e),w(t))))),(0,o.Ay)(E,e)?e:E))),d?n(d(),{values:I,merge:!0}):S.forEach((e=>n(l(e),(t=>({...t,loaded:!0,...I[e]}))))),b(),f&&f({results:E.map(w),updatedAt:c})}}),A=e=>{const t=(0,l.e)(),a=(0,n.useMemo)((()=>t?t.getRoot():null),[t]),[,o]=(0,c.A)(),[,i]=(0,h.A)(e.key),s=(0,r.Zs)((e=>{let{snapshot:t,set:a}=e;return async(e,n)=>{let{getError:r,onFail:o}=n;const s=Date.now(),c=await t.getPromise(v);if(e.isCancel||c+1e4>s)throw e;throw r&&a(r(),(0,d.A)(e)),a(v,s),o?o(e):i(e),e}}),[e]),m=(0,r.Zs)((e=>{let{snapshot:t,set:a}=e;return async(e,n)=>{let{fetchResources:r,onReceive:o,getUpdatedAt:i,getError:c,clearRef:l,onFail:d,...u}=n;const g=await t.getPromise(i());return(l.promise=r(g),l.promise.catch((async e=>s(e,{onFail:d,getError:c})))).then((function(){let{data:e={}}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return(e=>{const t=Array.isArray(e),n=t?e:e.results,s=t?"":e.updatedAt;return null===o||void 0===o||o({...t?{results:n}:{results:[],...e},lastUpdated:g}),a(f(),{fetchResources:r,results:n,wasDiff:!!g,updatedAt:s,getUpdatedAt:i,getError:c,clearRef:l,onFail:d,...u}),new Promise((e=>setTimeout(e,0)))})(e)})).catch((t=>{if(t.isCancel||1===e)throw t;return m(--e,{fetchResources:r,onReceive:o,getUpdatedAt:i,getError:c,clearRef:l,onFail:d,...u})}))}}),[e]);return(0,n.useCallback)((t=>{const{polling:n=!0,fetch:r,association:i,pollingOptions:s,keepPolling:c,...l}=e,d={timeoutId:null,animationFrameId:null,promise:null,killed:!1},g=()=>m(2,{fetchResources:r,saveToCache:t,polling:n,clearRef:d,...i,...l}),p=()=>{d.killed||(d.timeoutId=setTimeout((()=>{d.animationFrameId=requestAnimationFrame((()=>!c&&a&&(a.getAttribute("hovering")||a.getAttribute("paused"))?p():c||o.current||a&&a.getAttribute("autofetchOnWindowBlur")?void g().then(p).catch((e=>!e.isCancel&&p())):p()))}),(null===s||void 0===s?void 0:s.pollingInterval)||u.A.pollingInterval))};return g().then(n?p:null).catch((e=>n&&!e.isCancel&&p())),()=>{var e,t;d.killed=!0,null===(e=d.promise)||void 0===e||null===(t=e.cancel)||void 0===t||t.call(e),cancelAnimationFrame(d.animationFrameId),clearTimeout(d.timeoutId)}}),[e])},y=(e,t)=>{const a=(0,p.t)(),o=(0,m.uW)("isAnonymous"),i=(0,n.useMemo)((()=>{const{key:t,...n}=e();return{key:"".concat(a).concat(t),...n}}),[e,...t,o]),s=A(i),c=(0,r.Zs)((e=>{let{set:t}=e;return e=>{var a;let{results:n=[],updatedAt:r=""}=e;const o=(Array.isArray(n)&&n.length)>0;return null===(a=i.onReceive)||void 0===a||a.call(i,{results:n,lastUpdated:""}),t(f(),{results:n,updatedAt:o?r:"",...i,...i.association,restored:o}),new Promise((e=>setTimeout(e,0)))}}),t),l=(0,r.Zs)((e=>{let{snapshot:t}=e;return()=>{const{association:{getLoaded:e},key:a,cache:n=!0,maxCacheAge:r,skip:o}=i,l=t.getLoadable(e()).contents;if(!o&&(l||!n))return s();const d=(0,g.A)(a,c,{maxAge:r||g.h});let u;return d.then((e=>{u=s(e)})).catch((()=>{})),()=>{var e;o||(d.abort(),null===(e=u)||void 0===e||e())}}}),t);return(0,n.useEffect)((()=>{const{autorun:e=!0,force:t=!1,skip:a=!1}=i;if((!o||t)&&!a&&e)return l()}),[o,l]),s}},87659:(e,t,a)=>{"use strict";a.d(t,{A:()=>r});a(62953);var n=a(96540);const r=function(e){let{on:t,off:a,toggle:r}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};const[o,i]=(0,n.useState)(!!e);return[o,(0,n.useCallback)((e=>i((n=>{const o="boolean"===typeof e?e:!n;return r&&r(o),t&&o&&t(),a&&!o&&a(),o}))),[r,t,a]),(0,n.useCallback)((()=>{i(!0),t&&t()}),[t]),(0,n.useCallback)((()=>{i(!1),a&&a()}),[a])]}},45467:(e,t,a)=>{"use strict";a.d(t,{A:()=>o});var n=a(96540),r=a(80862);const 
o=(e,t)=>{const a=(0,r.A)();(0,n.useEffect)((()=>{if(a.current)return e()}),t)}},56820:(e,t,a)=>{"use strict";a.d(t,{Ay:()=>S,Pb:()=>b,z1:()=>v});a(8159),a(98992),a(37550),a(62953);var n=a(96540),r=a(47767),o=a(45588),i=a(3914),s=a(14994),c=a(48388),l=a(81048),d=a(21591),u=a(69765),m=a(68980),g=a(68831),p=a(24198),h=a(37618);const v=e=>{const t=(0,r.Zp)(),a=(0,r.g)(),o=(0,u.r9)(),i=(0,m.zi)(e||a.dashboardSlug),s=(0,d.RQ)(),c=(0,n.useRef)(!1);(0,n.useEffect)((()=>{!0===s&&(i?c.current=!0:(t("".concat(o,"/dashboards")),c.current||(0,p.$j)()))}),[s,i])},{demoSlug:f,demoDefaultRoomViews:A,defaultRoomView:y}=g.A,b=e=>{var t;return e.length?e.some((e=>{let{slug:t}=e;return t===l.mL}))?l.mL:null===(t=e[0])||void 0===t?void 0:t.slug:null},S=()=>{(0,r.zy)();const e=(0,r.Zp)(),t=(0,r.RQ)({path:"/spaces/:spaceSlug/*"}),a=(0,r.RQ)({path:"/spaces/:spaceSlug/rooms/:roomSlug/*"}),l=(0,r.RQ)({path:"/*"}),d=(null===a||void 0===a?void 0:a.params)||(null===t||void 0===t?void 0:t.params)||(null===l||void 0===l?void 0:l.params),m=!!(0,r.RQ)({path:"/spaces/:spaceSlug/settings/*"}),g=(0,i.Ak)(d.spaceSlug),p=(0,i.UV)("loaded"),v=(0,u.J_)(g,d.roomSlug),S=(0,s.Y7)("loaded"),[k]=(0,i.Pk)(),I=(0,i.ns)(k,"slug"),{isFetching:w}=(0,c.A)(),E=(0,s.DL)(),x=(0,i.hX)(),C=(0,i.bq)();(0,n.useEffect)((()=>{if(w)return;if(!h.Ay&&(!p||"restored"===p))return;if(m&&g)return;if(g&&C!==d.spaceSlug)return void x(d.spaceSlug);if(g&&!S)return;if(g&&v)return;const t=g?d.spaceSlug:I,a=v?null===d||void 0===d?void 0:d.roomSlug:b(E);e(g&&S&&!a?(0,o.tW)("/spaces/:spaceSlug/no-rooms",{spaceSlug:t}):((e,t)=>{if(!t)return(0,o.tW)("/spaces/:spaceSlug/",{spaceSlug:e});const a=f===e?A[t]||A.default:h.Ay&&e===h.z0&&t===h.gB?h.kG:y;return(0,o.tW)("/spaces/:spaceSlug/rooms/:roomSlug/".concat(a),{spaceSlug:e,roomSlug:t})})(t,a),{replace:!0})}),[m,w,d.roomSlug,d.spaceSlug,v,S,!!E.length,p,g,I,C])}},18300:(e,t,a)=>{"use strict";a.d(t,{A:()=>r});a(62953);var n=a(96540);const r=function(){let e=!(arguments.length>0&&void 0!==arguments[0])||arguments[0];const[t,a]=(0,n.useState)(e),r=(0,n.useRef)(t);return(0,n.useEffect)((()=>{const e=()=>{r.current=!1,a(!1)};window.addEventListener("blur",e);const t=()=>{r.current=!0,a(!0)};return window.addEventListener("focus",t),()=>{window.removeEventListener("blur",e),window.removeEventListener("focus",t)}}),[]),[t,r]}},85919:(e,t,a)=>{"use strict";a(17333),a(98992),a(54520),a(62953);var n=a(96540),r=a(39225),o=a(5338),i=a(47444),s=a(84976),c=a(47767),l=a(12968),d=a(86641),u=a(80925),m=a(55337),g=a(24266),p=a(47731),h=a(28738),v=a(37618);const f=Boolean("localhost"===window.location.hostname||"[::1]"===window.location.hostname||window.location.hostname.match(/^127(?:\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}$/));function A(e,t){navigator.serviceWorker.register(e).then((e=>{a.g.registration=e,e.onupdatefound=()=>{const a=e.installing;null!=a&&(a.onstatechange=()=>{"installed"===a.state&&(navigator.serviceWorker.controller?(console.log("New content is available and will be used when all tabs for this page are closed. 
See http://bit.ly/CRA-PWA."),t&&t.onUpdate&&t.onUpdate(e)):(console.log("Content is cached for offline use."),t&&t.onSuccess&&t.onSuccess(e)))})}})).catch((e=>{console.error("Error during service worker registration:",e)}))}a(25440);var y=a(57971);const b=(e,t)=>(t.search=t.pathname===e.location.pathname?t.search:t.search||e.location.search,t.hash=t.hash||e.location.hash,t);function S(e){if(v.sA&&"/"!==v.sA&&e.startsWith(v.sA)){const t=v.sA+e.slice(v.sA.length).replace(new RegExp("^".concat(v.sA),"g"),"/");e=t}return e=e.replace(/\/+/g,"/")}const k=e=>"string"===typeof e?{pathname:S(e)}:{...e,pathname:S(e.pathname)},I=(0,y.zR)({window:window}),w=I.push,E=I.replace;I.push=(e,t)=>w.apply(I,[b(I,k(e)),t]),I.replace=(e,t)=>{E.apply(I,[b(I,k(e)),t])};const x=I,C=JSON.parse('{"UU":"cloud-frontend","rE":"6.138.3"}'),T=(0,r.A)((()=>Promise.all([a.e(4631),a.e(7840)]).then(a.bind(a,17840))),"Redirects"),N=(0,r.A)((()=>Promise.all([a.e(4631),a.e(7208),a.e(5596)]).then(a.bind(a,75596))),"SignIn"),R=(0,r.A)((()=>a.e(3350).then(a.bind(a,73350))),"Trust"),M=(0,r.A)((()=>a.e(9818).then(a.bind(a,89818))),"MobileApp"),D=(0,r.A)((()=>a.e(7529).then(a.bind(a,67529))),"AuthTokenCallback"),_=(0,r.A)((()=>Promise.all([a.e(4631),a.e(7208),a.e(1418)]).then(a.bind(a,11418))),"MagicLinkSent"),U=(0,r.A)((()=>a.e(1876).then(a.bind(a,41876))),"SignUpVerification"),P=(0,r.A)((()=>a.e(3621).then(a.bind(a,63621))),"CheckAuth"),L=(0,r.A)((()=>Promise.all([a.e(1396),a.e(7471)]).then(a.bind(a,86147))),"App"),K=(0,r.A)((()=>a.e(3736).then(a.bind(a,63736))),"GlobalStyles"),B=(0,r.A)((()=>a.e(683).then(a.bind(a,683))),"Notifications"),z=(0,r.A)((()=>Promise.all([a.e(7519),a.e(5794),a.e(8784)]).then(a.bind(a,38784)).then((e=>({default:e.AssistantAlerts})))),"AssistantAlerts"),F=(0,r.A)((()=>Promise.all([a.e(7519),a.e(5794),a.e(8784)]).then(a.bind(a,38784)).then((e=>({default:e.AssistantChat})))),"AssistantChat"),O=(0,r.A)((()=>a.e(7146).then(a.bind(a,57146))),"Webviews");window.envSettings.tracking?setTimeout((()=>{var e;const t=[!(null===(e=window)||void 0===e||null===(e=e.posthog)||void 0===e||!e.SentryIntegration)&&new window.posthog.SentryIntegration(window.posthog,"netdata-inc",5210883)].filter(Boolean);l.T({dsn:"https://43a4669badb04eee941ec8b242991df7@o382276.ingest.sentry.io/5210883",maxBreadcrumbs:100,release:"".concat(C.UU,"@").concat(C.rE),debug:!1,integrations:t,tracesSampleRate:.2,environment:window.envSettings.isAgent?"agent":window.envSettings.nodeEnv||"agent",denyUrls:[],autoSessionTracking:!1,beforeSend(e,t){var a;let{originalException:n={}}=t;if(!/netdata\.cloud/.test(null===e||void 0===e||null===(a=e.request)||void 0===a||null===(a=a.headers)||void 0===a?void 0:a.Referer))return null;if(!n)return e;const{response:r={},isCancel:o=!1}=n;if(o)return null;const{status:i}=r;return 400===i||422===i?e:i>=400&&i<=599?null:e},ignoreErrors:["Non-Error exception captured","Non-Error promise rejection captured","Request aborted",/ResizeObserver/,"timeout exceeded","this.get_config is not a function",/IndexSizeError/,"Invalid time value","not_found",/A mutation operation was attempted on a database that did not allow mutations/,/No data for this period/,/Network Error/]}),window.addEventListener("beforeunload",(function(){try{d.BF().getClient().getOptions().enabled=!1}catch(e){}}))}),500):console.log("Running in development mode version:",C.UU,C.rE);const j=(0,m.g)(g.H4,"light"),G=()=>null,W=()=>{const e=(0,p.J)();return 
n.createElement(i.bi,null,n.createElement(n.Suspense,{fallback:""},n.createElement(m.A,null,n.createElement(u.A,null,n.createElement(g.Ay,{fallback:j},n.createElement(n.Suspense,{fallback:""},n.createElement(K,{isScreenSmall:e}),n.createElement(B,null),n.createElement(z,null),n.createElement(F,null),n.createElement(s.rI,{className:"router",history:x,basename:v.Ay?v.sA:"/"},n.createElement(c.BV,null,n.createElement(c.qh,{path:"/webviews/*",element:n.createElement(G,null)}),n.createElement(c.qh,{path:"*",element:n.createElement(P,null)})),n.createElement(n.Suspense,{fallback:n.createElement(h.A,null)},n.createElement(c.BV,null,!v.Ay&&n.createElement(n.Fragment,null,n.createElement(c.qh,{path:"/sign-in",element:n.createElement(N,null)}),n.createElement(c.qh,{path:"/trust",element:n.createElement(R,null)}),n.createElement(c.qh,{path:"/sign-up",element:n.createElement(c.C5,{to:"/sign-in",replace:!0})}),n.createElement(c.qh,{path:"/sign-up/verify",element:n.createElement(U,null)}),n.createElement(c.qh,{path:"/sign-in/magic-link-sent",element:n.createElement(_,null)}),n.createElement(c.qh,{path:"/sign-in/mobile-app",element:n.createElement(M,null)}),n.createElement(c.qh,{path:"/webviews/*",element:n.createElement(O,null)})),n.createElement(c.qh,{path:"/cloud/origin/callback",element:n.createElement(D,null)}),n.createElement(c.qh,{path:"/redirects",element:n.createElement(T,null)}),n.createElement(c.qh,{path:"/spaces/test-form",element:n.createElement(U,null)}),n.createElement(c.qh,{path:"*",element:n.createElement(L,null)}))))))))))};var V;(0,o.H)(document.getElementById("app")).render(n.createElement(W,null)),"serviceWorker"in navigator&&window.addEventListener("load",(()=>{const e="".concat(window.envSettings.webpackPublicPath,"/sw.js");f?(function(e,t){fetch(e).then((a=>{const n=a.headers.get("content-type");404===a.status||null!=n&&-1===n.indexOf("javascript")?navigator.serviceWorker.ready.then((e=>{e.unregister().then((()=>{window.location.reload()}))})):A(e,t)})).catch((()=>{console.log("No internet connection found. App is running in offline mode.")}))}(e,V),navigator.serviceWorker.ready.then((()=>{console.log("This web app is being served cache-first by a service worker. 
To learn more, visit http://bit.ly/CRA-PWA")}))):A(e,V)}))},27752:(e,t,a)=>{"use strict";a(25440);a.p=window.envSettings.isAgent&&!window.envSettings.webpackPublicPath?window.envSettings.agentApiUrl+"/v2/":"".concat(window.envSettings.webpackPublicPath||"","/").replace(/([^:]\/)\/+/g,"$1")},98439:(e,t,a)=>{"use strict";a(62953);window.envSettings.tracking&&(window.posthog={},a.e(3624).then(a.bind(a,83624)).then((e=>{window.posthog=e.default,window.posthog.init(window.envSettings.posthogToken,{api_host:"https://app.posthog.com"})})),function(e,t,a,n,r){e[n]=e[n]||[],e[n].push({"gtm.start":(new Date).getTime(),event:"gtm.js"});var o=t.getElementsByTagName(a)[0],i=t.createElement(a);i.async=!0,i.src="https://www.googletagmanager.com/gtm.js?id=GTM-N6CBMJD",o.parentNode.insertBefore(i,o)}(window,document,"script","dataLayer"))},91069:(e,t,a)=>{"use strict";a.d(t,{t:()=>r});var n=a(78969);const r=e=>{let{spaceId:t}=e;return"".concat(n.P8,"/spaces/").concat(t,"/rooms?show_all=true")}},42634:()=>{}},e=>{e.O(0,[3524],(()=>{[3621,1396,7471].map(e.E)}),5);var t=t=>e(e.s=t);e.O(0,[3975,4292,2981,8637],(()=>(t(27752),t(98439),t(85919))));e.O()}]); \ No newline at end of file diff --git a/src/web/gui/v2/app.cb2e9f9a81cf9533384e.css b/src/web/gui/v2/app.cb2e9f9a81cf9533384e.css deleted file mode 100644 index 1dd170178..000000000 --- a/src/web/gui/v2/app.cb2e9f9a81cf9533384e.css +++ /dev/null @@ -1,2 +0,0 @@ -html{font-size:16px !important}body{margin:0}.react-grid-item{display:block;overflow-y:hidden;text-overflow:ellipsis;width:fit-content}.react-grid-item:hover{overflow-y:visible}.react-grid-item .netdata-container-with-legend{overflow:visible}.react-grid-item:hover{z-index:10}.react-grid-item>.react-resizable-handle::after{display:table-row;content:"";position:absolute;right:3px;bottom:3px;width:5px;height:5px;cursor:se-resize;border-right:1px solid rgba(0,0,0,0.4);border-bottom:1px solid rgba(0,0,0,0.4)}#hs-eu-cookie-confirmation{display:none} - diff --git a/src/web/gui/v2/apple-app-site-association b/src/web/gui/v2/apple-app-site-association deleted file mode 100644 index c4593e0c7..000000000 --- a/src/web/gui/v2/apple-app-site-association +++ /dev/null @@ -1,11 +0,0 @@ -{ - "applinks": { - "apps": [], - "details": [ - { - "appID": "2T8GZ986CU.cloud.netdata.ios", - "paths": [ "*" ] - } - ] - } -} diff --git a/src/web/gui/v2/bundlesManifest.6.json b/src/web/gui/v2/bundlesManifest.6.json deleted file mode 100644 index 96fc3d01b..000000000 --- a/src/web/gui/v2/bundlesManifest.6.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "app.css": "/app.cb2e9f9a81cf9533384e.css", - "app.js": "/app.08c9fe3ead1d43ff769b.js", - "runtime.js": "/runtime.ceccffb089cc539b1c1f.js", - "npm.react.dom.js": "/npm.react.dom.2994f1b4604bd8ce80f6.js", - "netdata.ui.js": "/netdata.ui.647a4c3303ee8ec0da64.js", - "netdata.charts.js": "/netdata.charts.fdfd27674ac5533bbcc2.js", - "8637.js": "/8637.0958494526e838a60d2b.js" -} \ No newline at end of file diff --git a/src/web/gui/v2/dashboard_v2.cmake b/src/web/gui/v2/dashboard_v2.cmake deleted file mode 100644 index 86e6e4c5d..000000000 --- a/src/web/gui/v2/dashboard_v2.cmake +++ /dev/null @@ -1,303 +0,0 @@ - - install(FILES src/web/gui/v2/1220.01d6bbaab869c74f4437.chunk.js -src/web/gui/v2/1396.56f70d7c659ac0b694cd.chunk.js -src/web/gui/v2/1396.56f70d7c659ac0b694cd.chunk.js.LICENSE.txt -src/web/gui/v2/1418.16d53ba5cce2c6a8143a.chunk.js -src/web/gui/v2/1782.d82eb301aa81b380dd0c.chunk.js -src/web/gui/v2/1839.a4196d2a87ac0fdd9f34.chunk.js -src/web/gui/v2/185.42bab351ba68de7ca4aa.chunk.js 
-src/web/gui/v2/1876.e610906417b961290730.chunk.js -src/web/gui/v2/195.4cdbea6af54d14a95949.chunk.js -src/web/gui/v2/2007.b33ce2b4b736228fd681.chunk.js -src/web/gui/v2/252.40edc9b0f6da1422f40b.chunk.js -src/web/gui/v2/3104.3b70865e21a81a616af3.chunk.js -src/web/gui/v2/3350.ae7151980981854dc3d1.chunk.js -src/web/gui/v2/3455.f9ca876de57244386773.chunk.js -src/web/gui/v2/3455.f9ca876de57244386773.chunk.js.LICENSE.txt -src/web/gui/v2/3621.01ee70ee9c311ac163d9.chunk.js -src/web/gui/v2/3624.bfeb1fdc3057ba82ddac.chunk.js -src/web/gui/v2/3736.e572adfdf7951f74a741.chunk.js -src/web/gui/v2/3750.4ad02f036f2a7c520b1c.chunk.js -src/web/gui/v2/3843.89070793921be1288bb5.css -src/web/gui/v2/3843.ffbb6f614ba4f7b77570.chunk.js -src/web/gui/v2/3968.483ca2ad3b300293e655.chunk.js -src/web/gui/v2/3D_PARTY_LICENSES.txt -src/web/gui/v2/4034.35199d2809d318eed690.chunk.js -src/web/gui/v2/4140.46221d08bcda08826c78.chunk.js -src/web/gui/v2/4140.89070793921be1288bb5.css -src/web/gui/v2/4414.590ba07d470ba2ce7dd0.chunk.js -src/web/gui/v2/4631.158982e127e11bdc6a45.chunk.js -src/web/gui/v2/4680.7d8122d91e9d4582836a.chunk.js -src/web/gui/v2/4958.5969fedc1ff7dc82775e.chunk.js -src/web/gui/v2/5246.07c5a1649f0805c140fe.chunk.js -src/web/gui/v2/5304.cc797fdd343c7e873b2f.chunk.js -src/web/gui/v2/5426.254557ad3e1f2d14ad29.chunk.js -src/web/gui/v2/5596.2036706750ff4028cff2.chunk.js -src/web/gui/v2/5598.07ff43a6b96bd41e8637.chunk.js -src/web/gui/v2/5700.b7c9908dc7f30a5a57e7.chunk.js -src/web/gui/v2/5709.c494eb62187917e2f2f6.chunk.js -src/web/gui/v2/5709.c494eb62187917e2f2f6.chunk.js.LICENSE.txt -src/web/gui/v2/5794.252ff787d58d64eb4988.chunk.js -src/web/gui/v2/6008.3d0636fe17f4f6274485.chunk.js -src/web/gui/v2/6121.f7286809e53e1c6d655a.chunk.js -src/web/gui/v2/6121.f7286809e53e1c6d655a.chunk.js.LICENSE.txt -src/web/gui/v2/6323.26d4d949c9b6f8674c2e.chunk.js -src/web/gui/v2/6331.89070793921be1288bb5.css -src/web/gui/v2/6331.c91b5d104cdff1be3b80.chunk.js -src/web/gui/v2/6384.0fad56b0bc902f186c98.chunk.js -src/web/gui/v2/6469.47926fa38028dc7d0d41.chunk.js -src/web/gui/v2/6469.89070793921be1288bb5.css -src/web/gui/v2/6661.72f782bd78fea8c2d836.chunk.js -src/web/gui/v2/6760.370b9780120c145da28f.chunk.js -src/web/gui/v2/683.02c173493ef257c210fa.chunk.js -src/web/gui/v2/683.cc9fa5f3bdc0bf3ab2fc.css -src/web/gui/v2/6944.ab3e70c9ac0f05013b5f.chunk.js -src/web/gui/v2/7144.382c341e09540fdebaa6.chunk.js -src/web/gui/v2/7144.382c341e09540fdebaa6.chunk.js.LICENSE.txt -src/web/gui/v2/7146.79304e386ac9238b7cf1.chunk.js -src/web/gui/v2/7170.5d6047bb6ce9d77d53db.chunk.js -src/web/gui/v2/7208.1d75cf5d007de32e403b.chunk.js -src/web/gui/v2/7304.ed4690ec296b59fbe7fd.chunk.js -src/web/gui/v2/7332.3acf93dcfa52c7f1bc18.chunk.js -src/web/gui/v2/7340.25dce1c5cc66b613700f.chunk.js -src/web/gui/v2/7436.1ebd371d70e6a87c5499.chunk.js -src/web/gui/v2/7471.f96c4d04a73fb7551c03.chunk.js -src/web/gui/v2/7487.89070793921be1288bb5.css -src/web/gui/v2/7487.db63c95c27d973a07d9b.chunk.js -src/web/gui/v2/749.e44087ac3a2e3a994318.chunk.js -src/web/gui/v2/749.e44087ac3a2e3a994318.chunk.js.LICENSE.txt -src/web/gui/v2/7519.7982a2e0fcdf82ba78dd.chunk.js -src/web/gui/v2/7529.658d363e12e73df83b60.chunk.js -src/web/gui/v2/7840.2f2023f2eb1dcc943d94.chunk.js -src/web/gui/v2/785.d016913841bcc0209d5b.chunk.js -src/web/gui/v2/7857.813ae058cca579e05462.chunk.js -src/web/gui/v2/7959.4f20f4b203e2bad8af39.chunk.js -src/web/gui/v2/8059.4fdc76bb2cac1f74b41b.chunk.js -src/web/gui/v2/8239.c85fc9f3599f198a9efb.chunk.js -src/web/gui/v2/8323.437406936b642e8f6cb3.chunk.js 
-src/web/gui/v2/8323.437406936b642e8f6cb3.chunk.js.LICENSE.txt -src/web/gui/v2/8323.e22de33686bb2f34063c.css -src/web/gui/v2/8505.c330f2104fefd71717da.chunk.js -src/web/gui/v2/86.2c88d4d37b88e2620051.chunk.js -src/web/gui/v2/8637.0958494526e838a60d2b.js -src/web/gui/v2/8637.0958494526e838a60d2b.js.LICENSE.txt -src/web/gui/v2/8784.a04e9c07186e1f057f56.chunk.js -src/web/gui/v2/8842.406028f523a00acb97bd.chunk.js -src/web/gui/v2/8910.019974f8675d8834dd07.chunk.js -src/web/gui/v2/8938.5116982f737a2ef85330.chunk.js -src/web/gui/v2/9292.cc5055091db9a0826933.chunk.js -src/web/gui/v2/934.24d6fdc5f60aa6493962.chunk.js -src/web/gui/v2/9400.6250bbf86c4fd3173de2.chunk.js -src/web/gui/v2/9473.4fd4742ffb6b5348bea8.chunk.js -src/web/gui/v2/963.35da4a3c4e49aac29dae.chunk.js -src/web/gui/v2/979.3e5fddf93c977e6c71c3.chunk.js -src/web/gui/v2/9818.3ce64e0b472412bfbc97.chunk.js -src/web/gui/v2/9843.93f8c71c64ef97b9905e.chunk.js -src/web/gui/v2/9912.702300c2dd9616289606.chunk.js -src/web/gui/v2/LICENSE.md -src/web/gui/v2/agent.html -src/web/gui/v2/allFiles.6.138.3.json -src/web/gui/v2/allFiles.6.json -src/web/gui/v2/app.08c9fe3ead1d43ff769b.js -src/web/gui/v2/app.cb2e9f9a81cf9533384e.css -src/web/gui/v2/apple-app-site-association -src/web/gui/v2/bundlesManifest.6.json -src/web/gui/v2/favicon.ico -src/web/gui/v2/index.html -src/web/gui/v2/local-agent.html -src/web/gui/v2/netdata.charts.fdfd27674ac5533bbcc2.js -src/web/gui/v2/netdata.ui.647a4c3303ee8ec0da64.js -src/web/gui/v2/netdata.ui.647a4c3303ee8ec0da64.js.LICENSE.txt -src/web/gui/v2/npm.react.dom.2994f1b4604bd8ce80f6.js -src/web/gui/v2/npm.react.dom.2994f1b4604bd8ce80f6.js.LICENSE.txt -src/web/gui/v2/registry-access.html -src/web/gui/v2/registry-alert-redirect.html -src/web/gui/v2/registry-hello.html -src/web/gui/v2/runtime.ceccffb089cc539b1c1f.js -src/web/gui/v2/sw.js COMPONENT netdata DESTINATION ${WEB_DEST}/v2) - install(FILES src/web/gui/v2/static/apple-app-site-association -src/web/gui/v2/static/splash.css COMPONENT netdata DESTINATION ${WEB_DEST}/v2/static) - install(FILES src/web/gui/v2/static/email/img/clea_badge.png -src/web/gui/v2/static/email/img/clea_siren.png -src/web/gui/v2/static/email/img/community_icon.png -src/web/gui/v2/static/email/img/configure_icon.png -src/web/gui/v2/static/email/img/crit_badge.png -src/web/gui/v2/static/email/img/crit_siren.png -src/web/gui/v2/static/email/img/flood_siren.png -src/web/gui/v2/static/email/img/full_logo.png -src/web/gui/v2/static/email/img/header.png -src/web/gui/v2/static/email/img/isotype_600.png -src/web/gui/v2/static/email/img/label_critical.png -src/web/gui/v2/static/email/img/label_recovered.png -src/web/gui/v2/static/email/img/label_warning.png -src/web/gui/v2/static/email/img/reachability_siren.png -src/web/gui/v2/static/email/img/warn_badge.png -src/web/gui/v2/static/email/img/warn_siren.png COMPONENT netdata DESTINATION ${WEB_DEST}/v2/static/email/img) - install(FILES src/web/gui/v2/static/img/list-style-image.svg -src/web/gui/v2/static/img/new-dashboard.svg -src/web/gui/v2/static/img/no-filter-results.png -src/web/gui/v2/static/img/no-nodes-room.svg -src/web/gui/v2/static/img/rack.png COMPONENT netdata DESTINATION ${WEB_DEST}/v2/static/img) - install(FILES src/web/gui/v2/static/img/logos/os/alpine.svg -src/web/gui/v2/static/img/logos/os/arch.svg -src/web/gui/v2/static/img/logos/os/centos.svg -src/web/gui/v2/static/img/logos/os/coreos.svg -src/web/gui/v2/static/img/logos/os/debian.svg -src/web/gui/v2/static/img/logos/os/docker.svg -src/web/gui/v2/static/img/logos/os/fedora.svg 
-src/web/gui/v2/static/img/logos/os/freebsd.svg -src/web/gui/v2/static/img/logos/os/freenas.svg -src/web/gui/v2/static/img/logos/os/gentoo.svg -src/web/gui/v2/static/img/logos/os/kubernetes.svg -src/web/gui/v2/static/img/logos/os/linux-small.svg -src/web/gui/v2/static/img/logos/os/linux.svg -src/web/gui/v2/static/img/logos/os/macos.svg -src/web/gui/v2/static/img/logos/os/manjaro.svg -src/web/gui/v2/static/img/logos/os/openstack.svg -src/web/gui/v2/static/img/logos/os/opensuse.svg -src/web/gui/v2/static/img/logos/os/openwrt.svg -src/web/gui/v2/static/img/logos/os/oracle.svg -src/web/gui/v2/static/img/logos/os/pfsense.svg -src/web/gui/v2/static/img/logos/os/placeholder.svg -src/web/gui/v2/static/img/logos/os/raspberry-pi.svg -src/web/gui/v2/static/img/logos/os/redhat.svg -src/web/gui/v2/static/img/logos/os/rocky.svg -src/web/gui/v2/static/img/logos/os/suse.svg -src/web/gui/v2/static/img/logos/os/ubuntu.svg COMPONENT netdata DESTINATION ${WEB_DEST}/v2/static/img/logos/os) - install(FILES src/web/gui/v2/static/img/logos/services/access-point.svg -src/web/gui/v2/static/img/logos/services/activemq.svg -src/web/gui/v2/static/img/logos/services/adaptec.svg -src/web/gui/v2/static/img/logos/services/alerta.svg -src/web/gui/v2/static/img/logos/services/apache.svg -src/web/gui/v2/static/img/logos/services/apc.svg -src/web/gui/v2/static/img/logos/services/aws-sns.svg -src/web/gui/v2/static/img/logos/services/aws.svg -src/web/gui/v2/static/img/logos/services/beanstalkd.svg -src/web/gui/v2/static/img/logos/services/boinc.svg -src/web/gui/v2/static/img/logos/services/btrfs.svg -src/web/gui/v2/static/img/logos/services/ceph.svg -src/web/gui/v2/static/img/logos/services/chrony.svg -src/web/gui/v2/static/img/logos/services/cloud.svg -src/web/gui/v2/static/img/logos/services/concul.svg -src/web/gui/v2/static/img/logos/services/consul.svg -src/web/gui/v2/static/img/logos/services/container.svg -src/web/gui/v2/static/img/logos/services/couchdb.svg -src/web/gui/v2/static/img/logos/services/cups.svg -src/web/gui/v2/static/img/logos/services/data-encryption.svg -src/web/gui/v2/static/img/logos/services/ddos.svg -src/web/gui/v2/static/img/logos/services/discord.svg -src/web/gui/v2/static/img/logos/services/dns.svg -src/web/gui/v2/static/img/logos/services/docker.svg -src/web/gui/v2/static/img/logos/services/dovecot.svg -src/web/gui/v2/static/img/logos/services/elasticsearch.svg -src/web/gui/v2/static/img/logos/services/email.svg -src/web/gui/v2/static/img/logos/services/exim.svg -src/web/gui/v2/static/img/logos/services/fail2ban.svg -src/web/gui/v2/static/img/logos/services/flock.svg -src/web/gui/v2/static/img/logos/services/fluentd.svg -src/web/gui/v2/static/img/logos/services/fping.svg -src/web/gui/v2/static/img/logos/services/freeradius.svg -src/web/gui/v2/static/img/logos/services/fronius.svg -src/web/gui/v2/static/img/logos/services/gnu-freeipmi.svg -src/web/gui/v2/static/img/logos/services/golang.svg -src/web/gui/v2/static/img/logos/services/grafana.svg -src/web/gui/v2/static/img/logos/services/graphite.svg -src/web/gui/v2/static/img/logos/services/haproxy.svg -src/web/gui/v2/static/img/logos/services/hub.svg -src/web/gui/v2/static/img/logos/services/icecast.svg -src/web/gui/v2/static/img/logos/services/influxdb.svg -src/web/gui/v2/static/img/logos/services/ipfs.svg -src/web/gui/v2/static/img/logos/services/irc.svg -src/web/gui/v2/static/img/logos/services/isc.svg -src/web/gui/v2/static/img/logos/services/kafka.svg -src/web/gui/v2/static/img/logos/services/kairosdb.svg 
-src/web/gui/v2/static/img/logos/services/kavenegar.svg -src/web/gui/v2/static/img/logos/services/key-file.svg -src/web/gui/v2/static/img/logos/services/kubernetes.svg -src/web/gui/v2/static/img/logos/services/libreswan.svg -src/web/gui/v2/static/img/logos/services/libvirt.svg -src/web/gui/v2/static/img/logos/services/lighthttpd.svg -src/web/gui/v2/static/img/logos/services/linux.svg -src/web/gui/v2/static/img/logos/services/litespeed.svg -src/web/gui/v2/static/img/logos/services/lm-sensors.svg -src/web/gui/v2/static/img/logos/services/load-balancer.svg -src/web/gui/v2/static/img/logos/services/log-file.svg -src/web/gui/v2/static/img/logos/services/logstash.svg -src/web/gui/v2/static/img/logos/services/lxd.svg -src/web/gui/v2/static/img/logos/services/mariadb.svg -src/web/gui/v2/static/img/logos/services/memcached.svg -src/web/gui/v2/static/img/logos/services/messagebird.svg -src/web/gui/v2/static/img/logos/services/mongodb.svg -src/web/gui/v2/static/img/logos/services/monit.svg -src/web/gui/v2/static/img/logos/services/monitoring.svg -src/web/gui/v2/static/img/logos/services/mysql.svg -src/web/gui/v2/static/img/logos/services/netfilter.svg -src/web/gui/v2/static/img/logos/services/network-protocol.svg -src/web/gui/v2/static/img/logos/services/network.svg -src/web/gui/v2/static/img/logos/services/nfs.svg -src/web/gui/v2/static/img/logos/services/nginx-plus.svg -src/web/gui/v2/static/img/logos/services/nginx.svg -src/web/gui/v2/static/img/logos/services/notification-bell.svg -src/web/gui/v2/static/img/logos/services/nsd.svg -src/web/gui/v2/static/img/logos/services/ntpd.svg -src/web/gui/v2/static/img/logos/services/nut.svg -src/web/gui/v2/static/img/logos/services/nvidia.svg -src/web/gui/v2/static/img/logos/services/openldap.svg -src/web/gui/v2/static/img/logos/services/opensips.svg -src/web/gui/v2/static/img/logos/services/opentsdb.svg -src/web/gui/v2/static/img/logos/services/openvpn.svg -src/web/gui/v2/static/img/logos/services/openzfs.svg -src/web/gui/v2/static/img/logos/services/oracle.svg -src/web/gui/v2/static/img/logos/services/pagerduty.svg -src/web/gui/v2/static/img/logos/services/php-fpm.svg -src/web/gui/v2/static/img/logos/services/placeholder.svg -src/web/gui/v2/static/img/logos/services/postfix.svg -src/web/gui/v2/static/img/logos/services/postgresql.svg -src/web/gui/v2/static/img/logos/services/powerdns.svg -src/web/gui/v2/static/img/logos/services/processor.svg -src/web/gui/v2/static/img/logos/services/prometheus.svg -src/web/gui/v2/static/img/logos/services/prowl.svg -src/web/gui/v2/static/img/logos/services/proxysql.svg -src/web/gui/v2/static/img/logos/services/puppet.svg -src/web/gui/v2/static/img/logos/services/pushbullet.svg -src/web/gui/v2/static/img/logos/services/pushover.svg -src/web/gui/v2/static/img/logos/services/qos.svg -src/web/gui/v2/static/img/logos/services/rabbitmq.svg -src/web/gui/v2/static/img/logos/services/raspberry-pi.svg -src/web/gui/v2/static/img/logos/services/redis.svg -src/web/gui/v2/static/img/logos/services/rethinkdb.svg -src/web/gui/v2/static/img/logos/services/retroshare.svg -src/web/gui/v2/static/img/logos/services/rocketchat.svg -src/web/gui/v2/static/img/logos/services/samba.svg -src/web/gui/v2/static/img/logos/services/server-connection.svg -src/web/gui/v2/static/img/logos/services/slack.svg -src/web/gui/v2/static/img/logos/services/sma.svg -src/web/gui/v2/static/img/logos/services/smstools3.svg -src/web/gui/v2/static/img/logos/services/solr.svg -src/web/gui/v2/static/img/logos/services/spigot.svg 
-src/web/gui/v2/static/img/logos/services/springboot.svg -src/web/gui/v2/static/img/logos/services/squid.svg -src/web/gui/v2/static/img/logos/services/statsd.svg -src/web/gui/v2/static/img/logos/services/stiebel.svg -src/web/gui/v2/static/img/logos/services/systemd.svg -src/web/gui/v2/static/img/logos/services/telegram.svg -src/web/gui/v2/static/img/logos/services/temperature.svg -src/web/gui/v2/static/img/logos/services/tomcat.svg -src/web/gui/v2/static/img/logos/services/tor.svg -src/web/gui/v2/static/img/logos/services/traefik.svg -src/web/gui/v2/static/img/logos/services/twilio.svg -src/web/gui/v2/static/img/logos/services/unbound.svg -src/web/gui/v2/static/img/logos/services/uwsgi.svg -src/web/gui/v2/static/img/logos/services/varnish.svg -src/web/gui/v2/static/img/logos/services/veritas.svg -src/web/gui/v2/static/img/logos/services/xen.svg COMPONENT netdata DESTINATION ${WEB_DEST}/v2/static/img/logos/services) - install(FILES src/web/gui/v2/static/img/mail/isotype.png -src/web/gui/v2/static/img/mail/isotype.svg -src/web/gui/v2/static/img/mail/logotype.png -src/web/gui/v2/static/img/mail/logotype.svg COMPONENT netdata DESTINATION ${WEB_DEST}/v2/static/img/mail) - install(FILES src/web/gui/v2/static/site/pages/holding-page-503/holding-page-503.css -src/web/gui/v2/static/site/pages/holding-page-503/holding-page-503.svg -src/web/gui/v2/static/site/pages/holding-page-503/index.html -src/web/gui/v2/static/site/pages/holding-page-503/multiple-logos-group.svg -src/web/gui/v2/static/site/pages/holding-page-503/netdata-logo-white.svg -src/web/gui/v2/static/site/pages/holding-page-503/reset.svg COMPONENT netdata DESTINATION ${WEB_DEST}/v2/static/site/pages/holding-page-503) diff --git a/src/web/gui/v2/favicon.ico b/src/web/gui/v2/favicon.ico deleted file mode 100644 index 59db5011b..000000000 Binary files a/src/web/gui/v2/favicon.ico and /dev/null differ diff --git a/src/web/gui/v2/index.html b/src/web/gui/v2/index.html deleted file mode 100644 index d3516de55..000000000 --- a/src/web/gui/v2/index.html +++ /dev/null @@ -1,245 +0,0 @@ -Netdata Agent Console

    Welcome to Netdata

    Loading latest Netdata UI...

    We couldn't load the latest Netdata UI. You can try again
    Or you can load the old single node dashboard or a local copy of Netdata UI
    \ No newline at end of file diff --git a/src/web/gui/v2/local-agent.html b/src/web/gui/v2/local-agent.html deleted file mode 100644 index d3516de55..000000000 --- a/src/web/gui/v2/local-agent.html +++ /dev/null @@ -1,245 +0,0 @@ -Netdata Agent Console

    Welcome to Netdata

    Loading latest Netdata UI...

    We couldn't load the latest Netdata UI. You can try again
    Or you can load the old single node dashboard or a local copy of Netdata UI
    \ No newline at end of file diff --git a/src/web/gui/v2/netdata.charts.fdfd27674ac5533bbcc2.js b/src/web/gui/v2/netdata.charts.fdfd27674ac5533bbcc2.js deleted file mode 100644 index 44ca83262..000000000 --- a/src/web/gui/v2/netdata.charts.fdfd27674ac5533bbcc2.js +++ /dev/null @@ -1 +0,0 @@ -!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=(new Error).stack;t&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[t]="a63eb6fa-d4c6-497f-9719-f27eca81b0d5",e._sentryDebugIdIdentifier="sentry-dbid-a63eb6fa-d4c6-497f-9719-f27eca81b0d5")}catch(e){}}();var _global="undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:{};_global.SENTRY_RELEASE={id:"30b1ba65cc8722be7e184f4a401fb43e6b21634d"},(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[2981],{19318:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var i=o(r(68652)),n=r(28973),s=o(r(58370));function o(e){return e&&e.__esModule?e:{default:e}}function a(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);t&&(i=i.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,i)}return r}function l(e){for(var t=1;t{function r(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);t&&(i=i.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,i)}return r}function i(e){for(var t=1;t{t.__esModule=!0,t.default=void 0;var i=u(r(68652)),n=r(28973),s=u(r(58370)),o=u(r(85872)),a=r(86263),l=u(r(99906)),c=u(r(19543));function u(e){return e&&e.__esModule?e:{default:e}}function f(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);t&&(i=i.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,i)}return r}function d(e){for(var t=1;t{var i,n,s,o=function(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=a(t);if(r&&r.has(e))return r.get(e);var i={__proto__:null},n=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var s in e)if("default"!==s&&{}.hasOwnProperty.call(e,s)){var o=n?Object.getOwnPropertyDescriptor(e,s):null;o&&(o.get||o.set)?Object.defineProperty(i,s,o):i[s]=e[s]}return i.default=e,r&&r.set(e,i),i}(r(19018));function a(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(a=function(e){return e?r:t})(e)}function l(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);t&&(i=i.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,i)}return r}function c(e){for(var t=1;te.x+e.w||t.x+t.we.y+e.h)},getColorShade:function(e,t){(e=String(e).replace(/[^0-9a-f]/gi,"")).length<6&&(e=e[0]+e[0]+e[1]+e[1]+e[2]+e[2]),t=t||0;for(var r="#",i=0;i<3;i++){var n=parseInt(e.substr(2*i,2),16);r+=("00"+(n=Math.round(Math.min(Math.max(0,n+n*t),255)).toString(16))).substr(n.length)}return r},initSegmentColors:function(e){for(var t=e.options.data.content,r=e.options.misc.colors.segments,i=[],n=0;n=t.value?(i.push(e[u]),n+=e[u].value,s++):(e[u].isGrouped=!1,r.push(e[u]))}else if("percentage"===t.valueType){var f=l.getTotalPieSize(e);for(u=0;u99?99:o)<0?0:o;var l=i0&&(l-=c)}a=Math.floor(l/100*o)/2}else 
a=parseInt(t.pieOuterRadius,10);/%/.test(t.pieInnerRadius)?(o=(o=(o=parseInt(t.pieInnerRadius.replace(/[\D]/,""),10))>99?99:o)<0?0:o,s=Math.floor(a/100*o)):s=parseInt(t.pieInnerRadius,10),e.innerRadius=s,e.outerRadius=a},getTotalPieSize:function(e){for(var t=0,r=0;rt.label.toLowerCase()?1:-1}));break;case"label-desc":t.sort((function(e,t){return e.label.toLowerCase()n.truncation.truncateLength&&(r=e.label.substring(0,n.truncation.truncateLength)+"..."),r})).style("font-size",n.mainLabel.fontSize).style("font-family",n.mainLabel.font).style("font-weight",n.mainLabel.fontWeight).style("fill",(function(t,r){return"segment"===n.mainLabel.color?e.options.colors[r]:n.mainLabel.color})),i.percentage&&o.append("text").attr("id",(function(r,i){return e.cssPrefix+"segmentPercentage"+i+"-"+t})).attr("class",e.cssPrefix+"segmentPercentage-"+t).text((function(e,t){var r=e.percentage;return n.formatter?(a.index=t,a.part="percentage",a.value=e.value,a.label=e.percentage,r=n.formatter(a)):r+="%",r})).style("font-size",n.percentage.fontSize).style("font-family",n.percentage.font).style("font-weight",n.percentage.fontWeight).style("fill",n.percentage.color),i.value&&o.append("text").attr("id",(function(r,i){return e.cssPrefix+"segmentValue"+i+"-"+t})).attr("class",e.cssPrefix+"segmentValue-"+t).text((function(e,t){return a.index=t,a.part="value",a.value=e.value,a.label=e.value,a.realLabel=e.label,n.formatter?n.formatter(a,e.value):e.value})).style("font-size",n.value.fontSize).style("font-family",n.value.font).style("font-weight",n.value.fontWeight).style("fill",n.value.color)},positionLabelElements:function(e,t,r){u["dimensions-"+t]=[],e.__labels[t].each((function(r,i){var n=o.select(this).selectAll("."+e.cssPrefix+"segmentMainLabel-"+t),s=o.select(this).selectAll("."+e.cssPrefix+"segmentPercentage-"+t),a=o.select(this).selectAll("."+e.cssPrefix+"segmentValue-"+t);u["dimensions-"+t].push({mainLabel:null!==n.node()?n.node().getBBox():null,percentage:null!==s.node()?s.node().getBBox():null,value:null!==a.node()?a.node().getBBox():null})}));var i=5,n=u["dimensions-"+t];switch(r){case"label-value1":e.svg.selectAll("."+e.cssPrefix+"segmentValue-"+t).attr("dx",(function(e,t){return n[t].mainLabel.width+i}));break;case"label-value2":e.svg.selectAll("."+e.cssPrefix+"segmentValue-"+t).attr("dy",(function(e,t){return n[t].mainLabel.height}));break;case"label-percentage1":e.svg.selectAll("."+e.cssPrefix+"segmentPercentage-"+t).attr("dx",(function(e,t){return n[t].mainLabel.width+i}));break;case"label-percentage2":e.svg.selectAll("."+e.cssPrefix+"segmentPercentage-"+t).attr("dx",(function(e,t){return n[t].mainLabel.width/2-n[t].percentage.width/2})).attr("dy",(function(e,t){return n[t].mainLabel.height}))}},computeLabelLinePositions:function(e){e.lineCoordGroups=[],e.__labels.outer.each((function(t,r){return u.computeLinePosition(e,r)}))},computeLinePosition:function(e,t){var r,i,n,s,o=f.getSegmentAngle(t,e.options.data.content,e.totalSize,{midpoint:!0}),a=l.rotate(e.pieCenter.x,e.pieCenter.y-e.outerRadius,e.pieCenter.x,e.pieCenter.y,o),c=e.outerLabelGroupData[t].h/5,u=6,d=Math.floor(o/90),p=4;switch(2===d&&180===o&&(d=1),d){case 0:r=e.outerLabelGroupData[t].x-u-(e.outerLabelGroupData[t].x-u-a.x)/2,i=e.outerLabelGroupData[t].y+(a.y-e.outerLabelGroupData[t].y)/p,n=e.outerLabelGroupData[t].x-u,s=e.outerLabelGroupData[t].y-c;break;case 1:r=a.x+(e.outerLabelGroupData[t].x-a.x)/p,i=a.y+(e.outerLabelGroupData[t].y-a.y)/p,n=e.outerLabelGroupData[t].x-u,s=e.outerLabelGroupData[t].y-c;break;case 2:var 
b=e.outerLabelGroupData[t].x+e.outerLabelGroupData[t].w+u;r=a.x-(a.x-b)/p,i=a.y+(e.outerLabelGroupData[t].y-a.y)/p,n=e.outerLabelGroupData[t].x+e.outerLabelGroupData[t].w+u,s=e.outerLabelGroupData[t].y-c;break;case 3:var m=e.outerLabelGroupData[t].x+e.outerLabelGroupData[t].w+u;r=m+(a.x-m)/p,i=e.outerLabelGroupData[t].y+(a.y-e.outerLabelGroupData[t].y)/p,n=e.outerLabelGroupData[t].x+e.outerLabelGroupData[t].w+u,s=e.outerLabelGroupData[t].y-c}"straight"===e.options.labels.lines.style?e.lineCoordGroups[t]=[{x:a.x,y:a.y},{x:n,y:s}]:e.lineCoordGroups[t]=[{x:a.x,y:a.y},{x:r,y:i},{x:n,y:s}]},addLabelLines:function(e){var t=e.svg.insert("g","."+e.cssPrefix+"pieChart").attr("class",e.cssPrefix+"lineGroups").style("opacity",1).selectAll("."+e.cssPrefix+"lineGroup").data(e.lineCoordGroups).enter().append("g").attr("class",e.cssPrefix+"lineGroup"),r=o.line().curve(o.curveBasis).x((function(e){return e.x})).y((function(e){return e.y}));t.append("path").attr("d",r).attr("stroke",(function(t,r){return"segment"===e.options.labels.lines.color?e.options.colors[r]:e.options.labels.lines.color})).attr("stroke-width",1).attr("fill","none").style("opacity",(function(t,r){var i=e.options.labels.outer.hideWhenLessThanPercentage;return null!==i&&t.percentage0){var u=f.getSegmentAngle(i,e.options.data.content,e.totalSize,{midpoint:!0}),d=l.translate(e.pieCenter.x,e.pieCenter.y,e.innerRadius,u);c.x=d.x,c.y=d.y}var p=s.getDimensions(e.cssPrefix+"labelGroup"+i+"-inner"),b=p.w/2,m=p.h/4;n=c.x+(e.lineCoordGroups[i][0].x-c.x)/1.8,o=c.y+(e.lineCoordGroups[i][0].y-c.y)/1.8,n-=b,o+=m}return"translate("+n+","+o+")"}))},getIncludes:function(e){var t=!1,r=!1,i=!1;switch(e){case"label":t=!0;break;case"value":r=!0;break;case"percentage":i=!0;break;case"label-value1":case"label-value2":t=!0,r=!0;break;case"label-percentage1":case"label-percentage2":t=!0,i=!0}return{mainLabel:t,value:r,percentage:i}},computeOuterLabelCoords:function(e){e.__labels.outer.each((function(t,r){return u.getIdealOuterLabelPositions(e,r)})),u.resolveOuterLabelCollisions(e)},resolveOuterLabelCollisions:function(e){if("none"!==e.options.labels.outer.format){var t=e.options.data.content.length;u.checkConflict(e,0,"clockwise",t),u.checkConflict(e,t-1,"anticlockwise",t)}},checkConflict:function(e,t,r,i){var n,o;if(!(i<=1)){var a=e.outerLabelGroupData[t].hs;if(("clockwise"!==r||"right"===a)&&("anticlockwise"!==r||"left"===a)){var l="clockwise"===r?t+1:t-1,c=e.outerLabelGroupData[t],f=e.outerLabelGroupData[l],d={labelHeights:e.outerLabelGroupData[0].h,center:e.pieCenter,lineLength:e.outerRadius+e.options.labels.outer.pieDistance,heightChange:e.outerLabelGroupData[0].h+1};if("clockwise"===r){for(n=0;n<=t;n++)if(o=e.outerLabelGroupData[n],!u.isLabelHidden(e,n)&&s.rectIntersect(o,f)){u.adjustLabelPos(e,l,c,d);break}}else for(n=i-1;n>=t;n--)if(o=e.outerLabelGroupData[n],!u.isLabelHidden(e,n)&&s.rectIntersect(o,f)){u.adjustLabelPos(e,l,c,d);break}u.checkConflict(e,l,r,i)}}},isLabelHidden:function(e,t){var r=e.options.labels.outer.hideWhenLessThanPercentage;return null!==r&&d.percentageMath.abs(s)?Math.sqrt(i.lineLength*i.lineLength-s*s):Math.sqrt(s*s-i.lineLength*i.lineLength),o="right"===r.hs?i.center.x+n:i.center.x-n-e.outerLabelGroupData[t].w,e.outerLabelGroupData[t].x=o,e.outerLabelGroupData[t].y=a},getIdealOuterLabelPositions:function(e,t){var r=e.svg.select("#"+e.cssPrefix+"labelGroup"+t+"-outer").node();if(r){var 
i=r.getBBox(),n=f.getSegmentAngle(t,e.options.data.content,e.totalSize,{midpoint:!0}),s=e.pieCenter.x,o=e.pieCenter.y-(e.outerRadius+e.options.labels.outer.pieDistance),a=l.rotate(s,o,e.pieCenter.x,e.pieCenter.y,n),c="right";n>180?(a.x-=i.width+8,c="left"):a.x+=8,e.outerLabelGroupData[t]={x:a.x,y:a.y,w:i.width,h:i.height,hs:c}}}},f={effectMap:{none:o.easeLinear,bounce:o.easeBounce,linear:o.easeLinear,sin:o.easeSin,elastic:o.easeElastic,back:o.easeBack,quad:o.easeQuad,circle:o.easeCircle,exp:o.easeExp},create:function(e){var t=e.pieCenter,r=e.options.colors,i=(e.options.effects.load,e.options.misc.colors.segmentStroke),n=e.svg.insert("g","#"+e.cssPrefix+"title").attr("transform",(function(){return l.getPieTranslateCenter(t)})).attr("class",e.cssPrefix+"pieChart"),s=o.arc().innerRadius(e.innerRadius).outerRadius(e.outerRadius).startAngle(0).endAngle((function(t){return t.value/e.totalSize*2*Math.PI}));n.selectAll("."+e.cssPrefix+"arc").data(e.options.data.content).enter().append("g").attr("class",e.cssPrefix+"arc").append("path").attr("id",(function(t,r){return e.cssPrefix+"segment"+r})).attr("fill",(function(t,i){var n=r[i];return e.options.misc.gradient.enabled&&(n="url(#"+e.cssPrefix+"grad"+i+")"),n})).style("stroke",i).style("stroke-width",1).attr("data-index",(function(e,t){return t})).attr("d",s),e.svg.selectAll("g."+e.cssPrefix+"arc").attr("transform",(function(t,r){var i=0;return r>0&&(i=f.getSegmentAngle(r-1,e.options.data.content,e.totalSize)),"rotate("+i+")"})),e.arc=s},addGradients:function(e){var t=e.svg.append("defs").selectAll("radialGradient").data(e.options.data.content).enter().append("radialGradient").attr("gradientUnits","userSpaceOnUse").attr("cx",0).attr("cy",0).attr("r","120%").attr("id",(function(t,r){return e.cssPrefix+"grad"+r}));t.append("stop").attr("offset","0%").style("stop-color",(function(t,r){return e.options.colors[r]})),t.append("stop").attr("offset",e.options.misc.gradient.percentage+"%").style("stop-color",e.options.misc.gradient.color)},addSegmentEventHandlers:function(e){var t=e.svg.selectAll("."+e.cssPrefix+"arc");(t=t.merge(e.__labels.inner.merge(e.__labels.outer))).on("click",(function(){var t,r=o.select(this);if(r.attr("class")===e.cssPrefix+"arc")t=r.select("path");else{var i=r.attr("data-index");t=o.select("#"+e.cssPrefix+"segment"+i)}var n=t.attr("class")===e.cssPrefix+"expanded";f.onSegmentEvent(e,e.options.callbacks.onClickSegment,t,n),"none"!==e.options.effects.pullOutSegmentOnClick.effect&&(n?f.closeSegment(e,t.node()):f.openSegment(e,t.node()))})),t.on("mouseover",(function(){var t,r,i=o.select(this);if(i.attr("class")===e.cssPrefix+"arc"?t=i.select("path"):(r=i.attr("data-index"),t=o.select("#"+e.cssPrefix+"segment"+r)),e.options.effects.highlightSegmentOnMouseover){r=t.attr("data-index");var n=e.options.colors[r];t.style("fill",s.getColorShade(n,e.options.effects.highlightLuminosity))}e.options.tooltips.enabled&&(r=t.attr("data-index"),b.showTooltip(e,r));var a=t.attr("class")===e.cssPrefix+"expanded";f.onSegmentEvent(e,e.options.callbacks.onMouseoverSegment,t,a)})),t.on("mousemove",(function(){b.moveTooltip(e)})),t.on("mouseout",(function(){var t,r,i=o.select(this);if(i.attr("class")===e.cssPrefix+"arc"?t=i.select("path"):(r=i.attr("data-index"),t=o.select("#"+e.cssPrefix+"segment"+r)),e.options.effects.highlightSegmentOnMouseover){r=t.attr("data-index");var 
n=e.options.colors[r];e.options.misc.gradient.enabled&&(n="url(#"+e.cssPrefix+"grad"+r+")"),t.style("fill",n)}e.options.tooltips.enabled&&(r=t.attr("data-index"),b.hideTooltip(e,r));var s=t.attr("class")===e.cssPrefix+"expanded";f.onSegmentEvent(e,e.options.callbacks.onMouseoutSegment,t,s)}))},onSegmentEvent:function(e,t,r,i){if(s.isFunction(t)){var n=parseInt(r.attr("data-index"),10);t({segment:r.node(),index:n,expanded:i,data:e.options.data.content[n]})}},openSegment:function(e,t){e.isOpeningSegment||(e.isOpeningSegment=!0,f.maybeCloseOpenSegment(e),o.select(t).transition().ease(f.effectMap[e.options.effects.pullOutSegmentOnClick.effect]).duration(e.options.effects.pullOutSegmentOnClick.speed).attr("transform",(function(t,r){var i=e.arc.centroid(t),n=i[0],s=i[1],o=Math.sqrt(n*n+s*s),a=parseInt(e.options.effects.pullOutSegmentOnClick.size,10);return"translate("+n/o*a+","+s/o*a+")"})).on("end",(function(r,i){e.currentlyOpenSegment=t,e.isOpeningSegment=!1,o.select(t).attr("class",e.cssPrefix+"expanded")})))},maybeCloseOpenSegment:function(e){"undefined"!==typeof e&&e.svg.selectAll("."+e.cssPrefix+"expanded").size()>0&&f.closeSegment(e,e.svg.select("."+e.cssPrefix+"expanded").node())},closeSegment:function(e,t){o.select(t).transition().duration(400).attr("transform","translate(0,0)").on("end",(function(r,i){o.select(t).attr("class",""),e.currentlyOpenSegment=null}))},getCentroid:function(e){var t=e.getBBox();return{x:t.x+t.width/2,y:t.y+t.height/2}},getSegmentAngle:function(e,t,r,i){var n,s=a({compounded:!0,midpoint:!1},i),o=t[e].value;if(s.compounded){n=0;for(var l=0;l<=e;l++)n+=t[l].value}"undefined"===typeof n&&(n=o);var c=n/r*360;return s.midpoint&&(c-=o/r*360/2),c}},p={offscreenCoord:-1e4,addTitle:function(e){e.__title=e.svg.selectAll("."+e.cssPrefix+"title").data([e.options.header.title]).enter().append("text").text((function(e){return e.text})).attr("id",e.cssPrefix+"title").attr("class",e.cssPrefix+"title").attr("x",p.offscreenCoord).attr("y",p.offscreenCoord).attr("text-anchor",(function(){return"top-center"===e.options.header.location||"pie-center"===e.options.header.location?"middle":"left"})).attr("fill",(function(e){return e.color})).style("font-size",(function(e){return e.fontSize})).style("font-weight",(function(e){return e.fontWeight})).style("font-family",(function(e){return e.font}))},positionTitle:function(e){var t,r=e.textComponents,i=e.options.header.location,n=e.options.misc.canvasPadding,s=e.options.size.canvasWidth,o=e.options.header.titleSubtitlePadding;t="top-left"===i?n.left:(s-n.right)/2+n.left,t+=e.options.misc.pieCenterOffset.x;var a=n.top+r.title.h;"pie-center"===i&&(a=e.pieCenter.y,r.subtitle.exists?a=a-(r.title.h+o+r.subtitle.h)/2+r.title.h:a+=r.title.h/4),e.__title.attr("x",t).attr("y",a)},addSubtitle:function(e){var t=e.options.header.location;e.__subtitle=e.svg.selectAll("."+e.cssPrefix+"subtitle").data([e.options.header.subtitle]).enter().append("text").text((function(e){return e.text})).attr("x",p.offscreenCoord).attr("y",p.offscreenCoord).attr("id",e.cssPrefix+"subtitle").attr("class",e.cssPrefix+"subtitle").attr("text-anchor",(function(){return"top-center"===t||"pie-center"===t?"middle":"left"})).attr("fill",(function(e){return e.color})).style("font-size",(function(e){return e.fontSize})).style("font-weight",(function(e){return e.fontWeight})).style("font-family",(function(e){return e.font}))},positionSubtitle:function(e){var 
t,r=e.options.misc.canvasPadding,i=e.options.size.canvasWidth;t="top-left"===e.options.header.location?r.left:(i-r.right)/2+r.left,t+=e.options.misc.pieCenterOffset.x;var n=p.getHeaderHeight(e);e.__subtitle.attr("x",t).attr("y",n)},addFooter:function(e){e.__footer=e.svg.selectAll("."+e.cssPrefix+"footer").data([e.options.footer]).enter().append("text").text((function(e){return e.text})).attr("x",p.offscreenCoord).attr("y",p.offscreenCoord).attr("id",e.cssPrefix+"footer").attr("class",e.cssPrefix+"footer").attr("text-anchor",(function(){var t="left";return"bottom-center"===e.options.footer.location?t="middle":"bottom-right"===e.options.footer.location&&(t="left"),t})).attr("fill",(function(e){return e.color})).style("font-size",(function(e){return e.fontSize})).style("font-weight",(function(e){return e.fontWeight})).style("font-family",(function(e){return e.font}))},positionFooter:function(e){var t,r=e.options.footer.location,i=e.textComponents.footer.w,n=e.options.size.canvasWidth,s=e.options.size.canvasHeight,o=e.options.misc.canvasPadding;t="bottom-left"===r?o.left:"bottom-right"===r?n-i-o.right:n/2,e.__footer.attr("x",t).attr("y",s-o.bottom)},getHeaderHeight:function(e){var t;if(e.textComponents.title.exists){var r=e.textComponents.title.h+e.options.header.titleSubtitlePadding+e.textComponents.subtitle.h;t="pie-center"===e.options.header.location?e.pieCenter.y-r/2+r:r+e.options.misc.canvasPadding.top}else if("pie-center"===e.options.header.location){var i=e.options.misc.canvasPadding.bottom+e.textComponents.footer.h;t=(e.options.size.canvasHeight-i)/2+e.options.misc.canvasPadding.top+e.textComponents.subtitle.h/2}else t=e.options.misc.canvasPadding.top+e.textComponents.subtitle.h;return t}},b={addTooltips:function(e){var t=e.svg.insert("g").attr("class",e.cssPrefix+"tooltips");t.selectAll("."+e.cssPrefix+"tooltip").data(e.options.data.content).enter().append("g").attr("class",e.cssPrefix+"tooltip").attr("id",(function(t,r){return e.cssPrefix+"tooltip"+r})).style("opacity",0).append("rect").attr("rx",e.options.tooltips.styles.borderRadius).attr("ry",e.options.tooltips.styles.borderRadius).attr("x",-e.options.tooltips.styles.padding).attr("opacity",e.options.tooltips.styles.backgroundOpacity).style("fill",e.options.tooltips.styles.backgroundColor),t.selectAll("."+e.cssPrefix+"tooltip").data(e.options.data.content).append("text").attr("fill",(function(t){return e.options.tooltips.styles.color})).style("font-size",(function(t){return e.options.tooltips.styles.fontSize})).style("font-weight",(function(t){return e.options.tooltips.styles.fontWeight})).style("font-family",(function(t){return e.options.tooltips.styles.font})).text((function(t,r){var i=e.options.tooltips.string;return"caption"===e.options.tooltips.type&&(i=t.caption),b.replacePlaceholders(e,i,r,{label:t.label,value:t.value,percentage:t.percentage})})),t.selectAll("."+e.cssPrefix+"tooltip rect").attr("width",(function(t,r){return s.getDimensions(e.cssPrefix+"tooltip"+r).w+2*e.options.tooltips.styles.padding})).attr("height",(function(t,r){return s.getDimensions(e.cssPrefix+"tooltip"+r).h+2*e.options.tooltips.styles.padding})).attr("y",(function(t,r){return-s.getDimensions(e.cssPrefix+"tooltip"+r).h/2+1}))},showTooltip:function(e,t){var r=e.options.tooltips.styles.fadeInSpeed;b.currentTooltip===t&&(r=1),b.currentTooltip=t,o.select("#"+e.cssPrefix+"tooltip"+t).transition().duration(r).style("opacity",(function(){return 
1})),b.moveTooltip(e)},moveTooltip:function(e){o.selectAll("#"+e.cssPrefix+"tooltip"+b.currentTooltip).attr("transform",(function(t){var r=o.pointer(this.parentNode);return"translate("+(r[0]+e.options.tooltips.styles.padding+2)+","+(r[1]-2*e.options.tooltips.styles.padding-2)+")"}))},hideTooltip:function(e,t){o.select("#"+e.cssPrefix+"tooltip"+t).style("opacity",(function(){return 0})),o.select("#"+e.cssPrefix+"tooltip"+b.currentTooltip).attr("transform",(function(t,r){return"translate("+(e.options.size.canvasWidth+1e3)+","+(e.options.size.canvasHeight+1e3)+")"}))},replacePlaceholders:function(e,t,r,i){s.isFunction(e.options.tooltips.placeholderParser)&&e.options.tooltips.placeholderParser(r,i);var n=function(){return function(e){var t=arguments[1];return i.hasOwnProperty(t)?i[arguments[1]]:arguments[0]}};return t.replace(/\{(\w+)\}/g,n(i))}},m=function(s,l){if(this.element=s,"string"===typeof s){var c=s.replace(/^#/,"");this.element=document.getElementById(c)}var u={};a(!0,u,i,l),this.options=u,null!==this.options.misc.cssPrefix?this.cssPrefix=this.options.misc.cssPrefix:(this.cssPrefix="p"+r+"_",r++),n.initialCheck(this)&&(o.select(this.element).attr(e,t),y.call(this),g.call(this))};m.prototype.recreate=function(){n.initialCheck(this)&&(y.call(this),g.call(this))},m.prototype.redraw=function(){this.element.innerHTML="",g.call(this)},m.prototype.destroy=function(){this.element.innerHTML="",o.select(this.element).attr(e,null)},m.prototype.getOpenSegment=function(){var e=this.currentlyOpenSegment;if(null!==e&&"undefined"!==typeof e){var t=parseInt(o.select(e).attr("data-index"),10);return{element:e,index:t,data:this.options.data.content[t]}}return null},m.prototype.openSegment=function(e){(e=parseInt(e,10))<0||e>this.options.data.content.length-1||f.openSegment(this,o.select("#"+this.cssPrefix+"segment"+e).node())},m.prototype.closeSegment=function(){f.maybeCloseOpenSegment(this)},m.prototype.updateProp=function(e,t){switch(e){case"header.title.text":var r=s.processObj(this.options,e);s.processObj(this.options,e,t),o.select("#"+this.cssPrefix+"title").html(t),(""===r&&""!==t||""!==r&&""===t)&&this.redraw();break;case"header.subtitle.text":var i=s.processObj(this.options,e);s.processObj(this.options,e,t),o.select("#"+this.cssPrefix+"subtitle").html(t),(""===i&&""!==t||""!==i&&""===t)&&this.redraw();break;case"callbacks.onload":case"callbacks.onMouseoverSegment":case"callbacks.onMouseoutSegment":case"callbacks.onClickSegment":case"effects.pullOutSegmentOnClick.effect":case"effects.pullOutSegmentOnClick.speed":case"effects.pullOutSegmentOnClick.size":case"effects.highlightSegmentOnMouseover":case"effects.highlightLuminosity":s.processObj(this.options,e,t);break;default:s.processObj(this.options,e,t),this.destroy(),this.recreate()}};var y=function(){this.options.data.content=l.sortPieData(this),this.options.data.smallSegmentGrouping.enabled&&(this.options.data.content=s.applySmallSegmentGrouping(this.options.data.content,this.options.data.smallSegmentGrouping)),this.options.colors=s.initSegmentColors(this),this.totalSize=l.getTotalPieSize(this.options.data.content);for(var e=this.options.labels.percentage.decimalPlaces,t=0;t{t.__esModule=!0,t.default=void 0;var r={hover:"themeCrosshair",click:"themeNetdata",default:"themeCrosshair"},i={hover:[5,5],click:[2,2],default:[5,5]};t.default=function(e,t,n){void 0===n&&(n="hover");var s=e.getDygraph(),o=s.getArea().h,a=s.canvas_ctx_,l=e.chart.getPayload().data[t];if(Array.isArray(l)){var 
c=s.toDomXCoord(l[0]);s.setSelection(t),a.save(),a.beginPath(),a.setLineDash(i[n]),a.strokeStyle=e.chart.getThemeAttribute(r[n]),a.moveTo(c,0),a.lineTo(c,o),a.stroke(),a.closePath(),a.restore()}}},99447:(e,t)=>{t.__esModule=!0,t.default=void 0;t.default=function(e){var t,r,i,n,s=function(t,r){var i=function(t,r){if(!Array.isArray(r))return{};var i=t.offsetY;return i>e.getDygraph().getArea().h-10?{seriesName:"ANNOTATIONS"}:i<15?{seriesName:"ANOMALY_RATE"}:e.chart.getAttribute("chartType")?e.getDygraph().findStackedPoint(t.offsetX,t.offsetY):e.getDygraph().findClosestPoint(t.offsetX,t.offsetY)}(t,r),n=i.seriesName;if(n){var s=e.getDygraph().getPropertiesForSeries(n);if(s){var o=e.chart.getPayloadDimensionIds();if(null!=o&&o.length)return o[s.column-1]||s.name}}},o=function(o,a,l){if(n!==a){i=l,n=a,t=o.offsetX,r=o.offsetY;var c=s(o,l);c&&(e.sdk.trigger("highlightHover",e.chart,a,c),e.chart.trigger("highlightHover",a,c))}},a=function(o,a,l){if(n!==a){i=l,n=a,t=o.offsetX,r=o.offsetY;var c=s(o,l);e.sdk.trigger("highlightClick",e.chart,a,c),e.chart.trigger("highlightClick",a,c)}},l=function(o){if(!(Math.abs(o.offsetX-t)<5&&Math.abs(o.offsetY-r)<5)){t=o.offsetX,r=o.offsetY;var a=s(o,i);a&&(e.sdk.trigger("highlightHover",e.chart,n,a),e.chart.trigger("highlightHover",n,a))}},c=function(){e.sdk.trigger("highlightBlur",e.chart),e.chart.trigger("highlightBlur")},u=function(){r=null,i=null,n=null,e.off("highlightCallback",o),e.off("mousemove",l),e.off("mouseout",c),e.off("click",a)};return{toggle:function(t){return t?e.on("highlightCallback",o).on("mousemove",l).on("mouseout",c).on("click",a):u()},destroy:u}}},80509:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var i=b(r(62523)),n=b(r(68652)),s=b(r(85872)),o=b(r(58370)),a=r(22753),l=r(97005),c=r(21945),u=b(r(6076)),f=b(r(99447)),d=b(r(19243)),p=b(r(34297));function b(e){return e&&e.__esModule?e:{default:e}}function m(e){return function(e){if(Array.isArray(e))return y(e)}(e)||function(e){if("undefined"!==typeof Symbol&&null!=e[Symbol.iterator]||null!=e["@@iterator"])return Array.from(e)}(e)||function(e,t){if(!e)return;if("string"===typeof e)return y(e,t);var r=Object.prototype.toString.call(e).slice(8,-1);"Object"===r&&e.constructor&&(r=e.constructor.name);if("Map"===r||"Set"===r)return Array.from(e);if("Arguments"===r||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(r))return y(e,t)}(e)||function(){throw new TypeError("Invalid attempt to spread non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}()}function y(e,t){(null==t||t>e.length)&&(t=e.length);for(var r=0,i=new Array(t);r1&&j.length>1,stackedGraphNaNFill:"none",plotter:u,errorBars:_,axes:{x:o?{ticker:i.default.dateTicker,axisLabelFormatter:t.formatXAxis,axisLabelWidth:60}:{drawAxis:!1},y:l?h(h(h({},P&&{ticker:P}),{},{axisLabelFormatter:w},c&&{axisLabelWidth:c}),{},{pixelsPerLabel:15}):{drawAxis:!1}},ylabel:t.getAttribute("hasYlabel")&&t.getUnitSign({long:!0,withoutConversion:(0,a.isHeatmap)(n)})}},S=function(){var e=g.chart.getThemeAttribute("themeGridColor");return{axisLineColor:e,gridLineColor:e}},M=function(){var e=t.getPayloadDimensionIds(),r=t.getPayload().labels;if(null==e||!e.length||null==r||!r.length)return{visibility:!1};var i=r.length-e.length,n=Array(i>0?i:0).fill(!0),s=t.getAttribute("selectedLegendDimensions");return{visibility:[].concat(m(e.map(s.length?t.isDimensionVisible:function(){return!0})),m(n))}},D=function(){var 
e=t.getAttributes(),r=e.outOfLimits,i=e.getValueRange,n=e.staticValueRange,s=e.chartType,o=t.getPayload(),l=o.data,c=o.labels,u=t.getDateWindow(),f=r||0===l.length;return{file:f?[[0]]:l,labels:f?["X"]:c,dateWindow:u,valueRange:n||((0,a.isHeatmap)(s)?[0,t.getVisibleDimensionIds().length]:i(t,{dygraph:!0}))}},E=function(){return t.isSparkline()?{drawGrid:!1,drawAxis:!1,ylabel:void 0,yLabelWidth:0,highlightCircleSize:3,fillAlpha:1,strokeWidth:0}:null},T=function(){var e=t.getPayloadDimensionIds();return e.length?{colors:e.map(t.selectDimensionColor)}:{}},L=h(h({},g),{},{getChartWidth:function(){return _?_.getArea().w:g.getChartWidth()},getChartHeight:function(){return _?_.getArea().h:100},getPreceded:function(){if(!_)return-1;if(1e3*t.getFirstEntry()<_.xAxisRange()[0])return-1;var e=_.xAxisExtremes()[0];return _.toDomXCoord(e)},mount:function(e){if(!_){g.mount(e);var n=t.getAttribute("theme");e.classList.add(n);var c=t.getAttributes(),u=t.getPayload(),f=u.data,d=u.labels;r=(0,s.default)();var b=c.outOfLimits||0===f.length;_=new i.default(e,b?[[0]]:f,h(h(h(h(h(h({legend:"never",showLabelsOnHighlight:!1,labels:b?["X"]:d,dateWindow:t.getDateWindow(),clickCallback:r.add((function(){for(var e=arguments.length,t=new Array(e),r=0;r{t.__esModule=!0,t.default=void 0;var i,n=r(81431),s=(i=r(10238))&&i.__esModule?i:{default:i};t.default=function(e){var t=function(t,r){return void 0===r&&(r=e.chart.getAttribute("navigation")),e.chart.updateAttributes({navigation:t,prevNavigation:r})},r=(0,n.debounce)(500,(function(e,t,r){e.moveX(t,r)})),i=e.on("mousedown",(function(e){e.shiftKey&&e.altKey?t("selectVertical"):e.altKey?t("highlight"):e.shiftKey&&t("select")})).on("mouseup",(function(){setTimeout((function(){var r=e.chart.getAttribute("prevNavigation");r&&t(r,null)}))})).on("wheel",(function(t,i){if(t.shiftKey||t.altKey){t.preventDefault(),t.stopPropagation();var n="number"!==typeof t.wheelDelta||Number.isNaN(t.wheelDelta)?-1.2*t.deltaY:t.wheelDelta/40,o=(t.detail?-1*t.detail:n)/50,a=t.offsetX||t.layerX-t.target.offsetLeft,l=function(e,t){var r=e.toDomCoords(e.xAxisRange()[0],null)[0],i=t-r,n=e.toDomCoords(e.xAxisRange()[1],null)[0]-r;return 0===n?0:i/n}(i,a);!function(t,n,o){o=o||.5;var a=t.xAxisRange(),l=a[0],c=a[1],u=(c-l)*n,f=u*o,d=u*(1-o),p=Math.round((l+f)/1e3),b=Math.round((c-d)/1e3),m=(0,s.default)({after:p,before:b}),y=m.fixedAfter,g=m.fixedBefore;y===l&&g===c||(r(e.chart,y,g),i.updateOptions({dateWindow:[1e3*y,1e3*g]}))}(i,o,l)}})).on("dblclick",e.chart.resetNavigation);return function(){return i()}}},6076:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var i=a(r(79513)),n=a(r(51434)),s=a(r(44047)),o=a(r(31412));function a(e){return e&&e.__esModule?e:{default:e}}var l={highlight:n.default,select:n.default,selectVertical:o.default,pan:s.default};t.default=function(e){var t,r,n=function(){null==t||t(),t=null},s=function(){n(),null==r||r(),r=null},o=function(i){var s;r&&(n(),t=null==(s=l[i])?void 0:s.call(l,e))};return{toggle:function(t,n){if(!t)return s();r=(0,i.default)(e),n&&o(n)},set:o,destroy:s}}},44047:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var i,n=(i=r(62523))&&i.__esModule?i:{default:i};t.default=function(e){var t=function(t,s,o){e.sdk.trigger("panStart",e.chart),o.initializeMouseDown(t,s,o),n.default.startPan(t,s,o),o.is2DPan=!1,e.on("mousemove",r).on("mouseout",i).on("mouseup",i).on("touchmove",r).on("touchend",i)},r=function(e,t,r){r.isPanning&&n.default.movePan(e,t,r)},i=function 
t(i,s,o){o.isPanning&&(n.default.endPan(i,s,o),o.destroy(),e.sdk.trigger("panEnd",e.chart,s.dateWindow_)),e.off("mousemove",r),e.off("mouseup",t),e.off("mouseout",t),e.off("touchmove",r),e.off("touchend",t)};return e.on("mousedown",t).on("touchstart",t)}},51434:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var i,n=(i=r(62523))&&i.__esModule?i:{default:i},s=r(43015);t.default=function(e){var t,r,i=function(e,t,i){if(i.isZooming){var n=t.canvas_ctx_,o=t.canvas_,a=t.getArea(),l=o.getBoundingClientRect(),c=l.left+a.x,u=(l.top,a.y,a.w);a.h;if(!(e.pageXc+u)){i.zoomMoved=!0,i.dragEndX=(0,s.dragGetX_)(e,i);var f=i.dragStartX,d=i.dragEndX;n.clearRect(0,0,o.width,o.height),n.fillStyle="rgba(128,128,128,0.3)",n.fillRect(Math.min(f,d),a.y,Math.abs(d-f),a.h),i.prevEndX=d,r=d}}},o=function n(s,o,a){if(a.isZooming){o.clearZoomRect_(),a.destroy();var l=function(e){return-1===r||Math.abs(t-r)<5?null:[Math.round(e.toDataXCoord(t)/1e3),Math.round(e.toDataXCoord(r)/1e3)].sort((function(e,t){return e-t}))}(o);e.sdk.trigger("highlightEnd",e.chart,l),e.chart.trigger("highlightEnd",l)}e.off("mousemove",i),e.off("mouseup",n)};return e.on("mousedown",(function(s,a,l){e.sdk.trigger("highlightStart",e.chart),l.initializeMouseDown(s,a,l),n.default.startZoom(s,a,l),t=l.dragStartX,r=-1,e.on("mousemove",i).on("mouseup",o)}))}},31412:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var i,n=(i=r(62523))&&i.__esModule?i:{default:i},s=r(43015);t.default=function(e){var t,r,i=function(e,t,i){if(i.isZooming){var n=t.canvas_ctx_,o=t.canvas_,a=o.getBoundingClientRect();if(!(e.pageYa.bottom)){i.zoomMoved=!0,i.dragEndY=(0,s.dragGetY_)(e,i);var l=i.dragStartY,c=i.dragEndY,u=t.getArea();n.clearRect(0,0,o.width,o.height),n.fillStyle="rgba(128,128,128,0.3)",n.fillRect(u.x,Math.min(l,c),u.w,Math.abs(c-l)),i.prevEndY=c,r=c}}},o=function n(s,o,a){if(a.isZooming){o.clearZoomRect_(),a.destroy();var l=-1===r||Math.abs(t-r)<5?null:[o.toDataYCoord(t),o.toDataYCoord(r)].sort((function(e,t){return e-t}));e.sdk.trigger("highlightVerticalEnd",e.chart,l)}e.off("mousemove",i),e.off("mouseup",n)};return e.on("mousedown",(function(s,a,l){e.sdk.trigger("highlightVerticalStart",e.chart),l.initializeMouseDown(s,a,l),n.default.startZoom(s,a,l),t=l.dragStartY,r=-1,e.on("mousemove",i).on("mouseup",o)}))}},72222:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var i=r(48090),n={warning:"#F9A825",critical:"#FF4136",clear:"#00AB44"};t.default=function(e,t){var r=e.chart.getAttribute("overlays")[t],s=r.when,o=r.status,a=e.getDygraph(),l=a.getArea().h,c=a.hidden_ctx_,u=(0,i.getArea)(a,[s,s]);if(!u)return(0,i.trigger)(e,t);var f=u.from;(0,i.trigger)(e,t,u),c.save(),c.beginPath(),c.moveTo(f-1,0),c.lineTo(f-1,l),c.globalAlpha=1,c.lineWidth=2,c.setLineDash([4,4]),c.strokeStyle=n[o],c.stroke(),c.closePath(),c.restore()}},69789:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var i=r(48090),n={warning:"#FFF8E1",critical:"#FFEBEF",clear:"#E5F5E8"},s={warning:"#FFC300",critical:"#F59B9B",clear:"#68C47D"},o={warning:"#F9A825",critical:"#FF4136",clear:"#00AB44"};t.default=function(e,t){var r=e.chart.getAttribute("overlays")[t],a=r.whenTriggered,l=r.whenLast,c=void 0===l?Math.floor((new Date).getTime()/1e3):l,u=r.status,f=e.getDygraph(),d=f.getArea().h,p=f.hidden_ctx_,b=(0,i.getArea)(f,[a,c]);if(!b)return(0,i.trigger)(e,t);var 
m=b.from,y=b.width,g=b.to;(0,i.trigger)(e,t,b),p.save(),p.beginPath(),p.rect(m,0,y,d-1),p.fillStyle=s[u],p.globalAlpha=.1,p.fill();p.beginPath(),p.moveTo(m,0),p.lineTo(m,d),p.globalAlpha=1,p.lineWidth=2,p.setLineDash([4,4]),p.strokeStyle=n[u],p.stroke(),p.beginPath(),p.moveTo(g-2,0),p.lineTo(g-2,d),p.strokeStyle=o[u],p.stroke(),p.closePath(),p.restore()}},48090:(e,t)=>{t.__esModule=!0,t.trigger=t.getArea=void 0;t.getArea=function(e,t){var r=e.xAxisRange(),i=r[0],n=r[1],s=1e3*t[0],o=1e3*t[1];if(on)return null;var a=Math.max(i,s),l=Math.min(n,o),c=e.toDomXCoord(a),u=e.toDomXCoord(l);return{from:c,to:u,width:u-c}},t.trigger=function(e,t,r){return requestAnimationFrame((function(){return e.trigger("overlayedAreaChanged:"+t,r)}))}},26559:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var i=r(48090);t.default=function(e,t){var r=e.chart.getAttribute("overlays")[t].range;if(r){var n=e.getDygraph(),s=n.getArea().h,o=n.hidden_ctx_,a=(0,i.getArea)(n,r);if(!a)return(0,i.trigger)(e,t);var l=a.from,c=a.width;(0,i.trigger)(e,t,a),o.save(),o.beginPath(),o.rect(l,0,c,s-1),o.fillStyle="rgba(207, 213, 218, 0.12)",o.fill(),o.beginPath(),o.rect(l,0,0,s-1),o.rect(l+c,0,0,s-1),o.fill(),o.setLineDash([2,7]),o.lineWidth=1,o.strokeStyle="#CFD5DA",o.stroke(),o.stroke(),o.closePath(),o.restore()}}},19243:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var i,n=(i=r(93226))&&i.__esModule?i:{default:i};t.default=function(e){var t=null,r=function(t){var r=e.chart.getAttribute("overlays")[t].type,i=n.default[r];i&&i(e,t)},i=function(){var t=e.chart.getAttribute("overlays");Object.keys(t).forEach(r)},s=function(){var t=e.getDygraph();t&&t.renderGraph_(!1)},o=function(){t&&(s(),t(),t=null)};return{toggle:function(){var r=e.chart.getAttribute("overlays");if(!Object.keys(r).length)return o();t?s():t=e.on("underlayCallback",i)},destroy:o}}},42980:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var i=r(48090);t.default=function(e,t){var r=e.getDygraph(),n=r.xAxisRange()[1],s=n/1e3,o=e.chart.getFirstEntry(),a=e.chart.getAttributes(),l=a.outOfLimits,c=a.error;if(l||o&&!(o>s)||c){var u=l||c?[n,n]:[o,o],f=(0,i.getArea)(r,u);(0,i.trigger)(e,t,f)}}},93226:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var i=a(r(72222)),n=a(r(69789)),s=a(r(26559)),o=a(r(42980));function a(e){return e&&e.__esModule?e:{default:e}}t.default={alarm:i.default,alarmRange:n.default,highlight:s.default,proceeded:o.default}},78051:(e,t,r)=>{t.__esModule=!0,t.default=void 0;var i=r(96380);function n(e){return function(e){if(Array.isArray(e))return s(e)}(e)||function(e){if("undefined"!==typeof Symbol&&null!=e[Symbol.iterator]||null!=e["@@iterator"])return Array.from(e)}(e)||function(e,t){if(!e)return;if("string"===typeof e)return s(e,t);var r=Object.prototype.toString.call(e).slice(8,-1);"Object"===r&&e.constructor&&(r=e.constructor.name);if("Map"===r||"Set"===r)return Array.from(e);if("Arguments"===r||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(r))return s(e,t)}(e)||function(){throw new TypeError("Invalid attempt to spread non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}()}function s(e,t){(null==t||t>e.length)&&(t=e.length);for(var r=0,i=new Array(t);r{t.__esModule=!0,t.default=void 0;var i=r(16199);t.default=function(e){return function(t){if(e&&"ANOMALY_RATE"===t.setName){var r=t.drawingContext,n=t.points;if(!n.length||!n[1])return;var 
[Minified webpack JavaScript bundle (machine-generated): Netdata dashboard charting code. Recoverable module inventory: dygraphs canvas plotters exported from module 97005 (makeLinePlotter via window.smoothPlotter, makeStackedBarPlotter, makeMultiColumnBarPlotter, makeHeatmapPlotter, makeAnomalyPlotter, makeAnnotationsPlotter); a darkenColor helper built on dygraphs' toRGB_; axis tickers (numericTicker, heatmapTicker); a Gauge.js-derived gauge renderer (animated value, pointer, percent-color stops, renderTicks, high-DPI displayScale handling) with a requestAnimationFrame-driven AnimationUpdater; fping chart label strings; and styled-components Button wrappers (button__Button, withTooltip). The minified text was truncated at every "<" character during extraction and cannot be reconstructed, so it is elided here.]
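Of the plotter modules, the stacked-bar plotter (webpack module 21471) survived extraction intact. Below is a hand de-minified, behaviour-preserving reading of it; the export name makeStackedBarPlotter is taken from the bundle's own index in module 97005, while the local variable names are substitutes for the mangled one-letter identifiers:

// Module 21471, de-minified. A dygraphs plotter is called once per series
// with a draw event carrying the 2D canvas context, the laid-out points,
// and the Dygraph instance.
function makeStackedBarPlotter() {
  return function plotter(e) {
    var ctx = e.drawingContext;
    var points = e.points;              // assumes at least two points
    var yZero = e.dygraph.toDomYCoord(0); // pixel y of the value 0
    ctx.fillStyle = e.color;
    // The gap between the first two points fixes the slot width;
    // each bar occupies two thirds of it.
    var slot = points[1].canvasx - points[0].canvasx;
    var barWidth = Math.floor((2 / 3) * slot);
    points.forEach(function (p) {
      var x = p.canvasx;
      // Fill and outline a rectangle from the point to the zero line.
      ctx.fillRect(x - barWidth / 2, p.canvasy, barWidth, yZero - p.canvasy);
      ctx.strokeRect(x - barWidth / 2, p.canvasy, barWidth, yZero - p.canvasy);
    });
  };
}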
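The darkenColor helper is also intact, and has a quirk worth preserving: despite its name it averages each channel with 255, i.e. it blends the colour halfway toward white and therefore lightens it. The parser it calls is whatever toRGB_ the bundle imports from dygraphs (module 62523); writing it as Dygraph.toRGB_ below is an assumption about the shape of that default export:

// De-minified darkenColor. (255 + c) / 2 moves every channel toward 255,
// so the result is a *lighter* shade of the input colour.
function darkenColor(colorStr) {
  var c = Dygraph.toRGB_(colorStr); // dygraphs' hex/rgb parser (assumed shape)
  c.r = Math.floor((255 + c.r) / 2);
  c.g = Math.floor((255 + c.g) / 2);
  c.b = Math.floor((255 + c.b) / 2);
  return "rgb(" + c.r + "," + c.g + "," + c.b + ")";
}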
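The gauge's value-to-colour mapping lost its loop headers to the truncation, but the interpolation body survived and matches the getColorForPercentage/getColorForValue pair in the upstream gauge.js library this module derives from. The following is a sketch under that assumption, with the stops passed in explicitly and the division guarded where the original would divide by zero when the first stop is matched in gradient mode:

// stops: [{ pct: 0..1, color: { r, g, b } }, ...] sorted by pct ascending.
function getColorForPercentage(stops, pct, gradient) {
  var color = stops[stops.length - 1].color; // fallback: last stop
  for (var i = 0; i < stops.length; i++) {
    if (pct <= stops[i].pct) {
      if (gradient) {
        // Linear blend between the stop below and the stop that matched.
        var lo = stops[i - 1] || stops[0];
        var hi = stops[i];
        var span = hi.pct - lo.pct;
        var t = span > 0 ? (pct - lo.pct) / span : 0; // guard added here
        color = {
          r: Math.floor(lo.color.r * (1 - t) + hi.color.r * t),
          g: Math.floor(lo.color.g * (1 - t) + hi.color.g * t),
          b: Math.floor(lo.color.b * (1 - t) + hi.color.b * t),
        };
      } else {
        color = stops[i].color;
      }
      break;
    }
  }
  return "rgb(" + [color.r, color.g, color.b].join(",") + ")";
}

// getColorForValue, per the intact line in the bundle, first normalises
// the raw reading into [0, 1]:
//   pct = (value - minValue) / (maxValue - minValue)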